Merge branch 'fixes' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'fixes' of master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: fix missing branch in __error_a
  ARM: fix /proc/$PID/stack on SMP
  ARM: Fix build regression on SA11x0, PXA, and H720x targets
  ARM: 6625/1: use memblock memory regions for "System RAM" I/O resources
  ARM: fix wrongly patched constants
  ARM: 6624/1: fix dependency for CONFIG_SMP_ON_UP
  ARM: 6623/1: Thumb-2: Fix out-of-range offset for Thumb-2 in proc-v7.S
  ARM: 6622/1: fix dma_unmap_sg() documentation
  ARM: 6621/1: bitops: remove condition code clobber for CLZ
  ARM: 6620/1: Change misleading warning when CONFIG_CMDLINE_FORCE is used
  ARM: 6619/1: nommu: avoid mapping vectors page when !CONFIG_MMU
  ARM: sched_clock: make minsec argument to clocks_calc_mult_shift() zero
  ARM: sched_clock: allow init_sched_clock() to be called early
  ARM: integrator: fix compile warning in cpu.c
  ARM: 6616/1: Fix ep93xx-fb init/exit annotations
  ARM: twd: fix display of twd frequency
  ARM: udelay: prevent math rounding resulting in short udelays
diff --git a/.mailmap b/.mailmap
index a62e6a8..581fd39 100644
--- a/.mailmap
+++ b/.mailmap
@@ -105,3 +105,4 @@
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Takashi YOSHII <takashi.yoshii.zj@renesas.com>
diff --git a/CREDITS b/CREDITS
index 494b6e4..1d39a6d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2811,8 +2811,8 @@
 N: Stelian Pop
 E: stelian@popies.net
 P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0  D3F7 7185 9E7A EDBB 6147
-D: sonypi, meye drivers, mct_u232 usb serial hacks
-S: Paris, France
+D: random kernel hacks
+S: Paimpont, France
 
 N: Pete Popov
 E: pete_popov@yahoo.com
diff --git a/Documentation/ABI/stable/thermal-notification b/Documentation/ABI/stable/thermal-notification
new file mode 100644
index 0000000..9723e8b
--- /dev/null
+++ b/Documentation/ABI/stable/thermal-notification
@@ -0,0 +1,4 @@
+What:		A notification mechanism for thermal related events
+Description:
+	This interface enables notification for thermal related events.
+	The notification is in the form of a netlink event.
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 9e4541d..edff663 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -26,3 +26,12 @@
 		scheduler is chosen. Trigger specific parameters can appear in
 		/sys/class/leds/<led> once a given trigger is selected.
 
+What:		/sys/class/leds/<led>/inverted
+Date:		January 2011
+KernelVersion:	2.6.38
+Contact:	Richard Purdie <rpurdie@rpsys.net>
+Description:
+		Invert the LED on/off state. This parameter is specific to
+		gpio and backlight triggers. In case of the backlight trigger,
+		it is useful when driving a LED which is intended to indicate
+		a device in a standby-like state.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
index 063bda7..698b808 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-kone
@@ -1,4 +1,4 @@
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_dpi
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/actual_dpi
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	It is possible to switch the dpi setting of the mouse with the
@@ -17,13 +17,13 @@
 
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_profile
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/actual_profile
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	When read, this file returns the number of the actual profile.
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/firmware_version
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/firmware_version
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	When read, this file returns the raw integer version number of the
@@ -33,7 +33,7 @@
 		left. E.g. a returned value of 138 means 1.38
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/profile[1-5]
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse can store 5 profiles which can be switched by the
@@ -48,7 +48,7 @@
 		stored in the profile doesn't need to fit the number of the
 		store.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/settings
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/settings
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	When read, this file returns the settings stored in the mouse.
@@ -58,7 +58,7 @@
 		The data has to be 36 bytes long. The mouse will reject invalid
 		data.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/startup_profile
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/startup_profile
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The integer value of this attribute ranges from 1 to 5.
@@ -67,7 +67,7 @@
 		When written, this file sets the number of the startup profile
 		and the mouse activates this profile immediately.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/tcu
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/tcu
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse has a "Tracking Control Unit" which lets the user
@@ -78,7 +78,7 @@
 		Writing 1 in this file will start the calibration which takes
 		around 6 seconds to complete and activates the TCU.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/weight
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/kone/roccatkone<minor>/weight
 Date:		March 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse can be equipped with one of four supplied weights
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
new file mode 100644
index 0000000..0f9f30e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
@@ -0,0 +1,108 @@
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/actual_profile
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns the number of the actual profile in
+		range 0-4.
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/firmware_version
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, this file returns the raw integer version number of the
+		firmware reported by the mouse. Using the integer value eases
+		further usage in other programs. To receive the real version
+		number the decimal point has to be shifted 2 positions to the
+		left. E.g. a returned value of 121 means 1.21
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/macro
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can store a macro of up to 500 key/button strokes
+		internally.
+		When written, this file lets one set the sequence for a specific
+		button for a specific profile. Button and profile numbers are
+		included in written data. The data has to be 2082 bytes long.
+		This file is writeonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile_buttons
+Date:		August 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can store 5 profiles which can be switched by the
+		press of a button. A profile is split into settings and buttons.
+		profile_buttons holds information about the button layout.
+		When written, this file lets one write the respective profile
+		buttons back to the mouse. The data has to be 77 bytes long.
+		The mouse will reject invalid data.
+		Which profile to write is determined by the profile number
+		contained in the data.
+		This file is writeonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_buttons
+Date:		August 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can store 5 profiles which can be switched by the
+		press of a button. A profile is split into settings and buttons.
+		profile_buttons holds information about the button layout.
+		When read, these files return the respective profile buttons.
+		The returned data is 77 bytes in size.
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile_settings
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can store 5 profiles which can be switched by the
+		press of a button. A profile is split into settings and buttons.
+		profile_settings holds information such as resolution,
+		sensitivity and light effects.
+		When written, this file lets one write the respective profile
+		settings back to the mouse. The data has to be 43 bytes long.
+		The mouse will reject invalid data.
+		Which profile to write is determined by the profile number
+		contained in the data.
+		This file is writeonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/profile[1-5]_settings
+Date:		August 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse can store 5 profiles which can be switched by the
+		press of a button. A profile is split into settings and buttons.
+		profile_settings holds information such as resolution,
+		sensitivity and light effects.
+		When read, these files return the respective profile settings.
+		The returned data is 43 bytes in size.
+		This file is readonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/sensor
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The mouse has a tracking control unit and a distance control
+		unit. These can be activated/deactivated and the lift-off
+		distance can be set. The data has to be 6 bytes long.
+		This file is writeonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/startup_profile
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	The integer value of this attribute ranges from 0-4.
+                When read, this attribute returns the number of the profile
+                that's active when the mouse is powered on.
+		When written, this file sets the number of the startup profile
+		and the mouse activates this profile immediately.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/tcu
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When written, a calibration process for the tracking control
+		unit can be initiated/cancelled.
+		The data has to be 3 bytes long.
+		This file is writeonly.
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/tcu_image
+Date:		October 2010
+Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
+Description:	When read, the mouse returns a 30x30 pixel image of the
+		sampled surface. This works only in the course of a
+		calibration process initiated with tcu.
+		The returned data is 1028 bytes in size.
+		This file is readonly.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
index ad1125b..1c37b82 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-pyra
@@ -1,4 +1,4 @@
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_cpi
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_cpi
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	It is possible to switch the cpi setting of the mouse with the
@@ -14,14 +14,14 @@
 
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/actual_profile
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/actual_profile
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	When read, this file returns the number of the actual profile in
 		range 0-4.
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/firmware_version
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/firmware_version
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	When read, this file returns the raw integer version number of the
@@ -31,7 +31,7 @@
 		left. E.g. a returned value of 138 means 1.38
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile_settings
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile_settings
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse can store 5 profiles which can be switched by the
@@ -45,7 +45,7 @@
 		contained in the data.
 		This file is writeonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]_settings
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_settings
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse can store 5 profiles which can be switched by the
@@ -56,7 +56,7 @@
 		The returned data is 13 bytes in size.
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile_buttons
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile_buttons
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse can store 5 profiles which can be switched by the
@@ -69,7 +69,7 @@
 		contained in the data.
 		This file is writeonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/profile[1-5]_buttons
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/profile[1-5]_buttons
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The mouse can store 5 profiles which can be switched by the
@@ -79,7 +79,7 @@
 		The returned data is 19 bytes in size.
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/startup_profile
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/startup_profile
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	The integer value of this attribute ranges from 0-4.
@@ -87,7 +87,7 @@
                 that's active when the mouse is powered on.
 		This file is readonly.
 
-What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/settings
+What:		/sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/pyra/roccatpyra<minor>/settings
 Date:		August 2010
 Contact:	Stefan Achatz <erazor_de@users.sourceforge.net>
 Description:	When read, this file returns the settings stored in the mouse.
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
new file mode 100644
index 0000000..807fca2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -0,0 +1,6 @@
+What:		/sys/devices/platform/ideapad/camera_power
+Date:		Dec 2010
+KernelVersion:	2.6.37
+Contact:	"Ike Panhc <ike.pan@canonical.com>"
+Description:
+		Control the power of the camera module. 1 means on, 0 means off.
diff --git a/Documentation/ABI/testing/sysfs-tty b/Documentation/ABI/testing/sysfs-tty
new file mode 100644
index 0000000..b138b66
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-tty
@@ -0,0 +1,19 @@
+What:		/sys/class/tty/console/active
+Date:		Nov 2010
+Contact:	Kay Sievers <kay.sievers@vrfy.org>
+Description:
+		 Shows the list of currently configured
+		 console devices, like 'tty1 ttyS0'.
+		 The last entry in the file is the active
+		 device connected to /dev/console.
+		 The file supports poll() to detect virtual
+		 console switches.
+
+What:		/sys/class/tty/tty0/active
+Date:		Nov 2010
+Contact:	Kay Sievers <kay.sievers@vrfy.org>
+Description:
+		 Shows the currently active virtual console
+		 device, like 'tty1'.
+		 The file supports poll() to detect virtual
+		 console switches.
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 03641a0..8906648 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -268,10 +268,6 @@
 !Finclude/net/mac80211.h ieee80211_ops
 !Finclude/net/mac80211.h ieee80211_alloc_hw
 !Finclude/net/mac80211.h ieee80211_register_hw
-!Finclude/net/mac80211.h ieee80211_get_tx_led_name
-!Finclude/net/mac80211.h ieee80211_get_rx_led_name
-!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
-!Finclude/net/mac80211.h ieee80211_get_radio_led_name
 !Finclude/net/mac80211.h ieee80211_unregister_hw
 !Finclude/net/mac80211.h ieee80211_free_hw
       </chapter>
@@ -382,6 +378,23 @@
         </para>
       </partintro>
 
+      <chapter id="led-support">
+        <title>LED support</title>
+        <para>
+         Mac80211 supports various ways of blinking LEDs. Wherever possible,
+         device LEDs should be exposed as LED class devices and hooked up to
+         the appropriate trigger, which will then be triggered appropriately
+         by mac80211.
+        </para>
+!Finclude/net/mac80211.h ieee80211_get_tx_led_name
+!Finclude/net/mac80211.h ieee80211_get_rx_led_name
+!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
+!Finclude/net/mac80211.h ieee80211_get_radio_led_name
+!Finclude/net/mac80211.h ieee80211_tpt_blink
+!Finclude/net/mac80211.h ieee80211_tpt_led_trigger_flags
+!Finclude/net/mac80211.h ieee80211_create_tpt_led_trigger
+      </chapter>
+
       <chapter id="hardware-crypto-offload">
         <title>Hardware crypto acceleration</title>
 !Pinclude/net/mac80211.h Hardware crypto acceleration
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 22edcbb..35447e0 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -304,6 +304,10 @@
 !Edrivers/input/ff-core.c
 !Edrivers/input/ff-memless.c
      </sect1>
+     <sect1><title>Multitouch Library</title>
+!Iinclude/linux/input/mt.h
+!Edrivers/input/input-mt.c
+     </sect1>
      <sect1><title>Polled input devices</title>
 !Iinclude/linux/input-polldev.h
 !Edrivers/input/input-polldev.c
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 020ac80..620eb3f 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -250,7 +250,7 @@
 		<title>Device ready function</title>
 		<para>
 			If the hardware interface has the ready busy pin of the NAND chip connected to a
-			GPIO or other accesible I/O pin, this function is used to read back the state of the
+			GPIO or other accessible I/O pin, this function is used to read back the state of the
 			pin. The function has no arguments and should return 0, if the device is busy (R/B pin 
 			is low) and 1, if the device is ready (R/B pin is high).
 			If the hardware interface does not give access to the ready busy pin, then
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 69dd29e..b2bea15 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -533,6 +533,33 @@
 Other Pieces
 ------------
 
+Get the detailed info related to the IPMI device
+------------------------------------------------
+
+Some users need more detailed information about a device, like where
+the address came from or the raw base device for the IPMI interface.
+You can use the IPMI smi_watcher to catch the IPMI interfaces as they
+come or go, and to grab the information, you can use the function
+ipmi_get_smi_info(), which returns the following structure:
+
+struct ipmi_smi_info {
+	enum ipmi_addr_src addr_src;
+	struct device *dev;
+	union {
+		struct {
+			void *acpi_handle;
+		} acpi_info;
+	} addr_info;
+};
+
+Currently, special info is returned only for SI_ACPI address
+sources.  Others may be added as necessary.
+
+Note that the dev pointer is included in the above structure, and
+assuming ipmi_get_smi_info() returns success, you must call put_device()
+on the dev pointer.
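+
+As an illustration only (this is a sketch, not code taken from the
+kernel sources; "if_num" is assumed to be an interface number the
+caller already knows), usage might look like:
+
+	struct ipmi_smi_info info;
+	int rv;
+
+	rv = ipmi_get_smi_info(if_num, &info);
+	if (rv)
+		return rv;	/* interface not present, or other error */
+
+	/* use info.addr_src, info.dev, info.addr_info here */
+
+	/* ipmi_get_smi_info() took a reference on info.dev; drop it. */
+	put_device(info.dev);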
+
+
 Watchdog
 --------
 
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
new file mode 100644
index 0000000..9146952
--- /dev/null
+++ b/Documentation/acpi/apei/output_format.txt
@@ -0,0 +1,122 @@
+                     APEI output format
+                     ~~~~~~~~~~~~~~~~~~
+
+APEI uses printk as the hardware error reporting interface; the
+output format is as follows.
+
+<error record> :=
+APEI generic hardware error status
+severity: <integer>, <severity string>
+section: <integer>, severity: <integer>, <severity string>
+flags: <integer>
+<section flags strings>
+fru_id: <uuid string>
+fru_text: <string>
+section_type: <section type string>
+<section data>
+
+<severity string>* := recoverable | fatal | corrected | info
+
+<section flags strings># :=
+[primary][, containment warning][, reset][, threshold exceeded]\
+[, resource not accessible][, latent error]
+
+<section type string> := generic processor error | memory error | \
+PCIe error | unknown, <uuid string>
+
+<section data> :=
+<generic processor section data> | <memory section data> | \
+<pcie section data> | <null>
+
+<generic processor section data> :=
+[processor_type: <integer>, <proc type string>]
+[processor_isa: <integer>, <proc isa string>]
+[error_type: <integer>
+<proc error type strings>]
+[operation: <integer>, <proc operation string>]
+[flags: <integer>
+<proc flags strings>]
+[level: <integer>]
+[version_info: <integer>]
+[processor_id: <integer>]
+[target_address: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[IP: <integer>]
+
+<proc type string>* := IA32/X64 | IA64
+
+<proc isa string>* := IA32 | IA64 | X64
+
+<processor error type strings># :=
+[cache error][, TLB error][, bus error][, micro-architectural error]
+
+<proc operation string>* := unknown or generic | data read | data write | \
+instruction execution
+
+<proc flags strings># :=
+[restartable][, precise IP][, overflow][, corrected]
+
+<memory section data> :=
+[error_status: <integer>]
+[physical_address: <integer>]
+[physical_address_mask: <integer>]
+[node: <integer>]
+[card: <integer>]
+[module: <integer>]
+[bank: <integer>]
+[device: <integer>]
+[row: <integer>]
+[column: <integer>]
+[bit_position: <integer>]
+[requestor_id: <integer>]
+[responder_id: <integer>]
+[target_id: <integer>]
+[error_type: <integer>, <mem error type string>]
+
+<mem error type string>* :=
+unknown | no error | single-bit ECC | multi-bit ECC | \
+single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
+target abort | parity error | watchdog timeout | invalid address | \
+mirror Broken | memory sparing | scrub corrected error | \
+scrub uncorrected error
+
+<pcie section data> :=
+[port_type: <integer>, <pcie port type string>]
+[version: <integer>.<integer>]
+[command: <integer>, status: <integer>]
+[device_id: <integer>:<integer>:<integer>.<integer>
+slot: <integer>
+secondary_bus: <integer>
+vendor_id: <integer>, device_id: <integer>
+class_code: <integer>]
+[serial number: <integer>, <integer>]
+[bridge: secondary_status: <integer>, control: <integer>]
+
+<pcie port type string>* := PCIe end point | legacy PCI end point | \
+unknown | unknown | root port | upstream switch port | \
+downstream switch port | PCIe to PCI/PCI-X bridge | \
+PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
+root complex event collector
+
+Where [] designates that the corresponding content is optional.
+
+All <field string> descriptions marked with * have the following format:
+
+field: <integer>, <field string>
+
+Where the value of <integer> is the position of the string in the <field
+string> description; otherwise, <field string> will be "unknown".
+
+All <field strings> descriptions marked with # have the following format:
+
+field: <integer>
+<field strings>
+
+Where each string in <field strings> corresponds to one set bit of
+<integer>. The bit position is the position of the string in the <field
+strings> description.
+
+For a more detailed explanation of every field, please refer to the
+UEFI specification, version 2.3 or later, Appendix N: Common
+Platform Error Record.
diff --git a/Documentation/arm/OMAP/omap_pm b/Documentation/arm/OMAP/omap_pm
index 5389440..9012bb0 100644
--- a/Documentation/arm/OMAP/omap_pm
+++ b/Documentation/arm/OMAP/omap_pm
@@ -127,3 +127,28 @@
 10. (*pdata->cpu_set_freq)(unsigned long f)
 
 11. (*pdata->cpu_get_freq)(void)
+
+Customizing OPP for platform
+============================
+Defining CONFIG_PM should enable the OPP layer for the silicon,
+and the registration of the OPP table should take place automatically.
+However, in special cases, the default OPP table may need to be
+tweaked, e.g. to:
+ * Enable default OPPs which are disabled by default, but which
+   could be enabled on a platform
+ * Disable an unsupported OPP on the platform
+ * Define and add a custom OPP table entry
+In these cases, the board file needs to do additional steps as follows:
+arch/arm/mach-omapx/board-xyz.c
+	#include "pm.h"
+	....
+	static void __init omap_xyz_init_irq(void)
+	{
+		....
+		/* Initialize the default table */
+		omapx_opp_init();
+		/* Do customization to the defaults */
+		....
+	}
+NOTE: omapx_opp_init will be omap3_opp_init (or similar) as
+required by the OMAP family.
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index d6da611..4ed7b5c 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -89,6 +89,33 @@
 
  Limits for writes can be put using blkio.write_bps_device file.
 
+Hierarchical Cgroups
+====================
+- Currently none of the IO control policies supports hierarchical groups.
+  But the cgroup interface does allow creation of hierarchical cgroups, and
+  internally the IO policies treat them as a flat hierarchy.
+
+  So creation of a cgroup hierarchy is allowed, but at the backend everything
+  will be treated as flat. So if somebody creates a hierarchy like
+  the following:
+
+			root
+			/  \
+		     test1 test2
+			|
+		     test3
+
+  CFQ and throttling will practically treat all groups at the same level.
+
+				pivot
+			     /  |   \  \
+			root  test1 test2  test3
+
+  Down the line we can implement hierarchical accounting/control support
+  and also introduce a new cgroup file "use_hierarchy" which will control
+  whether the cgroup hierarchy is viewed as flat or hierarchical by the
+  policy. This is how the memory controller has implemented it as well.
+
 Various user visible config options
 ===================================
 CONFIG_BLK_CGROUP
diff --git a/Documentation/cgroups/cgroup_event_listener.c b/Documentation/cgroups/cgroup_event_listener.c
index 8c2bfc4..3e082f9 100644
--- a/Documentation/cgroups/cgroup_event_listener.c
+++ b/Documentation/cgroups/cgroup_event_listener.c
@@ -91,7 +91,7 @@
 
 		if (ret == -1) {
 			perror("cgroup.event_control "
-					"is not accessable any more");
+					"is not accessible any more");
 			break;
 		}
 
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 190018b..44b8b7a 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -355,13 +355,13 @@
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
-# mount -o remount,cpuset,ns hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /dev/cgroup
 
-Now memory is removed from the hierarchy and ns is added.
+Now memory is removed from the hierarchy and blkio is added.
 
-Note this will add ns to the hierarchy but won't remove memory or
+Note this will add blkio to the hierarchy but won't remove memory or
 cpuset, because the new options are appended to the old ones:
-# mount -o remount,ns /dev/cgroup
+# mount -o remount,blkio /dev/cgroup
 
 To Specify a hierarchy's release_agent:
 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index b7eecec..fc8fa97 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -398,7 +398,7 @@
 	written to move_charge_at_immigrate.
 
  9.10 Memory thresholds
-	Memory controler implements memory thresholds using cgroups notification
+	Memory controller implements memory thresholds using cgroups notification
 	API. You can use Documentation/cgroups/cgroup_event_listener.c to test
 	it.
 
diff --git a/Documentation/coccinelle.txt b/Documentation/coccinelle.txt
index 4a276ea..96b6903 100644
--- a/Documentation/coccinelle.txt
+++ b/Documentation/coccinelle.txt
@@ -36,6 +36,10 @@
 
         sudo make install
 
+The semantic patches in the kernel will work best with Coccinelle version
+0.2.4 or later.  Using earlier versions may incur some parse errors in the
+semantic patch code, but any results that are obtained should still be
+correct.
 
  Using Coccinelle on the Linux kernel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 524de92..59293ac 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -8,7 +8,7 @@
 
 <cipher>
     Encryption cipher and an optional IV generation mode.
-    (In format cipher-chainmode-ivopts:ivmode).
+    (In format cipher[:keycount]-chainmode-ivopts:ivmode).
     Examples:
        des
        aes-cbc-essiv:sha256
@@ -20,6 +20,11 @@
     Key used for encryption. It is encoded as a hexadecimal number.
     You can only use key sizes that are valid for the selected cipher.
 
+<keycount>
+    Multi-key compatibility mode. You can define <keycount> keys and
+    then sectors are encrypted according to their offsets (sector 0 uses key0;
+    sector 1 uses key1 etc.).  <keycount> must be a power of two.
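+
+    For illustration, with the format above a two-key setup could use a
+    cipher specification such as:
+       aes:2-cbc-essiv:sha256
+    In that case the <key> field must contain all <keycount> keys
+    concatenated, i.e. it is <keycount> times the size of a single key.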
+
 <iv_offset>
     The IV offset is a sector count that is added to the sector number
     before creating the IV.
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
new file mode 100644
index 0000000..33b6b70
--- /dev/null
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -0,0 +1,70 @@
+Device-mapper RAID (dm-raid) is a bridge from DM to MD.  It
+provides a way to use device-mapper interfaces to access the MD RAID
+drivers.
+
+As with all device-mapper targets, the nominal public interfaces are the
+constructor (CTR) tables and the status outputs (both STATUSTYPE_INFO
+and STATUSTYPE_TABLE).  The CTR table looks like the following:
+
+1: <s> <l> raid \
+2:      <raid_type> <#raid_params> <raid_params> \
+3:      <#raid_devs> <meta_dev1> <dev1> .. <meta_devN> <devN>
+
+Line 1 contains the standard first three arguments to any device-mapper
+target - the start, length, and target type fields.  The target type in
+this case is "raid".
+
+Line 2 contains the arguments that define the particular raid
+type/personality/level, the required arguments for that raid type, and
+any optional arguments.  Possible raid types include: raid4, raid5_la,
+raid5_ls, raid5_rs, raid6_zr, raid6_nr, and raid6_nc.  (raid1 is
+planned for the future.)  The list of required and optional parameters
+is the same for all the current raid types.  The required parameters are
+positional, while the optional parameters are given as key/value pairs.
+The possible parameters are as follows:
+ <chunk_size>           Chunk size in sectors.
+ [[no]sync]             Force/Prevent RAID initialization
+ [rebuild <idx>]        Rebuild the drive indicated by the index
+ [daemon_sleep <ms>]    Time between bitmap daemon work to clear bits
+ [min_recovery_rate <kB/sec/disk>]      Throttle RAID initialization
+ [max_recovery_rate <kB/sec/disk>]      Throttle RAID initialization
+ [max_write_behind <sectors>]           See '--write-behind=' (man mdadm)
+ [stripe_cache <sectors>]               Stripe cache size for higher RAIDs
+
+Line 3 contains the list of devices that compose the array in
+metadata/data device pairs.  If the metadata is stored separately, a '-'
+is given for the metadata device position.  If a drive has failed or is
+missing at creation time, a '-' can be given for both the metadata and
+data drives for a given position.
+
+NB. Currently all metadata devices must be specified as '-'.
+
+Examples:
+# RAID4 - 4 data drives, 1 parity
+# No metadata devices specified to hold superblock/bitmap info
+# Chunk size of 1MiB
+# (Lines separated for easy reading)
+0 1960893648 raid \
+        raid4 1 2048 \
+        5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+
+# RAID4 - 4 data drives, 1 parity (no metadata devices)
+# Chunk size of 1MiB, force RAID initialization,
+#       min recovery rate at 20 kiB/sec/disk
+0 1960893648 raid \
+        raid4 4 2048 min_recovery_rate 20 sync \
+        5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+
+Performing a 'dmsetup table' should display the CTR table used to
+construct the mapping (with possible reordering of optional
+parameters).
+
+Performing a 'dmsetup status' will yield information on the state and
+health of the array.  The output is as follows:
+1: <s> <l> raid \
+2:      <raid_type> <#devices> <1 health char for each dev> <resync_ratio>
+
+Line 1 is standard DM output.  Line 2 is best shown by example:
+        0 1960893648 raid raid4 5 AAAAA 2/490221568
+Here we can see the RAID type is raid4, there are 5 devices - all of
+which are 'A'live, and recovery is 2/490221568 complete.
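+
+For example (a sketch only - the device name "my_raid4" is arbitrary and
+the table values are the illustrative ones from above, not from a real
+system), the first CTR table could be loaded and inspected with:
+
+echo "0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81" \
+        | dmsetup create my_raid4
+dmsetup table my_raid4
+dmsetup status my_raid4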
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 945ff3f..a0b58e2 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -104,6 +104,13 @@
 As an added bonus you can customise the message creation toolbar menu
 and put the "insert file" icon there.
 
+Make the composer window wide enough so that no lines wrap. As of
+KMail 1.13.5 (KDE 4.5.4), KMail will apply word wrapping when sending
+the email if the lines wrap in the composer window. Having word wrapping
+disabled in the Options menu isn't enough. Thus, if your patch has very
+long lines, you must make the composer window very wide before sending
+the email. See: https://bugs.kde.org/show_bug.cgi?id=174034
+
 You can safely GPG sign attachments, but inlined text is preferred for
 patches so do not GPG sign them.  Signing patches that have been inserted
 as inlined text will make them tricky to extract from their 7-bit encoding.
@@ -179,26 +186,8 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Thunderbird (GUI)
 
-By default, thunderbird likes to mangle text, but there are ways to
-coerce it into being nice.
-
-- Under account settings, composition and addressing, uncheck "Compose
-  messages in HTML format".
-
-- Edit your Thunderbird config settings to tell it not to wrap lines:
-      user_pref("mailnews.wraplength", 0);
-
-- Edit your Thunderbird config settings so that it won't use format=flowed:
-      user_pref("mailnews.send_plaintext_flowed", false);
-
-- You need to get Thunderbird into preformat mode:
-. If you compose HTML messages by default, it's not too hard. Just select
-  "Preformat" from the drop-down box just under the subject line.
-. If you compose in text by default, you have to tell it to compose a new
-  message in HTML (just as a one-off), and then force it from there back to
-  text, else it will wrap lines. To do this, use shift-click on the Write
-  icon to compose to get HTML compose mode, then select "Preformat" from
-  the drop-down box just under the subject line.
+Thunderbird is an Outlook clone that likes to mangle text, but there are ways
+to coerce it into behaving.
 
 - Allows use of an external editor:
   The easiest thing to do with Thunderbird and patches is to use an
@@ -208,6 +197,27 @@
   View->Toolbars->Customize... and finally just click on it when in the
   Compose dialog.
 
+To beat some sense out of the internal editor, do this:
+
+- Under account settings, composition and addressing, uncheck "Compose
+  messages in HTML format".
+
+- Edit your Thunderbird config settings so that it won't use format=flowed.
+  Go to "edit->preferences->advanced->config editor" to bring up the
+  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
+  "false".
+
+- Enable "preformat" mode: Shft-click on the Write icon to bring up the HTML
+  composer, select "Preformat" from the drop-down box just under the subject
+  line, then close the message without saving.  (This setting also applies to
+  the text composer, but the only control for it is in the HTML composer.)
+
+- Install the "toggle wordwrap" extension.  Download the file from:
+    https://addons.mozilla.org/thunderbird/addon/2351/
+  Then go to "tools->add ons", select "install" at the bottom of the screen,
+  and browse to where you saved the .xul file.  This adds an "Enable
+  Wordwrap" entry under the Options menu of the message composer.
+
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
 
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index f2742e1..8c594c4 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -193,6 +193,20 @@
 
 ---------------------------
 
+What:	CS5535/CS5536 obsolete GPIO driver
+When:	June 2011
+Files:	drivers/staging/cs5535_gpio/*
+Check:	drivers/staging/cs5535_gpio/cs5535_gpio.c
+Why:	A newer driver replaces this; it is drivers/gpio/cs5535-gpio.c, and
+	integrates with the Linux GPIO subsystem.  The old driver has been
+	moved to staging, and will be removed altogether around 2.6.40.
+	Please test the new driver, and ensure that the functionality you
+	need and any bugfixes from the old driver are available in the new
+	one.
+Who:	Andres Salomon <dilinger@queued.net>
+
+--------------------------
+
 What:	remove EXPORT_SYMBOL(kernel_thread)
 When:	August 2006
 Files:	arch/*/kernel/*_ksyms.c
@@ -234,6 +248,17 @@
 
 ---------------------------
 
+What:	CONFIG_ACPI_PROCFS_POWER
+When:	2.6.39
+Why:	sysfs I/F for ACPI power devices, including AC and Battery,
+	has been working in the upstream kernel since 2.6.24, Sep 2007.
+	In 2.6.37, the sysfs I/F was made always built-in and this option
+	was disabled by default.
+	Remove this option and the ACPI power procfs interface in 2.6.39.
+Who:	Zhang Rui <rui.zhang@intel.com>
+
+---------------------------
+
 What:	/proc/acpi/button
 When:	August 2007
 Why:	/proc/acpi/button has been replaced by events to the input layer
@@ -566,3 +591,23 @@
 Who:	Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
+
+What:	cancel_rearming_delayed_work[queue]()
+When:	2.6.39
+
+Why:	The functions have been superseded by cancel_delayed_work_sync()
+	quite some time ago.  The conversion is trivial and there is no
+	in-kernel user left.
+Who:	Tejun Heo <tj@kernel.org>
+
+----------------------------
+
+What:	Legacy, non-standard chassis intrusion detection interface.
+When:	June 2011
+Why:	The adm9240, w83792d and w83793 hardware monitoring drivers have
+	legacy interfaces for chassis intrusion detection. A standard
+	interface has been added to each driver, so the legacy interface
+	can be removed.
+Who:	Jean Delvare <khali@linux-fr.org>
+
+----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 33fa3e5..ef9349a 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -9,22 +9,25 @@
 
 --------------------------- dentry_operations --------------------------
 prototypes:
-	int (*d_revalidate)(struct dentry *, int);
-	int (*d_hash) (struct dentry *, struct qstr *);
-	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
+	int (*d_revalidate)(struct dentry *, struct nameidata *);
+	int (*d_hash)(const struct dentry *, const struct inode *,
+			struct qstr *);
+	int (*d_compare)(const struct dentry *, const struct inode *,
+			const struct dentry *, const struct inode *,
+			unsigned int, const char *, const struct qstr *);
 	int (*d_delete)(struct dentry *);
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);
 
 locking rules:
-		dcache_lock	rename_lock	->d_lock	may block
-d_revalidate:	no		no		no		yes
-d_hash		no		no		no		yes
-d_compare:	no		yes		no		no 
-d_delete:	yes		no		yes		no
-d_release:	no		no		no		yes
-d_iput:		no		no		no		yes
+		rename_lock	->d_lock	may block	rcu-walk
+d_revalidate:	no		no		yes (ref-walk)	maybe
+d_hash		no		no		no		maybe
+d_compare:	yes		no		no		maybe
+d_delete:	no		yes		no		no
+d_release:	no		no		yes		no
+d_iput:		no		no		yes		no
 d_dname:	no		no		no		no
 
 --------------------------- inode_operations --------------------------- 
@@ -44,8 +47,8 @@
 	void * (*follow_link) (struct dentry *, struct nameidata *);
 	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int, struct nameidata *);
-	int (*check_acl)(struct inode *, int);
+	int (*permission) (struct inode *, int, unsigned int);
+	int (*check_acl)(struct inode *, int, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -73,7 +76,7 @@
 put_link:	no
 truncate:	yes		(see below)
 setattr:	yes
-permission:	no
+permission:	no (may not block if called in rcu-walk mode)
 check_acl:	no
 getattr:	no
 setxattr:	yes
@@ -340,7 +343,6 @@
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
 	int (*fl_change)(struct file_lock **, int);
 
 locking rules:
@@ -350,7 +352,6 @@
 fl_grant:		no		no
 fl_release_private:	maybe		no
 fl_break:		yes		no
-fl_mylease:		yes		no
 fl_change		yes		no
 
 --------------------------- buffer_head -----------------------------------
diff --git a/Documentation/filesystems/dentry-locking.txt b/Documentation/filesystems/dentry-locking.txt
deleted file mode 100644
index 79334ed..0000000
--- a/Documentation/filesystems/dentry-locking.txt
+++ /dev/null
@@ -1,174 +0,0 @@
-RCU-based dcache locking model
-==============================
-
-On many workloads, the most common operation on dcache is to look up a
-dentry, given a parent dentry and the name of the child. Typically,
-for every open(), stat() etc., the dentry corresponding to the
-pathname will be looked up by walking the tree starting with the first
-component of the pathname and using that dentry along with the next
-component to look up the next level and so on. Since it is a frequent
-operation for workloads like multiuser environments and web servers,
-it is important to optimize this path.
-
-Prior to 2.5.10, dcache_lock was acquired in d_lookup and thus in
-every component during path look-up. Since 2.5.10 onwards, fast-walk
-algorithm changed this by holding the dcache_lock at the beginning and
-walking as many cached path component dentries as possible. This
-significantly decreases the number of acquisition of
-dcache_lock. However it also increases the lock hold time
-significantly and affects performance in large SMP machines. Since
-2.5.62 kernel, dcache has been using a new locking model that uses RCU
-to make dcache look-up lock-free.
-
-The current dcache locking model is not very different from the
-existing dcache locking model. Prior to 2.5.62 kernel, dcache_lock
-protected the hash chain, d_child, d_alias, d_lru lists as well as
-d_inode and several other things like mount look-up. RCU-based changes
-affect only the way the hash chain is protected. For everything else
-the dcache_lock must be taken for both traversing as well as
-updating. The hash chain updates too take the dcache_lock.  The
-significant change is the way d_lookup traverses the hash chain, it
-doesn't acquire the dcache_lock for this and rely on RCU to ensure
-that the dentry has not been *freed*.
-
-
-Dcache locking details
-======================
-
-For many multi-user workloads, open() and stat() on files are very
-frequently occurring operations. Both involve walking of path names to
-find the dentry corresponding to the concerned file. In 2.4 kernel,
-dcache_lock was held during look-up of each path component. Contention
-and cache-line bouncing of this global lock caused significant
-scalability problems. With the introduction of RCU in Linux kernel,
-this was worked around by making the look-up of path components during
-path walking lock-free.
-
-
-Safe lock-free look-up of dcache hash table
-===========================================
-
-Dcache is a complex data structure with the hash table entries also
-linked together in other lists. In 2.4 kernel, dcache_lock protected
-all the lists. We applied RCU only on hash chain walking. The rest of
-the lists are still protected by dcache_lock.  Some of the important
-changes are :
-
-1. The deletion from hash chain is done using hlist_del_rcu() macro
-   which doesn't initialize next pointer of the deleted dentry and
-   this allows us to walk safely lock-free while a deletion is
-   happening.
-
-2. Insertion of a dentry into the hash table is done using
-   hlist_add_head_rcu() which take care of ordering the writes - the
-   writes to the dentry must be visible before the dentry is
-   inserted. This works in conjunction with hlist_for_each_rcu(),
-   which has since been replaced by hlist_for_each_entry_rcu(), while
-   walking the hash chain. The only requirement is that all
-   initialization to the dentry must be done before
-   hlist_add_head_rcu() since we don't have dcache_lock protection
-   while traversing the hash chain. This isn't different from the
-   existing code.
-
-3. The dentry looked up without holding dcache_lock by cannot be
-   returned for walking if it is unhashed. It then may have a NULL
-   d_inode or other bogosity since RCU doesn't protect the other
-   fields in the dentry. We therefore use a flag DCACHE_UNHASHED to
-   indicate unhashed dentries and use this in conjunction with a
-   per-dentry lock (d_lock). Once looked up without the dcache_lock,
-   we acquire the per-dentry lock (d_lock) and check if the dentry is
-   unhashed. If so, the look-up is failed. If not, the reference count
-   of the dentry is increased and the dentry is returned.
-
-4. Once a dentry is looked up, it must be ensured during the path walk
-   for that component it doesn't go away. In pre-2.5.10 code, this was
-   done holding a reference to the dentry. dcache_rcu does the same.
-   In some sense, dcache_rcu path walking looks like the pre-2.5.10
-   version.
-
-5. All dentry hash chain updates must take the dcache_lock as well as
-   the per-dentry lock in that order. dput() does this to ensure that
-   a dentry that has just been looked up in another CPU doesn't get
-   deleted before dget() can be done on it.
-
-6. There are several ways to do reference counting of RCU protected
-   objects. One such example is in ipv4 route cache where deferred
-   freeing (using call_rcu()) is done as soon as the reference count
-   goes to zero. This cannot be done in the case of dentries because
-   tearing down of dentries require blocking (dentry_iput()) which
-   isn't supported from RCU callbacks. Instead, tearing down of
-   dentries happen synchronously in dput(), but actual freeing happens
-   later when RCU grace period is over. This allows safe lock-free
-   walking of the hash chains, but a matched dentry may have been
-   partially torn down. The checking of DCACHE_UNHASHED flag with
-   d_lock held detects such dentries and prevents them from being
-   returned from look-up.
-
-
-Maintaining POSIX rename semantics
-==================================
-
-Since look-up of dentries is lock-free, it can race against a
-concurrent rename operation. For example, during rename of file A to
-B, look-up of either A or B must succeed.  So, if look-up of B happens
-after A has been removed from the hash chain but not added to the new
-hash chain, it may fail.  Also, a comparison while the name is being
-written concurrently by a rename may result in false positive matches
-violating rename semantics.  Issues related to race with rename are
-handled as described below :
-
-1. Look-up can be done in two ways - d_lookup() which is safe from
-   simultaneous renames and __d_lookup() which is not.  If
-   __d_lookup() fails, it must be followed up by a d_lookup() to
-   correctly determine whether a dentry is in the hash table or
-   not. d_lookup() protects look-ups using a sequence lock
-   (rename_lock).
-
-2. The name associated with a dentry (d_name) may be changed if a
-   rename is allowed to happen simultaneously. To avoid memcmp() in
-   __d_lookup() go out of bounds due to a rename and false positive
-   comparison, the name comparison is done while holding the
-   per-dentry lock. This prevents concurrent renames during this
-   operation.
-
-3. Hash table walking during look-up may move to a different bucket as
-   the current dentry is moved to a different bucket due to rename.
-   But we use hlists in dcache hash table and they are
-   null-terminated.  So, even if a dentry moves to a different bucket,
-   hash chain walk will terminate. [with a list_head list, it may not
-   since termination is when the list_head in the original bucket is
-   reached].  Since we redo the d_parent check and compare name while
-   holding d_lock, lock-free look-up will not race against d_move().
-
-4. There can be a theoretical race when a dentry keeps coming back to
-   original bucket due to double moves. Due to this look-up may
-   consider that it has never moved and can end up in a infinite loop.
-   But this is not any worse that theoretical livelocks we already
-   have in the kernel.
-
-
-Important guidelines for filesystem developers related to dcache_rcu
-====================================================================
-
-1. Existing dcache interfaces (pre-2.5.62) exported to filesystem
-   don't change. Only dcache internal implementation changes. However
-   filesystems *must not* delete from the dentry hash chains directly
-   using the list macros like allowed earlier. They must use dcache
-   APIs like d_drop() or __d_drop() depending on the situation.
-
-2. d_flags is now protected by a per-dentry lock (d_lock). All access
-   to d_flags must be protected by it.
-
-3. For a hashed dentry, checking of d_count needs to be protected by
-   d_lock.
-
-
-Papers and other documentation on dcache locking
-================================================
-
-1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
-
-2. http://lse.sourceforge.net/locking/dcache/dcache.html
-
-
-
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index ac2a261..6ef8cf3 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -457,6 +457,9 @@
 
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.1.30:
+	- Fix writev() (it kept writing the first segment over and over again
+	  instead of moving onto subsequent segments).
 2.1.29:
 	- Fix a deadlock when mounting read-write.
 2.1.28:
diff --git a/Documentation/filesystems/path-lookup.txt b/Documentation/filesystems/path-lookup.txt
new file mode 100644
index 0000000..eb59c8b
--- /dev/null
+++ b/Documentation/filesystems/path-lookup.txt
@@ -0,0 +1,382 @@
+Path walking and name lookup locking
+====================================
+
+Path resolution is the process of finding the dentry corresponding to a path
+name string, by performing a path walk. Typically, for every open(), stat()
+etc., the path name will be resolved. Paths are resolved by walking the
+namespace tree, starting with the first component of the pathname (eg. root or
+cwd), which has a known dentry, then finding the child of that dentry which is
+named by the next component in the path string, then repeating the lookup from
+the child dentry, finding its child with the next element, and so on.
+
+Since it is a frequent operation for workloads like multiuser environments and
+web servers, it is important to optimize this code.
+
+Path walking synchronisation history:
+Prior to 2.5.10, dcache_lock was acquired in d_lookup (dcache hash lookup) and
+thus in every component during path look-up. Since 2.5.10 onwards, fast-walk
+algorithm changed this by holding the dcache_lock at the beginning and walking
+as many cached path component dentries as possible. This significantly
+decreases the number of acquisition of dcache_lock. However it also increases
+the lock hold time significantly and affects performance in large SMP machines.
+Since 2.5.62 kernel, dcache has been using a new locking model that uses RCU to
+make dcache look-up lock-free.
+
+All the above algorithms required taking a lock and reference count on the
+dentry that was looked up, so that may be used as the basis for walking the
+next path element. This is inefficient and unscalable. It is inefficient
+because the locks and atomic operations required for every dentry element
+slow things down. It is not scalable because many parallel applications that
+are path-walk intensive tend to do path lookups starting from a common dentry
+(usually, the root "/" or current working directory). So contention on these
+common path elements causes lock and cacheline queueing.
+
+Since 2.6.38, RCU is used to make a significant part of the entire path walk
+(including dcache look-up) completely "store-free" (so, no locks, atomics, or
+even stores into cachelines of common dentries). This is known as "rcu-walk"
+path walking.
+
+Path walking overview
+=====================
+
+A name string specifies a start (root directory, cwd, fd-relative) and a
+sequence of elements (directory entry names), which together refer to a path in
+the namespace. A path is represented as a (dentry, vfsmount) tuple. The name
+elements are sub-strings, separated by '/'.
+
+Name lookups will want to find a particular path that a name string refers to
+(usually the final element, or parent of final element). This is done by taking
+the path given by the name's starting point (which we know in advance -- eg.
+current->fs->cwd or current->fs->root) as the first parent of the lookup. Then
+iteratively for each subsequent name element, look up the child of the current
+parent with the given name and if it is not the desired entry, make it the
+parent for the next lookup.
+
+A parent, of course, must be a directory, and we must have appropriate
+permissions on the parent inode to be able to walk into it.
+
+Turning the child into a parent for the next lookup requires more checks and
+procedures. Symlinks essentially substitute the target name for the symlink
+name in the name string, and require some recursive path walking.  Mount points
+must be followed into (thus changing the vfsmount that subsequent path elements
+refer to), switching from the mount point path to the root of the particular
+mounted vfsmount. These behaviours are variously modified depending on the
+exact path walking flags.
+
+Path walking then must, broadly, do several particular things:
+- find the start point of the walk;
+- perform permissions and validity checks on inodes;
+- perform dcache hash name lookups on (parent, name element) tuples;
+- traverse mount points;
+- traverse symlinks;
+- lookup and create missing parts of the path on demand.
+
+Safe store-free look-up of dcache hash table
+============================================
+
+Dcache name lookup
+------------------
+In order to look up a dcache (parent, name) tuple, we take a hash of the tuple
+and use that to select a bucket in the dcache-hash table. The list of entries
+in that bucket is then walked, and we do a full comparison of each entry
+against our (parent, name) tuple.
+
+The hash lists are RCU protected, so list walking is not serialised with
+concurrent updates (insertion, deletion from the hash). This is a standard RCU
+list application with the exception of renames, which will be covered below.
+
+Parent and name members of a dentry, as well as its membership in the dcache
+hash, and its inode are protected by the per-dentry d_lock spinlock. A
+reference is taken on the dentry (while the fields are verified under d_lock),
+and this stabilises its d_inode pointer and actual inode. This gives a stable
+point to perform the next step of our path walk against.
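+
+As an illustration only -- the real fs/dcache.c walker uses bit-locked hash
+chains and is rather more careful -- the lookup is conceptually the following.
+'bucket', 'hash', 'parent' and 'name' are assumed to have been computed by the
+caller:
+
+	struct dentry *dentry, *found = NULL;
+	struct hlist_node *node;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(dentry, node, bucket, d_hash) {
+		if (dentry->d_name.hash != hash)
+			continue;
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_parent == parent &&
+		    dentry->d_name.len == name->len &&
+		    !memcmp(dentry->d_name.name, name->name, name->len)) {
+			dentry->d_count++;	/* reference taken under d_lock */
+			found = dentry;
+			spin_unlock(&dentry->d_lock);
+			break;
+		}
+		spin_unlock(&dentry->d_lock);
+	}
+	rcu_read_unlock();
+	/* 'found', if non-NULL, now holds a reference */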
+
+These members are also protected by d_seq seqlock, although this offers
+read-only protection and no durability of results, so care must be taken when
+using d_seq for synchronisation (see seqcount based lookups, below).
+
+Renames
+-------
+Back to the rename case. In usual RCU protected lists, the only operations
+that happen to an object are insertion and, eventually, removal from the
+list. The object will not be reused until an RCU grace period is complete.
+This ensures the RCU list traversal primitives can run over the object without
+problems (see RCU documentation for how this works).
+
+However when a dentry is renamed, its hash value can change, requiring it to be
+moved to a new hash list. Allocating and inserting a new alias would be
+expensive and also problematic for directory dentries. Latency would be far too
+high to wait for a grace period after removing the dentry and before inserting
+it in the new hash bucket. So what is done is to insert the dentry into the
+new list immediately.
+
+However, because the dentry's list pointers are updated to point to objects in
+the new list without waiting for a grace period, a concurrent RCU lookup of the
+old list can veer off into the new (incorrect) list and miss the remaining
+dentries on the old list.
+
+There is no fundamental problem with walking down the wrong list, because the
+dentry comparisons will never match. However it is fatal to miss a matching
+dentry. So a seqlock is used to detect when a rename has occurred, and so the
+lookup can be retried.
+
+         1      2      3
+        +---+  +---+  +---+
+hlist-->| N-+->| N-+->| N-+->
+head <--+-P |<-+-P |<-+-P |
+        +---+  +---+  +---+
+
+Rename of dentry 2 may require it to be deleted from the above list and
+inserted into a new list. Deleting 2 gives the following list.
+
+         1             3
+        +---+         +---+     (don't worry, the longer pointers do not
+hlist-->| N-+-------->| N-+->    impose a measurable performance overhead
+head <--+-P |<--------+-P |      on modern CPUs)
+        +---+         +---+
+          ^      2      ^
+          |    +---+    |
+          |    | N-+----+
+          +----+-P |
+               +---+
+
+This is a standard RCU-list deletion, which leaves the deleted object's
+pointers intact, so a concurrent list walker that is currently looking at
+object 2 will correctly continue to object 3 when it is time to traverse the
+next object.
+
+However, when inserting object 2 onto a new list, we end up with this:
+
+         1             3
+        +---+         +---+
+hlist-->| N-+-------->| N-+->
+head <--+-P |<--------+-P |
+        +---+         +---+
+                 2
+               +---+
+               | N-+---->
+          <----+-P |
+               +---+
+
+Because we didn't wait for a grace period, there may be a concurrent lookup
+still at 2. Now when it follows 2's 'next' pointer, it will walk off into
+another list without ever having checked object 3.
+
+A related, but distinctly different, issue is that of rename atomicity versus
+lookup operations. If a file is renamed from 'A' to 'B', a lookup must only
+find either 'A' or 'B'. So if a lookup of 'A' returns NULL, a subsequent lookup
+of 'B' must succeed (note the reverse is not true).
+
+Between deleting the dentry from the old hash list, and inserting it on the new
+hash list, a lookup may find neither 'A' nor 'B' matching the dentry. The same
+rename seqlock is also used to cover this race in much the same way, by
+retrying a negative lookup result if a rename was in progress.
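+
+The retry pattern looks roughly like this (it mirrors what d_lookup() in
+fs/dcache.c does around __d_lookup(); 'parent' and 'name' are the tuple being
+looked up):
+
+	struct dentry *dentry;
+	unsigned seq;
+
+	do {
+		seq = read_seqbegin(&rename_lock);
+		dentry = __d_lookup(parent, name);
+		if (dentry)
+			break;
+	} while (read_seqretry(&rename_lock, seq));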
+
+Seqcount based lookups
+----------------------
+In refcount based dcache lookups, d_lock is used to serialise access to
+the dentry, stabilising it while comparing its name and parent and then
+taking a reference count (the reference count then gives a stable place to
+start the next part of the path walk from).
+
+As explained above, we would like to do path walking without taking locks or
+reference counts on intermediate dentries along the path. To do this, a per
+dentry seqlock (d_seq) is used to take a "coherent snapshot" of what the dentry
+looks like (its name, parent, and inode). That snapshot is then used to start
+the next part of the path walk. When loading the coherent snapshot under d_seq,
+care must be taken to load the members up-front, and use those pointers rather
+than reloading from the dentry later on (otherwise we'd have interesting things
+like d_inode going NULL underneath us, if the name was unlinked).
+
+Also important is to avoid performing any destructive operations (pretty much:
+no non-atomic stores to shared data), and to recheck the seqcount when we are
+"done" with the operation. Retry or abort if the seqcount does not match.
+Avoiding destructive or changing operations means we can easily unwind from
+failure.
+
+What this means is that a caller, provided it holds the RCU read lock to
+protect the dentry object from disappearing, can perform a seqcount based
+lookup which does not increment the refcount on the dentry or write to
+it in any way. This returned dentry can be used for subsequent operations,
+provided that d_seq is rechecked after that operation is complete.
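+
+A minimal sketch of that pattern (variable names are arbitrary):
+
+	unsigned seq;
+	struct inode *inode;
+
+	rcu_read_lock();
+	do {
+		seq = read_seqcount_begin(&dentry->d_seq);
+		/* load the members we need up-front */
+		inode = dentry->d_inode;
+		/* ... use dentry->d_name, dentry->d_parent, inode ... */
+	} while (read_seqcount_retry(&dentry->d_seq, seq));
+	rcu_read_unlock();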
+
+Inodes are also rcu freed, so the seqcount lookup dentry's inode may also be
+queried for permissions.
+
+With these two parts of the puzzle, we can do path lookups without taking
+locks or refcounts on dentry elements.
+
+RCU-walk path walking design
+============================
+
+Path walking code now has two distinct modes, ref-walk and rcu-walk. ref-walk
+is the traditional[*] way of performing dcache lookups using d_lock to
+serialise concurrent modifications to the dentry and take a reference count on
+it. ref-walk is simple and obvious, and may sleep, take locks, etc while path
+walking is operating on each dentry. rcu-walk uses seqcount based dentry
+lookups, and can perform lookup of intermediate elements without any stores to
+shared data in the dentry or inode. rcu-walk cannot be applied to all cases;
+eg. if the filesystem must sleep or perform non-trivial operations, rcu-walk
+must be switched to ref-walk mode.
+
+[*] RCU is still used for the dentry hash lookup in ref-walk, but not the full
+    path walk.
+
+Where ref-walk uses a stable, refcounted "parent" to walk the remaining
+path string, rcu-walk uses a d_seq protected snapshot. When looking up a
+child of this parent snapshot, we open d_seq critical section on the child
+before closing d_seq critical section on the parent. This gives an interlocking
+ladder of snapshots to walk down.
+
+
+     proc 101
+      /----------------\
+     / comm:    "vi"    \
+    /  fs.root: dentry0  \
+    \  fs.cwd:  dentry2  /
+     \                  /
+      \----------------/
+
+So when vi wants to open("/home/npiggin/test.c", O_RDWR), it will
+start from current->fs->root, which is a pinned dentry. Alternatively,
+"./test.c" would start from cwd; both names refer to the same path in
+the context of proc101.
+
+     dentry 0
+    +---------------------+   rcu-walk begins here, we note d_seq, check the
+    | name:    "/"        |   inode's permission, and then look up the next
+    | inode:   10         |   path element which is "home"...
+    | children:"home", ...|
+    +---------------------+
+              |
+     dentry 1 V
+    +---------------------+   ... which brings us here. We find dentry1 via
+    | name:    "home"     |   hash lookup, then note d_seq and compare name
+    | inode:   678        |   string and parent pointer. When we have a match,
+    | children:"npiggin"  |   we now recheck the d_seq of dentry0. Then we
+    +---------------------+   check inode and look up the next element.
+              |
+     dentry2  V
+    +---------------------+   Note: if dentry0 is now modified, lookup is
+    | name:    "npiggin"  |   not necessarily invalid, so we need only keep a
+    | inode:   543        |   parent for d_seq verification, and grandparents
+    | children:"a.c", ... |   can be forgotten.
+    +---------------------+
+              |
+     dentry3  V
+    +---------------------+   At this point we have our destination dentry.
+    | name:    "a.c"      |   We now take its d_lock, verify d_seq of this
+    | inode:   14221      |   dentry. If that checks out, we can increment
+    | children:NULL       |   its refcount because we're holding d_lock.
+    +---------------------+
+
+Taking a refcount on a dentry from rcu-walk mode, by taking its d_lock,
+re-checking its d_seq, and then incrementing its refcount is called
+"dropping rcu" or dropping from rcu-walk into ref-walk mode.
+
+It is, in some sense, a bit of a house of cards. If the seqcount check of the
+parent snapshot fails, the house comes down, because we had closed the d_seq
+section on the grandparent, so we have nothing left to stand on. In that case,
+the path walk must be fully restarted (which we do in ref-walk mode, to avoid
+livelocks). A full restart is costly, but fortunately such restarts are
+quite rare.
+
+When we reach a point where sleeping is required, or a filesystem callout
+requires ref-walk, then instead of restarting the walk, we attempt to drop rcu
+at the last known good dentry we have. Avoiding a full restart in ref-walk in
+these cases is fundamental for performance and scalability because blocking
+operations such as creates and unlinks are not uncommon.
+
+The detailed design for rcu-walk is like this:
+* LOOKUP_RCU is set in nd->flags, which distinguishes rcu-walk from ref-walk.
+* Take the RCU lock for the entire path walk, starting with the acquiring
+  of the starting path (eg. root/cwd/fd-path). So now dentry refcounts are
+  not required for dentry persistence.
+* synchronize_rcu is called when unregistering a filesystem, so we can
+  access d_ops and i_ops during rcu-walk.
+* Similarly take the vfsmount lock for the entire path walk. So now mnt
+  refcounts are not required for persistence. Also we are free to perform mount
+  lookups, and to assume dentry mount points and mount roots are stable up and
+  down the path.
+* Have a per-dentry seqlock to protect the dentry name, parent, and inode,
+  so we can load this tuple atomically, and also check whether any of its
+  members have changed.
+* Dentry lookups (based on parent, candidate string tuple) recheck the parent
+  sequence after the child is found in case anything changed in the parent
+  during the path walk.
+* inode is also RCU protected so we can load d_inode and use the inode for
+  limited things.
+* i_mode, i_uid, i_gid can be tested for exec permissions during path walk.
+* i_op can be loaded.
+* When the destination dentry is reached, drop rcu there (ie. take d_lock,
+  verify d_seq, increment refcount).
+* If seqlock verification fails anywhere along the path, do a full restart
+  of the path lookup in ref-walk mode. -ECHILD tends to be used (for want of
+  a better errno) to signal an rcu-walk failure.
+
+The cases where rcu-walk cannot continue are:
+* NULL dentry (ie. any uncached path element)
+* Following links
+
+It may be possible eventually to make following links rcu-walk aware.
+
+Uncached path elements will always require dropping to ref-walk mode, at the
+very least because i_mutex needs to be grabbed, and objects allocated.
+
+Final note:
+"store-free" path walking is not strictly store free. We take vfsmount lock
+and refcounts (both of which can be made per-cpu), and we also store to the
+stack (which is essentially CPU-local), and we also have to take locks and a
+refcount on the final dentry.
+
+The point is that shared data, where practically possible, is not locked
+or stored into. The result is massive improvements in performance and
+scalability of path resolution.
+
+
+Interesting statistics
+======================
+
+The following table gives rcu lookup statistics for a few simple workloads
+(2s12c24t Westmere, debian non-graphical system). Ungraceful drops ("restart")
+are attempts to drop rcu that fail due to a d_seq mismatch and require the
+entire path lookup to be performed again. The other columns count successful
+rcu-drops that are required before the final element: nodentry for a missing
+dentry, revalidate for a filesystem revalidate routine requiring an rcu drop,
+permission for a permission check requiring a drop, and link for symlink
+traversal requiring a drop.
+
+     rcu-lookups     restart  nodentry          link  revalidate  permission
+bootup     47121           0      4624          1010       10283        7852
+dbench  25386793           0   6778659(26.7%)     55         549        1156
+kbuild   2696672          10     64442(2.3%)  108764(4.0%)     1        1590
+git diff   39605           0        28             2           0         106
+vfstest 24185492        4945    708725(2.9%) 1076136(4.4%)     0        2651
+
+What this shows is that failed rcu-walk lookups, ie. ones that are restarted
+entirely with ref-walk, are quite rare. Even the "vfstest" case which
+specifically has concurrent renames/mkdir/rmdir/creat/unlink/etc to exercise
+such races does not show a huge number of restarts.
+
+Dropping from rcu-walk to ref-walk means that we have encountered a dentry where
+the reference count needs to be taken for some reason. This is either because
+we have reached the target of the path walk, or because we have encountered a
+condition that can't be resolved in rcu-walk mode.  Ideally, we drop rcu-walk
+only when we have reached the target dentry, so the other statistics show where
+this does not happen.
+
+Note that a graceful drop from rcu-walk mode due to something such as the
+dentry not existing (which can be common) is not necessarily a failure of the
+rcu-walk scheme, because some elements of the path may have been walked in
+rcu-walk mode. The further we get from common path elements (such as cwd or
+root), the less contended the dentry is likely to be. The closer we are to
+common path elements, the more likely they are to exist in the dentry cache.
+
+
+Papers and other documentation on dcache locking
+================================================
+
+1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
+
+2. http://lse.sourceforge.net/locking/dcache/dcache.html
+
+
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index b12c895..dfbcd1b 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -216,7 +216,6 @@
 ->d_parent changes are not protected by BKL anymore.  Read access is safe
 if at least one of the following is true:
 	* filesystem has no cross-directory rename()
-	* dcache_lock is held
 	* we know that parent had been locked (e.g. we are looking at
 ->d_parent of ->lookup() argument).
 	* we are called from ->rename().
@@ -318,3 +317,80 @@
 may happen while the inode is in the middle of ->write_inode(); e.g. if you blindly
 free the on-disk inode, you may end up doing that while ->write_inode() is writing
 to it.
+
+---
+[mandatory]
+
+	.d_delete() now only advises the dcache as to whether or not to cache
+unreferenced dentries, and is now only called when the dentry refcount goes to
+0. Even on the 0-refcount transition, it must be able to tolerate being
+called 0, 1, or more times (ie. it must be constant and idempotent).
+
+---
+[mandatory]
+
+	.d_compare() calling convention and locking rules are significantly
+changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
+look at examples of other filesystems) for guidance.
+
+---
+[mandatory]
+
+	.d_hash() calling convention and locking rules are significantly
+changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
+look at examples of other filesystems) for guidance.
+
+---
+[mandatory]
+	dcache_lock is gone, replaced by fine grained locks. See fs/dcache.c
+for details of what locks to replace dcache_lock with in order to protect
+particular things. Most of the time, a filesystem only needs ->d_lock, which
+protects *all* the dcache state of a given dentry.
+
+--
+[mandatory]
+
+	Filesystems must RCU-free their inodes, if they can have been accessed
+via rcu-walk path walk (basically, if the file can have had a path name in the
+vfs namespace).
+
+	i_dentry and i_rcu share storage in a union, and the vfs expects
+i_dentry to be reinitialized before it is freed, so an:
+
+  INIT_LIST_HEAD(&inode->i_dentry);
+
+must be done in the RCU callback.
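+
+For a filesystem "foo" (the names foo_inode_cachep and FOO_I() are purely
+illustrative), the usual pattern is along these lines:
+
+  static void foo_i_callback(struct rcu_head *head)
+  {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(foo_inode_cachep, FOO_I(inode));
+  }
+
+  static void foo_destroy_inode(struct inode *inode)
+  {
+	call_rcu(&inode->i_rcu, foo_i_callback);
+  }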
+
+--
+[recommended]
+	vfs now tries to do path walking in "rcu-walk mode", which avoids
+atomic operations and scalability hazards on dentries and inodes (see
+Documentation/filesystems/path-lookup.txt). d_hash and d_compare changes
+(above) are examples of the changes required to support this. For more complex
+filesystem callbacks, the vfs drops out of rcu-walk mode before the fs call, so
+no changes are required to the filesystem. However, this is costly and loses
+the benefits of rcu-walk mode. We will begin to add filesystem callbacks that
+are rcu-walk aware, shown below. Filesystems should take advantage of this
+where possible.
+
+--
+[mandatory]
+	d_revalidate is a callback that is made on every path element (if
+the filesystem provides it), which previously required dropping out of rcu-walk
+mode. It may now be called in rcu-walk mode (nd->flags & LOOKUP_RCU). -ECHILD
+should be returned if the filesystem cannot handle rcu-walk. See
+Documentation/filesystems/vfs.txt for more details.
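+
+	For example, a filesystem that cannot (or does not want to) revalidate
+under rcu-walk can simply punt back to ref-walk mode ("foo" is a stand-in
+name):
+
+  static int foo_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+  {
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;	/* retried in ref-walk mode */
+
+	/* ... ordinary, possibly blocking, revalidation ... */
+	return 1;
+  }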
+
+	permission and check_acl are inode permission checks that are called
+on many or all directory inodes on the way down a path walk (to check for
+exec permission). These must now be rcu-walk aware (flags & IPERM_FLAG_RCU).
+See Documentation/filesystems/vfs.txt for more details.
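+
+	A sketch of an rcu-walk aware ->permission() for an illustrative "foo"
+filesystem (the call to generic_permission() here assumes its new
+four-argument form):
+
+  static int foo_permission(struct inode *inode, int mask, unsigned int flags)
+  {
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;	/* can't check without blocking */
+
+	return generic_permission(inode, mask, flags, NULL);
+  }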
+ 
+--
+[mandatory]
+	In ->fallocate() you must check the mode option passed in.  If your
+filesystem does not support hole punching (deallocating space in the middle of a
+file) you must return -EOPNOTSUPP if FALLOC_FL_PUNCH_HOLE is set in mode.
+Currently you can only have FALLOC_FL_PUNCH_HOLE with FALLOC_FL_KEEP_SIZE set,
+so the i_size should not change when hole punching, even when punching off the
+end of a file.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index e73df27..23cae65 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -375,6 +375,7 @@
 Swap:                  0 kB
 KernelPageSize:        4 kB
 MMUPageSize:           4 kB
+Locked:              374 kB
 
 The first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
@@ -670,6 +671,8 @@
 
 > cat /proc/meminfo
 
+The "Locked" indicates whether the mapping is locked in memory or not.
+
 
 MemTotal:     16344972 kB
 MemFree:      13634064 kB
@@ -1181,6 +1184,30 @@
  mb_groups       details of multiblock allocator buddy cache of free blocks
 ..............................................................................
 
+2.0 /proc/consoles
+------------------
+Shows registered system console lines.
+
+To see which character device lines are currently used for the system console
+/dev/console, you may simply look into the file /proc/consoles:
+
+  > cat /proc/consoles
+  tty0                 -WU (ECp)       4:7
+  ttyS0                -W- (Ep)        4:64
+
+The columns are:
+
+  device               name of the device
+  operations           R = can do read operations
+                       W = can do write operations
+                       U = can do unblank
+  flags                E = it is enabled
+                       C = it is preferred console
+                       B = it is primary boot console
+                       p = it is used for printk buffer
+                       b = it is not a TTY but a Braille device
+                       a = it is safe to use when cpu is offline
+  major:minor          major and minor number of the device separated by a colon
 
 ------------------------------------------------------------------------------
 Summary
@@ -1296,6 +1323,10 @@
 Writing to /proc/<pid>/oom_score_adj or /proc/<pid>/oom_adj will change the
 other with its scaled value.
 
+The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
+value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
+requires CAP_SYS_RESOURCE.
+
 NOTICE: /proc/<pid>/oom_adj is deprecated and will be removed, please see
 Documentation/feature-removal-schedule.txt.
 
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 20899e0..cae6d27 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -325,7 +325,8 @@
         void * (*follow_link) (struct dentry *, struct nameidata *);
         void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int, struct nameidata *);
+	int (*permission) (struct inode *, int, unsigned int);
+	int (*check_acl)(struct inode *, int, unsigned int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -414,6 +415,13 @@
   permission: called by the VFS to check for access rights on a POSIX-like
   	filesystem.
 
+	May be called in rcu-walk mode (flags & IPERM_FLAG_RCU). If in rcu-walk
+	mode, the filesystem must check the permission without blocking or
+	storing to the inode.
+
+	If a situation is encountered that rcu-walk cannot handle, return
+	-ECHILD and it will be called again in ref-walk mode.
+
   setattr: called by the VFS to set attributes for a file. This method
   	is called by chmod(2) and related system calls.
 
@@ -847,9 +855,12 @@
 
 struct dentry_operations {
 	int (*d_revalidate)(struct dentry *, struct nameidata *);
-	int (*d_hash) (struct dentry *, struct qstr *);
-	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
-	int (*d_delete)(struct dentry *);
+	int (*d_hash)(const struct dentry *, const struct inode *,
+			struct qstr *);
+	int (*d_compare)(const struct dentry *, const struct inode *,
+			const struct dentry *, const struct inode *,
+			unsigned int, const char *, const struct qstr *);
+	int (*d_delete)(const struct dentry *);
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
@@ -860,13 +871,45 @@
 	dcache. Most filesystems leave this as NULL, because all their
 	dentries in the dcache are valid
 
-  d_hash: called when the VFS adds a dentry to the hash table
+	d_revalidate may be called in rcu-walk mode (nd->flags & LOOKUP_RCU).
+	If in rcu-walk mode, the filesystem must revalidate the dentry without
+	blocking or storing to the dentry. d_parent and d_inode should not be
+	used without care (because they can go NULL); nd->inode should be used
+	instead.
 
-  d_compare: called when a dentry should be compared with another
+	If a situation is encountered that rcu-walk cannot handle, return
+	-ECHILD and it will be called again in ref-walk mode.
 
-  d_delete: called when the last reference to a dentry is
-	deleted. This means no-one is using the dentry, however it is
-	still valid and in the dcache
+  d_hash: called when the VFS adds a dentry to the hash table. The first
+	dentry passed to d_hash is the parent directory that the name is
+	to be hashed into. The inode is the dentry's inode.
+
+	Same locking and synchronisation rules as d_compare regarding
+	what is safe to dereference etc.
+
+  d_compare: called to compare a dentry name with a given name. The first
+	dentry is the parent of the dentry to be compared, the second is
+	the parent's inode, then the dentry and inode (may be NULL) of the
+	child dentry. len and name string are properties of the dentry to be
+	compared. qstr is the name to compare it with.
+
+	Must be constant and idempotent, and should not take locks if
+	possible, and should not store into the dentry or inodes.
+	Should not dereference pointers outside the dentry or inodes without
+	lots of care (eg. d_parent, d_inode, d_name should not be used).
+
+	However, our vfsmount is pinned, and RCU held, so the dentries and
+	inodes won't disappear, neither will our sb or filesystem module.
+	->i_sb and ->d_sb may be used.
+
+	It is a tricky calling convention because it needs to be called under
+	"rcu-walk", ie. without any locks or references on things.
+
+  d_delete: called when the last reference to a dentry is dropped and the
+	dcache is deciding whether or not to cache it. Return 1 to delete
+	immediately, or 0 to cache the dentry. Default is NULL which means to
+	always cache a reachable dentry. d_delete must be constant and
+	idempotent.
 
   d_release: called when a dentry is really deallocated
 
@@ -910,14 +953,11 @@
 	the usage count)
 
   dput: close a handle for a dentry (decrements the usage count). If
-	the usage count drops to 0, the "d_delete" method is called
-	and the dentry is placed on the unused list if the dentry is
-	still in its parents hash list. Putting the dentry on the
-	unused list just means that if the system needs some RAM, it
-	goes through the unused list of dentries and deallocates them.
-	If the dentry has already been unhashed and the usage count
-	drops to 0, in this case the dentry is deallocated after the
-	"d_delete" method is called
+	the usage count drops to 0, and the dentry is still in its
+	parent's hash, the "d_delete" method is called to check whether
+	it should be cached. If it should not be cached, or if the dentry
+	is not hashed, it is deleted. Otherwise cached dentries are put
+	into an LRU list to be reclaimed on memory shortage.
 
   d_drop: this unhashes a dentry from its parents hash list. A
 	subsequent call to dput() will deallocate the dentry if its
diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240
index 2c6f1fe..36e8ec6 100644
--- a/Documentation/hwmon/adm9240
+++ b/Documentation/hwmon/adm9240
@@ -155,7 +155,7 @@
 The ADM9240 provides an internal open drain on this line, and may output
 a 20 ms active low pulse to reset an external Chassis Intrusion latch.
 
-Clear the CI latch by writing value 1 to the sysfs chassis_clear file.
+Clear the CI latch by writing value 0 to the sysfs intrusion0_alarm file.
 
 Alarm flags reported as 16-bit word
 
diff --git a/Documentation/hwmon/ads7828 b/Documentation/hwmon/ads7828
index 75bc4be..2bbebe6 100644
--- a/Documentation/hwmon/ads7828
+++ b/Documentation/hwmon/ads7828
@@ -9,7 +9,7 @@
                http://focus.ti.com/lit/ds/symlink/ads7828.pdf
 
 Authors:
-        Steve Hardy <steve@linuxrealtime.co.uk>
+        Steve Hardy <shardy@redhat.com>
 
 Module Parameters
 -----------------
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737
index fc5df76..4d29351 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737
@@ -42,7 +42,7 @@
 This driver implements support for the hardware monitoring capabilities of the
 SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x,
 and SCH5127 Super-I/O chips. These chips feature monitoring of 3 temp sensors
-temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and
+temp[1-3] (2 remote diodes and 1 internal), 8 voltages in[0-7] (7 external and
 1 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement
 up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and
 automatically.
@@ -105,6 +105,7 @@
 	in4: V1_IN				0V - 1.5V
 	in5: VTR	(+3.3V standby)		0V - 4.38V
 	in6: Vbat	(+3.0V)			0V - 4.38V
+	in7: Vtrip	(+1.5V)			0V - 1.99V
 
 Each voltage input has associated min and max limits which trigger an alarm
 when crossed.
@@ -217,10 +218,10 @@
 vrm				RW	Voltage regulator module version
 					number.
 
-in[0-6]_input			RO	Measured voltage in millivolts.
-in[0-6]_min			RW	Low limit for voltage input.
-in[0-6]_max			RW	High limit for voltage input.
-in[0-6]_alarm			RO	Voltage input alarm. Returns 1 if
+in[0-7]_input			RO	Measured voltage in millivolts.
+in[0-7]_min			RW	Low limit for voltage input.
+in[0-7]_max			RW	High limit for voltage input.
+in[0-7]_alarm			RO	Voltage input alarm. Returns 1 if
 					voltage input is or went outside the
 					associated min-max range, 0 otherwise.
 
@@ -324,3 +325,4 @@
 pwm5			opt		opt
 fan6			opt		opt
 pwm6			opt		opt
+in7						yes
diff --git a/Documentation/hwmon/ds620 b/Documentation/hwmon/ds620
new file mode 100644
index 0000000..1fbe3cd
--- /dev/null
+++ b/Documentation/hwmon/ds620
@@ -0,0 +1,34 @@
+Kernel driver ds620
+===================
+
+Supported chips:
+  * Dallas Semiconductor DS620
+    Prefix: 'ds620'
+    Datasheet: Publicly available at the Dallas Semiconductor website
+               http://www.dalsemi.com/
+
+Authors:
+        Roland Stigge <stigge@antcom.de>
+        based on ds1621.c by
+        Christian W. Zuckschwerdt <zany@triq.net>
+
+Description
+-----------
+
+The DS620 is a single-instance digital thermometer and thermostat. It has both
+high and low temperature limits which can be user defined (i.e. programmed
+into non-volatile on-chip registers). The temperature range is -55 to +125
+degrees Celsius. Between 0 and 70 degrees Celsius, accuracy is 0.5 Kelvin. The
+value returned via sysfs includes decimal places.
+
+The thermostat function works as follows: When configured via platform_data
+(struct ds620_platform_data) .pomode == 0 (default), the thermostat output pin
+PO is always low. If .pomode == 1, the thermostat is in PO_LOW mode. I.e., the
+output pin PO becomes active when the temperature falls below temp1_min and
+stays active until the temperature goes above temp1_max.
+
+Likewise, with .pomode == 2, the thermostat is in PO_HIGH mode. I.e., the PO
+output pin becomes active when the temperature goes above temp1_max and stays
+active until the temperature falls below temp1_min.
+
+The PO output pin of the DS620 operates active-low.
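+
+A board file could hook the chip up along these lines (the bus number, the
+0x48 slave address and the variable names are only examples):
+
+static struct ds620_platform_data myboard_ds620_pdata = {
+	.pomode = 2,	/* PO_HIGH: PO asserts above temp1_max */
+};
+
+static struct i2c_board_info myboard_ds620_info __initdata = {
+	I2C_BOARD_INFO("ds620", 0x48),
+	.platform_data = &myboard_ds620_pdata,
+};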
diff --git a/Documentation/hwmon/sht21 b/Documentation/hwmon/sht21
new file mode 100644
index 0000000..db17fda
--- /dev/null
+++ b/Documentation/hwmon/sht21
@@ -0,0 +1,49 @@
+Kernel driver sht21
+===================
+
+Supported chips:
+  * Sensirion SHT21
+    Prefix: 'sht21'
+    Addresses scanned: none
+    Datasheet: Publicly available at the Sensirion website
+    http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf
+
+  * Sensirion SHT25
+    Prefix: 'sht21'
+    Addresses scanned: none
+    Datasheet: Publicly available at the Sensirion website
+    http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT25.pdf
+
+Author:
+  Urs Fleisch <urs.fleisch@sensirion.com>
+
+Description
+-----------
+
+The SHT21 and SHT25 are humidity and temperature sensors in a DFN package of
+only 3 x 3 mm footprint and 1.1 mm height. The difference between the two
+devices is the higher level of precision of the SHT25 (1.8% relative humidity,
+0.2 degree Celsius) compared with the SHT21 (2.0% relative humidity,
+0.3 degree Celsius).
+
+The devices communicate with the I2C protocol. All sensors are set to the same
+I2C address 0x40, so an entry with I2C_BOARD_INFO("sht21", 0x40) can be used
+in the board setup code.
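+
+For instance, a board file might declare the sensor like this (bus number 0
+and the variable name are only examples):
+
+static struct i2c_board_info myboard_i2c_devices[] __initdata = {
+	{
+		I2C_BOARD_INFO("sht21", 0x40),
+	},
+};
+
+/* from the board init code: */
+i2c_register_board_info(0, myboard_i2c_devices,
+			ARRAY_SIZE(myboard_i2c_devices));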
+
+sysfs-Interface
+---------------
+
+temp1_input - temperature input
+humidity1_input - humidity input
+
+Notes
+-----
+
+The driver uses the default resolution settings of 12 bit for humidity and 14
+bit for temperature, which results in typical measurement times of 22 ms for
+humidity and 66 ms for temperature. To keep self heating below 0.1 degree
+Celsius, the device should not be active for more than 10% of the time,
+e.g. maximum two measurements per second at the given resolution.
+
+Different resolutions, the on-chip heater, using the CRC checksum and reading
+the serial number are not supported yet.
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 6456990..c6559f1 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -384,10 +384,20 @@
 		Unit: milliampere
 		RW
 
+curr[1-*]_lcrit	Current critical low value
+		Unit: milliampere
+		RW
+
+curr[1-*]_crit	Current critical high value.
+		Unit: milliampere
+		RW
+
 curr[1-*]_input	Current input value
 		Unit: milliampere
 		RO
 
+Also see the Alarms section for status flags associated with currents.
+
 *********
 * Power *
 *********
@@ -450,13 +460,6 @@
 				Unit: Percent
 				RO
 
-power[1-*]_alarm		1 if the system is drawing more power than the
-				cap allows; 0 otherwise.  A poll notification is
-				sent to this file when the power use exceeds the
-				cap.  This file only appears if the cap is known
-				to be enforced by hardware.
-				RO
-
 power[1-*]_cap			If power use rises above this limit, the
 				system should take action to reduce power use.
 				A poll notification is sent to this file if the
@@ -479,6 +482,20 @@
 				Unit: microWatt
 				RO
 
+power[1-*]_max			Maximum power.
+				Unit: microWatt
+				RW
+
+power[1-*]_crit			Critical maximum power.
+				If power rises to or above this limit, the
+				system is expected to take drastic action to reduce
+				power consumption, such as a system shutdown or
+				a forced powerdown of some devices.
+				Unit: microWatt
+				RW
+
+Also see the Alarms section for status flags associated with power readings.
+
 **********
 * Energy *
 **********
@@ -488,6 +505,15 @@
 				RO
 
 
+************
+* Humidity *
+************
+
+humidity[1-*]_input		Humidity
+				Unit: milli-percent (per cent mille, pcm)
+				RO
+
+
 **********
 * Alarms *
 **********
@@ -501,6 +527,7 @@
 
 in[0-*]_alarm
 curr[1-*]_alarm
+power[1-*]_alarm
 fan[1-*]_alarm
 temp[1-*]_alarm
 		Channel alarm
@@ -512,12 +539,20 @@
 
 in[0-*]_min_alarm
 in[0-*]_max_alarm
+in[0-*]_lcrit_alarm
+in[0-*]_crit_alarm
 curr[1-*]_min_alarm
 curr[1-*]_max_alarm
+curr[1-*]_lcrit_alarm
+curr[1-*]_crit_alarm
+power[1-*]_cap_alarm
+power[1-*]_max_alarm
+power[1-*]_crit_alarm
 fan[1-*]_min_alarm
 fan[1-*]_max_alarm
 temp[1-*]_min_alarm
 temp[1-*]_max_alarm
+temp[1-*]_lcrit_alarm
 temp[1-*]_crit_alarm
 temp[1-*]_emergency_alarm
 		Limit alarm
diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf
index fb145e5..8432e11 100644
--- a/Documentation/hwmon/w83627hf
+++ b/Documentation/hwmon/w83627hf
@@ -91,3 +91,25 @@
 
 The above sequence assumes a Super-I/O config space at 0x2e/0x2f, but
 0x4e/0x4f is also possible.
+
+Voltage pin mapping
+-------------------
+
+Here is a summary of the voltage pin mapping for the W83627THF. This
+can be useful to convert data provided by board manufacturers into
+working libsensors configuration statements.
+
+    W83627THF		|
+  Pin	| Name		| Register	| Sysfs attribute
+-----------------------------------------------------
+  100	| CPUVCORE	| 20h		| in0
+   99	| VIN0		| 21h		| in1
+   98	| VIN1		| 22h		| in2
+   97	| VIN2		| 24h		| in4
+  114	| AVCC		| 23h		| in3
+   61	| 5VSB		| 50h (bank 5)	| in7
+   74	| VBAT		| 51h (bank 5)	| in8
+
+For other supported devices, you'll have to take the hard path and
+look up the information in the datasheet yourself (and then add it
+to this document please.)
diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793
index 51171a8..6cc5f63 100644
--- a/Documentation/hwmon/w83793
+++ b/Documentation/hwmon/w83793
@@ -92,7 +92,7 @@
 
 * Chassis
   If the case open alarm triggers, it will stay in this state unless cleared
-  by any write to the sysfs file "chassis".
+  by writing 0 to the sysfs file "intrusion0_alarm".
 
 * VID and VRM
   The VRM version is detected automatically, don't modify the it unless you
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/gpio-i2cmux
new file mode 100644
index 0000000..811cd78
--- /dev/null
+++ b/Documentation/i2c/muxes/gpio-i2cmux
@@ -0,0 +1,65 @@
+Kernel driver gpio-i2cmux
+
+Author: Peter Korsgaard <peter.korsgaard@barco.com>
+
+Description
+-----------
+
+gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+from a master I2C bus and a hardware MUX controlled through GPIO pins.
+
+E.G.:
+
+  ----------              ----------  Bus segment 1   - - - - -
+ |          | SCL/SDA    |          |-------------- |           |
+ |          |------------|          |
+ |          |            |          | Bus segment 2 |           |
+ |  Linux   | GPIO 1..N  |   MUX    |---------------   Devices
+ |          |------------|          |               |           |
+ |          |            |          | Bus segment M
+ |          |            |          |---------------|           |
+  ----------              ----------                  - - - - -
+
+SCL/SDA of the master I2C bus is multiplexed to bus segment 1..M
+according to the settings of the GPIO pins 1..N.
+
+Usage
+-----
+
+gpio-i2cmux uses the platform bus, so you need to provide a struct
+platform_device with the platform_data pointing to a struct
+gpio_i2cmux_platform_data with the I2C adapter number of the master
+bus, the number of bus segments to create and the GPIO pins used
+to control it. See include/linux/gpio-i2cmux.h for details.
+
+E.G. something like this for a MUX providing 4 bus segments
+controlled through 3 GPIO pins:
+
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+
+static const unsigned myboard_gpiomux_gpios[] = {
+	AT91_PIN_PC26, AT91_PIN_PC25, AT91_PIN_PC24
+};
+
+static const unsigned myboard_gpiomux_values[] = {
+	0, 1, 2, 3
+};
+
+static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
+	.parent		= 1,
+	.base_nr	= 2, /* optional */
+	.values		= myboard_gpiomux_values,
+	.n_values	= ARRAY_SIZE(myboard_gpiomux_values),
+	.gpios		= myboard_gpiomux_gpios,
+	.n_gpios	= ARRAY_SIZE(myboard_gpiomux_gpios),
+	.idle		= 4, /* optional */
+};
+
+static struct platform_device myboard_i2cmux = {
+	.name		= "gpio-i2cmux",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &myboard_i2cmux_data,
+	},
+};
diff --git a/Documentation/input/cma3000_d0x.txt b/Documentation/input/cma3000_d0x.txt
new file mode 100644
index 0000000..29d088d
--- /dev/null
+++ b/Documentation/input/cma3000_d0x.txt
@@ -0,0 +1,115 @@
+Kernel driver for CMA3000-D0x
+=============================
+
+Supported chips:
+* VTI CMA3000-D0x
+Datasheet:
+  CMA3000-D0X Product Family Specification 8281000A.02.pdf
+  <http://www.vti.fi/en/>
+
+Author: Hemanth V <hemanthv@ti.com>
+
+
+Description
+-----------
+The CMA3000 tri-axis accelerometer supports Motion Detect, Measurement and
+Free Fall modes.
+
+Motion Detect Mode: This is the low-power mode, where interrupts are generated
+only when motion exceeds the defined thresholds.
+
+Measurement Mode: This mode is used to read the acceleration data on the X, Y
+and Z axes and supports 400, 100 and 40 Hz sample frequencies.
+
+Free fall Mode: This mode is intended to save system resources.
+
+Threshold values: The chip supports defining threshold values for the above
+modes, which include time and g values. Refer to the product specification for
+more details.
+
+The CMA3000 chip supports mutually exclusive I2C and SPI interfaces for
+communication; currently the driver supports I2C based communication only.
+The initial bus mode configuration is set in non-volatile memory and can later
+be modified through a bus interface command.
+
+The driver reports acceleration data through the input subsystem. It generates
+an ABS_MISC event with value 1 when free fall is detected.
+
+Platform data needs to be configured with initial default values; an example
+initialisation is sketched after the field list below.
+
+Platform Data
+-------------
+fuzz_x: Noise on X Axis
+
+fuzz_y: Noise on Y Axis
+
+fuzz_z: Noise on Z Axis
+
+g_range: G range in milli g i.e 2000 or 8000
+
+mode: Default Operating mode
+
+mdthr: Motion detect g range threshold value
+
+mdfftmr: Motion detect and free fall time threshold value
+
+ffthr: Free fall g range threshold value
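+
+An illustrative initialisation, assuming the platform data structure is
+struct cma3000_platform_data with fields named as above (all values are
+examples only):
+
+static struct cma3000_platform_data cma3000_pdata = {
+	.mode		= 4,	/* Motion Detect mode (default) */
+	.g_range	= 8000,	/* 8G range, in milli g */
+	.fuzz_x		= 200,
+	.fuzz_y		= 200,
+	.fuzz_z		= 200,
+	.mdthr		= 0x8,
+	.mdfftmr	= 0x33,
+	.ffthr		= 0x8,
+};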
+
+Input Interface
+--------------
+Input driver version is 1.0.0
+Input device ID: bus 0x18 vendor 0x0 product 0x0 version 0x0
+Input device name: "cma3000-accelerometer"
+Supported events:
+  Event type 0 (Sync)
+  Event type 3 (Absolute)
+    Event code 0 (X)
+      Value     47
+      Min    -8000
+      Max     8000
+      Fuzz     200
+    Event code 1 (Y)
+      Value    -28
+      Min    -8000
+      Max     8000
+      Fuzz     200
+    Event code 2 (Z)
+      Value    905
+      Min    -8000
+      Max     8000
+      Fuzz     200
+    Event code 40 (Misc)
+      Value      0
+      Min        0
+      Max        1
+  Event type 4 (Misc)
+
+
+Register/Platform parameters Description
+----------------------------------------
+
+mode:
+	0: power down mode
+	1: 100 Hz Measurement mode
+	2: 400 Hz Measurement mode
+	3: 40 Hz Measurement mode
+	4: Motion Detect mode (default)
+	5: 100 Hz Free fall mode
+	6: 40 Hz Free fall mode
+	7: Power off mode
+
+grange:
+	2000: 2000 mg or 2G Range
+	8000: 8000 mg or 8G Range
+
+mdthr:
+	X: X * 71mg (8G Range)
+	X: X * 18mg (2G Range)
+
+mdfftmr:
+	X: (X & 0x70) * 100 ms (MDTMR)
+	   (X & 0x0F) * 2.5 ms (FFTMR 400 Hz)
+	   (X & 0x0F) * 10 ms  (FFTMR 100 Hz)
+
+ffthr:
+       X: (X >> 2) * 18mg (2G Range)
+       X: (X & 0x0F) * 71 mg (8G Range)
diff --git a/Documentation/input/ff.txt b/Documentation/input/ff.txt
index ded4d5f..b3867bf 100644
--- a/Documentation/input/ff.txt
+++ b/Documentation/input/ff.txt
@@ -49,7 +49,9 @@
 #include <linux/input.h>
 #include <sys/ioctl.h>
 
-unsigned long features[1 + FF_MAX/sizeof(unsigned long)];
+#define BITS_TO_LONGS(x) \
+	(((x) + 8 * sizeof (unsigned long) - 1) / (8 * sizeof (unsigned long)))
+unsigned long features[BITS_TO_LONGS(FF_CNT)];
 int ioctl(int file_descriptor, int request, unsigned long *features);
 
 "request" must be EVIOCGBIT(EV_FF, size of features array in bytes )
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index bdcba154..71536e7 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -1,6 +1,6 @@
 Multi-touch (MT) Protocol
 -------------------------
-	Copyright (C) 2009	Henrik Rydberg <rydberg@euromail.se>
+	Copyright (C) 2009-2010	Henrik Rydberg <rydberg@euromail.se>
 
 
 Introduction
@@ -161,19 +161,24 @@
 ratio ABS_MT_TOUCH_MAJOR / ABS_MT_WIDTH_MAJOR, which is always smaller than
 unity, is related to the contact pressure. For pressure-based devices,
 ABS_MT_PRESSURE may be used to provide the pressure on the contact area
-instead.
+instead. Devices capable of contact hovering can use ABS_MT_DISTANCE to
+indicate the distance between the contact and the surface.
 
 In addition to the MAJOR parameters, the oval shape of the contact can be
 described by adding the MINOR parameters, such that MAJOR and MINOR are the
 major and minor axis of an ellipse. Finally, the orientation of the oval
 shape can be described with the ORIENTATION parameter.
 
+For type A devices, further specification of the touch shape is possible
+via ABS_MT_BLOB_ID.
+
 The ABS_MT_TOOL_TYPE may be used to specify whether the touching tool is a
-contact or a pen or something else.  Devices with more granular information
-may specify general shapes as blobs, i.e., as a sequence of rectangular
-shapes grouped together by an ABS_MT_BLOB_ID. Finally, for the few devices
-that currently support it, the ABS_MT_TRACKING_ID event may be used to
-report contact tracking from hardware [5].
+finger or a pen or something else. Finally, the ABS_MT_TRACKING_ID event
+may be used to track identified contacts over time [5].
+
+In the type B protocol, ABS_MT_TOOL_TYPE and ABS_MT_TRACKING_ID are
+implicitly handled by input core; drivers should instead call
+input_mt_report_slot_state().
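+
+As a minimal sketch (dev, slot, x and y come from the driver), reporting one
+active contact in a slot could look like:
+
+	input_mt_slot(dev, slot);
+	input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
+	input_report_abs(dev, ABS_MT_POSITION_X, x);
+	input_report_abs(dev, ABS_MT_POSITION_Y, y);
+
+and a released contact is reported with
+input_mt_report_slot_state(dev, MT_TOOL_FINGER, false).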
 
 
 Event Semantics
@@ -213,6 +218,12 @@
 of TOUCH and WIDTH for pressure-based devices or any device with a spatial
 signal intensity distribution.
 
+ABS_MT_DISTANCE
+
+The distance, in surface units, between the contact and the surface. Zero
+distance means the contact is touching the surface. A positive number means
+the contact is hovering above the surface.
+
 ABS_MT_ORIENTATION
 
 The orientation of the ellipse. The value should describe a signed quarter
@@ -240,21 +251,24 @@
 The type of approaching tool. A lot of kernel drivers cannot distinguish
 between different tool types, such as a finger or a pen. In such cases, the
 event should be omitted. The protocol currently supports MT_TOOL_FINGER and
-MT_TOOL_PEN [2].
+MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
+drivers should instead use input_mt_report_slot_state().
 
 ABS_MT_BLOB_ID
 
 The BLOB_ID groups several packets together into one arbitrarily shaped
-contact. This is a low-level anonymous grouping for type A devices, and
+contact. The sequence of points forms a polygon which defines the shape of
+the contact. This is a low-level anonymous grouping for type A devices, and
 should not be confused with the high-level trackingID [5]. Most type A
 devices do not have blob capability, so drivers can safely omit this event.
 
 ABS_MT_TRACKING_ID
 
 The TRACKING_ID identifies an initiated contact throughout its life cycle
-[5]. This event is mandatory for type B devices. The value range of the
-TRACKING_ID should be large enough to ensure unique identification of a
-contact maintained over an extended period of time.
+[5]. The value range of the TRACKING_ID should be large enough to ensure
+unique identification of a contact maintained over an extended period of
+time. For type B devices, this event is handled by input core; drivers
+should instead use input_mt_report_slot_state().
 
 
 Event Computation
@@ -301,18 +315,19 @@
 Notes
 -----
 
-In order to stay compatible with existing applications, the data
-reported in a finger packet must not be recognized as single-touch
-events. In addition, all finger data must bypass input filtering,
-since subsequent events of the same type refer to different fingers.
+In order to stay compatible with existing applications, the data reported
+in a finger packet must not be recognized as single-touch events.
 
-The first kernel driver to utilize the MT protocol is the bcm5974 driver,
-where examples can be found.
+For type A devices, all finger data bypasses input filtering, since
+subsequent events of the same type refer to different fingers.
+
+For example usage of the type A protocol, see the bcm5974 driver. For
+example usage of the type B protocol, see the hid-egalax driver.
 
 [1] With the extension ABS_MT_APPROACH_X and ABS_MT_APPROACH_Y, the
 difference between the contact position and the approaching tool position
 could be used to derive tilt.
 [2] The list can of course be extended.
-[3] Multitouch X driver project: http://bitmath.org/code/multitouch/.
+[3] The mtdev project: http://bitmath.org/code/mtdev/.
 [4] See the section on event computation.
 [5] See the section on finger tracking.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index d6a63c7..ac293e9 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -247,7 +247,7 @@
 'p'	40-7F	linux/nvram.h
 'p'	80-9F	linux/ppdev.h		user-space parport
 					<mailto:tim@cyberelk.net>
-'p'	A1-A4	linux/pps.h		LinuxPPS
+'p'	A1-A5	linux/pps.h		LinuxPPS
 					<mailto:giometti@linux.it>
 'q'	00-1F	linux/serio.h
 'q'	80-FF	linux/telephony.h	Internet PhoneJACK, Internet LineJACK
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 59a69ec..f6dece5 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -81,7 +81,7 @@
     The only field that should go to zero. Incremented as requests are
     given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
-    This field is increases so long as field 9 is nonzero.
+    This field increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
     This field is incremented at each I/O start, I/O completion, I/O
     merge, or read of these stats by the number of I/Os in progress
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 1e5165a..4a99031 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -73,6 +73,14 @@
 The output directory can also be specified using "O=...".
 Setting "O=..." takes precedence over KBUILD_OUTPUT.
 
+KBUILD_DEBARCH
+--------------------------------------------------
+For the deb-pkg target, allows overriding the normal heuristics deployed by
+deb-pkg. Normally deb-pkg attempts to guess the right architecture based on
+the UTS_MACHINE variable, and on some architectures also the kernel config.
+The value of KBUILD_DEBARCH is assumed (not checked) to be a valid Debian
+architecture.
+
 ARCH
 --------------------------------------------------
 Set ARCH to the architecture to be built.
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 2fe93ca..b507d61 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -112,7 +112,6 @@
 	(no prompts anywhere) and for symbols with no dependencies.
 	That will limit the usefulness but on the other hand avoid
 	the illegal configurations all over.
-	kconfig should one day warn about such things.
 
 - numerical ranges: "range" <symbol> <symbol> ["if" <expr>]
   This allows to limit the range of possible input values for int
@@ -268,7 +267,7 @@
 
 choices:
 
-	"choice"
+	"choice" [symbol]
 	<choice options>
 	<choice block>
 	"endchoice"
@@ -282,6 +281,10 @@
 can be compiled as modules.
 A choice accepts another option "optional", which allows to set the
 choice to 'n' and no entry needs to be selected.
+If no [symbol] is associated with a choice, then you cannot have multiple
+definitions of that choice. If a [symbol] is associated with the choice,
+then you may define the same choice (ie. with the same entries) in another
+place.
 
 comment:
 
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 0ef00bd..86e3cd0 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1136,6 +1136,21 @@
 	      resulting in the target file being recompiled for no
 	      obvious reason.
 
+    dtc
+	Create a flattened device tree blob object suitable for linking
+	into vmlinux. Device tree blobs linked into vmlinux are placed
+	in an init section in the image. Platform code *must* copy the
+	blob to non-init memory prior to calling unflatten_device_tree().
+
+	Example:
+		#arch/x86/platform/ce4100/Makefile
+		clean-files := *dtb.S
+
+		DTC_FLAGS := -p 1024
+		obj-y += foo.dtb.o
+
+		$(obj)/%.dtb: $(src)/%.dts
+			$(call cmd,dtc)
 
 --- 6.7 Custom kbuild commands
 
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index cab61d8..7a9e0b4 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -65,18 +65,21 @@
 
 2) Download the kexec-tools user-space package from the following URL:
 
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools.tar.gz
+http://kernel.org/pub/linux/utils/kernel/kexec/kexec-tools.tar.gz
 
 This is a symlink to the latest version.
 
 The latest kexec-tools git tree is available at:
 
-git://git.kernel.org/pub/scm/linux/kernel/git/horms/kexec-tools.git
-or
-http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools.git
+git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+and
+http://www.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+
+There is also a gitweb interface available at
+http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
 
 More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/README.html
+http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
 
 3) Unpack the tarball with the tar command, as follows:
 
@@ -439,6 +442,6 @@
 Contact
 =======
 
-Vivek Goyal (vgoyal@in.ibm.com)
+Vivek Goyal (vgoyal@redhat.com)
 Maneesh Soni (maneesh@in.ibm.com)
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f3dc951..b72e071 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,11 +199,6 @@
 			unusable.  The "log_buf_len" parameter may be useful
 			if you need to capture more output.
 
-	acpi_display_output=	[HW,ACPI]
-			acpi_display_output=vendor
-			acpi_display_output=video
-			See above.
-
 	acpi_irq_balance [HW,ACPI]
 			ACPI will balance active IRQs
 			default in APIC mode
@@ -403,6 +398,10 @@
 	bttv.pll=	See Documentation/video4linux/bttv/Insmod-options
 	bttv.tuner=	and Documentation/video4linux/bttv/CARDLIST
 
+	bulk_remove=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for flushing multiple hpte entries
+			at a time.
+
 	c101=		[NET] Moxa C101 synchronous serial card
 
 	cachesize=	[BUGS=X86-32] Override level 2 CPU cache size detection.
@@ -655,11 +654,6 @@
 
 	dscc4.setup=	[NET]
 
-	dynamic_printk	Enables pr_debug()/dev_dbg() calls if
-			CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled.
-			These can also be switched on/off via
-			<debugfs>/dynamic_printk/modules
-
 	earlycon=	[KNL] Output early console device and options.
 		uart[8250],io,<addr>[,options]
 		uart[8250],mmio,<addr>[,options]
@@ -884,6 +878,7 @@
 			     controller
 	i8042.nopnp	[HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
 			     controllers
+	i8042.notimeout	[HW] Ignore timeout condition signalled by the controller
 	i8042.reset	[HW] Reset the controller during init and cleanup
 	i8042.unlock	[HW] Unlock (ignore) the keylock
 
@@ -1490,6 +1485,10 @@
 	mtdparts=	[MTD]
 			See drivers/mtd/cmdlinepart.c.
 
+	multitce=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for updating multiple TCE entries
+			at a time.
+
 	onenand.bdry=	[HW,MTD] Flex-OneNAND Boundary Configuration
 
 			Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
@@ -1701,6 +1700,9 @@
 
 	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
 
+	no-kvmapf	[X86,KVM] Disable paravirtualized asynchronous page
+			fault handling.
+
 	nolapic		[X86-32,APIC] Do not enable or use the local APIC.
 
 	nolapic_timer	[X86-32,APIC] Do not use the local APIC timer.
diff --git a/Documentation/keys-trusted-encrypted.txt b/Documentation/keys-trusted-encrypted.txt
new file mode 100644
index 0000000..8fb79bc
--- /dev/null
+++ b/Documentation/keys-trusted-encrypted.txt
@@ -0,0 +1,145 @@
+			Trusted and Encrypted Keys
+
+Trusted and Encrypted Keys are two new key types added to the existing kernel
+key ring service.  Both of these new types are variable length symmetric keys,
+and in both cases all keys are created in the kernel, and user space sees,
+stores, and loads only encrypted blobs.  Trusted Keys require the availability
+of a Trusted Platform Module (TPM) chip for greater security, while Encrypted
+Keys can be used on any system.  All user level blobs are displayed and loaded
+in hex ascii for convenience, and are integrity verified.
+
+Trusted Keys use a TPM both to generate and to seal the keys.  Keys are sealed
+under a 2048 bit RSA key in the TPM, and optionally sealed to specified PCR
+(integrity measurement) values, and only unsealed by the TPM, if PCRs and blob
+integrity verifications match.  A loaded Trusted Key can be updated with new
+(future) PCR values, so keys are easily migrated to new PCR values, such as
+when the kernel and initramfs are updated.  The same key can have many saved
+blobs under different PCR values, so multiple boots are easily supported.
+
+By default, trusted keys are sealed under the SRK, which has the default
+authorization value (20 zeros).  This can be set at takeownership time with the
+TrouSerS utility: "tpm_takeownership -u -z".
+
+Usage:
+    keyctl add trusted name "new keylen [options]" ring
+    keyctl add trusted name "load hex_blob [pcrlock=pcrnum]" ring
+    keyctl update key "update [options]"
+    keyctl print keyid
+
+    options:
+       keyhandle= ascii hex value of sealing key default 0x40000000 (SRK)
+       keyauth=	  ascii hex auth for sealing key default 0x00...
+		  (40 ascii zeros)
+       blobauth=  ascii hex auth for sealed data default 0x00...
+		  (40 ascii zeros)
+       pcrinfo=	  ascii hex of PCR_INFO or PCR_INFO_LONG (no default)
+       pcrlock=	  pcr number to be extended to "lock" blob
+       migratable= 0|1 indicating permission to reseal to new PCR values,
+                   default 1 (resealing allowed)
+
+"keyctl print" returns an ascii hex copy of the sealed key, which is in standard
+TPM_STORED_DATA format.  The key length for new keys is always in bytes.
+Trusted Keys can be 32 - 128 bytes (256 - 1024 bits); the upper limit is to fit
+within the 2048 bit SRK (RSA) keylength, with all necessary structure/padding.
+
+Encrypted keys do not depend on a TPM, and are faster, as they use AES for
+encryption/decryption.  New keys are created from kernel generated random
+numbers, and are encrypted/decrypted using a specified 'master' key.  The
+'master' key can either be a trusted-key or user-key type.  The main
+disadvantage of encrypted keys is that if they are not rooted in a trusted key,
+they are only as secure as the user key encrypting them.  The master user key
+should therefore be loaded in as secure a way as possible, preferably early in
+boot.
+
+Usage:
+  keyctl add encrypted name "new key-type:master-key-name keylen" ring
+  keyctl add encrypted name "load hex_blob" ring
+  keyctl update keyid "update key-type:master-key-name"
+
+where 'key-type' is either 'trusted' or 'user'.
+
+Examples of trusted and encrypted key usage:
+
+Create and save a trusted key named "kmk" of length 32 bytes:
+
+    $ keyctl add trusted kmk "new 32" @u
+    440502848
+
+    $ keyctl show
+    Session Keyring
+           -3 --alswrv    500   500  keyring: _ses
+     97833714 --alswrv    500    -1   \_ keyring: _uid.500
+    440502848 --alswrv    500   500       \_ trusted: kmk
+
+    $ keyctl print 440502848
+    0101000000000000000001005d01b7e3f4a6be5709930f3b70a743cbb42e0cc95e18e915
+    3f60da455bbf1144ad12e4f92b452f966929f6105fd29ca28e4d4d5a031d068478bacb0b
+    27351119f822911b0a11ba3d3498ba6a32e50dac7f32894dd890eb9ad578e4e292c83722
+    a52e56a097e6a68b3f56f7a52ece0cdccba1eb62cad7d817f6dc58898b3ac15f36026fec
+    d568bd4a706cb60bb37be6d8f1240661199d640b66fb0fe3b079f97f450b9ef9c22c6d5d
+    dd379f0facd1cd020281dfa3c70ba21a3fa6fc2471dc6d13ecf8298b946f65345faa5ef0
+    f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
+    e4a8aea2b607ec96931e6f4d4fe563ba
+
+    $ keyctl pipe 440502848 > kmk.blob
+
+Load a trusted key from the saved blob:
+
+    $ keyctl add trusted kmk "load `cat kmk.blob`" @u
+    268728824
+
+    $ keyctl print 268728824
+    0101000000000000000001005d01b7e3f4a6be5709930f3b70a743cbb42e0cc95e18e915
+    3f60da455bbf1144ad12e4f92b452f966929f6105fd29ca28e4d4d5a031d068478bacb0b
+    27351119f822911b0a11ba3d3498ba6a32e50dac7f32894dd890eb9ad578e4e292c83722
+    a52e56a097e6a68b3f56f7a52ece0cdccba1eb62cad7d817f6dc58898b3ac15f36026fec
+    d568bd4a706cb60bb37be6d8f1240661199d640b66fb0fe3b079f97f450b9ef9c22c6d5d
+    dd379f0facd1cd020281dfa3c70ba21a3fa6fc2471dc6d13ecf8298b946f65345faa5ef0
+    f1f8fff03ad0acb083725535636addb08d73dedb9832da198081e5deae84bfaf0409c22b
+    e4a8aea2b607ec96931e6f4d4fe563ba
+
+Reseal a trusted key under new pcr values:
+
+    $ keyctl update 268728824 "update pcrinfo=`cat pcr.blob`"
+    $ keyctl print 268728824
+    010100000000002c0002800093c35a09b70fff26e7a98ae786c641e678ec6ffb6b46d805
+    77c8a6377aed9d3219c6dfec4b23ffe3000001005d37d472ac8a44023fbb3d18583a4f73
+    d3a076c0858f6f1dcaa39ea0f119911ff03f5406df4f7f27f41da8d7194f45c9f4e00f2e
+    df449f266253aa3f52e55c53de147773e00f0f9aca86c64d94c95382265968c354c5eab4
+    9638c5ae99c89de1e0997242edfb0b501744e11ff9762dfd951cffd93227cc513384e7e6
+    e782c29435c7ec2edafaa2f4c1fe6e7a781b59549ff5296371b42133777dcc5b8b971610
+    94bc67ede19e43ddb9dc2baacad374a36feaf0314d700af0a65c164b7082401740e489c9
+    7ef6a24defe4846104209bf0c3eced7fa1a672ed5b125fc9d8cd88b476a658a4434644ef
+    df8ae9a178e9f83ba9f08d10fa47e4226b98b0702f06b3b8
+
+Create and save an encrypted key "evm" using the above trusted key "kmk":
+
+    $ keyctl add encrypted evm "new trusted:kmk 32" @u
+    159771175
+
+    $ keyctl print 159771175
+    trusted:kmk 32 2375725ad57798846a9bbd240de8906f006e66c03af53b1b382dbbc55
+    be2a44616e4959430436dc4f2a7a9659aa60bb4652aeb2120f149ed197c564e024717c64
+    5972dcb82ab2dde83376d82b2e3c09ffc
+
+    $ keyctl pipe 159771175 > evm.blob
+
+Load an encrypted key "evm" from saved blob:
+
+    $ keyctl add encrypted evm "load `cat evm.blob`" @u
+    831684262
+
+    $ keyctl print 831684262
+    trusted:kmk 32 2375725ad57798846a9bbd240de8906f006e66c03af53b1b382dbbc55
+    be2a44616e4959430436dc4f2a7a9659aa60bb4652aeb2120f149ed197c564e024717c64
+    5972dcb82ab2dde83376d82b2e3c09ffc
+
+
+The initial consumer of trusted keys is EVM, which at boot time needs a high
+quality symmetric key for HMAC protection of file metadata.  The use of a
+trusted key provides strong guarantees that the EVM key has not been
+compromised by a user level problem, and when sealed to specific boot PCR
+values, protects against boot and offline attacks.  Other uses for trusted and
+encrypted keys, such as for disk and file encryption are anticipated.
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO
index e3a55b6..ab5189a 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/ko_KR/HOWTO
@@ -391,8 +391,8 @@
 bugme-janitor 메일링 리스트(bugzilla에 모든 변화들이 여기서 메일로 전해진다)
 에 등록하면 된다.
 
-      http://lists.osdl.org/mailman/listinfo/bugme-new
-      http://lists.osdl.org/mailman/listinfo/bugme-janitors
+      https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+      https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 741fe66..0cfb00f 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -598,7 +598,7 @@
 a) The instructions in DCR must be relocatable.
 b) The instructions in DCR must not include a call instruction.
 c) JTPR must not be targeted by any jump or call instruction.
-d) DCR must not straddle the border betweeen functions.
+d) DCR must not straddle the border between functions.
 
 Anyway, these limitations are checked by the in-kernel instruction
 decoder, so you don't need to worry about that.
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index b336266..ad85797 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -874,7 +874,7 @@
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
                                  is waiting for an interrupt
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accesible via KVM_GET_VCPU_EVENTS)
+                                 accessible via KVM_GET_VCPU_EVENTS)
 
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
@@ -1085,6 +1085,184 @@
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.
 
+4.47 KVM_ASSIGN_PCI_DEVICE
+
+Capability: KVM_CAP_DEVICE_ASSIGNMENT
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_pci_dev (in)
+Returns: 0 on success, -1 on error
+
+Assigns a host PCI device to the VM.
+
+struct kvm_assigned_pci_dev {
+	__u32 assigned_dev_id;
+	__u32 busnr;
+	__u32 devfn;
+	__u32 flags;
+	__u32 segnr;
+	union {
+		__u32 reserved[11];
+	};
+};
+
+The PCI device is specified by the triple segnr, busnr, and devfn.
+Identification in succeeding service requests is done via assigned_dev_id. The
+following flags are specified:
+
+/* Depends on KVM_CAP_IOMMU */
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
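+
+As an illustration only (not part of the ABI description), userspace might
+issue this ioctl roughly as follows; vm_fd is assumed to be the file
+descriptor of an already created VM, and the bus/slot numbers are examples:
+
+	/* needs <linux/kvm.h>, <sys/ioctl.h>, <stdio.h> */
+	struct kvm_assigned_pci_dev dev = {
+		.assigned_dev_id = 1,		/* caller-chosen handle */
+		.segnr = 0,
+		.busnr = 0x01,
+		.devfn = (0x00 << 3) | 0,	/* slot 0, function 0 */
+		.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
+	};
+
+	if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
+		perror("KVM_ASSIGN_PCI_DEVICE");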
+
+4.48 KVM_DEASSIGN_PCI_DEVICE
+
+Capability: KVM_CAP_DEVICE_DEASSIGNMENT
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_pci_dev (in)
+Returns: 0 on success, -1 on error
+
+Ends PCI device assignment, releasing all associated resources.
+
+See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+used in kvm_assigned_pci_dev to identify the device.
+
+4.49 KVM_ASSIGN_DEV_IRQ
+
+Capability: KVM_CAP_ASSIGN_DEV_IRQ
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_irq (in)
+Returns: 0 on success, -1 on error
+
+Assigns an IRQ to a passed-through device.
+
+struct kvm_assigned_irq {
+	__u32 assigned_dev_id;
+	__u32 host_irq;
+	__u32 guest_irq;
+	__u32 flags;
+	union {
+		struct {
+			__u32 addr_lo;
+			__u32 addr_hi;
+			__u32 data;
+		} guest_msi;
+		__u32 reserved[12];
+	};
+};
+
+The following flags are defined:
+
+#define KVM_DEV_IRQ_HOST_INTX    (1 << 0)
+#define KVM_DEV_IRQ_HOST_MSI     (1 << 1)
+#define KVM_DEV_IRQ_HOST_MSIX    (1 << 2)
+
+#define KVM_DEV_IRQ_GUEST_INTX   (1 << 8)
+#define KVM_DEV_IRQ_GUEST_MSI    (1 << 9)
+#define KVM_DEV_IRQ_GUEST_MSIX   (1 << 10)
+
+It is not valid to specify multiple types per host or guest IRQ. However, the
+IRQ type of host and guest can differ or can even be null.
+
+4.50 KVM_DEASSIGN_DEV_IRQ
+
+Capability: KVM_CAP_ASSIGN_DEV_IRQ
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_irq (in)
+Returns: 0 on success, -1 on error
+
+Ends an IRQ assignment to a passed-through device.
+
+See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
+by assigned_dev_id, flags must correspond to the IRQ type specified on
+KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
+
+4.51 KVM_SET_GSI_ROUTING
+
+Capability: KVM_CAP_IRQ_ROUTING
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_irq_routing (in)
+Returns: 0 on success, -1 on error
+
+Sets the GSI routing table entries, overwriting any previously set entries.
+
+struct kvm_irq_routing {
+	__u32 nr;
+	__u32 flags;
+	struct kvm_irq_routing_entry entries[0];
+};
+
+No flags are specified so far; the corresponding field must be set to zero.
+
+struct kvm_irq_routing_entry {
+	__u32 gsi;
+	__u32 type;
+	__u32 flags;
+	__u32 pad;
+	union {
+		struct kvm_irq_routing_irqchip irqchip;
+		struct kvm_irq_routing_msi msi;
+		__u32 pad[8];
+	} u;
+};
+
+/* gsi routing entry types */
+#define KVM_IRQ_ROUTING_IRQCHIP 1
+#define KVM_IRQ_ROUTING_MSI 2
+
+No flags are specified so far; the corresponding field must be set to zero.
+
+struct kvm_irq_routing_irqchip {
+	__u32 irqchip;
+	__u32 pin;
+};
+
+struct kvm_irq_routing_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 pad;
+};
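+
+For illustration only, routing GSI 30 to an MSI message could look roughly
+like this in userspace (vm_fd and the MSI address/data values are example
+assumptions, not part of the ABI; note that the call replaces the whole
+routing table):
+
+	/* needs <linux/kvm.h>, <sys/ioctl.h>, <stdlib.h>, <stdio.h> */
+	struct kvm_irq_routing *r;
+
+	r = calloc(1, sizeof(*r) + sizeof(struct kvm_irq_routing_entry));
+	r->nr = 1;
+	r->entries[0].gsi = 30;
+	r->entries[0].type = KVM_IRQ_ROUTING_MSI;
+	r->entries[0].u.msi.address_lo = 0xfee00000;	/* example only */
+	r->entries[0].u.msi.data = 0x4041;		/* example only */
+
+	if (ioctl(vm_fd, KVM_SET_GSI_ROUTING, r) < 0)
+		perror("KVM_SET_GSI_ROUTING");
+	free(r);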
+
+4.52 KVM_ASSIGN_SET_MSIX_NR
+
+Capability: KVM_CAP_DEVICE_MSIX
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_msix_nr (in)
+Returns: 0 on success, -1 on error
+
+Set the number of MSI-X interrupts for an assigned device. This service can
+only be called once in the lifetime of an assigned device.
+
+struct kvm_assigned_msix_nr {
+	__u32 assigned_dev_id;
+	__u16 entry_nr;
+	__u16 padding;
+};
+
+#define KVM_MAX_MSIX_PER_DEV		256
+
+4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+
+Capability: KVM_CAP_DEVICE_MSIX
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_msix_entry (in)
+Returns: 0 on success, -1 on error
+
+Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting
+the GSI vector to zero means disabling the interrupt.
+
+struct kvm_assigned_msix_entry {
+	__u32 assigned_dev_id;
+	__u32 gsi;
+	__u16 entry; /* The index of entry in the MSI-X table */
+	__u16 padding[3];
+};
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
diff --git a/Documentation/kvm/cpuid.txt b/Documentation/kvm/cpuid.txt
index 14a12ea..8820685 100644
--- a/Documentation/kvm/cpuid.txt
+++ b/Documentation/kvm/cpuid.txt
@@ -36,6 +36,9 @@
 KVM_FEATURE_CLOCKSOURCE2           ||     3 || kvmclock available at msrs
                                    ||       || 0x4b564d00 and 0x4b564d01
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF               ||     4 || async pf can be enabled by
+                                   ||       || writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT ||    24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
diff --git a/Documentation/kvm/msr.txt b/Documentation/kvm/msr.txt
index 8ddcfe8..d079aed 100644
--- a/Documentation/kvm/msr.txt
+++ b/Documentation/kvm/msr.txt
@@ -3,7 +3,6 @@
 =====================================================
 
 KVM makes use of some custom MSRs to service some requests.
-At present, this facility is only used by kvmclock.
 
 Custom MSRs have a range reserved for them, that goes from
 0x4b564d00 to 0x4b564dff. There are MSRs outside this area,
@@ -151,3 +150,38 @@
 			return PRESENT;
 		} else
 			return NON_PRESENT;
+
+MSR_KVM_ASYNC_PF_EN: 0x4b564d02
+	data: Bits 63-6 hold 64-byte aligned physical address of a
+	64 byte memory area which must be in guest RAM and must be
+	zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
+	when asynchronous page faults are enabled on the vcpu, 0 when
+	disabled. Bit 2 is 1 if asynchronous page faults can be injected
+	when the vcpu is in cpl == 0.
+
+	The first 4 bytes of the 64 byte memory location will be written
+	to by the hypervisor at the time of asynchronous page fault
+	(APF) injection, to indicate the type of asynchronous page
+	fault. A value of 1 means that the page referred to by the page
+	fault is not present. A value of 2 means that the page is now
+	available. Disabling interrupts inhibits APFs. The guest must
+	not enable interrupts before the reason is read, or it may be
+	overwritten by another APF. Since APF uses the same exception
+	vector as a regular page fault, the guest must reset the reason
+	to 0 before it does anything that can generate a normal page
+	fault. If the reason is 0 during a page fault, it is a regular
+	page fault.
+
+	During delivery of a type 1 APF, cr2 contains a token that will
+	be used to notify the guest when the missing page becomes
+	available. When the page becomes available, a type 2 APF is sent
+	with cr2 set to the token associated with the page. There is a
+	special token, 0xffffffff, which tells the vcpu that it should
+	wake up all processes waiting for APFs and that no individual
+	type 2 APFs will be sent.
+
+	If APF is disabled while there are outstanding APFs, they will
+	not be delivered.
+
+	Currently a type 2 APF will always be delivered on the same vcpu
+	as the type 1 was, but the guest should not rely on that.
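+
+	A rough guest-side sketch of enabling the feature (illustrative
+	only; the real guest code also registers a handler for the APF
+	exception and manages the reason word per-cpu):
+
+		/* needs <asm/msr.h>, <asm/page.h> */
+		/* 64-byte, 64-byte aligned reason area in guest RAM, zeroed */
+		static u32 apf_reason[16] __aligned(64);
+
+		void enable_async_pf(void)
+		{
+			u64 pa = __pa(apf_reason);
+
+			/* bit 0: enable, bit 2: allow injection at cpl == 0 */
+			wrmsrl(0x4b564d02, pa | (1 << 2) | 1);
+		}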
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
index efb3a6a..6ccaf8e 100644
--- a/Documentation/lguest/lguest.txt
+++ b/Documentation/lguest/lguest.txt
@@ -111,8 +111,11 @@
 
   Then use --tunnet=bridge:lg0 when launching the guest.
 
-  See http://linux-net.osdl.org/index.php/Bridge for general information
-  on how to get bridging working.
+  See:
+
+    http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+
+  for general information on how to get bridging to work.
 
 There is a helpful mailing list at http://ozlabs.org/mailman/listinfo/lguest
 
diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt
index 505f196..4b12abc 100644
--- a/Documentation/magic-number.txt
+++ b/Documentation/magic-number.txt
@@ -150,7 +150,7 @@
 STL_BOARDMAGIC        0xa2267f52  stlbrd            include/linux/stallion.h
 ENI155_MAGIC          0xa54b872d  midway_eprom	    drivers/atm/eni.h
 SCI_MAGIC             0xbabeface  gs_port           drivers/char/sh-sci.h
-CODA_MAGIC            0xC0DAC0DA  coda_file_info    include/linux/coda_fs_i.h
+CODA_MAGIC            0xC0DAC0DA  coda_file_info    fs/coda/coda_fs_i.h
 DPMEM_MAGIC           0xc0ffee11  gdt_pci_sram      drivers/scsi/gdth.h
 STLI_PORTMAGIC        0xe671c7a1  stliport          include/linux/istallion.h
 YAM_MAGIC             0xF10A7654  yam_port          drivers/net/hamradio/yam.c
diff --git a/Documentation/make/headers_install.txt b/Documentation/make/headers_install.txt
index f2481ca..951eb9f 100644
--- a/Documentation/make/headers_install.txt
+++ b/Documentation/make/headers_install.txt
@@ -39,8 +39,9 @@
 The command "make headers_install_all" exports headers for all architectures
 simultaneously.  (This is mostly of interest to distribution maintainers,
 who create an architecture-independent tarball from the resulting include
-directory.)  Remember to provide the appropriate linux/asm directory via "mv"
-or "ln -s" before building a C library with headers exported this way.
+directory.)  You can also use HDR_ARCH_LIST to specify a list of architectures.
+Remember to provide the appropriate linux/asm directory via "mv" or "ln -s"
+before building a C library with headers exported this way.
 
 The kernel header export infrastructure is maintained by David Woodhouse
 <dwmw2@infradead.org>.
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.txt
index bec69a8..a7ba5e4 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.txt
@@ -1,8 +1,8 @@
 In order to use the Ethernet bridging functionality, you'll need the
 userspace tools. These programs and documentation are available
-at http://www.linux-foundation.org/en/Net:Bridge.  The download page is
+at http://www.linuxfoundation.org/en/Net:Bridge.  The download page is
 http://prdownloads.sourceforge.net/bridge.
 
 If you still have questions, don't hesitate to post to the mailing list 
-(more info http://lists.osdl.org/mailman/listinfo/bridge).
+(more info https://lists.linux-foundation.org/mailman/listinfo/bridge).
 
diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt
index 61d7c92..0cb8cb9 100644
--- a/Documentation/networking/caif/spi_porting.txt
+++ b/Documentation/networking/caif/spi_porting.txt
@@ -32,7 +32,7 @@
 	This function is called by the CAIF SPI interface to give
 	you a chance to set up your hardware to be ready to receive
 	a stream of data from the master. The xfer structure contains
-	both physical and logical adresses, as well as the total length
+	both physical and logical addresses, as well as the total length
 	of the transfer in both directions.The dev parameter can be used
 	to map to different CAIF SPI slave devices.
 
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b395ca6..d718bc2 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -38,11 +38,11 @@
 specified in RFCs 4340...42.
 
 The known bugs are at:
-	http://linux-net.osdl.org/index.php/TODO#DCCP
+	http://www.linuxfoundation.org/collaborate/workgroups/networking/todo#DCCP
 
 For more up-to-date versions of the DCCP implementation, please consider using
 the experimental DCCP test tree; instructions for checking this out are on:
-http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
+http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp_testing#Experimental_DCCP_source_tree
 
 
 Socket options
@@ -167,6 +167,7 @@
 seq_window = 100
 	The initial sequence window (sec. 7.5.2) of the sender. This influences
 	the local ackno validity and the remote seqno validity windows (7.5.1).
+	Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
 
 tx_qlen = 5
 	The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/Documentation/networking/generic_netlink.txt b/Documentation/networking/generic_netlink.txt
index d4f8b8b..3e07111 100644
--- a/Documentation/networking/generic_netlink.txt
+++ b/Documentation/networking/generic_netlink.txt
@@ -1,3 +1,3 @@
 A wiki document on how to use Generic Netlink can be found here:
 
- * http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO
+ * http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto
diff --git a/Documentation/nfc/nfc-pn544.txt b/Documentation/nfc/nfc-pn544.txt
new file mode 100644
index 0000000..2fcac9f
--- /dev/null
+++ b/Documentation/nfc/nfc-pn544.txt
@@ -0,0 +1,114 @@
+Kernel driver for the NXP Semiconductors PN544 Near Field
+Communication chip
+
+Author: Jari Vanhala
+Contact: Matti Aaltonen (matti.j.aaltonen at nokia.com)
+
+General
+-------
+
+The PN544 is an integrated transmission module for contactless
+communication. The driver goes under drives/nfc/ and is compiled as a
+module named "pn544". It registers a misc device and creates a device
+file named "/dev/pn544".
+
+Host Interfaces: I2C, SPI and HSU; this driver currently supports only I2C.
+
+The Interface
+-------------
+
+The driver offers a sysfs interface for a hardware test and an IOCTL
+interface for selecting between two operating modes. There are read,
+write and poll functions for transferring messages. The two operating
+modes are the normal (HCI) mode and the firmware update mode.
+
+PN544 is controlled by sending messages from the userspace to the
+chip. The main function of the driver is just to pass those messages
+without caring about the message content.
+
+
+Protocols
+---------
+
+In the normal (HCI) mode and in the firmware update mode read and
+write functions behave a bit differently because the message formats
+or the protocols are different.
+
+In the normal (HCI) mode the protocol used is derived from the ETSI
+HCI specification. The firmware is updated using a specific protocol,
+which is different from HCI.
+
+HCI messages consist of an eight bit header and the message body. The
+header contains the message length. The maximum size for an HCI message
+is 33 bytes. In HCI mode, sent messages are tested for a correct
+checksum. Firmware update messages have the length in the second (MSB)
+and third (LSB) bytes of the message. The maximum FW message length is
+1024 bytes.
+
+For the ETSI HCI specification see
+http://www.etsi.org/WebSite/Technologies/ProtocolSpecification.aspx
+
+The Hardware Test
+-----------------
+
+The idea of the test is that it can be performed by reading from the
+corresponding sysfs file. The test is implemented in the board file
+and it should test that PN544 can be put into the firmware update
+mode. If the test is not implemented the sysfs file does not get
+created.
+
+Example:
+> cat /sys/module/pn544/drivers/i2c\:pn544/3-002b/nfc_test
+1
+
+Normal Operation
+----------------
+
+PN544 is powered up when the device file is opened, otherwise it's
+turned off. Only one instance can use the device at a time.
+
+Userspace applications control PN544 with HCI messages. The hardware
+sends an interrupt when data is available for reading. Data is
+physically read when the read function is called by a userspace
+application. Poll() checks the read interrupt state. Configuration and
+self testing are also done from the userspace using read and write.
+
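+A rough sketch of that userspace read path (illustrative only; includes
+and error handling omitted):
+
+	int fd = open("/dev/pn544", O_RDWR);
+	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+	unsigned char buf[33];		/* maximum HCI message size */
+
+	/* wait for the read interrupt, then fetch one HCI message */
+	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+		read(fd, buf, sizeof(buf));
+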
+Example platform data:
+
+static int rx71_pn544_nfc_request_resources(struct i2c_client *client)
+{
+	/* Get and setup the HW resources for the device */
+}
+
+static void rx71_pn544_nfc_free_resources(void)
+{
+	/* Release the HW resources */
+}
+
+static void rx71_pn544_nfc_enable(int fw)
+{
+	/* Turn the device on */
+}
+
+static int rx71_pn544_nfc_test(void)
+{
+	/*
+	 * Put the device into the FW update mode
+	 * and then back to the normal mode.
+	 * Check the behavior and return one on success,
+	 * zero on failure.
+	 */
+}
+
+static void rx71_pn544_nfc_disable(void)
+{
+	/* turn the power off */
+}
+
+static struct pn544_nfc_platform_data rx71_nfc_data = {
+	.request_resources = rx71_pn544_nfc_request_resources,
+	.free_resources = rx71_pn544_nfc_free_resources,
+	.enable = rx71_pn544_nfc_enable,
+	.test = rx71_pn544_nfc_test,
+	.disable = rx71_pn544_nfc_disable,
+};
diff --git a/Documentation/power/drivers-testing.txt b/Documentation/power/drivers-testing.txt
index 7f7a737..638afdf 100644
--- a/Documentation/power/drivers-testing.txt
+++ b/Documentation/power/drivers-testing.txt
@@ -23,10 +23,10 @@
 without the new driver, you are ready to test it:
 
 a) Build the driver as a module, load it and try the test modes of hibernation
-   (see: Documents/power/basic-pm-debugging.txt, 1).
+   (see: Documentation/power/basic-pm-debugging.txt, 1).
 
 b) Load the driver and attempt to hibernate in the "reboot", "shutdown" and
-   "platform" modes (see: Documents/power/basic-pm-debugging.txt, 1).
+   "platform" modes (see: Documentation/power/basic-pm-debugging.txt, 1).
 
 c) Compile the driver directly into the kernel and try the test modes of
    hibernation.
@@ -34,12 +34,12 @@
 d) Attempt to hibernate with the driver compiled directly into the kernel
    in the "reboot", "shutdown" and "platform" modes.
 
-e) Try the test modes of suspend (see: Documents/power/basic-pm-debugging.txt,
+e) Try the test modes of suspend (see: Documentation/power/basic-pm-debugging.txt,
    2).  [As far as the STR tests are concerned, it should not matter whether or
    not the driver is built as a module.]
 
 f) Attempt to suspend to RAM using the s2ram tool with the driver loaded
-   (see: Documents/power/basic-pm-debugging.txt, 2).
+   (see: Documentation/power/basic-pm-debugging.txt, 2).
 
 Each of the above tests should be repeated several times and the STD tests
 should be mixed with the STR tests.  If any of them fails, the driver cannot be
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 41cc7b3..ffe55ff 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -50,6 +50,15 @@
 and device class callbacks are referred to as subsystem-level callbacks in what
 follows.
 
+By default, the callbacks are always invoked in process context with interrupts
+enabled.  However, subsystems can use the pm_runtime_irq_safe() helper function
+to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
+callbacks should be invoked in atomic context with interrupts disabled
+(->runtime_idle() is still invoked the default way).  This implies that these
+callback routines must not block or sleep, but it also means that the
+synchronous helper functions listed at the end of Section 4 can be used within
+an interrupt handler or in an atomic context.
+
 The subsystem-level suspend callback is _entirely_ _responsible_ for handling
 the suspend of the device as appropriate, which may, but need not include
 executing the device driver's own ->runtime_suspend() callback (from the
@@ -237,6 +246,10 @@
       Section 8); it may be modified only by the pm_runtime_no_callbacks()
       helper function
 
+  unsigned int irq_safe;
+    - indicates that the ->runtime_suspend() and ->runtime_resume() callbacks
+      will be invoked with the spinlock held and interrupts disabled
+
   unsigned int use_autosuspend;
     - indicates that the device's driver supports delayed autosuspend (see
       Section 9); it may be modified only by the
@@ -344,6 +357,10 @@
     - decrement the device's usage counter; if the result is 0 then run
       pm_runtime_idle(dev) and return its result
 
+  int pm_runtime_put_sync_suspend(struct device *dev);
+    - decrement the device's usage counter; if the result is 0 then run
+      pm_runtime_suspend(dev) and return its result
+
   int pm_runtime_put_sync_autosuspend(struct device *dev);
     - decrement the device's usage counter; if the result is 0 then run
       pm_runtime_autosuspend(dev) and return its result
@@ -397,6 +414,11 @@
       PM attributes from /sys/devices/.../power (or prevent them from being
       added when the device is registered)
 
+  void pm_runtime_irq_safe(struct device *dev);
+    - set the power.irq_safe flag for the device, causing the runtime-PM
+      suspend and resume callbacks (but not the idle callback) to be invoked
+      with interrupts disabled
+
   void pm_runtime_mark_last_busy(struct device *dev);
     - set the power.last_busy field to the current time
 
@@ -438,6 +460,15 @@
 pm_runtime_mark_last_busy()
 pm_runtime_autosuspend_expiration()
 
+If pm_runtime_irq_safe() has been called for a device then the following helper
+functions may also be used in interrupt context:
+
+pm_runtime_suspend()
+pm_runtime_autosuspend()
+pm_runtime_resume()
+pm_runtime_get_sync()
+pm_runtime_put_sync_suspend()
+
 5. Run-time PM Initialization, Device Probing and Removal
 
 Initially, the run-time PM is disabled for all devices, which means that the
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 302db5d..7400d75 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -131,7 +131,7 @@
 point and the way a new platform should be added to the kernel. The
 legacy iSeries platform breaks those rules as it predates this scheme,
 but no new board support will be accepted in the main tree that
-doesn't follows them properly.  In addition, since the advent of the
+doesn't follow them properly.  In addition, since the advent of the
 arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
 platforms and 32-bit platforms which move into arch/powerpc will be
 required to use these rules as well.
@@ -1025,7 +1025,7 @@
 
 WARNING: This version is still in early development stage; the
 resulting device-tree "blobs" have not yet been validated with the
-kernel. The current generated bloc lacks a useful reserve map (it will
+kernel. The current generated block lacks a useful reserve map (it will
 be fixed to generate an empty one, it's up to the bootloader to fill
 it up) among others. The error handling needs work, bugs are lurking,
 etc...
@@ -1098,7 +1098,7 @@
                                  * an arbitrary array of bytes
                                  */
 
-  childnode@addresss {	/* define a child node named "childnode"
+  childnode@address {	/* define a child node named "childnode"
                                  * whose unit name is "childnode at
 				 * address"
                                  */
diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
new file mode 100644
index 0000000..ee45980
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
@@ -0,0 +1,52 @@
+PPC4xx Clock Power Management (CPM) node
+
+Required properties:
+	- compatible		: compatible list, currently only "ibm,cpm"
+	- dcr-access-method	: "native"
+	- dcr-reg		: < DCR register range >
+
+Optional properties:
+	- er-offset		: All 4xx SoCs with a CPM controller have
+				  one of two different orders for the CPM
+				  registers. Some have the CPM registers
+				  in the following order (ER,FR,SR). The
+				  others have them in the following order
+				  (SR,ER,FR). For the second case set
+				  er-offset = <1>.
+	- unused-units		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices.
+	- idle-doze		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices. This is usually just CPM[CPU].
+	- standby		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on standby and
+				  restored on resume.
+	- suspend		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on suspend (mem) and
+				  restored on resume. Note, for standby
+				  and suspend the corresponding bits can
+				  be different or the same. Usually for
+				  standby only class 2 and 3 units are set.
+				  However, the interface does not care.
+				  If they are the same, additional power
+				  saving will come from putting the DDR
+				  in self-refresh mode, if supported, and
+				  from any additional power saving
+				  techniques for the specific SoC.
+
+Example:
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		er-offset = <0>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff0000>;
+		suspend = <0xfeff791d>;
+	};
diff --git a/Documentation/powerpc/dts-bindings/eeprom.txt b/Documentation/powerpc/dts-bindings/eeprom.txt
new file mode 100644
index 0000000..4342c10
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/eeprom.txt
@@ -0,0 +1,28 @@
+EEPROMs (I2C)
+
+Required properties:
+
+  - compatible : should be "<manufacturer>,<type>"
+		 If there is no specific driver for <manufacturer>, a generic
+		 driver based on <type> is selected. Possible types are:
+		 24c00, 24c01, 24c02, 24c04, 24c08, 24c16, 24c32, 24c64,
+		 24c128, 24c256, 24c512, 24c1024, spd
+
+  - reg : the I2C address of the EEPROM
+
+Optional properties:
+
+  - pagesize : the length of the page used for writing. Please consult the
+               manual of your device; that value varies a lot. A wrong value
+	       may result in data loss! If not specified, a safety value of
+	       '1' is used which will be very slow.
+
+  - read-only: this parameterless property disables writes to the eeprom
+
+Example:
+
+eeprom@52 {
+	compatible = "atmel,24c32";
+	reg = <0x52>;
+	pagesize = <32>;
+};
diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt
index 125f4ab..d35dcdd 100644
--- a/Documentation/pps/pps.txt
+++ b/Documentation/pps/pps.txt
@@ -170,3 +170,49 @@
 
 Please, note that to compile userland programs you need the file timepps.h
 (see Documentation/pps/).
+
+
+Generators
+----------
+
+Sometimes one needs to be able not only to catch PPS signals but also to
+produce them. For example, running a distributed simulation may require the
+computers' clocks to be synchronized very tightly. One way to do this is to
+invent some complicated hardware solution, but it may be neither necessary
+nor affordable. The cheap way is to load a PPS generator on one of the
+computers (master) and PPS clients on the others (slaves), and to use very
+simple cables to deliver the signals over parallel ports, for example.
+
+Parallel port cable pinout:
+pin	name	master      slave
+1	STROBE	  *------     *
+2	D0	  *     |     *
+3	D1	  *     |     *
+4	D2	  *     |     *
+5	D3	  *     |     *
+6	D4	  *     |     *
+7	D5	  *     |     *
+8	D6	  *     |     *
+9	D7	  *     |     *
+10	ACK	  *     ------*
+11	BUSY	  *           *
+12	PE	  *           *
+13	SEL	  *           *
+14	AUTOFD	  *           *
+15	ERROR	  *           *
+16	INIT	  *           *
+17	SELIN	  *           *
+18-25	GND	  *-----------*
+
+Please note that the parallel port interrupt occurs only on the high->low
+transition, so it is used for the PPS assert edge. The PPS clear edge can be
+determined only by polling in the interrupt handler, which can actually be
+done more precisely because interrupt handling delays can be quite large and
+random. So the current parport PPS generator implementation (the
+pps_gen_parport module) is geared towards using the clear edge for time
+synchronization.
+
+Clear edge polling is done with interrupts disabled, so it's better to make
+the delay between the assert and clear edges as small as possible to reduce
+system latencies. But if it is too small, the slave won't be able to capture
+the clear edge transition. The default of 30us should be good enough in most
+situations. The delay can be selected using the 'delay' parameter of the
+pps_gen_parport module.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 3c00c9c..d2651c4 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -3,7 +3,7 @@
 sched-arch.txt
 	- CPU Scheduler implementation hints for architecture specific code.
 sched-design-CFS.txt
-	- goals, design and implementation of the Complete Fair Scheduler.
+	- goals, design and implementation of the Completely Fair Scheduler.
 sched-domains.txt
 	- information on scheduling domains.
 sched-nice-design.txt
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index 337c924..5e83769 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -573,7 +573,7 @@
 	* Backround nodev_timeout processing to DPC This enables us to
 	  unblock (stop dev_loss_tmo) when appopriate.
 	* Fix array discovery with multiple luns.  The max_luns was 0 at
-	  the time the host structure was intialized.  lpfc_cfg_params
+	  the time the host structure was initialized.  lpfc_cfg_params
 	  then set the max_luns to the correct value afterwards.
 	* Remove unused define LPFC_MAX_LUN and set the default value of
 	  lpfc_max_lun parameter to 512.
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 00301ed..b64d10d 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,25 @@
+Release Date    : Tues.  Dec 14, 2010 17:00:00 PST 2010 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.05.29-rc1
+Old Version     : 00.00.04.31-rc1
+    1. Rename megaraid_sas.c to megaraid_sas_base.c.
+    2. Update GPL headers.
+    3. Add MSI-X support and 'msix_disable' module parameter.
+    4. Use lowest memory bar (for SR-IOV VF support).
+    5. Add struct megasas_instance_template changes, and change all code to use
+       new instance entries:
+
+       irqreturn_t (*service_isr )(int irq, void *devp);
+       void (*tasklet)(unsigned long);
+       u32 (*init_adapter)(struct megasas_instance *);
+       u32 (*build_and_issue_cmd) (struct megasas_instance *,
+       struct scsi_cmnd *);
+       void (*issue_dcmd) (struct megasas_instance *instance,
+                              struct megasas_cmd *cmd);
+
+   6. Add code to support MegaRAID 9265/9285 controllers device id (0x5b).
+-------------------------------------------------------------------------------
 1 Release Date    : Thur.  May 03, 2010 09:12:45 PST 2009 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Bo Yang
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
index 07dcdb0..e09468a 100644
--- a/Documentation/serial/00-INDEX
+++ b/Documentation/serial/00-INDEX
@@ -14,6 +14,8 @@
 	- notes on using the RISCom/8 multi-port serial driver.
 rocket.txt
 	- info on the Comtrol RocketPort multiport serial driver.
+serial-rs485.txt
+	- info about RS485 structures and support in the kernel.
 specialix.txt
 	- info on hardware/driver for specialix IO8+ multiport serial card.
 stallion.txt
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.txt
new file mode 100644
index 0000000..a493238
--- /dev/null
+++ b/Documentation/serial/serial-rs485.txt
@@ -0,0 +1,120 @@
+                        RS485 SERIAL COMMUNICATIONS
+
+1. INTRODUCTION
+
+   EIA-485, also known as TIA/EIA-485 or RS-485, is a standard defining the
+   electrical characteristics of drivers and receivers for use in balanced
+   digital multipoint systems.
+   This standard is widely used for communications in industrial automation
+   because it can be used effectively over long distances and in electrically
+   noisy environments.
+
+2. HARDWARE-RELATED CONSIDERATIONS
+
+   Some CPUs/UARTs (e.g., Atmel AT91 or 16C950 UART) contain a built-in
+   half-duplex mode capable of automatically controlling line direction by
+   toggling RTS or DTR signals. That can be used to control external
+   half-duplex hardware like an RS485 transceiver or any RS232-connected
+   half-duplex devices like some modems.
+
+   For these microcontrollers, the Linux driver should be made capable of
+   working in both modes, and proper ioctls (see later) should be made
+   available at user-level to allow switching from one mode to the other, and
+   vice versa.
+
+3. DATA STRUCTURES ALREADY AVAILABLE IN THE KERNEL
+
+   The Linux kernel provides the serial_rs485 structure (see [1]) to handle
+   RS485 communications. This data structure is used to set and configure RS485
+   parameters in the platform data and in ioctls.
+
+   Any driver for devices capable of working both as RS232 and RS485 should
+   provide at least the following ioctls:
+
+    - TIOCSRS485 (typically associated with number 0x542F). This ioctl is used
+      to enable/disable RS485 mode from user-space
+
+    - TIOCGRS485 (typically associated with number 0x542E). This ioctl is used
+      to get RS485 mode from kernel-space (i.e., driver) to user-space.
+
+   In other words, the serial driver should contain code similar to the
+   following:
+
+	static struct uart_ops atmel_pops = {
+		/* ... */
+		.ioctl		= handle_ioctl,
+	};
+
+	static int handle_ioctl(struct uart_port *port,
+		unsigned int cmd,
+		unsigned long arg)
+	{
+		struct serial_rs485 rs485conf;
+
+		switch (cmd) {
+		case TIOCSRS485:
+			if (copy_from_user(&rs485conf,
+				(struct serial_rs485 *) arg,
+				sizeof(rs485conf)))
+					return -EFAULT;
+
+			/* ... */
+			break;
+
+		case TIOCGRS485:
+			if (copy_to_user((struct serial_rs485 *) arg,
+				...,
+				sizeof(rs485conf)))
+					return -EFAULT;
+			/* ... */
+			break;
+
+		/* ... */
+		}
+	}
+
+
+4. USAGE FROM USER-LEVEL
+
+   From user-level, the RS485 configuration can be read and set using the
+   previous ioctls. For instance, to enable RS485 mode you can use the
+   following code:
+
+	#include <linux/serial.h>
+
+	/* Driver-specific ioctls: */
+	#define TIOCGRS485      0x542E
+	#define TIOCSRS485      0x542F
+
+	/* Open your specific device (e.g., /dev/mydevice): */
+	int fd = open ("/dev/mydevice", O_RDWR);
+	if (fd < 0) {
+		/* Error handling. See errno. */
+	}
+
+	struct serial_rs485 rs485conf;
+
+	/* Read the current configuration first, so that flag bits are not
+	   ORed into an uninitialized value: */
+	if (ioctl (fd, TIOCGRS485, &rs485conf) < 0) {
+		/* Error handling. See errno. */
+	}
+
+	/* Enable RS485 mode: */
+	rs485conf.flags |= SER_RS485_ENABLED;
+
+	/* Set rts delay before send, if needed: */
+	rs485conf.flags |= SER_RS485_RTS_BEFORE_SEND;
+	rs485conf.delay_rts_before_send = ...;
+
+	/* Set rts delay after send, if needed: */
+	rs485conf.flags |= SER_RS485_RTS_AFTER_SEND;
+	rs485conf.delay_rts_after_send = ...;
+
+	if (ioctl (fd, TIOCSRS485, &rs485conf) < 0) {
+		/* Error handling. See errno. */
+	}
+
+	/* Use read() and write() syscalls here... */
+
+	/* Close the device when finished: */
+	if (close (fd) < 0) {
+		/* Error handling. See errno. */
+	}
+
+5. REFERENCES
+
+ [1]	include/linux/serial.h
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
index 7c90050..540db41 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.txt
@@ -107,7 +107,7 @@
 
 dcd_change()	-	Report to the tty line the current DCD pin status
 			changes and the relative timestamp. The timestamp
-			can be NULL.
+			cannot be NULL.
 
 
 Driver Access
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index d0eb696..3c1eddd 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -974,13 +974,6 @@
 
     See hdspm.txt for details.
 
-  Module snd-hifier
-  -----------------
-
-    Module for the MediaTek/TempoTec HiFier Fantasia sound card.
-
-    This module supports autoprobe and multiple cards.
-
   Module snd-ice1712
   ------------------
 
@@ -1531,15 +1524,20 @@
   Module snd-oxygen
   -----------------
 
-    Module for sound cards based on the C-Media CMI8788 chip:
+    Module for sound cards based on the C-Media CMI8786/8787/8788 chip:
     * Asound A-8788
+    * Asus Xonar DG
     * AuzenTech X-Meridian
+    * AuzenTech X-Meridian 2G
     * Bgears b-Enspirer
     * Club3D Theatron DTS
     * HT-Omega Claro (plus)
     * HT-Omega Claro halo (XT)
+    * Kuroutoshikou CMI8787-HG2PCI
     * Razer Barracuda AC-1
     * Sondigo Inferno
+    * TempoTec HiFier Fantasia
+    * TempoTec HiFier Serenade
 
     This module supports autoprobe and multiple cards.
 
@@ -2006,9 +2004,9 @@
   Module snd-virtuoso
   -------------------
 
-    Module for sound cards based on the Asus AV100/AV200 chips,
-    i.e., Xonar D1, DX, D2, D2X, DS, HDAV1.3 (Deluxe), Essence ST
-    (Deluxe) and Essence STX.
+    Module for sound cards based on the Asus AV66/AV100/AV200 chips,
+    i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
+    HDAV1.3 (Deluxe), and HDAV1.3 Slim.
 
     This module supports autoprobe and multiple cards.
 
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 37c6aad..16ae430 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -149,7 +149,6 @@
   acer-aspire-7730g Acer Aspire 7730G
   acer-aspire-8930g Acer Aspire 8930G
   medion	Medion Laptops
-  medion-md2	Medion MD2
   targa-dig	Targa/MSI
   targa-2ch-dig	Targa/MSI with 2-channel
   targa-8ch-dig Targa/MSI with 8-channel (MSI GX620)
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index 6bb916d..68a4fe3 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -19,7 +19,7 @@
 -----------------------------------
 Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
 "platform device".  The master configuration is passed to the driver via a table
-found in arch/arm/mach-pxa/include/mach/pxa2xx_spi.h:
+found in include/linux/spi/pxa2xx_spi.h:
 
 struct pxa2xx_spi_master {
 	enum pxa_ssp_type ssp_type;
@@ -94,7 +94,7 @@
 
 Each slave device attached to the PXA must provide slave specific configuration
 information via the structure "pxa2xx_spi_chip" found in
-"arch/arm/mach-pxa/include/mach/pxa2xx_spi.h".  The pxa2xx_spi master controller driver
+"include/linux/spi/pxa2xx_spi.h".  The pxa2xx_spi master controller driver
 will uses the configuration whenever the driver communicates with the slave
 device. All fields are optional.
 
diff --git a/Documentation/sysctl/00-INDEX b/Documentation/sysctl/00-INDEX
index 1286f45..8cf5d49 100644
--- a/Documentation/sysctl/00-INDEX
+++ b/Documentation/sysctl/00-INDEX
@@ -4,8 +4,6 @@
 	- general information about /proc/sys/ sysctl files.
 abi.txt
 	- documentation for /proc/sys/abi/*.
-ctl_unnumbered.txt
-	- explanation of why one should not add new binary sysctl numbers.
 fs.txt
 	- documentation for /proc/sys/fs/*.
 kernel.txt
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 209e158..11d5ced 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -34,6 +34,7 @@
 - hotplug
 - java-appletviewer           [ binfmt_java, obsolete ]
 - java-interpreter            [ binfmt_java, obsolete ]
+- kptr_restrict
 - kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
 - modprobe                    ==> Documentation/debugging-modules.txt
@@ -219,7 +220,7 @@
 This toggle indicates whether unprivileged users are prevented from using
 dmesg(8) to view messages from the kernel's log buffer.  When
 dmesg_restrict is set to (0) there are no restrictions.  When
-dmesg_restrict is set set to (1), users must have CAP_SYS_ADMIN to use
+dmesg_restrict is set to (1), users must have CAP_SYSLOG to use
 dmesg(8).
 
 The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
@@ -261,6 +262,19 @@
 
 ==============================================================
 
+kptr_restrict:
+
+This toggle indicates whether restrictions are placed on
+exposing kernel addresses via /proc and other interfaces.  When
+kptr_restrict is set to (0), there are no restrictions.  When
+kptr_restrict is set to (1), the default, kernel pointers
+printed using the %pK format specifier will be replaced with 0's
+unless the user has CAP_SYSLOG.  When kptr_restrict is set to
+(2), kernel pointers printed using %pK will be replaced with 0's
+regardless of privileges.
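+
+For example, a driver that wants its pointer output to honour this policy
+prints it with %pK instead of %p (the variable name here is illustrative):
+
+	pr_info("ring buffer at %pK\n", ring_base);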
+
+==============================================================
+
 kstack_depth_to_print: (X86 only)
 
 Controls the number of words to print when dumping the raw
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
new file mode 100755
index 0000000..dbeb8a0
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.py
@@ -0,0 +1,1094 @@
+#!/usr/bin/python
+# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
+#
+# Copyright (c) 2010 Rising Tide Systems
+# Copyright (c) 2010 Linux-iSCSI.org
+#
+# Author: nab@kernel.org
+#
+import os, sys
+import subprocess as sub
+import string
+import re
+import optparse
+
+tcm_dir = ""
+
+fabric_ops = []
+fabric_mod_dir = ""
+fabric_mod_port = ""
+fabric_mod_init_port = ""
+
+def tcm_mod_err(msg):
+	print msg
+	sys.exit(1)
+
+def tcm_mod_create_module_subdir(fabric_mod_dir_var):
+
+	if os.path.isdir(fabric_mod_dir_var) == True:
+		return 1
+
+	print "Creating fabric_mod_dir: " + fabric_mod_dir_var
+	ret = os.mkdir(fabric_mod_dir_var)
+	if ret:
+		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
+
+	return
+
+def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION	\"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN	32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
+	buf += "	u64 nport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
+	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* FC lport target portal group tag for TCM */\n"
+	buf += "	u16 lport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
+	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_lport {\n"
+	buf += "	/* SCSI protocol the lport is providing */\n"
+	buf += "	u8 lport_proto_id;\n"
+	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
+	buf += "	u64 lport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
+	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
+	buf += "	struct se_wwn lport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "lport"
+	fabric_mod_init_port = "nport"
+
+	return
+
+def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
+	buf += "	u64 iport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
+	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* SAS port target portal group tag for TCM */\n"
+	buf += "	u16 tport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
+	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tport {\n"
+	buf += "	/* SCSI protocol the tport is providing */\n"
+	buf += "	u8 tport_proto_id;\n"
+	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
+	buf += "	u64 tport_wwpn;\n"
+	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
+	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
+	buf += "	struct se_wwn tport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "tport"
+	fabric_mod_init_port = "iport"
+
+	return
+
+def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
+	global fabric_mod_port
+	global fabric_mod_init_port
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
+	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
+	buf += "\n"
+	buf += "struct " + fabric_mod_name + "_nacl {\n"
+	buf += "	/* ASCII formatted InitiatorName */\n"
+	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
+	buf += "	struct se_node_acl se_node_acl;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tpg {\n"
+	buf += "	/* iSCSI target portal group tag for TCM */\n"
+	buf += "	u16 tport_tpgt;\n"
+	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
+	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
+	buf += "	struct se_portal_group se_tpg;\n"
+	buf += "};\n\n"
+	buf += "struct " + fabric_mod_name + "_tport {\n"
+	buf += "	/* SCSI protocol the tport is providing */\n"
+	buf += "	u8 tport_proto_id;\n"
+	buf += "	/* ASCII formatted TargetName for IQN */\n"
+	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
+	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
+	buf += "	struct se_wwn tport_wwn;\n"
+	buf += "};\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	fabric_mod_port = "tport"
+	fabric_mod_init_port = "iport"
+
+	return
+
+def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
+
+	if proto_ident == "FC":
+		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
+	elif proto_ident == "SAS":
+		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
+	elif proto_ident == "iSCSI":
+		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
+	else:
+		print "Unsupported proto_ident: " + proto_ident
+		sys.exit(1)
+
+	return
+
+def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
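+	# Write $FABRIC_MOD_configfs.c: the configfs make/drop callbacks, the
+	# struct target_core_fabric_ops table, and the module init/exit code
+	# that registers the new fabric with the TCM configfs infrastructure.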
+	buf = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
+	print "Writing file: " + f
+
+	p = open(f, 'w');
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "#include <linux/module.h>\n"
+	buf += "#include <linux/moduleparam.h>\n"
+	buf += "#include <linux/version.h>\n"
+	buf += "#include <generated/utsrelease.h>\n"
+	buf += "#include <linux/utsname.h>\n"
+	buf += "#include <linux/init.h>\n"
+	buf += "#include <linux/slab.h>\n"
+	buf += "#include <linux/kthread.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/string.h>\n"
+	buf += "#include <linux/configfs.h>\n"
+	buf += "#include <linux/ctype.h>\n"
+	buf += "#include <asm/unaligned.h>\n\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/target_core_transport.h>\n"
+	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric_configfs.h>\n"
+	buf += "#include <target/target_core_fabric_lib.h>\n"
+	buf += "#include <target/target_core_device.h>\n"
+	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_configfs.h>\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/configfs_macros.h>\n\n"
+	buf += "#include <" + fabric_mod_name + "_base.h>\n"
+	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+
+	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
+	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
+
+	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
+	buf += "	struct se_portal_group *se_tpg,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
+	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	u64 wwpn = 0;\n"
+
+	buf += "	u32 nexus_depth;\n\n"
+	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
+	buf += "		return ERR_PTR(-EINVAL); */\n"
+	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
+	buf += "	if (!(se_nacl_new))\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
+	buf += "	nexus_depth = 1;\n"
+	buf += "	/*\n"
+	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
+	buf += "	 * when converting a NodeACL from demo mode -> explict\n"
+	buf += "	 */\n"
+	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
+	buf += "				name, nexus_depth);\n"
+	buf += "	if (IS_ERR(se_nacl)) {\n"
+	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
+	buf += "		return se_nacl;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
+	buf += "	 */\n"
+	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
+
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
+	buf += "	return se_nacl;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
+	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+	buf += "	kfree(nacl);\n"
+	buf += "}\n\n"
+
+	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
+	buf += "	struct se_wwn *wwn,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
+	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
+	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
+	buf += "	unsigned long tpgt;\n"
+	buf += "	int ret;\n\n"
+	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
+	buf += "		return ERR_PTR(-EINVAL);\n"
+	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
+	buf += "		return ERR_PTR(-EINVAL);\n\n"
+	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
+	buf += "	if (!(tpg)) {\n"
+	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "	}\n"
+	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
+	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
+	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
+	buf += "				&tpg->se_tpg, (void *)tpg,\n"
+	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
+	buf += "	if (ret < 0) {\n"
+	buf += "		kfree(tpg);\n"
+	buf += "		return NULL;\n"
+	buf += "	}\n"
+	buf += "	return &tpg->se_tpg;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
+	buf += "	core_tpg_deregister(se_tpg);\n"
+	buf += "	kfree(tpg);\n"
+	buf += "}\n\n"
+
+	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
+	buf += "	struct target_fabric_configfs *tf,\n"
+	buf += "	struct config_group *group,\n"
+	buf += "	const char *name)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	u64 wwpn = 0;\n\n"
+
+	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
+	buf += "		return ERR_PTR(-EINVAL); */\n\n"
+	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
+	buf += "	if (!(" + fabric_mod_port + ")) {\n"
+	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
+	buf += "		return ERR_PTR(-ENOMEM);\n"
+	buf += "	}\n"
+
+	if proto_ident == "FC" or proto_ident == "SAS":
+		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
+
+	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
+	buf += "}\n\n"
+	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
+	buf += "{\n"
+	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
+	buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
+	buf += "	kfree(" + fabric_mod_port + ");\n"
+	buf += "}\n\n"
+	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
+	buf += "	struct target_fabric_configfs *tf,\n"
+	buf += "	char *page)\n"
+	buf += "{\n"
+	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
+	buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
+	buf += "		utsname()->machine);\n"
+	buf += "}\n\n"
+	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
+	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
+	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
+	buf += "	NULL,\n"
+	buf += "};\n\n"
+
+	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
+	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
+	buf += "	.get_fabric_proto_ident		= " + fabric_mod_name + "_get_fabric_proto_ident,\n"
+	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
+	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
+	buf += "	.tpg_get_default_depth		= " + fabric_mod_name + "_get_default_depth,\n"
+	buf += "	.tpg_get_pr_transport_id	= " + fabric_mod_name + "_get_pr_transport_id,\n"
+	buf += "	.tpg_get_pr_transport_id_len	= " + fabric_mod_name + "_get_pr_transport_id_len,\n"
+	buf += "	.tpg_parse_pr_out_transport_id	= " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
+	buf += "	.tpg_check_demo_mode		= " + fabric_mod_name + "_check_false,\n"
+	buf += "	.tpg_check_demo_mode_cache	= " + fabric_mod_name + "_check_true,\n"
+	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
+	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
+	buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
+	buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
+	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
+	buf += "	.release_cmd_to_pool		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.release_cmd_direct		= " + fabric_mod_name + "_release_cmd,\n"
+	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
+	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
+	buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
+	buf += "	.fall_back_to_erl0		= " + fabric_mod_name + "_reset_nexus,\n"
+	buf += "	.sess_logged_in			= " + fabric_mod_name + "_sess_logged_in,\n"
+	buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
+	buf += "	.sess_get_initiator_sid		= NULL,\n"
+	buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
+	buf += "	.write_pending_status		= " + fabric_mod_name + "_write_pending_status,\n"
+	buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
+	buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
+	buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
+	buf += "	.new_cmd_failure		= " + fabric_mod_name + "_new_cmd_failure,\n"
+	buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
+	buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
+	buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
+	buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
+	buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
+	buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
+	buf += "	.pack_lun			= " + fabric_mod_name + "_pack_lun,\n"
+	buf += "	/*\n"
+	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
+	buf += "	 */\n"
+	buf += "	.fabric_make_wwn		= " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
+	buf += "	.fabric_drop_wwn		= " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
+	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
+	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
+	buf += "	.fabric_post_link		= NULL,\n"
+	buf += "	.fabric_pre_unlink		= NULL,\n"
+	buf += "	.fabric_make_np			= NULL,\n"
+	buf += "	.fabric_drop_np			= NULL,\n"
+	buf += "	.fabric_make_nodeacl		= " + fabric_mod_name + "_make_nodeacl,\n"
+	buf += "	.fabric_drop_nodeacl		= " + fabric_mod_name + "_drop_nodeacl,\n"
+	buf += "};\n\n"
+
+	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
+	buf += "{\n"
+	buf += "	struct target_fabric_configfs *fabric;\n"
+	buf += "	int ret;\n\n"
+	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
+	buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
+	buf += "		utsname()->machine);\n"
+	buf += "	/*\n"
+	buf += "	 * Register the top level struct config_item_type with TCM core\n"
+	buf += "	 */\n"
+	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+	buf += "	if (!(fabric)) {\n"
+	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
+	buf += "		return -ENOMEM;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
+	buf += "	 */\n"
+	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
+	buf += "	/*\n"
+	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
+	buf += "	 */\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
+	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
+	buf += "	/*\n"
+	buf += "	 * Register the fabric for use within TCM\n"
+	buf += "	 */\n"
+	buf += "	ret = target_fabric_configfs_register(fabric);\n"
+	buf += "	if (ret < 0) {\n"
+	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
+	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
+	buf += "		return ret;\n"
+	buf += "	}\n"
+	buf += "	/*\n"
+	buf += "	 * Setup our local pointer to *fabric\n"
+	buf += "	 */\n"
+	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
+	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+	buf += "	return 0;\n"
+	buf += "};\n\n"
+	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
+	buf += "{\n"
+	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
+	buf += "		return;\n\n"
+	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
+	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
+	buf += "	printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
+	buf += "};\n\n"
+
+	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
+	buf += "{\n"
+	buf += "	int ret;\n\n"
+	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
+	buf += "	if (ret < 0)\n"
+	buf += "		return ret;\n\n"
+	buf += "	return 0;\n"
+	buf += "};\n\n"
+	buf += "static void " + fabric_mod_name + "_exit(void)\n"
+	buf += "{\n"
+	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
+	buf += "};\n\n"
+
+	buf += "#ifdef MODULE\n"
+	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
+	buf += "MODULE_LICENSE(\"GPL\");\n"
+	buf += "module_init(" + fabric_mod_name + "_init);\n"
+	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
+	buf += "#endif\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	return
+
+def tcm_mod_scan_fabric_ops(tcm_dir):
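+	# Scan include/target/target_core_fabric_ops.h and collect each function
+	# pointer member of struct target_core_fabric_ops into the global
+	# fabric_ops[] list, for use by tcm_mod_dump_fabric_ops() below.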
+
+	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+
+	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
+	process_fo = 0;
+
+	p = open(fabric_ops_api, 'r')
+
+	line = p.readline()
+	while line:
+		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
+			line = p.readline()
+			continue
+
+		if process_fo == 0:
+			process_fo = 1;
+			line = p.readline()
+			# Search for function pointer
+			if not re.search('\(\*', line):
+				continue
+
+			fabric_ops.append(line.rstrip())
+			continue
+
+		line = p.readline()
+		# Search for function pointer
+		if not re.search('\(\*', line):
+			continue
+
+		fabric_ops.append(line.rstrip())
+
+	p.close()
+	return
+
+def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
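+	# Write $FABRIC_MOD_fabric.c with a NOP stub for every fabric op found
+	# by tcm_mod_scan_fabric_ops(), and the matching prototypes into
+	# $FABRIC_MOD_fabric.h.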
+	buf = ""
+	bufi = ""
+
+	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
+	print "Writing file: " + fi
+
+	pi = open(fi, 'w')
+	if not pi:
+		tcm_mod_err("Unable to open file: " + fi)
+
+	buf = "#include <linux/slab.h>\n"
+	buf += "#include <linux/kthread.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/list.h>\n"
+	buf += "#include <linux/types.h>\n"
+	buf += "#include <linux/string.h>\n"
+	buf += "#include <linux/ctype.h>\n"
+	buf += "#include <asm/unaligned.h>\n"
+	buf += "#include <scsi/scsi.h>\n"
+	buf += "#include <scsi/scsi_host.h>\n"
+	buf += "#include <scsi/scsi_device.h>\n"
+	buf += "#include <scsi/scsi_cmnd.h>\n"
+	buf += "#include <scsi/libfc.h>\n\n"
+	buf += "#include <target/target_core_base.h>\n"
+	buf += "#include <target/target_core_transport.h>\n"
+	buf += "#include <target/target_core_fabric_ops.h>\n"
+	buf += "#include <target/target_core_fabric_lib.h>\n"
+	buf += "#include <target/target_core_device.h>\n"
+	buf += "#include <target/target_core_tpg.h>\n"
+	buf += "#include <target/target_core_configfs.h>\n"
+	buf += "#include <" + fabric_mod_name + "_base.h>\n"
+	buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
+
+	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	return 1;\n"
+	buf += "}\n\n"
+	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
+
+	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
+	buf += "{\n"
+	buf += "	return 0;\n"
+	buf += "}\n\n"
+	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
+
+	total_fabric_ops = len(fabric_ops)
+	i = 0
+
+	while i < total_fabric_ops:
+		fo = fabric_ops[i]
+		i += 1
+#		print "fabric_ops: " + fo
+
+		if re.search('get_fabric_name', fo):
+			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
+			buf += "{\n"
+			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
+			continue
+
+		if re.search('get_fabric_proto_ident', fo):
+			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	u8 proto_id;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
+				buf += "		break;\n"
+
+			buf += "	}\n\n"
+			buf += "	return proto_id;\n"
+			buf += "}\n\n"
+			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
+
+		if re.search('get_wwn', fo):
+			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
+			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
+
+		if re.search('get_tag', fo):
+			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
+
+		if re.search('get_default_depth', fo):
+			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	return 1;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
+
+		if re.search('get_pr_transport_id\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl,\n"
+			buf += "	struct t10_pr_registration *pr_reg,\n"
+			buf += "	int *format_code,\n"
+			buf += "	unsigned char *buf)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	int ret = 0;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code, buf);\n"
+				buf += "		break;\n"
+
+			buf += "	}\n\n"
+			buf += "	return ret;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
+			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
+			bufi += "			int *, unsigned char *);\n"
+
+		if re.search('get_pr_transport_id_len\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl,\n"
+			buf += "	struct t10_pr_registration *pr_reg,\n"
+			buf += "	int *format_code)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	int ret = 0;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
+				buf += "					format_code);\n"
+				buf += "		break;\n"
+
+
+			buf += "	}\n\n"
+			buf += "	return ret;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
+			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
+			bufi += "			int *);\n"
+
+		if re.search('parse_pr_out_transport_id\)\(', fo):
+			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	const char *buf,\n"
+			buf += "	u32 *out_tid_len,\n"
+			buf += "	char **port_nexus_ptr)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
+			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
+			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
+			buf += "	char *tid = NULL;\n\n"
+			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
+			if proto_ident == "FC":
+				buf += "	case SCSI_PROTOCOL_FCP:\n"
+				buf += "	default:\n"
+				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+			elif proto_ident == "SAS":
+				buf += "	case SCSI_PROTOCOL_SAS:\n"
+				buf += "	default:\n"
+				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+			elif proto_ident == "iSCSI":
+				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
+				buf += "	default:\n"
+				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
+				buf += "					port_nexus_ptr);\n"
+
+			buf += "	}\n\n"
+			buf += "	return tid;\n"
+			buf += "}\n\n"
+			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
+			bufi +=	"			const char *, u32 *, char **);\n"
+
+		if re.search('alloc_fabric_acl\)\(', fo):
+			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
+			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
+			buf += "	if (!(nacl)) {\n"
+			buf += "		printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
+			buf += "		return NULL;\n"
+			buf += "	}\n\n"
+			buf += "	return &nacl->se_node_acl;\n"
+			buf += "}\n\n"
+			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
+
+		if re.search('release_fabric_acl\)\(', fo):
+			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
+			buf += "	struct se_portal_group *se_tpg,\n"
+			buf += "	struct se_node_acl *se_nacl)\n"
+			buf += "{\n"
+			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
+			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
+			buf += "	kfree(nacl);\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
+			bufi +=	"			struct se_node_acl *);\n"
+
+		if re.search('tpg_get_inst_index\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
+			buf += "{\n"
+			buf += "	return 1;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
+
+		if re.search('release_cmd_to_pool', fo):
+			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
+
+		if re.search('shutdown_session\)\(', fo):
+			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
+
+		if re.search('close_session\)\(', fo):
+			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
+
+		if re.search('stop_session\)\(', fo):
+			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
+
+		if re.search('fall_back_to_erl0\)\(', fo):
+			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
+
+		if re.search('sess_logged_in\)\(', fo):
+			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
+
+		if re.search('sess_get_index\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
+
+		if re.search('write_pending\)\(', fo):
+			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
+
+		if re.search('write_pending_status\)\(', fo):
+			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
+
+		if re.search('set_default_node_attributes\)\(', fo):
+			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
+
+		if re.search('get_task_tag\)\(', fo):
+			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
+
+		if re.search('get_cmd_state\)\(', fo):
+			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
+
+		if re.search('new_cmd_failure\)\(', fo):
+			buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return;\n"
+			buf += "}\n\n"
+			bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
+
+		if re.search('queue_data_in\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
+
+		if re.search('queue_status\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
+
+		if re.search('queue_tm_rsp\)\(', fo):
+			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+
+		if re.search('get_fabric_sense_len\)\(', fo):
+			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
+
+		if re.search('set_fabric_sense_len\)\(', fo):
+			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
+
+		if re.search('is_state_remove\)\(', fo):
+			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+			buf += "{\n"
+			buf += "	return 0;\n"
+			buf += "}\n\n"
+			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
+
+		if re.search('pack_lun\)\(', fo):
+			buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
+			buf += "{\n"
+			buf += "	WARN_ON(lun >= 256);\n"
+			buf += "	/* Caller wants this byte-swapped */\n"
+			buf += "	return cpu_to_le64((lun & 0xff) << 8);\n"
+			buf += "}\n\n"
+			bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
+
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+
+	ret = pi.write(bufi)
+	if ret:
+		tcm_mod_err("Unable to write fi: " + fi)
+
+	pi.close()
+	return
+
+def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
+
+	buf = ""
+	f = fabric_mod_dir_var + "/Kbuild"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
+	buf += fabric_mod_name + "-objs			:= " + fabric_mod_name + "_fabric.o \\\n"
+	buf += "					   " + fabric_mod_name + "_configfs.o\n"
+	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ")		+= " + fabric_mod_name + ".o\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+	return
+
+def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
+
+	buf = ""
+	f = fabric_mod_dir_var + "/Kconfig"
+	print "Writing file: " + f
+
+	p = open(f, 'w')
+	if not p:
+		tcm_mod_err("Unable to open file: " + f)
+
+	buf = "config " + fabric_mod_name.upper() + "\n"
+	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
+	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
+	buf += "	default n\n"
+	buf += "	---help---\n"
+	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
+
+	ret = p.write(buf)
+	if ret:
+		tcm_mod_err("Unable to write f: " + f)
+
+	p.close()
+	return
+
+def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
+	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name.lower() + "/\n"
+	kbuild = tcm_dir + "/drivers/target/Kbuild"
+
+	f = open(kbuild, 'a')
+	f.write(buf)
+	f.close()
+	return
+
+def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
+	buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
+	kconfig = tcm_dir + "/drivers/target/Kconfig"
+
+	f = open(kconfig, 'a')
+	f.write(buf)
+	f.close()
+	return
+
+def main(modname, proto_ident):
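+	# Generate the complete fabric module skeleton under
+	# drivers/target/<modname>, then optionally wire it into the existing
+	# drivers/target/Kbuild and Kconfig files.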
+#	proto_ident = "FC"
+#	proto_ident = "SAS"
+#	proto_ident = "iSCSI"
+
+	tcm_dir = os.getcwd();
+	tcm_dir += "/../../"
+	print "tcm_dir: " + tcm_dir
+	fabric_mod_name = modname
+	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
+	print "Set fabric_mod_name: " + fabric_mod_name
+	print "Set fabric_mod_dir: " + fabric_mod_dir
+	print "Using proto_ident: " + proto_ident
+
+	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
+		print "Unsupported proto_ident: " + proto_ident
+		sys.exit(1)
+
+	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
+	if ret:
+		print "tcm_mod_create_module_subdir() failed because module already exists!"
+		sys.exit(1)
+
+	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_scan_fabric_ops(tcm_dir)
+	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
+	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
+
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
+	if input == "yes" or input == "y":
+		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
+
+	input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+	if input == "yes" or input == "y":
+		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
+
+	return
+
+parser = optparse.OptionParser()
+parser.add_option('-m', '--modulename', help='Module name', dest='modname',
+		action='store', nargs=1, type='string')
+parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
+		action='store', nargs=1, type='string')
+
+(opts, args) = parser.parse_args()
+
+mandatories = ['modname', 'protoident']
+for m in mandatories:
+	if not opts.__dict__[m]:
+		print "mandatory option is missing\n"
+		parser.print_help()
+		exit(-1)
+
+if __name__ == "__main__":
+
+	main(str(opts.modname), opts.protoident)
diff --git a/Documentation/target/tcm_mod_builder.txt b/Documentation/target/tcm_mod_builder.txt
new file mode 100644
index 0000000..84533d8
--- /dev/null
+++ b/Documentation/target/tcm_mod_builder.txt
@@ -0,0 +1,145 @@
+>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<<
+
+Greetings all,
+
+This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py
+script to generate a brand new functional TCM v4 fabric .ko module of your very
+own that, once built, can be immediately loaded to start accessing the new
+TCM/ConfigFS fabric skeleton, simply by using:
+
+	modprobe $TCM_NEW_MOD
+	mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD
+
+This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the
+following (an excerpt of the generated code is shown after this list):
+
+	*) Generate new API callers for drivers/target/target_core_fabric_configfs.c logic
+	   ->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg()
+	   ->make_wwn(), ->drop_wwn().  These are generated in $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
+	*) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module
+	   using a skeleton struct target_core_fabric_ops API template.
+	*) Based on the user-defined T10 Proto_Ident for the new fabric module being built,
+	   the TransportID / Initiator and Target WWPN related handlers for
+	   SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
+	   using drivers/target/target_core_fabric_lib.c logic.
+	*) NOP API calls for all other Data I/O path and fabric dependent attribute logic
+	   in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
+
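+For illustration, here is roughly what two of the generated NOP stubs in
+$TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c look like for a module named tcm_nab5000
+(excerpted from the script's output; the real file contains one stub and one
+prototype for every function pointer found in struct target_core_fabric_ops):
+
+	char *tcm_nab5000_get_fabric_name(void)
+	{
+		return "nab5000";
+	}
+
+	int tcm_nab5000_write_pending(struct se_cmd *se_cmd)
+	{
+		return 0;
+	}
+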
+tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m
+$FABRIC_MOD_NAME' parameters, and actually running the script looks like:
+
+target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000
+tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../
+Set fabric_mod_name: tcm_nab5000
+Set fabric_mod_dir:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
+Using proto_ident: iSCSI
+Creating fabric_mod_dir:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h
+Using tcm_mod_scan_fabric_ops:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild
+Writing file:
+/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig
+Would you like to add tcm_nab5000 to drivers/target/Kbuild..? [yes,no]: yes
+Would you like to add tcm_nab5000 to drivers/target/Kconfig..? [yes,no]: yes
+
+At the end of tcm_mod_builder.py, the script will ask to add the following
+line to drivers/target/Kbuild:
+
+	obj-$(CONFIG_TCM_NAB5000)       += tcm_nab5000/
+
+and the same for drivers/target/Kconfig:
+
+	source "drivers/target/tcm_nab5000/Kconfig"
+
+*) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item:
+
+	<M>   TCM_NAB5000 fabric module
+
+*) Build using 'make modules'; once completed you will have:
+
+target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/
+total 1348
+drwxr-xr-x 2 root root   4096 2010-10-05 03:23 .
+drwxr-xr-x 9 root root   4096 2010-10-05 03:22 ..
+-rw-r--r-- 1 root root    282 2010-10-05 03:22 Kbuild
+-rw-r--r-- 1 root root    171 2010-10-05 03:22 Kconfig
+-rw-r--r-- 1 root root     49 2010-10-05 03:23 modules.order
+-rw-r--r-- 1 root root    738 2010-10-05 03:22 tcm_nab5000_base.h
+-rw-r--r-- 1 root root   9096 2010-10-05 03:22 tcm_nab5000_configfs.c
+-rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o
+-rw-r--r-- 1 root root  40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd
+-rw-r--r-- 1 root root   5414 2010-10-05 03:22 tcm_nab5000_fabric.c
+-rw-r--r-- 1 root root   2016 2010-10-05 03:22 tcm_nab5000_fabric.h
+-rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o
+-rw-r--r-- 1 root root  40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd
+-rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko
+-rw-r--r-- 1 root root    265 2010-10-05 03:23 .tcm_nab5000.ko.cmd
+-rw-r--r-- 1 root root    459 2010-10-05 03:23 tcm_nab5000.mod.c
+-rw-r--r-- 1 root root  23896 2010-10-05 03:23 tcm_nab5000.mod.o
+-rw-r--r-- 1 root root  22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd
+-rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o
+-rw-r--r-- 1 root root    211 2010-10-05 03:23 .tcm_nab5000.o.cmd
+
+*) Load the new module, create a lun_0 configfs group, and add a new TCM Core
+   IBLOCK backstore symlink to the port:
+
+target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko
+target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0
+target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/
+target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port
+
+target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd -
+target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/
+/sys/kernel/config/target/nab5000/
+|-- discovery_auth
+|-- iqn.foo
+|   `-- tpgt_1
+|       |-- acls
+|       |-- attrib
+|       |-- lun
+|       |   `-- lun_0
+|       |       |-- alua_tg_pt_gp
+|       |       |-- alua_tg_pt_offline
+|       |       |-- alua_tg_pt_status
+|       |       |-- alua_tg_pt_write_md
+|       |       `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0
+|       |-- np
+|       `-- param
+`-- version
+
+target:/mnt/sdb/lio-core-2.6.git# lsmod
+Module                  Size  Used by
+tcm_nab5000             3935  4
+iscsi_target_mod      193211  0
+target_core_stgt        8090  0
+target_core_pscsi      11122  1
+target_core_file        9172  2
+target_core_iblock      9280  1
+target_core_mod       228575  31
+tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock
+libfc                  73681  0
+scsi_debug             56265  0
+scsi_tgt                8666  1 target_core_stgt
+configfs               20644  2 target_core_mod
+
+----------------------------------------------------------------------
+
+Future TODO items:
+
+	*) Add more T10 proto_idents
+	*) Make tcm_mod_dump_fabric_ops() smarter and generate function pointer
+	   defs directly from include/target/target_core_fabric_ops.h:struct target_core_fabric_ops
+	   structure members.
+
+October 5th, 2010
+Nicholas A. Bellinger <nab@linux-iscsi.org>
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index cb3d15b..b61e46f 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -278,3 +278,15 @@
     |---name:			acpitz
     |---temp1_input:		37000
     |---temp1_crit:		100000
+
+4. Event Notification
+
+The framework includes a simple notification mechanism, in the form of a
+netlink event. Netlink socket initialization is done during the _init_
+of the framework. Drivers which intend to use the notification mechanism
+just need to call generate_netlink_event() with two arguments:
+(originator, event). Typically the originator will be an integer assigned
+to a thermal_zone_device when it registers itself with the framework. The
+event will be one of: {THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
+THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
+crosses any of the configured thresholds.
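+
+As a rough, illustrative sketch (the exact prototype and the zone member used
+as the originator are defined by the thermal framework headers, not by this
+document), a driver that decides a zone has reached a critical trip point
+could emit:
+
+	/* tz is the driver's registered struct thermal_zone_device */
+	generate_netlink_event(tz->id, THERMAL_CRITICAL);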
diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt
index 9bd00fc..8abd40b 100644
--- a/Documentation/timers/timer_stats.txt
+++ b/Documentation/timers/timer_stats.txt
@@ -19,7 +19,7 @@
 
 - the pid of the task(process) which initialized the timer
 - the name of the process which initialized the timer
-- the function where the timer was intialized
+- the function where the timer was initialized
 - the callback function which is associated to the timer
 - the number of events (callbacks)
 
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 09bd8e9..b510564 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -125,7 +125,7 @@
 For example, here's the information displayed for the 'sched_wakeup'
 event:
 
-# cat /debug/tracing/events/sched/sched_wakeup/format
+# cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/format
 
 name: sched_wakeup
 ID: 60
@@ -201,19 +201,19 @@
 
 For example:
 
-# cd /debug/tracing/events/sched/sched_wakeup
+# cd /sys/kernel/debug/tracing/events/sched/sched_wakeup
 # echo "common_preempt_count > 4" > filter
 
 A slightly more involved example:
 
-# cd /debug/tracing/events/sched/sched_signal_send
+# cd /sys/kernel/debug/tracing/events/signal/signal_generate
 # echo "((sig >= 10 && sig < 15) || sig == 17) && comm != bash" > filter
 
 If there is an error in the expression, you'll get an 'Invalid
 argument' error when setting it, and the erroneous string along with
 an error message can be seen by looking at the filter e.g.:
 
-# cd /debug/tracing/events/sched/sched_signal_send
+# cd /sys/kernel/debug/tracing/events/signal/signal_generate
 # echo "((sig >= 10 && sig < 15) || dsig == 17) && comm != bash" > filter
 -bash: echo: write error: Invalid argument
 # cat filter
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index b29d8e5..c9ffa9c 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
 
 		 Alan Stern <stern@rowland.harvard.edu>
 
-			    December 11, 2009
+			    October 28, 2010
 
 
 
@@ -107,9 +107,14 @@
 The user interface for controlling dynamic PM is located in the power/
 subdirectory of each USB device's sysfs directory, that is, in
 /sys/bus/usb/devices/.../power/ where "..." is the device's ID.  The
-relevant attribute files are: wakeup, control, and autosuspend.
-(There may also be a file named "level"; this file was deprecated
-as of the 2.6.35 kernel and replaced by the "control" file.)
+relevant attribute files are: wakeup, control, and
+autosuspend_delay_ms.  (There may also be a file named "level"; this
+file was deprecated as of the 2.6.35 kernel and replaced by the
+"control" file.  In 2.6.38 the "autosuspend" file will be deprecated
+and replaced by the "autosuspend_delay_ms" file.  The only difference
+is that the newer file expresses the delay in milliseconds whereas the
+older file uses seconds.  Confusingly, both files are present in 2.6.37
+but only "autosuspend" works.)
 
 	power/wakeup
 
@@ -140,33 +145,36 @@
 		suspended and autoresume was not allowed.  This
 		setting is no longer supported.)
 
-	power/autosuspend
+	power/autosuspend_delay_ms
 
 		This file contains an integer value, which is the
-		number of seconds the device should remain idle before
-		the kernel will autosuspend it (the idle-delay time).
-		The default is 2.  0 means to autosuspend as soon as
-		the device becomes idle, and negative values mean
-		never to autosuspend.  You can write a number to the
-		file to change the autosuspend idle-delay time.
+		number of milliseconds the device should remain idle
+		before the kernel will autosuspend it (the idle-delay
+		time).  The default is 2000.  0 means to autosuspend
+		as soon as the device becomes idle, and negative
+		values mean never to autosuspend.  You can write a
+		number to the file to change the autosuspend
+		idle-delay time.
 
-Writing "-1" to power/autosuspend and writing "on" to power/control do
-essentially the same thing -- they both prevent the device from being
-autosuspended.  Yes, this is a redundancy in the API.
+Writing "-1" to power/autosuspend_delay_ms and writing "on" to
+power/control do essentially the same thing -- they both prevent the
+device from being autosuspended.  Yes, this is a redundancy in the
+API.
 
 (In 2.6.21 writing "0" to power/autosuspend would prevent the device
 from being autosuspended; the behavior was changed in 2.6.22.  The
 power/autosuspend attribute did not exist prior to 2.6.21, and the
 power/level attribute did not exist prior to 2.6.22.  power/control
-was added in 2.6.34.)
+was added in 2.6.34, and power/autosuspend_delay_ms was added in
+2.6.37 but did not become functional until 2.6.38.)
 
 
 	Changing the default idle-delay time
 	------------------------------------
 
-The default autosuspend idle-delay time is controlled by a module
-parameter in usbcore.  You can specify the value when usbcore is
-loaded.  For example, to set it to 5 seconds instead of 2 you would
+The default autosuspend idle-delay time (in seconds) is controlled by
+a module parameter in usbcore.  You can specify the value when usbcore
+is loaded.  For example, to set it to 5 seconds instead of 2 you would
 do:
 
 	modprobe usbcore autosuspend=5
@@ -234,25 +242,23 @@
 
 If a driver knows that its device has proper suspend/resume support,
 it can enable autosuspend all by itself.  For example, the video
-driver for a laptop's webcam might do this, since these devices are
-rarely used and so should normally be autosuspended.
+driver for a laptop's webcam might do this (in recent kernels they
+do), since these devices are rarely used and so should normally be
+autosuspended.
 
 Sometimes it turns out that even when a device does work okay with
-autosuspend there are still problems.  For example, there are
-experimental patches adding autosuspend support to the usbhid driver,
-which manages keyboards and mice, among other things.  Tests with a
-number of keyboards showed that typing on a suspended keyboard, while
-causing the keyboard to do a remote wakeup all right, would
-nonetheless frequently result in lost keystrokes.  Tests with mice
-showed that some of them would issue a remote-wakeup request in
-response to button presses but not to motion, and some in response to
-neither.
+autosuspend there are still problems.  For example, the usbhid driver,
+which manages keyboards and mice, has autosuspend support.  Tests with
+a number of keyboards show that typing on a suspended keyboard, while
+causing the keyboard to do a remote wakeup all right, will nonetheless
+frequently result in lost keystrokes.  Tests with mice show that some
+of them will issue a remote-wakeup request in response to button
+presses but not to motion, and some in response to neither.
 
 The kernel will not prevent you from enabling autosuspend on devices
 that can't handle it.  It is even possible in theory to damage a
-device by suspending it at the wrong time -- for example, suspending a
-USB hard disk might cause it to spin down without parking the heads.
-(Highly unlikely, but possible.)  Take care.
+device by suspending it at the wrong time.  (Highly unlikely, but
+possible.)  Take care.
 
 
 	The driver interface for Power Management
@@ -336,10 +342,6 @@
 then the interface is considered to be idle, and the kernel may
 autosuspend the device.
 
-(There is a similar usage counter field in struct usb_device,
-associated with the device itself rather than any of its interfaces.
-This counter is used only by the USB core.)
-
 Drivers need not be concerned about balancing changes to the usage
 counter; the USB core will undo any remaining "get"s when a driver
 is unbound from its interface.  As a corollary, drivers must not call
@@ -409,11 +411,11 @@
 autosuspending a keyboard if the user can't cause the keyboard to do a
 remote wakeup by typing on it.  If the driver sets
 intf->needs_remote_wakeup to 1, the kernel won't autosuspend the
-device if remote wakeup isn't available or has been disabled through
-the power/wakeup attribute.  (If the device is already autosuspended,
-though, setting this flag won't cause the kernel to autoresume it.
-Normally a driver would set this flag in its probe method, at which
-time the device is guaranteed not to be autosuspended.)
+device if remote wakeup isn't available.  (If the device is already
+autosuspended, though, setting this flag won't cause the kernel to
+autoresume it.  Normally a driver would set this flag in its probe
+method, at which time the device is guaranteed not to be
+autosuspended.)
 
 If a driver does its I/O asynchronously in interrupt context, it
 should call usb_autopm_get_interface_async() before starting output and
@@ -422,20 +424,19 @@
 
 	usb_mark_last_busy(struct usb_device *udev);
 
-in the event handler.  This sets udev->last_busy to the current time.
-udev->last_busy is the field used for idle-delay calculations;
-updating it will cause any pending autosuspend to be moved back.  Most
-of the usb_autopm_* routines will also set the last_busy field to the
-current time.
+in the event handler.  This tells the PM core that the device was just
+busy and therefore the next autosuspend idle-delay expiration should
+be pushed back.  Many of the usb_autopm_* routines also make this call,
+so drivers need to worry only when interrupt-driven input arrives.
 
 Asynchronous operation is always subject to races.  For example, a
-driver may call one of the usb_autopm_*_interface_async() routines at
-a time when the core has just finished deciding the device has been
-idle for long enough but not yet gotten around to calling the driver's
-suspend method.  The suspend method must be responsible for
-synchronizing with the output request routine and the URB completion
-handler; it should cause autosuspends to fail with -EBUSY if the
-driver needs to use the device.
+driver may call the usb_autopm_get_interface_async() routine at a time
+when the core has just finished deciding the device has been idle for
+long enough but not yet gotten around to calling the driver's suspend
+method.  The suspend method must be responsible for synchronizing with
+the I/O request routine and the URB completion handler; it should
+cause autosuspends to fail with -EBUSY if the driver needs to use the
+device.
 
 External suspend calls should never be allowed to fail in this way,
 only autosuspend calls.  The driver can tell them apart by checking
@@ -472,7 +473,9 @@
 occurs.  Since system suspends are supposed to be as transparent as
 possible, the device should remain suspended following the system
 resume.  But this theory may not work out well in practice; over time
-the kernel's behavior in this regard has changed.
+the kernel's behavior in this regard has changed.  As of 2.6.37 the
+policy is to resume all devices during a system resume and let them
+handle their own runtime suspends afterward.
 
 Secondly, a dynamic power-management event may occur as a system
 suspend is underway.  The window for this is short, since system
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
index 9dcff32..3fa4d06 100644
--- a/Documentation/vm/Makefile
+++ b/Documentation/vm/Makefile
@@ -2,7 +2,7 @@
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
deleted file mode 100644
index 92e729f..0000000
--- a/Documentation/vm/slabinfo.c
+++ /dev/null
@@ -1,1364 +0,0 @@
-/*
- * Slabinfo: Tool to get reports about slabs
- *
- * (C) 2007 sgi, Christoph Lameter
- *
- * Compile by:
- *
- * gcc -o slabinfo slabinfo.c
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <dirent.h>
-#include <strings.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <getopt.h>
-#include <regex.h>
-#include <errno.h>
-
-#define MAX_SLABS 500
-#define MAX_ALIASES 500
-#define MAX_NODES 1024
-
-struct slabinfo {
-	char *name;
-	int alias;
-	int refs;
-	int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-	int hwcache_align, object_size, objs_per_slab;
-	int sanity_checks, slab_size, store_user, trace;
-	int order, poison, reclaim_account, red_zone;
-	unsigned long partial, objects, slabs, objects_partial, objects_total;
-	unsigned long alloc_fastpath, alloc_slowpath;
-	unsigned long free_fastpath, free_slowpath;
-	unsigned long free_frozen, free_add_partial, free_remove_partial;
-	unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
-	unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
-	unsigned long deactivate_to_head, deactivate_to_tail;
-	unsigned long deactivate_remote_frees, order_fallback;
-	int numa[MAX_NODES];
-	int numa_partial[MAX_NODES];
-} slabinfo[MAX_SLABS];
-
-struct aliasinfo {
-	char *name;
-	char *ref;
-	struct slabinfo *slab;
-} aliasinfo[MAX_ALIASES];
-
-int slabs = 0;
-int actual_slabs = 0;
-int aliases = 0;
-int alias_targets = 0;
-int highest_node = 0;
-
-char buffer[4096];
-
-int show_empty = 0;
-int show_report = 0;
-int show_alias = 0;
-int show_slab = 0;
-int skip_zero = 1;
-int show_numa = 0;
-int show_track = 0;
-int show_first_alias = 0;
-int validate = 0;
-int shrink = 0;
-int show_inverted = 0;
-int show_single_ref = 0;
-int show_totals = 0;
-int sort_size = 0;
-int sort_active = 0;
-int set_debug = 0;
-int show_ops = 0;
-int show_activity = 0;
-
-/* Debug options */
-int sanity = 0;
-int redzone = 0;
-int poison = 0;
-int tracking = 0;
-int tracing = 0;
-
-int page_size;
-
-regex_t pattern;
-
-static void fatal(const char *x, ...)
-{
-	va_list ap;
-
-	va_start(ap, x);
-	vfprintf(stderr, x, ap);
-	va_end(ap);
-	exit(EXIT_FAILURE);
-}
-
-static void usage(void)
-{
-	printf("slabinfo 5/7/2007. (c) 2007 sgi.\n\n"
-		"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
-		"-a|--aliases           Show aliases\n"
-		"-A|--activity          Most active slabs first\n"
-		"-d<options>|--debug=<options> Set/Clear Debug options\n"
-		"-D|--display-active    Switch line format to activity\n"
-		"-e|--empty             Show empty slabs\n"
-		"-f|--first-alias       Show first alias\n"
-		"-h|--help              Show usage information\n"
-		"-i|--inverted          Inverted list\n"
-		"-l|--slabs             Show slabs\n"
-		"-n|--numa              Show NUMA information\n"
-		"-o|--ops		Show kmem_cache_ops\n"
-		"-s|--shrink            Shrink slabs\n"
-		"-r|--report		Detailed report on single slabs\n"
-		"-S|--Size              Sort by size\n"
-		"-t|--tracking          Show alloc/free information\n"
-		"-T|--Totals            Show summary information\n"
-		"-v|--validate          Validate slabs\n"
-		"-z|--zero              Include empty slabs\n"
-		"-1|--1ref              Single reference\n"
-		"\nValid debug options (FZPUT may be combined)\n"
-		"a / A          Switch on all debug options (=FZUP)\n"
-		"-              Switch off all debug options\n"
-		"f / F          Sanity Checks (SLAB_DEBUG_FREE)\n"
-		"z / Z          Redzoning\n"
-		"p / P          Poisoning\n"
-		"u / U          Tracking\n"
-		"t / T          Tracing\n"
-	);
-}
-
-static unsigned long read_obj(const char *name)
-{
-	FILE *f = fopen(name, "r");
-
-	if (!f)
-		buffer[0] = 0;
-	else {
-		if (!fgets(buffer, sizeof(buffer), f))
-			buffer[0] = 0;
-		fclose(f);
-		if (buffer[strlen(buffer)] == '\n')
-			buffer[strlen(buffer)] = 0;
-	}
-	return strlen(buffer);
-}
-
-
-/*
- * Get the contents of an attribute
- */
-static unsigned long get_obj(const char *name)
-{
-	if (!read_obj(name))
-		return 0;
-
-	return atol(buffer);
-}
-
-static unsigned long get_obj_and_str(const char *name, char **x)
-{
-	unsigned long result = 0;
-	char *p;
-
-	*x = NULL;
-
-	if (!read_obj(name)) {
-		x = NULL;
-		return 0;
-	}
-	result = strtoul(buffer, &p, 10);
-	while (*p == ' ')
-		p++;
-	if (*p)
-		*x = strdup(p);
-	return result;
-}
-
-static void set_obj(struct slabinfo *s, const char *name, int n)
-{
-	char x[100];
-	FILE *f;
-
-	snprintf(x, 100, "%s/%s", s->name, name);
-	f = fopen(x, "w");
-	if (!f)
-		fatal("Cannot write to %s\n", x);
-
-	fprintf(f, "%d\n", n);
-	fclose(f);
-}
-
-static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
-{
-	char x[100];
-	FILE *f;
-	size_t l;
-
-	snprintf(x, 100, "%s/%s", s->name, name);
-	f = fopen(x, "r");
-	if (!f) {
-		buffer[0] = 0;
-		l = 0;
-	} else {
-		l = fread(buffer, 1, sizeof(buffer), f);
-		buffer[l] = 0;
-		fclose(f);
-	}
-	return l;
-}
-
-
-/*
- * Put a size string together
- */
-static int store_size(char *buffer, unsigned long value)
-{
-	unsigned long divisor = 1;
-	char trailer = 0;
-	int n;
-
-	if (value > 1000000000UL) {
-		divisor = 100000000UL;
-		trailer = 'G';
-	} else if (value > 1000000UL) {
-		divisor = 100000UL;
-		trailer = 'M';
-	} else if (value > 1000UL) {
-		divisor = 100;
-		trailer = 'K';
-	}
-
-	value /= divisor;
-	n = sprintf(buffer, "%ld",value);
-	if (trailer) {
-		buffer[n] = trailer;
-		n++;
-		buffer[n] = 0;
-	}
-	if (divisor != 1) {
-		memmove(buffer + n - 2, buffer + n - 3, 4);
-		buffer[n-2] = '.';
-		n++;
-	}
-	return n;
-}
-
-static void decode_numa_list(int *numa, char *t)
-{
-	int node;
-	int nr;
-
-	memset(numa, 0, MAX_NODES * sizeof(int));
-
-	if (!t)
-		return;
-
-	while (*t == 'N') {
-		t++;
-		node = strtoul(t, &t, 10);
-		if (*t == '=') {
-			t++;
-			nr = strtoul(t, &t, 10);
-			numa[node] = nr;
-			if (node > highest_node)
-				highest_node = node;
-		}
-		while (*t == ' ')
-			t++;
-	}
-}
-
-static void slab_validate(struct slabinfo *s)
-{
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	set_obj(s, "validate", 1);
-}
-
-static void slab_shrink(struct slabinfo *s)
-{
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	set_obj(s, "shrink", 1);
-}
-
-int line = 0;
-
-static void first_line(void)
-{
-	if (show_activity)
-		printf("Name                   Objects      Alloc       Free   %%Fast Fallb O\n");
-	else
-		printf("Name                   Objects Objsize    Space "
-			"Slabs/Part/Cpu  O/S O %%Fr %%Ef Flg\n");
-}
-
-/*
- * Find the shortest alias of a slab
- */
-static struct aliasinfo *find_one_alias(struct slabinfo *find)
-{
-	struct aliasinfo *a;
-	struct aliasinfo *best = NULL;
-
-	for(a = aliasinfo;a < aliasinfo + aliases; a++) {
-		if (a->slab == find &&
-			(!best || strlen(best->name) < strlen(a->name))) {
-				best = a;
-				if (strncmp(a->name,"kmall", 5) == 0)
-					return best;
-			}
-	}
-	return best;
-}
-
-static unsigned long slab_size(struct slabinfo *s)
-{
-	return 	s->slabs * (page_size << s->order);
-}
-
-static unsigned long slab_activity(struct slabinfo *s)
-{
-	return 	s->alloc_fastpath + s->free_fastpath +
-		s->alloc_slowpath + s->free_slowpath;
-}
-
-static void slab_numa(struct slabinfo *s, int mode)
-{
-	int node;
-
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	if (!highest_node) {
-		printf("\n%s: No NUMA information available.\n", s->name);
-		return;
-	}
-
-	if (skip_zero && !s->slabs)
-		return;
-
-	if (!line) {
-		printf("\n%-21s:", mode ? "NUMA nodes" : "Slab");
-		for(node = 0; node <= highest_node; node++)
-			printf(" %4d", node);
-		printf("\n----------------------");
-		for(node = 0; node <= highest_node; node++)
-			printf("-----");
-		printf("\n");
-	}
-	printf("%-21s ", mode ? "All slabs" : s->name);
-	for(node = 0; node <= highest_node; node++) {
-		char b[20];
-
-		store_size(b, s->numa[node]);
-		printf(" %4s", b);
-	}
-	printf("\n");
-	if (mode) {
-		printf("%-21s ", "Partial slabs");
-		for(node = 0; node <= highest_node; node++) {
-			char b[20];
-
-			store_size(b, s->numa_partial[node]);
-			printf(" %4s", b);
-		}
-		printf("\n");
-	}
-	line++;
-}
-
-static void show_tracking(struct slabinfo *s)
-{
-	printf("\n%s: Kernel object allocation\n", s->name);
-	printf("-----------------------------------------------------------------------\n");
-	if (read_slab_obj(s, "alloc_calls"))
-		printf(buffer);
-	else
-		printf("No Data\n");
-
-	printf("\n%s: Kernel object freeing\n", s->name);
-	printf("------------------------------------------------------------------------\n");
-	if (read_slab_obj(s, "free_calls"))
-		printf(buffer);
-	else
-		printf("No Data\n");
-
-}
-
-static void ops(struct slabinfo *s)
-{
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	if (read_slab_obj(s, "ops")) {
-		printf("\n%s: kmem_cache operations\n", s->name);
-		printf("--------------------------------------------\n");
-		printf(buffer);
-	} else
-		printf("\n%s has no kmem_cache operations\n", s->name);
-}
-
-static const char *onoff(int x)
-{
-	if (x)
-		return "On ";
-	return "Off";
-}
-
-static void slab_stats(struct slabinfo *s)
-{
-	unsigned long total_alloc;
-	unsigned long total_free;
-	unsigned long total;
-
-	if (!s->alloc_slab)
-		return;
-
-	total_alloc = s->alloc_fastpath + s->alloc_slowpath;
-	total_free = s->free_fastpath + s->free_slowpath;
-
-	if (!total_alloc)
-		return;
-
-	printf("\n");
-	printf("Slab Perf Counter       Alloc     Free %%Al %%Fr\n");
-	printf("--------------------------------------------------\n");
-	printf("Fastpath             %8lu %8lu %3lu %3lu\n",
-		s->alloc_fastpath, s->free_fastpath,
-		s->alloc_fastpath * 100 / total_alloc,
-		s->free_fastpath * 100 / total_free);
-	printf("Slowpath             %8lu %8lu %3lu %3lu\n",
-		total_alloc - s->alloc_fastpath, s->free_slowpath,
-		(total_alloc - s->alloc_fastpath) * 100 / total_alloc,
-		s->free_slowpath * 100 / total_free);
-	printf("Page Alloc           %8lu %8lu %3lu %3lu\n",
-		s->alloc_slab, s->free_slab,
-		s->alloc_slab * 100 / total_alloc,
-		s->free_slab * 100 / total_free);
-	printf("Add partial          %8lu %8lu %3lu %3lu\n",
-		s->deactivate_to_head + s->deactivate_to_tail,
-		s->free_add_partial,
-		(s->deactivate_to_head + s->deactivate_to_tail) * 100 / total_alloc,
-		s->free_add_partial * 100 / total_free);
-	printf("Remove partial       %8lu %8lu %3lu %3lu\n",
-		s->alloc_from_partial, s->free_remove_partial,
-		s->alloc_from_partial * 100 / total_alloc,
-		s->free_remove_partial * 100 / total_free);
-
-	printf("RemoteObj/SlabFrozen %8lu %8lu %3lu %3lu\n",
-		s->deactivate_remote_frees, s->free_frozen,
-		s->deactivate_remote_frees * 100 / total_alloc,
-		s->free_frozen * 100 / total_free);
-
-	printf("Total                %8lu %8lu\n\n", total_alloc, total_free);
-
-	if (s->cpuslab_flush)
-		printf("Flushes %8lu\n", s->cpuslab_flush);
-
-	if (s->alloc_refill)
-		printf("Refill %8lu\n", s->alloc_refill);
-
-	total = s->deactivate_full + s->deactivate_empty +
-			s->deactivate_to_head + s->deactivate_to_tail;
-
-	if (total)
-		printf("Deactivate Full=%lu(%lu%%) Empty=%lu(%lu%%) "
-			"ToHead=%lu(%lu%%) ToTail=%lu(%lu%%)\n",
-			s->deactivate_full, (s->deactivate_full * 100) / total,
-			s->deactivate_empty, (s->deactivate_empty * 100) / total,
-			s->deactivate_to_head, (s->deactivate_to_head * 100) / total,
-			s->deactivate_to_tail, (s->deactivate_to_tail * 100) / total);
-}
-
-static void report(struct slabinfo *s)
-{
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	printf("\nSlabcache: %-20s  Aliases: %2d Order : %2d Objects: %lu\n",
-		s->name, s->aliases, s->order, s->objects);
-	if (s->hwcache_align)
-		printf("** Hardware cacheline aligned\n");
-	if (s->cache_dma)
-		printf("** Memory is allocated in a special DMA zone\n");
-	if (s->destroy_by_rcu)
-		printf("** Slabs are destroyed via RCU\n");
-	if (s->reclaim_account)
-		printf("** Reclaim accounting active\n");
-
-	printf("\nSizes (bytes)     Slabs              Debug                Memory\n");
-	printf("------------------------------------------------------------------------\n");
-	printf("Object : %7d  Total  : %7ld   Sanity Checks : %s  Total: %7ld\n",
-			s->object_size, s->slabs, onoff(s->sanity_checks),
-			s->slabs * (page_size << s->order));
-	printf("SlabObj: %7d  Full   : %7ld   Redzoning     : %s  Used : %7ld\n",
-			s->slab_size, s->slabs - s->partial - s->cpu_slabs,
-			onoff(s->red_zone), s->objects * s->object_size);
-	printf("SlabSiz: %7d  Partial: %7ld   Poisoning     : %s  Loss : %7ld\n",
-			page_size << s->order, s->partial, onoff(s->poison),
-			s->slabs * (page_size << s->order) - s->objects * s->object_size);
-	printf("Loss   : %7d  CpuSlab: %7d   Tracking      : %s  Lalig: %7ld\n",
-			s->slab_size - s->object_size, s->cpu_slabs, onoff(s->store_user),
-			(s->slab_size - s->object_size) * s->objects);
-	printf("Align  : %7d  Objects: %7d   Tracing       : %s  Lpadd: %7ld\n",
-			s->align, s->objs_per_slab, onoff(s->trace),
-			((page_size << s->order) - s->objs_per_slab * s->slab_size) *
-			s->slabs);
-
-	ops(s);
-	show_tracking(s);
-	slab_numa(s, 1);
-	slab_stats(s);
-}
-
-static void slabcache(struct slabinfo *s)
-{
-	char size_str[20];
-	char dist_str[40];
-	char flags[20];
-	char *p = flags;
-
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	if (actual_slabs == 1) {
-		report(s);
-		return;
-	}
-
-	if (skip_zero && !show_empty && !s->slabs)
-		return;
-
-	if (show_empty && s->slabs)
-		return;
-
-	store_size(size_str, slab_size(s));
-	snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
-						s->partial, s->cpu_slabs);
-
-	if (!line++)
-		first_line();
-
-	if (s->aliases)
-		*p++ = '*';
-	if (s->cache_dma)
-		*p++ = 'd';
-	if (s->hwcache_align)
-		*p++ = 'A';
-	if (s->poison)
-		*p++ = 'P';
-	if (s->reclaim_account)
-		*p++ = 'a';
-	if (s->red_zone)
-		*p++ = 'Z';
-	if (s->sanity_checks)
-		*p++ = 'F';
-	if (s->store_user)
-		*p++ = 'U';
-	if (s->trace)
-		*p++ = 'T';
-
-	*p = 0;
-	if (show_activity) {
-		unsigned long total_alloc;
-		unsigned long total_free;
-
-		total_alloc = s->alloc_fastpath + s->alloc_slowpath;
-		total_free = s->free_fastpath + s->free_slowpath;
-
-		printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d\n",
-			s->name, s->objects,
-			total_alloc, total_free,
-			total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
-			total_free ? (s->free_fastpath * 100 / total_free) : 0,
-			s->order_fallback, s->order);
-	}
-	else
-		printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
-			s->name, s->objects, s->object_size, size_str, dist_str,
-			s->objs_per_slab, s->order,
-			s->slabs ? (s->partial * 100) / s->slabs : 100,
-			s->slabs ? (s->objects * s->object_size * 100) /
-				(s->slabs * (page_size << s->order)) : 100,
-			flags);
-}
-
-/*
- * Analyze debug options. Return false if something is amiss.
- */
-static int debug_opt_scan(char *opt)
-{
-	if (!opt || !opt[0] || strcmp(opt, "-") == 0)
-		return 1;
-
-	if (strcasecmp(opt, "a") == 0) {
-		sanity = 1;
-		poison = 1;
-		redzone = 1;
-		tracking = 1;
-		return 1;
-	}
-
-	for ( ; *opt; opt++)
-	 	switch (*opt) {
-		case 'F' : case 'f':
-			if (sanity)
-				return 0;
-			sanity = 1;
-			break;
-		case 'P' : case 'p':
-			if (poison)
-				return 0;
-			poison = 1;
-			break;
-
-		case 'Z' : case 'z':
-			if (redzone)
-				return 0;
-			redzone = 1;
-			break;
-
-		case 'U' : case 'u':
-			if (tracking)
-				return 0;
-			tracking = 1;
-			break;
-
-		case 'T' : case 't':
-			if (tracing)
-				return 0;
-			tracing = 1;
-			break;
-		default:
-			return 0;
-		}
-	return 1;
-}
-
-static int slab_empty(struct slabinfo *s)
-{
-	if (s->objects > 0)
-		return 0;
-
-	/*
-	 * We may still have slabs even if there are no objects. Shrinking will
-	 * remove them.
-	 */
-	if (s->slabs != 0)
-		set_obj(s, "shrink", 1);
-
-	return 1;
-}
-
-static void slab_debug(struct slabinfo *s)
-{
-	if (strcmp(s->name, "*") == 0)
-		return;
-
-	if (sanity && !s->sanity_checks) {
-		set_obj(s, "sanity", 1);
-	}
-	if (!sanity && s->sanity_checks) {
-		if (slab_empty(s))
-			set_obj(s, "sanity", 0);
-		else
-			fprintf(stderr, "%s not empty cannot disable sanity checks\n", s->name);
-	}
-	if (redzone && !s->red_zone) {
-		if (slab_empty(s))
-			set_obj(s, "red_zone", 1);
-		else
-			fprintf(stderr, "%s not empty cannot enable redzoning\n", s->name);
-	}
-	if (!redzone && s->red_zone) {
-		if (slab_empty(s))
-			set_obj(s, "red_zone", 0);
-		else
-			fprintf(stderr, "%s not empty cannot disable redzoning\n", s->name);
-	}
-	if (poison && !s->poison) {
-		if (slab_empty(s))
-			set_obj(s, "poison", 1);
-		else
-			fprintf(stderr, "%s not empty cannot enable poisoning\n", s->name);
-	}
-	if (!poison && s->poison) {
-		if (slab_empty(s))
-			set_obj(s, "poison", 0);
-		else
-			fprintf(stderr, "%s not empty cannot disable poisoning\n", s->name);
-	}
-	if (tracking && !s->store_user) {
-		if (slab_empty(s))
-			set_obj(s, "store_user", 1);
-		else
-			fprintf(stderr, "%s not empty cannot enable tracking\n", s->name);
-	}
-	if (!tracking && s->store_user) {
-		if (slab_empty(s))
-			set_obj(s, "store_user", 0);
-		else
-			fprintf(stderr, "%s not empty cannot disable tracking\n", s->name);
-	}
-	if (tracing && !s->trace) {
-		if (slabs == 1)
-			set_obj(s, "trace", 1);
-		else
-			fprintf(stderr, "%s can only enable trace for one slab at a time\n", s->name);
-	}
-	if (!tracing && s->trace)
-		set_obj(s, "trace", 1);
-}
-
-static void totals(void)
-{
-	struct slabinfo *s;
-
-	int used_slabs = 0;
-	char b1[20], b2[20], b3[20], b4[20];
-	unsigned long long max = 1ULL << 63;
-
-	/* Object size */
-	unsigned long long min_objsize = max, max_objsize = 0, avg_objsize;
-
-	/* Number of partial slabs in a slabcache */
-	unsigned long long min_partial = max, max_partial = 0,
-				avg_partial, total_partial = 0;
-
-	/* Number of slabs in a slab cache */
-	unsigned long long min_slabs = max, max_slabs = 0,
-				avg_slabs, total_slabs = 0;
-
-	/* Size of the whole slab */
-	unsigned long long min_size = max, max_size = 0,
-				avg_size, total_size = 0;
-
-	/* Bytes used for object storage in a slab */
-	unsigned long long min_used = max, max_used = 0,
-				avg_used, total_used = 0;
-
-	/* Waste: Bytes used for alignment and padding */
-	unsigned long long min_waste = max, max_waste = 0,
-				avg_waste, total_waste = 0;
-	/* Number of objects in a slab */
-	unsigned long long min_objects = max, max_objects = 0,
-				avg_objects, total_objects = 0;
-	/* Waste per object */
-	unsigned long long min_objwaste = max,
-				max_objwaste = 0, avg_objwaste,
-				total_objwaste = 0;
-
-	/* Memory per object */
-	unsigned long long min_memobj = max,
-				max_memobj = 0, avg_memobj,
-				total_objsize = 0;
-
-	/* Percentage of partial slabs per slab */
-	unsigned long min_ppart = 100, max_ppart = 0,
-				avg_ppart, total_ppart = 0;
-
-	/* Number of objects in partial slabs */
-	unsigned long min_partobj = max, max_partobj = 0,
-				avg_partobj, total_partobj = 0;
-
-	/* Percentage of partial objects of all objects in a slab */
-	unsigned long min_ppartobj = 100, max_ppartobj = 0,
-				avg_ppartobj, total_ppartobj = 0;
-
-
-	for (s = slabinfo; s < slabinfo + slabs; s++) {
-		unsigned long long size;
-		unsigned long used;
-		unsigned long long wasted;
-		unsigned long long objwaste;
-		unsigned long percentage_partial_slabs;
-		unsigned long percentage_partial_objs;
-
-		if (!s->slabs || !s->objects)
-			continue;
-
-		used_slabs++;
-
-		size = slab_size(s);
-		used = s->objects * s->object_size;
-		wasted = size - used;
-		objwaste = s->slab_size - s->object_size;
-
-		percentage_partial_slabs = s->partial * 100 / s->slabs;
-		if (percentage_partial_slabs > 100)
-			percentage_partial_slabs = 100;
-
-		percentage_partial_objs = s->objects_partial * 100
-							/ s->objects;
-
-		if (percentage_partial_objs > 100)
-			percentage_partial_objs = 100;
-
-		if (s->object_size < min_objsize)
-			min_objsize = s->object_size;
-		if (s->partial < min_partial)
-			min_partial = s->partial;
-		if (s->slabs < min_slabs)
-			min_slabs = s->slabs;
-		if (size < min_size)
-			min_size = size;
-		if (wasted < min_waste)
-			min_waste = wasted;
-		if (objwaste < min_objwaste)
-			min_objwaste = objwaste;
-		if (s->objects < min_objects)
-			min_objects = s->objects;
-		if (used < min_used)
-			min_used = used;
-		if (s->objects_partial < min_partobj)
-			min_partobj = s->objects_partial;
-		if (percentage_partial_slabs < min_ppart)
-			min_ppart = percentage_partial_slabs;
-		if (percentage_partial_objs < min_ppartobj)
-			min_ppartobj = percentage_partial_objs;
-		if (s->slab_size < min_memobj)
-			min_memobj = s->slab_size;
-
-		if (s->object_size > max_objsize)
-			max_objsize = s->object_size;
-		if (s->partial > max_partial)
-			max_partial = s->partial;
-		if (s->slabs > max_slabs)
-			max_slabs = s->slabs;
-		if (size > max_size)
-			max_size = size;
-		if (wasted > max_waste)
-			max_waste = wasted;
-		if (objwaste > max_objwaste)
-			max_objwaste = objwaste;
-		if (s->objects > max_objects)
-			max_objects = s->objects;
-		if (used > max_used)
-			max_used = used;
-		if (s->objects_partial > max_partobj)
-			max_partobj = s->objects_partial;
-		if (percentage_partial_slabs > max_ppart)
-			max_ppart = percentage_partial_slabs;
-		if (percentage_partial_objs > max_ppartobj)
-			max_ppartobj = percentage_partial_objs;
-		if (s->slab_size > max_memobj)
-			max_memobj = s->slab_size;
-
-		total_partial += s->partial;
-		total_slabs += s->slabs;
-		total_size += size;
-		total_waste += wasted;
-
-		total_objects += s->objects;
-		total_used += used;
-		total_partobj += s->objects_partial;
-		total_ppart += percentage_partial_slabs;
-		total_ppartobj += percentage_partial_objs;
-
-		total_objwaste += s->objects * objwaste;
-		total_objsize += s->objects * s->slab_size;
-	}
-
-	if (!total_objects) {
-		printf("No objects\n");
-		return;
-	}
-	if (!used_slabs) {
-		printf("No slabs\n");
-		return;
-	}
-
-	/* Per slab averages */
-	avg_partial = total_partial / used_slabs;
-	avg_slabs = total_slabs / used_slabs;
-	avg_size = total_size / used_slabs;
-	avg_waste = total_waste / used_slabs;
-
-	avg_objects = total_objects / used_slabs;
-	avg_used = total_used / used_slabs;
-	avg_partobj = total_partobj / used_slabs;
-	avg_ppart = total_ppart / used_slabs;
-	avg_ppartobj = total_ppartobj / used_slabs;
-
-	/* Per object object sizes */
-	avg_objsize = total_used / total_objects;
-	avg_objwaste = total_objwaste / total_objects;
-	avg_partobj = total_partobj * 100 / total_objects;
-	avg_memobj = total_objsize / total_objects;
-
-	printf("Slabcache Totals\n");
-	printf("----------------\n");
-	printf("Slabcaches : %3d      Aliases  : %3d->%-3d Active: %3d\n",
-			slabs, aliases, alias_targets, used_slabs);
-
-	store_size(b1, total_size);store_size(b2, total_waste);
-	store_size(b3, total_waste * 100 / total_used);
-	printf("Memory used: %6s   # Loss   : %6s   MRatio:%6s%%\n", b1, b2, b3);
-
-	store_size(b1, total_objects);store_size(b2, total_partobj);
-	store_size(b3, total_partobj * 100 / total_objects);
-	printf("# Objects  : %6s   # PartObj: %6s   ORatio:%6s%%\n", b1, b2, b3);
-
-	printf("\n");
-	printf("Per Cache    Average         Min         Max       Total\n");
-	printf("---------------------------------------------------------\n");
-
-	store_size(b1, avg_objects);store_size(b2, min_objects);
-	store_size(b3, max_objects);store_size(b4, total_objects);
-	printf("#Objects  %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_slabs);store_size(b2, min_slabs);
-	store_size(b3, max_slabs);store_size(b4, total_slabs);
-	printf("#Slabs    %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_partial);store_size(b2, min_partial);
-	store_size(b3, max_partial);store_size(b4, total_partial);
-	printf("#PartSlab %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-	store_size(b1, avg_ppart);store_size(b2, min_ppart);
-	store_size(b3, max_ppart);
-	store_size(b4, total_partial * 100  / total_slabs);
-	printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_partobj);store_size(b2, min_partobj);
-	store_size(b3, max_partobj);
-	store_size(b4, total_partobj);
-	printf("PartObjs  %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
-	store_size(b3, max_ppartobj);
-	store_size(b4, total_partobj * 100 / total_objects);
-	printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_size);store_size(b2, min_size);
-	store_size(b3, max_size);store_size(b4, total_size);
-	printf("Memory    %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_used);store_size(b2, min_used);
-	store_size(b3, max_used);store_size(b4, total_used);
-	printf("Used      %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-
-	store_size(b1, avg_waste);store_size(b2, min_waste);
-	store_size(b3, max_waste);store_size(b4, total_waste);
-	printf("Loss      %10s  %10s  %10s  %10s\n",
-			b1,	b2,	b3,	b4);
-
-	printf("\n");
-	printf("Per Object   Average         Min         Max\n");
-	printf("---------------------------------------------\n");
-
-	store_size(b1, avg_memobj);store_size(b2, min_memobj);
-	store_size(b3, max_memobj);
-	printf("Memory    %10s  %10s  %10s\n",
-			b1,	b2,	b3);
-	store_size(b1, avg_objsize);store_size(b2, min_objsize);
-	store_size(b3, max_objsize);
-	printf("User      %10s  %10s  %10s\n",
-			b1,	b2,	b3);
-
-	store_size(b1, avg_objwaste);store_size(b2, min_objwaste);
-	store_size(b3, max_objwaste);
-	printf("Loss      %10s  %10s  %10s\n",
-			b1,	b2,	b3);
-}
-
-static void sort_slabs(void)
-{
-	struct slabinfo *s1,*s2;
-
-	for (s1 = slabinfo; s1 < slabinfo + slabs; s1++) {
-		for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
-			int result;
-
-			if (sort_size)
-				result = slab_size(s1) < slab_size(s2);
-			else if (sort_active)
-				result = slab_activity(s1) < slab_activity(s2);
-			else
-				result = strcasecmp(s1->name, s2->name);
-
-			if (show_inverted)
-				result = -result;
-
-			if (result > 0) {
-				struct slabinfo t;
-
-				memcpy(&t, s1, sizeof(struct slabinfo));
-				memcpy(s1, s2, sizeof(struct slabinfo));
-				memcpy(s2, &t, sizeof(struct slabinfo));
-			}
-		}
-	}
-}
-
-static void sort_aliases(void)
-{
-	struct aliasinfo *a1,*a2;
-
-	for (a1 = aliasinfo; a1 < aliasinfo + aliases; a1++) {
-		for (a2 = a1 + 1; a2 < aliasinfo + aliases; a2++) {
-			char *n1, *n2;
-
-			n1 = a1->name;
-			n2 = a2->name;
-			if (show_alias && !show_inverted) {
-				n1 = a1->ref;
-				n2 = a2->ref;
-			}
-			if (strcasecmp(n1, n2) > 0) {
-				struct aliasinfo t;
-
-				memcpy(&t, a1, sizeof(struct aliasinfo));
-				memcpy(a1, a2, sizeof(struct aliasinfo));
-				memcpy(a2, &t, sizeof(struct aliasinfo));
-			}
-		}
-	}
-}
-
-static void link_slabs(void)
-{
-	struct aliasinfo *a;
-	struct slabinfo *s;
-
-	for (a = aliasinfo; a < aliasinfo + aliases; a++) {
-
-		for (s = slabinfo; s < slabinfo + slabs; s++)
-			if (strcmp(a->ref, s->name) == 0) {
-				a->slab = s;
-				s->refs++;
-				break;
-			}
-		if (s == slabinfo + slabs)
-			fatal("Unresolved alias %s\n", a->ref);
-	}
-}
-
-static void alias(void)
-{
-	struct aliasinfo *a;
-	char *active = NULL;
-
-	sort_aliases();
-	link_slabs();
-
-	for(a = aliasinfo; a < aliasinfo + aliases; a++) {
-
-		if (!show_single_ref && a->slab->refs == 1)
-			continue;
-
-		if (!show_inverted) {
-			if (active) {
-				if (strcmp(a->slab->name, active) == 0) {
-					printf(" %s", a->name);
-					continue;
-				}
-			}
-			printf("\n%-12s <- %s", a->slab->name, a->name);
-			active = a->slab->name;
-		}
-		else
-			printf("%-20s -> %s\n", a->name, a->slab->name);
-	}
-	if (active)
-		printf("\n");
-}
-
-
-static void rename_slabs(void)
-{
-	struct slabinfo *s;
-	struct aliasinfo *a;
-
-	for (s = slabinfo; s < slabinfo + slabs; s++) {
-		if (*s->name != ':')
-			continue;
-
-		if (s->refs > 1 && !show_first_alias)
-			continue;
-
-		a = find_one_alias(s);
-
-		if (a)
-			s->name = a->name;
-		else {
-			s->name = "*";
-			actual_slabs--;
-		}
-	}
-}
-
-static int slab_mismatch(char *slab)
-{
-	return regexec(&pattern, slab, 0, NULL, 0);
-}
-
-static void read_slab_dir(void)
-{
-	DIR *dir;
-	struct dirent *de;
-	struct slabinfo *slab = slabinfo;
-	struct aliasinfo *alias = aliasinfo;
-	char *p;
-	char *t;
-	int count;
-
-	if (chdir("/sys/kernel/slab") && chdir("/sys/slab"))
-		fatal("SYSFS support for SLUB not active\n");
-
-	dir = opendir(".");
-	while ((de = readdir(dir))) {
-		if (de->d_name[0] == '.' ||
-			(de->d_name[0] != ':' && slab_mismatch(de->d_name)))
-				continue;
-		switch (de->d_type) {
-		   case DT_LNK:
-		   	alias->name = strdup(de->d_name);
-			count = readlink(de->d_name, buffer, sizeof(buffer));
-
-			if (count < 0)
-				fatal("Cannot read symlink %s\n", de->d_name);
-
-			buffer[count] = 0;
-			p = buffer + count;
-			while (p > buffer && p[-1] != '/')
-				p--;
-			alias->ref = strdup(p);
-			alias++;
-			break;
-		   case DT_DIR:
-			if (chdir(de->d_name))
-				fatal("Unable to access slab %s\n", slab->name);
-		   	slab->name = strdup(de->d_name);
-			slab->alias = 0;
-			slab->refs = 0;
-			slab->aliases = get_obj("aliases");
-			slab->align = get_obj("align");
-			slab->cache_dma = get_obj("cache_dma");
-			slab->cpu_slabs = get_obj("cpu_slabs");
-			slab->destroy_by_rcu = get_obj("destroy_by_rcu");
-			slab->hwcache_align = get_obj("hwcache_align");
-			slab->object_size = get_obj("object_size");
-			slab->objects = get_obj("objects");
-			slab->objects_partial = get_obj("objects_partial");
-			slab->objects_total = get_obj("objects_total");
-			slab->objs_per_slab = get_obj("objs_per_slab");
-			slab->order = get_obj("order");
-			slab->partial = get_obj("partial");
-			slab->partial = get_obj_and_str("partial", &t);
-			decode_numa_list(slab->numa_partial, t);
-			free(t);
-			slab->poison = get_obj("poison");
-			slab->reclaim_account = get_obj("reclaim_account");
-			slab->red_zone = get_obj("red_zone");
-			slab->sanity_checks = get_obj("sanity_checks");
-			slab->slab_size = get_obj("slab_size");
-			slab->slabs = get_obj_and_str("slabs", &t);
-			decode_numa_list(slab->numa, t);
-			free(t);
-			slab->store_user = get_obj("store_user");
-			slab->trace = get_obj("trace");
-			slab->alloc_fastpath = get_obj("alloc_fastpath");
-			slab->alloc_slowpath = get_obj("alloc_slowpath");
-			slab->free_fastpath = get_obj("free_fastpath");
-			slab->free_slowpath = get_obj("free_slowpath");
-			slab->free_frozen= get_obj("free_frozen");
-			slab->free_add_partial = get_obj("free_add_partial");
-			slab->free_remove_partial = get_obj("free_remove_partial");
-			slab->alloc_from_partial = get_obj("alloc_from_partial");
-			slab->alloc_slab = get_obj("alloc_slab");
-			slab->alloc_refill = get_obj("alloc_refill");
-			slab->free_slab = get_obj("free_slab");
-			slab->cpuslab_flush = get_obj("cpuslab_flush");
-			slab->deactivate_full = get_obj("deactivate_full");
-			slab->deactivate_empty = get_obj("deactivate_empty");
-			slab->deactivate_to_head = get_obj("deactivate_to_head");
-			slab->deactivate_to_tail = get_obj("deactivate_to_tail");
-			slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
-			slab->order_fallback = get_obj("order_fallback");
-			chdir("..");
-			if (slab->name[0] == ':')
-				alias_targets++;
-			slab++;
-			break;
-		   default :
-			fatal("Unknown file type %lx\n", de->d_type);
-		}
-	}
-	closedir(dir);
-	slabs = slab - slabinfo;
-	actual_slabs = slabs;
-	aliases = alias - aliasinfo;
-	if (slabs > MAX_SLABS)
-		fatal("Too many slabs\n");
-	if (aliases > MAX_ALIASES)
-		fatal("Too many aliases\n");
-}
-
-static void output_slabs(void)
-{
-	struct slabinfo *slab;
-
-	for (slab = slabinfo; slab < slabinfo + slabs; slab++) {
-
-		if (slab->alias)
-			continue;
-
-
-		if (show_numa)
-			slab_numa(slab, 0);
-		else if (show_track)
-			show_tracking(slab);
-		else if (validate)
-			slab_validate(slab);
-		else if (shrink)
-			slab_shrink(slab);
-		else if (set_debug)
-			slab_debug(slab);
-		else if (show_ops)
-			ops(slab);
-		else if (show_slab)
-			slabcache(slab);
-		else if (show_report)
-			report(slab);
-	}
-}
-
-struct option opts[] = {
-	{ "aliases", 0, NULL, 'a' },
-	{ "activity", 0, NULL, 'A' },
-	{ "debug", 2, NULL, 'd' },
-	{ "display-activity", 0, NULL, 'D' },
-	{ "empty", 0, NULL, 'e' },
-	{ "first-alias", 0, NULL, 'f' },
-	{ "help", 0, NULL, 'h' },
-	{ "inverted", 0, NULL, 'i'},
-	{ "numa", 0, NULL, 'n' },
-	{ "ops", 0, NULL, 'o' },
-	{ "report", 0, NULL, 'r' },
-	{ "shrink", 0, NULL, 's' },
-	{ "slabs", 0, NULL, 'l' },
-	{ "track", 0, NULL, 't'},
-	{ "validate", 0, NULL, 'v' },
-	{ "zero", 0, NULL, 'z' },
-	{ "1ref", 0, NULL, '1'},
-	{ NULL, 0, NULL, 0 }
-};
-
-int main(int argc, char *argv[])
-{
-	int c;
-	int err;
-	char *pattern_source;
-
-	page_size = getpagesize();
-
-	while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTS",
-						opts, NULL)) != -1)
-		switch (c) {
-		case '1':
-			show_single_ref = 1;
-			break;
-		case 'a':
-			show_alias = 1;
-			break;
-		case 'A':
-			sort_active = 1;
-			break;
-		case 'd':
-			set_debug = 1;
-			if (!debug_opt_scan(optarg))
-				fatal("Invalid debug option '%s'\n", optarg);
-			break;
-		case 'D':
-			show_activity = 1;
-			break;
-		case 'e':
-			show_empty = 1;
-			break;
-		case 'f':
-			show_first_alias = 1;
-			break;
-		case 'h':
-			usage();
-			return 0;
-		case 'i':
-			show_inverted = 1;
-			break;
-		case 'n':
-			show_numa = 1;
-			break;
-		case 'o':
-			show_ops = 1;
-			break;
-		case 'r':
-			show_report = 1;
-			break;
-		case 's':
-			shrink = 1;
-			break;
-		case 'l':
-			show_slab = 1;
-			break;
-		case 't':
-			show_track = 1;
-			break;
-		case 'v':
-			validate = 1;
-			break;
-		case 'z':
-			skip_zero = 0;
-			break;
-		case 'T':
-			show_totals = 1;
-			break;
-		case 'S':
-			sort_size = 1;
-			break;
-
-		default:
-			fatal("%s: Invalid option '%c'\n", argv[0], optopt);
-
-	}
-
-	if (!show_slab && !show_alias && !show_track && !show_report
-		&& !validate && !shrink && !set_debug && !show_ops)
-			show_slab = 1;
-
-	if (argc > optind)
-		pattern_source = argv[optind];
-	else
-		pattern_source = ".*";
-
-	err = regcomp(&pattern, pattern_source, REG_ICASE|REG_NOSUB);
-	if (err)
-		fatal("%s: Invalid pattern '%s' code %d\n",
-			argv[0], pattern_source, err);
-	read_slab_dir();
-	if (show_alias)
-		alias();
-	else
-	if (show_totals)
-		totals();
-	else {
-		link_slabs();
-		rename_slabs();
-		sort_slabs();
-		output_slabs();
-	}
-	return 0;
-}
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
new file mode 100644
index 0000000..0924aac
--- /dev/null
+++ b/Documentation/vm/transhuge.txt
@@ -0,0 +1,298 @@
+= Transparent Hugepage Support =
+
+== Objective ==
+
+Performance critical computing applications dealing with large memory
+working sets are already running on top of libhugetlbfs and in turn
+hugetlbfs. Transparent Hugepage Support is an alternative means of
+backing virtual memory with huge pages that supports the automatic
+promotion and demotion of page sizes, without the shortcomings of
+hugetlbfs.
+
+Currently it only works for anonymous memory mappings but in the
+future it can expand over the pagecache layer starting with tmpfs.
+
+Applications run faster because of two factors. The first factor is
+almost completely irrelevant and not of significant interest, because
+it also has the downside of requiring larger clear-page and copy-page
+operations in page faults, which is a potentially negative effect: it
+consists in taking a single page fault for each 2M virtual region
+touched by userland (reducing the enter/exit kernel frequency by a
+factor of 512). This only matters the first time the memory is
+accessed for the lifetime of a memory mapping. The second, long
+lasting and much more important factor affects all subsequent
+accesses to the memory for the whole runtime of the application. The
+second factor consists of two components: 1) the TLB miss will run
+faster (especially with virtualization using nested pagetables, but
+almost always also on bare metal without virtualization) and 2) a
+single TLB entry will be mapping a much larger amount of virtual
+memory, in turn reducing the number of TLB misses. With
+virtualization and nested pagetables, larger TLB entries can be used
+only if both KVM and the Linux guest are using hugepages, but a
+significant speedup already happens if only one of the two is using
+hugepages, just because the TLB miss is going to run faster.
+
+== Design ==
+
+- "graceful fallback": mm components which don't have transparent
+  hugepage knowledge fall back to breaking a transparent hugepage and
+  working on the regular pages and their respective regular pmd/pte
+  mappings
+
+- if a hugepage allocation fails because of memory fragmentation,
+  regular pages should be gracefully allocated instead and mixed in
+  the same vma without any failure or significant delay and without
+  userland noticing
+
+- if some task quits and more hugepages become available (either
+  immediately in the buddy or through the VM), guest physical memory
+  backed by regular pages should be relocated on hugepages
+  automatically (with khugepaged)
+
+- it doesn't require memory reservation and in turn it uses hugepages
+  whenever possible (the only possible reservation here is kernelcore=
+  to avoid unmovable pages fragmenting all the memory, but such a tweak
+  is not specific to transparent hugepage support and it's a generic
+  feature that applies to all dynamic high order allocations in the
+  kernel)
+
+- this initial support only offers the feature in the anonymous memory
+  regions but it'd be ideal to move it to tmpfs and the pagecache
+  later
+
+Transparent Hugepage Support maximizes the usefulness of free memory
+compared to the reservation approach of hugetlbfs by allowing all
+unused memory to be used as cache or other movable (or even unmovable)
+entities. It doesn't require reservation to prevent hugepage
+allocation failures from being noticeable from userland. It allows
+paging and all other advanced VM features to be available on
+hugepages. It requires no modifications for applications to take
+advantage of it.
+
+Applications can however be further optimized to take advantage of
+this feature, as for example they have been optimized before to avoid
+a flood of mmap system calls for every malloc(4k). Optimizing userland
+is by far not mandatory, and khugepaged can already take care of long
+lived page allocations even for hugepage unaware applications that
+deal with large amounts of memory.
+
+In certain cases, when hugepages are enabled system wide, an
+application may end up allocating more memory resources. An
+application may mmap a large region but only touch 1 byte of it; in
+that case a 2M page might be allocated instead of a 4k page for no
+good reason. This is why it's possible to disable hugepages
+system-wide and to only have them inside MADV_HUGEPAGE madvise
+regions.
+
+Embedded systems should enable hugepages only inside madvise regions
+to eliminate any risk of wasting any precious byte of memory and to
+only run faster.
+
+Applications that get a lot of benefit from hugepages, and that don't
+risk losing memory by using hugepages, should use
+madvise(MADV_HUGEPAGE) on their critical mmapped regions.
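+
+A minimal sketch of such a call (buf and buf_len stand for an existing
+long-lived mapping; on older C libraries MADV_HUGEPAGE may have to be
+taken from <linux/mman.h>):
+
+	#include <sys/mman.h>
+	#include <stdio.h>
+
+	/* buf was obtained earlier, e.g. from an mmap() of buf_len bytes */
+	if (madvise(buf, buf_len, MADV_HUGEPAGE))
+		perror("madvise(MADV_HUGEPAGE)");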
+
+== sysfs ==
+
+Transparent Hugepage Support can be entirely disabled (mostly for
+debugging purposes) or only enabled inside MADV_HUGEPAGE regions (to
+avoid the risk of consuming more memory resources) or enabled system
+wide. This can be achieved with one of:
+
+echo always >/sys/kernel/mm/transparent_hugepage/enabled
+echo madvise >/sys/kernel/mm/transparent_hugepage/enabled
+echo never >/sys/kernel/mm/transparent_hugepage/enabled
+
+It's also possible to limit the VM's defrag efforts to generate
+hugepages (in case they're not immediately free) to madvise regions
+only, or to never try to defrag memory and simply fall back to regular
+pages unless hugepages are immediately available. Clearly, if we spend
+CPU time defragging memory we expect to gain even more from using
+hugepages later instead of regular pages. This isn't always
+guaranteed, but it may be more likely if the allocation is for a
+MADV_HUGEPAGE region.
+
+echo always >/sys/kernel/mm/transparent_hugepage/defrag
+echo madvise >/sys/kernel/mm/transparent_hugepage/defrag
+echo never >/sys/kernel/mm/transparent_hugepage/defrag
+
+khugepaged will be started automatically when
+transparent_hugepage/enabled is set to "always" or "madvise", and it
+will be shut down automatically if it's set to "never".
+
+khugepaged usually runs at low frequency, so while one may not want to
+invoke defrag algorithms synchronously during page faults, it should
+be worthwhile to invoke defrag at least in khugepaged. However it's
+also possible to disable defrag in khugepaged:
+
+echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag
+
+You can also control how many pages khugepaged should scan at each
+pass:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
+
+and how many milliseconds to wait in khugepaged between each pass (you
+can set this to 0 to run khugepaged at 100% utilization of one core):
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
+
+and how many milliseconds to wait in khugepaged if there's a hugepage
+allocation failure, to throttle the next allocation attempt:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs
+
+The khugepaged progress can be seen in the number of pages collapsed:
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
+
+and in the number of full scans performed (incremented once per pass):
+
+/sys/kernel/mm/transparent_hugepage/khugepaged/full_scans
+
+== Boot parameter ==
+
+You can change the sysfs boot time defaults of Transparent Hugepage
+Support by passing the parameter "transparent_hugepage=always" or
+"transparent_hugepage=madvise" or "transparent_hugepage=never"
+(without "") to the kernel command line.
+
+== Need of application restart ==
+
+The transparent_hugepage/enabled values only affect future
+behavior. So to make them effective you need to restart any
+application that could have been using hugepages. This also applies to
+the regions registered in khugepaged.
+
+== get_user_pages and follow_page ==
+
+get_user_pages and follow_page, if run on a hugepage, will return the
+head or tail pages as usual (exactly as they would on
+hugetlbfs). Most gup users will only care about the actual physical
+address of the page and its temporary pinning, to be released after
+the I/O is complete, so they won't ever notice the fact that the page
+is huge. But if any driver is going to poke at the page structure of a
+tail page (for example to check page->mapping or other bits that are
+relevant for the head page and not the tail page), it should be
+updated to check the head page instead (while serializing properly
+against split_huge_page() to prevent the head and tail pages from
+disappearing from under it; see the futex code for an example of that,
+and note that hugetlbfs also needed special handling in the futex code
+for similar reasons).
+
+NOTE: these aren't new constraints on the GUP API; they match the
+constraints that already apply to hugetlbfs, so any driver capable
+of handling GUP on hugetlbfs will also work fine on transparent
+hugepage backed mappings.
+
+In case you can't handle compound pages returned by follow_page, the
+FOLL_SPLIT bit can be specified as a parameter to follow_page, so that
+it will split the hugepages before returning them. Migration, for
+example, passes FOLL_SPLIT as a parameter to follow_page because it's
+not hugepage aware and in fact can't work at all on hugetlbfs (but it
+works fine on transparent hugepages thanks to FOLL_SPLIT). Migration
+simply can't deal with hugepages being returned (it doesn't just check
+the pfn of the page and pin it during the copy; it expects to migrate
+the memory in regular page sizes and with regular pte/pmd mappings).
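+
+A schematic in-kernel sketch (assuming the caller already holds the
+mmap_sem and has looked up the vma; the locals are illustrative):
+
+	struct page *page;
+
+	/* ask follow_page to split any hugepage before returning it */
+	page = follow_page(vma, address, FOLL_GET | FOLL_SPLIT);
+	if (page && !IS_ERR(page)) {
+		/* ... operate on a regular 4k page ... */
+		put_page(page);
+	}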
+
+== Optimizing the applications ==
+
+To be guaranteed that the kernel will map a 2M page immediately in any
+memory region, the mmap region has to be hugepage naturally
+aligned. posix_memalign() can provide that guarantee.
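+
+For example, a sketch (the 2M hugepage size and the name HPAGE_2M are
+assumptions for illustration; error handling is minimal):
+
+	#include <stdlib.h>
+	#include <sys/mman.h>
+
+	#define HPAGE_2M	(2UL * 1024 * 1024)
+
+	void *p = NULL;
+
+	/* 2M-aligned and 2M-multiple sized: eligible for immediate hugepages */
+	if (posix_memalign(&p, HPAGE_2M, 16 * HPAGE_2M) == 0)
+		madvise(p, 16 * HPAGE_2M, MADV_HUGEPAGE);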
+
+== Hugetlbfs ==
+
+You can use hugetlbfs on a kernel that has transparent hugepage
+support enabled just fine as always. No difference can be noted in
+hugetlbfs other than there will be less overall fragmentation. All
+usual features belonging to hugetlbfs are preserved and
+unaffected. libhugetlbfs will also work fine as usual.
+
+== Graceful fallback ==
+
+Code that walks pagetables but is unaware of huge pmds can simply call
+split_huge_page_pmd(mm, pmd) where the pmd is the one returned by
+pmd_offset. It's trivial to make the code transparent hugepage aware
+by just grepping for "pmd_offset" and adding split_huge_page_pmd where
+it is missing after pmd_offset returns the pmd. Thanks to the graceful
+fallback design, with a one liner change you can avoid writing
+hundreds if not thousands of lines of complex code to make your code
+hugepage aware.
+
+If you're not walking pagetables but you run into a physical hugepage
+that you can't handle natively in your code, you can split it by
+calling split_huge_page(page). This is what the Linux VM does before
+it tries to swap out the hugepage, for example.
+
+Example to make mremap.c transparent hugepage aware with a one liner
+change:
+
+diff --git a/mm/mremap.c b/mm/mremap.c
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -41,6 +41,7 @@ static pmd_t *get_old_pmd(struct mm_stru
+		return NULL;
+
+	pmd = pmd_offset(pud, addr);
++	split_huge_page_pmd(mm, pmd);
+	if (pmd_none_or_clear_bad(pmd))
+		return NULL;
+
+== Locking in hugepage aware code ==
+
+We want as much code as possible to be hugepage aware, as calling
+split_huge_page() or split_huge_page_pmd() has a cost.
+
+To make pagetable walks huge pmd aware, all you need to do is to call
+pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the
+mmap_sem in read (or write) mode to be sure a huge pmd cannot be
+created from under you by khugepaged (khugepaged collapse_huge_page
+takes the mmap_sem in write mode in addition to the anon_vma lock). If
+pmd_trans_huge returns false, you just fall back to the old code
+paths. If instead pmd_trans_huge returns true, you have to take the
+mm->page_table_lock and re-run pmd_trans_huge. Taking the
+page_table_lock will prevent the huge pmd from being converted into a
+regular pmd from under you (split_huge_page can run in parallel to the
+pagetable walk). If the second pmd_trans_huge returns false, you
+should just drop the page_table_lock and fall back to the old code as
+before. Otherwise you should run pmd_trans_splitting on the pmd. If
+pmd_trans_splitting returns true, it means split_huge_page is already
+in the middle of splitting the page, so it's enough to drop the
+page_table_lock, call wait_split_huge_page and then fall back to the
+old code paths. You are guaranteed that by the time
+wait_split_huge_page returns, the pmd isn't huge anymore. If
+pmd_trans_splitting returns false, you can proceed to process the huge
+pmd and the hugepage natively. Once finished you can drop the
+page_table_lock.
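+
+Put together, a schematic sketch of that sequence (mmap_sem is assumed
+to be held; the "native" and "fallback" parts are left as comments):
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_trans_huge(*pmd)) {
+		spin_lock(&mm->page_table_lock);
+		if (pmd_trans_huge(*pmd)) {
+			if (pmd_trans_splitting(*pmd)) {
+				spin_unlock(&mm->page_table_lock);
+				/* split_huge_page is already splitting it */
+				wait_split_huge_page(vma->anon_vma, pmd);
+			} else {
+				/* ... handle the huge pmd natively ... */
+				spin_unlock(&mm->page_table_lock);
+				return;
+			}
+		} else {
+			/* it got split under us; use the regular paths */
+			spin_unlock(&mm->page_table_lock);
+		}
+	}
+	/* ... fall back to the regular pte walk ... */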
+
+== compound_lock, get_user_pages and put_page ==
+
+split_huge_page internally has to distribute the refcounts in the head
+page to the tail pages before clearing all PG_head/tail bits from the
+page structures. It can do that easily for refcounts taken by huge pmd
+mappings. But the GUP API as created by hugetlbfs (which returns head
+and tail pages if running get_user_pages on an address backed by any
+hugepage) requires the refcount to be accounted on the tail pages and
+not only on the head pages, if we want to be able to run
+split_huge_page while there are gup pins established on any tail
+page. Not being able to run split_huge_page if there's any gup pin
+on any tail page would mean having to split all hugepages upfront in
+get_user_pages, which is unacceptable as too many gup users are
+performance critical and must work natively on hugepages like
+they work natively on hugetlbfs already (hugetlbfs is simpler because
+hugetlbfs pages cannot be split, so there is no requirement to
+account the pins on the tail pages for hugetlbfs). If we didn't
+account the gup refcounts on the tail pages during gup, we wouldn't
+know which tail page is pinned by gup and which is not while we run
+split_huge_page. But we still have to add the gup pin to the head page
+too, to know when we can free the compound page in case it's never
+split during its lifetime. That requires changing not just
+get_page, but put_page as well so that when put_page runs on a tail
+page (and only on a tail page) it will find its respective head page,
+and then it will decrease the head page refcount in addition to the
+tail page refcount. To obtain a head page reliably and to decrease its
+refcount without race conditions, put_page has to serialize against
+__split_huge_page_refcount using a special per-page lock called
+compound_lock.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index f8101d6..75613c9 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -2,3 +2,5 @@
 	- This file
 w1_therm
 	- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
+w1_ds2423
+	- The Maxim/Dallas Semiconductor ds2423 counter device.
diff --git a/Documentation/w1/slaves/w1_ds2423 b/Documentation/w1/slaves/w1_ds2423
new file mode 100644
index 0000000..90a65d2
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds2423
@@ -0,0 +1,47 @@
+Kernel driver w1_ds2423
+=======================
+
+Supported chips:
+  * Maxim DS2423 based counter devices.
+
+Supported family codes:
+	W1_COUNTER_DS2423	0x1D
+
+Author: Mika Laitio <lamikr@pilppa.org>
+
+Description
+-----------
+
+Support is provided through the sysfs w1_slave file. Each open and
+read sequence of the w1_slave file initiates a read of the counters and
+ram available in DS2423 pages 12 - 15.
+
+The result for each page is provided as ASCII output where each counter
+value and the associated ram buffer is output on its own line.
+
+Each line will contain the values of 42 bytes read from the counter and
+memory page, along with crc=YES or NO indicating whether the read
+operation was successful and the CRC matched.
+If the operation was successful, the end of each line also contains the
+counter value expressed as an integer after c=
+
+The meaning of the 42 bytes represented is as follows:
+ - 1 byte from ram page
+ - 4 bytes for the counter value
+ - 4 zero bytes
+ - 2 bytes for crc16 which was calculated from the data read since the previous crc bytes
+ - 31 remaining bytes from the ram page
+ - crc=YES/NO indicating whether read was ok and crc matched
+ - c=<int> current counter value
+
+Example of a successful read:
+00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 02 00 00 00 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 29 c6 5d 18 00 00 00 00 04 37 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=408798761
+00 05 00 00 00 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=YES c=5
+
+Example of a read with crc errors:
+00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 02 00 00 22 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO
+00 e1 61 5d 19 00 00 00 00 df 0b 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO
+00 05 00 00 20 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=NO
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index bdeb81c..9b7221a 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -622,9 +622,9 @@
   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
   numbers.  The currently supported compression formats are gzip
-  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
-  (magic number 5D 00).  The uncompressed payload is currently always ELF
-  (magic number 7F 45 4C 46).
+  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA
+  (magic number 5D 00), and XZ (magic number FD 37).  The uncompressed
+  payload is currently always ELF (magic number 7F 45 4C 46).
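+
+  For illustration only (a hypothetical helper, not part of the boot
+  protocol), a loader could sniff the payload format from those magic
+  numbers like this:
+
+	static const char *payload_format(const unsigned char *p)
+	{
+		if (p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
+			return "gzip";
+		if (p[0] == 0x42 && p[1] == 0x5a)
+			return "bzip2";
+		if (p[0] == 0x5d && p[1] == 0x00)
+			return "lzma";
+		if (p[0] == 0xfd && p[1] == 0x37)
+			return "xz";
+		if (p[0] == 0x7f && p[1] == 0x45 && p[2] == 0x4c && p[3] == 0x46)
+			return "elf";
+		return "unknown";
+	}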
   
 Field name:	payload_length
 Type:		read
diff --git a/Documentation/xz.txt b/Documentation/xz.txt
new file mode 100644
index 0000000..2cf3e26
--- /dev/null
+++ b/Documentation/xz.txt
@@ -0,0 +1,121 @@
+
+XZ data compression in Linux
+============================
+
+Introduction
+
+    XZ is a general purpose data compression format with high compression
+    ratio and relatively fast decompression. The primary compression
+    algorithm (filter) is LZMA2. Additional filters can be used to improve
+    compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
+    improve compression ratio of executable data.
+
+    The XZ decompressor in Linux is called XZ Embedded. It supports
+    the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
+    for integrity checking. The home page of XZ Embedded is at
+    <http://tukaani.org/xz/embedded.html>, where you can find the
+    latest version and also information about using the code outside
+    the Linux kernel.
+
+    For userspace, XZ Utils provide a zlib-like compression library
+    and a gzip-like command line tool. XZ Utils can be downloaded from
+    <http://tukaani.org/xz/>.
+
+XZ related components in the kernel
+
+    The xz_dec module provides an XZ decompressor with single-call
+    (buffer to buffer) and multi-call (stateful) APIs. The usage of the
+    xz_dec module is documented in include/linux/xz.h.
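+
+    As a rough sketch only (check include/linux/xz.h for the
+    authoritative definitions; in_buf/out_buf and their lengths are
+    assumed to be provided by the caller), single-call decompression
+    looks roughly like this:
+
+        struct xz_buf b = {
+            .in = in_buf, .in_pos = 0, .in_size = in_len,
+            .out = out_buf, .out_pos = 0, .out_size = out_len,
+        };
+        struct xz_dec *s;
+        enum xz_ret ret;
+
+        s = xz_dec_init(XZ_SINGLE, 0);  /* single-call (buffer to buffer) mode */
+        if (!s)
+            return -ENOMEM;
+        ret = xz_dec_run(s, &b);        /* decodes the whole stream at once */
+        xz_dec_end(s);
+        if (ret != XZ_STREAM_END)
+            return -EINVAL;
+        /* b.out_pos is now the number of decompressed bytes */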
+
+    The xz_dec_test module is for testing xz_dec. xz_dec_test is not
+    useful unless you are hacking the XZ decompressor. xz_dec_test
+    allocates a char device major dynamically to which one can write
+    .xz files from userspace. The decompressed output is thrown away.
+    Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
+    See the xz_dec_test source code for the details.
+
+    For decompressing the kernel image, initramfs, and initrd, there
+    is a wrapper function in lib/decompress_unxz.c. Its API is the
+    same as in other decompress_*.c files, which is defined in
+    include/linux/decompress/generic.h.
+
+    scripts/xz_wrap.sh is a wrapper for the xz command line tool found
+    in XZ Utils. The wrapper sets compression options to values suitable
+    for compressing the kernel image.
+
+    For kernel makefiles, two commands are provided for use with
+    $(call if_changed). The kernel image should be compressed with
+    $(call if_changed,xzkern), which will use a BCJ filter and a big
+    LZMA2 dictionary. It will also append a four-byte trailer containing
+    the uncompressed size of the file, which is needed by the boot code.
+    Other things should be compressed with $(call if_changed,xzmisc),
+    which will use no BCJ filter and a 1 MiB LZMA2 dictionary.
+
+Notes on compression options
+
+    Since XZ Embedded supports only streams with no integrity check or
+    CRC32, make sure that you don't use some other integrity check type
+    when encoding files that are supposed to be decoded by the kernel. With
+    liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
+    when encoding. With the xz command line tool, use --check=none or
+    --check=crc32.
+
+    Using CRC32 is strongly recommended unless there is some other layer
+    which will verify the integrity of the uncompressed data anyway.
+    Double checking the integrity would probably be a waste of CPU cycles.
+    Note that the headers will always have a CRC32 which will be validated
+    by the decoder; you can only change the integrity check type (or
+    disable it) for the actual uncompressed data.
+
+    In userspace, LZMA2 is typically used with dictionary sizes of several
+    megabytes. The decoder needs to have the dictionary in RAM, thus big
+    dictionaries cannot be used for files that are intended to be decoded
+    by the kernel. 1 MiB is probably the maximum reasonable dictionary
+    size for in-kernel use (maybe more is OK for initramfs). The presets
+    in XZ Utils may not be optimal when creating files for the kernel,
+    so don't hesitate to use custom settings. Example:
+
+        xz --check=crc32 --lzma2=dict=512KiB inputfile
+
+    An exception to the above dictionary size limitation is when the decoder
+    is used in single-call mode. Decompressing the kernel itself is an
+    example of this situation. In single-call mode, the memory usage
+    doesn't depend on the dictionary size, and it is perfectly fine to
+    use a big dictionary: for maximum compression, the dictionary should
+    be at least as big as the uncompressed data itself.
+
+Future plans
+
+    Creating a limited XZ encoder may be considered if people think it is
+    useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
+    the fastest settings, so it isn't clear whether an LZMA2 encoder is
+    wanted in the kernel.
+
+    Support for limited random-access reading is planned for the
+    decompression code. I don't know if it could have any use in the
+    kernel, but I know that it would be useful in some embedded projects
+    outside the Linux kernel.
+
+Conformance to the .xz file format specification
+
+    There are a couple of corner cases where things have been simplified
+    at the expense of detecting errors as early as possible. These should
+    not matter in practice at all, since they don't cause security issues.
+    But it is good to know this if testing the code e.g. with the test
+    files from XZ Utils.
+
+Reporting bugs
+
+    Before reporting a bug, please check that it hasn't already been fixed
+    upstream. See <http://tukaani.org/xz/embedded.html> to get the
+    latest code.
+
+    Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
+    Freenode and talk to Larhzu. I don't actively read LKML or other
+    kernel-related mailing lists, so if there's something I should know,
+    you should email me personally or use IRC.
+
+    Don't bother Igor Pavlov with questions about the XZ implementation
+    in the kernel or about XZ Utils. While these two implementations
+    include essential code that is directly based on Igor Pavlov's code,
+    they are not maintained or supported by him.
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
index 6916077..faf976c 100644
--- a/Documentation/zh_CN/HOWTO
+++ b/Documentation/zh_CN/HOWTO
@@ -347,8 +347,8 @@
 最新bug的通知,可以订阅bugme-new邮件列表(只有新的bug报告会被寄到这里)
 或者订阅bugme-janitor邮件列表(所有bugzilla的变动都会被寄到这里)。
 
-	http://lists.osdl.org/mailman/listinfo/bugme-new
-	http://lists.osdl.org/mailman/listinfo/bugme-janitors
+	https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+	https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 邮件列表
diff --git a/Documentation/zh_CN/SubmittingDrivers b/Documentation/zh_CN/SubmittingDrivers
index c27b0f6..5889f8d 100644
--- a/Documentation/zh_CN/SubmittingDrivers
+++ b/Documentation/zh_CN/SubmittingDrivers
@@ -61,7 +61,7 @@
 Linux 2.6:
 	除了遵循和 2.4 版内核同样的规则外,你还需要在 linux-kernel 邮件
 	列表上跟踪最新的 API 变化。向 Linux 2.6 内核提交驱动的顶级联系人
-	是 Andrew Morton <akpm@osdl.org>。
+	是 Andrew Morton <akpm@linux-foundation.org>。
 
 决定设备驱动能否被接受的条件
 ----------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index fd78afa..89e4d4b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -285,6 +285,41 @@
 S:	Maintained
 F:	sound/pci/ad1889.*
 
+AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD5254
+S:	Supported
+F:	drivers/misc/ad525x_dpot.c
+
+AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD5398
+S:	Supported
+F:	drivers/regulator/ad5398.c
+
+AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7142
+S:	Supported
+F:	drivers/input/misc/ad714x.c
+
+AD7877 TOUCHSCREEN DRIVER
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7877
+S:	Supported
+F:	drivers/input/touchscreen/ad7877.c
+
+AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7879
+S:	Supported
+F:	drivers/input/touchscreen/ad7879.c
+
 ADM1025 HARDWARE MONITOR DRIVER
 M:	Jean Delvare <khali@linux-fr.org>
 L:	lm-sensors@lm-sensors.org
@@ -304,6 +339,32 @@
 S:	Orphan
 F:	drivers/net/wireless/adm8211.*
 
+ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP5520
+S:	Supported
+F:	drivers/mfd/adp5520.c
+F:	drivers/video/backlight/adp5520_bl.c
+F:	drivers/leds/leds-adp5520.c
+F:	drivers/gpio/adp5520-gpio.c
+F:	drivers/input/keyboard/adp5520-keys.c
+
+ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP5588
+S:	Supported
+F:	drivers/input/keyboard/adp5588-keys.c
+F:	drivers/gpio/adp5588-gpio.c
+
+ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP8860
+S:	Supported
+F:	drivers/video/backlight/adp8860_bl.c
+
 ADT746X FAN DRIVER
 M:	Colin Leroy <colin@colino.net>
 S:	Maintained
@@ -316,6 +377,13 @@
 F:	Documentation/hwmon/adt7475
 F:	drivers/hwmon/adt7475.c
 
+ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADXL345
+S:	Supported
+F:	drivers/input/misc/adxl34x.c
+
 ADVANSYS SCSI DRIVER
 M:	Matthew Wilcox <matthew@wil.cx>
 L:	linux-scsi@vger.kernel.org
@@ -428,7 +496,6 @@
 F:	arch/x86/kernel/microcode_amd.c
 
 AMS (Apple Motion Sensor) DRIVER
-M:	Stelian Pop <stelian@popies.net>
 M:	Michael Hanselmann <linux-kernel@hansmi.ch>
 S:	Supported
 F:	drivers/macintosh/ams/
@@ -440,16 +507,22 @@
 S:	Maintained
 F:	drivers/infiniband/hw/amso1100/
 
+ANALOG DEVICES INC ASOC CODEC DRIVERS
+L:	device-driver-devel@blackfin.uclinux.org
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+W:	http://wiki-analog.com/
+S:	Supported
+F:	sound/soc/codecs/ad1*
+F:	sound/soc/codecs/adau*
+F:	sound/soc/codecs/adav*
+F:	sound/soc/codecs/ssm*
+
 ANALOG DEVICES INC ASOC DRIVERS
 L:	uclinux-dist-devel@blackfin.uclinux.org
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org/
 S:	Supported
 F:	sound/soc/blackfin/*
-F:	sound/soc/codecs/ad1*
-F:	sound/soc/codecs/adau*
-F:	sound/soc/codecs/adav*
-F:	sound/soc/codecs/ssm*
 
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
@@ -1423,7 +1496,9 @@
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 M:	Brett Rudley <brudley@broadcom.com>
 M:	Henry Ptasinski <henryp@broadcom.com>
-M:	Nohee Ko <noheek@broadcom.com>
+M:	Dowan Kim <dowan@broadcom.com>
+M:	Roland Vossen <rvossen@broadcom.com>
+M:	Arend van Spriel <arend@broadcom.com>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/staging/brcm80211/
@@ -1448,6 +1523,14 @@
 F:	block/bsg.c
 F:	include/linux/bsg.h
 
+BT87X AUDIO DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	Documentation/sound/alsa/Bt87x.txt
+F:	sound/pci/bt87x.c
+
 BT8XXGPIO DRIVER
 M:	Michael Buesch <mb@bu3sch.de>
 W:	http://bu3sch.de/btgpio.php
@@ -1473,6 +1556,13 @@
 F:	Documentation/video4linux/bttv/
 F:	drivers/media/video/bt8xx/bttv*
 
+C-MEDIA CMI8788 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/pci/oxygen/
+
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com
@@ -1709,7 +1799,8 @@
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs.git
 S:	Supported
 F:	fs/configfs/
 F:	include/linux/configfs.h
@@ -1931,7 +2022,7 @@
 DCCP PROTOCOL
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L:	dccp@vger.kernel.org
-W:	http://linux-net.osdl.org/index.php/DCCP
+W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:	Maintained
 F:	include/linux/dccp.h
 F:	include/linux/tfrc.h
@@ -2263,6 +2354,13 @@
 S:	Maintained
 F:	drivers/edac/r82600_edac.c
 
+EDIROL UA-101/UA-1000 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/usb/misc/ua101.c
+
 EEEPC LAPTOP EXTRAS DRIVER
 M:	Corentin Chary <corentincj@iksaif.net>
 L:	acpi4asus-user@lists.sourceforge.net
@@ -2271,6 +2369,14 @@
 S:	Maintained
 F:	drivers/platform/x86/eeepc-laptop.c
 
+EEEPC WMI EXTRAS DRIVER
+M:	Corentin Chary <corentincj@iksaif.net>
+L:	acpi4asus-user@lists.sourceforge.net
+L:	platform-driver-x86@vger.kernel.org
+W:	http://acpi4asus.sf.net
+S:	Maintained
+F:	drivers/platform/x86/eeepc-wmi.c
+
 EFIFB FRAMEBUFFER DRIVER
 L:	linux-fbdev@vger.kernel.org
 M:	Peter Jones <pjones@redhat.com>
@@ -2345,7 +2451,7 @@
 M:	Stephen Hemminger <shemminger@linux-foundation.org>
 L:	bridge@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
-W:	http://www.linux-foundation.org/en/Net:Bridge
+W:	http://www.linuxfoundation.org/en/Net:Bridge
 S:	Maintained
 F:	include/linux/netfilter_bridge/
 F:	net/bridge/
@@ -2608,6 +2714,14 @@
 F:	drivers/i2c/busses/i2c-gpio.c
 F:	include/linux/i2c-gpio.h
 
+GENERIC GPIO I2C MULTIPLEXER DRIVER
+M:	Peter Korsgaard <peter.korsgaard@barco.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/muxes/gpio-i2cmux.c
+F:	include/linux/gpio-i2cmux.h
+F:	Documentation/i2c/muxes/gpio-i2cmux
+
 GENERIC HDLC (WAN) DRIVERS
 M:	Krzysztof Halasa <khc@pm.waw.pl>
 W:	http://www.kernel.org/pub/linux/utils/net/hdlc/
@@ -3065,8 +3179,10 @@
 INPUT MULTITOUCH (MT) PROTOCOL
 M:	Henrik Rydberg <rydberg@euromail.se>
 L:	linux-input@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
 S:	Maintained
 F:	Documentation/input/multi-touch-protocol.txt
+F:	drivers/input/input-mt.c
 K:	\b(ABS|SYN)_MT_
 
 INTEL IDLE DRIVER
@@ -3413,6 +3529,13 @@
 S:	Maintained
 F:	drivers/serial/jsm/
 
+K10TEMP HARDWARE MONITORING DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/k10temp
+F:	drivers/hwmon/k10temp.c
+
 K8TEMP HARDWARE MONITORING DRIVER
 M:	Rudolf Marek <r.marek@assembler.cz>
 L:	lm-sensors@lm-sensors.org
@@ -3998,9 +4121,8 @@
 F:	kernel/module.c
 
 MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER
-M:	Stelian Pop <stelian@popies.net>
 W:	http://popies.net/meye/
-S:	Maintained
+S:	Orphan
 F:	Documentation/video4linux/meye.txt
 F:	drivers/media/video/meye.*
 F:	include/linux/meye.h
@@ -4266,6 +4388,7 @@
 M:	KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
 L:	linux-nilfs@vger.kernel.org
 W:	http://www.nilfs.org/en/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2.git
 S:	Supported
 F:	Documentation/filesystems/nilfs2.txt
 F:	fs/nilfs2/
@@ -4287,11 +4410,11 @@
 F:	drivers/scsi/nsp32*
 
 NTFS FILESYSTEM
-M:	Anton Altaparmakov <aia21@cantab.net>
+M:	Anton Altaparmakov <anton@tuxera.com>
 L:	linux-ntfs-dev@lists.sourceforge.net
-W:	http://www.linux-ntfs.org/
+W:	http://www.tuxera.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
-S:	Maintained
+S:	Supported
 F:	Documentation/filesystems/ntfs.txt
 F:	fs/ntfs/
 
@@ -4363,6 +4486,20 @@
 S:	Maintained
 F:	drivers/char/hw_random/omap-rng.c
 
+OMAP HWMOD SUPPORT
+M:	Benoît Cousson <b-cousson@ti.com>
+M:	Paul Walmsley <paul@pwsan.com>
+L:	linux-omap@vger.kernel.org
+S:	Maintained
+F:	arch/arm/mach-omap2/omap_hwmod.c
+F:	arch/arm/plat-omap/include/plat/omap_hwmod.h
+
+OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
+M:	Benoît Cousson <b-cousson@ti.com>
+L:	linux-omap@vger.kernel.org
+S:	Maintained
+F:	arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+
 OMAP USB SUPPORT
 M:	Felipe Balbi <balbi@ti.com>
 M:	David Brownell <dbrownell@users.sourceforge.net>
@@ -4429,6 +4566,13 @@
 F:	include/linux/of*.h
 K:	of_get_property
 
+OPL4 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/drivers/opl4/
+
 OPROFILE
 M:	Robert Richter <robert.richter@amd.com>
 L:	oprofile-list@lists.sf.net
@@ -4440,7 +4584,7 @@
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:	Mark Fasheh <mfasheh@suse.com>
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://oss.oracle.com/projects/ocfs2/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
@@ -4525,7 +4669,7 @@
 M:	Chris Wright <chrisw@sous-sol.org>
 M:	Alok Kataria <akataria@vmware.com>
 M:	Rusty Russell <rusty@rustcorp.com.au>
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 S:	Supported
 F:	Documentation/ia64/paravirt_ops.txt
 F:	arch/*/kernel/paravirt*
@@ -4637,6 +4781,16 @@
 F:	crypto/pcrypt.c
 F:	include/crypto/pcrypt.h
 
+PER-CPU MEMORY ALLOCATOR
+M:	Tejun Heo <tj@kernel.org>
+M:	Christoph Lameter <cl@linux-foundation.org>
+L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+S:	Maintained
+F:	include/linux/percpu*.h
+F:	mm/percpu*.c
+F:	arch/*/include/asm/percpu.h
+
 PER-TASK DELAY ACCOUNTING
 M:	Balbir Singh <balbir@linux.vnet.ibm.com>
 S:	Maintained
@@ -5013,11 +5167,6 @@
 F:	kernel/srcu*
 X:	kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER (LEGACY)
-M:	Paul Gortmaker <p_gortmaker@yahoo.com>
-S:	Maintained
-F:	drivers/char/rtc.c
-
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 L:	rtc-linux@googlegroups.com
@@ -5123,8 +5272,7 @@
 F:	drivers/s390/net/
 
 S390 ZCRYPT DRIVER
-M:	Felix Beck <felix.beck@de.ibm.com>
-M:	Ralph Wuerthner <ralph.wuerthner@de.ibm.com>
+M:	Holger Dengler <hd@linux.vnet.ibm.com>
 M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
@@ -5170,7 +5318,7 @@
 M:	Jassi Brar <jassi.brar@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
-F:	sound/soc/s3c24xx
+F:	sound/soc/samsung
 
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
@@ -5903,7 +6051,8 @@
 TOMOYO SECURITY MODULE
 M:	Kentaro Takeda <takedakn@nttdata.co.jp>
 M:	Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
-L:	tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English)
+L:	tomoyo-dev-en@lists.sourceforge.jp (subscribers-only, for developers in English)
+L:	tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
 L:	tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
 L:	tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
 W:	http://tomoyo.sourceforge.jp/
@@ -6177,6 +6326,13 @@
 W:	http://www.one-eyed-alien.net/~mdharm/linux-usb/
 F:	drivers/usb/storage/
 
+USB MIDI DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/usb/midi.*
+
 USB OHCI DRIVER
 M:	David Brownell <dbrownell@users.sourceforge.net>
 L:	linux-usb@vger.kernel.org
@@ -6416,7 +6572,7 @@
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/vhost/
@@ -6435,13 +6591,12 @@
 F:	drivers/i2c/busses/i2c-viapro.c
 
 VIA SD/MMC CARD CONTROLLER DRIVER
-M:	Joseph Chan <JosephChan@via.com.tw>
+M:	Bruce Chang <brucechang@via.com.tw>
 M:	Harald Welte <HaraldWelte@viatech.com>
 S:	Maintained
 F:	drivers/mmc/host/via-sdmmc.c
 
 VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
-M:	Joseph Chan <JosephChan@via.com.tw>
 M:	Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
@@ -6466,7 +6621,7 @@
 
 VLYNQ BUS
 M:	Florian Fainelli <florian@openwrt.org>
-L:	openwrt-devel@lists.openwrt.org
+L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Maintained
 F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
@@ -6686,7 +6841,7 @@
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 S:	Supported
 F:	arch/x86/xen/
 F:	drivers/*/xen-*front.c
diff --git a/Makefile b/Makefile
index 74b2555..6a45769 100644
--- a/Makefile
+++ b/Makefile
@@ -224,6 +224,7 @@
 endif
 
 KCONFIG_CONFIG	?= .config
+export KCONFIG_CONFIG
 
 # SHELL used by kbuild
 CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h
index 59617c3..034b6cf 100644
--- a/arch/alpha/include/asm/ioctls.h
+++ b/arch/alpha/include/asm/ioctls.h
@@ -92,6 +92,7 @@
 #define TIOCGSID	0x5429  /* Return the session ID of FD */
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
 
 #define TIOCSERCONFIG	0x5453
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h
index 99c56d4..72db984 100644
--- a/arch/alpha/include/asm/mman.h
+++ b/arch/alpha/include/asm/mman.h
@@ -53,6 +53,9 @@
 #define MADV_MERGEABLE   12		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 0f1d849..c1f3e7c 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -506,7 +506,7 @@
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
  		retval = -1;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 53ea547..629ff82 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -632,9 +632,15 @@
 	  (clock and power control, etc).
 
 config ARCH_SHMOBILE
-	bool "Renesas SH-Mobile"
+	bool "Renesas SH-Mobile / R-Mobile"
+	select HAVE_CLK
+	select CLKDEV_LOOKUP
+	select GENERIC_CLOCKEVENTS
+	select NO_IOPORT
+	select SPARSE_IRQ
+	select MULTI_IRQ_HANDLER
 	help
-	  Support for Renesas's SH-Mobile ARM platforms
+	  Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
 
 config ARCH_RPC
 	bool "RiscPC"
@@ -1252,7 +1258,7 @@
 	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
 		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
 		 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
-		 ARCH_MSM_SCORPIONMP
+		 ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE
 	select USE_GENERIC_SMP_HELPERS
 	select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
 	help
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 42ff90b..665ebf7 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -236,7 +236,7 @@
 
 /*
  * The following functions are needed for DMA bouncing.
- * ITE8152 chip can addrees up to 64MByte, so all the devices
+ * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
 
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index ba65f6e..cb660bc 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -70,7 +70,7 @@
  * vic_init2 - common initialisation code
  * @base: Base of the VIC.
  *
- * Common initialisation code for registeration
+ * Common initialisation code for registration
  * and resume.
 */
 static void vic_init2(void __iomem *base)
diff --git a/arch/arm/configs/ag5evm_defconfig b/arch/arm/configs/ag5evm_defconfig
new file mode 100644
index 0000000..2b9cf56
--- /dev/null
+++ b/arch/arm/configs/ag5evm_defconfig
@@ -0,0 +1,83 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_SHMOBILE=y
+CONFIG_ARCH_SH73A0=y
+CONFIG_MACH_AG5EVM=y
+CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_CPU_BPREDICT_DISABLE=y
+CONFIG_ARM_ERRATA_430973=y
+CONFIG_ARM_ERRATA_458693=y
+CONFIG_NO_HZ=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=tty0 console=ttySC2,115200 earlyprintk=sh-sci.2,115200 ignore_loglevel"
+CONFIG_CMDLINE_FORCE=y
+CONFIG_KEXEC=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_SPARSEKMAP=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=9
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_SH_MOBILE=y
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+CONFIG_FB=y
+CONFIG_FB_SH_MOBILE_LCDC=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/configs/ams_delta_defconfig b/arch/arm/configs/ams_delta_defconfig
deleted file mode 100644
index 75de45e..0000000
--- a/arch/arm/configs/ams_delta_defconfig
+++ /dev/null
@@ -1,121 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_TREE_PREEMPT_RCU=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-# CONFIG_KALLSYMS is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_OMAP_MBOX_FWK=m
-CONFIG_MACH_AMS_DELTA=y
-CONFIG_OMAP_ARM_150MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200n8 root=/dev/ram0 initrd=0x11c00000,4M"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-# CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IPV6=y
-# CONFIG_FW_LOADER is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AMS_DELTA=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_USB_CATC=y
-CONFIG_USB_KAWETH=y
-CONFIG_USB_PEGASUS=y
-CONFIG_USB_RTL8150=y
-CONFIG_USB_USBNET=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_OMAP=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_OMAP=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_6x11=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SOC=y
-CONFIG_SND_OMAP_SOC=y
-CONFIG_SND_OMAP_SOC_AMS_DELTA=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_AMS_DELTA=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_NFS_FS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/htcherald_defconfig b/arch/arm/configs/htcherald_defconfig
deleted file mode 100644
index edfa1c0..0000000
--- a/arch/arm/configs/htcherald_defconfig
+++ /dev/null
@@ -1,73 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_ARCH_OMAP850=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_MACH_HERALD=y
-CONFIG_OMAP_ARM_195MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-CONFIG_CPU_ARM925T=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 ip=dhcp"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=m
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_OMAP=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-# CONFIG_USB_ETH_RNDIS is not set
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/mackerel_defconfig b/arch/arm/configs/mackerel_defconfig
new file mode 100644
index 0000000..306a2e2
--- /dev/null
+++ b/arch/arm/configs/mackerel_defconfig
@@ -0,0 +1,138 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_SHMOBILE=y
+CONFIG_ARCH_SH7372=y
+CONFIG_MACH_MACKEREL=y
+CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_FORCE_MAX_ZONEORDER=15
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 earlyprintk=sh-sci.0,115200 root=/dev/nfs nfsroot=,tcp,v3 ip=dhcp memchunk.vpu=64m memchunk.veu0=8m memchunk.spu0=2m mem=240m"
+CONFIG_KEXEC=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM=y
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_ARM_INTEGRATOR=y
+CONFIG_MTD_BLOCK2MTD=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=8
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_MFD_SUPPORT is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_SH_MOBILE_LCDC=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_ARM_UNWIND is not set
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/arm/configs/n770_defconfig b/arch/arm/configs/n770_defconfig
deleted file mode 100644
index 993e94d..0000000
--- a/arch/arm/configs/n770_defconfig
+++ /dev/null
@@ -1,138 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_OMAP_RESET_CLOCKS=y
-# CONFIG_OMAP_MUX is not set
-CONFIG_OMAP_MBOX_FWK=y
-CONFIG_OMAP_32K_TIMER=y
-CONFIG_OMAP_DM_TIMER=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_ARCH_OMAP16XX=y
-CONFIG_MACH_NOKIA770=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-CONFIG_OMAP_ARM_216MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=1f03 rootfstype=jffs2 time"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-CONFIG_BT=y
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
-CONFIG_BT_RFCOMM=y
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=y
-CONFIG_BT_HIDP=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_CONNECTOR=y
-# CONFIG_PROC_EVENTS is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_NETDEVICES=y
-CONFIG_TUN=y
-CONFIG_NET_ETHERNET=y
-CONFIG_USB_USBNET=y
-# CONFIG_USB_NET_AX8817X is not set
-# CONFIG_USB_NET_CDC_SUBSET is not set
-CONFIG_PPP=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=y
-CONFIG_I2C_OMAP=y
-CONFIG_SPI=y
-CONFIG_SPI_OMAP_UWIRE=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_OMAP_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_OMAP=y
-CONFIG_FB_OMAP_LCDC_EXTERNAL=y
-CONFIG_FB_OMAP_LCDC_HWA742=y
-CONFIG_FB_OMAP_MANUAL_UPDATE=y
-CONFIG_FB_OMAP_LCD_MIPID=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-CONFIG_SND_DUMMY=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_SUSPEND=y
-CONFIG_USB_OTG=y
-# CONFIG_USB_OTG_WHITELIST is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_PL2303=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_FILE_STORAGE_TEST=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_SECURITY=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
new file mode 100644
index 0000000..a350cc6
--- /dev/null
+++ b/arch/arm/configs/omap1_defconfig
@@ -0,0 +1,286 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_SHMEM is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLOB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_OMAP=y
+CONFIG_ARCH_OMAP1=y
+CONFIG_OMAP_RESET_CLOCKS=y
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MBOX_FWK=y
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_DM_TIMER=y
+CONFIG_ARCH_OMAP730=y
+CONFIG_ARCH_OMAP850=y
+CONFIG_ARCH_OMAP16XX=y
+CONFIG_MACH_OMAP_INNOVATOR=y
+CONFIG_MACH_OMAP_H2=y
+CONFIG_MACH_OMAP_H3=y
+CONFIG_MACH_OMAP_HTCWIZARD=y
+CONFIG_MACH_HERALD=y
+CONFIG_MACH_OMAP_OSK=y
+CONFIG_MACH_OMAP_PERSEUS2=y
+CONFIG_MACH_OMAP_FSAMPLE=y
+CONFIG_MACH_VOICEBLUE=y
+CONFIG_MACH_OMAP_PALMTE=y
+CONFIG_MACH_OMAP_PALMZ71=y
+CONFIG_MACH_OMAP_PALMTT=y
+CONFIG_MACH_SX1=y
+CONFIG_MACH_NOKIA770=y
+CONFIG_MACH_AMS_DELTA=y
+CONFIG_MACH_OMAP_GENERIC=y
+CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
+CONFIG_OMAP_ARM_216MHZ=y
+CONFIG_OMAP_ARM_195MHZ=y
+CONFIG_OMAP_ARM_192MHZ=y
+CONFIG_OMAP_ARM_182MHZ=y
+CONFIG_OMAP_ARM_168MHZ=y
+# CONFIG_OMAP_ARM_60MHZ is not set
+# CONFIG_ARM_THUMB is not set
+CONFIG_PCCARD=y
+CONFIG_OMAP_CF=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_LEDS=y
+CONFIG_LEDS_CPU=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=1f03 rootfstype=jffs2"
+CONFIG_FPE_NWFPE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_NETFILTER=y
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_HIDP=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_CONNECTOR=y
+# CONFIG_PROC_EVENTS is not set
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=3
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_NAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_IDE=m
+CONFIG_BLK_DEV_IDECS=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_PHYLIB=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+CONFIG_USB_CATC=y
+CONFIG_USB_KAWETH=y
+CONFIG_USB_PEGASUS=y
+CONFIG_USB_RTL8150=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+CONFIG_PPP=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_SLIP=y
+CONFIG_SLIP_COMPRESSED=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=3
+CONFIG_SERIAL_8250_RUNTIME_UARTS=3
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_OMAP_UWIRE=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_FB_OMAP=y
+CONFIG_FB_OMAP_LCDC_EXTERNAL=y
+CONFIG_FB_OMAP_LCDC_HWA742=y
+CONFIG_FB_OMAP_MANUAL_UPDATE=y
+CONFIG_FB_OMAP_LCD_MIPID=y
+CONFIG_FB_OMAP_BOOTLOADER_INIT=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_6x11=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+CONFIG_SND_DUMMY=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_TEST=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+# CONFIG_USB_ETH_RNDIS is not set
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_FILE_STORAGE_TEST=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_OMAP=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_OMAP=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=866
+CONFIG_FAT_DEFAULT_IOCHARSET="koi8-r"
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_ERRORS=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/omap_generic_1510_defconfig b/arch/arm/configs/omap_generic_1510_defconfig
deleted file mode 100644
index 0e42ba4e..0000000
--- a/arch/arm/configs/omap_generic_1510_defconfig
+++ /dev/null
@@ -1,84 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS2,115200 root=0803 ro init=/bin/sh"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_USB_RTL8150=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NFS_FS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/omap_generic_1610_defconfig b/arch/arm/configs/omap_generic_1610_defconfig
deleted file mode 100644
index 5e536cf..0000000
--- a/arch/arm/configs/omap_generic_1610_defconfig
+++ /dev/null
@@ -1,87 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_ARCH_OMAP16XX=y
-CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_192MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=ttyS2,115200 root=0803 ro init=/bin/sh"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_USB_RTL8150=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NFS_FS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/omap_generic_1710_defconfig b/arch/arm/configs/omap_generic_1710_defconfig
deleted file mode 100644
index c0867b1..0000000
--- a/arch/arm/configs/omap_generic_1710_defconfig
+++ /dev/null
@@ -1,75 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-# CONFIG_OMAP_MUX is not set
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_ARCH_OMAP16XX=y
-CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_192MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=64M console=tty0 console=ttyS2,115200 root=0801"
-CONFIG_FPE_NWFPE=y
-CONFIG_ARTHUR=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_ALI_M5632=y
-# CONFIG_USB_BELKIN is not set
-# CONFIG_USB_ARMLINUX is not set
-CONFIG_PPP=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_852=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_SECURITY=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_PCBC=y
diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig
deleted file mode 100644
index e2de2aa..0000000
--- a/arch/arm/configs/omap_h2_1610_defconfig
+++ /dev/null
@@ -1,109 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_OMAP_MUX_DEBUG=y
-CONFIG_OMAP_32K_TIMER=y
-CONFIG_OMAP_DM_TIMER=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_ARCH_OMAP16XX=y
-CONFIG_MACH_OMAP_H2=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200n8 root=/dev/ram0 rw initrd=0x10600000,8M ramdisk_size=8192"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_PM_RUNTIME=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEBUG_DRIVER=y
-CONFIG_MTD=y
-CONFIG_MTD_DEBUG=y
-CONFIG_MTD_DEBUG_VERBOSE=3
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-CONFIG_PPP=y
-CONFIG_SLIP=y
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_EVBUG=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=y
-CONFIG_I2C_OMAP=y
-CONFIG_SPI=y
-CONFIG_SPI_OMAP_UWIRE=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_OMAP=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_SUSPEND=y
-CONFIG_USB_OTG=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_TEST=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm/configs/omap_innovator_1510_defconfig b/arch/arm/configs/omap_innovator_1510_defconfig
deleted file mode 100644
index 265af26..0000000
--- a/arch/arm/configs/omap_innovator_1510_defconfig
+++ /dev/null
@@ -1,102 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_MACH_OMAP_INNOVATOR=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-CONFIG_PREEMPT=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200n8 root=/dev/nfs ip=bootp noinitrd"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-CONFIG_USB_RTL8150=y
-CONFIG_USB_USBNET=y
-# CONFIG_USB_NET_CDC_SUBSET is not set
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FB_OMAP=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_LOGO=y
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
diff --git a/arch/arm/configs/omap_innovator_1610_defconfig b/arch/arm/configs/omap_innovator_1610_defconfig
deleted file mode 100644
index cc7fbf8..0000000
--- a/arch/arm/configs/omap_innovator_1610_defconfig
+++ /dev/null
@@ -1,58 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_ARCH_OMAP16XX=y
-CONFIG_MACH_OMAP_INNOVATOR=y
-CONFIG_OMAP_ARM_192MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-CONFIG_CPU_DCACHE_WRITETHROUGH=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=tty0 console=ttyS0,115200 initrd=0x10200000,8M root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_OMAP=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_LOGO=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
diff --git a/arch/arm/configs/omap_osk_5912_defconfig b/arch/arm/configs/omap_osk_5912_defconfig
deleted file mode 100644
index 9105de7..0000000
--- a/arch/arm/configs/omap_osk_5912_defconfig
+++ /dev/null
@@ -1,87 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_OMAP_RESET_CLOCKS=y
-CONFIG_OMAP_32K_TIMER=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_ARCH_OMAP16XX=y
-CONFIG_MACH_OMAP_OSK=y
-CONFIG_OMAP_ARM_192MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_OMAP_CF=y
-CONFIG_NO_HZ=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x10400000,8M root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_OMAP=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_ISO8859_1=m
diff --git a/arch/arm/configs/omap_perseus2_730_defconfig b/arch/arm/configs/omap_perseus2_730_defconfig
deleted file mode 100644
index aa777e6..0000000
--- a/arch/arm/configs/omap_perseus2_730_defconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_ARCH_OMAP730=y
-# CONFIG_ARCH_OMAP15XX is not set
-CONFIG_MACH_OMAP_PERSEUS2=y
-CONFIG_OMAP_ARM_182MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 ip=dhcp"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_NAND=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_VIRTUAL=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
diff --git a/arch/arm/configs/palmte_defconfig b/arch/arm/configs/palmte_defconfig
deleted file mode 100644
index 828d7cb..0000000
--- a/arch/arm/configs/palmte_defconfig
+++ /dev/null
@@ -1,48 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_MACH_OMAP_PALMTE=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_OMAP=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=850
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/palmtt_defconfig b/arch/arm/configs/palmtt_defconfig
deleted file mode 100644
index 31d02c4..0000000
--- a/arch/arm/configs/palmtt_defconfig
+++ /dev/null
@@ -1,56 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_MACH_OMAP_PALMTT=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mmcblk0p2 rw init=/init"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO is not set
-CONFIG_SPI=y
-CONFIG_SPI_OMAP_UWIRE=y
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_OMAP=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/palmz71_defconfig b/arch/arm/configs/palmz71_defconfig
deleted file mode 100644
index c478db6..0000000
--- a/arch/arm/configs/palmz71_defconfig
+++ /dev/null
@@ -1,53 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOCALVERSION="-z71"
-CONFIG_SYSVIPC=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_MACH_OMAP_PALMZ71=y
-CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_ARM_THUMB is not set
-# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=320
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
-CONFIG_SPI=y
-CONFIG_SPI_OMAP_UWIRE=y
-CONFIG_FB=y
-CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_OMAP=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/sx1_defconfig b/arch/arm/configs/sx1_defconfig
deleted file mode 100644
index 20a8618..0000000
--- a/arch/arm/configs/sx1_defconfig
+++ /dev/null
@@ -1,110 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
-# CONFIG_KALLSYMS is not set
-# CONFIG_ELF_CORE is not set
-# CONFIG_BASE_FULL is not set
-# CONFIG_SHMEM is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-CONFIG_SLOB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP1=y
-CONFIG_OMAP_MBOX_FWK=y
-CONFIG_MACH_SX1=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
-# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_FW_LOADER is not set
-CONFIG_CONNECTOR=y
-# CONFIG_PROC_EVENTS is not set
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_RAM=m
-CONFIG_BLK_DEV_RAM_COUNT=2
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_OMAP=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_NR_UARTS=3
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_OMAP=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_OMAP=y
-CONFIG_FB_OMAP_BOOTLOADER_INIT=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_VERBOSE_PROCFS is not set
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_MMC=y
-CONFIG_MMC_OMAP=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_OMAP=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_FAT_DEFAULT_CODEPAGE=866
-CONFIG_FAT_DEFAULT_IOCHARSET="koi8-r"
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_866=y
-CONFIG_NLS_CODEPAGE_1251=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_5=y
-CONFIG_NLS_KOI8_R=y
-CONFIG_NLS_UTF8=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index be80f03..52d86c4 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -9,6 +9,10 @@
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_U8500=y
+CONFIG_UX500_SOC_DB5500=y
+CONFIG_UX500_SOC_DB8500=y
+CONFIG_MACH_U8500=y
+CONFIG_MACH_U5500=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_PREEMPT=y
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index cc42d5f..5aeec1e 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -59,7 +59,17 @@
 #define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
 #define L2X0_CACHE_ID_PART_L210		(1 << 6)
 #define L2X0_CACHE_ID_PART_L310		(3 << 6)
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK	(0x3 << 17)
+
+#define L2X0_AUX_CTRL_MASK			0xc0000fff
+#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16
+#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x3 << 17)
+#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22
+#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26
+#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT		27
+#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT	28
+#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT	29
+#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT		30
 
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
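As a hedged illustration of the new auxiliary-control definitions (the board_l2x0_init() name and the chosen values are hypothetical, not part of this patch), platform code can now build the aux_val/aux_mask pair for l2x0_init() from named shifts instead of magic numbers:

#include <asm/hardware/cache-l2x0.h>

/* Hypothetical platform init: force a 64KB way size and enable early BRESP,
 * keeping every other AUX_CTRL bit at its reset value.  l2x0_init() ANDs the
 * current register value with aux_mask and then ORs in aux_val. */
static void __init board_l2x0_init(void __iomem *l2x0_base)
{
	u32 aux_val  = (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
		       (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT);
	u32 aux_mask = ~(L2X0_AUX_CTRL_WAY_SIZE_MASK |
			 (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));

	l2x0_init(l2x0_base, aux_val, aux_mask);
}

The way-size encoding (0x3 here) and whether early BRESP is appropriate are part-specific, so treat the values as placeholders.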
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 0c1bb68..2cfe816 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -38,17 +38,9 @@
 #ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				__builtin_return_address(0));
 }
 #else /* CONFIG_MMU */
 void *module_alloc(unsigned long size)
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index 7b58c94..de2fd04 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -128,17 +128,17 @@
 		.platform_data	= &my_flash0_platform,
 #endif
 	},
-	{	/* User accessable spi - cs1 (250KHz) */
+	{	/* User accessible spi - cs1 (250KHz) */
 		.modalias	= "spi-cs1",
 		.chip_select	= 1,
 		.max_speed_hz	= 250 * 1000,
 	},
-	{	/* User accessable spi - cs2 (1MHz) */
+	{	/* User accessible spi - cs2 (1MHz) */
 		.modalias	= "spi-cs2",
 		.chip_select	= 2,
 		.max_speed_hz	= 1 * 1000 * 1000,
 	},
-	{	/* User accessable spi - cs3 (10MHz) */
+	{	/* User accessible spi - cs3 (10MHz) */
 		.modalias	= "spi-cs3",
 		.chip_select	= 3,
 		.max_speed_hz	= 10 * 1000 * 1000,
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index dafbacc..ea53f4d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -301,7 +301,7 @@
 }
 
 
-static struct platform_suspend_ops at91_pm_ops ={
+static const struct platform_suspend_ops at91_pm_ops = {
 	.valid	= at91_pm_valid_state,
 	.begin	= at91_pm_begin,
 	.enter	= at91_pm_enter,
diff --git a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
index b3a61d8..96273ff 100644
--- a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
+++ b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
@@ -757,7 +757,7 @@
 		t = t << 1;
 	}
 
-	/* Intialize the result */
+	/* Initialize the result */
 	r = 0;
 
 	do {
diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
index 7b9bac2..6b9be2e 100644
--- a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
+++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
@@ -893,7 +893,7 @@
 */
 /****************************************************************************/
 uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle,	/*  [ IN ]  DMA Channel handle */
-					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controler attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
+					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controller attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
     ) {
 	dmacHw_CBLK_t *pCblk = dmacHw_HANDLE_TO_CBLK(handle);
 
diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
index ff7b436..77f84b4 100644
--- a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
+++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
@@ -316,7 +316,7 @@
 /**
 *  @brief   Check if DMA channel is the flow controller
 *
-*  @return  1 : If DMA is a flow controler
+*  @return  1 : If DMA is a flow controller
 *           0 : Peripheral is the flow controller
 *
 *  @note
diff --git a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
index 5c1c9a0..16225e4 100644
--- a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
+++ b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
@@ -558,7 +558,7 @@
 		t = t << 1;
 	}
 
-	/* Intialize the result */
+	/* Initialize the result */
 	r = 0;
 
 	do {
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 77eb35c..8d1baf3 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -671,7 +671,7 @@
 
 /****************************************************************************/
 /**
-*   Intializes all of the data structures associated with the DMA.
+*   Initializes all of the data structures associated with the DMA.
 *   @return
 *       >= 0    - Initialization was successfull.
 *
diff --git a/arch/arm/mach-bcmring/include/csp/dmacHw.h b/arch/arm/mach-bcmring/include/csp/dmacHw.h
index 5d51013..6c8da2b 100644
--- a/arch/arm/mach-bcmring/include/csp/dmacHw.h
+++ b/arch/arm/mach-bcmring/include/csp/dmacHw.h
@@ -590,7 +590,7 @@
 */
 /****************************************************************************/
 uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle,	/*  [ IN ]  DMA Channel handle  */
-					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controler attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
+					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controller attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
     );
 
 #endif /* _DMACHW_H */
diff --git a/arch/arm/mach-bcmring/include/csp/tmrHw.h b/arch/arm/mach-bcmring/include/csp/tmrHw.h
index f1236d0..2cbb530 100644
--- a/arch/arm/mach-bcmring/include/csp/tmrHw.h
+++ b/arch/arm/mach-bcmring/include/csp/tmrHw.h
@@ -76,7 +76,7 @@
 *           certain time interval
 *
 *  This function initializes a periodic timer to generate timer interrupt
-*  after every time interval in milisecond
+*  after every time interval in millisecond
 *
 *  @return   On success: Effective interval set in mili-second
 *            On failure: 0
@@ -93,7 +93,7 @@
 *           after certain time interval
 *
 *  This function initializes a periodic timer to generate a single ticks after
-*  certain time interval in milisecond
+*  certain time interval in millisecond
 *
 *  @return   On success: Effective interval set in mili-second
 *            On failure: 0
diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
index cbf334d..d67e2f8 100644
--- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
+++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
@@ -28,7 +28,7 @@
 
 /* Data type for DMA Link List Item */
 typedef struct {
-	uint32_t sar;		/* Source Adress Register.
+	uint32_t sar;		/* Source Address Register.
 				   Address must be aligned to CTLx.SRC_TR_WIDTH.             */
 	uint32_t dar;		/* Destination Address Register.
 				   Address must be aligned to CTLx.DST_TR_WIDTH.             */
diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
index 891cea8..f1ecf96 100644
--- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
+++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
@@ -35,7 +35,7 @@
 
 /* Data type representing DMA channel registers */
 typedef struct {
-	dmacHw_REG64_t ChannelSar;	/*  Source Adress Register. 64 bits (upper 32 bits are reserved)
+	dmacHw_REG64_t ChannelSar;	/*  Source Address Register. 64 bits (upper 32 bits are reserved)
 					   Address must be aligned to CTLx.SRC_TR_WIDTH.
 					 */
 	dmacHw_REG64_t ChannelDar;	/*  Destination Address Register.64 bits (upper 32 bits are reserved)
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 2652af1..a5f8a80 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -412,12 +412,7 @@
 static struct davinci_spi_platform_data dm355_spi0_pdata = {
 	.version 	= SPI_VERSION_1,
 	.num_chipselect = 2,
-	.clk_internal	= 1,
-	.cs_hold	= 1,
-	.intr_level	= 0,
-	.poll_mode	= 1,	/* 0 -> interrupt mode 1-> polling mode */
-	.c2tdelay	= 0,
-	.t2cdelay	= 0,
+	.cshold_bug	= true,
 };
 static struct platform_device dm355_spi0_device = {
 	.name = "spi_davinci",
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index c466d71..02d2cc3 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -625,12 +625,6 @@
 static struct davinci_spi_platform_data dm365_spi0_pdata = {
 	.version 	= SPI_VERSION_1,
 	.num_chipselect = 2,
-	.clk_internal	= 1,
-	.cs_hold	= 1,
-	.intr_level	= 0,
-	.poll_mode	= 1,	/* 0 -> interrupt mode 1-> polling mode */
-	.c2tdelay	= 0,
-	.t2cdelay	= 0,
 };
 
 static struct resource dm365_spi0_resources[] = {
diff --git a/arch/arm/mach-davinci/include/mach/spi.h b/arch/arm/mach-davinci/include/mach/spi.h
index 910efbf..38f4da5 100644
--- a/arch/arm/mach-davinci/include/mach/spi.h
+++ b/arch/arm/mach-davinci/include/mach/spi.h
@@ -19,26 +19,66 @@
 #ifndef __ARCH_ARM_DAVINCI_SPI_H
 #define __ARCH_ARM_DAVINCI_SPI_H
 
+#define SPI_INTERN_CS	0xFF
+
 enum {
 	SPI_VERSION_1, /* For DM355/DM365/DM6467 */
 	SPI_VERSION_2, /* For DA8xx */
 };
 
+/**
+ * davinci_spi_platform_data - Platform data for SPI master device on DaVinci
+ *
+ * @version:	version of the SPI IP. Different DaVinci devices have slightly
+ *		varying versions of the same IP.
+ * @num_chipselect: number of chipselects supported by this SPI master
+ * @intr_line:	interrupt line used to connect the SPI IP to the ARM interrupt
+ *		controller within the SoC. Possible values are 0 and 1.
+ * @chip_sel:	list of GPIOs which can act as chip-selects for the SPI.
+ *		SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
+ *		to populate if all chip-selects are internal.
+ * @cshold_bug:	set this to true if the SPI controller on your chip requires
+ *		a write to CSHOLD bit in between transfers (like in DM355).
+ */
 struct davinci_spi_platform_data {
 	u8	version;
 	u8	num_chipselect;
+	u8	intr_line;
+	u8	*chip_sel;
+	bool	cshold_bug;
+};
+
+/**
+ * davinci_spi_config - Per-chip-select configuration for SPI slave devices
+ *
+ * @wdelay:	amount of delay between transmissions. Measured in number of
+ *		SPI module clocks.
+ * @odd_parity:	polarity of parity flag at the end of transmit data stream.
+ *		0 - odd parity, 1 - even parity.
+ * @parity_enable: enable transmission of parity at end of each transmit
+ *		data stream.
+ * @io_type:	type of IO transfer. Choose between polled, interrupt and DMA.
+ * @timer_disable: disable chip-select timers (setup and hold)
+ * @c2tdelay:	chip-select setup time. Measured in number of SPI module clocks.
+ * @t2cdelay:	chip-select hold time. Measured in number of SPI module clocks.
+ * @t2edelay:	transmit data finished to SPI ENAn pin inactive time. Measured
+ *		in number of SPI clocks.
+ * @c2edelay:	chip-select active to SPI ENAn signal active time. Measured in
+ *		number of SPI clocks.
+ */
+struct davinci_spi_config {
 	u8	wdelay;
 	u8	odd_parity;
 	u8	parity_enable;
-	u8	wait_enable;
+#define SPI_IO_TYPE_INTR	0
+#define SPI_IO_TYPE_POLL	1
+#define SPI_IO_TYPE_DMA		2
+	u8	io_type;
 	u8	timer_disable;
-	u8	clk_internal;
-	u8	cs_hold;
-	u8	intr_level;
-	u8	poll_mode;
-	u8	use_dma;
 	u8	c2tdelay;
 	u8	t2cdelay;
+	u8	t2edelay;
+	u8	c2edelay;
 };
 
 #endif	/* __ARCH_ARM_DAVINCI_SPI_H */
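A board-file sketch of how the reworked platform data and the new per-device davinci_spi_config might be wired up; the GPIO number and delay values are hypothetical, and passing davinci_spi_config through spi_board_info.controller_data is an assumption about the matching driver rather than something shown in this hunk:

#include <linux/kernel.h>
#include <mach/spi.h>

/* Hypothetical chip-select map: CS0 uses the internal chip-select,
 * CS1 is driven from GPIO 42. */
static u8 board_spi0_chip_sel[] = { SPI_INTERN_CS, 42 };

static struct davinci_spi_platform_data board_spi0_pdata = {
	.version	= SPI_VERSION_1,
	.num_chipselect	= ARRAY_SIZE(board_spi0_chip_sel),
	.intr_line	= 0,
	.chip_sel	= board_spi0_chip_sel,
	.cshold_bug	= true,		/* DM355-class parts need the CSHOLD write */
};

/* Per-slave settings, illustrating the new io_type and delay fields. */
static struct davinci_spi_config board_spi_flash_cfg = {
	.io_type	= SPI_IO_TYPE_DMA,
	.wdelay		= 0,
	.c2tdelay	= 8,
	.t2cdelay	= 8,
};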
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index fab953b..1bd73a0 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -110,7 +110,7 @@
 	return ret;
 }
 
-static struct platform_suspend_ops davinci_pm_ops = {
+static const struct platform_suspend_ops davinci_pm_ops = {
 	.enter		= davinci_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 31f0cbe..23d2b6d 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -64,17 +64,19 @@
 	{
 		.start          = IRQ_USBINT,
 		.flags          = IORESOURCE_IRQ,
+		.name		= "mc"
 	},
 	{
 		/* placeholder for the dedicated CPPI IRQ */
 		.flags          = IORESOURCE_IRQ,
+		.name		= "dma"
 	},
 };
 
 static u64 usb_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device usb_dev = {
-	.name           = "musb_hdrc",
+	.name           = "musb-davinci",
 	.id             = -1,
 	.dev = {
 		.platform_data		= &usb_data,
@@ -110,6 +112,7 @@
 	{
 		.start		= IRQ_DA8XX_USB_INT,
 		.flags		= IORESOURCE_IRQ,
+		.name		= "mc",
 	},
 };
 
@@ -121,6 +124,7 @@
 
 	usb_dev.resource = da8xx_usb20_resources;
 	usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources);
+	usb_dev.name = "musb-da8xx";
 
 	return platform_device_register(&usb_dev);
 }
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f7a1258..fe627ab 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -770,7 +770,7 @@
 };
 
 static struct platform_device dove_sdio0 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 0,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
@@ -798,7 +798,7 @@
 };
 
 static struct platform_device dove_sdio1 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 1,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h
index 213a4fc..8c950e1 100644
--- a/arch/arm/mach-gemini/include/mach/hardware.h
+++ b/arch/arm/mach-gemini/include/mach/hardware.h
@@ -33,7 +33,7 @@
 #define GEMINI_LPC_HOST_BASE	0x47000000
 #define GEMINI_LPC_IO_BASE	0x47800000
 #define GEMINI_INTERRUPT_BASE	0x48000000
-/* TODO: Different interrupt controlers when SMP
+/* TODO: Different interrupt controllers when SMP
  * #define GEMINI_INTERRUPT0_BASE	0x48000000
  * #define GEMINI_INTERRUPT1_BASE	0x49000000
  */
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index f667a26..5056148 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -254,10 +254,10 @@
 
 static struct mc13783_regulator_init_data pcm038_regulators[] = {
 	{
-		.id = MC13783_REGU_VCAM,
+		.id = MC13783_REG_VCAM,
 		.init_data = &cam_data,
 	}, {
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &sdhc1_data,
 	},
 };
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 6bf81ce..acf1769 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -32,7 +32,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops mx27_suspend_ops = {
+static const struct platform_suspend_ops mx27_suspend_ops = {
 	.enter = mx27_suspend_enter,
 	.valid = suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index c9d77fa..cfcca41 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -171,7 +171,7 @@
 
 	kirkwood_i2c_init();
 
-	if (machine_is_openrd_client()) {
+	if (machine_is_openrd_client() || machine_is_openrd_ultimate()) {
 		i2c_register_board_info(0, i2c_board_info,
 			ARRAY_SIZE(i2c_board_info));
 		kirkwood_audio_init();
diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c
index a6e2aed..e76d41b 100644
--- a/arch/arm/mach-lpc32xx/pm.c
+++ b/arch/arm/mach-lpc32xx/pm.c
@@ -123,7 +123,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops lpc32xx_pm_ops = {
+static const struct platform_suspend_ops lpc32xx_pm_ops = {
 	.valid	= suspend_valid_only_mem,
 	.enter	= lpc32xx_pm_enter,
 };
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index fae931a..5d3d9ad 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -40,11 +40,13 @@
 	bool "MSM8X60"
 	select MACH_MSM8X60_SURF if (!MACH_MSM8X60_RUMI3 && !MACH_MSM8X60_SIM \
 				  && !MACH_MSM8X60_FFA)
+	select ARCH_MSM_SCORPIONMP
 	select ARM_GIC
 	select CPU_V7
 	select MSM_V2_TLMM
 	select MSM_GPIOMUX
 	select IOMMU_API
+	select MSM_SCM if SMP
 
 endchoice
 
@@ -172,4 +174,7 @@
 
 config IOMMU_API
 	bool
+
+config MSM_SCM
+	bool
 endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 59646bb..94195c1 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -18,6 +18,10 @@
 obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
 obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
 obj-$(CONFIG_MSM_SMD) += last_radio_log.o
+obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
+
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
 
 obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o devices-msm7x00.o
 obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o board-trout-mmc.o board-trout-panel.o devices-msm7x00.o
diff --git a/arch/arm/mach-msm/headsmp.S b/arch/arm/mach-msm/headsmp.S
new file mode 100644
index 0000000..d0c2143
--- /dev/null
+++ b/arch/arm/mach-msm/headsmp.S
@@ -0,0 +1,40 @@
+/*
+ *  linux/arch/arm/mach-realview/headsmp.S
+ *
+ *  Copyright (c) 2003 ARM Limited
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__INIT
+
+/*
+ * MSM specific entry point for secondary CPUs.  This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(msm_secondary_startup)
+	mrc	p15, 0, r0, c0, c0, 5
+	and	r0, r0, #15
+	adr	r4, 1f
+	ldmia	r4, {r5, r6}
+	sub	r4, r4, r5
+	add	r6, r6, r4
+pen:	ldr	r7, [r6]
+	cmp	r7, r0
+	bne	pen
+
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
+
+	.align
+1:	.long	.
+	.long	pen_release
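For readers who do not follow the assembly above, the holding pen boils down to the loop below. This is an illustrative C sketch of the same idea, not code from the patch; hw_cpu_id stands for the MPIDR-derived ID the assembly reads from CP15.

extern volatile int pen_release;

/* Spin until the boot CPU writes our hardware ID into pen_release,
 * then fall through to the generic secondary_startup() path. */
static void holding_pen(unsigned int hw_cpu_id)
{
	while (pen_release != (int)hw_cpu_id)
		;	/* re-read pen_release from memory each pass */
	/* released: continue into secondary_startup() */
}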
diff --git a/arch/arm/mach-msm/hotplug.c b/arch/arm/mach-msm/hotplug.c
new file mode 100644
index 0000000..5a31f70
--- /dev/null
+++ b/arch/arm/mach-msm/hotplug.c
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+	/* Just flush the cache. Changing the coherency is not yet
+	 * available on msm. */
+	flush_cache_all();
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+}
+
+static inline void platform_do_lowpower(unsigned int cpu)
+{
+	/* Just enter wfi for now. TODO: Properly shut off the cpu. */
+	for (;;) {
+		/*
+		 * here's the WFI
+		 */
+		asm("wfi"
+		    :
+		    :
+		    : "memory", "cc");
+
+		if (pen_release == cpu) {
+			/*
+			 * OK, proper wakeup, we're done
+			 */
+			break;
+		}
+
+		/*
+		 * getting here, means that we have come out of WFI without
+		 * having been woken up - this shouldn't happen
+		 *
+		 * The trouble is, letting people know about this is not really
+		 * possible, since we are currently running incoherently, and
+		 * therefore cannot safely call printk() or anything else
+		 */
+		pr_debug("CPU%u: spurious wakeup call\n", cpu);
+	}
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+	return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void platform_cpu_die(unsigned int cpu)
+{
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();
+	platform_do_lowpower(cpu);
+
+	/*
+	 * bring this CPU back into the world of cache
+	 * coherency, and then restore interrupts
+	 */
+	cpu_leave_lowpower();
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * we don't allow CPU 0 to be shutdown (it is still too special
+	 * e.g. clock tick interrupts)
+	 */
+	return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
index 4dc99aa..1246715 100644
--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+++ b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
@@ -26,7 +26,7 @@
 	 * The interrupt numbering scheme is defined in the
 	 * interrupt controller spec.  To wit:
 	 *
-	 * Migrated the code from ARM MP port to be more consistant
+	 * Migrated the code from ARM MP port to be more consistent
 	 * with interrupt processing , the following still holds true
 	 * however, all interrupts are treated the same regardless of
 	 * if they are local IPI or PPI
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
index 7c43a9b..a54e33b 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
@@ -60,7 +60,11 @@
 
 #define MSM_TMR_BASE		IOMEM(0xF0200000)
 #define MSM_TMR_PHYS		0x02000000
-#define MSM_TMR_SIZE		(SZ_1M)
+#define MSM_TMR_SIZE		SZ_4K
+
+#define MSM_TMR0_BASE		IOMEM(0xF0201000)
+#define MSM_TMR0_PHYS		0x02040000
+#define MSM_TMR0_SIZE		SZ_4K
 
 #define MSM_GPT_BASE		(MSM_TMR_BASE + 0x4)
 #define MSM_DGT_BASE		(MSM_TMR_BASE + 0x24)
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index f912d7b..1260007 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -105,6 +105,7 @@
 	MSM_DEVICE(QGIC_DIST),
 	MSM_DEVICE(QGIC_CPU),
 	MSM_DEVICE(TMR),
+	MSM_DEVICE(TMR0),
 	MSM_DEVICE(ACC),
 	MSM_DEVICE(GCC),
 };
@@ -153,7 +154,7 @@
 {
 	if (mtype == MT_DEVICE) {
 		/* The peripherals in the 88000000 - D0000000 range
-		 * are only accessable by type MT_DEVICE_NONSHARED.
+		 * are only accessible by type MT_DEVICE_NONSHARED.
 		 * Adjust mtype as necessary to make this "just work."
 		 */
 		if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000))
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
new file mode 100644
index 0000000..0f427bc
--- /dev/null
+++ b/arch/arm/mach-msm/platsmp.c
@@ -0,0 +1,166 @@
+/*
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *  Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+
+#include <asm/hardware/gic.h>
+#include <asm/cacheflush.h>
+#include <asm/mach-types.h>
+
+#include <mach/msm_iomap.h>
+
+#include "scm-boot.h"
+
+#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0
+#define SCSS_CPU1CORE_RESET 0xD80
+#define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64
+
+/* Mask for edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */
+#define GIC_PPI_EDGE_MASK 0xFFFFD7FF
+
+extern void msm_secondary_startup(void);
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen".
+ */
+volatile int pen_release = -1;
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	/* Configure edge-triggered PPIs */
+	writel(GIC_PPI_EDGE_MASK, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4);
+
+	/*
+	 * if any interrupts are already enabled for the primary
+	 * core (e.g. timer irq), then they will not have been enabled
+	 * for us: do so
+	 */
+	gic_secondary_init(0);
+
+	/*
+	 * let the primary processor know we're out of the
+	 * pen, then head off into the C entry point
+	 */
+	pen_release = -1;
+	smp_wmb();
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	spin_lock(&boot_lock);
+	spin_unlock(&boot_lock);
+}
+
+static __cpuinit void prepare_cold_cpu(unsigned int cpu)
+{
+	int ret;
+	ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
+				SCM_FLAG_COLDBOOT_CPU1);
+	if (ret == 0) {
+		void *sc1_base_ptr;
+		sc1_base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
+		if (sc1_base_ptr) {
+			writel(0, sc1_base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
+			writel(0, sc1_base_ptr + SCSS_CPU1CORE_RESET);
+			writel(3, sc1_base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP);
+			iounmap(sc1_base_ptr);
+		}
+	} else
+		printk(KERN_DEBUG "Failed to set secondary core boot "
+				  "address\n");
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	unsigned long timeout;
+	static int cold_boot_done;
+
+	/* Only need to bring cpu out of reset this way once */
+	if (cold_boot_done == false) {
+		prepare_cold_cpu(cpu);
+		cold_boot_done = true;
+	}
+
+	/*
+	 * set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	spin_lock(&boot_lock);
+
+	/*
+	 * The secondary processor is waiting to be released from
+	 * the holding pen - release it, then wait for it to flag
+	 * that it has been released by resetting pen_release.
+	 *
+	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * "cpu" is Linux's internal ID.
+	 */
+	pen_release = cpu;
+	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+
+	/*
+	 * Send the secondary CPU a soft interrupt, thereby causing
+	 * the boot monitor to read the system wide flags register,
+	 * and branch to the address found there.
+	 */
+	smp_cross_call(cpumask_of(cpu), 1);
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		smp_rmb();
+		if (pen_release == -1)
+			break;
+
+		udelay(10);
+	}
+
+	/*
+	 * now the secondary core is starting up let it run its
+	 * calibrations, then wait for it to finish
+	 */
+	spin_unlock(&boot_lock);
+
+	return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system. The msm8x60
+ * does not support the ARM SCU, so just set the possible cpu mask to
+ * NR_CPUS.
+ */
+void __init smp_init_cpus(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		set_cpu_possible(i, true);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
+
+	/*
+	 * Initialise the present map, which describes the set of CPUs
+	 * actually populated at the present time.
+	 */
+	for (i = 0; i < max_cpus; i++)
+		set_cpu_present(i, true);
+}
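The release/acknowledge protocol between boot_secondary() and platform_secondary_init() above is easy to lose in the locking detail; the commented pseudocode below compresses it, using only functions already defined in this file:

/* Boot CPU (boot_secondary)              Secondary CPU
 *
 * pen_release = cpu;
 * flush pen_release to RAM/L2
 * smp_cross_call(cpu)  ----------------> boot monitor branches the core into
 *                                        msm_secondary_startup, which spins in
 *                                        the holding pen until it sees its ID
 * wait up to 1s for pen_release == -1    platform_secondary_init():
 *                                          gic_secondary_init(0);
 *                                          pen_release = -1; smp_wmb();
 * sees -1, returns 0 (success)            sync on boot_lock, then run idle
 */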
diff --git a/arch/arm/mach-msm/scm-boot.c b/arch/arm/mach-msm/scm-boot.c
new file mode 100644
index 0000000..45cee3e
--- /dev/null
+++ b/arch/arm/mach-msm/scm-boot.c
@@ -0,0 +1,39 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "scm.h"
+#include "scm-boot.h"
+
+/*
+ * Set the cold/warm boot address for one of the CPU cores.
+ */
+int scm_set_boot_addr(phys_addr_t addr, int flags)
+{
+	struct {
+		unsigned int flags;
+		phys_addr_t  addr;
+	} cmd;
+
+	cmd.addr = addr;
+	cmd.flags = flags;
+	return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
+			&cmd, sizeof(cmd), NULL, 0);
+}
+EXPORT_SYMBOL(scm_set_boot_addr);
diff --git a/arch/arm/mach-msm/scm-boot.h b/arch/arm/mach-msm/scm-boot.h
new file mode 100644
index 0000000..68f9b61
--- /dev/null
+++ b/arch/arm/mach-msm/scm-boot.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __MACH_SCM_BOOT_H
+#define __MACH_SCM_BOOT_H
+
+#define SCM_BOOT_ADDR			0x1
+#define SCM_FLAG_COLDBOOT_CPU1		0x1
+#define SCM_FLAG_WARMBOOT_CPU1		0x2
+#define SCM_FLAG_WARMBOOT_CPU0		0x4
+
+int scm_set_boot_addr(phys_addr_t addr, int flags);
+
+#endif
diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
new file mode 100644
index 0000000..f4b9bc9
--- /dev/null
+++ b/arch/arm/mach-msm/scm.c
@@ -0,0 +1,287 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include <asm/cacheflush.h>
+
+#include "scm.h"
+
+/* Cache line size for msm8x60 */
+#define CACHELINESIZE 32
+
+#define SCM_ENOMEM		-5
+#define SCM_EOPNOTSUPP		-4
+#define SCM_EINVAL_ADDR		-3
+#define SCM_EINVAL_ARG		-2
+#define SCM_ERROR		-1
+#define SCM_INTERRUPTED		1
+
+static DEFINE_MUTEX(scm_lock);
+
+/**
+ * struct scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ *	------------------- <--- struct scm_command
+ *	| command header  |
+ *	------------------- <--- scm_get_command_buffer()
+ *	| command buffer  |
+ *	------------------- <--- struct scm_response and
+ *	| response header |      scm_command_to_response()
+ *	------------------- <--- scm_get_response_buffer()
+ *	| response buffer |
+ *	-------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_command {
+	u32	len;
+	u32	buf_offset;
+	u32	resp_hdr_offset;
+	u32	id;
+	u32	buf[0];
+};
+
+/**
+ * struct scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_response {
+	u32	len;
+	u32	buf_offset;
+	u32	is_complete;
+};
+
+/**
+ * alloc_scm_command() - Allocate an SCM command
+ * @cmd_size: size of the command buffer
+ * @resp_size: size of the response buffer
+ *
+ * Allocate an SCM command, including enough room for the command
+ * and response headers as well as the command and response buffers.
+ *
+ * Returns a valid &scm_command on success or %NULL if the allocation fails.
+ */
+static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
+{
+	struct scm_command *cmd;
+	size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
+		resp_size;
+
+	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
+	if (cmd) {
+		cmd->len = len;
+		cmd->buf_offset = offsetof(struct scm_command, buf);
+		cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
+	}
+	return cmd;
+}
+
+/**
+ * free_scm_command() - Free an SCM command
+ * @cmd: command to free
+ *
+ * Free an SCM command.
+ */
+static inline void free_scm_command(struct scm_command *cmd)
+{
+	kfree(cmd);
+}
+
+/**
+ * scm_command_to_response() - Get a pointer to a scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_response *scm_command_to_response(
+		const struct scm_command *cmd)
+{
+	return (void *)cmd + cmd->resp_hdr_offset;
+}
+
+/**
+ * scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_get_command_buffer(const struct scm_command *cmd)
+{
+	return (void *)cmd->buf;
+}
+
+/**
+ * scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_get_response_buffer(const struct scm_response *rsp)
+{
+	return (void *)rsp + rsp->buf_offset;
+}
+
+static int scm_remap_error(int err)
+{
+	switch (err) {
+	case SCM_ERROR:
+		return -EIO;
+	case SCM_EINVAL_ADDR:
+	case SCM_EINVAL_ARG:
+		return -EINVAL;
+	case SCM_EOPNOTSUPP:
+		return -EOPNOTSUPP;
+	case SCM_ENOMEM:
+		return -ENOMEM;
+	}
+	return -EINVAL;
+}
+
+static u32 smc(u32 cmd_addr)
+{
+	int context_id;
+	register u32 r0 asm("r0") = 1;
+	register u32 r1 asm("r1") = (u32)&context_id;
+	register u32 r2 asm("r2") = cmd_addr;
+	asm(
+		__asmeq("%0", "r0")
+		__asmeq("%1", "r0")
+		__asmeq("%2", "r1")
+		__asmeq("%3", "r2")
+		"smc	#0	@ switch to secure world\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2)
+		: "r3");
+	return r0;
+}
+
+static int __scm_call(const struct scm_command *cmd)
+{
+	int ret;
+	u32 cmd_addr = virt_to_phys(cmd);
+
+	/*
+	 * Flush the entire cache here so callers don't have to remember
+	 * to flush the cache when passing physical addresses to the secure
+	 * side in the buffer.
+	 */
+	flush_cache_all();
+	do {
+		ret = smc(cmd_addr);
+		if (ret < 0) {
+			ret = scm_remap_error(ret);
+			break;
+		}
+	} while (ret == SCM_INTERRUPTED);
+
+	return ret;
+}
+
+/**
+ * scm_call() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ */
+int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len)
+{
+	int ret;
+	struct scm_command *cmd;
+	struct scm_response *rsp;
+
+	cmd = alloc_scm_command(cmd_len, resp_len);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->id = (svc_id << 10) | cmd_id;
+	if (cmd_buf)
+		memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);
+
+	mutex_lock(&scm_lock);
+	ret = __scm_call(cmd);
+	mutex_unlock(&scm_lock);
+	if (ret)
+		goto out;
+
+	rsp = scm_command_to_response(cmd);
+	do {
+		u32 start = (u32)rsp;
+		u32 end = (u32)scm_get_response_buffer(rsp) + resp_len;
+		start &= ~(CACHELINESIZE - 1);
+		while (start < end) {
+			asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+			     : "memory");
+			start += CACHELINESIZE;
+		}
+	} while (!rsp->is_complete);
+
+	if (resp_buf)
+		memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
+out:
+	free_scm_command(cmd);
+	return ret;
+}
+EXPORT_SYMBOL(scm_call);
+
+u32 scm_get_version(void)
+{
+	int context_id;
+	static u32 version = -1;
+	register u32 r0 asm("r0") = 0x1 << 8;
+	register u32 r1 asm("r1") = (u32)&context_id;
+
+	if (version != -1)
+		return version;
+
+	mutex_lock(&scm_lock);
+	asm(
+		__asmeq("%0", "r1")
+		__asmeq("%1", "r0")
+		__asmeq("%2", "r1")
+		"smc	#0	@ switch to secure world\n"
+		: "=r" (r1)
+		: "r" (r0), "r" (r1)
+		: "r2", "r3");
+	version = r1;
+	mutex_unlock(&scm_lock);
+
+	return version;
+}
+EXPORT_SYMBOL(scm_get_version);
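A minimal, hypothetical caller of the new SCM interface; only scm_call() and the SCM_SVC_* constant come from this series, while the command ID, argument and response layout below are invented purely for illustration:

#include "scm.h"

/* Send one 32-bit argument to an imaginary command and read back a
 * 32-bit status word; scm_call() handles buffer allocation, the
 * secure-world switch and cache maintenance internally. */
static int example_scm_query(u32 arg)
{
	u32 status = 0;
	int ret;

	ret = scm_call(SCM_SVC_BOOT, 0x4 /* made-up command ID */,
		       &arg, sizeof(arg), &status, sizeof(status));
	if (ret)
		return ret;

	return status ? 0 : -EIO;
}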
diff --git a/arch/arm/mach-msm/scm.h b/arch/arm/mach-msm/scm.h
new file mode 100644
index 0000000..261786b
--- /dev/null
+++ b/arch/arm/mach-msm/scm.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of Code Aurora Forum, Inc. nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __MACH_SCM_H
+#define __MACH_SCM_H
+
+#define SCM_SVC_BOOT			0x1
+#define SCM_SVC_PIL			0x2
+
+extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len);
+
+#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 scm_get_version(void);
+
+#endif
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 595be7f..c105d28 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -47,6 +47,19 @@
 
 #define GPT_HZ 32768
 
+enum timer_location {
+	LOCAL_TIMER = 0,
+	GLOBAL_TIMER = 1,
+};
+
+#ifdef MSM_TMR0_BASE
+#define MSM_TMR_GLOBAL		(MSM_TMR0_BASE - MSM_TMR_BASE)
+#else
+#define MSM_TMR_GLOBAL		0
+#endif
+
+#define MSM_GLOBAL_TIMER MSM_CLOCK_DGT
+
 #if defined(CONFIG_ARCH_QSD8X50)
 #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
 #define MSM_DGT_SHIFT (0)
@@ -65,49 +78,67 @@
 	void __iomem                *regbase;
 	uint32_t                    freq;
 	uint32_t                    shift;
+	void __iomem                *global_counter;
+	void __iomem                *local_counter;
 };
 
+enum {
+	MSM_CLOCK_GPT,
+	MSM_CLOCK_DGT,
+	NR_TIMERS,
+};
+
+
+static struct msm_clock msm_clocks[];
+static struct clock_event_device *local_clock_event;
+
 static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *evt = dev_id;
+	if (smp_processor_id() != 0)
+		evt = local_clock_event;
+	if (evt->event_handler == NULL)
+		return IRQ_HANDLED;
 	evt->event_handler(evt);
 	return IRQ_HANDLED;
 }
 
-static cycle_t msm_gpt_read(struct clocksource *cs)
+static cycle_t msm_read_timer_count(struct clocksource *cs)
 {
-	return readl(MSM_GPT_BASE + TIMER_COUNT_VAL);
+	struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
+
+	return readl(clk->global_counter);
 }
 
-static cycle_t msm_dgt_read(struct clocksource *cs)
+static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
 {
-	return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
+#ifdef CONFIG_SMP
+	int i;
+	for (i = 0; i < NR_TIMERS; i++)
+		if (evt == &(msm_clocks[i].clockevent))
+			return &msm_clocks[i];
+	return &msm_clocks[MSM_GLOBAL_TIMER];
+#else
+	return container_of(evt, struct msm_clock, clockevent);
+#endif
 }
 
 static int msm_timer_set_next_event(unsigned long cycles,
 				    struct clock_event_device *evt)
 {
-	struct msm_clock *clock = container_of(evt, struct msm_clock, clockevent);
-	uint32_t now = readl(clock->regbase + TIMER_COUNT_VAL);
+	struct msm_clock *clock = clockevent_to_clock(evt);
+	uint32_t now = readl(clock->local_counter);
 	uint32_t alarm = now + (cycles << clock->shift);
-	int late;
 
 	writel(alarm, clock->regbase + TIMER_MATCH_VAL);
-	now = readl(clock->regbase + TIMER_COUNT_VAL);
-	late = now - alarm;
-	if (late >= (-2 << clock->shift) && late < DGT_HZ*5) {
-		printk(KERN_NOTICE "msm_timer_set_next_event(%lu) clock %s, "
-		       "alarm already expired, now %x, alarm %x, late %d\n",
-		       cycles, clock->clockevent.name, now, alarm, late);
-		return -ETIME;
-	}
 	return 0;
 }
 
 static void msm_timer_set_mode(enum clock_event_mode mode,
 			      struct clock_event_device *evt)
 {
-	struct msm_clock *clock = container_of(evt, struct msm_clock, clockevent);
+	struct msm_clock *clock = clockevent_to_clock(evt);
+
 	switch (mode) {
 	case CLOCK_EVT_MODE_RESUME:
 	case CLOCK_EVT_MODE_PERIODIC:
@@ -123,7 +154,7 @@
 }
 
 static struct msm_clock msm_clocks[] = {
-	{
+	[MSM_CLOCK_GPT] = {
 		.clockevent = {
 			.name           = "gp_timer",
 			.features       = CLOCK_EVT_FEAT_ONESHOT,
@@ -135,7 +166,7 @@
 		.clocksource = {
 			.name           = "gp_timer",
 			.rating         = 200,
-			.read           = msm_gpt_read,
+			.read           = msm_read_timer_count,
 			.mask           = CLOCKSOURCE_MASK(32),
 			.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 		},
@@ -147,9 +178,12 @@
 			.irq     = INT_GP_TIMER_EXP
 		},
 		.regbase = MSM_GPT_BASE,
-		.freq = GPT_HZ
+		.freq = GPT_HZ,
+		.local_counter = MSM_GPT_BASE + TIMER_COUNT_VAL,
+		.global_counter = MSM_GPT_BASE + TIMER_COUNT_VAL +
+			MSM_TMR_GLOBAL,
 	},
-	{
+	[MSM_CLOCK_DGT] = {
 		.clockevent = {
 			.name           = "dg_timer",
 			.features       = CLOCK_EVT_FEAT_ONESHOT,
@@ -161,7 +195,7 @@
 		.clocksource = {
 			.name           = "dg_timer",
 			.rating         = 300,
-			.read           = msm_dgt_read,
+			.read           = msm_read_timer_count,
 			.mask           = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
 			.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 		},
@@ -174,7 +208,10 @@
 		},
 		.regbase = MSM_DGT_BASE,
 		.freq = DGT_HZ >> MSM_DGT_SHIFT,
-		.shift = MSM_DGT_SHIFT
+		.shift = MSM_DGT_SHIFT,
+		.local_counter = MSM_DGT_BASE + TIMER_COUNT_VAL,
+		.global_counter = MSM_DGT_BASE + TIMER_COUNT_VAL +
+			MSM_TMR_GLOBAL,
 	}
 };
 
@@ -183,7 +220,7 @@
 	int i;
 	int res;
 
-#ifdef CONFIG_ARCH_MSM8X60
+#ifdef CONFIG_ARCH_MSM_SCORPIONMP
 	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 #endif
 
@@ -217,6 +254,48 @@
 	}
 }
 
+#ifdef CONFIG_SMP
+void __cpuinit local_timer_setup(struct clock_event_device *evt)
+{
+	struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
+
+	/* Use existing clock_event for cpu 0 */
+	if (!smp_processor_id())
+		return;
+
+	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
+
+	if (!local_clock_event) {
+		writel(0, clock->regbase  + TIMER_ENABLE);
+		writel(0, clock->regbase + TIMER_CLEAR);
+		writel(~0, clock->regbase + TIMER_MATCH_VAL);
+	}
+	evt->irq = clock->irq.irq;
+	evt->name = "local_timer";
+	evt->features = CLOCK_EVT_FEAT_ONESHOT;
+	evt->rating = clock->clockevent.rating;
+	evt->set_mode = msm_timer_set_mode;
+	evt->set_next_event = msm_timer_set_next_event;
+	evt->shift = clock->clockevent.shift;
+	evt->mult = div_sc(clock->freq, NSEC_PER_SEC, evt->shift);
+	evt->max_delta_ns =
+		clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
+	evt->min_delta_ns = clockevent_delta2ns(4, evt);
+
+	local_clock_event = evt;
+
+	gic_enable_ppi(clock->irq.irq);
+
+	clockevents_register_device(evt);
+}
+
+inline int local_timer_ack(void)
+{
+	return 1;
+}
+
+#endif
+
 struct sys_timer msm_timer = {
 	.init = msm_timer_init
 };
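
The timer changes above replace a plain container_of() with clockevent_to_clock() because, on SMP, the per-cpu local_clock_event registered in local_timer_setup() is not embedded in a struct msm_clock. A minimal sketch of the non-SMP case only, with illustrative names, assuming the usual container_of() pattern:

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clockchips.h>

struct example_msm_clock {
	struct clock_event_device clockevent;	/* embedded member */
	void __iomem *local_counter;		/* field names are hypothetical */
};

static struct example_msm_clock *
example_evt_to_clock(struct clock_event_device *evt)
{
	/* Valid only while every registered clock_event_device really
	 * is embedded in an example_msm_clock. */
	return container_of(evt, struct example_msm_clock, clockevent);
}
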
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 4e516b4..899a969 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -140,10 +140,10 @@
 
 static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
 	{
-		.id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */
+		.id = MC13783_REG_PWGT1SPI, /* Power Gate for ARM core. */
 		.init_data = &pwgtx_init,
 	}, {
-		.id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */
+		.id = MC13783_REG_PWGT2SPI, /* Power Gate for L2 Cache. */
 		.init_data = &pwgtx_init,
 	}, {
 
diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c
index 203d21a..1aa8d65 100644
--- a/arch/arm/mach-mx3/mach-mx31moboard.c
+++ b/arch/arm/mach-mx3/mach-mx31moboard.c
@@ -216,11 +216,11 @@
 
 static struct mc13783_regulator_init_data moboard_regulators[] = {
 	{
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &sdhc_vreg_data,
 	},
 	{
-		.id = MC13783_REGU_VCAM,
+		.id = MC13783_REG_VCAM,
 		.init_data = &cam_vreg_data,
 	},
 };
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 5f64963..8d2f2da 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -152,20 +152,11 @@
 config MACH_AMS_DELTA
 	bool "Amstrad E3 (Delta)"
 	depends on ARCH_OMAP1 && ARCH_OMAP15XX
+	select FIQ
 	help
 	  Support for the Amstrad E3 (codename Delta) videophone. Say Y here
 	  if you have such a device.
 
-config AMS_DELTA_FIQ
-	bool "Fast Interrupt Request (FIQ) support for the E3"
-	depends on MACH_AMS_DELTA
-	select FIQ
-	help
-	  Provide a FIQ handler for the E3.
-	  This allows for fast handling of interrupts generated
-	  by the clock line of the E3 mailboard (or a PS/2 keyboard)
-	  connected to the GPIO based external keyboard port.
-
 config MACH_OMAP_GENERIC
 	bool "Generic OMAP board"
 	depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 9a304d8..6ee1950 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,7 @@
 #
 
 # Common support
-obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o
+obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o
 obj-y += clock.o clock_data.o opp_data.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
@@ -39,8 +39,8 @@
 obj-$(CONFIG_MACH_OMAP_PALMZ71)		+= board-palmz71.o
 obj-$(CONFIG_MACH_OMAP_PALMTT)		+= board-palmtt.o
 obj-$(CONFIG_MACH_NOKIA770)		+= board-nokia770.o
-obj-$(CONFIG_MACH_AMS_DELTA)		+= board-ams-delta.o
-obj-$(CONFIG_AMS_DELTA_FIQ)		+= ams-delta-fiq.o ams-delta-fiq-handler.o
+obj-$(CONFIG_MACH_AMS_DELTA)		+= board-ams-delta.o ams-delta-fiq.o \
+					   ams-delta-fiq-handler.o
 obj-$(CONFIG_MACH_SX1)			+= board-sx1.o board-sx1-mmc.o
 obj-$(CONFIG_MACH_HERALD)		+= board-htcherald.o
 
@@ -49,6 +49,12 @@
 obj-$(CONFIG_MACH_OMAP_INNOVATOR)	+= fpga.o
 endif
 
+# GPIO
+obj-$(CONFIG_ARCH_OMAP730)		+= gpio7xx.o
+obj-$(CONFIG_ARCH_OMAP850)		+= gpio7xx.o
+obj-$(CONFIG_ARCH_OMAP15XX)		+= gpio15xx.o
+obj-$(CONFIG_ARCH_OMAP16XX)		+= gpio16xx.o
+
 # LEDs support
 led-$(CONFIG_MACH_OMAP_H2)		+= leds-h2p2-debug.o
 led-$(CONFIG_MACH_OMAP_H3)		+= leds-h2p2-debug.o
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 1d4163b..bd0495a 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -28,6 +28,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
+#include <plat/io.h>
 #include <plat/board-ams-delta.h>
 #include <mach/gpio.h>
 #include <plat/keypad.h>
@@ -42,84 +43,82 @@
 static u8 ams_delta_latch1_reg;
 static u16 ams_delta_latch2_reg;
 
-static int ams_delta_keymap[] = {
+static const unsigned int ams_delta_keymap[] = {
 	KEY(0, 0, KEY_F1),		/* Advert    */
 
-	KEY(3, 0, KEY_COFFEE),		/* Games     */
-	KEY(2, 0, KEY_QUESTION),	/* Directory */
-	KEY(3, 2, KEY_CONNECT),		/* Internet  */
-	KEY(2, 1, KEY_SHOP),		/* Services  */
+	KEY(0, 3, KEY_COFFEE),		/* Games     */
+	KEY(0, 2, KEY_QUESTION),	/* Directory */
+	KEY(2, 3, KEY_CONNECT),		/* Internet  */
+	KEY(1, 2, KEY_SHOP),		/* Services  */
 	KEY(1, 1, KEY_PHONE),		/* VoiceMail */
 
-	KEY(1, 0, KEY_DELETE),		/* Delete    */
+	KEY(0, 1, KEY_DELETE),		/* Delete    */
 	KEY(2, 2, KEY_PLAY),		/* Play      */
-	KEY(0, 1, KEY_PAGEUP),		/* Up        */
-	KEY(3, 1, KEY_PAGEDOWN),	/* Down      */
-	KEY(0, 2, KEY_EMAIL),		/* ReadEmail */
-	KEY(1, 2, KEY_STOP),		/* Stop      */
+	KEY(1, 0, KEY_PAGEUP),		/* Up        */
+	KEY(1, 3, KEY_PAGEDOWN),	/* Down      */
+	KEY(2, 0, KEY_EMAIL),		/* ReadEmail */
+	KEY(2, 1, KEY_STOP),		/* Stop      */
 
 	/* Numeric keypad portion */
-	KEY(7, 0, KEY_KP1),
-	KEY(6, 0, KEY_KP2),
-	KEY(5, 0, KEY_KP3),
-	KEY(7, 1, KEY_KP4),
-	KEY(6, 1, KEY_KP5),
-	KEY(5, 1, KEY_KP6),
-	KEY(7, 2, KEY_KP7),
-	KEY(6, 2, KEY_KP8),
-	KEY(5, 2, KEY_KP9),
-	KEY(6, 3, KEY_KP0),
-	KEY(7, 3, KEY_KPASTERISK),
-	KEY(5, 3, KEY_KPDOT),		/* # key     */
-	KEY(2, 7, KEY_NUMLOCK),		/* Mute      */
-	KEY(1, 7, KEY_KPMINUS),		/* Recall    */
-	KEY(1, 6, KEY_KPPLUS),		/* Redial    */
-	KEY(6, 7, KEY_KPSLASH),		/* Handsfree */
-	KEY(0, 6, KEY_ENTER),		/* Video     */
+	KEY(0, 7, KEY_KP1),
+	KEY(0, 6, KEY_KP2),
+	KEY(0, 5, KEY_KP3),
+	KEY(1, 7, KEY_KP4),
+	KEY(1, 6, KEY_KP5),
+	KEY(1, 5, KEY_KP6),
+	KEY(2, 7, KEY_KP7),
+	KEY(2, 6, KEY_KP8),
+	KEY(2, 5, KEY_KP9),
+	KEY(3, 6, KEY_KP0),
+	KEY(3, 7, KEY_KPASTERISK),
+	KEY(3, 5, KEY_KPDOT),		/* # key     */
+	KEY(7, 2, KEY_NUMLOCK),		/* Mute      */
+	KEY(7, 1, KEY_KPMINUS),		/* Recall    */
+	KEY(6, 1, KEY_KPPLUS),		/* Redial    */
+	KEY(7, 6, KEY_KPSLASH),		/* Handsfree */
+	KEY(6, 0, KEY_ENTER),		/* Video     */
 
-	KEY(4, 7, KEY_CAMERA),		/* Photo     */
+	KEY(7, 4, KEY_CAMERA),		/* Photo     */
 
-	KEY(4, 0, KEY_F2),		/* Home      */
-	KEY(4, 1, KEY_F3),		/* Office    */
-	KEY(4, 2, KEY_F4),		/* Mobile    */
+	KEY(0, 4, KEY_F2),		/* Home      */
+	KEY(1, 4, KEY_F3),		/* Office    */
+	KEY(2, 4, KEY_F4),		/* Mobile    */
 	KEY(7, 7, KEY_F5),		/* SMS       */
-	KEY(5, 7, KEY_F6),		/* Email     */
+	KEY(7, 5, KEY_F6),		/* Email     */
 
 	/* QWERTY portion of keypad */
-	KEY(4, 3, KEY_Q),
+	KEY(3, 4, KEY_Q),
 	KEY(3, 3, KEY_W),
-	KEY(2, 3, KEY_E),
-	KEY(1, 3, KEY_R),
-	KEY(0, 3, KEY_T),
-	KEY(7, 4, KEY_Y),
-	KEY(6, 4, KEY_U),
-	KEY(5, 4, KEY_I),
+	KEY(3, 2, KEY_E),
+	KEY(3, 1, KEY_R),
+	KEY(3, 0, KEY_T),
+	KEY(4, 7, KEY_Y),
+	KEY(4, 6, KEY_U),
+	KEY(4, 5, KEY_I),
 	KEY(4, 4, KEY_O),
-	KEY(3, 4, KEY_P),
+	KEY(4, 3, KEY_P),
 
-	KEY(2, 4, KEY_A),
-	KEY(1, 4, KEY_S),
-	KEY(0, 4, KEY_D),
-	KEY(7, 5, KEY_F),
-	KEY(6, 5, KEY_G),
+	KEY(4, 2, KEY_A),
+	KEY(4, 1, KEY_S),
+	KEY(4, 0, KEY_D),
+	KEY(5, 7, KEY_F),
+	KEY(5, 6, KEY_G),
 	KEY(5, 5, KEY_H),
-	KEY(4, 5, KEY_J),
-	KEY(3, 5, KEY_K),
-	KEY(2, 5, KEY_L),
+	KEY(5, 4, KEY_J),
+	KEY(5, 3, KEY_K),
+	KEY(5, 2, KEY_L),
 
-	KEY(1, 5, KEY_Z),
-	KEY(0, 5, KEY_X),
-	KEY(7, 6, KEY_C),
+	KEY(5, 1, KEY_Z),
+	KEY(5, 0, KEY_X),
+	KEY(6, 7, KEY_C),
 	KEY(6, 6, KEY_V),
-	KEY(5, 6, KEY_B),
-	KEY(4, 6, KEY_N),
-	KEY(3, 6, KEY_M),
-	KEY(2, 6, KEY_SPACE),
+	KEY(6, 5, KEY_B),
+	KEY(6, 4, KEY_N),
+	KEY(6, 3, KEY_M),
+	KEY(6, 2, KEY_SPACE),
 
-	KEY(0, 7, KEY_LEFTSHIFT),	/* Vol up    */
-	KEY(3, 7, KEY_LEFTCTRL),	/* Vol down  */
-
-	0
+	KEY(7, 0, KEY_LEFTSHIFT),	/* Vol up    */
+	KEY(7, 3, KEY_LEFTCTRL),	/* Vol down  */
 };
 
 void ams_delta_latch1_write(u8 mask, u8 value)
@@ -140,7 +139,6 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct map_desc ams_delta_io_desc[] __initdata = {
@@ -189,11 +187,15 @@
 	},
 };
 
+static const struct matrix_keymap_data ams_delta_keymap_data = {
+	.keymap		= ams_delta_keymap,
+	.keymap_size	= ARRAY_SIZE(ams_delta_keymap),
+};
+
 static struct omap_kp_platform_data ams_delta_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap 	= ams_delta_keymap,
-	.keymapsize	= ARRAY_SIZE(ams_delta_keymap),
+	.keymap_data	= &ams_delta_keymap_data,
 	.delay		= 9,
 };
 
@@ -307,16 +309,14 @@
 #endif
 	platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
 
-#ifdef CONFIG_AMS_DELTA_FIQ
 	ams_delta_init_fiq();
-#endif
 
 	omap_writew(omap_readw(ARM_RSTCT1) | 0x0004, ARM_RSTCT1);
 }
 
 static struct plat_serial8250_port ams_delta_modem_ports[] = {
 	{
-		.membase	= (void *) AMS_DELTA_MODEM_VIRT,
+		.membase	= IOMEM(AMS_DELTA_MODEM_VIRT),
 		.mapbase	= AMS_DELTA_MODEM_PHYS,
 		.irq		= -EINVAL, /* changed later */
 		.flags		= UPF_BOOT_AUTOCONF,
@@ -340,6 +340,9 @@
 {
 	int err;
 
+	if (!machine_is_ams_delta())
+		return -ENODEV;
+
 	omap_cfg_reg(M14_1510_GPIO2);
 	ams_delta_modem_ports[0].irq =
 			gpio_to_irq(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
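
The keymap conversions in this and the following board files share one shape: the old int arrays terminated by 0 become const unsigned int tables wrapped in a struct matrix_keymap_data. A minimal sketch of how such a table is typically unpacked, assuming the KEY_ROW/KEY_COL/KEY_VAL helpers from <linux/input/matrix_keypad.h>; the function name is illustrative, not part of this patch.

#include <linux/kernel.h>
#include <linux/input/matrix_keypad.h>

static void example_dump_keymap(const struct matrix_keymap_data *data)
{
	unsigned int i;

	for (i = 0; i < data->keymap_size; i++) {
		unsigned int entry = data->keymap[i];

		/* KEY(row, col, code) packs the row into bits 31..24,
		 * the column into bits 23..16 and the keycode below. */
		pr_debug("row %u col %u -> keycode %u\n",
			 KEY_ROW(entry), KEY_COL(entry), KEY_VAL(entry));
	}
}
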
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 149fdd3..0efb9db 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -69,36 +69,35 @@
 #define fsample_cpld_clear(bit) \
     fsample_cpld_write(0xf0 | ((bit) & 15), FSAMPLE_CPLD_SET_CLR)
 
-static int fsample_keymap[] = {
-	KEY(0,0,KEY_UP),
-	KEY(0,1,KEY_RIGHT),
-	KEY(0,2,KEY_LEFT),
-	KEY(0,3,KEY_DOWN),
-	KEY(0,4,KEY_ENTER),
-	KEY(1,0,KEY_F10),
-	KEY(1,1,KEY_SEND),
-	KEY(1,2,KEY_END),
-	KEY(1,3,KEY_VOLUMEDOWN),
-	KEY(1,4,KEY_VOLUMEUP),
-	KEY(1,5,KEY_RECORD),
-	KEY(2,0,KEY_F9),
-	KEY(2,1,KEY_3),
-	KEY(2,2,KEY_6),
-	KEY(2,3,KEY_9),
-	KEY(2,4,KEY_KPDOT),
-	KEY(3,0,KEY_BACK),
-	KEY(3,1,KEY_2),
-	KEY(3,2,KEY_5),
-	KEY(3,3,KEY_8),
-	KEY(3,4,KEY_0),
-	KEY(3,5,KEY_KPSLASH),
-	KEY(4,0,KEY_HOME),
-	KEY(4,1,KEY_1),
-	KEY(4,2,KEY_4),
-	KEY(4,3,KEY_7),
-	KEY(4,4,KEY_KPASTERISK),
-	KEY(4,5,KEY_POWER),
-	0
+static const unsigned int fsample_keymap[] = {
+	KEY(0, 0, KEY_UP),
+	KEY(1, 0, KEY_RIGHT),
+	KEY(2, 0, KEY_LEFT),
+	KEY(3, 0, KEY_DOWN),
+	KEY(4, 0, KEY_ENTER),
+	KEY(0, 1, KEY_F10),
+	KEY(1, 1, KEY_SEND),
+	KEY(2, 1, KEY_END),
+	KEY(3, 1, KEY_VOLUMEDOWN),
+	KEY(4, 1, KEY_VOLUMEUP),
+	KEY(5, 1, KEY_RECORD),
+	KEY(0, 2, KEY_F9),
+	KEY(1, 2, KEY_3),
+	KEY(2, 2, KEY_6),
+	KEY(3, 2, KEY_9),
+	KEY(4, 2, KEY_KPDOT),
+	KEY(0, 3, KEY_BACK),
+	KEY(1, 3, KEY_2),
+	KEY(2, 3, KEY_5),
+	KEY(3, 3, KEY_8),
+	KEY(4, 3, KEY_0),
+	KEY(5, 3, KEY_KPSLASH),
+	KEY(0, 4, KEY_HOME),
+	KEY(1, 4, KEY_1),
+	KEY(2, 4, KEY_4),
+	KEY(3, 4, KEY_7),
+	KEY(4, 4, KEY_KPASTERISK),
+	KEY(5, 4, KEY_POWER),
 };
 
 static struct smc91x_platdata smc91x_info = {
@@ -120,6 +119,15 @@
 	},
 };
 
+static void __init fsample_init_smc91x(void)
+{
+	fpga_write(1, H2P2_DBG_FPGA_LAN_RESET);
+	mdelay(50);
+	fpga_write(fpga_read(H2P2_DBG_FPGA_LAN_RESET) & ~1,
+		   H2P2_DBG_FPGA_LAN_RESET);
+	mdelay(50);
+}
+
 static struct mtd_partition nor_partitions[] = {
 	/* bootloader (U-Boot, etc) in first sector */
 	{
@@ -244,11 +252,15 @@
 	},
 };
 
+static const struct matrix_keymap_data fsample_keymap_data = {
+	.keymap		= fsample_keymap,
+	.keymap_size	= ARRAY_SIZE(fsample_keymap),
+};
+
 static struct omap_kp_platform_data kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= fsample_keymap,
-	.keymapsize	= ARRAY_SIZE(fsample_keymap),
+	.keymap_data	= &fsample_keymap_data,
 	.delay		= 4,
 };
 
@@ -285,6 +297,8 @@
 
 static void __init omap_fsample_init(void)
 {
+	fsample_init_smc91x();
+
 	if (gpio_request(FSAMPLE_NAND_RB_GPIO_PIN, "NAND ready") < 0)
 		BUG();
 	gpio_direction_input(FSAMPLE_NAND_RB_GPIO_PIN);
@@ -312,21 +326,10 @@
 	omap_register_i2c_bus(1, 100, NULL, 0);
 }
 
-static void __init fsample_init_smc91x(void)
-{
-	fpga_write(1, H2P2_DBG_FPGA_LAN_RESET);
-	mdelay(50);
-	fpga_write(fpga_read(H2P2_DBG_FPGA_LAN_RESET) & ~1,
-		   H2P2_DBG_FPGA_LAN_RESET);
-	mdelay(50);
-}
-
 static void __init omap_fsample_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
-	fsample_init_smc91x();
 }
 
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 197adb4..28b84aa 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -52,43 +52,42 @@
 /* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
 #define OMAP1610_ETHR_START		0x04000300
 
-static int h2_keymap[] = {
+static const unsigned int h2_keymap[] = {
 	KEY(0, 0, KEY_LEFT),
-	KEY(0, 1, KEY_RIGHT),
-	KEY(0, 2, KEY_3),
-	KEY(0, 3, KEY_F10),
-	KEY(0, 4, KEY_F5),
-	KEY(0, 5, KEY_9),
-	KEY(1, 0, KEY_DOWN),
+	KEY(1, 0, KEY_RIGHT),
+	KEY(2, 0, KEY_3),
+	KEY(3, 0, KEY_F10),
+	KEY(4, 0, KEY_F5),
+	KEY(5, 0, KEY_9),
+	KEY(0, 1, KEY_DOWN),
 	KEY(1, 1, KEY_UP),
-	KEY(1, 2, KEY_2),
-	KEY(1, 3, KEY_F9),
-	KEY(1, 4, KEY_F7),
-	KEY(1, 5, KEY_0),
-	KEY(2, 0, KEY_ENTER),
-	KEY(2, 1, KEY_6),
+	KEY(2, 1, KEY_2),
+	KEY(3, 1, KEY_F9),
+	KEY(4, 1, KEY_F7),
+	KEY(5, 1, KEY_0),
+	KEY(0, 2, KEY_ENTER),
+	KEY(1, 2, KEY_6),
 	KEY(2, 2, KEY_1),
-	KEY(2, 3, KEY_F2),
-	KEY(2, 4, KEY_F6),
-	KEY(2, 5, KEY_HOME),
-	KEY(3, 0, KEY_8),
-	KEY(3, 1, KEY_5),
-	KEY(3, 2, KEY_F12),
+	KEY(3, 2, KEY_F2),
+	KEY(4, 2, KEY_F6),
+	KEY(5, 2, KEY_HOME),
+	KEY(0, 3, KEY_8),
+	KEY(1, 3, KEY_5),
+	KEY(2, 3, KEY_F12),
 	KEY(3, 3, KEY_F3),
-	KEY(3, 4, KEY_F8),
-	KEY(3, 5, KEY_END),
-	KEY(4, 0, KEY_7),
-	KEY(4, 1, KEY_4),
-	KEY(4, 2, KEY_F11),
-	KEY(4, 3, KEY_F1),
+	KEY(4, 3, KEY_F8),
+	KEY(5, 3, KEY_END),
+	KEY(0, 4, KEY_7),
+	KEY(1, 4, KEY_4),
+	KEY(2, 4, KEY_F11),
+	KEY(3, 4, KEY_F1),
 	KEY(4, 4, KEY_F4),
-	KEY(4, 5, KEY_ESC),
-	KEY(5, 0, KEY_F13),
-	KEY(5, 1, KEY_F14),
-	KEY(5, 2, KEY_F15),
-	KEY(5, 3, KEY_F16),
-	KEY(5, 4, KEY_SLEEP),
-	0
+	KEY(5, 4, KEY_ESC),
+	KEY(0, 5, KEY_F13),
+	KEY(1, 5, KEY_F14),
+	KEY(2, 5, KEY_F15),
+	KEY(3, 5, KEY_F16),
+	KEY(4, 5, KEY_SLEEP),
 };
 
 static struct mtd_partition h2_nor_partitions[] = {
@@ -270,14 +269,18 @@
 	},
 };
 
+static const struct matrix_keymap_data h2_keymap_data = {
+	.keymap		= h2_keymap,
+	.keymap_size	= ARRAY_SIZE(h2_keymap),
+};
+
 static struct omap_kp_platform_data h2_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= h2_keymap,
-	.keymapsize	= ARRAY_SIZE(h2_keymap),
-	.rep		= 1,
+	.keymap_data	= &h2_keymap_data,
+	.rep		= true,
 	.delay		= 9,
-	.dbounce	= 1,
+	.dbounce	= true,
 };
 
 static struct platform_device h2_kp_device = {
@@ -374,8 +377,6 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
-	h2_init_smc91x();
 }
 
 static struct omap_usb_config h2_usb_config __initdata = {
@@ -403,6 +404,8 @@
 
 static void __init h2_init(void)
 {
+	h2_init_smc91x();
+
 	/* Here we assume the NOR boot config:  NOR on CS3 (possibly swapped
 	 * to address 0 by a dip switch), NAND on CS2B.  The NAND driver will
 	 * notice whether a NAND chip is enabled at probe time.
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 9126e3e..dbc8b8d 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -56,43 +56,42 @@
 
 #define H3_TS_GPIO	48
 
-static int h3_keymap[] = {
+static const unsigned int h3_keymap[] = {
 	KEY(0, 0, KEY_LEFT),
-	KEY(0, 1, KEY_RIGHT),
-	KEY(0, 2, KEY_3),
-	KEY(0, 3, KEY_F10),
-	KEY(0, 4, KEY_F5),
-	KEY(0, 5, KEY_9),
-	KEY(1, 0, KEY_DOWN),
+	KEY(1, 0, KEY_RIGHT),
+	KEY(2, 0, KEY_3),
+	KEY(3, 0, KEY_F10),
+	KEY(4, 0, KEY_F5),
+	KEY(5, 0, KEY_9),
+	KEY(0, 1, KEY_DOWN),
 	KEY(1, 1, KEY_UP),
-	KEY(1, 2, KEY_2),
-	KEY(1, 3, KEY_F9),
-	KEY(1, 4, KEY_F7),
-	KEY(1, 5, KEY_0),
-	KEY(2, 0, KEY_ENTER),
-	KEY(2, 1, KEY_6),
+	KEY(2, 1, KEY_2),
+	KEY(3, 1, KEY_F9),
+	KEY(4, 1, KEY_F7),
+	KEY(5, 1, KEY_0),
+	KEY(0, 2, KEY_ENTER),
+	KEY(1, 2, KEY_6),
 	KEY(2, 2, KEY_1),
-	KEY(2, 3, KEY_F2),
-	KEY(2, 4, KEY_F6),
-	KEY(2, 5, KEY_HOME),
-	KEY(3, 0, KEY_8),
-	KEY(3, 1, KEY_5),
-	KEY(3, 2, KEY_F12),
+	KEY(3, 2, KEY_F2),
+	KEY(4, 2, KEY_F6),
+	KEY(5, 2, KEY_HOME),
+	KEY(0, 3, KEY_8),
+	KEY(1, 3, KEY_5),
+	KEY(2, 3, KEY_F12),
 	KEY(3, 3, KEY_F3),
-	KEY(3, 4, KEY_F8),
-	KEY(3, 5, KEY_END),
-	KEY(4, 0, KEY_7),
-	KEY(4, 1, KEY_4),
-	KEY(4, 2, KEY_F11),
-	KEY(4, 3, KEY_F1),
+	KEY(4, 3, KEY_F8),
+	KEY(5, 3, KEY_END),
+	KEY(0, 4, KEY_7),
+	KEY(1, 4, KEY_4),
+	KEY(2, 4, KEY_F11),
+	KEY(3, 4, KEY_F1),
 	KEY(4, 4, KEY_F4),
-	KEY(4, 5, KEY_ESC),
-	KEY(5, 0, KEY_F13),
-	KEY(5, 1, KEY_F14),
-	KEY(5, 2, KEY_F15),
-	KEY(5, 3, KEY_F16),
-	KEY(5, 4, KEY_SLEEP),
-	0
+	KEY(5, 4, KEY_ESC),
+	KEY(0, 5, KEY_F13),
+	KEY(1, 5, KEY_F14),
+	KEY(2, 5, KEY_F15),
+	KEY(3, 5, KEY_F16),
+	KEY(4, 5, KEY_SLEEP),
 };
 
 
@@ -264,6 +263,15 @@
 	.resource	= smc91x_resources,
 };
 
+static void __init h3_init_smc91x(void)
+{
+	omap_cfg_reg(W15_1710_GPIO40);
+	if (gpio_request(40, "SMC91x irq") < 0) {
+		printk("Error requesting gpio 40 for smc91x irq\n");
+		return;
+	}
+}
+
 #define GPTIMER_BASE		0xFFFB1400
 #define GPTIMER_REGS(x)	(0xFFFB1400 + (x * 0x800))
 #define GPTIMER_REGS_SIZE	0x46
@@ -296,14 +304,18 @@
 	},
 };
 
+static const struct matrix_keymap_data h3_keymap_data = {
+	.keymap		= h3_keymap,
+	.keymap_size	= ARRAY_SIZE(h3_keymap),
+};
+
 static struct omap_kp_platform_data h3_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= h3_keymap,
-	.keymapsize	= ARRAY_SIZE(h3_keymap),
-	.rep		= 1,
+	.keymap_data	= &h3_keymap_data,
+	.rep		= true,
 	.delay		= 9,
-	.dbounce	= 1,
+	.dbounce	= true,
 };
 
 static struct platform_device h3_kp_device = {
@@ -376,6 +388,8 @@
 
 static void __init h3_init(void)
 {
+	h3_init_smc91x();
+
 	/* Here we assume the NOR boot config:  NOR on CS3 (possibly swapped
 	 * to address 0 by a dip switch), NAND on CS2B.  The NAND driver will
 	 * notice whether a NAND chip is enabled at probe time.
@@ -422,21 +436,10 @@
 	h3_mmc_init();
 }
 
-static void __init h3_init_smc91x(void)
-{
-	omap_cfg_reg(W15_1710_GPIO40);
-	if (gpio_request(40, "SMC91x irq") < 0) {
-		printk("Error requesting gpio 40 for smc91x irq\n");
-		return;
-	}
-}
-
 static void __init h3_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
-	h3_init_smc91x();
 }
 
 static void __init h3_map_io(void)
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 071af3e..f2c5c58 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -180,64 +180,68 @@
 
 /* Keyboard definition */
 
-static int htc_herald_keymap[] = {
+static const unsigned int htc_herald_keymap[] = {
 	KEY(0, 0, KEY_RECORD), /* Mail button */
-	KEY(0, 1, KEY_CAMERA), /* Camera */
-	KEY(0, 2, KEY_PHONE), /* Send key */
-	KEY(0, 3, KEY_VOLUMEUP), /* Volume up */
-	KEY(0, 4, KEY_F2),  /* Right bar (landscape) */
-	KEY(0, 5, KEY_MAIL), /* Win key (portrait) */
-	KEY(0, 6, KEY_DIRECTORY), /* Right bar (protrait) */
-	KEY(1, 0, KEY_LEFTCTRL), /* Windows key */
+	KEY(1, 0, KEY_CAMERA), /* Camera */
+	KEY(2, 0, KEY_PHONE), /* Send key */
+	KEY(3, 0, KEY_VOLUMEUP), /* Volume up */
+	KEY(4, 0, KEY_F2),  /* Right bar (landscape) */
+	KEY(5, 0, KEY_MAIL), /* Win key (portrait) */
+	KEY(6, 0, KEY_DIRECTORY), /* Right bar (portrait) */
+	KEY(0, 1, KEY_LEFTCTRL), /* Windows key */
 	KEY(1, 1, KEY_COMMA),
-	KEY(1, 2, KEY_M),
-	KEY(1, 3, KEY_K),
-	KEY(1, 4, KEY_SLASH), /* OK key */
-	KEY(1, 5, KEY_I),
-	KEY(1, 6, KEY_U),
-	KEY(2, 0, KEY_LEFTALT),
-	KEY(2, 1, KEY_TAB),
+	KEY(2, 1, KEY_M),
+	KEY(3, 1, KEY_K),
+	KEY(4, 1, KEY_SLASH), /* OK key */
+	KEY(5, 1, KEY_I),
+	KEY(6, 1, KEY_U),
+	KEY(0, 2, KEY_LEFTALT),
+	KEY(1, 2, KEY_TAB),
 	KEY(2, 2, KEY_N),
-	KEY(2, 3, KEY_J),
-	KEY(2, 4, KEY_ENTER),
-	KEY(2, 5, KEY_H),
-	KEY(2, 6, KEY_Y),
-	KEY(3, 0, KEY_SPACE),
-	KEY(3, 1, KEY_L),
-	KEY(3, 2, KEY_B),
+	KEY(3, 2, KEY_J),
+	KEY(4, 2, KEY_ENTER),
+	KEY(5, 2, KEY_H),
+	KEY(6, 2, KEY_Y),
+	KEY(0, 3, KEY_SPACE),
+	KEY(1, 3, KEY_L),
+	KEY(2, 3, KEY_B),
 	KEY(3, 3, KEY_V),
-	KEY(3, 4, KEY_BACKSPACE),
-	KEY(3, 5, KEY_G),
-	KEY(3, 6, KEY_T),
-	KEY(4, 0, KEY_CAPSLOCK), /* Shift */
-	KEY(4, 1, KEY_C),
-	KEY(4, 2, KEY_F),
-	KEY(4, 3, KEY_R),
+	KEY(4, 3, KEY_BACKSPACE),
+	KEY(5, 3, KEY_G),
+	KEY(6, 3, KEY_T),
+	KEY(0, 4, KEY_CAPSLOCK), /* Shift */
+	KEY(1, 4, KEY_C),
+	KEY(2, 4, KEY_F),
+	KEY(3, 4, KEY_R),
 	KEY(4, 4, KEY_O),
-	KEY(4, 5, KEY_E),
-	KEY(4, 6, KEY_D),
-	KEY(5, 0, KEY_X),
-	KEY(5, 1, KEY_Z),
-	KEY(5, 2, KEY_S),
-	KEY(5, 3, KEY_W),
-	KEY(5, 4, KEY_P),
+	KEY(5, 4, KEY_E),
+	KEY(6, 4, KEY_D),
+	KEY(0, 5, KEY_X),
+	KEY(1, 5, KEY_Z),
+	KEY(2, 5, KEY_S),
+	KEY(3, 5, KEY_W),
+	KEY(4, 5, KEY_P),
 	KEY(5, 5, KEY_Q),
-	KEY(5, 6, KEY_A),
-	KEY(6, 0, KEY_CONNECT), /* Voice button */
-	KEY(6, 2, KEY_CANCEL), /* End key */
-	KEY(6, 3, KEY_VOLUMEDOWN), /* Volume down */
-	KEY(6, 4, KEY_F1), /* Left bar (landscape) */
-	KEY(6, 5, KEY_WWW), /* OK button (portrait) */
+	KEY(6, 5, KEY_A),
+	KEY(0, 6, KEY_CONNECT), /* Voice button */
+	KEY(2, 6, KEY_CANCEL), /* End key */
+	KEY(3, 6, KEY_VOLUMEDOWN), /* Volume down */
+	KEY(4, 6, KEY_F1), /* Left bar (landscape) */
+	KEY(5, 6, KEY_WWW), /* OK button (portrait) */
 	KEY(6, 6, KEY_CALENDAR), /* Left bar (portrait) */
-	0
 };
 
-struct omap_kp_platform_data htcherald_kp_data = {
+static const struct matrix_keymap_data htc_herald_keymap_data = {
+	.keymap		= htc_herald_keymap,
+	.keymap_size	= ARRAY_SIZE(htc_herald_keymap),
+};
+
+static struct omap_kp_platform_data htcherald_kp_data = {
 	.rows	= 7,
 	.cols	= 7,
 	.delay = 20,
-	.rep = 1,
-	.keymap = htc_herald_keymap,
+	.rep = true,
+	.keymap_data = &htc_herald_keymap_data,
 };
 
 static struct resource kp_resources[] = {
@@ -278,7 +282,7 @@
 static struct gpio_keys_platform_data herald_gpio_keys_data = {
 	.buttons	= herald_gpio_keys_table,
 	.nbuttons	= ARRAY_SIZE(herald_gpio_keys_table),
-	.rep		= 1,
+	.rep		= true,
 };
 
 static struct platform_device herald_gpiokeys_device = {
@@ -439,7 +443,7 @@
 	.keep_vref_on		= 1,
 	.x_plate_ohms		= 496,
 	.gpio_pendown		= HTCHERALD_GPIO_TS,
-	.pressure_max		= 100000,
+	.pressure_max		= 10000,
 	.pressure_min		= 5000,
 	.x_min			= 528,
 	.x_max			= 3760,
@@ -577,8 +581,6 @@
 	printk(KERN_INFO "HTC Herald init.\n");
 
 	/* Do board initialization before we register all the devices */
-	omap_gpio_init();
-
 	omap_board_config = htcherald_config;
 	omap_board_config_size = ARRAY_SIZE(htcherald_config);
 	platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index dc2b86f..a36e674 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -44,17 +44,16 @@
 /* At OMAP1610 Innovator the Ethernet is directly connected to CS1 */
 #define INNOVATOR1610_ETHR_START	0x04000300
 
-static int innovator_keymap[] = {
+static const unsigned int innovator_keymap[] = {
 	KEY(0, 0, KEY_F1),
-	KEY(0, 3, KEY_DOWN),
+	KEY(3, 0, KEY_DOWN),
 	KEY(1, 1, KEY_F2),
-	KEY(1, 2, KEY_RIGHT),
-	KEY(2, 0, KEY_F3),
-	KEY(2, 1, KEY_F4),
+	KEY(2, 1, KEY_RIGHT),
+	KEY(0, 2, KEY_F3),
+	KEY(1, 2, KEY_F4),
 	KEY(2, 2, KEY_UP),
-	KEY(3, 2, KEY_ENTER),
+	KEY(2, 3, KEY_ENTER),
 	KEY(3, 3, KEY_LEFT),
-	0
 };
 
 static struct mtd_partition innovator_partitions[] = {
@@ -126,11 +125,15 @@
 	},
 };
 
+static const struct matrix_keymap_data innovator_keymap_data = {
+	.keymap		= innovator_keymap,
+	.keymap_size	= ARRAY_SIZE(innovator_keymap),
+};
+
 static struct omap_kp_platform_data innovator_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= innovator_keymap,
-	.keymapsize	= ARRAY_SIZE(innovator_keymap),
+	.keymap_data	= &innovator_keymap_data,
 	.delay		= 4,
 };
 
@@ -290,13 +293,6 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
-#ifdef CONFIG_ARCH_OMAP15XX
-	if (cpu_is_omap1510()) {
-		omap1510_fpga_init_irq();
-	}
-#endif
-	innovator_init_smc91x();
 }
 
 #ifdef CONFIG_ARCH_OMAP15XX
@@ -387,6 +383,10 @@
 
 static void __init innovator_init(void)
 {
+	if (cpu_is_omap1510())
+		omap1510_fpga_init_irq();
+	innovator_init_smc91x();
+
 #ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
 		unsigned char reg;
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index aa8375b..d21f09d 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -54,19 +54,18 @@
 	omap_init_irq();
 }
 
-static int nokia770_keymap[] = {
-	KEY(0, 1, GROUP_0 | KEY_UP),
-	KEY(0, 2, GROUP_1 | KEY_F5),
-	KEY(1, 0, GROUP_0 | KEY_LEFT),
+static const unsigned int nokia770_keymap[] = {
+	KEY(1, 0, GROUP_0 | KEY_UP),
+	KEY(2, 0, GROUP_1 | KEY_F5),
+	KEY(0, 1, GROUP_0 | KEY_LEFT),
 	KEY(1, 1, GROUP_0 | KEY_ENTER),
-	KEY(1, 2, GROUP_0 | KEY_RIGHT),
-	KEY(2, 0, GROUP_1 | KEY_ESC),
-	KEY(2, 1, GROUP_0 | KEY_DOWN),
+	KEY(2, 1, GROUP_0 | KEY_RIGHT),
+	KEY(0, 2, GROUP_1 | KEY_ESC),
+	KEY(1, 2, GROUP_0 | KEY_DOWN),
 	KEY(2, 2, GROUP_1 | KEY_F4),
-	KEY(3, 0, GROUP_2 | KEY_F7),
-	KEY(3, 1, GROUP_2 | KEY_F8),
-	KEY(3, 2, GROUP_2 | KEY_F6),
-	0
+	KEY(0, 3, GROUP_2 | KEY_F7),
+	KEY(1, 3, GROUP_2 | KEY_F8),
+	KEY(2, 3, GROUP_2 | KEY_F6),
 };
 
 static struct resource nokia770_kp_resources[] = {
@@ -77,11 +76,15 @@
 	},
 };
 
+static const struct matrix_keymap_data nokia770_keymap_data = {
+	.keymap		= nokia770_keymap,
+	.keymap_size	= ARRAY_SIZE(nokia770_keymap),
+};
+
 static struct omap_kp_platform_data nokia770_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= nokia770_keymap,
-	.keymapsize	= ARRAY_SIZE(nokia770_keymap),
+	.keymap_data	= &nokia770_keymap_data,
 	.delay		= 4,
 };
 
@@ -246,7 +249,6 @@
 	platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
 	spi_register_board_info(nokia770_spi_board_info,
 				ARRAY_SIZE(nokia770_spi_board_info));
-	omap_gpio_init();
 	omap_serial_init();
 	omap_register_i2c_bus(1, 100, NULL, 0);
 	hwa742_dev_init();
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index e9dd791..7c5e211 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -283,9 +283,6 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
-	osk_init_smc91x();
-	osk_init_cf();
 }
 
 static struct omap_usb_config osk_usb_config __initdata = {
@@ -341,25 +338,28 @@
 	 */
 };
 
-static const int osk_keymap[] = {
+static const unsigned int osk_keymap[] = {
 	/* KEY(col, row, code) */
 	KEY(0, 0, KEY_F1),		/* SW4 */
-	KEY(0, 3, KEY_UP),		/* (sw2/up) */
+	KEY(3, 0, KEY_UP),		/* (sw2/up) */
 	KEY(1, 1, KEY_LEFTCTRL),	/* SW5 */
-	KEY(1, 2, KEY_LEFT),		/* (sw2/left) */
-	KEY(2, 0, KEY_SPACE),		/* SW3 */
-	KEY(2, 1, KEY_ESC),		/* SW6 */
+	KEY(2, 1, KEY_LEFT),		/* (sw2/left) */
+	KEY(0, 2, KEY_SPACE),		/* SW3 */
+	KEY(1, 2, KEY_ESC),		/* SW6 */
 	KEY(2, 2, KEY_DOWN),		/* (sw2/down) */
-	KEY(3, 2, KEY_ENTER),		/* (sw2/select) */
+	KEY(2, 3, KEY_ENTER),		/* (sw2/select) */
 	KEY(3, 3, KEY_RIGHT),		/* (sw2/right) */
-	0
+};
+
+static const struct matrix_keymap_data osk_keymap_data = {
+	.keymap		= osk_keymap,
+	.keymap_size	= ARRAY_SIZE(osk_keymap),
 };
 
 static struct omap_kp_platform_data osk_kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= (int *) osk_keymap,
-	.keymapsize	= ARRAY_SIZE(osk_keymap),
+	.keymap_data	= &osk_keymap_data,
 	.delay		= 9,
 };
 
@@ -541,6 +541,9 @@
 {
 	u32 l;
 
+	osk_init_smc91x();
+	osk_init_cf();
+
 	/* Workaround for wrong CS3 (NOR flash) timing
 	 * There are some U-Boot versions out there which configure
 	 * wrong CS3 memory timings. This mainly leads to CRC
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index f32738b..fb51ce6 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -63,28 +63,31 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
 }
 
-static const int palmte_keymap[] = {
+static const unsigned int palmte_keymap[] = {
 	KEY(0, 0, KEY_F1),		/* Calendar */
-	KEY(0, 1, KEY_F2),		/* Contacts */
-	KEY(0, 2, KEY_F3),		/* Tasks List */
-	KEY(0, 3, KEY_F4),		/* Note Pad */
-	KEY(0, 4, KEY_POWER),
-	KEY(1, 0, KEY_LEFT),
+	KEY(1, 0, KEY_F2),		/* Contacts */
+	KEY(2, 0, KEY_F3),		/* Tasks List */
+	KEY(3, 0, KEY_F4),		/* Note Pad */
+	KEY(4, 0, KEY_POWER),
+	KEY(0, 1, KEY_LEFT),
 	KEY(1, 1, KEY_DOWN),
-	KEY(1, 2, KEY_UP),
-	KEY(1, 3, KEY_RIGHT),
-	KEY(1, 4, KEY_ENTER),
-	0,
+	KEY(2, 1, KEY_UP),
+	KEY(3, 1, KEY_RIGHT),
+	KEY(4, 1, KEY_ENTER),
+};
+
+static const struct matrix_keymap_data palmte_keymap_data = {
+	.keymap		= palmte_keymap,
+	.keymap_size	= ARRAY_SIZE(palmte_keymap),
 };
 
 static struct omap_kp_platform_data palmte_kp_data = {
 	.rows	= 8,
 	.cols	= 8,
-	.keymap = (int *) palmte_keymap,
-	.rep	= 1,
+	.keymap_data = &palmte_keymap_data,
+	.rep	= true,
 	.delay	= 12,
 };
 
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index ed1400a..f04f2d3 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -51,19 +51,18 @@
 #define PALMTT_MMC_WP_GPIO	8
 #define PALMTT_HDQ_GPIO		11
 
-static int palmtt_keymap[] = {
+static const unsigned int palmtt_keymap[] = {
 	KEY(0, 0, KEY_ESC),
-	KEY(0, 1, KEY_SPACE),
-	KEY(0, 2, KEY_LEFTCTRL),
-	KEY(0, 3, KEY_TAB),
-	KEY(0, 4, KEY_ENTER),
-	KEY(1, 0, KEY_LEFT),
+	KEY(1, 0, KEY_SPACE),
+	KEY(2, 0, KEY_LEFTCTRL),
+	KEY(3, 0, KEY_TAB),
+	KEY(4, 0, KEY_ENTER),
+	KEY(0, 1, KEY_LEFT),
 	KEY(1, 1, KEY_DOWN),
-	KEY(1, 2, KEY_UP),
-	KEY(1, 3, KEY_RIGHT),
-	KEY(2, 0, KEY_SLEEP),
-	KEY(2, 4, KEY_Y),
-	0
+	KEY(2, 1, KEY_UP),
+	KEY(3, 1, KEY_RIGHT),
+	KEY(0, 2, KEY_SLEEP),
+	KEY(4, 2, KEY_Y),
 };
 
 static struct mtd_partition palmtt_partitions[] = {
@@ -136,10 +135,15 @@
 	},
 };
 
+static const struct matrix_keymap_data palmtt_keymap_data = {
+	.keymap		= palmtt_keymap,
+	.keymap_size	= ARRAY_SIZE(palmtt_keymap),
+};
+
 static struct omap_kp_platform_data palmtt_kp_data = {
 	.rows	= 6,
 	.cols	= 3,
-	.keymap = palmtt_keymap,
+	.keymap_data = &palmtt_keymap_data,
 };
 
 static struct platform_device palmtt_kp_device = {
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index d7a245c..d7bbbe7 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -62,29 +62,32 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
 }
 
-static int palmz71_keymap[] = {
+static const unsigned int palmz71_keymap[] = {
 	KEY(0, 0, KEY_F1),
-	KEY(0, 1, KEY_F2),
-	KEY(0, 2, KEY_F3),
-	KEY(0, 3, KEY_F4),
-	KEY(0, 4, KEY_POWER),
-	KEY(1, 0, KEY_LEFT),
+	KEY(1, 0, KEY_F2),
+	KEY(2, 0, KEY_F3),
+	KEY(3, 0, KEY_F4),
+	KEY(4, 0, KEY_POWER),
+	KEY(0, 1, KEY_LEFT),
 	KEY(1, 1, KEY_DOWN),
-	KEY(1, 2, KEY_UP),
-	KEY(1, 3, KEY_RIGHT),
-	KEY(1, 4, KEY_ENTER),
-	KEY(2, 0, KEY_CAMERA),
-	0,
+	KEY(2, 1, KEY_UP),
+	KEY(3, 1, KEY_RIGHT),
+	KEY(4, 1, KEY_ENTER),
+	KEY(0, 2, KEY_CAMERA),
+};
+
+static const struct matrix_keymap_data palmz71_keymap_data = {
+	.keymap		= palmz71_keymap,
+	.keymap_size	= ARRAY_SIZE(palmz71_keymap),
 };
 
 static struct omap_kp_platform_data palmz71_kp_data = {
 	.rows	= 8,
 	.cols	= 8,
-	.keymap	= palmz71_keymap,
-	.rep	= 1,
+	.keymap_data	= &palmz71_keymap_data,
+	.rep	= true,
 	.delay	= 80,
 };
 
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index a8d16a2..3c8ee84 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -36,36 +36,35 @@
 #include <plat/common.h>
 #include <plat/board.h>
 
-static int p2_keymap[] = {
-	KEY(0,0,KEY_UP),
-	KEY(0,1,KEY_RIGHT),
-	KEY(0,2,KEY_LEFT),
-	KEY(0,3,KEY_DOWN),
-	KEY(0,4,KEY_ENTER),
-	KEY(1,0,KEY_F10),
-	KEY(1,1,KEY_SEND),
-	KEY(1,2,KEY_END),
-	KEY(1,3,KEY_VOLUMEDOWN),
-	KEY(1,4,KEY_VOLUMEUP),
-	KEY(1,5,KEY_RECORD),
-	KEY(2,0,KEY_F9),
-	KEY(2,1,KEY_3),
-	KEY(2,2,KEY_6),
-	KEY(2,3,KEY_9),
-	KEY(2,4,KEY_KPDOT),
-	KEY(3,0,KEY_BACK),
-	KEY(3,1,KEY_2),
-	KEY(3,2,KEY_5),
-	KEY(3,3,KEY_8),
-	KEY(3,4,KEY_0),
-	KEY(3,5,KEY_KPSLASH),
-	KEY(4,0,KEY_HOME),
-	KEY(4,1,KEY_1),
-	KEY(4,2,KEY_4),
-	KEY(4,3,KEY_7),
-	KEY(4,4,KEY_KPASTERISK),
-	KEY(4,5,KEY_POWER),
-	0
+static const unsigned int p2_keymap[] = {
+	KEY(0, 0, KEY_UP),
+	KEY(1, 0, KEY_RIGHT),
+	KEY(2, 0, KEY_LEFT),
+	KEY(3, 0, KEY_DOWN),
+	KEY(4, 0, KEY_ENTER),
+	KEY(0, 1, KEY_F10),
+	KEY(1, 1, KEY_SEND),
+	KEY(2, 1, KEY_END),
+	KEY(3, 1, KEY_VOLUMEDOWN),
+	KEY(4, 1, KEY_VOLUMEUP),
+	KEY(5, 1, KEY_RECORD),
+	KEY(0, 2, KEY_F9),
+	KEY(1, 2, KEY_3),
+	KEY(2, 2, KEY_6),
+	KEY(3, 2, KEY_9),
+	KEY(4, 2, KEY_KPDOT),
+	KEY(0, 3, KEY_BACK),
+	KEY(1, 3, KEY_2),
+	KEY(2, 3, KEY_5),
+	KEY(3, 3, KEY_8),
+	KEY(4, 3, KEY_0),
+	KEY(5, 3, KEY_KPSLASH),
+	KEY(0, 4, KEY_HOME),
+	KEY(1, 4, KEY_1),
+	KEY(2, 4, KEY_4),
+	KEY(3, 4, KEY_7),
+	KEY(4, 4, KEY_KPASTERISK),
+	KEY(5, 4, KEY_POWER),
 };
 
 static struct smc91x_platdata smc91x_info = {
@@ -211,13 +210,17 @@
 	},
 };
 
+static const struct matrix_keymap_data p2_keymap_data = {
+	.keymap		= p2_keymap,
+	.keymap_size	= ARRAY_SIZE(p2_keymap),
+};
+
 static struct omap_kp_platform_data kp_data = {
 	.rows		= 8,
 	.cols		= 8,
-	.keymap		= p2_keymap,
-	.keymapsize	= ARRAY_SIZE(p2_keymap),
+	.keymap_data	= &p2_keymap_data,
 	.delay		= 4,
-	.dbounce	= 1,
+	.dbounce	= true,
 };
 
 static struct platform_device kp_device = {
@@ -251,8 +254,19 @@
 	{ OMAP_TAG_LCD,		&perseus2_lcd_config },
 };
 
+static void __init perseus2_init_smc91x(void)
+{
+	fpga_write(1, H2P2_DBG_FPGA_LAN_RESET);
+	mdelay(50);
+	fpga_write(fpga_read(H2P2_DBG_FPGA_LAN_RESET) & ~1,
+		   H2P2_DBG_FPGA_LAN_RESET);
+	mdelay(50);
+}
+
 static void __init omap_perseus2_init(void)
 {
+	perseus2_init_smc91x();
+
 	if (gpio_request(P2_NAND_RB_GPIO_PIN, "NAND ready") < 0)
 		BUG();
 	gpio_direction_input(P2_NAND_RB_GPIO_PIN);
@@ -280,21 +294,10 @@
 	omap_register_i2c_bus(1, 100, NULL, 0);
 }
 
-static void __init perseus2_init_smc91x(void)
-{
-	fpga_write(1, H2P2_DBG_FPGA_LAN_RESET);
-	mdelay(50);
-	fpga_write(fpga_read(H2P2_DBG_FPGA_LAN_RESET) & ~1,
-		   H2P2_DBG_FPGA_LAN_RESET);
-	mdelay(50);
-}
-
 static void __init omap_perseus2_init_irq(void)
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
-	perseus2_init_smc91x();
 }
 /* Only FPGA needs to be mapped here. All others are done with ioremap */
 static struct map_desc omap_perseus2_io_desc[] __initdata = {
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index d25f59e..d41fe2d 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -164,36 +164,35 @@
 
 /*----------- Keypad -------------------------*/
 
-static int sx1_keymap[] = {
-	KEY(5, 3, GROUP_0 | 117), /* camera Qt::Key_F17 */
-	KEY(0, 4, GROUP_0 | 114), /* voice memo Qt::Key_F14 */
-	KEY(1, 4, GROUP_2 | 114), /* voice memo */
-	KEY(2, 4, GROUP_3 | 114), /* voice memo */
+static const unsigned int sx1_keymap[] = {
+	KEY(3, 5, GROUP_0 | 117), /* camera Qt::Key_F17 */
+	KEY(4, 0, GROUP_0 | 114), /* voice memo Qt::Key_F14 */
+	KEY(4, 1, GROUP_2 | 114), /* voice memo */
+	KEY(4, 2, GROUP_3 | 114), /* voice memo */
 	KEY(0, 0, GROUP_1 | KEY_F12),	/* red button Qt::Key_Hangup */
-	KEY(4, 3, GROUP_1 | KEY_LEFT),
-	KEY(2, 3, GROUP_1 | KEY_DOWN),
-	KEY(1, 3, GROUP_1 | KEY_RIGHT),
-	KEY(0, 3, GROUP_1 | KEY_UP),
+	KEY(3, 4, GROUP_1 | KEY_LEFT),
+	KEY(3, 2, GROUP_1 | KEY_DOWN),
+	KEY(3, 1, GROUP_1 | KEY_RIGHT),
+	KEY(3, 0, GROUP_1 | KEY_UP),
 	KEY(3, 3, GROUP_1 | KEY_POWER), /* joystick press or Qt::Key_Select */
-	KEY(5, 0, GROUP_1 | KEY_1),
-	KEY(4, 0, GROUP_1 | KEY_2),
-	KEY(3, 0, GROUP_1 | KEY_3),
-	KEY(3, 4, GROUP_1 | KEY_4),
+	KEY(0, 5, GROUP_1 | KEY_1),
+	KEY(0, 4, GROUP_1 | KEY_2),
+	KEY(0, 3, GROUP_1 | KEY_3),
+	KEY(4, 3, GROUP_1 | KEY_4),
 	KEY(4, 4, GROUP_1 | KEY_5),
-	KEY(5, 4, GROUP_1 | KEY_KPASTERISK),/* "*" */
-	KEY(4, 1, GROUP_1 | KEY_6),
-	KEY(5, 1, GROUP_1 | KEY_7),
-	KEY(3, 1, GROUP_1 | KEY_8),
-	KEY(3, 2, GROUP_1 | KEY_9),
-	KEY(5, 2, GROUP_1 | KEY_0),
-	KEY(4, 2, GROUP_1 | 113),	/* # F13 Toggle input method Qt::Key_F13 */
-	KEY(0, 1, GROUP_1 | KEY_F11),	/* green button Qt::Key_Call */
-	KEY(1, 2, GROUP_1 | KEY_YEN),	/* left soft Qt::Key_Context1 */
+	KEY(4, 5, GROUP_1 | KEY_KPASTERISK),/* "*" */
+	KEY(1, 4, GROUP_1 | KEY_6),
+	KEY(1, 5, GROUP_1 | KEY_7),
+	KEY(1, 3, GROUP_1 | KEY_8),
+	KEY(2, 3, GROUP_1 | KEY_9),
+	KEY(2, 5, GROUP_1 | KEY_0),
+	KEY(2, 4, GROUP_1 | 113), /* # F13 Toggle input method Qt::Key_F13 */
+	KEY(1, 0, GROUP_1 | KEY_F11),	/* green button Qt::Key_Call */
+	KEY(2, 1, GROUP_1 | KEY_YEN),	/* left soft Qt::Key_Context1 */
 	KEY(2, 2, GROUP_1 | KEY_F8),	/* right soft Qt::Key_Back */
-	KEY(2, 1, GROUP_1 | KEY_LEFTSHIFT), /* shift */
+	KEY(1, 2, GROUP_1 | KEY_LEFTSHIFT), /* shift */
 	KEY(1, 1, GROUP_1 | KEY_BACKSPACE), /* C (clear) */
-	KEY(0, 2, GROUP_1 | KEY_F7),	/* menu Qt::Key_Menu */
-	0
+	KEY(2, 0, GROUP_1 | KEY_F7),	/* menu Qt::Key_Menu */
 };
 
 static struct resource sx1_kp_resources[] = {
@@ -204,11 +203,15 @@
 	},
 };
 
+static const struct matrix_keymap_data sx1_keymap_data = {
+	.keymap		= sx1_keymap,
+	.keymap_size	= ARRAY_SIZE(sx1_keymap),
+};
+
 static struct omap_kp_platform_data sx1_kp_data = {
 	.rows		= 6,
 	.cols		= 6,
-	.keymap	= sx1_keymap,
-	.keymapsize = ARRAY_SIZE(sx1_keymap),
+	.keymap_data	= &sx1_keymap_data,
 	.delay	= 80,
 };
 
@@ -409,7 +412,6 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
 }
 /*----------------------------------------*/
 
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index f5992c2..815a69c 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -83,6 +83,9 @@
 
 static int __init ext_uart_init(void)
 {
+	if (!machine_is_voiceblue())
+		return -ENODEV;
+
 	return platform_device_register(&serial_device);
 }
 arch_initcall(ext_uart_init);
@@ -158,7 +161,6 @@
 {
 	omap1_init_common_hw();
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static void __init voiceblue_init(void)
@@ -236,6 +238,9 @@
 
 static int __init voiceblue_setup(void)
 {
+	if (!machine_is_voiceblue())
+		return -ENODEV;
+
 	/* Setup panic notifier */
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
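
The new machine_is_voiceblue() checks follow a common idiom for multi-board kernels: every initcall linked into the image runs no matter which board actually booted, so board-specific initcalls return early on foreign hardware. A minimal sketch of the idiom; the body is illustrative.

#include <linux/init.h>
#include <asm/mach-types.h>

static int __init example_voiceblue_setup(void)
{
	/* Other boards built into the same image get -ENODEV here
	 * and skip the VoiceBlue-specific setup below. */
	if (!machine_is_voiceblue())
		return -ENODEV;

	/* board-specific initialisation would go here */
	return 0;
}
arch_initcall(example_voiceblue_setup);
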
 
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index af54114..92400b9 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -143,7 +143,7 @@
  * activation.  [ GPIO code for 1510 ]
  */
 static struct clk arm_gpio_ck = {
-	.name		= "arm_gpio_ck",
+	.name		= "ick",
 	.ops		= &clkops_generic,
 	.parent		= &ck_dpll1,
 	.flags		= ENABLE_ON_INIT,
@@ -684,7 +684,7 @@
 	CLK(NULL,	"ck_sossi",	&sossi_ck,	CK_16XX),
 	CLK(NULL,	"arm_ck",	&arm_ck,	CK_16XX | CK_1510 | CK_310),
 	CLK(NULL,	"armper_ck",	&armper_ck.clk,	CK_16XX | CK_1510 | CK_310),
-	CLK(NULL,	"arm_gpio_ck",	&arm_gpio_ck,	CK_1510 | CK_310),
+	CLK("omap_gpio.0", "ick",	&arm_gpio_ck,	CK_1510 | CK_310),
 	CLK(NULL,	"armxor_ck",	&armxor_ck.clk,	CK_16XX | CK_1510 | CK_310 | CK_7XX),
 	CLK(NULL,	"armtim_ck",	&armtim_ck.clk,	CK_16XX | CK_1510 | CK_310),
 	CLK("omap_wdt",	"fck",		&armwdt_ck.clk,	CK_16XX | CK_1510 | CK_310),
@@ -736,9 +736,9 @@
 	CLK("mmci-omap.1", "ick",	&armper_ck.clk,	CK_16XX),
 	/* Virtual clocks */
 	CLK(NULL,	"mpu",		&virtual_ck_mpu, CK_16XX | CK_1510 | CK_310),
-	CLK("i2c_omap.1", "fck",	&i2c_fck,	CK_16XX | CK_1510 | CK_310 | CK_7XX),
-	CLK("i2c_omap.1", "ick",	&i2c_ick,	CK_16XX),
-	CLK("i2c_omap.1", "ick",	&dummy_ck,	CK_1510 | CK_310 | CK_7XX),
+	CLK("omap_i2c.1", "fck",	&i2c_fck,	CK_16XX | CK_1510 | CK_310 | CK_7XX),
+	CLK("omap_i2c.1", "ick",	&i2c_ick,	CK_16XX),
+	CLK("omap_i2c.1", "ick",	&dummy_ck,	CK_1510 | CK_310 | CK_7XX),
 	CLK("omap1_spi100k.1", "fck",	&dummy_ck,	CK_7XX),
 	CLK("omap1_spi100k.1", "ick",	&dummy_ck,	CK_7XX),
 	CLK("omap1_spi100k.2", "fck",	&dummy_ck,	CK_7XX),
@@ -823,12 +823,10 @@
 			crystal_type = info->system_clock_type;
 	}
 
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
-	ck_ref.rate = 13000000;
-#elif defined(CONFIG_ARCH_OMAP16XX)
-	if (crystal_type == 2)
+	if (cpu_is_omap7xx())
+		ck_ref.rate = 13000000;
+	if (cpu_is_omap16xx() && crystal_type == 2)
 		ck_ref.rate = 19200000;
-#endif
 
 	pr_info("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: "
 		"0x%04x\n", omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
@@ -883,10 +881,11 @@
 	       ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
 	       arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
 
-#if defined(CONFIG_MACH_OMAP_PERSEUS2) || defined(CONFIG_MACH_OMAP_FSAMPLE)
-	/* Select slicer output as OMAP input clock */
-	omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, OMAP7XX_PCC_UPLD_CTRL);
-#endif
+	if (machine_is_omap_perseus2() || machine_is_omap_fsample()) {
+		/* Select slicer output as OMAP input clock */
+		omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1,
+				OMAP7XX_PCC_UPLD_CTRL);
+	}
 
 	/* Amstrad Delta wants BCLK high when inactive */
 	if (machine_is_ams_delta())
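
The CLK() renames above (arm_gpio_ck exposed as "ick" for "omap_gpio.0", i2c_omap becoming omap_i2c) matter because clkdev matches clocks on the device name plus connection id. A rough sketch of the consumer side, assuming the common clk API; the function is illustrative, not taken from this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

static int example_enable_iclk(struct device *dev)
{
	struct clk *ick;
	int ret;

	/* For the omap_gpio.0 device this now resolves to arm_gpio_ck
	 * through the CLK("omap_gpio.0", "ick", ...) entry. */
	ick = clk_get(dev, "ick");
	if (IS_ERR(ick))
		return PTR_ERR(ick);

	ret = clk_enable(ick);
	if (ret)
		clk_put(ick);
	return ret;
}
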
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index e7f9ee6..b0f4c23 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/spi/spi.h>
 
+#include <mach/camera.h>
 #include <mach/hardware.h>
 #include <asm/mach/map.h>
 
@@ -287,6 +288,9 @@
  */
 static int __init omap1_init_devices(void)
 {
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	/* please keep these calls, and their implementations above,
 	 * in alphabetical order so they're easier to sort through.
 	 */
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
new file mode 100644
index 0000000..d855934
--- /dev/null
+++ b/arch/arm/mach-omap1/dma.c
@@ -0,0 +1,390 @@
+/*
+ * OMAP1/OMAP7xx - specific DMA driver
+ *
+ * Copyright (C) 2003 - 2008 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
+ * Graphics DMA and LCD DMA graphics transformations
+ * by Imre Deak <imre.deak@nokia.com>
+ * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
+ * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Converted DMA library into platform driver
+ *                   - G, Manjunath Kondaiah <manjugk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <plat/dma.h>
+#include <plat/tc.h>
+#include <plat/irqs.h>
+
+#define OMAP1_DMA_BASE			(0xfffed800)
+#define OMAP1_LOGICAL_DMA_CH_COUNT	17
+#define OMAP1_DMA_STRIDE		0x40
+
+static u32 errata;
+static u32 enable_1510_mode;
+static u8 dma_stride;
+static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
+
+static u16 reg_map[] = {
+	[GCR]		= 0x400,
+	[GSCR]		= 0x404,
+	[GRST1]		= 0x408,
+	[HW_ID]		= 0x442,
+	[PCH2_ID]	= 0x444,
+	[PCH0_ID]	= 0x446,
+	[PCH1_ID]	= 0x448,
+	[PCHG_ID]	= 0x44a,
+	[PCHD_ID]	= 0x44c,
+	[CAPS_0]	= 0x44e,
+	[CAPS_1]	= 0x452,
+	[CAPS_2]	= 0x456,
+	[CAPS_3]	= 0x458,
+	[CAPS_4]	= 0x45a,
+	[PCH2_SR]	= 0x460,
+	[PCH0_SR]	= 0x480,
+	[PCH1_SR]	= 0x482,
+	[PCHD_SR]	= 0x4c0,
+
+	/* Common Registers */
+	[CSDP]		= 0x00,
+	[CCR]		= 0x02,
+	[CICR]		= 0x04,
+	[CSR]		= 0x06,
+	[CEN]		= 0x10,
+	[CFN]		= 0x12,
+	[CSFI]		= 0x14,
+	[CSEI]		= 0x16,
+	[CPC]		= 0x18,	/* 15xx only */
+	[CSAC]		= 0x18,
+	[CDAC]		= 0x1a,
+	[CDEI]		= 0x1c,
+	[CDFI]		= 0x1e,
+	[CLNK_CTRL]	= 0x28,
+
+	/* Channel specific register offsets */
+	[CSSA]		= 0x08,
+	[CDSA]		= 0x0c,
+	[COLOR]		= 0x20,
+	[CCR2]		= 0x24,
+	[LCH_CTRL]	= 0x2a,
+};
+
+static struct resource res[] __initdata = {
+	[0] = {
+		.start	= OMAP1_DMA_BASE,
+		.end	= OMAP1_DMA_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name   = "0",
+		.start  = INT_DMA_CH0_6,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[2] = {
+		.name   = "1",
+		.start  = INT_DMA_CH1_7,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[3] = {
+		.name   = "2",
+		.start  = INT_DMA_CH2_8,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[4] = {
+		.name   = "3",
+		.start  = INT_DMA_CH3,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[5] = {
+		.name   = "4",
+		.start  = INT_DMA_CH4,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[6] = {
+		.name   = "5",
+		.start  = INT_DMA_CH5,
+		.flags  = IORESOURCE_IRQ,
+	},
+	/* Handled in lcd_dma.c */
+	[7] = {
+		.name   = "6",
+		.start  = INT_1610_DMA_CH6,
+		.flags  = IORESOURCE_IRQ,
+	},
+	/* IRQs for omap16xx and omap7xx */
+	[8] = {
+		.name   = "7",
+		.start  = INT_1610_DMA_CH7,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[9] = {
+		.name   = "8",
+		.start  = INT_1610_DMA_CH8,
+		.flags  = IORESOURCE_IRQ,
+	},
+	[10] = {
+		.name  = "9",
+		.start = INT_1610_DMA_CH9,
+		.flags = IORESOURCE_IRQ,
+	},
+	[11] = {
+		.name  = "10",
+		.start = INT_1610_DMA_CH10,
+		.flags = IORESOURCE_IRQ,
+	},
+	[12] = {
+		.name  = "11",
+		.start = INT_1610_DMA_CH11,
+		.flags = IORESOURCE_IRQ,
+	},
+	[13] = {
+		.name  = "12",
+		.start = INT_1610_DMA_CH12,
+		.flags = IORESOURCE_IRQ,
+	},
+	[14] = {
+		.name  = "13",
+		.start = INT_1610_DMA_CH13,
+		.flags = IORESOURCE_IRQ,
+	},
+	[15] = {
+		.name  = "14",
+		.start = INT_1610_DMA_CH14,
+		.flags = IORESOURCE_IRQ,
+	},
+	[16] = {
+		.name  = "15",
+		.start = INT_1610_DMA_CH15,
+		.flags = IORESOURCE_IRQ,
+	},
+	[17] = {
+		.name  = "16",
+		.start = INT_DMA_LCD,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static void __iomem *dma_base;
+static inline void dma_write(u32 val, int reg, int lch)
+{
+	u8  stride;
+	u32 offset;
+
+	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
+	offset = reg_map[reg] + (stride * lch);
+
+	__raw_writew(val, dma_base + offset);
+	if ((reg > CLNK_CTRL && reg < CCEN) ||
+			(reg > PCHD_ID && reg < CAPS_2)) {
+		u32 offset2 = reg_map[reg] + 2 + (stride * lch);
+		__raw_writew(val >> 16, dma_base + offset2);
+	}
+}
+
+static inline u32 dma_read(int reg, int lch)
+{
+	u8 stride;
+	u32 offset, val;
+
+	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
+	offset = reg_map[reg] + (stride * lch);
+
+	val = __raw_readw(dma_base + offset);
+	if ((reg > CLNK_CTRL && reg < CCEN) ||
+			(reg > PCHD_ID && reg < CAPS_2)) {
+		u16 upper;
+		u32 offset2 = reg_map[reg] + 2 + (stride * lch);
+		upper = __raw_readw(dma_base + offset2);
+		val |= (upper << 16);
+	}
+	return val;
+}
+
+static void omap1_clear_lch_regs(int lch)
+{
+	int i = dma_common_ch_start;
+
+	for (; i <= dma_common_ch_end; i += 1)
+		dma_write(0, i, lch);
+}
+
+static void omap1_clear_dma(int lch)
+{
+	u32 l;
+
+	l = dma_read(CCR, lch);
+	l &= ~OMAP_DMA_CCR_EN;
+	dma_write(l, CCR, lch);
+
+	/* Clear pending interrupts */
+	l = dma_read(CSR, lch);
+}
+
+static void omap1_show_dma_caps(void)
+{
+	if (enable_1510_mode) {
+		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
+	} else {
+		u16 w;
+		printk(KERN_INFO "OMAP DMA hardware version %d\n",
+							dma_read(HW_ID, 0));
+		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
+			dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
+			dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
+			dma_read(CAPS_4, 0));
+
+		/* Disable OMAP 3.0/3.1 compatibility mode. */
+		w = dma_read(GSCR, 0);
+		w |= 1 << 3;
+		dma_write(w, GSCR, 0);
+	}
+	return;
+}
+
+static u32 configure_dma_errata(void)
+{
+
+	/*
+	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
+	 * read before the DMA controller finished disabling the channel.
+	 */
+	if (!cpu_is_omap15xx())
+		SET_DMA_ERRATA(DMA_ERRATA_3_3);
+
+	return errata;
+}
+
+static int __init omap1_system_dma_init(void)
+{
+	struct omap_system_dma_plat_info	*p;
+	struct omap_dma_dev_attr		*d;
+	struct platform_device			*pdev;
+	int ret;
+
+	pdev = platform_device_alloc("omap_dma_system", 0);
+	if (!pdev) {
+		pr_err("%s: Unable to allocate platform device for DMA\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	dma_base = ioremap(res[0].start, resource_size(&res[0]));
+	if (!dma_base) {
+		pr_err("%s: Unable to ioremap\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
+			__func__, pdev->name, pdev->id);
+		goto exit_device_del;
+	}
+
+	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
+	if (!p) {
+		dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
+			__func__, pdev->name);
+		ret = -ENOMEM;
+		goto exit_device_put;
+	}
+
+	d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
+	if (!d) {
+		dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n",
+			__func__, pdev->name);
+		ret = -ENOMEM;
+		goto exit_release_p;
+	}
+
+	d->lch_count		= OMAP1_LOGICAL_DMA_CH_COUNT;
+
+	/* Valid attributes for omap1 plus processors */
+	if (cpu_is_omap15xx())
+		d->dev_caps = ENABLE_1510_MODE;
+	enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
+
+	d->dev_caps		|= SRC_PORT;
+	d->dev_caps		|= DST_PORT;
+	d->dev_caps		|= SRC_INDEX;
+	d->dev_caps		|= DST_INDEX;
+	d->dev_caps		|= IS_BURST_ONLY4;
+	d->dev_caps		|= CLEAR_CSR_ON_READ;
+	d->dev_caps		|= IS_WORD_16;
+
+
+	d->chan = kzalloc(sizeof(struct omap_dma_lch) *
+					(d->lch_count), GFP_KERNEL);
+	if (!d->chan) {
+		dev_err(&pdev->dev, "%s: Memory allocation failed "
+					"for d->chan!!!\n", __func__);
+		goto exit_release_d;
+	}
+
+	if (cpu_is_omap15xx())
+		d->chan_count = 9;
+	else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+		if (!(d->dev_caps & ENABLE_1510_MODE))
+			d->chan_count = 16;
+		else
+			d->chan_count = 9;
+	}
+
+	p->dma_attr = d;
+
+	p->show_dma_caps	= omap1_show_dma_caps;
+	p->clear_lch_regs	= omap1_clear_lch_regs;
+	p->clear_dma		= omap1_clear_dma;
+	p->dma_write		= dma_write;
+	p->dma_read		= dma_read;
+	p->disable_irq_lch	= NULL;
+
+	p->errata = configure_dma_errata();
+
+	ret = platform_device_add_data(pdev, p, sizeof(*p));
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
+			__func__, pdev->name, pdev->id);
+		goto exit_release_chan;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
+			__func__, pdev->name, pdev->id);
+		goto exit_release_chan;
+	}
+
+	dma_stride		= OMAP1_DMA_STRIDE;
+	dma_common_ch_start	= CPC;
+	dma_common_ch_end	= COLOR;
+
+	return ret;
+
+exit_release_chan:
+	kfree(d->chan);
+exit_release_d:
+	kfree(d);
+exit_release_p:
+	kfree(p);
+exit_device_put:
+	platform_device_put(pdev);
+exit_device_del:
+	platform_device_del(pdev);
+
+	return ret;
+}
+arch_initcall(omap1_system_dma_init);
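
For context, a sketch of how the matching omap_dma_system driver would typically pick up the data registered above with platform_device_add_data(); the probe function and its uses of the callbacks are illustrative, not taken from this patch.

#include <linux/platform_device.h>
#include <plat/dma.h>

static int example_dma_probe(struct platform_device *pdev)
{
	/* platform_device_add_data() copied the structure, so the
	 * driver reads it back through dev.platform_data. */
	struct omap_system_dma_plat_info *p = pdev->dev.platform_data;

	if (!p)
		return -EINVAL;

	/* Callbacks and errata flags were filled in by
	 * omap1_system_dma_init() above. */
	p->show_dma_caps();
	return 0;
}
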
diff --git a/arch/arm/mach-omap1/flash.c b/arch/arm/mach-omap1/flash.c
index 0b07a78..acd1616 100644
--- a/arch/arm/mach-omap1/flash.c
+++ b/arch/arm/mach-omap1/flash.c
@@ -11,6 +11,7 @@
 
 #include <plat/io.h>
 #include <plat/tc.h>
+#include <plat/flash.h>
 
 void omap1_set_vpp(struct map_info *map, int enable)
 {
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index 5cfce16..8780e75 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -143,7 +143,7 @@
  */
 void omap1510_fpga_init_irq(void)
 {
-	int i;
+	int i, res;
 
 	__raw_writeb(0, OMAP1510_FPGA_IMR_LO);
 	__raw_writeb(0, OMAP1510_FPGA_IMR_HI);
@@ -177,10 +177,12 @@
 	 * NOTE: For general GPIO/MPUIO access and interrupts, please see
 	 * gpio.[ch]
 	 */
-	gpio_request(13, "FPGA irq");
+	res = gpio_request(13, "FPGA irq");
+	if (res) {
+		pr_err("%s failed to get gpio\n", __func__);
+		return;
+	}
 	gpio_direction_input(13);
 	set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
 	set_irq_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux);
 }
-
-EXPORT_SYMBOL(omap1510_fpga_init_irq);
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
new file mode 100644
index 0000000..04c4b04
--- /dev/null
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -0,0 +1,99 @@
+/*
+ * OMAP15xx specific gpio init
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ *	Charulatha V <charu@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+
+#define OMAP1_MPUIO_VBASE		OMAP1_MPUIO_BASE
+#define OMAP1510_GPIO_BASE		0xFFFCE000
+
+/* gpio1 */
+static struct __initdata resource omap15xx_mpu_gpio_resources[] = {
+	{
+		.start	= OMAP1_MPUIO_VBASE,
+		.end	= OMAP1_MPUIO_VBASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_MPUIO,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
+	.virtual_irq_start	= IH_MPUIO_BASE,
+	.bank_type		= METHOD_MPUIO,
+	.bank_width		= 16,
+	.bank_stride		= 1,
+};
+
+static struct __initdata platform_device omap15xx_mpu_gpio = {
+	.name           = "omap_gpio",
+	.id             = 0,
+	.dev            = {
+		.platform_data = &omap15xx_mpu_gpio_config,
+	},
+	.num_resources = ARRAY_SIZE(omap15xx_mpu_gpio_resources),
+	.resource = omap15xx_mpu_gpio_resources,
+};
+
+/* gpio2 */
+static struct __initdata resource omap15xx_gpio_resources[] = {
+	{
+		.start	= OMAP1510_GPIO_BASE,
+		.end	= OMAP1510_GPIO_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_GPIO_BANK1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
+	.virtual_irq_start	= IH_GPIO_BASE,
+	.bank_type		= METHOD_GPIO_1510,
+	.bank_width		= 16,
+};
+
+static struct __initdata platform_device omap15xx_gpio = {
+	.name           = "omap_gpio",
+	.id             = 1,
+	.dev            = {
+		.platform_data = &omap15xx_gpio_config,
+	},
+	.num_resources = ARRAY_SIZE(omap15xx_gpio_resources),
+	.resource = omap15xx_gpio_resources,
+};
+
+/*
+ * omap15xx_gpio_init needs to be done before
+ * machine_init functions access gpio APIs.
+ * Hence omap15xx_gpio_init is a postcore_initcall.
+ */
+static int __init omap15xx_gpio_init(void)
+{
+	if (!cpu_is_omap15xx())
+		return -EINVAL;
+
+	platform_device_register(&omap15xx_mpu_gpio);
+	platform_device_register(&omap15xx_gpio);
+
+	gpio_bank_count = 2;
+	return 0;
+}
+postcore_initcall(omap15xx_gpio_init);
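
A short sketch of why the board files earlier in this diff could drop their omap_gpio_init() calls: the GPIO banks above are registered at postcore_initcall time, which runs before the arch_initcall that invokes a board's .init_machine hook, so gpio_request() made from board init code already finds the banks. Illustrative only, assuming the standard ARM initcall ordering.

#include <linux/gpio.h>
#include <linux/init.h>

static int __init example_board_setup(void)
{
	/* Safe: the omap_gpio platform devices were registered one
	 * initcall level earlier (postcore vs. arch). */
	int err = gpio_request(13, "example irq line");

	if (err)
		return err;

	return gpio_direction_input(13);
}
arch_initcall(example_board_setup);
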
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
new file mode 100644
index 0000000..5dd0d4c
--- /dev/null
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -0,0 +1,200 @@
+/*
+ * OMAP16xx specific gpio init
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ *	Charulatha V <charu@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+
+#define OMAP1610_GPIO1_BASE		0xfffbe400
+#define OMAP1610_GPIO2_BASE		0xfffbec00
+#define OMAP1610_GPIO3_BASE		0xfffbb400
+#define OMAP1610_GPIO4_BASE		0xfffbbc00
+#define OMAP1_MPUIO_VBASE		OMAP1_MPUIO_BASE
+
+/* mpu gpio */
+static struct __initdata resource omap16xx_mpu_gpio_resources[] = {
+	{
+		.start	= OMAP1_MPUIO_VBASE,
+		.end	= OMAP1_MPUIO_VBASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_MPUIO,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
+	.virtual_irq_start	= IH_MPUIO_BASE,
+	.bank_type		= METHOD_MPUIO,
+	.bank_width		= 16,
+	.bank_stride		= 1,
+};
+
+static struct __initdata platform_device omap16xx_mpu_gpio = {
+	.name           = "omap_gpio",
+	.id             = 0,
+	.dev            = {
+		.platform_data = &omap16xx_mpu_gpio_config,
+	},
+	.num_resources = ARRAY_SIZE(omap16xx_mpu_gpio_resources),
+	.resource = omap16xx_mpu_gpio_resources,
+};
+
+/* gpio1 */
+static struct __initdata resource omap16xx_gpio1_resources[] = {
+	{
+		.start	= OMAP1610_GPIO1_BASE,
+		.end	= OMAP1610_GPIO1_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_GPIO_BANK1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
+	.virtual_irq_start	= IH_GPIO_BASE,
+	.bank_type		= METHOD_GPIO_1610,
+	.bank_width		= 16,
+};
+
+static struct __initdata platform_device omap16xx_gpio1 = {
+	.name           = "omap_gpio",
+	.id             = 1,
+	.dev            = {
+		.platform_data = &omap16xx_gpio1_config,
+	},
+	.num_resources = ARRAY_SIZE(omap16xx_gpio1_resources),
+	.resource = omap16xx_gpio1_resources,
+};
+
+/* gpio2 */
+static struct __initdata resource omap16xx_gpio2_resources[] = {
+	{
+		.start	= OMAP1610_GPIO2_BASE,
+		.end	= OMAP1610_GPIO2_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_1610_GPIO_BANK2,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 16,
+	.bank_type		= METHOD_GPIO_1610,
+	.bank_width		= 16,
+};
+
+static struct __initdata platform_device omap16xx_gpio2 = {
+	.name           = "omap_gpio",
+	.id             = 2,
+	.dev            = {
+		.platform_data = &omap16xx_gpio2_config,
+	},
+	.num_resources = ARRAY_SIZE(omap16xx_gpio2_resources),
+	.resource = omap16xx_gpio2_resources,
+};
+
+/* gpio3 */
+static struct __initdata resource omap16xx_gpio3_resources[] = {
+	{
+		.start	= OMAP1610_GPIO3_BASE,
+		.end	= OMAP1610_GPIO3_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_1610_GPIO_BANK3,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 32,
+	.bank_type		= METHOD_GPIO_1610,
+	.bank_width		= 16,
+};
+
+static struct __initdata platform_device omap16xx_gpio3 = {
+	.name           = "omap_gpio",
+	.id             = 3,
+	.dev            = {
+		.platform_data = &omap16xx_gpio3_config,
+	},
+	.num_resources = ARRAY_SIZE(omap16xx_gpio3_resources),
+	.resource = omap16xx_gpio3_resources,
+};
+
+/* gpio4 */
+static struct __initdata resource omap16xx_gpio4_resources[] = {
+	{
+		.start	= OMAP1610_GPIO4_BASE,
+		.end	= OMAP1610_GPIO4_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_1610_GPIO_BANK4,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 48,
+	.bank_type		= METHOD_GPIO_1610,
+	.bank_width		= 16,
+};
+
+static struct __initdata platform_device omap16xx_gpio4 = {
+	.name           = "omap_gpio",
+	.id             = 4,
+	.dev            = {
+		.platform_data = &omap16xx_gpio4_config,
+	},
+	.num_resources = ARRAY_SIZE(omap16xx_gpio4_resources),
+	.resource = omap16xx_gpio4_resources,
+};
+
+static struct __initdata platform_device * omap16xx_gpio_dev[] = {
+	&omap16xx_mpu_gpio,
+	&omap16xx_gpio1,
+	&omap16xx_gpio2,
+	&omap16xx_gpio3,
+	&omap16xx_gpio4,
+};
+
+/*
+ * omap16xx_gpio_init needs to be done before
+ * machine_init functions access gpio APIs.
+ * Hence omap16xx_gpio_init is a postcore_initcall.
+ */
+static int __init omap16xx_gpio_init(void)
+{
+	int i;
+
+	if (!cpu_is_omap16xx())
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(omap16xx_gpio_dev); i++)
+		platform_device_register(omap16xx_gpio_dev[i]);
+
+	gpio_bank_count = ARRAY_SIZE(omap16xx_gpio_dev);
+
+	return 0;
+}
+postcore_initcall(omap16xx_gpio_init);
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
new file mode 100644
index 0000000..1204c8b
--- /dev/null
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -0,0 +1,262 @@
+/*
+ * OMAP7xx specific gpio init
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ *	Charulatha V <charu@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+
+#define OMAP7XX_GPIO1_BASE		0xfffbc000
+#define OMAP7XX_GPIO2_BASE		0xfffbc800
+#define OMAP7XX_GPIO3_BASE		0xfffbd000
+#define OMAP7XX_GPIO4_BASE		0xfffbd800
+#define OMAP7XX_GPIO5_BASE		0xfffbe000
+#define OMAP7XX_GPIO6_BASE		0xfffbe800
+#define OMAP1_MPUIO_VBASE		OMAP1_MPUIO_BASE
+
+/* mpu gpio */
+static struct __initdata resource omap7xx_mpu_gpio_resources[] = {
+	{
+		.start	= OMAP1_MPUIO_VBASE,
+		.end	= OMAP1_MPUIO_VBASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_MPUIO,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
+	.virtual_irq_start	= IH_MPUIO_BASE,
+	.bank_type		= METHOD_MPUIO,
+	.bank_width		= 32,
+	.bank_stride		= 2,
+};
+
+static struct __initdata platform_device omap7xx_mpu_gpio = {
+	.name           = "omap_gpio",
+	.id             = 0,
+	.dev            = {
+		.platform_data = &omap7xx_mpu_gpio_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_mpu_gpio_resources),
+	.resource = omap7xx_mpu_gpio_resources,
+};
+
+/* gpio1 */
+static struct __initdata resource omap7xx_gpio1_resources[] = {
+	{
+		.start	= OMAP7XX_GPIO1_BASE,
+		.end	= OMAP7XX_GPIO1_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_GPIO_BANK1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
+	.virtual_irq_start	= IH_GPIO_BASE,
+	.bank_type		= METHOD_GPIO_7XX,
+	.bank_width		= 32,
+};
+
+static struct __initdata platform_device omap7xx_gpio1 = {
+	.name           = "omap_gpio",
+	.id             = 1,
+	.dev            = {
+		.platform_data = &omap7xx_gpio1_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_gpio1_resources),
+	.resource = omap7xx_gpio1_resources,
+};
+
+/* gpio2 */
+static struct __initdata resource omap7xx_gpio2_resources[] = {
+	{
+		.start	= OMAP7XX_GPIO2_BASE,
+		.end	= OMAP7XX_GPIO2_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_GPIO_BANK2,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 32,
+	.bank_type		= METHOD_GPIO_7XX,
+	.bank_width		= 32,
+};
+
+static struct __initdata platform_device omap7xx_gpio2 = {
+	.name           = "omap_gpio",
+	.id             = 2,
+	.dev            = {
+		.platform_data = &omap7xx_gpio2_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_gpio2_resources),
+	.resource = omap7xx_gpio2_resources,
+};
+
+/* gpio3 */
+static struct __initdata resource omap7xx_gpio3_resources[] = {
+	{
+		.start	= OMAP7XX_GPIO3_BASE,
+		.end	= OMAP7XX_GPIO3_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_GPIO_BANK3,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 64,
+	.bank_type		= METHOD_GPIO_7XX,
+	.bank_width		= 32,
+};
+
+static struct __initdata platform_device omap7xx_gpio3 = {
+	.name           = "omap_gpio",
+	.id             = 3,
+	.dev            = {
+		.platform_data = &omap7xx_gpio3_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_gpio3_resources),
+	.resource = omap7xx_gpio3_resources,
+};
+
+/* gpio4 */
+static struct __initdata resource omap7xx_gpio4_resources[] = {
+	{
+		.start	= OMAP7XX_GPIO4_BASE,
+		.end	= OMAP7XX_GPIO4_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_GPIO_BANK4,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 96,
+	.bank_type		= METHOD_GPIO_7XX,
+	.bank_width		= 32,
+};
+
+static struct __initdata platform_device omap7xx_gpio4 = {
+	.name           = "omap_gpio",
+	.id             = 4,
+	.dev            = {
+		.platform_data = &omap7xx_gpio4_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_gpio4_resources),
+	.resource = omap7xx_gpio4_resources,
+};
+
+/* gpio5 */
+static struct __initdata resource omap7xx_gpio5_resources[] = {
+	{
+		.start	= OMAP7XX_GPIO5_BASE,
+		.end	= OMAP7XX_GPIO5_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_GPIO_BANK5,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 128,
+	.bank_type		= METHOD_GPIO_7XX,
+	.bank_width		= 32,
+};
+
+static struct __initdata platform_device omap7xx_gpio5 = {
+	.name           = "omap_gpio",
+	.id             = 5,
+	.dev            = {
+		.platform_data = &omap7xx_gpio5_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_gpio5_resources),
+	.resource = omap7xx_gpio5_resources,
+};
+
+/* gpio6 */
+static struct __initdata resource omap7xx_gpio6_resources[] = {
+	{
+		.start	= OMAP7XX_GPIO6_BASE,
+		.end	= OMAP7XX_GPIO6_BASE + SZ_2K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= INT_7XX_GPIO_BANK6,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
+	.virtual_irq_start	= IH_GPIO_BASE + 160,
+	.bank_type		= METHOD_GPIO_7XX,
+	.bank_width		= 32,
+};
+
+static struct __initdata platform_device omap7xx_gpio6 = {
+	.name           = "omap_gpio",
+	.id             = 6,
+	.dev            = {
+		.platform_data = &omap7xx_gpio6_config,
+	},
+	.num_resources = ARRAY_SIZE(omap7xx_gpio6_resources),
+	.resource = omap7xx_gpio6_resources,
+};
+
+static struct __initdata platform_device * omap7xx_gpio_dev[] = {
+	&omap7xx_mpu_gpio,
+	&omap7xx_gpio1,
+	&omap7xx_gpio2,
+	&omap7xx_gpio3,
+	&omap7xx_gpio4,
+	&omap7xx_gpio5,
+	&omap7xx_gpio6,
+};
+
+/*
+ * omap7xx_gpio_init needs to be done before
+ * machine_init functions access gpio APIs.
+ * Hence omap7xx_gpio_init is a postcore_initcall.
+ */
+static int __init omap7xx_gpio_init(void)
+{
+	int i;
+
+	if (!cpu_is_omap7xx())
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(omap7xx_gpio_dev); i++)
+		platform_device_register(omap7xx_gpio_dev[i]);
+
+	gpio_bank_count = ARRAY_SIZE(omap7xx_gpio_dev);
+
+	return 0;
+}
+postcore_initcall(omap7xx_gpio_init);
diff --git a/arch/arm/mach-omap1/include/mach/entry-macro.S b/arch/arm/mach-omap1/include/mach/entry-macro.S
index df9060e..c9be6d4 100644
--- a/arch/arm/mach-omap1/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap1/include/mach/entry-macro.S
@@ -14,18 +14,17 @@
 #include <mach/irqs.h>
 #include <asm/hardware/gic.h>
 
-#if (defined(CONFIG_ARCH_OMAP730)||defined(CONFIG_ARCH_OMAP850)) && \
-	(defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX))
-#error "FIXME: OMAP7XX doesn't support multiple-OMAP"
-#elif defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
-#define INT_IH2_IRQ		INT_7XX_IH2_IRQ
-#elif defined(CONFIG_ARCH_OMAP15XX)
-#define INT_IH2_IRQ		INT_1510_IH2_IRQ
-#elif defined(CONFIG_ARCH_OMAP16XX)
-#define INT_IH2_IRQ		INT_1610_IH2_IRQ
-#else
-#warning "IH2 IRQ defaulted"
-#define INT_IH2_IRQ		INT_1510_IH2_IRQ
+/*
+ * We use __glue to avoid errors with multiple definitions of
+ * .globl omap_irq_flags as it's included from entry-armv.S but not
+ * from entry-common.S.
+ */
+#ifdef __glue
+		.pushsection .data
+		.globl	omap_irq_flags
+omap_irq_flags:
+		.word	0
+		.popsection
 #endif
 
  		.macro	disable_fiq
@@ -47,9 +46,11 @@
 		beq	1510f
 
 		ldr	\irqnr, [\base, #IRQ_SIR_FIQ_REG_OFFSET]
+		ldr	\tmp, =omap_irq_flags	@ irq flags address
+		ldr	\tmp, [\tmp, #0]	@ irq flags value
 		cmp	\irqnr, #0
 		ldreq	\irqnr, [\base, #IRQ_SIR_IRQ_REG_OFFSET]
-		cmpeq	\irqnr, #INT_IH2_IRQ
+		cmpeq	\irqnr, \tmp
 		ldreq	\base, =OMAP1_IO_ADDRESS(OMAP_IH2_BASE)
 		ldreq	\irqnr, [\base, #IRQ_SIR_IRQ_REG_OFFSET]
 		addeqs	\irqnr, \irqnr, #32
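
The compile-time #if ladder picking INT_IH2_IRQ is replaced by a word in
.data, omap_irq_flags, that the entry macro loads at run time, so the
cascaded-IRQ number no longer has to be fixed per build; the #ifdef __glue
guard emits the symbol only when this file is pulled in via entry-armv.S
(where __glue is defined), avoiding a duplicate definition from
entry-common.S. On the C side the variable is just an extern; the irq.c hunk
below assigns it per detected SoC, roughly as in this sketch (the helper name
is hypothetical):

#include <linux/init.h>

extern unsigned int omap_irq_flags;		/* defined in entry-macro.S */

static void __init example_select_ih2_irq(unsigned int ih2_irq)
{
	omap_irq_flags = ih2_irq;		/* e.g. INT_7XX_IH2_IRQ */
}
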
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 0ce3fec..870886a 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -142,3 +142,42 @@
 	omap1_mux_init();
 }
 
+/*
+ * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+ */
+
+u8 omap_readb(u32 pa)
+{
+	return __raw_readb(OMAP1_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_readb);
+
+u16 omap_readw(u32 pa)
+{
+	return __raw_readw(OMAP1_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_readw);
+
+u32 omap_readl(u32 pa)
+{
+	return __raw_readl(OMAP1_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_readl);
+
+void omap_writeb(u8 v, u32 pa)
+{
+	__raw_writeb(v, OMAP1_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_writeb);
+
+void omap_writew(u16 v, u32 pa)
+{
+	__raw_writew(v, OMAP1_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_writew);
+
+void omap_writel(u32 v, u32 pa)
+{
+	__raw_writel(v, OMAP1_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_writel);
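
The exported omap_read*/omap_write* helpers keep existing users of the fixed
OMAP1_IO_ADDRESS() mapping working, but the NOTE above still applies to new
code: ioremap the block and use the __raw accessors directly. A minimal sketch
of that preferred style, with a made-up register address:

#include <linux/types.h>
#include <linux/io.h>

#define EXAMPLE_REG_PA		0xfffb0000	/* hypothetical register block */

static u32 example_read_reg(void)
{
	void __iomem *base = ioremap(EXAMPLE_REG_PA, 0x1000);
	u32 val;

	if (!base)
		return 0;

	val = __raw_readl(base);	/* rather than omap_readl(EXAMPLE_REG_PA) */
	iounmap(base);

	return val;
}
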
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index db913c3..6bddbc8 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -176,26 +176,31 @@
 
 void __init omap_init_irq(void)
 {
+	extern unsigned int omap_irq_flags;
 	int i, j;
 
 #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
 	if (cpu_is_omap7xx()) {
+		omap_irq_flags = INT_7XX_IH2_IRQ;
 		irq_banks = omap7xx_irq_banks;
 		irq_bank_count = ARRAY_SIZE(omap7xx_irq_banks);
 	}
 #endif
 #ifdef CONFIG_ARCH_OMAP15XX
 	if (cpu_is_omap1510()) {
+		omap_irq_flags = INT_1510_IH2_IRQ;
 		irq_banks = omap1510_irq_banks;
 		irq_bank_count = ARRAY_SIZE(omap1510_irq_banks);
 	}
 	if (cpu_is_omap310()) {
+		omap_irq_flags = INT_1510_IH2_IRQ;
 		irq_banks = omap310_irq_banks;
 		irq_bank_count = ARRAY_SIZE(omap310_irq_banks);
 	}
 #endif
 #if defined(CONFIG_ARCH_OMAP16XX)
 	if (cpu_is_omap16xx()) {
+		omap_irq_flags = INT_1510_IH2_IRQ;
 		irq_banks = omap1610_irq_banks;
 		irq_bank_count = ARRAY_SIZE(omap1610_irq_banks);
 	}
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c
index 3be11af..c9088d8 100644
--- a/arch/arm/mach-omap1/lcd_dma.c
+++ b/arch/arm/mach-omap1/lcd_dma.c
@@ -424,6 +424,9 @@
 {
 	int r;
 
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	if (cpu_is_omap16xx()) {
 		u16 w;
 
diff --git a/arch/arm/mach-omap1/leds.c b/arch/arm/mach-omap1/leds.c
index 277f356..22eb11d 100644
--- a/arch/arm/mach-omap1/leds.c
+++ b/arch/arm/mach-omap1/leds.c
@@ -17,6 +17,9 @@
 static int __init
 omap_leds_init(void)
 {
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	if (machine_is_omap_innovator())
 		leds_event = innovator_leds_event;
 
diff --git a/arch/arm/mach-omap1/mailbox.c b/arch/arm/mach-omap1/mailbox.c
index 1a85a42..c0e1f48 100644
--- a/arch/arm/mach-omap1/mailbox.c
+++ b/arch/arm/mach-omap1/mailbox.c
@@ -133,19 +133,18 @@
 	},
 };
 
-struct omap_mbox mbox_dsp_info = {
+static struct omap_mbox mbox_dsp_info = {
 	.name	= "dsp",
 	.ops	= &omap1_mbox_ops,
 	.priv	= &omap1_mbox_dsp_priv,
 };
 
-struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL };
+static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL };
 
 static int __devinit omap1_mbox_probe(struct platform_device *pdev)
 {
 	struct resource *mem;
 	int ret;
-	int i;
 	struct omap_mbox **list;
 
 	list = omap1_mboxes;
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index b3a796a..8209736 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -174,8 +174,11 @@
 #define OMAP16XX_MCBSP_REG_NUM		0
 #endif
 
-int __init omap1_mcbsp_init(void)
+static int __init omap1_mcbsp_init(void)
 {
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	if (cpu_is_omap7xx()) {
 		omap_mcbsp_count = OMAP7XX_MCBSP_PDATA_SZ;
 		omap_mcbsp_cache_size = OMAP7XX_MCBSP_REG_NUM * sizeof(u16);
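
This hunk -- like the matching ones in lcd_dma.c and leds.c above and in
pm.c, pm_bus.c and serial.c further down -- adds an early
cpu_class_is_omap1() check, presumably so these plain initcalls return
-ENODEV instead of touching OMAP1-only hardware when the detected CPU is not
OMAP1 class. The shape of the guard, with a hypothetical initcall:

static int __init example_omap1_only_init(void)
{
	/* Bail out cleanly unless running on an OMAP1-class SoC. */
	if (!cpu_class_is_omap1())
		return -ENODEV;

	/* ... OMAP1-specific setup ... */
	return 0;
}
arch_initcall(example_omap1_only_init);
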
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
index 7835add..5fdef7a 100644
--- a/arch/arm/mach-omap1/mux.c
+++ b/arch/arm/mach-omap1/mux.c
@@ -343,7 +343,7 @@
 #define OMAP1XXX_PINS_SZ	0
 #endif	/* CONFIG_ARCH_OMAP15XX || CONFIG_ARCH_OMAP16XX */
 
-int __init_or_module omap1_cfg_reg(const struct pin_config *cfg)
+static int __init_or_module omap1_cfg_reg(const struct pin_config *cfg)
 {
 	static DEFINE_SPINLOCK(mux_spin_lock);
 	unsigned long flags;
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index b1d3f9f..98ba978 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -647,7 +647,7 @@
 
 
 
-static struct platform_suspend_ops omap_pm_ops ={
+static const struct platform_suspend_ops omap_pm_ops = {
 	.prepare	= omap_pm_prepare,
 	.enter		= omap_pm_enter,
 	.finish		= omap_pm_finish,
@@ -661,6 +661,9 @@
 	int error;
 #endif
 
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	printk("Power Management for TI OMAP.\n");
 
 	/*
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 8b66392..6588c22 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -48,7 +48,6 @@
 
 static int omap1_pm_runtime_resume(struct device *dev)
 {
-	int ret = 0;
 	struct clk *iclk, *fclk;
 
 	dev_dbg(dev, "%s\n", __func__);
@@ -73,6 +72,9 @@
 	const struct dev_pm_ops *pm;
 	struct dev_pm_ops *omap_pm;
 
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	pm = platform_bus_get_pm_ops();
 	if (!pm) {
 		pr_err("%s: unable to get dev_pm_ops from platform_bus\n",
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index b78d074..550ca9d 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -27,6 +27,8 @@
 #include <mach/gpio.h>
 #include <plat/fpga.h>
 
+#include "pm.h"
+
 static struct clk * uart1_ck;
 static struct clk * uart2_ck;
 static struct clk * uart3_ck;
@@ -52,9 +54,11 @@
  */
 static void __init omap_serial_reset(struct plat_serial8250_port *p)
 {
-	omap_serial_outp(p, UART_OMAP_MDR1, 0x07);	/* disable UART */
+	omap_serial_outp(p, UART_OMAP_MDR1,
+			UART_OMAP_MDR1_DISABLE);	/* disable UART */
 	omap_serial_outp(p, UART_OMAP_SCR, 0x08);	/* TX watermark */
-	omap_serial_outp(p, UART_OMAP_MDR1, 0x00);	/* enable UART */
+	omap_serial_outp(p, UART_OMAP_MDR1,
+			UART_OMAP_MDR1_16X_MODE);	/* enable UART */
 
 	if (!cpu_is_omap15xx()) {
 		omap_serial_outp(p, UART_OMAP_SYSC, 0x01);
@@ -254,6 +258,9 @@
 
 static int __init omap_init(void)
 {
+	if (!cpu_class_is_omap1())
+		return -ENODEV;
+
 	return platform_device_register(&serial_device);
 }
 arch_initcall(omap_init);
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index abb34ff..ed7a61f 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -52,6 +52,7 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
+#include <plat/common.h>
 
 #define OMAP_MPU_TIMER_BASE		OMAP_MPU_TIMER1_BASE
 #define OMAP_MPU_TIMER_OFFSET		0x100
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index ab784bf..1a2cf62 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -15,7 +15,7 @@
 	select SERIAL_OMAP_CONSOLE
 	select I2C
 	select I2C_OMAP
-	select MFD
+	select MFD_SUPPORT
 	select MENELAUS if ARCH_OMAP2
 	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
@@ -35,6 +35,8 @@
 	select CPU_V7
 	select USB_ARCH_HAS_EHCI
 	select ARM_L1_CACHE_SHIFT_6 if !ARCH_OMAP4
+	select ARCH_HAS_OPP
+	select PM_OPP if PM
 
 config ARCH_OMAP4
 	bool "TI OMAP4"
@@ -44,6 +46,9 @@
 	select ARM_GIC
 	select PL310_ERRATA_588369
 	select ARM_ERRATA_720789
+	select ARCH_HAS_OPP
+	select PM_OPP if PM
+	select USB_ARCH_HAS_EHCI
 
 comment "OMAP Core Type"
 	depends on ARCH_OMAP2
@@ -85,6 +90,12 @@
 config OMAP_PACKAGE_CBP
        bool
 
+config OMAP_PACKAGE_CBL
+       bool
+
+config OMAP_PACKAGE_CBS
+       bool
+
 comment "OMAP Board Type"
 	depends on ARCH_OMAP2PLUS
 
@@ -128,7 +139,6 @@
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CUS
-	select OMAP_MUX
 
 config MACH_OMAP_LDP
 	bool "OMAP3 LDP board"
@@ -174,11 +184,17 @@
 	default y
 	select OMAP_PACKAGE_CBB
 
+config MACH_CRANEBOARD
+	bool "AM3517/05 CRANE board"
+	depends on ARCH_OMAP3
+	select OMAP_PACKAGE_CBB
+
 config MACH_OMAP3_PANDORA
 	bool "OMAP3 Pandora"
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CBB
+	select REGULATOR_FIXED_VOLTAGE
 
 config MACH_OMAP3_TOUCHBOOK
 	bool "OMAP3 Touch Book"
@@ -210,6 +226,12 @@
 	select MACH_NOKIA_N810
 	select MACH_NOKIA_N810_WIMAX
 
+config MACH_NOKIA_RM680
+	bool "Nokia RM-680 board"
+	depends on ARCH_OMAP3
+	default y
+	select OMAP_PACKAGE_CBB
+
 config MACH_NOKIA_RX51
 	bool "Nokia RX-51 board"
 	depends on ARCH_OMAP3
@@ -224,6 +246,7 @@
 	select SERIAL_8250
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_8250_CONSOLE
+	select REGULATOR_FIXED_VOLTAGE
 
 config MACH_OMAP_ZOOM3
 	bool "OMAP3630 Zoom3 board"
@@ -233,20 +256,19 @@
 	select SERIAL_8250
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_8250_CONSOLE
+	select REGULATOR_FIXED_VOLTAGE
 
 config MACH_CM_T35
 	bool "CompuLab CM-T35 module"
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CUS
-	select OMAP_MUX
 
 config MACH_CM_T3517
 	bool "CompuLab CM-T3517 module"
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CBB
-	select OMAP_MUX
 
 config MACH_IGEP0020
 	bool "IGEP v2 board"
@@ -265,7 +287,6 @@
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CUS
-	select OMAP_MUX
 
 config MACH_OMAP_3630SDP
 	bool "OMAP3630 SDP board"
@@ -277,11 +298,15 @@
 	bool "OMAP 4430 SDP board"
 	default y
 	depends on ARCH_OMAP4
+	select OMAP_PACKAGE_CBL
+	select OMAP_PACKAGE_CBS
 
 config MACH_OMAP4_PANDA
 	bool "OMAP4 Panda Board"
 	default y
 	depends on ARCH_OMAP4
+	select OMAP_PACKAGE_CBL
+	select OMAP_PACKAGE_CBS
 
 config OMAP3_EMU
 	bool "OMAP3 debugging peripherals"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 60e51bc..1c0c2b0 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -4,30 +4,31 @@
 
 # Common support
 obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o pm.o \
-	 common.o
+	 common.o gpio.o dma.o wd_timer.o
 
-omap-2-3-common				= irq.o sdrc.o prm2xxx_3xxx.o
+omap-2-3-common				= irq.o sdrc.o
 hwmod-common				= omap_hwmod.o \
 					  omap_hwmod_common_data.o
-prcm-common				= prcm.o powerdomain.o
 clock-common				= clock.o clock_common_data.o \
-					  clockdomain.o clkt_dpll.o \
-					  clkt_clksel.o
+					  clkt_dpll.o clkt_clksel.o
 
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(prcm-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(prcm-common) $(hwmod-common)
-obj-$(CONFIG_ARCH_OMAP4) += $(prcm-common) prm44xx.o $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
+obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
+obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
+
 # SMP support ONLY available for OMAP4
 obj-$(CONFIG_SMP)			+= omap-smp.o omap-headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)		+= timer-mpu.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= omap-hotplug.o
 obj-$(CONFIG_ARCH_OMAP4)		+= omap44xx-smc.o omap4-common.o
 
-AFLAGS_omap-headsmp.o			:=-Wa,-march=armv7-a
-AFLAGS_omap44xx-smc.o			:=-Wa,-march=armv7-a
+plus_sec := $(call as-instr,.arch_extension sec,+sec)
+AFLAGS_omap-headsmp.o			:=-Wa,-march=armv7-a$(plus_sec)
+AFLAGS_omap44xx-smc.o			:=-Wa,-march=armv7-a$(plus_sec)
 
 # Functions loaded to SRAM
 obj-$(CONFIG_ARCH_OMAP2420)		+= sram242x.o
@@ -42,18 +43,29 @@
 obj-$(CONFIG_ARCH_OMAP2420)		+= mux2420.o
 obj-$(CONFIG_ARCH_OMAP2430)		+= mux2430.o
 obj-$(CONFIG_ARCH_OMAP3)		+= mux34xx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= mux44xx.o
 
 # SMS/SDRC
 obj-$(CONFIG_ARCH_OMAP2)		+= sdrc2xxx.o
 # obj-$(CONFIG_ARCH_OMAP3)		+= sdrc3xxx.o
 
+# OPP table initialization
+ifeq ($(CONFIG_PM_OPP),y)
+obj-y					+= opp.o
+obj-$(CONFIG_ARCH_OMAP3)		+= opp3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= opp4xxx_data.o
+endif
+
 # Power Management
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_ARCH_OMAP2)		+= pm24xx.o
-obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o pm_bus.o
-obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o cpuidle34xx.o pm_bus.o
-obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o pm_bus.o
+obj-$(CONFIG_ARCH_OMAP2)		+= sleep24xx.o pm_bus.o voltage.o
+obj-$(CONFIG_ARCH_OMAP3)		+= pm34xx.o sleep34xx.o voltage.o \
+					   cpuidle34xx.o pm_bus.o
+obj-$(CONFIG_ARCH_OMAP4)		+= pm44xx.o voltage.o pm_bus.o
 obj-$(CONFIG_PM_DEBUG)			+= pm-debug.o
+obj-$(CONFIG_OMAP_SMARTREFLEX)          += sr_device.o smartreflex.o
+obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3)	+= smartreflex-class3.o
 
 AFLAGS_sleep24xx.o			:=-Wa,-march=armv6
 AFLAGS_sleep34xx.o			:=-Wa,-march=armv7-a
@@ -65,10 +77,36 @@
 endif
 
 # PRCM
-obj-$(CONFIG_ARCH_OMAP2)		+= cm.o
-obj-$(CONFIG_ARCH_OMAP3)		+= cm.o
-obj-$(CONFIG_ARCH_OMAP4)		+= cm4xxx.o
+obj-$(CONFIG_ARCH_OMAP2)		+= prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)		+= prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+# XXX The presence of cm2xxx_3xxx.o on the line below is temporary and
+# will be removed once the OMAP4 part of the codebase is converted to
+# use OMAP4-specific PRCM functions.
+obj-$(CONFIG_ARCH_OMAP4)		+= prcm.o cm2xxx_3xxx.o cminst44xx.o \
+					   cm44xx.o prcm_mpu44xx.o \
+					   prminst44xx.o
 
+# OMAP powerdomain framework
+powerdomain-common			+= powerdomain.o powerdomain-common.o
+obj-$(CONFIG_ARCH_OMAP2)		+= $(powerdomain-common) \
+					   powerdomain2xxx_3xxx.o \
+					   powerdomains2xxx_data.o \
+					   powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(powerdomain-common) \
+					   powerdomain2xxx_3xxx.o \
+					   powerdomains3xxx_data.o \
+					   powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= $(powerdomain-common) \
+					   powerdomain44xx.o \
+					   powerdomains44xx_data.o
+
+# PRCM clockdomain control
+obj-$(CONFIG_ARCH_OMAP2)		+= clockdomain.o \
+					   clockdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)		+= clockdomain.o \
+					   clockdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4)		+= clockdomain.o \
+					   clockdomains44xx_data.o
 # Clock framework
 obj-$(CONFIG_ARCH_OMAP2)		+= $(clock-common) clock2xxx.o \
 					   clkt2xxx_sys.o \
@@ -139,23 +177,29 @@
 					   hsmmc.o \
 					   board-flash.o
 obj-$(CONFIG_MACH_NOKIA_N8X0)		+= board-n8x0.o
+obj-$(CONFIG_MACH_NOKIA_RM680)		+= board-rm680.o \
+					   sdram-nokia.o \
+					   hsmmc.o
 obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51.o \
-					   board-rx51-sdram.o \
+					   sdram-nokia.o \
 					   board-rx51-peripherals.o \
 					   board-rx51-video.o \
 					   hsmmc.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom2.o \
+obj-$(CONFIG_MACH_OMAP_ZOOM2)		+= board-zoom.o \
 					   board-zoom-peripherals.o \
+					   board-zoom-display.o \
 					   board-flash.o \
 					   hsmmc.o \
 					   board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom3.o \
+obj-$(CONFIG_MACH_OMAP_ZOOM3)		+= board-zoom.o \
 					   board-zoom-peripherals.o \
+					   board-zoom-display.o \
 					   board-flash.o \
 					   hsmmc.o \
 					   board-zoom-debugboard.o
 obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-3630sdp.o \
 					   board-zoom-peripherals.o \
+					   board-zoom-display.o \
 					   board-flash.o \
 					   hsmmc.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o \
@@ -168,12 +212,16 @@
 obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o \
 					   hsmmc.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o \
-					   hsmmc.o
+					   hsmmc.o \
+					   omap_phy_internal.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)		+= board-omap4panda.o \
-					   hsmmc.o
+					   hsmmc.o \
+					   omap_phy_internal.o
 
 obj-$(CONFIG_MACH_OMAP3517EVM)		+= board-am3517evm.o
 
+obj-$(CONFIG_MACH_CRANEBOARD)		+= board-am3517crane.o
+
 obj-$(CONFIG_MACH_SBC3530)		+= board-omap3stalker.o \
 					   hsmmc.o
 # Platform specific device init code
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index b527f8d..e066177 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -135,7 +135,7 @@
 
 #endif
 
-static struct omap_board_config_kernel sdp2430_config[] = {
+static struct omap_board_config_kernel sdp2430_config[] __initdata = {
 	{OMAP_TAG_LCD, &sdp2430_lcd_config},
 };
 
@@ -143,9 +143,9 @@
 {
 	omap_board_config = sdp2430_config;
 	omap_board_config_size = ARRAY_SIZE(sdp2430_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct twl4030_gpio_platform_data sdp2430_gpio_data = {
@@ -218,8 +218,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static void __init omap_2430sdp_init(void)
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 4e3742c..d4e41ef 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -38,6 +38,7 @@
 #include <plat/dma.h>
 #include <plat/gpmc.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include <plat/gpmc-smc91x.h>
 
@@ -270,15 +271,20 @@
 	.platform_disable	= sdp3430_panel_disable_lcd,
 };
 
-static struct omap_dss_device sdp3430_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= sdp3430_panel_enable_dvi,
 	.platform_disable	= sdp3430_panel_disable_dvi,
 };
 
+static struct omap_dss_device sdp3430_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device sdp3430_tv_device = {
 	.name			= "tv",
 	.driver_name		= "venc",
@@ -326,9 +332,9 @@
 	omap_board_config = sdp3430_config;
 	omap_board_config_size = ARRAY_SIZE(sdp3430_config);
 	omap3_pm_init_cpuidle(omap3_cpuidle_params_table);
-	omap2_init_common_hw(hyb18m512160af6_sdrc_params, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(hyb18m512160af6_sdrc_params, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static int sdp3430_batt_table[] = {
@@ -663,8 +669,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 /*
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index bbcf580..6264564 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -73,18 +73,16 @@
 {
 	omap_board_config = sdp_config;
 	omap_board_config_size = ARRAY_SIZE(sdp_config);
-	omap2_init_common_hw(h8mbx00u0mer0em_sdrc_params,
-			h8mbx00u0mer0em_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(h8mbx00u0mer0em_sdrc_params,
+				  h8mbx00u0mer0em_sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 /*
@@ -209,6 +207,7 @@
 {
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
 	zoom_peripherals_init();
+	zoom_display_init();
 	board_smc91x_init();
 	board_flash_init(sdp_flash_partitions, chip_sel_sdp);
 	enable_board_wakeup_source();
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index df5a425..07d1b20 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -23,6 +23,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/regulator/machine.h>
 #include <linux/leds.h>
+#include <linux/leds_pwm.h>
 
 #include <mach/hardware.h>
 #include <mach/omap4-common.h>
@@ -35,6 +36,7 @@
 #include <plat/usb.h>
 #include <plat/mmc.h>
 
+#include "mux.h"
 #include "hsmmc.h"
 #include "timer-gp.h"
 #include "control.h"
@@ -42,6 +44,7 @@
 #define ETH_KS8851_IRQ			34
 #define ETH_KS8851_POWER_ON		48
 #define ETH_KS8851_QUART		138
+#define OMAP4SDP_MDM_PWR_EN_GPIO	157
 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO	184
 #define OMAP4_SFH7741_ENABLE_GPIO		188
 
@@ -96,6 +99,28 @@
 	.num_leds	= ARRAY_SIZE(sdp4430_gpio_leds),
 };
 
+static struct led_pwm sdp4430_pwm_leds[] = {
+	{
+		.name		= "omap4:green:chrg",
+		.pwm_id		= 1,
+		.max_brightness	= 255,
+		.pwm_period_ns	= 7812500,
+	},
+};
+
+static struct led_pwm_platform_data sdp4430_pwm_data = {
+	.num_leds	= ARRAY_SIZE(sdp4430_pwm_leds),
+	.leds		= sdp4430_pwm_leds,
+};
+
+static struct platform_device sdp4430_leds_pwm = {
+	.name	= "leds_pwm",
+	.id	= -1,
+	.dev	= {
+		.platform_data = &sdp4430_pwm_data,
+	},
+};
+
 static int omap_prox_activate(struct device *dev)
 {
 	gpio_set_value(OMAP4_SFH7741_ENABLE_GPIO , 1);
@@ -203,6 +228,7 @@
 	&sdp4430_lcd_device,
 	&sdp4430_gpio_keys_device,
 	&sdp4430_leds_gpio,
+	&sdp4430_leds_pwm,
 };
 
 static struct omap_lcd_config sdp4430_lcd_config __initdata = {
@@ -217,20 +243,37 @@
 {
 	omap_board_config = sdp4430_config;
 	omap_board_config_size = ARRAY_SIZE(sdp4430_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 #ifdef CONFIG_OMAP_32K_TIMER
 	omap2_gp_clockevent_set_gptimer(1);
 #endif
 	gic_init_irq();
-	omap_gpio_init();
 }
 
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0]	= EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[1]	= EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[2]	= EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.phy_reset	= false,
+	.reset_gpio_port[0]  = -EINVAL,
+	.reset_gpio_port[1]  = -EINVAL,
+	.reset_gpio_port[2]  = -EINVAL,
+};
+
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_UTMI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 100,
 };
 
+static struct twl4030_usb_data omap4_usbphy_data = {
+	.phy_init	= omap4430_phy_init,
+	.phy_exit	= omap4430_phy_exit,
+	.phy_power	= omap4430_phy_power,
+	.phy_set_clock	= omap4430_phy_set_clk,
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	{
 		.mmc		= 1,
@@ -450,6 +493,7 @@
 	.vaux1		= &sdp4430_vaux1,
 	.vaux2		= &sdp4430_vaux2,
 	.vaux3		= &sdp4430_vaux3,
+	.usb		= &omap4_usbphy_data
 };
 
 static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
@@ -464,6 +508,9 @@
 	{
 		I2C_BOARD_INFO("tmp105", 0x48),
 	},
+	{
+		I2C_BOARD_INFO("bh1780", 0x29),
+	},
 };
 static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = {
 	{
@@ -505,20 +552,39 @@
 	}
 }
 
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+	OMAP4_MUX(USBB2_ULPITLL_CLK, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux	NULL
+#endif
+
 static void __init omap_4430sdp_init(void)
 {
 	int status;
+	int package = OMAP_PACKAGE_CBS;
+
+	if (omap_rev() == OMAP4430_REV_ES1_0)
+		package = OMAP_PACKAGE_CBL;
+	omap4_mux_init(board_mux, package);
 
 	omap4_i2c_init();
 	omap_sfh7741prox_init();
 	platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
 	omap_serial_init();
 	omap4_twl6030_hsmmc_init(mmc);
-	/* OMAP4 SDP uses internal transceiver so register nop transceiver */
-	usb_nop_xceiv_register();
-	/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
-	if (!cpu_is_omap44xx())
-		usb_musb_init(&musb_board_data);
+
+	/* Power on the ULPI PHY */
+	status = gpio_request(OMAP4SDP_MDM_PWR_EN_GPIO, "USBB1 PHY VMDM_3V3");
+	if (status)
+		pr_err("%s: Could not get USBB1 PHY GPIO\n", __func__);
+	else
+		gpio_direction_output(OMAP4SDP_MDM_PWR_EN_GPIO, 1);
+
+	usb_ehci_init(&ehci_pdata);
+	usb_musb_init(&musb_board_data);
 
 	status = omap_ethernet_init();
 	if (status) {
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
new file mode 100644
index 0000000..71acb5a
--- /dev/null
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -0,0 +1,116 @@
+/*
+ * Support for AM3517/05 Craneboard
+ * http://www.mistralsolutions.com/products/craneboard.php
+ *
+ * Copyright (C) 2010 Mistral Solutions Pvt Ltd. <www.mistralsolutions.com>
+ * Author: R.Srinath <srinath@mistralsolutions.com>
+ *
+ * Based on mach-omap2/board-am3517evm.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as  published by the
+ * Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/usb.h>
+
+#include "mux.h"
+#include "control.h"
+
+#define GPIO_USB_POWER		35
+#define GPIO_USB_NRESET		38
+
+
+/* Board initialization */
+static struct omap_board_config_kernel am3517_crane_config[] __initdata = {
+};
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux	NULL
+#endif
+
+static void __init am3517_crane_init_irq(void)
+{
+	omap_board_config = am3517_crane_config;
+	omap_board_config_size = ARRAY_SIZE(am3517_crane_config);
+
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
+	omap_init_irq();
+}
+
+static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
+	.port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+	.phy_reset  = true,
+	.reset_gpio_port[0]  = GPIO_USB_NRESET,
+	.reset_gpio_port[1]  = -EINVAL,
+	.reset_gpio_port[2]  = -EINVAL
+};
+
+static void __init am3517_crane_init(void)
+{
+	int ret;
+
+	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+	omap_serial_init();
+
+	/* Configure GPIO for EHCI port */
+	if (omap_mux_init_gpio(GPIO_USB_NRESET, OMAP_PIN_OUTPUT)) {
+		pr_err("Can not configure mux for GPIO_USB_NRESET %d\n",
+			GPIO_USB_NRESET);
+		return;
+	}
+
+	if (omap_mux_init_gpio(GPIO_USB_POWER, OMAP_PIN_OUTPUT)) {
+		pr_err("Can not configure mux for GPIO_USB_POWER %d\n",
+			GPIO_USB_POWER);
+		return;
+	}
+
+	ret = gpio_request(GPIO_USB_POWER, "usb_ehci_enable");
+	if (ret < 0) {
+		pr_err("Can not request GPIO %d\n", GPIO_USB_POWER);
+		return;
+	}
+
+	ret = gpio_direction_output(GPIO_USB_POWER, 1);
+	if (ret < 0) {
+		gpio_free(GPIO_USB_POWER);
+		pr_err("Unable to initialize EHCI power\n");
+		return;
+	}
+
+	usb_ehci_init(&ehci_pdata);
+}
+
+MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
+	.boot_params	= 0x80000100,
+	.map_io		= omap3_map_io,
+	.reserve	= omap_reserve,
+	.init_irq	= am3517_crane_init_irq,
+	.init_machine	= am3517_crane_init,
+	.timer		= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 0739950..10d60b7 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -35,6 +35,7 @@
 #include <plat/common.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include "mux.h"
 #include "control.h"
@@ -303,13 +304,18 @@
 	lcd_enabled = 0;
 }
 
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "sharp_lq",
+	.platform_enable	= am3517_evm_panel_enable_lcd,
+	.platform_disable	= am3517_evm_panel_disable_lcd,
+};
+
 static struct omap_dss_device am3517_evm_lcd_device = {
 	.type			= OMAP_DISPLAY_TYPE_DPI,
 	.name			= "lcd",
-	.driver_name		= "sharp_lq_panel",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &lcd_panel,
 	.phy.dpi.data_lines 	= 16,
-	.platform_enable	= am3517_evm_panel_enable_lcd,
-	.platform_disable	= am3517_evm_panel_disable_lcd,
 };
 
 static int am3517_evm_panel_enable_tv(struct omap_dss_device *dssdev)
@@ -346,13 +352,18 @@
 	dvi_enabled = 0;
 }
 
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
+	.platform_enable	= am3517_evm_panel_enable_dvi,
+	.platform_disable	= am3517_evm_panel_disable_dvi,
+};
+
 static struct omap_dss_device am3517_evm_dvi_device = {
 	.type			= OMAP_DISPLAY_TYPE_DPI,
 	.name			= "dvi",
-	.driver_name		= "generic_panel",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
 	.phy.dpi.data_lines	= 24,
-	.platform_enable	= am3517_evm_panel_enable_dvi,
-	.platform_disable	= am3517_evm_panel_disable_dvi,
 };
 
 static struct omap_dss_device *am3517_evm_dss_devices[] = {
@@ -389,10 +400,9 @@
 {
 	omap_board_config = am3517_evm_config;
 	omap_board_config_size = ARRAY_SIZE(am3517_evm_config);
-
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct omap_musb_board_data musb_board_data = {
@@ -442,8 +452,6 @@
 	OMAP3_MUX(SAD2D_MCAD23, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 2c6db1a..9f55b68 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -270,7 +270,7 @@
 	.ctrl_name	= "internal",
 };
 
-static struct omap_board_config_kernel apollon_config[] = {
+static struct omap_board_config_kernel apollon_config[] __initdata = {
 	{ OMAP_TAG_LCD,		&apollon_lcd_config },
 };
 
@@ -278,10 +278,9 @@
 {
 	omap_board_config = apollon_config;
 	omap_board_config_size = ARRAY_SIZE(apollon_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
-	apollon_init_smc91x();
 }
 
 static void __init apollon_led_init(void)
@@ -314,8 +313,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static void __init omap_apollon_init(void)
@@ -324,6 +321,7 @@
 
 	omap2420_mux_init(board_mux, OMAP_PACKAGE_ZAC);
 
+	apollon_init_smc91x();
 	apollon_led_init();
 	apollon_flash_init();
 	apollon_usb_init();
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 63f764e..dac1416 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -46,6 +46,7 @@
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 #include <plat/mcspi.h>
 
 #include <mach/hardware.h>
@@ -351,24 +352,34 @@
 {
 }
 
-static struct omap_dss_device cm_t35_lcd_device = {
-	.name			= "lcd",
-	.driver_name		= "toppoly_tdo35s_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 18,
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "toppoly_tdo35s",
 	.platform_enable	= cm_t35_panel_enable_lcd,
 	.platform_disable	= cm_t35_panel_disable_lcd,
 };
 
-static struct omap_dss_device cm_t35_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
+static struct omap_dss_device cm_t35_lcd_device = {
+	.name			= "lcd",
 	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &lcd_panel,
+	.phy.dpi.data_lines	= 18,
+};
+
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= cm_t35_panel_enable_dvi,
 	.platform_disable	= cm_t35_panel_disable_dvi,
 };
 
+static struct omap_dss_device cm_t35_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device cm_t35_tv_device = {
 	.name			= "tv",
 	.driver_name		= "venc",
@@ -600,8 +611,8 @@
 	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
 
 	.phy_reset  = true,
-	.reset_gpio_port[0]  = -EINVAL,
-	.reset_gpio_port[1]  = -EINVAL,
+	.reset_gpio_port[0]  = OMAP_MAX_GPIO_LINES + 6,
+	.reset_gpio_port[1]  = OMAP_MAX_GPIO_LINES + 7,
 	.reset_gpio_port[2]  = -EINVAL
 };
 
@@ -630,12 +641,6 @@
 	cm_t35_vmmc1_supply.dev = mmc[0].dev;
 	cm_t35_vsim_supply.dev = mmc[0].dev;
 
-	/* setup USB with proper PHY reset GPIOs */
-	ehci_pdata.reset_gpio_port[0] = gpio + 6;
-	ehci_pdata.reset_gpio_port[1] = gpio + 7;
-
-	usb_ehci_init(&ehci_pdata);
-
 	return 0;
 }
 
@@ -683,10 +688,10 @@
 	omap_board_config = cm_t35_config;
 	omap_board_config_size = ARRAY_SIZE(cm_t35_config);
 
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
 			     mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct omap_board_mux board_mux[] __initdata = {
@@ -805,6 +810,7 @@
 	cm_t35_init_display();
 
 	usb_musb_init(&musb_board_data);
+	usb_ehci_init(&ehci_pdata);
 }
 
 MACHINE_START(CM_T35, "Compulab CM-T35")
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 1dd303e..5b0c777 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -248,9 +248,9 @@
 	omap_board_config = cm_t3517_config;
 	omap_board_config_size = ARRAY_SIZE(cm_t3517_config);
 
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct omap_board_mux board_mux[] __initdata = {
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 53ac762..00bb1fc 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -46,6 +46,7 @@
 #include <plat/nand.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include <plat/mcspi.h>
 #include <linux/input/matrix_keypad.h>
@@ -118,27 +119,27 @@
 	twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
 
 	if (gpio_is_valid(dssdev->reset_gpio))
-		gpio_set_value(dssdev->reset_gpio, 1);
+		gpio_set_value_cansleep(dssdev->reset_gpio, 1);
 	return 0;
 }
 
 static void devkit8000_panel_disable_lcd(struct omap_dss_device *dssdev)
 {
 	if (gpio_is_valid(dssdev->reset_gpio))
-		gpio_set_value(dssdev->reset_gpio, 0);
+		gpio_set_value_cansleep(dssdev->reset_gpio, 0);
 }
 
 static int devkit8000_panel_enable_dvi(struct omap_dss_device *dssdev)
 {
 	if (gpio_is_valid(dssdev->reset_gpio))
-		gpio_set_value(dssdev->reset_gpio, 1);
+		gpio_set_value_cansleep(dssdev->reset_gpio, 1);
 	return 0;
 }
 
 static void devkit8000_panel_disable_dvi(struct omap_dss_device *dssdev)
 {
 	if (gpio_is_valid(dssdev->reset_gpio))
-		gpio_set_value(dssdev->reset_gpio, 0);
+		gpio_set_value_cansleep(dssdev->reset_gpio, 0);
 }
 
 static struct regulator_consumer_supply devkit8000_vmmc1_supply =
@@ -149,25 +150,34 @@
 static struct regulator_consumer_supply devkit8000_vio_supply =
 	REGULATOR_SUPPLY("vcc", "spi2.0");
 
-static struct omap_dss_device devkit8000_lcd_device = {
-	.name                   = "lcd",
-	.driver_name            = "generic_panel",
-	.type                   = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines     = 24,
-	.reset_gpio             = -EINVAL, /* will be replaced */
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "generic",
 	.platform_enable        = devkit8000_panel_enable_lcd,
 	.platform_disable       = devkit8000_panel_disable_lcd,
 };
-static struct omap_dss_device devkit8000_dvi_device = {
-	.name                   = "dvi",
-	.driver_name            = "generic_panel",
+
+static struct omap_dss_device devkit8000_lcd_device = {
+	.name                   = "lcd",
 	.type                   = OMAP_DISPLAY_TYPE_DPI,
+	.driver_name            = "generic_dpi_panel",
+	.data			= &lcd_panel,
 	.phy.dpi.data_lines     = 24,
-	.reset_gpio             = -EINVAL, /* will be replaced */
+};
+
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable        = devkit8000_panel_enable_dvi,
 	.platform_disable       = devkit8000_panel_disable_dvi,
 };
 
+static struct omap_dss_device devkit8000_dvi_device = {
+	.name                   = "dvi",
+	.type                   = OMAP_DISPLAY_TYPE_DPI,
+	.driver_name            = "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines     = 24,
+};
+
 static struct omap_dss_device devkit8000_tv_device = {
 	.name                   = "tv",
 	.driver_name            = "venc",
@@ -444,13 +454,13 @@
 
 static void __init devkit8000_init_irq(void)
 {
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
-			     mt46h32m32lf6_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+				  mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
 #ifdef CONFIG_OMAP_32K_TIMER
 	omap2_gp_clockevent_set_gptimer(12);
 #endif
-	omap_gpio_init();
 }
 
 static void __init devkit8000_ads7846_init(void)
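
In the devkit8000 hunks above, the panel enable/disable callbacks switch from
gpio_set_value() to gpio_set_value_cansleep(), presumably because
dssdev->reset_gpio ends up pointing at a line on the TWL4030 expander, which
is reached over I2C and may sleep. A small illustration of the distinction
(the helper name is made up); callers of the _cansleep variant must be in
sleepable context:

#include <linux/gpio.h>

static void example_assert_reset(unsigned gpio, int level)
{
	if (gpio_cansleep(gpio))
		gpio_set_value_cansleep(gpio, level);	/* may sleep (e.g. I2C expander) */
	else
		gpio_set_value(gpio, level);		/* safe in atomic context */
}
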
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index b1c2c9a..0e3d81e 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -37,7 +37,8 @@
 {
 	omap_board_config = generic_config;
 	omap_board_config_size = ARRAY_SIZE(generic_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
 }
 
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 929993b..25cc9da 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -51,38 +51,37 @@
 static unsigned int row_gpios[6] = { 88, 89, 124, 11, 6, 96 };
 static unsigned int col_gpios[7] = { 90, 91, 100, 36, 12, 97, 98 };
 
-static int h4_keymap[] = {
+static const unsigned int h4_keymap[] = {
 	KEY(0, 0, KEY_LEFT),
-	KEY(0, 1, KEY_RIGHT),
-	KEY(0, 2, KEY_A),
-	KEY(0, 3, KEY_B),
-	KEY(0, 4, KEY_C),
-	KEY(1, 0, KEY_DOWN),
+	KEY(1, 0, KEY_RIGHT),
+	KEY(2, 0, KEY_A),
+	KEY(3, 0, KEY_B),
+	KEY(4, 0, KEY_C),
+	KEY(0, 1, KEY_DOWN),
 	KEY(1, 1, KEY_UP),
-	KEY(1, 2, KEY_E),
-	KEY(1, 3, KEY_F),
-	KEY(1, 4, KEY_G),
-	KEY(2, 0, KEY_ENTER),
-	KEY(2, 1, KEY_I),
+	KEY(2, 1, KEY_E),
+	KEY(3, 1, KEY_F),
+	KEY(4, 1, KEY_G),
+	KEY(0, 2, KEY_ENTER),
+	KEY(1, 2, KEY_I),
 	KEY(2, 2, KEY_J),
-	KEY(2, 3, KEY_K),
-	KEY(2, 4, KEY_3),
-	KEY(3, 0, KEY_M),
-	KEY(3, 1, KEY_N),
-	KEY(3, 2, KEY_O),
+	KEY(3, 2, KEY_K),
+	KEY(4, 2, KEY_3),
+	KEY(0, 3, KEY_M),
+	KEY(1, 3, KEY_N),
+	KEY(2, 3, KEY_O),
 	KEY(3, 3, KEY_P),
-	KEY(3, 4, KEY_Q),
-	KEY(4, 0, KEY_R),
-	KEY(4, 1, KEY_4),
-	KEY(4, 2, KEY_T),
-	KEY(4, 3, KEY_U),
+	KEY(4, 3, KEY_Q),
+	KEY(0, 4, KEY_R),
+	KEY(1, 4, KEY_4),
+	KEY(2, 4, KEY_T),
+	KEY(3, 4, KEY_U),
 	KEY(4, 4, KEY_ENTER),
-	KEY(5, 0, KEY_V),
-	KEY(5, 1, KEY_W),
-	KEY(5, 2, KEY_L),
-	KEY(5, 3, KEY_S),
-	KEY(5, 4, KEY_ENTER),
-	0
+	KEY(0, 5, KEY_V),
+	KEY(1, 5, KEY_W),
+	KEY(2, 5, KEY_L),
+	KEY(3, 5, KEY_S),
+	KEY(4, 5, KEY_ENTER),
 };
 
 static struct mtd_partition h4_partitions[] = {
@@ -136,12 +135,16 @@
 	.resource	= &h4_flash_resource,
 };
 
+static const struct matrix_keymap_data h4_keymap_data = {
+	.keymap		= h4_keymap,
+	.keymap_size	= ARRAY_SIZE(h4_keymap),
+};
+
 static struct omap_kp_platform_data h4_kp_data = {
 	.rows		= 6,
 	.cols		= 7,
-	.keymap 	= h4_keymap,
-	.keymapsize 	= ARRAY_SIZE(h4_keymap),
-	.rep		= 1,
+	.keymap_data	= &h4_keymap_data,
+	.rep		= true,
 	.row_gpios 	= row_gpios,
 	.col_gpios 	= col_gpios,
 };
@@ -283,7 +286,7 @@
 	.hmc_mode	= 0x00,		/* 0:dev|otg 1:disable 2:disable */
 };
 
-static struct omap_board_config_kernel h4_config[] = {
+static struct omap_board_config_kernel h4_config[] __initdata = {
 	{ OMAP_TAG_LCD,		&h4_lcd_config },
 };
 
@@ -291,9 +294,9 @@
 {
 	omap_board_config = h4_config;
 	omap_board_config_size = ARRAY_SIZE(h4_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 	h4_init_flash();
 }
 
@@ -321,8 +324,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static void __init omap_h4_init(void)
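
The keymap hunks above convert board-h4 from the legacy OMAP keypad keymap
(an int array with a terminating 0) to the generic matrix_keymap_data from
<linux/input/matrix_keypad.h>, where the size travels in .keymap_size; the
swapped first two KEY() arguments appear to reflect that the generic macro
packs (row, col, keycode) rather than the legacy (col, row) ordering. Purely
as an illustration, entries can be unpacked with the matching helper macros:

#include <linux/kernel.h>
#include <linux/input/matrix_keypad.h>

/* Illustration only: unpack one entry of a KEY(row, col, keycode) keymap. */
static void example_dump_key(unsigned int entry)
{
	pr_info("row %u, col %u -> keycode %u\n",
		KEY_ROW(entry), KEY_COL(entry), KEY_VAL(entry));
}

/* e.g. example_dump_key(KEY(1, 0, KEY_RIGHT)) reports row 1, column 0. */
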
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 5e035a5..3be85a1 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -17,8 +17,10 @@
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
+#include <linux/input.h>
 
 #include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 #include <linux/i2c/twl.h>
 #include <linux/mmc/host.h>
 
@@ -30,6 +32,7 @@
 #include <plat/gpmc.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 #include <plat/onenand.h>
 
 #include "mux.h"
@@ -136,16 +139,9 @@
 	},
 };
 
-static int igep2_onenand_setup(void __iomem *onenand_base, int freq)
-{
-       /* nothing is required to be setup for onenand as of now */
-       return 0;
-}
-
 static struct omap_onenand_platform_data igep2_onenand_data = {
 	.parts = igep2_onenand_partitions,
 	.nr_parts = ARRAY_SIZE(igep2_onenand_partitions),
-	.onenand_setup = igep2_onenand_setup,
 	.dma_channel	= -1,	/* disable DMA in OMAP OneNAND driver */
 };
 
@@ -159,35 +155,34 @@
 
 static void __init igep2_flash_init(void)
 {
-	u8		cs = 0;
-	u8		onenandcs = GPMC_CS_NUM + 1;
+	u8 cs = 0;
+	u8 onenandcs = GPMC_CS_NUM + 1;
 
-	while (cs < GPMC_CS_NUM) {
-		u32 ret = 0;
+	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
+		u32 ret;
 		ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
 
 		/* Check if NAND/oneNAND is configured */
 		if ((ret & 0xC00) == 0x800)
 			/* NAND found */
-			pr_err("IGEP v2: Unsupported NAND found\n");
+			pr_err("IGEP2: Unsupported NAND found\n");
 		else {
 			ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
 			if ((ret & 0x3F) == (ONENAND_MAP >> 24))
 				/* ONENAND found */
 				onenandcs = cs;
 		}
-		cs++;
 	}
+
 	if (onenandcs > GPMC_CS_NUM) {
-		pr_err("IGEP v2: Unable to find configuration in GPMC\n");
+		pr_err("IGEP2: Unable to find configuration in GPMC\n");
 		return;
 	}
 
-	if (onenandcs < GPMC_CS_NUM) {
-		igep2_onenand_data.cs = onenandcs;
-		if (platform_device_register(&igep2_onenand_device) < 0)
-			pr_err("IGEP v2: Unable to register OneNAND device\n");
-	}
+	igep2_onenand_data.cs = onenandcs;
+
+	if (platform_device_register(&igep2_onenand_device) < 0)
+		pr_err("IGEP2: Unable to register OneNAND device\n");
 }
 
 #else
@@ -254,12 +249,8 @@
 static inline void __init igep2_init_smsc911x(void) { }
 #endif
 
-static struct omap_board_config_kernel igep2_config[] __initdata = {
-};
-
-static struct regulator_consumer_supply igep2_vmmc1_supply = {
-	.supply		= "vmmc",
-};
+static struct regulator_consumer_supply igep2_vmmc1_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
 static struct regulator_init_data igep2_vmmc1 = {
@@ -276,6 +267,52 @@
 	.consumer_supplies      = &igep2_vmmc1_supply,
 };
 
+static struct regulator_consumer_supply igep2_vio_supply =
+	REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep2_vio = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= 1,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies  = 1,
+	.consumer_supplies      = &igep2_vio_supply,
+};
+
+static struct regulator_consumer_supply igep2_vmmc2_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep2_vmmc2 = {
+	.constraints		= {
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+		.always_on		= 1,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep2_vmmc2_supply,
+};
+
+static struct fixed_voltage_config igep2_vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 3300000,
+	.gpio			= -EINVAL,
+	.enabled_at_boot	= 1,
+	.init_data		= &igep2_vmmc2,
+};
+
+static struct platform_device igep2_vwlan_device = {
+	.name		= "reg-fixed-voltage",
+	.id		= 0,
+	.dev = {
+		.platform_data	= &igep2_vwlan,
+	},
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	{
 		.mmc		= 1,
@@ -317,6 +354,7 @@
 		.name			= "gpio-led:green:d1",
 		.default_trigger	= "heartbeat",
 		.gpio			= -EINVAL, /* gets replaced */
+		.active_low		= 1,
 	},
 };
 
@@ -342,24 +380,21 @@
 static inline void igep2_leds_init(void)
 {
 	if ((gpio_request(IGEP2_GPIO_LED0_RED, "gpio-led:red:d0") == 0) &&
-	    (gpio_direction_output(IGEP2_GPIO_LED0_RED, 1) == 0)) {
+	    (gpio_direction_output(IGEP2_GPIO_LED0_RED, 0) == 0))
 		gpio_export(IGEP2_GPIO_LED0_RED, 0);
-		gpio_set_value(IGEP2_GPIO_LED0_RED, 0);
-	} else
+	else
 		pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n");
 
 	if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) &&
-	    (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 1) == 0)) {
+	    (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 0) == 0))
 		gpio_export(IGEP2_GPIO_LED0_GREEN, 0);
-		gpio_set_value(IGEP2_GPIO_LED0_GREEN, 0);
-	} else
+	else
 		pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n");
 
 	if ((gpio_request(IGEP2_GPIO_LED1_RED, "gpio-led:red:d1") == 0) &&
-	    (gpio_direction_output(IGEP2_GPIO_LED1_RED, 1) == 0)) {
+	    (gpio_direction_output(IGEP2_GPIO_LED1_RED, 0) == 0))
 		gpio_export(IGEP2_GPIO_LED1_RED, 0);
-		gpio_set_value(IGEP2_GPIO_LED1_RED, 0);
-	} else
+	else
 		pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n");
 
 }
@@ -373,12 +408,6 @@
 	omap2_hsmmc_init(mmc);
 
 	/*
-	 * link regulators to MMC adapters ... we "know" the
-	 * regulators will be set up only *after* we return.
-	 */
-	igep2_vmmc1_supply.dev = mmc[0].dev;
-
-	/*
 	 * REVISIT: need ehci-omap hooks for external VBUS
 	 * power switch and overcurrent detect
 	 */
@@ -397,10 +426,9 @@
 	/* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
 #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
 	if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
-	    && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) {
+	    && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0))
 		gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
-		gpio_set_value(gpio + TWL4030_GPIO_MAX + 1, 0);
-	} else
+	else
 		pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_GREEN\n");
 #else
 	igep2_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -433,13 +461,18 @@
 	gpio_direction_output(IGEP2_GPIO_DVI_PUP, 0);
 }
 
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
+	.platform_enable	= igep2_enable_dvi,
+	.platform_disable	= igep2_disable_dvi,
+};
+
 static struct omap_dss_device igep2_dvi_device = {
 	.type			= OMAP_DISPLAY_TYPE_DPI,
 	.name			= "dvi",
-	.driver_name		= "generic_panel",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
 	.phy.dpi.data_lines	= 24,
-	.platform_enable	= igep2_enable_dvi,
-	.platform_disable	= igep2_disable_dvi,
 };
 
 static struct omap_dss_device *igep2_dss_devices[] = {
@@ -489,15 +522,15 @@
 
 static struct platform_device *igep2_devices[] __initdata = {
 	&igep2_dss_device,
+	&igep2_vwlan_device,
 };
 
 static void __init igep2_init_irq(void)
 {
-	omap_board_config = igep2_config;
-	omap_board_config_size = ARRAY_SIZE(igep2_config);
-	omap2_init_common_hw(m65kxxxxam_sdrc_params, m65kxxxxam_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(m65kxxxxam_sdrc_params,
+				  m65kxxxxam_sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct twl4030_codec_audio_data igep2_audio_data = {
@@ -509,6 +542,37 @@
 	.audio = &igep2_audio_data,
 };
 
+static int igep2_keymap[] = {
+	KEY(0, 0, KEY_LEFT),
+	KEY(0, 1, KEY_RIGHT),
+	KEY(0, 2, KEY_A),
+	KEY(0, 3, KEY_B),
+	KEY(1, 0, KEY_DOWN),
+	KEY(1, 1, KEY_UP),
+	KEY(1, 2, KEY_E),
+	KEY(1, 3, KEY_F),
+	KEY(2, 0, KEY_ENTER),
+	KEY(2, 1, KEY_I),
+	KEY(2, 2, KEY_J),
+	KEY(2, 3, KEY_K),
+	KEY(3, 0, KEY_M),
+	KEY(3, 1, KEY_N),
+	KEY(3, 2, KEY_O),
+	KEY(3, 3, KEY_P)
+};
+
+static struct matrix_keymap_data igep2_keymap_data = {
+	.keymap			= igep2_keymap,
+	.keymap_size		= ARRAY_SIZE(igep2_keymap),
+};
+
+static struct twl4030_keypad_data igep2_keypad_pdata = {
+	.keymap_data	= &igep2_keymap_data,
+	.rows		= 4,
+	.cols		= 4,
+	.rep		= 1,
+};
+
 static struct twl4030_platform_data igep2_twldata = {
 	.irq_base	= TWL4030_IRQ_BASE,
 	.irq_end	= TWL4030_IRQ_END,
@@ -517,9 +581,10 @@
 	.usb		= &igep2_usb_data,
 	.codec		= &igep2_codec_data,
 	.gpio		= &igep2_twl4030_gpio_pdata,
+	.keypad		= &igep2_keypad_pdata,
 	.vmmc1          = &igep2_vmmc1,
 	.vpll2		= &igep2_vpll2,
-
+	.vio		= &igep2_vio,
 };
 
 static struct i2c_board_info __initdata igep2_i2c1_boardinfo[] = {
@@ -577,8 +642,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
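
The igep0020 hunks above stop patching igep2_vmmc1_supply.dev after omap2_hsmmc_init() and instead key the supply on the consumer's device name via REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"); the WLAN rail becomes a fixed 3.3 V regulator registered through the generic "reg-fixed-voltage" driver. A minimal sketch of the consumer side, with a hypothetical driver that is not part of this patch, showing why dev_name matching removes the need for a struct device pointer in the board file:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer probe: the regulator core matches the "vmmc"
 * request against dev_name(&pdev->dev), e.g. "mmci-omap-hs.0", which is
 * exactly the string given to REGULATOR_SUPPLY() in the board file. */
static int example_mmc_probe(struct platform_device *pdev)
{
	struct regulator *vmmc = regulator_get(&pdev->dev, "vmmc");

	if (IS_ERR(vmmc))
		return PTR_ERR(vmmc);

	return regulator_enable(vmmc);
}
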
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
index 22b0b25..4dc62a9 100644
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ b/arch/arm/mach-omap2/board-igep0030.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 
 #include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 #include <linux/i2c/twl.h>
 #include <linux/mmc/host.h>
 
@@ -43,7 +44,7 @@
 #define IGEP3_GPIO_WIFI_NRESET	139
 #define IGEP3_GPIO_BT_NRESET	137
 
-#define IGEP3_GPIO_USBH_NRESET  115
+#define IGEP3_GPIO_USBH_NRESET  183
 
 
 #if defined(CONFIG_MTD_ONENAND_OMAP2) || \
@@ -103,7 +104,7 @@
 	},
 };
 
-void __init igep3_flash_init(void)
+static void __init igep3_flash_init(void)
 {
 	u8 cs = 0;
 	u8 onenandcs = GPMC_CS_NUM + 1;
@@ -137,12 +138,11 @@
 }
 
 #else
-void __init igep3_flash_init(void) {}
+static void __init igep3_flash_init(void) {}
 #endif
 
-static struct regulator_consumer_supply igep3_vmmc1_supply = {
-	.supply		= "vmmc",
-};
+static struct regulator_consumer_supply igep3_vmmc1_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0");
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
 static struct regulator_init_data igep3_vmmc1 = {
@@ -159,6 +159,52 @@
 	.consumer_supplies      = &igep3_vmmc1_supply,
 };
 
+static struct regulator_consumer_supply igep3_vio_supply =
+	REGULATOR_SUPPLY("vmmc_aux", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep3_vio = {
+	.constraints = {
+		.min_uV			= 1800000,
+		.max_uV			= 1800000,
+		.apply_uV		= 1,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
+					| REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep3_vio_supply,
+};
+
+static struct regulator_consumer_supply igep3_vmmc2_supply =
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1");
+
+static struct regulator_init_data igep3_vmmc2 = {
+	.constraints	= {
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
+		.always_on		= 1,
+	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= &igep3_vmmc2_supply,
+};
+
+static struct fixed_voltage_config igep3_vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 3300000,
+	.gpio			= -EINVAL,
+	.enabled_at_boot	= 1,
+	.init_data		= &igep3_vmmc2,
+};
+
+static struct platform_device igep3_vwlan_device = {
+	.name	= "reg-fixed-voltage",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &igep3_vwlan,
+	},
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	[0] = {
 		.mmc		= 1,
@@ -254,12 +300,6 @@
 	mmc[0].gpio_cd = gpio + 0;
 	omap2_hsmmc_init(mmc);
 
-	/*
-	 * link regulators to MMC adapters ... we "know" the
-	 * regulators will be set up only *after* we return.
-	 */
-	igep3_vmmc1_supply.dev = mmc[0].dev;
-
 	/* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
 #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
 	if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
@@ -287,11 +327,16 @@
 	.usb_mode	= T2_USB_MODE_ULPI,
 };
 
+static struct platform_device *igep3_devices[] __initdata = {
+	&igep3_vwlan_device,
+};
+
 static void __init igep3_init_irq(void)
 {
-	omap2_init_common_hw(m65kxxxxam_sdrc_params, m65kxxxxam_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(m65kxxxxam_sdrc_params,
+				  m65kxxxxam_sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct twl4030_platform_data igep3_twl4030_pdata = {
@@ -302,6 +347,7 @@
 	.usb		= &igep3_twl4030_usb_data,
 	.gpio		= &igep3_twl4030_gpio_pdata,
 	.vmmc1		= &igep3_vmmc1,
+	.vio		= &igep3_vio,
 };
 
 static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = {
@@ -362,12 +408,22 @@
 void __init igep3_wifi_bt_init(void) {}
 #endif
 
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+	.phy_reset = true,
+	.reset_gpio_port[0] = -EINVAL,
+	.reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
+	.reset_gpio_port[2] = -EINVAL,
+};
+
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+	OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static void __init igep3_init(void)
@@ -376,9 +432,10 @@
 
 	/* Register I2C busses and drivers */
 	igep3_i2c_init();
-
+	platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices));
 	omap_serial_init();
 	usb_musb_init(&musb_board_data);
+	usb_ehci_init(&ehci_pdata);
 
 	igep3_flash_init();
 	igep3_leds_init();
@@ -393,6 +450,7 @@
 
 MACHINE_START(IGEP0030, "IGEP OMAP3 module")
 	.boot_params	= 0x80000100,
+	.reserve	= omap_reserve,
 	.map_io		= omap3_map_io,
 	.init_irq	= igep3_init_irq,
 	.init_machine	= igep3_init,
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 001fd97..e5dc748 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -292,10 +292,9 @@
 {
 	omap_board_config = ldp_config;
 	omap_board_config_size = ARRAY_SIZE(ldp_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
-	ldp_init_smsc911x();
 }
 
 static struct twl4030_usb_data ldp_usb_data = {
@@ -381,8 +380,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
@@ -426,6 +423,7 @@
 static void __init omap_ldp_init(void)
 {
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+	ldp_init_smsc911x();
 	omap_i2c_init();
 	platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices));
 	ts_gpio = 54;
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index e823c70..f396756 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -46,8 +46,7 @@
 #define TUSB6010_GPIO_ENABLE	0
 #define TUSB6010_DMACHAN	0x3f
 
-#if defined(CONFIG_USB_TUSB6010) || \
-	defined(CONFIG_USB_TUSB6010_MODULE)
+#ifdef CONFIG_USB_MUSB_TUSB6010
 /*
  * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
  * 1.5 V voltage regulators of PM companion chip. Companion chip will then
@@ -134,7 +133,7 @@
 
 static void __init n8x0_usb_init(void) {}
 
-#endif /*CONFIG_USB_TUSB6010 */
+#endif /*CONFIG_USB_MUSB_TUSB6010 */
 
 
 static struct omap2_mcspi_device_config p54spi_mcspi_config = {
@@ -184,23 +183,15 @@
 	},
 };
 
-static struct omap_onenand_platform_data board_onenand_data = {
-	.cs		= 0,
-	.gpio_irq	= 26,
-	.parts		= onenand_partitions,
-	.nr_parts	= ARRAY_SIZE(onenand_partitions),
-	.flags		= ONENAND_SYNC_READ,
+static struct omap_onenand_platform_data board_onenand_data[] = {
+	{
+		.cs		= 0,
+		.gpio_irq	= 26,
+		.parts		= onenand_partitions,
+		.nr_parts	= ARRAY_SIZE(onenand_partitions),
+		.flags		= ONENAND_SYNC_READ,
+	}
 };
-
-static void __init n8x0_onenand_init(void)
-{
-	gpmc_onenand_init(&board_onenand_data);
-}
-
-#else
-
-static void __init n8x0_onenand_init(void) {}
-
 #endif
 
 #if defined(CONFIG_MENELAUS) &&						\
@@ -639,9 +630,9 @@
 
 static void __init n8x0_init_irq(void)
 {
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 #ifdef CONFIG_OMAP_MUX
@@ -653,8 +644,43 @@
 	OMAP2420_MUX(EAC_AC_DOUT, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
+
+static struct omap_device_pad serial2_pads[] __initdata = {
+	{
+		.name	= "uart3_rx_irrx.uart3_rx_irrx",
+		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
+		.enable	= OMAP_MUX_MODE0,
+		.idle	= OMAP_MUX_MODE3	/* Mux as GPIO for idle */
+	},
+};
+
+static inline void board_serial_init(void)
+{
+	struct omap_board_data bdata;
+
+	bdata.flags = 0;
+	bdata.pads = NULL;
+	bdata.pads_cnt = 0;
+
+	bdata.id = 0;
+	omap_serial_init_port(&bdata);
+
+	bdata.id = 1;
+	omap_serial_init_port(&bdata);
+
+	bdata.id = 2;
+	bdata.pads = serial2_pads;
+	bdata.pads_cnt = ARRAY_SIZE(serial2_pads);
+	omap_serial_init_port(&bdata);
+}
+
 #else
-#define board_mux	NULL
+
+static inline void board_serial_init(void)
+{
+	omap_serial_init();
+}
+
 #endif
 
 static void __init n8x0_init_machine(void)
@@ -669,9 +695,8 @@
 	if (machine_is_nokia_n810())
 		i2c_register_board_info(2, n810_i2c_board_info_2,
 					ARRAY_SIZE(n810_i2c_board_info_2));
-
-	omap_serial_init();
-	n8x0_onenand_init();
+	board_serial_init();
+	gpmc_onenand_init(board_onenand_data);
 	n8x0_mmc_init();
 	n8x0_usb_init();
 }
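
The n8x0 hunk above replaces the single omap_serial_init() call with per-port initialisation so that UART3's RX pad can carry OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP and be parked in mux mode 3 (GPIO) while idle, allowing an incoming character to wake the port. The same pattern sketched for another UART, with an assumed pad name that is not taken from this patch:

/* Illustrative only: a wakeup-capable RX pad for a hypothetical port.
 * The .name string must match an entry in the board's mux table. */
static struct omap_device_pad example_uart_pads[] __initdata = {
	{
		.name	= "uart1_rx.uart1_rx",		/* assumed name */
		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
		.enable	= OMAP_MUX_MODE0,
		.idle	= OMAP_MUX_MODE3,		/* GPIO while idle */
	},
};

static inline void __init example_serial_init(void)
{
	struct omap_board_data bdata = {
		.id		= 0,
		.pads		= example_uart_pads,
		.pads_cnt	= ARRAY_SIZE(example_uart_pads),
	};

	omap_serial_init_port(&bdata);
}
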
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 14f4224..46d814a 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -41,6 +41,7 @@
 #include <plat/board.h>
 #include <plat/common.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 #include <plat/gpmc.h>
 #include <plat/nand.h>
 #include <plat/usb.h>
@@ -194,14 +195,19 @@
 		gpio_set_value(dssdev->reset_gpio, 0);
 }
 
+static struct panel_generic_dpi_data dvi_panel = {
+	.name = "generic",
+	.platform_enable = beagle_enable_dvi,
+	.platform_disable = beagle_disable_dvi,
+};
+
 static struct omap_dss_device beagle_dvi_device = {
 	.type = OMAP_DISPLAY_TYPE_DPI,
 	.name = "dvi",
-	.driver_name = "generic_panel",
+	.driver_name = "generic_dpi_panel",
+	.data = &dvi_panel,
 	.phy.dpi.data_lines = 24,
-	.reset_gpio = 170,
-	.platform_enable = beagle_enable_dvi,
-	.platform_disable = beagle_disable_dvi,
+	.reset_gpio = -EINVAL,
 };
 
 static struct omap_dss_device beagle_tv_device = {
@@ -273,6 +279,8 @@
 static int beagle_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int r;
+
 	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
 		mmc[0].gpio_wp = -EINVAL;
 	} else if ((omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C1_3) ||
@@ -293,17 +301,63 @@
 	/* REVISIT: need ehci-omap hooks for external VBUS
 	 * power switch and overcurrent detect
 	 */
+	if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) {
+		r = gpio_request(gpio + 1, "EHCI_nOC");
+		if (!r) {
+			r = gpio_direction_input(gpio + 1);
+			if (r)
+				gpio_free(gpio + 1);
+		}
+		if (r)
+			pr_err("%s: unable to configure EHCI_nOC\n", __func__);
+	}
 
-	gpio_request(gpio + 1, "EHCI_nOC");
-	gpio_direction_input(gpio + 1);
-
-	/* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
+	/*
+	 * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
+	 * high / others active low)
+	 */
 	gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
-	gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+		gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
+	else
+		gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+	/* DVI reset GPIO is different between beagle revisions */
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+		beagle_dvi_device.reset_gpio = 129;
+	else
+		beagle_dvi_device.reset_gpio = 170;
 
 	/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
 	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
+	/*
+	 * gpio + 1 on Xm controls the TFP410's enable line (active low)
+	 * gpio + 2 control varies depending on the board rev as follows:
+	 * P7/P8 revisions(prototype): Camera EN
+	 * A2+ revisions (production): LDO (supplies DVI, serial, led blocks)
+	 */
+	if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
+		r = gpio_request(gpio + 1, "nDVI_PWR_EN");
+		if (!r) {
+			r = gpio_direction_output(gpio + 1, 0);
+			if (r)
+				gpio_free(gpio + 1);
+		}
+		if (r)
+			pr_err("%s: unable to configure nDVI_PWR_EN\n",
+				__func__);
+		r = gpio_request(gpio + 2, "DVI_LDO_EN");
+		if (!r) {
+			r = gpio_direction_output(gpio + 2, 1);
+			if (r)
+				gpio_free(gpio + 2);
+		}
+		if (r)
+			pr_err("%s: unable to configure DVI_LDO_EN\n",
+				__func__);
+	}
+
 	return 0;
 }
 
@@ -484,13 +538,13 @@
 
 static void __init omap3_beagle_init_irq(void)
 {
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
-			     mt46h32m32lf6_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+				  mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
 #ifdef CONFIG_OMAP_32K_TIMER
 	omap2_gp_clockevent_set_gptimer(12);
 #endif
-	omap_gpio_init();
 }
 
 static struct platform_device *omap3_beagle_devices[] __initdata = {
@@ -548,8 +602,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index b04365c..323c380 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -43,6 +43,7 @@
 #include <plat/common.h>
 #include <plat/mcspi.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
@@ -301,15 +302,20 @@
 	dvi_enabled = 0;
 }
 
-static struct omap_dss_device omap3_evm_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= omap3_evm_enable_dvi,
 	.platform_disable	= omap3_evm_disable_dvi,
 };
 
+static struct omap_dss_device omap3_evm_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device *omap3_evm_dss_devices[] = {
 	&omap3_evm_lcd_device,
 	&omap3_evm_tv_device,
@@ -623,9 +629,9 @@
 {
 	omap_board_config = omap3_evm_config;
 	omap_board_config_size = ARRAY_SIZE(omap3_evm_config);
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct platform_device *omap3_evm_devices[] __initdata = {
@@ -654,8 +660,6 @@
 				OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 5f7d2c1..15e4b08 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -197,17 +197,15 @@
 
 static void __init omap3logic_init_irq(void)
 {
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux       NULL
 #endif
 
 static void __init omap3logic_init(void)
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 8be2615..0b34bed 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -293,7 +293,7 @@
 	},
 	{
 		.mmc		= 3,
-		.caps		= MMC_CAP_4_BIT_DATA,
+		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
 		.gpio_cd	= -EINVAL,
 		.gpio_wp	= -EINVAL,
 		.init_card	= pandora_wl1251_init_card,
@@ -636,10 +636,10 @@
 
 static void __init omap3pandora_init_irq(void)
 {
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
-			     mt46h32m32lf6_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+				  mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static void __init pandora_wl1251_init(void)
@@ -697,8 +697,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index f252721..2a2dad4 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -40,6 +40,7 @@
 #include <plat/nand.h>
 #include <plat/usb.h>
 #include <plat/display.h>
+#include <plat/panel-generic-dpi.h>
 
 #include <plat/mcspi.h>
 #include <linux/input/matrix_keypad.h>
@@ -160,15 +161,20 @@
 	lcd_enabled = 0;
 }
 
-static struct omap_dss_device omap3_stalker_lcd_device = {
-	.name			= "lcd",
-	.driver_name		= "generic_panel",
-	.phy.dpi.data_lines	= 24,
-	.type			= OMAP_DISPLAY_TYPE_DPI,
+static struct panel_generic_dpi_data lcd_panel = {
+	.name			= "generic",
 	.platform_enable	= omap3_stalker_enable_lcd,
 	.platform_disable	= omap3_stalker_disable_lcd,
 };
 
+static struct omap_dss_device omap3_stalker_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "generic_dpi_panel",
+	.data			= &lcd_panel,
+	.phy.dpi.data_lines	= 24,
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+};
+
 static int omap3_stalker_enable_tv(struct omap_dss_device *dssdev)
 {
 	return 0;
@@ -208,15 +214,20 @@
 	dvi_enabled = 0;
 }
 
-static struct omap_dss_device omap3_stalker_dvi_device = {
-	.name			= "dvi",
-	.driver_name		= "generic_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 24,
+static struct panel_generic_dpi_data dvi_panel = {
+	.name			= "generic",
 	.platform_enable	= omap3_stalker_enable_dvi,
 	.platform_disable	= omap3_stalker_disable_dvi,
 };
 
+static struct omap_dss_device omap3_stalker_dvi_device = {
+	.name			= "dvi",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.driver_name		= "generic_dpi_panel",
+	.data			= &dvi_panel,
+	.phy.dpi.data_lines	= 24,
+};
+
 static struct omap_dss_device *omap3_stalker_dss_devices[] = {
 	&omap3_stalker_lcd_device,
 	&omap3_stalker_tv_device,
@@ -584,12 +595,12 @@
 {
 	omap_board_config = omap3_stalker_config;
 	omap_board_config_size = ARRAY_SIZE(omap3_stalker_config);
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params, NULL);
 	omap_init_irq();
 #ifdef CONFIG_OMAP_32K_TIMER
 	omap2_gp_clockevent_set_gptimer(12);
 #endif
-	omap_gpio_init();
 }
 
 static struct platform_device *omap3_stalker_devices[] __initdata = {
@@ -616,8 +627,6 @@
 		  OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
 	{.reg_offset = OMAP_MUX_TERMINATOR},
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 41104bb..db1f74f 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -413,8 +413,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static void __init omap3_touchbook_init_irq(void)
@@ -422,13 +420,13 @@
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	omap_board_config = omap3_touchbook_config;
 	omap_board_config_size = ARRAY_SIZE(omap3_touchbook_config);
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
-			     mt46h32m32lf6_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+				  mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
 #ifdef CONFIG_OMAP_32K_TIMER
 	omap2_gp_clockevent_set_gptimer(12);
 #endif
-	omap_gpio_init();
 }
 
 static struct platform_device *omap3_touchbook_devices[] __initdata = {
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 1ecd0a6..e001a04 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/leds.h>
 #include <linux/gpio.h>
@@ -40,6 +41,7 @@
 
 #include "hsmmc.h"
 #include "control.h"
+#include "mux.h"
 
 #define GPIO_HUB_POWER		1
 #define GPIO_HUB_NRESET		62
@@ -76,9 +78,9 @@
 
 static void __init omap4_panda_init_irq(void)
 {
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(NULL, NULL);
 	gic_init_irq();
-	omap_gpio_init();
 }
 
 static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
@@ -94,7 +96,16 @@
 static void __init omap4_ehci_init(void)
 {
 	int ret;
+	struct clk *phy_ref_clk;
 
+	/* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */
+	phy_ref_clk = clk_get(NULL, "auxclk3_ck");
+	if (IS_ERR(phy_ref_clk)) {
+		pr_err("Cannot request auxclk3\n");
+		goto error1;
+	}
+	clk_set_rate(phy_ref_clk, 19200000);
+	clk_enable(phy_ref_clk);
 
 	/* disable the power to the usb hub prior to init */
 	ret = gpio_request(GPIO_HUB_POWER, "hub_power");
@@ -133,15 +144,23 @@
 
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type		= MUSB_INTERFACE_UTMI,
-	.mode			= MUSB_PERIPHERAL,
+	.mode			= MUSB_OTG,
 	.power			= 100,
 };
 
+static struct twl4030_usb_data omap4_usbphy_data = {
+	.phy_init	= omap4430_phy_init,
+	.phy_exit	= omap4430_phy_exit,
+	.phy_power	= omap4430_phy_power,
+	.phy_set_clock	= omap4430_phy_set_clk,
+};
+
 static struct omap2_hsmmc_info mmc[] = {
 	{
 		.mmc		= 1,
 		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
 		.gpio_wp	= -EINVAL,
+		.gpio_cd	= -EINVAL,
 	},
 	{}	/* Terminator */
 };
@@ -345,6 +364,7 @@
 	.vaux1		= &omap4_panda_vaux1,
 	.vaux2		= &omap4_panda_vaux2,
 	.vaux3		= &omap4_panda_vaux3,
+	.usb		= &omap4_usbphy_data,
 };
 
 static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = {
@@ -368,8 +388,23 @@
 	omap_register_i2c_bus(4, 400, NULL, 0);
 	return 0;
 }
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux	NULL
+#endif
+
 static void __init omap4_panda_init(void)
 {
+	int package = OMAP_PACKAGE_CBS;
+
+	if (omap_rev() == OMAP4430_REV_ES1_0)
+		package = OMAP_PACKAGE_CBL;
+	omap4_mux_init(board_mux, package);
+
 	omap4_panda_i2c_init();
 	platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
 	omap_serial_init();
@@ -377,9 +412,7 @@
 	/* OMAP4 Panda uses internal transceiver so register nop transceiver */
 	usb_nop_xceiv_register();
 	omap4_ehci_init();
-	/* FIXME: allow multi-omap to boot until musb is updated for omap4 */
-	if (!cpu_is_omap44xx())
-		usb_musb_init(&musb_board_data);
+	usb_musb_init(&musb_board_data);
 }
 
 static void __init omap4_panda_map_io(void)
@@ -391,6 +424,7 @@
 MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
 	/* Maintainer: David Anders - Texas Instruments Inc */
 	.boot_params	= 0x80000100,
+	.reserve	= omap_reserve,
 	.map_io		= omap4_panda_map_io,
 	.init_irq	= omap4_panda_init_irq,
 	.init_machine	= omap4_panda_init,
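
The Panda hunk above makes EHCI bring-up depend on FREF_CLK3: auxclk3_ck is requested, set to 19.2 MHz and enabled before the hub and PHY GPIOs are touched, since that clock is the reference for the USB PHY. A compact sketch of the same clk sequence with the error path spelled out (illustrative; the helper name is not from the patch):

/* Illustrative: acquire and start the 19.2 MHz PHY reference clock,
 * as omap4_ehci_init() does above. */
static struct clk * __init example_get_phy_ref_clk(void)
{
	struct clk *phy_ref_clk = clk_get(NULL, "auxclk3_ck");

	if (IS_ERR(phy_ref_clk))
		return phy_ref_clk;

	clk_set_rate(phy_ref_clk, 19200000);	/* FREF_CLK3 */
	clk_enable(phy_ref_clk);
	return phy_ref_clk;
}
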
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 7053bc0..cb26e5d 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -413,10 +413,10 @@
 {
 	omap_board_config = overo_config;
 	omap_board_config_size = ARRAY_SIZE(overo_config);
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
-			     mt46h32m32lf6_sdrc_params);
+	omap2_init_common_infrastructure();
+	omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+				  mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 static struct platform_device *overo_devices[] __initdata = {
@@ -438,8 +438,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
new file mode 100644
index 0000000..cb77be7
--- /dev/null
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -0,0 +1,187 @@
+/*
+ * Board support file for Nokia RM-680.
+ *
+ * Copyright (C) 2010 Nokia
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c/twl.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach-types.h>
+
+#include <plat/i2c.h>
+#include <plat/mmc.h>
+#include <plat/usb.h>
+#include <plat/gpmc.h>
+#include <plat/common.h>
+#include <plat/onenand.h>
+
+#include "mux.h"
+#include "hsmmc.h"
+#include "sdram-nokia.h"
+
+static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
+	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.1"),
+};
+
+/* Fixed regulator for internal eMMC */
+static struct regulator_init_data rm680_vemmc = {
+	.constraints =	{
+		.name			= "rm680_vemmc",
+		.min_uV			= 2900000,
+		.max_uV			= 2900000,
+		.apply_uV		= 1,
+		.valid_modes_mask	= REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
+					| REGULATOR_CHANGE_MODE,
+	},
+	.num_consumer_supplies		= ARRAY_SIZE(rm680_vemmc_consumers),
+	.consumer_supplies		= rm680_vemmc_consumers,
+};
+
+static struct fixed_voltage_config rm680_vemmc_config = {
+	.supply_name		= "VEMMC",
+	.microvolts		= 2900000,
+	.gpio			= 157,
+	.startup_delay		= 150,
+	.enable_high		= 1,
+	.init_data		= &rm680_vemmc,
+};
+
+static struct platform_device rm680_vemmc_device = {
+	.name			= "reg-fixed-voltage",
+	.dev			= {
+		.platform_data	= &rm680_vemmc_config,
+	},
+};
+
+static struct platform_device *rm680_peripherals_devices[] __initdata = {
+	&rm680_vemmc_device,
+};
+
+/* TWL */
+static struct twl4030_gpio_platform_data rm680_gpio_data = {
+	.gpio_base		= OMAP_MAX_GPIO_LINES,
+	.irq_base		= TWL4030_GPIO_IRQ_BASE,
+	.irq_end		= TWL4030_GPIO_IRQ_END,
+	.pullups		= BIT(0),
+	.pulldowns		= BIT(1) | BIT(2) | BIT(8) | BIT(15),
+};
+
+static struct twl4030_usb_data rm680_usb_data = {
+	.usb_mode		= T2_USB_MODE_ULPI,
+};
+
+static struct twl4030_platform_data rm680_twl_data = {
+	.irq_base		= TWL4030_IRQ_BASE,
+	.irq_end		= TWL4030_IRQ_END,
+	.gpio			= &rm680_gpio_data,
+	.usb			= &rm680_usb_data,
+	/* add rest of the children here */
+};
+
+static struct i2c_board_info __initdata rm680_twl_i2c_board_info[] = {
+	{
+		I2C_BOARD_INFO("twl5031", 0x48),
+		.flags		= I2C_CLIENT_WAKE,
+		.irq		= INT_34XX_SYS_NIRQ,
+		.platform_data	= &rm680_twl_data,
+	},
+};
+
+static void __init rm680_i2c_init(void)
+{
+	omap_register_i2c_bus(1, 2900, rm680_twl_i2c_board_info,
+				ARRAY_SIZE(rm680_twl_i2c_board_info));
+	omap_register_i2c_bus(2, 400, NULL, 0);
+	omap_register_i2c_bus(3, 400, NULL, 0);
+}
+
+#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
+	defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
+static struct omap_onenand_platform_data board_onenand_data[] = {
+	{
+		.gpio_irq	= 65,
+		.flags		= ONENAND_SYNC_READWRITE,
+	}
+};
+#endif
+
+/* eMMC */
+static struct omap2_hsmmc_info mmc[] __initdata = {
+	{
+		.name		= "internal",
+		.mmc		= 2,
+		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED,
+		.gpio_cd	= -EINVAL,
+		.gpio_wp	= -EINVAL,
+	},
+	{ /* Terminator */ }
+};
+
+static void __init rm680_peripherals_init(void)
+{
+	platform_add_devices(rm680_peripherals_devices,
+				ARRAY_SIZE(rm680_peripherals_devices));
+	rm680_i2c_init();
+	gpmc_onenand_init(board_onenand_data);
+	omap2_hsmmc_init(mmc);
+}
+
+static void __init rm680_init_irq(void)
+{
+	struct omap_sdrc_params *sdrc_params;
+
+	omap2_init_common_infrastructure();
+	sdrc_params = nokia_get_sdram_timings();
+	omap2_init_common_devices(sdrc_params, sdrc_params);
+	omap_init_irq();
+}
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#endif
+
+static struct omap_musb_board_data rm680_musb_data = {
+	.interface_type	= MUSB_INTERFACE_ULPI,
+	.mode		= MUSB_PERIPHERAL,
+	.power		= 100,
+};
+
+static void __init rm680_init(void)
+{
+	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+	omap_serial_init();
+	usb_musb_init(&rm680_musb_data);
+	rm680_peripherals_init();
+}
+
+static void __init rm680_map_io(void)
+{
+	omap2_set_globals_3xxx();
+	omap34xx_map_common_io();
+}
+
+MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
+	.boot_params	= 0x80000100,
+	.map_io		= rm680_map_io,
+	.reserve	= omap_reserve,
+	.init_irq	= rm680_init_irq,
+	.init_machine	= rm680_init,
+	.timer		= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 3fec4d6..e75e240 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -23,7 +23,6 @@
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/mmc/host.h>
-#include <sound/tlv320aic3x.h>
 
 #include <plat/mcspi.h>
 #include <plat/board.h>
@@ -293,6 +292,8 @@
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 
+static struct omap_mux_partition *partition;
+
 /*
  * Current flows to eMMC when eMMC is off and the data lines are pulled up,
  * so pull them down. N.B. we pull 8 lines because we are using 8 lines.
@@ -300,9 +301,9 @@
 static void rx51_mmc2_remux(struct device *dev, int slot, int power_on)
 {
 	if (power_on)
-		omap_mux_write_array(rx51_mmc2_on_mux);
+		omap_mux_write_array(partition, rx51_mmc2_on_mux);
 	else
-		omap_mux_write_array(rx51_mmc2_off_mux);
+		omap_mux_write_array(partition, rx51_mmc2_off_mux);
 }
 
 static struct omap2_hsmmc_info mmc[] __initdata = {
@@ -342,6 +343,8 @@
 	/* tlv320aic3x analog supplies */
 	REGULATOR_SUPPLY("AVDD", "2-0018"),
 	REGULATOR_SUPPLY("DRVDD", "2-0018"),
+	REGULATOR_SUPPLY("AVDD", "2-0019"),
+	REGULATOR_SUPPLY("DRVDD", "2-0019"),
 	/* tpa6130a2 */
 	REGULATOR_SUPPLY("Vdd", "2-0060"),
 	/* Keep vmmc as last item. It is not iterated for newer boards */
@@ -352,19 +355,16 @@
 	/* tlv320aic3x digital supplies */
 	REGULATOR_SUPPLY("IOVDD", "2-0018"),
 	REGULATOR_SUPPLY("DVDD", "2-0018"),
+	REGULATOR_SUPPLY("IOVDD", "2-0019"),
+	REGULATOR_SUPPLY("DVDD", "2-0019"),
 };
 
-#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
-extern struct platform_device rx51_display_device;
-#endif
-
 static struct regulator_consumer_supply rx51_vaux1_consumers[] = {
-#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
-	{
-		.supply	= "vdds_sdi",
-		.dev	= &rx51_display_device.dev,
-	},
-#endif
+	REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
+};
+
+static struct regulator_consumer_supply rx51_vdac_supply[] = {
+	REGULATOR_SUPPLY("vdda_dac", "omapdss"),
 };
 
 static struct regulator_init_data rx51_vaux1 = {
@@ -484,14 +484,17 @@
 
 static struct regulator_init_data rx51_vdac = {
 	.constraints = {
+		.name			= "VDAC",
 		.min_uV			= 1800000,
 		.max_uV			= 1800000,
+		.apply_uV		= true,
 		.valid_modes_mask	= REGULATOR_MODE_NORMAL
 					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
+		.valid_ops_mask		= REGULATOR_CHANGE_MODE
 					| REGULATOR_CHANGE_STATUS,
 	},
+	.num_consumer_supplies	= 1,
+	.consumer_supplies	= rx51_vdac_supply,
 };
 
 static struct regulator_init_data rx51_vio = {
@@ -717,7 +720,7 @@
 	.vio			= &rx51_vio,
 };
 
-static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata = {
+static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata_or_module = {
 	.id			= TPA6130A2,
 	.power_gpio		= 98,
 };
@@ -742,11 +745,19 @@
 	.gpio_reset = 60,
 };
 
+static struct aic3x_pdata rx51_aic3x_data2 = {
+	.gpio_reset = 60,
+};
+
 static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_2[] = {
 	{
 		I2C_BOARD_INFO("tlv320aic3x", 0x18),
 		.platform_data = &rx51_aic3x_data,
 	},
+	{
+		I2C_BOARD_INFO("tlv320aic3x", 0x19),
+		.platform_data = &rx51_aic3x_data2,
+	},
 #if defined(CONFIG_SENSORS_TSL2563) || defined(CONFIG_SENSORS_TSL2563_MODULE)
 	{
 		I2C_BOARD_INFO("tsl2563", 0x29),
@@ -815,25 +826,15 @@
 	},
 };
 
-static struct omap_onenand_platform_data board_onenand_data = {
-	.cs		= 0,
-	.gpio_irq	= 65,
-	.parts		= onenand_partitions,
-	.nr_parts	= ARRAY_SIZE(onenand_partitions),
-	.flags		= ONENAND_SYNC_READWRITE,
+static struct omap_onenand_platform_data board_onenand_data[] = {
+	{
+		.cs		= 0,
+		.gpio_irq	= 65,
+		.parts		= onenand_partitions,
+		.nr_parts	= ARRAY_SIZE(onenand_partitions),
+		.flags		= ONENAND_SYNC_READWRITE,
+	}
 };
-
-static void __init board_onenand_init(void)
-{
-	gpmc_onenand_init(&board_onenand_data);
-}
-
-#else
-
-static inline void board_onenand_init(void)
-{
-}
-
 #endif
 
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
@@ -916,13 +917,17 @@
 void __init rx51_peripherals_init(void)
 {
 	rx51_i2c_init();
-	board_onenand_init();
+	gpmc_onenand_init(board_onenand_data);
 	board_smc91x_init();
 	rx51_add_gpio_keys();
 	rx51_init_wl1251();
 	spi_register_board_info(rx51_peripherals_spi_board_info,
 				ARRAY_SIZE(rx51_peripherals_spi_board_info));
-	omap2_hsmmc_init(mmc);
+
+	partition = omap_mux_get("core");
+	if (partition)
+		omap2_hsmmc_init(mmc);
+
 	platform_device_register(&rx51_charger_device);
 }
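
omap_mux_write_array() now takes an omap_mux_partition, so the RX-51 hunks above look up the "core" partition once and only call omap2_hsmmc_init() when that lookup succeeds, keeping the MMC2 power remux callback from ever running with a NULL partition. A hypothetical guard written in the same style (names are placeholders, not from the patch):

/* Illustrative: resolve the mux partition before registering anything
 * that will remux pads at runtime. */
static int __init example_mmc_setup(void)
{
	partition = omap_mux_get("core");
	if (!partition) {
		pr_err("example: \"core\" mux partition not found\n");
		return -ENODEV;
	}

	omap2_hsmmc_init(mmc);
	return 0;
}
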
 
diff --git a/arch/arm/mach-omap2/board-rx51-sdram.c b/arch/arm/mach-omap2/board-rx51-sdram.c
deleted file mode 100644
index a43b2c5..0000000
--- a/arch/arm/mach-omap2/board-rx51-sdram.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * SDRC register values for RX51
- *
- * Copyright (C) 2008 Nokia Corporation
- *
- * Lauri Leukkunen <lauri.leukkunen@nokia.com>
- *
- * Original code by Juha Yrjola <juha.yrjola@solidboot.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <plat/io.h>
-#include <plat/common.h>
-#include <plat/clock.h>
-#include <plat/sdrc.h>
-
-
-/* In picoseconds, except for tREF (ns), tXP, tCKE, tWTR (clks) */
-struct sdram_timings {
-	u32 casl;
-	u32 tDAL;
-	u32 tDPL;
-	u32 tRRD;
-	u32 tRCD;
-	u32 tRP;
-	u32 tRAS;
-	u32 tRC;
-	u32 tRFC;
-	u32 tXSR;
-
-	u32 tREF; /* in ns */
-
-	u32 tXP;
-	u32 tCKE;
-	u32 tWTR;
-};
-
-static struct omap_sdrc_params rx51_sdrc_params[4];
-
-static const struct sdram_timings rx51_timings[] = {
-	{
-		.casl = 3,
-		.tDAL = 33000,
-		.tDPL = 15000,
-		.tRRD = 12000,
-		.tRCD = 22500,
-		.tRP = 18000,
-		.tRAS = 42000,
-		.tRC = 66000,
-		.tRFC = 138000,
-		.tXSR = 200000,
-
-		.tREF = 7800,
-
-		.tXP = 2,
-		.tCKE = 2,
-		.tWTR = 2
-	},
-};
-
-static unsigned long sdrc_get_fclk_period(long rate)
-{
-	/* In picoseconds */
-	return 1000000000 / rate;
-}
-
-static unsigned int sdrc_ps_to_ticks(unsigned int time_ps, long rate)
-{
-	unsigned long tick_ps;
-
-	/* Calculate in picosecs to yield more exact results */
-	tick_ps = sdrc_get_fclk_period(rate);
-
-	return (time_ps + tick_ps - 1) / tick_ps;
-}
-#undef DEBUG
-#ifdef DEBUG
-static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
-				int ticks, long rate, const char *name)
-#else
-static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
-			       int ticks)
-#endif
-{
-	int mask, nr_bits;
-
-	nr_bits = end_bit - st_bit + 1;
-	if (ticks >= 1 << nr_bits)
-		return -1;
-	mask = (1 << nr_bits) - 1;
-	*regval &= ~(mask << st_bit);
-	*regval |= ticks << st_bit;
-#ifdef DEBUG
-	printk(KERN_INFO "SDRC %s: %i ticks %i ns\n", name, ticks,
-			(unsigned int)sdrc_get_fclk_period(rate) * ticks /
-			1000);
-#endif
-
-	return 0;
-}
-
-#ifdef DEBUG
-#define SDRC_SET_ONE(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval((reg), (st), (end), \
-			rx51_timings->field, (rate), #field) < 0) \
-		err = -1;
-#else
-#define SDRC_SET_ONE(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval((reg), (st), (end), \
-			rx51_timings->field) < 0) \
-		err = -1;
-#endif
-
-#ifdef DEBUG
-static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
-				int time, long rate, const char *name)
-#else
-static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
-				int time, long rate)
-#endif
-{
-	int ticks, ret;
-	ret = 0;
-
-	if (time == 0)
-		ticks = 0;
-	else
-		ticks = sdrc_ps_to_ticks(time, rate);
-
-#ifdef DEBUG
-	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks,
-				     rate, name);
-#else
-	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks);
-#endif
-
-	return ret;
-}
-
-#ifdef DEBUG
-#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval_ps((reg), (st), (end), \
-			rx51_timings->field, \
-			(rate), #field) < 0) \
-		err = -1;
-
-#else
-#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval_ps((reg), (st), (end), \
-			rx51_timings->field, (rate)) < 0) \
-		err = -1;
-#endif
-
-static int sdrc_timings(int id, long rate)
-{
-	u32 ticks_per_ms;
-	u32 rfr, l;
-	u32 actim_ctrla = 0, actim_ctrlb = 0;
-	u32 rfr_ctrl;
-	int err = 0;
-	long l3_rate = rate / 1000;
-
-	SDRC_SET_ONE_PS(&actim_ctrla,  0,  4, tDAL, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla,  6,  8, tDPL, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla,  9, 11, tRRD, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 12, 14, tRCD, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 15, 17, tRP, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 18, 21, tRAS, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 22, 26, tRC, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 27, 31, tRFC, l3_rate);
-
-	SDRC_SET_ONE_PS(&actim_ctrlb,  0,  7, tXSR, l3_rate);
-
-	SDRC_SET_ONE(&actim_ctrlb,  8, 10, tXP, l3_rate);
-	SDRC_SET_ONE(&actim_ctrlb, 12, 14, tCKE, l3_rate);
-	SDRC_SET_ONE(&actim_ctrlb, 16, 17, tWTR, l3_rate);
-
-	ticks_per_ms = l3_rate;
-	rfr = rx51_timings[0].tREF * ticks_per_ms / 1000000;
-	if (rfr > 65535 + 50)
-		rfr = 65535;
-	else
-		rfr -= 50;
-
-#ifdef DEBUG
-	printk(KERN_INFO "SDRC tREF: %i ticks\n", rfr);
-#endif
-
-	l = rfr << 8;
-	rfr_ctrl = l | 0x1; /* autorefresh, reload counter with 1xARCV */
-
-	rx51_sdrc_params[id].rate = rate;
-	rx51_sdrc_params[id].actim_ctrla = actim_ctrla;
-	rx51_sdrc_params[id].actim_ctrlb = actim_ctrlb;
-	rx51_sdrc_params[id].rfr_ctrl = rfr_ctrl;
-	rx51_sdrc_params[id].mr = 0x32;
-
-	rx51_sdrc_params[id + 1].rate = 0;
-
-	return err;
-}
-
-struct omap_sdrc_params *rx51_get_sdram_timings(void)
-{
-	int err;
-
-	err = sdrc_timings(0, 41500000);
-	err |= sdrc_timings(1, 83000000);
-	err |= sdrc_timings(2, 166000000);
-
-	return &rx51_sdrc_params[0];
-}
-
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index 85503fe..acd6700 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -14,7 +14,6 @@
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/mm.h>
-
 #include <asm/mach-types.h>
 #include <plat/display.h>
 #include <plat/vram.h>
@@ -49,8 +48,16 @@
 	.platform_disable	= rx51_lcd_disable,
 };
 
+static struct omap_dss_device  rx51_tv_device = {
+	.name			= "tv",
+	.type			= OMAP_DISPLAY_TYPE_VENC,
+	.driver_name		= "venc",
+	.phy.venc.type	        = OMAP_DSS_VENC_TYPE_COMPOSITE,
+};
+
 static struct omap_dss_device *rx51_dss_devices[] = {
 	&rx51_lcd_device,
+	&rx51_tv_device,
 };
 
 static struct omap_dss_board_info rx51_dss_board_info = {
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 36f2cf4..f53fc55 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -32,10 +32,10 @@
 
 #include "mux.h"
 #include "pm.h"
+#include "sdram-nokia.h"
 
 #define RX51_GPIO_SLEEP_IND 162
 
-struct omap_sdrc_params *rx51_get_sdram_timings(void);
 extern void rx51_video_mem_init(void);
 
 static struct gpio_led gpio_leds[] = {
@@ -105,10 +105,10 @@
 	omap_board_config = rx51_config;
 	omap_board_config_size = ARRAY_SIZE(rx51_config);
 	omap3_pm_init_cpuidle(rx51_cpuidle_params);
-	sdrc_params = rx51_get_sdram_timings();
-	omap2_init_common_hw(sdrc_params, sdrc_params);
+	omap2_init_common_infrastructure();
+	sdrc_params = nokia_get_sdram_timings();
+	omap2_init_common_devices(sdrc_params, sdrc_params);
 	omap_init_irq();
-	omap_gpio_init();
 }
 
 extern void __init rx51_peripherals_init(void);
@@ -117,8 +117,6 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
-#else
-#define board_mux	NULL
 #endif
 
 static struct omap_musb_board_data musb_board_data = {
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
new file mode 100644
index 0000000..6bcd436
--- /dev/null
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc.
+ *
+ * Modified from mach-omap2/board-zoom-peripherals.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl.h>
+#include <linux/spi/spi.h>
+#include <plat/mcspi.h>
+#include <plat/display.h>
+
+#define LCD_PANEL_RESET_GPIO_PROD	96
+#define LCD_PANEL_RESET_GPIO_PILOT	55
+#define LCD_PANEL_QVGA_GPIO		56
+
+static void zoom_lcd_panel_init(void)
+{
+	int ret;
+	unsigned char lcd_panel_reset_gpio;
+
+	lcd_panel_reset_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
+			LCD_PANEL_RESET_GPIO_PROD :
+			LCD_PANEL_RESET_GPIO_PILOT;
+
+	ret = gpio_request(lcd_panel_reset_gpio, "lcd reset");
+	if (ret) {
+		pr_err("Failed to get LCD reset GPIO (gpio%d).\n",
+			lcd_panel_reset_gpio);
+		return;
+	}
+	gpio_direction_output(lcd_panel_reset_gpio, 1);
+
+	ret = gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga");
+	if (ret) {
+		pr_err("Failed to get LCD_PANEL_QVGA_GPIO (gpio%d).\n",
+			LCD_PANEL_QVGA_GPIO);
+		goto err0;
+	}
+	gpio_direction_output(LCD_PANEL_QVGA_GPIO, 1);
+
+	return;
+err0:
+	gpio_free(lcd_panel_reset_gpio);
+}
+
+static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev)
+{
+	return 0;
+}
+
+static void zoom_panel_disable_lcd(struct omap_dss_device *dssdev)
+{
+}
+
+/*
+ * PWMA/B register offsets (TWL4030_MODULE_PWMA)
+ */
+#define TWL_INTBR_PMBR1	0xD
+#define TWL_INTBR_GPBR1	0xC
+#define TWL_LED_PWMON	0x0
+#define TWL_LED_PWMOFF	0x1
+
+static int zoom_set_bl_intensity(struct omap_dss_device *dssdev, int level)
+{
+	unsigned char c;
+	u8 mux_pwm, enb_pwm;
+
+	if (level > 100)
+		return -1;
+
+	twl_i2c_read_u8(TWL4030_MODULE_INTBR, &mux_pwm, TWL_INTBR_PMBR1);
+	twl_i2c_read_u8(TWL4030_MODULE_INTBR, &enb_pwm, TWL_INTBR_GPBR1);
+
+	if (level == 0) {
+		/* disable pwm1 output and clock */
+		enb_pwm = enb_pwm & 0xF5;
+		/* change pwm1 pin to gpio pin */
+		mux_pwm = mux_pwm & 0xCF;
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					enb_pwm, TWL_INTBR_GPBR1);
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					mux_pwm, TWL_INTBR_PMBR1);
+		return 0;
+	}
+
+	if (!((enb_pwm & 0xA) && (mux_pwm & 0x30))) {
+		/* change gpio pin to pwm1 pin */
+		mux_pwm = mux_pwm | 0x30;
+		/* enable pwm1 output and clock*/
+		enb_pwm = enb_pwm | 0x0A;
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					mux_pwm, TWL_INTBR_PMBR1);
+		twl_i2c_write_u8(TWL4030_MODULE_INTBR,
+					enb_pwm, TWL_INTBR_GPBR1);
+	}
+
+	c = ((50 * (100 - level)) / 100) + 1;
+	twl_i2c_write_u8(TWL4030_MODULE_PWM1, 0x7F, TWL_LED_PWMOFF);
+	twl_i2c_write_u8(TWL4030_MODULE_PWM1, c, TWL_LED_PWMON);
+
+	return 0;
+}
+
+static struct omap_dss_device zoom_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "NEC_8048_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 24,
+	.platform_enable	= zoom_panel_enable_lcd,
+	.platform_disable	= zoom_panel_disable_lcd,
+	.max_backlight_level	= 100,
+	.set_backlight		= zoom_set_bl_intensity,
+};
+
+static struct omap_dss_device *zoom_dss_devices[] = {
+	&zoom_lcd_device,
+};
+
+static struct omap_dss_board_info zoom_dss_data = {
+	.num_devices		= ARRAY_SIZE(zoom_dss_devices),
+	.devices		= zoom_dss_devices,
+	.default_device		= &zoom_lcd_device,
+};
+
+static struct platform_device zoom_dss_device = {
+	.name				= "omapdss",
+	.id				= -1,
+	.dev				= {
+		.platform_data		= &zoom_dss_data,
+	},
+};
+
+static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
+	.turbo_mode		= 1,
+	.single_channel	= 1,  /* 0: slave, 1: master */
+};
+
+static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
+	[0] = {
+		.modalias		= "nec_8048_spi",
+		.bus_num		= 1,
+		.chip_select		= 2,
+		.max_speed_hz		= 375000,
+		.controller_data	= &dss_lcd_mcspi_config,
+	},
+};
+
+static struct platform_device *zoom_display_devices[] __initdata = {
+	&zoom_dss_device,
+};
+
+void __init zoom_display_init(void)
+{
+	platform_add_devices(zoom_display_devices,
+				ARRAY_SIZE(zoom_display_devices));
+	spi_register_board_info(nec_8048_spi_board_info,
+				ARRAY_SIZE(nec_8048_spi_board_info));
+	zoom_lcd_panel_init();
+}
+
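
The new board-zoom-display.c above drives the panel backlight through TWL4030 PWM1: level 0 disables the PWM1 clock and remuxes the pin back to GPIO, while any other level is mapped onto the PWMON register against a fixed PWMOFF of 0x7F via c = ((50 * (100 - level)) / 100) + 1. Worked values of that mapping, for reference: level 100 gives c = 1, level 50 gives c = 26, and level 1 gives c = 50, i.e. a higher requested brightness programs a smaller PWMON value against the fixed 0x7F PWMOFF.
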
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 9db9203..e0e040f 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -35,6 +35,8 @@
 #define OMAP_ZOOM_WLAN_PMENA_GPIO	(101)
 #define OMAP_ZOOM_WLAN_IRQ_GPIO		(162)
 
+#define LCD_PANEL_ENABLE_GPIO		(7 + OMAP_MAX_GPIO_LINES)
+
 /* Zoom2 has Qwerty keyboard*/
 static uint32_t board_keymap[] = {
 	KEY(0, 0, KEY_E),
@@ -190,13 +192,13 @@
 	},
 };
 
-struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
+static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
 	.irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO),
 	/* ZOOM ref clock is 26 MHz */
 	.board_ref_clock = 1,
 };
 
-static struct omap2_hsmmc_info mmc[] __initdata = {
+static struct omap2_hsmmc_info mmc[] = {
 	{
 		.name		= "external",
 		.mmc		= 1,
@@ -224,9 +226,43 @@
 	{}      /* Terminator */
 };
 
+static struct regulator_consumer_supply zoom_vpll2_supply =
+	REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static struct regulator_consumer_supply zoom_vdda_dac_supply =
+	REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_init_data zoom_vpll2 = {
+	.constraints = {
+		.min_uV                 = 1800000,
+		.max_uV                 = 1800000,
+		.valid_modes_mask       = REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask         = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies		= 1,
+	.consumer_supplies		= &zoom_vpll2_supply,
+};
+
+static struct regulator_init_data zoom_vdac = {
+	.constraints = {
+		.min_uV                 = 1800000,
+		.max_uV                 = 1800000,
+		.valid_modes_mask       = REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+		.valid_ops_mask         = REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies		= 1,
+	.consumer_supplies		= &zoom_vdda_dac_supply,
+};
+
 static int zoom_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int ret;
+
 	/* gpio + 0 is "mmc0_cd" (input/IRQ) */
 	mmc[0].gpio_cd = gpio + 0;
 	omap2_hsmmc_init(mmc);
@@ -238,11 +274,19 @@
 	zoom_vsim_supply.dev = mmc[0].dev;
 	zoom_vmmc2_supply.dev = mmc[1].dev;
 
-	return 0;
+	ret = gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd enable");
+	if (ret) {
+		pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n",
+				LCD_PANEL_ENABLE_GPIO);
+		return ret;
+	}
+	gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
+
+	return ret;
 }
 
 /* EXTMUTE callback function */
-void zoom2_set_hs_extmute(int mute)
+static void zoom2_set_hs_extmute(int mute)
 {
 	gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
 }
@@ -301,7 +345,8 @@
 	.vmmc1          = &zoom_vmmc1,
 	.vmmc2          = &zoom_vmmc2,
 	.vsim           = &zoom_vsim,
-
+	.vpll2		= &zoom_vpll2,
+	.vdac		= &zoom_vdac,
 };
 
 static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = {
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
new file mode 100644
index 0000000..e26754c
--- /dev/null
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2009-2010 Texas Instruments Inc.
+ * Mikkel Christensen <mlc@ti.com>
+ * Felipe Balbi <balbi@ti.com>
+ *
+ * Modified from mach-omap2/board-ldp.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/i2c/twl.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <plat/common.h>
+#include <plat/board.h>
+#include <plat/usb.h>
+
+#include <mach/board-zoom.h>
+
+#include "board-flash.h"
+#include "mux.h"
+#include "sdram-micron-mt46h32m32lf-6.h"
+#include "sdram-hynix-h8mbx00u0mer-0em.h"
+
+#define ZOOM3_EHCI_RESET_GPIO		64
+
+static void __init omap_zoom_init_irq(void)
+{
+	omap2_init_common_infrastructure();
+	if (machine_is_omap_zoom2())
+		omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+					  mt46h32m32lf6_sdrc_params);
+	else if (machine_is_omap_zoom3())
+		omap2_init_common_devices(h8mbx00u0mer0em_sdrc_params,
+					  h8mbx00u0mer0em_sdrc_params);
+
+	omap_init_irq();
+}
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+	/* WLAN IRQ - GPIO 162 */
+	OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
+	/* WLAN POWER ENABLE - GPIO 101 */
+	OMAP3_MUX(CAM_D2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+	/* WLAN SDIO: MMC3 CMD */
+	OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT_PULLUP),
+	/* WLAN SDIO: MMC3 CLK */
+	OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
+	/* WLAN SDIO: MMC3 DAT[0-3] */
+	OMAP3_MUX(ETK_D3, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
+	OMAP3_MUX(ETK_D4, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
+	OMAP3_MUX(ETK_D5, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
+	OMAP3_MUX(ETK_D6, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#endif
+
+static struct mtd_partition zoom_nand_partitions[] = {
+	/* All the partition sizes are listed in terms of NAND block size */
+	{
+		.name		= "X-Loader-NAND",
+		.offset		= 0,
+		.size		= 4 * (64 * 2048),	/* 512KB, 0x80000 */
+		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
+	},
+	{
+		.name		= "U-Boot-NAND",
+		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x80000 */
+		.size		= 10 * (64 * 2048),	/* 1.25MB, 0x140000 */
+		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
+	},
+	{
+		.name		= "Boot Env-NAND",
+		.offset		= MTDPART_OFS_APPEND,   /* Offset = 0x1c0000 */
+		.size		= 2 * (64 * 2048),	/* 256KB, 0x40000 */
+	},
+	{
+		.name		= "Kernel-NAND",
+		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x0200000*/
+		.size		= 240 * (64 * 2048),	/* 30M, 0x1E00000 */
+	},
+	{
+		.name		= "system",
+		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x2000000 */
+		.size		= 3328 * (64 * 2048),	/* 416M, 0x1A000000 */
+	},
+	{
+		.name		= "userdata",
+		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1C000000*/
+		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
+	},
+	{
+		.name		= "cache",
+		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1E000000*/
+		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
+	},
+};
+
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+	.port_mode[0]		= EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.port_mode[1]		= EHCI_HCD_OMAP_MODE_PHY,
+	.port_mode[2]		= EHCI_HCD_OMAP_MODE_UNKNOWN,
+	.phy_reset		= true,
+	.reset_gpio_port[0]	= -EINVAL,
+	.reset_gpio_port[1]	= ZOOM3_EHCI_RESET_GPIO,
+	.reset_gpio_port[2]	= -EINVAL,
+};
+
+static void __init omap_zoom_init(void)
+{
+	if (machine_is_omap_zoom2()) {
+		omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+	} else if (machine_is_omap_zoom3()) {
+		omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
+		omap_mux_init_gpio(ZOOM3_EHCI_RESET_GPIO, OMAP_PIN_OUTPUT);
+		usb_ehci_init(&ehci_pdata);
+	}
+
+	board_nand_init(zoom_nand_partitions,
+			ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS);
+	zoom_debugboard_init();
+	zoom_peripherals_init();
+	zoom_display_init();
+}
+
+MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
+	.boot_params	= 0x80000100,
+	.map_io		= omap3_map_io,
+	.reserve	= omap_reserve,
+	.init_irq	= omap_zoom_init_irq,
+	.init_machine	= omap_zoom_init,
+	.timer		= &omap_timer,
+MACHINE_END
+
+MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
+	.boot_params	= 0x80000100,
+	.map_io		= omap3_map_io,
+	.reserve	= omap_reserve,
+	.init_irq	= omap_zoom_init_irq,
+	.init_machine	= omap_zoom_init,
+	.timer		= &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
deleted file mode 100644
index 2992a9f..0000000
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2009 Texas Instruments Inc.
- * Mikkel Christensen <mlc@ti.com>
- *
- * Modified from mach-omap2/board-ldp.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include <plat/common.h>
-#include <plat/board.h>
-
-#include <mach/board-zoom.h>
-
-#include "board-flash.h"
-#include "mux.h"
-#include "sdram-micron-mt46h32m32lf-6.h"
-
-static void __init omap_zoom2_init_irq(void)
-{
-	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
-				 mt46h32m32lf6_sdrc_params);
-	omap_init_irq();
-	omap_gpio_init();
-}
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
-	/* WLAN IRQ - GPIO 162 */
-	OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
-	/* WLAN POWER ENABLE - GPIO 101 */
-	OMAP3_MUX(CAM_D2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-	/* WLAN SDIO: MMC3 CMD */
-	OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT_PULLUP),
-	/* WLAN SDIO: MMC3 CLK */
-	OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	/* WLAN SDIO: MMC3 DAT[0-3] */
-	OMAP3_MUX(ETK_D3, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	OMAP3_MUX(ETK_D4, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	OMAP3_MUX(ETK_D5, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	OMAP3_MUX(ETK_D6, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define board_mux	NULL
-#endif
-
-static struct mtd_partition zoom_nand_partitions[] = {
-	/* All the partition sizes are listed in terms of NAND block size */
-	{
-		.name		= "X-Loader-NAND",
-		.offset		= 0,
-		.size		= 4 * (64 * 2048),	/* 512KB, 0x80000 */
-		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
-	},
-	{
-		.name		= "U-Boot-NAND",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x80000 */
-		.size		= 10 * (64 * 2048),	/* 1.25MB, 0x140000 */
-		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
-	},
-	{
-		.name		= "Boot Env-NAND",
-		.offset		= MTDPART_OFS_APPEND,   /* Offset = 0x1c0000 */
-		.size		= 2 * (64 * 2048),	/* 256KB, 0x40000 */
-	},
-	{
-		.name		= "Kernel-NAND",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x0200000*/
-		.size		= 240 * (64 * 2048),	/* 30M, 0x1E00000 */
-	},
-	{
-		.name		= "system",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x2000000 */
-		.size		= 3328 * (64 * 2048),	/* 416M, 0x1A000000 */
-	},
-	{
-		.name		= "userdata",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1C000000*/
-		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
-	},
-	{
-		.name		= "cache",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1E000000*/
-		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
-	},
-};
-
-static void __init omap_zoom2_init(void)
-{
-	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-	zoom_peripherals_init();
-	board_nand_init(zoom_nand_partitions,
-			ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS);
-	zoom_debugboard_init();
-}
-
-MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
-	.boot_params	= 0x80000100,
-	.map_io		= omap3_map_io,
-	.reserve	= omap_reserve,
-	.init_irq	= omap_zoom2_init_irq,
-	.init_machine	= omap_zoom2_init,
-	.timer		= &omap_timer,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
deleted file mode 100644
index 5adde12..0000000
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (C) 2009 Texas Instruments Inc.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/gpio.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include <mach/board-zoom.h>
-
-#include <plat/common.h>
-#include <plat/board.h>
-#include <plat/usb.h>
-
-#include "board-flash.h"
-#include "mux.h"
-#include "sdram-hynix-h8mbx00u0mer-0em.h"
-
-static struct omap_board_config_kernel zoom_config[] __initdata = {
-};
-
-static struct mtd_partition zoom_nand_partitions[] = {
-	/* All the partition sizes are listed in terms of NAND block size */
-	{
-		.name		= "X-Loader-NAND",
-		.offset		= 0,
-		.size		= 4 * (64 * 2048),	/* 512KB, 0x80000 */
-		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
-	},
-	{
-		.name		= "U-Boot-NAND",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x80000 */
-		.size		= 10 * (64 * 2048),	/* 1.25MB, 0x140000 */
-		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
-	},
-	{
-		.name		= "Boot Env-NAND",
-		.offset		= MTDPART_OFS_APPEND,   /* Offset = 0x1c0000 */
-		.size		= 2 * (64 * 2048),	/* 256KB, 0x40000 */
-	},
-	{
-		.name		= "Kernel-NAND",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x0200000*/
-		.size		= 240 * (64 * 2048),	/* 30M, 0x1E00000 */
-	},
-	{
-		.name		= "system",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x2000000 */
-		.size		= 3328 * (64 * 2048),	/* 416M, 0x1A000000 */
-	},
-	{
-		.name		= "userdata",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1C000000*/
-		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
-	},
-	{
-		.name		= "cache",
-		.offset		= MTDPART_OFS_APPEND,	/* Offset = 0x1E000000*/
-		.size		= 256 * (64 * 2048),	/* 32M, 0x2000000 */
-	},
-};
-
-static void __init omap_zoom_init_irq(void)
-{
-	omap_board_config = zoom_config;
-	omap_board_config_size = ARRAY_SIZE(zoom_config);
-	omap2_init_common_hw(h8mbx00u0mer0em_sdrc_params,
-			h8mbx00u0mer0em_sdrc_params);
-	omap_init_irq();
-	omap_gpio_init();
-}
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
-	/* WLAN IRQ - GPIO 162 */
-	OMAP3_MUX(MCBSP1_CLKX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
-	/* WLAN POWER ENABLE - GPIO 101 */
-	OMAP3_MUX(CAM_D2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-	/* WLAN SDIO: MMC3 CMD */
-	OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE3 | OMAP_PIN_INPUT_PULLUP),
-	/* WLAN SDIO: MMC3 CLK */
-	OMAP3_MUX(ETK_CLK, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	/* WLAN SDIO: MMC3 DAT[0-3] */
-	OMAP3_MUX(ETK_D3, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	OMAP3_MUX(ETK_D4, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	OMAP3_MUX(ETK_D5, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	OMAP3_MUX(ETK_D6, OMAP_MUX_MODE2 | OMAP_PIN_INPUT_PULLUP),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define board_mux	NULL
-#endif
-
-static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
-	.port_mode[0]		= EHCI_HCD_OMAP_MODE_UNKNOWN,
-	.port_mode[1]		= EHCI_HCD_OMAP_MODE_PHY,
-	.port_mode[2]		= EHCI_HCD_OMAP_MODE_UNKNOWN,
-	.phy_reset		= true,
-	.reset_gpio_port[0]	= -EINVAL,
-	.reset_gpio_port[1]	= 64,
-	.reset_gpio_port[2]	= -EINVAL,
-};
-
-static void __init omap_zoom_init(void)
-{
-	omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
-	zoom_peripherals_init();
-	board_nand_init(zoom_nand_partitions,
-			 ARRAY_SIZE(zoom_nand_partitions), ZOOM_NAND_CS);
-	zoom_debugboard_init();
-
-	omap_mux_init_gpio(64, OMAP_PIN_OUTPUT);
-	usb_ehci_init(&ehci_pdata);
-}
-
-MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
-	.boot_params	= 0x80000100,
-	.map_io		= omap3_map_io,
-	.reserve	= omap_reserve,
-	.init_irq	= omap_zoom_init_irq,
-	.init_machine	= omap_zoom_init,
-	.timer		= &omap_timer,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/clkt2xxx_apll.c b/arch/arm/mach-omap2/clkt2xxx_apll.c
index 66e01ac..f51cffd 100644
--- a/arch/arm/mach-omap2/clkt2xxx_apll.c
+++ b/arch/arm/mach-omap2/clkt2xxx_apll.c
@@ -26,7 +26,7 @@
 
 #include "clock.h"
 #include "clock2xxx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-24xx.h"
 
 /* CM_CLKEN_PLL.EN_{54,96}M_PLL options (24XX) */
@@ -49,14 +49,14 @@
 
 	apll_mask = EN_APLL_LOCKED << clk->enable_bit;
 
-	cval = cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+	cval = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
 
 	if ((cval & apll_mask) == apll_mask)
 		return 0;   /* apll already enabled */
 
 	cval &= ~apll_mask;
 	cval |= apll_mask;
-	cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
+	omap2_cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
 
 	omap2_cm_wait_idlest(cm_idlest_pll, status_mask,
 			     OMAP24XX_CM_IDLEST_VAL, clk->name);
@@ -83,9 +83,9 @@
 {
 	u32 cval;
 
-	cval = cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+	cval = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
 	cval &= ~(EN_APLL_LOCKED << clk->enable_bit);
-	cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
+	omap2_cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
 }
 
 /* Public data */
@@ -106,7 +106,7 @@
 {
 	u32 aplls, srate = 0;
 
-	aplls = cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
+	aplls = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
 	aplls &= OMAP24XX_APLLS_CLKIN_MASK;
 	aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT;
 
diff --git a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
index 01904843..4ae4392 100644
--- a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
+++ b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
@@ -32,7 +32,7 @@
 #include "clock.h"
 #include "clock2xxx.h"
 #include "opp2xxx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-24xx.h"
 
 /* #define DOWN_VARIABLE_DPLL 1 */		/* Experimental */
@@ -54,7 +54,7 @@
 
 	core_clk = omap2_get_dpll_rate(clk);
 
-	v = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+	v = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
 	v &= OMAP24XX_CORE_CLK_SRC_MASK;
 
 	if (v == CORE_CLK_SRC_32K)
@@ -73,7 +73,7 @@
 {
 	u32 high, low, core_clk_src;
 
-	core_clk_src = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+	core_clk_src = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
 	core_clk_src &= OMAP24XX_CORE_CLK_SRC_MASK;
 
 	if (core_clk_src == CORE_CLK_SRC_DPLL) {	/* DPLL clockout */
@@ -111,7 +111,7 @@
 	const struct dpll_data *dd;
 
 	cur_rate = omap2xxx_clk_get_core_rate(dclk);
-	mult = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+	mult = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
 	mult &= OMAP24XX_CORE_CLK_SRC_MASK;
 
 	if ((rate == (cur_rate / 2)) && (mult == 2)) {
@@ -136,7 +136,7 @@
 		tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
 					   dd->div1_mask);
 		div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
-		tmpset.cm_clksel2_pll = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+		tmpset.cm_clksel2_pll = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
 		tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK;
 		if (rate > low) {
 			tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2;
diff --git a/arch/arm/mach-omap2/clkt2xxx_osc.c b/arch/arm/mach-omap2/clkt2xxx_osc.c
index 2167be8..df7b805 100644
--- a/arch/arm/mach-omap2/clkt2xxx_osc.c
+++ b/arch/arm/mach-omap2/clkt2xxx_osc.c
@@ -27,7 +27,7 @@
 
 #include "clock.h"
 #include "clock2xxx.h"
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
 
 static int omap2_enable_osc_ck(struct clk *clk)
diff --git a/arch/arm/mach-omap2/clkt2xxx_sys.c b/arch/arm/mach-omap2/clkt2xxx_sys.c
index 822b5a7..8693cfd 100644
--- a/arch/arm/mach-omap2/clkt2xxx_sys.c
+++ b/arch/arm/mach-omap2/clkt2xxx_sys.c
@@ -26,7 +26,7 @@
 
 #include "clock.h"
 #include "clock2xxx.h"
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
 
 void __iomem *prcm_clksrc_ctrl;
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index aef6291..39f9d5a 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -40,7 +40,7 @@
 #include "clock.h"
 #include "clock2xxx.h"
 #include "opp2xxx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-24xx.h"
 
 const struct prcm_config *curr_prcm_set;
@@ -133,21 +133,21 @@
 			done_rate = CORE_CLK_SRC_DPLL;
 
 		/* MPU divider */
-		cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);
+		omap2_cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);
 
 		/* dsp + iva1 div(2420), iva2.1(2430) */
-		cm_write_mod_reg(prcm->cm_clksel_dsp,
+		omap2_cm_write_mod_reg(prcm->cm_clksel_dsp,
 				 OMAP24XX_DSP_MOD, CM_CLKSEL);
 
-		cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);
+		omap2_cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);
 
 		/* Major subsystem dividers */
-		tmp = cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
-		cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD,
+		tmp = omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
+		omap2_cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD,
 				 CM_CLKSEL1);
 
 		if (cpu_is_omap2430())
-			cm_write_mod_reg(prcm->cm_clksel_mdm,
+			omap2_cm_write_mod_reg(prcm->cm_clksel_mdm,
 					 OMAP2430_MDM_MOD, CM_CLKSEL);
 
 		/* x2 to enter omap2xxx_sdrc_init_params() */
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 6ce512e..337392c 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -24,7 +24,6 @@
 #include <plat/clock.h>
 
 #include "clock.h"
-#include "cm.h"
 #include "cm-regbits-24xx.h"
 #include "cm-regbits-34xx.h"
 
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index b5babf5..2a2f152 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -24,14 +24,12 @@
 #include <linux/bitops.h>
 
 #include <plat/clock.h>
-#include <plat/clockdomain.h>
+#include "clockdomain.h"
 #include <plat/cpu.h>
 #include <plat/prcm.h>
 
 #include "clock.h"
-#include "prm.h"
-#include "prm-regbits-24xx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-24xx.h"
 #include "cm-regbits-34xx.h"
 
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index a535c7a..896584e 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -49,7 +49,6 @@
 
 /* DPLL Type and DCO Selection Flags */
 #define DPLL_J_TYPE		0x1
-#define DPLL_NO_DCO_SEL		0x2
 
 int omap2_clk_enable(struct clk *clk);
 void omap2_clk_disable(struct clk *clk);
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index 21f8562..0a992bc 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -22,8 +22,8 @@
 #include "clock.h"
 #include "clock2xxx.h"
 #include "opp2xxx.h"
-#include "prm.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
 #include "cm-regbits-24xx.h"
 #include "sdrc.h"
@@ -812,7 +812,7 @@
 	.clksel_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
 	.clksel_mask	= OMAP24XX_CLKSEL_DSS2_MASK,
 	.clksel		= dss2_fck_clksel,
-	.recalc		= &followparent_recalc,
+	.recalc		= &omap2_clksel_recalc,
 };
 
 static struct clk dss_54m_fck = {	/* Alt clk used in power management */
@@ -1862,10 +1862,10 @@
 	CLK(NULL,	"eac_fck",	&eac_fck,	CK_242X),
 	CLK("omap_hdq.0", "ick",	&hdq_ick,	CK_242X),
 	CLK("omap_hdq.1", "fck",	&hdq_fck,	CK_242X),
-	CLK("i2c_omap.1", "ick",	&i2c1_ick,	CK_242X),
-	CLK("i2c_omap.1", "fck",	&i2c1_fck,	CK_242X),
-	CLK("i2c_omap.2", "ick",	&i2c2_ick,	CK_242X),
-	CLK("i2c_omap.2", "fck",	&i2c2_fck,	CK_242X),
+	CLK("omap_i2c.1", "ick",	&i2c1_ick,	CK_242X),
+	CLK("omap_i2c.1", "fck",	&i2c1_fck,	CK_242X),
+	CLK("omap_i2c.2", "ick",	&i2c2_ick,	CK_242X),
+	CLK("omap_i2c.2", "fck",	&i2c2_fck,	CK_242X),
 	CLK(NULL,	"gpmc_fck",	&gpmc_fck,	CK_242X),
 	CLK(NULL,	"sdma_fck",	&sdma_fck,	CK_242X),
 	CLK(NULL,	"sdma_ick",	&sdma_ick,	CK_242X),
@@ -1877,7 +1877,7 @@
 	CLK("omap-aes",	"ick",	&aes_ick,	CK_242X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_242X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_242X),
-	CLK("musb_hdrc",	"fck",	&osc_ck,	CK_242X),
+	CLK("musb-hdrc",	"fck",	&osc_ck,	CK_242X),
 };
 
 /*
diff --git a/arch/arm/mach-omap2/clock2430.c b/arch/arm/mach-omap2/clock2430.c
index 44d0ccc..d87bc9c 100644
--- a/arch/arm/mach-omap2/clock2430.c
+++ b/arch/arm/mach-omap2/clock2430.c
@@ -25,7 +25,7 @@
 
 #include "clock.h"
 #include "clock2xxx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-24xx.h"
 
 /**
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index e32afcb..c047dcd 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -22,8 +22,8 @@
 #include "clock.h"
 #include "clock2xxx.h"
 #include "opp2xxx.h"
-#include "prm.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
 #include "cm-regbits-24xx.h"
 #include "sdrc.h"
@@ -800,7 +800,7 @@
 	.clksel_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
 	.clksel_mask	= OMAP24XX_CLKSEL_DSS2_MASK,
 	.clksel		= dss2_fck_clksel,
-	.recalc		= &followparent_recalc,
+	.recalc		= &omap2_clksel_recalc,
 };
 
 static struct clk dss_54m_fck = {	/* Alt clk used in power management */
@@ -1969,10 +1969,10 @@
 	CLK(NULL,	"fac_fck",	&fac_fck,	CK_243X),
 	CLK("omap_hdq.0", "ick",	&hdq_ick,	CK_243X),
 	CLK("omap_hdq.1", "fck",	&hdq_fck,	CK_243X),
-	CLK("i2c_omap.1", "ick",	&i2c1_ick,	CK_243X),
-	CLK("i2c_omap.1", "fck",	&i2chs1_fck,	CK_243X),
-	CLK("i2c_omap.2", "ick",	&i2c2_ick,	CK_243X),
-	CLK("i2c_omap.2", "fck",	&i2chs2_fck,	CK_243X),
+	CLK("omap_i2c.1", "ick",	&i2c1_ick,	CK_243X),
+	CLK("omap_i2c.1", "fck",	&i2chs1_fck,	CK_243X),
+	CLK("omap_i2c.2", "ick",	&i2c2_ick,	CK_243X),
+	CLK("omap_i2c.2", "fck",	&i2chs2_fck,	CK_243X),
 	CLK(NULL,	"gpmc_fck",	&gpmc_fck,	CK_243X),
 	CLK(NULL,	"sdma_fck",	&sdma_fck,	CK_243X),
 	CLK(NULL,	"sdma_ick",	&sdma_ick,	CK_243X),
@@ -1983,7 +1983,7 @@
 	CLK("omap-aes",	"ick",	&aes_ick,	CK_243X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_243X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_243X),
-	CLK("musb_hdrc",	"ick",	&usbhs_ick,	CK_243X),
+	CLK("musb-omap2430",	"ick",	&usbhs_ick,	CK_243X),
 	CLK("mmci-omap-hs.0", "ick",	&mmchs1_ick,	CK_243X),
 	CLK("mmci-omap-hs.0", "fck",	&mmchs1_fck,	CK_243X),
 	CLK("mmci-omap-hs.1", "ick",	&mmchs2_ick,	CK_243X),
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 6febd5f..287abc4 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -25,7 +25,7 @@
 
 #include "clock.h"
 #include "clock34xx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 
 /**
diff --git a/arch/arm/mach-omap2/clock3517.c b/arch/arm/mach-omap2/clock3517.c
index b496a93..74116a3 100644
--- a/arch/arm/mach-omap2/clock3517.c
+++ b/arch/arm/mach-omap2/clock3517.c
@@ -25,7 +25,7 @@
 
 #include "clock.h"
 #include "clock3517.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 
 /*
diff --git a/arch/arm/mach-omap2/clock3xxx.c b/arch/arm/mach-omap2/clock3xxx.c
index a447c4d..e9f66b6 100644
--- a/arch/arm/mach-omap2/clock3xxx.c
+++ b/arch/arm/mach-omap2/clock3xxx.c
@@ -25,9 +25,9 @@
 
 #include "clock.h"
 #include "clock3xxx.h"
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-34xx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 
 /*
@@ -94,7 +94,7 @@
 
 	ret = omap2_clk_switch_mpurate_at_boot("dpll1_ck");
 	if (!ret)
-		omap2_clk_print_new_rates("osc_sys_ck", "arm_fck", "core_ck");
+		omap2_clk_print_new_rates("osc_sys_ck", "core_ck", "arm_fck");
 
 	return ret;
 }
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index d85ecd5..403a4a1 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -28,9 +28,9 @@
 #include "clock36xx.h"
 #include "clock3517.h"
 
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-34xx.h"
 #include "control.h"
 
@@ -120,7 +120,7 @@
 };
 
 static const struct clksel_rate osc_sys_16_8m_rates[] = {
-	{ .div = 1, .val = 5, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 1, .val = 5, .flags = RATE_IN_3430ES2PLUS_36XX },
 	{ .div = 0 }
 };
 
@@ -452,35 +452,35 @@
 static const struct clksel_rate div31_dpll3_rates[] = {
 	{ .div = 1, .val = 1, .flags = RATE_IN_3XXX },
 	{ .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-	{ .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 4, .val = 4, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 5, .val = 5, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 6, .val = 6, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 7, .val = 7, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 8, .val = 8, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 9, .val = 9, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 10, .val = 10, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 11, .val = 11, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 12, .val = 12, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 13, .val = 13, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 14, .val = 14, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 15, .val = 15, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 16, .val = 16, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 17, .val = 17, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 18, .val = 18, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 19, .val = 19, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 20, .val = 20, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 21, .val = 21, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 22, .val = 22, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 23, .val = 23, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 24, .val = 24, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 25, .val = 25, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 26, .val = 26, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 27, .val = 27, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 28, .val = 28, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 29, .val = 29, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 30, .val = 30, .flags = RATE_IN_3430ES2PLUS },
-	{ .div = 31, .val = 31, .flags = RATE_IN_3430ES2PLUS },
+	{ .div = 3, .val = 3, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 4, .val = 4, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 5, .val = 5, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 6, .val = 6, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 7, .val = 7, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 8, .val = 8, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 9, .val = 9, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 10, .val = 10, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 11, .val = 11, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 12, .val = 12, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 13, .val = 13, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 14, .val = 14, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 15, .val = 15, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 16, .val = 16, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 17, .val = 17, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 18, .val = 18, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 19, .val = 19, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 20, .val = 20, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 21, .val = 21, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 22, .val = 22, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 23, .val = 23, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 24, .val = 24, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 25, .val = 25, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 26, .val = 26, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 27, .val = 27, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 28, .val = 28, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 29, .val = 29, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 30, .val = 30, .flags = RATE_IN_3430ES2PLUS_36XX },
+	{ .div = 31, .val = 31, .flags = RATE_IN_3430ES2PLUS_36XX },
 	{ .div = 0 },
 };
 
@@ -602,6 +602,8 @@
 	.autoidle_mask	= OMAP3430_AUTO_PERIPH_DPLL_MASK,
 	.idlest_reg	= OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
 	.idlest_mask	= OMAP3430_ST_PERIPH_CLK_MASK,
+	.dco_mask	= OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
+	.sddiv_mask	= OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
 	.max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
 	.min_divider	= 1,
 	.max_divider	= OMAP3_MAX_DPLL_DIV,
@@ -1558,6 +1560,7 @@
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
 	.enable_bit	= OMAP3430_EN_MCSPI4_SHIFT,
 	.recalc		= &followparent_recalc,
+	.clkdm_name	= "core_l4_clkdm",
 };
 
 static struct clk mcspi3_fck = {
@@ -1567,6 +1570,7 @@
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
 	.enable_bit	= OMAP3430_EN_MCSPI3_SHIFT,
 	.recalc		= &followparent_recalc,
+	.clkdm_name	= "core_l4_clkdm",
 };
 
 static struct clk mcspi2_fck = {
@@ -1576,6 +1580,7 @@
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
 	.enable_bit	= OMAP3430_EN_MCSPI2_SHIFT,
 	.recalc		= &followparent_recalc,
+	.clkdm_name	= "core_l4_clkdm",
 };
 
 static struct clk mcspi1_fck = {
@@ -1585,6 +1590,7 @@
 	.enable_reg	= OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
 	.enable_bit	= OMAP3430_EN_MCSPI1_SHIFT,
 	.recalc		= &followparent_recalc,
+	.clkdm_name	= "core_l4_clkdm",
 };
 
 static struct clk uart2_fck = {
@@ -3044,6 +3050,7 @@
 	.parent		= &sys_ck,
 	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_SR1_SHIFT,
+	.clkdm_name	= "wkup_clkdm",
 	.recalc		= &followparent_recalc,
 };
 
@@ -3054,6 +3061,7 @@
 	.parent		= &sys_ck,
 	.enable_reg	= OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
 	.enable_bit	= OMAP3430_EN_SR2_SHIFT,
+	.clkdm_name	= "wkup_clkdm",
 	.recalc		= &followparent_recalc,
 };
 
@@ -3201,7 +3209,7 @@
 	CLK(NULL,	"omap_32k_fck",	&omap_32k_fck,	CK_3XXX),
 	CLK(NULL,	"virt_12m_ck",	&virt_12m_ck,	CK_3XXX),
 	CLK(NULL,	"virt_13m_ck",	&virt_13m_ck,	CK_3XXX),
-	CLK(NULL,	"virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2 | CK_AM35XX),
+	CLK(NULL,	"virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2PLUS | CK_AM35XX  | CK_36XX),
 	CLK(NULL,	"virt_19_2m_ck", &virt_19_2m_ck, CK_3XXX),
 	CLK(NULL,	"virt_26m_ck",	&virt_26m_ck,	CK_3XXX),
 	CLK(NULL,	"virt_38_4m_ck", &virt_38_4m_ck, CK_3XXX),
@@ -3218,8 +3226,8 @@
 	CLK(NULL,	"dpll1_ck",	&dpll1_ck,	CK_3XXX),
 	CLK(NULL,	"dpll1_x2_ck",	&dpll1_x2_ck,	CK_3XXX),
 	CLK(NULL,	"dpll1_x2m2_ck", &dpll1_x2m2_ck, CK_3XXX),
-	CLK(NULL,	"dpll2_ck",	&dpll2_ck,	CK_343X),
-	CLK(NULL,	"dpll2_m2_ck",	&dpll2_m2_ck,	CK_343X),
+	CLK(NULL,	"dpll2_ck",	&dpll2_ck,	CK_34XX | CK_36XX),
+	CLK(NULL,	"dpll2_m2_ck",	&dpll2_m2_ck,	CK_34XX | CK_36XX),
 	CLK(NULL,	"dpll3_ck",	&dpll3_ck,	CK_3XXX),
 	CLK(NULL,	"core_ck",	&core_ck,	CK_3XXX),
 	CLK(NULL,	"dpll3_x2_ck",	&dpll3_x2_ck,	CK_3XXX),
@@ -3248,8 +3256,8 @@
 	CLK(NULL,	"dpll4_m6_ck",	&dpll4_m6_ck,	CK_3XXX),
 	CLK(NULL,	"dpll4_m6x2_ck", &dpll4_m6x2_ck, CK_3XXX),
 	CLK("etb",	"emu_per_alwon_ck", &emu_per_alwon_ck, CK_3XXX),
-	CLK(NULL,	"dpll5_ck",	&dpll5_ck,	CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"dpll5_m2_ck",	&dpll5_m2_ck,	CK_3430ES2 | CK_AM35XX),
+	CLK(NULL,	"dpll5_ck",	&dpll5_ck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"dpll5_m2_ck",	&dpll5_m2_ck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK(NULL,	"clkout2_src_ck", &clkout2_src_ck, CK_3XXX),
 	CLK(NULL,	"sys_clkout2",	&sys_clkout2,	CK_3XXX),
 	CLK(NULL,	"corex2_fck",	&corex2_fck,	CK_3XXX),
@@ -3257,8 +3265,8 @@
 	CLK(NULL,	"mpu_ck",	&mpu_ck,	CK_3XXX),
 	CLK(NULL,	"arm_fck",	&arm_fck,	CK_3XXX),
 	CLK("etb",	"emu_mpu_alwon_ck", &emu_mpu_alwon_ck, CK_3XXX),
-	CLK(NULL,	"dpll2_fck",	&dpll2_fck,	CK_343X),
-	CLK(NULL,	"iva2_ck",	&iva2_ck,	CK_343X),
+	CLK(NULL,	"dpll2_fck",	&dpll2_fck,	CK_34XX | CK_36XX),
+	CLK(NULL,	"iva2_ck",	&iva2_ck,	CK_34XX | CK_36XX),
 	CLK(NULL,	"l3_ick",	&l3_ick,	CK_3XXX),
 	CLK(NULL,	"l4_ick",	&l4_ick,	CK_3XXX),
 	CLK(NULL,	"rm_ick",	&rm_ick,	CK_3XXX),
@@ -3267,27 +3275,28 @@
 	CLK(NULL,	"gfx_l3_ick",	&gfx_l3_ick,	CK_3430ES1),
 	CLK(NULL,	"gfx_cg1_ck",	&gfx_cg1_ck,	CK_3430ES1),
 	CLK(NULL,	"gfx_cg2_ck",	&gfx_cg2_ck,	CK_3430ES1),
-	CLK(NULL,	"sgx_fck",	&sgx_fck,	CK_3430ES2 | CK_3517),
-	CLK(NULL,	"sgx_ick",	&sgx_ick,	CK_3430ES2 | CK_3517),
+	CLK(NULL,	"sgx_fck",	&sgx_fck,	CK_3430ES2PLUS | CK_3517 | CK_36XX),
+	CLK(NULL,	"sgx_ick",	&sgx_ick,	CK_3430ES2PLUS | CK_3517 | CK_36XX),
 	CLK(NULL,	"d2d_26m_fck",	&d2d_26m_fck,	CK_3430ES1),
-	CLK(NULL,	"modem_fck",	&modem_fck,	CK_343X),
-	CLK(NULL,	"sad2d_ick",	&sad2d_ick,	CK_343X),
-	CLK(NULL,	"mad2d_ick",	&mad2d_ick,	CK_343X),
+	CLK(NULL,	"modem_fck",	&modem_fck,	CK_34XX | CK_36XX),
+	CLK(NULL,	"sad2d_ick",	&sad2d_ick,	CK_34XX | CK_36XX),
+	CLK(NULL,	"mad2d_ick",	&mad2d_ick,	CK_34XX | CK_36XX),
 	CLK(NULL,	"gpt10_fck",	&gpt10_fck,	CK_3XXX),
 	CLK(NULL,	"gpt11_fck",	&gpt11_fck,	CK_3XXX),
-	CLK(NULL,	"cpefuse_fck",	&cpefuse_fck,	CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"ts_fck",	&ts_fck,	CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"usbtll_fck",	&usbtll_fck,	CK_3430ES2 | CK_AM35XX),
+	CLK(NULL,	"cpefuse_fck",	&cpefuse_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"ts_fck",	&ts_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"usbtll_fck",	&usbtll_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("omap-mcbsp.1",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK("omap-mcbsp.5",	"prcm_fck",	&core_96m_fck,	CK_3XXX),
 	CLK(NULL,	"core_96m_fck",	&core_96m_fck,	CK_3XXX),
-	CLK("mmci-omap-hs.2",	"fck",	&mmchs3_fck,	CK_3430ES2 | CK_AM35XX),
+	CLK("mmci-omap-hs.2",	"fck",	&mmchs3_fck,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("mmci-omap-hs.1",	"fck",	&mmchs2_fck,	CK_3XXX),
-	CLK(NULL,	"mspro_fck",	&mspro_fck,	CK_343X),
+	CLK(NULL,	"mspro_fck",	&mspro_fck,	CK_34XX | CK_36XX),
 	CLK("mmci-omap-hs.0",	"fck",	&mmchs1_fck,	CK_3XXX),
-	CLK("i2c_omap.3", "fck",	&i2c3_fck,	CK_3XXX),
-	CLK("i2c_omap.2", "fck",	&i2c2_fck,	CK_3XXX),
-	CLK("i2c_omap.1", "fck",	&i2c1_fck,	CK_3XXX),
+	CLK("omap_i2c.3", "fck",	&i2c3_fck,	CK_3XXX),
+	CLK("omap_i2c.2", "fck",	&i2c2_fck,	CK_3XXX),
+	CLK("omap_i2c.1", "fck",	&i2c1_fck,	CK_3XXX),
 	CLK("omap-mcbsp.5", "fck",	&mcbsp5_fck,	CK_3XXX),
 	CLK("omap-mcbsp.1", "fck",	&mcbsp1_fck,	CK_3XXX),
 	CLK(NULL,	"core_48m_fck",	&core_48m_fck,	CK_3XXX),
@@ -3301,34 +3310,35 @@
 	CLK(NULL,	"core_12m_fck",	&core_12m_fck,	CK_3XXX),
 	CLK("omap_hdq.0", "fck",	&hdq_fck,	CK_3XXX),
 	CLK(NULL,	"ssi_ssr_fck",	&ssi_ssr_fck_3430es1,	CK_3430ES1),
-	CLK(NULL,	"ssi_ssr_fck",	&ssi_ssr_fck_3430es2,	CK_3430ES2),
+	CLK(NULL,	"ssi_ssr_fck",	&ssi_ssr_fck_3430es2,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es1,	CK_3430ES1),
-	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es2,	CK_3430ES2),
+	CLK(NULL,	"ssi_sst_fck",	&ssi_sst_fck_3430es2,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"core_l3_ick",	&core_l3_ick,	CK_3XXX),
-	CLK("musb_hdrc",	"ick",	&hsotgusb_ick_3430es1,	CK_3430ES1),
-	CLK("musb_hdrc",	"ick",	&hsotgusb_ick_3430es2,	CK_3430ES2),
+	CLK("musb-omap2430",	"ick",	&hsotgusb_ick_3430es1,	CK_3430ES1),
+	CLK("musb-omap2430",	"ick",	&hsotgusb_ick_3430es2,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"sdrc_ick",	&sdrc_ick,	CK_3XXX),
 	CLK(NULL,	"gpmc_fck",	&gpmc_fck,	CK_3XXX),
-	CLK(NULL,	"security_l3_ick", &security_l3_ick, CK_343X),
-	CLK(NULL,	"pka_ick",	&pka_ick,	CK_343X),
+	CLK(NULL,	"security_l3_ick", &security_l3_ick, CK_34XX | CK_36XX),
+	CLK(NULL,	"pka_ick",	&pka_ick,	CK_34XX | CK_36XX),
 	CLK(NULL,	"core_l4_ick",	&core_l4_ick,	CK_3XXX),
-	CLK(NULL,	"usbtll_ick",	&usbtll_ick,	CK_3430ES2 | CK_AM35XX),
-	CLK("mmci-omap-hs.2",	"ick",	&mmchs3_ick,	CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"icr_ick",	&icr_ick,	CK_343X),
-	CLK("omap-aes",	"ick",	&aes2_ick,	CK_343X),
-	CLK("omap-sham",	"ick",	&sha12_ick,	CK_343X),
-	CLK(NULL,	"des2_ick",	&des2_ick,	CK_343X),
+	CLK(NULL,	"usbtll_ick",	&usbtll_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"usbtll_ick",	&usbtll_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("mmci-omap-hs.2",	"ick",	&mmchs3_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"icr_ick",	&icr_ick,	CK_34XX | CK_36XX),
+	CLK("omap-aes",	"ick",	&aes2_ick,	CK_34XX | CK_36XX),
+	CLK("omap-sham",	"ick",	&sha12_ick,	CK_34XX | CK_36XX),
+	CLK(NULL,	"des2_ick",	&des2_ick,	CK_34XX | CK_36XX),
 	CLK("mmci-omap-hs.1",	"ick",	&mmchs2_ick,	CK_3XXX),
 	CLK("mmci-omap-hs.0",	"ick",	&mmchs1_ick,	CK_3XXX),
-	CLK(NULL,	"mspro_ick",	&mspro_ick,	CK_343X),
+	CLK(NULL,	"mspro_ick",	&mspro_ick,	CK_34XX | CK_36XX),
 	CLK("omap_hdq.0", "ick",	&hdq_ick,	CK_3XXX),
 	CLK("omap2_mcspi.4", "ick",	&mcspi4_ick,	CK_3XXX),
 	CLK("omap2_mcspi.3", "ick",	&mcspi3_ick,	CK_3XXX),
 	CLK("omap2_mcspi.2", "ick",	&mcspi2_ick,	CK_3XXX),
 	CLK("omap2_mcspi.1", "ick",	&mcspi1_ick,	CK_3XXX),
-	CLK("i2c_omap.3", "ick",	&i2c3_ick,	CK_3XXX),
-	CLK("i2c_omap.2", "ick",	&i2c2_ick,	CK_3XXX),
-	CLK("i2c_omap.1", "ick",	&i2c1_ick,	CK_3XXX),
+	CLK("omap_i2c.3", "ick",	&i2c3_ick,	CK_3XXX),
+	CLK("omap_i2c.2", "ick",	&i2c2_ick,	CK_3XXX),
+	CLK("omap_i2c.1", "ick",	&i2c1_ick,	CK_3XXX),
 	CLK(NULL,	"uart2_ick",	&uart2_ick,	CK_3XXX),
 	CLK(NULL,	"uart1_ick",	&uart1_ick,	CK_3XXX),
 	CLK(NULL,	"gpt11_ick",	&gpt11_ick,	CK_3XXX),
@@ -3336,37 +3346,40 @@
 	CLK("omap-mcbsp.5", "ick",	&mcbsp5_ick,	CK_3XXX),
 	CLK("omap-mcbsp.1", "ick",	&mcbsp1_ick,	CK_3XXX),
 	CLK(NULL,	"fac_ick",	&fac_ick,	CK_3430ES1),
-	CLK(NULL,	"mailboxes_ick", &mailboxes_ick, CK_343X),
+	CLK(NULL,	"mailboxes_ick", &mailboxes_ick, CK_34XX | CK_36XX),
 	CLK(NULL,	"omapctrl_ick",	&omapctrl_ick,	CK_3XXX),
-	CLK(NULL,	"ssi_l4_ick",	&ssi_l4_ick,	CK_343X),
+	CLK(NULL,	"ssi_l4_ick",	&ssi_l4_ick,	CK_34XX | CK_36XX),
 	CLK(NULL,	"ssi_ick",	&ssi_ick_3430es1,	CK_3430ES1),
-	CLK(NULL,	"ssi_ick",	&ssi_ick_3430es2,	CK_3430ES2),
+	CLK(NULL,	"ssi_ick",	&ssi_ick_3430es2,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"usb_l4_ick",	&usb_l4_ick,	CK_3430ES1),
-	CLK(NULL,	"security_l4_ick2", &security_l4_ick2, CK_343X),
-	CLK(NULL,	"aes1_ick",	&aes1_ick,	CK_343X),
-	CLK("omap_rng",	"ick",		&rng_ick,	CK_343X),
-	CLK(NULL,	"sha11_ick",	&sha11_ick,	CK_343X),
-	CLK(NULL,	"des1_ick",	&des1_ick,	CK_343X),
+	CLK(NULL,	"security_l4_ick2", &security_l4_ick2, CK_34XX | CK_36XX),
+	CLK(NULL,	"aes1_ick",	&aes1_ick,	CK_34XX | CK_36XX),
+	CLK("omap_rng",	"ick",		&rng_ick,	CK_34XX | CK_36XX),
+	CLK(NULL,	"sha11_ick",	&sha11_ick,	CK_34XX | CK_36XX),
+	CLK(NULL,	"des1_ick",	&des1_ick,	CK_34XX | CK_36XX),
 	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es1, CK_3430ES1),
-	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es2, CK_3430ES2 | CK_AM35XX),
+	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es2, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
 	CLK("omapdss",	"tv_fck",	&dss_tv_fck,	CK_3XXX),
 	CLK("omapdss",	"video_fck",	&dss_96m_fck,	CK_3XXX),
 	CLK("omapdss",	"dss2_fck",	&dss2_alwon_fck, CK_3XXX),
 	CLK("omapdss",	"ick",		&dss_ick_3430es1,	CK_3430ES1),
-	CLK("omapdss",	"ick",		&dss_ick_3430es2,	CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"cam_mclk",	&cam_mclk,	CK_343X),
-	CLK(NULL,	"cam_ick",	&cam_ick,	CK_343X),
-	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck,	CK_343X),
-	CLK(NULL,	"usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"usbhost_ick",	&usbhost_ick,	CK_3430ES2 | CK_AM35XX),
-	CLK(NULL,	"usim_fck",	&usim_fck,	CK_3430ES2),
+	CLK("omapdss",	"ick",		&dss_ick_3430es2,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"cam_mclk",	&cam_mclk,	CK_34XX | CK_36XX),
+	CLK(NULL,	"cam_ick",	&cam_ick,	CK_34XX | CK_36XX),
+	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck,	CK_34XX | CK_36XX),
+	CLK(NULL,	"usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"hs_fck", &usbhost_120m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"fs_fck", &usbhost_48m_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"usbhost_ick",	&usbhost_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK("ehci-omap.0",	"usbhost_ick",	&usbhost_ick,	CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
+	CLK(NULL,	"usim_fck",	&usim_fck,	CK_3430ES2PLUS | CK_36XX),
 	CLK(NULL,	"gpt1_fck",	&gpt1_fck,	CK_3XXX),
 	CLK(NULL,	"wkup_32k_fck",	&wkup_32k_fck,	CK_3XXX),
 	CLK(NULL,	"gpio1_dbck",	&gpio1_dbck,	CK_3XXX),
 	CLK("omap_wdt",	"fck",		&wdt2_fck,	CK_3XXX),
-	CLK(NULL,	"wkup_l4_ick",	&wkup_l4_ick,	CK_343X),
-	CLK(NULL,	"usim_ick",	&usim_ick,	CK_3430ES2),
+	CLK(NULL,	"wkup_l4_ick",	&wkup_l4_ick,	CK_34XX | CK_36XX),
+	CLK(NULL,	"usim_ick",	&usim_ick,	CK_3430ES2PLUS | CK_36XX),
 	CLK("omap_wdt",	"ick",		&wdt2_ick,	CK_3XXX),
 	CLK(NULL,	"wdt1_ick",	&wdt1_ick,	CK_3XXX),
 	CLK(NULL,	"gpio1_ick",	&gpio1_ick,	CK_3XXX),
@@ -3424,9 +3437,9 @@
 	CLK(NULL,	"atclk_fck",	&atclk_fck,	CK_3XXX),
 	CLK(NULL,	"traceclk_src_fck", &traceclk_src_fck, CK_3XXX),
 	CLK(NULL,	"traceclk_fck",	&traceclk_fck,	CK_3XXX),
-	CLK(NULL,	"sr1_fck",	&sr1_fck,	CK_343X),
-	CLK(NULL,	"sr2_fck",	&sr2_fck,	CK_343X),
-	CLK(NULL,	"sr_l4_ick",	&sr_l4_ick,	CK_343X),
+	CLK(NULL,	"sr1_fck",	&sr1_fck,	CK_34XX | CK_36XX),
+	CLK(NULL,	"sr2_fck",	&sr2_fck,	CK_34XX | CK_36XX),
+	CLK(NULL,	"sr_l4_ick",	&sr_l4_ick,	CK_34XX | CK_36XX),
 	CLK(NULL,	"secure_32k_fck", &secure_32k_fck, CK_3XXX),
 	CLK(NULL,	"gpt12_fck",	&gpt12_fck,	CK_3XXX),
 	CLK(NULL,	"wdt1_fck",	&wdt1_fck,	CK_3XXX),
@@ -3437,8 +3450,8 @@
 	CLK("davinci_emac",	"phy_clk",	&emac_fck,	CK_AM35XX),
 	CLK("vpfe-capture",	"master",	&vpfe_ick,	CK_AM35XX),
 	CLK("vpfe-capture",	"slave",	&vpfe_fck,	CK_AM35XX),
-	CLK("musb_hdrc",	"ick",		&hsotgusb_ick_am35xx,	CK_AM35XX),
-	CLK("musb_hdrc",	"fck",		&hsotgusb_fck_am35xx,	CK_AM35XX),
+	CLK("musb-am35x",	"ick",		&hsotgusb_ick_am35xx,	CK_AM35XX),
+	CLK("musb-am35x",	"fck",		&hsotgusb_fck_am35xx,	CK_AM35XX),
 	CLK(NULL,	"hecc_ck",	&hecc_ck,	CK_AM35XX),
 	CLK(NULL,	"uart4_ick",	&uart4_ick_am35xx,	CK_AM35XX),
 };
@@ -3447,38 +3460,37 @@
 int __init omap3xxx_clk_init(void)
 {
 	struct omap_clk *c;
-	u32 cpu_clkflg = CK_3XXX;
+	u32 cpu_clkflg = 0;
 
 	if (cpu_is_omap3517()) {
-		cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
-		cpu_clkflg |= CK_3517;
+		cpu_mask = RATE_IN_34XX;
+		cpu_clkflg = CK_3517;
 	} else if (cpu_is_omap3505()) {
-		cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
-		cpu_clkflg |= CK_3505;
+		cpu_mask = RATE_IN_34XX;
+		cpu_clkflg = CK_3505;
+	} else if (cpu_is_omap3630()) {
+		cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
+		cpu_clkflg = CK_36XX;
 	} else if (cpu_is_omap34xx()) {
-		cpu_mask = RATE_IN_3XXX;
-		cpu_clkflg |= CK_343X;
-
-		/*
-		 * Update this if there are further clock changes between ES2
-		 * and production parts
-		 */
 		if (omap_rev() == OMAP3430_REV_ES1_0) {
-			/* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
-			cpu_clkflg |= CK_3430ES1;
+			cpu_mask = RATE_IN_3430ES1;
+			cpu_clkflg = CK_3430ES1;
 		} else {
-			cpu_mask |= RATE_IN_3430ES2PLUS;
-			cpu_clkflg |= CK_3430ES2;
+			/*
+			 * Assume that anything that we haven't matched yet
+			 * has 3430ES2-type clocks.
+			 */
+			cpu_mask = RATE_IN_3430ES2PLUS;
+			cpu_clkflg = CK_3430ES2PLUS;
 		}
+	} else {
+		WARN(1, "clock: could not identify OMAP3 variant\n");
 	}
 
 	if (omap3_has_192mhz_clk())
 		omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
 
 	if (cpu_is_omap3630()) {
-		cpu_mask |= RATE_IN_36XX;
-		cpu_clkflg |= CK_36XX;
-
 		/*
 		 * XXX This type of dynamic rewriting of the clock tree is
 		 * deprecated and should be revised soon.
@@ -3525,10 +3537,9 @@
 
 	recalculate_root_clocks();
 
-	printk(KERN_INFO "Clocking rate (Crystal/Core/MPU): "
-	       "%ld.%01ld/%ld/%ld MHz\n",
-	       (osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
-	       (core_ck.rate / 1000000), (arm_fck.rate / 1000000));
+	pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+		(osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
+		(core_ck.rate / 1000000), (arm_fck.rate / 1000000));
 
 	/*
 	 * Only enable those clocks we will need, let the drivers
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 1599836..e8cb32f 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -30,11 +30,18 @@
 
 #include "clock.h"
 #include "clock44xx.h"
-#include "cm.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
-#include "prm.h"
+#include "prm44xx.h"
+#include "prm44xx.h"
 #include "prm-regbits-44xx.h"
 #include "control.h"
+#include "scrm44xx.h"
+
+/* OMAP4 modulemode control */
+#define OMAP4430_MODULEMODE_HWCTRL			0
+#define OMAP4430_MODULEMODE_SWCTRL			1
 
 /* Root clocks */
 
@@ -47,7 +54,9 @@
 static struct clk pad_clks_ck = {
 	.name		= "pad_clks_ck",
 	.rate		= 12000000,
-	.ops		= &clkops_null,
+	.ops            = &clkops_omap2_dflt,
+	.enable_reg     = OMAP4430_CM_CLKSEL_ABE,
+	.enable_bit     = OMAP4430_PAD_CLKS_GATE_SHIFT,
 };
 
 static struct clk pad_slimbus_core_clks_ck = {
@@ -65,7 +74,9 @@
 static struct clk slimbus_clk = {
 	.name		= "slimbus_clk",
 	.rate		= 12000000,
-	.ops		= &clkops_null,
+	.ops            = &clkops_omap2_dflt,
+	.enable_reg     = OMAP4430_CM_CLKSEL_ABE,
+	.enable_bit     = OMAP4430_SLIMBUS_CLK_GATE_SHIFT,
 };
 
 static struct clk sys_32k_ck = {
@@ -265,18 +276,71 @@
 	.set_rate	= &omap3_noncore_dpll_set_rate,
 };
 
-static struct clk dpll_abe_m2x2_ck = {
-	.name		= "dpll_abe_m2x2_ck",
+static struct clk dpll_abe_x2_ck = {
+	.name		= "dpll_abe_x2_ck",
 	.parent		= &dpll_abe_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.recalc		= &omap3_clkoutx2_recalc,
+};
+
+static const struct clksel_rate div31_1to31_rates[] = {
+	{ .div = 1, .val = 1, .flags = RATE_IN_4430 },
+	{ .div = 2, .val = 2, .flags = RATE_IN_4430 },
+	{ .div = 3, .val = 3, .flags = RATE_IN_4430 },
+	{ .div = 4, .val = 4, .flags = RATE_IN_4430 },
+	{ .div = 5, .val = 5, .flags = RATE_IN_4430 },
+	{ .div = 6, .val = 6, .flags = RATE_IN_4430 },
+	{ .div = 7, .val = 7, .flags = RATE_IN_4430 },
+	{ .div = 8, .val = 8, .flags = RATE_IN_4430 },
+	{ .div = 9, .val = 9, .flags = RATE_IN_4430 },
+	{ .div = 10, .val = 10, .flags = RATE_IN_4430 },
+	{ .div = 11, .val = 11, .flags = RATE_IN_4430 },
+	{ .div = 12, .val = 12, .flags = RATE_IN_4430 },
+	{ .div = 13, .val = 13, .flags = RATE_IN_4430 },
+	{ .div = 14, .val = 14, .flags = RATE_IN_4430 },
+	{ .div = 15, .val = 15, .flags = RATE_IN_4430 },
+	{ .div = 16, .val = 16, .flags = RATE_IN_4430 },
+	{ .div = 17, .val = 17, .flags = RATE_IN_4430 },
+	{ .div = 18, .val = 18, .flags = RATE_IN_4430 },
+	{ .div = 19, .val = 19, .flags = RATE_IN_4430 },
+	{ .div = 20, .val = 20, .flags = RATE_IN_4430 },
+	{ .div = 21, .val = 21, .flags = RATE_IN_4430 },
+	{ .div = 22, .val = 22, .flags = RATE_IN_4430 },
+	{ .div = 23, .val = 23, .flags = RATE_IN_4430 },
+	{ .div = 24, .val = 24, .flags = RATE_IN_4430 },
+	{ .div = 25, .val = 25, .flags = RATE_IN_4430 },
+	{ .div = 26, .val = 26, .flags = RATE_IN_4430 },
+	{ .div = 27, .val = 27, .flags = RATE_IN_4430 },
+	{ .div = 28, .val = 28, .flags = RATE_IN_4430 },
+	{ .div = 29, .val = 29, .flags = RATE_IN_4430 },
+	{ .div = 30, .val = 30, .flags = RATE_IN_4430 },
+	{ .div = 31, .val = 31, .flags = RATE_IN_4430 },
+	{ .div = 0 },
+};
+
+static const struct clksel dpll_abe_m2x2_div[] = {
+	{ .parent = &dpll_abe_x2_ck, .rates = div31_1to31_rates },
+	{ .parent = NULL },
+};
+
+static struct clk dpll_abe_m2x2_ck = {
+	.name		= "dpll_abe_m2x2_ck",
+	.parent		= &dpll_abe_x2_ck,
+	.clksel		= dpll_abe_m2x2_div,
+	.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_ABE,
+	.clksel_mask	= OMAP4430_DPLL_CLKOUT_DIV_MASK,
+	.ops		= &clkops_null,
+	.recalc		= &omap2_clksel_recalc,
+	.round_rate	= &omap2_clksel_round_rate,
+	.set_rate	= &omap2_clksel_set_rate,
 };
 
 static struct clk abe_24m_fclk = {
 	.name		= "abe_24m_fclk",
 	.parent		= &dpll_abe_m2x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 8,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static const struct clksel_rate div3_1to4_rates[] = {
@@ -326,50 +390,10 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static const struct clksel_rate div31_1to31_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_4430 },
-	{ .div = 2, .val = 2, .flags = RATE_IN_4430 },
-	{ .div = 3, .val = 3, .flags = RATE_IN_4430 },
-	{ .div = 4, .val = 4, .flags = RATE_IN_4430 },
-	{ .div = 5, .val = 5, .flags = RATE_IN_4430 },
-	{ .div = 6, .val = 6, .flags = RATE_IN_4430 },
-	{ .div = 7, .val = 7, .flags = RATE_IN_4430 },
-	{ .div = 8, .val = 8, .flags = RATE_IN_4430 },
-	{ .div = 9, .val = 9, .flags = RATE_IN_4430 },
-	{ .div = 10, .val = 10, .flags = RATE_IN_4430 },
-	{ .div = 11, .val = 11, .flags = RATE_IN_4430 },
-	{ .div = 12, .val = 12, .flags = RATE_IN_4430 },
-	{ .div = 13, .val = 13, .flags = RATE_IN_4430 },
-	{ .div = 14, .val = 14, .flags = RATE_IN_4430 },
-	{ .div = 15, .val = 15, .flags = RATE_IN_4430 },
-	{ .div = 16, .val = 16, .flags = RATE_IN_4430 },
-	{ .div = 17, .val = 17, .flags = RATE_IN_4430 },
-	{ .div = 18, .val = 18, .flags = RATE_IN_4430 },
-	{ .div = 19, .val = 19, .flags = RATE_IN_4430 },
-	{ .div = 20, .val = 20, .flags = RATE_IN_4430 },
-	{ .div = 21, .val = 21, .flags = RATE_IN_4430 },
-	{ .div = 22, .val = 22, .flags = RATE_IN_4430 },
-	{ .div = 23, .val = 23, .flags = RATE_IN_4430 },
-	{ .div = 24, .val = 24, .flags = RATE_IN_4430 },
-	{ .div = 25, .val = 25, .flags = RATE_IN_4430 },
-	{ .div = 26, .val = 26, .flags = RATE_IN_4430 },
-	{ .div = 27, .val = 27, .flags = RATE_IN_4430 },
-	{ .div = 28, .val = 28, .flags = RATE_IN_4430 },
-	{ .div = 29, .val = 29, .flags = RATE_IN_4430 },
-	{ .div = 30, .val = 30, .flags = RATE_IN_4430 },
-	{ .div = 31, .val = 31, .flags = RATE_IN_4430 },
-	{ .div = 0 },
-};
-
-static const struct clksel dpll_abe_m3_div[] = {
-	{ .parent = &dpll_abe_ck, .rates = div31_1to31_rates },
-	{ .parent = NULL },
-};
-
-static struct clk dpll_abe_m3_ck = {
-	.name		= "dpll_abe_m3_ck",
-	.parent		= &dpll_abe_ck,
-	.clksel		= dpll_abe_m3_div,
+static struct clk dpll_abe_m3x2_ck = {
+	.name		= "dpll_abe_m3x2_ck",
+	.parent		= &dpll_abe_x2_ck,
+	.clksel		= dpll_abe_m2x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M3_DPLL_ABE,
 	.clksel_mask	= OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
 	.ops		= &clkops_null,
@@ -380,7 +404,7 @@
 
 static const struct clksel core_hsd_byp_clk_mux_sel[] = {
 	{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
-	{ .parent = &dpll_abe_m3_ck, .rates = div_1_1_rates },
+	{ .parent = &dpll_abe_m3x2_ck, .rates = div_1_1_rates },
 	{ .parent = NULL },
 };
 
@@ -424,15 +448,22 @@
 	.recalc		= &omap3_dpll_recalc,
 };
 
-static const struct clksel dpll_core_m6_div[] = {
-	{ .parent = &dpll_core_ck, .rates = div31_1to31_rates },
+static struct clk dpll_core_x2_ck = {
+	.name		= "dpll_core_x2_ck",
+	.parent		= &dpll_core_ck,
+	.ops		= &clkops_null,
+	.recalc		= &omap3_clkoutx2_recalc,
+};
+
+static const struct clksel dpll_core_m6x2_div[] = {
+	{ .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
 	{ .parent = NULL },
 };
 
-static struct clk dpll_core_m6_ck = {
-	.name		= "dpll_core_m6_ck",
-	.parent		= &dpll_core_ck,
-	.clksel		= dpll_core_m6_div,
+static struct clk dpll_core_m6x2_ck = {
+	.name		= "dpll_core_m6x2_ck",
+	.parent		= &dpll_core_x2_ck,
+	.clksel		= dpll_core_m6x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M6_DPLL_CORE,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK,
 	.ops		= &clkops_null,
@@ -443,7 +474,7 @@
 
 static const struct clksel dbgclk_mux_sel[] = {
 	{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
-	{ .parent = &dpll_core_m6_ck, .rates = div_1_1_rates },
+	{ .parent = &dpll_core_m6x2_ck, .rates = div_1_1_rates },
 	{ .parent = NULL },
 };
 
@@ -454,10 +485,15 @@
 	.recalc		= &followparent_recalc,
 };
 
+static const struct clksel dpll_core_m2_div[] = {
+	{ .parent = &dpll_core_ck, .rates = div31_1to31_rates },
+	{ .parent = NULL },
+};
+
 static struct clk dpll_core_m2_ck = {
 	.name		= "dpll_core_m2_ck",
 	.parent		= &dpll_core_ck,
-	.clksel		= dpll_core_m6_div,
+	.clksel		= dpll_core_m2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_CORE,
 	.clksel_mask	= OMAP4430_DPLL_CLKOUT_DIV_MASK,
 	.ops		= &clkops_null,
@@ -470,13 +506,14 @@
 	.name		= "ddrphy_ck",
 	.parent		= &dpll_core_m2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 2,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
-static struct clk dpll_core_m5_ck = {
-	.name		= "dpll_core_m5_ck",
-	.parent		= &dpll_core_ck,
-	.clksel		= dpll_core_m6_div,
+static struct clk dpll_core_m5x2_ck = {
+	.name		= "dpll_core_m5x2_ck",
+	.parent		= &dpll_core_x2_ck,
+	.clksel		= dpll_core_m6x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M5_DPLL_CORE,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
 	.ops		= &clkops_null,
@@ -486,13 +523,13 @@
 };
 
 static const struct clksel div_core_div[] = {
-	{ .parent = &dpll_core_m5_ck, .rates = div2_1to2_rates },
+	{ .parent = &dpll_core_m5x2_ck, .rates = div2_1to2_rates },
 	{ .parent = NULL },
 };
 
 static struct clk div_core_ck = {
 	.name		= "div_core_ck",
-	.parent		= &dpll_core_m5_ck,
+	.parent		= &dpll_core_m5x2_ck,
 	.clksel		= div_core_div,
 	.clksel_reg	= OMAP4430_CM_CLKSEL_CORE,
 	.clksel_mask	= OMAP4430_CLKSEL_CORE_MASK,
@@ -511,13 +548,13 @@
 };
 
 static const struct clksel div_iva_hs_clk_div[] = {
-	{ .parent = &dpll_core_m5_ck, .rates = div4_1to8_rates },
+	{ .parent = &dpll_core_m5x2_ck, .rates = div4_1to8_rates },
 	{ .parent = NULL },
 };
 
 static struct clk div_iva_hs_clk = {
 	.name		= "div_iva_hs_clk",
-	.parent		= &dpll_core_m5_ck,
+	.parent		= &dpll_core_m5x2_ck,
 	.clksel		= div_iva_hs_clk_div,
 	.clksel_reg	= OMAP4430_CM_BYPCLK_DPLL_IVA,
 	.clksel_mask	= OMAP4430_CLKSEL_0_1_MASK,
@@ -529,7 +566,7 @@
 
 static struct clk div_mpu_hs_clk = {
 	.name		= "div_mpu_hs_clk",
-	.parent		= &dpll_core_m5_ck,
+	.parent		= &dpll_core_m5x2_ck,
 	.clksel		= div_iva_hs_clk_div,
 	.clksel_reg	= OMAP4430_CM_BYPCLK_DPLL_MPU,
 	.clksel_mask	= OMAP4430_CLKSEL_0_1_MASK,
@@ -539,10 +576,10 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_core_m4_ck = {
-	.name		= "dpll_core_m4_ck",
-	.parent		= &dpll_core_ck,
-	.clksel		= dpll_core_m6_div,
+static struct clk dpll_core_m4x2_ck = {
+	.name		= "dpll_core_m4x2_ck",
+	.parent		= &dpll_core_x2_ck,
+	.clksel		= dpll_core_m6x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M4_DPLL_CORE,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
 	.ops		= &clkops_null,
@@ -553,15 +590,21 @@
 
 static struct clk dll_clk_div_ck = {
 	.name		= "dll_clk_div_ck",
-	.parent		= &dpll_core_m4_ck,
+	.parent		= &dpll_core_m4x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 2,
+	.recalc		= &omap_fixed_divisor_recalc,
+};
+
+static const struct clksel dpll_abe_m2_div[] = {
+	{ .parent = &dpll_abe_ck, .rates = div31_1to31_rates },
+	{ .parent = NULL },
 };
 
 static struct clk dpll_abe_m2_ck = {
 	.name		= "dpll_abe_m2_ck",
 	.parent		= &dpll_abe_ck,
-	.clksel		= dpll_abe_m3_div,
+	.clksel		= dpll_abe_m2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_ABE,
 	.clksel_mask	= OMAP4430_DPLL_CLKOUT_DIV_MASK,
 	.ops		= &clkops_null,
@@ -570,22 +613,24 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_core_m3_ck = {
-	.name		= "dpll_core_m3_ck",
-	.parent		= &dpll_core_ck,
-	.clksel		= dpll_core_m6_div,
+static struct clk dpll_core_m3x2_ck = {
+	.name		= "dpll_core_m3x2_ck",
+	.parent		= &dpll_core_x2_ck,
+	.clksel		= dpll_core_m6x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M3_DPLL_CORE,
 	.clksel_mask	= OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
-	.ops		= &clkops_null,
+	.ops		= &clkops_omap2_dflt,
+	.enable_reg	= OMAP4430_CM_DIV_M3_DPLL_CORE,
+	.enable_bit	= OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
 	.recalc		= &omap2_clksel_recalc,
 	.round_rate	= &omap2_clksel_round_rate,
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_core_m7_ck = {
-	.name		= "dpll_core_m7_ck",
-	.parent		= &dpll_core_ck,
-	.clksel		= dpll_core_m6_div,
+static struct clk dpll_core_m7x2_ck = {
+	.name		= "dpll_core_m7x2_ck",
+	.parent		= &dpll_core_x2_ck,
+	.clksel		= dpll_core_m6x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M7_DPLL_CORE,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK,
 	.ops		= &clkops_null,
@@ -603,8 +648,12 @@
 static struct clk iva_hsd_byp_clk_mux_ck = {
 	.name		= "iva_hsd_byp_clk_mux_ck",
 	.parent		= &sys_clkin_ck,
+	.clksel		= iva_hsd_byp_clk_mux_sel,
+	.init		= &omap2_init_clksel_parent,
+	.clksel_reg	= OMAP4430_CM_CLKSEL_DPLL_IVA,
+	.clksel_mask	= OMAP4430_DPLL_BYP_CLKSEL_MASK,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.recalc		= &omap2_clksel_recalc,
 };
 
 /* DPLL_IVA */
@@ -638,15 +687,22 @@
 	.set_rate	= &omap3_noncore_dpll_set_rate,
 };
 
-static const struct clksel dpll_iva_m4_div[] = {
-	{ .parent = &dpll_iva_ck, .rates = div31_1to31_rates },
+static struct clk dpll_iva_x2_ck = {
+	.name		= "dpll_iva_x2_ck",
+	.parent		= &dpll_iva_ck,
+	.ops		= &clkops_null,
+	.recalc		= &omap3_clkoutx2_recalc,
+};
+
+static const struct clksel dpll_iva_m4x2_div[] = {
+	{ .parent = &dpll_iva_x2_ck, .rates = div31_1to31_rates },
 	{ .parent = NULL },
 };
 
-static struct clk dpll_iva_m4_ck = {
-	.name		= "dpll_iva_m4_ck",
-	.parent		= &dpll_iva_ck,
-	.clksel		= dpll_iva_m4_div,
+static struct clk dpll_iva_m4x2_ck = {
+	.name		= "dpll_iva_m4x2_ck",
+	.parent		= &dpll_iva_x2_ck,
+	.clksel		= dpll_iva_m4x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M4_DPLL_IVA,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
 	.ops		= &clkops_null,
@@ -655,10 +711,10 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_iva_m5_ck = {
-	.name		= "dpll_iva_m5_ck",
-	.parent		= &dpll_iva_ck,
-	.clksel		= dpll_iva_m4_div,
+static struct clk dpll_iva_m5x2_ck = {
+	.name		= "dpll_iva_m5x2_ck",
+	.parent		= &dpll_iva_x2_ck,
+	.clksel		= dpll_iva_m4x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M5_DPLL_IVA,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
 	.ops		= &clkops_null,
@@ -717,9 +773,10 @@
 
 static struct clk per_hs_clk_div_ck = {
 	.name		= "per_hs_clk_div_ck",
-	.parent		= &dpll_abe_m3_ck,
+	.parent		= &dpll_abe_m3x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 2,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static const struct clksel per_hsd_byp_clk_mux_sel[] = {
@@ -787,29 +844,48 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_per_m2x2_ck = {
-	.name		= "dpll_per_m2x2_ck",
+static struct clk dpll_per_x2_ck = {
+	.name		= "dpll_per_x2_ck",
 	.parent		= &dpll_per_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.recalc		= &omap3_clkoutx2_recalc,
 };
 
-static struct clk dpll_per_m3_ck = {
-	.name		= "dpll_per_m3_ck",
-	.parent		= &dpll_per_ck,
-	.clksel		= dpll_per_m2_div,
-	.clksel_reg	= OMAP4430_CM_DIV_M3_DPLL_PER,
-	.clksel_mask	= OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+static const struct clksel dpll_per_m2x2_div[] = {
+	{ .parent = &dpll_per_x2_ck, .rates = div31_1to31_rates },
+	{ .parent = NULL },
+};
+
+static struct clk dpll_per_m2x2_ck = {
+	.name		= "dpll_per_m2x2_ck",
+	.parent		= &dpll_per_x2_ck,
+	.clksel		= dpll_per_m2x2_div,
+	.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_PER,
+	.clksel_mask	= OMAP4430_DPLL_CLKOUT_DIV_MASK,
 	.ops		= &clkops_null,
 	.recalc		= &omap2_clksel_recalc,
 	.round_rate	= &omap2_clksel_round_rate,
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_per_m4_ck = {
-	.name		= "dpll_per_m4_ck",
-	.parent		= &dpll_per_ck,
-	.clksel		= dpll_per_m2_div,
+static struct clk dpll_per_m3x2_ck = {
+	.name		= "dpll_per_m3x2_ck",
+	.parent		= &dpll_per_x2_ck,
+	.clksel		= dpll_per_m2x2_div,
+	.clksel_reg	= OMAP4430_CM_DIV_M3_DPLL_PER,
+	.clksel_mask	= OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+	.ops		= &clkops_omap2_dflt,
+	.enable_reg	= OMAP4430_CM_DIV_M3_DPLL_PER,
+	.enable_bit	= OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT,
+	.recalc		= &omap2_clksel_recalc,
+	.round_rate	= &omap2_clksel_round_rate,
+	.set_rate	= &omap2_clksel_set_rate,
+};
+
+static struct clk dpll_per_m4x2_ck = {
+	.name		= "dpll_per_m4x2_ck",
+	.parent		= &dpll_per_x2_ck,
+	.clksel		= dpll_per_m2x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M4_DPLL_PER,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
 	.ops		= &clkops_null,
@@ -818,10 +894,10 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_per_m5_ck = {
-	.name		= "dpll_per_m5_ck",
-	.parent		= &dpll_per_ck,
-	.clksel		= dpll_per_m2_div,
+static struct clk dpll_per_m5x2_ck = {
+	.name		= "dpll_per_m5x2_ck",
+	.parent		= &dpll_per_x2_ck,
+	.clksel		= dpll_per_m2x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M5_DPLL_PER,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
 	.ops		= &clkops_null,
@@ -830,10 +906,10 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_per_m6_ck = {
-	.name		= "dpll_per_m6_ck",
-	.parent		= &dpll_per_ck,
-	.clksel		= dpll_per_m2_div,
+static struct clk dpll_per_m6x2_ck = {
+	.name		= "dpll_per_m6x2_ck",
+	.parent		= &dpll_per_x2_ck,
+	.clksel		= dpll_per_m2x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M6_DPLL_PER,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK,
 	.ops		= &clkops_null,
@@ -842,10 +918,10 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
-static struct clk dpll_per_m7_ck = {
-	.name		= "dpll_per_m7_ck",
-	.parent		= &dpll_per_ck,
-	.clksel		= dpll_per_m2_div,
+static struct clk dpll_per_m7x2_ck = {
+	.name		= "dpll_per_m7x2_ck",
+	.parent		= &dpll_per_x2_ck,
+	.clksel		= dpll_per_m2x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M7_DPLL_PER,
 	.clksel_mask	= OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK,
 	.ops		= &clkops_null,
@@ -868,6 +944,7 @@
 	.enable_mask	= OMAP4430_DPLL_EN_MASK,
 	.autoidle_mask	= OMAP4430_AUTO_DPLL_MODE_MASK,
 	.idlest_mask	= OMAP4430_ST_DPLL_CLK_MASK,
+	.sddiv_mask	= OMAP4430_DPLL_SD_DIV_MASK,
 	.max_multiplier	= OMAP4430_MAX_DPLL_MULT,
 	.max_divider	= OMAP4430_MAX_DPLL_DIV,
 	.min_divider	= 1,
@@ -885,14 +962,21 @@
 	.set_rate	= &omap3_noncore_dpll_set_rate,
 };
 
+static struct clk dpll_unipro_x2_ck = {
+	.name		= "dpll_unipro_x2_ck",
+	.parent		= &dpll_unipro_ck,
+	.ops		= &clkops_null,
+	.recalc		= &omap3_clkoutx2_recalc,
+};
+
 static const struct clksel dpll_unipro_m2x2_div[] = {
-	{ .parent = &dpll_unipro_ck, .rates = div31_1to31_rates },
+	{ .parent = &dpll_unipro_x2_ck, .rates = div31_1to31_rates },
 	{ .parent = NULL },
 };
 
 static struct clk dpll_unipro_m2x2_ck = {
 	.name		= "dpll_unipro_m2x2_ck",
-	.parent		= &dpll_unipro_ck,
+	.parent		= &dpll_unipro_x2_ck,
 	.clksel		= dpll_unipro_m2x2_div,
 	.clksel_reg	= OMAP4430_CM_DIV_M2_DPLL_UNIPRO,
 	.clksel_mask	= OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -904,16 +988,17 @@
 
 static struct clk usb_hs_clk_div_ck = {
 	.name		= "usb_hs_clk_div_ck",
-	.parent		= &dpll_abe_m3_ck,
+	.parent		= &dpll_abe_m3x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 3,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 /* DPLL_USB */
 static struct dpll_data dpll_usb_dd = {
 	.mult_div1_reg	= OMAP4430_CM_CLKSEL_DPLL_USB,
 	.clk_bypass	= &usb_hs_clk_div_ck,
-	.flags		= DPLL_J_TYPE | DPLL_NO_DCO_SEL,
+	.flags		= DPLL_J_TYPE,
 	.clk_ref	= &sys_clkin_ck,
 	.control_reg	= OMAP4430_CM_CLKMODE_DPLL_USB,
 	.modes		= (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
@@ -967,7 +1052,7 @@
 
 static const struct clksel ducati_clk_mux_sel[] = {
 	{ .parent = &div_core_ck, .rates = div_1_0_rates },
-	{ .parent = &dpll_per_m6_ck, .rates = div_1_1_rates },
+	{ .parent = &dpll_per_m6x2_ck, .rates = div_1_1_rates },
 	{ .parent = NULL },
 };
 
@@ -986,21 +1071,24 @@
 	.name		= "func_12m_fclk",
 	.parent		= &dpll_per_m2x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 16,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static struct clk func_24m_clk = {
 	.name		= "func_24m_clk",
 	.parent		= &dpll_per_m2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 4,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static struct clk func_24mc_fclk = {
 	.name		= "func_24mc_fclk",
 	.parent		= &dpll_per_m2x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 8,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static const struct clksel_rate div2_4to8_rates[] = {
@@ -1030,7 +1118,8 @@
 	.name		= "func_48mc_fclk",
 	.parent		= &dpll_per_m2x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 4,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static const struct clksel_rate div2_2to4_rates[] = {
@@ -1040,13 +1129,13 @@
 };
 
 static const struct clksel func_64m_fclk_div[] = {
-	{ .parent = &dpll_per_m4_ck, .rates = div2_2to4_rates },
+	{ .parent = &dpll_per_m4x2_ck, .rates = div2_2to4_rates },
 	{ .parent = NULL },
 };
 
 static struct clk func_64m_fclk = {
 	.name		= "func_64m_fclk",
-	.parent		= &dpll_per_m4_ck,
+	.parent		= &dpll_per_m4x2_ck,
 	.clksel		= func_64m_fclk_div,
 	.clksel_reg	= OMAP4430_CM_SCALE_FCLK,
 	.clksel_mask	= OMAP4430_SCALE_FCLK_MASK,
@@ -1147,7 +1236,8 @@
 	.name		= "lp_clk_div_ck",
 	.parent		= &dpll_abe_m2x2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 16,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static const struct clksel l4_wkup_clk_mux_sel[] = {
@@ -1215,12 +1305,13 @@
 	.name		= "per_abe_24m_fclk",
 	.parent		= &dpll_abe_m2_ck,
 	.ops		= &clkops_null,
-	.recalc		= &followparent_recalc,
+	.fixed_div	= 4,
+	.recalc		= &omap_fixed_divisor_recalc,
 };
 
 static const struct clksel pmd_stm_clock_mux_sel[] = {
 	{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
-	{ .parent = &dpll_core_m6_ck, .rates = div_1_1_rates },
+	{ .parent = &dpll_core_m6x2_ck, .rates = div_1_1_rates },
 	{ .parent = &tie_low_clock_ck, .rates = div_1_2_rates },
 	{ .parent = NULL },
 };
@@ -1354,7 +1445,7 @@
 	.enable_reg	= OMAP4430_CM_TESLA_TESLA_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
 	.clkdm_name	= "tesla_clkdm",
-	.parent		= &dpll_iva_m4_ck,
+	.parent		= &dpll_iva_m4x2_ck,
 	.recalc		= &followparent_recalc,
 };
 
@@ -1384,7 +1475,7 @@
 	.enable_reg	= OMAP4430_CM_DSS_DSS_CLKCTRL,
 	.enable_bit	= OMAP4430_OPTFCLKEN_DSSCLK_SHIFT,
 	.clkdm_name	= "l3_dss_clkdm",
-	.parent		= &dpll_per_m5_ck,
+	.parent		= &dpll_per_m5x2_ck,
 	.recalc		= &followparent_recalc,
 };
 
@@ -1441,14 +1532,14 @@
 };
 
 static const struct clksel fdif_fclk_div[] = {
-	{ .parent = &dpll_per_m4_ck, .rates = div3_1to4_rates },
+	{ .parent = &dpll_per_m4x2_ck, .rates = div3_1to4_rates },
 	{ .parent = NULL },
 };
 
 /* Merged fdif_fclk into fdif */
 static struct clk fdif_fck = {
 	.name		= "fdif_fck",
-	.parent		= &dpll_per_m4_ck,
+	.parent		= &dpll_per_m4x2_ck,
 	.clksel		= fdif_fclk_div,
 	.clksel_reg	= OMAP4430_CM_CAM_FDIF_CLKCTRL,
 	.clksel_mask	= OMAP4430_CLKSEL_FCLK_MASK,
@@ -1602,15 +1693,15 @@
 };
 
 static const struct clksel sgx_clk_mux_sel[] = {
-	{ .parent = &dpll_core_m7_ck, .rates = div_1_0_rates },
-	{ .parent = &dpll_per_m7_ck, .rates = div_1_1_rates },
+	{ .parent = &dpll_core_m7x2_ck, .rates = div_1_0_rates },
+	{ .parent = &dpll_per_m7x2_ck, .rates = div_1_1_rates },
 	{ .parent = NULL },
 };
 
 /* Merged sgx_clk_mux into gpu */
 static struct clk gpu_fck = {
 	.name		= "gpu_fck",
-	.parent		= &dpll_core_m7_ck,
+	.parent		= &dpll_core_m7x2_ck,
 	.clksel		= sgx_clk_mux_sel,
 	.init		= &omap2_init_clksel_parent,
 	.clksel_reg	= OMAP4430_CM_GFX_GFX_CLKCTRL,
@@ -1729,7 +1820,7 @@
 	.enable_reg	= OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
 	.clkdm_name	= "ivahd_clkdm",
-	.parent		= &dpll_iva_m5_ck,
+	.parent		= &dpll_iva_m5x2_ck,
 	.recalc		= &followparent_recalc,
 };
 
@@ -1749,6 +1840,7 @@
 	.enable_reg	= OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
 	.clkdm_name	= "l3_instr_clkdm",
+	.flags		= ENABLE_ON_INIT,
 	.parent		= &l3_div_ck,
 	.recalc		= &followparent_recalc,
 };
@@ -1759,6 +1851,7 @@
 	.enable_reg	= OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
 	.clkdm_name	= "l3_instr_clkdm",
+	.flags		= ENABLE_ON_INIT,
 	.parent		= &l3_div_ck,
 	.recalc		= &followparent_recalc,
 };
@@ -2063,6 +2156,7 @@
 	.enable_reg	= OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
 	.clkdm_name	= "l3_instr_clkdm",
+	.flags		= ENABLE_ON_INIT,
 	.parent		= &l3_div_ck,
 	.recalc		= &followparent_recalc,
 };
@@ -2093,7 +2187,7 @@
 	.enable_reg	= OMAP4430_CM_IVAHD_SL2_CLKCTRL,
 	.enable_bit	= OMAP4430_MODULEMODE_HWCTRL,
 	.clkdm_name	= "ivahd_clkdm",
-	.parent		= &dpll_iva_m5_ck,
+	.parent		= &dpll_iva_m5x2_ck,
 	.recalc		= &followparent_recalc,
 };
 
@@ -2438,36 +2532,6 @@
 	.recalc		= &followparent_recalc,
 };
 
-static struct clk usb_host_hs_utmi_p3_clk = {
-	.name		= "usb_host_hs_utmi_p3_clk",
-	.ops		= &clkops_omap2_dflt,
-	.enable_reg	= OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-	.enable_bit	= OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT,
-	.clkdm_name	= "l3_init_clkdm",
-	.parent		= &init_60m_fclk,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk usb_host_hs_hsic60m_p1_clk = {
-	.name		= "usb_host_hs_hsic60m_p1_clk",
-	.ops		= &clkops_omap2_dflt,
-	.enable_reg	= OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-	.enable_bit	= OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT,
-	.clkdm_name	= "l3_init_clkdm",
-	.parent		= &init_60m_fclk,
-	.recalc		= &followparent_recalc,
-};
-
-static struct clk usb_host_hs_hsic60m_p2_clk = {
-	.name		= "usb_host_hs_hsic60m_p2_clk",
-	.ops		= &clkops_omap2_dflt,
-	.enable_reg	= OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
-	.enable_bit	= OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT,
-	.clkdm_name	= "l3_init_clkdm",
-	.parent		= &init_60m_fclk,
-	.recalc		= &followparent_recalc,
-};
-
 static const struct clksel utmi_p1_gfclk_sel[] = {
 	{ .parent = &init_60m_fclk, .rates = div_1_0_rates },
 	{ .parent = &xclk60mhsp1_ck, .rates = div_1_1_rates },
@@ -2522,6 +2586,16 @@
 	.recalc		= &followparent_recalc,
 };
 
+static struct clk usb_host_hs_utmi_p3_clk = {
+	.name		= "usb_host_hs_utmi_p3_clk",
+	.ops		= &clkops_omap2_dflt,
+	.enable_reg	= OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+	.enable_bit	= OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT,
+	.clkdm_name	= "l3_init_clkdm",
+	.parent		= &init_60m_fclk,
+	.recalc		= &followparent_recalc,
+};
+
 static struct clk usb_host_hs_hsic480m_p1_clk = {
 	.name		= "usb_host_hs_hsic480m_p1_clk",
 	.ops		= &clkops_omap2_dflt,
@@ -2532,6 +2606,26 @@
 	.recalc		= &followparent_recalc,
 };
 
+static struct clk usb_host_hs_hsic60m_p1_clk = {
+	.name		= "usb_host_hs_hsic60m_p1_clk",
+	.ops		= &clkops_omap2_dflt,
+	.enable_reg	= OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+	.enable_bit	= OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT,
+	.clkdm_name	= "l3_init_clkdm",
+	.parent		= &init_60m_fclk,
+	.recalc		= &followparent_recalc,
+};
+
+static struct clk usb_host_hs_hsic60m_p2_clk = {
+	.name		= "usb_host_hs_hsic60m_p2_clk",
+	.ops		= &clkops_omap2_dflt,
+	.enable_reg	= OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+	.enable_bit	= OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT,
+	.clkdm_name	= "l3_init_clkdm",
+	.parent		= &init_60m_fclk,
+	.recalc		= &followparent_recalc,
+};
+
 static struct clk usb_host_hs_hsic480m_p2_clk = {
 	.name		= "usb_host_hs_hsic480m_p2_clk",
 	.ops		= &clkops_omap2_dflt,
@@ -2656,13 +2750,13 @@
 };
 
 static const struct clksel usim_fclk_div[] = {
-	{ .parent = &dpll_per_m4_ck, .rates = div2_14to18_rates },
+	{ .parent = &dpll_per_m4x2_ck, .rates = div2_14to18_rates },
 	{ .parent = NULL },
 };
 
 static struct clk usim_ck = {
 	.name		= "usim_ck",
-	.parent		= &dpll_per_m4_ck,
+	.parent		= &dpll_per_m4x2_ck,
 	.clksel		= usim_fclk_div,
 	.clksel_reg	= OMAP4430_CM_WKUP_USIM_CLKCTRL,
 	.clksel_mask	= OMAP4430_CLKSEL_DIV_MASK,
@@ -2747,6 +2841,168 @@
 	.set_rate	= &omap2_clksel_set_rate,
 };
 
+/* SCRM aux clk nodes */
+
+static const struct clksel auxclk_sel[] = {
+	{ .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+	{ .parent = &dpll_core_m3x2_ck, .rates = div_1_1_rates },
+	{ .parent = &dpll_per_m3x2_ck, .rates = div_1_2_rates },
+	{ .parent = NULL },
+};
+
+static struct clk auxclk0_ck = {
+	.name		= "auxclk0_ck",
+	.parent		= &sys_clkin_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_omap2_dflt,
+	.clksel		= auxclk_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLK0,
+	.clksel_mask	= OMAP4_SRCSELECT_MASK,
+	.recalc		= &omap2_clksel_recalc,
+	.enable_reg	= OMAP4_SCRM_AUXCLK0,
+	.enable_bit	= OMAP4_ENABLE_SHIFT,
+};
+
+static struct clk auxclk1_ck = {
+	.name		= "auxclk1_ck",
+	.parent		= &sys_clkin_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_omap2_dflt,
+	.clksel		= auxclk_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLK1,
+	.clksel_mask	= OMAP4_SRCSELECT_MASK,
+	.recalc		= &omap2_clksel_recalc,
+	.enable_reg	= OMAP4_SCRM_AUXCLK1,
+	.enable_bit	= OMAP4_ENABLE_SHIFT,
+};
+
+static struct clk auxclk2_ck = {
+	.name		= "auxclk2_ck",
+	.parent		= &sys_clkin_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_omap2_dflt,
+	.clksel		= auxclk_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLK2,
+	.clksel_mask	= OMAP4_SRCSELECT_MASK,
+	.recalc		= &omap2_clksel_recalc,
+	.enable_reg	= OMAP4_SCRM_AUXCLK2,
+	.enable_bit	= OMAP4_ENABLE_SHIFT,
+};
+static struct clk auxclk3_ck = {
+	.name		= "auxclk3_ck",
+	.parent		= &sys_clkin_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_omap2_dflt,
+	.clksel		= auxclk_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLK3,
+	.clksel_mask	= OMAP4_SRCSELECT_MASK,
+	.recalc		= &omap2_clksel_recalc,
+	.enable_reg	= OMAP4_SCRM_AUXCLK3,
+	.enable_bit	= OMAP4_ENABLE_SHIFT,
+};
+
+static struct clk auxclk4_ck = {
+	.name		= "auxclk4_ck",
+	.parent		= &sys_clkin_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_omap2_dflt,
+	.clksel		= auxclk_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLK4,
+	.clksel_mask	= OMAP4_SRCSELECT_MASK,
+	.recalc		= &omap2_clksel_recalc,
+	.enable_reg	= OMAP4_SCRM_AUXCLK4,
+	.enable_bit	= OMAP4_ENABLE_SHIFT,
+};
+
+static struct clk auxclk5_ck = {
+	.name		= "auxclk5_ck",
+	.parent		= &sys_clkin_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_omap2_dflt,
+	.clksel		= auxclk_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLK5,
+	.clksel_mask	= OMAP4_SRCSELECT_MASK,
+	.recalc		= &omap2_clksel_recalc,
+	.enable_reg	= OMAP4_SCRM_AUXCLK5,
+	.enable_bit	= OMAP4_ENABLE_SHIFT,
+};
+
+static const struct clksel auxclkreq_sel[] = {
+	{ .parent = &auxclk0_ck, .rates = div_1_0_rates },
+	{ .parent = &auxclk1_ck, .rates = div_1_1_rates },
+	{ .parent = &auxclk2_ck, .rates = div_1_2_rates },
+	{ .parent = &auxclk3_ck, .rates = div_1_3_rates },
+	{ .parent = &auxclk4_ck, .rates = div_1_4_rates },
+	{ .parent = &auxclk5_ck, .rates = div_1_5_rates },
+	{ .parent = NULL },
+};
+
+static struct clk auxclkreq0_ck = {
+	.name		= "auxclkreq0_ck",
+	.parent		= &auxclk0_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_null,
+	.clksel         = auxclkreq_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLKREQ0,
+	.clksel_mask	= OMAP4_MAPPING_MASK,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk auxclkreq1_ck = {
+	.name		= "auxclkreq1_ck",
+	.parent		= &auxclk1_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_null,
+	.clksel         = auxclkreq_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLKREQ1,
+	.clksel_mask	= OMAP4_MAPPING_MASK,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk auxclkreq2_ck = {
+	.name		= "auxclkreq2_ck",
+	.parent		= &auxclk2_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_null,
+	.clksel         = auxclkreq_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLKREQ2,
+	.clksel_mask	= OMAP4_MAPPING_MASK,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk auxclkreq3_ck = {
+	.name		= "auxclkreq3_ck",
+	.parent		= &auxclk3_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_null,
+	.clksel         = auxclkreq_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLKREQ3,
+	.clksel_mask	= OMAP4_MAPPING_MASK,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk auxclkreq4_ck = {
+	.name		= "auxclkreq4_ck",
+	.parent		= &auxclk4_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_null,
+	.clksel         = auxclkreq_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLKREQ4,
+	.clksel_mask	= OMAP4_MAPPING_MASK,
+	.recalc		= &omap2_clksel_recalc,
+};
+
+static struct clk auxclkreq5_ck = {
+	.name		= "auxclkreq5_ck",
+	.parent		= &auxclk5_ck,
+	.init		= &omap2_init_clksel_parent,
+	.ops		= &clkops_null,
+	.clksel         = auxclkreq_sel,
+	.clksel_reg	= OMAP4_SCRM_AUXCLKREQ5,
+	.clksel_mask	= OMAP4_MAPPING_MASK,
+	.recalc		= &omap2_clksel_recalc,
+};
+
 /*
  * clkdev
  */
@@ -2774,43 +3030,48 @@
 	CLK(NULL,	"abe_dpll_bypass_clk_mux_ck",	&abe_dpll_bypass_clk_mux_ck,	CK_443X),
 	CLK(NULL,	"abe_dpll_refclk_mux_ck",	&abe_dpll_refclk_mux_ck,	CK_443X),
 	CLK(NULL,	"dpll_abe_ck",			&dpll_abe_ck,	CK_443X),
+	CLK(NULL,	"dpll_abe_x2_ck",		&dpll_abe_x2_ck,	CK_443X),
 	CLK(NULL,	"dpll_abe_m2x2_ck",		&dpll_abe_m2x2_ck,	CK_443X),
 	CLK(NULL,	"abe_24m_fclk",			&abe_24m_fclk,	CK_443X),
 	CLK(NULL,	"abe_clk",			&abe_clk,	CK_443X),
 	CLK(NULL,	"aess_fclk",			&aess_fclk,	CK_443X),
-	CLK(NULL,	"dpll_abe_m3_ck",		&dpll_abe_m3_ck,	CK_443X),
+	CLK(NULL,	"dpll_abe_m3x2_ck",		&dpll_abe_m3x2_ck,	CK_443X),
 	CLK(NULL,	"core_hsd_byp_clk_mux_ck",	&core_hsd_byp_clk_mux_ck,	CK_443X),
 	CLK(NULL,	"dpll_core_ck",			&dpll_core_ck,	CK_443X),
-	CLK(NULL,	"dpll_core_m6_ck",		&dpll_core_m6_ck,	CK_443X),
+	CLK(NULL,	"dpll_core_x2_ck",		&dpll_core_x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_core_m6x2_ck",		&dpll_core_m6x2_ck,	CK_443X),
 	CLK(NULL,	"dbgclk_mux_ck",		&dbgclk_mux_ck,	CK_443X),
 	CLK(NULL,	"dpll_core_m2_ck",		&dpll_core_m2_ck,	CK_443X),
 	CLK(NULL,	"ddrphy_ck",			&ddrphy_ck,	CK_443X),
-	CLK(NULL,	"dpll_core_m5_ck",		&dpll_core_m5_ck,	CK_443X),
+	CLK(NULL,	"dpll_core_m5x2_ck",		&dpll_core_m5x2_ck,	CK_443X),
 	CLK(NULL,	"div_core_ck",			&div_core_ck,	CK_443X),
 	CLK(NULL,	"div_iva_hs_clk",		&div_iva_hs_clk,	CK_443X),
 	CLK(NULL,	"div_mpu_hs_clk",		&div_mpu_hs_clk,	CK_443X),
-	CLK(NULL,	"dpll_core_m4_ck",		&dpll_core_m4_ck,	CK_443X),
+	CLK(NULL,	"dpll_core_m4x2_ck",		&dpll_core_m4x2_ck,	CK_443X),
 	CLK(NULL,	"dll_clk_div_ck",		&dll_clk_div_ck,	CK_443X),
 	CLK(NULL,	"dpll_abe_m2_ck",		&dpll_abe_m2_ck,	CK_443X),
-	CLK(NULL,	"dpll_core_m3_ck",		&dpll_core_m3_ck,	CK_443X),
-	CLK(NULL,	"dpll_core_m7_ck",		&dpll_core_m7_ck,	CK_443X),
+	CLK(NULL,	"dpll_core_m3x2_ck",		&dpll_core_m3x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_core_m7x2_ck",		&dpll_core_m7x2_ck,	CK_443X),
 	CLK(NULL,	"iva_hsd_byp_clk_mux_ck",	&iva_hsd_byp_clk_mux_ck,	CK_443X),
 	CLK(NULL,	"dpll_iva_ck",			&dpll_iva_ck,	CK_443X),
-	CLK(NULL,	"dpll_iva_m4_ck",		&dpll_iva_m4_ck,	CK_443X),
-	CLK(NULL,	"dpll_iva_m5_ck",		&dpll_iva_m5_ck,	CK_443X),
+	CLK(NULL,	"dpll_iva_x2_ck",		&dpll_iva_x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_iva_m4x2_ck",		&dpll_iva_m4x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_iva_m5x2_ck",		&dpll_iva_m5x2_ck,	CK_443X),
 	CLK(NULL,	"dpll_mpu_ck",			&dpll_mpu_ck,	CK_443X),
 	CLK(NULL,	"dpll_mpu_m2_ck",		&dpll_mpu_m2_ck,	CK_443X),
 	CLK(NULL,	"per_hs_clk_div_ck",		&per_hs_clk_div_ck,	CK_443X),
 	CLK(NULL,	"per_hsd_byp_clk_mux_ck",	&per_hsd_byp_clk_mux_ck,	CK_443X),
 	CLK(NULL,	"dpll_per_ck",			&dpll_per_ck,	CK_443X),
 	CLK(NULL,	"dpll_per_m2_ck",		&dpll_per_m2_ck,	CK_443X),
+	CLK(NULL,	"dpll_per_x2_ck",		&dpll_per_x2_ck,	CK_443X),
 	CLK(NULL,	"dpll_per_m2x2_ck",		&dpll_per_m2x2_ck,	CK_443X),
-	CLK(NULL,	"dpll_per_m3_ck",		&dpll_per_m3_ck,	CK_443X),
-	CLK(NULL,	"dpll_per_m4_ck",		&dpll_per_m4_ck,	CK_443X),
-	CLK(NULL,	"dpll_per_m5_ck",		&dpll_per_m5_ck,	CK_443X),
-	CLK(NULL,	"dpll_per_m6_ck",		&dpll_per_m6_ck,	CK_443X),
-	CLK(NULL,	"dpll_per_m7_ck",		&dpll_per_m7_ck,	CK_443X),
+	CLK(NULL,	"dpll_per_m3x2_ck",		&dpll_per_m3x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_per_m4x2_ck",		&dpll_per_m4x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_per_m5x2_ck",		&dpll_per_m5x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_per_m6x2_ck",		&dpll_per_m6x2_ck,	CK_443X),
+	CLK(NULL,	"dpll_per_m7x2_ck",		&dpll_per_m7x2_ck,	CK_443X),
 	CLK(NULL,	"dpll_unipro_ck",		&dpll_unipro_ck,	CK_443X),
+	CLK(NULL,	"dpll_unipro_x2_ck",		&dpll_unipro_x2_ck,	CK_443X),
 	CLK(NULL,	"dpll_unipro_m2x2_ck",		&dpll_unipro_m2x2_ck,	CK_443X),
 	CLK(NULL,	"usb_hs_clk_div_ck",		&usb_hs_clk_div_ck,	CK_443X),
 	CLK(NULL,	"dpll_usb_ck",			&dpll_usb_ck,	CK_443X),
@@ -2856,26 +3117,26 @@
 	CLK(NULL,	"emif2_fck",			&emif2_fck,	CK_443X),
 	CLK(NULL,	"fdif_fck",			&fdif_fck,	CK_443X),
 	CLK(NULL,	"fpka_fck",			&fpka_fck,	CK_443X),
-	CLK(NULL,	"gpio1_dbck",			&gpio1_dbclk,	CK_443X),
+	CLK(NULL,	"gpio1_dbclk",			&gpio1_dbclk,	CK_443X),
 	CLK(NULL,	"gpio1_ick",			&gpio1_ick,	CK_443X),
-	CLK(NULL,	"gpio2_dbck",			&gpio2_dbclk,	CK_443X),
+	CLK(NULL,	"gpio2_dbclk",			&gpio2_dbclk,	CK_443X),
 	CLK(NULL,	"gpio2_ick",			&gpio2_ick,	CK_443X),
-	CLK(NULL,	"gpio3_dbck",			&gpio3_dbclk,	CK_443X),
+	CLK(NULL,	"gpio3_dbclk",			&gpio3_dbclk,	CK_443X),
 	CLK(NULL,	"gpio3_ick",			&gpio3_ick,	CK_443X),
-	CLK(NULL,	"gpio4_dbck",			&gpio4_dbclk,	CK_443X),
+	CLK(NULL,	"gpio4_dbclk",			&gpio4_dbclk,	CK_443X),
 	CLK(NULL,	"gpio4_ick",			&gpio4_ick,	CK_443X),
-	CLK(NULL,	"gpio5_dbck",			&gpio5_dbclk,	CK_443X),
+	CLK(NULL,	"gpio5_dbclk",			&gpio5_dbclk,	CK_443X),
 	CLK(NULL,	"gpio5_ick",			&gpio5_ick,	CK_443X),
-	CLK(NULL,	"gpio6_dbck",			&gpio6_dbclk,	CK_443X),
+	CLK(NULL,	"gpio6_dbclk",			&gpio6_dbclk,	CK_443X),
 	CLK(NULL,	"gpio6_ick",			&gpio6_ick,	CK_443X),
 	CLK(NULL,	"gpmc_ick",			&gpmc_ick,	CK_443X),
 	CLK(NULL,	"gpu_fck",			&gpu_fck,	CK_443X),
 	CLK("omap2_hdq.0",	"fck",				&hdq1w_fck,	CK_443X),
 	CLK(NULL,	"hsi_fck",			&hsi_fck,	CK_443X),
-	CLK("i2c_omap.1",	"fck",				&i2c1_fck,	CK_443X),
-	CLK("i2c_omap.2",	"fck",				&i2c2_fck,	CK_443X),
-	CLK("i2c_omap.3",	"fck",				&i2c3_fck,	CK_443X),
-	CLK("i2c_omap.4",	"fck",				&i2c4_fck,	CK_443X),
+	CLK("omap_i2c.1",	"fck",				&i2c1_fck,	CK_443X),
+	CLK("omap_i2c.2",	"fck",				&i2c2_fck,	CK_443X),
+	CLK("omap_i2c.3",	"fck",				&i2c3_fck,	CK_443X),
+	CLK("omap_i2c.4",	"fck",				&i2c4_fck,	CK_443X),
 	CLK(NULL,	"ipu_fck",			&ipu_fck,	CK_443X),
 	CLK(NULL,	"iss_ctrlclk",			&iss_ctrlclk,	CK_443X),
 	CLK(NULL,	"iss_fck",			&iss_fck,	CK_443X),
@@ -2937,29 +3198,35 @@
 	CLK(NULL,	"uart3_fck",			&uart3_fck,	CK_443X),
 	CLK(NULL,	"uart4_fck",			&uart4_fck,	CK_443X),
 	CLK(NULL,	"usb_host_fs_fck",		&usb_host_fs_fck,	CK_443X),
-	CLK(NULL,	"usb_host_hs_utmi_p3_clk",	&usb_host_hs_utmi_p3_clk,	CK_443X),
-	CLK(NULL,	"usb_host_hs_hsic60m_p1_clk",	&usb_host_hs_hsic60m_p1_clk,	CK_443X),
-	CLK(NULL,	"usb_host_hs_hsic60m_p2_clk",	&usb_host_hs_hsic60m_p2_clk,	CK_443X),
+	CLK("ehci-omap.0",	"fs_fck",		&usb_host_fs_fck,	CK_443X),
 	CLK(NULL,	"utmi_p1_gfclk",		&utmi_p1_gfclk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_utmi_p1_clk",	&usb_host_hs_utmi_p1_clk,	CK_443X),
 	CLK(NULL,	"utmi_p2_gfclk",		&utmi_p2_gfclk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_utmi_p2_clk",	&usb_host_hs_utmi_p2_clk,	CK_443X),
+	CLK(NULL,	"usb_host_hs_utmi_p3_clk",	&usb_host_hs_utmi_p3_clk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_hsic480m_p1_clk",	&usb_host_hs_hsic480m_p1_clk,	CK_443X),
+	CLK(NULL,	"usb_host_hs_hsic60m_p1_clk",	&usb_host_hs_hsic60m_p1_clk,	CK_443X),
+	CLK(NULL,	"usb_host_hs_hsic60m_p2_clk",	&usb_host_hs_hsic60m_p2_clk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_hsic480m_p2_clk",	&usb_host_hs_hsic480m_p2_clk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_func48mclk",	&usb_host_hs_func48mclk,	CK_443X),
 	CLK(NULL,	"usb_host_hs_fck",		&usb_host_hs_fck,	CK_443X),
+	CLK("ehci-omap.0",	"hs_fck",		&usb_host_hs_fck,	CK_443X),
+	CLK("ehci-omap.0",	"usbhost_ick",		&dummy_ck,		CK_443X),
 	CLK(NULL,	"otg_60m_gfclk",		&otg_60m_gfclk,	CK_443X),
 	CLK(NULL,	"usb_otg_hs_xclk",		&usb_otg_hs_xclk,	CK_443X),
-	CLK("musb_hdrc",	"ick",				&usb_otg_hs_ick,	CK_443X),
+	CLK("musb-omap2430",	"ick",				&usb_otg_hs_ick,	CK_443X),
 	CLK(NULL,	"usb_phy_cm_clk32k",		&usb_phy_cm_clk32k,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_usb_ch2_clk",	&usb_tll_hs_usb_ch2_clk,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_usb_ch0_clk",	&usb_tll_hs_usb_ch0_clk,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_usb_ch1_clk",	&usb_tll_hs_usb_ch1_clk,	CK_443X),
 	CLK(NULL,	"usb_tll_hs_ick",		&usb_tll_hs_ick,	CK_443X),
+	CLK("ehci-omap.0",	"usbtll_ick",		&usb_tll_hs_ick,	CK_443X),
+	CLK("ehci-omap.0",	"usbtll_fck",		&dummy_ck,	CK_443X),
 	CLK(NULL,	"usim_ck",			&usim_ck,	CK_443X),
 	CLK(NULL,	"usim_fclk",			&usim_fclk,	CK_443X),
 	CLK(NULL,	"usim_fck",			&usim_fck,	CK_443X),
 	CLK("omap_wdt",	"fck",				&wd_timer2_fck,	CK_443X),
+	CLK(NULL,	"mailboxes_ick",		&dummy_ck,	CK_443X),
 	CLK(NULL,	"wd_timer3_fck",		&wd_timer3_fck,	CK_443X),
 	CLK(NULL,	"stm_clk_div_ck",		&stm_clk_div_ck,	CK_443X),
 	CLK(NULL,	"trace_clk_div_ck",		&trace_clk_div_ck,	CK_443X),
@@ -2975,10 +3242,10 @@
 	CLK(NULL,	"gpt9_ick",			&dummy_ck,	CK_443X),
 	CLK(NULL,	"gpt10_ick",			&dummy_ck,	CK_443X),
 	CLK(NULL,	"gpt11_ick",			&dummy_ck,	CK_443X),
-	CLK("i2c_omap.1",	"ick",				&dummy_ck,	CK_443X),
-	CLK("i2c_omap.2",	"ick",				&dummy_ck,	CK_443X),
-	CLK("i2c_omap.3",	"ick",				&dummy_ck,	CK_443X),
-	CLK("i2c_omap.4",	"ick",				&dummy_ck,	CK_443X),
+	CLK("omap_i2c.1",	"ick",				&dummy_ck,	CK_443X),
+	CLK("omap_i2c.2",	"ick",				&dummy_ck,	CK_443X),
+	CLK("omap_i2c.3",	"ick",				&dummy_ck,	CK_443X),
+	CLK("omap_i2c.4",	"ick",				&dummy_ck,	CK_443X),
 	CLK("mmci-omap-hs.0",	"ick",				&dummy_ck,	CK_443X),
 	CLK("mmci-omap-hs.1",	"ick",				&dummy_ck,	CK_443X),
 	CLK("mmci-omap-hs.2",	"ick",				&dummy_ck,	CK_443X),
@@ -2997,6 +3264,18 @@
 	CLK(NULL,	"uart3_ick",			&dummy_ck,	CK_443X),
 	CLK(NULL,	"uart4_ick",			&dummy_ck,	CK_443X),
 	CLK("omap_wdt",	"ick",				&dummy_ck,	CK_443X),
+	CLK(NULL,	"auxclk0_ck",			&auxclk0_ck,	CK_443X),
+	CLK(NULL,	"auxclk1_ck",			&auxclk1_ck,	CK_443X),
+	CLK(NULL,	"auxclk2_ck",			&auxclk2_ck,	CK_443X),
+	CLK(NULL,	"auxclk3_ck",			&auxclk3_ck,	CK_443X),
+	CLK(NULL,	"auxclk4_ck",			&auxclk4_ck,	CK_443X),
+	CLK(NULL,	"auxclk5_ck",			&auxclk5_ck,	CK_443X),
+	CLK(NULL,	"auxclkreq0_ck",		&auxclkreq0_ck,	CK_443X),
+	CLK(NULL,	"auxclkreq1_ck",		&auxclkreq1_ck,	CK_443X),
+	CLK(NULL,	"auxclkreq2_ck",		&auxclkreq2_ck,	CK_443X),
+	CLK(NULL,	"auxclkreq3_ck",		&auxclkreq3_ck,	CK_443X),
+	CLK(NULL,	"auxclkreq4_ck",		&auxclkreq4_ck,	CK_443X),
+	CLK(NULL,	"auxclkreq5_ck",		&auxclkreq5_ck,	CK_443X),
 };
 
 int __init omap4xxx_clk_init(void)
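Note on the recalc conversions above: several fixed-ratio clocks now use omap_fixed_divisor_recalc together with an explicit .fixed_div (2 for per_hs_clk_div_ck, 3 for usb_hs_clk_div_ck, 16 for func_12m_fclk and lp_clk_div_ck, and so on), and the HSDIVIDER outputs are rerouted through new *_x2_ck nodes handled by omap3_clkoutx2_recalc, which is expected to report twice the parent DPLL rate while the DPLL is locked. As a rough sketch, not taken from this patch, the fixed-divisor helper is assumed to do nothing more than scale the parent rate:

/*
 * Assumed behaviour of the .fixed_div/.recalc pairing used above;
 * the real plat-omap implementation may differ in details.
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);	/* .fixed_div must be non-zero */

	return clk->parent->rate / clk->fixed_div;
}
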
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 6fb61b1..e20b986 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -13,7 +13,6 @@
  */
 #undef DEBUG
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/list.h>
@@ -27,13 +26,16 @@
 
 #include <linux/bitops.h>
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
+#include "cm-regbits-24xx.h"
+#include "cminst44xx.h"
+#include "prcm44xx.h"
 
 #include <plat/clock.h>
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+#include "powerdomain.h"
+#include "clockdomain.h"
 #include <plat/prcm.h>
 
 /* clkdm_list contains all registered struct clockdomains */
@@ -141,6 +143,9 @@
  * clockdomain is in hardware-supervised mode.	Meant to be called
  * once at clockdomain layer initialization, since these should remain
  * fixed for a particular architecture.  No return value.
+ *
+ * XXX autodeps are deprecated and should be removed at the earliest
+ * opportunity
  */
 static void _autodep_lookup(struct clkdm_autodep *autodep)
 {
@@ -168,6 +173,9 @@
  * Add the "autodep" sleep & wakeup dependencies to clockdomain 'clkdm'
  * in hardware-supervised mode.  Meant to be called from clock framework
  * when a clock inside clockdomain 'clkdm' is enabled.	No return value.
+ *
+ * XXX autodeps are deprecated and should be removed at the earliest
+ * opportunity
  */
 static void _clkdm_add_autodeps(struct clockdomain *clkdm)
 {
@@ -199,6 +207,9 @@
  * Remove the "autodep" sleep & wakeup dependencies from clockdomain 'clkdm'
  * in hardware-supervised mode.  Meant to be called from clock framework
  * when a clock inside clockdomain 'clkdm' is disabled.  No return value.
+ *
+ * XXX autodeps are deprecated and should be removed at the earliest
+ * opportunity
  */
 static void _clkdm_del_autodeps(struct clockdomain *clkdm)
 {
@@ -223,39 +234,56 @@
 	}
 }
 
-/*
- * _omap2_clkdm_set_hwsup - set the hwsup idle transition bit
+/**
+ * _enable_hwsup - place a clockdomain into hardware-supervised idle
  * @clkdm: struct clockdomain *
- * @enable: int 0 to disable, 1 to enable
  *
- * Internal helper for actually switching the bit that controls hwsup
- * idle transitions for clkdm.
+ * Place the clockdomain @clkdm into hardware-supervised idle mode.
+ * No return value.
+ *
+ * XXX Should this return an error if the clockdomain does not support
+ * hardware-supervised idle mode?
  */
-static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable)
+static void _enable_hwsup(struct clockdomain *clkdm)
 {
-	u32 bits, v;
-
-	if (cpu_is_omap24xx()) {
-		if (enable)
-			bits = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
-		else
-			bits = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
-	} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-		if (enable)
-			bits = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
-		else
-			bits = OMAP34XX_CLKSTCTRL_DISABLE_AUTO;
-	} else {
+	if (cpu_is_omap24xx())
+		omap2xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+					       clkdm->clktrctrl_mask);
+	else if (cpu_is_omap34xx())
+		omap3xxx_cm_clkdm_enable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+					       clkdm->clktrctrl_mask);
+	else if (cpu_is_omap44xx())
+		return omap4_cminst_clkdm_enable_hwsup(clkdm->prcm_partition,
+						       clkdm->cm_inst,
+						       clkdm->clkdm_offs);
+	else
 		BUG();
-	}
+}
 
-	bits = bits << __ffs(clkdm->clktrctrl_mask);
-
-	v = __raw_readl(clkdm->clkstctrl_reg);
-	v &= ~(clkdm->clktrctrl_mask);
-	v |= bits;
-	__raw_writel(v, clkdm->clkstctrl_reg);
-
+/**
+ * _disable_hwsup - place a clockdomain into software-supervised idle
+ * @clkdm: struct clockdomain *
+ *
+ * Place the clockdomain @clkdm into software-supervised idle mode.
+ * No return value.
+ *
+ * XXX Should this return an error if the clockdomain does not support
+ * software-supervised idle mode?
+ */
+static void _disable_hwsup(struct clockdomain *clkdm)
+{
+	if (cpu_is_omap24xx())
+		omap2xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+						clkdm->clktrctrl_mask);
+	else if (cpu_is_omap34xx())
+		omap3xxx_cm_clkdm_disable_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+						clkdm->clktrctrl_mask);
+	else if (cpu_is_omap44xx())
+		return omap4_cminst_clkdm_disable_hwsup(clkdm->prcm_partition,
+							clkdm->cm_inst,
+							clkdm->clkdm_offs);
+	else
+		BUG();
 }
 
 /* Public functions */
@@ -409,7 +437,7 @@
 		pr_debug("clockdomain: hardware will wake up %s when %s wakes "
 			 "up\n", clkdm1->name, clkdm2->name);
 
-		prm_set_mod_reg_bits((1 << clkdm2->dep_bit),
+		omap2_prm_set_mod_reg_bits((1 << clkdm2->dep_bit),
 				     clkdm1->pwrdm.ptr->prcm_offs, PM_WKDEP);
 	}
 
@@ -444,7 +472,7 @@
 		pr_debug("clockdomain: hardware will no longer wake up %s "
 			 "after %s wakes up\n", clkdm1->name, clkdm2->name);
 
-		prm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
+		omap2_prm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
 				       clkdm1->pwrdm.ptr->prcm_offs, PM_WKDEP);
 	}
 
@@ -480,7 +508,7 @@
 	}
 
 	/* XXX It's faster to return the atomic wkdep_usecount */
-	return prm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs, PM_WKDEP,
+	return omap2_prm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs, PM_WKDEP,
 				       (1 << clkdm2->dep_bit));
 }
 
@@ -514,7 +542,7 @@
 		atomic_set(&cd->wkdep_usecount, 0);
 	}
 
-	prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs, PM_WKDEP);
+	omap2_prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs, PM_WKDEP);
 
 	return 0;
 }
@@ -553,7 +581,7 @@
 		pr_debug("clockdomain: will prevent %s from sleeping if %s "
 			 "is active\n", clkdm1->name, clkdm2->name);
 
-		cm_set_mod_reg_bits((1 << clkdm2->dep_bit),
+		omap2_cm_set_mod_reg_bits((1 << clkdm2->dep_bit),
 				    clkdm1->pwrdm.ptr->prcm_offs,
 				    OMAP3430_CM_SLEEPDEP);
 	}
@@ -596,7 +624,7 @@
 			 "sleeping if %s is active\n", clkdm1->name,
 			 clkdm2->name);
 
-		cm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
+		omap2_cm_clear_mod_reg_bits((1 << clkdm2->dep_bit),
 				      clkdm1->pwrdm.ptr->prcm_offs,
 				      OMAP3430_CM_SLEEPDEP);
 	}
@@ -639,7 +667,7 @@
 	}
 
 	/* XXX It's faster to return the atomic sleepdep_usecount */
-	return prm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs,
+	return omap2_prm_read_mod_bits_shift(clkdm1->pwrdm.ptr->prcm_offs,
 				       OMAP3430_CM_SLEEPDEP,
 				       (1 << clkdm2->dep_bit));
 }
@@ -677,35 +705,13 @@
 		atomic_set(&cd->sleepdep_usecount, 0);
 	}
 
-	prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
+	omap2_prm_clear_mod_reg_bits(mask, clkdm->pwrdm.ptr->prcm_offs,
 			       OMAP3430_CM_SLEEPDEP);
 
 	return 0;
 }
 
 /**
- * omap2_clkdm_clktrctrl_read - read the clkdm's current state transition mode
- * @clkdm: struct clkdm * of a clockdomain
- *
- * Return the clockdomain @clkdm current state transition mode from the
- * corresponding domain CM_CLKSTCTRL register.	Returns -EINVAL if @clkdm
- * is NULL or the current mode upon success.
- */
-static int omap2_clkdm_clktrctrl_read(struct clockdomain *clkdm)
-{
-	u32 v;
-
-	if (!clkdm)
-		return -EINVAL;
-
-	v = __raw_readl(clkdm->clkstctrl_reg);
-	v &= clkdm->clktrctrl_mask;
-	v >>= __ffs(clkdm->clktrctrl_mask);
-
-	return v;
-}
-
-/**
  * omap2_clkdm_sleep - force clockdomain sleep transition
  * @clkdm: struct clockdomain *
  *
@@ -729,18 +735,19 @@
 
 	if (cpu_is_omap24xx()) {
 
-		cm_set_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
+		omap2_cm_set_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
 			    clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
 
-	} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+	} else if (cpu_is_omap34xx()) {
 
-		u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_SLEEP <<
-			 __ffs(clkdm->clktrctrl_mask));
+		omap3xxx_cm_clkdm_force_sleep(clkdm->pwrdm.ptr->prcm_offs,
+					      clkdm->clktrctrl_mask);
 
-		u32 v = __raw_readl(clkdm->clkstctrl_reg);
-		v &= ~(clkdm->clktrctrl_mask);
-		v |= bits;
-		__raw_writel(v, clkdm->clkstctrl_reg);
+	} else if (cpu_is_omap44xx()) {
+
+		omap4_cminst_clkdm_force_sleep(clkdm->prcm_partition,
+					       clkdm->cm_inst,
+					       clkdm->clkdm_offs);
 
 	} else {
 		BUG();
@@ -773,18 +780,19 @@
 
 	if (cpu_is_omap24xx()) {
 
-		cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
+		omap2_cm_clear_mod_reg_bits(OMAP24XX_FORCESTATE_MASK,
 			      clkdm->pwrdm.ptr->prcm_offs, OMAP2_PM_PWSTCTRL);
 
-	} else if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+	} else if (cpu_is_omap34xx()) {
 
-		u32 bits = (OMAP34XX_CLKSTCTRL_FORCE_WAKEUP <<
-			 __ffs(clkdm->clktrctrl_mask));
+		omap3xxx_cm_clkdm_force_wakeup(clkdm->pwrdm.ptr->prcm_offs,
+					       clkdm->clktrctrl_mask);
 
-		u32 v = __raw_readl(clkdm->clkstctrl_reg);
-		v &= ~(clkdm->clktrctrl_mask);
-		v |= bits;
-		__raw_writel(v, clkdm->clkstctrl_reg);
+	} else if (cpu_is_omap44xx()) {
+
+		omap4_cminst_clkdm_force_wakeup(clkdm->prcm_partition,
+						clkdm->cm_inst,
+						clkdm->clkdm_offs);
 
 	} else {
 		BUG();
@@ -829,7 +837,7 @@
 			_clkdm_add_autodeps(clkdm);
 	}
 
-	_omap2_clkdm_set_hwsup(clkdm, 1);
+	_enable_hwsup(clkdm);
 
 	pwrdm_clkdm_state_switch(clkdm);
 }
@@ -857,7 +865,7 @@
 	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
-	_omap2_clkdm_set_hwsup(clkdm, 0);
+	_disable_hwsup(clkdm);
 
 	/*
 	 * XXX This should be removed once TI adds wakeup/sleep
@@ -891,7 +899,7 @@
  */
 int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
 {
-	int v;
+	bool hwsup = false;
 
 	/*
 	 * XXX Rewrite this code to maintain a list of enabled
@@ -909,17 +917,27 @@
 	pr_debug("clockdomain: clkdm %s: clk %s now enabled\n", clkdm->name,
 		 clk->name);
 
-	if (!clkdm->clkstctrl_reg)
-		return 0;
+	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
 
-	v = omap2_clkdm_clktrctrl_read(clkdm);
+		if (!clkdm->clktrctrl_mask)
+			return 0;
 
-	if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ||
-	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) {
+		hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+						   clkdm->clktrctrl_mask);
+
+	} else if (cpu_is_omap44xx()) {
+
+		hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
+						       clkdm->cm_inst,
+						       clkdm->clkdm_offs);
+
+	}
+
+	if (hwsup) {
 		/* Disable HW transitions when we are changing deps */
-		_omap2_clkdm_set_hwsup(clkdm, 0);
+		_disable_hwsup(clkdm);
 		_clkdm_add_autodeps(clkdm);
-		_omap2_clkdm_set_hwsup(clkdm, 1);
+		_enable_hwsup(clkdm);
 	} else {
 		omap2_clkdm_wakeup(clkdm);
 	}
@@ -946,7 +964,7 @@
  */
 int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 {
-	int v;
+	bool hwsup = false;
 
 	/*
 	 * XXX Rewrite this code to maintain a list of enabled
@@ -971,17 +989,27 @@
 	pr_debug("clockdomain: clkdm %s: clk %s now disabled\n", clkdm->name,
 		 clk->name);
 
-	if (!clkdm->clkstctrl_reg)
-		return 0;
+	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
 
-	v = omap2_clkdm_clktrctrl_read(clkdm);
+		if (!clkdm->clktrctrl_mask)
+			return 0;
 
-	if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ||
-	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) {
+		hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+						   clkdm->clktrctrl_mask);
+
+	} else if (cpu_is_omap44xx()) {
+
+		hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
+						       clkdm->cm_inst,
+						       clkdm->clkdm_offs);
+
+	}
+
+	if (hwsup) {
 		/* Disable HW transitions when we are changing deps */
-		_omap2_clkdm_set_hwsup(clkdm, 0);
+		_disable_hwsup(clkdm);
 		_clkdm_del_autodeps(clkdm);
-		_omap2_clkdm_set_hwsup(clkdm, 1);
+		_enable_hwsup(clkdm);
 	} else {
 		omap2_clkdm_sleep(clkdm);
 	}
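The open-coded CLKSTCTRL read-modify-write that used to live in _omap2_clkdm_set_hwsup() now sits behind SoC-specific CM helpers. A minimal sketch of what the OMAP2xxx variants are assumed to look like, reusing the register and bitfield names visible in the code removed above (the helper name and the exact cm2xxx_3xxx.c implementation are assumptions, not taken from this patch):

static void _write_clktrctrl(u8 c, s16 module, u32 mask)
{
	u32 v;

	/* Replace the CLKTRCTRL/AUTOSTATE field with the requested mode */
	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
	v &= ~mask;
	v |= c << __ffs(mask);
	omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
}

void omap2xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP24XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
}

void omap2xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
{
	_write_clktrctrl(OMAP24XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
}
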
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
new file mode 100644
index 0000000..9b459c2
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -0,0 +1,147 @@
+/*
+ * arch/arm/mach-omap2/clockdomain.h
+ *
+ * OMAP2/3 clockdomain framework functions
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CLOCKDOMAIN_H
+#define __ARCH_ARM_MACH_OMAP2_CLOCKDOMAIN_H
+
+#include <linux/init.h>
+
+#include "powerdomain.h"
+#include <plat/clock.h>
+#include <plat/cpu.h>
+
+/* Clockdomain capability flags */
+#define CLKDM_CAN_FORCE_SLEEP			(1 << 0)
+#define CLKDM_CAN_FORCE_WAKEUP			(1 << 1)
+#define CLKDM_CAN_ENABLE_AUTO			(1 << 2)
+#define CLKDM_CAN_DISABLE_AUTO			(1 << 3)
+
+#define CLKDM_CAN_HWSUP		(CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
+#define CLKDM_CAN_SWSUP		(CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
+#define CLKDM_CAN_HWSUP_SWSUP	(CLKDM_CAN_SWSUP | CLKDM_CAN_HWSUP)
+
+/**
+ * struct clkdm_autodep - clkdm deps to add when entering/exiting hwsup mode
+ * @clkdm: clockdomain to add wkdep+sleepdep on - set name member only
+ * @omap_chip: OMAP chip types that this autodep is valid on
+ *
+ * A clockdomain that should have wkdeps and sleepdeps added when
+ * another clockdomain should stay active in hwsup mode, and removed
+ * again when that clockdomain is allowed to go inactive in hwsup
+ * mode.
+ *
+ * Autodeps are deprecated and should be removed after
+ * omap_hwmod-based fine-grained module idle control is added.
+ */
+struct clkdm_autodep {
+	union {
+		const char *name;
+		struct clockdomain *ptr;
+	} clkdm;
+	const struct omap_chip_id omap_chip;
+};
+
+/**
+ * struct clkdm_dep - encode dependencies between clockdomains
+ * @clkdm_name: clockdomain name
+ * @clkdm: pointer to the struct clockdomain of @clkdm_name
+ * @omap_chip: OMAP chip types that this dependency is valid on
+ * @wkdep_usecount: Number of wakeup dependencies causing this clkdm to wake
+ * @sleepdep_usecount: Number of sleep deps that could prevent clkdm from idle
+ *
+ * Statically defined.  @clkdm is resolved from @clkdm_name at runtime and
+ * should not be pre-initialized.
+ *
+ * XXX Should also include hardware (fixed) dependencies.
+ */
+struct clkdm_dep {
+	const char *clkdm_name;
+	struct clockdomain *clkdm;
+	atomic_t wkdep_usecount;
+	atomic_t sleepdep_usecount;
+	const struct omap_chip_id omap_chip;
+};
+
+/**
+ * struct clockdomain - OMAP clockdomain
+ * @name: clockdomain name
+ * @pwrdm: powerdomain containing this clockdomain
+ * @clktrctrl_reg: CLKSTCTRL reg for the given clock domain
+ * @clktrctrl_mask: CLKTRCTRL/AUTOSTATE field mask in CM_CLKSTCTRL reg
+ * @flags: Clockdomain capability flags
+ * @dep_bit: Bit shift of this clockdomain's PM_WKDEP/CM_SLEEPDEP bit
+ * @prcm_partition: (OMAP4 only) PRCM partition ID for this clkdm's registers
+ * @cm_inst: (OMAP4 only) CM instance register offset
+ * @clkdm_offs: (OMAP4 only) CM clockdomain register offset
+ * @wkdep_srcs: Clockdomains that can be told to wake this powerdomain up
+ * @sleepdep_srcs: Clockdomains that can be told to keep this clkdm from idling
+ * @omap_chip: OMAP chip types that this clockdomain is valid on
+ * @usecount: Usecount tracking
+ * @node: list_head to link all clockdomains together
+ *
+ * @prcm_partition should be a macro from mach-omap2/prcm44xx.h (OMAP4 only)
+ * @cm_inst should be a macro ending in _INST from the OMAP4 CM instance
+ *     definitions (OMAP4 only)
+ * @clkdm_offs should be a macro ending in _CDOFFS from the OMAP4 CM instance
+ *     definitions (OMAP4 only)
+ */
+struct clockdomain {
+	const char *name;
+	union {
+		const char *name;
+		struct powerdomain *ptr;
+	} pwrdm;
+	const u16 clktrctrl_mask;
+	const u8 flags;
+	const u8 dep_bit;
+	const u8 prcm_partition;
+	const s16 cm_inst;
+	const u16 clkdm_offs;
+	struct clkdm_dep *wkdep_srcs;
+	struct clkdm_dep *sleepdep_srcs;
+	const struct omap_chip_id omap_chip;
+	atomic_t usecount;
+	struct list_head node;
+};
+
+void clkdm_init(struct clockdomain **clkdms, struct clkdm_autodep *autodeps);
+struct clockdomain *clkdm_lookup(const char *name);
+
+int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user),
+			void *user);
+struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm);
+
+int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
+int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
+int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
+int clkdm_clear_all_wkdeps(struct clockdomain *clkdm);
+int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
+int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
+int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
+int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm);
+
+void omap2_clkdm_allow_idle(struct clockdomain *clkdm);
+void omap2_clkdm_deny_idle(struct clockdomain *clkdm);
+
+int omap2_clkdm_wakeup(struct clockdomain *clkdm);
+int omap2_clkdm_sleep(struct clockdomain *clkdm);
+
+int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk);
+int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk);
+
+extern void __init omap2_clockdomains_init(void);
+extern void __init omap44xx_clockdomains_init(void);
+
+#endif
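For the OMAP4-only fields documented in the new header (@prcm_partition, @cm_inst, @clkdm_offs), a statically defined clockdomain would be expected to look roughly like the sketch below. The _PARTITION/_INST/_CDOFFS macro names are illustrative placeholders following the conventions stated in the kernel-doc above, not identifiers taken from this patch:

/* Illustrative OMAP4 clockdomain definition (names are placeholders) */
static struct clockdomain l4_per_44xx_clkdm = {
	.name		= "l4_per_clkdm",
	.pwrdm		= { .name = "l4per_pwrdm" },
	.prcm_partition	= OMAP4430_CM2_PARTITION,	/* prcm44xx.h partition ID */
	.cm_inst	= OMAP4430_CM2_L4PER_INST,	/* CM instance (..._INST) */
	.clkdm_offs	= OMAP4430_CM2_L4PER_L4PER_CDOFFS, /* clkdm offset (..._CDOFFS) */
	.flags		= CLKDM_CAN_HWSUP,
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
};
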
diff --git a/arch/arm/mach-omap2/clockdomains.h b/arch/arm/mach-omap2/clockdomains.h
deleted file mode 100644
index 8fc19ff..0000000
--- a/arch/arm/mach-omap2/clockdomains.h
+++ /dev/null
@@ -1,937 +0,0 @@
-/*
- * OMAP2/3 clockdomains
- *
- * Copyright (C) 2008-2009 Texas Instruments, Inc.
- * Copyright (C) 2008-2010 Nokia Corporation
- *
- * Written by Paul Walmsley and Jouni Högander
- *
- * This file contains clockdomains and clockdomain wakeup/sleep
- * dependencies for the OMAP2/3 chips.  Some notes:
- *
- * A useful validation rule for struct clockdomain: Any clockdomain
- * referenced by a wkdep_srcs or sleepdep_srcs array must have a
- * dep_bit assigned.  So wkdep_srcs/sleepdep_srcs are really just
- * software-controllable dependencies.  Non-software-controllable
- * dependencies do exist, but they are not encoded below (yet).
- *
- * 24xx does not support programmable sleep dependencies (SLEEPDEP)
- *
- * The overly-specific dep_bit names are due to a bit name collision
- * with CM_FCLKEN_{DSP,IVA2}.  The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
- * value are the same for all powerdomains: 2
- *
- * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
- * sanity check?
- * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
- */
-
-/*
- * To-Do List
- * -> Port the Sleep/Wakeup dependencies for the domains
- *    from the Power domain framework
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCKDOMAINS_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCKDOMAINS_H
-
-#include <plat/clockdomain.h>
-#include "cm.h"
-#include "prm.h"
-
-/*
- * Clockdomain dependencies for wkdeps/sleepdeps
- *
- * XXX Hardware dependencies (e.g., dependencies that cannot be
- * changed in software) are not included here yet, but should be.
- */
-
-/* OMAP2/3-common wakeup dependencies */
-
-/*
- * 2420/2430 PM_WKDEP_GFX: CORE, MPU, WKUP
- * 3430ES1 PM_WKDEP_GFX: adds IVA2, removes CORE
- * 3430ES2 PM_WKDEP_SGX: adds IVA2, removes CORE
- * These can share data since they will never be present simultaneously
- * on the same device.
- */
-static struct clkdm_dep gfx_sgx_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX |
-					    CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX |
-					    CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-
-/* 24XX-specific possible dependencies */
-
-#ifdef CONFIG_ARCH_OMAP2
-
-/* Wakeup dependency source arrays */
-
-/* 2420/2430 PM_WKDEP_DSP: CORE, MPU, WKUP */
-static struct clkdm_dep dsp_24xx_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{ NULL },
-};
-
-/*
- * 2420 PM_WKDEP_MPU: CORE, DSP, WKUP
- * 2430 adds MDM
- */
-static struct clkdm_dep mpu_24xx_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "dsp_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "mdm_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
-	},
-	{ NULL },
-};
-
-/*
- * 2420 PM_WKDEP_CORE: DSP, GFX, MPU, WKUP
- * 2430 adds MDM
- */
-static struct clkdm_dep core_24xx_wkdeps[] = {
-	{
-		.clkdm_name = "dsp_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "gfx_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "mdm_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
-	},
-	{ NULL },
-};
-
-#endif
-
-
-/* 2430-specific possible wakeup dependencies */
-
-#ifdef CONFIG_ARCH_OMAP2430
-
-/* 2430 PM_WKDEP_MDM: CORE, MPU, WKUP */
-static struct clkdm_dep mdm_2430_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
-	},
-	{ NULL },
-};
-
-#endif /* CONFIG_ARCH_OMAP2430 */
-
-
-/* OMAP3-specific possible dependencies */
-
-#ifdef CONFIG_ARCH_OMAP3
-
-/* 3430: PM_WKDEP_PER: CORE, IVA2, MPU, WKUP */
-static struct clkdm_dep per_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430ES2: PM_WKDEP_USBHOST: CORE, IVA2, MPU, WKUP */
-static struct clkdm_dep usbhost_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430 PM_WKDEP_MPU: CORE, IVA2, DSS, PER */
-static struct clkdm_dep mpu_3xxx_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "dss_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "per_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430 PM_WKDEP_IVA2: CORE, MPU, WKUP, DSS, PER */
-static struct clkdm_dep iva2_wkdeps[] = {
-	{
-		.clkdm_name = "core_l3_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "core_l4_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "dss_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "per_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-
-/* 3430 PM_WKDEP_CAM: IVA2, MPU, WKUP */
-static struct clkdm_dep cam_wkdeps[] = {
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430 PM_WKDEP_DSS: IVA2, MPU, WKUP */
-static struct clkdm_dep dss_wkdeps[] = {
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "wkup_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430: PM_WKDEP_NEON: MPU */
-static struct clkdm_dep neon_wkdeps[] = {
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-
-/* Sleep dependency source arrays for OMAP3-specific clkdms */
-
-/* 3430: CM_SLEEPDEP_DSS: MPU, IVA */
-static struct clkdm_dep dss_sleepdeps[] = {
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430: CM_SLEEPDEP_PER: MPU, IVA */
-static struct clkdm_dep per_sleepdeps[] = {
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430ES2: CM_SLEEPDEP_USBHOST: MPU, IVA */
-static struct clkdm_dep usbhost_sleepdeps[] = {
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm_name = "iva2_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/* 3430: CM_SLEEPDEP_CAM: MPU */
-static struct clkdm_dep cam_sleepdeps[] = {
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-/*
- * 3430ES1: CM_SLEEPDEP_GFX: MPU
- * 3430ES2: CM_SLEEPDEP_SGX: MPU
- * These can share data since they will never be present simultaneously
- * on the same device.
- */
-static struct clkdm_dep gfx_sgx_sleepdeps[] = {
-	{
-		.clkdm_name = "mpu_clkdm",
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{ NULL },
-};
-
-#endif /* CONFIG_ARCH_OMAP3 */
-
-
-/*
- * OMAP2/3-common clockdomains
- *
- * Even though the 2420 has a single PRCM module from the
- * interconnect's perspective, internally it does appear to have
- * separate PRM and CM clockdomains.  The usual test case is
- * sys_clkout/sys_clkout2.
- */
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-
-/* This is an implicit clockdomain - it is never defined as such in TRM */
-static struct clockdomain wkup_clkdm = {
-	.name		= "wkup_clkdm",
-	.pwrdm		= { .name = "wkup_pwrdm" },
-	.dep_bit	= OMAP_EN_WKUP_SHIFT,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain prm_clkdm = {
-	.name		= "prm_clkdm",
-	.pwrdm		= { .name = "wkup_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain cm_clkdm = {
-	.name		= "cm_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
-};
-
-#endif
-
-/*
- * 2420-only clockdomains
- */
-
-#if defined(CONFIG_ARCH_OMAP2420)
-
-static struct clockdomain mpu_2420_clkdm = {
-	.name		= "mpu_clkdm",
-	.pwrdm		= { .name = "mpu_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(MPU_MOD, OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= mpu_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-static struct clockdomain iva1_2420_clkdm = {
-	.name		= "iva1_clkdm",
-	.pwrdm		= { .name = "dsp_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(OMAP24XX_DSP_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
-	.wkdep_srcs	= dsp_24xx_wkdeps,
-	.clktrctrl_mask = OMAP2420_AUTOSTATE_IVA_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-static struct clockdomain dsp_2420_clkdm = {
-	.name		= "dsp_clkdm",
-	.pwrdm		= { .name = "dsp_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(OMAP24XX_DSP_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-static struct clockdomain gfx_2420_clkdm = {
-	.name		= "gfx_clkdm",
-	.pwrdm		= { .name = "gfx_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(GFX_MOD, OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= gfx_sgx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-static struct clockdomain core_l3_2420_clkdm = {
-	.name		= "core_l3_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= core_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-static struct clockdomain core_l4_2420_clkdm = {
-	.name		= "core_l4_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= core_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-static struct clockdomain dss_2420_clkdm = {
-	.name		= "dss_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2420_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
-};
-
-#endif   /* CONFIG_ARCH_OMAP2420 */
-
-
-/*
- * 2430-only clockdomains
- */
-
-#if defined(CONFIG_ARCH_OMAP2430)
-
-static struct clockdomain mpu_2430_clkdm = {
-	.name		= "mpu_clkdm",
-	.pwrdm		= { .name = "mpu_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(MPU_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= mpu_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-/* Another case of bit name collisions between several registers: EN_MDM */
-static struct clockdomain mdm_clkdm = {
-	.name		= "mdm_clkdm",
-	.pwrdm		= { .name = "mdm_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(OMAP2430_MDM_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP2430_PM_WKDEP_MPU_EN_MDM_SHIFT,
-	.wkdep_srcs	= mdm_2430_wkdeps,
-	.clktrctrl_mask = OMAP2430_AUTOSTATE_MDM_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-static struct clockdomain dsp_2430_clkdm = {
-	.name		= "dsp_clkdm",
-	.pwrdm		= { .name = "dsp_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(OMAP24XX_DSP_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
-	.wkdep_srcs	= dsp_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-static struct clockdomain gfx_2430_clkdm = {
-	.name		= "gfx_clkdm",
-	.pwrdm		= { .name = "gfx_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(GFX_MOD, OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= gfx_sgx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-/*
- * XXX add usecounting for clkdm dependencies, otherwise the presence
- * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
- * could cause trouble
- */
-static struct clockdomain core_l3_2430_clkdm = {
-	.name		= "core_l3_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP24XX_EN_CORE_SHIFT,
-	.wkdep_srcs	= core_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-/*
- * XXX add usecounting for clkdm dependencies, otherwise the presence
- * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
- * could cause trouble
- */
-static struct clockdomain core_l4_2430_clkdm = {
-	.name		= "core_l4_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP24XX_EN_CORE_SHIFT,
-	.wkdep_srcs	= core_24xx_wkdeps,
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-static struct clockdomain dss_2430_clkdm = {
-	.name		= "dss_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg  = OMAP2430_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-};
-
-#endif    /* CONFIG_ARCH_OMAP2430 */
-
-
-/*
- * OMAP3 clockdomains
- */
-
-#if defined(CONFIG_ARCH_OMAP3)
-
-static struct clockdomain mpu_3xxx_clkdm = {
-	.name		= "mpu_clkdm",
-	.pwrdm		= { .name = "mpu_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(MPU_MOD, OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP3430_EN_MPU_SHIFT,
-	.wkdep_srcs	= mpu_3xxx_wkdeps,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain neon_clkdm = {
-	.name		= "neon_clkdm",
-	.pwrdm		= { .name = "neon_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430_NEON_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= neon_wkdeps,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_NEON_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain iva2_clkdm = {
-	.name		= "iva2_clkdm",
-	.pwrdm		= { .name = "iva2_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430_IVA2_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_IVA2_SHIFT,
-	.wkdep_srcs	= iva2_wkdeps,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_IVA2_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain gfx_3430es1_clkdm = {
-	.name		= "gfx_clkdm",
-	.pwrdm		= { .name = "gfx_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(GFX_MOD, OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= gfx_sgx_wkdeps,
-	.sleepdep_srcs	= gfx_sgx_sleepdeps,
-	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_GFX_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1),
-};
-
-static struct clockdomain sgx_clkdm = {
-	.name		= "sgx_clkdm",
-	.pwrdm		= { .name = "sgx_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430ES2_SGX_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= gfx_sgx_wkdeps,
-	.sleepdep_srcs	= gfx_sgx_sleepdeps,
-	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
-};
-
-/*
- * The die-to-die clockdomain was documented in the 34xx ES1 TRM, but
- * then that information was removed from the 34xx ES2+ TRM.  It is
- * unclear whether the core is still there, but the clockdomain logic
- * is there, and must be programmed to an appropriate state if the
- * CORE clockdomain is to become inactive.
- */
-static struct clockdomain d2d_clkdm = {
-	.name		= "d2d_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_D2D_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-/*
- * XXX add usecounting for clkdm dependencies, otherwise the presence
- * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
- * could cause trouble
- */
-static struct clockdomain core_l3_3xxx_clkdm = {
-	.name		= "core_l3_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L3_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-/*
- * XXX add usecounting for clkdm dependencies, otherwise the presence
- * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
- * could cause trouble
- */
-static struct clockdomain core_l4_3xxx_clkdm = {
-	.name		= "core_l4_clkdm",
-	.pwrdm		= { .name = "core_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(CORE_MOD, OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L4_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-/* Another case of bit name collisions between several registers: EN_DSS */
-static struct clockdomain dss_3xxx_clkdm = {
-	.name		= "dss_clkdm",
-	.pwrdm		= { .name = "dss_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430_DSS_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
-	.wkdep_srcs	= dss_wkdeps,
-	.sleepdep_srcs	= dss_sleepdeps,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain cam_clkdm = {
-	.name		= "cam_clkdm",
-	.pwrdm		= { .name = "cam_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430_CAM_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= cam_wkdeps,
-	.sleepdep_srcs	= cam_sleepdeps,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_CAM_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain usbhost_clkdm = {
-	.name		= "usbhost_clkdm",
-	.pwrdm		= { .name = "usbhost_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430ES2_USBHOST_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.wkdep_srcs	= usbhost_wkdeps,
-	.sleepdep_srcs	= usbhost_sleepdeps,
-	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
-};
-
-static struct clockdomain per_clkdm = {
-	.name		= "per_clkdm",
-	.pwrdm		= { .name = "per_pwrdm" },
-	.flags		= CLKDM_CAN_HWSUP_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430_PER_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.dep_bit	= OMAP3430_EN_PER_SHIFT,
-	.wkdep_srcs	= per_wkdeps,
-	.sleepdep_srcs	= per_sleepdeps,
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-/*
- * Disable hw supervised mode for emu_clkdm, because emu_pwrdm is
- * switched of even if sdti is in use
- */
-static struct clockdomain emu_clkdm = {
-	.name		= "emu_clkdm",
-	.pwrdm		= { .name = "emu_pwrdm" },
-	.flags		= /* CLKDM_CAN_ENABLE_AUTO |  */CLKDM_CAN_SWSUP,
-	.clkstctrl_reg	= OMAP34XX_CM_REGADDR(OMAP3430_EMU_MOD,
-						 OMAP2_CM_CLKSTCTRL),
-	.clktrctrl_mask = OMAP3430_CLKTRCTRL_EMU_MASK,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain dpll1_clkdm = {
-	.name		= "dpll1_clkdm",
-	.pwrdm		= { .name = "dpll1_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain dpll2_clkdm = {
-	.name		= "dpll2_clkdm",
-	.pwrdm		= { .name = "dpll2_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain dpll3_clkdm = {
-	.name		= "dpll3_clkdm",
-	.pwrdm		= { .name = "dpll3_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain dpll4_clkdm = {
-	.name		= "dpll4_clkdm",
-	.pwrdm		= { .name = "dpll4_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct clockdomain dpll5_clkdm = {
-	.name		= "dpll5_clkdm",
-	.pwrdm		= { .name = "dpll5_pwrdm" },
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
-};
-
-#endif   /* CONFIG_ARCH_OMAP3 */
-
-#include "clockdomains44xx.h"
-
-/*
- * Clockdomain hwsup dependencies (OMAP3 only)
- */
-
-static struct clkdm_autodep clkdm_autodeps[] = {
-	{
-		.clkdm	   = { .name = "mpu_clkdm" },
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm	   = { .name = "iva2_clkdm" },
-		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
-	},
-	{
-		.clkdm	   = { .name = NULL },
-	}
-};
-
-/*
- * List of clockdomain pointers per platform
- */
-
-static struct clockdomain *clockdomains_omap[] = {
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-	&wkup_clkdm,
-	&cm_clkdm,
-	&prm_clkdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2420
-	&mpu_2420_clkdm,
-	&iva1_2420_clkdm,
-	&dsp_2420_clkdm,
-	&gfx_2420_clkdm,
-	&core_l3_2420_clkdm,
-	&core_l4_2420_clkdm,
-	&dss_2420_clkdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2430
-	&mpu_2430_clkdm,
-	&mdm_clkdm,
-	&dsp_2430_clkdm,
-	&gfx_2430_clkdm,
-	&core_l3_2430_clkdm,
-	&core_l4_2430_clkdm,
-	&dss_2430_clkdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-	&mpu_3xxx_clkdm,
-	&neon_clkdm,
-	&iva2_clkdm,
-	&gfx_3430es1_clkdm,
-	&sgx_clkdm,
-	&d2d_clkdm,
-	&core_l3_3xxx_clkdm,
-	&core_l4_3xxx_clkdm,
-	&dss_3xxx_clkdm,
-	&cam_clkdm,
-	&usbhost_clkdm,
-	&per_clkdm,
-	&emu_clkdm,
-	&dpll1_clkdm,
-	&dpll2_clkdm,
-	&dpll3_clkdm,
-	&dpll4_clkdm,
-	&dpll5_clkdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-	&l4_cefuse_44xx_clkdm,
-	&l4_cfg_44xx_clkdm,
-	&tesla_44xx_clkdm,
-	&l3_gfx_44xx_clkdm,
-	&ivahd_44xx_clkdm,
-	&l4_secure_44xx_clkdm,
-	&l4_per_44xx_clkdm,
-	&abe_44xx_clkdm,
-	&l3_instr_44xx_clkdm,
-	&l3_init_44xx_clkdm,
-	&mpuss_44xx_clkdm,
-	&mpu0_44xx_clkdm,
-	&mpu1_44xx_clkdm,
-	&l3_emif_44xx_clkdm,
-	&l4_ao_44xx_clkdm,
-	&ducati_44xx_clkdm,
-	&l3_2_44xx_clkdm,
-	&l3_1_44xx_clkdm,
-	&l3_d2d_44xx_clkdm,
-	&iss_44xx_clkdm,
-	&l3_dss_44xx_clkdm,
-	&l4_wkup_44xx_clkdm,
-	&emu_sys_44xx_clkdm,
-	&l3_dma_44xx_clkdm,
-#endif
-
-	NULL,
-};
-
-#endif
diff --git a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
new file mode 100644
index 0000000..e4a7133
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
@@ -0,0 +1,860 @@
+/*
+ * OMAP2/3 clockdomains
+ *
+ * Copyright (C) 2008-2009 Texas Instruments, Inc.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Paul Walmsley, Jouni Högander
+ *
+ * This file contains clockdomains and clockdomain wakeup/sleep
+ * dependencies for the OMAP2/3 chips.  Some notes:
+ *
+ * A useful validation rule for struct clockdomain: Any clockdomain
+ * referenced by a wkdep_srcs or sleepdep_srcs array must have a
+ * dep_bit assigned.  So wkdep_srcs/sleepdep_srcs are really just
+ * software-controllable dependencies.  Non-software-controllable
+ * dependencies do exist, but they are not encoded below (yet).
+ *
+ * 24xx does not support programmable sleep dependencies (SLEEPDEP)
+ *
+ * The overly-specific dep_bit names are due to a bit name collision
+ * with CM_FCLKEN_{DSP,IVA2}.  The DSP/IVA2 PM_WKDEP and CM_SLEEPDEP shift
+ * values are the same for all powerdomains: 2
+ *
+ * XXX should dep_bit be a mask, so we can test to see if it is 0 as a
+ * sanity check?
+ * XXX encode hardware fixed wakeup dependencies -- esp. for 3430 CORE
+ */
+
+/*
+ * To-Do List
+ * -> Port the Sleep/Wakeup dependencies for the domains
+ *    from the Power domain framework
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "clockdomain.h"
+#include "prm2xxx_3xxx.h"
+#include "cm2xxx_3xxx.h"
+#include "cm-regbits-24xx.h"
+#include "cm-regbits-34xx.h"
+#include "cm-regbits-44xx.h"
+#include "prm-regbits-24xx.h"
+#include "prm-regbits-34xx.h"
+
+/*
+ * Clockdomain dependencies for wkdeps/sleepdeps
+ *
+ * XXX Hardware dependencies (e.g., dependencies that cannot be
+ * changed in software) are not included here yet, but should be.
+ */
+
+/* OMAP2/3-common wakeup dependencies */
+
+/*
+ * 2420/2430 PM_WKDEP_GFX: CORE, MPU, WKUP
+ * 3430ES1 PM_WKDEP_GFX: adds IVA2, removes CORE
+ * 3430ES2 PM_WKDEP_SGX: adds IVA2, removes CORE
+ * These can share data since they will never be present simultaneously
+ * on the same device.
+ */
+static struct clkdm_dep gfx_sgx_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX |
+					    CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX |
+					    CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+
+/* 24XX-specific possible dependencies */
+
+/* Wakeup dependency source arrays */
+
+/* 2420/2430 PM_WKDEP_DSP: CORE, MPU, WKUP */
+static struct clkdm_dep dsp_24xx_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{ NULL },
+};
+
+/*
+ * 2420 PM_WKDEP_MPU: CORE, DSP, WKUP
+ * 2430 adds MDM
+ */
+static struct clkdm_dep mpu_24xx_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "dsp_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "mdm_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+	},
+	{ NULL },
+};
+
+/*
+ * 2420 PM_WKDEP_CORE: DSP, GFX, MPU, WKUP
+ * 2430 adds MDM
+ */
+static struct clkdm_dep core_24xx_wkdeps[] = {
+	{
+		.clkdm_name = "dsp_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "gfx_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "mdm_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP2430)
+	},
+	{ NULL },
+};
+
+
+/* 2430-specific possible wakeup dependencies */
+
+#ifdef CONFIG_ARCH_OMAP2430
+
+/* 2430 PM_WKDEP_MDM: CORE, MPU, WKUP */
+static struct clkdm_dep mdm_2430_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX)
+	},
+	{ NULL },
+};
+
+#endif /* CONFIG_ARCH_OMAP2430 */
+
+
+/* OMAP3-specific possible dependencies */
+
+#ifdef CONFIG_ARCH_OMAP3
+
+/* 3430: PM_WKDEP_PER: CORE, IVA2, MPU, WKUP */
+static struct clkdm_dep per_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430ES2: PM_WKDEP_USBHOST: CORE, IVA2, MPU, WKUP */
+static struct clkdm_dep usbhost_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430 PM_WKDEP_MPU: CORE, IVA2, DSS, PER */
+static struct clkdm_dep mpu_3xxx_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "dss_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "per_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430 PM_WKDEP_IVA2: CORE, MPU, WKUP, DSS, PER */
+static struct clkdm_dep iva2_wkdeps[] = {
+	{
+		.clkdm_name = "core_l3_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "core_l4_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "dss_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "per_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+
+/* 3430 PM_WKDEP_CAM: IVA2, MPU, WKUP */
+static struct clkdm_dep cam_wkdeps[] = {
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430 PM_WKDEP_DSS: IVA2, MPU, WKUP */
+static struct clkdm_dep dss_wkdeps[] = {
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "wkup_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430: PM_WKDEP_NEON: MPU */
+static struct clkdm_dep neon_wkdeps[] = {
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+
+/* Sleep dependency source arrays for OMAP3-specific clkdms */
+
+/* 3430: CM_SLEEPDEP_DSS: MPU, IVA */
+static struct clkdm_dep dss_sleepdeps[] = {
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430: CM_SLEEPDEP_PER: MPU, IVA */
+static struct clkdm_dep per_sleepdeps[] = {
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430ES2: CM_SLEEPDEP_USBHOST: MPU, IVA */
+static struct clkdm_dep usbhost_sleepdeps[] = {
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm_name = "iva2_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/* 3430: CM_SLEEPDEP_CAM: MPU */
+static struct clkdm_dep cam_sleepdeps[] = {
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+/*
+ * 3430ES1: CM_SLEEPDEP_GFX: MPU
+ * 3430ES2: CM_SLEEPDEP_SGX: MPU
+ * These can share data since they will never be present simultaneously
+ * on the same device.
+ */
+static struct clkdm_dep gfx_sgx_sleepdeps[] = {
+	{
+		.clkdm_name = "mpu_clkdm",
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{ NULL },
+};
+
+#endif /* CONFIG_ARCH_OMAP3 */
+
+
+/*
+ * OMAP2/3-common clockdomains
+ *
+ * Even though the 2420 has a single PRCM module from the
+ * interconnect's perspective, internally it does appear to have
+ * separate PRM and CM clockdomains.  The usual test case is
+ * sys_clkout/sys_clkout2.
+ */
+
+/* This is an implicit clockdomain - it is never defined as such in TRM */
+static struct clockdomain wkup_clkdm = {
+	.name		= "wkup_clkdm",
+	.pwrdm		= { .name = "wkup_pwrdm" },
+	.dep_bit	= OMAP_EN_WKUP_SHIFT,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain prm_clkdm = {
+	.name		= "prm_clkdm",
+	.pwrdm		= { .name = "wkup_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain cm_clkdm = {
+	.name		= "cm_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
+};
+
+/*
+ * 2420-only clockdomains
+ */
+
+#if defined(CONFIG_ARCH_OMAP2420)
+
+static struct clockdomain mpu_2420_clkdm = {
+	.name		= "mpu_clkdm",
+	.pwrdm		= { .name = "mpu_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.wkdep_srcs	= mpu_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+static struct clockdomain iva1_2420_clkdm = {
+	.name		= "iva1_clkdm",
+	.pwrdm		= { .name = "dsp_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.dep_bit	= OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
+	.wkdep_srcs	= dsp_24xx_wkdeps,
+	.clktrctrl_mask = OMAP2420_AUTOSTATE_IVA_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+static struct clockdomain dsp_2420_clkdm = {
+	.name		= "dsp_clkdm",
+	.pwrdm		= { .name = "dsp_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+static struct clockdomain gfx_2420_clkdm = {
+	.name		= "gfx_clkdm",
+	.pwrdm		= { .name = "gfx_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= gfx_sgx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+static struct clockdomain core_l3_2420_clkdm = {
+	.name		= "core_l3_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.wkdep_srcs	= core_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+static struct clockdomain core_l4_2420_clkdm = {
+	.name		= "core_l4_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.wkdep_srcs	= core_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+static struct clockdomain dss_2420_clkdm = {
+	.name		= "dss_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+#endif   /* CONFIG_ARCH_OMAP2420 */
+
+
+/*
+ * 2430-only clockdomains
+ */
+
+#if defined(CONFIG_ARCH_OMAP2430)
+
+static struct clockdomain mpu_2430_clkdm = {
+	.name		= "mpu_clkdm",
+	.pwrdm		= { .name = "mpu_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= mpu_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_MPU_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* Another case of bit name collisions between several registers: EN_MDM */
+static struct clockdomain mdm_clkdm = {
+	.name		= "mdm_clkdm",
+	.pwrdm		= { .name = "mdm_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.dep_bit	= OMAP2430_PM_WKDEP_MPU_EN_MDM_SHIFT,
+	.wkdep_srcs	= mdm_2430_wkdeps,
+	.clktrctrl_mask = OMAP2430_AUTOSTATE_MDM_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+static struct clockdomain dsp_2430_clkdm = {
+	.name		= "dsp_clkdm",
+	.pwrdm		= { .name = "dsp_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.dep_bit	= OMAP24XX_PM_WKDEP_MPU_EN_DSP_SHIFT,
+	.wkdep_srcs	= dsp_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSP_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+static struct clockdomain gfx_2430_clkdm = {
+	.name		= "gfx_clkdm",
+	.pwrdm		= { .name = "gfx_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= gfx_sgx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_GFX_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/*
+ * XXX add usecounting for clkdm dependencies, otherwise the presence
+ * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
+ * could cause trouble
+ */
+static struct clockdomain core_l3_2430_clkdm = {
+	.name		= "core_l3_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.dep_bit	= OMAP24XX_EN_CORE_SHIFT,
+	.wkdep_srcs	= core_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L3_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/*
+ * XXX add usecounting for clkdm dependencies, otherwise the presence
+ * of a single dep bit for core_l3_24xx_clkdm and core_l4_24xx_clkdm
+ * could cause trouble
+ */
+static struct clockdomain core_l4_2430_clkdm = {
+	.name		= "core_l4_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.dep_bit	= OMAP24XX_EN_CORE_SHIFT,
+	.wkdep_srcs	= core_24xx_wkdeps,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_L4_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+static struct clockdomain dss_2430_clkdm = {
+	.name		= "dss_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.clktrctrl_mask = OMAP24XX_AUTOSTATE_DSS_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+#endif    /* CONFIG_ARCH_OMAP2430 */
+
+
+/*
+ * OMAP3 clockdomains
+ */
+
+#if defined(CONFIG_ARCH_OMAP3)
+
+static struct clockdomain mpu_3xxx_clkdm = {
+	.name		= "mpu_clkdm",
+	.pwrdm		= { .name = "mpu_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
+	.dep_bit	= OMAP3430_EN_MPU_SHIFT,
+	.wkdep_srcs	= mpu_3xxx_wkdeps,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain neon_clkdm = {
+	.name		= "neon_clkdm",
+	.pwrdm		= { .name = "neon_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= neon_wkdeps,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_NEON_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain iva2_clkdm = {
+	.name		= "iva2_clkdm",
+	.pwrdm		= { .name = "iva2_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_IVA2_SHIFT,
+	.wkdep_srcs	= iva2_wkdeps,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_IVA2_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain gfx_3430es1_clkdm = {
+	.name		= "gfx_clkdm",
+	.pwrdm		= { .name = "gfx_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= gfx_sgx_wkdeps,
+	.sleepdep_srcs	= gfx_sgx_sleepdeps,
+	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_GFX_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1),
+};
+
+static struct clockdomain sgx_clkdm = {
+	.name		= "sgx_clkdm",
+	.pwrdm		= { .name = "sgx_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= gfx_sgx_wkdeps,
+	.sleepdep_srcs	= gfx_sgx_sleepdeps,
+	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+};
+
+/*
+ * The die-to-die clockdomain was documented in the 34xx ES1 TRM, but
+ * then that information was removed from the 34xx ES2+ TRM.  It is
+ * unclear whether the core is still there, but the clockdomain logic
+ * is there, and must be programmed to an appropriate state if the
+ * CORE clockdomain is to become inactive.
+ */
+static struct clockdomain d2d_clkdm = {
+	.name		= "d2d_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.clktrctrl_mask = OMAP3430ES1_CLKTRCTRL_D2D_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/*
+ * XXX add usecounting for clkdm dependencies, otherwise the presence
+ * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
+ * could cause trouble
+ */
+static struct clockdomain core_l3_3xxx_clkdm = {
+	.name		= "core_l3_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L3_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/*
+ * XXX add usecounting for clkdm dependencies, otherwise the presence
+ * of a single dep bit for core_l3_3xxx_clkdm and core_l4_3xxx_clkdm
+ * could cause trouble
+ */
+static struct clockdomain core_l4_3xxx_clkdm = {
+	.name		= "core_l4_clkdm",
+	.pwrdm		= { .name = "core_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP,
+	.dep_bit	= OMAP3430_EN_CORE_SHIFT,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_L4_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* Another case of bit name collisions between several registers: EN_DSS */
+static struct clockdomain dss_3xxx_clkdm = {
+	.name		= "dss_clkdm",
+	.pwrdm		= { .name = "dss_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.dep_bit	= OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
+	.wkdep_srcs	= dss_wkdeps,
+	.sleepdep_srcs	= dss_sleepdeps,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain cam_clkdm = {
+	.name		= "cam_clkdm",
+	.pwrdm		= { .name = "cam_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= cam_wkdeps,
+	.sleepdep_srcs	= cam_sleepdeps,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_CAM_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain usbhost_clkdm = {
+	.name		= "usbhost_clkdm",
+	.pwrdm		= { .name = "usbhost_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.wkdep_srcs	= usbhost_wkdeps,
+	.sleepdep_srcs	= usbhost_sleepdeps,
+	.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+};
+
+static struct clockdomain per_clkdm = {
+	.name		= "per_clkdm",
+	.pwrdm		= { .name = "per_pwrdm" },
+	.flags		= CLKDM_CAN_HWSUP_SWSUP,
+	.dep_bit	= OMAP3430_EN_PER_SHIFT,
+	.wkdep_srcs	= per_wkdeps,
+	.sleepdep_srcs	= per_sleepdeps,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/*
+ * Disable hw supervised mode for emu_clkdm, because emu_pwrdm is
+ * switched off even if sdti is in use
+ */
+static struct clockdomain emu_clkdm = {
+	.name		= "emu_clkdm",
+	.pwrdm		= { .name = "emu_pwrdm" },
+	.flags		= /* CLKDM_CAN_ENABLE_AUTO |  */CLKDM_CAN_SWSUP,
+	.clktrctrl_mask = OMAP3430_CLKTRCTRL_EMU_MASK,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain dpll1_clkdm = {
+	.name		= "dpll1_clkdm",
+	.pwrdm		= { .name = "dpll1_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain dpll2_clkdm = {
+	.name		= "dpll2_clkdm",
+	.pwrdm		= { .name = "dpll2_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain dpll3_clkdm = {
+	.name		= "dpll3_clkdm",
+	.pwrdm		= { .name = "dpll3_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain dpll4_clkdm = {
+	.name		= "dpll4_clkdm",
+	.pwrdm		= { .name = "dpll4_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct clockdomain dpll5_clkdm = {
+	.name		= "dpll5_clkdm",
+	.pwrdm		= { .name = "dpll5_pwrdm" },
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+};
+
+#endif   /* CONFIG_ARCH_OMAP3 */
+
+/*
+ * Clockdomain hwsup dependencies (OMAP3 only)
+ */
+
+static struct clkdm_autodep clkdm_autodeps[] = {
+	{
+		.clkdm	   = { .name = "mpu_clkdm" },
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm	   = { .name = "iva2_clkdm" },
+		.omap_chip = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
+	},
+	{
+		.clkdm	   = { .name = NULL },
+	}
+};
+
+static struct clockdomain *clockdomains_omap2[] __initdata = {
+	&wkup_clkdm,
+	&cm_clkdm,
+	&prm_clkdm,
+
+#ifdef CONFIG_ARCH_OMAP2420
+	&mpu_2420_clkdm,
+	&iva1_2420_clkdm,
+	&dsp_2420_clkdm,
+	&gfx_2420_clkdm,
+	&core_l3_2420_clkdm,
+	&core_l4_2420_clkdm,
+	&dss_2420_clkdm,
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2430
+	&mpu_2430_clkdm,
+	&mdm_clkdm,
+	&dsp_2430_clkdm,
+	&gfx_2430_clkdm,
+	&core_l3_2430_clkdm,
+	&core_l4_2430_clkdm,
+	&dss_2430_clkdm,
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+	&mpu_3xxx_clkdm,
+	&neon_clkdm,
+	&iva2_clkdm,
+	&gfx_3430es1_clkdm,
+	&sgx_clkdm,
+	&d2d_clkdm,
+	&core_l3_3xxx_clkdm,
+	&core_l4_3xxx_clkdm,
+	&dss_3xxx_clkdm,
+	&cam_clkdm,
+	&usbhost_clkdm,
+	&per_clkdm,
+	&emu_clkdm,
+	&dpll1_clkdm,
+	&dpll2_clkdm,
+	&dpll3_clkdm,
+	&dpll4_clkdm,
+	&dpll5_clkdm,
+#endif
+	NULL,
+};
+
+void __init omap2_clockdomains_init(void)
+{
+	clkdm_init(clockdomains_omap2, clkdm_autodeps);
+}
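
The data file above follows one structural rule called out in its header comment: any clockdomain named in a wkdep_srcs or sleepdep_srcs array must itself have a dep_bit assigned, and every clkdm_dep array is terminated by a { NULL } entry. A minimal sketch of that pattern, using hypothetical "foo"/"bar" names and a borrowed shift constant that are not part of this patch:

/* Illustrative only: the names and the dep_bit value are placeholders. */
static struct clkdm_dep bar_wkdeps[] = {
	{
		.clkdm_name = "foo_clkdm",
		.omap_chip  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430)
	},
	{ NULL },			/* end-of-array marker */
};

static struct clockdomain foo_clkdm = {
	.name		= "foo_clkdm",
	.pwrdm		= { .name = "foo_pwrdm" },
	.dep_bit	= OMAP3430_EN_CORE_SHIFT,	/* placeholder shift */
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};

static struct clockdomain bar_clkdm = {
	.name		= "bar_clkdm",
	.pwrdm		= { .name = "bar_pwrdm" },
	.wkdep_srcs	= bar_wkdeps,	/* names foo_clkdm, which carries a dep_bit */
	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
};
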
diff --git a/arch/arm/mach-omap2/clockdomains44xx.h b/arch/arm/mach-omap2/clockdomains44xx.h
deleted file mode 100644
index 7e5ba0f..0000000
--- a/arch/arm/mach-omap2/clockdomains44xx.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * OMAP4 Clock domains framework
- *
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2009 Nokia Corporation
- *
- * Abhijit Pagare (abhijitpagare@ti.com)
- * Benoit Cousson (b-cousson@ti.com)
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * To-Do List
- * -> Populate the Sleep/Wakeup dependencies for the domains
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCKDOMAINS44XX_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCKDOMAINS44XX_H
-
-#include <plat/clockdomain.h>
-
-#if defined(CONFIG_ARCH_OMAP4)
-
-static struct clockdomain l4_cefuse_44xx_clkdm = {
-	.name		  = "l4_cefuse_clkdm",
-	.pwrdm		  = { .name = "cefuse_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_CEFUSE_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l4_cfg_44xx_clkdm = {
-	.name		  = "l4_cfg_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L4CFG_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain tesla_44xx_clkdm = {
-	.name		  = "tesla_clkdm",
-	.pwrdm		  = { .name = "tesla_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_TESLA_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_gfx_44xx_clkdm = {
-	.name		  = "l3_gfx_clkdm",
-	.pwrdm		  = { .name = "gfx_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_GFX_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain ivahd_44xx_clkdm = {
-	.name		  = "ivahd_clkdm",
-	.pwrdm		  = { .name = "ivahd_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_IVAHD_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l4_secure_44xx_clkdm = {
-	.name		  = "l4_secure_clkdm",
-	.pwrdm		  = { .name = "l4per_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L4SEC_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l4_per_44xx_clkdm = {
-	.name		  = "l4_per_clkdm",
-	.pwrdm		  = { .name = "l4per_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L4PER_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain abe_44xx_clkdm = {
-	.name		  = "abe_clkdm",
-	.pwrdm		  = { .name = "abe_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM1_ABE_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_instr_44xx_clkdm = {
-	.name		  = "l3_instr_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L3INSTR_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_init_44xx_clkdm = {
-	.name		  = "l3_init_clkdm",
-	.pwrdm		  = { .name = "l3init_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L3INIT_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain mpuss_44xx_clkdm = {
-	.name		  = "mpuss_clkdm",
-	.pwrdm		  = { .name = "mpu_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_MPU_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain mpu0_44xx_clkdm = {
-	.name		  = "mpu0_clkdm",
-	.pwrdm		  = { .name = "cpu0_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_CPU0_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain mpu1_44xx_clkdm = {
-	.name		  = "mpu1_clkdm",
-	.pwrdm		  = { .name = "cpu1_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_CPU1_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_emif_44xx_clkdm = {
-	.name		  = "l3_emif_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_MEMIF_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l4_ao_44xx_clkdm = {
-	.name		  = "l4_ao_clkdm",
-	.pwrdm		  = { .name = "always_on_core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_ALWON_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain ducati_44xx_clkdm = {
-	.name		  = "ducati_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_DUCATI_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_2_44xx_clkdm = {
-	.name		  = "l3_2_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L3_2_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_1_44xx_clkdm = {
-	.name		  = "l3_1_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_L3_1_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_d2d_44xx_clkdm = {
-	.name		  = "l3_d2d_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_D2D_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain iss_44xx_clkdm = {
-	.name		  = "iss_clkdm",
-	.pwrdm		  = { .name = "cam_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_CAM_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_dss_44xx_clkdm = {
-	.name		  = "l3_dss_clkdm",
-	.pwrdm		  = { .name = "dss_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_DSS_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l4_wkup_44xx_clkdm = {
-	.name		  = "l4_wkup_clkdm",
-	.pwrdm		  = { .name = "wkup_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_WKUP_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain emu_sys_44xx_clkdm = {
-	.name		  = "emu_sys_clkdm",
-	.pwrdm		  = { .name = "emu_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_EMU_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-static struct clockdomain l3_dma_44xx_clkdm = {
-	.name		  = "l3_dma_clkdm",
-	.pwrdm		  = { .name = "core_pwrdm" },
-	.clkstctrl_reg	  = OMAP4430_CM_SDMA_CLKSTCTRL,
-	.clktrctrl_mask	  = OMAP4430_CLKTRCTRL_MASK,
-	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-#endif
-
-#endif
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
new file mode 100644
index 0000000..51920fc
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -0,0 +1,311 @@
+/*
+ * OMAP4 Clock domains framework
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Abhijit Pagare (abhijitpagare@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * To-Do List
+ * -> Populate the Sleep/Wakeup dependencies for the domains
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "clockdomain.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "cm-regbits-44xx.h"
+#include "prm44xx.h"
+#include "prcm44xx.h"
+#include "prcm_mpu44xx.h"
+
+
+static struct clockdomain l4_cefuse_44xx_clkdm = {
+	.name		  = "l4_cefuse_clkdm",
+	.pwrdm		  = { .name = "cefuse_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CEFUSE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l4_cfg_44xx_clkdm = {
+	.name		  = "l4_cfg_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_L4CFG_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain tesla_44xx_clkdm = {
+	.name		  = "tesla_clkdm",
+	.pwrdm		  = { .name = "tesla_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM1_PARTITION,
+	.cm_inst	  = OMAP4430_CM1_TESLA_INST,
+	.clkdm_offs	  = OMAP4430_CM1_TESLA_TESLA_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_gfx_44xx_clkdm = {
+	.name		  = "l3_gfx_clkdm",
+	.pwrdm		  = { .name = "gfx_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_GFX_INST,
+	.clkdm_offs	  = OMAP4430_CM2_GFX_GFX_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain ivahd_44xx_clkdm = {
+	.name		  = "ivahd_clkdm",
+	.pwrdm		  = { .name = "ivahd_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_IVAHD_INST,
+	.clkdm_offs	  = OMAP4430_CM2_IVAHD_IVAHD_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l4_secure_44xx_clkdm = {
+	.name		  = "l4_secure_clkdm",
+	.pwrdm		  = { .name = "l4per_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_L4PER_INST,
+	.clkdm_offs	  = OMAP4430_CM2_L4PER_L4SEC_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l4_per_44xx_clkdm = {
+	.name		  = "l4_per_clkdm",
+	.pwrdm		  = { .name = "l4per_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_L4PER_INST,
+	.clkdm_offs	  = OMAP4430_CM2_L4PER_L4PER_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain abe_44xx_clkdm = {
+	.name		  = "abe_clkdm",
+	.pwrdm		  = { .name = "abe_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM1_PARTITION,
+	.cm_inst	  = OMAP4430_CM1_ABE_INST,
+	.clkdm_offs	  = OMAP4430_CM1_ABE_ABE_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_instr_44xx_clkdm = {
+	.name		  = "l3_instr_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_L3INSTR_CDOFFS,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_init_44xx_clkdm = {
+	.name		  = "l3_init_clkdm",
+	.pwrdm		  = { .name = "l3init_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_L3INIT_INST,
+	.clkdm_offs	  = OMAP4430_CM2_L3INIT_L3INIT_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain mpuss_44xx_clkdm = {
+	.name		  = "mpuss_clkdm",
+	.pwrdm		  = { .name = "mpu_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM1_PARTITION,
+	.cm_inst	  = OMAP4430_CM1_MPU_INST,
+	.clkdm_offs	  = OMAP4430_CM1_MPU_MPU_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain mpu0_44xx_clkdm = {
+	.name		  = "mpu0_clkdm",
+	.pwrdm		  = { .name = "cpu0_pwrdm" },
+	.prcm_partition	  = OMAP4430_PRCM_MPU_PARTITION,
+	.cm_inst	  = OMAP4430_PRCM_MPU_CPU0_INST,
+	.clkdm_offs	  = OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain mpu1_44xx_clkdm = {
+	.name		  = "mpu1_clkdm",
+	.pwrdm		  = { .name = "cpu1_pwrdm" },
+	.prcm_partition	  = OMAP4430_PRCM_MPU_PARTITION,
+	.cm_inst	  = OMAP4430_PRCM_MPU_CPU1_INST,
+	.clkdm_offs	  = OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_emif_44xx_clkdm = {
+	.name		  = "l3_emif_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_MEMIF_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l4_ao_44xx_clkdm = {
+	.name		  = "l4_ao_clkdm",
+	.pwrdm		  = { .name = "always_on_core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_ALWAYS_ON_INST,
+	.clkdm_offs	  = OMAP4430_CM2_ALWAYS_ON_ALWON_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain ducati_44xx_clkdm = {
+	.name		  = "ducati_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_DUCATI_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_2_44xx_clkdm = {
+	.name		  = "l3_2_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_L3_2_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_1_44xx_clkdm = {
+	.name		  = "l3_1_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_L3_1_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_d2d_44xx_clkdm = {
+	.name		  = "l3_d2d_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_D2D_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain iss_44xx_clkdm = {
+	.name		  = "iss_clkdm",
+	.pwrdm		  = { .name = "cam_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CAM_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CAM_CAM_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_dss_44xx_clkdm = {
+	.name		  = "l3_dss_clkdm",
+	.pwrdm		  = { .name = "dss_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_DSS_INST,
+	.clkdm_offs	  = OMAP4430_CM2_DSS_DSS_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l4_wkup_44xx_clkdm = {
+	.name		  = "l4_wkup_clkdm",
+	.pwrdm		  = { .name = "wkup_pwrdm" },
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.cm_inst	  = OMAP4430_PRM_WKUP_CM_INST,
+	.clkdm_offs	  = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain emu_sys_44xx_clkdm = {
+	.name		  = "emu_sys_clkdm",
+	.pwrdm		  = { .name = "emu_pwrdm" },
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.cm_inst	  = OMAP4430_PRM_EMU_CM_INST,
+	.clkdm_offs	  = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
+	.flags		  = CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain l3_dma_44xx_clkdm = {
+	.name		  = "l3_dma_clkdm",
+	.pwrdm		  = { .name = "core_pwrdm" },
+	.prcm_partition	  = OMAP4430_CM2_PARTITION,
+	.cm_inst	  = OMAP4430_CM2_CORE_INST,
+	.clkdm_offs	  = OMAP4430_CM2_CORE_SDMA_CDOFFS,
+	.flags		  = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct clockdomain *clockdomains_omap44xx[] __initdata = {
+	&l4_cefuse_44xx_clkdm,
+	&l4_cfg_44xx_clkdm,
+	&tesla_44xx_clkdm,
+	&l3_gfx_44xx_clkdm,
+	&ivahd_44xx_clkdm,
+	&l4_secure_44xx_clkdm,
+	&l4_per_44xx_clkdm,
+	&abe_44xx_clkdm,
+	&l3_instr_44xx_clkdm,
+	&l3_init_44xx_clkdm,
+	&mpuss_44xx_clkdm,
+	&mpu0_44xx_clkdm,
+	&mpu1_44xx_clkdm,
+	&l3_emif_44xx_clkdm,
+	&l4_ao_44xx_clkdm,
+	&ducati_44xx_clkdm,
+	&l3_2_44xx_clkdm,
+	&l3_1_44xx_clkdm,
+	&l3_d2d_44xx_clkdm,
+	&iss_44xx_clkdm,
+	&l3_dss_44xx_clkdm,
+	&l4_wkup_44xx_clkdm,
+	&emu_sys_44xx_clkdm,
+	&l3_dma_44xx_clkdm,
+	NULL,
+};
+
+void __init omap44xx_clockdomains_init(void)
+{
+	clkdm_init(clockdomains_omap44xx, NULL);
+}
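
Compared with the 2xxx/3xxx file, the OMAP4 data above drops the clkstctrl_reg pointer: each clockdomain is located through the prcm_partition/cm_inst/clkdm_offs triplet, and clkdm_init() is passed no autodep table. How the two per-SoC entry points get called is outside this patch; a hedged sketch of one plausible dispatch, with a hypothetical wrapper name:

/* Hypothetical boot-time dispatch -- not part of this patch. */
static void __init omap_clockdomains_dispatch(void)
{
	if (cpu_is_omap24xx() || cpu_is_omap34xx())
		omap2_clockdomains_init();	/* 2xxx/3xxx tables + autodeps */
	else if (cpu_is_omap44xx())
		omap44xx_clockdomains_init();	/* 4430 tables, no autodeps */
}
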
diff --git a/arch/arm/mach-omap2/cm-regbits-24xx.h b/arch/arm/mach-omap2/cm-regbits-24xx.h
index da51cc3..d70660e 100644
--- a/arch/arm/mach-omap2/cm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-24xx.h
@@ -14,8 +14,6 @@
  * published by the Free Software Foundation.
  */
 
-#include "cm.h"
-
 /* Bits shared between registers */
 
 /* CM_FCLKEN1_CORE and CM_ICLKEN1_CORE shared bits */
@@ -126,8 +124,12 @@
 #define OMAP24XX_ST_HDQ_MASK				(1 << 23)
 #define OMAP2420_ST_I2C2_SHIFT				20
 #define OMAP2420_ST_I2C2_MASK				(1 << 20)
+#define OMAP2430_ST_I2CHS1_SHIFT			19
+#define OMAP2430_ST_I2CHS1_MASK				(1 << 19)
 #define OMAP2420_ST_I2C1_SHIFT				19
 #define OMAP2420_ST_I2C1_MASK				(1 << 19)
+#define OMAP2430_ST_I2CHS2_SHIFT			20
+#define OMAP2430_ST_I2CHS2_MASK				(1 << 20)
 #define OMAP24XX_ST_MCBSP2_SHIFT			16
 #define OMAP24XX_ST_MCBSP2_MASK				(1 << 16)
 #define OMAP24XX_ST_MCBSP1_SHIFT			15
@@ -432,4 +434,9 @@
 #define OMAP2430_AUTOSTATE_MDM_SHIFT			0
 #define OMAP2430_AUTOSTATE_MDM_MASK			(1 << 0)
 
+/* OMAP24XX CM_CLKSTCTRL_*.AUTOSTATE_* register bit values */
+#define OMAP24XX_CLKSTCTRL_DISABLE_AUTO		0x0
+#define OMAP24XX_CLKSTCTRL_ENABLE_AUTO		0x1
+
+
 #endif
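
The two AUTOSTATE values added above select between disabled and enabled hardware-supervised transitions in a 24xx CM_CLKSTCTRL field (the 34xx header below gains the matching four CLKTRCTRL values). A sketch of how such a value is typically written into one AUTOSTATE field, assuming the cm_{read,write}_mod_reg() helpers referenced elsewhere in this series and <linux/bitops.h> for __ffs(); the CORE L3 field is chosen only as an example:

u32 v;

/* Illustrative only: enable hw-supervised transitions for core L3. */
v = cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL);
v &= ~OMAP24XX_AUTOSTATE_L3_MASK;
v |= OMAP24XX_CLKSTCTRL_ENABLE_AUTO << __ffs(OMAP24XX_AUTOSTATE_L3_MASK);
cm_write_mod_reg(v, CORE_MOD, OMAP2_CM_CLKSTCTRL);
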
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 4f959a7..b912759 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -14,8 +14,6 @@
  * published by the Free Software Foundation.
  */
 
-#include "cm.h"
-
 /* Bits shared between registers */
 
 /* CM_FCLKEN1_CORE and CM_ICLKEN1_CORE shared bits */
@@ -800,4 +798,15 @@
 #define OMAP3430ES2_CLKACTIVITY_USBHOST_SHIFT		0
 #define OMAP3430ES2_CLKACTIVITY_USBHOST_MASK		(1 << 0)
 
+/* OMAP3XXX CM_CLKSTCTRL_*.CLKTRCTRL_* register bit values */
+#define OMAP34XX_CLKSTCTRL_DISABLE_AUTO		0x0
+#define OMAP34XX_CLKSTCTRL_FORCE_SLEEP		0x1
+#define OMAP34XX_CLKSTCTRL_FORCE_WAKEUP		0x2
+#define OMAP34XX_CLKSTCTRL_ENABLE_AUTO		0x3
+
+
 #endif
diff --git a/arch/arm/mach-omap2/cm-regbits-44xx.h b/arch/arm/mach-omap2/cm-regbits-44xx.h
index 0b72be4..9d47a05 100644
--- a/arch/arm/mach-omap2/cm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-44xx.h
@@ -22,9 +22,6 @@
 #ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
 #define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
 
-#include "cm.h"
-
-
 /*
  * Used by CM_L3_1_DYNAMICDEP, CM_L3_1_DYNAMICDEP_RESTORE, CM_MPU_DYNAMICDEP,
  * CM_TESLA_DYNAMICDEP
diff --git a/arch/arm/mach-omap2/cm.c b/arch/arm/mach-omap2/cm.c
deleted file mode 100644
index 721c3b6..0000000
--- a/arch/arm/mach-omap2/cm.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * OMAP2/3 CM module functions
- *
- * Copyright (C) 2009 Nokia Corporation
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <asm/atomic.h>
-
-#include <plat/common.h>
-
-#include "cm.h"
-#include "cm-regbits-24xx.h"
-#include "cm-regbits-34xx.h"
-
-static const u8 cm_idlest_offs[] = {
-	CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
-};
-
-/**
- * omap2_cm_wait_idlest_ready - wait for a module to leave idle or standby
- * @prcm_mod: PRCM module offset
- * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
- * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
- *
- * XXX document
- */
-int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
-{
-	int ena = 0, i = 0;
-	u8 cm_idlest_reg;
-	u32 mask;
-
-	if (!idlest_id || (idlest_id > ARRAY_SIZE(cm_idlest_offs)))
-		return -EINVAL;
-
-	cm_idlest_reg = cm_idlest_offs[idlest_id - 1];
-
-	mask = 1 << idlest_shift;
-
-	if (cpu_is_omap24xx())
-		ena = mask;
-	else if (cpu_is_omap34xx())
-		ena = 0;
-	else
-		BUG();
-
-	/* XXX should be OMAP2 CM */
-	omap_test_timeout(((cm_read_mod_reg(prcm_mod, cm_idlest_reg) & mask) == ena),
-			  MAX_MODULE_READY_TIME, i);
-
-	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
-}
-
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index a02ca30..a7bc096 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -1,8 +1,5 @@
-#ifndef __ARCH_ASM_MACH_OMAP2_CM_H
-#define __ARCH_ASM_MACH_OMAP2_CM_H
-
 /*
- * OMAP2/3 Clock Management (CM) register definitions
+ * OMAP2+ Clock Management prototypes
  *
  * Copyright (C) 2007-2009 Texas Instruments, Inc.
  * Copyright (C) 2007-2009 Nokia Corporation
@@ -13,136 +10,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-#include "prcm-common.h"
-
-#define OMAP2420_CM_REGADDR(module, reg)				\
-			OMAP2_L4_IO_ADDRESS(OMAP2420_CM_BASE + (module) + (reg))
-#define OMAP2430_CM_REGADDR(module, reg)				\
-			OMAP2_L4_IO_ADDRESS(OMAP2430_CM_BASE + (module) + (reg))
-#define OMAP34XX_CM_REGADDR(module, reg)				\
-			OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE + (module) + (reg))
-#define OMAP44XX_CM1_REGADDR(module, reg)				\
-			OMAP2_L4_IO_ADDRESS(OMAP4430_CM1_BASE + (module) + (reg))
-#define OMAP44XX_CM2_REGADDR(module, reg)				\
-			OMAP2_L4_IO_ADDRESS(OMAP4430_CM2_BASE + (module) + (reg))
-
-#include "cm44xx.h"
-
-/*
- * Architecture-specific global CM registers
- * Use cm_{read,write}_reg() with these registers.
- * These registers appear once per CM module.
- */
-
-#define OMAP3430_CM_REVISION		OMAP34XX_CM_REGADDR(OCP_MOD, 0x0000)
-#define OMAP3430_CM_SYSCONFIG		OMAP34XX_CM_REGADDR(OCP_MOD, 0x0010)
-#define OMAP3430_CM_POLCTRL		OMAP34XX_CM_REGADDR(OCP_MOD, 0x009c)
-
-#define OMAP3_CM_CLKOUT_CTRL_OFFSET	0x0070
-#define OMAP3430_CM_CLKOUT_CTRL		OMAP_CM_REGADDR(OMAP3430_CCR_MOD, 0x0070)
-
-/*
- * Module specific CM registers from CM_BASE + domain offset
- * Use cm_{read,write}_mod_reg() with these registers.
- * These register offsets generally appear in more than one PRCM submodule.
- */
-
-/* Common between 24xx and 34xx */
-
-#define CM_FCLKEN					0x0000
-#define CM_FCLKEN1					CM_FCLKEN
-#define CM_CLKEN					CM_FCLKEN
-#define CM_ICLKEN					0x0010
-#define CM_ICLKEN1					CM_ICLKEN
-#define CM_ICLKEN2					0x0014
-#define CM_ICLKEN3					0x0018
-#define CM_IDLEST					0x0020
-#define CM_IDLEST1					CM_IDLEST
-#define CM_IDLEST2					0x0024
-#define CM_AUTOIDLE					0x0030
-#define CM_AUTOIDLE1					CM_AUTOIDLE
-#define CM_AUTOIDLE2					0x0034
-#define CM_AUTOIDLE3					0x0038
-#define CM_CLKSEL					0x0040
-#define CM_CLKSEL1					CM_CLKSEL
-#define CM_CLKSEL2					0x0044
-#define OMAP2_CM_CLKSTCTRL				0x0048
-#define OMAP4_CM_CLKSTCTRL				0x0000
-
-
-/* Architecture-specific registers */
-
-#define OMAP24XX_CM_FCLKEN2				0x0004
-#define OMAP24XX_CM_ICLKEN4				0x001c
-#define OMAP24XX_CM_AUTOIDLE4				0x003c
-
-#define OMAP2430_CM_IDLEST3				0x0028
-
-#define OMAP3430_CM_CLKEN_PLL				0x0004
-#define OMAP3430ES2_CM_CLKEN2				0x0004
-#define OMAP3430ES2_CM_FCLKEN3				0x0008
-#define OMAP3430_CM_IDLEST_PLL				CM_IDLEST2
-#define OMAP3430_CM_AUTOIDLE_PLL			CM_AUTOIDLE2
-#define OMAP3430ES2_CM_AUTOIDLE2_PLL			CM_AUTOIDLE2
-#define OMAP3430_CM_CLKSEL1				CM_CLKSEL
-#define OMAP3430_CM_CLKSEL1_PLL				CM_CLKSEL
-#define OMAP3430_CM_CLKSEL2_PLL				CM_CLKSEL2
-#define OMAP3430_CM_SLEEPDEP				CM_CLKSEL2
-#define OMAP3430_CM_CLKSEL3				OMAP2_CM_CLKSTCTRL
-#define OMAP3430_CM_CLKSTST				0x004c
-#define OMAP3430ES2_CM_CLKSEL4				0x004c
-#define OMAP3430ES2_CM_CLKSEL5				0x0050
-#define OMAP3430_CM_CLKSEL2_EMU				0x0050
-#define OMAP3430_CM_CLKSEL3_EMU				0x0054
-
-/* CM2.CEFUSE_CM2 register offsets */
-
-/* OMAP4 modulemode control */
-#define OMAP4430_MODULEMODE_HWCTRL			0
-#define OMAP4430_MODULEMODE_SWCTRL			1
-
-/* Clock management domain register get/set */
-
-#ifndef __ASSEMBLER__
-
-extern u32 cm_read_mod_reg(s16 module, u16 idx);
-extern void cm_write_mod_reg(u32 val, s16 module, u16 idx);
-extern u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
-
-extern int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id,
-				      u8 idlest_shift);
-extern int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg);
-
-static inline u32 cm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
-{
-	return cm_rmw_mod_reg_bits(bits, bits, module, idx);
-}
-
-static inline u32 cm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
-{
-	return cm_rmw_mod_reg_bits(bits, 0x0, module, idx);
-}
-
-#endif
-
-/* CM register bits shared between 24XX and 3430 */
-
-/* CM_CLKSEL_GFX */
-#define OMAP_CLKSEL_GFX_SHIFT				0
-#define OMAP_CLKSEL_GFX_MASK				(0x7 << 0)
-
-/* CM_ICLKEN_GFX */
-#define OMAP_EN_GFX_SHIFT				0
-#define OMAP_EN_GFX_MASK				(1 << 0)
-
-/* CM_IDLEST_GFX */
-#define OMAP_ST_GFX_MASK				(1 << 0)
-
-
-/* CM_IDLEST indicator */
-#define OMAP24XX_CM_IDLEST_VAL		0
-#define OMAP34XX_CM_IDLEST_VAL		1
+#ifndef __ARCH_ASM_MACH_OMAP2_CM_H
+#define __ARCH_ASM_MACH_OMAP2_CM_H
 
 /*
  * MAX_MODULE_READY_TIME: max duration in microseconds to wait for the
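
Editorial aside, not part of the patch: the retained MAX_MODULE_READY_TIME comment bounds the busy-wait that omap2_cm_wait_module_ready(), moved into cm2xxx_3xxx.c later in this diff, performs via omap_test_timeout(). A hedged host-side sketch of that bounded-poll shape, with a stubbed readiness check and an illustrative budget in place of the real CM register access:

#include <stdbool.h>
#include <stdio.h>

#define EX_MAX_MODULE_READY_TIME	2000	/* illustrative microsecond budget */

/* Stub: pretend the module reports ready after a few polls. */
static bool example_module_ready(void)
{
	static int polls;

	return ++polls >= 5;
}

/* Poll until ready or until the budget is spent (one poll stands in for ~1 us). */
static int example_wait_module_ready(void)
{
	int i;

	for (i = 0; i < EX_MAX_MODULE_READY_TIME; i++) {
		if (example_module_ready())
			return 0;
		/* the kernel helper delays 1 us per iteration; omitted in this host sketch */
	}

	return -16;	/* -EBUSY */
}

int main(void)
{
	printf("wait_module_ready -> %d\n", example_wait_module_ready());
	return 0;
}
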
diff --git a/arch/arm/mach-omap2/cm1_44xx.h b/arch/arm/mach-omap2/cm1_44xx.h
new file mode 100644
index 0000000..e2d7a56
--- /dev/null
+++ b/arch/arm/mach-omap2/cm1_44xx.h
@@ -0,0 +1,261 @@
+/*
+ * OMAP44xx CM1 instance offset macros
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX This file needs to be updated to align on one of "OMAP4", "OMAP44XX",
+ *     or "OMAP4430".
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM1_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM1_44XX_H
+
+/* CM1 base address */
+#define OMAP4430_CM1_BASE		0x4a004000
+
+#define OMAP44XX_CM1_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(OMAP4430_CM1_BASE + (inst) + (reg))
+
+/* CM1 instances */
+#define OMAP4430_CM1_OCP_SOCKET_INST	0x0000
+#define OMAP4430_CM1_CKGEN_INST		0x0100
+#define OMAP4430_CM1_MPU_INST		0x0300
+#define OMAP4430_CM1_TESLA_INST		0x0400
+#define OMAP4430_CM1_ABE_INST		0x0500
+#define OMAP4430_CM1_RESTORE_INST	0x0e00
+#define OMAP4430_CM1_INSTR_INST		0x0f00
+
+/* CM1 clockdomain register offsets (from instance start) */
+#define OMAP4430_CM1_ABE_ABE_CDOFFS		0x0000
+#define OMAP4430_CM1_MPU_MPU_CDOFFS		0x0000
+#define OMAP4430_CM1_TESLA_TESLA_CDOFFS		0x0000
+
+/* CM1 */
+
+/* CM1.OCP_SOCKET_CM1 register offsets */
+#define OMAP4_REVISION_CM1_OFFSET			0x0000
+#define OMAP4430_REVISION_CM1				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_INST, 0x0000)
+#define OMAP4_CM_CM1_PROFILING_CLKCTRL_OFFSET		0x0040
+#define OMAP4430_CM_CM1_PROFILING_CLKCTRL		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_INST, 0x0040)
+
+/* CM1.CKGEN_CM1 register offsets */
+#define OMAP4_CM_CLKSEL_CORE_OFFSET			0x0000
+#define OMAP4430_CM_CLKSEL_CORE				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0000)
+#define OMAP4_CM_CLKSEL_ABE_OFFSET			0x0008
+#define OMAP4430_CM_CLKSEL_ABE				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0008)
+#define OMAP4_CM_DLL_CTRL_OFFSET			0x0010
+#define OMAP4430_CM_DLL_CTRL				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0010)
+#define OMAP4_CM_CLKMODE_DPLL_CORE_OFFSET		0x0020
+#define OMAP4430_CM_CLKMODE_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0020)
+#define OMAP4_CM_IDLEST_DPLL_CORE_OFFSET		0x0024
+#define OMAP4430_CM_IDLEST_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0024)
+#define OMAP4_CM_AUTOIDLE_DPLL_CORE_OFFSET		0x0028
+#define OMAP4430_CM_AUTOIDLE_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0028)
+#define OMAP4_CM_CLKSEL_DPLL_CORE_OFFSET		0x002c
+#define OMAP4430_CM_CLKSEL_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x002c)
+#define OMAP4_CM_DIV_M2_DPLL_CORE_OFFSET		0x0030
+#define OMAP4430_CM_DIV_M2_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0030)
+#define OMAP4_CM_DIV_M3_DPLL_CORE_OFFSET		0x0034
+#define OMAP4430_CM_DIV_M3_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0034)
+#define OMAP4_CM_DIV_M4_DPLL_CORE_OFFSET		0x0038
+#define OMAP4430_CM_DIV_M4_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0038)
+#define OMAP4_CM_DIV_M5_DPLL_CORE_OFFSET		0x003c
+#define OMAP4430_CM_DIV_M5_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x003c)
+#define OMAP4_CM_DIV_M6_DPLL_CORE_OFFSET		0x0040
+#define OMAP4430_CM_DIV_M6_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0040)
+#define OMAP4_CM_DIV_M7_DPLL_CORE_OFFSET		0x0044
+#define OMAP4430_CM_DIV_M7_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0044)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET	0x0048
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0048)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_OFFSET	0x004c
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x004c)
+#define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET		0x0050
+#define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0050)
+#define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET		0x0060
+#define OMAP4430_CM_CLKMODE_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0060)
+#define OMAP4_CM_IDLEST_DPLL_MPU_OFFSET			0x0064
+#define OMAP4430_CM_IDLEST_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0064)
+#define OMAP4_CM_AUTOIDLE_DPLL_MPU_OFFSET		0x0068
+#define OMAP4430_CM_AUTOIDLE_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0068)
+#define OMAP4_CM_CLKSEL_DPLL_MPU_OFFSET			0x006c
+#define OMAP4430_CM_CLKSEL_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x006c)
+#define OMAP4_CM_DIV_M2_DPLL_MPU_OFFSET			0x0070
+#define OMAP4430_CM_DIV_M2_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0070)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET		0x0088
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0088)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_MPU_OFFSET		0x008c
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_MPU		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x008c)
+#define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET			0x009c
+#define OMAP4430_CM_BYPCLK_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x009c)
+#define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET		0x00a0
+#define OMAP4430_CM_CLKMODE_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00a0)
+#define OMAP4_CM_IDLEST_DPLL_IVA_OFFSET			0x00a4
+#define OMAP4430_CM_IDLEST_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00a4)
+#define OMAP4_CM_AUTOIDLE_DPLL_IVA_OFFSET		0x00a8
+#define OMAP4430_CM_AUTOIDLE_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00a8)
+#define OMAP4_CM_CLKSEL_DPLL_IVA_OFFSET			0x00ac
+#define OMAP4430_CM_CLKSEL_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00ac)
+#define OMAP4_CM_DIV_M4_DPLL_IVA_OFFSET			0x00b8
+#define OMAP4430_CM_DIV_M4_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00b8)
+#define OMAP4_CM_DIV_M5_DPLL_IVA_OFFSET			0x00bc
+#define OMAP4430_CM_DIV_M5_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00bc)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET		0x00c8
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00c8)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_IVA_OFFSET		0x00cc
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_IVA		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00cc)
+#define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET			0x00dc
+#define OMAP4430_CM_BYPCLK_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00dc)
+#define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET		0x00e0
+#define OMAP4430_CM_CLKMODE_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00e0)
+#define OMAP4_CM_IDLEST_DPLL_ABE_OFFSET			0x00e4
+#define OMAP4430_CM_IDLEST_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00e4)
+#define OMAP4_CM_AUTOIDLE_DPLL_ABE_OFFSET		0x00e8
+#define OMAP4430_CM_AUTOIDLE_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00e8)
+#define OMAP4_CM_CLKSEL_DPLL_ABE_OFFSET			0x00ec
+#define OMAP4430_CM_CLKSEL_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00ec)
+#define OMAP4_CM_DIV_M2_DPLL_ABE_OFFSET			0x00f0
+#define OMAP4430_CM_DIV_M2_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00f0)
+#define OMAP4_CM_DIV_M3_DPLL_ABE_OFFSET			0x00f4
+#define OMAP4430_CM_DIV_M3_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x00f4)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET		0x0108
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0108)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_ABE_OFFSET		0x010c
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_ABE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x010c)
+#define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET		0x0120
+#define OMAP4430_CM_CLKMODE_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0120)
+#define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET		0x0124
+#define OMAP4430_CM_IDLEST_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0124)
+#define OMAP4_CM_AUTOIDLE_DPLL_DDRPHY_OFFSET		0x0128
+#define OMAP4430_CM_AUTOIDLE_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0128)
+#define OMAP4_CM_CLKSEL_DPLL_DDRPHY_OFFSET		0x012c
+#define OMAP4430_CM_CLKSEL_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x012c)
+#define OMAP4_CM_DIV_M2_DPLL_DDRPHY_OFFSET		0x0130
+#define OMAP4430_CM_DIV_M2_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0130)
+#define OMAP4_CM_DIV_M4_DPLL_DDRPHY_OFFSET		0x0138
+#define OMAP4430_CM_DIV_M4_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0138)
+#define OMAP4_CM_DIV_M5_DPLL_DDRPHY_OFFSET		0x013c
+#define OMAP4430_CM_DIV_M5_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x013c)
+#define OMAP4_CM_DIV_M6_DPLL_DDRPHY_OFFSET		0x0140
+#define OMAP4430_CM_DIV_M6_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0140)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET	0x0148
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0148)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_DDRPHY_OFFSET	0x014c
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x014c)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET		0x0160
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG1			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0160)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET		0x0164
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG2			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0164)
+#define OMAP4_CM_DYN_DEP_PRESCAL_OFFSET			0x0170
+#define OMAP4430_CM_DYN_DEP_PRESCAL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0170)
+#define OMAP4_CM_RESTORE_ST_OFFSET			0x0180
+#define OMAP4430_CM_RESTORE_ST				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_INST, 0x0180)
+
+/* CM1.MPU_CM1 register offsets */
+#define OMAP4_CM_MPU_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_MPU_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_INST, 0x0000)
+#define OMAP4_CM_MPU_STATICDEP_OFFSET			0x0004
+#define OMAP4430_CM_MPU_STATICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_INST, 0x0004)
+#define OMAP4_CM_MPU_DYNAMICDEP_OFFSET			0x0008
+#define OMAP4430_CM_MPU_DYNAMICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_INST, 0x0008)
+#define OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET			0x0020
+#define OMAP4430_CM_MPU_MPU_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_INST, 0x0020)
+
+/* CM1.TESLA_CM1 register offsets */
+#define OMAP4_CM_TESLA_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_TESLA_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_INST, 0x0000)
+#define OMAP4_CM_TESLA_STATICDEP_OFFSET			0x0004
+#define OMAP4430_CM_TESLA_STATICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_INST, 0x0004)
+#define OMAP4_CM_TESLA_DYNAMICDEP_OFFSET		0x0008
+#define OMAP4430_CM_TESLA_DYNAMICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_INST, 0x0008)
+#define OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM_TESLA_TESLA_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_INST, 0x0020)
+
+/* CM1.ABE_CM1 register offsets */
+#define OMAP4_CM1_ABE_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM1_ABE_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0000)
+#define OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM1_ABE_L4ABE_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0020)
+#define OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM1_ABE_AESS_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0028)
+#define OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET		0x0030
+#define OMAP4430_CM1_ABE_PDM_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0030)
+#define OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET		0x0038
+#define OMAP4430_CM1_ABE_DMIC_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0038)
+#define OMAP4_CM1_ABE_MCASP_CLKCTRL_OFFSET		0x0040
+#define OMAP4430_CM1_ABE_MCASP_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0040)
+#define OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET		0x0048
+#define OMAP4430_CM1_ABE_MCBSP1_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0048)
+#define OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET		0x0050
+#define OMAP4430_CM1_ABE_MCBSP2_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0050)
+#define OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET		0x0058
+#define OMAP4430_CM1_ABE_MCBSP3_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0058)
+#define OMAP4_CM1_ABE_SLIMBUS_CLKCTRL_OFFSET		0x0060
+#define OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0060)
+#define OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET		0x0068
+#define OMAP4430_CM1_ABE_TIMER5_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0068)
+#define OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET		0x0070
+#define OMAP4430_CM1_ABE_TIMER6_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0070)
+#define OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET		0x0078
+#define OMAP4430_CM1_ABE_TIMER7_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0078)
+#define OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET		0x0080
+#define OMAP4430_CM1_ABE_TIMER8_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0080)
+#define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET		0x0088
+#define OMAP4430_CM1_ABE_WDT3_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_INST, 0x0088)
+
+/* CM1.RESTORE_CM1 register offsets */
+#define OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET		0x0000
+#define OMAP4430_CM_CLKSEL_CORE_RESTORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0000)
+#define OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET	0x0004
+#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0004)
+#define OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET	0x0008
+#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0008)
+#define OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET	0x000c
+#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x000c)
+#define OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET	0x0010
+#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0010)
+#define OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET	0x0014
+#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0014)
+#define OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET	0x0018
+#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0018)
+#define OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET	0x001c
+#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x001c)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET	0x0020
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0020)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE_OFFSET	0x0024
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_CORE_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0024)
+#define OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET	0x0028
+#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0028)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET	0x002c
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG2_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x002c)
+#define OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET	0x0030
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0030)
+#define OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET	0x0034
+#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0034)
+#define OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET		0x0038
+#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0038)
+#define OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET	0x003c
+#define OMAP4430_CM_CM1_PROFILING_CLKCTRL_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x003c)
+#define OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET		0x0040
+#define OMAP4430_CM_DYN_DEP_PRESCAL_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_INST, 0x0040)
+
+/* Function prototypes */
+extern u32 omap4_cm1_read_inst_reg(s16 inst, u16 idx);
+extern void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 idx);
+extern u32 omap4_cm1_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
+
+#endif
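
Editorial aside, not part of the patch: the OMAP44XX_CM1_REGADDR() macro composes an address from the CM1 base, the instance offset, and the register offset. A minimal host-side sketch of that composition using the physical-address form only; the real macro additionally converts the result to a virtual address with OMAP2_L4_IO_ADDRESS():

#include <stdio.h>

#define EX_CM1_BASE		0x4a004000u	/* mirrors OMAP4430_CM1_BASE */
#define EX_CM1_MPU_INST		0x0300u		/* mirrors OMAP4430_CM1_MPU_INST */
#define EX_MPU_CLKCTRL_OFF	0x0020u		/* mirrors OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET */

/* Physical-address form of the composition done by OMAP44XX_CM1_REGADDR(). */
#define EX_CM1_REGADDR(inst, reg)	(EX_CM1_BASE + (inst) + (reg))

int main(void)
{
	/* 0x4a004000 + 0x0300 + 0x0020 = 0x4a004320 */
	printf("CM_MPU_MPU_CLKCTRL at physical 0x%08x\n",
	       EX_CM1_REGADDR(EX_CM1_MPU_INST, EX_MPU_CLKCTRL_OFF));
	return 0;
}
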
diff --git a/arch/arm/mach-omap2/cm2_44xx.h b/arch/arm/mach-omap2/cm2_44xx.h
new file mode 100644
index 0000000..aa47450
--- /dev/null
+++ b/arch/arm/mach-omap2/cm2_44xx.h
@@ -0,0 +1,508 @@
+/*
+ * OMAP44xx CM2 instance offset macros
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX This file needs to be updated to align on one of "OMAP4", "OMAP44XX",
+ *     or "OMAP4430".
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM2_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM2_44XX_H
+
+/* CM2 base address */
+#define OMAP4430_CM2_BASE		0x4a008000
+
+#define OMAP44XX_CM2_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(OMAP4430_CM2_BASE + (inst) + (reg))
+
+/* CM2 instances */
+#define OMAP4430_CM2_OCP_SOCKET_INST	0x0000
+#define OMAP4430_CM2_CKGEN_INST		0x0100
+#define OMAP4430_CM2_ALWAYS_ON_INST	0x0600
+#define OMAP4430_CM2_CORE_INST		0x0700
+#define OMAP4430_CM2_IVAHD_INST		0x0f00
+#define OMAP4430_CM2_CAM_INST		0x1000
+#define OMAP4430_CM2_DSS_INST		0x1100
+#define OMAP4430_CM2_GFX_INST		0x1200
+#define OMAP4430_CM2_L3INIT_INST		0x1300
+#define OMAP4430_CM2_L4PER_INST		0x1400
+#define OMAP4430_CM2_CEFUSE_INST		0x1600
+#define OMAP4430_CM2_RESTORE_INST	0x1e00
+#define OMAP4430_CM2_INSTR_INST		0x1f00
+
+/* CM2 clockdomain register offsets (from instance start) */
+#define OMAP4430_CM2_ALWAYS_ON_ALWON_CDOFFS	0x0000
+#define OMAP4430_CM2_CORE_L3_1_CDOFFS		0x0000
+#define OMAP4430_CM2_CORE_L3_2_CDOFFS		0x0100
+#define OMAP4430_CM2_CORE_DUCATI_CDOFFS		0x0200
+#define OMAP4430_CM2_CORE_SDMA_CDOFFS		0x0300
+#define OMAP4430_CM2_CORE_MEMIF_CDOFFS		0x0400
+#define OMAP4430_CM2_CORE_D2D_CDOFFS		0x0500
+#define OMAP4430_CM2_CORE_L4CFG_CDOFFS		0x0600
+#define OMAP4430_CM2_CORE_L3INSTR_CDOFFS	0x0700
+#define OMAP4430_CM2_IVAHD_IVAHD_CDOFFS		0x0000
+#define OMAP4430_CM2_CAM_CAM_CDOFFS		0x0000
+#define OMAP4430_CM2_DSS_DSS_CDOFFS		0x0000
+#define OMAP4430_CM2_GFX_GFX_CDOFFS		0x0000
+#define OMAP4430_CM2_L3INIT_L3INIT_CDOFFS	0x0000
+#define OMAP4430_CM2_L4PER_L4PER_CDOFFS		0x0000
+#define OMAP4430_CM2_L4PER_L4SEC_CDOFFS		0x0180
+#define OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS	0x0000
+
+
+/* CM2 */
+
+/* CM2.OCP_SOCKET_CM2 register offsets */
+#define OMAP4_REVISION_CM2_OFFSET			0x0000
+#define OMAP4430_REVISION_CM2				OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_INST, 0x0000)
+#define OMAP4_CM_CM2_PROFILING_CLKCTRL_OFFSET		0x0040
+#define OMAP4430_CM_CM2_PROFILING_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_INST, 0x0040)
+
+/* CM2.CKGEN_CM2 register offsets */
+#define OMAP4_CM_CLKSEL_DUCATI_ISS_ROOT_OFFSET		0x0000
+#define OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0000)
+#define OMAP4_CM_CLKSEL_USB_60MHZ_OFFSET		0x0004
+#define OMAP4430_CM_CLKSEL_USB_60MHZ			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0004)
+#define OMAP4_CM_SCALE_FCLK_OFFSET			0x0008
+#define OMAP4430_CM_SCALE_FCLK				OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0008)
+#define OMAP4_CM_CORE_DVFS_PERF1_OFFSET			0x0010
+#define OMAP4430_CM_CORE_DVFS_PERF1			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0010)
+#define OMAP4_CM_CORE_DVFS_PERF2_OFFSET			0x0014
+#define OMAP4430_CM_CORE_DVFS_PERF2			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0014)
+#define OMAP4_CM_CORE_DVFS_PERF3_OFFSET			0x0018
+#define OMAP4430_CM_CORE_DVFS_PERF3			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0018)
+#define OMAP4_CM_CORE_DVFS_PERF4_OFFSET			0x001c
+#define OMAP4430_CM_CORE_DVFS_PERF4			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x001c)
+#define OMAP4_CM_CORE_DVFS_CURRENT_OFFSET		0x0024
+#define OMAP4430_CM_CORE_DVFS_CURRENT			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0024)
+#define OMAP4_CM_IVA_DVFS_PERF_TESLA_OFFSET		0x0028
+#define OMAP4430_CM_IVA_DVFS_PERF_TESLA			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0028)
+#define OMAP4_CM_IVA_DVFS_PERF_IVAHD_OFFSET		0x002c
+#define OMAP4430_CM_IVA_DVFS_PERF_IVAHD			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x002c)
+#define OMAP4_CM_IVA_DVFS_PERF_ABE_OFFSET		0x0030
+#define OMAP4430_CM_IVA_DVFS_PERF_ABE			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0030)
+#define OMAP4_CM_IVA_DVFS_CURRENT_OFFSET		0x0038
+#define OMAP4430_CM_IVA_DVFS_CURRENT			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0038)
+#define OMAP4_CM_CLKMODE_DPLL_PER_OFFSET		0x0040
+#define OMAP4430_CM_CLKMODE_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0040)
+#define OMAP4_CM_IDLEST_DPLL_PER_OFFSET			0x0044
+#define OMAP4430_CM_IDLEST_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0044)
+#define OMAP4_CM_AUTOIDLE_DPLL_PER_OFFSET		0x0048
+#define OMAP4430_CM_AUTOIDLE_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0048)
+#define OMAP4_CM_CLKSEL_DPLL_PER_OFFSET			0x004c
+#define OMAP4430_CM_CLKSEL_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x004c)
+#define OMAP4_CM_DIV_M2_DPLL_PER_OFFSET			0x0050
+#define OMAP4430_CM_DIV_M2_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0050)
+#define OMAP4_CM_DIV_M3_DPLL_PER_OFFSET			0x0054
+#define OMAP4430_CM_DIV_M3_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0054)
+#define OMAP4_CM_DIV_M4_DPLL_PER_OFFSET			0x0058
+#define OMAP4430_CM_DIV_M4_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0058)
+#define OMAP4_CM_DIV_M5_DPLL_PER_OFFSET			0x005c
+#define OMAP4430_CM_DIV_M5_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x005c)
+#define OMAP4_CM_DIV_M6_DPLL_PER_OFFSET			0x0060
+#define OMAP4430_CM_DIV_M6_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0060)
+#define OMAP4_CM_DIV_M7_DPLL_PER_OFFSET			0x0064
+#define OMAP4430_CM_DIV_M7_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0064)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET		0x0068
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0068)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_PER_OFFSET		0x006c
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x006c)
+#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET		0x0080
+#define OMAP4430_CM_CLKMODE_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0080)
+#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET			0x0084
+#define OMAP4430_CM_IDLEST_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0084)
+#define OMAP4_CM_AUTOIDLE_DPLL_USB_OFFSET		0x0088
+#define OMAP4430_CM_AUTOIDLE_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0088)
+#define OMAP4_CM_CLKSEL_DPLL_USB_OFFSET			0x008c
+#define OMAP4430_CM_CLKSEL_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x008c)
+#define OMAP4_CM_DIV_M2_DPLL_USB_OFFSET			0x0090
+#define OMAP4430_CM_DIV_M2_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x0090)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET		0x00a8
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00a8)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_USB_OFFSET		0x00ac
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_USB		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ac)
+#define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET		0x00b4
+#define OMAP4430_CM_CLKDCOLDO_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00b4)
+#define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET		0x00c0
+#define OMAP4430_CM_CLKMODE_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00c0)
+#define OMAP4_CM_IDLEST_DPLL_UNIPRO_OFFSET		0x00c4
+#define OMAP4430_CM_IDLEST_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00c4)
+#define OMAP4_CM_AUTOIDLE_DPLL_UNIPRO_OFFSET		0x00c8
+#define OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00c8)
+#define OMAP4_CM_CLKSEL_DPLL_UNIPRO_OFFSET		0x00cc
+#define OMAP4430_CM_CLKSEL_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00cc)
+#define OMAP4_CM_DIV_M2_DPLL_UNIPRO_OFFSET		0x00d0
+#define OMAP4430_CM_DIV_M2_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00d0)
+#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET	0x00e8
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00e8)
+#define OMAP4_CM_SSC_INSTFREQDIV_DPLL_UNIPRO_OFFSET	0x00ec
+#define OMAP4430_CM_SSC_INSTFREQDIV_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_INST, 0x00ec)
+
+/* CM2.ALWAYS_ON_CM2 register offsets */
+#define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_ALWON_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_INST, 0x0000)
+#define OMAP4_CM_ALWON_MDMINTC_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM_ALWON_MDMINTC_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_INST, 0x0020)
+#define OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM_ALWON_SR_MPU_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_INST, 0x0028)
+#define OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET		0x0030
+#define OMAP4430_CM_ALWON_SR_IVA_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_INST, 0x0030)
+#define OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET		0x0038
+#define OMAP4430_CM_ALWON_SR_CORE_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_INST, 0x0038)
+#define OMAP4_CM_ALWON_USBPHY_CLKCTRL_OFFSET		0x0040
+#define OMAP4430_CM_ALWON_USBPHY_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_INST, 0x0040)
+
+/* CM2.CORE_CM2 register offsets */
+#define OMAP4_CM_L3_1_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_L3_1_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0000)
+#define OMAP4_CM_L3_1_DYNAMICDEP_OFFSET			0x0008
+#define OMAP4430_CM_L3_1_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0008)
+#define OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM_L3_1_L3_1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0020)
+#define OMAP4_CM_L3_2_CLKSTCTRL_OFFSET			0x0100
+#define OMAP4430_CM_L3_2_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0100)
+#define OMAP4_CM_L3_2_DYNAMICDEP_OFFSET			0x0108
+#define OMAP4430_CM_L3_2_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0108)
+#define OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET		0x0120
+#define OMAP4430_CM_L3_2_L3_2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0120)
+#define OMAP4_CM_L3_2_GPMC_CLKCTRL_OFFSET		0x0128
+#define OMAP4430_CM_L3_2_GPMC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0128)
+#define OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET		0x0130
+#define OMAP4430_CM_L3_2_OCMC_RAM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0130)
+#define OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET		0x0200
+#define OMAP4430_CM_DUCATI_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0200)
+#define OMAP4_CM_DUCATI_STATICDEP_OFFSET		0x0204
+#define OMAP4430_CM_DUCATI_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0204)
+#define OMAP4_CM_DUCATI_DYNAMICDEP_OFFSET		0x0208
+#define OMAP4430_CM_DUCATI_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0208)
+#define OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET		0x0220
+#define OMAP4430_CM_DUCATI_DUCATI_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0220)
+#define OMAP4_CM_SDMA_CLKSTCTRL_OFFSET			0x0300
+#define OMAP4430_CM_SDMA_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0300)
+#define OMAP4_CM_SDMA_STATICDEP_OFFSET			0x0304
+#define OMAP4430_CM_SDMA_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0304)
+#define OMAP4_CM_SDMA_DYNAMICDEP_OFFSET			0x0308
+#define OMAP4430_CM_SDMA_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0308)
+#define OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET		0x0320
+#define OMAP4430_CM_SDMA_SDMA_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0320)
+#define OMAP4_CM_MEMIF_CLKSTCTRL_OFFSET			0x0400
+#define OMAP4430_CM_MEMIF_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0400)
+#define OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET		0x0420
+#define OMAP4430_CM_MEMIF_DMM_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0420)
+#define OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET		0x0428
+#define OMAP4430_CM_MEMIF_EMIF_FW_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0428)
+#define OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET		0x0430
+#define OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0430)
+#define OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET		0x0438
+#define OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0438)
+#define OMAP4_CM_MEMIF_DLL_CLKCTRL_OFFSET		0x0440
+#define OMAP4430_CM_MEMIF_DLL_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0440)
+#define OMAP4_CM_MEMIF_EMIF_H1_CLKCTRL_OFFSET		0x0450
+#define OMAP4430_CM_MEMIF_EMIF_H1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0450)
+#define OMAP4_CM_MEMIF_EMIF_H2_CLKCTRL_OFFSET		0x0458
+#define OMAP4430_CM_MEMIF_EMIF_H2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0458)
+#define OMAP4_CM_MEMIF_DLL_H_CLKCTRL_OFFSET		0x0460
+#define OMAP4430_CM_MEMIF_DLL_H_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0460)
+#define OMAP4_CM_D2D_CLKSTCTRL_OFFSET			0x0500
+#define OMAP4430_CM_D2D_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0500)
+#define OMAP4_CM_D2D_STATICDEP_OFFSET			0x0504
+#define OMAP4430_CM_D2D_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0504)
+#define OMAP4_CM_D2D_DYNAMICDEP_OFFSET			0x0508
+#define OMAP4430_CM_D2D_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0508)
+#define OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET		0x0520
+#define OMAP4430_CM_D2D_SAD2D_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0520)
+#define OMAP4_CM_D2D_INSTEM_ICR_CLKCTRL_OFFSET		0x0528
+#define OMAP4430_CM_D2D_INSTEM_ICR_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0528)
+#define OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET		0x0530
+#define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0530)
+#define OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET			0x0600
+#define OMAP4430_CM_L4CFG_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0600)
+#define OMAP4_CM_L4CFG_DYNAMICDEP_OFFSET		0x0608
+#define OMAP4430_CM_L4CFG_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0608)
+#define OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET		0x0620
+#define OMAP4430_CM_L4CFG_L4_CFG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0620)
+#define OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET		0x0628
+#define OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0628)
+#define OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET		0x0630
+#define OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0630)
+#define OMAP4_CM_L4CFG_SAR_ROM_CLKCTRL_OFFSET		0x0638
+#define OMAP4430_CM_L4CFG_SAR_ROM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0638)
+#define OMAP4_CM_L3INSTR_CLKSTCTRL_OFFSET		0x0700
+#define OMAP4430_CM_L3INSTR_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0700)
+#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET		0x0720
+#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0720)
+#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET	0x0728
+#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0728)
+#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET		0x0740
+#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_INST, 0x0740)
+
+/* CM2.IVAHD_CM2 register offsets */
+#define OMAP4_CM_IVAHD_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_IVAHD_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_INST, 0x0000)
+#define OMAP4_CM_IVAHD_STATICDEP_OFFSET			0x0004
+#define OMAP4430_CM_IVAHD_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_INST, 0x0004)
+#define OMAP4_CM_IVAHD_DYNAMICDEP_OFFSET		0x0008
+#define OMAP4430_CM_IVAHD_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_INST, 0x0008)
+#define OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM_IVAHD_IVAHD_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_INST, 0x0020)
+#define OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM_IVAHD_SL2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_INST, 0x0028)
+
+/* CM2.CAM_CM2 register offsets */
+#define OMAP4_CM_CAM_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_CAM_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_INST, 0x0000)
+#define OMAP4_CM_CAM_STATICDEP_OFFSET			0x0004
+#define OMAP4430_CM_CAM_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_INST, 0x0004)
+#define OMAP4_CM_CAM_DYNAMICDEP_OFFSET			0x0008
+#define OMAP4430_CM_CAM_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_INST, 0x0008)
+#define OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET			0x0020
+#define OMAP4430_CM_CAM_ISS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_INST, 0x0020)
+#define OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM_CAM_FDIF_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_INST, 0x0028)
+
+/* CM2.DSS_CM2 register offsets */
+#define OMAP4_CM_DSS_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_DSS_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_INST, 0x0000)
+#define OMAP4_CM_DSS_STATICDEP_OFFSET			0x0004
+#define OMAP4430_CM_DSS_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_INST, 0x0004)
+#define OMAP4_CM_DSS_DYNAMICDEP_OFFSET			0x0008
+#define OMAP4430_CM_DSS_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_INST, 0x0008)
+#define OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET			0x0020
+#define OMAP4430_CM_DSS_DSS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_INST, 0x0020)
+#define OMAP4_CM_DSS_DEISS_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM_DSS_DEISS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_INST, 0x0028)
+
+/* CM2.GFX_CM2 register offsets */
+#define OMAP4_CM_GFX_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_GFX_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_INST, 0x0000)
+#define OMAP4_CM_GFX_STATICDEP_OFFSET			0x0004
+#define OMAP4430_CM_GFX_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_INST, 0x0004)
+#define OMAP4_CM_GFX_DYNAMICDEP_OFFSET			0x0008
+#define OMAP4430_CM_GFX_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_INST, 0x0008)
+#define OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET			0x0020
+#define OMAP4430_CM_GFX_GFX_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_INST, 0x0020)
+
+/* CM2.L3INIT_CM2 register offsets */
+#define OMAP4_CM_L3INIT_CLKSTCTRL_OFFSET		0x0000
+#define OMAP4430_CM_L3INIT_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0000)
+#define OMAP4_CM_L3INIT_STATICDEP_OFFSET		0x0004
+#define OMAP4430_CM_L3INIT_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0004)
+#define OMAP4_CM_L3INIT_DYNAMICDEP_OFFSET		0x0008
+#define OMAP4430_CM_L3INIT_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0008)
+#define OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM_L3INIT_MMC1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0028)
+#define OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET		0x0030
+#define OMAP4430_CM_L3INIT_MMC2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0030)
+#define OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET		0x0038
+#define OMAP4430_CM_L3INIT_HSI_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0038)
+#define OMAP4_CM_L3INIT_UNIPRO1_CLKCTRL_OFFSET		0x0040
+#define OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0040)
+#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET		0x0058
+#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0058)
+#define OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET		0x0060
+#define OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0060)
+#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET		0x0068
+#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0068)
+#define OMAP4_CM_L3INIT_P1500_CLKCTRL_OFFSET		0x0078
+#define OMAP4430_CM_L3INIT_P1500_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0078)
+#define OMAP4_CM_L3INIT_EMAC_CLKCTRL_OFFSET		0x0080
+#define OMAP4430_CM_L3INIT_EMAC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0080)
+#define OMAP4_CM_L3INIT_SATA_CLKCTRL_OFFSET		0x0088
+#define OMAP4430_CM_L3INIT_SATA_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0088)
+#define OMAP4_CM_L3INIT_TPPSS_CLKCTRL_OFFSET		0x0090
+#define OMAP4430_CM_L3INIT_TPPSS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0090)
+#define OMAP4_CM_L3INIT_PCIESS_CLKCTRL_OFFSET		0x0098
+#define OMAP4430_CM_L3INIT_PCIESS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x0098)
+#define OMAP4_CM_L3INIT_CCPTX_CLKCTRL_OFFSET		0x00a8
+#define OMAP4430_CM_L3INIT_CCPTX_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x00a8)
+#define OMAP4_CM_L3INIT_XHPI_CLKCTRL_OFFSET		0x00c0
+#define OMAP4430_CM_L3INIT_XHPI_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x00c0)
+#define OMAP4_CM_L3INIT_MMC6_CLKCTRL_OFFSET		0x00c8
+#define OMAP4430_CM_L3INIT_MMC6_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x00c8)
+#define OMAP4_CM_L3INIT_USB_HOST_FS_CLKCTRL_OFFSET	0x00d0
+#define OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x00d0)
+#define OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET	0x00e0
+#define OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_INST, 0x00e0)
+
+/* CM2.L4PER_CM2 register offsets */
+#define OMAP4_CM_L4PER_CLKSTCTRL_OFFSET			0x0000
+#define OMAP4430_CM_L4PER_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0000)
+#define OMAP4_CM_L4PER_DYNAMICDEP_OFFSET		0x0008
+#define OMAP4430_CM_L4PER_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0008)
+#define OMAP4_CM_L4PER_ADC_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM_L4PER_ADC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0020)
+#define OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET		0x0028
+#define OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0028)
+#define OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET		0x0030
+#define OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0030)
+#define OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET		0x0038
+#define OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0038)
+#define OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET		0x0040
+#define OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0040)
+#define OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET		0x0048
+#define OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0048)
+#define OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET		0x0050
+#define OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0050)
+#define OMAP4_CM_L4PER_ELM_CLKCTRL_OFFSET		0x0058
+#define OMAP4430_CM_L4PER_ELM_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0058)
+#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET		0x0060
+#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0060)
+#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET		0x0068
+#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0068)
+#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET		0x0070
+#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0070)
+#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET		0x0078
+#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0078)
+#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET		0x0080
+#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0080)
+#define OMAP4_CM_L4PER_HDQ1W_CLKCTRL_OFFSET		0x0088
+#define OMAP4430_CM_L4PER_HDQ1W_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0088)
+#define OMAP4_CM_L4PER_HECC1_CLKCTRL_OFFSET		0x0090
+#define OMAP4430_CM_L4PER_HECC1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0090)
+#define OMAP4_CM_L4PER_HECC2_CLKCTRL_OFFSET		0x0098
+#define OMAP4430_CM_L4PER_HECC2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0098)
+#define OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET		0x00a0
+#define OMAP4430_CM_L4PER_I2C1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00a0)
+#define OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET		0x00a8
+#define OMAP4430_CM_L4PER_I2C2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00a8)
+#define OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET		0x00b0
+#define OMAP4430_CM_L4PER_I2C3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00b0)
+#define OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET		0x00b8
+#define OMAP4430_CM_L4PER_I2C4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00b8)
+#define OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET		0x00c0
+#define OMAP4430_CM_L4PER_L4PER_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00c0)
+#define OMAP4_CM_L4PER_MCASP2_CLKCTRL_OFFSET		0x00d0
+#define OMAP4430_CM_L4PER_MCASP2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00d0)
+#define OMAP4_CM_L4PER_MCASP3_CLKCTRL_OFFSET		0x00d8
+#define OMAP4430_CM_L4PER_MCASP3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00d8)
+#define OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET		0x00e0
+#define OMAP4430_CM_L4PER_MCBSP4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00e0)
+#define OMAP4_CM_L4PER_MGATE_CLKCTRL_OFFSET		0x00e8
+#define OMAP4430_CM_L4PER_MGATE_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00e8)
+#define OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET		0x00f0
+#define OMAP4430_CM_L4PER_MCSPI1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00f0)
+#define OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET		0x00f8
+#define OMAP4430_CM_L4PER_MCSPI2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x00f8)
+#define OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET		0x0100
+#define OMAP4430_CM_L4PER_MCSPI3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0100)
+#define OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET		0x0108
+#define OMAP4430_CM_L4PER_MCSPI4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0108)
+#define OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET		0x0120
+#define OMAP4430_CM_L4PER_MMCSD3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0120)
+#define OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET		0x0128
+#define OMAP4430_CM_L4PER_MMCSD4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0128)
+#define OMAP4_CM_L4PER_MSPROHG_CLKCTRL_OFFSET		0x0130
+#define OMAP4430_CM_L4PER_MSPROHG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0130)
+#define OMAP4_CM_L4PER_SLIMBUS2_CLKCTRL_OFFSET		0x0138
+#define OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0138)
+#define OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET		0x0140
+#define OMAP4430_CM_L4PER_UART1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0140)
+#define OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET		0x0148
+#define OMAP4430_CM_L4PER_UART2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0148)
+#define OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET		0x0150
+#define OMAP4430_CM_L4PER_UART3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0150)
+#define OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET		0x0158
+#define OMAP4430_CM_L4PER_UART4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0158)
+#define OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET		0x0160
+#define OMAP4430_CM_L4PER_MMCSD5_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0160)
+#define OMAP4_CM_L4PER_I2C5_CLKCTRL_OFFSET		0x0168
+#define OMAP4430_CM_L4PER_I2C5_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0168)
+#define OMAP4_CM_L4SEC_CLKSTCTRL_OFFSET			0x0180
+#define OMAP4430_CM_L4SEC_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0180)
+#define OMAP4_CM_L4SEC_STATICDEP_OFFSET			0x0184
+#define OMAP4430_CM_L4SEC_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0184)
+#define OMAP4_CM_L4SEC_DYNAMICDEP_OFFSET		0x0188
+#define OMAP4430_CM_L4SEC_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x0188)
+#define OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET		0x01a0
+#define OMAP4430_CM_L4SEC_AES1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01a0)
+#define OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET		0x01a8
+#define OMAP4430_CM_L4SEC_AES2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01a8)
+#define OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET		0x01b0
+#define OMAP4430_CM_L4SEC_DES3DES_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01b0)
+#define OMAP4_CM_L4SEC_PKAEIP29_CLKCTRL_OFFSET		0x01b8
+#define OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01b8)
+#define OMAP4_CM_L4SEC_RNG_CLKCTRL_OFFSET		0x01c0
+#define OMAP4430_CM_L4SEC_RNG_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01c0)
+#define OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET		0x01c8
+#define OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01c8)
+#define OMAP4_CM_L4SEC_CRYPTODMA_CLKCTRL_OFFSET		0x01d8
+#define OMAP4430_CM_L4SEC_CRYPTODMA_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_INST, 0x01d8)
+
+/* CM2.CEFUSE_CM2 register offsets */
+#define OMAP4_CM_CEFUSE_CLKSTCTRL_OFFSET		0x0000
+#define OMAP4430_CM_CEFUSE_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_INST, 0x0000)
+#define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET		0x0020
+#define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_INST, 0x0020)
+
+/* CM2.RESTORE_CM2 register offsets */
+#define OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET		0x0000
+#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0000)
+#define OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET		0x0004
+#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0004)
+#define OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET		0x0008
+#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0008)
+#define OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET		0x000c
+#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x000c)
+#define OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET		0x0010
+#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0010)
+#define OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET	0x0014
+#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0014)
+#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET	0x0018
+#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0018)
+#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET	0x001c
+#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x001c)
+#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET	0x0020
+#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0020)
+#define OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET	0x0024
+#define OMAP4430_CM_CM2_PROFILING_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0024)
+#define OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET		0x0028
+#define OMAP4430_CM_D2D_STATICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0028)
+#define OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET		0x002c
+#define OMAP4430_CM_L3_1_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x002c)
+#define OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET		0x0030
+#define OMAP4430_CM_L3_2_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0030)
+#define OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET		0x0034
+#define OMAP4430_CM_D2D_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0034)
+#define OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET	0x0038
+#define OMAP4430_CM_L4CFG_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0038)
+#define OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET	0x003c
+#define OMAP4430_CM_L4PER_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x003c)
+#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_RESTORE_OFFSET	0x0040
+#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0040)
+#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_RESTORE_OFFSET	0x0044
+#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0044)
+#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_RESTORE_OFFSET	0x0048
+#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0048)
+#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_RESTORE_OFFSET	0x004c
+#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x004c)
+#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_RESTORE_OFFSET	0x0050
+#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0050)
+#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET	0x0054
+#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0054)
+#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET	0x0058
+#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x0058)
+#define OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET		0x005c
+#define OMAP4430_CM_SDMA_STATICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_INST, 0x005c)
+
+/* Function prototypes */
+extern u32 omap4_cm2_read_inst_reg(s16 inst, u16 idx);
+extern void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 idx);
+extern u32 omap4_cm2_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
+
+#endif
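
Editorial aside, not part of the patch: the header closes by declaring per-instance accessors rather than absolute-address helpers. A hedged host-side mock of how such an (instance, register offset) accessor resolves a register, with a byte array standing in for the real ioremapped CM2 block; the mock names and backing store are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MOCK_CM2_SIZE		0x2000		/* covers instance offsets up to 0x1f00 */
#define MOCK_L4PER_INST		0x1400		/* mirrors OMAP4430_CM2_L4PER_INST */
#define MOCK_I2C1_CLKCTRL_OFF	0x00a0		/* mirrors OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET */

static uint8_t mock_cm2[MOCK_CM2_SIZE];		/* stands in for the mapped CM2 block */

/* Same shape as the accessor declared above, backed by the mock block. */
static uint32_t mock_cm2_read_inst_reg(int16_t inst, uint16_t idx)
{
	uint32_t v;

	memcpy(&v, mock_cm2 + inst + idx, sizeof(v));
	return v;
}

int main(void)
{
	uint32_t v = 0x00000002;	/* arbitrary test value */

	memcpy(mock_cm2 + MOCK_L4PER_INST + MOCK_I2C1_CLKCTRL_OFF, &v, sizeof(v));
	printf("I2C1 CLKCTRL: 0x%08x\n",
	       mock_cm2_read_inst_reg(MOCK_L4PER_INST, MOCK_I2C1_CLKCTRL_OFF));
	return 0;
}
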
diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.c b/arch/arm/mach-omap2/cm2xxx_3xxx.c
new file mode 100644
index 0000000..96954aa
--- /dev/null
+++ b/arch/arm/mach-omap2/cm2xxx_3xxx.c
@@ -0,0 +1,471 @@
+/*
+ * OMAP2/3 CM module functions
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "cm.h"
+#include "cm2xxx_3xxx.h"
+#include "cm-regbits-24xx.h"
+#include "cm-regbits-34xx.h"
+
+static const u8 cm_idlest_offs[] = {
+	CM_IDLEST1, CM_IDLEST2, OMAP2430_CM_IDLEST3
+};
+
+u32 omap2_cm_read_mod_reg(s16 module, u16 idx)
+{
+	return __raw_readl(cm_base + module + idx);
+}
+
+void omap2_cm_write_mod_reg(u32 val, s16 module, u16 idx)
+{
+	__raw_writel(val, cm_base + module + idx);
+}
+
+/* Read-modify-write a register in a CM module. Caller must lock */
+u32 omap2_cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
+{
+	u32 v;
+
+	v = omap2_cm_read_mod_reg(module, idx);
+	v &= ~mask;
+	v |= bits;
+	omap2_cm_write_mod_reg(v, module, idx);
+
+	return v;
+}
+
+u32 omap2_cm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	return omap2_cm_rmw_mod_reg_bits(bits, bits, module, idx);
+}
+
+u32 omap2_cm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	return omap2_cm_rmw_mod_reg_bits(bits, 0x0, module, idx);
+}
+
+/*
+ *
+ */
+
+static void _write_clktrctrl(u8 c, s16 module, u32 mask)
+{
+	u32 v;
+
+	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
+	v &= ~mask;
+	v |= c << __ffs(mask);
+	omap2_cm_write_mod_reg(v, module, OMAP2_CM_CLKSTCTRL);
+}
+
+bool omap2_cm_is_clkdm_in_hwsup(s16 module, u32 mask)
+{
+	u32 v;
+	bool ret = 0;
+
+	BUG_ON(!cpu_is_omap24xx() && !cpu_is_omap34xx());
+
+	v = omap2_cm_read_mod_reg(module, OMAP2_CM_CLKSTCTRL);
+	v &= mask;
+	v >>= __ffs(mask);
+
+	if (cpu_is_omap24xx())
+		ret = (v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;
+	else
+		ret = (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? 1 : 0;
+
+	return ret;
+}
+
+void omap2xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
+{
+	_write_clktrctrl(OMAP24XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
+}
+
+void omap2xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
+{
+	_write_clktrctrl(OMAP24XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
+}
+
+void omap3xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask)
+{
+	_write_clktrctrl(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, module, mask);
+}
+
+void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask)
+{
+	_write_clktrctrl(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, module, mask);
+}
+
+void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask)
+{
+	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, module, mask);
+}
+
+void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask)
+{
+	_write_clktrctrl(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, module, mask);
+}
+
+
+/*
+ *
+ */
+
+/**
+ * omap2_cm_wait_module_ready - wait for a module to leave idle or standby
+ * @prcm_mod: PRCM module offset
+ * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
+ * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
+ *
+ * XXX document
+ */
+int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
+{
+	int ena = 0, i = 0;
+	u8 cm_idlest_reg;
+	u32 mask;
+
+	if (!idlest_id || (idlest_id > ARRAY_SIZE(cm_idlest_offs)))
+		return -EINVAL;
+
+	cm_idlest_reg = cm_idlest_offs[idlest_id - 1];
+
+	mask = 1 << idlest_shift;
+
+	if (cpu_is_omap24xx())
+		ena = mask;
+	else if (cpu_is_omap34xx())
+		ena = 0;
+	else
+		BUG();
+
+	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) & mask) == ena),
+			  MAX_MODULE_READY_TIME, i);
+
+	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+}
+
+/*
+ * Context save/restore code - OMAP3 only
+ */
+#ifdef CONFIG_ARCH_OMAP3
+struct omap3_cm_regs {
+	u32 iva2_cm_clksel1;
+	u32 iva2_cm_clksel2;
+	u32 cm_sysconfig;
+	u32 sgx_cm_clksel;
+	u32 dss_cm_clksel;
+	u32 cam_cm_clksel;
+	u32 per_cm_clksel;
+	u32 emu_cm_clksel;
+	u32 emu_cm_clkstctrl;
+	u32 pll_cm_autoidle2;
+	u32 pll_cm_clksel4;
+	u32 pll_cm_clksel5;
+	u32 pll_cm_clken2;
+	u32 cm_polctrl;
+	u32 iva2_cm_fclken;
+	u32 iva2_cm_clken_pll;
+	u32 core_cm_fclken1;
+	u32 core_cm_fclken3;
+	u32 sgx_cm_fclken;
+	u32 wkup_cm_fclken;
+	u32 dss_cm_fclken;
+	u32 cam_cm_fclken;
+	u32 per_cm_fclken;
+	u32 usbhost_cm_fclken;
+	u32 core_cm_iclken1;
+	u32 core_cm_iclken2;
+	u32 core_cm_iclken3;
+	u32 sgx_cm_iclken;
+	u32 wkup_cm_iclken;
+	u32 dss_cm_iclken;
+	u32 cam_cm_iclken;
+	u32 per_cm_iclken;
+	u32 usbhost_cm_iclken;
+	u32 iva2_cm_autoidle2;
+	u32 mpu_cm_autoidle2;
+	u32 iva2_cm_clkstctrl;
+	u32 mpu_cm_clkstctrl;
+	u32 core_cm_clkstctrl;
+	u32 sgx_cm_clkstctrl;
+	u32 dss_cm_clkstctrl;
+	u32 cam_cm_clkstctrl;
+	u32 per_cm_clkstctrl;
+	u32 neon_cm_clkstctrl;
+	u32 usbhost_cm_clkstctrl;
+	u32 core_cm_autoidle1;
+	u32 core_cm_autoidle2;
+	u32 core_cm_autoidle3;
+	u32 wkup_cm_autoidle;
+	u32 dss_cm_autoidle;
+	u32 cam_cm_autoidle;
+	u32 per_cm_autoidle;
+	u32 usbhost_cm_autoidle;
+	u32 sgx_cm_sleepdep;
+	u32 dss_cm_sleepdep;
+	u32 cam_cm_sleepdep;
+	u32 per_cm_sleepdep;
+	u32 usbhost_cm_sleepdep;
+	u32 cm_clkout_ctrl;
+};
+
+static struct omap3_cm_regs cm_context;
+
+void omap3_cm_save_context(void)
+{
+	cm_context.iva2_cm_clksel1 =
+		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1);
+	cm_context.iva2_cm_clksel2 =
+		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2);
+	cm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG);
+	cm_context.sgx_cm_clksel =
+		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
+	cm_context.dss_cm_clksel =
+		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
+	cm_context.cam_cm_clksel =
+		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL);
+	cm_context.per_cm_clksel =
+		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL);
+	cm_context.emu_cm_clksel =
+		omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1);
+	cm_context.emu_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.pll_cm_autoidle2 =
+		omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2);
+	cm_context.pll_cm_clksel4 =
+		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
+	cm_context.pll_cm_clksel5 =
+		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
+	cm_context.pll_cm_clken2 =
+		omap2_cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
+	cm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL);
+	cm_context.iva2_cm_fclken =
+		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN);
+	cm_context.iva2_cm_clken_pll =
+		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);
+	cm_context.core_cm_fclken1 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
+	cm_context.core_cm_fclken3 =
+		omap2_cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
+	cm_context.sgx_cm_fclken =
+		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN);
+	cm_context.wkup_cm_fclken =
+		omap2_cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
+	cm_context.dss_cm_fclken =
+		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN);
+	cm_context.cam_cm_fclken =
+		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN);
+	cm_context.per_cm_fclken =
+		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
+	cm_context.usbhost_cm_fclken =
+		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
+	cm_context.core_cm_iclken1 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
+	cm_context.core_cm_iclken2 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN2);
+	cm_context.core_cm_iclken3 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
+	cm_context.sgx_cm_iclken =
+		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN);
+	cm_context.wkup_cm_iclken =
+		omap2_cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
+	cm_context.dss_cm_iclken =
+		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN);
+	cm_context.cam_cm_iclken =
+		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN);
+	cm_context.per_cm_iclken =
+		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
+	cm_context.usbhost_cm_iclken =
+		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
+	cm_context.iva2_cm_autoidle2 =
+		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
+	cm_context.mpu_cm_autoidle2 =
+		omap2_cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
+	cm_context.iva2_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.mpu_cm_clkstctrl =
+		omap2_cm_read_mod_reg(MPU_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.core_cm_clkstctrl =
+		omap2_cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.sgx_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.dss_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.cam_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.per_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.neon_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430_NEON_MOD, OMAP2_CM_CLKSTCTRL);
+	cm_context.usbhost_cm_clkstctrl =
+		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
+				      OMAP2_CM_CLKSTCTRL);
+	cm_context.core_cm_autoidle1 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1);
+	cm_context.core_cm_autoidle2 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2);
+	cm_context.core_cm_autoidle3 =
+		omap2_cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3);
+	cm_context.wkup_cm_autoidle =
+		omap2_cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE);
+	cm_context.dss_cm_autoidle =
+		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE);
+	cm_context.cam_cm_autoidle =
+		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE);
+	cm_context.per_cm_autoidle =
+		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
+	cm_context.usbhost_cm_autoidle =
+		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
+	cm_context.sgx_cm_sleepdep =
+		omap2_cm_read_mod_reg(OMAP3430ES2_SGX_MOD,
+				      OMAP3430_CM_SLEEPDEP);
+	cm_context.dss_cm_sleepdep =
+		omap2_cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP);
+	cm_context.cam_cm_sleepdep =
+		omap2_cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP);
+	cm_context.per_cm_sleepdep =
+		omap2_cm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_CM_SLEEPDEP);
+	cm_context.usbhost_cm_sleepdep =
+		omap2_cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
+				      OMAP3430_CM_SLEEPDEP);
+	cm_context.cm_clkout_ctrl =
+		omap2_cm_read_mod_reg(OMAP3430_CCR_MOD,
+				      OMAP3_CM_CLKOUT_CTRL_OFFSET);
+}
+
+void omap3_cm_restore_context(void)
+{
+	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD,
+			       CM_CLKSEL1);
+	omap2_cm_write_mod_reg(cm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD,
+			       CM_CLKSEL2);
+	__raw_writel(cm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
+	omap2_cm_write_mod_reg(cm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
+			       CM_CLKSEL);
+	omap2_cm_write_mod_reg(cm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
+			       CM_CLKSEL);
+	omap2_cm_write_mod_reg(cm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
+			       CM_CLKSEL);
+	omap2_cm_write_mod_reg(cm_context.per_cm_clksel, OMAP3430_PER_MOD,
+			       CM_CLKSEL);
+	omap2_cm_write_mod_reg(cm_context.emu_cm_clksel, OMAP3430_EMU_MOD,
+			       CM_CLKSEL1);
+	omap2_cm_write_mod_reg(cm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.pll_cm_autoidle2, PLL_MOD,
+			       CM_AUTOIDLE2);
+	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel4, PLL_MOD,
+			       OMAP3430ES2_CM_CLKSEL4);
+	omap2_cm_write_mod_reg(cm_context.pll_cm_clksel5, PLL_MOD,
+			       OMAP3430ES2_CM_CLKSEL5);
+	omap2_cm_write_mod_reg(cm_context.pll_cm_clken2, PLL_MOD,
+			       OMAP3430ES2_CM_CLKEN2);
+	__raw_writel(cm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
+	omap2_cm_write_mod_reg(cm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD,
+			       CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD,
+			       OMAP3430_CM_CLKEN_PLL);
+	omap2_cm_write_mod_reg(cm_context.core_cm_fclken1, CORE_MOD,
+			       CM_FCLKEN1);
+	omap2_cm_write_mod_reg(cm_context.core_cm_fclken3, CORE_MOD,
+			       OMAP3430ES2_CM_FCLKEN3);
+	omap2_cm_write_mod_reg(cm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD,
+			       CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.dss_cm_fclken, OMAP3430_DSS_MOD,
+			       CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.cam_cm_fclken, OMAP3430_CAM_MOD,
+			       CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.per_cm_fclken, OMAP3430_PER_MOD,
+			       CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.usbhost_cm_fclken,
+			       OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
+	omap2_cm_write_mod_reg(cm_context.core_cm_iclken1, CORE_MOD,
+			       CM_ICLKEN1);
+	omap2_cm_write_mod_reg(cm_context.core_cm_iclken2, CORE_MOD,
+			       CM_ICLKEN2);
+	omap2_cm_write_mod_reg(cm_context.core_cm_iclken3, CORE_MOD,
+			       CM_ICLKEN3);
+	omap2_cm_write_mod_reg(cm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD,
+			       CM_ICLKEN);
+	omap2_cm_write_mod_reg(cm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN);
+	omap2_cm_write_mod_reg(cm_context.dss_cm_iclken, OMAP3430_DSS_MOD,
+			       CM_ICLKEN);
+	omap2_cm_write_mod_reg(cm_context.cam_cm_iclken, OMAP3430_CAM_MOD,
+			       CM_ICLKEN);
+	omap2_cm_write_mod_reg(cm_context.per_cm_iclken, OMAP3430_PER_MOD,
+			       CM_ICLKEN);
+	omap2_cm_write_mod_reg(cm_context.usbhost_cm_iclken,
+			       OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
+	omap2_cm_write_mod_reg(cm_context.iva2_cm_autoidle2, OMAP3430_IVA2_MOD,
+			       CM_AUTOIDLE2);
+	omap2_cm_write_mod_reg(cm_context.mpu_cm_autoidle2, MPU_MOD,
+			       CM_AUTOIDLE2);
+	omap2_cm_write_mod_reg(cm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.mpu_cm_clkstctrl, MPU_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.core_cm_clkstctrl, CORE_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.sgx_cm_clkstctrl, OMAP3430ES2_SGX_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.per_cm_clkstctrl, OMAP3430_PER_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD,
+			       OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.usbhost_cm_clkstctrl,
+			       OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL);
+	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle1, CORE_MOD,
+			       CM_AUTOIDLE1);
+	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle2, CORE_MOD,
+			       CM_AUTOIDLE2);
+	omap2_cm_write_mod_reg(cm_context.core_cm_autoidle3, CORE_MOD,
+			       CM_AUTOIDLE3);
+	omap2_cm_write_mod_reg(cm_context.wkup_cm_autoidle, WKUP_MOD,
+			       CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(cm_context.dss_cm_autoidle, OMAP3430_DSS_MOD,
+			       CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(cm_context.cam_cm_autoidle, OMAP3430_CAM_MOD,
+			       CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(cm_context.per_cm_autoidle, OMAP3430_PER_MOD,
+			       CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(cm_context.usbhost_cm_autoidle,
+			       OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(cm_context.sgx_cm_sleepdep, OMAP3430ES2_SGX_MOD,
+			       OMAP3430_CM_SLEEPDEP);
+	omap2_cm_write_mod_reg(cm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD,
+			       OMAP3430_CM_SLEEPDEP);
+	omap2_cm_write_mod_reg(cm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD,
+			       OMAP3430_CM_SLEEPDEP);
+	omap2_cm_write_mod_reg(cm_context.per_cm_sleepdep, OMAP3430_PER_MOD,
+			       OMAP3430_CM_SLEEPDEP);
+	omap2_cm_write_mod_reg(cm_context.usbhost_cm_sleepdep,
+			       OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
+	omap2_cm_write_mod_reg(cm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD,
+			       OMAP3_CM_CLKOUT_CTRL_OFFSET);
+}
+#endif
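
The save/restore pair added above is a straight bulk copy of the CM register file, so a caller only needs to bracket a context-losing power transition with the two calls. A minimal sketch, assuming a hypothetical caller in the OMAP3 PM code (the function name and the transition step are illustrative and not part of this patch):

#include "cm2xxx_3xxx.h"

#ifdef CONFIG_ARCH_OMAP3
/* Illustrative caller; not part of the patch above. */
static void example_omap3_off_mode_transition(void)
{
	/* Snapshot every register listed in struct omap3_cm_regs. */
	omap3_cm_save_context();

	/* ... hypothetical entry to and exit from the off state ... */

	/* Rewrite the saved values once the CM module is powered again. */
	omap3_cm_restore_context();
}
#endif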
diff --git a/arch/arm/mach-omap2/cm2xxx_3xxx.h b/arch/arm/mach-omap2/cm2xxx_3xxx.h
new file mode 100644
index 0000000..5e9ea5b
--- /dev/null
+++ b/arch/arm/mach-omap2/cm2xxx_3xxx.h
@@ -0,0 +1,147 @@
+/*
+ * OMAP2/3 Clock Management (CM) register definitions
+ *
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2010 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The CM hardware modules on the OMAP2/3 are quite similar to each
+ * other.  The CM modules/instances on OMAP4 are quite different, so
+ * they are handled in a separate file.
+ */
+#ifndef __ARCH_ASM_MACH_OMAP2_CM2XXX_3XXX_H
+#define __ARCH_ASM_MACH_OMAP2_CM2XXX_3XXX_H
+
+#include "prcm-common.h"
+
+#define OMAP2420_CM_REGADDR(module, reg)				\
+			OMAP2_L4_IO_ADDRESS(OMAP2420_CM_BASE + (module) + (reg))
+#define OMAP2430_CM_REGADDR(module, reg)				\
+			OMAP2_L4_IO_ADDRESS(OMAP2430_CM_BASE + (module) + (reg))
+#define OMAP34XX_CM_REGADDR(module, reg)				\
+			OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE + (module) + (reg))
+
+
+/*
+ * OMAP3-specific global CM registers
+ * Use cm_{read,write}_reg() with these registers.
+ * These registers appear once per CM module.
+ */
+
+#define OMAP3430_CM_REVISION		OMAP34XX_CM_REGADDR(OCP_MOD, 0x0000)
+#define OMAP3430_CM_SYSCONFIG		OMAP34XX_CM_REGADDR(OCP_MOD, 0x0010)
+#define OMAP3430_CM_POLCTRL		OMAP34XX_CM_REGADDR(OCP_MOD, 0x009c)
+
+#define OMAP3_CM_CLKOUT_CTRL_OFFSET	0x0070
+#define OMAP3430_CM_CLKOUT_CTRL		OMAP_CM_REGADDR(OMAP3430_CCR_MOD, 0x0070)
+
+/*
+ * Module specific CM register offsets from CM_BASE + domain offset
+ * Use omap2_cm_{read,write}_mod_reg() with these registers.
+ * These register offsets generally appear in more than one PRCM submodule.
+ */
+
+/* Common between OMAP2 and OMAP3 */
+
+#define CM_FCLKEN					0x0000
+#define CM_FCLKEN1					CM_FCLKEN
+#define CM_CLKEN					CM_FCLKEN
+#define CM_ICLKEN					0x0010
+#define CM_ICLKEN1					CM_ICLKEN
+#define CM_ICLKEN2					0x0014
+#define CM_ICLKEN3					0x0018
+#define CM_IDLEST					0x0020
+#define CM_IDLEST1					CM_IDLEST
+#define CM_IDLEST2					0x0024
+#define CM_AUTOIDLE					0x0030
+#define CM_AUTOIDLE1					CM_AUTOIDLE
+#define CM_AUTOIDLE2					0x0034
+#define CM_AUTOIDLE3					0x0038
+#define CM_CLKSEL					0x0040
+#define CM_CLKSEL1					CM_CLKSEL
+#define CM_CLKSEL2					0x0044
+#define OMAP2_CM_CLKSTCTRL				0x0048
+
+/* OMAP2-specific register offsets */
+
+#define OMAP24XX_CM_FCLKEN2				0x0004
+#define OMAP24XX_CM_ICLKEN4				0x001c
+#define OMAP24XX_CM_AUTOIDLE4				0x003c
+
+#define OMAP2430_CM_IDLEST3				0x0028
+
+/* OMAP3-specific register offsets */
+
+#define OMAP3430_CM_CLKEN_PLL				0x0004
+#define OMAP3430ES2_CM_CLKEN2				0x0004
+#define OMAP3430ES2_CM_FCLKEN3				0x0008
+#define OMAP3430_CM_IDLEST_PLL				CM_IDLEST2
+#define OMAP3430_CM_AUTOIDLE_PLL			CM_AUTOIDLE2
+#define OMAP3430ES2_CM_AUTOIDLE2_PLL			CM_AUTOIDLE2
+#define OMAP3430_CM_CLKSEL1				CM_CLKSEL
+#define OMAP3430_CM_CLKSEL1_PLL				CM_CLKSEL
+#define OMAP3430_CM_CLKSEL2_PLL				CM_CLKSEL2
+#define OMAP3430_CM_SLEEPDEP				CM_CLKSEL2
+#define OMAP3430_CM_CLKSEL3				OMAP2_CM_CLKSTCTRL
+#define OMAP3430_CM_CLKSTST				0x004c
+#define OMAP3430ES2_CM_CLKSEL4				0x004c
+#define OMAP3430ES2_CM_CLKSEL5				0x0050
+#define OMAP3430_CM_CLKSEL2_EMU				0x0050
+#define OMAP3430_CM_CLKSEL3_EMU				0x0054
+
+
+/* CM_IDLEST bit field values to indicate deasserted IdleReq */
+
+#define OMAP24XX_CM_IDLEST_VAL				0
+#define OMAP34XX_CM_IDLEST_VAL				1
+
+
+/* Clock management domain register get/set */
+
+#ifndef __ASSEMBLER__
+
+extern u32 omap2_cm_read_mod_reg(s16 module, u16 idx);
+extern void omap2_cm_write_mod_reg(u32 val, s16 module, u16 idx);
+extern u32 omap2_cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
+
+extern int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id,
+				      u8 idlest_shift);
+extern u32 omap2_cm_set_mod_reg_bits(u32 bits, s16 module, s16 idx);
+extern u32 omap2_cm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx);
+
+extern bool omap2_cm_is_clkdm_in_hwsup(s16 module, u32 mask);
+extern void omap2xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask);
+extern void omap2xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask);
+
+extern void omap3xxx_cm_clkdm_enable_hwsup(s16 module, u32 mask);
+extern void omap3xxx_cm_clkdm_disable_hwsup(s16 module, u32 mask);
+extern void omap3xxx_cm_clkdm_force_sleep(s16 module, u32 mask);
+extern void omap3xxx_cm_clkdm_force_wakeup(s16 module, u32 mask);
+
+#endif
+
+/* CM register bits shared between 24XX and 3430 */
+
+/* CM_CLKSEL_GFX */
+#define OMAP_CLKSEL_GFX_SHIFT				0
+#define OMAP_CLKSEL_GFX_MASK				(0x7 << 0)
+
+/* CM_ICLKEN_GFX */
+#define OMAP_EN_GFX_SHIFT				0
+#define OMAP_EN_GFX_MASK				(1 << 0)
+
+/* CM_IDLEST_GFX */
+#define OMAP_ST_GFX_MASK				(1 << 0)
+
+
+/* Function prototypes */
+# ifndef __ASSEMBLER__
+extern void omap3_cm_save_context(void);
+extern void omap3_cm_restore_context(void);
+# endif
+
+#endif
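
As a quick illustration of how the offset macros and accessors in this header compose (the enable bit, module offset, and IDLEST shift are left as parameters here because the real values come from the per-module clock data, which is outside these hunks):

#include <linux/types.h>

#include "cm2xxx_3xxx.h"

/* Illustrative helper; bit and shift values come from per-module data. */
static int example_enable_module_clock(s16 module, u32 en_bit, u8 idlest_shift)
{
	/* Set the functional clock enable bit in the module's CM_FCLKEN. */
	omap2_cm_set_mod_reg_bits(en_bit, module, CM_FCLKEN);

	/* Poll CM_IDLEST1 until the module leaves idle, or return -EBUSY. */
	return omap2_cm_wait_module_ready(module, 1, idlest_shift);
}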
diff --git a/arch/arm/mach-omap2/cm44xx.c b/arch/arm/mach-omap2/cm44xx.c
new file mode 100644
index 0000000..e96f53e
--- /dev/null
+++ b/arch/arm/mach-omap2/cm44xx.c
@@ -0,0 +1,52 @@
+/*
+ * OMAP4 CM1, CM2 module low-level functions
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * These functions are intended to be used only by the cminst44xx.c file.
+ * XXX Perhaps we should just move them there and make them static.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "cm.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "cm-regbits-44xx.h"
+
+/* CM1 hardware module low-level functions */
+
+/* Read a register in CM1 */
+u32 omap4_cm1_read_inst_reg(s16 inst, u16 reg)
+{
+	return __raw_readl(OMAP44XX_CM1_REGADDR(inst, reg));
+}
+
+/* Write into a register in CM1 */
+void omap4_cm1_write_inst_reg(u32 val, s16 inst, u16 reg)
+{
+	__raw_writel(val, OMAP44XX_CM1_REGADDR(inst, reg));
+}
+
+/* Read a register in CM2 */
+u32 omap4_cm2_read_inst_reg(s16 inst, u16 reg)
+{
+	return __raw_readl(OMAP44XX_CM2_REGADDR(inst, reg));
+}
+
+/* Write into a register in CM2 */
+void omap4_cm2_write_inst_reg(u32 val, s16 inst, u16 reg)
+{
+	__raw_writel(val, OMAP44XX_CM2_REGADDR(inst, reg));
+}
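
omap4_cm2_rmw_inst_reg_bits() is prototyped earlier (in the header included as cm2_44xx.h by this file) but its definition does not appear in the hunks shown here; a sketch modeled on the OMAP2/3 omap2_cm_rmw_mod_reg_bits() pattern above would look roughly like this (the example_ prefix marks it as illustrative, not the in-tree implementation):

#include <linux/types.h>

#include "cm2_44xx.h"

/* Illustrative sketch; the in-tree helper may differ. */
u32 example_cm2_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx)
{
	u32 v;

	/* Read the instance register, clear the masked field, set new bits. */
	v = omap4_cm2_read_inst_reg(inst, idx);
	v &= ~mask;
	v |= bits;
	omap4_cm2_write_inst_reg(v, inst, idx);

	return v;
}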
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
index 3c35a87..48fc3f4 100644
--- a/arch/arm/mach-omap2/cm44xx.h
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -1,667 +1,31 @@
 /*
- * OMAP44xx CM1 & CM2 instance offset macros
+ * OMAP4 Clock Management (CM) definitions
  *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- * Copyright (C) 2009-2010 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
  *
- * Paul Walmsley (paul@pwsan.com)
- * Rajendra Nayak (rnayak@ti.com)
- * Benoit Cousson (b-cousson@ti.com)
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
+ * Written by Paul Walmsley
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * OMAP4 has two separate CM blocks, CM1 and CM2.  This file contains
+ * macros and function prototypes that are applicable to both.
  */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CM44XX_H
-#define __ARCH_ARM_MACH_OMAP2_CM44XX_H
+#ifndef __ARCH_ASM_MACH_OMAP2_CM44XX_H
+#define __ARCH_ASM_MACH_OMAP2_CM44XX_H
 
 
-/* CM1 */
+#include "prcm-common.h"
+#include "cm.h"
 
-/* CM1.OCP_SOCKET_CM1 register offsets */
-#define OMAP4_REVISION_CM1_OFFSET			0x0000
-#define OMAP4430_REVISION_CM1				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0000)
-#define OMAP4_CM_CM1_PROFILING_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_CM1_PROFILING_CLKCTRL		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0040)
+#define OMAP4_CM_CLKSTCTRL				0x0000
 
-/* CM1.CKGEN_CM1 register offsets */
-#define OMAP4_CM_CLKSEL_CORE_OFFSET			0x0000
-#define OMAP4430_CM_CLKSEL_CORE				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0000)
-#define OMAP4_CM_CLKSEL_ABE_OFFSET			0x0008
-#define OMAP4430_CM_CLKSEL_ABE				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0008)
-#define OMAP4_CM_DLL_CTRL_OFFSET			0x0010
-#define OMAP4430_CM_DLL_CTRL				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0010)
-#define OMAP4_CM_CLKMODE_DPLL_CORE_OFFSET		0x0020
-#define OMAP4430_CM_CLKMODE_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0020)
-#define OMAP4_CM_IDLEST_DPLL_CORE_OFFSET		0x0024
-#define OMAP4430_CM_IDLEST_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0024)
-#define OMAP4_CM_AUTOIDLE_DPLL_CORE_OFFSET		0x0028
-#define OMAP4430_CM_AUTOIDLE_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0028)
-#define OMAP4_CM_CLKSEL_DPLL_CORE_OFFSET		0x002c
-#define OMAP4430_CM_CLKSEL_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x002c)
-#define OMAP4_CM_DIV_M2_DPLL_CORE_OFFSET		0x0030
-#define OMAP4430_CM_DIV_M2_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0030)
-#define OMAP4_CM_DIV_M3_DPLL_CORE_OFFSET		0x0034
-#define OMAP4430_CM_DIV_M3_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0034)
-#define OMAP4_CM_DIV_M4_DPLL_CORE_OFFSET		0x0038
-#define OMAP4430_CM_DIV_M4_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0038)
-#define OMAP4_CM_DIV_M5_DPLL_CORE_OFFSET		0x003c
-#define OMAP4430_CM_DIV_M5_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x003c)
-#define OMAP4_CM_DIV_M6_DPLL_CORE_OFFSET		0x0040
-#define OMAP4430_CM_DIV_M6_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0040)
-#define OMAP4_CM_DIV_M7_DPLL_CORE_OFFSET		0x0044
-#define OMAP4430_CM_DIV_M7_DPLL_CORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0044)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET	0x0048
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0048)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET	0x004c
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x004c)
-#define OMAP4_CM_EMU_OVERRIDE_DPLL_CORE_OFFSET		0x0050
-#define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0050)
-#define OMAP4_CM_CLKMODE_DPLL_MPU_OFFSET		0x0060
-#define OMAP4430_CM_CLKMODE_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0060)
-#define OMAP4_CM_IDLEST_DPLL_MPU_OFFSET			0x0064
-#define OMAP4430_CM_IDLEST_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0064)
-#define OMAP4_CM_AUTOIDLE_DPLL_MPU_OFFSET		0x0068
-#define OMAP4430_CM_AUTOIDLE_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0068)
-#define OMAP4_CM_CLKSEL_DPLL_MPU_OFFSET			0x006c
-#define OMAP4430_CM_CLKSEL_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x006c)
-#define OMAP4_CM_DIV_M2_DPLL_MPU_OFFSET			0x0070
-#define OMAP4430_CM_DIV_M2_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0070)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET		0x0088
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0088)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET		0x008c
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x008c)
-#define OMAP4_CM_BYPCLK_DPLL_MPU_OFFSET			0x009c
-#define OMAP4430_CM_BYPCLK_DPLL_MPU			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x009c)
-#define OMAP4_CM_CLKMODE_DPLL_IVA_OFFSET		0x00a0
-#define OMAP4430_CM_CLKMODE_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a0)
-#define OMAP4_CM_IDLEST_DPLL_IVA_OFFSET			0x00a4
-#define OMAP4430_CM_IDLEST_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a4)
-#define OMAP4_CM_AUTOIDLE_DPLL_IVA_OFFSET		0x00a8
-#define OMAP4430_CM_AUTOIDLE_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a8)
-#define OMAP4_CM_CLKSEL_DPLL_IVA_OFFSET			0x00ac
-#define OMAP4430_CM_CLKSEL_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ac)
-#define OMAP4_CM_DIV_M4_DPLL_IVA_OFFSET			0x00b8
-#define OMAP4430_CM_DIV_M4_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00b8)
-#define OMAP4_CM_DIV_M5_DPLL_IVA_OFFSET			0x00bc
-#define OMAP4430_CM_DIV_M5_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00bc)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_IVA_OFFSET		0x00c8
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00c8)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_IVA_OFFSET		0x00cc
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00cc)
-#define OMAP4_CM_BYPCLK_DPLL_IVA_OFFSET			0x00dc
-#define OMAP4430_CM_BYPCLK_DPLL_IVA			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00dc)
-#define OMAP4_CM_CLKMODE_DPLL_ABE_OFFSET		0x00e0
-#define OMAP4430_CM_CLKMODE_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e0)
-#define OMAP4_CM_IDLEST_DPLL_ABE_OFFSET			0x00e4
-#define OMAP4430_CM_IDLEST_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e4)
-#define OMAP4_CM_AUTOIDLE_DPLL_ABE_OFFSET		0x00e8
-#define OMAP4430_CM_AUTOIDLE_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e8)
-#define OMAP4_CM_CLKSEL_DPLL_ABE_OFFSET			0x00ec
-#define OMAP4430_CM_CLKSEL_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ec)
-#define OMAP4_CM_DIV_M2_DPLL_ABE_OFFSET			0x00f0
-#define OMAP4430_CM_DIV_M2_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f0)
-#define OMAP4_CM_DIV_M3_DPLL_ABE_OFFSET			0x00f4
-#define OMAP4430_CM_DIV_M3_DPLL_ABE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f4)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_ABE_OFFSET		0x0108
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0108)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_ABE_OFFSET		0x010c
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x010c)
-#define OMAP4_CM_CLKMODE_DPLL_DDRPHY_OFFSET		0x0120
-#define OMAP4430_CM_CLKMODE_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0120)
-#define OMAP4_CM_IDLEST_DPLL_DDRPHY_OFFSET		0x0124
-#define OMAP4430_CM_IDLEST_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0124)
-#define OMAP4_CM_AUTOIDLE_DPLL_DDRPHY_OFFSET		0x0128
-#define OMAP4430_CM_AUTOIDLE_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0128)
-#define OMAP4_CM_CLKSEL_DPLL_DDRPHY_OFFSET		0x012c
-#define OMAP4430_CM_CLKSEL_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x012c)
-#define OMAP4_CM_DIV_M2_DPLL_DDRPHY_OFFSET		0x0130
-#define OMAP4430_CM_DIV_M2_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0130)
-#define OMAP4_CM_DIV_M4_DPLL_DDRPHY_OFFSET		0x0138
-#define OMAP4430_CM_DIV_M4_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0138)
-#define OMAP4_CM_DIV_M5_DPLL_DDRPHY_OFFSET		0x013c
-#define OMAP4430_CM_DIV_M5_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x013c)
-#define OMAP4_CM_DIV_M6_DPLL_DDRPHY_OFFSET		0x0140
-#define OMAP4430_CM_DIV_M6_DPLL_DDRPHY			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0140)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_DDRPHY_OFFSET	0x0148
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0148)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_DDRPHY_OFFSET	0x014c
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x014c)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG1_OFFSET		0x0160
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG1			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0160)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG2_OFFSET		0x0164
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG2			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0164)
-#define OMAP4_CM_DYN_DEP_PRESCAL_OFFSET			0x0170
-#define OMAP4430_CM_DYN_DEP_PRESCAL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0170)
-#define OMAP4_CM_RESTORE_ST_OFFSET			0x0180
-#define OMAP4430_CM_RESTORE_ST				OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0180)
+/* Function prototypes */
+# ifndef __ASSEMBLER__
 
-/* CM1.MPU_CM1 register offsets */
-#define OMAP4_CM_MPU_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_MPU_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0000)
-#define OMAP4_CM_MPU_STATICDEP_OFFSET			0x0004
-#define OMAP4430_CM_MPU_STATICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0004)
-#define OMAP4_CM_MPU_DYNAMICDEP_OFFSET			0x0008
-#define OMAP4430_CM_MPU_DYNAMICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0008)
-#define OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET			0x0020
-#define OMAP4430_CM_MPU_MPU_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0020)
+extern int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg);
 
-/* CM1.TESLA_CM1 register offsets */
-#define OMAP4_CM_TESLA_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_TESLA_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0000)
-#define OMAP4_CM_TESLA_STATICDEP_OFFSET			0x0004
-#define OMAP4430_CM_TESLA_STATICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0004)
-#define OMAP4_CM_TESLA_DYNAMICDEP_OFFSET		0x0008
-#define OMAP4430_CM_TESLA_DYNAMICDEP			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0008)
-#define OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_TESLA_TESLA_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0020)
-
-/* CM1.ABE_CM1 register offsets */
-#define OMAP4_CM1_ABE_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM1_ABE_CLKSTCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0000)
-#define OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM1_ABE_L4ABE_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0020)
-#define OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM1_ABE_AESS_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0028)
-#define OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET		0x0030
-#define OMAP4430_CM1_ABE_PDM_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0030)
-#define OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET		0x0038
-#define OMAP4430_CM1_ABE_DMIC_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0038)
-#define OMAP4_CM1_ABE_MCASP_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM1_ABE_MCASP_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0040)
-#define OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET		0x0048
-#define OMAP4430_CM1_ABE_MCBSP1_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0048)
-#define OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET		0x0050
-#define OMAP4430_CM1_ABE_MCBSP2_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0050)
-#define OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET		0x0058
-#define OMAP4430_CM1_ABE_MCBSP3_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0058)
-#define OMAP4_CM1_ABE_SLIMBUS_CLKCTRL_OFFSET		0x0060
-#define OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0060)
-#define OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET		0x0068
-#define OMAP4430_CM1_ABE_TIMER5_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0068)
-#define OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET		0x0070
-#define OMAP4430_CM1_ABE_TIMER6_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0070)
-#define OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET		0x0078
-#define OMAP4430_CM1_ABE_TIMER7_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0078)
-#define OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET		0x0080
-#define OMAP4430_CM1_ABE_TIMER8_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0080)
-#define OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET		0x0088
-#define OMAP4430_CM1_ABE_WDT3_CLKCTRL			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0088)
-
-/* CM1.RESTORE_CM1 register offsets */
-#define OMAP4_CM_CLKSEL_CORE_RESTORE_OFFSET		0x0000
-#define OMAP4430_CM_CLKSEL_CORE_RESTORE			OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0000)
-#define OMAP4_CM_DIV_M2_DPLL_CORE_RESTORE_OFFSET	0x0004
-#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0004)
-#define OMAP4_CM_DIV_M3_DPLL_CORE_RESTORE_OFFSET	0x0008
-#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0008)
-#define OMAP4_CM_DIV_M4_DPLL_CORE_RESTORE_OFFSET	0x000c
-#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x000c)
-#define OMAP4_CM_DIV_M5_DPLL_CORE_RESTORE_OFFSET	0x0010
-#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0010)
-#define OMAP4_CM_DIV_M6_DPLL_CORE_RESTORE_OFFSET	0x0014
-#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0014)
-#define OMAP4_CM_DIV_M7_DPLL_CORE_RESTORE_OFFSET	0x0018
-#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0018)
-#define OMAP4_CM_CLKSEL_DPLL_CORE_RESTORE_OFFSET	0x001c
-#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x001c)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE_OFFSET	0x0020
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0020)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE_OFFSET	0x0024
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0024)
-#define OMAP4_CM_CLKMODE_DPLL_CORE_RESTORE_OFFSET	0x0028
-#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0028)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG2_RESTORE_OFFSET	0x002c
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG2_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x002c)
-#define OMAP4_CM_SHADOW_FREQ_CONFIG1_RESTORE_OFFSET	0x0030
-#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0030)
-#define OMAP4_CM_AUTOIDLE_DPLL_CORE_RESTORE_OFFSET	0x0034
-#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0034)
-#define OMAP4_CM_MPU_CLKSTCTRL_RESTORE_OFFSET		0x0038
-#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0038)
-#define OMAP4_CM_CM1_PROFILING_CLKCTRL_RESTORE_OFFSET	0x003c
-#define OMAP4430_CM_CM1_PROFILING_CLKCTRL_RESTORE	OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x003c)
-#define OMAP4_CM_DYN_DEP_PRESCAL_RESTORE_OFFSET		0x0040
-#define OMAP4430_CM_DYN_DEP_PRESCAL_RESTORE		OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0040)
-
-/* CM2 */
-
-/* CM2.OCP_SOCKET_CM2 register offsets */
-#define OMAP4_REVISION_CM2_OFFSET			0x0000
-#define OMAP4430_REVISION_CM2				OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0000)
-#define OMAP4_CM_CM2_PROFILING_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_CM2_PROFILING_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0040)
-
-/* CM2.CKGEN_CM2 register offsets */
-#define OMAP4_CM_CLKSEL_DUCATI_ISS_ROOT_OFFSET		0x0000
-#define OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0000)
-#define OMAP4_CM_CLKSEL_USB_60MHZ_OFFSET		0x0004
-#define OMAP4430_CM_CLKSEL_USB_60MHZ			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0004)
-#define OMAP4_CM_SCALE_FCLK_OFFSET			0x0008
-#define OMAP4430_CM_SCALE_FCLK				OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0008)
-#define OMAP4_CM_CORE_DVFS_PERF1_OFFSET			0x0010
-#define OMAP4430_CM_CORE_DVFS_PERF1			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0010)
-#define OMAP4_CM_CORE_DVFS_PERF2_OFFSET			0x0014
-#define OMAP4430_CM_CORE_DVFS_PERF2			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0014)
-#define OMAP4_CM_CORE_DVFS_PERF3_OFFSET			0x0018
-#define OMAP4430_CM_CORE_DVFS_PERF3			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0018)
-#define OMAP4_CM_CORE_DVFS_PERF4_OFFSET			0x001c
-#define OMAP4430_CM_CORE_DVFS_PERF4			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x001c)
-#define OMAP4_CM_CORE_DVFS_CURRENT_OFFSET		0x0024
-#define OMAP4430_CM_CORE_DVFS_CURRENT			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0024)
-#define OMAP4_CM_IVA_DVFS_PERF_TESLA_OFFSET		0x0028
-#define OMAP4430_CM_IVA_DVFS_PERF_TESLA			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0028)
-#define OMAP4_CM_IVA_DVFS_PERF_IVAHD_OFFSET		0x002c
-#define OMAP4430_CM_IVA_DVFS_PERF_IVAHD			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x002c)
-#define OMAP4_CM_IVA_DVFS_PERF_ABE_OFFSET		0x0030
-#define OMAP4430_CM_IVA_DVFS_PERF_ABE			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0030)
-#define OMAP4_CM_IVA_DVFS_CURRENT_OFFSET		0x0038
-#define OMAP4430_CM_IVA_DVFS_CURRENT			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0038)
-#define OMAP4_CM_CLKMODE_DPLL_PER_OFFSET		0x0040
-#define OMAP4430_CM_CLKMODE_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0040)
-#define OMAP4_CM_IDLEST_DPLL_PER_OFFSET			0x0044
-#define OMAP4430_CM_IDLEST_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0044)
-#define OMAP4_CM_AUTOIDLE_DPLL_PER_OFFSET		0x0048
-#define OMAP4430_CM_AUTOIDLE_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0048)
-#define OMAP4_CM_CLKSEL_DPLL_PER_OFFSET			0x004c
-#define OMAP4430_CM_CLKSEL_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x004c)
-#define OMAP4_CM_DIV_M2_DPLL_PER_OFFSET			0x0050
-#define OMAP4430_CM_DIV_M2_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0050)
-#define OMAP4_CM_DIV_M3_DPLL_PER_OFFSET			0x0054
-#define OMAP4430_CM_DIV_M3_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0054)
-#define OMAP4_CM_DIV_M4_DPLL_PER_OFFSET			0x0058
-#define OMAP4430_CM_DIV_M4_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0058)
-#define OMAP4_CM_DIV_M5_DPLL_PER_OFFSET			0x005c
-#define OMAP4430_CM_DIV_M5_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x005c)
-#define OMAP4_CM_DIV_M6_DPLL_PER_OFFSET			0x0060
-#define OMAP4430_CM_DIV_M6_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0060)
-#define OMAP4_CM_DIV_M7_DPLL_PER_OFFSET			0x0064
-#define OMAP4430_CM_DIV_M7_DPLL_PER			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0064)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET		0x0068
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0068)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET		0x006c
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x006c)
-#define OMAP4_CM_CLKMODE_DPLL_USB_OFFSET		0x0080
-#define OMAP4430_CM_CLKMODE_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0080)
-#define OMAP4_CM_IDLEST_DPLL_USB_OFFSET			0x0084
-#define OMAP4430_CM_IDLEST_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0084)
-#define OMAP4_CM_AUTOIDLE_DPLL_USB_OFFSET		0x0088
-#define OMAP4430_CM_AUTOIDLE_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0088)
-#define OMAP4_CM_CLKSEL_DPLL_USB_OFFSET			0x008c
-#define OMAP4430_CM_CLKSEL_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x008c)
-#define OMAP4_CM_DIV_M2_DPLL_USB_OFFSET			0x0090
-#define OMAP4430_CM_DIV_M2_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0090)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_USB_OFFSET		0x00a8
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00a8)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_USB_OFFSET		0x00ac
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ac)
-#define OMAP4_CM_CLKDCOLDO_DPLL_USB_OFFSET		0x00b4
-#define OMAP4430_CM_CLKDCOLDO_DPLL_USB			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00b4)
-#define OMAP4_CM_CLKMODE_DPLL_UNIPRO_OFFSET		0x00c0
-#define OMAP4430_CM_CLKMODE_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c0)
-#define OMAP4_CM_IDLEST_DPLL_UNIPRO_OFFSET		0x00c4
-#define OMAP4430_CM_IDLEST_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c4)
-#define OMAP4_CM_AUTOIDLE_DPLL_UNIPRO_OFFSET		0x00c8
-#define OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c8)
-#define OMAP4_CM_CLKSEL_DPLL_UNIPRO_OFFSET		0x00cc
-#define OMAP4430_CM_CLKSEL_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00cc)
-#define OMAP4_CM_DIV_M2_DPLL_UNIPRO_OFFSET		0x00d0
-#define OMAP4430_CM_DIV_M2_DPLL_UNIPRO			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00d0)
-#define OMAP4_CM_SSC_DELTAMSTEP_DPLL_UNIPRO_OFFSET	0x00e8
-#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00e8)
-#define OMAP4_CM_SSC_MODFREQDIV_DPLL_UNIPRO_OFFSET	0x00ec
-#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ec)
-
-/* CM2.ALWAYS_ON_CM2 register offsets */
-#define OMAP4_CM_ALWON_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_ALWON_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0000)
-#define OMAP4_CM_ALWON_MDMINTC_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_ALWON_MDMINTC_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0020)
-#define OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_ALWON_SR_MPU_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0028)
-#define OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET		0x0030
-#define OMAP4430_CM_ALWON_SR_IVA_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0030)
-#define OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET		0x0038
-#define OMAP4430_CM_ALWON_SR_CORE_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0038)
-#define OMAP4_CM_ALWON_USBPHY_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_ALWON_USBPHY_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0040)
-
-/* CM2.CORE_CM2 register offsets */
-#define OMAP4_CM_L3_1_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_L3_1_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0000)
-#define OMAP4_CM_L3_1_DYNAMICDEP_OFFSET			0x0008
-#define OMAP4430_CM_L3_1_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0008)
-#define OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_L3_1_L3_1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0020)
-#define OMAP4_CM_L3_2_CLKSTCTRL_OFFSET			0x0100
-#define OMAP4430_CM_L3_2_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0100)
-#define OMAP4_CM_L3_2_DYNAMICDEP_OFFSET			0x0108
-#define OMAP4430_CM_L3_2_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0108)
-#define OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET		0x0120
-#define OMAP4430_CM_L3_2_L3_2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0120)
-#define OMAP4_CM_L3_2_GPMC_CLKCTRL_OFFSET		0x0128
-#define OMAP4430_CM_L3_2_GPMC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0128)
-#define OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET		0x0130
-#define OMAP4430_CM_L3_2_OCMC_RAM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0130)
-#define OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET		0x0200
-#define OMAP4430_CM_DUCATI_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0200)
-#define OMAP4_CM_DUCATI_STATICDEP_OFFSET		0x0204
-#define OMAP4430_CM_DUCATI_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0204)
-#define OMAP4_CM_DUCATI_DYNAMICDEP_OFFSET		0x0208
-#define OMAP4430_CM_DUCATI_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0208)
-#define OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET		0x0220
-#define OMAP4430_CM_DUCATI_DUCATI_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0220)
-#define OMAP4_CM_SDMA_CLKSTCTRL_OFFSET			0x0300
-#define OMAP4430_CM_SDMA_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0300)
-#define OMAP4_CM_SDMA_STATICDEP_OFFSET			0x0304
-#define OMAP4430_CM_SDMA_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0304)
-#define OMAP4_CM_SDMA_DYNAMICDEP_OFFSET			0x0308
-#define OMAP4430_CM_SDMA_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0308)
-#define OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET		0x0320
-#define OMAP4430_CM_SDMA_SDMA_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0320)
-#define OMAP4_CM_MEMIF_CLKSTCTRL_OFFSET			0x0400
-#define OMAP4430_CM_MEMIF_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0400)
-#define OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET		0x0420
-#define OMAP4430_CM_MEMIF_DMM_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0420)
-#define OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET		0x0428
-#define OMAP4430_CM_MEMIF_EMIF_FW_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0428)
-#define OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET		0x0430
-#define OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0430)
-#define OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET		0x0438
-#define OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0438)
-#define OMAP4_CM_MEMIF_DLL_CLKCTRL_OFFSET		0x0440
-#define OMAP4430_CM_MEMIF_DLL_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0440)
-#define OMAP4_CM_MEMIF_EMIF_H1_CLKCTRL_OFFSET		0x0450
-#define OMAP4430_CM_MEMIF_EMIF_H1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0450)
-#define OMAP4_CM_MEMIF_EMIF_H2_CLKCTRL_OFFSET		0x0458
-#define OMAP4430_CM_MEMIF_EMIF_H2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0458)
-#define OMAP4_CM_MEMIF_DLL_H_CLKCTRL_OFFSET		0x0460
-#define OMAP4430_CM_MEMIF_DLL_H_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0460)
-#define OMAP4_CM_D2D_CLKSTCTRL_OFFSET			0x0500
-#define OMAP4430_CM_D2D_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0500)
-#define OMAP4_CM_D2D_STATICDEP_OFFSET			0x0504
-#define OMAP4430_CM_D2D_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0504)
-#define OMAP4_CM_D2D_DYNAMICDEP_OFFSET			0x0508
-#define OMAP4430_CM_D2D_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0508)
-#define OMAP4_CM_D2D_SAD2D_CLKCTRL_OFFSET		0x0520
-#define OMAP4430_CM_D2D_SAD2D_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0520)
-#define OMAP4_CM_D2D_MODEM_ICR_CLKCTRL_OFFSET		0x0528
-#define OMAP4430_CM_D2D_MODEM_ICR_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0528)
-#define OMAP4_CM_D2D_SAD2D_FW_CLKCTRL_OFFSET		0x0530
-#define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0530)
-#define OMAP4_CM_L4CFG_CLKSTCTRL_OFFSET			0x0600
-#define OMAP4430_CM_L4CFG_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0600)
-#define OMAP4_CM_L4CFG_DYNAMICDEP_OFFSET		0x0608
-#define OMAP4430_CM_L4CFG_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0608)
-#define OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET		0x0620
-#define OMAP4430_CM_L4CFG_L4_CFG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0620)
-#define OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET		0x0628
-#define OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0628)
-#define OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET		0x0630
-#define OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0630)
-#define OMAP4_CM_L4CFG_SAR_ROM_CLKCTRL_OFFSET		0x0638
-#define OMAP4430_CM_L4CFG_SAR_ROM_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0638)
-#define OMAP4_CM_L3INSTR_CLKSTCTRL_OFFSET		0x0700
-#define OMAP4430_CM_L3INSTR_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0700)
-#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET		0x0720
-#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0720)
-#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET	0x0728
-#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0728)
-#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET		0x0740
-#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0740)
-
-/* CM2.IVAHD_CM2 register offsets */
-#define OMAP4_CM_IVAHD_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_IVAHD_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0000)
-#define OMAP4_CM_IVAHD_STATICDEP_OFFSET			0x0004
-#define OMAP4430_CM_IVAHD_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0004)
-#define OMAP4_CM_IVAHD_DYNAMICDEP_OFFSET		0x0008
-#define OMAP4430_CM_IVAHD_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0008)
-#define OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_IVAHD_IVAHD_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0020)
-#define OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_IVAHD_SL2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0028)
-
-/* CM2.CAM_CM2 register offsets */
-#define OMAP4_CM_CAM_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_CAM_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0000)
-#define OMAP4_CM_CAM_STATICDEP_OFFSET			0x0004
-#define OMAP4430_CM_CAM_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0004)
-#define OMAP4_CM_CAM_DYNAMICDEP_OFFSET			0x0008
-#define OMAP4430_CM_CAM_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0008)
-#define OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET			0x0020
-#define OMAP4430_CM_CAM_ISS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0020)
-#define OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_CAM_FDIF_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0028)
-
-/* CM2.DSS_CM2 register offsets */
-#define OMAP4_CM_DSS_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_DSS_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0000)
-#define OMAP4_CM_DSS_STATICDEP_OFFSET			0x0004
-#define OMAP4430_CM_DSS_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0004)
-#define OMAP4_CM_DSS_DYNAMICDEP_OFFSET			0x0008
-#define OMAP4430_CM_DSS_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0008)
-#define OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET			0x0020
-#define OMAP4430_CM_DSS_DSS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0020)
-#define OMAP4_CM_DSS_DEISS_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_DSS_DEISS_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0028)
-
-/* CM2.GFX_CM2 register offsets */
-#define OMAP4_CM_GFX_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_GFX_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0000)
-#define OMAP4_CM_GFX_STATICDEP_OFFSET			0x0004
-#define OMAP4430_CM_GFX_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0004)
-#define OMAP4_CM_GFX_DYNAMICDEP_OFFSET			0x0008
-#define OMAP4430_CM_GFX_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0008)
-#define OMAP4_CM_GFX_GFX_CLKCTRL_OFFSET			0x0020
-#define OMAP4430_CM_GFX_GFX_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0020)
-
-/* CM2.L3INIT_CM2 register offsets */
-#define OMAP4_CM_L3INIT_CLKSTCTRL_OFFSET		0x0000
-#define OMAP4430_CM_L3INIT_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0000)
-#define OMAP4_CM_L3INIT_STATICDEP_OFFSET		0x0004
-#define OMAP4430_CM_L3INIT_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0004)
-#define OMAP4_CM_L3INIT_DYNAMICDEP_OFFSET		0x0008
-#define OMAP4430_CM_L3INIT_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0008)
-#define OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_L3INIT_MMC1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0028)
-#define OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET		0x0030
-#define OMAP4430_CM_L3INIT_MMC2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0030)
-#define OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET		0x0038
-#define OMAP4430_CM_L3INIT_HSI_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0038)
-#define OMAP4_CM_L3INIT_UNIPRO1_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0040)
-#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET		0x0058
-#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0058)
-#define OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET		0x0060
-#define OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0060)
-#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET		0x0068
-#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0068)
-#define OMAP4_CM_L3INIT_P1500_CLKCTRL_OFFSET		0x0078
-#define OMAP4430_CM_L3INIT_P1500_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0078)
-#define OMAP4_CM_L3INIT_EMAC_CLKCTRL_OFFSET		0x0080
-#define OMAP4430_CM_L3INIT_EMAC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0080)
-#define OMAP4_CM_L3INIT_SATA_CLKCTRL_OFFSET		0x0088
-#define OMAP4430_CM_L3INIT_SATA_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0088)
-#define OMAP4_CM_L3INIT_TPPSS_CLKCTRL_OFFSET		0x0090
-#define OMAP4430_CM_L3INIT_TPPSS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0090)
-#define OMAP4_CM_L3INIT_PCIESS_CLKCTRL_OFFSET		0x0098
-#define OMAP4430_CM_L3INIT_PCIESS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0098)
-#define OMAP4_CM_L3INIT_CCPTX_CLKCTRL_OFFSET		0x00a8
-#define OMAP4430_CM_L3INIT_CCPTX_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00a8)
-#define OMAP4_CM_L3INIT_XHPI_CLKCTRL_OFFSET		0x00c0
-#define OMAP4430_CM_L3INIT_XHPI_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c0)
-#define OMAP4_CM_L3INIT_MMC6_CLKCTRL_OFFSET		0x00c8
-#define OMAP4430_CM_L3INIT_MMC6_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c8)
-#define OMAP4_CM_L3INIT_USB_HOST_FS_CLKCTRL_OFFSET	0x00d0
-#define OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00d0)
-#define OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET	0x00e0
-#define OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00e0)
-
-/* CM2.L4PER_CM2 register offsets */
-#define OMAP4_CM_L4PER_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_L4PER_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0000)
-#define OMAP4_CM_L4PER_DYNAMICDEP_OFFSET		0x0008
-#define OMAP4430_CM_L4PER_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0008)
-#define OMAP4_CM_L4PER_ADC_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_L4PER_ADC_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0020)
-#define OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0028)
-#define OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET		0x0030
-#define OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0030)
-#define OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET		0x0038
-#define OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0038)
-#define OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0040)
-#define OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET		0x0048
-#define OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0048)
-#define OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET		0x0050
-#define OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0050)
-#define OMAP4_CM_L4PER_ELM_CLKCTRL_OFFSET		0x0058
-#define OMAP4430_CM_L4PER_ELM_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0058)
-#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET		0x0060
-#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0060)
-#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET		0x0068
-#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0068)
-#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET		0x0070
-#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0070)
-#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET		0x0078
-#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0078)
-#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET		0x0080
-#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0080)
-#define OMAP4_CM_L4PER_HDQ1W_CLKCTRL_OFFSET		0x0088
-#define OMAP4430_CM_L4PER_HDQ1W_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0088)
-#define OMAP4_CM_L4PER_HECC1_CLKCTRL_OFFSET		0x0090
-#define OMAP4430_CM_L4PER_HECC1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0090)
-#define OMAP4_CM_L4PER_HECC2_CLKCTRL_OFFSET		0x0098
-#define OMAP4430_CM_L4PER_HECC2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0098)
-#define OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET		0x00a0
-#define OMAP4430_CM_L4PER_I2C1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a0)
-#define OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET		0x00a8
-#define OMAP4430_CM_L4PER_I2C2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a8)
-#define OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET		0x00b0
-#define OMAP4430_CM_L4PER_I2C3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b0)
-#define OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET		0x00b8
-#define OMAP4430_CM_L4PER_I2C4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b8)
-#define OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET		0x00c0
-#define OMAP4430_CM_L4PER_L4PER_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00c0)
-#define OMAP4_CM_L4PER_MCASP2_CLKCTRL_OFFSET		0x00d0
-#define OMAP4430_CM_L4PER_MCASP2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d0)
-#define OMAP4_CM_L4PER_MCASP3_CLKCTRL_OFFSET		0x00d8
-#define OMAP4430_CM_L4PER_MCASP3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d8)
-#define OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET		0x00e0
-#define OMAP4430_CM_L4PER_MCBSP4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e0)
-#define OMAP4_CM_L4PER_MGATE_CLKCTRL_OFFSET		0x00e8
-#define OMAP4430_CM_L4PER_MGATE_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e8)
-#define OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET		0x00f0
-#define OMAP4430_CM_L4PER_MCSPI1_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f0)
-#define OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET		0x00f8
-#define OMAP4430_CM_L4PER_MCSPI2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f8)
-#define OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET		0x0100
-#define OMAP4430_CM_L4PER_MCSPI3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0100)
-#define OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET		0x0108
-#define OMAP4430_CM_L4PER_MCSPI4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0108)
-#define OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET		0x0120
-#define OMAP4430_CM_L4PER_MMCSD3_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0120)
-#define OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET		0x0128
-#define OMAP4430_CM_L4PER_MMCSD4_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0128)
-#define OMAP4_CM_L4PER_MSPROHG_CLKCTRL_OFFSET		0x0130
-#define OMAP4430_CM_L4PER_MSPROHG_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0130)
-#define OMAP4_CM_L4PER_SLIMBUS2_CLKCTRL_OFFSET		0x0138
-#define OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0138)
-#define OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET		0x0140
-#define OMAP4430_CM_L4PER_UART1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0140)
-#define OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET		0x0148
-#define OMAP4430_CM_L4PER_UART2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0148)
-#define OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET		0x0150
-#define OMAP4430_CM_L4PER_UART3_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0150)
-#define OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET		0x0158
-#define OMAP4430_CM_L4PER_UART4_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0158)
-#define OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET		0x0160
-#define OMAP4430_CM_L4PER_MMCSD5_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0160)
-#define OMAP4_CM_L4PER_I2C5_CLKCTRL_OFFSET		0x0168
-#define OMAP4430_CM_L4PER_I2C5_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0168)
-#define OMAP4_CM_L4SEC_CLKSTCTRL_OFFSET			0x0180
-#define OMAP4430_CM_L4SEC_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0180)
-#define OMAP4_CM_L4SEC_STATICDEP_OFFSET			0x0184
-#define OMAP4430_CM_L4SEC_STATICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0184)
-#define OMAP4_CM_L4SEC_DYNAMICDEP_OFFSET		0x0188
-#define OMAP4430_CM_L4SEC_DYNAMICDEP			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0188)
-#define OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET		0x01a0
-#define OMAP4430_CM_L4SEC_AES1_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a0)
-#define OMAP4_CM_L4SEC_AES2_CLKCTRL_OFFSET		0x01a8
-#define OMAP4430_CM_L4SEC_AES2_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a8)
-#define OMAP4_CM_L4SEC_DES3DES_CLKCTRL_OFFSET		0x01b0
-#define OMAP4430_CM_L4SEC_DES3DES_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b0)
-#define OMAP4_CM_L4SEC_PKAEIP29_CLKCTRL_OFFSET		0x01b8
-#define OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b8)
-#define OMAP4_CM_L4SEC_RNG_CLKCTRL_OFFSET		0x01c0
-#define OMAP4430_CM_L4SEC_RNG_CLKCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c0)
-#define OMAP4_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET		0x01c8
-#define OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c8)
-#define OMAP4_CM_L4SEC_CRYPTODMA_CLKCTRL_OFFSET		0x01d8
-#define OMAP4430_CM_L4SEC_CRYPTODMA_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01d8)
-
-/* CM2.CEFUSE_CM2 register offsets */
-#define OMAP4_CM_CEFUSE_CLKSTCTRL_OFFSET		0x0000
-#define OMAP4430_CM_CEFUSE_CLKSTCTRL			OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0000)
-#define OMAP4_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0020)
-
-/* CM2.RESTORE_CM2 register offsets */
-#define OMAP4_CM_L3_1_CLKSTCTRL_RESTORE_OFFSET		0x0000
-#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0000)
-#define OMAP4_CM_L3_2_CLKSTCTRL_RESTORE_OFFSET		0x0004
-#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0004)
-#define OMAP4_CM_L4CFG_CLKSTCTRL_RESTORE_OFFSET		0x0008
-#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0008)
-#define OMAP4_CM_MEMIF_CLKSTCTRL_RESTORE_OFFSET		0x000c
-#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x000c)
-#define OMAP4_CM_L4PER_CLKSTCTRL_RESTORE_OFFSET		0x0010
-#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0010)
-#define OMAP4_CM_L3INIT_CLKSTCTRL_RESTORE_OFFSET	0x0014
-#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0014)
-#define OMAP4_CM_L3INSTR_L3_3_CLKCTRL_RESTORE_OFFSET	0x0018
-#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0018)
-#define OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE_OFFSET	0x001c
-#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x001c)
-#define OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE_OFFSET	0x0020
-#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0020)
-#define OMAP4_CM_CM2_PROFILING_CLKCTRL_RESTORE_OFFSET	0x0024
-#define OMAP4430_CM_CM2_PROFILING_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0024)
-#define OMAP4_CM_D2D_STATICDEP_RESTORE_OFFSET		0x0028
-#define OMAP4430_CM_D2D_STATICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0028)
-#define OMAP4_CM_L3_1_DYNAMICDEP_RESTORE_OFFSET		0x002c
-#define OMAP4430_CM_L3_1_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x002c)
-#define OMAP4_CM_L3_2_DYNAMICDEP_RESTORE_OFFSET		0x0030
-#define OMAP4430_CM_L3_2_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0030)
-#define OMAP4_CM_D2D_DYNAMICDEP_RESTORE_OFFSET		0x0034
-#define OMAP4430_CM_D2D_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0034)
-#define OMAP4_CM_L4CFG_DYNAMICDEP_RESTORE_OFFSET	0x0038
-#define OMAP4430_CM_L4CFG_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0038)
-#define OMAP4_CM_L4PER_DYNAMICDEP_RESTORE_OFFSET	0x003c
-#define OMAP4430_CM_L4PER_DYNAMICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x003c)
-#define OMAP4_CM_L4PER_GPIO2_CLKCTRL_RESTORE_OFFSET	0x0040
-#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0040)
-#define OMAP4_CM_L4PER_GPIO3_CLKCTRL_RESTORE_OFFSET	0x0044
-#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0044)
-#define OMAP4_CM_L4PER_GPIO4_CLKCTRL_RESTORE_OFFSET	0x0048
-#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0048)
-#define OMAP4_CM_L4PER_GPIO5_CLKCTRL_RESTORE_OFFSET	0x004c
-#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x004c)
-#define OMAP4_CM_L4PER_GPIO6_CLKCTRL_RESTORE_OFFSET	0x0050
-#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0050)
-#define OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE_OFFSET	0x0054
-#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0054)
-#define OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE_OFFSET	0x0058
-#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE	OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0058)
-#define OMAP4_CM_SDMA_STATICDEP_RESTORE_OFFSET		0x005c
-#define OMAP4430_CM_SDMA_STATICDEP_RESTORE		OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x005c)
+# endif
 #endif
diff --git a/arch/arm/mach-omap2/cm4xxx.c b/arch/arm/mach-omap2/cm4xxx.c
deleted file mode 100644
index f8a660a..0000000
--- a/arch/arm/mach-omap2/cm4xxx.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * OMAP4 CM module functions
- *
- * Copyright (C) 2009 Nokia Corporation
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <asm/atomic.h>
-
-#include <plat/common.h>
-
-#include "cm.h"
-#include "cm-regbits-44xx.h"
-
-/**
- * omap4_cm_wait_module_ready - wait for a module to be in 'func' state
- * @clkctrl_reg: CLKCTRL module address
- *
- * Wait for the module IDLEST to be functional. If the idle state is in any
- * the non functional state (trans, idle or disabled), module and thus the
- * sysconfig cannot be accessed and will probably lead to an "imprecise
- * external abort"
- *
- * Module idle state:
- *   0x0 func:     Module is fully functional, including OCP
- *   0x1 trans:    Module is performing transition: wakeup, or sleep, or sleep
- *                 abortion
- *   0x2 idle:     Module is in Idle mode (only OCP part). It is functional if
- *                 using separate functional clock
- *   0x3 disabled: Module is disabled and cannot be accessed
- *
- */
-int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
-{
-	int i = 0;
-
-	if (!clkctrl_reg)
-		return 0;
-
-	omap_test_timeout((
-		((__raw_readl(clkctrl_reg) & OMAP4430_IDLEST_MASK) == 0) ||
-		 (((__raw_readl(clkctrl_reg) & OMAP4430_IDLEST_MASK) >>
-		  OMAP4430_IDLEST_SHIFT) == 0x2)),
-		MAX_MODULE_READY_TIME, i);
-
-	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
-}
-
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
new file mode 100644
index 0000000..c04bbbe
--- /dev/null
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -0,0 +1,214 @@
+/*
+ * OMAP4 CM instance functions
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is needed since CM instances can be in the PRM, PRCM_MPU, CM1,
+ * or CM2 hardware modules.  For example, the EMU_CM CM instance is in
+ * the PRM hardware module.  What a mess...
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "cm.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "cm44xx.h"
+#include "cminst44xx.h"
+#include "cm-regbits-34xx.h"
+#include "cm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "prm44xx.h"
+#include "prcm_mpu44xx.h"
+
+static u32 _cm_bases[OMAP4_MAX_PRCM_PARTITIONS] = {
+	[OMAP4430_INVALID_PRCM_PARTITION]	= 0,
+	[OMAP4430_PRM_PARTITION]		= OMAP4430_PRM_BASE,
+	[OMAP4430_CM1_PARTITION]		= OMAP4430_CM1_BASE,
+	[OMAP4430_CM2_PARTITION]		= OMAP4430_CM2_BASE,
+	[OMAP4430_SCRM_PARTITION]		= 0,
+	[OMAP4430_PRCM_MPU_PARTITION]		= OMAP4430_PRCM_MPU_BASE,
+};
+
+/* Read a register in a CM instance */
+u32 omap4_cminst_read_inst_reg(u8 part, s16 inst, u16 idx)
+{
+	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
+	       part == OMAP4430_INVALID_PRCM_PARTITION ||
+	       !_cm_bases[part]);
+	return __raw_readl(OMAP2_L4_IO_ADDRESS(_cm_bases[part] + inst + idx));
+}
+
+/* Write into a register in a CM instance */
+void omap4_cminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
+{
+	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
+	       part == OMAP4430_INVALID_PRCM_PARTITION ||
+	       !_cm_bases[part]);
+	__raw_writel(val, OMAP2_L4_IO_ADDRESS(_cm_bases[part] + inst + idx));
+}
+
+/* Read-modify-write a register in a CM instance.  Caller must lock access */
+u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
+				   s16 idx)
+{
+	u32 v;
+
+	v = omap4_cminst_read_inst_reg(part, inst, idx);
+	v &= ~mask;
+	v |= bits;
+	omap4_cminst_write_inst_reg(v, part, inst, idx);
+
+	return v;
+}
+
+/*
+ *
+ */
+
+/**
+ * _clktrctrl_write - write @c to a CM_CLKSTCTRL.CLKTRCTRL register bitfield
+ * @c: CLKTRCTRL register bitfield (LSB = bit 0, i.e., unshifted)
+ * @part: PRCM partition ID that the CM_CLKSTCTRL register exists in
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * @c must be the unshifted value for CLKTRCTRL - i.e., this function
+ * will handle the shift itself.
+ */
+static void _clktrctrl_write(u8 c, u8 part, s16 inst, u16 cdoffs)
+{
+	u32 v;
+
+	v = omap4_cminst_read_inst_reg(part, inst, cdoffs + OMAP4_CM_CLKSTCTRL);
+	v &= ~OMAP4430_CLKTRCTRL_MASK;
+	v |= c << OMAP4430_CLKTRCTRL_SHIFT;
+	omap4_cminst_write_inst_reg(v, part, inst, cdoffs + OMAP4_CM_CLKSTCTRL);
+}
+
+/**
+ * omap4_cminst_is_clkdm_in_hwsup - is a clockdomain in hwsup idle mode?
+ * @part: PRCM partition ID that the CM_CLKSTCTRL register exists in
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Returns true if the clockdomain referred to by (@part, @inst, @cdoffs)
+ * is in hardware-supervised idle mode, or 0 otherwise.
+ */
+bool omap4_cminst_is_clkdm_in_hwsup(u8 part, s16 inst, u16 cdoffs)
+{
+	u32 v;
+
+	v = omap4_cminst_read_inst_reg(part, inst, cdoffs + OMAP4_CM_CLKSTCTRL);
+	v &= OMAP4430_CLKTRCTRL_MASK;
+	v >>= OMAP4430_CLKTRCTRL_SHIFT;
+
+	return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? true : false;
+}
+
+/**
+ * omap4_cminst_clkdm_enable_hwsup - put a clockdomain in hwsup-idle mode
+ * @part: PRCM partition ID that the clockdomain registers exist in
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Put a clockdomain referred to by (@part, @inst, @cdoffs) into
+ * hardware-supervised idle mode.  No return value.
+ */
+void omap4_cminst_clkdm_enable_hwsup(u8 part, s16 inst, u16 cdoffs)
+{
+	_clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, part, inst, cdoffs);
+}
+
+/**
+ * omap4_cminst_clkdm_disable_hwsup - put a clockdomain in swsup-idle mode
+ * @part: PRCM partition ID that the clockdomain registers exist in
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Put a clockdomain referred to by (@part, @inst, @cdoffs) into
+ * software-supervised idle mode, i.e., controlled manually by the
+ * Linux OMAP clockdomain code.  No return value.
+ */
+void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs)
+{
+	_clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, part, inst, cdoffs);
+}
+
+/**
+ * omap4_cminst_clkdm_force_sleep - try to put a clockdomain into idle
+ * @part: PRCM partition ID that the clockdomain registers exist in
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Put a clockdomain referred to by (@part, @inst, @cdoffs) into idle
+ * No return value.
+ */
+void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs)
+{
+	_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, part, inst, cdoffs);
+}
+
+/**
+ * omap4_cminst_clkdm_force_wakeup - try to take a clockdomain out of idle
+ * @part: PRCM partition ID that the clockdomain registers exist in
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Take a clockdomain referred to by (@part, @inst, @cdoffs) out of idle,
+ * waking it up.  No return value.
+ */
+void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs)
+{
+	_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, part, inst, cdoffs);
+}
+
+/*
+ *
+ */
+
+/**
+ * omap4_cm_wait_module_ready - wait for a module to be in 'func' state
+ * @clkctrl_reg: CLKCTRL module address
+ *
+ * Wait for the module IDLEST to be functional. If the idle state is in any
+ * of the non-functional states (trans, idle or disabled), the module and
+ * thus its sysconfig registers cannot be accessed; an access will probably
+ * trigger an "imprecise external abort".
+ *
+ * Module idle state:
+ *   0x0 func:     Module is fully functional, including OCP
+ *   0x1 trans:    Module is performing transition: wakeup, or sleep, or sleep
+ *                 abort
+ *   0x2 idle:     Module is in Idle mode (only OCP part). It is functional if
+ *                 using separate functional clock
+ *   0x3 disabled: Module is disabled and cannot be accessed
+ *
+ */
+int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
+{
+	int i = 0;
+
+	if (!clkctrl_reg)
+		return 0;
+
+	omap_test_timeout((
+		((__raw_readl(clkctrl_reg) & OMAP4430_IDLEST_MASK) == 0) ||
+		 (((__raw_readl(clkctrl_reg) & OMAP4430_IDLEST_MASK) >>
+		  OMAP4430_IDLEST_SHIFT) == 0x2)),
+		MAX_MODULE_READY_TIME, i);
+
+	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+}
+
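
A note on the file above: the clockdomain mode helpers in cminst44xx.c all
funnel into _clktrctrl_write(), which is a plain read-modify-write of the
CLKTRCTRL bitfield in CM_CLKSTCTRL.  A minimal standalone sketch of just
that bitfield logic follows; the 2-bit field at bits [1:0] and the 0x3
"enable auto" (hardware-supervised) value are assumptions for illustration,
the kernel takes the real definitions from cm-regbits-44xx.h and cm.h.

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed field layout for illustration: CLKTRCTRL in bits [1:0] */
	#define CLKTRCTRL_MASK	0x3u
	#define CLKTRCTRL_SHIFT	0

	/* Mirrors the _clktrctrl_write() read-modify-write on a plain word */
	static uint32_t clktrctrl_set(uint32_t clkstctrl, uint8_t mode)
	{
		clkstctrl &= ~CLKTRCTRL_MASK;
		clkstctrl |= (uint32_t)mode << CLKTRCTRL_SHIFT;
		return clkstctrl;
	}

	int main(void)
	{
		/* 0x3: hwsup, as used by omap4_cminst_clkdm_enable_hwsup() */
		printf("0x%08x\n", (unsigned)clktrctrl_set(0xdeadbee0u, 0x3));
		return 0;
	}
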
diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h
new file mode 100644
index 0000000..a6abd0a
--- /dev/null
+++ b/arch/arm/mach-omap2/cminst44xx.h
@@ -0,0 +1,31 @@
+/*
+ * OMAP4 Clock Management (CM) function prototypes
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ARCH_ASM_MACH_OMAP2_CMINST44XX_H
+#define __ARCH_ASM_MACH_OMAP2_CMINST44XX_H
+
+extern bool omap4_cminst_is_clkdm_in_hwsup(u8 part, s16 inst, u16 cdoffs);
+extern void omap4_cminst_clkdm_enable_hwsup(u8 part, s16 inst, u16 cdoffs);
+extern void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs);
+extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs);
+extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs);
+
+/*
+ * In an ideal world, we would not export these low-level functions,
+ * but this will probably take some time to fix properly
+ */
+extern u32 omap4_cminst_read_inst_reg(u8 part, s16 inst, u16 idx);
+extern void omap4_cminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx);
+extern u32 omap4_cminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part,
+					   s16 inst, s16 idx);
+
+extern int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg);
+
+#endif
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 1fa3294..6952794 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -20,12 +20,16 @@
 
 #include "cm-regbits-34xx.h"
 #include "prm-regbits-34xx.h"
-#include "cm.h"
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
+#include "cm2xxx_3xxx.h"
 #include "sdrc.h"
 #include "pm.h"
 #include "control.h"
 
+/* Used by omap3_ctrl_save_padconf() */
+#define START_PADCONF_SAVE		0x2
+#define PADCONF_SAVE_DONE		0x1
+
 static void __iomem *omap2_ctrl_base;
 static void __iomem *omap4_ctrl_pad_base;
 
@@ -134,6 +138,7 @@
 	u32 sramldo4;
 	u32 sramldo5;
 	u32 csi;
+	u32 padconf_sys_nirq;
 };
 
 static struct omap3_control_regs control_context;
@@ -209,6 +214,37 @@
 	__raw_writel(val, OMAP4_CTRL_PAD_REGADDR(offset));
 }
 
+#ifdef CONFIG_ARCH_OMAP3
+
+/**
+ * omap3_ctrl_write_boot_mode - set scratchpad boot mode for the next boot
+ * @bootmode: 8-bit value to pass to some boot code
+ *
+ * Set the bootmode in the scratchpad RAM.  This is used after the
+ * system restarts.  Not sure what actually uses this - it may be the
+ * bootloader, rather than the boot ROM - contrary to the preserved
+ * comment below.  No return value.
+ */
+void omap3_ctrl_write_boot_mode(u8 bootmode)
+{
+	u32 l;
+
+	l = ('B' << 24) | ('M' << 16) | bootmode;
+
+	/*
+	 * Reserve the first word in scratchpad for communicating
+	 * with the boot ROM. A pointer to a data structure
+	 * describing the boot process can be stored there,
+	 * cf. OMAP34xx TRM, Initialization / Software Booting
+	 * Configuration.
+	 *
+	 * XXX This should use some omap_ctrl_writel()-type function
+	 */
+	__raw_writel(l, OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD + 4));
+}
+
+#endif
+
 #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
 /*
  * Clears the scratchpad contents in case of cold boot-
@@ -220,13 +256,13 @@
 	void __iomem *v_addr;
 	u32 offset = 0;
 	v_addr = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD_ROM);
-	if (prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) &
+	if (omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) &
 	    OMAP3430_GLOBAL_COLD_RST_MASK) {
 		for ( ; offset <= max_offset; offset += 0x4)
 			__raw_writel(0x0, (v_addr + offset));
-		prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST_MASK,
-				     OMAP3430_GR_MOD,
-				     OMAP3_PRM_RSTST_OFFSET);
+		omap2_prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST_MASK,
+					   OMAP3430_GR_MOD,
+					   OMAP3_PRM_RSTST_OFFSET);
 	}
 }
 
@@ -239,9 +275,19 @@
 	struct omap3_scratchpad_prcm_block prcm_block_contents;
 	struct omap3_scratchpad_sdrc_block sdrc_block_contents;
 
-	/* Populate the Scratchpad contents */
+	/*
+	 * Populate the Scratchpad contents
+	 *
+	 * The "get_*restore_pointer" functions are used to provide a
+	 * physical restore address where the ROM code jumps while waking
+	 * up from MPU OFF/OSWR state.
+	 * The restore pointer is stored into the scratchpad.
+	 */
 	scratchpad_contents.boot_config_ptr = 0x0;
-	if (omap_rev() != OMAP3430_REV_ES3_0 &&
+	if (cpu_is_omap3630())
+		scratchpad_contents.public_restore_ptr =
+			virt_to_phys(get_omap3630_restore_pointer());
+	else if (omap_rev() != OMAP3430_REV_ES3_0 &&
 					omap_rev() != OMAP3430_REV_ES3_1)
 		scratchpad_contents.public_restore_ptr =
 			virt_to_phys(get_restore_pointer());
@@ -258,32 +304,34 @@
 	scratchpad_contents.sdrc_block_offset = 0x64;
 
 	/* Populate the PRCM block contents */
-	prcm_block_contents.prm_clksrc_ctrl = prm_read_mod_reg(OMAP3430_GR_MOD,
-			OMAP3_PRM_CLKSRC_CTRL_OFFSET);
-	prcm_block_contents.prm_clksel = prm_read_mod_reg(OMAP3430_CCR_MOD,
-			OMAP3_PRM_CLKSEL_OFFSET);
+	prcm_block_contents.prm_clksrc_ctrl =
+		omap2_prm_read_mod_reg(OMAP3430_GR_MOD,
+				       OMAP3_PRM_CLKSRC_CTRL_OFFSET);
+	prcm_block_contents.prm_clksel =
+		omap2_prm_read_mod_reg(OMAP3430_CCR_MOD,
+				       OMAP3_PRM_CLKSEL_OFFSET);
 	prcm_block_contents.cm_clksel_core =
-			cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
+			omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
 	prcm_block_contents.cm_clksel_wkup =
-			cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
+			omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
 	prcm_block_contents.cm_clken_pll =
-			cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+			omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
 	prcm_block_contents.cm_autoidle_pll =
-			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL);
 	prcm_block_contents.cm_clksel1_pll =
-			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
+			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
 	prcm_block_contents.cm_clksel2_pll =
-			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
+			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
 	prcm_block_contents.cm_clksel3_pll =
-			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
+			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
 	prcm_block_contents.cm_clken_pll_mpu =
-			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
+			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
 	prcm_block_contents.cm_autoidle_pll_mpu =
-			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
+			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
 	prcm_block_contents.cm_clksel1_pll_mpu =
-			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
+			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
 	prcm_block_contents.cm_clksel2_pll_mpu =
-			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
+			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
 	prcm_block_contents.prcm_block_size = 0x0;
 
 	/* Populate the SDRC block contents */
@@ -416,6 +464,8 @@
 	control_context.sramldo4 = omap_ctrl_readl(OMAP343X_CONTROL_SRAMLDO4);
 	control_context.sramldo5 = omap_ctrl_readl(OMAP343X_CONTROL_SRAMLDO5);
 	control_context.csi = omap_ctrl_readl(OMAP343X_CONTROL_CSI);
+	control_context.padconf_sys_nirq =
+		omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_SYSNIRQ);
 	return;
 }
 
@@ -472,6 +522,43 @@
 	omap_ctrl_writel(control_context.sramldo4, OMAP343X_CONTROL_SRAMLDO4);
 	omap_ctrl_writel(control_context.sramldo5, OMAP343X_CONTROL_SRAMLDO5);
 	omap_ctrl_writel(control_context.csi, OMAP343X_CONTROL_CSI);
+	omap_ctrl_writel(control_context.padconf_sys_nirq,
+			 OMAP343X_CONTROL_PADCONF_SYSNIRQ);
 	return;
 }
+
+void omap3630_ctrl_disable_rta(void)
+{
+	if (!cpu_is_omap3630())
+		return;
+	omap_ctrl_writel(OMAP36XX_RTA_DISABLE, OMAP36XX_CONTROL_MEM_RTA_CTRL);
+}
+
+/**
+ * omap3_ctrl_save_padconf - save padconf registers to scratchpad RAM
+ *
+ * Tell the SCM to start saving the padconf registers, then wait for
+ * the process to complete.  Returns 0 unconditionally, although it
+ * should also eventually be able to return -ETIMEDOUT, if the save
+ * does not complete.
+ *
+ * XXX This function is missing a timeout.  What should it be?
+ */
+int omap3_ctrl_save_padconf(void)
+{
+	u32 cpo;
+
+	/* Save the padconf registers */
+	cpo = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
+	cpo |= START_PADCONF_SAVE;
+	omap_ctrl_writel(cpo, OMAP343X_CONTROL_PADCONF_OFF);
+
+	/* wait for the save to complete */
+	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
+		 & PADCONF_SAVE_DONE))
+		udelay(1);
+
+	return 0;
+}
+
 #endif /* CONFIG_ARCH_OMAP3 && CONFIG_PM */
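
As an aside on omap3_ctrl_write_boot_mode() above: it simply packs an ASCII
'B', an ASCII 'M' and the 8-bit boot mode into one scratchpad word before
writing it.  A standalone sketch of just that packing (the scratchpad write
itself is left out):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pack_boot_mode(uint8_t bootmode)
	{
		/* Same layout as the helper: 'B' in bits 31:24, 'M' in 23:16 */
		return ((uint32_t)'B' << 24) | ((uint32_t)'M' << 16) | bootmode;
	}

	int main(void)
	{
		printf("0x%08x\n", (unsigned)pack_boot_mode(0x02)); /* 0x424d0002 */
		return 0;
	}
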
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index b6c6b7c..f0629ae 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -148,6 +148,15 @@
 #define OMAP343X_CONTROL_TEST_KEY_11	(OMAP2_CONTROL_GENERAL + 0x00f4)
 #define OMAP343X_CONTROL_TEST_KEY_12	(OMAP2_CONTROL_GENERAL + 0x00f8)
 #define OMAP343X_CONTROL_TEST_KEY_13	(OMAP2_CONTROL_GENERAL + 0x00fc)
+#define OMAP343X_CONTROL_FUSE_OPP1_VDD1 (OMAP2_CONTROL_GENERAL + 0x0110)
+#define OMAP343X_CONTROL_FUSE_OPP2_VDD1 (OMAP2_CONTROL_GENERAL + 0x0114)
+#define OMAP343X_CONTROL_FUSE_OPP3_VDD1 (OMAP2_CONTROL_GENERAL + 0x0118)
+#define OMAP343X_CONTROL_FUSE_OPP4_VDD1 (OMAP2_CONTROL_GENERAL + 0x011c)
+#define OMAP343X_CONTROL_FUSE_OPP5_VDD1 (OMAP2_CONTROL_GENERAL + 0x0120)
+#define OMAP343X_CONTROL_FUSE_OPP1_VDD2 (OMAP2_CONTROL_GENERAL + 0x0124)
+#define OMAP343X_CONTROL_FUSE_OPP2_VDD2 (OMAP2_CONTROL_GENERAL + 0x0128)
+#define OMAP343X_CONTROL_FUSE_OPP3_VDD2 (OMAP2_CONTROL_GENERAL + 0x012c)
+#define OMAP343X_CONTROL_FUSE_SR        (OMAP2_CONTROL_GENERAL + 0x0130)
 #define OMAP343X_CONTROL_IVA2_BOOTADDR	(OMAP2_CONTROL_GENERAL + 0x0190)
 #define OMAP343X_CONTROL_IVA2_BOOTMOD	(OMAP2_CONTROL_GENERAL + 0x0194)
 #define OMAP343X_CONTROL_DEBOBS(i)	(OMAP2_CONTROL_GENERAL + 0x01B0 \
@@ -164,6 +173,26 @@
 #define OMAP343X_CONTROL_SRAMLDO5	(OMAP2_CONTROL_GENERAL + 0x02C0)
 #define OMAP343X_CONTROL_CSI		(OMAP2_CONTROL_GENERAL + 0x02C4)
 
+/* OMAP3630 only CONTROL_GENERAL register offsets */
+#define OMAP3630_CONTROL_FUSE_OPP1G_VDD1        (OMAP2_CONTROL_GENERAL + 0x0110)
+#define OMAP3630_CONTROL_FUSE_OPP50_VDD1        (OMAP2_CONTROL_GENERAL + 0x0114)
+#define OMAP3630_CONTROL_FUSE_OPP100_VDD1       (OMAP2_CONTROL_GENERAL + 0x0118)
+#define OMAP3630_CONTROL_FUSE_OPP120_VDD1       (OMAP2_CONTROL_GENERAL + 0x0120)
+#define OMAP3630_CONTROL_FUSE_OPP50_VDD2        (OMAP2_CONTROL_GENERAL + 0x0128)
+#define OMAP3630_CONTROL_FUSE_OPP100_VDD2       (OMAP2_CONTROL_GENERAL + 0x012C)
+
+/* OMAP44xx control efuse offsets */
+#define OMAP44XX_CONTROL_FUSE_IVA_OPP50		0x22C
+#define OMAP44XX_CONTROL_FUSE_IVA_OPP100	0x22F
+#define OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO	0x232
+#define OMAP44XX_CONTROL_FUSE_IVA_OPPNITRO	0x235
+#define OMAP44XX_CONTROL_FUSE_MPU_OPP50		0x240
+#define OMAP44XX_CONTROL_FUSE_MPU_OPP100	0x243
+#define OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO	0x246
+#define OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO	0x249
+#define OMAP44XX_CONTROL_FUSE_CORE_OPP50	0x254
+#define OMAP44XX_CONTROL_FUSE_CORE_OPP100	0x257
+
 /* AM35XX only CONTROL_GENERAL register offsets */
 #define AM35XX_CONTROL_MSUSPENDMUX_6    (OMAP2_CONTROL_GENERAL + 0x0038)
 #define AM35XX_CONTROL_DEVCONF2         (OMAP2_CONTROL_GENERAL + 0x0310)
@@ -204,6 +233,10 @@
 #define OMAP343X_CONTROL_WKUP_DEBOBS3 (OMAP343X_CONTROL_GENERAL_WKUP + 0x014)
 #define OMAP343X_CONTROL_WKUP_DEBOBS4 (OMAP343X_CONTROL_GENERAL_WKUP + 0x018)
 
+/* 36xx-only RTA - Retention till Access control registers and bits */
+#define OMAP36XX_CONTROL_MEM_RTA_CTRL	0x40C
+#define OMAP36XX_RTA_DISABLE		0x0
+
 /* 34xx D2D idle-related pins, handled by PM core */
 #define OMAP3_PADCONF_SAD2D_MSTANDBY   0x250
 #define OMAP3_PADCONF_SAD2D_IDLEACK    0x254
@@ -270,6 +303,8 @@
 #define OMAP343X_SCRATCHPAD_ROM		(OMAP343X_CTRL_BASE + 0x860)
 #define OMAP343X_SCRATCHPAD		(OMAP343X_CTRL_BASE + 0x910)
 #define OMAP343X_SCRATCHPAD_ROM_OFFSET	0x19C
+#define OMAP343X_SCRATCHPAD_REGADDR(reg)	OMAP2_L4_IO_ADDRESS(\
+						OMAP343X_SCRATCHPAD + reg)
 
 /* AM35XX_CONTROL_IPSS_CLK_CTRL bits */
 #define AM35XX_USBOTG_VBUSP_CLK_SHIFT   0
@@ -309,7 +344,7 @@
 #define		FEAT_SGX_NONE		2
 
 #define OMAP3_IVA_SHIFT			12
-#define OMAP3_IVA_MASK			(1 << OMAP3_SGX_SHIFT)
+#define OMAP3_IVA_MASK			(1 << OMAP3_IVA_SHIFT)
 #define		FEAT_IVA		0
 #define		FEAT_IVA_NONE		1
 
@@ -347,10 +382,13 @@
 extern void omap3_clear_scratchpad_contents(void);
 extern u32 *get_restore_pointer(void);
 extern u32 *get_es3_restore_pointer(void);
+extern u32 *get_omap3630_restore_pointer(void);
 extern u32 omap3_arm_context[128];
 extern void omap3_control_save_context(void);
 extern void omap3_control_restore_context(void);
-
+extern void omap3_ctrl_write_boot_mode(u8 bootmode);
+extern void omap3630_ctrl_disable_rta(void);
+extern int omap3_ctrl_save_padconf(void);
 #else
 #define omap_ctrl_base_get()		0
 #define omap_ctrl_readb(x)		0
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 0d50b45..f7b22a1 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -27,8 +27,8 @@
 
 #include <plat/prcm.h>
 #include <plat/irqs.h>
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+#include "powerdomain.h"
+#include "clockdomain.h"
 #include <plat/serial.h>
 
 #include "pm.h"
@@ -47,6 +47,8 @@
 
 #define OMAP3_STATE_MAX OMAP3_STATE_C7
 
+#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */
+
 struct omap3_processor_cx {
 	u8 valid;
 	u8 type;
@@ -252,7 +254,7 @@
 	 * FIXME: we currently manage device-specific idle states
 	 *        for PER and CORE in combination with CPU-specific
 	 *        idle states.  This is wrong, and device-specific
-	 *        idle managment needs to be separated out into 
+	 *        idle management needs to be separated out into 
 	 *        its own code.
 	 */
 
@@ -293,25 +295,26 @@
 DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
 
 /**
- * omap3_cpuidle_update_states - Update the cpuidle states.
+ * omap3_cpuidle_update_states() - Update the cpuidle states
+ * @mpu_deepest_state:	Enable states up to and including this for the MPU domain
+ * @core_deepest_state:	Enable states up to and including this for the CORE domain
  *
- * Currently, this function toggles the validity of idle states based upon
- * the flag 'enable_off_mode'. When the flag is set all states are valid.
- * Else, states leading to OFF state set to be invalid.
+ * This walks the list of available C-states and marks each one valid or
+ * invalid, depending on the deepest state that can be achieved for the
+ * corresponding power domain.
  */
-void omap3_cpuidle_update_states(void)
+void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
 {
 	int i;
 
 	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
 		struct omap3_processor_cx *cx = &omap3_power_states[i];
 
-		if (enable_off_mode) {
+		if ((cx->mpu_state >= mpu_deepest_state) &&
+		    (cx->core_state >= core_deepest_state)) {
 			cx->valid = 1;
 		} else {
-			if ((cx->mpu_state == PWRDM_POWER_OFF) ||
-				(cx->core_state	== PWRDM_POWER_OFF))
-				cx->valid = 0;
+			cx->valid = 0;
 		}
 	}
 }
@@ -452,6 +455,18 @@
 	omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
 	omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
 				CPUIDLE_FLAG_CHECK_BM;
+
+	/*
+	 * Erratum i583: implementation for ES rev < ES1.2 on 3630. OFF mode
+	 * cannot be enabled in a stable form on earlier revisions, so the
+	 * C7 state is disabled as a result.
+	 */
+	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
+		omap3_power_states[OMAP3_STATE_C7].valid = 0;
+		cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
+		WARN_ONCE(1, "%s: core off state C7 disabled due to i583\n",
+				__func__);
+	}
 }
 
 struct cpuidle_driver omap3_idle_driver = {
@@ -504,7 +519,10 @@
 		return -EINVAL;
 	dev->state_count = count;
 
-	omap3_cpuidle_update_states();
+	if (enable_off_mode)
+		omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
+	else
+		omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);
 
 	if (cpuidle_register_device(dev)) {
 		printk(KERN_ERR "%s: CPUidle register device failed\n",
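
The reworked omap3_cpuidle_update_states() above marks a C-state valid only
when both its MPU and CORE target states are no deeper than the requested
limits.  With the usual PWRDM_POWER_* encoding (OFF=0, RET=1, INACTIVE=2,
ON=3, assumed here; the kernel takes it from powerdomain.h), "no deeper" is
just a >= comparison.  A small sketch of that predicate:

	#include <stdbool.h>
	#include <stdio.h>

	/* Assumed powerdomain encoding: deeper state == smaller value */
	enum { PWR_OFF = 0, PWR_RET = 1, PWR_INACTIVE = 2, PWR_ON = 3 };

	static bool cstate_valid(int mpu_state, int core_state,
				 int mpu_deepest, int core_deepest)
	{
		return mpu_state >= mpu_deepest && core_state >= core_deepest;
	}

	int main(void)
	{
		/* C7 (MPU OFF / CORE OFF) becomes invalid once the limit is RET */
		printf("%d\n", cstate_valid(PWR_OFF, PWR_OFF, PWR_RET, PWR_RET));
		printf("%d\n", cstate_valid(PWR_RET, PWR_RET, PWR_RET, PWR_RET));
		return 0;
	}
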
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 5a0c148..2c9c912 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -638,6 +638,7 @@
 static void __init omap_hsmmc_reset(void)
 {
 	u32 i, nr_controllers;
+	struct clk *iclk, *fclk;
 
 	if (cpu_is_omap242x())
 		return;
@@ -647,7 +648,6 @@
 
 	for (i = 0; i < nr_controllers; i++) {
 		u32 v, base = 0;
-		struct clk *iclk, *fclk;
 		struct device *dev = &dummy_pdev.dev;
 
 		switch (i) {
@@ -678,19 +678,16 @@
 		dummy_pdev.id = i;
 		dev_set_name(&dummy_pdev.dev, "mmci-omap-hs.%d", i);
 		iclk = clk_get(dev, "ick");
-		if (iclk && clk_enable(iclk))
-			iclk = NULL;
+		if (IS_ERR(iclk))
+			goto err1;
+		if (clk_enable(iclk))
+			goto err2;
 
 		fclk = clk_get(dev, "fck");
-		if (fclk && clk_enable(fclk))
-			fclk = NULL;
-
-		if (!iclk || !fclk) {
-			printk(KERN_WARNING
-			       "%s: Unable to enable clocks for MMC%d, "
-			       "cannot reset.\n",  __func__, i);
-			break;
-		}
+		if (IS_ERR(fclk))
+			goto err3;
+		if (clk_enable(fclk))
+			goto err4;
 
 		omap_writel(MMCHS_SYSCONFIG_SWRESET, base + MMCHS_SYSCONFIG);
 		v = omap_readl(base + MMCHS_SYSSTATUS);
@@ -698,15 +695,22 @@
 			 MMCHS_SYSSTATUS_RESETDONE))
 			cpu_relax();
 
-		if (fclk) {
-			clk_disable(fclk);
-			clk_put(fclk);
-		}
-		if (iclk) {
-			clk_disable(iclk);
-			clk_put(iclk);
-		}
+		clk_disable(fclk);
+		clk_put(fclk);
+		clk_disable(iclk);
+		clk_put(iclk);
 	}
+	return;
+
+err4:
+	clk_put(fclk);
+err3:
+	clk_disable(iclk);
+err2:
+	clk_put(iclk);
+err1:
+	printk(KERN_WARNING "%s: Unable to enable clocks for MMC%d, "
+			    "cannot reset.\n",  __func__, i);
 }
 #else
 static inline void omap_hsmmc_reset(void) {}
@@ -951,72 +955,12 @@
 
 /*-------------------------------------------------------------------------*/
 
-/*
- * Inorder to avoid any assumptions from bootloader regarding WDT
- * settings, WDT module is reset during init. This enables the watchdog
- * timer. Hence it is required to disable the watchdog after the WDT reset
- * during init. Otherwise the system would reboot as per the default
- * watchdog timer registers settings.
- */
-#define OMAP_WDT_WPS	(0x34)
-#define OMAP_WDT_SPR	(0x48)
-
-static int omap2_disable_wdt(struct omap_hwmod *oh, void *unused)
-{
-	void __iomem *base;
-	int ret;
-
-	if (!oh) {
-		pr_err("%s: Could not look up wdtimer_hwmod\n", __func__);
-		return -EINVAL;
-	}
-
-	base = omap_hwmod_get_mpu_rt_va(oh);
-	if (!base) {
-		pr_err("%s: Could not get the base address for %s\n",
-				oh->name, __func__);
-		return -EINVAL;
-	}
-
-	/* Enable the clocks before accessing the WDT registers */
-	ret = omap_hwmod_enable(oh);
-	if (ret) {
-		pr_err("%s: Could not enable clocks for %s\n",
-				oh->name, __func__);
-		return ret;
-	}
-
-	/* sequence required to disable watchdog */
-	__raw_writel(0xAAAA, base + OMAP_WDT_SPR);
-	while (__raw_readl(base + OMAP_WDT_WPS) & 0x10)
-		cpu_relax();
-
-	__raw_writel(0x5555, base + OMAP_WDT_SPR);
-	while (__raw_readl(base + OMAP_WDT_WPS) & 0x10)
-		cpu_relax();
-
-	ret = omap_hwmod_idle(oh);
-	if (ret)
-		pr_err("%s: Could not disable clocks for %s\n",
-				oh->name, __func__);
-
-	return ret;
-}
-
-static void __init omap_disable_wdt(void)
-{
-	if (cpu_class_is_omap2())
-		omap_hwmod_for_each_by_class("wd_timer",
-						omap2_disable_wdt, NULL);
-	return;
-}
-
 static int __init omap2_init_devices(void)
 {
-	/* please keep these calls, and their implementations above,
+	/*
+	 * please keep these calls, and their implementations above,
 	 * in alphabetical order so they're easier to sort through.
 	 */
-	omap_disable_wdt();
 	omap_hsmmc_reset();
 	omap_init_audio();
 	omap_init_camera();
@@ -1034,7 +978,7 @@
 arch_initcall(omap2_init_devices);
 
 #if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
-struct omap_device_pm_latency omap_wdt_latency[] = {
+static struct omap_device_pm_latency omap_wdt_latency[] = {
 	[0] = {
 		.deactivate_func = omap_device_idle_hwmods,
 		.activate_func   = omap_device_enable_hwmods,
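
The omap_hsmmc_reset() change above replaces the NULL-juggling around
clk_get()/clk_enable() with the usual goto-based unwind, so every failure
path releases exactly what was acquired before it.  A generic sketch of
that idiom, using hypothetical acquire/enable helpers rather than the
clock API:

	#include <stdio.h>

	/* Hypothetical stand-ins for clk_get()/clk_enable() style calls */
	static int acquire(const char *n)  { printf("acquire %s\n", n); return 0; }
	static void release(const char *n) { printf("release %s\n", n); }
	static int enable(const char *n)   { printf("enable %s\n", n);  return 0; }
	static void disable(const char *n) { printf("disable %s\n", n); }

	static int reset_with_clocks(void)
	{
		if (acquire("ick"))
			goto err1;
		if (enable("ick"))
			goto err2;
		if (acquire("fck"))
			goto err3;
		if (enable("fck"))
			goto err4;

		/* ... do the reset, then tear down in reverse order ... */
		disable("fck"); release("fck");
		disable("ick"); release("ick");
		return 0;

	err4:
		release("fck");
	err3:
		disable("ick");
	err2:
		release("ick");
	err1:
		printf("unable to enable clocks, cannot reset\n");
		return -1;
	}

	int main(void) { return reset_with_clocks(); }
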
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
new file mode 100644
index 0000000..d2f15f5
--- /dev/null
+++ b/arch/arm/mach-omap2/dma.c
@@ -0,0 +1,297 @@
+/*
+ * OMAP2+ DMA driver
+ *
+ * Copyright (C) 2003 - 2008 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
+ * Graphics DMA and LCD DMA graphics transformations
+ * by Imre Deak <imre.deak@nokia.com>
+ * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
+ * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Converted DMA library into platform driver
+ *	- G, Manjunath Kondaiah <manjugk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+#include <plat/dma.h>
+
+#define OMAP2_DMA_STRIDE	0x60
+
+static u32 errata;
+static u8 dma_stride;
+
+static struct omap_dma_dev_attr *d;
+
+static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
+
+static u16 reg_map[] = {
+	[REVISION]		= 0x00,
+	[GCR]			= 0x78,
+	[IRQSTATUS_L0]		= 0x08,
+	[IRQSTATUS_L1]		= 0x0c,
+	[IRQSTATUS_L2]		= 0x10,
+	[IRQSTATUS_L3]		= 0x14,
+	[IRQENABLE_L0]		= 0x18,
+	[IRQENABLE_L1]		= 0x1c,
+	[IRQENABLE_L2]		= 0x20,
+	[IRQENABLE_L3]		= 0x24,
+	[SYSSTATUS]		= 0x28,
+	[OCP_SYSCONFIG]		= 0x2c,
+	[CAPS_0]		= 0x64,
+	[CAPS_2]		= 0x6c,
+	[CAPS_3]		= 0x70,
+	[CAPS_4]		= 0x74,
+
+	/* Common register offsets */
+	[CCR]			= 0x80,
+	[CLNK_CTRL]		= 0x84,
+	[CICR]			= 0x88,
+	[CSR]			= 0x8c,
+	[CSDP]			= 0x90,
+	[CEN]			= 0x94,
+	[CFN]			= 0x98,
+	[CSEI]			= 0xa4,
+	[CSFI]			= 0xa8,
+	[CDEI]			= 0xac,
+	[CDFI]			= 0xb0,
+	[CSAC]			= 0xb4,
+	[CDAC]			= 0xb8,
+
+	/* Channel specific register offsets */
+	[CSSA]			= 0x9c,
+	[CDSA]			= 0xa0,
+	[CCEN]			= 0xbc,
+	[CCFN]			= 0xc0,
+	[COLOR]			= 0xc4,
+
+	/* OMAP4 specific registers */
+	[CDP]			= 0xd0,
+	[CNDP]			= 0xd4,
+	[CCDN]			= 0xd8,
+};
+
+static struct omap_device_pm_latency omap2_dma_latency[] = {
+	{
+		.deactivate_func = omap_device_idle_hwmods,
+		.activate_func	 = omap_device_enable_hwmods,
+		.flags		 = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+	},
+};
+
+static void __iomem *dma_base;
+static inline void dma_write(u32 val, int reg, int lch)
+{
+	u8  stride;
+	u32 offset;
+
+	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
+	offset = reg_map[reg] + (stride * lch);
+	__raw_writel(val, dma_base + offset);
+}
+
+static inline u32 dma_read(int reg, int lch)
+{
+	u8 stride;
+	u32 offset, val;
+
+	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
+	offset = reg_map[reg] + (stride * lch);
+	val = __raw_readl(dma_base + offset);
+	return val;
+}
+
+static inline void omap2_disable_irq_lch(int lch)
+{
+	u32 val;
+
+	val = dma_read(IRQENABLE_L0, lch);
+	val &= ~(1 << lch);
+	dma_write(val, IRQENABLE_L0, lch);
+}
+
+static void omap2_clear_dma(int lch)
+{
+	int i = dma_common_ch_start;
+
+	for (; i <= dma_common_ch_end; i += 1)
+		dma_write(0, i, lch);
+}
+
+static void omap2_show_dma_caps(void)
+{
+	u8 revision = dma_read(REVISION, 0) & 0xff;
+	printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
+				revision >> 4, revision & 0xf);
+	return;
+}
+
+static u32 configure_dma_errata(void)
+{
+
+	/*
+	 * Errata applicable to OMAP2430 ES1.0 and all OMAP2420
+	 *
+	 * I.
+	 * Erratum ID: Not Available
+	 * Inter-frame DMA buffering issue: DMA will wrongly buffer
+	 * elements if packing and bursting is enabled. This might result
+	 * in data getting stalled in the FIFO at the end of the block.
+	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
+	 * guarantee no data will stay in the DMA FIFO in case inter frame
+	 * buffering occurs
+	 *
+	 * II.
+	 * Erratum ID: Not Available
+	 * DMA may hang when several channels are used in parallel
+	 * In the following configuration, DMA channel hanging can occur:
+	 * a. Channel i, hardware synchronized, is enabled
+	 * b. Another channel (Channel x), software synchronized, is enabled.
+	 * c. Channel i is disabled before end of transfer
+	 * d. Channel i is reenabled.
+	 * e. Steps 1 to 4 are repeated a certain number of times.
+	 * f. A third channel (Channel y), software synchronized, is enabled.
+	 * Channel x and Channel y may hang immediately after step 'f'.
+	 * Workaround:
+	 * For any channel used - make sure NextLCH_ID is set to the value j.
+	 */
+	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
+				(omap_type() == OMAP2430_REV_ES1_0))) {
+
+		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
+		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
+	}
+
+	/*
+	 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
+	 * after a transaction error.
+	 * Workaround: SW should explicitly disable the channel.
+	 */
+	if (cpu_class_is_omap2())
+		SET_DMA_ERRATA(DMA_ERRATA_i378);
+
+	/*
+	 * Erratum ID: i541: sDMA FIFO draining does not finish
+	 * If an sDMA channel is disabled on the fly, sDMA enters standby even
+	 * though FIFO draining is still in progress.
+	 * Workaround: Put sDMA in NoStandby mode before a logical channel is
+	 * disabled, then put it back to SmartStandby right after the channel
+	 * finishes FIFO draining.
+	 */
+	if (cpu_is_omap34xx())
+		SET_DMA_ERRATA(DMA_ERRATA_i541);
+
+	/*
+	 * Erratum ID: i88 : Special programming model needed to disable DMA
+	 * before end of block.
+	 * Workaround: software must ensure that the DMA is configured in No
+	 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01").
+	 */
+	if (omap_type() == OMAP3430_REV_ES1_0)
+		SET_DMA_ERRATA(DMA_ERRATA_i88);
+
+	/*
+	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
+	 * read before the DMA controller finished disabling the channel.
+	 */
+	SET_DMA_ERRATA(DMA_ERRATA_3_3);
+
+	/*
+	 * Erratum ID: Not Available
+	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
+	 * after a secure SRAM context save and restore.
+	 * Workaround: manually clear those IRQs to avoid spurious
+	 * interrupts. This affects only secure devices.
+	 */
+	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
+		SET_DMA_ERRATA(DMA_ROMCODE_BUG);
+
+	return errata;
+}
+
+/* One time initializations */
+static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
+{
+	struct omap_device			*od;
+	struct omap_system_dma_plat_info	*p;
+	struct resource				*mem;
+	char					*name = "omap_dma_system";
+
+	dma_stride		= OMAP2_DMA_STRIDE;
+	dma_common_ch_start	= CSDP;
+	if (cpu_is_omap3630() || cpu_is_omap4430())
+		dma_common_ch_end = CCDN;
+	else
+		dma_common_ch_end = CCFN;
+
+	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
+	if (!p) {
+		pr_err("%s: Unable to allocate pdata for %s:%s\n",
+			__func__, name, oh->name);
+		return -ENOMEM;
+	}
+
+	p->dma_attr		= (struct omap_dma_dev_attr *)oh->dev_attr;
+	p->disable_irq_lch	= omap2_disable_irq_lch;
+	p->show_dma_caps	= omap2_show_dma_caps;
+	p->clear_dma		= omap2_clear_dma;
+	p->dma_write		= dma_write;
+	p->dma_read		= dma_read;
+
+	p->clear_lch_regs	= NULL;
+
+	p->errata		= configure_dma_errata();
+
+	od = omap_device_build(name, 0, oh, p, sizeof(*p),
+			omap2_dma_latency, ARRAY_SIZE(omap2_dma_latency), 0);
+	kfree(p);
+	if (IS_ERR(od)) {
+		pr_err("%s: Can't build omap_device for %s:%s.\n",
+			__func__, name, oh->name);
+		return PTR_ERR(od);
+	}
+
+	mem = platform_get_resource(&od->pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&od->pdev.dev, "%s: no mem resource\n", __func__);
+		return -EINVAL;
+	}
+	dma_base = ioremap(mem->start, resource_size(mem));
+	if (!dma_base) {
+		dev_err(&od->pdev.dev, "%s: ioremap fail\n", __func__);
+		return -ENOMEM;
+	}
+
+	d = oh->dev_attr;
+	d->chan = kzalloc(sizeof(struct omap_dma_lch) *
+					(d->lch_count), GFP_KERNEL);
+
+	if (!d->chan) {
+		dev_err(&od->pdev.dev, "%s: kzalloc fail\n", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __init omap2_system_dma_init(void)
+{
+	return omap_hwmod_for_each_by_class("dma",
+			omap2_system_dma_init_dev, NULL);
+}
+arch_initcall(omap2_system_dma_init);
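
dma_read()/dma_write() above compute a register address as the base offset
from reg_map[] plus a per-channel stride, applied only to the channel
registers (indices at or above dma_common_ch_start).  A standalone sketch
of that offset calculation; the register subset and the index chosen as the
per-channel start are illustrative, the kernel uses the full enum from
plat/dma.h with dma_common_ch_start = CSDP and a 0x60 stride:

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_STRIDE	0x60u		/* OMAP2_DMA_STRIDE */

	enum reg { REVISION, IRQENABLE_L0, CCR, CSR };	/* tiny subset */

	static const uint16_t reg_map[] = {
		[REVISION]	= 0x00,
		[IRQENABLE_L0]	= 0x18,
		[CCR]		= 0x80,		/* per-channel from here on */
		[CSR]		= 0x8c,
	};

	/* Per-channel registers get reg_map[reg] + stride * lch */
	static uint32_t reg_offset(enum reg r, int lch)
	{
		uint32_t stride = (r >= CCR) ? DMA_STRIDE : 0;

		return reg_map[r] + stride * (uint32_t)lch;
	}

	int main(void)
	{
		/* CSR of logical channel 2: 0x8c + 2 * 0x60 = 0x14c */
		printf("0x%x\n", (unsigned)reg_offset(CSR, 2));
		return 0;
	}
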
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index ebb888f..f77022b 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -32,9 +32,7 @@
 #include <plat/clock.h>
 
 #include "clock.h"
-#include "prm.h"
-#include "prm-regbits-34xx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 
 /* CM_AUTOIDLE_PLL*.AUTO_* bit values */
@@ -225,9 +223,33 @@
 }
 
 /**
- * lookup_dco_sddiv -  Set j-type DPLL4 compensation variables
+ * _lookup_dco - Lookup DCO used by j-type DPLL
  * @clk: pointer to a DPLL struct clk
  * @dco: digital control oscillator selector
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_dco(struct clk *clk, u8 *dco, u16 m, u8 n)
+{
+	unsigned long fint, clkinp; /* watch out for overflow */
+
+	clkinp = clk->parent->rate;
+	fint = (clkinp / n) * m;
+
+	if (fint < 1000000000)
+		*dco = 2;
+	else
+		*dco = 4;
+}
+
+/**
+ * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
+ * @clk: pointer to a DPLL struct clk
  * @sd_div: target sigma-delta divider
  * @m: DPLL multiplier to set
  * @n: DPLL divider to set
@@ -237,19 +259,13 @@
  * XXX This code is not needed for 3430/AM35xx; can it be optimized
  * out in non-multi-OMAP builds for those chips?
  */
-static void lookup_dco_sddiv(struct clk *clk, u8 *dco, u8 *sd_div, u16 m,
-			     u8 n)
+static void _lookup_sddiv(struct clk *clk, u8 *sd_div, u16 m, u8 n)
 {
-	unsigned long fint, clkinp, sd; /* watch out for overflow */
+	unsigned long clkinp, sd; /* watch out for overflow */
 	int mod1, mod2;
 
 	clkinp = clk->parent->rate;
-	fint = (clkinp / n) * m;
 
-	if (fint < 1000000000)
-		*dco = 2;
-	else
-		*dco = 4;
 	/*
 	 * target sigma-delta to near 250MHz
 	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
@@ -278,6 +294,7 @@
 static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
 {
 	struct dpll_data *dd = clk->dpll_data;
+	u8 dco, sd_div;
 	u32 v;
 
 	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
@@ -300,18 +317,16 @@
 	v |= m << __ffs(dd->mult_mask);
 	v |= (n - 1) << __ffs(dd->div1_mask);
 
-	/*
-	 * XXX This code is not needed for 3430/AM35XX; can it be optimized
-	 * out in non-multi-OMAP builds for those chips?
-	 */
-	if ((dd->flags & DPLL_J_TYPE) && !(dd->flags & DPLL_NO_DCO_SEL)) {
-		u8 dco, sd_div;
-		lookup_dco_sddiv(clk, &dco, &sd_div, m, n);
-		/* XXX This probably will need revision for OMAP4 */
-		v &= ~(OMAP3630_PERIPH_DPLL_DCO_SEL_MASK
-			| OMAP3630_PERIPH_DPLL_SD_DIV_MASK);
-		v |= dco << __ffs(OMAP3630_PERIPH_DPLL_DCO_SEL_MASK);
-		v |= sd_div << __ffs(OMAP3630_PERIPH_DPLL_SD_DIV_MASK);
+	/* Configure dco and sd_div for dplls that have these fields */
+	if (dd->dco_mask) {
+		_lookup_dco(clk, &dco, m, n);
+		v &= ~(dd->dco_mask);
+		v |= dco << __ffs(dd->dco_mask);
+	}
+	if (dd->sddiv_mask) {
+		_lookup_sddiv(clk, &sd_div, m, n);
+		v &= ~(dd->sddiv_mask);
+		v |= sd_div << __ffs(dd->sddiv_mask);
 	}
 
 	__raw_writel(v, dd->mult_div1_reg);
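
For the DPLL change above: _lookup_dco() picks the DCO range purely from
the DPLL internal frequency, fint = (clkinp / n) * m, selecting DCO 2 below
1 GHz and DCO 4 otherwise.  A sketch of that selection with made-up clock
numbers:

	#include <stdio.h>

	static int lookup_dco(unsigned long clkinp, unsigned int m, unsigned int n)
	{
		unsigned long fint = (clkinp / n) * m;	/* watch out for overflow */

		return (fint < 1000000000UL) ? 2 : 4;
	}

	int main(void)
	{
		/* e.g. a 26 MHz reference with M = 40, N = 1: 1.04 GHz -> DCO 4 */
		printf("dco = %d\n", lookup_dco(26000000UL, 40, 1));
		return 0;
	}
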
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 6feeeae..911cd2e 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -11,9 +11,16 @@
  * published by the Free Software Foundation.
  */
 
+/*
+ * XXX The function pointers to the PRM/CM functions are incorrect and
+ * should be removed.  No device driver should be changing PRM/CM bits
+ * directly; that's a layering violation -- those bits are the responsibility
+ * of the OMAP PM core code.
+ */
+
 #include <linux/platform_device.h>
-#include "prm.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
 #ifdef CONFIG_BRIDGE_DVFS
 #include <plat/omap-pm.h>
 #endif
@@ -31,12 +38,12 @@
 	.cpu_set_freq = omap_pm_cpu_set_freq,
 	.cpu_get_freq = omap_pm_cpu_get_freq,
 #endif
-	.dsp_prm_read = prm_read_mod_reg,
-	.dsp_prm_write = prm_write_mod_reg,
-	.dsp_prm_rmw_bits = prm_rmw_mod_reg_bits,
-	.dsp_cm_read = cm_read_mod_reg,
-	.dsp_cm_write = cm_write_mod_reg,
-	.dsp_cm_rmw_bits = cm_rmw_mod_reg_bits,
+	.dsp_prm_read = omap2_prm_read_mod_reg,
+	.dsp_prm_write = omap2_prm_write_mod_reg,
+	.dsp_prm_rmw_bits = omap2_prm_rmw_mod_reg_bits,
+	.dsp_cm_read = omap2_cm_read_mod_reg,
+	.dsp_cm_write = omap2_cm_write_mod_reg,
+	.dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
 };
 
 static int __init omap_dsp_init(void)
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
new file mode 100644
index 0000000..413de18
--- /dev/null
+++ b/arch/arm/mach-omap2/gpio.c
@@ -0,0 +1,104 @@
+/*
+ * OMAP2+ specific gpio initialization
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ *	Charulatha V <charu@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
+
+static struct omap_device_pm_latency omap_gpio_latency[] = {
+	[0] = {
+		.deactivate_func = omap_device_idle_hwmods,
+		.activate_func   = omap_device_enable_hwmods,
+		.flags		 = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+	},
+};
+
+static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
+{
+	struct omap_device *od;
+	struct omap_gpio_platform_data *pdata;
+	struct omap_gpio_dev_attr *dev_attr;
+	char *name = "omap_gpio";
+	int id;
+
+	/*
+	 * Extract the device id from the name field available in the
+	 * hwmod database and use it for constructing the ids of the
+	 * gpio devices.
+	 * CAUTION: Make sure the name in the hwmod database does
+	 * not change. If changed, make corresponding change here
+	 * or make use of static variable mechanism to handle this.
+	 */
+	sscanf(oh->name, "gpio%d", &id);
+
+	pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL);
+	if (!pdata) {
+		pr_err("gpio%d: Memory allocation failed\n", id);
+		return -ENOMEM;
+	}
+
+	dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
+	pdata->bank_width = dev_attr->bank_width;
+	pdata->dbck_flag = dev_attr->dbck_flag;
+	pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
+
+	switch (oh->class->rev) {
+	case 0:
+	case 1:
+		pdata->bank_type = METHOD_GPIO_24XX;
+		break;
+	case 2:
+		pdata->bank_type = METHOD_GPIO_44XX;
+		break;
+	default:
+		WARN(1, "Invalid gpio bank_type\n");
+		kfree(pdata);
+		return -EINVAL;
+	}
+
+	od = omap_device_build(name, id - 1, oh, pdata,
+				sizeof(*pdata),	omap_gpio_latency,
+				ARRAY_SIZE(omap_gpio_latency),
+				false);
+	kfree(pdata);
+
+	if (IS_ERR(od)) {
+		WARN(1, "Can't build omap_device for %s:%s.\n",
+					name, oh->name);
+		return PTR_ERR(od);
+	}
+
+	gpio_bank_count++;
+	return 0;
+}
+
+/*
+ * gpio_init needs to be done before
+ * machine_init functions access gpio APIs.
+ * Hence gpio_init is a postcore_initcall.
+ */
+static int __init omap2_gpio_init(void)
+{
+	return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init,
+						NULL);
+}
+postcore_initcall(omap2_gpio_init);
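The new gpio.c builds one omap_device per "gpio" class hwmod by pairing omap_hwmod_for_each_by_class() with omap_device_build(). A minimal, hypothetical sketch of the same pattern for some other hwmod class, reusing only the calls that appear in the file above; "foo", foo_dev_init() and foo_init() are made-up names:

/* Hedged sketch: the hwmod-class iteration pattern from gpio.c above,
 * applied to a hypothetical "foo" class with no platform data or
 * PM latency table.
 */
static int __init foo_dev_init(struct omap_hwmod *oh, void *unused)
{
	struct omap_device *od;

	od = omap_device_build("omap_foo", -1, oh, NULL, 0, NULL, 0, false);
	if (IS_ERR(od)) {
		WARN(1, "Can't build omap_device for %s\n", oh->name);
		return PTR_ERR(od);
	}

	return 0;
}

static int __init foo_init(void)
{
	return omap_hwmod_for_each_by_class("foo", foo_dev_init, NULL);
}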
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 7222096..2bb29c1 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -41,7 +41,7 @@
 		return 0;
 
 	memset(&t, 0, sizeof(t));
-	t.sync_clk = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->sync_clk);
+	t.sync_clk = gpmc_nand_data->gpmc_t->sync_clk;
 	t.cs_on = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->cs_on);
 	t.adv_on = gpmc_round_ns_to_ticks(gpmc_nand_data->gpmc_t->adv_on);
 
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 7bb6922..3a7d25f 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -173,8 +173,17 @@
 	}
 
 	switch (freq) {
+	case 104:
+		min_gpmc_clk_period = 9600; /* 104 MHz */
+		t_ces   = 3;
+		t_avds  = 4;
+		t_avdh  = 2;
+		t_ach   = 3;
+		t_aavdh = 6;
+		t_rdyo  = 9;
+		break;
 	case 83:
-		min_gpmc_clk_period = 12; /* 83 MHz */
+		min_gpmc_clk_period = 12000; /* 83 MHz */
 		t_ces   = 5;
 		t_avds  = 4;
 		t_avdh  = 2;
@@ -183,7 +192,7 @@
 		t_rdyo  = 9;
 		break;
 	case 66:
-		min_gpmc_clk_period = 15; /* 66 MHz */
+		min_gpmc_clk_period = 15000; /* 66 MHz */
 		t_ces   = 6;
 		t_avds  = 5;
 		t_avdh  = 2;
@@ -192,7 +201,7 @@
 		t_rdyo  = 11;
 		break;
 	default:
-		min_gpmc_clk_period = 18; /* 54 MHz */
+		min_gpmc_clk_period = 18500; /* 54 MHz */
 		t_ces   = 7;
 		t_avds  = 7;
 		t_avdh  = 7;
@@ -271,8 +280,8 @@
 		t.wr_cycle  = t.rd_cycle;
 		if (cpu_is_omap34xx()) {
 			t.wr_data_mux_bus = gpmc_ticks_to_ns(fclk_offset +
-					gpmc_ns_to_ticks(min_gpmc_clk_period +
-					t_rdyo));
+					gpmc_ps_to_ticks(min_gpmc_clk_period +
+					t_rdyo * 1000));
 			t.wr_access = t.access;
 		}
 	} else {
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index f46933b..1b7b3e7 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -168,6 +168,16 @@
 	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
 }
 
+unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
+{
+	unsigned long tick_ps;
+
+	/* Calculate in picosecs to yield more exact results */
+	tick_ps = gpmc_get_fclk_period();
+
+	return (time_ps + tick_ps - 1) / tick_ps;
+}
+
 unsigned int gpmc_ticks_to_ns(unsigned int ticks)
 {
 	return ticks * gpmc_get_fclk_period() / 1000;
@@ -235,7 +245,7 @@
 	int div;
 	u32 l;
 
-	l = sync_clk * 1000 + (gpmc_get_fclk_period() - 1);
+	l = sync_clk + (gpmc_get_fclk_period() - 1);
 	div = l / gpmc_get_fclk_period();
 	if (div > 4)
 		return -1;
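The gpmc changes above keep GPMC timings in picoseconds until the final tick conversion, so sub-nanosecond clock periods (12048 ps at 83 MHz, 9615 ps at 104 MHz) are no longer truncated before rounding. A minimal sketch of the ceiling division gpmc_ps_to_ticks() performs; the 6024 ps fclk period used in the comment is an assumed example value:

/*
 * Hedged sketch of the rounding done by gpmc_ps_to_ticks() above, assuming
 * an fclk period of 6024 ps (~166 MHz).  The numbers are examples only:
 *
 *   ticks = (time_ps + tick_ps - 1) / tick_ps
 *         = (12000 + 6024 - 1) / 6024 = 2    for the 83 MHz sync clock
 *
 * With the old nanosecond interface, 12.048 ns had already been rounded
 * to 12 ns before this division could happen.
 */
static inline unsigned int example_ps_to_ticks(unsigned int time_ps,
					       unsigned long tick_ps)
{
	return (time_ps + tick_ps - 1) / tick_ps;	/* round up */
}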
diff --git a/arch/arm/mach-omap2/include/mach/board-zoom.h b/arch/arm/mach-omap2/include/mach/board-zoom.h
index f93ca39..d20bd9c 100644
--- a/arch/arm/mach-omap2/include/mach/board-zoom.h
+++ b/arch/arm/mach-omap2/include/mach/board-zoom.h
@@ -1,9 +1,12 @@
 /*
  * Defines for zoom boards
  */
+#include <plat/display.h>
+
 #define ZOOM_NAND_CS    0
 
 extern int __init zoom_debugboard_init(void);
 extern void __init zoom_peripherals_init(void);
+extern void __init zoom_display_init(void);
 
 #define ZOOM2_HEADSET_EXTMUTE_GPIO	153
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index d54c4f8..befa321 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -38,41 +38,27 @@
  */
 
 #ifdef MULTI_OMAP2
-		.pushsection .data
-omap_irq_base:	.word	0
-		.popsection
 
-		/* Configure the interrupt base on the first interrupt */
+/*
+ * We use __glue to avoid errors with multiple definitions of
+ * .globl omap_irq_base as it's included from entry-armv.S but not
+ * from entry-common.S.
+ */
+#ifdef __glue
+		.pushsection .data
+		.globl	omap_irq_base
+omap_irq_base:
+		.word	0
+		.popsection
+#endif
+
+		/*
+		 * Configure the interrupt base on the first interrupt.
+		 * See also omap_irq_base_init for setting omap_irq_base.
+		 */
 		.macro  get_irqnr_preamble, base, tmp
-9:
 		ldr	\base, =omap_irq_base	@ irq base address
 		ldr	\base, [\base, #0]	@ irq base value
-		cmp	\base, #0		@ already configured?
-		bne	9997f			@ nothing to do
-
-		mrc	p15, 0, \tmp, c0, c0, 0	@ get processor revision
-		and	\tmp, \tmp, #0x000f0000	@ only check architecture
-		cmp	\tmp, #0x00070000	@ is v6?
-		beq	2400f			@ found v6 so it's omap24xx
-		mrc	p15, 0, \tmp, c0, c0, 0	@ get processor revision
-		and	\tmp, \tmp, #0x000000f0	@ check cortex 8 or 9
-		cmp	\tmp, #0x00000080	@ cortex A-8?
-		beq	3400f			@ found A-8 so it's omap34xx
-		cmp	\tmp, #0x00000090	@ cortex A-9?
-		beq	4400f			@ found A-9 so it's omap44xx
-2400:		ldr	\base, =OMAP2_IRQ_BASE
-		ldr	\tmp, =omap_irq_base
-		str	\base, [\tmp, #0]
-		b	9b
-3400:		ldr	\base, =OMAP3_IRQ_BASE
-		ldr	\tmp, =omap_irq_base
-		str	\base, [\tmp, #0]
-		b	9b
-4400:		ldr	\base, =OMAP4_IRQ_BASE
-		ldr	\tmp, =omap_irq_base
-		str	\base, [\tmp, #0]
-		b	9b
-9997:
 		.endm
 
 		/* Check the pending interrupts. Note that base already set */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index a1939b1..e66687b 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -39,13 +39,11 @@
 #include "io.h"
 
 #include <plat/omap-pm.h>
-#include <plat/powerdomain.h>
-#include "powerdomains.h"
+#include "powerdomain.h"
 
-#include <plat/clockdomain.h>
-#include "clockdomains.h"
-
+#include "clockdomain.h"
 #include <plat/omap_hwmod.h>
+#include <plat/multi.h>
 
 /*
  * The machine specific code may provide the extra mapping besides the
@@ -311,24 +309,81 @@
 	return v;
 }
 
-void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
-				 struct omap_sdrc_params *sdrc_cs1)
+static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
 {
-	u8 skip_setup_idle = 0;
+	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
+}
 
-	pwrdm_init(powerdomains_omap);
-	clkdm_init(clockdomains_omap, clkdm_autodeps);
-	if (cpu_is_omap242x())
-		omap2420_hwmod_init();
-	else if (cpu_is_omap243x())
-		omap2430_hwmod_init();
+/*
+ * Initialize omap_irq_base for entry-macro.S
+ */
+static inline void omap_irq_base_init(void)
+{
+	extern void __iomem *omap_irq_base;
+
+#ifdef MULTI_OMAP2
+	if (cpu_is_omap24xx())
+		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
 	else if (cpu_is_omap34xx())
-		omap3xxx_hwmod_init();
+		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
 	else if (cpu_is_omap44xx())
-		omap44xx_hwmod_init();
+		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
+	else
+		pr_err("Could not initialize omap_irq_base\n");
+#endif
+}
 
-	/* The OPP tables have to be registered before a clk init */
-	omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
+void __init omap2_init_common_infrastructure(void)
+{
+	u8 postsetup_state;
+
+	if (cpu_is_omap242x()) {
+		omap2xxx_powerdomains_init();
+		omap2_clockdomains_init();
+		omap2420_hwmod_init();
+	} else if (cpu_is_omap243x()) {
+		omap2xxx_powerdomains_init();
+		omap2_clockdomains_init();
+		omap2430_hwmod_init();
+	} else if (cpu_is_omap34xx()) {
+		omap3xxx_powerdomains_init();
+		omap2_clockdomains_init();
+		omap3xxx_hwmod_init();
+	} else if (cpu_is_omap44xx()) {
+		omap44xx_powerdomains_init();
+		omap44xx_clockdomains_init();
+		omap44xx_hwmod_init();
+	} else {
+		pr_err("Could not init hwmod data - unknown SoC\n");
+	}
+
+	/* Set the default postsetup state for all hwmods */
+#ifdef CONFIG_PM_RUNTIME
+	postsetup_state = _HWMOD_STATE_IDLE;
+#else
+	postsetup_state = _HWMOD_STATE_ENABLED;
+#endif
+	omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);
+
+	/*
+	 * Set the default postsetup state for unusual modules (like
+	 * MPU WDT).
+	 *
+	 * The postsetup_state is not actually used until
+	 * omap_hwmod_late_init(), so boards that desire full watchdog
+	 * coverage of kernel initialization can reprogram the
+	 * postsetup_state between the calls to
+	 * omap2_init_common_infrastructure() and omap2_init_common_devices().
+	 *
+	 * XXX ideally we could detect whether the MPU WDT was currently
+	 * enabled here and make this conditional
+	 */
+	postsetup_state = _HWMOD_STATE_DISABLED;
+	omap_hwmod_for_each_by_class("wd_timer",
+				     _set_hwmod_postsetup_state,
+				     &postsetup_state);
+
+	omap_pm_if_early_init();
 
 	if (cpu_is_omap2420())
 		omap2420_clk_init();
@@ -339,17 +394,61 @@
 	else if (cpu_is_omap44xx())
 		omap4xxx_clk_init();
 	else
-		pr_err("Could not init clock framework - unknown CPU\n");
+		pr_err("Could not init clock framework - unknown SoC\n");
+}
 
+void __init omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
+				      struct omap_sdrc_params *sdrc_cs1)
+{
 	omap_serial_early_init();
 
-#ifndef CONFIG_PM_RUNTIME
-	skip_setup_idle = 1;
-#endif
-	omap_hwmod_late_init(skip_setup_idle);
+	omap_hwmod_late_init();
+
 	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
 		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
 		_omap2_init_reprogram_sdrc();
 	}
 	gpmc_init();
+
+	omap_irq_base_init();
 }
+
+/*
+ * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+ */
+
+u8 omap_readb(u32 pa)
+{
+	return __raw_readb(OMAP2_L4_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_readb);
+
+u16 omap_readw(u32 pa)
+{
+	return __raw_readw(OMAP2_L4_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_readw);
+
+u32 omap_readl(u32 pa)
+{
+	return __raw_readl(OMAP2_L4_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_readl);
+
+void omap_writeb(u8 v, u32 pa)
+{
+	__raw_writeb(v, OMAP2_L4_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_writeb);
+
+void omap_writew(u16 v, u32 pa)
+{
+	__raw_writew(v, OMAP2_L4_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_writew);
+
+void omap_writel(u32 v, u32 pa)
+{
+	__raw_writel(v, OMAP2_L4_IO_ADDRESS(pa));
+}
+EXPORT_SYMBOL(omap_writel);
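As the comment in omap2_init_common_infrastructure() notes, a board that wants watchdog coverage of kernel init could flip the wd_timer hwmods back to enabled between the two common init calls. A hypothetical sketch of such a board hook, reusing only the calls shown in this hunk; board_wdt_postsetup() and board_keep_wdt_running() are made-up names:

/* Hedged sketch: mirror _set_hwmod_postsetup_state() from the hunk above
 * to keep the MPU watchdog enabled through boot.  Call this from board
 * code after omap2_init_common_infrastructure() and before
 * omap2_init_common_devices().
 */
static int __init board_wdt_postsetup(struct omap_hwmod *oh, void *data)
{
	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}

static void __init board_keep_wdt_running(void)
{
	u8 state = _HWMOD_STATE_ENABLED;

	omap_hwmod_for_each_by_class("wd_timer", board_wdt_postsetup, &state);
}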
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 32eeabe..85bf8ca 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -284,7 +284,10 @@
 
 void omap3_intc_prepare_idle(void)
 {
-	/* Disable autoidle as it can stall interrupt controller */
+	/*
+	 * Disable autoidle as it can stall interrupt controller,
+	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
+	 */
 	intc_bank_write_reg(0, &irq_banks[0], INTC_SYSCONFIG);
 }
 
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 40ddeca..394413d 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -281,7 +281,7 @@
 
 /* FIXME: the following structs should be filled automatically by the user id */
 
-#if defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_ARCH_OMAP2420)
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP2)
 /* DSP */
 static struct omap_mbox2_priv omap2_mbox_dsp_priv = {
 	.tx_fifo = {
@@ -306,7 +306,7 @@
 };
 #endif
 
-#if defined(CONFIG_ARCH_OMAP3430)
+#if defined(CONFIG_ARCH_OMAP3)
 struct omap_mbox *omap3_mboxes[] = { &mbox_dsp_info, NULL };
 #endif
 
@@ -394,15 +394,19 @@
 
 	if (false)
 		;
-#if defined(CONFIG_ARCH_OMAP3430)
-	else if (cpu_is_omap3430()) {
+#if defined(CONFIG_ARCH_OMAP3)
+	else if (cpu_is_omap34xx()) {
 		list = omap3_mboxes;
 
 		list[0]->irq = platform_get_irq_byname(pdev, "dsp");
 	}
 #endif
-#if defined(CONFIG_ARCH_OMAP2420)
-	else if (cpu_is_omap2420()) {
+#if defined(CONFIG_ARCH_OMAP2)
+	else if (cpu_is_omap2430()) {
+		list = omap2_mboxes;
+
+		list[0]->irq = platform_get_irq_byname(pdev, "dsp");
+	} else if (cpu_is_omap2420()) {
 		list = omap2_mboxes;
 
 		list[0]->irq = platform_get_irq_byname(pdev, "dsp");
@@ -432,9 +436,8 @@
 		iounmap(mbox_base);
 		return ret;
 	}
-	return 0;
 
-	return ret;
+	return 0;
 }
 
 static int __devexit omap2_mbox_remove(struct platform_device *pdev)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 074536a..df8d2f2 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -1,9 +1,9 @@
 /*
  * linux/arch/arm/mach-omap2/mux.c
  *
- * OMAP2 and OMAP3 pin multiplexing configurations
+ * OMAP2, OMAP3 and OMAP4 pin multiplexing configurations
  *
- * Copyright (C) 2004 - 2008 Texas Instruments Inc.
+ * Copyright (C) 2004 - 2010 Texas Instruments Inc.
  * Copyright (C) 2003 - 2008 Nokia Corporation
  *
  * Written by Tony Lindgren
@@ -35,65 +35,79 @@
 
 #include <asm/system.h>
 
+#include <plat/omap_hwmod.h>
+
 #include "control.h"
 #include "mux.h"
 
 #define OMAP_MUX_BASE_OFFSET		0x30	/* Offset from CTRL_BASE */
 #define OMAP_MUX_BASE_SZ		0x5ca
-#define MUXABLE_GPIO_MODE3		BIT(0)
 
 struct omap_mux_entry {
 	struct omap_mux		mux;
 	struct list_head	node;
 };
 
-static unsigned long mux_phys;
-static void __iomem *mux_base;
-static u8 omap_mux_flags;
+static LIST_HEAD(mux_partitions);
+static DEFINE_MUTEX(muxmode_mutex);
 
-u16 omap_mux_read(u16 reg)
+struct omap_mux_partition *omap_mux_get(const char *name)
 {
-	if (cpu_is_omap24xx())
-		return __raw_readb(mux_base + reg);
-	else
-		return __raw_readw(mux_base + reg);
+	struct omap_mux_partition *partition;
+
+	list_for_each_entry(partition, &mux_partitions, node) {
+		if (!strcmp(name, partition->name))
+			return partition;
+	}
+
+	return NULL;
 }
 
-void omap_mux_write(u16 val, u16 reg)
+u16 omap_mux_read(struct omap_mux_partition *partition, u16 reg)
 {
-	if (cpu_is_omap24xx())
-		__raw_writeb(val, mux_base + reg);
+	if (partition->flags & OMAP_MUX_REG_8BIT)
+		return __raw_readb(partition->base + reg);
 	else
-		__raw_writew(val, mux_base + reg);
+		return __raw_readw(partition->base + reg);
 }
 
-void omap_mux_write_array(struct omap_board_mux *board_mux)
+void omap_mux_write(struct omap_mux_partition *partition, u16 val,
+			   u16 reg)
 {
-	while (board_mux->reg_offset !=  OMAP_MUX_TERMINATOR) {
-		omap_mux_write(board_mux->value, board_mux->reg_offset);
+	if (partition->flags & OMAP_MUX_REG_8BIT)
+		__raw_writeb(val, partition->base + reg);
+	else
+		__raw_writew(val, partition->base + reg);
+}
+
+void omap_mux_write_array(struct omap_mux_partition *partition,
+				 struct omap_board_mux *board_mux)
+{
+	while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
+		omap_mux_write(partition, board_mux->value,
+			       board_mux->reg_offset);
 		board_mux++;
 	}
 }
 
-static LIST_HEAD(muxmodes);
-static DEFINE_MUTEX(muxmode_mutex);
-
 #ifdef CONFIG_OMAP_MUX
 
 static char *omap_mux_options;
 
-int __init omap_mux_init_gpio(int gpio, int val)
+static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
+				      int gpio, int val)
 {
 	struct omap_mux_entry *e;
 	struct omap_mux *gpio_mux = NULL;
 	u16 old_mode;
 	u16 mux_mode;
 	int found = 0;
+	struct list_head *muxmodes = &partition->muxmodes;
 
 	if (!gpio)
 		return -EINVAL;
 
-	list_for_each_entry(e, &muxmodes, node) {
+	list_for_each_entry(e, muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 		if (gpio == m->gpio) {
 			gpio_mux = m;
@@ -102,34 +116,52 @@
 	}
 
 	if (found == 0) {
-		printk(KERN_ERR "mux: Could not set gpio%i\n", gpio);
+		pr_err("%s: Could not set gpio%i\n", __func__, gpio);
 		return -ENODEV;
 	}
 
 	if (found > 1) {
-		printk(KERN_INFO "mux: Multiple gpio paths (%d) for gpio%i\n",
-				found, gpio);
+		pr_info("%s: Multiple gpio paths (%d) for gpio%i\n", __func__,
+			found, gpio);
 		return -EINVAL;
 	}
 
-	old_mode = omap_mux_read(gpio_mux->reg_offset);
+	old_mode = omap_mux_read(partition, gpio_mux->reg_offset);
 	mux_mode = val & ~(OMAP_MUX_NR_MODES - 1);
-	if (omap_mux_flags & MUXABLE_GPIO_MODE3)
+	if (partition->flags & OMAP_MUX_GPIO_IN_MODE3)
 		mux_mode |= OMAP_MUX_MODE3;
 	else
 		mux_mode |= OMAP_MUX_MODE4;
-	printk(KERN_DEBUG "mux: Setting signal %s.gpio%i 0x%04x -> 0x%04x\n",
-			gpio_mux->muxnames[0], gpio, old_mode, mux_mode);
-	omap_mux_write(mux_mode, gpio_mux->reg_offset);
+	pr_debug("%s: Setting signal %s.gpio%i 0x%04x -> 0x%04x\n", __func__,
+		 gpio_mux->muxnames[0], gpio, old_mode, mux_mode);
+	omap_mux_write(partition, mux_mode, gpio_mux->reg_offset);
 
 	return 0;
 }
 
-int __init omap_mux_init_signal(const char *muxname, int val)
+int __init omap_mux_init_gpio(int gpio, int val)
 {
+	struct omap_mux_partition *partition;
+	int ret;
+
+	list_for_each_entry(partition, &mux_partitions, node) {
+		ret = _omap_mux_init_gpio(partition, gpio, val);
+		if (!ret)
+			return ret;
+	}
+
+	return -ENODEV;
+}
+
+static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
+					const char *muxname,
+					struct omap_mux **found_mux)
+{
+	struct omap_mux *mux = NULL;
 	struct omap_mux_entry *e;
 	const char *mode_name;
-	int found = 0, mode0_len = 0;
+	int found = 0, found_mode = 0, mode0_len = 0;
+	struct list_head *muxmodes = &partition->muxmodes;
 
 	mode_name = strchr(muxname, '.');
 	if (mode_name) {
@@ -139,51 +171,200 @@
 		mode_name = muxname;
 	}
 
-	list_for_each_entry(e, &muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-		char *m0_entry = m->muxnames[0];
+	list_for_each_entry(e, muxmodes, node) {
+		char *m0_entry;
 		int i;
 
+		mux = &e->mux;
+		m0_entry = mux->muxnames[0];
+
 		/* First check for full name in mode0.muxmode format */
 		if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
 			continue;
 
 		/* Then check for muxmode only */
 		for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
-			char *mode_cur = m->muxnames[i];
+			char *mode_cur = mux->muxnames[i];
 
 			if (!mode_cur)
 				continue;
 
 			if (!strcmp(mode_name, mode_cur)) {
-				u16 old_mode;
-				u16 mux_mode;
-
-				old_mode = omap_mux_read(m->reg_offset);
-				mux_mode = val | i;
-				printk(KERN_DEBUG "mux: Setting signal "
-					"%s.%s 0x%04x -> 0x%04x\n",
-					m0_entry, muxname, old_mode, mux_mode);
-				omap_mux_write(mux_mode, m->reg_offset);
+				*found_mux = mux;
 				found++;
+				found_mode = i;
 			}
 		}
 	}
 
-	if (found == 1)
-		return 0;
+	if (found == 1) {
+		return found_mode;
+	}
 
 	if (found > 1) {
-		printk(KERN_ERR "mux: Multiple signal paths (%i) for %s\n",
-				found, muxname);
+		pr_err("%s: Multiple signal paths (%i) for %s\n", __func__,
+		       found, muxname);
 		return -EINVAL;
 	}
 
-	printk(KERN_ERR "mux: Could not set signal %s\n", muxname);
+	pr_err("%s: Could not find signal %s\n", __func__, muxname);
 
 	return -ENODEV;
 }
 
+static int __init
+omap_mux_get_by_name(const char *muxname,
+			struct omap_mux_partition **found_partition,
+			struct omap_mux **found_mux)
+{
+	struct omap_mux_partition *partition;
+
+	list_for_each_entry(partition, &mux_partitions, node) {
+		struct omap_mux *mux = NULL;
+		int mux_mode = _omap_mux_get_by_name(partition, muxname, &mux);
+		if (mux_mode < 0)
+			continue;
+
+		*found_partition = partition;
+		*found_mux = mux;
+
+		return mux_mode;
+	}
+
+	return -ENODEV;
+}
+
+int __init omap_mux_init_signal(const char *muxname, int val)
+{
+	struct omap_mux_partition *partition = NULL;
+	struct omap_mux *mux = NULL;
+	u16 old_mode;
+	int mux_mode;
+
+	mux_mode = omap_mux_get_by_name(muxname, &partition, &mux);
+	if (mux_mode < 0)
+		return mux_mode;
+
+	old_mode = omap_mux_read(partition, mux->reg_offset);
+	mux_mode |= val;
+	pr_debug("%s: Setting signal %s 0x%04x -> 0x%04x\n",
+			 __func__, muxname, old_mode, mux_mode);
+	omap_mux_write(partition, mux_mode, mux->reg_offset);
+
+	return 0;
+}
+
+struct omap_hwmod_mux_info * __init
+omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
+{
+	struct omap_hwmod_mux_info *hmux;
+	int i;
+
+	if (!bpads || nr_pads < 1)
+		return NULL;
+
+	hmux = kzalloc(sizeof(struct omap_hwmod_mux_info), GFP_KERNEL);
+	if (!hmux)
+		goto err1;
+
+	hmux->nr_pads = nr_pads;
+
+	hmux->pads = kzalloc(sizeof(struct omap_device_pad) *
+				nr_pads, GFP_KERNEL);
+	if (!hmux->pads)
+		goto err2;
+
+	for (i = 0; i < hmux->nr_pads; i++) {
+		struct omap_mux_partition *partition;
+		struct omap_device_pad *bpad = &bpads[i], *pad = &hmux->pads[i];
+		struct omap_mux *mux;
+		int mux_mode;
+
+		mux_mode = omap_mux_get_by_name(bpad->name, &partition, &mux);
+		if (mux_mode < 0)
+			goto err3;
+		if (!pad->partition)
+			pad->partition = partition;
+		if (!pad->mux)
+			pad->mux = mux;
+
+		pad->name = kzalloc(strlen(bpad->name) + 1, GFP_KERNEL);
+		if (!pad->name) {
+			int j;
+
+			for (j = i - 1; j >= 0; j--)
+				kfree(hmux->pads[j].name);
+			goto err3;
+		}
+		strcpy(pad->name, bpad->name);
+
+		pad->flags = bpad->flags;
+		pad->enable = bpad->enable;
+		pad->idle = bpad->idle;
+		pad->off = bpad->off;
+		pr_debug("%s: Initialized %s\n", __func__, pad->name);
+	}
+
+	return hmux;
+
+err3:
+	kfree(hmux->pads);
+err2:
+	kfree(hmux);
+err1:
+	pr_err("%s: Could not allocate device mux entry\n", __func__);
+
+	return NULL;
+}
+
+/* Assumes the calling function takes care of locking */
+void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
+{
+	int i;
+
+	for (i = 0; i < hmux->nr_pads; i++) {
+		struct omap_device_pad *pad = &hmux->pads[i];
+		int flags, val = -EINVAL;
+
+		flags = pad->flags;
+
+		switch (state) {
+		case _HWMOD_STATE_ENABLED:
+			if (flags & OMAP_DEVICE_PAD_ENABLED)
+				break;
+			flags |= OMAP_DEVICE_PAD_ENABLED;
+			val = pad->enable;
+			pr_debug("%s: Enabling %s %x\n", __func__,
+					pad->name, val);
+			break;
+		case _HWMOD_STATE_IDLE:
+			if (!(flags & OMAP_DEVICE_PAD_REMUX))
+				break;
+			flags &= ~OMAP_DEVICE_PAD_ENABLED;
+			val = pad->idle;
+			pr_debug("%s: Idling %s %x\n", __func__,
+					pad->name, val);
+			break;
+		case _HWMOD_STATE_DISABLED:
+		default:
+			/* Use safe mode unless OMAP_DEVICE_PAD_REMUX */
+			if (flags & OMAP_DEVICE_PAD_REMUX)
+				val = pad->off;
+			else
+				val = OMAP_MUX_MODE7;
+			flags &= ~OMAP_DEVICE_PAD_ENABLED;
+			pr_debug("%s: Disabling %s %x\n", __func__,
+					pad->name, val);
+		}
+
+		if (val >= 0) {
+			omap_mux_write(pad->partition, val,
+					pad->mux->reg_offset);
+			pad->flags = flags;
+		}
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 
 #define OMAP_MUX_MAX_NR_FLAGS	10
@@ -248,13 +429,15 @@
 	} while (i-- > 0);
 }
 
-#define OMAP_MUX_DEFNAME_LEN	16
+#define OMAP_MUX_DEFNAME_LEN	32
 
 static int omap_mux_dbg_board_show(struct seq_file *s, void *unused)
 {
+	struct omap_mux_partition *partition = s->private;
 	struct omap_mux_entry *e;
+	u8 omap_gen = omap_rev() >> 28;
 
-	list_for_each_entry(e, &muxmodes, node) {
+	list_for_each_entry(e, &partition->muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 		char m0_def[OMAP_MUX_DEFNAME_LEN];
 		char *m0_name = m->muxnames[0];
@@ -272,11 +455,16 @@
 			}
 			m0_def[i] = toupper(m0_name[i]);
 		}
-		val = omap_mux_read(m->reg_offset);
+		val = omap_mux_read(partition, m->reg_offset);
 		mode = val & OMAP_MUX_MODE7;
+		if (mode != 0)
+			seq_printf(s, "/* %s */\n", m->muxnames[mode]);
 
-		seq_printf(s, "OMAP%i_MUX(%s, ",
-					cpu_is_omap34xx() ? 3 : 0, m0_def);
+		/*
+		 * XXX: Might need to be revisited to support differences
+		 * across the same OMAP generation.
+		 */
+		seq_printf(s, "OMAP%d_MUX(%s, ", omap_gen, m0_def);
 		omap_mux_decode(s, val);
 		seq_printf(s, "),\n");
 	}
@@ -286,7 +474,7 @@
 
 static int omap_mux_dbg_board_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, omap_mux_dbg_board_show, &inode->i_private);
+	return single_open(file, omap_mux_dbg_board_show, inode->i_private);
 }
 
 static const struct file_operations omap_mux_dbg_board_fops = {
@@ -296,19 +484,43 @@
 	.release	= single_release,
 };
 
+static struct omap_mux_partition *omap_mux_get_partition(struct omap_mux *mux)
+{
+	struct omap_mux_partition *partition;
+
+	list_for_each_entry(partition, &mux_partitions, node) {
+		struct list_head *muxmodes = &partition->muxmodes;
+		struct omap_mux_entry *e;
+
+		list_for_each_entry(e, muxmodes, node) {
+			struct omap_mux *m = &e->mux;
+
+			if (m == mux)
+				return partition;
+		}
+	}
+
+	return NULL;
+}
+
 static int omap_mux_dbg_signal_show(struct seq_file *s, void *unused)
 {
 	struct omap_mux *m = s->private;
+	struct omap_mux_partition *partition;
 	const char *none = "NA";
 	u16 val;
 	int mode;
 
-	val = omap_mux_read(m->reg_offset);
+	partition = omap_mux_get_partition(m);
+	if (!partition)
+		return 0;
+
+	val = omap_mux_read(partition, m->reg_offset);
 	mode = val & OMAP_MUX_MODE7;
 
-	seq_printf(s, "name: %s.%s (0x%08lx/0x%03x = 0x%04x), b %s, t %s\n",
+	seq_printf(s, "name: %s.%s (0x%08x/0x%03x = 0x%04x), b %s, t %s\n",
 			m->muxnames[0], m->muxnames[mode],
-			mux_phys + m->reg_offset, m->reg_offset, val,
+			partition->phys + m->reg_offset, m->reg_offset, val,
 			m->balls[0] ? m->balls[0] : none,
 			m->balls[1] ? m->balls[1] : none);
 	seq_printf(s, "mode: ");
@@ -330,14 +542,15 @@
 #define OMAP_MUX_MAX_ARG_CHAR  7
 
 static ssize_t omap_mux_dbg_signal_write(struct file *file,
-						const char __user *user_buf,
-						size_t count, loff_t *ppos)
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
 {
 	char buf[OMAP_MUX_MAX_ARG_CHAR];
 	struct seq_file *seqf;
 	struct omap_mux *m;
 	unsigned long val;
 	int buf_size, ret;
+	struct omap_mux_partition *partition;
 
 	if (count > OMAP_MUX_MAX_ARG_CHAR)
 		return -EINVAL;
@@ -358,7 +571,11 @@
 	seqf = file->private_data;
 	m = seqf->private;
 
-	omap_mux_write((u16)val, m->reg_offset);
+	partition = omap_mux_get_partition(m);
+	if (!partition)
+		return -ENODEV;
+
+	omap_mux_write(partition, (u16)val, m->reg_offset);
 	*ppos += count;
 
 	return count;
@@ -379,22 +596,38 @@
 
 static struct dentry *mux_dbg_dir;
 
-static void __init omap_mux_dbg_init(void)
+static void __init omap_mux_dbg_create_entry(
+				struct omap_mux_partition *partition,
+				struct dentry *mux_dbg_dir)
 {
 	struct omap_mux_entry *e;
 
+	list_for_each_entry(e, &partition->muxmodes, node) {
+		struct omap_mux *m = &e->mux;
+
+		(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+					  m, &omap_mux_dbg_signal_fops);
+	}
+}
+
+static void __init omap_mux_dbg_init(void)
+{
+	struct omap_mux_partition *partition;
+	static struct dentry *mux_dbg_board_dir;
+
 	mux_dbg_dir = debugfs_create_dir("omap_mux", NULL);
 	if (!mux_dbg_dir)
 		return;
 
-	(void)debugfs_create_file("board", S_IRUGO, mux_dbg_dir,
-					NULL, &omap_mux_dbg_board_fops);
+	mux_dbg_board_dir = debugfs_create_dir("board", mux_dbg_dir);
+	if (!mux_dbg_board_dir)
+		return;
 
-	list_for_each_entry(e, &muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-
-		(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
-					m, &omap_mux_dbg_signal_fops);
+	list_for_each_entry(partition, &mux_partitions, node) {
+		omap_mux_dbg_create_entry(partition, mux_dbg_dir);
+		(void)debugfs_create_file(partition->name, S_IRUGO,
+					  mux_dbg_board_dir, partition,
+					  &omap_mux_dbg_board_fops);
 	}
 }
 
@@ -421,23 +654,25 @@
 /* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
 static int __init omap_mux_late_init(void)
 {
-	struct omap_mux_entry *e, *tmp;
+	struct omap_mux_partition *partition;
 
-	list_for_each_entry_safe(e, tmp, &muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-		u16 mode = omap_mux_read(m->reg_offset);
+	list_for_each_entry(partition, &mux_partitions, node) {
+		struct omap_mux_entry *e, *tmp;
+		list_for_each_entry_safe(e, tmp, &partition->muxmodes, node) {
+			struct omap_mux *m = &e->mux;
+			u16 mode = omap_mux_read(partition, m->reg_offset);
 
-		if (OMAP_MODE_GPIO(mode))
-			continue;
+			if (OMAP_MODE_GPIO(mode))
+				continue;
 
 #ifndef CONFIG_DEBUG_FS
-		mutex_lock(&muxmode_mutex);
-		list_del(&e->node);
-		mutex_unlock(&muxmode_mutex);
-		omap_mux_free_names(m);
-		kfree(m);
+			mutex_lock(&muxmode_mutex);
+			list_del(&e->node);
+			mutex_unlock(&muxmode_mutex);
+			omap_mux_free_names(m);
+			kfree(m);
 #endif
-
+		}
 	}
 
 	omap_mux_dbg_init();
@@ -462,8 +697,8 @@
 			s++;
 		}
 		if (!found)
-			printk(KERN_ERR "mux: Unknown entry offset 0x%x\n",
-					p->reg_offset);
+			pr_err("%s: Unknown entry offset 0x%x\n", __func__,
+			       p->reg_offset);
 		p++;
 	}
 }
@@ -487,8 +722,8 @@
 			s++;
 		}
 		if (!found)
-			printk(KERN_ERR "mux: Unknown ball offset 0x%x\n",
-					b->reg_offset);
+			pr_err("%s: Unknown ball offset 0x%x\n", __func__,
+			       b->reg_offset);
 		b++;
 	}
 }
@@ -554,7 +789,7 @@
 }
 
 static int __init omap_mux_copy_names(struct omap_mux *src,
-					struct omap_mux *dst)
+				      struct omap_mux *dst)
 {
 	int i;
 
@@ -592,51 +827,63 @@
 
 #endif	/* CONFIG_OMAP_MUX */
 
-static u16 omap_mux_get_by_gpio(int gpio)
+static struct omap_mux *omap_mux_get_by_gpio(
+				struct omap_mux_partition *partition,
+				int gpio)
 {
 	struct omap_mux_entry *e;
-	u16 offset = OMAP_MUX_TERMINATOR;
+	struct omap_mux *ret = NULL;
 
-	list_for_each_entry(e, &muxmodes, node) {
+	list_for_each_entry(e, &partition->muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 		if (m->gpio == gpio) {
-			offset = m->reg_offset;
+			ret = m;
 			break;
 		}
 	}
 
-	return offset;
+	return ret;
 }
 
 /* Needed for dynamic muxing of GPIO pins for off-idle */
 u16 omap_mux_get_gpio(int gpio)
 {
-	u16 offset;
+	struct omap_mux_partition *partition;
+	struct omap_mux *m = NULL;
 
-	offset = omap_mux_get_by_gpio(gpio);
-	if (offset == OMAP_MUX_TERMINATOR) {
-		printk(KERN_ERR "mux: Could not get gpio%i\n", gpio);
-		return offset;
+	list_for_each_entry(partition, &mux_partitions, node) {
+		m = omap_mux_get_by_gpio(partition, gpio);
+		if (m)
+			return omap_mux_read(partition, m->reg_offset);
 	}
 
-	return omap_mux_read(offset);
+	if (!m || m->reg_offset == OMAP_MUX_TERMINATOR)
+		pr_err("%s: Could not get gpio%i\n", __func__, gpio);
+
+	return OMAP_MUX_TERMINATOR;
 }
 
 /* Needed for dynamic muxing of GPIO pins for off-idle */
 void omap_mux_set_gpio(u16 val, int gpio)
 {
-	u16 offset;
+	struct omap_mux_partition *partition;
+	struct omap_mux *m = NULL;
 
-	offset = omap_mux_get_by_gpio(gpio);
-	if (offset == OMAP_MUX_TERMINATOR) {
-		printk(KERN_ERR "mux: Could not set gpio%i\n", gpio);
-		return;
+	list_for_each_entry(partition, &mux_partitions, node) {
+		m = omap_mux_get_by_gpio(partition, gpio);
+		if (m) {
+			omap_mux_write(partition, val, m->reg_offset);
+			return;
+		}
 	}
 
-	omap_mux_write(val, offset);
+	if (!m || m->reg_offset == OMAP_MUX_TERMINATOR)
+		pr_err("%s: Could not set gpio%i\n", __func__, gpio);
 }
 
-static struct omap_mux * __init omap_mux_list_add(struct omap_mux *src)
+static struct omap_mux * __init omap_mux_list_add(
+					struct omap_mux_partition *partition,
+					struct omap_mux *src)
 {
 	struct omap_mux_entry *entry;
 	struct omap_mux *m;
@@ -646,7 +893,7 @@
 		return NULL;
 
 	m = &entry->mux;
-	memcpy(m, src, sizeof(struct omap_mux_entry));
+	entry->mux = *src;
 
 #ifdef CONFIG_OMAP_MUX
 	if (omap_mux_copy_names(src, m)) {
@@ -656,7 +903,7 @@
 #endif
 
 	mutex_lock(&muxmode_mutex);
-	list_add_tail(&entry->node, &muxmodes);
+	list_add_tail(&entry->node, &partition->muxmodes);
 	mutex_unlock(&muxmode_mutex);
 
 	return m;
@@ -667,7 +914,8 @@
  * the GPIO to mux offset mapping that is needed for dynamic muxing
  * of GPIO pins for off-idle.
  */
-static void __init omap_mux_init_list(struct omap_mux *superset)
+static void __init omap_mux_init_list(struct omap_mux_partition *partition,
+				      struct omap_mux *superset)
 {
 	while (superset->reg_offset !=  OMAP_MUX_TERMINATOR) {
 		struct omap_mux *entry;
@@ -679,15 +927,16 @@
 		}
 #else
 		/* Skip pins that are not muxed as GPIO by bootloader */
-		if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
+		if (!OMAP_MODE_GPIO(omap_mux_read(partition,
+				    superset->reg_offset))) {
 			superset++;
 			continue;
 		}
 #endif
 
-		entry = omap_mux_list_add(superset);
+		entry = omap_mux_list_add(partition, superset);
 		if (!entry) {
-			printk(KERN_ERR "mux: Could not add entry\n");
+			pr_err("%s: Could not add entry\n", __func__);
 			return;
 		}
 		superset++;
@@ -706,10 +955,11 @@
 		omap_mux_package_init_balls(package_balls, superset);
 }
 
-static void omap_mux_init_signals(struct omap_board_mux *board_mux)
+static void omap_mux_init_signals(struct omap_mux_partition *partition,
+				  struct omap_board_mux *board_mux)
 {
 	omap_mux_set_cmdline_signals();
-	omap_mux_write_array(board_mux);
+	omap_mux_write_array(partition, board_mux);
 }
 
 #else
@@ -720,34 +970,49 @@
 {
 }
 
-static void omap_mux_init_signals(struct omap_board_mux *board_mux)
+static void omap_mux_init_signals(struct omap_mux_partition *partition,
+				  struct omap_board_mux *board_mux)
 {
 }
 
 #endif
 
-int __init omap_mux_init(u32 mux_pbase, u32 mux_size,
-				struct omap_mux *superset,
-				struct omap_mux *package_subset,
-				struct omap_board_mux *board_mux,
-				struct omap_ball *package_balls)
-{
-	if (mux_base)
-		return -EBUSY;
+static u32 mux_partitions_cnt;
 
-	mux_phys = mux_pbase;
-	mux_base = ioremap(mux_pbase, mux_size);
-	if (!mux_base) {
-		printk(KERN_ERR "mux: Could not ioremap\n");
+int __init omap_mux_init(const char *name, u32 flags,
+			 u32 mux_pbase, u32 mux_size,
+			 struct omap_mux *superset,
+			 struct omap_mux *package_subset,
+			 struct omap_board_mux *board_mux,
+			 struct omap_ball *package_balls)
+{
+	struct omap_mux_partition *partition;
+
+	partition = kzalloc(sizeof(struct omap_mux_partition), GFP_KERNEL);
+	if (!partition)
+		return -ENOMEM;
+
+	partition->name = name;
+	partition->flags = flags;
+	partition->size = mux_size;
+	partition->phys = mux_pbase;
+	partition->base = ioremap(mux_pbase, mux_size);
+	if (!partition->base) {
+		pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
+			__func__, partition->phys);
+		kfree(partition);
 		return -ENODEV;
 	}
 
-	if (cpu_is_omap24xx())
-		omap_mux_flags = MUXABLE_GPIO_MODE3;
+	INIT_LIST_HEAD(&partition->muxmodes);
+
+	list_add_tail(&partition->node, &mux_partitions);
+	mux_partitions_cnt++;
+	pr_info("%s: Add partition: #%d: %s, flags: %x\n", __func__,
+		mux_partitions_cnt, partition->name, partition->flags);
 
 	omap_mux_init_package(superset, package_subset, package_balls);
-	omap_mux_init_list(superset);
-	omap_mux_init_signals(board_mux);
+	omap_mux_init_list(partition, superset);
+	omap_mux_init_signals(partition, board_mux);
 
 	return 0;
 }
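With the padconf registers now grouped into named partitions, dynamic remuxing goes through a partition handle rather than the old flat omap_mux_read()/omap_mux_write(). A minimal sketch against the signatures added above; the "core" name matches the partition registered by the omap2/3 init functions below, while the 0x1a0 register offset is an assumed example pad, not a real signal:

/* Hedged sketch of the partition-aware API introduced above.  The pad
 * offset is a made-up example.
 */
static void example_remux_to_gpio(void)
{
	struct omap_mux_partition *p = omap_mux_get("core");
	u16 val;

	if (!p)
		return;

	val = omap_mux_read(p, 0x1a0);			/* assumed offset */
	val = (val & ~OMAP_MUX_MODE7) | OMAP_MUX_MODE4;	/* select GPIO mode */
	omap_mux_write(p, val, 0x1a0);
}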
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 350c04f..a4ab17a 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia
- * Copyright (C) 2009 Texas Instruments
+ * Copyright (C) 2009-2010 Texas Instruments
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -10,6 +10,7 @@
 #include "mux2420.h"
 #include "mux2430.h"
 #include "mux34xx.h"
+#include "mux44xx.h"
 
 #define OMAP_MUX_TERMINATOR	0xffff
 
@@ -37,6 +38,9 @@
 #define OMAP_OFF_PULL_UP		(1 << 13)
 #define OMAP_WAKEUP_EN			(1 << 14)
 
+/* 44xx specific mux bit defines */
+#define OMAP_WAKEUP_EVENT		(1 << 15)
+
 /* Active pin states */
 #define OMAP_PIN_OUTPUT			0
 #define OMAP_PIN_INPUT			OMAP_INPUT_EN
@@ -56,8 +60,10 @@
 
 #define OMAP_MODE_GPIO(x)	(((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
 
-/* Flags for omap_mux_init */
+/* Flags for omapX_mux_init */
 #define OMAP_PACKAGE_MASK		0xffff
+#define OMAP_PACKAGE_CBS		8		/* 547-pin 0.40 0.40 */
+#define OMAP_PACKAGE_CBL		7		/* 547-pin 0.40 0.40 */
 #define OMAP_PACKAGE_CBP		6		/* 515-pin 0.40 0.50 */
 #define OMAP_PACKAGE_CUS		5		/* 423-pin 0.65 */
 #define OMAP_PACKAGE_CBB		4		/* 515-pin 0.40 0.50 */
@@ -66,14 +72,61 @@
 #define OMAP_PACKAGE_ZAF		1		/* 2420 447-pin SIP */
 
 
-#define OMAP_MUX_NR_MODES	8			/* Available modes */
-#define OMAP_MUX_NR_SIDES	2			/* Bottom & top */
+#define OMAP_MUX_NR_MODES		8		/* Available modes */
+#define OMAP_MUX_NR_SIDES		2		/* Bottom & top */
+
+/*
+ * omap_mux_init flags definition:
+ *
+ * OMAP_MUX_REG_8BIT: Ensure that access to padconf is done in 8 bits.
+ * The default value is 16 bits.
+ * OMAP_MUX_GPIO_IN_MODE3: The GPIO is selected in mode3.
+ * The default is mode4.
+ */
+#define OMAP_MUX_REG_8BIT		(1 << 0)
+#define OMAP_MUX_GPIO_IN_MODE3		(1 << 1)
+
+/**
+ * struct omap_board_data - board specific device data
+ * @id: instance id
+ * @flags: additional flags for platform init code
+ * @pads: array of device specific pads
+ * @pads_cnt: ARRAY_SIZE() of pads
+ */
+struct omap_board_data {
+	int			id;
+	u32			flags;
+	struct omap_device_pad	*pads;
+	int			pads_cnt;
+};
+
+/**
+ * struct omap_mux_partition - contains partition-related information
+ * @name: name of the current partition
+ * @flags: flags specific to this partition
+ * @phys: physical address
+ * @size: partition size
+ * @base: virtual address after ioremap
+ * @muxmodes: list of nodes that belong to a partition
+ * @node: list node for the partitions linked list
+ */
+struct omap_mux_partition {
+	const char		*name;
+	u32			flags;
+	u32			phys;
+	u32			size;
+	void __iomem		*base;
+	struct list_head	muxmodes;
+	struct list_head	node;
+};
 
 /**
  * struct omap_mux - data for omap mux register offset and its value
  * @reg_offset:	mux register offset from the mux base
  * @gpio:	GPIO number
  * @muxnames:	available signal modes for a ball
+ * @balls:	available balls on the package
+ * @partition:	mux partition
  */
 struct omap_mux {
 	u16	reg_offset;
@@ -106,6 +159,34 @@
 	u16	value;
 };
 
+#define OMAP_DEVICE_PAD_ENABLED		BIT(7)	/* Not needed for board-*.c */
+#define OMAP_DEVICE_PAD_REMUX		BIT(1)	/* Dynamically remux a pad,
+						   needs enable, idle and off
+						   values */
+#define OMAP_DEVICE_PAD_WAKEUP		BIT(0)	/* Pad is wake-up capable */
+
+/**
+ * struct omap_device_pad - device specific pad configuration
+ * @name:		signal name
+ * @flags:		pad specific runtime flags
+ * @enable:		runtime value for a pad
+ * @idle:		idle value for a pad
+ * @off:		off value for a pad, defaults to safe mode
+ * @partition:		mux partition
+ * @mux:		mux register
+ */
+struct omap_device_pad {
+	char				*name;
+	u8				flags;
+	u16				enable;
+	u16				idle;
+	u16				off;
+	struct omap_mux_partition	*partition;
+	struct omap_mux			*mux;
+};
+
+struct omap_hwmod_mux_info;
+
 #if defined(CONFIG_OMAP_MUX)
 
 /**
@@ -122,6 +203,23 @@
  */
 int omap_mux_init_signal(const char *muxname, int val);
 
+/**
+ * omap_hwmod_mux_init - initialize hwmod specific mux data
+ * @bpads:		Board specific device signal names
+ * @nr_pads:		Number of signal names for the device
+ */
+extern struct omap_hwmod_mux_info *
+omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
+
+/**
+ * omap_hwmod_mux - omap hwmod specific pin muxing
+ * @hmux:		Pads for a hwmod
+ * @state:		Desired _HWMOD_STATE
+ *
+ * Called only from omap_hwmod.c, do not use.
+ */
+void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
+
 #else
 
 static inline int omap_mux_init_gpio(int gpio, int val)
@@ -133,6 +231,18 @@
 	return 0;
 }
 
+static inline struct omap_hwmod_mux_info *
+omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
+{
+	return NULL;
+}
+
+static inline void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
+{
+}
+
+static struct omap_board_mux *board_mux __initdata __maybe_unused;
+
 #endif
 
 /**
@@ -151,28 +261,39 @@
 void omap_mux_set_gpio(u16 val, int gpio);
 
 /**
+ * omap_mux_get() - get a mux partition by name
+ * @name:		Name of the mux partition
+ *
+ */
+struct omap_mux_partition *omap_mux_get(const char *name);
+
+/**
  * omap_mux_read() - read mux register
+ * @partition:		Mux partition
  * @mux_offset:		Offset of the mux register
  *
  */
-u16 omap_mux_read(u16 mux_offset);
+u16 omap_mux_read(struct omap_mux_partition *p, u16 mux_offset);
 
 /**
  * omap_mux_write() - write mux register
+ * @partition:		Mux partition
  * @val:		New mux register value
  * @mux_offset:		Offset of the mux register
  *
  * This should be only needed for dynamic remuxing of non-gpio signals.
  */
-void omap_mux_write(u16 val, u16 mux_offset);
+void omap_mux_write(struct omap_mux_partition *p, u16 val, u16 mux_offset);
 
 /**
  * omap_mux_write_array() - write an array of mux registers
+ * @partition:		Mux partition
  * @board_mux:		Array of mux registers terminated by OMAP_MUX_TERMINATOR
  *
  * This should be only needed for dynamic remuxing of non-gpio signals.
  */
-void omap_mux_write_array(struct omap_board_mux *board_mux);
+void omap_mux_write_array(struct omap_mux_partition *p,
+			  struct omap_board_mux *board_mux);
 
 /**
  * omap2420_mux_init() - initialize mux system with board specific set
@@ -196,10 +317,19 @@
 int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
 
 /**
+ * omap4_mux_init() - initialize mux system with board specific set
+ * @board_mux:		Board specific mux table
+ * @flags:		OMAP package type used for the board
+ */
+int omap4_mux_init(struct omap_board_mux *board_mux, int flags);
+
+/**
  * omap_mux_init - private mux init function, do not call
  */
-int omap_mux_init(u32 mux_pbase, u32 mux_size,
-				struct omap_mux *superset,
-				struct omap_mux *package_subset,
-				struct omap_board_mux *board_mux,
-				struct omap_ball *package_balls);
+int omap_mux_init(const char *name, u32 flags,
+		  u32 mux_pbase, u32 mux_size,
+		  struct omap_mux *superset,
+		  struct omap_mux *package_subset,
+		  struct omap_board_mux *board_mux,
+		  struct omap_ball *package_balls);
+
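The new omap_device_pad and omap_hwmod_mux_info types let board files describe per-signal enable, idle and off values that omap_hwmod_mux() applies on hwmod state changes. A hypothetical board-side sketch against the declarations above; the uart2 signal names and mux values are illustrative only, and OMAP_MUX_MODE0 is assumed to be defined alongside the other mode macros in this header:

/* Hedged sketch: a board pad table for the hwmod mux hooks declared above.
 * Signal names, flags and values are examples, not a real board layout.
 */
static struct omap_device_pad example_uart2_pads[] __initdata = {
	{
		.name	= "uart2_rx.uart2_rx",
		.flags	= OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP,
		.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
		.idle	= OMAP_PIN_INPUT | OMAP_WAKEUP_EN | OMAP_MUX_MODE0,
	},
	{
		.name	= "uart2_tx.uart2_tx",
		.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
	},
};

static struct omap_hwmod_mux_info *example_uart2_mux;

static void __init example_board_mux_init(void)
{
	example_uart2_mux = omap_hwmod_mux_init(example_uart2_pads,
						ARRAY_SIZE(example_uart2_pads));
}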
diff --git a/arch/arm/mach-omap2/mux2420.c b/arch/arm/mach-omap2/mux2420.c
index 414af54..cf6de097 100644
--- a/arch/arm/mach-omap2/mux2420.c
+++ b/arch/arm/mach-omap2/mux2420.c
@@ -678,11 +678,13 @@
 	case OMAP_PACKAGE_ZAF:
 		/* REVISIT: Please add data */
 	default:
-		pr_warning("mux: No ball data available for omap2420 package\n");
+		pr_warning("%s: No ball data available for omap2420 package\n",
+				__func__);
 	}
 
-	return omap_mux_init(OMAP2420_CONTROL_PADCONF_MUX_PBASE,
+	return omap_mux_init("core", OMAP_MUX_REG_8BIT | OMAP_MUX_GPIO_IN_MODE3,
+			     OMAP2420_CONTROL_PADCONF_MUX_PBASE,
 			     OMAP2420_CONTROL_PADCONF_MUX_SIZE,
-				omap2420_muxmodes, NULL, board_subset,
-				package_balls);
+			     omap2420_muxmodes, NULL, board_subset,
+			     package_balls);
 }
diff --git a/arch/arm/mach-omap2/mux2430.c b/arch/arm/mach-omap2/mux2430.c
index 84d2c5a..4185f92 100644
--- a/arch/arm/mach-omap2/mux2430.c
+++ b/arch/arm/mach-omap2/mux2430.c
@@ -781,11 +781,13 @@
 		package_balls = omap2430_pop_ball;
 		break;
 	default:
-		pr_warning("mux: No ball data available for omap2420 package\n");
+		pr_warning("%s: No ball data available for omap2430 package\n",
+				__func__);
 	}
 
-	return omap_mux_init(OMAP2430_CONTROL_PADCONF_MUX_PBASE,
+	return omap_mux_init("core", OMAP_MUX_REG_8BIT | OMAP_MUX_GPIO_IN_MODE3,
+			     OMAP2430_CONTROL_PADCONF_MUX_PBASE,
 			     OMAP2430_CONTROL_PADCONF_MUX_SIZE,
-				omap2430_muxmodes, NULL, board_subset,
-				package_balls);
+			     omap2430_muxmodes, NULL, board_subset,
+			     package_balls);
 }
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 574e54e..17f80e4 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -703,7 +703,7 @@
  * Signals different on CBC package compared to the superset
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBC)
-struct omap_mux __initdata omap3_cbc_subset[] = {
+static struct omap_mux __initdata omap3_cbc_subset[] = {
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #else
@@ -721,7 +721,7 @@
  */
 #if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)	\
 		&& defined(CONFIG_OMAP_PACKAGE_CBC)
-struct omap_ball __initdata omap3_cbc_ball[] = {
+static struct omap_ball __initdata omap3_cbc_ball[] = {
 	_OMAP3_BALLENTRY(CAM_D0, "ae16", NULL),
 	_OMAP3_BALLENTRY(CAM_D1, "ae15", NULL),
 	_OMAP3_BALLENTRY(CAM_D10, "d25", NULL),
@@ -2049,12 +2049,13 @@
 		package_balls = omap36xx_cbp_ball;
 		break;
 	default:
-		printk(KERN_ERR "mux: Unknown omap package, mux disabled\n");
+		pr_err("%s: Unknown omap package, mux disabled\n", __func__);
 		return -EINVAL;
 	}
 
-	return omap_mux_init(OMAP3_CONTROL_PADCONF_MUX_PBASE,
+	return omap_mux_init("core", 0,
+			     OMAP3_CONTROL_PADCONF_MUX_PBASE,
 			     OMAP3_CONTROL_PADCONF_MUX_SIZE,
-				omap3_muxmodes, package_subset, board_subset,
-				package_balls);
+			     omap3_muxmodes, package_subset, board_subset,
+			     package_balls);
 }
diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c
new file mode 100644
index 0000000..c322e7b
--- /dev/null
+++ b/arch/arm/mach-omap2/mux44xx.c
@@ -0,0 +1,1625 @@
+/*
+ * OMAP44xx ES1.0 pin mux definition
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * - Based on mux34xx.c done by Tony Lindgren <tony@atomide.com>
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "mux.h"
+
+#ifdef CONFIG_OMAP_MUX
+
+#define _OMAP4_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7)	\
+{									\
+	.reg_offset	= (OMAP4_CTRL_MODULE_PAD_##M0##_OFFSET),	\
+	.gpio		= (g),						\
+	.muxnames	= { m0, m1, m2, m3, m4, m5, m6, m7 },		\
+}
+
+#else
+
+#define _OMAP4_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7)	\
+{									\
+	.reg_offset	= (OMAP4_CTRL_MODULE_PAD_##M0##_OFFSET),	\
+	.gpio		= (g),						\
+}
+
+#endif
+
+#define _OMAP4_BALLENTRY(M0, bb, bt)				\
+{									\
+	.reg_offset	= (OMAP4_CTRL_MODULE_PAD_##M0##_OFFSET),	\
+	.balls		= { bb, bt },					\
+}
+
+/*
+ * Superset of all mux modes for omap4 ES1.0
+ */
+static struct omap_mux __initdata omap4_core_muxmodes[] = {
+	_OMAP4_MUXENTRY(GPMC_AD0, 0, "gpmc_ad0", "sdmmc2_dat0", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD1, 0, "gpmc_ad1", "sdmmc2_dat1", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD2, 0, "gpmc_ad2", "sdmmc2_dat2", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD3, 0, "gpmc_ad3", "sdmmc2_dat3", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD4, 0, "gpmc_ad4", "sdmmc2_dat4",
+			"sdmmc2_dir_dat0", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD5, 0, "gpmc_ad5", "sdmmc2_dat5",
+			"sdmmc2_dir_dat1", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD6, 0, "gpmc_ad6", "sdmmc2_dat6",
+			"sdmmc2_dir_cmd", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD7, 0, "gpmc_ad7", "sdmmc2_dat7",
+			"sdmmc2_clk_fdbk", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD8, 32, "gpmc_ad8", "kpd_row0", "c2c_data15",
+			"gpio_32", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD9, 33, "gpmc_ad9", "kpd_row1", "c2c_data14",
+			"gpio_33", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD10, 34, "gpmc_ad10", "kpd_row2", "c2c_data13",
+			"gpio_34", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD11, 35, "gpmc_ad11", "kpd_row3", "c2c_data12",
+			"gpio_35", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD12, 36, "gpmc_ad12", "kpd_col0", "c2c_data11",
+			"gpio_36", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD13, 37, "gpmc_ad13", "kpd_col1", "c2c_data10",
+			"gpio_37", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD14, 38, "gpmc_ad14", "kpd_col2", "c2c_data9",
+			"gpio_38", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD15, 39, "gpmc_ad15", "kpd_col3", "c2c_data8",
+			"gpio_39", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_A16, 40, "gpmc_a16", "kpd_row4", "c2c_datain0",
+			"gpio_40", "venc_656_data0", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_A17, 41, "gpmc_a17", "kpd_row5", "c2c_datain1",
+			"gpio_41", "venc_656_data1", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A18, 42, "gpmc_a18", "kpd_row6", "c2c_datain2",
+			"gpio_42", "venc_656_data2", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A19, 43, "gpmc_a19", "kpd_row7", "c2c_datain3",
+			"gpio_43", "venc_656_data3", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A20, 44, "gpmc_a20", "kpd_col4", "c2c_datain4",
+			"gpio_44", "venc_656_data4", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A21, 45, "gpmc_a21", "kpd_col5", "c2c_datain5",
+			"gpio_45", "venc_656_data5", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A22, 46, "gpmc_a22", "kpd_col6", "c2c_datain6",
+			"gpio_46", "venc_656_data6", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A23, 47, "gpmc_a23", "kpd_col7", "c2c_datain7",
+			"gpio_47", "venc_656_data7", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A24, 48, "gpmc_a24", NULL, "c2c_clkout0",
+			"gpio_48", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A25, 49, "gpmc_a25", NULL, "c2c_clkout1",
+			"gpio_49", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS0, 50, "gpmc_ncs0", NULL, NULL, "gpio_50",
+			"sys_ndmareq0", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NCS1, 51, "gpmc_ncs1", NULL, "c2c_dataout6",
+			"gpio_51", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS2, 52, "gpmc_ncs2", NULL, "c2c_dataout7",
+			"gpio_52", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS3, 53, "gpmc_ncs3", "gpmc_dir",
+			"c2c_dataout4", "gpio_53", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NWP, 54, "gpmc_nwp", "dsi1_te0", NULL, "gpio_54",
+			"sys_ndmareq1", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_CLK, 55, "gpmc_clk", NULL, NULL, "gpio_55",
+			"sys_ndmareq2", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NADV_ALE, 56, "gpmc_nadv_ale", "dsi1_te1", NULL,
+			"gpio_56", "sys_ndmareq3", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NOE, 0, "gpmc_noe", "sdmmc2_clk", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NWE, 0, "gpmc_nwe", "sdmmc2_cmd", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NBE0_CLE, 59, "gpmc_nbe0_cle", "dsi2_te0", NULL,
+			"gpio_59", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NBE1, 60, "gpmc_nbe1", NULL, "c2c_dataout5",
+			"gpio_60", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_WAIT0, 61, "gpmc_wait0", "dsi2_te1", NULL,
+			"gpio_61", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_WAIT1, 62, "gpmc_wait1", NULL, "c2c_dataout2",
+			"gpio_62", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(C2C_DATA11, 100, "c2c_data11", "usbc1_icusb_txen",
+			"c2c_dataout3", "gpio_100", "sys_ndmareq0", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(C2C_DATA12, 101, "c2c_data12", "dsi1_te0",
+			"c2c_clkin0", "gpio_101", "sys_ndmareq1", NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(C2C_DATA13, 102, "c2c_data13", "dsi1_te1",
+			"c2c_clkin1", "gpio_102", "sys_ndmareq2", NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(C2C_DATA14, 103, "c2c_data14", "dsi2_te0",
+			"c2c_dataout0", "gpio_103", "sys_ndmareq3", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(C2C_DATA15, 104, "c2c_data15", "dsi2_te1",
+			"c2c_dataout1", "gpio_104", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_HPD, 63, "hdmi_hpd", NULL, NULL, "gpio_63", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_CEC, 64, "hdmi_cec", NULL, NULL, "gpio_64", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_DDC_SCL, 65, "hdmi_ddc_scl", NULL, NULL,
+			"gpio_65", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_DDC_SDA, 66, "hdmi_ddc_sda", NULL, NULL,
+			"gpio_66", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX0, 0, "csi21_dx0", NULL, NULL, "gpi_67", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY0, 0, "csi21_dy0", NULL, NULL, "gpi_68", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX1, 0, "csi21_dx1", NULL, NULL, "gpi_69", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY1, 0, "csi21_dy1", NULL, NULL, "gpi_70", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX2, 0, "csi21_dx2", NULL, NULL, "gpi_71", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY2, 0, "csi21_dy2", NULL, NULL, "gpi_72", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX3, 0, "csi21_dx3", NULL, NULL, "gpi_73", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY3, 0, "csi21_dy3", NULL, NULL, "gpi_74", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX4, 0, "csi21_dx4", NULL, NULL, "gpi_75", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY4, 0, "csi21_dy4", NULL, NULL, "gpi_76", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DX0, 0, "csi22_dx0", NULL, NULL, "gpi_77", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DY0, 0, "csi22_dy0", NULL, NULL, "gpi_78", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DX1, 0, "csi22_dx1", NULL, NULL, "gpi_79", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DY1, 0, "csi22_dy1", NULL, NULL, "gpi_80", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CAM_SHUTTER, 81, "cam_shutter", NULL, NULL, "gpio_81",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CAM_STROBE, 82, "cam_strobe", NULL, NULL, "gpio_82",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CAM_GLOBALRESET, 83, "cam_globalreset", NULL, NULL,
+			"gpio_83", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_CLK, 84, "usbb1_ulpitll_clk",
+			"hsi1_cawake", NULL, "gpio_84", "usbb1_ulpiphy_clk",
+			NULL, "hw_dbg20", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_STP, 85, "usbb1_ulpitll_stp",
+			"hsi1_cadata", "mcbsp4_clkr", "gpio_85",
+			"usbb1_ulpiphy_stp", "usbb1_mm_rxdp", "hw_dbg21",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DIR, 86, "usbb1_ulpitll_dir",
+			"hsi1_caflag", "mcbsp4_fsr", "gpio_86",
+			"usbb1_ulpiphy_dir", NULL, "hw_dbg22", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_NXT, 87, "usbb1_ulpitll_nxt",
+			"hsi1_acready", "mcbsp4_fsx", "gpio_87",
+			"usbb1_ulpiphy_nxt", "usbb1_mm_rxdm", "hw_dbg23",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT0, 88, "usbb1_ulpitll_dat0",
+			"hsi1_acwake", "mcbsp4_clkx", "gpio_88",
+			"usbb1_ulpiphy_dat0", "usbb1_mm_rxrcv", "hw_dbg24",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT1, 89, "usbb1_ulpitll_dat1",
+			"hsi1_acdata", "mcbsp4_dx", "gpio_89",
+			"usbb1_ulpiphy_dat1", "usbb1_mm_txse0", "hw_dbg25",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT2, 90, "usbb1_ulpitll_dat2",
+			"hsi1_acflag", "mcbsp4_dr", "gpio_90",
+			"usbb1_ulpiphy_dat2", "usbb1_mm_txdat", "hw_dbg26",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT3, 91, "usbb1_ulpitll_dat3",
+			"hsi1_caready", NULL, "gpio_91", "usbb1_ulpiphy_dat3",
+			"usbb1_mm_txen", "hw_dbg27", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT4, 92, "usbb1_ulpitll_dat4",
+			"dmtimer8_pwm_evt", "abe_mcbsp3_dr", "gpio_92",
+			"usbb1_ulpiphy_dat4", NULL, "hw_dbg28", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT5, 93, "usbb1_ulpitll_dat5",
+			"dmtimer9_pwm_evt", "abe_mcbsp3_dx", "gpio_93",
+			"usbb1_ulpiphy_dat5", NULL, "hw_dbg29", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT6, 94, "usbb1_ulpitll_dat6",
+			"dmtimer10_pwm_evt", "abe_mcbsp3_clkx", "gpio_94",
+			"usbb1_ulpiphy_dat6", "abe_dmic_din3", "hw_dbg30",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT7, 95, "usbb1_ulpitll_dat7",
+			"dmtimer11_pwm_evt", "abe_mcbsp3_fsx", "gpio_95",
+			"usbb1_ulpiphy_dat7", "abe_dmic_clk3", "hw_dbg31",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_HSIC_DATA, 96, "usbb1_hsic_data", NULL, NULL,
+			"gpio_96", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_HSIC_STROBE, 97, "usbb1_hsic_strobe", NULL,
+			NULL, "gpio_97", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBC1_ICUSB_DP, 98, "usbc1_icusb_dp", NULL, NULL,
+			"gpio_98", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBC1_ICUSB_DM, 99, "usbc1_icusb_dm", NULL, NULL,
+			"gpio_99", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_CLK, 100, "sdmmc1_clk", NULL, "dpm_emu19",
+			"gpio_100", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_CMD, 101, "sdmmc1_cmd", NULL, "uart1_rx",
+			"gpio_101", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT0, 102, "sdmmc1_dat0", NULL, "dpm_emu18",
+			"gpio_102", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT1, 103, "sdmmc1_dat1", NULL, "dpm_emu17",
+			"gpio_103", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT2, 104, "sdmmc1_dat2", NULL, "dpm_emu16",
+			"gpio_104", "jtag_tms_tmsc", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT3, 105, "sdmmc1_dat3", NULL, "dpm_emu15",
+			"gpio_105", "jtag_tck", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT4, 106, "sdmmc1_dat4", NULL, NULL,
+			"gpio_106", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT5, 107, "sdmmc1_dat5", NULL, NULL,
+			"gpio_107", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT6, 108, "sdmmc1_dat6", NULL, NULL,
+			"gpio_108", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT7, 109, "sdmmc1_dat7", NULL, NULL,
+			"gpio_109", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_CLKX, 110, "abe_mcbsp2_clkx", "mcspi2_clk",
+			"abe_mcasp_ahclkx", "gpio_110", "usbb2_mm_rxdm",
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_DR, 111, "abe_mcbsp2_dr", "mcspi2_somi",
+			"abe_mcasp_axr", "gpio_111", "usbb2_mm_rxdp", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_DX, 112, "abe_mcbsp2_dx", "mcspi2_simo",
+			"abe_mcasp_amute", "gpio_112", "usbb2_mm_rxrcv", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_FSX, 113, "abe_mcbsp2_fsx", "mcspi2_cs0",
+			"abe_mcasp_afsx", "gpio_113", "usbb2_mm_txen", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_CLKX, 114, "abe_mcbsp1_clkx",
+			"abe_slimbus1_clock", NULL, "gpio_114", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_DR, 115, "abe_mcbsp1_dr",
+			"abe_slimbus1_data", NULL, "gpio_115", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_DX, 116, "abe_mcbsp1_dx", "sdmmc3_dat2",
+			"abe_mcasp_aclkx", "gpio_116", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_FSX, 117, "abe_mcbsp1_fsx", "sdmmc3_dat3",
+			"abe_mcasp_amutein", "gpio_117", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_UL_DATA, 0, "abe_pdm_ul_data",
+			"abe_mcbsp3_dr", NULL, NULL, NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_DL_DATA, 0, "abe_pdm_dl_data",
+			"abe_mcbsp3_dx", NULL, NULL, NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_FRAME, 0, "abe_pdm_frame", "abe_mcbsp3_clkx",
+			NULL, NULL, NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_LB_CLK, 0, "abe_pdm_lb_clk", "abe_mcbsp3_fsx",
+			NULL, NULL, NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_CLKS, 118, "abe_clks", NULL, NULL, "gpio_118",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_CLK1, 119, "abe_dmic_clk1", NULL, NULL,
+			"gpio_119", "usbb2_mm_txse0", NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_DIN1, 120, "abe_dmic_din1", NULL, NULL,
+			"gpio_120", "usbb2_mm_txdat", NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_DIN2, 121, "abe_dmic_din2", "slimbus2_clock",
+			NULL, "gpio_121", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_DIN3, 122, "abe_dmic_din3", "slimbus2_data",
+			"abe_dmic_clk2", "gpio_122", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(UART2_CTS, 123, "uart2_cts", "sdmmc3_clk", NULL,
+			"gpio_123", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_RTS, 124, "uart2_rts", "sdmmc3_cmd", NULL,
+			"gpio_124", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_RX, 125, "uart2_rx", "sdmmc3_dat0", NULL,
+			"gpio_125", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_TX, 126, "uart2_tx", "sdmmc3_dat1", NULL,
+			"gpio_126", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDQ_SIO, 127, "hdq_sio", "i2c3_sccb", "i2c2_sccb",
+			"gpio_127", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C1_SCL, 0, "i2c1_scl", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(I2C1_SDA, 0, "i2c1_sda", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(I2C2_SCL, 128, "i2c2_scl", "uart1_rx", NULL,
+			"gpio_128", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C2_SDA, 129, "i2c2_sda", "uart1_tx", NULL,
+			"gpio_129", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C3_SCL, 130, "i2c3_scl", NULL, NULL, "gpio_130",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C3_SDA, 131, "i2c3_sda", NULL, NULL, "gpio_131",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C4_SCL, 132, "i2c4_scl", NULL, NULL, "gpio_132",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C4_SDA, 133, "i2c4_sda", NULL, NULL, "gpio_133",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CLK, 134, "mcspi1_clk", NULL, NULL, "gpio_134",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_SOMI, 135, "mcspi1_somi", NULL, NULL,
+			"gpio_135", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_SIMO, 136, "mcspi1_simo", NULL, NULL,
+			"gpio_136", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS0, 137, "mcspi1_cs0", NULL, NULL, "gpio_137",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS1, 138, "mcspi1_cs1", "uart1_rx", NULL,
+			"gpio_138", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS2, 139, "mcspi1_cs2", "uart1_cts",
+			"slimbus2_clock", "gpio_139", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS3, 140, "mcspi1_cs3", "uart1_rts",
+			"slimbus2_data", "gpio_140", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(UART3_CTS_RCTX, 141, "uart3_cts_rctx", "uart1_tx",
+			NULL, "gpio_141", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART3_RTS_SD, 142, "uart3_rts_sd", NULL, NULL,
+			"gpio_142", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART3_RX_IRRX, 143, "uart3_rx_irrx",
+			"dmtimer8_pwm_evt", NULL, "gpio_143", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART3_TX_IRTX, 144, "uart3_tx_irtx",
+			"dmtimer9_pwm_evt", NULL, "gpio_144", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_CLK, 145, "sdmmc5_clk", "mcspi2_clk",
+			"usbc1_icusb_dp", "gpio_145", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_CMD, 146, "sdmmc5_cmd", "mcspi2_simo",
+			"usbc1_icusb_dm", "gpio_146", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT0, 147, "sdmmc5_dat0", "mcspi2_somi",
+			"usbc1_icusb_rcv", "gpio_147", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT1, 148, "sdmmc5_dat1", NULL,
+			"usbc1_icusb_txen", "gpio_148", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT2, 149, "sdmmc5_dat2", "mcspi2_cs1", NULL,
+			"gpio_149", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT3, 150, "sdmmc5_dat3", "mcspi2_cs0", NULL,
+			"gpio_150", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_CLK, 151, "mcspi4_clk", "sdmmc4_clk", NULL,
+			"gpio_151", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_SIMO, 152, "mcspi4_simo", "sdmmc4_cmd", NULL,
+			"gpio_152", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_SOMI, 153, "mcspi4_somi", "sdmmc4_dat0", NULL,
+			"gpio_153", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_CS0, 154, "mcspi4_cs0", "sdmmc4_dat3", NULL,
+			"gpio_154", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART4_RX, 155, "uart4_rx", "sdmmc4_dat2", NULL,
+			"gpio_155", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART4_TX, 156, "uart4_tx", "sdmmc4_dat1", NULL,
+			"gpio_156", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_CLK, 157, "usbb2_ulpitll_clk",
+			"usbb2_ulpiphy_clk", "sdmmc4_cmd", "gpio_157",
+			"hsi2_cawake", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_STP, 158, "usbb2_ulpitll_stp",
+			"usbb2_ulpiphy_stp", "sdmmc4_clk", "gpio_158",
+			"hsi2_cadata", "dispc2_data23", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DIR, 159, "usbb2_ulpitll_dir",
+			"usbb2_ulpiphy_dir", "sdmmc4_dat0", "gpio_159",
+			"hsi2_caflag", "dispc2_data22", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_NXT, 160, "usbb2_ulpitll_nxt",
+			"usbb2_ulpiphy_nxt", "sdmmc4_dat1", "gpio_160",
+			"hsi2_acready", "dispc2_data21", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT0, 161, "usbb2_ulpitll_dat0",
+			"usbb2_ulpiphy_dat0", "sdmmc4_dat2", "gpio_161",
+			"hsi2_acwake", "dispc2_data20", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT1, 162, "usbb2_ulpitll_dat1",
+			"usbb2_ulpiphy_dat1", "sdmmc4_dat3", "gpio_162",
+			"hsi2_acdata", "dispc2_data19", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT2, 163, "usbb2_ulpitll_dat2",
+			"usbb2_ulpiphy_dat2", "sdmmc3_dat2", "gpio_163",
+			"hsi2_acflag", "dispc2_data18", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT3, 164, "usbb2_ulpitll_dat3",
+			"usbb2_ulpiphy_dat3", "sdmmc3_dat1", "gpio_164",
+			"hsi2_caready", "dispc2_data15", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT4, 165, "usbb2_ulpitll_dat4",
+			"usbb2_ulpiphy_dat4", "sdmmc3_dat0", "gpio_165",
+			"mcspi3_somi", "dispc2_data14", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT5, 166, "usbb2_ulpitll_dat5",
+			"usbb2_ulpiphy_dat5", "sdmmc3_dat3", "gpio_166",
+			"mcspi3_cs0", "dispc2_data13", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT6, 167, "usbb2_ulpitll_dat6",
+			"usbb2_ulpiphy_dat6", "sdmmc3_cmd", "gpio_167",
+			"mcspi3_simo", "dispc2_data12", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT7, 168, "usbb2_ulpitll_dat7",
+			"usbb2_ulpiphy_dat7", "sdmmc3_clk", "gpio_168",
+			"mcspi3_clk", "dispc2_data11", NULL, "reserved"),
+	_OMAP4_MUXENTRY(USBB2_HSIC_DATA, 169, "usbb2_hsic_data", NULL, NULL,
+			"gpio_169", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_HSIC_STROBE, 170, "usbb2_hsic_strobe", NULL,
+			NULL, "gpio_170", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_TX0, 171, "unipro_tx0", "kpd_col0", NULL,
+			"gpio_171", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_TY0, 172, "unipro_ty0", "kpd_col1", NULL,
+			"gpio_172", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_TX1, 173, "unipro_tx1", "kpd_col2", NULL,
+			"gpio_173", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_TY1, 174, "unipro_ty1", "kpd_col3", NULL,
+			"gpio_174", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_TX2, 0, "unipro_tx2", "kpd_col4", NULL,
+			"gpio_0", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_TY2, 1, "unipro_ty2", "kpd_col5", NULL,
+			"gpio_1", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_RX0, 0, "unipro_rx0", "kpd_row0", NULL,
+			"gpi_175", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_RY0, 0, "unipro_ry0", "kpd_row1", NULL,
+			"gpi_176", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_RX1, 0, "unipro_rx1", "kpd_row2", NULL,
+			"gpi_177", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_RY1, 0, "unipro_ry1", "kpd_row3", NULL,
+			"gpi_178", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_RX2, 0, "unipro_rx2", "kpd_row4", NULL,
+			"gpi_2", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UNIPRO_RY2, 0, "unipro_ry2", "kpd_row5", NULL,
+			"gpi_3", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBA0_OTG_CE, 0, "usba0_otg_ce", NULL, NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(USBA0_OTG_DP, 179, "usba0_otg_dp", "uart3_rx_irrx",
+			"uart2_rx", "gpio_179", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBA0_OTG_DM, 180, "usba0_otg_dm", "uart3_tx_irtx",
+			"uart2_tx", "gpio_180", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK1_OUT, 181, "fref_clk1_out", NULL, NULL,
+			"gpio_181", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK2_OUT, 182, "fref_clk2_out", NULL, NULL,
+			"gpio_182", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_NIRQ1, 0, "sys_nirq1", NULL, NULL, NULL, NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_NIRQ2, 183, "sys_nirq2", NULL, NULL, "gpio_183",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT0, 184, "sys_boot0", NULL, NULL, "gpio_184",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT1, 185, "sys_boot1", NULL, NULL, "gpio_185",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT2, 186, "sys_boot2", NULL, NULL, "gpio_186",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT3, 187, "sys_boot3", NULL, NULL, "gpio_187",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT4, 188, "sys_boot4", NULL, NULL, "gpio_188",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT5, 189, "sys_boot5", NULL, NULL, "gpio_189",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU0, 11, "dpm_emu0", NULL, NULL, "gpio_11", NULL,
+			NULL, "hw_dbg0", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU1, 12, "dpm_emu1", NULL, NULL, "gpio_12", NULL,
+			NULL, "hw_dbg1", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU2, 13, "dpm_emu2", "usba0_ulpiphy_clk", NULL,
+			"gpio_13", NULL, "dispc2_fid", "hw_dbg2", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU3, 14, "dpm_emu3", "usba0_ulpiphy_stp", NULL,
+			"gpio_14", NULL, "dispc2_data10", "hw_dbg3",
+			"reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU4, 15, "dpm_emu4", "usba0_ulpiphy_dir", NULL,
+			"gpio_15", NULL, "dispc2_data9", "hw_dbg4",
+			"reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU5, 16, "dpm_emu5", "usba0_ulpiphy_nxt", NULL,
+			"gpio_16", "rfbi_te_vsync0", "dispc2_data16",
+			"hw_dbg5", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU6, 17, "dpm_emu6", "usba0_ulpiphy_dat0",
+			"uart3_tx_irtx", "gpio_17", "rfbi_hsync0",
+			"dispc2_data17", "hw_dbg6", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU7, 18, "dpm_emu7", "usba0_ulpiphy_dat1",
+			"uart3_rx_irrx", "gpio_18", "rfbi_cs0",
+			"dispc2_hsync", "hw_dbg7", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU8, 19, "dpm_emu8", "usba0_ulpiphy_dat2",
+			"uart3_rts_sd", "gpio_19", "rfbi_re", "dispc2_pclk",
+			"hw_dbg8", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU9, 20, "dpm_emu9", "usba0_ulpiphy_dat3",
+			"uart3_cts_rctx", "gpio_20", "rfbi_we",
+			"dispc2_vsync", "hw_dbg9", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU10, 21, "dpm_emu10", "usba0_ulpiphy_dat4",
+			NULL, "gpio_21", "rfbi_a0", "dispc2_de", "hw_dbg10",
+			"reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU11, 22, "dpm_emu11", "usba0_ulpiphy_dat5",
+			NULL, "gpio_22", "rfbi_data8", "dispc2_data8",
+			"hw_dbg11", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU12, 23, "dpm_emu12", "usba0_ulpiphy_dat6",
+			NULL, "gpio_23", "rfbi_data7", "dispc2_data7",
+			"hw_dbg12", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU13, 24, "dpm_emu13", "usba0_ulpiphy_dat7",
+			NULL, "gpio_24", "rfbi_data6", "dispc2_data6",
+			"hw_dbg13", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU14, 25, "dpm_emu14", "sys_drm_msecure",
+			"uart1_rx", "gpio_25", "rfbi_data5", "dispc2_data5",
+			"hw_dbg14", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU15, 26, "dpm_emu15", "sys_secure_indicator",
+			NULL, "gpio_26", "rfbi_data4", "dispc2_data4",
+			"hw_dbg15", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU16, 27, "dpm_emu16", "dmtimer8_pwm_evt",
+			"dsi1_te0", "gpio_27", "rfbi_data3", "dispc2_data3",
+			"hw_dbg16", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU17, 28, "dpm_emu17", "dmtimer9_pwm_evt",
+			"dsi1_te1", "gpio_28", "rfbi_data2", "dispc2_data2",
+			"hw_dbg17", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU18, 190, "dpm_emu18", "dmtimer10_pwm_evt",
+			"dsi2_te0", "gpio_190", "rfbi_data1", "dispc2_data1",
+			"hw_dbg18", "reserved"),
+	_OMAP4_MUXENTRY(DPM_EMU19, 191, "dpm_emu19", "dmtimer11_pwm_evt",
+			"dsi2_te1", "gpio_191", "rfbi_data0", "dispc2_data0",
+			"hw_dbg19", "reserved"),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
+/*
+ * Balls for 44XX CBL package
+ * 547-pin CBL ES1.0 S-FPGA-N547, 0.40mm Ball Pitch (Top),
+ *				  0.40mm Ball Pitch (Bottom)
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
+		&& defined(CONFIG_OMAP_PACKAGE_CBL)
+static struct omap_ball __initdata omap4_core_cbl_ball[] = {
+	_OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD3, "d13", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD4, "c15", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD5, "d15", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD6, "a16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD7, "b16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD8, "c16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD9, "d16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD10, "c17", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD11, "d17", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD12, "c18", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD13, "d18", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD14, "c19", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD15, "d19", NULL),
+	_OMAP4_BALLENTRY(GPMC_A16, "b17", NULL),
+	_OMAP4_BALLENTRY(GPMC_A17, "a18", NULL),
+	_OMAP4_BALLENTRY(GPMC_A18, "b18", NULL),
+	_OMAP4_BALLENTRY(GPMC_A19, "a19", NULL),
+	_OMAP4_BALLENTRY(GPMC_A20, "b19", NULL),
+	_OMAP4_BALLENTRY(GPMC_A21, "b20", NULL),
+	_OMAP4_BALLENTRY(GPMC_A22, "a21", NULL),
+	_OMAP4_BALLENTRY(GPMC_A23, "b21", NULL),
+	_OMAP4_BALLENTRY(GPMC_A24, "c20", NULL),
+	_OMAP4_BALLENTRY(GPMC_A25, "d20", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS0, "b25", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS1, "c21", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS2, "d21", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS3, "c22", NULL),
+	_OMAP4_BALLENTRY(GPMC_NWP, "c25", NULL),
+	_OMAP4_BALLENTRY(GPMC_CLK, "b22", NULL),
+	_OMAP4_BALLENTRY(GPMC_NADV_ALE, "d25", NULL),
+	_OMAP4_BALLENTRY(GPMC_NOE, "b11", NULL),
+	_OMAP4_BALLENTRY(GPMC_NWE, "b12", NULL),
+	_OMAP4_BALLENTRY(GPMC_NBE0_CLE, "c23", NULL),
+	_OMAP4_BALLENTRY(GPMC_NBE1, "d22", NULL),
+	_OMAP4_BALLENTRY(GPMC_WAIT0, "b26", NULL),
+	_OMAP4_BALLENTRY(GPMC_WAIT1, "b23", NULL),
+	_OMAP4_BALLENTRY(C2C_DATA11, "d23", NULL),
+	_OMAP4_BALLENTRY(C2C_DATA12, "a24", NULL),
+	_OMAP4_BALLENTRY(C2C_DATA13, "b24", NULL),
+	_OMAP4_BALLENTRY(C2C_DATA14, "c24", NULL),
+	_OMAP4_BALLENTRY(C2C_DATA15, "d24", NULL),
+	_OMAP4_BALLENTRY(HDMI_HPD, "b9", NULL),
+	_OMAP4_BALLENTRY(HDMI_CEC, "b10", NULL),
+	_OMAP4_BALLENTRY(HDMI_DDC_SCL, "a8", NULL),
+	_OMAP4_BALLENTRY(HDMI_DDC_SDA, "b8", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX0, "r26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY0, "r25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX1, "t26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY1, "t25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX2, "u26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY2, "u25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX3, "v26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY3, "v25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX4, "w26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY4, "w25", NULL),
+	_OMAP4_BALLENTRY(CSI22_DX0, "m26", NULL),
+	_OMAP4_BALLENTRY(CSI22_DY0, "m25", NULL),
+	_OMAP4_BALLENTRY(CSI22_DX1, "n26", NULL),
+	_OMAP4_BALLENTRY(CSI22_DY1, "n25", NULL),
+	_OMAP4_BALLENTRY(CAM_SHUTTER, "t27", NULL),
+	_OMAP4_BALLENTRY(CAM_STROBE, "u27", NULL),
+	_OMAP4_BALLENTRY(CAM_GLOBALRESET, "v27", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_CLK, "ae18", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_STP, "ag19", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DIR, "af19", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_NXT, "ae19", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT0, "af18", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT1, "ag18", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT2, "ae17", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT3, "af17", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT4, "ah17", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT5, "ae16", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT6, "af16", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT7, "ag16", NULL),
+	_OMAP4_BALLENTRY(USBB1_HSIC_DATA, "af14", NULL),
+	_OMAP4_BALLENTRY(USBB1_HSIC_STROBE, "ae14", NULL),
+	_OMAP4_BALLENTRY(USBC1_ICUSB_DP, "h2", NULL),
+	_OMAP4_BALLENTRY(USBC1_ICUSB_DM, "h3", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_CLK, "d2", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_CMD, "e3", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT0, "e4", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT1, "e2", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT2, "e1", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT3, "f4", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT4, "f3", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT5, "f1", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT6, "g4", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT7, "g3", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_CLKX, "ad27", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_DR, "ad26", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_DX, "ad25", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_FSX, "ac28", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_CLKX, "ac26", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_DR, "ac25", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_DX, "ab25", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_FSX, "ac27", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_UL_DATA, "ag25", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_DL_DATA, "af25", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_FRAME, "ae25", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_LB_CLK, "af26", NULL),
+	_OMAP4_BALLENTRY(ABE_CLKS, "ah26", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_CLK1, "ae24", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_DIN1, "af24", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_DIN2, "ag24", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_DIN3, "ah24", NULL),
+	_OMAP4_BALLENTRY(UART2_CTS, "ab26", NULL),
+	_OMAP4_BALLENTRY(UART2_RTS, "ab27", NULL),
+	_OMAP4_BALLENTRY(UART2_RX, "aa25", NULL),
+	_OMAP4_BALLENTRY(UART2_TX, "aa26", NULL),
+	_OMAP4_BALLENTRY(HDQ_SIO, "aa27", NULL),
+	_OMAP4_BALLENTRY(I2C1_SCL, "ae28", NULL),
+	_OMAP4_BALLENTRY(I2C1_SDA, "ae26", NULL),
+	_OMAP4_BALLENTRY(I2C2_SCL, "c26", NULL),
+	_OMAP4_BALLENTRY(I2C2_SDA, "d26", NULL),
+	_OMAP4_BALLENTRY(I2C3_SCL, "w27", NULL),
+	_OMAP4_BALLENTRY(I2C3_SDA, "y27", NULL),
+	_OMAP4_BALLENTRY(I2C4_SCL, "ag21", NULL),
+	_OMAP4_BALLENTRY(I2C4_SDA, "ah22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CLK, "af22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_SOMI, "ae22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_SIMO, "ag22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS0, "ae23", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS1, "af23", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS2, "ag23", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS3, "ah23", NULL),
+	_OMAP4_BALLENTRY(UART3_CTS_RCTX, "f27", NULL),
+	_OMAP4_BALLENTRY(UART3_RTS_SD, "f28", NULL),
+	_OMAP4_BALLENTRY(UART3_RX_IRRX, "g27", NULL),
+	_OMAP4_BALLENTRY(UART3_TX_IRTX, "g28", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_CLK, "ae5", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_CMD, "af5", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT0, "ae4", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT1, "af4", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT2, "ag3", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT3, "af3", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_CLK, "ae21", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_SIMO, "af20", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_SOMI, "af21", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_CS0, "ae20", NULL),
+	_OMAP4_BALLENTRY(UART4_RX, "ag20", NULL),
+	_OMAP4_BALLENTRY(UART4_TX, "ah19", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_CLK, "ag12", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_STP, "af12", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DIR, "ae12", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_NXT, "ag13", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT0, "ae11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT1, "af11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT2, "ag11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT3, "ah11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT4, "ae10", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT5, "af10", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT6, "ag10", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT7, "ae9", NULL),
+	_OMAP4_BALLENTRY(USBB2_HSIC_DATA, "af13", NULL),
+	_OMAP4_BALLENTRY(USBB2_HSIC_STROBE, "ae13", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_TX0, "g26", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_TY0, "g25", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_TX1, "h26", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_TY1, "h25", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_TX2, "j27", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_TY2, "h27", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_RX0, "j26", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_RY0, "j25", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_RX1, "k26", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_RY1, "k25", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_RX2, "l27", NULL),
+	_OMAP4_BALLENTRY(UNIPRO_RY2, "k27", NULL),
+	_OMAP4_BALLENTRY(USBA0_OTG_CE, "c3", NULL),
+	_OMAP4_BALLENTRY(USBA0_OTG_DP, "b5", NULL),
+	_OMAP4_BALLENTRY(USBA0_OTG_DM, "b4", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK1_OUT, "aa28", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK2_OUT, "y28", NULL),
+	_OMAP4_BALLENTRY(SYS_NIRQ1, "ae6", NULL),
+	_OMAP4_BALLENTRY(SYS_NIRQ2, "af6", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT0, "f26", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT1, "e27", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT2, "e26", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT3, "e25", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT4, "d28", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT5, "d27", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU0, "m2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU1, "n2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU2, "p2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU3, "v1", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU4, "v2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU5, "w1", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU6, "w2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU7, "w3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU8, "w4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU9, "y2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU10, "y3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU11, "y4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU12, "aa1", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU13, "aa2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU14, "aa3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU15, "aa4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU16, "ab2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU17, "ab3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU18, "ab4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU19, "ac4", NULL),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap4_core_cbl_ball  NULL
+#endif
+
+/*
+ * Superset of all mux modes for omap4 ES2.0
+ */
+static struct omap_mux __initdata omap4_es2_core_muxmodes[] = {
+	_OMAP4_MUXENTRY(GPMC_AD0, 0, "gpmc_ad0", "sdmmc2_dat0", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD1, 0, "gpmc_ad1", "sdmmc2_dat1", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD2, 0, "gpmc_ad2", "sdmmc2_dat2", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD3, 0, "gpmc_ad3", "sdmmc2_dat3", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD4, 0, "gpmc_ad4", "sdmmc2_dat4",
+			"sdmmc2_dir_dat0", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD5, 0, "gpmc_ad5", "sdmmc2_dat5",
+			"sdmmc2_dir_dat1", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD6, 0, "gpmc_ad6", "sdmmc2_dat6",
+			"sdmmc2_dir_cmd", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD7, 0, "gpmc_ad7", "sdmmc2_dat7",
+			"sdmmc2_clk_fdbk", NULL, NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD8, 32, "gpmc_ad8", "kpd_row0", "c2c_data15",
+			"gpio_32", NULL, "sdmmc1_dat0", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD9, 33, "gpmc_ad9", "kpd_row1", "c2c_data14",
+			"gpio_33", NULL, "sdmmc1_dat1", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD10, 34, "gpmc_ad10", "kpd_row2", "c2c_data13",
+			"gpio_34", NULL, "sdmmc1_dat2", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD11, 35, "gpmc_ad11", "kpd_row3", "c2c_data12",
+			"gpio_35", NULL, "sdmmc1_dat3", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD12, 36, "gpmc_ad12", "kpd_col0", "c2c_data11",
+			"gpio_36", NULL, "sdmmc1_dat4", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD13, 37, "gpmc_ad13", "kpd_col1", "c2c_data10",
+			"gpio_37", NULL, "sdmmc1_dat5", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD14, 38, "gpmc_ad14", "kpd_col2", "c2c_data9",
+			"gpio_38", NULL, "sdmmc1_dat6", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_AD15, 39, "gpmc_ad15", "kpd_col3", "c2c_data8",
+			"gpio_39", NULL, "sdmmc1_dat7", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_A16, 40, "gpmc_a16", "kpd_row4", "c2c_datain0",
+			"gpio_40", "venc_656_data0", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A17, 41, "gpmc_a17", "kpd_row5", "c2c_datain1",
+			"gpio_41", "venc_656_data1", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A18, 42, "gpmc_a18", "kpd_row6", "c2c_datain2",
+			"gpio_42", "venc_656_data2", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A19, 43, "gpmc_a19", "kpd_row7", "c2c_datain3",
+			"gpio_43", "venc_656_data3", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A20, 44, "gpmc_a20", "kpd_col4", "c2c_datain4",
+			"gpio_44", "venc_656_data4", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A21, 45, "gpmc_a21", "kpd_col5", "c2c_datain5",
+			"gpio_45", "venc_656_data5", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A22, 46, "gpmc_a22", "kpd_col6", "c2c_datain6",
+			"gpio_46", "venc_656_data6", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A23, 47, "gpmc_a23", "kpd_col7", "c2c_datain7",
+			"gpio_47", "venc_656_data7", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A24, 48, "gpmc_a24", "kpd_col8", "c2c_clkout0",
+			"gpio_48", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_A25, 49, "gpmc_a25", NULL, "c2c_clkout1",
+			"gpio_49", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS0, 50, "gpmc_ncs0", NULL, NULL, "gpio_50",
+			"sys_ndmareq0", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NCS1, 51, "gpmc_ncs1", NULL, "c2c_dataout6",
+			"gpio_51", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS2, 52, "gpmc_ncs2", "kpd_row8",
+			"c2c_dataout7", "gpio_52", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS3, 53, "gpmc_ncs3", "gpmc_dir",
+			"c2c_dataout4", "gpio_53", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NWP, 54, "gpmc_nwp", "dsi1_te0", NULL, "gpio_54",
+			"sys_ndmareq1", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_CLK, 55, "gpmc_clk", NULL, NULL, "gpio_55",
+			"sys_ndmareq2", "sdmmc1_cmd", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NADV_ALE, 56, "gpmc_nadv_ale", "dsi1_te1", NULL,
+			"gpio_56", "sys_ndmareq3", "sdmmc1_clk", NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NOE, 0, "gpmc_noe", "sdmmc2_clk", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NWE, 0, "gpmc_nwe", "sdmmc2_cmd", NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NBE0_CLE, 59, "gpmc_nbe0_cle", "dsi2_te0", NULL,
+			"gpio_59", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_NBE1, 60, "gpmc_nbe1", NULL, "c2c_dataout5",
+			"gpio_60", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_WAIT0, 61, "gpmc_wait0", "dsi2_te1", NULL,
+			"gpio_61", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(GPMC_WAIT1, 62, "gpmc_wait1", NULL, "c2c_dataout2",
+			"gpio_62", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_WAIT2, 100, "gpmc_wait2", "usbc1_icusb_txen",
+			"c2c_dataout3", "gpio_100", "sys_ndmareq0", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS4, 101, "gpmc_ncs4", "dsi1_te0", "c2c_clkin0",
+			"gpio_101", "sys_ndmareq1", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS5, 102, "gpmc_ncs5", "dsi1_te1", "c2c_clkin1",
+			"gpio_102", "sys_ndmareq2", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS6, 103, "gpmc_ncs6", "dsi2_te0",
+			"c2c_dataout0", "gpio_103", "sys_ndmareq3", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(GPMC_NCS7, 104, "gpmc_ncs7", "dsi2_te1",
+			"c2c_dataout1", "gpio_104", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_HPD, 63, "hdmi_hpd", NULL, NULL, "gpio_63", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_CEC, 64, "hdmi_cec", NULL, NULL, "gpio_64", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_DDC_SCL, 65, "hdmi_ddc_scl", NULL, NULL,
+			"gpio_65", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDMI_DDC_SDA, 66, "hdmi_ddc_sda", NULL, NULL,
+			"gpio_66", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX0, 0, "csi21_dx0", NULL, NULL, "gpi_67", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY0, 0, "csi21_dy0", NULL, NULL, "gpi_68", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX1, 0, "csi21_dx1", NULL, NULL, "gpi_69", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY1, 0, "csi21_dy1", NULL, NULL, "gpi_70", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX2, 0, "csi21_dx2", NULL, NULL, "gpi_71", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY2, 0, "csi21_dy2", NULL, NULL, "gpi_72", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX3, 0, "csi21_dx3", NULL, NULL, "gpi_73", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY3, 0, "csi21_dy3", NULL, NULL, "gpi_74", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DX4, 0, "csi21_dx4", NULL, NULL, "gpi_75", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI21_DY4, 0, "csi21_dy4", NULL, NULL, "gpi_76", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DX0, 0, "csi22_dx0", NULL, NULL, "gpi_77", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DY0, 0, "csi22_dy0", NULL, NULL, "gpi_78", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DX1, 0, "csi22_dx1", NULL, NULL, "gpi_79", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CSI22_DY1, 0, "csi22_dy1", NULL, NULL, "gpi_80", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CAM_SHUTTER, 81, "cam_shutter", NULL, NULL, "gpio_81",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CAM_STROBE, 82, "cam_strobe", NULL, NULL, "gpio_82",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(CAM_GLOBALRESET, 83, "cam_globalreset", NULL, NULL,
+			"gpio_83", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_CLK, 84, "usbb1_ulpitll_clk",
+			"hsi1_cawake", NULL, "gpio_84", "usbb1_ulpiphy_clk",
+			NULL, "hw_dbg20", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_STP, 85, "usbb1_ulpitll_stp",
+			"hsi1_cadata", "mcbsp4_clkr", "gpio_85",
+			"usbb1_ulpiphy_stp", "usbb1_mm_rxdp", "hw_dbg21",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DIR, 86, "usbb1_ulpitll_dir",
+			"hsi1_caflag", "mcbsp4_fsr", "gpio_86",
+			"usbb1_ulpiphy_dir", NULL, "hw_dbg22", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_NXT, 87, "usbb1_ulpitll_nxt",
+			"hsi1_acready", "mcbsp4_fsx", "gpio_87",
+			"usbb1_ulpiphy_nxt", "usbb1_mm_rxdm", "hw_dbg23",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT0, 88, "usbb1_ulpitll_dat0",
+			"hsi1_acwake", "mcbsp4_clkx", "gpio_88",
+			"usbb1_ulpiphy_dat0", "usbb1_mm_txen", "hw_dbg24",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT1, 89, "usbb1_ulpitll_dat1",
+			"hsi1_acdata", "mcbsp4_dx", "gpio_89",
+			"usbb1_ulpiphy_dat1", "usbb1_mm_txdat", "hw_dbg25",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT2, 90, "usbb1_ulpitll_dat2",
+			"hsi1_acflag", "mcbsp4_dr", "gpio_90",
+			"usbb1_ulpiphy_dat2", "usbb1_mm_txse0", "hw_dbg26",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT3, 91, "usbb1_ulpitll_dat3",
+			"hsi1_caready", NULL, "gpio_91", "usbb1_ulpiphy_dat3",
+			"usbb1_mm_rxrcv", "hw_dbg27", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT4, 92, "usbb1_ulpitll_dat4",
+			"dmtimer8_pwm_evt", "abe_mcbsp3_dr", "gpio_92",
+			"usbb1_ulpiphy_dat4", NULL, "hw_dbg28", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT5, 93, "usbb1_ulpitll_dat5",
+			"dmtimer9_pwm_evt", "abe_mcbsp3_dx", "gpio_93",
+			"usbb1_ulpiphy_dat5", NULL, "hw_dbg29", "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT6, 94, "usbb1_ulpitll_dat6",
+			"dmtimer10_pwm_evt", "abe_mcbsp3_clkx", "gpio_94",
+			"usbb1_ulpiphy_dat6", "abe_dmic_din3", "hw_dbg30",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_ULPITLL_DAT7, 95, "usbb1_ulpitll_dat7",
+			"dmtimer11_pwm_evt", "abe_mcbsp3_fsx", "gpio_95",
+			"usbb1_ulpiphy_dat7", "abe_dmic_clk3", "hw_dbg31",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_HSIC_DATA, 96, "usbb1_hsic_data", NULL, NULL,
+			"gpio_96", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB1_HSIC_STROBE, 97, "usbb1_hsic_strobe", NULL,
+			NULL, "gpio_97", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBC1_ICUSB_DP, 98, "usbc1_icusb_dp", NULL, NULL,
+			"gpio_98", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBC1_ICUSB_DM, 99, "usbc1_icusb_dm", NULL, NULL,
+			"gpio_99", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_CLK, 100, "sdmmc1_clk", NULL, "dpm_emu19",
+			"gpio_100", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_CMD, 101, "sdmmc1_cmd", NULL, "uart1_rx",
+			"gpio_101", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT0, 102, "sdmmc1_dat0", NULL, "dpm_emu18",
+			"gpio_102", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT1, 103, "sdmmc1_dat1", NULL, "dpm_emu17",
+			"gpio_103", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT2, 104, "sdmmc1_dat2", NULL, "dpm_emu16",
+			"gpio_104", "jtag_tms_tmsc", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT3, 105, "sdmmc1_dat3", NULL, "dpm_emu15",
+			"gpio_105", "jtag_tck", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT4, 106, "sdmmc1_dat4", NULL, NULL,
+			"gpio_106", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT5, 107, "sdmmc1_dat5", NULL, NULL,
+			"gpio_107", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT6, 108, "sdmmc1_dat6", NULL, NULL,
+			"gpio_108", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC1_DAT7, 109, "sdmmc1_dat7", NULL, NULL,
+			"gpio_109", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_CLKX, 110, "abe_mcbsp2_clkx", "mcspi2_clk",
+			"abe_mcasp_ahclkx", "gpio_110", "usbb2_mm_rxdm",
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_DR, 111, "abe_mcbsp2_dr", "mcspi2_somi",
+			"abe_mcasp_axr", "gpio_111", "usbb2_mm_rxdp", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_DX, 112, "abe_mcbsp2_dx", "mcspi2_simo",
+			"abe_mcasp_amute", "gpio_112", "usbb2_mm_rxrcv", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP2_FSX, 113, "abe_mcbsp2_fsx", "mcspi2_cs0",
+			"abe_mcasp_afsx", "gpio_113", "usbb2_mm_txen", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_CLKX, 114, "abe_mcbsp1_clkx",
+			"abe_slimbus1_clock", NULL, "gpio_114", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_DR, 115, "abe_mcbsp1_dr",
+			"abe_slimbus1_data", NULL, "gpio_115", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_DX, 116, "abe_mcbsp1_dx", "sdmmc3_dat2",
+			"abe_mcasp_aclkx", "gpio_116", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_MCBSP1_FSX, 117, "abe_mcbsp1_fsx", "sdmmc3_dat3",
+			"abe_mcasp_amutein", "gpio_117", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_UL_DATA, 0, "abe_pdm_ul_data",
+			"abe_mcbsp3_dr", NULL, NULL, NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_DL_DATA, 0, "abe_pdm_dl_data",
+			"abe_mcbsp3_dx", NULL, NULL, NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_FRAME, 0, "abe_pdm_frame", "abe_mcbsp3_clkx",
+			NULL, NULL, NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_PDM_LB_CLK, 0, "abe_pdm_lb_clk", "abe_mcbsp3_fsx",
+			NULL, NULL, NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_CLKS, 118, "abe_clks", NULL, NULL, "gpio_118",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_CLK1, 119, "abe_dmic_clk1", NULL, NULL,
+			"gpio_119", "usbb2_mm_txse0", "uart4_cts", NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_DIN1, 120, "abe_dmic_din1", NULL, NULL,
+			"gpio_120", "usbb2_mm_txdat", "uart4_rts", NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_DIN2, 121, "abe_dmic_din2", "slimbus2_clock",
+			"abe_mcasp_axr", "gpio_121", NULL,
+			"dmtimer11_pwm_evt", NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(ABE_DMIC_DIN3, 122, "abe_dmic_din3", "slimbus2_data",
+			"abe_dmic_clk2", "gpio_122", NULL, "dmtimer9_pwm_evt",
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_CTS, 123, "uart2_cts", "sdmmc3_clk", NULL,
+			"gpio_123", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_RTS, 124, "uart2_rts", "sdmmc3_cmd", NULL,
+			"gpio_124", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_RX, 125, "uart2_rx", "sdmmc3_dat0", NULL,
+			"gpio_125", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART2_TX, 126, "uart2_tx", "sdmmc3_dat1", NULL,
+			"gpio_126", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(HDQ_SIO, 127, "hdq_sio", "i2c3_sccb", "i2c2_sccb",
+			"gpio_127", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C1_SCL, 0, "i2c1_scl", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(I2C1_SDA, 0, "i2c1_sda", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(I2C2_SCL, 128, "i2c2_scl", "uart1_rx", NULL,
+			"gpio_128", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C2_SDA, 129, "i2c2_sda", "uart1_tx", NULL,
+			"gpio_129", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C3_SCL, 130, "i2c3_scl", NULL, NULL, "gpio_130",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C3_SDA, 131, "i2c3_sda", NULL, NULL, "gpio_131",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C4_SCL, 132, "i2c4_scl", NULL, NULL, "gpio_132",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(I2C4_SDA, 133, "i2c4_sda", NULL, NULL, "gpio_133",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CLK, 134, "mcspi1_clk", NULL, NULL, "gpio_134",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_SOMI, 135, "mcspi1_somi", NULL, NULL,
+			"gpio_135", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_SIMO, 136, "mcspi1_simo", NULL, NULL,
+			"gpio_136", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS0, 137, "mcspi1_cs0", NULL, NULL, "gpio_137",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS1, 138, "mcspi1_cs1", "uart1_rx", NULL,
+			"gpio_138", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS2, 139, "mcspi1_cs2", "uart1_cts",
+			"slimbus2_clock", "gpio_139", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI1_CS3, 140, "mcspi1_cs3", "uart1_rts",
+			"slimbus2_data", "gpio_140", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(UART3_CTS_RCTX, 141, "uart3_cts_rctx", "uart1_tx",
+			NULL, "gpio_141", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART3_RTS_SD, 142, "uart3_rts_sd", NULL, NULL,
+			"gpio_142", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART3_RX_IRRX, 143, "uart3_rx_irrx",
+			"dmtimer8_pwm_evt", NULL, "gpio_143", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART3_TX_IRTX, 144, "uart3_tx_irtx",
+			"dmtimer9_pwm_evt", NULL, "gpio_144", NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_CLK, 145, "sdmmc5_clk", "mcspi2_clk",
+			"usbc1_icusb_dp", "gpio_145", NULL, "sdmmc2_clk",
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_CMD, 146, "sdmmc5_cmd", "mcspi2_simo",
+			"usbc1_icusb_dm", "gpio_146", NULL, "sdmmc2_cmd",
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT0, 147, "sdmmc5_dat0", "mcspi2_somi",
+			"usbc1_icusb_rcv", "gpio_147", NULL, "sdmmc2_dat0",
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT1, 148, "sdmmc5_dat1", NULL,
+			"usbc1_icusb_txen", "gpio_148", NULL, "sdmmc2_dat1",
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT2, 149, "sdmmc5_dat2", "mcspi2_cs1", NULL,
+			"gpio_149", NULL, "sdmmc2_dat2", NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SDMMC5_DAT3, 150, "sdmmc5_dat3", "mcspi2_cs0", NULL,
+			"gpio_150", NULL, "sdmmc2_dat3", NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_CLK, 151, "mcspi4_clk", "sdmmc4_clk",
+			"kpd_col6", "gpio_151", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_SIMO, 152, "mcspi4_simo", "sdmmc4_cmd",
+			"kpd_col7", "gpio_152", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_SOMI, 153, "mcspi4_somi", "sdmmc4_dat0",
+			"kpd_row6", "gpio_153", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(MCSPI4_CS0, 154, "mcspi4_cs0", "sdmmc4_dat3",
+			"kpd_row7", "gpio_154", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(UART4_RX, 155, "uart4_rx", "sdmmc4_dat2", "kpd_row8",
+			"gpio_155", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(UART4_TX, 156, "uart4_tx", "sdmmc4_dat1", "kpd_col8",
+			"gpio_156", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_CLK, 157, "usbb2_ulpitll_clk",
+			"usbb2_ulpiphy_clk", "sdmmc4_cmd", "gpio_157",
+			"hsi2_cawake", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_STP, 158, "usbb2_ulpitll_stp",
+			"usbb2_ulpiphy_stp", "sdmmc4_clk", "gpio_158",
+			"hsi2_cadata", "dispc2_data23", NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DIR, 159, "usbb2_ulpitll_dir",
+			"usbb2_ulpiphy_dir", "sdmmc4_dat0", "gpio_159",
+			"hsi2_caflag", "dispc2_data22", NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_NXT, 160, "usbb2_ulpitll_nxt",
+			"usbb2_ulpiphy_nxt", "sdmmc4_dat1", "gpio_160",
+			"hsi2_acready", "dispc2_data21", NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT0, 161, "usbb2_ulpitll_dat0",
+			"usbb2_ulpiphy_dat0", "sdmmc4_dat2", "gpio_161",
+			"hsi2_acwake", "dispc2_data20", "usbb2_mm_txen",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT1, 162, "usbb2_ulpitll_dat1",
+			"usbb2_ulpiphy_dat1", "sdmmc4_dat3", "gpio_162",
+			"hsi2_acdata", "dispc2_data19", "usbb2_mm_txdat",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT2, 163, "usbb2_ulpitll_dat2",
+			"usbb2_ulpiphy_dat2", "sdmmc3_dat2", "gpio_163",
+			"hsi2_acflag", "dispc2_data18", "usbb2_mm_txse0",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT3, 164, "usbb2_ulpitll_dat3",
+			"usbb2_ulpiphy_dat3", "sdmmc3_dat1", "gpio_164",
+			"hsi2_caready", "dispc2_data15", "rfbi_data15",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT4, 165, "usbb2_ulpitll_dat4",
+			"usbb2_ulpiphy_dat4", "sdmmc3_dat0", "gpio_165",
+			"mcspi3_somi", "dispc2_data14", "rfbi_data14",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT5, 166, "usbb2_ulpitll_dat5",
+			"usbb2_ulpiphy_dat5", "sdmmc3_dat3", "gpio_166",
+			"mcspi3_cs0", "dispc2_data13", "rfbi_data13",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT6, 167, "usbb2_ulpitll_dat6",
+			"usbb2_ulpiphy_dat6", "sdmmc3_cmd", "gpio_167",
+			"mcspi3_simo", "dispc2_data12", "rfbi_data12",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_ULPITLL_DAT7, 168, "usbb2_ulpitll_dat7",
+			"usbb2_ulpiphy_dat7", "sdmmc3_clk", "gpio_168",
+			"mcspi3_clk", "dispc2_data11", "rfbi_data11",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_HSIC_DATA, 169, "usbb2_hsic_data", NULL, NULL,
+			"gpio_169", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBB2_HSIC_STROBE, 170, "usbb2_hsic_strobe", NULL,
+			NULL, "gpio_170", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_COL3, 171, "kpd_col3", "kpd_col0", NULL,
+			"gpio_171", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_COL4, 172, "kpd_col4", "kpd_col1", NULL,
+			"gpio_172", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_COL5, 173, "kpd_col5", "kpd_col2", NULL,
+			"gpio_173", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_COL0, 174, "kpd_col0", "kpd_col3", NULL,
+			"gpio_174", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_COL1, 0, "kpd_col1", "kpd_col4", NULL, "gpio_0",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_COL2, 1, "kpd_col2", "kpd_col5", NULL, "gpio_1",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_ROW3, 175, "kpd_row3", "kpd_row0", NULL,
+			"gpio_175", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_ROW4, 176, "kpd_row4", "kpd_row1", NULL,
+			"gpio_176", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_ROW5, 177, "kpd_row5", "kpd_row2", NULL,
+			"gpio_177", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_ROW0, 178, "kpd_row0", "kpd_row3", NULL,
+			"gpio_178", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_ROW1, 2, "kpd_row1", "kpd_row4", NULL, "gpio_2",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(KPD_ROW2, 3, "kpd_row2", "kpd_row5", NULL, "gpio_3",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBA0_OTG_CE, 0, "usba0_otg_ce", NULL, NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(USBA0_OTG_DP, 0, "usba0_otg_dp", "uart3_rx_irrx",
+			"uart2_rx", NULL, NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(USBA0_OTG_DM, 0, "usba0_otg_dm", "uart3_tx_irtx",
+			"uart2_tx", NULL, NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK1_OUT, 181, "fref_clk1_out", NULL, NULL,
+			"gpio_181", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK2_OUT, 182, "fref_clk2_out", NULL, NULL,
+			"gpio_182", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_NIRQ1, 0, "sys_nirq1", NULL, NULL, NULL, NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_NIRQ2, 183, "sys_nirq2", NULL, NULL, "gpio_183",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT0, 184, "sys_boot0", NULL, NULL, "gpio_184",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT1, 185, "sys_boot1", NULL, NULL, "gpio_185",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT2, 186, "sys_boot2", NULL, NULL, "gpio_186",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT3, 187, "sys_boot3", NULL, NULL, "gpio_187",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT4, 188, "sys_boot4", NULL, NULL, "gpio_188",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT5, 189, "sys_boot5", NULL, NULL, "gpio_189",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU0, 11, "dpm_emu0", NULL, NULL, "gpio_11", NULL,
+			NULL, "hw_dbg0", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU1, 12, "dpm_emu1", NULL, NULL, "gpio_12", NULL,
+			NULL, "hw_dbg1", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU2, 13, "dpm_emu2", "usba0_ulpiphy_clk", NULL,
+			"gpio_13", NULL, "dispc2_fid", "hw_dbg2",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU3, 14, "dpm_emu3", "usba0_ulpiphy_stp", NULL,
+			"gpio_14", "rfbi_data10", "dispc2_data10", "hw_dbg3",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU4, 15, "dpm_emu4", "usba0_ulpiphy_dir", NULL,
+			"gpio_15", "rfbi_data9", "dispc2_data9", "hw_dbg4",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU5, 16, "dpm_emu5", "usba0_ulpiphy_nxt", NULL,
+			"gpio_16", "rfbi_te_vsync0", "dispc2_data16",
+			"hw_dbg5", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU6, 17, "dpm_emu6", "usba0_ulpiphy_dat0",
+			"uart3_tx_irtx", "gpio_17", "rfbi_hsync0",
+			"dispc2_data17", "hw_dbg6", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU7, 18, "dpm_emu7", "usba0_ulpiphy_dat1",
+			"uart3_rx_irrx", "gpio_18", "rfbi_cs0",
+			"dispc2_hsync", "hw_dbg7", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU8, 19, "dpm_emu8", "usba0_ulpiphy_dat2",
+			"uart3_rts_sd", "gpio_19", "rfbi_re", "dispc2_pclk",
+			"hw_dbg8", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU9, 20, "dpm_emu9", "usba0_ulpiphy_dat3",
+			"uart3_cts_rctx", "gpio_20", "rfbi_we",
+			"dispc2_vsync", "hw_dbg9", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU10, 21, "dpm_emu10", "usba0_ulpiphy_dat4",
+			NULL, "gpio_21", "rfbi_a0", "dispc2_de", "hw_dbg10",
+			"safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU11, 22, "dpm_emu11", "usba0_ulpiphy_dat5",
+			NULL, "gpio_22", "rfbi_data8", "dispc2_data8",
+			"hw_dbg11", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU12, 23, "dpm_emu12", "usba0_ulpiphy_dat6",
+			NULL, "gpio_23", "rfbi_data7", "dispc2_data7",
+			"hw_dbg12", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU13, 24, "dpm_emu13", "usba0_ulpiphy_dat7",
+			NULL, "gpio_24", "rfbi_data6", "dispc2_data6",
+			"hw_dbg13", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU14, 25, "dpm_emu14", "sys_drm_msecure",
+			"uart1_rx", "gpio_25", "rfbi_data5", "dispc2_data5",
+			"hw_dbg14", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU15, 26, "dpm_emu15", "sys_secure_indicator",
+			NULL, "gpio_26", "rfbi_data4", "dispc2_data4",
+			"hw_dbg15", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU16, 27, "dpm_emu16", "dmtimer8_pwm_evt",
+			"dsi1_te0", "gpio_27", "rfbi_data3", "dispc2_data3",
+			"hw_dbg16", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU17, 28, "dpm_emu17", "dmtimer9_pwm_evt",
+			"dsi1_te1", "gpio_28", "rfbi_data2", "dispc2_data2",
+			"hw_dbg17", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU18, 190, "dpm_emu18", "dmtimer10_pwm_evt",
+			"dsi2_te0", "gpio_190", "rfbi_data1", "dispc2_data1",
+			"hw_dbg18", "safe_mode"),
+	_OMAP4_MUXENTRY(DPM_EMU19, 191, "dpm_emu19", "dmtimer11_pwm_evt",
+			"dsi2_te1", "gpio_191", "rfbi_data0", "dispc2_data0",
+			"hw_dbg19", "safe_mode"),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
+/*
+ * Balls for 44XX CBS package
+ * 547-pin CBS ES2.0 S-FPGA-N547, 0.40mm Ball Pitch (Top),
+ *				  0.40mm Ball Pitch (Bottom)
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
+		&& defined(CONFIG_OMAP_PACKAGE_CBS)
+static struct omap_ball __initdata omap4_core_cbs_ball[] = {
+	_OMAP4_BALLENTRY(GPMC_AD0, "c12", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD1, "d12", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD2, "c13", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD3, "d13", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD4, "c15", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD5, "d15", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD6, "a16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD7, "b16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD8, "c16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD9, "d16", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD10, "c17", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD11, "d17", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD12, "c18", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD13, "d18", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD14, "c19", NULL),
+	_OMAP4_BALLENTRY(GPMC_AD15, "d19", NULL),
+	_OMAP4_BALLENTRY(GPMC_A16, "b17", NULL),
+	_OMAP4_BALLENTRY(GPMC_A17, "a18", NULL),
+	_OMAP4_BALLENTRY(GPMC_A18, "b18", NULL),
+	_OMAP4_BALLENTRY(GPMC_A19, "a19", NULL),
+	_OMAP4_BALLENTRY(GPMC_A20, "b19", NULL),
+	_OMAP4_BALLENTRY(GPMC_A21, "b20", NULL),
+	_OMAP4_BALLENTRY(GPMC_A22, "a21", NULL),
+	_OMAP4_BALLENTRY(GPMC_A23, "b21", NULL),
+	_OMAP4_BALLENTRY(GPMC_A24, "c20", NULL),
+	_OMAP4_BALLENTRY(GPMC_A25, "d20", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS0, "b25", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS1, "c21", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS2, "d21", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS3, "c22", NULL),
+	_OMAP4_BALLENTRY(GPMC_NWP, "c25", NULL),
+	_OMAP4_BALLENTRY(GPMC_CLK, "b22", NULL),
+	_OMAP4_BALLENTRY(GPMC_NADV_ALE, "d25", NULL),
+	_OMAP4_BALLENTRY(GPMC_NOE, "b11", NULL),
+	_OMAP4_BALLENTRY(GPMC_NWE, "b12", NULL),
+	_OMAP4_BALLENTRY(GPMC_NBE0_CLE, "c23", NULL),
+	_OMAP4_BALLENTRY(GPMC_NBE1, "d22", NULL),
+	_OMAP4_BALLENTRY(GPMC_WAIT0, "b26", NULL),
+	_OMAP4_BALLENTRY(GPMC_WAIT1, "b23", NULL),
+	_OMAP4_BALLENTRY(GPMC_WAIT2, "d23", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS4, "a24", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS5, "b24", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS6, "c24", NULL),
+	_OMAP4_BALLENTRY(GPMC_NCS7, "d24", NULL),
+	_OMAP4_BALLENTRY(HDMI_HPD, "b9", NULL),
+	_OMAP4_BALLENTRY(HDMI_CEC, "b10", NULL),
+	_OMAP4_BALLENTRY(HDMI_DDC_SCL, "a8", NULL),
+	_OMAP4_BALLENTRY(HDMI_DDC_SDA, "b8", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX0, "r26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY0, "r25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX1, "t26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY1, "t25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX2, "u26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY2, "u25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX3, "v26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY3, "v25", NULL),
+	_OMAP4_BALLENTRY(CSI21_DX4, "w26", NULL),
+	_OMAP4_BALLENTRY(CSI21_DY4, "w25", NULL),
+	_OMAP4_BALLENTRY(CSI22_DX0, "m26", NULL),
+	_OMAP4_BALLENTRY(CSI22_DY0, "m25", NULL),
+	_OMAP4_BALLENTRY(CSI22_DX1, "n26", NULL),
+	_OMAP4_BALLENTRY(CSI22_DY1, "n25", NULL),
+	_OMAP4_BALLENTRY(CAM_SHUTTER, "t27", NULL),
+	_OMAP4_BALLENTRY(CAM_STROBE, "u27", NULL),
+	_OMAP4_BALLENTRY(CAM_GLOBALRESET, "v27", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_CLK, "ae18", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_STP, "ag19", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DIR, "af19", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_NXT, "ae19", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT0, "af18", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT1, "ag18", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT2, "ae17", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT3, "af17", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT4, "ah17", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT5, "ae16", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT6, "af16", NULL),
+	_OMAP4_BALLENTRY(USBB1_ULPITLL_DAT7, "ag16", NULL),
+	_OMAP4_BALLENTRY(USBB1_HSIC_DATA, "af14", NULL),
+	_OMAP4_BALLENTRY(USBB1_HSIC_STROBE, "ae14", NULL),
+	_OMAP4_BALLENTRY(USBC1_ICUSB_DP, "h2", NULL),
+	_OMAP4_BALLENTRY(USBC1_ICUSB_DM, "h3", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_CLK, "d2", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_CMD, "e3", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT0, "e4", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT1, "e2", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT2, "e1", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT3, "f4", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT4, "f3", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT5, "f1", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT6, "g4", NULL),
+	_OMAP4_BALLENTRY(SDMMC1_DAT7, "g3", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_CLKX, "ad27", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_DR, "ad26", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_DX, "ad25", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP2_FSX, "ac28", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_CLKX, "ac26", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_DR, "ac25", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_DX, "ab25", NULL),
+	_OMAP4_BALLENTRY(ABE_MCBSP1_FSX, "ac27", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_UL_DATA, "ag25", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_DL_DATA, "af25", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_FRAME, "ae25", NULL),
+	_OMAP4_BALLENTRY(ABE_PDM_LB_CLK, "af26", NULL),
+	_OMAP4_BALLENTRY(ABE_CLKS, "ah26", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_CLK1, "ae24", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_DIN1, "af24", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_DIN2, "ag24", NULL),
+	_OMAP4_BALLENTRY(ABE_DMIC_DIN3, "ah24", NULL),
+	_OMAP4_BALLENTRY(UART2_CTS, "ab26", NULL),
+	_OMAP4_BALLENTRY(UART2_RTS, "ab27", NULL),
+	_OMAP4_BALLENTRY(UART2_RX, "aa25", NULL),
+	_OMAP4_BALLENTRY(UART2_TX, "aa26", NULL),
+	_OMAP4_BALLENTRY(HDQ_SIO, "aa27", NULL),
+	_OMAP4_BALLENTRY(I2C1_SCL, "ae28", NULL),
+	_OMAP4_BALLENTRY(I2C1_SDA, "ae26", NULL),
+	_OMAP4_BALLENTRY(I2C2_SCL, "c26", NULL),
+	_OMAP4_BALLENTRY(I2C2_SDA, "d26", NULL),
+	_OMAP4_BALLENTRY(I2C3_SCL, "w27", NULL),
+	_OMAP4_BALLENTRY(I2C3_SDA, "y27", NULL),
+	_OMAP4_BALLENTRY(I2C4_SCL, "ag21", NULL),
+	_OMAP4_BALLENTRY(I2C4_SDA, "ah22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CLK, "af22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_SOMI, "ae22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_SIMO, "ag22", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS0, "ae23", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS1, "af23", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS2, "ag23", NULL),
+	_OMAP4_BALLENTRY(MCSPI1_CS3, "ah23", NULL),
+	_OMAP4_BALLENTRY(UART3_CTS_RCTX, "f27", NULL),
+	_OMAP4_BALLENTRY(UART3_RTS_SD, "f28", NULL),
+	_OMAP4_BALLENTRY(UART3_RX_IRRX, "g27", NULL),
+	_OMAP4_BALLENTRY(UART3_TX_IRTX, "g28", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_CLK, "ae5", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_CMD, "af5", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT0, "ae4", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT1, "af4", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT2, "ag3", NULL),
+	_OMAP4_BALLENTRY(SDMMC5_DAT3, "af3", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_CLK, "ae21", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_SIMO, "af20", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_SOMI, "af21", NULL),
+	_OMAP4_BALLENTRY(MCSPI4_CS0, "ae20", NULL),
+	_OMAP4_BALLENTRY(UART4_RX, "ag20", NULL),
+	_OMAP4_BALLENTRY(UART4_TX, "ah19", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_CLK, "ag12", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_STP, "af12", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DIR, "ae12", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_NXT, "ag13", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT0, "ae11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT1, "af11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT2, "ag11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT3, "ah11", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT4, "ae10", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT5, "af10", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT6, "ag10", NULL),
+	_OMAP4_BALLENTRY(USBB2_ULPITLL_DAT7, "ae9", NULL),
+	_OMAP4_BALLENTRY(USBB2_HSIC_DATA, "af13", NULL),
+	_OMAP4_BALLENTRY(USBB2_HSIC_STROBE, "ae13", NULL),
+	_OMAP4_BALLENTRY(KPD_COL3, "g26", NULL),
+	_OMAP4_BALLENTRY(KPD_COL4, "g25", NULL),
+	_OMAP4_BALLENTRY(KPD_COL5, "h26", NULL),
+	_OMAP4_BALLENTRY(KPD_COL0, "h25", NULL),
+	_OMAP4_BALLENTRY(KPD_COL1, "j27", NULL),
+	_OMAP4_BALLENTRY(KPD_COL2, "h27", NULL),
+	_OMAP4_BALLENTRY(KPD_ROW3, "j26", NULL),
+	_OMAP4_BALLENTRY(KPD_ROW4, "j25", NULL),
+	_OMAP4_BALLENTRY(KPD_ROW5, "k26", NULL),
+	_OMAP4_BALLENTRY(KPD_ROW0, "k25", NULL),
+	_OMAP4_BALLENTRY(KPD_ROW1, "l27", NULL),
+	_OMAP4_BALLENTRY(KPD_ROW2, "k27", NULL),
+	_OMAP4_BALLENTRY(USBA0_OTG_CE, "c3", NULL),
+	_OMAP4_BALLENTRY(USBA0_OTG_DP, "b5", NULL),
+	_OMAP4_BALLENTRY(USBA0_OTG_DM, "b4", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK1_OUT, "aa28", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK2_OUT, "y28", NULL),
+	_OMAP4_BALLENTRY(SYS_NIRQ1, "ae6", NULL),
+	_OMAP4_BALLENTRY(SYS_NIRQ2, "af6", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT0, "f26", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT1, "e27", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT2, "e26", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT3, "e25", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT4, "d28", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT5, "d27", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU0, "m2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU1, "n2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU2, "p2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU3, "v1", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU4, "v2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU5, "w1", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU6, "w2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU7, "w3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU8, "w4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU9, "y2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU10, "y3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU11, "y4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU12, "aa1", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU13, "aa2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU14, "aa3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU15, "aa4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU16, "ab2", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU17, "ab3", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU18, "ab4", NULL),
+	_OMAP4_BALLENTRY(DPM_EMU19, "ac4", NULL),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap4_core_cbs_ball  NULL
+#endif
+
+/*
+ * Superset of all mux modes for the omap4 wkup partition
+ */
+static struct omap_mux __initdata omap4_wkup_muxmodes[] = {
+	_OMAP4_MUXENTRY(SIM_IO, 0, "sim_io", NULL, NULL, "gpio_wk0", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SIM_CLK, 1, "sim_clk", NULL, NULL, "gpio_wk1", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SIM_RESET, 2, "sim_reset", NULL, NULL, "gpio_wk2",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SIM_CD, 3, "sim_cd", NULL, NULL, "gpio_wk3", NULL,
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SIM_PWRCTRL, 4, "sim_pwrctrl", NULL, NULL, "gpio_wk4",
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(SR_SCL, 0, "sr_scl", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(SR_SDA, 0, "sr_sda", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(FREF_XTAL_IN, 0, "fref_xtal_in", NULL, NULL, NULL,
+			"c2c_wakereqin", NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(FREF_SLICER_IN, 0, "fref_slicer_in", NULL, NULL,
+			"gpi_wk5", "c2c_wakereqin", NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK_IOREQ, 0, "fref_clk_ioreq", NULL, NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(FREF_CLK0_OUT, 6, "fref_clk0_out", "fref_clk1_req",
+			"sys_drm_msecure", "gpio_wk6", NULL, NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK3_REQ, 30, "fref_clk3_req", "fref_clk1_req",
+			"sys_drm_msecure", "gpio_wk30", "c2c_wakereqin", NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK3_OUT, 31, "fref_clk3_out", "fref_clk2_req",
+			"sys_secure_indicator", "gpio_wk31", "c2c_wakereqout",
+			NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(FREF_CLK4_REQ, 7, "fref_clk4_req", "fref_clk5_out",
+			NULL, "gpio_wk7", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(FREF_CLK4_OUT, 8, "fref_clk4_out", NULL, NULL,
+			"gpio_wk8", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(SYS_32K, 0, "sys_32k", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(SYS_NRESPWRON, 0, "sys_nrespwron", NULL, NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(SYS_NRESWARM, 0, "sys_nreswarm", NULL, NULL, NULL,
+			NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(SYS_PWR_REQ, 0, "sys_pwr_req", NULL, NULL, NULL, NULL,
+			NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(SYS_PWRON_RESET_OUT, 29, "sys_pwron_reset_out", NULL,
+			NULL, "gpio_wk29", NULL, NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(SYS_BOOT6, 9, "sys_boot6", "dpm_emu18", NULL,
+			"gpio_wk9", "c2c_wakereqout", NULL, NULL,
+			"safe_mode"),
+	_OMAP4_MUXENTRY(SYS_BOOT7, 10, "sys_boot7", "dpm_emu19", NULL,
+			"gpio_wk10", NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(JTAG_NTRST, 0, "jtag_ntrst", NULL, NULL, NULL, NULL,
+			NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(JTAG_TCK, 0, "jtag_tck", NULL, NULL, NULL, NULL, NULL,
+			NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(JTAG_RTCK, 0, "jtag_rtck", NULL, NULL, NULL, NULL,
+			NULL, NULL, NULL),
+	_OMAP4_MUXENTRY(JTAG_TMS_TMSC, 0, "jtag_tms_tmsc", NULL, NULL, NULL,
+			NULL, NULL, NULL, "safe_mode"),
+	_OMAP4_MUXENTRY(JTAG_TDI, 0, "jtag_tdi", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	_OMAP4_MUXENTRY(JTAG_TDO, 0, "jtag_tdo", NULL, NULL, NULL, NULL, NULL,
+			NULL, NULL),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
+/*
+ * Balls for 44XX CBL & CBS package - wakeup partition
+ * 547-pin CBL ES1.0 S-FPGA-N547, 0.40mm Ball Pitch (Top),
+ *				  0.40mm Ball Pitch (Bottom)
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
+		&& defined(CONFIG_OMAP_PACKAGE_CBL)
+static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
+	_OMAP4_BALLENTRY(SIM_IO, "h4", NULL),
+	_OMAP4_BALLENTRY(SIM_CLK, "j2", NULL),
+	_OMAP4_BALLENTRY(SIM_RESET, "g2", NULL),
+	_OMAP4_BALLENTRY(SIM_CD, "j1", NULL),
+	_OMAP4_BALLENTRY(SIM_PWRCTRL, "k1", NULL),
+	_OMAP4_BALLENTRY(SR_SCL, "ag9", NULL),
+	_OMAP4_BALLENTRY(SR_SDA, "af9", NULL),
+	_OMAP4_BALLENTRY(FREF_XTAL_IN, "ah6", NULL),
+	_OMAP4_BALLENTRY(FREF_SLICER_IN, "ag8", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK_IOREQ, "ad1", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK0_OUT, "ad2", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK3_REQ, "ad3", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK3_OUT, "ad4", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK4_REQ, "ac2", NULL),
+	_OMAP4_BALLENTRY(FREF_CLK4_OUT, "ac3", NULL),
+	_OMAP4_BALLENTRY(SYS_32K, "ag7", NULL),
+	_OMAP4_BALLENTRY(SYS_NRESPWRON, "ae7", NULL),
+	_OMAP4_BALLENTRY(SYS_NRESWARM, "af7", NULL),
+	_OMAP4_BALLENTRY(SYS_PWR_REQ, "ah7", NULL),
+	_OMAP4_BALLENTRY(SYS_PWRON_RESET_OUT, "ag6", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT6, "af8", NULL),
+	_OMAP4_BALLENTRY(SYS_BOOT7, "ae8", NULL),
+	_OMAP4_BALLENTRY(JTAG_NTRST, "ah2", NULL),
+	_OMAP4_BALLENTRY(JTAG_TCK, "ag1", NULL),
+	_OMAP4_BALLENTRY(JTAG_RTCK, "ae3", NULL),
+	_OMAP4_BALLENTRY(JTAG_TMS_TMSC, "ah1", NULL),
+	_OMAP4_BALLENTRY(JTAG_TDI, "ae1", NULL),
+	_OMAP4_BALLENTRY(JTAG_TDO, "ae2", NULL),
+	{ .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap4_wkup_cbl_cbs_ball  NULL
+#endif
+
+int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
+{
+	struct omap_ball *package_balls_core;
+	struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball;
+	struct omap_mux *core_muxmodes;
+	int ret;
+
+	switch (flags & OMAP_PACKAGE_MASK) {
+	case OMAP_PACKAGE_CBL:
+		pr_debug("%s: OMAP4430 ES1.0 -> OMAP_PACKAGE_CBL\n", __func__);
+		package_balls_core = omap4_core_cbl_ball;
+		core_muxmodes = omap4_core_muxmodes;
+		break;
+	case OMAP_PACKAGE_CBS:
+		pr_debug("%s: OMAP4430 ES2.X -> OMAP_PACKAGE_CBS\n", __func__);
+		package_balls_core = omap4_core_cbs_ball;
+		core_muxmodes = omap4_es2_core_muxmodes;
+		break;
+	default:
+		pr_err("%s: Unknown omap package, mux disabled\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = omap_mux_init("core",
+			    OMAP_MUX_GPIO_IN_MODE3,
+			    OMAP4_CTRL_MODULE_PAD_CORE_MUX_PBASE,
+			    OMAP4_CTRL_MODULE_PAD_CORE_MUX_SIZE,
+			    core_muxmodes, NULL, board_subset,
+			    package_balls_core);
+	if (ret)
+		return ret;
+
+	ret = omap_mux_init("wkup",
+			    OMAP_MUX_GPIO_IN_MODE3,
+			    OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE,
+			    OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE,
+			    omap4_wkup_muxmodes, NULL, board_subset,
+			    package_balls_wkup);
+
+	return ret;
+}
+
diff --git a/arch/arm/mach-omap2/mux44xx.h b/arch/arm/mach-omap2/mux44xx.h
new file mode 100644
index 0000000..c635026
--- /dev/null
+++ b/arch/arm/mach-omap2/mux44xx.h
@@ -0,0 +1,298 @@
+/*
+ * OMAP44xx MUX registers and bitfields
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_MUX_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_MUX_44XX_H
+
+#define OMAP4_MUX(M0, mux_value)					\
+{									\
+	.reg_offset	= (OMAP4_CTRL_MODULE_PAD_##M0##_OFFSET),	\
+	.value		= (mux_value),					\
+}
+
+/* ctrl_module_pad_core base address */
+#define OMAP4_CTRL_MODULE_PAD_CORE_MUX_PBASE			0x4a100000
+
+/* ctrl_module_pad_core registers offset */
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD0_OFFSET			0x0040
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD1_OFFSET			0x0042
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD2_OFFSET			0x0044
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD3_OFFSET			0x0046
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD4_OFFSET			0x0048
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD5_OFFSET			0x004a
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD6_OFFSET			0x004c
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD7_OFFSET			0x004e
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD8_OFFSET			0x0050
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD9_OFFSET			0x0052
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD10_OFFSET			0x0054
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD11_OFFSET			0x0056
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD12_OFFSET			0x0058
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD13_OFFSET			0x005a
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD14_OFFSET			0x005c
+#define OMAP4_CTRL_MODULE_PAD_GPMC_AD15_OFFSET			0x005e
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A16_OFFSET			0x0060
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A17_OFFSET			0x0062
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A18_OFFSET			0x0064
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A19_OFFSET			0x0066
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A20_OFFSET			0x0068
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A21_OFFSET			0x006a
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A22_OFFSET			0x006c
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A23_OFFSET			0x006e
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A24_OFFSET			0x0070
+#define OMAP4_CTRL_MODULE_PAD_GPMC_A25_OFFSET			0x0072
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS0_OFFSET			0x0074
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS1_OFFSET			0x0076
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS2_OFFSET			0x0078
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS3_OFFSET			0x007a
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NWP_OFFSET			0x007c
+#define OMAP4_CTRL_MODULE_PAD_GPMC_CLK_OFFSET			0x007e
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NADV_ALE_OFFSET		0x0080
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NOE_OFFSET			0x0082
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NWE_OFFSET			0x0084
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NBE0_CLE_OFFSET		0x0086
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NBE1_OFFSET			0x0088
+#define OMAP4_CTRL_MODULE_PAD_GPMC_WAIT0_OFFSET			0x008a
+#define OMAP4_CTRL_MODULE_PAD_GPMC_WAIT1_OFFSET			0x008c
+#define OMAP4_CTRL_MODULE_PAD_C2C_DATA11_OFFSET			0x008e
+#define OMAP4_CTRL_MODULE_PAD_C2C_DATA12_OFFSET			0x0090
+#define OMAP4_CTRL_MODULE_PAD_C2C_DATA13_OFFSET			0x0092
+#define OMAP4_CTRL_MODULE_PAD_C2C_DATA14_OFFSET			0x0094
+#define OMAP4_CTRL_MODULE_PAD_C2C_DATA15_OFFSET			0x0096
+#define OMAP4_CTRL_MODULE_PAD_HDMI_HPD_OFFSET			0x0098
+#define OMAP4_CTRL_MODULE_PAD_HDMI_CEC_OFFSET			0x009a
+#define OMAP4_CTRL_MODULE_PAD_HDMI_DDC_SCL_OFFSET		0x009c
+#define OMAP4_CTRL_MODULE_PAD_HDMI_DDC_SDA_OFFSET		0x009e
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DX0_OFFSET			0x00a0
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DY0_OFFSET			0x00a2
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DX1_OFFSET			0x00a4
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DY1_OFFSET			0x00a6
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DX2_OFFSET			0x00a8
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DY2_OFFSET			0x00aa
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DX3_OFFSET			0x00ac
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DY3_OFFSET			0x00ae
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DX4_OFFSET			0x00b0
+#define OMAP4_CTRL_MODULE_PAD_CSI21_DY4_OFFSET			0x00b2
+#define OMAP4_CTRL_MODULE_PAD_CSI22_DX0_OFFSET			0x00b4
+#define OMAP4_CTRL_MODULE_PAD_CSI22_DY0_OFFSET			0x00b6
+#define OMAP4_CTRL_MODULE_PAD_CSI22_DX1_OFFSET			0x00b8
+#define OMAP4_CTRL_MODULE_PAD_CSI22_DY1_OFFSET			0x00ba
+#define OMAP4_CTRL_MODULE_PAD_CAM_SHUTTER_OFFSET		0x00bc
+#define OMAP4_CTRL_MODULE_PAD_CAM_STROBE_OFFSET			0x00be
+#define OMAP4_CTRL_MODULE_PAD_CAM_GLOBALRESET_OFFSET		0x00c0
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_CLK_OFFSET		0x00c2
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_STP_OFFSET		0x00c4
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DIR_OFFSET		0x00c6
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_NXT_OFFSET		0x00c8
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT0_OFFSET		0x00ca
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT1_OFFSET		0x00cc
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT2_OFFSET		0x00ce
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT3_OFFSET		0x00d0
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT4_OFFSET		0x00d2
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT5_OFFSET		0x00d4
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT6_OFFSET		0x00d6
+#define OMAP4_CTRL_MODULE_PAD_USBB1_ULPITLL_DAT7_OFFSET		0x00d8
+#define OMAP4_CTRL_MODULE_PAD_USBB1_HSIC_DATA_OFFSET		0x00da
+#define OMAP4_CTRL_MODULE_PAD_USBB1_HSIC_STROBE_OFFSET		0x00dc
+#define OMAP4_CTRL_MODULE_PAD_USBC1_ICUSB_DP_OFFSET		0x00de
+#define OMAP4_CTRL_MODULE_PAD_USBC1_ICUSB_DM_OFFSET		0x00e0
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_CLK_OFFSET			0x00e2
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_CMD_OFFSET			0x00e4
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT0_OFFSET		0x00e6
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT1_OFFSET		0x00e8
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT2_OFFSET		0x00ea
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT3_OFFSET		0x00ec
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT4_OFFSET		0x00ee
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT5_OFFSET		0x00f0
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT6_OFFSET		0x00f2
+#define OMAP4_CTRL_MODULE_PAD_SDMMC1_DAT7_OFFSET		0x00f4
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP2_CLKX_OFFSET		0x00f6
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP2_DR_OFFSET		0x00f8
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP2_DX_OFFSET		0x00fa
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP2_FSX_OFFSET		0x00fc
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP1_CLKX_OFFSET		0x00fe
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP1_DR_OFFSET		0x0100
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP1_DX_OFFSET		0x0102
+#define OMAP4_CTRL_MODULE_PAD_ABE_MCBSP1_FSX_OFFSET		0x0104
+#define OMAP4_CTRL_MODULE_PAD_ABE_PDM_UL_DATA_OFFSET		0x0106
+#define OMAP4_CTRL_MODULE_PAD_ABE_PDM_DL_DATA_OFFSET		0x0108
+#define OMAP4_CTRL_MODULE_PAD_ABE_PDM_FRAME_OFFSET		0x010a
+#define OMAP4_CTRL_MODULE_PAD_ABE_PDM_LB_CLK_OFFSET		0x010c
+#define OMAP4_CTRL_MODULE_PAD_ABE_CLKS_OFFSET			0x010e
+#define OMAP4_CTRL_MODULE_PAD_ABE_DMIC_CLK1_OFFSET		0x0110
+#define OMAP4_CTRL_MODULE_PAD_ABE_DMIC_DIN1_OFFSET		0x0112
+#define OMAP4_CTRL_MODULE_PAD_ABE_DMIC_DIN2_OFFSET		0x0114
+#define OMAP4_CTRL_MODULE_PAD_ABE_DMIC_DIN3_OFFSET		0x0116
+#define OMAP4_CTRL_MODULE_PAD_UART2_CTS_OFFSET			0x0118
+#define OMAP4_CTRL_MODULE_PAD_UART2_RTS_OFFSET			0x011a
+#define OMAP4_CTRL_MODULE_PAD_UART2_RX_OFFSET			0x011c
+#define OMAP4_CTRL_MODULE_PAD_UART2_TX_OFFSET			0x011e
+#define OMAP4_CTRL_MODULE_PAD_HDQ_SIO_OFFSET			0x0120
+#define OMAP4_CTRL_MODULE_PAD_I2C1_SCL_OFFSET			0x0122
+#define OMAP4_CTRL_MODULE_PAD_I2C1_SDA_OFFSET			0x0124
+#define OMAP4_CTRL_MODULE_PAD_I2C2_SCL_OFFSET			0x0126
+#define OMAP4_CTRL_MODULE_PAD_I2C2_SDA_OFFSET			0x0128
+#define OMAP4_CTRL_MODULE_PAD_I2C3_SCL_OFFSET			0x012a
+#define OMAP4_CTRL_MODULE_PAD_I2C3_SDA_OFFSET			0x012c
+#define OMAP4_CTRL_MODULE_PAD_I2C4_SCL_OFFSET			0x012e
+#define OMAP4_CTRL_MODULE_PAD_I2C4_SDA_OFFSET			0x0130
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_CLK_OFFSET			0x0132
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_SOMI_OFFSET		0x0134
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_SIMO_OFFSET		0x0136
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_CS0_OFFSET			0x0138
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_CS1_OFFSET			0x013a
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_CS2_OFFSET			0x013c
+#define OMAP4_CTRL_MODULE_PAD_MCSPI1_CS3_OFFSET			0x013e
+#define OMAP4_CTRL_MODULE_PAD_UART3_CTS_RCTX_OFFSET		0x0140
+#define OMAP4_CTRL_MODULE_PAD_UART3_RTS_SD_OFFSET		0x0142
+#define OMAP4_CTRL_MODULE_PAD_UART3_RX_IRRX_OFFSET		0x0144
+#define OMAP4_CTRL_MODULE_PAD_UART3_TX_IRTX_OFFSET		0x0146
+#define OMAP4_CTRL_MODULE_PAD_SDMMC5_CLK_OFFSET			0x0148
+#define OMAP4_CTRL_MODULE_PAD_SDMMC5_CMD_OFFSET			0x014a
+#define OMAP4_CTRL_MODULE_PAD_SDMMC5_DAT0_OFFSET		0x014c
+#define OMAP4_CTRL_MODULE_PAD_SDMMC5_DAT1_OFFSET		0x014e
+#define OMAP4_CTRL_MODULE_PAD_SDMMC5_DAT2_OFFSET		0x0150
+#define OMAP4_CTRL_MODULE_PAD_SDMMC5_DAT3_OFFSET		0x0152
+#define OMAP4_CTRL_MODULE_PAD_MCSPI4_CLK_OFFSET			0x0154
+#define OMAP4_CTRL_MODULE_PAD_MCSPI4_SIMO_OFFSET		0x0156
+#define OMAP4_CTRL_MODULE_PAD_MCSPI4_SOMI_OFFSET		0x0158
+#define OMAP4_CTRL_MODULE_PAD_MCSPI4_CS0_OFFSET			0x015a
+#define OMAP4_CTRL_MODULE_PAD_UART4_RX_OFFSET			0x015c
+#define OMAP4_CTRL_MODULE_PAD_UART4_TX_OFFSET			0x015e
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_CLK_OFFSET		0x0160
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_STP_OFFSET		0x0162
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DIR_OFFSET		0x0164
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_NXT_OFFSET		0x0166
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT0_OFFSET		0x0168
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT1_OFFSET		0x016a
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT2_OFFSET		0x016c
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT3_OFFSET		0x016e
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT4_OFFSET		0x0170
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT5_OFFSET		0x0172
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT6_OFFSET		0x0174
+#define OMAP4_CTRL_MODULE_PAD_USBB2_ULPITLL_DAT7_OFFSET		0x0176
+#define OMAP4_CTRL_MODULE_PAD_USBB2_HSIC_DATA_OFFSET		0x0178
+#define OMAP4_CTRL_MODULE_PAD_USBB2_HSIC_STROBE_OFFSET		0x017a
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_TX0_OFFSET			0x017c
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_TY0_OFFSET			0x017e
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_TX1_OFFSET			0x0180
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_TY1_OFFSET			0x0182
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_TX2_OFFSET			0x0184
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_TY2_OFFSET			0x0186
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_RX0_OFFSET			0x0188
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_RY0_OFFSET			0x018a
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_RX1_OFFSET			0x018c
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_RY1_OFFSET			0x018e
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_RX2_OFFSET			0x0190
+#define OMAP4_CTRL_MODULE_PAD_UNIPRO_RY2_OFFSET			0x0192
+#define OMAP4_CTRL_MODULE_PAD_USBA0_OTG_CE_OFFSET		0x0194
+#define OMAP4_CTRL_MODULE_PAD_USBA0_OTG_DP_OFFSET		0x0196
+#define OMAP4_CTRL_MODULE_PAD_USBA0_OTG_DM_OFFSET		0x0198
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK1_OUT_OFFSET		0x019a
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK2_OUT_OFFSET		0x019c
+#define OMAP4_CTRL_MODULE_PAD_SYS_NIRQ1_OFFSET			0x019e
+#define OMAP4_CTRL_MODULE_PAD_SYS_NIRQ2_OFFSET			0x01a0
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT0_OFFSET			0x01a2
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT1_OFFSET			0x01a4
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT2_OFFSET			0x01a6
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT3_OFFSET			0x01a8
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT4_OFFSET			0x01aa
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT5_OFFSET			0x01ac
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU0_OFFSET			0x01ae
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU1_OFFSET			0x01b0
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU2_OFFSET			0x01b2
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU3_OFFSET			0x01b4
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU4_OFFSET			0x01b6
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU5_OFFSET			0x01b8
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU6_OFFSET			0x01ba
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU7_OFFSET			0x01bc
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU8_OFFSET			0x01be
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU9_OFFSET			0x01c0
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU10_OFFSET			0x01c2
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU11_OFFSET			0x01c4
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU12_OFFSET			0x01c6
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU13_OFFSET			0x01c8
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU14_OFFSET			0x01ca
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU15_OFFSET			0x01cc
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU16_OFFSET			0x01ce
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU17_OFFSET			0x01d0
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU18_OFFSET			0x01d2
+#define OMAP4_CTRL_MODULE_PAD_DPM_EMU19_OFFSET			0x01d4
+
+/* ES2.0 only */
+#define OMAP4_CTRL_MODULE_PAD_GPMC_WAIT2_OFFSET			0x008e
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS4_OFFSET			0x0090
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS5_OFFSET			0x0092
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS6_OFFSET			0x0094
+#define OMAP4_CTRL_MODULE_PAD_GPMC_NCS7_OFFSET			0x0096
+
+#define OMAP4_CTRL_MODULE_PAD_KPD_COL3_OFFSET			0x017c
+#define OMAP4_CTRL_MODULE_PAD_KPD_COL4_OFFSET			0x017e
+#define OMAP4_CTRL_MODULE_PAD_KPD_COL5_OFFSET			0x0180
+#define OMAP4_CTRL_MODULE_PAD_KPD_COL0_OFFSET			0x0182
+#define OMAP4_CTRL_MODULE_PAD_KPD_COL1_OFFSET			0x0184
+#define OMAP4_CTRL_MODULE_PAD_KPD_COL2_OFFSET			0x0186
+#define OMAP4_CTRL_MODULE_PAD_KPD_ROW3_OFFSET			0x0188
+#define OMAP4_CTRL_MODULE_PAD_KPD_ROW4_OFFSET			0x018a
+#define OMAP4_CTRL_MODULE_PAD_KPD_ROW5_OFFSET			0x018c
+#define OMAP4_CTRL_MODULE_PAD_KPD_ROW0_OFFSET			0x018e
+#define OMAP4_CTRL_MODULE_PAD_KPD_ROW1_OFFSET			0x0190
+#define OMAP4_CTRL_MODULE_PAD_KPD_ROW2_OFFSET			0x0192
+
+
+#define OMAP4_CTRL_MODULE_PAD_CORE_MUX_SIZE			\
+		(OMAP4_CTRL_MODULE_PAD_DPM_EMU19_OFFSET		\
+		 - OMAP4_CTRL_MODULE_PAD_GPMC_AD0_OFFSET + 2)
+
+/* ctrl_module_pad_wkup base address */
+#define OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE			0x4a31e000
+
+/* ctrl_module_pad_wkup registers offset */
+#define OMAP4_CTRL_MODULE_PAD_SIM_IO_OFFSET			0x0040
+#define OMAP4_CTRL_MODULE_PAD_SIM_CLK_OFFSET			0x0042
+#define OMAP4_CTRL_MODULE_PAD_SIM_RESET_OFFSET			0x0044
+#define OMAP4_CTRL_MODULE_PAD_SIM_CD_OFFSET			0x0046
+#define OMAP4_CTRL_MODULE_PAD_SIM_PWRCTRL_OFFSET		0x0048
+#define OMAP4_CTRL_MODULE_PAD_SR_SCL_OFFSET			0x004a
+#define OMAP4_CTRL_MODULE_PAD_SR_SDA_OFFSET			0x004c
+#define OMAP4_CTRL_MODULE_PAD_FREF_XTAL_IN_OFFSET		0x004e
+#define OMAP4_CTRL_MODULE_PAD_FREF_SLICER_IN_OFFSET		0x0050
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK_IOREQ_OFFSET		0x0052
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK0_OUT_OFFSET		0x0054
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK3_REQ_OFFSET		0x0056
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK3_OUT_OFFSET		0x0058
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK4_REQ_OFFSET		0x005a
+#define OMAP4_CTRL_MODULE_PAD_FREF_CLK4_OUT_OFFSET		0x005c
+#define OMAP4_CTRL_MODULE_PAD_SYS_32K_OFFSET			0x005e
+#define OMAP4_CTRL_MODULE_PAD_SYS_NRESPWRON_OFFSET		0x0060
+#define OMAP4_CTRL_MODULE_PAD_SYS_NRESWARM_OFFSET		0x0062
+#define OMAP4_CTRL_MODULE_PAD_SYS_PWR_REQ_OFFSET		0x0064
+#define OMAP4_CTRL_MODULE_PAD_SYS_PWRON_RESET_OUT_OFFSET	0x0066
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT6_OFFSET			0x0068
+#define OMAP4_CTRL_MODULE_PAD_SYS_BOOT7_OFFSET			0x006a
+#define OMAP4_CTRL_MODULE_PAD_JTAG_NTRST_OFFSET			0x006c
+#define OMAP4_CTRL_MODULE_PAD_JTAG_TCK_OFFSET			0x006e
+#define OMAP4_CTRL_MODULE_PAD_JTAG_RTCK_OFFSET			0x0070
+#define OMAP4_CTRL_MODULE_PAD_JTAG_TMS_TMSC_OFFSET		0x0072
+#define OMAP4_CTRL_MODULE_PAD_JTAG_TDI_OFFSET			0x0074
+#define OMAP4_CTRL_MODULE_PAD_JTAG_TDO_OFFSET			0x0076
+
+#define OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE			\
+		(OMAP4_CTRL_MODULE_PAD_JTAG_TDO_OFFSET		\
+		 - OMAP4_CTRL_MODULE_PAD_SIM_IO_OFFSET + 2)
+
+#endif
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index f5a1aad..3fc5dc7 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -33,9 +33,11 @@
 			.name = "isp",
 			.nr_tlb_entries = 8,
 			.clk_name = "cam_ick",
+			.da_start = 0x0,
+			.da_end = 0xFFFFF000,
 		},
 	},
-#if defined(CONFIG_MPU_BRIDGE_IOMMU)
+#if defined(CONFIG_OMAP_IOMMU_IVA2)
 	{
 		.base = 0x5d000000,
 		.irq = 28,
@@ -43,6 +45,8 @@
 			.name = "iva2",
 			.nr_tlb_entries = 32,
 			.clk_name = "iva2_ck",
+			.da_start = 0x11000000,
+			.da_end = 0xFFFFF000,
 		},
 	},
 #endif
@@ -64,6 +68,8 @@
 			.name = "ducati",
 			.nr_tlb_entries = 32,
 			.clk_name = "ducati_ick",
+			.da_start = 0x0,
+			.da_end = 0xFFFFF000,
 		},
 	},
 #if defined(CONFIG_MPU_TESLA_IOMMU)
@@ -74,6 +80,8 @@
 			.name = "tesla",
 			.nr_tlb_entries = 32,
 			.clk_name = "tesla_ick",
+			.da_start = 0x0,
+			.da_end = 0xFFFFF000,
 		},
 	},
 #endif
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 666e852..1926864 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -54,6 +54,8 @@
 
 static int __init omap_l2_cache_init(void)
 {
+	u32 aux_ctrl = 0;
+
 	/*
 	 * To avoid code running on other OMAPs in
 	 * multi-omap builds
@@ -65,18 +67,32 @@
 	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
 	BUG_ON(!l2cache_base);
 
-	/* Enable PL310 L2 Cache controller */
-	omap_smc1(0x102, 0x1);
-
 	/*
 	 * 16-way associativity, parity disabled
 	 * Way size - 32KB (es1.0)
 	 * Way size - 64KB (es2.0 +)
 	 */
-	if (omap_rev() == OMAP4430_REV_ES1_0)
-		l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
-	else
-		l2x0_init(l2cache_base, 0x0e070000, 0xc0000fff);
+	aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
+			(0x1 << 25) |
+			(0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
+			(0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
+
+	if (omap_rev() == OMAP4430_REV_ES1_0) {
+		aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
+	} else {
+		aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
+			(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
+			(1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
+			(1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
+			(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
+	}
+	if (omap_rev() != OMAP4430_REV_ES1_0)
+		omap_smc1(0x109, aux_ctrl);
+
+	/* Enable PL310 L2 Cache controller */
+	omap_smc1(0x102, 0x1);
+
+	l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);
 
 	/*
 	 * Override default outer_cache.disable with a OMAP4
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 5a30658..e282e35 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -116,7 +116,6 @@
  * - Open Core Protocol Specification 2.2
  *
  * To do:
- * - pin mux handling
  * - handle IO mapping
  * - bus throughput & module latency measurement code
  *
@@ -135,17 +134,21 @@
 #include <linux/err.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 #include <plat/common.h>
 #include <plat/cpu.h>
-#include <plat/clockdomain.h>
-#include <plat/powerdomain.h>
+#include "clockdomain.h"
+#include "powerdomain.h"
 #include <plat/clock.h>
 #include <plat/omap_hwmod.h>
 #include <plat/prcm.h>
 
-#include "cm.h"
-#include "prm.h"
+#include "cm2xxx_3xxx.h"
+#include "cm44xx.h"
+#include "prm2xxx_3xxx.h"
+#include "prm44xx.h"
+#include "mux.h"
 
 /* Maximum microseconds to wait for OMAP module to softreset */
 #define MAX_MODULE_SOFTRESET_WAIT	10000
@@ -156,8 +159,6 @@
 /* omap_hwmod_list contains all registered struct omap_hwmods */
 static LIST_HEAD(omap_hwmod_list);
 
-static DEFINE_MUTEX(omap_hwmod_mutex);
-
 /* mpu_oh: used to add/remove MPU initiator from sleepdep list */
 static struct omap_hwmod *mpu_oh;
 
@@ -209,10 +210,9 @@
 
 	/* XXX ensure module interface clock is up */
 
-	if (oh->_sysc_cache != v) {
-		oh->_sysc_cache = v;
-		omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs);
-	}
+	/* Module might have lost context, always update cache and register */
+	oh->_sysc_cache = v;
+	omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs);
 }
 
 /**
@@ -388,12 +388,13 @@
  * Allow the hardware module @oh to send wakeups.  Returns -EINVAL
  * upon error or 0 upon success.
  */
-static int _enable_wakeup(struct omap_hwmod *oh)
+static int _enable_wakeup(struct omap_hwmod *oh, u32 *v)
 {
-	u32 v, wakeup_mask;
+	u32 wakeup_mask;
 
 	if (!oh->class->sysc ||
-	    !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
+	    !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
+	      (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
 		return -EINVAL;
 
 	if (!oh->class->sysc->sysc_fields) {
@@ -403,9 +404,10 @@
 
 	wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
 
-	v = oh->_sysc_cache;
-	v |= wakeup_mask;
-	_write_sysconfig(v, oh);
+	*v |= wakeup_mask;
+
+	if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+		_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART_WKUP, v);
 
 	/* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -421,12 +423,13 @@
  * Prevent the hardware module @oh to send wakeups.  Returns -EINVAL
  * upon error or 0 upon success.
  */
-static int _disable_wakeup(struct omap_hwmod *oh)
+static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
 {
-	u32 v, wakeup_mask;
+	u32 wakeup_mask;
 
 	if (!oh->class->sysc ||
-	    !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
+	    !((oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP) ||
+	      (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)))
 		return -EINVAL;
 
 	if (!oh->class->sysc->sysc_fields) {
@@ -436,9 +439,10 @@
 
 	wakeup_mask = (0x1 << oh->class->sysc->sysc_fields->enwkup_shift);
 
-	v = oh->_sysc_cache;
-	v &= ~wakeup_mask;
-	_write_sysconfig(v, oh);
+	*v &= ~wakeup_mask;
+
+	if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+		_set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
 
 	/* XXX test pwrdm_get_wken for this hwmod's subsystem */
 
@@ -675,7 +679,7 @@
  * Returns the array index of the OCP slave port that the MPU
  * addresses the device on, or -EINVAL upon error or not found.
  */
-static int _find_mpu_port_index(struct omap_hwmod *oh)
+static int __init _find_mpu_port_index(struct omap_hwmod *oh)
 {
 	int i;
 	int found = 0;
@@ -709,7 +713,7 @@
  * Return the virtual address of the base of the register target of
  * device @oh, or NULL on error.
  */
-static void __iomem *_find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
+static void __iomem * __init _find_mpu_rt_base(struct omap_hwmod *oh, u8 index)
 {
 	struct omap_hwmod_ocp_if *os;
 	struct omap_hwmod_addr_space *mem;
@@ -786,11 +790,11 @@
 	    (sf & SYSC_HAS_CLOCKACTIVITY))
 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
 
-	_write_sysconfig(v, oh);
-
 	/* If slave is in SMARTIDLE, also enable wakeup */
 	if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
-		_enable_wakeup(oh);
+		_enable_wakeup(oh, &v);
+
+	_write_sysconfig(v, oh);
 
 	/*
 	 * Set the autoidle bit only after setting the smartidle bit
@@ -836,6 +840,10 @@
 		_set_master_standbymode(oh, idlemode, &v);
 	}
 
+	/* If slave is in SMARTIDLE, also enable wakeup */
+	if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
+		_enable_wakeup(oh, &v);
+
 	_write_sysconfig(v, oh);
 }
 
@@ -874,7 +882,6 @@
  * @name: find an omap_hwmod by name
  *
  * Return a pointer to an omap_hwmod by name, or NULL if not found.
- * Caller must hold omap_hwmod_mutex.
  */
 static struct omap_hwmod *_lookup(const char *name)
 {
@@ -1089,7 +1096,7 @@
 }
 
 /**
- * _reset - reset an omap_hwmod
+ * _ocp_softreset - reset an omap_hwmod via the OCP_SYSCONFIG bit
  * @oh: struct omap_hwmod *
  *
  * Resets an omap_hwmod @oh via the OCP_SYSCONFIG bit.  hwmod must be
@@ -1098,12 +1105,13 @@
  * the module did not reset in time, or 0 upon success.
  *
  * In OMAP3 a specific SYSSTATUS register is used to get the reset status.
- * Starting in OMAP4, some IPs does not have SYSSTATUS register and instead
+ * Starting in OMAP4, some IPs do not have SYSSTATUS registers and instead
  * use the SYSCONFIG softreset bit to provide the status.
  *
- * Note that some IP like McBSP does have a reset control but no reset status.
+ * Note that some IP like McBSP do have reset control but don't have
+ * reset status.
  */
-static int _reset(struct omap_hwmod *oh)
+static int _ocp_softreset(struct omap_hwmod *oh)
 {
 	u32 v;
 	int c = 0;
@@ -1124,7 +1132,7 @@
 	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
 		_enable_optional_clocks(oh);
 
-	pr_debug("omap_hwmod: %s: resetting\n", oh->name);
+	pr_debug("omap_hwmod: %s: resetting via OCP SOFTRESET\n", oh->name);
 
 	v = oh->_sysc_cache;
 	ret = _set_softreset(oh, &v);
@@ -1164,17 +1172,41 @@
 }
 
 /**
- * _omap_hwmod_enable - enable an omap_hwmod
+ * _reset - reset an omap_hwmod
+ * @oh: struct omap_hwmod *
+ *
+ * Resets an omap_hwmod @oh.  The default software reset mechanism for
+ * most OMAP IP blocks is triggered via the OCP_SYSCONFIG.SOFTRESET
+ * bit.  However, some hwmods cannot be reset via this method: some
+ * are not targets and therefore have no OCP header registers to
+ * access; others (like the IVA) have idiosyncratic reset sequences.
+ * So for these relatively rare cases, custom reset code can be
+ * supplied in the struct omap_hwmod_class .reset function pointer.
+ * Passes along the return value from either _ocp_softreset() or the custom
+ * reset function - these must return -EINVAL if the hwmod cannot be
+ * reset this way or if the hwmod is in the wrong state, -ETIMEDOUT if
+ * the module did not reset in time, or 0 upon success.
+ */
+static int _reset(struct omap_hwmod *oh)
+{
+	int ret;
+
+	pr_debug("omap_hwmod: %s: resetting\n", oh->name);
+
+	ret = (oh->class->reset) ? oh->class->reset(oh) : _ocp_softreset(oh);
+
+	return ret;
+}
+
+/**
+ * _enable - enable an omap_hwmod
  * @oh: struct omap_hwmod *
  *
  * Enables an omap_hwmod @oh such that the MPU can access the hwmod's
- * register target.  (This function has a full name --
- * _omap_hwmod_enable() rather than simply _enable() -- because it is
- * currently required by the pm34xx.c idle loop.)  Returns -EINVAL if
- * the hwmod is in the wrong state or passes along the return value of
- * _wait_target_ready().
+ * register target.  Returns -EINVAL if the hwmod is in the wrong
+ * state or passes along the return value of _wait_target_ready().
  */
-int _omap_hwmod_enable(struct omap_hwmod *oh)
+static int _enable(struct omap_hwmod *oh)
 {
 	int r;
 
@@ -1197,7 +1229,9 @@
 	     oh->_state == _HWMOD_STATE_DISABLED) && oh->rst_lines_cnt == 1)
 		_deassert_hardreset(oh, oh->rst_lines[0].name);
 
-	/* XXX mux balls */
+	/* Mux pins for device runtime if populated */
+	if (oh->mux)
+		omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
 
 	_add_initiator_dep(oh, mpu_oh);
 	_enable_clocks(oh);
@@ -1213,6 +1247,7 @@
 			_enable_sysc(oh);
 		}
 	} else {
+		_disable_clocks(oh);
 		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
 			 oh->name, r);
 	}
@@ -1221,16 +1256,14 @@
 }
 
 /**
- * _omap_hwmod_idle - idle an omap_hwmod
+ * _idle - idle an omap_hwmod
  * @oh: struct omap_hwmod *
  *
  * Idles an omap_hwmod @oh.  This should be called once the hwmod has
- * no further work.  (This function has a full name --
- * _omap_hwmod_idle() rather than simply _idle() -- because it is
- * currently required by the pm34xx.c idle loop.)  Returns -EINVAL if
- * the hwmod is in the wrong state or returns 0.
+ * no further work.  Returns -EINVAL if the hwmod is in the wrong
+ * state or returns 0.
  */
-int _omap_hwmod_idle(struct omap_hwmod *oh)
+static int _idle(struct omap_hwmod *oh)
 {
 	if (oh->_state != _HWMOD_STATE_ENABLED) {
 		WARN(1, "omap_hwmod: %s: idle state can only be entered from "
@@ -1245,6 +1278,10 @@
 	_del_initiator_dep(oh, mpu_oh);
 	_disable_clocks(oh);
 
+	/* Mux pins for device idle if populated */
+	if (oh->mux)
+		omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
+
 	oh->_state = _HWMOD_STATE_IDLE;
 
 	return 0;
@@ -1261,6 +1298,9 @@
  */
 static int _shutdown(struct omap_hwmod *oh)
 {
+	int ret;
+	u8 prev_state;
+
 	if (oh->_state != _HWMOD_STATE_IDLE &&
 	    oh->_state != _HWMOD_STATE_ENABLED) {
 		WARN(1, "omap_hwmod: %s: disabled state can only be entered "
@@ -1270,6 +1310,18 @@
 
 	pr_debug("omap_hwmod: %s: disabling\n", oh->name);
 
+	if (oh->class->pre_shutdown) {
+		prev_state = oh->_state;
+		if (oh->_state == _HWMOD_STATE_IDLE)
+			_enable(oh);
+		ret = oh->class->pre_shutdown(oh);
+		if (ret) {
+			if (prev_state == _HWMOD_STATE_IDLE)
+				_idle(oh);
+			return ret;
+		}
+	}
+
 	if (oh->class->sysc)
 		_shutdown_sysc(oh);
 
@@ -1288,7 +1340,9 @@
 	}
 	/* XXX Should this code also force-disable the optional clocks? */
 
-	/* XXX mux any associated balls to safe mode */
+	/* Mux pins to safe mode or use populated off mode values */
+	if (oh->mux)
+		omap_hwmod_mux(oh->mux, _HWMOD_STATE_DISABLED);
 
 	oh->_state = _HWMOD_STATE_DISABLED;
 
@@ -1298,23 +1352,15 @@
 /**
  * _setup - do initial configuration of omap_hwmod
  * @oh: struct omap_hwmod *
- * @skip_setup_idle_p: do not idle hwmods at the end of the fn if 1
  *
  * Writes the CLOCKACTIVITY bits @clockact to the hwmod @oh
- * OCP_SYSCONFIG register.  @skip_setup_idle is intended to be used on
- * a system that will not call omap_hwmod_enable() to enable devices
- * (e.g., a system without PM runtime).  Returns -EINVAL if the hwmod
- * is in the wrong state or returns 0.
+ * OCP_SYSCONFIG register.  Returns -EINVAL if the hwmod is in the
+ * wrong state or returns 0.
  */
 static int _setup(struct omap_hwmod *oh, void *data)
 {
 	int i, r;
-	u8 skip_setup_idle;
-
-	if (!oh || !data)
-		return -EINVAL;
-
-	skip_setup_idle = *(u8 *)data;
+	u8 postsetup_state;
 
 	/* Set iclk autoidle mode */
 	if (oh->slaves_cnt > 0) {
@@ -1334,7 +1380,6 @@
 		}
 	}
 
-	mutex_init(&oh->_mutex);
 	oh->_state = _HWMOD_STATE_INITIALIZED;
 
 	/*
@@ -1347,7 +1392,7 @@
 	if ((oh->flags & HWMOD_INIT_NO_RESET) && oh->rst_lines_cnt == 1)
 		return 0;
 
-	r = _omap_hwmod_enable(oh);
+	r = _enable(oh);
 	if (r) {
 		pr_warning("omap_hwmod: %s: cannot be enabled (%d)\n",
 			   oh->name, oh->_state);
@@ -1359,7 +1404,7 @@
 
 		/*
 		 * OCP_SYSCONFIG bits need to be reprogrammed after a softreset.
-		 * The _omap_hwmod_enable() function should be split to
+		 * The _enable() function should be split to
 		 * avoid the rewrite of the OCP_SYSCONFIG register.
 		 */
 		if (oh->class->sysc) {
@@ -1368,12 +1413,77 @@
 		}
 	}
 
-	if (!(oh->flags & HWMOD_INIT_NO_IDLE) && !skip_setup_idle)
-		_omap_hwmod_idle(oh);
+	postsetup_state = oh->_postsetup_state;
+	if (postsetup_state == _HWMOD_STATE_UNKNOWN)
+		postsetup_state = _HWMOD_STATE_ENABLED;
+
+	/*
+	 * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
+	 * it should be set by the core code as a runtime flag during startup
+	 */
+	if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
+	    (postsetup_state == _HWMOD_STATE_IDLE))
+		postsetup_state = _HWMOD_STATE_ENABLED;
+
+	if (postsetup_state == _HWMOD_STATE_IDLE)
+		_idle(oh);
+	else if (postsetup_state == _HWMOD_STATE_DISABLED)
+		_shutdown(oh);
+	else if (postsetup_state != _HWMOD_STATE_ENABLED)
+		WARN(1, "hwmod: %s: unknown postsetup state %d! defaulting to enabled\n",
+		     oh->name, postsetup_state);
 
 	return 0;
 }
 
+/**
+ * _register - register a struct omap_hwmod
+ * @oh: struct omap_hwmod *
+ *
+ * Registers the omap_hwmod @oh.  Returns -EEXIST if an omap_hwmod
+ * already has been registered by the same name; -EINVAL if the
+ * omap_hwmod is in the wrong state, if @oh is NULL, if the
+ * omap_hwmod's class field is NULL; if the omap_hwmod is missing a
+ * name, or if the omap_hwmod's class is missing a name; or 0 upon
+ * success.
+ *
+ * XXX The data should be copied into bootmem, so the original data
+ * should be marked __initdata and freed after init.  This would allow
+ * unneeded omap_hwmods to be freed on multi-OMAP configurations.  Note
+ * that the copy process would be relatively complex due to the large number
+ * of substructures.
+ */
+static int __init _register(struct omap_hwmod *oh)
+{
+	int ret, ms_id;
+
+	if (!oh || !oh->name || !oh->class || !oh->class->name ||
+	    (oh->_state != _HWMOD_STATE_UNKNOWN))
+		return -EINVAL;
+
+	pr_debug("omap_hwmod: %s: registering\n", oh->name);
+
+	if (_lookup(oh->name))
+		return -EEXIST;
+
+	ms_id = _find_mpu_port_index(oh);
+	if (!IS_ERR_VALUE(ms_id)) {
+		oh->_mpu_port_index = ms_id;
+		oh->_mpu_rt_va = _find_mpu_rt_base(oh, oh->_mpu_port_index);
+	} else {
+		oh->_int_flags |= _HWMOD_NO_MPU_PORT;
+	}
+
+	list_add_tail(&oh->node, &omap_hwmod_list);
+
+	spin_lock_init(&oh->_lock);
+
+	oh->_state = _HWMOD_STATE_REGISTERED;
+
+	ret = 0;
+
+	return ret;
+}
 
 
 /* Public functions */
@@ -1427,59 +1537,6 @@
 }
 
 /**
- * omap_hwmod_register - register a struct omap_hwmod
- * @oh: struct omap_hwmod *
- *
- * Registers the omap_hwmod @oh.  Returns -EEXIST if an omap_hwmod
- * already has been registered by the same name; -EINVAL if the
- * omap_hwmod is in the wrong state, if @oh is NULL, if the
- * omap_hwmod's class field is NULL; if the omap_hwmod is missing a
- * name, or if the omap_hwmod's class is missing a name; or 0 upon
- * success.
- *
- * XXX The data should be copied into bootmem, so the original data
- * should be marked __initdata and freed after init.  This would allow
- * unneeded omap_hwmods to be freed on multi-OMAP configurations.  Note
- * that the copy process would be relatively complex due to the large number
- * of substructures.
- */
-int omap_hwmod_register(struct omap_hwmod *oh)
-{
-	int ret, ms_id;
-
-	if (!oh || !oh->name || !oh->class || !oh->class->name ||
-	    (oh->_state != _HWMOD_STATE_UNKNOWN))
-		return -EINVAL;
-
-	mutex_lock(&omap_hwmod_mutex);
-
-	pr_debug("omap_hwmod: %s: registering\n", oh->name);
-
-	if (_lookup(oh->name)) {
-		ret = -EEXIST;
-		goto ohr_unlock;
-	}
-
-	ms_id = _find_mpu_port_index(oh);
-	if (!IS_ERR_VALUE(ms_id)) {
-		oh->_mpu_port_index = ms_id;
-		oh->_mpu_rt_va = _find_mpu_rt_base(oh, oh->_mpu_port_index);
-	} else {
-		oh->_int_flags |= _HWMOD_NO_MPU_PORT;
-	}
-
-	list_add_tail(&oh->node, &omap_hwmod_list);
-
-	oh->_state = _HWMOD_STATE_REGISTERED;
-
-	ret = 0;
-
-ohr_unlock:
-	mutex_unlock(&omap_hwmod_mutex);
-	return ret;
-}
-
-/**
  * omap_hwmod_lookup - look up a registered omap_hwmod by name
  * @name: name of the omap_hwmod to look up
  *
@@ -1493,9 +1550,7 @@
 	if (!name)
 		return NULL;
 
-	mutex_lock(&omap_hwmod_mutex);
 	oh = _lookup(name);
-	mutex_unlock(&omap_hwmod_mutex);
 
 	return oh;
 }
@@ -1521,13 +1576,11 @@
 	if (!fn)
 		return -EINVAL;
 
-	mutex_lock(&omap_hwmod_mutex);
 	list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
 		ret = (*fn)(temp_oh, data);
 		if (ret)
 			break;
 	}
-	mutex_unlock(&omap_hwmod_mutex);
 
 	return ret;
 }
@@ -1542,7 +1595,7 @@
  * listed in @ohs that are valid for this chip.  Returns -EINVAL if
  * omap_hwmod_init() has already been called or 0 otherwise.
  */
-int omap_hwmod_init(struct omap_hwmod **ohs)
+int __init omap_hwmod_init(struct omap_hwmod **ohs)
 {
 	struct omap_hwmod *oh;
 	int r;
@@ -1558,8 +1611,8 @@
 	oh = *ohs;
 	while (oh) {
 		if (omap_chip_is(oh->omap_chip)) {
-			r = omap_hwmod_register(oh);
-			WARN(r, "omap_hwmod: %s: omap_hwmod_register returned "
+			r = _register(oh);
+			WARN(r, "omap_hwmod: %s: _register returned "
 			     "%d\n", oh->name, r);
 		}
 		oh = *++ohs;
@@ -1570,13 +1623,12 @@
 
 /**
  * omap_hwmod_late_init - do some post-clock framework initialization
- * @skip_setup_idle: if 1, do not idle hwmods in _setup()
  *
  * Must be called after omap2_clk_init().  Resolves the struct clk names
  * to struct clk pointers for each registered omap_hwmod.  Also calls
  * _setup() on each hwmod.  Returns 0.
  */
-int omap_hwmod_late_init(u8 skip_setup_idle)
+int omap_hwmod_late_init(void)
 {
 	int r;
 
@@ -1588,36 +1640,7 @@
 	WARN(!mpu_oh, "omap_hwmod: could not find MPU initiator hwmod %s\n",
 	     MPU_INITIATOR_NAME);
 
-	if (skip_setup_idle)
-		pr_debug("omap_hwmod: will leave hwmods enabled during setup\n");
-
-	omap_hwmod_for_each(_setup, &skip_setup_idle);
-
-	return 0;
-}
-
-/**
- * omap_hwmod_unregister - unregister an omap_hwmod
- * @oh: struct omap_hwmod *
- *
- * Unregisters a previously-registered omap_hwmod @oh.  There's probably
- * no use case for this, so it is likely to be removed in a later version.
- *
- * XXX Free all of the bootmem-allocated structures here when that is
- * implemented.  Make it clear that core code is the only code that is
- * expected to unregister modules.
- */
-int omap_hwmod_unregister(struct omap_hwmod *oh)
-{
-	if (!oh)
-		return -EINVAL;
-
-	pr_debug("omap_hwmod: %s: unregistering\n", oh->name);
-
-	mutex_lock(&omap_hwmod_mutex);
-	iounmap(oh->_mpu_rt_va);
-	list_del(&oh->node);
-	mutex_unlock(&omap_hwmod_mutex);
+	omap_hwmod_for_each(_setup, NULL);
 
 	return 0;
 }
@@ -1632,18 +1655,18 @@
 int omap_hwmod_enable(struct omap_hwmod *oh)
 {
 	int r;
+	unsigned long flags;
 
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
-	r = _omap_hwmod_enable(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
+	r = _enable(oh);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return r;
 }
 
-
 /**
  * omap_hwmod_idle - idle an omap_hwmod
  * @oh: struct omap_hwmod *
@@ -1653,12 +1676,14 @@
  */
 int omap_hwmod_idle(struct omap_hwmod *oh)
 {
+	unsigned long flags;
+
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
-	_omap_hwmod_idle(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
+	_idle(oh);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
 }
@@ -1673,12 +1698,14 @@
  */
 int omap_hwmod_shutdown(struct omap_hwmod *oh)
 {
+	unsigned long flags;
+
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
 	_shutdown(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
 }
@@ -1691,9 +1718,11 @@
  */
 int omap_hwmod_enable_clocks(struct omap_hwmod *oh)
 {
-	mutex_lock(&oh->_mutex);
+	unsigned long flags;
+
+	spin_lock_irqsave(&oh->_lock, flags);
 	_enable_clocks(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
 }
@@ -1706,9 +1735,11 @@
  */
 int omap_hwmod_disable_clocks(struct omap_hwmod *oh)
 {
-	mutex_lock(&oh->_mutex);
+	unsigned long flags;
+
+	spin_lock_irqsave(&oh->_lock, flags);
 	_disable_clocks(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
 }
@@ -1752,13 +1783,14 @@
 int omap_hwmod_reset(struct omap_hwmod *oh)
 {
 	int r;
+	unsigned long flags;
 
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
 	r = _reset(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return r;
 }
@@ -1955,13 +1987,18 @@
  */
 int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
 {
+	unsigned long flags;
+	u32 v;
+
 	if (!oh->class->sysc ||
 	    !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
-	_enable_wakeup(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
+	v = oh->_sysc_cache;
+	_enable_wakeup(oh, &v);
+	_write_sysconfig(v, oh);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
 }
@@ -1980,13 +2017,18 @@
  */
 int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
 {
+	unsigned long flags;
+	u32 v;
+
 	if (!oh->class->sysc ||
 	    !(oh->class->sysc->sysc_flags & SYSC_HAS_ENAWAKEUP))
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
-	_disable_wakeup(oh);
-	mutex_unlock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
+	v = oh->_sysc_cache;
+	_disable_wakeup(oh, &v);
+	_write_sysconfig(v, oh);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
 }
@@ -2006,13 +2048,14 @@
 int omap_hwmod_assert_hardreset(struct omap_hwmod *oh, const char *name)
 {
 	int ret;
+	unsigned long flags;
 
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
 	ret = _assert_hardreset(oh, name);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return ret;
 }
@@ -2032,13 +2075,14 @@
 int omap_hwmod_deassert_hardreset(struct omap_hwmod *oh, const char *name)
 {
 	int ret;
+	unsigned long flags;
 
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
 	ret = _deassert_hardreset(oh, name);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return ret;
 }
@@ -2057,13 +2101,14 @@
 int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name)
 {
 	int ret;
+	unsigned long flags;
 
 	if (!oh)
 		return -EINVAL;
 
-	mutex_lock(&oh->_mutex);
+	spin_lock_irqsave(&oh->_lock, flags);
 	ret = _read_hardreset(oh, name);
-	mutex_unlock(&oh->_mutex);
+	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return ret;
 }
@@ -2075,9 +2120,8 @@
  * @fn: callback function pointer to call for each hwmod in class @classname
  * @user: arbitrary context data to pass to the callback function
  *
- * For each omap_hwmod of class @classname, call @fn.  Takes
- * omap_hwmod_mutex to prevent the hwmod list from changing during the
- * iteration.  If the callback function returns something other than
+ * For each omap_hwmod of class @classname, call @fn.
+ * If the callback function returns something other than
  * zero, the iterator is terminated, and the callback function's return
  * value is passed back to the caller.  Returns 0 upon success, -EINVAL
  * if @classname or @fn are NULL, or passes back the error code from @fn.
@@ -2096,8 +2140,6 @@
 	pr_debug("omap_hwmod: %s: looking for modules of class %s\n",
 		 __func__, classname);
 
-	mutex_lock(&omap_hwmod_mutex);
-
 	list_for_each_entry(temp_oh, &omap_hwmod_list, node) {
 		if (!strcmp(temp_oh->class->name, classname)) {
 			pr_debug("omap_hwmod: %s: %s: calling callback fn\n",
@@ -2108,8 +2150,6 @@
 		}
 	}
 
-	mutex_unlock(&omap_hwmod_mutex);
-
 	if (ret)
 		pr_debug("omap_hwmod: %s: iterator terminated early: %d\n",
 			 __func__, ret);
@@ -2117,3 +2157,64 @@
 	return ret;
 }
 
+/**
+ * omap_hwmod_set_postsetup_state - set the post-_setup() state for this hwmod
+ * @oh: struct omap_hwmod *
+ * @state: state that _setup() should leave the hwmod in
+ *
+ * Sets the hwmod state that @oh will enter at the end of _setup() (called by
+ * omap_hwmod_late_init()).  Only valid to call between calls to
+ * omap_hwmod_init() and omap_hwmod_late_init().  Returns 0 upon success or
+ * -EINVAL if there is a problem with the arguments or if the hwmod is
+ * in the wrong state.
+ */
+int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state)
+{
+	int ret;
+	unsigned long flags;
+
+	if (!oh)
+		return -EINVAL;
+
+	if (state != _HWMOD_STATE_DISABLED &&
+	    state != _HWMOD_STATE_ENABLED &&
+	    state != _HWMOD_STATE_IDLE)
+		return -EINVAL;
+
+	spin_lock_irqsave(&oh->_lock, flags);
+
+	if (oh->_state != _HWMOD_STATE_REGISTERED) {
+		ret = -EINVAL;
+		goto ohsps_unlock;
+	}
+
+	oh->_postsetup_state = state;
+	ret = 0;
+
+ohsps_unlock:
+	spin_unlock_irqrestore(&oh->_lock, flags);
+
+	return ret;
+}
+
+/**
+ * omap_hwmod_get_context_loss_count - get lost context count
+ * @oh: struct omap_hwmod *
+ *
+ * Query the powerdomain of @oh to get the context loss
+ * count for this device.
+ *
+ * Returns the context loss count of the powerdomain associated with @oh
+ * upon success, or zero if no powerdomain exists for @oh.
+ */
+u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh)
+{
+	struct powerdomain *pwrdm;
+	int ret = 0;
+
+	pwrdm = omap_hwmod_get_pwrdm(oh);
+	if (pwrdm)
+		ret = pwrdm_get_context_loss_count(pwrdm);
+
+	return ret;
+}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index adf6e36..b85c630 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -16,11 +16,14 @@
 #include <plat/cpu.h>
 #include <plat/dma.h>
 #include <plat/serial.h>
+#include <plat/i2c.h>
+#include <plat/gpio.h>
 
 #include "omap_hwmod_common_data.h"
 
-#include "prm-regbits-24xx.h"
 #include "cm-regbits-24xx.h"
+#include "prm-regbits-24xx.h"
+#include "wd_timer.h"
 
 /*
  * OMAP2420 hardware module integration data
@@ -36,6 +39,11 @@
 static struct omap_hwmod omap2420_l3_main_hwmod;
 static struct omap_hwmod omap2420_l4_core_hwmod;
 static struct omap_hwmod omap2420_wd_timer2_hwmod;
+static struct omap_hwmod omap2420_gpio1_hwmod;
+static struct omap_hwmod omap2420_gpio2_hwmod;
+static struct omap_hwmod omap2420_gpio3_hwmod;
+static struct omap_hwmod omap2420_gpio4_hwmod;
+static struct omap_hwmod omap2420_dma_system_hwmod;
 
 /* L3 -> L4_CORE interface */
 static struct omap_hwmod_ocp_if omap2420_l3_main__l4_core = {
@@ -77,6 +85,8 @@
 static struct omap_hwmod omap2420_uart1_hwmod;
 static struct omap_hwmod omap2420_uart2_hwmod;
 static struct omap_hwmod omap2420_uart3_hwmod;
+static struct omap_hwmod omap2420_i2c1_hwmod;
+static struct omap_hwmod omap2420_i2c2_hwmod;
 
 /* L4_CORE -> L4_WKUP interface */
 static struct omap_hwmod_ocp_if omap2420_l4_core__l4_wkup = {
@@ -139,6 +149,45 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* I2C IP block address space length (in bytes) */
+#define OMAP2_I2C_AS_LEN		128
+
+/* L4 CORE -> I2C1 interface */
+static struct omap_hwmod_addr_space omap2420_i2c1_addr_space[] = {
+	{
+		.pa_start	= 0x48070000,
+		.pa_end		= 0x48070000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_core__i2c1 = {
+	.master		= &omap2420_l4_core_hwmod,
+	.slave		= &omap2420_i2c1_hwmod,
+	.clk		= "i2c1_ick",
+	.addr		= omap2420_i2c1_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2420_i2c1_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> I2C2 interface */
+static struct omap_hwmod_addr_space omap2420_i2c2_addr_space[] = {
+	{
+		.pa_start	= 0x48072000,
+		.pa_end		= 0x48072000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_core__i2c2 = {
+	.master		= &omap2420_l4_core_hwmod,
+	.slave		= &omap2420_i2c2_hwmod,
+	.clk		= "i2c2_ick",
+	.addr		= omap2420_i2c2_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2420_i2c2_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* Slave interfaces on the L4_CORE interconnect */
 static struct omap_hwmod_ocp_if *omap2420_l4_core_slaves[] = {
 	&omap2420_l3_main__l4_core,
@@ -150,6 +199,8 @@
 	&omap2_l4_core__uart1,
 	&omap2_l4_core__uart2,
 	&omap2_l4_core__uart3,
+	&omap2420_l4_core__i2c1,
+	&omap2420_l4_core__i2c2
 };
 
 /* L4 CORE */
@@ -262,8 +313,9 @@
 };
 
 static struct omap_hwmod_class omap2420_wd_timer_hwmod_class = {
-	.name = "wd_timer",
-	.sysc = &omap2420_wd_timer_sysc,
+	.name		= "wd_timer",
+	.sysc		= &omap2420_wd_timer_sysc,
+	.pre_shutdown	= &omap2_wd_timer_disable
 };
 
 /* wd_timer2 */
@@ -418,6 +470,400 @@
 	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
 };
 
+/* I2C common */
+static struct omap_hwmod_class_sysconfig i2c_sysc = {
+	.rev_offs	= 0x00,
+	.sysc_offs	= 0x20,
+	.syss_offs	= 0x10,
+	.sysc_flags	= SYSC_HAS_SOFTRESET,
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class i2c_class = {
+	.name		= "i2c",
+	.sysc		= &i2c_sysc,
+};
+
+static struct omap_i2c_dev_attr i2c_dev_attr;
+
+/* I2C1 */
+
+static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
+	{ .irq = INT_24XX_I2C1_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
+	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap2420_i2c1_slaves[] = {
+	&omap2420_l4_core__i2c1,
+};
+
+static struct omap_hwmod omap2420_i2c1_hwmod = {
+	.name		= "i2c1",
+	.mpu_irqs	= i2c1_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c1_mpu_irqs),
+	.sdma_reqs	= i2c1_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c1_sdma_reqs),
+	.main_clk	= "i2c1_fck",
+	.prcm		= {
+		.omap2 = {
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP2420_EN_I2C1_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP2420_ST_I2C1_SHIFT,
+		},
+	},
+	.slaves		= omap2420_i2c1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_i2c1_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+	.flags		= HWMOD_16BIT_REG,
+};
+
+/* I2C2 */
+
+static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
+	{ .irq = INT_24XX_I2C2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
+	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap2420_i2c2_slaves[] = {
+	&omap2420_l4_core__i2c2,
+};
+
+static struct omap_hwmod omap2420_i2c2_hwmod = {
+	.name		= "i2c2",
+	.mpu_irqs	= i2c2_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c2_mpu_irqs),
+	.sdma_reqs	= i2c2_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c2_sdma_reqs),
+	.main_clk	= "i2c2_fck",
+	.prcm		= {
+		.omap2 = {
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP2420_EN_I2C2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP2420_ST_I2C2_SHIFT,
+		},
+	},
+	.slaves		= omap2420_i2c2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_i2c2_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+	.flags		= HWMOD_16BIT_REG,
+};
+
+/* l4_wkup -> gpio1 */
+static struct omap_hwmod_addr_space omap2420_gpio1_addr_space[] = {
+	{
+		.pa_start	= 0x48018000,
+		.pa_end		= 0x480181ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio1 = {
+	.master		= &omap2420_l4_wkup_hwmod,
+	.slave		= &omap2420_gpio1_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2420_gpio1_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2420_gpio1_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio2 */
+static struct omap_hwmod_addr_space omap2420_gpio2_addr_space[] = {
+	{
+		.pa_start	= 0x4801a000,
+		.pa_end		= 0x4801a1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio2 = {
+	.master		= &omap2420_l4_wkup_hwmod,
+	.slave		= &omap2420_gpio2_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2420_gpio2_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2420_gpio2_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio3 */
+static struct omap_hwmod_addr_space omap2420_gpio3_addr_space[] = {
+	{
+		.pa_start	= 0x4801c000,
+		.pa_end		= 0x4801c1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio3 = {
+	.master		= &omap2420_l4_wkup_hwmod,
+	.slave		= &omap2420_gpio3_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2420_gpio3_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2420_gpio3_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio4 */
+static struct omap_hwmod_addr_space omap2420_gpio4_addr_space[] = {
+	{
+		.pa_start	= 0x4801e000,
+		.pa_end		= 0x4801e1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2420_l4_wkup__gpio4 = {
+	.master		= &omap2420_l4_wkup_hwmod,
+	.slave		= &omap2420_gpio4_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2420_gpio4_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2420_gpio4_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio dev_attr */
+static struct omap_gpio_dev_attr gpio_dev_attr = {
+	.bank_width = 32,
+	.dbck_flag = false,
+};
+
+static struct omap_hwmod_class_sysconfig omap242x_gpio_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+/*
+ * 'gpio' class
+ * general purpose io module
+ */
+static struct omap_hwmod_class omap242x_gpio_hwmod_class = {
+	.name = "gpio",
+	.sysc = &omap242x_gpio_sysc,
+	.rev = 0,
+};
+
+/* gpio1 */
+static struct omap_hwmod_irq_info omap242x_gpio1_irqs[] = {
+	{ .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
+};
+
+static struct omap_hwmod_ocp_if *omap2420_gpio1_slaves[] = {
+	&omap2420_l4_wkup__gpio1,
+};
+
+static struct omap_hwmod omap2420_gpio1_hwmod = {
+	.name		= "gpio1",
+	.mpu_irqs	= omap242x_gpio1_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap242x_gpio1_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2420_gpio1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_gpio1_slaves),
+	.class		= &omap242x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* gpio2 */
+static struct omap_hwmod_irq_info omap242x_gpio2_irqs[] = {
+	{ .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
+};
+
+static struct omap_hwmod_ocp_if *omap2420_gpio2_slaves[] = {
+	&omap2420_l4_wkup__gpio2,
+};
+
+static struct omap_hwmod omap2420_gpio2_hwmod = {
+	.name		= "gpio2",
+	.mpu_irqs	= omap242x_gpio2_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap242x_gpio2_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2420_gpio2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_gpio2_slaves),
+	.class		= &omap242x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* gpio3 */
+static struct omap_hwmod_irq_info omap242x_gpio3_irqs[] = {
+	{ .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
+};
+
+static struct omap_hwmod_ocp_if *omap2420_gpio3_slaves[] = {
+	&omap2420_l4_wkup__gpio3,
+};
+
+static struct omap_hwmod omap2420_gpio3_hwmod = {
+	.name		= "gpio3",
+	.mpu_irqs	= omap242x_gpio3_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap242x_gpio3_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2420_gpio3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_gpio3_slaves),
+	.class		= &omap242x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* gpio4 */
+static struct omap_hwmod_irq_info omap242x_gpio4_irqs[] = {
+	{ .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
+};
+
+static struct omap_hwmod_ocp_if *omap2420_gpio4_slaves[] = {
+	&omap2420_l4_wkup__gpio4,
+};
+
+static struct omap_hwmod omap2420_gpio4_hwmod = {
+	.name		= "gpio4",
+	.mpu_irqs	= omap242x_gpio4_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap242x_gpio4_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2420_gpio4_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_gpio4_slaves),
+	.class		= &omap242x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+};
+
+/* system dma */
+static struct omap_hwmod_class_sysconfig omap2420_dma_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x002c,
+	.syss_offs	= 0x0028,
+	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
+			   SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2420_dma_hwmod_class = {
+	.name = "dma",
+	.sysc = &omap2420_dma_sysc,
+};
+
+/* dma attributes */
+static struct omap_dma_dev_attr dma_dev_attr = {
+	.dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
+						IS_CSSA_32 | IS_CDSA_32,
+	.lch_count = 32,
+};
+
+static struct omap_hwmod_irq_info omap2420_dma_system_irqs[] = {
+	{ .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
+	{ .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
+	{ .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
+	{ .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
+};
+
+static struct omap_hwmod_addr_space omap2420_dma_system_addrs[] = {
+	{
+		.pa_start	= 0x48056000,
+		.pa_end		= 0x480560ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* dma_system -> L3 */
+static struct omap_hwmod_ocp_if omap2420_dma_system__l3 = {
+	.master		= &omap2420_dma_system_hwmod,
+	.slave		= &omap2420_l3_main_hwmod,
+	.clk		= "core_l3_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma_system master ports */
+static struct omap_hwmod_ocp_if *omap2420_dma_system_masters[] = {
+	&omap2420_dma_system__l3,
+};
+
+/* l4_core -> dma_system */
+static struct omap_hwmod_ocp_if omap2420_l4_core__dma_system = {
+	.master		= &omap2420_l4_core_hwmod,
+	.slave		= &omap2420_dma_system_hwmod,
+	.clk		= "sdma_ick",
+	.addr		= omap2420_dma_system_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap2420_dma_system_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma_system slave ports */
+static struct omap_hwmod_ocp_if *omap2420_dma_system_slaves[] = {
+	&omap2420_l4_core__dma_system,
+};
+
+static struct omap_hwmod omap2420_dma_system_hwmod = {
+	.name		= "dma",
+	.class		= &omap2420_dma_hwmod_class,
+	.mpu_irqs	= omap2420_dma_system_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap2420_dma_system_irqs),
+	.main_clk	= "core_l3_ck",
+	.slaves		= omap2420_dma_system_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2420_dma_system_slaves),
+	.masters	= omap2420_dma_system_masters,
+	.masters_cnt	= ARRAY_SIZE(omap2420_dma_system_masters),
+	.dev_attr	= &dma_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2420),
+	.flags		= HWMOD_NO_IDLEST,
+};
+
 static __initdata struct omap_hwmod *omap2420_hwmods[] = {
 	&omap2420_l3_main_hwmod,
 	&omap2420_l4_core_hwmod,
@@ -428,6 +874,17 @@
 	&omap2420_uart1_hwmod,
 	&omap2420_uart2_hwmod,
 	&omap2420_uart3_hwmod,
+	&omap2420_i2c1_hwmod,
+	&omap2420_i2c2_hwmod,
+
+	/* gpio class */
+	&omap2420_gpio1_hwmod,
+	&omap2420_gpio2_hwmod,
+	&omap2420_gpio3_hwmod,
+	&omap2420_gpio4_hwmod,
+
+	/* dma_system class */
+	&omap2420_dma_system_hwmod,
 	NULL,
 };
 
@@ -435,5 +892,3 @@
 {
 	return omap_hwmod_init(omap2420_hwmods);
 }
-
-
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 12d939e..8ecfbcd 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -16,11 +16,14 @@
 #include <plat/cpu.h>
 #include <plat/dma.h>
 #include <plat/serial.h>
+#include <plat/i2c.h>
+#include <plat/gpio.h>
 
 #include "omap_hwmod_common_data.h"
 
 #include "prm-regbits-24xx.h"
 #include "cm-regbits-24xx.h"
+#include "wd_timer.h"
 
 /*
  * OMAP2430 hardware module integration data
@@ -36,6 +39,12 @@
 static struct omap_hwmod omap2430_l3_main_hwmod;
 static struct omap_hwmod omap2430_l4_core_hwmod;
 static struct omap_hwmod omap2430_wd_timer2_hwmod;
+static struct omap_hwmod omap2430_gpio1_hwmod;
+static struct omap_hwmod omap2430_gpio2_hwmod;
+static struct omap_hwmod omap2430_gpio3_hwmod;
+static struct omap_hwmod omap2430_gpio4_hwmod;
+static struct omap_hwmod omap2430_gpio5_hwmod;
+static struct omap_hwmod omap2430_dma_system_hwmod;
 
 /* L3 -> L4_CORE interface */
 static struct omap_hwmod_ocp_if omap2430_l3_main__l4_core = {
@@ -77,6 +86,47 @@
 static struct omap_hwmod omap2430_uart1_hwmod;
 static struct omap_hwmod omap2430_uart2_hwmod;
 static struct omap_hwmod omap2430_uart3_hwmod;
+static struct omap_hwmod omap2430_i2c1_hwmod;
+static struct omap_hwmod omap2430_i2c2_hwmod;
+
+/* I2C IP block address space length (in bytes) */
+#define OMAP2_I2C_AS_LEN		128
+
+/* L4 CORE -> I2C1 interface */
+static struct omap_hwmod_addr_space omap2430_i2c1_addr_space[] = {
+	{
+		.pa_start	= 0x48070000,
+		.pa_end		= 0x48070000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__i2c1 = {
+	.master		= &omap2430_l4_core_hwmod,
+	.slave		= &omap2430_i2c1_hwmod,
+	.clk		= "i2c1_ick",
+	.addr		= omap2430_i2c1_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_i2c1_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> I2C2 interface */
+static struct omap_hwmod_addr_space omap2430_i2c2_addr_space[] = {
+	{
+		.pa_start	= 0x48072000,
+		.pa_end		= 0x48072000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__i2c2 = {
+	.master		= &omap2430_l4_core_hwmod,
+	.slave		= &omap2430_i2c2_hwmod,
+	.clk		= "i2c2_ick",
+	.addr		= omap2430_i2c2_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_i2c2_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
 
 /* L4_CORE -> L4_WKUP interface */
 static struct omap_hwmod_ocp_if omap2430_l4_core__l4_wkup = {
@@ -262,8 +312,9 @@
 };
 
 static struct omap_hwmod_class omap2430_wd_timer_hwmod_class = {
-	.name = "wd_timer",
-	.sysc = &omap2430_wd_timer_sysc,
+	.name		= "wd_timer",
+	.sysc		= &omap2430_wd_timer_sysc,
+	.pre_shutdown	= &omap2_wd_timer_disable
 };
 
 /* wd_timer2 */
@@ -418,6 +469,456 @@
 	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
 };
 
+/* I2C common */
+static struct omap_hwmod_class_sysconfig i2c_sysc = {
+	.rev_offs	= 0x00,
+	.sysc_offs	= 0x20,
+	.syss_offs	= 0x10,
+	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class i2c_class = {
+	.name		= "i2c",
+	.sysc		= &i2c_sysc,
+};
+
+static struct omap_i2c_dev_attr i2c_dev_attr = {
+	.fifo_depth	= 8, /* bytes */
+};
+
+/* I2C1 */
+
+static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
+	{ .irq = INT_24XX_I2C1_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
+	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_i2c1_slaves[] = {
+	&omap2430_l4_core__i2c1,
+};
+
+static struct omap_hwmod omap2430_i2c1_hwmod = {
+	.name		= "i2c1",
+	.mpu_irqs	= i2c1_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c1_mpu_irqs),
+	.sdma_reqs	= i2c1_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c1_sdma_reqs),
+	.main_clk	= "i2chs1_fck",
+	.prcm		= {
+		.omap2 = {
+			/*
+			 * NOTE: The CM_FCLKEN* and CM_ICLKEN* for
+			 * I2CHS IPs do not follow the usual pattern.
+			 * prcm_reg_id alone cannot be used to program
+			 * the iclk and fclk. Needs to be handled using
+			 * additional flags when clk handling is moved
+			 * to hwmod framework.
+			 */
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP2430_EN_I2CHS1_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP2430_ST_I2CHS1_SHIFT,
+		},
+	},
+	.slaves		= omap2430_i2c1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_i2c1_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* I2C2 */
+
+static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
+	{ .irq = INT_24XX_I2C2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
+	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap2430_i2c2_slaves[] = {
+	&omap2430_l4_core__i2c2,
+};
+
+static struct omap_hwmod omap2430_i2c2_hwmod = {
+	.name		= "i2c2",
+	.mpu_irqs	= i2c2_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c2_mpu_irqs),
+	.sdma_reqs	= i2c2_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c2_sdma_reqs),
+	.main_clk	= "i2chs2_fck",
+	.prcm		= {
+		.omap2 = {
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP2430_EN_I2CHS2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP2430_ST_I2CHS2_SHIFT,
+		},
+	},
+	.slaves		= omap2430_i2c2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_i2c2_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* l4_wkup -> gpio1 */
+static struct omap_hwmod_addr_space omap2430_gpio1_addr_space[] = {
+	{
+		.pa_start	= 0x4900C000,
+		.pa_end		= 0x4900C1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio1 = {
+	.master		= &omap2430_l4_wkup_hwmod,
+	.slave		= &omap2430_gpio1_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2430_gpio1_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_gpio1_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio2 */
+static struct omap_hwmod_addr_space omap2430_gpio2_addr_space[] = {
+	{
+		.pa_start	= 0x4900E000,
+		.pa_end		= 0x4900E1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio2 = {
+	.master		= &omap2430_l4_wkup_hwmod,
+	.slave		= &omap2430_gpio2_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2430_gpio2_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_gpio2_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio3 */
+static struct omap_hwmod_addr_space omap2430_gpio3_addr_space[] = {
+	{
+		.pa_start	= 0x49010000,
+		.pa_end		= 0x490101ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio3 = {
+	.master		= &omap2430_l4_wkup_hwmod,
+	.slave		= &omap2430_gpio3_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2430_gpio3_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_gpio3_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_wkup -> gpio4 */
+static struct omap_hwmod_addr_space omap2430_gpio4_addr_space[] = {
+	{
+		.pa_start	= 0x49012000,
+		.pa_end		= 0x490121ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_wkup__gpio4 = {
+	.master		= &omap2430_l4_wkup_hwmod,
+	.slave		= &omap2430_gpio4_hwmod,
+	.clk		= "gpios_ick",
+	.addr		= omap2430_gpio4_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_gpio4_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_core -> gpio5 */
+static struct omap_hwmod_addr_space omap2430_gpio5_addr_space[] = {
+	{
+		.pa_start	= 0x480B6000,
+		.pa_end		= 0x480B61ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap2430_l4_core__gpio5 = {
+	.master		= &omap2430_l4_core_hwmod,
+	.slave		= &omap2430_gpio5_hwmod,
+	.clk		= "gpio5_ick",
+	.addr		= omap2430_gpio5_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap2430_gpio5_addr_space),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio dev_attr */
+static struct omap_gpio_dev_attr gpio_dev_attr = {
+	.bank_width = 32,
+	.dbck_flag = false,
+};
+
+static struct omap_hwmod_class_sysconfig omap243x_gpio_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+/*
+ * 'gpio' class
+ * general purpose io module
+ */
+static struct omap_hwmod_class omap243x_gpio_hwmod_class = {
+	.name = "gpio",
+	.sysc = &omap243x_gpio_sysc,
+	.rev = 0,
+};
+
+/* gpio1 */
+static struct omap_hwmod_irq_info omap243x_gpio1_irqs[] = {
+	{ .irq = 29 }, /* INT_24XX_GPIO_BANK1 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_gpio1_slaves[] = {
+	&omap2430_l4_wkup__gpio1,
+};
+
+static struct omap_hwmod omap2430_gpio1_hwmod = {
+	.name		= "gpio1",
+	.mpu_irqs	= omap243x_gpio1_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap243x_gpio1_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2430_gpio1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_gpio1_slaves),
+	.class		= &omap243x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* gpio2 */
+static struct omap_hwmod_irq_info omap243x_gpio2_irqs[] = {
+	{ .irq = 30 }, /* INT_24XX_GPIO_BANK2 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_gpio2_slaves[] = {
+	&omap2430_l4_wkup__gpio2,
+};
+
+static struct omap_hwmod omap2430_gpio2_hwmod = {
+	.name		= "gpio2",
+	.mpu_irqs	= omap243x_gpio2_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap243x_gpio2_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2430_gpio2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_gpio2_slaves),
+	.class		= &omap243x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* gpio3 */
+static struct omap_hwmod_irq_info omap243x_gpio3_irqs[] = {
+	{ .irq = 31 }, /* INT_24XX_GPIO_BANK3 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_gpio3_slaves[] = {
+	&omap2430_l4_wkup__gpio3,
+};
+
+static struct omap_hwmod omap2430_gpio3_hwmod = {
+	.name		= "gpio3",
+	.mpu_irqs	= omap243x_gpio3_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap243x_gpio3_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2430_gpio3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_gpio3_slaves),
+	.class		= &omap243x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* gpio4 */
+static struct omap_hwmod_irq_info omap243x_gpio4_irqs[] = {
+	{ .irq = 32 }, /* INT_24XX_GPIO_BANK4 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_gpio4_slaves[] = {
+	&omap2430_l4_wkup__gpio4,
+};
+
+static struct omap_hwmod omap2430_gpio4_hwmod = {
+	.name		= "gpio4",
+	.mpu_irqs	= omap243x_gpio4_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap243x_gpio4_irqs),
+	.main_clk	= "gpios_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP24XX_EN_GPIOS_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP24XX_ST_GPIOS_SHIFT,
+		},
+	},
+	.slaves		= omap2430_gpio4_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_gpio4_slaves),
+	.class		= &omap243x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* gpio5 */
+static struct omap_hwmod_irq_info omap243x_gpio5_irqs[] = {
+	{ .irq = 33 }, /* INT_24XX_GPIO_BANK5 */
+};
+
+static struct omap_hwmod_ocp_if *omap2430_gpio5_slaves[] = {
+	&omap2430_l4_core__gpio5,
+};
+
+static struct omap_hwmod omap2430_gpio5_hwmod = {
+	.name		= "gpio5",
+	.mpu_irqs	= omap243x_gpio5_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap243x_gpio5_irqs),
+	.main_clk	= "gpio5_fck",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 2,
+			.module_bit = OMAP2430_EN_GPIO5_SHIFT,
+			.module_offs = CORE_MOD,
+			.idlest_reg_id = 2,
+			.idlest_idle_bit = OMAP2430_ST_GPIO5_SHIFT,
+		},
+	},
+	.slaves		= omap2430_gpio5_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_gpio5_slaves),
+	.class		= &omap243x_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+};
+
+/* dma_system */
+static struct omap_hwmod_class_sysconfig omap2430_dma_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x002c,
+	.syss_offs	= 0x0028,
+	.sysc_flags	= (SYSC_HAS_SOFTRESET | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE |
+			   SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap2430_dma_hwmod_class = {
+	.name = "dma",
+	.sysc = &omap2430_dma_sysc,
+};
+
+/* dma attributes */
+static struct omap_dma_dev_attr dma_dev_attr = {
+	.dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
+				IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
+	.lch_count = 32,
+};
+
+static struct omap_hwmod_irq_info omap2430_dma_system_irqs[] = {
+	{ .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
+	{ .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
+	{ .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
+	{ .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
+};
+
+static struct omap_hwmod_addr_space omap2430_dma_system_addrs[] = {
+	{
+		.pa_start	= 0x48056000,
+		.pa_end		= 0x480560ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* dma_system -> L3 */
+static struct omap_hwmod_ocp_if omap2430_dma_system__l3 = {
+	.master		= &omap2430_dma_system_hwmod,
+	.slave		= &omap2430_l3_main_hwmod,
+	.clk		= "core_l3_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma_system master ports */
+static struct omap_hwmod_ocp_if *omap2430_dma_system_masters[] = {
+	&omap2430_dma_system__l3,
+};
+
+/* l4_core -> dma_system */
+static struct omap_hwmod_ocp_if omap2430_l4_core__dma_system = {
+	.master		= &omap2430_l4_core_hwmod,
+	.slave		= &omap2430_dma_system_hwmod,
+	.clk		= "sdma_ick",
+	.addr		= omap2430_dma_system_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap2430_dma_system_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma_system slave ports */
+static struct omap_hwmod_ocp_if *omap2430_dma_system_slaves[] = {
+	&omap2430_l4_core__dma_system,
+};
+
+static struct omap_hwmod omap2430_dma_system_hwmod = {
+	.name		= "dma",
+	.class		= &omap2430_dma_hwmod_class,
+	.mpu_irqs	= omap2430_dma_system_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap2430_dma_system_irqs),
+	.main_clk	= "core_l3_ck",
+	.slaves		= omap2430_dma_system_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap2430_dma_system_slaves),
+	.masters	= omap2430_dma_system_masters,
+	.masters_cnt	= ARRAY_SIZE(omap2430_dma_system_masters),
+	.dev_attr	= &dma_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+	.flags		= HWMOD_NO_IDLEST,
+};
+
 static __initdata struct omap_hwmod *omap2430_hwmods[] = {
 	&omap2430_l3_main_hwmod,
 	&omap2430_l4_core_hwmod,
@@ -428,6 +929,18 @@
 	&omap2430_uart1_hwmod,
 	&omap2430_uart2_hwmod,
 	&omap2430_uart3_hwmod,
+	&omap2430_i2c1_hwmod,
+	&omap2430_i2c2_hwmod,
+
+	/* gpio class */
+	&omap2430_gpio1_hwmod,
+	&omap2430_gpio2_hwmod,
+	&omap2430_gpio3_hwmod,
+	&omap2430_gpio4_hwmod,
+	&omap2430_gpio5_hwmod,
+
+	/* dma_system class */
+	&omap2430_dma_system_hwmod,
 	NULL,
 };
 
@@ -435,5 +948,3 @@
 {
 	return omap_hwmod_init(omap2430_hwmods);
 }
-
-
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index cb97ecf..8d81813 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -18,11 +18,16 @@
 #include <plat/cpu.h>
 #include <plat/dma.h>
 #include <plat/serial.h>
+#include <plat/l4_3xxx.h>
+#include <plat/i2c.h>
+#include <plat/gpio.h>
+#include <plat/smartreflex.h>
 
 #include "omap_hwmod_common_data.h"
 
 #include "prm-regbits-34xx.h"
 #include "cm-regbits-34xx.h"
+#include "wd_timer.h"
 
 /*
  * OMAP3xxx hardware module integration data
@@ -39,6 +44,19 @@
 static struct omap_hwmod omap3xxx_l4_core_hwmod;
 static struct omap_hwmod omap3xxx_l4_per_hwmod;
 static struct omap_hwmod omap3xxx_wd_timer2_hwmod;
+static struct omap_hwmod omap3xxx_i2c1_hwmod;
+static struct omap_hwmod omap3xxx_i2c2_hwmod;
+static struct omap_hwmod omap3xxx_i2c3_hwmod;
+static struct omap_hwmod omap3xxx_gpio1_hwmod;
+static struct omap_hwmod omap3xxx_gpio2_hwmod;
+static struct omap_hwmod omap3xxx_gpio3_hwmod;
+static struct omap_hwmod omap3xxx_gpio4_hwmod;
+static struct omap_hwmod omap3xxx_gpio5_hwmod;
+static struct omap_hwmod omap3xxx_gpio6_hwmod;
+static struct omap_hwmod omap34xx_sr1_hwmod;
+static struct omap_hwmod omap34xx_sr2_hwmod;
+
+static struct omap_hwmod omap3xxx_dma_system_hwmod;
 
 /* L3 -> L4_CORE interface */
 static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_core = {
@@ -169,9 +187,125 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* I2C IP block address space length (in bytes) */
+#define OMAP2_I2C_AS_LEN		128
+
+/* L4 CORE -> I2C1 interface */
+static struct omap_hwmod_addr_space omap3xxx_i2c1_addr_space[] = {
+	{
+		.pa_start	= 0x48070000,
+		.pa_end		= 0x48070000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
+	.master		= &omap3xxx_l4_core_hwmod,
+	.slave		= &omap3xxx_i2c1_hwmod,
+	.clk		= "i2c1_ick",
+	.addr		= omap3xxx_i2c1_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_i2c1_addr_space),
+	.fw = {
+		.omap2 = {
+			.l4_fw_region  = OMAP3_L4_CORE_FW_I2C1_REGION,
+			.l4_prot_group = 7,
+			.flags	= OMAP_FIREWALL_L4,
+		}
+	},
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> I2C2 interface */
+static struct omap_hwmod_addr_space omap3xxx_i2c2_addr_space[] = {
+	{
+		.pa_start	= 0x48072000,
+		.pa_end		= 0x48072000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
+	.master		= &omap3xxx_l4_core_hwmod,
+	.slave		= &omap3xxx_i2c2_hwmod,
+	.clk		= "i2c2_ick",
+	.addr		= omap3xxx_i2c2_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_i2c2_addr_space),
+	.fw = {
+		.omap2 = {
+			.l4_fw_region  = OMAP3_L4_CORE_FW_I2C2_REGION,
+			.l4_prot_group = 7,
+			.flags = OMAP_FIREWALL_L4,
+		}
+	},
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> I2C3 interface */
+static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = {
+	{
+		.pa_start	= 0x48060000,
+		.pa_end		= 0x48060000 + OMAP2_I2C_AS_LEN - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
+	.master		= &omap3xxx_l4_core_hwmod,
+	.slave		= &omap3xxx_i2c3_hwmod,
+	.clk		= "i2c3_ick",
+	.addr		= omap3xxx_i2c3_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_i2c3_addr_space),
+	.fw = {
+		.omap2 = {
+			.l4_fw_region  = OMAP3_L4_CORE_FW_I2C3_REGION,
+			.l4_prot_group = 7,
+			.flags = OMAP_FIREWALL_L4,
+		}
+	},
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR1_BASE,
+		.pa_end		= OMAP34XX_SR1_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__sr1 = {
+	.master		= &omap3xxx_l4_core_hwmod,
+	.slave		= &omap34xx_sr1_hwmod,
+	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap3_sr1_addr_space),
+	.user		= OCP_USER_MPU,
+};
+
+/* L4 CORE -> SR2 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR2_BASE,
+		.pa_end		= OMAP34XX_SR2_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3_l4_core__sr2 = {
+	.master		= &omap3xxx_l4_core_hwmod,
+	.slave		= &omap34xx_sr2_hwmod,
+	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
+	.addr_cnt	= ARRAY_SIZE(omap3_sr2_addr_space),
+	.user		= OCP_USER_MPU,
+};
+
 /* Slave interfaces on the L4_CORE interconnect */
 static struct omap_hwmod_ocp_if *omap3xxx_l4_core_slaves[] = {
 	&omap3xxx_l3_main__l4_core,
+	&omap3_l4_core__sr1,
+	&omap3_l4_core__sr2,
 };
 
 /* Master interfaces on the L4_CORE interconnect */
@@ -179,6 +313,9 @@
 	&omap3xxx_l4_core__l4_wkup,
 	&omap3_l4_core__uart1,
 	&omap3_l4_core__uart2,
+	&omap3_l4_core__i2c1,
+	&omap3_l4_core__i2c2,
+	&omap3_l4_core__i2c3,
 };
 
 /* L4 CORE */
@@ -315,9 +452,22 @@
 	.sysc_fields    = &omap_hwmod_sysc_type1,
 };
 
+/* I2C common */
+static struct omap_hwmod_class_sysconfig i2c_sysc = {
+	.rev_offs	= 0x00,
+	.sysc_offs	= 0x20,
+	.syss_offs	= 0x10,
+	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
+			   SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
 static struct omap_hwmod_class omap3xxx_wd_timer_hwmod_class = {
-	.name = "wd_timer",
-	.sysc = &omap3xxx_wd_timer_sysc,
+	.name		= "wd_timer",
+	.sysc		= &omap3xxx_wd_timer_sysc,
+	.pre_shutdown	= &omap2_wd_timer_disable
 };
 
 /* wd_timer2 */
@@ -509,6 +659,703 @@
 	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
 };
 
+static struct omap_hwmod_class i2c_class = {
+	.name = "i2c",
+	.sysc = &i2c_sysc,
+};
+
+/* I2C1 */
+
+static struct omap_i2c_dev_attr i2c1_dev_attr = {
+	.fifo_depth	= 8, /* bytes */
+};
+
+static struct omap_hwmod_irq_info i2c1_mpu_irqs[] = {
+	{ .irq = INT_24XX_I2C1_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c1_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C1_TX },
+	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C1_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_i2c1_slaves[] = {
+	&omap3_l4_core__i2c1,
+};
+
+static struct omap_hwmod omap3xxx_i2c1_hwmod = {
+	.name		= "i2c1",
+	.mpu_irqs	= i2c1_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c1_mpu_irqs),
+	.sdma_reqs	= i2c1_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c1_sdma_reqs),
+	.main_clk	= "i2c1_fck",
+	.prcm		= {
+		.omap2 = {
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_I2C1_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_I2C1_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_i2c1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_i2c1_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c1_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* I2C2 */
+
+static struct omap_i2c_dev_attr i2c2_dev_attr = {
+	.fifo_depth	= 8, /* bytes */
+};
+
+static struct omap_hwmod_irq_info i2c2_mpu_irqs[] = {
+	{ .irq = INT_24XX_I2C2_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c2_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP24XX_DMA_I2C2_TX },
+	{ .name = "rx", .dma_req = OMAP24XX_DMA_I2C2_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_i2c2_slaves[] = {
+	&omap3_l4_core__i2c2,
+};
+
+static struct omap_hwmod omap3xxx_i2c2_hwmod = {
+	.name		= "i2c2",
+	.mpu_irqs	= i2c2_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c2_mpu_irqs),
+	.sdma_reqs	= i2c2_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c2_sdma_reqs),
+	.main_clk	= "i2c2_fck",
+	.prcm		= {
+		.omap2 = {
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_I2C2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_I2C2_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_i2c2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_i2c2_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c2_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* I2C3 */
+
+static struct omap_i2c_dev_attr i2c3_dev_attr = {
+	.fifo_depth	= 64, /* bytes */
+};
+
+static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
+	{ .irq = INT_34XX_I2C3_IRQ, },
+};
+
+static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = OMAP34XX_DMA_I2C3_TX },
+	{ .name = "rx", .dma_req = OMAP34XX_DMA_I2C3_RX },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_i2c3_slaves[] = {
+	&omap3_l4_core__i2c3,
+};
+
+static struct omap_hwmod omap3xxx_i2c3_hwmod = {
+	.name		= "i2c3",
+	.mpu_irqs	= i2c3_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(i2c3_mpu_irqs),
+	.sdma_reqs	= i2c3_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(i2c3_sdma_reqs),
+	.main_clk	= "i2c3_fck",
+	.prcm		= {
+		.omap2 = {
+			.module_offs = CORE_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_I2C3_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_I2C3_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_i2c3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_i2c3_slaves),
+	.class		= &i2c_class,
+	.dev_attr	= &i2c3_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* l4_wkup -> gpio1 */
+static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = {
+	{
+		.pa_start	= 0x48310000,
+		.pa_end		= 0x483101ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
+	.master		= &omap3xxx_l4_wkup_hwmod,
+	.slave		= &omap3xxx_gpio1_hwmod,
+	.addr		= omap3xxx_gpio1_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio1_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per -> gpio2 */
+static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = {
+	{
+		.pa_start	= 0x49050000,
+		.pa_end		= 0x490501ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
+	.master		= &omap3xxx_l4_per_hwmod,
+	.slave		= &omap3xxx_gpio2_hwmod,
+	.addr		= omap3xxx_gpio2_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio2_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per -> gpio3 */
+static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = {
+	{
+		.pa_start	= 0x49052000,
+		.pa_end		= 0x490521ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
+	.master		= &omap3xxx_l4_per_hwmod,
+	.slave		= &omap3xxx_gpio3_hwmod,
+	.addr		= omap3xxx_gpio3_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio3_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per -> gpio4 */
+static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = {
+	{
+		.pa_start	= 0x49054000,
+		.pa_end		= 0x490541ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
+	.master		= &omap3xxx_l4_per_hwmod,
+	.slave		= &omap3xxx_gpio4_hwmod,
+	.addr		= omap3xxx_gpio4_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio4_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per -> gpio5 */
+static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = {
+	{
+		.pa_start	= 0x49056000,
+		.pa_end		= 0x490561ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
+	.master		= &omap3xxx_l4_per_hwmod,
+	.slave		= &omap3xxx_gpio5_hwmod,
+	.addr		= omap3xxx_gpio5_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio5_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l4_per -> gpio6 */
+static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = {
+	{
+		.pa_start	= 0x49058000,
+		.pa_end		= 0x490581ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
+	.master		= &omap3xxx_l4_per_hwmod,
+	.slave		= &omap3xxx_gpio6_hwmod,
+	.addr		= omap3xxx_gpio6_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_gpio6_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/*
+ * 'gpio' class
+ * general purpose io module
+ */
+
+static struct omap_hwmod_class_sysconfig omap3xxx_gpio_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_gpio_hwmod_class = {
+	.name = "gpio",
+	.sysc = &omap3xxx_gpio_sysc,
+	.rev = 1,
+};
+
+/* gpio_dev_attr */
+static struct omap_gpio_dev_attr gpio_dev_attr = {
+	.bank_width = 32,
+	.dbck_flag = true,
+};
+
+/* gpio1 */
+static struct omap_hwmod_irq_info omap3xxx_gpio1_irqs[] = {
+	{ .irq = 29 }, /* INT_34XX_GPIO_BANK1 */
+};
+
+static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio1_dbck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_gpio1_slaves[] = {
+	&omap3xxx_l4_wkup__gpio1,
+};
+
+static struct omap_hwmod omap3xxx_gpio1_hwmod = {
+	.name		= "gpio1",
+	.mpu_irqs	= omap3xxx_gpio1_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio1_irqs),
+	.main_clk	= "gpio1_ick",
+	.opt_clks	= gpio1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_GPIO1_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_GPIO1_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_gpio1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio1_slaves),
+	.class		= &omap3xxx_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* gpio2 */
+static struct omap_hwmod_irq_info omap3xxx_gpio2_irqs[] = {
+	{ .irq = 30 }, /* INT_34XX_GPIO_BANK2 */
+};
+
+static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio2_dbck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_gpio2_slaves[] = {
+	&omap3xxx_l4_per__gpio2,
+};
+
+static struct omap_hwmod omap3xxx_gpio2_hwmod = {
+	.name		= "gpio2",
+	.mpu_irqs	= omap3xxx_gpio2_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio2_irqs),
+	.main_clk	= "gpio2_ick",
+	.opt_clks	= gpio2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_GPIO2_SHIFT,
+			.module_offs = OMAP3430_PER_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_GPIO2_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_gpio2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio2_slaves),
+	.class		= &omap3xxx_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* gpio3 */
+static struct omap_hwmod_irq_info omap3xxx_gpio3_irqs[] = {
+	{ .irq = 31 }, /* INT_34XX_GPIO_BANK3 */
+};
+
+static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio3_dbck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_gpio3_slaves[] = {
+	&omap3xxx_l4_per__gpio3,
+};
+
+static struct omap_hwmod omap3xxx_gpio3_hwmod = {
+	.name		= "gpio3",
+	.mpu_irqs	= omap3xxx_gpio3_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio3_irqs),
+	.main_clk	= "gpio3_ick",
+	.opt_clks	= gpio3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio3_opt_clks),
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_GPIO3_SHIFT,
+			.module_offs = OMAP3430_PER_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_GPIO3_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_gpio3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio3_slaves),
+	.class		= &omap3xxx_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* gpio4 */
+static struct omap_hwmod_irq_info omap3xxx_gpio4_irqs[] = {
+	{ .irq = 32 }, /* INT_34XX_GPIO_BANK4 */
+};
+
+static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio4_dbck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_gpio4_slaves[] = {
+	&omap3xxx_l4_per__gpio4,
+};
+
+static struct omap_hwmod omap3xxx_gpio4_hwmod = {
+	.name		= "gpio4",
+	.mpu_irqs	= omap3xxx_gpio4_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio4_irqs),
+	.main_clk	= "gpio4_ick",
+	.opt_clks	= gpio4_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio4_opt_clks),
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_GPIO4_SHIFT,
+			.module_offs = OMAP3430_PER_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_GPIO4_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_gpio4_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio4_slaves),
+	.class		= &omap3xxx_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* gpio5 */
+static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = {
+	{ .irq = 33 }, /* INT_34XX_GPIO_BANK5 */
+};
+
+static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio5_dbck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_gpio5_slaves[] = {
+	&omap3xxx_l4_per__gpio5,
+};
+
+static struct omap_hwmod omap3xxx_gpio5_hwmod = {
+	.name		= "gpio5",
+	.mpu_irqs	= omap3xxx_gpio5_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio5_irqs),
+	.main_clk	= "gpio5_ick",
+	.opt_clks	= gpio5_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio5_opt_clks),
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_GPIO5_SHIFT,
+			.module_offs = OMAP3430_PER_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_GPIO5_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_gpio5_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio5_slaves),
+	.class		= &omap3xxx_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* gpio6 */
+static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = {
+	{ .irq = 34 }, /* INT_34XX_GPIO_BANK6 */
+};
+
+static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio6_dbck", },
+};
+
+static struct omap_hwmod_ocp_if *omap3xxx_gpio6_slaves[] = {
+	&omap3xxx_l4_per__gpio6,
+};
+
+static struct omap_hwmod omap3xxx_gpio6_hwmod = {
+	.name		= "gpio6",
+	.mpu_irqs	= omap3xxx_gpio6_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_gpio6_irqs),
+	.main_clk	= "gpio6_ick",
+	.opt_clks	= gpio6_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio6_opt_clks),
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_GPIO6_SHIFT,
+			.module_offs = OMAP3430_PER_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_GPIO6_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_gpio6_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_gpio6_slaves),
+	.class		= &omap3xxx_gpio_hwmod_class,
+	.dev_attr	= &gpio_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+/* dma_system -> L3 */
+static struct omap_hwmod_ocp_if omap3xxx_dma_system__l3 = {
+	.master		= &omap3xxx_dma_system_hwmod,
+	.slave		= &omap3xxx_l3_main_hwmod,
+	.clk		= "core_l3_ick",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma attributes */
+static struct omap_dma_dev_attr dma_dev_attr = {
+	.dev_caps  = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
+				IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
+	.lch_count = 32,
+};
+
+static struct omap_hwmod_class_sysconfig omap3xxx_dma_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x002c,
+	.syss_offs	= 0x0028,
+	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_EMUFREE | SYSC_HAS_AUTOIDLE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3xxx_dma_hwmod_class = {
+	.name = "dma",
+	.sysc = &omap3xxx_dma_sysc,
+};
+
+/* dma_system */
+static struct omap_hwmod_irq_info omap3xxx_dma_system_irqs[] = {
+	{ .name = "0", .irq = 12 }, /* INT_24XX_SDMA_IRQ0 */
+	{ .name = "1", .irq = 13 }, /* INT_24XX_SDMA_IRQ1 */
+	{ .name = "2", .irq = 14 }, /* INT_24XX_SDMA_IRQ2 */
+	{ .name = "3", .irq = 15 }, /* INT_24XX_SDMA_IRQ3 */
+};
+
+static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
+	{
+		.pa_start	= 0x48056000,
+		.pa_end		= 0x480560ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* dma_system master ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dma_system_masters[] = {
+	&omap3xxx_dma_system__l3,
+};
+
+/* l4_core -> dma_system */
+static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
+	.master		= &omap3xxx_l4_core_hwmod,
+	.slave		= &omap3xxx_dma_system_hwmod,
+	.clk		= "core_l4_ick",
+	.addr		= omap3xxx_dma_system_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap3xxx_dma_system_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma_system slave ports */
+static struct omap_hwmod_ocp_if *omap3xxx_dma_system_slaves[] = {
+	&omap3xxx_l4_core__dma_system,
+};
+
+static struct omap_hwmod omap3xxx_dma_system_hwmod = {
+	.name		= "dma",
+	.class		= &omap3xxx_dma_hwmod_class,
+	.mpu_irqs	= omap3xxx_dma_system_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap3xxx_dma_system_irqs),
+	.main_clk	= "core_l3_ick",
+	.prcm = {
+		.omap2 = {
+			.module_offs		= CORE_MOD,
+			.prcm_reg_id		= 1,
+			.module_bit		= OMAP3430_ST_SDMA_SHIFT,
+			.idlest_reg_id		= 1,
+			.idlest_idle_bit	= OMAP3430_ST_SDMA_SHIFT,
+		},
+	},
+	.slaves		= omap3xxx_dma_system_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3xxx_dma_system_slaves),
+	.masters	= omap3xxx_dma_system_masters,
+	.masters_cnt	= ARRAY_SIZE(omap3xxx_dma_system_masters),
+	.dev_attr	= &dma_dev_attr,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.flags		= HWMOD_NO_IDLEST,
+};
+
+/* SR common */
+static struct omap_hwmod_sysc_fields omap34xx_sr_sysc_fields = {
+	.clkact_shift	= 20,
+};
+
+static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = {
+	.sysc_offs	= 0x24,
+	.sysc_flags	= (SYSC_HAS_CLOCKACTIVITY | SYSC_NO_CACHE),
+	.clockact	= CLOCKACT_TEST_ICLK,
+	.sysc_fields	= &omap34xx_sr_sysc_fields,
+};
+
+static struct omap_hwmod_class omap34xx_smartreflex_hwmod_class = {
+	.name = "smartreflex",
+	.sysc = &omap34xx_sr_sysc,
+	.rev  = 1,
+};
+
+static struct omap_hwmod_sysc_fields omap36xx_sr_sysc_fields = {
+	.sidle_shift	= 24,
+	.enwkup_shift	= 26
+};
+
+static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = {
+	.sysc_offs	= 0x38,
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
+			SYSC_NO_CACHE),
+	.sysc_fields	= &omap36xx_sr_sysc_fields,
+};
+
+static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = {
+	.name = "smartreflex",
+	.sysc = &omap36xx_sr_sysc,
+	.rev  = 2,
+};
+
+/* SR1 */
+static struct omap_hwmod_ocp_if *omap3_sr1_slaves[] = {
+	&omap3_l4_core__sr1,
+};
+
+static struct omap_hwmod omap34xx_sr1_hwmod = {
+	.name		= "sr1_hwmod",
+	.class		= &omap34xx_smartreflex_hwmod_class,
+	.main_clk	= "sr1_fck",
+	.vdd_name	= "mpu",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_SR1_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_EN_SR1_SHIFT,
+		},
+	},
+	.slaves		= omap3_sr1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3_sr1_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2 |
+					CHIP_IS_OMAP3430ES3_0 |
+					CHIP_IS_OMAP3430ES3_1),
+	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
+};
+
+static struct omap_hwmod omap36xx_sr1_hwmod = {
+	.name		= "sr1_hwmod",
+	.class		= &omap36xx_smartreflex_hwmod_class,
+	.main_clk	= "sr1_fck",
+	.vdd_name	= "mpu",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_SR1_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_EN_SR1_SHIFT,
+		},
+	},
+	.slaves		= omap3_sr1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3_sr1_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
+};
+
+/* SR2 */
+static struct omap_hwmod_ocp_if *omap3_sr2_slaves[] = {
+	&omap3_l4_core__sr2,
+};
+
+static struct omap_hwmod omap34xx_sr2_hwmod = {
+	.name		= "sr2_hwmod",
+	.class		= &omap34xx_smartreflex_hwmod_class,
+	.main_clk	= "sr2_fck",
+	.vdd_name	= "core",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_SR2_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_EN_SR2_SHIFT,
+		},
+	},
+	.slaves		= omap3_sr2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3_sr2_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES2 |
+					CHIP_IS_OMAP3430ES3_0 |
+					CHIP_IS_OMAP3430ES3_1),
+	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
+};
+
+static struct omap_hwmod omap36xx_sr2_hwmod = {
+	.name		= "sr2_hwmod",
+	.class		= &omap36xx_smartreflex_hwmod_class,
+	.main_clk	= "sr2_fck",
+	.vdd_name	= "core",
+	.prcm		= {
+		.omap2 = {
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_EN_SR2_SHIFT,
+			.module_offs = WKUP_MOD,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_EN_SR2_SHIFT,
+		},
+	},
+	.slaves		= omap3_sr2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap3_sr2_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3630ES1),
+};
+
 static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
 	&omap3xxx_l3_main_hwmod,
 	&omap3xxx_l4_core_hwmod,
@@ -521,6 +1368,25 @@
 	&omap3xxx_uart2_hwmod,
 	&omap3xxx_uart3_hwmod,
 	&omap3xxx_uart4_hwmod,
+	&omap3xxx_i2c1_hwmod,
+	&omap3xxx_i2c2_hwmod,
+	&omap3xxx_i2c3_hwmod,
+	&omap34xx_sr1_hwmod,
+	&omap34xx_sr2_hwmod,
+	&omap36xx_sr1_hwmod,
+	&omap36xx_sr2_hwmod,
+
+	/* gpio class */
+	&omap3xxx_gpio1_hwmod,
+	&omap3xxx_gpio2_hwmod,
+	&omap3xxx_gpio3_hwmod,
+	&omap3xxx_gpio4_hwmod,
+	&omap3xxx_gpio5_hwmod,
+	&omap3xxx_gpio6_hwmod,
+
+	/* dma_system class */
+	&omap3xxx_dma_system_hwmod,
 	NULL,
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 7274db4..c2806bd 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -22,11 +22,16 @@
 
 #include <plat/omap_hwmod.h>
 #include <plat/cpu.h>
+#include <plat/gpio.h>
+#include <plat/dma.h>
 
 #include "omap_hwmod_common_data.h"
 
-#include "cm.h"
+#include "cm1_44xx.h"
+#include "cm2_44xx.h"
+#include "prm44xx.h"
 #include "prm-regbits-44xx.h"
+#include "wd_timer.h"
 
 /* Base offset for all OMAP4 interrupts external to MPUSS */
 #define OMAP44XX_IRQ_GIC_START	32
@@ -35,8 +40,11 @@
 #define OMAP44XX_DMA_REQ_START  1
 
 /* Backward references (IPs with Bus Master capability) */
+static struct omap_hwmod omap44xx_dma_system_hwmod;
 static struct omap_hwmod omap44xx_dmm_hwmod;
+static struct omap_hwmod omap44xx_dsp_hwmod;
 static struct omap_hwmod omap44xx_emif_fw_hwmod;
+static struct omap_hwmod omap44xx_iva_hwmod;
 static struct omap_hwmod omap44xx_l3_instr_hwmod;
 static struct omap_hwmod omap44xx_l3_main_1_hwmod;
 static struct omap_hwmod omap44xx_l3_main_2_hwmod;
@@ -58,7 +66,7 @@
  * instance(s): dmm
  */
 static struct omap_hwmod_class omap44xx_dmm_hwmod_class = {
-	.name = "dmm",
+	.name	= "dmm",
 };
 
 /* dmm interface data */
@@ -67,7 +75,15 @@
 	.master		= &omap44xx_l3_main_1_hwmod,
 	.slave		= &omap44xx_dmm_hwmod,
 	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+	.user		= OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dmm_addrs[] = {
+	{
+		.pa_start	= 0x4e000000,
+		.pa_end		= 0x4e0007ff,
+		.flags		= ADDR_TYPE_RT
+	},
 };
 
 /* mpu -> dmm */
@@ -75,7 +91,9 @@
 	.master		= &omap44xx_mpu_hwmod,
 	.slave		= &omap44xx_dmm_hwmod,
 	.clk		= "l3_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+	.addr		= omap44xx_dmm_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_dmm_addrs),
+	.user		= OCP_USER_MPU,
 };
 
 /* dmm slave ports */
@@ -103,7 +121,7 @@
  * instance(s): emif_fw
  */
 static struct omap_hwmod_class omap44xx_emif_fw_hwmod_class = {
-	.name = "emif_fw",
+	.name	= "emif_fw",
 };
 
 /* emif_fw interface data */
@@ -115,12 +133,22 @@
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+static struct omap_hwmod_addr_space omap44xx_emif_fw_addrs[] = {
+	{
+		.pa_start	= 0x4a20c000,
+		.pa_end		= 0x4a20c0ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
 /* l4_cfg -> emif_fw */
 static struct omap_hwmod_ocp_if omap44xx_l4_cfg__emif_fw = {
 	.master		= &omap44xx_l4_cfg_hwmod,
 	.slave		= &omap44xx_emif_fw_hwmod,
 	.clk		= "l4_div_ck",
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+	.addr		= omap44xx_emif_fw_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_emif_fw_addrs),
+	.user		= OCP_USER_MPU,
 };
 
 /* emif_fw slave ports */
@@ -142,10 +170,18 @@
  * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3
  */
 static struct omap_hwmod_class omap44xx_l3_hwmod_class = {
-	.name = "l3",
+	.name	= "l3",
 };
 
 /* l3_instr interface data */
+/* iva -> l3_instr */
+static struct omap_hwmod_ocp_if omap44xx_iva__l3_instr = {
+	.master		= &omap44xx_iva_hwmod,
+	.slave		= &omap44xx_l3_instr_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l3_main_3 -> l3_instr */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_3__l3_instr = {
 	.master		= &omap44xx_l3_main_3_hwmod,
@@ -156,6 +192,7 @@
 
 /* l3_instr slave ports */
 static struct omap_hwmod_ocp_if *omap44xx_l3_instr_slaves[] = {
+	&omap44xx_iva__l3_instr,
 	&omap44xx_l3_main_3__l3_instr,
 };
 
@@ -167,6 +204,15 @@
 	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
+/* l3_main_1 interface data */
+/* dsp -> l3_main_1 */
+static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = {
+	.master		= &omap44xx_dsp_hwmod,
+	.slave		= &omap44xx_l3_main_1_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l3_main_2 -> l3_main_1 */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = {
 	.master		= &omap44xx_l3_main_2_hwmod,
@@ -193,6 +239,7 @@
 
 /* l3_main_1 slave ports */
 static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = {
+	&omap44xx_dsp__l3_main_1,
 	&omap44xx_l3_main_2__l3_main_1,
 	&omap44xx_l4_cfg__l3_main_1,
 	&omap44xx_mpu__l3_main_1,
@@ -207,6 +254,22 @@
 };
 
 /* l3_main_2 interface data */
+/* dma_system -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
+	.master		= &omap44xx_dma_system_hwmod,
+	.slave		= &omap44xx_l3_main_2_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* iva -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_iva__l3_main_2 = {
+	.master		= &omap44xx_iva_hwmod,
+	.slave		= &omap44xx_l3_main_2_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l3_main_1 -> l3_main_2 */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = {
 	.master		= &omap44xx_l3_main_1_hwmod,
@@ -225,6 +288,8 @@
 
 /* l3_main_2 slave ports */
 static struct omap_hwmod_ocp_if *omap44xx_l3_main_2_slaves[] = {
+	&omap44xx_dma_system__l3_main_2,
+	&omap44xx_iva__l3_main_2,
 	&omap44xx_l3_main_1__l3_main_2,
 	&omap44xx_l4_cfg__l3_main_2,
 };
@@ -282,10 +347,18 @@
  * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup
  */
 static struct omap_hwmod_class omap44xx_l4_hwmod_class = {
-	.name = "l4",
+	.name	= "l4",
 };
 
 /* l4_abe interface data */
+/* dsp -> l4_abe */
+static struct omap_hwmod_ocp_if omap44xx_dsp__l4_abe = {
+	.master		= &omap44xx_dsp_hwmod,
+	.slave		= &omap44xx_l4_abe_hwmod,
+	.clk		= "ocp_abe_iclk",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l3_main_1 -> l4_abe */
 static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_abe = {
 	.master		= &omap44xx_l3_main_1_hwmod,
@@ -304,6 +377,7 @@
 
 /* l4_abe slave ports */
 static struct omap_hwmod_ocp_if *omap44xx_l4_abe_slaves[] = {
+	&omap44xx_dsp__l4_abe,
 	&omap44xx_l3_main_1__l4_abe,
 	&omap44xx_mpu__l4_abe,
 };
@@ -387,7 +461,7 @@
  * instance(s): mpu_private
  */
 static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = {
-	.name = "mpu_bus",
+	.name	= "mpu_bus",
 };
 
 /* mpu_private interface data */
@@ -413,12 +487,960 @@
 };
 
 /*
+ * Module omap_hwmod structures
+ *
+ * The following IPs are excluded for the moment because:
+ * - They do not need explicit SW control via the omap_hwmod API, or
+ * - They still need to be validated with a driver properly adapted
+ *   to omap_hwmod / omap_device.
+ *
+ *  aess
+ *  bandgap
+ *  c2c
+ *  c2c_target_fw
+ *  cm_core
+ *  cm_core_aon
+ *  counter_32k
+ *  ctrl_module_core
+ *  ctrl_module_pad_core
+ *  ctrl_module_pad_wkup
+ *  ctrl_module_wkup
+ *  debugss
+ *  dmic
+ *  dss
+ *  dss_dispc
+ *  dss_dsi1
+ *  dss_dsi2
+ *  dss_hdmi
+ *  dss_rfbi
+ *  dss_venc
+ *  efuse_ctrl_cust
+ *  efuse_ctrl_std
+ *  elm
+ *  emif1
+ *  emif2
+ *  fdif
+ *  gpmc
+ *  gpu
+ *  hdq1w
+ *  hsi
+ *  ipu
+ *  iss
+ *  kbd
+ *  mailbox
+ *  mcasp
+ *  mcbsp1
+ *  mcbsp2
+ *  mcbsp3
+ *  mcbsp4
+ *  mcpdm
+ *  mcspi1
+ *  mcspi2
+ *  mcspi3
+ *  mcspi4
+ *  mmc1
+ *  mmc2
+ *  mmc3
+ *  mmc4
+ *  mmc5
+ *  mpu_c0
+ *  mpu_c1
+ *  ocmc_ram
+ *  ocp2scp_usb_phy
+ *  ocp_wp_noc
+ *  prcm
+ *  prcm_mpu
+ *  prm
+ *  scrm
+ *  sl2if
+ *  slimbus1
+ *  slimbus2
+ *  spinlock
+ *  timer1
+ *  timer10
+ *  timer11
+ *  timer2
+ *  timer3
+ *  timer4
+ *  timer5
+ *  timer6
+ *  timer7
+ *  timer8
+ *  timer9
+ *  usb_host_fs
+ *  usb_host_hs
+ *  usb_otg_hs
+ *  usb_phy_cm
+ *  usb_tll_hs
+ *  usim
+ */
+
+/*
+ * 'dma' class
+ * dma controller for memory-to-memory transfers (i.e. internal or external
+ * memory) and transfers between gp peripherals and memory in either direction
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_dma_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x002c,
+	.syss_offs	= 0x0028,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_dma_hwmod_class = {
+	.name	= "dma",
+	.sysc	= &omap44xx_dma_sysc,
+};
+
+/* dma dev_attr */
+static struct omap_dma_dev_attr dma_dev_attr = {
+	.dev_caps	= RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY |
+			  IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY,
+	.lch_count	= 32,
+};
+
+/* dma_system */
+static struct omap_hwmod_irq_info omap44xx_dma_system_irqs[] = {
+	{ .name = "0", .irq = 12 + OMAP44XX_IRQ_GIC_START },
+	{ .name = "1", .irq = 13 + OMAP44XX_IRQ_GIC_START },
+	{ .name = "2", .irq = 14 + OMAP44XX_IRQ_GIC_START },
+	{ .name = "3", .irq = 15 + OMAP44XX_IRQ_GIC_START },
+};
+
+/* dma_system master ports */
+static struct omap_hwmod_ocp_if *omap44xx_dma_system_masters[] = {
+	&omap44xx_dma_system__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = {
+	{
+		.pa_start	= 0x4a056000,
+		.pa_end		= 0x4a0560ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_cfg -> dma_system */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dma_system = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_dma_system_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_dma_system_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_dma_system_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dma_system slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dma_system_slaves[] = {
+	&omap44xx_l4_cfg__dma_system,
+};
+
+static struct omap_hwmod omap44xx_dma_system_hwmod = {
+	.name		= "dma_system",
+	.class		= &omap44xx_dma_hwmod_class,
+	.mpu_irqs	= omap44xx_dma_system_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_dma_system_irqs),
+	.main_clk	= "l3_div_ck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_SDMA_SDMA_CLKCTRL,
+		},
+	},
+	.dev_attr	= &dma_dev_attr,
+	.slaves		= omap44xx_dma_system_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_dma_system_slaves),
+	.masters	= omap44xx_dma_system_masters,
+	.masters_cnt	= ARRAY_SIZE(omap44xx_dma_system_masters),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'dsp' class
+ * dsp sub-system
+ */
+
+static struct omap_hwmod_class omap44xx_dsp_hwmod_class = {
+	.name	= "dsp",
+};
+
+/* dsp */
+static struct omap_hwmod_irq_info omap44xx_dsp_irqs[] = {
+	{ .irq = 28 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_rst_info omap44xx_dsp_resets[] = {
+	{ .name = "mmu_cache", .rst_shift = 1 },
+};
+
+static struct omap_hwmod_rst_info omap44xx_dsp_c0_resets[] = {
+	{ .name = "dsp", .rst_shift = 0 },
+};
+
+/* dsp -> iva */
+static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
+	.master		= &omap44xx_dsp_hwmod,
+	.slave		= &omap44xx_iva_hwmod,
+	.clk		= "dpll_iva_m5x2_ck",
+};
+
+/* dsp master ports */
+static struct omap_hwmod_ocp_if *omap44xx_dsp_masters[] = {
+	&omap44xx_dsp__l3_main_1,
+	&omap44xx_dsp__l4_abe,
+	&omap44xx_dsp__iva,
+};
+
+/* l4_cfg -> dsp */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dsp = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_dsp_hwmod,
+	.clk		= "l4_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* dsp slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_dsp_slaves[] = {
+	&omap44xx_l4_cfg__dsp,
+};
+
+/* Pseudo hwmod for reset control purpose only */
+static struct omap_hwmod omap44xx_dsp_c0_hwmod = {
+	.name		= "dsp_c0",
+	.class		= &omap44xx_dsp_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.rst_lines	= omap44xx_dsp_c0_resets,
+	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_dsp_c0_resets),
+	.prcm = {
+		.omap4 = {
+			.rstctrl_reg = OMAP4430_RM_TESLA_RSTCTRL,
+		},
+	},
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct omap_hwmod omap44xx_dsp_hwmod = {
+	.name		= "dsp",
+	.class		= &omap44xx_dsp_hwmod_class,
+	.mpu_irqs	= omap44xx_dsp_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_dsp_irqs),
+	.rst_lines	= omap44xx_dsp_resets,
+	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_dsp_resets),
+	.main_clk	= "dsp_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
+			.rstctrl_reg = OMAP4430_RM_TESLA_RSTCTRL,
+		},
+	},
+	.slaves		= omap44xx_dsp_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_dsp_slaves),
+	.masters	= omap44xx_dsp_masters,
+	.masters_cnt	= ARRAY_SIZE(omap44xx_dsp_masters),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'gpio' class
+ * general purpose io module
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_gpio_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0114,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_gpio_hwmod_class = {
+	.name	= "gpio",
+	.sysc	= &omap44xx_gpio_sysc,
+	.rev	= 2,
+};
+
+/* gpio dev_attr */
+static struct omap_gpio_dev_attr gpio_dev_attr = {
+	.bank_width	= 32,
+	.dbck_flag	= true,
+};
+
+/* gpio1 */
+static struct omap_hwmod omap44xx_gpio1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_gpio1_irqs[] = {
+	{ .irq = 29 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = {
+	{
+		.pa_start	= 0x4a310000,
+		.pa_end		= 0x4a3101ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_wkup -> gpio1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_wkup__gpio1 = {
+	.master		= &omap44xx_l4_wkup_hwmod,
+	.slave		= &omap44xx_gpio1_hwmod,
+	.clk		= "l4_wkup_clk_mux_ck",
+	.addr		= omap44xx_gpio1_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_gpio1_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpio1_slaves[] = {
+	&omap44xx_l4_wkup__gpio1,
+};
+
+static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio1_dbclk" },
+};
+
+static struct omap_hwmod omap44xx_gpio1_hwmod = {
+	.name		= "gpio1",
+	.class		= &omap44xx_gpio_hwmod_class,
+	.mpu_irqs	= omap44xx_gpio1_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_gpio1_irqs),
+	.main_clk	= "gpio1_ick",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+		},
+	},
+	.opt_clks	= gpio1_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+	.slaves		= omap44xx_gpio1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_gpio1_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* gpio2 */
+static struct omap_hwmod omap44xx_gpio2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_gpio2_irqs[] = {
+	{ .irq = 30 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = {
+	{
+		.pa_start	= 0x48055000,
+		.pa_end		= 0x480551ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> gpio2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio2 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_gpio2_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_gpio2_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_gpio2_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpio2_slaves[] = {
+	&omap44xx_l4_per__gpio2,
+};
+
+static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio2_dbclk" },
+};
+
+static struct omap_hwmod omap44xx_gpio2_hwmod = {
+	.name		= "gpio2",
+	.class		= &omap44xx_gpio_hwmod_class,
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.mpu_irqs	= omap44xx_gpio2_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_gpio2_irqs),
+	.main_clk	= "gpio2_ick",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+		},
+	},
+	.opt_clks	= gpio2_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+	.slaves		= omap44xx_gpio2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_gpio2_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* gpio3 */
+static struct omap_hwmod omap44xx_gpio3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_gpio3_irqs[] = {
+	{ .irq = 31 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = {
+	{
+		.pa_start	= 0x48057000,
+		.pa_end		= 0x480571ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> gpio3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio3 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_gpio3_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_gpio3_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_gpio3_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpio3_slaves[] = {
+	&omap44xx_l4_per__gpio3,
+};
+
+static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio3_dbclk" },
+};
+
+static struct omap_hwmod omap44xx_gpio3_hwmod = {
+	.name		= "gpio3",
+	.class		= &omap44xx_gpio_hwmod_class,
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.mpu_irqs	= omap44xx_gpio3_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_gpio3_irqs),
+	.main_clk	= "gpio3_ick",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+		},
+	},
+	.opt_clks	= gpio3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio3_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+	.slaves		= omap44xx_gpio3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_gpio3_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* gpio4 */
+static struct omap_hwmod omap44xx_gpio4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_gpio4_irqs[] = {
+	{ .irq = 32 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = {
+	{
+		.pa_start	= 0x48059000,
+		.pa_end		= 0x480591ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> gpio4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio4 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_gpio4_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_gpio4_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_gpio4_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpio4_slaves[] = {
+	&omap44xx_l4_per__gpio4,
+};
+
+static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio4_dbclk" },
+};
+
+static struct omap_hwmod omap44xx_gpio4_hwmod = {
+	.name		= "gpio4",
+	.class		= &omap44xx_gpio_hwmod_class,
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.mpu_irqs	= omap44xx_gpio4_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_gpio4_irqs),
+	.main_clk	= "gpio4_ick",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+		},
+	},
+	.opt_clks	= gpio4_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio4_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+	.slaves		= omap44xx_gpio4_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_gpio4_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* gpio5 */
+static struct omap_hwmod omap44xx_gpio5_hwmod;
+static struct omap_hwmod_irq_info omap44xx_gpio5_irqs[] = {
+	{ .irq = 33 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = {
+	{
+		.pa_start	= 0x4805b000,
+		.pa_end		= 0x4805b1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> gpio5 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio5 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_gpio5_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_gpio5_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_gpio5_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio5 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpio5_slaves[] = {
+	&omap44xx_l4_per__gpio5,
+};
+
+static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio5_dbclk" },
+};
+
+static struct omap_hwmod omap44xx_gpio5_hwmod = {
+	.name		= "gpio5",
+	.class		= &omap44xx_gpio_hwmod_class,
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.mpu_irqs	= omap44xx_gpio5_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_gpio5_irqs),
+	.main_clk	= "gpio5_ick",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+		},
+	},
+	.opt_clks	= gpio5_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio5_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+	.slaves		= omap44xx_gpio5_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_gpio5_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* gpio6 */
+static struct omap_hwmod omap44xx_gpio6_hwmod;
+static struct omap_hwmod_irq_info omap44xx_gpio6_irqs[] = {
+	{ .irq = 34 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = {
+	{
+		.pa_start	= 0x4805d000,
+		.pa_end		= 0x4805d1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> gpio6 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio6 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_gpio6_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_gpio6_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_gpio6_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* gpio6 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_gpio6_slaves[] = {
+	&omap44xx_l4_per__gpio6,
+};
+
+static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
+	{ .role = "dbclk", .clk = "gpio6_dbclk" },
+};
+
+static struct omap_hwmod omap44xx_gpio6_hwmod = {
+	.name		= "gpio6",
+	.class		= &omap44xx_gpio_hwmod_class,
+	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
+	.mpu_irqs	= omap44xx_gpio6_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_gpio6_irqs),
+	.main_clk	= "gpio6_ick",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+		},
+	},
+	.opt_clks	= gpio6_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(gpio6_opt_clks),
+	.dev_attr	= &gpio_dev_attr,
+	.slaves		= omap44xx_gpio6_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_gpio6_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'i2c' class
+ * multimaster high-speed i2c controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = {
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0090,
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_i2c_hwmod_class = {
+	.name	= "i2c",
+	.sysc	= &omap44xx_i2c_sysc,
+};
+
+/* i2c1 */
+static struct omap_hwmod omap44xx_i2c1_hwmod;
+static struct omap_hwmod_irq_info omap44xx_i2c1_irqs[] = {
+	{ .irq = 56 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_i2c1_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = 26 + OMAP44XX_DMA_REQ_START },
+	{ .name = "rx", .dma_req = 27 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = {
+	{
+		.pa_start	= 0x48070000,
+		.pa_end		= 0x480700ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> i2c1 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c1 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_i2c1_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_i2c1_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_i2c1_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* i2c1 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_i2c1_slaves[] = {
+	&omap44xx_l4_per__i2c1,
+};
+
+static struct omap_hwmod omap44xx_i2c1_hwmod = {
+	.name		= "i2c1",
+	.class		= &omap44xx_i2c_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.mpu_irqs	= omap44xx_i2c1_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_i2c1_irqs),
+	.sdma_reqs	= omap44xx_i2c1_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(omap44xx_i2c1_sdma_reqs),
+	.main_clk	= "i2c1_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_I2C1_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_i2c1_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_i2c1_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* i2c2 */
+static struct omap_hwmod omap44xx_i2c2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_i2c2_irqs[] = {
+	{ .irq = 57 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_i2c2_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = 28 + OMAP44XX_DMA_REQ_START },
+	{ .name = "rx", .dma_req = 29 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = {
+	{
+		.pa_start	= 0x48072000,
+		.pa_end		= 0x480720ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> i2c2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c2 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_i2c2_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_i2c2_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_i2c2_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* i2c2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_i2c2_slaves[] = {
+	&omap44xx_l4_per__i2c2,
+};
+
+static struct omap_hwmod omap44xx_i2c2_hwmod = {
+	.name		= "i2c2",
+	.class		= &omap44xx_i2c_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.mpu_irqs	= omap44xx_i2c2_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_i2c2_irqs),
+	.sdma_reqs	= omap44xx_i2c2_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(omap44xx_i2c2_sdma_reqs),
+	.main_clk	= "i2c2_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_I2C2_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_i2c2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_i2c2_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* i2c3 */
+static struct omap_hwmod omap44xx_i2c3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_i2c3_irqs[] = {
+	{ .irq = 61 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_i2c3_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = 24 + OMAP44XX_DMA_REQ_START },
+	{ .name = "rx", .dma_req = 25 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = {
+	{
+		.pa_start	= 0x48060000,
+		.pa_end		= 0x480600ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> i2c3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c3 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_i2c3_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_i2c3_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_i2c3_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* i2c3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_i2c3_slaves[] = {
+	&omap44xx_l4_per__i2c3,
+};
+
+static struct omap_hwmod omap44xx_i2c3_hwmod = {
+	.name		= "i2c3",
+	.class		= &omap44xx_i2c_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.mpu_irqs	= omap44xx_i2c3_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_i2c3_irqs),
+	.sdma_reqs	= omap44xx_i2c3_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(omap44xx_i2c3_sdma_reqs),
+	.main_clk	= "i2c3_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_I2C3_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_i2c3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_i2c3_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* i2c4 */
+static struct omap_hwmod omap44xx_i2c4_hwmod;
+static struct omap_hwmod_irq_info omap44xx_i2c4_irqs[] = {
+	{ .irq = 62 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_dma_info omap44xx_i2c4_sdma_reqs[] = {
+	{ .name = "tx", .dma_req = 123 + OMAP44XX_DMA_REQ_START },
+	{ .name = "rx", .dma_req = 124 + OMAP44XX_DMA_REQ_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = {
+	{
+		.pa_start	= 0x48350000,
+		.pa_end		= 0x483500ff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_per -> i2c4 */
+static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c4 = {
+	.master		= &omap44xx_l4_per_hwmod,
+	.slave		= &omap44xx_i2c4_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_i2c4_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_i2c4_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* i2c4 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_i2c4_slaves[] = {
+	&omap44xx_l4_per__i2c4,
+};
+
+static struct omap_hwmod omap44xx_i2c4_hwmod = {
+	.name		= "i2c4",
+	.class		= &omap44xx_i2c_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.mpu_irqs	= omap44xx_i2c4_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_i2c4_irqs),
+	.sdma_reqs	= omap44xx_i2c4_sdma_reqs,
+	.sdma_reqs_cnt	= ARRAY_SIZE(omap44xx_i2c4_sdma_reqs),
+	.main_clk	= "i2c4_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_L4PER_I2C4_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_i2c4_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_i2c4_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
+ * 'iva' class
+ * multi-standard video encoder/decoder hardware accelerator
+ */
+
+static struct omap_hwmod_class omap44xx_iva_hwmod_class = {
+	.name	= "iva",
+};
+
+/* iva */
+static struct omap_hwmod_irq_info omap44xx_iva_irqs[] = {
+	{ .name = "sync_1", .irq = 103 + OMAP44XX_IRQ_GIC_START },
+	{ .name = "sync_0", .irq = 104 + OMAP44XX_IRQ_GIC_START },
+	{ .name = "mailbox_0", .irq = 107 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_rst_info omap44xx_iva_resets[] = {
+	{ .name = "logic", .rst_shift = 2 },
+};
+
+static struct omap_hwmod_rst_info omap44xx_iva_seq0_resets[] = {
+	{ .name = "seq0", .rst_shift = 0 },
+};
+
+static struct omap_hwmod_rst_info omap44xx_iva_seq1_resets[] = {
+	{ .name = "seq1", .rst_shift = 1 },
+};
+
+/* iva master ports */
+static struct omap_hwmod_ocp_if *omap44xx_iva_masters[] = {
+	&omap44xx_iva__l3_main_2,
+	&omap44xx_iva__l3_instr,
+};
+
+static struct omap_hwmod_addr_space omap44xx_iva_addrs[] = {
+	{
+		.pa_start	= 0x5a000000,
+		.pa_end		= 0x5a07ffff,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l3_main_2 -> iva */
+static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iva = {
+	.master		= &omap44xx_l3_main_2_hwmod,
+	.slave		= &omap44xx_iva_hwmod,
+	.clk		= "l3_div_ck",
+	.addr		= omap44xx_iva_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_iva_addrs),
+	.user		= OCP_USER_MPU,
+};
+
+/* iva slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_iva_slaves[] = {
+	&omap44xx_dsp__iva,
+	&omap44xx_l3_main_2__iva,
+};
+
+/* Pseudo hwmod for reset control purpose only */
+static struct omap_hwmod omap44xx_iva_seq0_hwmod = {
+	.name		= "iva_seq0",
+	.class		= &omap44xx_iva_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.rst_lines	= omap44xx_iva_seq0_resets,
+	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_seq0_resets),
+	.prcm = {
+		.omap4 = {
+			.rstctrl_reg = OMAP4430_RM_IVAHD_RSTCTRL,
+		},
+	},
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* Pseudo hwmod for reset control purpose only */
+static struct omap_hwmod omap44xx_iva_seq1_hwmod = {
+	.name		= "iva_seq1",
+	.class		= &omap44xx_iva_hwmod_class,
+	.flags		= HWMOD_INIT_NO_RESET,
+	.rst_lines	= omap44xx_iva_seq1_resets,
+	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_seq1_resets),
+	.prcm = {
+		.omap4 = {
+			.rstctrl_reg = OMAP4430_RM_IVAHD_RSTCTRL,
+		},
+	},
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+static struct omap_hwmod omap44xx_iva_hwmod = {
+	.name		= "iva",
+	.class		= &omap44xx_iva_hwmod_class,
+	.mpu_irqs	= omap44xx_iva_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_iva_irqs),
+	.rst_lines	= omap44xx_iva_resets,
+	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_resets),
+	.main_clk	= "iva_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
+			.rstctrl_reg = OMAP4430_RM_IVAHD_RSTCTRL,
+		},
+	},
+	.slaves		= omap44xx_iva_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_iva_slaves),
+	.masters	= omap44xx_iva_masters,
+	.masters_cnt	= ARRAY_SIZE(omap44xx_iva_masters),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/*
  * 'mpu' class
  * mpu sub-system
  */
 
 static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
-	.name = "mpu",
+	.name	= "mpu",
 };
 
 /* mpu */
@@ -453,19 +1475,167 @@
 };
 
 /*
- * 'wd_timer' class
- * 32-bit watchdog upward counter that generates a pulse on the reset pin on
- * overflow condition
+ * 'smartreflex' class
+ * smartreflex module (monitors silicon performance and outputs a measure of
+ * performance error)
  */
 
-static struct omap_hwmod_class_sysconfig omap44xx_wd_timer_sysc = {
-	.rev_offs	= 0x0000,
-	.sysc_offs	= 0x0010,
-	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_SIDLEMODE | SYSC_HAS_EMUFREE |
-			   SYSC_HAS_SOFTRESET),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
-	.sysc_fields	= &omap_hwmod_sysc_type1,
+/* The IP is not compliant with the type1 / type2 scheme */
+static struct omap_hwmod_sysc_fields omap_hwmod_sysc_type_smartreflex = {
+	.sidle_shift	= 24,
+	.enwkup_shift	= 26,
+};
+
+static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = {
+	.sysc_offs	= 0x0038,
+	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type_smartreflex,
+};
+
+static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
+	.name	= "smartreflex",
+	.sysc	= &omap44xx_smartreflex_sysc,
+	.rev	= 2,
+};
+
+/* smartreflex_core */
+static struct omap_hwmod omap44xx_smartreflex_core_hwmod;
+static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = {
+	{ .irq = 19 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = {
+	{
+		.pa_start	= 0x4a0dd000,
+		.pa_end		= 0x4a0dd03f,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_cfg -> smartreflex_core */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_smartreflex_core_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_smartreflex_core_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_smartreflex_core_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* smartreflex_core slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_smartreflex_core_slaves[] = {
+	&omap44xx_l4_cfg__smartreflex_core,
+};
+
+static struct omap_hwmod omap44xx_smartreflex_core_hwmod = {
+	.name		= "smartreflex_core",
+	.class		= &omap44xx_smartreflex_hwmod_class,
+	.mpu_irqs	= omap44xx_smartreflex_core_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_smartreflex_core_irqs),
+	.main_clk	= "smartreflex_core_fck",
+	.vdd_name	= "core",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_smartreflex_core_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_smartreflex_core_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* smartreflex_iva */
+static struct omap_hwmod omap44xx_smartreflex_iva_hwmod;
+static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = {
+	{ .irq = 102 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = {
+	{
+		.pa_start	= 0x4a0db000,
+		.pa_end		= 0x4a0db03f,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_cfg -> smartreflex_iva */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_smartreflex_iva_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_smartreflex_iva_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_smartreflex_iva_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* smartreflex_iva slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_smartreflex_iva_slaves[] = {
+	&omap44xx_l4_cfg__smartreflex_iva,
+};
+
+static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = {
+	.name		= "smartreflex_iva",
+	.class		= &omap44xx_smartreflex_hwmod_class,
+	.mpu_irqs	= omap44xx_smartreflex_iva_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_smartreflex_iva_irqs),
+	.main_clk	= "smartreflex_iva_fck",
+	.vdd_name	= "iva",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_smartreflex_iva_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_smartreflex_iva_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* smartreflex_mpu */
+static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod;
+static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = {
+	{ .irq = 18 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = {
+	{
+		.pa_start	= 0x4a0d9000,
+		.pa_end		= 0x4a0d903f,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_cfg -> smartreflex_mpu */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_smartreflex_mpu_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_smartreflex_mpu_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_smartreflex_mpu_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* smartreflex_mpu slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_smartreflex_mpu_slaves[] = {
+	&omap44xx_l4_cfg__smartreflex_mpu,
+};
+
+static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = {
+	.name		= "smartreflex_mpu",
+	.class		= &omap44xx_smartreflex_hwmod_class,
+	.mpu_irqs	= omap44xx_smartreflex_mpu_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_smartreflex_mpu_irqs),
+	.main_clk	= "smartreflex_mpu_fck",
+	.vdd_name	= "mpu",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_smartreflex_mpu_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_smartreflex_mpu_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
 /*
@@ -477,34 +1647,17 @@
 	.rev_offs	= 0x0050,
 	.sysc_offs	= 0x0054,
 	.syss_offs	= 0x0058,
-	.sysc_flags	= (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
-static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = {
-	.name = "wd_timer",
-	.sysc = &omap44xx_wd_timer_sysc,
-};
-
-/* wd_timer2 */
-static struct omap_hwmod omap44xx_wd_timer2_hwmod;
-static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = {
-	{ .irq = 80 + OMAP44XX_IRQ_GIC_START },
-};
-
-static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
-	{
-		.pa_start	= 0x4a314000,
-		.pa_end		= 0x4a31407f,
-		.flags		= ADDR_TYPE_RT
-	},
-};
-
 static struct omap_hwmod_class omap44xx_uart_hwmod_class = {
-	.name = "uart",
-	.sysc = &omap44xx_uart_sysc,
+	.name	= "uart",
+	.sysc	= &omap44xx_uart_sysc,
 };
 
 /* uart1 */
@@ -578,51 +1731,6 @@
 	},
 };
 
-/* l4_wkup -> wd_timer2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = {
-	.master		= &omap44xx_l4_wkup_hwmod,
-	.slave		= &omap44xx_wd_timer2_hwmod,
-	.clk		= "l4_wkup_clk_mux_ck",
-	.addr		= omap44xx_wd_timer2_addrs,
-	.addr_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_addrs),
-	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* wd_timer2 slave ports */
-static struct omap_hwmod_ocp_if *omap44xx_wd_timer2_slaves[] = {
-	&omap44xx_l4_wkup__wd_timer2,
-};
-
-static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
-	.name		= "wd_timer2",
-	.class		= &omap44xx_wd_timer_hwmod_class,
-	.mpu_irqs	= omap44xx_wd_timer2_irqs,
-	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_irqs),
-	.main_clk	= "wd_timer2_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
-		},
-	},
-	.slaves		= omap44xx_wd_timer2_slaves,
-	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_slaves),
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
-/* wd_timer3 */
-static struct omap_hwmod omap44xx_wd_timer3_hwmod;
-static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = {
-	{ .irq = 36 + OMAP44XX_IRQ_GIC_START },
-};
-
-static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
-	{
-		.pa_start	= 0x40130000,
-		.pa_end		= 0x4013007f,
-		.flags		= ADDR_TYPE_RT
-	},
-};
-
 /* l4_per -> uart2 */
 static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = {
 	.master		= &omap44xx_l4_per_hwmod,
@@ -675,25 +1783,6 @@
 	},
 };
 
-/* l4_abe -> wd_timer3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_wd_timer3_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.addr		= omap44xx_wd_timer3_addrs,
-	.addr_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_addrs),
-	.user		= OCP_USER_MPU,
-};
-
-/* l4_abe -> wd_timer3 (dma) */
-static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = {
-	{
-		.pa_start	= 0x49030000,
-		.pa_end		= 0x4903007f,
-		.flags		= ADDR_TYPE_RT
-	},
-};
-
 /* l4_per -> uart3 */
 static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = {
 	.master		= &omap44xx_l4_per_hwmod,
@@ -747,37 +1836,6 @@
 	},
 };
 
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = {
-	.master		= &omap44xx_l4_abe_hwmod,
-	.slave		= &omap44xx_wd_timer3_hwmod,
-	.clk		= "ocp_abe_iclk",
-	.addr		= omap44xx_wd_timer3_dma_addrs,
-	.addr_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_dma_addrs),
-	.user		= OCP_USER_SDMA,
-};
-
-/* wd_timer3 slave ports */
-static struct omap_hwmod_ocp_if *omap44xx_wd_timer3_slaves[] = {
-	&omap44xx_l4_abe__wd_timer3,
-	&omap44xx_l4_abe__wd_timer3_dma,
-};
-
-static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
-	.name		= "wd_timer3",
-	.class		= &omap44xx_wd_timer_hwmod_class,
-	.mpu_irqs	= omap44xx_wd_timer3_irqs,
-	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_irqs),
-	.main_clk	= "wd_timer3_fck",
-	.prcm = {
-		.omap4 = {
-			.clkctrl_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
-		},
-	},
-	.slaves		= omap44xx_wd_timer3_slaves,
-	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_slaves),
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-};
-
 /* l4_per -> uart4 */
 static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = {
 	.master		= &omap44xx_l4_per_hwmod,
@@ -811,35 +1869,205 @@
 	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
 };
 
+/*
+ * 'wd_timer' class
+ * 32-bit watchdog upward counter that generates a pulse on the reset pin on
+ * overflow condition
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_wd_timer_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.syss_offs	= 0x0014,
+	.sysc_flags	= (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE |
+			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   SIDLE_SMART_WKUP),
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = {
+	.name		= "wd_timer",
+	.sysc		= &omap44xx_wd_timer_sysc,
+	.pre_shutdown	= &omap2_wd_timer_disable,
+};
+
+/* wd_timer2 */
+static struct omap_hwmod omap44xx_wd_timer2_hwmod;
+static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = {
+	{ .irq = 80 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = {
+	{
+		.pa_start	= 0x4a314000,
+		.pa_end		= 0x4a31407f,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_wkup -> wd_timer2 */
+static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = {
+	.master		= &omap44xx_l4_wkup_hwmod,
+	.slave		= &omap44xx_wd_timer2_hwmod,
+	.clk		= "l4_wkup_clk_mux_ck",
+	.addr		= omap44xx_wd_timer2_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_addrs),
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* wd_timer2 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_wd_timer2_slaves[] = {
+	&omap44xx_l4_wkup__wd_timer2,
+};
+
+static struct omap_hwmod omap44xx_wd_timer2_hwmod = {
+	.name		= "wd_timer2",
+	.class		= &omap44xx_wd_timer_hwmod_class,
+	.mpu_irqs	= omap44xx_wd_timer2_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_irqs),
+	.main_clk	= "wd_timer2_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_wd_timer2_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer2_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
+/* wd_timer3 */
+static struct omap_hwmod omap44xx_wd_timer3_hwmod;
+static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = {
+	{ .irq = 36 + OMAP44XX_IRQ_GIC_START },
+};
+
+static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = {
+	{
+		.pa_start	= 0x40130000,
+		.pa_end		= 0x4013007f,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_abe -> wd_timer3 */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = {
+	.master		= &omap44xx_l4_abe_hwmod,
+	.slave		= &omap44xx_wd_timer3_hwmod,
+	.clk		= "ocp_abe_iclk",
+	.addr		= omap44xx_wd_timer3_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_addrs),
+	.user		= OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = {
+	{
+		.pa_start	= 0x49030000,
+		.pa_end		= 0x4903007f,
+		.flags		= ADDR_TYPE_RT
+	},
+};
+
+/* l4_abe -> wd_timer3 (dma) */
+static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = {
+	.master		= &omap44xx_l4_abe_hwmod,
+	.slave		= &omap44xx_wd_timer3_hwmod,
+	.clk		= "ocp_abe_iclk",
+	.addr		= omap44xx_wd_timer3_dma_addrs,
+	.addr_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_dma_addrs),
+	.user		= OCP_USER_SDMA,
+};
+
+/* wd_timer3 slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_wd_timer3_slaves[] = {
+	&omap44xx_l4_abe__wd_timer3,
+	&omap44xx_l4_abe__wd_timer3_dma,
+};
+
+static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
+	.name		= "wd_timer3",
+	.class		= &omap44xx_wd_timer_hwmod_class,
+	.mpu_irqs	= omap44xx_wd_timer3_irqs,
+	.mpu_irqs_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_irqs),
+	.main_clk	= "wd_timer3_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
+		},
+	},
+	.slaves		= omap44xx_wd_timer3_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_slaves),
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+};
+
 static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
+
 	/* dmm class */
 	&omap44xx_dmm_hwmod,
+
 	/* emif_fw class */
 	&omap44xx_emif_fw_hwmod,
+
 	/* l3 class */
 	&omap44xx_l3_instr_hwmod,
 	&omap44xx_l3_main_1_hwmod,
 	&omap44xx_l3_main_2_hwmod,
 	&omap44xx_l3_main_3_hwmod,
+
 	/* l4 class */
 	&omap44xx_l4_abe_hwmod,
 	&omap44xx_l4_cfg_hwmod,
 	&omap44xx_l4_per_hwmod,
 	&omap44xx_l4_wkup_hwmod,
+
 	/* mpu_bus class */
 	&omap44xx_mpu_private_hwmod,
 
+	/* dma class */
+	&omap44xx_dma_system_hwmod,
+
+	/* dsp class */
+	&omap44xx_dsp_hwmod,
+	&omap44xx_dsp_c0_hwmod,
+
+	/* gpio class */
+	&omap44xx_gpio1_hwmod,
+	&omap44xx_gpio2_hwmod,
+	&omap44xx_gpio3_hwmod,
+	&omap44xx_gpio4_hwmod,
+	&omap44xx_gpio5_hwmod,
+	&omap44xx_gpio6_hwmod,
+
+	/* i2c class */
+	&omap44xx_i2c1_hwmod,
+	&omap44xx_i2c2_hwmod,
+	&omap44xx_i2c3_hwmod,
+	&omap44xx_i2c4_hwmod,
+
+	/* iva class */
+	&omap44xx_iva_hwmod,
+	&omap44xx_iva_seq0_hwmod,
+	&omap44xx_iva_seq1_hwmod,
+
 	/* mpu class */
 	&omap44xx_mpu_hwmod,
-	/* wd_timer class */
-	&omap44xx_wd_timer2_hwmod,
-	&omap44xx_wd_timer3_hwmod,
+
+	/* smartreflex class */
+	&omap44xx_smartreflex_core_hwmod,
+	&omap44xx_smartreflex_iva_hwmod,
+	&omap44xx_smartreflex_mpu_hwmod,
 
 	/* uart class */
 	&omap44xx_uart1_hwmod,
 	&omap44xx_uart2_hwmod,
 	&omap44xx_uart3_hwmod,
 	&omap44xx_uart4_hwmod,
+
+	/* wd_timer class */
+	&omap44xx_wd_timer2_hwmod,
+	&omap44xx_wd_timer3_hwmod,
+
 	NULL,
 };
 
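As a rough illustration of how the hwmod entries above are consumed, here is a minimal sketch, not part of this patch, assuming the omap_hwmod_lookup()/omap_hwmod_enable()/omap_hwmod_idle() helpers declared in <plat/omap_hwmod.h> in this generation of the tree:

#include <linux/errno.h>
#include <linux/init.h>

#include <plat/omap_hwmod.h>

/* Illustrative only: enable and idle one of the hwmods registered above. */
static int __init example_poke_gpio1_hwmod(void)
{
	struct omap_hwmod *oh;

	oh = omap_hwmod_lookup("gpio1");	/* name taken from the data above */
	if (!oh)
		return -ENODEV;

	omap_hwmod_enable(oh);	/* main_clk, opt_clks and SYSCONFIG handled per class data */
	/* ... access the gpio1 register space at 0x4a310000 ... */
	omap_hwmod_idle(oh);

	return 0;
}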
diff --git a/arch/arm/mach-omap2/omap_opp_data.h b/arch/arm/mach-omap2/omap_opp_data.h
new file mode 100644
index 0000000..46ac27d
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_opp_data.h
@@ -0,0 +1,72 @@
+/*
+ * OMAP SoC specific OPP Data helpers
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ *	Kevin Hilman
+ * Copyright (C) 2010 Nokia Corporation.
+ *      Eduardo Valentin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ARCH_ARM_MACH_OMAP2_OMAP_OPP_DATA_H
+#define __ARCH_ARM_MACH_OMAP2_OMAP_OPP_DATA_H
+
+#include <plat/omap_hwmod.h>
+
+/*
+ * *BIG FAT WARNING*:
+ * USE the following ONLY in opp data initialization common to an SoC.
+ * DO NOT USE these in board files/pm core etc.
+ */
+
+/**
+ * struct omap_opp_def - OMAP OPP Definition
+ * @hwmod_name:	Name of the hwmod for this domain
+ * @freq:	Frequency in hertz corresponding to this OPP
+ * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
+ * @default_available:	True/false - is this OPP available by default
+ *
+ * OMAP SoCs have a standard set of tuples consisting of frequency and voltage
+ * pairs that the device supports per voltage domain. These are called
+ * Operating Points, or OPPs. The actual definitions of OMAP Operating Points
+ * vary across silicon within the same family of devices. For a specific
+ * domain, the set of {frequency, voltage} pairs is denoted by an array of
+ * omap_opp_def. As the kernel boots and more information becomes available,
+ * a subset of these is activated based on the precise device the kernel is
+ * booting on. Note that each IP belonging to a voltage domain may define its
+ * own set of OPPs on top of this, but that is handled by the appropriate
+ * driver.
+ */
+struct omap_opp_def {
+	char *hwmod_name;
+
+	unsigned long freq;
+	unsigned long u_volt;
+
+	bool default_available;
+};
+
+/*
+ * Initialization wrapper used to define an OPP for OMAP variants.
+ */
+#define OPP_INITIALIZER(_hwmod_name, _enabled, _freq, _uv)	\
+{								\
+	.hwmod_name	= _hwmod_name,				\
+	.default_available	= _enabled,			\
+	.freq		= _freq,				\
+	.u_volt		= _uv,					\
+}
+
+/* Use this to initialize the default table */
+extern int __init omap_init_opp_table(struct omap_opp_def *opp_def,
+		u32 opp_def_size);
+
+#endif		/* __ARCH_ARM_MACH_OMAP2_OMAP_OPP_DATA_H */
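The OPP_INITIALIZER() macro and omap_init_opp_table() declared above would typically be used from an SoC-specific OPP data file. A hypothetical sketch follows; the hwmod name, frequencies and voltages are placeholders for illustration, not real silicon data:

#include <linux/init.h>
#include <linux/kernel.h>

#include "omap_opp_data.h"

static struct omap_opp_def __initdata example_opp_def_list[] = {
	/* MPU OPP1 - available by default */
	OPP_INITIALIZER("mpu", true, 300000000, 1025000),
	/* MPU OPP2 - left disabled until validated */
	OPP_INITIALIZER("mpu", false, 600000000, 1200000),
};

static int __init example_opp_init(void)
{
	/* register the table with the OPP library via the wrapper above */
	return omap_init_opp_table(example_opp_def_list,
				   ARRAY_SIZE(example_opp_def_list));
}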
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
new file mode 100644
index 0000000..745252c
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -0,0 +1,149 @@
+/*
+  * This file configures the internal USB PHY in OMAP4430. It is used
+  * with the TWL6030 transceiver and MUSB on OMAP4430.
+  *
+  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * Author: Hema HK <hemahk@ti.com>
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write to the Free Software
+  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+  *
+  */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/usb.h>
+
+#include <plat/usb.h>
+
+/* OMAP control module register for UTMI PHY */
+#define CONTROL_DEV_CONF		0x300
+#define PHY_PD				0x1
+
+#define USBOTGHS_CONTROL		0x33c
+#define	AVALID				BIT(0)
+#define	BVALID				BIT(1)
+#define	VBUSVALID			BIT(2)
+#define	SESSEND				BIT(3)
+#define	IDDIG				BIT(4)
+
+static struct clk *phyclk, *clk48m, *clk32k;
+static void __iomem *ctrl_base;
+
+int omap4430_phy_init(struct device *dev)
+{
+	ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K);
+	if (!ctrl_base) {
+		dev_err(dev, "control module ioremap failed\n");
+		return -ENOMEM;
+	}
+	/* Power down the phy */
+	__raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+	phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
+
+	if (IS_ERR(phyclk)) {
+		dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n");
+		iounmap(ctrl_base);
+		return PTR_ERR(phyclk);
+	}
+
+	clk48m = clk_get(dev, "ocp2scp_usb_phy_phy_48m");
+	if (IS_ERR(clk48m)) {
+		dev_err(dev, "cannot clk_get ocp2scp_usb_phy_phy_48m\n");
+		clk_put(phyclk);
+		iounmap(ctrl_base);
+		return PTR_ERR(clk48m);
+	}
+
+	clk32k = clk_get(dev, "usb_phy_cm_clk32k");
+	if (IS_ERR(clk32k)) {
+		dev_err(dev, "cannot clk_get usb_phy_cm_clk32k\n");
+		clk_put(phyclk);
+		clk_put(clk48m);
+		iounmap(ctrl_base);
+		return PTR_ERR(clk32k);
+	}
+	return 0;
+}
+
+int omap4430_phy_set_clk(struct device *dev, int on)
+{
+	static int state;
+
+	if (on && !state) {
+		/* Enable the phy clocks */
+		clk_enable(phyclk);
+		clk_enable(clk48m);
+		clk_enable(clk32k);
+		state = 1;
+	} else if (state) {
+		/* Disable the phy clocks */
+		clk_disable(phyclk);
+		clk_disable(clk48m);
+		clk_disable(clk32k);
+		state = 0;
+	}
+	return 0;
+}
+
+int omap4430_phy_power(struct device *dev, int ID, int on)
+{
+	if (on) {
+		/* enable the PHY clocks */
+		omap4430_phy_set_clk(dev, 1);
+		/* power on the phy */
+		if (__raw_readl(ctrl_base + CONTROL_DEV_CONF) & PHY_PD) {
+			__raw_writel(~PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+			mdelay(200);
+		}
+		if (ID)
+			/* enable VBUS valid, IDDIG grounded */
+			__raw_writel(AVALID | VBUSVALID, ctrl_base +
+							USBOTGHS_CONTROL);
+		else
+			/*
+			 * Enable VBUS Valid, AValid and IDDIG
+			 * high impedance
+			 */
+			__raw_writel(IDDIG | AVALID | VBUSVALID,
+						ctrl_base + USBOTGHS_CONTROL);
+	} else {
+		/* Enable session END and set IDDIG to high impedance. */
+		__raw_writel(SESSEND | IDDIG, ctrl_base +
+					USBOTGHS_CONTROL);
+		/* Disable the clocks */
+		omap4430_phy_set_clk(dev, 0);
+		/* Power down the phy */
+		__raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
+	}
+
+	return 0;
+}
+
+int omap4430_phy_exit(struct device *dev)
+{
+	if (ctrl_base)
+		iounmap(ctrl_base);
+	if (phyclk)
+		clk_put(phyclk);
+	if (clk48m)
+		clk_put(clk48m);
+	if (clk32k)
+		clk_put(clk32k);
+
+	return 0;
+}
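A rough sketch of the intended calling order for the helpers above, assuming the MUSB/TWL6030 glue passes its own struct device; in practice these helpers are normally wired up through board or platform data rather than called directly like this:

#include <linux/device.h>

static int example_phy_session(struct device *dev)
{
	int ret;

	ret = omap4430_phy_init(dev);	/* map the control module, get clocks */
	if (ret)
		return ret;

	omap4430_phy_power(dev, 0, 1);	/* power up; ID=0 keeps IDDIG high (B-device) */
	/* ... USB session runs ... */
	omap4430_phy_power(dev, 0, 0);	/* end the session, power the PHY back down */

	return omap4430_phy_exit(dev);	/* drop clock references and unmap */
}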
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
new file mode 100644
index 0000000..00e1d2b
--- /dev/null
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -0,0 +1,279 @@
+/**
+ * OMAP and TWL PMIC specific initializations.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated.
+ * Thara Gopinath
+ * Copyright (C) 2009 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Copyright (C) 2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/i2c/twl.h>
+
+#include <plat/voltage.h>
+
+#include "pm.h"
+
+#define OMAP3_SRI2C_SLAVE_ADDR		0x12
+#define OMAP3_VDD_MPU_SR_CONTROL_REG	0x00
+#define OMAP3_VDD_CORE_SR_CONTROL_REG	0x01
+#define OMAP3_VP_CONFIG_ERROROFFSET	0x00
+#define OMAP3_VP_VSTEPMIN_VSTEPMIN	0x1
+#define OMAP3_VP_VSTEPMAX_VSTEPMAX	0x04
+#define OMAP3_VP_VLIMITTO_TIMEOUT_US	200
+
+#define OMAP3430_VP1_VLIMITTO_VDDMIN	0x14
+#define OMAP3430_VP1_VLIMITTO_VDDMAX	0x42
+#define OMAP3430_VP2_VLIMITTO_VDDMIN	0x18
+#define OMAP3430_VP2_VLIMITTO_VDDMAX	0x2c
+
+#define OMAP3630_VP1_VLIMITTO_VDDMIN	0x18
+#define OMAP3630_VP1_VLIMITTO_VDDMAX	0x3c
+#define OMAP3630_VP2_VLIMITTO_VDDMIN	0x18
+#define OMAP3630_VP2_VLIMITTO_VDDMAX	0x30
+
+#define OMAP4_SRI2C_SLAVE_ADDR		0x12
+#define OMAP4_VDD_MPU_SR_VOLT_REG	0x55
+#define OMAP4_VDD_IVA_SR_VOLT_REG	0x5B
+#define OMAP4_VDD_CORE_SR_VOLT_REG	0x61
+
+#define OMAP4_VP_CONFIG_ERROROFFSET	0x00
+#define OMAP4_VP_VSTEPMIN_VSTEPMIN	0x01
+#define OMAP4_VP_VSTEPMAX_VSTEPMAX	0x04
+#define OMAP4_VP_VLIMITTO_TIMEOUT_US	200
+
+#define OMAP4_VP_MPU_VLIMITTO_VDDMIN	0xA
+#define OMAP4_VP_MPU_VLIMITTO_VDDMAX	0x39
+#define OMAP4_VP_IVA_VLIMITTO_VDDMIN	0xA
+#define OMAP4_VP_IVA_VLIMITTO_VDDMAX	0x2D
+#define OMAP4_VP_CORE_VLIMITTO_VDDMIN	0xA
+#define OMAP4_VP_CORE_VLIMITTO_VDDMAX	0x28
+
+static bool is_offset_valid;
+static u8 smps_offset;
+
+#define REG_SMPS_OFFSET         0xE0
+
+static unsigned long twl4030_vsel_to_uv(const u8 vsel)
+{
+	return (((vsel * 125) + 6000)) * 100;
+}
+
+static u8 twl4030_uv_to_vsel(unsigned long uv)
+{
+	return DIV_ROUND_UP(uv - 600000, 12500);
+}
+
+static unsigned long twl6030_vsel_to_uv(const u8 vsel)
+{
+	/*
+	 * In TWL6030, depending on the value of the SMPS_OFFSET
+	 * efuse register, the voltage range supported in
+	 * standard mode is either 0.6V - 1.3V or 0.7V - 1.4V.
+	 * In TWL6030 ES1.0 the SMPS_OFFSET efuse is programmed
+	 * to all 0s, whereas from TWL6030 ES1.1 onwards the
+	 * efuse is programmed to 1.
+	 */
+	if (!is_offset_valid) {
+		twl_i2c_read_u8(TWL6030_MODULE_ID0, &smps_offset,
+				REG_SMPS_OFFSET);
+		is_offset_valid = true;
+	}
+
+	/*
+	 * There is no specific formula for voltage to vsel
+	 * conversion above 1.3V; such voltages use special
+	 * hardcoded values instead. Currently only 1.35 V is
+	 * hardcoded, which is used for the 1 GHz OPP on
+	 * OMAP4430.
+	 */
+	if (vsel == 0x3A)
+		return 1350000;
+
+	if (smps_offset & 0x8)
+		return ((((vsel - 1) * 125) + 7000)) * 100;
+	else
+		return ((((vsel - 1) * 125) + 6000)) * 100;
+}
+
+static u8 twl6030_uv_to_vsel(unsigned long uv)
+{
+	/*
+	 * In TWL6030, depending on the value of the SMPS_OFFSET
+	 * efuse register, the voltage range supported in
+	 * standard mode is either 0.6V - 1.3V or 0.7V - 1.4V.
+	 * In TWL6030 ES1.0 the SMPS_OFFSET efuse is programmed
+	 * to all 0s, whereas from TWL6030 ES1.1 onwards the
+	 * efuse is programmed to 1.
+	 */
+	if (!is_offset_valid) {
+		twl_i2c_read_u8(TWL6030_MODULE_ID0, &smps_offset,
+				REG_SMPS_OFFSET);
+		is_offset_valid = true;
+	}
+
+	/*
+	 * There is no specific formula for voltage to vsel
+	 * conversion above 1.3V; such voltages use special
+	 * hardcoded values instead. Currently only 1.35 V is
+	 * hardcoded, which is used for the 1 GHz OPP on
+	 * OMAP4430.
+	 */
+	if (uv == 1350000)
+		return 0x3A;
+
+	if (smps_offset & 0x8)
+		return DIV_ROUND_UP(uv - 700000, 12500) + 1;
+	else
+		return DIV_ROUND_UP(uv - 600000, 12500) + 1;
+}
+
+static struct omap_volt_pmic_info omap3_mpu_volt_info = {
+	.slew_rate		= 4000,
+	.step_size		= 12500,
+	.on_volt		= 1200000,
+	.onlp_volt		= 1000000,
+	.ret_volt		= 975000,
+	.off_volt		= 600000,
+	.volt_setup_time	= 0xfff,
+	.vp_erroroffset		= OMAP3_VP_CONFIG_ERROROFFSET,
+	.vp_vstepmin		= OMAP3_VP_VSTEPMIN_VSTEPMIN,
+	.vp_vstepmax		= OMAP3_VP_VSTEPMAX_VSTEPMAX,
+	.vp_vddmin		= OMAP3430_VP1_VLIMITTO_VDDMIN,
+	.vp_vddmax		= OMAP3430_VP1_VLIMITTO_VDDMAX,
+	.vp_timeout_us		= OMAP3_VP_VLIMITTO_TIMEOUT_US,
+	.i2c_slave_addr		= OMAP3_SRI2C_SLAVE_ADDR,
+	.pmic_reg		= OMAP3_VDD_MPU_SR_CONTROL_REG,
+	.vsel_to_uv		= twl4030_vsel_to_uv,
+	.uv_to_vsel		= twl4030_uv_to_vsel,
+};
+
+static struct omap_volt_pmic_info omap3_core_volt_info = {
+	.slew_rate		= 4000,
+	.step_size		= 12500,
+	.on_volt                = 1200000,
+	.onlp_volt              = 1000000,
+	.ret_volt               = 975000,
+	.off_volt               = 600000,
+	.volt_setup_time        = 0xfff,
+	.vp_erroroffset		= OMAP3_VP_CONFIG_ERROROFFSET,
+	.vp_vstepmin		= OMAP3_VP_VSTEPMIN_VSTEPMIN,
+	.vp_vstepmax		= OMAP3_VP_VSTEPMAX_VSTEPMAX,
+	.vp_vddmin		= OMAP3430_VP2_VLIMITTO_VDDMIN,
+	.vp_vddmax		= OMAP3430_VP2_VLIMITTO_VDDMAX,
+	.vp_timeout_us		= OMAP3_VP_VLIMITTO_TIMEOUT_US,
+	.i2c_slave_addr		= OMAP3_SRI2C_SLAVE_ADDR,
+	.pmic_reg		= OMAP3_VDD_CORE_SR_CONTROL_REG,
+	.vsel_to_uv		= twl4030_vsel_to_uv,
+	.uv_to_vsel		= twl4030_uv_to_vsel,
+};
+
+static struct omap_volt_pmic_info omap4_mpu_volt_info = {
+	.slew_rate		= 4000,
+	.step_size		= 12500,
+	.on_volt		= 1350000,
+	.onlp_volt		= 1350000,
+	.ret_volt		= 837500,
+	.off_volt		= 600000,
+	.volt_setup_time	= 0,
+	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
+	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
+	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
+	.vp_vddmin		= OMAP4_VP_MPU_VLIMITTO_VDDMIN,
+	.vp_vddmax		= OMAP4_VP_MPU_VLIMITTO_VDDMAX,
+	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
+	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
+	.pmic_reg		= OMAP4_VDD_MPU_SR_VOLT_REG,
+	.vsel_to_uv		= twl6030_vsel_to_uv,
+	.uv_to_vsel		= twl6030_uv_to_vsel,
+};
+
+static struct omap_volt_pmic_info omap4_iva_volt_info = {
+	.slew_rate		= 4000,
+	.step_size		= 12500,
+	.on_volt		= 1100000,
+	.onlp_volt		= 1100000,
+	.ret_volt		= 837500,
+	.off_volt		= 600000,
+	.volt_setup_time	= 0,
+	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
+	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
+	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
+	.vp_vddmin		= OMAP4_VP_IVA_VLIMITTO_VDDMIN,
+	.vp_vddmax		= OMAP4_VP_IVA_VLIMITTO_VDDMAX,
+	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
+	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
+	.pmic_reg		= OMAP4_VDD_IVA_SR_VOLT_REG,
+	.vsel_to_uv		= twl6030_vsel_to_uv,
+	.uv_to_vsel		= twl6030_uv_to_vsel,
+};
+
+static struct omap_volt_pmic_info omap4_core_volt_info = {
+	.slew_rate		= 4000,
+	.step_size		= 12500,
+	.on_volt		= 1100000,
+	.onlp_volt		= 1100000,
+	.ret_volt		= 837500,
+	.off_volt		= 600000,
+	.volt_setup_time	= 0,
+	.vp_erroroffset		= OMAP4_VP_CONFIG_ERROROFFSET,
+	.vp_vstepmin		= OMAP4_VP_VSTEPMIN_VSTEPMIN,
+	.vp_vstepmax		= OMAP4_VP_VSTEPMAX_VSTEPMAX,
+	.vp_vddmin		= OMAP4_VP_CORE_VLIMITTO_VDDMIN,
+	.vp_vddmax		= OMAP4_VP_CORE_VLIMITTO_VDDMAX,
+	.vp_timeout_us		= OMAP4_VP_VLIMITTO_TIMEOUT_US,
+	.i2c_slave_addr		= OMAP4_SRI2C_SLAVE_ADDR,
+	.pmic_reg		= OMAP4_VDD_CORE_SR_VOLT_REG,
+	.vsel_to_uv		= twl6030_vsel_to_uv,
+	.uv_to_vsel		= twl6030_uv_to_vsel,
+};
+
+int __init omap4_twl_init(void)
+{
+	struct voltagedomain *voltdm;
+
+	if (!cpu_is_omap44xx())
+		return -ENODEV;
+
+	voltdm = omap_voltage_domain_lookup("mpu");
+	omap_voltage_register_pmic(voltdm, &omap4_mpu_volt_info);
+
+	voltdm = omap_voltage_domain_lookup("iva");
+	omap_voltage_register_pmic(voltdm, &omap4_iva_volt_info);
+
+	voltdm = omap_voltage_domain_lookup("core");
+	omap_voltage_register_pmic(voltdm, &omap4_core_volt_info);
+
+	return 0;
+}
+
+int __init omap3_twl_init(void)
+{
+	struct voltagedomain *voltdm;
+
+	if (!cpu_is_omap34xx())
+		return -ENODEV;
+
+	if (cpu_is_omap3630()) {
+		omap3_mpu_volt_info.vp_vddmin = OMAP3630_VP1_VLIMITTO_VDDMIN;
+		omap3_mpu_volt_info.vp_vddmax = OMAP3630_VP1_VLIMITTO_VDDMAX;
+		omap3_core_volt_info.vp_vddmin = OMAP3630_VP2_VLIMITTO_VDDMIN;
+		omap3_core_volt_info.vp_vddmax = OMAP3630_VP2_VLIMITTO_VDDMAX;
+	}
+
+	voltdm = omap_voltage_domain_lookup("mpu");
+	omap_voltage_register_pmic(voltdm, &omap3_mpu_volt_info);
+
+	voltdm = omap_voltage_domain_lookup("core");
+	omap_voltage_register_pmic(voltdm, &omap3_core_volt_info);
+
+	return 0;
+}
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
new file mode 100644
index 0000000..ab8b35b
--- /dev/null
+++ b/arch/arm/mach-omap2/opp.c
@@ -0,0 +1,93 @@
+/*
+ * OMAP SoC specific OPP wrapper function
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ *	Kevin Hilman
+ * Copyright (C) 2010 Nokia Corporation.
+ *      Eduardo Valentin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/opp.h>
+
+#include <plat/omap_device.h>
+
+#include "omap_opp_data.h"
+
+/* Temp variable to allow multiple calls */
+static u8 __initdata omap_table_init;
+
+/**
+ * omap_init_opp_table() - Initialize opp table as per the CPU type
+ * @opp_def:		opp default list for this silicon
+ * @opp_def_size:	number of opp entries for this silicon
+ *
+ * Register the initial OPP table with the OPP library based on the CPU
+ * type. This is meant to be used only by SoC specific registration.
+ */
+int __init omap_init_opp_table(struct omap_opp_def *opp_def,
+		u32 opp_def_size)
+{
+	int i, r;
+
+	if (!opp_def || !opp_def_size) {
+		pr_err("%s: invalid params!\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Initialize only if not already initialized. Even if the previous
+	 * call failed, there is no reason we'd succeed this time.
+	 */
+	if (omap_table_init)
+		return -EEXIST;
+	omap_table_init = 1;
+
+	/* Now register with the OPP library */
+	for (i = 0; i < opp_def_size; i++) {
+		struct omap_hwmod *oh;
+		struct device *dev;
+
+		if (!opp_def->hwmod_name) {
+			pr_err("%s: NULL name of omap_hwmod, failing [%d].\n",
+				__func__, i);
+			return -EINVAL;
+		}
+		oh = omap_hwmod_lookup(opp_def->hwmod_name);
+		if (!oh || !oh->od) {
+			pr_warn("%s: no hwmod or odev for %s, [%d] "
+				"cannot add OPPs.\n", __func__,
+				opp_def->hwmod_name, i);
+			return -EINVAL;
+		}
+		dev = &oh->od->pdev.dev;
+
+		r = opp_add(dev, opp_def->freq, opp_def->u_volt);
+		if (r) {
+			dev_err(dev, "%s: add OPP %ld failed for %s [%d] "
+				"result=%d\n",
+			       __func__, opp_def->freq,
+			       opp_def->hwmod_name, i, r);
+		} else {
+			if (!opp_def->default_available)
+				r = opp_disable(dev, opp_def->freq);
+			if (r)
+				dev_err(dev, "%s: disable %ld failed for %s "
+					"[%d] result=%d\n",
+					__func__, opp_def->freq,
+					opp_def->hwmod_name, i, r);
+		}
+		opp_def++;
+	}
+
+	return 0;
+}
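
As a usage illustration of the registration flow above, an SoC data file would look roughly like the sketch below (purely hypothetical hwmod name, rates and voltages; the real per-SoC tables are the opp3xxx_data.c and opp4xxx_data.c files added next):

/* Hypothetical example only; name, rates and voltages are placeholders */
static struct omap_opp_def __initdata example_opp_def_list[] = {
	/* registered and enabled by default */
	OPP_INITIALIZER("mpu", true,  300000000, 1012500),
	/* registered but left disabled until explicitly enabled */
	OPP_INITIALIZER("mpu", false, 600000000, 1200000),
};

static int __init example_opp_init(void)
{
	return omap_init_opp_table(example_opp_def_list,
				   ARRAY_SIZE(example_opp_def_list));
}
device_initcall(example_opp_init);
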
diff --git a/arch/arm/mach-omap2/opp3xxx_data.c b/arch/arm/mach-omap2/opp3xxx_data.c
new file mode 100644
index 0000000..0486fce
--- /dev/null
+++ b/arch/arm/mach-omap2/opp3xxx_data.c
@@ -0,0 +1,107 @@
+/*
+ * OMAP3 OPP table definitions.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ *	Kevin Hilman
+ * Copyright (C) 2010 Nokia Corporation.
+ *      Eduardo Valentin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+
+#include <plat/cpu.h>
+
+#include "omap_opp_data.h"
+
+static struct omap_opp_def __initdata omap34xx_opp_def_list[] = {
+	/* MPU OPP1 */
+	OPP_INITIALIZER("mpu", true, 125000000, 975000),
+	/* MPU OPP2 */
+	OPP_INITIALIZER("mpu", true, 250000000, 1075000),
+	/* MPU OPP3 */
+	OPP_INITIALIZER("mpu", true, 500000000, 1200000),
+	/* MPU OPP4 */
+	OPP_INITIALIZER("mpu", true, 550000000, 1270000),
+	/* MPU OPP5 */
+	OPP_INITIALIZER("mpu", true, 600000000, 1350000),
+
+	/*
+	 * L3 OPP1 - 41.5 MHz is disabled because the voltage for that OPP is
+	 * almost the same as the one at 83MHz, thus providing very little
+	 * gain from a power point of view. In terms of energy it would even
+	 * increase the consumption, due to the very negative performance
+	 * impact that frequency has on the MPU and the whole system in
+	 * general.
+	 */
+	OPP_INITIALIZER("l3_main", false, 41500000, 975000),
+	/* L3 OPP2 */
+	OPP_INITIALIZER("l3_main", true, 83000000, 1050000),
+	/* L3 OPP3 */
+	OPP_INITIALIZER("l3_main", true, 166000000, 1150000),
+
+	/* DSP OPP1 */
+	OPP_INITIALIZER("iva", true, 90000000, 975000),
+	/* DSP OPP2 */
+	OPP_INITIALIZER("iva", true, 180000000, 1075000),
+	/* DSP OPP3 */
+	OPP_INITIALIZER("iva", true, 360000000, 1200000),
+	/* DSP OPP4 */
+	OPP_INITIALIZER("iva", true, 400000000, 1270000),
+	/* DSP OPP5 */
+	OPP_INITIALIZER("iva", true, 430000000, 1350000),
+};
+
+static struct omap_opp_def __initdata omap36xx_opp_def_list[] = {
+	/* MPU OPP1 - OPP50 */
+	OPP_INITIALIZER("mpu", true,  300000000, 1012500),
+	/* MPU OPP2 - OPP100 */
+	OPP_INITIALIZER("mpu", true,  600000000, 1200000),
+	/* MPU OPP3 - OPP-Turbo */
+	OPP_INITIALIZER("mpu", false, 800000000, 1325000),
+	/* MPU OPP4 - OPP-SB */
+	OPP_INITIALIZER("mpu", false, 1000000000, 1375000),
+
+	/* L3 OPP1 - OPP50 */
+	OPP_INITIALIZER("l3_main", true, 100000000, 1000000),
+	/* L3 OPP2 - OPP100, OPP-Turbo, OPP-SB */
+	OPP_INITIALIZER("l3_main", true, 200000000, 1200000),
+
+	/* DSP OPP1 - OPP50 */
+	OPP_INITIALIZER("iva", true,  260000000, 1012500),
+	/* DSP OPP2 - OPP100 */
+	OPP_INITIALIZER("iva", true,  520000000, 1200000),
+	/* DSP OPP3 - OPP-Turbo */
+	OPP_INITIALIZER("iva", false, 660000000, 1325000),
+	/* DSP OPP4 - OPP-SB */
+	OPP_INITIALIZER("iva", false, 800000000, 1375000),
+};
+
+/**
+ * omap3_opp_init() - initialize omap3 opp table
+ */
+static int __init omap3_opp_init(void)
+{
+	int r = -ENODEV;
+
+	if (!cpu_is_omap34xx())
+		return r;
+
+	if (cpu_is_omap3630())
+		r = omap_init_opp_table(omap36xx_opp_def_list,
+			ARRAY_SIZE(omap36xx_opp_def_list));
+	else
+		r = omap_init_opp_table(omap34xx_opp_def_list,
+			ARRAY_SIZE(omap34xx_opp_def_list));
+
+	return r;
+}
+device_initcall(omap3_opp_init);
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
new file mode 100644
index 0000000..a11fa56
--- /dev/null
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -0,0 +1,57 @@
+/*
+ * OMAP4 OPP table definitions.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ *	Kevin Hilman
+ *	Thara Gopinath
+ * Copyright (C) 2010 Nokia Corporation.
+ *      Eduardo Valentin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+
+#include <plat/cpu.h>
+
+#include "omap_opp_data.h"
+
+static struct omap_opp_def __initdata omap44xx_opp_def_list[] = {
+	/* MPU OPP1 - OPP50 */
+	OPP_INITIALIZER("mpu", true, 300000000, 1100000),
+	/* MPU OPP2 - OPP100 */
+	OPP_INITIALIZER("mpu", true, 600000000, 1200000),
+	/* MPU OPP3 - OPP-Turbo */
+	OPP_INITIALIZER("mpu", false, 800000000, 1260000),
+	/* MPU OPP4 - OPP-SB */
+	OPP_INITIALIZER("mpu", false, 1008000000, 1350000),
+	/* L3 OPP1 - OPP50 */
+	OPP_INITIALIZER("l3_main_1", true, 100000000, 930000),
+	/* L3 OPP2 - OPP100, OPP-Turbo, OPP-SB */
+	OPP_INITIALIZER("l3_main_1", true, 200000000, 1100000),
+	/* TODO: add IVA, DSP, aess, fdif, gpu */
+};
+
+/**
+ * omap4_opp_init() - initialize omap4 opp table
+ */
+static int __init omap4_opp_init(void)
+{
+	int r = -ENODEV;
+
+	if (!cpu_is_omap44xx())
+		return r;
+
+	r = omap_init_opp_table(omap44xx_opp_def_list,
+			ARRAY_SIZE(omap44xx_opp_def_list));
+
+	return r;
+}
+device_initcall(omap4_opp_init);
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index a8afb61..125f565 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -29,12 +29,13 @@
 
 #include <plat/clock.h>
 #include <plat/board.h>
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+#include "powerdomain.h"
+#include "clockdomain.h"
 #include <plat/dmtimer.h>
+#include <plat/omap-pm.h>
 
-#include "prm.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
 #include "pm.h"
 
 int omap2_pm_debug;
@@ -45,10 +46,10 @@
 
 #define DUMP_PRM_MOD_REG(mod, reg)    \
 	regs[reg_count].name = #mod "." #reg; \
-	regs[reg_count++].val = prm_read_mod_reg(mod, reg)
+	regs[reg_count++].val = omap2_prm_read_mod_reg(mod, reg)
 #define DUMP_CM_MOD_REG(mod, reg)     \
 	regs[reg_count].name = #mod "." #reg; \
-	regs[reg_count++].val = cm_read_mod_reg(mod, reg)
+	regs[reg_count++].val = omap2_cm_read_mod_reg(mod, reg)
 #define DUMP_PRM_REG(reg) \
 	regs[reg_count].name = #reg; \
 	regs[reg_count++].val = __raw_readl(reg)
@@ -328,10 +329,10 @@
 		for (j = pm_dbg_reg_modules[i].low;
 			j <= pm_dbg_reg_modules[i].high; j += 4) {
 			if (pm_dbg_reg_modules[i].type == MOD_CM)
-				val = cm_read_mod_reg(
+				val = omap2_cm_read_mod_reg(
 					pm_dbg_reg_modules[i].offset, j);
 			else
-				val = prm_read_mod_reg(
+				val = omap2_prm_read_mod_reg(
 					pm_dbg_reg_modules[i].offset, j);
 			*(ptr++) = val;
 		}
@@ -581,6 +582,10 @@
 	*option = val;
 
 	if (option == &enable_off_mode) {
+		if (val)
+			omap_pm_enable_off_mode();
+		else
+			omap_pm_disable_off_mode();
 		if (cpu_is_omap34xx())
 			omap3_pm_off_mode_enable(val);
 	}
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 59ca03b..d5a102c 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -13,13 +13,16 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/err.h>
+#include <linux/opp.h>
 
 #include <plat/omap-pm.h>
 #include <plat/omap_device.h>
 #include <plat/common.h>
+#include <plat/voltage.h>
 
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+#include "powerdomain.h"
+#include "clockdomain.h"
+#include "pm.h"
 
 static struct omap_device_pm_latency *pm_lats;
 
@@ -89,10 +92,13 @@
 	}
 }
 
+/* Types of sleep_switch used in omap_set_pwrdm_state */
+#define FORCEWAKEUP_SWITCH	0
+#define LOWPOWERSTATE_SWITCH	1
+
 /*
  * This sets pwrdm state (other than mpu & core. Currently only ON &
- * RET are supported. Function is assuming that clkdm doesn't have
- * hw_sup mode enabled.
+ * RET are supported.
  */
 int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
 {
@@ -114,9 +120,14 @@
 		return ret;
 
 	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
-		omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
-		sleep_switch = 1;
-		pwrdm_wait_transition(pwrdm);
+		if ((pwrdm_read_pwrst(pwrdm) > state) &&
+			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
+			sleep_switch = LOWPOWERSTATE_SWITCH;
+		} else {
+			omap2_clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
+			pwrdm_wait_transition(pwrdm);
+			sleep_switch = FORCEWAKEUP_SWITCH;
+		}
 	}
 
 	ret = pwrdm_set_next_pwrst(pwrdm, state);
@@ -126,16 +137,106 @@
 		goto err;
 	}
 
-	if (sleep_switch) {
-		omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
-		pwrdm_wait_transition(pwrdm);
-		pwrdm_state_switch(pwrdm);
+	switch (sleep_switch) {
+	case FORCEWAKEUP_SWITCH:
+		if (pwrdm->pwrdm_clkdms[0]->flags & CLKDM_CAN_ENABLE_AUTO)
+			omap2_clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
+		else
+			omap2_clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
+		break;
+	case LOWPOWERSTATE_SWITCH:
+		pwrdm_set_lowpwrstchange(pwrdm);
+		break;
+	default:
+		return ret;
 	}
 
+	pwrdm_wait_transition(pwrdm);
+	pwrdm_state_switch(pwrdm);
 err:
 	return ret;
 }
 
+/*
+ * This API is to be called during init to set the various voltage
+ * domains to the voltage as per the opp table. Typically we boot up
+ * at the nominal voltage. So this function finds out the rate of
+ * the clock associated with the voltage domain, finds the correct
+ * opp entry and sets the voltage domain to the voltage specified
+ * in that opp entry.
+ */
+static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
+						struct device *dev)
+{
+	struct voltagedomain *voltdm;
+	struct clk *clk;
+	struct opp *opp;
+	unsigned long freq, bootup_volt;
+
+	if (!vdd_name || !clk_name || !dev) {
+		printk(KERN_ERR "%s: Invalid parameters!\n", __func__);
+		goto exit;
+	}
+
+	voltdm = omap_voltage_domain_lookup(vdd_name);
+	if (IS_ERR(voltdm)) {
+		printk(KERN_ERR "%s: Unable to get vdd pointer for vdd_%s\n",
+			__func__, vdd_name);
+		goto exit;
+	}
+
+	clk =  clk_get(NULL, clk_name);
+	if (IS_ERR(clk)) {
+		printk(KERN_ERR "%s: unable to get clk %s\n",
+			__func__, clk_name);
+		goto exit;
+	}
+
+	freq = clk->rate;
+	clk_put(clk);
+
+	opp = opp_find_freq_ceil(dev, &freq);
+	if (IS_ERR(opp)) {
+		printk(KERN_ERR "%s: unable to find boot up OPP for vdd_%s\n",
+			__func__, vdd_name);
+		goto exit;
+	}
+
+	bootup_volt = opp_get_voltage(opp);
+	if (!bootup_volt) {
+		printk(KERN_ERR "%s: unable to find voltage corresponding "
+			"to the bootup OPP for vdd_%s\n", __func__, vdd_name);
+		goto exit;
+	}
+
+	omap_voltage_scale_vdd(voltdm, bootup_volt);
+	return 0;
+
+exit:
+	printk(KERN_ERR "%s: Unable to put vdd_%s to its init voltage\n\n",
+		__func__, vdd_name);
+	return -EINVAL;
+}
+
+static void __init omap3_init_voltages(void)
+{
+	if (!cpu_is_omap34xx())
+		return;
+
+	omap2_set_init_voltage("mpu", "dpll1_ck", mpu_dev);
+	omap2_set_init_voltage("core", "l3_ick", l3_dev);
+}
+
+static void __init omap4_init_voltages(void)
+{
+	if (!cpu_is_omap44xx())
+		return;
+
+	omap2_set_init_voltage("mpu", "dpll_mpu_ck", mpu_dev);
+	omap2_set_init_voltage("core", "l3_div_ck", l3_dev);
+	omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", iva_dev);
+}
+
 static int __init omap2_common_pm_init(void)
 {
 	omap2_init_processor_devices();
@@ -143,5 +244,24 @@
 
 	return 0;
 }
-device_initcall(omap2_common_pm_init);
+postcore_initcall(omap2_common_pm_init);
 
+static int __init omap2_common_pm_late_init(void)
+{
+	/* Init the OMAP TWL parameters */
+	omap3_twl_init();
+	omap4_twl_init();
+
+	/* Init the voltage layer */
+	omap_voltage_late_init();
+
+	/* Initialize the voltages */
+	omap3_init_voltages();
+	omap4_init_voltages();
+
+	/* Smartreflex device init */
+	omap_devinit_smartreflex();
+
+	return 0;
+}
+late_initcall(omap2_common_pm_late_init);
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 0d75bfd..1c1b0ab 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -11,7 +11,9 @@
 #ifndef __ARCH_ARM_MACH_OMAP2_PM_H
 #define __ARCH_ARM_MACH_OMAP2_PM_H
 
-#include <plat/powerdomain.h>
+#include <linux/err.h>
+
+#include "powerdomain.h"
 
 extern void *omap3_secure_ram_storage;
 extern void omap3_pm_off_mode_enable(int);
@@ -20,6 +22,20 @@
 extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
 extern int omap3_idle_init(void);
 
+#if defined(CONFIG_PM_OPP)
+extern int omap3_opp_init(void);
+extern int omap4_opp_init(void);
+#else
+static inline int omap3_opp_init(void)
+{
+	return -EINVAL;
+}
+static inline int omap4_opp_init(void)
+{
+	return -EINVAL;
+}
+#endif
+
 struct cpuidle_params {
 	u8  valid;
 	u32 sleep_latency;
@@ -58,7 +74,7 @@
 #endif
 
 #if defined(CONFIG_CPU_IDLE)
-extern void omap3_cpuidle_update_states(void);
+extern void omap3_cpuidle_update_states(u32, u32);
 #endif
 
 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
@@ -80,9 +96,46 @@
 extern void omap3_save_scratchpad_contents(void);
 
 extern unsigned int omap24xx_idle_loop_suspend_sz;
-extern unsigned int omap34xx_suspend_sz;
 extern unsigned int save_secure_ram_context_sz;
 extern unsigned int omap24xx_cpu_suspend_sz;
 extern unsigned int omap34xx_cpu_suspend_sz;
 
+#define PM_RTA_ERRATUM_i608		(1 << 0)
+#define PM_SDRC_WAKEUP_ERRATUM_i583	(1 << 1)
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
+extern u16 pm34xx_errata;
+#define IS_PM34XX_ERRATUM(id)		(pm34xx_errata & (id))
+extern void enable_omap3630_toggle_l2_on_restore(void);
+#else
+#define IS_PM34XX_ERRATUM(id)		0
+static inline void enable_omap3630_toggle_l2_on_restore(void) { }
+#endif		/* defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3) */
+
+#ifdef CONFIG_OMAP_SMARTREFLEX
+extern int omap_devinit_smartreflex(void);
+extern void omap_enable_smartreflex_on_init(void);
+#else
+static inline int omap_devinit_smartreflex(void)
+{
+	return -EINVAL;
+}
+
+static inline void omap_enable_smartreflex_on_init(void) {}
+#endif
+
+#ifdef CONFIG_TWL4030_CORE
+extern int omap3_twl_init(void);
+extern int omap4_twl_init(void);
+#else
+static inline int omap3_twl_init(void)
+{
+	return -EINVAL;
+}
+static inline int omap4_twl_init(void)
+{
+	return -EINVAL;
+}
+#endif
+
 #endif
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index aaeea49..9e5dc8e 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -42,16 +42,16 @@
 #include <plat/dma.h>
 #include <plat/board.h>
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-24xx.h"
 #include "sdrc.h"
 #include "pm.h"
 #include "control.h"
 
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+#include "powerdomain.h"
+#include "clockdomain.h"
 
 #ifdef CONFIG_SUSPEND
 static suspend_state_t suspend_state = PM_SUSPEND_ON;
@@ -79,8 +79,8 @@
 {
 	u32 f1, f2;
 
-	f1 = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-	f2 = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
+	f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
+	f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
 
 	/* Ignore UART clocks.  These are handled by UART core (serial.c) */
 	f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
@@ -105,9 +105,9 @@
 
 	/* Clear old wake-up events */
 	/* REVISIT: These write to reserved bits? */
-	prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
-	prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
-	prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
+	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
+	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
+	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
 
 	/*
 	 * Set MPU powerdomain's next power state to RETENTION;
@@ -120,7 +120,7 @@
 	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
 	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
 
-	omap2_gpio_prepare_for_idle(PWRDM_POWER_RET);
+	omap2_gpio_prepare_for_idle(0);
 
 	if (omap2_pm_debug) {
 		omap2_pm_dump(0, 0, 0);
@@ -167,30 +167,30 @@
 	clk_enable(osc_ck);
 
 	/* clear CORE wake-up events */
-	prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
-	prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
+	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
+	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
 
 	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
-	prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);
+	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);
 
 	/* MPU domain wake events */
-	l = prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
+	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
 	if (l & 0x01)
-		prm_write_mod_reg(0x01, OCP_MOD,
+		omap2_prm_write_mod_reg(0x01, OCP_MOD,
 				  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
 	if (l & 0x20)
-		prm_write_mod_reg(0x20, OCP_MOD,
+		omap2_prm_write_mod_reg(0x20, OCP_MOD,
 				  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
 
 	/* Mask future PRCM-to-MPU interrupts */
-	prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
+	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
 }
 
 static int omap2_i2c_active(void)
 {
 	u32 l;
 
-	l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
+	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
 	return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK);
 }
 
@@ -201,13 +201,13 @@
 	u32 l;
 
 	/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
-	l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
+	l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
 	if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
 		 OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
 		 OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
 		return 0;
 	/* Check for UART3. */
-	l = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
+	l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
 	if (l & OMAP24XX_EN_UART3_MASK)
 		return 0;
 	if (sti_console_enabled)
@@ -230,18 +230,18 @@
 	 * it is in retention mode. */
 	if (omap2_allow_mpu_retention()) {
 		/* REVISIT: These write to reserved bits? */
-		prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
-		prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
-		prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
+		omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
+		omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
+		omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
 
 		/* Try to enter MPU retention */
-		prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
+		omap2_prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
 				  OMAP_LOGICRETSTATE_MASK,
 				  MPU_MOD, OMAP2_PM_PWSTCTRL);
 	} else {
 		/* Block MPU retention */
 
-		prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
+		omap2_prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
 						 OMAP2_PM_PWSTCTRL);
 		only_idle = 1;
 	}
@@ -299,16 +299,11 @@
 	local_irq_enable();
 }
 
+#ifdef CONFIG_SUSPEND
 static int omap2_pm_begin(suspend_state_t state)
 {
-	suspend_state = state;
-	return 0;
-}
-
-static int omap2_pm_prepare(void)
-{
-	/* We cannot sleep in idle until we have resumed */
 	disable_hlt();
+	suspend_state = state;
 	return 0;
 }
 
@@ -316,9 +311,9 @@
 {
 	u32 wken_wkup, mir1;
 
-	wken_wkup = prm_read_mod_reg(WKUP_MOD, PM_WKEN);
+	wken_wkup = omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
 	wken_wkup &= ~OMAP24XX_EN_GPT1_MASK;
-	prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
+	omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
 
 	/* Mask GPT1 */
 	mir1 = omap_readl(0x480fe0a4);
@@ -328,7 +323,7 @@
 	omap2_enter_full_retention();
 
 	omap_writel(mir1, 0x480fe0a4);
-	prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
+	omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
 
 	return 0;
 }
@@ -349,24 +344,21 @@
 	return ret;
 }
 
-static void omap2_pm_finish(void)
-{
-	enable_hlt();
-}
-
 static void omap2_pm_end(void)
 {
 	suspend_state = PM_SUSPEND_ON;
+	enable_hlt();
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap2_pm_begin,
-	.prepare	= omap2_pm_prepare,
 	.enter		= omap2_pm_enter,
-	.finish		= omap2_pm_finish,
 	.end		= omap2_pm_end,
 	.valid		= suspend_valid_only_mem,
 };
+#else
+static const struct platform_suspend_ops __initdata omap_pm_ops;
+#endif /* CONFIG_SUSPEND */
 
 /* XXX This function should be shareable between OMAP2xxx and OMAP3 */
 static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
@@ -388,7 +380,7 @@
 	struct powerdomain *pwrdm;
 
 	/* Enable autoidle */
-	prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
+	omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
 			  OMAP2_PRCM_SYSCONFIG_OFFSET);
 
 	/*
@@ -427,87 +419,87 @@
 	clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);
 
 	/* Enable clock autoidle for all domains */
-	cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
-			 OMAP24XX_AUTO_MAILBOXES_MASK |
-			 OMAP24XX_AUTO_WDT4_MASK |
-			 OMAP2420_AUTO_WDT3_MASK |
-			 OMAP24XX_AUTO_MSPRO_MASK |
-			 OMAP2420_AUTO_MMC_MASK |
-			 OMAP24XX_AUTO_FAC_MASK |
-			 OMAP2420_AUTO_EAC_MASK |
-			 OMAP24XX_AUTO_HDQ_MASK |
-			 OMAP24XX_AUTO_UART2_MASK |
-			 OMAP24XX_AUTO_UART1_MASK |
-			 OMAP24XX_AUTO_I2C2_MASK |
-			 OMAP24XX_AUTO_I2C1_MASK |
-			 OMAP24XX_AUTO_MCSPI2_MASK |
-			 OMAP24XX_AUTO_MCSPI1_MASK |
-			 OMAP24XX_AUTO_MCBSP2_MASK |
-			 OMAP24XX_AUTO_MCBSP1_MASK |
-			 OMAP24XX_AUTO_GPT12_MASK |
-			 OMAP24XX_AUTO_GPT11_MASK |
-			 OMAP24XX_AUTO_GPT10_MASK |
-			 OMAP24XX_AUTO_GPT9_MASK |
-			 OMAP24XX_AUTO_GPT8_MASK |
-			 OMAP24XX_AUTO_GPT7_MASK |
-			 OMAP24XX_AUTO_GPT6_MASK |
-			 OMAP24XX_AUTO_GPT5_MASK |
-			 OMAP24XX_AUTO_GPT4_MASK |
-			 OMAP24XX_AUTO_GPT3_MASK |
-			 OMAP24XX_AUTO_GPT2_MASK |
-			 OMAP2420_AUTO_VLYNQ_MASK |
-			 OMAP24XX_AUTO_DSS_MASK,
-			 CORE_MOD, CM_AUTOIDLE1);
-	cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
-			 OMAP24XX_AUTO_SSI_MASK |
-			 OMAP24XX_AUTO_USB_MASK,
-			 CORE_MOD, CM_AUTOIDLE2);
-	cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
-			 OMAP24XX_AUTO_GPMC_MASK |
-			 OMAP24XX_AUTO_SDMA_MASK,
-			 CORE_MOD, CM_AUTOIDLE3);
-	cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
-			 OMAP24XX_AUTO_AES_MASK |
-			 OMAP24XX_AUTO_RNG_MASK |
-			 OMAP24XX_AUTO_SHA_MASK |
-			 OMAP24XX_AUTO_DES_MASK,
-			 CORE_MOD, OMAP24XX_CM_AUTOIDLE4);
+	omap2_cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
+			       OMAP24XX_AUTO_MAILBOXES_MASK |
+			       OMAP24XX_AUTO_WDT4_MASK |
+			       OMAP2420_AUTO_WDT3_MASK |
+			       OMAP24XX_AUTO_MSPRO_MASK |
+			       OMAP2420_AUTO_MMC_MASK |
+			       OMAP24XX_AUTO_FAC_MASK |
+			       OMAP2420_AUTO_EAC_MASK |
+			       OMAP24XX_AUTO_HDQ_MASK |
+			       OMAP24XX_AUTO_UART2_MASK |
+			       OMAP24XX_AUTO_UART1_MASK |
+			       OMAP24XX_AUTO_I2C2_MASK |
+			       OMAP24XX_AUTO_I2C1_MASK |
+			       OMAP24XX_AUTO_MCSPI2_MASK |
+			       OMAP24XX_AUTO_MCSPI1_MASK |
+			       OMAP24XX_AUTO_MCBSP2_MASK |
+			       OMAP24XX_AUTO_MCBSP1_MASK |
+			       OMAP24XX_AUTO_GPT12_MASK |
+			       OMAP24XX_AUTO_GPT11_MASK |
+			       OMAP24XX_AUTO_GPT10_MASK |
+			       OMAP24XX_AUTO_GPT9_MASK |
+			       OMAP24XX_AUTO_GPT8_MASK |
+			       OMAP24XX_AUTO_GPT7_MASK |
+			       OMAP24XX_AUTO_GPT6_MASK |
+			       OMAP24XX_AUTO_GPT5_MASK |
+			       OMAP24XX_AUTO_GPT4_MASK |
+			       OMAP24XX_AUTO_GPT3_MASK |
+			       OMAP24XX_AUTO_GPT2_MASK |
+			       OMAP2420_AUTO_VLYNQ_MASK |
+			       OMAP24XX_AUTO_DSS_MASK,
+			       CORE_MOD, CM_AUTOIDLE1);
+	omap2_cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
+			       OMAP24XX_AUTO_SSI_MASK |
+			       OMAP24XX_AUTO_USB_MASK,
+			       CORE_MOD, CM_AUTOIDLE2);
+	omap2_cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
+			       OMAP24XX_AUTO_GPMC_MASK |
+			       OMAP24XX_AUTO_SDMA_MASK,
+			       CORE_MOD, CM_AUTOIDLE3);
+	omap2_cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
+			       OMAP24XX_AUTO_AES_MASK |
+			       OMAP24XX_AUTO_RNG_MASK |
+			       OMAP24XX_AUTO_SHA_MASK |
+			       OMAP24XX_AUTO_DES_MASK,
+			       CORE_MOD, OMAP24XX_CM_AUTOIDLE4);
 
-	cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
-			 CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
+			       CM_AUTOIDLE);
 
 	/* Put DPLL and both APLLs into autoidle mode */
-	cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
-			 (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
-			 (0x03 << OMAP24XX_AUTO_54M_SHIFT),
-			 PLL_MOD, CM_AUTOIDLE);
+	omap2_cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
+			       (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
+			       (0x03 << OMAP24XX_AUTO_54M_SHIFT),
+			       PLL_MOD, CM_AUTOIDLE);
 
-	cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
-			 OMAP24XX_AUTO_WDT1_MASK |
-			 OMAP24XX_AUTO_MPU_WDT_MASK |
-			 OMAP24XX_AUTO_GPIOS_MASK |
-			 OMAP24XX_AUTO_32KSYNC_MASK |
-			 OMAP24XX_AUTO_GPT1_MASK,
-			 WKUP_MOD, CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
+			       OMAP24XX_AUTO_WDT1_MASK |
+			       OMAP24XX_AUTO_MPU_WDT_MASK |
+			       OMAP24XX_AUTO_GPIOS_MASK |
+			       OMAP24XX_AUTO_32KSYNC_MASK |
+			       OMAP24XX_AUTO_GPT1_MASK,
+			       WKUP_MOD, CM_AUTOIDLE);
 
 	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
 	 * stabilisation */
-	prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
-			  OMAP2_PRCM_CLKSSETUP_OFFSET);
+	omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
+				OMAP2_PRCM_CLKSSETUP_OFFSET);
 
 	/* Configure automatic voltage transition */
-	prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
-			  OMAP2_PRCM_VOLTSETUP_OFFSET);
-	prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
-			  (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
-			  OMAP24XX_MEMRETCTRL_MASK |
-			  (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
-			  (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
-			  OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
+	omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
+				OMAP2_PRCM_VOLTSETUP_OFFSET);
+	omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
+				(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
+				OMAP24XX_MEMRETCTRL_MASK |
+				(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
+				(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
+				OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
 
 	/* Enable wake-up events */
-	prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
-			  WKUP_MOD, PM_WKEN);
+	omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
+				WKUP_MOD, PM_WKEN);
 }
 
 static int __init omap2_pm_init(void)
@@ -518,7 +510,7 @@
 		return -ENODEV;
 
 	printk(KERN_INFO "Power Management for OMAP2 initializing\n");
-	l = prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
+	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
 	printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
 
 	/* Look up important powerdomains */
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 648b8c5..8cbbead 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -31,8 +31,8 @@
 #include <linux/console.h>
 
 #include <plat/sram.h>
-#include <plat/clockdomain.h>
-#include <plat/powerdomain.h>
+#include "clockdomain.h"
+#include "powerdomain.h"
 #include <plat/serial.h>
 #include <plat/sdrc.h>
 #include <plat/prcm.h>
@@ -41,11 +41,11 @@
 
 #include <asm/tlbflush.h>
 
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 #include "prm-regbits-34xx.h"
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "pm.h"
 #include "sdrc.h"
 #include "control.h"
@@ -68,6 +68,9 @@
 #define OMAP343X_TABLE_VALUE_OFFSET	   0xc0
 #define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8
 
+/* pm34xx errata defined in pm.h */
+u16 pm34xx_errata;
+
 struct power_state {
 	struct powerdomain *pwrdm;
 	u32 next_state;
@@ -102,12 +105,12 @@
 	int timeout = 0;
 
 	if (omap_rev() >= OMAP3430_REV_ES3_1) {
-		prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
 				     PM_WKEN);
 		/* Do a readback to assure write has been done */
-		prm_read_mod_reg(WKUP_MOD, PM_WKEN);
+		omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
 
-		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
+		while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
 			 OMAP3430_ST_IO_CHAIN_MASK)) {
 			timeout++;
 			if (timeout > 1000) {
@@ -115,7 +118,7 @@
 				       "activation failed.\n");
 				return;
 			}
-			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
+			omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
 					     WKUP_MOD, PM_WKEN);
 		}
 	}
@@ -124,26 +127,17 @@
 static void omap3_disable_io_chain(void)
 {
 	if (omap_rev() >= OMAP3430_REV_ES3_1)
-		prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
 				       PM_WKEN);
 }
 
 static void omap3_core_save_context(void)
 {
-	u32 control_padconf_off;
-
-	/* Save the padconf registers */
-	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
-	control_padconf_off |= START_PADCONF_SAVE;
-	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
-	/* wait for the save to complete */
-	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
-			& PADCONF_SAVE_DONE))
-		udelay(1);
+	omap3_ctrl_save_padconf();
 
 	/*
 	 * Force write last pad into memory, as this can fail in some
-	 * cases according to erratas 1.157, 1.185
+	 * cases according to errata 1.157, 1.185
 	 */
 	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
 		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
@@ -218,27 +212,27 @@
 		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
 	int c = 0;
 
-	wkst = prm_read_mod_reg(module, wkst_off);
-	wkst &= prm_read_mod_reg(module, grpsel_off);
+	wkst = omap2_prm_read_mod_reg(module, wkst_off);
+	wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
 	if (wkst) {
-		iclk = cm_read_mod_reg(module, iclk_off);
-		fclk = cm_read_mod_reg(module, fclk_off);
+		iclk = omap2_cm_read_mod_reg(module, iclk_off);
+		fclk = omap2_cm_read_mod_reg(module, fclk_off);
 		while (wkst) {
 			clken = wkst;
-			cm_set_mod_reg_bits(clken, module, iclk_off);
+			omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
 			/*
 			 * For USBHOST, we don't know whether HOST1 or
 			 * HOST2 woke us up, so enable both f-clocks
 			 */
 			if (module == OMAP3430ES2_USBHOST_MOD)
 				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
-			cm_set_mod_reg_bits(clken, module, fclk_off);
-			prm_write_mod_reg(wkst, module, wkst_off);
-			wkst = prm_read_mod_reg(module, wkst_off);
+			omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
+			omap2_prm_write_mod_reg(wkst, module, wkst_off);
+			wkst = omap2_prm_read_mod_reg(module, wkst_off);
 			c++;
 		}
-		cm_write_mod_reg(iclk, module, iclk_off);
-		cm_write_mod_reg(fclk, module, fclk_off);
+		omap2_cm_write_mod_reg(iclk, module, iclk_off);
+		omap2_cm_write_mod_reg(fclk, module, fclk_off);
 	}
 
 	return c;
@@ -281,9 +275,9 @@
 	u32 irqenable_mpu, irqstatus_mpu;
 	int c = 0;
 
-	irqenable_mpu = prm_read_mod_reg(OCP_MOD,
+	irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
 					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
-	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+	irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
 					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 	irqstatus_mpu &= irqenable_mpu;
 
@@ -304,10 +298,10 @@
 			     "no code to handle it (%08x)\n", irqstatus_mpu);
 		}
 
-		prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
+		omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
 					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
+		irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
 					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 		irqstatus_mpu &= irqenable_mpu;
 
@@ -357,6 +351,7 @@
 	int mpu_next_state = PWRDM_POWER_ON;
 	int per_next_state = PWRDM_POWER_ON;
 	int core_next_state = PWRDM_POWER_ON;
+	int per_going_off;
 	int core_prev_state, per_prev_state;
 	u32 sdrc_pwr = 0;
 
@@ -395,7 +390,7 @@
 	if (omap3_has_io_wakeup() &&
 	    (per_next_state < PWRDM_POWER_ON ||
 	     core_next_state < PWRDM_POWER_ON)) {
-		prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
+		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
 		omap3_enable_io_chain();
 	}
 
@@ -408,9 +403,10 @@
 
 	/* PER */
 	if (per_next_state < PWRDM_POWER_ON) {
+		per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
 		omap_uart_prepare_idle(2);
 		omap_uart_prepare_idle(3);
-		omap2_gpio_prepare_for_idle(per_next_state);
+		omap2_gpio_prepare_for_idle(per_going_off);
 		if (per_next_state == PWRDM_POWER_OFF)
 				omap3_per_save_context();
 	}
@@ -421,7 +417,7 @@
 		omap_uart_prepare_idle(1);
 		if (core_next_state == PWRDM_POWER_OFF) {
 			omap3_core_save_context();
-			omap3_prcm_save_context();
+			omap3_cm_save_context();
 		}
 	}
 
@@ -430,7 +426,7 @@
 	/*
 	* On EMU/HS devices ROM code restores a SRDC value
 	* from scratchpad which has automatic self refresh on timeout
-	* of AUTO_CNT = 1 enabled. This takes care of errata 1.142.
+	* of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
 	* Hence store/restore the SDRC_POWER register here.
 	*/
 	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
@@ -461,14 +457,14 @@
 		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
 		if (core_prev_state == PWRDM_POWER_OFF) {
 			omap3_core_restore_context();
-			omap3_prcm_restore_context();
+			omap3_cm_restore_context();
 			omap3_sram_restore_context();
 			omap2_sms_restore_context();
 		}
 		omap_uart_resume_idle(0);
 		omap_uart_resume_idle(1);
 		if (core_next_state == PWRDM_POWER_OFF)
-			prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
+			omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
 					       OMAP3430_GR_MOD,
 					       OMAP3_PRM_VOLTCTRL_OFFSET);
 	}
@@ -492,7 +488,8 @@
 	if (omap3_has_io_wakeup() &&
 	    (per_next_state < PWRDM_POWER_ON ||
 	     core_next_state < PWRDM_POWER_ON)) {
-		prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
+		omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+					     PM_WKEN);
 		omap3_disable_io_chain();
 	}
 
@@ -529,12 +526,6 @@
 }
 
 #ifdef CONFIG_SUSPEND
-static int omap3_pm_prepare(void)
-{
-	disable_hlt();
-	return 0;
-}
-
 static int omap3_pm_suspend(void)
 {
 	struct power_state *pwrst;
@@ -597,14 +588,10 @@
 	return ret;
 }
 
-static void omap3_pm_finish(void)
-{
-	enable_hlt();
-}
-
 /* Hooks to enable / disable UART interrupts during suspend */
 static int omap3_pm_begin(suspend_state_t state)
 {
+	disable_hlt();
 	suspend_state = state;
 	omap_uart_enable_irqs(0);
 	return 0;
@@ -614,15 +601,14 @@
 {
 	suspend_state = PM_SUSPEND_ON;
 	omap_uart_enable_irqs(1);
+	enable_hlt();
 	return;
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap3_pm_begin,
 	.end		= omap3_pm_end,
-	.prepare	= omap3_pm_prepare,
 	.enter		= omap3_pm_enter,
-	.finish		= omap3_pm_finish,
 	.valid		= suspend_valid_only_mem,
 };
 #endif /* CONFIG_SUSPEND */
@@ -641,21 +627,21 @@
 static void __init omap3_iva_idle(void)
 {
 	/* ensure IVA2 clock is disabled */
-	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
+	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
 
 	/* if no clock activity, nothing else to do */
-	if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
+	if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
 	      OMAP3430_CLKACTIVITY_IVA2_MASK))
 		return;
 
 	/* Reset IVA2 */
-	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
 			  OMAP3430_RST2_IVA2_MASK |
 			  OMAP3430_RST3_IVA2_MASK,
 			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	/* Enable IVA2 clock */
-	cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
+	omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
 			 OMAP3430_IVA2_MOD, CM_FCLKEN);
 
 	/* Set IVA2 boot mode to 'idle' */
@@ -663,13 +649,13 @@
 			 OMAP343X_CONTROL_IVA2_BOOTMOD);
 
 	/* Un-reset IVA2 */
-	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	/* Disable IVA2 clock */
-	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
+	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
 
 	/* Reset IVA2 */
-	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
 			  OMAP3430_RST2_IVA2_MASK |
 			  OMAP3430_RST3_IVA2_MASK,
 			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -693,10 +679,10 @@
 	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
 
 	/* reset modem */
-	prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
+	omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
 			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
 			  CORE_MOD, OMAP2_RM_RSTCTRL);
-	prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
+	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
 }
 
 static void __init prcm_setup_regs(void)
@@ -711,23 +697,23 @@
 
 	/* XXX Reset all wkdeps. This should be done when initializing
 	 * powerdomains */
-	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
-	prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
-	prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
-	prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
-	prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
-	prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
+	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
+	omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
+	omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
+	omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
+	omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
+	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
 	if (omap_rev() > OMAP3430_REV_ES1_0) {
-		prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
-		prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
+		omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
+		omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
 	} else
-		prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
+		omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
 
 	/*
 	 * Enable interface clock autoidle for all modules.
 	 * Note that in the long run this should be done by clockfw
 	 */
-	cm_write_mod_reg(
+	omap2_cm_write_mod_reg(
 		OMAP3430_AUTO_MODEM_MASK |
 		OMAP3430ES2_AUTO_MMC3_MASK |
 		OMAP3430ES2_AUTO_ICR_MASK |
@@ -760,7 +746,7 @@
 		OMAP3430_AUTO_SSI_MASK,
 		CORE_MOD, CM_AUTOIDLE1);
 
-	cm_write_mod_reg(
+	omap2_cm_write_mod_reg(
 		OMAP3430_AUTO_PKA_MASK |
 		OMAP3430_AUTO_AES1_MASK |
 		OMAP3430_AUTO_RNG_MASK |
@@ -769,13 +755,13 @@
 		CORE_MOD, CM_AUTOIDLE2);
 
 	if (omap_rev() > OMAP3430_REV_ES1_0) {
-		cm_write_mod_reg(
+		omap2_cm_write_mod_reg(
 			OMAP3430_AUTO_MAD2D_MASK |
 			OMAP3430ES2_AUTO_USBTLL_MASK,
 			CORE_MOD, CM_AUTOIDLE3);
 	}
 
-	cm_write_mod_reg(
+	omap2_cm_write_mod_reg(
 		OMAP3430_AUTO_WDT2_MASK |
 		OMAP3430_AUTO_WDT1_MASK |
 		OMAP3430_AUTO_GPIO1_MASK |
@@ -784,17 +770,17 @@
 		OMAP3430_AUTO_GPT1_MASK,
 		WKUP_MOD, CM_AUTOIDLE);
 
-	cm_write_mod_reg(
+	omap2_cm_write_mod_reg(
 		OMAP3430_AUTO_DSS_MASK,
 		OMAP3430_DSS_MOD,
 		CM_AUTOIDLE);
 
-	cm_write_mod_reg(
+	omap2_cm_write_mod_reg(
 		OMAP3430_AUTO_CAM_MASK,
 		OMAP3430_CAM_MOD,
 		CM_AUTOIDLE);
 
-	cm_write_mod_reg(
+	omap2_cm_write_mod_reg(
 		omap3630_auto_uart4_mask |
 		OMAP3430_AUTO_GPIO6_MASK |
 		OMAP3430_AUTO_GPIO5_MASK |
@@ -818,7 +804,7 @@
 		CM_AUTOIDLE);
 
 	if (omap_rev() > OMAP3430_REV_ES1_0) {
-		cm_write_mod_reg(
+		omap2_cm_write_mod_reg(
 			OMAP3430ES2_AUTO_USBHOST_MASK,
 			OMAP3430ES2_USBHOST_MOD,
 			CM_AUTOIDLE);
@@ -830,16 +816,16 @@
 	 * Set all plls to autoidle. This is needed until autoidle is
 	 * enabled by clockfw
 	 */
-	cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
+	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
 			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
-	cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
+	omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
 			 MPU_MOD,
 			 CM_AUTOIDLE2);
-	cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
+	omap2_cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
 			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
 			 PLL_MOD,
 			 CM_AUTOIDLE);
-	cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
+	omap2_cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
 			 PLL_MOD,
 			 CM_AUTOIDLE2);
 
@@ -848,31 +834,31 @@
 	 * sys_clkreq. In the long run clock framework should
 	 * take care of this.
 	 */
-	prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
+	omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
 			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
 			     OMAP3430_GR_MOD,
 			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);
 
 	/* setup wakeup source */
-	prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
+	omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
 			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
 			  WKUP_MOD, PM_WKEN);
 	/* No need to write EN_IO, that is always enabled */
-	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
+	omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
 			  OMAP3430_GRPSEL_GPT1_MASK |
 			  OMAP3430_GRPSEL_GPT12_MASK,
 			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
 	/* For some reason IO doesn't generate wakeup event even if
 	 * it is selected to mpu wakeup group */
-	prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
+	omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
 			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 
 	/* Enable PM_WKEN to support DSS LPR */
-	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
+	omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
 				OMAP3430_DSS_MOD, PM_WKEN);
 
 	/* Enable wakeups in PER */
-	prm_write_mod_reg(omap3630_en_uart4_mask |
+	omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
 			  OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
 			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
 			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
@@ -880,7 +866,7 @@
 			  OMAP3430_EN_MCBSP4_MASK,
 			  OMAP3430_PER_MOD, PM_WKEN);
 	/* and allow them to wake up MPU */
-	prm_write_mod_reg(omap3630_grpsel_uart4_mask |
+	omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
 			  OMAP3430_GRPSEL_GPIO2_MASK |
 			  OMAP3430_GRPSEL_GPIO3_MASK |
 			  OMAP3430_GRPSEL_GPIO4_MASK |
@@ -893,22 +879,22 @@
 			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
 
 	/* Don't attach IVA interrupts */
-	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
-	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
-	prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
-	prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
+	omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
+	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
+	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
+	omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
 
 	/* Clear any pending 'reset' flags */
-	prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
-	prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
-	prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
 
 	/* Clear any pending PRCM interrupts */
-	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
 	omap3_iva_idle();
 	omap3_d2d_idle();
@@ -925,12 +911,29 @@
 		state = PWRDM_POWER_RET;
 
 #ifdef CONFIG_CPU_IDLE
-	omap3_cpuidle_update_states();
+	/*
+	 * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
+	 * enable OFF mode in a stable form for previous revisions; restrict
+	 * them to RET instead.
+	 */
+	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
+		omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
+	else
+		omap3_cpuidle_update_states(state, state);
 #endif
 
 	list_for_each_entry(pwrst, &pwrst_list, node) {
-		pwrst->next_state = state;
-		omap_set_pwrdm_state(pwrst->pwrdm, state);
+		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
+				pwrst->pwrdm == core_pwrdm &&
+				state == PWRDM_POWER_OFF) {
+			pwrst->next_state = PWRDM_POWER_RET;
+			WARN_ONCE(1,
+				"%s: Core OFF disabled due to erratum i583\n",
+				__func__);
+		} else {
+			pwrst->next_state = state;
+		}
+		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
 	}
 }
 
@@ -1002,6 +1005,17 @@
 				save_secure_ram_context_sz);
 }
 
+static void __init pm_errata_configure(void)
+{
+	if (cpu_is_omap3630()) {
+		pm34xx_errata |= PM_RTA_ERRATUM_i608;
+		/* Enable the l2 cache toggling in sleep logic */
+		enable_omap3630_toggle_l2_on_restore();
+		if (omap_rev() < OMAP3630_REV_ES1_2)
+			pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
+	}
+}
+
 static int __init omap3_pm_init(void)
 {
 	struct power_state *pwrst, *tmp;
@@ -1011,6 +1025,8 @@
 	if (!cpu_is_omap34xx())
 		return -ENODEV;
 
+	pm_errata_configure();
+
 	printk(KERN_ERR "Power Management for TI OMAP3.\n");
 
 	/* XXX prcm_setup_regs needs to be before enabling hw
@@ -1058,6 +1074,14 @@
 	pm_idle = omap3_pm_idle;
 	omap3_idle_init();
 
+	/*
+	 * RTA is disabled during initialization as per erratum i608.
+	 * It is safer to disable RTA in the bootloader, but we would like
+	 * to be doubly sure here and prevent any mishaps.
+	 */
+	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
+		omap3630_ctrl_disable_rta();
+
 	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
 		omap3_secure_ram_storage =
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 54544b4..76cfff2 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -16,7 +16,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <plat/powerdomain.h>
+#include "powerdomain.h"
 #include <mach/omap4-common.h>
 
 struct power_state {
@@ -31,12 +31,6 @@
 static LIST_HEAD(pwrst_list);
 
 #ifdef CONFIG_SUSPEND
-static int omap4_pm_prepare(void)
-{
-	disable_hlt();
-	return 0;
-}
-
 static int omap4_pm_suspend(void)
 {
 	do_wfi();
@@ -59,28 +53,22 @@
 	return ret;
 }
 
-static void omap4_pm_finish(void)
-{
-	enable_hlt();
-	return;
-}
-
 static int omap4_pm_begin(suspend_state_t state)
 {
+	disable_hlt();
 	return 0;
 }
 
 static void omap4_pm_end(void)
 {
+	enable_hlt();
 	return;
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap4_pm_begin,
 	.end		= omap4_pm_end,
-	.prepare	= omap4_pm_prepare,
 	.enter		= omap4_pm_enter,
-	.finish		= omap4_pm_finish,
 	.valid		= suspend_valid_only_mem,
 };
 #endif /* CONFIG_SUSPEND */
diff --git a/arch/arm/mach-omap2/pm_bus.c b/arch/arm/mach-omap2/pm_bus.c
index 784989f..5acd2ab 100644
--- a/arch/arm/mach-omap2/pm_bus.c
+++ b/arch/arm/mach-omap2/pm_bus.c
@@ -20,7 +20,7 @@
 #include <plat/omap-pm.h>
 
 #ifdef CONFIG_PM_RUNTIME
-int omap_pm_runtime_suspend(struct device *dev)
+static int omap_pm_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int r, ret = 0;
@@ -37,7 +37,7 @@
 	return ret;
 };
 
-int omap_pm_runtime_resume(struct device *dev)
+static int omap_pm_runtime_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	int r;
diff --git a/arch/arm/mach-omap2/powerdomain-common.c b/arch/arm/mach-omap2/powerdomain-common.c
new file mode 100644
index 0000000..171fccd
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomain-common.c
@@ -0,0 +1,110 @@
+/*
+ *  linux/arch/arm/mach-omap2/powerdomain-common.c
+ *  Contains common powerdomain framework functions
+ *
+ *  Copyright (C) 2010 Texas Instruments, Inc.
+ *  Copyright (C) 2010 Nokia Corporation
+ *
+ * Derived from mach-omap2/powerdomain.c written by Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include "pm.h"
+#include "cm.h"
+#include "cm-regbits-34xx.h"
+#include "cm-regbits-44xx.h"
+#include "prm-regbits-34xx.h"
+#include "prm-regbits-44xx.h"
+
+/*
+ * OMAP3 and OMAP4 specific register bit initialisations.
+ * Note that the names here do not correspond to a particular power
+ * domain; the bit mapping used applies to all of them.
+ */
+/* OMAP3 and OMAP4 Memory Onstate Masks (common across all power domains) */
+#define OMAP_MEM0_ONSTATE_MASK OMAP3430_SHAREDL1CACHEFLATONSTATE_MASK
+#define OMAP_MEM1_ONSTATE_MASK OMAP3430_L1FLATMEMONSTATE_MASK
+#define OMAP_MEM2_ONSTATE_MASK OMAP3430_SHAREDL2CACHEFLATONSTATE_MASK
+#define OMAP_MEM3_ONSTATE_MASK OMAP3430_L2FLATMEMONSTATE_MASK
+#define OMAP_MEM4_ONSTATE_MASK OMAP4430_OCP_NRET_BANK_ONSTATE_MASK
+
+/* OMAP3 and OMAP4 Memory Retstate Masks (common across all power domains) */
+#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK
+#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE_MASK
+#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK
+#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE_MASK
+#define OMAP_MEM4_RETSTATE_MASK OMAP4430_OCP_NRET_BANK_RETSTATE_MASK
+
+/* OMAP3 and OMAP4 Memory Status bits */
+#define OMAP_MEM0_STATEST_MASK OMAP3430_SHAREDL1CACHEFLATSTATEST_MASK
+#define OMAP_MEM1_STATEST_MASK OMAP3430_L1FLATMEMSTATEST_MASK
+#define OMAP_MEM2_STATEST_MASK OMAP3430_SHAREDL2CACHEFLATSTATEST_MASK
+#define OMAP_MEM3_STATEST_MASK OMAP3430_L2FLATMEMSTATEST_MASK
+#define OMAP_MEM4_STATEST_MASK OMAP4430_OCP_NRET_BANK_STATEST_MASK
+
+/* Common internal functions used across OMAP revisions */
+u32 omap2_pwrdm_get_mem_bank_onstate_mask(u8 bank)
+{
+	switch (bank) {
+	case 0:
+		return OMAP_MEM0_ONSTATE_MASK;
+	case 1:
+		return OMAP_MEM1_ONSTATE_MASK;
+	case 2:
+		return OMAP_MEM2_ONSTATE_MASK;
+	case 3:
+		return OMAP_MEM3_ONSTATE_MASK;
+	case 4:
+		return OMAP_MEM4_ONSTATE_MASK;
+	default:
+		WARN_ON(1); /* should never happen */
+		return -EEXIST;
+	}
+	return 0;
+}
+
+u32 omap2_pwrdm_get_mem_bank_retst_mask(u8 bank)
+{
+	switch (bank) {
+	case 0:
+		return OMAP_MEM0_RETSTATE_MASK;
+	case 1:
+		return OMAP_MEM1_RETSTATE_MASK;
+	case 2:
+		return OMAP_MEM2_RETSTATE_MASK;
+	case 3:
+		return OMAP_MEM3_RETSTATE_MASK;
+	case 4:
+		return OMAP_MEM4_RETSTATE_MASK;
+	default:
+		WARN_ON(1); /* should never happen */
+		return -EEXIST;
+	}
+	return 0;
+}
+
+u32 omap2_pwrdm_get_mem_bank_stst_mask(u8 bank)
+{
+	switch (bank) {
+	case 0:
+		return OMAP_MEM0_STATEST_MASK;
+	case 1:
+		return OMAP_MEM1_STATEST_MASK;
+	case 2:
+		return OMAP_MEM2_STATEST_MASK;
+	case 3:
+		return OMAP_MEM3_STATEST_MASK;
+	case 4:
+		return OMAP_MEM4_STATEST_MASK;
+	default:
+		WARN_ON(1); /* should never happen */
+		return -EEXIST;
+	}
+	return 0;
+}
+
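
To show where these mask helpers fit, a hypothetical SoC-specific powerdomain operation could combine them with the PRM read-modify-write helper along the following lines (a sketch only; the register offset and the lack of error handling are simplifying assumptions, not code from this patch):

/* Hypothetical sketch: program the ON-state of memory bank <bank> */
static int example_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
				      u8 pwrst)
{
	u32 m = omap2_pwrdm_get_mem_bank_onstate_mask(bank);

	/* shift the requested state into the bank's ON-state bitfield */
	omap2_prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)),
				   pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);

	return 0;
}
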
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 6527ec3..eaed0df 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -15,27 +15,19 @@
 #undef DEBUG
 
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <asm/atomic.h>
-
-#include "cm.h"
-#include "cm-regbits-34xx.h"
-#include "cm-regbits-44xx.h"
-#include "prm.h"
-#include "prm-regbits-34xx.h"
-#include "prm-regbits-44xx.h"
+#include <linux/string.h>
+#include "cm2xxx_3xxx.h"
+#include "prcm44xx.h"
+#include "cm44xx.h"
+#include "prm2xxx_3xxx.h"
+#include "prm44xx.h"
 
 #include <plat/cpu.h>
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+#include "powerdomain.h"
+#include "clockdomain.h"
 #include <plat/prcm.h>
 
 #include "pm.h"
@@ -45,41 +37,12 @@
 	PWRDM_STATE_PREV,
 };
 
-/* Variable holding value of the CPU dependent PWRSTCTRL Register Offset */
-static u16 pwrstctrl_reg_offs;
-
-/* Variable holding value of the CPU dependent PWRSTST Register Offset */
-static u16 pwrstst_reg_offs;
-
-/* OMAP3 and OMAP4 specific register bit initialisations
- * Notice that the names here are not according to each power
- * domain but the bit mapping used applies to all of them
- */
-
-/* OMAP3 and OMAP4 Memory Onstate Masks (common across all power domains) */
-#define OMAP_MEM0_ONSTATE_MASK OMAP3430_SHAREDL1CACHEFLATONSTATE_MASK
-#define OMAP_MEM1_ONSTATE_MASK OMAP3430_L1FLATMEMONSTATE_MASK
-#define OMAP_MEM2_ONSTATE_MASK OMAP3430_SHAREDL2CACHEFLATONSTATE_MASK
-#define OMAP_MEM3_ONSTATE_MASK OMAP3430_L2FLATMEMONSTATE_MASK
-#define OMAP_MEM4_ONSTATE_MASK OMAP4430_OCP_NRET_BANK_ONSTATE_MASK
-
-/* OMAP3 and OMAP4 Memory Retstate Masks (common across all power domains) */
-#define OMAP_MEM0_RETSTATE_MASK OMAP3430_SHAREDL1CACHEFLATRETSTATE_MASK
-#define OMAP_MEM1_RETSTATE_MASK OMAP3430_L1FLATMEMRETSTATE_MASK
-#define OMAP_MEM2_RETSTATE_MASK OMAP3430_SHAREDL2CACHEFLATRETSTATE_MASK
-#define OMAP_MEM3_RETSTATE_MASK OMAP3430_L2FLATMEMRETSTATE_MASK
-#define OMAP_MEM4_RETSTATE_MASK OMAP4430_OCP_NRET_BANK_RETSTATE_MASK
-
-/* OMAP3 and OMAP4 Memory Status bits */
-#define OMAP_MEM0_STATEST_MASK OMAP3430_SHAREDL1CACHEFLATSTATEST_MASK
-#define OMAP_MEM1_STATEST_MASK OMAP3430_L1FLATMEMSTATEST_MASK
-#define OMAP_MEM2_STATEST_MASK OMAP3430_SHAREDL2CACHEFLATSTATEST_MASK
-#define OMAP_MEM3_STATEST_MASK OMAP3430_L2FLATMEMSTATEST_MASK
-#define OMAP_MEM4_STATEST_MASK OMAP4430_OCP_NRET_BANK_STATEST_MASK
 
 /* pwrdm_list contains all registered struct powerdomains */
 static LIST_HEAD(pwrdm_list);
 
+static struct pwrdm_ops *arch_pwrdm;
+
 /* Private functions */
 
 static struct powerdomain *_pwrdm_lookup(const char *name)
@@ -110,12 +73,19 @@
 {
 	int i;
 
-	if (!pwrdm)
+	if (!pwrdm || !pwrdm->name)
 		return -EINVAL;
 
 	if (!omap_chip_is(pwrdm->omap_chip))
 		return -EINVAL;
 
+	if (cpu_is_omap44xx() &&
+	    pwrdm->prcm_partition == OMAP4430_INVALID_PRCM_PARTITION) {
+		pr_err("powerdomain: %s: missing OMAP4 PRCM partition ID\n",
+		       pwrdm->name);
+		return -EINVAL;
+	}
+
 	if (_pwrdm_lookup(pwrdm->name))
 		return -EEXIST;
 
@@ -211,6 +181,7 @@
 /**
  * pwrdm_init - set up the powerdomain layer
  * @pwrdm_list: array of struct powerdomain pointers to register
+ * @custom_funcs: function pointers for arch-specific implementations
  *
  * Loop through the array of powerdomains @pwrdm_list, registering all
  * that are available on the current CPU. If pwrdm_list is supplied
@@ -218,21 +189,14 @@
  * registered.  No return value.  XXX pwrdm_list is not really a
  * "list"; it is an array.  Rename appropriately.
  */
-void pwrdm_init(struct powerdomain **pwrdm_list)
+void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs)
 {
 	struct powerdomain **p = NULL;
 
-	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
-		pwrstctrl_reg_offs = OMAP2_PM_PWSTCTRL;
-		pwrstst_reg_offs = OMAP2_PM_PWSTST;
-	} else if (cpu_is_omap44xx()) {
-		pwrstctrl_reg_offs = OMAP4_PM_PWSTCTRL;
-		pwrstst_reg_offs = OMAP4_PM_PWSTST;
-	} else {
-		printk(KERN_ERR "Power Domain struct not supported for " \
-							"this CPU\n");
-		return;
-	}
+	if (!custom_funcs)
+		WARN(1, "powerdomain: No custom pwrdm functions registered\n");
+	else
+		arch_pwrdm = custom_funcs;
 
 	if (pwrdm_list) {
 		for (p = pwrdm_list; *p; p++)
@@ -431,6 +395,8 @@
  */
 int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
@@ -440,11 +406,10 @@
 	pr_debug("powerdomain: setting next powerstate for %s to %0x\n",
 		 pwrdm->name, pwrst);
 
-	prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK,
-			     (pwrst << OMAP_POWERSTATE_SHIFT),
-			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst)
+		ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -457,11 +422,15 @@
  */
 int pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs,
-				 pwrstctrl_reg_offs, OMAP_POWERSTATE_MASK);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_next_pwrst)
+		ret = arch_pwrdm->pwrdm_read_next_pwrst(pwrdm);
+
+	return ret;
 }
 
 /**
@@ -474,11 +443,15 @@
  */
 int pwrdm_read_pwrst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs,
-				 pwrstst_reg_offs, OMAP_POWERSTATEST_MASK);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_pwrst)
+		ret = arch_pwrdm->pwrdm_read_pwrst(pwrdm);
+
+	return ret;
 }
 
 /**
@@ -491,11 +464,15 @@
  */
 int pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST,
-					OMAP3430_LASTPOWERSTATEENTERED_MASK);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_prev_pwrst)
+		ret = arch_pwrdm->pwrdm_read_prev_pwrst(pwrdm);
+
+	return ret;
 }
 
 /**
@@ -511,7 +488,7 @@
  */
 int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
 {
-	u32 v;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
 		return -EINVAL;
@@ -522,17 +499,10 @@
 	pr_debug("powerdomain: setting next logic powerstate for %s to %0x\n",
 		 pwrdm->name, pwrst);
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	v = pwrst << __ffs(OMAP3430_LOGICL1CACHERETSTATE_MASK);
-	prm_rmw_mod_reg_bits(OMAP3430_LOGICL1CACHERETSTATE_MASK, v,
-			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_set_logic_retst)
+		ret = arch_pwrdm->pwrdm_set_logic_retst(pwrdm, pwrst);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -552,7 +522,7 @@
  */
 int pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank, u8 pwrst)
 {
-	u32 m;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
 		return -EINVAL;
@@ -566,37 +536,10 @@
 	pr_debug("powerdomain: setting next memory powerstate for domain %s "
 		 "bank %0x while pwrdm-ON to %0x\n", pwrdm->name, bank, pwrst);
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	switch (bank) {
-	case 0:
-		m = OMAP_MEM0_ONSTATE_MASK;
-		break;
-	case 1:
-		m = OMAP_MEM1_ONSTATE_MASK;
-		break;
-	case 2:
-		m = OMAP_MEM2_ONSTATE_MASK;
-		break;
-	case 3:
-		m = OMAP_MEM3_ONSTATE_MASK;
-		break;
-	case 4:
-		m = OMAP_MEM4_ONSTATE_MASK;
-		break;
-	default:
-		WARN_ON(1); /* should never happen */
-		return -EEXIST;
-	}
+	if (arch_pwrdm && arch_pwrdm->pwrdm_set_mem_onst)
+		ret = arch_pwrdm->pwrdm_set_mem_onst(pwrdm, bank, pwrst);
 
-	prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)),
-			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -617,7 +560,7 @@
  */
 int pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst)
 {
-	u32 m;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
 		return -EINVAL;
@@ -631,37 +574,10 @@
 	pr_debug("powerdomain: setting next memory powerstate for domain %s "
 		 "bank %0x while pwrdm-RET to %0x\n", pwrdm->name, bank, pwrst);
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	switch (bank) {
-	case 0:
-		m = OMAP_MEM0_RETSTATE_MASK;
-		break;
-	case 1:
-		m = OMAP_MEM1_RETSTATE_MASK;
-		break;
-	case 2:
-		m = OMAP_MEM2_RETSTATE_MASK;
-		break;
-	case 3:
-		m = OMAP_MEM3_RETSTATE_MASK;
-		break;
-	case 4:
-		m = OMAP_MEM4_RETSTATE_MASK;
-		break;
-	default:
-		WARN_ON(1); /* should never happen */
-		return -EEXIST;
-	}
+	if (arch_pwrdm && arch_pwrdm->pwrdm_set_mem_retst)
+		ret = arch_pwrdm->pwrdm_set_mem_retst(pwrdm, bank, pwrst);
 
-	prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_offs,
-			     pwrstctrl_reg_offs);
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -675,11 +591,15 @@
  */
 int pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs, pwrstst_reg_offs,
-				       OMAP3430_LOGICSTATEST_MASK);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_logic_pwrst)
+		ret = arch_pwrdm->pwrdm_read_logic_pwrst(pwrdm);
+
+	return ret;
 }
 
 /**
@@ -692,17 +612,15 @@
  */
 int pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST,
-					OMAP3430_LASTLOGICSTATEENTERED_MASK);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_prev_logic_pwrst)
+		ret = arch_pwrdm->pwrdm_read_prev_logic_pwrst(pwrdm);
+
+	return ret;
 }
 
 /**
@@ -715,17 +633,15 @@
  */
 int pwrdm_read_logic_retst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs, pwrstctrl_reg_offs,
-				       OMAP3430_LOGICSTATEST_MASK);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_logic_retst)
+		ret = arch_pwrdm->pwrdm_read_logic_retst(pwrdm);
+
+	return ret;
 }
 
 /**
@@ -740,46 +656,21 @@
  */
 int pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
 {
-	u32 m;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
-		return -EINVAL;
+		return ret;
 
 	if (pwrdm->banks < (bank + 1))
-		return -EEXIST;
+		return ret;
 
 	if (pwrdm->flags & PWRDM_HAS_MPU_QUIRK)
 		bank = 1;
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	switch (bank) {
-	case 0:
-		m = OMAP_MEM0_STATEST_MASK;
-		break;
-	case 1:
-		m = OMAP_MEM1_STATEST_MASK;
-		break;
-	case 2:
-		m = OMAP_MEM2_STATEST_MASK;
-		break;
-	case 3:
-		m = OMAP_MEM3_STATEST_MASK;
-		break;
-	case 4:
-		m = OMAP_MEM4_STATEST_MASK;
-		break;
-	default:
-		WARN_ON(1); /* should never happen */
-		return -EEXIST;
-	}
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_mem_pwrst)
+		ret = arch_pwrdm->pwrdm_read_mem_pwrst(pwrdm, bank);
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs,
-					 pwrstst_reg_offs, m);
+	return ret;
 }
 
 /**
@@ -795,43 +686,21 @@
  */
 int pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
 {
-	u32 m;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
-		return -EINVAL;
+		return ret;
 
 	if (pwrdm->banks < (bank + 1))
-		return -EEXIST;
+		return ret;
 
 	if (pwrdm->flags & PWRDM_HAS_MPU_QUIRK)
 		bank = 1;
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	switch (bank) {
-	case 0:
-		m = OMAP3430_LASTMEM1STATEENTERED_MASK;
-		break;
-	case 1:
-		m = OMAP3430_LASTMEM2STATEENTERED_MASK;
-		break;
-	case 2:
-		m = OMAP3430_LASTSHAREDL2CACHEFLATSTATEENTERED_MASK;
-		break;
-	case 3:
-		m = OMAP3430_LASTL2FLATMEMSTATEENTERED_MASK;
-		break;
-	default:
-		WARN_ON(1); /* should never happen */
-		return -EEXIST;
-	}
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_prev_mem_pwrst)
+		ret = arch_pwrdm->pwrdm_read_prev_mem_pwrst(pwrdm, bank);
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs,
-					OMAP3430_PM_PREPWSTST, m);
+	return ret;
 }
 
 /**
@@ -846,43 +715,18 @@
  */
 int pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
 {
-	u32 m;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
-		return -EINVAL;
+		return ret;
 
 	if (pwrdm->banks < (bank + 1))
-		return -EEXIST;
+		return ret;
 
-	/*
-	 * The register bit names below may not correspond to the
-	 * actual names of the bits in each powerdomain's register,
-	 * but the type of value returned is the same for each
-	 * powerdomain.
-	 */
-	switch (bank) {
-	case 0:
-		m = OMAP_MEM0_RETSTATE_MASK;
-		break;
-	case 1:
-		m = OMAP_MEM1_RETSTATE_MASK;
-		break;
-	case 2:
-		m = OMAP_MEM2_RETSTATE_MASK;
-		break;
-	case 3:
-		m = OMAP_MEM3_RETSTATE_MASK;
-		break;
-	case 4:
-		m = OMAP_MEM4_RETSTATE_MASK;
-		break;
-	default:
-		WARN_ON(1); /* should never happen */
-		return -EEXIST;
-	}
+	if (arch_pwrdm && arch_pwrdm->pwrdm_read_mem_retst)
+		ret = arch_pwrdm->pwrdm_read_mem_retst(pwrdm, bank);
 
-	return prm_read_mod_bits_shift(pwrdm->prcm_offs,
-					pwrstctrl_reg_offs, m);
+	return ret;
 }
 
 /**
@@ -896,8 +740,10 @@
  */
 int pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
-		return -EINVAL;
+		return ret;
 
 	/*
 	 * XXX should get the powerdomain's current state here;
@@ -907,9 +753,10 @@
 	pr_debug("powerdomain: clearing previous power state reg for %s\n",
 		 pwrdm->name);
 
-	prm_write_mod_reg(0, pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_clear_all_prev_pwrst)
+		ret = arch_pwrdm->pwrdm_clear_all_prev_pwrst(pwrdm);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -925,19 +772,21 @@
  */
 int pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
-		return -EINVAL;
+		return ret;
 
 	if (!(pwrdm->flags & PWRDM_HAS_HDWR_SAR))
-		return -EINVAL;
+		return ret;
 
 	pr_debug("powerdomain: %s: setting SAVEANDRESTORE bit\n",
 		 pwrdm->name);
 
-	prm_rmw_mod_reg_bits(0, 1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT,
-			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_enable_hdwr_sar)
+		ret = arch_pwrdm->pwrdm_enable_hdwr_sar(pwrdm);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -953,19 +802,21 @@
  */
 int pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
-		return -EINVAL;
+		return ret;
 
 	if (!(pwrdm->flags & PWRDM_HAS_HDWR_SAR))
-		return -EINVAL;
+		return ret;
 
 	pr_debug("powerdomain: %s: clearing SAVEANDRESTORE bit\n",
 		 pwrdm->name);
 
-	prm_rmw_mod_reg_bits(1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT, 0,
-			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_disable_hdwr_sar)
+		ret = arch_pwrdm->pwrdm_disable_hdwr_sar(pwrdm);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -992,6 +843,8 @@
  */
 int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
 {
+	int ret = -EINVAL;
+
 	if (!pwrdm)
 		return -EINVAL;
 
@@ -1001,11 +854,10 @@
 	pr_debug("powerdomain: %s: setting LOWPOWERSTATECHANGE bit\n",
 		 pwrdm->name);
 
-	prm_rmw_mod_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK,
-			     (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT),
-			     pwrdm->prcm_offs, pwrstctrl_reg_offs);
+	if (arch_pwrdm && arch_pwrdm->pwrdm_set_lowpwrstchange)
+		ret = arch_pwrdm->pwrdm_set_lowpwrstchange(pwrdm);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -1020,32 +872,15 @@
  */
 int pwrdm_wait_transition(struct powerdomain *pwrdm)
 {
-	u32 c = 0;
+	int ret = -EINVAL;
 
 	if (!pwrdm)
 		return -EINVAL;
 
-	/*
-	 * REVISIT: pwrdm_wait_transition() may be better implemented
-	 * via a callback and a periodic timer check -- how long do we expect
-	 * powerdomain transitions to take?
-	 */
+	if (arch_pwrdm && arch_pwrdm->pwrdm_wait_transition)
+		ret = arch_pwrdm->pwrdm_wait_transition(pwrdm);
 
-	/* XXX Is this udelay() value meaningful? */
-	while ((prm_read_mod_reg(pwrdm->prcm_offs, pwrstst_reg_offs) &
-		OMAP_INTRANSITION_MASK) &&
-	       (c++ < PWRDM_TRANSITION_BAILOUT))
-			udelay(1);
-
-	if (c > PWRDM_TRANSITION_BAILOUT) {
-		printk(KERN_ERR "powerdomain: waited too long for "
-		       "powerdomain %s to complete transition\n", pwrdm->name);
-		return -EAGAIN;
-	}
-
-	pr_debug("powerdomain: completed transition in %d loops\n", c);
-
-	return 0;
+	return ret;
 }
 
 int pwrdm_state_switch(struct powerdomain *pwrdm)
@@ -1075,3 +910,31 @@
 	return 0;
 }
 
+/**
+ * pwrdm_get_context_loss_count - get powerdomain's context loss count
+ * @pwrdm: struct powerdomain * to query
+ *
+ * The context loss count is the sum of the powerdomain's off-mode
+ * counter, its logic-off counter and its per-bank memory-off counters.
+ * Returns 0 (and WARNs) upon error; otherwise, returns the context
+ * loss count.
+ */
+u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm)
+{
+	int i, count;
+
+	if (!pwrdm) {
+		WARN(1, "powerdomain: %s: pwrdm is null\n", __func__);
+		return 0;
+	}
+
+	count = pwrdm->state_counter[PWRDM_POWER_OFF];
+	count += pwrdm->ret_logic_off_counter;
+
+	for (i = 0; i < pwrdm->banks; i++)
+		count += pwrdm->ret_mem_off_counter[i];
+
+	pr_debug("powerdomain: %s: context loss count = %u\n",
+		 pwrdm->name, count);
+
+	return count;
+}
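A hedged example of how a consumer might use this counter; the example_* names are invented and not part of this patch. The usual pattern is to snapshot the count before letting the domain idle and compare it afterwards -- an increased count means the powerdomain lost context and the caller must restore its state:

static u32 example_saved_count;

static void example_restore_context(void);	/* hypothetical helper */

static void example_before_idle(struct powerdomain *pwrdm)
{
	example_saved_count = pwrdm_get_context_loss_count(pwrdm);
}

static void example_after_wakeup(struct powerdomain *pwrdm)
{
	/* any increase means register context was lost in between */
	if (pwrdm_get_context_loss_count(pwrdm) != example_saved_count)
		example_restore_context();
}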
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
new file mode 100644
index 0000000..c66431e
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -0,0 +1,233 @@
+/*
+ * OMAP2/3/4 powerdomain control
+ *
+ * Copyright (C) 2007-2008, 2010 Texas Instruments, Inc.
+ * Copyright (C) 2007-2010 Nokia Corporation
+ *
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX This should be moved to the mach-omap2/ directory at the earliest
+ * opportunity.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_POWERDOMAIN_H
+#define __ARCH_ARM_MACH_OMAP2_POWERDOMAIN_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <linux/atomic.h>
+
+#include <plat/cpu.h>
+
+/* Powerdomain basic power states */
+#define PWRDM_POWER_OFF		0x0
+#define PWRDM_POWER_RET		0x1
+#define PWRDM_POWER_INACTIVE	0x2
+#define PWRDM_POWER_ON		0x3
+
+#define PWRDM_MAX_PWRSTS	4
+
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_ON		(1 << PWRDM_POWER_ON)
+#define PWRSTS_OFF		(1 << PWRDM_POWER_OFF)
+#define PWRSTS_OFF_ON		((1 << PWRDM_POWER_OFF) | \
+				 (1 << PWRDM_POWER_ON))
+
+#define PWRSTS_OFF_RET		((1 << PWRDM_POWER_OFF) | \
+				 (1 << PWRDM_POWER_RET))
+
+#define PWRSTS_RET_ON		((1 << PWRDM_POWER_RET) | \
+				 (1 << PWRDM_POWER_ON))
+
+#define PWRSTS_OFF_RET_ON	(PWRSTS_OFF_RET | (1 << PWRDM_POWER_ON))
+
+
+/* Powerdomain flags */
+#define PWRDM_HAS_HDWR_SAR	(1 << 0) /* hardware save-and-restore support */
+#define PWRDM_HAS_MPU_QUIRK	(1 << 1) /* MPU pwr domain has MEM bank 0 bits
+					  * in MEM bank 1 position. This is
+					  * true for OMAP3430
+					  */
+#define PWRDM_HAS_LOWPOWERSTATECHANGE	(1 << 2) /*
+						  * support to transition from a
+						  * sleep state to a lower sleep
+						  * state without waking up the
+						  * powerdomain
+						  */
+
+/*
+ * Number of memory banks that are power-controllable.	On OMAP4430, the
+ * maximum is 5.
+ */
+#define PWRDM_MAX_MEM_BANKS	5
+
+/*
+ * Maximum number of clockdomains that can be associated with a powerdomain.
+ * CORE powerdomain on OMAP4 is the worst case
+ */
+#define PWRDM_MAX_CLKDMS	9
+
+/* XXX A completely arbitrary number. What is reasonable here? */
+#define PWRDM_TRANSITION_BAILOUT 100000
+
+struct clockdomain;
+struct powerdomain;
+
+/**
+ * struct powerdomain - OMAP powerdomain
+ * @name: Powerdomain name
+ * @omap_chip: represents the OMAP chip types containing this pwrdm
+ * @prcm_offs: the address offset from CM_BASE/PRM_BASE
+ * @prcm_partition: (OMAP4 only) the PRCM partition ID containing @prcm_offs
+ * @pwrsts: Possible powerdomain power states
+ * @pwrsts_logic_ret: Possible logic power states when pwrdm in RETENTION
+ * @flags: Powerdomain flags
+ * @banks: Number of software-controllable memory banks in this powerdomain
+ * @pwrsts_mem_ret: Possible memory bank pwrstates when pwrdm in RETENTION
+ * @pwrsts_mem_on: Possible memory bank pwrstates when pwrdm in ON
+ * @pwrdm_clkdms: Clockdomains in this powerdomain
+ * @node: list_head linking all powerdomains
+ * @state: current power state of the powerdomain, as last recorded
+ * @state_counter: number of times the powerdomain has entered each power state
+ * @timer: (CONFIG_PM_DEBUG) timestamp of the last recorded state change
+ * @state_timer: (CONFIG_PM_DEBUG) cumulative time spent in each power state
+ *
+ * @prcm_partition possible values are defined in mach-omap2/prcm44xx.h.
+ */
+struct powerdomain {
+	const char *name;
+	const struct omap_chip_id omap_chip;
+	const s16 prcm_offs;
+	const u8 pwrsts;
+	const u8 pwrsts_logic_ret;
+	const u8 flags;
+	const u8 banks;
+	const u8 pwrsts_mem_ret[PWRDM_MAX_MEM_BANKS];
+	const u8 pwrsts_mem_on[PWRDM_MAX_MEM_BANKS];
+	const u8 prcm_partition;
+	struct clockdomain *pwrdm_clkdms[PWRDM_MAX_CLKDMS];
+	struct list_head node;
+	int state;
+	unsigned state_counter[PWRDM_MAX_PWRSTS];
+	unsigned ret_logic_off_counter;
+	unsigned ret_mem_off_counter[PWRDM_MAX_MEM_BANKS];
+
+#ifdef CONFIG_PM_DEBUG
+	s64 timer;
+	s64 state_timer[PWRDM_MAX_PWRSTS];
+#endif
+};
+
+/**
+ * struct pwrdm_ops - Arch-specific function implementations
+ * @pwrdm_set_next_pwrst: Set the target power state for a pd
+ * @pwrdm_read_next_pwrst: Read the target power state set for a pd
+ * @pwrdm_read_pwrst: Read the current power state of a pd
+ * @pwrdm_read_prev_pwrst: Read the prev power state entered by the pd
+ * @pwrdm_set_logic_retst: Set the logic state in RET for a pd
+ * @pwrdm_set_mem_onst: Set the Memory state in ON for a pd
+ * @pwrdm_set_mem_retst: Set the Memory state in RET for a pd
+ * @pwrdm_read_logic_pwrst: Read the current logic state of a pd
+ * @pwrdm_read_prev_logic_pwrst: Read the previous logic state entered by a pd
+ * @pwrdm_read_logic_retst: Read the logic state in RET for a pd
+ * @pwrdm_read_mem_pwrst: Read the current memory state of a pd
+ * @pwrdm_read_prev_mem_pwrst: Read the previous memory state entered by a pd
+ * @pwrdm_read_mem_retst: Read the memory state in RET for a pd
+ * @pwrdm_clear_all_prev_pwrst: Clear all previous power states logged for a pd
+ * @pwrdm_enable_hdwr_sar: Enable Hardware Save-Restore feature for the pd
+ * @pwrdm_disable_hdwr_sar: Disable Hardware Save-Restore feature for a pd
+ * @pwrdm_set_lowpwrstchange: Enable pd transitions from a shallow to deep sleep
+ * @pwrdm_wait_transition: Wait for a pd state transition to complete
+ */
+struct pwrdm_ops {
+	int	(*pwrdm_set_next_pwrst)(struct powerdomain *pwrdm, u8 pwrst);
+	int	(*pwrdm_read_next_pwrst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_read_pwrst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_read_prev_pwrst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_set_logic_retst)(struct powerdomain *pwrdm, u8 pwrst);
+	int	(*pwrdm_set_mem_onst)(struct powerdomain *pwrdm, u8 bank, u8 pwrst);
+	int	(*pwrdm_set_mem_retst)(struct powerdomain *pwrdm, u8 bank, u8 pwrst);
+	int	(*pwrdm_read_logic_pwrst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_read_prev_logic_pwrst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_read_logic_retst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_read_mem_pwrst)(struct powerdomain *pwrdm, u8 bank);
+	int	(*pwrdm_read_prev_mem_pwrst)(struct powerdomain *pwrdm, u8 bank);
+	int	(*pwrdm_read_mem_retst)(struct powerdomain *pwrdm, u8 bank);
+	int	(*pwrdm_clear_all_prev_pwrst)(struct powerdomain *pwrdm);
+	int	(*pwrdm_enable_hdwr_sar)(struct powerdomain *pwrdm);
+	int	(*pwrdm_disable_hdwr_sar)(struct powerdomain *pwrdm);
+	int	(*pwrdm_set_lowpwrstchange)(struct powerdomain *pwrdm);
+	int	(*pwrdm_wait_transition)(struct powerdomain *pwrdm);
+};
+
+void pwrdm_fw_init(void);
+void pwrdm_init(struct powerdomain **pwrdm_list, struct pwrdm_ops *custom_funcs);
+
+struct powerdomain *pwrdm_lookup(const char *name);
+
+int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
+			void *user);
+int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
+			void *user);
+
+int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
+int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
+int pwrdm_for_each_clkdm(struct powerdomain *pwrdm,
+			 int (*fn)(struct powerdomain *pwrdm,
+				   struct clockdomain *clkdm));
+
+int pwrdm_get_mem_bank_count(struct powerdomain *pwrdm);
+
+int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst);
+int pwrdm_read_next_pwrst(struct powerdomain *pwrdm);
+int pwrdm_read_pwrst(struct powerdomain *pwrdm);
+int pwrdm_read_prev_pwrst(struct powerdomain *pwrdm);
+int pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm);
+
+int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst);
+int pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank, u8 pwrst);
+int pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst);
+
+int pwrdm_read_logic_pwrst(struct powerdomain *pwrdm);
+int pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm);
+int pwrdm_read_logic_retst(struct powerdomain *pwrdm);
+int pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank);
+int pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank);
+int pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank);
+
+int pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm);
+int pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm);
+bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm);
+
+int pwrdm_wait_transition(struct powerdomain *pwrdm);
+
+int pwrdm_state_switch(struct powerdomain *pwrdm);
+int pwrdm_clkdm_state_switch(struct clockdomain *clkdm);
+int pwrdm_pre_transition(void);
+int pwrdm_post_transition(void);
+int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
+u32 pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
+
+extern void omap2xxx_powerdomains_init(void);
+extern void omap3xxx_powerdomains_init(void);
+extern void omap44xx_powerdomains_init(void);
+
+extern struct pwrdm_ops omap2_pwrdm_operations;
+extern struct pwrdm_ops omap3_pwrdm_operations;
+extern struct pwrdm_ops omap4_pwrdm_operations;
+
+/* Common internal functions used across OMAP revs */
+extern u32 omap2_pwrdm_get_mem_bank_onstate_mask(u8 bank);
+extern u32 omap2_pwrdm_get_mem_bank_retst_mask(u8 bank);
+extern u32 omap2_pwrdm_get_mem_bank_stst_mask(u8 bank);
+
+extern struct powerdomain wkup_omap2_pwrdm;
+extern struct powerdomain gfx_omap2_pwrdm;
+
+
+#endif
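To make the registration/dispatch split declared above concrete, here is a minimal sketch. The example_soc_* names are invented; pwrdm_init(), the omap3_pwrdm_operations table and the two common powerdomains are taken from this header. SoC init code hands its powerdomain array plus a struct pwrdm_ops to the common layer, which then calls through the stored table only when a hook is actually implemented:

#include <linux/init.h>

#include "powerdomain.h"

static struct powerdomain *example_soc_powerdomains[] __initdata = {
	&wkup_omap2_pwrdm,
	&gfx_omap2_pwrdm,
	NULL
};

void __init example_soc_powerdomains_init(void)
{
	/* register the powerdomain data plus the low-level accessors */
	pwrdm_init(example_soc_powerdomains, &omap3_pwrdm_operations);
}

Inside the common layer each exported call is then dispatched roughly as "if (arch_pwrdm && arch_pwrdm->pwrdm_read_pwrst) ret = arch_pwrdm->pwrdm_read_pwrst(pwrdm);", so a hook left NULL simply yields -EINVAL.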
diff --git a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
new file mode 100644
index 0000000..d523389
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
@@ -0,0 +1,242 @@
+/*
+ * OMAP2 and OMAP3 powerdomain control
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
+ *
+ * Derived from mach-omap2/powerdomain.c written by Paul Walmsley
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include <plat/prcm.h>
+
+#include "powerdomain.h"
+#include "prm-regbits-34xx.h"
+#include "prm.h"
+#include "prm-regbits-24xx.h"
+#include "prm-regbits-34xx.h"
+
+
+/* Common functions across OMAP2 and OMAP3 */
+static int omap2_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+{
+	omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK,
+				(pwrst << OMAP_POWERSTATE_SHIFT),
+				pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);
+	return 0;
+}
+
+static int omap2_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTCTRL,
+					     OMAP_POWERSTATE_MASK);
+}
+
+static int omap2_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTST,
+					     OMAP_POWERSTATEST_MASK);
+}
+
+static int omap2_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
+								u8 pwrst)
+{
+	u32 m;
+
+	m = omap2_pwrdm_get_mem_bank_onstate_mask(bank);
+
+	omap2_prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_offs,
+				   OMAP2_PM_PWSTCTRL);
+
+	return 0;
+}
+
+static int omap2_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank,
+								u8 pwrst)
+{
+	u32 m;
+
+	m = omap2_pwrdm_get_mem_bank_retst_mask(bank);
+
+	omap2_prm_rmw_mod_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_offs,
+				   OMAP2_PM_PWSTCTRL);
+
+	return 0;
+}
+
+static int omap2_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
+{
+	u32 m;
+
+	m = omap2_pwrdm_get_mem_bank_stst_mask(bank);
+
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs, OMAP2_PM_PWSTST,
+					     m);
+}
+
+static int omap2_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
+{
+	u32 m;
+
+	m = omap2_pwrdm_get_mem_bank_retst_mask(bank);
+
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTCTRL, m);
+}
+
+static int omap2_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
+{
+	u32 v;
+
+	v = pwrst << __ffs(OMAP3430_LOGICL1CACHERETSTATE_MASK);
+	omap2_prm_rmw_mod_reg_bits(OMAP3430_LOGICL1CACHERETSTATE_MASK, v,
+				   pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);
+
+	return 0;
+}
+
+static int omap2_pwrdm_wait_transition(struct powerdomain *pwrdm)
+{
+	u32 c = 0;
+
+	/*
+	 * REVISIT: pwrdm_wait_transition() may be better implemented
+	 * via a callback and a periodic timer check -- how long do we expect
+	 * powerdomain transitions to take?
+	 */
+
+	/* XXX Is this udelay() value meaningful? */
+	while ((omap2_prm_read_mod_reg(pwrdm->prcm_offs, OMAP2_PM_PWSTST) &
+		OMAP_INTRANSITION_MASK) &&
+		(c++ < PWRDM_TRANSITION_BAILOUT))
+			udelay(1);
+
+	if (c > PWRDM_TRANSITION_BAILOUT) {
+		printk(KERN_ERR "powerdomain: waited too long for "
+			"powerdomain %s to complete transition\n", pwrdm->name);
+		return -EAGAIN;
+	}
+
+	pr_debug("powerdomain: completed transition in %d loops\n", c);
+
+	return 0;
+}
+
+/* Applicable only for OMAP3. Not supported on OMAP2 */
+static int omap3_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP3430_PM_PREPWSTST,
+					     OMAP3430_LASTPOWERSTATEENTERED_MASK);
+}
+
+static int omap3_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTST,
+					     OMAP3430_LOGICSTATEST_MASK);
+}
+
+static int omap3_pwrdm_read_logic_retst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTCTRL,
+					     OMAP3430_LOGICSTATEST_MASK);
+}
+
+static int omap3_pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP3430_PM_PREPWSTST,
+					     OMAP3430_LASTLOGICSTATEENTERED_MASK);
+}
+
+static int omap3_get_mem_bank_lastmemst_mask(u8 bank)
+{
+	switch (bank) {
+	case 0:
+		return OMAP3430_LASTMEM1STATEENTERED_MASK;
+	case 1:
+		return OMAP3430_LASTMEM2STATEENTERED_MASK;
+	case 2:
+		return OMAP3430_LASTSHAREDL2CACHEFLATSTATEENTERED_MASK;
+	case 3:
+		return OMAP3430_LASTL2FLATMEMSTATEENTERED_MASK;
+	default:
+		WARN_ON(1); /* should never happen */
+		return -EEXIST;
+	}
+	return 0;
+}
+
+static int omap3_pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
+{
+	u32 m;
+
+	m = omap3_get_mem_bank_lastmemst_mask(bank);
+
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+				OMAP3430_PM_PREPWSTST, m);
+}
+
+static int omap3_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
+{
+	omap2_prm_write_mod_reg(0, pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST);
+	return 0;
+}
+
+static int omap3_pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm)
+{
+	return omap2_prm_rmw_mod_reg_bits(0,
+					  1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT,
+					  pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);
+}
+
+static int omap3_pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm)
+{
+	return omap2_prm_rmw_mod_reg_bits(1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT,
+					  0, pwrdm->prcm_offs,
+					  OMAP2_PM_PWSTCTRL);
+}
+
+struct pwrdm_ops omap2_pwrdm_operations = {
+	.pwrdm_set_next_pwrst	= omap2_pwrdm_set_next_pwrst,
+	.pwrdm_read_next_pwrst	= omap2_pwrdm_read_next_pwrst,
+	.pwrdm_read_pwrst	= omap2_pwrdm_read_pwrst,
+	.pwrdm_set_logic_retst	= omap2_pwrdm_set_logic_retst,
+	.pwrdm_set_mem_onst	= omap2_pwrdm_set_mem_onst,
+	.pwrdm_set_mem_retst	= omap2_pwrdm_set_mem_retst,
+	.pwrdm_read_mem_pwrst	= omap2_pwrdm_read_mem_pwrst,
+	.pwrdm_read_mem_retst	= omap2_pwrdm_read_mem_retst,
+	.pwrdm_wait_transition	= omap2_pwrdm_wait_transition,
+};
+
+struct pwrdm_ops omap3_pwrdm_operations = {
+	.pwrdm_set_next_pwrst	= omap2_pwrdm_set_next_pwrst,
+	.pwrdm_read_next_pwrst	= omap2_pwrdm_read_next_pwrst,
+	.pwrdm_read_pwrst	= omap2_pwrdm_read_pwrst,
+	.pwrdm_read_prev_pwrst	= omap3_pwrdm_read_prev_pwrst,
+	.pwrdm_set_logic_retst	= omap2_pwrdm_set_logic_retst,
+	.pwrdm_read_logic_pwrst	= omap3_pwrdm_read_logic_pwrst,
+	.pwrdm_read_logic_retst	= omap3_pwrdm_read_logic_retst,
+	.pwrdm_read_prev_logic_pwrst	= omap3_pwrdm_read_prev_logic_pwrst,
+	.pwrdm_set_mem_onst	= omap2_pwrdm_set_mem_onst,
+	.pwrdm_set_mem_retst	= omap2_pwrdm_set_mem_retst,
+	.pwrdm_read_mem_pwrst	= omap2_pwrdm_read_mem_pwrst,
+	.pwrdm_read_mem_retst	= omap2_pwrdm_read_mem_retst,
+	.pwrdm_read_prev_mem_pwrst	= omap3_pwrdm_read_prev_mem_pwrst,
+	.pwrdm_clear_all_prev_pwrst	= omap3_pwrdm_clear_all_prev_pwrst,
+	.pwrdm_enable_hdwr_sar	= omap3_pwrdm_enable_hdwr_sar,
+	.pwrdm_disable_hdwr_sar	= omap3_pwrdm_disable_hdwr_sar,
+	.pwrdm_wait_transition	= omap2_pwrdm_wait_transition,
+};
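One practical consequence of the two tables above, shown as a hedged sketch (the example_ function is invented): hooks that a given SoC leaves unset fall back to the common layer's -EINVAL default, so callers can detect missing functionality at run time. Previous-power-state logging, for instance, is wired up for OMAP3 but not OMAP2:

static void example_report_prev_state(struct powerdomain *pwrdm)
{
	int prev = pwrdm_read_prev_pwrst(pwrdm);

	if (prev < 0)	/* -EINVAL when the SoC provides no such hook */
		pr_info("powerdomain: %s: previous state not recorded\n",
			pwrdm->name);
	else
		pr_info("powerdomain: %s: previous state was %d\n",
			pwrdm->name, prev);
}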
diff --git a/arch/arm/mach-omap2/powerdomain44xx.c b/arch/arm/mach-omap2/powerdomain44xx.c
new file mode 100644
index 0000000..a7880af
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomain44xx.c
@@ -0,0 +1,225 @@
+/*
+ * OMAP4 powerdomain control
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
+ *
+ * Derived from mach-omap2/powerdomain.c written by Paul Walmsley
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include "powerdomain.h"
+#include <plat/prcm.h>
+#include "prm2xxx_3xxx.h"
+#include "prm44xx.h"
+#include "prminst44xx.h"
+#include "prm-regbits-44xx.h"
+
+static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+{
+	omap4_prminst_rmw_inst_reg_bits(OMAP_POWERSTATE_MASK,
+					(pwrst << OMAP_POWERSTATE_SHIFT),
+					pwrdm->prcm_partition,
+					pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL);
+	return 0;
+}
+
+static int omap4_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
+{
+	u32 v;
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTCTRL);
+	v &= OMAP_POWERSTATE_MASK;
+	v >>= OMAP_POWERSTATE_SHIFT;
+
+	return v;
+}
+
+static int omap4_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+{
+	u32 v;
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTST);
+	v &= OMAP_POWERSTATEST_MASK;
+	v >>= OMAP_POWERSTATEST_SHIFT;
+
+	return v;
+}
+
+static int omap4_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
+{
+	u32 v;
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTST);
+	v &= OMAP4430_LASTPOWERSTATEENTERED_MASK;
+	v >>= OMAP4430_LASTPOWERSTATEENTERED_SHIFT;
+
+	return v;
+}
+
+static int omap4_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
+{
+	omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK,
+					(1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT),
+					pwrdm->prcm_partition,
+					pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL);
+	return 0;
+}
+
+static int omap4_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
+{
+	omap4_prminst_rmw_inst_reg_bits(OMAP4430_LASTPOWERSTATEENTERED_MASK,
+					OMAP4430_LASTPOWERSTATEENTERED_MASK,
+					pwrdm->prcm_partition,
+					pwrdm->prcm_offs, OMAP4_PM_PWSTST);
+	return 0;
+}
+
+static int omap4_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
+{
+	u32 v;
+
+	v = pwrst << __ffs(OMAP4430_LOGICRETSTATE_MASK);
+	omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOGICRETSTATE_MASK, v,
+					pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTCTRL);
+
+	return 0;
+}
+
+static int omap4_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
+				    u8 pwrst)
+{
+	u32 m;
+
+	m = omap2_pwrdm_get_mem_bank_onstate_mask(bank);
+
+	omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)),
+					pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTCTRL);
+
+	return 0;
+}
+
+static int omap4_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank,
+				     u8 pwrst)
+{
+	u32 m;
+
+	m = omap2_pwrdm_get_mem_bank_retst_mask(bank);
+
+	omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)),
+					pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTCTRL);
+
+	return 0;
+}
+
+static int omap4_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
+{
+	u32 v;
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTST);
+	v &= OMAP4430_LOGICSTATEST_MASK;
+	v >>= OMAP4430_LOGICSTATEST_SHIFT;
+
+	return v;
+}
+
+static int omap4_pwrdm_read_logic_retst(struct powerdomain *pwrdm)
+{
+	u32 v;
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTCTRL);
+	v &= OMAP4430_LOGICRETSTATE_MASK;
+	v >>= OMAP4430_LOGICRETSTATE_SHIFT;
+
+	return v;
+}
+
+static int omap4_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
+{
+	u32 m, v;
+
+	m = omap2_pwrdm_get_mem_bank_stst_mask(bank);
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTST);
+	v &= m;
+	v >>= __ffs(m);
+
+	return v;
+}
+
+static int omap4_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
+{
+	u32 m, v;
+
+	m = omap2_pwrdm_get_mem_bank_retst_mask(bank);
+
+	v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs,
+					OMAP4_PM_PWSTCTRL);
+	v &= m;
+	v >>= __ffs(m);
+
+	return v;
+}
+
+static int omap4_pwrdm_wait_transition(struct powerdomain *pwrdm)
+{
+	u32 c = 0;
+
+	/*
+	 * REVISIT: pwrdm_wait_transition() may be better implemented
+	 * via a callback and a periodic timer check -- how long do we expect
+	 * powerdomain transitions to take?
+	 */
+
+	/* XXX Is this udelay() value meaningful? */
+	while ((omap4_prminst_read_inst_reg(pwrdm->prcm_partition,
+					    pwrdm->prcm_offs,
+					    OMAP4_PM_PWSTST) &
+		OMAP_INTRANSITION_MASK) &&
+	       (c++ < PWRDM_TRANSITION_BAILOUT))
+		udelay(1);
+
+	if (c > PWRDM_TRANSITION_BAILOUT) {
+		printk(KERN_ERR "powerdomain: waited too long for "
+		       "powerdomain %s to complete transition\n", pwrdm->name);
+		return -EAGAIN;
+	}
+
+	pr_debug("powerdomain: completed transition in %d loops\n", c);
+
+	return 0;
+}
+
+struct pwrdm_ops omap4_pwrdm_operations = {
+	.pwrdm_set_next_pwrst	= omap4_pwrdm_set_next_pwrst,
+	.pwrdm_read_next_pwrst	= omap4_pwrdm_read_next_pwrst,
+	.pwrdm_read_pwrst	= omap4_pwrdm_read_pwrst,
+	.pwrdm_read_prev_pwrst	= omap4_pwrdm_read_prev_pwrst,
+	.pwrdm_set_lowpwrstchange	= omap4_pwrdm_set_lowpwrstchange,
+	.pwrdm_clear_all_prev_pwrst	= omap4_pwrdm_clear_all_prev_pwrst,
+	.pwrdm_set_logic_retst	= omap4_pwrdm_set_logic_retst,
+	.pwrdm_read_logic_pwrst	= omap4_pwrdm_read_logic_pwrst,
+	.pwrdm_read_logic_retst	= omap4_pwrdm_read_logic_retst,
+	.pwrdm_read_mem_pwrst	= omap4_pwrdm_read_mem_pwrst,
+	.pwrdm_read_mem_retst	= omap4_pwrdm_read_mem_retst,
+	.pwrdm_set_mem_onst	= omap4_pwrdm_set_mem_onst,
+	.pwrdm_set_mem_retst	= omap4_pwrdm_set_mem_retst,
+	.pwrdm_wait_transition	= omap4_pwrdm_wait_transition,
+};
diff --git a/arch/arm/mach-omap2/powerdomains.h b/arch/arm/mach-omap2/powerdomains.h
deleted file mode 100644
index 105cbca..0000000
--- a/arch/arm/mach-omap2/powerdomains.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * OMAP2/3 common powerdomain definitions
- *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2009 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Debugging and integration fixes by Jouni Högander
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * To Do List
- * -> Move the Sleep/Wakeup dependencies from Power Domain framework to
- *    Clock Domain Framework
- */
-
-#ifndef ARCH_ARM_MACH_OMAP2_POWERDOMAINS
-#define ARCH_ARM_MACH_OMAP2_POWERDOMAINS
-
-/*
- * This file contains all of the powerdomains that have some element
- * of software control for the OMAP24xx and OMAP34xx chips.
- *
- * This is not an exhaustive listing of powerdomains on the chips; only
- * powerdomains that can be controlled in software.
- */
-
-/*
- * The names for the DSP/IVA2 powerdomains are confusing.
- *
- * Most OMAP chips have an on-board DSP.
- *
- * On the 2420, this is a 'C55 DSP called, simply, the DSP.  Its
- * powerdomain is called the "DSP power domain."  On the 2430, the
- * on-board DSP is a 'C64 DSP, now called (along with its hardware
- * accelerators) the IVA2 or IVA2.1.  Its powerdomain is still called
- * the "DSP power domain." On the 3430, the DSP is a 'C64 DSP like the
- * 2430, also known as the IVA2; but its powerdomain is now called the
- * "IVA2 power domain."
- *
- * The 2420 also has something called the IVA, which is a separate ARM
- * core, and has nothing to do with the DSP/IVA2.
- *
- * Ideally the DSP/IVA2 could just be the same powerdomain, but the PRCM
- * address offset is different between the C55 and C64 DSPs.
- */
-
-#include <plat/powerdomain.h>
-
-#include "prcm-common.h"
-#include "prm.h"
-#include "cm.h"
-#include "powerdomains24xx.h"
-#include "powerdomains34xx.h"
-#include "powerdomains44xx.h"
-
-/* OMAP2/3-common powerdomains */
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-
-/*
- * The GFX powerdomain is not present on 3430ES2, but currently we do not
- * have a macro to filter it out at compile-time.
- */
-static struct powerdomain gfx_omap2_pwrdm = {
-	.name		  = "gfx_pwrdm",
-	.prcm_offs	  = GFX_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX |
-					   CHIP_IS_OMAP3430ES1),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-static struct powerdomain wkup_omap2_pwrdm = {
-	.name		= "wkup_pwrdm",
-	.prcm_offs	= WKUP_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
-};
-
-#endif
-
-
-/* As powerdomains are added or removed above, this list must also be changed */
-static struct powerdomain *powerdomains_omap[] __initdata = {
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-	&wkup_omap2_pwrdm,
-	&gfx_omap2_pwrdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2
-	&dsp_pwrdm,
-	&mpu_24xx_pwrdm,
-	&core_24xx_pwrdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2430
-	&mdm_pwrdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP3
-	&iva2_pwrdm,
-	&mpu_3xxx_pwrdm,
-	&neon_pwrdm,
-	&core_3xxx_pre_es3_1_pwrdm,
-	&core_3xxx_es3_1_pwrdm,
-	&cam_pwrdm,
-	&dss_pwrdm,
-	&per_pwrdm,
-	&emu_pwrdm,
-	&sgx_pwrdm,
-	&usbhost_pwrdm,
-	&dpll1_pwrdm,
-	&dpll2_pwrdm,
-	&dpll3_pwrdm,
-	&dpll4_pwrdm,
-	&dpll5_pwrdm,
-#endif
-
-#ifdef CONFIG_ARCH_OMAP4
-	&core_44xx_pwrdm,
-	&gfx_44xx_pwrdm,
-	&abe_44xx_pwrdm,
-	&dss_44xx_pwrdm,
-	&tesla_44xx_pwrdm,
-	&wkup_44xx_pwrdm,
-	&cpu0_44xx_pwrdm,
-	&cpu1_44xx_pwrdm,
-	&emu_44xx_pwrdm,
-	&mpu_44xx_pwrdm,
-	&ivahd_44xx_pwrdm,
-	&cam_44xx_pwrdm,
-	&l3init_44xx_pwrdm,
-	&l4per_44xx_pwrdm,
-	&always_on_core_44xx_pwrdm,
-	&cefuse_44xx_pwrdm,
-#endif
-	NULL
-};
-
-
-#endif
diff --git a/arch/arm/mach-omap2/powerdomains24xx.h b/arch/arm/mach-omap2/powerdomains24xx.h
deleted file mode 100644
index 775093a..0000000
--- a/arch/arm/mach-omap2/powerdomains24xx.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * OMAP24XX powerdomain definitions
- *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2009 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Debugging and integration fixes by Jouni Högander
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ARCH_ARM_MACH_OMAP2_POWERDOMAINS24XX
-#define ARCH_ARM_MACH_OMAP2_POWERDOMAINS24XX
-
-/*
- * N.B. If powerdomains are added or removed from this file, update
- * the array in mach-omap2/powerdomains.h.
- */
-
-#include <plat/powerdomain.h>
-
-#include "prcm-common.h"
-#include "prm.h"
-#include "prm-regbits-24xx.h"
-#include "cm.h"
-#include "cm-regbits-24xx.h"
-
-/* 24XX powerdomains and dependencies */
-
-#ifdef CONFIG_ARCH_OMAP2
-
-/* Powerdomains */
-
-static struct powerdomain dsp_pwrdm = {
-	.name		  = "dsp_pwrdm",
-	.prcm_offs	  = OMAP24XX_DSP_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET,
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,
-	},
-};
-
-static struct powerdomain mpu_24xx_pwrdm = {
-	.name		  = "mpu_pwrdm",
-	.prcm_offs	  = MPU_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET,
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,
-	},
-};
-
-static struct powerdomain core_24xx_pwrdm = {
-	.name		  = "core_pwrdm",
-	.prcm_offs	  = CORE_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.banks		  = 3,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
-		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
-		[2] = PWRSTS_OFF_RET,	 /* MEM3RETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
-		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
-		[2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */
-	},
-};
-
-#endif	   /* CONFIG_ARCH_OMAP2 */
-
-
-
-/*
- * 2430-specific powerdomains
- */
-
-#ifdef CONFIG_ARCH_OMAP2430
-
-/* XXX 2430 KILLDOMAINWKUP bit?  No current users apparently */
-
-static struct powerdomain mdm_pwrdm = {
-	.name		  = "mdm_pwrdm",
-	.prcm_offs	  = OMAP2430_MDM_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-#endif     /* CONFIG_ARCH_OMAP2430 */
-
-
-#endif
diff --git a/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c
new file mode 100644
index 0000000..5b4dd97
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.c
@@ -0,0 +1,79 @@
+/*
+ * OMAP2/3 common powerdomain definitions
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Copyright (C) 2007-2010 Nokia Corporation
+ *
+ * Paul Walmsley, Jouni Högander
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * To Do List
+ * -> Move the Sleep/Wakeup dependencies from Power Domain framework to
+ *    Clock Domain Framework
+ */
+
+/*
+ * This file contains all of the powerdomains that have some element
+ * of software control for the OMAP24xx and OMAP34xx chips.
+ *
+ * This is not an exhaustive listing of powerdomains on the chips; only
+ * powerdomains that can be controlled in software.
+ */
+
+/*
+ * The names for the DSP/IVA2 powerdomains are confusing.
+ *
+ * Most OMAP chips have an on-board DSP.
+ *
+ * On the 2420, this is a 'C55 DSP called, simply, the DSP.  Its
+ * powerdomain is called the "DSP power domain."  On the 2430, the
+ * on-board DSP is a 'C64 DSP, now called (along with its hardware
+ * accelerators) the IVA2 or IVA2.1.  Its powerdomain is still called
+ * the "DSP power domain." On the 3430, the DSP is a 'C64 DSP like the
+ * 2430, also known as the IVA2; but its powerdomain is now called the
+ * "IVA2 power domain."
+ *
+ * The 2420 also has something called the IVA, which is a separate ARM
+ * core, and has nothing to do with the DSP/IVA2.
+ *
+ * Ideally the DSP/IVA2 could just be the same powerdomain, but the PRCM
+ * address offset is different between the C55 and C64 DSPs.
+ */
+
+#include "powerdomain.h"
+
+#include "prcm-common.h"
+#include "prm.h"
+
+/* OMAP2/3-common powerdomains */
+
+/*
+ * The GFX powerdomain is not present on 3430ES2, but currently we do not
+ * have a macro to filter it out at compile-time.
+ */
+struct powerdomain gfx_omap2_pwrdm = {
+	.name		  = "gfx_pwrdm",
+	.prcm_offs	  = GFX_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX |
+					   CHIP_IS_OMAP3430ES1),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+struct powerdomain wkup_omap2_pwrdm = {
+	.name		= "wkup_pwrdm",
+	.prcm_offs	= WKUP_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP24XX | CHIP_IS_OMAP3430),
+};
diff --git a/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.h b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.h
new file mode 100644
index 0000000..fa31166
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains2xxx_3xxx_data.h
@@ -0,0 +1,22 @@
+/*
+ * OMAP2/3 common powerdomains - prototypes
+ *
+ * Copyright (C) 2008 Texas Instruments, Inc.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_POWERDOMAINS2XXX_3XXX_DATA_H
+#define __ARCH_ARM_MACH_OMAP2_POWERDOMAINS2XXX_3XXX_DATA_H
+
+#include "powerdomain.h"
+
+extern struct powerdomain gfx_omap2_pwrdm;
+extern struct powerdomain wkup_omap2_pwrdm;
+
+#endif
diff --git a/arch/arm/mach-omap2/powerdomains2xxx_data.c b/arch/arm/mach-omap2/powerdomains2xxx_data.c
new file mode 100644
index 0000000..9b1a335
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains2xxx_data.c
@@ -0,0 +1,123 @@
+/*
+ * OMAP2XXX powerdomain definitions
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Copyright (C) 2007-2010 Nokia Corporation
+ *
+ * Paul Walmsley, Jouni Högander
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "powerdomain.h"
+#include "powerdomains2xxx_3xxx_data.h"
+
+#include "prcm-common.h"
+#include "prm2xxx_3xxx.h"
+#include "prm-regbits-24xx.h"
+
+/* 24XX powerdomains and dependencies */
+
+/* Powerdomains */
+
+static struct powerdomain dsp_pwrdm = {
+	.name		  = "dsp_pwrdm",
+	.prcm_offs	  = OMAP24XX_DSP_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET,
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,
+	},
+};
+
+static struct powerdomain mpu_24xx_pwrdm = {
+	.name		  = "mpu_pwrdm",
+	.prcm_offs	  = MPU_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET,
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,
+	},
+};
+
+static struct powerdomain core_24xx_pwrdm = {
+	.name		  = "core_pwrdm",
+	.prcm_offs	  = CORE_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.banks		  = 3,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
+		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
+		[2] = PWRSTS_OFF_RET,	 /* MEM3RETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
+		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
+		[2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */
+	},
+};
+
+
+/*
+ * 2430-specific powerdomains
+ */
+
+#ifdef CONFIG_ARCH_OMAP2430
+
+/* XXX 2430 KILLDOMAINWKUP bit?  No current users apparently */
+
+static struct powerdomain mdm_pwrdm = {
+	.name		  = "mdm_pwrdm",
+	.prcm_offs	  = OMAP2430_MDM_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+#endif     /* CONFIG_ARCH_OMAP2430 */
+
+/* As powerdomains are added or removed above, this list must also be changed */
+static struct powerdomain *powerdomains_omap2xxx[] __initdata = {
+
+	&wkup_omap2_pwrdm,
+	&gfx_omap2_pwrdm,
+
+#ifdef CONFIG_ARCH_OMAP2
+	&dsp_pwrdm,
+	&mpu_24xx_pwrdm,
+	&core_24xx_pwrdm,
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2430
+	&mdm_pwrdm,
+#endif
+	NULL
+};
+
+void __init omap2xxx_powerdomains_init(void)
+{
+	pwrdm_init(powerdomains_omap2xxx, &omap2_pwrdm_operations);
+}
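As a hedged illustration of how the .pwrsts bitfields in these data definitions get consumed (the example_ function is invented): each PWRSTS_* value is a bitmask of permitted PWRDM_POWER_* states, so PM code can test whether a target state is even possible before programming it:

static int example_try_target_state(struct powerdomain *pwrdm, u8 pwrst)
{
	/* e.g. RET is rejected for a domain declared PWRSTS_OFF_ON */
	if (!(pwrdm->pwrsts & (1 << pwrst)))
		return -EINVAL;

	return pwrdm_set_next_pwrst(pwrdm, pwrst);
}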
diff --git a/arch/arm/mach-omap2/powerdomains34xx.h b/arch/arm/mach-omap2/powerdomains34xx.h
deleted file mode 100644
index fa90486..0000000
--- a/arch/arm/mach-omap2/powerdomains34xx.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * OMAP3 powerdomain definitions
- *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2010 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Debugging and integration fixes by Jouni Högander
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ARCH_ARM_MACH_OMAP2_POWERDOMAINS34XX
-#define ARCH_ARM_MACH_OMAP2_POWERDOMAINS34XX
-
-/*
- * N.B. If powerdomains are added or removed from this file, update
- * the array in mach-omap2/powerdomains.h.
- */
-
-#include <plat/powerdomain.h>
-
-#include "prcm-common.h"
-#include "prm.h"
-#include "prm-regbits-34xx.h"
-#include "cm.h"
-#include "cm-regbits-34xx.h"
-
-/*
- * 34XX-specific powerdomains, dependencies
- */
-
-#ifdef CONFIG_ARCH_OMAP3
-
-/*
- * Powerdomains
- */
-
-static struct powerdomain iva2_pwrdm = {
-	.name		  = "iva2_pwrdm",
-	.prcm_offs	  = OMAP3430_IVA2_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 4,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRSTS_OFF_RET,
-		[1] = PWRSTS_OFF_RET,
-		[2] = PWRSTS_OFF_RET,
-		[3] = PWRSTS_OFF_RET,
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,
-		[1] = PWRDM_POWER_ON,
-		[2] = PWRSTS_OFF_ON,
-		[3] = PWRDM_POWER_ON,
-	},
-};
-
-static struct powerdomain mpu_3xxx_pwrdm = {
-	.name		  = "mpu_pwrdm",
-	.prcm_offs	  = MPU_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.flags		  = PWRDM_HAS_MPU_QUIRK,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRSTS_OFF_RET,
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRSTS_OFF_ON,
-	},
-};
-
-/*
- * The USBTLL Save-and-Restore mechanism is broken on
- * 3430s upto ES3.0 and 3630ES1.0. Hence this feature
- * needs to be disabled on these chips.
- * Refer: 3430 errata ID i459 and 3630 errata ID i579
- */
-static struct powerdomain core_3xxx_pre_es3_1_pwrdm = {
-	.name		  = "core_pwrdm",
-	.prcm_offs	  = CORE_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
-					   CHIP_IS_OMAP3430ES2 |
-					   CHIP_IS_OMAP3430ES3_0 |
-					   CHIP_IS_OMAP3630ES1),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 2,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
-		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
-		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
-	},
-};
-
-static struct powerdomain core_3xxx_es3_1_pwrdm = {
-	.name		  = "core_pwrdm",
-	.prcm_offs	  = CORE_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES3_1 |
-					  CHIP_GE_OMAP3630ES1_1),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.flags		  = PWRDM_HAS_HDWR_SAR, /* for USBTLL only */
-	.banks		  = 2,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
-		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
-		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
-	},
-};
-
-static struct powerdomain dss_pwrdm = {
-	.name		  = "dss_pwrdm",
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-	.prcm_offs	  = OMAP3430_DSS_MOD,
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-/*
- * Although the 34XX TRM Rev K Table 4-371 notes that retention is a
- * possible SGX powerstate, the SGX device itself does not support
- * retention.
- */
-static struct powerdomain sgx_pwrdm = {
-	.name		  = "sgx_pwrdm",
-	.prcm_offs	  = OMAP3430ES2_SGX_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
-	/* XXX This is accurate for 3430 SGX, but what about GFX? */
-	.pwrsts		  = PWRSTS_OFF_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-static struct powerdomain cam_pwrdm = {
-	.name		  = "cam_pwrdm",
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-	.prcm_offs	  = OMAP3430_CAM_MOD,
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-static struct powerdomain per_pwrdm = {
-	.name		  = "per_pwrdm",
-	.prcm_offs	  = OMAP3430_PER_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-static struct powerdomain emu_pwrdm = {
-	.name		= "emu_pwrdm",
-	.prcm_offs	= OMAP3430_EMU_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct powerdomain neon_pwrdm = {
-	.name		  = "neon_pwrdm",
-	.prcm_offs	  = OMAP3430_NEON_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-};
-
-static struct powerdomain usbhost_pwrdm = {
-	.name		  = "usbhost_pwrdm",
-	.prcm_offs	  = OMAP3430ES2_USBHOST_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_RET,
-	/*
-	 * REVISIT: Enabling usb host save and restore mechanism seems to
-	 * leave the usb host domain permanently in ACTIVE mode after
-	 * changing the usb host power domain state from OFF to active once.
-	 * Disabling for now.
-	 */
-	/*.flags	  = PWRDM_HAS_HDWR_SAR,*/ /* for USBHOST ctrlr only */
-	.banks		  = 1,
-	.pwrsts_mem_ret	  = {
-		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
-	},
-	.pwrsts_mem_on	  = {
-		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
-	},
-};
-
-static struct powerdomain dpll1_pwrdm = {
-	.name		= "dpll1_pwrdm",
-	.prcm_offs	= MPU_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct powerdomain dpll2_pwrdm = {
-	.name		= "dpll2_pwrdm",
-	.prcm_offs	= OMAP3430_IVA2_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct powerdomain dpll3_pwrdm = {
-	.name		= "dpll3_pwrdm",
-	.prcm_offs	= PLL_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct powerdomain dpll4_pwrdm = {
-	.name		= "dpll4_pwrdm",
-	.prcm_offs	= PLL_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
-};
-
-static struct powerdomain dpll5_pwrdm = {
-	.name		= "dpll5_pwrdm",
-	.prcm_offs	= PLL_MOD,
-	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
-};
-
-
-#endif    /* CONFIG_ARCH_OMAP3 */
-
-
-#endif
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
new file mode 100644
index 0000000..e1bec56
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -0,0 +1,287 @@
+/*
+ * OMAP3 powerdomain definitions
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Copyright (C) 2007-2010 Nokia Corporation
+ *
+ * Paul Walmsley, Jouni Högander
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "powerdomain.h"
+#include "powerdomains2xxx_3xxx_data.h"
+
+#include "prcm-common.h"
+#include "prm2xxx_3xxx.h"
+#include "prm-regbits-34xx.h"
+#include "cm2xxx_3xxx.h"
+#include "cm-regbits-34xx.h"
+
+/*
+ * 34XX-specific powerdomains, dependencies
+ */
+
+#ifdef CONFIG_ARCH_OMAP3
+
+/*
+ * Powerdomains
+ */
+
+static struct powerdomain iva2_pwrdm = {
+	.name		  = "iva2_pwrdm",
+	.prcm_offs	  = OMAP3430_IVA2_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 4,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRSTS_OFF_RET,
+		[1] = PWRSTS_OFF_RET,
+		[2] = PWRSTS_OFF_RET,
+		[3] = PWRSTS_OFF_RET,
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,
+		[1] = PWRDM_POWER_ON,
+		[2] = PWRSTS_OFF_ON,
+		[3] = PWRDM_POWER_ON,
+	},
+};
+
+static struct powerdomain mpu_3xxx_pwrdm = {
+	.name		  = "mpu_pwrdm",
+	.prcm_offs	  = MPU_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.flags		  = PWRDM_HAS_MPU_QUIRK,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRSTS_OFF_RET,
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRSTS_OFF_ON,
+	},
+};
+
+/*
+ * The USBTLL Save-and-Restore mechanism is broken on
+ * 3430s up to ES3.0 and 3630ES1.0. Hence this feature
+ * needs to be disabled on these chips.
+ * Refer: 3430 errata ID i459 and 3630 errata ID i579
+ *
+ * Note: setting the SAR flag could help for errata ID i478
+ *  which applies to 3430 <= ES3.1, but since the SAR feature
+ *  is broken, do not use it.
+ */
+static struct powerdomain core_3xxx_pre_es3_1_pwrdm = {
+	.name		  = "core_pwrdm",
+	.prcm_offs	  = CORE_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES1 |
+					   CHIP_IS_OMAP3430ES2 |
+					   CHIP_IS_OMAP3430ES3_0 |
+					   CHIP_IS_OMAP3630ES1),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 2,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
+		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
+		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
+	},
+};
+
+static struct powerdomain core_3xxx_es3_1_pwrdm = {
+	.name		  = "core_pwrdm",
+	.prcm_offs	  = CORE_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430ES3_1 |
+					  CHIP_GE_OMAP3630ES1_1),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	/*
+	 * Set the SAR flag for errata ID i478, which applies
+	 * to 3430 <= ES3.1.
+	 */
+	.flags		  = PWRDM_HAS_HDWR_SAR, /* for USBTLL only */
+	.banks		  = 2,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
+		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
+		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
+	},
+};
+
+static struct powerdomain dss_pwrdm = {
+	.name		  = "dss_pwrdm",
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.prcm_offs	  = OMAP3430_DSS_MOD,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+/*
+ * Although the 34XX TRM Rev K Table 4-371 notes that retention is a
+ * possible SGX powerstate, the SGX device itself does not support
+ * retention.
+ */
+static struct powerdomain sgx_pwrdm = {
+	.name		  = "sgx_pwrdm",
+	.prcm_offs	  = OMAP3430ES2_SGX_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+	/* XXX This is accurate for 3430 SGX, but what about GFX? */
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+static struct powerdomain cam_pwrdm = {
+	.name		  = "cam_pwrdm",
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.prcm_offs	  = OMAP3430_CAM_MOD,
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+static struct powerdomain per_pwrdm = {
+	.name		  = "per_pwrdm",
+	.prcm_offs	  = OMAP3430_PER_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+static struct powerdomain emu_pwrdm = {
+	.name		= "emu_pwrdm",
+	.prcm_offs	= OMAP3430_EMU_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct powerdomain neon_pwrdm = {
+	.name		  = "neon_pwrdm",
+	.prcm_offs	  = OMAP3430_NEON_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+};
+
+static struct powerdomain usbhost_pwrdm = {
+	.name		  = "usbhost_pwrdm",
+	.prcm_offs	  = OMAP3430ES2_USBHOST_MOD,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_RET,
+	/*
+	 * REVISIT: Enabling usb host save and restore mechanism seems to
+	 * leave the usb host domain permanently in ACTIVE mode after
+	 * changing the usb host power domain state from OFF to active once.
+	 * Disabling for now.
+	 */
+	/*.flags	  = PWRDM_HAS_HDWR_SAR,*/ /* for USBHOST ctrlr only */
+	.banks		  = 1,
+	.pwrsts_mem_ret	  = {
+		[0] = PWRDM_POWER_RET, /* MEMRETSTATE */
+	},
+	.pwrsts_mem_on	  = {
+		[0] = PWRDM_POWER_ON,  /* MEMONSTATE */
+	},
+};
+
+static struct powerdomain dpll1_pwrdm = {
+	.name		= "dpll1_pwrdm",
+	.prcm_offs	= MPU_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct powerdomain dpll2_pwrdm = {
+	.name		= "dpll2_pwrdm",
+	.prcm_offs	= OMAP3430_IVA2_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct powerdomain dpll3_pwrdm = {
+	.name		= "dpll3_pwrdm",
+	.prcm_offs	= PLL_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct powerdomain dpll4_pwrdm = {
+	.name		= "dpll4_pwrdm",
+	.prcm_offs	= PLL_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_IS_OMAP3430),
+};
+
+static struct powerdomain dpll5_pwrdm = {
+	.name		= "dpll5_pwrdm",
+	.prcm_offs	= PLL_MOD,
+	.omap_chip	= OMAP_CHIP_INIT(CHIP_GE_OMAP3430ES2),
+};
+
+/* As powerdomains are added or removed above, this list must also be changed */
+static struct powerdomain *powerdomains_omap3xxx[] __initdata = {
+
+	&wkup_omap2_pwrdm,
+	&gfx_omap2_pwrdm,
+	&iva2_pwrdm,
+	&mpu_3xxx_pwrdm,
+	&neon_pwrdm,
+	&core_3xxx_pre_es3_1_pwrdm,
+	&core_3xxx_es3_1_pwrdm,
+	&cam_pwrdm,
+	&dss_pwrdm,
+	&per_pwrdm,
+	&emu_pwrdm,
+	&sgx_pwrdm,
+	&usbhost_pwrdm,
+	&dpll1_pwrdm,
+	&dpll2_pwrdm,
+	&dpll3_pwrdm,
+	&dpll4_pwrdm,
+	&dpll5_pwrdm,
+#endif
+	NULL
+};
+
+
+void __init omap3xxx_powerdomains_init(void)
+{
+	pwrdm_init(powerdomains_omap3xxx, &omap3_pwrdm_operations);
+}
diff --git a/arch/arm/mach-omap2/powerdomains44xx.h b/arch/arm/mach-omap2/powerdomains44xx.h
deleted file mode 100644
index 9c01b55..0000000
--- a/arch/arm/mach-omap2/powerdomains44xx.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * OMAP4 Power domains framework
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- * Copyright (C) 2009-2010 Nokia Corporation
- *
- * Abhijit Pagare (abhijitpagare@ti.com)
- * Benoit Cousson (b-cousson@ti.com)
- * Paul Walmsley (paul@pwsan.com)
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_POWERDOMAINS44XX_H
-#define __ARCH_ARM_MACH_OMAP2_POWERDOMAINS44XX_H
-
-#include <plat/powerdomain.h>
-
-#include "prcm-common.h"
-#include "cm.h"
-#include "cm-regbits-44xx.h"
-#include "prm.h"
-#include "prm-regbits-44xx.h"
-
-#if defined(CONFIG_ARCH_OMAP4)
-
-/* core_44xx_pwrdm: CORE power domain */
-static struct powerdomain core_44xx_pwrdm = {
-	.name		  = "core_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_CORE_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 5,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* core_nret_bank */
-		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
-		[2] = PWRDM_POWER_RET,	/* core_other_bank */
-		[3] = PWRSTS_OFF_RET,	/* ducati_l2ram */
-		[4] = PWRSTS_OFF_RET,	/* ducati_unicache */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* core_nret_bank */
-		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
-		[2] = PWRDM_POWER_ON,	/* core_other_bank */
-		[3] = PWRDM_POWER_ON,	/* ducati_l2ram */
-		[4] = PWRDM_POWER_ON,	/* ducati_unicache */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* gfx_44xx_pwrdm: 3D accelerator power domain */
-static struct powerdomain gfx_44xx_pwrdm = {
-	.name		  = "gfx_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_GFX_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_ON,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* gfx_mem */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* gfx_mem */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* abe_44xx_pwrdm: Audio back end power domain */
-static struct powerdomain abe_44xx_pwrdm = {
-	.name		  = "abe_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_ABE_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_OFF,
-	.banks		  = 2,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_RET,	/* aessmem */
-		[1] = PWRDM_POWER_OFF,	/* periphmem */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* aessmem */
-		[1] = PWRDM_POWER_ON,	/* periphmem */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* dss_44xx_pwrdm: Display subsystem power domain */
-static struct powerdomain dss_44xx_pwrdm = {
-	.name		  = "dss_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_DSS_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* dss_mem */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* dss_mem */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* tesla_44xx_pwrdm: Tesla processor power domain */
-static struct powerdomain tesla_44xx_pwrdm = {
-	.name		  = "tesla_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_TESLA_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 3,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_RET,	/* tesla_edma */
-		[1] = PWRSTS_OFF_RET,	/* tesla_l1 */
-		[2] = PWRSTS_OFF_RET,	/* tesla_l2 */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* tesla_edma */
-		[1] = PWRDM_POWER_ON,	/* tesla_l1 */
-		[2] = PWRDM_POWER_ON,	/* tesla_l2 */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* wkup_44xx_pwrdm: Wake-up power domain */
-static struct powerdomain wkup_44xx_pwrdm = {
-	.name		  = "wkup_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_WKUP_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_ON,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* wkup_bank */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* wkup_bank */
-	},
-};
-
-/* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
-static struct powerdomain cpu0_44xx_pwrdm = {
-	.name		  = "cpu0_pwrdm",
-	.prcm_offs	  = OMAP4430_PRCM_MPU_CPU0_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* cpu0_l1 */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* cpu0_l1 */
-	},
-};
-
-/* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
-static struct powerdomain cpu1_44xx_pwrdm = {
-	.name		  = "cpu1_pwrdm",
-	.prcm_offs	  = OMAP4430_PRCM_MPU_CPU1_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* cpu1_l1 */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* cpu1_l1 */
-	},
-};
-
-/* emu_44xx_pwrdm: Emulation power domain */
-static struct powerdomain emu_44xx_pwrdm = {
-	.name		  = "emu_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_EMU_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_ON,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* emu_bank */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* emu_bank */
-	},
-};
-
-/* mpu_44xx_pwrdm: Modena processor and the Neon coprocessor power domain */
-static struct powerdomain mpu_44xx_pwrdm = {
-	.name		  = "mpu_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_MPU_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 3,
-	.pwrsts_mem_ret	= {
-		[0] = PWRSTS_OFF_RET,	/* mpu_l1 */
-		[1] = PWRSTS_OFF_RET,	/* mpu_l2 */
-		[2] = PWRDM_POWER_RET,	/* mpu_ram */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* mpu_l1 */
-		[1] = PWRDM_POWER_ON,	/* mpu_l2 */
-		[2] = PWRDM_POWER_ON,	/* mpu_ram */
-	},
-};
-
-/* ivahd_44xx_pwrdm: IVA-HD power domain */
-static struct powerdomain ivahd_44xx_pwrdm = {
-	.name		  = "ivahd_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_IVAHD_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRDM_POWER_OFF,
-	.banks		  = 4,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* hwa_mem */
-		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
-		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
-		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* hwa_mem */
-		[1] = PWRDM_POWER_ON,	/* sl2_mem */
-		[2] = PWRDM_POWER_ON,	/* tcm1_mem */
-		[3] = PWRDM_POWER_ON,	/* tcm2_mem */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* cam_44xx_pwrdm: Camera subsystem power domain */
-static struct powerdomain cam_44xx_pwrdm = {
-	.name		  = "cam_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_CAM_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_ON,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* cam_mem */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* cam_mem */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* l3init_44xx_pwrdm: L3 initators pheripherals power domain  */
-static struct powerdomain l3init_44xx_pwrdm = {
-	.name		  = "l3init_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_L3INIT_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 1,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* l3init_bank1 */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* l3init_bank1 */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/* l4per_44xx_pwrdm: Target peripherals power domain */
-static struct powerdomain l4per_44xx_pwrdm = {
-	.name		  = "l4per_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_L4PER_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_RET_ON,
-	.pwrsts_logic_ret = PWRSTS_OFF_RET,
-	.banks		  = 2,
-	.pwrsts_mem_ret	= {
-		[0] = PWRDM_POWER_OFF,	/* nonretained_bank */
-		[1] = PWRDM_POWER_RET,	/* retained_bank */
-	},
-	.pwrsts_mem_on	= {
-		[0] = PWRDM_POWER_ON,	/* nonretained_bank */
-		[1] = PWRDM_POWER_ON,	/* retained_bank */
-	},
-	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
-};
-
-/*
- * always_on_core_44xx_pwrdm: Always ON logic that sits in VDD_CORE voltage
- * domain
- */
-static struct powerdomain always_on_core_44xx_pwrdm = {
-	.name		  = "always_on_core_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_ALWAYS_ON_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_ON,
-};
-
-/* cefuse_44xx_pwrdm: Customer efuse controller power domain */
-static struct powerdomain cefuse_44xx_pwrdm = {
-	.name		  = "cefuse_pwrdm",
-	.prcm_offs	  = OMAP4430_PRM_CEFUSE_MOD,
-	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
-	.pwrsts		  = PWRSTS_OFF_ON,
-};
-
-/*
- * The following power domains are not under SW control
- *
- * always_on_iva
- * always_on_mpu
- * stdefuse
- */
-
-#endif
-
-#endif
diff --git a/arch/arm/mach-omap2/powerdomains44xx_data.c b/arch/arm/mach-omap2/powerdomains44xx_data.c
new file mode 100644
index 0000000..26d7641
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains44xx_data.c
@@ -0,0 +1,355 @@
+/*
+ * OMAP4 Power domains framework
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Abhijit Pagare (abhijitpagare@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ * Paul Walmsley (paul@pwsan.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "powerdomain.h"
+
+#include "prcm-common.h"
+#include "prcm44xx.h"
+#include "prm-regbits-44xx.h"
+#include "prm44xx.h"
+#include "prcm_mpu44xx.h"
+
+/* core_44xx_pwrdm: CORE power domain */
+static struct powerdomain core_44xx_pwrdm = {
+	.name		  = "core_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_CORE_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 5,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* core_nret_bank */
+		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
+		[2] = PWRDM_POWER_RET,	/* core_other_bank */
+		[3] = PWRSTS_OFF_RET,	/* ducati_l2ram */
+		[4] = PWRSTS_OFF_RET,	/* ducati_unicache */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* core_nret_bank */
+		[1] = PWRSTS_OFF_RET,	/* core_ocmram */
+		[2] = PWRDM_POWER_ON,	/* core_other_bank */
+		[3] = PWRDM_POWER_ON,	/* ducati_l2ram */
+		[4] = PWRDM_POWER_ON,	/* ducati_unicache */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* gfx_44xx_pwrdm: 3D accelerator power domain */
+static struct powerdomain gfx_44xx_pwrdm = {
+	.name		  = "gfx_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_GFX_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* gfx_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* gfx_mem */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* abe_44xx_pwrdm: Audio back end power domain */
+static struct powerdomain abe_44xx_pwrdm = {
+	.name		  = "abe_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_ABE_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_OFF,
+	.banks		  = 2,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_RET,	/* aessmem */
+		[1] = PWRDM_POWER_OFF,	/* periphmem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* aessmem */
+		[1] = PWRDM_POWER_ON,	/* periphmem */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* dss_44xx_pwrdm: Display subsystem power domain */
+static struct powerdomain dss_44xx_pwrdm = {
+	.name		  = "dss_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_DSS_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* dss_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* dss_mem */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* tesla_44xx_pwrdm: Tesla processor power domain */
+static struct powerdomain tesla_44xx_pwrdm = {
+	.name		  = "tesla_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_TESLA_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 3,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_RET,	/* tesla_edma */
+		[1] = PWRSTS_OFF_RET,	/* tesla_l1 */
+		[2] = PWRSTS_OFF_RET,	/* tesla_l2 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* tesla_edma */
+		[1] = PWRDM_POWER_ON,	/* tesla_l1 */
+		[2] = PWRDM_POWER_ON,	/* tesla_l2 */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* wkup_44xx_pwrdm: Wake-up power domain */
+static struct powerdomain wkup_44xx_pwrdm = {
+	.name		  = "wkup_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_WKUP_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* wkup_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* wkup_bank */
+	},
+};
+
+/* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
+static struct powerdomain cpu0_44xx_pwrdm = {
+	.name		  = "cpu0_pwrdm",
+	.prcm_offs	  = OMAP4430_PRCM_MPU_CPU0_INST,
+	.prcm_partition	  = OMAP4430_PRCM_MPU_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* cpu0_l1 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* cpu0_l1 */
+	},
+};
+
+/* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
+static struct powerdomain cpu1_44xx_pwrdm = {
+	.name		  = "cpu1_pwrdm",
+	.prcm_offs	  = OMAP4430_PRCM_MPU_CPU1_INST,
+	.prcm_partition	  = OMAP4430_PRCM_MPU_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* cpu1_l1 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* cpu1_l1 */
+	},
+};
+
+/* emu_44xx_pwrdm: Emulation power domain */
+static struct powerdomain emu_44xx_pwrdm = {
+	.name		  = "emu_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_EMU_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* emu_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* emu_bank */
+	},
+};
+
+/* mpu_44xx_pwrdm: Modena processor and the Neon coprocessor power domain */
+static struct powerdomain mpu_44xx_pwrdm = {
+	.name		  = "mpu_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_MPU_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 3,
+	.pwrsts_mem_ret	= {
+		[0] = PWRSTS_OFF_RET,	/* mpu_l1 */
+		[1] = PWRSTS_OFF_RET,	/* mpu_l2 */
+		[2] = PWRDM_POWER_RET,	/* mpu_ram */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* mpu_l1 */
+		[1] = PWRDM_POWER_ON,	/* mpu_l2 */
+		[2] = PWRDM_POWER_ON,	/* mpu_ram */
+	},
+};
+
+/* ivahd_44xx_pwrdm: IVA-HD power domain */
+static struct powerdomain ivahd_44xx_pwrdm = {
+	.name		  = "ivahd_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_IVAHD_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_RET_ON,
+	.pwrsts_logic_ret = PWRDM_POWER_OFF,
+	.banks		  = 4,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* hwa_mem */
+		[1] = PWRSTS_OFF_RET,	/* sl2_mem */
+		[2] = PWRSTS_OFF_RET,	/* tcm1_mem */
+		[3] = PWRSTS_OFF_RET,	/* tcm2_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* hwa_mem */
+		[1] = PWRDM_POWER_ON,	/* sl2_mem */
+		[2] = PWRDM_POWER_ON,	/* tcm1_mem */
+		[3] = PWRDM_POWER_ON,	/* tcm2_mem */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* cam_44xx_pwrdm: Camera subsystem power domain */
+static struct powerdomain cam_44xx_pwrdm = {
+	.name		  = "cam_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_CAM_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_ON,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* cam_mem */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* cam_mem */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* l3init_44xx_pwrdm: L3 initiator peripherals power domain */
+static struct powerdomain l3init_44xx_pwrdm = {
+	.name		  = "l3init_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_L3INIT_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 1,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* l3init_bank1 */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* l3init_bank1 */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/* l4per_44xx_pwrdm: Target peripherals power domain */
+static struct powerdomain l4per_44xx_pwrdm = {
+	.name		  = "l4per_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_L4PER_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_RET_ON,
+	.pwrsts_logic_ret = PWRSTS_OFF_RET,
+	.banks		  = 2,
+	.pwrsts_mem_ret	= {
+		[0] = PWRDM_POWER_OFF,	/* nonretained_bank */
+		[1] = PWRDM_POWER_RET,	/* retained_bank */
+	},
+	.pwrsts_mem_on	= {
+		[0] = PWRDM_POWER_ON,	/* nonretained_bank */
+		[1] = PWRDM_POWER_ON,	/* retained_bank */
+	},
+	.flags		= PWRDM_HAS_LOWPOWERSTATECHANGE,
+};
+
+/*
+ * always_on_core_44xx_pwrdm: Always ON logic that sits in VDD_CORE voltage
+ * domain
+ */
+static struct powerdomain always_on_core_44xx_pwrdm = {
+	.name		  = "always_on_core_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_ALWAYS_ON_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_ON,
+};
+
+/* cefuse_44xx_pwrdm: Customer efuse controller power domain */
+static struct powerdomain cefuse_44xx_pwrdm = {
+	.name		  = "cefuse_pwrdm",
+	.prcm_offs	  = OMAP4430_PRM_CEFUSE_INST,
+	.prcm_partition	  = OMAP4430_PRM_PARTITION,
+	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP4430),
+	.pwrsts		  = PWRSTS_OFF_ON,
+};
+
+/*
+ * The following power domains are not under SW control
+ *
+ * always_on_iva
+ * always_on_mpu
+ * stdefuse
+ */
+
+/* As powerdomains are added or removed above, this list must also be changed */
+static struct powerdomain *powerdomains_omap44xx[] __initdata = {
+	&core_44xx_pwrdm,
+	&gfx_44xx_pwrdm,
+	&abe_44xx_pwrdm,
+	&dss_44xx_pwrdm,
+	&tesla_44xx_pwrdm,
+	&wkup_44xx_pwrdm,
+	&cpu0_44xx_pwrdm,
+	&cpu1_44xx_pwrdm,
+	&emu_44xx_pwrdm,
+	&mpu_44xx_pwrdm,
+	&ivahd_44xx_pwrdm,
+	&cam_44xx_pwrdm,
+	&l3init_44xx_pwrdm,
+	&l4per_44xx_pwrdm,
+	&always_on_core_44xx_pwrdm,
+	&cefuse_44xx_pwrdm,
+	NULL
+};
+
+void __init omap44xx_powerdomains_init(void)
+{
+	pwrdm_init(powerdomains_omap44xx, &omap4_pwrdm_operations);
+}
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index f81acee..87486f5 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -8,15 +8,12 @@
  * Copyright (C) 2007-2009 Nokia Corporation
  *
  * Written by Paul Walmsley
- * OMAP4 defines in this file are automatically generated from the OMAP hardware
- * databases.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-
 /* Module offsets from both CM_BASE & PRM_BASE */
 
 /*
@@ -51,75 +48,6 @@
 #define OMAP3430_NEON_MOD				0xb00
 #define OMAP3430ES2_USBHOST_MOD				0xc00
 
-#define BITS(n_bit)	\
-	(((1 << n_bit) - 1) | (1 << n_bit))
-
-#define BITFIELD(l_bit, u_bit)	\
-	(BITS(u_bit) & ~((BITS(l_bit)) >> 1))
-
-/* OMAP44XX specific module offsets */
-
-/* CM1 instances */
-
-#define OMAP4430_CM1_OCP_SOCKET_MOD	0x0000
-#define OMAP4430_CM1_CKGEN_MOD		0x0100
-#define OMAP4430_CM1_MPU_MOD		0x0300
-#define OMAP4430_CM1_TESLA_MOD		0x0400
-#define OMAP4430_CM1_ABE_MOD		0x0500
-#define OMAP4430_CM1_RESTORE_MOD	0x0e00
-#define OMAP4430_CM1_INSTR_MOD		0x0f00
-
-/* CM2 instances */
-
-#define OMAP4430_CM2_OCP_SOCKET_MOD	0x0000
-#define OMAP4430_CM2_CKGEN_MOD		0x0100
-#define OMAP4430_CM2_ALWAYS_ON_MOD	0x0600
-#define OMAP4430_CM2_CORE_MOD		0x0700
-#define OMAP4430_CM2_IVAHD_MOD		0x0f00
-#define OMAP4430_CM2_CAM_MOD		0x1000
-#define OMAP4430_CM2_DSS_MOD		0x1100
-#define OMAP4430_CM2_GFX_MOD		0x1200
-#define OMAP4430_CM2_L3INIT_MOD		0x1300
-#define OMAP4430_CM2_L4PER_MOD		0x1400
-#define OMAP4430_CM2_CEFUSE_MOD		0x1600
-#define OMAP4430_CM2_RESTORE_MOD	0x1e00
-#define OMAP4430_CM2_INSTR_MOD		0x1f00
-
-/* PRM instances */
-
-#define OMAP4430_PRM_OCP_SOCKET_MOD	0x0000
-#define OMAP4430_PRM_CKGEN_MOD		0x0100
-#define OMAP4430_PRM_MPU_MOD		0x0300
-#define OMAP4430_PRM_TESLA_MOD		0x0400
-#define OMAP4430_PRM_ABE_MOD		0x0500
-#define OMAP4430_PRM_ALWAYS_ON_MOD	0x0600
-#define OMAP4430_PRM_CORE_MOD		0x0700
-#define OMAP4430_PRM_IVAHD_MOD		0x0f00
-#define OMAP4430_PRM_CAM_MOD		0x1000
-#define OMAP4430_PRM_DSS_MOD		0x1100
-#define OMAP4430_PRM_GFX_MOD		0x1200
-#define OMAP4430_PRM_L3INIT_MOD		0x1300
-#define OMAP4430_PRM_L4PER_MOD		0x1400
-#define OMAP4430_PRM_CEFUSE_MOD		0x1600
-#define OMAP4430_PRM_WKUP_MOD		0x1700
-#define OMAP4430_PRM_WKUP_CM_MOD	0x1800
-#define OMAP4430_PRM_EMU_MOD		0x1900
-#define OMAP4430_PRM_EMU_CM_MOD		0x1a00
-#define OMAP4430_PRM_DEVICE_MOD		0x1b00
-#define OMAP4430_PRM_INSTR_MOD		0x1f00
-
-/* SCRM instances */
-
-#define OMAP4430_SCRM_SCRM_MOD	0x0000
-
-/* PRCM_MPU instances */
-
-#define OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_MOD	0x0000
-#define OMAP4430_PRCM_MPU_DEVICE_PRM_MOD	0x0200
-#define OMAP4430_PRCM_MPU_CPU0_MOD		0x0400
-#define OMAP4430_PRCM_MPU_CPU1_MOD		0x0800
-
-
 /* 24XX register bits shared between CM & PRM registers */
 
 /* CM_FCLKEN1_CORE, CM_ICLKEN1_CORE, PM_WKEN1_CORE shared bits */
@@ -461,5 +389,18 @@
 #define OMAP3430_EN_CORE_SHIFT				0
 #define OMAP3430_EN_CORE_MASK				(1 << 0)
 
+
+/*
+ * MAX_MODULE_HARDRESET_WAIT: Maximum microseconds to wait for an OMAP
+ * submodule to exit hardreset
+ */
+#define MAX_MODULE_HARDRESET_WAIT		10000
+
+# ifndef __ASSEMBLER__
+extern void __iomem *prm_base;
+extern void __iomem *cm_base;
+extern void __iomem *cm2_base;
+# endif
+
 #endif
 
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index a51846e..679bcd2 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -17,7 +17,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/module.h>
+
+#include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/io.h>
@@ -29,105 +30,27 @@
 
 #include "clock.h"
 #include "clock2xxx.h"
-#include "cm.h"
-#include "prm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
+#include "prm44xx.h"
+#include "prminst44xx.h"
 #include "prm-regbits-24xx.h"
 #include "prm-regbits-44xx.h"
 #include "control.h"
 
-static void __iomem *prm_base;
-static void __iomem *cm_base;
-static void __iomem *cm2_base;
+void __iomem *prm_base;
+void __iomem *cm_base;
+void __iomem *cm2_base;
 
 #define MAX_MODULE_ENABLE_WAIT		100000
 
-struct omap3_prcm_regs {
-	u32 control_padconf_sys_nirq;
-	u32 iva2_cm_clksel1;
-	u32 iva2_cm_clksel2;
-	u32 cm_sysconfig;
-	u32 sgx_cm_clksel;
-	u32 dss_cm_clksel;
-	u32 cam_cm_clksel;
-	u32 per_cm_clksel;
-	u32 emu_cm_clksel;
-	u32 emu_cm_clkstctrl;
-	u32 pll_cm_autoidle2;
-	u32 pll_cm_clksel4;
-	u32 pll_cm_clksel5;
-	u32 pll_cm_clken2;
-	u32 cm_polctrl;
-	u32 iva2_cm_fclken;
-	u32 iva2_cm_clken_pll;
-	u32 core_cm_fclken1;
-	u32 core_cm_fclken3;
-	u32 sgx_cm_fclken;
-	u32 wkup_cm_fclken;
-	u32 dss_cm_fclken;
-	u32 cam_cm_fclken;
-	u32 per_cm_fclken;
-	u32 usbhost_cm_fclken;
-	u32 core_cm_iclken1;
-	u32 core_cm_iclken2;
-	u32 core_cm_iclken3;
-	u32 sgx_cm_iclken;
-	u32 wkup_cm_iclken;
-	u32 dss_cm_iclken;
-	u32 cam_cm_iclken;
-	u32 per_cm_iclken;
-	u32 usbhost_cm_iclken;
-	u32 iva2_cm_autiidle2;
-	u32 mpu_cm_autoidle2;
-	u32 iva2_cm_clkstctrl;
-	u32 mpu_cm_clkstctrl;
-	u32 core_cm_clkstctrl;
-	u32 sgx_cm_clkstctrl;
-	u32 dss_cm_clkstctrl;
-	u32 cam_cm_clkstctrl;
-	u32 per_cm_clkstctrl;
-	u32 neon_cm_clkstctrl;
-	u32 usbhost_cm_clkstctrl;
-	u32 core_cm_autoidle1;
-	u32 core_cm_autoidle2;
-	u32 core_cm_autoidle3;
-	u32 wkup_cm_autoidle;
-	u32 dss_cm_autoidle;
-	u32 cam_cm_autoidle;
-	u32 per_cm_autoidle;
-	u32 usbhost_cm_autoidle;
-	u32 sgx_cm_sleepdep;
-	u32 dss_cm_sleepdep;
-	u32 cam_cm_sleepdep;
-	u32 per_cm_sleepdep;
-	u32 usbhost_cm_sleepdep;
-	u32 cm_clkout_ctrl;
-	u32 prm_clkout_ctrl;
-	u32 sgx_pm_wkdep;
-	u32 dss_pm_wkdep;
-	u32 cam_pm_wkdep;
-	u32 per_pm_wkdep;
-	u32 neon_pm_wkdep;
-	u32 usbhost_pm_wkdep;
-	u32 core_pm_mpugrpsel1;
-	u32 iva2_pm_ivagrpsel1;
-	u32 core_pm_mpugrpsel3;
-	u32 core_pm_ivagrpsel3;
-	u32 wkup_pm_mpugrpsel;
-	u32 wkup_pm_ivagrpsel;
-	u32 per_pm_mpugrpsel;
-	u32 per_pm_ivagrpsel;
-	u32 wkup_pm_wken;
-};
-
-static struct omap3_prcm_regs prcm_context;
-
 u32 omap_prcm_get_reset_sources(void)
 {
 	/* XXX This presumably needs modification for 34XX */
 	if (cpu_is_omap24xx() || cpu_is_omap34xx())
-		return prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f;
+		return omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST) & 0x7f;
 	if (cpu_is_omap44xx())
-		return prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f;
+		return omap2_prm_read_mod_reg(WKUP_MOD, OMAP4_RM_RSTST) & 0x7f;
 
 	return 0;
 }
@@ -143,126 +66,46 @@
 
 		prcm_offs = WKUP_MOD;
 	} else if (cpu_is_omap34xx()) {
-		u32 l;
-
 		prcm_offs = OMAP3430_GR_MOD;
-		l = ('B' << 24) | ('M' << 16) | (cmd ? (u8)*cmd : 0);
-		/* Reserve the first word in scratchpad for communicating
-		 * with the boot ROM. A pointer to a data structure
-		 * describing the boot process can be stored there,
-		 * cf. OMAP34xx TRM, Initialization / Software Booting
-		 * Configuration. */
-		omap_writel(l, OMAP343X_SCRATCHPAD + 4);
-	} else if (cpu_is_omap44xx())
-		prcm_offs = OMAP4430_PRM_DEVICE_MOD;
-	else
+		omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0));
+	} else if (cpu_is_omap44xx()) {
+		omap4_prm_global_warm_sw_reset(); /* never returns */
+	} else {
 		WARN_ON(1);
+	}
 
-	if (cpu_is_omap24xx() || cpu_is_omap34xx())
-		prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
-						 OMAP2_RM_RSTCTRL);
-	if (cpu_is_omap44xx())
-		prm_set_mod_reg_bits(OMAP4430_RST_GLOBAL_WARM_SW_MASK,
-				     prcm_offs, OMAP4_RM_RSTCTRL);
-}
+	/*
+	 * As per Errata i520, in some cases the user will not be able to
+	 * access DDR memory after a warm reset.
+	 * This situation occurs when the warm reset happens during a read
+	 * access to DDR memory. In that particular condition, the DDR memory
+	 * does not respond to the corrupted read command due to the warm
+	 * reset occurrence, but the SDRC keeps waiting for read completion.
+	 * The SDRC is not sensitive to the warm reset, but the interconnect
+	 * is reset on the fly, thus causing a misalignment between SDRC
+	 * logic, interconnect logic and DDR memory state.
+	 * WORKAROUND:
+	 * Steps to perform before a warm reset is triggered:
+	 * 1. enable self-refresh on idle request
+	 * 2. put SDRC in idle
+	 * 3. wait until SDRC goes to idle
+	 * 4. generate SW reset (Global SW reset)
+	 *
+	 * Steps to perform after the warm reset occurs (in the bootloader):
+	 * if a HW warm reset is the source, apply the steps below before any
+	 * accesses to SDRAM:
+	 * 1. reset SMS and SDRC and wait until the reset is complete
+	 * 2. re-initialize SMS, SDRC and memory
+	 *
+	 * NOTE: the above workaround is required only if the arch reset is
+	 * implemented via Global SW reset (GLOBAL_SW_RST). A DPLL3 reset does
+	 * not need it, since it also resets the SDRC as part of a cold reset.
+	 */
 
-static inline u32 __omap_prcm_read(void __iomem *base, s16 module, u16 reg)
-{
-	BUG_ON(!base);
-	return __raw_readl(base + module + reg);
-}
-
-static inline void __omap_prcm_write(u32 value, void __iomem *base,
-						s16 module, u16 reg)
-{
-	BUG_ON(!base);
-	__raw_writel(value, base + module + reg);
-}
-
-/* Read a register in a PRM module */
-u32 prm_read_mod_reg(s16 module, u16 idx)
-{
-	return __omap_prcm_read(prm_base, module, idx);
-}
-
-/* Write into a register in a PRM module */
-void prm_write_mod_reg(u32 val, s16 module, u16 idx)
-{
-	__omap_prcm_write(val, prm_base, module, idx);
-}
-
-/* Read-modify-write a register in a PRM module. Caller must lock */
-u32 prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
-{
-	u32 v;
-
-	v = prm_read_mod_reg(module, idx);
-	v &= ~mask;
-	v |= bits;
-	prm_write_mod_reg(v, module, idx);
-
-	return v;
-}
-
-/* Read a PRM register, AND it, and shift the result down to bit 0 */
-u32 prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
-{
-	u32 v;
-
-	v = prm_read_mod_reg(domain, idx);
-	v &= mask;
-	v >>= __ffs(mask);
-
-	return v;
-}
-
-/* Read a PRM register, AND it, and shift the result down to bit 0 */
-u32 omap4_prm_read_bits_shift(void __iomem *reg, u32 mask)
-{
-	u32 v;
-
-	v = __raw_readl(reg);
-	v &= mask;
-	v >>= __ffs(mask);
-
-	return v;
-}
-
-/* Read-modify-write a register in a PRM module. Caller must lock */
-u32 omap4_prm_rmw_reg_bits(u32 mask, u32 bits, void __iomem *reg)
-{
-	u32 v;
-
-	v = __raw_readl(reg);
-	v &= ~mask;
-	v |= bits;
-	__raw_writel(v, reg);
-
-	return v;
-}
-/* Read a register in a CM module */
-u32 cm_read_mod_reg(s16 module, u16 idx)
-{
-	return __omap_prcm_read(cm_base, module, idx);
-}
-
-/* Write into a register in a CM module */
-void cm_write_mod_reg(u32 val, s16 module, u16 idx)
-{
-	__omap_prcm_write(val, cm_base, module, idx);
-}
-
-/* Read-modify-write a register in a CM module. Caller must lock */
-u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
-{
-	u32 v;
-
-	v = cm_read_mod_reg(module, idx);
-	v &= ~mask;
-	v |= bits;
-	cm_write_mod_reg(v, module, idx);
-
-	return v;
+	/* XXX should be moved to some OMAP2/3 specific code */
+	omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
+				   OMAP2_RM_RSTCTRL);
+	omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
 }
 
 /**
@@ -274,6 +117,9 @@
  *
  * Returns 1 if the module indicated readiness in time, or 0 if it
  * failed to enable in roughly MAX_MODULE_ENABLE_WAIT microseconds.
+ *
+ * XXX This function is deprecated.  It should be removed once the
+ * hwmod conversion is complete.
  */
 int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest,
 				const char *name)
@@ -316,303 +162,3 @@
 		WARN_ON(!cm2_base);
 	}
 }
-
-#ifdef CONFIG_ARCH_OMAP3
-void omap3_prcm_save_context(void)
-{
-	prcm_context.control_padconf_sys_nirq =
-			 omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_SYSNIRQ);
-	prcm_context.iva2_cm_clksel1 =
-			 cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL1);
-	prcm_context.iva2_cm_clksel2 =
-			 cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_CLKSEL2);
-	prcm_context.cm_sysconfig = __raw_readl(OMAP3430_CM_SYSCONFIG);
-	prcm_context.sgx_cm_clksel =
-			 cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_CLKSEL);
-	prcm_context.dss_cm_clksel =
-			 cm_read_mod_reg(OMAP3430_DSS_MOD, CM_CLKSEL);
-	prcm_context.cam_cm_clksel =
-			 cm_read_mod_reg(OMAP3430_CAM_MOD, CM_CLKSEL);
-	prcm_context.per_cm_clksel =
-			 cm_read_mod_reg(OMAP3430_PER_MOD, CM_CLKSEL);
-	prcm_context.emu_cm_clksel =
-			 cm_read_mod_reg(OMAP3430_EMU_MOD, CM_CLKSEL1);
-	prcm_context.emu_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430_EMU_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.pll_cm_autoidle2 =
-			 cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE2);
-	prcm_context.pll_cm_clksel4 =
-			cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL4);
-	prcm_context.pll_cm_clksel5 =
-			 cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKSEL5);
-	prcm_context.pll_cm_clken2 =
-			cm_read_mod_reg(PLL_MOD, OMAP3430ES2_CM_CLKEN2);
-	prcm_context.cm_polctrl = __raw_readl(OMAP3430_CM_POLCTRL);
-	prcm_context.iva2_cm_fclken =
-			 cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_FCLKEN);
-	prcm_context.iva2_cm_clken_pll = cm_read_mod_reg(OMAP3430_IVA2_MOD,
-			OMAP3430_CM_CLKEN_PLL);
-	prcm_context.core_cm_fclken1 =
-			 cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
-	prcm_context.core_cm_fclken3 =
-			 cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
-	prcm_context.sgx_cm_fclken =
-			 cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_FCLKEN);
-	prcm_context.wkup_cm_fclken =
-			 cm_read_mod_reg(WKUP_MOD, CM_FCLKEN);
-	prcm_context.dss_cm_fclken =
-			 cm_read_mod_reg(OMAP3430_DSS_MOD, CM_FCLKEN);
-	prcm_context.cam_cm_fclken =
-			 cm_read_mod_reg(OMAP3430_CAM_MOD, CM_FCLKEN);
-	prcm_context.per_cm_fclken =
-			 cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
-	prcm_context.usbhost_cm_fclken =
-			 cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
-	prcm_context.core_cm_iclken1 =
-			 cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
-	prcm_context.core_cm_iclken2 =
-			 cm_read_mod_reg(CORE_MOD, CM_ICLKEN2);
-	prcm_context.core_cm_iclken3 =
-			 cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
-	prcm_context.sgx_cm_iclken =
-			 cm_read_mod_reg(OMAP3430ES2_SGX_MOD, CM_ICLKEN);
-	prcm_context.wkup_cm_iclken =
-			 cm_read_mod_reg(WKUP_MOD, CM_ICLKEN);
-	prcm_context.dss_cm_iclken =
-			 cm_read_mod_reg(OMAP3430_DSS_MOD, CM_ICLKEN);
-	prcm_context.cam_cm_iclken =
-			 cm_read_mod_reg(OMAP3430_CAM_MOD, CM_ICLKEN);
-	prcm_context.per_cm_iclken =
-			 cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
-	prcm_context.usbhost_cm_iclken =
-			 cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
-	prcm_context.iva2_cm_autiidle2 =
-			 cm_read_mod_reg(OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
-	prcm_context.mpu_cm_autoidle2 =
-			 cm_read_mod_reg(MPU_MOD, CM_AUTOIDLE2);
-	prcm_context.iva2_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.mpu_cm_clkstctrl =
-			 cm_read_mod_reg(MPU_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.core_cm_clkstctrl =
-			 cm_read_mod_reg(CORE_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.sgx_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430ES2_SGX_MOD,
-						OMAP2_CM_CLKSTCTRL);
-	prcm_context.dss_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.cam_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.per_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430_PER_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.neon_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430_NEON_MOD, OMAP2_CM_CLKSTCTRL);
-	prcm_context.usbhost_cm_clkstctrl =
-			 cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
-						OMAP2_CM_CLKSTCTRL);
-	prcm_context.core_cm_autoidle1 =
-			 cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE1);
-	prcm_context.core_cm_autoidle2 =
-			 cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE2);
-	prcm_context.core_cm_autoidle3 =
-			 cm_read_mod_reg(CORE_MOD, CM_AUTOIDLE3);
-	prcm_context.wkup_cm_autoidle =
-			 cm_read_mod_reg(WKUP_MOD, CM_AUTOIDLE);
-	prcm_context.dss_cm_autoidle =
-			 cm_read_mod_reg(OMAP3430_DSS_MOD, CM_AUTOIDLE);
-	prcm_context.cam_cm_autoidle =
-			 cm_read_mod_reg(OMAP3430_CAM_MOD, CM_AUTOIDLE);
-	prcm_context.per_cm_autoidle =
-			 cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
-	prcm_context.usbhost_cm_autoidle =
-			 cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
-	prcm_context.sgx_cm_sleepdep =
-		 cm_read_mod_reg(OMAP3430ES2_SGX_MOD, OMAP3430_CM_SLEEPDEP);
-	prcm_context.dss_cm_sleepdep =
-		 cm_read_mod_reg(OMAP3430_DSS_MOD, OMAP3430_CM_SLEEPDEP);
-	prcm_context.cam_cm_sleepdep =
-		 cm_read_mod_reg(OMAP3430_CAM_MOD, OMAP3430_CM_SLEEPDEP);
-	prcm_context.per_cm_sleepdep =
-		 cm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_CM_SLEEPDEP);
-	prcm_context.usbhost_cm_sleepdep =
-		 cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
-	prcm_context.cm_clkout_ctrl = cm_read_mod_reg(OMAP3430_CCR_MOD,
-		 OMAP3_CM_CLKOUT_CTRL_OFFSET);
-	prcm_context.prm_clkout_ctrl = prm_read_mod_reg(OMAP3430_CCR_MOD,
-		OMAP3_PRM_CLKOUT_CTRL_OFFSET);
-	prcm_context.sgx_pm_wkdep =
-		 prm_read_mod_reg(OMAP3430ES2_SGX_MOD, PM_WKDEP);
-	prcm_context.dss_pm_wkdep =
-		 prm_read_mod_reg(OMAP3430_DSS_MOD, PM_WKDEP);
-	prcm_context.cam_pm_wkdep =
-		 prm_read_mod_reg(OMAP3430_CAM_MOD, PM_WKDEP);
-	prcm_context.per_pm_wkdep =
-		 prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKDEP);
-	prcm_context.neon_pm_wkdep =
-		 prm_read_mod_reg(OMAP3430_NEON_MOD, PM_WKDEP);
-	prcm_context.usbhost_pm_wkdep =
-		 prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
-	prcm_context.core_pm_mpugrpsel1 =
-		 prm_read_mod_reg(CORE_MOD, OMAP3430_PM_MPUGRPSEL1);
-	prcm_context.iva2_pm_ivagrpsel1 =
-		 prm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_PM_IVAGRPSEL1);
-	prcm_context.core_pm_mpugrpsel3 =
-		 prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_MPUGRPSEL3);
-	prcm_context.core_pm_ivagrpsel3 =
-		 prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
-	prcm_context.wkup_pm_mpugrpsel =
-		 prm_read_mod_reg(WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
-	prcm_context.wkup_pm_ivagrpsel =
-		 prm_read_mod_reg(WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
-	prcm_context.per_pm_mpugrpsel =
-		 prm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
-	prcm_context.per_pm_ivagrpsel =
-		 prm_read_mod_reg(OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
-	prcm_context.wkup_pm_wken = prm_read_mod_reg(WKUP_MOD, PM_WKEN);
-	return;
-}
-
-void omap3_prcm_restore_context(void)
-{
-	omap_ctrl_writel(prcm_context.control_padconf_sys_nirq,
-					 OMAP343X_CONTROL_PADCONF_SYSNIRQ);
-	cm_write_mod_reg(prcm_context.iva2_cm_clksel1, OMAP3430_IVA2_MOD,
-					 CM_CLKSEL1);
-	cm_write_mod_reg(prcm_context.iva2_cm_clksel2, OMAP3430_IVA2_MOD,
-					 CM_CLKSEL2);
-	__raw_writel(prcm_context.cm_sysconfig, OMAP3430_CM_SYSCONFIG);
-	cm_write_mod_reg(prcm_context.sgx_cm_clksel, OMAP3430ES2_SGX_MOD,
-					 CM_CLKSEL);
-	cm_write_mod_reg(prcm_context.dss_cm_clksel, OMAP3430_DSS_MOD,
-					 CM_CLKSEL);
-	cm_write_mod_reg(prcm_context.cam_cm_clksel, OMAP3430_CAM_MOD,
-					 CM_CLKSEL);
-	cm_write_mod_reg(prcm_context.per_cm_clksel, OMAP3430_PER_MOD,
-					 CM_CLKSEL);
-	cm_write_mod_reg(prcm_context.emu_cm_clksel, OMAP3430_EMU_MOD,
-					 CM_CLKSEL1);
-	cm_write_mod_reg(prcm_context.emu_cm_clkstctrl, OMAP3430_EMU_MOD,
-					 OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.pll_cm_autoidle2, PLL_MOD,
-					 CM_AUTOIDLE2);
-	cm_write_mod_reg(prcm_context.pll_cm_clksel4, PLL_MOD,
-					OMAP3430ES2_CM_CLKSEL4);
-	cm_write_mod_reg(prcm_context.pll_cm_clksel5, PLL_MOD,
-					 OMAP3430ES2_CM_CLKSEL5);
-	cm_write_mod_reg(prcm_context.pll_cm_clken2, PLL_MOD,
-					OMAP3430ES2_CM_CLKEN2);
-	__raw_writel(prcm_context.cm_polctrl, OMAP3430_CM_POLCTRL);
-	cm_write_mod_reg(prcm_context.iva2_cm_fclken, OMAP3430_IVA2_MOD,
-					 CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.iva2_cm_clken_pll, OMAP3430_IVA2_MOD,
-					OMAP3430_CM_CLKEN_PLL);
-	cm_write_mod_reg(prcm_context.core_cm_fclken1, CORE_MOD, CM_FCLKEN1);
-	cm_write_mod_reg(prcm_context.core_cm_fclken3, CORE_MOD,
-					 OMAP3430ES2_CM_FCLKEN3);
-	cm_write_mod_reg(prcm_context.sgx_cm_fclken, OMAP3430ES2_SGX_MOD,
-					 CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.wkup_cm_fclken, WKUP_MOD, CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.dss_cm_fclken, OMAP3430_DSS_MOD,
-					 CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.cam_cm_fclken, OMAP3430_CAM_MOD,
-					 CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.per_cm_fclken, OMAP3430_PER_MOD,
-					 CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.usbhost_cm_fclken,
-					 OMAP3430ES2_USBHOST_MOD, CM_FCLKEN);
-	cm_write_mod_reg(prcm_context.core_cm_iclken1, CORE_MOD, CM_ICLKEN1);
-	cm_write_mod_reg(prcm_context.core_cm_iclken2, CORE_MOD, CM_ICLKEN2);
-	cm_write_mod_reg(prcm_context.core_cm_iclken3, CORE_MOD, CM_ICLKEN3);
-	cm_write_mod_reg(prcm_context.sgx_cm_iclken, OMAP3430ES2_SGX_MOD,
-					CM_ICLKEN);
-	cm_write_mod_reg(prcm_context.wkup_cm_iclken, WKUP_MOD, CM_ICLKEN);
-	cm_write_mod_reg(prcm_context.dss_cm_iclken, OMAP3430_DSS_MOD,
-					CM_ICLKEN);
-	cm_write_mod_reg(prcm_context.cam_cm_iclken, OMAP3430_CAM_MOD,
-					CM_ICLKEN);
-	cm_write_mod_reg(prcm_context.per_cm_iclken, OMAP3430_PER_MOD,
-					CM_ICLKEN);
-	cm_write_mod_reg(prcm_context.usbhost_cm_iclken,
-					OMAP3430ES2_USBHOST_MOD, CM_ICLKEN);
-	cm_write_mod_reg(prcm_context.iva2_cm_autiidle2, OMAP3430_IVA2_MOD,
-					CM_AUTOIDLE2);
-	cm_write_mod_reg(prcm_context.mpu_cm_autoidle2, MPU_MOD, CM_AUTOIDLE2);
-	cm_write_mod_reg(prcm_context.iva2_cm_clkstctrl, OMAP3430_IVA2_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.mpu_cm_clkstctrl, MPU_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.core_cm_clkstctrl, CORE_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.sgx_cm_clkstctrl, OMAP3430ES2_SGX_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.dss_cm_clkstctrl, OMAP3430_DSS_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.cam_cm_clkstctrl, OMAP3430_CAM_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.per_cm_clkstctrl, OMAP3430_PER_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.neon_cm_clkstctrl, OMAP3430_NEON_MOD,
-					OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.usbhost_cm_clkstctrl,
-				OMAP3430ES2_USBHOST_MOD, OMAP2_CM_CLKSTCTRL);
-	cm_write_mod_reg(prcm_context.core_cm_autoidle1, CORE_MOD,
-					CM_AUTOIDLE1);
-	cm_write_mod_reg(prcm_context.core_cm_autoidle2, CORE_MOD,
-					CM_AUTOIDLE2);
-	cm_write_mod_reg(prcm_context.core_cm_autoidle3, CORE_MOD,
-					CM_AUTOIDLE3);
-	cm_write_mod_reg(prcm_context.wkup_cm_autoidle, WKUP_MOD, CM_AUTOIDLE);
-	cm_write_mod_reg(prcm_context.dss_cm_autoidle, OMAP3430_DSS_MOD,
-					CM_AUTOIDLE);
-	cm_write_mod_reg(prcm_context.cam_cm_autoidle, OMAP3430_CAM_MOD,
-					CM_AUTOIDLE);
-	cm_write_mod_reg(prcm_context.per_cm_autoidle, OMAP3430_PER_MOD,
-					CM_AUTOIDLE);
-	cm_write_mod_reg(prcm_context.usbhost_cm_autoidle,
-					OMAP3430ES2_USBHOST_MOD, CM_AUTOIDLE);
-	cm_write_mod_reg(prcm_context.sgx_cm_sleepdep, OMAP3430ES2_SGX_MOD,
-					OMAP3430_CM_SLEEPDEP);
-	cm_write_mod_reg(prcm_context.dss_cm_sleepdep, OMAP3430_DSS_MOD,
-					OMAP3430_CM_SLEEPDEP);
-	cm_write_mod_reg(prcm_context.cam_cm_sleepdep, OMAP3430_CAM_MOD,
-					OMAP3430_CM_SLEEPDEP);
-	cm_write_mod_reg(prcm_context.per_cm_sleepdep, OMAP3430_PER_MOD,
-					OMAP3430_CM_SLEEPDEP);
-	cm_write_mod_reg(prcm_context.usbhost_cm_sleepdep,
-				OMAP3430ES2_USBHOST_MOD, OMAP3430_CM_SLEEPDEP);
-	cm_write_mod_reg(prcm_context.cm_clkout_ctrl, OMAP3430_CCR_MOD,
-					OMAP3_CM_CLKOUT_CTRL_OFFSET);
-	prm_write_mod_reg(prcm_context.prm_clkout_ctrl, OMAP3430_CCR_MOD,
-					OMAP3_PRM_CLKOUT_CTRL_OFFSET);
-	prm_write_mod_reg(prcm_context.sgx_pm_wkdep, OMAP3430ES2_SGX_MOD,
-					PM_WKDEP);
-	prm_write_mod_reg(prcm_context.dss_pm_wkdep, OMAP3430_DSS_MOD,
-					PM_WKDEP);
-	prm_write_mod_reg(prcm_context.cam_pm_wkdep, OMAP3430_CAM_MOD,
-					PM_WKDEP);
-	prm_write_mod_reg(prcm_context.per_pm_wkdep, OMAP3430_PER_MOD,
-					PM_WKDEP);
-	prm_write_mod_reg(prcm_context.neon_pm_wkdep, OMAP3430_NEON_MOD,
-					PM_WKDEP);
-	prm_write_mod_reg(prcm_context.usbhost_pm_wkdep,
-					OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
-	prm_write_mod_reg(prcm_context.core_pm_mpugrpsel1, CORE_MOD,
-					OMAP3430_PM_MPUGRPSEL1);
-	prm_write_mod_reg(prcm_context.iva2_pm_ivagrpsel1, OMAP3430_IVA2_MOD,
-					OMAP3430_PM_IVAGRPSEL1);
-	prm_write_mod_reg(prcm_context.core_pm_mpugrpsel3, CORE_MOD,
-					OMAP3430ES2_PM_MPUGRPSEL3);
-	prm_write_mod_reg(prcm_context.core_pm_ivagrpsel3, CORE_MOD,
-					OMAP3430ES2_PM_IVAGRPSEL3);
-	prm_write_mod_reg(prcm_context.wkup_pm_mpugrpsel, WKUP_MOD,
-					OMAP3430_PM_MPUGRPSEL);
-	prm_write_mod_reg(prcm_context.wkup_pm_ivagrpsel, WKUP_MOD,
-					OMAP3430_PM_IVAGRPSEL);
-	prm_write_mod_reg(prcm_context.per_pm_mpugrpsel, OMAP3430_PER_MOD,
-					OMAP3430_PM_MPUGRPSEL);
-	prm_write_mod_reg(prcm_context.per_pm_ivagrpsel, OMAP3430_PER_MOD,
-					 OMAP3430_PM_IVAGRPSEL);
-	prm_write_mod_reg(prcm_context.wkup_pm_wken, WKUP_MOD, PM_WKEN);
-	return;
-}
-#endif
diff --git a/arch/arm/mach-omap2/prcm44xx.h b/arch/arm/mach-omap2/prcm44xx.h
new file mode 100644
index 0000000..7334ffb
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm44xx.h
@@ -0,0 +1,42 @@
+/*
+ * OMAP4 PRCM definitions
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains macros and functions that are common to all of
+ * the PRM/CM/PRCM blocks on the OMAP4 devices: PRM, CM1, CM2,
+ * PRCM_MPU, SCRM
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRCM44XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRCM44XX_H
+
+/*
+ * OMAP4 PRCM partition IDs
+ *
+ * The numbers and order are arbitrary, but 0 is reserved for the
+ * 'invalid' partition in case someone forgets to add a
+ * .prcm_partition field.
+ */
+#define OMAP4430_INVALID_PRCM_PARTITION		0
+#define OMAP4430_PRM_PARTITION			1
+#define OMAP4430_CM1_PARTITION			2
+#define OMAP4430_CM2_PARTITION			3
+#define OMAP4430_SCRM_PARTITION			4
+#define OMAP4430_PRCM_MPU_PARTITION		5
+
+/*
+ * OMAP4_MAX_PRCM_PARTITIONS: set to the highest value of the PRCM partition
+ * IDs, plus one
+ */
+#define OMAP4_MAX_PRCM_PARTITIONS		6
+
+
+#endif
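The partition IDs above are meant to index per-partition data (for instance, a struct's .prcm_partition field), and OMAP4_MAX_PRCM_PARTITIONS exists so such tables can be sized directly from the ID space, with 0 reserved to catch an unset field. A minimal sketch of that indexing convention follows; the name table and lookup helper are hypothetical illustrations, not code from this patch:

#include <linux/types.h>

#include "prcm44xx.h"

/* Hypothetical table: one slot per partition ID, sized from the ID space */
static const char * const omap4_prcm_partition_names[OMAP4_MAX_PRCM_PARTITIONS] = {
	[OMAP4430_INVALID_PRCM_PARTITION]	= "invalid",
	[OMAP4430_PRM_PARTITION]		= "prm",
	[OMAP4430_CM1_PARTITION]		= "cm1",
	[OMAP4430_CM2_PARTITION]		= "cm2",
	[OMAP4430_SCRM_PARTITION]		= "scrm",
	[OMAP4430_PRCM_MPU_PARTITION]		= "prcm_mpu",
};

/* Hypothetical lookup: ID 0 flags a struct that never set .prcm_partition */
static const char *omap4_prcm_partition_name(u8 part)
{
	if (part == OMAP4430_INVALID_PRCM_PARTITION ||
	    part >= OMAP4_MAX_PRCM_PARTITIONS)
		return NULL;
	return omap4_prcm_partition_names[part];
}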
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.c b/arch/arm/mach-omap2/prcm_mpu44xx.c
new file mode 100644
index 0000000..171fe17
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.c
@@ -0,0 +1,45 @@
+/*
+ * OMAP4 PRCM_MPU module functions
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "prcm_mpu44xx.h"
+#include "cm-regbits-44xx.h"
+
+/* PRCM_MPU low-level functions */
+
+u32 omap4_prcm_mpu_read_inst_reg(s16 inst, u16 reg)
+{
+	return __raw_readl(OMAP44XX_PRCM_MPU_REGADDR(inst, reg));
+}
+
+void omap4_prcm_mpu_write_inst_reg(u32 val, s16 inst, u16 reg)
+{
+	__raw_writel(val, OMAP44XX_PRCM_MPU_REGADDR(inst, reg));
+}
+
+u32 omap4_prcm_mpu_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 reg)
+{
+	u32 v;
+
+	v = omap4_prcm_mpu_read_inst_reg(inst, reg);
+	v &= ~mask;
+	v |= bits;
+	omap4_prcm_mpu_write_inst_reg(v, inst, reg);
+
+	return v;
+}
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
new file mode 100644
index 0000000..729a644
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -0,0 +1,104 @@
+/*
+ * OMAP44xx PRCM MPU instance offset macros
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * XXX This file needs to be updated to align on one of "OMAP4", "OMAP44XX",
+ *     or "OMAP4430".
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRCM_MPU44XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRCM_MPU44XX_H
+
+#define OMAP4430_PRCM_MPU_BASE			0x48243000
+
+#define OMAP44XX_PRCM_MPU_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE + (inst) + (reg))
+
+/* PRCM_MPU instances */
+
+#define OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST	0x0000
+#define OMAP4430_PRCM_MPU_DEVICE_PRM_INST	0x0200
+#define OMAP4430_PRCM_MPU_CPU0_INST		0x0400
+#define OMAP4430_PRCM_MPU_CPU1_INST		0x0800
+
+/* PRCM_MPU clockdomain register offsets (from instance start) */
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0000
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0000
+
+
+/*
+ * PRCM_MPU
+ *
+ * The PRCM_MPU is a local PRCM inside the MPU subsystem. From the global
+ * PRCM's point of view, the PRCM_MPU is a single entity. It shares the same
+ * programming model as the global PRCM and can thus be treated as two new
+ * MODs inside the PRCM.
+ */
+
+/* PRCM_MPU.OCP_SOCKET_PRCM register offsets */
+#define OMAP4_REVISION_PRCM_OFFSET			0x0000
+#define OMAP4430_REVISION_PRCM				OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_INST, 0x0000)
+
+/* PRCM_MPU.DEVICE_PRM register offsets */
+#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET			0x0000
+#define OMAP4430_PRCM_MPU_PRM_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0000)
+#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET		0x0004
+#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT		OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_INST, 0x0004)
+
+/* PRCM_MPU.CPU0 register offsets */
+#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET			0x0000
+#define OMAP4430_PM_CPU0_PWRSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0000)
+#define OMAP4_PM_CPU0_PWRSTST_OFFSET			0x0004
+#define OMAP4430_PM_CPU0_PWRSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0004)
+#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET		0x0008
+#define OMAP4430_RM_CPU0_CPU0_CONTEXT			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0008)
+#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET		0x000c
+#define OMAP4430_RM_CPU0_CPU0_RSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x000c)
+#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET			0x0010
+#define OMAP4430_RM_CPU0_CPU0_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0010)
+#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET		0x0014
+#define OMAP4430_CM_CPU0_CPU0_CLKCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0014)
+#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET			0x0018
+#define OMAP4430_CM_CPU0_CLKSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_INST, 0x0018)
+
+/* PRCM_MPU.CPU1 register offsets */
+#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET			0x0000
+#define OMAP4430_PM_CPU1_PWRSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0000)
+#define OMAP4_PM_CPU1_PWRSTST_OFFSET			0x0004
+#define OMAP4430_PM_CPU1_PWRSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0004)
+#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET		0x0008
+#define OMAP4430_RM_CPU1_CPU1_CONTEXT			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0008)
+#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET		0x000c
+#define OMAP4430_RM_CPU1_CPU1_RSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x000c)
+#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET			0x0010
+#define OMAP4430_RM_CPU1_CPU1_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0010)
+#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET		0x0014
+#define OMAP4430_CM_CPU1_CPU1_CLKCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0014)
+#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET			0x0018
+#define OMAP4430_CM_CPU1_CLKSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_INST, 0x0018)
+
+/* Function prototypes */
+# ifndef __ASSEMBLER__
+extern u32 omap4_prcm_mpu_read_inst_reg(s16 inst, u16 idx);
+extern void omap4_prcm_mpu_write_inst_reg(u32 val, s16 inst, u16 idx);
+extern u32 omap4_prcm_mpu_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst,
+					    s16 idx);
+# endif
+
+#endif
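Each PRCM_MPU register is described twice above: as an offset from its instance, for use with the omap4_prcm_mpu_*_inst_reg() accessors added in prcm_mpu44xx.c, and as an absolute virtual address built by OMAP44XX_PRCM_MPU_REGADDR() for direct __raw_readl()/__raw_writel(). A minimal sketch of reading CPU0's power state status register through the accessor; the wrapper function is hypothetical and only illustrates the instance/offset split:

#include <linux/types.h>

#include "prcm_mpu44xx.h"

/* Hypothetical helper: return the raw PM_CPU0_PWRSTST contents */
static u32 omap4_cpu0_pwrstst_example(void)
{
	/*
	 * Equivalent to __raw_readl(OMAP4430_PM_CPU0_PWRSTST): both forms
	 * resolve to OMAP4430_PRCM_MPU_BASE + 0x0400 + 0x0004.
	 */
	return omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
					    OMAP4_PM_CPU0_PWRSTST_OFFSET);
}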
diff --git a/arch/arm/mach-omap2/prm-regbits-24xx.h b/arch/arm/mach-omap2/prm-regbits-24xx.h
index 0b188ff..6ac9661 100644
--- a/arch/arm/mach-omap2/prm-regbits-24xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-24xx.h
@@ -14,7 +14,7 @@
  * published by the Free Software Foundation.
  */
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 
 /* Bits shared between registers */
 
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 9e63cb7..64c087a 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -1,6 +1,3 @@
-#ifndef __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_34XX_H
-#define __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_34XX_H
-
 /*
  * OMAP3430 Power/Reset Management register bits
  *
@@ -13,8 +10,11 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_34XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_34XX_H
 
-#include "prm.h"
+
+#include "prm2xxx_3xxx.h"
 
 /* Shared register bits */
 
@@ -101,8 +101,11 @@
 #define OMAP3430_GRPSEL_MCSPI3_MASK			(1 << 20)
 #define OMAP3430_GRPSEL_MCSPI2_MASK			(1 << 19)
 #define OMAP3430_GRPSEL_MCSPI1_MASK			(1 << 18)
+#define OMAP3430_GRPSEL_I2C3_SHIFT			17
 #define OMAP3430_GRPSEL_I2C3_MASK			(1 << 17)
+#define OMAP3430_GRPSEL_I2C2_SHIFT			16
 #define OMAP3430_GRPSEL_I2C2_MASK			(1 << 16)
+#define OMAP3430_GRPSEL_I2C1_SHIFT			15
 #define OMAP3430_GRPSEL_I2C1_MASK			(1 << 15)
 #define OMAP3430_GRPSEL_UART2_MASK			(1 << 14)
 #define OMAP3430_GRPSEL_UART1_MASK			(1 << 13)
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index 25b19b6..6d2776f 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -22,8 +22,6 @@
 #ifndef __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_44XX_H
 #define __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_44XX_H
 
-#include "prm.h"
-
 
 /*
  * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index 7be040b..39d5621 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -1,321 +1,20 @@
-#ifndef __ARCH_ARM_MACH_OMAP2_PRM_H
-#define __ARCH_ARM_MACH_OMAP2_PRM_H
-
 /*
- * OMAP2/3 Power/Reset Management (PRM) register definitions
+ * OMAP2/3/4 Power/Reset Management (PRM) bitfield definitions
  *
  * Copyright (C) 2007-2009 Texas Instruments, Inc.
  * Copyright (C) 2010 Nokia Corporation
  *
- * Written by Paul Walmsley
+ * Paul Walmsley
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM_H
+#define __ARCH_ARM_MACH_OMAP2_PRM_H
 
 #include "prcm-common.h"
 
-#define OMAP2420_PRM_REGADDR(module, reg)				\
-		OMAP2_L4_IO_ADDRESS(OMAP2420_PRM_BASE + (module) + (reg))
-#define OMAP2430_PRM_REGADDR(module, reg)				\
-		OMAP2_L4_IO_ADDRESS(OMAP2430_PRM_BASE + (module) + (reg))
-#define OMAP34XX_PRM_REGADDR(module, reg)				\
-		OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
-#define OMAP44XX_PRM_REGADDR(module, reg)				\
-		OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (module) + (reg))
-#define OMAP44XX_PRCM_MPU_REGADDR(module, reg)				\
-		OMAP2_L4_IO_ADDRESS(OMAP4430_PRCM_MPU_BASE + (module) + (reg))
-
-#include "prm44xx.h"
-
-/*
- * Architecture-specific global PRM registers
- * Use __raw_{read,write}l() with these registers.
- *
- * With a few exceptions, these are the register names beginning with
- * PRCM_* on 24xx, and PRM_* on 34xx.  (The exceptions are the
- * IRQSTATUS and IRQENABLE bits.)
- *
- */
-
-#define OMAP2_PRCM_REVISION_OFFSET	0x0000
-#define OMAP2420_PRCM_REVISION		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0000)
-#define OMAP2_PRCM_SYSCONFIG_OFFSET	0x0010
-#define OMAP2420_PRCM_SYSCONFIG		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0010)
-
-#define OMAP2_PRCM_IRQSTATUS_MPU_OFFSET	0x0018
-#define OMAP2420_PRCM_IRQSTATUS_MPU	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0018)
-#define OMAP2_PRCM_IRQENABLE_MPU_OFFSET	0x001c
-#define OMAP2420_PRCM_IRQENABLE_MPU	OMAP2420_PRM_REGADDR(OCP_MOD, 0x001c)
-
-#define OMAP2_PRCM_VOLTCTRL_OFFSET	0x0050
-#define OMAP2420_PRCM_VOLTCTRL		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0050)
-#define OMAP2_PRCM_VOLTST_OFFSET	0x0054
-#define OMAP2420_PRCM_VOLTST		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0054)
-#define OMAP2_PRCM_CLKSRC_CTRL_OFFSET	0x0060
-#define OMAP2420_PRCM_CLKSRC_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0060)
-#define OMAP2_PRCM_CLKOUT_CTRL_OFFSET	0x0070
-#define OMAP2420_PRCM_CLKOUT_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0070)
-#define OMAP2_PRCM_CLKEMUL_CTRL_OFFSET	0x0078
-#define OMAP2420_PRCM_CLKEMUL_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0078)
-#define OMAP2_PRCM_CLKCFG_CTRL_OFFSET	0x0080
-#define OMAP2420_PRCM_CLKCFG_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0080)
-#define OMAP2_PRCM_CLKCFG_STATUS_OFFSET	0x0084
-#define OMAP2420_PRCM_CLKCFG_STATUS	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0084)
-#define OMAP2_PRCM_VOLTSETUP_OFFSET	0x0090
-#define OMAP2420_PRCM_VOLTSETUP		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0090)
-#define OMAP2_PRCM_CLKSSETUP_OFFSET	0x0094
-#define OMAP2420_PRCM_CLKSSETUP		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0094)
-#define OMAP2_PRCM_POLCTRL_OFFSET	0x0098
-#define OMAP2420_PRCM_POLCTRL		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0098)
-
-#define OMAP2430_PRCM_REVISION		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0000)
-#define OMAP2430_PRCM_SYSCONFIG		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0010)
-
-#define OMAP2430_PRCM_IRQSTATUS_MPU	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0018)
-#define OMAP2430_PRCM_IRQENABLE_MPU	OMAP2430_PRM_REGADDR(OCP_MOD, 0x001c)
-
-#define OMAP2430_PRCM_VOLTCTRL		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0050)
-#define OMAP2430_PRCM_VOLTST		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0054)
-#define OMAP2430_PRCM_CLKSRC_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0060)
-#define OMAP2430_PRCM_CLKOUT_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0070)
-#define OMAP2430_PRCM_CLKEMUL_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0078)
-#define OMAP2430_PRCM_CLKCFG_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0080)
-#define OMAP2430_PRCM_CLKCFG_STATUS	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0084)
-#define OMAP2430_PRCM_VOLTSETUP		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0090)
-#define OMAP2430_PRCM_CLKSSETUP		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0094)
-#define OMAP2430_PRCM_POLCTRL		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0098)
-
-#define OMAP3_PRM_REVISION_OFFSET	0x0004
-#define OMAP3430_PRM_REVISION		OMAP34XX_PRM_REGADDR(OCP_MOD, 0x0004)
-#define OMAP3_PRM_SYSCONFIG_OFFSET	0x0014
-#define OMAP3430_PRM_SYSCONFIG		OMAP34XX_PRM_REGADDR(OCP_MOD, 0x0014)
-
-#define OMAP3_PRM_IRQSTATUS_MPU_OFFSET	0x0018
-#define OMAP3430_PRM_IRQSTATUS_MPU	OMAP34XX_PRM_REGADDR(OCP_MOD, 0x0018)
-#define OMAP3_PRM_IRQENABLE_MPU_OFFSET	0x001c
-#define OMAP3430_PRM_IRQENABLE_MPU	OMAP34XX_PRM_REGADDR(OCP_MOD, 0x001c)
-
-
-#define OMAP3_PRM_VC_SMPS_SA_OFFSET	0x0020
-#define OMAP3430_PRM_VC_SMPS_SA		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0020)
-#define OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET	0x0024
-#define OMAP3430_PRM_VC_SMPS_VOL_RA	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0024)
-#define OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET	0x0028
-#define OMAP3430_PRM_VC_SMPS_CMD_RA	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0028)
-#define OMAP3_PRM_VC_CMD_VAL_0_OFFSET	0x002c
-#define OMAP3430_PRM_VC_CMD_VAL_0	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x002c)
-#define OMAP3_PRM_VC_CMD_VAL_1_OFFSET	0x0030
-#define OMAP3430_PRM_VC_CMD_VAL_1	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0030)
-#define OMAP3_PRM_VC_CH_CONF_OFFSET	0x0034
-#define OMAP3430_PRM_VC_CH_CONF		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0034)
-#define OMAP3_PRM_VC_I2C_CFG_OFFSET	0x0038
-#define OMAP3430_PRM_VC_I2C_CFG		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0038)
-#define OMAP3_PRM_VC_BYPASS_VAL_OFFSET	0x003c
-#define OMAP3430_PRM_VC_BYPASS_VAL	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x003c)
-#define OMAP3_PRM_RSTCTRL_OFFSET	0x0050
-#define OMAP3430_PRM_RSTCTRL		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0050)
-#define OMAP3_PRM_RSTTIME_OFFSET	0x0054
-#define OMAP3430_PRM_RSTTIME		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0054)
-#define OMAP3_PRM_RSTST_OFFSET	0x0058
-#define OMAP3430_PRM_RSTST		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0058)
-#define OMAP3_PRM_VOLTCTRL_OFFSET	0x0060
-#define OMAP3430_PRM_VOLTCTRL		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0060)
-#define OMAP3_PRM_SRAM_PCHARGE_OFFSET	0x0064
-#define OMAP3430_PRM_SRAM_PCHARGE	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0064)
-#define OMAP3_PRM_CLKSRC_CTRL_OFFSET	0x0070
-#define OMAP3430_PRM_CLKSRC_CTRL	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0070)
-#define OMAP3_PRM_VOLTSETUP1_OFFSET	0x0090
-#define OMAP3430_PRM_VOLTSETUP1		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0090)
-#define OMAP3_PRM_VOLTOFFSET_OFFSET	0x0094
-#define OMAP3430_PRM_VOLTOFFSET		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0094)
-#define OMAP3_PRM_CLKSETUP_OFFSET	0x0098
-#define OMAP3430_PRM_CLKSETUP		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0098)
-#define OMAP3_PRM_POLCTRL_OFFSET	0x009c
-#define OMAP3430_PRM_POLCTRL		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x009c)
-#define OMAP3_PRM_VOLTSETUP2_OFFSET	0x00a0
-#define OMAP3430_PRM_VOLTSETUP2		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00a0)
-#define OMAP3_PRM_VP1_CONFIG_OFFSET	0x00b0
-#define OMAP3430_PRM_VP1_CONFIG		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00b0)
-#define OMAP3_PRM_VP1_VSTEPMIN_OFFSET	0x00b4
-#define OMAP3430_PRM_VP1_VSTEPMIN	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00b4)
-#define OMAP3_PRM_VP1_VSTEPMAX_OFFSET	0x00b8
-#define OMAP3430_PRM_VP1_VSTEPMAX	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00b8)
-#define OMAP3_PRM_VP1_VLIMITTO_OFFSET	0x00bc
-#define OMAP3430_PRM_VP1_VLIMITTO	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00bc)
-#define OMAP3_PRM_VP1_VOLTAGE_OFFSET	0x00c0
-#define OMAP3430_PRM_VP1_VOLTAGE	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00c0)
-#define OMAP3_PRM_VP1_STATUS_OFFSET	0x00c4
-#define OMAP3430_PRM_VP1_STATUS		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00c4)
-#define OMAP3_PRM_VP2_CONFIG_OFFSET	0x00d0
-#define OMAP3430_PRM_VP2_CONFIG		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00d0)
-#define OMAP3_PRM_VP2_VSTEPMIN_OFFSET	0x00d4
-#define OMAP3430_PRM_VP2_VSTEPMIN	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00d4)
-#define OMAP3_PRM_VP2_VSTEPMAX_OFFSET	0x00d8
-#define OMAP3430_PRM_VP2_VSTEPMAX	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00d8)
-#define OMAP3_PRM_VP2_VLIMITTO_OFFSET	0x00dc
-#define OMAP3430_PRM_VP2_VLIMITTO	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00dc)
-#define OMAP3_PRM_VP2_VOLTAGE_OFFSET	0x00e0
-#define OMAP3430_PRM_VP2_VOLTAGE	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00e0)
-#define OMAP3_PRM_VP2_STATUS_OFFSET	0x00e4
-#define OMAP3430_PRM_VP2_STATUS		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00e4)
-
-#define OMAP3_PRM_CLKSEL_OFFSET	0x0040
-#define OMAP3430_PRM_CLKSEL		OMAP34XX_PRM_REGADDR(OMAP3430_CCR_MOD, 0x0040)
-#define OMAP3_PRM_CLKOUT_CTRL_OFFSET	0x0070
-#define OMAP3430_PRM_CLKOUT_CTRL	OMAP34XX_PRM_REGADDR(OMAP3430_CCR_MOD, 0x0070)
-
-/*
- * Module specific PRM registers from PRM_BASE + domain offset
- *
- * Use prm_{read,write}_mod_reg() with these registers.
- *
- * With a few exceptions, these are the register names beginning with
- * {PM,RM}_* on both architectures.  (The exceptions are the IRQSTATUS
- * and IRQENABLE bits.)
- *
- */
-
-/* Registers appearing on both 24xx and 34xx */
-
-#define OMAP2_RM_RSTCTRL				0x0050
-#define OMAP2_RM_RSTTIME				0x0054
-#define OMAP2_RM_RSTST					0x0058
-#define OMAP2_PM_PWSTCTRL				0x00e0
-#define OMAP2_PM_PWSTST					0x00e4
-
-#define PM_WKEN						0x00a0
-#define PM_WKEN1					PM_WKEN
-#define PM_WKST						0x00b0
-#define PM_WKST1					PM_WKST
-#define PM_WKDEP					0x00c8
-#define PM_EVGENCTRL					0x00d4
-#define PM_EVGENONTIM					0x00d8
-#define PM_EVGENOFFTIM					0x00dc
-
-/* Omap2 specific registers */
-#define OMAP24XX_PM_WKEN2				0x00a4
-#define OMAP24XX_PM_WKST2				0x00b4
-
-#define OMAP24XX_PRCM_IRQSTATUS_DSP			0x00f0	/* IVA mod */
-#define OMAP24XX_PRCM_IRQENABLE_DSP			0x00f4	/* IVA mod */
-#define OMAP24XX_PRCM_IRQSTATUS_IVA			0x00f8
-#define OMAP24XX_PRCM_IRQENABLE_IVA			0x00fc
-
-/* Omap3 specific registers */
-#define OMAP3430ES2_PM_WKEN3				0x00f0
-#define OMAP3430ES2_PM_WKST3				0x00b8
-
-#define OMAP3430_PM_MPUGRPSEL				0x00a4
-#define OMAP3430_PM_MPUGRPSEL1				OMAP3430_PM_MPUGRPSEL
-#define OMAP3430ES2_PM_MPUGRPSEL3			0x00f8
-
-#define OMAP3430_PM_IVAGRPSEL				0x00a8
-#define OMAP3430_PM_IVAGRPSEL1				OMAP3430_PM_IVAGRPSEL
-#define OMAP3430ES2_PM_IVAGRPSEL3			0x00f4
-
-#define OMAP3430_PM_PREPWSTST				0x00e8
-
-#define OMAP3430_PRM_IRQSTATUS_IVA2			0x00f8
-#define OMAP3430_PRM_IRQENABLE_IVA2			0x00fc
-
-/* Omap4 specific registers */
-#define OMAP4_RM_RSTCTRL				0x0000
-#define OMAP4_RM_RSTTIME				0x0004
-#define OMAP4_RM_RSTST					0x0008
-#define OMAP4_PM_PWSTCTRL				0x0000
-#define OMAP4_PM_PWSTST					0x0004
-
-
-#ifndef __ASSEMBLER__
-
-/* Power/reset management domain register get/set */
-extern u32 prm_read_mod_reg(s16 module, u16 idx);
-extern void prm_write_mod_reg(u32 val, s16 module, u16 idx);
-extern u32 prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
-
-/* Read-modify-write bits in a PRM register (by domain) */
-static inline u32 prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
-{
-	return prm_rmw_mod_reg_bits(bits, bits, module, idx);
-}
-
-static inline u32 prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
-{
-	return prm_rmw_mod_reg_bits(bits, 0x0, module, idx);
-}
-
-/* These omap2_ PRM functions apply to both OMAP2 and 3 */
-int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift);
-int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift);
-int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift);
-
-int omap4_prm_is_hardreset_asserted(void __iomem *rstctrl_reg, u8 shift);
-int omap4_prm_assert_hardreset(void __iomem *rstctrl_reg, u8 shift);
-int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift);
-
-#endif
-
-/*
- * Bits common to specific registers
- *
- * The 3430 register and bit names are generally used,
- * since they tend to make more sense
- */
-
-/* PM_EVGENONTIM_MPU */
-/* Named PM_EVEGENONTIM_MPU on the 24XX */
-#define OMAP_ONTIMEVAL_SHIFT				0
-#define OMAP_ONTIMEVAL_MASK				(0xffffffff << 0)
-
-/* PM_EVGENOFFTIM_MPU */
-/* Named PM_EVEGENOFFTIM_MPU on the 24XX */
-#define OMAP_OFFTIMEVAL_SHIFT				0
-#define OMAP_OFFTIMEVAL_MASK				(0xffffffff << 0)
-
-/* PRM_CLKSETUP and PRCM_VOLTSETUP */
-/* Named PRCM_CLKSSETUP on the 24XX */
-#define OMAP_SETUP_TIME_SHIFT				0
-#define OMAP_SETUP_TIME_MASK				(0xffff << 0)
-
-/* PRM_CLKSRC_CTRL */
-/* Named PRCM_CLKSRC_CTRL on the 24XX */
-#define OMAP_SYSCLKDIV_SHIFT				6
-#define OMAP_SYSCLKDIV_MASK				(0x3 << 6)
-#define OMAP_AUTOEXTCLKMODE_SHIFT			3
-#define OMAP_AUTOEXTCLKMODE_MASK			(0x3 << 3)
-#define OMAP_SYSCLKSEL_SHIFT				0
-#define OMAP_SYSCLKSEL_MASK				(0x3 << 0)
-
-/* PM_EVGENCTRL_MPU */
-#define OMAP_OFFLOADMODE_SHIFT				3
-#define OMAP_OFFLOADMODE_MASK				(0x3 << 3)
-#define OMAP_ONLOADMODE_SHIFT				1
-#define OMAP_ONLOADMODE_MASK				(0x3 << 1)
-#define OMAP_ENABLE_MASK				(1 << 0)
-
-/* PRM_RSTTIME */
-/* Named RM_RSTTIME_WKUP on the 24xx */
-#define OMAP_RSTTIME2_SHIFT				8
-#define OMAP_RSTTIME2_MASK				(0x1f << 8)
-#define OMAP_RSTTIME1_SHIFT				0
-#define OMAP_RSTTIME1_MASK				(0xff << 0)
-
-/* PRM_RSTCTRL */
-/* Named RM_RSTCTRL_WKUP on the 24xx */
-/* 2420 calls RST_DPLL3 'RST_DPLL' */
-#define OMAP_RST_DPLL3_MASK				(1 << 2)
-#define OMAP_RST_GS_MASK				(1 << 1)
-
-
-/*
- * Bits common to module-shared registers
- *
- * Not all registers of a particular type support all of these bits -
- * check TRM if you are unsure
- */
-
 /*
  * 24XX: PM_PWSTST_CORE, PM_PWSTST_GFX, PM_PWSTST_MPU, PM_PWSTST_DSP
  *
@@ -341,59 +40,6 @@
 #define OMAP_POWERSTATEST_MASK				(0x3 << 0)
 
 /*
- * 24XX: RM_RSTST_MPU and RM_RSTST_DSP - on 24XX, 'COREDOMAINWKUP_RST' is
- *	 called 'COREWKUP_RST'
- *
- * 3430: RM_RSTST_IVA2, RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSS,
- *	 RM_RSTST_CAM, RM_RSTST_PER, RM_RSTST_NEON
- */
-#define OMAP_COREDOMAINWKUP_RST_MASK			(1 << 3)
-
-/*
- * 24XX: RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSP
- *
- * 2430: RM_RSTST_MDM
- *
- * 3430: RM_RSTST_CORE, RM_RSTST_EMU
- */
-#define OMAP_DOMAINWKUP_RST_MASK			(1 << 2)
-
-/*
- * 24XX: RM_RSTST_MPU, RM_RSTST_WKUP, RM_RSTST_DSP
- *	 On 24XX, 'GLOBALWARM_RST' is called 'GLOBALWMPU_RST'.
- *
- * 2430: RM_RSTST_MDM
- *
- * 3430: RM_RSTST_CORE, RM_RSTST_EMU
- */
-#define OMAP_GLOBALWARM_RST_MASK			(1 << 1)
-#define OMAP_GLOBALCOLD_RST_MASK			(1 << 0)
-
-/*
- * 24XX: PM_WKDEP_GFX, PM_WKDEP_MPU, PM_WKDEP_CORE, PM_WKDEP_DSP
- *	 2420 TRM sometimes uses "EN_WAKEUP" instead of "EN_WKUP"
- *
- * 2430: PM_WKDEP_MDM
- *
- * 3430: PM_WKDEP_IVA2, PM_WKDEP_GFX, PM_WKDEP_DSS, PM_WKDEP_CAM,
- *	 PM_WKDEP_PER
- */
-#define OMAP_EN_WKUP_SHIFT				4
-#define OMAP_EN_WKUP_MASK				(1 << 4)
-
-/*
- * 24XX: PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
- *	 PM_PWSTCTRL_DSP
- *
- * 2430: PM_PWSTCTRL_MDM
- *
- * 3430: PM_PWSTCTRL_IVA2, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
- *	 PM_PWSTCTRL_DSS, PM_PWSTCTRL_CAM, PM_PWSTCTRL_PER,
- *	 PM_PWSTCTRL_NEON
- */
-#define OMAP_LOGICRETSTATE_MASK				(1 << 2)
-
-/*
  * 24XX: PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
  *       PM_PWSTCTRL_DSP, PM_PWSTST_MPU
  *
@@ -407,11 +53,4 @@
 #define OMAP_POWERSTATE_MASK				(0x3 << 0)
 
 
-/*
- * MAX_MODULE_HARDRESET_WAIT: Maximum microseconds to wait for an OMAP
- * submodule to exit hardreset
- */
-#define MAX_MODULE_HARDRESET_WAIT		10000
-
-
 #endif
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index 421771e..ec03625 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -12,18 +12,65 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/io.h>
 
 #include <plat/common.h>
 #include <plat/cpu.h>
 #include <plat/prcm.h>
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
+#include "cm2xxx_3xxx.h"
 #include "prm-regbits-24xx.h"
 #include "prm-regbits-34xx.h"
 
+u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
+{
+	return __raw_readl(prm_base + module + idx);
+}
+
+void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
+{
+	__raw_writel(val, prm_base + module + idx);
+}
+
+/* Read-modify-write a register in a PRM module. Caller must lock */
+u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
+{
+	u32 v;
+
+	v = omap2_prm_read_mod_reg(module, idx);
+	v &= ~mask;
+	v |= bits;
+	omap2_prm_write_mod_reg(v, module, idx);
+
+	return v;
+}
+
+/* Read a PRM register, AND it, and shift the result down to bit 0 */
+u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
+{
+	u32 v;
+
+	v = omap2_prm_read_mod_reg(domain, idx);
+	v &= mask;
+	v >>= __ffs(mask);
+
+	return v;
+}
+
+u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	return omap2_prm_rmw_mod_reg_bits(bits, bits, module, idx);
+}
+
+u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	return omap2_prm_rmw_mod_reg_bits(bits, 0x0, module, idx);
+}
+
+
 /**
  * omap2_prm_is_hardreset_asserted - read the HW reset line state of
  * submodules contained in the hwmod module
@@ -39,7 +86,7 @@
 	if (!(cpu_is_omap24xx() || cpu_is_omap34xx()))
 		return -EINVAL;
 
-	return prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL,
+	return omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL,
 				       (1 << shift));
 }
 
@@ -63,7 +110,7 @@
 		return -EINVAL;
 
 	mask = 1 << shift;
-	prm_rmw_mod_reg_bits(mask, mask, prm_mod, OMAP2_RM_RSTCTRL);
+	omap2_prm_rmw_mod_reg_bits(mask, mask, prm_mod, OMAP2_RM_RSTCTRL);
 
 	return 0;
 }
@@ -93,18 +140,17 @@
 	mask = 1 << shift;
 
 	/* Check the current status to avoid de-asserting the line twice */
-	if (prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, mask) == 0)
+	if (omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL, mask) == 0)
 		return -EEXIST;
 
 	/* Clear the reset status by writing 1 to the status bit */
-	prm_rmw_mod_reg_bits(0xffffffff, mask, prm_mod, OMAP2_RM_RSTST);
+	omap2_prm_rmw_mod_reg_bits(0xffffffff, mask, prm_mod, OMAP2_RM_RSTST);
 	/* de-assert the reset control line */
-	prm_rmw_mod_reg_bits(mask, 0, prm_mod, OMAP2_RM_RSTCTRL);
+	omap2_prm_rmw_mod_reg_bits(mask, 0, prm_mod, OMAP2_RM_RSTCTRL);
 	/* wait the status to be set */
-	omap_test_timeout(prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTST,
+	omap_test_timeout(omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTST,
 						  mask),
 			  MAX_MODULE_HARDRESET_WAIT, c);
 
 	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
 }
-
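The omap2_prm_read_mod_bits_shift() helper added above masks a register and shifts the result down by __ffs(mask), so callers receive the field value at bit 0 rather than in place. A minimal sketch of reading the MPU power domain's POWERSTATEST field with it, using OMAP2_PM_PWSTST from the new prm2xxx_3xxx.h below and OMAP_POWERSTATEST_MASK retained in prm.h; the wrapper and its use of MPU_MOD from prcm-common.h are illustrative assumptions:

#include "prm2xxx_3xxx.h"

/* Hypothetical wrapper: return the 2-bit POWERSTATEST field of the MPU domain */
static u32 omap2_mpu_read_powerstatest(void)
{
	/* OMAP_POWERSTATEST_MASK is (0x3 << 0), so the result is 0..3 */
	return omap2_prm_read_mod_bits_shift(MPU_MOD, OMAP2_PM_PWSTST,
					     OMAP_POWERSTATEST_MASK);
}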
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
new file mode 100644
index 0000000..49654c8
--- /dev/null
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -0,0 +1,428 @@
+/*
+ * OMAP2/3 Power/Reset Management (PRM) register definitions
+ *
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The PRM hardware modules on the OMAP2/3 are quite similar to each
+ * other.  The PRM on OMAP4 has a new register layout, and is handled
+ * in a separate file.
+ */
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM2XXX_3XXX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM2XXX_3XXX_H
+
+#include "prcm-common.h"
+#include "prm.h"
+
+#define OMAP2420_PRM_REGADDR(module, reg)				\
+		OMAP2_L4_IO_ADDRESS(OMAP2420_PRM_BASE + (module) + (reg))
+#define OMAP2430_PRM_REGADDR(module, reg)				\
+		OMAP2_L4_IO_ADDRESS(OMAP2430_PRM_BASE + (module) + (reg))
+#define OMAP34XX_PRM_REGADDR(module, reg)				\
+		OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
+
+
+/*
+ * OMAP2-specific global PRM registers
+ * Use __raw_{read,write}l() with these registers.
+ *
+ * With a few exceptions, these are the register names beginning with
+ * PRCM_* on 24xx.  (The exceptions are the IRQSTATUS and IRQENABLE
+ * bits.)
+ *
+ */
+
+#define OMAP2_PRCM_REVISION_OFFSET	0x0000
+#define OMAP2420_PRCM_REVISION		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0000)
+#define OMAP2_PRCM_SYSCONFIG_OFFSET	0x0010
+#define OMAP2420_PRCM_SYSCONFIG		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0010)
+
+#define OMAP2_PRCM_IRQSTATUS_MPU_OFFSET	0x0018
+#define OMAP2420_PRCM_IRQSTATUS_MPU	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0018)
+#define OMAP2_PRCM_IRQENABLE_MPU_OFFSET	0x001c
+#define OMAP2420_PRCM_IRQENABLE_MPU	OMAP2420_PRM_REGADDR(OCP_MOD, 0x001c)
+
+#define OMAP2_PRCM_VOLTCTRL_OFFSET	0x0050
+#define OMAP2420_PRCM_VOLTCTRL		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0050)
+#define OMAP2_PRCM_VOLTST_OFFSET	0x0054
+#define OMAP2420_PRCM_VOLTST		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0054)
+#define OMAP2_PRCM_CLKSRC_CTRL_OFFSET	0x0060
+#define OMAP2420_PRCM_CLKSRC_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0060)
+#define OMAP2_PRCM_CLKOUT_CTRL_OFFSET	0x0070
+#define OMAP2420_PRCM_CLKOUT_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0070)
+#define OMAP2_PRCM_CLKEMUL_CTRL_OFFSET	0x0078
+#define OMAP2420_PRCM_CLKEMUL_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0078)
+#define OMAP2_PRCM_CLKCFG_CTRL_OFFSET	0x0080
+#define OMAP2420_PRCM_CLKCFG_CTRL	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0080)
+#define OMAP2_PRCM_CLKCFG_STATUS_OFFSET	0x0084
+#define OMAP2420_PRCM_CLKCFG_STATUS	OMAP2420_PRM_REGADDR(OCP_MOD, 0x0084)
+#define OMAP2_PRCM_VOLTSETUP_OFFSET	0x0090
+#define OMAP2420_PRCM_VOLTSETUP		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0090)
+#define OMAP2_PRCM_CLKSSETUP_OFFSET	0x0094
+#define OMAP2420_PRCM_CLKSSETUP		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0094)
+#define OMAP2_PRCM_POLCTRL_OFFSET	0x0098
+#define OMAP2420_PRCM_POLCTRL		OMAP2420_PRM_REGADDR(OCP_MOD, 0x0098)
+
+#define OMAP2430_PRCM_REVISION		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0000)
+#define OMAP2430_PRCM_SYSCONFIG		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0010)
+
+#define OMAP2430_PRCM_IRQSTATUS_MPU	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0018)
+#define OMAP2430_PRCM_IRQENABLE_MPU	OMAP2430_PRM_REGADDR(OCP_MOD, 0x001c)
+
+#define OMAP2430_PRCM_VOLTCTRL		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0050)
+#define OMAP2430_PRCM_VOLTST		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0054)
+#define OMAP2430_PRCM_CLKSRC_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0060)
+#define OMAP2430_PRCM_CLKOUT_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0070)
+#define OMAP2430_PRCM_CLKEMUL_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0078)
+#define OMAP2430_PRCM_CLKCFG_CTRL	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0080)
+#define OMAP2430_PRCM_CLKCFG_STATUS	OMAP2430_PRM_REGADDR(OCP_MOD, 0x0084)
+#define OMAP2430_PRCM_VOLTSETUP		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0090)
+#define OMAP2430_PRCM_CLKSSETUP		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0094)
+#define OMAP2430_PRCM_POLCTRL		OMAP2430_PRM_REGADDR(OCP_MOD, 0x0098)
+
+/*
+ * OMAP3-specific global PRM registers
+ * Use __raw_{read,write}l() with these registers.
+ *
+ * With a few exceptions, these are the register names beginning with
+ * PRM_* on 34xx.  (The exceptions are the IRQSTATUS and IRQENABLE
+ * bits.)
+ */
+
+#define OMAP3_PRM_REVISION_OFFSET	0x0004
+#define OMAP3430_PRM_REVISION		OMAP34XX_PRM_REGADDR(OCP_MOD, 0x0004)
+#define OMAP3_PRM_SYSCONFIG_OFFSET	0x0014
+#define OMAP3430_PRM_SYSCONFIG		OMAP34XX_PRM_REGADDR(OCP_MOD, 0x0014)
+
+#define OMAP3_PRM_IRQSTATUS_MPU_OFFSET	0x0018
+#define OMAP3430_PRM_IRQSTATUS_MPU	OMAP34XX_PRM_REGADDR(OCP_MOD, 0x0018)
+#define OMAP3_PRM_IRQENABLE_MPU_OFFSET	0x001c
+#define OMAP3430_PRM_IRQENABLE_MPU	OMAP34XX_PRM_REGADDR(OCP_MOD, 0x001c)
+
+
+#define OMAP3_PRM_VC_SMPS_SA_OFFSET	0x0020
+#define OMAP3430_PRM_VC_SMPS_SA		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0020)
+#define OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET	0x0024
+#define OMAP3430_PRM_VC_SMPS_VOL_RA	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0024)
+#define OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET	0x0028
+#define OMAP3430_PRM_VC_SMPS_CMD_RA	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0028)
+#define OMAP3_PRM_VC_CMD_VAL_0_OFFSET	0x002c
+#define OMAP3430_PRM_VC_CMD_VAL_0	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x002c)
+#define OMAP3_PRM_VC_CMD_VAL_1_OFFSET	0x0030
+#define OMAP3430_PRM_VC_CMD_VAL_1	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0030)
+#define OMAP3_PRM_VC_CH_CONF_OFFSET	0x0034
+#define OMAP3430_PRM_VC_CH_CONF		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0034)
+#define OMAP3_PRM_VC_I2C_CFG_OFFSET	0x0038
+#define OMAP3430_PRM_VC_I2C_CFG		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0038)
+#define OMAP3_PRM_VC_BYPASS_VAL_OFFSET	0x003c
+#define OMAP3430_PRM_VC_BYPASS_VAL	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x003c)
+#define OMAP3_PRM_RSTCTRL_OFFSET	0x0050
+#define OMAP3430_PRM_RSTCTRL		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0050)
+#define OMAP3_PRM_RSTTIME_OFFSET	0x0054
+#define OMAP3430_PRM_RSTTIME		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0054)
+#define OMAP3_PRM_RSTST_OFFSET	0x0058
+#define OMAP3430_PRM_RSTST		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0058)
+#define OMAP3_PRM_VOLTCTRL_OFFSET	0x0060
+#define OMAP3430_PRM_VOLTCTRL		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0060)
+#define OMAP3_PRM_SRAM_PCHARGE_OFFSET	0x0064
+#define OMAP3430_PRM_SRAM_PCHARGE	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0064)
+#define OMAP3_PRM_CLKSRC_CTRL_OFFSET	0x0070
+#define OMAP3430_PRM_CLKSRC_CTRL	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0070)
+#define OMAP3_PRM_VOLTSETUP1_OFFSET	0x0090
+#define OMAP3430_PRM_VOLTSETUP1		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0090)
+#define OMAP3_PRM_VOLTOFFSET_OFFSET	0x0094
+#define OMAP3430_PRM_VOLTOFFSET		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0094)
+#define OMAP3_PRM_CLKSETUP_OFFSET	0x0098
+#define OMAP3430_PRM_CLKSETUP		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x0098)
+#define OMAP3_PRM_POLCTRL_OFFSET	0x009c
+#define OMAP3430_PRM_POLCTRL		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x009c)
+#define OMAP3_PRM_VOLTSETUP2_OFFSET	0x00a0
+#define OMAP3430_PRM_VOLTSETUP2		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00a0)
+#define OMAP3_PRM_VP1_CONFIG_OFFSET	0x00b0
+#define OMAP3430_PRM_VP1_CONFIG		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00b0)
+#define OMAP3_PRM_VP1_VSTEPMIN_OFFSET	0x00b4
+#define OMAP3430_PRM_VP1_VSTEPMIN	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00b4)
+#define OMAP3_PRM_VP1_VSTEPMAX_OFFSET	0x00b8
+#define OMAP3430_PRM_VP1_VSTEPMAX	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00b8)
+#define OMAP3_PRM_VP1_VLIMITTO_OFFSET	0x00bc
+#define OMAP3430_PRM_VP1_VLIMITTO	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00bc)
+#define OMAP3_PRM_VP1_VOLTAGE_OFFSET	0x00c0
+#define OMAP3430_PRM_VP1_VOLTAGE	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00c0)
+#define OMAP3_PRM_VP1_STATUS_OFFSET	0x00c4
+#define OMAP3430_PRM_VP1_STATUS		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00c4)
+#define OMAP3_PRM_VP2_CONFIG_OFFSET	0x00d0
+#define OMAP3430_PRM_VP2_CONFIG		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00d0)
+#define OMAP3_PRM_VP2_VSTEPMIN_OFFSET	0x00d4
+#define OMAP3430_PRM_VP2_VSTEPMIN	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00d4)
+#define OMAP3_PRM_VP2_VSTEPMAX_OFFSET	0x00d8
+#define OMAP3430_PRM_VP2_VSTEPMAX	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00d8)
+#define OMAP3_PRM_VP2_VLIMITTO_OFFSET	0x00dc
+#define OMAP3430_PRM_VP2_VLIMITTO	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00dc)
+#define OMAP3_PRM_VP2_VOLTAGE_OFFSET	0x00e0
+#define OMAP3430_PRM_VP2_VOLTAGE	OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00e0)
+#define OMAP3_PRM_VP2_STATUS_OFFSET	0x00e4
+#define OMAP3430_PRM_VP2_STATUS		OMAP34XX_PRM_REGADDR(OMAP3430_GR_MOD, 0x00e4)
+
+#define OMAP3_PRM_CLKSEL_OFFSET	0x0040
+#define OMAP3430_PRM_CLKSEL		OMAP34XX_PRM_REGADDR(OMAP3430_CCR_MOD, 0x0040)
+#define OMAP3_PRM_CLKOUT_CTRL_OFFSET	0x0070
+#define OMAP3430_PRM_CLKOUT_CTRL	OMAP34XX_PRM_REGADDR(OMAP3430_CCR_MOD, 0x0070)
+
+/*
+ * Module specific PRM register offsets from PRM_BASE + domain offset
+ *
+ * Use prm_{read,write}_mod_reg() with these registers.
+ *
+ * With a few exceptions, these are the register names beginning with
+ * {PM,RM}_* on both OMAP2/3 SoC families.  (The exceptions are the
+ * IRQSTATUS and IRQENABLE bits.)
+ */
+
+/* Register offsets appearing on both OMAP2 and OMAP3 */
+
+#define OMAP2_RM_RSTCTRL				0x0050
+#define OMAP2_RM_RSTTIME				0x0054
+#define OMAP2_RM_RSTST					0x0058
+#define OMAP2_PM_PWSTCTRL				0x00e0
+#define OMAP2_PM_PWSTST					0x00e4
+
+#define PM_WKEN						0x00a0
+#define PM_WKEN1					PM_WKEN
+#define PM_WKST						0x00b0
+#define PM_WKST1					PM_WKST
+#define PM_WKDEP					0x00c8
+#define PM_EVGENCTRL					0x00d4
+#define PM_EVGENONTIM					0x00d8
+#define PM_EVGENOFFTIM					0x00dc
+
+/* OMAP2xxx specific register offsets */
+#define OMAP24XX_PM_WKEN2				0x00a4
+#define OMAP24XX_PM_WKST2				0x00b4
+
+#define OMAP24XX_PRCM_IRQSTATUS_DSP			0x00f0	/* IVA mod */
+#define OMAP24XX_PRCM_IRQENABLE_DSP			0x00f4	/* IVA mod */
+#define OMAP24XX_PRCM_IRQSTATUS_IVA			0x00f8
+#define OMAP24XX_PRCM_IRQENABLE_IVA			0x00fc
+
+/* OMAP3 specific register offsets */
+#define OMAP3430ES2_PM_WKEN3				0x00f0
+#define OMAP3430ES2_PM_WKST3				0x00b8
+
+#define OMAP3430_PM_MPUGRPSEL				0x00a4
+#define OMAP3430_PM_MPUGRPSEL1				OMAP3430_PM_MPUGRPSEL
+#define OMAP3430ES2_PM_MPUGRPSEL3			0x00f8
+
+#define OMAP3430_PM_IVAGRPSEL				0x00a8
+#define OMAP3430_PM_IVAGRPSEL1				OMAP3430_PM_IVAGRPSEL
+#define OMAP3430ES2_PM_IVAGRPSEL3			0x00f4
+
+#define OMAP3430_PM_PREPWSTST				0x00e8
+
+#define OMAP3430_PRM_IRQSTATUS_IVA2			0x00f8
+#define OMAP3430_PRM_IRQENABLE_IVA2			0x00fc
+
+
+#ifndef __ASSEMBLER__
+/*
+ * Stub omap2xxx/omap3xxx functions so that common files
+ * continue to build when custom builds are used
+ */
+#if defined(CONFIG_ARCH_OMAP4) && !(defined(CONFIG_ARCH_OMAP2) ||	\
+					defined(CONFIG_ARCH_OMAP3))
+static inline u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+}
+static inline u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits,
+		s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+static inline int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift)
+{
+	WARN(1, "prm: omap2xxx/omap3xxx-specific function, "
+		"not supposed to be used on omap4\n");
+	return 0;
+}
+#else
+/* Power/reset management domain register get/set */
+extern u32 omap2_prm_read_mod_reg(s16 module, u16 idx);
+extern void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx);
+extern u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
+extern u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx);
+extern u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx);
+extern u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask);
+
+/* These omap2_ PRM functions apply to both OMAP2 and 3 */
+extern int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift);
+extern int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift);
+extern int omap2_prm_deassert_hardreset(s16 prm_mod, u8 shift);
+
+#endif	/* CONFIG_ARCH_OMAP4 */
+#endif
+
+/*
+ * Bits common to specific registers
+ *
+ * The 3430 register and bit names are generally used,
+ * since they tend to make more sense
+ */
+
+/* PM_EVGENONTIM_MPU */
+/* Named PM_EVEGENONTIM_MPU on the 24XX */
+#define OMAP_ONTIMEVAL_SHIFT				0
+#define OMAP_ONTIMEVAL_MASK				(0xffffffff << 0)
+
+/* PM_EVGENOFFTIM_MPU */
+/* Named PM_EVEGENOFFTIM_MPU on the 24XX */
+#define OMAP_OFFTIMEVAL_SHIFT				0
+#define OMAP_OFFTIMEVAL_MASK				(0xffffffff << 0)
+
+/* PRM_CLKSETUP and PRCM_VOLTSETUP */
+/* Named PRCM_CLKSSETUP on the 24XX */
+#define OMAP_SETUP_TIME_SHIFT				0
+#define OMAP_SETUP_TIME_MASK				(0xffff << 0)
+
+/* PRM_CLKSRC_CTRL */
+/* Named PRCM_CLKSRC_CTRL on the 24XX */
+#define OMAP_SYSCLKDIV_SHIFT				6
+#define OMAP_SYSCLKDIV_MASK				(0x3 << 6)
+#define OMAP_AUTOEXTCLKMODE_SHIFT			3
+#define OMAP_AUTOEXTCLKMODE_MASK			(0x3 << 3)
+#define OMAP_SYSCLKSEL_SHIFT				0
+#define OMAP_SYSCLKSEL_MASK				(0x3 << 0)
+
+/* PM_EVGENCTRL_MPU */
+#define OMAP_OFFLOADMODE_SHIFT				3
+#define OMAP_OFFLOADMODE_MASK				(0x3 << 3)
+#define OMAP_ONLOADMODE_SHIFT				1
+#define OMAP_ONLOADMODE_MASK				(0x3 << 1)
+#define OMAP_ENABLE_MASK				(1 << 0)
+
+/* PRM_RSTTIME */
+/* Named RM_RSTTIME_WKUP on the 24xx */
+#define OMAP_RSTTIME2_SHIFT				8
+#define OMAP_RSTTIME2_MASK				(0x1f << 8)
+#define OMAP_RSTTIME1_SHIFT				0
+#define OMAP_RSTTIME1_MASK				(0xff << 0)
+
+/* PRM_RSTCTRL */
+/* Named RM_RSTCTRL_WKUP on the 24xx */
+/* 2420 calls RST_DPLL3 'RST_DPLL' */
+#define OMAP_RST_DPLL3_MASK				(1 << 2)
+#define OMAP_RST_GS_MASK				(1 << 1)
+
+
+/*
+ * Bits common to module-shared registers
+ *
+ * Not all registers of a particular type support all of these bits -
+ * check TRM if you are unsure
+ */
+
+/*
+ * 24XX: RM_RSTST_MPU and RM_RSTST_DSP - on 24XX, 'COREDOMAINWKUP_RST' is
+ *	 called 'COREWKUP_RST'
+ *
+ * 3430: RM_RSTST_IVA2, RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSS,
+ *	 RM_RSTST_CAM, RM_RSTST_PER, RM_RSTST_NEON
+ */
+#define OMAP_COREDOMAINWKUP_RST_MASK			(1 << 3)
+
+/*
+ * 24XX: RM_RSTST_MPU, RM_RSTST_GFX, RM_RSTST_DSP
+ *
+ * 2430: RM_RSTST_MDM
+ *
+ * 3430: RM_RSTST_CORE, RM_RSTST_EMU
+ */
+#define OMAP_DOMAINWKUP_RST_MASK			(1 << 2)
+
+/*
+ * 24XX: RM_RSTST_MPU, RM_RSTST_WKUP, RM_RSTST_DSP
+ *	 On 24XX, 'GLOBALWARM_RST' is called 'GLOBALWMPU_RST'.
+ *
+ * 2430: RM_RSTST_MDM
+ *
+ * 3430: RM_RSTST_CORE, RM_RSTST_EMU
+ */
+#define OMAP_GLOBALWARM_RST_MASK			(1 << 1)
+#define OMAP_GLOBALCOLD_RST_MASK			(1 << 0)
+
+/*
+ * 24XX: PM_WKDEP_GFX, PM_WKDEP_MPU, PM_WKDEP_CORE, PM_WKDEP_DSP
+ *	 2420 TRM sometimes uses "EN_WAKEUP" instead of "EN_WKUP"
+ *
+ * 2430: PM_WKDEP_MDM
+ *
+ * 3430: PM_WKDEP_IVA2, PM_WKDEP_GFX, PM_WKDEP_DSS, PM_WKDEP_CAM,
+ *	 PM_WKDEP_PER
+ */
+#define OMAP_EN_WKUP_SHIFT				4
+#define OMAP_EN_WKUP_MASK				(1 << 4)
+
+/*
+ * 24XX: PM_PWSTCTRL_MPU, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
+ *	 PM_PWSTCTRL_DSP
+ *
+ * 2430: PM_PWSTCTRL_MDM
+ *
+ * 3430: PM_PWSTCTRL_IVA2, PM_PWSTCTRL_CORE, PM_PWSTCTRL_GFX,
+ *	 PM_PWSTCTRL_DSS, PM_PWSTCTRL_CAM, PM_PWSTCTRL_PER,
+ *	 PM_PWSTCTRL_NEON
+ */
+#define OMAP_LOGICRETSTATE_MASK				(1 << 2)
+
+
+/*
+ * MAX_MODULE_HARDRESET_WAIT: Maximum microseconds to wait for an OMAP
+ * submodule to exit hardreset
+ */
+#define MAX_MODULE_HARDRESET_WAIT		10000
+
+
+#endif
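With the register offsets, bitfield masks, and omap2_prm_{set,clear}_mod_reg_bits() declarations now gathered in one header, module-shared bits such as EN_WKUP can be toggled without open-coding the read-modify-write. A minimal sketch, assuming OMAP3430_DSS_MOD from prcm-common.h as the target domain; the enable/disable wrappers are hypothetical:

#include "prm2xxx_3xxx.h"

/* Hypothetical wrappers: toggle the EN_WKUP bit in the DSS domain's PM_WKDEP */
static void omap3_dss_enable_wkup_dep(void)
{
	omap2_prm_set_mod_reg_bits(OMAP_EN_WKUP_MASK, OMAP3430_DSS_MOD,
				   PM_WKDEP);
}

static void omap3_dss_disable_wkup_dep(void)
{
	omap2_prm_clear_mod_reg_bits(OMAP_EN_WKUP_MASK, OMAP3430_DSS_MOD,
				     PM_WKDEP);
}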
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index a1ff918..a2a04bf 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -15,12 +15,13 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/io.h>
 
 #include <plat/common.h>
 #include <plat/cpu.h>
 #include <plat/prcm.h>
 
-#include "prm.h"
+#include "prm44xx.h"
 #include "prm-regbits-44xx.h"
 
 /*
@@ -29,6 +30,70 @@
  */
 #define OMAP4_RST_CTRL_ST_OFFSET		4
 
+/* PRM low-level functions */
+
+/* Read a register in a CM/PRM instance in the PRM module */
+u32 omap4_prm_read_inst_reg(s16 inst, u16 reg)
+{
+	return __raw_readl(OMAP44XX_PRM_REGADDR(inst, reg));
+}
+
+/* Write into a register in a CM/PRM instance in the PRM module */
+void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 reg)
+{
+	__raw_writel(val, OMAP44XX_PRM_REGADDR(inst, reg));
+}
+
+/* Read-modify-write a register in a PRM module. Caller must lock */
+u32 omap4_prm_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 reg)
+{
+	u32 v;
+
+	v = omap4_prm_read_inst_reg(inst, reg);
+	v &= ~mask;
+	v |= bits;
+	omap4_prm_write_inst_reg(v, inst, reg);
+
+	return v;
+}
+
+/* Read a PRM register, AND it, and shift the result down to bit 0 */
+/* XXX deprecated */
+u32 omap4_prm_read_bits_shift(void __iomem *reg, u32 mask)
+{
+	u32 v;
+
+	v = __raw_readl(reg);
+	v &= mask;
+	v >>= __ffs(mask);
+
+	return v;
+}
+
+/* Read-modify-write a register in a PRM module. Caller must lock */
+/* XXX deprecated */
+u32 omap4_prm_rmw_reg_bits(u32 mask, u32 bits, void __iomem *reg)
+{
+	u32 v;
+
+	v = __raw_readl(reg);
+	v &= ~mask;
+	v |= bits;
+	__raw_writel(v, reg);
+
+	return v;
+}
+
+u32 omap4_prm_set_inst_reg_bits(u32 bits, s16 inst, s16 reg)
+{
+	return omap4_prm_rmw_inst_reg_bits(bits, bits, inst, reg);
+}
+
+u32 omap4_prm_clear_inst_reg_bits(u32 bits, s16 inst, s16 reg)
+{
+	return omap4_prm_rmw_inst_reg_bits(bits, 0x0, inst, reg);
+}
+
 /**
  * omap4_prm_is_hardreset_asserted - read the HW reset line state of
  * submodules contained in the hwmod module
@@ -114,3 +179,17 @@
 	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
 }
 
+void omap4_prm_global_warm_sw_reset(void)
+{
+	u32 v;
+
+	v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+				    OMAP4_RM_RSTCTRL);
+	v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
+	omap4_prm_write_inst_reg(v, OMAP4430_PRM_DEVICE_INST,
+				 OMAP4_RM_RSTCTRL);
+
+	/* OCP barrier */
+	v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+				    OMAP4_RM_RSTCTRL);
+}
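The new omap4_prm_*_inst_reg() accessors take a PRM instance offset plus a register offset, mirroring the module/register split used on OMAP2/3. The read-back at the end of omap4_prm_global_warm_sw_reset() above acts as an OCP barrier: it forces the posted write to RM_RSTCTRL out to the PRM before the function returns. A minimal sketch that reads the PRM revision register through the accessor, using the instance and offset macros from the reworked prm44xx.h below and assuming the accessor prototype is visible via that header; the helper itself is hypothetical:

#include <linux/types.h>

#include "prm44xx.h"

/* Hypothetical helper: return the raw REVISION_PRM register contents */
static u32 omap4_prm_read_revision(void)
{
	return omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
				       OMAP4_REVISION_PRM_OFFSET);
}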
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 59839db..67a0d3f 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -17,736 +17,762 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * XXX This file needs to be updated to align on one of "OMAP4", "OMAP44XX",
+ *     or "OMAP4430".
  */
 
 #ifndef __ARCH_ARM_MACH_OMAP2_PRM44XX_H
 #define __ARCH_ARM_MACH_OMAP2_PRM44XX_H
 
+#include "prcm-common.h"
+#include "prm.h"
+
+#define OMAP4430_PRM_BASE		0x4a306000
+
+#define OMAP44XX_PRM_REGADDR(inst, reg)				\
+	OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE +	(inst) + (reg))
+
+
+/* PRM instances */
+#define OMAP4430_PRM_OCP_SOCKET_INST	0x0000
+#define OMAP4430_PRM_CKGEN_INST		0x0100
+#define OMAP4430_PRM_MPU_INST		0x0300
+#define OMAP4430_PRM_TESLA_INST		0x0400
+#define OMAP4430_PRM_ABE_INST		0x0500
+#define OMAP4430_PRM_ALWAYS_ON_INST	0x0600
+#define OMAP4430_PRM_CORE_INST		0x0700
+#define OMAP4430_PRM_IVAHD_INST		0x0f00
+#define OMAP4430_PRM_CAM_INST		0x1000
+#define OMAP4430_PRM_DSS_INST		0x1100
+#define OMAP4430_PRM_GFX_INST		0x1200
+#define OMAP4430_PRM_L3INIT_INST		0x1300
+#define OMAP4430_PRM_L4PER_INST		0x1400
+#define OMAP4430_PRM_CEFUSE_INST		0x1600
+#define OMAP4430_PRM_WKUP_INST		0x1700
+#define OMAP4430_PRM_WKUP_CM_INST	0x1800
+#define OMAP4430_PRM_EMU_INST		0x1900
+#define OMAP4430_PRM_EMU_CM_INST		0x1a00
+#define OMAP4430_PRM_DEVICE_INST		0x1b00
+#define OMAP4430_PRM_INSTR_INST		0x1f00
+
+/* PRM clockdomain register offsets (from instance start) */
+#define OMAP4430_PRM_MPU_MPU_CDOFFS		0x0000
+#define OMAP4430_PRM_TESLA_TESLA_CDOFFS		0x0000
+#define OMAP4430_PRM_ABE_ABE_CDOFFS		0x0000
+#define OMAP4430_PRM_CORE_CORE_CDOFFS		0x0000
+#define OMAP4430_PRM_IVAHD_IVAHD_CDOFFS		0x0000
+#define OMAP4430_PRM_CAM_CAM_CDOFFS		0x0000
+#define OMAP4430_PRM_DSS_DSS_CDOFFS		0x0000
+#define OMAP4430_PRM_GFX_GFX_CDOFFS		0x0000
+#define OMAP4430_PRM_L3INIT_L3INIT_CDOFFS	0x0000
+#define OMAP4430_PRM_L4PER_L4PER_CDOFFS		0x0000
+#define OMAP4430_PRM_CEFUSE_CEFUSE_CDOFFS	0x0000
+#define OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS	0x0000
+#define OMAP4430_PRM_EMU_EMU_CDOFFS		0x0000
+#define OMAP4430_PRM_EMU_CM_EMU_CDOFFS		0x0000
+
+/* OMAP4 specific register offsets */
+#define OMAP4_RM_RSTCTRL				0x0000
+#define OMAP4_RM_RSTTIME				0x0004
+#define OMAP4_RM_RSTST					0x0008
+#define OMAP4_PM_PWSTCTRL				0x0000
+#define OMAP4_PM_PWSTST					0x0004
+
 
 /* PRM */
 
 /* PRM.OCP_SOCKET_PRM register offsets */
 #define OMAP4_REVISION_PRM_OFFSET			0x0000
-#define OMAP4430_REVISION_PRM				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4430_REVISION_PRM				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0000)
 #define OMAP4_PRM_IRQSTATUS_MPU_OFFSET			0x0010
-#define OMAP4430_PRM_IRQSTATUS_MPU			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0010)
+#define OMAP4430_PRM_IRQSTATUS_MPU			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0010)
 #define OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET		0x0014
-#define OMAP4430_PRM_IRQSTATUS_MPU_2			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0014)
+#define OMAP4430_PRM_IRQSTATUS_MPU_2			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0014)
 #define OMAP4_PRM_IRQENABLE_MPU_OFFSET			0x0018
-#define OMAP4430_PRM_IRQENABLE_MPU			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0018)
+#define OMAP4430_PRM_IRQENABLE_MPU			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0018)
 #define OMAP4_PRM_IRQENABLE_MPU_2_OFFSET		0x001c
-#define OMAP4430_PRM_IRQENABLE_MPU_2			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x001c)
+#define OMAP4430_PRM_IRQENABLE_MPU_2			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x001c)
 #define OMAP4_PRM_IRQSTATUS_DUCATI_OFFSET		0x0020
-#define OMAP4430_PRM_IRQSTATUS_DUCATI			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0020)
+#define OMAP4430_PRM_IRQSTATUS_DUCATI			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0020)
 #define OMAP4_PRM_IRQENABLE_DUCATI_OFFSET		0x0028
-#define OMAP4430_PRM_IRQENABLE_DUCATI			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0028)
+#define OMAP4430_PRM_IRQENABLE_DUCATI			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0028)
 #define OMAP4_PRM_IRQSTATUS_TESLA_OFFSET		0x0030
-#define OMAP4430_PRM_IRQSTATUS_TESLA			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0030)
+#define OMAP4430_PRM_IRQSTATUS_TESLA			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0030)
 #define OMAP4_PRM_IRQENABLE_TESLA_OFFSET		0x0038
-#define OMAP4430_PRM_IRQENABLE_TESLA			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0038)
+#define OMAP4430_PRM_IRQENABLE_TESLA			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0038)
 #define OMAP4_CM_PRM_PROFILING_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_PRM_PROFILING_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0040)
+#define OMAP4430_CM_PRM_PROFILING_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0040)
 
 /* PRM.CKGEN_PRM register offsets */
 #define OMAP4_CM_ABE_DSS_SYS_CLKSEL_OFFSET		0x0000
-#define OMAP4430_CM_ABE_DSS_SYS_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0000)
+#define OMAP4430_CM_ABE_DSS_SYS_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_INST, 0x0000)
 #define OMAP4_CM_L4_WKUP_CLKSEL_OFFSET			0x0008
-#define OMAP4430_CM_L4_WKUP_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0008)
+#define OMAP4430_CM_L4_WKUP_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_INST, 0x0008)
 #define OMAP4_CM_ABE_PLL_REF_CLKSEL_OFFSET		0x000c
-#define OMAP4430_CM_ABE_PLL_REF_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x000c)
+#define OMAP4430_CM_ABE_PLL_REF_CLKSEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_INST, 0x000c)
 #define OMAP4_CM_SYS_CLKSEL_OFFSET			0x0010
-#define OMAP4430_CM_SYS_CLKSEL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0010)
+#define OMAP4430_CM_SYS_CLKSEL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_INST, 0x0010)
 
 /* PRM.MPU_PRM register offsets */
 #define OMAP4_PM_MPU_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_MPU_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0000)
+#define OMAP4430_PM_MPU_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_INST, 0x0000)
 #define OMAP4_PM_MPU_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_MPU_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0004)
+#define OMAP4430_PM_MPU_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_INST, 0x0004)
 #define OMAP4_RM_MPU_RSTST_OFFSET			0x0014
-#define OMAP4430_RM_MPU_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0014)
+#define OMAP4430_RM_MPU_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_INST, 0x0014)
 #define OMAP4_RM_MPU_MPU_CONTEXT_OFFSET			0x0024
-#define OMAP4430_RM_MPU_MPU_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0024)
+#define OMAP4430_RM_MPU_MPU_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_INST, 0x0024)
 
 /* PRM.TESLA_PRM register offsets */
 #define OMAP4_PM_TESLA_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_TESLA_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0000)
+#define OMAP4430_PM_TESLA_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_INST, 0x0000)
 #define OMAP4_PM_TESLA_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_TESLA_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0004)
+#define OMAP4430_PM_TESLA_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_INST, 0x0004)
 #define OMAP4_RM_TESLA_RSTCTRL_OFFSET			0x0010
-#define OMAP4430_RM_TESLA_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0010)
+#define OMAP4430_RM_TESLA_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_INST, 0x0010)
 #define OMAP4_RM_TESLA_RSTST_OFFSET			0x0014
-#define OMAP4430_RM_TESLA_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0014)
+#define OMAP4430_RM_TESLA_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_INST, 0x0014)
 #define OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_TESLA_TESLA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0024)
+#define OMAP4430_RM_TESLA_TESLA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_INST, 0x0024)
 
 /* PRM.ABE_PRM register offsets */
 #define OMAP4_PM_ABE_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_ABE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0000)
+#define OMAP4430_PM_ABE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0000)
 #define OMAP4_PM_ABE_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_ABE_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0004)
+#define OMAP4430_PM_ABE_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0004)
 #define OMAP4_RM_ABE_AESS_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_ABE_AESS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x002c)
+#define OMAP4430_RM_ABE_AESS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x002c)
 #define OMAP4_PM_ABE_PDM_WKDEP_OFFSET			0x0030
-#define OMAP4430_PM_ABE_PDM_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0030)
+#define OMAP4430_PM_ABE_PDM_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0030)
 #define OMAP4_RM_ABE_PDM_CONTEXT_OFFSET			0x0034
-#define OMAP4430_RM_ABE_PDM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0034)
+#define OMAP4430_RM_ABE_PDM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0034)
 #define OMAP4_PM_ABE_DMIC_WKDEP_OFFSET			0x0038
-#define OMAP4430_PM_ABE_DMIC_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0038)
+#define OMAP4430_PM_ABE_DMIC_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0038)
 #define OMAP4_RM_ABE_DMIC_CONTEXT_OFFSET		0x003c
-#define OMAP4430_RM_ABE_DMIC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x003c)
+#define OMAP4430_RM_ABE_DMIC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x003c)
 #define OMAP4_PM_ABE_MCASP_WKDEP_OFFSET			0x0040
-#define OMAP4430_PM_ABE_MCASP_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0040)
+#define OMAP4430_PM_ABE_MCASP_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0040)
 #define OMAP4_RM_ABE_MCASP_CONTEXT_OFFSET		0x0044
-#define OMAP4430_RM_ABE_MCASP_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0044)
+#define OMAP4430_RM_ABE_MCASP_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0044)
 #define OMAP4_PM_ABE_MCBSP1_WKDEP_OFFSET		0x0048
-#define OMAP4430_PM_ABE_MCBSP1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0048)
+#define OMAP4430_PM_ABE_MCBSP1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0048)
 #define OMAP4_RM_ABE_MCBSP1_CONTEXT_OFFSET		0x004c
-#define OMAP4430_RM_ABE_MCBSP1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x004c)
+#define OMAP4430_RM_ABE_MCBSP1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x004c)
 #define OMAP4_PM_ABE_MCBSP2_WKDEP_OFFSET		0x0050
-#define OMAP4430_PM_ABE_MCBSP2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0050)
+#define OMAP4430_PM_ABE_MCBSP2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0050)
 #define OMAP4_RM_ABE_MCBSP2_CONTEXT_OFFSET		0x0054
-#define OMAP4430_RM_ABE_MCBSP2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0054)
+#define OMAP4430_RM_ABE_MCBSP2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0054)
 #define OMAP4_PM_ABE_MCBSP3_WKDEP_OFFSET		0x0058
-#define OMAP4430_PM_ABE_MCBSP3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0058)
+#define OMAP4430_PM_ABE_MCBSP3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0058)
 #define OMAP4_RM_ABE_MCBSP3_CONTEXT_OFFSET		0x005c
-#define OMAP4430_RM_ABE_MCBSP3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x005c)
+#define OMAP4430_RM_ABE_MCBSP3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x005c)
 #define OMAP4_PM_ABE_SLIMBUS_WKDEP_OFFSET		0x0060
-#define OMAP4430_PM_ABE_SLIMBUS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0060)
+#define OMAP4430_PM_ABE_SLIMBUS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0060)
 #define OMAP4_RM_ABE_SLIMBUS_CONTEXT_OFFSET		0x0064
-#define OMAP4430_RM_ABE_SLIMBUS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0064)
+#define OMAP4430_RM_ABE_SLIMBUS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0064)
 #define OMAP4_PM_ABE_TIMER5_WKDEP_OFFSET		0x0068
-#define OMAP4430_PM_ABE_TIMER5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0068)
+#define OMAP4430_PM_ABE_TIMER5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0068)
 #define OMAP4_RM_ABE_TIMER5_CONTEXT_OFFSET		0x006c
-#define OMAP4430_RM_ABE_TIMER5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x006c)
+#define OMAP4430_RM_ABE_TIMER5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x006c)
 #define OMAP4_PM_ABE_TIMER6_WKDEP_OFFSET		0x0070
-#define OMAP4430_PM_ABE_TIMER6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0070)
+#define OMAP4430_PM_ABE_TIMER6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0070)
 #define OMAP4_RM_ABE_TIMER6_CONTEXT_OFFSET		0x0074
-#define OMAP4430_RM_ABE_TIMER6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0074)
+#define OMAP4430_RM_ABE_TIMER6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0074)
 #define OMAP4_PM_ABE_TIMER7_WKDEP_OFFSET		0x0078
-#define OMAP4430_PM_ABE_TIMER7_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0078)
+#define OMAP4430_PM_ABE_TIMER7_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0078)
 #define OMAP4_RM_ABE_TIMER7_CONTEXT_OFFSET		0x007c
-#define OMAP4430_RM_ABE_TIMER7_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x007c)
+#define OMAP4430_RM_ABE_TIMER7_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x007c)
 #define OMAP4_PM_ABE_TIMER8_WKDEP_OFFSET		0x0080
-#define OMAP4430_PM_ABE_TIMER8_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0080)
+#define OMAP4430_PM_ABE_TIMER8_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0080)
 #define OMAP4_RM_ABE_TIMER8_CONTEXT_OFFSET		0x0084
-#define OMAP4430_RM_ABE_TIMER8_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0084)
+#define OMAP4430_RM_ABE_TIMER8_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0084)
 #define OMAP4_PM_ABE_WDT3_WKDEP_OFFSET			0x0088
-#define OMAP4430_PM_ABE_WDT3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0088)
+#define OMAP4430_PM_ABE_WDT3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x0088)
 #define OMAP4_RM_ABE_WDT3_CONTEXT_OFFSET		0x008c
-#define OMAP4430_RM_ABE_WDT3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x008c)
+#define OMAP4430_RM_ABE_WDT3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_INST, 0x008c)
 
 /* PRM.ALWAYS_ON_PRM register offsets */
 #define OMAP4_RM_ALWON_MDMINTC_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_ALWON_MDMINTC_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0024)
+#define OMAP4430_RM_ALWON_MDMINTC_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x0024)
 #define OMAP4_PM_ALWON_SR_MPU_WKDEP_OFFSET		0x0028
-#define OMAP4430_PM_ALWON_SR_MPU_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4430_PM_ALWON_SR_MPU_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x0028)
 #define OMAP4_RM_ALWON_SR_MPU_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_ALWON_SR_MPU_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x002c)
+#define OMAP4430_RM_ALWON_SR_MPU_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x002c)
 #define OMAP4_PM_ALWON_SR_IVA_WKDEP_OFFSET		0x0030
-#define OMAP4430_PM_ALWON_SR_IVA_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4430_PM_ALWON_SR_IVA_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x0030)
 #define OMAP4_RM_ALWON_SR_IVA_CONTEXT_OFFSET		0x0034
-#define OMAP4430_RM_ALWON_SR_IVA_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0034)
+#define OMAP4430_RM_ALWON_SR_IVA_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x0034)
 #define OMAP4_PM_ALWON_SR_CORE_WKDEP_OFFSET		0x0038
-#define OMAP4430_PM_ALWON_SR_CORE_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0038)
+#define OMAP4430_PM_ALWON_SR_CORE_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x0038)
 #define OMAP4_RM_ALWON_SR_CORE_CONTEXT_OFFSET		0x003c
-#define OMAP4430_RM_ALWON_SR_CORE_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x003c)
+#define OMAP4430_RM_ALWON_SR_CORE_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_INST, 0x003c)
 
 /* PRM.CORE_PRM register offsets */
 #define OMAP4_PM_CORE_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_CORE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0000)
+#define OMAP4430_PM_CORE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0000)
 #define OMAP4_PM_CORE_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_CORE_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0004)
+#define OMAP4430_PM_CORE_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0004)
 #define OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_L3_1_L3_1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0024)
+#define OMAP4430_RM_L3_1_L3_1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0024)
 #define OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET		0x0124
-#define OMAP4430_RM_L3_2_L3_2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0124)
+#define OMAP4430_RM_L3_2_L3_2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0124)
 #define OMAP4_RM_L3_2_GPMC_CONTEXT_OFFSET		0x012c
-#define OMAP4430_RM_L3_2_GPMC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x012c)
+#define OMAP4430_RM_L3_2_GPMC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x012c)
 #define OMAP4_RM_L3_2_OCMC_RAM_CONTEXT_OFFSET		0x0134
-#define OMAP4430_RM_L3_2_OCMC_RAM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0134)
+#define OMAP4430_RM_L3_2_OCMC_RAM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0134)
 #define OMAP4_RM_DUCATI_RSTCTRL_OFFSET			0x0210
-#define OMAP4430_RM_DUCATI_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0210)
+#define OMAP4430_RM_DUCATI_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0210)
 #define OMAP4_RM_DUCATI_RSTST_OFFSET			0x0214
-#define OMAP4430_RM_DUCATI_RSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0214)
+#define OMAP4430_RM_DUCATI_RSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0214)
 #define OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET		0x0224
-#define OMAP4430_RM_DUCATI_DUCATI_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0224)
+#define OMAP4430_RM_DUCATI_DUCATI_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0224)
 #define OMAP4_RM_SDMA_SDMA_CONTEXT_OFFSET		0x0324
-#define OMAP4430_RM_SDMA_SDMA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0324)
+#define OMAP4430_RM_SDMA_SDMA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0324)
 #define OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET		0x0424
-#define OMAP4430_RM_MEMIF_DMM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0424)
+#define OMAP4430_RM_MEMIF_DMM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0424)
 #define OMAP4_RM_MEMIF_EMIF_FW_CONTEXT_OFFSET		0x042c
-#define OMAP4430_RM_MEMIF_EMIF_FW_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x042c)
+#define OMAP4430_RM_MEMIF_EMIF_FW_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x042c)
 #define OMAP4_RM_MEMIF_EMIF_1_CONTEXT_OFFSET		0x0434
-#define OMAP4430_RM_MEMIF_EMIF_1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0434)
+#define OMAP4430_RM_MEMIF_EMIF_1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0434)
 #define OMAP4_RM_MEMIF_EMIF_2_CONTEXT_OFFSET		0x043c
-#define OMAP4430_RM_MEMIF_EMIF_2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x043c)
+#define OMAP4430_RM_MEMIF_EMIF_2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x043c)
 #define OMAP4_RM_MEMIF_DLL_CONTEXT_OFFSET		0x0444
-#define OMAP4430_RM_MEMIF_DLL_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0444)
+#define OMAP4430_RM_MEMIF_DLL_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0444)
 #define OMAP4_RM_MEMIF_EMIF_H1_CONTEXT_OFFSET		0x0454
-#define OMAP4430_RM_MEMIF_EMIF_H1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0454)
+#define OMAP4430_RM_MEMIF_EMIF_H1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0454)
 #define OMAP4_RM_MEMIF_EMIF_H2_CONTEXT_OFFSET		0x045c
-#define OMAP4430_RM_MEMIF_EMIF_H2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x045c)
+#define OMAP4430_RM_MEMIF_EMIF_H2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x045c)
 #define OMAP4_RM_MEMIF_DLL_H_CONTEXT_OFFSET		0x0464
-#define OMAP4430_RM_MEMIF_DLL_H_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0464)
+#define OMAP4430_RM_MEMIF_DLL_H_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0464)
 #define OMAP4_RM_D2D_SAD2D_CONTEXT_OFFSET		0x0524
-#define OMAP4430_RM_D2D_SAD2D_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0524)
-#define OMAP4_RM_D2D_MODEM_ICR_CONTEXT_OFFSET		0x052c
-#define OMAP4430_RM_D2D_MODEM_ICR_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x052c)
+#define OMAP4430_RM_D2D_SAD2D_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0524)
+#define OMAP4_RM_D2D_INSTEM_ICR_CONTEXT_OFFSET		0x052c
+#define OMAP4430_RM_D2D_INSTEM_ICR_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x052c)
 #define OMAP4_RM_D2D_SAD2D_FW_CONTEXT_OFFSET		0x0534
-#define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0534)
+#define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0534)
 #define OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET		0x0624
-#define OMAP4430_RM_L4CFG_L4_CFG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0624)
+#define OMAP4430_RM_L4CFG_L4_CFG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0624)
 #define OMAP4_RM_L4CFG_HW_SEM_CONTEXT_OFFSET		0x062c
-#define OMAP4430_RM_L4CFG_HW_SEM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x062c)
+#define OMAP4430_RM_L4CFG_HW_SEM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x062c)
 #define OMAP4_RM_L4CFG_MAILBOX_CONTEXT_OFFSET		0x0634
-#define OMAP4430_RM_L4CFG_MAILBOX_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0634)
+#define OMAP4430_RM_L4CFG_MAILBOX_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0634)
 #define OMAP4_RM_L4CFG_SAR_ROM_CONTEXT_OFFSET		0x063c
-#define OMAP4430_RM_L4CFG_SAR_ROM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x063c)
+#define OMAP4430_RM_L4CFG_SAR_ROM_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x063c)
 #define OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET		0x0724
-#define OMAP4430_RM_L3INSTR_L3_3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0724)
+#define OMAP4430_RM_L3INSTR_L3_3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0724)
 #define OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET	0x072c
-#define OMAP4430_RM_L3INSTR_L3_INSTR_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x072c)
+#define OMAP4430_RM_L3INSTR_L3_INSTR_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x072c)
 #define OMAP4_RM_L3INSTR_OCP_WP1_CONTEXT_OFFSET		0x0744
-#define OMAP4430_RM_L3INSTR_OCP_WP1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0744)
+#define OMAP4430_RM_L3INSTR_OCP_WP1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_INST, 0x0744)
 
 /* PRM.IVAHD_PRM register offsets */
 #define OMAP4_PM_IVAHD_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_IVAHD_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0000)
+#define OMAP4430_PM_IVAHD_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_INST, 0x0000)
 #define OMAP4_PM_IVAHD_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_IVAHD_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0004)
+#define OMAP4430_PM_IVAHD_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_INST, 0x0004)
 #define OMAP4_RM_IVAHD_RSTCTRL_OFFSET			0x0010
-#define OMAP4430_RM_IVAHD_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0010)
+#define OMAP4430_RM_IVAHD_RSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_INST, 0x0010)
 #define OMAP4_RM_IVAHD_RSTST_OFFSET			0x0014
-#define OMAP4430_RM_IVAHD_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0014)
+#define OMAP4430_RM_IVAHD_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_INST, 0x0014)
 #define OMAP4_RM_IVAHD_IVAHD_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_IVAHD_IVAHD_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0024)
+#define OMAP4430_RM_IVAHD_IVAHD_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_INST, 0x0024)
 #define OMAP4_RM_IVAHD_SL2_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_IVAHD_SL2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x002c)
+#define OMAP4430_RM_IVAHD_SL2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_INST, 0x002c)
 
 /* PRM.CAM_PRM register offsets */
 #define OMAP4_PM_CAM_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_CAM_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0000)
+#define OMAP4430_PM_CAM_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_INST, 0x0000)
 #define OMAP4_PM_CAM_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_CAM_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0004)
+#define OMAP4430_PM_CAM_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_INST, 0x0004)
 #define OMAP4_RM_CAM_ISS_CONTEXT_OFFSET			0x0024
-#define OMAP4430_RM_CAM_ISS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0024)
+#define OMAP4430_RM_CAM_ISS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_INST, 0x0024)
 #define OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_CAM_FDIF_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x002c)
+#define OMAP4430_RM_CAM_FDIF_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_INST, 0x002c)
 
 /* PRM.DSS_PRM register offsets */
 #define OMAP4_PM_DSS_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_DSS_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0000)
+#define OMAP4430_PM_DSS_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_INST, 0x0000)
 #define OMAP4_PM_DSS_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_DSS_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0004)
+#define OMAP4430_PM_DSS_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_INST, 0x0004)
 #define OMAP4_PM_DSS_DSS_WKDEP_OFFSET			0x0020
-#define OMAP4430_PM_DSS_DSS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0020)
+#define OMAP4430_PM_DSS_DSS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_INST, 0x0020)
 #define OMAP4_RM_DSS_DSS_CONTEXT_OFFSET			0x0024
-#define OMAP4430_RM_DSS_DSS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0024)
+#define OMAP4430_RM_DSS_DSS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_INST, 0x0024)
 #define OMAP4_RM_DSS_DEISS_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_DSS_DEISS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x002c)
+#define OMAP4430_RM_DSS_DEISS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_INST, 0x002c)
 
 /* PRM.GFX_PRM register offsets */
 #define OMAP4_PM_GFX_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_GFX_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0000)
+#define OMAP4430_PM_GFX_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_INST, 0x0000)
 #define OMAP4_PM_GFX_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_GFX_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0004)
+#define OMAP4430_PM_GFX_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_INST, 0x0004)
 #define OMAP4_RM_GFX_GFX_CONTEXT_OFFSET			0x0024
-#define OMAP4430_RM_GFX_GFX_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0024)
+#define OMAP4430_RM_GFX_GFX_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_INST, 0x0024)
 
 /* PRM.L3INIT_PRM register offsets */
 #define OMAP4_PM_L3INIT_PWRSTCTRL_OFFSET		0x0000
-#define OMAP4430_PM_L3INIT_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0000)
+#define OMAP4430_PM_L3INIT_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0000)
 #define OMAP4_PM_L3INIT_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_L3INIT_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0004)
+#define OMAP4430_PM_L3INIT_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0004)
 #define OMAP4_PM_L3INIT_MMC1_WKDEP_OFFSET		0x0028
-#define OMAP4430_PM_L3INIT_MMC1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0028)
+#define OMAP4430_PM_L3INIT_MMC1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0028)
 #define OMAP4_RM_L3INIT_MMC1_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_L3INIT_MMC1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x002c)
+#define OMAP4430_RM_L3INIT_MMC1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x002c)
 #define OMAP4_PM_L3INIT_MMC2_WKDEP_OFFSET		0x0030
-#define OMAP4430_PM_L3INIT_MMC2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0030)
+#define OMAP4430_PM_L3INIT_MMC2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0030)
 #define OMAP4_RM_L3INIT_MMC2_CONTEXT_OFFSET		0x0034
-#define OMAP4430_RM_L3INIT_MMC2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0034)
+#define OMAP4430_RM_L3INIT_MMC2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0034)
 #define OMAP4_PM_L3INIT_HSI_WKDEP_OFFSET		0x0038
-#define OMAP4430_PM_L3INIT_HSI_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0038)
+#define OMAP4430_PM_L3INIT_HSI_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0038)
 #define OMAP4_RM_L3INIT_HSI_CONTEXT_OFFSET		0x003c
-#define OMAP4430_RM_L3INIT_HSI_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x003c)
+#define OMAP4430_RM_L3INIT_HSI_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x003c)
 #define OMAP4_PM_L3INIT_UNIPRO1_WKDEP_OFFSET		0x0040
-#define OMAP4430_PM_L3INIT_UNIPRO1_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0040)
+#define OMAP4430_PM_L3INIT_UNIPRO1_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0040)
 #define OMAP4_RM_L3INIT_UNIPRO1_CONTEXT_OFFSET		0x0044
-#define OMAP4430_RM_L3INIT_UNIPRO1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0044)
+#define OMAP4430_RM_L3INIT_UNIPRO1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0044)
 #define OMAP4_PM_L3INIT_USB_HOST_WKDEP_OFFSET		0x0058
-#define OMAP4430_PM_L3INIT_USB_HOST_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0058)
+#define OMAP4430_PM_L3INIT_USB_HOST_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0058)
 #define OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET		0x005c
-#define OMAP4430_RM_L3INIT_USB_HOST_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x005c)
+#define OMAP4430_RM_L3INIT_USB_HOST_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x005c)
 #define OMAP4_PM_L3INIT_USB_OTG_WKDEP_OFFSET		0x0060
-#define OMAP4430_PM_L3INIT_USB_OTG_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0060)
+#define OMAP4430_PM_L3INIT_USB_OTG_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0060)
 #define OMAP4_RM_L3INIT_USB_OTG_CONTEXT_OFFSET		0x0064
-#define OMAP4430_RM_L3INIT_USB_OTG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0064)
+#define OMAP4430_RM_L3INIT_USB_OTG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0064)
 #define OMAP4_PM_L3INIT_USB_TLL_WKDEP_OFFSET		0x0068
-#define OMAP4430_PM_L3INIT_USB_TLL_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0068)
+#define OMAP4430_PM_L3INIT_USB_TLL_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0068)
 #define OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET		0x006c
-#define OMAP4430_RM_L3INIT_USB_TLL_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x006c)
+#define OMAP4430_RM_L3INIT_USB_TLL_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x006c)
 #define OMAP4_RM_L3INIT_P1500_CONTEXT_OFFSET		0x007c
-#define OMAP4430_RM_L3INIT_P1500_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x007c)
+#define OMAP4430_RM_L3INIT_P1500_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x007c)
 #define OMAP4_RM_L3INIT_EMAC_CONTEXT_OFFSET		0x0084
-#define OMAP4430_RM_L3INIT_EMAC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0084)
+#define OMAP4430_RM_L3INIT_EMAC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0084)
 #define OMAP4_PM_L3INIT_SATA_WKDEP_OFFSET		0x0088
-#define OMAP4430_PM_L3INIT_SATA_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0088)
+#define OMAP4430_PM_L3INIT_SATA_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0088)
 #define OMAP4_RM_L3INIT_SATA_CONTEXT_OFFSET		0x008c
-#define OMAP4430_RM_L3INIT_SATA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x008c)
+#define OMAP4430_RM_L3INIT_SATA_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x008c)
 #define OMAP4_RM_L3INIT_TPPSS_CONTEXT_OFFSET		0x0094
-#define OMAP4430_RM_L3INIT_TPPSS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0094)
+#define OMAP4430_RM_L3INIT_TPPSS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0094)
 #define OMAP4_PM_L3INIT_PCIESS_WKDEP_OFFSET		0x0098
-#define OMAP4430_PM_L3INIT_PCIESS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0098)
+#define OMAP4430_PM_L3INIT_PCIESS_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x0098)
 #define OMAP4_RM_L3INIT_PCIESS_CONTEXT_OFFSET		0x009c
-#define OMAP4430_RM_L3INIT_PCIESS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x009c)
+#define OMAP4430_RM_L3INIT_PCIESS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x009c)
 #define OMAP4_RM_L3INIT_CCPTX_CONTEXT_OFFSET		0x00ac
-#define OMAP4430_RM_L3INIT_CCPTX_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00ac)
+#define OMAP4430_RM_L3INIT_CCPTX_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00ac)
 #define OMAP4_PM_L3INIT_XHPI_WKDEP_OFFSET		0x00c0
-#define OMAP4430_PM_L3INIT_XHPI_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c0)
+#define OMAP4430_PM_L3INIT_XHPI_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00c0)
 #define OMAP4_RM_L3INIT_XHPI_CONTEXT_OFFSET		0x00c4
-#define OMAP4430_RM_L3INIT_XHPI_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c4)
+#define OMAP4430_RM_L3INIT_XHPI_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00c4)
 #define OMAP4_PM_L3INIT_MMC6_WKDEP_OFFSET		0x00c8
-#define OMAP4430_PM_L3INIT_MMC6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c8)
+#define OMAP4430_PM_L3INIT_MMC6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00c8)
 #define OMAP4_RM_L3INIT_MMC6_CONTEXT_OFFSET		0x00cc
-#define OMAP4430_RM_L3INIT_MMC6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00cc)
+#define OMAP4430_RM_L3INIT_MMC6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00cc)
 #define OMAP4_PM_L3INIT_USB_HOST_FS_WKDEP_OFFSET	0x00d0
-#define OMAP4430_PM_L3INIT_USB_HOST_FS_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d0)
+#define OMAP4430_PM_L3INIT_USB_HOST_FS_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00d0)
 #define OMAP4_RM_L3INIT_USB_HOST_FS_CONTEXT_OFFSET	0x00d4
-#define OMAP4430_RM_L3INIT_USB_HOST_FS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d4)
+#define OMAP4430_RM_L3INIT_USB_HOST_FS_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00d4)
 #define OMAP4_RM_L3INIT_USBPHYOCP2SCP_CONTEXT_OFFSET	0x00e4
-#define OMAP4430_RM_L3INIT_USBPHYOCP2SCP_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00e4)
+#define OMAP4430_RM_L3INIT_USBPHYOCP2SCP_CONTEXT	OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_INST, 0x00e4)
 
 /* PRM.L4PER_PRM register offsets */
 #define OMAP4_PM_L4PER_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_L4PER_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0000)
+#define OMAP4430_PM_L4PER_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0000)
 #define OMAP4_PM_L4PER_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_L4PER_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0004)
+#define OMAP4430_PM_L4PER_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0004)
 #define OMAP4_RM_L4PER_ADC_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_L4PER_ADC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0024)
+#define OMAP4430_RM_L4PER_ADC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0024)
 #define OMAP4_PM_L4PER_DMTIMER10_WKDEP_OFFSET		0x0028
-#define OMAP4430_PM_L4PER_DMTIMER10_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0028)
+#define OMAP4430_PM_L4PER_DMTIMER10_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0028)
 #define OMAP4_RM_L4PER_DMTIMER10_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_L4PER_DMTIMER10_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x002c)
+#define OMAP4430_RM_L4PER_DMTIMER10_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x002c)
 #define OMAP4_PM_L4PER_DMTIMER11_WKDEP_OFFSET		0x0030
-#define OMAP4430_PM_L4PER_DMTIMER11_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0030)
+#define OMAP4430_PM_L4PER_DMTIMER11_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0030)
 #define OMAP4_RM_L4PER_DMTIMER11_CONTEXT_OFFSET		0x0034
-#define OMAP4430_RM_L4PER_DMTIMER11_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0034)
+#define OMAP4430_RM_L4PER_DMTIMER11_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0034)
 #define OMAP4_PM_L4PER_DMTIMER2_WKDEP_OFFSET		0x0038
-#define OMAP4430_PM_L4PER_DMTIMER2_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0038)
+#define OMAP4430_PM_L4PER_DMTIMER2_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0038)
 #define OMAP4_RM_L4PER_DMTIMER2_CONTEXT_OFFSET		0x003c
-#define OMAP4430_RM_L4PER_DMTIMER2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x003c)
+#define OMAP4430_RM_L4PER_DMTIMER2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x003c)
 #define OMAP4_PM_L4PER_DMTIMER3_WKDEP_OFFSET		0x0040
-#define OMAP4430_PM_L4PER_DMTIMER3_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0040)
+#define OMAP4430_PM_L4PER_DMTIMER3_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0040)
 #define OMAP4_RM_L4PER_DMTIMER3_CONTEXT_OFFSET		0x0044
-#define OMAP4430_RM_L4PER_DMTIMER3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0044)
+#define OMAP4430_RM_L4PER_DMTIMER3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0044)
 #define OMAP4_PM_L4PER_DMTIMER4_WKDEP_OFFSET		0x0048
-#define OMAP4430_PM_L4PER_DMTIMER4_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0048)
+#define OMAP4430_PM_L4PER_DMTIMER4_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0048)
 #define OMAP4_RM_L4PER_DMTIMER4_CONTEXT_OFFSET		0x004c
-#define OMAP4430_RM_L4PER_DMTIMER4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x004c)
+#define OMAP4430_RM_L4PER_DMTIMER4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x004c)
 #define OMAP4_PM_L4PER_DMTIMER9_WKDEP_OFFSET		0x0050
-#define OMAP4430_PM_L4PER_DMTIMER9_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0050)
+#define OMAP4430_PM_L4PER_DMTIMER9_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0050)
 #define OMAP4_RM_L4PER_DMTIMER9_CONTEXT_OFFSET		0x0054
-#define OMAP4430_RM_L4PER_DMTIMER9_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0054)
+#define OMAP4430_RM_L4PER_DMTIMER9_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0054)
 #define OMAP4_RM_L4PER_ELM_CONTEXT_OFFSET		0x005c
-#define OMAP4430_RM_L4PER_ELM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x005c)
+#define OMAP4430_RM_L4PER_ELM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x005c)
 #define OMAP4_PM_L4PER_GPIO2_WKDEP_OFFSET		0x0060
-#define OMAP4430_PM_L4PER_GPIO2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0060)
+#define OMAP4430_PM_L4PER_GPIO2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0060)
 #define OMAP4_RM_L4PER_GPIO2_CONTEXT_OFFSET		0x0064
-#define OMAP4430_RM_L4PER_GPIO2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0064)
+#define OMAP4430_RM_L4PER_GPIO2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0064)
 #define OMAP4_PM_L4PER_GPIO3_WKDEP_OFFSET		0x0068
-#define OMAP4430_PM_L4PER_GPIO3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0068)
+#define OMAP4430_PM_L4PER_GPIO3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0068)
 #define OMAP4_RM_L4PER_GPIO3_CONTEXT_OFFSET		0x006c
-#define OMAP4430_RM_L4PER_GPIO3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x006c)
+#define OMAP4430_RM_L4PER_GPIO3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x006c)
 #define OMAP4_PM_L4PER_GPIO4_WKDEP_OFFSET		0x0070
-#define OMAP4430_PM_L4PER_GPIO4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0070)
+#define OMAP4430_PM_L4PER_GPIO4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0070)
 #define OMAP4_RM_L4PER_GPIO4_CONTEXT_OFFSET		0x0074
-#define OMAP4430_RM_L4PER_GPIO4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0074)
+#define OMAP4430_RM_L4PER_GPIO4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0074)
 #define OMAP4_PM_L4PER_GPIO5_WKDEP_OFFSET		0x0078
-#define OMAP4430_PM_L4PER_GPIO5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0078)
+#define OMAP4430_PM_L4PER_GPIO5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0078)
 #define OMAP4_RM_L4PER_GPIO5_CONTEXT_OFFSET		0x007c
-#define OMAP4430_RM_L4PER_GPIO5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x007c)
+#define OMAP4430_RM_L4PER_GPIO5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x007c)
 #define OMAP4_PM_L4PER_GPIO6_WKDEP_OFFSET		0x0080
-#define OMAP4430_PM_L4PER_GPIO6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0080)
+#define OMAP4430_PM_L4PER_GPIO6_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0080)
 #define OMAP4_RM_L4PER_GPIO6_CONTEXT_OFFSET		0x0084
-#define OMAP4430_RM_L4PER_GPIO6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0084)
+#define OMAP4430_RM_L4PER_GPIO6_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0084)
 #define OMAP4_RM_L4PER_HDQ1W_CONTEXT_OFFSET		0x008c
-#define OMAP4430_RM_L4PER_HDQ1W_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x008c)
+#define OMAP4430_RM_L4PER_HDQ1W_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x008c)
 #define OMAP4_PM_L4PER_HECC1_WKDEP_OFFSET		0x0090
-#define OMAP4430_PM_L4PER_HECC1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0090)
+#define OMAP4430_PM_L4PER_HECC1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0090)
 #define OMAP4_RM_L4PER_HECC1_CONTEXT_OFFSET		0x0094
-#define OMAP4430_RM_L4PER_HECC1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0094)
+#define OMAP4430_RM_L4PER_HECC1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0094)
 #define OMAP4_PM_L4PER_HECC2_WKDEP_OFFSET		0x0098
-#define OMAP4430_PM_L4PER_HECC2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0098)
+#define OMAP4430_PM_L4PER_HECC2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0098)
 #define OMAP4_RM_L4PER_HECC2_CONTEXT_OFFSET		0x009c
-#define OMAP4430_RM_L4PER_HECC2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x009c)
+#define OMAP4430_RM_L4PER_HECC2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x009c)
 #define OMAP4_PM_L4PER_I2C1_WKDEP_OFFSET		0x00a0
-#define OMAP4430_PM_L4PER_I2C1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a0)
+#define OMAP4430_PM_L4PER_I2C1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00a0)
 #define OMAP4_RM_L4PER_I2C1_CONTEXT_OFFSET		0x00a4
-#define OMAP4430_RM_L4PER_I2C1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a4)
+#define OMAP4430_RM_L4PER_I2C1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00a4)
 #define OMAP4_PM_L4PER_I2C2_WKDEP_OFFSET		0x00a8
-#define OMAP4430_PM_L4PER_I2C2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a8)
+#define OMAP4430_PM_L4PER_I2C2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00a8)
 #define OMAP4_RM_L4PER_I2C2_CONTEXT_OFFSET		0x00ac
-#define OMAP4430_RM_L4PER_I2C2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ac)
+#define OMAP4430_RM_L4PER_I2C2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00ac)
 #define OMAP4_PM_L4PER_I2C3_WKDEP_OFFSET		0x00b0
-#define OMAP4430_PM_L4PER_I2C3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b0)
+#define OMAP4430_PM_L4PER_I2C3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00b0)
 #define OMAP4_RM_L4PER_I2C3_CONTEXT_OFFSET		0x00b4
-#define OMAP4430_RM_L4PER_I2C3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b4)
+#define OMAP4430_RM_L4PER_I2C3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00b4)
 #define OMAP4_PM_L4PER_I2C4_WKDEP_OFFSET		0x00b8
-#define OMAP4430_PM_L4PER_I2C4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b8)
+#define OMAP4430_PM_L4PER_I2C4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00b8)
 #define OMAP4_RM_L4PER_I2C4_CONTEXT_OFFSET		0x00bc
-#define OMAP4430_RM_L4PER_I2C4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00bc)
+#define OMAP4430_RM_L4PER_I2C4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00bc)
 #define OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET		0x00c0
-#define OMAP4430_RM_L4PER_L4_PER_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00c0)
+#define OMAP4430_RM_L4PER_L4_PER_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00c0)
 #define OMAP4_PM_L4PER_MCASP2_WKDEP_OFFSET		0x00d0
-#define OMAP4430_PM_L4PER_MCASP2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d0)
+#define OMAP4430_PM_L4PER_MCASP2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00d0)
 #define OMAP4_RM_L4PER_MCASP2_CONTEXT_OFFSET		0x00d4
-#define OMAP4430_RM_L4PER_MCASP2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d4)
+#define OMAP4430_RM_L4PER_MCASP2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00d4)
 #define OMAP4_PM_L4PER_MCASP3_WKDEP_OFFSET		0x00d8
-#define OMAP4430_PM_L4PER_MCASP3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d8)
+#define OMAP4430_PM_L4PER_MCASP3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00d8)
 #define OMAP4_RM_L4PER_MCASP3_CONTEXT_OFFSET		0x00dc
-#define OMAP4430_RM_L4PER_MCASP3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00dc)
+#define OMAP4430_RM_L4PER_MCASP3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00dc)
 #define OMAP4_PM_L4PER_MCBSP4_WKDEP_OFFSET		0x00e0
-#define OMAP4430_PM_L4PER_MCBSP4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e0)
+#define OMAP4430_PM_L4PER_MCBSP4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00e0)
 #define OMAP4_RM_L4PER_MCBSP4_CONTEXT_OFFSET		0x00e4
-#define OMAP4430_RM_L4PER_MCBSP4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e4)
+#define OMAP4430_RM_L4PER_MCBSP4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00e4)
 #define OMAP4_RM_L4PER_MGATE_CONTEXT_OFFSET		0x00ec
-#define OMAP4430_RM_L4PER_MGATE_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ec)
+#define OMAP4430_RM_L4PER_MGATE_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00ec)
 #define OMAP4_PM_L4PER_MCSPI1_WKDEP_OFFSET		0x00f0
-#define OMAP4430_PM_L4PER_MCSPI1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f0)
+#define OMAP4430_PM_L4PER_MCSPI1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00f0)
 #define OMAP4_RM_L4PER_MCSPI1_CONTEXT_OFFSET		0x00f4
-#define OMAP4430_RM_L4PER_MCSPI1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f4)
+#define OMAP4430_RM_L4PER_MCSPI1_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00f4)
 #define OMAP4_PM_L4PER_MCSPI2_WKDEP_OFFSET		0x00f8
-#define OMAP4430_PM_L4PER_MCSPI2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f8)
+#define OMAP4430_PM_L4PER_MCSPI2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00f8)
 #define OMAP4_RM_L4PER_MCSPI2_CONTEXT_OFFSET		0x00fc
-#define OMAP4430_RM_L4PER_MCSPI2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00fc)
+#define OMAP4430_RM_L4PER_MCSPI2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x00fc)
 #define OMAP4_PM_L4PER_MCSPI3_WKDEP_OFFSET		0x0100
-#define OMAP4430_PM_L4PER_MCSPI3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0100)
+#define OMAP4430_PM_L4PER_MCSPI3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0100)
 #define OMAP4_RM_L4PER_MCSPI3_CONTEXT_OFFSET		0x0104
-#define OMAP4430_RM_L4PER_MCSPI3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0104)
+#define OMAP4430_RM_L4PER_MCSPI3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0104)
 #define OMAP4_PM_L4PER_MCSPI4_WKDEP_OFFSET		0x0108
-#define OMAP4430_PM_L4PER_MCSPI4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0108)
+#define OMAP4430_PM_L4PER_MCSPI4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0108)
 #define OMAP4_RM_L4PER_MCSPI4_CONTEXT_OFFSET		0x010c
-#define OMAP4430_RM_L4PER_MCSPI4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x010c)
+#define OMAP4430_RM_L4PER_MCSPI4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x010c)
 #define OMAP4_PM_L4PER_MMCSD3_WKDEP_OFFSET		0x0120
-#define OMAP4430_PM_L4PER_MMCSD3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0120)
+#define OMAP4430_PM_L4PER_MMCSD3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0120)
 #define OMAP4_RM_L4PER_MMCSD3_CONTEXT_OFFSET		0x0124
-#define OMAP4430_RM_L4PER_MMCSD3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0124)
+#define OMAP4430_RM_L4PER_MMCSD3_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0124)
 #define OMAP4_PM_L4PER_MMCSD4_WKDEP_OFFSET		0x0128
-#define OMAP4430_PM_L4PER_MMCSD4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0128)
+#define OMAP4430_PM_L4PER_MMCSD4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0128)
 #define OMAP4_RM_L4PER_MMCSD4_CONTEXT_OFFSET		0x012c
-#define OMAP4430_RM_L4PER_MMCSD4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x012c)
+#define OMAP4430_RM_L4PER_MMCSD4_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x012c)
 #define OMAP4_RM_L4PER_MSPROHG_CONTEXT_OFFSET		0x0134
-#define OMAP4430_RM_L4PER_MSPROHG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0134)
+#define OMAP4430_RM_L4PER_MSPROHG_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0134)
 #define OMAP4_PM_L4PER_SLIMBUS2_WKDEP_OFFSET		0x0138
-#define OMAP4430_PM_L4PER_SLIMBUS2_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0138)
+#define OMAP4430_PM_L4PER_SLIMBUS2_WKDEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0138)
 #define OMAP4_RM_L4PER_SLIMBUS2_CONTEXT_OFFSET		0x013c
-#define OMAP4430_RM_L4PER_SLIMBUS2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x013c)
+#define OMAP4430_RM_L4PER_SLIMBUS2_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x013c)
 #define OMAP4_PM_L4PER_UART1_WKDEP_OFFSET		0x0140
-#define OMAP4430_PM_L4PER_UART1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0140)
+#define OMAP4430_PM_L4PER_UART1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0140)
 #define OMAP4_RM_L4PER_UART1_CONTEXT_OFFSET		0x0144
-#define OMAP4430_RM_L4PER_UART1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0144)
+#define OMAP4430_RM_L4PER_UART1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0144)
 #define OMAP4_PM_L4PER_UART2_WKDEP_OFFSET		0x0148
-#define OMAP4430_PM_L4PER_UART2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0148)
+#define OMAP4430_PM_L4PER_UART2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0148)
 #define OMAP4_RM_L4PER_UART2_CONTEXT_OFFSET		0x014c
-#define OMAP4430_RM_L4PER_UART2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x014c)
+#define OMAP4430_RM_L4PER_UART2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x014c)
 #define OMAP4_PM_L4PER_UART3_WKDEP_OFFSET		0x0150
-#define OMAP4430_PM_L4PER_UART3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0150)
+#define OMAP4430_PM_L4PER_UART3_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0150)
 #define OMAP4_RM_L4PER_UART3_CONTEXT_OFFSET		0x0154
-#define OMAP4430_RM_L4PER_UART3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0154)
+#define OMAP4430_RM_L4PER_UART3_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0154)
 #define OMAP4_PM_L4PER_UART4_WKDEP_OFFSET		0x0158
-#define OMAP4430_PM_L4PER_UART4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0158)
+#define OMAP4430_PM_L4PER_UART4_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0158)
 #define OMAP4_RM_L4PER_UART4_CONTEXT_OFFSET		0x015c
-#define OMAP4430_RM_L4PER_UART4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x015c)
+#define OMAP4430_RM_L4PER_UART4_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x015c)
 #define OMAP4_PM_L4PER_MMCSD5_WKDEP_OFFSET		0x0160
-#define OMAP4430_PM_L4PER_MMCSD5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0160)
+#define OMAP4430_PM_L4PER_MMCSD5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0160)
 #define OMAP4_RM_L4PER_MMCSD5_CONTEXT_OFFSET		0x0164
-#define OMAP4430_RM_L4PER_MMCSD5_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0164)
+#define OMAP4430_RM_L4PER_MMCSD5_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0164)
 #define OMAP4_PM_L4PER_I2C5_WKDEP_OFFSET		0x0168
-#define OMAP4430_PM_L4PER_I2C5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0168)
+#define OMAP4430_PM_L4PER_I2C5_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x0168)
 #define OMAP4_RM_L4PER_I2C5_CONTEXT_OFFSET		0x016c
-#define OMAP4430_RM_L4PER_I2C5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x016c)
+#define OMAP4430_RM_L4PER_I2C5_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x016c)
 #define OMAP4_RM_L4SEC_AES1_CONTEXT_OFFSET		0x01a4
-#define OMAP4430_RM_L4SEC_AES1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01a4)
+#define OMAP4430_RM_L4SEC_AES1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01a4)
 #define OMAP4_RM_L4SEC_AES2_CONTEXT_OFFSET		0x01ac
-#define OMAP4430_RM_L4SEC_AES2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01ac)
+#define OMAP4430_RM_L4SEC_AES2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01ac)
 #define OMAP4_RM_L4SEC_DES3DES_CONTEXT_OFFSET		0x01b4
-#define OMAP4430_RM_L4SEC_DES3DES_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01b4)
+#define OMAP4430_RM_L4SEC_DES3DES_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01b4)
 #define OMAP4_RM_L4SEC_PKAEIP29_CONTEXT_OFFSET		0x01bc
-#define OMAP4430_RM_L4SEC_PKAEIP29_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01bc)
+#define OMAP4430_RM_L4SEC_PKAEIP29_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01bc)
 #define OMAP4_RM_L4SEC_RNG_CONTEXT_OFFSET		0x01c4
-#define OMAP4430_RM_L4SEC_RNG_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01c4)
+#define OMAP4430_RM_L4SEC_RNG_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01c4)
 #define OMAP4_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET		0x01cc
-#define OMAP4430_RM_L4SEC_SHA2MD51_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01cc)
+#define OMAP4430_RM_L4SEC_SHA2MD51_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01cc)
 #define OMAP4_RM_L4SEC_CRYPTODMA_CONTEXT_OFFSET		0x01dc
-#define OMAP4430_RM_L4SEC_CRYPTODMA_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01dc)
+#define OMAP4430_RM_L4SEC_CRYPTODMA_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_INST, 0x01dc)
 
 /* PRM.CEFUSE_PRM register offsets */
 #define OMAP4_PM_CEFUSE_PWRSTCTRL_OFFSET		0x0000
-#define OMAP4430_PM_CEFUSE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0000)
+#define OMAP4430_PM_CEFUSE_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_INST, 0x0000)
 #define OMAP4_PM_CEFUSE_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_CEFUSE_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0004)
+#define OMAP4430_PM_CEFUSE_PWRSTST			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_INST, 0x0004)
 #define OMAP4_RM_CEFUSE_CEFUSE_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_CEFUSE_CEFUSE_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0024)
+#define OMAP4430_RM_CEFUSE_CEFUSE_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_INST, 0x0024)
 
 /* PRM.WKUP_PRM register offsets */
 #define OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_WKUP_L4WKUP_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0024)
+#define OMAP4430_RM_WKUP_L4WKUP_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0024)
 #define OMAP4_RM_WKUP_WDT1_CONTEXT_OFFSET		0x002c
-#define OMAP4430_RM_WKUP_WDT1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x002c)
+#define OMAP4430_RM_WKUP_WDT1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x002c)
 #define OMAP4_PM_WKUP_WDT2_WKDEP_OFFSET			0x0030
-#define OMAP4430_PM_WKUP_WDT2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0030)
+#define OMAP4430_PM_WKUP_WDT2_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0030)
 #define OMAP4_RM_WKUP_WDT2_CONTEXT_OFFSET		0x0034
-#define OMAP4430_RM_WKUP_WDT2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0034)
+#define OMAP4430_RM_WKUP_WDT2_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0034)
 #define OMAP4_PM_WKUP_GPIO1_WKDEP_OFFSET		0x0038
-#define OMAP4430_PM_WKUP_GPIO1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0038)
+#define OMAP4430_PM_WKUP_GPIO1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0038)
 #define OMAP4_RM_WKUP_GPIO1_CONTEXT_OFFSET		0x003c
-#define OMAP4430_RM_WKUP_GPIO1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x003c)
+#define OMAP4430_RM_WKUP_GPIO1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x003c)
 #define OMAP4_PM_WKUP_TIMER1_WKDEP_OFFSET		0x0040
-#define OMAP4430_PM_WKUP_TIMER1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0040)
+#define OMAP4430_PM_WKUP_TIMER1_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0040)
 #define OMAP4_RM_WKUP_TIMER1_CONTEXT_OFFSET		0x0044
-#define OMAP4430_RM_WKUP_TIMER1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0044)
+#define OMAP4430_RM_WKUP_TIMER1_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0044)
 #define OMAP4_PM_WKUP_TIMER12_WKDEP_OFFSET		0x0048
-#define OMAP4430_PM_WKUP_TIMER12_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0048)
+#define OMAP4430_PM_WKUP_TIMER12_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0048)
 #define OMAP4_RM_WKUP_TIMER12_CONTEXT_OFFSET		0x004c
-#define OMAP4430_RM_WKUP_TIMER12_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x004c)
+#define OMAP4430_RM_WKUP_TIMER12_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x004c)
 #define OMAP4_RM_WKUP_SYNCTIMER_CONTEXT_OFFSET		0x0054
-#define OMAP4430_RM_WKUP_SYNCTIMER_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0054)
+#define OMAP4430_RM_WKUP_SYNCTIMER_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0054)
 #define OMAP4_PM_WKUP_USIM_WKDEP_OFFSET			0x0058
-#define OMAP4430_PM_WKUP_USIM_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0058)
+#define OMAP4430_PM_WKUP_USIM_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0058)
 #define OMAP4_RM_WKUP_USIM_CONTEXT_OFFSET		0x005c
-#define OMAP4430_RM_WKUP_USIM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x005c)
+#define OMAP4430_RM_WKUP_USIM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x005c)
 #define OMAP4_RM_WKUP_SARRAM_CONTEXT_OFFSET		0x0064
-#define OMAP4430_RM_WKUP_SARRAM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0064)
+#define OMAP4430_RM_WKUP_SARRAM_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0064)
 #define OMAP4_PM_WKUP_KEYBOARD_WKDEP_OFFSET		0x0078
-#define OMAP4430_PM_WKUP_KEYBOARD_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0078)
+#define OMAP4430_PM_WKUP_KEYBOARD_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0078)
 #define OMAP4_RM_WKUP_KEYBOARD_CONTEXT_OFFSET		0x007c
-#define OMAP4430_RM_WKUP_KEYBOARD_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x007c)
+#define OMAP4430_RM_WKUP_KEYBOARD_CONTEXT		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x007c)
 #define OMAP4_PM_WKUP_RTC_WKDEP_OFFSET			0x0080
-#define OMAP4430_PM_WKUP_RTC_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0080)
+#define OMAP4430_PM_WKUP_RTC_WKDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0080)
 #define OMAP4_RM_WKUP_RTC_CONTEXT_OFFSET		0x0084
-#define OMAP4430_RM_WKUP_RTC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0084)
+#define OMAP4430_RM_WKUP_RTC_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_INST, 0x0084)
 
 /* PRM.WKUP_CM register offsets */
 #define OMAP4_CM_WKUP_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_WKUP_CLKSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0000)
+#define OMAP4430_CM_WKUP_CLKSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0000)
 #define OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_WKUP_L4WKUP_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0020)
+#define OMAP4430_CM_WKUP_L4WKUP_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0020)
 #define OMAP4_CM_WKUP_WDT1_CLKCTRL_OFFSET		0x0028
-#define OMAP4430_CM_WKUP_WDT1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0028)
+#define OMAP4430_CM_WKUP_WDT1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0028)
 #define OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET		0x0030
-#define OMAP4430_CM_WKUP_WDT2_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0030)
+#define OMAP4430_CM_WKUP_WDT2_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0030)
 #define OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET		0x0038
-#define OMAP4430_CM_WKUP_GPIO1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0038)
+#define OMAP4430_CM_WKUP_GPIO1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0038)
 #define OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET		0x0040
-#define OMAP4430_CM_WKUP_TIMER1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0040)
+#define OMAP4430_CM_WKUP_TIMER1_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0040)
 #define OMAP4_CM_WKUP_TIMER12_CLKCTRL_OFFSET		0x0048
-#define OMAP4430_CM_WKUP_TIMER12_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0048)
+#define OMAP4430_CM_WKUP_TIMER12_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0048)
 #define OMAP4_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET		0x0050
-#define OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0050)
+#define OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0050)
 #define OMAP4_CM_WKUP_USIM_CLKCTRL_OFFSET		0x0058
-#define OMAP4430_CM_WKUP_USIM_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0058)
+#define OMAP4430_CM_WKUP_USIM_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0058)
 #define OMAP4_CM_WKUP_SARRAM_CLKCTRL_OFFSET		0x0060
-#define OMAP4430_CM_WKUP_SARRAM_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0060)
+#define OMAP4430_CM_WKUP_SARRAM_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0060)
 #define OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET		0x0078
-#define OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0078)
+#define OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0078)
 #define OMAP4_CM_WKUP_RTC_CLKCTRL_OFFSET		0x0080
-#define OMAP4430_CM_WKUP_RTC_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0080)
+#define OMAP4430_CM_WKUP_RTC_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0080)
 #define OMAP4_CM_WKUP_BANDGAP_CLKCTRL_OFFSET		0x0088
-#define OMAP4430_CM_WKUP_BANDGAP_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0088)
+#define OMAP4430_CM_WKUP_BANDGAP_CLKCTRL		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_INST, 0x0088)
 
 /* PRM.EMU_PRM register offsets */
 #define OMAP4_PM_EMU_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_EMU_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0000)
+#define OMAP4430_PM_EMU_PWRSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_INST, 0x0000)
 #define OMAP4_PM_EMU_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_EMU_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0004)
+#define OMAP4430_PM_EMU_PWRSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_INST, 0x0004)
 #define OMAP4_RM_EMU_DEBUGSS_CONTEXT_OFFSET		0x0024
-#define OMAP4430_RM_EMU_DEBUGSS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0024)
+#define OMAP4430_RM_EMU_DEBUGSS_CONTEXT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_INST, 0x0024)
 
 /* PRM.EMU_CM register offsets */
 #define OMAP4_CM_EMU_CLKSTCTRL_OFFSET			0x0000
-#define OMAP4430_CM_EMU_CLKSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0000)
+#define OMAP4430_CM_EMU_CLKSTCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_INST, 0x0000)
 #define OMAP4_CM_EMU_DYNAMICDEP_OFFSET			0x0008
-#define OMAP4430_CM_EMU_DYNAMICDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0008)
+#define OMAP4430_CM_EMU_DYNAMICDEP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_INST, 0x0008)
 #define OMAP4_CM_EMU_DEBUGSS_CLKCTRL_OFFSET		0x0020
-#define OMAP4430_CM_EMU_DEBUGSS_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0020)
+#define OMAP4430_CM_EMU_DEBUGSS_CLKCTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_INST, 0x0020)
 
 /* PRM.DEVICE_PRM register offsets */
 #define OMAP4_PRM_RSTCTRL_OFFSET			0x0000
-#define OMAP4430_PRM_RSTCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0000)
+#define OMAP4430_PRM_RSTCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0000)
 #define OMAP4_PRM_RSTST_OFFSET				0x0004
-#define OMAP4430_PRM_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0004)
+#define OMAP4430_PRM_RSTST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0004)
 #define OMAP4_PRM_RSTTIME_OFFSET			0x0008
-#define OMAP4430_PRM_RSTTIME				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0008)
+#define OMAP4430_PRM_RSTTIME				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0008)
 #define OMAP4_PRM_CLKREQCTRL_OFFSET			0x000c
-#define OMAP4430_PRM_CLKREQCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x000c)
+#define OMAP4430_PRM_CLKREQCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x000c)
 #define OMAP4_PRM_VOLTCTRL_OFFSET			0x0010
-#define OMAP4430_PRM_VOLTCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0010)
+#define OMAP4430_PRM_VOLTCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0010)
 #define OMAP4_PRM_PWRREQCTRL_OFFSET			0x0014
-#define OMAP4430_PRM_PWRREQCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0014)
+#define OMAP4430_PRM_PWRREQCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0014)
 #define OMAP4_PRM_PSCON_COUNT_OFFSET			0x0018
-#define OMAP4430_PRM_PSCON_COUNT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0018)
+#define OMAP4430_PRM_PSCON_COUNT			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0018)
 #define OMAP4_PRM_IO_COUNT_OFFSET			0x001c
-#define OMAP4430_PRM_IO_COUNT				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x001c)
+#define OMAP4430_PRM_IO_COUNT				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x001c)
 #define OMAP4_PRM_IO_PMCTRL_OFFSET			0x0020
-#define OMAP4430_PRM_IO_PMCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0020)
+#define OMAP4430_PRM_IO_PMCTRL				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0020)
 #define OMAP4_PRM_VOLTSETUP_WARMRESET_OFFSET		0x0024
-#define OMAP4430_PRM_VOLTSETUP_WARMRESET		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0024)
+#define OMAP4430_PRM_VOLTSETUP_WARMRESET		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0024)
 #define OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET		0x0028
-#define OMAP4430_PRM_VOLTSETUP_CORE_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0028)
+#define OMAP4430_PRM_VOLTSETUP_CORE_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0028)
 #define OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET		0x002c
-#define OMAP4430_PRM_VOLTSETUP_MPU_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x002c)
+#define OMAP4430_PRM_VOLTSETUP_MPU_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x002c)
 #define OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET		0x0030
-#define OMAP4430_PRM_VOLTSETUP_IVA_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0030)
+#define OMAP4430_PRM_VOLTSETUP_IVA_OFF			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0030)
 #define OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET	0x0034
-#define OMAP4430_PRM_VOLTSETUP_CORE_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0034)
+#define OMAP4430_PRM_VOLTSETUP_CORE_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0034)
 #define OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET	0x0038
-#define OMAP4430_PRM_VOLTSETUP_MPU_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0038)
+#define OMAP4430_PRM_VOLTSETUP_MPU_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0038)
 #define OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET	0x003c
-#define OMAP4430_PRM_VOLTSETUP_IVA_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x003c)
+#define OMAP4430_PRM_VOLTSETUP_IVA_RET_SLEEP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x003c)
 #define OMAP4_PRM_VP_CORE_CONFIG_OFFSET			0x0040
-#define OMAP4430_PRM_VP_CORE_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0040)
+#define OMAP4430_PRM_VP_CORE_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0040)
 #define OMAP4_PRM_VP_CORE_STATUS_OFFSET			0x0044
-#define OMAP4430_PRM_VP_CORE_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0044)
+#define OMAP4430_PRM_VP_CORE_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0044)
 #define OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET		0x0048
-#define OMAP4430_PRM_VP_CORE_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0048)
+#define OMAP4430_PRM_VP_CORE_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0048)
 #define OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET		0x004c
-#define OMAP4430_PRM_VP_CORE_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x004c)
+#define OMAP4430_PRM_VP_CORE_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x004c)
 #define OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET		0x0050
-#define OMAP4430_PRM_VP_CORE_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0050)
+#define OMAP4430_PRM_VP_CORE_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0050)
 #define OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET		0x0054
-#define OMAP4430_PRM_VP_CORE_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0054)
+#define OMAP4430_PRM_VP_CORE_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0054)
 #define OMAP4_PRM_VP_MPU_CONFIG_OFFSET			0x0058
-#define OMAP4430_PRM_VP_MPU_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0058)
+#define OMAP4430_PRM_VP_MPU_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0058)
 #define OMAP4_PRM_VP_MPU_STATUS_OFFSET			0x005c
-#define OMAP4430_PRM_VP_MPU_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x005c)
+#define OMAP4430_PRM_VP_MPU_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x005c)
 #define OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET		0x0060
-#define OMAP4430_PRM_VP_MPU_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0060)
+#define OMAP4430_PRM_VP_MPU_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0060)
 #define OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET			0x0064
-#define OMAP4430_PRM_VP_MPU_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0064)
+#define OMAP4430_PRM_VP_MPU_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0064)
 #define OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET		0x0068
-#define OMAP4430_PRM_VP_MPU_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0068)
+#define OMAP4430_PRM_VP_MPU_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0068)
 #define OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET		0x006c
-#define OMAP4430_PRM_VP_MPU_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x006c)
+#define OMAP4430_PRM_VP_MPU_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x006c)
 #define OMAP4_PRM_VP_IVA_CONFIG_OFFSET			0x0070
-#define OMAP4430_PRM_VP_IVA_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0070)
+#define OMAP4430_PRM_VP_IVA_CONFIG			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0070)
 #define OMAP4_PRM_VP_IVA_STATUS_OFFSET			0x0074
-#define OMAP4430_PRM_VP_IVA_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0074)
+#define OMAP4430_PRM_VP_IVA_STATUS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0074)
 #define OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET		0x0078
-#define OMAP4430_PRM_VP_IVA_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0078)
+#define OMAP4430_PRM_VP_IVA_VLIMITTO			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0078)
 #define OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET			0x007c
-#define OMAP4430_PRM_VP_IVA_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x007c)
+#define OMAP4430_PRM_VP_IVA_VOLTAGE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x007c)
 #define OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET		0x0080
-#define OMAP4430_PRM_VP_IVA_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0080)
+#define OMAP4430_PRM_VP_IVA_VSTEPMAX			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0080)
 #define OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET		0x0084
-#define OMAP4430_PRM_VP_IVA_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0084)
+#define OMAP4430_PRM_VP_IVA_VSTEPMIN			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0084)
 #define OMAP4_PRM_VC_SMPS_SA_OFFSET			0x0088
-#define OMAP4430_PRM_VC_SMPS_SA				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0088)
+#define OMAP4430_PRM_VC_SMPS_SA				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0088)
 #define OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET		0x008c
-#define OMAP4430_PRM_VC_VAL_SMPS_RA_VOL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x008c)
+#define OMAP4430_PRM_VC_VAL_SMPS_RA_VOL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x008c)
 #define OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET		0x0090
-#define OMAP4430_PRM_VC_VAL_SMPS_RA_CMD			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0090)
+#define OMAP4430_PRM_VC_VAL_SMPS_RA_CMD			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0090)
 #define OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET		0x0094
-#define OMAP4430_PRM_VC_VAL_CMD_VDD_CORE_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0094)
+#define OMAP4430_PRM_VC_VAL_CMD_VDD_CORE_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0094)
 #define OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET		0x0098
-#define OMAP4430_PRM_VC_VAL_CMD_VDD_MPU_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0098)
+#define OMAP4430_PRM_VC_VAL_CMD_VDD_MPU_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x0098)
 #define OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET		0x009c
-#define OMAP4430_PRM_VC_VAL_CMD_VDD_IVA_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x009c)
+#define OMAP4430_PRM_VC_VAL_CMD_VDD_IVA_L		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x009c)
 #define OMAP4_PRM_VC_VAL_BYPASS_OFFSET			0x00a0
-#define OMAP4430_PRM_VC_VAL_BYPASS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a0)
+#define OMAP4430_PRM_VC_VAL_BYPASS			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a0)
 #define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET			0x00a4
-#define OMAP4430_PRM_VC_CFG_CHANNEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a4)
-#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET		0x00a8
-#define OMAP4430_PRM_VC_CFG_I2C_MODE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a8)
+#define OMAP4430_PRM_VC_CFG_CHANNEL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a4)
+#define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET		0x00a8
+#define OMAP4430_PRM_VC_CFG_I2C_MODE			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00a8)
 #define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET			0x00ac
-#define OMAP4430_PRM_VC_CFG_I2C_CLK			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ac)
+#define OMAP4430_PRM_VC_CFG_I2C_CLK			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ac)
 #define OMAP4_PRM_SRAM_COUNT_OFFSET			0x00b0
-#define OMAP4430_PRM_SRAM_COUNT				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b0)
+#define OMAP4430_PRM_SRAM_COUNT				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00b0)
 #define OMAP4_PRM_SRAM_WKUP_SETUP_OFFSET		0x00b4
-#define OMAP4430_PRM_SRAM_WKUP_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b4)
+#define OMAP4430_PRM_SRAM_WKUP_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00b4)
 #define OMAP4_PRM_LDO_SRAM_CORE_SETUP_OFFSET		0x00b8
-#define OMAP4430_PRM_LDO_SRAM_CORE_SETUP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b8)
+#define OMAP4430_PRM_LDO_SRAM_CORE_SETUP		OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00b8)
 #define OMAP4_PRM_LDO_SRAM_CORE_CTRL_OFFSET		0x00bc
-#define OMAP4430_PRM_LDO_SRAM_CORE_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00bc)
+#define OMAP4430_PRM_LDO_SRAM_CORE_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00bc)
 #define OMAP4_PRM_LDO_SRAM_MPU_SETUP_OFFSET		0x00c0
-#define OMAP4430_PRM_LDO_SRAM_MPU_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c0)
+#define OMAP4430_PRM_LDO_SRAM_MPU_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00c0)
 #define OMAP4_PRM_LDO_SRAM_MPU_CTRL_OFFSET		0x00c4
-#define OMAP4430_PRM_LDO_SRAM_MPU_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c4)
+#define OMAP4430_PRM_LDO_SRAM_MPU_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00c4)
 #define OMAP4_PRM_LDO_SRAM_IVA_SETUP_OFFSET		0x00c8
-#define OMAP4430_PRM_LDO_SRAM_IVA_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c8)
+#define OMAP4430_PRM_LDO_SRAM_IVA_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00c8)
 #define OMAP4_PRM_LDO_SRAM_IVA_CTRL_OFFSET		0x00cc
-#define OMAP4430_PRM_LDO_SRAM_IVA_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00cc)
+#define OMAP4430_PRM_LDO_SRAM_IVA_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00cc)
 #define OMAP4_PRM_LDO_ABB_MPU_SETUP_OFFSET		0x00d0
-#define OMAP4430_PRM_LDO_ABB_MPU_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d0)
+#define OMAP4430_PRM_LDO_ABB_MPU_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00d0)
 #define OMAP4_PRM_LDO_ABB_MPU_CTRL_OFFSET		0x00d4
-#define OMAP4430_PRM_LDO_ABB_MPU_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d4)
+#define OMAP4430_PRM_LDO_ABB_MPU_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00d4)
 #define OMAP4_PRM_LDO_ABB_IVA_SETUP_OFFSET		0x00d8
-#define OMAP4430_PRM_LDO_ABB_IVA_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d8)
+#define OMAP4430_PRM_LDO_ABB_IVA_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00d8)
 #define OMAP4_PRM_LDO_ABB_IVA_CTRL_OFFSET		0x00dc
-#define OMAP4430_PRM_LDO_ABB_IVA_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00dc)
+#define OMAP4430_PRM_LDO_ABB_IVA_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00dc)
 #define OMAP4_PRM_LDO_BANDGAP_SETUP_OFFSET		0x00e0
-#define OMAP4430_PRM_LDO_BANDGAP_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e0)
+#define OMAP4430_PRM_LDO_BANDGAP_SETUP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00e0)
 #define OMAP4_PRM_DEVICE_OFF_CTRL_OFFSET		0x00e4
-#define OMAP4430_PRM_DEVICE_OFF_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e4)
+#define OMAP4430_PRM_DEVICE_OFF_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00e4)
 #define OMAP4_PRM_PHASE1_CNDP_OFFSET			0x00e8
-#define OMAP4430_PRM_PHASE1_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e8)
+#define OMAP4430_PRM_PHASE1_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00e8)
 #define OMAP4_PRM_PHASE2A_CNDP_OFFSET			0x00ec
-#define OMAP4430_PRM_PHASE2A_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ec)
+#define OMAP4430_PRM_PHASE2A_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00ec)
 #define OMAP4_PRM_PHASE2B_CNDP_OFFSET			0x00f0
-#define OMAP4430_PRM_PHASE2B_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f0)
-#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET			0x00f4
-#define OMAP4430_PRM_MODEM_IF_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f4)
+#define OMAP4430_PRM_PHASE2B_CNDP			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f0)
+#define OMAP4_PRM_MODEM_IF_CTRL_OFFSET			0x00f4
+#define OMAP4430_PRM_MODEM_IF_CTRL			OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f4)
 #define OMAP4_PRM_VC_ERRST_OFFSET			0x00f8
-#define OMAP4430_PRM_VC_ERRST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f8)
+#define OMAP4430_PRM_VC_ERRST				OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_INST, 0x00f8)
 
-/*
- * PRCM_MPU
- *
- * The PRCM_MPU is a local PRCM inside the MPU subsystem. For the PRCM (global)
- * point of view the PRCM_MPU is a single entity. It shares the same
- * programming model as the global PRCM and thus can be assimilate as two new
- * MOD inside the PRCM
- */
+/* Function prototypes */
+# ifndef __ASSEMBLER__
 
-/* PRCM_MPU.OCP_SOCKET_PRCM register offsets */
-#define OMAP4_REVISION_PRCM_OFFSET			0x0000
-#define OMAP4430_REVISION_PRCM				OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_OCP_SOCKET_PRCM_MOD, 0x0000)
+extern u32 omap4_prm_read_inst_reg(s16 inst, u16 idx);
+extern void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 idx);
+extern u32 omap4_prm_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
+extern u32 omap4_prm_rmw_reg_bits(u32 mask, u32 bits, void __iomem *reg);
+extern u32 omap4_prm_set_inst_reg_bits(u32 bits, s16 inst, s16 idx);
+extern u32 omap4_prm_clear_inst_reg_bits(u32 bits, s16 inst, s16 idx);
+extern u32 omap4_prm_read_bits_shift(void __iomem *reg, u32 mask);
 
-/* PRCM_MPU.DEVICE_PRM register offsets */
-#define OMAP4_PRCM_MPU_PRM_RSTST_OFFSET			0x0000
-#define OMAP4430_PRCM_MPU_PRM_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_MOD, 0x0000)
-#define OMAP4_PRCM_MPU_PRM_PSCON_COUNT_OFFSET		0x0004
-#define OMAP4430_PRCM_MPU_PRM_PSCON_COUNT		OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_DEVICE_PRM_MOD, 0x0004)
+extern int omap4_prm_is_hardreset_asserted(void __iomem *rstctrl_reg, u8 shift);
+extern int omap4_prm_assert_hardreset(void __iomem *rstctrl_reg, u8 shift);
+extern int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift);
 
-/* PRCM_MPU.CPU0 register offsets */
-#define OMAP4_PM_CPU0_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_CPU0_PWRSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0000)
-#define OMAP4_PM_CPU0_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_CPU0_PWRSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0004)
-#define OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET		0x0008
-#define OMAP4430_RM_CPU0_CPU0_CONTEXT			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0008)
-#define OMAP4_RM_CPU0_CPU0_RSTCTRL_OFFSET		0x000c
-#define OMAP4430_RM_CPU0_CPU0_RSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x000c)
-#define OMAP4_RM_CPU0_CPU0_RSTST_OFFSET			0x0010
-#define OMAP4430_RM_CPU0_CPU0_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0010)
-#define OMAP4_CM_CPU0_CPU0_CLKCTRL_OFFSET		0x0014
-#define OMAP4430_CM_CPU0_CPU0_CLKCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0014)
-#define OMAP4_CM_CPU0_CLKSTCTRL_OFFSET			0x0018
-#define OMAP4430_CM_CPU0_CLKSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU0_MOD, 0x0018)
+extern void omap4_prm_global_warm_sw_reset(void);
 
-/* PRCM_MPU.CPU1 register offsets */
-#define OMAP4_PM_CPU1_PWRSTCTRL_OFFSET			0x0000
-#define OMAP4430_PM_CPU1_PWRSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0000)
-#define OMAP4_PM_CPU1_PWRSTST_OFFSET			0x0004
-#define OMAP4430_PM_CPU1_PWRSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0004)
-#define OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET		0x0008
-#define OMAP4430_RM_CPU1_CPU1_CONTEXT			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0008)
-#define OMAP4_RM_CPU1_CPU1_RSTCTRL_OFFSET		0x000c
-#define OMAP4430_RM_CPU1_CPU1_RSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x000c)
-#define OMAP4_RM_CPU1_CPU1_RSTST_OFFSET			0x0010
-#define OMAP4430_RM_CPU1_CPU1_RSTST			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0010)
-#define OMAP4_CM_CPU1_CPU1_CLKCTRL_OFFSET		0x0014
-#define OMAP4430_CM_CPU1_CPU1_CLKCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0014)
-#define OMAP4_CM_CPU1_CLKSTCTRL_OFFSET			0x0018
-#define OMAP4430_CM_CPU1_CLKSTCTRL			OMAP44XX_PRCM_MPU_REGADDR(OMAP4430_PRCM_MPU_CPU1_MOD, 0x0018)
+# endif
+
 #endif
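
The renamed *_INST register macros pair with the omap4_prm_*_inst_reg() prototypes now exported from this header. A minimal caller sketch, not part of this patch: the wrapper function below is hypothetical, while the accessor, instance macro, and offset macro are the ones defined above.

/* Illustrative only: set bit(s) in PRM_IO_PMCTRL of the device instance */
static void example_set_io_pmctrl_bits(u32 bits)
{
	omap4_prm_rmw_inst_reg_bits(bits, bits, OMAP4430_PRM_DEVICE_INST,
				    OMAP4_PRM_IO_PMCTRL_OFFSET);
}
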
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
new file mode 100644
index 0000000..a303242
--- /dev/null
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -0,0 +1,66 @@
+/*
+ * OMAP4 PRM instance functions
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "prm44xx.h"
+#include "prminst44xx.h"
+#include "prm-regbits-44xx.h"
+#include "prcm44xx.h"
+#include "prcm_mpu44xx.h"
+
+static u32 _prm_bases[OMAP4_MAX_PRCM_PARTITIONS] = {
+	[OMAP4430_INVALID_PRCM_PARTITION]	= 0,
+	[OMAP4430_PRM_PARTITION]		= OMAP4430_PRM_BASE,
+	[OMAP4430_CM1_PARTITION]		= 0,
+	[OMAP4430_CM2_PARTITION]		= 0,
+	[OMAP4430_SCRM_PARTITION]		= 0,
+	[OMAP4430_PRCM_MPU_PARTITION]		= OMAP4430_PRCM_MPU_BASE,
+};
+
+/* Read a register in a PRM instance */
+u32 omap4_prminst_read_inst_reg(u8 part, s16 inst, u16 idx)
+{
+	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
+	       part == OMAP4430_INVALID_PRCM_PARTITION ||
+	       !_prm_bases[part]);
+	return __raw_readl(OMAP2_L4_IO_ADDRESS(_prm_bases[part] + inst +
+					       idx));
+}
+
+/* Write into a register in a PRM instance */
+void omap4_prminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx)
+{
+	BUG_ON(part >= OMAP4_MAX_PRCM_PARTITIONS ||
+	       part == OMAP4430_INVALID_PRCM_PARTITION ||
+	       !_prm_bases[part]);
+	__raw_writel(val, OMAP2_L4_IO_ADDRESS(_prm_bases[part] + inst + idx));
+}
+
+/* Read-modify-write a register in PRM. Caller must lock */
+u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
+				   s16 idx)
+{
+	u32 v;
+
+	v = omap4_prminst_read_inst_reg(part, inst, idx);
+	v &= ~mask;
+	v |= bits;
+	omap4_prminst_write_inst_reg(v, part, inst, idx);
+
+	return v;
+}
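
The prminst API differs from the flat omap4_prm_*() accessors by taking a PRCM partition argument, which selects a base address from _prm_bases[]. A usage sketch under that assumption (the wrapper function and the inst/idx values are placeholders; the accessor and OMAP4430_PRM_PARTITION come from this patch and prcm44xx.h):

/* Illustrative only: clear bit 1 and set bit 0 in a PRM-partition register */
static void example_prminst_rmw(s16 inst, s16 idx)
{
	omap4_prminst_rmw_inst_reg_bits(0x3, 0x1, OMAP4430_PRM_PARTITION,
					inst, idx);
}
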
diff --git a/arch/arm/mach-omap2/prminst44xx.h b/arch/arm/mach-omap2/prminst44xx.h
new file mode 100644
index 0000000..02dd66d
--- /dev/null
+++ b/arch/arm/mach-omap2/prminst44xx.h
@@ -0,0 +1,25 @@
+/*
+ * OMAP4 Power/Reset Management (PRM) function prototypes
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ARCH_ASM_MACH_OMAP2_PRMINST44XX_H
+#define __ARCH_ASM_MACH_OMAP2_PRMINST44XX_H
+
+/*
+ * In an ideal world, we would not export these low-level functions,
+ * but this will probably take some time to fix properly
+ */
+extern u32 omap4_prminst_read_inst_reg(u8 part, s16 inst, u16 idx);
+extern void omap4_prminst_write_inst_reg(u32 val, u8 part, s16 inst, u16 idx);
+extern u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part,
+					   s16 inst, s16 idx);
+
+extern void omap4_prm_global_warm_sw_reset(void);
+
+#endif
diff --git a/arch/arm/mach-omap2/scrm44xx.h b/arch/arm/mach-omap2/scrm44xx.h
new file mode 100644
index 0000000..701bf2d
--- /dev/null
+++ b/arch/arm/mach-omap2/scrm44xx.h
@@ -0,0 +1,175 @@
+/*
+ * OMAP44xx SCRM registers and bitfields
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_SCRM_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_SCRM_44XX_H
+
+#define OMAP4_SCRM_BASE				0x4a30a000
+
+#define OMAP44XX_SCRM_REGADDR(reg)	\
+		OMAP2_L4_IO_ADDRESS(OMAP4_SCRM_BASE + (reg))
+
+/* Register offsets */
+#define OMAP4_SCRM_REVISION_SCRM_OFFSET		0x0000
+#define OMAP4_SCRM_REVISION_SCRM		OMAP44XX_SCRM_REGADDR(0x0000)
+#define OMAP4_SCRM_CLKSETUPTIME_OFFSET		0x0100
+#define OMAP4_SCRM_CLKSETUPTIME			OMAP44XX_SCRM_REGADDR(0x0100)
+#define OMAP4_SCRM_PMICSETUPTIME_OFFSET		0x0104
+#define OMAP4_SCRM_PMICSETUPTIME		OMAP44XX_SCRM_REGADDR(0x0104)
+#define OMAP4_SCRM_ALTCLKSRC_OFFSET		0x0110
+#define OMAP4_SCRM_ALTCLKSRC			OMAP44XX_SCRM_REGADDR(0x0110)
+#define OMAP4_SCRM_MODEMCLKM_OFFSET		0x0118
+#define OMAP4_SCRM_MODEMCLKM			OMAP44XX_SCRM_REGADDR(0x0118)
+#define OMAP4_SCRM_D2DCLKM_OFFSET		0x011c
+#define OMAP4_SCRM_D2DCLKM			OMAP44XX_SCRM_REGADDR(0x011c)
+#define OMAP4_SCRM_EXTCLKREQ_OFFSET		0x0200
+#define OMAP4_SCRM_EXTCLKREQ			OMAP44XX_SCRM_REGADDR(0x0200)
+#define OMAP4_SCRM_ACCCLKREQ_OFFSET		0x0204
+#define OMAP4_SCRM_ACCCLKREQ			OMAP44XX_SCRM_REGADDR(0x0204)
+#define OMAP4_SCRM_PWRREQ_OFFSET		0x0208
+#define OMAP4_SCRM_PWRREQ			OMAP44XX_SCRM_REGADDR(0x0208)
+#define OMAP4_SCRM_AUXCLKREQ0_OFFSET		0x0210
+#define OMAP4_SCRM_AUXCLKREQ0			OMAP44XX_SCRM_REGADDR(0x0210)
+#define OMAP4_SCRM_AUXCLKREQ1_OFFSET		0x0214
+#define OMAP4_SCRM_AUXCLKREQ1			OMAP44XX_SCRM_REGADDR(0x0214)
+#define OMAP4_SCRM_AUXCLKREQ2_OFFSET		0x0218
+#define OMAP4_SCRM_AUXCLKREQ2			OMAP44XX_SCRM_REGADDR(0x0218)
+#define OMAP4_SCRM_AUXCLKREQ3_OFFSET		0x021c
+#define OMAP4_SCRM_AUXCLKREQ3			OMAP44XX_SCRM_REGADDR(0x021c)
+#define OMAP4_SCRM_AUXCLKREQ4_OFFSET		0x0220
+#define OMAP4_SCRM_AUXCLKREQ4			OMAP44XX_SCRM_REGADDR(0x0220)
+#define OMAP4_SCRM_AUXCLKREQ5_OFFSET		0x0224
+#define OMAP4_SCRM_AUXCLKREQ5			OMAP44XX_SCRM_REGADDR(0x0224)
+#define OMAP4_SCRM_D2DCLKREQ_OFFSET		0x0234
+#define OMAP4_SCRM_D2DCLKREQ			OMAP44XX_SCRM_REGADDR(0x0234)
+#define OMAP4_SCRM_AUXCLK0_OFFSET		0x0310
+#define OMAP4_SCRM_AUXCLK0			OMAP44XX_SCRM_REGADDR(0x0310)
+#define OMAP4_SCRM_AUXCLK1_OFFSET		0x0314
+#define OMAP4_SCRM_AUXCLK1			OMAP44XX_SCRM_REGADDR(0x0314)
+#define OMAP4_SCRM_AUXCLK2_OFFSET		0x0318
+#define OMAP4_SCRM_AUXCLK2			OMAP44XX_SCRM_REGADDR(0x0318)
+#define OMAP4_SCRM_AUXCLK3_OFFSET		0x031c
+#define OMAP4_SCRM_AUXCLK3			OMAP44XX_SCRM_REGADDR(0x031c)
+#define OMAP4_SCRM_AUXCLK4_OFFSET		0x0320
+#define OMAP4_SCRM_AUXCLK4			OMAP44XX_SCRM_REGADDR(0x0320)
+#define OMAP4_SCRM_AUXCLK5_OFFSET		0x0324
+#define OMAP4_SCRM_AUXCLK5			OMAP44XX_SCRM_REGADDR(0x0324)
+#define OMAP4_SCRM_RSTTIME_OFFSET		0x0400
+#define OMAP4_SCRM_RSTTIME			OMAP44XX_SCRM_REGADDR(0x0400)
+#define OMAP4_SCRM_MODEMRSTCTRL_OFFSET		0x0418
+#define OMAP4_SCRM_MODEMRSTCTRL			OMAP44XX_SCRM_REGADDR(0x0418)
+#define OMAP4_SCRM_D2DRSTCTRL_OFFSET		0x041c
+#define OMAP4_SCRM_D2DRSTCTRL			OMAP44XX_SCRM_REGADDR(0x041c)
+#define OMAP4_SCRM_EXTPWRONRSTCTRL_OFFSET	0x0420
+#define OMAP4_SCRM_EXTPWRONRSTCTRL		OMAP44XX_SCRM_REGADDR(0x0420)
+#define OMAP4_SCRM_EXTWARMRSTST_OFFSET		0x0510
+#define OMAP4_SCRM_EXTWARMRSTST			OMAP44XX_SCRM_REGADDR(0x0510)
+#define OMAP4_SCRM_APEWARMRSTST_OFFSET		0x0514
+#define OMAP4_SCRM_APEWARMRSTST			OMAP44XX_SCRM_REGADDR(0x0514)
+#define OMAP4_SCRM_MODEMWARMRSTST_OFFSET	0x0518
+#define OMAP4_SCRM_MODEMWARMRSTST		OMAP44XX_SCRM_REGADDR(0x0518)
+#define OMAP4_SCRM_D2DWARMRSTST_OFFSET		0x051c
+#define OMAP4_SCRM_D2DWARMRSTST			OMAP44XX_SCRM_REGADDR(0x051c)
+
+/* Register shifts and masks */
+
+/* REVISION_SCRM */
+#define OMAP4_REV_SHIFT				0
+#define OMAP4_REV_MASK				(0xff << 0)
+
+/* CLKSETUPTIME */
+#define OMAP4_DOWNTIME_SHIFT			16
+#define OMAP4_DOWNTIME_MASK			(0x3f << 16)
+#define OMAP4_SETUPTIME_SHIFT			0
+#define OMAP4_SETUPTIME_MASK			(0xfff << 0)
+
+/* PMICSETUPTIME */
+#define OMAP4_WAKEUPTIME_SHIFT			16
+#define OMAP4_WAKEUPTIME_MASK			(0x3f << 16)
+#define OMAP4_SLEEPTIME_SHIFT			0
+#define OMAP4_SLEEPTIME_MASK			(0x3f << 0)
+
+/* ALTCLKSRC */
+#define OMAP4_ENABLE_EXT_SHIFT			3
+#define OMAP4_ENABLE_EXT_MASK			(1 << 3)
+#define OMAP4_ENABLE_INT_SHIFT			2
+#define OMAP4_ENABLE_INT_MASK			(1 << 2)
+#define OMAP4_ALTCLKSRC_MODE_SHIFT		0
+#define OMAP4_ALTCLKSRC_MODE_MASK		(0x3 << 0)
+
+/* MODEMCLKM */
+#define OMAP4_CLK_32KHZ_SHIFT			0
+#define OMAP4_CLK_32KHZ_MASK			(1 << 0)
+
+/* D2DCLKM */
+#define OMAP4_SYSCLK_SHIFT			1
+#define OMAP4_SYSCLK_MASK			(1 << 1)
+
+/* EXTCLKREQ */
+#define OMAP4_POLARITY_SHIFT			0
+#define OMAP4_POLARITY_MASK			(1 << 0)
+
+/* AUXCLKREQ0 */
+#define OMAP4_MAPPING_SHIFT			2
+#define OMAP4_MAPPING_MASK			(0x7 << 2)
+#define OMAP4_ACCURACY_SHIFT			1
+#define OMAP4_ACCURACY_MASK			(1 << 1)
+
+/* AUXCLK0 */
+#define OMAP4_CLKDIV_SHIFT			16
+#define OMAP4_CLKDIV_MASK			(0xf << 16)
+#define OMAP4_DISABLECLK_SHIFT			9
+#define OMAP4_DISABLECLK_MASK			(1 << 9)
+#define OMAP4_ENABLE_SHIFT			8
+#define OMAP4_ENABLE_MASK			(1 << 8)
+#define OMAP4_SRCSELECT_SHIFT			1
+#define OMAP4_SRCSELECT_MASK			(0x3 << 1)
+
+/* RSTTIME */
+#define OMAP4_RSTTIME_SHIFT			0
+#define OMAP4_RSTTIME_MASK			(0xf << 0)
+
+/* MODEMRSTCTRL */
+#define OMAP4_WARMRST_SHIFT			1
+#define OMAP4_WARMRST_MASK			(1 << 1)
+#define OMAP4_COLDRST_SHIFT			0
+#define OMAP4_COLDRST_MASK			(1 << 0)
+
+/* EXTPWRONRSTCTRL */
+#define OMAP4_PWRONRST_SHIFT			1
+#define OMAP4_PWRONRST_MASK			(1 << 1)
+#define OMAP4_ENABLE_EXTPWRONRSTCTRL_SHIFT	0
+#define OMAP4_ENABLE_EXTPWRONRSTCTRL_MASK	(1 << 0)
+
+/* EXTWARMRSTST */
+#define OMAP4_EXTWARMRSTST_SHIFT		0
+#define OMAP4_EXTWARMRSTST_MASK			(1 << 0)
+
+/* APEWARMRSTST */
+#define OMAP4_APEWARMRSTST_SHIFT		1
+#define OMAP4_APEWARMRSTST_MASK			(1 << 1)
+
+/* MODEMWARMRSTST */
+#define OMAP4_MODEMWARMRSTST_SHIFT		2
+#define OMAP4_MODEMWARMRSTST_MASK		(1 << 2)
+
+/* D2DWARMRSTST */
+#define OMAP4_D2DWARMRSTST_SHIFT		3
+#define OMAP4_D2DWARMRSTST_MASK			(1 << 3)
+
+#endif
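
The SCRM definitions above follow the usual shift/mask convention for bitfields. As an illustration only (the helper function is hypothetical; the register address and field macros are the ones defined in this header), updating the SRCSELECT field of AUXCLK0 might look like:

/* Illustrative only: program the AUXCLK0 source-select field */
static void example_set_auxclk0_src(u32 src)
{
	u32 v = __raw_readl(OMAP4_SCRM_AUXCLK0);

	v &= ~OMAP4_SRCSELECT_MASK;
	v |= (src << OMAP4_SRCSELECT_SHIFT) & OMAP4_SRCSELECT_MASK;
	__raw_writel(v, OMAP4_SCRM_AUXCLK0);
}
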
diff --git a/arch/arm/mach-omap2/sdram-nokia.c b/arch/arm/mach-omap2/sdram-nokia.c
new file mode 100644
index 0000000..14caa22
--- /dev/null
+++ b/arch/arm/mach-omap2/sdram-nokia.c
@@ -0,0 +1,279 @@
+/*
+ * SDRC register values for Nokia boards
+ *
+ * Copyright (C) 2008, 2010 Nokia Corporation
+ *
+ * Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ *
+ * Original code by Juha Yrjola <juha.yrjola@solidboot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/io.h>
+#include <plat/common.h>
+#include <plat/clock.h>
+#include <plat/sdrc.h>
+
+#include "sdram-nokia.h"
+
+/* In picoseconds, except for tREF (ns), tXP, tCKE, tWTR (clks) */
+struct sdram_timings {
+	u32 casl;
+	u32 tDAL;
+	u32 tDPL;
+	u32 tRRD;
+	u32 tRCD;
+	u32 tRP;
+	u32 tRAS;
+	u32 tRC;
+	u32 tRFC;
+	u32 tXSR;
+
+	u32 tREF; /* in ns */
+
+	u32 tXP;
+	u32 tCKE;
+	u32 tWTR;
+};
+
+static const struct sdram_timings nokia_97dot6mhz_timings[] = {
+	{
+		.casl = 3,
+		.tDAL = 30725,
+		.tDPL = 15362,
+		.tRRD = 10241,
+		.tRCD = 20483,
+		.tRP = 15362,
+		.tRAS = 40967,
+		.tRC = 56330,
+		.tRFC = 138266,
+		.tXSR = 204839,
+
+		.tREF = 7798,
+
+		.tXP = 2,
+		.tCKE = 4,
+		.tWTR = 2,
+	},
+};
+
+static const struct sdram_timings nokia_166mhz_timings[] = {
+	{
+		.casl = 3,
+		.tDAL = 33000,
+		.tDPL = 15000,
+		.tRRD = 12000,
+		.tRCD = 22500,
+		.tRP = 18000,
+		.tRAS = 42000,
+		.tRC = 66000,
+		.tRFC = 138000,
+		.tXSR = 200000,
+
+		.tREF = 7800,
+
+		.tXP = 2,
+		.tCKE = 2,
+		.tWTR = 2
+	},
+};
+
+static const struct sdram_timings nokia_195dot2mhz_timings[] = {
+	{
+		.casl = 3,
+		.tDAL = 30725,
+		.tDPL = 15362,
+		.tRRD = 10241,
+		.tRCD = 20483,
+		.tRP = 15362,
+		.tRAS = 40967,
+		.tRC = 56330,
+		.tRFC = 138266,
+		.tXSR = 204839,
+
+		.tREF = 7752,
+
+		.tXP = 2,
+		.tCKE = 4,
+		.tWTR = 2,
+	},
+};
+
+static const struct {
+	long rate;
+	struct sdram_timings const *data;
+} nokia_timings[] = {
+	{ 83000000, nokia_166mhz_timings },
+	{ 97600000, nokia_97dot6mhz_timings },
+	{ 166000000, nokia_166mhz_timings },
+	{ 195200000, nokia_195dot2mhz_timings },
+};
+static struct omap_sdrc_params nokia_sdrc_params[ARRAY_SIZE(nokia_timings) + 1];
+
+static unsigned long sdrc_get_fclk_period(long rate)
+{
+	/* In picoseconds */
+	return 1000000000 / rate;
+}
+
+static unsigned int sdrc_ps_to_ticks(unsigned int time_ps, long rate)
+{
+	unsigned long tick_ps;
+
+	/* Calculate in picosecs to yield more exact results */
+	tick_ps = sdrc_get_fclk_period(rate);
+
+	return (time_ps + tick_ps - 1) / tick_ps;
+}
+#undef DEBUG
+#ifdef DEBUG
+static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
+				int ticks, long rate, const char *name)
+#else
+static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
+			       int ticks)
+#endif
+{
+	int mask, nr_bits;
+
+	nr_bits = end_bit - st_bit + 1;
+	if (ticks >= 1 << nr_bits)
+		return -1;
+	mask = (1 << nr_bits) - 1;
+	*regval &= ~(mask << st_bit);
+	*regval |= ticks << st_bit;
+#ifdef DEBUG
+	printk(KERN_INFO "SDRC %s: %i ticks %i ns\n", name, ticks,
+			(unsigned int)sdrc_get_fclk_period(rate) * ticks /
+			1000);
+#endif
+
+	return 0;
+}
+
+#ifdef DEBUG
+#define SDRC_SET_ONE(reg, st, end, field, rate) \
+	if (set_sdrc_timing_regval((reg), (st), (end), \
+			memory_timings->field, (rate), #field) < 0) \
+		err = -1;
+#else
+#define SDRC_SET_ONE(reg, st, end, field, rate) \
+	if (set_sdrc_timing_regval((reg), (st), (end), \
+			memory_timings->field) < 0) \
+		err = -1;
+#endif
+
+#ifdef DEBUG
+static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
+				int time, long rate, const char *name)
+#else
+static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
+				int time, long rate)
+#endif
+{
+	int ticks, ret;
+	ret = 0;
+
+	if (time == 0)
+		ticks = 0;
+	else
+		ticks = sdrc_ps_to_ticks(time, rate);
+
+#ifdef DEBUG
+	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks,
+				     rate, name);
+#else
+	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks);
+#endif
+
+	return ret;
+}
+
+#ifdef DEBUG
+#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
+	if (set_sdrc_timing_regval_ps((reg), (st), (end), \
+			memory_timings->field, \
+			(rate), #field) < 0) \
+		err = -1;
+
+#else
+#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
+	if (set_sdrc_timing_regval_ps((reg), (st), (end), \
+			memory_timings->field, (rate)) < 0) \
+		err = -1;
+#endif
+
+static int sdrc_timings(int id, long rate,
+			const struct sdram_timings *memory_timings)
+{
+	u32 ticks_per_ms;
+	u32 rfr, l;
+	u32 actim_ctrla = 0, actim_ctrlb = 0;
+	u32 rfr_ctrl;
+	int err = 0;
+	long l3_rate = rate / 1000;
+
+	SDRC_SET_ONE_PS(&actim_ctrla,  0,  4, tDAL, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla,  6,  8, tDPL, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla,  9, 11, tRRD, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla, 12, 14, tRCD, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla, 15, 17, tRP, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla, 18, 21, tRAS, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla, 22, 26, tRC, l3_rate);
+	SDRC_SET_ONE_PS(&actim_ctrla, 27, 31, tRFC, l3_rate);
+
+	SDRC_SET_ONE_PS(&actim_ctrlb,  0,  7, tXSR, l3_rate);
+
+	SDRC_SET_ONE(&actim_ctrlb,  8, 10, tXP, l3_rate);
+	SDRC_SET_ONE(&actim_ctrlb, 12, 14, tCKE, l3_rate);
+	SDRC_SET_ONE(&actim_ctrlb, 16, 17, tWTR, l3_rate);
+
+	ticks_per_ms = l3_rate;
+	rfr = memory_timings[0].tREF * ticks_per_ms / 1000000;
+	if (rfr > 65535 + 50)
+		rfr = 65535;
+	else
+		rfr -= 50;
+
+#ifdef DEBUG
+	printk(KERN_INFO "SDRC tREF: %i ticks\n", rfr);
+#endif
+
+	l = rfr << 8;
+	rfr_ctrl = l | 0x1; /* autorefresh, reload counter with 1xARCV */
+
+	nokia_sdrc_params[id].rate = rate;
+	nokia_sdrc_params[id].actim_ctrla = actim_ctrla;
+	nokia_sdrc_params[id].actim_ctrlb = actim_ctrlb;
+	nokia_sdrc_params[id].rfr_ctrl = rfr_ctrl;
+	nokia_sdrc_params[id].mr = 0x32;
+
+	nokia_sdrc_params[id + 1].rate = 0;
+
+	return err;
+}
+
+struct omap_sdrc_params *nokia_get_sdram_timings(void)
+{
+	int err = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nokia_timings); i++) {
+		err |= sdrc_timings(i, nokia_timings[i].rate,
+				       nokia_timings[i].data);
+		if (err)
+			pr_err("%s: error with rate %ld: %d\n", __func__,
+			       nokia_timings[i].rate, err);
+	}
+
+	return err ? NULL : nokia_sdrc_params;
+}
+
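
To make the picosecond-to-tick conversion concrete: for the 166 MHz table entry, l3_rate is 166000 kHz, so sdrc_get_fclk_period() returns 1000000000 / 166000 = 6024 ps per SDRC clock tick; tRP = 18000 ps then rounds up to (18000 + 6024 - 1) / 6024 = 3 ticks, which SDRC_SET_ONE_PS() programs into bits 15..17 of ACTIM_CTRLA.
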
diff --git a/arch/arm/mach-omap2/sdram-nokia.h b/arch/arm/mach-omap2/sdram-nokia.h
new file mode 100644
index 0000000..ee63da5
--- /dev/null
+++ b/arch/arm/mach-omap2/sdram-nokia.h
@@ -0,0 +1,12 @@
+/*
+ * SDRC register values for Nokia boards
+ *
+ * Copyright (C) 2010 Nokia
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct omap_sdrc_params *nokia_get_sdram_timings(void);
+
diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
index 4c65f56..da6f3a6 100644
--- a/arch/arm/mach-omap2/sdrc.c
+++ b/arch/arm/mach-omap2/sdrc.c
@@ -27,8 +27,6 @@
 #include <plat/clock.h>
 #include <plat/sram.h>
 
-#include "prm.h"
-
 #include <plat/sdrc.h>
 #include "sdrc.h"
 
diff --git a/arch/arm/mach-omap2/sdrc.h b/arch/arm/mach-omap2/sdrc.h
index 68f57bb..b3f8379 100644
--- a/arch/arm/mach-omap2/sdrc.h
+++ b/arch/arm/mach-omap2/sdrc.h
@@ -74,5 +74,4 @@
  */
 #define SDRC_MPURATE_LOOPS		96
 
-
 #endif
diff --git a/arch/arm/mach-omap2/sdrc2xxx.c b/arch/arm/mach-omap2/sdrc2xxx.c
index 0f4d27a..ccdb010 100644
--- a/arch/arm/mach-omap2/sdrc2xxx.c
+++ b/arch/arm/mach-omap2/sdrc2xxx.c
@@ -28,7 +28,7 @@
 #include <plat/clock.h>
 #include <plat/sram.h>
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "clock.h"
 #include <plat/sdrc.h>
 #include "sdrc.h"
@@ -99,6 +99,10 @@
 	m_type = omap2xxx_sdrc_get_type();
 
 	local_irq_save(flags);
+	/*
+	 * XXX These calls should be abstracted out through a
+	 * prm2xxx.c function
+	 */
 	if (cpu_is_omap2420())
 		__raw_writel(0xffff, OMAP2420_PRCM_VOLTSETUP);
 	else
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index d17960a..302da74 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -40,11 +40,12 @@
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
 
-#include "prm.h"
+#include "prm2xxx_3xxx.h"
 #include "pm.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 #include "prm-regbits-34xx.h"
 #include "control.h"
+#include "mux.h"
 
 #define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV	0x52
 #define UART_OMAP_WER		0x17	/* Wake-up enable register */
@@ -106,21 +107,16 @@
 static LIST_HEAD(uart_list);
 static u8 num_uarts;
 
-/*
- * Since these idle/enable hooks are used in the idle path itself
- * which has interrupts disabled, use the non-locking versions of
- * the hwmod enable/disable functions.
- */
 static int uart_idle_hwmod(struct omap_device *od)
 {
-	_omap_hwmod_idle(od->hwmods[0]);
+	omap_hwmod_idle(od->hwmods[0]);
 
 	return 0;
 }
 
 static int uart_enable_hwmod(struct omap_device *od)
 {
-	_omap_hwmod_enable(od->hwmods[0]);
+	omap_hwmod_enable(od->hwmods[0]);
 
 	return 0;
 }
@@ -169,9 +165,9 @@
 
 static inline void __init omap_uart_reset(struct omap_uart_state *uart)
 {
-	serial_write_reg(uart, UART_OMAP_MDR1, 0x07);
+	serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
 	serial_write_reg(uart, UART_OMAP_SCR, 0x08);
-	serial_write_reg(uart, UART_OMAP_MDR1, 0x00);
+	serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
 }
 
 #if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
@@ -219,7 +215,7 @@
 		return;
 
 	lcr = serial_read_reg(uart, UART_LCR);
-	serial_write_reg(uart, UART_LCR, 0xBF);
+	serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
 	uart->dll = serial_read_reg(uart, UART_DLL);
 	uart->dlh = serial_read_reg(uart, UART_DLM);
 	serial_write_reg(uart, UART_LCR, lcr);
@@ -227,7 +223,7 @@
 	uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC);
 	uart->scr = serial_read_reg(uart, UART_OMAP_SCR);
 	uart->wer = serial_read_reg(uart, UART_OMAP_WER);
-	serial_write_reg(uart, UART_LCR, 0x80);
+	serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
 	uart->mcr = serial_read_reg(uart, UART_MCR);
 	serial_write_reg(uart, UART_LCR, lcr);
 
@@ -247,32 +243,35 @@
 	uart->context_valid = 0;
 
 	if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
-		omap_uart_mdr1_errataset(uart, 0x07, 0xA0);
+		omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_DISABLE, 0xA0);
 	else
-		serial_write_reg(uart, UART_OMAP_MDR1, 0x7);
-	serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
+		serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+
+	serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
 	efr = serial_read_reg(uart, UART_EFR);
 	serial_write_reg(uart, UART_EFR, UART_EFR_ECB);
 	serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
 	serial_write_reg(uart, UART_IER, 0x0);
-	serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
+	serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
 	serial_write_reg(uart, UART_DLL, uart->dll);
 	serial_write_reg(uart, UART_DLM, uart->dlh);
 	serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
 	serial_write_reg(uart, UART_IER, uart->ier);
-	serial_write_reg(uart, UART_LCR, 0x80);
+	serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
 	serial_write_reg(uart, UART_MCR, uart->mcr);
-	serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
+	serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
 	serial_write_reg(uart, UART_EFR, efr);
 	serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8);
 	serial_write_reg(uart, UART_OMAP_SCR, uart->scr);
 	serial_write_reg(uart, UART_OMAP_WER, uart->wer);
 	serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc);
+
 	if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
-		omap_uart_mdr1_errataset(uart, 0x00, 0xA1);
+		omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_16X_MODE, 0xA1);
 	else
 		/* UART 16x mode */
-		serial_write_reg(uart, UART_OMAP_MDR1, 0x00);
+		serial_write_reg(uart, UART_OMAP_MDR1,
+				UART_OMAP_MDR1_16X_MODE);
 }
 #else
 static inline void omap_uart_save_context(struct omap_uart_state *uart) {}
@@ -492,6 +491,7 @@
 		u32 wk_mask = 0;
 		u32 padconf = 0;
 
+		/* XXX These PRM accesses do not belong here */
 		uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1);
 		uart->wk_st = OMAP34XX_PRM_REGADDR(mod, PM_WKST1);
 		switch (uart->num) {
@@ -695,16 +695,16 @@
 
 /**
  * omap_serial_init_port() - initialize single serial port
- * @port: serial port number (0-3)
+ * @bdata: port specific board data pointer
  *
- * This function initialies serial driver for given @port only.
+ * This function initializes the serial driver for the given port only.
  * Platforms can call this function instead of omap_serial_init()
  * if they don't plan to use all available UARTs as serial ports.
  *
  * Don't mix calls to omap_serial_init_port() and omap_serial_init(),
  * use only one of the two.
  */
-void __init omap_serial_init_port(int port)
+void __init omap_serial_init_port(struct omap_board_data *bdata)
 {
 	struct omap_uart_state *uart;
 	struct omap_hwmod *oh;
@@ -722,13 +722,15 @@
 	struct omap_uart_port_info omap_up;
 #endif
 
-	if (WARN_ON(port < 0))
+	if (WARN_ON(!bdata))
 		return;
-	if (WARN_ON(port >= num_uarts))
+	if (WARN_ON(bdata->id < 0))
+		return;
+	if (WARN_ON(bdata->id >= num_uarts))
 		return;
 
 	list_for_each_entry(uart, &uart_list, node)
-		if (port == uart->num)
+		if (bdata->id == uart->num)
 			break;
 
 	oh = uart->oh;
@@ -800,6 +802,8 @@
 	WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
 	     name, oh->name);
 
+	oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
+
 	uart->irq = oh->mpu_irqs[0].irq;
 	uart->regshift = 2;
 	uart->mapbase = oh->slaves[0]->addr->pa_start;
@@ -848,7 +852,7 @@
 }
 
 /**
- * omap_serial_init() - intialize all supported serial ports
+ * omap_serial_init() - initialize all supported serial ports
  *
  * Initializes all available UARTs as serial ports. Platforms
  * can call this function when they want to have default behaviour
@@ -857,7 +861,14 @@
 void __init omap_serial_init(void)
 {
 	struct omap_uart_state *uart;
+	struct omap_board_data bdata;
 
-	list_for_each_entry(uart, &uart_list, node)
-		omap_serial_init_port(uart->num);
+	list_for_each_entry(uart, &uart_list, node) {
+		bdata.id = uart->num;
+		bdata.flags = 0;
+		bdata.pads = NULL;
+		bdata.pads_cnt = 0;
+		omap_serial_init_port(&bdata);
+
+	}
 }
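
With the omap_board_data change, a board file that only brings up a single UART (and does no pad remuxing) would call omap_serial_init_port() roughly as below. This is a hypothetical sketch mirroring what the updated omap_serial_init() does per port, not code from this patch.

static void __init example_board_serial_init(void)
{
	struct omap_board_data bdata = {
		.id		= 2,	/* third UART, zero-based */
		.flags		= 0,
		.pads		= NULL,
		.pads_cnt	= 0,
	};

	omap_serial_init_port(&bdata);
}
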
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 2fb205a..98d8232 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -1,6 +1,4 @@
 /*
- * linux/arch/arm/mach-omap2/sleep.S
- *
  * (C) Copyright 2007
  * Texas Instruments
  * Karthik Dasu <karthik-dp@ti.com>
@@ -26,28 +24,35 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <plat/sram.h>
 #include <mach/io.h>
 
-#include "cm.h"
-#include "prm.h"
+#include "cm2xxx_3xxx.h"
+#include "prm2xxx_3xxx.h"
 #include "sdrc.h"
 #include "control.h"
 
-#define SDRC_SCRATCHPAD_SEM_V	0xfa00291c
-
-#define PM_PREPWSTST_CORE_V	OMAP34XX_PRM_REGADDR(CORE_MOD, \
-				OMAP3430_PM_PREPWSTST)
-#define PM_PREPWSTST_CORE_P	0x48306AE8
-#define PM_PREPWSTST_MPU_V	OMAP34XX_PRM_REGADDR(MPU_MOD, \
-				OMAP3430_PM_PREPWSTST)
+/*
+ * Registers access definitions
+ */
+#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
+#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
+					(SDRC_SCRATCHPAD_SEM_OFFS)
+#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
+					OMAP3430_PM_PREPWSTST
 #define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
 #define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
-#define SRAM_BASE_P		0x40200000
-#define CONTROL_STAT		0x480022F0
-#define SCRATCHPAD_MEM_OFFS	0x310 /* Move this as correct place is
-				       * available */
-#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE + OMAP343X_CONTROL_MEM_WKUP\
-						+ SCRATCHPAD_MEM_OFFS)
+#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
+#define SRAM_BASE_P		OMAP3_SRAM_PA
+#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
+#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
+					OMAP36XX_CONTROL_MEM_RTA_CTRL)
+
+/* Move this once a correct place is available */
+#define SCRATCHPAD_MEM_OFFS	0x310
+#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
+					OMAP343X_CONTROL_MEM_WKUP +\
+					SCRATCHPAD_MEM_OFFS)
 #define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
 #define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
 #define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
@@ -59,48 +64,38 @@
 #define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
 #define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)
 
-        .text
-/* Function to acquire the semaphore in scratchpad */
-ENTRY(lock_scratchpad_sem)
-	stmfd	sp!, {lr}	@ save registers on stack
-wait_sem:
-	mov	r0,#1
-	ldr	r1, sdrc_scratchpad_sem
-wait_loop:
-	ldr	r2, [r1]	@ load the lock value
-	cmp	r2, r0		@ is the lock free ?
-	beq	wait_loop	@ not free...
-	swp	r2, r0, [r1]	@ semaphore free so lock it and proceed
-	cmp	r2, r0		@ did we succeed ?
-	beq	wait_sem	@ no - try again
-	ldmfd	sp!, {pc}	@ restore regs and return
-sdrc_scratchpad_sem:
-        .word SDRC_SCRATCHPAD_SEM_V
-ENTRY(lock_scratchpad_sem_sz)
-        .word   . - lock_scratchpad_sem
 
-        .text
-/* Function to release the scratchpad semaphore */
-ENTRY(unlock_scratchpad_sem)
-	stmfd	sp!, {lr}	@ save registers on stack
-	ldr	r3, sdrc_scratchpad_sem
-	mov	r2,#0
-	str	r2,[r3]
-	ldmfd	sp!, {pc}	@ restore regs and return
-ENTRY(unlock_scratchpad_sem_sz)
-        .word   . - unlock_scratchpad_sem
+/*
+ * API functions
+ */
+
+/*
+ * The "get_*restore_pointer" functions are used to provide a
+ * physical restore address where the ROM code jumps while waking
+ * up from MPU OFF/OSWR state.
+ * The restore pointer is stored into the scratchpad.
+ */
 
 	.text
 /* Function call to get the restore pointer for resume from OFF */
 ENTRY(get_restore_pointer)
-        stmfd   sp!, {lr}     @ save registers on stack
+	stmfd	sp!, {lr}	@ save registers on stack
 	adr	r0, restore
-        ldmfd   sp!, {pc}     @ restore regs and return
+	ldmfd	sp!, {pc}	@ restore regs and return
 ENTRY(get_restore_pointer_sz)
-        .word   . - get_restore_pointer
+	.word	. - get_restore_pointer
 
 	.text
-/* Function call to get the restore pointer for for ES3 to resume from OFF */
+/* Function call to get the restore pointer for 3630 resume from OFF */
+ENTRY(get_omap3630_restore_pointer)
+	stmfd	sp!, {lr}	@ save registers on stack
+	adr	r0, restore_3630
+	ldmfd	sp!, {pc}	@ restore regs and return
+ENTRY(get_omap3630_restore_pointer_sz)
+	.word	. - get_omap3630_restore_pointer
+
+	.text
+/* Function call to get the restore pointer for ES3 to resume from OFF */
 ENTRY(get_es3_restore_pointer)
 	stmfd	sp!, {lr}	@ save registers on stack
 	adr	r0, restore_es3
@@ -108,54 +103,23 @@
 ENTRY(get_es3_restore_pointer_sz)
 	.word	. - get_es3_restore_pointer
 
-ENTRY(es3_sdrc_fix)
-	ldr	r4, sdrc_syscfg		@ get config addr
-	ldr	r5, [r4]		@ get value
-	tst	r5, #0x100		@ is part access blocked
-	it	eq
-	biceq	r5, r5, #0x100		@ clear bit if set
-	str	r5, [r4]		@ write back change
-	ldr	r4, sdrc_mr_0		@ get config addr
-	ldr	r5, [r4]		@ get value
-	str	r5, [r4]		@ write back change
-	ldr	r4, sdrc_emr2_0		@ get config addr
-	ldr	r5, [r4]		@ get value
-	str	r5, [r4]		@ write back change
-	ldr	r4, sdrc_manual_0	@ get config addr
-	mov	r5, #0x2		@ autorefresh command
-	str	r5, [r4]		@ kick off refreshes
-	ldr	r4, sdrc_mr_1		@ get config addr
-	ldr	r5, [r4]		@ get value
-	str	r5, [r4]		@ write back change
-	ldr	r4, sdrc_emr2_1		@ get config addr
-	ldr	r5, [r4]		@ get value
-	str	r5, [r4]		@ write back change
-	ldr	r4, sdrc_manual_1	@ get config addr
-	mov	r5, #0x2		@ autorefresh command
-	str	r5, [r4]		@ kick off refreshes
-	bx	lr
-sdrc_syscfg:
-	.word	SDRC_SYSCONFIG_P
-sdrc_mr_0:
-	.word	SDRC_MR_0_P
-sdrc_emr2_0:
-	.word	SDRC_EMR2_0_P
-sdrc_manual_0:
-	.word	SDRC_MANUAL_0_P
-sdrc_mr_1:
-	.word	SDRC_MR_1_P
-sdrc_emr2_1:
-	.word	SDRC_EMR2_1_P
-sdrc_manual_1:
-	.word	SDRC_MANUAL_1_P
-ENTRY(es3_sdrc_fix_sz)
-	.word	. - es3_sdrc_fix
+	.text
+/*
+ * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
+ * This function sets up a flag that will allow for this toggling to take
+ * place on 3630. Hopefully some version in the future may not need this.
+ */
+ENTRY(enable_omap3630_toggle_l2_on_restore)
+	stmfd	sp!, {lr}	@ save registers on stack
+	/* Setup so that we will disable and enable l2 */
+	mov	r1, #0x1
+	str	r1, l2dis_3630
+	ldmfd	sp!, {pc}	@ restore regs and return
 
+	.text
 /* Function to call rom code to save secure ram context */
 ENTRY(save_secure_ram_context)
 	stmfd	sp!, {r1-r12, lr}	@ save registers on stack
-save_secure_ram_debug:
-	/* b save_secure_ram_debug */	@ enable to debug save code
 	adr	r3, api_params		@ r3 points to parameters
 	str	r0, [r3,#0x4]		@ r0 has sdram address
 	ldr	r12, high_mask
@@ -185,278 +149,55 @@
 	.word	. - save_secure_ram_context
 
 /*
+ * ======================
+ * == Idle entry point ==
+ * ======================
+ */
+
+/*
  * Forces OMAP into idle state
  *
- * omap34xx_suspend() - This bit of code just executes the WFI
- * for normal idles.
+ * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
+ * and executes the WFI instruction. Calling WFI effectively changes the
+ * power domain states to the desired target power states.
  *
- * Note: This code get's copied to internal SRAM at boot. When the OMAP
- *	 wakes up it continues execution at the point it went to sleep.
+ *
+ * Notes:
+ * - this code gets copied to internal SRAM at boot and after wake-up
+ *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
+ * - when the OMAP wakes up it continues at different execution points
+ *   depending on the low power mode (non-OFF vs OFF modes),
+ *   cf. 'Resume path for xxx mode' comments.
  */
 ENTRY(omap34xx_cpu_suspend)
-	stmfd	sp!, {r0-r12, lr}		@ save registers on stack
-loop:
-	/*b	loop*/	@Enable to debug by stepping through code
-	/* r0 contains restore pointer in sdram */
-	/* r1 contains information about saving context */
-	ldr     r4, sdrc_power          @ read the SDRC_POWER register
-	ldr     r5, [r4]                @ read the contents of SDRC_POWER
-	orr     r5, r5, #0x40           @ enable self refresh on idle req
-	str     r5, [r4]                @ write back to SDRC_POWER register
+	stmfd	sp!, {r0-r12, lr}	@ save registers on stack
 
+	/*
+	 * r0 contains restore pointer in sdram
+	 * r1 contains information about saving context:
+	 *   0 - No context lost
+	 *   1 - Only L1 and logic lost
+	 *   2 - Only L2 lost
+	 *   3 - Both L1 and L2 lost
+	 */
+
+	/* Directly jump to WFI if the context save is not required */
 	cmp	r1, #0x0
-	/* If context save is required, do that and execute wfi */
-	bne	save_context_wfi
-	/* Data memory barrier and Data sync barrier */
-	mov	r1, #0
-	mcr	p15, 0, r1, c7, c10, 4
-	mcr	p15, 0, r1, c7, c10, 5
+	beq	omap3_do_wfi
 
-	wfi				@ wait for interrupt
-
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	nop
-	bl wait_sdrc_ok
-
-	ldmfd	sp!, {r0-r12, pc}		@ restore regs and return
-restore_es3:
-	/*b restore_es3*/		@ Enable to debug restore code
-	ldr	r5, pm_prepwstst_core_p
-	ldr	r4, [r5]
-	and	r4, r4, #0x3
-	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
-	bne	restore
-	adr	r0, es3_sdrc_fix
-	ldr	r1, sram_base
-	ldr	r2, es3_sdrc_fix_sz
-	mov	r2, r2, ror #2
-copy_to_sram:
-	ldmia	r0!, {r3}	@ val = *src
-	stmia	r1!, {r3}	@ *dst = val
-	subs	r2, r2, #0x1	@ num_words--
-	bne	copy_to_sram
-	ldr	r1, sram_base
-	blx	r1
-restore:
-	/* b restore*/  @ Enable to debug restore code
-        /* Check what was the reason for mpu reset and store the reason in r9*/
-        /* 1 - Only L1 and logic lost */
-        /* 2 - Only L2 lost - In this case, we wont be here */
-        /* 3 - Both L1 and L2 lost */
-	ldr     r1, pm_pwstctrl_mpu
-	ldr	r2, [r1]
-	and     r2, r2, #0x3
-	cmp     r2, #0x0	@ Check if target power state was OFF or RET
-        moveq   r9, #0x3        @ MPU OFF => L1 and L2 lost
-	movne	r9, #0x1	@ Only L1 and L2 lost => avoid L2 invalidation
-	bne	logic_l1_restore
-	ldr	r0, control_stat
-	ldr	r1, [r0]
-	and	r1, #0x700
-	cmp	r1, #0x300
-	beq	l2_inv_gp
-	mov	r0, #40		@ set service ID for PPA
-	mov	r12, r0		@ copy secure Service ID in r12
-	mov	r1, #0		@ set task id for ROM code in r1
-	mov	r2, #4		@ set some flags in r2, r6
-	mov	r6, #0xff
-	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
-	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
-	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
-	.word	0xE1600071		@ call SMI monitor (smi #1)
-	/* Write to Aux control register to set some bits */
-	mov	r0, #42		@ set service ID for PPA
-	mov	r12, r0		@ copy secure Service ID in r12
-	mov	r1, #0		@ set task id for ROM code in r1
-	mov	r2, #4		@ set some flags in r2, r6
-	mov	r6, #0xff
-	ldr	r4, scratchpad_base
-	ldr	r3, [r4, #0xBC]	@ r3 points to parameters
-	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
-	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
-	.word	0xE1600071		@ call SMI monitor (smi #1)
-
-#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
-	/* Restore L2 aux control register */
-	@ set service ID for PPA
-	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
-	mov	r12, r0		@ copy service ID in r12
-	mov	r1, #0		@ set task ID for ROM code in r1
-	mov	r2, #4		@ set some flags in r2, r6
-	mov	r6, #0xff
-	ldr	r4, scratchpad_base
-	ldr	r3, [r4, #0xBC]
-	adds	r3, r3, #8	@ r3 points to parameters
-	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
-	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
-	.word	0xE1600071		@ call SMI monitor (smi #1)
-#endif
-	b	logic_l1_restore
-l2_inv_api_params:
-	.word   0x1, 0x00
-l2_inv_gp:
-	/* Execute smi to invalidate L2 cache */
-	mov r12, #0x1                         @ set up to invalide L2
-smi:    .word 0xE1600070		@ Call SMI monitor (smieq)
-	/* Write to Aux control register to set some bits */
-	ldr	r4, scratchpad_base
-	ldr	r3, [r4,#0xBC]
-	ldr	r0, [r3,#4]
-	mov	r12, #0x3
-	.word 0xE1600070	@ Call SMI monitor (smieq)
-	ldr	r4, scratchpad_base
-	ldr	r3, [r4,#0xBC]
-	ldr	r0, [r3,#12]
-	mov	r12, #0x2
-	.word 0xE1600070	@ Call SMI monitor (smieq)
-logic_l1_restore:
-	mov	r1, #0
-	/* Invalidate all instruction caches to PoU
-	 * and flush branch target cache */
-	mcr	p15, 0, r1, c7, c5, 0
-
-	ldr	r4, scratchpad_base
-	ldr	r3, [r4,#0xBC]
-	adds	r3, r3, #16
-	ldmia	r3!, {r4-r6}
-	mov	sp, r4
-	msr	spsr_cxsf, r5
-	mov	lr, r6
-
-	ldmia	r3!, {r4-r9}
-	/* Coprocessor access Control Register */
-	mcr p15, 0, r4, c1, c0, 2
-
-	/* TTBR0 */
-	MCR p15, 0, r5, c2, c0, 0
-	/* TTBR1 */
-	MCR p15, 0, r6, c2, c0, 1
-	/* Translation table base control register */
-	MCR p15, 0, r7, c2, c0, 2
-	/*domain access Control Register */
-	MCR p15, 0, r8, c3, c0, 0
-	/* data fault status Register */
-	MCR p15, 0, r9, c5, c0, 0
-
-	ldmia  r3!,{r4-r8}
-	/* instruction fault status Register */
-	MCR p15, 0, r4, c5, c0, 1
-	/*Data Auxiliary Fault Status Register */
-	MCR p15, 0, r5, c5, c1, 0
-	/*Instruction Auxiliary Fault Status Register*/
-	MCR p15, 0, r6, c5, c1, 1
-	/*Data Fault Address Register */
-	MCR p15, 0, r7, c6, c0, 0
-	/*Instruction Fault Address Register*/
-	MCR p15, 0, r8, c6, c0, 2
-	ldmia  r3!,{r4-r7}
-
-	/* user r/w thread and process ID */
-	MCR p15, 0, r4, c13, c0, 2
-	/* user ro thread and process ID */
-	MCR p15, 0, r5, c13, c0, 3
-	/*Privileged only thread and process ID */
-	MCR p15, 0, r6, c13, c0, 4
-	/* cache size selection */
-	MCR p15, 2, r7, c0, c0, 0
-	ldmia  r3!,{r4-r8}
-	/* Data TLB lockdown registers */
-	MCR p15, 0, r4, c10, c0, 0
-	/* Instruction TLB lockdown registers */
-	MCR p15, 0, r5, c10, c0, 1
-	/* Secure or Nonsecure Vector Base Address */
-	MCR p15, 0, r6, c12, c0, 0
-	/* FCSE PID */
-	MCR p15, 0, r7, c13, c0, 0
-	/* Context PID */
-	MCR p15, 0, r8, c13, c0, 1
-
-	ldmia  r3!,{r4-r5}
-	/* primary memory remap register */
-	MCR p15, 0, r4, c10, c2, 0
-	/*normal memory remap register */
-	MCR p15, 0, r5, c10, c2, 1
-
-	/* Restore cpsr */
-	ldmia	r3!,{r4}	/*load CPSR from SDRAM*/
-	msr	cpsr, r4	/*store cpsr */
-
-	/* Enabling MMU here */
-	mrc	p15, 0, r7, c2, c0, 2 /* Read TTBRControl */
-	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1*/
-	and	r7, #0x7
-	cmp	r7, #0x0
-	beq	usettbr0
-ttbr_error:
-	/* More work needs to be done to support N[0:2] value other than 0
-	* So looping here so that the error can be detected
-	*/
-	b	ttbr_error
-usettbr0:
-	mrc	p15, 0, r2, c2, c0, 0
-	ldr	r5, ttbrbit_mask
-	and	r2, r5
-	mov	r4, pc
-	ldr	r5, table_index_mask
-	and	r4, r5 /* r4 = 31 to 20 bits of pc */
-	/* Extract the value to be written to table entry */
-	ldr	r1, table_entry
-	add	r1, r1, r4 /* r1 has value to be written to table entry*/
-	/* Getting the address of table entry to modify */
-	lsr	r4, #18
-	add	r2, r4 /* r2 has the location which needs to be modified */
-	/* Storing previous entry of location being modified */
-	ldr	r5, scratchpad_base
-	ldr	r4, [r2]
-	str	r4, [r5, #0xC0]
-	/* Modify the table entry */
-	str	r1, [r2]
-	/* Storing address of entry being modified
-	 * - will be restored after enabling MMU */
-	ldr	r5, scratchpad_base
-	str	r2, [r5, #0xC4]
-
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
-	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
-	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
-	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
-	/* Restore control register  but dont enable caches here*/
-	/* Caches will be enabled after restoring MMU table entry */
-	ldmia	r3!, {r4}
-	/* Store previous value of control register in scratchpad */
-	str	r4, [r5, #0xC8]
-	ldr	r2, cache_pred_disable_mask
-	and	r4, r2
-	mcr	p15, 0, r4, c1, c0, 0
-
-	ldmfd	sp!, {r0-r12, pc}		@ restore regs and return
+	/* Otherwise fall through to the save context code */
 save_context_wfi:
-	/*b	save_context_wfi*/	@ enable to debug save code
-	mov	r8, r0 /* Store SDRAM address in r8 */
+	mov	r8, r0			@ Store SDRAM address in r8
 	mrc	p15, 0, r5, c1, c0, 1	@ Read Auxiliary Control Register
 	mov	r4, #0x1		@ Number of parameters for restore call
 	stmia	r8!, {r4-r5}		@ Push parameters for restore call
 	mrc	p15, 1, r5, c9, c0, 2	@ Read L2 AUX ctrl register
 	stmia	r8!, {r4-r5}		@ Push parameters for restore call
-        /* Check what that target sleep state is:stored in r1*/
-        /* 1 - Only L1 and logic lost */
-        /* 2 - Only L2 lost */
-        /* 3 - Both L1 and L2 lost */
-	cmp	r1, #0x2 /* Only L2 lost */
-	beq	clean_l2
-	cmp	r1, #0x1 /* L2 retained */
-	/* r9 stores whether to clean L2 or not*/
-	moveq	r9, #0x0 /* Dont Clean L2 */
-	movne	r9, #0x1 /* Clean L2 */
+
+	/* Check what the target sleep state is from r1 */
+	cmp	r1, #0x2		@ Only L2 lost, no need to save context
+	beq	clean_caches
+
 l1_logic_lost:
 	/* Store sp and spsr to SDRAM */
 	mov	r4, sp
@@ -472,21 +213,27 @@
 	mrc	p15, 0, r5, c2, c0, 1
 	mrc	p15, 0, r6, c2, c0, 2
 	stmia	r8!, {r4-r6}
-	/* Domain access control register, data fault status register,
-	and instruction fault status register */
+	/*
+	 * Domain access control register, data fault status register,
+	 * and instruction fault status register
+	 */
 	mrc	p15, 0, r4, c3, c0, 0
 	mrc	p15, 0, r5, c5, c0, 0
 	mrc	p15, 0, r6, c5, c0, 1
 	stmia	r8!, {r4-r6}
-	/* Data aux fault status register, instruction aux fault status,
-	datat fault address register and instruction fault address register*/
+	/*
+	 * Data aux fault status register, instruction aux fault status,
+	 * data fault address register and instruction fault address register
+	 */
 	mrc	p15, 0, r4, c5, c1, 0
 	mrc	p15, 0, r5, c5, c1, 1
 	mrc	p15, 0, r6, c6, c0, 0
 	mrc	p15, 0, r7, c6, c0, 2
 	stmia	r8!, {r4-r7}
-	/* user r/w thread and process ID, user r/o thread and process ID,
-	priv only thread and process ID, cache size selection */
+	/*
+	 * user r/w thread and process ID, user r/o thread and process ID,
+	 * priv only thread and process ID, cache size selection
+	 */
 	mrc	p15, 0, r4, c13, c0, 2
 	mrc	p15, 0, r5, c13, c0, 3
 	mrc	p15, 0, r6, c13, c0, 4
@@ -513,86 +260,51 @@
 	mrc	p15, 0, r4, c1, c0, 0
 	/* save control register */
 	stmia	r8!, {r4}
-clean_caches:
-	/* Clean Data or unified cache to POU*/
-	/* How to invalidate only L1 cache???? - #FIX_ME# */
-	/* mcr	p15, 0, r11, c7, c11, 1 */
-	cmp	r9, #1 /* Check whether L2 inval is required or not*/
-	bne	skip_l2_inval
-clean_l2:
-	/* read clidr */
-	mrc     p15, 1, r0, c0, c0, 1
-	/* extract loc from clidr */
-	ands    r3, r0, #0x7000000
-	/* left align loc bit field */
-	mov     r3, r3, lsr #23
-	/* if loc is 0, then no need to clean */
-	beq     finished
-	/* start clean at cache level 0 */
-	mov     r10, #0
-loop1:
-	/* work out 3x current cache level */
-	add     r2, r10, r10, lsr #1
-	/* extract cache type bits from clidr*/
-	mov     r1, r0, lsr r2
-	/* mask of the bits for current cache only */
-	and     r1, r1, #7
-	/* see what cache we have at this level */
-	cmp     r1, #2
-	/* skip if no cache, or just i-cache */
-	blt     skip
-	/* select current cache level in cssr */
-	mcr     p15, 2, r10, c0, c0, 0
-	/* isb to sych the new cssr&csidr */
-	isb
-	/* read the new csidr */
-	mrc     p15, 1, r1, c0, c0, 0
-	/* extract the length of the cache lines */
-	and     r2, r1, #7
-	/* add 4 (line length offset) */
-	add     r2, r2, #4
-	ldr     r4, assoc_mask
-	/* find maximum number on the way size */
-	ands    r4, r4, r1, lsr #3
-	/* find bit position of way size increment */
-	clz     r5, r4
-	ldr     r7, numset_mask
-	/* extract max number of the index size*/
-	ands    r7, r7, r1, lsr #13
-loop2:
-	mov     r9, r4
-	/* create working copy of max way size*/
-loop3:
-	/* factor way and cache number into r11 */
-	orr     r11, r10, r9, lsl r5
-	/* factor index number into r11 */
-	orr     r11, r11, r7, lsl r2
-	/*clean & invalidate by set/way */
-	mcr     p15, 0, r11, c7, c10, 2
-	/* decrement the way*/
-	subs    r9, r9, #1
-	bge     loop3
-	/*decrement the index */
-	subs    r7, r7, #1
-	bge     loop2
-skip:
-	add     r10, r10, #2
-	/* increment cache number */
-	cmp     r3, r10
-	bgt     loop1
-finished:
-	/*swith back to cache level 0 */
-	mov     r10, #0
-	/* select current cache level in cssr */
-	mcr     p15, 2, r10, c0, c0, 0
-	isb
-skip_l2_inval:
-	/* Data memory barrier and Data sync barrier */
-	mov     r1, #0
-	mcr     p15, 0, r1, c7, c10, 4
-	mcr     p15, 0, r1, c7, c10, 5
 
-	wfi                             @ wait for interrupt
+clean_caches:
+	/*
+	 * Clean Data or unified cache to POU
+	 * How to invalidate only L1 cache???? - #FIX_ME#
+	 * mcr	p15, 0, r11, c7, c11, 1
+	 */
+	cmp	r1, #0x1 		@ Check whether L2 inval is required
+	beq	omap3_do_wfi
+
+clean_l2:
+	/*
+	 * jump out to kernel flush routine
+	 *  - reuse that code is better
+	 *  - it executes in a cached space so is faster than refetch per-block
+	 *  - should be faster and will change with kernel
+	 *  - 'might' have to copy address, load and jump to it
+	 */
+	ldr	r1, kernel_flush
+	mov	lr, pc
+	bx	r1
+
+omap3_do_wfi:
+	ldr	r4, sdrc_power		@ read the SDRC_POWER register
+	ldr	r5, [r4]		@ read the contents of SDRC_POWER
+	orr	r5, r5, #0x40		@ enable self refresh on idle req
+	str	r5, [r4]		@ write back to SDRC_POWER register
+
+	/* Data memory barrier and Data sync barrier */
+	mov	r1, #0
+	mcr	p15, 0, r1, c7, c10, 4
+	mcr	p15, 0, r1, c7, c10, 5
+
+/*
+ * ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
+	wfi				@ wait for interrupt
+
+/*
+ * ===================================
+ * == Resume path for non-OFF modes ==
+ * ===================================
+ */
 	nop
 	nop
 	nop
@@ -604,46 +316,421 @@
 	nop
 	nop
 	bl wait_sdrc_ok
-	/* restore regs and return */
-	ldmfd   sp!, {r0-r12, pc}
+
+/*
+ * ===================================
+ * == Exit point from non-OFF modes ==
+ * ===================================
+ */
+	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+
+
+/*
+ * ==============================
+ * == Resume path for OFF mode ==
+ * ==============================
+ */
+
+/*
+ * The restore_* functions are called by the ROM code
+ *  when back from WFI in OFF mode.
+ * Cf. the get_*restore_pointer functions.
+ *
+ *  restore_es3: applies to 34xx >= ES3.0
+ *  restore_3630: applies to 36xx
+ *  restore: common code for 3xxx
+ */
+restore_es3:
+	ldr	r5, pm_prepwstst_core_p
+	ldr	r4, [r5]
+	and	r4, r4, #0x3
+	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
+	bne	restore
+	adr	r0, es3_sdrc_fix
+	ldr	r1, sram_base
+	ldr	r2, es3_sdrc_fix_sz
+	mov	r2, r2, ror #2
+copy_to_sram:
+	ldmia	r0!, {r3}	@ val = *src
+	stmia	r1!, {r3}	@ *dst = val
+	subs	r2, r2, #0x1	@ num_words--
+	bne	copy_to_sram
+	ldr	r1, sram_base
+	blx	r1
+	b	restore
+
+restore_3630:
+	ldr	r1, pm_prepwstst_core_p
+	ldr	r2, [r1]
+	and	r2, r2, #0x3
+	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
+	bne	restore
+	/* Disable RTA before giving control */
+	ldr	r1, control_mem_rta
+	mov	r2, #OMAP36XX_RTA_DISABLE
+	str	r2, [r1]
+
+	/* Fall through to common code for the remaining logic */
+
+restore:
+	/*
+	 * Check what was the reason for mpu reset and store the reason in r9:
+	 *  0 - No context lost
+	 *  1 - Only L1 and logic lost
+	 *  2 - Only L2 lost - In this case, we won't be here
+	 *  3 - Both L1 and L2 lost
+	 */
+	ldr	r1, pm_pwstctrl_mpu
+	ldr	r2, [r1]
+	and	r2, r2, #0x3
+	cmp	r2, #0x0	@ Check if target power state was OFF or RET
+	moveq	r9, #0x3	@ MPU OFF => L1 and L2 lost
+	movne	r9, #0x1	@ Only L1 and logic lost => avoid L2 invalidation
+	bne	logic_l1_restore
+
+	ldr	r0, l2dis_3630
+	cmp	r0, #0x1	@ should we disable L2 on 3630?
+	bne	skipl2dis
+	mrc	p15, 0, r0, c1, c0, 1
+	bic	r0, r0, #2	@ disable L2 cache
+	mcr	p15, 0, r0, c1, c0, 1
+skipl2dis:
+	ldr	r0, control_stat
+	ldr	r1, [r0]
+	and	r1, #0x700
+	cmp	r1, #0x300
+	beq	l2_inv_gp
+	mov	r0, #40			@ set service ID for PPA
+	mov	r12, r0			@ copy secure Service ID in r12
+	mov	r1, #0			@ set task id for ROM code in r1
+	mov	r2, #4			@ set some flags in r2, r6
+	mov	r6, #0xff
+	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
+	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
+	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
+	.word	0xE1600071		@ call SMI monitor (smi #1)
+	/* Write to Aux control register to set some bits */
+	mov	r0, #42			@ set service ID for PPA
+	mov	r12, r0			@ copy secure Service ID in r12
+	mov	r1, #0			@ set task id for ROM code in r1
+	mov	r2, #4			@ set some flags in r2, r6
+	mov	r6, #0xff
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
+	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
+	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
+	.word	0xE1600071		@ call SMI monitor (smi #1)
+
+#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
+	/* Restore L2 aux control register */
+					@ set service ID for PPA
+	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
+	mov	r12, r0			@ copy service ID in r12
+	mov	r1, #0			@ set task ID for ROM code in r1
+	mov	r2, #4			@ set some flags in r2, r6
+	mov	r6, #0xff
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4, #0xBC]
+	adds	r3, r3, #8		@ r3 points to parameters
+	mcr	p15, 0, r0, c7, c10, 4	@ data write barrier
+	mcr	p15, 0, r0, c7, c10, 5	@ data memory barrier
+	.word	0xE1600071		@ call SMI monitor (smi #1)
+#endif
+	b	logic_l1_restore
+
+l2_inv_api_params:
+	.word	0x1, 0x00
+l2_inv_gp:
+	/* Execute smi to invalidate L2 cache */
+	mov r12, #0x1			@ set up to invalidate L2
+	.word 0xE1600070		@ Call SMI monitor (smieq)
+	/* Write to Aux control register to set some bits */
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4,#0xBC]
+	ldr	r0, [r3,#4]
+	mov	r12, #0x3
+	.word	0xE1600070		@ Call SMI monitor (smieq)
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4,#0xBC]
+	ldr	r0, [r3,#12]
+	mov	r12, #0x2
+	.word	0xE1600070		@ Call SMI monitor (smieq)
+logic_l1_restore:
+	ldr	r1, l2dis_3630
+	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
+	bne	skipl2reen
+	mrc	p15, 0, r1, c1, c0, 1
+	orr	r1, r1, #2		@ re-enable L2 cache
+	mcr	p15, 0, r1, c1, c0, 1
+skipl2reen:
+	mov	r1, #0
+	/*
+	 * Invalidate all instruction caches to PoU
+	 * and flush branch target cache
+	 */
+	mcr	p15, 0, r1, c7, c5, 0
+
+	ldr	r4, scratchpad_base
+	ldr	r3, [r4,#0xBC]
+	adds	r3, r3, #16
+	ldmia	r3!, {r4-r6}
+	mov	sp, r4
+	msr	spsr_cxsf, r5
+	mov	lr, r6
+
+	ldmia	r3!, {r4-r9}
+	/* Coprocessor access Control Register */
+	mcr p15, 0, r4, c1, c0, 2
+
+	/* TTBR0 */
+	MCR p15, 0, r5, c2, c0, 0
+	/* TTBR1 */
+	MCR p15, 0, r6, c2, c0, 1
+	/* Translation table base control register */
+	MCR p15, 0, r7, c2, c0, 2
+	/* Domain access Control Register */
+	MCR p15, 0, r8, c3, c0, 0
+	/* Data fault status Register */
+	MCR p15, 0, r9, c5, c0, 0
+
+	ldmia	r3!,{r4-r8}
+	/* Instruction fault status Register */
+	MCR p15, 0, r4, c5, c0, 1
+	/* Data Auxiliary Fault Status Register */
+	MCR p15, 0, r5, c5, c1, 0
+	/* Instruction Auxiliary Fault Status Register */
+	MCR p15, 0, r6, c5, c1, 1
+	/* Data Fault Address Register */
+	MCR p15, 0, r7, c6, c0, 0
+	/* Instruction Fault Address Register */
+	MCR p15, 0, r8, c6, c0, 2
+	ldmia	r3!,{r4-r7}
+
+	/* User r/w thread and process ID */
+	MCR p15, 0, r4, c13, c0, 2
+	/* User ro thread and process ID */
+	MCR p15, 0, r5, c13, c0, 3
+	/* Privileged only thread and process ID */
+	MCR p15, 0, r6, c13, c0, 4
+	/* Cache size selection */
+	MCR p15, 2, r7, c0, c0, 0
+	ldmia	r3!,{r4-r8}
+	/* Data TLB lockdown registers */
+	MCR p15, 0, r4, c10, c0, 0
+	/* Instruction TLB lockdown registers */
+	MCR p15, 0, r5, c10, c0, 1
+	/* Secure or Nonsecure Vector Base Address */
+	MCR p15, 0, r6, c12, c0, 0
+	/* FCSE PID */
+	MCR p15, 0, r7, c13, c0, 0
+	/* Context PID */
+	MCR p15, 0, r8, c13, c0, 1
+
+	ldmia	r3!,{r4-r5}
+	/* Primary memory remap register */
+	MCR p15, 0, r4, c10, c2, 0
+	/* Normal memory remap register */
+	MCR p15, 0, r5, c10, c2, 1
+
+	/* Restore cpsr */
+	ldmia	r3!,{r4}		@ load CPSR from SDRAM
+	msr	cpsr, r4		@ store cpsr
+
+	/* Enabling MMU here */
+	mrc	p15, 0, r7, c2, c0, 2 	@ Read TTBRControl
+	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
+	and	r7, #0x7
+	cmp	r7, #0x0
+	beq	usettbr0
+ttbr_error:
+	/*
+	 * More work needs to be done to support N[0:2] value other than 0
+	 * So looping here so that the error can be detected
+	 */
+	b	ttbr_error
+usettbr0:
+	mrc	p15, 0, r2, c2, c0, 0
+	ldr	r5, ttbrbit_mask
+	and	r2, r5
+	mov	r4, pc
+	ldr	r5, table_index_mask
+	and	r4, r5			@ r4 = 31 to 20 bits of pc
+	/* Extract the value to be written to table entry */
+	ldr	r1, table_entry
+	/* r1 has the value to be written to table entry */
+	add	r1, r1, r4
+	/* Getting the address of table entry to modify */
+	lsr	r4, #18
+	/* r2 has the location which needs to be modified */
+	add	r2, r4
+	/* Storing previous entry of location being modified */
+	ldr	r5, scratchpad_base
+	ldr	r4, [r2]
+	str	r4, [r5, #0xC0]
+	/* Modify the table entry */
+	str	r1, [r2]
+	/*
+	 * Storing address of entry being modified
+	 * - will be restored after enabling MMU
+	 */
+	ldr	r5, scratchpad_base
+	str	r2, [r5, #0xC4]
+
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
+	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
+	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
+	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
+	/*
+	 * Restore control register. This enables the MMU.
+	 * The caches and prediction are not enabled here, they
+	 * will be enabled after restoring the MMU table entry.
+	 */
+	ldmia	r3!, {r4}
+	/* Store previous value of control register in scratchpad */
+	str	r4, [r5, #0xC8]
+	ldr	r2, cache_pred_disable_mask
+	and	r4, r2
+	mcr	p15, 0, r4, c1, c0, 0
+
+/*
+ * ==============================
+ * == Exit point from OFF mode ==
+ * ==============================
+ */
+	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+
+
+/*
+ * Internal functions
+ */
+
+/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
+	.text
+ENTRY(es3_sdrc_fix)
+	ldr	r4, sdrc_syscfg		@ get config addr
+	ldr	r5, [r4]		@ get value
+	tst	r5, #0x100		@ is part access blocked
+	it	eq
+	biceq	r5, r5, #0x100		@ clear bit if set
+	str	r5, [r4]		@ write back change
+	ldr	r4, sdrc_mr_0		@ get config addr
+	ldr	r5, [r4]		@ get value
+	str	r5, [r4]		@ write back change
+	ldr	r4, sdrc_emr2_0		@ get config addr
+	ldr	r5, [r4]		@ get value
+	str	r5, [r4]		@ write back change
+	ldr	r4, sdrc_manual_0	@ get config addr
+	mov	r5, #0x2		@ autorefresh command
+	str	r5, [r4]		@ kick off refreshes
+	ldr	r4, sdrc_mr_1		@ get config addr
+	ldr	r5, [r4]		@ get value
+	str	r5, [r4]		@ write back change
+	ldr	r4, sdrc_emr2_1		@ get config addr
+	ldr	r5, [r4]		@ get value
+	str	r5, [r4]		@ write back change
+	ldr	r4, sdrc_manual_1	@ get config addr
+	mov	r5, #0x2		@ autorefresh command
+	str	r5, [r4]		@ kick off refreshes
+	bx	lr
+
+sdrc_syscfg:
+	.word	SDRC_SYSCONFIG_P
+sdrc_mr_0:
+	.word	SDRC_MR_0_P
+sdrc_emr2_0:
+	.word	SDRC_EMR2_0_P
+sdrc_manual_0:
+	.word	SDRC_MANUAL_0_P
+sdrc_mr_1:
+	.word	SDRC_MR_1_P
+sdrc_emr2_1:
+	.word	SDRC_EMR2_1_P
+sdrc_manual_1:
+	.word	SDRC_MANUAL_1_P
+ENTRY(es3_sdrc_fix_sz)
+	.word	. - es3_sdrc_fix
+
+/*
+ * This function implements the erratum ID i581 WA:
+ *  SDRC state restore before accessing the SDRAM
+ *
+ * Only used at return from non-OFF mode. For OFF
+ * mode the ROM code configures the SDRC and
+ * the DPLL before calling the restore code directly
+ * from DDR.
+ */
 
 /* Make sure SDRC accesses are ok */
 wait_sdrc_ok:
-        ldr     r4, cm_idlest1_core
-        ldr     r5, [r4]
-        and     r5, r5, #0x2
-        cmp     r5, #0
-        bne     wait_sdrc_ok
-        ldr     r4, sdrc_power
-        ldr     r5, [r4]
-        bic     r5, r5, #0x40
-        str     r5, [r4]
+
+/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
+	ldr	r4, cm_idlest_ckgen
+wait_dpll3_lock:
+	ldr	r5, [r4]
+	tst	r5, #1
+	beq	wait_dpll3_lock
+
+	ldr	r4, cm_idlest1_core
+wait_sdrc_ready:
+	ldr	r5, [r4]
+	tst	r5, #0x2
+	bne	wait_sdrc_ready
+	/* allow DLL powerdown upon hw idle req */
+	ldr	r4, sdrc_power
+	ldr	r5, [r4]
+	bic	r5, r5, #0x40
+	str	r5, [r4]
+
+is_dll_in_lock_mode:
+	/* Is dll in lock mode? */
+	ldr	r4, sdrc_dlla_ctrl
+	ldr	r5, [r4]
+	tst	r5, #0x4
+	bxne	lr			@ Return if locked
+	/* wait till dll locks */
+wait_dll_lock_timed:
+	ldr	r4, wait_dll_lock_counter
+	add	r4, r4, #1
+	str	r4, wait_dll_lock_counter
+	ldr	r4, sdrc_dlla_status
+	/* Wait 20uS for lock */
+	mov	r6, #8
 wait_dll_lock:
-        /* Is dll in lock mode? */
-        ldr     r4, sdrc_dlla_ctrl
-        ldr     r5, [r4]
-        tst     r5, #0x4
-        bxne    lr
-        /* wait till dll locks */
-        ldr     r4, sdrc_dlla_status
-        ldr     r5, [r4]
-        and     r5, r5, #0x4
-        cmp     r5, #0x4
-        bne     wait_dll_lock
-        bx      lr
+	subs	r6, r6, #0x1
+	beq	kick_dll
+	ldr	r5, [r4]
+	and	r5, r5, #0x4
+	cmp	r5, #0x4
+	bne	wait_dll_lock
+	bx	lr			@ Return when locked
+
+	/* disable/reenable DLL if not locked */
+kick_dll:
+	ldr	r4, sdrc_dlla_ctrl
+	ldr	r5, [r4]
+	mov	r6, r5
+	bic	r6, #(1<<3)		@ disable dll
+	str	r6, [r4]
+	dsb
+	orr	r6, r6, #(1<<3)		@ enable dll
+	str	r6, [r4]
+	dsb
+	ldr	r4, kick_counter
+	add	r4, r4, #1
+	str	r4, kick_counter
+	b	wait_dll_lock_timed
 
 cm_idlest1_core:
 	.word	CM_IDLEST1_CORE_V
+cm_idlest_ckgen:
+	.word	CM_IDLEST_CKGEN_V
 sdrc_dlla_status:
 	.word	SDRC_DLLA_STATUS_V
 sdrc_dlla_ctrl:
 	.word	SDRC_DLLA_CTRL_V
-pm_prepwstst_core:
-	.word	PM_PREPWSTST_CORE_V
 pm_prepwstst_core_p:
 	.word	PM_PREPWSTST_CORE_P
-pm_prepwstst_mpu:
-	.word	PM_PREPWSTST_MPU_V
 pm_pwstctrl_mpu:
 	.word	PM_PWSTCTRL_MPU_P
 scratchpad_base:
@@ -651,13 +738,7 @@
 sram_base:
 	.word	SRAM_BASE_P + 0x8000
 sdrc_power:
-	.word SDRC_POWER_V
-clk_stabilize_delay:
-	.word 0x000001FF
-assoc_mask:
-	.word	0x3ff
-numset_mask:
-	.word	0x7fff
+	.word	SDRC_POWER_V
 ttbrbit_mask:
 	.word	0xFFFFC000
 table_index_mask:
@@ -668,5 +749,20 @@
 	.word	0xFFFFE7FB
 control_stat:
 	.word	CONTROL_STAT
+control_mem_rta:
+	.word	CONTROL_MEM_RTA_CTRL
+kernel_flush:
+	.word	v7_flush_dcache_all
+l2dis_3630:
+	.word	0
+	/*
+	 * When exporting to userspace while the counters are in SRAM,
+	 * these 2 words need to be at the end to facilitate retrieval!
+	 */
+kick_counter:
+	.word	0
+wait_dll_lock_counter:
+	.word	0
+
 ENTRY(omap34xx_cpu_suspend_sz)
 	.word	. - omap34xx_cpu_suspend
diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c
new file mode 100644
index 0000000..60e7055
--- /dev/null
+++ b/arch/arm/mach-omap2/smartreflex-class3.c
@@ -0,0 +1,59 @@
+/*
+ * Smart reflex Class 3 specific implementations
+ *
+ * Author: Thara Gopinath       <thara@ti.com>
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <plat/smartreflex.h>
+
+static int sr_class3_enable(struct voltagedomain *voltdm)
+{
+	unsigned long volt = omap_voltage_get_nom_volt(voltdm);
+
+	if (!volt) {
+		pr_warning("%s: Curr voltage unknown. Cannot enable sr_%s\n",
+				__func__, voltdm->name);
+		return -ENODATA;
+	}
+
+	omap_vp_enable(voltdm);
+	return sr_enable(voltdm, volt);
+}
+
+static int sr_class3_disable(struct voltagedomain *voltdm, int is_volt_reset)
+{
+	omap_vp_disable(voltdm);
+	sr_disable(voltdm);
+	if (is_volt_reset)
+		omap_voltage_reset(voltdm);
+
+	return 0;
+}
+
+static int sr_class3_configure(struct voltagedomain *voltdm)
+{
+	return sr_configure_errgen(voltdm);
+}
+
+/* SR class3 structure */
+static struct omap_sr_class_data class3_data = {
+	.enable = sr_class3_enable,
+	.disable = sr_class3_disable,
+	.configure = sr_class3_configure,
+	.class_type = SR_CLASS3,
+};
+
+/* Smartreflex Class3 init API to be called from board file */
+static int __init sr_class3_init(void)
+{
+	pr_info("SmartReflex Class3 initialized\n");
+	return sr_register_class(&class3_data);
+}
+late_initcall(sr_class3_init);
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
new file mode 100644
index 0000000..77ecebf
--- /dev/null
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -0,0 +1,1029 @@
+/*
+ * OMAP SmartReflex Voltage Control
+ *
+ * Author: Thara Gopinath	<thara@ti.com>
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include <plat/common.h>
+#include <plat/smartreflex.h>
+
+#include "pm.h"
+
+#define SMARTREFLEX_NAME_LEN	16
+#define NVALUE_NAME_LEN		40
+#define SR_DISABLE_TIMEOUT	200
+
+struct omap_sr {
+	int				srid;
+	int				ip_type;
+	int				nvalue_count;
+	bool				autocomp_active;
+	u32				clk_length;
+	u32				err_weight;
+	u32				err_minlimit;
+	u32				err_maxlimit;
+	u32				accum_data;
+	u32				senn_avgweight;
+	u32				senp_avgweight;
+	u32				senp_mod;
+	u32				senn_mod;
+	unsigned int			irq;
+	void __iomem			*base;
+	struct platform_device		*pdev;
+	struct list_head		node;
+	struct omap_sr_nvalue_table	*nvalue_table;
+	struct voltagedomain		*voltdm;
+};
+
+/* sr_list contains all the instances of smartreflex module */
+static LIST_HEAD(sr_list);
+
+static struct omap_sr_class_data *sr_class;
+static struct omap_sr_pmic_data *sr_pmic_data;
+
+static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
+{
+	__raw_writel(value, (sr->base + offset));
+}
+
+static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
+					u32 value)
+{
+	u32 reg_val;
+	u32 errconfig_offs = 0, errconfig_mask = 0;
+
+	reg_val = __raw_readl(sr->base + offset);
+	reg_val &= ~mask;
+
+	/*
+	 * The smartreflex error config register is special as it contains
+	 * certain status bits which are cleared by writing a 1 to them.
+	 * To make sure no accidental write of 1 happens to those status
+	 * bits, clear them in the value read back. This means this API
+	 * does not rewrite these bits if they are currently set, but it
+	 * still allows the caller to write them explicitly.
+	 */
+	if (sr->ip_type == SR_TYPE_V1) {
+		errconfig_offs = ERRCONFIG_V1;
+		errconfig_mask = ERRCONFIG_STATUS_V1_MASK;
+	} else if (sr->ip_type == SR_TYPE_V2) {
+		errconfig_offs = ERRCONFIG_V2;
+		errconfig_mask = ERRCONFIG_VPBOUNDINTST_V2;
+	}
+
+	if (offset == errconfig_offs)
+		reg_val &= ~errconfig_mask;
+
+	reg_val |= value;
+
+	__raw_writel(reg_val, (sr->base + offset));
+}
+
+static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
+{
+	return __raw_readl(sr->base + offset);
+}
+
+static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
+{
+	struct omap_sr *sr_info;
+
+	if (!voltdm) {
+		pr_err("%s: Null voltage domain passed!\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	list_for_each_entry(sr_info, &sr_list, node) {
+		if (voltdm == sr_info->voltdm)
+			return sr_info;
+	}
+
+	return ERR_PTR(-ENODATA);
+}
+
+static irqreturn_t sr_interrupt(int irq, void *data)
+{
+	struct omap_sr *sr_info = (struct omap_sr *)data;
+	u32 status = 0;
+
+	if (sr_info->ip_type == SR_TYPE_V1) {
+		/* Read the status bits */
+		status = sr_read_reg(sr_info, ERRCONFIG_V1);
+
+		/* Clear them by writing back */
+		sr_write_reg(sr_info, ERRCONFIG_V1, status);
+	} else if (sr_info->ip_type == SR_TYPE_V2) {
+		/* Read the status bits */
+		sr_read_reg(sr_info, IRQSTATUS);
+
+		/* Clear them by writing back */
+		sr_write_reg(sr_info, IRQSTATUS, status);
+	}
+
+	if (sr_class->class_type == SR_CLASS2 && sr_class->notify)
+		sr_class->notify(sr_info->voltdm, status);
+
+	return IRQ_HANDLED;
+}
+
+static void sr_set_clk_length(struct omap_sr *sr)
+{
+	struct clk *sys_ck;
+	u32 sys_clk_speed;
+
+	if (cpu_is_omap34xx())
+		sys_ck = clk_get(NULL, "sys_ck");
+	else
+		sys_ck = clk_get(NULL, "sys_clkin_ck");
+
+	if (IS_ERR(sys_ck)) {
+		dev_err(&sr->pdev->dev, "%s: unable to get sys clk\n",
+			__func__);
+		return;
+	}
+	sys_clk_speed = clk_get_rate(sys_ck);
+	clk_put(sys_ck);
+
+	switch (sys_clk_speed) {
+	case 12000000:
+		sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
+		break;
+	case 13000000:
+		sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
+		break;
+	case 19200000:
+		sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
+		break;
+	case 26000000:
+		sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
+		break;
+	case 38400000:
+		sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
+		break;
+	default:
+		dev_err(&sr->pdev->dev, "%s: Invalid sysclk value: %d\n",
+			__func__, sys_clk_speed);
+		break;
+	}
+}
+
+static void sr_set_regfields(struct omap_sr *sr)
+{
+	/*
+	 * For the time being these values are defined in smartreflex.h
+	 * and populated during init. Maybe they can be moved to the board
+	 * file or a PMIC specific data structure. In that case these structure
+	 * fields will have to be populated using the pdata or PMIC structure.
+	 */
+	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+		sr->err_weight = OMAP3430_SR_ERRWEIGHT;
+		sr->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
+		sr->accum_data = OMAP3430_SR_ACCUMDATA;
+		if (!(strcmp(sr->voltdm->name, "mpu"))) {
+			sr->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
+			sr->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
+		} else {
+			sr->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT;
+			sr->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT;
+		}
+	}
+}
+
+static void sr_start_vddautocomp(struct omap_sr *sr)
+{
+	if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+		dev_warn(&sr->pdev->dev,
+			"%s: smartreflex class driver not registered\n",
+			__func__);
+		return;
+	}
+
+	if (!sr_class->enable(sr->voltdm))
+		sr->autocomp_active = true;
+}
+
+static void sr_stop_vddautocomp(struct omap_sr *sr)
+{
+	if (!sr_class || !(sr_class->disable)) {
+		dev_warn(&sr->pdev->dev,
+			"%s: smartreflex class driver not registered\n",
+			__func__);
+		return;
+	}
+
+	if (sr->autocomp_active) {
+		sr_class->disable(sr->voltdm, 1);
+		sr->autocomp_active = false;
+	}
+}
+
+/*
+ * This function handles the initializations which have to be done
+ * only when both the sr device and the class driver registration have
+ * completed. It is called from both the sr class driver registration
+ * and the sr device initialization APIs; only one of the calls will
+ * ultimately succeed.
+ *
+ * Currently this function registers an interrupt handler for a
+ * particular SR if the smartreflex class driver is already registered,
+ * has requested interrupts, and the SR interrupt line is present.
+ */
+static int sr_late_init(struct omap_sr *sr_info)
+{
+	char *name;
+	struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
+	struct resource *mem;
+	int ret = 0;
+
+	if (sr_class->class_type == SR_CLASS2 &&
+		sr_class->notify_flags && sr_info->irq) {
+
+		name = kzalloc(SMARTREFLEX_NAME_LEN + 1, GFP_KERNEL);
+		strcpy(name, "sr_");
+		strcat(name, sr_info->voltdm->name);
+		ret = request_irq(sr_info->irq, sr_interrupt,
+				0, name, (void *)sr_info);
+		if (ret)
+			goto error;
+	}
+
+	if (pdata && pdata->enable_on_init)
+		sr_start_vddautocomp(sr_info);
+
+	return ret;
+
+error:
+		iounmap(sr_info->base);
+		mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
+		release_mem_region(mem->start, resource_size(mem));
+		list_del(&sr_info->node);
+		dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
+			" interrupt handler. Smartreflex will"
+			" not function as desired\n", __func__);
+		kfree(sr_info);
+		return ret;
+}
+
+static void sr_v1_disable(struct omap_sr *sr)
+{
+	int timeout = 0;
+
+	/* Enable MCUDisableAcknowledge interrupt */
+	sr_modify_reg(sr, ERRCONFIG_V1,
+			ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);
+
+	/* SRCONFIG - disable SR */
+	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+	/* Disable all other SR interrupts and clear the status */
+	sr_modify_reg(sr, ERRCONFIG_V1,
+			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+			ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
+			(ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+			ERRCONFIG_MCUBOUNDINTST |
+			ERRCONFIG_VPBOUNDINTST_V1));
+
+	/*
+	 * Wait for SR to be disabled.
+	 * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
+	 */
+	omap_test_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+			ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+			timeout);
+
+	if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
+			__func__);
+
+	/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+	sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
+			ERRCONFIG_MCUDISACKINTST);
+}
+
+static void sr_v2_disable(struct omap_sr *sr)
+{
+	int timeout = 0;
+
+	/* Enable MCUDisableAcknowledge interrupt */
+	sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);
+
+	/* SRCONFIG - disable SR */
+	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+	/* Disable all other SR interrupts and clear the status */
+	sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+			ERRCONFIG_VPBOUNDINTST_V2);
+	sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+			IRQENABLE_MCUVALIDINT |
+			IRQENABLE_MCUBOUNDSINT));
+	sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
+			IRQSTATUS_MCVALIDINT |
+			IRQSTATUS_MCBOUNDSINT));
+
+	/*
+	 * Wait for SR to be disabled.
+	 * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
+	 */
+	omap_test_timeout((sr_read_reg(sr, IRQSTATUS) &
+			IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+			timeout);
+
+	if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
+			__func__);
+
+	/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+	sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
+	sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+}
+
+static u32 sr_retrieve_nvalue(struct omap_sr *sr, u32 efuse_offs)
+{
+	int i;
+
+	if (!sr->nvalue_table) {
+		dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
+			__func__);
+		return 0;
+	}
+
+	for (i = 0; i < sr->nvalue_count; i++) {
+		if (sr->nvalue_table[i].efuse_offs == efuse_offs)
+			return sr->nvalue_table[i].nvalue;
+	}
+
+	return 0;
+}
+
+/* Public Functions */
+
+/**
+ * sr_configure_errgen() - Configures the smartreflex to perform AVS using the
+ *			 error generator module.
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the error generator module inside the smartreflex module.
+ * These are the SR settings used when the ERROR module inside
+ * Smartreflex is used. SR CLASS 3 by default uses only the ERROR
+ * module whereas SR CLASS 2 can choose between the ERROR module and
+ * the MINMAXAVG module. Returns 0 on success and an error value in
+ * case of failure.
+ */
+int sr_configure_errgen(struct voltagedomain *voltdm)
+{
+	u32 sr_config, sr_errconfig, errconfig_offs, vpboundint_en;
+	u32 vpboundint_st, senp_en = 0, senn_en = 0;
+	u8 senp_shift, senn_shift;
+	struct omap_sr *sr = _sr_lookup(voltdm);
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return -EINVAL;
+	}
+
+	if (!sr->clk_length)
+		sr_set_clk_length(sr);
+
+	senp_en = sr->senp_mod;
+	senn_en = sr->senn_mod;
+
+	sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+		SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
+
+	if (sr->ip_type == SR_TYPE_V1) {
+		sr_config |= SRCONFIG_DELAYCTRL;
+		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+		errconfig_offs = ERRCONFIG_V1;
+		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+	} else if (sr->ip_type == SR_TYPE_V2) {
+		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+		errconfig_offs = ERRCONFIG_V2;
+		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+	} else {
+		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
+			"module without specifying the ip\n", __func__);
+		return -EINVAL;
+	}
+
+	sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+	sr_write_reg(sr, SRCONFIG, sr_config);
+	sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
+		(sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
+		(sr->err_minlimit <<  ERRCONFIG_ERRMINLIMIT_SHIFT);
+	sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
+		SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
+		sr_errconfig);
+
+	/* Enabling the interrupts if the ERROR module is used */
+	sr_modify_reg(sr, errconfig_offs,
+		vpboundint_en, (vpboundint_en | vpboundint_st));
+
+	return 0;
+}
+
+/**
+ * sr_configure_minmax() - Configures the smartreflex to perform AVS using the
+ *			 minmaxavg module.
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the minmaxavg module inside the smartreflex module.
+ * These are the SR settings used when the MINMAXAVG module inside
+ * Smartreflex is used. SR CLASS 3 by default uses only the ERROR
+ * module whereas SR CLASS 2 can choose between the ERROR module and
+ * the MINMAXAVG module. Returns 0 on success and an error value in
+ * case of failure.
+ */
+int sr_configure_minmax(struct voltagedomain *voltdm)
+{
+	u32 sr_config, sr_avgwt;
+	u32 senp_en = 0, senn_en = 0;
+	u8 senp_shift, senn_shift;
+	struct omap_sr *sr = _sr_lookup(voltdm);
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return -EINVAL;
+	}
+
+	if (!sr->clk_length)
+		sr_set_clk_length(sr);
+
+	senp_en = sr->senp_mod;
+	senn_en = sr->senn_mod;
+
+	sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+		SRCONFIG_SENENABLE |
+		(sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
+
+	if (sr->ip_type == SR_TYPE_V1) {
+		sr_config |= SRCONFIG_DELAYCTRL;
+		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+	} else if (sr->ip_type == SR_TYPE_V2) {
+		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+	} else {
+		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
+			"module without specifying the ip\n", __func__);
+		return -EINVAL;
+	}
+
+	sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+	sr_write_reg(sr, SRCONFIG, sr_config);
+	sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
+		(sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
+	sr_write_reg(sr, AVGWEIGHT, sr_avgwt);
+
+	/*
+	 * Enabling the interrupts if MINMAXAVG module is used.
+	 * TODO: check if all the interrupts are mandatory
+	 */
+	if (sr->ip_type == SR_TYPE_V1) {
+		sr_modify_reg(sr, ERRCONFIG_V1,
+			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+			ERRCONFIG_MCUBOUNDINTEN),
+			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
+			 ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
+			 ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
+	} else if (sr->ip_type == SR_TYPE_V2) {
+		sr_write_reg(sr, IRQSTATUS,
+			IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
+			IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
+		sr_write_reg(sr, IRQENABLE_SET,
+			IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
+			IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+	}
+
+	return 0;
+}
+
+/**
+ * sr_enable() - Enables the smartreflex module.
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ * @volt:	The voltage at which the voltage domain associated with
+ *		the smartreflex module is operating.
+ *		This is required only to program the correct Ntarget value.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * enable a smartreflex module. Returns 0 on success. Returns error
+ * value if the voltage passed is wrong or if ntarget value is wrong.
+ */
+int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
+{
+	u32 nvalue_reciprocal;
+	struct omap_volt_data *volt_data;
+	struct omap_sr *sr = _sr_lookup(voltdm);
+	int ret;
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return -EINVAL;
+	}
+
+	volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
+
+	if (IS_ERR(volt_data)) {
+		dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table"
+			"for nominal voltage %ld\n", __func__, volt);
+		return -ENODATA;
+	}
+
+	nvalue_reciprocal = sr_retrieve_nvalue(sr, volt_data->sr_efuse_offs);
+
+	if (!nvalue_reciprocal) {
+		dev_warn(&sr->pdev->dev, "%s: NVALUE = 0 at voltage %ld\n",
+			__func__, volt);
+		return -ENODATA;
+	}
+
+	/* errminlimit is opp dependent and hence linked to voltage */
+	sr->err_minlimit = volt_data->sr_errminlimit;
+
+	pm_runtime_get_sync(&sr->pdev->dev);
+
+	/* Check if SR is already enabled. If yes do nothing */
+	if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
+		return 0;
+
+	/* Configure SR */
+	ret = sr_class->configure(voltdm);
+	if (ret)
+		return ret;
+
+	sr_write_reg(sr, NVALUERECIPROCAL, nvalue_reciprocal);
+
+	/* SRCONFIG - enable SR */
+	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+	return 0;
+}
+
+/**
+ * sr_disable() - Disables the smartreflex module.
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable a smartreflex module.
+ */
+void sr_disable(struct voltagedomain *voltdm)
+{
+	struct omap_sr *sr = _sr_lookup(voltdm);
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	/* Check if SR clocks are already disabled. If yes do nothing */
+	if (pm_runtime_suspended(&sr->pdev->dev))
+		return;
+
+	/*
+	 * Only disable SR if it is indeed enabled. Else just
+	 * disable the clocks.
+	 */
+	if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
+		if (sr->ip_type == SR_TYPE_V1)
+			sr_v1_disable(sr);
+		else if (sr->ip_type == SR_TYPE_V2)
+			sr_v2_disable(sr);
+	}
+
+	pm_runtime_put_sync(&sr->pdev->dev);
+}
+
+/**
+ * sr_register_class() - API to register a smartreflex class parameters.
+ * @class_data:	The structure containing various sr class specific data.
+ *
+ * This API is to be called by the smartreflex class driver to register itself
+ * with the smartreflex driver during init. Returns 0 on success else the
+ * error value.
+ */
+int sr_register_class(struct omap_sr_class_data *class_data)
+{
+	struct omap_sr *sr_info;
+
+	if (!class_data) {
+		pr_warning("%s:, Smartreflex class data passed is NULL\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (sr_class) {
+		pr_warning("%s: Smartreflex class driver already registered\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	sr_class = class_data;
+
+	/*
+	 * Call into late init to do initializations that require
+	 * both the sr driver and the sr class driver to be initialized.
+	 */
+	list_for_each_entry(sr_info, &sr_list, node)
+		sr_late_init(sr_info);
+
+	return 0;
+}
+
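+/*
+ * Illustrative usage sketch (not part of this patch): a smartreflex class
+ * driver registers its hooks much as smartreflex-class3.c above does; the
+ * my_* names below are placeholders.
+ *
+ *	static struct omap_sr_class_data my_class_data = {
+ *		.enable		= my_enable,
+ *		.disable	= my_disable,
+ *		.configure	= my_configure,
+ *		.class_type	= SR_CLASS3,
+ *	};
+ *
+ *	static int __init my_class_init(void)
+ *	{
+ *		return sr_register_class(&my_class_data);
+ *	}
+ *	late_initcall(my_class_init);
+ */
+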
+/**
+ * omap_sr_enable() -  API to enable SR clocks and to call into the
+ *			registered smartreflex class enable API.
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to enable
+ * a particular smartreflex module. This API will do the initial
+ * configurations to turn on the smartreflex module and in turn call
+ * into the registered smartreflex class enable API.
+ */
+void omap_sr_enable(struct voltagedomain *voltdm)
+{
+	struct omap_sr *sr = _sr_lookup(voltdm);
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	if (!sr->autocomp_active)
+		return;
+
+	if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
+			"registered\n", __func__);
+		return;
+	}
+
+	sr_class->enable(voltdm);
+}
+
+/**
+ * omap_sr_disable() - API to disable SR without resetting the voltage
+ *			processor voltage
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable not to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable(struct voltagedomain *voltdm)
+{
+	struct omap_sr *sr = _sr_lookup(voltdm);
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	if (!sr->autocomp_active)
+		return;
+
+	if (!sr_class || !(sr_class->disable)) {
+		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
+			"registered\n", __func__);
+		return;
+	}
+
+	sr_class->disable(voltdm, 0);
+}
+
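+/*
+ * Illustrative usage sketch (not part of this patch): a hypothetical DVFS
+ * path would typically pair the two APIs above around a voltage change:
+ *
+ *	omap_sr_disable(voltdm);
+ *	... scale the voltage of the domain here ...
+ *	omap_sr_enable(voltdm);
+ */
+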
+/**
+ * omap_sr_disable_reset_volt() - API to disable SR and reset the
+ *				voltage processor voltage
+ * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+{
+	struct omap_sr *sr = _sr_lookup(voltdm);
+
+	if (IS_ERR(sr)) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	if (!sr->autocomp_active)
+		return;
+
+	if (!sr_class || !(sr_class->disable)) {
+		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
+			"registered\n", __func__);
+		return;
+	}
+
+	sr_class->disable(voltdm, 1);
+}
+
+/**
+ * omap_sr_register_pmic() - API to register pmic specific info.
+ * @pmic_data:	The structure containing pmic specific data.
+ *
+ * This API is to be called from the PMIC specific code to register with
+ * smartreflex driver pmic specific info. Currently the only info required
+ * is the smartreflex init on the PMIC side.
+ */
+void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data)
+{
+	if (!pmic_data) {
+		pr_warning("%s: Trying to register NULL PMIC data structure"
+			"with smartreflex\n", __func__);
+		return;
+	}
+
+	sr_pmic_data = pmic_data;
+}
+
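+/*
+ * Illustrative usage sketch (not part of this patch): PMIC code fills in an
+ * omap_sr_pmic_data with its init hook and registers it before sr_init()
+ * runs; the my_pmic_* names below are placeholders.
+ *
+ *	static struct omap_sr_pmic_data my_pmic_sr_data = {
+ *		.sr_pmic_init	= my_pmic_sr_init,
+ *	};
+ *
+ *	omap_sr_register_pmic(&my_pmic_sr_data);
+ */
+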
+/* PM debugfs entries to enable/disable smartreflex. */
+static int omap_sr_autocomp_show(void *data, u64 *val)
+{
+	struct omap_sr *sr_info = (struct omap_sr *) data;
+
+	if (!sr_info) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, sr_info->voltdm->name);
+		return -EINVAL;
+	}
+
+	*val = sr_info->autocomp_active;
+
+	return 0;
+}
+
+static int omap_sr_autocomp_store(void *data, u64 val)
+{
+	struct omap_sr *sr_info = (struct omap_sr *) data;
+
+	if (!sr_info) {
+		pr_warning("%s: omap_sr struct for sr_%s not found\n",
+			__func__, sr_info->voltdm->name);
+		return -EINVAL;
+	}
+
+	/* Sanity check */
+	if (val && (val != 1)) {
+		pr_warning("%s: Invalid argument %lld\n", __func__, val);
+		return -EINVAL;
+	}
+
+	if (!val)
+		sr_stop_vddautocomp(sr_info);
+	else
+		sr_start_vddautocomp(sr_info);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
+		omap_sr_autocomp_store, "%llu\n");
+
+static int __init omap_sr_probe(struct platform_device *pdev)
+{
+	struct omap_sr *sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
+	struct omap_sr_data *pdata = pdev->dev.platform_data;
+	struct resource *mem, *irq;
+	struct dentry *vdd_dbg_dir, *dbg_dir, *nvalue_dir;
+	struct omap_volt_data *volt_data;
+	int i, ret = 0;
+
+	if (!sr_info) {
+		dev_err(&pdev->dev, "%s: unable to allocate sr_info\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+		return -EINVAL;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
+		ret = -ENODEV;
+		goto err_free_devinfo;
+	}
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+	pm_runtime_enable(&pdev->dev);
+
+	sr_info->pdev = pdev;
+	sr_info->srid = pdev->id;
+	sr_info->voltdm = pdata->voltdm;
+	sr_info->nvalue_table = pdata->nvalue_table;
+	sr_info->nvalue_count = pdata->nvalue_count;
+	sr_info->senn_mod = pdata->senn_mod;
+	sr_info->senp_mod = pdata->senp_mod;
+	sr_info->autocomp_active = false;
+	sr_info->ip_type = pdata->ip_type;
+	sr_info->base = ioremap(mem->start, resource_size(mem));
+	if (!sr_info->base) {
+		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
+		ret = -ENOMEM;
+		goto err_release_region;
+	}
+
+	if (irq)
+		sr_info->irq = irq->start;
+
+	sr_set_clk_length(sr_info);
+	sr_set_regfields(sr_info);
+
+	list_add(&sr_info->node, &sr_list);
+
+	/*
+	 * Call into late init to do initializations that require
+	 * both the sr driver and the sr class driver to be initialized.
+	 */
+	if (sr_class) {
+		ret = sr_late_init(sr_info);
+		if (ret) {
+			pr_warning("%s: Error in SR late init\n", __func__);
+			return ret;
+		}
+	}
+
+	dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
+
+	/*
+	 * If the voltage domain debugfs directory is not created, do
+	 * not try to create the rest of the debugfs entries.
+	 */
+	vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
+	if (!vdd_dbg_dir)
+		return -EINVAL;
+
+	dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
+	if (IS_ERR(dbg_dir)) {
+		dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
+			__func__);
+		return PTR_ERR(dbg_dir);
+	}
+
+	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+				(void *)sr_info, &pm_sr_fops);
+	(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
+			&sr_info->err_weight);
+	(void) debugfs_create_x32("errmaxlimit", S_IRUGO, dbg_dir,
+			&sr_info->err_maxlimit);
+	(void) debugfs_create_x32("errminlimit", S_IRUGO, dbg_dir,
+			&sr_info->err_minlimit);
+
+	nvalue_dir = debugfs_create_dir("nvalue", dbg_dir);
+	if (IS_ERR(nvalue_dir)) {
+		dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
+			"for n-values\n", __func__);
+		return PTR_ERR(nvalue_dir);
+	}
+
+	omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
+	if (!volt_data) {
+		dev_warn(&pdev->dev, "%s: No Voltage table for the"
+			" corresponding vdd vdd_%s. Cannot create debugfs"
+			"entries for n-values\n",
+			__func__, sr_info->voltdm->name);
+		return -ENODATA;
+	}
+
+	for (i = 0; i < sr_info->nvalue_count; i++) {
+		char *name;
+		char volt_name[32];
+
+		name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
+		if (!name) {
+			dev_err(&pdev->dev, "%s: Unable to allocate memory"
+				" for n-value directory name\n",  __func__);
+			return -ENOMEM;
+		}
+
+		strcpy(name, "volt_");
+		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
+		strcat(name, volt_name);
+		(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+				&(sr_info->nvalue_table[i].nvalue));
+	}
+
+	return ret;
+
+err_release_region:
+	release_mem_region(mem->start, resource_size(mem));
+err_free_devinfo:
+	kfree(sr_info);
+
+	return ret;
+}
+
+static int __devexit omap_sr_remove(struct platform_device *pdev)
+{
+	struct omap_sr_data *pdata = pdev->dev.platform_data;
+	struct omap_sr *sr_info;
+	struct resource *mem;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+		return -EINVAL;
+	}
+
+	sr_info = _sr_lookup(pdata->voltdm);
+	if (IS_ERR(sr_info)) {
+		dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (sr_info->autocomp_active)
+		sr_stop_vddautocomp(sr_info);
+
+	list_del(&sr_info->node);
+	iounmap(sr_info->base);
+	kfree(sr_info);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+
+	return 0;
+}
+
+static struct platform_driver smartreflex_driver = {
+	.remove         = omap_sr_remove,
+	.driver		= {
+		.name	= "smartreflex",
+	},
+};
+
+static int __init sr_init(void)
+{
+	int ret = 0;
+
+	/*
+	 * sr_init is a late init. If by then a PMIC specific API is not
+	 * registered, either there is no need for anything to be done on
+	 * the PMIC side or somebody has forgotten to register a PMIC
+	 * handler. Warn for the second condition.
+	 */
+	if (sr_pmic_data && sr_pmic_data->sr_pmic_init)
+		sr_pmic_data->sr_pmic_init();
+	else
+		pr_warning("%s: No PMIC hook to init smartreflex\n", __func__);
+
+	ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe);
+	if (ret) {
+		pr_err("%s: platform driver register failed for SR\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit sr_exit(void)
+{
+	platform_driver_unregister(&smartreflex_driver);
+}
+late_initcall(sr_init);
+module_exit(sr_exit);
+
+MODULE_DESCRIPTION("OMAP Smartreflex Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
new file mode 100644
index 0000000..b1e0af1
--- /dev/null
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -0,0 +1,147 @@
+/*
+ * OMAP3/OMAP4 smartreflex device file
+ *
+ * Author: Thara Gopinath	<thara@ti.com>
+ *
+ * Based originally on code from smartreflex.c
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <plat/omap_device.h>
+#include <plat/smartreflex.h>
+#include <plat/voltage.h>
+
+#include "control.h"
+#include "pm.h"
+
+static bool sr_enable_on_init;
+
+static struct omap_device_pm_latency omap_sr_latency[] = {
+	{
+		.deactivate_func = omap_device_idle_hwmods,
+		.activate_func	 = omap_device_enable_hwmods,
+		.flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST
+	},
+};
+
+/* Read EFUSE values from control registers for OMAP3430 */
+static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
+				struct omap_sr_data *sr_data)
+{
+	struct omap_sr_nvalue_table *nvalue_table;
+	int i, count = 0;
+
+	while (volt_data[count].volt_nominal)
+		count++;
+
+	nvalue_table = kzalloc(sizeof(struct omap_sr_nvalue_table)*count,
+			GFP_KERNEL);
+
+	for (i = 0; i < count; i++) {
+		u32 v;
+		/*
+		 * In OMAP4 the efuse registers are 24 bit aligned.
+		 * A __raw_readl will fail for non-32 bit aligned address
+		 * and hence the 8-bit read and shift.
+		 */
+		if (cpu_is_omap44xx()) {
+			u16 offset = volt_data[i].sr_efuse_offs;
+
+			v = omap_ctrl_readb(offset) |
+				omap_ctrl_readb(offset + 1) << 8 |
+				omap_ctrl_readb(offset + 2) << 16;
+		} else {
+			v = omap_ctrl_readl(volt_data[i].sr_efuse_offs);
+		}
+
+		nvalue_table[i].efuse_offs = volt_data[i].sr_efuse_offs;
+		nvalue_table[i].nvalue = v;
+	}
+
+	sr_data->nvalue_table = nvalue_table;
+	sr_data->nvalue_count = count;
+}
+
+static int sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+	struct omap_sr_data *sr_data;
+	struct omap_device *od;
+	struct omap_volt_data *volt_data;
+	char *name = "smartreflex";
+	static int i;
+
+	sr_data = kzalloc(sizeof(struct omap_sr_data), GFP_KERNEL);
+	if (!sr_data) {
+		pr_err("%s: Unable to allocate memory for %s sr_data.Error!\n",
+			__func__, oh->name);
+		return -ENOMEM;
+	}
+
+	if (!oh->vdd_name) {
+		pr_err("%s: No voltage domain specified for %s."
+			"Cannot initialize\n", __func__, oh->name);
+		goto exit;
+	}
+
+	sr_data->ip_type = oh->class->rev;
+	sr_data->senn_mod = 0x1;
+	sr_data->senp_mod = 0x1;
+
+	sr_data->voltdm = omap_voltage_domain_lookup(oh->vdd_name);
+	if (IS_ERR(sr_data->voltdm)) {
+		pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
+			__func__, oh->vdd_name);
+		goto exit;
+	}
+
+	omap_voltage_get_volttable(sr_data->voltdm, &volt_data);
+	if (!volt_data) {
+		pr_warning("%s: No Voltage table registerd fo VDD%d."
+			"Something really wrong\n\n", __func__, i + 1);
+		goto exit;
+	}
+
+	sr_set_nvalues(volt_data, sr_data);
+
+	sr_data->enable_on_init = sr_enable_on_init;
+
+	od = omap_device_build(name, i, oh, sr_data, sizeof(*sr_data),
+			       omap_sr_latency,
+			       ARRAY_SIZE(omap_sr_latency), 0);
+	if (IS_ERR(od))
+		pr_warning("%s: Could not build omap_device for %s: %s.\n\n",
+			__func__, name, oh->name);
+exit:
+	i++;
+	kfree(sr_data);
+	return 0;
+}
+
+/*
+ * API to be called from board files to enable smartreflex
+ * autocompensation at init.
+ */
+void __init omap_enable_smartreflex_on_init(void)
+{
+	sr_enable_on_init = true;
+}
+
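+/*
+ * Illustrative usage sketch (not part of this patch): a board file that
+ * wants autocompensation enabled from boot calls the helper above from its
+ * board init code, e.g.:
+ *
+ *	omap_enable_smartreflex_on_init();
+ */
+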
+int __init omap_devinit_smartreflex(void)
+{
+	return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
+}
diff --git a/arch/arm/mach-omap2/sram242x.S b/arch/arm/mach-omap2/sram242x.S
index 92e6e1a..055310c 100644
--- a/arch/arm/mach-omap2/sram242x.S
+++ b/arch/arm/mach-omap2/sram242x.S
@@ -21,14 +21,20 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  * MA 02111-1307 USA
+ *
+ * Richard Woodruff notes that any changes to this code must be carefully
+ * audited and tested to ensure that they don't cause a TLB miss while
+ * the SDRAM is inaccessible.  Such a situation will crash the system
+ * since it will cause the ARM MMU to attempt to walk the page tables.
+ * These crashes may be intermittent.
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <mach/io.h>
 #include <mach/hardware.h>
 
-#include "prm.h"
-#include "cm.h"
+#include "prm2xxx_3xxx.h"
+#include "cm2xxx_3xxx.h"
 #include "sdrc.h"
 
 	.text
diff --git a/arch/arm/mach-omap2/sram243x.S b/arch/arm/mach-omap2/sram243x.S
index ab49736..f900758 100644
--- a/arch/arm/mach-omap2/sram243x.S
+++ b/arch/arm/mach-omap2/sram243x.S
@@ -21,14 +21,20 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  * MA 02111-1307 USA
+ *
+ * Richard Woodruff notes that any changes to this code must be carefully
+ * audited and tested to ensure that they don't cause a TLB miss while
+ * the SDRAM is inaccessible.  Such a situation will crash the system
+ * since it will cause the ARM MMU to attempt to walk the page tables.
+ * These crashes may be intermittent.
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <mach/io.h>
 #include <mach/hardware.h>
 
-#include "prm.h"
-#include "cm.h"
+#include "prm2xxx_3xxx.h"
+#include "cm2xxx_3xxx.h"
 #include "sdrc.h"
 
 	.text
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index 3637274..7f893a2 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -32,7 +32,7 @@
 #include <mach/io.h>
 
 #include "sdrc.h"
-#include "cm.h"
+#include "cm2xxx_3xxx.h"
 
 	.text
 
@@ -104,6 +104,12 @@
  * touching the SDRAM.  Until that time, users who know that their use case
  * can satisfy the above requirement can enable the CONFIG_OMAP3_SDRC_AC_TIMING
  * option.
+ *
+ * Richard Woodruff notes that any changes to this code must be carefully
+ * audited and tested to ensure that they don't cause a TLB miss while
+ * the SDRAM is inaccessible.  Such a situation will crash the system
+ * since it will cause the ARM MMU to attempt to walk the page tables.
+ * These crashes may be intermittent.
  */
 ENTRY(omap3_sram_configure_core_dpll)
 	stmfd	sp!, {r1-r12, lr}	@ store regs to stack
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index a7816db..4e48e78 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -202,7 +202,7 @@
 static void __init omap2_gp_clocksource_init(void)
 {
 	static struct omap_dm_timer *gpt;
-	u32 tick_rate, tick_period;
+	u32 tick_rate;
 	static char err1[] __initdata = KERN_ERR
 		"%s: failed to request dm-timer\n";
 	static char err2[] __initdata = KERN_ERR
@@ -215,7 +215,6 @@
 
 	omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_SYS_CLK);
 	tick_rate = clk_get_rate(omap_dm_timer_get_fclk(gpt));
-	tick_period = (tick_rate / HZ) - 1;
 
 	omap_dm_timer_set_load_start(gpt, 1, 0);
 
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c
index b11bf38..25eeada 100644
--- a/arch/arm/mach-omap2/usb-ehci.c
+++ b/arch/arm/mach-omap2/usb-ehci.c
@@ -34,22 +34,15 @@
 
 static struct resource ehci_resources[] = {
 	{
-		.start	= OMAP34XX_EHCI_BASE,
-		.end	= OMAP34XX_EHCI_BASE + SZ_1K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.start	= OMAP34XX_UHH_CONFIG_BASE,
-		.end	= OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
-		.start	= OMAP34XX_USBTLL_BASE,
-		.end	= OMAP34XX_USBTLL_BASE + SZ_4K - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{         /* general IRQ */
-		.start   = INT_34XX_EHCI_IRQ,
 		.flags   = IORESOURCE_IRQ,
 	}
 };
@@ -214,13 +207,148 @@
 	return;
 }
 
+static void setup_4430ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode)
+{
+	switch (port_mode[0]) {
+	case EHCI_HCD_OMAP_MODE_PHY:
+		omap_mux_init_signal("usbb1_ulpiphy_stp",
+			OMAP_PIN_OUTPUT);
+		omap_mux_init_signal("usbb1_ulpiphy_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpiphy_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+		break;
+	case EHCI_HCD_OMAP_MODE_TLL:
+		omap_mux_init_signal("usbb1_ulpitll_stp",
+			OMAP_PIN_INPUT_PULLUP);
+		omap_mux_init_signal("usbb1_ulpitll_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb1_ulpitll_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+		break;
+	case EHCI_HCD_OMAP_MODE_UNKNOWN:
+	default:
+		break;
+	}
+	switch (port_mode[1]) {
+	case EHCI_HCD_OMAP_MODE_PHY:
+		omap_mux_init_signal("usbb2_ulpiphy_stp",
+			OMAP_PIN_OUTPUT);
+		omap_mux_init_signal("usbb2_ulpiphy_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpiphy_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+		break;
+	case EHCI_HCD_OMAP_MODE_TLL:
+		omap_mux_init_signal("usbb2_ulpitll_stp",
+			OMAP_PIN_INPUT_PULLUP);
+		omap_mux_init_signal("usbb2_ulpitll_clk",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dir",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_nxt",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat0",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat1",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat2",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat3",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat4",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat5",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat6",
+			OMAP_PIN_INPUT_PULLDOWN);
+		omap_mux_init_signal("usbb2_ulpitll_dat7",
+			OMAP_PIN_INPUT_PULLDOWN);
+		break;
+	case EHCI_HCD_OMAP_MODE_UNKNOWN:
+	default:
+		break;
+	}
+}
+
 void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata)
 {
 	platform_device_add_data(&ehci_device, pdata, sizeof(*pdata));
 
 	/* Setup Pin IO MUX for EHCI */
-	if (cpu_is_omap34xx())
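+	/*
+	 * The EHCI register ranges and IRQ differ between OMAP3 and OMAP4,
+	 * so fill in the resources at runtime based on the detected SoC.
+	 */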
+	if (cpu_is_omap34xx()) {
+		ehci_resources[0].start	= OMAP34XX_EHCI_BASE;
+		ehci_resources[0].end	= OMAP34XX_EHCI_BASE + SZ_1K - 1;
+		ehci_resources[1].start	= OMAP34XX_UHH_CONFIG_BASE;
+		ehci_resources[1].end	= OMAP34XX_UHH_CONFIG_BASE + SZ_1K - 1;
+		ehci_resources[2].start	= OMAP34XX_USBTLL_BASE;
+		ehci_resources[2].end	= OMAP34XX_USBTLL_BASE + SZ_4K - 1;
+		ehci_resources[3].start = INT_34XX_EHCI_IRQ;
 		setup_ehci_io_mux(pdata->port_mode);
+	} else if (cpu_is_omap44xx()) {
+		ehci_resources[0].start	= OMAP44XX_HSUSB_EHCI_BASE;
+		ehci_resources[0].end	= OMAP44XX_HSUSB_EHCI_BASE + SZ_1K - 1;
+		ehci_resources[1].start	= OMAP44XX_UHH_CONFIG_BASE;
+		ehci_resources[1].end	= OMAP44XX_UHH_CONFIG_BASE + SZ_2K - 1;
+		ehci_resources[2].start	= OMAP44XX_USBTLL_BASE;
+		ehci_resources[2].end	= OMAP44XX_USBTLL_BASE + SZ_4K - 1;
+		ehci_resources[3].start = OMAP44XX_IRQ_EHCI;
+		setup_4430ehci_io_mux(pdata->port_mode);
+	}
 
 	if (platform_device_register(&ehci_device) < 0) {
 		printk(KERN_ERR "Unable to register HS-USB (EHCI) device\n");
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 7260558..5298949 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -30,8 +30,101 @@
 #include <mach/irqs.h>
 #include <mach/am35xx.h>
 #include <plat/usb.h>
+#include "control.h"
 
-#ifdef CONFIG_USB_MUSB_SOC
+#if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined(CONFIG_USB_MUSB_AM35X)
+
+static void am35x_musb_reset(void)
+{
+	u32	regval;
+
+	/* Reset the musb interface */
+	regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+
+	regval |= AM35XX_USBOTGSS_SW_RST;
+	omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+
+	regval &= ~AM35XX_USBOTGSS_SW_RST;
+	omap_ctrl_writel(regval, AM35XX_CONTROL_IP_SW_RESET);
+
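+	/* read back to flush the posted reset writes */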
+	regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
+}
+
+static void am35x_musb_phy_power(u8 on)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	u32 devconf2;
+
+	if (on) {
+		/*
+		 * Start the on-chip PHY and its PLL.
+		 */
+		devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+		devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
+		devconf2 |= CONF2_PHY_PLLON;
+
+		omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+
+		pr_info("Waiting for PHY clock good...\n");
+		while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
+				& CONF2_PHYCLKGD)) {
+			cpu_relax();
+
+			if (time_after(jiffies, timeout)) {
+				pr_err("musb PHY clock good timed out\n");
+				break;
+			}
+		}
+	} else {
+		/*
+		 * Power down the on-chip PHY.
+		 */
+		devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+		devconf2 &= ~CONF2_PHY_PLLON;
+		devconf2 |=  CONF2_PHYPWRDN | CONF2_OTGPWRDN;
+		omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+	}
+}
+
+static void am35x_musb_clear_irq(void)
+{
+	u32 regval;
+
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval |= AM35XX_USBOTGSS_INT_CLR;
+	omap_ctrl_writel(regval, AM35XX_CONTROL_LVL_INTR_CLEAR);
+	regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
+}
+
+static void am35x_musb_set_mode(u8 musb_mode)
+{
+	u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+
+	devconf2 &= ~CONF2_OTGMODE;
+	switch (musb_mode) {
+#ifdef	CONFIG_USB_MUSB_HDRC_HCD
+	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
+		devconf2 |= CONF2_FORCE_HOST;
+		break;
+#endif
+#ifdef	CONFIG_USB_GADGET_MUSB_HDRC
+	case MUSB_PERIPHERAL:	/* Force VBUS valid, ID = 1 */
+		devconf2 |= CONF2_FORCE_DEVICE;
+		break;
+#endif
+#ifdef	CONFIG_USB_MUSB_OTG
+	case MUSB_OTG:		/* Don't override the VBUS/ID comparators */
+		devconf2 |= CONF2_NO_OVERRIDE;
+		break;
+#endif
+	default:
+		pr_info("Unsupported mode %u\n", musb_mode);
+	}
+
+	omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
+}
 
 static struct resource musb_resources[] = {
 	[0] = { /* start and end set dynamically */
@@ -40,10 +133,12 @@
 	[1] = {	/* general IRQ */
 		.start	= INT_243X_HS_USB_MC,
 		.flags	= IORESOURCE_IRQ,
+		.name	= "mc",
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= INT_243X_HS_USB_DMA,
 		.flags	= IORESOURCE_IRQ,
+		.name	= "dma",
 	},
 };
 
@@ -75,7 +170,7 @@
 static u64 musb_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-omap2430",
 	.id		= -1,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -91,8 +186,13 @@
 	if (cpu_is_omap243x()) {
 		musb_resources[0].start = OMAP243X_HS_BASE;
 	} else if (cpu_is_omap3517() || cpu_is_omap3505()) {
+		musb_device.name = "musb-am35x";
 		musb_resources[0].start = AM35XX_IPSS_USBOTGSS_BASE;
 		musb_resources[1].start = INT_35XX_USBOTG_IRQ;
+		board_data->set_phy_power = am35x_musb_phy_power;
+		board_data->clear_irq = am35x_musb_clear_irq;
+		board_data->set_mode = am35x_musb_set_mode;
+		board_data->reset = am35x_musb_reset;
 	} else if (cpu_is_omap34xx()) {
 		musb_resources[0].start = OMAP34XX_HSUSB_OTG_BASE;
 	} else if (cpu_is_omap44xx()) {
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index 64a0112..8a3c05f 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -120,8 +120,8 @@
 	t.adv_on = next_clk(t.cs_on, t_scsnh_advnh - 7000, fclk_ps);
 
 	/* GPMC_CLK rate = fclk rate / div */
-	t.sync_clk = 12 /* 11.1 nsec */;
-	tmp = (t.sync_clk * 1000 + fclk_ps - 1) / fclk_ps;
+	t.sync_clk = 11100 /* 11.1 nsec */;
+	tmp = (t.sync_clk + fclk_ps - 1) / fclk_ps;
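+	/*
+	 * e.g. with a 166 MHz GPMC fclk (fclk_ps ~= 6024), the rounded-up
+	 * division above yields a GPMC_CLK divider of 2
+	 */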
 	if (tmp > 4)
 		return -ERANGE;
 	if (tmp <= 0)
@@ -216,6 +216,7 @@
 		.flags	= IORESOURCE_MEM,
 	},
 	{ /* IRQ */
+		.name	= "mc",
 		.flags	= IORESOURCE_IRQ,
 	},
 };
@@ -223,7 +224,7 @@
 static u64 tusb_dmamask = ~(u32)0;
 
 static struct platform_device tusb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-tusb",
 	.id		= -1,
 	.dev = {
 		.dma_mask		= &tusb_dmamask,
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
new file mode 100644
index 0000000..ed6079c
--- /dev/null
+++ b/arch/arm/mach-omap2/voltage.c
@@ -0,0 +1,1571 @@
+/*
+ * OMAP3/OMAP4 Voltage Management Routines
+ *
+ * Author: Thara Gopinath	<thara@ti.com>
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ * Lesly A M <x0080970@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <plat/common.h>
+#include <plat/voltage.h>
+
+#include "prm-regbits-34xx.h"
+#include "prm-regbits-44xx.h"
+#include "prm44xx.h"
+#include "prcm44xx.h"
+#include "prminst44xx.h"
+#include "control.h"
+
+#define VP_IDLE_TIMEOUT		200
+#define VP_TRANXDONE_TIMEOUT	300
+#define VOLTAGE_DIR_SIZE	16
+
+/* Voltage processor register offsets */
+struct vp_reg_offs {
+	u8 vpconfig;
+	u8 vstepmin;
+	u8 vstepmax;
+	u8 vlimitto;
+	u8 vstatus;
+	u8 voltage;
+};
+
+/* Voltage Processor bit field values, shifts and masks */
+struct vp_reg_val {
+	/* PRM module */
+	u16 prm_mod;
+	/* VPx_VPCONFIG */
+	u32 vpconfig_erroroffset;
+	u16 vpconfig_errorgain;
+	u32 vpconfig_errorgain_mask;
+	u8 vpconfig_errorgain_shift;
+	u32 vpconfig_initvoltage_mask;
+	u8 vpconfig_initvoltage_shift;
+	u32 vpconfig_timeouten;
+	u32 vpconfig_initvdd;
+	u32 vpconfig_forceupdate;
+	u32 vpconfig_vpenable;
+	/* VPx_VSTEPMIN */
+	u8 vstepmin_stepmin;
+	u16 vstepmin_smpswaittimemin;
+	u8 vstepmin_stepmin_shift;
+	u8 vstepmin_smpswaittimemin_shift;
+	/* VPx_VSTEPMAX */
+	u8 vstepmax_stepmax;
+	u16 vstepmax_smpswaittimemax;
+	u8 vstepmax_stepmax_shift;
+	u8 vstepmax_smpswaittimemax_shift;
+	/* VPx_VLIMITTO */
+	u8 vlimitto_vddmin;
+	u8 vlimitto_vddmax;
+	u16 vlimitto_timeout;
+	u8 vlimitto_vddmin_shift;
+	u8 vlimitto_vddmax_shift;
+	u8 vlimitto_timeout_shift;
+	/* PRM_IRQSTATUS*/
+	u32 tranxdone_status;
+};
+
+/* Voltage controller registers and offsets */
+struct vc_reg_info {
+	/* PRM module */
+	u16 prm_mod;
+	/* VC register offsets */
+	u8 smps_sa_reg;
+	u8 smps_volra_reg;
+	u8 bypass_val_reg;
+	u8 cmdval_reg;
+	u8 voltsetup_reg;
+	/*VC_SMPS_SA*/
+	u8 smps_sa_shift;
+	u32 smps_sa_mask;
+	/* VC_SMPS_VOL_RA */
+	u8 smps_volra_shift;
+	u32 smps_volra_mask;
+	/* VC_BYPASS_VAL */
+	u8 data_shift;
+	u8 slaveaddr_shift;
+	u8 regaddr_shift;
+	u32 valid;
+	/* VC_CMD_VAL */
+	u8 cmd_on_shift;
+	u8 cmd_onlp_shift;
+	u8 cmd_ret_shift;
+	u8 cmd_off_shift;
+	u32 cmd_on_mask;
+	/* PRM_VOLTSETUP */
+	u8 voltsetup_shift;
+	u32 voltsetup_mask;
+};
+
+/**
+ * omap_vdd_info - Per Voltage Domain info
+ *
+ * @volt_data		: voltage table having the distinct voltages supported
+ *			  by the domain and other associated per voltage data.
+ * @pmic_info		: pmic specific parameters which should be populated by
+ *			  the pmic drivers.
+ * @vp_offs		: structure containing the offsets for various
+ *			  vp registers
+ * @vp_reg		: the register values, shifts, masks for various
+ *			  vp registers
+ * @vc_reg		: structure containing the various vc registers,
+ *			  shifts, masks etc.
+ * @voltdm		: pointer to the voltage domain structure
+ * @debug_dir		: debug directory for this voltage domain.
+ * @curr_volt		: current voltage for this vdd.
+ * @ocp_mod		: The prm module for accessing the prm irqstatus reg.
+ * @prm_irqst_reg	: prm irqstatus register.
+ * @vp_enabled		: flag to keep track of whether vp is enabled or not
+ * @volt_scale		: API to scale the voltage of the vdd.
+ */
+struct omap_vdd_info {
+	struct omap_volt_data *volt_data;
+	struct omap_volt_pmic_info *pmic_info;
+	struct vp_reg_offs vp_offs;
+	struct vp_reg_val vp_reg;
+	struct vc_reg_info vc_reg;
+	struct voltagedomain voltdm;
+	struct dentry *debug_dir;
+	u32 curr_volt;
+	u16 ocp_mod;
+	u8 prm_irqst_reg;
+	bool vp_enabled;
+	u32 (*read_reg) (u16 mod, u8 offset);
+	void (*write_reg) (u32 val, u16 mod, u8 offset);
+	int (*volt_scale) (struct omap_vdd_info *vdd,
+		unsigned long target_volt);
+};
+
+static struct omap_vdd_info *vdd_info;
+/*
+ * Number of scalable voltage domains.
+ */
+static int nr_scalable_vdd;
+
+/* OMAP3 VDD structures */
+static struct omap_vdd_info omap3_vdd_info[] = {
+	{
+		.vp_offs = {
+			.vpconfig = OMAP3_PRM_VP1_CONFIG_OFFSET,
+			.vstepmin = OMAP3_PRM_VP1_VSTEPMIN_OFFSET,
+			.vstepmax = OMAP3_PRM_VP1_VSTEPMAX_OFFSET,
+			.vlimitto = OMAP3_PRM_VP1_VLIMITTO_OFFSET,
+			.vstatus = OMAP3_PRM_VP1_STATUS_OFFSET,
+			.voltage = OMAP3_PRM_VP1_VOLTAGE_OFFSET,
+		},
+		.voltdm = {
+			.name = "mpu",
+		},
+	},
+	{
+		.vp_offs = {
+			.vpconfig = OMAP3_PRM_VP2_CONFIG_OFFSET,
+			.vstepmin = OMAP3_PRM_VP2_VSTEPMIN_OFFSET,
+			.vstepmax = OMAP3_PRM_VP2_VSTEPMAX_OFFSET,
+			.vlimitto = OMAP3_PRM_VP2_VLIMITTO_OFFSET,
+			.vstatus = OMAP3_PRM_VP2_STATUS_OFFSET,
+			.voltage = OMAP3_PRM_VP2_VOLTAGE_OFFSET,
+		},
+		.voltdm = {
+			.name = "core",
+		},
+	},
+};
+
+#define OMAP3_NR_SCALABLE_VDD ARRAY_SIZE(omap3_vdd_info)
+
+/* OMAP4 VDD structures */
+static struct omap_vdd_info omap4_vdd_info[] = {
+	{
+		.vp_offs = {
+			.vpconfig = OMAP4_PRM_VP_MPU_CONFIG_OFFSET,
+			.vstepmin = OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET,
+			.vstepmax = OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET,
+			.vlimitto = OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET,
+			.vstatus = OMAP4_PRM_VP_MPU_STATUS_OFFSET,
+			.voltage = OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET,
+		},
+		.voltdm = {
+			.name = "mpu",
+		},
+	},
+	{
+		.vp_offs = {
+			.vpconfig = OMAP4_PRM_VP_IVA_CONFIG_OFFSET,
+			.vstepmin = OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET,
+			.vstepmax = OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET,
+			.vlimitto = OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET,
+			.vstatus = OMAP4_PRM_VP_IVA_STATUS_OFFSET,
+			.voltage = OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET,
+		},
+		.voltdm = {
+			.name = "iva",
+		},
+	},
+	{
+		.vp_offs = {
+			.vpconfig = OMAP4_PRM_VP_CORE_CONFIG_OFFSET,
+			.vstepmin = OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET,
+			.vstepmax = OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET,
+			.vlimitto = OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET,
+			.vstatus = OMAP4_PRM_VP_CORE_STATUS_OFFSET,
+			.voltage = OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET,
+		},
+		.voltdm = {
+			.name = "core",
+		},
+	},
+};
+
+#define OMAP4_NR_SCALABLE_VDD ARRAY_SIZE(omap4_vdd_info)
+
+/*
+ * Structures containing the voltages supported by OMAP3430/OMAP3630 and
+ * various voltage-dependent data for each VDD.
+ */
+#define VOLT_DATA_DEFINE(_v_nom, _efuse_offs, _errminlimit, _errgain)	\
+{									\
+	.volt_nominal	= _v_nom,					\
+	.sr_efuse_offs	= _efuse_offs,					\
+	.sr_errminlimit	= _errminlimit,					\
+	.vp_errgain	= _errgain					\
+}
+
+/* VDD1 */
+static struct omap_volt_data omap34xx_vddmpu_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP1_UV, OMAP343X_CONTROL_FUSE_OPP1_VDD1, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP2_UV, OMAP343X_CONTROL_FUSE_OPP2_VDD1, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP3_UV, OMAP343X_CONTROL_FUSE_OPP3_VDD1, 0xf9, 0x18),
+	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP4_UV, OMAP343X_CONTROL_FUSE_OPP4_VDD1, 0xf9, 0x18),
+	VOLT_DATA_DEFINE(OMAP3430_VDD_MPU_OPP5_UV, OMAP343X_CONTROL_FUSE_OPP5_VDD1, 0xf9, 0x18),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+static struct omap_volt_data omap36xx_vddmpu_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP50_UV, OMAP3630_CONTROL_FUSE_OPP50_VDD1, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD1, 0xf9, 0x16),
+	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP120_UV, OMAP3630_CONTROL_FUSE_OPP120_VDD1, 0xfa, 0x23),
+	VOLT_DATA_DEFINE(OMAP3630_VDD_MPU_OPP1G_UV, OMAP3630_CONTROL_FUSE_OPP1G_VDD1, 0xfa, 0x27),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+/* VDD2 */
+static struct omap_volt_data omap34xx_vddcore_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP1_UV, OMAP343X_CONTROL_FUSE_OPP1_VDD2, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP2_UV, OMAP343X_CONTROL_FUSE_OPP2_VDD2, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP3430_VDD_CORE_OPP3_UV, OMAP343X_CONTROL_FUSE_OPP3_VDD2, 0xf9, 0x18),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+static struct omap_volt_data omap36xx_vddcore_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP50_UV, OMAP3630_CONTROL_FUSE_OPP50_VDD2, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP3630_VDD_CORE_OPP100_UV, OMAP3630_CONTROL_FUSE_OPP100_VDD2, 0xf9, 0x16),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+/*
+ * Structures containing the voltages supported by OMAP4430 and various
+ * voltage-dependent data for each VDD.
+ */
+static struct omap_volt_data omap44xx_vdd_mpu_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP50_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP50, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPP100_UV, OMAP44XX_CONTROL_FUSE_MPU_OPP100, 0xf9, 0x16),
+	VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPTURBO, 0xfa, 0x23),
+	VOLT_DATA_DEFINE(OMAP4430_VDD_MPU_OPPNITRO_UV, OMAP44XX_CONTROL_FUSE_MPU_OPPNITRO, 0xfa, 0x27),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+static struct omap_volt_data omap44xx_vdd_iva_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP50_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP50, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPP100_UV, OMAP44XX_CONTROL_FUSE_IVA_OPP100, 0xf9, 0x16),
+	VOLT_DATA_DEFINE(OMAP4430_VDD_IVA_OPPTURBO_UV, OMAP44XX_CONTROL_FUSE_IVA_OPPTURBO, 0xfa, 0x23),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+static struct omap_volt_data omap44xx_vdd_core_volt_data[] = {
+	VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP50_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP50, 0xf4, 0x0c),
+	VOLT_DATA_DEFINE(OMAP4430_VDD_CORE_OPP100_UV, OMAP44XX_CONTROL_FUSE_CORE_OPP100, 0xf9, 0x16),
+	VOLT_DATA_DEFINE(0, 0, 0, 0),
+};
+
+static struct dentry *voltage_dir;
+
+/* Init function pointers */
+static void (*vc_init) (struct omap_vdd_info *vdd);
+static int (*vdd_data_configure) (struct omap_vdd_info *vdd);
+
+static u32 omap3_voltage_read_reg(u16 mod, u8 offset)
+{
+	return omap2_prm_read_mod_reg(mod, offset);
+}
+
+static void omap3_voltage_write_reg(u32 val, u16 mod, u8 offset)
+{
+	omap2_prm_write_mod_reg(val, mod, offset);
+}
+
+static u32 omap4_voltage_read_reg(u16 mod, u8 offset)
+{
+	return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
+					mod, offset);
+}
+
+static void omap4_voltage_write_reg(u32 val, u16 mod, u8 offset)
+{
+	omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION, mod, offset);
+}
+
+/* Voltage debugfs support */
+static int vp_volt_debug_get(void *data, u64 *val)
+{
+	struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
+	u8 vsel;
+
+	if (!vdd) {
+		pr_warning("Wrong parameter passed\n");
+		return -EINVAL;
+	}
+
+	vsel = vdd->read_reg(vdd->vp_reg.prm_mod, vdd->vp_offs.voltage);
+	pr_notice("curr_vsel = %x\n", vsel);
+
+	if (!vdd->pmic_info->vsel_to_uv) {
+		pr_warning("PMIC function to convert vsel to voltage "
+			"in uV not registered\n");
+		return -EINVAL;
+	}
+
+	*val = vdd->pmic_info->vsel_to_uv(vsel);
+	return 0;
+}
+
+static int nom_volt_debug_get(void *data, u64 *val)
+{
+	struct omap_vdd_info *vdd = (struct omap_vdd_info *) data;
+
+	if (!vdd) {
+		pr_warning("Wrong parameter passed\n");
+		return -EINVAL;
+	}
+
+	*val = omap_voltage_get_nom_volt(&vdd->voltdm);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vp_volt_debug_fops, vp_volt_debug_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(nom_volt_debug_fops, nom_volt_debug_get, NULL,
+								"%llu\n");
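+
+/*
+ * vp_latch_vsel() - latch the current nominal voltage into the voltage
+ * processor's INITVOLTAGE field so the VP starts from a known vsel.
+ */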
+static void vp_latch_vsel(struct omap_vdd_info *vdd)
+{
+	u32 vpconfig;
+	u16 mod;
+	unsigned long uvdc;
+	char vsel;
+
+	uvdc = omap_voltage_get_nom_volt(&vdd->voltdm);
+	if (!uvdc) {
+		pr_warning("%s: unable to find current voltage for vdd_%s\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+
+	if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
+		pr_warning("%s: PMIC function to convert voltage in uV to"
+			" vsel not registered\n", __func__);
+		return;
+	}
+
+	mod = vdd->vp_reg.prm_mod;
+
+	vsel = vdd->pmic_info->uv_to_vsel(uvdc);
+
+	vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig);
+	vpconfig &= ~(vdd->vp_reg.vpconfig_initvoltage_mask |
+			vdd->vp_reg.vpconfig_initvdd);
+	vpconfig |= vsel << vdd->vp_reg.vpconfig_initvoltage_shift;
+
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+
+	/* Trigger initVDD value copy to voltage processor */
+	vdd->write_reg((vpconfig | vdd->vp_reg.vpconfig_initvdd), mod,
+			vdd->vp_offs.vpconfig);
+
+	/* Clear initVDD copy trigger bit */
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+}
+
+/* Generic voltage init functions */
+static void __init vp_init(struct omap_vdd_info *vdd)
+{
+	u32 vp_val;
+	u16 mod;
+
+	if (!vdd->read_reg || !vdd->write_reg) {
+		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+
+	mod = vdd->vp_reg.prm_mod;
+
+	vp_val = vdd->vp_reg.vpconfig_erroroffset |
+		(vdd->vp_reg.vpconfig_errorgain <<
+		vdd->vp_reg.vpconfig_errorgain_shift) |
+		vdd->vp_reg.vpconfig_timeouten;
+	vdd->write_reg(vp_val, mod, vdd->vp_offs.vpconfig);
+
+	vp_val = ((vdd->vp_reg.vstepmin_smpswaittimemin <<
+		vdd->vp_reg.vstepmin_smpswaittimemin_shift) |
+		(vdd->vp_reg.vstepmin_stepmin <<
+		vdd->vp_reg.vstepmin_stepmin_shift));
+	vdd->write_reg(vp_val, mod, vdd->vp_offs.vstepmin);
+
+	vp_val = ((vdd->vp_reg.vstepmax_smpswaittimemax <<
+		vdd->vp_reg.vstepmax_smpswaittimemax_shift) |
+		(vdd->vp_reg.vstepmax_stepmax <<
+		vdd->vp_reg.vstepmax_stepmax_shift));
+	vdd->write_reg(vp_val, mod, vdd->vp_offs.vstepmax);
+
+	vp_val = ((vdd->vp_reg.vlimitto_vddmax <<
+		vdd->vp_reg.vlimitto_vddmax_shift) |
+		(vdd->vp_reg.vlimitto_vddmin <<
+		vdd->vp_reg.vlimitto_vddmin_shift) |
+		(vdd->vp_reg.vlimitto_timeout <<
+		vdd->vp_reg.vlimitto_timeout_shift));
+	vdd->write_reg(vp_val, mod, vdd->vp_offs.vlimitto);
+}
+
+static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
+{
+	char *name;
+
+	name = kzalloc(VOLTAGE_DIR_SIZE, GFP_KERNEL);
+	if (!name) {
+		pr_warning("%s: Unable to allocate memory for debugfs"
+			" directory name for vdd_%s\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+	strcpy(name, "vdd_");
+	strcat(name, vdd->voltdm.name);
+
+	vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
+	if (IS_ERR(vdd->debug_dir)) {
+		pr_warning("%s: Unable to create debugfs directory for"
+			" vdd_%s\n", __func__, vdd->voltdm.name);
+		vdd->debug_dir = NULL;
+		return;
+	}
+
+	(void) debugfs_create_x16("vp_errorgain", S_IRUGO, vdd->debug_dir,
+				&(vdd->vp_reg.vpconfig_errorgain));
+	(void) debugfs_create_x16("vp_smpswaittimemin", S_IRUGO,
+				vdd->debug_dir,
+				&(vdd->vp_reg.vstepmin_smpswaittimemin));
+	(void) debugfs_create_x8("vp_stepmin", S_IRUGO, vdd->debug_dir,
+				&(vdd->vp_reg.vstepmin_stepmin));
+	(void) debugfs_create_x16("vp_smpswaittimemax", S_IRUGO,
+				vdd->debug_dir,
+				&(vdd->vp_reg.vstepmax_smpswaittimemax));
+	(void) debugfs_create_x8("vp_stepmax", S_IRUGO, vdd->debug_dir,
+				&(vdd->vp_reg.vstepmax_stepmax));
+	(void) debugfs_create_x8("vp_vddmax", S_IRUGO, vdd->debug_dir,
+				&(vdd->vp_reg.vlimitto_vddmax));
+	(void) debugfs_create_x8("vp_vddmin", S_IRUGO, vdd->debug_dir,
+				&(vdd->vp_reg.vlimitto_vddmin));
+	(void) debugfs_create_x16("vp_timeout", S_IRUGO, vdd->debug_dir,
+				&(vdd->vp_reg.vlimitto_timeout));
+	(void) debugfs_create_file("curr_vp_volt", S_IRUGO, vdd->debug_dir,
+				(void *) vdd, &vp_volt_debug_fops);
+	(void) debugfs_create_file("curr_nominal_volt", S_IRUGO,
+				vdd->debug_dir, (void *) vdd,
+				&nom_volt_debug_fops);
+}
+
+/* Voltage scale and accessory APIs */
+static int _pre_volt_scale(struct omap_vdd_info *vdd,
+		unsigned long target_volt, u8 *target_vsel, u8 *current_vsel)
+{
+	struct omap_volt_data *volt_data;
+	u32 vc_cmdval, vp_errgain_val;
+	u16 vp_mod, vc_mod;
+
+	/* Check if sufficient pmic info is available for this vdd */
+	if (!vdd->pmic_info) {
+		pr_err("%s: Insufficient pmic info to scale the vdd_%s\n",
+			__func__, vdd->voltdm.name);
+		return -EINVAL;
+	}
+
+	if (!vdd->pmic_info->uv_to_vsel) {
+		pr_err("%s: PMIC function to convert voltage in uV to "
+			"vsel not registered. Hence unable to scale voltage "
+			"for vdd_%s\n", __func__, vdd->voltdm.name);
+		return -ENODATA;
+	}
+
+	if (!vdd->read_reg || !vdd->write_reg) {
+		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+			__func__, vdd->voltdm.name);
+		return -EINVAL;
+	}
+
+	vp_mod = vdd->vp_reg.prm_mod;
+	vc_mod = vdd->vc_reg.prm_mod;
+
+	/* Get volt_data corresponding to target_volt */
+	volt_data = omap_voltage_get_voltdata(&vdd->voltdm, target_volt);
+	if (IS_ERR(volt_data))
+		volt_data = NULL;
+
+	*target_vsel = vdd->pmic_info->uv_to_vsel(target_volt);
+	*current_vsel = vdd->read_reg(vp_mod, vdd->vp_offs.voltage);
+
+	/* Setting the ON voltage to the new target voltage */
+	vc_cmdval = vdd->read_reg(vc_mod, vdd->vc_reg.cmdval_reg);
+	vc_cmdval &= ~vdd->vc_reg.cmd_on_mask;
+	vc_cmdval |= (*target_vsel << vdd->vc_reg.cmd_on_shift);
+	vdd->write_reg(vc_cmdval, vc_mod, vdd->vc_reg.cmdval_reg);
+
+	/* Setting vp errorgain based on the voltage */
+	if (volt_data) {
+		vp_errgain_val = vdd->read_reg(vp_mod,
+				vdd->vp_offs.vpconfig);
+		vdd->vp_reg.vpconfig_errorgain = volt_data->vp_errgain;
+		vp_errgain_val &= ~vdd->vp_reg.vpconfig_errorgain_mask;
+		vp_errgain_val |= vdd->vp_reg.vpconfig_errorgain <<
+				vdd->vp_reg.vpconfig_errorgain_shift;
+		vdd->write_reg(vp_errgain_val, vp_mod,
+				vdd->vp_offs.vpconfig);
+	}
+
+	return 0;
+}
+
+static void _post_volt_scale(struct omap_vdd_info *vdd,
+		unsigned long target_volt, u8 target_vsel, u8 current_vsel)
+{
+	u32 smps_steps = 0, smps_delay = 0;
+
+	smps_steps = abs(target_vsel - current_vsel);
+	/* SMPS slew rate / step size. 2us added as buffer. */
+	smps_delay = ((smps_steps * vdd->pmic_info->step_size) /
+			vdd->pmic_info->slew_rate) + 2;
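+	/*
+	 * e.g. a 4-step change on a PMIC with 12.5 mV steps and a 4 mV/us
+	 * slew rate gives (4 * 12500) / 4000 + 2 = 14 us of settling time
+	 */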
+	udelay(smps_delay);
+
+	vdd->curr_volt = target_volt;
+}
+
+/* vc_bypass_scale_voltage - VC bypass method of voltage scaling */
+static int vc_bypass_scale_voltage(struct omap_vdd_info *vdd,
+		unsigned long target_volt)
+{
+	u32 loop_cnt = 0, retries_cnt = 0;
+	u32 vc_valid, vc_bypass_val_reg, vc_bypass_value;
+	u16 mod;
+	u8 target_vsel, current_vsel;
+	int ret;
+
+	ret = _pre_volt_scale(vdd, target_volt, &target_vsel, &current_vsel);
+	if (ret)
+		return ret;
+
+	mod = vdd->vc_reg.prm_mod;
+
+	vc_valid = vdd->vc_reg.valid;
+	vc_bypass_val_reg = vdd->vc_reg.bypass_val_reg;
+	vc_bypass_value = (target_vsel << vdd->vc_reg.data_shift) |
+			(vdd->pmic_info->pmic_reg <<
+			vdd->vc_reg.regaddr_shift) |
+			(vdd->pmic_info->i2c_slave_addr <<
+			vdd->vc_reg.slaveaddr_shift);
+
+	vdd->write_reg(vc_bypass_value, mod, vc_bypass_val_reg);
+	vdd->write_reg(vc_bypass_value | vc_valid, mod, vc_bypass_val_reg);
+
+	vc_bypass_value = vdd->read_reg(mod, vc_bypass_val_reg);
+	/*
+	 * Loop until the bypass command is acknowledged by the SMPS.
+	 * NOTE: This is legacy code. The loop count and retry count need
+	 * to be revisited.
+	 */
+	while (!(vc_bypass_value & vc_valid)) {
+		loop_cnt++;
+
+		if (retries_cnt > 10) {
+			pr_warning("%s: Retry count exceeded\n", __func__);
+			return -ETIMEDOUT;
+		}
+
+		if (loop_cnt > 50) {
+			retries_cnt++;
+			loop_cnt = 0;
+			udelay(10);
+		}
+		vc_bypass_value = vdd->read_reg(mod, vc_bypass_val_reg);
+	}
+
+	_post_volt_scale(vdd, target_volt, target_vsel, current_vsel);
+	return 0;
+}
+
+/* VP force update method of voltage scaling */
+static int vp_forceupdate_scale_voltage(struct omap_vdd_info *vdd,
+		unsigned long target_volt)
+{
+	u32 vpconfig;
+	u16 mod, ocp_mod;
+	u8 target_vsel, current_vsel, prm_irqst_reg;
+	int ret, timeout = 0;
+
+	ret = _pre_volt_scale(vdd, target_volt, &target_vsel, &current_vsel);
+	if (ret)
+		return ret;
+
+	mod = vdd->vp_reg.prm_mod;
+	ocp_mod = vdd->ocp_mod;
+	prm_irqst_reg = vdd->prm_irqst_reg;
+
+	/*
+	 * Clear all pending TransactionDone interrupt/status. Typical latency
+	 * is <3us
+	 */
+	while (timeout++ < VP_TRANXDONE_TIMEOUT) {
+		vdd->write_reg(vdd->vp_reg.tranxdone_status,
+				ocp_mod, prm_irqst_reg);
+		if (!(vdd->read_reg(ocp_mod, prm_irqst_reg) &
+				vdd->vp_reg.tranxdone_status))
+				break;
+		udelay(1);
+	}
+	if (timeout >= VP_TRANXDONE_TIMEOUT) {
+		pr_warning("%s: vdd_%s TRANXDONE timeout exceeded. "
+			"Voltage change aborted\n", __func__, vdd->voltdm.name);
+		return -ETIMEDOUT;
+	}
+
+	/* Configure for VP-Force Update */
+	vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig);
+	vpconfig &= ~(vdd->vp_reg.vpconfig_initvdd |
+			vdd->vp_reg.vpconfig_forceupdate |
+			vdd->vp_reg.vpconfig_initvoltage_mask);
+	vpconfig |= ((target_vsel <<
+			vdd->vp_reg.vpconfig_initvoltage_shift));
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+
+	/* Trigger initVDD value copy to voltage processor */
+	vpconfig |= vdd->vp_reg.vpconfig_initvdd;
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+
+	/* Force update of voltage */
+	vpconfig |= vdd->vp_reg.vpconfig_forceupdate;
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+
+	/*
+	 * Wait for TransactionDone. Typical latency is <200us.
+	 * Depends on SMPSWAITTIMEMIN/MAX and voltage change
+	 */
+	timeout = 0;
+	omap_test_timeout((vdd->read_reg(ocp_mod, prm_irqst_reg) &
+			vdd->vp_reg.tranxdone_status),
+			VP_TRANXDONE_TIMEOUT, timeout);
+	if (timeout >= VP_TRANXDONE_TIMEOUT)
+		pr_err("%s: vdd_%s TRANXDONE timeout exceeded. "
+			"TRANXDONE never got set after the voltage update\n",
+			__func__, vdd->voltdm.name);
+
+	_post_volt_scale(vdd, target_volt, target_vsel, current_vsel);
+
+	/*
+	 * Disable TransactionDone interrupt, clear all status, clear
+	 * control registers
+	 */
+	timeout = 0;
+	while (timeout++ < VP_TRANXDONE_TIMEOUT) {
+		vdd->write_reg(vdd->vp_reg.tranxdone_status,
+				ocp_mod, prm_irqst_reg);
+		if (!(vdd->read_reg(ocp_mod, prm_irqst_reg) &
+				vdd->vp_reg.tranxdone_status))
+				break;
+		udelay(1);
+	}
+
+	if (timeout >= VP_TRANXDONE_TIMEOUT)
+		pr_warning("%s: vdd_%s TRANXDONE timeout exceeded while trying "
+			"to clear the TRANXDONE status\n",
+			__func__, vdd->voltdm.name);
+
+	vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig);
+	/* Clear initVDD copy trigger bit */
+	vpconfig &= ~vdd->vp_reg.vpconfig_initvdd;
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+	/* Clear force bit */
+	vpconfig &= ~vdd->vp_reg.vpconfig_forceupdate;
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+
+	return 0;
+}
+
+/* OMAP3 specific voltage init functions */
+
+/*
+ * Initializes the voltage controller registers with the PMIC and board
+ * specific parameters and voltage setup times for OMAP3.
+ */
+static void __init omap3_vc_init(struct omap_vdd_info *vdd)
+{
+	u32 vc_val;
+	u16 mod;
+	u8 on_vsel, onlp_vsel, ret_vsel, off_vsel;
+	static bool is_initialized;
+
+	if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
+		pr_err("%s: PMIC info required to configure vc for "
+			"vdd_%s not populated. Hence cannot initialize vc\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+
+	if (!vdd->read_reg || !vdd->write_reg) {
+		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+
+	mod = vdd->vc_reg.prm_mod;
+
+	/* Set up the SMPS_SA (i2c slave address) in VC */
+	vc_val = vdd->read_reg(mod, vdd->vc_reg.smps_sa_reg);
+	vc_val &= ~vdd->vc_reg.smps_sa_mask;
+	vc_val |= vdd->pmic_info->i2c_slave_addr << vdd->vc_reg.smps_sa_shift;
+	vdd->write_reg(vc_val, mod, vdd->vc_reg.smps_sa_reg);
+
+	/* Set up the VOLRA (pmic reg addr) in VC */
+	vc_val = vdd->read_reg(mod, vdd->vc_reg.smps_volra_reg);
+	vc_val &= ~vdd->vc_reg.smps_volra_mask;
+	vc_val |= vdd->pmic_info->pmic_reg << vdd->vc_reg.smps_volra_shift;
+	vdd->write_reg(vc_val, mod, vdd->vc_reg.smps_volra_reg);
+
+	/* Configure the setup times */
+	vc_val = vdd->read_reg(mod, vdd->vc_reg.voltsetup_reg);
+	vc_val &= ~vdd->vc_reg.voltsetup_mask;
+	vc_val |= vdd->pmic_info->volt_setup_time <<
+			vdd->vc_reg.voltsetup_shift;
+	vdd->write_reg(vc_val, mod, vdd->vc_reg.voltsetup_reg);
+
+	/* Set up the on, inactive, retention and off voltage */
+	on_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->on_volt);
+	onlp_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->onlp_volt);
+	ret_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->ret_volt);
+	off_vsel = vdd->pmic_info->uv_to_vsel(vdd->pmic_info->off_volt);
+	vc_val	= ((on_vsel << vdd->vc_reg.cmd_on_shift) |
+		(onlp_vsel << vdd->vc_reg.cmd_onlp_shift) |
+		(ret_vsel << vdd->vc_reg.cmd_ret_shift) |
+		(off_vsel << vdd->vc_reg.cmd_off_shift));
+	vdd->write_reg(vc_val, mod, vdd->vc_reg.cmdval_reg);
+
+	if (is_initialized)
+		return;
+
+	/* Generic VC parameters init */
+	vdd->write_reg(OMAP3430_CMD1_MASK | OMAP3430_RAV1_MASK, mod,
+			OMAP3_PRM_VC_CH_CONF_OFFSET);
+	vdd->write_reg(OMAP3430_MCODE_SHIFT | OMAP3430_HSEN_MASK, mod,
+			OMAP3_PRM_VC_I2C_CFG_OFFSET);
+	vdd->write_reg(OMAP3_CLKSETUP, mod, OMAP3_PRM_CLKSETUP_OFFSET);
+	vdd->write_reg(OMAP3_VOLTOFFSET, mod, OMAP3_PRM_VOLTOFFSET_OFFSET);
+	vdd->write_reg(OMAP3_VOLTSETUP2, mod, OMAP3_PRM_VOLTSETUP2_OFFSET);
+	is_initialized = true;
+}
+
+/* Sets up all the VDD related info for OMAP3 */
+static int __init omap3_vdd_data_configure(struct omap_vdd_info *vdd)
+{
+	struct clk *sys_ck;
+	u32 sys_clk_speed, timeout_val, waittime;
+
+	if (!vdd->pmic_info) {
+		pr_err("%s: PMIC info required to configure vdd_%s not "
+			"populated. Hence cannot initialize vdd_%s\n",
+			__func__, vdd->voltdm.name, vdd->voltdm.name);
+		return -EINVAL;
+	}
+
+	if (!strcmp(vdd->voltdm.name, "mpu")) {
+		if (cpu_is_omap3630())
+			vdd->volt_data = omap36xx_vddmpu_volt_data;
+		else
+			vdd->volt_data = omap34xx_vddmpu_volt_data;
+
+		vdd->vp_reg.tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK;
+		vdd->vc_reg.cmdval_reg = OMAP3_PRM_VC_CMD_VAL_0_OFFSET;
+		vdd->vc_reg.smps_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA0_SHIFT;
+		vdd->vc_reg.smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA0_MASK;
+		vdd->vc_reg.smps_volra_shift = OMAP3430_VOLRA0_SHIFT;
+		vdd->vc_reg.smps_volra_mask = OMAP3430_VOLRA0_MASK;
+		vdd->vc_reg.voltsetup_shift = OMAP3430_SETUP_TIME1_SHIFT;
+		vdd->vc_reg.voltsetup_mask = OMAP3430_SETUP_TIME1_MASK;
+	} else if (!strcmp(vdd->voltdm.name, "core")) {
+		if (cpu_is_omap3630())
+			vdd->volt_data = omap36xx_vddcore_volt_data;
+		else
+			vdd->volt_data = omap34xx_vddcore_volt_data;
+
+		vdd->vp_reg.tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK;
+		vdd->vc_reg.cmdval_reg = OMAP3_PRM_VC_CMD_VAL_1_OFFSET;
+		vdd->vc_reg.smps_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA1_SHIFT;
+		vdd->vc_reg.smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA1_MASK;
+		vdd->vc_reg.smps_volra_shift = OMAP3430_VOLRA1_SHIFT;
+		vdd->vc_reg.smps_volra_mask = OMAP3430_VOLRA1_MASK;
+		vdd->vc_reg.voltsetup_shift = OMAP3430_SETUP_TIME2_SHIFT;
+		vdd->vc_reg.voltsetup_mask = OMAP3430_SETUP_TIME2_MASK;
+	} else {
+		pr_warning("%s: vdd_%s does not exist in OMAP3\n",
+			__func__, vdd->voltdm.name);
+		return -EINVAL;
+	}
+
+	/*
+	 * The sys clk rate is required to calculate the vp timeout value and
+	 * smpswaittimemin and smpswaittimemax.
+	 */
+	sys_ck = clk_get(NULL, "sys_ck");
+	if (IS_ERR(sys_ck)) {
+		pr_warning("%s: Could not get the sys clk to calculate "
+			"various vdd_%s params\n", __func__, vdd->voltdm.name);
+		return -EINVAL;
+	}
+	sys_clk_speed = clk_get_rate(sys_ck);
+	clk_put(sys_ck);
+	/* Divide to avoid overflow */
+	sys_clk_speed /= 1000;
+
+	/* Generic voltage parameters */
+	vdd->curr_volt = 1200000;
+	vdd->ocp_mod = OCP_MOD;
+	vdd->prm_irqst_reg = OMAP3_PRM_IRQSTATUS_MPU_OFFSET;
+	vdd->read_reg = omap3_voltage_read_reg;
+	vdd->write_reg = omap3_voltage_write_reg;
+	vdd->volt_scale = vp_forceupdate_scale_voltage;
+	vdd->vp_enabled = false;
+
+	/* VC parameters */
+	vdd->vc_reg.prm_mod = OMAP3430_GR_MOD;
+	vdd->vc_reg.smps_sa_reg = OMAP3_PRM_VC_SMPS_SA_OFFSET;
+	vdd->vc_reg.smps_volra_reg = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET;
+	vdd->vc_reg.bypass_val_reg = OMAP3_PRM_VC_BYPASS_VAL_OFFSET;
+	vdd->vc_reg.voltsetup_reg = OMAP3_PRM_VOLTSETUP1_OFFSET;
+	vdd->vc_reg.data_shift = OMAP3430_DATA_SHIFT;
+	vdd->vc_reg.slaveaddr_shift = OMAP3430_SLAVEADDR_SHIFT;
+	vdd->vc_reg.regaddr_shift = OMAP3430_REGADDR_SHIFT;
+	vdd->vc_reg.valid = OMAP3430_VALID_MASK;
+	vdd->vc_reg.cmd_on_shift = OMAP3430_VC_CMD_ON_SHIFT;
+	vdd->vc_reg.cmd_on_mask = OMAP3430_VC_CMD_ON_MASK;
+	vdd->vc_reg.cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT;
+	vdd->vc_reg.cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT;
+	vdd->vc_reg.cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT;
+
+	vdd->vp_reg.prm_mod = OMAP3430_GR_MOD;
+
+	/* VPCONFIG bit fields */
+	vdd->vp_reg.vpconfig_erroroffset = (vdd->pmic_info->vp_erroroffset <<
+				 OMAP3430_ERROROFFSET_SHIFT);
+	vdd->vp_reg.vpconfig_errorgain_mask = OMAP3430_ERRORGAIN_MASK;
+	vdd->vp_reg.vpconfig_errorgain_shift = OMAP3430_ERRORGAIN_SHIFT;
+	vdd->vp_reg.vpconfig_initvoltage_shift = OMAP3430_INITVOLTAGE_SHIFT;
+	vdd->vp_reg.vpconfig_initvoltage_mask = OMAP3430_INITVOLTAGE_MASK;
+	vdd->vp_reg.vpconfig_timeouten = OMAP3430_TIMEOUTEN_MASK;
+	vdd->vp_reg.vpconfig_initvdd = OMAP3430_INITVDD_MASK;
+	vdd->vp_reg.vpconfig_forceupdate = OMAP3430_FORCEUPDATE_MASK;
+	vdd->vp_reg.vpconfig_vpenable = OMAP3430_VPENABLE_MASK;
+
+	/* VSTEPMIN VSTEPMAX bit fields */
+	waittime = ((vdd->pmic_info->step_size / vdd->pmic_info->slew_rate) *
+				sys_clk_speed) / 1000;
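+	/* waittime: per-step PMIC settling time, expressed in SYS_CLK cycles */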
+	vdd->vp_reg.vstepmin_smpswaittimemin = waittime;
+	vdd->vp_reg.vstepmax_smpswaittimemax = waittime;
+	vdd->vp_reg.vstepmin_stepmin = vdd->pmic_info->vp_vstepmin;
+	vdd->vp_reg.vstepmax_stepmax = vdd->pmic_info->vp_vstepmax;
+	vdd->vp_reg.vstepmin_smpswaittimemin_shift =
+				OMAP3430_SMPSWAITTIMEMIN_SHIFT;
+	vdd->vp_reg.vstepmax_smpswaittimemax_shift =
+				OMAP3430_SMPSWAITTIMEMAX_SHIFT;
+	vdd->vp_reg.vstepmin_stepmin_shift = OMAP3430_VSTEPMIN_SHIFT;
+	vdd->vp_reg.vstepmax_stepmax_shift = OMAP3430_VSTEPMAX_SHIFT;
+
+	/* VLIMITTO bit fields */
+	timeout_val = (sys_clk_speed * vdd->pmic_info->vp_timeout_us) / 1000;
+	vdd->vp_reg.vlimitto_timeout = timeout_val;
+	vdd->vp_reg.vlimitto_vddmin = vdd->pmic_info->vp_vddmin;
+	vdd->vp_reg.vlimitto_vddmax = vdd->pmic_info->vp_vddmax;
+	vdd->vp_reg.vlimitto_vddmin_shift = OMAP3430_VDDMIN_SHIFT;
+	vdd->vp_reg.vlimitto_vddmax_shift = OMAP3430_VDDMAX_SHIFT;
+	vdd->vp_reg.vlimitto_timeout_shift = OMAP3430_TIMEOUT_SHIFT;
+
+	return 0;
+}
+
+/* OMAP4 specific voltage init functions */
+static void __init omap4_vc_init(struct omap_vdd_info *vdd)
+{
+	u32 vc_val;
+	u16 mod;
+	static bool is_initialized;
+
+	if (!vdd->pmic_info || !vdd->pmic_info->uv_to_vsel) {
+		pr_err("%s: PMIC info required to configure vc for "
+			"vdd_%s not populated. Hence cannot initialize vc\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+
+	if (!vdd->read_reg || !vdd->write_reg) {
+		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+			__func__, vdd->voltdm.name);
+		return;
+	}
+
+	mod = vdd->vc_reg.prm_mod;
+
+	/* Set up the SMPS_SA (i2c slave address) in VC */
+	vc_val = vdd->read_reg(mod, vdd->vc_reg.smps_sa_reg);
+	vc_val &= ~vdd->vc_reg.smps_sa_mask;
+	vc_val |= vdd->pmic_info->i2c_slave_addr << vdd->vc_reg.smps_sa_shift;
+	vdd->write_reg(vc_val, mod, vdd->vc_reg.smps_sa_reg);
+
+	/* Set up the VOLRA (pmic reg addr) in VC */
+	vc_val = vdd->read_reg(mod, vdd->vc_reg.smps_volra_reg);
+	vc_val &= ~vdd->vc_reg.smps_volra_mask;
+	vc_val |= vdd->pmic_info->pmic_reg << vdd->vc_reg.smps_volra_shift;
+	vdd->write_reg(vc_val, mod, vdd->vc_reg.smps_volra_reg);
+
+	/* TODO: Configure setup times and CMD_VAL values*/
+
+	if (is_initialized)
+		return;
+
+	/* Generic VC parameters init */
+	vc_val = (OMAP4430_RAV_VDD_MPU_L_MASK | OMAP4430_CMD_VDD_MPU_L_MASK |
+		OMAP4430_RAV_VDD_IVA_L_MASK | OMAP4430_CMD_VDD_IVA_L_MASK |
+		OMAP4430_RAV_VDD_CORE_L_MASK | OMAP4430_CMD_VDD_CORE_L_MASK);
+	vdd->write_reg(vc_val, mod, OMAP4_PRM_VC_CFG_CHANNEL_OFFSET);
+
+	vc_val = (0x60 << OMAP4430_SCLL_SHIFT | 0x26 << OMAP4430_SCLH_SHIFT);
+	vdd->write_reg(vc_val, mod, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET);
+
+	is_initialized = true;
+}
+
+/* Sets up all the VDD related info for OMAP4 */
+static int __init omap4_vdd_data_configure(struct omap_vdd_info *vdd)
+{
+	struct clk *sys_ck;
+	u32 sys_clk_speed, timeout_val, waittime;
+
+	if (!vdd->pmic_info) {
+		pr_err("%s: PMIC info required to configure vdd_%s not "
+			"populated. Hence cannot initialize vdd_%s\n",
+			__func__, vdd->voltdm.name, vdd->voltdm.name);
+		return -EINVAL;
+	}
+
+	if (!strcmp(vdd->voltdm.name, "mpu")) {
+		vdd->volt_data = omap44xx_vdd_mpu_volt_data;
+		vdd->vp_reg.tranxdone_status =
+				OMAP4430_VP_MPU_TRANXDONE_ST_MASK;
+		vdd->vc_reg.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET;
+		vdd->vc_reg.smps_sa_shift =
+				OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_SHIFT;
+		vdd->vc_reg.smps_sa_mask =
+				OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_MASK;
+		vdd->vc_reg.smps_volra_shift = OMAP4430_VOLRA_VDD_MPU_L_SHIFT;
+		vdd->vc_reg.smps_volra_mask = OMAP4430_VOLRA_VDD_MPU_L_MASK;
+		vdd->vc_reg.voltsetup_reg =
+				OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET;
+		vdd->prm_irqst_reg = OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET;
+	} else if (!strcmp(vdd->voltdm.name, "core")) {
+		vdd->volt_data = omap44xx_vdd_core_volt_data;
+		vdd->vp_reg.tranxdone_status =
+				OMAP4430_VP_CORE_TRANXDONE_ST_MASK;
+		vdd->vc_reg.cmdval_reg =
+				OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET;
+		vdd->vc_reg.smps_sa_shift = OMAP4430_SA_VDD_CORE_L_0_6_SHIFT;
+		vdd->vc_reg.smps_sa_mask = OMAP4430_SA_VDD_CORE_L_0_6_MASK;
+		vdd->vc_reg.smps_volra_shift = OMAP4430_VOLRA_VDD_CORE_L_SHIFT;
+		vdd->vc_reg.smps_volra_mask = OMAP4430_VOLRA_VDD_CORE_L_MASK;
+		vdd->vc_reg.voltsetup_reg =
+				OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET;
+		vdd->prm_irqst_reg = OMAP4_PRM_IRQSTATUS_MPU_OFFSET;
+	} else if (!strcmp(vdd->voltdm.name, "iva")) {
+		vdd->volt_data = omap44xx_vdd_iva_volt_data;
+		vdd->vp_reg.tranxdone_status =
+				OMAP4430_VP_IVA_TRANXDONE_ST_MASK;
+		vdd->vc_reg.cmdval_reg = OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET;
+		vdd->vc_reg.smps_sa_shift =
+				OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_SHIFT;
+		vdd->vc_reg.smps_sa_mask =
+				OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_MASK;
+		vdd->vc_reg.smps_volra_shift = OMAP4430_VOLRA_VDD_IVA_L_SHIFT;
+		vdd->vc_reg.smps_volra_mask = OMAP4430_VOLRA_VDD_IVA_L_MASK;
+		vdd->vc_reg.voltsetup_reg =
+				OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET;
+		vdd->prm_irqst_reg = OMAP4_PRM_IRQSTATUS_MPU_OFFSET;
+	} else {
+		pr_warning("%s: vdd_%s does not exist in OMAP4\n",
+			__func__, vdd->voltdm.name);
+		return -EINVAL;
+	}
+
+	/*
+	 * The sys clk rate is required to calculate the vp timeout value and
+	 * smpswaittimemin and smpswaittimemax.
+	 */
+	sys_ck = clk_get(NULL, "sys_clkin_ck");
+	if (IS_ERR(sys_ck)) {
+		pr_warning("%s: Could not get the sys clk to calculate "
+			"various vdd_%s params\n", __func__, vdd->voltdm.name);
+		return -EINVAL;
+	}
+	sys_clk_speed = clk_get_rate(sys_ck);
+	clk_put(sys_ck);
+	/* Divide to avoid overflow */
+	sys_clk_speed /= 1000;
+
+	/* Generic voltage parameters */
+	vdd->curr_volt = 1200000;
+	vdd->ocp_mod = OMAP4430_PRM_OCP_SOCKET_INST;
+	vdd->read_reg = omap4_voltage_read_reg;
+	vdd->write_reg = omap4_voltage_write_reg;
+	vdd->volt_scale = vp_forceupdate_scale_voltage;
+	vdd->vp_enabled = false;
+
+	/* VC parameters */
+	vdd->vc_reg.prm_mod = OMAP4430_PRM_DEVICE_INST;
+	vdd->vc_reg.smps_sa_reg = OMAP4_PRM_VC_SMPS_SA_OFFSET;
+	vdd->vc_reg.smps_volra_reg = OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET;
+	vdd->vc_reg.bypass_val_reg = OMAP4_PRM_VC_VAL_BYPASS_OFFSET;
+	vdd->vc_reg.data_shift = OMAP4430_DATA_SHIFT;
+	vdd->vc_reg.slaveaddr_shift = OMAP4430_SLAVEADDR_SHIFT;
+	vdd->vc_reg.regaddr_shift = OMAP4430_REGADDR_SHIFT;
+	vdd->vc_reg.valid = OMAP4430_VALID_MASK;
+	vdd->vc_reg.cmd_on_shift = OMAP4430_ON_SHIFT;
+	vdd->vc_reg.cmd_on_mask = OMAP4430_ON_MASK;
+	vdd->vc_reg.cmd_onlp_shift = OMAP4430_ONLP_SHIFT;
+	vdd->vc_reg.cmd_ret_shift = OMAP4430_RET_SHIFT;
+	vdd->vc_reg.cmd_off_shift = OMAP4430_OFF_SHIFT;
+
+	vdd->vp_reg.prm_mod = OMAP4430_PRM_DEVICE_INST;
+
+	/* VPCONFIG bit fields */
+	vdd->vp_reg.vpconfig_erroroffset = (vdd->pmic_info->vp_erroroffset <<
+				 OMAP4430_ERROROFFSET_SHIFT);
+	vdd->vp_reg.vpconfig_errorgain_mask = OMAP4430_ERRORGAIN_MASK;
+	vdd->vp_reg.vpconfig_errorgain_shift = OMAP4430_ERRORGAIN_SHIFT;
+	vdd->vp_reg.vpconfig_initvoltage_shift = OMAP4430_INITVOLTAGE_SHIFT;
+	vdd->vp_reg.vpconfig_initvoltage_mask = OMAP4430_INITVOLTAGE_MASK;
+	vdd->vp_reg.vpconfig_timeouten = OMAP4430_TIMEOUTEN_MASK;
+	vdd->vp_reg.vpconfig_initvdd = OMAP4430_INITVDD_MASK;
+	vdd->vp_reg.vpconfig_forceupdate = OMAP4430_FORCEUPDATE_MASK;
+	vdd->vp_reg.vpconfig_vpenable = OMAP4430_VPENABLE_MASK;
+
+	/* VSTEPMIN VSTEPMAX bit fields */
+	waittime = ((vdd->pmic_info->step_size / vdd->pmic_info->slew_rate) *
+				sys_clk_speed) / 1000;
+	vdd->vp_reg.vstepmin_smpswaittimemin = waittime;
+	vdd->vp_reg.vstepmax_smpswaittimemax = waittime;
+	vdd->vp_reg.vstepmin_stepmin = vdd->pmic_info->vp_vstepmin;
+	vdd->vp_reg.vstepmax_stepmax = vdd->pmic_info->vp_vstepmax;
+	vdd->vp_reg.vstepmin_smpswaittimemin_shift =
+			OMAP4430_SMPSWAITTIMEMIN_SHIFT;
+	vdd->vp_reg.vstepmax_smpswaittimemax_shift =
+			OMAP4430_SMPSWAITTIMEMAX_SHIFT;
+	vdd->vp_reg.vstepmin_stepmin_shift = OMAP4430_VSTEPMIN_SHIFT;
+	vdd->vp_reg.vstepmax_stepmax_shift = OMAP4430_VSTEPMAX_SHIFT;
+
+	/* VLIMITTO bit fields */
+	timeout_val = (sys_clk_speed * vdd->pmic_info->vp_timeout_us) / 1000;
+	vdd->vp_reg.vlimitto_timeout = timeout_val;
+	vdd->vp_reg.vlimitto_vddmin = vdd->pmic_info->vp_vddmin;
+	vdd->vp_reg.vlimitto_vddmax = vdd->pmic_info->vp_vddmax;
+	vdd->vp_reg.vlimitto_vddmin_shift = OMAP4430_VDDMIN_SHIFT;
+	vdd->vp_reg.vlimitto_vddmax_shift = OMAP4430_VDDMAX_SHIFT;
+	vdd->vp_reg.vlimitto_timeout_shift = OMAP4430_TIMEOUT_SHIFT;
+
+	return 0;
+}
+
+/* Public functions */
+/**
+ * omap_voltage_get_nom_volt() - Gets the current non-auto-compensated voltage
+ * @voltdm:	pointer to the VDD for which current voltage info is needed
+ *
+ * API to get the current non-auto-compensated voltage for a VDD.
+ * Returns 0 in case of error else returns the current voltage for the VDD.
+ */
+unsigned long omap_voltage_get_nom_volt(struct voltagedomain *voltdm)
+{
+	struct omap_vdd_info *vdd;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return 0;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	return vdd->curr_volt;
+}
+
+/**
+ * omap_vp_get_curr_volt() - API to get the current vp voltage.
+ * @voltdm:	pointer to the VDD.
+ *
+ * This API returns the current voltage for the specified voltage processor
+ */
+unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm)
+{
+	struct omap_vdd_info *vdd;
+	u8 curr_vsel;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return 0;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+	if (!vdd->read_reg) {
+		pr_err("%s: No read API for reading vdd_%s regs\n",
+			__func__, voltdm->name);
+		return 0;
+	}
+
+	curr_vsel = vdd->read_reg(vdd->vp_reg.prm_mod,
+			vdd->vp_offs.voltage);
+
+	if (!vdd->pmic_info || !vdd->pmic_info->vsel_to_uv) {
+		pr_warning("%s: PMIC function to convert vsel to voltage "
+			"in uV not registered\n", __func__);
+		return 0;
+	}
+
+	return vdd->pmic_info->vsel_to_uv(curr_vsel);
+}
+
+/**
+ * omap_vp_enable() - API to enable a particular VP
+ * @voltdm:	pointer to the VDD whose VP is to be enabled.
+ *
+ * This API enables a particular voltage processor. Needed by the smartreflex
+ * class drivers.
+ */
+void omap_vp_enable(struct voltagedomain *voltdm)
+{
+	struct omap_vdd_info *vdd;
+	u32 vpconfig;
+	u16 mod;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+	if (!vdd->read_reg || !vdd->write_reg) {
+		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	mod = vdd->vp_reg.prm_mod;
+
+	/* If VP is already enabled, do nothing. Return */
+	if (vdd->vp_enabled)
+		return;
+
+	vp_latch_vsel(vdd);
+
+	/* Enable VP */
+	vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig);
+	vpconfig |= vdd->vp_reg.vpconfig_vpenable;
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+	vdd->vp_enabled = true;
+}
+
+/**
+ * omap_vp_disable() - API to disable a particular VP
+ * @voltdm:	pointer to the VDD whose VP is to be disabled.
+ *
+ * This API disables a particular voltage processor. Needed by the smartreflex
+ * class drivers.
+ */
+void omap_vp_disable(struct voltagedomain *voltdm)
+{
+	struct omap_vdd_info *vdd;
+	u32 vpconfig;
+	u16 mod;
+	int timeout;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+	if (!vdd->read_reg || !vdd->write_reg) {
+		pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	mod = vdd->vp_reg.prm_mod;
+
+	/* If VP is already disabled, do nothing. Return */
+	if (!vdd->vp_enabled) {
+		pr_warning("%s: Trying to disable VP for vdd_%s when "
+			"it is already disabled\n", __func__, voltdm->name);
+		return;
+	}
+
+	/* Disable VP */
+	vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig);
+	vpconfig &= ~vdd->vp_reg.vpconfig_vpenable;
+	vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig);
+
+	/*
+	 * Wait for VP to idle. Typical latency is <2us; maximum latency is ~100us.
+	 */
+	omap_test_timeout((vdd->read_reg(mod, vdd->vp_offs.vstatus)),
+				VP_IDLE_TIMEOUT, timeout);
+
+	if (timeout >= VP_IDLE_TIMEOUT)
+		pr_warning("%s: vdd_%s idle timed out\n",
+			__func__, voltdm->name);
+
+	vdd->vp_enabled = false;
+
+	return;
+}
+
+/**
+ * omap_voltage_scale_vdd() - API to scale voltage of a particular
+ *				voltage domain.
+ * @voltdm:	pointer to the VDD which is to be scaled.
+ * @target_volt:	The target voltage of the voltage domain
+ *
+ * This API should be called by the kernel to do the voltage scaling
+ * for a particular voltage domain during dvfs or any other situation.
+ */
+int omap_voltage_scale_vdd(struct voltagedomain *voltdm,
+		unsigned long target_volt)
+{
+	struct omap_vdd_info *vdd;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return -EINVAL;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	if (!vdd->volt_scale) {
+		pr_err("%s: No voltage scale API registered for vdd_%s\n",
+			__func__, voltdm->name);
+		return -ENODATA;
+	}
+
+	return vdd->volt_scale(vdd, target_volt);
+}
+
+/**
+ * omap_voltage_reset() - Resets the voltage of a particular voltage domain
+ *			to that of the current OPP.
+ * @voltdm:	pointer to the VDD whose voltage is to be reset.
+ *
+ * This API finds out the correct voltage the voltage domain is supposed
+ * to be at and resets the voltage to that level. Should be used especially
+ * while disabling any voltage compensation modules.
+ */
+void omap_voltage_reset(struct voltagedomain *voltdm)
+{
+	unsigned long target_uvdc;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return;
+	}
+
+	target_uvdc = omap_voltage_get_nom_volt(voltdm);
+	if (!target_uvdc) {
+		pr_err("%s: unable to find current voltage for vdd_%s\n",
+			__func__, voltdm->name);
+		return;
+	}
+
+	omap_voltage_scale_vdd(voltdm, target_uvdc);
+}
+
+/**
+ * omap_voltage_get_volttable() - API to get the voltage table associated with a
+ *				particular voltage domain.
+ * @voltdm:	pointer to the VDD for which the voltage table is required
+ * @volt_data:	the voltage table for the particular vdd which is to be
+ *		populated by this API
+ *
+ * This API populates the voltage table associated with a VDD into the
+ * passed parameter pointer.
+ *
+ */
+void omap_voltage_get_volttable(struct voltagedomain *voltdm,
+		struct omap_volt_data **volt_data)
+{
+	struct omap_vdd_info *vdd;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	*volt_data = vdd->volt_data;
+}
+
+/**
+ * omap_voltage_get_voltdata() - API to get the voltage table entry for a
+ *				particular voltage
+ * @voltdm:	pointer to the VDD whose voltage table has to be searched
+ * @volt:	the voltage to be searched in the voltage table
+ *
+ * This API searches through the voltage table of the given voltage
+ * domain and tries to find a matching entry for the passed voltage volt.
+ * If a matching entry is found, a pointer to it is returned. Only the
+ * non-compensated voltages in the voltage table are searched.
+ * Returns a pointer to the voltage table entry corresponding to volt on
+ * success. Returns ERR_PTR(-ENODATA) if no voltage table exists for the
+ * passed voltage domain or if there is no matching entry.
+ */
+struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
+		unsigned long volt)
+{
+	struct omap_vdd_info *vdd;
+	int i;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	if (!vdd->volt_data) {
+		pr_warning("%s: voltage table does not exist for vdd_%s\n",
+			__func__, voltdm->name);
+		return ERR_PTR(-ENODATA);
+	}
+
+	for (i = 0; vdd->volt_data[i].volt_nominal != 0; i++) {
+		if (vdd->volt_data[i].volt_nominal == volt)
+			return &vdd->volt_data[i];
+	}
+
+	pr_notice("%s: Unable to match the current voltage with the voltage "
+		"table for vdd_%s\n", __func__, voltdm->name);
+
+	return ERR_PTR(-ENODATA);
+}
+
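+/*
+ * Example lookup (illustrative only; the 1.1V value is an assumption and
+ * voltdm is a previously looked-up voltage domain):
+ *
+ *	struct omap_volt_data *vdata;
+ *
+ *	vdata = omap_voltage_get_voltdata(voltdm, 1100000);
+ *	if (IS_ERR(vdata))
+ *		return PTR_ERR(vdata);
+ */
+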
+/**
+ * omap_voltage_register_pmic() - API to register PMIC specific data
+ * @voltdm:	pointer to the VDD for which the PMIC specific data is
+ *		to be registered
+ * @pmic_info:	the structure containing pmic info
+ *
+ * This API is to be called by the SoC/PMIC file to specify the
+ * PMIC-specific info as present in the omap_volt_pmic_info structure.
+ */
+int omap_voltage_register_pmic(struct voltagedomain *voltdm,
+		struct omap_volt_pmic_info *pmic_info)
+{
+	struct omap_vdd_info *vdd;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return -EINVAL;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	vdd->pmic_info = pmic_info;
+
+	return 0;
+}
+
+/**
+ * omap_voltage_get_dbgdir() - API to get pointer to the debugfs directory
+ *				corresponding to a voltage domain.
+ *
+ * @voltdm:	pointer to the VDD whose debug directory is required.
+ *
+ * This API returns a pointer to the debugfs directory corresponding
+ * to the voltage domain. It should be used by drivers that need to
+ * add a debug entry for a particular voltage domain. Returns NULL
+ * in case of error.
+ */
+struct dentry *omap_voltage_get_dbgdir(struct voltagedomain *voltdm)
+{
+	struct omap_vdd_info *vdd;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return NULL;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	return vdd->debug_dir;
+}
+
+/**
+ * omap_change_voltscale_method() - API to change the voltage scaling method.
+ * @voltdm:	pointer to the VDD whose voltage scaling method
+ *		has to be changed.
+ * @voltscale_method:	the method to be used for voltage scaling.
+ *
+ * This API can be used by the board files to change the method of voltage
+ * scaling between vpforceupdate and vcbypass. The parameter values are
+ * defined in voltage.h
+ */
+void omap_change_voltscale_method(struct voltagedomain *voltdm,
+		int voltscale_method)
+{
+	struct omap_vdd_info *vdd;
+
+	if (!voltdm || IS_ERR(voltdm)) {
+		pr_warning("%s: VDD specified does not exist!\n", __func__);
+		return;
+	}
+
+	vdd = container_of(voltdm, struct omap_vdd_info, voltdm);
+
+	switch (voltscale_method) {
+	case VOLTSCALE_VPFORCEUPDATE:
+		vdd->volt_scale = vp_forceupdate_scale_voltage;
+		return;
+	case VOLTSCALE_VCBYPASS:
+		vdd->volt_scale = vc_bypass_scale_voltage;
+		return;
+	default:
+		pr_warning("%s: Trying to change the method of voltage scaling "
+			"to an unsupported one!\n", __func__);
+	}
+}
+
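+/*
+ * Example from a board file (illustrative only; the "core" domain name is
+ * an assumption):
+ *
+ *	struct voltagedomain *voltdm = omap_voltage_domain_lookup("core");
+ *
+ *	if (!IS_ERR(voltdm))
+ *		omap_change_voltscale_method(voltdm, VOLTSCALE_VPFORCEUPDATE);
+ */
+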
+/**
+ * omap_voltage_domain_lookup() - API to get the voltage domain pointer
+ * @name:	Name of the voltage domain
+ *
+ * This API searches the global vdd_info table for a voltage domain
+ * named <name>. If it exists, the API returns a pointer to the
+ * voltage domain structure corresponding to VDD<name>. Else it
+ * returns an error pointer.
+ */
+struct voltagedomain *omap_voltage_domain_lookup(char *name)
+{
+	int i;
+
+	if (!vdd_info) {
+		pr_err("%s: Voltage driver init not yet happened. Faulting!\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!name) {
+		pr_err("%s: No name to get the voltage domain!\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	for (i = 0; i < nr_scalable_vdd; i++) {
+		if (!(strcmp(name, vdd_info[i].voltdm.name)))
+			return &vdd_info[i].voltdm;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * omap_voltage_late_init() - Init the various voltage parameters
+ *
+ * This API is to be called in the later stages of the
+ * system boot to init the voltage controller and
+ * voltage processors.
+ */
+int __init omap_voltage_late_init(void)
+{
+	int i;
+
+	if (!vdd_info) {
+		pr_err("%s: Voltage driver support not added\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	voltage_dir = debugfs_create_dir("voltage", NULL);
+	if (IS_ERR(voltage_dir))
+		pr_err("%s: Unable to create voltage debugfs main dir\n",
+			__func__);
+	for (i = 0; i < nr_scalable_vdd; i++) {
+		if (vdd_data_configure(&vdd_info[i]))
+			continue;
+		vc_init(&vdd_info[i]);
+		vp_init(&vdd_info[i]);
+		vdd_debugfs_init(&vdd_info[i]);
+	}
+
+	return 0;
+}
+
+/**
+ * omap_voltage_early_init() - Voltage driver early init
+ */
+static int __init omap_voltage_early_init(void)
+{
+	if (cpu_is_omap34xx()) {
+		vdd_info = omap3_vdd_info;
+		nr_scalable_vdd = OMAP3_NR_SCALABLE_VDD;
+		vc_init = omap3_vc_init;
+		vdd_data_configure = omap3_vdd_data_configure;
+	} else if (cpu_is_omap44xx()) {
+		vdd_info = omap4_vdd_info;
+		nr_scalable_vdd = OMAP4_NR_SCALABLE_VDD;
+		vc_init = omap4_vc_init;
+		vdd_data_configure = omap4_vdd_data_configure;
+	} else {
+		pr_warning("%s: voltage driver support not added\n", __func__);
+	}
+
+	return 0;
+}
+core_initcall(omap_voltage_early_init);
diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
new file mode 100644
index 0000000..4067669
--- /dev/null
+++ b/arch/arm/mach-omap2/wd_timer.c
@@ -0,0 +1,56 @@
+/*
+ * OMAP2+ MPU WD_TIMER-specific code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <plat/omap_hwmod.h>
+
+#include "wd_timer.h"
+
+/*
+ * To avoid carrying over any bootloader assumptions about WDT settings,
+ * the WDT module is reset during init. That reset re-enables the watchdog
+ * timer, so the watchdog must be disabled again after the reset; otherwise
+ * the system would reboot according to the default watchdog timer register
+ * settings.
+ */
+#define OMAP_WDT_WPS		0x34
+#define OMAP_WDT_SPR		0x48
+
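+/*
+ * Note: OMAP_WDT_WPS is the write-posting status register (bit 4 indicates
+ * a pending write to the start/stop register) and OMAP_WDT_SPR is the
+ * start/stop register that takes the 0xAAAA/0x5555 disable sequence
+ * (register roles as described in the OMAP watchdog timer documentation).
+ */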
+
+int omap2_wd_timer_disable(struct omap_hwmod *oh)
+{
+	void __iomem *base;
+
+	if (!oh) {
+		pr_err("%s: Could not look up wdtimer_hwmod\n", __func__);
+		return -EINVAL;
+	}
+
+	base = omap_hwmod_get_mpu_rt_va(oh);
+	if (!base) {
+		pr_err("%s: Could not get the base address for %s\n",
+				__func__, oh->name);
+		return -EINVAL;
+	}
+
+	/* sequence required to disable watchdog */
+	__raw_writel(0xAAAA, base + OMAP_WDT_SPR);
+	while (__raw_readl(base + OMAP_WDT_WPS) & 0x10)
+		cpu_relax();
+
+	__raw_writel(0x5555, base + OMAP_WDT_SPR);
+	while (__raw_readl(base + OMAP_WDT_WPS) & 0x10)
+		cpu_relax();
+
+	return 0;
+}
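+
+/*
+ * Example (illustrative only; the "wd_timer2" hwmod name is an assumption
+ * and may differ between OMAP2+ SoCs):
+ *
+ *	struct omap_hwmod *oh = omap_hwmod_lookup("wd_timer2");
+ *
+ *	if (oh)
+ *		omap2_wd_timer_disable(oh);
+ */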
+
diff --git a/arch/arm/mach-omap2/wd_timer.h b/arch/arm/mach-omap2/wd_timer.h
new file mode 100644
index 0000000..e0054a2
--- /dev/null
+++ b/arch/arm/mach-omap2/wd_timer.h
@@ -0,0 +1,17 @@
+/*
+ * OMAP2+ MPU WD_TIMER-specific function prototypes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_WD_TIMER_H
+#define __ARCH_ARM_MACH_OMAP2_WD_TIMER_H
+
+#include <plat/omap_hwmod.h>
+
+extern int omap2_wd_timer_disable(struct omap_hwmod *oh);
+
+#endif
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index ee3c29c..f3e60a0 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -119,7 +119,7 @@
 	       (state == PM_SUSPEND_MEM);
 }
 
-static struct platform_suspend_ops pnx4008_pm_ops = {
+static const struct platform_suspend_ops pnx4008_pm_ops = {
 	.enter = pnx4008_pm_enter,
 	.valid = pnx4008_pm_valid,
 };
diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c
index f1a7703..93f59f8 100644
--- a/arch/arm/mach-pxa/cm-x255.c
+++ b/arch/arm/mach-pxa/cm-x255.c
@@ -17,13 +17,13 @@
 #include <linux/mtd/nand-gpio.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
 
 #include <mach/pxa25x.h>
-#include <mach/pxa2xx_spi.h>
 
 #include "generic.h"
 
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index a9926bb..b88d601 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -19,12 +19,12 @@
 #include <video/mbxfb.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/spi/libertas_spi.h>
 
 #include <mach/pxa27x.h>
 #include <mach/ohci.h>
 #include <mach/mmc.h>
-#include <mach/pxa2xx_spi.h>
 
 #include "generic.h"
 
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 9f3e5af..a5452a3 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -28,6 +28,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
 #include <linux/input/matrix_keypad.h>
 #include <video/w100fb.h>
@@ -48,7 +49,6 @@
 #include <mach/irda.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/corgi.h>
 #include <mach/sharpsl_pm.h>
 
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 022c2fa..4c766e3 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/spi/pxa2xx_spi.h>
 
 #include <asm/pmu.h>
 #include <mach/udc.h>
@@ -12,7 +13,6 @@
 #include <mach/irda.h>
 #include <mach/ohci.h>
 #include <plat/pxa27x_keypad.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/camera.h>
 #include <mach/audio.h>
 #include <mach/hardware.h>
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 4cefd1d..a78bb30 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -26,6 +26,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/tdo24m.h>
 #include <linux/spi/libertas_spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/power_supply.h>
 #include <linux/apm-emulation.h>
 #include <linux/i2c.h>
@@ -46,7 +47,6 @@
 #include <plat/pxa27x_keypad.h>
 #include <plat/i2c.h>
 #include <mach/camera.h>
-#include <mach/pxa2xx_spi.h>
 
 #include "generic.h"
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index cacb21b..a908e0a 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -33,6 +33,7 @@
 #include <linux/regulator/max1586.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/usb/gpio_vbus.h>
 
 #include <mach/hardware.h>
@@ -43,7 +44,6 @@
 #include <mach/hx4700.h>
 #include <plat/i2c.h>
 #include <mach/irda.h>
-#include <mach/pxa2xx_spi.h>
 
 #include <video/platform_lcd.h>
 #include <video/w100fb.h>
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index ac6ee12..6cedc81 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -24,7 +24,7 @@
 #include <mach/mxm8x10.h>
 
 #include <linux/spi/spi.h>
-#include <mach/pxa2xx_spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/can/platform/mcp251x.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/pxa2xx_spi.h b/arch/arm/mach-pxa/include/mach/pxa2xx_spi.h
deleted file mode 100644
index b87cecd..0000000
--- a/arch/arm/mach-pxa/include/mach/pxa2xx_spi.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef PXA2XX_SPI_H_
-#define PXA2XX_SPI_H_
-
-#define PXA2XX_CS_ASSERT (0x01)
-#define PXA2XX_CS_DEASSERT (0x02)
-
-/* device.platform_data for SSP controller devices */
-struct pxa2xx_spi_master {
-	u32 clock_enable;
-	u16 num_chipselect;
-	u8 enable_dma;
-};
-
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
- */
-struct pxa2xx_spi_chip {
-	u8 tx_threshold;
-	u8 rx_threshold;
-	u8 dma_burst_size;
-	u32 timeout;
-	u8 enable_loopback;
-	int gpio_cs;
-	void (*cs_control)(u32 command);
-};
-
-extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
-
-#endif /*PXA2XX_SPI_H_*/
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 719c260..ccb7bfa 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -22,6 +22,7 @@
 #include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/smc91x.h>
 #include <linux/i2c.h>
 #include <linux/leds.h>
@@ -42,7 +43,6 @@
 #include <mach/pxa300.h>
 #include <mach/pxafb.h>
 #include <mach/mmc.h>
-#include <mach/pxa2xx_spi.h>
 #include <plat/pxa27x_keypad.h>
 #include <mach/littleton.h>
 #include <plat/i2c.h>
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index d337548..3072dbe 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -25,7 +25,7 @@
 
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
-#include <mach/pxa2xx_spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 
 #include <asm/setup.h>
 #include <asm/memory.h>
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index 462167a..cdf7f41 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -337,7 +337,7 @@
 }
 #endif
 
-/* USB Open Host Controler Interface */
+/* USB Open Host Controller Interface */
 static struct pxaohci_platform_data mxm_8x10_ohci_platform_data = {
 	.port_mode = PMM_NPS_MODE,
 	.flags = ENABLE_PORT_ALL
diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c
index 8547c9a..1fc8a66 100644
--- a/arch/arm/mach-pxa/pcm027.c
+++ b/arch/arm/mach-pxa/pcm027.c
@@ -25,12 +25,12 @@
 #include <linux/mtd/physmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/max7301.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/leds.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <mach/pxa27x.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/pcm027.h>
 #include "generic.h"
 
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 166c15f..978e1b2 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -96,7 +96,7 @@
 		pxa_cpu_pm_fns->finish();
 }
 
-static struct platform_suspend_ops pxa_pm_ops = {
+static const struct platform_suspend_ops pxa_pm_ops = {
 	.valid		= pxa_pm_valid,
 	.enter		= pxa_pm_enter,
 	.prepare	= pxa_pm_prepare,
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 8451790..4f0ff1a 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -25,6 +25,7 @@
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
 
 #include <mach/hardware.h>
@@ -43,7 +44,6 @@
 #include <mach/irda.h>
 #include <mach/poodle.h>
 #include <mach/pxafb.h>
-#include <mach/pxa2xx_spi.h>
 #include <plat/i2c.h>
 
 #include <asm/hardware/scoop.h>
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 8fed027..785880f 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -579,7 +579,8 @@
 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	sharpsl_pm.flags |= SHARPSL_SUSPENDED;
-	flush_scheduled_work();
+	flush_delayed_work_sync(&toggle_charger);
+	flush_delayed_work_sync(&sharpsl_bat);
 
 	if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
@@ -868,7 +869,7 @@
 }
 
 #ifdef CONFIG_PM
-static struct platform_suspend_ops sharpsl_pm_ops = {
+static const struct platform_suspend_ops sharpsl_pm_ops = {
 	.prepare	= pxa_pm_prepare,
 	.finish		= pxa_pm_finish,
 	.enter		= corgi_pxa_pm_enter,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0499a69..0bc9387 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -23,7 +23,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
-#include <linux/mtd/physmap.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/regulator/machine.h>
@@ -42,7 +42,6 @@
 #include <mach/mmc.h>
 #include <mach/ohci.h>
 #include <mach/pxafb.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/spitz.h>
 #include <mach/sharpsl_pm.h>
 #include <mach/smemc.h>
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 3498a14..9a14fdb 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -46,11 +46,11 @@
 #include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/pxa27x-udc.h>
 #include <mach/smemc.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mfd/da903x.h>
 #include <linux/sht15.h>
 
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 57d61ee..af152e7 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -32,6 +32,7 @@
 #include <linux/gpio.h>
 #include <linux/pda_power.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/input/matrix_keypad.h>
 
 #include <asm/setup.h>
@@ -44,7 +45,6 @@
 #include <mach/mmc.h>
 #include <mach/udc.h>
 #include <mach/tosa_bt.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/audio.h>
 #include <mach/smemc.h>
 
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 43fc9ca..423261d 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -40,7 +40,6 @@
 #include <asm/mach/flash.h>
 
 #include <mach/pxa27x.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/trizeps4.h>
 #include <mach/audio.h>
 #include <mach/pxafb.h>
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index 527c2a1..a323e07 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -20,6 +20,7 @@
 #include <linux/z2_battery.h>
 #include <linux/dma-mapping.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/spi/libertas_spi.h>
 #include <linux/spi/lms283gf05.h>
 #include <linux/power_supply.h>
@@ -38,7 +39,6 @@
 #include <mach/pxafb.h>
 #include <mach/mmc.h>
 #include <plat/pxa27x_keypad.h>
-#include <mach/pxa2xx_spi.h>
 
 #include <plat/i2c.h>
 
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index c87f2b3..bf034c7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -20,6 +20,7 @@
 #include <linux/dm9000.h>
 #include <linux/mmc/host.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
@@ -41,7 +42,6 @@
 #include <mach/pxa27x-udc.h>
 #include <mach/udc.h>
 #include <mach/pxafb.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/mfp-pxa27x.h>
 #include <mach/pm.h>
 #include <mach/audio.h>
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index d7ada8c..1a81fe1 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -387,7 +387,7 @@
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
 	&s3c_device_iis,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c_device_usbgadget,
 	&h1940_device_leds,
 	&h1940_device_bluetooth,
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index e0622bb..eab6ae5 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -692,7 +692,7 @@
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
 	&s3c_device_iis,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c_device_usbgadget,
 	&s3c_device_rtc,
 	&s3c_device_nand,
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 7e03f0a..1c98d2f 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -695,7 +695,7 @@
 	}, {
 		.clk	= {
 			.name		= "audio-bus",
-			.id		= -1,  /* There's only one IISv4 port */
+			.id		= 2,
 			.ctrlbit        = S3C6410_CLKCON_SCLK_AUDIO2,
 			.enable		= s3c64xx_sclk_ctrl,
 		},
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index 76426a3..cad6702 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -22,7 +22,12 @@
 #include <plat/audio.h>
 #include <plat/gpio-cfg.h>
 
-static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev)
+static const char *rclksrc[] = {
+	[0] = "iis",
+	[1] = "audio-bus",
+};
+
+static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
 {
 	unsigned int base;
 
@@ -33,6 +38,12 @@
 	case 1:
 		base = S3C64XX_GPE(0);
 		break;
+	case 2:
+		s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
+		return 0;
 	default:
 		printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
 			pdev->id);
@@ -44,16 +55,6 @@
 	return 0;
 }
 
-static int s3c64xx_i2sv4_cfg_gpio(struct platform_device *pdev)
-{
-	s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
-
-	return 0;
-}
-
 static struct resource s3c64xx_iis0_resource[] = {
 	[0] = {
 		.start = S3C64XX_PA_IIS0,
@@ -72,17 +73,22 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2s0_pdata = {
-	.cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 struct platform_device s3c64xx_device_iis0 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 0,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis0_resource),
 	.resource	  = s3c64xx_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s0_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iis0);
@@ -105,17 +111,13 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2s1_pdata = {
-	.cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
-};
-
 struct platform_device s3c64xx_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis1_resource),
 	.resource	  = s3c64xx_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s1_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iis1);
@@ -138,17 +140,23 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2sv4_pdata = {
-	.cfg_gpio = s3c64xx_i2sv4_cfg_gpio,
+static struct s3c_audio_pdata i2sv4_pdata = {
+	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 struct platform_device s3c64xx_device_iisv4 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 2,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iisv4_resource),
 	.resource	  = s3c64xx_iisv4_resource,
 	.dev = {
-		.platform_data = &s3c_i2sv4_pdata,
+		.platform_data = &i2sv4_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iisv4);
@@ -288,7 +296,7 @@
 static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s3c64xx_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_ac97_resource),
 	.resource	  = s3c64xx_ac97_resource,
@@ -307,16 +315,3 @@
 	else
 		s3c_ac97_pdata.cfg_gpio = s3c64xx_ac97_cfg_gpe;
 }
-
-static u64 s3c_device_audio_dmamask = 0xffffffffUL;
-
-struct platform_device s3c_device_pcm = {
-	.name		  = "s3c24xx-pcm-audio",
-	.id		  = -1,
-	.dev              = {
-		.dma_mask = &s3c_device_audio_dmamask,
-		.coherent_dma_mask = 0xffffffffUL
-	}
-};
-EXPORT_SYMBOL(s3c_device_pcm);
-
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index e7d03ab..372ea68 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -740,7 +740,7 @@
 	/* Set all DMA configuration to be DMA, not SDMA */
 	writel(0xffffff, S3C_SYSREG(0x110));
 
-	/* Register standard DMA controlers */
+	/* Register standard DMA controllers */
 	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
 	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
 
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 77488fa..e85192a 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -283,7 +283,7 @@
 	&s3c_device_fb,
 	&s3c_device_ohci,
 	&s3c_device_usb_hsotg,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c64xx_device_iisv4,
 	&samsung_device_keypad,
 
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
index 3462197..8719dc4 100644
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ b/arch/arm/mach-s5p6442/dev-audio.c
@@ -29,7 +29,7 @@
 		base = S5P6442_GPC1(0);
 		break;
 
-	case -1:
+	case 0:
 		base = S5P6442_GPC0(0);
 		break;
 
@@ -42,8 +42,19 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static const char *rclksrc_v35[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
+static struct s3c_audio_pdata i2sv35_pdata = {
 	.cfg_gpio = s5p6442_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc_v35,
+		},
+	},
 };
 
 static struct resource s5p6442_iis0_resource[] = {
@@ -62,15 +73,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5p6442_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5p6442_iis0_resource),
 	.resource	  = s5p6442_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv35_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "sclk_audio",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5p6442_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -93,12 +123,12 @@
 };
 
 struct platform_device s5p6442_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5p6442_iis1_resource),
 	.resource	  = s5p6442_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index e4883dc..409c5fc 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -261,7 +261,7 @@
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 25),
 	}, {
-		.name		= "i2s_v40",
+		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_pclk_low.clk,
 		.enable		= s5p64x0_pclk_ctrl,
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 7dbf3c9..7fc6abd 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -256,7 +256,7 @@
 		.ctrlbit	= (1 << 22),
 	}, {
 		.name		= "iis",
-		.id		= -1,
+		.id		= 0,
 		.parent		= &clk_pclk_low.clk,
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 26),
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index 396bacc..14f89e7 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -19,15 +19,19 @@
 #include <mach/dma.h>
 #include <mach/irqs.h>
 
-static int s5p6440_cfg_i2s(struct platform_device *pdev)
+static const char *rclksrc[] = {
+	[0] = "iis",
+	[1] = "sclk_audio2",
+};
+
+static int s5p64x0_cfg_i2s(struct platform_device *pdev)
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
-	case -1:
+	case 0:
 		s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5));
 		s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5));
 		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -36,31 +40,14 @@
 	return 0;
 }
 
-static int s5p6450_cfg_i2s(struct platform_device *pdev)
-{
-	/* configure GPIO for i2s port */
-	switch (pdev->id) {
-	case -1:
-		s3c_gpio_cfgpin(S5P6450_GPB(4), S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5));
-
-		break;
-
-	default:
-		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct s3c_audio_pdata s5p6440_i2s_pdata = {
-	.cfg_gpio = s5p6440_cfg_i2s,
-};
-
-static struct s3c_audio_pdata s5p6450_i2s_pdata = {
-	.cfg_gpio = s5p6450_cfg_i2s,
+static struct s3c_audio_pdata s5p64x0_i2s_pdata = {
+	.cfg_gpio = s5p64x0_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 static struct resource s5p64x0_iis0_resource[] = {
@@ -82,22 +69,22 @@
 };
 
 struct platform_device s5p6440_device_iis = {
-	.name		= "s3c64xx-iis-v4",
-	.id		= -1,
+	.name		= "samsung-i2s",
+	.id		= 0,
 	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
 	.resource	= s5p64x0_iis0_resource,
 	.dev = {
-		.platform_data = &s5p6440_i2s_pdata,
+		.platform_data = &s5p64x0_i2s_pdata,
 	},
 };
 
 struct platform_device s5p6450_device_iis0 = {
-	.name		= "s3c64xx-iis-v4",
-	.id		= -1,
+	.name		= "samsung-i2s",
+	.id		= 0,
 	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
 	.resource	= s5p64x0_iis0_resource,
 	.dev = {
-		.platform_data = &s5p6450_i2s_pdata,
+		.platform_data = &s5p64x0_i2s_pdata,
 	},
 };
 
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
index 564e195..ab2d271 100644
--- a/arch/arm/mach-s5pc100/dev-audio.c
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -23,17 +23,14 @@
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
+	case 0: /* Dedicated pins */
+		break;
 	case 1:
 		s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(2));
 		break;
-
 	case 2:
 		s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(4));
 		break;
-
-	case -1: /* Dedicated pins */
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -42,8 +39,20 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static const char *rclksrc_v5[] = {
+	[0] = "iis",
+	[1] = "i2sclkd2",
+};
+
+static struct s3c_audio_pdata i2sv5_pdata = {
 	.cfg_gpio = s5pc100_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc_v5,
+		},
+	},
 };
 
 static struct resource s5pc100_iis0_resource[] = {
@@ -62,15 +71,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5pc100_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis0_resource),
 	.resource	  = s5pc100_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "sclk_audio",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pc100_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -93,12 +121,12 @@
 };
 
 struct platform_device s5pc100_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis1_resource),
 	.resource	  = s5pc100_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -121,12 +149,12 @@
 };
 
 struct platform_device s5pc100_device_iis2 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 2,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis2_resource),
 	.resource	  = s5pc100_iis2_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -253,7 +281,7 @@
 static u64 s5pc100_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s5pc100_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s5pc100_ac97_resource),
 	.resource	  = s5pc100_ac97_resource,
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 18b405d..dd192a2 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -96,6 +96,7 @@
 
 /* I2C0 */
 static struct i2c_board_info i2c_devs0[] __initdata = {
+	{I2C_BOARD_INFO("wm8580", 0x1b),},
 };
 
 /* I2C1 */
@@ -190,6 +191,7 @@
 	&s3c_device_ts,
 	&s3c_device_wdt,
 	&smdkc100_lcd_powerdev,
+	&samsung_asoc_dma,
 	&s5pc100_device_iis0,
 	&samsung_device_keypad,
 	&s5pc100_device_ac97,
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 019c3a6..b774ff1 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -467,20 +467,20 @@
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1<<21),
 	}, {
-		.name		= "i2s_v50",
+		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1<<4),
 	}, {
-		.name		= "i2s_v32",
-		.id		= 0,
+		.name		= "iis",
+		.id		= 1,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 5),
 	}, {
-		.name		= "i2s_v32",
-		.id		= 1,
+		.name		= "iis",
+		.id		= 2,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 6),
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
index 1303fcb..8d58f19 100644
--- a/arch/arm/mach-s5pv210/dev-audio.c
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -19,22 +19,24 @@
 #include <mach/dma.h>
 #include <mach/irqs.h>
 
+static const char *rclksrc[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
 static int s5pv210_cfg_i2s(struct platform_device *pdev)
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
+		break;
 	case 1:
 		s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(2));
 		break;
-
 	case 2:
 		s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(4));
 		break;
-
-	case -1:
-		s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -43,8 +45,15 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static struct s3c_audio_pdata i2sv5_pdata = {
 	.cfg_gpio = s5pv210_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 static struct resource s5pv210_iis0_resource[] = {
@@ -63,15 +72,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5pv210_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis0_resource),
 	.resource	  = s5pv210_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "audio-bus",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pv210_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -94,12 +122,12 @@
 };
 
 struct platform_device s5pv210_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis1_resource),
 	.resource	  = s5pv210_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -122,12 +150,12 @@
 };
 
 struct platform_device s5pv210_device_iis2 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 2,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis2_resource),
 	.resource	  = s5pv210_iis2_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -283,7 +311,7 @@
 static u64 s5pv210_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s5pv210_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s5pv210_ac97_resource),
 	.resource	  = s5pv210_ac97_resource,
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index 1150b36..d64efe0 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -11,6 +11,7 @@
 
 config CPU_S5PV310
 	bool
+	select S3C_PL330_DMA
 	help
 	  Enable S5PV310 CPU support
 
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
index 84afc64..61e3cb6 100644
--- a/arch/arm/mach-s5pv310/Makefile
+++ b/arch/arm/mach-s5pv310/Makefile
@@ -13,7 +13,7 @@
 # Core support for S5PV310 system
 
 obj-$(CONFIG_CPU_S5PV310)	+= cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o
+obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
 
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)	+= localtimer.o
@@ -27,6 +27,7 @@
 
 # device support
 
+obj-y += dev-audio.o
 obj-$(CONFIG_S5PV310_SETUP_I2C1)	+= setup-i2c1.o
 obj-$(CONFIG_S5PV310_SETUP_I2C2)	+= setup-i2c2.o
 obj-$(CONFIG_S5PV310_SETUP_I2C3)	+= setup-i2c3.o
diff --git a/arch/arm/mach-s5pv310/dev-audio.c b/arch/arm/mach-s5pv310/dev-audio.c
new file mode 100644
index 0000000..a196424
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-audio.c
@@ -0,0 +1,364 @@
+/* linux/arch/arm/mach-s5pv310/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static const char *rclksrc[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
+static int s5pv310_cfg_i2s(struct platform_device *pdev)
+{
+	/* configure GPIO for i2s port */
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 7, S3C_GPIO_SFN(2));
+		break;
+	case 1:
+		s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(2));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(4));
+		break;
+	default:
+		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata i2sv5_pdata = {
+	.cfg_gpio = s5pv310_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc,
+		},
+	},
+};
+
+static struct resource s5pv310_i2s0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S0,
+		.end	= S5PV310_PA_I2S0 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S0_TX,
+		.end	= DMACH_I2S0_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S0_RX,
+		.end	= DMACH_I2S0_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[3] = {
+		.start	= DMACH_I2S0S_TX,
+		.end	= DMACH_I2S0S_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s0 = {
+	.name = "samsung-i2s",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s0_resource),
+	.resource = s5pv310_i2s0_resource,
+	.dev = {
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "sclk_i2s",
+	[1] = "no_such_clock",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pv310_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_NO_MUXPSR,
+			.src_clk = rclksrc_v3,
+		},
+	},
+};
+
+static struct resource s5pv310_i2s1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S1,
+		.end	= S5PV310_PA_I2S1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S1_TX,
+		.end	= DMACH_I2S1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S1_RX,
+		.end	= DMACH_I2S1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s1 = {
+	.name = "samsung-i2s",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s1_resource),
+	.resource = s5pv310_i2s1_resource,
+	.dev = {
+		.platform_data = &i2sv3_pdata,
+	},
+};
+
+static struct resource s5pv310_i2s2_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S2,
+		.end	= S5PV310_PA_I2S2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S2_TX,
+		.end	= DMACH_I2S2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S2_RX,
+		.end	= DMACH_I2S2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s2 = {
+	.name = "samsung-i2s",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s2_resource),
+	.resource = s5pv310_i2s2_resource,
+	.dev = {
+		.platform_data = &i2sv3_pdata,
+	},
+};
+
+/* PCM Controller platform_devices */
+
+static int s5pv310_pcm_cfg_gpio(struct platform_device *pdev)
+{
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 5, S3C_GPIO_SFN(3));
+		break;
+	case 1:
+		s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(3));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(3));
+		break;
+	default:
+		printk(KERN_DEBUG "Invalid PCM Controller number!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+	.cfg_gpio = s5pv310_pcm_cfg_gpio,
+};
+
+static struct resource s5pv310_pcm0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM0,
+		.end	= S5PV310_PA_PCM0 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM0_TX,
+		.end	= DMACH_PCM0_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM0_RX,
+		.end	= DMACH_PCM0_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm0 = {
+	.name = "samsung-pcm",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm0_resource),
+	.resource = s5pv310_pcm0_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+static struct resource s5pv310_pcm1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM1,
+		.end	= S5PV310_PA_PCM1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM1_TX,
+		.end	= DMACH_PCM1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM1_RX,
+		.end	= DMACH_PCM1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm1 = {
+	.name = "samsung-pcm",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm1_resource),
+	.resource = s5pv310_pcm1_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+static struct resource s5pv310_pcm2_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM2,
+		.end	= S5PV310_PA_PCM2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM2_TX,
+		.end	= DMACH_PCM2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM2_RX,
+		.end	= DMACH_PCM2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm2 = {
+	.name = "samsung-pcm",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm2_resource),
+	.resource = s5pv310_pcm2_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+/* AC97 Controller platform devices */
+
+static int s5pv310_ac97_cfg_gpio(struct platform_device *pdev)
+{
+	return s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(4));
+}
+
+static struct resource s5pv310_ac97_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_AC97,
+		.end	= S5PV310_PA_AC97 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_AC97_PCMOUT,
+		.end	= DMACH_AC97_PCMOUT,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_AC97_PCMIN,
+		.end	= DMACH_AC97_PCMIN,
+		.flags	= IORESOURCE_DMA,
+	},
+	[3] = {
+		.start	= DMACH_AC97_MICIN,
+		.end	= DMACH_AC97_MICIN,
+		.flags	= IORESOURCE_DMA,
+	},
+	[4] = {
+		.start	= IRQ_AC97,
+		.end	= IRQ_AC97,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+	.cfg_gpio = s5pv310_ac97_cfg_gpio,
+};
+
+static u64 s5pv310_ac97_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv310_device_ac97 = {
+	.name = "samsung-ac97",
+	.id = -1,
+	.num_resources = ARRAY_SIZE(s5pv310_ac97_resource),
+	.resource = s5pv310_ac97_resource,
+	.dev = {
+		.platform_data = &s3c_ac97_pdata,
+		.dma_mask = &s5pv310_ac97_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+/* S/PDIF Controller platform_device */
+
+static int s5pv310_spdif_cfg_gpio(struct platform_device *pdev)
+{
+	s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 2, S3C_GPIO_SFN(3));
+
+	return 0;
+}
+
+static struct resource s5pv310_spdif_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_SPDIF,
+		.end	= S5PV310_PA_SPDIF + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_SPDIF,
+		.end	= DMACH_SPDIF,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+static struct s3c_audio_pdata samsung_spdif_pdata = {
+	.cfg_gpio = s5pv310_spdif_cfg_gpio,
+};
+
+static u64 s5pv310_spdif_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv310_device_spdif = {
+	.name = "samsung-spdif",
+	.id = -1,
+	.num_resources = ARRAY_SIZE(s5pv310_spdif_resource),
+	.resource = s5pv310_spdif_resource,
+	.dev = {
+		.platform_data = &samsung_spdif_pdata,
+		.dma_mask = &s5pv310_spdif_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
diff --git a/arch/arm/mach-s5pv310/dma.c b/arch/arm/mach-s5pv310/dma.c
new file mode 100644
index 0000000..20066c7
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dma.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+#include <plat/irqs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5pv310_pdma0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PDMA0,
+		.end	= S5PV310_PA_PDMA0 + SZ_4K,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_PDMA0,
+		.end	= IRQ_PDMA0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
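+/*
+ * The peri[] table below maps each hardware request line of this PL330
+ * instance to a logical DMA channel; unused lines are marked DMACH_MAX.
+ * (Descriptive note only; the mapping itself comes from the SoC
+ * documentation.)
+ */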
+static struct s3c_pl330_platdata s5pv310_pdma0_pdata = {
+	.peri = {
+		[0] = DMACH_PCM0_RX,
+		[1] = DMACH_PCM0_TX,
+		[2] = DMACH_PCM2_RX,
+		[3] = DMACH_PCM2_TX,
+		[4] = DMACH_MSM_REQ0,
+		[5] = DMACH_MSM_REQ2,
+		[6] = DMACH_SPI0_RX,
+		[7] = DMACH_SPI0_TX,
+		[8] = DMACH_SPI2_RX,
+		[9] = DMACH_SPI2_TX,
+		[10] = DMACH_I2S0S_TX,
+		[11] = DMACH_I2S0_RX,
+		[12] = DMACH_I2S0_TX,
+		[13] = DMACH_I2S2_RX,
+		[14] = DMACH_I2S2_TX,
+		[15] = DMACH_UART0_RX,
+		[16] = DMACH_UART0_TX,
+		[17] = DMACH_UART2_RX,
+		[18] = DMACH_UART2_TX,
+		[19] = DMACH_UART4_RX,
+		[20] = DMACH_UART4_TX,
+		[21] = DMACH_SLIMBUS0_RX,
+		[22] = DMACH_SLIMBUS0_TX,
+		[23] = DMACH_SLIMBUS2_RX,
+		[24] = DMACH_SLIMBUS2_TX,
+		[25] = DMACH_SLIMBUS4_RX,
+		[26] = DMACH_SLIMBUS4_TX,
+		[27] = DMACH_AC97_MICIN,
+		[28] = DMACH_AC97_PCMIN,
+		[29] = DMACH_AC97_PCMOUT,
+		[30] = DMACH_MAX,
+		[31] = DMACH_MAX,
+	},
+};
+
+static struct platform_device s5pv310_device_pdma0 = {
+	.name		= "s3c-pl330",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s5pv310_pdma0_resource),
+	.resource	= s5pv310_pdma0_resource,
+	.dev		= {
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv310_pdma0_pdata,
+	},
+};
+
+static struct resource s5pv310_pdma1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PDMA1,
+		.end	= S5PV310_PA_PDMA1 + SZ_4K,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_PDMA1,
+		.end	= IRQ_PDMA1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_pl330_platdata s5pv310_pdma1_pdata = {
+	.peri = {
+		[0] = DMACH_PCM0_RX,
+		[1] = DMACH_PCM0_TX,
+		[2] = DMACH_PCM1_RX,
+		[3] = DMACH_PCM1_TX,
+		[4] = DMACH_MSM_REQ1,
+		[5] = DMACH_MSM_REQ3,
+		[6] = DMACH_SPI1_RX,
+		[7] = DMACH_SPI1_TX,
+		[8] = DMACH_I2S0S_TX,
+		[9] = DMACH_I2S0_RX,
+		[10] = DMACH_I2S0_TX,
+		[11] = DMACH_I2S1_RX,
+		[12] = DMACH_I2S1_TX,
+		[13] = DMACH_UART0_RX,
+		[14] = DMACH_UART0_TX,
+		[15] = DMACH_UART1_RX,
+		[16] = DMACH_UART1_TX,
+		[17] = DMACH_UART3_RX,
+		[18] = DMACH_UART3_TX,
+		[19] = DMACH_SLIMBUS1_RX,
+		[20] = DMACH_SLIMBUS1_TX,
+		[21] = DMACH_SLIMBUS3_RX,
+		[22] = DMACH_SLIMBUS3_TX,
+		[23] = DMACH_SLIMBUS5_RX,
+		[24] = DMACH_SLIMBUS5_TX,
+		[25] = DMACH_SLIMBUS0AUX_RX,
+		[26] = DMACH_SLIMBUS0AUX_TX,
+		[27] = DMACH_SPDIF,
+		[28] = DMACH_MAX,
+		[29] = DMACH_MAX,
+		[30] = DMACH_MAX,
+		[31] = DMACH_MAX,
+	},
+};
+
+static struct platform_device s5pv310_device_pdma1 = {
+	.name		= "s3c-pl330",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(s5pv310_pdma1_resource),
+	.resource	= s5pv310_pdma1_resource,
+	.dev		= {
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv310_pdma1_pdata,
+	},
+};
+
+static struct platform_device *s5pv310_dmacs[] __initdata = {
+	&s5pv310_device_pdma0,
+	&s5pv310_device_pdma1,
+};
+
+static int __init s5pv310_dma_init(void)
+{
+	platform_add_devices(s5pv310_dmacs, ARRAY_SIZE(s5pv310_dmacs));
+
+	return 0;
+}
+arch_initcall(s5pv310_dma_init);
diff --git a/arch/arm/mach-s5pv310/include/mach/dma.h b/arch/arm/mach-s5pv310/include/mach/dma.h
new file mode 100644
index 0000000..81209eb
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 99e7dad..3c05c58 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -54,6 +54,9 @@
 #define COMBINER_GROUP(x)	((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(64))
 #define COMBINER_IRQ(x, y)	(COMBINER_GROUP(x) + y)
 
+#define IRQ_PDMA0		COMBINER_IRQ(21, 0)
+#define IRQ_PDMA1		COMBINER_IRQ(21, 1)
+
 #define IRQ_TIMER0_VIC		COMBINER_IRQ(22, 0)
 #define IRQ_TIMER1_VIC		COMBINER_IRQ(22, 1)
 #define IRQ_TIMER2_VIC		COMBINER_IRQ(22, 2)
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 7acf4e7..5399446 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -52,6 +52,11 @@
 #define S5PV310_PA_GIC_DIST		(0x10501000)
 #define S5PV310_PA_L2CC			(0x10502000)
 
+/* DMA */
+#define S5PV310_PA_MDMA		0x10810000
+#define S5PV310_PA_PDMA0	0x12680000
+#define S5PV310_PA_PDMA1	0x12690000
+
 #define S5PV310_PA_GPIO1		(0x11400000)
 #define S5PV310_PA_GPIO2		(0x11000000)
 #define S5PV310_PA_GPIO3		(0x03860000)
@@ -60,6 +65,22 @@
 
 #define S5PV310_PA_SROMC		(0x12570000)
 
+/* S/PDIF */
+#define S5PV310_PA_SPDIF	0xE1100000
+
+/* I2S */
+#define S5PV310_PA_I2S0		0x03830000
+#define S5PV310_PA_I2S1		0xE3100000
+#define S5PV310_PA_I2S2		0xE2A00000
+
+/* PCM */
+#define S5PV310_PA_PCM0		0x03840000
+#define S5PV310_PA_PCM1		0x13980000
+#define S5PV310_PA_PCM2		0x13990000
+
+/* AC97 */
+#define S5PV310_PA_AC97		0x139A0000
+
 #define S5PV310_PA_UART			(0x13800000)
 
 #define S5P_PA_UART(x)			(S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index c83fdc8..ab9fc44 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -120,7 +120,7 @@
 	return virt_to_phys(sp);
 }
 
-static struct platform_suspend_ops sa11x0_pm_ops = {
+static const struct platform_suspend_ops sa11x0_pm_ops = {
 	.enter		= sa11x0_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 6329333..4d1b4c5 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -5,26 +5,27 @@
 config ARCH_SH7367
 	bool "SH-Mobile G3 (SH7367)"
 	select CPU_V6
-	select HAVE_CLK
-	select CLKDEV_LOOKUP
 	select SH_CLK_CPG
-	select GENERIC_CLOCKEVENTS
+	select ARCH_WANT_OPTIONAL_GPIOLIB
 
 config ARCH_SH7377
 	bool "SH-Mobile G4 (SH7377)"
 	select CPU_V7
-	select HAVE_CLK
-	select CLKDEV_LOOKUP
 	select SH_CLK_CPG
-	select GENERIC_CLOCKEVENTS
+	select ARCH_WANT_OPTIONAL_GPIOLIB
 
 config ARCH_SH7372
 	bool "SH-Mobile AP4 (SH7372)"
 	select CPU_V7
-	select HAVE_CLK
-	select CLKDEV_LOOKUP
 	select SH_CLK_CPG
-	select GENERIC_CLOCKEVENTS
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+
+config ARCH_SH73A0
+	bool "SH-Mobile AG5 (R8A73A00)"
+	select CPU_V7
+	select SH_CLK_CPG
+	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select ARM_GIC
 
 comment "SH-Mobile Board Type"
 
@@ -57,6 +58,15 @@
 
 endchoice
 
+config MACH_AG5EVM
+	bool "AG5EVM board"
+	depends on ARCH_SH73A0
+
+config MACH_MACKEREL
+	bool "mackerel board"
+	depends on ARCH_SH7372
+	select ARCH_REQUIRE_GPIOLIB
+
 comment "SH-Mobile System Configuration"
 
 menu "Memory configuration"
@@ -64,8 +74,8 @@
 config MEMORY_START
 	hex "Physical memory start address"
 	default "0x50000000" if MACH_G3EVM
-	default "0x40000000" if MACH_G4EVM
-	default "0x40000000" if MACH_AP4EVB
+	default "0x40000000" if MACH_G4EVM || MACH_AP4EVB || MACH_AG5EVM || \
+				MACH_MACKEREL
 	default "0x00000000"
 	---help---
 	  Tweak this only when porting to a new machine which does not
@@ -76,7 +86,8 @@
 	hex "Physical memory size"
 	default "0x08000000" if MACH_G3EVM
 	default "0x08000000" if MACH_G4EVM
-	default "0x10000000" if MACH_AP4EVB
+	default "0x20000000" if MACH_AG5EVM
+	default "0x10000000" if MACH_AP4EVB || MACH_MACKEREL
 	default "0x04000000"
 	help
 	  This sets the default memory size assumed by your kernel. It can
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index ae416fe..e2507f6 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -9,14 +9,34 @@
 obj-$(CONFIG_ARCH_SH7367)	+= setup-sh7367.o clock-sh7367.o intc-sh7367.o
 obj-$(CONFIG_ARCH_SH7377)	+= setup-sh7377.o clock-sh7377.o intc-sh7377.o
 obj-$(CONFIG_ARCH_SH7372)	+= setup-sh7372.o clock-sh7372.o intc-sh7372.o
+obj-$(CONFIG_ARCH_SH73A0)	+= setup-sh73a0.o clock-sh73a0.o intc-sh73a0.o
+
+# SMP objects
+smp-y				:= platsmp.o headsmp.o
+smp-$(CONFIG_HOTPLUG_CPU)	+= hotplug.o
+smp-$(CONFIG_LOCAL_TIMERS)	+= localtimer.o
+smp-$(CONFIG_ARCH_SH73A0)	+= smp-sh73a0.o
 
 # Pinmux setup
-pfc-$(CONFIG_ARCH_SH7367)	:= pfc-sh7367.o
-pfc-$(CONFIG_ARCH_SH7377)	:= pfc-sh7377.o
-pfc-$(CONFIG_ARCH_SH7372)	:= pfc-sh7372.o
-obj-$(CONFIG_GENERIC_GPIO)	+= $(pfc-y)
+pfc-y				:=
+pfc-$(CONFIG_ARCH_SH7367)	+= pfc-sh7367.o
+pfc-$(CONFIG_ARCH_SH7377)	+= pfc-sh7377.o
+pfc-$(CONFIG_ARCH_SH7372)	+= pfc-sh7372.o
+pfc-$(CONFIG_ARCH_SH73A0)	+= pfc-sh73a0.o
+
+# IRQ objects
+obj-$(CONFIG_ARCH_SH7367)	+= entry-intc.o
+obj-$(CONFIG_ARCH_SH7377)	+= entry-intc.o
+obj-$(CONFIG_ARCH_SH7372)	+= entry-intc.o
+obj-$(CONFIG_ARCH_SH73A0)	+= entry-gic.o
 
 # Board objects
 obj-$(CONFIG_MACH_G3EVM)	+= board-g3evm.o
 obj-$(CONFIG_MACH_G4EVM)	+= board-g4evm.o
 obj-$(CONFIG_MACH_AP4EVB)	+= board-ap4evb.o
+obj-$(CONFIG_MACH_AG5EVM)	+= board-ag5evm.o
+obj-$(CONFIG_MACH_MACKEREL)	+= board-mackerel.o
+
+# Framework support
+obj-$(CONFIG_SMP)		+= $(smp-y)
+obj-$(CONFIG_GENERIC_GPIO)	+= $(pfc-y)
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
new file mode 100644
index 0000000..c18a740
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -0,0 +1,315 @@
+/*
+ * arch/arm/mach-shmobile/board-ag5evm.c
+ *
+ * Copyright (C) 2010  Takashi Yoshii <yoshii.takashi.zj@renesas.com>
+ * Copyright (C) 2009  Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/serial_sci.h>
+#include <linux/smsc911x.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/input/sh_keysc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mmcif.h>
+
+#include <sound/sh_fsi.h>
+
+#include <mach/hardware.h>
+#include <mach/sh73a0.h>
+#include <mach/common.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/hardware/gic.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/traps.h>
+
+static struct resource smsc9220_resources[] = {
+	[0] = {
+		.start		= 0x14000000,
+		.end		= 0x14000000 + SZ_64K - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start		= gic_spi(33), /* PINT1 */
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc9220_platdata = {
+	.flags		= SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
+};
+
+static struct platform_device eth_device = {
+	.name		= "smsc911x",
+	.id		= 0,
+	.dev  = {
+		.platform_data = &smsc9220_platdata,
+	},
+	.resource	= smsc9220_resources,
+	.num_resources	= ARRAY_SIZE(smsc9220_resources),
+};
+
+static struct sh_keysc_info keysc_platdata = {
+	.mode		= SH_KEYSC_MODE_6,
+	.scan_timing	= 3,
+	.delay		= 100,
+	.keycodes	= {
+		KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G,
+		KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N,
+		KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
+		KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP,
+		KEY_SPACE, KEY_9, KEY_6, KEY_3, KEY_WAKEUP, KEY_RIGHT,
+		KEY_COFFEE,
+		KEY_0, KEY_8, KEY_5, KEY_2, KEY_DOWN, KEY_ENTER, KEY_UP,
+		KEY_KPASTERISK, KEY_7, KEY_4, KEY_1, KEY_STOP, KEY_LEFT,
+		KEY_COMPUTER,
+	},
+};
+
+static struct resource keysc_resources[] = {
+	[0] = {
+		.name	= "KEYSC",
+		.start	= 0xe61b0000,
+		.end	= 0xe61b0098 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(71),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device keysc_device = {
+	.name		= "sh_keysc",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(keysc_resources),
+	.resource	= keysc_resources,
+	.dev		= {
+		.platform_data	= &keysc_platdata,
+	},
+};
+
+/* FSI A */
+static struct sh_fsi_platform_info fsi_info = {
+	.porta_flags = SH_FSI_OUT_SLAVE_MODE	|
+		       SH_FSI_IN_SLAVE_MODE	|
+		       SH_FSI_OFMT(I2S)		|
+		       SH_FSI_IFMT(I2S),
+};
+
+static struct resource fsi_resources[] = {
+	[0] = {
+		.name	= "FSI",
+		.start	= 0xEC230000,
+		.end	= 0xEC230400 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = gic_spi(146),
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device fsi_device = {
+	.name		= "sh_fsi2",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(fsi_resources),
+	.resource	= fsi_resources,
+	.dev	= {
+		.platform_data	= &fsi_info,
+	},
+};
+
+static struct resource sh_mmcif_resources[] = {
+	[0] = {
+		.name	= "MMCIF",
+		.start	= 0xe6bd0000,
+		.end	= 0xe6bd00ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(141),
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start	= gic_spi(140),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct sh_mmcif_plat_data sh_mmcif_platdata = {
+	.sup_pclk	= 0,
+	.ocr		= MMC_VDD_165_195,
+	.caps		= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+};
+
+static struct platform_device mmc_device = {
+	.name		= "sh_mmcif",
+	.id		= 0,
+	.dev		= {
+		.dma_mask		= NULL,
+		.coherent_dma_mask	= 0xffffffff,
+		.platform_data		= &sh_mmcif_platdata,
+	},
+	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
+	.resource	= sh_mmcif_resources,
+};
+
+static struct platform_device *ag5evm_devices[] __initdata = {
+	&eth_device,
+	&keysc_device,
+	&fsi_device,
+	&mmc_device,
+};
+
+static struct map_desc ag5evm_io_desc[] __initdata = {
+	/* create a 1:1 identity map for 0xe6xxxxxx
+	 * used by CPGA, INTC and PFC.
+	 */
+	{
+		.virtual	= 0xe6000000,
+		.pfn		= __phys_to_pfn(0xe6000000),
+		.length		= 256 << 20,
+		.type		= MT_DEVICE_NONSHARED
+	},
+};
+
+static void __init ag5evm_map_io(void)
+{
+	iotable_init(ag5evm_io_desc, ARRAY_SIZE(ag5evm_io_desc));
+
+	/* setup early devices and console here as well */
+	sh73a0_add_early_devices();
+	shmobile_setup_console();
+}
+
+#define PINTC_ADDR	0xe6900000
+#define PINTER0A	(PINTC_ADDR + 0xa0)
+#define PINTCR0A	(PINTC_ADDR + 0xb0)
+
+void __init ag5evm_init_irq(void)
+{
+	sh73a0_init_irq();
+
+	/* setup PINT: enable PINTA2 as active low */
+	__raw_writel(__raw_readl(PINTER0A) | (1<<29), PINTER0A);
+	__raw_writew(__raw_readw(PINTCR0A) | (2<<10), PINTCR0A);
+}
+
+static void __init ag5evm_init(void)
+{
+	sh73a0_pinmux_init();
+
+	/* enable SCIFA2 */
+	gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
+	gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
+	gpio_request(GPIO_FN_SCIFA2_RTS1_, NULL);
+	gpio_request(GPIO_FN_SCIFA2_CTS1_, NULL);
+
+	/* enable KEYSC */
+	gpio_request(GPIO_FN_KEYIN0_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN1_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN2_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN3_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN4_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN5_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN6_PU, NULL);
+	gpio_request(GPIO_FN_KEYIN7_PU, NULL);
+	gpio_request(GPIO_FN_KEYOUT0, NULL);
+	gpio_request(GPIO_FN_KEYOUT1, NULL);
+	gpio_request(GPIO_FN_KEYOUT2, NULL);
+	gpio_request(GPIO_FN_KEYOUT3, NULL);
+	gpio_request(GPIO_FN_KEYOUT4, NULL);
+	gpio_request(GPIO_FN_KEYOUT5, NULL);
+	gpio_request(GPIO_FN_PORT59_KEYOUT6, NULL);
+	gpio_request(GPIO_FN_PORT58_KEYOUT7, NULL);
+	gpio_request(GPIO_FN_KEYOUT8, NULL);
+	gpio_request(GPIO_FN_PORT149_KEYOUT9, NULL);
+
+	/* enable I2C channel 2 and 3 */
+	gpio_request(GPIO_FN_PORT236_I2C_SDA2, NULL);
+	gpio_request(GPIO_FN_PORT237_I2C_SCL2, NULL);
+	gpio_request(GPIO_FN_PORT248_I2C_SCL3, NULL);
+	gpio_request(GPIO_FN_PORT249_I2C_SDA3, NULL);
+
+	/* enable MMCIF */
+	gpio_request(GPIO_FN_MMCCLK0, NULL);
+	gpio_request(GPIO_FN_MMCCMD0_PU, NULL);
+	gpio_request(GPIO_FN_MMCD0_0, NULL);
+	gpio_request(GPIO_FN_MMCD0_1, NULL);
+	gpio_request(GPIO_FN_MMCD0_2, NULL);
+	gpio_request(GPIO_FN_MMCD0_3, NULL);
+	gpio_request(GPIO_FN_MMCD0_4, NULL);
+	gpio_request(GPIO_FN_MMCD0_5, NULL);
+	gpio_request(GPIO_FN_MMCD0_6, NULL);
+	gpio_request(GPIO_FN_MMCD0_7, NULL);
+	gpio_request(GPIO_PORT208, NULL); /* Reset */
+	gpio_direction_output(GPIO_PORT208, 1);
+
+	/* enable SMSC911X */
+	gpio_request(GPIO_PORT144, NULL); /* PINTA2 */
+	gpio_direction_input(GPIO_PORT144);
+	gpio_request(GPIO_PORT145, NULL); /* RESET */
+	gpio_direction_output(GPIO_PORT145, 1);
+
+	/* FSI A */
+	gpio_request(GPIO_FN_FSIACK, NULL);
+	gpio_request(GPIO_FN_FSIAILR, NULL);
+	gpio_request(GPIO_FN_FSIAIBT, NULL);
+	gpio_request(GPIO_FN_FSIAISLD, NULL);
+	gpio_request(GPIO_FN_FSIAOSLD, NULL);
+
+#ifdef CONFIG_CACHE_L2X0
+	/* Shared attribute override enable, 64K*8way */
+	l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
+#endif
+	sh73a0_add_standard_devices();
+	platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices));
+}
+
+static void __init ag5evm_timer_init(void)
+{
+	sh73a0_clock_init();
+	shmobile_timer.init();
+}
+
+struct sys_timer ag5evm_timer = {
+	.init	= ag5evm_timer_init,
+};
+
+MACHINE_START(AG5EVM, "ag5evm")
+	.map_io		= ag5evm_map_io,
+	.init_irq	= ag5evm_init_irq,
+	.handle_irq	= shmobile_handle_irq_gic,
+	.init_machine	= ag5evm_init,
+	.timer		= &ag5evm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index f92dbd0..3cf0951 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -247,10 +247,7 @@
  */
 static int slot_cn7_get_cd(struct platform_device *pdev)
 {
-	if (gpio_is_valid(GPIO_PORT41))
-		return !gpio_get_value(GPIO_PORT41);
-	else
-		return -ENXIO;
+	return !gpio_get_value(GPIO_PORT41);
 }
 
 /* SH_MMCIF */
@@ -273,6 +270,15 @@
 	},
 };
 
+static struct sh_mmcif_dma sh_mmcif_dma = {
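+	/* SH DMA engine slave channels used for MMCIF transfers */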
+	.chan_priv_rx	= {
+		.slave_id	= SHDMA_SLAVE_MMCIF_RX,
+	},
+	.chan_priv_tx	= {
+		.slave_id	= SHDMA_SLAVE_MMCIF_TX,
+	},
+};
+
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
 	.sup_pclk	= 0,
 	.ocr		= MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -280,6 +286,7 @@
 			  MMC_CAP_8_BIT_DATA |
 			  MMC_CAP_NEEDS_POLL,
 	.get_cd		= slot_cn7_get_cd,
+	.dma		= &sh_mmcif_dma,
 };
 
 static struct platform_device sh_mmcif_device = {
@@ -298,6 +305,7 @@
 static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -329,7 +337,7 @@
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
 	.tmio_ocr_mask	= MMC_VDD_165_195,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
-	.tmio_caps	= MMC_CAP_NEEDS_POLL,
+	.tmio_caps	= MMC_CAP_NEEDS_POLL | MMC_CAP_SDIO_IRQ,
 	.get_cd		= slot_cn7_get_cd,
 };
 
@@ -633,9 +641,8 @@
 		return -EIO;
 
 	ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
-	clk_put(fsib_clk);
 	if (ret < 0)
-		return ret;
+		goto fsi_set_rate_end;
 
 	/* FSI DIV setting */
 	ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
@@ -643,10 +650,14 @@
 		/* disable FSI B */
 		if (enable)
 			__fsi_set_round_rate(fsib_clk, fsib_rate, 0);
-		return ret;
+		goto fsi_set_rate_end;
 	}
 
-	return ackmd_bpfmd;
+	ret = ackmd_bpfmd;
+
+fsi_set_rate_end:
+	clk_put(fsib_clk);
+	return ret;
 }
 
 static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
@@ -698,6 +709,10 @@
 	},
 };
 
+static struct platform_device fsi_ak4643_device = {
+	.name		= "sh_fsi2_a_ak4643",
+};
+
 static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
 	.clock_source = LCDC_CLK_EXTERNAL,
 	.ch[0] = {
@@ -920,6 +935,7 @@
 	&sdhi1_device,
 	&usb1_host_device,
 	&fsi_device,
+	&fsi_ak4643_device,
 	&sh_mmcif_device,
 	&lcdc1_device,
 	&lcdc_device,
@@ -1174,7 +1190,7 @@
 	gpio_request(GPIO_FN_OVCN2_1,    NULL);
 
 	/* setup USB phy */
-	__raw_writew(0x8a0a, 0xE6058130);	/* USBCR2 */
+	__raw_writew(0x8a0a, 0xE6058130);	/* USBCR4 */
 
 	/* enable FSI2 port A (ak4643) */
 	gpio_request(GPIO_FN_FSIAIBT,	NULL);
@@ -1348,6 +1364,7 @@
 MACHINE_START(AP4EVB, "ap4evb")
 	.map_io		= ap4evb_map_io,
 	.init_irq	= sh7372_init_irq,
+	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= ap4evb_init,
 	.timer		= &ap4evb_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 3b83d63..686b304 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -367,6 +367,7 @@
 MACHINE_START(G3EVM, "g3evm")
 	.map_io		= g3evm_map_io,
 	.init_irq	= sh7367_init_irq,
+	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= g3evm_init,
 	.timer		= &g3evm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 5b3b582..dee3e92 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -30,6 +30,7 @@
 #include <linux/io.h>
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
+#include <linux/mmc/host.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
 #include <linux/gpio.h>
 #include <mach/sh7377.h>
@@ -196,6 +197,10 @@
 };
 
 /* SDHI */
+static struct sh_mobile_sdhi_info sdhi0_info = {
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+};
+
 static struct resource sdhi0_resources[] = {
 	[0] = {
 		.name	= "SDHI0",
@@ -214,6 +219,13 @@
 	.num_resources  = ARRAY_SIZE(sdhi0_resources),
 	.resource       = sdhi0_resources,
 	.id             = 0,
+	.dev	= {
+		.platform_data	= &sdhi0_info,
+	},
+};
+
+static struct sh_mobile_sdhi_info sdhi1_info = {
+	.tmio_caps	= MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -234,6 +246,9 @@
 	.num_resources  = ARRAY_SIZE(sdhi1_resources),
 	.resource       = sdhi1_resources,
 	.id             = 1,
+	.dev	= {
+		.platform_data	= &sdhi1_info,
+	},
 };
 
 static struct platform_device *g4evm_devices[] __initdata = {
@@ -394,6 +409,7 @@
 MACHINE_START(G4EVM, "g4evm")
 	.map_io		= g4evm_map_io,
 	.init_irq	= sh7377_init_irq,
+	.handle_irq	= shmobile_handle_irq_intc,
 	.init_machine	= g4evm_init,
 	.timer		= &g4evm_timer,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
new file mode 100644
index 0000000..7b15d21
--- /dev/null
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -0,0 +1,1221 @@
+/*
+ * mackerel board support
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on ap4evb
+ * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2008  Yoshihiro Shimoda
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mmcif.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/smsc911x.h>
+#include <linux/sh_intc.h>
+#include <linux/tca6416_keypad.h>
+#include <linux/usb/r8a66597.h>
+
+#include <video/sh_mobile_hdmi.h>
+#include <video/sh_mobile_lcdc.h>
+#include <media/sh_mobile_ceu.h>
+#include <media/soc_camera.h>
+#include <media/soc_camera_platform.h>
+#include <sound/sh_fsi.h>
+
+#include <mach/common.h>
+#include <mach/sh7372.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <asm/mach/map.h>
+#include <asm/mach-types.h>
+
+/*
+ * Address	Interface		BusWidth	note
+ * ------------------------------------------------------------------
+ * 0x0000_0000	NOR Flash ROM (MCP)	16bit		SW7 : bit1 = ON
+ * 0x0800_0000	user area		-
+ * 0x1000_0000	NOR Flash ROM (MCP)	16bit		SW7 : bit1 = OFF
+ * 0x1400_0000	Ether (LAN9220)		16bit
+ * 0x1600_0000	user area		-		cannot use with NAND
+ * 0x1800_0000	user area		-
+ * 0x1A00_0000	-
+ * 0x4000_0000	LPDDR2-SDRAM (POP)	32bit
+ */
+
+/*
+ * CPU mode
+ *
+ * SW4                                     | Boot Area| Master   | Remarks
+ *  1  | 2   | 3   | 4   | 5   | 6   | 8   |          | Processor|
+ * ----+-----+-----+-----+-----+-----+-----+----------+----------+--------------
+ * ON  | ON  | OFF | ON  | ON  | OFF | OFF | External | System   | External ROM
+ * ON  | ON  | ON  | ON  | ON  | OFF | OFF | External | System   | ROM Debug
+ * ON  | ON  | X   | ON  | OFF | OFF | OFF | Built-in | System   | ROM Debug
+ * X   | OFF | X   | X   | X   | X   | OFF | Built-in | System   | MaskROM
+ * OFF | X   | X   | X   | X   | X   | OFF | Built-in | System   | MaskROM
+ * X   | X   | X   | OFF | X   | X   | OFF | Built-in | System   | MaskROM
+ * OFF | ON  | OFF | X   | X   | OFF | ON  | External | System   | Standalone
+ * ON  | OFF | OFF | X   | X   | OFF | ON  | External | Realtime | Standalone
+ */
+
+/*
+ * NOR Flash ROM
+ *
+ *  SW1  |     SW2    | SW7  | NOR Flash ROM
+ *  bit1 | bit1  bit2 | bit1 | Memory allocation
+ * ------+------------+------+------------------
+ *  OFF  | ON     OFF | ON   |    Area 0
+ *  OFF  | ON     OFF | OFF  |    Area 4
+ */
+
+/*
+ * SMSC 9220
+ *
+ *  SW1		SMSC 9220
+ * -----------------------
+ *  ON		access disable
+ *  OFF		access enable
+ */
+
+/*
+ * NAND Flash ROM
+ *
+ *  SW1  |     SW2    | SW7  | NAND Flash ROM
+ *  bit1 | bit1  bit2 | bit2 | Memory allocation
+ * ------+------------+------+------------------
+ *  OFF  | ON     OFF | ON   |    FCE 0
+ *  OFF  | ON     OFF | OFF  |    FCE 1
+ */
+
+/*
+ * External interrupt pin settings
+ *
+ * IRQX  | pin setting        | device             | level
+ * ------+--------------------+--------------------+-------
+ * IRQ0  | ICR1A.IRQ0SA=0010  | SDHI2 card detect  | Low
+ * IRQ6  | ICR1A.IRQ6SA=0011  | Ether(LAN9220)     | High
+ * IRQ7  | ICR1A.IRQ7SA=0010  | LCD Touch Panel    | Low
+ * IRQ8  | ICR2A.IRQ8SA=0010  | MMC/SD card detect | Low
+ * IRQ9  | ICR2A.IRQ9SA=0010  | KEY(TCA6408)       | Low
+ * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345)    | High
+ * IRQ22 | ICR4A.IRQ22SA=0011 | Sensor(AK8975)     | High
+ */
+
+/*
+ * USB
+ *
+ * USB0 : CN22 : Function
+ * USB1 : CN31 : Function/Host *1
+ *
+ * J30 (for CN31) *1
+ * ----------+---------------+-------------
+ * 1-2 short | VBUS 5V       | Host
+ * open      | external VBUS | Function
+ *
+ * *1
+ * CN31 is used as Host in Linux.
+ */
+
+/*
+ * SDHI0 (CN12)
+ *
+ * SW56 : OFF
+ *
+ */
+
+/* MMC /SDHI1 (CN7)
+ *
+ * I/O voltage : 1.8v
+ *
+ * Power voltage : 1.8v or 3.3v
+ *  J22 : select power voltage *1
+ *	1-2 pin : 1.8v
+ *	2-3 pin : 3.3v
+ *
+ * *1
+ * Please change J22 depending on the card to be used.
+ * The MMC OCR field is set to support either voltage for the inserted card.
+ *
+ *	SW1	|	SW33
+ *		| bit1 | bit2 | bit3 | bit4
+ * -------------+------+------+------+-------
+ * MMC0	  OFF	|  OFF |  ON  |  ON  |  X
+ * MMC1	  ON	|  OFF |  ON  |  X   | ON
+ * SDHI1  OFF	|  ON  |   X  |  OFF | ON
+ *
+ */
+
+/*
+ * SDHI2 (CN23)
+ *
+ * microSD card slot
+ *
+ */
+
+/*
+ * FIXME !!
+ *
+ * gpio_no_direction() is a quick hack.
+ *
+ * The current gpio framework doesn't provide a way to control only the
+ * pull up/down/free state, so this function should be replaced by a
+ * proper gpio function once one exists.
+ */
+static void __init gpio_no_direction(u32 addr)
+{
+	__raw_writeb(0x00, addr);
+}
+
+/* MTD */
+static struct mtd_partition nor_flash_partitions[] = {
+	{
+		.name		= "loader",
+		.offset		= 0x00000000,
+		.size		= 512 * 1024,
+		.mask_flags	= MTD_WRITEABLE,
+	},
+	{
+		.name		= "bootenv",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 512 * 1024,
+		.mask_flags	= MTD_WRITEABLE,
+	},
+	{
+		.name		= "kernel_ro",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 8 * 1024 * 1024,
+		.mask_flags	= MTD_WRITEABLE,
+	},
+	{
+		.name		= "kernel",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 8 * 1024 * 1024,
+	},
+	{
+		.name		= "data",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+	},
+};
+
+static struct physmap_flash_data nor_flash_data = {
+	.width		= 2,
+	.parts		= nor_flash_partitions,
+	.nr_parts	= ARRAY_SIZE(nor_flash_partitions),
+};
+
+static struct resource nor_flash_resources[] = {
+	[0]	= {
+		.start	= 0x00000000,
+		.end	= 0x08000000 - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device nor_flash_device = {
+	.name		= "physmap-flash",
+	.dev		= {
+		.platform_data	= &nor_flash_data,
+	},
+	.num_resources	= ARRAY_SIZE(nor_flash_resources),
+	.resource	= nor_flash_resources,
+};
+
+/* SMSC */
+static struct resource smc911x_resources[] = {
+	{
+		.start	= 0x14000000,
+		.end	= 0x16000000 - 1,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.start	= evt2irq(0x02c0) /* IRQ6A */,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_info = {
+	.flags		= SMSC911X_USE_16BIT | SMSC911X_SAVE_MAC_ADDRESS,
+	.irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type       = SMSC911X_IRQ_TYPE_PUSH_PULL,
+};
+
+static struct platform_device smc911x_device = {
+	.name           = "smsc911x",
+	.id             = -1,
+	.num_resources  = ARRAY_SIZE(smc911x_resources),
+	.resource       = smc911x_resources,
+	.dev            = {
+		.platform_data = &smsc911x_info,
+	},
+};
+
+/* LCDC */
+static struct fb_videomode mackerel_lcdc_modes[] = {
+	{
+		.name		= "WVGA Panel",
+		.xres		= 800,
+		.yres		= 480,
+		.left_margin	= 220,
+		.right_margin	= 110,
+		.hsync_len	= 70,
+		.upper_margin	= 20,
+		.lower_margin	= 5,
+		.vsync_len	= 5,
+		.sync		= 0,
+	},
+};
+
+static struct sh_mobile_lcdc_info lcdc_info = {
+	.clock_source = LCDC_CLK_BUS,
+	.ch[0] = {
+		.chan = LCDC_CHAN_MAINLCD,
+		.bpp = 16,
+		.lcd_cfg = mackerel_lcdc_modes,
+		.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
+		.interface_type		= RGB24,
+		.clock_divider		= 2,
+		.flags			= 0,
+		.lcd_size_cfg.width	= 152,
+		.lcd_size_cfg.height	= 91,
+	}
+};
+
+static struct resource lcdc_resources[] = {
+	[0] = {
+		.name	= "LCDC",
+		.start	= 0xfe940000,
+		.end	= 0xfe943fff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= intcs_evt2irq(0x580),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device lcdc_device = {
+	.name		= "sh_mobile_lcdc_fb",
+	.num_resources	= ARRAY_SIZE(lcdc_resources),
+	.resource	= lcdc_resources,
+	.dev	= {
+		.platform_data	= &lcdc_info,
+		.coherent_dma_mask = ~0,
+	},
+};
+
+/* HDMI */
+static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
+	.clock_source = LCDC_CLK_EXTERNAL,
+	.ch[0] = {
+		.chan = LCDC_CHAN_MAINLCD,
+		.bpp = 16,
+		.interface_type = RGB24,
+		.clock_divider = 1,
+		.flags = LCDC_FLAGS_DWPOL,
+	}
+};
+
+static struct resource hdmi_lcdc_resources[] = {
+	[0] = {
+		.name	= "LCDC1",
+		.start	= 0xfe944000,
+		.end	= 0xfe947fff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= intcs_evt2irq(0x1780),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device hdmi_lcdc_device = {
+	.name		= "sh_mobile_lcdc_fb",
+	.num_resources	= ARRAY_SIZE(hdmi_lcdc_resources),
+	.resource	= hdmi_lcdc_resources,
+	.id		= 1,
+	.dev	= {
+		.platform_data	= &hdmi_lcdc_info,
+		.coherent_dma_mask = ~0,
+	},
+};
+
+static struct sh_mobile_hdmi_info hdmi_info = {
+	.lcd_chan	= &hdmi_lcdc_info.ch[0],
+	.lcd_dev	= &hdmi_lcdc_device.dev,
+	.flags		= HDMI_SND_SRC_SPDIF,
+};
+
+static struct resource hdmi_resources[] = {
+	[0] = {
+		.name	= "HDMI",
+		.start	= 0xe6be0000,
+		.end	= 0xe6be00ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		/* There's also an HDMI interrupt on INTCS @ 0x18e0 */
+		.start	= evt2irq(0x17e0),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device hdmi_device = {
+	.name		= "sh-mobile-hdmi",
+	.num_resources	= ARRAY_SIZE(hdmi_resources),
+	.resource	= hdmi_resources,
+	.id             = -1,
+	.dev	= {
+		.platform_data	= &hdmi_info,
+	},
+};
+
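+/*
+ * HDMI clock routing: DV_CLKI/2 is made the parent of PLLC2, PLLC2 is
+ * programmed close to 594MHz, and PLLC2 then feeds the HDMI interface
+ * clock (ick).
+ */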
+static int __init hdmi_init_pm_clock(void)
+{
+	struct clk *hdmi_ick = clk_get(&hdmi_device.dev, "ick");
+	int ret;
+	long rate;
+
+	if (IS_ERR(hdmi_ick)) {
+		ret = PTR_ERR(hdmi_ick);
+		pr_err("Cannot get HDMI ICK: %d\n", ret);
+		goto out;
+	}
+
+	ret = clk_set_parent(&sh7372_pllc2_clk, &sh7372_dv_clki_div2_clk);
+	if (ret < 0) {
+		pr_err("Cannot set PLLC2 parent: %d, %d users\n",
+		       ret, sh7372_pllc2_clk.usecount);
+		goto out;
+	}
+
+	pr_debug("PLLC2 initial frequency %lu\n",
+		 clk_get_rate(&sh7372_pllc2_clk));
+
+	rate = clk_round_rate(&sh7372_pllc2_clk, 594000000);
+	if (rate < 0) {
+		pr_err("Cannot get suitable rate: %ld\n", rate);
+		ret = rate;
+		goto out;
+	}
+
+	ret = clk_set_rate(&sh7372_pllc2_clk, rate);
+	if (ret < 0) {
+		pr_err("Cannot set rate %ld: %d\n", rate, ret);
+		goto out;
+	}
+
+	ret = clk_enable(&sh7372_pllc2_clk);
+	if (ret < 0) {
+		pr_err("Cannot enable pllc2 clock\n");
+		goto out;
+	}
+
+	pr_debug("PLLC2 set frequency %lu\n", rate);
+
+	ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
+	if (ret < 0) {
+		pr_err("Cannot set HDMI parent: %d\n", ret);
+		goto out;
+	}
+
+out:
+	if (!IS_ERR(hdmi_ick))
+		clk_put(hdmi_ick);
+	return ret;
+}
+device_initcall(hdmi_init_pm_clock);
+
+/* USB1 (Host) */
+static void usb1_host_port_power(int port, int power)
+{
+	if (!power) /* only power-on is supported for now */
+		return;
+
+	/* set VBOUT/PWEN and EXTLP1 in DVSTCTR */
+	__raw_writew(__raw_readw(0xE68B0008) | 0x600, 0xE68B0008);
+}
+
+static struct r8a66597_platdata usb1_host_data = {
+	.on_chip	= 1,
+	.port_power	= usb1_host_port_power,
+};
+
+static struct resource usb1_host_resources[] = {
+	[0] = {
+		.name	= "USBHS",
+		.start	= 0xE68B0000,
+		.end	= 0xE68B00E6 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= evt2irq(0x1ce0) /* USB1_USB1I0 */,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device usb1_host_device = {
+	.name	= "r8a66597_hcd",
+	.id	= 1,
+	.dev = {
+		.dma_mask		= NULL,         /* does not use DMA */
+		.coherent_dma_mask	= 0xffffffff,
+		.platform_data		= &usb1_host_data,
+	},
+	.num_resources	= ARRAY_SIZE(usb1_host_resources),
+	.resource	= usb1_host_resources,
+};
+
+/* LED */
+static struct gpio_led mackerel_leds[] = {
+	{
+		.name		= "led0",
+		.gpio		= GPIO_PORT0,
+		.default_state	= LEDS_GPIO_DEFSTATE_ON,
+	},
+	{
+		.name		= "led1",
+		.gpio		= GPIO_PORT1,
+		.default_state	= LEDS_GPIO_DEFSTATE_ON,
+	},
+	{
+		.name		= "led2",
+		.gpio		= GPIO_PORT2,
+		.default_state	= LEDS_GPIO_DEFSTATE_ON,
+	},
+	{
+		.name		= "led3",
+		.gpio		= GPIO_PORT159,
+		.default_state	= LEDS_GPIO_DEFSTATE_ON,
+	}
+};
+
+static struct gpio_led_platform_data mackerel_leds_pdata = {
+	.leds = mackerel_leds,
+	.num_leds = ARRAY_SIZE(mackerel_leds),
+};
+
+static struct platform_device leds_device = {
+	.name = "leds-gpio",
+	.id = 0,
+	.dev = {
+		.platform_data  = &mackerel_leds_pdata,
+	},
+};
+
+/* FSI */
+#define IRQ_FSI evt2irq(0x1840)
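+/* Round the requested rate to a supported value, then enable or disable the clock */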
+static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
+{
+	int ret;
+
+	if (rate <= 0)
+		return 0;
+
+	if (!enable) {
+		clk_disable(clk);
+		return 0;
+	}
+
+	ret = clk_set_rate(clk, clk_round_rate(clk, rate));
+	if (ret < 0)
+		return ret;
+
+	return clk_enable(clk);
+}
+
+static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
+{
+	struct clk *fsib_clk;
+	struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+	long fsib_rate = 0;
+	long fdiv_rate = 0;
+	int ackmd_bpfmd;
+	int ret;
+
+	/* FSIA is in slave mode, nothing to do here */
+	if (is_porta)
+		return 0;
+
+	/* clock start */
+	switch (rate) {
+	case 44100:
+		fsib_rate	= rate * 256;
+		ackmd_bpfmd	= SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		break;
+	case 48000:
+		fsib_rate	= 85428000; /* around 48kHz x 256 x 7 */
+		fdiv_rate	= rate * 256;
+		ackmd_bpfmd	= SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		break;
+	default:
+		pr_err("unsupported rate in FSI2 port B\n");
+		return -EINVAL;
+	}
+
+	/* FSI B setting */
+	fsib_clk = clk_get(dev, "ickb");
+	if (IS_ERR(fsib_clk))
+		return -EIO;
+
+	/* fsib */
+	ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
+	if (ret < 0)
+		goto fsi_set_rate_end;
+
+	/* FSI DIV */
+	ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
+	if (ret < 0) {
+		/* disable FSI B */
+		if (enable)
+			__fsi_set_round_rate(fsib_clk, fsib_rate, 0);
+		goto fsi_set_rate_end;
+	}
+
+	ret = ackmd_bpfmd;
+
+fsi_set_rate_end:
+	clk_put(fsib_clk);
+	return ret;
+}
+
+static struct sh_fsi_platform_info fsi_info = {
+	.porta_flags =	SH_FSI_BRS_INV		|
+			SH_FSI_OUT_SLAVE_MODE	|
+			SH_FSI_IN_SLAVE_MODE	|
+			SH_FSI_OFMT(PCM)	|
+			SH_FSI_IFMT(PCM),
+
+	.portb_flags =	SH_FSI_BRS_INV	|
+			SH_FSI_BRM_INV	|
+			SH_FSI_LRS_INV	|
+			SH_FSI_OFMT(SPDIF),
+
+	.set_rate = fsi_set_rate,
+};
+
+static struct resource fsi_resources[] = {
+	[0] = {
+		.name	= "FSI",
+		.start	= 0xFE3C0000,
+		.end	= 0xFE3C0400 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = IRQ_FSI,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device fsi_device = {
+	.name		= "sh_fsi2",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(fsi_resources),
+	.resource	= fsi_resources,
+	.dev	= {
+		.platform_data	= &fsi_info,
+	},
+};
+
+static struct platform_device fsi_ak4643_device = {
+	.name		= "sh_fsi2_a_ak4643",
+};
+
+/*
+ * The card detect pin of the top SD/MMC slot (CN7) is active low and is
+ * connected to GPIO A22 of SH7372 (GPIO_PORT41).
+ */
+static int slot_cn7_get_cd(struct platform_device *pdev)
+{
+	return !gpio_get_value(GPIO_PORT41);
+}
+
+/* SDHI0 */
+static struct sh_mobile_sdhi_info sdhi0_info = {
+	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
+	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
+};
+
+static struct resource sdhi0_resources[] = {
+	[0] = {
+		.name	= "SDHI0",
+		.start	= 0xe6850000,
+		.end	= 0xe68501ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= evt2irq(0x0e00) /* SDHI0 */,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device sdhi0_device = {
+	.name		= "sh_mobile_sdhi",
+	.num_resources	= ARRAY_SIZE(sdhi0_resources),
+	.resource	= sdhi0_resources,
+	.id		= 0,
+	.dev	= {
+		.platform_data	= &sdhi0_info,
+	},
+};
+
+#if !defined(CONFIG_MMC_SH_MMCIF)
+/* SDHI1 */
+static struct sh_mobile_sdhi_info sdhi1_info = {
+	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
+	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_ocr_mask	= MMC_VDD_165_195,
+	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
+	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+			  MMC_CAP_NEEDS_POLL,
+	.get_cd		= slot_cn7_get_cd,
+};
+
+static struct resource sdhi1_resources[] = {
+	[0] = {
+		.name	= "SDHI1",
+		.start	= 0xe6860000,
+		.end	= 0xe68601ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= evt2irq(0x0e80),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device sdhi1_device = {
+	.name		= "sh_mobile_sdhi",
+	.num_resources	= ARRAY_SIZE(sdhi1_resources),
+	.resource	= sdhi1_resources,
+	.id		= 1,
+	.dev	= {
+		.platform_data	= &sdhi1_info,
+	},
+};
+#endif
+
+/*
+ * The card detect pin of the microSD slot (CN23) is active low and is
+ * connected to GPIO SCIFB_SCK of SH7372 (GPIO_PORT162).
+ */
+static int slot_cn23_get_cd(struct platform_device *pdev)
+{
+	return !gpio_get_value(GPIO_PORT162);
+}
+
+/* SDHI2 */
+static struct sh_mobile_sdhi_info sdhi2_info = {
+	.dma_slave_tx	= SHDMA_SLAVE_SDHI2_TX,
+	.dma_slave_rx	= SHDMA_SLAVE_SDHI2_RX,
+	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
+	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+			  MMC_CAP_NEEDS_POLL,
+	.get_cd		= slot_cn23_get_cd,
+};
+
+static struct resource sdhi2_resources[] = {
+	[0] = {
+		.name	= "SDHI2",
+		.start	= 0xe6870000,
+		.end	= 0xe68701ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= evt2irq(0x1200),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device sdhi2_device = {
+	.name	= "sh_mobile_sdhi",
+	.num_resources	= ARRAY_SIZE(sdhi2_resources),
+	.resource	= sdhi2_resources,
+	.id		= 2,
+	.dev	= {
+		.platform_data	= &sdhi2_info,
+	},
+};
+
+/* SH_MMCIF */
+static struct resource sh_mmcif_resources[] = {
+	[0] = {
+		.name	= "MMCIF",
+		.start	= 0xE6BD0000,
+		.end	= 0xE6BD00FF,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		/* MMC ERR */
+		.start	= evt2irq(0x1ac0),
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		/* MMC NOR */
+		.start	= evt2irq(0x1ae0),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct sh_mmcif_plat_data sh_mmcif_plat = {
+	.sup_pclk	= 0,
+	.ocr		= MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
+	.caps		= MMC_CAP_4_BIT_DATA |
+			  MMC_CAP_8_BIT_DATA |
+			  MMC_CAP_NEEDS_POLL,
+	.get_cd		= slot_cn7_get_cd,
+};
+
+static struct platform_device sh_mmcif_device = {
+	.name		= "sh_mmcif",
+	.id		= 0,
+	.dev		= {
+		.dma_mask		= NULL,
+		.coherent_dma_mask	= 0xffffffff,
+		.platform_data		= &sh_mmcif_plat,
+	},
+	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
+	.resource	= sh_mmcif_resources,
+};
+
+static int mackerel_camera_add(struct soc_camera_link *icl, struct device *dev);
+static void mackerel_camera_del(struct soc_camera_link *icl);
+
+static int camera_set_capture(struct soc_camera_platform_info *info,
+			      int enable)
+{
+	return 0; /* camera sensor always enabled */
+}
+
+static struct soc_camera_platform_info camera_info = {
+	.format_name = "UYVY",
+	.format_depth = 16,
+	.format = {
+		.code = V4L2_MBUS_FMT_UYVY8_2X8,
+		.colorspace = V4L2_COLORSPACE_SMPTE170M,
+		.field = V4L2_FIELD_NONE,
+		.width = 640,
+		.height = 480,
+	},
+	.bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
+	SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
+	SOCAM_DATA_ACTIVE_HIGH,
+	.set_capture = camera_set_capture,
+};
+
+static struct soc_camera_link camera_link = {
+	.bus_id		= 0,
+	.add_device	= mackerel_camera_add,
+	.del_device	= mackerel_camera_del,
+	.module_name	= "soc_camera_platform",
+	.priv		= &camera_info,
+};
+
+static void dummy_release(struct device *dev)
+{
+}
+
+static struct platform_device camera_device = {
+	.name		= "soc_camera_platform",
+	.dev		= {
+		.platform_data	= &camera_info,
+		.release	= dummy_release,
+	},
+};
+
+static int mackerel_camera_add(struct soc_camera_link *icl,
+			       struct device *dev)
+{
+	if (icl != &camera_link)
+		return -ENODEV;
+
+	camera_info.dev = dev;
+
+	return platform_device_register(&camera_device);
+}
+
+static void mackerel_camera_del(struct soc_camera_link *icl)
+{
+	if (icl != &camera_link)
+		return;
+
+	platform_device_unregister(&camera_device);
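+	/* clear the stale kobject state so the device can be registered again */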
+	memset(&camera_device.dev.kobj, 0,
+	       sizeof(camera_device.dev.kobj));
+}
+
+static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
+	.flags = SH_CEU_FLAG_USE_8BIT_BUS,
+};
+
+static struct resource ceu_resources[] = {
+	[0] = {
+		.name	= "CEU",
+		.start	= 0xfe910000,
+		.end	= 0xfe91009f,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = intcs_evt2irq(0x880),
+		.flags  = IORESOURCE_IRQ,
+	},
+	[2] = {
+		/* placeholder for contiguous memory */
+	},
+};
+
+static struct platform_device ceu_device = {
+	.name		= "sh_mobile_ceu",
+	.id             = 0, /* "ceu0" clock */
+	.num_resources	= ARRAY_SIZE(ceu_resources),
+	.resource	= ceu_resources,
+	.dev		= {
+		.platform_data	= &sh_mobile_ceu_info,
+	},
+};
+
+static struct platform_device mackerel_camera = {
+	.name	= "soc-camera-pdrv",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &camera_link,
+	},
+};
+
+static struct platform_device *mackerel_devices[] __initdata = {
+	&nor_flash_device,
+	&smc911x_device,
+	&lcdc_device,
+	&usb1_host_device,
+	&leds_device,
+	&fsi_device,
+	&fsi_ak4643_device,
+	&sdhi0_device,
+#if !defined(CONFIG_MMC_SH_MMCIF)
+	&sdhi1_device,
+#endif
+	&sdhi2_device,
+	&sh_mmcif_device,
+	&ceu_device,
+	&mackerel_camera,
+	&hdmi_lcdc_device,
+	&hdmi_device,
+};
+
+/* Keypad Initialization */
+#define KEYPAD_BUTTON(ev_type, ev_code, act_low) \
+{								\
+	.type		= ev_type,				\
+	.code		= ev_code,				\
+	.active_low	= act_low,				\
+}
+
+#define KEYPAD_BUTTON_LOW(event_code) KEYPAD_BUTTON(EV_KEY, event_code, 1)
+
+static struct tca6416_button mackerel_gpio_keys[] = {
+	KEYPAD_BUTTON_LOW(KEY_HOME),
+	KEYPAD_BUTTON_LOW(KEY_MENU),
+	KEYPAD_BUTTON_LOW(KEY_BACK),
+	KEYPAD_BUTTON_LOW(KEY_POWER),
+};
+
+static struct tca6416_keys_platform_data mackerel_tca6416_keys_info = {
+	.buttons	= mackerel_gpio_keys,
+	.nbuttons	= ARRAY_SIZE(mackerel_gpio_keys),
+	.rep		= 1,
+	.use_polling	= 0,
+	.pinmask	= 0x000F,
+};
+
+/* I2C */
+#define IRQ7 evt2irq(0x02e0)
+#define IRQ9 evt2irq(0x0320)
+
+static struct i2c_board_info i2c0_devices[] = {
+	{
+		I2C_BOARD_INFO("ak4643", 0x13),
+	},
+	/* Keypad */
+	{
+		I2C_BOARD_INFO("tca6408-keys", 0x20),
+		.platform_data = &mackerel_tca6416_keys_info,
+		.irq = IRQ9,
+	},
+	/* Touchscreen */
+	{
+		I2C_BOARD_INFO("st1232-ts", 0x55),
+		.irq = IRQ7,
+	},
+};
+
+#define IRQ21 evt2irq(0x32a0)
+
+static struct i2c_board_info i2c1_devices[] = {
+	/* Accelerometer */
+	{
+		I2C_BOARD_INFO("adxl34x", 0x53),
+		.irq = IRQ21,
+	},
+};
+
+static struct map_desc mackerel_io_desc[] __initdata = {
+	/* create a 1:1 identity map for 0xe6xxxxxx
+	 * used by CPGA, INTC and PFC.
+	 */
+	{
+		.virtual	= 0xe6000000,
+		.pfn		= __phys_to_pfn(0xe6000000),
+		.length		= 256 << 20,
+		.type		= MT_DEVICE_NONSHARED
+	},
+};
+
+static void __init mackerel_map_io(void)
+{
+	iotable_init(mackerel_io_desc, ARRAY_SIZE(mackerel_io_desc));
+
+	/* setup early devices and console here as well */
+	sh7372_add_early_devices();
+	shmobile_setup_console();
+}
+
+#define GPIO_PORT9CR	0xE6051009
+#define GPIO_PORT10CR	0xE605100A
+#define SRCR4		0xe61580bc
+#define USCCR1		0xE6058144
+static void __init mackerel_init(void)
+{
+	u32 srcr4;
+	struct clk *clk;
+
+	sh7372_pinmux_init();
+
+	/* enable SCIFA0 */
+	gpio_request(GPIO_FN_SCIFA0_TXD, NULL);
+	gpio_request(GPIO_FN_SCIFA0_RXD, NULL);
+
+	/* enable SMSC911X */
+	gpio_request(GPIO_FN_CS5A,	NULL);
+	gpio_request(GPIO_FN_IRQ6_39,	NULL);
+
+	/* LCDC */
+	gpio_request(GPIO_FN_LCDD23,   NULL);
+	gpio_request(GPIO_FN_LCDD22,   NULL);
+	gpio_request(GPIO_FN_LCDD21,   NULL);
+	gpio_request(GPIO_FN_LCDD20,   NULL);
+	gpio_request(GPIO_FN_LCDD19,   NULL);
+	gpio_request(GPIO_FN_LCDD18,   NULL);
+	gpio_request(GPIO_FN_LCDD17,   NULL);
+	gpio_request(GPIO_FN_LCDD16,   NULL);
+	gpio_request(GPIO_FN_LCDD15,   NULL);
+	gpio_request(GPIO_FN_LCDD14,   NULL);
+	gpio_request(GPIO_FN_LCDD13,   NULL);
+	gpio_request(GPIO_FN_LCDD12,   NULL);
+	gpio_request(GPIO_FN_LCDD11,   NULL);
+	gpio_request(GPIO_FN_LCDD10,   NULL);
+	gpio_request(GPIO_FN_LCDD9,    NULL);
+	gpio_request(GPIO_FN_LCDD8,    NULL);
+	gpio_request(GPIO_FN_LCDD7,    NULL);
+	gpio_request(GPIO_FN_LCDD6,    NULL);
+	gpio_request(GPIO_FN_LCDD5,    NULL);
+	gpio_request(GPIO_FN_LCDD4,    NULL);
+	gpio_request(GPIO_FN_LCDD3,    NULL);
+	gpio_request(GPIO_FN_LCDD2,    NULL);
+	gpio_request(GPIO_FN_LCDD1,    NULL);
+	gpio_request(GPIO_FN_LCDD0,    NULL);
+	gpio_request(GPIO_FN_LCDDISP,  NULL);
+	gpio_request(GPIO_FN_LCDDCK,   NULL);
+
+	gpio_request(GPIO_PORT31, NULL); /* backlight */
+	gpio_direction_output(GPIO_PORT31, 1);
+
+	gpio_request(GPIO_PORT151, NULL); /* LCDDON */
+	gpio_direction_output(GPIO_PORT151, 1);
+
+	/* USB enable */
+	gpio_request(GPIO_FN_VBUS0_1,    NULL);
+	gpio_request(GPIO_FN_IDIN_1_18,  NULL);
+	gpio_request(GPIO_FN_PWEN_1_115, NULL);
+	gpio_request(GPIO_FN_OVCN_1_114, NULL);
+	gpio_request(GPIO_FN_EXTLP_1,    NULL);
+	gpio_request(GPIO_FN_OVCN2_1,    NULL);
+
+	/* setup USB phy */
+	__raw_writew(0x8a0a, 0xE6058130);	/* USBCR4 */
+
+	/* enable FSI2 port A (ak4643) */
+	gpio_request(GPIO_FN_FSIAIBT,	NULL);
+	gpio_request(GPIO_FN_FSIAILR,	NULL);
+	gpio_request(GPIO_FN_FSIAISLD,	NULL);
+	gpio_request(GPIO_FN_FSIAOSLD,	NULL);
+	gpio_request(GPIO_PORT161,	NULL);
+	gpio_direction_output(GPIO_PORT161, 0); /* slave */
+
+	gpio_request(GPIO_PORT9,  NULL);
+	gpio_request(GPIO_PORT10, NULL);
+	gpio_no_direction(GPIO_PORT9CR);  /* FSIAOBT needs no direction */
+	gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */
+
+	intc_set_priority(IRQ_FSI, 3); /* irq priority FSI(3) > SMSC911X(2) */
+
+	/* setup FSI2 port B (HDMI) */
+	gpio_request(GPIO_FN_FSIBCK, NULL);
+	__raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
+
+	/* set SPU2 clock to 119.6 MHz */
+	clk = clk_get(NULL, "spu_clk");
+	if (!IS_ERR(clk)) {
+		clk_set_rate(clk, clk_round_rate(clk, 119600000));
+		clk_put(clk);
+	}
+
+	/* enable Keypad */
+	gpio_request(GPIO_FN_IRQ9_42,	NULL);
+	set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH);
+
+	/* enable Touchscreen */
+	gpio_request(GPIO_FN_IRQ7_40,	NULL);
+	set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
+
+	/* enable Accelerometer */
+	gpio_request(GPIO_FN_IRQ21,	NULL);
+	set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
+
+	/* enable SDHI0 */
+	gpio_request(GPIO_FN_SDHICD0, NULL);
+	gpio_request(GPIO_FN_SDHIWP0, NULL);
+	gpio_request(GPIO_FN_SDHICMD0, NULL);
+	gpio_request(GPIO_FN_SDHICLK0, NULL);
+	gpio_request(GPIO_FN_SDHID0_3, NULL);
+	gpio_request(GPIO_FN_SDHID0_2, NULL);
+	gpio_request(GPIO_FN_SDHID0_1, NULL);
+	gpio_request(GPIO_FN_SDHID0_0, NULL);
+
+#if !defined(CONFIG_MMC_SH_MMCIF)
+	/* enable SDHI1 */
+	gpio_request(GPIO_FN_SDHICMD1, NULL);
+	gpio_request(GPIO_FN_SDHICLK1, NULL);
+	gpio_request(GPIO_FN_SDHID1_3, NULL);
+	gpio_request(GPIO_FN_SDHID1_2, NULL);
+	gpio_request(GPIO_FN_SDHID1_1, NULL);
+	gpio_request(GPIO_FN_SDHID1_0, NULL);
+#endif
+	/* card detect pin for MMC slot (CN7) */
+	gpio_request(GPIO_PORT41, NULL);
+	gpio_direction_input(GPIO_PORT41);
+
+	/* enable SDHI2 */
+	gpio_request(GPIO_FN_SDHICMD2, NULL);
+	gpio_request(GPIO_FN_SDHICLK2, NULL);
+	gpio_request(GPIO_FN_SDHID2_3, NULL);
+	gpio_request(GPIO_FN_SDHID2_2, NULL);
+	gpio_request(GPIO_FN_SDHID2_1, NULL);
+	gpio_request(GPIO_FN_SDHID2_0, NULL);
+
+	/* card detect pin for microSD slot (CN23) */
+	gpio_request(GPIO_PORT162, NULL);
+	gpio_direction_input(GPIO_PORT162);
+
+	/* MMCIF */
+	gpio_request(GPIO_FN_MMCD0_0, NULL);
+	gpio_request(GPIO_FN_MMCD0_1, NULL);
+	gpio_request(GPIO_FN_MMCD0_2, NULL);
+	gpio_request(GPIO_FN_MMCD0_3, NULL);
+	gpio_request(GPIO_FN_MMCD0_4, NULL);
+	gpio_request(GPIO_FN_MMCD0_5, NULL);
+	gpio_request(GPIO_FN_MMCD0_6, NULL);
+	gpio_request(GPIO_FN_MMCD0_7, NULL);
+	gpio_request(GPIO_FN_MMCCMD0, NULL);
+	gpio_request(GPIO_FN_MMCCLK0, NULL);
+
+	/* enable GPS module (GT-720F) */
+	gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
+	gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
+
+	/* CEU */
+	gpio_request(GPIO_FN_VIO_CLK, NULL);
+	gpio_request(GPIO_FN_VIO_VD, NULL);
+	gpio_request(GPIO_FN_VIO_HD, NULL);
+	gpio_request(GPIO_FN_VIO_FIELD, NULL);
+	gpio_request(GPIO_FN_VIO_CKO, NULL);
+	gpio_request(GPIO_FN_VIO_D7, NULL);
+	gpio_request(GPIO_FN_VIO_D6, NULL);
+	gpio_request(GPIO_FN_VIO_D5, NULL);
+	gpio_request(GPIO_FN_VIO_D4, NULL);
+	gpio_request(GPIO_FN_VIO_D3, NULL);
+	gpio_request(GPIO_FN_VIO_D2, NULL);
+	gpio_request(GPIO_FN_VIO_D1, NULL);
+	gpio_request(GPIO_FN_VIO_D0, NULL);
+
+	/* HDMI */
+	gpio_request(GPIO_FN_HDMI_HPD, NULL);
+	gpio_request(GPIO_FN_HDMI_CEC, NULL);
+
+	/* Reset HDMI, must be held at least one EXTALR (32768Hz) period */
+	srcr4 = __raw_readl(SRCR4);
+	__raw_writel(srcr4 | (1 << 13), SRCR4);
+	udelay(50);
+	__raw_writel(srcr4 & ~(1 << 13), SRCR4);
+
+	i2c_register_board_info(0, i2c0_devices,
+				ARRAY_SIZE(i2c0_devices));
+	i2c_register_board_info(1, i2c1_devices,
+				ARRAY_SIZE(i2c1_devices));
+
+	sh7372_add_standard_devices();
+
+	platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
+}
+
+static void __init mackerel_timer_init(void)
+{
+	sh7372_clock_init();
+	shmobile_timer.init();
+
+	/* External clock source */
+	clk_set_rate(&sh7372_dv_clki_clk, 27000000);
+}
+
+static struct sys_timer mackerel_timer = {
+	.init		= mackerel_timer_init,
+};
+
+MACHINE_START(MACKEREL, "mackerel")
+	.map_io		= mackerel_map_io,
+	.init_irq	= sh7372_init_irq,
+	.handle_irq	= shmobile_handle_irq_intc,
+	.init_machine	= mackerel_init,
+	.timer		= &mackerel_timer,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
new file mode 100644
index 0000000..720a714
--- /dev/null
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -0,0 +1,356 @@
+/*
+ * sh73a0 clock framework support
+ *
+ * Copyright (C) 2010 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+#include <linux/clkdev.h>
+#include <mach/common.h>
+
+#define FRQCRA		0xe6150000
+#define FRQCRB		0xe6150004
+#define FRQCRD		0xe61500e4
+#define VCLKCR1		0xe6150008
+#define VCLKCR2		0xe615000C
+#define VCLKCR3		0xe615001C
+#define ZBCKCR		0xe6150010
+#define FLCKCR		0xe6150014
+#define SD0CKCR		0xe6150074
+#define SD1CKCR		0xe6150078
+#define SD2CKCR		0xe615007C
+#define FSIACKCR	0xe6150018
+#define FSIBCKCR	0xe6150090
+#define SUBCKCR		0xe6150080
+#define SPUACKCR	0xe6150084
+#define SPUVCKCR	0xe6150094
+#define MSUCKCR		0xe6150088
+#define HSICKCR		0xe615008C
+#define MFCK1CR		0xe6150098
+#define MFCK2CR		0xe615009C
+#define DSITCKCR	0xe6150060
+#define DSI0PCKCR	0xe6150064
+#define DSI1PCKCR	0xe6150068
+#define DSI0PHYCR	0xe615006C
+#define DSI1PHYCR	0xe6150070
+#define PLLECR		0xe61500d0
+#define PLL0CR		0xe61500d8
+#define PLL1CR		0xe6150028
+#define PLL2CR		0xe615002c
+#define PLL3CR		0xe61500dc
+#define SMSTPCR0	0xe6150130
+#define SMSTPCR1	0xe6150134
+#define SMSTPCR2	0xe6150138
+#define SMSTPCR3	0xe615013c
+#define SMSTPCR4	0xe6150140
+#define SMSTPCR5	0xe6150144
+#define CKSCR		0xe61500c0
+
+/* Fixed 32 KHz root clock from EXTALR pin */
+static struct clk r_clk = {
+	.rate           = 32768,
+};
+
+/*
+ * 26MHz default rate for the EXTAL1 root input clock.
+ * If needed, reset this with clk_set_rate() from the platform code.
+ */
+struct clk sh73a0_extal1_clk = {
+	.rate		= 26000000,
+};
+
+/*
+ * 48MHz default rate for the EXTAL2 root input clock.
+ * If needed, reset this with clk_set_rate() from the platform code.
+ */
+struct clk sh73a0_extal2_clk = {
+	.rate		= 48000000,
+};
+
+/* A fixed divide-by-2 block */
+static unsigned long div2_recalc(struct clk *clk)
+{
+	return clk->parent->rate / 2;
+}
+
+static struct clk_ops div2_clk_ops = {
+	.recalc		= div2_recalc,
+};
+
+/* Divide extal1 by two */
+static struct clk extal1_div2_clk = {
+	.ops		= &div2_clk_ops,
+	.parent		= &sh73a0_extal1_clk,
+};
+
+/* Divide extal2 by two */
+static struct clk extal2_div2_clk = {
+	.ops		= &div2_clk_ops,
+	.parent		= &sh73a0_extal2_clk,
+};
+
+static struct clk_ops main_clk_ops = {
+	.recalc		= followparent_recalc,
+};
+
+/* Main clock */
+static struct clk main_clk = {
+	.ops		= &main_clk_ops,
+};
+
+/* PLL0, PLL1, PLL2, PLL3 */
+static unsigned long pll_recalc(struct clk *clk)
+{
+	unsigned long mult = 1;
+
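+	/* PLL enabled: multiplier is bits [29:24] of the control register + 1 */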
+	if (__raw_readl(PLLECR) & (1 << clk->enable_bit))
+		mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1);
+
+	return clk->parent->rate * mult;
+}
+
+static struct clk_ops pll_clk_ops = {
+	.recalc		= pll_recalc,
+};
+
+static struct clk pll0_clk = {
+	.ops		= &pll_clk_ops,
+	.flags		= CLK_ENABLE_ON_INIT,
+	.parent		= &main_clk,
+	.enable_reg	= (void __iomem *)PLL0CR,
+	.enable_bit	= 0,
+};
+
+static struct clk pll1_clk = {
+	.ops		= &pll_clk_ops,
+	.flags		= CLK_ENABLE_ON_INIT,
+	.parent		= &main_clk,
+	.enable_reg	= (void __iomem *)PLL1CR,
+	.enable_bit	= 1,
+};
+
+static struct clk pll2_clk = {
+	.ops		= &pll_clk_ops,
+	.flags		= CLK_ENABLE_ON_INIT,
+	.parent		= &main_clk,
+	.enable_reg	= (void __iomem *)PLL2CR,
+	.enable_bit	= 2,
+};
+
+static struct clk pll3_clk = {
+	.ops		= &pll_clk_ops,
+	.flags		= CLK_ENABLE_ON_INIT,
+	.parent		= &main_clk,
+	.enable_reg	= (void __iomem *)PLL3CR,
+	.enable_bit	= 3,
+};
+
+/* Divide PLL1 by two */
+static struct clk pll1_div2_clk = {
+	.ops		= &div2_clk_ops,
+	.parent		= &pll1_clk,
+};
+
+static struct clk *main_clks[] = {
+	&r_clk,
+	&sh73a0_extal1_clk,
+	&sh73a0_extal2_clk,
+	&extal1_div2_clk,
+	&extal2_div2_clk,
+	&main_clk,
+	&pll0_clk,
+	&pll1_clk,
+	&pll2_clk,
+	&pll3_clk,
+	&pll1_div2_clk,
+};
+
+static void div4_kick(struct clk *clk)
+{
+	unsigned long value;
+
+	/* set KICK bit in FRQCRB to update hardware setting */
+	value = __raw_readl(FRQCRB);
+	value |= (1 << 31);
+	__raw_writel(value, FRQCRB);
+}
+
+static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18,
+			  24, 0, 36, 48, 7 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+	.divisors = divisors,
+	.nr_divisors = ARRAY_SIZE(divisors),
+};
+
+static struct clk_div4_table div4_table = {
+	.div_mult_table = &div4_div_mult_table,
+	.kick = div4_kick,
+};
+
+enum { DIV4_I, DIV4_ZG, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2,
+	DIV4_Z, DIV4_ZTR, DIV4_ZT, DIV4_ZX, DIV4_HP, DIV4_NR };
+
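+/* DIV4 clocks divide PLL1 using 4-bit divider fields in FRQCRA/FRQCRB */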
+#define DIV4(_reg, _bit, _mask, _flags) \
+	SH_CLK_DIV4(&pll1_clk, _reg, _bit, _mask, _flags)
+
+static struct clk div4_clks[DIV4_NR] = {
+	[DIV4_I] = DIV4(FRQCRA, 20, 0xfff, CLK_ENABLE_ON_INIT),
+	[DIV4_ZG] = DIV4(FRQCRA, 16, 0xbff, CLK_ENABLE_ON_INIT),
+	[DIV4_M3] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
+	[DIV4_B] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
+	[DIV4_M1] = DIV4(FRQCRA, 4, 0xfff, 0),
+	[DIV4_M2] = DIV4(FRQCRA, 0, 0xfff, 0),
+	[DIV4_Z] = DIV4(FRQCRB, 24, 0xbff, 0),
+	[DIV4_ZTR] = DIV4(FRQCRB, 20, 0xfff, 0),
+	[DIV4_ZT] = DIV4(FRQCRB, 16, 0xfff, 0),
+	[DIV4_ZX] = DIV4(FRQCRB, 12, 0xfff, 0),
+	[DIV4_HP] = DIV4(FRQCRB, 4, 0xfff, 0),
+};
+
+enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_ZB1,
+	DIV6_FLCTL, DIV6_SDHI0, DIV6_SDHI1, DIV6_SDHI2,
+	DIV6_FSIA, DIV6_FSIB, DIV6_SUB,
+	DIV6_SPUA, DIV6_SPUV, DIV6_MSU,
+	DIV6_HSI,  DIV6_MFG1, DIV6_MFG2,
+	DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P,
+	DIV6_NR };
+
+static struct clk div6_clks[DIV6_NR] = {
+	[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
+	[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
+	[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
+	[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+	[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
+	[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
+	[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
+	[DIV6_SDHI2] = SH_CLK_DIV6(&pll1_div2_clk, SD2CKCR, 0),
+	[DIV6_FSIA] = SH_CLK_DIV6(&pll1_div2_clk, FSIACKCR, 0),
+	[DIV6_FSIB] = SH_CLK_DIV6(&pll1_div2_clk, FSIBCKCR, 0),
+	[DIV6_SUB] = SH_CLK_DIV6(&sh73a0_extal2_clk, SUBCKCR, 0),
+	[DIV6_SPUA] = SH_CLK_DIV6(&pll1_div2_clk, SPUACKCR, 0),
+	[DIV6_SPUV] = SH_CLK_DIV6(&pll1_div2_clk, SPUVCKCR, 0),
+	[DIV6_MSU] = SH_CLK_DIV6(&pll1_div2_clk, MSUCKCR, 0),
+	[DIV6_HSI] = SH_CLK_DIV6(&pll1_div2_clk, HSICKCR, 0),
+	[DIV6_MFG1] = SH_CLK_DIV6(&pll1_div2_clk, MFCK1CR, 0),
+	[DIV6_MFG2] = SH_CLK_DIV6(&pll1_div2_clk, MFCK2CR, 0),
+	[DIV6_DSIT] = SH_CLK_DIV6(&pll1_div2_clk, DSITCKCR, 0),
+	[DIV6_DSI0P] = SH_CLK_DIV6(&pll1_div2_clk, DSI0PCKCR, 0),
+	[DIV6_DSI1P] = SH_CLK_DIV6(&pll1_div2_clk, DSI1PCKCR, 0),
+};
+
+enum { MSTP001,
+	MSTP125, MSTP116,
+	MSTP219,
+	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+	MSTP331, MSTP329, MSTP323, MSTP312,
+	MSTP411, MSTP410, MSTP403,
+	MSTP_NR };
+
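+/* MSTP gate clocks, one module stop bit per peripheral in SMSTPCRn */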
+#define MSTP(_parent, _reg, _bit, _flags) \
+	SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+static struct clk mstp_clks[MSTP_NR] = {
+	[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
+	[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
+	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
+	[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
+	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
+	[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
+	[MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
+	[MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
+	[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
+	[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
+	[MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
+	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
+	[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
+	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
+	[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
+	[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
+	[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
+};
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+
+static struct clk_lookup lookups[] = {
+	/* main clocks */
+	CLKDEV_CON_ID("r_clk", &r_clk),
+
+	/* MSTP32 clocks */
+	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
+	CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
+	CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
+	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
+	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
+	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
+	CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
+	CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
+	CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
+	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
+	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
+	CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
+	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
+	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
+	CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
+	CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
+	CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
+};
+
+void __init sh73a0_clock_init(void)
+{
+	int k, ret = 0;
+
+	/* detect main clock parent */
+	switch ((__raw_readl(CKSCR) >> 24) & 0x03) {
+	case 0:
+		main_clk.parent = &sh73a0_extal1_clk;
+		break;
+	case 1:
+		main_clk.parent = &extal1_div2_clk;
+		break;
+	case 2:
+		main_clk.parent = &sh73a0_extal2_clk;
+		break;
+	case 3:
+		main_clk.parent = &extal2_div2_clk;
+		break;
+	}
+
+	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+		ret = clk_register(main_clks[k]);
+
+	if (!ret)
+		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+	if (!ret)
+		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+	if (!ret)
+		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+
+	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+	if (!ret)
+		clk_init();
+	else
+		panic("failed to setup sh73a0 clocks\n");
+}
diff --git a/arch/arm/mach-shmobile/entry-gic.S b/arch/arm/mach-shmobile/entry-gic.S
new file mode 100644
index 0000000..e20239b
--- /dev/null
+++ b/arch/arm/mach-shmobile/entry-gic.S
@@ -0,0 +1,18 @@
+/*
+ * ARM Interrupt demux handler using GIC
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2011 Paul Mundt
+ * Copyright (C) 2010 - 2011 Renesas Solutions Corp.
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/assembler.h>
+#include <asm/entry-macro-multi.S>
+#include <asm/hardware/gic.h>
+#include <asm/hardware/entry-macro-gic.S>
+
+	arch_irq_handler shmobile_handle_irq_gic
diff --git a/arch/arm/mach-shmobile/entry-intc.S b/arch/arm/mach-shmobile/entry-intc.S
new file mode 100644
index 0000000..cac0a7a
--- /dev/null
+++ b/arch/arm/mach-shmobile/entry-intc.S
@@ -0,0 +1,57 @@
+/*
+ * ARM Interrupt demux handler using INTC
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2008 Renesas Solutions Corp.
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/entry-macro-multi.S>
+
+#define INTCA_BASE	0xe6980000
+#define INTFLGA_OFFS	0x00000018 /* accept pending interrupt */
+#define INTEVTA_OFFS	0x00000020 /* vector number of accepted interrupt */
+#define INTLVLA_OFFS	0x00000030 /* priority level of accepted interrupt */
+#define INTLVLB_OFFS	0x00000034 /* previous priority level */
+
+	.macro  get_irqnr_preamble, base, tmp
+	ldr     \base, =INTCA_BASE
+	.endm
+
+	.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
+	/* The single INTFLGA read access below results in the following:
+	 *
+	 * 1. INTLVLB is updated with old priority value from INTLVLA
+	 * 2. Highest priority interrupt is accepted
+	 * 3. INTLVLA is updated to contain priority of accepted interrupt
+	 * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA
+	 */
+	ldr     \irqnr, [\base, #INTFLGA_OFFS]
+
+	/* Restore INTLVLA with the value saved in INTLVLB.
+	 * This is required to support interrupt priorities properly.
+	 */
+	ldrb	\tmp, [\base, #INTLVLB_OFFS]
+	strb    \tmp, [\base, #INTLVLA_OFFS]
+
+	/* Handle invalid vector number case */
+	cmp	\irqnr, #0
+	beq	1000f
+
+	/* Convert vector to irq number, same as the evt2irq() macro */
+	lsr	\irqnr, \irqnr, #0x5
+	subs	\irqnr, \irqnr, #16
+
+1000:
+	.endm
+
+	.macro  test_for_ipi, irqnr, irqstat, base, tmp
+	.endm
+
+	.macro  test_for_ltirq, irqnr, irqstat, base, tmp
+	.endm
+
+	arch_irq_handler shmobile_handle_irq_intc
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
new file mode 100644
index 0000000..d4cec6b
--- /dev/null
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -0,0 +1,27 @@
+/*
+ * SMP support for R-Mobile / SH-Mobile
+ *
+ * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2010  Takashi Yoshii
+ *
+ * Based on vexpress, Copyright (c) 2003 ARM Limited, All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
+
+	__INIT
+
+/*
+ * Reset vector for secondary CPUs.
+ * This will be mapped at address 0 by the SBAR register.
+ * We need a _long_ jump to the physical address.
+ */
+	.align  12
+ENTRY(shmobile_secondary_vector)
+	ldr     pc, 1f
+1:	.long   secondary_startup - PAGE_OFFSET + PHYS_OFFSET
diff --git a/arch/arm/mach-shmobile/hotplug.c b/arch/arm/mach-shmobile/hotplug.c
new file mode 100644
index 0000000..238a0d9
--- /dev/null
+++ b/arch/arm/mach-shmobile/hotplug.c
@@ -0,0 +1,41 @@
+/*
+ * SMP support for R-Mobile / SH-Mobile
+ *
+ * Copyright (C) 2010  Magnus Damm
+ *
+ * Based on realview, Copyright (C) 2002 ARM Ltd, All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+int platform_cpu_kill(unsigned int cpu)
+{
+	return 1;
+}
+
+void platform_cpu_die(unsigned int cpu)
+{
+	while (1) {
+		/*
+		 * Execute WFI (wait for interrupt), encoded here as a raw opcode
+		 */
+		asm(".word	0xe320f003\n"
+		    :
+		    :
+		    : "memory", "cc");
+	}
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * we don't allow CPU 0 to be shut down (it is still too special,
+	 * e.g. it handles the clock tick interrupts)
+	 */
+	return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index efeef77..013ac0e 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -3,8 +3,11 @@
 
 extern struct sys_timer shmobile_timer;
 extern void shmobile_setup_console(void);
+extern void shmobile_secondary_vector(void);
 struct clk;
 extern int clk_init(void);
+extern void shmobile_handle_irq_intc(struct pt_regs *);
+extern void shmobile_handle_irq_gic(struct pt_regs *);
 
 extern void sh7367_init_irq(void);
 extern void sh7367_add_early_devices(void);
@@ -30,4 +33,17 @@
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;
 
+extern void sh73a0_init_irq(void);
+extern void sh73a0_add_early_devices(void);
+extern void sh73a0_add_standard_devices(void);
+extern void sh73a0_clock_init(void);
+extern void sh73a0_pinmux_init(void);
+extern struct clk sh73a0_extal1_clk;
+extern struct clk sh73a0_extal2_clk;
+
+extern unsigned int sh73a0_get_core_count(void);
+extern void sh73a0_secondary_init(unsigned int cpu);
+extern int sh73a0_boot_secondary(unsigned int cpu);
+extern void sh73a0_smp_prepare_cpus(void);
+
 #endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/include/mach/entry-macro.S b/arch/arm/mach-shmobile/include/mach/entry-macro.S
index f428c4d..d791f10 100644
--- a/arch/arm/mach-shmobile/include/mach/entry-macro.S
+++ b/arch/arm/mach-shmobile/include/mach/entry-macro.S
@@ -1,6 +1,5 @@
 /*
- * Copyright (C) 2010 Magnus Damm
- * Copyright (C) 2008 Renesas Solutions Corp.
+ * Copyright (C) 2010  Paul Mundt
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -15,47 +14,21 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-#include <mach/irqs.h>
-
-#define INTCA_BASE	0xe6980000
-#define INTFLGA_OFFS	0x00000018 /* accept pending interrupt */
-#define INTEVTA_OFFS	0x00000020 /* vector number of accepted interrupt */
-#define INTLVLA_OFFS	0x00000030 /* priority level of accepted interrupt */
-#define INTLVLB_OFFS	0x00000034 /* previous priority level */
 
 	.macro  disable_fiq
 	.endm
 
 	.macro  get_irqnr_preamble, base, tmp
-	ldr     \base, =INTCA_BASE
-	.endm
-
-	.macro  arch_ret_to_user, tmp1, tmp2
 	.endm
 
 	.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
-	/* The single INTFLGA read access below results in the following:
-	 *
-	 * 1. INTLVLB is updated with old priority value from INTLVLA
-	 * 2. Highest priority interrupt is accepted
-	 * 3. INTLVLA is updated to contain priority of accepted interrupt
-	 * 4. Accepted interrupt vector is stored in INTFLGA and INTEVTA
-	 */
-	ldr     \irqnr, [\base, #INTFLGA_OFFS]
+	.endm
 
-	/* Restore INTLVLA with the value saved in INTLVLB.
-	 * This is required to support interrupt priorities properly.
-	 */
-	ldrb	\tmp, [\base, #INTLVLB_OFFS]
-	strb    \tmp, [\base, #INTLVLA_OFFS]
+	.macro  test_for_ipi, irqnr, irqstat, base, tmp
+	.endm
 
-	/* Handle invalid vector number case */
-	cmp	\irqnr, #0
-	beq	1000f
+	.macro  test_for_ltirq, irqnr, irqstat, base, tmp
+	.endm
 
-	/* Convert vector to irq number, same as the evt2irq() macro */
-	lsr	\irqnr, \irqnr, #0x5
-	subs	\irqnr, \irqnr, #16
-
-1000:
+	.macro  arch_ret_to_user, tmp1, tmp2
 	.endm
diff --git a/arch/arm/mach-shmobile/include/mach/hardware.h b/arch/arm/mach-shmobile/include/mach/hardware.h
index 3f0ef19..99264a5 100644
--- a/arch/arm/mach-shmobile/include/mach/hardware.h
+++ b/arch/arm/mach-shmobile/include/mach/hardware.h
@@ -1,7 +1,4 @@
 #ifndef __ASM_MACH_HARDWARE_H
 #define __ASM_MACH_HARDWARE_H
 
-/* INTFLGA register - used by low level interrupt code in entry-macro.S */
-#define INTFLGA			0xe6980018
-
 #endif /* __ASM_MACH_HARDWARE_H */
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index e3ebfa7..efd3687 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -85,3 +85,10 @@
 WAIT 1, 0xFE40009C
 
 ED 0xE6150354, 0x00000002
+
+LIST "SCIF0 - Serial port for earlyprintk"
+EB 0xE6053098, 0x11
+EB 0xE6053098, 0xe1
+EW 0xE6C40000, 0x0000
+EB 0xE6C40004, 0x19
+EW 0xE6C40008, 0x3000
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
new file mode 100644
index 0000000..efd3687
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -0,0 +1,94 @@
+LIST "partner-jet-setup.txt"
+LIST "(C) Copyright 2010 Renesas Solutions Corp"
+LIST "Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"
+
+LIST "RWT Setting"
+EW 0xE6020004, 0xA500
+EW 0xE6030004, 0xA500
+
+DD 0x01001000, 0x01001000
+
+LIST "GPIO Setting"
+EB 0xE6051013, 0xA2
+
+LIST "CPG"
+ED 0xE6150080, 0x00000180
+ED 0xE61500C0, 0x00000002
+
+WAIT 1, 0xFE40009C
+
+LIST "FRQCR"
+ED 0xE6150000, 0x2D1305C3
+ED 0xE61500E0, 0x9E40358E
+ED 0xE6150004, 0x80331050
+
+WAIT 1, 0xFE40009C
+
+ED 0xE61500E4, 0x00002000
+
+WAIT 1, 0xFE40009C
+
+LIST "PLL"
+ED 0xE6150028, 0x00004000
+
+WAIT 1, 0xFE40009C
+
+ED 0xE615002C, 0x93000040
+
+WAIT 1, 0xFE40009C
+
+LIST "BSC"
+ED 0xFEC10000, 0x00E0001B
+
+LIST "SBSC1"
+ED 0xFE400354, 0x01AD8000
+ED 0xFE400354, 0x01AD8001
+
+WAIT 5, 0xFE40009C
+
+ED 0xFE400008, 0xBCC90151
+ED 0xFE400040, 0x41774113
+ED 0xFE400044, 0x2712E229
+ED 0xFE400048, 0x20C18505
+ED 0xFE40004C, 0x00110209
+ED 0xFE400010, 0x00000087
+
+WAIT 10, 0xFE40009C
+
+ED 0xFE400084, 0x0000003F
+EB 0xFE500000, 0x00
+
+WAIT 5, 0xFE40009C
+
+ED 0xFE400084, 0x0000FF0A
+EB 0xFE500000, 0x00
+
+WAIT 1, 0xFE40009C
+
+ED 0xFE400084, 0x00002201
+EB 0xFE500000, 0x00
+ED 0xFE400084, 0x00000302
+EB 0xFE500000, 0x00
+EB 0xFE5C0000, 0x00
+ED 0xFE400008, 0xBCC90159
+ED 0xFE40008C, 0x88800004
+ED 0xFE400094, 0x00000004
+ED 0xFE400028, 0xA55A0032
+ED 0xFE40002C, 0xA55A000C
+ED 0xFE400020, 0xA55A2048
+ED 0xFE400008, 0xBCC90959
+
+LIST "Change CPGA setting"
+ED 0xE61500E0, 0x9E40352E
+ED 0xE6150004, 0x80331050
+
+WAIT 1, 0xFE40009C
+
+ED 0xE6150354, 0x00000002
+
+LIST "SCIF0 - Serial port for earlyprintk"
+EB 0xE6053098, 0x11
+EB 0xE6053098, 0xe1
+EW 0xE6C40000, 0x0000
+EB 0xE6C40004, 0x19
+EW 0xE6C40008, 0x3000
diff --git a/arch/arm/mach-shmobile/include/mach/irqs.h b/arch/arm/mach-shmobile/include/mach/irqs.h
index fa15b5f..dcb714f 100644
--- a/arch/arm/mach-shmobile/include/mach/irqs.h
+++ b/arch/arm/mach-shmobile/include/mach/irqs.h
@@ -1,7 +1,10 @@
 #ifndef __ASM_MACH_IRQS_H
 #define __ASM_MACH_IRQS_H
 
-#define NR_IRQS         512
+#define NR_IRQS         1024
+
+/* GIC */
+#define gic_spi(nr)		((nr) + 32)
 
 /* INTCA */
 #define evt2irq(evt)		(((evt) >> 5) - 16)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index e4f9004..5736efc 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -455,6 +455,8 @@
 	SHDMA_SLAVE_SDHI1_TX,
 	SHDMA_SLAVE_SDHI2_RX,
 	SHDMA_SLAVE_SDHI2_TX,
+	SHDMA_SLAVE_MMCIF_RX,
+	SHDMA_SLAVE_MMCIF_TX,
 };
 
 extern struct clk sh7372_extal1_clk;
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
new file mode 100644
index 0000000..ceb2cdc
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -0,0 +1,467 @@
+#ifndef __ASM_SH73A0_H__
+#define __ASM_SH73A0_H__
+
+/* Pin Function Controller:
+ * GPIO_FN_xx - GPIO used to select pin function and MSEL switch
+ * GPIO_PORTxx - GPIO mapped to real I/O pin on CPU
+ */
+enum {
+	/* Hardware manual Table 25-1 (GPIO) */
+	GPIO_PORT0, GPIO_PORT1, GPIO_PORT2, GPIO_PORT3, GPIO_PORT4,
+	GPIO_PORT5, GPIO_PORT6, GPIO_PORT7, GPIO_PORT8, GPIO_PORT9,
+
+	GPIO_PORT10, GPIO_PORT11, GPIO_PORT12, GPIO_PORT13, GPIO_PORT14,
+	GPIO_PORT15, GPIO_PORT16, GPIO_PORT17, GPIO_PORT18, GPIO_PORT19,
+
+	GPIO_PORT20, GPIO_PORT21, GPIO_PORT22, GPIO_PORT23, GPIO_PORT24,
+	GPIO_PORT25, GPIO_PORT26, GPIO_PORT27, GPIO_PORT28, GPIO_PORT29,
+
+	GPIO_PORT30, GPIO_PORT31, GPIO_PORT32, GPIO_PORT33, GPIO_PORT34,
+	GPIO_PORT35, GPIO_PORT36, GPIO_PORT37, GPIO_PORT38, GPIO_PORT39,
+
+	GPIO_PORT40, GPIO_PORT41, GPIO_PORT42, GPIO_PORT43, GPIO_PORT44,
+	GPIO_PORT45, GPIO_PORT46, GPIO_PORT47, GPIO_PORT48, GPIO_PORT49,
+
+	GPIO_PORT50, GPIO_PORT51, GPIO_PORT52, GPIO_PORT53, GPIO_PORT54,
+	GPIO_PORT55, GPIO_PORT56, GPIO_PORT57, GPIO_PORT58, GPIO_PORT59,
+
+	GPIO_PORT60, GPIO_PORT61, GPIO_PORT62, GPIO_PORT63, GPIO_PORT64,
+	GPIO_PORT65, GPIO_PORT66, GPIO_PORT67, GPIO_PORT68, GPIO_PORT69,
+
+	GPIO_PORT70, GPIO_PORT71, GPIO_PORT72, GPIO_PORT73, GPIO_PORT74,
+	GPIO_PORT75, GPIO_PORT76, GPIO_PORT77, GPIO_PORT78, GPIO_PORT79,
+
+	GPIO_PORT80, GPIO_PORT81, GPIO_PORT82, GPIO_PORT83, GPIO_PORT84,
+	GPIO_PORT85, GPIO_PORT86, GPIO_PORT87, GPIO_PORT88, GPIO_PORT89,
+
+	GPIO_PORT90, GPIO_PORT91, GPIO_PORT92, GPIO_PORT93, GPIO_PORT94,
+	GPIO_PORT95, GPIO_PORT96, GPIO_PORT97, GPIO_PORT98, GPIO_PORT99,
+
+	GPIO_PORT100, GPIO_PORT101, GPIO_PORT102, GPIO_PORT103, GPIO_PORT104,
+	GPIO_PORT105, GPIO_PORT106, GPIO_PORT107, GPIO_PORT108, GPIO_PORT109,
+
+	GPIO_PORT110, GPIO_PORT111, GPIO_PORT112, GPIO_PORT113, GPIO_PORT114,
+	GPIO_PORT115, GPIO_PORT116, GPIO_PORT117, GPIO_PORT118,
+
+	GPIO_PORT128, GPIO_PORT129,
+
+	GPIO_PORT130, GPIO_PORT131, GPIO_PORT132, GPIO_PORT133, GPIO_PORT134,
+	GPIO_PORT135, GPIO_PORT136, GPIO_PORT137, GPIO_PORT138, GPIO_PORT139,
+
+	GPIO_PORT140, GPIO_PORT141, GPIO_PORT142, GPIO_PORT143, GPIO_PORT144,
+	GPIO_PORT145, GPIO_PORT146, GPIO_PORT147, GPIO_PORT148, GPIO_PORT149,
+
+	GPIO_PORT150, GPIO_PORT151, GPIO_PORT152, GPIO_PORT153, GPIO_PORT154,
+	GPIO_PORT155, GPIO_PORT156, GPIO_PORT157, GPIO_PORT158, GPIO_PORT159,
+
+	GPIO_PORT160, GPIO_PORT161, GPIO_PORT162, GPIO_PORT163, GPIO_PORT164,
+
+	GPIO_PORT192, GPIO_PORT193, GPIO_PORT194,
+	GPIO_PORT195, GPIO_PORT196, GPIO_PORT197, GPIO_PORT198, GPIO_PORT199,
+
+	GPIO_PORT200, GPIO_PORT201, GPIO_PORT202, GPIO_PORT203, GPIO_PORT204,
+	GPIO_PORT205, GPIO_PORT206, GPIO_PORT207, GPIO_PORT208, GPIO_PORT209,
+
+	GPIO_PORT210, GPIO_PORT211, GPIO_PORT212, GPIO_PORT213, GPIO_PORT214,
+	GPIO_PORT215, GPIO_PORT216, GPIO_PORT217, GPIO_PORT218, GPIO_PORT219,
+
+	GPIO_PORT220, GPIO_PORT221, GPIO_PORT222, GPIO_PORT223, GPIO_PORT224,
+	GPIO_PORT225, GPIO_PORT226, GPIO_PORT227, GPIO_PORT228, GPIO_PORT229,
+
+	GPIO_PORT230, GPIO_PORT231, GPIO_PORT232, GPIO_PORT233, GPIO_PORT234,
+	GPIO_PORT235, GPIO_PORT236, GPIO_PORT237, GPIO_PORT238, GPIO_PORT239,
+
+	GPIO_PORT240, GPIO_PORT241, GPIO_PORT242, GPIO_PORT243, GPIO_PORT244,
+	GPIO_PORT245, GPIO_PORT246, GPIO_PORT247, GPIO_PORT248, GPIO_PORT249,
+
+	GPIO_PORT250, GPIO_PORT251, GPIO_PORT252, GPIO_PORT253, GPIO_PORT254,
+	GPIO_PORT255, GPIO_PORT256, GPIO_PORT257, GPIO_PORT258, GPIO_PORT259,
+
+	GPIO_PORT260, GPIO_PORT261, GPIO_PORT262, GPIO_PORT263, GPIO_PORT264,
+	GPIO_PORT265, GPIO_PORT266, GPIO_PORT267, GPIO_PORT268, GPIO_PORT269,
+
+	GPIO_PORT270, GPIO_PORT271, GPIO_PORT272, GPIO_PORT273, GPIO_PORT274,
+	GPIO_PORT275, GPIO_PORT276, GPIO_PORT277, GPIO_PORT278, GPIO_PORT279,
+
+	GPIO_PORT280, GPIO_PORT281, GPIO_PORT282,
+
+	GPIO_PORT288, GPIO_PORT289,
+
+	GPIO_PORT290, GPIO_PORT291, GPIO_PORT292, GPIO_PORT293, GPIO_PORT294,
+	GPIO_PORT295, GPIO_PORT296, GPIO_PORT297, GPIO_PORT298, GPIO_PORT299,
+
+	GPIO_PORT300, GPIO_PORT301, GPIO_PORT302, GPIO_PORT303, GPIO_PORT304,
+	GPIO_PORT305, GPIO_PORT306, GPIO_PORT307, GPIO_PORT308, GPIO_PORT309,
+
+	/* Table 25-1 (Function 0-7) */
+	GPIO_FN_VBUS_0,
+	GPIO_FN_GPI0,
+	GPIO_FN_GPI1,
+	GPIO_FN_GPI2,
+	GPIO_FN_GPI3,
+	GPIO_FN_GPI4,
+	GPIO_FN_GPI5,
+	GPIO_FN_GPI6,
+	GPIO_FN_GPI7,
+	GPIO_FN_SCIFA7_RXD,
+	GPIO_FN_SCIFA7_CTS_,
+	GPIO_FN_GPO7, GPIO_FN_MFG0_OUT2,
+	GPIO_FN_GPO6, GPIO_FN_MFG1_OUT2,
+	GPIO_FN_GPO5, GPIO_FN_SCIFA0_SCK, GPIO_FN_FSICOSLDT3, \
+	GPIO_FN_PORT16_VIO_CKOR,
+	GPIO_FN_SCIFA0_TXD,
+	GPIO_FN_SCIFA7_TXD,
+	GPIO_FN_SCIFA7_RTS_, GPIO_FN_PORT19_VIO_CKO2,
+	GPIO_FN_GPO0,
+	GPIO_FN_GPO1,
+	GPIO_FN_GPO2, GPIO_FN_STATUS0,
+	GPIO_FN_GPO3, GPIO_FN_STATUS1,
+	GPIO_FN_GPO4, GPIO_FN_STATUS2,
+	GPIO_FN_VINT,
+	GPIO_FN_TCKON,
+	GPIO_FN_XDVFS1, GPIO_FN_PORT27_I2C_SCL2, GPIO_FN_PORT27_I2C_SCL3, \
+	GPIO_FN_MFG0_OUT1, GPIO_FN_PORT27_IROUT,
+	GPIO_FN_XDVFS2, GPIO_FN_PORT28_I2C_SDA2, GPIO_FN_PORT28_I2C_SDA3, \
+	GPIO_FN_PORT28_TPU1TO1,
+	GPIO_FN_SIM_RST, GPIO_FN_PORT29_TPU1TO1,
+	GPIO_FN_SIM_CLK, GPIO_FN_PORT30_VIO_CKOR,
+	GPIO_FN_SIM_D, GPIO_FN_PORT31_IROUT,
+	GPIO_FN_SCIFA4_TXD,
+	GPIO_FN_SCIFA4_RXD, GPIO_FN_XWUP,
+	GPIO_FN_SCIFA4_RTS_,
+	GPIO_FN_SCIFA4_CTS_,
+	GPIO_FN_FSIBOBT, GPIO_FN_FSIBIBT,
+	GPIO_FN_FSIBOLR, GPIO_FN_FSIBILR,
+	GPIO_FN_FSIBOSLD,
+	GPIO_FN_FSIBISLD,
+	GPIO_FN_VACK,
+	GPIO_FN_XTAL1L,
+	GPIO_FN_SCIFA0_RTS_, GPIO_FN_FSICOSLDT2,
+	GPIO_FN_SCIFA0_RXD,
+	GPIO_FN_SCIFA0_CTS_, GPIO_FN_FSICOSLDT1,
+	GPIO_FN_FSICOBT, GPIO_FN_FSICIBT, GPIO_FN_FSIDOBT, GPIO_FN_FSIDIBT,
+	GPIO_FN_FSICOLR, GPIO_FN_FSICILR, GPIO_FN_FSIDOLR, GPIO_FN_FSIDILR,
+	GPIO_FN_FSICOSLD, GPIO_FN_PORT47_FSICSPDIF,
+	GPIO_FN_FSICISLD, GPIO_FN_FSIDISLD,
+	GPIO_FN_FSIACK, GPIO_FN_PORT49_IRDA_OUT, GPIO_FN_PORT49_IROUT, \
+	GPIO_FN_FSIAOMC,
+	GPIO_FN_FSIAOLR, GPIO_FN_BBIF2_TSYNC2, GPIO_FN_TPU2TO2, GPIO_FN_FSIAILR,
+
+	GPIO_FN_FSIAOBT, GPIO_FN_BBIF2_TSCK2, GPIO_FN_TPU2TO3, GPIO_FN_FSIAIBT,
+	GPIO_FN_FSIAOSLD, GPIO_FN_BBIF2_TXD2,
+	GPIO_FN_FSIASPDIF, GPIO_FN_PORT53_IRDA_IN, GPIO_FN_TPU3TO3, \
+	GPIO_FN_FSIBSPDIF, GPIO_FN_PORT53_FSICSPDIF,
+	GPIO_FN_FSIBCK, GPIO_FN_PORT54_IRDA_FIRSEL, GPIO_FN_TPU3TO2, \
+	GPIO_FN_FSIBOMC, GPIO_FN_FSICCK, GPIO_FN_FSICOMC,
+	GPIO_FN_FSIAISLD, GPIO_FN_TPU0TO0,
+	GPIO_FN_A0, GPIO_FN_BS_,
+	GPIO_FN_A12, GPIO_FN_PORT58_KEYOUT7, GPIO_FN_TPU4TO2,
+	GPIO_FN_A13, GPIO_FN_PORT59_KEYOUT6, GPIO_FN_TPU0TO1,
+	GPIO_FN_A14, GPIO_FN_KEYOUT5,
+	GPIO_FN_A15, GPIO_FN_KEYOUT4,
+	GPIO_FN_A16, GPIO_FN_KEYOUT3, GPIO_FN_MSIOF0_SS1,
+	GPIO_FN_A17, GPIO_FN_KEYOUT2, GPIO_FN_MSIOF0_TSYNC,
+	GPIO_FN_A18, GPIO_FN_KEYOUT1, GPIO_FN_MSIOF0_TSCK,
+	GPIO_FN_A19, GPIO_FN_KEYOUT0, GPIO_FN_MSIOF0_TXD,
+	GPIO_FN_A20, GPIO_FN_KEYIN0, GPIO_FN_MSIOF0_RSCK,
+	GPIO_FN_A21, GPIO_FN_KEYIN1, GPIO_FN_MSIOF0_RSYNC,
+	GPIO_FN_A22, GPIO_FN_KEYIN2, GPIO_FN_MSIOF0_MCK0,
+	GPIO_FN_A23, GPIO_FN_KEYIN3, GPIO_FN_MSIOF0_MCK1,
+	GPIO_FN_A24, GPIO_FN_KEYIN4, GPIO_FN_MSIOF0_RXD,
+	GPIO_FN_A25, GPIO_FN_KEYIN5, GPIO_FN_MSIOF0_SS2,
+	GPIO_FN_A26, GPIO_FN_KEYIN6,
+	GPIO_FN_KEYIN7,
+	GPIO_FN_D0_NAF0,
+	GPIO_FN_D1_NAF1,
+	GPIO_FN_D2_NAF2,
+	GPIO_FN_D3_NAF3,
+	GPIO_FN_D4_NAF4,
+	GPIO_FN_D5_NAF5,
+	GPIO_FN_D6_NAF6,
+	GPIO_FN_D7_NAF7,
+	GPIO_FN_D8_NAF8,
+	GPIO_FN_D9_NAF9,
+	GPIO_FN_D10_NAF10,
+	GPIO_FN_D11_NAF11,
+	GPIO_FN_D12_NAF12,
+	GPIO_FN_D13_NAF13,
+	GPIO_FN_D14_NAF14,
+	GPIO_FN_D15_NAF15,
+	GPIO_FN_CS4_,
+	GPIO_FN_CS5A_, GPIO_FN_PORT91_RDWR,
+	GPIO_FN_CS5B_, GPIO_FN_FCE1_,
+	GPIO_FN_CS6B_, GPIO_FN_DACK0,
+	GPIO_FN_FCE0_, GPIO_FN_CS6A_,
+	GPIO_FN_WAIT_, GPIO_FN_DREQ0,
+	GPIO_FN_RD__FSC,
+	GPIO_FN_WE0__FWE, GPIO_FN_RDWR_FWE,
+	GPIO_FN_WE1_,
+	GPIO_FN_FRB,
+	GPIO_FN_CKO,
+	GPIO_FN_NBRSTOUT_,
+	GPIO_FN_NBRST_,
+	GPIO_FN_BBIF2_TXD,
+	GPIO_FN_BBIF2_RXD,
+	GPIO_FN_BBIF2_SYNC,
+	GPIO_FN_BBIF2_SCK,
+	GPIO_FN_SCIFA3_CTS_, GPIO_FN_MFG3_IN2,
+	GPIO_FN_SCIFA3_RXD, GPIO_FN_MFG3_IN1,
+	GPIO_FN_BBIF1_SS2, GPIO_FN_SCIFA3_RTS_, GPIO_FN_MFG3_OUT1,
+	GPIO_FN_SCIFA3_TXD,
+	GPIO_FN_HSI_RX_DATA, GPIO_FN_BBIF1_RXD,
+	GPIO_FN_HSI_TX_WAKE, GPIO_FN_BBIF1_TSCK,
+	GPIO_FN_HSI_TX_DATA, GPIO_FN_BBIF1_TSYNC,
+	GPIO_FN_HSI_TX_READY, GPIO_FN_BBIF1_TXD,
+	GPIO_FN_HSI_RX_READY, GPIO_FN_BBIF1_RSCK, GPIO_FN_PORT115_I2C_SCL2, \
+	GPIO_FN_PORT115_I2C_SCL3,
+	GPIO_FN_HSI_RX_WAKE, GPIO_FN_BBIF1_RSYNC, GPIO_FN_PORT116_I2C_SDA2, \
+	GPIO_FN_PORT116_I2C_SDA3,
+	GPIO_FN_HSI_RX_FLAG, GPIO_FN_BBIF1_SS1, GPIO_FN_BBIF1_FLOW,
+	GPIO_FN_HSI_TX_FLAG,
+	GPIO_FN_VIO_VD, GPIO_FN_PORT128_LCD2VSYN, GPIO_FN_VIO2_VD, \
+	GPIO_FN_LCD2D0,
+
+	GPIO_FN_VIO_HD, GPIO_FN_PORT129_LCD2HSYN, GPIO_FN_PORT129_LCD2CS_, \
+	GPIO_FN_VIO2_HD, GPIO_FN_LCD2D1,
+	GPIO_FN_VIO_D0, GPIO_FN_PORT130_MSIOF2_RXD, GPIO_FN_LCD2D10,
+	GPIO_FN_VIO_D1, GPIO_FN_PORT131_KEYOUT6, GPIO_FN_PORT131_MSIOF2_SS1, \
+	GPIO_FN_PORT131_KEYOUT11, GPIO_FN_LCD2D11,
+	GPIO_FN_VIO_D2, GPIO_FN_PORT132_KEYOUT7, GPIO_FN_PORT132_MSIOF2_SS2, \
+	GPIO_FN_PORT132_KEYOUT10, GPIO_FN_LCD2D12,
+	GPIO_FN_VIO_D3, GPIO_FN_MSIOF2_TSYNC, GPIO_FN_LCD2D13,
+	GPIO_FN_VIO_D4, GPIO_FN_MSIOF2_TXD, GPIO_FN_LCD2D14,
+	GPIO_FN_VIO_D5, GPIO_FN_MSIOF2_TSCK, GPIO_FN_LCD2D15,
+	GPIO_FN_VIO_D6, GPIO_FN_PORT136_KEYOUT8, GPIO_FN_LCD2D16,
+	GPIO_FN_VIO_D7, GPIO_FN_PORT137_KEYOUT9, GPIO_FN_LCD2D17,
+	GPIO_FN_VIO_D8, GPIO_FN_PORT138_KEYOUT8, GPIO_FN_VIO2_D0, \
+	GPIO_FN_LCD2D6,
+	GPIO_FN_VIO_D9, GPIO_FN_PORT139_KEYOUT9, GPIO_FN_VIO2_D1, \
+	GPIO_FN_LCD2D7,
+	GPIO_FN_VIO_D10, GPIO_FN_TPU0TO2, GPIO_FN_VIO2_D2, GPIO_FN_LCD2D8,
+	GPIO_FN_VIO_D11, GPIO_FN_TPU0TO3, GPIO_FN_VIO2_D3, GPIO_FN_LCD2D9,
+	GPIO_FN_VIO_D12, GPIO_FN_PORT142_KEYOUT10, GPIO_FN_VIO2_D4, \
+	GPIO_FN_LCD2D2,
+	GPIO_FN_VIO_D13, GPIO_FN_PORT143_KEYOUT11, GPIO_FN_PORT143_KEYOUT6, \
+	GPIO_FN_VIO2_D5, GPIO_FN_LCD2D3,
+	GPIO_FN_VIO_D14, GPIO_FN_PORT144_KEYOUT7, GPIO_FN_VIO2_D6, \
+	GPIO_FN_LCD2D4,
+	GPIO_FN_VIO_D15, GPIO_FN_TPU1TO3, GPIO_FN_PORT145_LCD2DISP, \
+	GPIO_FN_PORT145_LCD2RS, GPIO_FN_VIO2_D7, GPIO_FN_LCD2D5,
+	GPIO_FN_VIO_CLK, GPIO_FN_LCD2DCK, GPIO_FN_PORT146_LCD2WR_, \
+	GPIO_FN_VIO2_CLK, GPIO_FN_LCD2D18,
+	GPIO_FN_VIO_FIELD, GPIO_FN_LCD2RD_, GPIO_FN_VIO2_FIELD, GPIO_FN_LCD2D19,
+	GPIO_FN_VIO_CKO,
+	GPIO_FN_A27, GPIO_FN_PORT149_RDWR, GPIO_FN_MFG0_IN1, \
+	GPIO_FN_PORT149_KEYOUT9,
+	GPIO_FN_MFG0_IN2,
+	GPIO_FN_TS_SPSYNC3, GPIO_FN_MSIOF2_RSCK,
+	GPIO_FN_TS_SDAT3, GPIO_FN_MSIOF2_RSYNC,
+	GPIO_FN_TPU1TO2, GPIO_FN_TS_SDEN3, GPIO_FN_PORT153_MSIOF2_SS1,
+	GPIO_FN_SCIFA2_TXD1, GPIO_FN_MSIOF2_MCK0,
+	GPIO_FN_SCIFA2_RXD1, GPIO_FN_MSIOF2_MCK1,
+	GPIO_FN_SCIFA2_RTS1_, GPIO_FN_PORT156_MSIOF2_SS2,
+	GPIO_FN_SCIFA2_CTS1_, GPIO_FN_PORT157_MSIOF2_RXD,
+	GPIO_FN_DINT_, GPIO_FN_SCIFA2_SCK1, GPIO_FN_TS_SCK3,
+	GPIO_FN_PORT159_SCIFB_SCK, GPIO_FN_PORT159_SCIFA5_SCK, GPIO_FN_NMI,
+	GPIO_FN_PORT160_SCIFB_TXD, GPIO_FN_PORT160_SCIFA5_TXD,
+	GPIO_FN_PORT161_SCIFB_CTS_, GPIO_FN_PORT161_SCIFA5_CTS_,
+	GPIO_FN_PORT162_SCIFB_RXD, GPIO_FN_PORT162_SCIFA5_RXD,
+	GPIO_FN_PORT163_SCIFB_RTS_, GPIO_FN_PORT163_SCIFA5_RTS_, \
+	GPIO_FN_TPU3TO0,
+	GPIO_FN_LCDD0,
+	GPIO_FN_LCDD1, GPIO_FN_PORT193_SCIFA5_CTS_, GPIO_FN_BBIF2_TSYNC1,
+	GPIO_FN_LCDD2, GPIO_FN_PORT194_SCIFA5_RTS_, GPIO_FN_BBIF2_TSCK1,
+	GPIO_FN_LCDD3, GPIO_FN_PORT195_SCIFA5_RXD, GPIO_FN_BBIF2_TXD1,
+	GPIO_FN_LCDD4, GPIO_FN_PORT196_SCIFA5_TXD,
+	GPIO_FN_LCDD5, GPIO_FN_PORT197_SCIFA5_SCK, GPIO_FN_MFG2_OUT2, \
+	GPIO_FN_TPU2TO1,
+	GPIO_FN_LCDD6,
+	GPIO_FN_LCDD7, GPIO_FN_TPU4TO1, GPIO_FN_MFG4_OUT2,
+	GPIO_FN_LCDD8, GPIO_FN_D16,
+	GPIO_FN_LCDD9, GPIO_FN_D17,
+	GPIO_FN_LCDD10, GPIO_FN_D18,
+	GPIO_FN_LCDD11, GPIO_FN_D19,
+	GPIO_FN_LCDD12, GPIO_FN_D20,
+	GPIO_FN_LCDD13, GPIO_FN_D21,
+	GPIO_FN_LCDD14, GPIO_FN_D22,
+	GPIO_FN_LCDD15, GPIO_FN_PORT207_MSIOF0L_SS1, GPIO_FN_D23,
+	GPIO_FN_LCDD16, GPIO_FN_PORT208_MSIOF0L_SS2, GPIO_FN_D24,
+	GPIO_FN_LCDD17, GPIO_FN_D25,
+	GPIO_FN_LCDD18, GPIO_FN_DREQ2, GPIO_FN_PORT210_MSIOF0L_SS1, GPIO_FN_D26,
+	GPIO_FN_LCDD19, GPIO_FN_PORT211_MSIOF0L_SS2, GPIO_FN_D27,
+	GPIO_FN_LCDD20, GPIO_FN_TS_SPSYNC1, GPIO_FN_MSIOF0L_MCK0, GPIO_FN_D28,
+	GPIO_FN_LCDD21, GPIO_FN_TS_SDAT1, GPIO_FN_MSIOF0L_MCK1, GPIO_FN_D29,
+	GPIO_FN_LCDD22, GPIO_FN_TS_SDEN1, GPIO_FN_MSIOF0L_RSCK, GPIO_FN_D30,
+	GPIO_FN_LCDD23, GPIO_FN_TS_SCK1, GPIO_FN_MSIOF0L_RSYNC, GPIO_FN_D31,
+	GPIO_FN_LCDDCK, GPIO_FN_LCDWR_,
+	GPIO_FN_LCDRD_, GPIO_FN_DACK2, GPIO_FN_PORT217_LCD2RS, \
+	GPIO_FN_MSIOF0L_TSYNC, GPIO_FN_VIO2_FIELD3, GPIO_FN_PORT217_LCD2DISP,
+	GPIO_FN_LCDHSYN, GPIO_FN_LCDCS_, GPIO_FN_LCDCS2_, GPIO_FN_DACK3, \
+	GPIO_FN_PORT218_VIO_CKOR,
+	GPIO_FN_LCDDISP, GPIO_FN_LCDRS, GPIO_FN_PORT219_LCD2WR_, \
+	GPIO_FN_DREQ3, GPIO_FN_MSIOF0L_TSCK, GPIO_FN_VIO2_CLK3, \
+	GPIO_FN_LCD2DCK_2,
+	GPIO_FN_LCDVSYN, GPIO_FN_LCDVSYN2,
+	GPIO_FN_LCDLCLK, GPIO_FN_DREQ1, GPIO_FN_PORT221_LCD2CS_, \
+	GPIO_FN_PWEN, GPIO_FN_MSIOF0L_RXD, GPIO_FN_VIO2_HD3, \
+	GPIO_FN_PORT221_LCD2HSYN,
+	GPIO_FN_LCDDON, GPIO_FN_LCDDON2, GPIO_FN_DACK1, GPIO_FN_OVCN, \
+	GPIO_FN_MSIOF0L_TXD, GPIO_FN_VIO2_VD3, GPIO_FN_PORT222_LCD2VSYN,
+
+	GPIO_FN_SCIFA1_TXD, GPIO_FN_OVCN2,
+	GPIO_FN_EXTLP, GPIO_FN_SCIFA1_SCK, GPIO_FN_PORT226_VIO_CKO2,
+	GPIO_FN_SCIFA1_RTS_, GPIO_FN_IDIN,
+	GPIO_FN_SCIFA1_RXD,
+	GPIO_FN_SCIFA1_CTS_, GPIO_FN_MFG1_IN1,
+	GPIO_FN_MSIOF1_TXD, GPIO_FN_SCIFA2_TXD2,
+	GPIO_FN_MSIOF1_TSYNC, GPIO_FN_SCIFA2_CTS2_,
+	GPIO_FN_MSIOF1_TSCK, GPIO_FN_SCIFA2_SCK2,
+	GPIO_FN_MSIOF1_RXD, GPIO_FN_SCIFA2_RXD2,
+	GPIO_FN_MSIOF1_RSCK, GPIO_FN_SCIFA2_RTS2_, GPIO_FN_VIO2_CLK2, \
+	GPIO_FN_LCD2D20,
+	GPIO_FN_MSIOF1_RSYNC, GPIO_FN_MFG1_IN2, GPIO_FN_VIO2_VD2, \
+	GPIO_FN_LCD2D21,
+	GPIO_FN_MSIOF1_MCK0, GPIO_FN_PORT236_I2C_SDA2,
+	GPIO_FN_MSIOF1_MCK1, GPIO_FN_PORT237_I2C_SCL2,
+	GPIO_FN_MSIOF1_SS1, GPIO_FN_VIO2_FIELD2, GPIO_FN_LCD2D22,
+	GPIO_FN_MSIOF1_SS2, GPIO_FN_VIO2_HD2, GPIO_FN_LCD2D23,
+	GPIO_FN_SCIFA6_TXD,
+	GPIO_FN_PORT241_IRDA_OUT, GPIO_FN_PORT241_IROUT, GPIO_FN_MFG4_OUT1, \
+	GPIO_FN_TPU4TO0,
+	GPIO_FN_PORT242_IRDA_IN, GPIO_FN_MFG4_IN2,
+	GPIO_FN_PORT243_IRDA_FIRSEL, GPIO_FN_PORT243_VIO_CKO2,
+	GPIO_FN_PORT244_SCIFA5_CTS_, GPIO_FN_MFG2_IN1, \
+	GPIO_FN_PORT244_SCIFB_CTS_, GPIO_FN_MSIOF2R_RXD,
+	GPIO_FN_PORT245_SCIFA5_RTS_, GPIO_FN_MFG2_IN2, \
+	GPIO_FN_PORT245_SCIFB_RTS_, GPIO_FN_MSIOF2R_TXD,
+	GPIO_FN_PORT246_SCIFA5_RXD, GPIO_FN_MFG1_OUT1, \
+	GPIO_FN_PORT246_SCIFB_RXD, GPIO_FN_TPU1TO0,
+	GPIO_FN_PORT247_SCIFA5_TXD, GPIO_FN_MFG3_OUT2, \
+	GPIO_FN_PORT247_SCIFB_TXD, GPIO_FN_TPU3TO1,
+	GPIO_FN_PORT248_SCIFA5_SCK, GPIO_FN_MFG2_OUT1, \
+	GPIO_FN_PORT248_SCIFB_SCK, GPIO_FN_TPU2TO0, \
+	GPIO_FN_PORT248_I2C_SCL3, GPIO_FN_MSIOF2R_TSCK,
+	GPIO_FN_PORT249_IROUT, GPIO_FN_MFG4_IN1, \
+	GPIO_FN_PORT249_I2C_SDA3, GPIO_FN_MSIOF2R_TSYNC,
+	GPIO_FN_SDHICLK0,
+	GPIO_FN_SDHICD0,
+	GPIO_FN_SDHID0_0,
+	GPIO_FN_SDHID0_1,
+	GPIO_FN_SDHID0_2,
+	GPIO_FN_SDHID0_3,
+	GPIO_FN_SDHICMD0,
+	GPIO_FN_SDHIWP0,
+	GPIO_FN_SDHICLK1,
+	GPIO_FN_SDHID1_0, GPIO_FN_TS_SPSYNC2,
+	GPIO_FN_SDHID1_1, GPIO_FN_TS_SDAT2,
+	GPIO_FN_SDHID1_2, GPIO_FN_TS_SDEN2,
+	GPIO_FN_SDHID1_3, GPIO_FN_TS_SCK2,
+	GPIO_FN_SDHICMD1,
+	GPIO_FN_SDHICLK2,
+	GPIO_FN_SDHID2_0, GPIO_FN_TS_SPSYNC4,
+	GPIO_FN_SDHID2_1, GPIO_FN_TS_SDAT4,
+	GPIO_FN_SDHID2_2, GPIO_FN_TS_SDEN4,
+	GPIO_FN_SDHID2_3, GPIO_FN_TS_SCK4,
+	GPIO_FN_SDHICMD2,
+	GPIO_FN_MMCCLK0,
+	GPIO_FN_MMCD0_0,
+	GPIO_FN_MMCD0_1,
+	GPIO_FN_MMCD0_2,
+	GPIO_FN_MMCD0_3,
+	GPIO_FN_MMCD0_4, GPIO_FN_TS_SPSYNC5,
+	GPIO_FN_MMCD0_5, GPIO_FN_TS_SDAT5,
+	GPIO_FN_MMCD0_6, GPIO_FN_TS_SDEN5,
+	GPIO_FN_MMCD0_7, GPIO_FN_TS_SCK5,
+	GPIO_FN_MMCCMD0,
+	GPIO_FN_RESETOUTS_, GPIO_FN_EXTAL2OUT,
+	GPIO_FN_MCP_WAIT__MCP_FRB,
+	GPIO_FN_MCP_CKO, GPIO_FN_MMCCLK1,
+	GPIO_FN_MCP_D15_MCP_NAF15,
+	GPIO_FN_MCP_D14_MCP_NAF14,
+	GPIO_FN_MCP_D13_MCP_NAF13,
+	GPIO_FN_MCP_D12_MCP_NAF12,
+	GPIO_FN_MCP_D11_MCP_NAF11,
+	GPIO_FN_MCP_D10_MCP_NAF10,
+	GPIO_FN_MCP_D9_MCP_NAF9,
+	GPIO_FN_MCP_D8_MCP_NAF8, GPIO_FN_MMCCMD1,
+	GPIO_FN_MCP_D7_MCP_NAF7, GPIO_FN_MMCD1_7,
+
+	GPIO_FN_MCP_D6_MCP_NAF6, GPIO_FN_MMCD1_6,
+	GPIO_FN_MCP_D5_MCP_NAF5, GPIO_FN_MMCD1_5,
+	GPIO_FN_MCP_D4_MCP_NAF4, GPIO_FN_MMCD1_4,
+	GPIO_FN_MCP_D3_MCP_NAF3, GPIO_FN_MMCD1_3,
+	GPIO_FN_MCP_D2_MCP_NAF2, GPIO_FN_MMCD1_2,
+	GPIO_FN_MCP_D1_MCP_NAF1, GPIO_FN_MMCD1_1,
+	GPIO_FN_MCP_D0_MCP_NAF0, GPIO_FN_MMCD1_0,
+	GPIO_FN_MCP_NBRSTOUT_,
+	GPIO_FN_MCP_WE0__MCP_FWE, GPIO_FN_MCP_RDWR_MCP_FWE,
+
+	/* MSEL2 special case */
+	GPIO_FN_TSIF2_TS_XX1,
+	GPIO_FN_TSIF2_TS_XX2,
+	GPIO_FN_TSIF2_TS_XX3,
+	GPIO_FN_TSIF2_TS_XX4,
+	GPIO_FN_TSIF2_TS_XX5,
+	GPIO_FN_TSIF1_TS_XX1,
+	GPIO_FN_TSIF1_TS_XX2,
+	GPIO_FN_TSIF1_TS_XX3,
+	GPIO_FN_TSIF1_TS_XX4,
+	GPIO_FN_TSIF1_TS_XX5,
+	GPIO_FN_TSIF0_TS_XX1,
+	GPIO_FN_TSIF0_TS_XX2,
+	GPIO_FN_TSIF0_TS_XX3,
+	GPIO_FN_TSIF0_TS_XX4,
+	GPIO_FN_TSIF0_TS_XX5,
+	GPIO_FN_MST1_TS_XX1,
+	GPIO_FN_MST1_TS_XX2,
+	GPIO_FN_MST1_TS_XX3,
+	GPIO_FN_MST1_TS_XX4,
+	GPIO_FN_MST1_TS_XX5,
+	GPIO_FN_MST0_TS_XX1,
+	GPIO_FN_MST0_TS_XX2,
+	GPIO_FN_MST0_TS_XX3,
+	GPIO_FN_MST0_TS_XX4,
+	GPIO_FN_MST0_TS_XX5,
+
+	/* MSEL3 special cases */
+	GPIO_FN_SDHI0_VCCQ_MC0_ON,
+	GPIO_FN_SDHI0_VCCQ_MC0_OFF,
+	GPIO_FN_DEBUG_MON_VIO,
+	GPIO_FN_DEBUG_MON_LCDD,
+	GPIO_FN_LCDC_LCDC0,
+	GPIO_FN_LCDC_LCDC1,
+
+	/* MSEL4 special cases */
+	GPIO_FN_IRQ9_MEM_INT,
+	GPIO_FN_IRQ9_MCP_INT,
+	GPIO_FN_A11,
+	GPIO_FN_KEYOUT8,
+	GPIO_FN_TPU4TO3,
+	GPIO_FN_RESETA_N_PU_ON,
+	GPIO_FN_RESETA_N_PU_OFF,
+	GPIO_FN_EDBGREQ_PD,
+	GPIO_FN_EDBGREQ_PU,
+
+	/* Functions with pull-ups */
+	GPIO_FN_KEYIN0_PU,
+	GPIO_FN_KEYIN1_PU,
+	GPIO_FN_KEYIN2_PU,
+	GPIO_FN_KEYIN3_PU,
+	GPIO_FN_KEYIN4_PU,
+	GPIO_FN_KEYIN5_PU,
+	GPIO_FN_KEYIN6_PU,
+	GPIO_FN_KEYIN7_PU,
+	GPIO_FN_SDHID1_0_PU,
+	GPIO_FN_SDHID1_1_PU,
+	GPIO_FN_SDHID1_2_PU,
+	GPIO_FN_SDHID1_3_PU,
+	GPIO_FN_SDHICMD1_PU,
+	GPIO_FN_MMCCMD0_PU,
+	GPIO_FN_MMCCMD1_PU,
+	GPIO_FN_FSIACK_PU,
+	GPIO_FN_FSIAILR_PU,
+	GPIO_FN_FSIAIBT_PU,
+	GPIO_FN_FSIAISLD_PU,
+};
+
+#endif /* __ASM_SH73A0_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/smp.h b/arch/arm/mach-shmobile/include/mach/smp.h
new file mode 100644
index 0000000..50db94e
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/smp.h
@@ -0,0 +1,16 @@
+#ifndef __MACH_SMP_H
+#define __MACH_SMP_H
+
+#include <asm/hardware/gic.h>
+
+/*
+ * IPIs are raised as GIC software-generated interrupts
+ */
+static inline void smp_cross_call(const struct cpumask *mask, int ipi)
+{
+#if defined(CONFIG_ARM_GIC)
+	gic_raise_softirq(mask, ipi);
+#endif
+}
+
+#endif
diff --git a/arch/arm/mach-shmobile/include/mach/zboot.h b/arch/arm/mach-shmobile/include/mach/zboot.h
index 3ad86b7..6d6a205 100644
--- a/arch/arm/mach-shmobile/include/mach/zboot.h
+++ b/arch/arm/mach-shmobile/include/mach/zboot.h
@@ -13,6 +13,9 @@
 #ifdef CONFIG_MACH_AP4EVB
 #define MACH_TYPE	MACH_TYPE_AP4EVB
 #include "mach/head-ap4evb.txt"
+#elif defined(CONFIG_MACH_MACKEREL)
+#define MACH_TYPE	MACH_TYPE_MACKEREL
+#include "mach/head-mackerel.txt"
 #else
 #error "unsupported board."
 #endif
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index 1a20c48..2fe9704 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -189,10 +189,10 @@
 	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
 	    0, 0, MSIOF2, 0 } },
 	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    TTI20, USBDMAC_USHDMI, SPU, SIU } },
 	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
 	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
@@ -207,7 +207,7 @@
 	  { 0, 0, TPU0, TPU1,
 	    TPU2, TPU3, TPU4, 0 } },
 	{ 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    MISTY, CMT3, RWDT1, RWDT0 } },
 };
 
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 30b2f40..f78a1ea 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -230,10 +230,10 @@
 	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
 	    0, 0, MSIOF2, 0 } },
 	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
-	  { 0, DISABLED, ENABLED, ENABLED,
+	  { 0, ENABLED, ENABLED, ENABLED,
 	    TTI20, USBHSDMAC0_USHDMI, 0, 0 } },
 	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
 	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 2cdeb8c..dd56838 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -234,10 +234,10 @@
 	  { SCIFB, SCIFA5, SCIFA4, MSIOF1,
 	    0, 0, MSIOF2, 0 } },
 	{ 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    TTI20, USBDMAC_USHDMI, 0, MSUG } },
 	{ 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */
 	  { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10,
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
new file mode 100644
index 0000000..322d8d5
--- /dev/null
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -0,0 +1,267 @@
+/*
+ * sh73a0 processor support - INTC hardware block
+ *
+ * Copyright (C) 2010  Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sh_intc.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+enum {
+	UNUSED = 0,
+
+	/* interrupt sources INTCS */
+	PINTCS_PINT1, PINTCS_PINT2,
+	RTDMAC_0_DEI0, RTDMAC_0_DEI1, RTDMAC_0_DEI2, RTDMAC_0_DEI3,
+	CEU, MFI, BBIF2, VPU, TSIF1, _3DG_SGX543, _2DDMAC_2DDM0,
+	RTDMAC_1_DEI4, RTDMAC_1_DEI5, RTDMAC_1_DADERR,
+	KEYSC_KEY, VINT, MSIOF,
+	TMU0_TUNI00, TMU0_TUNI01, TMU0_TUNI02,
+	CMT0, TSIF0, CMT2, LMB, MSUG, MSU_MSU, MSU_MSU2,
+	CTI, RWDT0, ICB, PEP, ASA, JPU_JPEG, LCDC, LCRC,
+	RTDMAC_2_DEI6, RTDMAC_2_DEI7, RTDMAC_2_DEI8, RTDMAC_2_DEI9,
+	RTDMAC_3_DEI10, RTDMAC_3_DEI11,
+	FRC, GCU, LCDC1, CSIRX,
+	DSITX0_DSITX00, DSITX0_DSITX01,
+	SPU2_SPU0, SPU2_SPU1, FSI,
+	TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12,
+	TSIF2, CMT4, MFIS2, CPORTS2R, TSG, DMASCH1, SCUW,
+	VIO60, VIO61, CEU21, CSI21, DSITX1_DSITX10, DSITX1_DSITX11,
+	DISP, DSRV, EMUX2_EMUX20I, EMUX2_EMUX21I,
+	MSTIF0_MST00I, MSTIF0_MST01I, MSTIF1_MST10I, MSTIF1_MST11I,
+	SPUV,
+
+	/* interrupt groups INTCS */
+	RTDMAC_0, RTDMAC_1, RTDMAC_2, RTDMAC_3,
+	DSITX0, SPU2, TMU1, MSU,
+};
+
+static struct intc_vect intcs_vectors[] = {
+	INTCS_VECT(PINTCS_PINT1, 0x0600), INTCS_VECT(PINTCS_PINT2, 0x0620),
+	INTCS_VECT(RTDMAC_0_DEI0, 0x0800), INTCS_VECT(RTDMAC_0_DEI1, 0x0820),
+	INTCS_VECT(RTDMAC_0_DEI2, 0x0840), INTCS_VECT(RTDMAC_0_DEI3, 0x0860),
+	INTCS_VECT(CEU, 0x0880), INTCS_VECT(MFI, 0x0900),
+	INTCS_VECT(BBIF2, 0x0960), INTCS_VECT(VPU, 0x0980),
+	INTCS_VECT(TSIF1, 0x09a0), INTCS_VECT(_3DG_SGX543, 0x09e0),
+	INTCS_VECT(_2DDMAC_2DDM0, 0x0a00),
+	INTCS_VECT(RTDMAC_1_DEI4, 0x0b80), INTCS_VECT(RTDMAC_1_DEI5, 0x0ba0),
+	INTCS_VECT(RTDMAC_1_DADERR, 0x0bc0),
+	INTCS_VECT(KEYSC_KEY, 0x0be0), INTCS_VECT(VINT, 0x0c80),
+	INTCS_VECT(MSIOF, 0x0d20),
+	INTCS_VECT(TMU0_TUNI00, 0x0e80), INTCS_VECT(TMU0_TUNI01, 0x0ea0),
+	INTCS_VECT(TMU0_TUNI02, 0x0ec0),
+	INTCS_VECT(CMT0, 0x0f00), INTCS_VECT(TSIF0, 0x0f20),
+	INTCS_VECT(CMT2, 0x0f40), INTCS_VECT(LMB, 0x0f60),
+	INTCS_VECT(MSUG, 0x0f80),
+	INTCS_VECT(MSU_MSU, 0x0fa0), INTCS_VECT(MSU_MSU2, 0x0fc0),
+	INTCS_VECT(CTI, 0x0400), INTCS_VECT(RWDT0, 0x0440),
+	INTCS_VECT(ICB, 0x0480), INTCS_VECT(PEP, 0x04a0),
+	INTCS_VECT(ASA, 0x04c0), INTCS_VECT(JPU_JPEG, 0x0560),
+	INTCS_VECT(LCDC, 0x0580), INTCS_VECT(LCRC, 0x05a0),
+	INTCS_VECT(RTDMAC_2_DEI6, 0x1300), INTCS_VECT(RTDMAC_2_DEI7, 0x1320),
+	INTCS_VECT(RTDMAC_2_DEI8, 0x1340), INTCS_VECT(RTDMAC_2_DEI9, 0x1360),
+	INTCS_VECT(RTDMAC_3_DEI10, 0x1380), INTCS_VECT(RTDMAC_3_DEI11, 0x13a0),
+	INTCS_VECT(FRC, 0x1700), INTCS_VECT(GCU, 0x1760),
+	INTCS_VECT(LCDC1, 0x1780), INTCS_VECT(CSIRX, 0x17a0),
+	INTCS_VECT(DSITX0_DSITX00, 0x17c0), INTCS_VECT(DSITX0_DSITX01, 0x17e0),
+	INTCS_VECT(SPU2_SPU0, 0x1800), INTCS_VECT(SPU2_SPU1, 0x1820),
+	INTCS_VECT(FSI, 0x1840),
+	INTCS_VECT(TMU1_TUNI10, 0x1900), INTCS_VECT(TMU1_TUNI11, 0x1920),
+	INTCS_VECT(TMU1_TUNI12, 0x1940),
+	INTCS_VECT(TSIF2, 0x1960), INTCS_VECT(CMT4, 0x1980),
+	INTCS_VECT(MFIS2, 0x1a00), INTCS_VECT(CPORTS2R, 0x1a20),
+	INTCS_VECT(TSG, 0x1ae0), INTCS_VECT(DMASCH1, 0x1b00),
+	INTCS_VECT(SCUW, 0x1b40),
+	INTCS_VECT(VIO60, 0x1b60), INTCS_VECT(VIO61, 0x1b80),
+	INTCS_VECT(CEU21, 0x1ba0), INTCS_VECT(CSI21, 0x1be0),
+	INTCS_VECT(DSITX1_DSITX10, 0x1c00), INTCS_VECT(DSITX1_DSITX11, 0x1c20),
+	INTCS_VECT(DISP, 0x1c40), INTCS_VECT(DSRV, 0x1c60),
+	INTCS_VECT(EMUX2_EMUX20I, 0x1c80), INTCS_VECT(EMUX2_EMUX21I, 0x1ca0),
+	INTCS_VECT(MSTIF0_MST00I, 0x1cc0), INTCS_VECT(MSTIF0_MST01I, 0x1ce0),
+	INTCS_VECT(MSTIF1_MST10I, 0x1d00), INTCS_VECT(MSTIF1_MST11I, 0x1d20),
+	INTCS_VECT(SPUV, 0x2300),
+};
+
+static struct intc_group intcs_groups[] __initdata = {
+	INTC_GROUP(RTDMAC_0, RTDMAC_0_DEI0, RTDMAC_0_DEI1,
+		   RTDMAC_0_DEI2, RTDMAC_0_DEI3),
+	INTC_GROUP(RTDMAC_1, RTDMAC_1_DEI4, RTDMAC_1_DEI5, RTDMAC_1_DADERR),
+	INTC_GROUP(RTDMAC_2, RTDMAC_2_DEI6, RTDMAC_2_DEI7,
+		   RTDMAC_2_DEI8, RTDMAC_2_DEI9),
+	INTC_GROUP(RTDMAC_3, RTDMAC_3_DEI10, RTDMAC_3_DEI11),
+	INTC_GROUP(TMU1, TMU1_TUNI12, TMU1_TUNI11, TMU1_TUNI10),
+	INTC_GROUP(DSITX0, DSITX0_DSITX00, DSITX0_DSITX01),
+	INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1),
+	INTC_GROUP(MSU, MSU_MSU, MSU_MSU2),
+};
+
+static struct intc_mask_reg intcs_mask_registers[] = {
+	{ 0xffd20184, 0xffd201c4, 8, /* IMR1SA / IMCR1SA */
+	  { 0, 0, 0, CEU,
+	    0, 0, 0, 0 } },
+	{ 0xffd20188, 0xffd201c8, 8, /* IMR2SA / IMCR2SA */
+	  { 0, 0, 0, VPU,
+	    BBIF2, 0, 0, MFI } },
+	{ 0xffd2018c, 0xffd201cc, 8, /* IMR3SA / IMCR3SA */
+	  { 0, 0, 0, _2DDMAC_2DDM0,
+	    0, ASA, PEP, ICB } },
+	{ 0xffd20190, 0xffd201d0, 8, /* IMR4SA / IMCR4SA */
+	  { 0, 0, 0, CTI,
+	    JPU_JPEG, 0, LCRC, LCDC } },
+	{ 0xffd20194, 0xffd201d4, 8, /* IMR5SA / IMCR5SA */
+	  { KEYSC_KEY, RTDMAC_1_DADERR, RTDMAC_1_DEI5, RTDMAC_1_DEI4,
+	    RTDMAC_0_DEI3, RTDMAC_0_DEI2, RTDMAC_0_DEI1, RTDMAC_0_DEI0 } },
+	{ 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
+	  { 0, 0, MSIOF, 0,
+	    _3DG_SGX543, 0, 0, 0 } },
+	{ 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
+	  { 0, TMU0_TUNI02, TMU0_TUNI01, TMU0_TUNI00,
+	    0, 0, 0, 0 } },
+	{ 0xffd201a0, 0xffd201e0, 8, /* IMR8SA / IMCR8SA */
+	  { 0, 0, 0, 0,
+	    0, MSU_MSU, MSU_MSU2, MSUG } },
+	{ 0xffd201a4, 0xffd201e4, 8, /* IMR9SA / IMCR9SA */
+	  { 0, RWDT0, CMT2, CMT0,
+	    0, 0, 0, 0 } },
+	{ 0xffd201ac, 0xffd201ec, 8, /* IMR11SA / IMCR11SA */
+	  { 0, 0, 0, 0,
+	    0, TSIF1, LMB, TSIF0 } },
+	{ 0xffd201b0, 0xffd201f0, 8, /* IMR12SA / IMCR12SA */
+	  { 0, 0, 0, 0,
+	    0, 0, PINTCS_PINT2, PINTCS_PINT1 } },
+	{ 0xffd50180, 0xffd501c0, 8, /* IMR0SA3 / IMCR0SA3 */
+	  { RTDMAC_2_DEI6, RTDMAC_2_DEI7, RTDMAC_2_DEI8, RTDMAC_2_DEI9,
+	    RTDMAC_3_DEI10, RTDMAC_3_DEI11, 0, 0 } },
+	{ 0xffd50190, 0xffd501d0, 8, /* IMR4SA3 / IMCR4SA3 */
+	  { FRC, 0, 0, GCU,
+	    LCDC1, CSIRX, DSITX0_DSITX00, DSITX0_DSITX01 } },
+	{ 0xffd50194, 0xffd501d4, 8, /* IMR5SA3 / IMCR5SA3 */
+	  { SPU2_SPU0, SPU2_SPU1, FSI, 0,
+	    0, 0, 0, 0 } },
+	{ 0xffd50198, 0xffd501d8, 8, /* IMR6SA3 / IMCR6SA3 */
+	  { TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, 0,
+	    TSIF2, CMT4, 0, 0 } },
+	{ 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */
+	  { MFIS2, CPORTS2R, 0, 0,
+	    0, 0, 0, TSG } },
+	{ 0xffd501a0, 0xffd501e0, 8, /* IMR8SA3 / IMCR8SA3 */
+	  { DMASCH1, 0, SCUW, VIO60,
+	    VIO61, CEU21, 0, CSI21 } },
+	{ 0xffd501a4, 0xffd501e4, 8, /* IMR9SA3 / IMCR9SA3 */
+	  { DSITX1_DSITX10, DSITX1_DSITX11, DISP, DSRV,
+	    EMUX2_EMUX20I, EMUX2_EMUX21I, MSTIF0_MST00I, MSTIF0_MST01I } },
+	{ 0xffd501a8, 0xffd501e8, 8, /* IMR10SA3 / IMCR10SA3 */
+	  { MSTIF0_MST00I, MSTIF0_MST01I, 0, 0,
+	    0, 0, 0, 0  } },
+	{ 0xffd60180, 0xffd601c0, 8, /* IMR0SA4 / IMCR0SA4 */
+	  { SPUV, 0, 0, 0,
+	    0, 0, 0, 0  } },
+};
+
+/* Priority is needed for INTCA to receive the INTCS interrupt */
+static struct intc_prio_reg intcs_prio_registers[] = {
+	{ 0xffd20000, 0, 16, 4, /* IPRAS */ { CTI, 0, _2DDMAC_2DDM0, ICB } },
+	{ 0xffd20004, 0, 16, 4, /* IPRBS */ { JPU_JPEG, LCDC, 0, LCRC } },
+	{ 0xffd20008, 0, 16, 4, /* IPRCS */ { BBIF2, 0, 0, 0 } },
+	{ 0xffd2000c, 0, 16, 4, /* IPRDS */ { PINTCS_PINT1, PINTCS_PINT2,
+					      0, 0 } },
+	{ 0xffd20010, 0, 16, 4, /* IPRES */ { RTDMAC_0, CEU, MFI, VPU } },
+	{ 0xffd20014, 0, 16, 4, /* IPRFS */ { KEYSC_KEY, RTDMAC_1,
+					      CMT2, CMT0 } },
+	{ 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU0_TUNI00, TMU0_TUNI01,
+					      TMU0_TUNI02, TSIF1 } },
+	{ 0xffd2001c, 0, 16, 4, /* IPRHS */ { VINT, 0, 0, 0 } },
+	{ 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, 0 } },
+	{ 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX543, MSUG, MSU } },
+	{ 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, ASA, LMB, PEP } },
+	{ 0xffd20030, 0, 16, 4, /* IPRMS */ { 0, 0, 0, RWDT0 } },
+	{ 0xffd50000, 0, 16, 4, /* IPRAS3 */ { RTDMAC_2, 0, 0, 0 } },
+	{ 0xffd50004, 0, 16, 4, /* IPRBS3 */ { RTDMAC_3, 0, 0, 0 } },
+	{ 0xffd50020, 0, 16, 4, /* IPRIS3 */ { FRC, 0, 0, 0 } },
+	{ 0xffd50024, 0, 16, 4, /* IPRJS3 */ { LCDC1, CSIRX, DSITX0, 0 } },
+	{ 0xffd50028, 0, 16, 4, /* IPRKS3 */ { SPU2, 0, FSI, 0 } },
+	{ 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, TSIF2 } },
+	{ 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, 0, 0, 0 } },
+	{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { MFIS2, CPORTS2R, 0, 0 } },
+	{ 0xffd50040, 0, 16, 4, /* IPRQS3 */ { DMASCH1, 0, SCUW, VIO60 } },
+	{ 0xffd50044, 0, 16, 4, /* IPRRS3 */ { VIO61, CEU21, 0, CSI21 } },
+	{ 0xffd50048, 0, 16, 4, /* IPRSS3 */ { DSITX1_DSITX10, DSITX1_DSITX11,
+					       DISP, DSRV } },
+	{ 0xffd5004c, 0, 16, 4, /* IPRTS3 */ { EMUX2_EMUX20I, EMUX2_EMUX21I,
+					       MSTIF0_MST00I, MSTIF0_MST01I } },
+	{ 0xffd50050, 0, 16, 4, /* IPRUS3 */ { MSTIF1_MST10I, MSTIF1_MST11I,
+					       0, 0 } },
+	{ 0xffd60000, 0, 16, 4, /* IPRAS4 */ { SPUV, 0, 0, 0 } },
+};
+
+static struct resource intcs_resources[] __initdata = {
+	[0] = {
+		.start	= 0xffd20000,
+		.end	= 0xffd201ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= 0xffd50000,
+		.end	= 0xffd501ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[2] = {
+		.start	= 0xffd60000,
+		.end	= 0xffd601ff,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct intc_desc intcs_desc __initdata = {
+	.name = "sh73a0-intcs",
+	.resource = intcs_resources,
+	.num_resources = ARRAY_SIZE(intcs_resources),
+	.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
+			   intcs_prio_registers, NULL, NULL),
+};
+
+static struct irqaction sh73a0_intcs_cascade;
+
+static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
+{
+	unsigned int evtcodeas = ioread32((void __iomem *)dev_id);
+
+	generic_handle_irq(intcs_evt2irq(evtcodeas));
+
+	return IRQ_HANDLED;
+}
+
+void __init sh73a0_init_irq(void)
+{
+	void __iomem *gic_base = __io(0xf0001000);
+	void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
+
+	gic_init(0, 29, gic_base, gic_base);
+
+	register_intc_controller(&intcs_desc);
+
+	/* demux using INTEVTSA */
+	sh73a0_intcs_cascade.name = "INTCS cascade";
+	sh73a0_intcs_cascade.handler = sh73a0_intcs_demux;
+	sh73a0_intcs_cascade.dev_id = intevtsa;
+	setup_irq(gic_spi(50), &sh73a0_intcs_cascade);
+}
diff --git a/arch/arm/mach-shmobile/localtimer.c b/arch/arm/mach-shmobile/localtimer.c
new file mode 100644
index 0000000..2111c28
--- /dev/null
+++ b/arch/arm/mach-shmobile/localtimer.c
@@ -0,0 +1,25 @@
+/*
+ * SMP support for R-Mobile / SH-Mobile - local timer portion
+ *
+ * Copyright (C) 2010  Magnus Damm
+ *
+ * Based on vexpress, Copyright (C) 2002 ARM Ltd, All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/clockchips.h>
+#include <asm/smp_twd.h>
+#include <asm/localtimer.h>
+
+/*
+ * Set up the local clock events for a CPU.
+ */
+void __cpuinit local_timer_setup(struct clock_event_device *evt)
+{
+	evt->irq = 29;
+	twd_timer_setup(evt);
+}
diff --git a/arch/arm/mach-shmobile/pfc-sh73a0.c b/arch/arm/mach-shmobile/pfc-sh73a0.c
new file mode 100644
index 0000000..3eed44e
--- /dev/null
+++ b/arch/arm/mach-shmobile/pfc-sh73a0.c
@@ -0,0 +1,2746 @@
+/*
+ * sh73a0 processor support - PFC hardware block
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ * Copyright (C) 2010 NISHIMOTO Hiroki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <mach/sh73a0.h>
+
+#define _1(fn, pfx, sfx) fn(pfx, sfx)
+
+#define _10(fn, pfx, sfx)				\
+	_1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx),	\
+	_1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx),	\
+	_1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx),	\
+	_1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx),	\
+	_1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx)
+
+#define _310(fn, pfx, sfx)				\
+	_10(fn, pfx,    sfx), _10(fn, pfx##1, sfx),	\
+	_10(fn, pfx##2, sfx), _10(fn, pfx##3, sfx),	\
+	_10(fn, pfx##4, sfx), _10(fn, pfx##5, sfx),	\
+	_10(fn, pfx##6, sfx), _10(fn, pfx##7, sfx),	\
+	_10(fn, pfx##8, sfx), _10(fn, pfx##9, sfx),	\
+	_10(fn, pfx##10, sfx),				\
+	_1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx),	\
+	_1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx),	\
+	_1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx),	\
+	_1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx),	\
+	_1(fn, pfx##118, sfx),				\
+	_1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx),	\
+	_10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx),	\
+	_10(fn, pfx##15, sfx),				\
+	_1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx),	\
+	_1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx),	\
+	_1(fn, pfx##164, sfx),				\
+	_1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx),	\
+	_1(fn, pfx##194, sfx), _1(fn, pfx##195, sfx),	\
+	_1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx),	\
+	_1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx),	\
+	_10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx),	\
+	_10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx),	\
+	_10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx),	\
+	_10(fn, pfx##26, sfx), _10(fn, pfx##27, sfx),	\
+	_1(fn, pfx##280, sfx), _1(fn, pfx##281, sfx),	\
+	_1(fn, pfx##282, sfx),				\
+	_1(fn, pfx##288, sfx), _1(fn, pfx##289, sfx),	\
+	_10(fn, pfx##29, sfx), _10(fn, pfx##30, sfx)
+
+#define _PORT(pfx, sfx) pfx##_##sfx
+#define PORT_310(str) _310(_PORT, PORT, str)
+
+enum {
+	PINMUX_RESERVED = 0,
+
+	PINMUX_DATA_BEGIN,
+	PORT_310(DATA),			/* PORT0_DATA -> PORT309_DATA */
+	PINMUX_DATA_END,
+
+	PINMUX_INPUT_BEGIN,
+	PORT_310(IN),			/* PORT0_IN -> PORT309_IN */
+	PINMUX_INPUT_END,
+
+	PINMUX_INPUT_PULLUP_BEGIN,
+	PORT_310(IN_PU),		/* PORT0_IN_PU -> PORT309_IN_PU */
+	PINMUX_INPUT_PULLUP_END,
+
+	PINMUX_INPUT_PULLDOWN_BEGIN,
+	PORT_310(IN_PD),		/* PORT0_IN_PD -> PORT309_IN_PD */
+	PINMUX_INPUT_PULLDOWN_END,
+
+	PINMUX_OUTPUT_BEGIN,
+	PORT_310(OUT),			/* PORT0_OUT -> PORT309_OUT */
+	PINMUX_OUTPUT_END,
+
+	PINMUX_FUNCTION_BEGIN,
+	PORT_310(FN_IN),		/* PORT0_FN_IN -> PORT309_FN_IN */
+	PORT_310(FN_OUT),		/* PORT0_FN_OUT -> PORT309_FN_OUT */
+	PORT_310(FN0),			/* PORT0_FN0 -> PORT309_FN0 */
+	PORT_310(FN1),			/* PORT0_FN1 -> PORT309_FN1 */
+	PORT_310(FN2),			/* PORT0_FN2 -> PORT309_FN2 */
+	PORT_310(FN3),			/* PORT0_FN3 -> PORT309_FN3 */
+	PORT_310(FN4),			/* PORT0_FN4 -> PORT309_FN4 */
+	PORT_310(FN5),			/* PORT0_FN5 -> PORT309_FN5 */
+	PORT_310(FN6),			/* PORT0_FN6 -> PORT309_FN6 */
+	PORT_310(FN7),			/* PORT0_FN7 -> PORT309_FN7 */
+
+	MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+	MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+	MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+	MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+	MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+	MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+	MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+	MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+	MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+	MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+	MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+	MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+	MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+	MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+	MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+	MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+	MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+	MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+	MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+	MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+	MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+	MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+	MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+	MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+	MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+	MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+	MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+	MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+	MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+	MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+	MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+	MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+	MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+	MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+	MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+	MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+	MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+	MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+	MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+	MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+	MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+	MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+	PINMUX_FUNCTION_END,
+
+	PINMUX_MARK_BEGIN,
+	/* Hardware manual Table 25-1 (Function 0-7) */
+	VBUS_0_MARK,
+	GPI0_MARK,
+	GPI1_MARK,
+	GPI2_MARK,
+	GPI3_MARK,
+	GPI4_MARK,
+	GPI5_MARK,
+	GPI6_MARK,
+	GPI7_MARK,
+	SCIFA7_RXD_MARK,
+	SCIFA7_CTS__MARK,
+	GPO7_MARK, MFG0_OUT2_MARK,
+	GPO6_MARK, MFG1_OUT2_MARK,
+	GPO5_MARK, SCIFA0_SCK_MARK, FSICOSLDT3_MARK, PORT16_VIO_CKOR_MARK,
+	SCIFA0_TXD_MARK,
+	SCIFA7_TXD_MARK,
+	SCIFA7_RTS__MARK, PORT19_VIO_CKO2_MARK,
+	GPO0_MARK,
+	GPO1_MARK,
+	GPO2_MARK, STATUS0_MARK,
+	GPO3_MARK, STATUS1_MARK,
+	GPO4_MARK, STATUS2_MARK,
+	VINT_MARK,
+	TCKON_MARK,
+	XDVFS1_MARK, PORT27_I2C_SCL2_MARK, PORT27_I2C_SCL3_MARK, \
+	MFG0_OUT1_MARK, PORT27_IROUT_MARK,
+	XDVFS2_MARK, PORT28_I2C_SDA2_MARK, PORT28_I2C_SDA3_MARK, \
+	PORT28_TPU1TO1_MARK,
+	SIM_RST_MARK, PORT29_TPU1TO1_MARK,
+	SIM_CLK_MARK, PORT30_VIO_CKOR_MARK,
+	SIM_D_MARK, PORT31_IROUT_MARK,
+	SCIFA4_TXD_MARK,
+	SCIFA4_RXD_MARK, XWUP_MARK,
+	SCIFA4_RTS__MARK,
+	SCIFA4_CTS__MARK,
+	FSIBOBT_MARK, FSIBIBT_MARK,
+	FSIBOLR_MARK, FSIBILR_MARK,
+	FSIBOSLD_MARK,
+	FSIBISLD_MARK,
+	VACK_MARK,
+	XTAL1L_MARK,
+	SCIFA0_RTS__MARK, FSICOSLDT2_MARK,
+	SCIFA0_RXD_MARK,
+	SCIFA0_CTS__MARK, FSICOSLDT1_MARK,
+	FSICOBT_MARK, FSICIBT_MARK, FSIDOBT_MARK, FSIDIBT_MARK,
+	FSICOLR_MARK, FSICILR_MARK, FSIDOLR_MARK, FSIDILR_MARK,
+	FSICOSLD_MARK, PORT47_FSICSPDIF_MARK,
+	FSICISLD_MARK, FSIDISLD_MARK,
+	FSIACK_MARK, PORT49_IRDA_OUT_MARK, PORT49_IROUT_MARK, FSIAOMC_MARK,
+	FSIAOLR_MARK, BBIF2_TSYNC2_MARK, TPU2TO2_MARK, FSIAILR_MARK,
+
+	FSIAOBT_MARK, BBIF2_TSCK2_MARK, TPU2TO3_MARK, FSIAIBT_MARK,
+	FSIAOSLD_MARK, BBIF2_TXD2_MARK,
+	FSIASPDIF_MARK, PORT53_IRDA_IN_MARK, TPU3TO3_MARK, FSIBSPDIF_MARK, \
+	PORT53_FSICSPDIF_MARK,
+	FSIBCK_MARK, PORT54_IRDA_FIRSEL_MARK, TPU3TO2_MARK, FSIBOMC_MARK, \
+	FSICCK_MARK, FSICOMC_MARK,
+	FSIAISLD_MARK, TPU0TO0_MARK,
+	A0_MARK, BS__MARK,
+	A12_MARK, PORT58_KEYOUT7_MARK, TPU4TO2_MARK,
+	A13_MARK, PORT59_KEYOUT6_MARK, TPU0TO1_MARK,
+	A14_MARK, KEYOUT5_MARK,
+	A15_MARK, KEYOUT4_MARK,
+	A16_MARK, KEYOUT3_MARK, MSIOF0_SS1_MARK,
+	A17_MARK, KEYOUT2_MARK, MSIOF0_TSYNC_MARK,
+	A18_MARK, KEYOUT1_MARK, MSIOF0_TSCK_MARK,
+	A19_MARK, KEYOUT0_MARK, MSIOF0_TXD_MARK,
+	A20_MARK, KEYIN0_MARK, MSIOF0_RSCK_MARK,
+	A21_MARK, KEYIN1_MARK, MSIOF0_RSYNC_MARK,
+	A22_MARK, KEYIN2_MARK, MSIOF0_MCK0_MARK,
+	A23_MARK, KEYIN3_MARK, MSIOF0_MCK1_MARK,
+	A24_MARK, KEYIN4_MARK, MSIOF0_RXD_MARK,
+	A25_MARK, KEYIN5_MARK, MSIOF0_SS2_MARK,
+	A26_MARK, KEYIN6_MARK,
+	KEYIN7_MARK,
+	D0_NAF0_MARK,
+	D1_NAF1_MARK,
+	D2_NAF2_MARK,
+	D3_NAF3_MARK,
+	D4_NAF4_MARK,
+	D5_NAF5_MARK,
+	D6_NAF6_MARK,
+	D7_NAF7_MARK,
+	D8_NAF8_MARK,
+	D9_NAF9_MARK,
+	D10_NAF10_MARK,
+	D11_NAF11_MARK,
+	D12_NAF12_MARK,
+	D13_NAF13_MARK,
+	D14_NAF14_MARK,
+	D15_NAF15_MARK,
+	CS4__MARK,
+	CS5A__MARK, PORT91_RDWR_MARK,
+	CS5B__MARK, FCE1__MARK,
+	CS6B__MARK, DACK0_MARK,
+	FCE0__MARK, CS6A__MARK,
+	WAIT__MARK, DREQ0_MARK,
+	RD__FSC_MARK,
+	WE0__FWE_MARK, RDWR_FWE_MARK,
+	WE1__MARK,
+	FRB_MARK,
+	CKO_MARK,
+	NBRSTOUT__MARK,
+	NBRST__MARK,
+	BBIF2_TXD_MARK,
+	BBIF2_RXD_MARK,
+	BBIF2_SYNC_MARK,
+	BBIF2_SCK_MARK,
+	SCIFA3_CTS__MARK, MFG3_IN2_MARK,
+	SCIFA3_RXD_MARK, MFG3_IN1_MARK,
+	BBIF1_SS2_MARK, SCIFA3_RTS__MARK, MFG3_OUT1_MARK,
+	SCIFA3_TXD_MARK,
+	HSI_RX_DATA_MARK, BBIF1_RXD_MARK,
+	HSI_TX_WAKE_MARK, BBIF1_TSCK_MARK,
+	HSI_TX_DATA_MARK, BBIF1_TSYNC_MARK,
+	HSI_TX_READY_MARK, BBIF1_TXD_MARK,
+	HSI_RX_READY_MARK, BBIF1_RSCK_MARK, PORT115_I2C_SCL2_MARK, \
+	PORT115_I2C_SCL3_MARK,
+	HSI_RX_WAKE_MARK, BBIF1_RSYNC_MARK, PORT116_I2C_SDA2_MARK, \
+	PORT116_I2C_SDA3_MARK,
+	HSI_RX_FLAG_MARK, BBIF1_SS1_MARK, BBIF1_FLOW_MARK,
+	HSI_TX_FLAG_MARK,
+	VIO_VD_MARK, PORT128_LCD2VSYN_MARK, VIO2_VD_MARK, LCD2D0_MARK,
+
+	VIO_HD_MARK, PORT129_LCD2HSYN_MARK, PORT129_LCD2CS__MARK, \
+	VIO2_HD_MARK, LCD2D1_MARK,
+	VIO_D0_MARK, PORT130_MSIOF2_RXD_MARK, LCD2D10_MARK,
+	VIO_D1_MARK, PORT131_KEYOUT6_MARK, PORT131_MSIOF2_SS1_MARK, \
+	PORT131_KEYOUT11_MARK, LCD2D11_MARK,
+	VIO_D2_MARK, PORT132_KEYOUT7_MARK, PORT132_MSIOF2_SS2_MARK, \
+	PORT132_KEYOUT10_MARK, LCD2D12_MARK,
+	VIO_D3_MARK, MSIOF2_TSYNC_MARK, LCD2D13_MARK,
+	VIO_D4_MARK, MSIOF2_TXD_MARK, LCD2D14_MARK,
+	VIO_D5_MARK, MSIOF2_TSCK_MARK, LCD2D15_MARK,
+	VIO_D6_MARK, PORT136_KEYOUT8_MARK, LCD2D16_MARK,
+	VIO_D7_MARK, PORT137_KEYOUT9_MARK, LCD2D17_MARK,
+	VIO_D8_MARK, PORT138_KEYOUT8_MARK, VIO2_D0_MARK, LCD2D6_MARK,
+	VIO_D9_MARK, PORT139_KEYOUT9_MARK, VIO2_D1_MARK, LCD2D7_MARK,
+	VIO_D10_MARK, TPU0TO2_MARK, VIO2_D2_MARK, LCD2D8_MARK,
+	VIO_D11_MARK, TPU0TO3_MARK, VIO2_D3_MARK, LCD2D9_MARK,
+	VIO_D12_MARK, PORT142_KEYOUT10_MARK, VIO2_D4_MARK, LCD2D2_MARK,
+	VIO_D13_MARK, PORT143_KEYOUT11_MARK, PORT143_KEYOUT6_MARK, \
+	VIO2_D5_MARK, LCD2D3_MARK,
+	VIO_D14_MARK, PORT144_KEYOUT7_MARK, VIO2_D6_MARK, LCD2D4_MARK,
+	VIO_D15_MARK, TPU1TO3_MARK, PORT145_LCD2DISP_MARK, \
+	PORT145_LCD2RS_MARK, VIO2_D7_MARK, LCD2D5_MARK,
+	VIO_CLK_MARK, LCD2DCK_MARK, PORT146_LCD2WR__MARK, VIO2_CLK_MARK, \
+	LCD2D18_MARK,
+	VIO_FIELD_MARK, LCD2RD__MARK, VIO2_FIELD_MARK, LCD2D19_MARK,
+	VIO_CKO_MARK,
+	A27_MARK, PORT149_RDWR_MARK, MFG0_IN1_MARK, PORT149_KEYOUT9_MARK,
+	MFG0_IN2_MARK,
+	TS_SPSYNC3_MARK, MSIOF2_RSCK_MARK,
+	TS_SDAT3_MARK, MSIOF2_RSYNC_MARK,
+	TPU1TO2_MARK, TS_SDEN3_MARK, PORT153_MSIOF2_SS1_MARK,
+	SCIFA2_TXD1_MARK, MSIOF2_MCK0_MARK,
+	SCIFA2_RXD1_MARK, MSIOF2_MCK1_MARK,
+	SCIFA2_RTS1__MARK, PORT156_MSIOF2_SS2_MARK,
+	SCIFA2_CTS1__MARK, PORT157_MSIOF2_RXD_MARK,
+	DINT__MARK, SCIFA2_SCK1_MARK, TS_SCK3_MARK,
+	PORT159_SCIFB_SCK_MARK, PORT159_SCIFA5_SCK_MARK, NMI_MARK,
+	PORT160_SCIFB_TXD_MARK, PORT160_SCIFA5_TXD_MARK,
+	PORT161_SCIFB_CTS__MARK, PORT161_SCIFA5_CTS__MARK,
+	PORT162_SCIFB_RXD_MARK, PORT162_SCIFA5_RXD_MARK,
+	PORT163_SCIFB_RTS__MARK, PORT163_SCIFA5_RTS__MARK, TPU3TO0_MARK,
+	LCDD0_MARK,
+	LCDD1_MARK, PORT193_SCIFA5_CTS__MARK, BBIF2_TSYNC1_MARK,
+	LCDD2_MARK, PORT194_SCIFA5_RTS__MARK, BBIF2_TSCK1_MARK,
+	LCDD3_MARK, PORT195_SCIFA5_RXD_MARK, BBIF2_TXD1_MARK,
+	LCDD4_MARK, PORT196_SCIFA5_TXD_MARK,
+	LCDD5_MARK, PORT197_SCIFA5_SCK_MARK, MFG2_OUT2_MARK, TPU2TO1_MARK,
+	LCDD6_MARK,
+	LCDD7_MARK, TPU4TO1_MARK, MFG4_OUT2_MARK,
+	LCDD8_MARK, D16_MARK,
+	LCDD9_MARK, D17_MARK,
+	LCDD10_MARK, D18_MARK,
+	LCDD11_MARK, D19_MARK,
+	LCDD12_MARK, D20_MARK,
+	LCDD13_MARK, D21_MARK,
+	LCDD14_MARK, D22_MARK,
+	LCDD15_MARK, PORT207_MSIOF0L_SS1_MARK, D23_MARK,
+	LCDD16_MARK, PORT208_MSIOF0L_SS2_MARK, D24_MARK,
+	LCDD17_MARK, D25_MARK,
+	LCDD18_MARK, DREQ2_MARK, PORT210_MSIOF0L_SS1_MARK, D26_MARK,
+	LCDD19_MARK, PORT211_MSIOF0L_SS2_MARK, D27_MARK,
+	LCDD20_MARK, TS_SPSYNC1_MARK, MSIOF0L_MCK0_MARK, D28_MARK,
+	LCDD21_MARK, TS_SDAT1_MARK, MSIOF0L_MCK1_MARK, D29_MARK,
+	LCDD22_MARK, TS_SDEN1_MARK, MSIOF0L_RSCK_MARK, D30_MARK,
+	LCDD23_MARK, TS_SCK1_MARK, MSIOF0L_RSYNC_MARK, D31_MARK,
+	LCDDCK_MARK, LCDWR__MARK,
+	LCDRD__MARK, DACK2_MARK, PORT217_LCD2RS_MARK, MSIOF0L_TSYNC_MARK, \
+	VIO2_FIELD3_MARK, PORT217_LCD2DISP_MARK,
+	LCDHSYN_MARK, LCDCS__MARK, LCDCS2__MARK, DACK3_MARK, \
+	PORT218_VIO_CKOR_MARK,
+	LCDDISP_MARK, LCDRS_MARK, PORT219_LCD2WR__MARK, DREQ3_MARK, \
+	MSIOF0L_TSCK_MARK, VIO2_CLK3_MARK, LCD2DCK_2_MARK,
+	LCDVSYN_MARK, LCDVSYN2_MARK,
+	LCDLCLK_MARK, DREQ1_MARK, PORT221_LCD2CS__MARK, PWEN_MARK, \
+	MSIOF0L_RXD_MARK, VIO2_HD3_MARK, PORT221_LCD2HSYN_MARK,
+	LCDDON_MARK, LCDDON2_MARK, DACK1_MARK, OVCN_MARK, MSIOF0L_TXD_MARK, \
+	VIO2_VD3_MARK, PORT222_LCD2VSYN_MARK,
+
+	SCIFA1_TXD_MARK, OVCN2_MARK,
+	EXTLP_MARK, SCIFA1_SCK_MARK, PORT226_VIO_CKO2_MARK,
+	SCIFA1_RTS__MARK, IDIN_MARK,
+	SCIFA1_RXD_MARK,
+	SCIFA1_CTS__MARK, MFG1_IN1_MARK,
+	MSIOF1_TXD_MARK, SCIFA2_TXD2_MARK,
+	MSIOF1_TSYNC_MARK, SCIFA2_CTS2__MARK,
+	MSIOF1_TSCK_MARK, SCIFA2_SCK2_MARK,
+	MSIOF1_RXD_MARK, SCIFA2_RXD2_MARK,
+	MSIOF1_RSCK_MARK, SCIFA2_RTS2__MARK, VIO2_CLK2_MARK, LCD2D20_MARK,
+	MSIOF1_RSYNC_MARK, MFG1_IN2_MARK, VIO2_VD2_MARK, LCD2D21_MARK,
+	MSIOF1_MCK0_MARK, PORT236_I2C_SDA2_MARK,
+	MSIOF1_MCK1_MARK, PORT237_I2C_SCL2_MARK,
+	MSIOF1_SS1_MARK, VIO2_FIELD2_MARK, LCD2D22_MARK,
+	MSIOF1_SS2_MARK, VIO2_HD2_MARK, LCD2D23_MARK,
+	SCIFA6_TXD_MARK,
+	PORT241_IRDA_OUT_MARK, PORT241_IROUT_MARK, MFG4_OUT1_MARK, TPU4TO0_MARK,
+	PORT242_IRDA_IN_MARK, MFG4_IN2_MARK,
+	PORT243_IRDA_FIRSEL_MARK, PORT243_VIO_CKO2_MARK,
+	PORT244_SCIFA5_CTS__MARK, MFG2_IN1_MARK, PORT244_SCIFB_CTS__MARK, \
+	MSIOF2R_RXD_MARK,
+	PORT245_SCIFA5_RTS__MARK, MFG2_IN2_MARK, PORT245_SCIFB_RTS__MARK, \
+	MSIOF2R_TXD_MARK,
+	PORT246_SCIFA5_RXD_MARK, MFG1_OUT1_MARK, PORT246_SCIFB_RXD_MARK, \
+	TPU1TO0_MARK,
+	PORT247_SCIFA5_TXD_MARK, MFG3_OUT2_MARK, PORT247_SCIFB_TXD_MARK, \
+	TPU3TO1_MARK,
+	PORT248_SCIFA5_SCK_MARK, MFG2_OUT1_MARK, PORT248_SCIFB_SCK_MARK, \
+	TPU2TO0_MARK, PORT248_I2C_SCL3_MARK, MSIOF2R_TSCK_MARK,
+	PORT249_IROUT_MARK, MFG4_IN1_MARK, PORT249_I2C_SDA3_MARK, \
+	MSIOF2R_TSYNC_MARK,
+	SDHICLK0_MARK,
+	SDHICD0_MARK,
+	SDHID0_0_MARK,
+	SDHID0_1_MARK,
+	SDHID0_2_MARK,
+	SDHID0_3_MARK,
+	SDHICMD0_MARK,
+	SDHIWP0_MARK,
+	SDHICLK1_MARK,
+	SDHID1_0_MARK, TS_SPSYNC2_MARK,
+	SDHID1_1_MARK, TS_SDAT2_MARK,
+	SDHID1_2_MARK, TS_SDEN2_MARK,
+	SDHID1_3_MARK, TS_SCK2_MARK,
+	SDHICMD1_MARK,
+	SDHICLK2_MARK,
+	SDHID2_0_MARK, TS_SPSYNC4_MARK,
+	SDHID2_1_MARK, TS_SDAT4_MARK,
+	SDHID2_2_MARK, TS_SDEN4_MARK,
+	SDHID2_3_MARK, TS_SCK4_MARK,
+	SDHICMD2_MARK,
+	MMCCLK0_MARK,
+	MMCD0_0_MARK,
+	MMCD0_1_MARK,
+	MMCD0_2_MARK,
+	MMCD0_3_MARK,
+	MMCD0_4_MARK, TS_SPSYNC5_MARK,
+	MMCD0_5_MARK, TS_SDAT5_MARK,
+	MMCD0_6_MARK, TS_SDEN5_MARK,
+	MMCD0_7_MARK, TS_SCK5_MARK,
+	MMCCMD0_MARK,
+	RESETOUTS__MARK, EXTAL2OUT_MARK,
+	MCP_WAIT__MCP_FRB_MARK,
+	MCP_CKO_MARK, MMCCLK1_MARK,
+	MCP_D15_MCP_NAF15_MARK,
+	MCP_D14_MCP_NAF14_MARK,
+	MCP_D13_MCP_NAF13_MARK,
+	MCP_D12_MCP_NAF12_MARK,
+	MCP_D11_MCP_NAF11_MARK,
+	MCP_D10_MCP_NAF10_MARK,
+	MCP_D9_MCP_NAF9_MARK,
+	MCP_D8_MCP_NAF8_MARK, MMCCMD1_MARK,
+	MCP_D7_MCP_NAF7_MARK, MMCD1_7_MARK,
+
+	MCP_D6_MCP_NAF6_MARK, MMCD1_6_MARK,
+	MCP_D5_MCP_NAF5_MARK, MMCD1_5_MARK,
+	MCP_D4_MCP_NAF4_MARK, MMCD1_4_MARK,
+	MCP_D3_MCP_NAF3_MARK, MMCD1_3_MARK,
+	MCP_D2_MCP_NAF2_MARK, MMCD1_2_MARK,
+	MCP_D1_MCP_NAF1_MARK, MMCD1_1_MARK,
+	MCP_D0_MCP_NAF0_MARK, MMCD1_0_MARK,
+	MCP_NBRSTOUT__MARK,
+	MCP_WE0__MCP_FWE_MARK, MCP_RDWR_MCP_FWE_MARK,
+
+	/* MSEL2 special cases */
+	TSIF2_TS_XX1_MARK,
+	TSIF2_TS_XX2_MARK,
+	TSIF2_TS_XX3_MARK,
+	TSIF2_TS_XX4_MARK,
+	TSIF2_TS_XX5_MARK,
+	TSIF1_TS_XX1_MARK,
+	TSIF1_TS_XX2_MARK,
+	TSIF1_TS_XX3_MARK,
+	TSIF1_TS_XX4_MARK,
+	TSIF1_TS_XX5_MARK,
+	TSIF0_TS_XX1_MARK,
+	TSIF0_TS_XX2_MARK,
+	TSIF0_TS_XX3_MARK,
+	TSIF0_TS_XX4_MARK,
+	TSIF0_TS_XX5_MARK,
+	MST1_TS_XX1_MARK,
+	MST1_TS_XX2_MARK,
+	MST1_TS_XX3_MARK,
+	MST1_TS_XX4_MARK,
+	MST1_TS_XX5_MARK,
+	MST0_TS_XX1_MARK,
+	MST0_TS_XX2_MARK,
+	MST0_TS_XX3_MARK,
+	MST0_TS_XX4_MARK,
+	MST0_TS_XX5_MARK,
+
+	/* MSEL3 special cases */
+	SDHI0_VCCQ_MC0_ON_MARK,
+	SDHI0_VCCQ_MC0_OFF_MARK,
+	DEBUG_MON_VIO_MARK,
+	DEBUG_MON_LCDD_MARK,
+	LCDC_LCDC0_MARK,
+	LCDC_LCDC1_MARK,
+
+	/* MSEL4 special cases */
+	IRQ9_MEM_INT_MARK,
+	IRQ9_MCP_INT_MARK,
+	A11_MARK,
+	KEYOUT8_MARK,
+	TPU4TO3_MARK,
+	RESETA_N_PU_ON_MARK,
+	RESETA_N_PU_OFF_MARK,
+	EDBGREQ_PD_MARK,
+	EDBGREQ_PU_MARK,
+
+	/* Functions with pull-ups */
+	KEYIN0_PU_MARK,
+	KEYIN1_PU_MARK,
+	KEYIN2_PU_MARK,
+	KEYIN3_PU_MARK,
+	KEYIN4_PU_MARK,
+	KEYIN5_PU_MARK,
+	KEYIN6_PU_MARK,
+	KEYIN7_PU_MARK,
+	SDHID1_0_PU_MARK,
+	SDHID1_1_PU_MARK,
+	SDHID1_2_PU_MARK,
+	SDHID1_3_PU_MARK,
+	SDHICMD1_PU_MARK,
+	MMCCMD0_PU_MARK,
+	MMCCMD1_PU_MARK,
+	FSIACK_PU_MARK,
+	FSIAILR_PU_MARK,
+	FSIAIBT_PU_MARK,
+	FSIAISLD_PU_MARK,
+
+	PINMUX_MARK_END,
+};
+
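+/*
+ * Helper macros for the GPIO-mode pin descriptions below.  The suffix
+ * encodes the supported states: _I input only, _O output only, _IO both,
+ * with _PD/_PU/_PU_PD adding the available pull-down/pull-up variants.
+ */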
+#define PORT_DATA_I(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
+
+#define PORT_DATA_I_PD(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_IN, PORT##nr##_IN_PD)
+
+#define PORT_DATA_I_PU(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_IN, PORT##nr##_IN_PU)
+
+#define PORT_DATA_I_PU_PD(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_IN, PORT##nr##_IN_PD,	\
+				PORT##nr##_IN_PU)
+
+#define PORT_DATA_O(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_OUT)
+
+#define PORT_DATA_IO(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_OUT, PORT##nr##_IN)
+
+#define PORT_DATA_IO_PD(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_OUT, PORT##nr##_IN,		\
+				PORT##nr##_IN_PD)
+
+#define PORT_DATA_IO_PU(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_OUT, PORT##nr##_IN,		\
+				PORT##nr##_IN_PU)
+
+#define PORT_DATA_IO_PU_PD(nr)	\
+	PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0,	\
+				PORT##nr##_OUT, PORT##nr##_IN,		\
+				PORT##nr##_IN_PD, PORT##nr##_IN_PU)
+
+static pinmux_enum_t pinmux_data[] = {
+	/* specify valid pin states for each pin in GPIO mode */
+
+	/* Table 25-1 (I/O and Pull U/D) */
+	PORT_DATA_I_PD(0),
+	PORT_DATA_I_PU(1),
+	PORT_DATA_I_PU(2),
+	PORT_DATA_I_PU(3),
+	PORT_DATA_I_PU(4),
+	PORT_DATA_I_PU(5),
+	PORT_DATA_I_PU(6),
+	PORT_DATA_I_PU(7),
+	PORT_DATA_I_PU(8),
+	PORT_DATA_I_PD(9),
+	PORT_DATA_I_PD(10),
+	PORT_DATA_I_PU_PD(11),
+	PORT_DATA_IO_PU_PD(12),
+	PORT_DATA_IO_PU_PD(13),
+	PORT_DATA_IO_PU_PD(14),
+	PORT_DATA_IO_PU_PD(15),
+	PORT_DATA_IO_PD(16),
+	PORT_DATA_IO_PD(17),
+	PORT_DATA_IO_PU(18),
+	PORT_DATA_IO_PU(19),
+	PORT_DATA_O(20),
+	PORT_DATA_O(21),
+	PORT_DATA_O(22),
+	PORT_DATA_O(23),
+	PORT_DATA_O(24),
+	PORT_DATA_I_PD(25),
+	PORT_DATA_I_PD(26),
+	PORT_DATA_IO_PU(27),
+	PORT_DATA_IO_PU(28),
+	PORT_DATA_IO_PD(29),
+	PORT_DATA_IO_PD(30),
+	PORT_DATA_IO_PU(31),
+	PORT_DATA_IO_PD(32),
+	PORT_DATA_I_PU_PD(33),
+	PORT_DATA_IO_PD(34),
+	PORT_DATA_I_PU_PD(35),
+	PORT_DATA_IO_PD(36),
+	PORT_DATA_IO(37),
+	PORT_DATA_O(38),
+	PORT_DATA_I_PU(39),
+	PORT_DATA_I_PU_PD(40),
+	PORT_DATA_O(41),
+	PORT_DATA_IO_PD(42),
+	PORT_DATA_IO_PU_PD(43),
+	PORT_DATA_IO_PU_PD(44),
+	PORT_DATA_IO_PD(45),
+	PORT_DATA_IO_PD(46),
+	PORT_DATA_IO_PD(47),
+	PORT_DATA_I_PD(48),
+	PORT_DATA_IO_PU_PD(49),
+	PORT_DATA_IO_PD(50),
+
+	PORT_DATA_IO_PD(51),
+	PORT_DATA_O(52),
+	PORT_DATA_IO_PU_PD(53),
+	PORT_DATA_IO_PU_PD(54),
+	PORT_DATA_IO_PD(55),
+	PORT_DATA_I_PU_PD(56),
+	PORT_DATA_IO(57),
+	PORT_DATA_IO(58),
+	PORT_DATA_IO(59),
+	PORT_DATA_IO(60),
+	PORT_DATA_IO(61),
+	PORT_DATA_IO_PD(62),
+	PORT_DATA_IO_PD(63),
+	PORT_DATA_IO_PU_PD(64),
+	PORT_DATA_IO_PD(65),
+	PORT_DATA_IO_PU_PD(66),
+	PORT_DATA_IO_PU_PD(67),
+	PORT_DATA_IO_PU_PD(68),
+	PORT_DATA_IO_PU_PD(69),
+	PORT_DATA_IO_PU_PD(70),
+	PORT_DATA_IO_PU_PD(71),
+	PORT_DATA_IO_PU_PD(72),
+	PORT_DATA_I_PU_PD(73),
+	PORT_DATA_IO_PU(74),
+	PORT_DATA_IO_PU(75),
+	PORT_DATA_IO_PU(76),
+	PORT_DATA_IO_PU(77),
+	PORT_DATA_IO_PU(78),
+	PORT_DATA_IO_PU(79),
+	PORT_DATA_IO_PU(80),
+	PORT_DATA_IO_PU(81),
+	PORT_DATA_IO_PU(82),
+	PORT_DATA_IO_PU(83),
+	PORT_DATA_IO_PU(84),
+	PORT_DATA_IO_PU(85),
+	PORT_DATA_IO_PU(86),
+	PORT_DATA_IO_PU(87),
+	PORT_DATA_IO_PU(88),
+	PORT_DATA_IO_PU(89),
+	PORT_DATA_O(90),
+	PORT_DATA_IO_PU(91),
+	PORT_DATA_O(92),
+	PORT_DATA_IO_PU(93),
+	PORT_DATA_O(94),
+	PORT_DATA_I_PU_PD(95),
+	PORT_DATA_IO(96),
+	PORT_DATA_IO(97),
+	PORT_DATA_IO(98),
+	PORT_DATA_I_PU(99),
+	PORT_DATA_O(100),
+	PORT_DATA_O(101),
+	PORT_DATA_I_PU(102),
+	PORT_DATA_IO_PD(103),
+	PORT_DATA_I_PU_PD(104),
+	PORT_DATA_I_PD(105),
+	PORT_DATA_I_PD(106),
+	PORT_DATA_I_PU_PD(107),
+	PORT_DATA_I_PU_PD(108),
+	PORT_DATA_IO_PD(109),
+	PORT_DATA_IO_PD(110),
+	PORT_DATA_IO_PU_PD(111),
+	PORT_DATA_IO_PU_PD(112),
+	PORT_DATA_IO_PU_PD(113),
+	PORT_DATA_IO_PD(114),
+	PORT_DATA_IO_PU(115),
+	PORT_DATA_IO_PU(116),
+	PORT_DATA_IO_PU_PD(117),
+	PORT_DATA_IO_PU_PD(118),
+	PORT_DATA_IO_PD(128),
+
+	PORT_DATA_IO_PD(129),
+	PORT_DATA_IO_PU_PD(130),
+	PORT_DATA_IO_PD(131),
+	PORT_DATA_IO_PD(132),
+	PORT_DATA_IO_PD(133),
+	PORT_DATA_IO_PU_PD(134),
+	PORT_DATA_IO_PU_PD(135),
+	PORT_DATA_IO_PU_PD(136),
+	PORT_DATA_IO_PU_PD(137),
+	PORT_DATA_IO_PD(138),
+	PORT_DATA_IO_PD(139),
+	PORT_DATA_IO_PD(140),
+	PORT_DATA_IO_PD(141),
+	PORT_DATA_IO_PD(142),
+	PORT_DATA_IO_PD(143),
+	PORT_DATA_IO_PU_PD(144),
+	PORT_DATA_IO_PD(145),
+	PORT_DATA_IO_PU_PD(146),
+	PORT_DATA_IO_PU_PD(147),
+	PORT_DATA_IO_PU_PD(148),
+	PORT_DATA_IO_PU_PD(149),
+	PORT_DATA_I_PU_PD(150),
+	PORT_DATA_IO_PU_PD(151),
+	PORT_DATA_IO_PU_PD(152),
+	PORT_DATA_IO_PD(153),
+	PORT_DATA_IO_PD(154),
+	PORT_DATA_I_PU_PD(155),
+	PORT_DATA_IO_PU_PD(156),
+	PORT_DATA_I_PD(157),
+	PORT_DATA_IO_PD(158),
+	PORT_DATA_IO_PU_PD(159),
+	PORT_DATA_IO_PU_PD(160),
+	PORT_DATA_I_PU_PD(161),
+	PORT_DATA_I_PU_PD(162),
+	PORT_DATA_IO_PU_PD(163),
+	PORT_DATA_I_PU_PD(164),
+	PORT_DATA_IO_PD(192),
+	PORT_DATA_IO_PU_PD(193),
+	PORT_DATA_IO_PD(194),
+	PORT_DATA_IO_PU_PD(195),
+	PORT_DATA_IO_PD(196),
+	PORT_DATA_IO_PD(197),
+	PORT_DATA_IO_PD(198),
+	PORT_DATA_IO_PD(199),
+	PORT_DATA_IO_PU_PD(200),
+	PORT_DATA_IO_PU_PD(201),
+	PORT_DATA_IO_PU_PD(202),
+	PORT_DATA_IO_PU_PD(203),
+	PORT_DATA_IO_PU_PD(204),
+	PORT_DATA_IO_PU_PD(205),
+	PORT_DATA_IO_PU_PD(206),
+	PORT_DATA_IO_PD(207),
+	PORT_DATA_IO_PD(208),
+	PORT_DATA_IO_PD(209),
+	PORT_DATA_IO_PD(210),
+	PORT_DATA_IO_PD(211),
+	PORT_DATA_IO_PD(212),
+	PORT_DATA_IO_PD(213),
+	PORT_DATA_IO_PU_PD(214),
+	PORT_DATA_IO_PU_PD(215),
+	PORT_DATA_IO_PD(216),
+	PORT_DATA_IO_PD(217),
+	PORT_DATA_O(218),
+	PORT_DATA_IO_PD(219),
+	PORT_DATA_IO_PD(220),
+	PORT_DATA_IO_PU_PD(221),
+	PORT_DATA_IO_PU_PD(222),
+	PORT_DATA_I_PU_PD(223),
+	PORT_DATA_I_PU_PD(224),
+
+	PORT_DATA_IO_PU_PD(225),
+	PORT_DATA_O(226),
+	PORT_DATA_IO_PU_PD(227),
+	PORT_DATA_I_PU_PD(228),
+	PORT_DATA_I_PD(229),
+	PORT_DATA_IO(230),
+	PORT_DATA_IO_PU_PD(231),
+	PORT_DATA_IO_PU_PD(232),
+	PORT_DATA_I_PU_PD(233),
+	PORT_DATA_IO_PU_PD(234),
+	PORT_DATA_IO_PU_PD(235),
+	PORT_DATA_IO_PU_PD(236),
+	PORT_DATA_IO_PD(237),
+	PORT_DATA_IO_PU_PD(238),
+	PORT_DATA_IO_PU_PD(239),
+	PORT_DATA_IO_PU_PD(240),
+	PORT_DATA_O(241),
+	PORT_DATA_I_PD(242),
+	PORT_DATA_IO_PU_PD(243),
+	PORT_DATA_IO_PU_PD(244),
+	PORT_DATA_IO_PU_PD(245),
+	PORT_DATA_IO_PU_PD(246),
+	PORT_DATA_IO_PU_PD(247),
+	PORT_DATA_IO_PU_PD(248),
+	PORT_DATA_IO_PU_PD(249),
+	PORT_DATA_IO_PU_PD(250),
+	PORT_DATA_IO_PU_PD(251),
+	PORT_DATA_IO_PU_PD(252),
+	PORT_DATA_IO_PU_PD(253),
+	PORT_DATA_IO_PU_PD(254),
+	PORT_DATA_IO_PU_PD(255),
+	PORT_DATA_IO_PU_PD(256),
+	PORT_DATA_IO_PU_PD(257),
+	PORT_DATA_IO_PU_PD(258),
+	PORT_DATA_IO_PU_PD(259),
+	PORT_DATA_IO_PU_PD(260),
+	PORT_DATA_IO_PU_PD(261),
+	PORT_DATA_IO_PU_PD(262),
+	PORT_DATA_IO_PU_PD(263),
+	PORT_DATA_IO_PU_PD(264),
+	PORT_DATA_IO_PU_PD(265),
+	PORT_DATA_IO_PU_PD(266),
+	PORT_DATA_IO_PU_PD(267),
+	PORT_DATA_IO_PU_PD(268),
+	PORT_DATA_IO_PU_PD(269),
+	PORT_DATA_IO_PU_PD(270),
+	PORT_DATA_IO_PU_PD(271),
+	PORT_DATA_IO_PU_PD(272),
+	PORT_DATA_IO_PU_PD(273),
+	PORT_DATA_IO_PU_PD(274),
+	PORT_DATA_IO_PU_PD(275),
+	PORT_DATA_IO_PU_PD(276),
+	PORT_DATA_IO_PU_PD(277),
+	PORT_DATA_IO_PU_PD(278),
+	PORT_DATA_IO_PU_PD(279),
+	PORT_DATA_IO_PU_PD(280),
+	PORT_DATA_O(281),
+	PORT_DATA_O(282),
+	PORT_DATA_I_PU(288),
+	PORT_DATA_IO_PU_PD(289),
+	PORT_DATA_IO_PU_PD(290),
+	PORT_DATA_IO_PU_PD(291),
+	PORT_DATA_IO_PU_PD(292),
+	PORT_DATA_IO_PU_PD(293),
+	PORT_DATA_IO_PU_PD(294),
+	PORT_DATA_IO_PU_PD(295),
+	PORT_DATA_IO_PU_PD(296),
+	PORT_DATA_IO_PU_PD(297),
+	PORT_DATA_IO_PU_PD(298),
+
+	PORT_DATA_IO_PU_PD(299),
+	PORT_DATA_IO_PU_PD(300),
+	PORT_DATA_IO_PU_PD(301),
+	PORT_DATA_IO_PU_PD(302),
+	PORT_DATA_IO_PU_PD(303),
+	PORT_DATA_IO_PU_PD(304),
+	PORT_DATA_IO_PU_PD(305),
+	PORT_DATA_O(306),
+	PORT_DATA_O(307),
+	PORT_DATA_I_PU(308),
+	PORT_DATA_O(309),
+
+	/* Table 25-1 (Function 0-7) */
+	PINMUX_DATA(VBUS_0_MARK, PORT0_FN1),
+	PINMUX_DATA(GPI0_MARK, PORT1_FN1),
+	PINMUX_DATA(GPI1_MARK, PORT2_FN1),
+	PINMUX_DATA(GPI2_MARK, PORT3_FN1),
+	PINMUX_DATA(GPI3_MARK, PORT4_FN1),
+	PINMUX_DATA(GPI4_MARK, PORT5_FN1),
+	PINMUX_DATA(GPI5_MARK, PORT6_FN1),
+	PINMUX_DATA(GPI6_MARK, PORT7_FN1),
+	PINMUX_DATA(GPI7_MARK, PORT8_FN1),
+	PINMUX_DATA(SCIFA7_RXD_MARK, PORT12_FN2),
+	PINMUX_DATA(SCIFA7_CTS__MARK, PORT13_FN2),
+	PINMUX_DATA(GPO7_MARK, PORT14_FN1), \
+	PINMUX_DATA(MFG0_OUT2_MARK, PORT14_FN4),
+	PINMUX_DATA(GPO6_MARK, PORT15_FN1), \
+	PINMUX_DATA(MFG1_OUT2_MARK, PORT15_FN4),
+	PINMUX_DATA(GPO5_MARK, PORT16_FN1), \
+	PINMUX_DATA(SCIFA0_SCK_MARK, PORT16_FN2), \
+	PINMUX_DATA(FSICOSLDT3_MARK, PORT16_FN3), \
+	PINMUX_DATA(PORT16_VIO_CKOR_MARK, PORT16_FN4),
+	PINMUX_DATA(SCIFA0_TXD_MARK, PORT17_FN2),
+	PINMUX_DATA(SCIFA7_TXD_MARK, PORT18_FN2),
+	PINMUX_DATA(SCIFA7_RTS__MARK, PORT19_FN2), \
+	PINMUX_DATA(PORT19_VIO_CKO2_MARK, PORT19_FN3),
+	PINMUX_DATA(GPO0_MARK, PORT20_FN1),
+	PINMUX_DATA(GPO1_MARK, PORT21_FN1),
+	PINMUX_DATA(GPO2_MARK, PORT22_FN1), \
+	PINMUX_DATA(STATUS0_MARK, PORT22_FN2),
+	PINMUX_DATA(GPO3_MARK, PORT23_FN1), \
+	PINMUX_DATA(STATUS1_MARK, PORT23_FN2),
+	PINMUX_DATA(GPO4_MARK, PORT24_FN1), \
+	PINMUX_DATA(STATUS2_MARK, PORT24_FN2),
+	PINMUX_DATA(VINT_MARK, PORT25_FN1),
+	PINMUX_DATA(TCKON_MARK, PORT26_FN1),
+	PINMUX_DATA(XDVFS1_MARK, PORT27_FN1), \
+	PINMUX_DATA(PORT27_I2C_SCL2_MARK, PORT27_FN2, MSEL2CR_MSEL17_0,
+		MSEL2CR_MSEL16_1), \
+	PINMUX_DATA(PORT27_I2C_SCL3_MARK, PORT27_FN3, MSEL2CR_MSEL19_0,
+		MSEL2CR_MSEL18_0), \
+	PINMUX_DATA(MFG0_OUT1_MARK, PORT27_FN4), \
+	PINMUX_DATA(PORT27_IROUT_MARK, PORT27_FN7),
+	PINMUX_DATA(XDVFS2_MARK, PORT28_FN1), \
+	PINMUX_DATA(PORT28_I2C_SDA2_MARK, PORT28_FN2, MSEL2CR_MSEL17_0,
+		MSEL2CR_MSEL16_1), \
+	PINMUX_DATA(PORT28_I2C_SDA3_MARK, PORT28_FN3, MSEL2CR_MSEL19_0,
+		MSEL2CR_MSEL18_0), \
+	PINMUX_DATA(PORT28_TPU1TO1_MARK, PORT28_FN7),
+	PINMUX_DATA(SIM_RST_MARK, PORT29_FN1), \
+	PINMUX_DATA(PORT29_TPU1TO1_MARK, PORT29_FN4),
+	PINMUX_DATA(SIM_CLK_MARK, PORT30_FN1), \
+	PINMUX_DATA(PORT30_VIO_CKOR_MARK, PORT30_FN4),
+	PINMUX_DATA(SIM_D_MARK, PORT31_FN1), \
+	PINMUX_DATA(PORT31_IROUT_MARK, PORT31_FN4),
+	PINMUX_DATA(SCIFA4_TXD_MARK, PORT32_FN2),
+	PINMUX_DATA(SCIFA4_RXD_MARK, PORT33_FN2), \
+	PINMUX_DATA(XWUP_MARK, PORT33_FN3),
+	PINMUX_DATA(SCIFA4_RTS__MARK, PORT34_FN2),
+	PINMUX_DATA(SCIFA4_CTS__MARK, PORT35_FN2),
+	PINMUX_DATA(FSIBOBT_MARK, PORT36_FN1), \
+	PINMUX_DATA(FSIBIBT_MARK, PORT36_FN2),
+	PINMUX_DATA(FSIBOLR_MARK, PORT37_FN1), \
+	PINMUX_DATA(FSIBILR_MARK, PORT37_FN2),
+	PINMUX_DATA(FSIBOSLD_MARK, PORT38_FN1),
+	PINMUX_DATA(FSIBISLD_MARK, PORT39_FN1),
+	PINMUX_DATA(VACK_MARK, PORT40_FN1),
+	PINMUX_DATA(XTAL1L_MARK, PORT41_FN1),
+	PINMUX_DATA(SCIFA0_RTS__MARK, PORT42_FN2), \
+	PINMUX_DATA(FSICOSLDT2_MARK, PORT42_FN3),
+	PINMUX_DATA(SCIFA0_RXD_MARK, PORT43_FN2),
+	PINMUX_DATA(SCIFA0_CTS__MARK, PORT44_FN2), \
+	PINMUX_DATA(FSICOSLDT1_MARK, PORT44_FN3),
+	PINMUX_DATA(FSICOBT_MARK, PORT45_FN1), \
+	PINMUX_DATA(FSICIBT_MARK, PORT45_FN2), \
+	PINMUX_DATA(FSIDOBT_MARK, PORT45_FN3), \
+	PINMUX_DATA(FSIDIBT_MARK, PORT45_FN4),
+	PINMUX_DATA(FSICOLR_MARK, PORT46_FN1), \
+	PINMUX_DATA(FSICILR_MARK, PORT46_FN2), \
+	PINMUX_DATA(FSIDOLR_MARK, PORT46_FN3), \
+	PINMUX_DATA(FSIDILR_MARK, PORT46_FN4),
+	PINMUX_DATA(FSICOSLD_MARK, PORT47_FN1), \
+	PINMUX_DATA(PORT47_FSICSPDIF_MARK, PORT47_FN2),
+	PINMUX_DATA(FSICISLD_MARK, PORT48_FN1), \
+	PINMUX_DATA(FSIDISLD_MARK, PORT48_FN3),
+	PINMUX_DATA(FSIACK_MARK, PORT49_FN1), \
+	PINMUX_DATA(PORT49_IRDA_OUT_MARK, PORT49_FN2, MSEL4CR_MSEL19_1), \
+	PINMUX_DATA(PORT49_IROUT_MARK, PORT49_FN4), \
+	PINMUX_DATA(FSIAOMC_MARK, PORT49_FN5),
+	PINMUX_DATA(FSIAOLR_MARK, PORT50_FN1), \
+	PINMUX_DATA(BBIF2_TSYNC2_MARK, PORT50_FN2), \
+	PINMUX_DATA(TPU2TO2_MARK, PORT50_FN3), \
+	PINMUX_DATA(FSIAILR_MARK, PORT50_FN5),
+
+	PINMUX_DATA(FSIAOBT_MARK, PORT51_FN1), \
+	PINMUX_DATA(BBIF2_TSCK2_MARK, PORT51_FN2), \
+	PINMUX_DATA(TPU2TO3_MARK, PORT51_FN3), \
+	PINMUX_DATA(FSIAIBT_MARK, PORT51_FN5),
+	PINMUX_DATA(FSIAOSLD_MARK, PORT52_FN1), \
+	PINMUX_DATA(BBIF2_TXD2_MARK, PORT52_FN2),
+	PINMUX_DATA(FSIASPDIF_MARK, PORT53_FN1), \
+	PINMUX_DATA(PORT53_IRDA_IN_MARK, PORT53_FN2, MSEL4CR_MSEL19_1), \
+	PINMUX_DATA(TPU3TO3_MARK, PORT53_FN3), \
+	PINMUX_DATA(FSIBSPDIF_MARK, PORT53_FN5), \
+	PINMUX_DATA(PORT53_FSICSPDIF_MARK, PORT53_FN6),
+	PINMUX_DATA(FSIBCK_MARK, PORT54_FN1), \
+	PINMUX_DATA(PORT54_IRDA_FIRSEL_MARK, PORT54_FN2, MSEL4CR_MSEL19_1), \
+	PINMUX_DATA(TPU3TO2_MARK, PORT54_FN3), \
+	PINMUX_DATA(FSIBOMC_MARK, PORT54_FN5), \
+	PINMUX_DATA(FSICCK_MARK, PORT54_FN6), \
+	PINMUX_DATA(FSICOMC_MARK, PORT54_FN7),
+	PINMUX_DATA(FSIAISLD_MARK, PORT55_FN1), \
+	PINMUX_DATA(TPU0TO0_MARK, PORT55_FN3),
+	PINMUX_DATA(A0_MARK, PORT57_FN1), \
+	PINMUX_DATA(BS__MARK, PORT57_FN2),
+	PINMUX_DATA(A12_MARK, PORT58_FN1), \
+	PINMUX_DATA(PORT58_KEYOUT7_MARK, PORT58_FN2), \
+	PINMUX_DATA(TPU4TO2_MARK, PORT58_FN4),
+	PINMUX_DATA(A13_MARK, PORT59_FN1), \
+	PINMUX_DATA(PORT59_KEYOUT6_MARK, PORT59_FN2), \
+	PINMUX_DATA(TPU0TO1_MARK, PORT59_FN4),
+	PINMUX_DATA(A14_MARK, PORT60_FN1), \
+	PINMUX_DATA(KEYOUT5_MARK, PORT60_FN2),
+	PINMUX_DATA(A15_MARK, PORT61_FN1), \
+	PINMUX_DATA(KEYOUT4_MARK, PORT61_FN2),
+	PINMUX_DATA(A16_MARK, PORT62_FN1), \
+	PINMUX_DATA(KEYOUT3_MARK, PORT62_FN2), \
+	PINMUX_DATA(MSIOF0_SS1_MARK, PORT62_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A17_MARK, PORT63_FN1), \
+	PINMUX_DATA(KEYOUT2_MARK, PORT63_FN2), \
+	PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT63_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A18_MARK, PORT64_FN1), \
+	PINMUX_DATA(KEYOUT1_MARK, PORT64_FN2), \
+	PINMUX_DATA(MSIOF0_TSCK_MARK, PORT64_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A19_MARK, PORT65_FN1), \
+	PINMUX_DATA(KEYOUT0_MARK, PORT65_FN2), \
+	PINMUX_DATA(MSIOF0_TXD_MARK, PORT65_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A20_MARK, PORT66_FN1), \
+	PINMUX_DATA(KEYIN0_MARK, PORT66_FN2), \
+	PINMUX_DATA(MSIOF0_RSCK_MARK, PORT66_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A21_MARK, PORT67_FN1), \
+	PINMUX_DATA(KEYIN1_MARK, PORT67_FN2), \
+	PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT67_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A22_MARK, PORT68_FN1), \
+	PINMUX_DATA(KEYIN2_MARK, PORT68_FN2), \
+	PINMUX_DATA(MSIOF0_MCK0_MARK, PORT68_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A23_MARK, PORT69_FN1), \
+	PINMUX_DATA(KEYIN3_MARK, PORT69_FN2), \
+	PINMUX_DATA(MSIOF0_MCK1_MARK, PORT69_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A24_MARK, PORT70_FN1), \
+	PINMUX_DATA(KEYIN4_MARK, PORT70_FN2), \
+	PINMUX_DATA(MSIOF0_RXD_MARK, PORT70_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A25_MARK, PORT71_FN1), \
+	PINMUX_DATA(KEYIN5_MARK, PORT71_FN2), \
+	PINMUX_DATA(MSIOF0_SS2_MARK, PORT71_FN4, MSEL3CR_MSEL11_0),
+	PINMUX_DATA(A26_MARK, PORT72_FN1), \
+	PINMUX_DATA(KEYIN6_MARK, PORT72_FN2),
+	PINMUX_DATA(KEYIN7_MARK, PORT73_FN2),
+	PINMUX_DATA(D0_NAF0_MARK, PORT74_FN1),
+	PINMUX_DATA(D1_NAF1_MARK, PORT75_FN1),
+	PINMUX_DATA(D2_NAF2_MARK, PORT76_FN1),
+	PINMUX_DATA(D3_NAF3_MARK, PORT77_FN1),
+	PINMUX_DATA(D4_NAF4_MARK, PORT78_FN1),
+	PINMUX_DATA(D5_NAF5_MARK, PORT79_FN1),
+	PINMUX_DATA(D6_NAF6_MARK, PORT80_FN1),
+	PINMUX_DATA(D7_NAF7_MARK, PORT81_FN1),
+	PINMUX_DATA(D8_NAF8_MARK, PORT82_FN1),
+	PINMUX_DATA(D9_NAF9_MARK, PORT83_FN1),
+	PINMUX_DATA(D10_NAF10_MARK, PORT84_FN1),
+	PINMUX_DATA(D11_NAF11_MARK, PORT85_FN1),
+	PINMUX_DATA(D12_NAF12_MARK, PORT86_FN1),
+	PINMUX_DATA(D13_NAF13_MARK, PORT87_FN1),
+	PINMUX_DATA(D14_NAF14_MARK, PORT88_FN1),
+	PINMUX_DATA(D15_NAF15_MARK, PORT89_FN1),
+	PINMUX_DATA(CS4__MARK, PORT90_FN1),
+	PINMUX_DATA(CS5A__MARK, PORT91_FN1), \
+	PINMUX_DATA(PORT91_RDWR_MARK, PORT91_FN2),
+	PINMUX_DATA(CS5B__MARK, PORT92_FN1), \
+	PINMUX_DATA(FCE1__MARK, PORT92_FN2),
+	PINMUX_DATA(CS6B__MARK, PORT93_FN1), \
+	PINMUX_DATA(DACK0_MARK, PORT93_FN4),
+	PINMUX_DATA(FCE0__MARK, PORT94_FN1), \
+	PINMUX_DATA(CS6A__MARK, PORT94_FN2),
+	PINMUX_DATA(WAIT__MARK, PORT95_FN1), \
+	PINMUX_DATA(DREQ0_MARK, PORT95_FN2),
+	PINMUX_DATA(RD__FSC_MARK, PORT96_FN1),
+	PINMUX_DATA(WE0__FWE_MARK, PORT97_FN1), \
+	PINMUX_DATA(RDWR_FWE_MARK, PORT97_FN2),
+	PINMUX_DATA(WE1__MARK, PORT98_FN1),
+	PINMUX_DATA(FRB_MARK, PORT99_FN1),
+	PINMUX_DATA(CKO_MARK, PORT100_FN1),
+	PINMUX_DATA(NBRSTOUT__MARK, PORT101_FN1),
+	PINMUX_DATA(NBRST__MARK, PORT102_FN1),
+	PINMUX_DATA(BBIF2_TXD_MARK, PORT103_FN3),
+	PINMUX_DATA(BBIF2_RXD_MARK, PORT104_FN3),
+	PINMUX_DATA(BBIF2_SYNC_MARK, PORT105_FN3),
+	PINMUX_DATA(BBIF2_SCK_MARK, PORT106_FN3),
+	PINMUX_DATA(SCIFA3_CTS__MARK, PORT107_FN3), \
+	PINMUX_DATA(MFG3_IN2_MARK, PORT107_FN4),
+	PINMUX_DATA(SCIFA3_RXD_MARK, PORT108_FN3), \
+	PINMUX_DATA(MFG3_IN1_MARK, PORT108_FN4),
+	PINMUX_DATA(BBIF1_SS2_MARK, PORT109_FN2), \
+	PINMUX_DATA(SCIFA3_RTS__MARK, PORT109_FN3), \
+	PINMUX_DATA(MFG3_OUT1_MARK, PORT109_FN4),
+	PINMUX_DATA(SCIFA3_TXD_MARK, PORT110_FN3),
+	PINMUX_DATA(HSI_RX_DATA_MARK, PORT111_FN1), \
+	PINMUX_DATA(BBIF1_RXD_MARK, PORT111_FN3),
+	PINMUX_DATA(HSI_TX_WAKE_MARK, PORT112_FN1), \
+	PINMUX_DATA(BBIF1_TSCK_MARK, PORT112_FN3),
+	PINMUX_DATA(HSI_TX_DATA_MARK, PORT113_FN1), \
+	PINMUX_DATA(BBIF1_TSYNC_MARK, PORT113_FN3),
+	PINMUX_DATA(HSI_TX_READY_MARK, PORT114_FN1), \
+	PINMUX_DATA(BBIF1_TXD_MARK, PORT114_FN3),
+	PINMUX_DATA(HSI_RX_READY_MARK, PORT115_FN1), \
+	PINMUX_DATA(BBIF1_RSCK_MARK, PORT115_FN3), \
+	PINMUX_DATA(PORT115_I2C_SCL2_MARK, PORT115_FN5, MSEL2CR_MSEL17_1), \
+	PINMUX_DATA(PORT115_I2C_SCL3_MARK, PORT115_FN6, MSEL2CR_MSEL19_1),
+	PINMUX_DATA(HSI_RX_WAKE_MARK, PORT116_FN1), \
+	PINMUX_DATA(BBIF1_RSYNC_MARK, PORT116_FN3), \
+	PINMUX_DATA(PORT116_I2C_SDA2_MARK, PORT116_FN5, MSEL2CR_MSEL17_1), \
+	PINMUX_DATA(PORT116_I2C_SDA3_MARK, PORT116_FN6, MSEL2CR_MSEL19_1),
+	PINMUX_DATA(HSI_RX_FLAG_MARK, PORT117_FN1), \
+	PINMUX_DATA(BBIF1_SS1_MARK, PORT117_FN2), \
+	PINMUX_DATA(BBIF1_FLOW_MARK, PORT117_FN3),
+	PINMUX_DATA(HSI_TX_FLAG_MARK, PORT118_FN1),
+	PINMUX_DATA(VIO_VD_MARK, PORT128_FN1), \
+	PINMUX_DATA(PORT128_LCD2VSYN_MARK, PORT128_FN4, MSEL3CR_MSEL2_0), \
+	PINMUX_DATA(VIO2_VD_MARK, PORT128_FN6, MSEL4CR_MSEL27_0), \
+	PINMUX_DATA(LCD2D0_MARK, PORT128_FN7),
+
+	PINMUX_DATA(VIO_HD_MARK, PORT129_FN1), \
+	PINMUX_DATA(PORT129_LCD2HSYN_MARK, PORT129_FN4), \
+	PINMUX_DATA(PORT129_LCD2CS__MARK, PORT129_FN5), \
+	PINMUX_DATA(VIO2_HD_MARK, PORT129_FN6, MSEL4CR_MSEL27_0), \
+	PINMUX_DATA(LCD2D1_MARK, PORT129_FN7),
+	PINMUX_DATA(VIO_D0_MARK, PORT130_FN1), \
+	PINMUX_DATA(PORT130_MSIOF2_RXD_MARK, PORT130_FN3, MSEL4CR_MSEL11_0,
+		MSEL4CR_MSEL10_1), \
+	PINMUX_DATA(LCD2D10_MARK, PORT130_FN7),
+	PINMUX_DATA(VIO_D1_MARK, PORT131_FN1), \
+	PINMUX_DATA(PORT131_KEYOUT6_MARK, PORT131_FN2), \
+	PINMUX_DATA(PORT131_MSIOF2_SS1_MARK, PORT131_FN3), \
+	PINMUX_DATA(PORT131_KEYOUT11_MARK, PORT131_FN4), \
+	PINMUX_DATA(LCD2D11_MARK, PORT131_FN7),
+	PINMUX_DATA(VIO_D2_MARK, PORT132_FN1), \
+	PINMUX_DATA(PORT132_KEYOUT7_MARK, PORT132_FN2), \
+	PINMUX_DATA(PORT132_MSIOF2_SS2_MARK, PORT132_FN3), \
+	PINMUX_DATA(PORT132_KEYOUT10_MARK, PORT132_FN4), \
+	PINMUX_DATA(LCD2D12_MARK, PORT132_FN7),
+	PINMUX_DATA(VIO_D3_MARK, PORT133_FN1), \
+	PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT133_FN3, MSEL4CR_MSEL11_0), \
+	PINMUX_DATA(LCD2D13_MARK, PORT133_FN7),
+	PINMUX_DATA(VIO_D4_MARK, PORT134_FN1), \
+	PINMUX_DATA(MSIOF2_TXD_MARK, PORT134_FN3, MSEL4CR_MSEL11_0), \
+	PINMUX_DATA(LCD2D14_MARK, PORT134_FN7),
+	PINMUX_DATA(VIO_D5_MARK, PORT135_FN1), \
+	PINMUX_DATA(MSIOF2_TSCK_MARK, PORT135_FN3, MSEL4CR_MSEL11_0), \
+	PINMUX_DATA(LCD2D15_MARK, PORT135_FN7),
+	PINMUX_DATA(VIO_D6_MARK, PORT136_FN1), \
+	PINMUX_DATA(PORT136_KEYOUT8_MARK, PORT136_FN2), \
+	PINMUX_DATA(LCD2D16_MARK, PORT136_FN7),
+	PINMUX_DATA(VIO_D7_MARK, PORT137_FN1), \
+	PINMUX_DATA(PORT137_KEYOUT9_MARK, PORT137_FN2), \
+	PINMUX_DATA(LCD2D17_MARK, PORT137_FN7),
+	PINMUX_DATA(VIO_D8_MARK, PORT138_FN1), \
+	PINMUX_DATA(PORT138_KEYOUT8_MARK, PORT138_FN2), \
+	PINMUX_DATA(VIO2_D0_MARK, PORT138_FN6), \
+	PINMUX_DATA(LCD2D6_MARK, PORT138_FN7),
+	PINMUX_DATA(VIO_D9_MARK, PORT139_FN1), \
+	PINMUX_DATA(PORT139_KEYOUT9_MARK, PORT139_FN2), \
+	PINMUX_DATA(VIO2_D1_MARK, PORT139_FN6), \
+	PINMUX_DATA(LCD2D7_MARK, PORT139_FN7),
+	PINMUX_DATA(VIO_D10_MARK, PORT140_FN1), \
+	PINMUX_DATA(TPU0TO2_MARK, PORT140_FN4), \
+	PINMUX_DATA(VIO2_D2_MARK, PORT140_FN6), \
+	PINMUX_DATA(LCD2D8_MARK, PORT140_FN7),
+	PINMUX_DATA(VIO_D11_MARK, PORT141_FN1), \
+	PINMUX_DATA(TPU0TO3_MARK, PORT141_FN4), \
+	PINMUX_DATA(VIO2_D3_MARK, PORT141_FN6), \
+	PINMUX_DATA(LCD2D9_MARK, PORT141_FN7),
+	PINMUX_DATA(VIO_D12_MARK, PORT142_FN1), \
+	PINMUX_DATA(PORT142_KEYOUT10_MARK, PORT142_FN2), \
+	PINMUX_DATA(VIO2_D4_MARK, PORT142_FN6), \
+	PINMUX_DATA(LCD2D2_MARK, PORT142_FN7),
+	PINMUX_DATA(VIO_D13_MARK, PORT143_FN1), \
+	PINMUX_DATA(PORT143_KEYOUT11_MARK, PORT143_FN2), \
+	PINMUX_DATA(PORT143_KEYOUT6_MARK, PORT143_FN3), \
+	PINMUX_DATA(VIO2_D5_MARK, PORT143_FN6), \
+	PINMUX_DATA(LCD2D3_MARK, PORT143_FN7),
+	PINMUX_DATA(VIO_D14_MARK, PORT144_FN1), \
+	PINMUX_DATA(PORT144_KEYOUT7_MARK, PORT144_FN2), \
+	PINMUX_DATA(VIO2_D6_MARK, PORT144_FN6), \
+	PINMUX_DATA(LCD2D4_MARK, PORT144_FN7),
+	PINMUX_DATA(VIO_D15_MARK, PORT145_FN1), \
+	PINMUX_DATA(TPU1TO3_MARK, PORT145_FN3), \
+	PINMUX_DATA(PORT145_LCD2DISP_MARK, PORT145_FN4), \
+	PINMUX_DATA(PORT145_LCD2RS_MARK, PORT145_FN5), \
+	PINMUX_DATA(VIO2_D7_MARK, PORT145_FN6), \
+	PINMUX_DATA(LCD2D5_MARK, PORT145_FN7),
+	PINMUX_DATA(VIO_CLK_MARK, PORT146_FN1), \
+	PINMUX_DATA(LCD2DCK_MARK, PORT146_FN4), \
+	PINMUX_DATA(PORT146_LCD2WR__MARK, PORT146_FN5), \
+	PINMUX_DATA(VIO2_CLK_MARK, PORT146_FN6, MSEL4CR_MSEL27_0), \
+	PINMUX_DATA(LCD2D18_MARK, PORT146_FN7),
+	PINMUX_DATA(VIO_FIELD_MARK, PORT147_FN1), \
+	PINMUX_DATA(LCD2RD__MARK, PORT147_FN4), \
+	PINMUX_DATA(VIO2_FIELD_MARK, PORT147_FN6, MSEL4CR_MSEL27_0), \
+	PINMUX_DATA(LCD2D19_MARK, PORT147_FN7),
+	PINMUX_DATA(VIO_CKO_MARK, PORT148_FN1),
+	PINMUX_DATA(A27_MARK, PORT149_FN1), \
+	PINMUX_DATA(PORT149_RDWR_MARK, PORT149_FN2), \
+	PINMUX_DATA(MFG0_IN1_MARK, PORT149_FN3), \
+	PINMUX_DATA(PORT149_KEYOUT9_MARK, PORT149_FN4),
+	PINMUX_DATA(MFG0_IN2_MARK, PORT150_FN3),
+	PINMUX_DATA(TS_SPSYNC3_MARK, PORT151_FN4), \
+	PINMUX_DATA(MSIOF2_RSCK_MARK, PORT151_FN5),
+	PINMUX_DATA(TS_SDAT3_MARK, PORT152_FN4), \
+	PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT152_FN5),
+	PINMUX_DATA(TPU1TO2_MARK, PORT153_FN3), \
+	PINMUX_DATA(TS_SDEN3_MARK, PORT153_FN4), \
+	PINMUX_DATA(PORT153_MSIOF2_SS1_MARK, PORT153_FN5),
+	PINMUX_DATA(SCIFA2_TXD1_MARK, PORT154_FN2, MSEL3CR_MSEL9_0), \
+	PINMUX_DATA(MSIOF2_MCK0_MARK, PORT154_FN5),
+	PINMUX_DATA(SCIFA2_RXD1_MARK, PORT155_FN2, MSEL3CR_MSEL9_0), \
+	PINMUX_DATA(MSIOF2_MCK1_MARK, PORT155_FN5),
+	PINMUX_DATA(SCIFA2_RTS1__MARK, PORT156_FN2, MSEL3CR_MSEL9_0), \
+	PINMUX_DATA(PORT156_MSIOF2_SS2_MARK, PORT156_FN5),
+	PINMUX_DATA(SCIFA2_CTS1__MARK, PORT157_FN2, MSEL3CR_MSEL9_0), \
+	PINMUX_DATA(PORT157_MSIOF2_RXD_MARK, PORT157_FN5, MSEL4CR_MSEL11_0,
+		MSEL4CR_MSEL10_0),
+	PINMUX_DATA(DINT__MARK, PORT158_FN1), \
+	PINMUX_DATA(SCIFA2_SCK1_MARK, PORT158_FN2, MSEL3CR_MSEL9_0), \
+	PINMUX_DATA(TS_SCK3_MARK, PORT158_FN4),
+	PINMUX_DATA(PORT159_SCIFB_SCK_MARK, PORT159_FN1, MSEL4CR_MSEL22_0), \
+	PINMUX_DATA(PORT159_SCIFA5_SCK_MARK, PORT159_FN2, MSEL4CR_MSEL21_1), \
+	PINMUX_DATA(NMI_MARK, PORT159_FN3),
+	PINMUX_DATA(PORT160_SCIFB_TXD_MARK, PORT160_FN1, MSEL4CR_MSEL22_0), \
+	PINMUX_DATA(PORT160_SCIFA5_TXD_MARK, PORT160_FN2, MSEL4CR_MSEL21_1),
+	PINMUX_DATA(PORT161_SCIFB_CTS__MARK, PORT161_FN1, MSEL4CR_MSEL22_0), \
+	PINMUX_DATA(PORT161_SCIFA5_CTS__MARK, PORT161_FN2, MSEL4CR_MSEL21_1),
+	PINMUX_DATA(PORT162_SCIFB_RXD_MARK, PORT162_FN1, MSEL4CR_MSEL22_0), \
+	PINMUX_DATA(PORT162_SCIFA5_RXD_MARK, PORT162_FN2, MSEL4CR_MSEL21_1),
+	PINMUX_DATA(PORT163_SCIFB_RTS__MARK, PORT163_FN1, MSEL4CR_MSEL22_0), \
+	PINMUX_DATA(PORT163_SCIFA5_RTS__MARK, PORT163_FN2, MSEL4CR_MSEL21_1), \
+	PINMUX_DATA(TPU3TO0_MARK, PORT163_FN5),
+	PINMUX_DATA(LCDD0_MARK, PORT192_FN1),
+	PINMUX_DATA(LCDD1_MARK, PORT193_FN1), \
+	PINMUX_DATA(PORT193_SCIFA5_CTS__MARK, PORT193_FN3, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_1), \
+	PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT193_FN5),
+	PINMUX_DATA(LCDD2_MARK, PORT194_FN1), \
+	PINMUX_DATA(PORT194_SCIFA5_RTS__MARK, PORT194_FN3, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_1), \
+	PINMUX_DATA(BBIF2_TSCK1_MARK, PORT194_FN5),
+	PINMUX_DATA(LCDD3_MARK, PORT195_FN1), \
+	PINMUX_DATA(PORT195_SCIFA5_RXD_MARK, PORT195_FN3, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_1), \
+	PINMUX_DATA(BBIF2_TXD1_MARK, PORT195_FN5),
+	PINMUX_DATA(LCDD4_MARK, PORT196_FN1), \
+	PINMUX_DATA(PORT196_SCIFA5_TXD_MARK, PORT196_FN3, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_1),
+	PINMUX_DATA(LCDD5_MARK, PORT197_FN1), \
+	PINMUX_DATA(PORT197_SCIFA5_SCK_MARK, PORT197_FN3, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_1), \
+	PINMUX_DATA(MFG2_OUT2_MARK, PORT197_FN5), \
+	PINMUX_DATA(TPU2TO1_MARK, PORT197_FN7),
+	PINMUX_DATA(LCDD6_MARK, PORT198_FN1),
+	PINMUX_DATA(LCDD7_MARK, PORT199_FN1), \
+	PINMUX_DATA(TPU4TO1_MARK, PORT199_FN2), \
+	PINMUX_DATA(MFG4_OUT2_MARK, PORT199_FN5),
+	PINMUX_DATA(LCDD8_MARK, PORT200_FN1), \
+	PINMUX_DATA(D16_MARK, PORT200_FN6),
+	PINMUX_DATA(LCDD9_MARK, PORT201_FN1), \
+	PINMUX_DATA(D17_MARK, PORT201_FN6),
+	PINMUX_DATA(LCDD10_MARK, PORT202_FN1), \
+	PINMUX_DATA(D18_MARK, PORT202_FN6),
+	PINMUX_DATA(LCDD11_MARK, PORT203_FN1), \
+	PINMUX_DATA(D19_MARK, PORT203_FN6),
+	PINMUX_DATA(LCDD12_MARK, PORT204_FN1), \
+	PINMUX_DATA(D20_MARK, PORT204_FN6),
+	PINMUX_DATA(LCDD13_MARK, PORT205_FN1), \
+	PINMUX_DATA(D21_MARK, PORT205_FN6),
+	PINMUX_DATA(LCDD14_MARK, PORT206_FN1), \
+	PINMUX_DATA(D22_MARK, PORT206_FN6),
+	PINMUX_DATA(LCDD15_MARK, PORT207_FN1), \
+	PINMUX_DATA(PORT207_MSIOF0L_SS1_MARK, PORT207_FN2, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D23_MARK, PORT207_FN6),
+	PINMUX_DATA(LCDD16_MARK, PORT208_FN1), \
+	PINMUX_DATA(PORT208_MSIOF0L_SS2_MARK, PORT208_FN2, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D24_MARK, PORT208_FN6),
+	PINMUX_DATA(LCDD17_MARK, PORT209_FN1), \
+	PINMUX_DATA(D25_MARK, PORT209_FN6),
+	PINMUX_DATA(LCDD18_MARK, PORT210_FN1), \
+	PINMUX_DATA(DREQ2_MARK, PORT210_FN2), \
+	PINMUX_DATA(PORT210_MSIOF0L_SS1_MARK, PORT210_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D26_MARK, PORT210_FN6),
+	PINMUX_DATA(LCDD19_MARK, PORT211_FN1), \
+	PINMUX_DATA(PORT211_MSIOF0L_SS2_MARK, PORT211_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D27_MARK, PORT211_FN6),
+	PINMUX_DATA(LCDD20_MARK, PORT212_FN1), \
+	PINMUX_DATA(TS_SPSYNC1_MARK, PORT212_FN2), \
+	PINMUX_DATA(MSIOF0L_MCK0_MARK, PORT212_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D28_MARK, PORT212_FN6),
+	PINMUX_DATA(LCDD21_MARK, PORT213_FN1), \
+	PINMUX_DATA(TS_SDAT1_MARK, PORT213_FN2), \
+	PINMUX_DATA(MSIOF0L_MCK1_MARK, PORT213_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D29_MARK, PORT213_FN6),
+	PINMUX_DATA(LCDD22_MARK, PORT214_FN1), \
+	PINMUX_DATA(TS_SDEN1_MARK, PORT214_FN2), \
+	PINMUX_DATA(MSIOF0L_RSCK_MARK, PORT214_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D30_MARK, PORT214_FN6),
+	PINMUX_DATA(LCDD23_MARK, PORT215_FN1), \
+	PINMUX_DATA(TS_SCK1_MARK, PORT215_FN2), \
+	PINMUX_DATA(MSIOF0L_RSYNC_MARK, PORT215_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(D31_MARK, PORT215_FN6),
+	PINMUX_DATA(LCDDCK_MARK, PORT216_FN1), \
+	PINMUX_DATA(LCDWR__MARK, PORT216_FN2),
+	PINMUX_DATA(LCDRD__MARK, PORT217_FN1), \
+	PINMUX_DATA(DACK2_MARK, PORT217_FN2), \
+	PINMUX_DATA(PORT217_LCD2RS_MARK, PORT217_FN3), \
+	PINMUX_DATA(MSIOF0L_TSYNC_MARK, PORT217_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(VIO2_FIELD3_MARK, PORT217_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_1), \
+	PINMUX_DATA(PORT217_LCD2DISP_MARK, PORT217_FN7),
+	PINMUX_DATA(LCDHSYN_MARK, PORT218_FN1), \
+	PINMUX_DATA(LCDCS__MARK, PORT218_FN2), \
+	PINMUX_DATA(LCDCS2__MARK, PORT218_FN3), \
+	PINMUX_DATA(DACK3_MARK, PORT218_FN4), \
+	PINMUX_DATA(PORT218_VIO_CKOR_MARK, PORT218_FN5),
+	PINMUX_DATA(LCDDISP_MARK, PORT219_FN1), \
+	PINMUX_DATA(LCDRS_MARK, PORT219_FN2), \
+	PINMUX_DATA(PORT219_LCD2WR__MARK, PORT219_FN3), \
+	PINMUX_DATA(DREQ3_MARK, PORT219_FN4), \
+	PINMUX_DATA(MSIOF0L_TSCK_MARK, PORT219_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(VIO2_CLK3_MARK, PORT219_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_1), \
+	PINMUX_DATA(LCD2DCK_2_MARK, PORT219_FN7),
+	PINMUX_DATA(LCDVSYN_MARK, PORT220_FN1), \
+	PINMUX_DATA(LCDVSYN2_MARK, PORT220_FN2),
+	PINMUX_DATA(LCDLCLK_MARK, PORT221_FN1), \
+	PINMUX_DATA(DREQ1_MARK, PORT221_FN2), \
+	PINMUX_DATA(PORT221_LCD2CS__MARK, PORT221_FN3), \
+	PINMUX_DATA(PWEN_MARK, PORT221_FN4), \
+	PINMUX_DATA(MSIOF0L_RXD_MARK, PORT221_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(VIO2_HD3_MARK, PORT221_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_1), \
+	PINMUX_DATA(PORT221_LCD2HSYN_MARK, PORT221_FN7),
+	PINMUX_DATA(LCDDON_MARK, PORT222_FN1), \
+	PINMUX_DATA(LCDDON2_MARK, PORT222_FN2), \
+	PINMUX_DATA(DACK1_MARK, PORT222_FN3), \
+	PINMUX_DATA(OVCN_MARK, PORT222_FN4), \
+	PINMUX_DATA(MSIOF0L_TXD_MARK, PORT222_FN5, MSEL3CR_MSEL11_1), \
+	PINMUX_DATA(VIO2_VD3_MARK, PORT222_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_1), \
+	PINMUX_DATA(PORT222_LCD2VSYN_MARK, PORT222_FN7, MSEL3CR_MSEL2_1),
+
+	PINMUX_DATA(SCIFA1_TXD_MARK, PORT225_FN2), \
+	PINMUX_DATA(OVCN2_MARK, PORT225_FN4),
+	PINMUX_DATA(EXTLP_MARK, PORT226_FN1), \
+	PINMUX_DATA(SCIFA1_SCK_MARK, PORT226_FN2), \
+	PINMUX_DATA(PORT226_VIO_CKO2_MARK, PORT226_FN5),
+	PINMUX_DATA(SCIFA1_RTS__MARK, PORT227_FN2), \
+	PINMUX_DATA(IDIN_MARK, PORT227_FN4),
+	PINMUX_DATA(SCIFA1_RXD_MARK, PORT228_FN2),
+	PINMUX_DATA(SCIFA1_CTS__MARK, PORT229_FN2), \
+	PINMUX_DATA(MFG1_IN1_MARK, PORT229_FN3),
+	PINMUX_DATA(MSIOF1_TXD_MARK, PORT230_FN1), \
+	PINMUX_DATA(SCIFA2_TXD2_MARK, PORT230_FN2, MSEL3CR_MSEL9_1),
+	PINMUX_DATA(MSIOF1_TSYNC_MARK, PORT231_FN1), \
+	PINMUX_DATA(SCIFA2_CTS2__MARK, PORT231_FN2, MSEL3CR_MSEL9_1),
+	PINMUX_DATA(MSIOF1_TSCK_MARK, PORT232_FN1), \
+	PINMUX_DATA(SCIFA2_SCK2_MARK, PORT232_FN2, MSEL3CR_MSEL9_1),
+	PINMUX_DATA(MSIOF1_RXD_MARK, PORT233_FN1), \
+	PINMUX_DATA(SCIFA2_RXD2_MARK, PORT233_FN2, MSEL3CR_MSEL9_1),
+	PINMUX_DATA(MSIOF1_RSCK_MARK, PORT234_FN1), \
+	PINMUX_DATA(SCIFA2_RTS2__MARK, PORT234_FN2, MSEL3CR_MSEL9_1), \
+	PINMUX_DATA(VIO2_CLK2_MARK, PORT234_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_0), \
+	PINMUX_DATA(LCD2D20_MARK, PORT234_FN7),
+	PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT235_FN1), \
+	PINMUX_DATA(MFG1_IN2_MARK, PORT235_FN3), \
+	PINMUX_DATA(VIO2_VD2_MARK, PORT235_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_0), \
+	PINMUX_DATA(LCD2D21_MARK, PORT235_FN7),
+	PINMUX_DATA(MSIOF1_MCK0_MARK, PORT236_FN1), \
+	PINMUX_DATA(PORT236_I2C_SDA2_MARK, PORT236_FN2, MSEL2CR_MSEL17_0,
+		MSEL2CR_MSEL16_0),
+	PINMUX_DATA(MSIOF1_MCK1_MARK, PORT237_FN1), \
+	PINMUX_DATA(PORT237_I2C_SCL2_MARK, PORT237_FN2, MSEL2CR_MSEL17_0,
+		MSEL2CR_MSEL16_0),
+	PINMUX_DATA(MSIOF1_SS1_MARK, PORT238_FN1), \
+	PINMUX_DATA(VIO2_FIELD2_MARK, PORT238_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_0), \
+	PINMUX_DATA(LCD2D22_MARK, PORT238_FN7),
+	PINMUX_DATA(MSIOF1_SS2_MARK, PORT239_FN1), \
+	PINMUX_DATA(VIO2_HD2_MARK, PORT239_FN6, MSEL4CR_MSEL27_1,
+		MSEL4CR_MSEL26_0), \
+	PINMUX_DATA(LCD2D23_MARK, PORT239_FN7),
+	PINMUX_DATA(SCIFA6_TXD_MARK, PORT240_FN1),
+	PINMUX_DATA(PORT241_IRDA_OUT_MARK, PORT241_FN1, MSEL4CR_MSEL19_0), \
+	PINMUX_DATA(PORT241_IROUT_MARK, PORT241_FN2), \
+	PINMUX_DATA(MFG4_OUT1_MARK, PORT241_FN3), \
+	PINMUX_DATA(TPU4TO0_MARK, PORT241_FN4),
+	PINMUX_DATA(PORT242_IRDA_IN_MARK, PORT242_FN1, MSEL4CR_MSEL19_0), \
+	PINMUX_DATA(MFG4_IN2_MARK, PORT242_FN3),
+	PINMUX_DATA(PORT243_IRDA_FIRSEL_MARK, PORT243_FN1, MSEL4CR_MSEL19_0), \
+	PINMUX_DATA(PORT243_VIO_CKO2_MARK, PORT243_FN2),
+	PINMUX_DATA(PORT244_SCIFA5_CTS__MARK, PORT244_FN1, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_0), \
+	PINMUX_DATA(MFG2_IN1_MARK, PORT244_FN2), \
+	PINMUX_DATA(PORT244_SCIFB_CTS__MARK, PORT244_FN3, MSEL4CR_MSEL22_1), \
+	PINMUX_DATA(MSIOF2R_RXD_MARK, PORT244_FN7, MSEL4CR_MSEL11_1),
+	PINMUX_DATA(PORT245_SCIFA5_RTS__MARK, PORT245_FN1, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_0), \
+	PINMUX_DATA(MFG2_IN2_MARK, PORT245_FN2), \
+	PINMUX_DATA(PORT245_SCIFB_RTS__MARK, PORT245_FN3, MSEL4CR_MSEL22_1), \
+	PINMUX_DATA(MSIOF2R_TXD_MARK, PORT245_FN7, MSEL4CR_MSEL11_1),
+	PINMUX_DATA(PORT246_SCIFA5_RXD_MARK, PORT246_FN1, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_0), \
+	PINMUX_DATA(MFG1_OUT1_MARK, PORT246_FN2), \
+	PINMUX_DATA(PORT246_SCIFB_RXD_MARK, PORT246_FN3, MSEL4CR_MSEL22_1), \
+	PINMUX_DATA(TPU1TO0_MARK, PORT246_FN4),
+	PINMUX_DATA(PORT247_SCIFA5_TXD_MARK, PORT247_FN1, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_0), \
+	PINMUX_DATA(MFG3_OUT2_MARK, PORT247_FN2), \
+	PINMUX_DATA(PORT247_SCIFB_TXD_MARK, PORT247_FN3, MSEL4CR_MSEL22_1), \
+	PINMUX_DATA(TPU3TO1_MARK, PORT247_FN4),
+	PINMUX_DATA(PORT248_SCIFA5_SCK_MARK, PORT248_FN1, MSEL4CR_MSEL21_0,
+		MSEL4CR_MSEL20_0), \
+	PINMUX_DATA(MFG2_OUT1_MARK, PORT248_FN2), \
+	PINMUX_DATA(PORT248_SCIFB_SCK_MARK, PORT248_FN3, MSEL4CR_MSEL22_1), \
+	PINMUX_DATA(TPU2TO0_MARK, PORT248_FN4), \
+	PINMUX_DATA(PORT248_I2C_SCL3_MARK, PORT248_FN5, MSEL2CR_MSEL19_0,
+		MSEL2CR_MSEL18_0), \
+	PINMUX_DATA(MSIOF2R_TSCK_MARK, PORT248_FN7, MSEL4CR_MSEL11_1),
+	PINMUX_DATA(PORT249_IROUT_MARK, PORT249_FN1), \
+	PINMUX_DATA(MFG4_IN1_MARK, PORT249_FN2), \
+	PINMUX_DATA(PORT249_I2C_SDA3_MARK, PORT249_FN5, MSEL2CR_MSEL19_0,
+		MSEL2CR_MSEL18_0), \
+	PINMUX_DATA(MSIOF2R_TSYNC_MARK, PORT249_FN7, MSEL4CR_MSEL11_1),
+	PINMUX_DATA(SDHICLK0_MARK, PORT250_FN1),
+	PINMUX_DATA(SDHICD0_MARK, PORT251_FN1),
+	PINMUX_DATA(SDHID0_0_MARK, PORT252_FN1),
+	PINMUX_DATA(SDHID0_1_MARK, PORT253_FN1),
+	PINMUX_DATA(SDHID0_2_MARK, PORT254_FN1),
+	PINMUX_DATA(SDHID0_3_MARK, PORT255_FN1),
+	PINMUX_DATA(SDHICMD0_MARK, PORT256_FN1),
+	PINMUX_DATA(SDHIWP0_MARK, PORT257_FN1),
+	PINMUX_DATA(SDHICLK1_MARK, PORT258_FN1),
+	PINMUX_DATA(SDHID1_0_MARK, PORT259_FN1), \
+	PINMUX_DATA(TS_SPSYNC2_MARK, PORT259_FN3),
+	PINMUX_DATA(SDHID1_1_MARK, PORT260_FN1), \
+	PINMUX_DATA(TS_SDAT2_MARK, PORT260_FN3),
+	PINMUX_DATA(SDHID1_2_MARK, PORT261_FN1), \
+	PINMUX_DATA(TS_SDEN2_MARK, PORT261_FN3),
+	PINMUX_DATA(SDHID1_3_MARK, PORT262_FN1), \
+	PINMUX_DATA(TS_SCK2_MARK, PORT262_FN3),
+	PINMUX_DATA(SDHICMD1_MARK, PORT263_FN1),
+	PINMUX_DATA(SDHICLK2_MARK, PORT264_FN1),
+	PINMUX_DATA(SDHID2_0_MARK, PORT265_FN1), \
+	PINMUX_DATA(TS_SPSYNC4_MARK, PORT265_FN3),
+	PINMUX_DATA(SDHID2_1_MARK, PORT266_FN1), \
+	PINMUX_DATA(TS_SDAT4_MARK, PORT266_FN3),
+	PINMUX_DATA(SDHID2_2_MARK, PORT267_FN1), \
+	PINMUX_DATA(TS_SDEN4_MARK, PORT267_FN3),
+	PINMUX_DATA(SDHID2_3_MARK, PORT268_FN1), \
+	PINMUX_DATA(TS_SCK4_MARK, PORT268_FN3),
+	PINMUX_DATA(SDHICMD2_MARK, PORT269_FN1),
+	PINMUX_DATA(MMCCLK0_MARK, PORT270_FN1, MSEL4CR_MSEL15_0),
+	PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, MSEL4CR_MSEL15_0),
+	PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, MSEL4CR_MSEL15_0),
+	PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, MSEL4CR_MSEL15_0),
+	PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, MSEL4CR_MSEL15_0),
+	PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, MSEL4CR_MSEL15_0), \
+	PINMUX_DATA(TS_SPSYNC5_MARK, PORT275_FN3),
+	PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, MSEL4CR_MSEL15_0), \
+	PINMUX_DATA(TS_SDAT5_MARK, PORT276_FN3),
+	PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, MSEL4CR_MSEL15_0), \
+	PINMUX_DATA(TS_SDEN5_MARK, PORT277_FN3),
+	PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, MSEL4CR_MSEL15_0), \
+	PINMUX_DATA(TS_SCK5_MARK, PORT278_FN3),
+	PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, MSEL4CR_MSEL15_0),
+	PINMUX_DATA(RESETOUTS__MARK, PORT281_FN1), \
+	PINMUX_DATA(EXTAL2OUT_MARK, PORT281_FN2),
+	PINMUX_DATA(MCP_WAIT__MCP_FRB_MARK, PORT288_FN1),
+	PINMUX_DATA(MCP_CKO_MARK, PORT289_FN1), \
+	PINMUX_DATA(MMCCLK1_MARK, PORT289_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D15_MCP_NAF15_MARK, PORT290_FN1),
+	PINMUX_DATA(MCP_D14_MCP_NAF14_MARK, PORT291_FN1),
+	PINMUX_DATA(MCP_D13_MCP_NAF13_MARK, PORT292_FN1),
+	PINMUX_DATA(MCP_D12_MCP_NAF12_MARK, PORT293_FN1),
+	PINMUX_DATA(MCP_D11_MCP_NAF11_MARK, PORT294_FN1),
+	PINMUX_DATA(MCP_D10_MCP_NAF10_MARK, PORT295_FN1),
+	PINMUX_DATA(MCP_D9_MCP_NAF9_MARK, PORT296_FN1),
+	PINMUX_DATA(MCP_D8_MCP_NAF8_MARK, PORT297_FN1), \
+	PINMUX_DATA(MMCCMD1_MARK, PORT297_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D7_MCP_NAF7_MARK, PORT298_FN1), \
+	PINMUX_DATA(MMCD1_7_MARK, PORT298_FN2, MSEL4CR_MSEL15_1),
+
+	PINMUX_DATA(MCP_D6_MCP_NAF6_MARK, PORT299_FN1), \
+	PINMUX_DATA(MMCD1_6_MARK, PORT299_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D5_MCP_NAF5_MARK, PORT300_FN1), \
+	PINMUX_DATA(MMCD1_5_MARK, PORT300_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D4_MCP_NAF4_MARK, PORT301_FN1), \
+	PINMUX_DATA(MMCD1_4_MARK, PORT301_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D3_MCP_NAF3_MARK, PORT302_FN1), \
+	PINMUX_DATA(MMCD1_3_MARK, PORT302_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D2_MCP_NAF2_MARK, PORT303_FN1), \
+	PINMUX_DATA(MMCD1_2_MARK, PORT303_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D1_MCP_NAF1_MARK, PORT304_FN1), \
+	PINMUX_DATA(MMCD1_1_MARK, PORT304_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_D0_MCP_NAF0_MARK, PORT305_FN1), \
+	PINMUX_DATA(MMCD1_0_MARK, PORT305_FN2, MSEL4CR_MSEL15_1),
+	PINMUX_DATA(MCP_NBRSTOUT__MARK, PORT306_FN1),
+	PINMUX_DATA(MCP_WE0__MCP_FWE_MARK, PORT309_FN1), \
+	PINMUX_DATA(MCP_RDWR_MCP_FWE_MARK, PORT309_FN2),
+
+	/* MSEL2 special cases */
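+	/* (no port function field; selected purely by MSEL2CR bits) */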
+	PINMUX_DATA(TSIF2_TS_XX1_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+		MSEL2CR_MSEL12_0),
+	PINMUX_DATA(TSIF2_TS_XX2_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0,
+		MSEL2CR_MSEL12_1),
+	PINMUX_DATA(TSIF2_TS_XX3_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+		MSEL2CR_MSEL12_0),
+	PINMUX_DATA(TSIF2_TS_XX4_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1,
+		MSEL2CR_MSEL12_1),
+	PINMUX_DATA(TSIF2_TS_XX5_MARK, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0,
+		MSEL2CR_MSEL12_0),
+	PINMUX_DATA(TSIF1_TS_XX1_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+		MSEL2CR_MSEL9_0),
+	PINMUX_DATA(TSIF1_TS_XX2_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0,
+		MSEL2CR_MSEL9_1),
+	PINMUX_DATA(TSIF1_TS_XX3_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+		MSEL2CR_MSEL9_0),
+	PINMUX_DATA(TSIF1_TS_XX4_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1,
+		MSEL2CR_MSEL9_1),
+	PINMUX_DATA(TSIF1_TS_XX5_MARK, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0,
+		MSEL2CR_MSEL9_0),
+	PINMUX_DATA(TSIF0_TS_XX1_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+		MSEL2CR_MSEL6_0),
+	PINMUX_DATA(TSIF0_TS_XX2_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0,
+		MSEL2CR_MSEL6_1),
+	PINMUX_DATA(TSIF0_TS_XX3_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+		MSEL2CR_MSEL6_0),
+	PINMUX_DATA(TSIF0_TS_XX4_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1,
+		MSEL2CR_MSEL6_1),
+	PINMUX_DATA(TSIF0_TS_XX5_MARK, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0,
+		MSEL2CR_MSEL6_0),
+	PINMUX_DATA(MST1_TS_XX1_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+		MSEL2CR_MSEL3_0),
+	PINMUX_DATA(MST1_TS_XX2_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0,
+		MSEL2CR_MSEL3_1),
+	PINMUX_DATA(MST1_TS_XX3_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+		MSEL2CR_MSEL3_0),
+	PINMUX_DATA(MST1_TS_XX4_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1,
+		MSEL2CR_MSEL3_1),
+	PINMUX_DATA(MST1_TS_XX5_MARK, MSEL2CR_MSEL5_1, MSEL2CR_MSEL4_0,
+		MSEL2CR_MSEL3_0),
+	PINMUX_DATA(MST0_TS_XX1_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+		MSEL2CR_MSEL0_0),
+	PINMUX_DATA(MST0_TS_XX2_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0,
+		MSEL2CR_MSEL0_1),
+	PINMUX_DATA(MST0_TS_XX3_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+		MSEL2CR_MSEL0_0),
+	PINMUX_DATA(MST0_TS_XX4_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1,
+		MSEL2CR_MSEL0_1),
+	PINMUX_DATA(MST0_TS_XX5_MARK, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0,
+		MSEL2CR_MSEL0_0),
+
+	/* MSEL3 special cases */
+	PINMUX_DATA(SDHI0_VCCQ_MC0_ON_MARK, MSEL3CR_MSEL28_1),
+	PINMUX_DATA(SDHI0_VCCQ_MC0_OFF_MARK, MSEL3CR_MSEL28_0),
+	PINMUX_DATA(DEBUG_MON_VIO_MARK, MSEL3CR_MSEL15_0),
+	PINMUX_DATA(DEBUG_MON_LCDD_MARK, MSEL3CR_MSEL15_1),
+	PINMUX_DATA(LCDC_LCDC0_MARK, MSEL3CR_MSEL6_0),
+	PINMUX_DATA(LCDC_LCDC1_MARK, MSEL3CR_MSEL6_1),
+
+	/* MSEL4 special cases */
+	PINMUX_DATA(IRQ9_MEM_INT_MARK, MSEL4CR_MSEL29_0),
+	PINMUX_DATA(IRQ9_MCP_INT_MARK, MSEL4CR_MSEL29_1),
+	PINMUX_DATA(A11_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_0),
+	PINMUX_DATA(KEYOUT8_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_1),
+	PINMUX_DATA(TPU4TO3_MARK, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0),
+	PINMUX_DATA(RESETA_N_PU_ON_MARK, MSEL4CR_MSEL4_0),
+	PINMUX_DATA(RESETA_N_PU_OFF_MARK, MSEL4CR_MSEL4_1),
+	PINMUX_DATA(EDBGREQ_PD_MARK, MSEL4CR_MSEL1_0),
+	PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1),
+
+	/* Functions with pull-ups */
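+	/* (same function selection as above, plus the port's pull-up enable) */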
+	PINMUX_DATA(KEYIN0_PU_MARK, PORT66_FN2, PORT66_IN_PU),
+	PINMUX_DATA(KEYIN1_PU_MARK, PORT67_FN2, PORT67_IN_PU),
+	PINMUX_DATA(KEYIN2_PU_MARK, PORT68_FN2, PORT68_IN_PU),
+	PINMUX_DATA(KEYIN3_PU_MARK, PORT69_FN2, PORT69_IN_PU),
+	PINMUX_DATA(KEYIN4_PU_MARK, PORT70_FN2, PORT70_IN_PU),
+	PINMUX_DATA(KEYIN5_PU_MARK, PORT71_FN2, PORT71_IN_PU),
+	PINMUX_DATA(KEYIN6_PU_MARK, PORT72_FN2, PORT72_IN_PU),
+	PINMUX_DATA(KEYIN7_PU_MARK, PORT73_FN2, PORT73_IN_PU),
+
+	PINMUX_DATA(SDHID1_0_PU_MARK, PORT259_IN_PU, PORT259_FN1),
+	PINMUX_DATA(SDHID1_1_PU_MARK, PORT260_IN_PU, PORT260_FN1),
+	PINMUX_DATA(SDHID1_2_PU_MARK, PORT261_IN_PU, PORT261_FN1),
+	PINMUX_DATA(SDHID1_3_PU_MARK, PORT262_IN_PU, PORT262_FN1),
+	PINMUX_DATA(SDHICMD1_PU_MARK, PORT263_IN_PU, PORT263_FN1),
+
+	PINMUX_DATA(MMCCMD0_PU_MARK, PORT279_FN1, PORT279_IN_PU,
+		MSEL4CR_MSEL15_0),
+	PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT297_IN_PU,
+		MSEL4CR_MSEL15_1),
+	PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU),
+	PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU),
+	PINMUX_DATA(FSIAIBT_PU_MARK, PORT51_FN5, PORT51_IN_PU),
+	PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU),
+};
+
+#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
+#define GPIO_PORT_310() _310(_GPIO_PORT, , unused)
+#define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK)
+
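+/*
+ * pinmux_gpios[] exposes one GPIO per port (0-309) followed by one GPIO per
+ * function mark.  Board code selects a pin function through the legacy GPIO
+ * API, e.g. (illustrative only; any GPIO_FN_* entry is requested the same way):
+ *
+ *	gpio_request(GPIO_FN_SCIFA0_TXD, NULL);
+ */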
+static struct pinmux_gpio pinmux_gpios[] = {
+	GPIO_PORT_310(),
+
+	/* Table 25-1 (Functions 0-7) */
+	GPIO_FN(VBUS_0),
+	GPIO_FN(GPI0),
+	GPIO_FN(GPI1),
+	GPIO_FN(GPI2),
+	GPIO_FN(GPI3),
+	GPIO_FN(GPI4),
+	GPIO_FN(GPI5),
+	GPIO_FN(GPI6),
+	GPIO_FN(GPI7),
+	GPIO_FN(SCIFA7_RXD),
+	GPIO_FN(SCIFA7_CTS_),
+	GPIO_FN(GPO7), \
+	GPIO_FN(MFG0_OUT2),
+	GPIO_FN(GPO6), \
+	GPIO_FN(MFG1_OUT2),
+	GPIO_FN(GPO5), \
+	GPIO_FN(SCIFA0_SCK), \
+	GPIO_FN(FSICOSLDT3), \
+	GPIO_FN(PORT16_VIO_CKOR),
+	GPIO_FN(SCIFA0_TXD),
+	GPIO_FN(SCIFA7_TXD),
+	GPIO_FN(SCIFA7_RTS_), \
+	GPIO_FN(PORT19_VIO_CKO2),
+	GPIO_FN(GPO0),
+	GPIO_FN(GPO1),
+	GPIO_FN(GPO2), \
+	GPIO_FN(STATUS0),
+	GPIO_FN(GPO3), \
+	GPIO_FN(STATUS1),
+	GPIO_FN(GPO4), \
+	GPIO_FN(STATUS2),
+	GPIO_FN(VINT),
+	GPIO_FN(TCKON),
+	GPIO_FN(XDVFS1), \
+	GPIO_FN(PORT27_I2C_SCL2), \
+	GPIO_FN(PORT27_I2C_SCL3), \
+	GPIO_FN(MFG0_OUT1), \
+	GPIO_FN(PORT27_IROUT),
+	GPIO_FN(XDVFS2), \
+	GPIO_FN(PORT28_I2C_SDA2), \
+	GPIO_FN(PORT28_I2C_SDA3), \
+	GPIO_FN(PORT28_TPU1TO1),
+	GPIO_FN(SIM_RST), \
+	GPIO_FN(PORT29_TPU1TO1),
+	GPIO_FN(SIM_CLK), \
+	GPIO_FN(PORT30_VIO_CKOR),
+	GPIO_FN(SIM_D), \
+	GPIO_FN(PORT31_IROUT),
+	GPIO_FN(SCIFA4_TXD),
+	GPIO_FN(SCIFA4_RXD), \
+	GPIO_FN(XWUP),
+	GPIO_FN(SCIFA4_RTS_),
+	GPIO_FN(SCIFA4_CTS_),
+	GPIO_FN(FSIBOBT), \
+	GPIO_FN(FSIBIBT),
+	GPIO_FN(FSIBOLR), \
+	GPIO_FN(FSIBILR),
+	GPIO_FN(FSIBOSLD),
+	GPIO_FN(FSIBISLD),
+	GPIO_FN(VACK),
+	GPIO_FN(XTAL1L),
+	GPIO_FN(SCIFA0_RTS_), \
+	GPIO_FN(FSICOSLDT2),
+	GPIO_FN(SCIFA0_RXD),
+	GPIO_FN(SCIFA0_CTS_), \
+	GPIO_FN(FSICOSLDT1),
+	GPIO_FN(FSICOBT), \
+	GPIO_FN(FSICIBT), \
+	GPIO_FN(FSIDOBT), \
+	GPIO_FN(FSIDIBT),
+	GPIO_FN(FSICOLR), \
+	GPIO_FN(FSICILR), \
+	GPIO_FN(FSIDOLR), \
+	GPIO_FN(FSIDILR),
+	GPIO_FN(FSICOSLD), \
+	GPIO_FN(PORT47_FSICSPDIF),
+	GPIO_FN(FSICISLD), \
+	GPIO_FN(FSIDISLD),
+	GPIO_FN(FSIACK), \
+	GPIO_FN(PORT49_IRDA_OUT), \
+	GPIO_FN(PORT49_IROUT), \
+	GPIO_FN(FSIAOMC),
+	GPIO_FN(FSIAOLR), \
+	GPIO_FN(BBIF2_TSYNC2), \
+	GPIO_FN(TPU2TO2), \
+	GPIO_FN(FSIAILR),
+
+	GPIO_FN(FSIAOBT), \
+	GPIO_FN(BBIF2_TSCK2), \
+	GPIO_FN(TPU2TO3), \
+	GPIO_FN(FSIAIBT),
+	GPIO_FN(FSIAOSLD), \
+	GPIO_FN(BBIF2_TXD2),
+	GPIO_FN(FSIASPDIF), \
+	GPIO_FN(PORT53_IRDA_IN), \
+	GPIO_FN(TPU3TO3), \
+	GPIO_FN(FSIBSPDIF), \
+	GPIO_FN(PORT53_FSICSPDIF),
+	GPIO_FN(FSIBCK), \
+	GPIO_FN(PORT54_IRDA_FIRSEL), \
+	GPIO_FN(TPU3TO2), \
+	GPIO_FN(FSIBOMC), \
+	GPIO_FN(FSICCK), \
+	GPIO_FN(FSICOMC),
+	GPIO_FN(FSIAISLD), \
+	GPIO_FN(TPU0TO0),
+	GPIO_FN(A0), \
+	GPIO_FN(BS_),
+	GPIO_FN(A12), \
+	GPIO_FN(PORT58_KEYOUT7), \
+	GPIO_FN(TPU4TO2),
+	GPIO_FN(A13), \
+	GPIO_FN(PORT59_KEYOUT6), \
+	GPIO_FN(TPU0TO1),
+	GPIO_FN(A14), \
+	GPIO_FN(KEYOUT5),
+	GPIO_FN(A15), \
+	GPIO_FN(KEYOUT4),
+	GPIO_FN(A16), \
+	GPIO_FN(KEYOUT3), \
+	GPIO_FN(MSIOF0_SS1),
+	GPIO_FN(A17), \
+	GPIO_FN(KEYOUT2), \
+	GPIO_FN(MSIOF0_TSYNC),
+	GPIO_FN(A18), \
+	GPIO_FN(KEYOUT1), \
+	GPIO_FN(MSIOF0_TSCK),
+	GPIO_FN(A19), \
+	GPIO_FN(KEYOUT0), \
+	GPIO_FN(MSIOF0_TXD),
+	GPIO_FN(A20), \
+	GPIO_FN(KEYIN0), \
+	GPIO_FN(MSIOF0_RSCK),
+	GPIO_FN(A21), \
+	GPIO_FN(KEYIN1), \
+	GPIO_FN(MSIOF0_RSYNC),
+	GPIO_FN(A22), \
+	GPIO_FN(KEYIN2), \
+	GPIO_FN(MSIOF0_MCK0),
+	GPIO_FN(A23), \
+	GPIO_FN(KEYIN3), \
+	GPIO_FN(MSIOF0_MCK1),
+	GPIO_FN(A24), \
+	GPIO_FN(KEYIN4), \
+	GPIO_FN(MSIOF0_RXD),
+	GPIO_FN(A25), \
+	GPIO_FN(KEYIN5), \
+	GPIO_FN(MSIOF0_SS2),
+	GPIO_FN(A26), \
+	GPIO_FN(KEYIN6),
+	GPIO_FN(KEYIN7),
+	GPIO_FN(D0_NAF0),
+	GPIO_FN(D1_NAF1),
+	GPIO_FN(D2_NAF2),
+	GPIO_FN(D3_NAF3),
+	GPIO_FN(D4_NAF4),
+	GPIO_FN(D5_NAF5),
+	GPIO_FN(D6_NAF6),
+	GPIO_FN(D7_NAF7),
+	GPIO_FN(D8_NAF8),
+	GPIO_FN(D9_NAF9),
+	GPIO_FN(D10_NAF10),
+	GPIO_FN(D11_NAF11),
+	GPIO_FN(D12_NAF12),
+	GPIO_FN(D13_NAF13),
+	GPIO_FN(D14_NAF14),
+	GPIO_FN(D15_NAF15),
+	GPIO_FN(CS4_),
+	GPIO_FN(CS5A_), \
+	GPIO_FN(PORT91_RDWR),
+	GPIO_FN(CS5B_), \
+	GPIO_FN(FCE1_),
+	GPIO_FN(CS6B_), \
+	GPIO_FN(DACK0),
+	GPIO_FN(FCE0_), \
+	GPIO_FN(CS6A_),
+	GPIO_FN(WAIT_), \
+	GPIO_FN(DREQ0),
+	GPIO_FN(RD__FSC),
+	GPIO_FN(WE0__FWE), \
+	GPIO_FN(RDWR_FWE),
+	GPIO_FN(WE1_),
+	GPIO_FN(FRB),
+	GPIO_FN(CKO),
+	GPIO_FN(NBRSTOUT_),
+	GPIO_FN(NBRST_),
+	GPIO_FN(BBIF2_TXD),
+	GPIO_FN(BBIF2_RXD),
+	GPIO_FN(BBIF2_SYNC),
+	GPIO_FN(BBIF2_SCK),
+	GPIO_FN(SCIFA3_CTS_), \
+	GPIO_FN(MFG3_IN2),
+	GPIO_FN(SCIFA3_RXD), \
+	GPIO_FN(MFG3_IN1),
+	GPIO_FN(BBIF1_SS2), \
+	GPIO_FN(SCIFA3_RTS_), \
+	GPIO_FN(MFG3_OUT1),
+	GPIO_FN(SCIFA3_TXD),
+	GPIO_FN(HSI_RX_DATA), \
+	GPIO_FN(BBIF1_RXD),
+	GPIO_FN(HSI_TX_WAKE), \
+	GPIO_FN(BBIF1_TSCK),
+	GPIO_FN(HSI_TX_DATA), \
+	GPIO_FN(BBIF1_TSYNC),
+	GPIO_FN(HSI_TX_READY), \
+	GPIO_FN(BBIF1_TXD),
+	GPIO_FN(HSI_RX_READY), \
+	GPIO_FN(BBIF1_RSCK), \
+	GPIO_FN(PORT115_I2C_SCL2), \
+	GPIO_FN(PORT115_I2C_SCL3),
+	GPIO_FN(HSI_RX_WAKE), \
+	GPIO_FN(BBIF1_RSYNC), \
+	GPIO_FN(PORT116_I2C_SDA2), \
+	GPIO_FN(PORT116_I2C_SDA3),
+	GPIO_FN(HSI_RX_FLAG), \
+	GPIO_FN(BBIF1_SS1), \
+	GPIO_FN(BBIF1_FLOW),
+	GPIO_FN(HSI_TX_FLAG),
+	GPIO_FN(VIO_VD), \
+	GPIO_FN(PORT128_LCD2VSYN), \
+	GPIO_FN(VIO2_VD), \
+	GPIO_FN(LCD2D0),
+
+	GPIO_FN(VIO_HD), \
+	GPIO_FN(PORT129_LCD2HSYN), \
+	GPIO_FN(PORT129_LCD2CS_), \
+	GPIO_FN(VIO2_HD), \
+	GPIO_FN(LCD2D1),
+	GPIO_FN(VIO_D0), \
+	GPIO_FN(PORT130_MSIOF2_RXD), \
+	GPIO_FN(LCD2D10),
+	GPIO_FN(VIO_D1), \
+	GPIO_FN(PORT131_KEYOUT6), \
+	GPIO_FN(PORT131_MSIOF2_SS1), \
+	GPIO_FN(PORT131_KEYOUT11), \
+	GPIO_FN(LCD2D11),
+	GPIO_FN(VIO_D2), \
+	GPIO_FN(PORT132_KEYOUT7), \
+	GPIO_FN(PORT132_MSIOF2_SS2), \
+	GPIO_FN(PORT132_KEYOUT10), \
+	GPIO_FN(LCD2D12),
+	GPIO_FN(VIO_D3), \
+	GPIO_FN(MSIOF2_TSYNC), \
+	GPIO_FN(LCD2D13),
+	GPIO_FN(VIO_D4), \
+	GPIO_FN(MSIOF2_TXD), \
+	GPIO_FN(LCD2D14),
+	GPIO_FN(VIO_D5), \
+	GPIO_FN(MSIOF2_TSCK), \
+	GPIO_FN(LCD2D15),
+	GPIO_FN(VIO_D6), \
+	GPIO_FN(PORT136_KEYOUT8), \
+	GPIO_FN(LCD2D16),
+	GPIO_FN(VIO_D7), \
+	GPIO_FN(PORT137_KEYOUT9), \
+	GPIO_FN(LCD2D17),
+	GPIO_FN(VIO_D8), \
+	GPIO_FN(PORT138_KEYOUT8), \
+	GPIO_FN(VIO2_D0), \
+	GPIO_FN(LCD2D6),
+	GPIO_FN(VIO_D9), \
+	GPIO_FN(PORT139_KEYOUT9), \
+	GPIO_FN(VIO2_D1), \
+	GPIO_FN(LCD2D7),
+	GPIO_FN(VIO_D10), \
+	GPIO_FN(TPU0TO2), \
+	GPIO_FN(VIO2_D2), \
+	GPIO_FN(LCD2D8),
+	GPIO_FN(VIO_D11), \
+	GPIO_FN(TPU0TO3), \
+	GPIO_FN(VIO2_D3), \
+	GPIO_FN(LCD2D9),
+	GPIO_FN(VIO_D12), \
+	GPIO_FN(PORT142_KEYOUT10), \
+	GPIO_FN(VIO2_D4), \
+	GPIO_FN(LCD2D2),
+	GPIO_FN(VIO_D13), \
+	GPIO_FN(PORT143_KEYOUT11), \
+	GPIO_FN(PORT143_KEYOUT6), \
+	GPIO_FN(VIO2_D5), \
+	GPIO_FN(LCD2D3),
+	GPIO_FN(VIO_D14), \
+	GPIO_FN(PORT144_KEYOUT7), \
+	GPIO_FN(VIO2_D6), \
+	GPIO_FN(LCD2D4),
+	GPIO_FN(VIO_D15), \
+	GPIO_FN(TPU1TO3), \
+	GPIO_FN(PORT145_LCD2DISP), \
+	GPIO_FN(PORT145_LCD2RS), \
+	GPIO_FN(VIO2_D7), \
+	GPIO_FN(LCD2D5),
+	GPIO_FN(VIO_CLK), \
+	GPIO_FN(LCD2DCK), \
+	GPIO_FN(PORT146_LCD2WR_), \
+	GPIO_FN(VIO2_CLK), \
+	GPIO_FN(LCD2D18),
+	GPIO_FN(VIO_FIELD), \
+	GPIO_FN(LCD2RD_), \
+	GPIO_FN(VIO2_FIELD), \
+	GPIO_FN(LCD2D19),
+	GPIO_FN(VIO_CKO),
+	GPIO_FN(A27), \
+	GPIO_FN(PORT149_RDWR), \
+	GPIO_FN(MFG0_IN1), \
+	GPIO_FN(PORT149_KEYOUT9),
+	GPIO_FN(MFG0_IN2),
+	GPIO_FN(TS_SPSYNC3), \
+	GPIO_FN(MSIOF2_RSCK),
+	GPIO_FN(TS_SDAT3), \
+	GPIO_FN(MSIOF2_RSYNC),
+	GPIO_FN(TPU1TO2), \
+	GPIO_FN(TS_SDEN3), \
+	GPIO_FN(PORT153_MSIOF2_SS1),
+	GPIO_FN(SCIFA2_TXD1), \
+	GPIO_FN(MSIOF2_MCK0),
+	GPIO_FN(SCIFA2_RXD1), \
+	GPIO_FN(MSIOF2_MCK1),
+	GPIO_FN(SCIFA2_RTS1_), \
+	GPIO_FN(PORT156_MSIOF2_SS2),
+	GPIO_FN(SCIFA2_CTS1_), \
+	GPIO_FN(PORT157_MSIOF2_RXD),
+	GPIO_FN(DINT_), \
+	GPIO_FN(SCIFA2_SCK1), \
+	GPIO_FN(TS_SCK3),
+	GPIO_FN(PORT159_SCIFB_SCK), \
+	GPIO_FN(PORT159_SCIFA5_SCK), \
+	GPIO_FN(NMI),
+	GPIO_FN(PORT160_SCIFB_TXD), \
+	GPIO_FN(PORT160_SCIFA5_TXD),
+	GPIO_FN(PORT161_SCIFB_CTS_), \
+	GPIO_FN(PORT161_SCIFA5_CTS_),
+	GPIO_FN(PORT162_SCIFB_RXD), \
+	GPIO_FN(PORT162_SCIFA5_RXD),
+	GPIO_FN(PORT163_SCIFB_RTS_), \
+	GPIO_FN(PORT163_SCIFA5_RTS_), \
+	GPIO_FN(TPU3TO0),
+	GPIO_FN(LCDD0),
+	GPIO_FN(LCDD1), \
+	GPIO_FN(PORT193_SCIFA5_CTS_), \
+	GPIO_FN(BBIF2_TSYNC1),
+	GPIO_FN(LCDD2), \
+	GPIO_FN(PORT194_SCIFA5_RTS_), \
+	GPIO_FN(BBIF2_TSCK1),
+	GPIO_FN(LCDD3), \
+	GPIO_FN(PORT195_SCIFA5_RXD), \
+	GPIO_FN(BBIF2_TXD1),
+	GPIO_FN(LCDD4), \
+	GPIO_FN(PORT196_SCIFA5_TXD),
+	GPIO_FN(LCDD5), \
+	GPIO_FN(PORT197_SCIFA5_SCK), \
+	GPIO_FN(MFG2_OUT2), \
+	GPIO_FN(TPU2TO1),
+	GPIO_FN(LCDD6),
+	GPIO_FN(LCDD7), \
+	GPIO_FN(TPU4TO1), \
+	GPIO_FN(MFG4_OUT2),
+	GPIO_FN(LCDD8), \
+	GPIO_FN(D16),
+	GPIO_FN(LCDD9), \
+	GPIO_FN(D17),
+	GPIO_FN(LCDD10), \
+	GPIO_FN(D18),
+	GPIO_FN(LCDD11), \
+	GPIO_FN(D19),
+	GPIO_FN(LCDD12), \
+	GPIO_FN(D20),
+	GPIO_FN(LCDD13), \
+	GPIO_FN(D21),
+	GPIO_FN(LCDD14), \
+	GPIO_FN(D22),
+	GPIO_FN(LCDD15), \
+	GPIO_FN(PORT207_MSIOF0L_SS1), \
+	GPIO_FN(D23),
+	GPIO_FN(LCDD16), \
+	GPIO_FN(PORT208_MSIOF0L_SS2), \
+	GPIO_FN(D24),
+	GPIO_FN(LCDD17), \
+	GPIO_FN(D25),
+	GPIO_FN(LCDD18), \
+	GPIO_FN(DREQ2), \
+	GPIO_FN(PORT210_MSIOF0L_SS1), \
+	GPIO_FN(D26),
+	GPIO_FN(LCDD19), \
+	GPIO_FN(PORT211_MSIOF0L_SS2), \
+	GPIO_FN(D27),
+	GPIO_FN(LCDD20), \
+	GPIO_FN(TS_SPSYNC1), \
+	GPIO_FN(MSIOF0L_MCK0), \
+	GPIO_FN(D28),
+	GPIO_FN(LCDD21), \
+	GPIO_FN(TS_SDAT1), \
+	GPIO_FN(MSIOF0L_MCK1), \
+	GPIO_FN(D29),
+	GPIO_FN(LCDD22), \
+	GPIO_FN(TS_SDEN1), \
+	GPIO_FN(MSIOF0L_RSCK), \
+	GPIO_FN(D30),
+	GPIO_FN(LCDD23), \
+	GPIO_FN(TS_SCK1), \
+	GPIO_FN(MSIOF0L_RSYNC), \
+	GPIO_FN(D31),
+	GPIO_FN(LCDDCK), \
+	GPIO_FN(LCDWR_),
+	GPIO_FN(LCDRD_), \
+	GPIO_FN(DACK2), \
+	GPIO_FN(PORT217_LCD2RS), \
+	GPIO_FN(MSIOF0L_TSYNC), \
+	GPIO_FN(VIO2_FIELD3), \
+	GPIO_FN(PORT217_LCD2DISP),
+	GPIO_FN(LCDHSYN), \
+	GPIO_FN(LCDCS_), \
+	GPIO_FN(LCDCS2_), \
+	GPIO_FN(DACK3), \
+	GPIO_FN(PORT218_VIO_CKOR),
+	GPIO_FN(LCDDISP), \
+	GPIO_FN(LCDRS), \
+	GPIO_FN(PORT219_LCD2WR_), \
+	GPIO_FN(DREQ3), \
+	GPIO_FN(MSIOF0L_TSCK), \
+	GPIO_FN(VIO2_CLK3), \
+	GPIO_FN(LCD2DCK_2),
+	GPIO_FN(LCDVSYN), \
+	GPIO_FN(LCDVSYN2),
+	GPIO_FN(LCDLCLK), \
+	GPIO_FN(DREQ1), \
+	GPIO_FN(PORT221_LCD2CS_), \
+	GPIO_FN(PWEN), \
+	GPIO_FN(MSIOF0L_RXD), \
+	GPIO_FN(VIO2_HD3), \
+	GPIO_FN(PORT221_LCD2HSYN),
+	GPIO_FN(LCDDON), \
+	GPIO_FN(LCDDON2), \
+	GPIO_FN(DACK1), \
+	GPIO_FN(OVCN), \
+	GPIO_FN(MSIOF0L_TXD), \
+	GPIO_FN(VIO2_VD3), \
+	GPIO_FN(PORT222_LCD2VSYN),
+
+	GPIO_FN(SCIFA1_TXD), \
+	GPIO_FN(OVCN2),
+	GPIO_FN(EXTLP), \
+	GPIO_FN(SCIFA1_SCK), \
+	GPIO_FN(PORT226_VIO_CKO2),
+	GPIO_FN(SCIFA1_RTS_), \
+	GPIO_FN(IDIN),
+	GPIO_FN(SCIFA1_RXD),
+	GPIO_FN(SCIFA1_CTS_), \
+	GPIO_FN(MFG1_IN1),
+	GPIO_FN(MSIOF1_TXD), \
+	GPIO_FN(SCIFA2_TXD2),
+	GPIO_FN(MSIOF1_TSYNC), \
+	GPIO_FN(SCIFA2_CTS2_),
+	GPIO_FN(MSIOF1_TSCK), \
+	GPIO_FN(SCIFA2_SCK2),
+	GPIO_FN(MSIOF1_RXD), \
+	GPIO_FN(SCIFA2_RXD2),
+	GPIO_FN(MSIOF1_RSCK), \
+	GPIO_FN(SCIFA2_RTS2_), \
+	GPIO_FN(VIO2_CLK2), \
+	GPIO_FN(LCD2D20),
+	GPIO_FN(MSIOF1_RSYNC), \
+	GPIO_FN(MFG1_IN2), \
+	GPIO_FN(VIO2_VD2), \
+	GPIO_FN(LCD2D21),
+	GPIO_FN(MSIOF1_MCK0), \
+	GPIO_FN(PORT236_I2C_SDA2),
+	GPIO_FN(MSIOF1_MCK1), \
+	GPIO_FN(PORT237_I2C_SCL2),
+	GPIO_FN(MSIOF1_SS1), \
+	GPIO_FN(VIO2_FIELD2), \
+	GPIO_FN(LCD2D22),
+	GPIO_FN(MSIOF1_SS2), \
+	GPIO_FN(VIO2_HD2), \
+	GPIO_FN(LCD2D23),
+	GPIO_FN(SCIFA6_TXD),
+	GPIO_FN(PORT241_IRDA_OUT), \
+	GPIO_FN(PORT241_IROUT), \
+	GPIO_FN(MFG4_OUT1), \
+	GPIO_FN(TPU4TO0),
+	GPIO_FN(PORT242_IRDA_IN), \
+	GPIO_FN(MFG4_IN2),
+	GPIO_FN(PORT243_IRDA_FIRSEL), \
+	GPIO_FN(PORT243_VIO_CKO2),
+	GPIO_FN(PORT244_SCIFA5_CTS_), \
+	GPIO_FN(MFG2_IN1), \
+	GPIO_FN(PORT244_SCIFB_CTS_), \
+	GPIO_FN(MSIOF2R_RXD),
+	GPIO_FN(PORT245_SCIFA5_RTS_), \
+	GPIO_FN(MFG2_IN2), \
+	GPIO_FN(PORT245_SCIFB_RTS_), \
+	GPIO_FN(MSIOF2R_TXD),
+	GPIO_FN(PORT246_SCIFA5_RXD), \
+	GPIO_FN(MFG1_OUT1), \
+	GPIO_FN(PORT246_SCIFB_RXD), \
+	GPIO_FN(TPU1TO0),
+	GPIO_FN(PORT247_SCIFA5_TXD), \
+	GPIO_FN(MFG3_OUT2), \
+	GPIO_FN(PORT247_SCIFB_TXD), \
+	GPIO_FN(TPU3TO1),
+	GPIO_FN(PORT248_SCIFA5_SCK), \
+	GPIO_FN(MFG2_OUT1), \
+	GPIO_FN(PORT248_SCIFB_SCK), \
+	GPIO_FN(TPU2TO0), \
+	GPIO_FN(PORT248_I2C_SCL3), \
+	GPIO_FN(MSIOF2R_TSCK),
+	GPIO_FN(PORT249_IROUT), \
+	GPIO_FN(MFG4_IN1), \
+	GPIO_FN(PORT249_I2C_SDA3), \
+	GPIO_FN(MSIOF2R_TSYNC),
+	GPIO_FN(SDHICLK0),
+	GPIO_FN(SDHICD0),
+	GPIO_FN(SDHID0_0),
+	GPIO_FN(SDHID0_1),
+	GPIO_FN(SDHID0_2),
+	GPIO_FN(SDHID0_3),
+	GPIO_FN(SDHICMD0),
+	GPIO_FN(SDHIWP0),
+	GPIO_FN(SDHICLK1),
+	GPIO_FN(SDHID1_0), \
+	GPIO_FN(TS_SPSYNC2),
+	GPIO_FN(SDHID1_1), \
+	GPIO_FN(TS_SDAT2),
+	GPIO_FN(SDHID1_2), \
+	GPIO_FN(TS_SDEN2),
+	GPIO_FN(SDHID1_3), \
+	GPIO_FN(TS_SCK2),
+	GPIO_FN(SDHICMD1),
+	GPIO_FN(SDHICLK2),
+	GPIO_FN(SDHID2_0), \
+	GPIO_FN(TS_SPSYNC4),
+	GPIO_FN(SDHID2_1), \
+	GPIO_FN(TS_SDAT4),
+	GPIO_FN(SDHID2_2), \
+	GPIO_FN(TS_SDEN4),
+	GPIO_FN(SDHID2_3), \
+	GPIO_FN(TS_SCK4),
+	GPIO_FN(SDHICMD2),
+	GPIO_FN(MMCCLK0),
+	GPIO_FN(MMCD0_0),
+	GPIO_FN(MMCD0_1),
+	GPIO_FN(MMCD0_2),
+	GPIO_FN(MMCD0_3),
+	GPIO_FN(MMCD0_4), \
+	GPIO_FN(TS_SPSYNC5),
+	GPIO_FN(MMCD0_5), \
+	GPIO_FN(TS_SDAT5),
+	GPIO_FN(MMCD0_6), \
+	GPIO_FN(TS_SDEN5),
+	GPIO_FN(MMCD0_7), \
+	GPIO_FN(TS_SCK5),
+	GPIO_FN(MMCCMD0),
+	GPIO_FN(RESETOUTS_), \
+	GPIO_FN(EXTAL2OUT),
+	GPIO_FN(MCP_WAIT__MCP_FRB),
+	GPIO_FN(MCP_CKO), \
+	GPIO_FN(MMCCLK1),
+	GPIO_FN(MCP_D15_MCP_NAF15),
+	GPIO_FN(MCP_D14_MCP_NAF14),
+	GPIO_FN(MCP_D13_MCP_NAF13),
+	GPIO_FN(MCP_D12_MCP_NAF12),
+	GPIO_FN(MCP_D11_MCP_NAF11),
+	GPIO_FN(MCP_D10_MCP_NAF10),
+	GPIO_FN(MCP_D9_MCP_NAF9),
+	GPIO_FN(MCP_D8_MCP_NAF8), \
+	GPIO_FN(MMCCMD1),
+	GPIO_FN(MCP_D7_MCP_NAF7), \
+	GPIO_FN(MMCD1_7),
+
+	GPIO_FN(MCP_D6_MCP_NAF6), \
+	GPIO_FN(MMCD1_6),
+	GPIO_FN(MCP_D5_MCP_NAF5), \
+	GPIO_FN(MMCD1_5),
+	GPIO_FN(MCP_D4_MCP_NAF4), \
+	GPIO_FN(MMCD1_4),
+	GPIO_FN(MCP_D3_MCP_NAF3), \
+	GPIO_FN(MMCD1_3),
+	GPIO_FN(MCP_D2_MCP_NAF2), \
+	GPIO_FN(MMCD1_2),
+	GPIO_FN(MCP_D1_MCP_NAF1), \
+	GPIO_FN(MMCD1_1),
+	GPIO_FN(MCP_D0_MCP_NAF0), \
+	GPIO_FN(MMCD1_0),
+	GPIO_FN(MCP_NBRSTOUT_),
+	GPIO_FN(MCP_WE0__MCP_FWE), \
+	GPIO_FN(MCP_RDWR_MCP_FWE),
+
+	/* MSEL2 special cases */
+	GPIO_FN(TSIF2_TS_XX1),
+	GPIO_FN(TSIF2_TS_XX2),
+	GPIO_FN(TSIF2_TS_XX3),
+	GPIO_FN(TSIF2_TS_XX4),
+	GPIO_FN(TSIF2_TS_XX5),
+	GPIO_FN(TSIF1_TS_XX1),
+	GPIO_FN(TSIF1_TS_XX2),
+	GPIO_FN(TSIF1_TS_XX3),
+	GPIO_FN(TSIF1_TS_XX4),
+	GPIO_FN(TSIF1_TS_XX5),
+	GPIO_FN(TSIF0_TS_XX1),
+	GPIO_FN(TSIF0_TS_XX2),
+	GPIO_FN(TSIF0_TS_XX3),
+	GPIO_FN(TSIF0_TS_XX4),
+	GPIO_FN(TSIF0_TS_XX5),
+	GPIO_FN(MST1_TS_XX1),
+	GPIO_FN(MST1_TS_XX2),
+	GPIO_FN(MST1_TS_XX3),
+	GPIO_FN(MST1_TS_XX4),
+	GPIO_FN(MST1_TS_XX5),
+	GPIO_FN(MST0_TS_XX1),
+	GPIO_FN(MST0_TS_XX2),
+	GPIO_FN(MST0_TS_XX3),
+	GPIO_FN(MST0_TS_XX4),
+	GPIO_FN(MST0_TS_XX5),
+
+	/* MSEL3 special cases */
+	GPIO_FN(SDHI0_VCCQ_MC0_ON),
+	GPIO_FN(SDHI0_VCCQ_MC0_OFF),
+	GPIO_FN(DEBUG_MON_VIO),
+	GPIO_FN(DEBUG_MON_LCDD),
+	GPIO_FN(LCDC_LCDC0),
+	GPIO_FN(LCDC_LCDC1),
+
+	/* MSEL4 special cases */
+	GPIO_FN(IRQ9_MEM_INT),
+	GPIO_FN(IRQ9_MCP_INT),
+	GPIO_FN(A11),
+	GPIO_FN(KEYOUT8),
+	GPIO_FN(TPU4TO3),
+	GPIO_FN(RESETA_N_PU_ON),
+	GPIO_FN(RESETA_N_PU_OFF),
+	GPIO_FN(EDBGREQ_PD),
+	GPIO_FN(EDBGREQ_PU),
+
+	/* Functions with pull-ups */
+	GPIO_FN(KEYIN0_PU),
+	GPIO_FN(KEYIN1_PU),
+	GPIO_FN(KEYIN2_PU),
+	GPIO_FN(KEYIN3_PU),
+	GPIO_FN(KEYIN4_PU),
+	GPIO_FN(KEYIN5_PU),
+	GPIO_FN(KEYIN6_PU),
+	GPIO_FN(KEYIN7_PU),
+	GPIO_FN(SDHID1_0_PU),
+	GPIO_FN(SDHID1_1_PU),
+	GPIO_FN(SDHID1_2_PU),
+	GPIO_FN(SDHID1_3_PU),
+	GPIO_FN(SDHICMD1_PU),
+	GPIO_FN(MMCCMD0_PU),
+	GPIO_FN(MMCCMD1_PU),
+	GPIO_FN(FSIACK_PU),
+	GPIO_FN(FSIAILR_PU),
+	GPIO_FN(FSIAIBT_PU),
+	GPIO_FN(FSIAISLD_PU),
+};
+
+#define PORTCR(nr, reg)	\
+	{ PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \
+		0, \
+		/*0001*/ PORT##nr##_OUT , \
+		/*0010*/ PORT##nr##_IN , 0, 0, 0, 0, 0, 0, 0, \
+		/*1010*/ PORT##nr##_IN_PD, 0, 0, 0, \
+		/*1110*/ PORT##nr##_IN_PU, 0, \
+		PORT##nr##_FN0, PORT##nr##_FN1,	PORT##nr##_FN2, \
+		PORT##nr##_FN3,	PORT##nr##_FN4, PORT##nr##_FN5, \
+		PORT##nr##_FN6, PORT##nr##_FN7, 0, 0, 0, 0, 0, 0, 0, 0 } \
+	}
+
+static struct pinmux_cfg_reg pinmux_config_regs[] = {
+	PORTCR(0, 0xe6050000), /* PORT0CR */
+	PORTCR(1, 0xe6050001), /* PORT1CR */
+	PORTCR(2, 0xe6050002), /* PORT2CR */
+	PORTCR(3, 0xe6050003), /* PORT3CR */
+	PORTCR(4, 0xe6050004), /* PORT4CR */
+	PORTCR(5, 0xe6050005), /* PORT5CR */
+	PORTCR(6, 0xe6050006), /* PORT6CR */
+	PORTCR(7, 0xe6050007), /* PORT7CR */
+	PORTCR(8, 0xe6050008), /* PORT8CR */
+	PORTCR(9, 0xe6050009), /* PORT9CR */
+
+	PORTCR(10, 0xe605000a), /* PORT10CR */
+	PORTCR(11, 0xe605000b), /* PORT11CR */
+	PORTCR(12, 0xe605000c), /* PORT12CR */
+	PORTCR(13, 0xe605000d), /* PORT13CR */
+	PORTCR(14, 0xe605000e), /* PORT14CR */
+	PORTCR(15, 0xe605000f), /* PORT15CR */
+	PORTCR(16, 0xe6050010), /* PORT16CR */
+	PORTCR(17, 0xe6050011), /* PORT17CR */
+	PORTCR(18, 0xe6050012), /* PORT18CR */
+	PORTCR(19, 0xe6050013), /* PORT19CR */
+
+	PORTCR(20, 0xe6050014), /* PORT20CR */
+	PORTCR(21, 0xe6050015), /* PORT21CR */
+	PORTCR(22, 0xe6050016), /* PORT22CR */
+	PORTCR(23, 0xe6050017), /* PORT23CR */
+	PORTCR(24, 0xe6050018), /* PORT24CR */
+	PORTCR(25, 0xe6050019), /* PORT25CR */
+	PORTCR(26, 0xe605001a), /* PORT26CR */
+	PORTCR(27, 0xe605001b), /* PORT27CR */
+	PORTCR(28, 0xe605001c), /* PORT28CR */
+	PORTCR(29, 0xe605001d), /* PORT29CR */
+
+	PORTCR(30, 0xe605001e), /* PORT30CR */
+	PORTCR(31, 0xe605001f), /* PORT31CR */
+	PORTCR(32, 0xe6051020), /* PORT32CR */
+	PORTCR(33, 0xe6051021), /* PORT33CR */
+	PORTCR(34, 0xe6051022), /* PORT34CR */
+	PORTCR(35, 0xe6051023), /* PORT35CR */
+	PORTCR(36, 0xe6051024), /* PORT36CR */
+	PORTCR(37, 0xe6051025), /* PORT37CR */
+	PORTCR(38, 0xe6051026), /* PORT38CR */
+	PORTCR(39, 0xe6051027), /* PORT39CR */
+
+	PORTCR(40, 0xe6051028), /* PORT40CR */
+	PORTCR(41, 0xe6051029), /* PORT41CR */
+	PORTCR(42, 0xe605102a), /* PORT42CR */
+	PORTCR(43, 0xe605102b), /* PORT43CR */
+	PORTCR(44, 0xe605102c), /* PORT44CR */
+	PORTCR(45, 0xe605102d), /* PORT45CR */
+	PORTCR(46, 0xe605102e), /* PORT46CR */
+	PORTCR(47, 0xe605102f), /* PORT47CR */
+	PORTCR(48, 0xe6051030), /* PORT48CR */
+	PORTCR(49, 0xe6051031), /* PORT49CR */
+
+	PORTCR(50, 0xe6051032), /* PORT50CR */
+	PORTCR(51, 0xe6051033), /* PORT51CR */
+	PORTCR(52, 0xe6051034), /* PORT52CR */
+	PORTCR(53, 0xe6051035), /* PORT53CR */
+	PORTCR(54, 0xe6051036), /* PORT54CR */
+	PORTCR(55, 0xe6051037), /* PORT55CR */
+	PORTCR(56, 0xe6051038), /* PORT56CR */
+	PORTCR(57, 0xe6051039), /* PORT57CR */
+	PORTCR(58, 0xe605103a), /* PORT58CR */
+	PORTCR(59, 0xe605103b), /* PORT59CR */
+
+	PORTCR(60, 0xe605103c), /* PORT60CR */
+	PORTCR(61, 0xe605103d), /* PORT61CR */
+	PORTCR(62, 0xe605103e), /* PORT62CR */
+	PORTCR(63, 0xe605103f), /* PORT63CR */
+	PORTCR(64, 0xe6051040), /* PORT64CR */
+	PORTCR(65, 0xe6051041), /* PORT65CR */
+	PORTCR(66, 0xe6051042), /* PORT66CR */
+	PORTCR(67, 0xe6051043), /* PORT67CR */
+	PORTCR(68, 0xe6051044), /* PORT68CR */
+	PORTCR(69, 0xe6051045), /* PORT69CR */
+
+	PORTCR(70, 0xe6051046), /* PORT70CR */
+	PORTCR(71, 0xe6051047), /* PORT71CR */
+	PORTCR(72, 0xe6051048), /* PORT72CR */
+	PORTCR(73, 0xe6051049), /* PORT73CR */
+	PORTCR(74, 0xe605104a), /* PORT74CR */
+	PORTCR(75, 0xe605104b), /* PORT75CR */
+	PORTCR(76, 0xe605104c), /* PORT76CR */
+	PORTCR(77, 0xe605104d), /* PORT77CR */
+	PORTCR(78, 0xe605104e), /* PORT78CR */
+	PORTCR(79, 0xe605104f), /* PORT79CR */
+
+	PORTCR(80, 0xe6051050), /* PORT80CR */
+	PORTCR(81, 0xe6051051), /* PORT81CR */
+	PORTCR(82, 0xe6051052), /* PORT82CR */
+	PORTCR(83, 0xe6051053), /* PORT83CR */
+	PORTCR(84, 0xe6051054), /* PORT84CR */
+	PORTCR(85, 0xe6051055), /* PORT85CR */
+	PORTCR(86, 0xe6051056), /* PORT86CR */
+	PORTCR(87, 0xe6051057), /* PORT87CR */
+	PORTCR(88, 0xe6051058), /* PORT88CR */
+	PORTCR(89, 0xe6051059), /* PORT89CR */
+
+	PORTCR(90, 0xe605105a), /* PORT90CR */
+	PORTCR(91, 0xe605105b), /* PORT91CR */
+	PORTCR(92, 0xe605105c), /* PORT92CR */
+	PORTCR(93, 0xe605105d), /* PORT93CR */
+	PORTCR(94, 0xe605105e), /* PORT94CR */
+	PORTCR(95, 0xe605105f), /* PORT95CR */
+	PORTCR(96, 0xe6052060), /* PORT96CR */
+	PORTCR(97, 0xe6052061), /* PORT97CR */
+	PORTCR(98, 0xe6052062), /* PORT98CR */
+	PORTCR(99, 0xe6052063), /* PORT99CR */
+
+	PORTCR(100, 0xe6052064), /* PORT100CR */
+	PORTCR(101, 0xe6052065), /* PORT101CR */
+	PORTCR(102, 0xe6052066), /* PORT102CR */
+	PORTCR(103, 0xe6052067), /* PORT103CR */
+	PORTCR(104, 0xe6052068), /* PORT104CR */
+	PORTCR(105, 0xe6052069), /* PORT105CR */
+	PORTCR(106, 0xe605206a), /* PORT106CR */
+	PORTCR(107, 0xe605206b), /* PORT107CR */
+	PORTCR(108, 0xe605206c), /* PORT108CR */
+	PORTCR(109, 0xe605206d), /* PORT109CR */
+
+	PORTCR(110, 0xe605206e), /* PORT110CR */
+	PORTCR(111, 0xe605206f), /* PORT111CR */
+	PORTCR(112, 0xe6052070), /* PORT112CR */
+	PORTCR(113, 0xe6052071), /* PORT113CR */
+	PORTCR(114, 0xe6052072), /* PORT114CR */
+	PORTCR(115, 0xe6052073), /* PORT115CR */
+	PORTCR(116, 0xe6052074), /* PORT116CR */
+	PORTCR(117, 0xe6052075), /* PORT117CR */
+	PORTCR(118, 0xe6052076), /* PORT118CR */
+
+	PORTCR(128, 0xe6052080), /* PORT128CR */
+	PORTCR(129, 0xe6052081), /* PORT129CR */
+
+	PORTCR(130, 0xe6052082), /* PORT130CR */
+	PORTCR(131, 0xe6052083), /* PORT131CR */
+	PORTCR(132, 0xe6052084), /* PORT132CR */
+	PORTCR(133, 0xe6052085), /* PORT133CR */
+	PORTCR(134, 0xe6052086), /* PORT134CR */
+	PORTCR(135, 0xe6052087), /* PORT135CR */
+	PORTCR(136, 0xe6052088), /* PORT136CR */
+	PORTCR(137, 0xe6052089), /* PORT137CR */
+	PORTCR(138, 0xe605208a), /* PORT138CR */
+	PORTCR(139, 0xe605208b), /* PORT139CR */
+
+	PORTCR(140, 0xe605208c), /* PORT140CR */
+	PORTCR(141, 0xe605208d), /* PORT141CR */
+	PORTCR(142, 0xe605208e), /* PORT142CR */
+	PORTCR(143, 0xe605208f), /* PORT143CR */
+	PORTCR(144, 0xe6052090), /* PORT144CR */
+	PORTCR(145, 0xe6052091), /* PORT145CR */
+	PORTCR(146, 0xe6052092), /* PORT146CR */
+	PORTCR(147, 0xe6052093), /* PORT147CR */
+	PORTCR(148, 0xe6052094), /* PORT148CR */
+	PORTCR(149, 0xe6052095), /* PORT149CR */
+
+	PORTCR(150, 0xe6052096), /* PORT150CR */
+	PORTCR(151, 0xe6052097), /* PORT151CR */
+	PORTCR(152, 0xe6052098), /* PORT152CR */
+	PORTCR(153, 0xe6052099), /* PORT153CR */
+	PORTCR(154, 0xe605209a), /* PORT154CR */
+	PORTCR(155, 0xe605209b), /* PORT155CR */
+	PORTCR(156, 0xe605209c), /* PORT156CR */
+	PORTCR(157, 0xe605209d), /* PORT157CR */
+	PORTCR(158, 0xe605209e), /* PORT158CR */
+	PORTCR(159, 0xe605209f), /* PORT159CR */
+
+	PORTCR(160, 0xe60520a0), /* PORT160CR */
+	PORTCR(161, 0xe60520a1), /* PORT161CR */
+	PORTCR(162, 0xe60520a2), /* PORT162CR */
+	PORTCR(163, 0xe60520a3), /* PORT163CR */
+	PORTCR(164, 0xe60520a4), /* PORT164CR */
+
+	PORTCR(192, 0xe60520c0), /* PORT192CR */
+	PORTCR(193, 0xe60520c1), /* PORT193CR */
+	PORTCR(194, 0xe60520c2), /* PORT194CR */
+	PORTCR(195, 0xe60520c3), /* PORT195CR */
+	PORTCR(196, 0xe60520c4), /* PORT196CR */
+	PORTCR(197, 0xe60520c5), /* PORT197CR */
+	PORTCR(198, 0xe60520c6), /* PORT198CR */
+	PORTCR(199, 0xe60520c7), /* PORT199CR */
+
+	PORTCR(200, 0xe60520c8), /* PORT200CR */
+	PORTCR(201, 0xe60520c9), /* PORT201CR */
+	PORTCR(202, 0xe60520ca), /* PORT202CR */
+	PORTCR(203, 0xe60520cb), /* PORT203CR */
+	PORTCR(204, 0xe60520cc), /* PORT204CR */
+	PORTCR(205, 0xe60520cd), /* PORT205CR */
+	PORTCR(206, 0xe60520ce), /* PORT206CR */
+	PORTCR(207, 0xe60520cf), /* PORT207CR */
+	PORTCR(208, 0xe60520d0), /* PORT208CR */
+	PORTCR(209, 0xe60520d1), /* PORT209CR */
+
+	PORTCR(210, 0xe60520d2), /* PORT210CR */
+	PORTCR(211, 0xe60520d3), /* PORT211CR */
+	PORTCR(212, 0xe60520d4), /* PORT212CR */
+	PORTCR(213, 0xe60520d5), /* PORT213CR */
+	PORTCR(214, 0xe60520d6), /* PORT214CR */
+	PORTCR(215, 0xe60520d7), /* PORT215CR */
+	PORTCR(216, 0xe60520d8), /* PORT216CR */
+	PORTCR(217, 0xe60520d9), /* PORT217CR */
+	PORTCR(218, 0xe60520da), /* PORT218CR */
+	PORTCR(219, 0xe60520db), /* PORT219CR */
+
+	PORTCR(220, 0xe60520dc), /* PORT220CR */
+	PORTCR(221, 0xe60520dd), /* PORT221CR */
+	PORTCR(222, 0xe60520de), /* PORT222CR */
+	PORTCR(223, 0xe60520df), /* PORT223CR */
+	PORTCR(224, 0xe60530e0), /* PORT224CR */
+	PORTCR(225, 0xe60530e1), /* PORT225CR */
+	PORTCR(226, 0xe60530e2), /* PORT226CR */
+	PORTCR(227, 0xe60530e3), /* PORT227CR */
+	PORTCR(228, 0xe60530e4), /* PORT228CR */
+	PORTCR(229, 0xe60530e5), /* PORT229CR */
+
+	PORTCR(230, 0xe60530e6), /* PORT230CR */
+	PORTCR(231, 0xe60530e7), /* PORT231CR */
+	PORTCR(232, 0xe60530e8), /* PORT232CR */
+	PORTCR(233, 0xe60530e9), /* PORT233CR */
+	PORTCR(234, 0xe60530ea), /* PORT234CR */
+	PORTCR(235, 0xe60530eb), /* PORT235CR */
+	PORTCR(236, 0xe60530ec), /* PORT236CR */
+	PORTCR(237, 0xe60530ed), /* PORT237CR */
+	PORTCR(238, 0xe60530ee), /* PORT238CR */
+	PORTCR(239, 0xe60530ef), /* PORT239CR */
+
+	PORTCR(240, 0xe60530f0), /* PORT240CR */
+	PORTCR(241, 0xe60530f1), /* PORT241CR */
+	PORTCR(242, 0xe60530f2), /* PORT242CR */
+	PORTCR(243, 0xe60530f3), /* PORT243CR */
+	PORTCR(244, 0xe60530f4), /* PORT244CR */
+	PORTCR(245, 0xe60530f5), /* PORT245CR */
+	PORTCR(246, 0xe60530f6), /* PORT246CR */
+	PORTCR(247, 0xe60530f7), /* PORT247CR */
+	PORTCR(248, 0xe60530f8), /* PORT248CR */
+	PORTCR(249, 0xe60530f9), /* PORT249CR */
+
+	PORTCR(250, 0xe60530fa), /* PORT250CR */
+	PORTCR(251, 0xe60530fb), /* PORT251CR */
+	PORTCR(252, 0xe60530fc), /* PORT252CR */
+	PORTCR(253, 0xe60530fd), /* PORT253CR */
+	PORTCR(254, 0xe60530fe), /* PORT254CR */
+	PORTCR(255, 0xe60530ff), /* PORT255CR */
+	PORTCR(256, 0xe6053100), /* PORT256CR */
+	PORTCR(257, 0xe6053101), /* PORT257CR */
+	PORTCR(258, 0xe6053102), /* PORT258CR */
+	PORTCR(259, 0xe6053103), /* PORT259CR */
+
+	PORTCR(260, 0xe6053104), /* PORT260CR */
+	PORTCR(261, 0xe6053105), /* PORT261CR */
+	PORTCR(262, 0xe6053106), /* PORT262CR */
+	PORTCR(263, 0xe6053107), /* PORT263CR */
+	PORTCR(264, 0xe6053108), /* PORT264CR */
+	PORTCR(265, 0xe6053109), /* PORT265CR */
+	PORTCR(266, 0xe605310a), /* PORT266CR */
+	PORTCR(267, 0xe605310b), /* PORT267CR */
+	PORTCR(268, 0xe605310c), /* PORT268CR */
+	PORTCR(269, 0xe605310d), /* PORT269CR */
+
+	PORTCR(270, 0xe605310e), /* PORT270CR */
+	PORTCR(271, 0xe605310f), /* PORT271CR */
+	PORTCR(272, 0xe6053110), /* PORT272CR */
+	PORTCR(273, 0xe6053111), /* PORT273CR */
+	PORTCR(274, 0xe6053112), /* PORT274CR */
+	PORTCR(275, 0xe6053113), /* PORT275CR */
+	PORTCR(276, 0xe6053114), /* PORT276CR */
+	PORTCR(277, 0xe6053115), /* PORT277CR */
+	PORTCR(278, 0xe6053116), /* PORT278CR */
+	PORTCR(279, 0xe6053117), /* PORT279CR */
+
+	PORTCR(280, 0xe6053118), /* PORT280CR */
+	PORTCR(281, 0xe6053119), /* PORT281CR */
+	PORTCR(282, 0xe605311a), /* PORT282CR */
+
+	PORTCR(288, 0xe6052120), /* PORT288CR */
+	PORTCR(289, 0xe6052121), /* PORT289CR */
+
+	PORTCR(290, 0xe6052122), /* PORT290CR */
+	PORTCR(291, 0xe6052123), /* PORT291CR */
+	PORTCR(292, 0xe6052124), /* PORT292CR */
+	PORTCR(293, 0xe6052125), /* PORT293CR */
+	PORTCR(294, 0xe6052126), /* PORT294CR */
+	PORTCR(295, 0xe6052127), /* PORT295CR */
+	PORTCR(296, 0xe6052128), /* PORT296CR */
+	PORTCR(297, 0xe6052129), /* PORT297CR */
+	PORTCR(298, 0xe605212a), /* PORT298CR */
+	PORTCR(299, 0xe605212b), /* PORT299CR */
+
+	PORTCR(300, 0xe605212c), /* PORT300CR */
+	PORTCR(301, 0xe605212d), /* PORT301CR */
+	PORTCR(302, 0xe605212e), /* PORT302CR */
+	PORTCR(303, 0xe605212f), /* PORT303CR */
+	PORTCR(304, 0xe6052130), /* PORT304CR */
+	PORTCR(305, 0xe6052131), /* PORT305CR */
+	PORTCR(306, 0xe6052132), /* PORT306CR */
+	PORTCR(307, 0xe6052133), /* PORT307CR */
+	PORTCR(308, 0xe6052134), /* PORT308CR */
+	PORTCR(309, 0xe6052135), /* PORT309CR */
+
+	{ PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1) {
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1,
+			MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1,
+			MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1,
+			MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1,
+			0, 0,
+			MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1,
+			MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1,
+			MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1,
+			MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1,
+			MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1,
+			MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1,
+			MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1,
+			MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1,
+			MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1,
+			MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1,
+			MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1,
+			MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1,
+			MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1,
+			MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1,
+			MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1,
+		}
+	},
+	{ PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) {
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1,
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1,
+			0, 0,
+			MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1,
+			0, 0,
+			0, 0,
+			MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1,
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1,
+			0, 0,
+			0, 0,
+		}
+	},
+	{ PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) {
+			0, 0,
+			0, 0,
+			MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1,
+			0, 0,
+			MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1,
+			MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1,
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1,
+			MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1,
+			MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1,
+			MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1,
+			0, 0,
+			0, 0,
+			0, 0,
+			MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1,
+			0, 0,
+			MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1,
+			MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1,
+			MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1,
+			MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1,
+			MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1,
+			MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1,
+			MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1,
+			0, 0,
+			0, 0,
+			MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1,
+			0, 0,
+			0, 0,
+			MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1,
+			0, 0,
+		}
+	},
+	{ },
+};
+
+static struct pinmux_data_reg pinmux_data_regs[] = {
+	{ PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) {
+			PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA,
+			PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA,
+			PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA,
+			PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA,
+			PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA,
+			PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA,
+			PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA,
+			PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) {
+			PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA,
+			PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA,
+			PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA,
+			PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA,
+			PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA,
+			PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA,
+			PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA,
+			PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32) {
+			PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA,
+			PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA,
+			PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA,
+			PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA,
+			PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA,
+			PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA,
+			PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA,
+			PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32) {
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, PORT118_DATA, PORT117_DATA, PORT116_DATA,
+			PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA,
+			PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA,
+			PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA,
+			PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA,
+			PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32) {
+			PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA,
+			PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA,
+			PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA,
+			PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA,
+			PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA,
+			PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA,
+			PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA,
+			PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32) {
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, PORT164_DATA,
+			PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32) {
+			PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA,
+			PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA,
+			PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA,
+			PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA,
+			PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA,
+			PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA,
+			PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA,
+			PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32) {
+			PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA,
+			PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA,
+			PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA,
+			PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA,
+			PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA,
+			PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA,
+			PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA,
+			PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32) {
+			0, 0, 0, 0,
+			0, PORT282_DATA, PORT281_DATA, PORT280_DATA,
+			PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA,
+			PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA,
+			PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA,
+			PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA,
+			PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA,
+			PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA }
+	},
+	{ PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32) {
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, PORT309_DATA, PORT308_DATA,
+			PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA,
+			PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA,
+			PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA,
+			PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA,
+			PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA }
+	},
+	{ },
+};
+
+static struct pinmux_info sh73a0_pinmux_info = {
+	.name = "sh73a0_pfc",
+	.reserved_id = PINMUX_RESERVED,
+	.data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END },
+	.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
+	.input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
+	.input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
+	.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
+	.mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END },
+	.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+	.first_gpio = GPIO_PORT0,
+	.last_gpio = GPIO_FN_FSIAISLD_PU,
+
+	.gpios = pinmux_gpios,
+	.cfg_regs = pinmux_config_regs,
+	.data_regs = pinmux_data_regs,
+
+	.gpio_data = pinmux_data,
+	.gpio_data_size = ARRAY_SIZE(pinmux_data),
+};
+
+void sh73a0_pinmux_init(void)
+{
+	register_pinmux(&sh73a0_pinmux_info);
+}
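
Editor's note (not part of the patch): in the SH-Mobile pinmux model, the function selections tabulated above are exposed as "function GPIOs", so after sh73a0_pinmux_init() has registered the controller a board file picks a pin function simply by requesting the matching GPIO_FN_* number through gpiolib. The fragment below is a minimal, hypothetical board-init sketch; the function name example_board_pinmux_init() and the <mach/sh73a0.h> include are assumptions, while the GPIO_FN_* identifiers are taken from the table in this file.

#include <linux/init.h>
#include <linux/gpio.h>
#include <mach/sh73a0.h>	/* assumed to declare sh73a0_pinmux_init() */

static void __init example_board_pinmux_init(void)
{
	/* Register the sh73a0 pin function controller defined above. */
	sh73a0_pinmux_init();

	/* Route SCIFA2 and a few SDHI0 pins by requesting function GPIOs. */
	gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
	gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
	gpio_request(GPIO_FN_SDHICLK0, NULL);
	gpio_request(GPIO_FN_SDHICMD0, NULL);
	gpio_request(GPIO_FN_SDHID0_0, NULL);
}
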
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
new file mode 100644
index 0000000..65e879b
--- /dev/null
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -0,0 +1,70 @@
+/*
+ * SMP support for R-Mobile / SH-Mobile
+ *
+ * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2011  Paul Mundt
+ *
+ * Based on vexpress, Copyright (C) 2002 ARM Ltd, All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <asm/localtimer.h>
+#include <asm/mach-types.h>
+#include <mach/common.h>
+
+static unsigned int __init shmobile_smp_get_core_count(void)
+{
+	if (machine_is_ag5evm())
+		return sh73a0_get_core_count();
+
+	return 1;
+}
+
+static void __init shmobile_smp_prepare_cpus(void)
+{
+	if (machine_is_ag5evm())
+		sh73a0_smp_prepare_cpus();
+}
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	trace_hardirqs_off();
+
+	if (machine_is_ag5evm())
+		sh73a0_secondary_init(cpu);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	if (machine_is_ag5evm())
+		return sh73a0_boot_secondary(cpu);
+
+	return -ENOSYS;
+}
+
+void __init smp_init_cpus(void)
+{
+	unsigned int ncores = shmobile_smp_get_core_count();
+	unsigned int i;
+
+	for (i = 0; i < ncores; i++)
+		set_cpu_possible(i, true);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
+
+	for (i = 0; i < max_cpus; i++)
+		set_cpu_present(i, true);
+
+	shmobile_smp_prepare_cpus();
+}
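
Editor's note (not part of the patch): the glue above routes every SMP hook through machine_is_ag5evm(), so for now only the sh73a0/AG5EVM combination is brought up as SMP. Supporting a further machine would just add another branch per hook; the sketch below re-shows the core-count helper with one such branch added, where machine_is_foo() and foo_get_core_count() are placeholders, not existing kernel symbols.

static unsigned int __init shmobile_smp_get_core_count(void)
{
	if (machine_is_ag5evm())
		return sh73a0_get_core_count();

	/* hypothetical additional SMP-capable machine */
	if (machine_is_foo())
		return foo_get_core_count();

	return 1;	/* fall back to uniprocessor */
}
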
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 003008c..ce28141 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -35,6 +35,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc00), evt2irq(0xc00),
 			    evt2irq(0xc00), evt2irq(0xc00) },
@@ -52,6 +54,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc20), evt2irq(0xc20),
 			    evt2irq(0xc20), evt2irq(0xc20) },
@@ -69,6 +73,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc40), evt2irq(0xc40),
 			    evt2irq(0xc40), evt2irq(0xc40) },
@@ -86,6 +92,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc60), evt2irq(0xc60),
 			    evt2irq(0xc60), evt2irq(0xc60) },
@@ -103,6 +111,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd20), evt2irq(0xd20),
 			    evt2irq(0xd20), evt2irq(0xd20) },
@@ -120,6 +130,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd40), evt2irq(0xd40),
 			    evt2irq(0xd40), evt2irq(0xd40) },
@@ -137,6 +149,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd60), evt2irq(0xd60),
 			    evt2irq(0xd60), evt2irq(0xd60) },
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 564a6d0..ff0494f 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -38,6 +38,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c00), evt2irq(0x0c00),
 			    evt2irq(0x0c00), evt2irq(0x0c00) },
@@ -55,6 +57,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c20), evt2irq(0x0c20),
 			    evt2irq(0x0c20), evt2irq(0x0c20) },
@@ -72,6 +76,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c40), evt2irq(0x0c40),
 			    evt2irq(0x0c40), evt2irq(0x0c40) },
@@ -89,6 +95,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0c60), evt2irq(0x0c60),
 			    evt2irq(0x0c60), evt2irq(0x0c60) },
@@ -106,6 +114,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0d20), evt2irq(0x0d20),
 			    evt2irq(0x0d20), evt2irq(0x0d20) },
@@ -123,6 +133,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFA,
 	.irqs		= { evt2irq(0x0d40), evt2irq(0x0d40),
 			    evt2irq(0x0d40), evt2irq(0x0d40) },
@@ -140,6 +152,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIFB,
 	.irqs		= { evt2irq(0x0d60), evt2irq(0x0d60),
 			    evt2irq(0x0d60), evt2irq(0x0d60) },
@@ -416,6 +430,16 @@
 		.addr		= 0xe6870030,
 		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xce,
+	}, {
+		.slave_id	= SHDMA_SLAVE_MMCIF_TX,
+		.addr		= 0xe6bd0034,
+		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0xd1,
+	}, {
+		.slave_id	= SHDMA_SLAVE_MMCIF_RX,
+		.addr		= 0xe6bd0034,
+		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0xd2,
 	},
 };
 
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index 575dbd6..8099b0b 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -36,6 +36,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xe6c40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc00), evt2irq(0xc00),
 			    evt2irq(0xc00), evt2irq(0xc00) },
@@ -53,6 +55,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xe6c50000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc20), evt2irq(0xc20),
 			    evt2irq(0xc20), evt2irq(0xc20) },
@@ -70,6 +74,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xe6c60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc40), evt2irq(0xc40),
 			    evt2irq(0xc40), evt2irq(0xc40) },
@@ -87,6 +93,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xe6c70000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xc60), evt2irq(0xc60),
 			    evt2irq(0xc60), evt2irq(0xc60) },
@@ -104,6 +112,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xe6c80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd20), evt2irq(0xd20),
 			    evt2irq(0xd20), evt2irq(0xd20) },
@@ -121,6 +131,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xe6cb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd40), evt2irq(0xd40),
 			    evt2irq(0xd40), evt2irq(0xd40) },
@@ -138,6 +150,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xe6cc0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80),
 			    intcs_evt2irq(0x1a80), intcs_evt2irq(0x1a80) },
@@ -155,6 +169,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xe6c30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { evt2irq(0xd60), evt2irq(0xd60),
 			    evt2irq(0xd60), evt2irq(0xd60) },
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
new file mode 100644
index 0000000..685c40a
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -0,0 +1,430 @@
+/*
+ * sh73a0 processor support
+ *
+ * Copyright (C) 2010  Takashi Yoshii
+ * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2008  Yoshihiro Shimoda
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_intc.h>
+#include <linux/sh_timer.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+static struct plat_sci_port scif0_platform_data = {
+	.mapbase	= 0xe6c40000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(72), gic_spi(72),
+			    gic_spi(72), gic_spi(72) },
+};
+
+static struct platform_device scif0_device = {
+	.name		= "sh-sci",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &scif0_platform_data,
+	},
+};
+
+static struct plat_sci_port scif1_platform_data = {
+	.mapbase	= 0xe6c50000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(73), gic_spi(73),
+			    gic_spi(73), gic_spi(73) },
+};
+
+static struct platform_device scif1_device = {
+	.name		= "sh-sci",
+	.id		= 1,
+	.dev		= {
+		.platform_data	= &scif1_platform_data,
+	},
+};
+
+static struct plat_sci_port scif2_platform_data = {
+	.mapbase	= 0xe6c60000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(74), gic_spi(74),
+			    gic_spi(74), gic_spi(74) },
+};
+
+static struct platform_device scif2_device = {
+	.name		= "sh-sci",
+	.id		= 2,
+	.dev		= {
+		.platform_data	= &scif2_platform_data,
+	},
+};
+
+static struct plat_sci_port scif3_platform_data = {
+	.mapbase	= 0xe6c70000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(75), gic_spi(75),
+			    gic_spi(75), gic_spi(75) },
+};
+
+static struct platform_device scif3_device = {
+	.name		= "sh-sci",
+	.id		= 3,
+	.dev		= {
+		.platform_data	= &scif3_platform_data,
+	},
+};
+
+static struct plat_sci_port scif4_platform_data = {
+	.mapbase	= 0xe6c80000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(78), gic_spi(78),
+			    gic_spi(78), gic_spi(78) },
+};
+
+static struct platform_device scif4_device = {
+	.name		= "sh-sci",
+	.id		= 4,
+	.dev		= {
+		.platform_data	= &scif4_platform_data,
+	},
+};
+
+static struct plat_sci_port scif5_platform_data = {
+	.mapbase	= 0xe6cb0000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(79), gic_spi(79),
+			    gic_spi(79), gic_spi(79) },
+};
+
+static struct platform_device scif5_device = {
+	.name		= "sh-sci",
+	.id		= 5,
+	.dev		= {
+		.platform_data	= &scif5_platform_data,
+	},
+};
+
+static struct plat_sci_port scif6_platform_data = {
+	.mapbase	= 0xe6cc0000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(156), gic_spi(156),
+			    gic_spi(156), gic_spi(156) },
+};
+
+static struct platform_device scif6_device = {
+	.name		= "sh-sci",
+	.id		= 6,
+	.dev		= {
+		.platform_data	= &scif6_platform_data,
+	},
+};
+
+static struct plat_sci_port scif7_platform_data = {
+	.mapbase	= 0xe6cd0000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFA,
+	.irqs		= { gic_spi(143), gic_spi(143),
+			    gic_spi(143), gic_spi(143) },
+};
+
+static struct platform_device scif7_device = {
+	.name		= "sh-sci",
+	.id		= 7,
+	.dev		= {
+		.platform_data	= &scif7_platform_data,
+	},
+};
+
+static struct plat_sci_port scif8_platform_data = {
+	.mapbase	= 0xe6c30000,
+	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
+	.type		= PORT_SCIFB,
+	.irqs		= { gic_spi(80), gic_spi(80),
+			    gic_spi(80), gic_spi(80) },
+};
+
+static struct platform_device scif8_device = {
+	.name		= "sh-sci",
+	.id		= 8,
+	.dev		= {
+		.platform_data	= &scif8_platform_data,
+	},
+};
+
+static struct sh_timer_config cmt10_platform_data = {
+	.name = "CMT10",
+	.channel_offset = 0x10,
+	.timer_bit = 0,
+	.clockevent_rating = 125,
+	.clocksource_rating = 125,
+};
+
+static struct resource cmt10_resources[] = {
+	[0] = {
+		.name	= "CMT10",
+		.start	= 0xe6138010,
+		.end	= 0xe613801b,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(65),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device cmt10_device = {
+	.name		= "sh_cmt",
+	.id		= 10,
+	.dev = {
+		.platform_data	= &cmt10_platform_data,
+	},
+	.resource	= cmt10_resources,
+	.num_resources	= ARRAY_SIZE(cmt10_resources),
+};
+
+/* TMU */
+static struct sh_timer_config tmu00_platform_data = {
+	.name = "TMU00",
+	.channel_offset = 0x4,
+	.timer_bit = 0,
+	.clockevent_rating = 200,
+};
+
+static struct resource tmu00_resources[] = {
+	[0] = {
+		.name	= "TMU00",
+		.start	= 0xfff60008,
+		.end	= 0xfff60013,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= intcs_evt2irq(0x0e80), /* TMU0_TUNI00 */
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device tmu00_device = {
+	.name		= "sh_tmu",
+	.id		= 0,
+	.dev = {
+		.platform_data	= &tmu00_platform_data,
+	},
+	.resource	= tmu00_resources,
+	.num_resources	= ARRAY_SIZE(tmu00_resources),
+};
+
+static struct sh_timer_config tmu01_platform_data = {
+	.name = "TMU01",
+	.channel_offset = 0x10,
+	.timer_bit = 1,
+	.clocksource_rating = 200,
+};
+
+static struct resource tmu01_resources[] = {
+	[0] = {
+		.name	= "TMU01",
+		.start	= 0xfff60014,
+		.end	= 0xfff6001f,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= intcs_evt2irq(0x0ea0), /* TMU0_TUNI01 */
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device tmu01_device = {
+	.name		= "sh_tmu",
+	.id		= 1,
+	.dev = {
+		.platform_data	= &tmu01_platform_data,
+	},
+	.resource	= tmu01_resources,
+	.num_resources	= ARRAY_SIZE(tmu01_resources),
+};
+
+static struct resource i2c0_resources[] = {
+	[0] = {
+		.name	= "IIC0",
+		.start	= 0xe6820000,
+		.end	= 0xe6820425 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(167),
+		.end	= gic_spi(170),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource i2c1_resources[] = {
+	[0] = {
+		.name	= "IIC1",
+		.start	= 0xe6822000,
+		.end	= 0xe6822425 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(51),
+		.end	= gic_spi(54),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource i2c2_resources[] = {
+	[0] = {
+		.name	= "IIC2",
+		.start	= 0xe6824000,
+		.end	= 0xe6824425 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(171),
+		.end	= gic_spi(174),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource i2c3_resources[] = {
+	[0] = {
+		.name	= "IIC3",
+		.start	= 0xe6826000,
+		.end	= 0xe6826425 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(183),
+		.end	= gic_spi(186),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource i2c4_resources[] = {
+	[0] = {
+		.name	= "IIC4",
+		.start	= 0xe6828000,
+		.end	= 0xe6828425 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(187),
+		.end	= gic_spi(190),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c0_device = {
+	.name		= "i2c-sh_mobile",
+	.id		= 0,
+	.resource	= i2c0_resources,
+	.num_resources	= ARRAY_SIZE(i2c0_resources),
+};
+
+static struct platform_device i2c1_device = {
+	.name		= "i2c-sh_mobile",
+	.id		= 1,
+	.resource	= i2c1_resources,
+	.num_resources	= ARRAY_SIZE(i2c1_resources),
+};
+
+static struct platform_device i2c2_device = {
+	.name		= "i2c-sh_mobile",
+	.id		= 2,
+	.resource	= i2c2_resources,
+	.num_resources	= ARRAY_SIZE(i2c2_resources),
+};
+
+static struct platform_device i2c3_device = {
+	.name		= "i2c-sh_mobile",
+	.id		= 3,
+	.resource	= i2c3_resources,
+	.num_resources	= ARRAY_SIZE(i2c3_resources),
+};
+
+static struct platform_device i2c4_device = {
+	.name		= "i2c-sh_mobile",
+	.id		= 4,
+	.resource	= i2c4_resources,
+	.num_resources	= ARRAY_SIZE(i2c4_resources),
+};
+
+static struct platform_device *sh73a0_early_devices[] __initdata = {
+	&scif0_device,
+	&scif1_device,
+	&scif2_device,
+	&scif3_device,
+	&scif4_device,
+	&scif5_device,
+	&scif6_device,
+	&scif7_device,
+	&scif8_device,
+	&cmt10_device,
+	&tmu00_device,
+	&tmu01_device,
+};
+
+static struct platform_device *sh73a0_late_devices[] __initdata = {
+	&i2c0_device,
+	&i2c1_device,
+	&i2c2_device,
+	&i2c3_device,
+	&i2c4_device,
+};
+
+void __init sh73a0_add_standard_devices(void)
+{
+	platform_add_devices(sh73a0_early_devices,
+			    ARRAY_SIZE(sh73a0_early_devices));
+	platform_add_devices(sh73a0_late_devices,
+			    ARRAY_SIZE(sh73a0_late_devices));
+}
+
+void __init sh73a0_add_early_devices(void)
+{
+	early_platform_add_devices(sh73a0_early_devices,
+				   ARRAY_SIZE(sh73a0_early_devices));
+}
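
Editor's sketch (not part of the patch): a board file consumes the two entry points above by registering the early devices from its map_io path, so the SCIF/CMT/TMU set is available before the console comes up, and the full device list from its machine init. The board function names below are illustrative, and the assumption that these helpers are declared in <mach/common.h> follows from platsmp.c above.

#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/common.h>	/* sh73a0_add_early_devices() etc. assumed here */

static void __init example_board_map_io(void)
{
	/* Board-specific iotable_init() mappings would go here. */
	sh73a0_add_early_devices();	/* early SCIF/CMT/TMU, e.g. for earlyprintk */
}

static void __init example_board_init(void)
{
	sh73a0_pinmux_init();
	sh73a0_add_standard_devices();	/* SCIF/CMT/TMU again, then the I2C controllers */
}
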
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
new file mode 100644
index 0000000..a156d21
--- /dev/null
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -0,0 +1,97 @@
+/*
+ * SMP support for R-Mobile / SH-Mobile - sh73a0 portion
+ *
+ * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2010  Takashi Yoshii
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <asm/smp_scu.h>
+#include <asm/smp_twd.h>
+#include <asm/hardware/gic.h>
+
+#define WUPCR		0xe6151010
+#define SRESCR		0xe6151018
+#define PSTR		0xe6151040
+#define SBAR            0xe6180020
+#define APARMBAREA      0xe6f10020
+
+static void __iomem *scu_base_addr(void)
+{
+	return (void __iomem *)0xf0000000;
+}
+
+static DEFINE_SPINLOCK(scu_lock);
+static unsigned long tmp;
+
+static void modify_scu_cpu_psr(unsigned long set, unsigned long clr)
+{
+	void __iomem *scu_base = scu_base_addr();
+
+	spin_lock(&scu_lock);
+	tmp = __raw_readl(scu_base + 8);
+	tmp &= ~clr;
+	tmp |= set;
+	spin_unlock(&scu_lock);
+
+	/* disable cache coherency after releasing the lock */
+	__raw_writel(tmp, scu_base + 8);
+}
+
+unsigned int __init sh73a0_get_core_count(void)
+{
+	void __iomem *scu_base = scu_base_addr();
+
+	return scu_get_core_count(scu_base);
+}
+
+void __cpuinit sh73a0_secondary_init(unsigned int cpu)
+{
+	gic_secondary_init(0);
+}
+
+int __cpuinit sh73a0_boot_secondary(unsigned int cpu)
+{
+	/* enable cache coherency */
+	modify_scu_cpu_psr(0, 3 << (cpu * 8));
+
+	if (((__raw_readw(__io(PSTR)) >> (4 * cpu)) & 3) == 3)
+		__raw_writel(1 << cpu, __io(WUPCR));	/* wake up */
+	else
+		__raw_writel(1 << cpu, __io(SRESCR));	/* reset */
+
+	return 0;
+}
+
+void __init sh73a0_smp_prepare_cpus(void)
+{
+#ifdef CONFIG_HAVE_ARM_TWD
+	twd_base = (void __iomem *)0xf0000600;
+#endif
+
+	scu_enable(scu_base_addr());
+
+	/* Map the reset vector (in headsmp.S) */
+	__raw_writel(0, __io(APARMBAREA));      /* 4k */
+	__raw_writel(__pa(shmobile_secondary_vector), __io(SBAR));
+
+	/* enable cache coherency on CPU0 */
+	modify_scu_cpu_psr(0, 3 << (0 * 8));
+}
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 3560f8c..5aa2d54 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -371,7 +371,7 @@
 };
 
 /* Add spear300 specific devices here */
-/* arm gpio1 device registeration */
+/* arm gpio1 device registration */
 static struct pl061_platform_data gpio1_plat_data = {
 	.gpio_base	= 8,
 	.irq_base	= SPEAR_GPIO1_INT_BASE,
@@ -451,7 +451,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	shirq_ras1.regs.base =
 		ioremap(SPEAR300_TELECOM_BASE, SPEAR300_TELECOM_REG_SIZE);
 	if (shirq_ras1.regs.base) {
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 96a1ab8..53b41b5 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -266,7 +266,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	base = ioremap(SPEAR310_SOC_CONFIG_BASE, SPEAR310_SOC_CONFIG_SIZE);
 	if (base) {
 		/* shirq 1 */
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 6a12195..88b4652 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -519,7 +519,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	base = ioremap(SPEAR320_SOC_CONFIG_BASE, SPEAR320_SOC_CONFIG_SIZE);
 	if (base) {
 		/* shirq 1 */
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index e87313a..52f553c 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -22,7 +22,7 @@
 #include <mach/spear.h>
 
 /* Add spear3xx machines common devices here */
-/* gpio device registeration */
+/* gpio device registration */
 static struct pl061_platform_data gpio_plat_data = {
 	.gpio_base	= 0,
 	.irq_base	= SPEAR_GPIO_INT_BASE,
@@ -41,7 +41,7 @@
 	.irq = {IRQ_BASIC_GPIO, NO_IRQ},
 };
 
-/* uart device registeration */
+/* uart device registration */
 struct amba_device uart_device = {
 	.dev = {
 		.init_name = "uart",
@@ -543,6 +543,6 @@
 
 pmx_fail:
 	if (ret)
-		printk(KERN_ERR "padmux: registeration failed. err no: %d\n",
+		printk(KERN_ERR "padmux: registration failed. err no: %d\n",
 				ret);
 }
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index baf6bcc..f2fe14e 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -23,7 +23,7 @@
 #include <mach/spear.h>
 
 /* Add spear6xx machines common devices here */
-/* uart device registeration */
+/* uart device registration */
 struct amba_device uart_device[] = {
 	{
 		.dev = {
@@ -50,7 +50,7 @@
 	}
 };
 
-/* gpio device registeration */
+/* gpio device registration */
 static struct pl061_platform_data gpio_plat_data[] = {
 	{
 		.gpio_base	= 0,
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
new file mode 100644
index 0000000..3ad086e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -0,0 +1,29 @@
+/*
+ * arch/arm/mach-tegra/include/mach/sdhci.h
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
+#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
+
+#include <linux/mmc/host.h>
+
+struct tegra_sdhci_platform_data {
+	int cd_gpio;
+	int wp_gpio;
+	int power_gpio;
+	int is_8bit;
+};
+
+#endif
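
Editor's sketch (not part of the patch): the header above only carries board wiring into the Tegra SDHCI driver, so a board file fills in the GPIO numbers and hands the structure over as platform data. Everything below is a placeholder: the GPIO numbers, the example_sdhci_pdata name, and the tegra_sdhci_device3 platform device mentioned in the comment are assumptions about board code, not something this patch defines.

#include <mach/sdhci.h>

/* Hypothetical wiring for one SD slot; card-detect, write-protect and
 * power GPIOs are board specific, and -1 conventionally means "not
 * connected". */
static struct tegra_sdhci_platform_data example_sdhci_pdata = {
	.cd_gpio	= 69,	/* placeholder */
	.wp_gpio	= 57,	/* placeholder */
	.power_gpio	= 70,	/* placeholder */
	.is_8bit	= 0,
};

/* A board init would then attach it before registration, e.g.:
 *	tegra_sdhci_device3.dev.platform_data = &example_sdhci_pdata;
 *	platform_device_register(&tegra_sdhci_device3);
 * where tegra_sdhci_device3 is assumed to be one of the SDHCI platform
 * devices declared elsewhere for the machine.
 */
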
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 801b21e..32a7b0f 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -64,7 +64,7 @@
 	bool "Dual RAM"
 	help
 		Select this if you want support for Dual RAM phones.
-		This is two RAM memorys on different EMIFs.
+		This is two RAM memories on different EMIFs.
 endchoice
 
 config U300_DEBUG
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index 193da2d..6193aaa 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -24,7 +24,7 @@
  * @src_addr: transfer source address
  * @dst_addr: transfer destination address
  * @link_addr:  physical address to next lli
- * @virt_link_addr: virtual addres of next lli (only used by pool_free)
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
  * @phy_this: physical address of current lli (only used by pool_free)
  */
 struct coh901318_lli {
@@ -90,7 +90,7 @@
  * struct coh901318_platform - platform arch structure
  * @chans_slave: specifying dma slave channels
  * @chans_memcpy: specifying dma memcpy channels
- * @access_memory_state: requesting DMA memeory access (on / off)
+ * @access_memory_state: requesting DMA memory access (on / off)
  * @chan_conf: dma channel configurations
  * @max_channels: max number of dma chanenls
  */
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 2dd44a0..247caa3 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -7,28 +7,30 @@
 	select HAS_MTU
 	select NOMADIK_GPIO
 
-config UX500_SOC_DB8500
-	bool
+menu "Ux500 SoC"
 
 config UX500_SOC_DB5500
-	bool
+	bool "DB5500"
 
-choice
-	prompt "Ux500 target platform"
-	default MACH_U8500_MOP
+config UX500_SOC_DB8500
+	bool "DB8500"
 
-config MACH_U8500_MOP
+endmenu
+
+menu "Ux500 target platform"
+
+config MACH_U8500
 	bool "U8500 Development platform"
-	select UX500_SOC_DB8500
+	depends on UX500_SOC_DB8500
 	help
 	  Include support for the mop500 development platform.
 
 config MACH_U5500
 	bool "U5500 Development platform"
-	select UX500_SOC_DB5500
+	depends on UX500_SOC_DB5500
 	help
 	  Include support for the U5500 development platform.
-endchoice
+endmenu
 
 config UX500_DEBUG_UART
 	int "Ux500 UART to use for low-level debug"
@@ -39,14 +41,14 @@
 
 config U5500_MODEM_IRQ
 	bool "Modem IRQ support"
-	depends on MACH_U5500
+	depends on UX500_SOC_DB5500
 	default y
 	help
 	  Add support for handling IRQ:s from modem side
 
 config U5500_MBOX
 	bool "Mailbox support"
-	depends on MACH_U5500 && U5500_MODEM_IRQ
+	depends on U5500_MODEM_IRQ
 	default y
 	help
 	  Add support for U5500 mailbox communication with modem side
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 12052e8..53ebb42 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -2,10 +2,11 @@
 # Makefile for the linux kernel, U8500 machine.
 #
 
-obj-y				:= clock.o cpu.o devices.o devices-common.o
+obj-y				:= clock.o cpu.o devices.o devices-common.o \
+				   id.o
 obj-$(CONFIG_UX500_SOC_DB5500)	+= cpu-db5500.o dma-db5500.o
 obj-$(CONFIG_UX500_SOC_DB8500)	+= cpu-db8500.o devices-db8500.o prcmu.o
-obj-$(CONFIG_MACH_U8500_MOP)	+= board-mop500.o board-mop500-sdi.o \
+obj-$(CONFIG_MACH_U8500)	+= board-mop500.o board-mop500-sdi.o \
 				board-mop500-keypads.o
 obj-$(CONFIG_MACH_U5500)	+= board-u5500.o board-u5500-sdi.o
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 1187f1f..533967c 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -3,99 +3,94 @@
  *
  * License Terms: GNU General Public License v2
  *
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com>
+ *          Bengt Jonsson <bengt.g.jonsson@stericsson.com>
  *
  * MOP500 board specific initialization for regulators
  */
 #include <linux/kernel.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
 
-/* supplies to the display/camera */
-static struct regulator_init_data ab8500_vaux1_regulator = {
-	.constraints = {
-		.name = "V-DISPLAY",
-		.min_uV = 2500000,
-		.max_uV = 2900000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
+/* AB8500 regulators */
+struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+	/* supplies to the display/camera */
+	[AB8500_LDO_AUX1] = {
+		.constraints = {
+			.name = "V-DISPLAY",
+			.min_uV = 2500000,
+			.max_uV = 2900000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supplies to the on-board eMMC */
+	[AB8500_LDO_AUX2] = {
+		.constraints = {
+			.name = "V-eMMC1",
+			.min_uV = 1100000,
+			.max_uV = 3300000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for VAUX3, supplies to SDcard slots */
+	[AB8500_LDO_AUX3] = {
+		.constraints = {
+			.name = "V-MMC-SD",
+			.min_uV = 1100000,
+			.max_uV = 3300000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for tvout, gpadc, TVOUT LDO */
+	[AB8500_LDO_TVOUT] = {
+		.constraints = {
+			.name = "V-TVOUT",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for ab8500-vaudio, VAUDIO LDO */
+	[AB8500_LDO_AUDIO] = {
+		.constraints = {
+			.name = "V-AUD",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-anamic1 VAMic1-LDO */
+	[AB8500_LDO_ANAMIC1] = {
+		.constraints = {
+			.name = "V-AMIC1",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+	[AB8500_LDO_ANAMIC2] = {
+		.constraints = {
+			.name = "V-AMIC2",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-dmic, VDMIC LDO */
+	[AB8500_LDO_DMIC] = {
+		.constraints = {
+			.name = "V-DMIC",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-intcore12, VINTCORE12 LDO */
+	[AB8500_LDO_INTCORE] = {
+		.constraints = {
+			.name = "V-INTCORE",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for U8500 CSI/DSI, VANA LDO */
+	[AB8500_LDO_ANA] = {
+		.constraints = {
+			.name = "V-CSI/DSI",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
 	},
 };
-
-/* supplies to the on-board eMMC */
-static struct regulator_init_data ab8500_vaux2_regulator = {
-	.constraints = {
-		.name = "V-eMMC1",
-		.min_uV = 1100000,
-		.max_uV = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for VAUX3, supplies to SDcard slots */
-static struct regulator_init_data ab8500_vaux3_regulator = {
-	.constraints = {
-		.name = "V-MMC-SD",
-		.min_uV = 1100000,
-		.max_uV = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for tvout, gpadc, TVOUT LDO */
-static struct regulator_init_data ab8500_vtvout_init = {
-	.constraints = {
-		.name = "V-TVOUT",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for ab8500-vaudio, VAUDIO LDO */
-static struct regulator_init_data ab8500_vaudio_init = {
-	.constraints = {
-		.name = "V-AUD",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-anamic1 VAMic1-LDO */
-static struct regulator_init_data ab8500_vamic1_init = {
-	.constraints = {
-		.name = "V-AMIC1",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
-static struct regulator_init_data ab8500_vamic2_init = {
-	.constraints = {
-		.name = "V-AMIC2",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-dmic, VDMIC LDO */
-static struct regulator_init_data ab8500_vdmic_init = {
-	.constraints = {
-		.name = "V-DMIC",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-intcore12, VINTCORE12 LDO */
-static struct regulator_init_data ab8500_vintcore_init = {
-	.constraints = {
-		.name = "V-INTCORE",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for U8500 CSI/DSI, VANA LDO */
-static struct regulator_init_data ab8500_vana_init = {
-	.constraints = {
-		.name = "V-CSI/DSI",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
new file mode 100644
index 0000000..2675fae
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * MOP500 board specific initialization for regulators
+ */
+
+#ifndef __BOARD_MOP500_REGULATORS_H
+#define __BOARD_MOP500_REGULATORS_H
+
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
+
+extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+
+#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index a1c9ea1..a393f57 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -35,6 +35,7 @@
 #include "devices-db8500.h"
 #include "pins-db8500.h"
 #include "board-mop500.h"
+#include "board-mop500-regulators.h"
 
 static pin_cfg_t mop500_pins[] = {
 	/* SSP0 */
@@ -80,6 +81,8 @@
 
 static struct ab8500_platform_data ab8500_platdata = {
 	.irq_base	= MOP500_AB8500_IRQ_BASE,
+	.regulator	= ab8500_regulators,
+	.num_regulator	= ARRAY_SIZE(ab8500_regulators),
 };
 
 static struct resource ab8500_resources[] = {
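
As an aside, the change above follows a common board-file pattern: one regulator_init_data table indexed by the AB8500_LDO_* enum, handed to the MFD core through the .regulator/.num_regulator fields of the platform data. The following is a minimal, stand-alone sketch of that pattern only; the enum values, struct fields and the consuming loop are made-up stand-ins, not the real AB8500 definitions.

/* Illustrative sketch (not kernel code) of the enum-indexed regulator table. */
#include <stdio.h>

enum demo_ldo { DEMO_LDO_AUX1, DEMO_LDO_AUX2, DEMO_NUM_REGULATORS };

struct demo_init_data {
	const char *name;
	int min_uV;	/* 0 means fixed voltage, only on/off control */
	int max_uV;
};

/* Board file: one entry per LDO, indexed by the enum */
static struct demo_init_data demo_regulators[DEMO_NUM_REGULATORS] = {
	[DEMO_LDO_AUX1] = { .name = "V-DISPLAY", .min_uV = 2500000, .max_uV = 2900000 },
	[DEMO_LDO_AUX2] = { .name = "V-eMMC1",   .min_uV = 1100000, .max_uV = 3300000 },
};

/* Platform data: the consumer only sees a pointer and a count */
struct demo_platform_data {
	struct demo_init_data *regulator;
	int num_regulator;
};

static struct demo_platform_data demo_pdata = {
	.regulator	= demo_regulators,
	.num_regulator	= DEMO_NUM_REGULATORS,
};

int main(void)
{
	int i;

	for (i = 0; i < demo_pdata.num_regulator; i++)
		printf("%-12s %7d..%d uV\n", demo_pdata.regulator[i].name,
		       demo_pdata.regulator[i].min_uV,
		       demo_pdata.regulator[i].max_uV);
	return 0;
}

The point of the table is that adding an LDO only touches the board file; the driver iterates num_regulator entries without knowing the board.
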
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index ccff2dae1..b2b0a3b 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -136,8 +136,7 @@
  */
 static unsigned long clk_mtu_get_rate(struct clk *clk)
 {
-	void __iomem *addr = __io_address(UX500_PRCMU_BASE)
-		+ PRCM_TCR;
+	void __iomem *addr;
 	u32 tcr;
 	int mtu = (int) clk->data;
 	/*
@@ -149,13 +148,20 @@
 	unsigned long mturate;
 	unsigned long retclk;
 
+	if (cpu_is_u5500())
+		addr = __io_address(U5500_PRCMU_BASE);
+	else if (cpu_is_u8500())
+		addr = __io_address(U8500_PRCMU_BASE);
+	else
+		ux500_unknown_soc();
+
 	/*
 	 * On a startup, always configure the TCR to the doze mode;
 	 * bootloaders do it for us. Do this in the kernel too.
 	 */
-	writel(PRCM_TCR_DOZE_MODE, addr);
+	writel(PRCM_TCR_DOZE_MODE, addr + PRCM_TCR);
 
-	tcr = readl(addr);
+	tcr = readl(addr + PRCM_TCR);
 
 	/* Get the rate from the parent as a default */
 	if (clk->parent_periph)
diff --git a/arch/arm/mach-ux500/cpu-db5500.c b/arch/arm/mach-ux500/cpu-db5500.c
index acc841e..af04e08 100644
--- a/arch/arm/mach-ux500/cpu-db5500.c
+++ b/arch/arm/mach-ux500/cpu-db5500.c
@@ -21,7 +21,20 @@
 
 #include "devices-db5500.h"
 
+static struct map_desc u5500_uart_io_desc[] __initdata = {
+	__IO_DEV_DESC(U5500_UART0_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_UART2_BASE, SZ_4K),
+};
+
 static struct map_desc u5500_io_desc[] __initdata = {
+	__IO_DEV_DESC(U5500_GIC_CPU_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_GIC_DIST_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_L2CC_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_TWD_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_MTU0_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_SCU_BASE, SZ_4K),
+	__IO_DEV_DESC(U5500_BACKUPRAM0_BASE, SZ_8K),
+
 	__IO_DEV_DESC(U5500_GPIO0_BASE, SZ_4K),
 	__IO_DEV_DESC(U5500_GPIO1_BASE, SZ_4K),
 	__IO_DEV_DESC(U5500_GPIO2_BASE, SZ_4K),
@@ -143,6 +156,11 @@
 
 void __init u5500_map_io(void)
 {
+	/*
+	 * Map the UARTs early so that the DEBUG_LL stuff continues to work.
+	 */
+	iotable_init(u5500_uart_io_desc, ARRAY_SIZE(u5500_uart_io_desc));
+
 	ux500_map_io();
 
 	iotable_init(u5500_io_desc, ARRAY_SIZE(u5500_io_desc));
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index c0f34a4..1748fbc 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -29,13 +29,31 @@
 };
 
 /* minimum static i/o mapping required to boot U8500 platforms */
+static struct map_desc u8500_uart_io_desc[] __initdata = {
+	__IO_DEV_DESC(U8500_UART0_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_UART2_BASE, SZ_4K),
+};
+
 static struct map_desc u8500_io_desc[] __initdata = {
+	__IO_DEV_DESC(U8500_GIC_CPU_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_GIC_DIST_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_L2CC_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_TWD_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_MTU0_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_SCU_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_BACKUPRAM0_BASE, SZ_8K),
+
+	__IO_DEV_DESC(U8500_CLKRST1_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_CLKRST2_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_CLKRST3_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_CLKRST5_BASE, SZ_4K),
+	__IO_DEV_DESC(U8500_CLKRST6_BASE, SZ_4K),
+
 	__IO_DEV_DESC(U8500_PRCMU_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_GPIO0_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_GPIO1_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_GPIO2_BASE, SZ_4K),
 	__IO_DEV_DESC(U8500_GPIO3_BASE, SZ_4K),
-	__MEM_DEV_DESC(U8500_BOOT_ROM_BASE, SZ_1M),
 };
 
 static struct map_desc u8500_ed_io_desc[] __initdata = {
@@ -52,71 +70,13 @@
 	__IO_DEV_DESC(U8500_PRCMU_TCDM_BASE, SZ_4K),
 };
 
-/*
- * Functions to differentiate between later ASICs
- * We look into the end of the ROM to locate the hardcoded ASIC ID.
- * This is only needed to differentiate between minor revisions and
- * process variants of an ASIC, the major revisions are encoded in
- * the cpuid.
- */
-#define U8500_ASIC_ID_LOC_ED_V1	(U8500_BOOT_ROM_BASE + 0x1FFF4)
-#define U8500_ASIC_ID_LOC_V2	(U8500_BOOT_ROM_BASE + 0x1DBF4)
-#define U8500_ASIC_REV_ED	0x01
-#define U8500_ASIC_REV_V10	0xA0
-#define U8500_ASIC_REV_V11	0xA1
-#define U8500_ASIC_REV_V20	0xB0
-
-/**
- * struct db8500_asic_id - fields of the ASIC ID
- * @process: the manufacturing process, 0x40 is 40 nm
- *  0x00 is "standard"
- * @partnumber: hithereto 0x8500 for DB8500
- * @revision: version code in the series
- * This field definion is not formally defined but makes
- * sense.
- */
-struct db8500_asic_id {
-	u8 process;
-	u16 partnumber;
-	u8 revision;
-};
-
-/* This isn't going to change at runtime */
-static struct db8500_asic_id db8500_id;
-
-static void __init get_db8500_asic_id(void)
-{
-	u32 asicid;
-
-	if (cpu_is_u8500v1() || cpu_is_u8500ed())
-		asicid = readl(__io_address(U8500_ASIC_ID_LOC_ED_V1));
-	else if (cpu_is_u8500v2())
-		asicid = readl(__io_address(U8500_ASIC_ID_LOC_V2));
-	else
-		BUG();
-
-	db8500_id.process = (asicid >> 24);
-	db8500_id.partnumber = (asicid >> 16) & 0xFFFFU;
-	db8500_id.revision = asicid & 0xFFU;
-}
-
-bool cpu_is_u8500v10(void)
-{
-	return (db8500_id.revision == U8500_ASIC_REV_V10);
-}
-
-bool cpu_is_u8500v11(void)
-{
-	return (db8500_id.revision == U8500_ASIC_REV_V11);
-}
-
-bool cpu_is_u8500v20(void)
-{
-	return (db8500_id.revision == U8500_ASIC_REV_V20);
-}
-
 void __init u8500_map_io(void)
 {
+	/*
+	 * Map the UARTs early so that the DEBUG_LL stuff continues to work.
+	 */
+	iotable_init(u8500_uart_io_desc, ARRAY_SIZE(u8500_uart_io_desc));
+
 	ux500_map_io();
 
 	iotable_init(u8500_io_desc, ARRAY_SIZE(u8500_io_desc));
@@ -127,9 +87,6 @@
 		iotable_init(u8500_v1_io_desc, ARRAY_SIZE(u8500_v1_io_desc));
 	else if (cpu_is_u8500v2())
 		iotable_init(u8500_v2_io_desc, ARRAY_SIZE(u8500_v2_io_desc));
-
-	/* Read out the ASIC ID as early as we can */
-	get_db8500_asic_id();
 }
 
 static resource_size_t __initdata db8500_gpio_base[] = {
@@ -159,20 +116,6 @@
  */
 void __init u8500_init_devices(void)
 {
-	/* Display some ASIC boilerplate */
-	pr_info("DB8500: process: %02x, revision ID: 0x%02x\n",
-		db8500_id.process, db8500_id.revision);
-	if (cpu_is_u8500ed())
-		pr_info("DB8500: Early Drop (ED)\n");
-	else if (cpu_is_u8500v10())
-		pr_info("DB8500: version 1.0\n");
-	else if (cpu_is_u8500v11())
-		pr_info("DB8500: version 1.1\n");
-	else if (cpu_is_u8500v20())
-		pr_info("DB8500: version 2.0\n");
-	else
-		pr_warning("ASIC: UNKNOWN SILICON VERSION!\n");
-
 	if (cpu_is_u8500ed())
 		dma40_u8500ed_fixup();
 
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 5730409..5a43107 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -23,37 +23,25 @@
 
 #include "clock.h"
 
-static struct map_desc ux500_io_desc[] __initdata = {
-	__IO_DEV_DESC(UX500_UART0_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_UART2_BASE, SZ_4K),
-
-	__IO_DEV_DESC(UX500_GIC_CPU_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_GIC_DIST_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_L2CC_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_TWD_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_SCU_BASE, SZ_4K),
-
-	__IO_DEV_DESC(UX500_CLKRST1_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_CLKRST2_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_CLKRST3_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_CLKRST5_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_CLKRST6_BASE, SZ_4K),
-
-	__IO_DEV_DESC(UX500_MTU0_BASE, SZ_4K),
-	__IO_DEV_DESC(UX500_MTU1_BASE, SZ_4K),
-
-	__IO_DEV_DESC(UX500_BACKUPRAM0_BASE, SZ_8K),
-};
-
-void __init ux500_map_io(void)
-{
-	iotable_init(ux500_io_desc, ARRAY_SIZE(ux500_io_desc));
-}
+#ifdef CONFIG_CACHE_L2X0
+static void __iomem *l2x0_base;
+#endif
 
 void __init ux500_init_irq(void)
 {
-	gic_init(0, 29, __io_address(UX500_GIC_DIST_BASE),
-		 __io_address(UX500_GIC_CPU_BASE));
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (cpu_is_u5500()) {
+		dist_base = __io_address(U5500_GIC_DIST_BASE);
+		cpu_base = __io_address(U5500_GIC_CPU_BASE);
+	} else if (cpu_is_u8500()) {
+		dist_base = __io_address(U8500_GIC_DIST_BASE);
+		cpu_base = __io_address(U8500_GIC_CPU_BASE);
+	} else
+		ux500_unknown_soc();
+
+	gic_init(0, 29, dist_base, cpu_base);
 
 	/*
 	 * Init clocks here so that they are available for system timer
@@ -74,7 +62,8 @@
 
 static inline void ux500_cache_sync(void)
 {
-	void __iomem *base = __io_address(UX500_L2CC_BASE);
+	void __iomem *base = l2x0_base;
+
 	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 	ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
@@ -96,20 +85,23 @@
  */
 static void ux500_l2x0_inv_all(void)
 {
-	void __iomem *l2x0_base = __io_address(UX500_L2CC_BASE);
+	void __iomem *base = l2x0_base;
 	uint32_t l2x0_way_mask = (1<<16) - 1;	/* Bitmask of active ways */
 
 	/* invalidate all ways */
-	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	writel_relaxed(l2x0_way_mask, base + L2X0_INV_WAY);
+	ux500_cache_wait(base + L2X0_INV_WAY, l2x0_way_mask);
 	ux500_cache_sync();
 }
 
 static int ux500_l2x0_init(void)
 {
-	void __iomem *l2x0_base;
-
-	l2x0_base = __io_address(UX500_L2CC_BASE);
+	if (cpu_is_u5500())
+		l2x0_base = __io_address(U5500_L2CC_BASE);
+	else if (cpu_is_u8500())
+		l2x0_base = __io_address(U8500_L2CC_BASE);
+	else
+		ux500_unknown_soc();
 
 	/* 64KB way size, 8 way associativity, force WA */
 	l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
@@ -127,13 +119,21 @@
 {
 #ifdef CONFIG_LOCAL_TIMERS
 	/* Setup the local timer base */
-	twd_base = __io_address(UX500_TWD_BASE);
-#endif
-	/* Setup the MTU base */
-	if (cpu_is_u8500ed())
-		mtu_base = __io_address(U8500_MTU0_BASE_ED);
+	if (cpu_is_u5500())
+		twd_base = __io_address(U5500_TWD_BASE);
+	else if (cpu_is_u8500())
+		twd_base = __io_address(U8500_TWD_BASE);
 	else
-		mtu_base = __io_address(UX500_MTU0_BASE);
+		ux500_unknown_soc();
+#endif
+	if (cpu_is_u5500())
+		mtu_base = __io_address(U5500_MTU0_BASE);
+	else if (cpu_is_u8500ed())
+		mtu_base = __io_address(U8500_MTU0_BASE_ED);
+	else if (cpu_is_u8500())
+		mtu_base = __io_address(U8500_MTU0_BASE);
+	else
+		ux500_unknown_soc();
 
 	nmdk_timer_init();
 }
diff --git a/arch/arm/mach-ux500/id.c b/arch/arm/mach-ux500/id.c
new file mode 100644
index 0000000..d35122e
--- /dev/null
+++ b/arch/arm/mach-ux500/id.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/setup.h>
+
+struct dbx500_asic_id dbx500_id;
+
+static unsigned int ux500_read_asicid(phys_addr_t addr)
+{
+	phys_addr_t base = addr & ~0xfff;
+	struct map_desc desc = {
+		.virtual	= IO_ADDRESS(base),
+		.pfn		= __phys_to_pfn(base),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	};
+
+	iotable_init(&desc, 1);
+
+	/* As in devicemaps_init() */
+	local_flush_tlb_all();
+	flush_cache_all();
+
+	return readl(__io_address(addr));
+}
+
+static void ux500_print_soc_info(unsigned int asicid)
+{
+	unsigned int rev = dbx500_revision();
+
+	pr_info("DB%4x ", dbx500_partnumber());
+
+	if (rev == 0x01)
+		pr_cont("Early Drop");
+	else if (rev >= 0xA0)
+		pr_cont("v%d.%d", (rev >> 4) - 0xA + 1, rev & 0xf);
+	else
+		pr_cont("Unknown");
+
+	pr_cont(" [%#010x]\n", asicid);
+}
+
+static unsigned int partnumber(unsigned int asicid)
+{
+	return (asicid >> 8) & 0xffff;
+}
+
+/*
+ * SOC		MIDR		ASICID ADDRESS		ASICID VALUE
+ * DB8500ed	0x410fc090	0x9001FFF4		0x00850001
+ * DB8500v1	0x411fc091	0x9001FFF4		0x008500A0
+ * DB8500v1.1	0x411fc091	0x9001FFF4		0x008500A1
+ * DB8500v2	0x412fc091	0x9001DBF4		0x008500B0
+ * DB5500v1	0x412fc091	0x9001FFF4		0x005500A0
+ */
+
+void __init ux500_map_io(void)
+{
+	unsigned int cpuid = read_cpuid_id();
+	unsigned int asicid = 0;
+	phys_addr_t addr = 0;
+
+	switch (cpuid) {
+	case 0x410fc090: /* DB8500ed */
+	case 0x411fc091: /* DB8500v1 */
+		addr = 0x9001FFF4;
+		break;
+
+	case 0x412fc091: /* DB8500v2 / DB5500v1 */
+		asicid = ux500_read_asicid(0x9001DBF4);
+		if (partnumber(asicid) == 0x8500)
+			/* DB8500v2 */
+			break;
+
+		/* DB5500v1 */
+		addr = 0x9001FFF4;
+		break;
+	}
+
+	if (addr)
+		asicid = ux500_read_asicid(addr);
+
+	if (!asicid) {
+		pr_err("Unable to identify SoC\n");
+		ux500_unknown_soc();
+	}
+
+	dbx500_id.process = asicid >> 24;
+	dbx500_id.partnumber = partnumber(asicid);
+	dbx500_id.revision = asicid & 0xff;
+
+	ux500_print_soc_info(asicid);
+}
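
The decoding done by id.c is a plain unpacking of one 32-bit word. The following hypothetical userspace program mirrors the field extraction and the version string produced by ux500_print_soc_info(), using the sample values from the table above; it is only an illustration, not part of the patch.

/* Stand-alone sketch of the ASIC ID decoding performed by id.c above. */
#include <stdio.h>

struct demo_asic_id {
	unsigned short	partnumber;
	unsigned char	revision;
	unsigned char	process;
};

static struct demo_asic_id decode(unsigned int asicid)
{
	struct demo_asic_id id;

	id.process	= asicid >> 24;			/* e.g. 0x40 for 40 nm */
	id.partnumber	= (asicid >> 8) & 0xffff;	/* 0x8500 or 0x5500 */
	id.revision	= asicid & 0xff;		/* 0x01 = ED, 0xA0 = v1.0, 0xB0 = v2.0 */
	return id;
}

int main(void)
{
	/* Sample ASICID values from the table in id.c */
	unsigned int samples[] = { 0x00850001, 0x008500A0, 0x008500A1,
				   0x008500B0, 0x005500A0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		struct demo_asic_id id = decode(samples[i]);

		printf("DB%4x ", id.partnumber);
		if (id.revision == 0x01)
			printf("Early Drop");
		else if (id.revision >= 0xA0)
			printf("v%d.%d", (id.revision >> 4) - 0xA + 1,
			       id.revision & 0xf);
		else
			printf("Unknown");
		printf(" [%#010x]\n", samples[i]);
	}
	return 0;
}
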
diff --git a/arch/arm/mach-ux500/include/mach/debug-macro.S b/arch/arm/mach-ux500/include/mach/debug-macro.S
index be7c0f1..700fb05 100644
--- a/arch/arm/mach-ux500/include/mach/debug-macro.S
+++ b/arch/arm/mach-ux500/include/mach/debug-macro.S
@@ -14,7 +14,24 @@
 #error Invalid Ux500 debug UART
 #endif
 
-#define __UX500_UART(n)	UX500_UART##n##_BASE
+/*
+ * DEBUG_LL only works if only one SOC is built in.  We don't use #else below
+ * in order to get "__UX500_UART redefined" warnings if more than one SOC is
+ * built, so that there's some hint during the build that something is wrong.
+ */
+
+#ifdef CONFIG_UX500_SOC_DB5500
+#define __UX500_UART(n)	U5500_UART##n##_BASE
+#endif
+
+#ifdef CONFIG_UX500_SOC_DB8500
+#define __UX500_UART(n)	U8500_UART##n##_BASE
+#endif
+
+#ifndef __UX500_UART
+#error Unknown SOC
+#endif
+
 #define UX500_UART(n)	__UX500_UART(n)
 #define UART_BASE	UX500_UART(CONFIG_UX500_DEBUG_UART)
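
The comment above leans on a property of the C preprocessor: redefining an object-like macro to a different replacement list is a diagnosable condition, so building both SoCs produces a "__UX500_UART redefined" warning instead of silently picking one UART base. A tiny stand-alone illustration (the macro name and addresses here are invented, not the kernel headers):

/*
 * Compiling this file (e.g. "gcc -c redef-demo.c") emits a
 * "'DEMO_UART_BASE' redefined" warning, which is the build-time
 * hint the comment above refers to.
 */
#define DEMO_UART_BASE	0x80007000	/* pretend: SoC A */
#define DEMO_UART_BASE	0xA0007000	/* pretend: SoC B -- triggers the warning */

unsigned long demo_uart_base = DEMO_UART_BASE;
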
 
diff --git a/arch/arm/mach-ux500/include/mach/entry-macro.S b/arch/arm/mach-ux500/include/mach/entry-macro.S
index a37f585..071bba9 100644
--- a/arch/arm/mach-ux500/include/mach/entry-macro.S
+++ b/arch/arm/mach-ux500/include/mach/entry-macro.S
@@ -11,15 +11,10 @@
  * warranty of any kind, whether express or implied.
  */
 #include <mach/hardware.h>
-#define HAVE_GET_IRQNR_PREAMBLE
 #include <asm/hardware/entry-macro-gic.S>
 
 		.macro	disable_fiq
 		.endm
 
-		.macro  get_irqnr_preamble, base, tmp
-		ldr     \base, =IO_ADDRESS(UX500_GIC_CPU_BASE)
-		.endm
-
 		.macro  arch_ret_to_user, tmp1, tmp2
 		.endm
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index 6295cc5..bf63f26 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -29,118 +29,12 @@
 #include <mach/db8500-regs.h>
 #include <mach/db5500-regs.h>
 
-#ifdef CONFIG_UX500_SOC_DB8500
-#define UX500(periph)		U8500_##periph##_BASE
-#elif defined(CONFIG_UX500_SOC_DB5500)
-#define UX500(periph)		U5500_##periph##_BASE
-#endif
-
-#define UX500_BACKUPRAM0_BASE	UX500(BACKUPRAM0)
-#define UX500_BACKUPRAM1_BASE	UX500(BACKUPRAM1)
-#define UX500_B2R2_BASE		UX500(B2R2)
-
-#define UX500_CLKRST1_BASE	UX500(CLKRST1)
-#define UX500_CLKRST2_BASE	UX500(CLKRST2)
-#define UX500_CLKRST3_BASE	UX500(CLKRST3)
-#define UX500_CLKRST5_BASE	UX500(CLKRST5)
-#define UX500_CLKRST6_BASE	UX500(CLKRST6)
-
-#define UX500_DMA_BASE		UX500(DMA)
-#define UX500_FSMC_BASE		UX500(FSMC)
-
-#define UX500_GIC_CPU_BASE	UX500(GIC_CPU)
-#define UX500_GIC_DIST_BASE	UX500(GIC_DIST)
-
-#define UX500_I2C1_BASE		UX500(I2C1)
-#define UX500_I2C2_BASE		UX500(I2C2)
-#define UX500_I2C3_BASE		UX500(I2C3)
-
-#define UX500_L2CC_BASE		UX500(L2CC)
-#define UX500_MCDE_BASE		UX500(MCDE)
-#define UX500_MTU0_BASE		UX500(MTU0)
-#define UX500_MTU1_BASE		UX500(MTU1)
-#define UX500_PRCMU_BASE	UX500(PRCMU)
-
-#define UX500_RNG_BASE		UX500(RNG)
-#define UX500_RTC_BASE		UX500(RTC)
-
-#define UX500_SCU_BASE		UX500(SCU)
-
-#define UX500_SDI0_BASE		UX500(SDI0)
-#define UX500_SDI1_BASE		UX500(SDI1)
-#define UX500_SDI2_BASE		UX500(SDI2)
-#define UX500_SDI3_BASE		UX500(SDI3)
-#define UX500_SDI4_BASE		UX500(SDI4)
-
-#define UX500_SPI0_BASE		UX500(SPI0)
-#define UX500_SPI1_BASE		UX500(SPI1)
-#define UX500_SPI2_BASE		UX500(SPI2)
-#define UX500_SPI3_BASE		UX500(SPI3)
-
-#define UX500_SIA_BASE		UX500(SIA)
-#define UX500_SVA_BASE		UX500(SVA)
-
-#define UX500_TWD_BASE		UX500(TWD)
-
-#define UX500_UART0_BASE	UX500(UART0)
-#define UX500_UART1_BASE	UX500(UART1)
-#define UX500_UART2_BASE	UX500(UART2)
-
-#define UX500_USBOTG_BASE	UX500(USBOTG)
-
 /* ST-Ericsson modified pl022 id */
 #define SSP_PER_ID		0x01080022
 
 #ifndef __ASSEMBLY__
 
-#include <asm/cputype.h>
-
-static inline bool cpu_is_u8500(void)
-{
-#ifdef CONFIG_UX500_SOC_DB8500
-	return 1;
-#else
-	return 0;
-#endif
-}
-
-#define CPUID_DB8500ED	0x410fc090
-#define CPUID_DB8500V1	0x411fc091
-#define CPUID_DB8500V2	0x412fc091
-
-static inline bool cpu_is_u8500ed(void)
-{
-	return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500ED);
-}
-
-static inline bool cpu_is_u8500v1(void)
-{
-	return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500V1);
-}
-
-static inline bool cpu_is_u8500v2(void)
-{
-	return cpu_is_u8500() && (read_cpuid_id() == CPUID_DB8500V2);
-}
-
-#ifdef CONFIG_UX500_SOC_DB8500
-bool cpu_is_u8500v10(void);
-bool cpu_is_u8500v11(void);
-bool cpu_is_u8500v20(void);
-#else
-static inline bool cpu_is_u8500v10(void) { return false; }
-static inline bool cpu_is_u8500v11(void) { return false; }
-static inline bool cpu_is_u8500v20(void) { return false; }
-#endif
-
-static inline bool cpu_is_u5500(void)
-{
-#ifdef CONFIG_UX500_SOC_DB5500
-	return 1;
-#else
-	return 0;
-#endif
-}
+#include <mach/id.h>
 
 #define ARRAY_AND_SIZE(x)	(x), ARRAY_SIZE(x)
 
diff --git a/arch/arm/mach-ux500/include/mach/id.h b/arch/arm/mach-ux500/include/mach/id.h
new file mode 100644
index 0000000..f1288d1
--- /dev/null
+++ b/arch/arm/mach-ux500/include/mach/id.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __MACH_UX500_ID
+#define __MACH_UX500_ID
+
+/**
+ * struct dbx500_asic_id - fields of the ASIC ID
+ * @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard"
+ * @partnumber: hitherto 0x8500 for DB8500
+ * @revision: version code in the series
+ */
+struct dbx500_asic_id {
+	u16	partnumber;
+	u8	revision;
+	u8	process;
+};
+
+extern struct dbx500_asic_id dbx500_id;
+
+static inline unsigned int __attribute_const__ dbx500_partnumber(void)
+{
+	return dbx500_id.partnumber;
+}
+
+static inline unsigned int __attribute_const__ dbx500_revision(void)
+{
+	return dbx500_id.revision;
+}
+
+/*
+ * SOCs
+ */
+
+static inline bool __attribute_const__ cpu_is_u8500(void)
+{
+	return dbx500_partnumber() == 0x8500;
+}
+
+static inline bool __attribute_const__ cpu_is_u5500(void)
+{
+	return dbx500_partnumber() == 0x5500;
+}
+
+/*
+ * 8500 revisions
+ */
+
+static inline bool __attribute_const__ cpu_is_u8500ed(void)
+{
+	return cpu_is_u8500() && dbx500_revision() == 0x00;
+}
+
+static inline bool __attribute_const__ cpu_is_u8500v1(void)
+{
+	return cpu_is_u8500() && (dbx500_revision() & 0xf0) == 0xA0;
+}
+
+static inline bool __attribute_const__ cpu_is_u8500v10(void)
+{
+	return cpu_is_u8500() && dbx500_revision() == 0xA0;
+}
+
+static inline bool __attribute_const__ cpu_is_u8500v11(void)
+{
+	return cpu_is_u8500() && dbx500_revision() == 0xA1;
+}
+
+static inline bool __attribute_const__ cpu_is_u8500v2(void)
+{
+	return cpu_is_u8500() && ((dbx500_revision() & 0xf0) == 0xB0);
+}
+
+#define ux500_unknown_soc()	BUG()
+
+#endif
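
The helpers above encode the convention that 0xAx revisions are the v1.x series and 0xBx are v2.x, so cpu_is_u8500v1() deliberately matches both v1.0 and v1.1 through the 0xf0 mask. The following is a small self-contained check of that mask logic with the predicates re-implemented locally (the real ones live in mach/id.h); it is an illustration, not a kernel test.

/* Local re-implementation of the revision mask checks, for illustration. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool is_v1(unsigned int rev)  { return (rev & 0xf0) == 0xA0; }	/* any v1.x */
static bool is_v10(unsigned int rev) { return rev == 0xA0; }
static bool is_v11(unsigned int rev) { return rev == 0xA1; }
static bool is_v2(unsigned int rev)  { return (rev & 0xf0) == 0xB0; }	/* any v2.x */

int main(void)
{
	assert(is_v1(0xA0) && is_v10(0xA0) && !is_v11(0xA0));
	assert(is_v1(0xA1) && is_v11(0xA1) && !is_v10(0xA1));
	assert(!is_v1(0xB0) && is_v2(0xB0));
	printf("revision mask checks pass\n");
	return 0;
}
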
diff --git a/arch/arm/mach-ux500/include/mach/irqs.h b/arch/arm/mach-ux500/include/mach/irqs.h
index 880ae45..ba1294c 100644
--- a/arch/arm/mach-ux500/include/mach/irqs.h
+++ b/arch/arm/mach-ux500/include/mach/irqs.h
@@ -36,7 +36,7 @@
 /* This will be overridden by board-specific irq headers */
 #define IRQ_BOARD_END			IRQ_BOARD_START
 
-#ifdef CONFIG_MACH_U8500_MOP
+#ifdef CONFIG_MACH_U8500
 #include <mach/irqs-board-mop500.h>
 #endif
 
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index 469877e..a7d363f 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -14,7 +14,7 @@
 #include <asm/mach/time.h>
 #include <linux/init.h>
 
-extern void __init ux500_map_io(void);
+void __init ux500_map_io(void);
 extern void __init u5500_map_io(void);
 extern void __init u8500_map_io(void);
 
diff --git a/arch/arm/mach-ux500/modem-irq-db5500.c b/arch/arm/mach-ux500/modem-irq-db5500.c
index 3187f88..e1296a7 100644
--- a/arch/arm/mach-ux500/modem-irq-db5500.c
+++ b/arch/arm/mach-ux500/modem-irq-db5500.c
@@ -12,6 +12,8 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 
+#include <mach/id.h>
+
 #define MODEM_INTCON_BASE_ADDR 0xBFFD3000
 #define MODEM_INTCON_SIZE 0xFFF
 
@@ -101,6 +103,9 @@
 	static struct irq_chip  modem_irq_chip;
 	struct modem_irq *mi;
 
+	if (!cpu_is_u5500())
+		return -ENODEV;
+
 	pr_info("modem_irq: Set up IRQ handler for incoming modem IRQ %d\n",
 		   IRQ_DB5500_MODEM);
 
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index d77e76c..4fff4d4 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/smp_scu.h>
 #include <mach/hardware.h>
+#include <mach/setup.h>
 
 /*
  * control for which core is the next to come out of the secondary
@@ -40,6 +41,18 @@
 	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
 }
 
+static void __iomem *scu_base_addr(void)
+{
+	if (cpu_is_u5500())
+		return __io_address(U5500_SCU_BASE);
+	else if (cpu_is_u8500())
+		return __io_address(U8500_SCU_BASE);
+	else
+		ux500_unknown_soc();
+
+	return NULL;
+}
+
 static DEFINE_SPINLOCK(boot_lock);
 
 void __cpuinit platform_secondary_init(unsigned int cpu)
@@ -100,21 +113,28 @@
 
 static void __init wakeup_secondary(void)
 {
+	void __iomem *backupram;
+
+	if (cpu_is_u5500())
+		backupram = __io_address(U5500_BACKUPRAM0_BASE);
+	else if (cpu_is_u8500())
+		backupram = __io_address(U8500_BACKUPRAM0_BASE);
+	else
+		ux500_unknown_soc();
+
 	/*
 	 * write the address of secondary startup into the backup ram register
 	 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
 	 * backup ram register at offset 0x1FF0, which is what boot rom code
 	 * is waiting for. This would wake up the secondary core from WFE
 	 */
-#define U8500_CPU1_JUMPADDR_OFFSET 0x1FF4
+#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
 	__raw_writel(virt_to_phys(u8500_secondary_startup),
-		__io_address(UX500_BACKUPRAM0_BASE) +
-		U8500_CPU1_JUMPADDR_OFFSET);
+		     backupram + UX500_CPU1_JUMPADDR_OFFSET);
 
-#define U8500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
+#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
 	__raw_writel(0xA1FEED01,
-		__io_address(UX500_BACKUPRAM0_BASE) +
-		U8500_CPU1_WAKEMAGIC_OFFSET);
+		     backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
 
 	/* make sure write buffer is drained */
 	mb();
@@ -126,9 +146,10 @@
  */
 void __init smp_init_cpus(void)
 {
+	void __iomem *scu_base = scu_base_addr();
 	unsigned int i, ncores;
 
-	ncores = scu_get_core_count(__io_address(UX500_SCU_BASE));
+	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
 	if (ncores > NR_CPUS) {
@@ -154,6 +175,6 @@
 	for (i = 0; i < max_cpus; i++)
 		set_cpu_present(i, true);
 
-	scu_enable(__io_address(UX500_SCU_BASE));
+	scu_enable(scu_base_addr());
 	wakeup_secondary();
 }
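
For reference, the wake-up handshake in wakeup_secondary() above is a two-word mailbox at fixed offsets in backup RAM: the physical address of the secondary entry point at 0x1FF4, then the magic 0xA1FEED01 at 0x1FF0 that the boot ROM polls for. The sketch below emulates the same two writes over a plain array in userspace; the function and buffer names are illustrative, and the real code of course uses __raw_writel() on the mapped BACKUPRAM0 window followed by mb().

/* Userspace emulation of the two-word wake-up mailbox written above. */
#include <stdio.h>
#include <string.h>

#define CPU1_JUMPADDR_OFFSET	0x1FF4
#define CPU1_WAKEMAGIC_OFFSET	0x1FF0
#define WAKE_MAGIC		0xA1FEED01u

static unsigned char backupram[0x2000];	/* stands in for the 8K BACKUPRAM0 window */

static void demo_wakeup_secondary(unsigned int secondary_entry_phys)
{
	unsigned int magic = WAKE_MAGIC;

	/* Jump address first, then the magic that the boot ROM is waiting for. */
	memcpy(backupram + CPU1_JUMPADDR_OFFSET, &secondary_entry_phys,
	       sizeof(secondary_entry_phys));
	memcpy(backupram + CPU1_WAKEMAGIC_OFFSET, &magic, sizeof(magic));
}

int main(void)
{
	unsigned int addr, magic;

	demo_wakeup_secondary(0x00008000);	/* example physical entry point */
	memcpy(&addr, backupram + CPU1_JUMPADDR_OFFSET, sizeof(addr));
	memcpy(&magic, backupram + CPU1_WAKEMAGIC_OFFSET, sizeof(magic));
	printf("jumpaddr=%#010x magic=%#010x\n", addr, magic);
	return 0;
}
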
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 49db8b3..9d30c6f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -644,7 +644,7 @@
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on CPU_V7
+	depends on CPU_V7 && !CPU_V6
 	select HAVE_PROC_CPU if PROC_FS
 	default y if SMP
 	help
@@ -813,7 +813,7 @@
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
 		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
 		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \
-		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE
 	default y
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c29f283..2b269c9 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,7 +18,6 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
-#include <asm/smp_plat.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 93292a1..709244c 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -50,7 +50,7 @@
 		if (!new_pmd)
 			goto no_pmd;
 
-		new_pte = pte_alloc_map(mm, new_pmd, 0);
+		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
 		if (!new_pte)
 			goto no_pte;
 
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index 58a49cc..ba65c92 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -70,7 +70,7 @@
 
 /* all normal IRQs can be FIQs */
 #define FIQ_START	0
-/* switch betwean IRQ and FIQ */
+/* switch between IRQ and FIQ */
 extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type);
 
 #endif /* __ASM_ARCH_MXC_IRQS_H__ */
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index c940843..18fe3cb 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -18,6 +18,7 @@
 config ARCH_OMAP2PLUS
 	bool "TI OMAP2/3/4"
 	select CLKDEV_LOOKUP
+	select OMAP_DM_TIMER
 	help
 	  "Systems based on OMAP2, OMAP3 or OMAP4"
 
@@ -35,6 +36,37 @@
 	depends on OMAP_DEBUG_DEVICES
 	default y if LEDS_CLASS
 
+config OMAP_SMARTREFLEX
+	bool "SmartReflex support"
+	depends on (ARCH_OMAP3 || ARCH_OMAP4) && PM
+	help
+	  Say Y if you want to enable SmartReflex.
+
+	  SmartReflex can perform continuous dynamic voltage
+	  scaling around the nominal operating point voltage
+	  according to silicon characteristics and operating
+	  conditions. Enabling SmartReflex reduces power
+	  consumption.
+
+	  Please note that, by default, SmartReflex is only
+	  initialized. To enable the automatic voltage
+	  compensation for vdd mpu and vdd core from user space,
+	  the user must write 1 to
+		/debug/voltage/vdd_<X>/smartreflex/autocomp,
+	  where X is mpu or core for OMAP3.
+	  Optionally, autocompensation can be enabled in the kernel
+	  by default during system init via the enable_on_init flag,
+	  which can be passed as platform data to the smartreflex driver.
+
+config OMAP_SMARTREFLEX_CLASS3
+	bool "Class 3 mode of Smartreflex Implementation"
+	depends on OMAP_SMARTREFLEX && TWL4030_CORE
+	help
+	  Say Y to enable the Class 3 implementation of Smartreflex.
+
+	  The Class 3 implementation of Smartreflex employs continuous
+	  hardware voltage calibration.
+
 config OMAP_RESET_CLOCKS
 	bool "Reset unused clocks during boot"
 	depends on ARCH_OMAP
@@ -109,6 +141,9 @@
 
          Say N unless you know you need this.
 
+config OMAP_IOMMU_IVA2
+	bool
+
 choice
 	prompt "System timer"
 	default OMAP_32K_TIMER if !ARCH_OMAP15XX
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index fc81912..10245b8 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -232,46 +232,6 @@
 static inline void omap_init_uwire(void) {}
 #endif
 
-/*-------------------------------------------------------------------------*/
-
-#if	defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
-
-static struct resource wdt_resources[] = {
-	{
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device omap_wdt_device = {
-	.name	   = "omap_wdt",
-	.id	     = -1,
-	.num_resources	= ARRAY_SIZE(wdt_resources),
-	.resource	= wdt_resources,
-};
-
-static void omap_init_wdt(void)
-{
-	if (cpu_is_omap16xx())
-		wdt_resources[0].start = 0xfffeb000;
-	else if (cpu_is_omap2420())
-		wdt_resources[0].start = 0x48022000; /* WDT2 */
-	else if (cpu_is_omap2430())
-		wdt_resources[0].start = 0x49016000; /* WDT2 */
-	else if (cpu_is_omap343x())
-		wdt_resources[0].start = 0x48314000; /* WDT2 */
-	else if (cpu_is_omap44xx())
-		wdt_resources[0].start = 0x4a314000;
-	else
-		return;
-
-	wdt_resources[0].end = wdt_resources[0].start + 0x4f;
-
-	(void) platform_device_register(&omap_wdt_device);
-}
-#else
-static inline void omap_init_wdt(void) {}
-#endif
-
 #if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
 
 static phys_addr_t omap_dsp_phys_mempool_base;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 2c28265..c4b2b47 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -15,6 +15,10 @@
  *
  * Support functions for the OMAP internal DMA channels.
  *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Converted DMA library into DMA platform driver.
+ *	- G, Manjunath Kondaiah <manjugk@ti.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
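
The bulk of the dma.c changes below follow one mechanical pattern: the old dma_read()/dma_write() macros, which tested cpu_class_is_omap1() on every access, are replaced by indirect calls through a struct omap_system_dma_plat_info pointer (p->dma_read(reg, lch), p->dma_write(val, reg, lch)) supplied by the platform driver. The following is a minimal stand-alone sketch of that ops-struct style; the names and registers are invented for the sketch and are not the real plat-omap definitions.

/*
 * Illustrative ops-struct pattern: the generic layer calls through
 * function pointers, each platform supplies its own register accessors.
 */
#include <stdio.h>

enum demo_reg { DEMO_CCR, DEMO_CSDP, DEMO_NR_REGS };

struct demo_dma_plat_info {
	unsigned int (*dma_read)(enum demo_reg reg, int lch);
	void (*dma_write)(unsigned int val, enum demo_reg reg, int lch);
};

/* Fake "register file": one word per register per channel */
static unsigned int regs[4][DEMO_NR_REGS];

static unsigned int demo_read(enum demo_reg reg, int lch)
{
	return regs[lch][reg];
}

static void demo_write(unsigned int val, enum demo_reg reg, int lch)
{
	regs[lch][reg] = val;
}

static struct demo_dma_plat_info demo_plat = {
	.dma_read	= demo_read,
	.dma_write	= demo_write,
};

/* Generic code only ever sees the ops pointer, as dma.c does with 'p' */
static struct demo_dma_plat_info *p = &demo_plat;

static void demo_set_priority(int lch, int priority)
{
	unsigned int ccr = p->dma_read(DEMO_CCR, lch);

	if (priority)
		ccr |= 1 << 6;
	else
		ccr &= ~(1 << 6);
	p->dma_write(ccr, DEMO_CCR, lch);
}

int main(void)
{
	demo_set_priority(0, 1);
	printf("CCR[0] = %#x\n", p->dma_read(DEMO_CCR, 0));
	return 0;
}
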
@@ -53,7 +57,11 @@
 
 #define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
 
+static struct omap_system_dma_plat_info *p;
+static struct omap_dma_dev_attr *d;
+
 static int enable_1510_mode;
+static u32 errata;
 
 static struct omap_dma_global_context_registers {
 	u32 dma_irqenable_l0;
@@ -61,27 +69,6 @@
 	u32 dma_gcr;
 } omap_dma_global_context;
 
-struct omap_dma_lch {
-	int next_lch;
-	int dev_id;
-	u16 saved_csr;
-	u16 enabled_irqs;
-	const char *dev_name;
-	void (*callback)(int lch, u16 ch_status, void *data);
-	void *data;
-
-#ifndef CONFIG_ARCH_OMAP1
-	/* required for Dynamic chaining */
-	int prev_linked_ch;
-	int next_linked_ch;
-	int state;
-	int chain_id;
-
-	int status;
-#endif
-	long flags;
-};
-
 struct dma_link_info {
 	int *linked_dmach_q;
 	int no_of_lchs_linked;
@@ -137,15 +124,6 @@
 
 static spinlock_t dma_chan_lock;
 static struct omap_dma_lch *dma_chan;
-static void __iomem *omap_dma_base;
-
-static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
-	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
-	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
-	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
-	INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
-	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
-};
 
 static inline void disable_lnk(int lch);
 static void omap_disable_channel_irq(int lch);
@@ -154,24 +132,6 @@
 #define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
 						__func__);
 
-#define dma_read(reg)							\
-({									\
-	u32 __val;							\
-	if (cpu_class_is_omap1())					\
-		__val = __raw_readw(omap_dma_base + OMAP1_DMA_##reg);	\
-	else								\
-		__val = __raw_readl(omap_dma_base + OMAP_DMA4_##reg);	\
-	__val;								\
-})
-
-#define dma_write(val, reg)						\
-({									\
-	if (cpu_class_is_omap1())					\
-		__raw_writew((u16)(val), omap_dma_base + OMAP1_DMA_##reg); \
-	else								\
-		__raw_writel((val), omap_dma_base + OMAP_DMA4_##reg);	\
-})
-
 #ifdef CONFIG_ARCH_OMAP15XX
 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
 int omap_dma_in_1510_mode(void)
@@ -206,16 +166,6 @@
 #define set_gdma_dev(req, dev)	do {} while (0)
 #endif
 
-/* Omap1 only */
-static void clear_lch_regs(int lch)
-{
-	int i;
-	void __iomem *lch_base = omap_dma_base + OMAP1_DMA_CH_BASE(lch);
-
-	for (i = 0; i < 0x2c; i += 2)
-		__raw_writew(0, lch_base + i);
-}
-
 void omap_set_dma_priority(int lch, int dst_port, int priority)
 {
 	unsigned long reg;
@@ -248,12 +198,12 @@
 	if (cpu_class_is_omap2()) {
 		u32 ccr;
 
-		ccr = dma_read(CCR(lch));
+		ccr = p->dma_read(CCR, lch);
 		if (priority)
 			ccr |= (1 << 6);
 		else
 			ccr &= ~(1 << 6);
-		dma_write(ccr, CCR(lch));
+		p->dma_write(ccr, CCR, lch);
 	}
 }
 EXPORT_SYMBOL(omap_set_dma_priority);
@@ -264,31 +214,31 @@
 {
 	u32 l;
 
-	l = dma_read(CSDP(lch));
+	l = p->dma_read(CSDP, lch);
 	l &= ~0x03;
 	l |= data_type;
-	dma_write(l, CSDP(lch));
+	p->dma_write(l, CSDP, lch);
 
 	if (cpu_class_is_omap1()) {
 		u16 ccr;
 
-		ccr = dma_read(CCR(lch));
+		ccr = p->dma_read(CCR, lch);
 		ccr &= ~(1 << 5);
 		if (sync_mode == OMAP_DMA_SYNC_FRAME)
 			ccr |= 1 << 5;
-		dma_write(ccr, CCR(lch));
+		p->dma_write(ccr, CCR, lch);
 
-		ccr = dma_read(CCR2(lch));
+		ccr = p->dma_read(CCR2, lch);
 		ccr &= ~(1 << 2);
 		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
 			ccr |= 1 << 2;
-		dma_write(ccr, CCR2(lch));
+		p->dma_write(ccr, CCR2, lch);
 	}
 
 	if (cpu_class_is_omap2() && dma_trigger) {
 		u32 val;
 
-		val = dma_read(CCR(lch));
+		val = p->dma_read(CCR, lch);
 
 		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
 		val &= ~((1 << 23) | (3 << 19) | 0x1f);
@@ -313,11 +263,11 @@
 		} else {
 			val &= ~(1 << 24);	/* dest synch */
 		}
-		dma_write(val, CCR(lch));
+		p->dma_write(val, CCR, lch);
 	}
 
-	dma_write(elem_count, CEN(lch));
-	dma_write(frame_count, CFN(lch));
+	p->dma_write(elem_count, CEN, lch);
+	p->dma_write(frame_count, CFN, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_transfer_params);
 
@@ -328,7 +278,7 @@
 	if (cpu_class_is_omap1()) {
 		u16 w;
 
-		w = dma_read(CCR2(lch));
+		w = p->dma_read(CCR2, lch);
 		w &= ~0x03;
 
 		switch (mode) {
@@ -343,23 +293,22 @@
 		default:
 			BUG();
 		}
-		dma_write(w, CCR2(lch));
+		p->dma_write(w, CCR2, lch);
 
-		w = dma_read(LCH_CTRL(lch));
+		w = p->dma_read(LCH_CTRL, lch);
 		w &= ~0x0f;
 		/* Default is channel type 2D */
 		if (mode) {
-			dma_write((u16)color, COLOR_L(lch));
-			dma_write((u16)(color >> 16), COLOR_U(lch));
+			p->dma_write(color, COLOR, lch);
 			w |= 1;		/* Channel type G */
 		}
-		dma_write(w, LCH_CTRL(lch));
+		p->dma_write(w, LCH_CTRL, lch);
 	}
 
 	if (cpu_class_is_omap2()) {
 		u32 val;
 
-		val = dma_read(CCR(lch));
+		val = p->dma_read(CCR, lch);
 		val &= ~((1 << 17) | (1 << 16));
 
 		switch (mode) {
@@ -374,10 +323,10 @@
 		default:
 			BUG();
 		}
-		dma_write(val, CCR(lch));
+		p->dma_write(val, CCR, lch);
 
 		color &= 0xffffff;
-		dma_write(color, COLOR(lch));
+		p->dma_write(color, COLOR, lch);
 	}
 }
 EXPORT_SYMBOL(omap_set_dma_color_mode);
@@ -387,10 +336,10 @@
 	if (cpu_class_is_omap2()) {
 		u32 csdp;
 
-		csdp = dma_read(CSDP(lch));
+		csdp = p->dma_read(CSDP, lch);
 		csdp &= ~(0x3 << 16);
 		csdp |= (mode << 16);
-		dma_write(csdp, CSDP(lch));
+		p->dma_write(csdp, CSDP, lch);
 	}
 }
 EXPORT_SYMBOL(omap_set_dma_write_mode);
@@ -400,10 +349,10 @@
 	if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
 		u32 l;
 
-		l = dma_read(LCH_CTRL(lch));
+		l = p->dma_read(LCH_CTRL, lch);
 		l &= ~0x7;
 		l |= mode;
-		dma_write(l, LCH_CTRL(lch));
+		p->dma_write(l, LCH_CTRL, lch);
 	}
 }
 EXPORT_SYMBOL(omap_set_dma_channel_mode);
@@ -418,27 +367,21 @@
 	if (cpu_class_is_omap1()) {
 		u16 w;
 
-		w = dma_read(CSDP(lch));
+		w = p->dma_read(CSDP, lch);
 		w &= ~(0x1f << 2);
 		w |= src_port << 2;
-		dma_write(w, CSDP(lch));
+		p->dma_write(w, CSDP, lch);
 	}
 
-	l = dma_read(CCR(lch));
+	l = p->dma_read(CCR, lch);
 	l &= ~(0x03 << 12);
 	l |= src_amode << 12;
-	dma_write(l, CCR(lch));
+	p->dma_write(l, CCR, lch);
 
-	if (cpu_class_is_omap1()) {
-		dma_write(src_start >> 16, CSSA_U(lch));
-		dma_write((u16)src_start, CSSA_L(lch));
-	}
+	p->dma_write(src_start, CSSA, lch);
 
-	if (cpu_class_is_omap2())
-		dma_write(src_start, CSSA(lch));
-
-	dma_write(src_ei, CSEI(lch));
-	dma_write(src_fi, CSFI(lch));
+	p->dma_write(src_ei, CSEI, lch);
+	p->dma_write(src_fi, CSFI, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_src_params);
 
@@ -466,8 +409,8 @@
 	if (cpu_class_is_omap2())
 		return;
 
-	dma_write(eidx, CSEI(lch));
-	dma_write(fidx, CSFI(lch));
+	p->dma_write(eidx, CSEI, lch);
+	p->dma_write(fidx, CSFI, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_src_index);
 
@@ -475,11 +418,11 @@
 {
 	u32 l;
 
-	l = dma_read(CSDP(lch));
+	l = p->dma_read(CSDP, lch);
 	l &= ~(1 << 6);
 	if (enable)
 		l |= (1 << 6);
-	dma_write(l, CSDP(lch));
+	p->dma_write(l, CSDP, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
 
@@ -488,7 +431,7 @@
 	unsigned int burst = 0;
 	u32 l;
 
-	l = dma_read(CSDP(lch));
+	l = p->dma_read(CSDP, lch);
 	l &= ~(0x03 << 7);
 
 	switch (burst_mode) {
@@ -524,7 +467,7 @@
 	}
 
 	l |= (burst << 7);
-	dma_write(l, CSDP(lch));
+	p->dma_write(l, CSDP, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
 
@@ -536,27 +479,21 @@
 	u32 l;
 
 	if (cpu_class_is_omap1()) {
-		l = dma_read(CSDP(lch));
+		l = p->dma_read(CSDP, lch);
 		l &= ~(0x1f << 9);
 		l |= dest_port << 9;
-		dma_write(l, CSDP(lch));
+		p->dma_write(l, CSDP, lch);
 	}
 
-	l = dma_read(CCR(lch));
+	l = p->dma_read(CCR, lch);
 	l &= ~(0x03 << 14);
 	l |= dest_amode << 14;
-	dma_write(l, CCR(lch));
+	p->dma_write(l, CCR, lch);
 
-	if (cpu_class_is_omap1()) {
-		dma_write(dest_start >> 16, CDSA_U(lch));
-		dma_write(dest_start, CDSA_L(lch));
-	}
+	p->dma_write(dest_start, CDSA, lch);
 
-	if (cpu_class_is_omap2())
-		dma_write(dest_start, CDSA(lch));
-
-	dma_write(dst_ei, CDEI(lch));
-	dma_write(dst_fi, CDFI(lch));
+	p->dma_write(dst_ei, CDEI, lch);
+	p->dma_write(dst_fi, CDFI, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_dest_params);
 
@@ -565,8 +502,8 @@
 	if (cpu_class_is_omap2())
 		return;
 
-	dma_write(eidx, CDEI(lch));
-	dma_write(fidx, CDFI(lch));
+	p->dma_write(eidx, CDEI, lch);
+	p->dma_write(fidx, CDFI, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_dest_index);
 
@@ -574,11 +511,11 @@
 {
 	u32 l;
 
-	l = dma_read(CSDP(lch));
+	l = p->dma_read(CSDP, lch);
 	l &= ~(1 << 13);
 	if (enable)
 		l |= 1 << 13;
-	dma_write(l, CSDP(lch));
+	p->dma_write(l, CSDP, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
 
@@ -587,7 +524,7 @@
 	unsigned int burst = 0;
 	u32 l;
 
-	l = dma_read(CSDP(lch));
+	l = p->dma_read(CSDP, lch);
 	l &= ~(0x03 << 14);
 
 	switch (burst_mode) {
@@ -620,7 +557,7 @@
 		return;
 	}
 	l |= (burst << 14);
-	dma_write(l, CSDP(lch));
+	p->dma_write(l, CSDP, lch);
 }
 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
 
@@ -630,18 +567,18 @@
 
 	/* Clear CSR */
 	if (cpu_class_is_omap1())
-		status = dma_read(CSR(lch));
+		status = p->dma_read(CSR, lch);
 	else if (cpu_class_is_omap2())
-		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
+		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
 
 	/* Enable some nice interrupts. */
-	dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
+	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
 }
 
 static void omap_disable_channel_irq(int lch)
 {
 	if (cpu_class_is_omap2())
-		dma_write(0, CICR(lch));
+		p->dma_write(0, CICR, lch);
 }
 
 void omap_enable_dma_irq(int lch, u16 bits)
@@ -660,7 +597,7 @@
 {
 	u32 l;
 
-	l = dma_read(CLNK_CTRL(lch));
+	l = p->dma_read(CLNK_CTRL, lch);
 
 	if (cpu_class_is_omap1())
 		l &= ~(1 << 14);
@@ -675,18 +612,18 @@
 			l = dma_chan[lch].next_linked_ch | (1 << 15);
 #endif
 
-	dma_write(l, CLNK_CTRL(lch));
+	p->dma_write(l, CLNK_CTRL, lch);
 }
 
 static inline void disable_lnk(int lch)
 {
 	u32 l;
 
-	l = dma_read(CLNK_CTRL(lch));
+	l = p->dma_read(CLNK_CTRL, lch);
 
 	/* Disable interrupts */
 	if (cpu_class_is_omap1()) {
-		dma_write(0, CICR(lch));
+		p->dma_write(0, CICR, lch);
 		/* Set the STOP_LNK bit */
 		l |= 1 << 14;
 	}
@@ -697,7 +634,7 @@
 		l &= ~(1 << 15);
 	}
 
-	dma_write(l, CLNK_CTRL(lch));
+	p->dma_write(l, CLNK_CTRL, lch);
 	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
 }
 
@@ -710,9 +647,9 @@
 		return;
 
 	spin_lock_irqsave(&dma_chan_lock, flags);
-	val = dma_read(IRQENABLE_L0);
+	val = p->dma_read(IRQENABLE_L0, lch);
 	val |= 1 << lch;
-	dma_write(val, IRQENABLE_L0);
+	p->dma_write(val, IRQENABLE_L0, lch);
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 }
 
@@ -725,9 +662,9 @@
 		return;
 
 	spin_lock_irqsave(&dma_chan_lock, flags);
-	val = dma_read(IRQENABLE_L0);
+	val = p->dma_read(IRQENABLE_L0, lch);
 	val &= ~(1 << lch);
-	dma_write(val, IRQENABLE_L0);
+	p->dma_write(val, IRQENABLE_L0, lch);
 	spin_unlock_irqrestore(&dma_chan_lock, flags);
 }
 
@@ -754,8 +691,8 @@
 	chan = dma_chan + free_ch;
 	chan->dev_id = dev_id;
 
-	if (cpu_class_is_omap1())
-		clear_lch_regs(free_ch);
+	if (p->clear_lch_regs)
+		p->clear_lch_regs(free_ch);
 
 	if (cpu_class_is_omap2())
 		omap_clear_dma(free_ch);
@@ -792,17 +729,17 @@
 		 * Disable the 1510 compatibility mode and set the sync device
 		 * id.
 		 */
-		dma_write(dev_id | (1 << 10), CCR(free_ch));
+		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
 	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
-		dma_write(dev_id, CCR(free_ch));
+		p->dma_write(dev_id, CCR, free_ch);
 	}
 
 	if (cpu_class_is_omap2()) {
 		omap2_enable_irq_lch(free_ch);
 		omap_enable_channel_irq(free_ch);
 		/* Clear the CSR register and IRQ status register */
-		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(free_ch));
-		dma_write(1 << free_ch, IRQSTATUS_L0);
+		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
+		p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
 	}
 
 	*dma_ch_out = free_ch;
@@ -823,23 +760,23 @@
 
 	if (cpu_class_is_omap1()) {
 		/* Disable all DMA interrupts for the channel. */
-		dma_write(0, CICR(lch));
+		p->dma_write(0, CICR, lch);
 		/* Make sure the DMA transfer is stopped. */
-		dma_write(0, CCR(lch));
+		p->dma_write(0, CCR, lch);
 	}
 
 	if (cpu_class_is_omap2()) {
 		omap2_disable_irq_lch(lch);
 
 		/* Clear the CSR register and IRQ status register */
-		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
-		dma_write(1 << lch, IRQSTATUS_L0);
+		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
+		p->dma_write(1 << lch, IRQSTATUS_L0, lch);
 
 		/* Disable all DMA interrupts for the channel. */
-		dma_write(0, CICR(lch));
+		p->dma_write(0, CICR, lch);
 
 		/* Make sure the DMA transfer is stopped. */
-		dma_write(0, CCR(lch));
+		p->dma_write(0, CCR, lch);
 		omap_clear_dma(lch);
 	}
 
@@ -880,7 +817,7 @@
 	reg |= (0x3 & tparams) << 12;
 	reg |= (arb_rate & 0xff) << 16;
 
-	dma_write(reg, GCR);
+	p->dma_write(reg, GCR, 0);
 }
 EXPORT_SYMBOL(omap_dma_set_global_params);
 
@@ -903,14 +840,14 @@
 		printk(KERN_ERR "Invalid channel id\n");
 		return -EINVAL;
 	}
-	l = dma_read(CCR(lch));
+	l = p->dma_read(CCR, lch);
 	l &= ~((1 << 6) | (1 << 26));
 	if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
 		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
 	else
 		l |= ((read_prio & 0x1) << 6);
 
-	dma_write(l, CCR(lch));
+	p->dma_write(l, CCR, lch);
 
 	return 0;
 }
@@ -925,25 +862,7 @@
 	unsigned long flags;
 
 	local_irq_save(flags);
-
-	if (cpu_class_is_omap1()) {
-		u32 l;
-
-		l = dma_read(CCR(lch));
-		l &= ~OMAP_DMA_CCR_EN;
-		dma_write(l, CCR(lch));
-
-		/* Clear pending interrupts */
-		l = dma_read(CSR(lch));
-	}
-
-	if (cpu_class_is_omap2()) {
-		int i;
-		void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch);
-		for (i = 0; i < 0x44; i += 4)
-			__raw_writel(0, lch_base + i);
-	}
-
+	p->clear_dma(lch);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(omap_clear_dma);
@@ -957,13 +876,13 @@
 	 * before starting dma transfer.
 	 */
 	if (cpu_is_omap15xx())
-		dma_write(0, CPC(lch));
+		p->dma_write(0, CPC, lch);
 	else
-		dma_write(0, CDAC(lch));
+		p->dma_write(0, CDAC, lch);
 
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch;
-		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
+		char dma_chan_link_map[dma_lch_count];
 
 		dma_chan_link_map[lch] = 1;
 		/* Set the link register of the first channel */
@@ -985,32 +904,18 @@
 
 			cur_lch = next_lch;
 		} while (next_lch != -1);
-	} else if (cpu_is_omap242x() ||
-		(cpu_is_omap243x() &&  omap_type() <= OMAP2430_REV_ES1_0)) {
-
-		/* Errata: Need to write lch even if not using chaining */
-		dma_write(lch, CLNK_CTRL(lch));
-	}
+	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
+		p->dma_write(lch, CLNK_CTRL, lch);
 
 	omap_enable_channel_irq(lch);
 
-	l = dma_read(CCR(lch));
+	l = p->dma_read(CCR, lch);
 
-	/*
-	 * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
-	 * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
-	 * bursting is enabled. This might result in data gets stalled in
-	 * FIFO at the end of the block.
-	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
-	 * guarantee no data will stay in the DMA FIFO in case inter frame
-	 * buffering occurs.
-	 */
-	if (cpu_is_omap2420() ||
-	    (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
-		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
-
+	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
+		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 	l |= OMAP_DMA_CCR_EN;
-	dma_write(l, CCR(lch));
+
+	p->dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
 }
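
The IS_DMA_ERRATA() tests introduced here (together with the static u32 errata added earlier in the file) replace per-call cpu_is_*()/omap_type() checks with a bitmask computed once at init time. A compact sketch of that idiom follows; the flag names and the bit used for the CCR write are placeholders, not the real DMA_ERRATA_* or OMAP_DMA_CCR_* definitions.

/* Sketch of the errata-bitmask idiom: decide once, test cheaply everywhere. */
#include <stdio.h>
#include <stdbool.h>

#define SKETCH_ERRATA_IFRAME_BUFFERING	(1 << 0)
#define SKETCH_ERRATA_PARALLEL_CHANNELS	(1 << 1)

static unsigned int errata;

#define IS_ERRATA(id)	(errata & (id))

/* Normally filled in from SoC/revision checks at probe time */
static void configure_errata(bool iframe_bug, bool parallel_bug)
{
	if (iframe_bug)
		errata |= SKETCH_ERRATA_IFRAME_BUFFERING;
	if (parallel_bug)
		errata |= SKETCH_ERRATA_PARALLEL_CHANNELS;
}

int main(void)
{
	unsigned int ccr = 0;

	configure_errata(true, false);

	if (IS_ERRATA(SKETCH_ERRATA_IFRAME_BUFFERING))
		ccr |= 1 << 25;	/* placeholder for a buffering-disable bit */

	printf("errata=%#x ccr=%#x\n", errata, ccr);
	return 0;
}
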
@@ -1022,46 +927,46 @@
 
 	/* Disable all interrupts on the channel */
 	if (cpu_class_is_omap1())
-		dma_write(0, CICR(lch));
+		p->dma_write(0, CICR, lch);
 
-	l = dma_read(CCR(lch));
-	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
-	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
+	l = p->dma_read(CCR, lch);
+	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
+			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
 		int i = 0;
 		u32 sys_cf;
 
 		/* Configure No-Standby */
-		l = dma_read(OCP_SYSCONFIG);
+		l = p->dma_read(OCP_SYSCONFIG, lch);
 		sys_cf = l;
 		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
 		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
-		dma_write(l , OCP_SYSCONFIG);
+		p->dma_write(l , OCP_SYSCONFIG, 0);
 
-		l = dma_read(CCR(lch));
+		l = p->dma_read(CCR, lch);
 		l &= ~OMAP_DMA_CCR_EN;
-		dma_write(l, CCR(lch));
+		p->dma_write(l, CCR, lch);
 
 		/* Wait for sDMA FIFO drain */
-		l = dma_read(CCR(lch));
+		l = p->dma_read(CCR, lch);
 		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
 					OMAP_DMA_CCR_WR_ACTIVE))) {
 			udelay(5);
 			i++;
-			l = dma_read(CCR(lch));
+			l = p->dma_read(CCR, lch);
 		}
 		if (i >= 100)
 			printk(KERN_ERR "DMA drain did not complete on "
 					"lch %d\n", lch);
 		/* Restore OCP_SYSCONFIG */
-		dma_write(sys_cf, OCP_SYSCONFIG);
+		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
 	} else {
 		l &= ~OMAP_DMA_CCR_EN;
-		dma_write(l, CCR(lch));
+		p->dma_write(l, CCR, lch);
 	}
 
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch = lch;
-		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
+		char dma_chan_link_map[dma_lch_count];
 
 		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
 		do {
@@ -1122,19 +1027,15 @@
 	dma_addr_t offset = 0;
 
 	if (cpu_is_omap15xx())
-		offset = dma_read(CPC(lch));
+		offset = p->dma_read(CPC, lch);
 	else
-		offset = dma_read(CSAC(lch));
+		offset = p->dma_read(CSAC, lch);
 
-	/*
-	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
-	 * read before the DMA controller finished disabling the channel.
-	 */
-	if (!cpu_is_omap15xx() && offset == 0)
-		offset = dma_read(CSAC(lch));
+	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
+		offset = p->dma_read(CSAC, lch);
 
 	if (cpu_class_is_omap1())
-		offset |= (dma_read(CSSA_U(lch)) << 16);
+		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
 
 	return offset;
 }
@@ -1153,19 +1054,19 @@
 	dma_addr_t offset = 0;
 
 	if (cpu_is_omap15xx())
-		offset = dma_read(CPC(lch));
+		offset = p->dma_read(CPC, lch);
 	else
-		offset = dma_read(CDAC(lch));
+		offset = p->dma_read(CDAC, lch);
 
 	/*
 	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 	 * read before the DMA controller finished disabling the channel.
 	 */
 	if (!cpu_is_omap15xx() && offset == 0)
-		offset = dma_read(CDAC(lch));
+		offset = p->dma_read(CDAC, lch);
 
 	if (cpu_class_is_omap1())
-		offset |= (dma_read(CDSA_U(lch)) << 16);
+		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
 
 	return offset;
 }
@@ -1173,7 +1074,7 @@
 
 int omap_get_dma_active_status(int lch)
 {
-	return (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN) != 0;
+	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
 }
 EXPORT_SYMBOL(omap_get_dma_active_status);
 
@@ -1186,7 +1087,7 @@
 			return 1;
 
 	for (lch = 0; lch < dma_chan_count; lch++)
-		if (dma_read(CCR(lch)) & OMAP_DMA_CCR_EN)
+		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
 			return 1;
 
 	return 0;
@@ -1201,8 +1102,8 @@
 {
 	if (omap_dma_in_1510_mode()) {
 		if (lch_head == lch_queue) {
-			dma_write(dma_read(CCR(lch_head)) | (3 << 8),
-								CCR(lch_head));
+			p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
+								CCR, lch_head);
 			return;
 		}
 		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
@@ -1228,8 +1129,8 @@
 {
 	if (omap_dma_in_1510_mode()) {
 		if (lch_head == lch_queue) {
-			dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
-								CCR(lch_head));
+			p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
+								CCR, lch_head);
 			return;
 		}
 		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
@@ -1255,8 +1156,6 @@
 }
 EXPORT_SYMBOL(omap_dma_unlink_lch);
 
-/*----------------------------------------------------------------------------*/
-
 #ifndef CONFIG_ARCH_OMAP1
 /* Create a chain of DMA channels */
 static void create_dma_lch_chain(int lch_head, int lch_queue)
@@ -1281,15 +1180,15 @@
 					lch_queue;
 	}
 
-	l = dma_read(CLNK_CTRL(lch_head));
+	l = p->dma_read(CLNK_CTRL, lch_head);
 	l &= ~(0x1f);
 	l |= lch_queue;
-	dma_write(l, CLNK_CTRL(lch_head));
+	p->dma_write(l, CLNK_CTRL, lch_head);
 
-	l = dma_read(CLNK_CTRL(lch_queue));
+	l = p->dma_read(CLNK_CTRL, lch_queue);
 	l &= ~(0x1f);
 	l |= (dma_chan[lch_queue].next_linked_ch);
-	dma_write(l, CLNK_CTRL(lch_queue));
+	p->dma_write(l, CLNK_CTRL, lch_queue);
 }
 
 /**
@@ -1565,13 +1464,13 @@
 
 	/* Set the params to the free channel */
 	if (src_start != 0)
-		dma_write(src_start, CSSA(lch));
+		p->dma_write(src_start, CSSA, lch);
 	if (dest_start != 0)
-		dma_write(dest_start, CDSA(lch));
+		p->dma_write(dest_start, CDSA, lch);
 
 	/* Write the buffer size */
-	dma_write(elem_count, CEN(lch));
-	dma_write(frame_count, CFN(lch));
+	p->dma_write(elem_count, CEN, lch);
+	p->dma_write(frame_count, CFN, lch);
 
 	/*
 	 * If the chain is dynamically linked,
@@ -1604,8 +1503,8 @@
 				enable_lnk(dma_chan[lch].prev_linked_ch);
 				dma_chan[lch].state = DMA_CH_QUEUED;
 				start_dma = 0;
-				if (0 == ((1 << 7) & dma_read(
-					CCR(dma_chan[lch].prev_linked_ch)))) {
+				if (0 == ((1 << 7) & p->dma_read(
+					CCR, dma_chan[lch].prev_linked_ch))) {
 					disable_lnk(dma_chan[lch].
 						    prev_linked_ch);
 					pr_debug("\n prev ch is stopped\n");
@@ -1621,7 +1520,7 @@
 			}
 			omap_enable_channel_irq(lch);
 
-			l = dma_read(CCR(lch));
+			l = p->dma_read(CCR, lch);
 
 			if ((0 == (l & (1 << 24))))
 				l &= ~(1 << 25);
@@ -1632,12 +1531,12 @@
 					l |= (1 << 7);
 					dma_chan[lch].state = DMA_CH_STARTED;
 					pr_debug("starting %d\n", lch);
-					dma_write(l, CCR(lch));
+					p->dma_write(l, CCR, lch);
 				} else
 					start_dma = 0;
 			} else {
 				if (0 == (l & (1 << 7)))
-					dma_write(l, CCR(lch));
+					p->dma_write(l, CCR, lch);
 			}
 			dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
 		}
@@ -1682,7 +1581,7 @@
 		omap_enable_channel_irq(channels[0]);
 	}
 
-	l = dma_read(CCR(channels[0]));
+	l = p->dma_read(CCR, channels[0]);
 	l |= (1 << 7);
 	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
 	dma_chan[channels[0]].state = DMA_CH_STARTED;
@@ -1691,7 +1590,7 @@
 		l &= ~(1 << 25);
 	else
 		l |= (1 << 25);
-	dma_write(l, CCR(channels[0]));
+	p->dma_write(l, CCR, channels[0]);
 
 	dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
 
@@ -1711,7 +1610,7 @@
 {
 	int *channels;
 	u32 l, i;
-	u32 sys_cf;
+	u32 sys_cf = 0;
 
 	/* Check for input params */
 	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
@@ -1726,22 +1625,20 @@
 	}
 	channels = dma_linked_lch[chain_id].linked_dmach_q;
 
-	/*
-	 * DMA Errata:
-	 * Special programming model needed to disable DMA before end of block
-	 */
-	sys_cf = dma_read(OCP_SYSCONFIG);
-	l = sys_cf;
-	/* Middle mode reg set no Standby */
-	l &= ~((1 << 12)|(1 << 13));
-	dma_write(l, OCP_SYSCONFIG);
+	if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
+		sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
+		l = sys_cf;
+		/* Middle mode reg set no Standby */
+		l &= ~((1 << 12)|(1 << 13));
+		p->dma_write(l, OCP_SYSCONFIG, 0);
+	}
 
 	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
 
 		/* Stop the Channel transmission */
-		l = dma_read(CCR(channels[i]));
+		l = p->dma_read(CCR, channels[i]);
 		l &= ~(1 << 7);
-		dma_write(l, CCR(channels[i]));
+		p->dma_write(l, CCR, channels[i]);
 
 		/* Disable the link in all the channels */
 		disable_lnk(channels[i]);
@@ -1753,8 +1650,8 @@
 	/* Reset the Queue pointers */
 	OMAP_DMA_CHAIN_QINIT(chain_id);
 
-	/* Errata - put in the old value */
-	dma_write(sys_cf, OCP_SYSCONFIG);
+	if (IS_DMA_ERRATA(DMA_ERRATA_i88))
+		p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
 
 	return 0;
 }
@@ -1796,8 +1693,8 @@
 	/* Get the current channel */
 	lch = channels[dma_linked_lch[chain_id].q_head];
 
-	*ei = dma_read(CCEN(lch));
-	*fi = dma_read(CCFN(lch));
+	*ei = p->dma_read(CCEN, lch);
+	*fi = p->dma_read(CCFN, lch);
 
 	return 0;
 }
@@ -1834,7 +1731,7 @@
 	/* Get the current channel */
 	lch = channels[dma_linked_lch[chain_id].q_head];
 
-	return dma_read(CDAC(lch));
+	return p->dma_read(CDAC, lch);
 }
 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
 
@@ -1868,7 +1765,7 @@
 	/* Get the current channel */
 	lch = channels[dma_linked_lch[chain_id].q_head];
 
-	return dma_read(CSAC(lch));
+	return p->dma_read(CSAC, lch);
 }
 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
 #endif	/* ifndef CONFIG_ARCH_OMAP1 */
@@ -1885,7 +1782,7 @@
 		csr = dma_chan[ch].saved_csr;
 		dma_chan[ch].saved_csr = 0;
 	} else
-		csr = dma_read(CSR(ch));
+		csr = p->dma_read(CSR, ch);
 	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
 		dma_chan[ch + 6].saved_csr = csr >> 7;
 		csr &= 0x7f;
@@ -1938,13 +1835,13 @@
 
 static int omap2_dma_handle_ch(int ch)
 {
-	u32 status = dma_read(CSR(ch));
+	u32 status = p->dma_read(CSR, ch);
 
 	if (!status) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
 				ch);
-		dma_write(1 << ch, IRQSTATUS_L0);
+		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
 		return 0;
 	}
 	if (unlikely(dma_chan[ch].dev_id == -1)) {
@@ -1960,17 +1857,12 @@
 	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
 		printk(KERN_INFO "DMA transaction error with device %d\n",
 		       dma_chan[ch].dev_id);
-		if (cpu_class_is_omap2()) {
-			/*
-			 * Errata: sDMA Channel is not disabled
-			 * after a transaction error. So we explicitely
-			 * disable the channel
-			 */
+		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
 			u32 ccr;
 
-			ccr = dma_read(CCR(ch));
+			ccr = p->dma_read(CCR, ch);
 			ccr &= ~OMAP_DMA_CCR_EN;
-			dma_write(ccr, CCR(ch));
+			p->dma_write(ccr, CCR, ch);
 			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
 		}
 	}
@@ -1981,16 +1873,16 @@
 		printk(KERN_INFO "DMA misaligned error with device %d\n",
 		       dma_chan[ch].dev_id);
 
-	dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
-	dma_write(1 << ch, IRQSTATUS_L0);
+	p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
+	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
 	/* read back the register to flush the write */
-	dma_read(IRQSTATUS_L0);
+	p->dma_read(IRQSTATUS_L0, ch);
 
 	/* If the ch is not chained then chain_id will be -1 */
 	if (dma_chan[ch].chain_id != -1) {
 		int chain_id = dma_chan[ch].chain_id;
 		dma_chan[ch].state = DMA_CH_NOTSTARTED;
-		if (dma_read(CLNK_CTRL(ch)) & (1 << 15))
+		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
 			dma_chan[dma_chan[ch].next_linked_ch].state =
 							DMA_CH_STARTED;
 		if (dma_linked_lch[chain_id].chain_mode ==
@@ -2000,10 +1892,10 @@
 		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
 			OMAP_DMA_CHAIN_INCQHEAD(chain_id);
 
-		status = dma_read(CSR(ch));
+		status = p->dma_read(CSR, ch);
 	}
 
-	dma_write(status, CSR(ch));
+	p->dma_write(status, CSR, ch);
 
 	if (likely(dma_chan[ch].callback != NULL))
 		dma_chan[ch].callback(ch, status, dma_chan[ch].data);
@@ -2017,13 +1909,13 @@
 	u32 val, enable_reg;
 	int i;
 
-	val = dma_read(IRQSTATUS_L0);
+	val = p->dma_read(IRQSTATUS_L0, 0);
 	if (val == 0) {
 		if (printk_ratelimit())
 			printk(KERN_WARNING "Spurious DMA IRQ\n");
 		return IRQ_HANDLED;
 	}
-	enable_reg = dma_read(IRQENABLE_L0);
+	enable_reg = p->dma_read(IRQENABLE_L0, 0);
 	val &= enable_reg; /* Dispatch only relevant interrupts */
 	for (i = 0; i < dma_lch_count && val != 0; i++) {
 		if (val & 1)
@@ -2049,119 +1941,66 @@
 void omap_dma_global_context_save(void)
 {
 	omap_dma_global_context.dma_irqenable_l0 =
-		dma_read(IRQENABLE_L0);
+		p->dma_read(IRQENABLE_L0, 0);
 	omap_dma_global_context.dma_ocp_sysconfig =
-		dma_read(OCP_SYSCONFIG);
-	omap_dma_global_context.dma_gcr = dma_read(GCR);
+		p->dma_read(OCP_SYSCONFIG, 0);
+	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
 }
 
 void omap_dma_global_context_restore(void)
 {
 	int ch;
 
-	dma_write(omap_dma_global_context.dma_gcr, GCR);
-	dma_write(omap_dma_global_context.dma_ocp_sysconfig,
-		OCP_SYSCONFIG);
-	dma_write(omap_dma_global_context.dma_irqenable_l0,
-		IRQENABLE_L0);
+	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
+	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
+		OCP_SYSCONFIG, 0);
+	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
+		IRQENABLE_L0, 0);
 
-	/*
-	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
-	 * after secure sram context save and restore. Hence we need to
-	 * manually clear those IRQs to avoid spurious interrupts. This
-	 * affects only secure devices.
-	 */
-	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
-		dma_write(0x3 , IRQSTATUS_L0);
+	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
+		p->dma_write(0x3, IRQSTATUS_L0, 0);
 
 	for (ch = 0; ch < dma_chan_count; ch++)
 		if (dma_chan[ch].dev_id != -1)
 			omap_clear_dma(ch);
 }
 
-/*----------------------------------------------------------------------------*/
-
-static int __init omap_init_dma(void)
+static int __devinit omap_system_dma_probe(struct platform_device *pdev)
 {
-	unsigned long base;
-	int ch, r;
+	int ch, ret = 0;
+	int dma_irq;
+	char irq_name[4];
+	int irq_rel;
 
-	if (cpu_class_is_omap1()) {
-		base = OMAP1_DMA_BASE;
-		dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
-	} else if (cpu_is_omap24xx()) {
-		base = OMAP24XX_DMA4_BASE;
-		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
-	} else if (cpu_is_omap34xx()) {
-		base = OMAP34XX_DMA4_BASE;
-		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
-	} else if (cpu_is_omap44xx()) {
-		base = OMAP44XX_DMA4_BASE;
-		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
-	} else {
-		pr_err("DMA init failed for unsupported omap\n");
-		return -ENODEV;
+	p = pdev->dev.platform_data;
+	if (!p) {
+		dev_err(&pdev->dev, "%s: System DMA initialized without "
+			"platform data\n", __func__);
+		return -EINVAL;
 	}
 
-	omap_dma_base = ioremap(base, SZ_4K);
-	BUG_ON(!omap_dma_base);
+	d			= p->dma_attr;
+	errata			= p->errata;
 
-	if (cpu_class_is_omap2() && omap_dma_reserve_channels
+	if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
 			&& (omap_dma_reserve_channels <= dma_lch_count))
-		dma_lch_count = omap_dma_reserve_channels;
+		d->lch_count	= omap_dma_reserve_channels;
 
-	dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
-				GFP_KERNEL);
-	if (!dma_chan) {
-		r = -ENOMEM;
-		goto out_unmap;
-	}
+	dma_lch_count		= d->lch_count;
+	dma_chan_count		= dma_lch_count;
+	dma_chan		= d->chan;
+	enable_1510_mode	= d->dev_caps & ENABLE_1510_MODE;
 
 	if (cpu_class_is_omap2()) {
 		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
 						dma_lch_count, GFP_KERNEL);
 		if (!dma_linked_lch) {
-			r = -ENOMEM;
-			goto out_free;
+			ret = -ENOMEM;
+			goto exit_dma_lch_fail;
 		}
 	}
 
-	if (cpu_is_omap15xx()) {
-		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
-		dma_chan_count = 9;
-		enable_1510_mode = 1;
-	} else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-		printk(KERN_INFO "OMAP DMA hardware version %d\n",
-		       dma_read(HW_ID));
-		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
-		       (dma_read(CAPS_0_U) << 16) |
-		       dma_read(CAPS_0_L),
-		       (dma_read(CAPS_1_U) << 16) |
-		       dma_read(CAPS_1_L),
-		       dma_read(CAPS_2), dma_read(CAPS_3),
-		       dma_read(CAPS_4));
-		if (!enable_1510_mode) {
-			u16 w;
-
-			/* Disable OMAP 3.0/3.1 compatibility mode. */
-			w = dma_read(GSCR);
-			w |= 1 << 3;
-			dma_write(w, GSCR);
-			dma_chan_count = 16;
-		} else
-			dma_chan_count = 9;
-	} else if (cpu_class_is_omap2()) {
-		u8 revision = dma_read(REVISION) & 0xff;
-		printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
-		       revision >> 4, revision & 0xf);
-		dma_chan_count = dma_lch_count;
-	} else {
-		dma_chan_count = 0;
-		return 0;
-	}
-
 	spin_lock_init(&dma_chan_lock);
-
 	for (ch = 0; ch < dma_chan_count; ch++) {
 		omap_clear_dma(ch);
 		if (cpu_class_is_omap2())
@@ -2178,20 +2017,23 @@
 			 * request_irq() doesn't like dev_id (ie. ch) being
 			 * zero, so we have to kludge around this.
 			 */
-			r = request_irq(omap1_dma_irq[ch],
+			sprintf(&irq_name[0], "%d", ch);
+			dma_irq = platform_get_irq_byname(pdev, irq_name);
+
+			if (dma_irq < 0) {
+				ret = dma_irq;
+				goto exit_dma_irq_fail;
+			}
+
+			/* INT_DMA_LCD is handled in lcd_dma.c */
+			if (dma_irq == INT_DMA_LCD)
+				continue;
+
+			ret = request_irq(dma_irq,
 					omap1_dma_irq_handler, 0, "DMA",
 					(void *) (ch + 1));
-			if (r != 0) {
-				int i;
-
-				printk(KERN_ERR "unable to request IRQ %d "
-				       "for DMA (error %d)\n",
-				       omap1_dma_irq[ch], r);
-				for (i = 0; i < ch; i++)
-					free_irq(omap1_dma_irq[i],
-						 (void *) (i + 1));
-				goto out_free;
-			}
+			if (ret != 0)
+				goto exit_dma_irq_fail;
 		}
 	}
 
@@ -2200,46 +2042,91 @@
 				DMA_DEFAULT_FIFO_DEPTH, 0);
 
 	if (cpu_class_is_omap2()) {
-		int irq;
-		if (cpu_is_omap44xx())
-			irq = OMAP44XX_IRQ_SDMA_0;
-		else
-			irq = INT_24XX_SDMA_IRQ0;
-		setup_irq(irq, &omap24xx_dma_irq);
-	}
-
-	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-		/* Enable smartidle idlemodes and autoidle */
-		u32 v = dma_read(OCP_SYSCONFIG);
-		v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
-				DMA_SYSCONFIG_SIDLEMODE_MASK |
-				DMA_SYSCONFIG_AUTOIDLE);
-		v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
-			DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
-			DMA_SYSCONFIG_AUTOIDLE);
-		dma_write(v , OCP_SYSCONFIG);
-		/* reserve dma channels 0 and 1 in high security devices */
-		if (cpu_is_omap34xx() &&
-			(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
-			printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
-					"HS ROM code\n");
-			dma_chan[0].dev_id = 0;
-			dma_chan[1].dev_id = 1;
+		strcpy(irq_name, "0");
+		dma_irq = platform_get_irq_byname(pdev, irq_name);
+		if (dma_irq < 0) {
+			dev_err(&pdev->dev, "failed: request IRQ %d\n", dma_irq);
+			goto exit_dma_lch_fail;
+		}
+		ret = setup_irq(dma_irq, &omap24xx_dma_irq);
+		if (ret) {
+			dev_err(&pdev->dev, "setup_irq failed for IRQ %d "
+				"for DMA (error %d)\n", dma_irq, ret);
+			goto exit_dma_lch_fail;
 		}
 	}
 
+	/* reserve dma channels 0 and 1 in high security devices */
+	if (cpu_is_omap34xx() &&
+		(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
+		printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
+				"HS ROM code\n");
+		dma_chan[0].dev_id = 0;
+		dma_chan[1].dev_id = 1;
+	}
+	p->show_dma_caps();
 	return 0;
 
-out_free:
+exit_dma_irq_fail:
+	dev_err(&pdev->dev, "unable to request IRQ %d "
+			"for DMA (error %d)\n", dma_irq, ret);
+	for (irq_rel = 0; irq_rel < ch; irq_rel++) {
+		dma_irq = platform_get_irq(pdev, irq_rel);
+		free_irq(dma_irq, (void *)(irq_rel + 1));
+	}
+
+exit_dma_lch_fail:
+	kfree(p);
+	kfree(d);
 	kfree(dma_chan);
-
-out_unmap:
-	iounmap(omap_dma_base);
-
-	return r;
+	return ret;
 }
 
-arch_initcall(omap_init_dma);
+static int __devexit omap_system_dma_remove(struct platform_device *pdev)
+{
+	int dma_irq;
+
+	if (cpu_class_is_omap2()) {
+		char irq_name[4];
+		strcpy(irq_name, "0");
+		dma_irq = platform_get_irq_byname(pdev, irq_name);
+		remove_irq(dma_irq, &omap24xx_dma_irq);
+	} else {
+		int irq_rel = 0;
+		for ( ; irq_rel < dma_chan_count; irq_rel++) {
+			dma_irq = platform_get_irq(pdev, irq_rel);
+			free_irq(dma_irq, (void *)(irq_rel + 1));
+		}
+	}
+	kfree(p);
+	kfree(d);
+	kfree(dma_chan);
+	return 0;
+}
+
+static struct platform_driver omap_system_dma_driver = {
+	.probe		= omap_system_dma_probe,
+	.remove		= omap_system_dma_remove,
+	.driver		= {
+		.name	= "omap_dma_system"
+	},
+};
+
+static int __init omap_system_dma_init(void)
+{
+	return platform_driver_register(&omap_system_dma_driver);
+}
+arch_initcall(omap_system_dma_init);
+
+static void __exit omap_system_dma_exit(void)
+{
+	platform_driver_unregister(&omap_system_dma_driver);
+}
+
+MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
 
 /*
  * Reserve the omap SDMA channels using cmdline bootarg
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index c05c653..ccf2660 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -21,18 +21,18 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>
 #include <asm/mach/irq.h>
-#include <plat/powerdomain.h>
 
 /*
  * OMAP1510 GPIO registers
  */
-#define OMAP1510_GPIO_BASE		0xfffce000
 #define OMAP1510_GPIO_DATA_INPUT	0x00
 #define OMAP1510_GPIO_DATA_OUTPUT	0x04
 #define OMAP1510_GPIO_DIR_CONTROL	0x08
@@ -46,10 +46,6 @@
 /*
  * OMAP1610 specific GPIO registers
  */
-#define OMAP1610_GPIO1_BASE		0xfffbe400
-#define OMAP1610_GPIO2_BASE		0xfffbec00
-#define OMAP1610_GPIO3_BASE		0xfffbb400
-#define OMAP1610_GPIO4_BASE		0xfffbbc00
 #define OMAP1610_GPIO_REVISION		0x0000
 #define OMAP1610_GPIO_SYSCONFIG		0x0010
 #define OMAP1610_GPIO_SYSSTATUS		0x0014
@@ -71,12 +67,6 @@
 /*
  * OMAP7XX specific GPIO registers
  */
-#define OMAP7XX_GPIO1_BASE		0xfffbc000
-#define OMAP7XX_GPIO2_BASE		0xfffbc800
-#define OMAP7XX_GPIO3_BASE		0xfffbd000
-#define OMAP7XX_GPIO4_BASE		0xfffbd800
-#define OMAP7XX_GPIO5_BASE		0xfffbe000
-#define OMAP7XX_GPIO6_BASE		0xfffbe800
 #define OMAP7XX_GPIO_DATA_INPUT		0x00
 #define OMAP7XX_GPIO_DATA_OUTPUT	0x04
 #define OMAP7XX_GPIO_DIR_CONTROL	0x08
@@ -84,25 +74,10 @@
 #define OMAP7XX_GPIO_INT_MASK		0x10
 #define OMAP7XX_GPIO_INT_STATUS		0x14
 
-#define OMAP1_MPUIO_VBASE		OMAP1_MPUIO_BASE
-
 /*
- * omap24xx specific GPIO registers
+ * omap2+ specific GPIO registers
  */
-#define OMAP242X_GPIO1_BASE		0x48018000
-#define OMAP242X_GPIO2_BASE		0x4801a000
-#define OMAP242X_GPIO3_BASE		0x4801c000
-#define OMAP242X_GPIO4_BASE		0x4801e000
-
-#define OMAP243X_GPIO1_BASE		0x4900C000
-#define OMAP243X_GPIO2_BASE		0x4900E000
-#define OMAP243X_GPIO3_BASE		0x49010000
-#define OMAP243X_GPIO4_BASE		0x49012000
-#define OMAP243X_GPIO5_BASE		0x480B6000
-
 #define OMAP24XX_GPIO_REVISION		0x0000
-#define OMAP24XX_GPIO_SYSCONFIG		0x0010
-#define OMAP24XX_GPIO_SYSSTATUS		0x0014
 #define OMAP24XX_GPIO_IRQSTATUS1	0x0018
 #define OMAP24XX_GPIO_IRQSTATUS2	0x0028
 #define OMAP24XX_GPIO_IRQENABLE2	0x002c
@@ -126,7 +101,6 @@
 #define OMAP24XX_GPIO_SETDATAOUT	0x0094
 
 #define OMAP4_GPIO_REVISION		0x0000
-#define OMAP4_GPIO_SYSCONFIG		0x0010
 #define OMAP4_GPIO_EOI			0x0020
 #define OMAP4_GPIO_IRQSTATUSRAW0	0x0024
 #define OMAP4_GPIO_IRQSTATUSRAW1	0x0028
@@ -138,7 +112,6 @@
 #define OMAP4_GPIO_IRQSTATUSCLR1	0x0040
 #define OMAP4_GPIO_IRQWAKEN0		0x0044
 #define OMAP4_GPIO_IRQWAKEN1		0x0048
-#define OMAP4_GPIO_SYSSTATUS		0x0114
 #define OMAP4_GPIO_IRQENABLE1		0x011c
 #define OMAP4_GPIO_WAKE_EN		0x0120
 #define OMAP4_GPIO_IRQSTATUS2		0x0128
@@ -159,26 +132,6 @@
 #define OMAP4_GPIO_SETWKUENA		0x0184
 #define OMAP4_GPIO_CLEARDATAOUT		0x0190
 #define OMAP4_GPIO_SETDATAOUT		0x0194
-/*
- * omap34xx specific GPIO registers
- */
-
-#define OMAP34XX_GPIO1_BASE		0x48310000
-#define OMAP34XX_GPIO2_BASE		0x49050000
-#define OMAP34XX_GPIO3_BASE		0x49052000
-#define OMAP34XX_GPIO4_BASE		0x49054000
-#define OMAP34XX_GPIO5_BASE		0x49056000
-#define OMAP34XX_GPIO6_BASE		0x49058000
-
-/*
- * OMAP44XX  specific GPIO registers
- */
-#define OMAP44XX_GPIO1_BASE             0x4a310000
-#define OMAP44XX_GPIO2_BASE             0x48055000
-#define OMAP44XX_GPIO3_BASE             0x48057000
-#define OMAP44XX_GPIO4_BASE             0x48059000
-#define OMAP44XX_GPIO5_BASE             0x4805B000
-#define OMAP44XX_GPIO6_BASE             0x4805D000
 
 struct gpio_bank {
 	unsigned long pbase;
@@ -190,14 +143,12 @@
 	u32 suspend_wakeup;
 	u32 saved_wakeup;
 #endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
 	u32 non_wakeup_gpios;
 	u32 enabled_non_wakeup_gpios;
 
 	u32 saved_datain;
 	u32 saved_fallingdetect;
 	u32 saved_risingdetect;
-#endif
 	u32 level_mask;
 	u32 toggle_mask;
 	spinlock_t lock;
@@ -205,104 +156,13 @@
 	struct clk *dbck;
 	u32 mod_usage;
 	u32 dbck_enable_mask;
+	struct device *dev;
+	bool dbck_flag;
+	int stride;
 };
 
-#define METHOD_MPUIO		0
-#define METHOD_GPIO_1510	1
-#define METHOD_GPIO_1610	2
-#define METHOD_GPIO_7XX		3
-#define METHOD_GPIO_24XX	5
-#define METHOD_GPIO_44XX	6
-
-#ifdef CONFIG_ARCH_OMAP16XX
-static struct gpio_bank gpio_bank_1610[5] = {
-	{ OMAP1_MPUIO_VBASE, NULL, INT_MPUIO, IH_MPUIO_BASE,
-		METHOD_MPUIO },
-	{ OMAP1610_GPIO1_BASE, NULL, INT_GPIO_BANK1, IH_GPIO_BASE,
-		METHOD_GPIO_1610 },
-	{ OMAP1610_GPIO2_BASE, NULL, INT_1610_GPIO_BANK2, IH_GPIO_BASE + 16,
-		METHOD_GPIO_1610 },
-	{ OMAP1610_GPIO3_BASE, NULL, INT_1610_GPIO_BANK3, IH_GPIO_BASE + 32,
-		METHOD_GPIO_1610 },
-	{ OMAP1610_GPIO4_BASE, NULL, INT_1610_GPIO_BANK4, IH_GPIO_BASE + 48,
-		METHOD_GPIO_1610 },
-};
-#endif
-
-#ifdef CONFIG_ARCH_OMAP15XX
-static struct gpio_bank gpio_bank_1510[2] = {
-	{ OMAP1_MPUIO_VBASE, NULL, INT_MPUIO, IH_MPUIO_BASE,
-		METHOD_MPUIO },
-	{ OMAP1510_GPIO_BASE, NULL, INT_GPIO_BANK1, IH_GPIO_BASE,
-		METHOD_GPIO_1510 }
-};
-#endif
-
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
-static struct gpio_bank gpio_bank_7xx[7] = {
-	{ OMAP1_MPUIO_VBASE, NULL, INT_7XX_MPUIO, IH_MPUIO_BASE,
-		METHOD_MPUIO },
-	{ OMAP7XX_GPIO1_BASE, NULL, INT_7XX_GPIO_BANK1, IH_GPIO_BASE,
-		METHOD_GPIO_7XX },
-	{ OMAP7XX_GPIO2_BASE, NULL, INT_7XX_GPIO_BANK2, IH_GPIO_BASE + 32,
-		METHOD_GPIO_7XX },
-	{ OMAP7XX_GPIO3_BASE, NULL, INT_7XX_GPIO_BANK3, IH_GPIO_BASE + 64,
-		METHOD_GPIO_7XX },
-	{ OMAP7XX_GPIO4_BASE, NULL, INT_7XX_GPIO_BANK4,  IH_GPIO_BASE + 96,
-		METHOD_GPIO_7XX },
-	{ OMAP7XX_GPIO5_BASE, NULL, INT_7XX_GPIO_BANK5,  IH_GPIO_BASE + 128,
-		METHOD_GPIO_7XX },
-	{ OMAP7XX_GPIO6_BASE, NULL, INT_7XX_GPIO_BANK6,  IH_GPIO_BASE + 160,
-		METHOD_GPIO_7XX },
-};
-#endif
-
-#ifdef CONFIG_ARCH_OMAP2
-
-static struct gpio_bank gpio_bank_242x[4] = {
-	{ OMAP242X_GPIO1_BASE, NULL, INT_24XX_GPIO_BANK1, IH_GPIO_BASE,
-		METHOD_GPIO_24XX },
-	{ OMAP242X_GPIO2_BASE, NULL, INT_24XX_GPIO_BANK2, IH_GPIO_BASE + 32,
-		METHOD_GPIO_24XX },
-	{ OMAP242X_GPIO3_BASE, NULL, INT_24XX_GPIO_BANK3, IH_GPIO_BASE + 64,
-		METHOD_GPIO_24XX },
-	{ OMAP242X_GPIO4_BASE, NULL, INT_24XX_GPIO_BANK4, IH_GPIO_BASE + 96,
-		METHOD_GPIO_24XX },
-};
-
-static struct gpio_bank gpio_bank_243x[5] = {
-	{ OMAP243X_GPIO1_BASE, NULL, INT_24XX_GPIO_BANK1, IH_GPIO_BASE,
-		METHOD_GPIO_24XX },
-	{ OMAP243X_GPIO2_BASE, NULL, INT_24XX_GPIO_BANK2, IH_GPIO_BASE + 32,
-		METHOD_GPIO_24XX },
-	{ OMAP243X_GPIO3_BASE, NULL, INT_24XX_GPIO_BANK3, IH_GPIO_BASE + 64,
-		METHOD_GPIO_24XX },
-	{ OMAP243X_GPIO4_BASE, NULL, INT_24XX_GPIO_BANK4, IH_GPIO_BASE + 96,
-		METHOD_GPIO_24XX },
-	{ OMAP243X_GPIO5_BASE, NULL, INT_24XX_GPIO_BANK5, IH_GPIO_BASE + 128,
-		METHOD_GPIO_24XX },
-};
-
-#endif
-
 #ifdef CONFIG_ARCH_OMAP3
-static struct gpio_bank gpio_bank_34xx[6] = {
-	{ OMAP34XX_GPIO1_BASE, NULL, INT_34XX_GPIO_BANK1, IH_GPIO_BASE,
-		METHOD_GPIO_24XX },
-	{ OMAP34XX_GPIO2_BASE, NULL, INT_34XX_GPIO_BANK2, IH_GPIO_BASE + 32,
-		METHOD_GPIO_24XX },
-	{ OMAP34XX_GPIO3_BASE, NULL, INT_34XX_GPIO_BANK3, IH_GPIO_BASE + 64,
-		METHOD_GPIO_24XX },
-	{ OMAP34XX_GPIO4_BASE, NULL, INT_34XX_GPIO_BANK4, IH_GPIO_BASE + 96,
-		METHOD_GPIO_24XX },
-	{ OMAP34XX_GPIO5_BASE, NULL, INT_34XX_GPIO_BANK5, IH_GPIO_BASE + 128,
-		METHOD_GPIO_24XX },
-	{ OMAP34XX_GPIO6_BASE, NULL, INT_34XX_GPIO_BANK6, IH_GPIO_BASE + 160,
-		METHOD_GPIO_24XX },
-};
-
 struct omap3_gpio_regs {
-	u32 sysconfig;
 	u32 irqenable1;
 	u32 irqenable2;
 	u32 wake_en;
@@ -318,26 +178,16 @@
 static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
 #endif
 
-#ifdef CONFIG_ARCH_OMAP4
-static struct gpio_bank gpio_bank_44xx[6] = {
-	{ OMAP44XX_GPIO1_BASE, NULL, OMAP44XX_IRQ_GPIO1, IH_GPIO_BASE,
-		METHOD_GPIO_44XX },
-	{ OMAP44XX_GPIO2_BASE, NULL, OMAP44XX_IRQ_GPIO2, IH_GPIO_BASE + 32,
-		METHOD_GPIO_44XX },
-	{ OMAP44XX_GPIO3_BASE, NULL, OMAP44XX_IRQ_GPIO3, IH_GPIO_BASE + 64,
-		METHOD_GPIO_44XX },
-	{ OMAP44XX_GPIO4_BASE, NULL, OMAP44XX_IRQ_GPIO4, IH_GPIO_BASE + 96,
-		METHOD_GPIO_44XX },
-	{ OMAP44XX_GPIO5_BASE, NULL, OMAP44XX_IRQ_GPIO5, IH_GPIO_BASE + 128,
-		METHOD_GPIO_44XX },
-	{ OMAP44XX_GPIO6_BASE, NULL, OMAP44XX_IRQ_GPIO6, IH_GPIO_BASE + 160,
-		METHOD_GPIO_44XX },
-};
-
-#endif
-
+/*
+ * TODO: Clean up gpio_bank usage as it holds information
+ * related to all instances of the device
+ */
 static struct gpio_bank *gpio_bank;
-static int gpio_bank_count;
+
+static int bank_width;
+
+/* TODO: Analyze removing gpio_bank_count usage from driver code */
+int gpio_bank_count;
 
 static inline struct gpio_bank *get_gpio_bank(int gpio)
 {
@@ -417,7 +267,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_IO_CNTL;
+		reg += OMAP_MPUIO_IO_CNTL / bank->stride;
 		break;
 #endif
 #ifdef CONFIG_ARCH_OMAP15XX
@@ -465,7 +315,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_OUTPUT;
+		reg += OMAP_MPUIO_OUTPUT / bank->stride;
 		l = __raw_readl(reg);
 		if (enable)
 			l |= 1 << gpio;
@@ -537,7 +387,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_INPUT_LATCH;
+		reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
 		break;
 #endif
 #ifdef CONFIG_ARCH_OMAP15XX
@@ -583,7 +433,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_OUTPUT;
+		reg += OMAP_MPUIO_OUTPUT / bank->stride;
 		break;
 #endif
 #ifdef CONFIG_ARCH_OMAP15XX
@@ -642,6 +492,9 @@
 	u32			val;
 	u32			l;
 
+	if (!bank->dbck_flag)
+		return;
+
 	if (debounce < 32)
 		debounce = 0x01;
 	else if (debounce > 7936)
@@ -651,7 +504,7 @@
 
 	l = 1 << get_gpio_index(gpio);
 
-	if (cpu_is_omap44xx())
+	if (bank->method == METHOD_GPIO_44XX)
 		reg += OMAP4_GPIO_DEBOUNCINGTIME;
 	else
 		reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
@@ -659,7 +512,7 @@
 	__raw_writel(debounce, reg);
 
 	reg = bank->base;
-	if (cpu_is_omap44xx())
+	if (bank->method == METHOD_GPIO_44XX)
 		reg += OMAP4_GPIO_DEBOUNCENABLE;
 	else
 		reg += OMAP24XX_GPIO_DEBOUNCE_EN;
@@ -668,12 +521,10 @@
 
 	if (debounce) {
 		val |= l;
-		if (cpu_is_omap34xx() || cpu_is_omap44xx())
-			clk_enable(bank->dbck);
+		clk_enable(bank->dbck);
 	} else {
 		val &= ~l;
-		if (cpu_is_omap34xx() || cpu_is_omap44xx())
-			clk_disable(bank->dbck);
+		clk_disable(bank->dbck);
 	}
 	bank->dbck_enable_mask = val;
 
@@ -769,7 +620,7 @@
 
 	switch (bank->method) {
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_GPIO_INT_EDGE;
+		reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
 		break;
 #ifdef CONFIG_ARCH_OMAP15XX
 	case METHOD_GPIO_1510:
@@ -803,7 +654,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_GPIO_INT_EDGE;
+		reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
 		l = __raw_readl(reg);
 		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
 			bank->toggle_mask |= 1 << gpio;
@@ -867,7 +718,7 @@
 	case METHOD_GPIO_24XX:
 	case METHOD_GPIO_44XX:
 		set_24xx_gpio_triggering(bank, gpio, trigger);
-		break;
+		return 0;
 #endif
 	default:
 		goto bad;
@@ -905,8 +756,10 @@
 	spin_lock_irqsave(&bank->lock, flags);
 	retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
 	if (retval == 0) {
-		irq_desc[irq].status &= ~IRQ_TYPE_SENSE_MASK;
-		irq_desc[irq].status |= type;
+		struct irq_desc *d = irq_to_desc(irq);
+
+		d->status &= ~IRQ_TYPE_SENSE_MASK;
+		d->status |= type;
 	}
 	spin_unlock_irqrestore(&bank->lock, flags);
 
@@ -989,7 +842,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_GPIO_MASKIT;
+		reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 		mask = 0xffff;
 		inv = 1;
 		break;
@@ -1046,7 +899,7 @@
 	switch (bank->method) {
 #ifdef CONFIG_ARCH_OMAP1
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_GPIO_MASKIT;
+		reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 		l = __raw_readl(reg);
 		if (enable)
 			l &= ~(gpio_mask);
@@ -1296,7 +1149,8 @@
 	bank = get_irq_data(irq);
 #ifdef CONFIG_ARCH_OMAP1
 	if (bank->method == METHOD_MPUIO)
-		isr_reg = bank->base + OMAP_MPUIO_GPIO_INT;
+		isr_reg = bank->base +
+				OMAP_MPUIO_GPIO_INT / bank->stride;
 #endif
 #ifdef CONFIG_ARCH_OMAP15XX
 	if (bank->method == METHOD_GPIO_1510)
@@ -1318,6 +1172,10 @@
 	if (bank->method == METHOD_GPIO_44XX)
 		isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
 #endif
+
+	if (WARN_ON(!isr_reg))
+		goto exit;
+
 	while(1) {
 		u32 isr_saved, level_mask = 0;
 		u32 enabled;
@@ -1377,6 +1235,7 @@
 	configured, we must unmask the bank interrupt only after
 	handler(s) are executed in order to avoid spurious bank
 	interrupt */
+exit:
 	if (!unmasked)
 		desc->chip->unmask(irq);
 
@@ -1489,7 +1348,8 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct gpio_bank	*bank = platform_get_drvdata(pdev);
-	void __iomem		*mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT;
+	void __iomem		*mask_reg = bank->base +
+					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
 	spin_lock_irqsave(&bank->lock, flags);
@@ -1504,7 +1364,8 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct gpio_bank	*bank = platform_get_drvdata(pdev);
-	void __iomem		*mask_reg = bank->base + OMAP_MPUIO_GPIO_MASKIT;
+	void __iomem		*mask_reg = bank->base +
+					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
 	spin_lock_irqsave(&bank->lock, flags);
@@ -1540,7 +1401,8 @@
 
 static inline void mpuio_init(void)
 {
-	platform_set_drvdata(&omap_mpuio_device, &gpio_bank_1610[0]);
+	struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
+	platform_set_drvdata(&omap_mpuio_device, bank);
 
 	if (platform_driver_register(&omap_mpuio_driver) == 0)
 		(void) platform_device_register(&omap_mpuio_device);
@@ -1583,7 +1445,7 @@
 
 	switch (bank->method) {
 	case METHOD_MPUIO:
-		reg += OMAP_MPUIO_IO_CNTL;
+		reg += OMAP_MPUIO_IO_CNTL / bank->stride;
 		break;
 	case METHOD_GPIO_1510:
 		reg += OMAP1510_GPIO_DIR_CONTROL;
@@ -1645,6 +1507,13 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
+
+	if (!bank->dbck) {
+		bank->dbck = clk_get(bank->dev, "dbclk");
+		if (IS_ERR(bank->dbck))
+			dev_err(bank->dev, "Could not get gpio dbck\n");
+	}
+
 	spin_lock_irqsave(&bank->lock, flags);
 	_set_gpio_debounce(bank, offset, debounce);
 	spin_unlock_irqrestore(&bank->lock, flags);
@@ -1673,34 +1542,16 @@
 
 /*---------------------------------------------------------------------*/
 
-static int initialized;
-#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2)
-static struct clk * gpio_ick;
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2)
-static struct clk * gpio_fck;
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2430)
-static struct clk * gpio5_ick;
-static struct clk * gpio5_fck;
-#endif
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static struct clk *gpio_iclks[OMAP34XX_NR_GPIOS];
-#endif
-
-static void __init omap_gpio_show_rev(void)
+static void __init omap_gpio_show_rev(struct gpio_bank *bank)
 {
 	u32 rev;
 
-	if (cpu_is_omap16xx())
-		rev = __raw_readw(gpio_bank[1].base + OMAP1610_GPIO_REVISION);
+	if (cpu_is_omap16xx() && (bank->method != METHOD_MPUIO))
+		rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
 	else if (cpu_is_omap24xx() || cpu_is_omap34xx())
-		rev = __raw_readl(gpio_bank[0].base + OMAP24XX_GPIO_REVISION);
+		rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
 	else if (cpu_is_omap44xx())
-		rev = __raw_readl(gpio_bank[0].base + OMAP4_GPIO_REVISION);
+		rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
 	else
 		return;
 
@@ -1713,250 +1564,192 @@
  */
 static struct lock_class_key gpio_lock_class;
 
-static int __init _omap_gpio_init(void)
+static inline int init_gpio_info(struct platform_device *pdev)
 {
-	int i;
-	int gpio = 0;
-	struct gpio_bank *bank;
-	int bank_size = SZ_8K;	/* Module 4KB + L4 4KB except on omap1 */
-	char clk_name[11];
-
-	initialized = 1;
-
-#if defined(CONFIG_ARCH_OMAP1)
-	if (cpu_is_omap15xx()) {
-		gpio_ick = clk_get(NULL, "arm_gpio_ck");
-		if (IS_ERR(gpio_ick))
-			printk("Could not get arm_gpio_ck\n");
-		else
-			clk_enable(gpio_ick);
+	/* TODO: Analyze removing gpio_bank_count usage from driver code */
+	gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
+				GFP_KERNEL);
+	if (!gpio_bank) {
+		dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
+		return -ENOMEM;
 	}
-#endif
-#if defined(CONFIG_ARCH_OMAP2)
+	return 0;
+}
+
+/* TODO: Cleanup cpu_is_* checks */
+static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
+{
 	if (cpu_class_is_omap2()) {
-		gpio_ick = clk_get(NULL, "gpios_ick");
-		if (IS_ERR(gpio_ick))
-			printk("Could not get gpios_ick\n");
-		else
-			clk_enable(gpio_ick);
-		gpio_fck = clk_get(NULL, "gpios_fck");
-		if (IS_ERR(gpio_fck))
-			printk("Could not get gpios_fck\n");
-		else
-			clk_enable(gpio_fck);
+		if (cpu_is_omap44xx()) {
+			__raw_writel(0xffffffff, bank->base +
+					OMAP4_GPIO_IRQSTATUSCLR0);
+			__raw_writel(0x00000000, bank->base +
+					 OMAP4_GPIO_DEBOUNCENABLE);
+			/* Initialize interface clk ungated, module enabled */
+			__raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
+		} else if (cpu_is_omap34xx()) {
+			__raw_writel(0x00000000, bank->base +
+					OMAP24XX_GPIO_IRQENABLE1);
+			__raw_writel(0xffffffff, bank->base +
+					OMAP24XX_GPIO_IRQSTATUS1);
+			__raw_writel(0x00000000, bank->base +
+					OMAP24XX_GPIO_DEBOUNCE_EN);
 
-		/*
-		 * On 2430 & 3430 GPIO 5 uses CORE L4 ICLK
-		 */
-#if defined(CONFIG_ARCH_OMAP2430)
-		if (cpu_is_omap2430()) {
-			gpio5_ick = clk_get(NULL, "gpio5_ick");
-			if (IS_ERR(gpio5_ick))
-				printk("Could not get gpio5_ick\n");
-			else
-				clk_enable(gpio5_ick);
-			gpio5_fck = clk_get(NULL, "gpio5_fck");
-			if (IS_ERR(gpio5_fck))
-				printk("Could not get gpio5_fck\n");
-			else
-				clk_enable(gpio5_fck);
-		}
-#endif
-	}
-#endif
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-		for (i = 0; i < OMAP34XX_NR_GPIOS; i++) {
-			sprintf(clk_name, "gpio%d_ick", i + 1);
-			gpio_iclks[i] = clk_get(NULL, clk_name);
-			if (IS_ERR(gpio_iclks[i]))
-				printk(KERN_ERR "Could not get %s\n", clk_name);
-			else
-				clk_enable(gpio_iclks[i]);
-		}
-	}
-#endif
-
-
-#ifdef CONFIG_ARCH_OMAP15XX
-	if (cpu_is_omap15xx()) {
-		gpio_bank_count = 2;
-		gpio_bank = gpio_bank_1510;
-		bank_size = SZ_2K;
-	}
-#endif
-#if defined(CONFIG_ARCH_OMAP16XX)
-	if (cpu_is_omap16xx()) {
-		gpio_bank_count = 5;
-		gpio_bank = gpio_bank_1610;
-		bank_size = SZ_2K;
-	}
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
-	if (cpu_is_omap7xx()) {
-		gpio_bank_count = 7;
-		gpio_bank = gpio_bank_7xx;
-		bank_size = SZ_2K;
-	}
-#endif
-#ifdef CONFIG_ARCH_OMAP2
-	if (cpu_is_omap242x()) {
-		gpio_bank_count = 4;
-		gpio_bank = gpio_bank_242x;
-	}
-	if (cpu_is_omap243x()) {
-		gpio_bank_count = 5;
-		gpio_bank = gpio_bank_243x;
-	}
-#endif
-#ifdef CONFIG_ARCH_OMAP3
-	if (cpu_is_omap34xx()) {
-		gpio_bank_count = OMAP34XX_NR_GPIOS;
-		gpio_bank = gpio_bank_34xx;
-	}
-#endif
-#ifdef CONFIG_ARCH_OMAP4
-	if (cpu_is_omap44xx()) {
-		gpio_bank_count = OMAP34XX_NR_GPIOS;
-		gpio_bank = gpio_bank_44xx;
-	}
-#endif
-	for (i = 0; i < gpio_bank_count; i++) {
-		int j, gpio_count = 16;
-
-		bank = &gpio_bank[i];
-		spin_lock_init(&bank->lock);
-
-		/* Static mapping, never released */
-		bank->base = ioremap(bank->pbase, bank_size);
-		if (!bank->base) {
-			printk(KERN_ERR "Could not ioremap gpio bank%i\n", i);
-			continue;
-		}
-
-		if (bank_is_mpuio(bank))
-			__raw_writew(0xffff, bank->base + OMAP_MPUIO_GPIO_MASKIT);
-		if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
-			__raw_writew(0xffff, bank->base + OMAP1510_GPIO_INT_MASK);
-			__raw_writew(0x0000, bank->base + OMAP1510_GPIO_INT_STATUS);
-		}
-		if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
-			__raw_writew(0x0000, bank->base + OMAP1610_GPIO_IRQENABLE1);
-			__raw_writew(0xffff, bank->base + OMAP1610_GPIO_IRQSTATUS1);
-			__raw_writew(0x0014, bank->base + OMAP1610_GPIO_SYSCONFIG);
-		}
-		if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
-			__raw_writel(0xffffffff, bank->base + OMAP7XX_GPIO_INT_MASK);
-			__raw_writel(0x00000000, bank->base + OMAP7XX_GPIO_INT_STATUS);
-
-			gpio_count = 32; /* 7xx has 32-bit GPIOs */
-		}
-
-#ifdef CONFIG_ARCH_OMAP2PLUS
-		if ((bank->method == METHOD_GPIO_24XX) ||
-				(bank->method == METHOD_GPIO_44XX)) {
+			/* Initialize interface clk ungated, module enabled */
+			__raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
+		} else if (cpu_is_omap24xx()) {
 			static const u32 non_wakeup_gpios[] = {
 				0xe203ffc0, 0x08700040
 			};
-
-			if (cpu_is_omap44xx()) {
-				__raw_writel(0xffffffff, bank->base +
-						OMAP4_GPIO_IRQSTATUSCLR0);
-				__raw_writew(0x0015, bank->base +
-						OMAP4_GPIO_SYSCONFIG);
-				__raw_writel(0x00000000, bank->base +
-						 OMAP4_GPIO_DEBOUNCENABLE);
-				/*
-				 * Initialize interface clock ungated,
-				 * module enabled
-				 */
-				__raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
-			} else {
-				__raw_writel(0x00000000, bank->base +
-						OMAP24XX_GPIO_IRQENABLE1);
-				__raw_writel(0xffffffff, bank->base +
-						OMAP24XX_GPIO_IRQSTATUS1);
-				__raw_writew(0x0015, bank->base +
-						OMAP24XX_GPIO_SYSCONFIG);
-				__raw_writel(0x00000000, bank->base +
-						OMAP24XX_GPIO_DEBOUNCE_EN);
-
-				/*
-				 * Initialize interface clock ungated,
-				 * module enabled
-				 */
-				__raw_writel(0, bank->base +
-						OMAP24XX_GPIO_CTRL);
-			}
-			if (cpu_is_omap24xx() &&
-			    i < ARRAY_SIZE(non_wakeup_gpios))
-				bank->non_wakeup_gpios = non_wakeup_gpios[i];
-			gpio_count = 32;
+			if (id < ARRAY_SIZE(non_wakeup_gpios))
+				bank->non_wakeup_gpios = non_wakeup_gpios[id];
 		}
-#endif
-
-		bank->mod_usage = 0;
-		/* REVISIT eventually switch from OMAP-specific gpio structs
-		 * over to the generic ones
-		 */
-		bank->chip.request = omap_gpio_request;
-		bank->chip.free = omap_gpio_free;
-		bank->chip.direction_input = gpio_input;
-		bank->chip.get = gpio_get;
-		bank->chip.direction_output = gpio_output;
-		bank->chip.set_debounce = gpio_debounce;
-		bank->chip.set = gpio_set;
-		bank->chip.to_irq = gpio_2irq;
-		if (bank_is_mpuio(bank)) {
-			bank->chip.label = "mpuio";
-#ifdef CONFIG_ARCH_OMAP16XX
-			bank->chip.dev = &omap_mpuio_device.dev;
-#endif
-			bank->chip.base = OMAP_MPUIO(0);
-		} else {
-			bank->chip.label = "gpio";
-			bank->chip.base = gpio;
-			gpio += gpio_count;
+	} else if (cpu_class_is_omap1()) {
+		if (bank_is_mpuio(bank))
+			__raw_writew(0xffff, bank->base +
+				OMAP_MPUIO_GPIO_MASKIT / bank->stride);
+		if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
+			__raw_writew(0xffff, bank->base
+						+ OMAP1510_GPIO_INT_MASK);
+			__raw_writew(0x0000, bank->base
+						+ OMAP1510_GPIO_INT_STATUS);
 		}
-		bank->chip.ngpio = gpio_count;
+		if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
+			__raw_writew(0x0000, bank->base
+						+ OMAP1610_GPIO_IRQENABLE1);
+			__raw_writew(0xffff, bank->base
+						+ OMAP1610_GPIO_IRQSTATUS1);
+			__raw_writew(0x0014, bank->base
+						+ OMAP1610_GPIO_SYSCONFIG);
 
-		gpiochip_add(&bank->chip);
-
-		for (j = bank->virtual_irq_start;
-		     j < bank->virtual_irq_start + gpio_count; j++) {
-			lockdep_set_class(&irq_desc[j].lock, &gpio_lock_class);
-			set_irq_chip_data(j, bank);
-			if (bank_is_mpuio(bank))
-				set_irq_chip(j, &mpuio_irq_chip);
-			else
-				set_irq_chip(j, &gpio_irq_chip);
-			set_irq_handler(j, handle_simple_irq);
-			set_irq_flags(j, IRQF_VALID);
+			/*
+			 * Enable system clock for GPIO module.
+			 * The CAM_CLK_CTRL *is* really the right place.
+			 */
+			omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
+						ULPD_CAM_CLK_CTRL);
 		}
-		set_irq_chained_handler(bank->irq, gpio_irq_handler);
-		set_irq_data(bank->irq, bank);
-
-		if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-			sprintf(clk_name, "gpio%d_dbck", i + 1);
-			bank->dbck = clk_get(NULL, clk_name);
-			if (IS_ERR(bank->dbck))
-				printk(KERN_ERR "Could not get %s\n", clk_name);
+		if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
+			__raw_writel(0xffffffff, bank->base
+						+ OMAP7XX_GPIO_INT_MASK);
+			__raw_writel(0x00000000, bank->base
+						+ OMAP7XX_GPIO_INT_STATUS);
 		}
 	}
+}
 
-	/* Enable system clock for GPIO module.
-	 * The CAM_CLK_CTRL *is* really the right place. */
-	if (cpu_is_omap16xx())
-		omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04, ULPD_CAM_CLK_CTRL);
+static void __init omap_gpio_chip_init(struct gpio_bank *bank)
+{
+	int j;
+	static int gpio;
 
-	/* Enable autoidle for the OCP interface */
-	if (cpu_is_omap24xx())
-		omap_writel(1 << 0, 0x48019010);
-	if (cpu_is_omap34xx())
-		omap_writel(1 << 0, 0x48306814);
+	bank->mod_usage = 0;
+	/*
+	 * REVISIT eventually switch from OMAP-specific gpio structs
+	 * over to the generic ones
+	 */
+	bank->chip.request = omap_gpio_request;
+	bank->chip.free = omap_gpio_free;
+	bank->chip.direction_input = gpio_input;
+	bank->chip.get = gpio_get;
+	bank->chip.direction_output = gpio_output;
+	bank->chip.set_debounce = gpio_debounce;
+	bank->chip.set = gpio_set;
+	bank->chip.to_irq = gpio_2irq;
+	if (bank_is_mpuio(bank)) {
+		bank->chip.label = "mpuio";
+#ifdef CONFIG_ARCH_OMAP16XX
+		bank->chip.dev = &omap_mpuio_device.dev;
+#endif
+		bank->chip.base = OMAP_MPUIO(0);
+	} else {
+		bank->chip.label = "gpio";
+		bank->chip.base = gpio;
+		gpio += bank_width;
+	}
+	bank->chip.ngpio = bank_width;
 
-	omap_gpio_show_rev();
+	gpiochip_add(&bank->chip);
+
+	for (j = bank->virtual_irq_start;
+		     j < bank->virtual_irq_start + bank_width; j++) {
+		struct irq_desc *d = irq_to_desc(j);
+
+		lockdep_set_class(&d->lock, &gpio_lock_class);
+		set_irq_chip_data(j, bank);
+		if (bank_is_mpuio(bank))
+			set_irq_chip(j, &mpuio_irq_chip);
+		else
+			set_irq_chip(j, &gpio_irq_chip);
+		set_irq_handler(j, handle_simple_irq);
+		set_irq_flags(j, IRQF_VALID);
+	}
+	set_irq_chained_handler(bank->irq, gpio_irq_handler);
+	set_irq_data(bank->irq, bank);
+}
+
+static int __devinit omap_gpio_probe(struct platform_device *pdev)
+{
+	static int gpio_init_done;
+	struct omap_gpio_platform_data *pdata;
+	struct resource *res;
+	int id;
+	struct gpio_bank *bank;
+
+	if (!pdev->dev.platform_data)
+		return -EINVAL;
+
+	pdata = pdev->dev.platform_data;
+
+	if (!gpio_init_done) {
+		int ret;
+
+		ret = init_gpio_info(pdev);
+		if (ret)
+			return ret;
+	}
+
+	id = pdev->id;
+	bank = &gpio_bank[id];
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (unlikely(!res)) {
+		dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
+		return -ENODEV;
+	}
+
+	bank->irq = res->start;
+	bank->virtual_irq_start = pdata->virtual_irq_start;
+	bank->method = pdata->bank_type;
+	bank->dev = &pdev->dev;
+	bank->dbck_flag = pdata->dbck_flag;
+	bank->stride = pdata->bank_stride;
+	bank_width = pdata->bank_width;
+
+	spin_lock_init(&bank->lock);
+
+	/* Static mapping, never released */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(!res)) {
+		dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
+		return -ENODEV;
+	}
+
+	bank->base = ioremap(res->start, resource_size(res));
+	if (!bank->base) {
+		dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
+		return -ENOMEM;
+	}
+
+	pm_runtime_enable(bank->dev);
+	pm_runtime_get_sync(bank->dev);
+
+	omap_gpio_mod_init(bank, id);
+	omap_gpio_chip_init(bank);
+	omap_gpio_show_rev(bank);
+
+	if (!gpio_init_done)
+		gpio_init_done = 1;
 
 	return 0;
 }
@@ -2074,7 +1867,7 @@
 
 static int workaround_enabled;
 
-void omap2_gpio_prepare_for_idle(int power_state)
+void omap2_gpio_prepare_for_idle(int off_mode)
 {
 	int i, c = 0;
 	int min = 0;
@@ -2090,7 +1883,7 @@
 		for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
 			clk_disable(bank->dbck);
 
-		if (power_state > PWRDM_POWER_OFF)
+		if (!off_mode)
 			continue;
 
 		/* If going to OFF, remove triggering for all
@@ -2251,8 +2044,6 @@
 	/* saving banks from 2-6 only since GPIO1 is in WKUP */
 	for (i = 1; i < gpio_bank_count; i++) {
 		struct gpio_bank *bank = &gpio_bank[i];
-		gpio_context[i].sysconfig =
-			__raw_readl(bank->base + OMAP24XX_GPIO_SYSCONFIG);
 		gpio_context[i].irqenable1 =
 			__raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
 		gpio_context[i].irqenable2 =
@@ -2283,8 +2074,6 @@
 
 	for (i = 1; i < gpio_bank_count; i++) {
 		struct gpio_bank *bank = &gpio_bank[i];
-		__raw_writel(gpio_context[i].sysconfig,
-				bank->base + OMAP24XX_GPIO_SYSCONFIG);
 		__raw_writel(gpio_context[i].irqenable1,
 				bank->base + OMAP24XX_GPIO_IRQENABLE1);
 		__raw_writel(gpio_context[i].irqenable2,
@@ -2309,25 +2098,28 @@
 }
 #endif
 
+static struct platform_driver omap_gpio_driver = {
+	.probe		= omap_gpio_probe,
+	.driver		= {
+		.name	= "omap_gpio",
+	},
+};
+
 /*
- * This may get called early from board specific init
- * for boards that have interrupts routed via FPGA.
+ * gpio driver registration needs to be done before
+ * machine_init functions access gpio APIs.
+ * Hence omap_gpio_drv_reg() is a postcore_initcall.
  */
-int __init omap_gpio_init(void)
+static int __init omap_gpio_drv_reg(void)
 {
-	if (!initialized)
-		return _omap_gpio_init();
-	else
-		return 0;
+	return platform_driver_register(&omap_gpio_driver);
 }
+postcore_initcall(omap_gpio_drv_reg);
 
 static int __init omap_gpio_sysinit(void)
 {
 	int ret = 0;
 
-	if (!initialized)
-		ret = _omap_gpio_init();
-
 	mpuio_init();
 
 #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index a5ce4f0..a4f8003 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -27,20 +27,20 @@
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
 #include <linux/i2c-omap.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
 
 #include <mach/irqs.h>
 #include <plat/mux.h>
 #include <plat/i2c.h>
 #include <plat/omap-pm.h>
+#include <plat/omap_device.h>
 
 #define OMAP_I2C_SIZE		0x3f
 #define OMAP1_I2C_BASE		0xfffb3800
-#define OMAP2_I2C_BASE1		0x48070000
-#define OMAP2_I2C_BASE2		0x48072000
-#define OMAP2_I2C_BASE3		0x48060000
-#define OMAP4_I2C_BASE4		0x48350000
 
-static const char name[] = "i2c_omap";
+static const char name[] = "omap_i2c";
 
 #define I2C_RESOURCE_BUILDER(base, irq)			\
 	{						\
@@ -55,15 +55,6 @@
 
 static struct resource i2c_resources[][2] = {
 	{ I2C_RESOURCE_BUILDER(0, 0) },
-#if	defined(CONFIG_ARCH_OMAP2PLUS)
-	{ I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE2, 0) },
-#endif
-#if	defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-	{ I2C_RESOURCE_BUILDER(OMAP2_I2C_BASE3, 0) },
-#endif
-#if	defined(CONFIG_ARCH_OMAP4)
-	{ I2C_RESOURCE_BUILDER(OMAP4_I2C_BASE4, 0) },
-#endif
 };
 
 #define I2C_DEV_BUILDER(bus_id, res, data)		\
@@ -77,18 +68,11 @@
 		},					\
 	}
 
-static struct omap_i2c_bus_platform_data i2c_pdata[ARRAY_SIZE(i2c_resources)];
+#define MAX_OMAP_I2C_HWMOD_NAME_LEN	16
+#define OMAP_I2C_MAX_CONTROLLERS 4
+static struct omap_i2c_bus_platform_data i2c_pdata[OMAP_I2C_MAX_CONTROLLERS];
 static struct platform_device omap_i2c_devices[] = {
 	I2C_DEV_BUILDER(1, i2c_resources[0], &i2c_pdata[0]),
-#if	defined(CONFIG_ARCH_OMAP2PLUS)
-	I2C_DEV_BUILDER(2, i2c_resources[1], &i2c_pdata[1]),
-#endif
-#if	defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-	I2C_DEV_BUILDER(3, i2c_resources[2], &i2c_pdata[2]),
-#endif
-#if	defined(CONFIG_ARCH_OMAP4)
-	I2C_DEV_BUILDER(4, i2c_resources[3], &i2c_pdata[3]),
-#endif
 };
 
 #define OMAP_I2C_CMDLINE_SETUP	(BIT(31))
@@ -109,35 +93,25 @@
 	return ports;
 }
 
-/* Shared between omap2 and 3 */
-static resource_size_t omap2_i2c_irq[3] __initdata = {
-	INT_24XX_I2C1_IRQ,
-	INT_24XX_I2C2_IRQ,
-	INT_34XX_I2C3_IRQ,
-};
-
-static resource_size_t omap4_i2c_irq[4] __initdata = {
-	OMAP44XX_IRQ_I2C1,
-	OMAP44XX_IRQ_I2C2,
-	OMAP44XX_IRQ_I2C3,
-	OMAP44XX_IRQ_I2C4,
-};
-
-static inline int omap1_i2c_add_bus(struct platform_device *pdev, int bus_id)
+static inline int omap1_i2c_add_bus(int bus_id)
 {
-	struct omap_i2c_bus_platform_data *pd;
+	struct platform_device *pdev;
+	struct omap_i2c_bus_platform_data *pdata;
 	struct resource *res;
 
-	pd = pdev->dev.platform_data;
+	omap1_i2c_mux_pins(bus_id);
+
+	pdev = &omap_i2c_devices[bus_id - 1];
 	res = pdev->resource;
 	res[0].start = OMAP1_I2C_BASE;
 	res[0].end = res[0].start + OMAP_I2C_SIZE;
 	res[1].start = INT_I2C;
-	omap1_i2c_mux_pins(bus_id);
+	pdata = &i2c_pdata[bus_id - 1];
 
 	return platform_device_register(pdev);
 }
 
+
 /*
  * XXX This function is a temporary compatibility wrapper - only
  * needed until the I2C driver can be converted to call
@@ -148,52 +122,64 @@
 	omap_pm_set_max_mpu_wakeup_lat(dev, t);
 }
 
-static inline int omap2_i2c_add_bus(struct platform_device *pdev, int bus_id)
+static struct omap_device_pm_latency omap_i2c_latency[] = {
+	[0] = {
+		.deactivate_func	= omap_device_idle_hwmods,
+		.activate_func		= omap_device_enable_hwmods,
+		.flags			= OMAP_DEVICE_LATENCY_AUTO_ADJUST,
+	},
+};
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+static inline int omap2_i2c_add_bus(int bus_id)
 {
-	struct resource *res;
-	resource_size_t *irq;
+	int l;
+	struct omap_hwmod *oh;
+	struct omap_device *od;
+	char oh_name[MAX_OMAP_I2C_HWMOD_NAME_LEN];
+	struct omap_i2c_bus_platform_data *pdata;
 
-	res = pdev->resource;
-
-	if (!cpu_is_omap44xx())
-		irq = omap2_i2c_irq;
-	else
-		irq = omap4_i2c_irq;
-
-	if (bus_id == 1) {
-		res[0].start = OMAP2_I2C_BASE1;
-		res[0].end = res[0].start + OMAP_I2C_SIZE;
-	}
-
-	res[1].start = irq[bus_id - 1];
 	omap2_i2c_mux_pins(bus_id);
 
+	l = snprintf(oh_name, MAX_OMAP_I2C_HWMOD_NAME_LEN, "i2c%d", bus_id);
+	WARN(l >= MAX_OMAP_I2C_HWMOD_NAME_LEN,
+		"String buffer overflow in I2C%d device setup\n", bus_id);
+	oh = omap_hwmod_lookup(oh_name);
+	if (!oh) {
+		pr_err("Could not look up %s\n", oh_name);
+		return -EEXIST;
+	}
+
+	pdata = &i2c_pdata[bus_id - 1];
 	/*
 	 * When waiting for completion of a i2c transfer, we need to
 	 * set a wake up latency constraint for the MPU. This is to
 	 * ensure quick enough wakeup from idle, when transfer
 	 * completes.
+	 * Only omap3 has support for constraints
 	 */
-	if (cpu_is_omap34xx()) {
-		struct omap_i2c_bus_platform_data *pd;
+	if (cpu_is_omap34xx())
+		pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
+	od = omap_device_build(name, bus_id, oh, pdata,
+			sizeof(struct omap_i2c_bus_platform_data),
+			omap_i2c_latency, ARRAY_SIZE(omap_i2c_latency), 0);
+	WARN(IS_ERR(od), "Could not build omap_device for %s\n", name);
 
-		pd = pdev->dev.platform_data;
-		pd->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
-	}
-
-	return platform_device_register(pdev);
+	return PTR_ERR(od);
 }
+#else
+static inline int omap2_i2c_add_bus(int bus_id)
+{
+	return 0;
+}
+#endif
 
 static int __init omap_i2c_add_bus(int bus_id)
 {
-	struct platform_device *pdev;
-
-	pdev = &omap_i2c_devices[bus_id - 1];
-
 	if (cpu_class_is_omap1())
-		return omap1_i2c_add_bus(pdev, bus_id);
+		return omap1_i2c_add_bus(bus_id);
 	else
-		return omap2_i2c_add_bus(pdev, bus_id);
+		return omap2_i2c_add_bus(bus_id);
 }
 
 /**
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index 4b2028a..256ab3f 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -31,18 +31,18 @@
 #define CK_1510		(1 << 2)
 #define CK_16XX		(1 << 3)	/* 16xx, 17xx, 5912 */
 #define CK_242X		(1 << 4)
-#define CK_243X		(1 << 5)
-#define CK_3XXX		(1 << 6)	/* OMAP3 + AM3 common clocks*/
-#define CK_343X		(1 << 7)	/* OMAP34xx common clocks */
-#define CK_3430ES1	(1 << 8)	/* 34xxES1 only */
-#define CK_3430ES2	(1 << 9)	/* 34xxES2, ES3, non-Sitara 35xx only */
-#define CK_3505		(1 << 10)
-#define CK_3517		(1 << 11)
-#define CK_36XX		(1 << 12)	/* OMAP36xx/37xx-specific clocks */
-#define CK_443X		(1 << 13)
+#define CK_243X		(1 << 5)	/* 243x, 253x */
+#define CK_3430ES1	(1 << 6)	/* 34xxES1 only */
+#define CK_3430ES2PLUS	(1 << 7)	/* 34xxES2, ES3, non-Sitara 35xx only */
+#define CK_3505		(1 << 8)
+#define CK_3517		(1 << 9)
+#define CK_36XX		(1 << 10)	/* 36xx/37xx-specific clocks */
+#define CK_443X		(1 << 11)
 
+
+#define CK_34XX		(CK_3430ES1 | CK_3430ES2PLUS)
 #define CK_AM35XX	(CK_3505 | CK_3517)	/* all Sitara AM35xx */
-
+#define CK_3XXX		(CK_34XX | CK_AM35XX | CK_36XX)
 
 
 #endif
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index fef4696..8eb0ada 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -49,13 +49,18 @@
 /* struct clksel_rate.flags possibilities */
 #define RATE_IN_242X		(1 << 0)
 #define RATE_IN_243X		(1 << 1)
-#define RATE_IN_3XXX		(1 << 2)	/* rates common to all OMAP3 */
-#define RATE_IN_3430ES2		(1 << 3)	/* 3430ES2 rates only */
+#define RATE_IN_3430ES1		(1 << 2)	/* 3430ES1 rates only */
+#define RATE_IN_3430ES2PLUS	(1 << 3)	/* 3430 ES >= 2 rates only */
 #define RATE_IN_36XX		(1 << 4)
 #define RATE_IN_4430		(1 << 5)
 
 #define RATE_IN_24XX		(RATE_IN_242X | RATE_IN_243X)
-#define RATE_IN_3430ES2PLUS	(RATE_IN_3430ES2 | RATE_IN_36XX)
+#define RATE_IN_34XX		(RATE_IN_3430ES1 | RATE_IN_3430ES2PLUS)
+#define RATE_IN_3XXX		(RATE_IN_34XX | RATE_IN_36XX)
+
+/* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */
+#define RATE_IN_3430ES2PLUS_36XX	(RATE_IN_3430ES2PLUS | RATE_IN_36XX)
+
 
 /**
  * struct clksel_rate - register bitfield values corresponding to clk divisors
@@ -119,8 +124,7 @@
  *
  * Possible values for @flags:
  * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs)
- * NO_DCO_SEL: don't program DCO (only for some J-type DPLLs)
-
+ *
  * @freqsel_mask is only used on the OMAP34xx family and AM35xx.
  *
  * XXX Some DPLLs have multiple bypass inputs, so it's not technically
@@ -156,6 +160,8 @@
 	u32			autoidle_mask;
 	u32			freqsel_mask;
 	u32			idlest_mask;
+	u32			dco_mask;
+	u32			sddiv_mask;
 	u8			auto_recal_bit;
 	u8			recal_en_bit;
 	u8			recal_st_bit;
diff --git a/arch/arm/plat-omap/include/plat/clockdomain.h b/arch/arm/plat-omap/include/plat/clockdomain.h
deleted file mode 100644
index ba0a6c0..0000000
--- a/arch/arm/plat-omap/include/plat/clockdomain.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * arch/arm/plat-omap/include/mach/clockdomain.h
- *
- * OMAP2/3 clockdomain framework functions
- *
- * Copyright (C) 2008 Texas Instruments, Inc.
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARM_ARCH_OMAP_CLOCKDOMAIN_H
-#define __ASM_ARM_ARCH_OMAP_CLOCKDOMAIN_H
-
-#include <plat/powerdomain.h>
-#include <plat/clock.h>
-#include <plat/cpu.h>
-
-/* Clockdomain capability flags */
-#define CLKDM_CAN_FORCE_SLEEP			(1 << 0)
-#define CLKDM_CAN_FORCE_WAKEUP			(1 << 1)
-#define CLKDM_CAN_ENABLE_AUTO			(1 << 2)
-#define CLKDM_CAN_DISABLE_AUTO			(1 << 3)
-
-#define CLKDM_CAN_HWSUP		(CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
-#define CLKDM_CAN_SWSUP		(CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
-#define CLKDM_CAN_HWSUP_SWSUP	(CLKDM_CAN_SWSUP | CLKDM_CAN_HWSUP)
-
-/* OMAP24XX CM_CLKSTCTRL_*.AUTOSTATE_* register bit values */
-#define OMAP24XX_CLKSTCTRL_DISABLE_AUTO		0x0
-#define OMAP24XX_CLKSTCTRL_ENABLE_AUTO		0x1
-
-/* OMAP3XXX CM_CLKSTCTRL_*.CLKTRCTRL_* register bit values */
-#define OMAP34XX_CLKSTCTRL_DISABLE_AUTO		0x0
-#define OMAP34XX_CLKSTCTRL_FORCE_SLEEP		0x1
-#define OMAP34XX_CLKSTCTRL_FORCE_WAKEUP		0x2
-#define OMAP34XX_CLKSTCTRL_ENABLE_AUTO		0x3
-
-/**
- * struct clkdm_autodep - clkdm deps to add when entering/exiting hwsup mode
- * @clkdm: clockdomain to add wkdep+sleepdep on - set name member only
- * @omap_chip: OMAP chip types that this autodep is valid on
- *
- * A clockdomain that should have wkdeps and sleepdeps added when a
- * clockdomain should stay active in hwsup mode; and conversely,
- * removed when the clockdomain should be allowed to go inactive in
- * hwsup mode.
- *
- * Autodeps are deprecated and should be removed after
- * omap_hwmod-based fine-grained module idle control is added.
- */
-struct clkdm_autodep {
-	union {
-		const char *name;
-		struct clockdomain *ptr;
-	} clkdm;
-	const struct omap_chip_id omap_chip;
-};
-
-/**
- * struct clkdm_dep - encode dependencies between clockdomains
- * @clkdm_name: clockdomain name
- * @clkdm: pointer to the struct clockdomain of @clkdm_name
- * @omap_chip: OMAP chip types that this dependency is valid on
- * @wkdep_usecount: Number of wakeup dependencies causing this clkdm to wake
- * @sleepdep_usecount: Number of sleep deps that could prevent clkdm from idle
- *
- * Statically defined.  @clkdm is resolved from @clkdm_name at runtime and
- * should not be pre-initialized.
- *
- * XXX Should also include hardware (fixed) dependencies.
- */
-struct clkdm_dep {
-	const char *clkdm_name;
-	struct clockdomain *clkdm;
-	atomic_t wkdep_usecount;
-	atomic_t sleepdep_usecount;
-	const struct omap_chip_id omap_chip;
-};
-
-/**
- * struct clockdomain - OMAP clockdomain
- * @name: clockdomain name
- * @pwrdm: powerdomain containing this clockdomain
- * @clktrctrl_reg: CLKSTCTRL reg for the given clock domain
- * @clktrctrl_mask: CLKTRCTRL/AUTOSTATE field mask in CM_CLKSTCTRL reg
- * @flags: Clockdomain capability flags
- * @dep_bit: Bit shift of this clockdomain's PM_WKDEP/CM_SLEEPDEP bit
- * @wkdep_srcs: Clockdomains that can be told to wake this powerdomain up
- * @sleepdep_srcs: Clockdomains that can be told to keep this clkdm from inact
- * @omap_chip: OMAP chip types that this clockdomain is valid on
- * @usecount: Usecount tracking
- * @node: list_head to link all clockdomains together
- */
-struct clockdomain {
-	const char *name;
-	union {
-		const char *name;
-		struct powerdomain *ptr;
-	} pwrdm;
-	void __iomem *clkstctrl_reg;
-	const u16 clktrctrl_mask;
-	const u8 flags;
-	const u8 dep_bit;
-	struct clkdm_dep *wkdep_srcs;
-	struct clkdm_dep *sleepdep_srcs;
-	const struct omap_chip_id omap_chip;
-	atomic_t usecount;
-	struct list_head node;
-};
-
-void clkdm_init(struct clockdomain **clkdms, struct clkdm_autodep *autodeps);
-struct clockdomain *clkdm_lookup(const char *name);
-
-int clkdm_for_each(int (*fn)(struct clockdomain *clkdm, void *user),
-			void *user);
-struct powerdomain *clkdm_get_pwrdm(struct clockdomain *clkdm);
-
-int clkdm_add_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
-int clkdm_del_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
-int clkdm_read_wkdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
-int clkdm_clear_all_wkdeps(struct clockdomain *clkdm);
-int clkdm_add_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
-int clkdm_del_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
-int clkdm_read_sleepdep(struct clockdomain *clkdm1, struct clockdomain *clkdm2);
-int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm);
-
-void omap2_clkdm_allow_idle(struct clockdomain *clkdm);
-void omap2_clkdm_deny_idle(struct clockdomain *clkdm);
-
-int omap2_clkdm_wakeup(struct clockdomain *clkdm);
-int omap2_clkdm_sleep(struct clockdomain *clkdm);
-
-int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk);
-int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk);
-
-#endif
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index a9d69a0..6b8088e 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -27,6 +27,8 @@
 #ifndef __ARCH_ARM_MACH_OMAP_COMMON_H
 #define __ARCH_ARM_MACH_OMAP_COMMON_H
 
+#include <linux/delay.h>
+
 #include <plat/i2c.h>
 
 struct sys_timer;
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
index c915a66..537f4e4 100644
--- a/arch/arm/plat-omap/include/plat/display.h
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -42,6 +42,10 @@
 #define DISPC_IRQ_SYNC_LOST		(1 << 14)
 #define DISPC_IRQ_SYNC_LOST_DIGIT	(1 << 15)
 #define DISPC_IRQ_WAKEUP		(1 << 16)
+#define DISPC_IRQ_SYNC_LOST2		(1 << 17)
+#define DISPC_IRQ_VSYNC2		(1 << 18)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2	(1 << 21)
+#define DISPC_IRQ_FRAMEDONE2		(1 << 22)
 
 struct omap_dss_device;
 struct omap_overlay_manager;
@@ -64,6 +68,7 @@
 enum omap_channel {
 	OMAP_DSS_CHANNEL_LCD	= 0,
 	OMAP_DSS_CHANNEL_DIGIT	= 1,
+	OMAP_DSS_CHANNEL_LCD2	= 2,
 };
 
 enum omap_color_mode {
@@ -142,6 +147,7 @@
 enum omap_dss_overlay_managers {
 	OMAP_DSS_OVL_MGR_LCD,
 	OMAP_DSS_OVL_MGR_TV,
+	OMAP_DSS_OVL_MGR_LCD2,
 };
 
 enum omap_dss_rotation_type {
@@ -268,6 +274,7 @@
 	u16 out_width;	/* if 0, out_width == width */
 	u16 out_height;	/* if 0, out_height == height */
 	u8 global_alpha;
+	u8 pre_mult_alpha;
 };
 
 struct omap_overlay {
@@ -351,6 +358,8 @@
 
 	enum omap_display_type type;
 
+	enum omap_channel channel;
+
 	union {
 		struct {
 			u8 data_lines;
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 0cce4ca..d1c916f 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -21,142 +21,16 @@
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H
 
+#include <linux/platform_device.h>
+
+/*
+ * TODO: These dma channel defines should go away once all
+ * the omap drivers are hwmod adapted.
+ */
+
 /* Move omap4 specific defines to dma-44xx.h */
 #include "dma-44xx.h"
 
-/* Hardware registers for omap1 */
-#define OMAP1_DMA_BASE			(0xfffed800)
-
-#define OMAP1_DMA_GCR			0x400
-#define OMAP1_DMA_GSCR			0x404
-#define OMAP1_DMA_GRST			0x408
-#define OMAP1_DMA_HW_ID			0x442
-#define OMAP1_DMA_PCH2_ID		0x444
-#define OMAP1_DMA_PCH0_ID		0x446
-#define OMAP1_DMA_PCH1_ID		0x448
-#define OMAP1_DMA_PCHG_ID		0x44a
-#define OMAP1_DMA_PCHD_ID		0x44c
-#define OMAP1_DMA_CAPS_0_U		0x44e
-#define OMAP1_DMA_CAPS_0_L		0x450
-#define OMAP1_DMA_CAPS_1_U		0x452
-#define OMAP1_DMA_CAPS_1_L		0x454
-#define OMAP1_DMA_CAPS_2		0x456
-#define OMAP1_DMA_CAPS_3		0x458
-#define OMAP1_DMA_CAPS_4		0x45a
-#define OMAP1_DMA_PCH2_SR		0x460
-#define OMAP1_DMA_PCH0_SR		0x480
-#define OMAP1_DMA_PCH1_SR		0x482
-#define OMAP1_DMA_PCHD_SR		0x4c0
-
-/* Hardware registers for omap2 and omap3 */
-#define OMAP24XX_DMA4_BASE		(L4_24XX_BASE + 0x56000)
-#define OMAP34XX_DMA4_BASE		(L4_34XX_BASE + 0x56000)
-#define OMAP44XX_DMA4_BASE		(L4_44XX_BASE + 0x56000)
-
-#define OMAP_DMA4_REVISION		0x00
-#define OMAP_DMA4_GCR			0x78
-#define OMAP_DMA4_IRQSTATUS_L0		0x08
-#define OMAP_DMA4_IRQSTATUS_L1		0x0c
-#define OMAP_DMA4_IRQSTATUS_L2		0x10
-#define OMAP_DMA4_IRQSTATUS_L3		0x14
-#define OMAP_DMA4_IRQENABLE_L0		0x18
-#define OMAP_DMA4_IRQENABLE_L1		0x1c
-#define OMAP_DMA4_IRQENABLE_L2		0x20
-#define OMAP_DMA4_IRQENABLE_L3		0x24
-#define OMAP_DMA4_SYSSTATUS		0x28
-#define OMAP_DMA4_OCP_SYSCONFIG		0x2c
-#define OMAP_DMA4_CAPS_0		0x64
-#define OMAP_DMA4_CAPS_2		0x6c
-#define OMAP_DMA4_CAPS_3		0x70
-#define OMAP_DMA4_CAPS_4		0x74
-
-#define OMAP1_LOGICAL_DMA_CH_COUNT	17
-#define OMAP_DMA4_LOGICAL_DMA_CH_COUNT	32	/* REVISIT: Is this 32 + 2? */
-
-/* Common channel specific registers for omap1 */
-#define OMAP1_DMA_CH_BASE(n)		(0x40 * (n) + 0x00)
-#define OMAP1_DMA_CSDP(n)		(0x40 * (n) + 0x00)
-#define OMAP1_DMA_CCR(n)		(0x40 * (n) + 0x02)
-#define OMAP1_DMA_CICR(n)		(0x40 * (n) + 0x04)
-#define OMAP1_DMA_CSR(n)		(0x40 * (n) + 0x06)
-#define OMAP1_DMA_CEN(n)		(0x40 * (n) + 0x10)
-#define OMAP1_DMA_CFN(n)		(0x40 * (n) + 0x12)
-#define OMAP1_DMA_CSFI(n)		(0x40 * (n) + 0x14)
-#define OMAP1_DMA_CSEI(n)		(0x40 * (n) + 0x16)
-#define OMAP1_DMA_CPC(n)		(0x40 * (n) + 0x18)	/* 15xx only */
-#define OMAP1_DMA_CSAC(n)		(0x40 * (n) + 0x18)
-#define OMAP1_DMA_CDAC(n)		(0x40 * (n) + 0x1a)
-#define OMAP1_DMA_CDEI(n)		(0x40 * (n) + 0x1c)
-#define OMAP1_DMA_CDFI(n)		(0x40 * (n) + 0x1e)
-#define OMAP1_DMA_CLNK_CTRL(n)		(0x40 * (n) + 0x28)
-
-/* Common channel specific registers for omap2 */
-#define OMAP_DMA4_CH_BASE(n)		(0x60 * (n) + 0x80)
-#define OMAP_DMA4_CCR(n)		(0x60 * (n) + 0x80)
-#define OMAP_DMA4_CLNK_CTRL(n)		(0x60 * (n) + 0x84)
-#define OMAP_DMA4_CICR(n)		(0x60 * (n) + 0x88)
-#define OMAP_DMA4_CSR(n)		(0x60 * (n) + 0x8c)
-#define OMAP_DMA4_CSDP(n)		(0x60 * (n) + 0x90)
-#define OMAP_DMA4_CEN(n)		(0x60 * (n) + 0x94)
-#define OMAP_DMA4_CFN(n)		(0x60 * (n) + 0x98)
-#define OMAP_DMA4_CSEI(n)		(0x60 * (n) + 0xa4)
-#define OMAP_DMA4_CSFI(n)		(0x60 * (n) + 0xa8)
-#define OMAP_DMA4_CDEI(n)		(0x60 * (n) + 0xac)
-#define OMAP_DMA4_CDFI(n)		(0x60 * (n) + 0xb0)
-#define OMAP_DMA4_CSAC(n)		(0x60 * (n) + 0xb4)
-#define OMAP_DMA4_CDAC(n)		(0x60 * (n) + 0xb8)
-
-/* Channel specific registers only on omap1 */
-#define OMAP1_DMA_CSSA_L(n)		(0x40 * (n) + 0x08)
-#define OMAP1_DMA_CSSA_U(n)		(0x40 * (n) + 0x0a)
-#define OMAP1_DMA_CDSA_L(n)		(0x40 * (n) + 0x0c)
-#define OMAP1_DMA_CDSA_U(n)		(0x40 * (n) + 0x0e)
-#define OMAP1_DMA_COLOR_L(n)		(0x40 * (n) + 0x20)
-#define OMAP1_DMA_COLOR_U(n)		(0x40 * (n) + 0x22)
-#define OMAP1_DMA_CCR2(n)		(0x40 * (n) + 0x24)
-#define OMAP1_DMA_LCH_CTRL(n)		(0x40 * (n) + 0x2a)	/* not on 15xx */
-#define OMAP1_DMA_CCEN(n)		0
-#define OMAP1_DMA_CCFN(n)		0
-
-/* Channel specific registers only on omap2 */
-#define OMAP_DMA4_CSSA(n)		(0x60 * (n) + 0x9c)
-#define OMAP_DMA4_CDSA(n)		(0x60 * (n) + 0xa0)
-#define OMAP_DMA4_CCEN(n)		(0x60 * (n) + 0xbc)
-#define OMAP_DMA4_CCFN(n)		(0x60 * (n) + 0xc0)
-#define OMAP_DMA4_COLOR(n)		(0x60 * (n) + 0xc4)
-
-/* Additional registers available on OMAP4 */
-#define OMAP_DMA4_CDP(n)		(0x60 * (n) + 0xd0)
-#define OMAP_DMA4_CNDP(n)		(0x60 * (n) + 0xd4)
-#define OMAP_DMA4_CCDN(n)		(0x60 * (n) + 0xd8)
-
-/* Dummy defines to keep multi-omap compiles happy */
-#define OMAP1_DMA_REVISION		0
-#define OMAP1_DMA_IRQSTATUS_L0		0
-#define OMAP1_DMA_IRQENABLE_L0		0
-#define OMAP1_DMA_OCP_SYSCONFIG		0
-#define OMAP_DMA4_HW_ID			0
-#define OMAP_DMA4_CAPS_0_L		0
-#define OMAP_DMA4_CAPS_0_U		0
-#define OMAP_DMA4_CAPS_1_L		0
-#define OMAP_DMA4_CAPS_1_U		0
-#define OMAP_DMA4_GSCR			0
-#define OMAP_DMA4_CPC(n)		0
-
-#define OMAP_DMA4_LCH_CTRL(n)		0
-#define OMAP_DMA4_COLOR_L(n)		0
-#define OMAP_DMA4_COLOR_U(n)		0
-#define OMAP_DMA4_CCR2(n)		0
-#define OMAP1_DMA_CSSA(n)		0
-#define OMAP1_DMA_CDSA(n)		0
-#define OMAP_DMA4_CSSA_L(n)		0
-#define OMAP_DMA4_CSSA_U(n)		0
-#define OMAP_DMA4_CDSA_L(n)		0
-#define OMAP_DMA4_CDSA_U(n)		0
-#define OMAP1_DMA_COLOR(n)		0
-
-/*----------------------------------------------------------------------------*/
-
 /* DMA channels for omap1 */
 #define OMAP_DMA_NO_DEVICE		0
 #define OMAP_DMA_MCSI1_TX		1
@@ -405,6 +279,63 @@
 #define DMA_CH_PRIO_HIGH		0x1
 #define DMA_CH_PRIO_LOW			0x0 /* Def */
 
+/* Errata handling */
+#define IS_DMA_ERRATA(id)		(errata & (id))
+#define SET_DMA_ERRATA(id)		(errata |= (id))
+
+#define DMA_ERRATA_IFRAME_BUFFERING	BIT(0x0)
+#define DMA_ERRATA_PARALLEL_CHANNELS	BIT(0x1)
+#define DMA_ERRATA_i378			BIT(0x2)
+#define DMA_ERRATA_i541			BIT(0x3)
+#define DMA_ERRATA_i88			BIT(0x4)
+#define DMA_ERRATA_3_3			BIT(0x5)
+#define DMA_ROMCODE_BUG			BIT(0x6)
+
+/* Attributes for OMAP DMA Controller */
+#define DMA_LINKED_LCH			BIT(0x0)
+#define GLOBAL_PRIORITY			BIT(0x1)
+#define RESERVE_CHANNEL			BIT(0x2)
+#define IS_CSSA_32			BIT(0x3)
+#define IS_CDSA_32			BIT(0x4)
+#define IS_RW_PRIORITY			BIT(0x5)
+#define ENABLE_1510_MODE		BIT(0x6)
+#define SRC_PORT			BIT(0x7)
+#define DST_PORT			BIT(0x8)
+#define SRC_INDEX			BIT(0x9)
+#define DST_INDEX			BIT(0xA)
+#define IS_BURST_ONLY4			BIT(0xB)
+#define CLEAR_CSR_ON_READ		BIT(0xC)
+#define IS_WORD_16			BIT(0xD)
+
+enum omap_reg_offsets {
+
+GCR,		GSCR,		GRST1,		HW_ID,
+PCH2_ID,	PCH0_ID,	PCH1_ID,	PCHG_ID,
+PCHD_ID,	CAPS_0,		CAPS_1,		CAPS_2,
+CAPS_3,		CAPS_4,		PCH2_SR,	PCH0_SR,
+PCH1_SR,	PCHD_SR,	REVISION,	IRQSTATUS_L0,
+IRQSTATUS_L1,	IRQSTATUS_L2,	IRQSTATUS_L3,	IRQENABLE_L0,
+IRQENABLE_L1,	IRQENABLE_L2,	IRQENABLE_L3,	SYSSTATUS,
+OCP_SYSCONFIG,
+
+/* omap1+ specific */
+CPC, CCR2, LCH_CTRL,
+
+/* Common registers for all OMAPs */
+CSDP,		CCR,		CICR,		CSR,
+CEN,		CFN,		CSFI,		CSEI,
+CSAC,		CDAC,		CDEI,
+CDFI,		CLNK_CTRL,
+
+/* Channel specific registers */
+CSSA,		CDSA,		COLOR,
+CCEN,		CCFN,
+
+/* omap3630 and omap4 specific */
+CDP,		CNDP,		CCDN,
+
+};
+
 enum omap_dma_burst_mode {
 	OMAP_DMA_DATA_BURST_DIS = 0,
 	OMAP_DMA_DATA_BURST_4,
@@ -470,6 +401,41 @@
 #endif
 };
 
+struct omap_dma_lch {
+	int next_lch;
+	int dev_id;
+	u16 saved_csr;
+	u16 enabled_irqs;
+	const char *dev_name;
+	void (*callback)(int lch, u16 ch_status, void *data);
+	void *data;
+	long flags;
+	/* required for Dynamic chaining */
+	int prev_linked_ch;
+	int next_linked_ch;
+	int state;
+	int chain_id;
+	int status;
+};
+
+struct omap_dma_dev_attr {
+	u32 dev_caps;
+	u16 lch_count;
+	u16 chan_count;
+	struct omap_dma_lch *chan;
+};
+
+/* System DMA platform data structure */
+struct omap_system_dma_plat_info {
+	struct omap_dma_dev_attr *dma_attr;
+	u32 errata;
+	void (*disable_irq_lch)(int lch);
+	void (*show_dma_caps)(void);
+	void (*clear_lch_regs)(int lch);
+	void (*clear_dma)(int lch);
+	void (*dma_write)(u32 val, int reg, int lch);
+	u32 (*dma_read)(int reg, int lch);
+};
 
 extern void omap_set_dma_priority(int lch, int dst_port, int priority);
 extern int omap_request_dma(int dev_id, const char *dev_name,
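As a rough illustration (not part of this patch) of how the new accessor-based platform data might be wired up on an OMAP2+ variant: dma_base, reg_map and the chosen strides are assumptions, although the 0x60/0x80/0x90/0x94 values mirror the OMAP_DMA4_* macros removed above.

#include <linux/io.h>
#include <plat/dma.h>

static void __iomem *dma_base;	/* assumed to be ioremapped during SoC init */

/* hypothetical per-channel offset table, indexed by enum omap_reg_offsets */
static const u16 reg_map[] = {
	[CCR]	= 0x80,
	[CSDP]	= 0x90,
	[CEN]	= 0x94,
	/* ... remaining channel registers ... */
};

static void omap2_dma_write(u32 val, int reg, int lch)
{
	__raw_writel(val, dma_base + reg_map[reg] + 0x60 * lch);
}

static u32 omap2_dma_read(int reg, int lch)
{
	return __raw_readl(dma_base + reg_map[reg] + 0x60 * lch);
}

static struct omap_system_dma_plat_info omap2_dma_pdata = {
	.errata		= DMA_ERRATA_i378,	/* example errata bit only */
	.dma_write	= omap2_dma_write,
	.dma_read	= omap2_dma_read,
};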
diff --git a/arch/arm/plat-omap/include/plat/fpga.h b/arch/arm/plat-omap/include/plat/fpga.h
index f1864a6..ae39bcb 100644
--- a/arch/arm/plat-omap/include/plat/fpga.h
+++ b/arch/arm/plat-omap/include/plat/fpga.h
@@ -19,11 +19,7 @@
 #ifndef __ASM_ARCH_OMAP_FPGA_H
 #define __ASM_ARCH_OMAP_FPGA_H
 
-#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
 extern void omap1510_fpga_init_irq(void);
-#else
-#define omap1510_fpga_init_irq()	(0)
-#endif
 
 #define fpga_read(reg)			__raw_readb(reg)
 #define fpga_write(val, reg)		__raw_writeb(val, reg)
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index de1c604..d6f9fa0 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -27,26 +27,15 @@
 #define __ASM_ARCH_OMAP_GPIO_H
 
 #include <linux/io.h>
+#include <linux/platform_device.h>
 #include <mach/irqs.h>
 
 #define OMAP1_MPUIO_BASE			0xfffb5000
 
-#if (defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850))
-
-#define OMAP_MPUIO_INPUT_LATCH		0x00
-#define OMAP_MPUIO_OUTPUT		0x02
-#define OMAP_MPUIO_IO_CNTL		0x04
-#define OMAP_MPUIO_KBR_LATCH		0x08
-#define OMAP_MPUIO_KBC			0x0a
-#define OMAP_MPUIO_GPIO_EVENT_MODE	0x0c
-#define OMAP_MPUIO_GPIO_INT_EDGE	0x0e
-#define OMAP_MPUIO_KBD_INT		0x10
-#define OMAP_MPUIO_GPIO_INT		0x12
-#define OMAP_MPUIO_KBD_MASKIT		0x14
-#define OMAP_MPUIO_GPIO_MASKIT		0x16
-#define OMAP_MPUIO_GPIO_DEBOUNCING	0x18
-#define OMAP_MPUIO_LATCH		0x1a
-#else
+/*
+ * These are the omap15xx/16xx offsets. The omap7xx offsets are
+ * these values divided by 2.
+ */
 #define OMAP_MPUIO_INPUT_LATCH		0x00
 #define OMAP_MPUIO_OUTPUT		0x04
 #define OMAP_MPUIO_IO_CNTL		0x08
@@ -60,7 +49,6 @@
 #define OMAP_MPUIO_GPIO_MASKIT		0x2c
 #define OMAP_MPUIO_GPIO_DEBOUNCING	0x30
 #define OMAP_MPUIO_LATCH		0x34
-#endif
 
 #define OMAP34XX_NR_GPIOS		6
 
@@ -71,8 +59,30 @@
 				 IH_MPUIO_BASE + ((nr) & 0x0f) : \
 				 IH_GPIO_BASE + (nr))
 
-extern int omap_gpio_init(void);	/* Call from board init only */
-extern void omap2_gpio_prepare_for_idle(int power_state);
+#define METHOD_MPUIO		0
+#define METHOD_GPIO_1510	1
+#define METHOD_GPIO_1610	2
+#define METHOD_GPIO_7XX		3
+#define METHOD_GPIO_24XX	5
+#define METHOD_GPIO_44XX	6
+
+struct omap_gpio_dev_attr {
+	int bank_width;		/* GPIO bank width */
+	bool dbck_flag;		/* dbck required or not - True for OMAP3&4 */
+};
+
+struct omap_gpio_platform_data {
+	u16 virtual_irq_start;
+	int bank_type;
+	int bank_width;		/* GPIO bank width */
+	int bank_stride;	/* Only needed for omap1 MPUIO */
+	bool dbck_flag;		/* dbck required or not - True for OMAP3&4 */
+};
+
+/* TODO: Analyze removing gpio_bank_count usage from driver code */
+extern int gpio_bank_count;
+
+extern void omap2_gpio_prepare_for_idle(int off_mode);
 extern void omap2_gpio_resume_after_idle(void);
 extern void omap_set_gpio_debounce(int gpio, int enable);
 extern void omap_set_gpio_debounce_time(int gpio, int enable);
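A minimal sketch of per-bank platform data using the new struct; the "omap_gpio" device name and the field values are assumptions, not taken from this patch.

#include <linux/platform_device.h>
#include <plat/gpio.h>

static struct omap_gpio_platform_data omap2_gpio_bank0_pdata = {
	.virtual_irq_start	= IH_GPIO_BASE,
	.bank_type		= METHOD_GPIO_24XX,
	.bank_width		= 32,
	.dbck_flag		= true,		/* OMAP3/4 style banks need dbck */
};

static struct platform_device omap2_gpio_bank0_device = {
	.name	= "omap_gpio",			/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data	= &omap2_gpio_bank0_pdata,
	},
};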
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 9fd99b9..85ded59 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -80,12 +80,12 @@
 #define GPMC_PREFETCH_STATUS_COUNT(val)	(val & 0x00003fff)
 
 /*
- * Note that all values in this struct are in nanoseconds, while
- * the register values are in gpmc_fck cycles.
+ * Note that all values in this struct are in nanoseconds except sync_clk
+ * (which is in picoseconds), while the register values are in gpmc_fck cycles.
  */
 struct gpmc_timings {
-	/* Minimum clock period for synchronous mode */
-	u16 sync_clk;
+	/* Minimum clock period for synchronous mode (in picoseconds) */
+	u32 sync_clk;
 
 	/* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
 	u16 cs_on;		/* Assertion time */
@@ -117,6 +117,7 @@
 };
 
 extern unsigned int gpmc_ns_to_ticks(unsigned int time_ns);
+extern unsigned int gpmc_ps_to_ticks(unsigned int time_ps);
 extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
 extern unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns);
 extern unsigned long gpmc_get_fclk_period(void);
diff --git a/arch/arm/plat-omap/include/plat/i2c.h b/arch/arm/plat-omap/include/plat/i2c.h
index 36a0bef..878d632 100644
--- a/arch/arm/plat-omap/include/plat/i2c.h
+++ b/arch/arm/plat-omap/include/plat/i2c.h
@@ -36,6 +36,19 @@
 }
 #endif
 
+/**
+ * struct omap_i2c_dev_attr - OMAP I2C controller device attributes for omap_hwmod
+ * @fifo_depth: total controller FIFO size (in bytes)
+ * @flags: differences in hardware support capability
+ *
+ * @fifo_depth represents what exists on the hardware, not what is
+ * actually configured at runtime by the device driver.
+ */
+struct omap_i2c_dev_attr {
+	u8	fifo_depth;
+	u8	flags;
+};
+
 void __init omap1_i2c_mux_pins(int bus_id);
 void __init omap2_i2c_mux_pins(int bus_id);
 
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index 204865f..ef4106c 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -291,8 +291,9 @@
 }
 #endif
 
-extern void omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
-				 struct omap_sdrc_params *sdrc_cs1);
+extern void omap2_init_common_infrastructure(void);
+extern void omap2_init_common_devices(struct omap_sdrc_params *sdrc_cs0,
+				      struct omap_sdrc_params *sdrc_cs1);
 
 #define __arch_ioremap	omap_ioremap
 #define __arch_iounmap	omap_iounmap
diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h
index 33c7d41..69230d6 100644
--- a/arch/arm/plat-omap/include/plat/iommu.h
+++ b/arch/arm/plat-omap/include/plat/iommu.h
@@ -50,6 +50,8 @@
 	int (*isr)(struct iommu *obj);
 
 	void *ctx; /* iommu context: registres saved area */
+	u32 da_start;
+	u32 da_end;
 };
 
 struct cr_regs {
@@ -103,6 +105,8 @@
 	const char *name;
 	const char *clk_name;
 	const int nr_tlb_entries;
+	u32 da_start;
+	u32 da_end;
 };
 
 #if defined(CONFIG_ARCH_OMAP1)
@@ -152,6 +156,7 @@
 extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
 extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
 
+extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
 extern struct iommu *iommu_get(const char *name);
 extern void iommu_put(struct iommu *obj);
 
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index 65e20a6..2910de9 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -77,7 +77,7 @@
 /*
  * OMAP-1610 specific IRQ numbers for interrupt handler 1
  */
-#define INT_1610_IH2_IRQ	0
+#define INT_1610_IH2_IRQ	INT_1510_IH2_IRQ
 #define INT_1610_IH2_FIQ	2
 #define INT_1610_McBSP2_TX	4
 #define INT_1610_McBSP2_RX	5
diff --git a/arch/arm/plat-omap/include/plat/keypad.h b/arch/arm/plat-omap/include/plat/keypad.h
index 3ae52cc..793ce9d 100644
--- a/arch/arm/plat-omap/include/plat/keypad.h
+++ b/arch/arm/plat-omap/include/plat/keypad.h
@@ -10,16 +10,18 @@
 #ifndef ASMARM_ARCH_KEYPAD_H
 #define ASMARM_ARCH_KEYPAD_H
 
-#warning: Please update the board to use matrix_keypad.h instead
+#ifndef CONFIG_ARCH_OMAP1
+#warning Please update the board to use matrix-keypad driver
+#endif
+#include <linux/input/matrix_keypad.h>
 
 struct omap_kp_platform_data {
 	int rows;
 	int cols;
-	int *keymap;
-	unsigned int keymapsize;
-	unsigned int rep:1;
+	const struct matrix_keymap_data *keymap_data;
+	bool rep;
 	unsigned long delay;
-	unsigned int dbounce:1;
+	bool dbounce;
 	/* specific to OMAP242x*/
 	unsigned int *row_gpios;
 	unsigned int *col_gpios;
@@ -28,18 +30,21 @@
 /* Group (0..3) -- when multiple keys are pressed, only the
  * keys pressed in the same group are considered as pressed. This is
  * in order to workaround certain crappy HW designs that produce ghost
- * keypresses. */
-#define GROUP_0		(0 << 16)
-#define GROUP_1		(1 << 16)
-#define GROUP_2		(2 << 16)
-#define GROUP_3		(3 << 16)
+ * keypresses. Two free bits, not used by either row/col or keynum,
+ * must be available for use as group bits. The below GROUP_SHIFT
+ * macro definition is based on some prior knowledge of the
+ * matrix_keypad defined KEY() macro internals.
+ */
+#define GROUP_SHIFT	14
+#define GROUP_0		(0 << GROUP_SHIFT)
+#define GROUP_1		(1 << GROUP_SHIFT)
+#define GROUP_2		(2 << GROUP_SHIFT)
+#define GROUP_3		(3 << GROUP_SHIFT)
 #define GROUP_MASK	GROUP_3
+#if KEY_MAX & GROUP_MASK
+#error Group bits in conflict with keynum bits
+#endif
 
-#define KEY_PERSISTENT		0x00800000
-#define KEYNUM_MASK		0x00EFFFFF
-#define KEY(col, row, val) (((col) << 28) | ((row) << 24) | (val))
-#define PERSISTENT_KEY(col, row) (((col) << 28) | ((row) << 24) | \
-						KEY_PERSISTENT)
 
 #endif
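A minimal sketch of a board keymap under the new matrix_keypad-based scheme; the key codes, matrix size and delay are illustrative only.

#include <linux/input.h>
#include <plat/keypad.h>

static const uint32_t board_keymap[] = {
	KEY(0, 0, KEY_LEFT  | GROUP_0),
	KEY(0, 1, KEY_RIGHT | GROUP_0),
	KEY(1, 0, KEY_ENTER | GROUP_1),
};

static const struct matrix_keymap_data board_keymap_data = {
	.keymap		= board_keymap,
	.keymap_size	= ARRAY_SIZE(board_keymap),
};

static struct omap_kp_platform_data board_kp_data = {
	.rows		= 2,
	.cols		= 2,
	.keymap_data	= &board_keymap_data,
	.rep		= true,
	.delay		= 4,
};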
 
diff --git a/arch/arm/plat-omap/include/plat/l4_3xxx.h b/arch/arm/plat-omap/include/plat/l4_3xxx.h
new file mode 100644
index 0000000..5e19493
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/l4_3xxx.h
@@ -0,0 +1,24 @@
+/*
+ * arch/arm/plat-omap/include/plat/l4_3xxx.h - L4 firewall definitions
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_L4_3XXX_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_L4_3XXX_H
+
+/* L4 CORE */
+#define OMAP3_L4_CORE_FW_I2C1_REGION				21
+#define OMAP3_L4_CORE_FW_I2C1_TA_REGION				22
+#define OMAP3_L4_CORE_FW_I2C2_REGION				23
+#define OMAP3_L4_CORE_FW_I2C2_TA_REGION				24
+#define OMAP3_L4_CORE_FW_I2C3_REGION				73
+#define OMAP3_L4_CORE_FW_I2C3_TA_REGION				74
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/mailbox.h b/arch/arm/plat-omap/include/plat/mailbox.h
index 9976565..cc3921e 100644
--- a/arch/arm/plat-omap/include/plat/mailbox.h
+++ b/arch/arm/plat-omap/include/plat/mailbox.h
@@ -46,8 +46,8 @@
 	struct kfifo		fifo;
 	struct work_struct	work;
 	struct tasklet_struct	tasklet;
-	int	(*callback)(void *);
 	struct omap_mbox	*mbox;
+	bool full;
 };
 
 struct omap_mbox {
@@ -57,13 +57,15 @@
 	struct omap_mbox_ops	*ops;
 	struct device		*dev;
 	void			*priv;
+	int			use_count;
+	struct blocking_notifier_head   notifier;
 };
 
 int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg);
 void omap_mbox_init_seq(struct omap_mbox *);
 
-struct omap_mbox *omap_mbox_get(const char *);
-void omap_mbox_put(struct omap_mbox *);
+struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb);
+void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb);
 
 int omap_mbox_register(struct device *parent, struct omap_mbox **);
 int omap_mbox_unregister(void);
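A minimal sketch of a mailbox client under the new notifier-based API; the "mailbox-1" name, the ERR_PTR return convention and the notifier payload layout are assumptions, not guaranteed by this header.

#include <linux/err.h>
#include <linux/notifier.h>
#include <plat/mailbox.h>

static int demo_mbox_callback(struct notifier_block *nb,
			      unsigned long msg, void *priv)
{
	/* in this sketch, msg carries the received mbox_msg_t */
	pr_debug("mbox rx: 0x%lx\n", msg);
	return NOTIFY_OK;
}

static struct notifier_block demo_mbox_nb = {
	.notifier_call	= demo_mbox_callback,
};

static struct omap_mbox *demo_mbox;

static int demo_mbox_open(void)
{
	demo_mbox = omap_mbox_get("mailbox-1", &demo_mbox_nb);
	return IS_ERR(demo_mbox) ? PTR_ERR(demo_mbox) : 0;
}

static void demo_mbox_close(void)
{
	omap_mbox_put(demo_mbox, &demo_mbox_nb);
}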
diff --git a/arch/arm/plat-omap/include/plat/omap-pm.h b/arch/arm/plat-omap/include/plat/omap-pm.h
index 728fbb9..c0a7520 100644
--- a/arch/arm/plat-omap/include/plat/omap-pm.h
+++ b/arch/arm/plat-omap/include/plat/omap-pm.h
@@ -17,26 +17,7 @@
 #include <linux/device.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
-
-#include "powerdomain.h"
-
-/**
- * struct omap_opp - clock frequency-to-OPP ID table for DSP, MPU
- * @rate: target clock rate
- * @opp_id: OPP ID
- * @min_vdd: minimum VDD1 voltage (in millivolts) for this OPP
- *
- * Operating performance point data.  Can vary by OMAP chip and board.
- */
-struct omap_opp {
-	unsigned long rate;
-	u8 opp_id;
-	u16 min_vdd;
-};
-
-extern struct omap_opp *mpu_opps;
-extern struct omap_opp *dsp_opps;
-extern struct omap_opp *l3_opps;
+#include <linux/opp.h>
 
 /*
  * agent_id values for use with omap_pm_set_min_bus_tput():
@@ -59,9 +40,11 @@
  * framework starts.  The "_if_" is to avoid name collisions with the
  * PM idle-loop code.
  */
-int __init omap_pm_if_early_init(struct omap_opp *mpu_opp_table,
-				 struct omap_opp *dsp_opp_table,
-				 struct omap_opp *l3_opp_table);
+#ifdef CONFIG_OMAP_PM_NONE
+#define omap_pm_if_early_init() 0
+#else
+int __init omap_pm_if_early_init(void);
+#endif
 
 /**
  * omap_pm_if_init - OMAP PM init code called after clock fw init
@@ -69,7 +52,11 @@
  * The main initialization code.  OPP tables are passed in here.  The
  * "_if_" is to avoid name collisions with the PM idle-loop code.
  */
+#ifdef CONFIG_OMAP_PM_NONE
+#define omap_pm_if_init() 0
+#else
 int __init omap_pm_if_init(void);
+#endif
 
 /**
  * omap_pm_if_exit - OMAP PM exit code
@@ -363,9 +350,11 @@
  * driver must restore device context.   If the number of context losses
  * exceeds the maximum positive integer, the function will wrap to 0 and
  * continue counting.  Returns the number of context losses for this device,
- * or -EINVAL upon error.
+ * or zero upon error.
  */
-int omap_pm_get_dev_context_loss_count(struct device *dev);
+u32 omap_pm_get_dev_context_loss_count(struct device *dev);
 
+void omap_pm_enable_off_mode(void);
+void omap_pm_disable_off_mode(void);
 
 #endif
diff --git a/arch/arm/plat-omap/include/plat/omap-serial.h b/arch/arm/plat-omap/include/plat/omap-serial.h
index c8dae02..2682043 100644
--- a/arch/arm/plat-omap/include/plat/omap-serial.h
+++ b/arch/arm/plat-omap/include/plat/omap-serial.h
@@ -22,7 +22,7 @@
 
 #include <plat/mux.h>
 
-#define DRIVER_NAME	"omap-hsuart"
+#define DRIVER_NAME	"omap_uart"
 
 /*
  * Use tty device name as ttyO, [O -> OMAP]
@@ -31,20 +31,8 @@
  */
 #define OMAP_SERIAL_NAME	"ttyO"
 
-#define OMAP_MDR1_DISABLE	0x07
-#define OMAP_MDR1_MODE13X	0x03
-#define OMAP_MDR1_MODE16X	0x00
 #define OMAP_MODE13X_SPEED	230400
 
-/*
- * LCR = 0XBF: Switch to Configuration Mode B.
- * In configuration mode b allow access
- * to EFR,DLL,DLH.
- * Reference OMAP TRM Chapter 17
- * Section: 1.4.3 Mode Selection
- */
-#define OMAP_UART_LCR_CONF_MDB	0XBF
-
 /* WER = 0x7F
  * Enable module level wakeup in WER reg
  */
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index 8b3f12f..ea2b8a6 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -52,5 +52,10 @@
 #define OMAP4_MMU1_BASE			0x55082000
 #define OMAP4_MMU2_BASE			0x4A066000
 
+#define OMAP44XX_USBTLL_BASE		(L4_44XX_BASE + 0x62000)
+#define OMAP44XX_UHH_CONFIG_BASE	(L4_44XX_BASE + 0x64000)
+#define OMAP44XX_HSUSB_OHCI_BASE	(L4_44XX_BASE + 0x64800)
+#define OMAP44XX_HSUSB_EHCI_BASE	(L4_44XX_BASE + 0x64C00)
+
 #endif /* __ASM_ARCH_OMAP44XX_H */
 
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index 28e2d1a..e4c349f 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -107,6 +107,7 @@
 int omap_device_align_pm_lat(struct platform_device *pdev,
 			     u32 new_wakeup_lat_limit);
 struct powerdomain *omap_device_get_pwrdm(struct omap_device *od);
+u32 omap_device_get_context_loss_count(struct platform_device *pdev);
 
 /* Other */
 
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 7eaa8ed..1eee85a 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -23,7 +23,7 @@
  * - add pinmuxing
  * - init_conn_id_bit (CONNID_BIT_VECTOR)
  * - implement default hwmod SMS/SDRC flags?
- * - remove unused fields
+ * - move Linux-specific data ("non-ROM data") out
  *
  */
 #ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_OMAP_HWMOD_H
@@ -32,8 +32,9 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/ioport.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <plat/cpu.h>
+#include <plat/voltage.h>
 
 struct omap_device;
 
@@ -76,6 +77,20 @@
 #define HWMOD_IDLEMODE_FORCE		(1 << 0)
 #define HWMOD_IDLEMODE_NO		(1 << 1)
 #define HWMOD_IDLEMODE_SMART		(1 << 2)
+/* Slave idle mode flag only */
+#define HWMOD_IDLEMODE_SMART_WKUP	(1 << 3)
+
+/**
+ * struct omap_hwmod_mux_info - hwmod specific mux configuration
+ * @pads:              array of omap_device_pad entries
+ * @nr_pads:           number of omap_device_pad entries
+ *
+ * Note that this is currently built during init as needed.
+ */
+struct omap_hwmod_mux_info {
+	int				nr_pads;
+	struct omap_device_pad		*pads;
+};
 
 /**
  * struct omap_hwmod_irq_info - MPU IRQs used by the hwmod
@@ -159,7 +174,7 @@
  * ADDR_MAP_ON_INIT: Map this address space during omap_hwmod init.
  * ADDR_TYPE_RT: Address space contains module register target data.
  */
-#define ADDR_MAP_ON_INIT	(1 << 0)
+#define ADDR_MAP_ON_INIT	(1 << 0)	/* XXX does not belong */
 #define ADDR_TYPE_RT		(1 << 1)
 
 /**
@@ -200,8 +215,6 @@
  * @fw: interface firewall data
  * @addr_cnt: ARRAY_SIZE(@addr)
  * @width: OCP data width
- * @thread_cnt: number of threads
- * @max_burst_len: maximum burst length in @width sized words (0 if unlimited)
  * @user: initiators using this interface (see OCP_USER_* macros above)
  * @flags: OCP interface flags (see OCPIF_* macros above)
  *
@@ -221,8 +234,6 @@
 	}				fw;
 	u8				addr_cnt;
 	u8				width;
-	u8				thread_cnt;
-	u8				max_burst_len;
 	u8				user;
 	u8				flags;
 };
@@ -231,11 +242,12 @@
 /* Macros for use in struct omap_hwmod_sysconfig */
 
 /* Flags for use in omap_hwmod_sysconfig.idlemodes */
-#define MASTER_STANDBY_SHIFT	2
+#define MASTER_STANDBY_SHIFT	4
 #define SLAVE_IDLE_SHIFT	0
 #define SIDLE_FORCE		(HWMOD_IDLEMODE_FORCE << SLAVE_IDLE_SHIFT)
 #define SIDLE_NO		(HWMOD_IDLEMODE_NO << SLAVE_IDLE_SHIFT)
 #define SIDLE_SMART		(HWMOD_IDLEMODE_SMART << SLAVE_IDLE_SHIFT)
+#define SIDLE_SMART_WKUP	(HWMOD_IDLEMODE_SMART_WKUP << SLAVE_IDLE_SHIFT)
 #define MSTANDBY_FORCE		(HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
 #define MSTANDBY_NO		(HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
 #define MSTANDBY_SMART		(HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
@@ -339,7 +351,7 @@
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
  * @clkctrl_reg: PRCM address of the clock control register
- * @rstctrl_reg: adress of the XXX_RSTCTRL register located in the PRM
+ * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
  * @submodule_wkdep_bit: bit shift of the WKDEP range
  */
 struct omap_hwmod_omap4_prcm {
@@ -357,14 +369,14 @@
  * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
  *     of standby, rather than relying on module smart-standby
  * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
- *     SDRAM controller, etc.
+ *     SDRAM controller, etc. XXX probably belongs outside the main hwmod file
  * HWMOD_INIT_NO_IDLE: don't idle this module at boot - important for SDRAM
- *     controller, etc.
+ *     controller, etc. XXX probably belongs outside the main hwmod file
  * HWMOD_NO_AUTOIDLE: disable module autoidle (OCP_SYSCONFIG.AUTOIDLE)
  *     when module is enabled, rather than the default, which is to
  *     enable autoidle
  * HWMOD_SET_DEFAULT_CLOCKACT: program CLOCKACTIVITY bits at startup
- * HWMOD_NO_IDLEST : this module does not have idle status - this is the case
+ * HWMOD_NO_IDLEST: this module does not have idle status - this is the case
  *     only for few initiator modules on OMAP2 & 3.
  * HWMOD_CONTROL_OPT_CLKS_IN_RESET: Enable all optional clocks during reset.
  *     This is needed for devices like DSS that require optional clocks enabled
@@ -415,14 +427,31 @@
  * @name: name of the hwmod_class
  * @sysc: device SYSCONFIG/SYSSTATUS register data
  * @rev: revision of the IP class
+ * @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
+ * @reset: ptr to fn to be executed in place of the standard hwmod reset fn
  *
  * Represent the class of a OMAP hardware "modules" (e.g. timer,
  * smartreflex, gpio, uart...)
+ *
+ * @pre_shutdown is a function that will be run immediately before
+ * hwmod clocks are disabled, etc.  It is intended for use for hwmods
+ * like the MPU watchdog, which cannot be disabled with the standard
+ * omap_hwmod_shutdown().  The function should return 0 upon success,
+ * or some negative error upon failure.  Returning an error will cause
+ * omap_hwmod_shutdown() to abort the device shutdown and return an
+ * error.
+ *
+ * If @reset is defined, then the function it points to will be
+ * executed in place of the standard hwmod _reset() code in
+ * mach-omap2/omap_hwmod.c.  This is needed for IP blocks which have
+ * unusual reset sequences - usually processor IP blocks like the IVA.
  */
 struct omap_hwmod_class {
 	const char				*name;
 	struct omap_hwmod_class_sysconfig	*sysc;
 	u32					rev;
+	int					(*pre_shutdown)(struct omap_hwmod *oh);
+	int					(*reset)(struct omap_hwmod *oh);
 };
 
 /**
@@ -436,14 +465,14 @@
  * @main_clk: main clock: OMAP clock name
  * @_clk: pointer to the main struct clk (filled in at runtime)
  * @opt_clks: other device clocks that drivers can request (0..*)
+ * @vdd_name: voltage domain name
+ * @voltdm: pointer to voltage domain (filled in at runtime)
  * @masters: ptr to array of OCP ifs that this hwmod can initiate on
  * @slaves: ptr to array of OCP ifs that this hwmod can respond on
  * @dev_attr: arbitrary device attributes that can be passed to the driver
  * @_sysc_cache: internal-use hwmod flags
  * @_mpu_rt_va: cached register target start address (internal use)
  * @_mpu_port_index: cached MPU register target slave ID (internal use)
- * @msuspendmux_reg_id: CONTROL_MSUSPENDMUX register ID (1-6)
- * @msuspendmux_shift: CONTROL_MSUSPENDMUX register bit shift
  * @mpu_irqs_cnt: number of @mpu_irqs
  * @sdma_reqs_cnt: number of @sdma_reqs
  * @opt_clks_cnt: number of @opt_clks
@@ -452,9 +481,10 @@
  * @response_lat: device OCP response latency (in interface clock cycles)
  * @_int_flags: internal-use hwmod flags
  * @_state: internal-use hwmod state
+ * @_postsetup_state: internal-use state to leave the hwmod in after _setup()
  * @flags: hwmod flags (documented below)
  * @omap_chip: OMAP chips this hwmod is present on
- * @_mutex: mutex serializing operations on this hwmod
+ * @_lock: spinlock serializing operations on this hwmod
  * @node: list node for hwmod list (internal use)
  *
  * @main_clk refers to this module's "main clock," which for our
@@ -469,6 +499,7 @@
 	const char			*name;
 	struct omap_hwmod_class		*class;
 	struct omap_device		*od;
+	struct omap_hwmod_mux_info	*mux;
 	struct omap_hwmod_irq_info	*mpu_irqs;
 	struct omap_hwmod_dma_info	*sdma_reqs;
 	struct omap_hwmod_rst_info	*rst_lines;
@@ -479,17 +510,17 @@
 	const char			*main_clk;
 	struct clk			*_clk;
 	struct omap_hwmod_opt_clk	*opt_clks;
+	char				*vdd_name;
+	struct voltagedomain		*voltdm;
 	struct omap_hwmod_ocp_if	**masters; /* connect to *_IA */
 	struct omap_hwmod_ocp_if	**slaves;  /* connect to *_TA */
 	void				*dev_attr;
 	u32				_sysc_cache;
 	void __iomem			*_mpu_rt_va;
-	struct mutex			_mutex;
+	spinlock_t			_lock;
 	struct list_head		node;
 	u16				flags;
 	u8				_mpu_port_index;
-	u8				msuspendmux_reg_id;
-	u8				msuspendmux_shift;
 	u8				response_lat;
 	u8				mpu_irqs_cnt;
 	u8				sdma_reqs_cnt;
@@ -500,16 +531,15 @@
 	u8				hwmods_cnt;
 	u8				_int_flags;
 	u8				_state;
+	u8				_postsetup_state;
 	const struct omap_chip_id	omap_chip;
 };
 
 int omap_hwmod_init(struct omap_hwmod **ohs);
-int omap_hwmod_register(struct omap_hwmod *oh);
-int omap_hwmod_unregister(struct omap_hwmod *oh);
 struct omap_hwmod *omap_hwmod_lookup(const char *name);
 int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
 			void *data);
-int omap_hwmod_late_init(u8 skip_setup_idle);
+int omap_hwmod_late_init(void);
 
 int omap_hwmod_enable(struct omap_hwmod *oh);
 int _omap_hwmod_enable(struct omap_hwmod *oh);
@@ -556,6 +586,9 @@
 					   void *user),
 				 void *user);
 
+int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state);
+u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
+
 /*
  * Chip variant-specific hwmod init routines - XXX should be converted
  * to use initcalls once the initial boot ordering is straightened out
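A minimal sketch of hwmod classes using the new @pre_shutdown and @reset hooks described above; the class names, revisions and hook bodies are placeholders, not actual hwmod data.

#include <plat/omap_hwmod.h>

static int demo_wd_pre_shutdown(struct omap_hwmod *oh)
{
	/* hypothetical: quiesce the watchdog before its clocks are cut */
	return 0;
}

static int demo_proc_reset(struct omap_hwmod *oh)
{
	/* hypothetical: toggle a processor IP's reset lines by hand */
	return 0;
}

static struct omap_hwmod_class demo_wd_timer_class = {
	.name		= "wd_timer",
	.rev		= 2,
	.pre_shutdown	= demo_wd_pre_shutdown,
};

static struct omap_hwmod_class demo_iva_class = {
	.name		= "iva",
	.rev		= 2,
	.reset		= demo_proc_reset,
};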
diff --git a/arch/arm/plat-omap/include/plat/panel-generic-dpi.h b/arch/arm/plat-omap/include/plat/panel-generic-dpi.h
new file mode 100644
index 0000000..7906197
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/panel-generic-dpi.h
@@ -0,0 +1,37 @@
+/*
+ * Header for generic DPI panel driver
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H
+#define __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H
+
+#include "display.h"
+
+/**
+ * struct panel_generic_dpi_data - panel driver configuration data
+ * @name: panel name
+ * @platform_enable: platform specific panel enable function
+ * @platform_disable: platform specific panel disable function
+ */
+struct panel_generic_dpi_data {
+	const char *name;
+	int (*platform_enable)(struct omap_dss_device *dssdev);
+	void (*platform_disable)(struct omap_dss_device *dssdev);
+};
+
+#endif /* __ARCH_ARM_PLAT_OMAP_PANEL_GENERIC_DPI_H */
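A minimal sketch of board data for the new generic DPI panel driver; the panel name and the stub callbacks are illustrative, and a board would typically hang this structure off its omap_dss_device platform data.

#include <plat/display.h>
#include <plat/panel-generic-dpi.h>

static int demo_panel_enable(struct omap_dss_device *dssdev)
{
	/* e.g. raise a board-specific panel-enable GPIO */
	return 0;
}

static void demo_panel_disable(struct omap_dss_device *dssdev)
{
	/* e.g. drop the panel-enable GPIO */
}

static struct panel_generic_dpi_data demo_panel_data = {
	.name			= "generic",	/* assumed panel name */
	.platform_enable	= demo_panel_enable,
	.platform_disable	= demo_panel_disable,
};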
diff --git a/arch/arm/plat-omap/include/plat/powerdomain.h b/arch/arm/plat-omap/include/plat/powerdomain.h
deleted file mode 100644
index 9ca420d..0000000
--- a/arch/arm/plat-omap/include/plat/powerdomain.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * OMAP2/3 powerdomain control
- *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2009 Nokia Corporation
- *
- * Written by Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ASM_ARM_ARCH_OMAP_POWERDOMAIN
-#define ASM_ARM_ARCH_OMAP_POWERDOMAIN
-
-#include <linux/types.h>
-#include <linux/list.h>
-
-#include <asm/atomic.h>
-
-#include <plat/cpu.h>
-
-
-/* Powerdomain basic power states */
-#define PWRDM_POWER_OFF		0x0
-#define PWRDM_POWER_RET		0x1
-#define PWRDM_POWER_INACTIVE	0x2
-#define PWRDM_POWER_ON		0x3
-
-#define PWRDM_MAX_PWRSTS	4
-
-/* Powerdomain allowable state bitfields */
-#define PWRSTS_ON		(1 << PWRDM_POWER_ON)
-#define PWRSTS_OFF		(1 << PWRDM_POWER_OFF)
-#define PWRSTS_OFF_ON		((1 << PWRDM_POWER_OFF) | \
-				 (1 << PWRDM_POWER_ON))
-
-#define PWRSTS_OFF_RET		((1 << PWRDM_POWER_OFF) | \
-				 (1 << PWRDM_POWER_RET))
-
-#define PWRSTS_RET_ON		((1 << PWRDM_POWER_RET) | \
-				 (1 << PWRDM_POWER_ON))
-
-#define PWRSTS_OFF_RET_ON	(PWRSTS_OFF_RET | (1 << PWRDM_POWER_ON))
-
-
-/* Powerdomain flags */
-#define PWRDM_HAS_HDWR_SAR	(1 << 0) /* hardware save-and-restore support */
-#define PWRDM_HAS_MPU_QUIRK	(1 << 1) /* MPU pwr domain has MEM bank 0 bits
-					  * in MEM bank 1 position. This is
-					  * true for OMAP3430
-					  */
-#define PWRDM_HAS_LOWPOWERSTATECHANGE	(1 << 2) /*
-						  * support to transition from a
-						  * sleep state to a lower sleep
-						  * state without waking up the
-						  * powerdomain
-						  */
-
-/*
- * Number of memory banks that are power-controllable.	On OMAP4430, the
- * maximum is 5.
- */
-#define PWRDM_MAX_MEM_BANKS	5
-
-/*
- * Maximum number of clockdomains that can be associated with a powerdomain.
- * CORE powerdomain on OMAP4 is the worst case
- */
-#define PWRDM_MAX_CLKDMS	9
-
-/* XXX A completely arbitrary number. What is reasonable here? */
-#define PWRDM_TRANSITION_BAILOUT 100000
-
-struct clockdomain;
-struct powerdomain;
-
-/**
- * struct powerdomain - OMAP powerdomain
- * @name: Powerdomain name
- * @omap_chip: represents the OMAP chip types containing this pwrdm
- * @prcm_offs: the address offset from CM_BASE/PRM_BASE
- * @pwrsts: Possible powerdomain power states
- * @pwrsts_logic_ret: Possible logic power states when pwrdm in RETENTION
- * @flags: Powerdomain flags
- * @banks: Number of software-controllable memory banks in this powerdomain
- * @pwrsts_mem_ret: Possible memory bank pwrstates when pwrdm in RETENTION
- * @pwrsts_mem_on: Possible memory bank pwrstates when pwrdm in ON
- * @pwrdm_clkdms: Clockdomains in this powerdomain
- * @node: list_head linking all powerdomains
- * @state:
- * @state_counter:
- * @timer:
- * @state_timer:
- */
-struct powerdomain {
-	const char *name;
-	const struct omap_chip_id omap_chip;
-	const s16 prcm_offs;
-	const u8 pwrsts;
-	const u8 pwrsts_logic_ret;
-	const u8 flags;
-	const u8 banks;
-	const u8 pwrsts_mem_ret[PWRDM_MAX_MEM_BANKS];
-	const u8 pwrsts_mem_on[PWRDM_MAX_MEM_BANKS];
-	struct clockdomain *pwrdm_clkdms[PWRDM_MAX_CLKDMS];
-	struct list_head node;
-	int state;
-	unsigned state_counter[PWRDM_MAX_PWRSTS];
-	unsigned ret_logic_off_counter;
-	unsigned ret_mem_off_counter[PWRDM_MAX_MEM_BANKS];
-
-#ifdef CONFIG_PM_DEBUG
-	s64 timer;
-	s64 state_timer[PWRDM_MAX_PWRSTS];
-#endif
-};
-
-
-void pwrdm_init(struct powerdomain **pwrdm_list);
-
-struct powerdomain *pwrdm_lookup(const char *name);
-
-int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
-			void *user);
-int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
-			void *user);
-
-int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
-int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
-int pwrdm_for_each_clkdm(struct powerdomain *pwrdm,
-			 int (*fn)(struct powerdomain *pwrdm,
-				   struct clockdomain *clkdm));
-
-int pwrdm_get_mem_bank_count(struct powerdomain *pwrdm);
-
-int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst);
-int pwrdm_read_next_pwrst(struct powerdomain *pwrdm);
-int pwrdm_read_pwrst(struct powerdomain *pwrdm);
-int pwrdm_read_prev_pwrst(struct powerdomain *pwrdm);
-int pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm);
-
-int pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst);
-int pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank, u8 pwrst);
-int pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst);
-
-int pwrdm_read_logic_pwrst(struct powerdomain *pwrdm);
-int pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm);
-int pwrdm_read_logic_retst(struct powerdomain *pwrdm);
-int pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank);
-int pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank);
-int pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank);
-
-int pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm);
-int pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm);
-bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm);
-
-int pwrdm_wait_transition(struct powerdomain *pwrdm);
-
-int pwrdm_state_switch(struct powerdomain *pwrdm);
-int pwrdm_clkdm_state_switch(struct clockdomain *clkdm);
-int pwrdm_pre_transition(void);
-int pwrdm_post_transition(void);
-int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
-
-#endif
diff --git a/arch/arm/plat-omap/include/plat/prcm.h b/arch/arm/plat-omap/include/plat/prcm.h
index ab77442..2fdf8c8 100644
--- a/arch/arm/plat-omap/include/plat/prcm.h
+++ b/arch/arm/plat-omap/include/plat/prcm.h
@@ -18,6 +18,10 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * XXX This file is deprecated.  The PRCM is an OMAP2+-only subsystem,
+ * so this file doesn't belong in plat-omap/include/plat.  Please
+ * do not add anything new to this file.
  */
 
 #ifndef __ASM_ARM_ARCH_OMAP_PRCM_H
@@ -28,22 +32,6 @@
 int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest,
 			 const char *name);
 
-#define START_PADCONF_SAVE 0x2
-#define PADCONF_SAVE_DONE  0x1
-
-void omap3_prcm_save_context(void);
-void omap3_prcm_restore_context(void);
-
-u32 prm_read_mod_reg(s16 module, u16 idx);
-void prm_write_mod_reg(u32 val, s16 module, u16 idx);
-u32 prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
-u32 prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask);
-u32 omap4_prm_read_bits_shift(void __iomem *reg, u32 mask);
-u32 omap4_prm_rmw_reg_bits(u32 mask, u32 bits, void __iomem *reg);
-u32 cm_read_mod_reg(s16 module, u16 idx);
-void cm_write_mod_reg(u32 val, s16 module, u16 idx);
-u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx);
-
 #endif
 
 
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 19145f5..cec5d56 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -93,9 +93,12 @@
 			})
 
 #ifndef __ASSEMBLER__
+
+struct omap_board_data;
+
 extern void __init omap_serial_early_init(void);
 extern void omap_serial_init(void);
-extern void omap_serial_init_port(int port);
+extern void omap_serial_init_port(struct omap_board_data *bdata);
 extern int omap_uart_can_sleep(void);
 extern void omap_uart_check_wakeup(void);
 extern void omap_uart_prepare_suspend(void);
diff --git a/arch/arm/plat-omap/include/plat/smartreflex.h b/arch/arm/plat-omap/include/plat/smartreflex.h
new file mode 100644
index 0000000..6568c88
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/smartreflex.h
@@ -0,0 +1,245 @@
+/*
+ * OMAP Smartreflex Defines and Routines
+ *
+ * Author: Thara Gopinath	<thara@ti.com>
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OMAP_SMARTREFLEX_H
+#define __ASM_ARM_OMAP_SMARTREFLEX_H
+
+#include <linux/platform_device.h>
+#include <plat/voltage.h>
+
+/*
+ * Different Smartreflex IP versions. The v1 is the 65nm version used in
+ * OMAP3430. The v2 is the update for the 45nm version of the IP
+ * used in OMAP3630 and OMAP4430
+ */
+#define SR_TYPE_V1	1
+#define SR_TYPE_V2	2
+
+/* SMART REFLEX REG ADDRESS OFFSET */
+#define SRCONFIG		0x00
+#define SRSTATUS		0x04
+#define SENVAL			0x08
+#define SENMIN			0x0C
+#define SENMAX			0x10
+#define SENAVG			0x14
+#define AVGWEIGHT		0x18
+#define NVALUERECIPROCAL	0x1c
+#define SENERROR_V1		0x20
+#define ERRCONFIG_V1		0x24
+#define IRQ_EOI			0x20
+#define IRQSTATUS_RAW		0x24
+#define IRQSTATUS		0x28
+#define IRQENABLE_SET		0x2C
+#define IRQENABLE_CLR		0x30
+#define SENERROR_V2		0x34
+#define ERRCONFIG_V2		0x38
+
+/* Bit/Shift Positions */
+
+/* SRCONFIG */
+#define SRCONFIG_ACCUMDATA_SHIFT	22
+#define SRCONFIG_SRCLKLENGTH_SHIFT	12
+#define SRCONFIG_SENNENABLE_V1_SHIFT	5
+#define SRCONFIG_SENPENABLE_V1_SHIFT	3
+#define SRCONFIG_SENNENABLE_V2_SHIFT	1
+#define SRCONFIG_SENPENABLE_V2_SHIFT	0
+#define SRCONFIG_CLKCTRL_SHIFT		0
+
+#define SRCONFIG_ACCUMDATA_MASK		(0x3ff << 22)
+
+#define SRCONFIG_SRENABLE		BIT(11)
+#define SRCONFIG_SENENABLE		BIT(10)
+#define SRCONFIG_ERRGEN_EN		BIT(9)
+#define SRCONFIG_MINMAXAVG_EN		BIT(8)
+#define SRCONFIG_DELAYCTRL		BIT(2)
+
+/* AVGWEIGHT */
+#define AVGWEIGHT_SENPAVGWEIGHT_SHIFT	2
+#define AVGWEIGHT_SENNAVGWEIGHT_SHIFT	0
+
+/* NVALUERECIPROCAL */
+#define NVALUERECIPROCAL_SENPGAIN_SHIFT	20
+#define NVALUERECIPROCAL_SENNGAIN_SHIFT	16
+#define NVALUERECIPROCAL_RNSENP_SHIFT	8
+#define NVALUERECIPROCAL_RNSENN_SHIFT	0
+
+/* ERRCONFIG */
+#define ERRCONFIG_ERRWEIGHT_SHIFT	16
+#define ERRCONFIG_ERRMAXLIMIT_SHIFT	8
+#define ERRCONFIG_ERRMINLIMIT_SHIFT	0
+
+#define SR_ERRWEIGHT_MASK		(0x07 << 16)
+#define SR_ERRMAXLIMIT_MASK		(0xff << 8)
+#define SR_ERRMINLIMIT_MASK		(0xff << 0)
+
+#define ERRCONFIG_VPBOUNDINTEN_V1	BIT(31)
+#define ERRCONFIG_VPBOUNDINTST_V1	BIT(30)
+#define	ERRCONFIG_MCUACCUMINTEN		BIT(29)
+#define ERRCONFIG_MCUACCUMINTST		BIT(28)
+#define	ERRCONFIG_MCUVALIDINTEN		BIT(27)
+#define ERRCONFIG_MCUVALIDINTST		BIT(26)
+#define ERRCONFIG_MCUBOUNDINTEN		BIT(25)
+#define	ERRCONFIG_MCUBOUNDINTST		BIT(24)
+#define	ERRCONFIG_MCUDISACKINTEN	BIT(23)
+#define ERRCONFIG_VPBOUNDINTST_V2	BIT(23)
+#define ERRCONFIG_MCUDISACKINTST	BIT(22)
+#define ERRCONFIG_VPBOUNDINTEN_V2	BIT(22)
+
+#define ERRCONFIG_STATUS_V1_MASK	(ERRCONFIG_VPBOUNDINTST_V1 | \
+					ERRCONFIG_MCUACCUMINTST | \
+					ERRCONFIG_MCUVALIDINTST | \
+					ERRCONFIG_MCUBOUNDINTST | \
+					ERRCONFIG_MCUDISACKINTST)
+/* IRQSTATUS */
+#define IRQSTATUS_MCUACCUMINT		BIT(3)
+#define IRQSTATUS_MCVALIDINT		BIT(2)
+#define IRQSTATUS_MCBOUNDSINT		BIT(1)
+#define IRQSTATUS_MCUDISABLEACKINT	BIT(0)
+
+/* IRQENABLE_SET and IRQENABLE_CLEAR */
+#define IRQENABLE_MCUACCUMINT		BIT(3)
+#define IRQENABLE_MCUVALIDINT		BIT(2)
+#define IRQENABLE_MCUBOUNDSINT		BIT(1)
+#define IRQENABLE_MCUDISABLEACKINT	BIT(0)
+
+/* Common Bit values */
+
+#define SRCLKLENGTH_12MHZ_SYSCLK	0x3c
+#define SRCLKLENGTH_13MHZ_SYSCLK	0x41
+#define SRCLKLENGTH_19MHZ_SYSCLK	0x60
+#define SRCLKLENGTH_26MHZ_SYSCLK	0x82
+#define SRCLKLENGTH_38MHZ_SYSCLK	0xC0
+
+/*
+ * 3430 specific values. Maybe these should be passed from board file or
+ * pmic structures.
+ */
+#define OMAP3430_SR_ACCUMDATA		0x1f4
+
+#define OMAP3430_SR1_SENPAVGWEIGHT	0x03
+#define OMAP3430_SR1_SENNAVGWEIGHT	0x03
+
+#define OMAP3430_SR2_SENPAVGWEIGHT	0x01
+#define OMAP3430_SR2_SENNAVGWEIGHT	0x01
+
+#define OMAP3430_SR_ERRWEIGHT		0x04
+#define OMAP3430_SR_ERRMAXLIMIT		0x02
+
+/**
+ * struct omap_sr_pmic_data - Structure to be populated by pmic code to pass
+ *				pmic specific info to smartreflex driver
+ *
+ * @sr_pmic_init:	API to initialize smartreflex on the PMIC side.
+ */
+struct omap_sr_pmic_data {
+	void (*sr_pmic_init) (void);
+};
+
+#ifdef CONFIG_OMAP_SMARTREFLEX
+/*
+ * The smartreflex driver supports CLASS1, CLASS2 and CLASS3 SR.
+ * The smartreflex class driver should pass the class type.
+ * Should be used to populate the class_type field of the
+ * omap_sr_class_data structure.
+ */
+#define SR_CLASS1	0x1
+#define SR_CLASS2	0x2
+#define SR_CLASS3	0x3
+
+/**
+ * struct omap_sr_class_data - Smartreflex class driver info
+ *
+ * @enable:		API to enable a particular class smartreflex.
+ * @disable:		API to disable a particular class smartreflex.
+ * @configure:		API to configure a particular class smartreflex.
+ * @notify:		API to notify the class driver about an event in SR.
+ *			Not needed for class3.
+ * @notify_flags:	specify the events to be notified to the class driver
+ * @class_type:		specify which smartreflex class.
+ *			Can be used by the SR driver to take any class
+ *			based decisions.
+ */
+struct omap_sr_class_data {
+	int (*enable)(struct voltagedomain *voltdm);
+	int (*disable)(struct voltagedomain *voltdm, int is_volt_reset);
+	int (*configure)(struct voltagedomain *voltdm);
+	int (*notify)(struct voltagedomain *voltdm, u32 status);
+	u8 notify_flags;
+	u8 class_type;
+};
+
+/**
+ * struct omap_sr_nvalue_table	- Smartreflex n-target value info
+ *
+ * @efuse_offs:	The offset of the efuse where n-target values are stored.
+ * @nvalue:	The n-target value.
+ */
+struct omap_sr_nvalue_table {
+	u32 efuse_offs;
+	u32 nvalue;
+};
+
+/**
+ * struct omap_sr_data - Smartreflex platform data.
+ *
+ * @ip_type:		Smartreflex IP type.
+ * @senp_mod:		SENPENABLE value for the sr
+ * @senn_mod:		SENNENABLE value for sr
+ * @nvalue_count:	Number of distinct nvalues in the nvalue table
+ * @enable_on_init:	whether this sr module needs to be enabled at
+ *			boot up or not.
+ * @nvalue_table:	table containing the efuse offsets and nvalues
+ *			corresponding to them.
+ * @voltdm:		Pointer to the voltage domain associated with the SR
+ */
+struct omap_sr_data {
+	int				ip_type;
+	u32				senp_mod;
+	u32				senn_mod;
+	int				nvalue_count;
+	bool				enable_on_init;
+	struct omap_sr_nvalue_table	*nvalue_table;
+	struct voltagedomain		*voltdm;
+};
+
+/* Smartreflex module enable/disable interface */
+void omap_sr_enable(struct voltagedomain *voltdm);
+void omap_sr_disable(struct voltagedomain *voltdm);
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
+
+/* API to register the pmic specific data with the smartreflex driver. */
+void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
+
+/* Smartreflex driver hooks to be called from Smartreflex class driver */
+int sr_enable(struct voltagedomain *voltdm, unsigned long volt);
+void sr_disable(struct voltagedomain *voltdm);
+int sr_configure_errgen(struct voltagedomain *voltdm);
+int sr_configure_minmax(struct voltagedomain *voltdm);
+
+/* API to register the smartreflex class driver with the smartreflex driver */
+int sr_register_class(struct omap_sr_class_data *class_data);
+#else
+static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
+static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
+static inline void omap_sr_disable_reset_volt(
+		struct voltagedomain *voltdm) {}
+static inline void omap_sr_register_pmic(
+		struct omap_sr_pmic_data *pmic_data) {}
+#endif
+#endif
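A minimal sketch of a class-3 style driver registering itself through the interfaces declared above; the function bodies are illustrative placeholders, not the actual class driver shipped with this series.

#include <linux/errno.h>
#include <linux/init.h>
#include <plat/smartreflex.h>

static int demo_class3_enable(struct voltagedomain *voltdm)
{
	unsigned long volt = omap_voltage_get_nom_volt(voltdm);

	if (!volt)
		return -ENODATA;

	return sr_enable(voltdm, volt);
}

static int demo_class3_disable(struct voltagedomain *voltdm, int is_volt_reset)
{
	sr_disable(voltdm);
	if (is_volt_reset)
		omap_voltage_reset(voltdm);

	return 0;
}

static int demo_class3_configure(struct voltagedomain *voltdm)
{
	return sr_configure_errgen(voltdm);
}

static struct omap_sr_class_data demo_class3_data = {
	.enable		= demo_class3_enable,
	.disable	= demo_class3_disable,
	.configure	= demo_class3_configure,
	.class_type	= SR_CLASS3,
};

static int __init demo_sr_class3_init(void)
{
	return sr_register_class(&demo_class3_data);
}
late_initcall(demo_sr_class3_init);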
diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h
index 5905100..9967d5e 100644
--- a/arch/arm/plat-omap/include/plat/sram.h
+++ b/arch/arm/plat-omap/include/plat/sram.h
@@ -11,6 +11,7 @@
 #ifndef __ARCH_ARM_OMAP_SRAM_H
 #define __ARCH_ARM_OMAP_SRAM_H
 
+#ifndef __ASSEMBLY__
 extern void * omap_sram_push(void * start, unsigned long size);
 extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
 
@@ -74,4 +75,14 @@
 static inline void omap_push_sram_idle(void) {}
 #endif /* CONFIG_PM */
 
+#endif /* __ASSEMBLY__ */
+
+/*
+ * OMAP2+: define the SRAM PA addresses.
+ * Used by the SRAM management code and the idle sleep code.
+ */
+#define OMAP2_SRAM_PA		0x40200000
+#define OMAP3_SRAM_PA		0x40200000
+#define OMAP4_SRAM_PA		0x40300000
+
 #endif
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index 9036e37..ad98b85 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -145,8 +145,11 @@
 		/* omap3 based boards using UART3 */
 		DEBUG_LL_OMAP3(3, cm_t35);
 		DEBUG_LL_OMAP3(3, cm_t3517);
+		DEBUG_LL_OMAP3(3, craneboard);
+		DEBUG_LL_OMAP3(3, devkit8000);
 		DEBUG_LL_OMAP3(3, igep0020);
 		DEBUG_LL_OMAP3(3, igep0030);
+		DEBUG_LL_OMAP3(3, nokia_rm680);
 		DEBUG_LL_OMAP3(3, nokia_rx51);
 		DEBUG_LL_OMAP3(3, omap3517evm);
 		DEBUG_LL_OMAP3(3, omap3_beagle);
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 59c7fe7..450a332 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -11,6 +11,7 @@
 	EHCI_HCD_OMAP_MODE_UNKNOWN,
 	EHCI_HCD_OMAP_MODE_PHY,
 	EHCI_HCD_OMAP_MODE_TLL,
+	EHCI_HCD_OMAP_MODE_HSIC,
 };
 
 enum ohci_omap3_port_mode {
@@ -69,6 +70,10 @@
 	u8	mode;
 	u16	power;
 	unsigned extvbus:1;
+	void	(*set_phy_power)(u8 on);
+	void	(*clear_irq)(void);
+	void	(*set_mode)(u8 mode);
+	void	(*reset)(void);
 };
 
 enum musb_interface    {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI};
@@ -79,6 +84,11 @@
 
 extern void usb_ohci_init(const struct ohci_hcd_omap_platform_data *pdata);
 
+extern int omap4430_phy_power(struct device *dev, int ID, int on);
+extern int omap4430_phy_set_clk(struct device *dev, int on);
+extern int omap4430_phy_init(struct device *dev);
+extern int omap4430_phy_exit(struct device *dev);
+
 #endif
 
 
diff --git a/arch/arm/plat-omap/include/plat/voltage.h b/arch/arm/plat-omap/include/plat/voltage.h
new file mode 100644
index 0000000..5bd204e
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/voltage.h
@@ -0,0 +1,155 @@
+/*
+ * OMAP Voltage Management Routines
+ *
+ * Author: Thara Gopinath	<thara@ti.com>
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
+#define __ARCH_ARM_MACH_OMAP2_VOLTAGE_H
+
+#include <linux/err.h>
+
+#define VOLTSCALE_VPFORCEUPDATE		1
+#define VOLTSCALE_VCBYPASS		2
+
+/*
+ * OMAP3 GENERIC setup times. Revisit to see if these need to be
+ * passed from board or PMIC file
+ */
+#define OMAP3_CLKSETUP		0xff
+#define OMAP3_VOLTOFFSET	0xff
+#define OMAP3_VOLTSETUP2	0xff
+
+/* Voltage value defines */
+#define OMAP3430_VDD_MPU_OPP1_UV		975000
+#define OMAP3430_VDD_MPU_OPP2_UV		1075000
+#define OMAP3430_VDD_MPU_OPP3_UV		1200000
+#define OMAP3430_VDD_MPU_OPP4_UV		1270000
+#define OMAP3430_VDD_MPU_OPP5_UV		1350000
+
+#define OMAP3430_VDD_CORE_OPP1_UV		975000
+#define OMAP3430_VDD_CORE_OPP2_UV		1050000
+#define OMAP3430_VDD_CORE_OPP3_UV		1150000
+
+#define OMAP3630_VDD_MPU_OPP50_UV		1012500
+#define OMAP3630_VDD_MPU_OPP100_UV		1200000
+#define OMAP3630_VDD_MPU_OPP120_UV		1325000
+#define OMAP3630_VDD_MPU_OPP1G_UV		1375000
+
+#define OMAP3630_VDD_CORE_OPP50_UV		1000000
+#define OMAP3630_VDD_CORE_OPP100_UV		1200000
+
+#define OMAP4430_VDD_MPU_OPP50_UV		930000
+#define OMAP4430_VDD_MPU_OPP100_UV		1100000
+#define OMAP4430_VDD_MPU_OPPTURBO_UV		1260000
+#define OMAP4430_VDD_MPU_OPPNITRO_UV		1350000
+
+#define OMAP4430_VDD_IVA_OPP50_UV		930000
+#define OMAP4430_VDD_IVA_OPP100_UV		1100000
+#define OMAP4430_VDD_IVA_OPPTURBO_UV		1260000
+
+#define OMAP4430_VDD_CORE_OPP50_UV		930000
+#define OMAP4430_VDD_CORE_OPP100_UV		1100000
+
+/**
+ * struct voltagedomain - omap voltage domain global structure.
+ * @name:	Name of the voltage domain which can be used as a unique
+ *		identifier.
+ */
+struct voltagedomain {
+	char *name;
+};
+
+/**
+ * struct omap_volt_data - Omap voltage specific data.
+ * @volt_nominal:	The possible voltage value in uV
+ * @sr_efuse_offs:	The offset of the efuse register(from system
+ *			control module base address) from where to read
+ *			the n-target value for the smartreflex module.
+ * @sr_errminlimit:	Error min limit value for smartreflex. This value
+ *			differs at different opp and thus is linked
+ *			with voltage.
+ * @vp_errgain:		Error gain value for the voltage processor. This
+ *			field also differs according to the voltage/opp.
+ */
+struct omap_volt_data {
+	u32	volt_nominal;
+	u32	sr_efuse_offs;
+	u8	sr_errminlimit;
+	u8	vp_errgain;
+};
+
+/**
+ * struct omap_volt_pmic_info - PMIC specific data required by voltage driver.
+ * @slew_rate:	PMIC slew rate (in uv/us)
+ * @step_size:	PMIC voltage step size (in uv)
+ * @vsel_to_uv:	PMIC API to convert vsel value to actual voltage in uV.
+ * @uv_to_vsel:	PMIC API to convert voltage in uV to vsel value.
+ */
+struct omap_volt_pmic_info {
+	int slew_rate;
+	int step_size;
+	u32 on_volt;
+	u32 onlp_volt;
+	u32 ret_volt;
+	u32 off_volt;
+	u16 volt_setup_time;
+	u8 vp_erroroffset;
+	u8 vp_vstepmin;
+	u8 vp_vstepmax;
+	u8 vp_vddmin;
+	u8 vp_vddmax;
+	u8 vp_timeout_us;
+	u8 i2c_slave_addr;
+	u8 pmic_reg;
+	unsigned long (*vsel_to_uv) (const u8 vsel);
+	u8 (*uv_to_vsel) (unsigned long uV);
+};
+
+unsigned long omap_vp_get_curr_volt(struct voltagedomain *voltdm);
+void omap_vp_enable(struct voltagedomain *voltdm);
+void omap_vp_disable(struct voltagedomain *voltdm);
+int omap_voltage_scale_vdd(struct voltagedomain *voltdm,
+		unsigned long target_volt);
+void omap_voltage_reset(struct voltagedomain *voltdm);
+void omap_voltage_get_volttable(struct voltagedomain *voltdm,
+		struct omap_volt_data **volt_data);
+struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
+		unsigned long volt);
+unsigned long omap_voltage_get_nom_volt(struct voltagedomain *voltdm);
+struct dentry *omap_voltage_get_dbgdir(struct voltagedomain *voltdm);
+#ifdef CONFIG_PM
+int omap_voltage_register_pmic(struct voltagedomain *voltdm,
+		struct omap_volt_pmic_info *pmic_info);
+void omap_change_voltscale_method(struct voltagedomain *voltdm,
+		int voltscale_method);
+/* API to get the voltagedomain pointer */
+struct voltagedomain *omap_voltage_domain_lookup(char *name);
+
+int omap_voltage_late_init(void);
+#else
+static inline int omap_voltage_register_pmic(struct voltagedomain *voltdm,
+		struct omap_volt_pmic_info *pmic_info)
+{
+	return -EINVAL;
+}
+static inline void omap_change_voltscale_method(struct voltagedomain *voltdm,
+		int voltscale_method) {}
+static inline int omap_voltage_late_init(void)
+{
+	return -EINVAL;
+}
+static inline struct voltagedomain *omap_voltage_domain_lookup(char *name)
+{
+	return ERR_PTR(-EINVAL);
+}
+#endif
+
+#endif
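
The API above is easiest to read together with a caller. Below is a minimal, hypothetical board-file sketch of how registration and scaling are meant to combine under CONFIG_PM; the PMIC numbers, the vsel conversion formula and the "mpu" domain name are illustrative placeholders, and the error handling assumes the ERR_PTR convention used by the !CONFIG_PM stub of omap_voltage_domain_lookup().

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include "voltage.h"

static unsigned long example_vsel_to_uv(const u8 vsel)
{
	return 600000 + vsel * 12500;		/* hypothetical 12.5 mV steps */
}

static u8 example_uv_to_vsel(unsigned long uv)
{
	return (u8)DIV_ROUND_UP(uv - 600000, 12500);
}

static struct omap_volt_pmic_info example_pmic_info = {
	.slew_rate	= 4000,			/* uV/us, placeholder */
	.step_size	= 12500,		/* uV, placeholder */
	.vsel_to_uv	= example_vsel_to_uv,
	.uv_to_vsel	= example_uv_to_vsel,
};

static int __init example_voltage_init(void)
{
	struct voltagedomain *voltdm = omap_voltage_domain_lookup("mpu");

	if (IS_ERR(voltdm))
		return PTR_ERR(voltdm);

	omap_voltage_register_pmic(voltdm, &example_pmic_info);

	/* e.g. ask for the 3630 MPU OPP100 voltage defined above */
	return omap_voltage_scale_vdd(voltdm, OMAP3630_VDD_MPU_OPP100_UV);
}
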
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
index b0078cf..f1295fa 100644
--- a/arch/arm/plat-omap/io.c
+++ b/arch/arm/plat-omap/io.c
@@ -136,61 +136,3 @@
 		__iounmap(addr);
 }
 EXPORT_SYMBOL(omap_iounmap);
-
-/*
- * NOTE: Please use ioremap + __raw_read/write where possible instead of these
- */
-
-u8 omap_readb(u32 pa)
-{
-	if (cpu_class_is_omap1())
-		return __raw_readb(OMAP1_IO_ADDRESS(pa));
-	else
-		return __raw_readb(OMAP2_L4_IO_ADDRESS(pa));
-}
-EXPORT_SYMBOL(omap_readb);
-
-u16 omap_readw(u32 pa)
-{
-	if (cpu_class_is_omap1())
-		return __raw_readw(OMAP1_IO_ADDRESS(pa));
-	else
-		return __raw_readw(OMAP2_L4_IO_ADDRESS(pa));
-}
-EXPORT_SYMBOL(omap_readw);
-
-u32 omap_readl(u32 pa)
-{
-	if (cpu_class_is_omap1())
-		return __raw_readl(OMAP1_IO_ADDRESS(pa));
-	else
-		return __raw_readl(OMAP2_L4_IO_ADDRESS(pa));
-}
-EXPORT_SYMBOL(omap_readl);
-
-void omap_writeb(u8 v, u32 pa)
-{
-	if (cpu_class_is_omap1())
-		__raw_writeb(v, OMAP1_IO_ADDRESS(pa));
-	else
-		__raw_writeb(v, OMAP2_L4_IO_ADDRESS(pa));
-}
-EXPORT_SYMBOL(omap_writeb);
-
-void omap_writew(u16 v, u32 pa)
-{
-	if (cpu_class_is_omap1())
-		__raw_writew(v, OMAP1_IO_ADDRESS(pa));
-	else
-		__raw_writew(v, OMAP2_L4_IO_ADDRESS(pa));
-}
-EXPORT_SYMBOL(omap_writew);
-
-void omap_writel(u32 v, u32 pa)
-{
-	if (cpu_class_is_omap1())
-		__raw_writel(v, OMAP1_IO_ADDRESS(pa));
-	else
-		__raw_writel(v, OMAP2_L4_IO_ADDRESS(pa));
-}
-EXPORT_SYMBOL(omap_writel);
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index 6cd151b..b1107c0 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -830,6 +830,28 @@
 }
 
 /**
+ * iommu_set_da_range - Set a valid device address range
+ * @obj:		target iommu
+ * @start:		Start of valid range
+ * @end:		End of valid range
+ **/
+int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
+{
+	if (!obj)
+		return -EFAULT;
+
+	if (end < start || !PAGE_ALIGN(start | end))
+		return -EINVAL;
+
+	obj->da_start = start;
+	obj->da_end = end;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_set_da_range);
+
+/**
  * iommu_get - Get iommu handler
  * @name:	target iommu name
  **/
@@ -922,6 +944,8 @@
 	obj->name = pdata->name;
 	obj->dev = &pdev->dev;
 	obj->ctx = (void *)obj + sizeof(*obj);
+	obj->da_start = pdata->da_start;
+	obj->da_end = pdata->da_end;
 
 	mutex_init(&obj->iommu_lock);
 	mutex_init(&obj->mmap_lock);
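
iommu_set_da_range() narrows the device-address window that the virtual-memory layer (iovmm, below) may hand out for one iommu instance, and probe() now seeds da_start/da_end from platform data. A hedged sketch of a client call site follows; the "isp" instance name and the window bounds are illustrative, and iommu_get()/iommu_put() are assumed to keep their existing <plat/iommu.h> prototypes.

#include <linux/err.h>
#include <plat/iommu.h>

static int example_restrict_da(void)
{
	struct iommu *obj;
	int err;

	obj = iommu_get("isp");		/* assumed to return ERR_PTR on failure */
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Constrain later iovmm allocations for this device to 16M..256M. */
	err = iommu_set_da_range(obj, 0x01000000, 0x10000000);
	if (err)
		iommu_put(obj);

	return err;
}
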
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 8ce0de2..6dc1296 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -87,35 +87,43 @@
 }
 #define sgtable_ok(x)	(!!sgtable_len(x))
 
+static unsigned max_alignment(u32 addr)
+{
+	int i;
+	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+		;
+	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
 /*
  * calculate the optimal number sg elements from total bytes based on
  * iommu superpages
  */
-static unsigned int sgtable_nents(size_t bytes)
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
 {
-	int i;
-	unsigned int nr_entries;
-	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	unsigned nr_entries = 0, ent_sz;
 
 	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
 		pr_err("%s: wrong size %08x\n", __func__, bytes);
 		return 0;
 	}
 
-	nr_entries = 0;
-	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
-		if (bytes >= pagesize[i]) {
-			nr_entries += (bytes / pagesize[i]);
-			bytes %= pagesize[i];
-		}
+	while (bytes) {
+		ent_sz = max_alignment(da | pa);
+		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+		nr_entries++;
+		da += ent_sz;
+		pa += ent_sz;
+		bytes -= ent_sz;
 	}
-	BUG_ON(bytes);
 
 	return nr_entries;
 }
 
 /* allocate and initialize sg_table header(a kind of 'superblock') */
-static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+							u32 da, u32 pa)
 {
 	unsigned int nr_entries;
 	int err;
@@ -127,9 +135,8 @@
 	if (!IS_ALIGNED(bytes, PAGE_SIZE))
 		return ERR_PTR(-EINVAL);
 
-	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
-	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
-		nr_entries = sgtable_nents(bytes);
+	if (flags & IOVMF_LINEAR) {
+		nr_entries = sgtable_nents(bytes, da, pa);
 		if (!nr_entries)
 			return ERR_PTR(-EINVAL);
 	} else
@@ -273,13 +280,14 @@
 	alignement = PAGE_SIZE;
 
 	if (flags & IOVMF_DA_ANON) {
-		/*
-		 * Reserve the first page for NULL
-		 */
-		start = PAGE_SIZE;
+		start = obj->da_start;
+
 		if (flags & IOVMF_LINEAR)
 			alignement = iopgsz_max(bytes);
 		start = roundup(start, alignement);
+	} else if (start < obj->da_start || start > obj->da_end ||
+					obj->da_end - start < bytes) {
+		return ERR_PTR(-EINVAL);
 	}
 
 	tmp = NULL;
@@ -289,19 +297,19 @@
 	prev_end = 0;
 	list_for_each_entry(tmp, &obj->mmap, list) {
 
-		if (prev_end >= start)
+		if (prev_end > start)
 			break;
 
-		if (start + bytes < tmp->da_start)
+		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
 			goto found;
 
-		if (flags & IOVMF_DA_ANON)
+		if (tmp->da_end >= start && flags & IOVMF_DA_ANON)
 			start = roundup(tmp->da_end + 1, alignement);
 
 		prev_end = tmp->da_end;
 	}
 
-	if ((start > prev_end) && (ULONG_MAX - start >= bytes))
+	if ((start >= prev_end) && (obj->da_end - start >= bytes))
 		goto found;
 
 	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
@@ -409,7 +417,8 @@
 	BUG_ON(!sgt);
 }
 
-static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
+								size_t len)
 {
 	unsigned int i;
 	struct scatterlist *sg;
@@ -418,9 +427,10 @@
 	va = phys_to_virt(pa);
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes;
+		unsigned bytes;
 
-		bytes = iopgsz_max(len);
+		bytes = max_alignment(da | pa);
+		bytes = min_t(unsigned, bytes, iopgsz_max(len));
 
 		BUG_ON(!iopgsz_ok(bytes));
 
@@ -429,6 +439,7 @@
 		 * 'pa' is cotinuous(linear).
 		 */
 		pa += bytes;
+		da += bytes;
 		len -= bytes;
 	}
 	BUG_ON(len);
@@ -695,18 +706,18 @@
 	if (!va)
 		return -ENOMEM;
 
-	sgt = sgtable_alloc(bytes, flags);
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_ALLOC;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	sgt = sgtable_alloc(bytes, flags, da, 0);
 	if (IS_ERR(sgt)) {
 		da = PTR_ERR(sgt);
 		goto err_sgt_alloc;
 	}
 	sgtable_fill_vmalloc(sgt, va);
 
-	flags &= IOVMF_HW_MASK;
-	flags |= IOVMF_DISCONT;
-	flags |= IOVMF_ALLOC;
-	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
-
 	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		goto err_iommu_vmap;
@@ -746,11 +757,11 @@
 {
 	struct sg_table *sgt;
 
-	sgt = sgtable_alloc(bytes, flags);
+	sgt = sgtable_alloc(bytes, flags, da, pa);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
-	sgtable_fill_kmalloc(sgt, pa, bytes);
+	sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
 	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da)) {
@@ -811,7 +822,7 @@
 	struct sg_table *sgt;
 	typedef void (*func_t)(const void *);
 
-	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
+	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
 			    IOVMF_LINEAR | IOVMF_MMIO);
 	if (!sgt)
 		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
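
The reworked sgtable_nents() no longer assumes it may always use the largest page that fits: at every step it takes the biggest OMAP IOMMU page size that both the device address and the physical address are aligned to, which is what lets fixed device addresses and kmalloc-backed areas use superpages safely. A standalone sketch of that walk in plain C (outside the kernel; iopgsz_max() is stood in by a local helper, and the input length is assumed to be a multiple of 4 KiB) shows how alignment drives the entry count.

#include <stdio.h>

static const unsigned int pagesize[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };
#define NR_SIZES (sizeof(pagesize) / sizeof(pagesize[0]))

/* Largest supported page size that 'addr' is aligned to. */
static unsigned int max_alignment(unsigned int addr)
{
	unsigned int i;

	for (i = 0; i < NR_SIZES && (addr & (pagesize[i] - 1)); i++)
		;
	return i < NR_SIZES ? pagesize[i] : 0;
}

/* Largest supported page size that still fits in 'bytes' (stand-in for iopgsz_max()). */
static unsigned int max_pgsz_fitting(unsigned int bytes)
{
	unsigned int i;

	for (i = 0; i < NR_SIZES && bytes < pagesize[i]; i++)
		;
	return i < NR_SIZES ? pagesize[i] : 0;
}

static unsigned int nents(unsigned int bytes, unsigned int da, unsigned int pa)
{
	unsigned int n = 0, ent, fit;

	while (bytes) {
		fit = max_pgsz_fitting(bytes);
		ent = max_alignment(da | pa);
		if (ent > fit)
			ent = fit;
		n++;
		da += ent;
		pa += ent;
		bytes -= ent;
	}
	return n;
}

int main(void)
{
	/* 17 MiB mapping: alignment decides how many scatterlist entries are needed. */
	printf("16M-aligned: %u entries\n", nents(17 << 20, 0x00000000, 0x00000000));
	printf("1M-aligned:  %u entries\n", nents(17 << 20, 0x00100000, 0x00100000));
	return 0;
}

With 16 MiB alignment the 17 MiB region collapses to two entries (one 16M super-section plus one 1M section); with only 1 MiB alignment it becomes seventeen 1M sections, which the old length-only computation would have undercounted.
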
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index d2fafb8..459b319 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -28,12 +28,12 @@
 #include <linux/slab.h>
 #include <linux/kfifo.h>
 #include <linux/err.h>
+#include <linux/notifier.h>
 
 #include <plat/mailbox.h>
 
 static struct workqueue_struct *mboxd;
 static struct omap_mbox **mboxes;
-static bool rq_full;
 
 static int mbox_configured;
 static DEFINE_MUTEX(mbox_configured_lock);
@@ -93,20 +93,25 @@
 	struct omap_mbox_queue *mq = mbox->txq;
 	int ret = 0, len;
 
-	spin_lock(&mq->lock);
+	spin_lock_bh(&mq->lock);
 
 	if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
+	if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) {
+		mbox_fifo_write(mbox, msg);
+		goto out;
+	}
+
 	len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
 	WARN_ON(len != sizeof(msg));
 
 	tasklet_schedule(&mbox->txq->tasklet);
 
 out:
-	spin_unlock(&mq->lock);
+	spin_unlock_bh(&mq->lock);
 	return ret;
 }
 EXPORT_SYMBOL(omap_mbox_msg_send);
@@ -146,8 +151,14 @@
 		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
 		WARN_ON(len != sizeof(msg));
 
-		if (mq->callback)
-			mq->callback((void *)msg);
+		blocking_notifier_call_chain(&mq->mbox->notifier, len,
+								(void *)msg);
+		spin_lock_irq(&mq->lock);
+		if (mq->full) {
+			mq->full = false;
+			omap_mbox_enable_irq(mq->mbox, IRQ_RX);
+		}
+		spin_unlock_irq(&mq->lock);
 	}
 }
 
@@ -170,7 +181,7 @@
 	while (!mbox_fifo_empty(mbox)) {
 		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
 			omap_mbox_disable_irq(mbox, IRQ_RX);
-			rq_full = true;
+			mq->full = true;
 			goto nomem;
 		}
 
@@ -239,73 +250,77 @@
 	int ret = 0;
 	struct omap_mbox_queue *mq;
 
-	if (mbox->ops->startup) {
-		mutex_lock(&mbox_configured_lock);
-		if (!mbox_configured)
+	mutex_lock(&mbox_configured_lock);
+	if (!mbox_configured++) {
+		if (likely(mbox->ops->startup)) {
 			ret = mbox->ops->startup(mbox);
+			if (unlikely(ret))
+				goto fail_startup;
+		} else
+			goto fail_startup;
+	}
 
-		if (ret) {
-			mutex_unlock(&mbox_configured_lock);
-			return ret;
+	if (!mbox->use_count++) {
+		ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
+							mbox->name, mbox);
+		if (unlikely(ret)) {
+			pr_err("failed to register mailbox interrupt:%d\n",
+									ret);
+			goto fail_request_irq;
 		}
-		mbox_configured++;
-		mutex_unlock(&mbox_configured_lock);
-	}
+		mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
+		if (!mq) {
+			ret = -ENOMEM;
+			goto fail_alloc_txq;
+		}
+		mbox->txq = mq;
 
-	ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
-				mbox->name, mbox);
-	if (ret) {
-		printk(KERN_ERR
-			"failed to register mailbox interrupt:%d\n", ret);
-		goto fail_request_irq;
+		mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
+		if (!mq) {
+			ret = -ENOMEM;
+			goto fail_alloc_rxq;
+		}
+		mbox->rxq = mq;
+		mq->mbox = mbox;
 	}
-
-	mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
-	if (!mq) {
-		ret = -ENOMEM;
-		goto fail_alloc_txq;
-	}
-	mbox->txq = mq;
-
-	mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
-	if (!mq) {
-		ret = -ENOMEM;
-		goto fail_alloc_rxq;
-	}
-	mbox->rxq = mq;
-
+	mutex_unlock(&mbox_configured_lock);
 	return 0;
 
- fail_alloc_rxq:
+fail_alloc_rxq:
 	mbox_queue_free(mbox->txq);
- fail_alloc_txq:
+fail_alloc_txq:
 	free_irq(mbox->irq, mbox);
- fail_request_irq:
+fail_request_irq:
 	if (mbox->ops->shutdown)
 		mbox->ops->shutdown(mbox);
-
+	mbox->use_count--;
+fail_startup:
+	mbox_configured--;
+	mutex_unlock(&mbox_configured_lock);
 	return ret;
 }
 
 static void omap_mbox_fini(struct omap_mbox *mbox)
 {
-	free_irq(mbox->irq, mbox);
-	tasklet_kill(&mbox->txq->tasklet);
-	flush_work(&mbox->rxq->work);
-	mbox_queue_free(mbox->txq);
-	mbox_queue_free(mbox->rxq);
+	mutex_lock(&mbox_configured_lock);
 
-	if (mbox->ops->shutdown) {
-		mutex_lock(&mbox_configured_lock);
-		if (mbox_configured > 0)
-			mbox_configured--;
-		if (!mbox_configured)
-			mbox->ops->shutdown(mbox);
-		mutex_unlock(&mbox_configured_lock);
+	if (!--mbox->use_count) {
+		free_irq(mbox->irq, mbox);
+		tasklet_kill(&mbox->txq->tasklet);
+		flush_work(&mbox->rxq->work);
+		mbox_queue_free(mbox->txq);
+		mbox_queue_free(mbox->rxq);
 	}
+
+	if (likely(mbox->ops->shutdown)) {
+		if (!--mbox_configured)
+			mbox->ops->shutdown(mbox);
+	}
+
+	mutex_unlock(&mbox_configured_lock);
 }
 
-struct omap_mbox *omap_mbox_get(const char *name)
+struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
 {
 	struct omap_mbox *mbox;
 	int ret;
@@ -324,12 +339,16 @@
 	if (ret)
 		return ERR_PTR(-ENODEV);
 
+	if (nb)
+		blocking_notifier_chain_register(&mbox->notifier, nb);
+
 	return mbox;
 }
 EXPORT_SYMBOL(omap_mbox_get);
 
-void omap_mbox_put(struct omap_mbox *mbox)
+void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
 {
+	blocking_notifier_chain_unregister(&mbox->notifier, nb);
 	omap_mbox_fini(mbox);
 }
 EXPORT_SYMBOL(omap_mbox_put);
@@ -353,6 +372,8 @@
 			ret = PTR_ERR(mbox->dev);
 			goto err_out;
 		}
+
+		BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
 	}
 	return 0;
 
@@ -391,7 +412,8 @@
 
 	/* kfifo size sanity check: alignment and minimal size */
 	mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
-	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, sizeof(mbox_msg_t));
+	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
+							sizeof(mbox_msg_t));
 
 	return 0;
 }
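
With the per-queue callback gone, receivers subscribe to the mailbox's blocking notifier chain through the extended omap_mbox_get()/omap_mbox_put() prototypes above. A hedged sketch of a client follows; the "mailbox-1" name is illustrative, and omap_mbox_msg_send() is assumed to keep its existing <plat/mailbox.h> prototype taking the mailbox and an mbox_msg_t.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <plat/mailbox.h>

static int example_mbox_callback(struct notifier_block *nb,
				 unsigned long len, void *msg)
{
	/* Received messages now arrive through the notifier chain. */
	pr_info("mbox rx: 0x%lx\n", (unsigned long)msg);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_mbox_callback,
};

static struct omap_mbox *example_mbox;

static int example_mbox_open(void)
{
	example_mbox = omap_mbox_get("mailbox-1", &example_nb);
	if (IS_ERR(example_mbox))
		return PTR_ERR(example_mbox);

	return omap_mbox_msg_send(example_mbox, 0xdeadbeef);
}

static void example_mbox_close(void)
{
	omap_mbox_put(example_mbox, &example_nb);
}
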
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index eac4b97..b5a6e17 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -28,6 +28,8 @@
 #include <plat/dma.h>
 #include <plat/mcbsp.h>
 
+/* XXX These "sideways" includes are a sign that something is wrong */
+#include "../mach-omap2/cm2xxx_3xxx.h"
 #include "../mach-omap2/cm-regbits-34xx.h"
 
 struct omap_mcbsp **mcbsp_ptr;
@@ -234,9 +236,9 @@
 	 * Sidetone uses McBSP ICLK - which must not idle when sidetones
 	 * are enabled or sidetones start sounding ugly.
 	 */
-	w = cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
+	w = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
 	w &= ~(1 << (mcbsp->id - 2));
-	cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE);
 
 	/* Enable McBSP Sidetone */
 	w = MCBSP_READ(mcbsp, SSELCR);
@@ -263,9 +265,9 @@
 	w = MCBSP_READ(mcbsp, SSELCR);
 	MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
 
-	w = cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
+	w = omap2_cm_read_mod_reg(OMAP3430_PER_MOD, CM_AUTOIDLE);
 	w |= 1 << (mcbsp->id - 2);
-	cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE);
+	omap2_cm_write_mod_reg(w, OMAP3430_PER_MOD, CM_AUTOIDLE);
 }
 
 static void omap_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir)
@@ -755,7 +757,7 @@
 		goto err_kfree;
 	}
 
-	mcbsp->free = 0;
+	mcbsp->free = false;
 	mcbsp->reg_cache = reg_cache;
 	spin_unlock(&mcbsp->lock);
 
@@ -815,7 +817,7 @@
 	clk_disable(mcbsp->iclk);
 
 	spin_lock(&mcbsp->lock);
-	mcbsp->free = 1;
+	mcbsp->free = true;
 	mcbsp->reg_cache = NULL;
 err_kfree:
 	spin_unlock(&mcbsp->lock);
@@ -858,7 +860,7 @@
 	if (mcbsp->free)
 		dev_err(mcbsp->dev, "McBSP%d was not reserved\n", mcbsp->id);
 	else
-		mcbsp->free = 1;
+		mcbsp->free = true;
 	mcbsp->reg_cache = NULL;
 	spin_unlock(&mcbsp->lock);
 
@@ -1771,7 +1773,7 @@
 
 	spin_lock_init(&mcbsp->lock);
 	mcbsp->id = id + 1;
-	mcbsp->free = 1;
+	mcbsp->free = true;
 	mcbsp->dma_tx_lch = -1;
 	mcbsp->dma_rx_lch = -1;
 
@@ -1836,17 +1838,11 @@
 
 		omap34xx_device_exit(mcbsp);
 
-		clk_disable(mcbsp->fclk);
-		clk_disable(mcbsp->iclk);
 		clk_put(mcbsp->fclk);
 		clk_put(mcbsp->iclk);
 
 		iounmap(mcbsp->io_base);
-
-		mcbsp->fclk = NULL;
-		mcbsp->iclk = NULL;
-		mcbsp->free = 0;
-		mcbsp->dev = NULL;
+		kfree(mcbsp);
 	}
 
 	return 0;
diff --git a/arch/arm/plat-omap/omap-pm-noop.c b/arch/arm/plat-omap/omap-pm-noop.c
index e129ce8..b0471bb2 100644
--- a/arch/arm/plat-omap/omap-pm-noop.c
+++ b/arch/arm/plat-omap/omap-pm-noop.c
@@ -20,15 +20,14 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 #include <linux/device.h>
+#include <linux/platform_device.h>
 
 /* Interface documentation is in mach/omap-pm.h */
 #include <plat/omap-pm.h>
+#include <plat/omap_device.h>
 
-#include <plat/powerdomain.h>
-
-struct omap_opp *dsp_opps;
-struct omap_opp *mpu_opps;
-struct omap_opp *l3_opps;
+static bool off_mode_enabled;
+static u32 dummy_context_loss_counter;
 
 /*
  * Device-driver-originated constraints (via board-*.c files)
@@ -284,37 +283,70 @@
 	return 0;
 }
 
+/**
+ * omap_pm_enable_off_mode - notify OMAP PM that off-mode is enabled
+ *
+ * Intended for use only by OMAP PM core code to notify this layer
+ * that off mode has been enabled.
+ */
+void omap_pm_enable_off_mode(void)
+{
+	off_mode_enabled = true;
+}
+
+/**
+ * omap_pm_disable_off_mode - notify OMAP PM that off-mode is disabled
+ *
+ * Intended for use only by OMAP PM core code to notify this layer
+ * that off mode has been disabled.
+ */
+void omap_pm_disable_off_mode(void)
+{
+	off_mode_enabled = false;
+}
+
 /*
  * Device context loss tracking
  */
 
-int omap_pm_get_dev_context_loss_count(struct device *dev)
+#ifdef CONFIG_ARCH_OMAP2PLUS
+
+u32 omap_pm_get_dev_context_loss_count(struct device *dev)
 {
-	if (!dev) {
-		WARN_ON(1);
-		return -EINVAL;
-	};
+	struct platform_device *pdev = to_platform_device(dev);
+	u32 count;
 
-	pr_debug("OMAP PM: returning context loss count for dev %s\n",
-		 dev_name(dev));
+	if (WARN_ON(!dev))
+		return 0;
 
-	/*
-	 * Map the device to the powerdomain.  Return the powerdomain
-	 * off counter.
-	 */
+	if (dev->parent == &omap_device_parent) {
+		count = omap_device_get_context_loss_count(pdev);
+	} else {
+		WARN_ONCE(off_mode_enabled, "omap_pm: using dummy context loss counter; device %s should be converted to omap_device",
+			  dev_name(dev));
+		if (off_mode_enabled)
+			dummy_context_loss_counter++;
+		count = dummy_context_loss_counter;
+	}
 
-	return 0;
+	pr_debug("OMAP PM: context loss count for dev %s = %d\n",
+		 dev_name(dev), count);
+
+	return count;
 }
 
+#else
+
+u32 omap_pm_get_dev_context_loss_count(struct device *dev)
+{
+	return dummy_context_loss_counter;
+}
+
+#endif
 
 /* Should be called before clk framework init */
-int __init omap_pm_if_early_init(struct omap_opp *mpu_opp_table,
-				 struct omap_opp *dsp_opp_table,
-				 struct omap_opp *l3_opp_table)
+int __init omap_pm_if_early_init(void)
 {
-	mpu_opps = mpu_opp_table;
-	dsp_opps = dsp_opp_table;
-	l3_opps = l3_opp_table;
 	return 0;
 }
 
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index abe933c..57adb27 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -280,6 +280,34 @@
 /* Public functions for use by core code */
 
 /**
+ * omap_device_get_context_loss_count - get lost context count
+ * @pdev: struct platform_device *
+ *
+ * Using the primary hwmod, query the context loss count for this
+ * device.
+ *
+ * Callers should consider context for this device lost any time this
+ * function returns a value different than the value the caller got
+ * the last time it called this function.
+ *
+ * If any hwmods exist for the omap_device associated with @pdev,
+ * return the context loss counter for that hwmod, otherwise return
+ * zero.
+ */
+u32 omap_device_get_context_loss_count(struct platform_device *pdev)
+{
+	struct omap_device *od;
+	u32 ret = 0;
+
+	od = _find_by_pdev(pdev);
+
+	if (od->hwmods_cnt)
+		ret = omap_hwmod_get_context_loss_count(od->hwmods[0]);
+
+	return ret;
+}
+
+/**
  * omap_device_count_resources - count number of struct resource entries needed
  * @od: struct omap_device *
  *
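
The kernel-doc for omap_device_get_context_loss_count() spells out the contract: a caller saves the returned count and treats any later change as "context lost". A driver-side sketch of that idiom follows; the driver data layout and the restore helper are hypothetical, and only omap_pm_get_dev_context_loss_count() comes from this series.

#include <linux/device.h>
#include <plat/omap-pm.h>

struct example_drvdata {
	u32	ctx_loss_cnt;
	/* ... saved register contents ... */
};

static void example_restore_context(struct example_drvdata *dd)
{
	/* reprogram the hardware from the saved registers (hypothetical) */
}

static void example_resume(struct device *dev)
{
	struct example_drvdata *dd = dev_get_drvdata(dev);
	u32 cnt = omap_pm_get_dev_context_loss_count(dev);

	if (cnt != dd->ctx_loss_cnt) {
		/* Context was lost while the device was idle: reprogram it. */
		example_restore_context(dd);
		dd->ctx_loss_cnt = cnt;
	}
}
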
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 74dac41..e26e504 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -33,23 +33,21 @@
 
 #include "sram.h"
 #include "fb.h"
+
+/* XXX These "sideways" includes are a sign that something is wrong */
 #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-# include "../mach-omap2/prm.h"
-# include "../mach-omap2/cm.h"
+# include "../mach-omap2/prm2xxx_3xxx.h"
 # include "../mach-omap2/sdrc.h"
 #endif
 
 #define OMAP1_SRAM_PA		0x20000000
 #define OMAP1_SRAM_VA		VMALLOC_END
-#define OMAP2_SRAM_PA		0x40200000
-#define OMAP2_SRAM_PUB_PA	0x4020f800
+#define OMAP2_SRAM_PUB_PA	(OMAP2_SRAM_PA + 0xf800)
 #define OMAP2_SRAM_VA		0xfe400000
 #define OMAP2_SRAM_PUB_VA	(OMAP2_SRAM_VA + 0x800)
-#define OMAP3_SRAM_PA           0x40200000
 #define OMAP3_SRAM_VA           0xfe400000
-#define OMAP3_SRAM_PUB_PA       0x40208000
+#define OMAP3_SRAM_PUB_PA       (OMAP3_SRAM_PA + 0x8000)
 #define OMAP3_SRAM_PUB_VA       (OMAP3_SRAM_VA + 0x8000)
-#define OMAP4_SRAM_PA		0x40300000
 #define OMAP4_SRAM_VA		0xfe400000
 #define OMAP4_SRAM_PUB_PA	(OMAP4_SRAM_PA + 0x4000)
 #define OMAP4_SRAM_PUB_VA	(OMAP4_SRAM_VA + 0x4000)
@@ -270,7 +268,7 @@
 	_omap_sram_reprogram_clock(dpllctl, ckctl);
 }
 
-int __init omap1_sram_init(void)
+static int __init omap1_sram_init(void)
 {
 	_omap_sram_reprogram_clock =
 			omap_sram_push(omap1_sram_reprogram_clock,
diff --git a/arch/arm/plat-pxa/include/plat/ssp.h b/arch/arm/plat-pxa/include/plat/ssp.h
deleted file mode 100644
index fe43150..0000000
--- a/arch/arm/plat-pxa/include/plat/ssp.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *  ssp.h
- *
- *  Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This driver supports the following PXA CPU/SSP ports:-
- *
- *       PXA250     SSP
- *       PXA255     SSP, NSSP
- *       PXA26x     SSP, NSSP, ASSP
- *       PXA27x     SSP1, SSP2, SSP3
- *       PXA3xx     SSP1, SSP2, SSP3, SSP4
- */
-
-#ifndef __ASM_ARCH_SSP_H
-#define __ASM_ARCH_SSP_H
-
-#include <linux/list.h>
-#include <linux/io.h>
-
-/*
- * SSP Serial Port Registers
- * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different.
- * PXA255, PXA26x and PXA27x have extra ports, registers and bits.
- */
-
-#define SSCR0		(0x00)  /* SSP Control Register 0 */
-#define SSCR1		(0x04)  /* SSP Control Register 1 */
-#define SSSR		(0x08)  /* SSP Status Register */
-#define SSITR		(0x0C)  /* SSP Interrupt Test Register */
-#define SSDR		(0x10)  /* SSP Data Write/Data Read Register */
-
-#define SSTO		(0x28)  /* SSP Time Out Register */
-#define SSPSP		(0x2C)  /* SSP Programmable Serial Protocol */
-#define SSTSA		(0x30)  /* SSP Tx Timeslot Active */
-#define SSRSA		(0x34)  /* SSP Rx Timeslot Active */
-#define SSTSS		(0x38)  /* SSP Timeslot Status */
-#define SSACD		(0x3C)  /* SSP Audio Clock Divider */
-#define SSACDD		(0x40)	/* SSP Audio Clock Dither Divider */
-
-/* Common PXA2xx bits first */
-#define SSCR0_DSS	(0x0000000f)	/* Data Size Select (mask) */
-#define SSCR0_DataSize(x)  ((x) - 1)	/* Data Size Select [4..16] */
-#define SSCR0_FRF	(0x00000030)	/* FRame Format (mask) */
-#define SSCR0_Motorola	(0x0 << 4)	/* Motorola's Serial Peripheral Interface (SPI) */
-#define SSCR0_TI	(0x1 << 4)	/* Texas Instruments' Synchronous Serial Protocol (SSP) */
-#define SSCR0_National	(0x2 << 4)	/* National Microwire */
-#define SSCR0_ECS	(1 << 6)	/* External clock select */
-#define SSCR0_SSE	(1 << 7)	/* Synchronous Serial Port Enable */
-#define SSCR0_SCR(x)	((x) << 8)	/* Serial Clock Rate (mask) */
-
-/* PXA27x, PXA3xx */
-#define SSCR0_EDSS	(1 << 20)	/* Extended data size select */
-#define SSCR0_NCS	(1 << 21)	/* Network clock select */
-#define SSCR0_RIM	(1 << 22)	/* Receive FIFO overrrun interrupt mask */
-#define SSCR0_TUM	(1 << 23)	/* Transmit FIFO underrun interrupt mask */
-#define SSCR0_FRDC	(0x07000000)	/* Frame rate divider control (mask) */
-#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24)	/* Time slots per frame [1..8] */
-#define SSCR0_FPCKE	(1 << 29)	/* FIFO packing enable */
-#define SSCR0_ACS	(1 << 30)	/* Audio clock select */
-#define SSCR0_MOD	(1 << 31)	/* Mode (normal or network) */
-
-
-#define SSCR1_RIE	(1 << 0)	/* Receive FIFO Interrupt Enable */
-#define SSCR1_TIE	(1 << 1)	/* Transmit FIFO Interrupt Enable */
-#define SSCR1_LBM	(1 << 2)	/* Loop-Back Mode */
-#define SSCR1_SPO	(1 << 3)	/* Motorola SPI SSPSCLK polarity setting */
-#define SSCR1_SPH	(1 << 4)	/* Motorola SPI SSPSCLK phase setting */
-#define SSCR1_MWDS	(1 << 5)	/* Microwire Transmit Data Size */
-#define SSCR1_TFT	(0x000003c0)	/* Transmit FIFO Threshold (mask) */
-#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
-#define SSCR1_RFT	(0x00003c00)	/* Receive FIFO Threshold (mask) */
-#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
-
-#define SSSR_TNF	(1 << 2)	/* Transmit FIFO Not Full */
-#define SSSR_RNE	(1 << 3)	/* Receive FIFO Not Empty */
-#define SSSR_BSY	(1 << 4)	/* SSP Busy */
-#define SSSR_TFS	(1 << 5)	/* Transmit FIFO Service Request */
-#define SSSR_RFS	(1 << 6)	/* Receive FIFO Service Request */
-#define SSSR_ROR	(1 << 7)	/* Receive FIFO Overrun */
-
-
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
-#define SSCR0_TISSP		(1 << 4)	/* TI Sync Serial Protocol */
-#define SSCR0_PSP		(3 << 4)	/* PSP - Programmable Serial Protocol */
-#define SSCR1_TTELP		(1 << 31)	/* TXD Tristate Enable Last Phase */
-#define SSCR1_TTE		(1 << 30)	/* TXD Tristate Enable */
-#define SSCR1_EBCEI		(1 << 29)	/* Enable Bit Count Error interrupt */
-#define SSCR1_SCFR		(1 << 28)	/* Slave Clock free Running */
-#define SSCR1_ECRA		(1 << 27)	/* Enable Clock Request A */
-#define SSCR1_ECRB		(1 << 26)	/* Enable Clock request B */
-#define SSCR1_SCLKDIR		(1 << 25)	/* Serial Bit Rate Clock Direction */
-#define SSCR1_SFRMDIR		(1 << 24)	/* Frame Direction */
-#define SSCR1_RWOT		(1 << 23)	/* Receive Without Transmit */
-#define SSCR1_TRAIL		(1 << 22)	/* Trailing Byte */
-#define SSCR1_TSRE		(1 << 21)	/* Transmit Service Request Enable */
-#define SSCR1_RSRE		(1 << 20)	/* Receive Service Request Enable */
-#define SSCR1_TINTE		(1 << 19)	/* Receiver Time-out Interrupt enable */
-#define SSCR1_PINTE		(1 << 18)	/* Peripheral Trailing Byte Interupt Enable */
-#define SSCR1_IFS		(1 << 16)	/* Invert Frame Signal */
-#define SSCR1_STRF		(1 << 15)	/* Select FIFO or EFWR */
-#define SSCR1_EFWR		(1 << 14)	/* Enable FIFO Write/Read */
-
-#define SSSR_BCE		(1 << 23)	/* Bit Count Error */
-#define SSSR_CSS		(1 << 22)	/* Clock Synchronisation Status */
-#define SSSR_TUR		(1 << 21)	/* Transmit FIFO Under Run */
-#define SSSR_EOC		(1 << 20)	/* End Of Chain */
-#define SSSR_TINT		(1 << 19)	/* Receiver Time-out Interrupt */
-#define SSSR_PINT		(1 << 18)	/* Peripheral Trailing Byte Interrupt */
-
-
-#define SSPSP_SCMODE(x)		((x) << 0)	/* Serial Bit Rate Clock Mode */
-#define SSPSP_SFRMP		(1 << 2)	/* Serial Frame Polarity */
-#define SSPSP_ETDS		(1 << 3)	/* End of Transfer data State */
-#define SSPSP_STRTDLY(x)	((x) << 4)	/* Start Delay */
-#define SSPSP_DMYSTRT(x)	((x) << 7)	/* Dummy Start */
-#define SSPSP_SFRMDLY(x)	((x) << 9)	/* Serial Frame Delay */
-#define SSPSP_SFRMWDTH(x)	((x) << 16)	/* Serial Frame Width */
-#define SSPSP_DMYSTOP(x)	((x) << 23)	/* Dummy Stop */
-#define SSPSP_FSRT		(1 << 25)	/* Frame Sync Relative Timing */
-
-/* PXA3xx */
-#define SSPSP_EDMYSTRT(x)	((x) << 26)     /* Extended Dummy Start */
-#define SSPSP_EDMYSTOP(x)	((x) << 28)     /* Extended Dummy Stop */
-#define SSPSP_TIMING_MASK	(0x7f8001f0)
-
-#define SSACD_SCDB		(1 << 3)	/* SSPSYSCLK Divider Bypass */
-#define SSACD_ACPS(x)		((x) << 4)	/* Audio clock PLL select */
-#define SSACD_ACDS(x)		((x) << 0)	/* Audio clock divider select */
-#define SSACD_SCDX8		(1 << 7)	/* SYSCLK division ratio select */
-
-enum pxa_ssp_type {
-	SSP_UNDEFINED = 0,
-	PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
-	PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
-	PXA27x_SSP,
-	PXA168_SSP,
-};
-
-struct ssp_device {
-	struct platform_device *pdev;
-	struct list_head	node;
-
-	struct clk	*clk;
-	void __iomem	*mmio_base;
-	unsigned long	phys_base;
-
-	const char	*label;
-	int		port_id;
-	int		type;
-	int		use_count;
-	int		irq;
-	int		drcmr_rx;
-	int		drcmr_tx;
-};
-
-/**
- * pxa_ssp_write_reg - Write to a SSP register
- *
- * @dev: SSP device to access
- * @reg: Register to write to
- * @val: Value to be written.
- */
-static inline void pxa_ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val)
-{
-	__raw_writel(val, dev->mmio_base + reg);
-}
-
-/**
- * pxa_ssp_read_reg - Read from a SSP register
- *
- * @dev: SSP device to access
- * @reg: Register to read from
- */
-static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
-{
-	return __raw_readl(dev->mmio_base + reg);
-}
-
-struct ssp_device *pxa_ssp_request(int port, const char *label);
-void pxa_ssp_free(struct ssp_device *);
-#endif /* __ASM_ARCH_SSP_H */
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index c6357e5..58b7980 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -28,11 +28,11 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/io.h>
 
 #include <asm/irq.h>
 #include <mach/hardware.h>
-#include <plat/ssp.h>
 
 static DEFINE_MUTEX(ssp_lock);
 static LIST_HEAD(ssp_list);
diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c
index 1ecc15b..25a8fc7 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq.c
@@ -21,7 +21,6 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/sysdev.h>
-#include <linux/kobject.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 2f91057..8a42bc4 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -259,21 +259,6 @@
 
 EXPORT_SYMBOL(s3c_device_iis);
 
-/* ASoC PCM DMA */
-
-static u64 s3c_device_audio_dmamask = 0xffffffffUL;
-
-struct platform_device s3c_device_pcm = {
-	.name		  = "s3c24xx-pcm-audio",
-	.id		  = -1,
-	.dev              = {
-		.dma_mask = &s3c_device_audio_dmamask,
-		.coherent_dma_mask = 0xffffffffUL
-	}
-};
-
-EXPORT_SYMBOL(s3c_device_pcm);
-
 /* RTC */
 
 static struct resource s3c_rtc_resource[] = {
@@ -496,8 +481,10 @@
 	},
 };
 
+static u64 s3c_device_audio_dmamask = 0xffffffffUL;
+
 struct platform_device s3c_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c_ac97_resource),
 	.resource	  = s3c_ac97_resource,
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index afcce47..19d8a16 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -17,6 +17,7 @@
 obj-y				+= pwm-clock.o
 obj-y				+= gpio.o
 obj-y				+= gpio-config.o
+obj-y				+= dev-asocdma.o
 
 obj-$(CONFIG_SAMSUNG_GPIOLIB_4BIT)	+= gpiolib.o
 obj-$(CONFIG_SAMSUNG_CLKSRC)	+= clock-clksrc.o
diff --git a/arch/arm/plat-samsung/dev-asocdma.c b/arch/arm/plat-samsung/dev-asocdma.c
new file mode 100644
index 0000000..a068c4f
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-asocdma.c
@@ -0,0 +1,25 @@
+/* linux/arch/arm/plat-samsung/dev-asocdma.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <plat/devs.h>
+
+static u64 audio_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device samsung_asoc_dma = {
+	.name		  = "samsung-audio",
+	.id		  = -1,
+	.dev              = {
+		.dma_mask = &audio_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	}
+};
+EXPORT_SYMBOL(samsung_asoc_dma);
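
Boards that previously listed a per-SoC s3c_device_pcm now register this single shared DMA device. A hypothetical board-file fragment (the device list is illustrative; s3c_device_iis is just one example peer):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <plat/devs.h>

static struct platform_device *example_board_devices[] __initdata = {
	&samsung_asoc_dma,
	&s3c_device_iis,
};

static void __init example_board_init(void)
{
	platform_add_devices(example_board_devices,
			     ARRAY_SIZE(example_board_devices));
}
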
diff --git a/arch/arm/plat-samsung/include/plat/audio.h b/arch/arm/plat-samsung/include/plat/audio.h
index 7712ff6..a0826ed 100644
--- a/arch/arm/plat-samsung/include/plat/audio.h
+++ b/arch/arm/plat-samsung/include/plat/audio.h
@@ -25,10 +25,34 @@
 #define S5PC100_SPDIF_GPG3 1
 extern void s5pc100_spdif_setup_gpio(int);
 
+struct samsung_i2s {
+/* If the Primary DAI has 5.1 Channels */
+#define QUIRK_PRI_6CHAN		(1 << 0)
+/* If the I2S block has a Stereo Overlay Channel */
+#define QUIRK_SEC_DAI		(1 << 1)
+/*
+ * If the I2S block has no internal prescaler or MUX (I2SMOD[10] bit),
+ * the machine driver must provide a suitably configured clock to the I2S block.
+ */
+#define QUIRK_NO_MUXPSR		(1 << 2)
+#define QUIRK_NEED_RSTCLR	(1 << 3)
+	/* Quirks of the I2S controller */
+	u32 quirks;
+
+	/*
+	 * Array of clock names that can be used to generate I2S signals.
+	 * Also corresponds to clocks of I2SMOD[10]
+	 */
+	const char **src_clk;
+};
+
 /**
  * struct s3c_audio_pdata - common platform data for audio device drivers
  * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode
  */
 struct s3c_audio_pdata {
 	int (*cfg_gpio)(struct platform_device *);
+	union {
+		struct samsung_i2s i2s;
+	} type;
 };
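
A machine can now describe its I2S controller by filling the new union member. A hedged sketch follows; the quirk selection, the clock names and the GPIO helper are illustrative and not taken from any real board.

#include <linux/platform_device.h>
#include <plat/audio.h>

static int example_i2s_cfg_gpio(struct platform_device *pdev)
{
	/* mux the I2S pins here (hypothetical) */
	return 0;
}

static const char *example_i2s_src_clks[] = { "iis", "fout_epll" };

static struct s3c_audio_pdata example_i2s_pdata = {
	.cfg_gpio = example_i2s_cfg_gpio,
	.type = {
		.i2s = {
			.quirks		= QUIRK_PRI_6CHAN | QUIRK_NEED_RSTCLR,
			.src_clk	= example_i2s_src_clks,
		},
	},
};
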
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2d82a6c..e9e3b6e 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -32,7 +32,7 @@
 extern struct platform_device s3c64xx_device_spi0;
 extern struct platform_device s3c64xx_device_spi1;
 
-extern struct platform_device s3c_device_pcm;
+extern struct platform_device samsung_asoc_dma;
 
 extern struct platform_device s3c64xx_device_pcm0;
 extern struct platform_device s3c64xx_device_pcm1;
@@ -96,6 +96,15 @@
 extern struct platform_device s5pv210_device_iis2;
 extern struct platform_device s5pv210_device_spdif;
 
+extern struct platform_device s5pv310_device_ac97;
+extern struct platform_device s5pv310_device_pcm0;
+extern struct platform_device s5pv310_device_pcm1;
+extern struct platform_device s5pv310_device_pcm2;
+extern struct platform_device s5pv310_device_i2s0;
+extern struct platform_device s5pv310_device_i2s1;
+extern struct platform_device s5pv310_device_i2s2;
+extern struct platform_device s5pv310_device_spdif;
+
 extern struct platform_device s5p6442_device_pcm0;
 extern struct platform_device s5p6442_device_pcm1;
 extern struct platform_device s5p6442_device_iis0;
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 27cfca5..5bf3f2f 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -355,7 +355,7 @@
 	s3c_pm_check_cleanup();
 }
 
-static struct platform_suspend_ops s3c_pm_ops = {
+static const struct platform_suspend_ops s3c_pm_ops = {
 	.enter		= s3c_pm_enter,
 	.prepare	= s3c_pm_prepare,
 	.finish		= s3c_pm_finish,
diff --git a/arch/arm/plat-spear/include/plat/keyboard.h b/arch/arm/plat-spear/include/plat/keyboard.h
new file mode 100644
index 0000000..68b5394
--- /dev/null
+++ b/arch/arm/plat-spear/include/plat/keyboard.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2010 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_KEYBOARD_H
+#define __PLAT_KEYBOARD_H
+
+#include <linux/bitops.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/types.h>
+
+#define DECLARE_KEYMAP(_name) \
+int _name[] = { \
+	KEY(0, 0, KEY_ESC), \
+	KEY(0, 1, KEY_1), \
+	KEY(0, 2, KEY_2), \
+	KEY(0, 3, KEY_3), \
+	KEY(0, 4, KEY_4), \
+	KEY(0, 5, KEY_5), \
+	KEY(0, 6, KEY_6), \
+	KEY(0, 7, KEY_7), \
+	KEY(0, 8, KEY_8), \
+	KEY(1, 0, KEY_9), \
+	KEY(1, 1, KEY_MINUS), \
+	KEY(1, 2, KEY_EQUAL), \
+	KEY(1, 3, KEY_BACKSPACE), \
+	KEY(1, 4, KEY_TAB), \
+	KEY(1, 5, KEY_Q), \
+	KEY(1, 6, KEY_W), \
+	KEY(1, 7, KEY_E), \
+	KEY(1, 8, KEY_R), \
+	KEY(2, 0, KEY_T), \
+	KEY(2, 1, KEY_Y), \
+	KEY(2, 2, KEY_U), \
+	KEY(2, 3, KEY_I), \
+	KEY(2, 4, KEY_O), \
+	KEY(2, 5, KEY_P), \
+	KEY(2, 6, KEY_LEFTBRACE), \
+	KEY(2, 7, KEY_RIGHTBRACE), \
+	KEY(2, 8, KEY_ENTER), \
+	KEY(3, 0, KEY_LEFTCTRL), \
+	KEY(3, 1, KEY_A), \
+	KEY(3, 2, KEY_S), \
+	KEY(3, 3, KEY_D), \
+	KEY(3, 4, KEY_F), \
+	KEY(3, 5, KEY_G), \
+	KEY(3, 6, KEY_H), \
+	KEY(3, 7, KEY_J), \
+	KEY(3, 8, KEY_K), \
+	KEY(4, 0, KEY_L), \
+	KEY(4, 1, KEY_SEMICOLON), \
+	KEY(4, 2, KEY_APOSTROPHE), \
+	KEY(4, 3, KEY_GRAVE), \
+	KEY(4, 4, KEY_LEFTSHIFT), \
+	KEY(4, 5, KEY_BACKSLASH), \
+	KEY(4, 6, KEY_Z), \
+	KEY(4, 7, KEY_X), \
+	KEY(4, 8, KEY_C), \
+	KEY(5, 0, KEY_V), \
+	KEY(5, 1, KEY_B), \
+	KEY(5, 2, KEY_N), \
+	KEY(5, 3, KEY_M), \
+	KEY(5, 4, KEY_COMMA), \
+	KEY(5, 5, KEY_DOT), \
+	KEY(5, 6, KEY_SLASH), \
+	KEY(5, 7, KEY_RIGHTSHIFT), \
+	KEY(5, 8, KEY_KPASTERISK), \
+	KEY(6, 0, KEY_LEFTALT), \
+	KEY(6, 1, KEY_SPACE), \
+	KEY(6, 2, KEY_CAPSLOCK), \
+	KEY(6, 3, KEY_F1), \
+	KEY(6, 4, KEY_F2), \
+	KEY(6, 5, KEY_F3), \
+	KEY(6, 6, KEY_F4), \
+	KEY(6, 7, KEY_F5), \
+	KEY(6, 8, KEY_F6), \
+	KEY(7, 0, KEY_F7), \
+	KEY(7, 1, KEY_F8), \
+	KEY(7, 2, KEY_F9), \
+	KEY(7, 3, KEY_F10), \
+	KEY(7, 4, KEY_NUMLOCK), \
+	KEY(7, 5, KEY_SCROLLLOCK), \
+	KEY(7, 6, KEY_KP7), \
+	KEY(7, 7, KEY_KP8), \
+	KEY(7, 8, KEY_KP9), \
+	KEY(8, 0, KEY_KPMINUS), \
+	KEY(8, 1, KEY_KP4), \
+	KEY(8, 2, KEY_KP5), \
+	KEY(8, 3, KEY_KP6), \
+	KEY(8, 4, KEY_KPPLUS), \
+	KEY(8, 5, KEY_KP1), \
+	KEY(8, 6, KEY_KP2), \
+	KEY(8, 7, KEY_KP3), \
+	KEY(8, 8, KEY_KP0), \
+}
+
+/**
+ * struct kbd_platform_data - spear keyboard platform data
+ * @keymap: pointer to keymap data (table and size)
+ * @rep: enables key autorepeat
+ *
+ * This structure is supposed to be used by platform code to supply
+ * keymaps to drivers that implement keyboards.
+ */
+struct kbd_platform_data {
+	const struct matrix_keymap_data *keymap;
+	bool rep;
+};
+
+/* This function is used to set the platform_data field of pdev->dev */
+static inline void
+kbd_set_plat_data(struct platform_device *pdev, struct kbd_platform_data *data)
+{
+	pdev->dev.platform_data = data;
+}
+
+#endif /* __PLAT_KEYBOARD_H */
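
Putting the pieces above together, a board would instantiate the default keymap, wrap it in matrix_keymap_data and attach it before registering its keypad device. A hypothetical sketch (device name and variable names are illustrative; the cast papers over DECLARE_KEYMAP emitting an int[] where matrix_keymap_data expects u32 entries):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <plat/keyboard.h>

static DECLARE_KEYMAP(example_keymap);

static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= (const u32 *)example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static struct kbd_platform_data example_kbd_pdata = {
	.keymap	= &example_keymap_data,
	.rep	= true,
};

static struct platform_device example_kbd_device = {
	.name	= "keyboard",		/* hypothetical driver name */
	.id	= -1,
};

static void __init example_board_kbd_init(void)
{
	kbd_set_plat_data(&example_kbd_device, &example_kbd_pdata);
	platform_device_register(&example_kbd_device);
}
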
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index 8c6a244..659d119 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -188,7 +188,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 2adc261..6ce30fb 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -203,7 +203,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 75f19f4..86fab77 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -206,7 +206,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c
index dd009875..da14fbd 100644
--- a/arch/avr32/boards/hammerhead/setup.c
+++ b/arch/avr32/boards/hammerhead/setup.c
@@ -150,7 +150,7 @@
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
 
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/merisc/setup.c b/arch/avr32/boards/merisc/setup.c
index 623b077..e61bc94 100644
--- a/arch/avr32/boards/merisc/setup.c
+++ b/arch/avr32/boards/merisc/setup.c
@@ -134,7 +134,7 @@
 
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c
index 523d8e1..c4da5cb 100644
--- a/arch/avr32/boards/mimc200/setup.c
+++ b/arch/avr32/boards/mimc200/setup.c
@@ -162,7 +162,7 @@
 	 */
 	regs = (void __iomem __force *)res->start;
 	pclk = clk_get(&pdev->dev, "pclk");
-	if (!pclk)
+	if (IS_ERR(pclk))
 		return;
 
 	clk_enable(pclk);
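
All of the board hunks above fix the same mistake: on these platforms clk_get() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the old !pclk test could never trigger. The corrected pattern, written out once as a sketch (the surrounding LCDC setup is elided):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static void example_setup_lcdc_pclk(struct platform_device *pdev)
{
	struct clk *pclk;

	pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(pclk)) {
		dev_warn(&pdev->dev, "no pclk: %ld\n", PTR_ERR(pclk));
		return;
	}

	clk_enable(pclk);
	/* ... program the LCD controller ... */
}
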
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 9854013..6f9ca56 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -29,6 +26,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -72,8 +70,8 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
-CONFIG_EEPROM_AT24=m
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
@@ -106,6 +104,7 @@
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
 CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=350
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -115,14 +114,12 @@
 CONFIG_MMC=y
 CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -130,21 +127,23 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-CONFIG_UFS_FS=y
+CONFIG_UBIFS_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -155,5 +154,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_PCBC=m
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 7ceda35..7eece0a 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -31,6 +28,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -74,8 +72,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -104,6 +104,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -127,6 +128,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -141,11 +143,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -155,7 +160,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -166,4 +170,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 7bc5b2c..387eb9d 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -30,6 +27,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -73,8 +71,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -103,6 +103,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -126,6 +127,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -140,11 +142,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -154,7 +159,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -165,4 +169,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 4bd3682..f0fe237 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -29,6 +26,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -74,6 +72,7 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=m
@@ -107,6 +106,7 @@
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
 CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=350
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -116,14 +116,12 @@
 CONFIG_MMC=y
 CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -131,21 +129,23 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-CONFIG_UFS_FS=y
+CONFIG_UBIFS_FS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -156,5 +156,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_PCBC=m
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index f8437ef..e4a7c1d 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -32,6 +29,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -77,8 +75,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -107,6 +107,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -130,6 +131,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -144,11 +146,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -158,7 +163,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -169,4 +173,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 7f58f99..6f37f70 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -2,20 +2,17 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_NO_HZ=y
@@ -31,6 +28,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -76,8 +74,10 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_TCLIB=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=m
 CONFIG_NET_ETHERNET=y
 CONFIG_MACB=y
 # CONFIG_NETDEV_1000 is not set
@@ -106,6 +106,7 @@
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
@@ -129,6 +130,7 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -143,11 +145,14 @@
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
@@ -157,7 +162,6 @@
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=m
 CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
 CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
 CONFIG_NLS_CODEPAGE_850=m
@@ -168,4 +172,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index aec4c43..4fb01f5 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -3,7 +3,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
@@ -11,7 +10,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -26,6 +25,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -35,6 +35,7 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
@@ -58,16 +59,14 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
 CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_PWM=m
 CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=m
-CONFIG_EEPROM_AT24=m
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
@@ -120,7 +119,6 @@
 CONFIG_SND_PCM_OSS=m
 # CONFIG_SND_SUPPORT_OLD_API is not set
 # CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_DRIVERS is not set
 CONFIG_SND_AT73C213=m
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
@@ -131,16 +129,15 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_ATMEL_PWM=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -149,20 +146,23 @@
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
 CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-# CONFIG_JFFS2_FS_WRITEBUFFER is not set
 CONFIG_UBIFS_FS=y
-CONFIG_MINIX_FS=m
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
@@ -170,6 +170,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 50ba3db..9faaf9b 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -2,22 +2,15 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_AUDIT=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
-# CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -33,6 +26,7 @@
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -54,18 +48,18 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_M25P80=m
+CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_PWM=m
 CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=m
-CONFIG_EEPROM_AT24=m
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
+# CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=m
 # CONFIG_SATA_PMP is not set
 CONFIG_PATA_AT32=m
@@ -77,6 +71,7 @@
 CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_INPUT=m
+CONFIG_INPUT_EVDEV=m
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=m
 # CONFIG_MOUSE_PS2 is not set
@@ -106,7 +101,6 @@
 CONFIG_SND_AT73C213=m
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_ZERO=m
 CONFIG_USB_ETH=m
 CONFIG_USB_GADGETFS=m
@@ -116,36 +110,39 @@
 CONFIG_MMC=y
 CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_ATMEL_PWM=m
-CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
-CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-CONFIG_CRC_T10DIF=m
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 329e10b..3d2a5d8 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -1,19 +1,32 @@
 CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_BASE_FULL is not set
-# CONFIG_FUTEX is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_TIMERFD is not set
-# CONFIG_EVENTFD is not set
 # CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
-# CONFIG_BLOCK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+# CONFIG_KPROBES is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BOARD_ATSTK1004=y
 # CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+CONFIG_PM=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -31,40 +44,104 @@
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_INPUT is not set
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_PWM=m
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_SSC=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SR=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=m
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_AT32=m
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_INPUT=m
+CONFIG_INPUT_EVDEV=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_GPIO=m
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_SERIAL_ATMEL_PDC is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
+CONFIG_I2C=m
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_GPIO=m
 CONFIG_SPI=y
 CONFIG_SPI_ATMEL=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_AT32AP700X_WDT=y
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_LTV350QV=y
 # CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=y
-# CONFIG_USB_ETH_RNDIS is not set
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_CDC_COMPOSITE=m
+CONFIG_MMC=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_ATMELMCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_ATMEL_PWM=m
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
 CONFIG_RTC_CLASS=y
-# CONFIG_RTC_INTF_PROC is not set
 CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
 # CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
-# CONFIG_PROC_PAGE_MONITOR is not set
 CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
-# CONFIG_JFFS2_FS_WRITEBUFFER is not set
+CONFIG_UBIFS_FS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_FRAME_POINTER=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index dbcc1b5..1ed8f22 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -3,7 +3,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_SYSCTL_SYSCALL is not set
@@ -11,7 +10,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -37,6 +36,7 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
@@ -60,15 +60,13 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_DATAFLASH=m
-CONFIG_MTD_DATAFLASH_OTP=y
-CONFIG_MTD_M25P80=m
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_ATMEL=y
 CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=m
+CONFIG_MISC_DEVICES=y
 CONFIG_ATMEL_PWM=m
 CONFIG_ATMEL_TCLIB=y
 CONFIG_ATMEL_SSC=m
@@ -132,17 +130,17 @@
 CONFIG_USB_GADGETFS=m
 CONFIG_USB_FILE_STORAGE=m
 CONFIG_USB_G_SERIAL=m
+CONFIG_USB_CDC_COMPOSITE=m
 CONFIG_MMC=y
+CONFIG_MMC_TEST=m
 CONFIG_MMC_ATMELMCI=y
-CONFIG_MMC_SPI=m
 CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_ATMEL_PWM=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_AT32AP700X=y
 CONFIG_DMADEVICES=y
@@ -156,15 +154,18 @@
 CONFIG_FUSE_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
 CONFIG_JFFS2_FS=y
 CONFIG_UBIFS_FS=y
-CONFIG_MINIX_FS=m
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
 CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_UTF8=m
 CONFIG_MAGIC_SYSRQ=y
@@ -172,7 +173,3 @@
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_FRAME_POINTER=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_FIPS=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=m
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index 0c813b6..aeadc95 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -11,7 +11,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index dcc01f0..1692bee 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -12,7 +12,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
+# CONFIG_KPROBES is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
index ab608b7..244f2ac 100644
--- a/arch/avr32/include/asm/syscalls.h
+++ b/arch/avr32/include/asm/syscalls.h
@@ -15,20 +15,6 @@
 #include <linux/types.h>
 #include <linux/signal.h>
 
-/* kernel/process.c */
-asmlinkage int sys_fork(struct pt_regs *);
-asmlinkage int sys_clone(unsigned long, unsigned long,
-			 unsigned long, unsigned long,
-			 struct pt_regs *);
-asmlinkage int sys_vfork(struct pt_regs *);
-asmlinkage int sys_execve(const char __user *, char __user *__user *,
-			  char __user *__user *, struct pt_regs *);
-
-/* kernel/signal.c */
-asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
-			       struct pt_regs *);
-asmlinkage int sys_rt_sigreturn(struct pt_regs *);
-
 /* mm/cache.c */
 asmlinkage int sys_cacheflush(int, void __user *, size_t);
 
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index 9c46aaa..ef5a2a0 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -367,14 +367,13 @@
 }
 
 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
-			 unsigned long parent_tidptr,
-			 unsigned long child_tidptr, struct pt_regs *regs)
+		void __user *parent_tidptr, void __user *child_tidptr,
+		struct pt_regs *regs)
 {
 	if (!newsp)
 		newsp = regs->sp;
-	return do_fork(clone_flags, newsp, regs, 0,
-		       (int __user *)parent_tidptr,
-		       (int __user *)child_tidptr);
+	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr,
+			child_tidptr);
 }
 
 asmlinkage int sys_vfork(struct pt_regs *regs)
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 668ed28..05ad291 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -35,7 +35,6 @@
 	.rating		= 50,
 	.read		= read_cycle_count,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 16,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -123,9 +122,7 @@
 
 	/* figure rate for counter */
 	counter_hz = clk_get_rate(boot_cpu_data.clk);
-	counter.mult = clocksource_hz2mult(counter_hz, counter.shift);
-
-	ret = clocksource_register(&counter);
+	ret = clocksource_register_hz(&counter, counter_hz);
 	if (ret)
 		pr_debug("timer: could not register clocksource: %d\n", ret);
 
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index f021edf..32d680e 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -176,7 +176,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops avr32_pm_ops = {
+static const struct platform_suspend_ops avr32_pm_ops = {
 	.valid	= avr32_pm_valid_state,
 	.enter	= avr32_pm_enter,
 };
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 46738d4..46f42b2 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -19,7 +19,7 @@
 endif
 KBUILD_AFLAGS           += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE    += -mlong-calls
-KBUILD_LDFLAGS_MODULE   += -m elf32bfin
+LDFLAGS                 += -m elf32bfin
 KALLSYMS         += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
@@ -97,8 +97,11 @@
 rev-$(CONFIG_BF_REV_NONE) := none
 rev-$(CONFIG_BF_REV_ANY)  := any
 
-KBUILD_CFLAGS += -mcpu=$(cpu-y)-$(rev-y)
-KBUILD_AFLAGS += -mcpu=$(cpu-y)-$(rev-y)
+CPU_REV := $(cpu-y)-$(rev-y)
+export CPU_REV
+
+KBUILD_CFLAGS += -mcpu=$(CPU_REV)
+KBUILD_AFLAGS += -mcpu=$(CPU_REV)
 
 # - we utilize the silicon rev from the toolchain, so move it over to the checkflags
 CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 13d2dbd..0a49279 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -17,7 +17,7 @@
 
 quiet_cmd_uimage = UIMAGE  $@
       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
-                   -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \
+                   -C $(2) -n '$(CPU_REV)-$(KERNELRELEASE)' \
                    -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
                    $(UIMAGE_OPTS-y) -d $< $@
 
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
new file mode 100644
index 0000000..4cf4510
--- /dev/null
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -0,0 +1,113 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_FUTEX is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BF561=y
+CONFIG_SMP=y
+CONFIG_IRQ_TIMER0=10
+CONFIG_CLKIN_HZ=30000000
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BFIN_GPTIMERS=m
+CONFIG_C_CDPRIO=y
+CONFIG_BANK_3=0xAAC2
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_IRDA=m
+CONFIG_IRLAN=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRTTY_SIR=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+CONFIG_MTD_PHYSMAP=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=m
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_BFIN_WDT=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_JFFS2_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_SMB_FS=m
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_MMRS=y
+CONFIG_DEBUG_HWERR=y
+CONFIG_EXACT_HWERR=y
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
new file mode 100644
index 0000000..0ebc7d9
--- /dev/null
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -0,0 +1,121 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCALVERSION="DNP5370"
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_SLOB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_BF537=y
+CONFIG_BF_REV_0_3=y
+CONFIG_DNP5370=y
+CONFIG_IRQ_ERROR=7
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+CONFIG_C_CDPRIO=y
+CONFIG_C_AMBEN_B0_B1_B2=y
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_LLC2=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=1
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_BLOCK2MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_MISC_DEVICES is not set
+CONFIG_NETDEVICES=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_BFIN_MAC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_BFIN_DMA_INTERFACE is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=y
+CONFIG_BFIN_JTAG_COMM_CONSOLE=y
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_UART0=y
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_LM75=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_KOBJECT=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_PAGE_POISONING=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/blackfin/include/asm/bfin_dma.h b/arch/blackfin/include/asm/bfin_dma.h
new file mode 100644
index 0000000..d511207
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_dma.h
@@ -0,0 +1,91 @@
+/*
+ * bfin_dma.h - Blackfin DMA defines/structures/etc...
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_DMA_H__
+#define __ASM_BFIN_DMA_H__
+
+#include <linux/types.h>
+
+/* DMA_CONFIG Masks */
+#define DMAEN			0x0001	/* DMA Channel Enable */
+#define WNR				0x0002	/* Channel Direction (W/R*) */
+#define WDSIZE_8		0x0000	/* Transfer Word Size = 8 */
+#define WDSIZE_16		0x0004	/* Transfer Word Size = 16 */
+#define WDSIZE_32		0x0008	/* Transfer Word Size = 32 */
+#define DMA2D			0x0010	/* DMA Mode (2D/1D*) */
+#define RESTART			0x0020	/* DMA Buffer Clear */
+#define DI_SEL			0x0040	/* Data Interrupt Timing Select */
+#define DI_EN			0x0080	/* Data Interrupt Enable */
+#define NDSIZE_0		0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
+#define NDSIZE_1		0x0100	/* Next Descriptor Size = 1 */
+#define NDSIZE_2		0x0200	/* Next Descriptor Size = 2 */
+#define NDSIZE_3		0x0300	/* Next Descriptor Size = 3 */
+#define NDSIZE_4		0x0400	/* Next Descriptor Size = 4 */
+#define NDSIZE_5		0x0500	/* Next Descriptor Size = 5 */
+#define NDSIZE_6		0x0600	/* Next Descriptor Size = 6 */
+#define NDSIZE_7		0x0700	/* Next Descriptor Size = 7 */
+#define NDSIZE_8		0x0800	/* Next Descriptor Size = 8 */
+#define NDSIZE_9		0x0900	/* Next Descriptor Size = 9 */
+#define NDSIZE			0x0f00	/* Next Descriptor Size */
+#define DMAFLOW			0x7000	/* Flow Control */
+#define DMAFLOW_STOP	0x0000	/* Stop Mode */
+#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
+#define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
+#define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
+
+/* DMA_IRQ_STATUS Masks */
+#define DMA_DONE		0x0001	/* DMA Completion Interrupt Status */
+#define DMA_ERR			0x0002	/* DMA Error Interrupt Status */
+#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator */
+#define DMA_RUN			0x0008	/* DMA Channel Running Indicator */
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin dma registers layout
+ */
+struct bfin_dma_regs {
+	u32 next_desc_ptr;
+	u32 start_addr;
+	__BFP(config);
+	u32 __pad0;
+	__BFP(x_count);
+	__BFP(x_modify);
+	__BFP(y_count);
+	__BFP(y_modify);
+	u32 curr_desc_ptr;
+	u32 curr_addr;
+	__BFP(irq_status);
+	__BFP(peripheral_map);
+	__BFP(curr_x_count);
+	u32 __pad1;
+	__BFP(curr_y_count);
+	u32 __pad2;
+};
+
+/*
+ * bfin handshake mdma registers layout
+ */
+struct bfin_hmdma_regs {
+	__BFP(control);
+	__BFP(ecinit);
+	__BFP(bcinit);
+	__BFP(ecurgent);
+	__BFP(ecoverflow);
+	__BFP(ecount);
+	__BFP(bcount);
+};
+
+#undef __BFP
+
+#endif
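
    The __BFP() helper above reflects a hardware detail: every Blackfin system
    MMR sits on a 4-byte boundary even when the register itself is only 16 bits
    wide, so the macro emits the 16-bit field plus a 16-bit pad in one go.  A
    minimal standalone sketch of the resulting layout follows (host <stdint.h>
    types and a trimmed-down struct; only the field order and offsets are taken
    from bfin_dma_regs above, everything else is illustrative):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* same idiom as bfin_dma.h, with host types for the sketch */
	#define __BFP(m) uint16_t m; uint16_t __pad_##m

	struct demo_dma_regs {
		uint32_t next_desc_ptr;	/* 0x00 */
		uint32_t start_addr;	/* 0x04 */
		__BFP(config);		/* 0x08, pad at 0x0a */
		uint32_t __pad0;	/* 0x0c */
		__BFP(x_count);		/* 0x10 */
		__BFP(x_modify);	/* 0x14 */
	};

	int main(void)
	{
		/* each 16-bit MMR still advances the register map by 4 bytes */
		assert(offsetof(struct demo_dma_regs, config)   == 0x08);
		assert(offsetof(struct demo_dma_regs, x_count)  == 0x10);
		assert(offsetof(struct demo_dma_regs, x_modify) == 0x14);
		return 0;
	}
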
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
new file mode 100644
index 0000000..1ff9f14
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -0,0 +1,275 @@
+/*
+ * bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_ASM_SERIAL_H__
+#define __BFIN_ASM_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <mach/anomaly.h>
+#include <mach/bfin_serial.h>
+
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || \
+    defined(CONFIG_BFIN_UART1_CTSRTS) || \
+    defined(CONFIG_BFIN_UART2_CTSRTS) || \
+    defined(CONFIG_BFIN_UART3_CTSRTS)
+# ifdef BFIN_UART_BF54X_STYLE
+#  define CONFIG_SERIAL_BFIN_HARD_CTSRTS
+# else
+#  define CONFIG_SERIAL_BFIN_CTSRTS
+# endif
+#endif
+
+struct circ_buf;
+struct timer_list;
+struct work_struct;
+
+struct bfin_serial_port {
+	struct uart_port port;
+	unsigned int old_status;
+	int status_irq;
+#ifndef BFIN_UART_BF54X_STYLE
+	unsigned int lsr;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	int tx_done;
+	int tx_count;
+	struct circ_buf rx_dma_buf;
+	struct timer_list rx_dma_timer;
+	int rx_dma_nrows;
+	unsigned int tx_dma_channel;
+	unsigned int rx_dma_channel;
+	struct work_struct tx_dma_workqueue;
+#elif ANOMALY_05000363
+	unsigned int anomaly_threshold;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+	int scts;
+#endif
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+	int cts_pin;
+	int rts_pin;
+#endif
+};
+
+/* UART_LCR Masks */
+#define WLS(x)                   (((x)-5) & 0x03)  /* Word Length Select */
+#define STB                      0x04  /* Stop Bits */
+#define PEN                      0x08  /* Parity Enable */
+#define EPS                      0x10  /* Even Parity Select */
+#define STP                      0x20  /* Stick Parity */
+#define SB                       0x40  /* Set Break */
+#define DLAB                     0x80  /* Divisor Latch Access */
+
+/* UART_LSR Masks */
+#define DR                       0x01  /* Data Ready */
+#define OE                       0x02  /* Overrun Error */
+#define PE                       0x04  /* Parity Error */
+#define FE                       0x08  /* Framing Error */
+#define BI                       0x10  /* Break Interrupt */
+#define THRE                     0x20  /* THR Empty */
+#define TEMT                     0x40  /* TSR and UART_THR Empty */
+#define TFI                      0x80  /* Transmission Finished Indicator */
+
+/* UART_IER Masks */
+#define ERBFI                    0x01  /* Enable Receive Buffer Full Interrupt */
+#define ETBEI                    0x02  /* Enable Transmit Buffer Empty Interrupt */
+#define ELSI                     0x04  /* Enable RX Status Interrupt */
+#define EDSSI                    0x08  /* Enable Modem Status Interrupt */
+#define EDTPTI                   0x10  /* Enable DMA Transmit PIRQ Interrupt */
+#define ETFI                     0x20  /* Enable Transmission Finished Interrupt */
+#define ERFCI                    0x40  /* Enable Receive FIFO Count Interrupt */
+
+/* UART_MCR Masks */
+#define XOFF                     0x01  /* Transmitter Off */
+#define MRTS                     0x02  /* Manual Request To Send */
+#define RFIT                     0x04  /* Receive FIFO IRQ Threshold */
+#define RFRT                     0x08  /* Receive FIFO RTS Threshold */
+#define LOOP_ENA                 0x10  /* Loopback Mode Enable */
+#define FCPOL                    0x20  /* Flow Control Pin Polarity */
+#define ARTS                     0x40  /* Automatic Request To Send */
+#define ACTS                     0x80  /* Automatic Clear To Send */
+
+/* UART_MSR Masks */
+#define SCTS                     0x01  /* Sticky CTS */
+#define CTS                      0x10  /* Clear To Send */
+#define RFCS                     0x20  /* Receive FIFO Count Status */
+
+/* UART_GCTL Masks */
+#define UCEN                     0x01  /* Enable UARTx Clocks */
+#define IREN                     0x02  /* Enable IrDA Mode */
+#define TPOLC                    0x04  /* IrDA TX Polarity Change */
+#define RPOLC                    0x08  /* IrDA RX Polarity Change */
+#define FPE                      0x10  /* Force Parity Error On Transmit */
+#define FFE                      0x20  /* Force Framing Error On Transmit */
+
+#ifdef BFIN_UART_BF54X_STYLE
+# define OFFSET_DLL              0x00  /* Divisor Latch (Low-Byte)        */
+# define OFFSET_DLH              0x04  /* Divisor Latch (High-Byte)       */
+# define OFFSET_GCTL             0x08  /* Global Control Register         */
+# define OFFSET_LCR              0x0C  /* Line Control Register           */
+# define OFFSET_MCR              0x10  /* Modem Control Register          */
+# define OFFSET_LSR              0x14  /* Line Status Register            */
+# define OFFSET_MSR              0x18  /* Modem Status Register           */
+# define OFFSET_SCR              0x1C  /* SCR Scratch Register            */
+# define OFFSET_IER_SET          0x20  /* Set Interrupt Enable Register   */
+# define OFFSET_IER_CLEAR        0x24  /* Clear Interrupt Enable Register */
+# define OFFSET_THR              0x28  /* Transmit Holding register       */
+# define OFFSET_RBR              0x2C  /* Receive Buffer register         */
+#else /* BF533 style */
+# define OFFSET_THR              0x00  /* Transmit Holding register         */
+# define OFFSET_RBR              0x00  /* Receive Buffer register           */
+# define OFFSET_DLL              0x00  /* Divisor Latch (Low-Byte)          */
+# define OFFSET_DLH              0x04  /* Divisor Latch (High-Byte)         */
+# define OFFSET_IER              0x04  /* Interrupt Enable Register         */
+# define OFFSET_IIR              0x08  /* Interrupt Identification Register */
+# define OFFSET_LCR              0x0C  /* Line Control Register             */
+# define OFFSET_MCR              0x10  /* Modem Control Register            */
+# define OFFSET_LSR              0x14  /* Line Status Register              */
+# define OFFSET_MSR              0x18  /* Modem Status Register             */
+# define OFFSET_SCR              0x1C  /* SCR Scratch Register              */
+# define OFFSET_GCTL             0x24  /* Global Control Register           */
+/* code should not need IIR, so force build error if they use it */
+# undef OFFSET_IIR
+#endif
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+struct bfin_uart_regs {
+#ifdef BFIN_UART_BF54X_STYLE
+	__BFP(dll);
+	__BFP(dlh);
+	__BFP(gctl);
+	__BFP(lcr);
+	__BFP(mcr);
+	__BFP(lsr);
+	__BFP(msr);
+	__BFP(scr);
+	__BFP(ier_set);
+	__BFP(ier_clear);
+	__BFP(thr);
+	__BFP(rbr);
+#else
+	union {
+		u16 dll;
+		u16 thr;
+		const u16 rbr;
+	};
+	const u16 __pad0;
+	union {
+		u16 dlh;
+		u16 ier;
+	};
+	const u16 __pad1;
+	const __BFP(iir);
+	__BFP(lcr);
+	__BFP(mcr);
+	__BFP(lsr);
+	__BFP(msr);
+	__BFP(scr);
+	const u32 __pad2;
+	__BFP(gctl);
+#endif
+};
+#undef __BFP
+
+#ifndef port_membase
+# define port_membase(p) (((struct bfin_serial_port *)(p))->port.membase)
+#endif
+
+#define UART_GET_CHAR(p)      bfin_read16(port_membase(p) + OFFSET_RBR)
+#define UART_GET_DLL(p)       bfin_read16(port_membase(p) + OFFSET_DLL)
+#define UART_GET_DLH(p)       bfin_read16(port_membase(p) + OFFSET_DLH)
+#define UART_GET_GCTL(p)      bfin_read16(port_membase(p) + OFFSET_GCTL)
+#define UART_GET_LCR(p)       bfin_read16(port_membase(p) + OFFSET_LCR)
+#define UART_GET_MCR(p)       bfin_read16(port_membase(p) + OFFSET_MCR)
+#define UART_GET_MSR(p)       bfin_read16(port_membase(p) + OFFSET_MSR)
+
+#define UART_PUT_CHAR(p, v)   bfin_write16(port_membase(p) + OFFSET_THR, v)
+#define UART_PUT_DLL(p, v)    bfin_write16(port_membase(p) + OFFSET_DLL, v)
+#define UART_PUT_DLH(p, v)    bfin_write16(port_membase(p) + OFFSET_DLH, v)
+#define UART_PUT_GCTL(p, v)   bfin_write16(port_membase(p) + OFFSET_GCTL, v)
+#define UART_PUT_LCR(p, v)    bfin_write16(port_membase(p) + OFFSET_LCR, v)
+#define UART_PUT_MCR(p, v)    bfin_write16(port_membase(p) + OFFSET_MCR, v)
+
+#ifdef BFIN_UART_BF54X_STYLE
+
+#define UART_CLEAR_IER(p, v)  bfin_write16(port_membase(p) + OFFSET_IER_CLEAR, v)
+#define UART_GET_IER(p)       bfin_read16(port_membase(p) + OFFSET_IER_SET)
+#define UART_SET_IER(p, v)    bfin_write16(port_membase(p) + OFFSET_IER_SET, v)
+
+#define UART_CLEAR_DLAB(p)    /* MMRs not muxed on BF54x */
+#define UART_SET_DLAB(p)      /* MMRs not muxed on BF54x */
+
+#define UART_CLEAR_LSR(p)     bfin_write16(port_membase(p) + OFFSET_LSR, -1)
+#define UART_GET_LSR(p)       bfin_read16(port_membase(p) + OFFSET_LSR)
+#define UART_PUT_LSR(p, v)    bfin_write16(port_membase(p) + OFFSET_LSR, v)
+
+/* This handles hard CTS/RTS */
+#define BFIN_UART_CTSRTS_HARD
+#define UART_CLEAR_SCTS(p)      bfin_write16((port_membase(p) + OFFSET_MSR), SCTS)
+#define UART_GET_CTS(x)         (UART_GET_MSR(x) & CTS)
+#define UART_DISABLE_RTS(x)     UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS | MRTS))
+#define UART_ENABLE_RTS(x)      UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
+#define UART_ENABLE_INTS(x, v)  UART_SET_IER(x, v)
+#define UART_DISABLE_INTS(x)    UART_CLEAR_IER(x, 0xF)
+
+#else /* BF533 style */
+
+#define UART_CLEAR_IER(p, v)  UART_PUT_IER(p, UART_GET_IER(p) & ~(v))
+#define UART_GET_IER(p)       bfin_read16(port_membase(p) + OFFSET_IER)
+#define UART_PUT_IER(p, v)    bfin_write16(port_membase(p) + OFFSET_IER, v)
+#define UART_SET_IER(p, v)    UART_PUT_IER(p, UART_GET_IER(p) | (v))
+
+#define UART_CLEAR_DLAB(p)    do { UART_PUT_LCR(p, UART_GET_LCR(p) & ~DLAB); SSYNC(); } while (0)
+#define UART_SET_DLAB(p)      do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
+
+#ifndef put_lsr_cache
+# define put_lsr_cache(p, v) (((struct bfin_serial_port *)(p))->lsr = (v))
+#endif
+#ifndef get_lsr_cache
+# define get_lsr_cache(p)    (((struct bfin_serial_port *)(p))->lsr)
+#endif
+
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline void UART_CLEAR_LSR(void *p)
+{
+	put_lsr_cache(p, 0);
+	bfin_write16(port_membase(p) + OFFSET_LSR, -1);
+}
+static inline unsigned int UART_GET_LSR(void *p)
+{
+	unsigned int lsr = bfin_read16(port_membase(p) + OFFSET_LSR);
+	put_lsr_cache(p, get_lsr_cache(p) | (lsr & (BI|FE|PE|OE)));
+	return lsr | get_lsr_cache(p);
+}
+static inline void UART_PUT_LSR(void *p, uint16_t val)
+{
+	put_lsr_cache(p, get_lsr_cache(p) & ~val);
+}
+
+/* This handles soft CTS/RTS */
+#define UART_GET_CTS(x)        gpio_get_value((x)->cts_pin)
+#define UART_DISABLE_RTS(x)    gpio_set_value((x)->rts_pin, 1)
+#define UART_ENABLE_RTS(x)     gpio_set_value((x)->rts_pin, 0)
+#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
+#define UART_DISABLE_INTS(x)   UART_PUT_IER(x, 0)
+
+#endif
+
+#ifndef BFIN_UART_TX_FIFO_SIZE
+# define BFIN_UART_TX_FIFO_SIZE 2
+#endif
+
+#endif /* __BFIN_ASM_SERIAL_H__ */
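
    The BF533-style UART_GET_LSR()/UART_CLEAR_LSR()/UART_PUT_LSR() helpers
    above exist because the hardware LSR is read-to-clear: without the lsr
    cache, an error bit consumed by the RX path would be invisible to the TX
    path.  A small host-side model of that caching rule is sketched below (the
    demo_* names and the simulated register are illustrative; only the bit
    masks and the sticky-bit logic come from the header above):

	#include <assert.h>
	#include <stdint.h>

	#define OE   0x02
	#define PE   0x04
	#define FE   0x08
	#define BI   0x10
	#define THRE 0x20

	static uint16_t hw_lsr;		/* pretend MMR: error bits clear on read */
	static uint16_t lsr_cache;	/* software copy of the sticky bits */

	static uint16_t hw_read_lsr(void)
	{
		uint16_t v = hw_lsr;
		hw_lsr &= ~(BI | FE | PE | OE);	/* read-to-clear behaviour */
		return v;
	}

	static uint16_t demo_get_lsr(void)
	{
		uint16_t lsr = hw_read_lsr();
		lsr_cache |= lsr & (BI | FE | PE | OE);
		return lsr | lsr_cache;
	}

	static void demo_put_lsr(uint16_t val)
	{
		lsr_cache &= ~val;	/* ack only the bits the caller handled */
	}

	int main(void)
	{
		hw_lsr = THRE | OE;		/* an overrun happened */
		assert(demo_get_lsr() & OE);	/* RX path sees it ...          */
		assert(demo_get_lsr() & OE);	/* ... and TX path still sees it */
		demo_put_lsr(OE);
		assert(!(demo_get_lsr() & OE));	/* gone once acknowledged       */
		return 0;
	}
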
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 3f7ef4d..29f4fd8 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -108,7 +108,9 @@
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
+#define test_bit __skip_test_bit
 #include <asm-generic/bitops/non-atomic.h>
+#undef test_bit
 
 #endif /* CONFIG_SMP */
 
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index bd0641a..568885a 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -7,6 +7,8 @@
 #ifndef __ARCH_BLACKFIN_CACHE_H
 #define __ARCH_BLACKFIN_CACHE_H
 
+#include <linux/linkage.h>	/* for asmlinkage */
+
 /*
  * Bytes per L1 cache line
  * Blackfin loads 32 bytes for cache
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 2666ff8..77135b6 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -11,6 +11,9 @@
 
 #include <asm/blackfin.h>	/* for SSYNC() */
 #include <asm/sections.h>	/* for _ramend */
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
 
 extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
 extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index eedf3ca..d9dbc1a 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -14,40 +14,7 @@
 #include <asm/blackfin.h>
 #include <asm/page.h>
 #include <asm-generic/dma.h>
-
-/* DMA_CONFIG Masks */
-#define DMAEN			0x0001	/* DMA Channel Enable */
-#define WNR				0x0002	/* Channel Direction (W/R*) */
-#define WDSIZE_8		0x0000	/* Transfer Word Size = 8 */
-#define WDSIZE_16		0x0004	/* Transfer Word Size = 16 */
-#define WDSIZE_32		0x0008	/* Transfer Word Size = 32 */
-#define DMA2D			0x0010	/* DMA Mode (2D/1D*) */
-#define RESTART			0x0020	/* DMA Buffer Clear */
-#define DI_SEL			0x0040	/* Data Interrupt Timing Select */
-#define DI_EN			0x0080	/* Data Interrupt Enable */
-#define NDSIZE_0		0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1		0x0100	/* Next Descriptor Size = 1 */
-#define NDSIZE_2		0x0200	/* Next Descriptor Size = 2 */
-#define NDSIZE_3		0x0300	/* Next Descriptor Size = 3 */
-#define NDSIZE_4		0x0400	/* Next Descriptor Size = 4 */
-#define NDSIZE_5		0x0500	/* Next Descriptor Size = 5 */
-#define NDSIZE_6		0x0600	/* Next Descriptor Size = 6 */
-#define NDSIZE_7		0x0700	/* Next Descriptor Size = 7 */
-#define NDSIZE_8		0x0800	/* Next Descriptor Size = 8 */
-#define NDSIZE_9		0x0900	/* Next Descriptor Size = 9 */
-#define NDSIZE			0x0f00	/* Next Descriptor Size */
-#define DMAFLOW			0x7000	/* Flow Control */
-#define DMAFLOW_STOP	0x0000	/* Stop Mode */
-#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
-#define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
-#define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
-
-/* DMA_IRQ_STATUS Masks */
-#define DMA_DONE		0x0001	/* DMA Completion Interrupt Status */
-#define DMA_ERR			0x0002	/* DMA Error Interrupt Status */
-#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator */
-#define DMA_RUN			0x0008	/* DMA Channel Running Indicator */
+#include <asm/bfin_dma.h>
 
 /*-------------------------
  * config reg bits value
@@ -149,7 +116,7 @@
 *	DMA API's
 *******************************************************************************/
 extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
-extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
+extern struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS];
 extern int channel2irq(unsigned int channel);
 
 static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index efcc3ae..3047120 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -9,6 +9,8 @@
 #ifndef _BLACKFIN_DPMC_H_
 #define _BLACKFIN_DPMC_H_
 
+#include <mach/pll.h>
+
 /* PLL_CTL Masks */
 #define DF			0x0001	/* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
 #define PLL_OFF			0x0002	/* PLL Not Powered */
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 234fbac..dccae26 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,148 +7,48 @@
 #ifndef _BFIN_IO_H
 #define _BFIN_IO_H
 
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
 #include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
 
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory, including Zorro memory. They are meant to
- * access the bus in the bus byte order which is little-endian!.
- *
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the bfin architecture, we just read/write the
- * memory location directly.
- */
-#ifndef __ASSEMBLY__
-
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = b [%2] (z);"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return (unsigned char) val;
+#define DECLARE_BFIN_RAW_READX(size, type, asm, asm_sign) \
+static inline type __raw_read##size(const volatile void __iomem *addr) \
+{ \
+	unsigned int val; \
+	int tmp; \
+	__asm__ __volatile__ ( \
+		"cli %1;" \
+		"NOP; NOP; SSYNC;" \
+		"%0 = "#asm" [%2] "#asm_sign";" \
+		"sti %1;" \
+		: "=d"(val), "=d"(tmp) \
+		: "a"(addr) \
+	); \
+	return (type) val; \
 }
-
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = w [%2] (z);"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return (unsigned short) val;
-}
-
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = [%2];"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return val;
-}
-
-#endif /*  __ASSEMBLY__ */
-
-#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
-#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
-#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
-#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
-
-/* Convert "I/O port addresses" to actual addresses.  i.e. ugly casts. */
-#define __io(port) ((void *)(unsigned long)(port))
-
-#define inb(port)    readb(__io(port))
-#define inw(port)    readw(__io(port))
-#define inl(port)    readl(__io(port))
-#define outb(x, port) writeb(x, __io(port))
-#define outw(x, port) writew(x, __io(port))
-#define outl(x, port) writel(x, __io(port))
-
-#define inb_p(port)    inb(__io(port))
-#define inw_p(port)    inw(__io(port))
-#define inl_p(port)    inl(__io(port))
-#define outb_p(x, port) outb(x, __io(port))
-#define outw_p(x, port) outw(x, __io(port))
-#define outl_p(x, port) outl(x, __io(port))
-
-#define ioread8_rep(a, d, c)	readsb(a, d, c)
-#define ioread16_rep(a, d, c)	readsw(a, d, c)
-#define ioread32_rep(a, d, c)	readsl(a, d, c)
-#define iowrite8_rep(a, s, c)	writesb(a, s, c)
-#define iowrite16_rep(a, s, c)	writesw(a, s, c)
-#define iowrite32_rep(a, s, c)	writesl(a, s, c)
-
-#define ioread8(x)			readb(x)
-#define ioread16(x)			readw(x)
-#define ioread32(x)			readl(x)
-#define iowrite8(val, x)		writeb(val, x)
-#define iowrite16(val, x)		writew(val, x)
-#define iowrite32(val, x)		writel(val, x)
-
-/**
- * I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes.
- */
-#define mmiowb() do { SSYNC(); wmb(); } while (0)
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_NOCACHE_SER		1
-
-#ifndef __ASSEMBLY__
+DECLARE_BFIN_RAW_READX(b, u8, b, (z))
+#define __raw_readb __raw_readb
+DECLARE_BFIN_RAW_READX(w, u16, w, (z))
+#define __raw_readw __raw_readw
+DECLARE_BFIN_RAW_READX(l, u32, , )
+#define __raw_readl __raw_readl
 
 extern void outsb(unsigned long port, const void *addr, unsigned long count);
 extern void outsw(unsigned long port, const void *addr, unsigned long count);
 extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
 extern void outsl(unsigned long port, const void *addr, unsigned long count);
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
 
 extern void insb(unsigned long port, void *addr, unsigned long count);
 extern void insw(unsigned long port, void *addr, unsigned long count);
 extern void insw_8(unsigned long port, void *addr, unsigned long count);
 extern void insl(unsigned long port, void *addr, unsigned long count);
 extern void insl_16(unsigned long port, void *addr, unsigned long count);
+#define insb insb
+#define insw insw
+#define insl insl
 
 extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
 extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
@@ -158,108 +58,14 @@
 extern void dma_insw(unsigned long port, void *addr, unsigned short count);
 extern void dma_insl(unsigned long port, void *addr, unsigned short count);
 
-static inline void readsl(const void __iomem *addr, void *buf, int len)
-{
-	insl((unsigned long)addr, buf, len);
-}
-
-static inline void readsw(const void __iomem *addr, void *buf, int len)
-{
-	insw((unsigned long)addr, buf, len);
-}
-
-static inline void readsb(const void __iomem *addr, void *buf, int len)
-{
-	insb((unsigned long)addr, buf, len);
-}
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
-{
-	outsl((unsigned long)addr, buf, len);
-}
-
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
-{
-	outsw((unsigned long)addr, buf, len);
-}
-
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
-{
-	outsb((unsigned long)addr, buf, len);
-}
-
-/*
- * Map some physical address range into the kernel address space.
+/**
+ * I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes.
  */
-static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-				int cacheflag)
-{
-	return (void __iomem *)physaddr;
-}
+#define mmiowb() do { SSYNC(); wmb(); } while (0)
 
-/*
- * Unmap a ioremap()ed region again
- */
-static inline void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-static inline void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
- */
-static inline void kernel_set_cachemode(void *addr, unsigned long size,
-					int cmode)
-{
-}
-
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-					    unsigned long size)
-{
-	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-
-extern void blkfin_inv_cache_all(void);
+#include <asm-generic/io.h>
 
 #endif
-
-#define	ioport_map(port, nr)		((void __iomem*)(port))
-#define	ioport_unmap(addr)
-
-/* Pages to physical address... */
-#define page_to_bus(page)       ((page - mem_map) << PAGE_SHIFT)
-
-#define phys_to_virt(vaddr)	((void *) (vaddr))
-#define virt_to_phys(vaddr)	((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
-
-#endif				/* __KERNEL__ */
-
-#endif				/* _BFIN_IO_H */
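
    For reference, DECLARE_BFIN_RAW_READX() above generates the three raw
    accessors that the newly included asm-generic/io.h then uses to build
    readb()/readw()/readl() and the port helpers.  Expanding the 16-bit case
    by hand gives roughly the following, i.e. the same cli/load/sti sequence
    the removed open-coded readw() used:

	/* approximate preprocessor expansion of DECLARE_BFIN_RAW_READX(w, u16, w, (z)) */
	static inline u16 __raw_readw(const volatile void __iomem *addr)
	{
		unsigned int val;
		int tmp;

		__asm__ __volatile__ (
			"cli %1;"
			"NOP; NOP; SSYNC;"
			"%0 = w [%2] (z);"
			"sti %1;"
			: "=d"(val), "=d"(tmp)
			: "a"(addr)
		);
		return (u16) val;
	}
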
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 41c4d70..3365cb9 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -13,9 +13,6 @@
 #ifdef CONFIG_SMP
 # include <asm/pda.h>
 # include <asm/processor.h>
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
 # define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
 #else
 extern unsigned long bfin_irq_flags;
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index aea8802..8af7772 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -14,7 +14,7 @@
 #define current_text_addr() ({ __label__ _l; _l: &&_l;})
 
 #include <asm/ptrace.h>
-#include <asm/blackfin.h>
+#include <mach/blackfin.h>
 
 static inline unsigned long rdusp(void)
 {
@@ -134,6 +134,8 @@
 	return bfin_read_DSPID();
 }
 
+#define blackfin_core_id() (bfin_dspid() & 0xff)
+
 static inline uint32_t __pure bfin_compiled_revid(void)
 {
 #if defined(CONFIG_BF_REV_0_0)
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1942ccf..1f286e7 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void arch_read_lock_asm(volatile int *ptr);
-asmlinkage int arch_read_trylock_asm(volatile int *ptr);
-asmlinkage void arch_read_unlock_asm(volatile int *ptr);
-asmlinkage void arch_write_lock_asm(volatile int *ptr);
-asmlinkage int arch_write_trylock_asm(volatile int *ptr);
-asmlinkage void arch_write_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
@@ -64,32 +64,36 @@
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	arch_read_lock_asm(&rw->lock);
+	__raw_read_lock_asm(&rw->lock);
 }
 
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return arch_read_trylock_asm(&rw->lock);
+	return __raw_read_trylock_asm(&rw->lock);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	arch_read_unlock_asm(&rw->lock);
+	__raw_read_unlock_asm(&rw->lock);
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	arch_write_lock_asm(&rw->lock);
+	__raw_write_lock_asm(&rw->lock);
 }
 
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return arch_write_trylock_asm(&rw->lock);
+	return __raw_write_trylock_asm(&rw->lock);
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	arch_write_unlock_asm(&rw->lock);
+	__raw_write_unlock_asm(&rw->lock);
 }
 
 #define arch_spin_relax(lock)  	cpu_relax()
diff --git a/arch/blackfin/include/mach-common/pll.h b/arch/blackfin/include/mach-common/pll.h
new file mode 100644
index 0000000..382178b
--- /dev/null
+++ b/arch/blackfin/include/mach-common/pll.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_COMMON_PLL_H
+#define _MACH_COMMON_PLL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/blackfin.h>
+#include <asm/irqflags.h>
+
+#ifndef bfin_iwr_restore
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
+{
+#ifdef SIC_IWR
+	bfin_write_SIC_IWR(iwr0);
+#else
+	bfin_write_SIC_IWR0(iwr0);
+# ifdef SIC_IWR1
+	bfin_write_SIC_IWR1(iwr1);
+# endif
+# ifdef SIC_IWR2
+	bfin_write_SIC_IWR2(iwr2);
+# endif
+#endif
+}
+#endif
+
+#ifndef bfin_iwr_save
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+              unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+#ifdef SIC_IWR
+	*iwr0 = bfin_read_SIC_IWR();
+#else
+	*iwr0 = bfin_read_SIC_IWR0();
+# ifdef SIC_IWR1
+	*iwr1 = bfin_read_SIC_IWR1();
+# endif
+# ifdef SIC_IWR2
+	*iwr2 = bfin_read_SIC_IWR2();
+# endif
+#endif
+	bfin_iwr_restore(niwr0, niwr1, niwr2);
+}
+#endif
+
+static inline void _bfin_write_pll_relock(u32 addr, unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1, iwr2;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	flags = hard_local_irq_save();
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	bfin_iwr_save(IWR_ENABLE(0), 0, 0, &iwr0, &iwr1, &iwr2);
+
+	bfin_write16(addr, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_iwr_restore(iwr0, iwr1, iwr2);
+	hard_local_irq_restore(flags);
+}
+
+/* Writing to PLL_CTL initiates a PLL relock sequence */
+static inline void bfin_write_PLL_CTL(unsigned int val)
+{
+	_bfin_write_pll_relock(PLL_CTL, val);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence */
+static inline void bfin_write_VR_CTL(unsigned int val)
+{
+	_bfin_write_pll_relock(VR_CTL, val);
+}
+
+#endif
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-a.h b/arch/blackfin/include/mach-common/ports-a.h
new file mode 100644
index 0000000..9f78a76
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-a.h
@@ -0,0 +1,25 @@
+/*
+ * Port A Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_A__
+#define __BFIN_PERIPHERAL_PORT_A__
+
+#define PA0		(1 << 0)
+#define PA1		(1 << 1)
+#define PA2		(1 << 2)
+#define PA3		(1 << 3)
+#define PA4		(1 << 4)
+#define PA5		(1 << 5)
+#define PA6		(1 << 6)
+#define PA7		(1 << 7)
+#define PA8		(1 << 8)
+#define PA9		(1 << 9)
+#define PA10		(1 << 10)
+#define PA11		(1 << 11)
+#define PA12		(1 << 12)
+#define PA13		(1 << 13)
+#define PA14		(1 << 14)
+#define PA15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-b.h b/arch/blackfin/include/mach-common/ports-b.h
new file mode 100644
index 0000000..b81702f
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-b.h
@@ -0,0 +1,25 @@
+/*
+ * Port B Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_B__
+#define __BFIN_PERIPHERAL_PORT_B__
+
+#define PB0		(1 << 0)
+#define PB1		(1 << 1)
+#define PB2		(1 << 2)
+#define PB3		(1 << 3)
+#define PB4		(1 << 4)
+#define PB5		(1 << 5)
+#define PB6		(1 << 6)
+#define PB7		(1 << 7)
+#define PB8		(1 << 8)
+#define PB9		(1 << 9)
+#define PB10		(1 << 10)
+#define PB11		(1 << 11)
+#define PB12		(1 << 12)
+#define PB13		(1 << 13)
+#define PB14		(1 << 14)
+#define PB15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-c.h b/arch/blackfin/include/mach-common/ports-c.h
new file mode 100644
index 0000000..3cc665e
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-c.h
@@ -0,0 +1,25 @@
+/*
+ * Port C Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_C__
+#define __BFIN_PERIPHERAL_PORT_C__
+
+#define PC0		(1 << 0)
+#define PC1		(1 << 1)
+#define PC2		(1 << 2)
+#define PC3		(1 << 3)
+#define PC4		(1 << 4)
+#define PC5		(1 << 5)
+#define PC6		(1 << 6)
+#define PC7		(1 << 7)
+#define PC8		(1 << 8)
+#define PC9		(1 << 9)
+#define PC10		(1 << 10)
+#define PC11		(1 << 11)
+#define PC12		(1 << 12)
+#define PC13		(1 << 13)
+#define PC14		(1 << 14)
+#define PC15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-d.h b/arch/blackfin/include/mach-common/ports-d.h
new file mode 100644
index 0000000..868c6a0
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-d.h
@@ -0,0 +1,25 @@
+/*
+ * Port D Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_D__
+#define __BFIN_PERIPHERAL_PORT_D__
+
+#define PD0		(1 << 0)
+#define PD1		(1 << 1)
+#define PD2		(1 << 2)
+#define PD3		(1 << 3)
+#define PD4		(1 << 4)
+#define PD5		(1 << 5)
+#define PD6		(1 << 6)
+#define PD7		(1 << 7)
+#define PD8		(1 << 8)
+#define PD9		(1 << 9)
+#define PD10		(1 << 10)
+#define PD11		(1 << 11)
+#define PD12		(1 << 12)
+#define PD13		(1 << 13)
+#define PD14		(1 << 14)
+#define PD15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-e.h b/arch/blackfin/include/mach-common/ports-e.h
new file mode 100644
index 0000000..c88b0d0
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-e.h
@@ -0,0 +1,25 @@
+/*
+ * Port E Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_E__
+#define __BFIN_PERIPHERAL_PORT_E__
+
+#define PE0		(1 << 0)
+#define PE1		(1 << 1)
+#define PE2		(1 << 2)
+#define PE3		(1 << 3)
+#define PE4		(1 << 4)
+#define PE5		(1 << 5)
+#define PE6		(1 << 6)
+#define PE7		(1 << 7)
+#define PE8		(1 << 8)
+#define PE9		(1 << 9)
+#define PE10		(1 << 10)
+#define PE11		(1 << 11)
+#define PE12		(1 << 12)
+#define PE13		(1 << 13)
+#define PE14		(1 << 14)
+#define PE15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-f.h b/arch/blackfin/include/mach-common/ports-f.h
new file mode 100644
index 0000000..d6af206
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-f.h
@@ -0,0 +1,25 @@
+/*
+ * Port F Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_F__
+#define __BFIN_PERIPHERAL_PORT_F__
+
+#define PF0		(1 << 0)
+#define PF1		(1 << 1)
+#define PF2		(1 << 2)
+#define PF3		(1 << 3)
+#define PF4		(1 << 4)
+#define PF5		(1 << 5)
+#define PF6		(1 << 6)
+#define PF7		(1 << 7)
+#define PF8		(1 << 8)
+#define PF9		(1 << 9)
+#define PF10		(1 << 10)
+#define PF11		(1 << 11)
+#define PF12		(1 << 12)
+#define PF13		(1 << 13)
+#define PF14		(1 << 14)
+#define PF15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-g.h b/arch/blackfin/include/mach-common/ports-g.h
new file mode 100644
index 0000000..09355d3
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-g.h
@@ -0,0 +1,25 @@
+/*
+ * Port G Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_G__
+#define __BFIN_PERIPHERAL_PORT_G__
+
+#define PG0		(1 << 0)
+#define PG1		(1 << 1)
+#define PG2		(1 << 2)
+#define PG3		(1 << 3)
+#define PG4		(1 << 4)
+#define PG5		(1 << 5)
+#define PG6		(1 << 6)
+#define PG7		(1 << 7)
+#define PG8		(1 << 8)
+#define PG9		(1 << 9)
+#define PG10		(1 << 10)
+#define PG11		(1 << 11)
+#define PG12		(1 << 12)
+#define PG13		(1 << 13)
+#define PG14		(1 << 14)
+#define PG15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-h.h b/arch/blackfin/include/mach-common/ports-h.h
new file mode 100644
index 0000000..fa3910c
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-h.h
@@ -0,0 +1,25 @@
+/*
+ * Port H Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_H__
+#define __BFIN_PERIPHERAL_PORT_H__
+
+#define PH0		(1 << 0)
+#define PH1		(1 << 1)
+#define PH2		(1 << 2)
+#define PH3		(1 << 3)
+#define PH4		(1 << 4)
+#define PH5		(1 << 5)
+#define PH6		(1 << 6)
+#define PH7		(1 << 7)
+#define PH8		(1 << 8)
+#define PH9		(1 << 9)
+#define PH10		(1 << 10)
+#define PH11		(1 << 11)
+#define PH12		(1 << 12)
+#define PH13		(1 << 13)
+#define PH14		(1 << 14)
+#define PH15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-i.h b/arch/blackfin/include/mach-common/ports-i.h
new file mode 100644
index 0000000..f176f08
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-i.h
@@ -0,0 +1,25 @@
+/*
+ * Port I Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_I__
+#define __BFIN_PERIPHERAL_PORT_I__
+
+#define PI0		(1 << 0)
+#define PI1		(1 << 1)
+#define PI2		(1 << 2)
+#define PI3		(1 << 3)
+#define PI4		(1 << 4)
+#define PI5		(1 << 5)
+#define PI6		(1 << 6)
+#define PI7		(1 << 7)
+#define PI8		(1 << 8)
+#define PI9		(1 << 9)
+#define PI10		(1 << 10)
+#define PI11		(1 << 11)
+#define PI12		(1 << 12)
+#define PI13		(1 << 13)
+#define PI14		(1 << 14)
+#define PI15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-j.h b/arch/blackfin/include/mach-common/ports-j.h
new file mode 100644
index 0000000..924123e
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-j.h
@@ -0,0 +1,25 @@
+/*
+ * Port J Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_J__
+#define __BFIN_PERIPHERAL_PORT_J__
+
+#define PJ0		(1 << 0)
+#define PJ1		(1 << 1)
+#define PJ2		(1 << 2)
+#define PJ3		(1 << 3)
+#define PJ4		(1 << 4)
+#define PJ5		(1 << 5)
+#define PJ6		(1 << 6)
+#define PJ7		(1 << 7)
+#define PJ8		(1 << 8)
+#define PJ9		(1 << 9)
+#define PJ10		(1 << 10)
+#define PJ11		(1 << 11)
+#define PJ12		(1 << 12)
+#define PJ13		(1 << 13)
+#define PJ14		(1 << 14)
+#define PJ15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index bfe75af..886e000 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -116,7 +116,7 @@
 	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
 		dcplb_bounds[i_d].eaddr = uncached_end;
 	else
-		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
+		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
 	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
 	/* DMA uncached region.  */
 	if (DMA_UNCACHED_REGION) {
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index edae461..eb92592 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -345,6 +345,23 @@
 }
 #endif
 
+#ifdef CONFIG_IPIPE
+static unsigned long kgdb_arch_imask;
+#endif
+
+void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+	if (kgdb_single_step)
+		preempt_enable();
+
+#ifdef CONFIG_IPIPE
+	if (kgdb_arch_imask) {
+		cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
+		kgdb_arch_imask = 0;
+	}
+#endif
+}
+
 int kgdb_arch_handle_exception(int vector, int signo,
 			       int err_code, char *remcom_in_buffer,
 			       char *remcom_out_buffer,
@@ -388,6 +405,12 @@
 			 * kgdb_single_step > 0 means in single step mode
 			 */
 			kgdb_single_step = i + 1;
+
+			preempt_disable();
+#ifdef CONFIG_IPIPE
+			kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask;
+			cpu_pda[raw_smp_processor_id()].ex_imask = 0;
+#endif
 		}
 
 		bfin_correct_hw_break();
@@ -448,6 +471,9 @@
 int kgdb_arch_init(void)
 {
 	kgdb_single_step = 0;
+#ifdef CONFIG_IPIPE
+	kgdb_arch_imask = 0;
+#endif
 
 	bfin_remove_all_hw_break();
 	return 0;
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 08c0236..2a6e9db 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -95,6 +95,10 @@
 {
 	struct proc_dir_entry *entry;
 
+#if L2_LENGTH
+	num2 = 0;
+#endif
+
 	entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
 	if (entry == NULL)
 		return -ENOMEM;
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index b894c8a..c0ccadc 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -104,24 +104,23 @@
 
 static struct bfin_phydev_platform_data bfin_phydev_data[] = {
 	{
-		.addr = 1,
-		.irq = IRQ_MAC_PHYINT,
-	},
-	{
-		.addr = 2,
-		.irq = IRQ_MAC_PHYINT,
-	},
-	{
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
 		.addr = 3,
+#else
+		.addr = 1,
+#endif
 		.irq = IRQ_MAC_PHYINT,
 	},
 };
 
 static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
-	.phydev_number = 3,
+	.phydev_number = 1,
 	.phydev_data = bfin_phydev_data,
 	.phy_mode = PHY_INTERFACE_MODE_MII,
 	.mac_peripherals = bfin_mac_peripherals,
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+	.phy_mask = 0xfff7, /* Only probe the port PHY connected to the on-chip MAC */
+#endif
 };
 
 static struct platform_device bfin_mii_bus = {
@@ -453,7 +452,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -496,7 +495,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -636,9 +635,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -670,9 +669,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index e6ce1d7..50fc5c8 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -377,7 +377,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -420,7 +420,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -547,9 +547,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -581,9 +581,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf518/dma.c b/arch/blackfin/mach-bf518/dma.c
index 78b4360..bcd1fbc 100644
--- a/arch/blackfin/mach-bf518/dma.c
+++ b/arch/blackfin/mach-bf518/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
new file mode 100644
index 0000000..00c603f
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+
+#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
index 970d310..f6d924a 100644
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port port;
-	unsigned int old_status;
-	int status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int tx_done;
-	int tx_count;
-	struct circ_buf rx_dma_buf;
-	struct timer_list rx_dma_timer;
-	int rx_dma_nrows;
-	unsigned int tx_dma_channel;
-	unsigned int rx_dma_channel;
-	struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list cts_timer;
-	int cts_pin;
-	int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long uart_base_addr;
 	int uart_irq;
@@ -146,3 +75,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 9053462..a882886 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -1,61 +1,43 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
 #define _MACH_BLACKFIN_H_
 
 #include "bf518.h"
-#include "defBF512.h"
 #include "anomaly.h"
 
-#if defined(CONFIG_BF518)
-#include "defBF518.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF512
+# include "defBF512.h"
+#endif
+#ifdef CONFIG_BF514
+# include "defBF514.h"
+#endif
+#ifdef CONFIG_BF516
+# include "defBF516.h"
+#endif
+#ifdef CONFIG_BF518
+# include "defBF518.h"
 #endif
 
-#if defined(CONFIG_BF516)
-#include "defBF516.h"
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF512
+#  include "cdefBF512.h"
+# endif
+# ifdef CONFIG_BF514
+#  include "cdefBF514.h"
+# endif
+# ifdef CONFIG_BF516
+#  include "cdefBF516.h"
+# endif
+# ifdef CONFIG_BF518
+#  include "cdefBF518.h"
+# endif
 #endif
 
-#if defined(CONFIG_BF514)
-#include "defBF514.h"
-#endif
-
-#if defined(CONFIG_BF512)
-#include "defBF512.h"
-#endif
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF512.h"
-
-#if defined(CONFIG_BF518)
-#include "cdefBF518.h"
-#endif
-
-#if defined(CONFIG_BF516)
-#include "cdefBF516.h"
-#endif
-
-#if defined(CONFIG_BF514)
-#include "cdefBF514.h"
-#endif
-#endif
-
-#define BFIN_UART_NR_PORTS	2
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
 #endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index 493020d..b657d37 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,15 +7,1037 @@
 #ifndef _CDEF_BF512_H
 #define _CDEF_BF512_H
 
-/* include all Core registers and bit definitions */
-#include "defBF512.h"
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID()			bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
 
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define bfin_read_SWRST()			bfin_read16(SWRST)
+#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
+#define bfin_read_SYSCR()			bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
 
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
+#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
+#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
+
+#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
+#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
+
+#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
+#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
+#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
+#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
+#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
+#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
+#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers													*/
+#define bfin_read_DMAC_TC_PER()			bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)		bfin_write16(DMAC_TC_PER, val)
+#define bfin_read_DMAC_TC_CNT()			bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)		bfin_write16(DMAC_TC_CNT, val)
+
+/* DMA Controller																	*/
+#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
+#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
+#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
+#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
+#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
+
+/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF51x processor) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
+#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
+#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
+
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
 
 #endif /* _CDEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
index 108fa4b..dc98866 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF514_H
 #define _CDEF_BF514_H
 
-/* include all Core registers and bit definitions */
-#include "defBF514.h"
-
 /* BF514 is BF512 + RSI */
 #include "cdefBF512.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
index 2751592..142e45c 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF516_H
 #define _CDEF_BF516_H
 
-/* include all Core registers and bit definitions */
-#include "defBF516.h"
-
 /* BF516 is BF514 + EMAC */
 #include "cdefBF514.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
index 7fb7f0e..e638197 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF518_H
 #define _CDEF_BF518_H
 
-/* include all Core registers and bit definitions */
-#include "defBF518.h"
-
 /* BF518 is BF516 + IEEE-1588 */
 #include "cdefBF516.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
deleted file mode 100644
index e16969f..0000000
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
+++ /dev/null
@@ -1,1061 +0,0 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _CDEF_BF52X_H
-#define _CDEF_BF52X_H
-
-#include <asm/blackfin.h>
-
-#include "defBF51x_base.h"
-
-/* Include core specific register pointer definitions 								*/
-#include <asm/cdef_LPBlackfin.h>
-
-/* ==== begin from cdefBF534.h ==== */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
-#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
-#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
-#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
-#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
-#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
-#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
-#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
-#define bfin_read_CHIPID()			bfin_read32(CHIPID)
-#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
-#define bfin_read_SWRST()			bfin_read16(SWRST)
-#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
-#define bfin_read_SYSCR()			bfin_read16(SYSCR)
-#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
-
-#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
-#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
-#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
-#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
-#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
-#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
-
-#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
-#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
-#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
-#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
-#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
-#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
-#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
-#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
-
-#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
-#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
-#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
-#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
-
-#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
-#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
-#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
-#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
-
-/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
-
-#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
-#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
-#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
-#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
-#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
-#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
-#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
-#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
-#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
-#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
-#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
-#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
-#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
-#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
-
-/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
-#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
-#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
-#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
-#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
-#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
-#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
-#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
-#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
-#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
-#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
-#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
-#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
-#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
-#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
-#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
-#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
-#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
-#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
-#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
-#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
-#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
-#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
-#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
-#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
-#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
-#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
-#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
-#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
-#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
-#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
-#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
-#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
-#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
-#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
-#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
-#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
-#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
-#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
-#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
-#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
-#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
-#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
-
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
-#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
-#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
-#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
-#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
-#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
-#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
-#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
-
-#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
-#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
-#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
-#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
-#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
-#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
-#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
-#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
-
-#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
-#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
-#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
-#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
-#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
-#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
-#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
-#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
-
-#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
-#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
-#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
-#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
-#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
-#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
-#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
-#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
-
-#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
-#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
-#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
-#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
-#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
-#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
-#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
-#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
-
-#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
-#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
-#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
-#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
-#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
-#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
-#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
-#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
-
-#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
-#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
-#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
-#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
-#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
-#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
-#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
-#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
-
-#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
-#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
-#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
-#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
-#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
-#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
-#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
-#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
-
-#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
-#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
-#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
-#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
-#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
-#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
-#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
-#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
-#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
-#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
-#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
-#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
-#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
-#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
-#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
-#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
-#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
-#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
-#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
-#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
-#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
-#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
-#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
-#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
-#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
-#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
-#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
-#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
-#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
-#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
-#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
-#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
-#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
-#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
-#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
-#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
-#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
-
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
-#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
-#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
-#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
-
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
-#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
-#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
-#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
-#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
-#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
-#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
-#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
-#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
-#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
-#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
-#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
-#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
-#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
-#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
-#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
-#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
-#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
-#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
-#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
-#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
-#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
-#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
-#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
-#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
-#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
-#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
-#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
-#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
-#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
-#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
-#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
-#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
-#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
-#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
-#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
-#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
-#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
-#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
-#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
-#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
-#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
-#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
-#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
-#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
-#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
-#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
-#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
-#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
-#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
-#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
-#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
-#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
-#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
-#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
-#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
-#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
-#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
-
-
-/* DMA Traffic Control Registers													*/
-#define bfin_read_DMA_TC_PER()			bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)		bfin_write16(DMA_TC_PER, val)
-#define bfin_read_DMA_TC_CNT()			bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)		bfin_write16(DMA_TC_CNT, val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER()			bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)		bfin_write16(DMA_TCPER, val)
-#define bfin_read_DMA_TCCNT()			bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)		bfin_write16(DMA_TCCNT, val)
-
-/* DMA Controller																	*/
-#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
-#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
-#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
-#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
-#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
-#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
-#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
-#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
-#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
-#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
-#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
-#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
-#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
-#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
-#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
-#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
-#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
-#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
-#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
-#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
-#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
-#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
-#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
-#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
-#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
-#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
-#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
-#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
-#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
-#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
-#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
-#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
-#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
-#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
-#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
-#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
-#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
-#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
-#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
-#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
-#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
-#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
-#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
-#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
-#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
-#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
-#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
-#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
-#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
-#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
-#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
-#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
-#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
-#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
-#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
-#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
-#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
-#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
-#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
-#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
-#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
-#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
-#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
-#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
-#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
-#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
-#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
-#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
-#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
-#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
-#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
-#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
-#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
-#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
-#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
-#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
-#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
-#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
-#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
-#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
-#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
-#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
-#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
-#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
-#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
-#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
-#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
-#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
-#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
-#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
-#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
-#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
-#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
-#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
-#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
-#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
-#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
-#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
-#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
-#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
-#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
-#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
-#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
-#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
-#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
-#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
-#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
-#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
-#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
-#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
-#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
-#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
-#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
-#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
-#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
-#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
-#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
-#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
-#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
-#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
-#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
-#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
-#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
-#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
-#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
-#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
-#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
-#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
-#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
-#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
-#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
-#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
-#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
-#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
-#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
-#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
-#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
-#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
-#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
-#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
-#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
-#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
-#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
-#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
-#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
-#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
-#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
-#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
-#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
-#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
-#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
-#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
-#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
-#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
-#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
-#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
-#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
-#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
-#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
-#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
-#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
-#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
-#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
-#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
-#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
-#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
-#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
-#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
-#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
-#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
-#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
-#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
-#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
-#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
-#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
-#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
-#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
-#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
-#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
-#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
-#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
-#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
-#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
-#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
-#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
-#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
-#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
-#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
-#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
-#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
-#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
-#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
-#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
-#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
-#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
-#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
-#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
-#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
-#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
-#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
-#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
-#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
-#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
-#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
-#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
-#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
-#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
-#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
-#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
-#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
-#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
-#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
-#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
-#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
-#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
-#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
-#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
-#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
-#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
-#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
-#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
-#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
-#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
-#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
-#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
-#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
-#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
-#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
-#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
-#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
-#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
-#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
-#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
-#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
-#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
-#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
-#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
-#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
-#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
-#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
-#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
-#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
-#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
-#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
-#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
-#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
-#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
-#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
-#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
-#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
-#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
-#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
-#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
-#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
-#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
-#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
-#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
-#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
-#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
-#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
-#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
-#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
-#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
-#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
-#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
-#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
-#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
-#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
-#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
-#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
-#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
-#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
-#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
-#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
-#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
-#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
-#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
-#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
-#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
-#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
-#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
-#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
-#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
-#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
-#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
-#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
-#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
-#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
-#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
-#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
-#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
-#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
-#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
-#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
-#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
-#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
-#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
-#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
-#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
-#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
-#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
-#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
-#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
-#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
-#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
-#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
-#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
-#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
-#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
-#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
-#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
-#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
-#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
-#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
-#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
-#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
-#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
-#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
-#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
-#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
-#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
-#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
-#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
-#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
-#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
-#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
-#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
-#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
-#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
-#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
-#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
-#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
-#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
-#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
-#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
-#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
-#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
-#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
-#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
-#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
-#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
-#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
-#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
-#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
-#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
-#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
-#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
-#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
-#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
-#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
-#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
-#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
-#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
-#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
-#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
-#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
-#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
-#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
-#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
-#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
-#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
-#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
-#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
-#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
-#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
-#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
-#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
-#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
-#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
-#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
-#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
-#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
-#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
-#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
-#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
-#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
-#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
-#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
-#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
-#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
-#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
-#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
-#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
-#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
-#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
-#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
-#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
-#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
-#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
-#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
-#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
-#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
-#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
-#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
-#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
-#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
-#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
-#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
-#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
-#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
-#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
-#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
-#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
-#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
-#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
-#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
-#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
-#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
-#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
-#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
-#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
-#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
-#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
-#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
-#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
-#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
-#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
-#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
-#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
-#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
-#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
-#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
-#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
-#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
-#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
-#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
-#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
-#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
-#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
-#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
-#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
-#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
-#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
-#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
-#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
-#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
-#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
-#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
-#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
-#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
-#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
-#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
-#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
-#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
-#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
-#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
-#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
-#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
-#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
-#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
-#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
-#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
-#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
-#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
-#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
-#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
-#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
-#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
-#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
-
-/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF51x processor) */
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
-#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
-#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
-#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
-#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
-#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
-#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
-#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
-#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
-#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
-
-#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
-
-/* ==== end from cdefBF534.h ==== */
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-
-#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
-#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
-#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
-#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
-#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
-#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
-
-#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
-#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
-#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
-#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
-#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
-#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
-#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
-#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
-#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
-#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
-#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
-#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
-#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
-#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
-#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
-#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
-#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
-#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
-#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
-#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
-#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
-#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
-#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
-#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
-
-/* Counter Registers */
-
-#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
-#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
-#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
-#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
-#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
-#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
-#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
-#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
-#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
-#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
-#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
-#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
-#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
-#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
-#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
-#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
-
-/* Security Registers */
-
-#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
-#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
-#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
-#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
-#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
-#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
-#endif /* _CDEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index 9b505bb..2728582 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,12 +7,1388 @@
 #ifndef _DEF_BF512_H
 #define _DEF_BF512_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x    */
+/* ************************************************************** */
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
+#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
+#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register				*/
+#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
+#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
+#define CHIPID				0xFFC00014	/* Device ID Register */
 
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)								*/
+#define SWRST				0xFFC00100	/* Software Reset Register					*/
+#define SYSCR				0xFFC00104	/* System Configuration Register				*/
+#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register			*/
+
+#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
+#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0				*/
+#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1				*/
+#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2				*/
+#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3				*/
+#define SIC_ISR0			0xFFC00120	/* Interrupt Status Register					*/
+#define SIC_IWR0			0xFFC00124	/* Interrupt Wakeup Register					*/
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
+#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
+#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
+#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
+#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
+#define SIC_ISR1                        0xFFC00160     /* Interrupt Status register */
+#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
+
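+/*
+ * Illustrative usage sketch (hypothetical example, not taken from this
+ * header): the SIC mask registers are ordinary memory-mapped 32-bit words,
+ * one bit per system interrupt, so unmasking source <n> by hand would look
+ * roughly like
+ *
+ *	bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | (1 << n));
+ *
+ * using the generic bfin_read32()/bfin_write32() accessors; in practice the
+ * kernel's interrupt-controller code manages these bits.
+ */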
+
+/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
+#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
+#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
+#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
+#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
+#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
+#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
+#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
+#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
+#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
+#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
+#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
+#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
+#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
+#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
+#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
+#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
+#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
+#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
+#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
+#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
+
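+/*
+ * Illustrative usage sketch (hypothetical example): UART0_DLL/UART0_DLH
+ * form the 16-bit divisor latch and the bit clock is derived from the
+ * system clock, so a baud-rate setup is assumed to look roughly like
+ *
+ *	u16 divisor = sclk_hz / (16 * baud);
+ *	bfin_write16(UART0_DLL, divisor & 0xFF);
+ *	bfin_write16(UART0_DLH, divisor >> 8);
+ *
+ * where sclk_hz and baud are caller-supplied values, the divide-by-16 is
+ * the conventional UART formula assumed here, and the divisor-latch access
+ * bit in UART0_LCR is assumed to gate the writes, as on 16550-style UARTs.
+ * The serial driver normally does all of this for you.
+ */
+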
+/* SPI0 Controller			(0xFFC00500 - 0xFFC005FF)							*/
+#define SPI0_REGBASE			0xFFC00500
+#define SPI0_CTL			0xFFC00500	/* SPI Control Register						*/
+#define SPI0_FLG			0xFFC00504	/* SPI Flag register						*/
+#define SPI0_STAT			0xFFC00508	/* SPI Status register						*/
+#define SPI0_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register				*/
+#define SPI0_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register				*/
+#define SPI0_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
+#define SPI0_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
+
+/* SPI1 Controller			(0xFFC03400 - 0xFFC034FF)							*/
+#define SPI1_REGBASE			0xFFC03400
+#define SPI1_CTL			0xFFC03400	/* SPI Control Register						*/
+#define SPI1_FLG			0xFFC03404	/* SPI Flag register						*/
+#define SPI1_STAT			0xFFC03408	/* SPI Status register						*/
+#define SPI1_TDBR			0xFFC0340C	/* SPI Transmit Data Buffer Register				*/
+#define SPI1_RDBR			0xFFC03410	/* SPI Receive Data Buffer Register				*/
+#define SPI1_BAUD			0xFFC03414	/* SPI Baud rate Register					*/
+#define SPI1_SHADOW			0xFFC03418	/* SPI_RDBR Shadow Register					*/
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
+#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
+#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
+#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
+
+#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
+#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
+#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
+#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
+
+#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
+#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
+#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
+#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
+
+#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
+#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
+#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
+#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
+
+#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
+#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
+#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
+#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
+
+#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
+#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
+#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
+#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
+
+#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
+#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
+#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
+#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
+
+#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
+#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
+#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
+#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
+
+#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
+#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
+#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
+#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
+#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
+#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
+#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
+#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
+#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
+#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
+#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
+#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
+#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
+#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
+#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
+#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
+#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
+#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
+
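+/*
+ * Illustrative usage sketch (hypothetical example): the Port F data
+ * registers follow the Blackfin write-1-to-set/clear convention, so a pin
+ * can be driven without a read-modify-write of the data register itself.
+ * Driving PF3 high, for instance, could look like
+ *
+ *	bfin_write16(PORTFIO_DIR, bfin_read16(PORTFIO_DIR) | (1 << 3));
+ *	bfin_write16(PORTFIO_SET, 1 << 3);
+ *
+ * Real drivers are expected to go through the gpiolib API rather than raw
+ * MMR writes.
+ */
+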
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
+#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
+#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
+#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
+#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
+#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
+#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
+#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Receive Configuration 1 Register			*/
+#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Receive Configuration 2 Register			*/
+#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
+#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
+#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
+#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
+#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
+#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
+#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
+#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
+#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
+#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
+#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
+#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
+#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
+#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
+#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
+#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
+#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
+#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
+#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
+#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
+#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Receive Configuration 1 Register			*/
+#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Receive Configuration 2 Register			*/
+#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
+#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
+#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
+#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
+#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
+#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
+#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
+#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
+#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
+#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
+#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
+#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
+#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
+#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
+#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
+#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
+#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
+#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
+#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
+#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
+#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
+
+/* DMA Traffic Control Registers													*/
+#define DMAC_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMAC_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
+#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
+#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
+#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
+#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
+#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
+#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
+#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
+#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
+#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
+#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
+#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
+#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
+#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
+
+#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
+#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
+#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
+#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
+#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
+#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
+#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
+#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
+#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
+#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
+#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
+#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
+#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
+
+#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
+#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
+#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
+#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
+#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
+#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
+#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
+#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
+#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
+#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
+#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
+#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
+#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
+
+#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
+#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
+#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
+#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
+#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
+#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
+#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
+#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
+#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
+#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
+#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
+#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
+#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
+
+#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
+#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
+#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
+#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
+#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
+#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
+#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
+#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
+#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
+#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
+#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
+#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
+#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
+
+#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
+#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
+#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
+#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
+#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
+#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
+#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
+#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
+#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
+#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
+#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
+#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
+#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
+
+#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
+#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
+#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
+#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
+#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
+#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
+#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
+#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
+#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
+#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
+#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
+#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
+#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
+
+#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
+#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
+#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
+#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
+#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
+#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
+#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
+#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
+#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
+#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
+#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
+#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
+#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
+
+#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
+#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
+#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
+#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
+#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
+#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
+#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
+#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
+#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
+#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
+#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
+#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
+#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
+
+#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
+#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
+#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
+#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
+#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
+#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
+#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
+#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
+#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
+#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
+#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
+#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
+#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
+
+#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
+#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
+#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
+#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
+#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
+#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
+#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
+#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
+#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
+#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
+#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
+#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
+#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
+
+#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
+#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
+#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
+#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
+#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
+#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
+#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
+#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
+#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
+#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
+#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
+#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
+#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
+
+#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
+#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
+#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
+#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
+#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
+#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
+#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
+#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
+#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
+#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
+#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
+
+#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
+#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
+#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
+#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
+#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
+#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
+#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
+#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
+#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
+#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
+#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
+#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
+#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
+
+#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
+#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
+#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
+#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
+#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
+#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
+#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
+#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
+#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
+#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
+#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
+
+#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
+#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
+#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
+#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
+#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
+#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
+#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
+#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
+#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
+#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
+#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
+#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
+#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
+#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
+#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
+#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
+#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
+#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+#define TWI0_REGBASE			0xFFC01400
+#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
+#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
+#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
+#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
+#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
+#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
+#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
+#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
+#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
+#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
+#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
+#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
+#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
+#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
+#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
+#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
+#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
+#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
+#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
+#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
+#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
+#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
+#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
+#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
+#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
+#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
+#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
+#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
+#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
+#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
+#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
+#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
+#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
+#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
+#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
+#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
+#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
+#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
+#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
+#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
+#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
+#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
+#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
+#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
+#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
+#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
+#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
+#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
+#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
+#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
+#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
+#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
+#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
+#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
+#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
+#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
+#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
+
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
+#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
+#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
+#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
+#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
+#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
+#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
+#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
+#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
+#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
+#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
+#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
+
+#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
+#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
+#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
+#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
+#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
+#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
+#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
+
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX               0xFFC03210      /* Port F mux control */
+#define PORTG_MUX               0xFFC03214      /* Port G mux control */
+#define PORTH_MUX               0xFFC03218      /* Port H mux control */
+#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
+#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
+#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
+#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
+#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
+#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
+#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
+#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully, as any that do left shifts for field
+**				depositing will result in the lower order bits being destroyed.  Any
+**				macro that shifts left to properly position the bit-field should be
+**				used as part of an OR to initialize a register and NOT as a dynamic
+**				modifier UNLESS the lower order bits are saved and ORed back in when
+**				the macro is used.
+*************************************************************************************/
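+
+/*
+** Illustrative sketch only (not part of the register map): a field-depositing
+** macro, e.g. CLKHI() or the Px_IVG() macros below, should normally contribute
+** to a single OR that initializes the whole register.  To change one field
+** later, clear that field first and OR the saved remainder back in:
+**
+**		new_val = (old_val & ~FIELD_MASK) | CLKHI(y);
+**
+** FIELD_MASK here is a hypothetical mask covering only the bits being replaced.
+*/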
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
+/* SWRST Masks																		*/
+#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
+#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
+#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
+#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
+#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
+
+/* SYSCR Masks																				*/
+#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
+#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
+
+#if 0
+#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
+
+#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
+#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
+#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
+#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
+#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
+
+#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
+#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
+#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
+#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
+#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
+#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
+#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
+#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
+
+#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
+#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
+#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
+#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
+#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
+#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
+#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
+#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
+#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
+#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
+
+#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
+#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
+#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
+#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
+#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
+#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
+#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
+#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
+#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
+#endif
+
+/* SIC_IAR0 Macros															*/
+#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
+#define P1_IVG(x)		((((x)&0xF)-7) << 0x4)	/* Peripheral #1 assigned IVG #x 	*/
+#define P2_IVG(x)		((((x)&0xF)-7) << 0x8)	/* Peripheral #2 assigned IVG #x 	*/
+#define P3_IVG(x)		((((x)&0xF)-7) << 0xC)	/* Peripheral #3 assigned IVG #x	*/
+#define P4_IVG(x)		((((x)&0xF)-7) << 0x10)	/* Peripheral #4 assigned IVG #x	*/
+#define P5_IVG(x)		((((x)&0xF)-7) << 0x14)	/* Peripheral #5 assigned IVG #x	*/
+#define P6_IVG(x)		((((x)&0xF)-7) << 0x18)	/* Peripheral #6 assigned IVG #x	*/
+#define P7_IVG(x)		((((x)&0xF)-7) << 0x1C)	/* Peripheral #7 assigned IVG #x	*/
+
+/* SIC_IAR1 Macros															*/
+#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
+#define P9_IVG(x)		((((x)&0xF)-7) << 0x4)	/* Peripheral #9 assigned IVG #x 	*/
+#define P10_IVG(x)		((((x)&0xF)-7) << 0x8)	/* Peripheral #10 assigned IVG #x	*/
+#define P11_IVG(x)		((((x)&0xF)-7) << 0xC)	/* Peripheral #11 assigned IVG #x 	*/
+#define P12_IVG(x)		((((x)&0xF)-7) << 0x10)	/* Peripheral #12 assigned IVG #x	*/
+#define P13_IVG(x)		((((x)&0xF)-7) << 0x14)	/* Peripheral #13 assigned IVG #x	*/
+#define P14_IVG(x)		((((x)&0xF)-7) << 0x18)	/* Peripheral #14 assigned IVG #x	*/
+#define P15_IVG(x)		((((x)&0xF)-7) << 0x1C)	/* Peripheral #15 assigned IVG #x	*/
+
+/* SIC_IAR2 Macros															*/
+#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
+#define P17_IVG(x)		((((x)&0xF)-7) << 0x4)	/* Peripheral #17 assigned IVG #x	*/
+#define P18_IVG(x)		((((x)&0xF)-7) << 0x8)	/* Peripheral #18 assigned IVG #x	*/
+#define P19_IVG(x)		((((x)&0xF)-7) << 0xC)	/* Peripheral #19 assigned IVG #x	*/
+#define P20_IVG(x)		((((x)&0xF)-7) << 0x10)	/* Peripheral #20 assigned IVG #x	*/
+#define P21_IVG(x)		((((x)&0xF)-7) << 0x14)	/* Peripheral #21 assigned IVG #x	*/
+#define P22_IVG(x)		((((x)&0xF)-7) << 0x18)	/* Peripheral #22 assigned IVG #x	*/
+#define P23_IVG(x)		((((x)&0xF)-7) << 0x1C)	/* Peripheral #23 assigned IVG #x	*/
+
+/* SIC_IAR3 Macros															*/
+#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
+#define P25_IVG(x)		((((x)&0xF)-7) << 0x4)	/* Peripheral #25 assigned IVG #x	*/
+#define P26_IVG(x)		((((x)&0xF)-7) << 0x8)	/* Peripheral #26 assigned IVG #x	*/
+#define P27_IVG(x)		((((x)&0xF)-7) << 0xC)	/* Peripheral #27 assigned IVG #x	*/
+#define P28_IVG(x)		((((x)&0xF)-7) << 0x10)	/* Peripheral #28 assigned IVG #x	*/
+#define P29_IVG(x)		((((x)&0xF)-7) << 0x14)	/* Peripheral #29 assigned IVG #x	*/
+#define P30_IVG(x)		((((x)&0xF)-7) << 0x18)	/* Peripheral #30 assigned IVG #x	*/
+#define P31_IVG(x)		((((x)&0xF)-7) << 0x1C)	/* Peripheral #31 assigned IVG #x	*/
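+
+/*
+** Usage sketch (IVG numbers are placeholders): each SIC_IARx value is the OR
+** of one Px_IVG() term per peripheral, written once to the register, e.g.
+** for SIC_IAR0:
+**
+**		iar0 = P0_IVG(7) | P1_IVG(7) | P2_IVG(8)  | P3_IVG(8) |
+**		       P4_IVG(9) | P5_IVG(9) | P6_IVG(10) | P7_IVG(10);
+*/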
+
+
+/* SIC_IMASK Masks																		*/
+#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
+#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
+#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
+#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
+
+/* SIC_IWR Masks																		*/
+#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
+#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
+#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
+#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
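+
+/*
+** Usage sketch: SIC_MASK()/SIC_UNMASK() and IWR_ENABLE()/IWR_DISABLE() are
+** plain bit helpers, so per-peripheral changes are read-modify-write on the
+** current register contents (imask_val and iwr_val are illustrative locals):
+**
+**		imask_val |= SIC_MASK(x);		(mask peripheral #x)
+**		imask_val &= SIC_UNMASK(x);		(unmask peripheral #x)
+**		iwr_val   |= IWR_ENABLE(x);		(wakeup-enable peripheral #x)
+*/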
+
+/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
+/* TIMER_ENABLE Masks													*/
+#define TIMEN0			0x0001		/* Enable Timer 0					*/
+#define TIMEN1			0x0002		/* Enable Timer 1					*/
+#define TIMEN2			0x0004		/* Enable Timer 2					*/
+#define TIMEN3			0x0008		/* Enable Timer 3					*/
+#define TIMEN4			0x0010		/* Enable Timer 4					*/
+#define TIMEN5			0x0020		/* Enable Timer 5					*/
+#define TIMEN6			0x0040		/* Enable Timer 6					*/
+#define TIMEN7			0x0080		/* Enable Timer 7					*/
+
+/* TIMER_DISABLE Masks													*/
+#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
+#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
+#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
+#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
+#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
+#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
+#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
+#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
+
+/* TIMER_STATUS Masks													*/
+#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
+#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
+#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
+#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
+#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
+#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
+#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
+#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
+#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
+#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
+#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
+#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
+#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
+#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
+#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
+#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
+#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
+#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
+#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
+#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
+#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
+#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
+#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
+#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks													*/
+#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
+#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
+#define EXT_CLK			0x0003	/* External Clock Mode					*/
+#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
+#define PERIOD_CNT		0x0008	/* Period Count							*/
+#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
+#define TIN_SEL			0x0020	/* Timer Input Select					*/
+#define OUT_DIS			0x0040	/* Output Pad Disable					*/
+#define CLK_SEL			0x0080	/* Timer Clock Select					*/
+#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
+#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
+#define ERR_TYP			0xC000	/* Error Type							*/
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
+/* EBIU_AMGCTL Masks																	*/
+#define AMCKEN			0x0001		/* Enable CLKOUT									*/
+#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
+#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
+#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
+#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
+#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
+
+/* EBIU_AMBCTL0 Masks																	*/
+#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
+#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
+#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
+#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
+#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
+#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
+#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
+#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
+#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
+#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
+#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
+#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
+#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
+#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
+#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
+#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
+#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
+#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
+#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
+#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
+#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
+#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
+#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
+#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
+#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
+#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
+#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
+#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
+#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
+#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
+#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
+#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
+#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
+#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
+#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
+#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
+
+#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
+#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
+#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
+#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
+#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
+#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
+#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
+#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
+#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
+#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
+#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
+#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
+#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
+#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
+#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
+#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
+#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
+#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
+#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
+#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
+#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
+#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
+#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
+#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
+#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
+#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
+#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
+#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
+#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
+#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
+#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
+#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
+#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
+#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
+#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
+#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
+#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
+#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
+#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
+#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
+#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
+
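+/*
+** Usage sketch (timing choices are placeholders, not recommendations): an
+** EBIU_AMBCTL0 value is the OR of one selection from each bank 0 and bank 1
+** field group above, e.g.
+**
+**		ambctl0 = B0RDYEN | B0TT_1 | B0ST_2 | B0HT_1 | B0RAT_8 | B0WAT_8 |
+**		          B1TT_1  | B1ST_2 | B1HT_1 | B1RAT_8 | B1WAT_8;
+*/
+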
+/* EBIU_AMBCTL1 Masks																	*/
+#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
+#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
+#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
+#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
+#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
+#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
+#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
+#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
+#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
+#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
+#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
+#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
+#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
+#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
+#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
+#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
+#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
+#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
+#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
+#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
+#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
+#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
+#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
+#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
+#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
+#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
+#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
+#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
+#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
+#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
+#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
+#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
+#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
+#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
+#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
+#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
+
+#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
+#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
+#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
+#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
+#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
+#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
+#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
+#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
+#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
+#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
+#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
+#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
+#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
+#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
+#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
+#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
+#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
+#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
+#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
+#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
+#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
+#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
+#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
+#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
+#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
+#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
+#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
+#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
+#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
+#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
+#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
+#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
+#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
+#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
+#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
+#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
+
+
+/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
+/* EBIU_SDGCTL Masks																			*/
+#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
+#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
+#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
+#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
+#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
+#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
+#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
+#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
+#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
+#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
+#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
+#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
+#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
+#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
+#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
+#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
+#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
+#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
+#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
+#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
+#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
+#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
+#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
+#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
+#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
+#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
+#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
+#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
+#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
+#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
+#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
+#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
+#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
+#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
+#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
+#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
+#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
+#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
+#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
+#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
+#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
+#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
+#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
+#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
+#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
+#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
+#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
+
+/* EBIU_SDBCTL Masks																		*/
+#define EBE				0x0001		/* Enable SDRAM External Bank							*/
+#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
+#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
+#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
+#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB	*/
+#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB	*/
+#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB	*/
+#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
+#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
+#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
+#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
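+
+/*
+** Usage sketch (geometry is an example only): an EBIU_SDBCTL value combines
+** the enable, bank-size and column-address-width selections above, e.g.
+**
+**		sdbctl = EBE | EBSZ_64 | EBCAW_9;
+*/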
+
+/* EBIU_SDSTAT Masks														*/
+#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
+#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
+#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
+#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
+#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
+#define BGSTAT			0x0020		/* Bus Grant Status						*/
+
+
+/* **************************  DMA CONTROLLER MASKS  ********************************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
+#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
+#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
+#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
+#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
+#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
+#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
+#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
+#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
+#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
+#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
+#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
+#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
+#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
+#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
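+
+/*
+** Usage sketch: the peripheral currently bound to a DMA channel is identified
+** by masking its PERIPHERAL_MAP value with PMAP and comparing against the
+** PMAP_* codes above (pmap_val is an illustrative local):
+**
+**		spi_owns_channel = ((pmap_val & PMAP) == PMAP_SPI);
+*/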
+
+/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/*  PPI_CONTROL Masks													*/
+#define PORT_EN			0x0001		/* PPI Port Enable					*/
+#define PORT_DIR		0x0002		/* PPI Port Direction				*/
+#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
+#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
+#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
+#define PACK_EN			0x0080		/* PPI Packing Mode					*/
+#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
+#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
+#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
+#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
+#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
+#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
+#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
+#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
+#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
+#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
+#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
+#define DLENGTH			0x3800		/* PPI Data Length  */
+#define POLC			0x4000		/* PPI Clock Polarity				*/
+#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
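+
+/*
+** Usage sketch (mode bits are illustrative, not a recommended setup): a PPI
+** control word is built from the masks above; the DLEN_x values occupy the
+** field covered by DLENGTH, e.g.
+**
+**		ppi_ctl = PORT_EN | PACK_EN | DLEN_8 | POLC;
+*/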
+
+/* PPI_STATUS Masks														*/
+#define FLD				0x0400		/* Field Indicator					*/
+#define FT_ERR			0x0800		/* Frame Track Error				*/
+#define OVR				0x1000		/* FIFO Overflow Error				*/
+#define UNDR			0x2000		/* FIFO Underrun Error				*/
+#define ERR_DET			0x4000		/* Error Detected Indicator			*/
+#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
+#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
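+
+/*
+** Usage sketch (assumes the 10MHz internal time reference described under
+** TWI_PRESCALE below): the SCL period spans CLKLOW + CLKHI reference periods,
+** so roughly 100kHz with a 50% duty cycle could be programmed as
+**
+**		*pTWI_CLKDIV = CLKLOW(50) | CLKHI(50);
+*/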
+
+/* TWI_PRESCALE Masks															*/
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
+#define	TWI_ENA		0x0080		/* TWI Enable									*/
+#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
+
+/* TWI_SLAVE_CTL Masks															*/
+#define	SEN			0x0001		/* Slave Enable									*/
+#define	SADD_LEN	0x0002		/* Slave Address Length							*/
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
+
+/* TWI_SLAVE_STAT Masks															*/
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
+#define GCALL		0x0002		/* General Call Indicator						*/
+
+/* TWI_MASTER_CTL Masks													*/
+#define	MEN			0x0001		/* Master Mode Enable						*/
+#define	MADD_LEN	0x0002		/* Master Address Length					*/
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
+#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
+#define	STOP		0x0010		/* Issue Stop Condition						*/
+#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
+#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
+#define	SDAOVR		0x4000		/* Serial Data Override						*/
+#define	SCLOVR		0x8000		/* Serial Clock Override					*/
+
+/* TWI_MASTER_STAT Masks														*/
+#define	MPROG		0x0001		/* Master Transfer In Progress					*/
+#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
+#define	ANAK		0x0004		/* Address Not Acknowledged						*/
+#define	DNAK		0x0008		/* Data Not Acknowledged						*/
+#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
+#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
+#define	SDASEN		0x0040		/* Serial Data Sense							*/
+#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
+#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
+#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
+#define	SERR		0x0004		/* Slave Transfer Error		*/
+#define	SOVF		0x0008		/* Slave Overflow			*/
+#define	MCOMP		0x0010		/* Master Transfer Complete	*/
+#define	MERR		0x0020		/* Master Transfer Error	*/
+#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
+#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
+
+/* TWI_FIFO_CTRL Masks												*/
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
+
+/* TWI_FIFO_STAT Masks															*/
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
+#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
+#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
+#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
+
+#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
+#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
+#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
+#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
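(Editorial sketch, not part of the patch.) The XMTSTAT/RCVSTAT fields are multi-bit status fields, so they are masked before comparison; a hedged sketch, assuming the FIFO status register is reachable at the TWI0_FIFO_STAT address (0xFFC0142C).

/* Illustrative sketch: non-zero when at least one received byte is
 * waiting in the TWI receive FIFO.  The pointer is an assumption. */
#define pTWI0_FIFO_STAT ((volatile unsigned short *)0xFFC0142C)

static int twi0_rx_has_data(void)
{
	return (*pTWI0_FIFO_STAT & RCVSTAT) != RCV_EMPTY;
}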
+
+
+/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
+/* PORT_MUX Masks															*/
+#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
+#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
+#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
+
+#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
+#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
+#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
+#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
+
+#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
+#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
+#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
+
+#define	PFTE			0x0010			/* Port F Timer Enable				*/
+#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
+#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
+
+#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
+#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
+#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
+
+#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
+#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
+#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
+
+#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
+#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
+#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
+
+#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
+#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
+#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
+
+#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
+#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
+#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
+
+#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
+#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
+#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
+
+#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
+#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
+#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
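(Editorial sketch, not part of the patch.) The PORT_MUX fields above are OR'd together when the multiplexer register is programmed; the sketch below assumes the register sits at the BFIN_PORT_MUX address (0xFFC0320C), and the chosen routing is purely illustrative.

/* Illustrative sketch: route Port J to CAN RX/TX and SPI_SSEL3:2,
 * leaving the Port F fields at their cleared defaults.  The pointer
 * definition and the routing are assumptions of this example. */
#define pBFIN_PORT_MUX ((volatile unsigned short *)0xFFC0320C)

static void portj_route_can_and_spi(void)
{
	*pBFIN_PORT_MUX = PJSE_SPI | PJCE_CAN;
}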
+
+
+/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
+/* HDMAx_CTL Masks														*/
+#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
+#define	REP			0x0002	/* HDMA Request Polarity					*/
+#define	UTE			0x0004	/* Urgency Threshold Enable					*/
+#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
+#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
+#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
+#define	DRQ			0x0300	/* HDMA Request Type						*/
+#define	DRQ_NONE	0x0000	/* 		No Request							*/
+#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
+#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
+#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
+#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
+#define	PS			0x2000	/* HDMA Pin Status							*/
+#define	OI			0x4000	/* Overflow Interrupt Generated				*/
+#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
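(Editorial sketch, not part of the patch.) These entry points are normally invoked by casting the address to a function pointer and calling through it; the void/void signature below is an assumption of the sketch, since the ROM calling conventions are documented in the ADI hardware reference rather than in this header.

/* Illustrative sketch only: jump to the on-chip Boot ROM reset entry.
 * The (void (*)(void)) signature is an assumption of this example. */
static void bootrom_reset(void)
{
	void (*rom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
	rom_reset();	/* does not return */
}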
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define	PGDE_UART   PFDE_UART
+#define	PGDE_DMA    PFDE_DMA
+#define	CKELOW		SCKELOW
+
+/* HOST Port Registers */
+
+#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
+#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
+#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
+#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
+#define                       CNT_STATUS  0xffc03508   /* Status Register */
+#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
+#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
+#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
+#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
+#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
+
+/* OTP/FUSE Registers */
+
+#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
+#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
+#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
+#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
+#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
+#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+
+/* Motor Control PWM Registers */
+
+#define                         PWM_CTRL  0xffc03700   /* PWM Control Register */
+#define                         PWM_STAT  0xffc03704   /* PWM Status Register */
+#define                           PWM_TM  0xffc03708   /* PWM Period Register */
+#define                           PWM_DT  0xffc0370c   /* PWM Dead Time Register */
+#define                         PWM_GATE  0xffc03710   /* PWM Chopping Control */
+#define                          PWM_CHA  0xffc03714   /* PWM Channel A Duty Control */
+#define                          PWM_CHB  0xffc03718   /* PWM Channel B Duty Control */
+#define                          PWM_CHC  0xffc0371c   /* PWM Channel C Duty Control */
+#define                          PWM_SEG  0xffc03720   /* PWM Crossover and Output Enable */
+#define                       PWM_SYNCWT  0xffc03724   /* PWM Sync Pulse Width Control */
+#define                         PWM_CHAL  0xffc03728   /* PWM Channel AL Duty Control (SR mode only) */
+#define                         PWM_CHBL  0xffc0372c   /* PWM Channel BL Duty Control (SR mode only) */
+#define                         PWM_CHCL  0xffc03730   /* PWM Channel CL Duty Control (SR mode only) */
+#define                          PWM_LSI  0xffc03734   /* PWM Low Side Invert (SR mode only) */
+#define                        PWM_STAT2  0xffc03738   /* PWM Status Register 2 */
+
+
+/* ********************************************************** */
+/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
+/*     and MULTI BIT READ MACROS                              */
+/* ********************************************************** */
+
+/* Bit masks for HOST_CONTROL */
+
+#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
+#define                  HOST_CNTR_nHOST_EN  0x0
+#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
+#define                 HOST_CNTR_nHOST_END  0x0
+#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
+#define                HOST_CNTR_nDATA_SIZE  0x0
+#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
+#define                 HOST_CNTR_nHOST_RST  0x0
+#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
+#define                 HOST_CNTR_nHRDY_OVR  0x0
+#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
+#define                 HOST_CNTR_nINT_MODE  0x0
+#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
+#define                    HOST_CNTR_nBT_EN  0x0
+#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
+#define                      HOST_CNTR_nEHW  0x0
+#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
+#define                      HOST_CNTR_nEHR  0x0
+#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
+#define                      HOST_CNTR_nBDR  0x0
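(Editorial sketch, not part of the patch.) Every n* macro in these pairs is simply 0x0, so OR'ing one in only documents that the field is deliberately left clear; a hedged sketch of building a HOST_CONTROL value this way follows, with settings chosen for illustration rather than as a recommended configuration.

/* Illustrative sketch: enable the host port for reads and writes,
 * leaving the endianness field at its cleared default.  HOST_CONTROL
 * is the register address defined earlier in this file. */
#define pHOST_CONTROL ((volatile unsigned short *)HOST_CONTROL)

static void host_port_enable(void)
{
	*pHOST_CONTROL = HOST_CNTR_HOST_EN | HOST_CNTR_nHOST_END |
			 HOST_CNTR_EHW | HOST_CNTR_EHR;
}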
+
+/* Bit masks for HOST_STATUS */
+
+#define                     HOST_STAT_READY  0x1        /* DMA Ready */
+#define                    HOST_STAT_nREADY  0x0
+#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
+#define                 HOST_STAT_nFIFOFULL  0x0
+#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
+#define                HOST_STAT_nFIFOEMPTY  0x0
+#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
+#define                 HOST_STAT_nCOMPLETE  0x0
+#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
+#define                     HOST_STAT_nHSHK  0x0
+#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
+#define                  HOST_STAT_nTIMEOUT  0x0
+#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
+#define                     HOST_STAT_nHIRQ  0x0
+#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
+#define               HOST_STAT_nALLOW_CNFG  0x0
+#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
+#define                  HOST_STAT_nDMA_DIR  0x0
+#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
+#define                      HOST_STAT_nBTE  0x0
+#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
+#define              HOST_STAT_nHOSTRD_DONE  0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define                   EMUDABL  0x1        /* Emulation Disable. */
+#define                  nEMUDABL  0x0
+#define                   RSTDABL  0x2        /* Reset Disable */
+#define                  nRSTDABL  0x0
+#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
+#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
+#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
+#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
+#define                  nDMA0OVR  0x0
+#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
+#define                  nDMA1OVR  0x0
+#define                    EMUOVR  0x4000     /* Emulation Override */
+#define                   nEMUOVR  0x0
+#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
+#define                   nOTPSEN  0x0
+#define                    L2DABL  0x70000    /* L2 Memory Disable. */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define                   SECURE0  0x1        /* SECURE 0 */
+#define                  nSECURE0  0x0
+#define                   SECURE1  0x2        /* SECURE 1 */
+#define                  nSECURE1  0x0
+#define                   SECURE2  0x4        /* SECURE 2 */
+#define                  nSECURE2  0x0
+#define                   SECURE3  0x8        /* SECURE 3 */
+#define                  nSECURE3  0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define                   SECMODE  0x3        /* Secured Mode Control State */
+#define                       NMI  0x4        /* Non Maskable Interrupt */
+#define                      nNMI  0x0
+#define                   AFVALID  0x8        /* Authentication Firmware Valid */
+#define                  nAFVALID  0x0
+#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
+#define                   nAFEXIT  0x0
+#define                   SECSTAT  0xe0       /* Secure Status */
 
 #endif /* _DEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
deleted file mode 100644
index 5f84913..0000000
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
- */
-
-#ifndef _DEF_BF51X_H
-#define _DEF_BF51X_H
-
-
-/* ************************************************************** */
-/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x    */
-/* ************************************************************** */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
-#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
-#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register				*/
-#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
-#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
-#define CHIPID				0xFFC00014	/* Device ID Register */
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)								*/
-#define SWRST				0xFFC00100	/* Software Reset Register					*/
-#define SYSCR				0xFFC00104	/* System Configuration Register				*/
-#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register			*/
-
-#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
-#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0				*/
-#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1				*/
-#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2				*/
-#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3				*/
-#define SIC_ISR0			0xFFC00120	/* Interrupt Status Register					*/
-#define SIC_IWR0			0xFFC00124	/* Interrupt Wakeup Register					*/
-
-/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
-#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
-#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
-#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
-#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
-#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
-#define SIC_ISR1                        0xFFC00160     /* Interrupt Statur register */
-#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
-
-
-/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
-#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
-#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
-#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
-#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
-#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
-#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
-#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
-#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
-#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
-#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
-#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
-#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
-#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
-#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
-#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
-#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
-#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
-#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
-#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
-#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
-
-/* SPI0 Controller			(0xFFC00500 - 0xFFC005FF)							*/
-#define SPI0_REGBASE			0xFFC00500
-#define SPI0_CTL			0xFFC00500	/* SPI Control Register						*/
-#define SPI0_FLG			0xFFC00504	/* SPI Flag register						*/
-#define SPI0_STAT			0xFFC00508	/* SPI Status register						*/
-#define SPI0_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register				*/
-#define SPI0_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register				*/
-#define SPI0_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
-#define SPI0_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
-
-/* SPI1 Controller			(0xFFC03400 - 0xFFC034FF)							*/
-#define SPI1_REGBASE			0xFFC03400
-#define SPI1_CTL			0xFFC03400	/* SPI Control Register						*/
-#define SPI1_FLG			0xFFC03404	/* SPI Flag register						*/
-#define SPI1_STAT			0xFFC03408	/* SPI Status register						*/
-#define SPI1_TDBR			0xFFC0340C	/* SPI Transmit Data Buffer Register				*/
-#define SPI1_RDBR			0xFFC03410	/* SPI Receive Data Buffer Register				*/
-#define SPI1_BAUD			0xFFC03414	/* SPI Baud rate Register					*/
-#define SPI1_SHADOW			0xFFC03418	/* SPI_RDBR Shadow Register					*/
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
-#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
-#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
-#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
-
-#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
-#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
-#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
-#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
-
-#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
-#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
-#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
-#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
-
-#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
-#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
-#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
-#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
-
-#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
-#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
-#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
-#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
-
-#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
-#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
-#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
-#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
-
-#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
-#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
-#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
-#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
-
-#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
-#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
-#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
-#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
-
-#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
-#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
-#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
-#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
-#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
-#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
-#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
-#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
-#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
-#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
-#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
-#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
-#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
-#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
-#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
-#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
-#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
-#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
-#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
-#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
-#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
-#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
-#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
-#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
-#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
-#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
-#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
-#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
-#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
-#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
-#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
-#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
-#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
-#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
-#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
-#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
-#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
-#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
-#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
-#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
-#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
-#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
-#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
-#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
-#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
-#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
-#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
-#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
-#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
-#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
-#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
-#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
-#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
-#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
-#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
-#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
-#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
-#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
-#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
-#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
-#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
-
-/* DMA Traffic Control Registers													*/
-#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
-#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
-#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
-#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
-#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
-#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
-#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
-#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
-#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
-#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
-#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
-#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
-#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
-#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
-
-#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
-#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
-#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
-#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
-#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
-#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
-#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
-#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
-#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
-#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
-#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
-#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
-#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
-
-#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
-#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
-#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
-#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
-#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
-#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
-#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
-#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
-#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
-#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
-#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
-#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
-#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
-
-#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
-#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
-#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
-#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
-#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
-#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
-#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
-#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
-#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
-#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
-#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
-#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
-#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
-
-#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
-#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
-#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
-#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
-#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
-#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
-#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
-#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
-#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
-#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
-#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
-#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
-#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
-
-#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
-#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
-#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
-#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
-#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
-#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
-#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
-#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
-#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
-#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
-#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
-#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
-#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
-
-#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
-#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
-#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
-#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
-#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
-#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
-#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
-#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
-#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
-#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
-#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
-#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
-#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
-
-#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
-#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
-#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
-#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
-#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
-#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
-#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
-#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
-#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
-#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
-#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
-#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
-#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
-
-#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
-#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
-#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
-#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
-#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
-#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
-#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
-#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
-#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
-#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
-#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
-#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
-#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
-
-#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
-#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
-#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
-#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
-#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
-#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
-#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
-#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
-#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
-#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
-#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
-#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
-#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
-
-#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
-#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
-#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
-#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
-#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
-#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
-#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
-#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
-#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
-#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
-#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
-#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
-#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
-
-#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
-#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
-#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
-#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
-#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
-#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
-#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
-#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
-#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
-#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
-#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
-#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
-#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
-
-#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
-#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
-#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
-#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
-#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
-#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
-#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
-#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
-#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
-#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
-#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
-
-#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
-#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
-#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
-#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
-#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
-#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
-#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
-#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
-#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
-#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
-#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
-#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
-#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
-
-#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
-#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
-#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
-#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
-#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
-#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
-#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
-#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
-#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
-#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
-#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
-
-#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
-#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
-#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
-#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
-#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
-#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
-#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
-#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
-#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
-#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
-#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
-#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
-#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
-#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
-#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
-#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
-#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
-#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-#define TWI0_REGBASE			0xFFC01400
-#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
-#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
-#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
-#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
-#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
-#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
-#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
-#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
-#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
-#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
-#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
-#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
-#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
-#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
-#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
-#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
-
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
-#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
-#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
-#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
-#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
-#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
-#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
-#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
-#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
-#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
-#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
-#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
-#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
-#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
-#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
-#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
-#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
-#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
-#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
-#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
-#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
-#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
-#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
-#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
-#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
-#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
-#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
-#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
-#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
-#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
-#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
-#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
-#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
-#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
-#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
-#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
-#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
-#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
-#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
-#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
-#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
-#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
-
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
-#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
-#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
-#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
-#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
-#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
-#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
-#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
-#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
-#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
-#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
-#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
-
-#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
-#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
-#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
-#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
-#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
-#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
-#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
-
-
-/* GPIO PIN mux (0xFFC03210 - OxFFC03288) */
-#define PORTF_MUX               0xFFC03210      /* Port F mux control */
-#define PORTG_MUX               0xFFC03214      /* Port G mux control */
-#define PORTH_MUX               0xFFC03218      /* Port H mux control */
-#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
-#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
-#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
-#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
-#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
-#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
-#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
-#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
-#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
-#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
-#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
-#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
-
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer:	All macros are intended to make C and Assembly code more readable.
-**				Use these macros carefully, as any that do left shifts for field
-**				depositing will result in the lower order bits being destroyed.  Any
-**				macro that shifts left to properly position the bit-field should be
-**				used as part of an OR to initialize a register and NOT as a dynamic
-**				modifier UNLESS the lower order bits are saved and ORed back in when
-**				the macro is used.
-*************************************************************************************/
-
-/* CHIPID Masks */
-#define CHIPID_VERSION         0xF0000000
-#define CHIPID_FAMILY          0x0FFFF000
-#define CHIPID_MANUFACTURE     0x00000FFE
-
-/* SWRST Masks																		*/
-#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
-#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
-#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
-#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
-#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
-
-/* SYSCR Masks																				*/
-#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
-#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
-
-
-/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
-/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
-
-#if 0
-#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
-
-#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
-#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
-#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
-#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
-#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
-#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
-#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
-
-#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
-#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
-#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
-#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
-#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
-#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
-#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
-#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
-
-#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
-#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
-#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
-#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
-#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
-#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
-#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
-#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
-#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
-#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
-
-#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
-#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
-#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
-#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
-#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
-#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
-#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
-#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
-#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
-#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
-#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
-#endif
-
-/* SIC_IAR0 Macros															*/
-#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
-#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
-#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
-#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
-#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
-#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
-#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
-#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
-
-/* SIC_IAR1 Macros															*/
-#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
-#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
-#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
-#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
-#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
-#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
-#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
-#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
-
-/* SIC_IAR2 Macros															*/
-#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
-#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
-#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
-#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
-#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
-#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
-#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
-#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
-
-/* SIC_IAR3 Macros															*/
-#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
-#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
-#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
-#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
-#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
-#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
-#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
-#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
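/*
 * Illustrative sketch, not part of the original header: per the disclaimer
 * above, the shift-based field macros are meant to be OR-ed together into a
 * single full-register write; changing a live register requires saving and
 * OR-ing back the untouched bits.  SIC_IAR0 and the bfin_read32()/
 * bfin_write32() accessors are assumptions borrowed from the style used
 * elsewhere in this tree, and the IVG levels are example values only.
 */
static inline void sic_iar0_usage_sketch(void)
{
	/* one-shot initialization: build the whole value, write it once */
	bfin_write32(SIC_IAR0, P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(9) |
			       P4_IVG(9) | P5_IVG(10) | P6_IVG(10) | P7_IVG(11));

	/* dynamic change of one field: preserve the other assignments */
	bfin_write32(SIC_IAR0,
		     (bfin_read32(SIC_IAR0) & ~(0xFul << 0x8)) | P2_IVG(12));
}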
-
-
-/* SIC_IMASK Masks																		*/
-#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
-#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
-#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
-#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
-
-/* SIC_IWR Masks																		*/
-#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
-#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
-#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
-#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
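/*
 * Usage sketch, not from the original source: the IWR helpers build full
 * 32-bit patterns, so power-management code typically saves the current
 * value, writes the pattern it wants, and restores it afterwards -- the same
 * sequence used by the PLL helpers removed later in this patch.  The
 * bfin_read32()/bfin_write32() accessors follow the style used there.
 */
static inline void sic_iwr_usage_sketch(void)
{
	unsigned long iwr0 = bfin_read32(SIC_IWR0);

	bfin_write32(SIC_IWR0, IWR_ENABLE(0));	/* allow only the PLL wakeup */
	/* ... enter the low-power state here ... */
	bfin_write32(SIC_IWR0, iwr0);		/* restore previous wakeup set */
}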
-
-
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks												*/
-#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select */
-#define STB			0x04				/* Stop Bits			*/
-#define PEN			0x08				/* Parity Enable		*/
-#define EPS			0x10				/* Even Parity Select	*/
-#define STP			0x20				/* Stick Parity			*/
-#define SB			0x40				/* Set Break			*/
-#define DLAB		0x80				/* Divisor Latch Access	*/
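/*
 * Sketch only (not in the original header): a typical 8N1 setup using the
 * UARTx_LCR masks above.  bfin_write16() is an assumed 16-bit MMR accessor,
 * and the divisor (conventionally SCLK / (16 * baud)) is supplied by the
 * caller.
 */
static inline void uart1_set_8n1_sketch(unsigned short divisor)
{
	bfin_write16(UART1_LCR, DLAB);			/* expose divisor latch */
	bfin_write16(UART1_DLL, divisor & 0xFF);
	bfin_write16(UART1_DLH, divisor >> 8);
	bfin_write16(UART1_LCR, WLS(8));		/* 8 data, 1 stop, no parity */
}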
-
-/* UARTx_MCR Mask										*/
-#define LOOP_ENA	0x10	/* Loopback Mode Enable */
-#define LOOP_ENA_P	0x04
-
-/* UARTx_LSR Masks										*/
-#define DR			0x01	/* Data Ready				*/
-#define OE			0x02	/* Overrun Error			*/
-#define PE			0x04	/* Parity Error				*/
-#define FE			0x08	/* Framing Error			*/
-#define BI			0x10	/* Break Interrupt			*/
-#define THRE		0x20	/* THR Empty				*/
-#define TEMT		0x40	/* TSR and UART_THR Empty	*/
-
-/* UARTx_IER Masks															*/
-#define ERBFI		0x01		/* Enable Receive Buffer Full Interrupt		*/
-#define ETBEI		0x02		/* Enable Transmit Buffer Empty Interrupt	*/
-#define ELSI		0x04		/* Enable RX Status Interrupt				*/
-
-/* UARTx_IIR Masks														*/
-#define NINT		0x01		/* Pending Interrupt					*/
-#define IIR_TX_READY    0x02		/* UART_THR empty                               */
-#define IIR_RX_READY    0x04		/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06		/* Receive line status    			*/
-#define IIR_STATUS	0x06		/* Highest Priority Pending Interrupt	*/
-
-/* UARTx_GCTL Masks													*/
-#define UCEN		0x01		/* Enable UARTx Clocks				*/
-#define IREN		0x02		/* Enable IrDA Mode					*/
-#define TPOLC		0x04		/* IrDA TX Polarity Change			*/
-#define RPOLC		0x08		/* IrDA RX Polarity Change			*/
-#define FPE			0x10		/* Force Parity Error On Transmit	*/
-#define FFE			0x20		/* Force Framing Error On Transmit	*/
-
-
-/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
-/* TIMER_ENABLE Masks													*/
-#define TIMEN0			0x0001		/* Enable Timer 0					*/
-#define TIMEN1			0x0002		/* Enable Timer 1					*/
-#define TIMEN2			0x0004		/* Enable Timer 2					*/
-#define TIMEN3			0x0008		/* Enable Timer 3					*/
-#define TIMEN4			0x0010		/* Enable Timer 4					*/
-#define TIMEN5			0x0020		/* Enable Timer 5					*/
-#define TIMEN6			0x0040		/* Enable Timer 6					*/
-#define TIMEN7			0x0080		/* Enable Timer 7					*/
-
-/* TIMER_DISABLE Masks													*/
-#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
-#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
-#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
-#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
-#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
-#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
-#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
-#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
-
-/* TIMER_STATUS Masks													*/
-#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
-#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
-#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
-#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
-#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
-#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
-#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
-#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
-#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
-#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
-#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
-#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
-#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
-#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
-#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
-#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
-#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
-#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
-#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
-#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
-#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
-#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
-#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
-#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR3 TOVF_ERR3
-#define TOVL_ERR4 TOVF_ERR4
-#define TOVL_ERR5 TOVF_ERR5
-#define TOVL_ERR6 TOVF_ERR6
-#define TOVL_ERR7 TOVF_ERR7
-
-/* TIMERx_CONFIG Masks													*/
-#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
-#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
-#define EXT_CLK			0x0003	/* External Clock Mode					*/
-#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
-#define PERIOD_CNT		0x0008	/* Period Count							*/
-#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
-#define TIN_SEL			0x0020	/* Timer Input Select					*/
-#define OUT_DIS			0x0040	/* Output Pad Disable					*/
-#define CLK_SEL			0x0080	/* Timer Clock Select					*/
-#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
-#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
-#define ERR_TYP			0xC000	/* Error Type							*/
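/*
 * Illustrative sketch (not part of the original header): continuous PWM on
 * timer 0 using the TIMERx_CONFIG masks above.  The TIMER0_CONFIG,
 * TIMER0_PERIOD, TIMER0_WIDTH and TIMER_ENABLE register names plus the
 * bfin_write16()/bfin_write32() accessors are assumptions; the period and
 * width values are arbitrary examples.
 */
static inline void timer0_pwm_sketch(void)
{
	bfin_write16(TIMER0_CONFIG, PWM_OUT | PERIOD_CNT | PULSE_HI);
	bfin_write32(TIMER0_PERIOD, 1000);	/* period, in timer clocks */
	bfin_write32(TIMER0_WIDTH, 250);	/* 25% duty cycle */
	bfin_write16(TIMER_ENABLE, TIMEN0);	/* start timer 0 */
}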
-
-
-/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
-/* Port F Masks 														*/
-#define PF0		0x0001
-#define PF1		0x0002
-#define PF2		0x0004
-#define PF3		0x0008
-#define PF4		0x0010
-#define PF5		0x0020
-#define PF6		0x0040
-#define PF7		0x0080
-#define PF8		0x0100
-#define PF9		0x0200
-#define PF10	0x0400
-#define PF11	0x0800
-#define PF12	0x1000
-#define PF13	0x2000
-#define PF14	0x4000
-#define PF15	0x8000
-
-/* Port G Masks															*/
-#define PG0		0x0001
-#define PG1		0x0002
-#define PG2		0x0004
-#define PG3		0x0008
-#define PG4		0x0010
-#define PG5		0x0020
-#define PG6		0x0040
-#define PG7		0x0080
-#define PG8		0x0100
-#define PG9		0x0200
-#define PG10	0x0400
-#define PG11	0x0800
-#define PG12	0x1000
-#define PG13	0x2000
-#define PG14	0x4000
-#define PG15	0x8000
-
-/* Port H Masks															*/
-#define PH0		0x0001
-#define PH1		0x0002
-#define PH2		0x0004
-#define PH3		0x0008
-#define PH4		0x0010
-#define PH5		0x0020
-#define PH6		0x0040
-#define PH7		0x0080
-
-/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
-/* EBIU_AMGCTL Masks																	*/
-#define AMCKEN			0x0001		/* Enable CLKOUT									*/
-#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
-#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
-#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
-#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
-#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
-
-/* EBIU_AMBCTL0 Masks																	*/
-#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
-#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
-#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
-#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
-#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
-#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
-#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
-#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
-#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
-#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
-#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
-#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
-#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
-#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
-#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
-#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
-#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
-#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
-#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
-#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
-#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
-#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
-#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
-#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
-#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
-#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
-#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
-#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
-#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
-#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
-#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
-#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
-#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
-#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
-#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
-#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
-
-#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
-#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
-#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
-#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
-#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
-#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
-#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
-#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
-#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
-#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
-#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
-#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
-#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
-#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
-#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
-#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
-#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
-#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
-#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
-#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
-#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
-#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
-#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
-#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
-#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
-#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
-#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
-#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
-#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
-#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
-#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
-#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
-#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
-#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
-#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
-#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
-#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
-#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
-#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
-#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
-#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
-
-/* EBIU_AMBCTL1 Masks																	*/
-#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
-#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
-#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
-#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
-#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
-#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
-#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
-#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
-#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
-#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
-#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
-#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
-#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
-#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
-#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
-#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
-#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
-#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
-#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
-#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
-#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
-#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
-#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
-#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
-#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
-#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
-#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
-#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
-#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
-#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
-#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
-#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
-#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
-#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
-#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
-#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
-
-#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
-#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
-#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
-#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
-#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
-#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
-#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
-#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
-#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
-#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
-#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
-#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
-#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
-#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
-#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
-#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
-#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
-#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
-#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
-#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
-#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
-#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
-#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
-#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
-#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
-#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
-#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
-#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
-#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
-#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
-#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
-#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
-#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
-#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
-#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
-#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
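/*
 * Sketch only: bank timings are built up by OR-ing the one-shot masks above
 * into a single EBIU_AMBCTL0 write, then the banks are enabled through
 * EBIU_AMGCTL.  Register names come from the section comments; the timing
 * choices and the bfin_write16()/bfin_write32() accessors are illustrative
 * assumptions.
 */
static inline void async_bank01_sketch(void)
{
	bfin_write32(EBIU_AMBCTL0,
		     B0RDYEN | B0TT_4 | B0ST_4 | B0HT_2 | B0RAT_8 | B0WAT_8 |
		     B1TT_4 | B1ST_4 | B1HT_2 | B1RAT_8 | B1WAT_8);
	bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_B0_B1);
}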
-
-
-/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
-/* EBIU_SDGCTL Masks																			*/
-#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
-#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
-#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
-#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
-#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
-#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
-#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
-#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
-#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
-#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
-#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
-#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
-#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
-#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
-#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
-#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
-#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
-#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
-#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
-#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
-#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
-#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
-#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
-#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
-#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
-#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
-#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
-#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
-#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
-#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
-#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
-#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
-#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
-#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
-#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
-#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
-#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
-#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
-#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
-#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
-#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
-#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
-#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
-#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
-#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
-#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
-#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
-
-/* EBIU_SDBCTL Masks																		*/
-#define EBE				0x0001		/* Enable SDRAM External Bank							*/
-#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
-#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
-#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
-#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
-#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
-#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
-#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
-#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
-#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
-#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
-
-/* EBIU_SDSTAT Masks														*/
-#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
-#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
-#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
-#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
-#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
-#define BGSTAT			0x0020		/* Bus Grant Status						*/
-
-
-/* **************************  DMA CONTROLLER MASKS  ********************************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
-#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
-#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
-#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
-#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
-#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
-#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
-#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
-#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
-#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
-#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
-#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
-#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
-#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
-#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
-
-/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
-/*  PPI_CONTROL Masks													*/
-#define PORT_EN			0x0001		/* PPI Port Enable					*/
-#define PORT_DIR		0x0002		/* PPI Port Direction				*/
-#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
-#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
-#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
-#define PACK_EN			0x0080		/* PPI Packing Mode					*/
-#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
-#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
-#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
-#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
-#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
-#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
-#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
-#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
-#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
-#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
-#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
-#define DLENGTH			0x3800		/* PPI Data Length  */
-#define POLC			0x4000		/* PPI Clock Polarity				*/
-#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
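/*
 * Sketch only (not from the original source): an 8-bit packed receive
 * configuration built from the PPI_CONTROL masks above, with the port
 * enabled last.  The PPI_CONTROL register name is taken from the section
 * comment and bfin_read16()/bfin_write16() are assumed accessors.
 */
static inline void ppi_rx8_sketch(void)
{
	bfin_write16(PPI_CONTROL, PACK_EN | DLEN_8);	/* receive, 8-bit, packed */
	bfin_write16(PPI_CONTROL, bfin_read16(PPI_CONTROL) | PORT_EN);
}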
-
-/* PPI_STATUS Masks														*/
-#define FLD				0x0400		/* Field Indicator					*/
-#define FT_ERR			0x0800		/* Frame Track Error				*/
-#define OVR				0x1000		/* FIFO Overflow Error				*/
-#define UNDR			0x2000		/* FIFO Underrun Error				*/
-#define ERR_DET			0x4000		/* Error Detected Indicator			*/
-#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
-
-
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
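/*
 * Worked example, illustrative only: with the prescaler programmed for the
 * 10 MHz internal time reference, a ~100 kHz SCL needs 10 MHz / 100 kHz = 100
 * reference periods per bit, split evenly between the low and high phases.
 * The pointer-style accessor follows the usage note above.
 */
static inline void twi_100khz_sketch(void)
{
	*pTWI_CLKDIV = CLKLOW(50) | CLKHI(50);
}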
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
-#define	TWI_ENA		0x0080		/* TWI Enable									*/
-#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001		/* Slave Enable									*/
-#define	SADD_LEN	0x0002		/* Slave Address Length							*/
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
-#define GCALL		0x0002		/* General Call Indicator						*/
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001		/* Master Mode Enable						*/
-#define	MADD_LEN	0x0002		/* Master Address Length					*/
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
-#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
-#define	STOP		0x0010		/* Issue Stop Condition						*/
-#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
-#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
-#define	SDAOVR		0x4000		/* Serial Data Override						*/
-#define	SCLOVR		0x8000		/* Serial Clock Override					*/
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001		/* Master Transfer In Progress					*/
-#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
-#define	ANAK		0x0004		/* Address Not Acknowledged						*/
-#define	DNAK		0x0008		/* Data Not Acknowledged						*/
-#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
-#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
-#define	SDASEN		0x0040		/* Serial Data Sense							*/
-#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
-#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
-#define	SERR		0x0004		/* Slave Transfer Error		*/
-#define	SOVF		0x0008		/* Slave Overflow			*/
-#define	MCOMP		0x0010		/* Master Transfer Complete	*/
-#define	MERR		0x0020		/* Master Transfer Error	*/
-#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
-#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
-#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
-#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
-#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
-
-#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
-#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
-#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
-#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
-
-
-/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
-/* PORT_MUX Masks															*/
-#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
-#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
-#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
-
-#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
-#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
-#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
-#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
-
-#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
-#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
-#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
-
-#define	PFTE			0x0010			/* Port F Timer Enable				*/
-#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
-#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
-
-#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
-#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
-#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
-
-#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
-#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
-#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
-
-#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
-#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
-#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
-
-#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
-#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
-#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
-
-#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
-#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
-#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
-
-#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
-#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
-#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
-
-#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
-#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
-#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
-
-
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
-#define	REP			0x0002	/* HDMA Request Polarity					*/
-#define	UTE			0x0004	/* Urgency Threshold Enable					*/
-#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
-#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
-#define	DRQ			0x0300	/* HDMA Request Type						*/
-#define	DRQ_NONE	0x0000	/* 		No Request							*/
-#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
-#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
-#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
-#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
-#define	PS			0x2000	/* HDMA Pin Status							*/
-#define	OI			0x4000	/* Overflow Interrupt Generated				*/
-#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
-
-/* entry addresses of the user-callable Boot ROM functions */
-
-#define _BOOTROM_RESET 0xEF000000
-#define _BOOTROM_FINAL_INIT 0xEF000002
-#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
-#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
-#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
-#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
-#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
-#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
-#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
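/*
 * Sketch only: the values above are raw entry addresses, so callers reach
 * the Boot ROM routines through casts to function pointers.  The prototype
 * and the local variable name used here are assumptions for illustration.
 */
static inline void bootrom_reset_sketch(void)
{
	void (*bootrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;

	bootrom_reset();	/* does not return */
}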
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define	PGDE_UART   PFDE_UART
-#define	PGDE_DMA    PFDE_DMA
-#define	CKELOW		SCKELOW
-
-/* HOST Port Registers */
-
-#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
-#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
-#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
-
-/* Counter Registers */
-
-#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
-#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
-#define                       CNT_STATUS  0xffc03508   /* Status Register */
-#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
-#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
-#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
-#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
-#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
-
-/* OTP/FUSE Registers */
-
-#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
-#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
-#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
-#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
-
-/* Security Registers */
-
-#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
-#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
-#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
-
-/* OTP Read/Write Data Buffer Registers */
-
-#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-
-/* Motor Control PWM Registers */
-
-#define                         PWM_CTRL  0xffc03700   /* PWM Control Register */
-#define                         PWM_STAT  0xffc03704   /* PWM Status Register */
-#define                           PWM_TM  0xffc03708   /* PWM Period Register */
-#define                           PWM_DT  0xffc0370c   /* PWM Dead Time Register */
-#define                         PWM_GATE  0xffc03710   /* PWM Chopping Control */
-#define                          PWM_CHA  0xffc03714   /* PWM Channel A Duty Control */
-#define                          PWM_CHB  0xffc03718   /* PWM Channel B Duty Control */
-#define                          PWM_CHC  0xffc0371c   /* PWM Channel C Duty Control */
-#define                          PWM_SEG  0xffc03720   /* PWM Crossover and Output Enable */
-#define                       PWM_SYNCWT  0xffc03724   /* PWM Sync Pulse Width Control */
-#define                         PWM_CHAL  0xffc03728   /* PWM Channel AL Duty Control (SR mode only) */
-#define                         PWM_CHBL  0xffc0372c   /* PWM Channel BL Duty Control (SR mode only) */
-#define                         PWM_CHCL  0xffc03730   /* PWM Channel CL Duty Control (SR mode only) */
-#define                          PWM_LSI  0xffc03734   /* PWM Low Side Invert (SR mode only) */
-#define                        PWM_STAT2  0xffc03738   /* PWM Status Register 2 */
-
-
-/* ********************************************************** */
-/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
-/*     and MULTI BIT READ MACROS                              */
-/* ********************************************************** */
-
-/* Bit masks for HOST_CONTROL */
-
-#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
-#define                  HOST_CNTR_nHOST_EN  0x0
-#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
-#define                 HOST_CNTR_nHOST_END  0x0
-#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
-#define                HOST_CNTR_nDATA_SIZE  0x0
-#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
-#define                 HOST_CNTR_nHOST_RST  0x0
-#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
-#define                 HOST_CNTR_nHRDY_OVR  0x0
-#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
-#define                 HOST_CNTR_nINT_MODE  0x0
-#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
-#define                   HOST_CNTR_nBT_EN  0x0
-#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
-#define                      HOST_CNTR_nEHW  0x0
-#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
-#define                      HOST_CNTR_nEHR  0x0
-#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
-#define                      HOST_CNTR_nBDR  0x0
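/*
 * Illustration only of the convention described above: the negated ("n")
 * companions are defined as 0, so a one-shot register initialization can
 * spell out the bits that are deliberately left clear.  HOST_CONTROL is
 * defined above; bfin_write16() is an assumed accessor.
 */
static inline void host_control_sketch(void)
{
	bfin_write16(HOST_CONTROL, HOST_CNTR_HOST_EN | HOST_CNTR_nINT_MODE |
				   HOST_CNTR_EHW | HOST_CNTR_EHR);
}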
-
-/* Bit masks for HOST_STATUS */
-
-#define                     HOST_STAT_READY  0x1        /* DMA Ready */
-#define                    HOST_STAT_nREADY  0x0
-#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
-#define                 HOST_STAT_nFIFOFULL  0x0
-#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
-#define                HOST_STAT_nFIFOEMPTY  0x0
-#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
-#define                 HOST_STAT_nCOMPLETE  0x0
-#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
-#define                     HOST_STAT_nHSHK  0x0
-#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
-#define                  HOST_STAT_nTIMEOUT  0x0
-#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
-#define                     HOST_STAT_nHIRQ  0x0
-#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
-#define               HOST_STAT_nALLOW_CNFG  0x0
-#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
-#define                  HOST_STAT_nDMA_DIR  0x0
-#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
-#define                      HOST_STAT_nBTE  0x0
-#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
-#define              HOST_STAT_nHOSTRD_DONE  0x0
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
-
-/* Bit masks for SECURE_SYSSWT */
-
-#define                   EMUDABL  0x1        /* Emulation Disable. */
-#define                  nEMUDABL  0x0
-#define                   RSTDABL  0x2        /* Reset Disable */
-#define                  nRSTDABL  0x0
-#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
-#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
-#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
-#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
-#define                  nDMA0OVR  0x0
-#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
-#define                  nDMA1OVR  0x0
-#define                    EMUOVR  0x4000     /* Emulation Override */
-#define                   nEMUOVR  0x0
-#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
-#define                   nOTPSEN  0x0
-#define                    L2DABL  0x70000    /* L2 Memory Disable. */
-
-/* Bit masks for SECURE_CONTROL */
-
-#define                   SECURE0  0x1        /* SECURE 0 */
-#define                  nSECURE0  0x0
-#define                   SECURE1  0x2        /* SECURE 1 */
-#define                  nSECURE1  0x0
-#define                   SECURE2  0x4        /* SECURE 2 */
-#define                  nSECURE2  0x0
-#define                   SECURE3  0x8        /* SECURE 3 */
-#define                  nSECURE3  0x0
-
-/* Bit masks for SECURE_STATUS */
-
-#define                   SECMODE  0x3        /* Secured Mode Control State */
-#define                       NMI  0x4        /* Non Maskable Interrupt */
-#define                      nNMI  0x0
-#define                   AFVALID  0x8        /* Authentication Firmware Valid */
-#define                  nAFVALID  0x0
-#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
-#define                   nAFEXIT  0x0
-#define                   SECSTAT  0xe0       /* Secure Status */
-
-
-
-#endif /* _DEF_BF51X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/gpio.h b/arch/blackfin/mach-bf518/include/mach/gpio.h
index 9af6ce0..b480705 100644
--- a/arch/blackfin/mach-bf518/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf518/include/mach/gpio.h
@@ -55,4 +55,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/pll.h b/arch/blackfin/mach-bf518/include/mach/pll.h
index d550298..94cca67 100644
--- a/arch/blackfin/mach-bf518/include/mach/pll.h
+++ b/arch/blackfin/mach-bf518/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PLL Wakeup */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PLL Wakeup */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index fc767ac..ccab4c6 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -67,6 +67,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -83,7 +84,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -419,7 +420,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -474,7 +475,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -627,9 +628,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -661,9 +662,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 2c31af7..c9d6dc8 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -82,11 +82,13 @@
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -102,6 +104,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -118,7 +121,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -612,7 +615,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -667,7 +670,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -799,9 +802,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -833,9 +836,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 9a736a8..b7101aa 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -46,11 +46,13 @@
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -66,6 +68,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -82,7 +85,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -497,7 +500,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -552,7 +555,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -679,9 +682,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -713,9 +716,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 9222bc0..2cd2ff6 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -86,11 +86,13 @@
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -106,6 +108,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -122,7 +125,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -706,7 +709,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -761,7 +764,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -960,6 +963,11 @@
 		I2C_BOARD_INFO("ad5252", 0x2f),
 	},
 #endif
+#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
+	{
+		I2C_BOARD_INFO("adau1373", 0x1A),
+	},
+#endif
 };
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -982,9 +990,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -1016,9 +1024,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 9ec5757..18d303d 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -91,7 +91,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -193,7 +193,7 @@
 	GPIO_PG1, GPIO_PH9, GPIO_PH10
 };
 
-static struct gpio_decoder_platfrom_data spi_decoded_cs = {
+static struct gpio_decoder_platform_data spi_decoded_cs = {
 	.base		= EXP_GPIO_SPISEL_BASE,
 	.input_addrs	= gpio_addr_inputs,
 	.nr_input_addrs = ARRAY_SIZE(gpio_addr_inputs),
@@ -586,7 +586,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -642,7 +642,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -799,9 +799,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -834,9 +834,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/dma.c b/arch/blackfin/mach-bf527/dma.c
index 7bc7577..1fabdef 100644
--- a/arch/blackfin/mach-bf527/dma.c
+++ b/arch/blackfin/mach-bf527/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
new file mode 100644
index 0000000..00c603f
--- /dev/null
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+
+#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
index c1d55b8..960e089 100644
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port port;
-	unsigned int old_status;
-	int status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int tx_done;
-	int tx_count;
-	struct circ_buf rx_dma_buf;
-	struct timer_list rx_dma_timer;
-	int rx_dma_nrows;
-	unsigned int tx_dma_channel;
-	unsigned int rx_dma_channel;
-	struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list cts_timer;
-	int cts_pin;
-	int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long uart_base_addr;
 	int uart_irq;
@@ -146,3 +75,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index f714c5d..e1d2792 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -1,49 +1,37 @@
 /*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
 #define _MACH_BLACKFIN_H_
 
 #include "bf527.h"
-#include "defBF522.h"
 #include "anomaly.h"
 
-#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "defBF527.h"
+#include <asm/def_LPBlackfin.h>
+#if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+# include "defBF522.h"
 #endif
-
 #if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "defBF525.h"
+# include "defBF525.h"
+#endif
+#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
+# include "defBF527.h"
 #endif
 
 #if !defined(__ASSEMBLY__)
-#include "cdefBF522.h"
-
-#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "cdefBF527.h"
+# include <asm/cdef_LPBlackfin.h>
+# if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+#  include "cdefBF522.h"
+# endif
+# if defined(CONFIG_BF525) || defined(CONFIG_BF524)
+#  include "cdefBF525.h"
+# endif
+# if defined(CONFIG_BF527) || defined(CONFIG_BF526)
+#  include "cdefBF527.h"
+# endif
 #endif
 
-#if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "cdefBF525.h"
-#endif
-#endif
-
-#define BFIN_UART_NR_PORTS	2
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
 #endif
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
index 1079af8..618dfcd 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
@@ -1,21 +1,1095 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF522_H
 #define _CDEF_BF522_H
 
-/* include all Core registers and bit definitions */
-#include "defBF522.h"
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID()			bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
 
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define bfin_read_SWRST()			bfin_read16(SWRST)
+#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
+#define bfin_read_SYSCR()			bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
 
-/* include cdefBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "cdefBF52x_base.h"
+#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
+#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
+
+#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
+#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
+
+#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
+#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
+
+/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
+#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
+
+
+/* SPI Controller		(0xFFC00500 - 0xFFC005FF)									*/
+#define bfin_read_SPI_CTL()			bfin_read16(SPI_CTL)
+#define bfin_write_SPI_CTL(val)			bfin_write16(SPI_CTL, val)
+#define bfin_read_SPI_FLG()			bfin_read16(SPI_FLG)
+#define bfin_write_SPI_FLG(val)			bfin_write16(SPI_FLG, val)
+#define bfin_read_SPI_STAT()			bfin_read16(SPI_STAT)
+#define bfin_write_SPI_STAT(val)		bfin_write16(SPI_STAT, val)
+#define bfin_read_SPI_TDBR()			bfin_read16(SPI_TDBR)
+#define bfin_write_SPI_TDBR(val)		bfin_write16(SPI_TDBR, val)
+#define bfin_read_SPI_RDBR()			bfin_read16(SPI_RDBR)
+#define bfin_write_SPI_RDBR(val)		bfin_write16(SPI_RDBR, val)
+#define bfin_read_SPI_BAUD()			bfin_read16(SPI_BAUD)
+#define bfin_write_SPI_BAUD(val)		bfin_write16(SPI_BAUD, val)
+#define bfin_read_SPI_SHADOW()			bfin_read16(SPI_SHADOW)
+#define bfin_write_SPI_SHADOW(val)		bfin_write16(SPI_SHADOW, val)
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
+#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
+#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
+#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
+#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers													*/
+#define bfin_read_DMAC_TC_PER()			bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)		bfin_write16(DMAC_TC_PER, val)
+#define bfin_read_DMAC_TC_CNT()			bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)		bfin_write16(DMAC_TC_CNT, val)
+
+/* DMA Controller																	*/
+#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
+#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
+#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
+#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
+#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
+
+/* Omit the CAN register sets from cdefBF534.h (CAN is not present in the ADSP-BF52x processors) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
+#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
+#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
+
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
+
+/* NFC Registers */
+
+#define bfin_read_NFC_CTL()			bfin_read16(NFC_CTL)
+#define bfin_write_NFC_CTL(val)			bfin_write16(NFC_CTL, val)
+#define bfin_read_NFC_STAT()			bfin_read16(NFC_STAT)
+#define bfin_write_NFC_STAT(val)		bfin_write16(NFC_STAT, val)
+#define bfin_read_NFC_IRQSTAT()			bfin_read16(NFC_IRQSTAT)
+#define bfin_write_NFC_IRQSTAT(val)		bfin_write16(NFC_IRQSTAT, val)
+#define bfin_read_NFC_IRQMASK()			bfin_read16(NFC_IRQMASK)
+#define bfin_write_NFC_IRQMASK(val)		bfin_write16(NFC_IRQMASK, val)
+#define bfin_read_NFC_ECC0()			bfin_read16(NFC_ECC0)
+#define bfin_write_NFC_ECC0(val)		bfin_write16(NFC_ECC0, val)
+#define bfin_read_NFC_ECC1()			bfin_read16(NFC_ECC1)
+#define bfin_write_NFC_ECC1(val)		bfin_write16(NFC_ECC1, val)
+#define bfin_read_NFC_ECC2()			bfin_read16(NFC_ECC2)
+#define bfin_write_NFC_ECC2(val)		bfin_write16(NFC_ECC2, val)
+#define bfin_read_NFC_ECC3()			bfin_read16(NFC_ECC3)
+#define bfin_write_NFC_ECC3(val)		bfin_write16(NFC_ECC3, val)
+#define bfin_read_NFC_COUNT()			bfin_read16(NFC_COUNT)
+#define bfin_write_NFC_COUNT(val)		bfin_write16(NFC_COUNT, val)
+#define bfin_read_NFC_RST()			bfin_read16(NFC_RST)
+#define bfin_write_NFC_RST(val)			bfin_write16(NFC_RST, val)
+#define bfin_read_NFC_PGCTL()			bfin_read16(NFC_PGCTL)
+#define bfin_write_NFC_PGCTL(val)		bfin_write16(NFC_PGCTL, val)
+#define bfin_read_NFC_READ()			bfin_read16(NFC_READ)
+#define bfin_write_NFC_READ(val)		bfin_write16(NFC_READ, val)
+#define bfin_read_NFC_ADDR()			bfin_read16(NFC_ADDR)
+#define bfin_write_NFC_ADDR(val)		bfin_write16(NFC_ADDR, val)
+#define bfin_read_NFC_CMD()			bfin_read16(NFC_CMD)
+#define bfin_write_NFC_CMD(val)			bfin_write16(NFC_CMD, val)
+#define bfin_read_NFC_DATA_WR()			bfin_read16(NFC_DATA_WR)
+#define bfin_write_NFC_DATA_WR(val)		bfin_write16(NFC_DATA_WR, val)
+#define bfin_read_NFC_DATA_RD()			bfin_read16(NFC_DATA_RD)
+#define bfin_write_NFC_DATA_RD(val)		bfin_write16(NFC_DATA_RD, val)
 
 #endif /* _CDEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
index d7e2751..d90a85b 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
@@ -1,15 +1,12 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF525_H
 #define _CDEF_BF525_H
 
-/* include all Core registers and bit definitions */
-#include "defBF525.h"
-
 /* BF525 is BF522 + USB */
 #include "cdefBF522.h"
 
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
index c7ba544..eb22f58 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
@@ -1,15 +1,12 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF527_H
 #define _CDEF_BF527_H
 
-/* include all Core registers and bit definitions */
-#include "defBF527.h"
-
 /* BF527 is BF525 + EMAC */
 #include "cdefBF525.h"
 
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
deleted file mode 100644
index 3048b52..0000000
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
+++ /dev/null
@@ -1,1113 +0,0 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _CDEF_BF52X_H
-#define _CDEF_BF52X_H
-
-#include <asm/blackfin.h>
-
-#include "defBF52x_base.h"
-
-/* Include core specific register pointer definitions 								*/
-#include <asm/cdef_LPBlackfin.h>
-
-/* ==== begin from cdefBF534.h ==== */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
-#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
-#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
-#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
-#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
-#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
-#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
-#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
-#define bfin_read_CHIPID()			bfin_read32(CHIPID)
-#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
-#define bfin_read_SWRST()			bfin_read16(SWRST)
-#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
-#define bfin_read_SYSCR()			bfin_read16(SYSCR)
-#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
-
-#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
-#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
-#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
-#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
-#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
-#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
-
-#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
-#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
-#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
-#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
-#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
-#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
-#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
-#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
-
-#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
-#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
-#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
-#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
-
-#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
-#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
-#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
-#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
-
-/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
-
-#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
-#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
-#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
-#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
-#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
-#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
-#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
-#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
-#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
-#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
-#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
-#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
-#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
-#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
-
-/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
-#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
-#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
-#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
-#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
-#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
-#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
-#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
-#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
-#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
-#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
-#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
-#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
-#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
-#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
-#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
-#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
-#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
-#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
-#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
-#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
-#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
-#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
-#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
-#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
-#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
-#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
-#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
-#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
-#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
-#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
-#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
-#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
-#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
-#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
-#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
-#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
-#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
-#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
-#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
-#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
-#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
-#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
-
-
-/* SPI Controller		(0xFFC00500 - 0xFFC005FF)									*/
-#define bfin_read_SPI_CTL()			bfin_read16(SPI_CTL)
-#define bfin_write_SPI_CTL(val)			bfin_write16(SPI_CTL, val)
-#define bfin_read_SPI_FLG()			bfin_read16(SPI_FLG)
-#define bfin_write_SPI_FLG(val)			bfin_write16(SPI_FLG, val)
-#define bfin_read_SPI_STAT()			bfin_read16(SPI_STAT)
-#define bfin_write_SPI_STAT(val)		bfin_write16(SPI_STAT, val)
-#define bfin_read_SPI_TDBR()			bfin_read16(SPI_TDBR)
-#define bfin_write_SPI_TDBR(val)		bfin_write16(SPI_TDBR, val)
-#define bfin_read_SPI_RDBR()			bfin_read16(SPI_RDBR)
-#define bfin_write_SPI_RDBR(val)		bfin_write16(SPI_RDBR, val)
-#define bfin_read_SPI_BAUD()			bfin_read16(SPI_BAUD)
-#define bfin_write_SPI_BAUD(val)		bfin_write16(SPI_BAUD, val)
-#define bfin_read_SPI_SHADOW()			bfin_read16(SPI_SHADOW)
-#define bfin_write_SPI_SHADOW(val)		bfin_write16(SPI_SHADOW, val)
-
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
-#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
-#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
-#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
-#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
-#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
-#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
-#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
-
-#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
-#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
-#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
-#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
-#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
-#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
-#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
-#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
-
-#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
-#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
-#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
-#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
-#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
-#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
-#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
-#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
-
-#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
-#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
-#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
-#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
-#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
-#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
-#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
-#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
-
-#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
-#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
-#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
-#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
-#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
-#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
-#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
-#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
-
-#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
-#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
-#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
-#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
-#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
-#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
-#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
-#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
-
-#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
-#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
-#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
-#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
-#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
-#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
-#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
-#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
-
-#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
-#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
-#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
-#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
-#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
-#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
-#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
-#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
-
-#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
-#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
-#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
-#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
-#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
-#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
-#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
-#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
-#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
-#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
-#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
-#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
-#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
-#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
-#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
-#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
-#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
-#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
-#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
-#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
-#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
-#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
-#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
-#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
-#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
-#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
-#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
-#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
-#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
-#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
-#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
-#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
-#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
-#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
-#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
-#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
-#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
-
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
-#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
-#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
-#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
-
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
-#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
-#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
-#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
-#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
-#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
-#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
-#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
-#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
-#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
-#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
-#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
-#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
-#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
-#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
-#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
-#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
-#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
-#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
-#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
-#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
-#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
-#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
-#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
-#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
-#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
-#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
-#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
-#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
-#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
-#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
-#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
-#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
-#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
-#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
-#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
-#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
-#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
-#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
-#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
-#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
-#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
-#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
-#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
-#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
-#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
-#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
-#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
-#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
-#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
-#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
-#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
-#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
-#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
-#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
-#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
-#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
-#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
-
-
-/* DMA Traffic Control Registers													*/
-#define bfin_read_DMA_TC_PER()			bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)		bfin_write16(DMA_TC_PER, val)
-#define bfin_read_DMA_TC_CNT()			bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)		bfin_write16(DMA_TC_CNT, val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER()			bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)		bfin_write16(DMA_TCPER, val)
-#define bfin_read_DMA_TCCNT()			bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)		bfin_write16(DMA_TCCNT, val)
-
-/* DMA Controller																	*/
-#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
-#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
-#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
-#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
-#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
-#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
-#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
-#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
-#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
-#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
-#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
-#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
-#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
-#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
-#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
-#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
-#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
-#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
-#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
-#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
-#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
-#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
-#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
-#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
-#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
-#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
-#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
-#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
-#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
-#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
-#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
-#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
-#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
-#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
-#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
-#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
-#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
-#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
-#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
-#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
-#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
-#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
-#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
-#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
-#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
-#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
-#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
-#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
-#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
-#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
-#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
-#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
-#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
-#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
-#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
-#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
-#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
-#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
-#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
-#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
-#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
-#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
-#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
-#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
-#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
-#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
-#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
-#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
-#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
-#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
-#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
-#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
-#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
-#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
-#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
-#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
-#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
-#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
-#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
-#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
-#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
-#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
-#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
-#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
-#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
-#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
-#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
-#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
-#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
-#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
-#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
-#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
-#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
-#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
-#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
-#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
-#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
-#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
-#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
-#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
-#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
-#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
-#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
-#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
-#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
-#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
-#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
-#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
-#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
-#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
-#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
-#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
-#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
-#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
-#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
-#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
-#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
-#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
-#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
-#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
-#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
-#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
-#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
-#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
-#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
-#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
-#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
-#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
-#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
-#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
-#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
-#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
-#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
-#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
-#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
-#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
-#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
-#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
-#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
-#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
-#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
-#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
-#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
-#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
-#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
-#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
-#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
-#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
-#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
-#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
-#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
-#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
-#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
-#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
-#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
-#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
-#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
-#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
-#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
-#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
-#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
-#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
-#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
-#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
-#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
-#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
-#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
-#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
-#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
-#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
-#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
-#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
-#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
-#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
-#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
-#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
-#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
-#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
-#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
-#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
-#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
-#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
-#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
-#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
-#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
-#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
-#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
-#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
-#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
-#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
-#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
-#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
-#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
-#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
-#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
-#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
-#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
-#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
-#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
-#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
-#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
-#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
-#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
-#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
-#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
-#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
-#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
-#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
-#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
-#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
-#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
-#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
-#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
-#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
-#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
-#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
-#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
-#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
-#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
-#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
-#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
-#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
-#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
-#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
-#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
-#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
-#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
-#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
-#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
-#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
-#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
-#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
-#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
-#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
-#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
-#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
-#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
-#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
-#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
-#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
-#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
-#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
-#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
-#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
-#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
-#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
-#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
-#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
-#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
-#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
-#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
-#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
-#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
-#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
-#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
-#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
-#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
-#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
-#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
-#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
-#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
-#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
-#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
-#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
-#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
-#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
-#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
-#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
-#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
-#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
-#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
-#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
-#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
-#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
-#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
-#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
-#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
-#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
-#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
-#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
-#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
-#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
-#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
-#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
-#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
-#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
-#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
-#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
-#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
-#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
-#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
-#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
-#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
-#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
-#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
-#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
-#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
-#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
-#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
-#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
-#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
-#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
-#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
-#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
-#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
-#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
-#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
-#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
-#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
-#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
-#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
-#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
-#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
-#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
-#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
-#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
-#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
-#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
-#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
-#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
-#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
-#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
-#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
-#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
-#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
-#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
-#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
-#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
-#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
-#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
-#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
-#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
-#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
-#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
-#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
-#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
-#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
-#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
-#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
-#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
-#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
-#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
-#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
-#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
-#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
-#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
-#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
-#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
-#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
-#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
-#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
-#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
-#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
-#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
-#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
-#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
-#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
-#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
-#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
-#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
-#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
-#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
-#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
-#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
-#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
-#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
-#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
-#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
-#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
-#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
-#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
-#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
-#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
-#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
-#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
-#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
-#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
-#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
-#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
-#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
-#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
-#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
-#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
-#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
-#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
-#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
-#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
-#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
-#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
-#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
-#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
-#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
-#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
-#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
-#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
-#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
-#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
-#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
-#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
-#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
-#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
-#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
-#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
-#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
-#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
-#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
-#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
-#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
-#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
-#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
-#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
-#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
-#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
-#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
-#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
-#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
-#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
-#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
-#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
-#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
-#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
-#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
-#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
-#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
-#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
-#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
-#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
-#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
-#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
-#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
-#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
-#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
-#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
-#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
-#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
-#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
-#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
-#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
-#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
-#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
-#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
-#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
-#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
-#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
-#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
-#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
-#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
-#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
-#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
-#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
-#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
-#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
-#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
-#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
-#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
-#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
-#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
-#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
-#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
-
-/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF52x processor) */
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
-#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
-#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
-#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
-#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
-#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
-#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
-#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
-#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
-#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
-
-#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
-
-/* ==== end from cdefBF534.h ==== */
-
-/* GPIO PIN mux (0xFFC03210 - OxFFC03288) */
-
-#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
-#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
-#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
-#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
-#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
-#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
-
-#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
-#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
-#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
-#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
-#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
-#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
-#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
-#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
-#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
-#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
-#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
-#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
-#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
-#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
-#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
-#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
-#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
-#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
-#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
-#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
-#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
-#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
-#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
-#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
-
-/* Counter Registers */
-
-#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
-#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
-#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
-#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
-#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
-#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
-#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
-#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
-#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
-#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
-#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
-#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
-#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
-#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
-#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
-#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
-
-/* Security Registers */
-
-#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
-#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
-#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
-#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
-#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
-#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
-
-/* NFC Registers */
-
-#define bfin_read_NFC_CTL()			bfin_read16(NFC_CTL)
-#define bfin_write_NFC_CTL(val)			bfin_write16(NFC_CTL, val)
-#define bfin_read_NFC_STAT()			bfin_read16(NFC_STAT)
-#define bfin_write_NFC_STAT(val)		bfin_write16(NFC_STAT, val)
-#define bfin_read_NFC_IRQSTAT()			bfin_read16(NFC_IRQSTAT)
-#define bfin_write_NFC_IRQSTAT(val)		bfin_write16(NFC_IRQSTAT, val)
-#define bfin_read_NFC_IRQMASK()			bfin_read16(NFC_IRQMASK)
-#define bfin_write_NFC_IRQMASK(val)		bfin_write16(NFC_IRQMASK, val)
-#define bfin_read_NFC_ECC0()			bfin_read16(NFC_ECC0)
-#define bfin_write_NFC_ECC0(val)		bfin_write16(NFC_ECC0, val)
-#define bfin_read_NFC_ECC1()			bfin_read16(NFC_ECC1)
-#define bfin_write_NFC_ECC1(val)		bfin_write16(NFC_ECC1, val)
-#define bfin_read_NFC_ECC2()			bfin_read16(NFC_ECC2)
-#define bfin_write_NFC_ECC2(val)		bfin_write16(NFC_ECC2, val)
-#define bfin_read_NFC_ECC3()			bfin_read16(NFC_ECC3)
-#define bfin_write_NFC_ECC3(val)		bfin_write16(NFC_ECC3, val)
-#define bfin_read_NFC_COUNT()			bfin_read16(NFC_COUNT)
-#define bfin_write_NFC_COUNT(val)		bfin_write16(NFC_COUNT, val)
-#define bfin_read_NFC_RST()			bfin_read16(NFC_RST)
-#define bfin_write_NFC_RST(val)			bfin_write16(NFC_RST, val)
-#define bfin_read_NFC_PGCTL()			bfin_read16(NFC_PGCTL)
-#define bfin_write_NFC_PGCTL(val)		bfin_write16(NFC_PGCTL, val)
-#define bfin_read_NFC_READ()			bfin_read16(NFC_READ)
-#define bfin_write_NFC_READ(val)		bfin_write16(NFC_READ, val)
-#define bfin_read_NFC_ADDR()			bfin_read16(NFC_ADDR)
-#define bfin_write_NFC_ADDR(val)		bfin_write16(NFC_ADDR, val)
-#define bfin_read_NFC_CMD()			bfin_read16(NFC_CMD)
-#define bfin_write_NFC_CMD(val)			bfin_write16(NFC_CMD, val)
-#define bfin_read_NFC_DATA_WR()			bfin_read16(NFC_DATA_WR)
-#define bfin_write_NFC_DATA_WR(val)		bfin_write16(NFC_DATA_WR, val)
-#define bfin_read_NFC_DATA_RD()			bfin_read16(NFC_DATA_RD)
-#define bfin_write_NFC_DATA_RD(val)		bfin_write16(NFC_DATA_RD, val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
-#endif /* _CDEF_BF52X_H */
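As an illustration (not part of the patch itself): the bfin_read_*/bfin_write_* wrappers in the hunk above are thin 16-bit/32-bit MMR accessors. A minimal usage sketch, assuming the UART1 accessors from this header are visible to the caller (i.e. the header is included via the usual mach headers), might look like the following; THRE_BIT is a name and value chosen here for the standard 16550-style "transmit holding register empty" LSR bit and is not defined by this patch.

/* Illustrative sketch only: poll UART1 LSR, then write one byte to the THR. */
#define THRE_BIT 0x20	/* LSR bit 5, transmitter holding register empty (assumed value) */

static void example_uart1_putc(unsigned char c)
{
	/* Busy-wait until the transmitter can accept another byte. */
	while (!(bfin_read_UART1_LSR() & THRE_BIT))
		;
	/* Hand the byte to the transmit holding register. */
	bfin_write_UART1_THR(c);
}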
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index cb139a2..89f5420 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,12 +7,1393 @@
 #ifndef _DEF_BF522_H
 #define _DEF_BF522_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x    */
+/* ************************************************************** */
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* ==== begin from defBF534.h ==== */
 
-/* Include defBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "defBF52x_base.h"
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
+#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
+#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register		*/
+#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
+#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
+#define CHIPID        0xFFC00014  /* Device ID Register */
+
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define SWRST				0xFFC00100	/* Software Reset Register					*/
+#define SYSCR				0xFFC00104	/* System Configuration Register			*/
+#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register	*/
+
+#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
+#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0			*/
+#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1			*/
+#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2			*/
+#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3			*/
+#define SIC_ISR0				0xFFC00120	/* Interrupt Status Register				*/
+#define SIC_IWR0				0xFFC00124	/* Interrupt Wakeup Register				*/
+
+/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
+#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
+#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
+#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
+#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
+#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
+#define SIC_ISR1                        0xFFC00160     /* Interrupt Status register */
+#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
+
+
+/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
+#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
+#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
+#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
+#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
+#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
+#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
+#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
+#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
+#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
+#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
+#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
+#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
+#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
+#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
+#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
+#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
+#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
+#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
+#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
+#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
+
+
+/* SPI Controller			(0xFFC00500 - 0xFFC005FF)								*/
+#define SPI0_REGBASE			0xFFC00500
+#define SPI_CTL				0xFFC00500	/* SPI Control Register						*/
+#define SPI_FLG				0xFFC00504	/* SPI Flag register						*/
+#define SPI_STAT			0xFFC00508	/* SPI Status register						*/
+#define SPI_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register		*/
+#define SPI_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register			*/
+#define SPI_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
+#define SPI_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
+#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
+#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
+#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
+
+#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
+#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
+#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
+#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
+
+#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
+#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
+#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
+#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
+
+#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
+#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
+#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
+#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
+
+#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
+#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
+#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
+#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
+
+#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
+#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
+#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
+#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
+
+#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
+#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
+#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
+#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
+
+#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
+#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
+#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
+#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
+
+#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
+#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
+#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
+#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
+#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
+#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
+#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
+#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
+#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
+#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
+#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
+#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
+#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
+#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
+#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
+#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
+#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
+#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
+#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
+#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
+#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
+#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
+#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
+#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
+#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Receive Configuration 1 Register			*/
+#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Receive Configuration 2 Register			*/
+#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
+#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
+#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
+#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
+#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
+#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
+#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
+#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
+#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
+#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
+#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
+#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
+#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
+#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
+#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
+#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
+#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
+#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
+#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
+#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
+#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Receive Configuration 1 Register			*/
+#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Receive Configuration 2 Register			*/
+#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
+#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
+#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
+#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
+#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
+#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
+#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
+#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
+#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
+#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
+#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
+#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
+#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
+#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
+#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
+#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
+#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
+#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
+#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
+#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
+#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
+
+
+/* DMA Traffic Control Registers													*/
+#define DMAC_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMAC_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
+#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
+#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
+#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
+#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
+#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
+#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
+#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
+#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
+#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
+#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
+#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
+#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
+#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
+
+#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
+#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
+#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
+#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
+#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
+#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
+#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
+#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
+#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
+#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
+#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
+#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
+#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
+
+#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
+#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
+#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
+#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
+#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
+#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
+#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
+#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
+#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
+#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
+#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
+#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
+#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
+
+#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
+#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
+#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
+#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
+#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
+#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
+#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
+#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
+#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
+#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
+#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
+#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
+#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
+
+#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
+#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
+#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
+#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
+#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
+#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
+#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
+#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
+#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
+#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
+#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
+#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
+#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
+
+#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
+#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
+#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
+#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
+#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
+#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
+#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
+#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
+#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
+#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
+#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
+#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
+#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
+
+#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
+#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
+#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
+#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
+#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
+#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
+#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
+#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
+#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
+#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
+#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
+#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
+#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
+
+#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
+#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
+#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
+#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
+#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
+#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
+#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
+#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
+#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
+#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
+#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
+#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
+#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
+
+#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
+#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
+#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
+#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
+#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
+#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
+#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
+#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
+#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
+#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
+#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
+#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
+#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
+
+#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
+#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
+#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
+#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
+#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
+#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
+#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
+#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
+#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
+#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
+#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
+#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
+#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
+
+#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
+#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
+#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
+#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
+#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
+#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
+#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
+#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
+#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
+#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
+#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
+#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
+#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
+
+#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
+#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
+#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
+#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
+#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
+#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
+#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
+#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
+#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
+#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
+#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
+#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
+#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
+
+#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
+#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
+#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
+#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
+#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
+#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
+#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
+#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
+#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
+#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
+#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
+
+#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
+#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
+#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
+#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
+#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
+#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
+#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
+#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
+#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
+#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
+#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
+#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
+#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
+
+#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
+#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
+#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
+#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
+#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
+#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
+#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
+#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
+#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
+#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
+#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
+
+#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
+#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
+#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
+#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
+#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
+#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
+#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
+#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
+#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
+#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
+#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
+#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
+#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
+#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
+#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
+#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
+#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
+#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+#define TWI0_REGBASE			0xFFC01400
+#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
+#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
+#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
+#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
+#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
+#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
+#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
+#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
+#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
+#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
+#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
+#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
+#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
+#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
+#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
+#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
+#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
+#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
+#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
+#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
+#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
+#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
+#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
+#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
+#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
+#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
+#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
+#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
+#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
+#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
+#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
+#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
+#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
+#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
+#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
+#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
+#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
+#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
+#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
+#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
+#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
+#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
+#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
+#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
+#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
+#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
+#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
+#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
+#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
+#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
+#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
+#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
+#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
+#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
+#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
+#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
+#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
+
+
+/* The CAN register sets from defBF534.h are omitted (CAN is not present in the ADSP-BF52x processors) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
+#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
+#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
+#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
+#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
+#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
+#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
+#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
+#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
+#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
+#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
+#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
+
+#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
+#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
+#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
+#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
+#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
+#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
+#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX               0xFFC03210      /* Port F mux control */
+#define PORTG_MUX               0xFFC03214      /* Port G mux control */
+#define PORTH_MUX               0xFFC03218      /* Port H mux control */
+#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
+#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
+#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
+#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
+#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
+#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
+#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
+#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully, as any that do left shifts for field
+**				depositing will result in the lower order bits being destroyed.  Any
+**				macro that shifts left to properly position the bit-field should be
+**				used as part of an OR to initialize a register and NOT as a dynamic
+**				modifier UNLESS the lower order bits are saved and ORed back in when
+**				the macro is used.
+*************************************************************************************/
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
+/* SWRST Masks																		*/
+#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
+#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
+#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
+#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
+#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
+
+/* SYSCR Masks																				*/
+#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
+#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
+
+#if 0
+#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
+
+#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
+#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
+#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
+#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
+#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
+
+#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
+#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
+#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
+#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
+#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
+#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
+#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
+#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
+
+#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
+#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
+#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
+#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
+#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
+#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
+#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
+#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
+#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
+#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
+
+#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
+#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
+#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
+#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
+#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
+#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
+#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
+#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
+#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
+#endif
+
+/* SIC_IAR0 Macros															*/
+#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
+#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
+#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
+#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
+#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
+#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
+#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
+#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
+
+/* SIC_IAR1 Macros															*/
+#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
+#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
+#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
+#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
+#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
+#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
+#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
+#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
+
+/* SIC_IAR2 Macros															*/
+#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
+#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
+#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
+#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
+#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
+#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
+#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
+#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
+
+/* SIC_IAR3 Macros															*/
+#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
+#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
+#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
+#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
+#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
+#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
+#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
+#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
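
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * per the disclaimer above, the Px_IVG() macros shift their field into
 * position and are meant to be OR-combined when composing a complete
 * assignment value, not applied as dynamic read-modify-write updates.
 * The register pointer is a parameter because the SIC_IARx addresses
 * are defined elsewhere in this header family; "unsigned long" is
 * assumed to be 32 bits wide on this architecture.
 */
static inline void sic_iar0_example(volatile unsigned long *sic_iar0)
{
	/* Route peripherals 0..3 to IVG7..IVG10; fields left at zero
	 * correspond to the lowest assignment (IVG7). */
	*sic_iar0 = P0_IVG(7) | P1_IVG(8) | P2_IVG(9) | P3_IVG(10);
}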
+
+
+/* SIC_IMASK Masks																		*/
+#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
+#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
+#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
+#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
+
+/* SIC_IWR Masks																		*/
+#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
+#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
+#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
+#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
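
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * SIC_MASK()/SIC_UNMASK() and IWR_ENABLE()/IWR_DISABLE() are built for
 * read-modify-write updates that touch a single peripheral's bit while
 * leaving the other bits intact.  The register pointers are parameters
 * because the SIC_IMASKx/SIC_IWRx addresses are defined elsewhere in
 * this header family.
 */
static inline void sic_bit_example(volatile unsigned long *imask,
				   volatile unsigned long *iwr)
{
	*imask |= SIC_MASK(10);		/* set the mask bit for peripheral #10 */
	*imask &= SIC_UNMASK(10);	/* clear it again */

	*iwr |= IWR_ENABLE(10);		/* enable peripheral #10 as a wakeup source */
	*iwr &= IWR_DISABLE(10);	/* revoke that wakeup source */
}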
+
+/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
+/* TIMER_ENABLE Masks													*/
+#define TIMEN0			0x0001		/* Enable Timer 0					*/
+#define TIMEN1			0x0002		/* Enable Timer 1					*/
+#define TIMEN2			0x0004		/* Enable Timer 2					*/
+#define TIMEN3			0x0008		/* Enable Timer 3					*/
+#define TIMEN4			0x0010		/* Enable Timer 4					*/
+#define TIMEN5			0x0020		/* Enable Timer 5					*/
+#define TIMEN6			0x0040		/* Enable Timer 6					*/
+#define TIMEN7			0x0080		/* Enable Timer 7					*/
+
+/* TIMER_DISABLE Masks													*/
+#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
+#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
+#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
+#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
+#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
+#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
+#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
+#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
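
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * the TIMDISx names alias the TIMENx values because starting and
 * stopping are requested through two separate registers (TIMER_ENABLE
 * and TIMER_DISABLE, defined elsewhere in this header family) that use
 * the same bit positions.  A 16-bit register width is assumed from the
 * mask values above.
 */
static inline void timer_run_example(volatile unsigned short *timer_enable,
				     volatile unsigned short *timer_disable)
{
	*timer_enable  = TIMEN0 | TIMEN1;	/* request start of timers 0 and 1 */
	*timer_disable = TIMDIS0 | TIMDIS1;	/* request stop of the same timers */
}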
+
+/* TIMER_STATUS Masks													*/
+#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
+#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
+#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
+#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
+#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
+#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
+#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
+#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
+#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
+#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
+#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
+#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
+#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
+#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
+#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
+#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
+#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
+#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
+#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
+#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
+#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
+#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
+#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
+#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks													*/
+#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
+#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
+#define EXT_CLK			0x0003	/* External Clock Mode					*/
+#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
+#define PERIOD_CNT		0x0008	/* Period Count							*/
+#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
+#define TIN_SEL			0x0020	/* Timer Input Select					*/
+#define OUT_DIS			0x0040	/* Output Pad Disable					*/
+#define CLK_SEL			0x0080	/* Timer Clock Select					*/
+#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
+#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
+#define ERR_TYP			0xC000	/* Error Type							*/
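
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * a TIMERx_CONFIG value is normally composed once from the mode bits
 * above.  Only the value is built here, since the per-timer CONFIG
 * register addresses are defined elsewhere in this header family.
 */
static inline unsigned short timer_pwm_config_example(void)
{
	/* PWM output mode, period counting, interrupt request enabled. */
	return PWM_OUT | PERIOD_CNT | IRQ_ENA;
}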
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
+/* EBIU_AMGCTL Masks																	*/
+#define AMCKEN			0x0001		/* Enable CLKOUT									*/
+#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
+#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
+#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
+#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
+#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
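
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * EBIU_AMGCTL (defined earlier in this header) is typically programmed
 * once with one of the bank-enable choices plus AMCKEN.  A 16-bit
 * access is assumed from the mask widths above.
 */
static inline void async_mem_enable_example(void)
{
	*(volatile unsigned short *)EBIU_AMGCTL = AMCKEN | AMBEN_ALL;
}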
+
+/* EBIU_AMBCTL0 Masks																	*/
+#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
+#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
+#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
+#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
+#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
+#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
+#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
+#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
+#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
+#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
+#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
+#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
+#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
+#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
+#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
+#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
+#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
+#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
+#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
+#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
+#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
+#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
+#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
+#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
+#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
+#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
+#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
+#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
+#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
+#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
+#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
+#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
+#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
+#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
+#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
+#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
+
+#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
+#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
+#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
+#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
+#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
+#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
+#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
+#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
+#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
+#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
+#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
+#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
+#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
+#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
+#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
+#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
+#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
+#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
+#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
+#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
+#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
+#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
+#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
+#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
+#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
+#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
+#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
+#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
+#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
+#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
+#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
+#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
+#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
+#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
+#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
+#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
+#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
+#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
+#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
+#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
+#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
+
+/* EBIU_AMBCTL1 Masks																	*/
+#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
+#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
+#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
+#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
+#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
+#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
+#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
+#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
+#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
+#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
+#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
+#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
+#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
+#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
+#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
+#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
+#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
+#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
+#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
+#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
+#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
+#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
+#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
+#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
+#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
+#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
+#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
+#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
+#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
+#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
+#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
+#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
+#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
+#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
+#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
+#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
+
+#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
+#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
+#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
+#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
+#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
+#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
+#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
+#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
+#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
+#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
+#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
+#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
+#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
+#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
+#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
+#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
+#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
+#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
+#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
+#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
+#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
+#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
+#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
+#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
+#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
+#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
+#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
+#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
+#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
+#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
+#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
+#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
+#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
+#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
+#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
+#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
+
+
+/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
+/* EBIU_SDGCTL Masks																			*/
+#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
+#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
+#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
+#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
+#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
+#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
+#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
+#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
+#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
+#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
+#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
+#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
+#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
+#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
+#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
+#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
+#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
+#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
+#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
+#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
+#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
+#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
+#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
+#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
+#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
+#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
+#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
+#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
+#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
+#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
+#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
+#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
+#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
+#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
+#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
+#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
+#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
+#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
+#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
+#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
+#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
+#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
+#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
+#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
+#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
+#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
+#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
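
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * EBIU_SDGCTL (defined earlier in this header) is built by OR-ing one
 * choice from each timing field above.  The timing values below are
 * placeholders only; real settings must come from the SDRAM datasheet
 * and the system clock.  A 32-bit access is assumed from the mask
 * widths.
 */
static inline void sdram_gctl_example(void)
{
	*(volatile unsigned long *)EBIU_SDGCTL =
		SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2 | PASR_ALL;
}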
+
+/* EBIU_SDBCTL Masks																		*/
+#define EBE				0x0001		/* Enable SDRAM External Bank							*/
+#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
+#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
+#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
+#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
+#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
+#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
+#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
+#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
+#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
+#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
+
+/* EBIU_SDSTAT Masks														*/
+#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
+#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
+#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
+#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
+#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
+#define BGSTAT			0x0020		/* Bus Grant Status						*/
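
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * the EBIU_SDSTAT bits above report controller state and can be polled,
 * e.g. to wait until the controller is idle before reprogramming it.
 * A 16-bit access is assumed from the mask widths.
 */
static inline void sdram_wait_idle_example(void)
{
	while (!(*(volatile unsigned short *)EBIU_SDSTAT & SDCI))
		continue;
}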
+
+
+/* **************************  DMA CONTROLLER MASKS  ********************************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
+#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
+#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
+#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
+#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
+#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
+#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
+#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
+#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
+#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
+#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
+#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
+#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
+#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
+#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
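
/*
 * Illustrative usage sketch (editorial addition, not part of the patch):
 * the PMAP field of a DMAx_PERIPHERAL_MAP register can be compared
 * against the PMAP_* codes above to confirm which peripheral a channel
 * is serving.  A 16-bit access is assumed from the mask widths.
 */
static inline int dma7_serves_spi_example(void)
{
	unsigned short pmap = *(volatile unsigned short *)DMA7_PERIPHERAL_MAP;

	/* Channel 7 is the SPI channel in the default mapping. */
	return (pmap & PMAP) == PMAP_SPI;
}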
+
+/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/*  PPI_CONTROL Masks													*/
+#define PORT_EN			0x0001		/* PPI Port Enable					*/
+#define PORT_DIR		0x0002		/* PPI Port Direction				*/
+#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
+#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
+#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
+#define PACK_EN			0x0080		/* PPI Packing Mode					*/
+#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
+#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
+#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
+#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
+#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
+#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
+#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
+#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
+#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
+#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
+#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
+#define DLENGTH			0x3800		/* PPI Data Length  */
+#define POLC			0x4000		/* PPI Clock Polarity				*/
+#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
+
+/* PPI_STATUS Masks														*/
+#define FLD				0x0400		/* Field Indicator					*/
+#define FT_ERR			0x0800		/* Frame Track Error				*/
+#define OVR				0x1000		/* FIFO Overflow Error				*/
+#define UNDR			0x2000		/* FIFO Underrun Error				*/
+#define ERR_DET			0x4000		/* Error Detected Indicator			*/
+#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
+#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
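Following the usage hint in the comment above, a minimal sketch of programming the clock divider (illustration only; the pTWI_CLKDIV pointer is hand-rolled here and the TWI0_CLKDIV address macro is assumed to be available alongside these masks):

/* Sketch: make each SCL period 30 reference-clock cycles low, 70 cycles high. */
volatile unsigned short *pTWI_CLKDIV = (volatile unsigned short *)TWI0_CLKDIV;
*pTWI_CLKDIV = CLKLOW(30) | CLKHI(70);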
+
+/* TWI_PRESCALE Masks															*/
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
+#define	TWI_ENA		0x0080		/* TWI Enable									*/
+#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
+
+/* TWI_SLAVE_CTL Masks															*/
+#define	SEN			0x0001		/* Slave Enable									*/
+#define	SADD_LEN	0x0002		/* Slave Address Length							*/
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
+
+/* TWI_SLAVE_STAT Masks															*/
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
+#define GCALL		0x0002		/* General Call Indicator						*/
+
+/* TWI_MASTER_CTL Masks													*/
+#define	MEN			0x0001		/* Master Mode Enable						*/
+#define	MADD_LEN	0x0002		/* Master Address Length					*/
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
+#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
+#define	STOP		0x0010		/* Issue Stop Condition						*/
+#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
+#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
+#define	SDAOVR		0x4000		/* Serial Data Override						*/
+#define	SCLOVR		0x8000		/* Serial Clock Override					*/
+
+/* TWI_MASTER_STAT Masks														*/
+#define	MPROG		0x0001		/* Master Transfer In Progress					*/
+#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
+#define	ANAK		0x0004		/* Address Not Acknowledged						*/
+#define	DNAK		0x0008		/* Data Not Acknowledged						*/
+#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
+#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
+#define	SDASEN		0x0040		/* Serial Data Sense							*/
+#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
+#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
+#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
+#define	SERR		0x0004		/* Slave Transfer Error		*/
+#define	SOVF		0x0008		/* Slave Overflow			*/
+#define	MCOMP		0x0010		/* Master Transfer Complete	*/
+#define	MERR		0x0020		/* Master Transfer Error	*/
+#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
+#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
+
+/* TWI_FIFO_CTRL Masks												*/
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
+
+/* TWI_FIFO_STAT Masks															*/
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
+#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
+#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
+#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
+
+#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
+#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
+#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
+#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
+
+
+/* Omit CAN masks from defBF534.h */
+
+/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
+/* PORT_MUX Masks															*/
+#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
+#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
+#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
+
+#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
+#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
+#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
+#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
+
+#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
+#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
+#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
+
+#define	PFTE			0x0010			/* Port F Timer Enable				*/
+#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
+#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
+
+#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
+#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
+#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
+
+#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
+#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
+#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
+
+#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
+#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
+#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
+
+#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
+#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
+#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
+
+#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
+#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
+#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
+
+#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
+#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
+#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
+
+#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
+#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
+#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
+
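The PORT_MUX settings above are small multi-bit fields rather than independent flags, so they are normally applied with a read-modify-write. A hedged sketch (the BFIN_PORT_MUX address macro and the raw volatile pointer are used here purely for illustration):

/* Sketch: route port J to SPI_SSEL7 and port F to the DMA request pins.
 * ~PJCE(3) clears the whole 2-bit PJCE field before the new value is OR'd in. */
volatile unsigned short *port_mux = (volatile unsigned short *)BFIN_PORT_MUX;
*port_mux = (*port_mux & ~(PJCE(3) | PFDE)) | PJCE_SPI | PFDE_DMA;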
+
+/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
+/* HDMAx_CTL Masks														*/
+#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
+#define	REP			0x0002	/* HDMA Request Polarity					*/
+#define	UTE			0x0004	/* Urgency Threshold Enable					*/
+#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
+#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
+#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
+#define	DRQ			0x0300	/* HDMA Request Type						*/
+#define	DRQ_NONE	0x0000	/* 		No Request							*/
+#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
+#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
+#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
+#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
+#define	PS			0x2000	/* HDMA Pin Status							*/
+#define	OI			0x4000	/* Overflow Interrupt Generated				*/
+#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
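Since these are raw addresses, calling into the Boot ROM comes down to casting one of them to a function pointer. A purely hypothetical sketch (the argument-free prototype is an assumption for illustration; the actual calling conventions are specified in the ADSP-BF52x Hardware Reference, not in this header):

/* Hypothetical: transfer control to the Boot ROM reset entry point. */
void (*bootrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
bootrom_reset();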
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define	PGDE_UART   PFDE_UART
+#define	PGDE_DMA    PFDE_DMA
+#define	CKELOW		SCKELOW
+
+/* ==== end from defBF534.h ==== */
+
+/* HOST Port Registers */
+
+#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
+#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
+#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
+#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
+#define                       CNT_STATUS  0xffc03508   /* Status Register */
+#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
+#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
+#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
+#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
+#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
+
+/* OTP/FUSE Registers */
+
+#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
+#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
+#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
+#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
+#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
+#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+
+/* NFC Registers */
+
+#define                          NFC_CTL  0xffc03700   /* NAND Control Register */
+#define                         NFC_STAT  0xffc03704   /* NAND Status Register */
+#define                      NFC_IRQSTAT  0xffc03708   /* NAND Interrupt Status Register */
+#define                      NFC_IRQMASK  0xffc0370c   /* NAND Interrupt Mask Register */
+#define                         NFC_ECC0  0xffc03710   /* NAND ECC Register 0 */
+#define                         NFC_ECC1  0xffc03714   /* NAND ECC Register 1 */
+#define                         NFC_ECC2  0xffc03718   /* NAND ECC Register 2 */
+#define                         NFC_ECC3  0xffc0371c   /* NAND ECC Register 3 */
+#define                        NFC_COUNT  0xffc03720   /* NAND ECC Count Register */
+#define                          NFC_RST  0xffc03724   /* NAND ECC Reset Register */
+#define                        NFC_PGCTL  0xffc03728   /* NAND Page Control Register */
+#define                         NFC_READ  0xffc0372c   /* NAND Read Data Register */
+#define                         NFC_ADDR  0xffc03740   /* NAND Address Register */
+#define                          NFC_CMD  0xffc03744   /* NAND Command Register */
+#define                      NFC_DATA_WR  0xffc03748   /* NAND Data Write Register */
+#define                      NFC_DATA_RD  0xffc0374c   /* NAND Data Read Register */
+
+/* ********************************************************** */
+/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
+/*     and MULTI BIT READ MACROS                              */
+/* ********************************************************** */
+
+/* Bit masks for HOST_CONTROL */
+
+#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
+#define                  HOST_CNTR_nHOST_EN  0x0
+#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
+#define                 HOST_CNTR_nHOST_END  0x0
+#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
+#define                HOST_CNTR_nDATA_SIZE  0x0
+#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
+#define                 HOST_CNTR_nHOST_RST  0x0
+#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
+#define                 HOST_CNTR_nHRDY_OVR  0x0
+#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
+#define                 HOST_CNTR_nINT_MODE  0x0
+#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
+#define                    HOST_CNTR_nBT_EN  0x0
+#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
+#define                      HOST_CNTR_nEHW  0x0
+#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
+#define                      HOST_CNTR_nEHR  0x0
+#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
+#define                      HOST_CNTR_nBDR  0x0
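The HOST_CNTR_FOO/HOST_CNTR_nFOO pairs follow the "bit mask and negated one" convention announced above, so clears can be written by name. A small sketch (the volatile pointer is hand-rolled for illustration only):

/* Sketch: disable the host port without touching the other control bits.
 * OR-ing in HOST_CNTR_nHOST_EN (0x0) is a run-time no-op; it only documents
 * which field the clear targets. */
volatile unsigned short *host_ctl = (volatile unsigned short *)HOST_CONTROL;
*host_ctl = (*host_ctl & ~HOST_CNTR_HOST_EN) | HOST_CNTR_nHOST_EN;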
+
+/* Bit masks for HOST_STATUS */
+
+#define                     HOST_STAT_READY  0x1        /* DMA Ready */
+#define                    HOST_STAT_nREADY  0x0
+#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
+#define                 HOST_STAT_nFIFOFULL  0x0
+#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
+#define                HOST_STAT_nFIFOEMPTY  0x0
+#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
+#define                 HOST_STAT_nCOMPLETE  0x0
+#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
+#define                     HOST_STAT_nHSHK  0x0
+#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
+#define                  HOST_STAT_nTIMEOUT  0x0
+#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
+#define                     HOST_STAT_nHIRQ  0x0
+#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
+#define               HOST_STAT_nALLOW_CNFG  0x0
+#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
+#define                  HOST_STAT_nDMA_DIR  0x0
+#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
+#define                      HOST_STAT_nBTE  0x0
+#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
+#define              HOST_STAT_nHOSTRD_DONE  0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define                   EMUDABL  0x1        /* Emulation Disable. */
+#define                  nEMUDABL  0x0
+#define                   RSTDABL  0x2        /* Reset Disable */
+#define                  nRSTDABL  0x0
+#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
+#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
+#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
+#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
+#define                  nDMA0OVR  0x0
+#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
+#define                  nDMA1OVR  0x0
+#define                    EMUOVR  0x4000     /* Emulation Override */
+#define                   nEMUOVR  0x0
+#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
+#define                   nOTPSEN  0x0
+#define                    L2DABL  0x70000    /* L2 Memory Disable. */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define                   SECURE0  0x1        /* SECURE 0 */
+#define                  nSECURE0  0x0
+#define                   SECURE1  0x2        /* SECURE 1 */
+#define                  nSECURE1  0x0
+#define                   SECURE2  0x4        /* SECURE 2 */
+#define                  nSECURE2  0x0
+#define                   SECURE3  0x8        /* SECURE 3 */
+#define                  nSECURE3  0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define                   SECMODE  0x3        /* Secured Mode Control State */
+#define                       NMI  0x4        /* Non Maskable Interrupt */
+#define                      nNMI  0x0
+#define                   AFVALID  0x8        /* Authentication Firmware Valid */
+#define                  nAFVALID  0x0
+#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
+#define                   nAFEXIT  0x0
+#define                   SECSTAT  0xe0       /* Secure Status */
 
 #endif /* _DEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index c136f70..cc383ad 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF527.h b/arch/blackfin/mach-bf527/include/mach/defBF527.h
index 4dd58fb..05369a9 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF527.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
deleted file mode 100644
index 0947503..0000000
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ /dev/null
@@ -1,1506 +0,0 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
- */
-
-#ifndef _DEF_BF52X_H
-#define _DEF_BF52X_H
-
-
-/* ************************************************************** */
-/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x    */
-/* ************************************************************** */
-
-/* ==== begin from defBF534.h ==== */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
-#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
-#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register		*/
-#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
-#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
-#define CHIPID        0xFFC00014  /* Device ID Register */
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
-#define SWRST				0xFFC00100	/* Software Reset Register					*/
-#define SYSCR				0xFFC00104	/* System Configuration Register			*/
-#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register	*/
-
-#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
-#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0			*/
-#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1			*/
-#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2			*/
-#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3			*/
-#define SIC_ISR0				0xFFC00120	/* Interrupt Status Register				*/
-#define SIC_IWR0				0xFFC00124	/* Interrupt Wakeup Register				*/
-
-/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
-#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
-#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
-#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
-#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
-#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
-#define SIC_ISR1                        0xFFC00160     /* Interrupt Statur register */
-#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
-
-
-/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
-#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
-#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
-#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
-#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
-#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
-#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
-#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
-#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
-#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
-#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
-#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
-#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
-#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
-#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
-#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
-#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
-#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
-#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
-#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
-#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
-
-
-/* SPI Controller			(0xFFC00500 - 0xFFC005FF)								*/
-#define SPI0_REGBASE			0xFFC00500
-#define SPI_CTL				0xFFC00500	/* SPI Control Register						*/
-#define SPI_FLG				0xFFC00504	/* SPI Flag register						*/
-#define SPI_STAT			0xFFC00508	/* SPI Status register						*/
-#define SPI_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register		*/
-#define SPI_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register			*/
-#define SPI_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
-#define SPI_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
-
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
-#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
-#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
-#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
-
-#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
-#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
-#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
-#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
-
-#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
-#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
-#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
-#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
-
-#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
-#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
-#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
-#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
-
-#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
-#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
-#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
-#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
-
-#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
-#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
-#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
-#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
-
-#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
-#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
-#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
-#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
-
-#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
-#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
-#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
-#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
-
-#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
-#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
-#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
-#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
-#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
-#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
-#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
-#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
-#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
-#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
-#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
-#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
-#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
-#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
-#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
-#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
-#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
-#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
-
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
-#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
-#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
-#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
-#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
-#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
-#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
-#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
-#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
-#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
-#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
-#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
-#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
-#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
-#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
-#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
-#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
-#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
-#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
-
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
-#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
-#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
-#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
-#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
-#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
-#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
-#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
-#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
-#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
-#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
-#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
-#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
-#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
-#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
-#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
-#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
-#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
-#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
-#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
-#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
-#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
-#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
-#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
-#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
-#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
-
-
-/* DMA Traffic Control Registers													*/
-#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
-#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
-#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
-#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
-#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
-#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
-#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
-#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
-#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
-#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
-#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
-#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
-#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
-#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
-
-#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
-#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
-#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
-#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
-#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
-#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
-#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
-#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
-#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
-#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
-#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
-#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
-#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
-
-#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
-#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
-#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
-#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
-#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
-#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
-#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
-#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
-#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
-#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
-#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
-#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
-#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
-
-#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
-#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
-#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
-#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
-#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
-#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
-#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
-#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
-#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
-#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
-#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
-#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
-#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
-
-#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
-#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
-#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
-#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
-#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
-#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
-#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
-#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
-#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
-#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
-#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
-#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
-#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
-
-#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
-#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
-#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
-#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
-#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
-#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
-#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
-#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
-#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
-#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
-#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
-#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
-#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
-
-#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
-#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
-#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
-#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
-#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
-#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
-#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
-#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
-#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
-#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
-#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
-#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
-#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
-
-#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
-#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
-#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
-#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
-#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
-#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
-#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
-#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
-#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
-#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
-#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
-#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
-#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
-
-#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
-#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
-#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
-#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
-#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
-#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
-#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
-#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
-#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
-#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
-#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
-#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
-#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
-
-#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
-#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
-#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
-#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
-#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
-#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
-#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
-#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
-#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
-#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
-#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
-#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
-#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
-
-#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
-#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
-#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
-#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
-#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
-#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
-#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
-#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
-#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
-#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
-#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
-#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
-#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
-
-#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
-#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
-#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
-#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
-#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
-#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
-#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
-#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
-#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
-#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
-#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
-#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
-#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
-
-#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
-#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
-#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
-#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
-#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
-#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
-#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
-#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
-#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
-#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
-#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
-
-#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
-#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
-#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
-#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
-#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
-#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
-#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
-#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
-#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
-#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
-#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
-#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
-#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
-
-#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
-#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
-#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
-#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
-#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
-#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
-#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
-#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
-#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
-#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
-#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
-
-#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
-#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
-#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
-#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
-#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
-#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
-#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
-#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
-#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
-#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
-#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
-#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
-#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
-#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
-#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
-#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
-#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
-#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-#define TWI0_REGBASE			0xFFC01400
-#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
-#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
-#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
-#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
-#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
-#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
-#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
-#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
-#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
-#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
-#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
-#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
-#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
-#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
-#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
-#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
-
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
-#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
-#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
-#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
-#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
-#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
-#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
-#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
-#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
-#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
-#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
-#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
-#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
-#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
-#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
-#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
-#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
-#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
-#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
-#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
-#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
-#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
-#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
-#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
-#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
-#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
-#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
-#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
-#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
-#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
-#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
-#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
-#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
-#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
-#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
-#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
-#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
-#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
-#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
-#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
-#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
-#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
-
-
-/* Omit CAN register sets from the defBF534.h (CAN is not in the ADSP-BF52x processor) */
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
-#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
-#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
-#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
-#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
-#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
-#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
-#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
-#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
-#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
-#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
-#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
-
-#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
-#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
-#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
-#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
-#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
-#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
-#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-#define PORTF_MUX               0xFFC03210      /* Port F mux control */
-#define PORTG_MUX               0xFFC03214      /* Port G mux control */
-#define PORTH_MUX               0xFFC03218      /* Port H mux control */
-#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
-#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
-#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
-#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
-#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
-#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
-#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
-#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
-#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
-#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
-#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
-#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
-
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer:	All macros are intended to make C and Assembly code more readable.
-**				Use these macros carefully, as any that do left shifts for field
-**				depositing will result in the lower order bits being destroyed.  Any
-**				macro that shifts left to properly position the bit-field should be
-**				used as part of an OR to initialize a register and NOT as a dynamic
-**				modifier UNLESS the lower order bits are saved and ORed back in when
-**				the macro is used.
-*************************************************************************************/
-
-/* CHIPID Masks */
-#define CHIPID_VERSION         0xF0000000
-#define CHIPID_FAMILY          0x0FFFF000
-#define CHIPID_MANUFACTURE     0x00000FFE
-
-/* SWRST Masks																		*/
-#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
-#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
-#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
-#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
-#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
-
-/* SYSCR Masks																				*/
-#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
-#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
-
-
-/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
-/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
-
-#if 0
-#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
-
-#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
-#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
-#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
-#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
-#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
-#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
-#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
-
-#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
-#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
-#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
-#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
-#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
-#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
-#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
-#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
-
-#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
-#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
-#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
-#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
-#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
-#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
-#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
-#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
-#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
-#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
-
-#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
-#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
-#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
-#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
-#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
-#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
-#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
-#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
-#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
-#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
-#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
-#endif
-
-/* SIC_IAR0 Macros															*/
-#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
-#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
-#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
-#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
-#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
-#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
-#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
-#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
-
-/* SIC_IAR1 Macros															*/
-#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
-#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
-#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
-#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
-#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
-#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
-#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
-#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
-
-/* SIC_IAR2 Macros															*/
-#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
-#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
-#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
-#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
-#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
-#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
-#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
-#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
-
-/* SIC_IAR3 Macros															*/
-#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
-#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
-#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
-#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
-#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
-#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
-#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
-#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
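/*
 * Illustrative sketch (editor's note, not part of the original header):
 * per the disclaimer above, each Px_IVG() macro left-shifts a 4-bit field
 * into position, so assigning one of them on its own would zero every
 * other field in the register.  Either build the whole SIC_IARx value as
 * one OR of shifted fields, or save the other bits and OR the new field
 * back in.  SIC_IAR0 and the bfin_read32()/bfin_write32() accessors are
 * assumed to be defined elsewhere in this header/tree (they appear in
 * other files touched by this patch).
 */
#if 0	/* usage sketch only, not compiled */
static inline void bf52x_sic_iar0_sketch(void)
{
	/* Initialize: one OR of shifted fields, as the disclaimer intends. */
	bfin_write32(SIC_IAR0, P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(9) |
			       P4_IVG(9) | P5_IVG(10) | P6_IVG(10) | P7_IVG(11));

	/* Modify one field: clear bits 11:8 before OR-ing the new value in. */
	bfin_write32(SIC_IAR0,
		     (bfin_read32(SIC_IAR0) & ~(0xF << 0x8)) | P2_IVG(8));
}
#endif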
-
-
-/* SIC_IMASK Masks																		*/
-#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
-#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
-#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
-#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
-
-/* SIC_IWR Masks																		*/
-#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
-#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
-#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
-#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
-
-
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks												*/
-#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select */
-#define STB			0x04				/* Stop Bits			*/
-#define PEN			0x08				/* Parity Enable		*/
-#define EPS			0x10				/* Even Parity Select	*/
-#define STP			0x20				/* Stick Parity			*/
-#define SB			0x40				/* Set Break			*/
-#define DLAB		0x80				/* Divisor Latch Access	*/
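/*
 * Illustrative sketch (editor's note, not part of the original header):
 * WLS() takes the word length in bits (5-8) directly, so a plain 8N1 line
 * setting is just the word-length field with the other LCR bits left
 * clear.  UART1_LCR is defined above; bfin_write16() (used elsewhere in
 * this patch) is assumed to be available.
 */
#if 0	/* usage sketch only, not compiled */
	bfin_write16(UART1_LCR, WLS(8));	/* 8 data bits, no parity, 1 stop bit */
#endif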
-
-/* UARTx_MCR Mask										*/
-#define LOOP_ENA	0x10	/* Loopback Mode Enable */
-#define LOOP_ENA_P	0x04
-
-/* UARTx_LSR Masks										*/
-#define DR			0x01	/* Data Ready				*/
-#define OE			0x02	/* Overrun Error			*/
-#define PE			0x04	/* Parity Error				*/
-#define FE			0x08	/* Framing Error			*/
-#define BI			0x10	/* Break Interrupt			*/
-#define THRE		0x20	/* THR Empty				*/
-#define TEMT		0x40	/* TSR and UART_THR Empty	*/
-
-/* UARTx_IER Masks															*/
-#define ERBFI		0x01		/* Enable Receive Buffer Full Interrupt		*/
-#define ETBEI		0x02		/* Enable Transmit Buffer Empty Interrupt	*/
-#define ELSI		0x04		/* Enable RX Status Interrupt				*/
-
-/* UARTx_IIR Masks														*/
-#define NINT		0x01		/* Pending Interrupt					*/
-#define IIR_TX_READY    0x02		/* UART_THR empty                               */
-#define IIR_RX_READY    0x04		/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06		/* Receive line status    			*/
-#define IIR_STATUS	0x06		/* Highest Priority Pending Interrupt	*/
-
-/* UARTx_GCTL Masks													*/
-#define UCEN		0x01		/* Enable UARTx Clocks				*/
-#define IREN		0x02		/* Enable IrDA Mode					*/
-#define TPOLC		0x04		/* IrDA TX Polarity Change			*/
-#define RPOLC		0x08		/* IrDA RX Polarity Change			*/
-#define FPE			0x10		/* Force Parity Error On Transmit	*/
-#define FFE			0x20		/* Force Framing Error On Transmit	*/
-
-
-/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
-/* TIMER_ENABLE Masks													*/
-#define TIMEN0			0x0001		/* Enable Timer 0					*/
-#define TIMEN1			0x0002		/* Enable Timer 1					*/
-#define TIMEN2			0x0004		/* Enable Timer 2					*/
-#define TIMEN3			0x0008		/* Enable Timer 3					*/
-#define TIMEN4			0x0010		/* Enable Timer 4					*/
-#define TIMEN5			0x0020		/* Enable Timer 5					*/
-#define TIMEN6			0x0040		/* Enable Timer 6					*/
-#define TIMEN7			0x0080		/* Enable Timer 7					*/
-
-/* TIMER_DISABLE Masks													*/
-#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
-#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
-#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
-#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
-#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
-#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
-#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
-#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
-
-/* TIMER_STATUS Masks													*/
-#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
-#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
-#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
-#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
-#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
-#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
-#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
-#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
-#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
-#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
-#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
-#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
-#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
-#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
-#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
-#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
-#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
-#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
-#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
-#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
-#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
-#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
-#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
-#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR3 TOVF_ERR3
-#define TOVL_ERR4 TOVF_ERR4
-#define TOVL_ERR5 TOVF_ERR5
-#define TOVL_ERR6 TOVF_ERR6
-#define TOVL_ERR7 TOVF_ERR7
-
-/* TIMERx_CONFIG Masks													*/
-#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
-#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
-#define EXT_CLK			0x0003	/* External Clock Mode					*/
-#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
-#define PERIOD_CNT		0x0008	/* Period Count							*/
-#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
-#define TIN_SEL			0x0020	/* Timer Input Select					*/
-#define OUT_DIS			0x0040	/* Output Pad Disable					*/
-#define CLK_SEL			0x0080	/* Timer Clock Select					*/
-#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
-#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
-#define ERR_TYP			0xC000	/* Error Type							*/
-
-
-/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
-/* Port F Masks 														*/
-#define PF0		0x0001
-#define PF1		0x0002
-#define PF2		0x0004
-#define PF3		0x0008
-#define PF4		0x0010
-#define PF5		0x0020
-#define PF6		0x0040
-#define PF7		0x0080
-#define PF8		0x0100
-#define PF9		0x0200
-#define PF10	0x0400
-#define PF11	0x0800
-#define PF12	0x1000
-#define PF13	0x2000
-#define PF14	0x4000
-#define PF15	0x8000
-
-/* Port G Masks															*/
-#define PG0		0x0001
-#define PG1		0x0002
-#define PG2		0x0004
-#define PG3		0x0008
-#define PG4		0x0010
-#define PG5		0x0020
-#define PG6		0x0040
-#define PG7		0x0080
-#define PG8		0x0100
-#define PG9		0x0200
-#define PG10	0x0400
-#define PG11	0x0800
-#define PG12	0x1000
-#define PG13	0x2000
-#define PG14	0x4000
-#define PG15	0x8000
-
-/* Port H Masks															*/
-#define PH0		0x0001
-#define PH1		0x0002
-#define PH2		0x0004
-#define PH3		0x0008
-#define PH4		0x0010
-#define PH5		0x0020
-#define PH6		0x0040
-#define PH7		0x0080
-#define PH8		0x0100
-#define PH9		0x0200
-#define PH10	0x0400
-#define PH11	0x0800
-#define PH12	0x1000
-#define PH13	0x2000
-#define PH14	0x4000
-#define PH15	0x8000
-
-/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
-/* EBIU_AMGCTL Masks																	*/
-#define AMCKEN			0x0001		/* Enable CLKOUT									*/
-#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
-#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
-#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
-#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
-#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
-
-/* EBIU_AMBCTL0 Masks																	*/
-#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
-#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
-#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
-#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
-#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
-#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
-#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
-#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
-#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
-#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
-#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
-#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
-#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
-#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
-#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
-#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
-#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
-#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
-#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
-#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
-#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
-#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
-#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
-#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
-#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
-#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
-#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
-#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
-#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
-#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
-#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
-#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
-#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
-#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
-#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
-#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
-
-#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
-#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
-#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
-#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
-#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
-#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
-#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
-#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
-#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
-#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
-#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
-#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
-#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
-#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
-#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
-#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
-#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
-#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
-#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
-#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
-#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
-#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
-#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
-#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
-#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
-#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
-#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
-#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
-#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
-#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
-#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
-#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
-#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
-#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
-#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
-#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
-#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
-#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
-#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
-#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
-#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
-
-/* EBIU_AMBCTL1 Masks																	*/
-#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
-#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
-#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
-#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
-#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
-#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
-#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
-#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
-#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
-#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
-#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
-#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
-#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
-#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
-#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
-#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
-#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
-#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
-#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
-#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
-#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
-#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
-#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
-#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
-#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
-#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
-#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
-#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
-#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
-#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
-#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
-#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
-#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
-#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
-#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
-#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
-
-#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
-#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
-#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
-#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
-#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
-#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
-#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
-#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
-#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
-#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
-#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
-#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
-#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
-#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
-#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
-#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
-#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
-#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
-#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
-#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
-#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
-#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
-#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
-#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
-#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
-#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
-#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
-#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
-#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
-#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
-#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
-#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
-#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
-#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
-#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
-#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
-
-
-/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
-/* EBIU_SDGCTL Masks																			*/
-#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
-#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
-#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
-#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
-#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
-#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
-#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
-#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
-#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
-#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
-#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
-#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
-#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
-#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
-#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
-#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
-#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
-#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
-#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
-#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
-#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
-#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
-#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
-#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
-#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
-#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
-#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
-#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
-#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
-#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
-#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
-#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
-#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
-#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
-#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
-#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
-#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
-#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
-#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
-#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
-#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
-#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
-#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
-#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
-#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
-#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
-#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
-
-/* EBIU_SDBCTL Masks																		*/
-#define EBE				0x0001		/* Enable SDRAM External Bank							*/
-#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
-#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
-#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
-#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
-#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
-#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
-#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
-#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
-#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
-#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
-
-/* EBIU_SDSTAT Masks														*/
-#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
-#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
-#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
-#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
-#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
-#define BGSTAT			0x0020		/* Bus Grant Status						*/
-
-
-/* **************************  DMA CONTROLLER MASKS  ********************************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
-#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
-#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
-#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
-#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
-#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
-#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
-#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
-#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
-#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
-#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
-#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
-#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
-#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
-#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
-
-/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
-/*  PPI_CONTROL Masks													*/
-#define PORT_EN			0x0001		/* PPI Port Enable					*/
-#define PORT_DIR		0x0002		/* PPI Port Direction				*/
-#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
-#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
-#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
-#define PACK_EN			0x0080		/* PPI Packing Mode					*/
-#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
-#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
-#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
-#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
-#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
-#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
-#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
-#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
-#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
-#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
-#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
-#define DLENGTH			0x3800		/* PPI Data Length  */
-#define POLC			0x4000		/* PPI Clock Polarity				*/
-#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
-
-/* PPI_STATUS Masks														*/
-#define FLD				0x0400		/* Field Indicator					*/
-#define FT_ERR			0x0800		/* Frame Track Error				*/
-#define OVR				0x1000		/* FIFO Overflow Error				*/
-#define UNDR			0x2000		/* FIFO Underrun Error				*/
-#define ERR_DET			0x4000		/* Error Detected Indicator			*/
-#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
-
-
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
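/*
 * Illustrative sketch (editor's note, not part of the original header):
 * the comments here and on PRESCALE below give a 10MHz internal time
 * reference, so the SCL period is (CLKLOW + CLKHI) * 100ns.  A 50/50
 * split therefore yields 100kHz standard-mode I2C.  The TWI_CLKDIV
 * register definition and the bfin_write16() accessor (used elsewhere in
 * this patch) are assumed to be available.
 */
#if 0	/* usage sketch only, not compiled */
	bfin_write16(TWI_CLKDIV, CLKLOW(50) | CLKHI(50));	/* (50+50)*100ns = 10us -> 100kHz */
#endif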
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
-#define	TWI_ENA		0x0080		/* TWI Enable									*/
-#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001		/* Slave Enable									*/
-#define	SADD_LEN	0x0002		/* Slave Address Length							*/
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
-#define GCALL		0x0002		/* General Call Indicator						*/
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001		/* Master Mode Enable						*/
-#define	MADD_LEN	0x0002		/* Master Address Length					*/
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
-#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
-#define	STOP		0x0010		/* Issue Stop Condition						*/
-#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
-#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
-#define	SDAOVR		0x4000		/* Serial Data Override						*/
-#define	SCLOVR		0x8000		/* Serial Clock Override					*/
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001		/* Master Transfer In Progress					*/
-#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
-#define	ANAK		0x0004		/* Address Not Acknowledged						*/
-#define	DNAK		0x0008		/* Data Not Acknowledged						*/
-#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
-#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
-#define	SDASEN		0x0040		/* Serial Data Sense							*/
-#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
-#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
-#define	SERR		0x0004		/* Slave Transfer Error		*/
-#define	SOVF		0x0008		/* Slave Overflow			*/
-#define	MCOMP		0x0010		/* Master Transfer Complete	*/
-#define	MERR		0x0020		/* Master Transfer Error	*/
-#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
-#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
-#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
-#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
-#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
-
-#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
-#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
-#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
-#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
-
-
-/* Omit CAN masks from defBF534.h */
-
-/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
-/* PORT_MUX Masks															*/
-#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
-#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
-#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
-
-#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
-#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
-#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
-#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
-
-#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
-#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
-#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
-
-#define	PFTE			0x0010			/* Port F Timer Enable				*/
-#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
-#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
-
-#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
-#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
-#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
-
-#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
-#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
-#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
-
-#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
-#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
-#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
-
-#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
-#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
-#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
-
-#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
-#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
-#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
-
-#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
-#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
-#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
-
-#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
-#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
-#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
-
-
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
-#define	REP			0x0002	/* HDMA Request Polarity					*/
-#define	UTE			0x0004	/* Urgency Threshold Enable					*/
-#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
-#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
-#define	DRQ			0x0300	/* HDMA Request Type						*/
-#define	DRQ_NONE	0x0000	/* 		No Request							*/
-#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
-#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
-#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
-#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
-#define	PS			0x2000	/* HDMA Pin Status							*/
-#define	OI			0x4000	/* Overflow Interrupt Generated				*/
-#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
-
-/* entry addresses of the user-callable Boot ROM functions */
-
-#define _BOOTROM_RESET 0xEF000000
-#define _BOOTROM_FINAL_INIT 0xEF000002
-#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
-#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
-#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
-#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
-#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
-#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
-#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define	PGDE_UART   PFDE_UART
-#define	PGDE_DMA    PFDE_DMA
-#define	CKELOW		SCKELOW
-
-/* ==== end from defBF534.h ==== */
-
-/* HOST Port Registers */
-
-#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
-#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
-#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
-
-/* Counter Registers */
-
-#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
-#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
-#define                       CNT_STATUS  0xffc03508   /* Status Register */
-#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
-#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
-#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
-#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
-#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
-
-/* OTP/FUSE Registers */
-
-#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
-#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
-#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
-#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
-
-/* Security Registers */
-
-#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
-#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
-#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
-
-/* OTP Read/Write Data Buffer Registers */
-
-#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-
-/* NFC Registers */
-
-#define                          NFC_CTL  0xffc03700   /* NAND Control Register */
-#define                         NFC_STAT  0xffc03704   /* NAND Status Register */
-#define                      NFC_IRQSTAT  0xffc03708   /* NAND Interrupt Status Register */
-#define                      NFC_IRQMASK  0xffc0370c   /* NAND Interrupt Mask Register */
-#define                         NFC_ECC0  0xffc03710   /* NAND ECC Register 0 */
-#define                         NFC_ECC1  0xffc03714   /* NAND ECC Register 1 */
-#define                         NFC_ECC2  0xffc03718   /* NAND ECC Register 2 */
-#define                         NFC_ECC3  0xffc0371c   /* NAND ECC Register 3 */
-#define                        NFC_COUNT  0xffc03720   /* NAND ECC Count Register */
-#define                          NFC_RST  0xffc03724   /* NAND ECC Reset Register */
-#define                        NFC_PGCTL  0xffc03728   /* NAND Page Control Register */
-#define                         NFC_READ  0xffc0372c   /* NAND Read Data Register */
-#define                         NFC_ADDR  0xffc03740   /* NAND Address Register */
-#define                          NFC_CMD  0xffc03744   /* NAND Command Register */
-#define                      NFC_DATA_WR  0xffc03748   /* NAND Data Write Register */
-#define                      NFC_DATA_RD  0xffc0374c   /* NAND Data Read Register */
-
-/* ********************************************************** */
-/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
-/*     and MULTI BIT READ MACROS                              */
-/* ********************************************************** */
-
-/* Bit masks for HOST_CONTROL */
-
-#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
-#define                  HOST_CNTR_nHOST_EN  0x0
-#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
-#define                 HOST_CNTR_nHOST_END  0x0
-#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
-#define                HOST_CNTR_nDATA_SIZE  0x0
-#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
-#define                 HOST_CNTR_nHOST_RST  0x0
-#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
-#define                 HOST_CNTR_nHRDY_OVR  0x0
-#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
-#define                 HOST_CNTR_nINT_MODE  0x0
-#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
-#define                   HOST_CNTR_nBT_EN  0x0
-#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
-#define                      HOST_CNTR_nEHW  0x0
-#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
-#define                      HOST_CNTR_nEHR  0x0
-#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
-#define                      HOST_CNTR_nBDR  0x0
-
-/* Bit masks for HOST_STATUS */
-
-#define                     HOST_STAT_READY  0x1        /* DMA Ready */
-#define                    HOST_STAT_nREADY  0x0
-#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
-#define                 HOST_STAT_nFIFOFULL  0x0
-#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
-#define                HOST_STAT_nFIFOEMPTY  0x0
-#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
-#define                 HOST_STAT_nCOMPLETE  0x0
-#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
-#define                     HOST_STAT_nHSHK  0x0
-#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
-#define                  HOST_STAT_nTIMEOUT  0x0
-#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
-#define                     HOST_STAT_nHIRQ  0x0
-#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
-#define               HOST_STAT_nALLOW_CNFG  0x0
-#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
-#define                  HOST_STAT_nDMA_DIR  0x0
-#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
-#define                      HOST_STAT_nBTE  0x0
-#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
-#define              HOST_STAT_nHOSTRD_DONE  0x0
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
-
-/* Bit masks for SECURE_SYSSWT */
-
-#define                   EMUDABL  0x1        /* Emulation Disable. */
-#define                  nEMUDABL  0x0
-#define                   RSTDABL  0x2        /* Reset Disable */
-#define                  nRSTDABL  0x0
-#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
-#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
-#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
-#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
-#define                  nDMA0OVR  0x0
-#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
-#define                  nDMA1OVR  0x0
-#define                    EMUOVR  0x4000     /* Emulation Override */
-#define                   nEMUOVR  0x0
-#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
-#define                   nOTPSEN  0x0
-#define                    L2DABL  0x70000    /* L2 Memory Disable. */
-
-/* Bit masks for SECURE_CONTROL */
-
-#define                   SECURE0  0x1        /* SECURE 0 */
-#define                  nSECURE0  0x0
-#define                   SECURE1  0x2        /* SECURE 1 */
-#define                  nSECURE1  0x0
-#define                   SECURE2  0x4        /* SECURE 2 */
-#define                  nSECURE2  0x0
-#define                   SECURE3  0x8        /* SECURE 3 */
-#define                  nSECURE3  0x0
-
-/* Bit masks for SECURE_STATUS */
-
-#define                   SECMODE  0x3        /* Secured Mode Control State */
-#define                       NMI  0x4        /* Non Maskable Interrupt */
-#define                      nNMI  0x0
-#define                   AFVALID  0x8        /* Authentication Firmware Valid */
-#define                  nAFVALID  0x0
-#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
-#define                   nAFEXIT  0x0
-#define                   SECSTAT  0xe0       /* Secure Status */
-
-#endif /* _DEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/gpio.h b/arch/blackfin/mach-bf527/include/mach/gpio.h
index f80c299..fba606b 100644
--- a/arch/blackfin/mach-bf527/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf527/include/mach/gpio.h
@@ -62,4 +62,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf527/include/mach/pll.h b/arch/blackfin/mach-bf527/include/mach/pll.h
index 24f1d7c..94cca67 100644
--- a/arch/blackfin/mach-bf527/include/mach/pll.h
+++ b/arch/blackfin/mach-bf527/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 2ce7b16..d4bfcea 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -286,7 +286,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 20c1022..87b5af3 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -25,7 +25,6 @@
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
 #include <asm/dpmc.h>
-#include <mach/fio_flag.h>
 
 /*
  * Name the Board for the /proc/cpuinfo
@@ -225,7 +224,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -290,9 +289,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -324,9 +323,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -476,10 +475,16 @@
 		return ret;
 
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-	/* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
-	bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
-	bfin_write_FIO_FLAG_S(PF0);
-	SSYNC();
+	/*
+	 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
+	 * the bfin-async-map driver takes care of flipping between
+	 * flash and ethernet when necessary.
+	 */
+	ret = gpio_request(GPIO_PF0, "enet_cpld");
+	if (!ret) {
+		gpio_direction_output(GPIO_PF0, 1);
+		gpio_free(GPIO_PF0);
+	}
 #endif
 
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
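This hunk is the recurring conversion in the board files: instead of poking FIO_DIR/FIO_FLAG_S directly (which needs SSYNC and the anomaly workarounds), the CPLD pin is driven through gpiolib, and the bfin-async-map driver handles the flash/ethernet switching afterwards. A minimal sketch of the pattern using only the generic gpio_* calls (error handling trimmed; the pin keeps its level after the free, which is why the board code can release it immediately):

	/* drive GPIO_PF0 high once so the CPLD routes AMS3 to the Ethernet MAC */
	if (!gpio_request(GPIO_PF0, "enet_cpld")) {
		gpio_direction_output(GPIO_PF0, 1);	/* direction + initial level in one call */
		gpio_free(GPIO_PF0);			/* board code is done with the pin */
	}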
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index adbe62a..4d5604e 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -271,7 +271,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -336,9 +336,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -370,9 +370,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index a1cb8e7..b67b91d 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -349,7 +349,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 5ba4b02..f869a37 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -22,7 +22,6 @@
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
-#include <mach/fio_flag.h>
 
 /*
  * Name the Board for the /proc/cpuinfo
@@ -174,7 +173,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -295,15 +294,7 @@
 	printk(KERN_INFO "%s(): registering device resources\n", __func__);
 	platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices));
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
-	for (i = 0; i < ARRAY_SIZE(bfin_spi_board_info); ++i) {
-		int j = 1 << bfin_spi_board_info[i].chip_select;
-		/* set spi cs to 1 */
-		bfin_write_FIO_DIR(bfin_read_FIO_DIR() | j);
-		bfin_write_FIO_FLAG_S(j);
-	}
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-#endif
 
 	return 0;
 }
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index b3b1cde..43224ef 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -24,7 +24,6 @@
 #include <asm/reboot.h>
 #include <asm/portmux.h>
 #include <asm/dpmc.h>
-#include <mach/fio_flag.h>
 
 /*
  * Name the Board for the /proc/cpuinfo
@@ -354,7 +353,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -419,9 +418,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +452,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -674,10 +673,16 @@
 		return ret;
 
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-	/* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
-	bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
-	bfin_write_FIO_FLAG_S(PF0);
-	SSYNC();
+	/*
+	 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
+	 * the bfin-async-map driver takes care of flipping between
+	 * flash and ethernet when necessary.
+	 */
+	ret = gpio_request(GPIO_PF0, "enet_cpld");
+	if (!ret) {
+		gpio_direction_output(GPIO_PF0, 1);
+		gpio_free(GPIO_PF0);
+	}
 #endif
 
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
@@ -713,7 +718,6 @@
 void native_machine_restart(char *cmd)
 {
 	/* workaround pull up on cpld / flash pin not being strong enough */
-	bfin_write_FIO_INEN(~PF0);
-	bfin_write_FIO_DIR(PF0);
-	bfin_write_FIO_FLAG_C(PF0);
+	gpio_request(GPIO_PF0, "flash_cpld");
+	gpio_direction_output(GPIO_PF0, 0);
 }
diff --git a/arch/blackfin/mach-bf533/dma.c b/arch/blackfin/mach-bf533/dma.c
index 4a14a46..1f5988d 100644
--- a/arch/blackfin/mach-bf533/dma.c
+++ b/arch/blackfin/mach-bf533/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
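The dma.c change constifies the pointer slots, not the registers they point at, so drivers can still write the MMRs while the table itself becomes read-only. A short C refresher on that distinction, deliberately not tied to any Blackfin type:

	int a, b;
	int *const p = &a;	/* pointer is const: "*p = 1" is fine, "p = &b" is a compile error */
	const int *q = &a;	/* pointee is const: "q = &b" is fine, "*q = 1" is a compile error */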
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
new file mode 100644
index 0000000..08072c8
--- /dev/null
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	1
+
+#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
index 9e1f3de..45dcaa4 100644
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #ifdef CONFIG_BFIN_UART0_CTSRTS
 # define CONFIG_SERIAL_BFIN_CTSRTS
 # ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#else
-# if ANOMALY_05000363
-	unsigned int anomaly_threshold;
-# endif
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list       cts_timer;
-	int			cts_pin;
-	int			rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -120,3 +48,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index f4bd6df..e366207 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -1,7 +1,7 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
@@ -10,26 +10,14 @@
 #define BF533_FAMILY
 
 #include "bf533.h"
-#include "defBF532.h"
 #include "anomaly.h"
 
-#if !defined(__ASSEMBLY__)
-#include "cdefBF532.h"
+#include <asm/def_LPBlackfin.h>
+#include "defBF532.h"
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# include "cdefBF532.h"
 #endif
 
-#define BFIN_UART_NR_PORTS      1
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
-#endif				/* _MACH_BLACKFIN_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
index 401e524..fd0cbe4 100644
--- a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF532_H
 #define _CDEF_BF532_H
 
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 /* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
 #define bfin_read_PLL_STAT()                 bfin_read16(PLL_STAT)
@@ -66,16 +63,10 @@
 #define bfin_write_RTC_PREN(val)             bfin_write16(RTC_PREN,val)
 
 /* DMA Traffic controls */
-#define bfin_read_DMA_TCPER()                bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)            bfin_write16(DMA_TCPER,val)
-#define bfin_read_DMA_TCCNT()                bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)            bfin_write16(DMA_TCCNT,val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TC_PER()               bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)           bfin_write16(DMA_TC_PER,val)
-#define bfin_read_DMA_TC_CNT()               bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)           bfin_write16(DMA_TC_CNT,val)
+#define bfin_read_DMAC_TC_PER()              bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)          bfin_write16(DMAC_TC_PER,val)
+#define bfin_read_DMAC_TC_CNT()              bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)          bfin_write16(DMAC_TC_CNT,val)
 
 /* General Purpose IO (0xFFC0 2400-0xFFC0 27FF) */
 #define bfin_read_FIO_DIR()                  bfin_read16(FIO_DIR)
@@ -105,6 +96,47 @@
 #define bfin_read_FIO_MASKB_T()              bfin_read16(FIO_MASKB_T)
 #define bfin_write_FIO_MASKB_T(val)          bfin_write16(FIO_MASKB_T,val)
 
+#if ANOMALY_05000311
+/* Keep at the CPP expansion to avoid circular header dependency loops */
+#define BFIN_WRITE_FIO_FLAG(name, val) \
+	do { \
+		unsigned long __flags; \
+		__flags = hard_local_irq_save(); \
+		bfin_write16(FIO_FLAG_##name, val); \
+		bfin_read_CHIPID(); \
+		hard_local_irq_restore(__flags); \
+	} while (0)
+#define bfin_write_FIO_FLAG_D(val)           BFIN_WRITE_FIO_FLAG(D, val)
+#define bfin_write_FIO_FLAG_C(val)           BFIN_WRITE_FIO_FLAG(C, val)
+#define bfin_write_FIO_FLAG_S(val)           BFIN_WRITE_FIO_FLAG(S, val)
+#define bfin_write_FIO_FLAG_T(val)           BFIN_WRITE_FIO_FLAG(T, val)
+
+#define BFIN_READ_FIO_FLAG(name) \
+	({ \
+		unsigned long __flags; \
+		u16 __ret; \
+		__flags = hard_local_irq_save(); \
+		__ret = bfin_read16(FIO_FLAG_##name); \
+		bfin_read_CHIPID(); \
+		hard_local_irq_restore(__flags); \
+		__ret; \
+	})
+#define bfin_read_FIO_FLAG_D()               BFIN_READ_FIO_FLAG(D)
+#define bfin_read_FIO_FLAG_C()               BFIN_READ_FIO_FLAG(C)
+#define bfin_read_FIO_FLAG_S()               BFIN_READ_FIO_FLAG(S)
+#define bfin_read_FIO_FLAG_T()               BFIN_READ_FIO_FLAG(T)
+
+#else
+#define bfin_write_FIO_FLAG_D(val)           bfin_write16(FIO_FLAG_D, val)
+#define bfin_write_FIO_FLAG_C(val)           bfin_write16(FIO_FLAG_C, val)
+#define bfin_write_FIO_FLAG_S(val)           bfin_write16(FIO_FLAG_S, val)
+#define bfin_write_FIO_FLAG_T(val)           bfin_write16(FIO_FLAG_T, val)
+#define bfin_read_FIO_FLAG_D()               bfin_read16(FIO_FLAG_D)
+#define bfin_read_FIO_FLAG_C()               bfin_read16(FIO_FLAG_C)
+#define bfin_read_FIO_FLAG_S()               bfin_read16(FIO_FLAG_S)
+#define bfin_read_FIO_FLAG_T()               bfin_read16(FIO_FLAG_T)
+#endif
+
 /* DMA Controller */
 #define bfin_read_DMA0_CONFIG()              bfin_read16(DMA0_CONFIG)
 #define bfin_write_DMA0_CONFIG(val)          bfin_write16(DMA0_CONFIG,val)
@@ -647,7 +679,4 @@
 #define bfin_read_PPI_FRAME()                bfin_read16(PPI_FRAME)
 #define bfin_write_PPI_FRAME(val)            bfin_write16(PPI_FRAME,val)
 
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif				/* _CDEF_BF532_H */
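With the flag accessors folded into cdefBF532.h, callers use the same bfin_read/write_FIO_FLAG_* names whether or not the anomaly 05000311 workaround (IRQs off plus a dummy CHIPID read around every access) is compiled in; as the new comment notes, the CPP form only exists to avoid a header dependency loop. A caller sketch using a raw bit mask, since the PF* defines are also being removed from this header:

	/* raise flag bit 0, then clear it again; the macros hide the 05000311 sequencing */
	bfin_write_FIO_FLAG_S(1 << 0);
	if (bfin_read_FIO_FLAG_D() & (1 << 0))
		bfin_write_FIO_FLAG_C(1 << 0);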
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 3adb0b4..2376d53 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -1,7 +1,7 @@
 /*
  * System & MMR bit and Address definitions for ADSP-BF532
  *
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -9,9 +9,6 @@
 #ifndef _DEF_BF532_H
 #define _DEF_BF532_H
 
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
 /*********************************************************************************** */
 /* System MMR Register Map */
 /*********************************************************************************** */
@@ -182,12 +179,8 @@
 #define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register */
 
 /* DMA Traffic controls */
-#define DMA_TC_PER 0xFFC00B0C	/* Traffic Control Periods Register */
-#define DMA_TC_CNT 0xFFC00B10	/* Traffic Control Current Counts Register */
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER 0xFFC00B0C	/* Traffic Control Periods Register */
-#define DMA_TCCNT 0xFFC00B10	/* Traffic Control Current Counts Register */
+#define DMAC_TC_PER 0xFFC00B0C	/* Traffic Control Periods Register */
+#define DMAC_TC_CNT 0xFFC00B10	/* Traffic Control Current Counts Register */
 
 /* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
 #define DMA0_CONFIG		0xFFC00C08	/* DMA Channel 0 Configuration Register */
@@ -432,83 +425,6 @@
 #define IWR_ENABLE(x)	       (1 << (x))	/* Wakeup Enable Peripheral #x */
 #define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x)))	/* Wakeup Disable Peripheral #x */
 
-/* ***************************** UART CONTROLLER MASKS ********************** */
-
-/* UART_LCR Register */
-
-#define DLAB	0x80
-#define SB      0x40
-#define STP      0x20
-#define EPS     0x10
-#define PEN	0x08
-#define STB	0x04
-#define WLS(x)	((x-5) & 0x03)
-
-#define DLAB_P	0x07
-#define SB_P	0x06
-#define STP_P	0x05
-#define EPS_P	0x04
-#define PEN_P	0x03
-#define STB_P	0x02
-#define WLS_P1	0x01
-#define WLS_P0	0x00
-
-/* UART_MCR Register */
-#define LOOP_ENA	0x10
-#define LOOP_ENA_P	0x04
-
-/* UART_LSR Register */
-#define TEMT	0x40
-#define THRE	0x20
-#define BI	0x10
-#define FE	0x08
-#define PE	0x04
-#define OE	0x02
-#define DR	0x01
-
-#define TEMP_P	0x06
-#define THRE_P	0x05
-#define BI_P	0x04
-#define FE_P	0x03
-#define PE_P	0x02
-#define OE_P	0x01
-#define DR_P	0x00
-
-/* UART_IER Register */
-#define ELSI	0x04
-#define ETBEI	0x02
-#define ERBFI	0x01
-
-#define ELSI_P	0x02
-#define ETBEI_P	0x01
-#define ERBFI_P	0x00
-
-/* UART_IIR Register */
-#define STATUS(x)	((x << 1) & 0x06)
-#define NINT		0x01
-#define STATUS_P1	0x02
-#define STATUS_P0	0x01
-#define NINT_P		0x00
-#define IIR_TX_READY    0x02	/* UART_THR empty                               */
-#define IIR_RX_READY    0x04	/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06	/* Receive line status                          */
-#define IIR_STATUS	0x06
-
-/* UART_GCTL Register */
-#define FFE	0x20
-#define FPE	0x10
-#define RPOLC	0x08
-#define TPOLC	0x04
-#define IREN	0x02
-#define UCEN	0x01
-
-#define FFE_P	0x05
-#define FPE_P	0x04
-#define RPOLC_P	0x03
-#define TPOLC_P	0x02
-#define IREN_P	0x01
-#define UCEN_P	0x00
-
 /*  *********  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 
 /*  PPI_CONTROL Masks         */
@@ -643,44 +559,6 @@
 #define ERR_TYP_P0		0x0E
 #define ERR_TYP_P1		0x0F
 
-/*/ ******************   PROGRAMMABLE FLAG MASKS  ********************* */
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks */
-#define PF0         0x0001
-#define PF1         0x0002
-#define PF2         0x0004
-#define PF3         0x0008
-#define PF4         0x0010
-#define PF5         0x0020
-#define PF6         0x0040
-#define PF7         0x0080
-#define PF8         0x0100
-#define PF9         0x0200
-#define PF10        0x0400
-#define PF11        0x0800
-#define PF12        0x1000
-#define PF13        0x2000
-#define PF14        0x4000
-#define PF15        0x8000
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  BIT POSITIONS */
-#define PF0_P         0
-#define PF1_P         1
-#define PF2_P         2
-#define PF3_P         3
-#define PF4_P         4
-#define PF5_P         5
-#define PF6_P         6
-#define PF7_P         7
-#define PF8_P         8
-#define PF9_P         9
-#define PF10_P        10
-#define PF11_P        11
-#define PF12_P        12
-#define PF13_P        13
-#define PF14_P        14
-#define PF15_P        15
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  ************* */
 
 /* AMGCTL Masks */
diff --git a/arch/blackfin/mach-bf533/include/mach/fio_flag.h b/arch/blackfin/mach-bf533/include/mach/fio_flag.h
deleted file mode 100644
index d0bfba0..0000000
--- a/arch/blackfin/mach-bf533/include/mach/fio_flag.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_FIO_FLAG_H
-#define _MACH_FIO_FLAG_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-#if ANOMALY_05000311
-#define BFIN_WRITE_FIO_FLAG(name) \
-static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \
-{ \
-	unsigned long flags; \
-	flags = hard_local_irq_save(); \
-	bfin_write16(FIO_FLAG_##name, val); \
-	bfin_read_CHIPID(); \
-	hard_local_irq_restore(flags); \
-}
-BFIN_WRITE_FIO_FLAG(D)
-BFIN_WRITE_FIO_FLAG(C)
-BFIN_WRITE_FIO_FLAG(S)
-BFIN_WRITE_FIO_FLAG(T)
-
-#define BFIN_READ_FIO_FLAG(name) \
-static inline u16 bfin_read_FIO_FLAG_##name(void) \
-{ \
-	unsigned long flags; \
-	u16 ret; \
-	flags = hard_local_irq_save(); \
-	ret = bfin_read16(FIO_FLAG_##name); \
-	bfin_read_CHIPID(); \
-	hard_local_irq_restore(flags); \
-	return ret; \
-}
-BFIN_READ_FIO_FLAG(D)
-BFIN_READ_FIO_FLAG(C)
-BFIN_READ_FIO_FLAG(S)
-BFIN_READ_FIO_FLAG(T)
-
-#else
-#define bfin_write_FIO_FLAG_D(val)           bfin_write16(FIO_FLAG_D, val)
-#define bfin_write_FIO_FLAG_C(val)           bfin_write16(FIO_FLAG_C, val)
-#define bfin_write_FIO_FLAG_S(val)           bfin_write16(FIO_FLAG_S, val)
-#define bfin_write_FIO_FLAG_T(val)           bfin_write16(FIO_FLAG_T, val)
-#define bfin_read_FIO_FLAG_T()               bfin_read16(FIO_FLAG_T)
-#define bfin_read_FIO_FLAG_C()               bfin_read16(FIO_FLAG_C)
-#define bfin_read_FIO_FLAG_S()               bfin_read16(FIO_FLAG_S)
-#define bfin_read_FIO_FLAG_D()               bfin_read16(FIO_FLAG_D)
-#endif
-
-#endif /* _MACH_FIO_FLAG_H */
diff --git a/arch/blackfin/mach-bf533/include/mach/gpio.h b/arch/blackfin/mach-bf533/include/mach/gpio.h
index e02416d..cce4f8f 100644
--- a/arch/blackfin/mach-bf533/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf533/include/mach/gpio.h
@@ -28,4 +28,6 @@
 
 #define PORT_F GPIO_PF0
 
+#include <mach-common/ports-f.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf533/include/mach/pll.h b/arch/blackfin/mach-bf533/include/mach/pll.h
index 169c106..94cca67 100644
--- a/arch/blackfin/mach-bf533/include/mach/pll.h
+++ b/arch/blackfin/mach-bf533/include/mach/pll.h
@@ -1,57 +1 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf537/boards/Kconfig b/arch/blackfin/mach-bf537/boards/Kconfig
index 44132fd..a44bf3a 100644
--- a/arch/blackfin/mach-bf537/boards/Kconfig
+++ b/arch/blackfin/mach-bf537/boards/Kconfig
@@ -39,4 +39,10 @@
 	help
 	  Board supply package for CSP Minotaur
 
+config DNP5370
+	bool "SSV Dil/NetPC DNP/5370"
+	depends on (BF537)
+	help
+	  Board supply package for DNP/5370 DIL64 module
+
 endchoice
diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile
index 7e6aa4e..fe42258 100644
--- a/arch/blackfin/mach-bf537/boards/Makefile
+++ b/arch/blackfin/mach-bf537/boards/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM)  += tcm_bf537.o
 obj-$(CONFIG_PNAV10)                   += pnav10.o
 obj-$(CONFIG_CAMSIG_MINOTAUR)          += minotaur.o
+obj-$(CONFIG_DNP5370)                  += dnp5370.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 836698c..2c776e1 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -373,7 +373,7 @@
 #endif
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -434,7 +434,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -545,9 +545,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -579,9 +579,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 2a85670..0856611 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -356,7 +356,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -399,7 +399,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -510,9 +510,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -544,9 +544,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
new file mode 100644
index 0000000..e1e9ea0
--- /dev/null
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -0,0 +1,418 @@
+/*
+ * This is the configuration for SSV Dil/NetPC DNP/5370 board.
+ *
+ * DIL module:         http://www.dilnetpc.com/dnp0086.htm
+ * SK28 (starter kit): http://www.dilnetpc.com/dnp0088.htm
+ *
+ * Copyright 2010 3ality Digital Systems
+ * Copyright 2005 National ICT Australia (NICTA)
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/spi/mmc_spi.h>
+#include <linux/phy.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/reboot.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "DNP/5370";
+#define FLASH_MAC               0x202f0000
+#define CONFIG_MTD_PHYSMAP_LEN  0x300000
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+	.name = "rtc-bfin",
+	.id   = -1,
+};
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+	{
+		.addr = 1,
+		.irq = PHY_POLL, /* IRQ_MAC_PHYINT */
+	},
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+	.phydev_number   = 1,
+	.phydev_data     = bfin_phydev_data,
+	.phy_mode        = PHY_INTERFACE_MODE_RMII,
+	.mac_peripherals = bfin_mac_peripherals,
+};
+
+static struct platform_device bfin_mii_bus = {
+	.name = "bfin_mii_bus",
+	.dev = {
+		.platform_data = &bfin_mii_bus_data,
+	}
+};
+
+static struct platform_device bfin_mac_device = {
+	.name = "bfin_mac",
+	.dev = {
+		.platform_data = &bfin_mii_bus,
+	}
+};
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition asmb_flash_partitions[] = {
+	{
+		.name       = "bootloader(nor)",
+		.size       = 0x30000,
+		.offset     = 0,
+	}, {
+		.name       = "linux kernel and rootfs(nor)",
+		.size       = 0x300000 - 0x30000 - 0x10000,
+		.offset     = MTDPART_OFS_APPEND,
+	}, {
+		.name       = "MAC address(nor)",
+		.size       = 0x10000,
+		.offset     = MTDPART_OFS_APPEND,
+		.mask_flags = MTD_WRITEABLE,
+	}
+};
+
+static struct physmap_flash_data asmb_flash_data = {
+	.width      = 1,
+	.parts      = asmb_flash_partitions,
+	.nr_parts   = ARRAY_SIZE(asmb_flash_partitions),
+};
+
+static struct resource asmb_flash_resource = {
+	.start = 0x20000000,
+	.end   = 0x202fffff,
+	.flags = IORESOURCE_MEM,
+};
+
+/* 4 MB NOR flash attached to async memory banks 0-2,
+ * therefore only 3 MB visible.
+ */
+static struct platform_device asmb_flash_device = {
+	.name	  = "physmap-flash",
+	.id	  = 0,
+	.dev = {
+		.platform_data = &asmb_flash_data,
+	},
+	.num_resources = 1,
+	.resource      = &asmb_flash_resource,
+};
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+
+#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
+
+static int bfin_mmc_spi_init(struct device *dev,
+	irqreturn_t (*detect_int)(int, void *), void *data)
+{
+	return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
+		IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
+}
+
+static void bfin_mmc_spi_exit(struct device *dev, void *data)
+{
+	free_irq(MMC_SPI_CARD_DETECT_INT, data);
+}
+
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+	.enable_dma    = 0,	 /* use no dma transfer with this chip*/
+	.bits_per_word = 8,
+};
+
+static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
+	.init = bfin_mmc_spi_init,
+	.exit = bfin_mmc_spi_exit,
+	.detect_delay = 100, /* msecs */
+};
+#endif
+
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+/* This mapping is for at45db642 it has 1056 page size,
+ * partition size and offset should be page aligned
+ */
+static struct mtd_partition bfin_spi_dataflash_partitions[] = {
+	{
+		.name   = "JFFS2 dataflash(nor)",
+#ifdef CONFIG_MTD_PAGESIZE_1024
+		.offset = 0x40000,
+		.size   = 0x7C0000,
+#else
+		.offset = 0x0,
+		.size   = 0x840000,
+#endif
+	}
+};
+
+static struct flash_platform_data bfin_spi_dataflash_data = {
+	.name     = "mtd_dataflash",
+	.parts    = bfin_spi_dataflash_partitions,
+	.nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
+	.type     = "mtd_dataflash",
+};
+
+static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
+	.enable_dma    = 0,	 /* use no dma transfer with this chip*/
+	.bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+/* SD/MMC card reader at SPI bus */
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+	{
+		.modalias	 = "mmc_spi",
+		.max_speed_hz    = 20000000,
+		.bus_num	 = 0,
+		.chip_select     = 1,
+		.platform_data   = &bfin_mmc_spi_pdata,
+		.controller_data = &mmc_spi_chip_info,
+		.mode	         = SPI_MODE_3,
+	},
+#endif
+
+/* 8 Megabyte Atmel NOR flash chip at SPI bus */
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+	{
+	.modalias        = "mtd_dataflash",
+	.max_speed_hz    = 16700000,
+	.bus_num         = 0,
+	.chip_select     = 2,
+	.platform_data   = &bfin_spi_dataflash_data,
+	.controller_data = &spi_dataflash_chip_info,
+	.mode            = SPI_MODE_3, /* SPI_CPHA and SPI_CPOL */
+	},
+#endif
+};
+
+/* SPI controller data */
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+	[0] = {
+		.start = SPI0_REGBASE,
+		.end   = SPI0_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = CH_SPI,
+		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct bfin5xx_spi_master spi_bfin_master_info = {
+	.num_chipselect = 8,
+	.enable_dma     = 1,  /* master has the ability to do dma transfer */
+	.pin_req        = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device spi_bfin_master_device = {
+	.name          = "bfin-spi",
+	.id            = 0, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
+	.resource      = bfin_spi0_resource,
+	.dev           = {
+		.platform_data = &spi_bfin_master_info, /* Passed to driver */
+	},
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+static struct resource bfin_uart0_resources[] = {
+	{
+		.start = UART0_THR,
+		.end = UART0_GCTL+2,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART0_ERROR,
+		.end = IRQ_UART0_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_TX,
+		.end = CH_UART0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static unsigned short bfin_uart0_peripherals[] = {
+	P_UART0_TX, P_UART0_RX, 0
+};
+
+static struct platform_device bfin_uart0_device = {
+	.name = "bfin-uart",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
+	.resource = bfin_uart0_resources,
+	.dev = {
+		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
+	},
+};
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_UART1
+static struct resource bfin_uart1_resources[] = {
+	{
+		.start = UART1_THR,
+		.end   = UART1_GCTL+2,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART1_RX,
+		.end   = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART1_ERROR,
+		.end   = IRQ_UART1_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_TX,
+		.end   = CH_UART1_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end   = CH_UART1_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static unsigned short bfin_uart1_peripherals[] = {
+	P_UART1_TX, P_UART1_RX, 0
+};
+
+static struct platform_device bfin_uart1_device = {
+	.name          = "bfin-uart",
+	.id            = 1,
+	.num_resources = ARRAY_SIZE(bfin_uart1_resources),
+	.resource      = bfin_uart1_resources,
+	.dev = {
+		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static struct resource bfin_twi0_resource[] = {
+	[0] = {
+		.start = TWI0_REGBASE,
+		.end   = TWI0_REGBASE + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI,
+		.end   = IRQ_TWI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi_device = {
+	.name          = "i2c-bfin-twi",
+	.id            = 0,
+	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
+	.resource      = bfin_twi0_resource,
+};
+#endif
+
+static struct platform_device *dnp5370_devices[] __initdata = {
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	&bfin_uart0_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	&bfin_uart1_device,
+#endif
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+	&asmb_flash_device,
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+	&bfin_mii_bus,
+	&bfin_mac_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+	&spi_bfin_master_device,
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+	&i2c_bfin_twi_device,
+#endif
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+	&rtc_device,
+#endif
+
+};
+
+static int __init dnp5370_init(void)
+{
+	printk(KERN_INFO "DNP/5370: registering device resources\n");
+	platform_add_devices(dnp5370_devices, ARRAY_SIZE(dnp5370_devices));
+	printk(KERN_INFO "DNP/5370: registering %zu SPI slave devices\n",
+	       ARRAY_SIZE(bfin_spi_board_info));
+	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+	printk(KERN_INFO "DNP/5370: MAC %pM\n", (void *)FLASH_MAC);
+	return 0;
+}
+arch_initcall(dnp5370_init);
+
+/*
+ * Currently the MAC address is saved in Flash by U-Boot
+ */
+void bfin_get_ether_addr(char *addr)
+{
+	*(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
+	*(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
+}
+EXPORT_SYMBOL(bfin_get_ether_addr);
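bfin_get_ether_addr() hands the MAC driver six bytes straight out of the read-only "MAC address(nor)" partition at FLASH_MAC, written there by U-Boot. Since the Blackfin is little-endian, the 32-bit read fills addr[0..3] and the 16-bit read fills addr[4..5]; an explicit byte-wise equivalent, with a hypothetical helper name, just to make the layout obvious:

	static void dnp5370_copy_mac(u8 *addr)		/* hypothetical name, illustration only */
	{
		u32 lo = bfin_read32(FLASH_MAC);	/* addr[0..3], least significant byte first */
		u16 hi = bfin_read16(FLASH_MAC + 4);	/* addr[4..5] */

		addr[0] = lo;
		addr[1] = lo >> 8;
		addr[2] = lo >> 16;
		addr[3] = lo >> 24;
		addr[4] = hi;
		addr[5] = hi >> 8;
	}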
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 4980051..bfb3671 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -263,7 +263,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -306,7 +306,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -419,9 +419,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +453,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index b958078..9389f03 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -367,7 +367,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -410,7 +410,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 3aa344c..2c69785 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -289,7 +289,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
@@ -693,7 +693,7 @@
 #endif
 
 #if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
-unsigned short ad2s120x_platform_data[] = {
+static unsigned short ad2s120x_platform_data[] = {
 	/* used as SAMPLE and RDVEL */
 	GPIO_PF5, GPIO_PF6, 0
 };
@@ -705,7 +705,7 @@
 #endif
 
 #if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
-unsigned short ad2s1210_platform_data[] = {
+static unsigned short ad2s1210_platform_data[] = {
 	/* use as SAMPLE, A0, A1 */
 	GPIO_PF7, GPIO_PF8, GPIO_PF9,
 # if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
@@ -1717,7 +1717,7 @@
 #endif
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -1760,7 +1760,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -2447,9 +2447,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -2481,9 +2481,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 31498ad..0761b20 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -356,7 +356,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -399,7 +399,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -512,9 +512,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -546,9 +546,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/dma.c b/arch/blackfin/mach-bf537/dma.c
index 5c8c4ed..5c62e99 100644
--- a/arch/blackfin/mach-bf537/dma.c
+++ b/arch/blackfin/mach-bf537/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
new file mode 100644
index 0000000..00c603f
--- /dev/null
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+
+#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
index 635c91c..3e955db 100644
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,49 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	int		cts_pin;
-	int 		rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -145,3 +75,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index a12d4b6..baa096f 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -1,7 +1,7 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
@@ -10,34 +10,24 @@
 #define BF537_FAMILY
 
 #include "bf537.h"
-#include "defBF534.h"
 #include "anomaly.h"
 
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF534
+# include "defBF534.h"
+#endif
 #if defined(CONFIG_BF537) || defined(CONFIG_BF536)
-#include "defBF537.h"
+# include "defBF537.h"
 #endif
 
 #if !defined(__ASSEMBLY__)
-#include "cdefBF534.h"
-
-#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
-#include "cdefBF537.h"
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF534
+#  include "cdefBF534.h"
+# endif
+# if defined(CONFIG_BF537) || defined(CONFIG_BF536)
+#  include "cdefBF537.h"
+# endif
 #endif
-#endif
-
-#define BFIN_UART_NR_PORTS	2
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
 
 #endif
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
index fbeb35e..563ede9 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF534_H
 #define _CDEF_BF534_H
 
-#include <asm/blackfin.h>
-
-/* Include all Core registers and bit definitions 									*/
-#include "defBF534.h"
-
-/* Include core specific register pointer definitions 								*/
-#include <asm/cdef_LPBlackfin.h>
-
 /* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
 #define bfin_read_PLL_DIV()                  bfin_read16(PLL_DIV)
@@ -355,16 +347,10 @@
 #define bfin_write_EBIU_SDSTAT(val)          bfin_write16(EBIU_SDSTAT,val)
 
 /* DMA Traffic Control Registers													*/
-#define bfin_read_DMA_TC_PER()                bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)            bfin_write16(DMA_TC_PER,val)
-#define bfin_read_DMA_TC_CNT()                bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)            bfin_write16(DMA_TC_CNT,val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER()                bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)            bfin_write16(DMA_TCPER,val)
-#define bfin_read_DMA_TCCNT()                bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)            bfin_write16(DMA_TCCNT,val)
+#define bfin_read_DMAC_TC_PER()              bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)          bfin_write16(DMAC_TC_PER,val)
+#define bfin_read_DMAC_TC_CNT()              bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)          bfin_write16(DMAC_TC_CNT,val)
 
 /* DMA Controller																	*/
 #define bfin_read_DMA0_CONFIG()              bfin_read16(DMA0_CONFIG)
@@ -1747,7 +1733,4 @@
 #define bfin_read_HMDMA1_BCOUNT()            bfin_read16(HMDMA1_BCOUNT)
 #define bfin_write_HMDMA1_BCOUNT(val)        bfin_write16(HMDMA1_BCOUNT,val)
 
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif				/* _CDEF_BF534_H */
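The DMA_TC_PER/DMA_TC_CNT accessors and the older DMA_TCPER/DMA_TCCNT aliases disappear here, so any out-of-tree user has to move to the single DMAC_TC_* spelling. A before/after sketch with an arbitrary example value:

	/* before: bfin_write_DMA_TC_PER(0x0100);  or  bfin_write_DMA_TCPER(0x0100); */
	bfin_write_DMAC_TC_PER(0x0100);
	if (bfin_read_DMAC_TC_CNT())
		bfin_write_DMAC_TC_CNT(0);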
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
index 9363c39..19ec21e 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later
  */
@@ -10,9 +10,6 @@
 /* Include MMRs Common to BF534 								*/
 #include "cdefBF534.h"
 
-/* Include all Core registers and bit definitions 									*/
-#include "defBF537.h"
-
 /* Include Macro "Defines" For EMAC (Unique to BF536/BF537		*/
 /* 10/100 Ethernet Controller	(0xFFC03000 - 0xFFC031FF) 						*/
 #define bfin_read_EMAC_OPMODE()              bfin_read32(EMAC_OPMODE)
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 0323e6b..725bb35 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _DEF_BF534_H
 #define _DEF_BF534_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
 /************************************************************************************
 ** System MMR Register Map
 *************************************************************************************/
@@ -193,12 +190,8 @@
 #define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register                                                */
 
 /* DMA Traffic Control Registers													*/
-#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+#define DMAC_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMAC_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
 
 /* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
 #define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register               */
@@ -1029,48 +1022,6 @@
 #define IWR_ENABLE(x)	(1 << ((x)&0x1F))	/* Wakeup Enable Peripheral #x          */
 #define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x         */
 
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks												*/
-#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select   */
-#define STB			0x04	/* Stop Bits                    */
-#define PEN			0x08	/* Parity Enable                */
-#define EPS			0x10	/* Even Parity Select   */
-#define STP			0x20	/* Stick Parity                 */
-#define SB			0x40	/* Set Break                    */
-#define DLAB		0x80	/* Divisor Latch Access */
-
-/* UARTx_MCR Mask										*/
-#define LOOP_ENA		0x10	/* Loopback Mode Enable         */
-#define LOOP_ENA_P	0x04
-/* UARTx_LSR Masks										*/
-#define DR			0x01	/* Data Ready                           */
-#define OE			0x02	/* Overrun Error                        */
-#define PE			0x04	/* Parity Error                         */
-#define FE			0x08	/* Framing Error                        */
-#define BI			0x10	/* Break Interrupt                      */
-#define THRE		0x20	/* THR Empty                            */
-#define TEMT		0x40	/* TSR and UART_THR Empty       */
-
-/* UARTx_IER Masks															*/
-#define ERBFI		0x01	/* Enable Receive Buffer Full Interrupt         */
-#define ETBEI		0x02	/* Enable Transmit Buffer Empty Interrupt       */
-#define ELSI		0x04	/* Enable RX Status Interrupt                           */
-
-/* UARTx_IIR Masks														*/
-#define NINT		0x01	/* Pending Interrupt                                    */
-#define IIR_TX_READY    0x02	/* UART_THR empty                               */
-#define IIR_RX_READY    0x04	/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06	/* Receive line status                          */
-#define IIR_STATUS	0x06
-
-/* UARTx_GCTL Masks													*/
-#define UCEN		0x01	/* Enable UARTx Clocks                          */
-#define IREN		0x02	/* Enable IrDA Mode                                     */
-#define TPOLC		0x04	/* IrDA TX Polarity Change                      */
-#define RPOLC		0x08	/* IrDA RX Polarity Change                      */
-#define FPE			0x10	/* Force Parity Error On Transmit       */
-#define FFE			0x20	/* Force Framing Error On Transmit      */
-
 /*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
 /* TIMER_ENABLE Masks													*/
 #define TIMEN0			0x0001	/* Enable Timer 0                                       */
@@ -1141,62 +1092,6 @@
 #define EMU_RUN			0x0200	/* Emulation Behavior Select                    */
 #define ERR_TYP			0xC000	/* Error Type                                                   */
 
-/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
-/* Port F Masks 														*/
-#define PF0		0x0001
-#define PF1		0x0002
-#define PF2		0x0004
-#define PF3		0x0008
-#define PF4		0x0010
-#define PF5		0x0020
-#define PF6		0x0040
-#define PF7		0x0080
-#define PF8		0x0100
-#define PF9		0x0200
-#define PF10	0x0400
-#define PF11	0x0800
-#define PF12	0x1000
-#define PF13	0x2000
-#define PF14	0x4000
-#define PF15	0x8000
-
-/* Port G Masks															*/
-#define PG0		0x0001
-#define PG1		0x0002
-#define PG2		0x0004
-#define PG3		0x0008
-#define PG4		0x0010
-#define PG5		0x0020
-#define PG6		0x0040
-#define PG7		0x0080
-#define PG8		0x0100
-#define PG9		0x0200
-#define PG10	0x0400
-#define PG11	0x0800
-#define PG12	0x1000
-#define PG13	0x2000
-#define PG14	0x4000
-#define PG15	0x8000
-
-/* Port H Masks															*/
-#define PH0		0x0001
-#define PH1		0x0002
-#define PH2		0x0004
-#define PH3		0x0008
-#define PH4		0x0010
-#define PH5		0x0020
-#define PH6		0x0040
-#define PH7		0x0080
-#define PH8		0x0100
-#define PH9		0x0200
-#define PH10	0x0400
-#define PH11	0x0800
-#define PH12	0x1000
-#define PH13	0x2000
-#define PH14	0x4000
-#define PH15	0x8000
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
 /* EBIU_AMGCTL Masks																	*/
 #define AMCKEN			0x0001	/* Enable CLKOUT                                                                        */
@@ -1523,7 +1418,7 @@
 #define	SADD_LEN	0x0002	/* Slave Address Length                                                 */
 #define	STDVAL		0x0004	/* Slave Transmit Data Valid                                    */
 #define	NAK			0x0008	/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010	/* General Call Adrress Matching Enabled                */
+#define	GEN			0x0010	/* General Call Address Matching Enabled                */
 
 /* TWI_SLAVE_STAT Masks															*/
 #define	SDIR		0x0001	/* Slave Transfer Direction (Transmit/Receive*) */
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF537.h b/arch/blackfin/mach-bf537/include/mach/defBF537.h
index 8cb5d5c..3d471d7 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF537.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _DEF_BF537_H
 #define _DEF_BF537_H
 
-/* Include all Core registers and bit definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 /* Include all MMR and bit defines common to BF534 */
 #include "defBF534.h"
 
diff --git a/arch/blackfin/mach-bf537/include/mach/gpio.h b/arch/blackfin/mach-bf537/include/mach/gpio.h
index f80c299..fba606b 100644
--- a/arch/blackfin/mach-bf537/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf537/include/mach/gpio.h
@@ -62,4 +62,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf537/include/mach/pll.h b/arch/blackfin/mach-bf537/include/mach/pll.h
index 169c106..94cca67 100644
--- a/arch/blackfin/mach-bf537/include/mach/pll.h
+++ b/arch/blackfin/mach-bf537/include/mach/pll.h
@@ -1,57 +1 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index c6fb0a5..e61424e 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -82,7 +82,7 @@
 #endif
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -125,7 +125,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -168,7 +168,7 @@
 	},
 };
 
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
 	P_UART2_TX, P_UART2_RX, 0
 };
 
@@ -282,9 +282,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -316,9 +316,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -350,7 +350,7 @@
 	},
 };
 
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
 	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
 	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
 };
@@ -384,7 +384,7 @@
 	},
 };
 
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
 	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
 	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
 };
@@ -402,7 +402,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
diff --git a/arch/blackfin/mach-bf538/dma.c b/arch/blackfin/mach-bf538/dma.c
index 5dc0225..cce8ef5 100644
--- a/arch/blackfin/mach-bf538/dma.c
+++ b/arch/blackfin/mach-bf538/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
@@ -32,14 +32,14 @@
 	(struct dma_register *) DMA17_NEXT_DESC_PTR,
 	(struct dma_register *) DMA18_NEXT_DESC_PTR,
 	(struct dma_register *) DMA19_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_S1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
 };
 EXPORT_SYMBOL(dma_io_base_addr);
 
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
new file mode 100644
index 0000000..c66e276
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	3
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
index 5c14814..beb502e 100644
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port	port;
-	unsigned int		old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list	cts_timer;
-	int		cts_pin;
-	int		rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -160,3 +89,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 08b5eab..791d084 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -10,31 +10,24 @@
 #define BF538_FAMILY
 
 #include "bf538.h"
-#include "defBF539.h"
 #include "anomaly.h"
 
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF538.h"
-
-#if defined(CONFIG_BF539)
-#include "cdefBF539.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF538
+# include "defBF538.h"
 #endif
+#ifdef CONFIG_BF539
+# include "defBF539.h"
 #endif
 
-#define BFIN_UART_NR_PORTS	3
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF538
+#  include "cdefBF538.h"
+# endif
+# ifdef CONFIG_BF539
+#  include "cdefBF539.h"
+# endif
+#endif
 
 #endif
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
index 085b06b..f6a5679 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF538_H
 #define _CDEF_BF538_H
 
-#include <asm/blackfin.h>
-
-/*include all Core registers and bit definitions*/
-#include "defBF539.h"
-
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 #define bfin_writePTR(addr, val) bfin_write32(addr, val)
 
 #define bfin_read_PLL_CTL()            bfin_read16(PLL_CTL)
@@ -487,10 +479,10 @@
 #define bfin_write_EBIU_SDRRC(val)     bfin_write16(EBIU_SDRRC, val)
 #define bfin_read_EBIU_SDSTAT()        bfin_read16(EBIU_SDSTAT)
 #define bfin_write_EBIU_SDSTAT(val)    bfin_write16(EBIU_SDSTAT, val)
-#define bfin_read_DMA0_TC_PER()        bfin_read16(DMA0_TC_PER)
-#define bfin_write_DMA0_TC_PER(val)    bfin_write16(DMA0_TC_PER, val)
-#define bfin_read_DMA0_TC_CNT()        bfin_read16(DMA0_TC_CNT)
-#define bfin_write_DMA0_TC_CNT(val)    bfin_write16(DMA0_TC_CNT, val)
+#define bfin_read_DMAC0_TC_PER()       bfin_read16(DMAC0_TC_PER)
+#define bfin_write_DMAC0_TC_PER(val)   bfin_write16(DMAC0_TC_PER, val)
+#define bfin_read_DMAC0_TC_CNT()       bfin_read16(DMAC0_TC_CNT)
+#define bfin_write_DMAC0_TC_CNT(val)   bfin_write16(DMAC0_TC_CNT, val)
 #define bfin_read_DMA0_NEXT_DESC_PTR() bfin_readPTR(DMA0_NEXT_DESC_PTR)
 #define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_writePTR(DMA0_NEXT_DESC_PTR, val)
 #define bfin_read_DMA0_START_ADDR()    bfin_readPTR(DMA0_START_ADDR)
@@ -699,10 +691,10 @@
 #define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
 #define bfin_read_DMA7_CURR_Y_COUNT()  bfin_read16(DMA7_CURR_Y_COUNT)
 #define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_TC_PER()        bfin_read16(DMA1_TC_PER)
-#define bfin_write_DMA1_TC_PER(val)    bfin_write16(DMA1_TC_PER, val)
-#define bfin_read_DMA1_TC_CNT()        bfin_read16(DMA1_TC_CNT)
-#define bfin_write_DMA1_TC_CNT(val)    bfin_write16(DMA1_TC_CNT, val)
+#define bfin_read_DMAC1_TC_PER()       bfin_read16(DMAC1_TC_PER)
+#define bfin_write_DMAC1_TC_PER(val)   bfin_write16(DMAC1_TC_PER, val)
+#define bfin_read_DMAC1_TC_CNT()       bfin_read16(DMAC1_TC_CNT)
+#define bfin_write_DMAC1_TC_CNT(val)   bfin_write16(DMAC1_TC_CNT, val)
 #define bfin_read_DMA8_NEXT_DESC_PTR() bfin_readPTR(DMA8_NEXT_DESC_PTR)
 #define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_writePTR(DMA8_NEXT_DESC_PTR, val)
 #define bfin_read_DMA8_START_ADDR()    bfin_readPTR(DMA8_START_ADDR)
@@ -1015,273 +1007,214 @@
 #define bfin_write_DMA19_CURR_X_COUNT(val) bfin_write16(DMA19_CURR_X_COUNT, val)
 #define bfin_read_DMA19_CURR_Y_COUNT() bfin_read16(DMA19_CURR_Y_COUNT)
 #define bfin_write_DMA19_CURR_Y_COUNT(val) bfin_write16(DMA19_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_D0_START_ADDR() bfin_readPTR(MDMA0_D0_START_ADDR)
-#define bfin_write_MDMA0_D0_START_ADDR(val) bfin_writePTR(MDMA0_D0_START_ADDR, val)
-#define bfin_read_MDMA0_D0_CONFIG()    bfin_read16(MDMA0_D0_CONFIG)
-#define bfin_write_MDMA0_D0_CONFIG(val) bfin_write16(MDMA0_D0_CONFIG, val)
-#define bfin_read_MDMA0_D0_X_COUNT()   bfin_read16(MDMA0_D0_X_COUNT)
-#define bfin_write_MDMA0_D0_X_COUNT(val) bfin_write16(MDMA0_D0_X_COUNT, val)
-#define bfin_read_MDMA0_D0_X_MODIFY()  bfin_read16(MDMA0_D0_X_MODIFY)
-#define bfin_write_MDMA0_D0_X_MODIFY(val) bfin_write16(MDMA0_D0_X_MODIFY, val)
-#define bfin_read_MDMA0_D0_Y_COUNT()   bfin_read16(MDMA0_D0_Y_COUNT)
-#define bfin_write_MDMA0_D0_Y_COUNT(val) bfin_write16(MDMA0_D0_Y_COUNT, val)
-#define bfin_read_MDMA0_D0_Y_MODIFY()  bfin_read16(MDMA0_D0_Y_MODIFY)
-#define bfin_write_MDMA0_D0_Y_MODIFY(val) bfin_write16(MDMA0_D0_Y_MODIFY, val)
-#define bfin_read_MDMA0_D0_CURR_DESC_PTR() bfin_readPTR(MDMA0_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA0_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_D0_CURR_ADDR() bfin_readPTR(MDMA0_D0_CURR_ADDR)
-#define bfin_write_MDMA0_D0_CURR_ADDR(val) bfin_writePTR(MDMA0_D0_CURR_ADDR, val)
-#define bfin_read_MDMA0_D0_IRQ_STATUS() bfin_read16(MDMA0_D0_IRQ_STATUS)
-#define bfin_write_MDMA0_D0_IRQ_STATUS(val) bfin_write16(MDMA0_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA0_D0_PERIPHERAL_MAP() bfin_read16(MDMA0_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_D0_CURR_X_COUNT() bfin_read16(MDMA0_D0_CURR_X_COUNT)
-#define bfin_write_MDMA0_D0_CURR_X_COUNT(val) bfin_write16(MDMA0_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_D0_CURR_Y_COUNT() bfin_read16(MDMA0_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA0_D0_CURR_Y_COUNT(val) bfin_write16(MDMA0_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_S0_START_ADDR() bfin_readPTR(MDMA0_S0_START_ADDR)
-#define bfin_write_MDMA0_S0_START_ADDR(val) bfin_writePTR(MDMA0_S0_START_ADDR, val)
-#define bfin_read_MDMA0_S0_CONFIG()    bfin_read16(MDMA0_S0_CONFIG)
-#define bfin_write_MDMA0_S0_CONFIG(val) bfin_write16(MDMA0_S0_CONFIG, val)
-#define bfin_read_MDMA0_S0_X_COUNT()   bfin_read16(MDMA0_S0_X_COUNT)
-#define bfin_write_MDMA0_S0_X_COUNT(val) bfin_write16(MDMA0_S0_X_COUNT, val)
-#define bfin_read_MDMA0_S0_X_MODIFY()  bfin_read16(MDMA0_S0_X_MODIFY)
-#define bfin_write_MDMA0_S0_X_MODIFY(val) bfin_write16(MDMA0_S0_X_MODIFY, val)
-#define bfin_read_MDMA0_S0_Y_COUNT()   bfin_read16(MDMA0_S0_Y_COUNT)
-#define bfin_write_MDMA0_S0_Y_COUNT(val) bfin_write16(MDMA0_S0_Y_COUNT, val)
-#define bfin_read_MDMA0_S0_Y_MODIFY()  bfin_read16(MDMA0_S0_Y_MODIFY)
-#define bfin_write_MDMA0_S0_Y_MODIFY(val) bfin_write16(MDMA0_S0_Y_MODIFY, val)
-#define bfin_read_MDMA0_S0_CURR_DESC_PTR() bfin_readPTR(MDMA0_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA0_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_S0_CURR_ADDR() bfin_readPTR(MDMA0_S0_CURR_ADDR)
-#define bfin_write_MDMA0_S0_CURR_ADDR(val) bfin_writePTR(MDMA0_S0_CURR_ADDR, val)
-#define bfin_read_MDMA0_S0_IRQ_STATUS() bfin_read16(MDMA0_S0_IRQ_STATUS)
-#define bfin_write_MDMA0_S0_IRQ_STATUS(val) bfin_write16(MDMA0_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA0_S0_PERIPHERAL_MAP() bfin_read16(MDMA0_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_S0_CURR_X_COUNT() bfin_read16(MDMA0_S0_CURR_X_COUNT)
-#define bfin_write_MDMA0_S0_CURR_X_COUNT(val) bfin_write16(MDMA0_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_S0_CURR_Y_COUNT() bfin_read16(MDMA0_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA0_S0_CURR_Y_COUNT(val) bfin_write16(MDMA0_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_D1_START_ADDR() bfin_readPTR(MDMA0_D1_START_ADDR)
-#define bfin_write_MDMA0_D1_START_ADDR(val) bfin_writePTR(MDMA0_D1_START_ADDR, val)
-#define bfin_read_MDMA0_D1_CONFIG()    bfin_read16(MDMA0_D1_CONFIG)
-#define bfin_write_MDMA0_D1_CONFIG(val) bfin_write16(MDMA0_D1_CONFIG, val)
-#define bfin_read_MDMA0_D1_X_COUNT()   bfin_read16(MDMA0_D1_X_COUNT)
-#define bfin_write_MDMA0_D1_X_COUNT(val) bfin_write16(MDMA0_D1_X_COUNT, val)
-#define bfin_read_MDMA0_D1_X_MODIFY()  bfin_read16(MDMA0_D1_X_MODIFY)
-#define bfin_write_MDMA0_D1_X_MODIFY(val) bfin_write16(MDMA0_D1_X_MODIFY, val)
-#define bfin_read_MDMA0_D1_Y_COUNT()   bfin_read16(MDMA0_D1_Y_COUNT)
-#define bfin_write_MDMA0_D1_Y_COUNT(val) bfin_write16(MDMA0_D1_Y_COUNT, val)
-#define bfin_read_MDMA0_D1_Y_MODIFY()  bfin_read16(MDMA0_D1_Y_MODIFY)
-#define bfin_write_MDMA0_D1_Y_MODIFY(val) bfin_write16(MDMA0_D1_Y_MODIFY, val)
-#define bfin_read_MDMA0_D1_CURR_DESC_PTR() bfin_readPTR(MDMA0_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA0_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_D1_CURR_ADDR() bfin_readPTR(MDMA0_D1_CURR_ADDR)
-#define bfin_write_MDMA0_D1_CURR_ADDR(val) bfin_writePTR(MDMA0_D1_CURR_ADDR, val)
-#define bfin_read_MDMA0_D1_IRQ_STATUS() bfin_read16(MDMA0_D1_IRQ_STATUS)
-#define bfin_write_MDMA0_D1_IRQ_STATUS(val) bfin_write16(MDMA0_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA0_D1_PERIPHERAL_MAP() bfin_read16(MDMA0_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_D1_CURR_X_COUNT() bfin_read16(MDMA0_D1_CURR_X_COUNT)
-#define bfin_write_MDMA0_D1_CURR_X_COUNT(val) bfin_write16(MDMA0_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_D1_CURR_Y_COUNT() bfin_read16(MDMA0_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA0_D1_CURR_Y_COUNT(val) bfin_write16(MDMA0_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_S1_START_ADDR() bfin_readPTR(MDMA0_S1_START_ADDR)
-#define bfin_write_MDMA0_S1_START_ADDR(val) bfin_writePTR(MDMA0_S1_START_ADDR, val)
-#define bfin_read_MDMA0_S1_CONFIG()    bfin_read16(MDMA0_S1_CONFIG)
-#define bfin_write_MDMA0_S1_CONFIG(val) bfin_write16(MDMA0_S1_CONFIG, val)
-#define bfin_read_MDMA0_S1_X_COUNT()   bfin_read16(MDMA0_S1_X_COUNT)
-#define bfin_write_MDMA0_S1_X_COUNT(val) bfin_write16(MDMA0_S1_X_COUNT, val)
-#define bfin_read_MDMA0_S1_X_MODIFY()  bfin_read16(MDMA0_S1_X_MODIFY)
-#define bfin_write_MDMA0_S1_X_MODIFY(val) bfin_write16(MDMA0_S1_X_MODIFY, val)
-#define bfin_read_MDMA0_S1_Y_COUNT()   bfin_read16(MDMA0_S1_Y_COUNT)
-#define bfin_write_MDMA0_S1_Y_COUNT(val) bfin_write16(MDMA0_S1_Y_COUNT, val)
-#define bfin_read_MDMA0_S1_Y_MODIFY()  bfin_read16(MDMA0_S1_Y_MODIFY)
-#define bfin_write_MDMA0_S1_Y_MODIFY(val) bfin_write16(MDMA0_S1_Y_MODIFY, val)
-#define bfin_read_MDMA0_S1_CURR_DESC_PTR() bfin_readPTR(MDMA0_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA0_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_S1_CURR_ADDR() bfin_readPTR(MDMA0_S1_CURR_ADDR)
-#define bfin_write_MDMA0_S1_CURR_ADDR(val) bfin_writePTR(MDMA0_S1_CURR_ADDR, val)
-#define bfin_read_MDMA0_S1_IRQ_STATUS() bfin_read16(MDMA0_S1_IRQ_STATUS)
-#define bfin_write_MDMA0_S1_IRQ_STATUS(val) bfin_write16(MDMA0_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA0_S1_PERIPHERAL_MAP() bfin_read16(MDMA0_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_S1_CURR_X_COUNT() bfin_read16(MDMA0_S1_CURR_X_COUNT)
-#define bfin_write_MDMA0_S1_CURR_X_COUNT(val) bfin_write16(MDMA0_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_S1_CURR_Y_COUNT() bfin_read16(MDMA0_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA0_S1_CURR_Y_COUNT(val) bfin_write16(MDMA0_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_D0_START_ADDR() bfin_readPTR(MDMA1_D0_START_ADDR)
-#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_writePTR(MDMA1_D0_START_ADDR, val)
-#define bfin_read_MDMA1_D0_CONFIG()    bfin_read16(MDMA1_D0_CONFIG)
-#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG, val)
-#define bfin_read_MDMA1_D0_X_COUNT()   bfin_read16(MDMA1_D0_X_COUNT)
-#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT, val)
-#define bfin_read_MDMA1_D0_X_MODIFY()  bfin_read16(MDMA1_D0_X_MODIFY)
-#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY, val)
-#define bfin_read_MDMA1_D0_Y_COUNT()   bfin_read16(MDMA1_D0_Y_COUNT)
-#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT, val)
-#define bfin_read_MDMA1_D0_Y_MODIFY()  bfin_read16(MDMA1_D0_Y_MODIFY)
-#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY, val)
-#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_readPTR(MDMA1_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_readPTR(MDMA1_D0_CURR_ADDR)
-#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_writePTR(MDMA1_D0_CURR_ADDR, val)
-#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS)
-#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT)
-#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_S0_START_ADDR() bfin_readPTR(MDMA1_S0_START_ADDR)
-#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_writePTR(MDMA1_S0_START_ADDR, val)
-#define bfin_read_MDMA1_S0_CONFIG()    bfin_read16(MDMA1_S0_CONFIG)
-#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG, val)
-#define bfin_read_MDMA1_S0_X_COUNT()   bfin_read16(MDMA1_S0_X_COUNT)
-#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT, val)
-#define bfin_read_MDMA1_S0_X_MODIFY()  bfin_read16(MDMA1_S0_X_MODIFY)
-#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY, val)
-#define bfin_read_MDMA1_S0_Y_COUNT()   bfin_read16(MDMA1_S0_Y_COUNT)
-#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT, val)
-#define bfin_read_MDMA1_S0_Y_MODIFY()  bfin_read16(MDMA1_S0_Y_MODIFY)
-#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY, val)
-#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_readPTR(MDMA1_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_readPTR(MDMA1_S0_CURR_ADDR)
-#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_writePTR(MDMA1_S0_CURR_ADDR, val)
-#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS)
-#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT)
-#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_D1_START_ADDR() bfin_readPTR(MDMA1_D1_START_ADDR)
-#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_writePTR(MDMA1_D1_START_ADDR, val)
-#define bfin_read_MDMA1_D1_CONFIG()    bfin_read16(MDMA1_D1_CONFIG)
-#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG, val)
-#define bfin_read_MDMA1_D1_X_COUNT()   bfin_read16(MDMA1_D1_X_COUNT)
-#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT, val)
-#define bfin_read_MDMA1_D1_X_MODIFY()  bfin_read16(MDMA1_D1_X_MODIFY)
-#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY, val)
-#define bfin_read_MDMA1_D1_Y_COUNT()   bfin_read16(MDMA1_D1_Y_COUNT)
-#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT, val)
-#define bfin_read_MDMA1_D1_Y_MODIFY()  bfin_read16(MDMA1_D1_Y_MODIFY)
-#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY, val)
-#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_readPTR(MDMA1_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_readPTR(MDMA1_D1_CURR_ADDR)
-#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_writePTR(MDMA1_D1_CURR_ADDR, val)
-#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS)
-#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT)
-#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_S1_START_ADDR() bfin_readPTR(MDMA1_S1_START_ADDR)
-#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_writePTR(MDMA1_S1_START_ADDR, val)
-#define bfin_read_MDMA1_S1_CONFIG()    bfin_read16(MDMA1_S1_CONFIG)
-#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG, val)
-#define bfin_read_MDMA1_S1_X_COUNT()   bfin_read16(MDMA1_S1_X_COUNT)
-#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT, val)
-#define bfin_read_MDMA1_S1_X_MODIFY()  bfin_read16(MDMA1_S1_X_MODIFY)
-#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY, val)
-#define bfin_read_MDMA1_S1_Y_COUNT()   bfin_read16(MDMA1_S1_Y_COUNT)
-#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT, val)
-#define bfin_read_MDMA1_S1_Y_MODIFY()  bfin_read16(MDMA1_S1_Y_MODIFY)
-#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY, val)
-#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_readPTR(MDMA1_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_readPTR(MDMA1_S1_CURR_ADDR)
-#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_writePTR(MDMA1_S1_CURR_ADDR, val)
-#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS)
-#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT)
-#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val)
-
-#define bfin_read_MDMA_S0_CONFIG()  bfin_read_MDMA0_S0_CONFIG()
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA0_S0_CONFIG(val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()  bfin_read_MDMA0_S0_IRQ_STATUS()
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA0_S0_IRQ_STATUS(val)
-#define bfin_read_MDMA_S0_X_MODIFY()  bfin_read_MDMA0_S0_X_MODIFY()
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA0_S0_X_MODIFY(val)
-#define bfin_read_MDMA_S0_Y_MODIFY()  bfin_read_MDMA0_S0_Y_MODIFY()
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA0_S0_Y_MODIFY(val)
-#define bfin_read_MDMA_S0_X_COUNT()  bfin_read_MDMA0_S0_X_COUNT()
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA0_S0_X_COUNT(val)
-#define bfin_read_MDMA_S0_Y_COUNT()  bfin_read_MDMA0_S0_Y_COUNT()
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA0_S0_Y_COUNT(val)
-#define bfin_read_MDMA_S0_START_ADDR()  bfin_read_MDMA0_S0_START_ADDR()
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA0_S0_START_ADDR(val)
-#define bfin_read_MDMA_D0_CONFIG()  bfin_read_MDMA0_D0_CONFIG()
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA0_D0_CONFIG(val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()  bfin_read_MDMA0_D0_IRQ_STATUS()
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA0_D0_IRQ_STATUS(val)
-#define bfin_read_MDMA_D0_X_MODIFY()  bfin_read_MDMA0_D0_X_MODIFY()
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA0_D0_X_MODIFY(val)
-#define bfin_read_MDMA_D0_Y_MODIFY()  bfin_read_MDMA0_D0_Y_MODIFY()
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA0_D0_Y_MODIFY(val)
-#define bfin_read_MDMA_D0_X_COUNT()  bfin_read_MDMA0_D0_X_COUNT()
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA0_D0_X_COUNT(val)
-#define bfin_read_MDMA_D0_Y_COUNT()  bfin_read_MDMA0_D0_Y_COUNT()
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA0_D0_Y_COUNT(val)
-#define bfin_read_MDMA_D0_START_ADDR()  bfin_read_MDMA0_D0_START_ADDR()
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA0_D0_START_ADDR(val)
-
-#define bfin_read_MDMA_S1_CONFIG()  bfin_read_MDMA0_S1_CONFIG()
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA0_S1_CONFIG(val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()  bfin_read_MDMA0_S1_IRQ_STATUS()
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA0_S1_IRQ_STATUS(val)
-#define bfin_read_MDMA_S1_X_MODIFY()  bfin_read_MDMA0_S1_X_MODIFY()
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA0_S1_X_MODIFY(val)
-#define bfin_read_MDMA_S1_Y_MODIFY()  bfin_read_MDMA0_S1_Y_MODIFY()
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA0_S1_Y_MODIFY(val)
-#define bfin_read_MDMA_S1_X_COUNT()  bfin_read_MDMA0_S1_X_COUNT()
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA0_S1_X_COUNT(val)
-#define bfin_read_MDMA_S1_Y_COUNT()  bfin_read_MDMA0_S1_Y_COUNT()
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA0_S1_Y_COUNT(val)
-#define bfin_read_MDMA_S1_START_ADDR()  bfin_read_MDMA0_S1_START_ADDR()
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA0_S1_START_ADDR(val)
-#define bfin_read_MDMA_D1_CONFIG()  bfin_read_MDMA0_D1_CONFIG()
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA0_D1_CONFIG(val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()  bfin_read_MDMA0_D1_IRQ_STATUS()
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA0_D1_IRQ_STATUS(val)
-#define bfin_read_MDMA_D1_X_MODIFY()  bfin_read_MDMA0_D1_X_MODIFY()
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA0_D1_X_MODIFY(val)
-#define bfin_read_MDMA_D1_Y_MODIFY()  bfin_read_MDMA0_D1_Y_MODIFY()
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA0_D1_Y_MODIFY(val)
-#define bfin_read_MDMA_D1_X_COUNT()  bfin_read_MDMA0_D1_X_COUNT()
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA0_D1_X_COUNT(val)
-#define bfin_read_MDMA_D1_Y_COUNT()  bfin_read_MDMA0_D1_Y_COUNT()
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA0_D1_Y_COUNT(val)
-#define bfin_read_MDMA_D1_START_ADDR()  bfin_read_MDMA0_D1_START_ADDR()
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA0_D1_START_ADDR(val)
-
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR() bfin_readPTR(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val) bfin_writePTR(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_CONFIG()    bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_X_COUNT()   bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()  bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_COUNT()   bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()  bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_readPTR(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR() bfin_readPTR(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_writePTR(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR() bfin_readPTR(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val) bfin_writePTR(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_CONFIG()    bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_X_COUNT()   bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()  bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_COUNT()   bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()  bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_readPTR(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR() bfin_readPTR(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_writePTR(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR() bfin_readPTR(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val) bfin_writePTR(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_CONFIG()    bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_X_COUNT()   bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()  bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_COUNT()   bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()  bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_readPTR(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR() bfin_readPTR(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_writePTR(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR() bfin_readPTR(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val) bfin_writePTR(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_CONFIG()    bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_X_COUNT()   bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()  bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_COUNT()   bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()  bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_readPTR(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR() bfin_readPTR(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_writePTR(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D2_NEXT_DESC_PTR() bfin_readPTR(MDMA_D2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D2_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D2_START_ADDR() bfin_readPTR(MDMA_D2_START_ADDR)
+#define bfin_write_MDMA_D2_START_ADDR(val) bfin_writePTR(MDMA_D2_START_ADDR, val)
+#define bfin_read_MDMA_D2_CONFIG()    bfin_read16(MDMA_D2_CONFIG)
+#define bfin_write_MDMA_D2_CONFIG(val) bfin_write16(MDMA_D2_CONFIG, val)
+#define bfin_read_MDMA_D2_X_COUNT()   bfin_read16(MDMA_D2_X_COUNT)
+#define bfin_write_MDMA_D2_X_COUNT(val) bfin_write16(MDMA_D2_X_COUNT, val)
+#define bfin_read_MDMA_D2_X_MODIFY()  bfin_read16(MDMA_D2_X_MODIFY)
+#define bfin_write_MDMA_D2_X_MODIFY(val) bfin_write16(MDMA_D2_X_MODIFY, val)
+#define bfin_read_MDMA_D2_Y_COUNT()   bfin_read16(MDMA_D2_Y_COUNT)
+#define bfin_write_MDMA_D2_Y_COUNT(val) bfin_write16(MDMA_D2_Y_COUNT, val)
+#define bfin_read_MDMA_D2_Y_MODIFY()  bfin_read16(MDMA_D2_Y_MODIFY)
+#define bfin_write_MDMA_D2_Y_MODIFY(val) bfin_write16(MDMA_D2_Y_MODIFY, val)
+#define bfin_read_MDMA_D2_CURR_DESC_PTR() bfin_readPTR(MDMA_D2_CURR_DESC_PTR)
+#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D2_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D2_CURR_ADDR() bfin_readPTR(MDMA_D2_CURR_ADDR)
+#define bfin_write_MDMA_D2_CURR_ADDR(val) bfin_writePTR(MDMA_D2_CURR_ADDR, val)
+#define bfin_read_MDMA_D2_IRQ_STATUS() bfin_read16(MDMA_D2_IRQ_STATUS)
+#define bfin_write_MDMA_D2_IRQ_STATUS(val) bfin_write16(MDMA_D2_IRQ_STATUS, val)
+#define bfin_read_MDMA_D2_PERIPHERAL_MAP() bfin_read16(MDMA_D2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D2_CURR_X_COUNT() bfin_read16(MDMA_D2_CURR_X_COUNT)
+#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D2_CURR_Y_COUNT() bfin_read16(MDMA_D2_CURR_Y_COUNT)
+#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S2_NEXT_DESC_PTR() bfin_readPTR(MDMA_S2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S2_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S2_START_ADDR() bfin_readPTR(MDMA_S2_START_ADDR)
+#define bfin_write_MDMA_S2_START_ADDR(val) bfin_writePTR(MDMA_S2_START_ADDR, val)
+#define bfin_read_MDMA_S2_CONFIG()    bfin_read16(MDMA_S2_CONFIG)
+#define bfin_write_MDMA_S2_CONFIG(val) bfin_write16(MDMA_S2_CONFIG, val)
+#define bfin_read_MDMA_S2_X_COUNT()   bfin_read16(MDMA_S2_X_COUNT)
+#define bfin_write_MDMA_S2_X_COUNT(val) bfin_write16(MDMA_S2_X_COUNT, val)
+#define bfin_read_MDMA_S2_X_MODIFY()  bfin_read16(MDMA_S2_X_MODIFY)
+#define bfin_write_MDMA_S2_X_MODIFY(val) bfin_write16(MDMA_S2_X_MODIFY, val)
+#define bfin_read_MDMA_S2_Y_COUNT()   bfin_read16(MDMA_S2_Y_COUNT)
+#define bfin_write_MDMA_S2_Y_COUNT(val) bfin_write16(MDMA_S2_Y_COUNT, val)
+#define bfin_read_MDMA_S2_Y_MODIFY()  bfin_read16(MDMA_S2_Y_MODIFY)
+#define bfin_write_MDMA_S2_Y_MODIFY(val) bfin_write16(MDMA_S2_Y_MODIFY, val)
+#define bfin_read_MDMA_S2_CURR_DESC_PTR() bfin_readPTR(MDMA_S2_CURR_DESC_PTR)
+#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S2_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S2_CURR_ADDR() bfin_readPTR(MDMA_S2_CURR_ADDR)
+#define bfin_write_MDMA_S2_CURR_ADDR(val) bfin_writePTR(MDMA_S2_CURR_ADDR, val)
+#define bfin_read_MDMA_S2_IRQ_STATUS() bfin_read16(MDMA_S2_IRQ_STATUS)
+#define bfin_write_MDMA_S2_IRQ_STATUS(val) bfin_write16(MDMA_S2_IRQ_STATUS, val)
+#define bfin_read_MDMA_S2_PERIPHERAL_MAP() bfin_read16(MDMA_S2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S2_CURR_X_COUNT() bfin_read16(MDMA_S2_CURR_X_COUNT)
+#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S2_CURR_Y_COUNT() bfin_read16(MDMA_S2_CURR_Y_COUNT)
+#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D3_NEXT_DESC_PTR() bfin_readPTR(MDMA_D3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D3_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D3_START_ADDR() bfin_readPTR(MDMA_D3_START_ADDR)
+#define bfin_write_MDMA_D3_START_ADDR(val) bfin_writePTR(MDMA_D3_START_ADDR, val)
+#define bfin_read_MDMA_D3_CONFIG()    bfin_read16(MDMA_D3_CONFIG)
+#define bfin_write_MDMA_D3_CONFIG(val) bfin_write16(MDMA_D3_CONFIG, val)
+#define bfin_read_MDMA_D3_X_COUNT()   bfin_read16(MDMA_D3_X_COUNT)
+#define bfin_write_MDMA_D3_X_COUNT(val) bfin_write16(MDMA_D3_X_COUNT, val)
+#define bfin_read_MDMA_D3_X_MODIFY()  bfin_read16(MDMA_D3_X_MODIFY)
+#define bfin_write_MDMA_D3_X_MODIFY(val) bfin_write16(MDMA_D3_X_MODIFY, val)
+#define bfin_read_MDMA_D3_Y_COUNT()   bfin_read16(MDMA_D3_Y_COUNT)
+#define bfin_write_MDMA_D3_Y_COUNT(val) bfin_write16(MDMA_D3_Y_COUNT, val)
+#define bfin_read_MDMA_D3_Y_MODIFY()  bfin_read16(MDMA_D3_Y_MODIFY)
+#define bfin_write_MDMA_D3_Y_MODIFY(val) bfin_write16(MDMA_D3_Y_MODIFY, val)
+#define bfin_read_MDMA_D3_CURR_DESC_PTR() bfin_readPTR(MDMA_D3_CURR_DESC_PTR)
+#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D3_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D3_CURR_ADDR() bfin_readPTR(MDMA_D3_CURR_ADDR)
+#define bfin_write_MDMA_D3_CURR_ADDR(val) bfin_writePTR(MDMA_D3_CURR_ADDR, val)
+#define bfin_read_MDMA_D3_IRQ_STATUS() bfin_read16(MDMA_D3_IRQ_STATUS)
+#define bfin_write_MDMA_D3_IRQ_STATUS(val) bfin_write16(MDMA_D3_IRQ_STATUS, val)
+#define bfin_read_MDMA_D3_PERIPHERAL_MAP() bfin_read16(MDMA_D3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D3_CURR_X_COUNT() bfin_read16(MDMA_D3_CURR_X_COUNT)
+#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D3_CURR_Y_COUNT() bfin_read16(MDMA_D3_CURR_Y_COUNT)
+#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S3_NEXT_DESC_PTR() bfin_readPTR(MDMA_S3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S3_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S3_START_ADDR() bfin_readPTR(MDMA_S3_START_ADDR)
+#define bfin_write_MDMA_S3_START_ADDR(val) bfin_writePTR(MDMA_S3_START_ADDR, val)
+#define bfin_read_MDMA_S3_CONFIG()    bfin_read16(MDMA_S3_CONFIG)
+#define bfin_write_MDMA_S3_CONFIG(val) bfin_write16(MDMA_S3_CONFIG, val)
+#define bfin_read_MDMA_S3_X_COUNT()   bfin_read16(MDMA_S3_X_COUNT)
+#define bfin_write_MDMA_S3_X_COUNT(val) bfin_write16(MDMA_S3_X_COUNT, val)
+#define bfin_read_MDMA_S3_X_MODIFY()  bfin_read16(MDMA_S3_X_MODIFY)
+#define bfin_write_MDMA_S3_X_MODIFY(val) bfin_write16(MDMA_S3_X_MODIFY, val)
+#define bfin_read_MDMA_S3_Y_COUNT()   bfin_read16(MDMA_S3_Y_COUNT)
+#define bfin_write_MDMA_S3_Y_COUNT(val) bfin_write16(MDMA_S3_Y_COUNT, val)
+#define bfin_read_MDMA_S3_Y_MODIFY()  bfin_read16(MDMA_S3_Y_MODIFY)
+#define bfin_write_MDMA_S3_Y_MODIFY(val) bfin_write16(MDMA_S3_Y_MODIFY, val)
+#define bfin_read_MDMA_S3_CURR_DESC_PTR() bfin_readPTR(MDMA_S3_CURR_DESC_PTR)
+#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S3_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S3_CURR_ADDR() bfin_readPTR(MDMA_S3_CURR_ADDR)
+#define bfin_write_MDMA_S3_CURR_ADDR(val) bfin_writePTR(MDMA_S3_CURR_ADDR, val)
+#define bfin_read_MDMA_S3_IRQ_STATUS() bfin_read16(MDMA_S3_IRQ_STATUS)
+#define bfin_write_MDMA_S3_IRQ_STATUS(val) bfin_write16(MDMA_S3_IRQ_STATUS, val)
+#define bfin_read_MDMA_S3_PERIPHERAL_MAP() bfin_read16(MDMA_S3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S3_CURR_X_COUNT() bfin_read16(MDMA_S3_CURR_X_COUNT)
+#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S3_CURR_Y_COUNT() bfin_read16(MDMA_S3_CURR_Y_COUNT)
+#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT, val)
 #define bfin_read_PPI_CONTROL()        bfin_read16(PPI_CONTROL)
 #define bfin_write_PPI_CONTROL(val)    bfin_write16(PPI_CONTROL, val)
 #define bfin_read_PPI_STATUS()         bfin_read16(PPI_STATUS)
@@ -2024,7 +1957,4 @@
 #define bfin_read_CAN_MB31_ID1()       bfin_read16(CAN_MB31_ID1)
 #define bfin_write_CAN_MB31_ID1(val)   bfin_write16(CAN_MB31_ID1, val)
 
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
index 198c4bb..acc15f3 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
@@ -1,6 +1,7 @@
-/* DO NOT EDIT THIS FILE
- * Automatically generated by generate-cdef-headers.xsl
- * DO NOT EDIT THIS FILE
+/*
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF539_H
@@ -9,7 +10,6 @@
 /* Include MMRs Common to BF538 								*/
 #include "cdefBF538.h"
 
-
 #define bfin_read_MXVR_CONFIG()        bfin_read16(MXVR_CONFIG)
 #define bfin_write_MXVR_CONFIG(val)    bfin_write16(MXVR_CONFIG, val)
 #define bfin_read_MXVR_PLL_CTL_0()     bfin_read32(MXVR_PLL_CTL_0)
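(For orientation only: each cdef accessor above is a thin wrapper around the raw MMR address it names, so the two spellings below are interchangeable. The bit being set is an arbitrary example, not a documented MXVR_CONFIG field.)

	unsigned short cfg = bfin_read_MXVR_CONFIG();   /* equivalent to bfin_read16(MXVR_CONFIG) */
	bfin_write_MXVR_CONFIG(cfg | 0x0001);           /* read-modify-write; bit 0 chosen purely for illustration */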
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF538.h b/arch/blackfin/mach-bf538/include/mach/defBF538.h
new file mode 100644
index 0000000..d27f81d
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/defBF538.h
@@ -0,0 +1,1825 @@
+/*
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#ifndef _DEF_BF538_H
+#define _DEF_BF538_H
+
+/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
+#define	PLL_CTL			0xFFC00000	/* PLL Control register (16-bit) */
+#define	PLL_DIV			0xFFC00004	/* PLL Divide Register (16-bit) */
+#define	VR_CTL			0xFFC00008	/* Voltage Regulator Control Register (16-bit) */
+#define	PLL_STAT		0xFFC0000C	/* PLL Status register (16-bit) */
+#define	PLL_LOCKCNT		0xFFC00010	/* PLL Lock	Count register (16-bit) */
+#define	CHIPID			0xFFC00014	/* Chip	ID Register */
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
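(A minimal sketch of how the CHIPID masks above could be used; bfin_read32() is the generic 32-bit MMR accessor used throughout these headers, the u32 type comes from the kernel's linux/types.h, and the shift amounts are inferred from the mask values rather than stated anywhere in this patch.)

	u32 chipid      = bfin_read32(CHIPID);
	u32 version     = (chipid & CHIPID_VERSION)     >> 28;  /* version field */
	u32 family      = (chipid & CHIPID_FAMILY)      >> 12;  /* family field */
	u32 manufacture = (chipid & CHIPID_MANUFACTURE) >> 1;   /* manufacturer field */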
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define	SWRST			0xFFC00100  /* Software	Reset Register (16-bit) */
+#define	SYSCR			0xFFC00104  /* System Configuration register */
+#define	SIC_RVECT		0xFFC00108
+#define	SIC_IMASK0		0xFFC0010C  /* Interrupt Mask Register */
+#define	SIC_IAR0		0xFFC00110  /* Interrupt Assignment Register 0 */
+#define	SIC_IAR1		0xFFC00114  /* Interrupt Assignment Register 1 */
+#define	SIC_IAR2		0xFFC00118  /* Interrupt Assignment Register 2 */
+#define	SIC_IAR3			0xFFC0011C	/* Interrupt Assignment	Register 3 */
+#define	SIC_ISR0			0xFFC00120  /* Interrupt Status	Register */
+#define	SIC_IWR0			0xFFC00124  /* Interrupt Wakeup	Register */
+#define	SIC_IMASK1			0xFFC00128	/* Interrupt Mask Register 1 */
+#define	SIC_ISR1			0xFFC0012C	/* Interrupt Status Register 1 */
+#define	SIC_IWR1			0xFFC00130	/* Interrupt Wakeup Register 1 */
+#define	SIC_IAR4			0xFFC00134	/* Interrupt Assignment	Register 4 */
+#define	SIC_IAR5			0xFFC00138	/* Interrupt Assignment	Register 5 */
+#define	SIC_IAR6			0xFFC0013C	/* Interrupt Assignment	Register 6 */
+
+
+/* Watchdog Timer (0xFFC00200 -	0xFFC002FF) */
+#define	WDOG_CTL	0xFFC00200  /* Watchdog	Control	Register */
+#define	WDOG_CNT	0xFFC00204  /* Watchdog	Count Register */
+#define	WDOG_STAT	0xFFC00208  /* Watchdog	Status Register */
+
+
+/* Real	Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define	RTC_STAT	0xFFC00300  /* RTC Status Register */
+#define	RTC_ICTL	0xFFC00304  /* RTC Interrupt Control Register */
+#define	RTC_ISTAT	0xFFC00308  /* RTC Interrupt Status Register */
+#define	RTC_SWCNT	0xFFC0030C  /* RTC Stopwatch Count Register */
+#define	RTC_ALARM	0xFFC00310  /* RTC Alarm Time Register */
+#define	RTC_FAST	0xFFC00314  /* RTC Prescaler Enable Register */
+#define	RTC_PREN		0xFFC00314  /* RTC Prescaler Enable Register (alternate	macro) */
+
+
+/* UART0 Controller (0xFFC00400	- 0xFFC004FF) */
+#define	UART0_THR	      0xFFC00400  /* Transmit Holding register */
+#define	UART0_RBR	      0xFFC00400  /* Receive Buffer register */
+#define	UART0_DLL	      0xFFC00400  /* Divisor Latch (Low-Byte) */
+#define	UART0_IER	      0xFFC00404  /* Interrupt Enable Register */
+#define	UART0_DLH	      0xFFC00404  /* Divisor Latch (High-Byte) */
+#define	UART0_IIR	      0xFFC00408  /* Interrupt Identification Register */
+#define	UART0_LCR	      0xFFC0040C  /* Line Control Register */
+#define	UART0_MCR			 0xFFC00410  /*	Modem Control Register */
+#define	UART0_LSR	      0xFFC00414  /* Line Status Register */
+#define	UART0_SCR	      0xFFC0041C  /* SCR Scratch Register */
+#define	UART0_GCTL		     0xFFC00424	 /* Global Control Register */
+
+
+/* SPI0	Controller (0xFFC00500 - 0xFFC005FF) */
+
+#define	SPI0_CTL			0xFFC00500  /* SPI0 Control Register */
+#define	SPI0_FLG			0xFFC00504  /* SPI0 Flag register */
+#define	SPI0_STAT			0xFFC00508  /* SPI0 Status register */
+#define	SPI0_TDBR			0xFFC0050C  /* SPI0 Transmit Data Buffer Register */
+#define	SPI0_RDBR			0xFFC00510  /* SPI0 Receive Data Buffer	Register */
+#define	SPI0_BAUD			0xFFC00514  /* SPI0 Baud rate Register */
+#define	SPI0_SHADOW			0xFFC00518  /* SPI0_RDBR Shadow	Register */
+#define SPI0_REGBASE			SPI0_CTL
+
+
+/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
+#define	TIMER0_CONFIG			0xFFC00600     /* Timer	0 Configuration	Register */
+#define	TIMER0_COUNTER				0xFFC00604     /* Timer	0 Counter Register */
+#define	TIMER0_PERIOD			0xFFC00608     /* Timer	0 Period Register */
+#define	TIMER0_WIDTH			0xFFC0060C     /* Timer	0 Width	Register */
+
+#define	TIMER1_CONFIG			0xFFC00610	/*  Timer 1 Configuration Register   */
+#define	TIMER1_COUNTER			0xFFC00614	/*  Timer 1 Counter Register	     */
+#define	TIMER1_PERIOD			0xFFC00618	/*  Timer 1 Period Register	     */
+#define	TIMER1_WIDTH			0xFFC0061C	/*  Timer 1 Width Register	     */
+
+#define	TIMER2_CONFIG			0xFFC00620	/* Timer 2 Configuration Register   */
+#define	TIMER2_COUNTER			0xFFC00624	/* Timer 2 Counter Register	    */
+#define	TIMER2_PERIOD			0xFFC00628	/* Timer 2 Period Register	    */
+#define	TIMER2_WIDTH			0xFFC0062C	/* Timer 2 Width Register	    */
+
+#define	TIMER_ENABLE				0xFFC00640	/* Timer Enable	Register */
+#define	TIMER_DISABLE				0xFFC00644	/* Timer Disable Register */
+#define	TIMER_STATUS				0xFFC00648	/* Timer Status	Register */
+
+
+/* Programmable	Flags (0xFFC00700 - 0xFFC007FF) */
+#define	FIO_FLAG_D				0xFFC00700  /* Flag Mask to directly specify state of pins */
+#define	FIO_FLAG_C			0xFFC00704  /* Peripheral Interrupt Flag Register (clear) */
+#define	FIO_FLAG_S			0xFFC00708  /* Peripheral Interrupt Flag Register (set) */
+#define	FIO_FLAG_T					0xFFC0070C  /* Flag Mask to directly toggle state of pins */
+#define	FIO_MASKA_D			0xFFC00710  /* Flag Mask Interrupt A Register (set directly) */
+#define	FIO_MASKA_C			0xFFC00714  /* Flag Mask Interrupt A Register (clear) */
+#define	FIO_MASKA_S			0xFFC00718  /* Flag Mask Interrupt A Register (set) */
+#define	FIO_MASKA_T			0xFFC0071C  /* Flag Mask Interrupt A Register (toggle) */
+#define	FIO_MASKB_D			0xFFC00720  /* Flag Mask Interrupt B Register (set directly) */
+#define	FIO_MASKB_C			0xFFC00724  /* Flag Mask Interrupt B Register (clear) */
+#define	FIO_MASKB_S			0xFFC00728  /* Flag Mask Interrupt B Register (set) */
+#define	FIO_MASKB_T			0xFFC0072C  /* Flag Mask Interrupt B Register (toggle) */
+#define	FIO_DIR				0xFFC00730  /* Peripheral Flag Direction Register */
+#define	FIO_POLAR			0xFFC00734  /* Flag Source Polarity Register */
+#define	FIO_EDGE			0xFFC00738  /* Flag Source Sensitivity Register */
+#define	FIO_BOTH			0xFFC0073C  /* Flag Set	on BOTH	Edges Register */
+#define	FIO_INEN					0xFFC00740  /* Flag Input Enable Register  */
+
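(A hedged illustration of the write-one-to-set / write-one-to-clear convention implied by the (set)/(clear) comments above; the pin mask and the use of the 16-bit accessors are assumptions for the example, not part of this patch.)

	#define EXAMPLE_FLAG_PIN (1 << 5)  /* hypothetical flag pin */
	bfin_write16(FIO_DIR, bfin_read16(FIO_DIR) | EXAMPLE_FLAG_PIN);  /* make the pin an output */
	bfin_write16(FIO_FLAG_S, EXAMPLE_FLAG_PIN);  /* drive it high via the set register */
	bfin_write16(FIO_FLAG_C, EXAMPLE_FLAG_PIN);  /* drive it low via the clear register */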
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define	SPORT0_TCR1				0xFFC00800  /* SPORT0 Transmit Configuration 1 Register */
+#define	SPORT0_TCR2				0xFFC00804  /* SPORT0 Transmit Configuration 2 Register */
+#define	SPORT0_TCLKDIV			0xFFC00808  /* SPORT0 Transmit Clock Divider */
+#define	SPORT0_TFSDIV			0xFFC0080C  /* SPORT0 Transmit Frame Sync Divider */
+#define	SPORT0_TX			0xFFC00810  /* SPORT0 TX Data Register */
+#define	SPORT0_RX			0xFFC00818  /* SPORT0 RX Data Register */
+#define	SPORT0_RCR1				0xFFC00820  /* SPORT0 Receive Configuration 1 Register */
+#define	SPORT0_RCR2				0xFFC00824  /* SPORT0 Receive Configuration 2 Register */
+#define	SPORT0_RCLKDIV			0xFFC00828  /* SPORT0 Receive Clock Divider */
+#define	SPORT0_RFSDIV			0xFFC0082C  /* SPORT0 Receive Frame Sync Divider */
+#define	SPORT0_STAT			0xFFC00830  /* SPORT0 Status Register */
+#define	SPORT0_CHNL			0xFFC00834  /* SPORT0 Current Channel Register */
+#define	SPORT0_MCMC1			0xFFC00838  /* SPORT0 Multi-Channel Configuration Register 1 */
+#define	SPORT0_MCMC2			0xFFC0083C  /* SPORT0 Multi-Channel Configuration Register 2 */
+#define	SPORT0_MTCS0			0xFFC00840  /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define	SPORT0_MTCS1			0xFFC00844  /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define	SPORT0_MTCS2			0xFFC00848  /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define	SPORT0_MTCS3			0xFFC0084C  /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define	SPORT0_MRCS0			0xFFC00850  /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define	SPORT0_MRCS1			0xFFC00854  /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define	SPORT0_MRCS2			0xFFC00858  /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define	SPORT0_MRCS3			0xFFC0085C  /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define	SPORT1_TCR1				0xFFC00900  /* SPORT1 Transmit Configuration 1 Register */
+#define	SPORT1_TCR2				0xFFC00904  /* SPORT1 Transmit Configuration 2 Register */
+#define	SPORT1_TCLKDIV			0xFFC00908  /* SPORT1 Transmit Clock Divider */
+#define	SPORT1_TFSDIV			0xFFC0090C  /* SPORT1 Transmit Frame Sync Divider */
+#define	SPORT1_TX			0xFFC00910  /* SPORT1 TX Data Register */
+#define	SPORT1_RX			0xFFC00918  /* SPORT1 RX Data Register */
+#define	SPORT1_RCR1				0xFFC00920  /* SPORT1 Receive Configuration 1 Register */
+#define	SPORT1_RCR2				0xFFC00924  /* SPORT1 Receive Configuration 2 Register */
+#define	SPORT1_RCLKDIV			0xFFC00928  /* SPORT1 Receive Clock Divider */
+#define	SPORT1_RFSDIV			0xFFC0092C  /* SPORT1 Receive Frame Sync Divider */
+#define	SPORT1_STAT			0xFFC00930  /* SPORT1 Status Register */
+#define	SPORT1_CHNL			0xFFC00934  /* SPORT1 Current Channel Register */
+#define	SPORT1_MCMC1			0xFFC00938  /* SPORT1 Multi-Channel Configuration Register 1 */
+#define	SPORT1_MCMC2			0xFFC0093C  /* SPORT1 Multi-Channel Configuration Register 2 */
+#define	SPORT1_MTCS0			0xFFC00940  /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define	SPORT1_MTCS1			0xFFC00944  /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define	SPORT1_MTCS2			0xFFC00948  /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define	SPORT1_MTCS3			0xFFC0094C  /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define	SPORT1_MRCS0			0xFFC00950  /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define	SPORT1_MRCS1			0xFFC00954  /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define	SPORT1_MRCS2			0xFFC00958  /* SPORT1 Multi-Channel Receive Select Register 2 */
+#define	SPORT1_MRCS3			0xFFC0095C  /* SPORT1 Multi-Channel Receive Select Register 3 */
+
+
+/* External Bus	Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+/* Asynchronous	Memory Controller  */
+#define	EBIU_AMGCTL			0xFFC00A00  /* Asynchronous Memory Global Control Register */
+#define	EBIU_AMBCTL0		0xFFC00A04  /* Asynchronous Memory Bank	Control	Register 0 */
+#define	EBIU_AMBCTL1		0xFFC00A08  /* Asynchronous Memory Bank	Control	Register 1 */
+
+/* SDRAM Controller */
+#define	EBIU_SDGCTL			0xFFC00A10  /* SDRAM Global Control Register */
+#define	EBIU_SDBCTL			0xFFC00A14  /* SDRAM Bank Control Register */
+#define	EBIU_SDRRC			0xFFC00A18  /* SDRAM Refresh Rate Control Register */
+#define	EBIU_SDSTAT			0xFFC00A1C  /* SDRAM Status Register */
+
+
+
+/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
+
+#define	DMAC0_TC_PER			0xFFC00B0C	/* DMA Controller 0 Traffic Control Periods Register */
+#define	DMAC0_TC_CNT			0xFFC00B10	/* DMA Controller 0 Traffic Control Current Counts Register */
+
+
+
+/* DMA Controller 0 (0xFFC00C00	- 0xFFC00FFF)							 */
+
+#define	DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register */
+#define	DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register */
+#define	DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register */
+#define	DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register */
+#define	DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register */
+#define	DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register */
+#define	DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register */
+#define	DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register */
+#define	DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register */
+#define	DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register */
+#define	DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map	Register */
+#define	DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register */
+#define	DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register */
+
+#define	DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register */
+#define	DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register */
+#define	DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register */
+#define	DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register */
+#define	DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register */
+#define	DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register */
+#define	DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register */
+#define	DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register */
+#define	DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register */
+#define	DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register */
+#define	DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map	Register */
+#define	DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register */
+#define	DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register */
+
+#define	DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register */
+#define	DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register */
+#define	DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register */
+#define	DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register */
+#define	DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register */
+#define	DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register */
+#define	DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register */
+#define	DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register */
+#define	DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register */
+#define	DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register */
+#define	DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map	Register */
+#define	DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register */
+#define	DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register */
+
+#define	DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register */
+#define	DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register */
+#define	DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register */
+#define	DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register */
+#define	DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register */
+#define	DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register */
+#define	DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register */
+#define	DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register */
+#define	DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register */
+#define	DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register */
+#define	DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map	Register */
+#define	DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register */
+#define	DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register */
+
+#define	DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register */
+#define	DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register */
+#define	DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register */
+#define	DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register */
+#define	DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register */
+#define	DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register */
+#define	DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register */
+#define	DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register */
+#define	DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register */
+#define	DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register */
+#define	DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map	Register */
+#define	DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register */
+#define	DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register */
+
+#define	DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register */
+#define	DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register */
+#define	DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register */
+#define	DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register */
+#define	DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register */
+#define	DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register */
+#define	DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register */
+#define	DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register */
+#define	DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register */
+#define	DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register */
+#define	DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map	Register */
+#define	DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register */
+#define	DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register */
+
+#define	DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register */
+#define	DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register */
+#define	DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register */
+#define	DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register */
+#define	DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register */
+#define	DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register */
+#define	DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register */
+#define	DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register */
+#define	DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register */
+#define	DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register */
+#define	DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map	Register */
+#define	DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register */
+#define	DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register */
+
+#define	DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register */
+#define	DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register */
+#define	DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register */
+#define	DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register */
+#define	DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register */
+#define	DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register */
+#define	DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register */
+#define	DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register */
+#define	DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register */
+#define	DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register */
+#define	DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map	Register */
+#define	DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register */
+#define	DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register */
+
+#define	MDMA_D0_NEXT_DESC_PTR	0xFFC00E00	/* MemDMA0 Stream 0 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D0_START_ADDR		0xFFC00E04	/* MemDMA0 Stream 0 Destination	Start Address Register */
+#define	MDMA_D0_CONFIG			0xFFC00E08	/* MemDMA0 Stream 0 Destination	Configuration Register */
+#define	MDMA_D0_X_COUNT		0xFFC00E10	/* MemDMA0 Stream 0 Destination	X Count	Register */
+#define	MDMA_D0_X_MODIFY		0xFFC00E14	/* MemDMA0 Stream 0 Destination	X Modify Register */
+#define	MDMA_D0_Y_COUNT		0xFFC00E18	/* MemDMA0 Stream 0 Destination	Y Count	Register */
+#define	MDMA_D0_Y_MODIFY		0xFFC00E1C	/* MemDMA0 Stream 0 Destination	Y Modify Register */
+#define	MDMA_D0_CURR_DESC_PTR	0xFFC00E20	/* MemDMA0 Stream 0 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D0_CURR_ADDR		0xFFC00E24	/* MemDMA0 Stream 0 Destination	Current	Address	Register */
+#define	MDMA_D0_IRQ_STATUS		0xFFC00E28	/* MemDMA0 Stream 0 Destination	Interrupt/Status Register */
+#define	MDMA_D0_PERIPHERAL_MAP	0xFFC00E2C	/* MemDMA0 Stream 0 Destination	Peripheral Map Register */
+#define	MDMA_D0_CURR_X_COUNT	0xFFC00E30	/* MemDMA0 Stream 0 Destination	Current	X Count	Register */
+#define	MDMA_D0_CURR_Y_COUNT	0xFFC00E38	/* MemDMA0 Stream 0 Destination	Current	Y Count	Register */
+
+#define	MDMA_S0_NEXT_DESC_PTR	0xFFC00E40	/* MemDMA0 Stream 0 Source Next	Descriptor Pointer Register */
+#define	MDMA_S0_START_ADDR		0xFFC00E44	/* MemDMA0 Stream 0 Source Start Address Register */
+#define	MDMA_S0_CONFIG			0xFFC00E48	/* MemDMA0 Stream 0 Source Configuration Register */
+#define	MDMA_S0_X_COUNT		0xFFC00E50	/* MemDMA0 Stream 0 Source X Count Register */
+#define	MDMA_S0_X_MODIFY		0xFFC00E54	/* MemDMA0 Stream 0 Source X Modify Register */
+#define	MDMA_S0_Y_COUNT		0xFFC00E58	/* MemDMA0 Stream 0 Source Y Count Register */
+#define	MDMA_S0_Y_MODIFY		0xFFC00E5C	/* MemDMA0 Stream 0 Source Y Modify Register */
+#define	MDMA_S0_CURR_DESC_PTR	0xFFC00E60	/* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
+#define	MDMA_S0_CURR_ADDR		0xFFC00E64	/* MemDMA0 Stream 0 Source Current Address Register */
+#define	MDMA_S0_IRQ_STATUS		0xFFC00E68	/* MemDMA0 Stream 0 Source Interrupt/Status Register */
+#define	MDMA_S0_PERIPHERAL_MAP	0xFFC00E6C	/* MemDMA0 Stream 0 Source Peripheral Map Register */
+#define	MDMA_S0_CURR_X_COUNT	0xFFC00E70	/* MemDMA0 Stream 0 Source Current X Count Register */
+#define	MDMA_S0_CURR_Y_COUNT	0xFFC00E78	/* MemDMA0 Stream 0 Source Current Y Count Register */
+
+#define	MDMA_D1_NEXT_DESC_PTR	0xFFC00E80	/* MemDMA0 Stream 1 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D1_START_ADDR		0xFFC00E84	/* MemDMA0 Stream 1 Destination	Start Address Register */
+#define	MDMA_D1_CONFIG			0xFFC00E88	/* MemDMA0 Stream 1 Destination	Configuration Register */
+#define	MDMA_D1_X_COUNT		0xFFC00E90	/* MemDMA0 Stream 1 Destination	X Count	Register */
+#define	MDMA_D1_X_MODIFY		0xFFC00E94	/* MemDMA0 Stream 1 Destination	X Modify Register */
+#define	MDMA_D1_Y_COUNT		0xFFC00E98	/* MemDMA0 Stream 1 Destination	Y Count	Register */
+#define	MDMA_D1_Y_MODIFY		0xFFC00E9C	/* MemDMA0 Stream 1 Destination	Y Modify Register */
+#define	MDMA_D1_CURR_DESC_PTR	0xFFC00EA0	/* MemDMA0 Stream 1 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D1_CURR_ADDR		0xFFC00EA4	/* MemDMA0 Stream 1 Destination	Current	Address	Register */
+#define	MDMA_D1_IRQ_STATUS		0xFFC00EA8	/* MemDMA0 Stream 1 Destination	Interrupt/Status Register */
+#define	MDMA_D1_PERIPHERAL_MAP	0xFFC00EAC	/* MemDMA0 Stream 1 Destination	Peripheral Map Register */
+#define	MDMA_D1_CURR_X_COUNT	0xFFC00EB0	/* MemDMA0 Stream 1 Destination	Current	X Count	Register */
+#define	MDMA_D1_CURR_Y_COUNT	0xFFC00EB8	/* MemDMA0 Stream 1 Destination	Current	Y Count	Register */
+
+#define	MDMA_S1_NEXT_DESC_PTR	0xFFC00EC0	/* MemDMA0 Stream 1 Source Next	Descriptor Pointer Register */
+#define	MDMA_S1_START_ADDR		0xFFC00EC4	/* MemDMA0 Stream 1 Source Start Address Register */
+#define	MDMA_S1_CONFIG			0xFFC00EC8	/* MemDMA0 Stream 1 Source Configuration Register */
+#define	MDMA_S1_X_COUNT		0xFFC00ED0	/* MemDMA0 Stream 1 Source X Count Register */
+#define	MDMA_S1_X_MODIFY		0xFFC00ED4	/* MemDMA0 Stream 1 Source X Modify Register */
+#define	MDMA_S1_Y_COUNT		0xFFC00ED8	/* MemDMA0 Stream 1 Source Y Count Register */
+#define	MDMA_S1_Y_MODIFY		0xFFC00EDC	/* MemDMA0 Stream 1 Source Y Modify Register */
+#define	MDMA_S1_CURR_DESC_PTR	0xFFC00EE0	/* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
+#define	MDMA_S1_CURR_ADDR		0xFFC00EE4	/* MemDMA0 Stream 1 Source Current Address Register */
+#define	MDMA_S1_IRQ_STATUS		0xFFC00EE8	/* MemDMA0 Stream 1 Source Interrupt/Status Register */
+#define	MDMA_S1_PERIPHERAL_MAP	0xFFC00EEC	/* MemDMA0 Stream 1 Source Peripheral Map Register */
+#define	MDMA_S1_CURR_X_COUNT	0xFFC00EF0	/* MemDMA0 Stream 1 Source Current X Count Register */
+#define	MDMA_S1_CURR_Y_COUNT	0xFFC00EF8	/* MemDMA0 Stream 1 Source Current Y Count Register */
+
+
+/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
+#define	PPI_CONTROL			0xFFC01000	/* PPI Control Register */
+#define	PPI_STATUS			0xFFC01004	/* PPI Status Register */
+#define	PPI_COUNT			0xFFC01008	/* PPI Transfer	Count Register */
+#define	PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register */
+#define	PPI_FRAME			0xFFC01010	/* PPI Frame Length Register */
+
+
+/* Two-Wire Interface 0	(0xFFC01400 - 0xFFC014FF)			 */
+#define	TWI0_CLKDIV			0xFFC01400	/* Serial Clock	Divider	Register */
+#define	TWI0_CONTROL		0xFFC01404	/* TWI0	Master Internal	Time Reference Register */
+#define	TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register */
+#define	TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register */
+#define	TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register */
+#define	TWI0_MASTER_CTL	0xFFC01414	/* Master Mode Control Register */
+#define	TWI0_MASTER_STAT	0xFFC01418	/* Master Mode Status Register */
+#define	TWI0_MASTER_ADDR	0xFFC0141C	/* Master Mode Address Register */
+#define	TWI0_INT_STAT		0xFFC01420	/* TWI0	Master Interrupt Register */
+#define	TWI0_INT_MASK		0xFFC01424	/* TWI0	Master Interrupt Mask Register */
+#define	TWI0_FIFO_CTL		0xFFC01428	/* FIFO	Control	Register */
+#define	TWI0_FIFO_STAT		0xFFC0142C	/* FIFO	Status Register */
+#define	TWI0_XMT_DATA8		0xFFC01480	/* FIFO	Transmit Data Single Byte Register */
+#define	TWI0_XMT_DATA16		0xFFC01484	/* FIFO	Transmit Data Double Byte Register */
+#define	TWI0_RCV_DATA8		0xFFC01488	/* FIFO	Receive	Data Single Byte Register */
+#define	TWI0_RCV_DATA16		0xFFC0148C	/* FIFO	Receive	Data Double Byte Register */
+
+#define TWI0_REGBASE		TWI0_CLKDIV
+
+/* the following are for backwards compatibility */
+#define	TWI0_PRESCALE	 TWI0_CONTROL
+#define	TWI0_INT_SRC	 TWI0_INT_STAT
+#define	TWI0_INT_ENABLE	 TWI0_INT_MASK
+
+
+/* General-Purpose Ports  (0xFFC01500 -	0xFFC015FF)	 */
+
+/* GPIO	Port C Register	Names */
+#define PORTCIO_FER			0xFFC01500	/* GPIO	Pin Port C Configuration Register */
+#define PORTCIO				0xFFC01510	/* GPIO	Pin Port C Data	Register */
+#define PORTCIO_CLEAR			0xFFC01520	/* Clear GPIO Pin Port C Register */
+#define PORTCIO_SET			0xFFC01530	/* Set GPIO Pin	Port C Register */
+#define PORTCIO_TOGGLE			0xFFC01540	/* Toggle GPIO Pin Port	C Register */
+#define PORTCIO_DIR			0xFFC01550	/* GPIO	Pin Port C Direction Register */
+#define PORTCIO_INEN			0xFFC01560	/* GPIO	Pin Port C Input Enable	Register */
+
+/* GPIO	Port D Register	Names */
+#define PORTDIO_FER			0xFFC01504	/* GPIO	Pin Port D Configuration Register */
+#define PORTDIO				0xFFC01514	/* GPIO	Pin Port D Data	Register */
+#define PORTDIO_CLEAR			0xFFC01524	/* Clear GPIO Pin Port D Register */
+#define PORTDIO_SET			0xFFC01534	/* Set GPIO Pin	Port D Register */
+#define PORTDIO_TOGGLE			0xFFC01544	/* Toggle GPIO Pin Port	D Register */
+#define PORTDIO_DIR			0xFFC01554	/* GPIO	Pin Port D Direction Register */
+#define PORTDIO_INEN			0xFFC01564	/* GPIO	Pin Port D Input Enable	Register */
+
+/* GPIO	Port E Register	Names */
+#define PORTEIO_FER			0xFFC01508	/* GPIO	Pin Port E Configuration Register */
+#define PORTEIO				0xFFC01518	/* GPIO	Pin Port E Data	Register */
+#define PORTEIO_CLEAR			0xFFC01528	/* Clear GPIO Pin Port E Register */
+#define PORTEIO_SET			0xFFC01538	/* Set GPIO Pin	Port E Register */
+#define PORTEIO_TOGGLE			0xFFC01548	/* Toggle GPIO Pin Port	E Register */
+#define PORTEIO_DIR			0xFFC01558	/* GPIO	Pin Port E Direction Register */
+#define PORTEIO_INEN			0xFFC01568	/* GPIO	Pin Port E Input Enable	Register */
+
+/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
+
+#define	DMAC1_TC_PER			0xFFC01B0C	/* DMA Controller 1 Traffic Control Periods Register */
+#define	DMAC1_TC_CNT			0xFFC01B10	/* DMA Controller 1 Traffic Control Current Counts Register */
+
+
+
+/* DMA Controller 1 (0xFFC01C00	- 0xFFC01FFF)							 */
+#define	DMA8_NEXT_DESC_PTR		0xFFC01C00	/* DMA Channel 8 Next Descriptor Pointer Register */
+#define	DMA8_START_ADDR			0xFFC01C04	/* DMA Channel 8 Start Address Register */
+#define	DMA8_CONFIG				0xFFC01C08	/* DMA Channel 8 Configuration Register */
+#define	DMA8_X_COUNT			0xFFC01C10	/* DMA Channel 8 X Count Register */
+#define	DMA8_X_MODIFY			0xFFC01C14	/* DMA Channel 8 X Modify Register */
+#define	DMA8_Y_COUNT			0xFFC01C18	/* DMA Channel 8 Y Count Register */
+#define	DMA8_Y_MODIFY			0xFFC01C1C	/* DMA Channel 8 Y Modify Register */
+#define	DMA8_CURR_DESC_PTR		0xFFC01C20	/* DMA Channel 8 Current Descriptor Pointer Register */
+#define	DMA8_CURR_ADDR			0xFFC01C24	/* DMA Channel 8 Current Address Register */
+#define	DMA8_IRQ_STATUS			0xFFC01C28	/* DMA Channel 8 Interrupt/Status Register */
+#define	DMA8_PERIPHERAL_MAP		0xFFC01C2C	/* DMA Channel 8 Peripheral Map	Register */
+#define	DMA8_CURR_X_COUNT		0xFFC01C30	/* DMA Channel 8 Current X Count Register */
+#define	DMA8_CURR_Y_COUNT		0xFFC01C38	/* DMA Channel 8 Current Y Count Register */
+
+#define	DMA9_NEXT_DESC_PTR		0xFFC01C40	/* DMA Channel 9 Next Descriptor Pointer Register */
+#define	DMA9_START_ADDR			0xFFC01C44	/* DMA Channel 9 Start Address Register */
+#define	DMA9_CONFIG				0xFFC01C48	/* DMA Channel 9 Configuration Register */
+#define	DMA9_X_COUNT			0xFFC01C50	/* DMA Channel 9 X Count Register */
+#define	DMA9_X_MODIFY			0xFFC01C54	/* DMA Channel 9 X Modify Register */
+#define	DMA9_Y_COUNT			0xFFC01C58	/* DMA Channel 9 Y Count Register */
+#define	DMA9_Y_MODIFY			0xFFC01C5C	/* DMA Channel 9 Y Modify Register */
+#define	DMA9_CURR_DESC_PTR		0xFFC01C60	/* DMA Channel 9 Current Descriptor Pointer Register */
+#define	DMA9_CURR_ADDR			0xFFC01C64	/* DMA Channel 9 Current Address Register */
+#define	DMA9_IRQ_STATUS			0xFFC01C68	/* DMA Channel 9 Interrupt/Status Register */
+#define	DMA9_PERIPHERAL_MAP		0xFFC01C6C	/* DMA Channel 9 Peripheral Map	Register */
+#define	DMA9_CURR_X_COUNT		0xFFC01C70	/* DMA Channel 9 Current X Count Register */
+#define	DMA9_CURR_Y_COUNT		0xFFC01C78	/* DMA Channel 9 Current Y Count Register */
+
+#define	DMA10_NEXT_DESC_PTR		0xFFC01C80	/* DMA Channel 10 Next Descriptor Pointer Register */
+#define	DMA10_START_ADDR		0xFFC01C84	/* DMA Channel 10 Start	Address	Register */
+#define	DMA10_CONFIG			0xFFC01C88	/* DMA Channel 10 Configuration	Register */
+#define	DMA10_X_COUNT			0xFFC01C90	/* DMA Channel 10 X Count Register */
+#define	DMA10_X_MODIFY			0xFFC01C94	/* DMA Channel 10 X Modify Register */
+#define	DMA10_Y_COUNT			0xFFC01C98	/* DMA Channel 10 Y Count Register */
+#define	DMA10_Y_MODIFY			0xFFC01C9C	/* DMA Channel 10 Y Modify Register */
+#define	DMA10_CURR_DESC_PTR		0xFFC01CA0	/* DMA Channel 10 Current Descriptor Pointer Register */
+#define	DMA10_CURR_ADDR			0xFFC01CA4	/* DMA Channel 10 Current Address Register */
+#define	DMA10_IRQ_STATUS		0xFFC01CA8	/* DMA Channel 10 Interrupt/Status Register */
+#define	DMA10_PERIPHERAL_MAP	0xFFC01CAC	/* DMA Channel 10 Peripheral Map Register */
+#define	DMA10_CURR_X_COUNT		0xFFC01CB0	/* DMA Channel 10 Current X Count Register */
+#define	DMA10_CURR_Y_COUNT		0xFFC01CB8	/* DMA Channel 10 Current Y Count Register */
+
+#define	DMA11_NEXT_DESC_PTR		0xFFC01CC0	/* DMA Channel 11 Next Descriptor Pointer Register */
+#define	DMA11_START_ADDR		0xFFC01CC4	/* DMA Channel 11 Start	Address	Register */
+#define	DMA11_CONFIG			0xFFC01CC8	/* DMA Channel 11 Configuration	Register */
+#define	DMA11_X_COUNT			0xFFC01CD0	/* DMA Channel 11 X Count Register */
+#define	DMA11_X_MODIFY			0xFFC01CD4	/* DMA Channel 11 X Modify Register */
+#define	DMA11_Y_COUNT			0xFFC01CD8	/* DMA Channel 11 Y Count Register */
+#define	DMA11_Y_MODIFY			0xFFC01CDC	/* DMA Channel 11 Y Modify Register */
+#define	DMA11_CURR_DESC_PTR		0xFFC01CE0	/* DMA Channel 11 Current Descriptor Pointer Register */
+#define	DMA11_CURR_ADDR			0xFFC01CE4	/* DMA Channel 11 Current Address Register */
+#define	DMA11_IRQ_STATUS		0xFFC01CE8	/* DMA Channel 11 Interrupt/Status Register */
+#define	DMA11_PERIPHERAL_MAP	0xFFC01CEC	/* DMA Channel 11 Peripheral Map Register */
+#define	DMA11_CURR_X_COUNT		0xFFC01CF0	/* DMA Channel 11 Current X Count Register */
+#define	DMA11_CURR_Y_COUNT		0xFFC01CF8	/* DMA Channel 11 Current Y Count Register */
+
+#define	DMA12_NEXT_DESC_PTR		0xFFC01D00	/* DMA Channel 12 Next Descriptor Pointer Register */
+#define	DMA12_START_ADDR		0xFFC01D04	/* DMA Channel 12 Start	Address	Register */
+#define	DMA12_CONFIG			0xFFC01D08	/* DMA Channel 12 Configuration	Register */
+#define	DMA12_X_COUNT			0xFFC01D10	/* DMA Channel 12 X Count Register */
+#define	DMA12_X_MODIFY			0xFFC01D14	/* DMA Channel 12 X Modify Register */
+#define	DMA12_Y_COUNT			0xFFC01D18	/* DMA Channel 12 Y Count Register */
+#define	DMA12_Y_MODIFY			0xFFC01D1C	/* DMA Channel 12 Y Modify Register */
+#define	DMA12_CURR_DESC_PTR		0xFFC01D20	/* DMA Channel 12 Current Descriptor Pointer Register */
+#define	DMA12_CURR_ADDR			0xFFC01D24	/* DMA Channel 12 Current Address Register */
+#define	DMA12_IRQ_STATUS		0xFFC01D28	/* DMA Channel 12 Interrupt/Status Register */
+#define	DMA12_PERIPHERAL_MAP	0xFFC01D2C	/* DMA Channel 12 Peripheral Map Register */
+#define	DMA12_CURR_X_COUNT		0xFFC01D30	/* DMA Channel 12 Current X Count Register */
+#define	DMA12_CURR_Y_COUNT		0xFFC01D38	/* DMA Channel 12 Current Y Count Register */
+
+#define	DMA13_NEXT_DESC_PTR		0xFFC01D40	/* DMA Channel 13 Next Descriptor Pointer Register */
+#define	DMA13_START_ADDR		0xFFC01D44	/* DMA Channel 13 Start	Address	Register */
+#define	DMA13_CONFIG			0xFFC01D48	/* DMA Channel 13 Configuration	Register */
+#define	DMA13_X_COUNT			0xFFC01D50	/* DMA Channel 13 X Count Register */
+#define	DMA13_X_MODIFY			0xFFC01D54	/* DMA Channel 13 X Modify Register */
+#define	DMA13_Y_COUNT			0xFFC01D58	/* DMA Channel 13 Y Count Register */
+#define	DMA13_Y_MODIFY			0xFFC01D5C	/* DMA Channel 13 Y Modify Register */
+#define	DMA13_CURR_DESC_PTR		0xFFC01D60	/* DMA Channel 13 Current Descriptor Pointer Register */
+#define	DMA13_CURR_ADDR			0xFFC01D64	/* DMA Channel 13 Current Address Register */
+#define	DMA13_IRQ_STATUS		0xFFC01D68	/* DMA Channel 13 Interrupt/Status Register */
+#define	DMA13_PERIPHERAL_MAP	0xFFC01D6C	/* DMA Channel 13 Peripheral Map Register */
+#define	DMA13_CURR_X_COUNT		0xFFC01D70	/* DMA Channel 13 Current X Count Register */
+#define	DMA13_CURR_Y_COUNT		0xFFC01D78	/* DMA Channel 13 Current Y Count Register */
+
+#define	DMA14_NEXT_DESC_PTR		0xFFC01D80	/* DMA Channel 14 Next Descriptor Pointer Register */
+#define	DMA14_START_ADDR		0xFFC01D84	/* DMA Channel 14 Start	Address	Register */
+#define	DMA14_CONFIG			0xFFC01D88	/* DMA Channel 14 Configuration	Register */
+#define	DMA14_X_COUNT			0xFFC01D90	/* DMA Channel 14 X Count Register */
+#define	DMA14_X_MODIFY			0xFFC01D94	/* DMA Channel 14 X Modify Register */
+#define	DMA14_Y_COUNT			0xFFC01D98	/* DMA Channel 14 Y Count Register */
+#define	DMA14_Y_MODIFY			0xFFC01D9C	/* DMA Channel 14 Y Modify Register */
+#define	DMA14_CURR_DESC_PTR		0xFFC01DA0	/* DMA Channel 14 Current Descriptor Pointer Register */
+#define	DMA14_CURR_ADDR			0xFFC01DA4	/* DMA Channel 14 Current Address Register */
+#define	DMA14_IRQ_STATUS		0xFFC01DA8	/* DMA Channel 14 Interrupt/Status Register */
+#define	DMA14_PERIPHERAL_MAP	0xFFC01DAC	/* DMA Channel 14 Peripheral Map Register */
+#define	DMA14_CURR_X_COUNT		0xFFC01DB0	/* DMA Channel 14 Current X Count Register */
+#define	DMA14_CURR_Y_COUNT		0xFFC01DB8	/* DMA Channel 14 Current Y Count Register */
+
+#define	DMA15_NEXT_DESC_PTR		0xFFC01DC0	/* DMA Channel 15 Next Descriptor Pointer Register */
+#define	DMA15_START_ADDR		0xFFC01DC4	/* DMA Channel 15 Start	Address	Register */
+#define	DMA15_CONFIG			0xFFC01DC8	/* DMA Channel 15 Configuration	Register */
+#define	DMA15_X_COUNT			0xFFC01DD0	/* DMA Channel 15 X Count Register */
+#define	DMA15_X_MODIFY			0xFFC01DD4	/* DMA Channel 15 X Modify Register */
+#define	DMA15_Y_COUNT			0xFFC01DD8	/* DMA Channel 15 Y Count Register */
+#define	DMA15_Y_MODIFY			0xFFC01DDC	/* DMA Channel 15 Y Modify Register */
+#define	DMA15_CURR_DESC_PTR		0xFFC01DE0	/* DMA Channel 15 Current Descriptor Pointer Register */
+#define	DMA15_CURR_ADDR			0xFFC01DE4	/* DMA Channel 15 Current Address Register */
+#define	DMA15_IRQ_STATUS		0xFFC01DE8	/* DMA Channel 15 Interrupt/Status Register */
+#define	DMA15_PERIPHERAL_MAP	0xFFC01DEC	/* DMA Channel 15 Peripheral Map Register */
+#define	DMA15_CURR_X_COUNT		0xFFC01DF0	/* DMA Channel 15 Current X Count Register */
+#define	DMA15_CURR_Y_COUNT		0xFFC01DF8	/* DMA Channel 15 Current Y Count Register */
+
+#define	DMA16_NEXT_DESC_PTR		0xFFC01E00	/* DMA Channel 16 Next Descriptor Pointer Register */
+#define	DMA16_START_ADDR		0xFFC01E04	/* DMA Channel 16 Start	Address	Register */
+#define	DMA16_CONFIG			0xFFC01E08	/* DMA Channel 16 Configuration	Register */
+#define	DMA16_X_COUNT			0xFFC01E10	/* DMA Channel 16 X Count Register */
+#define	DMA16_X_MODIFY			0xFFC01E14	/* DMA Channel 16 X Modify Register */
+#define	DMA16_Y_COUNT			0xFFC01E18	/* DMA Channel 16 Y Count Register */
+#define	DMA16_Y_MODIFY			0xFFC01E1C	/* DMA Channel 16 Y Modify Register */
+#define	DMA16_CURR_DESC_PTR		0xFFC01E20	/* DMA Channel 16 Current Descriptor Pointer Register */
+#define	DMA16_CURR_ADDR			0xFFC01E24	/* DMA Channel 16 Current Address Register */
+#define	DMA16_IRQ_STATUS		0xFFC01E28	/* DMA Channel 16 Interrupt/Status Register */
+#define	DMA16_PERIPHERAL_MAP	0xFFC01E2C	/* DMA Channel 16 Peripheral Map Register */
+#define	DMA16_CURR_X_COUNT		0xFFC01E30	/* DMA Channel 16 Current X Count Register */
+#define	DMA16_CURR_Y_COUNT		0xFFC01E38	/* DMA Channel 16 Current Y Count Register */
+
+#define	DMA17_NEXT_DESC_PTR		0xFFC01E40	/* DMA Channel 17 Next Descriptor Pointer Register */
+#define	DMA17_START_ADDR		0xFFC01E44	/* DMA Channel 17 Start	Address	Register */
+#define	DMA17_CONFIG			0xFFC01E48	/* DMA Channel 17 Configuration	Register */
+#define	DMA17_X_COUNT			0xFFC01E50	/* DMA Channel 17 X Count Register */
+#define	DMA17_X_MODIFY			0xFFC01E54	/* DMA Channel 17 X Modify Register */
+#define	DMA17_Y_COUNT			0xFFC01E58	/* DMA Channel 17 Y Count Register */
+#define	DMA17_Y_MODIFY			0xFFC01E5C	/* DMA Channel 17 Y Modify Register */
+#define	DMA17_CURR_DESC_PTR		0xFFC01E60	/* DMA Channel 17 Current Descriptor Pointer Register */
+#define	DMA17_CURR_ADDR			0xFFC01E64	/* DMA Channel 17 Current Address Register */
+#define	DMA17_IRQ_STATUS		0xFFC01E68	/* DMA Channel 17 Interrupt/Status Register */
+#define	DMA17_PERIPHERAL_MAP	0xFFC01E6C	/* DMA Channel 17 Peripheral Map Register */
+#define	DMA17_CURR_X_COUNT		0xFFC01E70	/* DMA Channel 17 Current X Count Register */
+#define	DMA17_CURR_Y_COUNT		0xFFC01E78	/* DMA Channel 17 Current Y Count Register */
+
+#define	DMA18_NEXT_DESC_PTR		0xFFC01E80	/* DMA Channel 18 Next Descriptor Pointer Register */
+#define	DMA18_START_ADDR		0xFFC01E84	/* DMA Channel 18 Start	Address	Register */
+#define	DMA18_CONFIG			0xFFC01E88	/* DMA Channel 18 Configuration	Register */
+#define	DMA18_X_COUNT			0xFFC01E90	/* DMA Channel 18 X Count Register */
+#define	DMA18_X_MODIFY			0xFFC01E94	/* DMA Channel 18 X Modify Register */
+#define	DMA18_Y_COUNT			0xFFC01E98	/* DMA Channel 18 Y Count Register */
+#define	DMA18_Y_MODIFY			0xFFC01E9C	/* DMA Channel 18 Y Modify Register */
+#define	DMA18_CURR_DESC_PTR		0xFFC01EA0	/* DMA Channel 18 Current Descriptor Pointer Register */
+#define	DMA18_CURR_ADDR			0xFFC01EA4	/* DMA Channel 18 Current Address Register */
+#define	DMA18_IRQ_STATUS		0xFFC01EA8	/* DMA Channel 18 Interrupt/Status Register */
+#define	DMA18_PERIPHERAL_MAP	0xFFC01EAC	/* DMA Channel 18 Peripheral Map Register */
+#define	DMA18_CURR_X_COUNT		0xFFC01EB0	/* DMA Channel 18 Current X Count Register */
+#define	DMA18_CURR_Y_COUNT		0xFFC01EB8	/* DMA Channel 18 Current Y Count Register */
+
+#define	DMA19_NEXT_DESC_PTR		0xFFC01EC0	/* DMA Channel 19 Next Descriptor Pointer Register */
+#define	DMA19_START_ADDR		0xFFC01EC4	/* DMA Channel 19 Start	Address	Register */
+#define	DMA19_CONFIG			0xFFC01EC8	/* DMA Channel 19 Configuration	Register */
+#define	DMA19_X_COUNT			0xFFC01ED0	/* DMA Channel 19 X Count Register */
+#define	DMA19_X_MODIFY			0xFFC01ED4	/* DMA Channel 19 X Modify Register */
+#define	DMA19_Y_COUNT			0xFFC01ED8	/* DMA Channel 19 Y Count Register */
+#define	DMA19_Y_MODIFY			0xFFC01EDC	/* DMA Channel 19 Y Modify Register */
+#define	DMA19_CURR_DESC_PTR		0xFFC01EE0	/* DMA Channel 19 Current Descriptor Pointer Register */
+#define	DMA19_CURR_ADDR			0xFFC01EE4	/* DMA Channel 19 Current Address Register */
+#define	DMA19_IRQ_STATUS		0xFFC01EE8	/* DMA Channel 19 Interrupt/Status Register */
+#define	DMA19_PERIPHERAL_MAP	0xFFC01EEC	/* DMA Channel 19 Peripheral Map Register */
+#define	DMA19_CURR_X_COUNT		0xFFC01EF0	/* DMA Channel 19 Current X Count Register */
+#define	DMA19_CURR_Y_COUNT		0xFFC01EF8	/* DMA Channel 19 Current Y Count Register */
+
+#define	MDMA_D2_NEXT_DESC_PTR	0xFFC01F00	/* MemDMA1 Stream 0 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D2_START_ADDR		0xFFC01F04	/* MemDMA1 Stream 0 Destination	Start Address Register */
+#define	MDMA_D2_CONFIG			0xFFC01F08	/* MemDMA1 Stream 0 Destination	Configuration Register */
+#define	MDMA_D2_X_COUNT		0xFFC01F10	/* MemDMA1 Stream 0 Destination	X Count	Register */
+#define	MDMA_D2_X_MODIFY		0xFFC01F14	/* MemDMA1 Stream 0 Destination	X Modify Register */
+#define	MDMA_D2_Y_COUNT		0xFFC01F18	/* MemDMA1 Stream 0 Destination	Y Count	Register */
+#define	MDMA_D2_Y_MODIFY		0xFFC01F1C	/* MemDMA1 Stream 0 Destination	Y Modify Register */
+#define	MDMA_D2_CURR_DESC_PTR	0xFFC01F20	/* MemDMA1 Stream 0 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D2_CURR_ADDR		0xFFC01F24	/* MemDMA1 Stream 0 Destination	Current	Address	Register */
+#define	MDMA_D2_IRQ_STATUS		0xFFC01F28	/* MemDMA1 Stream 0 Destination	Interrupt/Status Register */
+#define	MDMA_D2_PERIPHERAL_MAP	0xFFC01F2C	/* MemDMA1 Stream 0 Destination	Peripheral Map Register */
+#define	MDMA_D2_CURR_X_COUNT	0xFFC01F30	/* MemDMA1 Stream 0 Destination	Current	X Count	Register */
+#define	MDMA_D2_CURR_Y_COUNT	0xFFC01F38	/* MemDMA1 Stream 0 Destination	Current	Y Count	Register */
+
+#define	MDMA_S2_NEXT_DESC_PTR	0xFFC01F40	/* MemDMA1 Stream 0 Source Next	Descriptor Pointer Register */
+#define	MDMA_S2_START_ADDR		0xFFC01F44	/* MemDMA1 Stream 0 Source Start Address Register */
+#define	MDMA_S2_CONFIG			0xFFC01F48	/* MemDMA1 Stream 0 Source Configuration Register */
+#define	MDMA_S2_X_COUNT		0xFFC01F50	/* MemDMA1 Stream 0 Source X Count Register */
+#define	MDMA_S2_X_MODIFY		0xFFC01F54	/* MemDMA1 Stream 0 Source X Modify Register */
+#define	MDMA_S2_Y_COUNT		0xFFC01F58	/* MemDMA1 Stream 0 Source Y Count Register */
+#define	MDMA_S2_Y_MODIFY		0xFFC01F5C	/* MemDMA1 Stream 0 Source Y Modify Register */
+#define	MDMA_S2_CURR_DESC_PTR	0xFFC01F60	/* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
+#define	MDMA_S2_CURR_ADDR		0xFFC01F64	/* MemDMA1 Stream 0 Source Current Address Register */
+#define	MDMA_S2_IRQ_STATUS		0xFFC01F68	/* MemDMA1 Stream 0 Source Interrupt/Status Register */
+#define	MDMA_S2_PERIPHERAL_MAP	0xFFC01F6C	/* MemDMA1 Stream 0 Source Peripheral Map Register */
+#define	MDMA_S2_CURR_X_COUNT	0xFFC01F70	/* MemDMA1 Stream 0 Source Current X Count Register */
+#define	MDMA_S2_CURR_Y_COUNT	0xFFC01F78	/* MemDMA1 Stream 0 Source Current Y Count Register */
+
+#define	MDMA_D3_NEXT_DESC_PTR	0xFFC01F80	/* MemDMA1 Stream 1 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D3_START_ADDR		0xFFC01F84	/* MemDMA1 Stream 1 Destination	Start Address Register */
+#define	MDMA_D3_CONFIG			0xFFC01F88	/* MemDMA1 Stream 1 Destination	Configuration Register */
+#define	MDMA_D3_X_COUNT		0xFFC01F90	/* MemDMA1 Stream 1 Destination	X Count	Register */
+#define	MDMA_D3_X_MODIFY		0xFFC01F94	/* MemDMA1 Stream 1 Destination	X Modify Register */
+#define	MDMA_D3_Y_COUNT		0xFFC01F98	/* MemDMA1 Stream 1 Destination	Y Count	Register */
+#define	MDMA_D3_Y_MODIFY		0xFFC01F9C	/* MemDMA1 Stream 1 Destination	Y Modify Register */
+#define	MDMA_D3_CURR_DESC_PTR	0xFFC01FA0	/* MemDMA1 Stream 1 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D3_CURR_ADDR		0xFFC01FA4	/* MemDMA1 Stream 1 Destination	Current	Address	Register */
+#define	MDMA_D3_IRQ_STATUS		0xFFC01FA8	/* MemDMA1 Stream 1 Destination	Interrupt/Status Register */
+#define	MDMA_D3_PERIPHERAL_MAP	0xFFC01FAC	/* MemDMA1 Stream 1 Destination	Peripheral Map Register */
+#define	MDMA_D3_CURR_X_COUNT	0xFFC01FB0	/* MemDMA1 Stream 1 Destination	Current	X Count	Register */
+#define	MDMA_D3_CURR_Y_COUNT	0xFFC01FB8	/* MemDMA1 Stream 1 Destination	Current	Y Count	Register */
+
+#define	MDMA_S3_NEXT_DESC_PTR	0xFFC01FC0	/* MemDMA1 Stream 1 Source Next	Descriptor Pointer Register */
+#define	MDMA_S3_START_ADDR		0xFFC01FC4	/* MemDMA1 Stream 1 Source Start Address Register */
+#define	MDMA_S3_CONFIG			0xFFC01FC8	/* MemDMA1 Stream 1 Source Configuration Register */
+#define	MDMA_S3_X_COUNT		0xFFC01FD0	/* MemDMA1 Stream 1 Source X Count Register */
+#define	MDMA_S3_X_MODIFY		0xFFC01FD4	/* MemDMA1 Stream 1 Source X Modify Register */
+#define	MDMA_S3_Y_COUNT		0xFFC01FD8	/* MemDMA1 Stream 1 Source Y Count Register */
+#define	MDMA_S3_Y_MODIFY		0xFFC01FDC	/* MemDMA1 Stream 1 Source Y Modify Register */
+#define	MDMA_S3_CURR_DESC_PTR	0xFFC01FE0	/* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
+#define	MDMA_S3_CURR_ADDR		0xFFC01FE4	/* MemDMA1 Stream 1 Source Current Address Register */
+#define	MDMA_S3_IRQ_STATUS		0xFFC01FE8	/* MemDMA1 Stream 1 Source Interrupt/Status Register */
+#define	MDMA_S3_PERIPHERAL_MAP	0xFFC01FEC	/* MemDMA1 Stream 1 Source Peripheral Map Register */
+#define	MDMA_S3_CURR_X_COUNT	0xFFC01FF0	/* MemDMA1 Stream 1 Source Current X Count Register */
+#define	MDMA_S3_CURR_Y_COUNT	0xFFC01FF8	/* MemDMA1 Stream 1 Source Current Y Count Register */
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)	 */
+#define	UART1_THR			0xFFC02000	/* Transmit Holding register */
+#define	UART1_RBR			0xFFC02000	/* Receive Buffer register */
+#define	UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte) */
+#define	UART1_IER			0xFFC02004	/* Interrupt Enable Register */
+#define	UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte) */
+#define	UART1_IIR			0xFFC02008	/* Interrupt Identification Register */
+#define	UART1_LCR			0xFFC0200C	/* Line	Control	Register */
+#define	UART1_MCR			0xFFC02010	/* Modem Control Register */
+#define	UART1_LSR			0xFFC02014	/* Line	Status Register */
+#define	UART1_SCR			0xFFC0201C	/* SCR Scratch Register */
+#define	UART1_GCTL			0xFFC02024	/* Global Control Register */
+
+
+/* UART2 Controller		(0xFFC02100 - 0xFFC021FF)	 */
+#define	UART2_THR			0xFFC02100	/* Transmit Holding register */
+#define	UART2_RBR			0xFFC02100	/* Receive Buffer register */
+#define	UART2_DLL			0xFFC02100	/* Divisor Latch (Low-Byte) */
+#define	UART2_IER			0xFFC02104	/* Interrupt Enable Register */
+#define	UART2_DLH			0xFFC02104	/* Divisor Latch (High-Byte) */
+#define	UART2_IIR			0xFFC02108	/* Interrupt Identification Register */
+#define	UART2_LCR			0xFFC0210C	/* Line	Control	Register */
+#define	UART2_MCR			0xFFC02110	/* Modem Control Register */
+#define	UART2_LSR			0xFFC02114	/* Line	Status Register */
+#define	UART2_SCR			0xFFC0211C	/* SCR Scratch Register */
+#define	UART2_GCTL			0xFFC02124	/* Global Control Register */
+
+
+/* Two-Wire Interface 1	(0xFFC02200 - 0xFFC022FF)			 */
+#define	TWI1_CLKDIV			0xFFC02200	/* Serial Clock	Divider	Register */
+#define	TWI1_CONTROL		0xFFC02204	/* TWI1	Master Internal	Time Reference Register */
+#define	TWI1_SLAVE_CTL		0xFFC02208	/* Slave Mode Control Register */
+#define	TWI1_SLAVE_STAT		0xFFC0220C	/* Slave Mode Status Register */
+#define	TWI1_SLAVE_ADDR		0xFFC02210	/* Slave Mode Address Register */
+#define	TWI1_MASTER_CTL	0xFFC02214	/* Master Mode Control Register */
+#define	TWI1_MASTER_STAT	0xFFC02218	/* Master Mode Status Register */
+#define	TWI1_MASTER_ADDR	0xFFC0221C	/* Master Mode Address Register */
+#define	TWI1_INT_STAT		0xFFC02220	/* TWI1	Master Interrupt Register */
+#define	TWI1_INT_MASK		0xFFC02224	/* TWI1	Master Interrupt Mask Register */
+#define	TWI1_FIFO_CTL		0xFFC02228	/* FIFO	Control	Register */
+#define	TWI1_FIFO_STAT		0xFFC0222C	/* FIFO	Status Register */
+#define	TWI1_XMT_DATA8		0xFFC02280	/* FIFO	Transmit Data Single Byte Register */
+#define	TWI1_XMT_DATA16		0xFFC02284	/* FIFO	Transmit Data Double Byte Register */
+#define	TWI1_RCV_DATA8		0xFFC02288	/* FIFO	Receive	Data Single Byte Register */
+#define	TWI1_RCV_DATA16		0xFFC0228C	/* FIFO	Receive	Data Double Byte Register */
+#define TWI1_REGBASE		TWI1_CLKDIV
+
+
+/* the following are for backwards compatibility */
+#define	TWI1_PRESCALE	  TWI1_CONTROL
+#define	TWI1_INT_SRC	  TWI1_INT_STAT
+#define	TWI1_INT_ENABLE	  TWI1_INT_MASK
+
+
+/* SPI1	Controller		(0xFFC02300 - 0xFFC023FF)	 */
+#define	SPI1_CTL			0xFFC02300  /* SPI1 Control Register */
+#define	SPI1_FLG			0xFFC02304  /* SPI1 Flag register */
+#define	SPI1_STAT			0xFFC02308  /* SPI1 Status register */
+#define	SPI1_TDBR			0xFFC0230C  /* SPI1 Transmit Data Buffer Register */
+#define	SPI1_RDBR			0xFFC02310  /* SPI1 Receive Data Buffer	Register */
+#define	SPI1_BAUD			0xFFC02314  /* SPI1 Baud rate Register */
+#define	SPI1_SHADOW			0xFFC02318  /* SPI1_RDBR Shadow	Register */
+#define SPI1_REGBASE			SPI1_CTL
+
+/* SPI2	Controller		(0xFFC02400 - 0xFFC024FF)	 */
+#define	SPI2_CTL			0xFFC02400  /* SPI2 Control Register */
+#define	SPI2_FLG			0xFFC02404  /* SPI2 Flag register */
+#define	SPI2_STAT			0xFFC02408  /* SPI2 Status register */
+#define	SPI2_TDBR			0xFFC0240C  /* SPI2 Transmit Data Buffer Register */
+#define	SPI2_RDBR			0xFFC02410  /* SPI2 Receive Data Buffer	Register */
+#define	SPI2_BAUD			0xFFC02414  /* SPI2 Baud rate Register */
+#define	SPI2_SHADOW			0xFFC02418  /* SPI2_RDBR Shadow	Register */
+#define SPI2_REGBASE			SPI2_CTL
+
+/* SPORT2 Controller		(0xFFC02500 - 0xFFC025FF)			 */
+#define	SPORT2_TCR1			0xFFC02500	/* SPORT2 Transmit Configuration 1 Register */
+#define	SPORT2_TCR2			0xFFC02504	/* SPORT2 Transmit Configuration 2 Register */
+#define	SPORT2_TCLKDIV		0xFFC02508	/* SPORT2 Transmit Clock Divider */
+#define	SPORT2_TFSDIV		0xFFC0250C	/* SPORT2 Transmit Frame Sync Divider */
+#define	SPORT2_TX			0xFFC02510	/* SPORT2 TX Data Register */
+#define	SPORT2_RX			0xFFC02518	/* SPORT2 RX Data Register */
+#define	SPORT2_RCR1			0xFFC02520	/* SPORT2 Receive Configuration 1 Register */
+#define	SPORT2_RCR2			0xFFC02524	/* SPORT2 Receive Configuration 2 Register */
+#define	SPORT2_RCLKDIV		0xFFC02528	/* SPORT2 Receive Clock	Divider */
+#define	SPORT2_RFSDIV		0xFFC0252C	/* SPORT2 Receive Frame	Sync Divider */
+#define	SPORT2_STAT			0xFFC02530	/* SPORT2 Status Register */
+#define	SPORT2_CHNL			0xFFC02534	/* SPORT2 Current Channel Register */
+#define	SPORT2_MCMC1		0xFFC02538	/* SPORT2 Multi-Channel	Configuration Register 1 */
+#define	SPORT2_MCMC2		0xFFC0253C	/* SPORT2 Multi-Channel	Configuration Register 2 */
+#define	SPORT2_MTCS0		0xFFC02540	/* SPORT2 Multi-Channel	Transmit Select	Register 0 */
+#define	SPORT2_MTCS1		0xFFC02544	/* SPORT2 Multi-Channel	Transmit Select	Register 1 */
+#define	SPORT2_MTCS2		0xFFC02548	/* SPORT2 Multi-Channel	Transmit Select	Register 2 */
+#define	SPORT2_MTCS3		0xFFC0254C	/* SPORT2 Multi-Channel	Transmit Select	Register 3 */
+#define	SPORT2_MRCS0		0xFFC02550	/* SPORT2 Multi-Channel	Receive	Select Register	0 */
+#define	SPORT2_MRCS1		0xFFC02554	/* SPORT2 Multi-Channel	Receive	Select Register	1 */
+#define	SPORT2_MRCS2		0xFFC02558	/* SPORT2 Multi-Channel	Receive	Select Register	2 */
+#define	SPORT2_MRCS3		0xFFC0255C	/* SPORT2 Multi-Channel	Receive	Select Register	3 */
+
+
+/* SPORT3 Controller		(0xFFC02600 - 0xFFC026FF)			 */
+#define	SPORT3_TCR1			0xFFC02600	/* SPORT3 Transmit Configuration 1 Register */
+#define	SPORT3_TCR2			0xFFC02604	/* SPORT3 Transmit Configuration 2 Register */
+#define	SPORT3_TCLKDIV		0xFFC02608	/* SPORT3 Transmit Clock Divider */
+#define	SPORT3_TFSDIV		0xFFC0260C	/* SPORT3 Transmit Frame Sync Divider */
+#define	SPORT3_TX			0xFFC02610	/* SPORT3 TX Data Register */
+#define	SPORT3_RX			0xFFC02618	/* SPORT3 RX Data Register */
+#define	SPORT3_RCR1			0xFFC02620	/* SPORT3 Receive Configuration 1 Register */
+#define	SPORT3_RCR2			0xFFC02624	/* SPORT3 Receive Configuration 2 Register */
+#define	SPORT3_RCLKDIV		0xFFC02628	/* SPORT3 Receive Clock	Divider */
+#define	SPORT3_RFSDIV		0xFFC0262C	/* SPORT3 Receive Frame	Sync Divider */
+#define	SPORT3_STAT			0xFFC02630	/* SPORT3 Status Register */
+#define	SPORT3_CHNL			0xFFC02634	/* SPORT3 Current Channel Register */
+#define	SPORT3_MCMC1		0xFFC02638	/* SPORT3 Multi-Channel	Configuration Register 1 */
+#define	SPORT3_MCMC2		0xFFC0263C	/* SPORT3 Multi-Channel	Configuration Register 2 */
+#define	SPORT3_MTCS0		0xFFC02640	/* SPORT3 Multi-Channel	Transmit Select	Register 0 */
+#define	SPORT3_MTCS1		0xFFC02644	/* SPORT3 Multi-Channel	Transmit Select	Register 1 */
+#define	SPORT3_MTCS2		0xFFC02648	/* SPORT3 Multi-Channel	Transmit Select	Register 2 */
+#define	SPORT3_MTCS3		0xFFC0264C	/* SPORT3 Multi-Channel	Transmit Select	Register 3 */
+#define	SPORT3_MRCS0		0xFFC02650	/* SPORT3 Multi-Channel	Receive	Select Register	0 */
+#define	SPORT3_MRCS1		0xFFC02654	/* SPORT3 Multi-Channel	Receive	Select Register	1 */
+#define	SPORT3_MRCS2		0xFFC02658	/* SPORT3 Multi-Channel	Receive	Select Register	2 */
+#define	SPORT3_MRCS3		0xFFC0265C	/* SPORT3 Multi-Channel	Receive	Select Register	3 */
+
+
+/* CAN Controller		(0xFFC02A00 - 0xFFC02FFF)				 */
+/* For Mailboxes 0-15											 */
+#define	CAN_MC1				0xFFC02A00	/* Mailbox config reg 1	 */
+#define	CAN_MD1				0xFFC02A04	/* Mailbox direction reg 1 */
+#define	CAN_TRS1			0xFFC02A08	/* Transmit Request Set	reg 1 */
+#define	CAN_TRR1			0xFFC02A0C	/* Transmit Request Reset reg 1 */
+#define	CAN_TA1				0xFFC02A10	/* Transmit Acknowledge	reg 1 */
+#define	CAN_AA1				0xFFC02A14	/* Transmit Abort Acknowledge reg 1 */
+#define	CAN_RMP1			0xFFC02A18	/* Receive Message Pending reg 1 */
+#define	CAN_RML1			0xFFC02A1C	/* Receive Message Lost	reg 1 */
+#define	CAN_MBTIF1			0xFFC02A20	/* Mailbox Transmit Interrupt Flag reg 1 */
+#define	CAN_MBRIF1			0xFFC02A24	/* Mailbox Receive Interrupt Flag reg 1 */
+#define	CAN_MBIM1			0xFFC02A28	/* Mailbox Interrupt Mask reg 1 */
+#define	CAN_RFH1			0xFFC02A2C	/* Remote Frame	Handling reg 1 */
+#define	CAN_OPSS1			0xFFC02A30	/* Overwrite Protection	Single Shot Xmission reg 1 */
+
+/* For Mailboxes 16-31											 */
+#define	CAN_MC2				0xFFC02A40	/* Mailbox config reg 2	 */
+#define	CAN_MD2				0xFFC02A44	/* Mailbox direction reg 2 */
+#define	CAN_TRS2			0xFFC02A48	/* Transmit Request Set	reg 2 */
+#define	CAN_TRR2			0xFFC02A4C	/* Transmit Request Reset reg 2 */
+#define	CAN_TA2				0xFFC02A50	/* Transmit Acknowledge	reg 2 */
+#define	CAN_AA2				0xFFC02A54	/* Transmit Abort Acknowledge reg 2 */
+#define	CAN_RMP2			0xFFC02A58	/* Receive Message Pending reg 2 */
+#define	CAN_RML2			0xFFC02A5C	/* Receive Message Lost	reg 2 */
+#define	CAN_MBTIF2			0xFFC02A60	/* Mailbox Transmit Interrupt Flag reg 2 */
+#define	CAN_MBRIF2			0xFFC02A64	/* Mailbox Receive Interrupt Flag reg 2 */
+#define	CAN_MBIM2			0xFFC02A68	/* Mailbox Interrupt Mask reg 2 */
+#define	CAN_RFH2			0xFFC02A6C	/* Remote Frame	Handling reg 2 */
+#define	CAN_OPSS2			0xFFC02A70	/* Overwrite Protection	Single Shot Xmission reg 2 */
+
+#define	CAN_CLOCK			0xFFC02A80	/* Bit Timing Configuration register 0 */
+#define	CAN_TIMING			0xFFC02A84	/* Bit Timing Configuration register 1 */
+
+#define	CAN_DEBUG			0xFFC02A88	/* Debug Register		 */
+/* the following is for	backwards compatibility */
+#define	CAN_CNF		 CAN_DEBUG
+
+#define	CAN_STATUS			0xFFC02A8C	/* Global Status Register */
+#define	CAN_CEC				0xFFC02A90	/* Error Counter Register */
+#define	CAN_GIS				0xFFC02A94	/* Global Interrupt Status Register */
+#define	CAN_GIM				0xFFC02A98	/* Global Interrupt Mask Register */
+#define	CAN_GIF				0xFFC02A9C	/* Global Interrupt Flag Register */
+#define	CAN_CONTROL			0xFFC02AA0	/* Master Control Register */
+#define	CAN_INTR			0xFFC02AA4	/* Interrupt Pending Register */
+#define	CAN_MBTD			0xFFC02AAC	/* Mailbox Temporary Disable Feature */
+#define	CAN_EWR				0xFFC02AB0	/* Programmable	Warning	Level */
+#define	CAN_ESR				0xFFC02AB4	/* Error Status	Register */
+#define	CAN_UCCNT			0xFFC02AC4	/* Universal Counter	 */
+#define	CAN_UCRC			0xFFC02AC8	/* Universal Counter Reload/Capture Register */
+#define	CAN_UCCNF			0xFFC02ACC	/* Universal Counter Configuration Register */
+
+/* Mailbox Acceptance Masks					 */
+#define	CAN_AM00L			0xFFC02B00	/* Mailbox 0 Low Acceptance Mask */
+#define	CAN_AM00H			0xFFC02B04	/* Mailbox 0 High Acceptance Mask */
+#define	CAN_AM01L			0xFFC02B08	/* Mailbox 1 Low Acceptance Mask */
+#define	CAN_AM01H			0xFFC02B0C	/* Mailbox 1 High Acceptance Mask */
+#define	CAN_AM02L			0xFFC02B10	/* Mailbox 2 Low Acceptance Mask */
+#define	CAN_AM02H			0xFFC02B14	/* Mailbox 2 High Acceptance Mask */
+#define	CAN_AM03L			0xFFC02B18	/* Mailbox 3 Low Acceptance Mask */
+#define	CAN_AM03H			0xFFC02B1C	/* Mailbox 3 High Acceptance Mask */
+#define	CAN_AM04L			0xFFC02B20	/* Mailbox 4 Low Acceptance Mask */
+#define	CAN_AM04H			0xFFC02B24	/* Mailbox 4 High Acceptance Mask */
+#define	CAN_AM05L			0xFFC02B28	/* Mailbox 5 Low Acceptance Mask */
+#define	CAN_AM05H			0xFFC02B2C	/* Mailbox 5 High Acceptance Mask */
+#define	CAN_AM06L			0xFFC02B30	/* Mailbox 6 Low Acceptance Mask */
+#define	CAN_AM06H			0xFFC02B34	/* Mailbox 6 High Acceptance Mask */
+#define	CAN_AM07L			0xFFC02B38	/* Mailbox 7 Low Acceptance Mask */
+#define	CAN_AM07H			0xFFC02B3C	/* Mailbox 7 High Acceptance Mask */
+#define	CAN_AM08L			0xFFC02B40	/* Mailbox 8 Low Acceptance Mask */
+#define	CAN_AM08H			0xFFC02B44	/* Mailbox 8 High Acceptance Mask */
+#define	CAN_AM09L			0xFFC02B48	/* Mailbox 9 Low Acceptance Mask */
+#define	CAN_AM09H			0xFFC02B4C	/* Mailbox 9 High Acceptance Mask */
+#define	CAN_AM10L			0xFFC02B50	/* Mailbox 10 Low Acceptance Mask */
+#define	CAN_AM10H			0xFFC02B54	/* Mailbox 10 High Acceptance Mask */
+#define	CAN_AM11L			0xFFC02B58	/* Mailbox 11 Low Acceptance Mask */
+#define	CAN_AM11H			0xFFC02B5C	/* Mailbox 11 High Acceptance Mask */
+#define	CAN_AM12L			0xFFC02B60	/* Mailbox 12 Low Acceptance Mask */
+#define	CAN_AM12H			0xFFC02B64	/* Mailbox 12 High Acceptance Mask */
+#define	CAN_AM13L			0xFFC02B68	/* Mailbox 13 Low Acceptance Mask */
+#define	CAN_AM13H			0xFFC02B6C	/* Mailbox 13 High Acceptance Mask */
+#define	CAN_AM14L			0xFFC02B70	/* Mailbox 14 Low Acceptance Mask */
+#define	CAN_AM14H			0xFFC02B74	/* Mailbox 14 High Acceptance Mask */
+#define	CAN_AM15L			0xFFC02B78	/* Mailbox 15 Low Acceptance Mask */
+#define	CAN_AM15H			0xFFC02B7C	/* Mailbox 15 High Acceptance Mask */
+
+#define	CAN_AM16L			0xFFC02B80	/* Mailbox 16 Low Acceptance Mask */
+#define	CAN_AM16H			0xFFC02B84	/* Mailbox 16 High Acceptance Mask */
+#define	CAN_AM17L			0xFFC02B88	/* Mailbox 17 Low Acceptance Mask */
+#define	CAN_AM17H			0xFFC02B8C	/* Mailbox 17 High Acceptance Mask */
+#define	CAN_AM18L			0xFFC02B90	/* Mailbox 18 Low Acceptance Mask */
+#define	CAN_AM18H			0xFFC02B94	/* Mailbox 18 High Acceptance Mask */
+#define	CAN_AM19L			0xFFC02B98	/* Mailbox 19 Low Acceptance Mask */
+#define	CAN_AM19H			0xFFC02B9C	/* Mailbox 19 High Acceptance Mask */
+#define	CAN_AM20L			0xFFC02BA0	/* Mailbox 20 Low Acceptance Mask */
+#define	CAN_AM20H			0xFFC02BA4	/* Mailbox 20 High Acceptance Mask */
+#define	CAN_AM21L			0xFFC02BA8	/* Mailbox 21 Low Acceptance Mask */
+#define	CAN_AM21H			0xFFC02BAC	/* Mailbox 21 High Acceptance Mask */
+#define	CAN_AM22L			0xFFC02BB0	/* Mailbox 22 Low Acceptance Mask */
+#define	CAN_AM22H			0xFFC02BB4	/* Mailbox 22 High Acceptance Mask */
+#define	CAN_AM23L			0xFFC02BB8	/* Mailbox 23 Low Acceptance Mask */
+#define	CAN_AM23H			0xFFC02BBC	/* Mailbox 23 High Acceptance Mask */
+#define	CAN_AM24L			0xFFC02BC0	/* Mailbox 24 Low Acceptance Mask */
+#define	CAN_AM24H			0xFFC02BC4	/* Mailbox 24 High Acceptance Mask */
+#define	CAN_AM25L			0xFFC02BC8	/* Mailbox 25 Low Acceptance Mask */
+#define	CAN_AM25H			0xFFC02BCC	/* Mailbox 25 High Acceptance Mask */
+#define	CAN_AM26L			0xFFC02BD0	/* Mailbox 26 Low Acceptance Mask */
+#define	CAN_AM26H			0xFFC02BD4	/* Mailbox 26 High Acceptance Mask */
+#define	CAN_AM27L			0xFFC02BD8	/* Mailbox 27 Low Acceptance Mask */
+#define	CAN_AM27H			0xFFC02BDC	/* Mailbox 27 High Acceptance Mask */
+#define	CAN_AM28L			0xFFC02BE0	/* Mailbox 28 Low Acceptance Mask */
+#define	CAN_AM28H			0xFFC02BE4	/* Mailbox 28 High Acceptance Mask */
+#define	CAN_AM29L			0xFFC02BE8	/* Mailbox 29 Low Acceptance Mask */
+#define	CAN_AM29H			0xFFC02BEC	/* Mailbox 29 High Acceptance Mask */
+#define	CAN_AM30L			0xFFC02BF0	/* Mailbox 30 Low Acceptance Mask */
+#define	CAN_AM30H			0xFFC02BF4	/* Mailbox 30 High Acceptance Mask */
+#define	CAN_AM31L			0xFFC02BF8	/* Mailbox 31 Low Acceptance Mask */
+#define	CAN_AM31H			0xFFC02BFC	/* Mailbox 31 High Acceptance Mask */
+
+/* CAN Acceptance Mask Macros */
+#define	CAN_AM_L(x)			(CAN_AM00L+((x)*0x8))
+#define	CAN_AM_H(x)			(CAN_AM00H+((x)*0x8))
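+/*
+ * Illustrative note (not part of the original register map): each mailbox's
+ * low/high acceptance-mask pair occupies 8 bytes, so the helpers above simply
+ * index into the block, e.g. CAN_AM_L(5) evaluates to CAN_AM05L (0xFFC02B28)
+ * and CAN_AM_H(5) to CAN_AM05H (0xFFC02B2C).
+ */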
+
+/* Mailbox Registers									 */
+#define	CAN_MB00_DATA0		0xFFC02C00	/* Mailbox 0 Data Word 0 [15:0]	Register */
+#define	CAN_MB00_DATA1		0xFFC02C04	/* Mailbox 0 Data Word 1 [31:16] Register */
+#define	CAN_MB00_DATA2		0xFFC02C08	/* Mailbox 0 Data Word 2 [47:32] Register */
+#define	CAN_MB00_DATA3		0xFFC02C0C	/* Mailbox 0 Data Word 3 [63:48] Register */
+#define	CAN_MB00_LENGTH		0xFFC02C10	/* Mailbox 0 Data Length Code Register */
+#define	CAN_MB00_TIMESTAMP	0xFFC02C14	/* Mailbox 0 Time Stamp	Value Register */
+#define	CAN_MB00_ID0		0xFFC02C18	/* Mailbox 0 Identifier	Low Register */
+#define	CAN_MB00_ID1		0xFFC02C1C	/* Mailbox 0 Identifier	High Register */
+
+#define	CAN_MB01_DATA0		0xFFC02C20	/* Mailbox 1 Data Word 0 [15:0]	Register */
+#define	CAN_MB01_DATA1		0xFFC02C24	/* Mailbox 1 Data Word 1 [31:16] Register */
+#define	CAN_MB01_DATA2		0xFFC02C28	/* Mailbox 1 Data Word 2 [47:32] Register */
+#define	CAN_MB01_DATA3		0xFFC02C2C	/* Mailbox 1 Data Word 3 [63:48] Register */
+#define	CAN_MB01_LENGTH		0xFFC02C30	/* Mailbox 1 Data Length Code Register */
+#define	CAN_MB01_TIMESTAMP	0xFFC02C34	/* Mailbox 1 Time Stamp	Value Register */
+#define	CAN_MB01_ID0		0xFFC02C38	/* Mailbox 1 Identifier	Low Register */
+#define	CAN_MB01_ID1		0xFFC02C3C	/* Mailbox 1 Identifier	High Register */
+
+#define	CAN_MB02_DATA0		0xFFC02C40	/* Mailbox 2 Data Word 0 [15:0]	Register */
+#define	CAN_MB02_DATA1		0xFFC02C44	/* Mailbox 2 Data Word 1 [31:16] Register */
+#define	CAN_MB02_DATA2		0xFFC02C48	/* Mailbox 2 Data Word 2 [47:32] Register */
+#define	CAN_MB02_DATA3		0xFFC02C4C	/* Mailbox 2 Data Word 3 [63:48] Register */
+#define	CAN_MB02_LENGTH		0xFFC02C50	/* Mailbox 2 Data Length Code Register */
+#define	CAN_MB02_TIMESTAMP	0xFFC02C54	/* Mailbox 2 Time Stamp	Value Register */
+#define	CAN_MB02_ID0		0xFFC02C58	/* Mailbox 2 Identifier	Low Register */
+#define	CAN_MB02_ID1		0xFFC02C5C	/* Mailbox 2 Identifier	High Register */
+
+#define	CAN_MB03_DATA0		0xFFC02C60	/* Mailbox 3 Data Word 0 [15:0]	Register */
+#define	CAN_MB03_DATA1		0xFFC02C64	/* Mailbox 3 Data Word 1 [31:16] Register */
+#define	CAN_MB03_DATA2		0xFFC02C68	/* Mailbox 3 Data Word 2 [47:32] Register */
+#define	CAN_MB03_DATA3		0xFFC02C6C	/* Mailbox 3 Data Word 3 [63:48] Register */
+#define	CAN_MB03_LENGTH		0xFFC02C70	/* Mailbox 3 Data Length Code Register */
+#define	CAN_MB03_TIMESTAMP	0xFFC02C74	/* Mailbox 3 Time Stamp	Value Register */
+#define	CAN_MB03_ID0		0xFFC02C78	/* Mailbox 3 Identifier	Low Register */
+#define	CAN_MB03_ID1		0xFFC02C7C	/* Mailbox 3 Identifier	High Register */
+
+#define	CAN_MB04_DATA0		0xFFC02C80	/* Mailbox 4 Data Word 0 [15:0]	Register */
+#define	CAN_MB04_DATA1		0xFFC02C84	/* Mailbox 4 Data Word 1 [31:16] Register */
+#define	CAN_MB04_DATA2		0xFFC02C88	/* Mailbox 4 Data Word 2 [47:32] Register */
+#define	CAN_MB04_DATA3		0xFFC02C8C	/* Mailbox 4 Data Word 3 [63:48] Register */
+#define	CAN_MB04_LENGTH		0xFFC02C90	/* Mailbox 4 Data Length Code Register */
+#define	CAN_MB04_TIMESTAMP	0xFFC02C94	/* Mailbox 4 Time Stamp	Value Register */
+#define	CAN_MB04_ID0		0xFFC02C98	/* Mailbox 4 Identifier	Low Register */
+#define	CAN_MB04_ID1		0xFFC02C9C	/* Mailbox 4 Identifier	High Register */
+
+#define	CAN_MB05_DATA0		0xFFC02CA0	/* Mailbox 5 Data Word 0 [15:0]	Register */
+#define	CAN_MB05_DATA1		0xFFC02CA4	/* Mailbox 5 Data Word 1 [31:16] Register */
+#define	CAN_MB05_DATA2		0xFFC02CA8	/* Mailbox 5 Data Word 2 [47:32] Register */
+#define	CAN_MB05_DATA3		0xFFC02CAC	/* Mailbox 5 Data Word 3 [63:48] Register */
+#define	CAN_MB05_LENGTH		0xFFC02CB0	/* Mailbox 5 Data Length Code Register */
+#define	CAN_MB05_TIMESTAMP	0xFFC02CB4	/* Mailbox 5 Time Stamp	Value Register */
+#define	CAN_MB05_ID0		0xFFC02CB8	/* Mailbox 5 Identifier	Low Register */
+#define	CAN_MB05_ID1		0xFFC02CBC	/* Mailbox 5 Identifier	High Register */
+
+#define	CAN_MB06_DATA0		0xFFC02CC0	/* Mailbox 6 Data Word 0 [15:0]	Register */
+#define	CAN_MB06_DATA1		0xFFC02CC4	/* Mailbox 6 Data Word 1 [31:16] Register */
+#define	CAN_MB06_DATA2		0xFFC02CC8	/* Mailbox 6 Data Word 2 [47:32] Register */
+#define	CAN_MB06_DATA3		0xFFC02CCC	/* Mailbox 6 Data Word 3 [63:48] Register */
+#define	CAN_MB06_LENGTH		0xFFC02CD0	/* Mailbox 6 Data Length Code Register */
+#define	CAN_MB06_TIMESTAMP	0xFFC02CD4	/* Mailbox 6 Time Stamp	Value Register */
+#define	CAN_MB06_ID0		0xFFC02CD8	/* Mailbox 6 Identifier	Low Register */
+#define	CAN_MB06_ID1		0xFFC02CDC	/* Mailbox 6 Identifier	High Register */
+
+#define	CAN_MB07_DATA0		0xFFC02CE0	/* Mailbox 7 Data Word 0 [15:0]	Register */
+#define	CAN_MB07_DATA1		0xFFC02CE4	/* Mailbox 7 Data Word 1 [31:16] Register */
+#define	CAN_MB07_DATA2		0xFFC02CE8	/* Mailbox 7 Data Word 2 [47:32] Register */
+#define	CAN_MB07_DATA3		0xFFC02CEC	/* Mailbox 7 Data Word 3 [63:48] Register */
+#define	CAN_MB07_LENGTH		0xFFC02CF0	/* Mailbox 7 Data Length Code Register */
+#define	CAN_MB07_TIMESTAMP	0xFFC02CF4	/* Mailbox 7 Time Stamp	Value Register */
+#define	CAN_MB07_ID0		0xFFC02CF8	/* Mailbox 7 Identifier	Low Register */
+#define	CAN_MB07_ID1		0xFFC02CFC	/* Mailbox 7 Identifier	High Register */
+
+#define	CAN_MB08_DATA0		0xFFC02D00	/* Mailbox 8 Data Word 0 [15:0]	Register */
+#define	CAN_MB08_DATA1		0xFFC02D04	/* Mailbox 8 Data Word 1 [31:16] Register */
+#define	CAN_MB08_DATA2		0xFFC02D08	/* Mailbox 8 Data Word 2 [47:32] Register */
+#define	CAN_MB08_DATA3		0xFFC02D0C	/* Mailbox 8 Data Word 3 [63:48] Register */
+#define	CAN_MB08_LENGTH		0xFFC02D10	/* Mailbox 8 Data Length Code Register */
+#define	CAN_MB08_TIMESTAMP	0xFFC02D14	/* Mailbox 8 Time Stamp	Value Register */
+#define	CAN_MB08_ID0		0xFFC02D18	/* Mailbox 8 Identifier	Low Register */
+#define	CAN_MB08_ID1		0xFFC02D1C	/* Mailbox 8 Identifier	High Register */
+
+#define	CAN_MB09_DATA0		0xFFC02D20	/* Mailbox 9 Data Word 0 [15:0]	Register */
+#define	CAN_MB09_DATA1		0xFFC02D24	/* Mailbox 9 Data Word 1 [31:16] Register */
+#define	CAN_MB09_DATA2		0xFFC02D28	/* Mailbox 9 Data Word 2 [47:32] Register */
+#define	CAN_MB09_DATA3		0xFFC02D2C	/* Mailbox 9 Data Word 3 [63:48] Register */
+#define	CAN_MB09_LENGTH		0xFFC02D30	/* Mailbox 9 Data Length Code Register */
+#define	CAN_MB09_TIMESTAMP	0xFFC02D34	/* Mailbox 9 Time Stamp	Value Register */
+#define	CAN_MB09_ID0		0xFFC02D38	/* Mailbox 9 Identifier	Low Register */
+#define	CAN_MB09_ID1		0xFFC02D3C	/* Mailbox 9 Identifier	High Register */
+
+#define	CAN_MB10_DATA0		0xFFC02D40	/* Mailbox 10 Data Word	0 [15:0] Register */
+#define	CAN_MB10_DATA1		0xFFC02D44	/* Mailbox 10 Data Word	1 [31:16] Register */
+#define	CAN_MB10_DATA2		0xFFC02D48	/* Mailbox 10 Data Word	2 [47:32] Register */
+#define	CAN_MB10_DATA3		0xFFC02D4C	/* Mailbox 10 Data Word	3 [63:48] Register */
+#define	CAN_MB10_LENGTH		0xFFC02D50	/* Mailbox 10 Data Length Code Register */
+#define	CAN_MB10_TIMESTAMP	0xFFC02D54	/* Mailbox 10 Time Stamp Value Register */
+#define	CAN_MB10_ID0		0xFFC02D58	/* Mailbox 10 Identifier Low Register */
+#define	CAN_MB10_ID1		0xFFC02D5C	/* Mailbox 10 Identifier High Register */
+
+#define	CAN_MB11_DATA0		0xFFC02D60	/* Mailbox 11 Data Word	0 [15:0] Register */
+#define	CAN_MB11_DATA1		0xFFC02D64	/* Mailbox 11 Data Word	1 [31:16] Register */
+#define	CAN_MB11_DATA2		0xFFC02D68	/* Mailbox 11 Data Word	2 [47:32] Register */
+#define	CAN_MB11_DATA3		0xFFC02D6C	/* Mailbox 11 Data Word	3 [63:48] Register */
+#define	CAN_MB11_LENGTH		0xFFC02D70	/* Mailbox 11 Data Length Code Register */
+#define	CAN_MB11_TIMESTAMP	0xFFC02D74	/* Mailbox 11 Time Stamp Value Register */
+#define	CAN_MB11_ID0		0xFFC02D78	/* Mailbox 11 Identifier Low Register */
+#define	CAN_MB11_ID1		0xFFC02D7C	/* Mailbox 11 Identifier High Register */
+
+#define	CAN_MB12_DATA0		0xFFC02D80	/* Mailbox 12 Data Word	0 [15:0] Register */
+#define	CAN_MB12_DATA1		0xFFC02D84	/* Mailbox 12 Data Word	1 [31:16] Register */
+#define	CAN_MB12_DATA2		0xFFC02D88	/* Mailbox 12 Data Word	2 [47:32] Register */
+#define	CAN_MB12_DATA3		0xFFC02D8C	/* Mailbox 12 Data Word	3 [63:48] Register */
+#define	CAN_MB12_LENGTH		0xFFC02D90	/* Mailbox 12 Data Length Code Register */
+#define	CAN_MB12_TIMESTAMP	0xFFC02D94	/* Mailbox 12 Time Stamp Value Register */
+#define	CAN_MB12_ID0		0xFFC02D98	/* Mailbox 12 Identifier Low Register */
+#define	CAN_MB12_ID1		0xFFC02D9C	/* Mailbox 12 Identifier High Register */
+
+#define	CAN_MB13_DATA0		0xFFC02DA0	/* Mailbox 13 Data Word	0 [15:0] Register */
+#define	CAN_MB13_DATA1		0xFFC02DA4	/* Mailbox 13 Data Word	1 [31:16] Register */
+#define	CAN_MB13_DATA2		0xFFC02DA8	/* Mailbox 13 Data Word	2 [47:32] Register */
+#define	CAN_MB13_DATA3		0xFFC02DAC	/* Mailbox 13 Data Word	3 [63:48] Register */
+#define	CAN_MB13_LENGTH		0xFFC02DB0	/* Mailbox 13 Data Length Code Register */
+#define	CAN_MB13_TIMESTAMP	0xFFC02DB4	/* Mailbox 13 Time Stamp Value Register */
+#define	CAN_MB13_ID0		0xFFC02DB8	/* Mailbox 13 Identifier Low Register */
+#define	CAN_MB13_ID1		0xFFC02DBC	/* Mailbox 13 Identifier High Register */
+
+#define	CAN_MB14_DATA0		0xFFC02DC0	/* Mailbox 14 Data Word	0 [15:0] Register */
+#define	CAN_MB14_DATA1		0xFFC02DC4	/* Mailbox 14 Data Word	1 [31:16] Register */
+#define	CAN_MB14_DATA2		0xFFC02DC8	/* Mailbox 14 Data Word	2 [47:32] Register */
+#define	CAN_MB14_DATA3		0xFFC02DCC	/* Mailbox 14 Data Word	3 [63:48] Register */
+#define	CAN_MB14_LENGTH		0xFFC02DD0	/* Mailbox 14 Data Length Code Register */
+#define	CAN_MB14_TIMESTAMP	0xFFC02DD4	/* Mailbox 14 Time Stamp Value Register */
+#define	CAN_MB14_ID0		0xFFC02DD8	/* Mailbox 14 Identifier Low Register */
+#define	CAN_MB14_ID1		0xFFC02DDC	/* Mailbox 14 Identifier High Register */
+
+#define	CAN_MB15_DATA0		0xFFC02DE0	/* Mailbox 15 Data Word	0 [15:0] Register */
+#define	CAN_MB15_DATA1		0xFFC02DE4	/* Mailbox 15 Data Word	1 [31:16] Register */
+#define	CAN_MB15_DATA2		0xFFC02DE8	/* Mailbox 15 Data Word	2 [47:32] Register */
+#define	CAN_MB15_DATA3		0xFFC02DEC	/* Mailbox 15 Data Word	3 [63:48] Register */
+#define	CAN_MB15_LENGTH		0xFFC02DF0	/* Mailbox 15 Data Length Code Register */
+#define	CAN_MB15_TIMESTAMP	0xFFC02DF4	/* Mailbox 15 Time Stamp Value Register */
+#define	CAN_MB15_ID0		0xFFC02DF8	/* Mailbox 15 Identifier Low Register */
+#define	CAN_MB15_ID1		0xFFC02DFC	/* Mailbox 15 Identifier High Register */
+
+#define	CAN_MB16_DATA0		0xFFC02E00	/* Mailbox 16 Data Word	0 [15:0] Register */
+#define	CAN_MB16_DATA1		0xFFC02E04	/* Mailbox 16 Data Word	1 [31:16] Register */
+#define	CAN_MB16_DATA2		0xFFC02E08	/* Mailbox 16 Data Word	2 [47:32] Register */
+#define	CAN_MB16_DATA3		0xFFC02E0C	/* Mailbox 16 Data Word	3 [63:48] Register */
+#define	CAN_MB16_LENGTH		0xFFC02E10	/* Mailbox 16 Data Length Code Register */
+#define	CAN_MB16_TIMESTAMP	0xFFC02E14	/* Mailbox 16 Time Stamp Value Register */
+#define	CAN_MB16_ID0		0xFFC02E18	/* Mailbox 16 Identifier Low Register */
+#define	CAN_MB16_ID1		0xFFC02E1C	/* Mailbox 16 Identifier High Register */
+
+#define	CAN_MB17_DATA0		0xFFC02E20	/* Mailbox 17 Data Word	0 [15:0] Register */
+#define	CAN_MB17_DATA1		0xFFC02E24	/* Mailbox 17 Data Word	1 [31:16] Register */
+#define	CAN_MB17_DATA2		0xFFC02E28	/* Mailbox 17 Data Word	2 [47:32] Register */
+#define	CAN_MB17_DATA3		0xFFC02E2C	/* Mailbox 17 Data Word	3 [63:48] Register */
+#define	CAN_MB17_LENGTH		0xFFC02E30	/* Mailbox 17 Data Length Code Register */
+#define	CAN_MB17_TIMESTAMP	0xFFC02E34	/* Mailbox 17 Time Stamp Value Register */
+#define	CAN_MB17_ID0		0xFFC02E38	/* Mailbox 17 Identifier Low Register */
+#define	CAN_MB17_ID1		0xFFC02E3C	/* Mailbox 17 Identifier High Register */
+
+#define	CAN_MB18_DATA0		0xFFC02E40	/* Mailbox 18 Data Word	0 [15:0] Register */
+#define	CAN_MB18_DATA1		0xFFC02E44	/* Mailbox 18 Data Word	1 [31:16] Register */
+#define	CAN_MB18_DATA2		0xFFC02E48	/* Mailbox 18 Data Word	2 [47:32] Register */
+#define	CAN_MB18_DATA3		0xFFC02E4C	/* Mailbox 18 Data Word	3 [63:48] Register */
+#define	CAN_MB18_LENGTH		0xFFC02E50	/* Mailbox 18 Data Length Code Register */
+#define	CAN_MB18_TIMESTAMP	0xFFC02E54	/* Mailbox 18 Time Stamp Value Register */
+#define	CAN_MB18_ID0		0xFFC02E58	/* Mailbox 18 Identifier Low Register */
+#define	CAN_MB18_ID1		0xFFC02E5C	/* Mailbox 18 Identifier High Register */
+
+#define	CAN_MB19_DATA0		0xFFC02E60	/* Mailbox 19 Data Word	0 [15:0] Register */
+#define	CAN_MB19_DATA1		0xFFC02E64	/* Mailbox 19 Data Word	1 [31:16] Register */
+#define	CAN_MB19_DATA2		0xFFC02E68	/* Mailbox 19 Data Word	2 [47:32] Register */
+#define	CAN_MB19_DATA3		0xFFC02E6C	/* Mailbox 19 Data Word	3 [63:48] Register */
+#define	CAN_MB19_LENGTH		0xFFC02E70	/* Mailbox 19 Data Length Code Register */
+#define	CAN_MB19_TIMESTAMP	0xFFC02E74	/* Mailbox 19 Time Stamp Value Register */
+#define	CAN_MB19_ID0		0xFFC02E78	/* Mailbox 19 Identifier Low Register */
+#define	CAN_MB19_ID1		0xFFC02E7C	/* Mailbox 19 Identifier High Register */
+
+#define	CAN_MB20_DATA0		0xFFC02E80	/* Mailbox 20 Data Word	0 [15:0] Register */
+#define	CAN_MB20_DATA1		0xFFC02E84	/* Mailbox 20 Data Word	1 [31:16] Register */
+#define	CAN_MB20_DATA2		0xFFC02E88	/* Mailbox 20 Data Word	2 [47:32] Register */
+#define	CAN_MB20_DATA3		0xFFC02E8C	/* Mailbox 20 Data Word	3 [63:48] Register */
+#define	CAN_MB20_LENGTH		0xFFC02E90	/* Mailbox 20 Data Length Code Register */
+#define	CAN_MB20_TIMESTAMP	0xFFC02E94	/* Mailbox 20 Time Stamp Value Register */
+#define	CAN_MB20_ID0		0xFFC02E98	/* Mailbox 20 Identifier Low Register */
+#define	CAN_MB20_ID1		0xFFC02E9C	/* Mailbox 20 Identifier High Register */
+
+#define	CAN_MB21_DATA0		0xFFC02EA0	/* Mailbox 21 Data Word	0 [15:0] Register */
+#define	CAN_MB21_DATA1		0xFFC02EA4	/* Mailbox 21 Data Word	1 [31:16] Register */
+#define	CAN_MB21_DATA2		0xFFC02EA8	/* Mailbox 21 Data Word	2 [47:32] Register */
+#define	CAN_MB21_DATA3		0xFFC02EAC	/* Mailbox 21 Data Word	3 [63:48] Register */
+#define	CAN_MB21_LENGTH		0xFFC02EB0	/* Mailbox 21 Data Length Code Register */
+#define	CAN_MB21_TIMESTAMP	0xFFC02EB4	/* Mailbox 21 Time Stamp Value Register */
+#define	CAN_MB21_ID0		0xFFC02EB8	/* Mailbox 21 Identifier Low Register */
+#define	CAN_MB21_ID1		0xFFC02EBC	/* Mailbox 21 Identifier High Register */
+
+#define	CAN_MB22_DATA0		0xFFC02EC0	/* Mailbox 22 Data Word	0 [15:0] Register */
+#define	CAN_MB22_DATA1		0xFFC02EC4	/* Mailbox 22 Data Word	1 [31:16] Register */
+#define	CAN_MB22_DATA2		0xFFC02EC8	/* Mailbox 22 Data Word	2 [47:32] Register */
+#define	CAN_MB22_DATA3		0xFFC02ECC	/* Mailbox 22 Data Word	3 [63:48] Register */
+#define	CAN_MB22_LENGTH		0xFFC02ED0	/* Mailbox 22 Data Length Code Register */
+#define	CAN_MB22_TIMESTAMP	0xFFC02ED4	/* Mailbox 22 Time Stamp Value Register */
+#define	CAN_MB22_ID0		0xFFC02ED8	/* Mailbox 22 Identifier Low Register */
+#define	CAN_MB22_ID1		0xFFC02EDC	/* Mailbox 22 Identifier High Register */
+
+#define	CAN_MB23_DATA0		0xFFC02EE0	/* Mailbox 23 Data Word	0 [15:0] Register */
+#define	CAN_MB23_DATA1		0xFFC02EE4	/* Mailbox 23 Data Word	1 [31:16] Register */
+#define	CAN_MB23_DATA2		0xFFC02EE8	/* Mailbox 23 Data Word	2 [47:32] Register */
+#define	CAN_MB23_DATA3		0xFFC02EEC	/* Mailbox 23 Data Word	3 [63:48] Register */
+#define	CAN_MB23_LENGTH		0xFFC02EF0	/* Mailbox 23 Data Length Code Register */
+#define	CAN_MB23_TIMESTAMP	0xFFC02EF4	/* Mailbox 23 Time Stamp Value Register */
+#define	CAN_MB23_ID0		0xFFC02EF8	/* Mailbox 23 Identifier Low Register */
+#define	CAN_MB23_ID1		0xFFC02EFC	/* Mailbox 23 Identifier High Register */
+
+#define	CAN_MB24_DATA0		0xFFC02F00	/* Mailbox 24 Data Word	0 [15:0] Register */
+#define	CAN_MB24_DATA1		0xFFC02F04	/* Mailbox 24 Data Word	1 [31:16] Register */
+#define	CAN_MB24_DATA2		0xFFC02F08	/* Mailbox 24 Data Word	2 [47:32] Register */
+#define	CAN_MB24_DATA3		0xFFC02F0C	/* Mailbox 24 Data Word	3 [63:48] Register */
+#define	CAN_MB24_LENGTH		0xFFC02F10	/* Mailbox 24 Data Length Code Register */
+#define	CAN_MB24_TIMESTAMP	0xFFC02F14	/* Mailbox 24 Time Stamp Value Register */
+#define	CAN_MB24_ID0		0xFFC02F18	/* Mailbox 24 Identifier Low Register */
+#define	CAN_MB24_ID1		0xFFC02F1C	/* Mailbox 24 Identifier High Register */
+
+#define	CAN_MB25_DATA0		0xFFC02F20	/* Mailbox 25 Data Word	0 [15:0] Register */
+#define	CAN_MB25_DATA1		0xFFC02F24	/* Mailbox 25 Data Word	1 [31:16] Register */
+#define	CAN_MB25_DATA2		0xFFC02F28	/* Mailbox 25 Data Word	2 [47:32] Register */
+#define	CAN_MB25_DATA3		0xFFC02F2C	/* Mailbox 25 Data Word	3 [63:48] Register */
+#define	CAN_MB25_LENGTH		0xFFC02F30	/* Mailbox 25 Data Length Code Register */
+#define	CAN_MB25_TIMESTAMP	0xFFC02F34	/* Mailbox 25 Time Stamp Value Register */
+#define	CAN_MB25_ID0		0xFFC02F38	/* Mailbox 25 Identifier Low Register */
+#define	CAN_MB25_ID1		0xFFC02F3C	/* Mailbox 25 Identifier High Register */
+
+#define	CAN_MB26_DATA0		0xFFC02F40	/* Mailbox 26 Data Word	0 [15:0] Register */
+#define	CAN_MB26_DATA1		0xFFC02F44	/* Mailbox 26 Data Word	1 [31:16] Register */
+#define	CAN_MB26_DATA2		0xFFC02F48	/* Mailbox 26 Data Word	2 [47:32] Register */
+#define	CAN_MB26_DATA3		0xFFC02F4C	/* Mailbox 26 Data Word	3 [63:48] Register */
+#define	CAN_MB26_LENGTH		0xFFC02F50	/* Mailbox 26 Data Length Code Register */
+#define	CAN_MB26_TIMESTAMP	0xFFC02F54	/* Mailbox 26 Time Stamp Value Register */
+#define	CAN_MB26_ID0		0xFFC02F58	/* Mailbox 26 Identifier Low Register */
+#define	CAN_MB26_ID1		0xFFC02F5C	/* Mailbox 26 Identifier High Register */
+
+#define	CAN_MB27_DATA0		0xFFC02F60	/* Mailbox 27 Data Word	0 [15:0] Register */
+#define	CAN_MB27_DATA1		0xFFC02F64	/* Mailbox 27 Data Word	1 [31:16] Register */
+#define	CAN_MB27_DATA2		0xFFC02F68	/* Mailbox 27 Data Word	2 [47:32] Register */
+#define	CAN_MB27_DATA3		0xFFC02F6C	/* Mailbox 27 Data Word	3 [63:48] Register */
+#define	CAN_MB27_LENGTH		0xFFC02F70	/* Mailbox 27 Data Length Code Register */
+#define	CAN_MB27_TIMESTAMP	0xFFC02F74	/* Mailbox 27 Time Stamp Value Register */
+#define	CAN_MB27_ID0		0xFFC02F78	/* Mailbox 27 Identifier Low Register */
+#define	CAN_MB27_ID1		0xFFC02F7C	/* Mailbox 27 Identifier High Register */
+
+#define	CAN_MB28_DATA0		0xFFC02F80	/* Mailbox 28 Data Word	0 [15:0] Register */
+#define	CAN_MB28_DATA1		0xFFC02F84	/* Mailbox 28 Data Word	1 [31:16] Register */
+#define	CAN_MB28_DATA2		0xFFC02F88	/* Mailbox 28 Data Word	2 [47:32] Register */
+#define	CAN_MB28_DATA3		0xFFC02F8C	/* Mailbox 28 Data Word	3 [63:48] Register */
+#define	CAN_MB28_LENGTH		0xFFC02F90	/* Mailbox 28 Data Length Code Register */
+#define	CAN_MB28_TIMESTAMP	0xFFC02F94	/* Mailbox 28 Time Stamp Value Register */
+#define	CAN_MB28_ID0		0xFFC02F98	/* Mailbox 28 Identifier Low Register */
+#define	CAN_MB28_ID1		0xFFC02F9C	/* Mailbox 28 Identifier High Register */
+
+#define	CAN_MB29_DATA0		0xFFC02FA0	/* Mailbox 29 Data Word	0 [15:0] Register */
+#define	CAN_MB29_DATA1		0xFFC02FA4	/* Mailbox 29 Data Word	1 [31:16] Register */
+#define	CAN_MB29_DATA2		0xFFC02FA8	/* Mailbox 29 Data Word	2 [47:32] Register */
+#define	CAN_MB29_DATA3		0xFFC02FAC	/* Mailbox 29 Data Word	3 [63:48] Register */
+#define	CAN_MB29_LENGTH		0xFFC02FB0	/* Mailbox 29 Data Length Code Register */
+#define	CAN_MB29_TIMESTAMP	0xFFC02FB4	/* Mailbox 29 Time Stamp Value Register */
+#define	CAN_MB29_ID0		0xFFC02FB8	/* Mailbox 29 Identifier Low Register */
+#define	CAN_MB29_ID1		0xFFC02FBC	/* Mailbox 29 Identifier High Register */
+
+#define	CAN_MB30_DATA0		0xFFC02FC0	/* Mailbox 30 Data Word	0 [15:0] Register */
+#define	CAN_MB30_DATA1		0xFFC02FC4	/* Mailbox 30 Data Word	1 [31:16] Register */
+#define	CAN_MB30_DATA2		0xFFC02FC8	/* Mailbox 30 Data Word	2 [47:32] Register */
+#define	CAN_MB30_DATA3		0xFFC02FCC	/* Mailbox 30 Data Word	3 [63:48] Register */
+#define	CAN_MB30_LENGTH		0xFFC02FD0	/* Mailbox 30 Data Length Code Register */
+#define	CAN_MB30_TIMESTAMP	0xFFC02FD4	/* Mailbox 30 Time Stamp Value Register */
+#define	CAN_MB30_ID0		0xFFC02FD8	/* Mailbox 30 Identifier Low Register */
+#define	CAN_MB30_ID1		0xFFC02FDC	/* Mailbox 30 Identifier High Register */
+
+#define	CAN_MB31_DATA0		0xFFC02FE0	/* Mailbox 31 Data Word	0 [15:0] Register */
+#define	CAN_MB31_DATA1		0xFFC02FE4	/* Mailbox 31 Data Word	1 [31:16] Register */
+#define	CAN_MB31_DATA2		0xFFC02FE8	/* Mailbox 31 Data Word	2 [47:32] Register */
+#define	CAN_MB31_DATA3		0xFFC02FEC	/* Mailbox 31 Data Word	3 [63:48] Register */
+#define	CAN_MB31_LENGTH		0xFFC02FF0	/* Mailbox 31 Data Length Code Register */
+#define	CAN_MB31_TIMESTAMP	0xFFC02FF4	/* Mailbox 31 Time Stamp Value Register */
+#define	CAN_MB31_ID0		0xFFC02FF8	/* Mailbox 31 Identifier Low Register */
+#define	CAN_MB31_ID1		0xFFC02FFC	/* Mailbox 31 Identifier High Register */
+
+/* CAN Mailbox Area Macros */
+#define	CAN_MB_ID1(x)		(CAN_MB00_ID1+((x)*0x20))
+#define	CAN_MB_ID0(x)		(CAN_MB00_ID0+((x)*0x20))
+#define	CAN_MB_TIMESTAMP(x)	(CAN_MB00_TIMESTAMP+((x)*0x20))
+#define	CAN_MB_LENGTH(x)	(CAN_MB00_LENGTH+((x)*0x20))
+#define	CAN_MB_DATA3(x)		(CAN_MB00_DATA3+((x)*0x20))
+#define	CAN_MB_DATA2(x)		(CAN_MB00_DATA2+((x)*0x20))
+#define	CAN_MB_DATA1(x)		(CAN_MB00_DATA1+((x)*0x20))
+#define	CAN_MB_DATA0(x)		(CAN_MB00_DATA0+((x)*0x20))
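+/*
+ * Illustrative note (not part of the original register map): each mailbox
+ * occupies a 0x20-byte block, so e.g. CAN_MB_DATA0(1) evaluates to
+ * CAN_MB01_DATA0 (0xFFC02C20) and CAN_MB_ID1(31) to CAN_MB31_ID1 (0xFFC02FFC).
+ */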
+
+
+/*********************************************************************************** */
+/* System MMR Register Bits and	Macros */
+/******************************************************************************* */
+
+/* SWRST Mask */
+#define	SYSTEM_RESET	0x0007	/* Initiates A System Software Reset */
+#define	DOUBLE_FAULT	0x0008	/* Core	Double Fault Causes Reset */
+#define	RESET_DOUBLE	0x2000	/* SW Reset Generated By Core Double-Fault */
+#define	RESET_WDOG		0x4000	/* SW Reset Generated By Watchdog Timer */
+#define	RESET_SOFTWARE	0x8000	/* SW Reset Occurred Since Last	Read Of	SWRST */
+
+/* SYSCR Masks													 */
+#define	BMODE			0x0006	/* Boot	Mode - Latched During HW Reset From Mode Pins */
+#define	NOBOOT			0x0010	/* Execute From	L1 or ASYNC Bank 0 When	BMODE =	0 */
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
+
+/* Peripheral Masks For	SIC0_ISR, SIC0_IWR, SIC0_IMASK */
+#define	PLL_WAKEUP_IRQ		0x00000001	/* PLL Wakeup Interrupt	Request */
+#define	DMAC0_ERR_IRQ		0x00000002	/* DMA Controller 0 Error Interrupt Request */
+#define	PPI_ERR_IRQ		0x00000004	/* PPI Error Interrupt Request */
+#define	SPORT0_ERR_IRQ		0x00000008	/* SPORT0 Error	Interrupt Request */
+#define	SPORT1_ERR_IRQ		0x00000010	/* SPORT1 Error	Interrupt Request */
+#define	SPI0_ERR_IRQ		0x00000020	/* SPI0	Error Interrupt	Request */
+#define	UART0_ERR_IRQ		0x00000040	/* UART0 Error Interrupt Request */
+#define	RTC_IRQ			0x00000080	/* Real-Time Clock Interrupt Request */
+#define	DMA0_IRQ		0x00000100	/* DMA Channel 0 (PPI) Interrupt Request */
+#define	DMA1_IRQ		0x00000200	/* DMA Channel 1 (SPORT0 RX) Interrupt Request */
+#define	DMA2_IRQ		0x00000400	/* DMA Channel 2 (SPORT0 TX) Interrupt Request */
+#define	DMA3_IRQ		0x00000800	/* DMA Channel 3 (SPORT1 RX) Interrupt Request */
+#define	DMA4_IRQ		0x00001000	/* DMA Channel 4 (SPORT1 TX) Interrupt Request */
+#define	DMA5_IRQ		0x00002000	/* DMA Channel 5 (SPI0) Interrupt Request */
+#define	DMA6_IRQ		0x00004000	/* DMA Channel 6 (UART0 RX) Interrupt Request */
+#define	DMA7_IRQ		0x00008000	/* DMA Channel 7 (UART0 TX) Interrupt Request */
+#define	TIMER0_IRQ		0x00010000	/* Timer 0 Interrupt Request */
+#define	TIMER1_IRQ		0x00020000	/* Timer 1 Interrupt Request */
+#define	TIMER2_IRQ		0x00040000	/* Timer 2 Interrupt Request */
+#define	PFA_IRQ			0x00080000	/* Programmable	Flag Interrupt Request A */
+#define	PFB_IRQ			0x00100000	/* Programmable	Flag Interrupt Request B */
+#define	MDMA0_0_IRQ		0x00200000	/* MemDMA0 Stream 0 Interrupt Request */
+#define	MDMA0_1_IRQ		0x00400000	/* MemDMA0 Stream 1 Interrupt Request */
+#define	WDOG_IRQ		0x00800000	/* Software Watchdog Timer Interrupt Request */
+#define	DMAC1_ERR_IRQ		0x01000000	/* DMA Controller 1 Error Interrupt Request */
+#define	SPORT2_ERR_IRQ		0x02000000	/* SPORT2 Error	Interrupt Request */
+#define	SPORT3_ERR_IRQ		0x04000000	/* SPORT3 Error	Interrupt Request */
+#define	MXVR_SD_IRQ		0x08000000	/* MXVR	Synchronous Data Interrupt Request */
+#define	SPI1_ERR_IRQ		0x10000000	/* SPI1	Error Interrupt	Request */
+#define	SPI2_ERR_IRQ		0x20000000	/* SPI2	Error Interrupt	Request */
+#define	UART1_ERR_IRQ		0x40000000	/* UART1 Error Interrupt Request */
+#define	UART2_ERR_IRQ		0x80000000	/* UART2 Error Interrupt Request */
+
+/* the following are for backwards compatibility */
+#define	DMA0_ERR_IRQ		DMAC0_ERR_IRQ
+#define	DMA1_ERR_IRQ		DMAC1_ERR_IRQ
+
+
+/* Peripheral Masks For	SIC_ISR1, SIC_IWR1, SIC_IMASK1	 */
+#define	CAN_ERR_IRQ			0x00000001	/* CAN Error Interrupt Request */
+#define	DMA8_IRQ			0x00000002	/* DMA Channel 8 (SPORT2 RX) Interrupt Request */
+#define	DMA9_IRQ			0x00000004	/* DMA Channel 9 (SPORT2 TX) Interrupt Request */
+#define	DMA10_IRQ			0x00000008	/* DMA Channel 10 (SPORT3 RX) Interrupt	Request */
+#define	DMA11_IRQ			0x00000010	/* DMA Channel 11 (SPORT3 TX) Interrupt	Request */
+#define	DMA12_IRQ			0x00000020	/* DMA Channel 12 Interrupt Request */
+#define	DMA13_IRQ			0x00000040	/* DMA Channel 13 Interrupt Request */
+#define	DMA14_IRQ			0x00000080	/* DMA Channel 14 (SPI1) Interrupt Request */
+#define	DMA15_IRQ			0x00000100	/* DMA Channel 15 (SPI2) Interrupt Request */
+#define	DMA16_IRQ			0x00000200	/* DMA Channel 16 (UART1 RX) Interrupt Request */
+#define	DMA17_IRQ			0x00000400	/* DMA Channel 17 (UART1 TX) Interrupt Request */
+#define	DMA18_IRQ			0x00000800	/* DMA Channel 18 (UART2 RX) Interrupt Request */
+#define	DMA19_IRQ			0x00001000	/* DMA Channel 19 (UART2 TX) Interrupt Request */
+#define	TWI0_IRQ			0x00002000	/* TWI0	Interrupt Request */
+#define	TWI1_IRQ			0x00004000	/* TWI1	Interrupt Request */
+#define	CAN_RX_IRQ			0x00008000	/* CAN Receive Interrupt Request */
+#define	CAN_TX_IRQ			0x00010000	/* CAN Transmit	Interrupt Request */
+#define	MDMA1_0_IRQ			0x00020000	/* MemDMA1 Stream 0 Interrupt Request */
+#define	MDMA1_1_IRQ			0x00040000	/* MemDMA1 Stream 1 Interrupt Request */
+#define	MXVR_STAT_IRQ			0x00080000	/* MXVR	Status Interrupt Request */
+#define	MXVR_CM_IRQ			0x00100000	/* MXVR	Control	Message	Interrupt Request */
+#define	MXVR_AP_IRQ			0x00200000	/* MXVR	Asynchronous Packet Interrupt */
+
+/* the following are for backwards compatibility */
+#define	MDMA0_IRQ		MDMA1_0_IRQ
+#define	MDMA1_IRQ		MDMA1_1_IRQ
+
+#ifdef _MISRA_RULES
+#define	_MF15 0xFu
+#define	_MF7 7u
+#else
+#define	_MF15 0xF
+#define	_MF7 7
+#endif /* _MISRA_RULES */
+
+/* SIC_IMASKx Masks											 */
+#define	SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts */
+#define	SIC_MASK_ALL	0xFFFFFFFF					/* Mask	all peripheral interrupts */
+#ifdef _MISRA_RULES
+#define	SIC_MASK(x)		(1 << ((x)&0x1Fu))					/* Mask	Peripheral #x interrupt */
+#define	SIC_UNMASK(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Unmask Peripheral #x	interrupt */
+#else
+#define	SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask	Peripheral #x interrupt */
+#define	SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x	interrupt */
+#endif /* _MISRA_RULES */
+
+/* SIC_IWRx Masks											 */
+#define	IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals */
+#define	IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals */
+#ifdef _MISRA_RULES
+#define	IWR_ENABLE(x)	(1 << ((x)&0x1Fu))					/* Wakeup Enable Peripheral #x */
+#define	IWR_DISABLE(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Wakeup Disable Peripheral #x */
+#else
+#define	IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x */
+#define	IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x */
+#endif /* _MISRA_RULES */
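+/*
+ * Illustrative sketch (not from the original header): the helpers above turn a
+ * peripheral index into either a single set bit or an all-ones word with that
+ * bit cleared, e.g. SIC_MASK(3) == 0x00000008 and IWR_DISABLE(3) == 0xFFFFFFF7,
+ * so they are meant to be OR-ed into / AND-ed against SIC_IMASKx and SIC_IWRx
+ * in a read-modify-write sequence.
+ */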
+
+/*  *********  PARALLEL	PERIPHERAL INTERFACE (PPI) MASKS ****************   */
+/*  PPI_CONTROL	Masks	      */
+#define	PORT_EN		0x0001	/* PPI Port Enable  */
+#define	PORT_DIR	0x0002	/* PPI Port Direction	    */
+#define	XFR_TYPE	0x000C	/* PPI Transfer	Type  */
+#define	PORT_CFG	0x0030	/* PPI Port Configuration */
+#define	FLD_SEL		0x0040	/* PPI Active Field Select */
+#define	PACK_EN		0x0080	/* PPI Packing Mode */
+/* previous versions of	defBF539.h erroneously included	DMA32 (PPI 32-bit DMA Enable) */
+#define	SKIP_EN		0x0200	/* PPI Skip Element Enable */
+#define	SKIP_EO		0x0400	/* PPI Skip Even/Odd Elements */
+#define	DLENGTH		0x3800	/* PPI Data Length  */
+#define	DLEN_8		0x0	     /*	PPI Data Length	mask for DLEN=8 */
+#define	DLEN_10		0x0800		/* Data	Length = 10 Bits */
+#define	DLEN_11		0x1000		/* Data	Length = 11 Bits */
+#define	DLEN_12		0x1800		/* Data	Length = 12 Bits */
+#define	DLEN_13		0x2000		/* Data	Length = 13 Bits */
+#define	DLEN_14		0x2800		/* Data	Length = 14 Bits */
+#define	DLEN_15		0x3000		/* Data	Length = 15 Bits */
+#define	DLEN_16		0x3800		/* Data	Length = 16 Bits */
+#ifdef _MISRA_RULES
+#define	DLEN(x)		((((x)-9u) & 0x07u) << 11)  /* PPI Data	Length (only works for x=10-->x=16) */
+#else
+#define	DLEN(x)		((((x)-9) & 0x07) << 11)  /* PPI Data Length (only works for x=10-->x=16) */
+#endif /* _MISRA_RULES */
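+/*
+ * Illustrative note (not part of the original header): DLEN() reproduces the
+ * fixed DLEN_xx encodings for 10- to 16-bit data, e.g. DLEN(12) == 0x1800 ==
+ * DLEN_12 and DLEN(16) == 0x3800 == DLEN_16; arguments outside 10..16 wrap
+ * and must not be used.
+ */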
+#define	POL			0xC000	/* PPI Signal Polarities       */
+#define	POLC		0x4000		/* PPI Clock Polarity */
+#define	POLS		0x8000		/* PPI Frame Sync Polarity */
+
+
+/* PPI_STATUS Masks					     */
+#define	FLD			0x0400	/* Field Indicator   */
+#define	FT_ERR		0x0800	/* Frame Track Error */
+#define	OVR			0x1000	/* FIFO	Overflow Error */
+#define	UNDR		0x2000	/* FIFO	Underrun Error */
+#define	ERR_DET		0x4000	/* Error Detected Indicator */
+#define	ERR_NCOR	0x8000	/* Error Not Corrected Indicator */
+
+
+/* **********  DMA CONTROLLER MASKS  ***********************/
+
+/* DMAx_PERIPHERAL_MAP,	MDMA_yy_PERIPHERAL_MAP Masks */
+
+#define	CTYPE			0x0040	/* DMA Channel Type Indicator */
+#define	CTYPE_P			0x6		/* DMA Channel Type Indicator BIT POSITION */
+#define	PCAP8			0x0080	/* DMA 8-bit Operation Indicator   */
+#define	PCAP16			0x0100	/* DMA 16-bit Operation	Indicator */
+#define	PCAP32			0x0200	/* DMA 32-bit Operation	Indicator */
+#define	PCAPWR			0x0400	/* DMA Write Operation Indicator */
+#define	PCAPRD			0x0800	/* DMA Read Operation Indicator */
+#define	PMAP			0xF000	/* DMA Peripheral Map Field */
+
+/* PMAP	Encodings For DMA Controller 0 */
+#define	PMAP_PPI		0x0000	/* PMAP	PPI Port DMA */
+#define	PMAP_SPORT0RX	0x1000	/* PMAP	SPORT0 Receive DMA */
+#define	PMAP_SPORT0TX	0x2000	/* PMAP	SPORT0 Transmit	DMA */
+#define	PMAP_SPORT1RX	0x3000	/* PMAP	SPORT1 Receive DMA */
+#define	PMAP_SPORT1TX	0x4000	/* PMAP	SPORT1 Transmit	DMA */
+#define	PMAP_SPI0		0x5000	/* PMAP	SPI0 DMA */
+#define	PMAP_UART0RX		0x6000	/* PMAP	UART0 Receive DMA */
+#define	PMAP_UART0TX		0x7000	/* PMAP	UART0 Transmit DMA */
+
+/* PMAP	Encodings For DMA Controller 1 */
+#define	PMAP_SPORT2RX	    0x0000  /* PMAP SPORT2 Receive DMA */
+#define	PMAP_SPORT2TX	    0x1000  /* PMAP SPORT2 Transmit DMA */
+#define	PMAP_SPORT3RX	    0x2000  /* PMAP SPORT3 Receive DMA */
+#define	PMAP_SPORT3TX	    0x3000  /* PMAP SPORT3 Transmit DMA */
+#define	PMAP_SPI1	    0x6000  /* PMAP SPI1 DMA */
+#define	PMAP_SPI2	    0x7000  /* PMAP SPI2 DMA */
+#define	PMAP_UART1RX	    0x8000  /* PMAP UART1 Receive DMA */
+#define	PMAP_UART1TX	    0x9000  /* PMAP UART1 Transmit DMA */
+#define	PMAP_UART2RX	    0xA000  /* PMAP UART2 Receive DMA */
+#define	PMAP_UART2TX	    0xB000  /* PMAP UART2 Transmit DMA */
+
+
+/*  *************  GENERAL PURPOSE TIMER MASKS	******************** */
+/* PWM Timer bit definitions */
+/* TIMER_ENABLE	Register */
+#define	TIMEN0			0x0001	/* Enable Timer	0 */
+#define	TIMEN1			0x0002	/* Enable Timer	1 */
+#define	TIMEN2			0x0004	/* Enable Timer	2 */
+
+#define	TIMEN0_P		0x00
+#define	TIMEN1_P		0x01
+#define	TIMEN2_P		0x02
+
+/* TIMER_DISABLE Register */
+#define	TIMDIS0			0x0001	/* Disable Timer 0 */
+#define	TIMDIS1			0x0002	/* Disable Timer 1 */
+#define	TIMDIS2			0x0004	/* Disable Timer 2 */
+
+#define	TIMDIS0_P		0x00
+#define	TIMDIS1_P		0x01
+#define	TIMDIS2_P		0x02
+
+/* TIMER_STATUS	Register */
+#define	TIMIL0			0x0001	/* Timer 0 Interrupt */
+#define	TIMIL1			0x0002	/* Timer 1 Interrupt */
+#define	TIMIL2			0x0004	/* Timer 2 Interrupt */
+#define	TOVF_ERR0		0x0010	/* Timer 0 Counter Overflow */
+#define	TOVF_ERR1		0x0020	/* Timer 1 Counter Overflow */
+#define	TOVF_ERR2		0x0040	/* Timer 2 Counter Overflow */
+#define	TRUN0			0x1000	/* Timer 0 Slave Enable	Status */
+#define	TRUN1			0x2000	/* Timer 1 Slave Enable	Status */
+#define	TRUN2			0x4000	/* Timer 2 Slave Enable	Status */
+
+#define	TIMIL0_P		0x00
+#define	TIMIL1_P		0x01
+#define	TIMIL2_P		0x02
+#define	TOVF_ERR0_P		0x04
+#define	TOVF_ERR1_P		0x05
+#define	TOVF_ERR2_P		0x06
+#define	TRUN0_P			0x0C
+#define	TRUN1_P			0x0D
+#define	TRUN2_P			0x0E
+
+/* Alternate Deprecated	Macros Provided	For Backwards Code Compatibility */
+#define	TOVL_ERR0		TOVF_ERR0
+#define	TOVL_ERR1		TOVF_ERR1
+#define	TOVL_ERR2		TOVF_ERR2
+#define	TOVL_ERR0_P		TOVF_ERR0_P
+#define	TOVL_ERR1_P	TOVF_ERR1_P
+#define	TOVL_ERR2_P	TOVF_ERR2_P
+
+/* TIMERx_CONFIG Registers */
+#define	PWM_OUT			0x0001
+#define	WDTH_CAP		0x0002
+#define	EXT_CLK			0x0003
+#define	PULSE_HI		0x0004
+#define	PERIOD_CNT		0x0008
+#define	IRQ_ENA			0x0010
+#define	TIN_SEL			0x0020
+#define	OUT_DIS			0x0040
+#define	CLK_SEL			0x0080
+#define	TOGGLE_HI		0x0100
+#define	EMU_RUN			0x0200
+#ifdef _MISRA_RULES
+#define	ERR_TYP(x)		(((x) &	0x03u) << 14)
+#else
+#define	ERR_TYP(x)		(((x) &	0x03) << 14)
+#endif /* _MISRA_RULES */
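+/*
+ * Illustrative note (not part of the original header): ERR_TYP() places a
+ * 2-bit error-type code in bits 15:14 of TIMERx_CONFIG, e.g. ERR_TYP(3) == 0xC000.
+ */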
+
+#define	TMODE_P0		0x00
+#define	TMODE_P1		0x01
+#define	PULSE_HI_P		0x02
+#define	PERIOD_CNT_P	0x03
+#define	IRQ_ENA_P		0x04
+#define	TIN_SEL_P		0x05
+#define	OUT_DIS_P		0x06
+#define	CLK_SEL_P		0x07
+#define	TOGGLE_HI_P		0x08
+#define	EMU_RUN_P		0x09
+#define	ERR_TYP_P0		0x0E
+#define	ERR_TYP_P1		0x0F
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS	************* */
+/* EBIU_AMGCTL Masks */
+#define	AMCKEN		0x0001	/* Enable CLKOUT */
+#define	AMBEN_NONE	0x0000	/* All Banks Disabled */
+#define	AMBEN_B0	0x0002	/* Enable Asynchronous Memory Bank 0 only */
+#define	AMBEN_B0_B1	0x0004	/* Enable Asynchronous Memory Banks 0 &	1 only */
+#define	AMBEN_B0_B1_B2	0x0006	/* Enable Asynchronous Memory Banks 0, 1, and 2 */
+#define	AMBEN_ALL	0x0008	/* Enable All Asynchronous Memory Banks	(0, 1, 2, and 3) */
+#define	CDPRIO		0x0100	/* DMA has priority over core for external accesses */
+
+/* EBIU_AMGCTL Bit Positions */
+#define	AMCKEN_P		0x0000	/* Enable CLKOUT */
+#define	AMBEN_P0		0x0001	/* Asynchronous	Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
+#define	AMBEN_P1		0x0002	/* Asynchronous	Memory Enable, 010 - banks 0&1 enabled,	 011 - banks 0-3 enabled */
+#define	AMBEN_P2		0x0003	/* Asynchronous	Memory Enable, 1xx - All banks (bank 0,	1, 2, and 3) enabled */
+
+/* EBIU_AMBCTL0	Masks */
+#define	B0RDYEN			0x00000001  /* Bank 0 RDY Enable, 0=disable, 1=enable */
+#define	B0RDYPOL		0x00000002  /* Bank 0 RDY Active high, 0=active	low, 1=active high */
+#define	B0TT_1			0x00000004  /* Bank 0 Transition Time from Read	to Write = 1 cycle */
+#define	B0TT_2			0x00000008  /* Bank 0 Transition Time from Read	to Write = 2 cycles */
+#define	B0TT_3			0x0000000C  /* Bank 0 Transition Time from Read	to Write = 3 cycles */
+#define	B0TT_4			0x00000000  /* Bank 0 Transition Time from Read	to Write = 4 cycles */
+#define	B0ST_1			0x00000010  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
+#define	B0ST_2			0x00000020  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
+#define	B0ST_3			0x00000030  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
+#define	B0ST_4			0x00000000  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
+#define	B0HT_1			0x00000040  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 1 cycle */
+#define	B0HT_2			0x00000080  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 2 cycles */
+#define	B0HT_3			0x000000C0  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 3 cycles */
+#define	B0HT_0			0x00000000  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 0 cycles */
+#define	B0RAT_1			0x00000100  /* Bank 0 Read Access Time = 1 cycle */
+#define	B0RAT_2			0x00000200  /* Bank 0 Read Access Time = 2 cycles */
+#define	B0RAT_3			0x00000300  /* Bank 0 Read Access Time = 3 cycles */
+#define	B0RAT_4			0x00000400  /* Bank 0 Read Access Time = 4 cycles */
+#define	B0RAT_5			0x00000500  /* Bank 0 Read Access Time = 5 cycles */
+#define	B0RAT_6			0x00000600  /* Bank 0 Read Access Time = 6 cycles */
+#define	B0RAT_7			0x00000700  /* Bank 0 Read Access Time = 7 cycles */
+#define	B0RAT_8			0x00000800  /* Bank 0 Read Access Time = 8 cycles */
+#define	B0RAT_9			0x00000900  /* Bank 0 Read Access Time = 9 cycles */
+#define	B0RAT_10		0x00000A00  /* Bank 0 Read Access Time = 10 cycles */
+#define	B0RAT_11		0x00000B00  /* Bank 0 Read Access Time = 11 cycles */
+#define	B0RAT_12		0x00000C00  /* Bank 0 Read Access Time = 12 cycles */
+#define	B0RAT_13		0x00000D00  /* Bank 0 Read Access Time = 13 cycles */
+#define	B0RAT_14		0x00000E00  /* Bank 0 Read Access Time = 14 cycles */
+#define	B0RAT_15		0x00000F00  /* Bank 0 Read Access Time = 15 cycles */
+#define	B0WAT_1			0x00001000  /* Bank 0 Write Access Time	= 1 cycle */
+#define	B0WAT_2			0x00002000  /* Bank 0 Write Access Time	= 2 cycles */
+#define	B0WAT_3			0x00003000  /* Bank 0 Write Access Time	= 3 cycles */
+#define	B0WAT_4			0x00004000  /* Bank 0 Write Access Time	= 4 cycles */
+#define	B0WAT_5			0x00005000  /* Bank 0 Write Access Time	= 5 cycles */
+#define	B0WAT_6			0x00006000  /* Bank 0 Write Access Time	= 6 cycles */
+#define	B0WAT_7			0x00007000  /* Bank 0 Write Access Time	= 7 cycles */
+#define	B0WAT_8			0x00008000  /* Bank 0 Write Access Time	= 8 cycles */
+#define	B0WAT_9			0x00009000  /* Bank 0 Write Access Time	= 9 cycles */
+#define	B0WAT_10		0x0000A000  /* Bank 0 Write Access Time	= 10 cycles */
+#define	B0WAT_11		0x0000B000  /* Bank 0 Write Access Time	= 11 cycles */
+#define	B0WAT_12		0x0000C000  /* Bank 0 Write Access Time	= 12 cycles */
+#define	B0WAT_13		0x0000D000  /* Bank 0 Write Access Time	= 13 cycles */
+#define	B0WAT_14		0x0000E000  /* Bank 0 Write Access Time	= 14 cycles */
+#define	B0WAT_15		0x0000F000  /* Bank 0 Write Access Time	= 15 cycles */
+#define	B1RDYEN			0x00010000  /* Bank 1 RDY enable, 0=disable, 1=enable */
+#define	B1RDYPOL		0x00020000  /* Bank 1 RDY Active high, 0=active	low, 1=active high */
+#define	B1TT_1			0x00040000  /* Bank 1 Transition Time from Read	to Write = 1 cycle */
+#define	B1TT_2			0x00080000  /* Bank 1 Transition Time from Read	to Write = 2 cycles */
+#define	B1TT_3			0x000C0000  /* Bank 1 Transition Time from Read	to Write = 3 cycles */
+#define	B1TT_4			0x00000000  /* Bank 1 Transition Time from Read	to Write = 4 cycles */
+#define	B1ST_1			0x00100000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B1ST_2			0x00200000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B1ST_3			0x00300000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B1ST_4			0x00000000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B1HT_1			0x00400000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B1HT_2			0x00800000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B1HT_3			0x00C00000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B1HT_0			0x00000000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B1RAT_1			0x01000000  /* Bank 1 Read Access Time = 1 cycle */
+#define	B1RAT_2			0x02000000  /* Bank 1 Read Access Time = 2 cycles */
+#define	B1RAT_3			0x03000000  /* Bank 1 Read Access Time = 3 cycles */
+#define	B1RAT_4			0x04000000  /* Bank 1 Read Access Time = 4 cycles */
+#define	B1RAT_5			0x05000000  /* Bank 1 Read Access Time = 5 cycles */
+#define	B1RAT_6			0x06000000  /* Bank 1 Read Access Time = 6 cycles */
+#define	B1RAT_7			0x07000000  /* Bank 1 Read Access Time = 7 cycles */
+#define	B1RAT_8			0x08000000  /* Bank 1 Read Access Time = 8 cycles */
+#define	B1RAT_9			0x09000000  /* Bank 1 Read Access Time = 9 cycles */
+#define	B1RAT_10		0x0A000000  /* Bank 1 Read Access Time = 10 cycles */
+#define	B1RAT_11		0x0B000000  /* Bank 1 Read Access Time = 11 cycles */
+#define	B1RAT_12		0x0C000000  /* Bank 1 Read Access Time = 12 cycles */
+#define	B1RAT_13		0x0D000000  /* Bank 1 Read Access Time = 13 cycles */
+#define	B1RAT_14		0x0E000000  /* Bank 1 Read Access Time = 14 cycles */
+#define	B1RAT_15		0x0F000000  /* Bank 1 Read Access Time = 15 cycles */
+#define	B1WAT_1			0x10000000 /* Bank 1 Write Access Time = 1 cycle */
+#define	B1WAT_2			0x20000000  /* Bank 1 Write Access Time	= 2 cycles */
+#define	B1WAT_3			0x30000000  /* Bank 1 Write Access Time	= 3 cycles */
+#define	B1WAT_4			0x40000000  /* Bank 1 Write Access Time	= 4 cycles */
+#define	B1WAT_5			0x50000000  /* Bank 1 Write Access Time	= 5 cycles */
+#define	B1WAT_6			0x60000000  /* Bank 1 Write Access Time	= 6 cycles */
+#define	B1WAT_7			0x70000000  /* Bank 1 Write Access Time	= 7 cycles */
+#define	B1WAT_8			0x80000000  /* Bank 1 Write Access Time	= 8 cycles */
+#define	B1WAT_9			0x90000000  /* Bank 1 Write Access Time	= 9 cycles */
+#define	B1WAT_10		0xA0000000  /* Bank 1 Write Access Time	= 10 cycles */
+#define	B1WAT_11		0xB0000000  /* Bank 1 Write Access Time	= 11 cycles */
+#define	B1WAT_12		0xC0000000  /* Bank 1 Write Access Time	= 12 cycles */
+#define	B1WAT_13		0xD0000000  /* Bank 1 Write Access Time	= 13 cycles */
+#define	B1WAT_14		0xE0000000  /* Bank 1 Write Access Time	= 14 cycles */
+#define	B1WAT_15		0xF0000000  /* Bank 1 Write Access Time	= 15 cycles */
+
+/* EBIU_AMBCTL1	Masks */
+#define	B2RDYEN			0x00000001  /* Bank 2 RDY Enable, 0=disable, 1=enable */
+#define	B2RDYPOL		0x00000002  /* Bank 2 RDY Active high, 0=active	low, 1=active high */
+#define	B2TT_1			0x00000004  /* Bank 2 Transition Time from Read	to Write = 1 cycle */
+#define	B2TT_2			0x00000008  /* Bank 2 Transition Time from Read	to Write = 2 cycles */
+#define	B2TT_3			0x0000000C  /* Bank 2 Transition Time from Read	to Write = 3 cycles */
+#define	B2TT_4			0x00000000  /* Bank 2 Transition Time from Read	to Write = 4 cycles */
+#define	B2ST_1			0x00000010  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B2ST_2			0x00000020  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B2ST_3			0x00000030  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B2ST_4			0x00000000  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B2HT_1			0x00000040  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B2HT_2			0x00000080  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B2HT_3			0x000000C0  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B2HT_0			0x00000000  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B2RAT_1			0x00000100  /* Bank 2 Read Access Time = 1 cycle */
+#define	B2RAT_2			0x00000200  /* Bank 2 Read Access Time = 2 cycles */
+#define	B2RAT_3			0x00000300  /* Bank 2 Read Access Time = 3 cycles */
+#define	B2RAT_4			0x00000400  /* Bank 2 Read Access Time = 4 cycles */
+#define	B2RAT_5			0x00000500  /* Bank 2 Read Access Time = 5 cycles */
+#define	B2RAT_6			0x00000600  /* Bank 2 Read Access Time = 6 cycles */
+#define	B2RAT_7			0x00000700  /* Bank 2 Read Access Time = 7 cycles */
+#define	B2RAT_8			0x00000800  /* Bank 2 Read Access Time = 8 cycles */
+#define	B2RAT_9			0x00000900  /* Bank 2 Read Access Time = 9 cycles */
+#define	B2RAT_10		0x00000A00  /* Bank 2 Read Access Time = 10 cycles */
+#define	B2RAT_11		0x00000B00  /* Bank 2 Read Access Time = 11 cycles */
+#define	B2RAT_12		0x00000C00  /* Bank 2 Read Access Time = 12 cycles */
+#define	B2RAT_13		0x00000D00  /* Bank 2 Read Access Time = 13 cycles */
+#define	B2RAT_14		0x00000E00  /* Bank 2 Read Access Time = 14 cycles */
+#define	B2RAT_15		0x00000F00  /* Bank 2 Read Access Time = 15 cycles */
+#define	B2WAT_1			0x00001000  /* Bank 2 Write Access Time	= 1 cycle */
+#define	B2WAT_2			0x00002000  /* Bank 2 Write Access Time	= 2 cycles */
+#define	B2WAT_3			0x00003000  /* Bank 2 Write Access Time	= 3 cycles */
+#define	B2WAT_4			0x00004000  /* Bank 2 Write Access Time	= 4 cycles */
+#define	B2WAT_5			0x00005000  /* Bank 2 Write Access Time	= 5 cycles */
+#define	B2WAT_6			0x00006000  /* Bank 2 Write Access Time	= 6 cycles */
+#define	B2WAT_7			0x00007000  /* Bank 2 Write Access Time	= 7 cycles */
+#define	B2WAT_8			0x00008000  /* Bank 2 Write Access Time	= 8 cycles */
+#define	B2WAT_9			0x00009000  /* Bank 2 Write Access Time	= 9 cycles */
+#define	B2WAT_10		0x0000A000  /* Bank 2 Write Access Time	= 10 cycles */
+#define	B2WAT_11		0x0000B000  /* Bank 2 Write Access Time	= 11 cycles */
+#define	B2WAT_12		0x0000C000  /* Bank 2 Write Access Time	= 12 cycles */
+#define	B2WAT_13		0x0000D000  /* Bank 2 Write Access Time	= 13 cycles */
+#define	B2WAT_14		0x0000E000  /* Bank 2 Write Access Time	= 14 cycles */
+#define	B2WAT_15		0x0000F000  /* Bank 2 Write Access Time	= 15 cycles */
+#define	B3RDYEN			0x00010000  /* Bank 3 RDY enable, 0=disable, 1=enable */
+#define	B3RDYPOL		0x00020000  /* Bank 3 RDY Active high, 0=active	low, 1=active high */
+#define	B3TT_1			0x00040000  /* Bank 3 Transition Time from Read	to Write = 1 cycle */
+#define	B3TT_2			0x00080000  /* Bank 3 Transition Time from Read	to Write = 2 cycles */
+#define	B3TT_3			0x000C0000  /* Bank 3 Transition Time from Read	to Write = 3 cycles */
+#define	B3TT_4			0x00000000  /* Bank 3 Transition Time from Read	to Write = 4 cycles */
+#define	B3ST_1			0x00100000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B3ST_2			0x00200000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B3ST_3			0x00300000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B3ST_4			0x00000000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B3HT_1			0x00400000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B3HT_2			0x00800000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B3HT_3			0x00C00000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B3HT_0			0x00000000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B3RAT_1			0x01000000 /* Bank 3 Read Access Time =	1 cycle */
+#define	B3RAT_2			0x02000000  /* Bank 3 Read Access Time = 2 cycles */
+#define	B3RAT_3			0x03000000  /* Bank 3 Read Access Time = 3 cycles */
+#define	B3RAT_4			0x04000000  /* Bank 3 Read Access Time = 4 cycles */
+#define	B3RAT_5			0x05000000  /* Bank 3 Read Access Time = 5 cycles */
+#define	B3RAT_6			0x06000000  /* Bank 3 Read Access Time = 6 cycles */
+#define	B3RAT_7			0x07000000  /* Bank 3 Read Access Time = 7 cycles */
+#define	B3RAT_8			0x08000000  /* Bank 3 Read Access Time = 8 cycles */
+#define	B3RAT_9			0x09000000  /* Bank 3 Read Access Time = 9 cycles */
+#define	B3RAT_10		0x0A000000  /* Bank 3 Read Access Time = 10 cycles */
+#define	B3RAT_11		0x0B000000  /* Bank 3 Read Access Time = 11 cycles */
+#define	B3RAT_12		0x0C000000  /* Bank 3 Read Access Time = 12 cycles */
+#define	B3RAT_13		0x0D000000  /* Bank 3 Read Access Time = 13 cycles */
+#define	B3RAT_14		0x0E000000  /* Bank 3 Read Access Time = 14 cycles */
+#define	B3RAT_15		0x0F000000  /* Bank 3 Read Access Time = 15 cycles */
+#define	B3WAT_1			0x10000000 /* Bank 3 Write Access Time = 1 cycle */
+#define	B3WAT_2			0x20000000  /* Bank 3 Write Access Time	= 2 cycles */
+#define	B3WAT_3			0x30000000  /* Bank 3 Write Access Time	= 3 cycles */
+#define	B3WAT_4			0x40000000  /* Bank 3 Write Access Time	= 4 cycles */
+#define	B3WAT_5			0x50000000  /* Bank 3 Write Access Time	= 5 cycles */
+#define	B3WAT_6			0x60000000  /* Bank 3 Write Access Time	= 6 cycles */
+#define	B3WAT_7			0x70000000  /* Bank 3 Write Access Time	= 7 cycles */
+#define	B3WAT_8			0x80000000  /* Bank 3 Write Access Time	= 8 cycles */
+#define	B3WAT_9			0x90000000  /* Bank 3 Write Access Time	= 9 cycles */
+#define	B3WAT_10		0xA0000000  /* Bank 3 Write Access Time	= 10 cycles */
+#define	B3WAT_11		0xB0000000  /* Bank 3 Write Access Time	= 11 cycles */
+#define	B3WAT_12		0xC0000000  /* Bank 3 Write Access Time	= 12 cycles */
+#define	B3WAT_13		0xD0000000  /* Bank 3 Write Access Time	= 13 cycles */
+#define	B3WAT_14		0xE0000000  /* Bank 3 Write Access Time	= 14 cycles */
+#define	B3WAT_15		0xF0000000  /* Bank 3 Write Access Time	= 15 cycles */
+
+/* **********************  SDRAM CONTROLLER MASKS  *************************** */
+/* EBIU_SDGCTL Masks */
+#define	SCTLE			0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
+#define	CL_2			0x00000008 /* SDRAM CAS	latency	= 2 cycles */
+#define	CL_3			0x0000000C /* SDRAM CAS	latency	= 3 cycles */
+#define	PFE				0x00000010 /* Enable SDRAM prefetch */
+#define	PFP				0x00000020 /* Prefetch has priority over AMC requests */
+#define	PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh */
+#define	PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In	Self-Refresh */
+#define	PASR_B0			0x00000020	/* Only	SDRAM Bank 0 Is	Refreshed In Self-Refresh */
+#define	TRAS_1			0x00000040 /* SDRAM tRAS = 1 cycle */
+#define	TRAS_2			0x00000080 /* SDRAM tRAS = 2 cycles */
+#define	TRAS_3			0x000000C0 /* SDRAM tRAS = 3 cycles */
+#define	TRAS_4			0x00000100 /* SDRAM tRAS = 4 cycles */
+#define	TRAS_5			0x00000140 /* SDRAM tRAS = 5 cycles */
+#define	TRAS_6			0x00000180 /* SDRAM tRAS = 6 cycles */
+#define	TRAS_7			0x000001C0 /* SDRAM tRAS = 7 cycles */
+#define	TRAS_8			0x00000200 /* SDRAM tRAS = 8 cycles */
+#define	TRAS_9			0x00000240 /* SDRAM tRAS = 9 cycles */
+#define	TRAS_10			0x00000280 /* SDRAM tRAS = 10 cycles */
+#define	TRAS_11			0x000002C0 /* SDRAM tRAS = 11 cycles */
+#define	TRAS_12			0x00000300 /* SDRAM tRAS = 12 cycles */
+#define	TRAS_13			0x00000340 /* SDRAM tRAS = 13 cycles */
+#define	TRAS_14			0x00000380 /* SDRAM tRAS = 14 cycles */
+#define	TRAS_15			0x000003C0 /* SDRAM tRAS = 15 cycles */
+#define	TRP_1			0x00000800 /* SDRAM tRP	= 1 cycle */
+#define	TRP_2			0x00001000 /* SDRAM tRP	= 2 cycles */
+#define	TRP_3			0x00001800 /* SDRAM tRP	= 3 cycles */
+#define	TRP_4			0x00002000 /* SDRAM tRP	= 4 cycles */
+#define	TRP_5			0x00002800 /* SDRAM tRP	= 5 cycles */
+#define	TRP_6			0x00003000 /* SDRAM tRP	= 6 cycles */
+#define	TRP_7			0x00003800 /* SDRAM tRP	= 7 cycles */
+#define	TRCD_1			0x00008000 /* SDRAM tRCD = 1 cycle */
+#define	TRCD_2			0x00010000 /* SDRAM tRCD = 2 cycles */
+#define	TRCD_3			0x00018000 /* SDRAM tRCD = 3 cycles */
+#define	TRCD_4			0x00020000 /* SDRAM tRCD = 4 cycles */
+#define	TRCD_5			0x00028000 /* SDRAM tRCD = 5 cycles */
+#define	TRCD_6			0x00030000 /* SDRAM tRCD = 6 cycles */
+#define	TRCD_7			0x00038000 /* SDRAM tRCD = 7 cycles */
+#define	TWR_1			0x00080000 /* SDRAM tWR	= 1 cycle */
+#define	TWR_2			0x00100000 /* SDRAM tWR	= 2 cycles */
+#define	TWR_3			0x00180000 /* SDRAM tWR	= 3 cycles */
+#define	PUPSD			0x00200000 /*Power-up start delay */
+#define	PSM				0x00400000 /* SDRAM power-up sequence =	Precharge, mode	register set, 8	CBR refresh cycles */
+#define	PSS				0x00800000 /* enable SDRAM power-up sequence on	next SDRAM access */
+#define	SRFS			0x01000000 /* Start SDRAM self-refresh mode */
+#define	EBUFE			0x02000000 /* Enable external buffering	timing */
+#define	FBBRW			0x04000000 /* Fast back-to-back	read write enable */
+#define	EMREN			0x10000000 /* Extended mode register enable */
+#define	TCSR			0x20000000 /* Temp compensated self refresh value 85 deg C */
+#define	CDDBG			0x40000000 /* Tristate SDRAM controls during bus grant */
+
+/* EBIU_SDBCTL Masks */
+#define	EBE				0x00000001 /* Enable SDRAM external bank */
+#define	EBSZ_16			0x00000000 /* SDRAM external bank size = 16MB */
+#define	EBSZ_32			0x00000002 /* SDRAM external bank size = 32MB */
+#define	EBSZ_64			0x00000004 /* SDRAM external bank size = 64MB */
+#define	EBSZ_128		0x00000006 /* SDRAM external bank size = 128MB */
+#define	EBSZ_256		0x00000008 /* SDRAM External Bank Size = 256MB */
+#define	EBSZ_512		0x0000000A /* SDRAM External Bank Size = 512MB */
+#define	EBCAW_8			0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define	EBCAW_9			0x00000010 /* SDRAM external bank column address width = 9 bits */
+#define	EBCAW_10		0x00000020 /* SDRAM external bank column address width = 10 bits */
+#define	EBCAW_11		0x00000030 /* SDRAM external bank column address width = 11 bits */
+
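Illustrative only, not part of the patch: the SDRAM controller masks above are normally OR'd together when programming the EBIU_SDGCTL and EBIU_SDBCTL registers. A minimal sketch, assuming the EBIU_* address macros from the corresponding chip MMR header, hypothetical pointer widths, and example timing values that are not taken from this patch:

	/* Hedged sketch: hypothetical pointers built from the EBIU_* address macros;
	 * register widths are assumptions for the example. */
	volatile unsigned long  *pEBIU_SDGCTL = (volatile unsigned long *)EBIU_SDGCTL;
	volatile unsigned short *pEBIU_SDBCTL = (volatile unsigned short *)EBIU_SDBCTL;

	/* Example timings only: CAS latency 3, tRAS = 6, tRP = 3, tRCD = 3, tWR = 2 */
	*pEBIU_SDGCTL = SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2;
	/* One enabled 64 MB external bank with a 10-bit column address width */
	*pEBIU_SDBCTL = EBE | EBSZ_64 | EBCAW_10;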
+/* EBIU_SDSTAT Masks */
+#define	SDCI			0x00000001 /* SDRAM controller is idle */
+#define	SDSRA			0x00000002 /* SDRAM self-refresh is active */
+#define	SDPUA			0x00000004 /* SDRAM power up active  */
+#define	SDRS			0x00000008 /* SDRAM is in reset	state */
+#define	SDEASE			0x00000010 /* SDRAM EAB	sticky error status - W1C */
+#define	BGSTAT			0x00000020 /* Bus granted */
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWIx) MASKS  ***********************/
+/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);	 ) */
+#ifdef _MISRA_RULES
+#define	CLKLOW(x)	((x) & 0xFFu)		/* Periods Clock Is Held Low */
+#define	CLKHI(y)	(((y)&0xFFu)<<0x8)	/* Periods Before New Clock Low */
+#else
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low */
+#define	CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low */
+#endif /* _MISRA_RULES */
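The usage note above (*pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);) can be made concrete. A hedged sketch, assuming a hypothetical pTWI0_CLKDIV pointer built from the TWI0_CLKDIV address macro in the matching chip MMR header; the period values are examples only:

	/* Sketch only: split the 10 MHz internal time reference into an SCL
	 * low period of 50 and a high period of 50 reference periods. */
	volatile unsigned short *pTWI0_CLKDIV = (volatile unsigned short *)TWI0_CLKDIV;
	*pTWI0_CLKDIV = CLKLOW(50) | CLKHI(50);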
+
+/* TWIx_PRESCALE Masks								 */
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz) */
+#define	TWI_ENA		0x0080		/* TWI Enable		 */
+#define	SCCB		0x0200		/* SCCB	Compatibility Enable */
+
+/* TWIx_SLAVE_CTRL Masks								 */
+#define	SEN			0x0001		/* Slave Enable		 */
+#define	SADD_LEN	0x0002		/* Slave Address Length */
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid */
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled */
+
+/* TWIx_SLAVE_STAT Masks								 */
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*) */
+#define	GCALL		0x0002		/* General Call	Indicator */
+
+/* TWIx_MASTER_CTRL Masks						 */
+#define	MEN			0x0001		/* Master Mode Enable */
+#define	MADD_LEN	0x0002		/* Master Address Length */
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*) */
+#define	FAST		0x0008		/* Use Fast Mode Timing	Specs */
+#define	STOP		0x0010		/* Issue Stop Condition */
+#define	RSTART		0x0020		/* Repeat Start	or Stop* At End	Of Transfer */
+#define	DCNT		0x3FC0		/* Data	Bytes To Transfer */
+#define	SDAOVR		0x4000		/* Serial Data Override */
+#define	SCLOVR		0x8000		/* Serial Clock	Override */
+
+/* TWIx_MASTER_STAT Masks							 */
+#define	MPROG		0x0001		/* Master Transfer In Progress */
+#define	LOSTARB		0x0002		/* Lost	Arbitration Indicator (Xfer Aborted) */
+#define	ANAK		0x0004		/* Address Not Acknowledged */
+#define	DNAK		0x0008		/* Data	Not Acknowledged */
+#define	BUFRDERR	0x0010		/* Buffer Read Error */
+#define	BUFWRERR	0x0020		/* Buffer Write	Error */
+#define	SDASEN		0x0040		/* Serial Data Sense */
+#define	SCLSEN		0x0080		/* Serial Clock	Sense */
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator */
+
+/* TWIx_INT_SRC	and TWIx_INT_ENABLE Masks */
+#define	SINIT		0x0001		/* Slave Transfer Initiated */
+#define	SCOMP		0x0002		/* Slave Transfer Complete */
+#define	SERR		0x0004		/* Slave Transfer Error */
+#define	SOVF		0x0008		/* Slave Overflow */
+#define	MCOMP		0x0010		/* Master Transfer Complete */
+#define	MERR		0x0020		/* Master Transfer Error */
+#define	XMTSERV		0x0040		/* Transmit FIFO Service */
+#define	RCVSERV		0x0080		/* Receive FIFO	Service */
+
+/* TWIx_FIFO_CTL Masks					 */
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush */
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush */
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length */
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length */
+
+/* TWIx_FIFO_STAT Masks								 */
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status */
+#define	XMT_EMPTY	0x0000		/*		Transmit FIFO Empty */
+#define	XMT_HALF	0x0001		/*		Transmit FIFO Has 1 Byte To Write */
+#define	XMT_FULL	0x0003		/*		Transmit FIFO Full (2 Bytes To Write) */
+
+#define	RCVSTAT		0x000C		/* Receive FIFO	Status */
+#define	RCV_EMPTY	0x0000		/*		Receive	FIFO Empty */
+#define	RCV_HALF	0x0004		/*		Receive	FIFO Has 1 Byte	To Read */
+#define	RCV_FULL	0x000C		/*		Receive	FIFO Full (2 Bytes To Read) */
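A hedged polling sketch (not from the patch) showing how the FIFO status fields above are typically read, assuming hypothetical pTWI0_FIFO_STAT and pTWI0_XMT_DATA8 pointers built from the TWI0_* address macros of the matching chip header, and a hypothetical data_byte value:

	/* Sketch only: wait for room in the 2-byte transmit FIFO, then queue a byte. */
	while ((*pTWI0_FIFO_STAT & XMTSTAT) == XMT_FULL)
		;	/* busy-wait; real code would bound this or sleep */
	*pTWI0_XMT_DATA8 = data_byte;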
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 7a8ac5f..8100bcd 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1,859 +1,13 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
 
-/* SYSTEM & MM REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF538/9 */
-
 #ifndef _DEF_BF539_H
 #define _DEF_BF539_H
 
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-
-/*********************************************************************************** */
-/* System MMR Register Map */
-/*********************************************************************************** */
-/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
-#define	PLL_CTL			0xFFC00000	/* PLL Control register (16-bit) */
-#define	PLL_DIV			0xFFC00004	/* PLL Divide Register (16-bit) */
-#define	VR_CTL			0xFFC00008	/* Voltage Regulator Control Register (16-bit) */
-#define	PLL_STAT		0xFFC0000C	/* PLL Status register (16-bit) */
-#define	PLL_LOCKCNT		0xFFC00010	/* PLL Lock	Count register (16-bit) */
-#define	CHIPID			0xFFC00014	/* Chip	ID Register */
-
-/* CHIPID Masks */
-#define CHIPID_VERSION         0xF0000000
-#define CHIPID_FAMILY          0x0FFFF000
-#define CHIPID_MANUFACTURE     0x00000FFE
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define	SWRST			0xFFC00100  /* Software	Reset Register (16-bit) */
-#define	SYSCR			0xFFC00104  /* System Configuration registe */
-#define	SIC_RVECT		0xFFC00108
-#define	SIC_IMASK0		0xFFC0010C  /* Interrupt Mask Register */
-#define	SIC_IAR0		0xFFC00110  /* Interrupt Assignment Register 0 */
-#define	SIC_IAR1		0xFFC00114  /* Interrupt Assignment Register 1 */
-#define	SIC_IAR2		0xFFC00118  /* Interrupt Assignment Register 2 */
-#define	SIC_IAR3			0xFFC0011C	/* Interrupt Assignment	Register 3 */
-#define	SIC_ISR0			0xFFC00120  /* Interrupt Status	Register */
-#define	SIC_IWR0			0xFFC00124  /* Interrupt Wakeup	Register */
-#define	SIC_IMASK1			0xFFC00128	/* Interrupt Mask Register 1 */
-#define	SIC_ISR1			0xFFC0012C	/* Interrupt Status Register 1 */
-#define	SIC_IWR1			0xFFC00130	/* Interrupt Wakeup Register 1 */
-#define	SIC_IAR4			0xFFC00134	/* Interrupt Assignment	Register 4 */
-#define	SIC_IAR5			0xFFC00138	/* Interrupt Assignment	Register 5 */
-#define	SIC_IAR6			0xFFC0013C	/* Interrupt Assignment	Register 6 */
-
-
-/* Watchdog Timer (0xFFC00200 -	0xFFC002FF) */
-#define	WDOG_CTL	0xFFC00200  /* Watchdog	Control	Register */
-#define	WDOG_CNT	0xFFC00204  /* Watchdog	Count Register */
-#define	WDOG_STAT	0xFFC00208  /* Watchdog	Status Register */
-
-
-/* Real	Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define	RTC_STAT	0xFFC00300  /* RTC Status Register */
-#define	RTC_ICTL	0xFFC00304  /* RTC Interrupt Control Register */
-#define	RTC_ISTAT	0xFFC00308  /* RTC Interrupt Status Register */
-#define	RTC_SWCNT	0xFFC0030C  /* RTC Stopwatch Count Register */
-#define	RTC_ALARM	0xFFC00310  /* RTC Alarm Time Register */
-#define	RTC_FAST	0xFFC00314  /* RTC Prescaler Enable Register */
-#define	RTC_PREN		0xFFC00314  /* RTC Prescaler Enable Register (alternate	macro) */
-
-
-/* UART0 Controller (0xFFC00400	- 0xFFC004FF) */
-#define	UART0_THR	      0xFFC00400  /* Transmit Holding register */
-#define	UART0_RBR	      0xFFC00400  /* Receive Buffer register */
-#define	UART0_DLL	      0xFFC00400  /* Divisor Latch (Low-Byte) */
-#define	UART0_IER	      0xFFC00404  /* Interrupt Enable Register */
-#define	UART0_DLH	      0xFFC00404  /* Divisor Latch (High-Byte) */
-#define	UART0_IIR	      0xFFC00408  /* Interrupt Identification Register */
-#define	UART0_LCR	      0xFFC0040C  /* Line Control Register */
-#define	UART0_MCR			 0xFFC00410  /*	Modem Control Register */
-#define	UART0_LSR	      0xFFC00414  /* Line Status Register */
-#define	UART0_SCR	      0xFFC0041C  /* SCR Scratch Register */
-#define	UART0_GCTL		     0xFFC00424	 /* Global Control Register */
-
-
-/* SPI0	Controller (0xFFC00500 - 0xFFC005FF) */
-
-#define	SPI0_CTL			0xFFC00500  /* SPI0 Control Register */
-#define	SPI0_FLG			0xFFC00504  /* SPI0 Flag register */
-#define	SPI0_STAT			0xFFC00508  /* SPI0 Status register */
-#define	SPI0_TDBR			0xFFC0050C  /* SPI0 Transmit Data Buffer Register */
-#define	SPI0_RDBR			0xFFC00510  /* SPI0 Receive Data Buffer	Register */
-#define	SPI0_BAUD			0xFFC00514  /* SPI0 Baud rate Register */
-#define	SPI0_SHADOW			0xFFC00518  /* SPI0_RDBR Shadow	Register */
-#define SPI0_REGBASE			SPI0_CTL
-
-
-/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
-#define	TIMER0_CONFIG			0xFFC00600     /* Timer	0 Configuration	Register */
-#define	TIMER0_COUNTER				0xFFC00604     /* Timer	0 Counter Register */
-#define	TIMER0_PERIOD			0xFFC00608     /* Timer	0 Period Register */
-#define	TIMER0_WIDTH			0xFFC0060C     /* Timer	0 Width	Register */
-
-#define	TIMER1_CONFIG			0xFFC00610	/*  Timer 1 Configuration Register   */
-#define	TIMER1_COUNTER			0xFFC00614	/*  Timer 1 Counter Register	     */
-#define	TIMER1_PERIOD			0xFFC00618	/*  Timer 1 Period Register	     */
-#define	TIMER1_WIDTH			0xFFC0061C	/*  Timer 1 Width Register	     */
-
-#define	TIMER2_CONFIG			0xFFC00620	/* Timer 2 Configuration Register   */
-#define	TIMER2_COUNTER			0xFFC00624	/* Timer 2 Counter Register	    */
-#define	TIMER2_PERIOD			0xFFC00628	/* Timer 2 Period Register	    */
-#define	TIMER2_WIDTH			0xFFC0062C	/* Timer 2 Width Register	    */
-
-#define	TIMER_ENABLE				0xFFC00640	/* Timer Enable	Register */
-#define	TIMER_DISABLE				0xFFC00644	/* Timer Disable Register */
-#define	TIMER_STATUS				0xFFC00648	/* Timer Status	Register */
-
-
-/* Programmable	Flags (0xFFC00700 - 0xFFC007FF) */
-#define	FIO_FLAG_D				0xFFC00700  /* Flag Mask to directly specify state of pins */
-#define	FIO_FLAG_C			0xFFC00704  /* Peripheral Interrupt Flag Register (clear) */
-#define	FIO_FLAG_S			0xFFC00708  /* Peripheral Interrupt Flag Register (set) */
-#define	FIO_FLAG_T					0xFFC0070C  /* Flag Mask to directly toggle state of pins */
-#define	FIO_MASKA_D			0xFFC00710  /* Flag Mask Interrupt A Register (set directly) */
-#define	FIO_MASKA_C			0xFFC00714  /* Flag Mask Interrupt A Register (clear) */
-#define	FIO_MASKA_S			0xFFC00718  /* Flag Mask Interrupt A Register (set) */
-#define	FIO_MASKA_T			0xFFC0071C  /* Flag Mask Interrupt A Register (toggle) */
-#define	FIO_MASKB_D			0xFFC00720  /* Flag Mask Interrupt B Register (set directly) */
-#define	FIO_MASKB_C			0xFFC00724  /* Flag Mask Interrupt B Register (clear) */
-#define	FIO_MASKB_S			0xFFC00728  /* Flag Mask Interrupt B Register (set) */
-#define	FIO_MASKB_T			0xFFC0072C  /* Flag Mask Interrupt B Register (toggle) */
-#define	FIO_DIR				0xFFC00730  /* Peripheral Flag Direction Register */
-#define	FIO_POLAR			0xFFC00734  /* Flag Source Polarity Register */
-#define	FIO_EDGE			0xFFC00738  /* Flag Source Sensitivity Register */
-#define	FIO_BOTH			0xFFC0073C  /* Flag Set	on BOTH	Edges Register */
-#define	FIO_INEN					0xFFC00740  /* Flag Input Enable Register  */
-
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define	SPORT0_TCR1				0xFFC00800  /* SPORT0 Transmit Configuration 1 Register */
-#define	SPORT0_TCR2				0xFFC00804  /* SPORT0 Transmit Configuration 2 Register */
-#define	SPORT0_TCLKDIV			0xFFC00808  /* SPORT0 Transmit Clock Divider */
-#define	SPORT0_TFSDIV			0xFFC0080C  /* SPORT0 Transmit Frame Sync Divider */
-#define	SPORT0_TX			0xFFC00810  /* SPORT0 TX Data Register */
-#define	SPORT0_RX			0xFFC00818  /* SPORT0 RX Data Register */
-#define	SPORT0_RCR1				0xFFC00820  /* SPORT0 Transmit Configuration 1 Register */
-#define	SPORT0_RCR2				0xFFC00824  /* SPORT0 Transmit Configuration 2 Register */
-#define	SPORT0_RCLKDIV			0xFFC00828  /* SPORT0 Receive Clock Divider */
-#define	SPORT0_RFSDIV			0xFFC0082C  /* SPORT0 Receive Frame Sync Divider */
-#define	SPORT0_STAT			0xFFC00830  /* SPORT0 Status Register */
-#define	SPORT0_CHNL			0xFFC00834  /* SPORT0 Current Channel Register */
-#define	SPORT0_MCMC1			0xFFC00838  /* SPORT0 Multi-Channel Configuration Register 1 */
-#define	SPORT0_MCMC2			0xFFC0083C  /* SPORT0 Multi-Channel Configuration Register 2 */
-#define	SPORT0_MTCS0			0xFFC00840  /* SPORT0 Multi-Channel Transmit Select Register 0 */
-#define	SPORT0_MTCS1			0xFFC00844  /* SPORT0 Multi-Channel Transmit Select Register 1 */
-#define	SPORT0_MTCS2			0xFFC00848  /* SPORT0 Multi-Channel Transmit Select Register 2 */
-#define	SPORT0_MTCS3			0xFFC0084C  /* SPORT0 Multi-Channel Transmit Select Register 3 */
-#define	SPORT0_MRCS0			0xFFC00850  /* SPORT0 Multi-Channel Receive Select Register 0 */
-#define	SPORT0_MRCS1			0xFFC00854  /* SPORT0 Multi-Channel Receive Select Register 1 */
-#define	SPORT0_MRCS2			0xFFC00858  /* SPORT0 Multi-Channel Receive Select Register 2 */
-#define	SPORT0_MRCS3			0xFFC0085C  /* SPORT0 Multi-Channel Receive Select Register 3 */
-
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define	SPORT1_TCR1				0xFFC00900  /* SPORT1 Transmit Configuration 1 Register */
-#define	SPORT1_TCR2				0xFFC00904  /* SPORT1 Transmit Configuration 2 Register */
-#define	SPORT1_TCLKDIV			0xFFC00908  /* SPORT1 Transmit Clock Divider */
-#define	SPORT1_TFSDIV			0xFFC0090C  /* SPORT1 Transmit Frame Sync Divider */
-#define	SPORT1_TX			0xFFC00910  /* SPORT1 TX Data Register */
-#define	SPORT1_RX			0xFFC00918  /* SPORT1 RX Data Register */
-#define	SPORT1_RCR1				0xFFC00920  /* SPORT1 Transmit Configuration 1 Register */
-#define	SPORT1_RCR2				0xFFC00924  /* SPORT1 Transmit Configuration 2 Register */
-#define	SPORT1_RCLKDIV			0xFFC00928  /* SPORT1 Receive Clock Divider */
-#define	SPORT1_RFSDIV			0xFFC0092C  /* SPORT1 Receive Frame Sync Divider */
-#define	SPORT1_STAT			0xFFC00930  /* SPORT1 Status Register */
-#define	SPORT1_CHNL			0xFFC00934  /* SPORT1 Current Channel Register */
-#define	SPORT1_MCMC1			0xFFC00938  /* SPORT1 Multi-Channel Configuration Register 1 */
-#define	SPORT1_MCMC2			0xFFC0093C  /* SPORT1 Multi-Channel Configuration Register 2 */
-#define	SPORT1_MTCS0			0xFFC00940  /* SPORT1 Multi-Channel Transmit Select Register 0 */
-#define	SPORT1_MTCS1			0xFFC00944  /* SPORT1 Multi-Channel Transmit Select Register 1 */
-#define	SPORT1_MTCS2			0xFFC00948  /* SPORT1 Multi-Channel Transmit Select Register 2 */
-#define	SPORT1_MTCS3			0xFFC0094C  /* SPORT1 Multi-Channel Transmit Select Register 3 */
-#define	SPORT1_MRCS0			0xFFC00950  /* SPORT1 Multi-Channel Receive Select Register 0 */
-#define	SPORT1_MRCS1			0xFFC00954  /* SPORT1 Multi-Channel Receive Select Register 1 */
-#define	SPORT1_MRCS2			0xFFC00958  /* SPORT1 Multi-Channel Receive Select Register 2 */
-#define	SPORT1_MRCS3			0xFFC0095C  /* SPORT1 Multi-Channel Receive Select Register 3 */
-
-
-/* External Bus	Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-/* Asynchronous	Memory Controller  */
-#define	EBIU_AMGCTL			0xFFC00A00  /* Asynchronous Memory Global Control Register */
-#define	EBIU_AMBCTL0		0xFFC00A04  /* Asynchronous Memory Bank	Control	Register 0 */
-#define	EBIU_AMBCTL1		0xFFC00A08  /* Asynchronous Memory Bank	Control	Register 1 */
-
-/* SDRAM Controller */
-#define	EBIU_SDGCTL			0xFFC00A10  /* SDRAM Global Control Register */
-#define	EBIU_SDBCTL			0xFFC00A14  /* SDRAM Bank Control Register */
-#define	EBIU_SDRRC			0xFFC00A18  /* SDRAM Refresh Rate Control Register */
-#define	EBIU_SDSTAT			0xFFC00A1C  /* SDRAM Status Register */
-
-
-
-/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
-
-#define	DMAC0_TC_PER			0xFFC00B0C	/* DMA Controller 0 Traffic Control Periods Register */
-#define	DMAC0_TC_CNT			0xFFC00B10	/* DMA Controller 0 Traffic Control Current Counts Register */
-
-/* Alternate deprecated	register names (below) provided	for backwards code compatibility */
-#define	DMA0_TCPER			DMAC0_TC_PER
-#define	DMA0_TCCNT			DMAC0_TC_CNT
-
-
-/* DMA Controller 0 (0xFFC00C00	- 0xFFC00FFF)							 */
-
-#define	DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register */
-#define	DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register */
-#define	DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register */
-#define	DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register */
-#define	DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register */
-#define	DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register */
-#define	DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register */
-#define	DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register */
-#define	DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register */
-#define	DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register */
-#define	DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map	Register */
-#define	DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register */
-#define	DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register */
-
-#define	DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register */
-#define	DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register */
-#define	DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register */
-#define	DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register */
-#define	DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register */
-#define	DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register */
-#define	DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register */
-#define	DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register */
-#define	DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register */
-#define	DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register */
-#define	DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map	Register */
-#define	DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register */
-#define	DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register */
-
-#define	DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register */
-#define	DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register */
-#define	DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register */
-#define	DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register */
-#define	DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register */
-#define	DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register */
-#define	DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register */
-#define	DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register */
-#define	DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register */
-#define	DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register */
-#define	DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map	Register */
-#define	DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register */
-#define	DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register */
-
-#define	DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register */
-#define	DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register */
-#define	DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register */
-#define	DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register */
-#define	DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register */
-#define	DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register */
-#define	DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register */
-#define	DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register */
-#define	DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register */
-#define	DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register */
-#define	DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map	Register */
-#define	DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register */
-#define	DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register */
-
-#define	DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register */
-#define	DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register */
-#define	DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register */
-#define	DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register */
-#define	DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register */
-#define	DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register */
-#define	DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register */
-#define	DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register */
-#define	DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register */
-#define	DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register */
-#define	DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map	Register */
-#define	DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register */
-#define	DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register */
-
-#define	DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register */
-#define	DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register */
-#define	DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register */
-#define	DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register */
-#define	DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register */
-#define	DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register */
-#define	DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register */
-#define	DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register */
-#define	DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register */
-#define	DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register */
-#define	DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map	Register */
-#define	DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register */
-#define	DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register */
-
-#define	DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register */
-#define	DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register */
-#define	DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register */
-#define	DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register */
-#define	DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register */
-#define	DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register */
-#define	DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register */
-#define	DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register */
-#define	DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register */
-#define	DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register */
-#define	DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map	Register */
-#define	DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register */
-#define	DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register */
-
-#define	DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register */
-#define	DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register */
-#define	DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register */
-#define	DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register */
-#define	DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register */
-#define	DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register */
-#define	DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register */
-#define	DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register */
-#define	DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register */
-#define	DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register */
-#define	DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map	Register */
-#define	DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register */
-#define	DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register */
-
-#define	MDMA0_D0_NEXT_DESC_PTR	0xFFC00E00	/* MemDMA0 Stream 0 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA0_D0_START_ADDR		0xFFC00E04	/* MemDMA0 Stream 0 Destination	Start Address Register */
-#define	MDMA0_D0_CONFIG			0xFFC00E08	/* MemDMA0 Stream 0 Destination	Configuration Register */
-#define	MDMA0_D0_X_COUNT		0xFFC00E10	/* MemDMA0 Stream 0 Destination	X Count	Register */
-#define	MDMA0_D0_X_MODIFY		0xFFC00E14	/* MemDMA0 Stream 0 Destination	X Modify Register */
-#define	MDMA0_D0_Y_COUNT		0xFFC00E18	/* MemDMA0 Stream 0 Destination	Y Count	Register */
-#define	MDMA0_D0_Y_MODIFY		0xFFC00E1C	/* MemDMA0 Stream 0 Destination	Y Modify Register */
-#define	MDMA0_D0_CURR_DESC_PTR	0xFFC00E20	/* MemDMA0 Stream 0 Destination	Current	Descriptor Pointer Register */
-#define	MDMA0_D0_CURR_ADDR		0xFFC00E24	/* MemDMA0 Stream 0 Destination	Current	Address	Register */
-#define	MDMA0_D0_IRQ_STATUS		0xFFC00E28	/* MemDMA0 Stream 0 Destination	Interrupt/Status Register */
-#define	MDMA0_D0_PERIPHERAL_MAP	0xFFC00E2C	/* MemDMA0 Stream 0 Destination	Peripheral Map Register */
-#define	MDMA0_D0_CURR_X_COUNT	0xFFC00E30	/* MemDMA0 Stream 0 Destination	Current	X Count	Register */
-#define	MDMA0_D0_CURR_Y_COUNT	0xFFC00E38	/* MemDMA0 Stream 0 Destination	Current	Y Count	Register */
-
-#define	MDMA0_S0_NEXT_DESC_PTR	0xFFC00E40	/* MemDMA0 Stream 0 Source Next	Descriptor Pointer Register */
-#define	MDMA0_S0_START_ADDR		0xFFC00E44	/* MemDMA0 Stream 0 Source Start Address Register */
-#define	MDMA0_S0_CONFIG			0xFFC00E48	/* MemDMA0 Stream 0 Source Configuration Register */
-#define	MDMA0_S0_X_COUNT		0xFFC00E50	/* MemDMA0 Stream 0 Source X Count Register */
-#define	MDMA0_S0_X_MODIFY		0xFFC00E54	/* MemDMA0 Stream 0 Source X Modify Register */
-#define	MDMA0_S0_Y_COUNT		0xFFC00E58	/* MemDMA0 Stream 0 Source Y Count Register */
-#define	MDMA0_S0_Y_MODIFY		0xFFC00E5C	/* MemDMA0 Stream 0 Source Y Modify Register */
-#define	MDMA0_S0_CURR_DESC_PTR	0xFFC00E60	/* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
-#define	MDMA0_S0_CURR_ADDR		0xFFC00E64	/* MemDMA0 Stream 0 Source Current Address Register */
-#define	MDMA0_S0_IRQ_STATUS		0xFFC00E68	/* MemDMA0 Stream 0 Source Interrupt/Status Register */
-#define	MDMA0_S0_PERIPHERAL_MAP	0xFFC00E6C	/* MemDMA0 Stream 0 Source Peripheral Map Register */
-#define	MDMA0_S0_CURR_X_COUNT	0xFFC00E70	/* MemDMA0 Stream 0 Source Current X Count Register */
-#define	MDMA0_S0_CURR_Y_COUNT	0xFFC00E78	/* MemDMA0 Stream 0 Source Current Y Count Register */
-
-#define	MDMA0_D1_NEXT_DESC_PTR	0xFFC00E80	/* MemDMA0 Stream 1 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA0_D1_START_ADDR		0xFFC00E84	/* MemDMA0 Stream 1 Destination	Start Address Register */
-#define	MDMA0_D1_CONFIG			0xFFC00E88	/* MemDMA0 Stream 1 Destination	Configuration Register */
-#define	MDMA0_D1_X_COUNT		0xFFC00E90	/* MemDMA0 Stream 1 Destination	X Count	Register */
-#define	MDMA0_D1_X_MODIFY		0xFFC00E94	/* MemDMA0 Stream 1 Destination	X Modify Register */
-#define	MDMA0_D1_Y_COUNT		0xFFC00E98	/* MemDMA0 Stream 1 Destination	Y Count	Register */
-#define	MDMA0_D1_Y_MODIFY		0xFFC00E9C	/* MemDMA0 Stream 1 Destination	Y Modify Register */
-#define	MDMA0_D1_CURR_DESC_PTR	0xFFC00EA0	/* MemDMA0 Stream 1 Destination	Current	Descriptor Pointer Register */
-#define	MDMA0_D1_CURR_ADDR		0xFFC00EA4	/* MemDMA0 Stream 1 Destination	Current	Address	Register */
-#define	MDMA0_D1_IRQ_STATUS		0xFFC00EA8	/* MemDMA0 Stream 1 Destination	Interrupt/Status Register */
-#define	MDMA0_D1_PERIPHERAL_MAP	0xFFC00EAC	/* MemDMA0 Stream 1 Destination	Peripheral Map Register */
-#define	MDMA0_D1_CURR_X_COUNT	0xFFC00EB0	/* MemDMA0 Stream 1 Destination	Current	X Count	Register */
-#define	MDMA0_D1_CURR_Y_COUNT	0xFFC00EB8	/* MemDMA0 Stream 1 Destination	Current	Y Count	Register */
-
-#define	MDMA0_S1_NEXT_DESC_PTR	0xFFC00EC0	/* MemDMA0 Stream 1 Source Next	Descriptor Pointer Register */
-#define	MDMA0_S1_START_ADDR		0xFFC00EC4	/* MemDMA0 Stream 1 Source Start Address Register */
-#define	MDMA0_S1_CONFIG			0xFFC00EC8	/* MemDMA0 Stream 1 Source Configuration Register */
-#define	MDMA0_S1_X_COUNT		0xFFC00ED0	/* MemDMA0 Stream 1 Source X Count Register */
-#define	MDMA0_S1_X_MODIFY		0xFFC00ED4	/* MemDMA0 Stream 1 Source X Modify Register */
-#define	MDMA0_S1_Y_COUNT		0xFFC00ED8	/* MemDMA0 Stream 1 Source Y Count Register */
-#define	MDMA0_S1_Y_MODIFY		0xFFC00EDC	/* MemDMA0 Stream 1 Source Y Modify Register */
-#define	MDMA0_S1_CURR_DESC_PTR	0xFFC00EE0	/* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
-#define	MDMA0_S1_CURR_ADDR		0xFFC00EE4	/* MemDMA0 Stream 1 Source Current Address Register */
-#define	MDMA0_S1_IRQ_STATUS		0xFFC00EE8	/* MemDMA0 Stream 1 Source Interrupt/Status Register */
-#define	MDMA0_S1_PERIPHERAL_MAP	0xFFC00EEC	/* MemDMA0 Stream 1 Source Peripheral Map Register */
-#define	MDMA0_S1_CURR_X_COUNT	0xFFC00EF0	/* MemDMA0 Stream 1 Source Current X Count Register */
-#define	MDMA0_S1_CURR_Y_COUNT	0xFFC00EF8	/* MemDMA0 Stream 1 Source Current Y Count Register */
-
-#define MDMA_D0_NEXT_DESC_PTR MDMA0_D0_NEXT_DESC_PTR
-#define MDMA_D0_START_ADDR MDMA0_D0_START_ADDR
-#define MDMA_D0_CONFIG MDMA0_D0_CONFIG
-#define MDMA_D0_X_COUNT MDMA0_D0_X_COUNT
-#define MDMA_D0_X_MODIFY MDMA0_D0_X_MODIFY
-#define MDMA_D0_Y_COUNT MDMA0_D0_Y_COUNT
-#define MDMA_D0_Y_MODIFY MDMA0_D0_Y_MODIFY
-#define MDMA_D0_CURR_DESC_PTR MDMA0_D0_CURR_DESC_PTR
-#define MDMA_D0_CURR_ADDR MDMA0_D0_CURR_ADDR
-#define MDMA_D0_IRQ_STATUS MDMA0_D0_IRQ_STATUS
-#define MDMA_D0_PERIPHERAL_MAP MDMA0_D0_PERIPHERAL_MAP
-#define MDMA_D0_CURR_X_COUNT MDMA0_D0_CURR_X_COUNT
-#define MDMA_D0_CURR_Y_COUNT MDMA0_D0_CURR_Y_COUNT
-
-#define MDMA_S0_NEXT_DESC_PTR MDMA0_S0_NEXT_DESC_PTR
-#define MDMA_S0_START_ADDR MDMA0_S0_START_ADDR
-#define MDMA_S0_CONFIG MDMA0_S0_CONFIG
-#define MDMA_S0_X_COUNT MDMA0_S0_X_COUNT
-#define MDMA_S0_X_MODIFY MDMA0_S0_X_MODIFY
-#define MDMA_S0_Y_COUNT MDMA0_S0_Y_COUNT
-#define MDMA_S0_Y_MODIFY MDMA0_S0_Y_MODIFY
-#define MDMA_S0_CURR_DESC_PTR MDMA0_S0_CURR_DESC_PTR
-#define MDMA_S0_CURR_ADDR MDMA0_S0_CURR_ADDR
-#define MDMA_S0_IRQ_STATUS MDMA0_S0_IRQ_STATUS
-#define MDMA_S0_PERIPHERAL_MAP MDMA0_S0_PERIPHERAL_MAP
-#define MDMA_S0_CURR_X_COUNT MDMA0_S0_CURR_X_COUNT
-#define MDMA_S0_CURR_Y_COUNT MDMA0_S0_CURR_Y_COUNT
-
-#define MDMA_D1_NEXT_DESC_PTR MDMA0_D1_NEXT_DESC_PTR
-#define MDMA_D1_START_ADDR MDMA0_D1_START_ADDR
-#define MDMA_D1_CONFIG MDMA0_D1_CONFIG
-#define MDMA_D1_X_COUNT MDMA0_D1_X_COUNT
-#define MDMA_D1_X_MODIFY MDMA0_D1_X_MODIFY
-#define MDMA_D1_Y_COUNT MDMA0_D1_Y_COUNT
-#define MDMA_D1_Y_MODIFY MDMA0_D1_Y_MODIFY
-#define MDMA_D1_CURR_DESC_PTR MDMA0_D1_CURR_DESC_PTR
-#define MDMA_D1_CURR_ADDR MDMA0_D1_CURR_ADDR
-#define MDMA_D1_IRQ_STATUS MDMA0_D1_IRQ_STATUS
-#define MDMA_D1_PERIPHERAL_MAP MDMA0_D1_PERIPHERAL_MAP
-#define MDMA_D1_CURR_X_COUNT MDMA0_D1_CURR_X_COUNT
-#define MDMA_D1_CURR_Y_COUNT MDMA0_D1_CURR_Y_COUNT
-
-#define MDMA_S1_NEXT_DESC_PTR MDMA0_S1_NEXT_DESC_PTR
-#define MDMA_S1_START_ADDR MDMA0_S1_START_ADDR
-#define MDMA_S1_CONFIG MDMA0_S1_CONFIG
-#define MDMA_S1_X_COUNT MDMA0_S1_X_COUNT
-#define MDMA_S1_X_MODIFY MDMA0_S1_X_MODIFY
-#define MDMA_S1_Y_COUNT MDMA0_S1_Y_COUNT
-#define MDMA_S1_Y_MODIFY MDMA0_S1_Y_MODIFY
-#define MDMA_S1_CURR_DESC_PTR MDMA0_S1_CURR_DESC_PTR
-#define MDMA_S1_CURR_ADDR MDMA0_S1_CURR_ADDR
-#define MDMA_S1_IRQ_STATUS MDMA0_S1_IRQ_STATUS
-#define MDMA_S1_PERIPHERAL_MAP MDMA0_S1_PERIPHERAL_MAP
-#define MDMA_S1_CURR_X_COUNT MDMA0_S1_CURR_X_COUNT
-#define MDMA_S1_CURR_Y_COUNT MDMA0_S1_CURR_Y_COUNT
-
-
-/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
-#define	PPI_CONTROL			0xFFC01000	/* PPI Control Register */
-#define	PPI_STATUS			0xFFC01004	/* PPI Status Register */
-#define	PPI_COUNT			0xFFC01008	/* PPI Transfer	Count Register */
-#define	PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register */
-#define	PPI_FRAME			0xFFC01010	/* PPI Frame Length Register */
-
-
-/* Two-Wire Interface 0	(0xFFC01400 - 0xFFC014FF)			 */
-#define	TWI0_CLKDIV			0xFFC01400	/* Serial Clock	Divider	Register */
-#define	TWI0_CONTROL		0xFFC01404	/* TWI0	Master Internal	Time Reference Register */
-#define	TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register */
-#define	TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register */
-#define	TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register */
-#define	TWI0_MASTER_CTL	0xFFC01414	/* Master Mode Control Register */
-#define	TWI0_MASTER_STAT	0xFFC01418	/* Master Mode Status Register */
-#define	TWI0_MASTER_ADDR	0xFFC0141C	/* Master Mode Address Register */
-#define	TWI0_INT_STAT		0xFFC01420	/* TWI0	Master Interrupt Register */
-#define	TWI0_INT_MASK		0xFFC01424	/* TWI0	Master Interrupt Mask Register */
-#define	TWI0_FIFO_CTL		0xFFC01428	/* FIFO	Control	Register */
-#define	TWI0_FIFO_STAT		0xFFC0142C	/* FIFO	Status Register */
-#define	TWI0_XMT_DATA8		0xFFC01480	/* FIFO	Transmit Data Single Byte Register */
-#define	TWI0_XMT_DATA16		0xFFC01484	/* FIFO	Transmit Data Double Byte Register */
-#define	TWI0_RCV_DATA8		0xFFC01488	/* FIFO	Receive	Data Single Byte Register */
-#define	TWI0_RCV_DATA16		0xFFC0148C	/* FIFO	Receive	Data Double Byte Register */
-
-#define TWI0_REGBASE		TWI0_CLKDIV
-
-/* the following are for backwards compatibility */
-#define	TWI0_PRESCALE	 TWI0_CONTROL
-#define	TWI0_INT_SRC	 TWI0_INT_STAT
-#define	TWI0_INT_ENABLE	 TWI0_INT_MASK
-
-
-/* General-Purpose Ports  (0xFFC01500 -	0xFFC015FF)	 */
-
-/* GPIO	Port C Register	Names */
-#define PORTCIO_FER			0xFFC01500	/* GPIO	Pin Port C Configuration Register */
-#define PORTCIO				0xFFC01510	/* GPIO	Pin Port C Data	Register */
-#define PORTCIO_CLEAR			0xFFC01520	/* Clear GPIO Pin Port C Register */
-#define PORTCIO_SET			0xFFC01530	/* Set GPIO Pin	Port C Register */
-#define PORTCIO_TOGGLE			0xFFC01540	/* Toggle GPIO Pin Port	C Register */
-#define PORTCIO_DIR			0xFFC01550	/* GPIO	Pin Port C Direction Register */
-#define PORTCIO_INEN			0xFFC01560	/* GPIO	Pin Port C Input Enable	Register */
-
-/* GPIO	Port D Register	Names */
-#define PORTDIO_FER			0xFFC01504	/* GPIO	Pin Port D Configuration Register */
-#define PORTDIO				0xFFC01514	/* GPIO	Pin Port D Data	Register */
-#define PORTDIO_CLEAR			0xFFC01524	/* Clear GPIO Pin Port D Register */
-#define PORTDIO_SET			0xFFC01534	/* Set GPIO Pin	Port D Register */
-#define PORTDIO_TOGGLE			0xFFC01544	/* Toggle GPIO Pin Port	D Register */
-#define PORTDIO_DIR			0xFFC01554	/* GPIO	Pin Port D Direction Register */
-#define PORTDIO_INEN			0xFFC01564	/* GPIO	Pin Port D Input Enable	Register */
-
-/* GPIO	Port E Register	Names */
-#define PORTEIO_FER			0xFFC01508	/* GPIO	Pin Port E Configuration Register */
-#define PORTEIO				0xFFC01518	/* GPIO	Pin Port E Data	Register */
-#define PORTEIO_CLEAR			0xFFC01528	/* Clear GPIO Pin Port E Register */
-#define PORTEIO_SET			0xFFC01538	/* Set GPIO Pin	Port E Register */
-#define PORTEIO_TOGGLE			0xFFC01548	/* Toggle GPIO Pin Port	E Register */
-#define PORTEIO_DIR			0xFFC01558	/* GPIO	Pin Port E Direction Register */
-#define PORTEIO_INEN			0xFFC01568	/* GPIO	Pin Port E Input Enable	Register */
-
-/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
-
-#define	DMAC1_TC_PER			0xFFC01B0C	/* DMA Controller 1 Traffic Control Periods Register */
-#define	DMAC1_TC_CNT			0xFFC01B10	/* DMA Controller 1 Traffic Control Current Counts Register */
-
-/* Alternate deprecated	register names (below) provided	for backwards code compatibility */
-#define	DMA1_TCPER			DMAC1_TC_PER
-#define	DMA1_TCCNT			DMAC1_TC_CNT
-
-
-/* DMA Controller 1 (0xFFC01C00	- 0xFFC01FFF)							 */
-#define	DMA8_NEXT_DESC_PTR		0xFFC01C00	/* DMA Channel 8 Next Descriptor Pointer Register */
-#define	DMA8_START_ADDR			0xFFC01C04	/* DMA Channel 8 Start Address Register */
-#define	DMA8_CONFIG				0xFFC01C08	/* DMA Channel 8 Configuration Register */
-#define	DMA8_X_COUNT			0xFFC01C10	/* DMA Channel 8 X Count Register */
-#define	DMA8_X_MODIFY			0xFFC01C14	/* DMA Channel 8 X Modify Register */
-#define	DMA8_Y_COUNT			0xFFC01C18	/* DMA Channel 8 Y Count Register */
-#define	DMA8_Y_MODIFY			0xFFC01C1C	/* DMA Channel 8 Y Modify Register */
-#define	DMA8_CURR_DESC_PTR		0xFFC01C20	/* DMA Channel 8 Current Descriptor Pointer Register */
-#define	DMA8_CURR_ADDR			0xFFC01C24	/* DMA Channel 8 Current Address Register */
-#define	DMA8_IRQ_STATUS			0xFFC01C28	/* DMA Channel 8 Interrupt/Status Register */
-#define	DMA8_PERIPHERAL_MAP		0xFFC01C2C	/* DMA Channel 8 Peripheral Map	Register */
-#define	DMA8_CURR_X_COUNT		0xFFC01C30	/* DMA Channel 8 Current X Count Register */
-#define	DMA8_CURR_Y_COUNT		0xFFC01C38	/* DMA Channel 8 Current Y Count Register */
-
-#define	DMA9_NEXT_DESC_PTR		0xFFC01C40	/* DMA Channel 9 Next Descriptor Pointer Register */
-#define	DMA9_START_ADDR			0xFFC01C44	/* DMA Channel 9 Start Address Register */
-#define	DMA9_CONFIG				0xFFC01C48	/* DMA Channel 9 Configuration Register */
-#define	DMA9_X_COUNT			0xFFC01C50	/* DMA Channel 9 X Count Register */
-#define	DMA9_X_MODIFY			0xFFC01C54	/* DMA Channel 9 X Modify Register */
-#define	DMA9_Y_COUNT			0xFFC01C58	/* DMA Channel 9 Y Count Register */
-#define	DMA9_Y_MODIFY			0xFFC01C5C	/* DMA Channel 9 Y Modify Register */
-#define	DMA9_CURR_DESC_PTR		0xFFC01C60	/* DMA Channel 9 Current Descriptor Pointer Register */
-#define	DMA9_CURR_ADDR			0xFFC01C64	/* DMA Channel 9 Current Address Register */
-#define	DMA9_IRQ_STATUS			0xFFC01C68	/* DMA Channel 9 Interrupt/Status Register */
-#define	DMA9_PERIPHERAL_MAP		0xFFC01C6C	/* DMA Channel 9 Peripheral Map	Register */
-#define	DMA9_CURR_X_COUNT		0xFFC01C70	/* DMA Channel 9 Current X Count Register */
-#define	DMA9_CURR_Y_COUNT		0xFFC01C78	/* DMA Channel 9 Current Y Count Register */
-
-#define	DMA10_NEXT_DESC_PTR		0xFFC01C80	/* DMA Channel 10 Next Descriptor Pointer Register */
-#define	DMA10_START_ADDR		0xFFC01C84	/* DMA Channel 10 Start	Address	Register */
-#define	DMA10_CONFIG			0xFFC01C88	/* DMA Channel 10 Configuration	Register */
-#define	DMA10_X_COUNT			0xFFC01C90	/* DMA Channel 10 X Count Register */
-#define	DMA10_X_MODIFY			0xFFC01C94	/* DMA Channel 10 X Modify Register */
-#define	DMA10_Y_COUNT			0xFFC01C98	/* DMA Channel 10 Y Count Register */
-#define	DMA10_Y_MODIFY			0xFFC01C9C	/* DMA Channel 10 Y Modify Register */
-#define	DMA10_CURR_DESC_PTR		0xFFC01CA0	/* DMA Channel 10 Current Descriptor Pointer Register */
-#define	DMA10_CURR_ADDR			0xFFC01CA4	/* DMA Channel 10 Current Address Register */
-#define	DMA10_IRQ_STATUS		0xFFC01CA8	/* DMA Channel 10 Interrupt/Status Register */
-#define	DMA10_PERIPHERAL_MAP	0xFFC01CAC	/* DMA Channel 10 Peripheral Map Register */
-#define	DMA10_CURR_X_COUNT		0xFFC01CB0	/* DMA Channel 10 Current X Count Register */
-#define	DMA10_CURR_Y_COUNT		0xFFC01CB8	/* DMA Channel 10 Current Y Count Register */
-
-#define	DMA11_NEXT_DESC_PTR		0xFFC01CC0	/* DMA Channel 11 Next Descriptor Pointer Register */
-#define	DMA11_START_ADDR		0xFFC01CC4	/* DMA Channel 11 Start	Address	Register */
-#define	DMA11_CONFIG			0xFFC01CC8	/* DMA Channel 11 Configuration	Register */
-#define	DMA11_X_COUNT			0xFFC01CD0	/* DMA Channel 11 X Count Register */
-#define	DMA11_X_MODIFY			0xFFC01CD4	/* DMA Channel 11 X Modify Register */
-#define	DMA11_Y_COUNT			0xFFC01CD8	/* DMA Channel 11 Y Count Register */
-#define	DMA11_Y_MODIFY			0xFFC01CDC	/* DMA Channel 11 Y Modify Register */
-#define	DMA11_CURR_DESC_PTR		0xFFC01CE0	/* DMA Channel 11 Current Descriptor Pointer Register */
-#define	DMA11_CURR_ADDR			0xFFC01CE4	/* DMA Channel 11 Current Address Register */
-#define	DMA11_IRQ_STATUS		0xFFC01CE8	/* DMA Channel 11 Interrupt/Status Register */
-#define	DMA11_PERIPHERAL_MAP	0xFFC01CEC	/* DMA Channel 11 Peripheral Map Register */
-#define	DMA11_CURR_X_COUNT		0xFFC01CF0	/* DMA Channel 11 Current X Count Register */
-#define	DMA11_CURR_Y_COUNT		0xFFC01CF8	/* DMA Channel 11 Current Y Count Register */
-
-#define	DMA12_NEXT_DESC_PTR		0xFFC01D00	/* DMA Channel 12 Next Descriptor Pointer Register */
-#define	DMA12_START_ADDR		0xFFC01D04	/* DMA Channel 12 Start	Address	Register */
-#define	DMA12_CONFIG			0xFFC01D08	/* DMA Channel 12 Configuration	Register */
-#define	DMA12_X_COUNT			0xFFC01D10	/* DMA Channel 12 X Count Register */
-#define	DMA12_X_MODIFY			0xFFC01D14	/* DMA Channel 12 X Modify Register */
-#define	DMA12_Y_COUNT			0xFFC01D18	/* DMA Channel 12 Y Count Register */
-#define	DMA12_Y_MODIFY			0xFFC01D1C	/* DMA Channel 12 Y Modify Register */
-#define	DMA12_CURR_DESC_PTR		0xFFC01D20	/* DMA Channel 12 Current Descriptor Pointer Register */
-#define	DMA12_CURR_ADDR			0xFFC01D24	/* DMA Channel 12 Current Address Register */
-#define	DMA12_IRQ_STATUS		0xFFC01D28	/* DMA Channel 12 Interrupt/Status Register */
-#define	DMA12_PERIPHERAL_MAP	0xFFC01D2C	/* DMA Channel 12 Peripheral Map Register */
-#define	DMA12_CURR_X_COUNT		0xFFC01D30	/* DMA Channel 12 Current X Count Register */
-#define	DMA12_CURR_Y_COUNT		0xFFC01D38	/* DMA Channel 12 Current Y Count Register */
-
-#define	DMA13_NEXT_DESC_PTR		0xFFC01D40	/* DMA Channel 13 Next Descriptor Pointer Register */
-#define	DMA13_START_ADDR		0xFFC01D44	/* DMA Channel 13 Start	Address	Register */
-#define	DMA13_CONFIG			0xFFC01D48	/* DMA Channel 13 Configuration	Register */
-#define	DMA13_X_COUNT			0xFFC01D50	/* DMA Channel 13 X Count Register */
-#define	DMA13_X_MODIFY			0xFFC01D54	/* DMA Channel 13 X Modify Register */
-#define	DMA13_Y_COUNT			0xFFC01D58	/* DMA Channel 13 Y Count Register */
-#define	DMA13_Y_MODIFY			0xFFC01D5C	/* DMA Channel 13 Y Modify Register */
-#define	DMA13_CURR_DESC_PTR		0xFFC01D60	/* DMA Channel 13 Current Descriptor Pointer Register */
-#define	DMA13_CURR_ADDR			0xFFC01D64	/* DMA Channel 13 Current Address Register */
-#define	DMA13_IRQ_STATUS		0xFFC01D68	/* DMA Channel 13 Interrupt/Status Register */
-#define	DMA13_PERIPHERAL_MAP	0xFFC01D6C	/* DMA Channel 13 Peripheral Map Register */
-#define	DMA13_CURR_X_COUNT		0xFFC01D70	/* DMA Channel 13 Current X Count Register */
-#define	DMA13_CURR_Y_COUNT		0xFFC01D78	/* DMA Channel 13 Current Y Count Register */
-
-#define	DMA14_NEXT_DESC_PTR		0xFFC01D80	/* DMA Channel 14 Next Descriptor Pointer Register */
-#define	DMA14_START_ADDR		0xFFC01D84	/* DMA Channel 14 Start	Address	Register */
-#define	DMA14_CONFIG			0xFFC01D88	/* DMA Channel 14 Configuration	Register */
-#define	DMA14_X_COUNT			0xFFC01D90	/* DMA Channel 14 X Count Register */
-#define	DMA14_X_MODIFY			0xFFC01D94	/* DMA Channel 14 X Modify Register */
-#define	DMA14_Y_COUNT			0xFFC01D98	/* DMA Channel 14 Y Count Register */
-#define	DMA14_Y_MODIFY			0xFFC01D9C	/* DMA Channel 14 Y Modify Register */
-#define	DMA14_CURR_DESC_PTR		0xFFC01DA0	/* DMA Channel 14 Current Descriptor Pointer Register */
-#define	DMA14_CURR_ADDR			0xFFC01DA4	/* DMA Channel 14 Current Address Register */
-#define	DMA14_IRQ_STATUS		0xFFC01DA8	/* DMA Channel 14 Interrupt/Status Register */
-#define	DMA14_PERIPHERAL_MAP	0xFFC01DAC	/* DMA Channel 14 Peripheral Map Register */
-#define	DMA14_CURR_X_COUNT		0xFFC01DB0	/* DMA Channel 14 Current X Count Register */
-#define	DMA14_CURR_Y_COUNT		0xFFC01DB8	/* DMA Channel 14 Current Y Count Register */
-
-#define	DMA15_NEXT_DESC_PTR		0xFFC01DC0	/* DMA Channel 15 Next Descriptor Pointer Register */
-#define	DMA15_START_ADDR		0xFFC01DC4	/* DMA Channel 15 Start	Address	Register */
-#define	DMA15_CONFIG			0xFFC01DC8	/* DMA Channel 15 Configuration	Register */
-#define	DMA15_X_COUNT			0xFFC01DD0	/* DMA Channel 15 X Count Register */
-#define	DMA15_X_MODIFY			0xFFC01DD4	/* DMA Channel 15 X Modify Register */
-#define	DMA15_Y_COUNT			0xFFC01DD8	/* DMA Channel 15 Y Count Register */
-#define	DMA15_Y_MODIFY			0xFFC01DDC	/* DMA Channel 15 Y Modify Register */
-#define	DMA15_CURR_DESC_PTR		0xFFC01DE0	/* DMA Channel 15 Current Descriptor Pointer Register */
-#define	DMA15_CURR_ADDR			0xFFC01DE4	/* DMA Channel 15 Current Address Register */
-#define	DMA15_IRQ_STATUS		0xFFC01DE8	/* DMA Channel 15 Interrupt/Status Register */
-#define	DMA15_PERIPHERAL_MAP	0xFFC01DEC	/* DMA Channel 15 Peripheral Map Register */
-#define	DMA15_CURR_X_COUNT		0xFFC01DF0	/* DMA Channel 15 Current X Count Register */
-#define	DMA15_CURR_Y_COUNT		0xFFC01DF8	/* DMA Channel 15 Current Y Count Register */
-
-#define	DMA16_NEXT_DESC_PTR		0xFFC01E00	/* DMA Channel 16 Next Descriptor Pointer Register */
-#define	DMA16_START_ADDR		0xFFC01E04	/* DMA Channel 16 Start	Address	Register */
-#define	DMA16_CONFIG			0xFFC01E08	/* DMA Channel 16 Configuration	Register */
-#define	DMA16_X_COUNT			0xFFC01E10	/* DMA Channel 16 X Count Register */
-#define	DMA16_X_MODIFY			0xFFC01E14	/* DMA Channel 16 X Modify Register */
-#define	DMA16_Y_COUNT			0xFFC01E18	/* DMA Channel 16 Y Count Register */
-#define	DMA16_Y_MODIFY			0xFFC01E1C	/* DMA Channel 16 Y Modify Register */
-#define	DMA16_CURR_DESC_PTR		0xFFC01E20	/* DMA Channel 16 Current Descriptor Pointer Register */
-#define	DMA16_CURR_ADDR			0xFFC01E24	/* DMA Channel 16 Current Address Register */
-#define	DMA16_IRQ_STATUS		0xFFC01E28	/* DMA Channel 16 Interrupt/Status Register */
-#define	DMA16_PERIPHERAL_MAP	0xFFC01E2C	/* DMA Channel 16 Peripheral Map Register */
-#define	DMA16_CURR_X_COUNT		0xFFC01E30	/* DMA Channel 16 Current X Count Register */
-#define	DMA16_CURR_Y_COUNT		0xFFC01E38	/* DMA Channel 16 Current Y Count Register */
-
-#define	DMA17_NEXT_DESC_PTR		0xFFC01E40	/* DMA Channel 17 Next Descriptor Pointer Register */
-#define	DMA17_START_ADDR		0xFFC01E44	/* DMA Channel 17 Start	Address	Register */
-#define	DMA17_CONFIG			0xFFC01E48	/* DMA Channel 17 Configuration	Register */
-#define	DMA17_X_COUNT			0xFFC01E50	/* DMA Channel 17 X Count Register */
-#define	DMA17_X_MODIFY			0xFFC01E54	/* DMA Channel 17 X Modify Register */
-#define	DMA17_Y_COUNT			0xFFC01E58	/* DMA Channel 17 Y Count Register */
-#define	DMA17_Y_MODIFY			0xFFC01E5C	/* DMA Channel 17 Y Modify Register */
-#define	DMA17_CURR_DESC_PTR		0xFFC01E60	/* DMA Channel 17 Current Descriptor Pointer Register */
-#define	DMA17_CURR_ADDR			0xFFC01E64	/* DMA Channel 17 Current Address Register */
-#define	DMA17_IRQ_STATUS		0xFFC01E68	/* DMA Channel 17 Interrupt/Status Register */
-#define	DMA17_PERIPHERAL_MAP	0xFFC01E6C	/* DMA Channel 17 Peripheral Map Register */
-#define	DMA17_CURR_X_COUNT		0xFFC01E70	/* DMA Channel 17 Current X Count Register */
-#define	DMA17_CURR_Y_COUNT		0xFFC01E78	/* DMA Channel 17 Current Y Count Register */
-
-#define	DMA18_NEXT_DESC_PTR		0xFFC01E80	/* DMA Channel 18 Next Descriptor Pointer Register */
-#define	DMA18_START_ADDR		0xFFC01E84	/* DMA Channel 18 Start	Address	Register */
-#define	DMA18_CONFIG			0xFFC01E88	/* DMA Channel 18 Configuration	Register */
-#define	DMA18_X_COUNT			0xFFC01E90	/* DMA Channel 18 X Count Register */
-#define	DMA18_X_MODIFY			0xFFC01E94	/* DMA Channel 18 X Modify Register */
-#define	DMA18_Y_COUNT			0xFFC01E98	/* DMA Channel 18 Y Count Register */
-#define	DMA18_Y_MODIFY			0xFFC01E9C	/* DMA Channel 18 Y Modify Register */
-#define	DMA18_CURR_DESC_PTR		0xFFC01EA0	/* DMA Channel 18 Current Descriptor Pointer Register */
-#define	DMA18_CURR_ADDR			0xFFC01EA4	/* DMA Channel 18 Current Address Register */
-#define	DMA18_IRQ_STATUS		0xFFC01EA8	/* DMA Channel 18 Interrupt/Status Register */
-#define	DMA18_PERIPHERAL_MAP	0xFFC01EAC	/* DMA Channel 18 Peripheral Map Register */
-#define	DMA18_CURR_X_COUNT		0xFFC01EB0	/* DMA Channel 18 Current X Count Register */
-#define	DMA18_CURR_Y_COUNT		0xFFC01EB8	/* DMA Channel 18 Current Y Count Register */
-
-#define	DMA19_NEXT_DESC_PTR		0xFFC01EC0	/* DMA Channel 19 Next Descriptor Pointer Register */
-#define	DMA19_START_ADDR		0xFFC01EC4	/* DMA Channel 19 Start	Address	Register */
-#define	DMA19_CONFIG			0xFFC01EC8	/* DMA Channel 19 Configuration	Register */
-#define	DMA19_X_COUNT			0xFFC01ED0	/* DMA Channel 19 X Count Register */
-#define	DMA19_X_MODIFY			0xFFC01ED4	/* DMA Channel 19 X Modify Register */
-#define	DMA19_Y_COUNT			0xFFC01ED8	/* DMA Channel 19 Y Count Register */
-#define	DMA19_Y_MODIFY			0xFFC01EDC	/* DMA Channel 19 Y Modify Register */
-#define	DMA19_CURR_DESC_PTR		0xFFC01EE0	/* DMA Channel 19 Current Descriptor Pointer Register */
-#define	DMA19_CURR_ADDR			0xFFC01EE4	/* DMA Channel 19 Current Address Register */
-#define	DMA19_IRQ_STATUS		0xFFC01EE8	/* DMA Channel 19 Interrupt/Status Register */
-#define	DMA19_PERIPHERAL_MAP	0xFFC01EEC	/* DMA Channel 19 Peripheral Map Register */
-#define	DMA19_CURR_X_COUNT		0xFFC01EF0	/* DMA Channel 19 Current X Count Register */
-#define	DMA19_CURR_Y_COUNT		0xFFC01EF8	/* DMA Channel 19 Current Y Count Register */
-
-#define	MDMA1_D0_NEXT_DESC_PTR	0xFFC01F00	/* MemDMA1 Stream 0 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA1_D0_START_ADDR		0xFFC01F04	/* MemDMA1 Stream 0 Destination	Start Address Register */
-#define	MDMA1_D0_CONFIG			0xFFC01F08	/* MemDMA1 Stream 0 Destination	Configuration Register */
-#define	MDMA1_D0_X_COUNT		0xFFC01F10	/* MemDMA1 Stream 0 Destination	X Count	Register */
-#define	MDMA1_D0_X_MODIFY		0xFFC01F14	/* MemDMA1 Stream 0 Destination	X Modify Register */
-#define	MDMA1_D0_Y_COUNT		0xFFC01F18	/* MemDMA1 Stream 0 Destination	Y Count	Register */
-#define	MDMA1_D0_Y_MODIFY		0xFFC01F1C	/* MemDMA1 Stream 0 Destination	Y Modify Register */
-#define	MDMA1_D0_CURR_DESC_PTR	0xFFC01F20	/* MemDMA1 Stream 0 Destination	Current	Descriptor Pointer Register */
-#define	MDMA1_D0_CURR_ADDR		0xFFC01F24	/* MemDMA1 Stream 0 Destination	Current	Address	Register */
-#define	MDMA1_D0_IRQ_STATUS		0xFFC01F28	/* MemDMA1 Stream 0 Destination	Interrupt/Status Register */
-#define	MDMA1_D0_PERIPHERAL_MAP	0xFFC01F2C	/* MemDMA1 Stream 0 Destination	Peripheral Map Register */
-#define	MDMA1_D0_CURR_X_COUNT	0xFFC01F30	/* MemDMA1 Stream 0 Destination	Current	X Count	Register */
-#define	MDMA1_D0_CURR_Y_COUNT	0xFFC01F38	/* MemDMA1 Stream 0 Destination	Current	Y Count	Register */
-
-#define	MDMA1_S0_NEXT_DESC_PTR	0xFFC01F40	/* MemDMA1 Stream 0 Source Next	Descriptor Pointer Register */
-#define	MDMA1_S0_START_ADDR		0xFFC01F44	/* MemDMA1 Stream 0 Source Start Address Register */
-#define	MDMA1_S0_CONFIG			0xFFC01F48	/* MemDMA1 Stream 0 Source Configuration Register */
-#define	MDMA1_S0_X_COUNT		0xFFC01F50	/* MemDMA1 Stream 0 Source X Count Register */
-#define	MDMA1_S0_X_MODIFY		0xFFC01F54	/* MemDMA1 Stream 0 Source X Modify Register */
-#define	MDMA1_S0_Y_COUNT		0xFFC01F58	/* MemDMA1 Stream 0 Source Y Count Register */
-#define	MDMA1_S0_Y_MODIFY		0xFFC01F5C	/* MemDMA1 Stream 0 Source Y Modify Register */
-#define	MDMA1_S0_CURR_DESC_PTR	0xFFC01F60	/* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
-#define	MDMA1_S0_CURR_ADDR		0xFFC01F64	/* MemDMA1 Stream 0 Source Current Address Register */
-#define	MDMA1_S0_IRQ_STATUS		0xFFC01F68	/* MemDMA1 Stream 0 Source Interrupt/Status Register */
-#define	MDMA1_S0_PERIPHERAL_MAP	0xFFC01F6C	/* MemDMA1 Stream 0 Source Peripheral Map Register */
-#define	MDMA1_S0_CURR_X_COUNT	0xFFC01F70	/* MemDMA1 Stream 0 Source Current X Count Register */
-#define	MDMA1_S0_CURR_Y_COUNT	0xFFC01F78	/* MemDMA1 Stream 0 Source Current Y Count Register */
-
-#define	MDMA1_D1_NEXT_DESC_PTR	0xFFC01F80	/* MemDMA1 Stream 1 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA1_D1_START_ADDR		0xFFC01F84	/* MemDMA1 Stream 1 Destination	Start Address Register */
-#define	MDMA1_D1_CONFIG			0xFFC01F88	/* MemDMA1 Stream 1 Destination	Configuration Register */
-#define	MDMA1_D1_X_COUNT		0xFFC01F90	/* MemDMA1 Stream 1 Destination	X Count	Register */
-#define	MDMA1_D1_X_MODIFY		0xFFC01F94	/* MemDMA1 Stream 1 Destination	X Modify Register */
-#define	MDMA1_D1_Y_COUNT		0xFFC01F98	/* MemDMA1 Stream 1 Destination	Y Count	Register */
-#define	MDMA1_D1_Y_MODIFY		0xFFC01F9C	/* MemDMA1 Stream 1 Destination	Y Modify Register */
-#define	MDMA1_D1_CURR_DESC_PTR	0xFFC01FA0	/* MemDMA1 Stream 1 Destination	Current	Descriptor Pointer Register */
-#define	MDMA1_D1_CURR_ADDR		0xFFC01FA4	/* MemDMA1 Stream 1 Destination	Current	Address	Register */
-#define	MDMA1_D1_IRQ_STATUS		0xFFC01FA8	/* MemDMA1 Stream 1 Destination	Interrupt/Status Register */
-#define	MDMA1_D1_PERIPHERAL_MAP	0xFFC01FAC	/* MemDMA1 Stream 1 Destination	Peripheral Map Register */
-#define	MDMA1_D1_CURR_X_COUNT	0xFFC01FB0	/* MemDMA1 Stream 1 Destination	Current	X Count	Register */
-#define	MDMA1_D1_CURR_Y_COUNT	0xFFC01FB8	/* MemDMA1 Stream 1 Destination	Current	Y Count	Register */
-
-#define	MDMA1_S1_NEXT_DESC_PTR	0xFFC01FC0	/* MemDMA1 Stream 1 Source Next	Descriptor Pointer Register */
-#define	MDMA1_S1_START_ADDR		0xFFC01FC4	/* MemDMA1 Stream 1 Source Start Address Register */
-#define	MDMA1_S1_CONFIG			0xFFC01FC8	/* MemDMA1 Stream 1 Source Configuration Register */
-#define	MDMA1_S1_X_COUNT		0xFFC01FD0	/* MemDMA1 Stream 1 Source X Count Register */
-#define	MDMA1_S1_X_MODIFY		0xFFC01FD4	/* MemDMA1 Stream 1 Source X Modify Register */
-#define	MDMA1_S1_Y_COUNT		0xFFC01FD8	/* MemDMA1 Stream 1 Source Y Count Register */
-#define	MDMA1_S1_Y_MODIFY		0xFFC01FDC	/* MemDMA1 Stream 1 Source Y Modify Register */
-#define	MDMA1_S1_CURR_DESC_PTR	0xFFC01FE0	/* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
-#define	MDMA1_S1_CURR_ADDR		0xFFC01FE4	/* MemDMA1 Stream 1 Source Current Address Register */
-#define	MDMA1_S1_IRQ_STATUS		0xFFC01FE8	/* MemDMA1 Stream 1 Source Interrupt/Status Register */
-#define	MDMA1_S1_PERIPHERAL_MAP	0xFFC01FEC	/* MemDMA1 Stream 1 Source Peripheral Map Register */
-#define	MDMA1_S1_CURR_X_COUNT	0xFFC01FF0	/* MemDMA1 Stream 1 Source Current X Count Register */
-#define	MDMA1_S1_CURR_Y_COUNT	0xFFC01FF8	/* MemDMA1 Stream 1 Source Current Y Count Register */
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)	 */
-#define	UART1_THR			0xFFC02000	/* Transmit Holding register */
-#define	UART1_RBR			0xFFC02000	/* Receive Buffer register */
-#define	UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte) */
-#define	UART1_IER			0xFFC02004	/* Interrupt Enable Register */
-#define	UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte) */
-#define	UART1_IIR			0xFFC02008	/* Interrupt Identification Register */
-#define	UART1_LCR			0xFFC0200C	/* Line	Control	Register */
-#define	UART1_MCR			0xFFC02010	/* Modem Control Register */
-#define	UART1_LSR			0xFFC02014	/* Line	Status Register */
-#define	UART1_SCR			0xFFC0201C	/* SCR Scratch Register */
-#define	UART1_GCTL			0xFFC02024	/* Global Control Register */
-
-
-/* UART2 Controller		(0xFFC02100 - 0xFFC021FF)	 */
-#define	UART2_THR			0xFFC02100	/* Transmit Holding register */
-#define	UART2_RBR			0xFFC02100	/* Receive Buffer register */
-#define	UART2_DLL			0xFFC02100	/* Divisor Latch (Low-Byte) */
-#define	UART2_IER			0xFFC02104	/* Interrupt Enable Register */
-#define	UART2_DLH			0xFFC02104	/* Divisor Latch (High-Byte) */
-#define	UART2_IIR			0xFFC02108	/* Interrupt Identification Register */
-#define	UART2_LCR			0xFFC0210C	/* Line	Control	Register */
-#define	UART2_MCR			0xFFC02110	/* Modem Control Register */
-#define	UART2_LSR			0xFFC02114	/* Line	Status Register */
-#define	UART2_SCR			0xFFC0211C	/* SCR Scratch Register */
-#define	UART2_GCTL			0xFFC02124	/* Global Control Register */
-
-
-/* Two-Wire Interface 1	(0xFFC02200 - 0xFFC022FF)			 */
-#define	TWI1_CLKDIV			0xFFC02200	/* Serial Clock	Divider	Register */
-#define	TWI1_CONTROL		0xFFC02204	/* TWI1	Master Internal	Time Reference Register */
-#define	TWI1_SLAVE_CTL		0xFFC02208	/* Slave Mode Control Register */
-#define	TWI1_SLAVE_STAT		0xFFC0220C	/* Slave Mode Status Register */
-#define	TWI1_SLAVE_ADDR		0xFFC02210	/* Slave Mode Address Register */
-#define	TWI1_MASTER_CTL	0xFFC02214	/* Master Mode Control Register */
-#define	TWI1_MASTER_STAT	0xFFC02218	/* Master Mode Status Register */
-#define	TWI1_MASTER_ADDR	0xFFC0221C	/* Master Mode Address Register */
-#define	TWI1_INT_STAT		0xFFC02220	/* TWI1	Master Interrupt Register */
-#define	TWI1_INT_MASK		0xFFC02224	/* TWI1	Master Interrupt Mask Register */
-#define	TWI1_FIFO_CTL		0xFFC02228	/* FIFO	Control	Register */
-#define	TWI1_FIFO_STAT		0xFFC0222C	/* FIFO	Status Register */
-#define	TWI1_XMT_DATA8		0xFFC02280	/* FIFO	Transmit Data Single Byte Register */
-#define	TWI1_XMT_DATA16		0xFFC02284	/* FIFO	Transmit Data Double Byte Register */
-#define	TWI1_RCV_DATA8		0xFFC02288	/* FIFO	Receive	Data Single Byte Register */
-#define	TWI1_RCV_DATA16		0xFFC0228C	/* FIFO	Receive	Data Double Byte Register */
-#define TWI1_REGBASE		TWI1_CLKDIV
-
-
-/* the following are for backwards compatibility */
-#define	TWI1_PRESCALE	  TWI1_CONTROL
-#define	TWI1_INT_SRC	  TWI1_INT_STAT
-#define	TWI1_INT_ENABLE	  TWI1_INT_MASK
-
-
-/* SPI1	Controller		(0xFFC02300 - 0xFFC023FF)	 */
-#define	SPI1_CTL			0xFFC02300  /* SPI1 Control Register */
-#define	SPI1_FLG			0xFFC02304  /* SPI1 Flag register */
-#define	SPI1_STAT			0xFFC02308  /* SPI1 Status register */
-#define	SPI1_TDBR			0xFFC0230C  /* SPI1 Transmit Data Buffer Register */
-#define	SPI1_RDBR			0xFFC02310  /* SPI1 Receive Data Buffer	Register */
-#define	SPI1_BAUD			0xFFC02314  /* SPI1 Baud rate Register */
-#define	SPI1_SHADOW			0xFFC02318  /* SPI1_RDBR Shadow	Register */
-#define SPI1_REGBASE			SPI1_CTL
-
-/* SPI2	Controller		(0xFFC02400 - 0xFFC024FF)	 */
-#define	SPI2_CTL			0xFFC02400  /* SPI2 Control Register */
-#define	SPI2_FLG			0xFFC02404  /* SPI2 Flag register */
-#define	SPI2_STAT			0xFFC02408  /* SPI2 Status register */
-#define	SPI2_TDBR			0xFFC0240C  /* SPI2 Transmit Data Buffer Register */
-#define	SPI2_RDBR			0xFFC02410  /* SPI2 Receive Data Buffer	Register */
-#define	SPI2_BAUD			0xFFC02414  /* SPI2 Baud rate Register */
-#define	SPI2_SHADOW			0xFFC02418  /* SPI2_RDBR Shadow	Register */
-#define SPI2_REGBASE			SPI2_CTL
-
-/* SPORT2 Controller		(0xFFC02500 - 0xFFC025FF)			 */
-#define	SPORT2_TCR1			0xFFC02500	/* SPORT2 Transmit Configuration 1 Register */
-#define	SPORT2_TCR2			0xFFC02504	/* SPORT2 Transmit Configuration 2 Register */
-#define	SPORT2_TCLKDIV		0xFFC02508	/* SPORT2 Transmit Clock Divider */
-#define	SPORT2_TFSDIV		0xFFC0250C	/* SPORT2 Transmit Frame Sync Divider */
-#define	SPORT2_TX			0xFFC02510	/* SPORT2 TX Data Register */
-#define	SPORT2_RX			0xFFC02518	/* SPORT2 RX Data Register */
-#define	SPORT2_RCR1			0xFFC02520	/* SPORT2 Transmit Configuration 1 Register */
-#define	SPORT2_RCR2			0xFFC02524	/* SPORT2 Transmit Configuration 2 Register */
-#define	SPORT2_RCLKDIV		0xFFC02528	/* SPORT2 Receive Clock	Divider */
-#define	SPORT2_RFSDIV		0xFFC0252C	/* SPORT2 Receive Frame	Sync Divider */
-#define	SPORT2_STAT			0xFFC02530	/* SPORT2 Status Register */
-#define	SPORT2_CHNL			0xFFC02534	/* SPORT2 Current Channel Register */
-#define	SPORT2_MCMC1		0xFFC02538	/* SPORT2 Multi-Channel	Configuration Register 1 */
-#define	SPORT2_MCMC2		0xFFC0253C	/* SPORT2 Multi-Channel	Configuration Register 2 */
-#define	SPORT2_MTCS0		0xFFC02540	/* SPORT2 Multi-Channel	Transmit Select	Register 0 */
-#define	SPORT2_MTCS1		0xFFC02544	/* SPORT2 Multi-Channel	Transmit Select	Register 1 */
-#define	SPORT2_MTCS2		0xFFC02548	/* SPORT2 Multi-Channel	Transmit Select	Register 2 */
-#define	SPORT2_MTCS3		0xFFC0254C	/* SPORT2 Multi-Channel	Transmit Select	Register 3 */
-#define	SPORT2_MRCS0		0xFFC02550	/* SPORT2 Multi-Channel	Receive	Select Register	0 */
-#define	SPORT2_MRCS1		0xFFC02554	/* SPORT2 Multi-Channel	Receive	Select Register	1 */
-#define	SPORT2_MRCS2		0xFFC02558	/* SPORT2 Multi-Channel	Receive	Select Register	2 */
-#define	SPORT2_MRCS3		0xFFC0255C	/* SPORT2 Multi-Channel	Receive	Select Register	3 */
-
-
-/* SPORT3 Controller		(0xFFC02600 - 0xFFC026FF)			 */
-#define	SPORT3_TCR1			0xFFC02600	/* SPORT3 Transmit Configuration 1 Register */
-#define	SPORT3_TCR2			0xFFC02604	/* SPORT3 Transmit Configuration 2 Register */
-#define	SPORT3_TCLKDIV		0xFFC02608	/* SPORT3 Transmit Clock Divider */
-#define	SPORT3_TFSDIV		0xFFC0260C	/* SPORT3 Transmit Frame Sync Divider */
-#define	SPORT3_TX			0xFFC02610	/* SPORT3 TX Data Register */
-#define	SPORT3_RX			0xFFC02618	/* SPORT3 RX Data Register */
-#define	SPORT3_RCR1			0xFFC02620	/* SPORT3 Transmit Configuration 1 Register */
-#define	SPORT3_RCR2			0xFFC02624	/* SPORT3 Transmit Configuration 2 Register */
-#define	SPORT3_RCLKDIV		0xFFC02628	/* SPORT3 Receive Clock	Divider */
-#define	SPORT3_RFSDIV		0xFFC0262C	/* SPORT3 Receive Frame	Sync Divider */
-#define	SPORT3_STAT			0xFFC02630	/* SPORT3 Status Register */
-#define	SPORT3_CHNL			0xFFC02634	/* SPORT3 Current Channel Register */
-#define	SPORT3_MCMC1		0xFFC02638	/* SPORT3 Multi-Channel	Configuration Register 1 */
-#define	SPORT3_MCMC2		0xFFC0263C	/* SPORT3 Multi-Channel	Configuration Register 2 */
-#define	SPORT3_MTCS0		0xFFC02640	/* SPORT3 Multi-Channel	Transmit Select	Register 0 */
-#define	SPORT3_MTCS1		0xFFC02644	/* SPORT3 Multi-Channel	Transmit Select	Register 1 */
-#define	SPORT3_MTCS2		0xFFC02648	/* SPORT3 Multi-Channel	Transmit Select	Register 2 */
-#define	SPORT3_MTCS3		0xFFC0264C	/* SPORT3 Multi-Channel	Transmit Select	Register 3 */
-#define	SPORT3_MRCS0		0xFFC02650	/* SPORT3 Multi-Channel	Receive	Select Register	0 */
-#define	SPORT3_MRCS1		0xFFC02654	/* SPORT3 Multi-Channel	Receive	Select Register	1 */
-#define	SPORT3_MRCS2		0xFFC02658	/* SPORT3 Multi-Channel	Receive	Select Register	2 */
-#define	SPORT3_MRCS3		0xFFC0265C	/* SPORT3 Multi-Channel	Receive	Select Register	3 */
-
+#include "defBF538.h"
 
 /* Media Transceiver (MXVR)   (0xFFC02700 - 0xFFC028FF) */
 
@@ -995,1249 +149,4 @@
 #define	MXVR_BLOCK_CNT	      0xFFC028C0  /* MXVR Block	Counter */
 #define	MXVR_PLL_CTL_2	      0xFFC028C4  /* MXVR Phase	Lock Loop Control Register 2 */
 
-
-/* CAN Controller		(0xFFC02A00 - 0xFFC02FFF)				 */
-/* For Mailboxes 0-15											 */
-#define	CAN_MC1				0xFFC02A00	/* Mailbox config reg 1	 */
-#define	CAN_MD1				0xFFC02A04	/* Mailbox direction reg 1 */
-#define	CAN_TRS1			0xFFC02A08	/* Transmit Request Set	reg 1 */
-#define	CAN_TRR1			0xFFC02A0C	/* Transmit Request Reset reg 1 */
-#define	CAN_TA1				0xFFC02A10	/* Transmit Acknowledge	reg 1 */
-#define	CAN_AA1				0xFFC02A14	/* Transmit Abort Acknowledge reg 1 */
-#define	CAN_RMP1			0xFFC02A18	/* Receive Message Pending reg 1 */
-#define	CAN_RML1			0xFFC02A1C	/* Receive Message Lost	reg 1 */
-#define	CAN_MBTIF1			0xFFC02A20	/* Mailbox Transmit Interrupt Flag reg 1 */
-#define	CAN_MBRIF1			0xFFC02A24	/* Mailbox Receive  Interrupt Flag reg 1 */
-#define	CAN_MBIM1			0xFFC02A28	/* Mailbox Interrupt Mask reg 1 */
-#define	CAN_RFH1			0xFFC02A2C	/* Remote Frame	Handling reg 1 */
-#define	CAN_OPSS1			0xFFC02A30	/* Overwrite Protection	Single Shot Xmission reg 1 */
-
-/* For Mailboxes 16-31											 */
-#define	CAN_MC2				0xFFC02A40	/* Mailbox config reg 2	 */
-#define	CAN_MD2				0xFFC02A44	/* Mailbox direction reg 2 */
-#define	CAN_TRS2			0xFFC02A48	/* Transmit Request Set	reg 2 */
-#define	CAN_TRR2			0xFFC02A4C	/* Transmit Request Reset reg 2 */
-#define	CAN_TA2				0xFFC02A50	/* Transmit Acknowledge	reg 2 */
-#define	CAN_AA2				0xFFC02A54	/* Transmit Abort Acknowledge reg 2 */
-#define	CAN_RMP2			0xFFC02A58	/* Receive Message Pending reg 2 */
-#define	CAN_RML2			0xFFC02A5C	/* Receive Message Lost	reg 2 */
-#define	CAN_MBTIF2			0xFFC02A60	/* Mailbox Transmit Interrupt Flag reg 2 */
-#define	CAN_MBRIF2			0xFFC02A64	/* Mailbox Receive  Interrupt Flag reg 2 */
-#define	CAN_MBIM2			0xFFC02A68	/* Mailbox Interrupt Mask reg 2 */
-#define	CAN_RFH2			0xFFC02A6C	/* Remote Frame	Handling reg 2 */
-#define	CAN_OPSS2			0xFFC02A70	/* Overwrite Protection	Single Shot Xmission reg 2 */
-
-#define	CAN_CLOCK			0xFFC02A80	/* Bit Timing Configuration register 0 */
-#define	CAN_TIMING			0xFFC02A84	/* Bit Timing Configuration register 1 */
-
-#define	CAN_DEBUG			0xFFC02A88	/* Debug Register		 */
-/* the following is for	backwards compatibility */
-#define	CAN_CNF		 CAN_DEBUG
-
-#define	CAN_STATUS			0xFFC02A8C	/* Global Status Register */
-#define	CAN_CEC				0xFFC02A90	/* Error Counter Register */
-#define	CAN_GIS				0xFFC02A94	/* Global Interrupt Status Register */
-#define	CAN_GIM				0xFFC02A98	/* Global Interrupt Mask Register */
-#define	CAN_GIF				0xFFC02A9C	/* Global Interrupt Flag Register */
-#define	CAN_CONTROL			0xFFC02AA0	/* Master Control Register */
-#define	CAN_INTR			0xFFC02AA4	/* Interrupt Pending Register */
-#define	CAN_MBTD			0xFFC02AAC	/* Mailbox Temporary Disable Feature */
-#define	CAN_EWR				0xFFC02AB0	/* Programmable	Warning	Level */
-#define	CAN_ESR				0xFFC02AB4	/* Error Status	Register */
-#define	CAN_UCCNT			0xFFC02AC4	/* Universal Counter	 */
-#define	CAN_UCRC			0xFFC02AC8	/* Universal Counter Reload/Capture Register */
-#define	CAN_UCCNF			0xFFC02ACC	/* Universal Counter Configuration Register */
-
-/* Mailbox Acceptance Masks					 */
-#define	CAN_AM00L			0xFFC02B00	/* Mailbox 0 Low Acceptance Mask */
-#define	CAN_AM00H			0xFFC02B04	/* Mailbox 0 High Acceptance Mask */
-#define	CAN_AM01L			0xFFC02B08	/* Mailbox 1 Low Acceptance Mask */
-#define	CAN_AM01H			0xFFC02B0C	/* Mailbox 1 High Acceptance Mask */
-#define	CAN_AM02L			0xFFC02B10	/* Mailbox 2 Low Acceptance Mask */
-#define	CAN_AM02H			0xFFC02B14	/* Mailbox 2 High Acceptance Mask */
-#define	CAN_AM03L			0xFFC02B18	/* Mailbox 3 Low Acceptance Mask */
-#define	CAN_AM03H			0xFFC02B1C	/* Mailbox 3 High Acceptance Mask */
-#define	CAN_AM04L			0xFFC02B20	/* Mailbox 4 Low Acceptance Mask */
-#define	CAN_AM04H			0xFFC02B24	/* Mailbox 4 High Acceptance Mask */
-#define	CAN_AM05L			0xFFC02B28	/* Mailbox 5 Low Acceptance Mask */
-#define	CAN_AM05H			0xFFC02B2C	/* Mailbox 5 High Acceptance Mask */
-#define	CAN_AM06L			0xFFC02B30	/* Mailbox 6 Low Acceptance Mask */
-#define	CAN_AM06H			0xFFC02B34	/* Mailbox 6 High Acceptance Mask */
-#define	CAN_AM07L			0xFFC02B38	/* Mailbox 7 Low Acceptance Mask */
-#define	CAN_AM07H			0xFFC02B3C	/* Mailbox 7 High Acceptance Mask */
-#define	CAN_AM08L			0xFFC02B40	/* Mailbox 8 Low Acceptance Mask */
-#define	CAN_AM08H			0xFFC02B44	/* Mailbox 8 High Acceptance Mask */
-#define	CAN_AM09L			0xFFC02B48	/* Mailbox 9 Low Acceptance Mask */
-#define	CAN_AM09H			0xFFC02B4C	/* Mailbox 9 High Acceptance Mask */
-#define	CAN_AM10L			0xFFC02B50	/* Mailbox 10 Low Acceptance Mask */
-#define	CAN_AM10H			0xFFC02B54	/* Mailbox 10 High Acceptance Mask */
-#define	CAN_AM11L			0xFFC02B58	/* Mailbox 11 Low Acceptance Mask */
-#define	CAN_AM11H			0xFFC02B5C	/* Mailbox 11 High Acceptance Mask */
-#define	CAN_AM12L			0xFFC02B60	/* Mailbox 12 Low Acceptance Mask */
-#define	CAN_AM12H			0xFFC02B64	/* Mailbox 12 High Acceptance Mask */
-#define	CAN_AM13L			0xFFC02B68	/* Mailbox 13 Low Acceptance Mask */
-#define	CAN_AM13H			0xFFC02B6C	/* Mailbox 13 High Acceptance Mask */
-#define	CAN_AM14L			0xFFC02B70	/* Mailbox 14 Low Acceptance Mask */
-#define	CAN_AM14H			0xFFC02B74	/* Mailbox 14 High Acceptance Mask */
-#define	CAN_AM15L			0xFFC02B78	/* Mailbox 15 Low Acceptance Mask */
-#define	CAN_AM15H			0xFFC02B7C	/* Mailbox 15 High Acceptance Mask */
-
-#define	CAN_AM16L			0xFFC02B80	/* Mailbox 16 Low Acceptance Mask */
-#define	CAN_AM16H			0xFFC02B84	/* Mailbox 16 High Acceptance Mask */
-#define	CAN_AM17L			0xFFC02B88	/* Mailbox 17 Low Acceptance Mask */
-#define	CAN_AM17H			0xFFC02B8C	/* Mailbox 17 High Acceptance Mask */
-#define	CAN_AM18L			0xFFC02B90	/* Mailbox 18 Low Acceptance Mask */
-#define	CAN_AM18H			0xFFC02B94	/* Mailbox 18 High Acceptance Mask */
-#define	CAN_AM19L			0xFFC02B98	/* Mailbox 19 Low Acceptance Mask */
-#define	CAN_AM19H			0xFFC02B9C	/* Mailbox 19 High Acceptance Mask */
-#define	CAN_AM20L			0xFFC02BA0	/* Mailbox 20 Low Acceptance Mask */
-#define	CAN_AM20H			0xFFC02BA4	/* Mailbox 20 High Acceptance Mask */
-#define	CAN_AM21L			0xFFC02BA8	/* Mailbox 21 Low Acceptance Mask */
-#define	CAN_AM21H			0xFFC02BAC	/* Mailbox 21 High Acceptance Mask */
-#define	CAN_AM22L			0xFFC02BB0	/* Mailbox 22 Low Acceptance Mask */
-#define	CAN_AM22H			0xFFC02BB4	/* Mailbox 22 High Acceptance Mask */
-#define	CAN_AM23L			0xFFC02BB8	/* Mailbox 23 Low Acceptance Mask */
-#define	CAN_AM23H			0xFFC02BBC	/* Mailbox 23 High Acceptance Mask */
-#define	CAN_AM24L			0xFFC02BC0	/* Mailbox 24 Low Acceptance Mask */
-#define	CAN_AM24H			0xFFC02BC4	/* Mailbox 24 High Acceptance Mask */
-#define	CAN_AM25L			0xFFC02BC8	/* Mailbox 25 Low Acceptance Mask */
-#define	CAN_AM25H			0xFFC02BCC	/* Mailbox 25 High Acceptance Mask */
-#define	CAN_AM26L			0xFFC02BD0	/* Mailbox 26 Low Acceptance Mask */
-#define	CAN_AM26H			0xFFC02BD4	/* Mailbox 26 High Acceptance Mask */
-#define	CAN_AM27L			0xFFC02BD8	/* Mailbox 27 Low Acceptance Mask */
-#define	CAN_AM27H			0xFFC02BDC	/* Mailbox 27 High Acceptance Mask */
-#define	CAN_AM28L			0xFFC02BE0	/* Mailbox 28 Low Acceptance Mask */
-#define	CAN_AM28H			0xFFC02BE4	/* Mailbox 28 High Acceptance Mask */
-#define	CAN_AM29L			0xFFC02BE8	/* Mailbox 29 Low Acceptance Mask */
-#define	CAN_AM29H			0xFFC02BEC	/* Mailbox 29 High Acceptance Mask */
-#define	CAN_AM30L			0xFFC02BF0	/* Mailbox 30 Low Acceptance Mask */
-#define	CAN_AM30H			0xFFC02BF4	/* Mailbox 30 High Acceptance Mask */
-#define	CAN_AM31L			0xFFC02BF8	/* Mailbox 31 Low Acceptance Mask */
-#define	CAN_AM31H			0xFFC02BFC	/* Mailbox 31 High Acceptance Mask */
-
-/* CAN Acceptance Mask Macros */
-#define	CAN_AM_L(x)			(CAN_AM00L+((x)*0x8))
-#define	CAN_AM_H(x)			(CAN_AM00H+((x)*0x8))
-
-/* Mailbox Registers									 */
-#define	CAN_MB00_DATA0		0xFFC02C00	/* Mailbox 0 Data Word 0 [15:0]	Register */
-#define	CAN_MB00_DATA1		0xFFC02C04	/* Mailbox 0 Data Word 1 [31:16] Register */
-#define	CAN_MB00_DATA2		0xFFC02C08	/* Mailbox 0 Data Word 2 [47:32] Register */
-#define	CAN_MB00_DATA3		0xFFC02C0C	/* Mailbox 0 Data Word 3 [63:48] Register */
-#define	CAN_MB00_LENGTH		0xFFC02C10	/* Mailbox 0 Data Length Code Register */
-#define	CAN_MB00_TIMESTAMP	0xFFC02C14	/* Mailbox 0 Time Stamp	Value Register */
-#define	CAN_MB00_ID0		0xFFC02C18	/* Mailbox 0 Identifier	Low Register */
-#define	CAN_MB00_ID1		0xFFC02C1C	/* Mailbox 0 Identifier	High Register */
-
-#define	CAN_MB01_DATA0		0xFFC02C20	/* Mailbox 1 Data Word 0 [15:0]	Register */
-#define	CAN_MB01_DATA1		0xFFC02C24	/* Mailbox 1 Data Word 1 [31:16] Register */
-#define	CAN_MB01_DATA2		0xFFC02C28	/* Mailbox 1 Data Word 2 [47:32] Register */
-#define	CAN_MB01_DATA3		0xFFC02C2C	/* Mailbox 1 Data Word 3 [63:48] Register */
-#define	CAN_MB01_LENGTH		0xFFC02C30	/* Mailbox 1 Data Length Code Register */
-#define	CAN_MB01_TIMESTAMP	0xFFC02C34	/* Mailbox 1 Time Stamp	Value Register */
-#define	CAN_MB01_ID0		0xFFC02C38	/* Mailbox 1 Identifier	Low Register */
-#define	CAN_MB01_ID1		0xFFC02C3C	/* Mailbox 1 Identifier	High Register */
-
-#define	CAN_MB02_DATA0		0xFFC02C40	/* Mailbox 2 Data Word 0 [15:0]	Register */
-#define	CAN_MB02_DATA1		0xFFC02C44	/* Mailbox 2 Data Word 1 [31:16] Register */
-#define	CAN_MB02_DATA2		0xFFC02C48	/* Mailbox 2 Data Word 2 [47:32] Register */
-#define	CAN_MB02_DATA3		0xFFC02C4C	/* Mailbox 2 Data Word 3 [63:48] Register */
-#define	CAN_MB02_LENGTH		0xFFC02C50	/* Mailbox 2 Data Length Code Register */
-#define	CAN_MB02_TIMESTAMP	0xFFC02C54	/* Mailbox 2 Time Stamp	Value Register */
-#define	CAN_MB02_ID0		0xFFC02C58	/* Mailbox 2 Identifier	Low Register */
-#define	CAN_MB02_ID1		0xFFC02C5C	/* Mailbox 2 Identifier	High Register */
-
-#define	CAN_MB03_DATA0		0xFFC02C60	/* Mailbox 3 Data Word 0 [15:0]	Register */
-#define	CAN_MB03_DATA1		0xFFC02C64	/* Mailbox 3 Data Word 1 [31:16] Register */
-#define	CAN_MB03_DATA2		0xFFC02C68	/* Mailbox 3 Data Word 2 [47:32] Register */
-#define	CAN_MB03_DATA3		0xFFC02C6C	/* Mailbox 3 Data Word 3 [63:48] Register */
-#define	CAN_MB03_LENGTH		0xFFC02C70	/* Mailbox 3 Data Length Code Register */
-#define	CAN_MB03_TIMESTAMP	0xFFC02C74	/* Mailbox 3 Time Stamp	Value Register */
-#define	CAN_MB03_ID0		0xFFC02C78	/* Mailbox 3 Identifier	Low Register */
-#define	CAN_MB03_ID1		0xFFC02C7C	/* Mailbox 3 Identifier	High Register */
-
-#define	CAN_MB04_DATA0		0xFFC02C80	/* Mailbox 4 Data Word 0 [15:0]	Register */
-#define	CAN_MB04_DATA1		0xFFC02C84	/* Mailbox 4 Data Word 1 [31:16] Register */
-#define	CAN_MB04_DATA2		0xFFC02C88	/* Mailbox 4 Data Word 2 [47:32] Register */
-#define	CAN_MB04_DATA3		0xFFC02C8C	/* Mailbox 4 Data Word 3 [63:48] Register */
-#define	CAN_MB04_LENGTH		0xFFC02C90	/* Mailbox 4 Data Length Code Register */
-#define	CAN_MB04_TIMESTAMP	0xFFC02C94	/* Mailbox 4 Time Stamp	Value Register */
-#define	CAN_MB04_ID0		0xFFC02C98	/* Mailbox 4 Identifier	Low Register */
-#define	CAN_MB04_ID1		0xFFC02C9C	/* Mailbox 4 Identifier	High Register */
-
-#define	CAN_MB05_DATA0		0xFFC02CA0	/* Mailbox 5 Data Word 0 [15:0]	Register */
-#define	CAN_MB05_DATA1		0xFFC02CA4	/* Mailbox 5 Data Word 1 [31:16] Register */
-#define	CAN_MB05_DATA2		0xFFC02CA8	/* Mailbox 5 Data Word 2 [47:32] Register */
-#define	CAN_MB05_DATA3		0xFFC02CAC	/* Mailbox 5 Data Word 3 [63:48] Register */
-#define	CAN_MB05_LENGTH		0xFFC02CB0	/* Mailbox 5 Data Length Code Register */
-#define	CAN_MB05_TIMESTAMP	0xFFC02CB4	/* Mailbox 5 Time Stamp	Value Register */
-#define	CAN_MB05_ID0		0xFFC02CB8	/* Mailbox 5 Identifier	Low Register */
-#define	CAN_MB05_ID1		0xFFC02CBC	/* Mailbox 5 Identifier	High Register */
-
-#define	CAN_MB06_DATA0		0xFFC02CC0	/* Mailbox 6 Data Word 0 [15:0]	Register */
-#define	CAN_MB06_DATA1		0xFFC02CC4	/* Mailbox 6 Data Word 1 [31:16] Register */
-#define	CAN_MB06_DATA2		0xFFC02CC8	/* Mailbox 6 Data Word 2 [47:32] Register */
-#define	CAN_MB06_DATA3		0xFFC02CCC	/* Mailbox 6 Data Word 3 [63:48] Register */
-#define	CAN_MB06_LENGTH		0xFFC02CD0	/* Mailbox 6 Data Length Code Register */
-#define	CAN_MB06_TIMESTAMP	0xFFC02CD4	/* Mailbox 6 Time Stamp	Value Register */
-#define	CAN_MB06_ID0		0xFFC02CD8	/* Mailbox 6 Identifier	Low Register */
-#define	CAN_MB06_ID1		0xFFC02CDC	/* Mailbox 6 Identifier	High Register */
-
-#define	CAN_MB07_DATA0		0xFFC02CE0	/* Mailbox 7 Data Word 0 [15:0]	Register */
-#define	CAN_MB07_DATA1		0xFFC02CE4	/* Mailbox 7 Data Word 1 [31:16] Register */
-#define	CAN_MB07_DATA2		0xFFC02CE8	/* Mailbox 7 Data Word 2 [47:32] Register */
-#define	CAN_MB07_DATA3		0xFFC02CEC	/* Mailbox 7 Data Word 3 [63:48] Register */
-#define	CAN_MB07_LENGTH		0xFFC02CF0	/* Mailbox 7 Data Length Code Register */
-#define	CAN_MB07_TIMESTAMP	0xFFC02CF4	/* Mailbox 7 Time Stamp	Value Register */
-#define	CAN_MB07_ID0		0xFFC02CF8	/* Mailbox 7 Identifier	Low Register */
-#define	CAN_MB07_ID1		0xFFC02CFC	/* Mailbox 7 Identifier	High Register */
-
-#define	CAN_MB08_DATA0		0xFFC02D00	/* Mailbox 8 Data Word 0 [15:0]	Register */
-#define	CAN_MB08_DATA1		0xFFC02D04	/* Mailbox 8 Data Word 1 [31:16] Register */
-#define	CAN_MB08_DATA2		0xFFC02D08	/* Mailbox 8 Data Word 2 [47:32] Register */
-#define	CAN_MB08_DATA3		0xFFC02D0C	/* Mailbox 8 Data Word 3 [63:48] Register */
-#define	CAN_MB08_LENGTH		0xFFC02D10	/* Mailbox 8 Data Length Code Register */
-#define	CAN_MB08_TIMESTAMP	0xFFC02D14	/* Mailbox 8 Time Stamp	Value Register */
-#define	CAN_MB08_ID0		0xFFC02D18	/* Mailbox 8 Identifier	Low Register */
-#define	CAN_MB08_ID1		0xFFC02D1C	/* Mailbox 8 Identifier	High Register */
-
-#define	CAN_MB09_DATA0		0xFFC02D20	/* Mailbox 9 Data Word 0 [15:0]	Register */
-#define	CAN_MB09_DATA1		0xFFC02D24	/* Mailbox 9 Data Word 1 [31:16] Register */
-#define	CAN_MB09_DATA2		0xFFC02D28	/* Mailbox 9 Data Word 2 [47:32] Register */
-#define	CAN_MB09_DATA3		0xFFC02D2C	/* Mailbox 9 Data Word 3 [63:48] Register */
-#define	CAN_MB09_LENGTH		0xFFC02D30	/* Mailbox 9 Data Length Code Register */
-#define	CAN_MB09_TIMESTAMP	0xFFC02D34	/* Mailbox 9 Time Stamp	Value Register */
-#define	CAN_MB09_ID0		0xFFC02D38	/* Mailbox 9 Identifier	Low Register */
-#define	CAN_MB09_ID1		0xFFC02D3C	/* Mailbox 9 Identifier	High Register */
-
-#define	CAN_MB10_DATA0		0xFFC02D40	/* Mailbox 10 Data Word	0 [15:0] Register */
-#define	CAN_MB10_DATA1		0xFFC02D44	/* Mailbox 10 Data Word	1 [31:16] Register */
-#define	CAN_MB10_DATA2		0xFFC02D48	/* Mailbox 10 Data Word	2 [47:32] Register */
-#define	CAN_MB10_DATA3		0xFFC02D4C	/* Mailbox 10 Data Word	3 [63:48] Register */
-#define	CAN_MB10_LENGTH		0xFFC02D50	/* Mailbox 10 Data Length Code Register */
-#define	CAN_MB10_TIMESTAMP	0xFFC02D54	/* Mailbox 10 Time Stamp Value Register */
-#define	CAN_MB10_ID0		0xFFC02D58	/* Mailbox 10 Identifier Low Register */
-#define	CAN_MB10_ID1		0xFFC02D5C	/* Mailbox 10 Identifier High Register */
-
-#define	CAN_MB11_DATA0		0xFFC02D60	/* Mailbox 11 Data Word	0 [15:0] Register */
-#define	CAN_MB11_DATA1		0xFFC02D64	/* Mailbox 11 Data Word	1 [31:16] Register */
-#define	CAN_MB11_DATA2		0xFFC02D68	/* Mailbox 11 Data Word	2 [47:32] Register */
-#define	CAN_MB11_DATA3		0xFFC02D6C	/* Mailbox 11 Data Word	3 [63:48] Register */
-#define	CAN_MB11_LENGTH		0xFFC02D70	/* Mailbox 11 Data Length Code Register */
-#define	CAN_MB11_TIMESTAMP	0xFFC02D74	/* Mailbox 11 Time Stamp Value Register */
-#define	CAN_MB11_ID0		0xFFC02D78	/* Mailbox 11 Identifier Low Register */
-#define	CAN_MB11_ID1		0xFFC02D7C	/* Mailbox 11 Identifier High Register */
-
-#define	CAN_MB12_DATA0		0xFFC02D80	/* Mailbox 12 Data Word	0 [15:0] Register */
-#define	CAN_MB12_DATA1		0xFFC02D84	/* Mailbox 12 Data Word	1 [31:16] Register */
-#define	CAN_MB12_DATA2		0xFFC02D88	/* Mailbox 12 Data Word	2 [47:32] Register */
-#define	CAN_MB12_DATA3		0xFFC02D8C	/* Mailbox 12 Data Word	3 [63:48] Register */
-#define	CAN_MB12_LENGTH		0xFFC02D90	/* Mailbox 12 Data Length Code Register */
-#define	CAN_MB12_TIMESTAMP	0xFFC02D94	/* Mailbox 12 Time Stamp Value Register */
-#define	CAN_MB12_ID0		0xFFC02D98	/* Mailbox 12 Identifier Low Register */
-#define	CAN_MB12_ID1		0xFFC02D9C	/* Mailbox 12 Identifier High Register */
-
-#define	CAN_MB13_DATA0		0xFFC02DA0	/* Mailbox 13 Data Word	0 [15:0] Register */
-#define	CAN_MB13_DATA1		0xFFC02DA4	/* Mailbox 13 Data Word	1 [31:16] Register */
-#define	CAN_MB13_DATA2		0xFFC02DA8	/* Mailbox 13 Data Word	2 [47:32] Register */
-#define	CAN_MB13_DATA3		0xFFC02DAC	/* Mailbox 13 Data Word	3 [63:48] Register */
-#define	CAN_MB13_LENGTH		0xFFC02DB0	/* Mailbox 13 Data Length Code Register */
-#define	CAN_MB13_TIMESTAMP	0xFFC02DB4	/* Mailbox 13 Time Stamp Value Register */
-#define	CAN_MB13_ID0		0xFFC02DB8	/* Mailbox 13 Identifier Low Register */
-#define	CAN_MB13_ID1		0xFFC02DBC	/* Mailbox 13 Identifier High Register */
-
-#define	CAN_MB14_DATA0		0xFFC02DC0	/* Mailbox 14 Data Word	0 [15:0] Register */
-#define	CAN_MB14_DATA1		0xFFC02DC4	/* Mailbox 14 Data Word	1 [31:16] Register */
-#define	CAN_MB14_DATA2		0xFFC02DC8	/* Mailbox 14 Data Word	2 [47:32] Register */
-#define	CAN_MB14_DATA3		0xFFC02DCC	/* Mailbox 14 Data Word	3 [63:48] Register */
-#define	CAN_MB14_LENGTH		0xFFC02DD0	/* Mailbox 14 Data Length Code Register */
-#define	CAN_MB14_TIMESTAMP	0xFFC02DD4	/* Mailbox 14 Time Stamp Value Register */
-#define	CAN_MB14_ID0		0xFFC02DD8	/* Mailbox 14 Identifier Low Register */
-#define	CAN_MB14_ID1		0xFFC02DDC	/* Mailbox 14 Identifier High Register */
-
-#define	CAN_MB15_DATA0		0xFFC02DE0	/* Mailbox 15 Data Word	0 [15:0] Register */
-#define	CAN_MB15_DATA1		0xFFC02DE4	/* Mailbox 15 Data Word	1 [31:16] Register */
-#define	CAN_MB15_DATA2		0xFFC02DE8	/* Mailbox 15 Data Word	2 [47:32] Register */
-#define	CAN_MB15_DATA3		0xFFC02DEC	/* Mailbox 15 Data Word	3 [63:48] Register */
-#define	CAN_MB15_LENGTH		0xFFC02DF0	/* Mailbox 15 Data Length Code Register */
-#define	CAN_MB15_TIMESTAMP	0xFFC02DF4	/* Mailbox 15 Time Stamp Value Register */
-#define	CAN_MB15_ID0		0xFFC02DF8	/* Mailbox 15 Identifier Low Register */
-#define	CAN_MB15_ID1		0xFFC02DFC	/* Mailbox 15 Identifier High Register */
-
-#define	CAN_MB16_DATA0		0xFFC02E00	/* Mailbox 16 Data Word	0 [15:0] Register */
-#define	CAN_MB16_DATA1		0xFFC02E04	/* Mailbox 16 Data Word	1 [31:16] Register */
-#define	CAN_MB16_DATA2		0xFFC02E08	/* Mailbox 16 Data Word	2 [47:32] Register */
-#define	CAN_MB16_DATA3		0xFFC02E0C	/* Mailbox 16 Data Word	3 [63:48] Register */
-#define	CAN_MB16_LENGTH		0xFFC02E10	/* Mailbox 16 Data Length Code Register */
-#define	CAN_MB16_TIMESTAMP	0xFFC02E14	/* Mailbox 16 Time Stamp Value Register */
-#define	CAN_MB16_ID0		0xFFC02E18	/* Mailbox 16 Identifier Low Register */
-#define	CAN_MB16_ID1		0xFFC02E1C	/* Mailbox 16 Identifier High Register */
-
-#define	CAN_MB17_DATA0		0xFFC02E20	/* Mailbox 17 Data Word	0 [15:0] Register */
-#define	CAN_MB17_DATA1		0xFFC02E24	/* Mailbox 17 Data Word	1 [31:16] Register */
-#define	CAN_MB17_DATA2		0xFFC02E28	/* Mailbox 17 Data Word	2 [47:32] Register */
-#define	CAN_MB17_DATA3		0xFFC02E2C	/* Mailbox 17 Data Word	3 [63:48] Register */
-#define	CAN_MB17_LENGTH		0xFFC02E30	/* Mailbox 17 Data Length Code Register */
-#define	CAN_MB17_TIMESTAMP	0xFFC02E34	/* Mailbox 17 Time Stamp Value Register */
-#define	CAN_MB17_ID0		0xFFC02E38	/* Mailbox 17 Identifier Low Register */
-#define	CAN_MB17_ID1		0xFFC02E3C	/* Mailbox 17 Identifier High Register */
-
-#define	CAN_MB18_DATA0		0xFFC02E40	/* Mailbox 18 Data Word	0 [15:0] Register */
-#define	CAN_MB18_DATA1		0xFFC02E44	/* Mailbox 18 Data Word	1 [31:16] Register */
-#define	CAN_MB18_DATA2		0xFFC02E48	/* Mailbox 18 Data Word	2 [47:32] Register */
-#define	CAN_MB18_DATA3		0xFFC02E4C	/* Mailbox 18 Data Word	3 [63:48] Register */
-#define	CAN_MB18_LENGTH		0xFFC02E50	/* Mailbox 18 Data Length Code Register */
-#define	CAN_MB18_TIMESTAMP	0xFFC02E54	/* Mailbox 18 Time Stamp Value Register */
-#define	CAN_MB18_ID0		0xFFC02E58	/* Mailbox 18 Identifier Low Register */
-#define	CAN_MB18_ID1		0xFFC02E5C	/* Mailbox 18 Identifier High Register */
-
-#define	CAN_MB19_DATA0		0xFFC02E60	/* Mailbox 19 Data Word	0 [15:0] Register */
-#define	CAN_MB19_DATA1		0xFFC02E64	/* Mailbox 19 Data Word	1 [31:16] Register */
-#define	CAN_MB19_DATA2		0xFFC02E68	/* Mailbox 19 Data Word	2 [47:32] Register */
-#define	CAN_MB19_DATA3		0xFFC02E6C	/* Mailbox 19 Data Word	3 [63:48] Register */
-#define	CAN_MB19_LENGTH		0xFFC02E70	/* Mailbox 19 Data Length Code Register */
-#define	CAN_MB19_TIMESTAMP	0xFFC02E74	/* Mailbox 19 Time Stamp Value Register */
-#define	CAN_MB19_ID0		0xFFC02E78	/* Mailbox 19 Identifier Low Register */
-#define	CAN_MB19_ID1		0xFFC02E7C	/* Mailbox 19 Identifier High Register */
-
-#define	CAN_MB20_DATA0		0xFFC02E80	/* Mailbox 20 Data Word	0 [15:0] Register */
-#define	CAN_MB20_DATA1		0xFFC02E84	/* Mailbox 20 Data Word	1 [31:16] Register */
-#define	CAN_MB20_DATA2		0xFFC02E88	/* Mailbox 20 Data Word	2 [47:32] Register */
-#define	CAN_MB20_DATA3		0xFFC02E8C	/* Mailbox 20 Data Word	3 [63:48] Register */
-#define	CAN_MB20_LENGTH		0xFFC02E90	/* Mailbox 20 Data Length Code Register */
-#define	CAN_MB20_TIMESTAMP	0xFFC02E94	/* Mailbox 20 Time Stamp Value Register */
-#define	CAN_MB20_ID0		0xFFC02E98	/* Mailbox 20 Identifier Low Register */
-#define	CAN_MB20_ID1		0xFFC02E9C	/* Mailbox 20 Identifier High Register */
-
-#define	CAN_MB21_DATA0		0xFFC02EA0	/* Mailbox 21 Data Word	0 [15:0] Register */
-#define	CAN_MB21_DATA1		0xFFC02EA4	/* Mailbox 21 Data Word	1 [31:16] Register */
-#define	CAN_MB21_DATA2		0xFFC02EA8	/* Mailbox 21 Data Word	2 [47:32] Register */
-#define	CAN_MB21_DATA3		0xFFC02EAC	/* Mailbox 21 Data Word	3 [63:48] Register */
-#define	CAN_MB21_LENGTH		0xFFC02EB0	/* Mailbox 21 Data Length Code Register */
-#define	CAN_MB21_TIMESTAMP	0xFFC02EB4	/* Mailbox 21 Time Stamp Value Register */
-#define	CAN_MB21_ID0		0xFFC02EB8	/* Mailbox 21 Identifier Low Register */
-#define	CAN_MB21_ID1		0xFFC02EBC	/* Mailbox 21 Identifier High Register */
-
-#define	CAN_MB22_DATA0		0xFFC02EC0	/* Mailbox 22 Data Word	0 [15:0] Register */
-#define	CAN_MB22_DATA1		0xFFC02EC4	/* Mailbox 22 Data Word	1 [31:16] Register */
-#define	CAN_MB22_DATA2		0xFFC02EC8	/* Mailbox 22 Data Word	2 [47:32] Register */
-#define	CAN_MB22_DATA3		0xFFC02ECC	/* Mailbox 22 Data Word	3 [63:48] Register */
-#define	CAN_MB22_LENGTH		0xFFC02ED0	/* Mailbox 22 Data Length Code Register */
-#define	CAN_MB22_TIMESTAMP	0xFFC02ED4	/* Mailbox 22 Time Stamp Value Register */
-#define	CAN_MB22_ID0		0xFFC02ED8	/* Mailbox 22 Identifier Low Register */
-#define	CAN_MB22_ID1		0xFFC02EDC	/* Mailbox 22 Identifier High Register */
-
-#define	CAN_MB23_DATA0		0xFFC02EE0	/* Mailbox 23 Data Word	0 [15:0] Register */
-#define	CAN_MB23_DATA1		0xFFC02EE4	/* Mailbox 23 Data Word	1 [31:16] Register */
-#define	CAN_MB23_DATA2		0xFFC02EE8	/* Mailbox 23 Data Word	2 [47:32] Register */
-#define	CAN_MB23_DATA3		0xFFC02EEC	/* Mailbox 23 Data Word	3 [63:48] Register */
-#define	CAN_MB23_LENGTH		0xFFC02EF0	/* Mailbox 23 Data Length Code Register */
-#define	CAN_MB23_TIMESTAMP	0xFFC02EF4	/* Mailbox 23 Time Stamp Value Register */
-#define	CAN_MB23_ID0		0xFFC02EF8	/* Mailbox 23 Identifier Low Register */
-#define	CAN_MB23_ID1		0xFFC02EFC	/* Mailbox 23 Identifier High Register */
-
-#define	CAN_MB24_DATA0		0xFFC02F00	/* Mailbox 24 Data Word	0 [15:0] Register */
-#define	CAN_MB24_DATA1		0xFFC02F04	/* Mailbox 24 Data Word	1 [31:16] Register */
-#define	CAN_MB24_DATA2		0xFFC02F08	/* Mailbox 24 Data Word	2 [47:32] Register */
-#define	CAN_MB24_DATA3		0xFFC02F0C	/* Mailbox 24 Data Word	3 [63:48] Register */
-#define	CAN_MB24_LENGTH		0xFFC02F10	/* Mailbox 24 Data Length Code Register */
-#define	CAN_MB24_TIMESTAMP	0xFFC02F14	/* Mailbox 24 Time Stamp Value Register */
-#define	CAN_MB24_ID0		0xFFC02F18	/* Mailbox 24 Identifier Low Register */
-#define	CAN_MB24_ID1		0xFFC02F1C	/* Mailbox 24 Identifier High Register */
-
-#define	CAN_MB25_DATA0		0xFFC02F20	/* Mailbox 25 Data Word	0 [15:0] Register */
-#define	CAN_MB25_DATA1		0xFFC02F24	/* Mailbox 25 Data Word	1 [31:16] Register */
-#define	CAN_MB25_DATA2		0xFFC02F28	/* Mailbox 25 Data Word	2 [47:32] Register */
-#define	CAN_MB25_DATA3		0xFFC02F2C	/* Mailbox 25 Data Word	3 [63:48] Register */
-#define	CAN_MB25_LENGTH		0xFFC02F30	/* Mailbox 25 Data Length Code Register */
-#define	CAN_MB25_TIMESTAMP	0xFFC02F34	/* Mailbox 25 Time Stamp Value Register */
-#define	CAN_MB25_ID0		0xFFC02F38	/* Mailbox 25 Identifier Low Register */
-#define	CAN_MB25_ID1		0xFFC02F3C	/* Mailbox 25 Identifier High Register */
-
-#define	CAN_MB26_DATA0		0xFFC02F40	/* Mailbox 26 Data Word	0 [15:0] Register */
-#define	CAN_MB26_DATA1		0xFFC02F44	/* Mailbox 26 Data Word	1 [31:16] Register */
-#define	CAN_MB26_DATA2		0xFFC02F48	/* Mailbox 26 Data Word	2 [47:32] Register */
-#define	CAN_MB26_DATA3		0xFFC02F4C	/* Mailbox 26 Data Word	3 [63:48] Register */
-#define	CAN_MB26_LENGTH		0xFFC02F50	/* Mailbox 26 Data Length Code Register */
-#define	CAN_MB26_TIMESTAMP	0xFFC02F54	/* Mailbox 26 Time Stamp Value Register */
-#define	CAN_MB26_ID0		0xFFC02F58	/* Mailbox 26 Identifier Low Register */
-#define	CAN_MB26_ID1		0xFFC02F5C	/* Mailbox 26 Identifier High Register */
-
-#define	CAN_MB27_DATA0		0xFFC02F60	/* Mailbox 27 Data Word	0 [15:0] Register */
-#define	CAN_MB27_DATA1		0xFFC02F64	/* Mailbox 27 Data Word	1 [31:16] Register */
-#define	CAN_MB27_DATA2		0xFFC02F68	/* Mailbox 27 Data Word	2 [47:32] Register */
-#define	CAN_MB27_DATA3		0xFFC02F6C	/* Mailbox 27 Data Word	3 [63:48] Register */
-#define	CAN_MB27_LENGTH		0xFFC02F70	/* Mailbox 27 Data Length Code Register */
-#define	CAN_MB27_TIMESTAMP	0xFFC02F74	/* Mailbox 27 Time Stamp Value Register */
-#define	CAN_MB27_ID0		0xFFC02F78	/* Mailbox 27 Identifier Low Register */
-#define	CAN_MB27_ID1		0xFFC02F7C	/* Mailbox 27 Identifier High Register */
-
-#define	CAN_MB28_DATA0		0xFFC02F80	/* Mailbox 28 Data Word	0 [15:0] Register */
-#define	CAN_MB28_DATA1		0xFFC02F84	/* Mailbox 28 Data Word	1 [31:16] Register */
-#define	CAN_MB28_DATA2		0xFFC02F88	/* Mailbox 28 Data Word	2 [47:32] Register */
-#define	CAN_MB28_DATA3		0xFFC02F8C	/* Mailbox 28 Data Word	3 [63:48] Register */
-#define	CAN_MB28_LENGTH		0xFFC02F90	/* Mailbox 28 Data Length Code Register */
-#define	CAN_MB28_TIMESTAMP	0xFFC02F94	/* Mailbox 28 Time Stamp Value Register */
-#define	CAN_MB28_ID0		0xFFC02F98	/* Mailbox 28 Identifier Low Register */
-#define	CAN_MB28_ID1		0xFFC02F9C	/* Mailbox 28 Identifier High Register */
-
-#define	CAN_MB29_DATA0		0xFFC02FA0	/* Mailbox 29 Data Word	0 [15:0] Register */
-#define	CAN_MB29_DATA1		0xFFC02FA4	/* Mailbox 29 Data Word	1 [31:16] Register */
-#define	CAN_MB29_DATA2		0xFFC02FA8	/* Mailbox 29 Data Word	2 [47:32] Register */
-#define	CAN_MB29_DATA3		0xFFC02FAC	/* Mailbox 29 Data Word	3 [63:48] Register */
-#define	CAN_MB29_LENGTH		0xFFC02FB0	/* Mailbox 29 Data Length Code Register */
-#define	CAN_MB29_TIMESTAMP	0xFFC02FB4	/* Mailbox 29 Time Stamp Value Register */
-#define	CAN_MB29_ID0		0xFFC02FB8	/* Mailbox 29 Identifier Low Register */
-#define	CAN_MB29_ID1		0xFFC02FBC	/* Mailbox 29 Identifier High Register */
-
-#define	CAN_MB30_DATA0		0xFFC02FC0	/* Mailbox 30 Data Word	0 [15:0] Register */
-#define	CAN_MB30_DATA1		0xFFC02FC4	/* Mailbox 30 Data Word	1 [31:16] Register */
-#define	CAN_MB30_DATA2		0xFFC02FC8	/* Mailbox 30 Data Word	2 [47:32] Register */
-#define	CAN_MB30_DATA3		0xFFC02FCC	/* Mailbox 30 Data Word	3 [63:48] Register */
-#define	CAN_MB30_LENGTH		0xFFC02FD0	/* Mailbox 30 Data Length Code Register */
-#define	CAN_MB30_TIMESTAMP	0xFFC02FD4	/* Mailbox 30 Time Stamp Value Register */
-#define	CAN_MB30_ID0		0xFFC02FD8	/* Mailbox 30 Identifier Low Register */
-#define	CAN_MB30_ID1		0xFFC02FDC	/* Mailbox 30 Identifier High Register */
-
-#define	CAN_MB31_DATA0		0xFFC02FE0	/* Mailbox 31 Data Word	0 [15:0] Register */
-#define	CAN_MB31_DATA1		0xFFC02FE4	/* Mailbox 31 Data Word	1 [31:16] Register */
-#define	CAN_MB31_DATA2		0xFFC02FE8	/* Mailbox 31 Data Word	2 [47:32] Register */
-#define	CAN_MB31_DATA3		0xFFC02FEC	/* Mailbox 31 Data Word	3 [63:48] Register */
-#define	CAN_MB31_LENGTH		0xFFC02FF0	/* Mailbox 31 Data Length Code Register */
-#define	CAN_MB31_TIMESTAMP	0xFFC02FF4	/* Mailbox 31 Time Stamp Value Register */
-#define	CAN_MB31_ID0		0xFFC02FF8	/* Mailbox 31 Identifier Low Register */
-#define	CAN_MB31_ID1		0xFFC02FFC	/* Mailbox 31 Identifier High Register */
-
-/* CAN Mailbox Area Macros */
-#define	CAN_MB_ID1(x)		(CAN_MB00_ID1+((x)*0x20))
-#define	CAN_MB_ID0(x)		(CAN_MB00_ID0+((x)*0x20))
-#define	CAN_MB_TIMESTAMP(x)	(CAN_MB00_TIMESTAMP+((x)*0x20))
-#define	CAN_MB_LENGTH(x)	(CAN_MB00_LENGTH+((x)*0x20))
-#define	CAN_MB_DATA3(x)		(CAN_MB00_DATA3+((x)*0x20))
-#define	CAN_MB_DATA2(x)		(CAN_MB00_DATA2+((x)*0x20))
-#define	CAN_MB_DATA1(x)		(CAN_MB00_DATA1+((x)*0x20))
-#define	CAN_MB_DATA0(x)		(CAN_MB00_DATA0+((x)*0x20))
-
-
-/*********************************************************************************** */
-/* System MMR Register Bits and	Macros */
-/******************************************************************************* */
-
-/* SWRST Mask */
-#define	SYSTEM_RESET	0x0007	/* Initiates A System Software Reset */
-#define	DOUBLE_FAULT	0x0008	/* Core	Double Fault Causes Reset */
-#define	RESET_DOUBLE	0x2000	/* SW Reset Generated By Core Double-Fault */
-#define	RESET_WDOG		0x4000	/* SW Reset Generated By Watchdog Timer */
-#define	RESET_SOFTWARE	0x8000	/* SW Reset Occurred Since Last	Read Of	SWRST */
-
-/* SYSCR Masks													 */
-#define	BMODE			0x0006	/* Boot	Mode - Latched During HW Reset From Mode Pins */
-#define	NOBOOT			0x0010	/* Execute From	L1 or ASYNC Bank 0 When	BMODE =	0 */
-
-
-/* *************  SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
-
-/* Peripheral Masks For	SIC0_ISR, SIC0_IWR, SIC0_IMASK */
-#define	PLL_WAKEUP_IRQ		0x00000001	/* PLL Wakeup Interrupt	Request */
-#define	DMAC0_ERR_IRQ		0x00000002	/* DMA Controller 0 Error Interrupt Request */
-#define	PPI_ERR_IRQ		0x00000004	/* PPI Error Interrupt Request */
-#define	SPORT0_ERR_IRQ		0x00000008	/* SPORT0 Error	Interrupt Request */
-#define	SPORT1_ERR_IRQ		0x00000010	/* SPORT1 Error	Interrupt Request */
-#define	SPI0_ERR_IRQ		0x00000020	/* SPI0	Error Interrupt	Request */
-#define	UART0_ERR_IRQ		0x00000040	/* UART0 Error Interrupt Request */
-#define	RTC_IRQ			0x00000080	/* Real-Time Clock Interrupt Request */
-#define	DMA0_IRQ		0x00000100	/* DMA Channel 0 (PPI) Interrupt Request */
-#define	DMA1_IRQ		0x00000200	/* DMA Channel 1 (SPORT0 RX) Interrupt Request */
-#define	DMA2_IRQ		0x00000400	/* DMA Channel 2 (SPORT0 TX) Interrupt Request */
-#define	DMA3_IRQ		0x00000800	/* DMA Channel 3 (SPORT1 RX) Interrupt Request */
-#define	DMA4_IRQ		0x00001000	/* DMA Channel 4 (SPORT1 TX) Interrupt Request */
-#define	DMA5_IRQ		0x00002000	/* DMA Channel 5 (SPI) Interrupt Request */
-#define	DMA6_IRQ		0x00004000	/* DMA Channel 6 (UART RX) Interrupt Request */
-#define	DMA7_IRQ		0x00008000	/* DMA Channel 7 (UART TX) Interrupt Request */
-#define	TIMER0_IRQ		0x00010000	/* Timer 0 Interrupt Request */
-#define	TIMER1_IRQ		0x00020000	/* Timer 1 Interrupt Request */
-#define	TIMER2_IRQ		0x00040000	/* Timer 2 Interrupt Request */
-#define	PFA_IRQ			0x00080000	/* Programmable	Flag Interrupt Request A */
-#define	PFB_IRQ			0x00100000	/* Programmable	Flag Interrupt Request B */
-#define	MDMA0_0_IRQ		0x00200000	/* MemDMA0 Stream 0 Interrupt Request */
-#define	MDMA0_1_IRQ		0x00400000	/* MemDMA0 Stream 1 Interrupt Request */
-#define	WDOG_IRQ		0x00800000	/* Software Watchdog Timer Interrupt Request */
-#define	DMAC1_ERR_IRQ		0x01000000	/* DMA Controller 1 Error Interrupt Request */
-#define	SPORT2_ERR_IRQ		0x02000000	/* SPORT2 Error	Interrupt Request */
-#define	SPORT3_ERR_IRQ		0x04000000	/* SPORT3 Error	Interrupt Request */
-#define	MXVR_SD_IRQ		0x08000000	/* MXVR	Synchronous Data Interrupt Request */
-#define	SPI1_ERR_IRQ		0x10000000	/* SPI1	Error Interrupt	Request */
-#define	SPI2_ERR_IRQ		0x20000000	/* SPI2	Error Interrupt	Request */
-#define	UART1_ERR_IRQ		0x40000000	/* UART1 Error Interrupt Request */
-#define	UART2_ERR_IRQ		0x80000000	/* UART2 Error Interrupt Request */
-
-/* the following are for backwards compatibility */
-#define	DMA0_ERR_IRQ		DMAC0_ERR_IRQ
-#define	DMA1_ERR_IRQ		DMAC1_ERR_IRQ
-
-
-/* Peripheral Masks For	SIC_ISR1, SIC_IWR1, SIC_IMASK1	 */
-#define	CAN_ERR_IRQ			0x00000001	/* CAN Error Interrupt Request */
-#define	DMA8_IRQ			0x00000002	/* DMA Channel 8 (SPORT2 RX) Interrupt Request */
-#define	DMA9_IRQ			0x00000004	/* DMA Channel 9 (SPORT2 TX) Interrupt Request */
-#define	DMA10_IRQ			0x00000008	/* DMA Channel 10 (SPORT3 RX) Interrupt	Request */
-#define	DMA11_IRQ			0x00000010	/* DMA Channel 11 (SPORT3 TX) Interrupt	Request */
-#define	DMA12_IRQ			0x00000020	/* DMA Channel 12 Interrupt Request */
-#define	DMA13_IRQ			0x00000040	/* DMA Channel 13 Interrupt Request */
-#define	DMA14_IRQ			0x00000080	/* DMA Channel 14 (SPI1) Interrupt Request */
-#define	DMA15_IRQ			0x00000100	/* DMA Channel 15 (SPI2) Interrupt Request */
-#define	DMA16_IRQ			0x00000200	/* DMA Channel 16 (UART1 RX) Interrupt Request */
-#define	DMA17_IRQ			0x00000400	/* DMA Channel 17 (UART1 TX) Interrupt Request */
-#define	DMA18_IRQ			0x00000800	/* DMA Channel 18 (UART2 RX) Interrupt Request */
-#define	DMA19_IRQ			0x00001000	/* DMA Channel 19 (UART2 TX) Interrupt Request */
-#define	TWI0_IRQ			0x00002000	/* TWI0	Interrupt Request */
-#define	TWI1_IRQ			0x00004000	/* TWI1	Interrupt Request */
-#define	CAN_RX_IRQ			0x00008000	/* CAN Receive Interrupt Request */
-#define	CAN_TX_IRQ			0x00010000	/* CAN Transmit	Interrupt Request */
-#define	MDMA1_0_IRQ			0x00020000	/* MemDMA1 Stream 0 Interrupt Request */
-#define	MDMA1_1_IRQ			0x00040000	/* MemDMA1 Stream 1 Interrupt Request */
-#define	MXVR_STAT_IRQ			0x00080000	/* MXVR	Status Interrupt Request */
-#define	MXVR_CM_IRQ			0x00100000	/* MXVR	Control	Message	Interrupt Request */
-#define	MXVR_AP_IRQ			0x00200000	/* MXVR	Asynchronous Packet Interrupt */
-
-/* the following are for backwards compatibility */
-#define	MDMA0_IRQ		MDMA1_0_IRQ
-#define	MDMA1_IRQ		MDMA1_1_IRQ
-
-#ifdef _MISRA_RULES
-#define	_MF15 0xFu
-#define	_MF7 7u
-#else
-#define	_MF15 0xF
-#define	_MF7 7
-#endif /* _MISRA_RULES */
-
-/* SIC_IMASKx Masks											 */
-#define	SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts */
-#define	SIC_MASK_ALL	0xFFFFFFFF					/* Mask	all peripheral interrupts */
-#ifdef _MISRA_RULES
-#define	SIC_MASK(x)		(1 << ((x)&0x1Fu))					/* Mask	Peripheral #x interrupt */
-#define	SIC_UNMASK(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Unmask Peripheral #x	interrupt */
-#else
-#define	SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask	Peripheral #x interrupt */
-#define	SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x	interrupt */
-#endif /* _MISRA_RULES */
-
-/* SIC_IWRx Masks											 */
-#define	IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals */
-#define	IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals */
-#ifdef _MISRA_RULES
-#define	IWR_ENABLE(x)	(1 << ((x)&0x1Fu))					/* Wakeup Enable Peripheral #x */
-#define	IWR_DISABLE(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Wakeup Disable Peripheral #x */
-#else
-#define	IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x */
-#define	IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x */
-#endif /* _MISRA_RULES */
-
-
-/* ***************************** UART CONTROLLER MASKS ********************** */
-/* UARTx_LCR Register */
-#ifdef _MISRA_RULES
-#define	WLS(x)		(((x)-5u) & 0x03u)	/* Word	Length Select */
-#else
-#define	WLS(x)		(((x)-5) & 0x03)	/* Word	Length Select */
-#endif /* _MISRA_RULES */
-#define	STB			0x04				/* Stop	Bits */
-#define	PEN			0x08				/* Parity Enable */
-#define	EPS			0x10				/* Even	Parity Select */
-#define	STP			0x20				/* Stick Parity */
-#define	SB			0x40				/* Set Break */
-#define	DLAB		0x80				/* Divisor Latch Access */
-
-#define	DLAB_P		0x07
-#define	SB_P		0x06
-#define	STP_P		0x05
-#define	EPS_P		0x04
-#define	PEN_P		0x03
-#define	STB_P		0x02
-#define	WLS_P1		0x01
-#define	WLS_P0		0x00
-
-/* UARTx_MCR Register */
-#define	LOOP_ENA	0x10	/* Loopback Mode Enable */
-#define	LOOP_ENA_P	0x04
-/* Deprecated UARTx_MCR	Mask			 */
-
-/* UARTx_LSR Register */
-#define	DR			0x01	/* Data	Ready */
-#define	OE			0x02	/* Overrun Error */
-#define	PE			0x04	/* Parity Error */
-#define	FE			0x08	/* Framing Error */
-#define	BI			0x10	/* Break Interrupt */
-#define	THRE		0x20	/* THR Empty */
-#define	TEMT		0x40	/* TSR and UART_THR Empty */
-
-#define	TEMP_P		0x06
-#define	THRE_P		0x05
-#define	BI_P		0x04
-#define	FE_P		0x03
-#define	PE_P		0x02
-#define	OE_P		0x01
-#define	DR_P		0x00
-
-/* UARTx_IER Register */
-#define	ERBFI		0x01		/* Enable Receive Buffer Full Interrupt */
-#define	ETBEI		0x02		/* Enable Transmit Buffer Empty	Interrupt */
-#define	ELSI		0x04		/* Enable RX Status Interrupt */
-
-#define	ELSI_P		0x02
-#define	ETBEI_P		0x01
-#define	ERBFI_P		0x00
-
-/* UARTx_IIR Register */
-#define	NINT		0x01
-#define	STATUS_P1	0x02
-#define	STATUS_P0	0x01
-#define	NINT_P		0x00
-
-/* UARTx_GCTL Register */
-#define	UCEN		0x01		/* Enable UARTx	Clocks */
-#define	IREN		0x02		/* Enable IrDA Mode */
-#define	TPOLC		0x04		/* IrDA	TX Polarity Change */
-#define	RPOLC		0x08		/* IrDA	RX Polarity Change */
-#define	FPE			0x10		/* Force Parity	Error On Transmit */
-#define	FFE			0x20		/* Force Framing Error On Transmit */
-
-#define	FFE_P		0x05
-#define	FPE_P		0x04
-#define	RPOLC_P		0x03
-#define	TPOLC_P		0x02
-#define	IREN_P		0x01
-#define	UCEN_P		0x00
-
-
-/*  *********  PARALLEL	PERIPHERAL INTERFACE (PPI) MASKS ****************   */
-/*  PPI_CONTROL	Masks	      */
-#define	PORT_EN		0x0001	/* PPI Port Enable  */
-#define	PORT_DIR	0x0002	/* PPI Port Direction	    */
-#define	XFR_TYPE	0x000C	/* PPI Transfer	Type  */
-#define	PORT_CFG	0x0030	/* PPI Port Configuration */
-#define	FLD_SEL		0x0040	/* PPI Active Field Select */
-#define	PACK_EN		0x0080	/* PPI Packing Mode */
-/* previous versions of	defBF539.h erroneously included	DMA32 (PPI 32-bit DMA Enable) */
-#define	SKIP_EN		0x0200	/* PPI Skip Element Enable */
-#define	SKIP_EO		0x0400	/* PPI Skip Even/Odd Elements */
-#define	DLENGTH		0x3800	/* PPI Data Length  */
-#define	DLEN_8		0x0	     /*	PPI Data Length	mask for DLEN=8 */
-#define	DLEN_10		0x0800		/* Data	Length = 10 Bits */
-#define	DLEN_11		0x1000		/* Data	Length = 11 Bits */
-#define	DLEN_12		0x1800		/* Data	Length = 12 Bits */
-#define	DLEN_13		0x2000		/* Data	Length = 13 Bits */
-#define	DLEN_14		0x2800		/* Data	Length = 14 Bits */
-#define	DLEN_15		0x3000		/* Data	Length = 15 Bits */
-#define	DLEN_16		0x3800		/* Data	Length = 16 Bits */
-#ifdef _MISRA_RULES
-#define	DLEN(x)		((((x)-9u) & 0x07u) << 11)  /* PPI Data	Length (only works for x=10-->x=16) */
-#else
-#define	DLEN(x)		((((x)-9) & 0x07) << 11)  /* PPI Data Length (only works for x=10-->x=16) */
-#endif /* _MISRA_RULES */
-#define	POL			0xC000	/* PPI Signal Polarities       */
-#define	POLC		0x4000		/* PPI Clock Polarity */
-#define	POLS		0x8000		/* PPI Frame Sync Polarity */
-
-
-/* PPI_STATUS Masks					     */
-#define	FLD			0x0400	/* Field Indicator   */
-#define	FT_ERR		0x0800	/* Frame Track Error */
-#define	OVR			0x1000	/* FIFO	Overflow Error */
-#define	UNDR		0x2000	/* FIFO	Underrun Error */
-#define	ERR_DET		0x4000	/* Error Detected Indicator */
-#define	ERR_NCOR	0x8000	/* Error Not Corrected Indicator */
-
-
-/* **********  DMA CONTROLLER MASKS  ***********************/
-
-/* DMAx_PERIPHERAL_MAP,	MDMA_yy_PERIPHERAL_MAP Masks */
-
-#define	CTYPE			0x0040	/* DMA Channel Type Indicator */
-#define	CTYPE_P			0x6		/* DMA Channel Type Indicator BIT POSITION */
-#define	PCAP8			0x0080	/* DMA 8-bit Operation Indicator   */
-#define	PCAP16			0x0100	/* DMA 16-bit Operation	Indicator */
-#define	PCAP32			0x0200	/* DMA 32-bit Operation	Indicator */
-#define	PCAPWR			0x0400	/* DMA Write Operation Indicator */
-#define	PCAPRD			0x0800	/* DMA Read Operation Indicator */
-#define	PMAP			0xF000	/* DMA Peripheral Map Field */
-
-/* PMAP	Encodings For DMA Controller 0 */
-#define	PMAP_PPI		0x0000	/* PMAP	PPI Port DMA */
-#define	PMAP_SPORT0RX	0x1000	/* PMAP	SPORT0 Receive DMA */
-#define	PMAP_SPORT0TX	0x2000	/* PMAP	SPORT0 Transmit	DMA */
-#define	PMAP_SPORT1RX	0x3000	/* PMAP	SPORT1 Receive DMA */
-#define	PMAP_SPORT1TX	0x4000	/* PMAP	SPORT1 Transmit	DMA */
-#define	PMAP_SPI0		0x5000	/* PMAP	SPI DMA */
-#define	PMAP_UART0RX		0x6000	/* PMAP	UART Receive DMA */
-#define	PMAP_UART0TX		0x7000	/* PMAP	UART Transmit DMA */
-
-/* PMAP	Encodings For DMA Controller 1 */
-#define	PMAP_SPORT2RX	    0x0000  /* PMAP SPORT2 Receive DMA */
-#define	PMAP_SPORT2TX	    0x1000  /* PMAP SPORT2 Transmit DMA */
-#define	PMAP_SPORT3RX	    0x2000  /* PMAP SPORT3 Receive DMA */
-#define	PMAP_SPORT3TX	    0x3000  /* PMAP SPORT3 Transmit DMA */
-#define	PMAP_SPI1	    0x6000  /* PMAP SPI1 DMA */
-#define	PMAP_SPI2	    0x7000  /* PMAP SPI2 DMA */
-#define	PMAP_UART1RX	    0x8000  /* PMAP UART1 Receive DMA */
-#define	PMAP_UART1TX	    0x9000  /* PMAP UART1 Transmit DMA */
-#define	PMAP_UART2RX	    0xA000  /* PMAP UART2 Receive DMA */
-#define	PMAP_UART2TX	    0xB000  /* PMAP UART2 Transmit DMA */
-
-
-/*  *************  GENERAL PURPOSE TIMER MASKS	******************** */
-/* PWM Timer bit definitions */
-/* TIMER_ENABLE	Register */
-#define	TIMEN0			0x0001	/* Enable Timer	0 */
-#define	TIMEN1			0x0002	/* Enable Timer	1 */
-#define	TIMEN2			0x0004	/* Enable Timer	2 */
-
-#define	TIMEN0_P		0x00
-#define	TIMEN1_P		0x01
-#define	TIMEN2_P		0x02
-
-/* TIMER_DISABLE Register */
-#define	TIMDIS0			0x0001	/* Disable Timer 0 */
-#define	TIMDIS1			0x0002	/* Disable Timer 1 */
-#define	TIMDIS2			0x0004	/* Disable Timer 2 */
-
-#define	TIMDIS0_P		0x00
-#define	TIMDIS1_P		0x01
-#define	TIMDIS2_P		0x02
-
-/* TIMER_STATUS	Register */
-#define	TIMIL0			0x0001	/* Timer 0 Interrupt */
-#define	TIMIL1			0x0002	/* Timer 1 Interrupt */
-#define	TIMIL2			0x0004	/* Timer 2 Interrupt */
-#define	TOVF_ERR0		0x0010	/* Timer 0 Counter Overflow */
-#define	TOVF_ERR1		0x0020	/* Timer 1 Counter Overflow */
-#define	TOVF_ERR2		0x0040	/* Timer 2 Counter Overflow */
-#define	TRUN0			0x1000	/* Timer 0 Slave Enable	Status */
-#define	TRUN1			0x2000	/* Timer 1 Slave Enable	Status */
-#define	TRUN2			0x4000	/* Timer 2 Slave Enable	Status */
-
-#define	TIMIL0_P		0x00
-#define	TIMIL1_P		0x01
-#define	TIMIL2_P		0x02
-#define	TOVF_ERR0_P		0x04
-#define	TOVF_ERR1_P		0x05
-#define	TOVF_ERR2_P		0x06
-#define	TRUN0_P			0x0C
-#define	TRUN1_P			0x0D
-#define	TRUN2_P			0x0E
-
-/* Alternate Deprecated	Macros Provided	For Backwards Code Compatibility */
-#define	TOVL_ERR0		TOVF_ERR0
-#define	TOVL_ERR1		TOVF_ERR1
-#define	TOVL_ERR2		TOVF_ERR2
-#define	TOVL_ERR0_P		TOVF_ERR0_P
-#define	TOVL_ERR1_P	TOVF_ERR1_P
-#define	TOVL_ERR2_P	TOVF_ERR2_P
-
-/* TIMERx_CONFIG Registers */
-#define	PWM_OUT			0x0001
-#define	WDTH_CAP		0x0002
-#define	EXT_CLK			0x0003
-#define	PULSE_HI		0x0004
-#define	PERIOD_CNT		0x0008
-#define	IRQ_ENA			0x0010
-#define	TIN_SEL			0x0020
-#define	OUT_DIS			0x0040
-#define	CLK_SEL			0x0080
-#define	TOGGLE_HI		0x0100
-#define	EMU_RUN			0x0200
-#ifdef _MISRA_RULES
-#define	ERR_TYP(x)		(((x) &	0x03u) << 14)
-#else
-#define	ERR_TYP(x)		(((x) &	0x03) << 14)
-#endif /* _MISRA_RULES */
-
-#define	TMODE_P0		0x00
-#define	TMODE_P1		0x01
-#define	PULSE_HI_P		0x02
-#define	PERIOD_CNT_P	0x03
-#define	IRQ_ENA_P		0x04
-#define	TIN_SEL_P		0x05
-#define	OUT_DIS_P		0x06
-#define	CLK_SEL_P		0x07
-#define	TOGGLE_HI_P		0x08
-#define	EMU_RUN_P		0x09
-#define	ERR_TYP_P0		0x0E
-#define	ERR_TYP_P1		0x0F
-
-
-/*/ ******************	 GENERAL-PURPOSE I/O  ********************* */
-/*  Flag I/O (FIO_) Masks */
-#define	PF0			0x0001
-#define	PF1			0x0002
-#define	PF2			0x0004
-#define	PF3			0x0008
-#define	PF4			0x0010
-#define	PF5			0x0020
-#define	PF6			0x0040
-#define	PF7			0x0080
-#define	PF8			0x0100
-#define	PF9			0x0200
-#define	PF10		0x0400
-#define	PF11		0x0800
-#define	PF12		0x1000
-#define	PF13		0x2000
-#define	PF14		0x4000
-#define	PF15		0x8000
-
-/*  PORT F BIT POSITIONS */
-#define	PF0_P		0x0
-#define	PF1_P		0x1
-#define	PF2_P		0x2
-#define	PF3_P		0x3
-#define	PF4_P		0x4
-#define	PF5_P		0x5
-#define	PF6_P		0x6
-#define	PF7_P		0x7
-#define	PF8_P		0x8
-#define	PF9_P		0x9
-#define	PF10_P		0xA
-#define	PF11_P		0xB
-#define	PF12_P		0xC
-#define	PF13_P		0xD
-#define	PF14_P		0xE
-#define	PF15_P		0xF
-
-
-/*******************   GPIO MASKS  *********************/
-/* Port	C Masks */
-#define	PC0		0x0001
-#define	PC1		0x0002
-#define	PC4		0x0010
-#define	PC5		0x0020
-#define	PC6		0x0040
-#define	PC7		0x0080
-#define	PC8		0x0100
-#define	PC9		0x0200
-/* Port	C Bit Positions */
-#define	PC0_P	0x0
-#define	PC1_P	0x1
-#define	PC4_P	0x4
-#define	PC5_P	0x5
-#define	PC6_P	0x6
-#define	PC7_P	0x7
-#define	PC8_P	0x8
-#define	PC9_P	0x9
-
-/* Port	D */
-#define	PD0		0x0001
-#define	PD1		0x0002
-#define	PD2		0x0004
-#define	PD3		0x0008
-#define	PD4		0x0010
-#define	PD5		0x0020
-#define	PD6		0x0040
-#define	PD7		0x0080
-#define	PD8		0x0100
-#define	PD9		0x0200
-#define	PD10	0x0400
-#define	PD11	0x0800
-#define	PD12	0x1000
-#define	PD13	0x2000
-#define	PD14	0x4000
-#define	PD15	0x8000
-/* Port	D Bit Positions */
-#define	PD0_P	0x0
-#define	PD1_P	0x1
-#define	PD2_P	0x2
-#define	PD3_P	0x3
-#define	PD4_P	0x4
-#define	PD5_P	0x5
-#define	PD6_P	0x6
-#define	PD7_P	0x7
-#define	PD8_P	0x8
-#define	PD9_P	0x9
-#define	PD10_P	0xA
-#define	PD11_P	0xB
-#define	PD12_P	0xC
-#define	PD13_P	0xD
-#define	PD14_P	0xE
-#define	PD15_P	0xF
-
-/* Port	E */
-#define	PE0		0x0001
-#define	PE1		0x0002
-#define	PE2		0x0004
-#define	PE3		0x0008
-#define	PE4		0x0010
-#define	PE5		0x0020
-#define	PE6		0x0040
-#define	PE7		0x0080
-#define	PE8		0x0100
-#define	PE9		0x0200
-#define	PE10	0x0400
-#define	PE11	0x0800
-#define	PE12	0x1000
-#define	PE13	0x2000
-#define	PE14	0x4000
-#define	PE15	0x8000
-/* Port	E Bit Positions */
-#define	PE0_P	0x0
-#define	PE1_P	0x1
-#define	PE2_P	0x2
-#define	PE3_P	0x3
-#define	PE4_P	0x4
-#define	PE5_P	0x5
-#define	PE6_P	0x6
-#define	PE7_P	0x7
-#define	PE8_P	0x8
-#define	PE9_P	0x9
-#define	PE10_P	0xA
-#define	PE11_P	0xB
-#define	PE12_P	0xC
-#define	PE13_P	0xD
-#define	PE14_P	0xE
-#define	PE15_P	0xF
-
-/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS	************* */
-/* EBIU_AMGCTL Masks */
-#define	AMCKEN		0x0001	/* Enable CLKOUT */
-#define	AMBEN_NONE	0x0000	/* All Banks Disabled */
-#define	AMBEN_B0	0x0002	/* Enable Asynchronous Memory Bank 0 only */
-#define	AMBEN_B0_B1	0x0004	/* Enable Asynchronous Memory Banks 0 &	1 only */
-#define	AMBEN_B0_B1_B2	0x0006	/* Enable Asynchronous Memory Banks 0, 1, and 2 */
-#define	AMBEN_ALL	0x0008	/* Enable Asynchronous Memory Banks (all) 0, 1,	2, and 3 */
-#define	CDPRIO		0x0100	/* DMA has priority over core for external accesses */
-
-/* EBIU_AMGCTL Bit Positions */
-#define	AMCKEN_P		0x0000	/* Enable CLKOUT */
-#define	AMBEN_P0		0x0001	/* Asynchronous	Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
-#define	AMBEN_P1		0x0002	/* Asynchronous	Memory Enable, 010 - banks 0&1 enabled,	 011 - banks 0-3 enabled */
-#define	AMBEN_P2		0x0003	/* Asynchronous	Memory Enable, 1xx - All banks (bank 0,	1, 2, and 3) enabled */
-
-/* EBIU_AMBCTL0	Masks */
-#define	B0RDYEN			0x00000001  /* Bank 0 RDY Enable, 0=disable, 1=enable */
-#define	B0RDYPOL		0x00000002  /* Bank 0 RDY Active high, 0=active	low, 1=active high */
-#define	B0TT_1			0x00000004  /* Bank 0 Transition Time from Read	to Write = 1 cycle */
-#define	B0TT_2			0x00000008  /* Bank 0 Transition Time from Read	to Write = 2 cycles */
-#define	B0TT_3			0x0000000C  /* Bank 0 Transition Time from Read	to Write = 3 cycles */
-#define	B0TT_4			0x00000000  /* Bank 0 Transition Time from Read	to Write = 4 cycles */
-#define	B0ST_1			0x00000010  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
-#define	B0ST_2			0x00000020  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
-#define	B0ST_3			0x00000030  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
-#define	B0ST_4			0x00000000  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
-#define	B0HT_1			0x00000040  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 1 cycle */
-#define	B0HT_2			0x00000080  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 2 cycles */
-#define	B0HT_3			0x000000C0  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 3 cycles */
-#define	B0HT_0			0x00000000  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 0 cycles */
-#define	B0RAT_1			0x00000100  /* Bank 0 Read Access Time = 1 cycle */
-#define	B0RAT_2			0x00000200  /* Bank 0 Read Access Time = 2 cycles */
-#define	B0RAT_3			0x00000300  /* Bank 0 Read Access Time = 3 cycles */
-#define	B0RAT_4			0x00000400  /* Bank 0 Read Access Time = 4 cycles */
-#define	B0RAT_5			0x00000500  /* Bank 0 Read Access Time = 5 cycles */
-#define	B0RAT_6			0x00000600  /* Bank 0 Read Access Time = 6 cycles */
-#define	B0RAT_7			0x00000700  /* Bank 0 Read Access Time = 7 cycles */
-#define	B0RAT_8			0x00000800  /* Bank 0 Read Access Time = 8 cycles */
-#define	B0RAT_9			0x00000900  /* Bank 0 Read Access Time = 9 cycles */
-#define	B0RAT_10		0x00000A00  /* Bank 0 Read Access Time = 10 cycles */
-#define	B0RAT_11		0x00000B00  /* Bank 0 Read Access Time = 11 cycles */
-#define	B0RAT_12		0x00000C00  /* Bank 0 Read Access Time = 12 cycles */
-#define	B0RAT_13		0x00000D00  /* Bank 0 Read Access Time = 13 cycles */
-#define	B0RAT_14		0x00000E00  /* Bank 0 Read Access Time = 14 cycles */
-#define	B0RAT_15		0x00000F00  /* Bank 0 Read Access Time = 15 cycles */
-#define	B0WAT_1			0x00001000  /* Bank 0 Write Access Time	= 1 cycle */
-#define	B0WAT_2			0x00002000  /* Bank 0 Write Access Time	= 2 cycles */
-#define	B0WAT_3			0x00003000  /* Bank 0 Write Access Time	= 3 cycles */
-#define	B0WAT_4			0x00004000  /* Bank 0 Write Access Time	= 4 cycles */
-#define	B0WAT_5			0x00005000  /* Bank 0 Write Access Time	= 5 cycles */
-#define	B0WAT_6			0x00006000  /* Bank 0 Write Access Time	= 6 cycles */
-#define	B0WAT_7			0x00007000  /* Bank 0 Write Access Time	= 7 cycles */
-#define	B0WAT_8			0x00008000  /* Bank 0 Write Access Time	= 8 cycles */
-#define	B0WAT_9			0x00009000  /* Bank 0 Write Access Time	= 9 cycles */
-#define	B0WAT_10		0x0000A000  /* Bank 0 Write Access Time	= 10 cycles */
-#define	B0WAT_11		0x0000B000  /* Bank 0 Write Access Time	= 11 cycles */
-#define	B0WAT_12		0x0000C000  /* Bank 0 Write Access Time	= 12 cycles */
-#define	B0WAT_13		0x0000D000  /* Bank 0 Write Access Time	= 13 cycles */
-#define	B0WAT_14		0x0000E000  /* Bank 0 Write Access Time	= 14 cycles */
-#define	B0WAT_15		0x0000F000  /* Bank 0 Write Access Time	= 15 cycles */
-#define	B1RDYEN			0x00010000  /* Bank 1 RDY enable, 0=disable, 1=enable */
-#define	B1RDYPOL		0x00020000  /* Bank 1 RDY Active high, 0=active	low, 1=active high */
-#define	B1TT_1			0x00040000  /* Bank 1 Transition Time from Read	to Write = 1 cycle */
-#define	B1TT_2			0x00080000  /* Bank 1 Transition Time from Read	to Write = 2 cycles */
-#define	B1TT_3			0x000C0000  /* Bank 1 Transition Time from Read	to Write = 3 cycles */
-#define	B1TT_4			0x00000000  /* Bank 1 Transition Time from Read	to Write = 4 cycles */
-#define	B1ST_1			0x00100000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define	B1ST_2			0x00200000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define	B1ST_3			0x00300000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define	B1ST_4			0x00000000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define	B1HT_1			0x00400000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
-#define	B1HT_2			0x00800000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
-#define	B1HT_3			0x00C00000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
-#define	B1HT_0			0x00000000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
-#define	B1RAT_1			0x01000000  /* Bank 1 Read Access Time = 1 cycle */
-#define	B1RAT_2			0x02000000  /* Bank 1 Read Access Time = 2 cycles */
-#define	B1RAT_3			0x03000000  /* Bank 1 Read Access Time = 3 cycles */
-#define	B1RAT_4			0x04000000  /* Bank 1 Read Access Time = 4 cycles */
-#define	B1RAT_5			0x05000000  /* Bank 1 Read Access Time = 5 cycles */
-#define	B1RAT_6			0x06000000  /* Bank 1 Read Access Time = 6 cycles */
-#define	B1RAT_7			0x07000000  /* Bank 1 Read Access Time = 7 cycles */
-#define	B1RAT_8			0x08000000  /* Bank 1 Read Access Time = 8 cycles */
-#define	B1RAT_9			0x09000000  /* Bank 1 Read Access Time = 9 cycles */
-#define	B1RAT_10		0x0A000000  /* Bank 1 Read Access Time = 10 cycles */
-#define	B1RAT_11		0x0B000000  /* Bank 1 Read Access Time = 11 cycles */
-#define	B1RAT_12		0x0C000000  /* Bank 1 Read Access Time = 12 cycles */
-#define	B1RAT_13		0x0D000000  /* Bank 1 Read Access Time = 13 cycles */
-#define	B1RAT_14		0x0E000000  /* Bank 1 Read Access Time = 14 cycles */
-#define	B1RAT_15		0x0F000000  /* Bank 1 Read Access Time = 15 cycles */
-#define	B1WAT_1			0x10000000 /* Bank 1 Write Access Time = 1 cycle */
-#define	B1WAT_2			0x20000000  /* Bank 1 Write Access Time	= 2 cycles */
-#define	B1WAT_3			0x30000000  /* Bank 1 Write Access Time	= 3 cycles */
-#define	B1WAT_4			0x40000000  /* Bank 1 Write Access Time	= 4 cycles */
-#define	B1WAT_5			0x50000000  /* Bank 1 Write Access Time	= 5 cycles */
-#define	B1WAT_6			0x60000000  /* Bank 1 Write Access Time	= 6 cycles */
-#define	B1WAT_7			0x70000000  /* Bank 1 Write Access Time	= 7 cycles */
-#define	B1WAT_8			0x80000000  /* Bank 1 Write Access Time	= 8 cycles */
-#define	B1WAT_9			0x90000000  /* Bank 1 Write Access Time	= 9 cycles */
-#define	B1WAT_10		0xA0000000  /* Bank 1 Write Access Time	= 10 cycles */
-#define	B1WAT_11		0xB0000000  /* Bank 1 Write Access Time	= 11 cycles */
-#define	B1WAT_12		0xC0000000  /* Bank 1 Write Access Time	= 12 cycles */
-#define	B1WAT_13		0xD0000000  /* Bank 1 Write Access Time	= 13 cycles */
-#define	B1WAT_14		0xE0000000  /* Bank 1 Write Access Time	= 14 cycles */
-#define	B1WAT_15		0xF0000000  /* Bank 1 Write Access Time	= 15 cycles */
-
-/* EBIU_AMBCTL1	Masks */
-#define	B2RDYEN			0x00000001  /* Bank 2 RDY Enable, 0=disable, 1=enable */
-#define	B2RDYPOL		0x00000002  /* Bank 2 RDY Active high, 0=active	low, 1=active high */
-#define	B2TT_1			0x00000004  /* Bank 2 Transition Time from Read	to Write = 1 cycle */
-#define	B2TT_2			0x00000008  /* Bank 2 Transition Time from Read	to Write = 2 cycles */
-#define	B2TT_3			0x0000000C  /* Bank 2 Transition Time from Read	to Write = 3 cycles */
-#define	B2TT_4			0x00000000  /* Bank 2 Transition Time from Read	to Write = 4 cycles */
-#define	B2ST_1			0x00000010  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define	B2ST_2			0x00000020  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define	B2ST_3			0x00000030  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define	B2ST_4			0x00000000  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define	B2HT_1			0x00000040  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
-#define	B2HT_2			0x00000080  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
-#define	B2HT_3			0x000000C0  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
-#define	B2HT_0			0x00000000  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
-#define	B2RAT_1			0x00000100  /* Bank 2 Read Access Time = 1 cycle */
-#define	B2RAT_2			0x00000200  /* Bank 2 Read Access Time = 2 cycles */
-#define	B2RAT_3			0x00000300  /* Bank 2 Read Access Time = 3 cycles */
-#define	B2RAT_4			0x00000400  /* Bank 2 Read Access Time = 4 cycles */
-#define	B2RAT_5			0x00000500  /* Bank 2 Read Access Time = 5 cycles */
-#define	B2RAT_6			0x00000600  /* Bank 2 Read Access Time = 6 cycles */
-#define	B2RAT_7			0x00000700  /* Bank 2 Read Access Time = 7 cycles */
-#define	B2RAT_8			0x00000800  /* Bank 2 Read Access Time = 8 cycles */
-#define	B2RAT_9			0x00000900  /* Bank 2 Read Access Time = 9 cycles */
-#define	B2RAT_10		0x00000A00  /* Bank 2 Read Access Time = 10 cycles */
-#define	B2RAT_11		0x00000B00  /* Bank 2 Read Access Time = 11 cycles */
-#define	B2RAT_12		0x00000C00  /* Bank 2 Read Access Time = 12 cycles */
-#define	B2RAT_13		0x00000D00  /* Bank 2 Read Access Time = 13 cycles */
-#define	B2RAT_14		0x00000E00  /* Bank 2 Read Access Time = 14 cycles */
-#define	B2RAT_15		0x00000F00  /* Bank 2 Read Access Time = 15 cycles */
-#define	B2WAT_1			0x00001000  /* Bank 2 Write Access Time	= 1 cycle */
-#define	B2WAT_2			0x00002000  /* Bank 2 Write Access Time	= 2 cycles */
-#define	B2WAT_3			0x00003000  /* Bank 2 Write Access Time	= 3 cycles */
-#define	B2WAT_4			0x00004000  /* Bank 2 Write Access Time	= 4 cycles */
-#define	B2WAT_5			0x00005000  /* Bank 2 Write Access Time	= 5 cycles */
-#define	B2WAT_6			0x00006000  /* Bank 2 Write Access Time	= 6 cycles */
-#define	B2WAT_7			0x00007000  /* Bank 2 Write Access Time	= 7 cycles */
-#define	B2WAT_8			0x00008000  /* Bank 2 Write Access Time	= 8 cycles */
-#define	B2WAT_9			0x00009000  /* Bank 2 Write Access Time	= 9 cycles */
-#define	B2WAT_10		0x0000A000  /* Bank 2 Write Access Time	= 10 cycles */
-#define	B2WAT_11		0x0000B000  /* Bank 2 Write Access Time	= 11 cycles */
-#define	B2WAT_12		0x0000C000  /* Bank 2 Write Access Time	= 12 cycles */
-#define	B2WAT_13		0x0000D000  /* Bank 2 Write Access Time	= 13 cycles */
-#define	B2WAT_14		0x0000E000  /* Bank 2 Write Access Time	= 14 cycles */
-#define	B2WAT_15		0x0000F000  /* Bank 2 Write Access Time	= 15 cycles */
-#define	B3RDYEN			0x00010000  /* Bank 3 RDY enable, 0=disable, 1=enable */
-#define	B3RDYPOL		0x00020000  /* Bank 3 RDY Active high, 0=active	low, 1=active high */
-#define	B3TT_1			0x00040000  /* Bank 3 Transition Time from Read	to Write = 1 cycle */
-#define	B3TT_2			0x00080000  /* Bank 3 Transition Time from Read	to Write = 2 cycles */
-#define	B3TT_3			0x000C0000  /* Bank 3 Transition Time from Read	to Write = 3 cycles */
-#define	B3TT_4			0x00000000  /* Bank 3 Transition Time from Read	to Write = 4 cycles */
-#define	B3ST_1			0x00100000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define	B3ST_2			0x00200000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define	B3ST_3			0x00300000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define	B3ST_4			0x00000000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define	B3HT_1			0x00400000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
-#define	B3HT_2			0x00800000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
-#define	B3HT_3			0x00C00000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
-#define	B3HT_0			0x00000000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
-#define	B3RAT_1			0x01000000 /* Bank 3 Read Access Time =	1 cycle */
-#define	B3RAT_2			0x02000000  /* Bank 3 Read Access Time = 2 cycles */
-#define	B3RAT_3			0x03000000  /* Bank 3 Read Access Time = 3 cycles */
-#define	B3RAT_4			0x04000000  /* Bank 3 Read Access Time = 4 cycles */
-#define	B3RAT_5			0x05000000  /* Bank 3 Read Access Time = 5 cycles */
-#define	B3RAT_6			0x06000000  /* Bank 3 Read Access Time = 6 cycles */
-#define	B3RAT_7			0x07000000  /* Bank 3 Read Access Time = 7 cycles */
-#define	B3RAT_8			0x08000000  /* Bank 3 Read Access Time = 8 cycles */
-#define	B3RAT_9			0x09000000  /* Bank 3 Read Access Time = 9 cycles */
-#define	B3RAT_10		0x0A000000  /* Bank 3 Read Access Time = 10 cycles */
-#define	B3RAT_11		0x0B000000  /* Bank 3 Read Access Time = 11 cycles */
-#define	B3RAT_12		0x0C000000  /* Bank 3 Read Access Time = 12 cycles */
-#define	B3RAT_13		0x0D000000  /* Bank 3 Read Access Time = 13 cycles */
-#define	B3RAT_14		0x0E000000  /* Bank 3 Read Access Time = 14 cycles */
-#define	B3RAT_15		0x0F000000  /* Bank 3 Read Access Time = 15 cycles */
-#define	B3WAT_1			0x10000000 /* Bank 3 Write Access Time = 1 cycle */
-#define	B3WAT_2			0x20000000  /* Bank 3 Write Access Time	= 2 cycles */
-#define	B3WAT_3			0x30000000  /* Bank 3 Write Access Time	= 3 cycles */
-#define	B3WAT_4			0x40000000  /* Bank 3 Write Access Time	= 4 cycles */
-#define	B3WAT_5			0x50000000  /* Bank 3 Write Access Time	= 5 cycles */
-#define	B3WAT_6			0x60000000  /* Bank 3 Write Access Time	= 6 cycles */
-#define	B3WAT_7			0x70000000  /* Bank 3 Write Access Time	= 7 cycles */
-#define	B3WAT_8			0x80000000  /* Bank 3 Write Access Time	= 8 cycles */
-#define	B3WAT_9			0x90000000  /* Bank 3 Write Access Time	= 9 cycles */
-#define	B3WAT_10		0xA0000000  /* Bank 3 Write Access Time	= 10 cycles */
-#define	B3WAT_11		0xB0000000  /* Bank 3 Write Access Time	= 11 cycles */
-#define	B3WAT_12		0xC0000000  /* Bank 3 Write Access Time	= 12 cycles */
-#define	B3WAT_13		0xD0000000  /* Bank 3 Write Access Time	= 13 cycles */
-#define	B3WAT_14		0xE0000000  /* Bank 3 Write Access Time	= 14 cycles */
-#define	B3WAT_15		0xF0000000  /* Bank 3 Write Access Time	= 15 cycles */
-
-/* **********************  SDRAM CONTROLLER MASKS  *************************** */
-/* EBIU_SDGCTL Masks */
-#define	SCTLE			0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
-#define	CL_2			0x00000008 /* SDRAM CAS	latency	= 2 cycles */
-#define	CL_3			0x0000000C /* SDRAM CAS	latency	= 3 cycles */
-#define	PFE				0x00000010 /* Enable SDRAM prefetch */
-#define	PFP				0x00000020 /* Prefetch has priority over AMC requests */
-#define	PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh */
-#define	PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In	Self-Refresh */
-#define	PASR_B0			0x00000020	/* Only	SDRAM Bank 0 Is	Refreshed In Self-Refresh */
-#define	TRAS_1			0x00000040 /* SDRAM tRAS = 1 cycle */
-#define	TRAS_2			0x00000080 /* SDRAM tRAS = 2 cycles */
-#define	TRAS_3			0x000000C0 /* SDRAM tRAS = 3 cycles */
-#define	TRAS_4			0x00000100 /* SDRAM tRAS = 4 cycles */
-#define	TRAS_5			0x00000140 /* SDRAM tRAS = 5 cycles */
-#define	TRAS_6			0x00000180 /* SDRAM tRAS = 6 cycles */
-#define	TRAS_7			0x000001C0 /* SDRAM tRAS = 7 cycles */
-#define	TRAS_8			0x00000200 /* SDRAM tRAS = 8 cycles */
-#define	TRAS_9			0x00000240 /* SDRAM tRAS = 9 cycles */
-#define	TRAS_10			0x00000280 /* SDRAM tRAS = 10 cycles */
-#define	TRAS_11			0x000002C0 /* SDRAM tRAS = 11 cycles */
-#define	TRAS_12			0x00000300 /* SDRAM tRAS = 12 cycles */
-#define	TRAS_13			0x00000340 /* SDRAM tRAS = 13 cycles */
-#define	TRAS_14			0x00000380 /* SDRAM tRAS = 14 cycles */
-#define	TRAS_15			0x000003C0 /* SDRAM tRAS = 15 cycles */
-#define	TRP_1			0x00000800 /* SDRAM tRP	= 1 cycle */
-#define	TRP_2			0x00001000 /* SDRAM tRP	= 2 cycles */
-#define	TRP_3			0x00001800 /* SDRAM tRP	= 3 cycles */
-#define	TRP_4			0x00002000 /* SDRAM tRP	= 4 cycles */
-#define	TRP_5			0x00002800 /* SDRAM tRP	= 5 cycles */
-#define	TRP_6			0x00003000 /* SDRAM tRP	= 6 cycles */
-#define	TRP_7			0x00003800 /* SDRAM tRP	= 7 cycles */
-#define	TRCD_1			0x00008000 /* SDRAM tRCD = 1 cycle */
-#define	TRCD_2			0x00010000 /* SDRAM tRCD = 2 cycles */
-#define	TRCD_3			0x00018000 /* SDRAM tRCD = 3 cycles */
-#define	TRCD_4			0x00020000 /* SDRAM tRCD = 4 cycles */
-#define	TRCD_5			0x00028000 /* SDRAM tRCD = 5 cycles */
-#define	TRCD_6			0x00030000 /* SDRAM tRCD = 6 cycles */
-#define	TRCD_7			0x00038000 /* SDRAM tRCD = 7 cycles */
-#define	TWR_1			0x00080000 /* SDRAM tWR	= 1 cycle */
-#define	TWR_2			0x00100000 /* SDRAM tWR	= 2 cycles */
-#define	TWR_3			0x00180000 /* SDRAM tWR	= 3 cycles */
-#define	PUPSD			0x00200000 /*Power-up start delay */
-#define	PSM				0x00400000 /* SDRAM power-up sequence =	Precharge, mode	register set, 8	CBR refresh cycles */
-#define	PSS				0x00800000 /* enable SDRAM power-up sequence on	next SDRAM access */
-#define	SRFS			0x01000000 /* Start SDRAM self-refresh mode */
-#define	EBUFE			0x02000000 /* Enable external buffering	timing */
-#define	FBBRW			0x04000000 /* Fast back-to-back	read write enable */
-#define	EMREN			0x10000000 /* Extended mode register enable */
-#define	TCSR			0x20000000 /* Temp compensated self refresh value 85 deg C */
-#define	CDDBG			0x40000000 /* Tristate SDRAM controls during bus grant */
-
-/* EBIU_SDBCTL Masks */
-#define	EBE				0x00000001 /* Enable SDRAM external bank */
-#define	EBSZ_16			0x00000000 /* SDRAM external bank size = 16MB */
-#define	EBSZ_32			0x00000002 /* SDRAM external bank size = 32MB */
-#define	EBSZ_64			0x00000004 /* SDRAM external bank size = 64MB */
-#define	EBSZ_128		0x00000006 /* SDRAM external bank size = 128MB */
-#define	EBSZ_256		0x00000008 /* SDRAM External Bank Size = 256MB */
-#define	EBSZ_512		0x0000000A /* SDRAM External Bank Size = 512MB */
-#define	EBCAW_8			0x00000000 /* SDRAM external bank column address width = 8 bits */
-#define	EBCAW_9			0x00000010 /* SDRAM external bank column address width = 9 bits */
-#define	EBCAW_10		0x00000020 /* SDRAM external bank column address width = 9 bits */
-#define	EBCAW_11		0x00000030 /* SDRAM external bank column address width = 9 bits */
-
-/* EBIU_SDSTAT Masks */
-#define	SDCI			0x00000001 /* SDRAM controller is idle */
-#define	SDSRA			0x00000002 /* SDRAM SDRAM self refresh is active */
-#define	SDPUA			0x00000004 /* SDRAM power up active  */
-#define	SDRS			0x00000008 /* SDRAM is in reset	state */
-#define	SDEASE			0x00000010 /* SDRAM EAB	sticky error status - W1C */
-#define	BGSTAT			0x00000020 /* Bus granted */
-
-
-/*  ********************  TWO-WIRE INTERFACE (TWIx) MASKS  ***********************/
-/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);	 ) */
-#ifdef _MISRA_RULES
-#define	CLKLOW(x)	((x) & 0xFFu)		/* Periods Clock Is Held Low */
-#define	CLKHI(y)	(((y)&0xFFu)<<0x8)	/* Periods Before New Clock Low */
-#else
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low */
-#define	CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low */
-#endif /* _MISRA_RULES */
-
-/* TWIx_PRESCALE Masks								 */
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz) */
-#define	TWI_ENA		0x0080		/* TWI Enable		 */
-#define	SCCB		0x0200		/* SCCB	Compatibility Enable */
-
-/* TWIx_SLAVE_CTRL Masks								 */
-#define	SEN			0x0001		/* Slave Enable		 */
-#define	SADD_LEN	0x0002		/* Slave Address Length */
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid */
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call	Adrress	Matching Enabled */
-
-/* TWIx_SLAVE_STAT Masks								 */
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*) */
-#define	GCALL		0x0002		/* General Call	Indicator */
-
-/* TWIx_MASTER_CTRL Masks						 */
-#define	MEN			0x0001		/* Master Mode Enable */
-#define	MADD_LEN	0x0002		/* Master Address Length */
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*) */
-#define	FAST		0x0008		/* Use Fast Mode Timing	Specs */
-#define	STOP		0x0010		/* Issue Stop Condition */
-#define	RSTART		0x0020		/* Repeat Start	or Stop* At End	Of Transfer */
-#define	DCNT		0x3FC0		/* Data	Bytes To Transfer */
-#define	SDAOVR		0x4000		/* Serial Data Override */
-#define	SCLOVR		0x8000		/* Serial Clock	Override */
-
-/* TWIx_MASTER_STAT Masks							 */
-#define	MPROG		0x0001		/* Master Transfer In Progress */
-#define	LOSTARB		0x0002		/* Lost	Arbitration Indicator (Xfer Aborted) */
-#define	ANAK		0x0004		/* Address Not Acknowledged */
-#define	DNAK		0x0008		/* Data	Not Acknowledged */
-#define	BUFRDERR	0x0010		/* Buffer Read Error */
-#define	BUFWRERR	0x0020		/* Buffer Write	Error */
-#define	SDASEN		0x0040		/* Serial Data Sense */
-#define	SCLSEN		0x0080		/* Serial Clock	Sense */
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator */
-
-/* TWIx_INT_SRC	and TWIx_INT_ENABLE Masks */
-#define	SINIT		0x0001		/* Slave Transfer Initiated */
-#define	SCOMP		0x0002		/* Slave Transfer Complete */
-#define	SERR		0x0004		/* Slave Transfer Error */
-#define	SOVF		0x0008		/* Slave Overflow */
-#define	MCOMP		0x0010		/* Master Transfer Complete */
-#define	MERR		0x0020		/* Master Transfer Error */
-#define	XMTSERV		0x0040		/* Transmit FIFO Service */
-#define	RCVSERV		0x0080		/* Receive FIFO	Service */
-
-/* TWIx_FIFO_CTL Masks					 */
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush */
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush */
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length */
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length */
-
-/* TWIx_FIFO_STAT Masks								 */
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status */
-#define	XMT_EMPTY	0x0000		/*		Transmit FIFO Empty */
-#define	XMT_HALF	0x0001		/*		Transmit FIFO Has 1 Byte To Write */
-#define	XMT_FULL	0x0003		/*		Transmit FIFO Full (2 Bytes To Write) */
-
-#define	RCVSTAT		0x000C		/* Receive FIFO	Status */
-#define	RCV_EMPTY	0x0000		/*		Receive	FIFO Empty */
-#define	RCV_HALF	0x0004		/*		Receive	FIFO Has 1 Byte	To Read */
-#define	RCV_FULL	0x000C		/*		Receive	FIFO Full (2 Bytes To Read) */
-
 #endif /* _DEF_BF539_H */
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index bd9adb7..8a5beee 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -70,4 +70,9 @@
 #define PORT_D GPIO_PD0
 #define PORT_E GPIO_PE0
 
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/pll.h b/arch/blackfin/mach-bf538/include/mach/pll.h
index b30bbcd..94cca67 100644
--- a/arch/blackfin/mach-bf538/include/mach/pll.h
+++ b/arch/blackfin/mach-bf538/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index f0c0eef..d11502a 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -156,7 +156,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -211,7 +211,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX,
 #ifdef CONFIG_BFIN_UART1_CTSRTS
 	P_UART1_RTS, P_UART1_CTS,
@@ -258,7 +258,7 @@
 	},
 };
 
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
 	P_UART2_TX, P_UART2_RX, 0
 };
 
@@ -313,7 +313,7 @@
 #endif
 };
 
-unsigned short bfin_uart3_peripherals[] = {
+static unsigned short bfin_uart3_peripherals[] = {
 	P_UART3_TX, P_UART3_RX,
 #ifdef CONFIG_BFIN_UART3_CTSRTS
 	P_UART3_RTS, P_UART3_CTS,
@@ -482,11 +482,13 @@
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -502,6 +504,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -518,7 +521,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -550,9 +553,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -584,9 +587,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -618,7 +621,7 @@
 	},
 };
 
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
 	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
 	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
 };
@@ -652,7 +655,7 @@
 	},
 };
 
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
 	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
 	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
 };
@@ -754,7 +757,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 216e269..ce5a2bb 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -261,7 +261,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -316,7 +316,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX,
 #ifdef CONFIG_BFIN_UART1_CTSRTS
 	P_UART1_RTS, P_UART1_CTS,
@@ -363,7 +363,7 @@
 	},
 };
 
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
 	P_UART2_TX, P_UART2_RX, 0
 };
 
@@ -418,7 +418,7 @@
 #endif
 };
 
-unsigned short bfin_uart3_peripherals[] = {
+static unsigned short bfin_uart3_peripherals[] = {
 	P_UART3_TX, P_UART3_RX,
 #ifdef CONFIG_BFIN_UART3_CTSRTS
 	P_UART3_RTS, P_UART3_CTS,
@@ -587,11 +587,13 @@
 		.start	= IRQ_USB_INT0,
 		.end	= IRQ_USB_INT0,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "mc"
 	},
 	[2] = {	/* DMA IRQ */
 		.start	= IRQ_USB_DMA,
 		.end	= IRQ_USB_DMA,
 		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
+		.name	= "dma"
 	},
 };
 
@@ -607,6 +609,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -623,7 +626,7 @@
 static u64 musb_dmamask = ~(u32)0;
 
 static struct platform_device musb_device = {
-	.name		= "musb_hdrc",
+	.name		= "musb-blackfin",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &musb_dmamask,
@@ -655,9 +658,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -689,9 +692,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -723,7 +726,7 @@
 	},
 };
 
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
 	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
 	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
 };
@@ -757,7 +760,7 @@
 	},
 };
 
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
 	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
 	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
 };
@@ -775,7 +778,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
diff --git a/arch/blackfin/mach-bf548/dma.c b/arch/blackfin/mach-bf548/dma.c
index 888b9cc..69ead33 100644
--- a/arch/blackfin/mach-bf548/dma.c
+++ b/arch/blackfin/mach-bf548/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
new file mode 100644
index 0000000..a77109f
--- /dev/null
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
@@ -0,0 +1,16 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	4
+
+#define BFIN_UART_BF54X_STYLE
+
+#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
index dd44aa7..0d94eda 100644
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
@@ -4,72 +4,14 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER_SET))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-#define UART_GET_MSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_MSR))
-#define UART_GET_MCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_MCR))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_SET_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER_SET),v)
-#define UART_CLEAR_IER(uart,v)  bfin_write16(((uart)->port.membase + OFFSET_IER_CLEAR),v)
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LSR(uart,v)	bfin_write16(((uart)->port.membase + OFFSET_LSR),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_CLEAR_LSR(uart)    bfin_write16(((uart)->port.membase + OFFSET_LSR), -1)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-#define UART_PUT_MCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_MCR),v)
-#define UART_CLEAR_SCTS(uart)   bfin_write16(((uart)->port.membase + OFFSET_MSR),SCTS)
-
-#define UART_SET_DLAB(uart)     /* MMRs not muxed on BF54x */
-#define UART_CLEAR_DLAB(uart)   /* MMRs not muxed on BF54x */
-
-#define UART_GET_CTS(x) (UART_GET_MSR(x) & CTS)
-#define UART_DISABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS|MRTS))
-#define UART_ENABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
-#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \
 	defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS)
 # define CONFIG_SERIAL_BFIN_HARD_CTSRTS
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	int			scts;
-	int			cts_pin;
-	int			rts_pin;
-#endif
-};
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -148,3 +90,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index 5684030..72da721 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -10,58 +10,40 @@
 #include "bf548.h"
 #include "anomaly.h"
 
+#include <asm/def_LPBlackfin.h>
 #ifdef CONFIG_BF542
-#include "defBF542.h"
-#endif
-
-#ifdef CONFIG_BF544
-#include "defBF544.h"
-#endif
-
-#ifdef CONFIG_BF547
-#include "defBF547.h"
-#endif
-
-#ifdef CONFIG_BF548
-#include "defBF548.h"
-#endif
-
-#ifdef CONFIG_BF549
-#include "defBF549.h"
-#endif
-
-#if !defined(__ASSEMBLY__)
-#ifdef CONFIG_BF542
-#include "cdefBF542.h"
+# include "defBF542.h"
 #endif
 #ifdef CONFIG_BF544
-#include "cdefBF544.h"
+# include "defBF544.h"
 #endif
 #ifdef CONFIG_BF547
-#include "cdefBF547.h"
+# include "defBF547.h"
 #endif
 #ifdef CONFIG_BF548
-#include "cdefBF548.h"
+# include "defBF548.h"
 #endif
 #ifdef CONFIG_BF549
-#include "cdefBF549.h"
+# include "defBF549.h"
 #endif
 
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF542
+#  include "cdefBF542.h"
+# endif
+# ifdef CONFIG_BF544
+#  include "cdefBF544.h"
+# endif
+# ifdef CONFIG_BF547
+#  include "cdefBF547.h"
+# endif
+# ifdef CONFIG_BF548
+#  include "cdefBF548.h"
+# endif
+# ifdef CONFIG_BF549
+#  include "cdefBF549.h"
+# endif
 #endif
 
-#define BFIN_UART_NR_PORTS	4
-
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_GCTL             0x08	/* Global Control Register              */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_IER_SET          0x20	/* Set Interrupt Enable Register        */
-#define OFFSET_IER_CLEAR        0x24	/* Clear Interrupt Enable Register      */
-#define OFFSET_THR              0x28	/* Transmit Holding register            */
-#define OFFSET_RBR              0x2C	/* Receive Buffer register              */
-
 #endif
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
index 42f4a94..d09c19c 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF542_H
 #define _CDEF_BF542_H
 
-/* include all Core registers and bit definitions */
-#include "defBF542.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
index 2207799..33ec810 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF544_H
 #define _CDEF_BF544_H
 
-/* include all Core registers and bit definitions */
-#include "defBF544.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
index bc650e6..bcb9726 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF547_H
 #define _CDEF_BF547_H
 
-/* include all Core registers and bit definitions */
-#include "defBF547.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
index 3523e08..bae67a6 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF548_H
 #define _CDEF_BF548_H
 
-/* include all Core registers and bit definitions */
-#include "defBF548.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
index 80201ed..002136a 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF549_H
 #define _CDEF_BF549_H
 
-/* include all Core registers and bit definitions */
-#include "defBF549.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
index deaf5d6..50c89c8 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,10 +7,6 @@
 #ifndef _CDEF_BF54X_H
 #define _CDEF_BF54X_H
 
-#include <asm/blackfin.h>
-
-#include "defBF54x_base.h"
-
 /* ************************************************************** */
 /* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF54x    */
 /* ************************************************************** */
@@ -2633,22 +2629,5 @@
 
 /* Handshake MDMA is not defined in the shared file because it is not available on the ADSP-BF542 bfin_read_()rocessor */
 
-/* legacy definitions */
-#define bfin_read_EBIU_AMCBCTL0		bfin_read_EBIU_AMBCTL0
-#define bfin_write_EBIU_AMCBCTL0	bfin_write_EBIU_AMBCTL0
-#define bfin_read_EBIU_AMCBCTL1		bfin_read_EBIU_AMBCTL1
-#define bfin_write_EBIU_AMCBCTL1	bfin_write_EBIU_AMBCTL1
-#define bfin_read_PINT0_IRQ		bfin_read_PINT0_REQUEST
-#define bfin_write_PINT0_IRQ		bfin_write_PINT0_REQUEST
-#define bfin_read_PINT1_IRQ		bfin_read_PINT1_REQUEST
-#define bfin_write_PINT1_IRQ		bfin_write_PINT1_REQUEST
-#define bfin_read_PINT2_IRQ		bfin_read_PINT2_REQUEST
-#define bfin_write_PINT2_IRQ		bfin_write_PINT2_REQUEST
-#define bfin_read_PINT3_IRQ		bfin_read_PINT3_REQUEST
-#define bfin_write_PINT3_IRQ		bfin_write_PINT3_REQUEST
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif /* _CDEF_BF54X_H */
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF542.h b/arch/blackfin/mach-bf548/include/mach/defBF542.h
index abf5f75..629bf21 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF542.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF542_H
 #define _DEF_BF542_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index e277109..642468c 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF544_H
 #define _DEF_BF544_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index be21ba5..2f3337c 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF547_H
 #define _DEF_BF547_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h
index 3fb33b0..3c7f1b6 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF548_H
 #define _DEF_BF548_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h
index 5a04e6d..9a45cb6 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF549_H
 #define _DEF_BF549_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 78f9110..0867c2b 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -1615,14 +1615,14 @@
 #define                     CTYPE  0x40       /* DMA Channel Type */
 #define                      PMAP  0xf000     /* Peripheral Mapped To This Channel */
 
-/* Bit masks for DMACx_TCPER */
+/* Bit masks for DMACx_TC_PER */
 
 #define        DCB_TRAFFIC_PERIOD  0xf        /* DCB Traffic Control Period */
 #define        DEB_TRAFFIC_PERIOD  0xf0       /* DEB Traffic Control Period */
 #define        DAB_TRAFFIC_PERIOD  0x700      /* DAB Traffic Control Period */
 #define   MDMA_ROUND_ROBIN_PERIOD  0xf800     /* MDMA Round Robin Period */
 
-/* Bit masks for DMACx_TCCNT */
+/* Bit masks for DMACx_TC_CNT */
 
 #define         DCB_TRAFFIC_COUNT  0xf        /* DCB Traffic Control Count */
 #define         DEB_TRAFFIC_COUNT  0xf0       /* DEB Traffic Control Count */
@@ -2172,68 +2172,6 @@
 
 #define                 RCVDATA16  0xffff     /* Receive FIFO 16-Bit Data */
 
-/* Bit masks for UARTx_LCR */
-
-#if 0
-/* conflicts with legacy one in last section */
-#define                       WLS  0x3        /* Word Length Select */
-#endif
-#define                       STB  0x4        /* Stop Bits */
-#define                       PEN  0x8        /* Parity Enable */
-#define                       EPS  0x10       /* Even Parity Select */
-#define                       STP  0x20       /* Sticky Parity */
-#define                        SB  0x40       /* Set Break */
-
-/* Bit masks for UARTx_MCR */
-
-#define                      XOFF  0x1        /* Transmitter Off */
-#define                      MRTS  0x2        /* Manual Request To Send */
-#define                      RFIT  0x4        /* Receive FIFO IRQ Threshold */
-#define                      RFRT  0x8        /* Receive FIFO RTS Threshold */
-#define                  LOOP_ENA  0x10       /* Loopback Mode Enable */
-#define                     FCPOL  0x20       /* Flow Control Pin Polarity */
-#define                      ARTS  0x40       /* Automatic Request To Send */
-#define                      ACTS  0x80       /* Automatic Clear To Send */
-
-/* Bit masks for UARTx_LSR */
-
-#define                        DR  0x1        /* Data Ready */
-#define                        OE  0x2        /* Overrun Error */
-#define                        PE  0x4        /* Parity Error */
-#define                        FE  0x8        /* Framing Error */
-#define                        BI  0x10       /* Break Interrupt */
-#define                      THRE  0x20       /* THR Empty */
-#define                      TEMT  0x40       /* Transmitter Empty */
-#define                       TFI  0x80       /* Transmission Finished Indicator */
-
-/* Bit masks for UARTx_MSR */
-
-#define                      SCTS  0x1        /* Sticky CTS */
-#define                       CTS  0x10       /* Clear To Send */
-#define                      RFCS  0x20       /* Receive FIFO Count Status */
-
-/* Bit masks for UARTx_IER_SET & UARTx_IER_CLEAR */
-
-#define                   ERBFI  0x1        /* Enable Receive Buffer Full Interrupt */
-#define                   ETBEI  0x2        /* Enable Transmit Buffer Empty Interrupt */
-#define                    ELSI  0x4        /* Enable Receive Status Interrupt */
-#define                   EDSSI  0x8        /* Enable Modem Status Interrupt */
-#define                  EDTPTI  0x10       /* Enable DMA Transmit PIRQ Interrupt */
-#define                    ETFI  0x20       /* Enable Transmission Finished Interrupt */
-#define                   ERFCI  0x40       /* Enable Receive FIFO Count Interrupt */
-
-/* Bit masks for UARTx_GCTL */
-
-#define                      UCEN  0x1        /* UART Enable */
-#define                      IREN  0x2        /* IrDA Mode Enable */
-#define                     TPOLC  0x4        /* IrDA TX Polarity Change */
-#define                     RPOLC  0x8        /* IrDA RX Polarity Change */
-#define                       FPE  0x10       /* Force Parity Error */
-#define                       FFE  0x20       /* Force Framing Error */
-#define                      EDBO  0x40       /* Enable Divide-by-One */
-#define                     EGLSI  0x80       /* Enable Global LS Interrupt */
-
-
 /* ******************************************* */
 /*     MULTI BIT MACRO ENUMERATIONS            */
 /* ******************************************* */
@@ -2251,13 +2189,6 @@
 #define WDTH_CAP 0x0002
 #define EXT_CLK  0x0003
 
-/* UARTx_LCR bit field options */
-
-#define WLS_5   0x0000    /* 5 data bits */
-#define WLS_6   0x0001    /* 6 data bits */
-#define WLS_7   0x0002    /* 7 data bits */
-#define WLS_8   0x0003    /* 8 data bits */
-
 /* PINTx Register Bit Definitions */
 
 #define PIQ0 0x00000001
@@ -2300,240 +2231,6 @@
 #define PIQ30 0x40000000
 #define PIQ31 0x80000000
 
-/* PORT A Bit Definitions for the registers
-PORTA, PORTA_SET, PORTA_CLEAR,
-PORTA_DIR_SET, PORTA_DIR_CLEAR, PORTA_INEN,
-PORTA_FER registers
-*/
-
-#define PA0 0x0001
-#define PA1 0x0002
-#define PA2 0x0004
-#define PA3 0x0008
-#define PA4 0x0010
-#define PA5 0x0020
-#define PA6 0x0040
-#define PA7 0x0080
-#define PA8 0x0100
-#define PA9 0x0200
-#define PA10 0x0400
-#define PA11 0x0800
-#define PA12 0x1000
-#define PA13 0x2000
-#define PA14 0x4000
-#define PA15 0x8000
-
-/* PORT B Bit Definitions for the registers
-PORTB, PORTB_SET, PORTB_CLEAR,
-PORTB_DIR_SET, PORTB_DIR_CLEAR, PORTB_INEN,
-PORTB_FER registers
-*/
-
-#define PB0 0x0001
-#define PB1 0x0002
-#define PB2 0x0004
-#define PB3 0x0008
-#define PB4 0x0010
-#define PB5 0x0020
-#define PB6 0x0040
-#define PB7 0x0080
-#define PB8 0x0100
-#define PB9 0x0200
-#define PB10 0x0400
-#define PB11 0x0800
-#define PB12 0x1000
-#define PB13 0x2000
-#define PB14 0x4000
-
-
-/* PORT C Bit Definitions for the registers
-PORTC, PORTC_SET, PORTC_CLEAR,
-PORTC_DIR_SET, PORTC_DIR_CLEAR, PORTC_INEN,
-PORTC_FER registers
-*/
-
-
-#define PC0 0x0001
-#define PC1 0x0002
-#define PC2 0x0004
-#define PC3 0x0008
-#define PC4 0x0010
-#define PC5 0x0020
-#define PC6 0x0040
-#define PC7 0x0080
-#define PC8 0x0100
-#define PC9 0x0200
-#define PC10 0x0400
-#define PC11 0x0800
-#define PC12 0x1000
-#define PC13 0x2000
-
-
-/* PORT D Bit Definitions for the registers
-PORTD, PORTD_SET, PORTD_CLEAR,
-PORTD_DIR_SET, PORTD_DIR_CLEAR, PORTD_INEN,
-PORTD_FER registers
-*/
-
-#define PD0 0x0001
-#define PD1 0x0002
-#define PD2 0x0004
-#define PD3 0x0008
-#define PD4 0x0010
-#define PD5 0x0020
-#define PD6 0x0040
-#define PD7 0x0080
-#define PD8 0x0100
-#define PD9 0x0200
-#define PD10 0x0400
-#define PD11 0x0800
-#define PD12 0x1000
-#define PD13 0x2000
-#define PD14 0x4000
-#define PD15 0x8000
-
-/* PORT E Bit Definitions for the registers
-PORTE, PORTE_SET, PORTE_CLEAR,
-PORTE_DIR_SET, PORTE_DIR_CLEAR, PORTE_INEN,
-PORTE_FER registers
-*/
-
-
-#define PE0 0x0001
-#define PE1 0x0002
-#define PE2 0x0004
-#define PE3 0x0008
-#define PE4 0x0010
-#define PE5 0x0020
-#define PE6 0x0040
-#define PE7 0x0080
-#define PE8 0x0100
-#define PE9 0x0200
-#define PE10 0x0400
-#define PE11 0x0800
-#define PE12 0x1000
-#define PE13 0x2000
-#define PE14 0x4000
-#define PE15 0x8000
-
-/* PORT F Bit Definitions for the registers
-PORTF, PORTF_SET, PORTF_CLEAR,
-PORTF_DIR_SET, PORTF_DIR_CLEAR, PORTF_INEN,
-PORTF_FER registers
-*/
-
-
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* PORT G Bit Definitions for the registers
-PORTG, PORTG_SET, PORTG_CLEAR,
-PORTG_DIR_SET, PORTG_DIR_CLEAR, PORTG_INEN,
-PORTG_FER registers
-*/
-
-
-#define PG0 0x0001
-#define PG1 0x0002
-#define PG2 0x0004
-#define PG3 0x0008
-#define PG4 0x0010
-#define PG5 0x0020
-#define PG6 0x0040
-#define PG7 0x0080
-#define PG8 0x0100
-#define PG9 0x0200
-#define PG10 0x0400
-#define PG11 0x0800
-#define PG12 0x1000
-#define PG13 0x2000
-#define PG14 0x4000
-#define PG15 0x8000
-
-/* PORT H Bit Definitions for the registers
-PORTH, PORTH_SET, PORTH_CLEAR,
-PORTH_DIR_SET, PORTH_DIR_CLEAR, PORTH_INEN,
-PORTH_FER registers
-*/
-
-
-#define PH0 0x0001
-#define PH1 0x0002
-#define PH2 0x0004
-#define PH3 0x0008
-#define PH4 0x0010
-#define PH5 0x0020
-#define PH6 0x0040
-#define PH7 0x0080
-#define PH8 0x0100
-#define PH9 0x0200
-#define PH10 0x0400
-#define PH11 0x0800
-#define PH12 0x1000
-#define PH13 0x2000
-
-
-/* PORT I Bit Definitions for the registers
-PORTI, PORTI_SET, PORTI_CLEAR,
-PORTI_DIR_SET, PORTI_DIR_CLEAR, PORTI_INEN,
-PORTI_FER registers
-*/
-
-
-#define PI0 0x0001
-#define PI1 0x0002
-#define PI2 0x0004
-#define PI3 0x0008
-#define PI4 0x0010
-#define PI5 0x0020
-#define PI6 0x0040
-#define PI7 0x0080
-#define PI8 0x0100
-#define PI9 0x0200
-#define PI10 0x0400
-#define PI11 0x0800
-#define PI12 0x1000
-#define PI13 0x2000
-#define PI14 0x4000
-#define PI15 0x8000
-
-/* PORT J Bit Definitions for the registers
-PORTJ, PORTJ_SET, PORTJ_CLEAR,
-PORTJ_DIR_SET, PORTJ_DIR_CLEAR, PORTJ_INEN,
-PORTJ_FER registers
-*/
-
-
-#define PJ0 0x0001
-#define PJ1 0x0002
-#define PJ2 0x0004
-#define PJ3 0x0008
-#define PJ4 0x0010
-#define PJ5 0x0020
-#define PJ6 0x0040
-#define PJ7 0x0080
-#define PJ8 0x0100
-#define PJ9 0x0200
-#define PJ10 0x0400
-#define PJ11 0x0800
-#define PJ12 0x1000
-#define PJ13 0x2000
-
-
 /* Port Muxing Bit Fields for PORTx_MUX Registers */
 
 #define MUX0 0x00000003
@@ -2703,16 +2400,4 @@
 #define B3MAP_PIH 0x06000000 /* Map Port I High to Byte 3 */
 #define B3MAP_PJH 0x07000000 /* Map Port J High to Byte 3 */
 
-
-/* for legacy compatibility */
-
-#define WLS(x)  (((x)-5) & 0x03) /* Word Length Select */
-#define W1LMAX_MAX W1LMAX_MIN
-#define EBIU_AMCBCTL0 EBIU_AMBCTL0
-#define EBIU_AMCBCTL1 EBIU_AMBCTL1
-#define PINT0_IRQ PINT0_REQUEST
-#define PINT1_IRQ PINT1_REQUEST
-#define PINT2_IRQ PINT2_REQUEST
-#define PINT3_IRQ PINT3_REQUEST
-
 #endif /* _DEF_BF54X_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 28037e3..7db4335 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -200,4 +200,15 @@
 
 #endif
 
+#include <mach-common/ports-a.h>
+#include <mach-common/ports-b.h>
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+#include <mach-common/ports-i.h>
+#include <mach-common/ports-j.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 1f99b51..99fd1b2 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -474,4 +474,26 @@
 #define IRQ_PINT2_POS		24
 #define IRQ_PINT3_POS		28
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * bfin pint registers layout
+ */
+struct bfin_pint_regs {
+	u32 mask_set;
+	u32 mask_clear;
+	u32 irq;
+	u32 assign;
+	u32 edge_set;
+	u32 edge_clear;
+	u32 invert_set;
+	u32 invert_clear;
+	u32 pinstate;
+	u32 latch;
+	u32 __pad0[2];
+};
+
+#endif
+
 #endif /* _BF548_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/pll.h b/arch/blackfin/mach-bf548/include/mach/pll.h
index 7865a09..94cca67 100644
--- a/arch/blackfin/mach-bf548/include/mach/pll.h
+++ b/arch/blackfin/mach-bf548/include/mach/pll.h
@@ -1,69 +1 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1, iwr2;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	iwr2 = bfin_read32(SIC_IWR2);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-	bfin_write32(SIC_IWR2, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	bfin_write32(SIC_IWR2, iwr2);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1, iwr2;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	iwr2 = bfin_read32(SIC_IWR2);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-	bfin_write32(SIC_IWR2, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	bfin_write32(SIC_IWR2, iwr2);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index f99f174..52d6f73 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -49,6 +49,7 @@
 	jump .Lretry_corelock
 .Ldone_corelock:
 	p0 = r1;
+	/* flush the core internal write buffer before invalidating the dcache */
 	CSYNC(r2);
 	flushinv[p0];
 	SSYNC(r2);
@@ -685,6 +686,8 @@
 	r1 = -L1_CACHE_BYTES;
 	r1 = r0 & r1;
 	p0 = r1;
+	/* flush the core internal write buffer before invalidating the dcache */
+	CSYNC(r2);
 	flushinv[p0];
 	SSYNC(r2);
 	r0 = [p1];
@@ -907,6 +910,8 @@
 	r1 = -L1_CACHE_BYTES;
 	r1 = r0 & r1;
 	p0 = r1;
+	/* flush the core internal write buffer before invalidating the dcache */
+	CSYNC(r2);
 	flushinv[p0];
 	SSYNC(r2);
 	r0 = [p1];
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 0b1c20f..3926cd9 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -224,7 +224,7 @@
 	 },
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 087b6b0..3b67929 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -334,7 +334,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index ab7a487..f667e77 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -190,7 +190,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index d3017e5..bb056e6 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -72,7 +72,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/dma.c b/arch/blackfin/mach-bf561/dma.c
index c938c3c..8ffdd6b 100644
--- a/arch/blackfin/mach-bf561/dma.c
+++ b/arch/blackfin/mach-bf561/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA1_0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_2_NEXT_DESC_PTR,
@@ -36,14 +36,14 @@
 	(struct dma_register *) DMA2_9_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_10_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_11_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
 	(struct dma_register *) IMDMA_D0_NEXT_DESC_PTR,
 	(struct dma_register *) IMDMA_S0_NEXT_DESC_PTR,
 	(struct dma_register *) IMDMA_D1_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf561/hotplug.c b/arch/blackfin/mach-bf561/hotplug.c
index c95169b..4cd3b28 100644
--- a/arch/blackfin/mach-bf561/hotplug.c
+++ b/arch/blackfin/mach-bf561/hotplug.c
@@ -6,7 +6,9 @@
  */
 
 #include <asm/blackfin.h>
+#include <asm/irq.h>
 #include <asm/smp.h>
+
 #define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
 
 int hotplug_coreb;
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 4c108c9..6a3499b 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -181,7 +181,11 @@
 /* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
 #define ANOMALY_05000254 (__SILICON_REVISION__ > 3)
 /* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
-#define ANOMALY_05000257 (__SILICON_REVISION__ < 5)
+/* Temporary workaround for kgdb bug 6333 in the SMP kernel. It looks like Core B hangs in an
+ * exception without handling anomaly 05000257 properly on bf561 v0.5. This workaround may
+ * change after the behavior and the root cause are confirmed with the hardware team.
+ */
+#define ANOMALY_05000257 (__SILICON_REVISION__ < 5 || (__SILICON_REVISION__ == 5 && CONFIG_SMP))
 /* Instruction Cache Is Corrupted When Bits 9 and 12 of the ICPLB Data Registers Differ */
 #define ANOMALY_05000258 (__SILICON_REVISION__ < 5)
 /* ICPLB_STATUS MMR Register May Be Corrupted */
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
new file mode 100644
index 0000000..08072c8
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	1
+
+#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
index e33e158..3a69474 100644
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #ifdef CONFIG_BFIN_UART0_CTSRTS
 # define CONFIG_SERIAL_BFIN_CTSRTS
 # ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#else
-# if ANOMALY_05000363
-	unsigned int anomaly_threshold;
-# endif
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list       cts_timer;
-	int			cts_pin;
-	int			rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -120,3 +48,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index 6c7dc58..dc47053 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -10,11 +10,14 @@
 #define BF561_FAMILY
 
 #include "bf561.h"
-#include "defBF561.h"
 #include "anomaly.h"
 
-#if !defined(__ASSEMBLY__)
-#include "cdefBF561.h"
+#include <asm/def_LPBlackfin.h>
+#include "defBF561.h"
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# include "cdefBF561.h"
 #endif
 
 #define bfin_read_FIO_FLAG_D() bfin_read_FIO0_FLAG_D()
@@ -35,19 +38,4 @@
 #define bfin_read_SICB_ISR(x)		bfin_read32(__SIC_MUX(SICB_ISR0, x))
 #define bfin_write_SICB_ISR(x, val)	bfin_write32(__SIC_MUX(SICB_ISR0, x), val)
 
-#define BFIN_UART_NR_PORTS      1
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
 #endif				/* _MACH_BLACKFIN_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
index 2bab991..7533315 100644
--- a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF561_H
 #define _CDEF_BF561_H
 
-#include <asm/blackfin.h>
-
-/* include all Core registers and bit definitions */
-#include "defBF561.h"
-
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 /*********************************************************************************** */
 /* System MMR Register Map */
 /*********************************************************************************** */
@@ -523,14 +515,14 @@
 #define bfin_read_PPI1_FRAME()               bfin_read16(PPI1_FRAME)
 #define bfin_write_PPI1_FRAME(val)           bfin_write16(PPI1_FRAME,val)
 /*DMA traffic control registers */
-#define bfin_read_DMA1_TC_PER()              bfin_read16(DMA1_TC_PER)
-#define bfin_write_DMA1_TC_PER(val)          bfin_write16(DMA1_TC_PER,val)
-#define bfin_read_DMA1_TC_CNT()              bfin_read16(DMA1_TC_CNT)
-#define bfin_write_DMA1_TC_CNT(val)          bfin_write16(DMA1_TC_CNT,val)
-#define bfin_read_DMA2_TC_PER()              bfin_read16(DMA2_TC_PER)
-#define bfin_write_DMA2_TC_PER(val)          bfin_write16(DMA2_TC_PER,val)
-#define bfin_read_DMA2_TC_CNT()              bfin_read16(DMA2_TC_CNT)
-#define bfin_write_DMA2_TC_CNT(val)          bfin_write16(DMA2_TC_CNT,val)
+#define bfin_read_DMAC0_TC_PER()             bfin_read16(DMAC0_TC_PER)
+#define bfin_write_DMAC0_TC_PER(val)         bfin_write16(DMAC0_TC_PER,val)
+#define bfin_read_DMAC0_TC_CNT()             bfin_read16(DMAC0_TC_CNT)
+#define bfin_write_DMAC0_TC_CNT(val)         bfin_write16(DMAC0_TC_CNT,val)
+#define bfin_read_DMAC1_TC_PER()             bfin_read16(DMAC1_TC_PER)
+#define bfin_write_DMAC1_TC_PER(val)         bfin_write16(DMAC1_TC_PER,val)
+#define bfin_read_DMAC1_TC_CNT()             bfin_read16(DMAC1_TC_CNT)
+#define bfin_write_DMAC1_TC_CNT(val)         bfin_write16(DMAC1_TC_CNT,val)
 /* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
 #define bfin_read_DMA1_0_CONFIG()            bfin_read16(DMA1_0_CONFIG)
 #define bfin_write_DMA1_0_CONFIG(val)        bfin_write16(DMA1_0_CONFIG,val)
@@ -845,110 +837,110 @@
 #define bfin_read_DMA1_11_PERIPHERAL_MAP()   bfin_read16(DMA1_11_PERIPHERAL_MAP)
 #define bfin_write_DMA1_11_PERIPHERAL_MAP(val) bfin_write16(DMA1_11_PERIPHERAL_MAP,val)
 /* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
-#define bfin_read_MDMA1_D0_CONFIG()          bfin_read16(MDMA1_D0_CONFIG)
-#define bfin_write_MDMA1_D0_CONFIG(val)      bfin_write16(MDMA1_D0_CONFIG,val)
-#define bfin_read_MDMA1_D0_NEXT_DESC_PTR()   bfin_read32(MDMA1_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_D0_START_ADDR()      bfin_read32(MDMA1_D0_START_ADDR)
-#define bfin_write_MDMA1_D0_START_ADDR(val)  bfin_write32(MDMA1_D0_START_ADDR,val)
-#define bfin_read_MDMA1_D0_X_COUNT()         bfin_read16(MDMA1_D0_X_COUNT)
-#define bfin_write_MDMA1_D0_X_COUNT(val)     bfin_write16(MDMA1_D0_X_COUNT,val)
-#define bfin_read_MDMA1_D0_Y_COUNT()         bfin_read16(MDMA1_D0_Y_COUNT)
-#define bfin_write_MDMA1_D0_Y_COUNT(val)     bfin_write16(MDMA1_D0_Y_COUNT,val)
-#define bfin_read_MDMA1_D0_X_MODIFY()        bfin_read16(MDMA1_D0_X_MODIFY)
-#define bfin_write_MDMA1_D0_X_MODIFY(val)    bfin_write16(MDMA1_D0_X_MODIFY,val)
-#define bfin_read_MDMA1_D0_Y_MODIFY()        bfin_read16(MDMA1_D0_Y_MODIFY)
-#define bfin_write_MDMA1_D0_Y_MODIFY(val)    bfin_write16(MDMA1_D0_Y_MODIFY,val)
-#define bfin_read_MDMA1_D0_CURR_DESC_PTR()   bfin_read32(MDMA1_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_write32(MDMA1_D0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_D0_CURR_ADDR()       bfin_read32(MDMA1_D0_CURR_ADDR)
-#define bfin_write_MDMA1_D0_CURR_ADDR(val)   bfin_write32(MDMA1_D0_CURR_ADDR,val)
-#define bfin_read_MDMA1_D0_CURR_X_COUNT()    bfin_read16(MDMA1_D0_CURR_X_COUNT)
-#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_D0_CURR_Y_COUNT()    bfin_read16(MDMA1_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_D0_IRQ_STATUS()      bfin_read16(MDMA1_D0_IRQ_STATUS)
-#define bfin_write_MDMA1_D0_IRQ_STATUS(val)  bfin_write16(MDMA1_D0_IRQ_STATUS,val)
-#define bfin_read_MDMA1_D0_PERIPHERAL_MAP()  bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_S0_CONFIG()          bfin_read16(MDMA1_S0_CONFIG)
-#define bfin_write_MDMA1_S0_CONFIG(val)      bfin_write16(MDMA1_S0_CONFIG,val)
-#define bfin_read_MDMA1_S0_NEXT_DESC_PTR()   bfin_read32(MDMA1_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_S0_START_ADDR()      bfin_read32(MDMA1_S0_START_ADDR)
-#define bfin_write_MDMA1_S0_START_ADDR(val)  bfin_write32(MDMA1_S0_START_ADDR,val)
-#define bfin_read_MDMA1_S0_X_COUNT()         bfin_read16(MDMA1_S0_X_COUNT)
-#define bfin_write_MDMA1_S0_X_COUNT(val)     bfin_write16(MDMA1_S0_X_COUNT,val)
-#define bfin_read_MDMA1_S0_Y_COUNT()         bfin_read16(MDMA1_S0_Y_COUNT)
-#define bfin_write_MDMA1_S0_Y_COUNT(val)     bfin_write16(MDMA1_S0_Y_COUNT,val)
-#define bfin_read_MDMA1_S0_X_MODIFY()        bfin_read16(MDMA1_S0_X_MODIFY)
-#define bfin_write_MDMA1_S0_X_MODIFY(val)    bfin_write16(MDMA1_S0_X_MODIFY,val)
-#define bfin_read_MDMA1_S0_Y_MODIFY()        bfin_read16(MDMA1_S0_Y_MODIFY)
-#define bfin_write_MDMA1_S0_Y_MODIFY(val)    bfin_write16(MDMA1_S0_Y_MODIFY,val)
-#define bfin_read_MDMA1_S0_CURR_DESC_PTR()   bfin_read32(MDMA1_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_write32(MDMA1_S0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_S0_CURR_ADDR()       bfin_read32(MDMA1_S0_CURR_ADDR)
-#define bfin_write_MDMA1_S0_CURR_ADDR(val)   bfin_write32(MDMA1_S0_CURR_ADDR,val)
-#define bfin_read_MDMA1_S0_CURR_X_COUNT()    bfin_read16(MDMA1_S0_CURR_X_COUNT)
-#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_S0_CURR_Y_COUNT()    bfin_read16(MDMA1_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_S0_IRQ_STATUS()      bfin_read16(MDMA1_S0_IRQ_STATUS)
-#define bfin_write_MDMA1_S0_IRQ_STATUS(val)  bfin_write16(MDMA1_S0_IRQ_STATUS,val)
-#define bfin_read_MDMA1_S0_PERIPHERAL_MAP()  bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_D1_CONFIG()          bfin_read16(MDMA1_D1_CONFIG)
-#define bfin_write_MDMA1_D1_CONFIG(val)      bfin_write16(MDMA1_D1_CONFIG,val)
-#define bfin_read_MDMA1_D1_NEXT_DESC_PTR()   bfin_read32(MDMA1_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_D1_START_ADDR()      bfin_read32(MDMA1_D1_START_ADDR)
-#define bfin_write_MDMA1_D1_START_ADDR(val)  bfin_write32(MDMA1_D1_START_ADDR,val)
-#define bfin_read_MDMA1_D1_X_COUNT()         bfin_read16(MDMA1_D1_X_COUNT)
-#define bfin_write_MDMA1_D1_X_COUNT(val)     bfin_write16(MDMA1_D1_X_COUNT,val)
-#define bfin_read_MDMA1_D1_Y_COUNT()         bfin_read16(MDMA1_D1_Y_COUNT)
-#define bfin_write_MDMA1_D1_Y_COUNT(val)     bfin_write16(MDMA1_D1_Y_COUNT,val)
-#define bfin_read_MDMA1_D1_X_MODIFY()        bfin_read16(MDMA1_D1_X_MODIFY)
-#define bfin_write_MDMA1_D1_X_MODIFY(val)    bfin_write16(MDMA1_D1_X_MODIFY,val)
-#define bfin_read_MDMA1_D1_Y_MODIFY()        bfin_read16(MDMA1_D1_Y_MODIFY)
-#define bfin_write_MDMA1_D1_Y_MODIFY(val)    bfin_write16(MDMA1_D1_Y_MODIFY,val)
-#define bfin_read_MDMA1_D1_CURR_DESC_PTR()   bfin_read32(MDMA1_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_write32(MDMA1_D1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_D1_CURR_ADDR()       bfin_read32(MDMA1_D1_CURR_ADDR)
-#define bfin_write_MDMA1_D1_CURR_ADDR(val)   bfin_write32(MDMA1_D1_CURR_ADDR,val)
-#define bfin_read_MDMA1_D1_CURR_X_COUNT()    bfin_read16(MDMA1_D1_CURR_X_COUNT)
-#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_D1_CURR_Y_COUNT()    bfin_read16(MDMA1_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_D1_IRQ_STATUS()      bfin_read16(MDMA1_D1_IRQ_STATUS)
-#define bfin_write_MDMA1_D1_IRQ_STATUS(val)  bfin_write16(MDMA1_D1_IRQ_STATUS,val)
-#define bfin_read_MDMA1_D1_PERIPHERAL_MAP()  bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_S1_CONFIG()          bfin_read16(MDMA1_S1_CONFIG)
-#define bfin_write_MDMA1_S1_CONFIG(val)      bfin_write16(MDMA1_S1_CONFIG,val)
-#define bfin_read_MDMA1_S1_NEXT_DESC_PTR()   bfin_read32(MDMA1_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_S1_START_ADDR()      bfin_read32(MDMA1_S1_START_ADDR)
-#define bfin_write_MDMA1_S1_START_ADDR(val)  bfin_write32(MDMA1_S1_START_ADDR,val)
-#define bfin_read_MDMA1_S1_X_COUNT()         bfin_read16(MDMA1_S1_X_COUNT)
-#define bfin_write_MDMA1_S1_X_COUNT(val)     bfin_write16(MDMA1_S1_X_COUNT,val)
-#define bfin_read_MDMA1_S1_Y_COUNT()         bfin_read16(MDMA1_S1_Y_COUNT)
-#define bfin_write_MDMA1_S1_Y_COUNT(val)     bfin_write16(MDMA1_S1_Y_COUNT,val)
-#define bfin_read_MDMA1_S1_X_MODIFY()        bfin_read16(MDMA1_S1_X_MODIFY)
-#define bfin_write_MDMA1_S1_X_MODIFY(val)    bfin_write16(MDMA1_S1_X_MODIFY,val)
-#define bfin_read_MDMA1_S1_Y_MODIFY()        bfin_read16(MDMA1_S1_Y_MODIFY)
-#define bfin_write_MDMA1_S1_Y_MODIFY(val)    bfin_write16(MDMA1_S1_Y_MODIFY,val)
-#define bfin_read_MDMA1_S1_CURR_DESC_PTR()   bfin_read32(MDMA1_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_write32(MDMA1_S1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_S1_CURR_ADDR()       bfin_read32(MDMA1_S1_CURR_ADDR)
-#define bfin_write_MDMA1_S1_CURR_ADDR(val)   bfin_write32(MDMA1_S1_CURR_ADDR,val)
-#define bfin_read_MDMA1_S1_CURR_X_COUNT()    bfin_read16(MDMA1_S1_CURR_X_COUNT)
-#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_S1_CURR_Y_COUNT()    bfin_read16(MDMA1_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_S1_IRQ_STATUS()      bfin_read16(MDMA1_S1_IRQ_STATUS)
-#define bfin_write_MDMA1_S1_IRQ_STATUS(val)  bfin_write16(MDMA1_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA1_S1_PERIPHERAL_MAP()  bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D2_CONFIG()          bfin_read16(MDMA_D2_CONFIG)
+#define bfin_write_MDMA_D2_CONFIG(val)      bfin_write16(MDMA_D2_CONFIG,val)
+#define bfin_read_MDMA_D2_NEXT_DESC_PTR()   bfin_read32(MDMA_D2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_write32(MDMA_D2_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D2_START_ADDR()      bfin_read32(MDMA_D2_START_ADDR)
+#define bfin_write_MDMA_D2_START_ADDR(val)  bfin_write32(MDMA_D2_START_ADDR,val)
+#define bfin_read_MDMA_D2_X_COUNT()         bfin_read16(MDMA_D2_X_COUNT)
+#define bfin_write_MDMA_D2_X_COUNT(val)     bfin_write16(MDMA_D2_X_COUNT,val)
+#define bfin_read_MDMA_D2_Y_COUNT()         bfin_read16(MDMA_D2_Y_COUNT)
+#define bfin_write_MDMA_D2_Y_COUNT(val)     bfin_write16(MDMA_D2_Y_COUNT,val)
+#define bfin_read_MDMA_D2_X_MODIFY()        bfin_read16(MDMA_D2_X_MODIFY)
+#define bfin_write_MDMA_D2_X_MODIFY(val)    bfin_write16(MDMA_D2_X_MODIFY,val)
+#define bfin_read_MDMA_D2_Y_MODIFY()        bfin_read16(MDMA_D2_Y_MODIFY)
+#define bfin_write_MDMA_D2_Y_MODIFY(val)    bfin_write16(MDMA_D2_Y_MODIFY,val)
+#define bfin_read_MDMA_D2_CURR_DESC_PTR()   bfin_read32(MDMA_D2_CURR_DESC_PTR)
+#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_write32(MDMA_D2_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D2_CURR_ADDR()       bfin_read32(MDMA_D2_CURR_ADDR)
+#define bfin_write_MDMA_D2_CURR_ADDR(val)   bfin_write32(MDMA_D2_CURR_ADDR,val)
+#define bfin_read_MDMA_D2_CURR_X_COUNT()    bfin_read16(MDMA_D2_CURR_X_COUNT)
+#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D2_CURR_Y_COUNT()    bfin_read16(MDMA_D2_CURR_Y_COUNT)
+#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D2_IRQ_STATUS()      bfin_read16(MDMA_D2_IRQ_STATUS)
+#define bfin_write_MDMA_D2_IRQ_STATUS(val)  bfin_write16(MDMA_D2_IRQ_STATUS,val)
+#define bfin_read_MDMA_D2_PERIPHERAL_MAP()  bfin_read16(MDMA_D2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S2_CONFIG()          bfin_read16(MDMA_S2_CONFIG)
+#define bfin_write_MDMA_S2_CONFIG(val)      bfin_write16(MDMA_S2_CONFIG,val)
+#define bfin_read_MDMA_S2_NEXT_DESC_PTR()   bfin_read32(MDMA_S2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_write32(MDMA_S2_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S2_START_ADDR()      bfin_read32(MDMA_S2_START_ADDR)
+#define bfin_write_MDMA_S2_START_ADDR(val)  bfin_write32(MDMA_S2_START_ADDR,val)
+#define bfin_read_MDMA_S2_X_COUNT()         bfin_read16(MDMA_S2_X_COUNT)
+#define bfin_write_MDMA_S2_X_COUNT(val)     bfin_write16(MDMA_S2_X_COUNT,val)
+#define bfin_read_MDMA_S2_Y_COUNT()         bfin_read16(MDMA_S2_Y_COUNT)
+#define bfin_write_MDMA_S2_Y_COUNT(val)     bfin_write16(MDMA_S2_Y_COUNT,val)
+#define bfin_read_MDMA_S2_X_MODIFY()        bfin_read16(MDMA_S2_X_MODIFY)
+#define bfin_write_MDMA_S2_X_MODIFY(val)    bfin_write16(MDMA_S2_X_MODIFY,val)
+#define bfin_read_MDMA_S2_Y_MODIFY()        bfin_read16(MDMA_S2_Y_MODIFY)
+#define bfin_write_MDMA_S2_Y_MODIFY(val)    bfin_write16(MDMA_S2_Y_MODIFY,val)
+#define bfin_read_MDMA_S2_CURR_DESC_PTR()   bfin_read32(MDMA_S2_CURR_DESC_PTR)
+#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_write32(MDMA_S2_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S2_CURR_ADDR()       bfin_read32(MDMA_S2_CURR_ADDR)
+#define bfin_write_MDMA_S2_CURR_ADDR(val)   bfin_write32(MDMA_S2_CURR_ADDR,val)
+#define bfin_read_MDMA_S2_CURR_X_COUNT()    bfin_read16(MDMA_S2_CURR_X_COUNT)
+#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S2_CURR_Y_COUNT()    bfin_read16(MDMA_S2_CURR_Y_COUNT)
+#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S2_IRQ_STATUS()      bfin_read16(MDMA_S2_IRQ_STATUS)
+#define bfin_write_MDMA_S2_IRQ_STATUS(val)  bfin_write16(MDMA_S2_IRQ_STATUS,val)
+#define bfin_read_MDMA_S2_PERIPHERAL_MAP()  bfin_read16(MDMA_S2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D3_CONFIG()          bfin_read16(MDMA_D3_CONFIG)
+#define bfin_write_MDMA_D3_CONFIG(val)      bfin_write16(MDMA_D3_CONFIG,val)
+#define bfin_read_MDMA_D3_NEXT_DESC_PTR()   bfin_read32(MDMA_D3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_write32(MDMA_D3_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D3_START_ADDR()      bfin_read32(MDMA_D3_START_ADDR)
+#define bfin_write_MDMA_D3_START_ADDR(val)  bfin_write32(MDMA_D3_START_ADDR,val)
+#define bfin_read_MDMA_D3_X_COUNT()         bfin_read16(MDMA_D3_X_COUNT)
+#define bfin_write_MDMA_D3_X_COUNT(val)     bfin_write16(MDMA_D3_X_COUNT,val)
+#define bfin_read_MDMA_D3_Y_COUNT()         bfin_read16(MDMA_D3_Y_COUNT)
+#define bfin_write_MDMA_D3_Y_COUNT(val)     bfin_write16(MDMA_D3_Y_COUNT,val)
+#define bfin_read_MDMA_D3_X_MODIFY()        bfin_read16(MDMA_D3_X_MODIFY)
+#define bfin_write_MDMA_D3_X_MODIFY(val)    bfin_write16(MDMA_D3_X_MODIFY,val)
+#define bfin_read_MDMA_D3_Y_MODIFY()        bfin_read16(MDMA_D3_Y_MODIFY)
+#define bfin_write_MDMA_D3_Y_MODIFY(val)    bfin_write16(MDMA_D3_Y_MODIFY,val)
+#define bfin_read_MDMA_D3_CURR_DESC_PTR()   bfin_read32(MDMA_D3_CURR_DESC_PTR)
+#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_write32(MDMA_D3_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D3_CURR_ADDR()       bfin_read32(MDMA_D3_CURR_ADDR)
+#define bfin_write_MDMA_D3_CURR_ADDR(val)   bfin_write32(MDMA_D3_CURR_ADDR,val)
+#define bfin_read_MDMA_D3_CURR_X_COUNT()    bfin_read16(MDMA_D3_CURR_X_COUNT)
+#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D3_CURR_Y_COUNT()    bfin_read16(MDMA_D3_CURR_Y_COUNT)
+#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D3_IRQ_STATUS()      bfin_read16(MDMA_D3_IRQ_STATUS)
+#define bfin_write_MDMA_D3_IRQ_STATUS(val)  bfin_write16(MDMA_D3_IRQ_STATUS,val)
+#define bfin_read_MDMA_D3_PERIPHERAL_MAP()  bfin_read16(MDMA_D3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S3_CONFIG()          bfin_read16(MDMA_S3_CONFIG)
+#define bfin_write_MDMA_S3_CONFIG(val)      bfin_write16(MDMA_S3_CONFIG,val)
+#define bfin_read_MDMA_S3_NEXT_DESC_PTR()   bfin_read32(MDMA_S3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_write32(MDMA_S3_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S3_START_ADDR()      bfin_read32(MDMA_S3_START_ADDR)
+#define bfin_write_MDMA_S3_START_ADDR(val)  bfin_write32(MDMA_S3_START_ADDR,val)
+#define bfin_read_MDMA_S3_X_COUNT()         bfin_read16(MDMA_S3_X_COUNT)
+#define bfin_write_MDMA_S3_X_COUNT(val)     bfin_write16(MDMA_S3_X_COUNT,val)
+#define bfin_read_MDMA_S3_Y_COUNT()         bfin_read16(MDMA_S3_Y_COUNT)
+#define bfin_write_MDMA_S3_Y_COUNT(val)     bfin_write16(MDMA_S3_Y_COUNT,val)
+#define bfin_read_MDMA_S3_X_MODIFY()        bfin_read16(MDMA_S3_X_MODIFY)
+#define bfin_write_MDMA_S3_X_MODIFY(val)    bfin_write16(MDMA_S3_X_MODIFY,val)
+#define bfin_read_MDMA_S3_Y_MODIFY()        bfin_read16(MDMA_S3_Y_MODIFY)
+#define bfin_write_MDMA_S3_Y_MODIFY(val)    bfin_write16(MDMA_S3_Y_MODIFY,val)
+#define bfin_read_MDMA_S3_CURR_DESC_PTR()   bfin_read32(MDMA_S3_CURR_DESC_PTR)
+#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_write32(MDMA_S3_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S3_CURR_ADDR()       bfin_read32(MDMA_S3_CURR_ADDR)
+#define bfin_write_MDMA_S3_CURR_ADDR(val)   bfin_write32(MDMA_S3_CURR_ADDR,val)
+#define bfin_read_MDMA_S3_CURR_X_COUNT()    bfin_read16(MDMA_S3_CURR_X_COUNT)
+#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S3_CURR_Y_COUNT()    bfin_read16(MDMA_S3_CURR_Y_COUNT)
+#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S3_IRQ_STATUS()      bfin_read16(MDMA_S3_IRQ_STATUS)
+#define bfin_write_MDMA_S3_IRQ_STATUS(val)  bfin_write16(MDMA_S3_IRQ_STATUS,val)
+#define bfin_read_MDMA_S3_PERIPHERAL_MAP()  bfin_read16(MDMA_S3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP,val)
 /* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
 #define bfin_read_DMA2_0_CONFIG()            bfin_read16(DMA2_0_CONFIG)
 #define bfin_write_DMA2_0_CONFIG(val)        bfin_write16(DMA2_0_CONFIG,val)
@@ -1263,110 +1255,110 @@
 #define bfin_read_DMA2_11_PERIPHERAL_MAP()   bfin_read16(DMA2_11_PERIPHERAL_MAP)
 #define bfin_write_DMA2_11_PERIPHERAL_MAP(val) bfin_write16(DMA2_11_PERIPHERAL_MAP,val)
 /* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
-#define bfin_read_MDMA2_D0_CONFIG()          bfin_read16(MDMA2_D0_CONFIG)
-#define bfin_write_MDMA2_D0_CONFIG(val)      bfin_write16(MDMA2_D0_CONFIG,val)
-#define bfin_read_MDMA2_D0_NEXT_DESC_PTR()   bfin_read32(MDMA2_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_D0_START_ADDR()      bfin_read32(MDMA2_D0_START_ADDR)
-#define bfin_write_MDMA2_D0_START_ADDR(val)  bfin_write32(MDMA2_D0_START_ADDR,val)
-#define bfin_read_MDMA2_D0_X_COUNT()         bfin_read16(MDMA2_D0_X_COUNT)
-#define bfin_write_MDMA2_D0_X_COUNT(val)     bfin_write16(MDMA2_D0_X_COUNT,val)
-#define bfin_read_MDMA2_D0_Y_COUNT()         bfin_read16(MDMA2_D0_Y_COUNT)
-#define bfin_write_MDMA2_D0_Y_COUNT(val)     bfin_write16(MDMA2_D0_Y_COUNT,val)
-#define bfin_read_MDMA2_D0_X_MODIFY()        bfin_read16(MDMA2_D0_X_MODIFY)
-#define bfin_write_MDMA2_D0_X_MODIFY(val)    bfin_write16(MDMA2_D0_X_MODIFY,val)
-#define bfin_read_MDMA2_D0_Y_MODIFY()        bfin_read16(MDMA2_D0_Y_MODIFY)
-#define bfin_write_MDMA2_D0_Y_MODIFY(val)    bfin_write16(MDMA2_D0_Y_MODIFY,val)
-#define bfin_read_MDMA2_D0_CURR_DESC_PTR()   bfin_read32(MDMA2_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA2_D0_CURR_DESC_PTR(val) bfin_write32(MDMA2_D0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_D0_CURR_ADDR()       bfin_read32(MDMA2_D0_CURR_ADDR)
-#define bfin_write_MDMA2_D0_CURR_ADDR(val)   bfin_write32(MDMA2_D0_CURR_ADDR,val)
-#define bfin_read_MDMA2_D0_CURR_X_COUNT()    bfin_read16(MDMA2_D0_CURR_X_COUNT)
-#define bfin_write_MDMA2_D0_CURR_X_COUNT(val) bfin_write16(MDMA2_D0_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_D0_CURR_Y_COUNT()    bfin_read16(MDMA2_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA2_D0_CURR_Y_COUNT(val) bfin_write16(MDMA2_D0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_D0_IRQ_STATUS()      bfin_read16(MDMA2_D0_IRQ_STATUS)
-#define bfin_write_MDMA2_D0_IRQ_STATUS(val)  bfin_write16(MDMA2_D0_IRQ_STATUS,val)
-#define bfin_read_MDMA2_D0_PERIPHERAL_MAP()  bfin_read16(MDMA2_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_S0_CONFIG()          bfin_read16(MDMA2_S0_CONFIG)
-#define bfin_write_MDMA2_S0_CONFIG(val)      bfin_write16(MDMA2_S0_CONFIG,val)
-#define bfin_read_MDMA2_S0_NEXT_DESC_PTR()   bfin_read32(MDMA2_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_S0_START_ADDR()      bfin_read32(MDMA2_S0_START_ADDR)
-#define bfin_write_MDMA2_S0_START_ADDR(val)  bfin_write32(MDMA2_S0_START_ADDR,val)
-#define bfin_read_MDMA2_S0_X_COUNT()         bfin_read16(MDMA2_S0_X_COUNT)
-#define bfin_write_MDMA2_S0_X_COUNT(val)     bfin_write16(MDMA2_S0_X_COUNT,val)
-#define bfin_read_MDMA2_S0_Y_COUNT()         bfin_read16(MDMA2_S0_Y_COUNT)
-#define bfin_write_MDMA2_S0_Y_COUNT(val)     bfin_write16(MDMA2_S0_Y_COUNT,val)
-#define bfin_read_MDMA2_S0_X_MODIFY()        bfin_read16(MDMA2_S0_X_MODIFY)
-#define bfin_write_MDMA2_S0_X_MODIFY(val)    bfin_write16(MDMA2_S0_X_MODIFY,val)
-#define bfin_read_MDMA2_S0_Y_MODIFY()        bfin_read16(MDMA2_S0_Y_MODIFY)
-#define bfin_write_MDMA2_S0_Y_MODIFY(val)    bfin_write16(MDMA2_S0_Y_MODIFY,val)
-#define bfin_read_MDMA2_S0_CURR_DESC_PTR()   bfin_read32(MDMA2_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA2_S0_CURR_DESC_PTR(val) bfin_write32(MDMA2_S0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_S0_CURR_ADDR()       bfin_read32(MDMA2_S0_CURR_ADDR)
-#define bfin_write_MDMA2_S0_CURR_ADDR(val)   bfin_write32(MDMA2_S0_CURR_ADDR,val)
-#define bfin_read_MDMA2_S0_CURR_X_COUNT()    bfin_read16(MDMA2_S0_CURR_X_COUNT)
-#define bfin_write_MDMA2_S0_CURR_X_COUNT(val) bfin_write16(MDMA2_S0_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_S0_CURR_Y_COUNT()    bfin_read16(MDMA2_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA2_S0_CURR_Y_COUNT(val) bfin_write16(MDMA2_S0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_S0_IRQ_STATUS()      bfin_read16(MDMA2_S0_IRQ_STATUS)
-#define bfin_write_MDMA2_S0_IRQ_STATUS(val)  bfin_write16(MDMA2_S0_IRQ_STATUS,val)
-#define bfin_read_MDMA2_S0_PERIPHERAL_MAP()  bfin_read16(MDMA2_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_D1_CONFIG()          bfin_read16(MDMA2_D1_CONFIG)
-#define bfin_write_MDMA2_D1_CONFIG(val)      bfin_write16(MDMA2_D1_CONFIG,val)
-#define bfin_read_MDMA2_D1_NEXT_DESC_PTR()   bfin_read32(MDMA2_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_D1_START_ADDR()      bfin_read32(MDMA2_D1_START_ADDR)
-#define bfin_write_MDMA2_D1_START_ADDR(val)  bfin_write32(MDMA2_D1_START_ADDR,val)
-#define bfin_read_MDMA2_D1_X_COUNT()         bfin_read16(MDMA2_D1_X_COUNT)
-#define bfin_write_MDMA2_D1_X_COUNT(val)     bfin_write16(MDMA2_D1_X_COUNT,val)
-#define bfin_read_MDMA2_D1_Y_COUNT()         bfin_read16(MDMA2_D1_Y_COUNT)
-#define bfin_write_MDMA2_D1_Y_COUNT(val)     bfin_write16(MDMA2_D1_Y_COUNT,val)
-#define bfin_read_MDMA2_D1_X_MODIFY()        bfin_read16(MDMA2_D1_X_MODIFY)
-#define bfin_write_MDMA2_D1_X_MODIFY(val)    bfin_write16(MDMA2_D1_X_MODIFY,val)
-#define bfin_read_MDMA2_D1_Y_MODIFY()        bfin_read16(MDMA2_D1_Y_MODIFY)
-#define bfin_write_MDMA2_D1_Y_MODIFY(val)    bfin_write16(MDMA2_D1_Y_MODIFY,val)
-#define bfin_read_MDMA2_D1_CURR_DESC_PTR()   bfin_read32(MDMA2_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA2_D1_CURR_DESC_PTR(val) bfin_write32(MDMA2_D1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_D1_CURR_ADDR()       bfin_read32(MDMA2_D1_CURR_ADDR)
-#define bfin_write_MDMA2_D1_CURR_ADDR(val)   bfin_write32(MDMA2_D1_CURR_ADDR,val)
-#define bfin_read_MDMA2_D1_CURR_X_COUNT()    bfin_read16(MDMA2_D1_CURR_X_COUNT)
-#define bfin_write_MDMA2_D1_CURR_X_COUNT(val) bfin_write16(MDMA2_D1_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_D1_CURR_Y_COUNT()    bfin_read16(MDMA2_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA2_D1_CURR_Y_COUNT(val) bfin_write16(MDMA2_D1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_D1_IRQ_STATUS()      bfin_read16(MDMA2_D1_IRQ_STATUS)
-#define bfin_write_MDMA2_D1_IRQ_STATUS(val)  bfin_write16(MDMA2_D1_IRQ_STATUS,val)
-#define bfin_read_MDMA2_D1_PERIPHERAL_MAP()  bfin_read16(MDMA2_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D1_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_S1_CONFIG()          bfin_read16(MDMA2_S1_CONFIG)
-#define bfin_write_MDMA2_S1_CONFIG(val)      bfin_write16(MDMA2_S1_CONFIG,val)
-#define bfin_read_MDMA2_S1_NEXT_DESC_PTR()   bfin_read32(MDMA2_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_S1_START_ADDR()      bfin_read32(MDMA2_S1_START_ADDR)
-#define bfin_write_MDMA2_S1_START_ADDR(val)  bfin_write32(MDMA2_S1_START_ADDR,val)
-#define bfin_read_MDMA2_S1_X_COUNT()         bfin_read16(MDMA2_S1_X_COUNT)
-#define bfin_write_MDMA2_S1_X_COUNT(val)     bfin_write16(MDMA2_S1_X_COUNT,val)
-#define bfin_read_MDMA2_S1_Y_COUNT()         bfin_read16(MDMA2_S1_Y_COUNT)
-#define bfin_write_MDMA2_S1_Y_COUNT(val)     bfin_write16(MDMA2_S1_Y_COUNT,val)
-#define bfin_read_MDMA2_S1_X_MODIFY()        bfin_read16(MDMA2_S1_X_MODIFY)
-#define bfin_write_MDMA2_S1_X_MODIFY(val)    bfin_write16(MDMA2_S1_X_MODIFY,val)
-#define bfin_read_MDMA2_S1_Y_MODIFY()        bfin_read16(MDMA2_S1_Y_MODIFY)
-#define bfin_write_MDMA2_S1_Y_MODIFY(val)    bfin_write16(MDMA2_S1_Y_MODIFY,val)
-#define bfin_read_MDMA2_S1_CURR_DESC_PTR()   bfin_read32(MDMA2_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA2_S1_CURR_DESC_PTR(val) bfin_write32(MDMA2_S1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_S1_CURR_ADDR()       bfin_read32(MDMA2_S1_CURR_ADDR)
-#define bfin_write_MDMA2_S1_CURR_ADDR(val)   bfin_write32(MDMA2_S1_CURR_ADDR,val)
-#define bfin_read_MDMA2_S1_CURR_X_COUNT()    bfin_read16(MDMA2_S1_CURR_X_COUNT)
-#define bfin_write_MDMA2_S1_CURR_X_COUNT(val) bfin_write16(MDMA2_S1_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_S1_CURR_Y_COUNT()    bfin_read16(MDMA2_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA2_S1_CURR_Y_COUNT(val) bfin_write16(MDMA2_S1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_S1_IRQ_STATUS()      bfin_read16(MDMA2_S1_IRQ_STATUS)
-#define bfin_write_MDMA2_S1_IRQ_STATUS(val)  bfin_write16(MDMA2_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA2_S1_PERIPHERAL_MAP()  bfin_read16(MDMA2_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D0_CONFIG()          bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)      bfin_write16(MDMA_D0_CONFIG,val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()   bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D0_START_ADDR()      bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)  bfin_write32(MDMA_D0_START_ADDR,val)
+#define bfin_read_MDMA_D0_X_COUNT()         bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)     bfin_write16(MDMA_D0_X_COUNT,val)
+#define bfin_read_MDMA_D0_Y_COUNT()         bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)     bfin_write16(MDMA_D0_Y_COUNT,val)
+#define bfin_read_MDMA_D0_X_MODIFY()        bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)    bfin_write16(MDMA_D0_X_MODIFY,val)
+#define bfin_read_MDMA_D0_Y_MODIFY()        bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)    bfin_write16(MDMA_D0_Y_MODIFY,val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()   bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D0_CURR_ADDR()       bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)   bfin_write32(MDMA_D0_CURR_ADDR,val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()    bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()    bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()      bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)  bfin_write16(MDMA_D0_IRQ_STATUS,val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()  bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S0_CONFIG()          bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)      bfin_write16(MDMA_S0_CONFIG,val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()   bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S0_START_ADDR()      bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)  bfin_write32(MDMA_S0_START_ADDR,val)
+#define bfin_read_MDMA_S0_X_COUNT()         bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)     bfin_write16(MDMA_S0_X_COUNT,val)
+#define bfin_read_MDMA_S0_Y_COUNT()         bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)     bfin_write16(MDMA_S0_Y_COUNT,val)
+#define bfin_read_MDMA_S0_X_MODIFY()        bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)    bfin_write16(MDMA_S0_X_MODIFY,val)
+#define bfin_read_MDMA_S0_Y_MODIFY()        bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)    bfin_write16(MDMA_S0_Y_MODIFY,val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()   bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S0_CURR_ADDR()       bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)   bfin_write32(MDMA_S0_CURR_ADDR,val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()    bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()    bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()      bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)  bfin_write16(MDMA_S0_IRQ_STATUS,val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()  bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D1_CONFIG()          bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)      bfin_write16(MDMA_D1_CONFIG,val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()   bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D1_START_ADDR()      bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)  bfin_write32(MDMA_D1_START_ADDR,val)
+#define bfin_read_MDMA_D1_X_COUNT()         bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)     bfin_write16(MDMA_D1_X_COUNT,val)
+#define bfin_read_MDMA_D1_Y_COUNT()         bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)     bfin_write16(MDMA_D1_Y_COUNT,val)
+#define bfin_read_MDMA_D1_X_MODIFY()        bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)    bfin_write16(MDMA_D1_X_MODIFY,val)
+#define bfin_read_MDMA_D1_Y_MODIFY()        bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)    bfin_write16(MDMA_D1_Y_MODIFY,val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()   bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D1_CURR_ADDR()       bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)   bfin_write32(MDMA_D1_CURR_ADDR,val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()    bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()    bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()      bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)  bfin_write16(MDMA_D1_IRQ_STATUS,val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()  bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S1_CONFIG()          bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)      bfin_write16(MDMA_S1_CONFIG,val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()   bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S1_START_ADDR()      bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)  bfin_write32(MDMA_S1_START_ADDR,val)
+#define bfin_read_MDMA_S1_X_COUNT()         bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)     bfin_write16(MDMA_S1_X_COUNT,val)
+#define bfin_read_MDMA_S1_Y_COUNT()         bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)     bfin_write16(MDMA_S1_Y_COUNT,val)
+#define bfin_read_MDMA_S1_X_MODIFY()        bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)    bfin_write16(MDMA_S1_X_MODIFY,val)
+#define bfin_read_MDMA_S1_Y_MODIFY()        bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)    bfin_write16(MDMA_S1_Y_MODIFY,val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()   bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S1_CURR_ADDR()       bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)   bfin_write32(MDMA_S1_CURR_ADDR,val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()    bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()    bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()      bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)  bfin_write16(MDMA_S1_IRQ_STATUS,val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()  bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP,val)
 /* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
 #define bfin_read_IMDMA_D0_CONFIG()          bfin_read16(IMDMA_D0_CONFIG)
 #define bfin_write_IMDMA_D0_CONFIG(val)      bfin_write16(IMDMA_D0_CONFIG,val)
@@ -1465,65 +1457,4 @@
 #define bfin_read_IMDMA_S1_IRQ_STATUS()      bfin_read16(IMDMA_S1_IRQ_STATUS)
 #define bfin_write_IMDMA_S1_IRQ_STATUS(val)  bfin_write16(IMDMA_S1_IRQ_STATUS,val)
 
-#define bfin_read_MDMA_S0_CONFIG()  bfin_read_MDMA1_S0_CONFIG()
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA1_S0_CONFIG(val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()  bfin_read_MDMA1_S0_IRQ_STATUS()
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA1_S0_IRQ_STATUS(val)
-#define bfin_read_MDMA_S0_X_MODIFY()  bfin_read_MDMA1_S0_X_MODIFY()
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA1_S0_X_MODIFY(val)
-#define bfin_read_MDMA_S0_Y_MODIFY()  bfin_read_MDMA1_S0_Y_MODIFY()
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA1_S0_Y_MODIFY(val)
-#define bfin_read_MDMA_S0_X_COUNT()  bfin_read_MDMA1_S0_X_COUNT()
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA1_S0_X_COUNT(val)
-#define bfin_read_MDMA_S0_Y_COUNT()  bfin_read_MDMA1_S0_Y_COUNT()
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA1_S0_Y_COUNT(val)
-#define bfin_read_MDMA_S0_START_ADDR()  bfin_read_MDMA1_S0_START_ADDR()
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA1_S0_START_ADDR(val)
-#define bfin_read_MDMA_D0_CONFIG()  bfin_read_MDMA1_D0_CONFIG()
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA1_D0_CONFIG(val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()  bfin_read_MDMA1_D0_IRQ_STATUS()
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA1_D0_IRQ_STATUS(val)
-#define bfin_read_MDMA_D0_X_MODIFY()  bfin_read_MDMA1_D0_X_MODIFY()
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA1_D0_X_MODIFY(val)
-#define bfin_read_MDMA_D0_Y_MODIFY()  bfin_read_MDMA1_D0_Y_MODIFY()
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA1_D0_Y_MODIFY(val)
-#define bfin_read_MDMA_D0_X_COUNT()  bfin_read_MDMA1_D0_X_COUNT()
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA1_D0_X_COUNT(val)
-#define bfin_read_MDMA_D0_Y_COUNT()  bfin_read_MDMA1_D0_Y_COUNT()
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA1_D0_Y_COUNT(val)
-#define bfin_read_MDMA_D0_START_ADDR()  bfin_read_MDMA1_D0_START_ADDR()
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val)
-
-#define bfin_read_MDMA_S1_CONFIG()  bfin_read_MDMA1_S1_CONFIG()
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA1_S1_CONFIG(val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()  bfin_read_MDMA1_S1_IRQ_STATUS()
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA1_S1_IRQ_STATUS(val)
-#define bfin_read_MDMA_S1_X_MODIFY()  bfin_read_MDMA1_S1_X_MODIFY()
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA1_S1_X_MODIFY(val)
-#define bfin_read_MDMA_S1_Y_MODIFY()  bfin_read_MDMA1_S1_Y_MODIFY()
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA1_S1_Y_MODIFY(val)
-#define bfin_read_MDMA_S1_X_COUNT()  bfin_read_MDMA1_S1_X_COUNT()
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA1_S1_X_COUNT(val)
-#define bfin_read_MDMA_S1_Y_COUNT()  bfin_read_MDMA1_S1_Y_COUNT()
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA1_S1_Y_COUNT(val)
-#define bfin_read_MDMA_S1_START_ADDR()  bfin_read_MDMA1_S1_START_ADDR()
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA1_S1_START_ADDR(val)
-#define bfin_read_MDMA_D1_CONFIG()  bfin_read_MDMA1_D1_CONFIG()
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA1_D1_CONFIG(val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()  bfin_read_MDMA1_D1_IRQ_STATUS()
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA1_D1_IRQ_STATUS(val)
-#define bfin_read_MDMA_D1_X_MODIFY()  bfin_read_MDMA1_D1_X_MODIFY()
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA1_D1_X_MODIFY(val)
-#define bfin_read_MDMA_D1_Y_MODIFY()  bfin_read_MDMA1_D1_Y_MODIFY()
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA1_D1_Y_MODIFY(val)
-#define bfin_read_MDMA_D1_X_COUNT()  bfin_read_MDMA1_D1_X_COUNT()
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA1_D1_X_COUNT(val)
-#define bfin_read_MDMA_D1_Y_COUNT()  bfin_read_MDMA1_D1_Y_COUNT()
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA1_D1_Y_COUNT(val)
-#define bfin_read_MDMA_D1_START_ADDR()  bfin_read_MDMA1_D1_START_ADDR()
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA1_D1_START_ADDR(val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif				/* _CDEF_BF561_H */
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 79e048d..71e805e 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1,18 +1,11 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF561_H
 #define _DEF_BF561_H
-/*
-#if !defined(__ADSPBF561__)
-#warning defBF561.h should only be included for BF561 chip.
-#endif
-*/
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
 
 /*********************************************************************************** */
 /* System MMR Register Map */
@@ -311,10 +304,10 @@
 #define PPI1_FRAME 					0xFFC01310	/* PPI1 Frame Length register */
 
 /*DMA traffic control registers */
-#define	DMA1_TC_PER  0xFFC01B0C	/* Traffic control periods */
-#define	DMA1_TC_CNT  0xFFC01B10	/* Traffic control current counts */
-#define	DMA2_TC_PER  0xFFC00B0C	/* Traffic control periods */
-#define	DMA2_TC_CNT  0xFFC00B10	/* Traffic control current counts        */
+#define	DMAC0_TC_PER  0xFFC00B0C	/* Traffic control periods */
+#define	DMAC0_TC_CNT  0xFFC00B10	/* Traffic control current counts        */
+#define	DMAC1_TC_PER  0xFFC01B0C	/* Traffic control periods */
+#define	DMAC1_TC_CNT  0xFFC01B10	/* Traffic control current counts */
 
 /* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
 #define DMA1_0_CONFIG 0xFFC01C08	/* DMA1 Channel 0 Configuration register */
@@ -486,61 +479,61 @@
 #define DMA1_11_PERIPHERAL_MAP 0xFFC01EEC	/* DMA1 Channel 11 Peripheral Map Register */
 
 /* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
-#define MDMA1_D0_CONFIG 0xFFC01F08	/*MemDMA1 Stream 0 Destination Configuration */
-#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00	/*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
-#define MDMA1_D0_START_ADDR 0xFFC01F04	/*MemDMA1 Stream 0 Destination Start Address */
-#define MDMA1_D0_X_COUNT 0xFFC01F10	/*MemDMA1 Stream 0 Destination Inner-Loop Count */
-#define MDMA1_D0_Y_COUNT 0xFFC01F18	/*MemDMA1 Stream 0 Destination Outer-Loop Count */
-#define MDMA1_D0_X_MODIFY 0xFFC01F14	/*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
-#define MDMA1_D0_Y_MODIFY 0xFFC01F1C	/*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
-#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20	/*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
-#define MDMA1_D0_CURR_ADDR 0xFFC01F24	/*MemDMA1 Stream 0 Destination Current Address */
-#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30	/*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
-#define MDMA1_D0_CURR_Y_COUNT 0xFFC01F38	/*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
-#define MDMA1_D0_IRQ_STATUS 0xFFC01F28	/*MemDMA1 Stream 0 Destination Interrupt/Status */
-#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C	/*MemDMA1 Stream 0 Destination Peripheral Map */
+#define MDMA_D2_CONFIG 0xFFC01F08	/*MemDMA1 Stream 0 Destination Configuration */
+#define MDMA_D2_NEXT_DESC_PTR 0xFFC01F00	/*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
+#define MDMA_D2_START_ADDR 0xFFC01F04	/*MemDMA1 Stream 0 Destination Start Address */
+#define MDMA_D2_X_COUNT 0xFFC01F10	/*MemDMA1 Stream 0 Destination Inner-Loop Count */
+#define MDMA_D2_Y_COUNT 0xFFC01F18	/*MemDMA1 Stream 0 Destination Outer-Loop Count */
+#define MDMA_D2_X_MODIFY 0xFFC01F14	/*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
+#define MDMA_D2_Y_MODIFY 0xFFC01F1C	/*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
+#define MDMA_D2_CURR_DESC_PTR 0xFFC01F20	/*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
+#define MDMA_D2_CURR_ADDR 0xFFC01F24	/*MemDMA1 Stream 0 Destination Current Address */
+#define MDMA_D2_CURR_X_COUNT 0xFFC01F30	/*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
+#define MDMA_D2_CURR_Y_COUNT 0xFFC01F38	/*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
+#define MDMA_D2_IRQ_STATUS 0xFFC01F28	/*MemDMA1 Stream 0 Destination Interrupt/Status */
+#define MDMA_D2_PERIPHERAL_MAP 0xFFC01F2C	/*MemDMA1 Stream 0 Destination Peripheral Map */
 
-#define MDMA1_S0_CONFIG 0xFFC01F48	/*MemDMA1 Stream 0 Source Configuration */
-#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40	/*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
-#define MDMA1_S0_START_ADDR 0xFFC01F44	/*MemDMA1 Stream 0 Source Start Address */
-#define MDMA1_S0_X_COUNT 0xFFC01F50	/*MemDMA1 Stream 0 Source Inner-Loop Count */
-#define MDMA1_S0_Y_COUNT 0xFFC01F58	/*MemDMA1 Stream 0 Source Outer-Loop Count */
-#define MDMA1_S0_X_MODIFY 0xFFC01F54	/*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
-#define MDMA1_S0_Y_MODIFY 0xFFC01F5C	/*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
-#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60	/*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
-#define MDMA1_S0_CURR_ADDR 0xFFC01F64	/*MemDMA1 Stream 0 Source Current Address */
-#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70	/*MemDMA1 Stream 0 Source Current Inner-Loop Count */
-#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78	/*MemDMA1 Stream 0 Source Current Outer-Loop Count */
-#define MDMA1_S0_IRQ_STATUS 0xFFC01F68	/*MemDMA1 Stream 0 Source Interrupt/Status */
-#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C	/*MemDMA1 Stream 0 Source Peripheral Map */
+#define MDMA_S2_CONFIG 0xFFC01F48	/*MemDMA1 Stream 0 Source Configuration */
+#define MDMA_S2_NEXT_DESC_PTR 0xFFC01F40	/*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
+#define MDMA_S2_START_ADDR 0xFFC01F44	/*MemDMA1 Stream 0 Source Start Address */
+#define MDMA_S2_X_COUNT 0xFFC01F50	/*MemDMA1 Stream 0 Source Inner-Loop Count */
+#define MDMA_S2_Y_COUNT 0xFFC01F58	/*MemDMA1 Stream 0 Source Outer-Loop Count */
+#define MDMA_S2_X_MODIFY 0xFFC01F54	/*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
+#define MDMA_S2_Y_MODIFY 0xFFC01F5C	/*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
+#define MDMA_S2_CURR_DESC_PTR 0xFFC01F60	/*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
+#define MDMA_S2_CURR_ADDR 0xFFC01F64	/*MemDMA1 Stream 0 Source Current Address */
+#define MDMA_S2_CURR_X_COUNT 0xFFC01F70	/*MemDMA1 Stream 0 Source Current Inner-Loop Count */
+#define MDMA_S2_CURR_Y_COUNT 0xFFC01F78	/*MemDMA1 Stream 0 Source Current Outer-Loop Count */
+#define MDMA_S2_IRQ_STATUS 0xFFC01F68	/*MemDMA1 Stream 0 Source Interrupt/Status */
+#define MDMA_S2_PERIPHERAL_MAP 0xFFC01F6C	/*MemDMA1 Stream 0 Source Peripheral Map */
 
-#define MDMA1_D1_CONFIG 0xFFC01F88	/*MemDMA1 Stream 1 Destination Configuration */
-#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80	/*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
-#define MDMA1_D1_START_ADDR 0xFFC01F84	/*MemDMA1 Stream 1 Destination Start Address */
-#define MDMA1_D1_X_COUNT 0xFFC01F90	/*MemDMA1 Stream 1 Destination Inner-Loop Count */
-#define MDMA1_D1_Y_COUNT 0xFFC01F98	/*MemDMA1 Stream 1 Destination Outer-Loop Count */
-#define MDMA1_D1_X_MODIFY 0xFFC01F94	/*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
-#define MDMA1_D1_Y_MODIFY 0xFFC01F9C	/*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
-#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0	/*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
-#define MDMA1_D1_CURR_ADDR 0xFFC01FA4	/*MemDMA1 Stream 1 Dest Current Address */
-#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0	/*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
-#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8	/*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
-#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8	/*MemDMA1 Stream 1 Dest Interrupt/Status */
-#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC	/*MemDMA1 Stream 1 Dest Peripheral Map */
+#define MDMA_D3_CONFIG 0xFFC01F88	/*MemDMA1 Stream 1 Destination Configuration */
+#define MDMA_D3_NEXT_DESC_PTR 0xFFC01F80	/*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
+#define MDMA_D3_START_ADDR 0xFFC01F84	/*MemDMA1 Stream 1 Destination Start Address */
+#define MDMA_D3_X_COUNT 0xFFC01F90	/*MemDMA1 Stream 1 Destination Inner-Loop Count */
+#define MDMA_D3_Y_COUNT 0xFFC01F98	/*MemDMA1 Stream 1 Destination Outer-Loop Count */
+#define MDMA_D3_X_MODIFY 0xFFC01F94	/*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
+#define MDMA_D3_Y_MODIFY 0xFFC01F9C	/*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
+#define MDMA_D3_CURR_DESC_PTR 0xFFC01FA0	/*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
+#define MDMA_D3_CURR_ADDR 0xFFC01FA4	/*MemDMA1 Stream 1 Dest Current Address */
+#define MDMA_D3_CURR_X_COUNT 0xFFC01FB0	/*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
+#define MDMA_D3_CURR_Y_COUNT 0xFFC01FB8	/*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
+#define MDMA_D3_IRQ_STATUS 0xFFC01FA8	/*MemDMA1 Stream 1 Dest Interrupt/Status */
+#define MDMA_D3_PERIPHERAL_MAP 0xFFC01FAC	/*MemDMA1 Stream 1 Dest Peripheral Map */
 
-#define MDMA1_S1_CONFIG 0xFFC01FC8	/*MemDMA1 Stream 1 Source Configuration */
-#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0	/*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
-#define MDMA1_S1_START_ADDR 0xFFC01FC4	/*MemDMA1 Stream 1 Source Start Address */
-#define MDMA1_S1_X_COUNT 0xFFC01FD0	/*MemDMA1 Stream 1 Source Inner-Loop Count */
-#define MDMA1_S1_Y_COUNT 0xFFC01FD8	/*MemDMA1 Stream 1 Source Outer-Loop Count */
-#define MDMA1_S1_X_MODIFY 0xFFC01FD4	/*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
-#define MDMA1_S1_Y_MODIFY 0xFFC01FDC	/*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
-#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0	/*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
-#define MDMA1_S1_CURR_ADDR 0xFFC01FE4	/*MemDMA1 Stream 1 Source Current Address */
-#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0	/*MemDMA1 Stream 1 Source Current Inner-Loop Count */
-#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8	/*MemDMA1 Stream 1 Source Current Outer-Loop Count */
-#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8	/*MemDMA1 Stream 1 Source Interrupt/Status */
-#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC	/*MemDMA1 Stream 1 Source Peripheral Map */
+#define MDMA_S3_CONFIG 0xFFC01FC8	/*MemDMA1 Stream 1 Source Configuration */
+#define MDMA_S3_NEXT_DESC_PTR 0xFFC01FC0	/*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
+#define MDMA_S3_START_ADDR 0xFFC01FC4	/*MemDMA1 Stream 1 Source Start Address */
+#define MDMA_S3_X_COUNT 0xFFC01FD0	/*MemDMA1 Stream 1 Source Inner-Loop Count */
+#define MDMA_S3_Y_COUNT 0xFFC01FD8	/*MemDMA1 Stream 1 Source Outer-Loop Count */
+#define MDMA_S3_X_MODIFY 0xFFC01FD4	/*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
+#define MDMA_S3_Y_MODIFY 0xFFC01FDC	/*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
+#define MDMA_S3_CURR_DESC_PTR 0xFFC01FE0	/*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
+#define MDMA_S3_CURR_ADDR 0xFFC01FE4	/*MemDMA1 Stream 1 Source Current Address */
+#define MDMA_S3_CURR_X_COUNT 0xFFC01FF0	/*MemDMA1 Stream 1 Source Current Inner-Loop Count */
+#define MDMA_S3_CURR_Y_COUNT 0xFFC01FF8	/*MemDMA1 Stream 1 Source Current Outer-Loop Count */
+#define MDMA_S3_IRQ_STATUS 0xFFC01FE8	/*MemDMA1 Stream 1 Source Interrupt/Status */
+#define MDMA_S3_PERIPHERAL_MAP 0xFFC01FEC	/*MemDMA1 Stream 1 Source Peripheral Map */
 
 /* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
 #define DMA2_0_CONFIG 0xFFC00C08	/* DMA2 Channel 0 Configuration register */
@@ -712,117 +705,61 @@
 #define DMA2_11_PERIPHERAL_MAP 0xFFC00EEC	/* DMA2 Channel 11 Peripheral Map Register */
 
 /* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
-#define MDMA2_D0_CONFIG 0xFFC00F08	/*MemDMA2 Stream 0 Destination Configuration register */
-#define MDMA2_D0_NEXT_DESC_PTR 0xFFC00F00	/*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
-#define MDMA2_D0_START_ADDR 0xFFC00F04	/*MemDMA2 Stream 0 Destination Start Address */
-#define MDMA2_D0_X_COUNT 0xFFC00F10	/*MemDMA2 Stream 0 Dest Inner-Loop Count register */
-#define MDMA2_D0_Y_COUNT 0xFFC00F18	/*MemDMA2 Stream 0 Dest Outer-Loop Count register */
-#define MDMA2_D0_X_MODIFY 0xFFC00F14	/*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
-#define MDMA2_D0_Y_MODIFY 0xFFC00F1C	/*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
-#define MDMA2_D0_CURR_DESC_PTR 0xFFC00F20	/*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
-#define MDMA2_D0_CURR_ADDR 0xFFC00F24	/*MemDMA2 Stream 0 Destination Current Address */
-#define MDMA2_D0_CURR_X_COUNT 0xFFC00F30	/*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
-#define MDMA2_D0_CURR_Y_COUNT 0xFFC00F38	/*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
-#define MDMA2_D0_IRQ_STATUS 0xFFC00F28	/*MemDMA2 Stream 0 Dest Interrupt/Status Register */
-#define MDMA2_D0_PERIPHERAL_MAP 0xFFC00F2C	/*MemDMA2 Stream 0 Destination Peripheral Map register */
+#define MDMA_D0_CONFIG 0xFFC00F08	/*MemDMA2 Stream 0 Destination Configuration register */
+#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00	/*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
+#define MDMA_D0_START_ADDR 0xFFC00F04	/*MemDMA2 Stream 0 Destination Start Address */
+#define MDMA_D0_X_COUNT 0xFFC00F10	/*MemDMA2 Stream 0 Dest Inner-Loop Count register */
+#define MDMA_D0_Y_COUNT 0xFFC00F18	/*MemDMA2 Stream 0 Dest Outer-Loop Count register */
+#define MDMA_D0_X_MODIFY 0xFFC00F14	/*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
+#define MDMA_D0_Y_MODIFY 0xFFC00F1C	/*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
+#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20	/*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
+#define MDMA_D0_CURR_ADDR 0xFFC00F24	/*MemDMA2 Stream 0 Destination Current Address */
+#define MDMA_D0_CURR_X_COUNT 0xFFC00F30	/*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
+#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38	/*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
+#define MDMA_D0_IRQ_STATUS 0xFFC00F28	/*MemDMA2 Stream 0 Dest Interrupt/Status Register */
+#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C	/*MemDMA2 Stream 0 Destination Peripheral Map register */
 
-#define MDMA2_S0_CONFIG 0xFFC00F48	/*MemDMA2 Stream 0 Source Configuration register */
-#define MDMA2_S0_NEXT_DESC_PTR 0xFFC00F40	/*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
-#define MDMA2_S0_START_ADDR 0xFFC00F44	/*MemDMA2 Stream 0 Source Start Address */
-#define MDMA2_S0_X_COUNT 0xFFC00F50	/*MemDMA2 Stream 0 Source Inner-Loop Count register */
-#define MDMA2_S0_Y_COUNT 0xFFC00F58	/*MemDMA2 Stream 0 Source Outer-Loop Count register */
-#define MDMA2_S0_X_MODIFY 0xFFC00F54	/*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
-#define MDMA2_S0_Y_MODIFY 0xFFC00F5C	/*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
-#define MDMA2_S0_CURR_DESC_PTR 0xFFC00F60	/*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
-#define MDMA2_S0_CURR_ADDR 0xFFC00F64	/*MemDMA2 Stream 0 Source Current Address */
-#define MDMA2_S0_CURR_X_COUNT 0xFFC00F70	/*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
-#define MDMA2_S0_CURR_Y_COUNT 0xFFC00F78	/*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
-#define MDMA2_S0_IRQ_STATUS 0xFFC00F68	/*MemDMA2 Stream 0 Source Interrupt/Status Register */
-#define MDMA2_S0_PERIPHERAL_MAP 0xFFC00F6C	/*MemDMA2 Stream 0 Source Peripheral Map register */
+#define MDMA_S0_CONFIG 0xFFC00F48	/*MemDMA2 Stream 0 Source Configuration register */
+#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40	/*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
+#define MDMA_S0_START_ADDR 0xFFC00F44	/*MemDMA2 Stream 0 Source Start Address */
+#define MDMA_S0_X_COUNT 0xFFC00F50	/*MemDMA2 Stream 0 Source Inner-Loop Count register */
+#define MDMA_S0_Y_COUNT 0xFFC00F58	/*MemDMA2 Stream 0 Source Outer-Loop Count register */
+#define MDMA_S0_X_MODIFY 0xFFC00F54	/*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
+#define MDMA_S0_Y_MODIFY 0xFFC00F5C	/*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
+#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60	/*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
+#define MDMA_S0_CURR_ADDR 0xFFC00F64	/*MemDMA2 Stream 0 Source Current Address */
+#define MDMA_S0_CURR_X_COUNT 0xFFC00F70	/*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
+#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78	/*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
+#define MDMA_S0_IRQ_STATUS 0xFFC00F68	/*MemDMA2 Stream 0 Source Interrupt/Status Register */
+#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C	/*MemDMA2 Stream 0 Source Peripheral Map register */
 
-#define MDMA2_D1_CONFIG 0xFFC00F88	/*MemDMA2 Stream 1 Destination Configuration register */
-#define MDMA2_D1_NEXT_DESC_PTR 0xFFC00F80	/*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
-#define MDMA2_D1_START_ADDR 0xFFC00F84	/*MemDMA2 Stream 1 Destination Start Address */
-#define MDMA2_D1_X_COUNT 0xFFC00F90	/*MemDMA2 Stream 1 Dest Inner-Loop Count register */
-#define MDMA2_D1_Y_COUNT 0xFFC00F98	/*MemDMA2 Stream 1 Dest Outer-Loop Count register */
-#define MDMA2_D1_X_MODIFY 0xFFC00F94	/*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
-#define MDMA2_D1_Y_MODIFY 0xFFC00F9C	/*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
-#define MDMA2_D1_CURR_DESC_PTR 0xFFC00FA0	/*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
-#define MDMA2_D1_CURR_ADDR 0xFFC00FA4	/*MemDMA2 Stream 1 Destination Current Address reg */
-#define MDMA2_D1_CURR_X_COUNT 0xFFC00FB0	/*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
-#define MDMA2_D1_CURR_Y_COUNT 0xFFC00FB8	/*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
-#define MDMA2_D1_IRQ_STATUS 0xFFC00FA8	/*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
-#define MDMA2_D1_PERIPHERAL_MAP 0xFFC00FAC	/*MemDMA2 Stream 1 Destination Peripheral Map register */
+#define MDMA_D1_CONFIG 0xFFC00F88	/*MemDMA2 Stream 1 Destination Configuration register */
+#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80	/*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
+#define MDMA_D1_START_ADDR 0xFFC00F84	/*MemDMA2 Stream 1 Destination Start Address */
+#define MDMA_D1_X_COUNT 0xFFC00F90	/*MemDMA2 Stream 1 Dest Inner-Loop Count register */
+#define MDMA_D1_Y_COUNT 0xFFC00F98	/*MemDMA2 Stream 1 Dest Outer-Loop Count register */
+#define MDMA_D1_X_MODIFY 0xFFC00F94	/*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
+#define MDMA_D1_Y_MODIFY 0xFFC00F9C	/*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
+#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0	/*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
+#define MDMA_D1_CURR_ADDR 0xFFC00FA4	/*MemDMA2 Stream 1 Destination Current Address reg */
+#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0	/*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
+#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8	/*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
+#define MDMA_D1_IRQ_STATUS 0xFFC00FA8	/*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
+#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC	/*MemDMA2 Stream 1 Destination Peripheral Map register */
 
-#define MDMA2_S1_CONFIG 0xFFC00FC8	/*MemDMA2 Stream 1 Source Configuration register */
-#define MDMA2_S1_NEXT_DESC_PTR 0xFFC00FC0	/*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
-#define MDMA2_S1_START_ADDR 0xFFC00FC4	/*MemDMA2 Stream 1 Source Start Address */
-#define MDMA2_S1_X_COUNT 0xFFC00FD0	/*MemDMA2 Stream 1 Source Inner-Loop Count register */
-#define MDMA2_S1_Y_COUNT 0xFFC00FD8	/*MemDMA2 Stream 1 Source Outer-Loop Count register */
-#define MDMA2_S1_X_MODIFY 0xFFC00FD4	/*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
-#define MDMA2_S1_Y_MODIFY 0xFFC00FDC	/*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
-#define MDMA2_S1_CURR_DESC_PTR 0xFFC00FE0	/*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
-#define MDMA2_S1_CURR_ADDR 0xFFC00FE4	/*MemDMA2 Stream 1 Source Current Address */
-#define MDMA2_S1_CURR_X_COUNT 0xFFC00FF0	/*MemDMA2 Stream 1 Source Current Inner-Loop Count */
-#define MDMA2_S1_CURR_Y_COUNT 0xFFC00FF8	/*MemDMA2 Stream 1 Source Current Outer-Loop Count */
-#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8	/*MemDMA2 Stream 1 Source Interrupt/Status Register */
-#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC	/*MemDMA2 Stream 1 Source Peripheral Map register */
-
-#define MDMA_D0_NEXT_DESC_PTR MDMA1_D0_NEXT_DESC_PTR
-#define MDMA_D0_START_ADDR MDMA1_D0_START_ADDR
-#define MDMA_D0_CONFIG MDMA1_D0_CONFIG
-#define MDMA_D0_X_COUNT MDMA1_D0_X_COUNT
-#define MDMA_D0_X_MODIFY MDMA1_D0_X_MODIFY
-#define MDMA_D0_Y_COUNT MDMA1_D0_Y_COUNT
-#define MDMA_D0_Y_MODIFY MDMA1_D0_Y_MODIFY
-#define MDMA_D0_CURR_DESC_PTR MDMA1_D0_CURR_DESC_PTR
-#define MDMA_D0_CURR_ADDR MDMA1_D0_CURR_ADDR
-#define MDMA_D0_IRQ_STATUS MDMA1_D0_IRQ_STATUS
-#define MDMA_D0_PERIPHERAL_MAP MDMA1_D0_PERIPHERAL_MAP
-#define MDMA_D0_CURR_X_COUNT MDMA1_D0_CURR_X_COUNT
-#define MDMA_D0_CURR_Y_COUNT MDMA1_D0_CURR_Y_COUNT
-
-#define MDMA_S0_NEXT_DESC_PTR MDMA1_S0_NEXT_DESC_PTR
-#define MDMA_S0_START_ADDR MDMA1_S0_START_ADDR
-#define MDMA_S0_CONFIG MDMA1_S0_CONFIG
-#define MDMA_S0_X_COUNT MDMA1_S0_X_COUNT
-#define MDMA_S0_X_MODIFY MDMA1_S0_X_MODIFY
-#define MDMA_S0_Y_COUNT MDMA1_S0_Y_COUNT
-#define MDMA_S0_Y_MODIFY MDMA1_S0_Y_MODIFY
-#define MDMA_S0_CURR_DESC_PTR MDMA1_S0_CURR_DESC_PTR
-#define MDMA_S0_CURR_ADDR MDMA1_S0_CURR_ADDR
-#define MDMA_S0_IRQ_STATUS MDMA1_S0_IRQ_STATUS
-#define MDMA_S0_PERIPHERAL_MAP MDMA1_S0_PERIPHERAL_MAP
-#define MDMA_S0_CURR_X_COUNT MDMA1_S0_CURR_X_COUNT
-#define MDMA_S0_CURR_Y_COUNT MDMA1_S0_CURR_Y_COUNT
-
-#define MDMA_D1_NEXT_DESC_PTR MDMA1_D1_NEXT_DESC_PTR
-#define MDMA_D1_START_ADDR MDMA1_D1_START_ADDR
-#define MDMA_D1_CONFIG MDMA1_D1_CONFIG
-#define MDMA_D1_X_COUNT MDMA1_D1_X_COUNT
-#define MDMA_D1_X_MODIFY MDMA1_D1_X_MODIFY
-#define MDMA_D1_Y_COUNT MDMA1_D1_Y_COUNT
-#define MDMA_D1_Y_MODIFY MDMA1_D1_Y_MODIFY
-#define MDMA_D1_CURR_DESC_PTR MDMA1_D1_CURR_DESC_PTR
-#define MDMA_D1_CURR_ADDR MDMA1_D1_CURR_ADDR
-#define MDMA_D1_IRQ_STATUS MDMA1_D1_IRQ_STATUS
-#define MDMA_D1_PERIPHERAL_MAP MDMA1_D1_PERIPHERAL_MAP
-#define MDMA_D1_CURR_X_COUNT MDMA1_D1_CURR_X_COUNT
-#define MDMA_D1_CURR_Y_COUNT MDMA1_D1_CURR_Y_COUNT
-
-#define MDMA_S1_NEXT_DESC_PTR MDMA1_S1_NEXT_DESC_PTR
-#define MDMA_S1_START_ADDR MDMA1_S1_START_ADDR
-#define MDMA_S1_CONFIG MDMA1_S1_CONFIG
-#define MDMA_S1_X_COUNT MDMA1_S1_X_COUNT
-#define MDMA_S1_X_MODIFY MDMA1_S1_X_MODIFY
-#define MDMA_S1_Y_COUNT MDMA1_S1_Y_COUNT
-#define MDMA_S1_Y_MODIFY MDMA1_S1_Y_MODIFY
-#define MDMA_S1_CURR_DESC_PTR MDMA1_S1_CURR_DESC_PTR
-#define MDMA_S1_CURR_ADDR MDMA1_S1_CURR_ADDR
-#define MDMA_S1_IRQ_STATUS MDMA1_S1_IRQ_STATUS
-#define MDMA_S1_PERIPHERAL_MAP MDMA1_S1_PERIPHERAL_MAP
-#define MDMA_S1_CURR_X_COUNT MDMA1_S1_CURR_X_COUNT
-#define MDMA_S1_CURR_Y_COUNT MDMA1_S1_CURR_Y_COUNT
+#define MDMA_S1_CONFIG 0xFFC00FC8	/*MemDMA2 Stream 1 Source Configuration register */
+#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0	/*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
+#define MDMA_S1_START_ADDR 0xFFC00FC4	/*MemDMA2 Stream 1 Source Start Address */
+#define MDMA_S1_X_COUNT 0xFFC00FD0	/*MemDMA2 Stream 1 Source Inner-Loop Count register */
+#define MDMA_S1_Y_COUNT 0xFFC00FD8	/*MemDMA2 Stream 1 Source Outer-Loop Count register */
+#define MDMA_S1_X_MODIFY 0xFFC00FD4	/*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
+#define MDMA_S1_Y_MODIFY 0xFFC00FDC	/*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
+#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0	/*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
+#define MDMA_S1_CURR_ADDR 0xFFC00FE4	/*MemDMA2 Stream 1 Source Current Address */
+#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0	/*MemDMA2 Stream 1 Source Current Inner-Loop Count */
+#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8	/*MemDMA2 Stream 1 Source Current Outer-Loop Count */
+#define MDMA_S1_IRQ_STATUS 0xFFC00FE8	/*MemDMA2 Stream 1 Source Interrupt/Status Register */
+#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC	/*MemDMA2 Stream 1 Source Peripheral Map register */
 
 /* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
 #define IMDMA_D0_CONFIG 0xFFC01808	/*IMDMA Stream 0 Destination Configuration */
@@ -927,83 +864,6 @@
 #define IWR_ENABLE(x)	       (1 << (x))	/* Wakeup Enable Peripheral #x */
 #define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x)))	/* Wakeup Disable Peripheral #x */
 
-/* ***************************** UART CONTROLLER MASKS ********************** */
-
-/* UART_LCR Register */
-
-#define DLAB	0x80
-#define SB      0x40
-#define STP      0x20
-#define EPS     0x10
-#define PEN	0x08
-#define STB	0x04
-#define WLS(x)	((x-5) & 0x03)
-
-#define DLAB_P	0x07
-#define SB_P	0x06
-#define STP_P	0x05
-#define EPS_P	0x04
-#define PEN_P	0x03
-#define STB_P	0x02
-#define WLS_P1	0x01
-#define WLS_P0	0x00
-
-/* UART_MCR Register */
-#define LOOP_ENA	0x10
-#define LOOP_ENA_P	0x04
-
-/* UART_LSR Register */
-#define TEMT	0x40
-#define THRE	0x20
-#define BI	0x10
-#define FE	0x08
-#define PE	0x04
-#define OE	0x02
-#define DR	0x01
-
-#define TEMP_P	0x06
-#define THRE_P	0x05
-#define BI_P	0x04
-#define FE_P	0x03
-#define PE_P	0x02
-#define OE_P	0x01
-#define DR_P	0x00
-
-/* UART_IER Register */
-#define ELSI	0x04
-#define ETBEI	0x02
-#define ERBFI	0x01
-
-#define ELSI_P	0x02
-#define ETBEI_P	0x01
-#define ERBFI_P	0x00
-
-/* UART_IIR Register */
-#define STATUS(x)	((x << 1) & 0x06)
-#define NINT		0x01
-#define STATUS_P1	0x02
-#define STATUS_P0	0x01
-#define NINT_P		0x00
-#define IIR_TX_READY    0x02	/* UART_THR empty                               */
-#define IIR_RX_READY    0x04	/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06	/* Receive line status                          */
-#define IIR_STATUS	0x06
-
-/* UART_GCTL Register */
-#define FFE	0x20
-#define FPE	0x10
-#define RPOLC	0x08
-#define TPOLC	0x04
-#define IREN	0x02
-#define UCEN	0x01
-
-#define FFE_P	0x05
-#define FPE_P	0x04
-#define RPOLC_P	0x03
-#define TPOLC_P	0x02
-#define IREN_P	0x01
-#define UCEN_P	0x00
-
 /*  *********  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 
 /*  PPI_CONTROL Masks         */
@@ -1230,44 +1090,6 @@
 #define ERR_TYP_P0		0x0E
 #define ERR_TYP_P1		0x0F
 
-/*/ ******************   PROGRAMMABLE FLAG MASKS  ********************* */
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks */
-#define PF0         0x0001
-#define PF1         0x0002
-#define PF2         0x0004
-#define PF3         0x0008
-#define PF4         0x0010
-#define PF5         0x0020
-#define PF6         0x0040
-#define PF7         0x0080
-#define PF8         0x0100
-#define PF9         0x0200
-#define PF10        0x0400
-#define PF11        0x0800
-#define PF12        0x1000
-#define PF13        0x2000
-#define PF14        0x4000
-#define PF15        0x8000
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  BIT POSITIONS */
-#define PF0_P         0
-#define PF1_P         1
-#define PF2_P         2
-#define PF3_P         3
-#define PF4_P         4
-#define PF5_P         5
-#define PF6_P         6
-#define PF7_P         7
-#define PF8_P         8
-#define PF9_P         9
-#define PF10_P        10
-#define PF11_P        11
-#define PF12_P        12
-#define PF13_P        13
-#define PF14_P        14
-#define PF15_P        15
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  ************* */
 
 /* AMGCTL Masks */
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 4f8aa5d..57d5eab 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -62,4 +62,6 @@
 #define PORT_FIO1 GPIO_16
 #define PORT_FIO2 GPIO_32
 
+#include <mach-common/ports-f.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 5b96ea5..4cc9199 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -106,7 +106,7 @@
 #define COREA_L1_SCRATCH_START	0xFFB00000
 #define COREB_L1_SCRATCH_START	0xFF700000
 
-#ifdef __ASSEMBLY__
+#ifdef CONFIG_SMP
 
 /*
  * The following macros both return the address of the PDA for the
@@ -121,8 +121,7 @@
  * is allowed to use the specified Dreg for determining the PDA
  * address to be returned into Preg.
  */
-#ifdef CONFIG_SMP
-#define GET_PDA_SAFE(preg)		\
+# define GET_PDA_SAFE(preg)		\
 	preg.l = lo(DSPID);		\
 	preg.h = hi(DSPID);		\
 	preg = [preg];			\
@@ -158,7 +157,7 @@
 	preg = [preg];			\
 4:
 
-#define GET_PDA(preg, dreg)		\
+# define GET_PDA(preg, dreg)		\
 	preg.l = lo(DSPID);		\
 	preg.h = hi(DSPID);		\
 	dreg = [preg];			\
@@ -169,13 +168,17 @@
 	preg = [preg];			\
 1:					\
 
-#define GET_CPUID(preg, dreg)		\
+# define GET_CPUID(preg, dreg)		\
 	preg.l = lo(DSPID);		\
 	preg.h = hi(DSPID);		\
 	dreg = [preg];			\
 	dreg = ROT dreg BY -1;		\
 	dreg = CC;
 
+# ifndef __ASSEMBLY__
+
+#  include <asm/processor.h>
+
 static inline unsigned long get_l1_scratch_start_cpu(int cpu)
 {
 	return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
@@ -210,8 +213,7 @@
 	return get_l1_data_b_start_cpu(blackfin_core_id());
 }
 
+# endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */
 
-#endif /* __ASSEMBLY__ */
-
 #endif
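
Note on the mem_map.h change above: the CONFIG_SMP test is now the outer guard, so the GET_PDA/GET_CPUID assembler macros and the C inline helpers share one SMP-only block, with the C-only portion nested under !__ASSEMBLY__. A bare skeleton of the resulting nesting, for orientation only (not the in-tree header):

#ifdef CONFIG_SMP
/* assembler-visible macros: GET_PDA_SAFE, GET_PDA, GET_CPUID */
# ifndef __ASSEMBLY__
#  include <asm/processor.h>
/* C-only inline helpers: get_l1_scratch_start_cpu() and friends */
# endif /* __ASSEMBLY__ */
#endif /* CONFIG_SMP */
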
diff --git a/arch/blackfin/mach-bf561/include/mach/pll.h b/arch/blackfin/mach-bf561/include/mach/pll.h
index f2b1fbd..7977db2 100644
--- a/arch/blackfin/mach-bf561/include/mach/pll.h
+++ b/arch/blackfin/mach-bf561/include/mach/pll.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,57 +7,48 @@
 #ifndef _MACH_PLL_H
 #define _MACH_PLL_H
 
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
 #include <asm/blackfin.h>
 #include <asm/irqflags.h>
+#include <mach/irq.h>
 
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+#define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
+
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
 {
-	unsigned long flags, iwr0, iwr1;
+	unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
 
-	if (val == bfin_read_PLL_CTL())
-		return;
+	bfin_write32(SIC_IWR0 + SICA_SICB_OFF, iwr0);
+	bfin_write32(SIC_IWR1 + SICA_SICB_OFF, iwr1);
+}
+#define bfin_iwr_restore bfin_iwr_restore
 
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SICA_IWR0);
-	iwr1 = bfin_read32(SICA_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
-	bfin_write32(SICA_IWR1, 0);
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+              unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+	unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
 
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
+	*iwr0 = bfin_read32(SIC_IWR0 + SICA_SICB_OFF);
+	*iwr1 = bfin_read32(SIC_IWR1 + SICA_SICB_OFF);
+	bfin_iwr_restore(niwr0, niwr1, niwr2);
+}
+#define bfin_iwr_save bfin_iwr_save
 
-	bfin_write32(SICA_IWR0, iwr0);
-	bfin_write32(SICA_IWR1, iwr1);
-	hard_local_irq_restore(flags);
+static inline void
+bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+	bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
 }
 
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
+#endif
 
-	if (val == bfin_read_VR_CTL())
-		return;
+#endif
 
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SICA_IWR0);
-	iwr1 = bfin_read32(SICA_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
-	bfin_write32(SICA_IWR1, 0);
+#include <mach-common/pll.h>
 
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SICA_IWR0, iwr0);
-	bfin_write32(SICA_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#endif
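
Note on the pll.h change above: the new bfin_iwr_* helpers select the Core A or Core B SIC register bank at run time instead of hard-coding the SICA registers. A minimal sketch of the core-selection idiom they rely on, illustrative only and not the in-tree code:

/* Core B's SIC register bank sits 0x1000 above Core A's; the low byte
 * of DSPID is non-zero only when running on Core B. */
static inline unsigned long sic_bank_offset(void)
{
	return (bfin_read_DSPID() & 0xff) ? 0x1000 : 0;
}

/* e.g. bfin_write32(SIC_IWR0 + sic_bank_offset(), iwr0); */
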
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
index 2c8c514..346c605 100644
--- a/arch/blackfin/mach-bf561/include/mach/smp.h
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -7,6 +7,8 @@
 #ifndef _MACH_BF561_SMP
 #define _MACH_BF561_SMP
 
+/* This header has to stand alone to avoid circular deps */
+
 struct task_struct;
 
 void platform_init_cpus(void);
@@ -17,13 +19,13 @@
 
 void platform_secondary_init(unsigned int cpu);
 
-void platform_request_ipi(int (*handler)(int, void *));
+void platform_request_ipi(int irq, /*irq_handler_t*/ void *handler);
 
-void platform_send_ipi(cpumask_t callmap);
+void platform_send_ipi(cpumask_t callmap, int irq);
 
-void platform_send_ipi_cpu(unsigned int cpu);
+void platform_send_ipi_cpu(unsigned int cpu, int irq);
 
-void platform_clear_ipi(unsigned int cpu);
+void platform_clear_ipi(unsigned int cpu, int irq);
 
 void bfin_local_timer_setup(void);
 
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index f540ed1..1074a7e 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -86,12 +86,12 @@
 
 	spin_lock(&boot_lock);
 
-	if ((bfin_read_SIC_SYSCR() & COREB_SRAM_INIT) == 0) {
+	if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
 		/* CoreB already running, sending ipi to wakeup it */
 		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
 	} else {
 		/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
-		bfin_write_SIC_SYSCR(bfin_read_SIC_SYSCR() & ~COREB_SRAM_INIT);
+		bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
 		SSYNC();
 	}
 
@@ -111,41 +111,46 @@
 		panic("CPU%u: processor failed to boot\n", cpu);
 }
 
-void __init platform_request_ipi(irq_handler_t handler)
+static const char supple0[] = "IRQ_SUPPLE_0";
+static const char supple1[] = "IRQ_SUPPLE_1";
+void __init platform_request_ipi(int irq, void *handler)
 {
 	int ret;
+	const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
 
-	ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED,
-			  "Supplemental Interrupt0", handler);
+	ret = request_irq(irq, handler, IRQF_DISABLED | IRQF_PERCPU, name, handler);
 	if (ret)
-		panic("Cannot request supplemental interrupt 0 for IPI service");
+		panic("Cannot request %s for IPI service", name);
 }
 
-void platform_send_ipi(cpumask_t callmap)
+void platform_send_ipi(cpumask_t callmap, int irq)
 {
 	unsigned int cpu;
+	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
 
 	for_each_cpu_mask(cpu, callmap) {
 		BUG_ON(cpu >= 2);
 		SSYNC();
-		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
 		SSYNC();
 	}
 }
 
-void platform_send_ipi_cpu(unsigned int cpu)
+void platform_send_ipi_cpu(unsigned int cpu, int irq)
 {
+	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
 	BUG_ON(cpu >= 2);
 	SSYNC();
-	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
 	SSYNC();
 }
 
-void platform_clear_ipi(unsigned int cpu)
+void platform_clear_ipi(unsigned int cpu, int irq)
 {
+	int offset = (irq == IRQ_SUPPLE_0) ? 10 : 12;
 	BUG_ON(cpu >= 2);
 	SSYNC();
-	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu)));
+	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
 	SSYNC();
 }
 
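Note on the bf561 smp.c change above: the added irq argument selects which pair of SICB_SYSCR bits is written. Supplemental interrupt 0 is raised via bits 6 and 7 and acknowledged via bits 10 and 11; supplemental interrupt 1 uses bits 8 and 9 to raise and 12 and 13 to acknowledge, one bit per core. The helpers below merely restate that mapping and are illustrative, not part of the patch:

static inline u32 ipi_raise_bit(unsigned int cpu, int irq)
{
	return 1 << (((irq == IRQ_SUPPLE_0) ? 6 : 8) + cpu);
}

static inline u32 ipi_clear_bit(unsigned int cpu, int irq)
{
	return 1 << (((irq == IRQ_SUPPLE_0) ? 10 : 12) + cpu);
}
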
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 2ca915e..bc08c98 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -615,7 +615,7 @@
 #ifdef CONFIG_IPIPE
 	r0 = sp;
 	SP += -12;
-	call ___ipipe_syscall_root;
+	pseudo_long_call ___ipipe_syscall_root, p0;
 	SP += 12;
 	cc = r0 == 1;
 	if cc jump .Lsyscall_really_exit;
@@ -692,7 +692,7 @@
 	[--sp] = reti;
 	SP += 4; /* don't merge with next insn to keep the pattern obvious */
 	SP += -12;
-	call ___ipipe_sync_root;
+	pseudo_long_call ___ipipe_sync_root, p4;
 	SP += 12;
 	jump .Lresume_userspace_1;
 .Lsyscall_no_irqsync:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index da7e3c6..a604f19 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -866,7 +866,6 @@
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	pint[bank]->request = pintbit;
 	pint[bank]->mask_set = pintbit;
 }
 
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 80884b1..3c648a0 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -23,9 +23,6 @@
 
 void bfin_pm_suspend_standby_enter(void)
 {
-	unsigned long flags;
-
-	flags = hard_local_irq_save();
 	bfin_pm_standby_setup();
 
 #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -55,8 +52,6 @@
 #else
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
-
-	hard_local_irq_restore(flags);
 }
 
 int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -127,7 +122,6 @@
 
 int bfin_pm_suspend_mem_enter(void)
 {
-	unsigned long flags;
 	int wakeup, ret;
 
 	unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
@@ -149,12 +143,9 @@
 	wakeup |= GPWE;
 #endif
 
-	flags = hard_local_irq_save();
-
 	ret = blackfin_dma_suspend();
 
 	if (ret) {
-		hard_local_irq_restore(flags);
 		kfree(memptr);
 		return ret;
 	}
@@ -178,7 +169,6 @@
 	bfin_gpio_pm_hibernate_restore();
 	blackfin_dma_resume();
 
-	hard_local_irq_restore(flags);
 	kfree(memptr);
 
 	return 0;
@@ -233,7 +223,7 @@
 	return 0;
 }
 
-struct platform_suspend_ops bfin_pm_ops = {
+static const struct platform_suspend_ops bfin_pm_ops = {
 	.enter = bfin_pm_enter,
 	.valid	= bfin_pm_valid,
 };
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index a17107a..9f25140 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/cpumask.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
@@ -43,12 +44,6 @@
 	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
 	*init_saved_dcplb_fault_addr_coreb;
 
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 #define BFIN_IPI_RESCHEDULE   0
 #define BFIN_IPI_CALL_FUNC    1
 #define BFIN_IPI_CPU_STOP     2
@@ -65,8 +60,7 @@
 	void (*func)(void *info);
 	void *info;
 	int wait;
-	cpumask_t pending;
-	cpumask_t waitmask;
+	cpumask_t *waitmask;
 };
 
 static struct blackfin_flush_data smp_flush_data;
@@ -74,15 +68,19 @@
 static DEFINE_SPINLOCK(stop_lock);
 
 struct ipi_message {
-	struct list_head list;
 	unsigned long type;
 	struct smp_call_struct call_struct;
 };
 
+/* A magic number - stress test shows this is safe for common cases */
+#define BFIN_IPI_MSGQ_LEN 5
+
+/* Simple FIFO buffer, overflow leads to panic */
 struct ipi_message_queue {
-	struct list_head head;
 	spinlock_t lock;
 	unsigned long count;
+	unsigned long head; /* head of the queue */
+	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
 };
 
 static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
@@ -121,7 +119,6 @@
 	func = msg->call_struct.func;
 	info = msg->call_struct.info;
 	wait = msg->call_struct.wait;
-	cpu_clear(cpu, msg->call_struct.pending);
 	func(info);
 	if (wait) {
 #ifdef __ARCH_SYNC_CORE_DCACHE
@@ -132,51 +129,57 @@
 		 */
 		resync_core_dcache();
 #endif
-		cpu_clear(cpu, msg->call_struct.waitmask);
-	} else
-		kfree(msg);
+		cpu_clear(cpu, *msg->call_struct.waitmask);
+	}
 }
 
-static irqreturn_t ipi_handler(int irq, void *dev_instance)
+/* Use IRQ_SUPPLE_0 to request reschedule.
+ * When returning from interrupt to user space,
+ * there is chance to reschedule */
+static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
+{
+	unsigned int cpu = smp_processor_id();
+
+	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
 	struct ipi_message *msg;
 	struct ipi_message_queue *msg_queue;
 	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
 
-	platform_clear_ipi(cpu);
+	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
 	msg_queue = &__get_cpu_var(ipi_msg_queue);
-	msg_queue->count++;
 
-	spin_lock(&msg_queue->lock);
-	while (!list_empty(&msg_queue->head)) {
-		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
-		list_del(&msg->list);
+	spin_lock_irqsave(&msg_queue->lock, flags);
+
+	while (msg_queue->count) {
+		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
-		case BFIN_IPI_RESCHEDULE:
-			/* That's the easiest one; leave it to
-			 * return_from_int. */
-			kfree(msg);
-			break;
 		case BFIN_IPI_CALL_FUNC:
-			spin_unlock(&msg_queue->lock);
+			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_call_function(cpu, msg);
-			spin_lock(&msg_queue->lock);
+			spin_lock_irqsave(&msg_queue->lock, flags);
 			break;
 		case BFIN_IPI_CPU_STOP:
-			spin_unlock(&msg_queue->lock);
+			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_cpu_stop(cpu);
-			spin_lock(&msg_queue->lock);
-			kfree(msg);
+			spin_lock_irqsave(&msg_queue->lock, flags);
 			break;
 		default:
 			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
 			       cpu, msg->type);
-			kfree(msg);
 			break;
 		}
+		msg_queue->head++;
+		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
+		msg_queue->count--;
 	}
-	spin_unlock(&msg_queue->lock);
+	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -186,48 +189,47 @@
 	struct ipi_message_queue *msg_queue;
 	for_each_possible_cpu(cpu) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		INIT_LIST_HEAD(&msg_queue->head);
 		spin_lock_init(&msg_queue->lock);
 		msg_queue->count = 0;
+		msg_queue->head = 0;
 	}
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+static inline void smp_send_message(cpumask_t callmap, unsigned long type,
+					void (*func) (void *info), void *info, int wait)
 {
 	unsigned int cpu;
-	cpumask_t callmap;
-	unsigned long flags;
 	struct ipi_message_queue *msg_queue;
 	struct ipi_message *msg;
-
-	callmap = cpu_online_map;
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		return 0;
-
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&msg->list);
-	msg->call_struct.func = func;
-	msg->call_struct.info = info;
-	msg->call_struct.wait = wait;
-	msg->call_struct.pending = callmap;
-	msg->call_struct.waitmask = callmap;
-	msg->type = BFIN_IPI_CALL_FUNC;
+	unsigned long flags, next_msg;
+	cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */
 
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add_tail(&msg->list, &msg_queue->head);
+		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
+			next_msg = (msg_queue->head + msg_queue->count)
+					% BFIN_IPI_MSGQ_LEN;
+			msg = &msg_queue->ipi_message[next_msg];
+			msg->type = type;
+			if (type == BFIN_IPI_CALL_FUNC) {
+				msg->call_struct.func = func;
+				msg->call_struct.info = info;
+				msg->call_struct.wait = wait;
+				msg->call_struct.waitmask = &waitmask;
+			}
+			msg_queue->count++;
+		} else
+			panic("IPI message queue overflow\n");
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
-		platform_send_ipi_cpu(cpu);
+		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
+
 	if (wait) {
-		while (!cpus_empty(msg->call_struct.waitmask))
+		while (!cpus_empty(waitmask))
 			blackfin_dcache_invalidate_range(
-				(unsigned long)(&msg->call_struct.waitmask),
-				(unsigned long)(&msg->call_struct.waitmask));
+				(unsigned long)(&waitmask),
+				(unsigned long)(&waitmask));
 #ifdef __ARCH_SYNC_CORE_DCACHE
 		/*
 		 * Invalidate D cache in case shared data was changed by
@@ -235,8 +237,20 @@
 		 */
 		resync_core_dcache();
 #endif
-		kfree(msg);
 	}
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+	cpumask_t callmap;
+
+	callmap = cpu_online_map;
+	cpu_clear(smp_processor_id(), callmap);
+	if (cpus_empty(callmap))
+		return 0;
+
+	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function);
@@ -246,100 +260,39 @@
 {
 	unsigned int cpu = cpuid;
 	cpumask_t callmap;
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
 
 	if (cpu_is_offline(cpu))
 		return 0;
 	cpus_clear(callmap);
 	cpu_set(cpu, callmap);
 
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&msg->list);
-	msg->call_struct.func = func;
-	msg->call_struct.info = info;
-	msg->call_struct.wait = wait;
-	msg->call_struct.pending = callmap;
-	msg->call_struct.waitmask = callmap;
-	msg->type = BFIN_IPI_CALL_FUNC;
+	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
 
-	msg_queue = &per_cpu(ipi_msg_queue, cpu);
-	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add_tail(&msg->list, &msg_queue->head);
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
-	platform_send_ipi_cpu(cpu);
-
-	if (wait) {
-		while (!cpus_empty(msg->call_struct.waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&msg->call_struct.waitmask),
-				(unsigned long)(&msg->call_struct.waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		kfree(msg);
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-
+	/* simply trigger an ipi */
 	if (cpu_is_offline(cpu))
 		return;
-
-	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return;
-	INIT_LIST_HEAD(&msg->list);
-	msg->type = BFIN_IPI_RESCHEDULE;
-
-	msg_queue = &per_cpu(ipi_msg_queue, cpu);
-	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add_tail(&msg->list, &msg_queue->head);
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
-	platform_send_ipi_cpu(cpu);
+	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
 
 	return;
 }
 
 void smp_send_stop(void)
 {
-	unsigned int cpu;
 	cpumask_t callmap;
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
 
 	callmap = cpu_online_map;
 	cpu_clear(smp_processor_id(), callmap);
 	if (cpus_empty(callmap))
 		return;
 
-	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return;
-	INIT_LIST_HEAD(&msg->list);
-	msg->type = BFIN_IPI_CPU_STOP;
+	smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
 
-	for_each_cpu_mask(cpu, callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add_tail(&msg->list, &msg_queue->head);
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
-		platform_send_ipi_cpu(cpu);
-	}
 	return;
 }
 
@@ -446,7 +399,8 @@
 {
 	platform_prepare_cpus(max_cpus);
 	ipi_queue_init();
-	platform_request_ipi(&ipi_handler);
+	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
+	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
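
Note on the mach-common/smp.c rework above: the kmalloc'd linked list of IPI messages is replaced by a fixed, five-entry per-CPU FIFO protected by a spinlock that panics on overflow, and the waitmask now lives on the sender's stack and is shared by all targets. The following is a self-contained sketch of the ring-buffer discipline only, with a hypothetical struct msg standing in for struct ipi_message and its call_struct payload:

#include <linux/spinlock.h>
#include <linux/kernel.h>

#define MSGQ_LEN 5				/* matches BFIN_IPI_MSGQ_LEN */

struct msg { unsigned long type; };

struct msg_queue {
	spinlock_t lock;
	unsigned long count;			/* entries currently queued  */
	unsigned long head;			/* index of the oldest entry */
	struct msg ring[MSGQ_LEN];
};

/* Producer side: enqueue at head + count, as smp_send_message() does. */
static void msgq_put(struct msg_queue *q, unsigned long type)
{
	unsigned long flags, tail;

	spin_lock_irqsave(&q->lock, flags);
	if (q->count >= MSGQ_LEN)
		panic("IPI message queue overflow\n");
	tail = (q->head + q->count) % MSGQ_LEN;
	q->ring[tail].type = type;
	q->count++;
	spin_unlock_irqrestore(&q->lock, flags);
}

/* Consumer side: drain from head, as ipi_handler_int1() does, dropping
 * the lock while each message is handled. */
static void msgq_drain(struct msg_queue *q, void (*handle)(unsigned long))
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (q->count) {
		unsigned long type = q->ring[q->head].type;

		spin_unlock_irqrestore(&q->lock, flags);
		handle(type);
		spin_lock_irqsave(&q->lock, flags);

		q->head = (q->head + 1) % MSGQ_LEN;
		q->count--;
	}
	spin_unlock_irqrestore(&q->lock, flags);
}
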
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 627e04b..dfd304a 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -704,18 +704,18 @@
 {
 	struct sram_list_struct *lsl, **tmp;
 	struct mm_struct *mm = current->mm;
+	int ret = -1;
 
 	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
-		if ((*tmp)->addr == addr)
-			goto found;
-	return -1;
-found:
-	lsl = *tmp;
-	sram_free(addr);
-	*tmp = lsl->next;
-	kfree(lsl);
+		if ((*tmp)->addr == addr) {
+			lsl = *tmp;
+			ret = sram_free(addr);
+			*tmp = lsl->next;
+			kfree(lsl);
+			break;
+		}
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(sram_free_with_lsl);
 
diff --git a/arch/cris/arch-v32/lib/nand_init.S b/arch/cris/arch-v32/lib/nand_init.S
index e705f5c..d671fed 100644
--- a/arch/cris/arch-v32/lib/nand_init.S
+++ b/arch/cris/arch-v32/lib/nand_init.S
@@ -139,7 +139,7 @@
 	lsrq	8, $r4
 	move.b	$r4, [$r1]	; Row address
 	lsrq	8, $r4
-	move.b	$r4, [$r1]	; Row adddress
+	move.b	$r4, [$r1]	; Row address
 	moveq	20, $r4
 2:	bne	2b
 	subq	1, $r4
diff --git a/arch/cris/include/asm/etraxgpio.h b/arch/cris/include/asm/etraxgpio.h
index d474818..461c089 100644
--- a/arch/cris/include/asm/etraxgpio.h
+++ b/arch/cris/include/asm/etraxgpio.h
@@ -1,5 +1,5 @@
 /*
- * The following devices are accessable using this driver using
+ * The following devices are accessible using this driver using
  * GPIO_MAJOR (120) and a couple of minor numbers.
  *
  * For ETRAX 100LX (CONFIG_ETRAX_ARCH_V10):
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index b509643..4e73092 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -86,7 +86,7 @@
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
index ee671c3..e8d1b23 100644
--- a/arch/h8300/Kconfig.debug
+++ b/arch/h8300/Kconfig.debug
@@ -48,7 +48,7 @@
 	  builtin kernel commandline enabled.
 
 config KERNEL_COMMAND
-	string "Buildin commmand string"
+	string "Buildin command string"
 	depends on DEFAULT_CMDLINE
 	help
 	  builtin kernel commandline strings.
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index cc8335e..e5a6c35 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -426,6 +426,11 @@
 extern void iounmap (volatile void __iomem *addr);
 extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
 extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
+static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
+{
+	return ioremap(phys_addr, size);
+}
+
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2f229e5..2689ee5 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -590,6 +590,10 @@
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index 41b6d31..961a16f 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -189,6 +189,7 @@
 # define pgprot_val(x)	((x).pgprot)
 
 # define __pte(x)	((pte_t) { (x) } )
+# define __pmd(x)	((pmd_t) { (x) } )
 # define __pgprot(x)	((pgprot_t) { (x) } )
 
 #else /* !STRICT_MM_TYPECHECKS */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 348e44d..03afe79 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -717,8 +717,9 @@
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c6c90f3..90ebceb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -477,6 +477,12 @@
 	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
 		return;
 
+	if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) {
+		printk_once(KERN_WARNING
+			    "node_cpuid[%ld] is too small, may not be able to use all cpus\n",
+			    ARRAY_SIZE(node_cpuid));
+		return;
+	}
 	pxm = get_processor_proximity_domain(pa);
 
 	/* record this node in proximity bitmap */
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 9a26015..38c07b8 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -633,7 +633,7 @@
 	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
 	desc = irq_desc + irq;
 	desc->status |= IRQ_PER_CPU;
-	desc->chip = &irq_type_ia64_lsapic;
+	set_irq_chip(irq, &irq_type_ia64_lsapic);
 	if (action)
 		setup_irq(irq, action);
 	set_irq_handler(irq, handle_percpu_irq);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 39e534f..89accc6 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -617,11 +617,14 @@
 	return get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
+/* forward declaration */
+static const struct dentry_operations pfmfs_dentry_operations;
 
 static struct dentry *
 pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
+			PFMFS_MAGIC);
 }
 
 static struct file_system_type pfm_fs_type = {
@@ -829,10 +832,9 @@
 	unsigned long addr;
 
 	size = PAGE_ALIGN(size);
-	mem  = vmalloc(size);
+	mem  = vzalloc(size);
 	if (mem) {
 		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		memset(mem, 0, size);
 		addr = (unsigned long)mem;
 		while (size > 0) {
 			pfm_reserve_page(addr);
@@ -1542,7 +1544,7 @@
  * any operations on the root directory. However, we need a non-trivial
  * d_name - pfm: will go nicely and kill the special-casing in procfs.
  */
-static struct vfsmount *pfmfs_mnt;
+static struct vfsmount *pfmfs_mnt __read_mostly;
 
 static int __init
 init_pfm_fs(void)
@@ -2185,7 +2187,7 @@
 };
 
 static int
-pfmfs_delete_dentry(struct dentry *dentry)
+pfmfs_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -2233,7 +2235,6 @@
 	}
 	path.mnt = mntget(pfmfs_mnt);
 
-	path.dentry->d_op = &pfmfs_dentry_operations;
 	d_add(path.dentry, inode);
 
 	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 16f1c7b..6d33c5c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -53,12 +53,8 @@
 
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index dabeefe..be450a3 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -293,6 +293,7 @@
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+	cpumask_var_t cpus;
 	preempt_disable();
 	/* this happens for the common case of a single-threaded fork():  */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
@@ -301,9 +302,15 @@
 		preempt_enable();
 		return;
 	}
-
-	smp_call_function_many(mm_cpumask(mm),
-		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
+			mm, 1);
+	} else {
+		cpumask_copy(cpus, mm_cpumask(mm));
+		smp_call_function_many(cpus,
+			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+		free_cpumask_var(cpus);
+	}
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);
 	local_irq_enable();
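
Note on the ia64 smp.c hunk above: mm_cpumask(mm) is copied into a privately allocated cpumask before being handed to smp_call_function_many(), with a plain broadcast as the fallback when the atomic allocation fails. A minimal sketch of that allocate-or-fall-back pattern, using a hypothetical wrapper name:

static void flush_mm_on_other_cpus(struct mm_struct *mm)
{
	cpumask_var_t cpus;

	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		/* No memory for a snapshot: broadcast to every other CPU. */
		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
				  mm, 1);
		return;
	}

	/* Snapshot the mask so it cannot change while the IPIs are sent. */
	cpumask_copy(cpus, mm_cpumask(mm));
	smp_call_function_many(cpus,
			       (void (*)(void *))local_finish_flush_tlb_mm,
			       mm, 1);
	free_cpumask_var(cpus);
}
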
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed6f22e..9702fa9 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -168,7 +168,7 @@
 {
 	unsigned long new_itm;
 
-	if (unlikely(cpu_is_offline(smp_processor_id()))) {
+	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f56a631..70d224d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -749,7 +749,7 @@
 	return r;
 }
 
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
 {
 
 	struct kvm *kvm;
@@ -760,7 +760,7 @@
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
 	kvm = (struct kvm *)(vm_base +
@@ -806,10 +806,12 @@
 #define GUEST_PHYSICAL_RR4	0x2739
 #define VMM_INIT_RR		0x1660
 
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
 	BUG_ON(!kvm);
 
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +825,8 @@
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
 
-struct  kvm *kvm_arch_create_vm(void)
-{
-	struct kvm *kvm = kvm_alloc_kvm();
-
-	if (IS_ERR(kvm))
-		return ERR_PTR(-ENOMEM);
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm_init_vm(kvm);
-
-	return kvm;
-
+	return 0;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -962,7 +951,9 @@
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			kvm_ioapic_destroy(kvm);
+			mutex_unlock(&kvm->slots_lock);
 			goto out;
 		}
 		break;
@@ -1357,7 +1348,7 @@
 	return -EINVAL;
 }
 
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
 
@@ -1399,9 +1390,6 @@
 #endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	free_kvm(kvm);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index fb8f9f5..f1e17d3 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -130,7 +130,7 @@
 
 	local_irq_save(psr);
 
-	/*Intercept the acces for PIB range*/
+	/*Intercept the access for PIB range*/
 	if (iot == GPFN_PIB) {
 		if (!dir)
 			lsapic_write(vcpu, src_pa, s, *dest);
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 1841ee7..5ca674b 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -38,7 +38,7 @@
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, taddr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, taddr);
+			pte = pte_alloc_map(mm, NULL, pmd, taddr);
 	}
 	return pte;
 }
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
index 73613b5..26e85e2 100644
--- a/arch/m68k/ifpsp060/src/fpsp.S
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -3881,7 +3881,7 @@
 # FP Unimplemented Instruction stack frame and jump to that entry
 # point.
 #
-# but, if the FPU is disabled, then we need to jump to the FPU diabled
+# but, if the FPU is disabled, then we need to jump to the FPU disabled
 # entry point.
 	movc		%pcr,%d0
 	btst		&0x1,%d0
diff --git a/arch/m68k/include/asm/m548xgpt.h b/arch/m68k/include/asm/m548xgpt.h
index c8ef158..33b2eef 100644
--- a/arch/m68k/include/asm/m548xgpt.h
+++ b/arch/m68k/include/asm/m548xgpt.h
@@ -59,11 +59,13 @@
 #define MCF_GPT_GMS_GPIO_INPUT     (0x00000000)
 #define MCF_GPT_GMS_GPIO_OUTLO     (0x00000020)
 #define MCF_GPT_GMS_GPIO_OUTHI     (0x00000030)
+#define MCF_GPT_GMS_GPIO_MASK      (0x00000030)
 #define MCF_GPT_GMS_TMS_DISABLE    (0x00000000)
 #define MCF_GPT_GMS_TMS_INCAPT     (0x00000001)
 #define MCF_GPT_GMS_TMS_OUTCAPT    (0x00000002)
 #define MCF_GPT_GMS_TMS_PWM        (0x00000003)
 #define MCF_GPT_GMS_TMS_GPIO       (0x00000004)
+#define MCF_GPT_GMS_TMS_MASK       (0x00000007)
 
 /* Bit definitions and macros for MCF_GPT_GCIR */
 #define MCF_GPT_GCIR_CNT(x)        (((x)&0x0000FFFF)<<0)
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index cf5fad9..f55aa04 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -217,9 +217,8 @@
 /* Find an entry in the third-level pagetable. */
 #define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
 #define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
-/* FIXME: should we bother with kmap() here? */
-#define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + pte_index(address))
-#define pte_unmap(pte) kunmap(pte)
+#define pte_offset_map(pmd, address) ((pte_t *)page_address(pmd_page(*pmd)) + pte_index(address))
+#define pte_unmap(pte) do { } while (0)
 
 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x)		((x).val & 0x7F)
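
Sun-3 never has highmem, so the page holding a page table is always in the kernel's linear map and the kmap()/kunmap() pair added nothing here; worse, the old pte_unmap() handed the pte pointer back to kunmap(), which expects a struct page, so it was at best confusing. The new definitions make the map step plain pointer arithmetic and the unmap step a no-op. A toy stand-alone caricature of that contract (nothing below is the real sun3 code):

	/* the "map" is pointer arithmetic, the "unmap" undoes nothing */
	typedef unsigned long pte_t;

	static pte_t pte_page_contents[1024];	/* stands in for the PTE page */

	#define toy_pte_offset_map(idx)	(&pte_page_contents[(idx)])
	#define toy_pte_unmap(pte)	do { } while (0)

	int main(void)
	{
		pte_t *pte = toy_pte_offset_map(5);

		*pte = 0x1000;			/* use the "mapping" */
		toy_pte_unmap(pte);		/* nothing to undo without highmem */
		return pte_page_contents[5] == 0x1000 ? 0 : 1;
	}
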
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 1da5d53..7909889 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -104,5 +104,6 @@
 #define TIF_SYSCALL_TRACE	15	/* syscall trace active */
 #define TIF_MEMDIE		16	/* is terminating due to OOM killer */
 #define TIF_FREEZE		17	/* thread is freezing for suspend */
+#define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal */
 
 #endif	/* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index b43b36b..26d851d 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -373,6 +373,7 @@
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 
 /*
  * "Conditional" syscalls
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 6360c43..1559dea 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -99,7 +99,10 @@
 	jra	.Lret_from_exception
 
 ENTRY(ret_from_signal)
-	RESTORE_SWITCH_STACK
+	tstb	%curptr@(TASK_INFO+TINFO_FLAGS+2)
+	jge	1f
+	jbsr	syscall_trace
+1:	RESTORE_SWITCH_STACK
 	addql	#4,%sp
 /* on 68040 complete pending writebacks if any */
 #ifdef CONFIG_M68040
@@ -174,16 +177,11 @@
 	subql	#4,%sp			| dummy return address
 	SAVE_SWITCH_STACK
 	pea	%sp@(SWITCH_STACK_SIZE)
-	clrl	%sp@-
 	bsrl	do_signal
-	addql	#8,%sp
+	addql	#4,%sp
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	tstl	%d0
-	jeq	resume_userspace
-	| when single stepping into handler stop at the first insn
-	btst	#6,%curptr@(TASK_INFO+TINFO_FLAGS+2)
-	jeq	resume_userspace
+	jbra	resume_userspace
 
 do_delayed_trace:
 	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
@@ -290,22 +288,6 @@
 	RESTORE_SWITCH_STACK
 	rts
 
-ENTRY(sys_sigsuspend)
-	SAVE_SWITCH_STACK
-	pea	%sp@(SWITCH_STACK_SIZE)
-	jbsr	do_sigsuspend
-	addql	#4,%sp
-	RESTORE_SWITCH_STACK
-	rts
-
-ENTRY(sys_rt_sigsuspend)
-	SAVE_SWITCH_STACK
-	pea	%sp@(SWITCH_STACK_SIZE)
-	jbsr	do_rt_sigsuspend
-	addql	#4,%sp
-	RESTORE_SWITCH_STACK
-	rts
-
 ENTRY(sys_sigreturn)
 	SAVE_SWITCH_STACK
 	jbsr	do_sigreturn
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 4b38753..d12c3b0 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -51,8 +51,6 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
-
 const int frame_extra_sizes[16] = {
   [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
   [2]	= sizeof(((struct frame *)0)->un.fmt2),
@@ -74,51 +72,21 @@
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int do_sigsuspend(struct pt_regs *regs)
+asmlinkage int
+sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
 {
-	old_sigset_t mask = regs->d3;
-	sigset_t saveset;
-
 	mask &= _BLOCKABLE;
-	saveset = current->blocked;
+	spin_lock_irq(&current->sighand->siglock);
+	current->saved_sigmask = current->blocked;
 	siginitset(&current->blocked, mask);
 	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
 
-	regs->d0 = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs))
-			return -EINTR;
-	}
-}
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_restore_sigmask();
 
-asmlinkage int
-do_rt_sigsuspend(struct pt_regs *regs)
-{
-	sigset_t __user *unewset = (sigset_t __user *)regs->d1;
-	size_t sigsetsize = (size_t)regs->d2;
-	sigset_t saveset, newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's.  */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, ~_BLOCKABLE);
-
-	saveset = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-
-	regs->d0 = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs))
-			return -EINTR;
-	}
+	return -ERESTARTNOHAND;
 }
 
 asmlinkage int
@@ -132,10 +100,10 @@
 		old_sigset_t mask;
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
 
@@ -144,10 +112,10 @@
 	if (!ret && oact) {
 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
 
 	return ret;
@@ -318,36 +286,10 @@
 	return err;
 }
 
-static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
-		   int *pd0)
+static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
+			       void __user *fp)
 {
-	int fsize, formatvec;
-	struct sigcontext context;
-	int err;
-
-	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-	/* get previous context */
-	if (copy_from_user(&context, usc, sizeof(context)))
-		goto badframe;
-
-	/* restore passed registers */
-	regs->d1 = context.sc_d1;
-	regs->a0 = context.sc_a0;
-	regs->a1 = context.sc_a1;
-	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
-	regs->pc = context.sc_pc;
-	regs->orig_d0 = -1;		/* disable syscall checks */
-	wrusp(context.sc_usp);
-	formatvec = context.sc_formatvec;
-	regs->format = formatvec >> 12;
-	regs->vector = formatvec & 0xfff;
-
-	err = restore_fpu_state(&context);
-
-	fsize = frame_extra_sizes[regs->format];
+	int fsize = frame_extra_sizes[formatvec >> 12];
 	if (fsize < 0) {
 		/*
 		 * user process trying to return with weird frame format
@@ -355,16 +297,22 @@
 #ifdef DEBUG
 		printk("user process returning with weird frame format\n");
 #endif
-		goto badframe;
+		return 1;
 	}
-
-	/* OK.	Make room on the supervisor stack for the extra junk,
-	 * if necessary.
-	 */
-
-	if (fsize) {
+	if (!fsize) {
+		regs->format = formatvec >> 12;
+		regs->vector = formatvec & 0xfff;
+	} else {
 		struct switch_stack *sw = (struct switch_stack *)regs - 1;
-		regs->d0 = context.sc_d0;
+		unsigned long buf[fsize / 2]; /* yes, twice as much */
+
+		/* that'll make sure that expansion won't crap over data */
+		if (copy_from_user(buf + fsize / 4, fp, fsize))
+			return 1;
+
+		/* point of no return */
+		regs->format = formatvec >> 12;
+		regs->vector = formatvec & 0xfff;
 #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
 		__asm__ __volatile__
 			("   movel %0,%/a0\n\t"
@@ -376,30 +324,50 @@
 			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
 			 "   lsrl  #2,%1\n\t"
 			 "   subql #1,%1\n\t"
-			 "2: movesl %4@+,%2\n\t"
-			 "3: movel %2,%/a0@+\n\t"
+			 /* copy to the gap we'd made */
+			 "2: movel %4@+,%/a0@+\n\t"
 			 "   dbra %1,2b\n\t"
 			 "   bral ret_from_signal\n"
-			 "4:\n"
-			 ".section __ex_table,\"a\"\n"
-			 "   .align 4\n"
-			 "   .long 2b,4b\n"
-			 "   .long 3b,4b\n"
-			 ".previous"
 			 : /* no outputs, it doesn't ever return */
 			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
-			   "n" (frame_offset), "a" (fp)
+			   "n" (frame_offset), "a" (buf + fsize/4)
 			 : "a0");
 #undef frame_offset
-		/*
-		 * If we ever get here an exception occurred while
-		 * building the above stack-frame.
-		 */
-		goto badframe;
 	}
+	return 0;
+}
 
-	*pd0 = context.sc_d0;
-	return err;
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
+{
+	int formatvec;
+	struct sigcontext context;
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	/* get previous context */
+	if (copy_from_user(&context, usc, sizeof(context)))
+		goto badframe;
+
+	/* restore passed registers */
+	regs->d0 = context.sc_d0;
+	regs->d1 = context.sc_d1;
+	regs->a0 = context.sc_a0;
+	regs->a1 = context.sc_a1;
+	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+	regs->pc = context.sc_pc;
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	wrusp(context.sc_usp);
+	formatvec = context.sc_formatvec;
+
+	err = restore_fpu_state(&context);
+
+	if (err || mangle_kernel_stack(regs, formatvec, fp))
+		goto badframe;
+
+	return 0;
 
 badframe:
 	return 1;
@@ -407,9 +375,9 @@
 
 static inline int
 rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
-		    struct ucontext __user *uc, int *pd0)
+		    struct ucontext __user *uc)
 {
-	int fsize, temp;
+	int temp;
 	greg_t __user *gregs = uc->uc_mcontext.gregs;
 	unsigned long usp;
 	int err;
@@ -443,65 +411,16 @@
 	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
 	regs->orig_d0 = -1;		/* disable syscall checks */
 	err |= __get_user(temp, &uc->uc_formatvec);
-	regs->format = temp >> 12;
-	regs->vector = temp & 0xfff;
 
 	err |= rt_restore_fpu_state(uc);
 
-	if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
+	if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
 		goto badframe;
 
-	fsize = frame_extra_sizes[regs->format];
-	if (fsize < 0) {
-		/*
-		 * user process trying to return with weird frame format
-		 */
-#ifdef DEBUG
-		printk("user process returning with weird frame format\n");
-#endif
+	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
 		goto badframe;
-	}
 
-	/* OK.	Make room on the supervisor stack for the extra junk,
-	 * if necessary.
-	 */
-
-	if (fsize) {
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-		__asm__ __volatile__
-			("   movel %0,%/a0\n\t"
-			 "   subl %1,%/a0\n\t"     /* make room on stack */
-			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
-			 /* move switch_stack and pt_regs */
-			 "1: movel %0@+,%/a0@+\n\t"
-			 "   dbra %2,1b\n\t"
-			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
-			 "   lsrl  #2,%1\n\t"
-			 "   subql #1,%1\n\t"
-			 "2: movesl %4@+,%2\n\t"
-			 "3: movel %2,%/a0@+\n\t"
-			 "   dbra %1,2b\n\t"
-			 "   bral ret_from_signal\n"
-			 "4:\n"
-			 ".section __ex_table,\"a\"\n"
-			 "   .align 4\n"
-			 "   .long 2b,4b\n"
-			 "   .long 3b,4b\n"
-			 ".previous"
-			 : /* no outputs, it doesn't ever return */
-			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
-			   "n" (frame_offset), "a" (&uc->uc_extra)
-			 : "a0");
-#undef frame_offset
-		/*
-		 * If we ever get here an exception occurred while
-		 * building the above stack-frame.
-		 */
-		goto badframe;
-	}
-
-	*pd0 = regs->d0;
-	return err;
+	return 0;
 
 badframe:
 	return 1;
@@ -514,7 +433,6 @@
 	unsigned long usp = rdusp();
 	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 	sigset_t set;
-	int d0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -528,9 +446,9 @@
 	current->blocked = set;
 	recalc_sigpending();
 
-	if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
+	if (restore_sigcontext(regs, &frame->sc, frame + 1))
 		goto badframe;
-	return d0;
+	return regs->d0;
 
 badframe:
 	force_sig(SIGSEGV, current);
@@ -544,7 +462,6 @@
 	unsigned long usp = rdusp();
 	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 	sigset_t set;
-	int d0;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
@@ -555,9 +472,9 @@
 	current->blocked = set;
 	recalc_sigpending();
 
-	if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
+	if (rt_restore_ucontext(regs, sw, &frame->uc))
 		goto badframe;
-	return d0;
+	return regs->d0;
 
 badframe:
 	force_sig(SIGSEGV, current);
@@ -775,7 +692,7 @@
 	return (void __user *)((usp - frame_size) & -8UL);
 }
 
-static void setup_frame (int sig, struct k_sigaction *ka,
+static int setup_frame (int sig, struct k_sigaction *ka,
 			 sigset_t *set, struct pt_regs *regs)
 {
 	struct sigframe __user *frame;
@@ -793,10 +710,8 @@
 
 	frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
 
-	if (fsize) {
+	if (fsize)
 		err |= copy_to_user (frame + 1, regs + 1, fsize);
-		regs->stkadj = fsize;
-	}
 
 	err |= __put_user((current_thread_info()->exec_domain
 			   && current_thread_info()->exec_domain->signal_invmap
@@ -826,11 +741,21 @@
 
 	push_cache ((unsigned long) &frame->retcode);
 
-	/* Set up registers for signal handler */
+	/*
+	 * Set up registers for signal handler.  All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
 	wrusp ((unsigned long) frame);
 	regs->pc = (unsigned long) ka->sa.sa_handler;
 
-adjust_stack:
+	/*
+	 * This is subtle; if we build more than one sigframe, all but the
+	 * first one will see frame format 0 and have fsize == 0, so we won't
+	 * screw stkadj.
+	 */
+	if (fsize)
+		regs->stkadj = fsize;
+
 	/* Prepare to skip over the extra stuff in the exception frame.  */
 	if (regs->stkadj) {
 		struct pt_regs *tregs =
@@ -845,14 +770,14 @@
 		tregs->pc = regs->pc;
 		tregs->sr = regs->sr;
 	}
-	return;
+	return 0;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
-	goto adjust_stack;
+	return err;
 }
 
-static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
 			    sigset_t *set, struct pt_regs *regs)
 {
 	struct rt_sigframe __user *frame;
@@ -869,10 +794,8 @@
 
 	frame = get_sigframe(ka, regs, sizeof(*frame));
 
-	if (fsize) {
+	if (fsize)
 		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
-		regs->stkadj = fsize;
-	}
 
 	err |= __put_user((current_thread_info()->exec_domain
 			   && current_thread_info()->exec_domain->signal_invmap
@@ -914,11 +837,21 @@
 
 	push_cache ((unsigned long) &frame->retcode);
 
-	/* Set up registers for signal handler */
+	/*
+	 * Set up registers for signal handler.  All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
 	wrusp ((unsigned long) frame);
 	regs->pc = (unsigned long) ka->sa.sa_handler;
 
-adjust_stack:
+	/*
+	 * This is subtle; if we build more than one sigframe, all but the
+	 * first one will see frame format 0 and have fsize == 0, so we won't
+	 * screw stkadj.
+	 */
+	if (fsize)
+		regs->stkadj = fsize;
+
 	/* Prepare to skip over the extra stuff in the exception frame.  */
 	if (regs->stkadj) {
 		struct pt_regs *tregs =
@@ -933,11 +866,11 @@
 		tregs->pc = regs->pc;
 		tregs->sr = regs->sr;
 	}
-	return;
+	return 0;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
-	goto adjust_stack;
+	return err;
 }
 
 static inline void
@@ -995,6 +928,7 @@
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
 	      sigset_t *oldset, struct pt_regs *regs)
 {
+	int err;
 	/* are we from a system call? */
 	if (regs->orig_d0 >= 0)
 		/* If so, check system call restarting.. */
@@ -1002,17 +936,24 @@
 
 	/* set up the stack frame */
 	if (ka->sa.sa_flags & SA_SIGINFO)
-		setup_rt_frame(sig, ka, info, oldset, regs);
+		err = setup_rt_frame(sig, ka, info, oldset, regs);
 	else
-		setup_frame(sig, ka, oldset, regs);
+		err = setup_frame(sig, ka, oldset, regs);
 
-	if (ka->sa.sa_flags & SA_ONESHOT)
-		ka->sa.sa_handler = SIG_DFL;
+	if (err)
+		return;
 
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
 	if (!(ka->sa.sa_flags & SA_NODEFER))
 		sigaddset(&current->blocked,sig);
 	recalc_sigpending();
+
+	if (test_thread_flag(TIF_DELAYED_TRACE)) {
+		regs->sr &= ~0x8000;
+		send_sig(SIGTRAP, current, 1);
+	}
+
+	clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 
 /*
@@ -1020,22 +961,25 @@
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+asmlinkage void do_signal(struct pt_regs *regs)
 {
 	siginfo_t info;
 	struct k_sigaction ka;
 	int signr;
+	sigset_t *oldset;
 
 	current->thread.esp0 = (unsigned long) regs;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
 		handle_signal(signr, &ka, &info, oldset, regs);
-		return 1;
+		return;
 	}
 
 	/* Did we come from a system call? */
@@ -1043,5 +987,9 @@
 		/* Restart the system call - no handlers present */
 		handle_restart(regs, NULL, 0);
 
-	return 0;
+	/* If there's no signal to deliver, we just restore the saved mask.  */
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
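
The signal.c rewrite above converts m68k to the generic "restore sigmask" scheme: sys_sigsuspend() parks the caller's mask in current->saved_sigmask, sleeps, sets TIF_RESTORE_SIGMASK and returns -ERESTARTNOHAND, and do_signal() then either delivers against the saved mask or quietly puts it back, replacing the old hand-rolled schedule() loop in do_sigsuspend(). It also makes setup_frame()/setup_rt_frame() report failure, so register state is only destroyed once the sigframe has actually been written. A toy, single-threaded model of the TIF_RESTORE_SIGMASK hand-off (names are illustrative, not the kernel's):

	#include <stdio.h>

	#define TIF_RESTORE_SIGMASK	0x1

	static unsigned long thread_flags;
	static unsigned long blocked, saved_sigmask;

	static long toy_sigsuspend(unsigned long tmp_mask)
	{
		saved_sigmask = blocked;	/* remember the caller's mask */
		blocked = tmp_mask;		/* block with the temporary one */
		/* ... sleep until a signal arrives ... */
		thread_flags |= TIF_RESTORE_SIGMASK;
		return -1;			/* -ERESTARTNOHAND in the kernel */
	}

	static void toy_do_signal(int have_signal)
	{
		/* deliver against the mask the task had before sigsuspend() */
		unsigned long *oldset = (thread_flags & TIF_RESTORE_SIGMASK)
					? &saved_sigmask : &blocked;

		if (have_signal) {
			printf("deliver, oldset=%#lx\n", *oldset);
			thread_flags &= ~TIF_RESTORE_SIGMASK; /* frame records it now */
			return;
		}
		if (thread_flags & TIF_RESTORE_SIGMASK) {
			thread_flags &= ~TIF_RESTORE_SIGMASK;
			blocked = saved_sigmask; /* nothing delivered: put it back */
		}
	}

	int main(void)
	{
		blocked = 0x10;
		toy_sigsuspend(0);
		toy_do_signal(0);
		printf("blocked=%#lx\n", blocked);	/* back to 0x10 */
		return blocked == 0x10 ? 0 : 1;
	}
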
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index ba6ccab..a4c3eb6 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -88,7 +88,7 @@
 
 	/*
 	 * The PSC is always at the same spot, but using psc
-	 * keeps things consisant with the psc_xxxx functions.
+	 * keeps things consistent with the psc_xxxx functions.
 	 */
 
 	psc = (void *) PSC_BASE;
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index aff6f57..2783f25 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -112,22 +112,6 @@
 	RESTORE_SWITCH_STACK
 	rts
 
-ENTRY(sys_sigsuspend)
-	SAVE_SWITCH_STACK
-	pea	%sp@(SWITCH_STACK_SIZE)
-	jbsr	do_sigsuspend
-	addql	#4,%sp
-	RESTORE_SWITCH_STACK
-	rts
-
-ENTRY(sys_rt_sigsuspend)
-	SAVE_SWITCH_STACK
-	pea	%sp@(SWITCH_STACK_SIZE)
-	jbsr	do_rt_sigsuspend
-	addql	#4,%sp
-	RESTORE_SWITCH_STACK
-	rts
-
 ENTRY(sys_sigreturn)
 	SAVE_SWITCH_STACK
 	jbsr	do_sigreturn
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index 5ab6a04..36a81bb 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -53,65 +53,30 @@
 
 void ret_from_user_signal(void);
 void ret_from_user_rt_signal(void);
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
 
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
  */
-asmlinkage int do_sigsuspend(struct pt_regs *regs)
+asmlinkage int
+sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
 {
-	old_sigset_t mask = regs->d3;
-	sigset_t saveset;
-
 	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
+	current->saved_sigmask = current->blocked;
 	siginitset(&current->blocked, mask);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	regs->d0 = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs))
-			return -EINTR;
-	}
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_restore_sigmask();
+
+	return -ERESTARTNOHAND;
 }
 
 asmlinkage int
-do_rt_sigsuspend(struct pt_regs *regs)
-{
-	sigset_t *unewset = (sigset_t *)regs->d1;
-	size_t sigsetsize = (size_t)regs->d2;
-	sigset_t saveset, newset;
-
-	/* XXX: Don't preclude handling different sized sigset_t's.  */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&newset, unewset, sizeof(newset)))
-		return -EFAULT;
-	sigdelsetmask(&newset, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	saveset = current->blocked;
-	current->blocked = newset;
-	recalc_sigpending();
-	spin_unlock_irq(&current->sighand->siglock);
-
-	regs->d0 = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (do_signal(&saveset, regs))
-			return -EINTR;
-	}
-}
-
-asmlinkage int 
-sys_sigaction(int sig, const struct old_sigaction *act,
-	      struct old_sigaction *oact)
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+	      struct old_sigaction __user *oact)
 {
 	struct k_sigaction new_ka, old_ka;
 	int ret;
@@ -120,10 +85,10 @@
 		old_sigset_t mask;
 		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
 		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
 			return -EFAULT;
-		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-		__get_user(mask, &act->sa_mask);
 		siginitset(&new_ka.sa.sa_mask, mask);
 	}
 
@@ -132,17 +97,17 @@
 	if (!ret && oact) {
 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
 		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
 			return -EFAULT;
-		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
 	}
 
 	return ret;
 }
 
 asmlinkage int
-sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
 {
 	return do_sigaltstack(uss, uoss, rdusp());
 }
@@ -157,10 +122,10 @@
 
 struct sigframe
 {
-	char *pretcode;
+	char __user *pretcode;
 	int sig;
 	int code;
-	struct sigcontext *psc;
+	struct sigcontext __user *psc;
 	char retcode[8];
 	unsigned long extramask[_NSIG_WORDS-1];
 	struct sigcontext sc;
@@ -168,10 +133,10 @@
 
 struct rt_sigframe
 {
-	char *pretcode;
+	char __user *pretcode;
 	int sig;
-	struct siginfo *pinfo;
-	void *puc;
+	struct siginfo __user *pinfo;
+	void __user *puc;
 	char retcode[8];
 	struct siginfo info;
 	struct ucontext uc;
@@ -198,8 +163,8 @@
 		goto out;
 
 	    __asm__ volatile (".chip 68k/68881\n\t"
-			      "fmovemx %0,%/fp0-%/fp1\n\t"
-			      "fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
+			      "fmovemx %0,%%fp0-%%fp1\n\t"
+			      "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 			      ".chip 68k"
 			      : /* no outputs */
 			      : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
@@ -218,7 +183,7 @@
 #define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
 #define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
 
-static inline int rt_restore_fpu_state(struct ucontext *uc)
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 {
 	unsigned char fpstate[FPCONTEXT_SIZE];
 	int context_size = 0;
@@ -228,7 +193,7 @@
 	if (FPU_IS_EMU) {
 		/* restore fpu control register */
 		if (__copy_from_user(current->thread.fpcntl,
-				&uc->uc_mcontext.fpregs.f_pcr, 12))
+				uc->uc_mcontext.fpregs.f_fpcntl, 12))
 			goto out;
 		/* restore all other fpu register */
 		if (__copy_from_user(current->thread.fp,
@@ -237,7 +202,7 @@
 		return 0;
 	}
 
-	if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
+	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
 		goto out;
 	if (fpstate[0]) {
 		context_size = fpstate[1];
@@ -249,15 +214,15 @@
 		     sizeof(fpregs)))
 			goto out;
 		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %0,%/fp0-%/fp7\n\t"
-				  "fmoveml %1,%/fpcr/%/fpsr/%/fpiar\n\t"
+				  "fmovemx %0,%%fp0-%%fp7\n\t"
+				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 				  ".chip 68k"
 				  : /* no outputs */
 				  : "m" (*fpregs.f_fpregs),
-				    "m" (fpregs.f_pcr));
+				    "m" (*fpregs.f_fpcntl));
 	}
 	if (context_size &&
-	    __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
+	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
 			     context_size))
 		goto out;
 	__asm__ volatile (".chip 68k/68881\n\t"
@@ -272,7 +237,7 @@
 #endif
 
 static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
 		   int *pd0)
 {
 	int formatvec;
@@ -312,10 +277,10 @@
 
 static inline int
 rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
-		    struct ucontext *uc, int *pd0)
+		    struct ucontext __user *uc, int *pd0)
 {
 	int temp;
-	greg_t *gregs = uc->uc_mcontext.gregs;
+	greg_t __user *gregs = uc->uc_mcontext.gregs;
 	unsigned long usp;
 	int err;
 
@@ -365,7 +330,7 @@
 	struct switch_stack *sw = (struct switch_stack *) &__unused;
 	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
 	unsigned long usp = rdusp();
-	struct sigframe *frame = (struct sigframe *)(usp - 4);
+	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 	sigset_t set;
 	int d0;
 
@@ -397,7 +362,7 @@
 	struct switch_stack *sw = (struct switch_stack *) &__unused;
 	struct pt_regs *regs = (struct pt_regs *) (sw + 1);
 	unsigned long usp = rdusp();
-	struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 	sigset_t set;
 	int d0;
 
@@ -443,17 +408,17 @@
 	if (sc->sc_fpstate[0]) {
 		fpu_version = sc->sc_fpstate[0];
 		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %/fp0-%/fp1,%0\n\t"
-				  "fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
+				  "fmovemx %%fp0-%%fp1,%0\n\t"
+				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 				  ".chip 68k"
-				  : /* no outputs */
-				  : "m" (*sc->sc_fpregs),
-				    "m" (*sc->sc_fpcntl)
+				  : "=m" (*sc->sc_fpregs),
+				    "=m" (*sc->sc_fpcntl)
+				  : /* no inputs */
 				  : "memory");
 	}
 }
 
-static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 {
 	unsigned char fpstate[FPCONTEXT_SIZE];
 	int context_size = 0;
@@ -461,7 +426,7 @@
 
 	if (FPU_IS_EMU) {
 		/* save fpu control register */
-		err |= copy_to_user(&uc->uc_mcontext.fpregs.f_pcr,
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_pcntl,
 				current->thread.fpcntl, 12);
 		/* save all other fpu register */
 		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
@@ -474,24 +439,24 @@
 			  ".chip 68k"
 			  : : "m" (*fpstate) : "memory");
 
-	err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
+	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
 	if (fpstate[0]) {
 		fpregset_t fpregs;
 		context_size = fpstate[1];
 		fpu_version = fpstate[0];
 		__asm__ volatile (".chip 68k/68881\n\t"
-				  "fmovemx %/fp0-%/fp7,%0\n\t"
-				  "fmoveml %/fpcr/%/fpsr/%/fpiar,%1\n\t"
+				  "fmovemx %%fp0-%%fp7,%0\n\t"
+				  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 				  ".chip 68k"
-				  : /* no outputs */
-				  : "m" (*fpregs.f_fpregs),
-				    "m" (fpregs.f_pcr)
+				  : "=m" (*fpregs.f_fpregs),
+				    "=m" (*fpregs.f_fpcntl)
+				  : /* no inputs */
 				  : "memory");
 		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
 				    sizeof(fpregs));
 	}
 	if (context_size)
-		err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
+		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
 				    context_size);
 	return err;
 }
@@ -516,10 +481,10 @@
 #endif
 }
 
-static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 {
 	struct switch_stack *sw = (struct switch_stack *)regs - 1;
-	greg_t *gregs = uc->uc_mcontext.gregs;
+	greg_t __user *gregs = uc->uc_mcontext.gregs;
 	int err = 0;
 
 	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
@@ -547,7 +512,7 @@
 	return err;
 }
 
-static inline void *
+static inline void __user *
 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
 {
 	unsigned long usp;
@@ -560,13 +525,13 @@
 		if (!sas_ss_flags(usp))
 			usp = current->sas_ss_sp + current->sas_ss_size;
 	}
-	return (void *)((usp - frame_size) & -8UL);
+	return (void __user *)((usp - frame_size) & -8UL);
 }
 
-static void setup_frame (int sig, struct k_sigaction *ka,
+static int setup_frame (int sig, struct k_sigaction *ka,
 			 sigset_t *set, struct pt_regs *regs)
 {
-	struct sigframe *frame;
+	struct sigframe __user *frame;
 	struct sigcontext context;
 	int err = 0;
 
@@ -617,17 +582,17 @@
 		tregs->pc = regs->pc;
 		tregs->sr = regs->sr;
 	}
-	return;
+	return err;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
 	goto adjust_stack;
 }
 
-static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
 			    sigset_t *set, struct pt_regs *regs)
 {
-	struct rt_sigframe *frame;
+	struct rt_sigframe __user *frame;
 	int err = 0;
 
 	frame = get_sigframe(ka, regs, sizeof(*frame));
@@ -644,8 +609,8 @@
 
 	/* Create the ucontext.  */
 	err |= __put_user(0, &frame->uc.uc_flags);
-	err |= __put_user(0, &frame->uc.uc_link);
-	err |= __put_user((void *)current->sas_ss_sp,
+	err |= __put_user(NULL, &frame->uc.uc_link);
+	err |= __put_user((void __user *)current->sas_ss_sp,
 			  &frame->uc.uc_stack.ss_sp);
 	err |= __put_user(sas_ss_flags(rdusp()),
 			  &frame->uc.uc_stack.ss_flags);
@@ -681,7 +646,7 @@
 		tregs->pc = regs->pc;
 		tregs->sr = regs->sr;
 	}
-	return;
+	return err;
 
 give_sigsegv:
 	force_sigsegv(sig, current);
@@ -728,6 +693,7 @@
 handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
 	      sigset_t *oldset, struct pt_regs *regs)
 {
+	int err;
 	/* are we from a system call? */
 	if (regs->orig_d0 >= 0)
 		/* If so, check system call restarting.. */
@@ -735,12 +701,12 @@
 
 	/* set up the stack frame */
 	if (ka->sa.sa_flags & SA_SIGINFO)
-		setup_rt_frame(sig, ka, info, oldset, regs);
+		err = setup_rt_frame(sig, ka, info, oldset, regs);
 	else
-		setup_frame(sig, ka, oldset, regs);
+		err = setup_frame(sig, ka, oldset, regs);
 
-	if (ka->sa.sa_flags & SA_ONESHOT)
-		ka->sa.sa_handler = SIG_DFL;
+	if (err)
+		return;
 
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -748,6 +714,8 @@
 		sigaddset(&current->blocked,sig);
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
+
+	clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
 
 /*
@@ -755,11 +723,12 @@
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+asmlinkage void do_signal(struct pt_regs *regs)
 {
 	struct k_sigaction ka;
 	siginfo_t info;
 	int signr;
+	sigset_t *oldset;
 
 	/*
 	 * We want the common case to go fast, which
@@ -768,16 +737,18 @@
 	 * if so.
 	 */
 	if (!user_mode(regs))
-		return 1;
+		return;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
 		handle_signal(signr, &ka, &info, oldset, regs);
-		return 1;
+		return;
 	}
 
 	/* Did we come from a system call? */
@@ -785,5 +756,10 @@
 		/* Restart the system call - no handlers present */
 		handle_restart(regs, NULL, 0);
 	}
-	return 0;
+
+	/* If there's no signal to deliver, we just restore the saved mask.  */
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
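
Much of the m68knommu half of the rewrite is sparse hygiene: user pointers gain __user so "make C=1" can catch kernel/user pointer mixups, and the FPU save asm switches its store destinations from bogus input constraints to proper "=m" outputs. A rough sketch of what the __user annotation amounts to (this mirrors, but is not copied from, the kernel's compiler headers):

	/* empty for the compiler, an address-space attribute for sparse */
	#ifdef __CHECKER__
	# define __user	__attribute__((noderef, address_space(1)))
	#else
	# define __user
	#endif

	struct old_sigaction;		/* layout doesn't matter for the check */

	/* sparse now warns if a plain kernel pointer is mixed in here */
	long toy_sigaction(int sig, const struct old_sigaction __user *act,
			   struct old_sigaction __user *oact);

	int main(void)
	{
		return 0;
	}
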
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index 27241e1..240a7a6 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -106,6 +106,7 @@
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1
 	movel	%d1,%a2
+1:
 	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
 	andl	#_TIF_WORK_MASK,%d1
 	jne	Lwork_to_do
@@ -120,13 +121,11 @@
 	subql	#4,%sp			/* dummy return address*/
 	SAVE_SWITCH_STACK
 	pea	%sp@(SWITCH_STACK_SIZE)
-	clrl	%sp@-
 	bsrw	do_signal
-	addql	#8,%sp
+	addql	#4,%sp
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-Lreturn:
-	RESTORE_ALL
+	jra	1b
 
 /*
  * This is the main interrupt handler, responsible for calling process_int()
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index c131c6e..8a28788 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -102,6 +102,7 @@
 	movel	%sp,%d1			/* get thread_info pointer */
 	andl	#-THREAD_SIZE,%d1
 	movel	%d1,%a2
+1:
 	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
 	andl	#_TIF_WORK_MASK,%d1
 	jne	Lwork_to_do
@@ -116,13 +117,11 @@
 	subql	#4,%sp			/* dummy return address*/
 	SAVE_SWITCH_STACK
 	pea	%sp@(SWITCH_STACK_SIZE)
-	clrl	%sp@-
 	bsrw	do_signal
-	addql	#8,%sp
+	addql	#4,%sp
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-Lreturn:
-	RESTORE_ALL
+	jra	1b
 
 /*
  * This is the main interrupt handler, responsible for calling do_IRQ()
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 5e92bed..e1debc8 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -167,12 +167,11 @@
 	subql	#4,%sp			/* dummy return address */
 	SAVE_SWITCH_STACK
 	pea	%sp@(SWITCH_STACK_SIZE)
-	clrl	%sp@-
 	jsr	do_signal
-	addql	#8,%sp
+	addql	#4,%sp
 	RESTORE_SWITCH_STACK
 	addql	#4,%sp
-	jmp	Lreturn
+	jmp	Luser_return
 
 /*
  * This is the generic interrupt handler (for all hardware interrupt
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 387d5ff..5f5018a 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -14,7 +14,7 @@
 	select HAVE_DMA_API_DEBUG
 	select TRACING_SUPPORT
 	select OF
-	select OF_FLATTREE
+	select OF_EARLY_FLATTREE
 
 config SWAP
 	def_bool n
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index e66e25c..012e377 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -23,8 +23,4 @@
 	  This option turns on/off heart beat kernel functionality.
 	  First GPIO node is taken.
 
-config DEBUG_BOOTMEM
-	depends on DEBUG_KERNEL
-	bool "Debug BOOTMEM initialization"
-
 endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 15f1f1d..6f432e6 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -17,7 +17,7 @@
 # The various CONFIG_XILINX cpu features options are integers 0/1/2...
 # rather than bools y/n
 
-# Work out HW multipler support.  This is icky.
+# Work out HW multiplier support. This is tricky.
 # 1. Spartan2 has no HW multiplers.
 # 2. MicroBlaze v3.x always uses them, except in Spartan 2
 # 3. All other FPGa/CPU ver combos, we can trust the CONFIG_ settings
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index be01d78..4c4e58e 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -10,9 +10,6 @@
 
 OBJCOPYFLAGS := -O binary
 
-# Where the DTS files live
-dtstree         := $(srctree)/$(src)/dts
-
 # Ensure system.dtb exists
 $(obj)/linked_dtb.o: $(obj)/system.dtb
 
@@ -51,14 +48,11 @@
 	$(call if_changed,strip)
 	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
-# Rule to build device tree blobs
-DTC = $(objtree)/scripts/dtc/dtc
 
 # Rule to build device tree blobs
-quiet_cmd_dtc = DTC     $@
-	cmd_dtc = $(DTC) -O dtb -o $(obj)/$*.dtb -b 0 -p 1024 $(dtstree)/$*.dts
+DTC_FLAGS := -p 1024
 
-$(obj)/%.dtb: $(dtstree)/%.dts FORCE
-	$(call if_changed,dtc)
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+	$(call cmd,dtc)
 
 clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 8b422b1..ab8fbe7 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -66,5 +66,4 @@
 CONFIG_DEBUG_INFO=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_BOOTMEM=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index bdc3831..2e72af0 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -64,9 +64,6 @@
 /* CPU OF node matching */
 struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
 
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
 /**
  * of_irq_map_pci - Resolve the interrupt for a PCI device
  * @pdev:	the device whose interrupt is to be resolved
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 37db96a..a10bec6 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -1,9 +1,9 @@
 /*
  * Support for the MicroBlaze PVR (Processor Version Register)
  *
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 - 2011 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
- * Copyright (C) 2007 - 2009 PetaLogix
+ * Copyright (C) 2007 - 2011 PetaLogix
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file COPYING in the main directory of this
@@ -46,11 +46,11 @@
 #define PVR2_I_LMB_MASK			0x10000000
 #define PVR2_INTERRUPT_IS_EDGE_MASK	0x08000000
 #define PVR2_EDGE_IS_POSITIVE_MASK	0x04000000
-#define PVR2_D_PLB_MASK			0x02000000	/* new */
-#define PVR2_I_PLB_MASK			0x01000000	/* new */
-#define PVR2_INTERCONNECT		0x00800000	/* new */
-#define PVR2_USE_EXTEND_FSL		0x00080000	/* new */
-#define PVR2_USE_FSL_EXC		0x00040000	/* new */
+#define PVR2_D_PLB_MASK			0x02000000 /* new */
+#define PVR2_I_PLB_MASK			0x01000000 /* new */
+#define PVR2_INTERCONNECT		0x00800000 /* new */
+#define PVR2_USE_EXTEND_FSL		0x00080000 /* new */
+#define PVR2_USE_FSL_EXC		0x00040000 /* new */
 #define PVR2_USE_MSR_INSTR		0x00020000
 #define PVR2_USE_PCMP_INSTR		0x00010000
 #define PVR2_AREA_OPTIMISED		0x00008000
@@ -59,7 +59,7 @@
 #define PVR2_USE_HW_MUL_MASK		0x00001000
 #define PVR2_USE_FPU_MASK		0x00000800
 #define PVR2_USE_MUL64_MASK		0x00000400
-#define PVR2_USE_FPU2_MASK		0x00000200	/* new */
+#define PVR2_USE_FPU2_MASK		0x00000200 /* new */
 #define PVR2_USE_IPLBEXC 		0x00000100
 #define PVR2_USE_DPLBEXC		0x00000080
 #define PVR2_OPCODE_0x0_ILL_MASK	0x00000040
@@ -122,96 +122,103 @@
 
 
 /* PVR access macros */
-#define PVR_IS_FULL(pvr)		(pvr.pvr[0] & PVR0_PVR_FULL_MASK)
-#define PVR_USE_BARREL(pvr)		(pvr.pvr[0] & PVR0_USE_BARREL_MASK)
-#define PVR_USE_DIV(pvr)		(pvr.pvr[0] & PVR0_USE_DIV_MASK)
-#define PVR_USE_HW_MUL(pvr)		(pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
-#define PVR_USE_FPU(pvr)		(pvr.pvr[0] & PVR0_USE_FPU_MASK)
-#define PVR_USE_FPU2(pvr)		(pvr.pvr[2] & PVR2_USE_FPU2_MASK)
-#define PVR_USE_ICACHE(pvr)		(pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
-#define PVR_USE_DCACHE(pvr)		(pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
-#define PVR_VERSION(pvr)	((pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
-#define PVR_USER1(pvr)			(pvr.pvr[0] & PVR0_USER1_MASK)
-#define PVR_USER2(pvr)			(pvr.pvr[1] & PVR1_USER2_MASK)
+#define PVR_IS_FULL(_pvr)	(_pvr.pvr[0] & PVR0_PVR_FULL_MASK)
+#define PVR_USE_BARREL(_pvr)	(_pvr.pvr[0] & PVR0_USE_BARREL_MASK)
+#define PVR_USE_DIV(_pvr)	(_pvr.pvr[0] & PVR0_USE_DIV_MASK)
+#define PVR_USE_HW_MUL(_pvr)	(_pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
+#define PVR_USE_FPU(_pvr)	(_pvr.pvr[0] & PVR0_USE_FPU_MASK)
+#define PVR_USE_FPU2(_pvr)	(_pvr.pvr[2] & PVR2_USE_FPU2_MASK)
+#define PVR_USE_ICACHE(_pvr)	(_pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
+#define PVR_USE_DCACHE(_pvr)	(_pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
+#define PVR_VERSION(_pvr)	((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
+#define PVR_USER1(_pvr)		(_pvr.pvr[0] & PVR0_USER1_MASK)
+#define PVR_USER2(_pvr)		(_pvr.pvr[1] & PVR1_USER2_MASK)
 
-#define PVR_D_OPB(pvr)			(pvr.pvr[2] & PVR2_D_OPB_MASK)
-#define PVR_D_LMB(pvr)			(pvr.pvr[2] & PVR2_D_LMB_MASK)
-#define PVR_I_OPB(pvr)			(pvr.pvr[2] & PVR2_I_OPB_MASK)
-#define PVR_I_LMB(pvr)			(pvr.pvr[2] & PVR2_I_LMB_MASK)
-#define PVR_INTERRUPT_IS_EDGE(pvr) \
-			(pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
-#define PVR_EDGE_IS_POSITIVE(pvr) \
-			(pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
-#define PVR_USE_MSR_INSTR(pvr)		(pvr.pvr[2] & PVR2_USE_MSR_INSTR)
-#define PVR_USE_PCMP_INSTR(pvr)		(pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
-#define PVR_AREA_OPTIMISED(pvr)		(pvr.pvr[2] & PVR2_AREA_OPTIMISED)
-#define PVR_USE_MUL64(pvr)		(pvr.pvr[2] & PVR2_USE_MUL64_MASK)
-#define PVR_OPCODE_0x0_ILLEGAL(pvr) \
-			(pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
-#define PVR_UNALIGNED_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
-#define PVR_ILL_OPCODE_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
-#define PVR_IOPB_BUS_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
-#define PVR_DOPB_BUS_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
-#define PVR_DIV_ZERO_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
-#define PVR_FPU_EXCEPTION(pvr)		(pvr.pvr[2] & PVR2_FPU_EXC_MASK)
-#define PVR_FSL_EXCEPTION(pvr)		(pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
+#define PVR_D_OPB(_pvr)		(_pvr.pvr[2] & PVR2_D_OPB_MASK)
+#define PVR_D_LMB(_pvr)		(_pvr.pvr[2] & PVR2_D_LMB_MASK)
+#define PVR_I_OPB(_pvr)		(_pvr.pvr[2] & PVR2_I_OPB_MASK)
+#define PVR_I_LMB(_pvr)		(_pvr.pvr[2] & PVR2_I_LMB_MASK)
+#define PVR_INTERRUPT_IS_EDGE(_pvr) \
+			(_pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
+#define PVR_EDGE_IS_POSITIVE(_pvr) \
+			(_pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
+#define PVR_USE_MSR_INSTR(_pvr)		(_pvr.pvr[2] & PVR2_USE_MSR_INSTR)
+#define PVR_USE_PCMP_INSTR(_pvr)	(_pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
+#define PVR_AREA_OPTIMISED(_pvr)	(_pvr.pvr[2] & PVR2_AREA_OPTIMISED)
+#define PVR_USE_MUL64(_pvr)		(_pvr.pvr[2] & PVR2_USE_MUL64_MASK)
+#define PVR_OPCODE_0x0_ILLEGAL(_pvr) \
+			(_pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
+#define PVR_UNALIGNED_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
+#define PVR_ILL_OPCODE_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
+#define PVR_IOPB_BUS_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
+#define PVR_DOPB_BUS_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
+#define PVR_DIV_ZERO_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
+#define PVR_FPU_EXCEPTION(_pvr)		(_pvr.pvr[2] & PVR2_FPU_EXC_MASK)
+#define PVR_FSL_EXCEPTION(_pvr)		(_pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
 
-#define PVR_DEBUG_ENABLED(pvr)		(pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
-#define PVR_NUMBER_OF_PC_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
-#define PVR_NUMBER_OF_RD_ADDR_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
-#define PVR_NUMBER_OF_WR_ADDR_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
-#define PVR_FSL_LINKS(pvr)	((pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
+#define PVR_DEBUG_ENABLED(_pvr)		(_pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
+#define PVR_NUMBER_OF_PC_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
+#define PVR_NUMBER_OF_RD_ADDR_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
+#define PVR_NUMBER_OF_WR_ADDR_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
+#define PVR_FSL_LINKS(_pvr)	((_pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
 
-#define PVR_ICACHE_ADDR_TAG_BITS(pvr) \
-			((pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_ICACHE_USE_FSL(pvr)		(pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
-#define PVR_ICACHE_ALLOW_WR(pvr)	(pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
-#define PVR_ICACHE_LINE_LEN(pvr) \
-			(1 << ((pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
-#define PVR_ICACHE_BYTE_SIZE(pvr) \
-			(1 << ((pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_ICACHE_ADDR_TAG_BITS(_pvr) \
+		((_pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_ICACHE_USE_FSL(_pvr) \
+		(_pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
+#define PVR_ICACHE_ALLOW_WR(_pvr) \
+		(_pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
+#define PVR_ICACHE_LINE_LEN(_pvr) \
+		(1 << ((_pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
+#define PVR_ICACHE_BYTE_SIZE(_pvr) \
+		(1 << ((_pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
 
-#define PVR_DCACHE_ADDR_TAG_BITS(pvr) \
-			((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_DCACHE_USE_FSL(pvr)		(pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
-#define PVR_DCACHE_ALLOW_WR(pvr)	(pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+#define PVR_DCACHE_ADDR_TAG_BITS(_pvr) \
+			((_pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_DCACHE_USE_FSL(_pvr)	(_pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
+#define PVR_DCACHE_ALLOW_WR(_pvr) \
+			(_pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
 /* FIXME two shifts on one line needs any comment */
-#define PVR_DCACHE_LINE_LEN(pvr) \
-			(1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
-#define PVR_DCACHE_BYTE_SIZE(pvr) \
-			(1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_DCACHE_LINE_LEN(_pvr) \
+		(1 << ((_pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
+#define PVR_DCACHE_BYTE_SIZE(_pvr) \
+		(1 << ((_pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
 
-#define PVR_DCACHE_USE_WRITEBACK(pvr) \
-			((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
+#define PVR_DCACHE_USE_WRITEBACK(_pvr) \
+			((_pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
 
-#define PVR_ICACHE_BASEADDR(pvr)	(pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
-#define PVR_ICACHE_HIGHADDR(pvr)	(pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_ICACHE_BASEADDR(_pvr) \
+			(_pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
+#define PVR_ICACHE_HIGHADDR(_pvr) \
+			(_pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_DCACHE_BASEADDR(_pvr) \
+			(_pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
+#define PVR_DCACHE_HIGHADDR(_pvr) \
+			(_pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
 
-#define PVR_DCACHE_BASEADDR(pvr)	(pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
-#define PVR_DCACHE_HIGHADDR(pvr)	(pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
+#define PVR_TARGET_FAMILY(_pvr) \
+			((_pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
 
-#define PVR_TARGET_FAMILY(pvr)	((pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
-
-#define PVR_MSR_RESET_VALUE(pvr) \
-				(pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
+#define PVR_MSR_RESET_VALUE(_pvr) \
+			(_pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
 
 /* mmu */
-#define PVR_USE_MMU(pvr)	((pvr.pvr[11] & PVR11_USE_MMU) >> 30)
-#define PVR_MMU_ITLB_SIZE(pvr)	(pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
-#define PVR_MMU_DTLB_SIZE(pvr)	(pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
-#define PVR_MMU_TLB_ACCESS(pvr)	(pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
-#define PVR_MMU_ZONES(pvr)	(pvr.pvr[11] & PVR11_MMU_ZONES)
+#define PVR_USE_MMU(_pvr)		((_pvr.pvr[11] & PVR11_USE_MMU) >> 30)
+#define PVR_MMU_ITLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
+#define PVR_MMU_DTLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
+#define PVR_MMU_TLB_ACCESS(_pvr)	(_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
+#define PVR_MMU_ZONES(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ZONES)
 
 /* endian */
-#define PVR_ENDIAN(pvr)	(pvr.pvr[0] & PVR0_ENDI)
+#define PVR_ENDIAN(_pvr)	(_pvr.pvr[0] & PVR0_ENDI)
 
 int cpu_has_pvr(void);
 void get_pvr(struct pvr_s *pvr);
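
The pvr.h churn above is purely about readability: every macro parameter is renamed from pvr to _pvr so it can no longer be mistaken for the struct pvr_s member of the same name inside the expansion, and the longer expansions are re-wrapped to keep lines short. A small stand-alone illustration of why the underscore helps (the mask value and macro name below are made up for the example):

	#include <stdio.h>

	struct pvr_s {
		unsigned int pvr[12];
	};

	/*
	 * With the parameter spelled "pvr", the expansion reads pvr.pvr[0]
	 * and it is easy to misread which "pvr" is the argument and which
	 * is the array member; "_pvr" keeps the two visually distinct.
	 */
	#define TOY_PVR_VERSION(_pvr)	(((_pvr).pvr[0] & 0x0000ff00) >> 8)

	int main(void)
	{
		struct pvr_s cpu_pvr = { .pvr = { 0x00001200 } };

		printf("version code 0x%02x\n", TOY_PVR_VERSION(cpu_pvr));
		return 0;
	}
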
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 87c79fa..2c309fc 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -32,6 +32,7 @@
 	{"7.30.a", 0x10},
 	{"7.30.b", 0x11},
 	{"8.00.a", 0x12},
+	{"8.00.b", 0x13},
 	{NULL, 0},
 };
 
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 819238b..41c30cd 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -287,25 +287,44 @@
  * are masked. This is nice, means we don't have to CLI before state save
  */
 C_ENTRY(_user_exception):
-	addi	r14, r14, 4	/* return address is 4 byte after call */
 	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	addi	r14, r14, 4	/* return address is 4 byte after call */
 
+	mfs	r1, rmsr
+	nop
+	andi	r1, r1, MSR_UMS
+	bnei	r1, 1f
+
+/* Kernel-mode state save - kernel execve */
+	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	SAVE_REGS
+
+	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
+	brid	2f;
+	nop;				/* Fill delay slot */
+
+/* User-mode state save.  */
+1:
 	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 	tophys(r1,r1);
 	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
-	/* MS these three instructions can be added to one */
-	/* addik	r1, r1, THREAD_SIZE; */
-	/* tophys(r1,r1); */
-	/* addik	r1, r1, -STATE_SAVE_SIZE; */
-	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
+/* calculate kernel stack pointer from task struct 8k */
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
 	SAVE_REGS
 	swi	r0, r1, PTO + PT_R3
 	swi	r0, r1, PTO + PT_R4
 
+	swi	r0, r1, PTO + PT_MODE;			/* Was in user-mode. */
 	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
 	clear_ums;
-	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number.  */
 	swi	r12, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -375,6 +394,9 @@
 	swi	r3, r1, PTO + PT_R3
 	swi	r4, r1, PTO + PT_R4
 
+	lwi	r11, r1, PTO + PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c.  */
+	bnei	r11, 2f;
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
 	/* FIXME: Restructure all these flag checks. */
@@ -417,6 +439,16 @@
 	RESTORE_REGS;
 	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
 	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state.  */
+2:	set_bip;			/*  Ints masked for state restore */
+	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+	tovirt(r1,r1);
+6:
 TRAP_return:		/* Make global symbol for debugging */
 	rtbd	r14, 0;	/* Instructions to return from an IRQ */
 	nop;
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 478f294..a7fa6ae 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -25,6 +25,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <asm/current.h>
+#include <asm/cacheflush.h>
 
 #define MICROBLAZE_ILL_OPCODE_EXCEPTION	0x02
 #define MICROBLAZE_IBUS_EXCEPTION	0x03
@@ -52,6 +53,8 @@
 void sw_exception(struct pt_regs *regs)
 {
 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
+	flush_dcache_range(regs->r16, regs->r16 + 0x4);
+	flush_icache_range(regs->r16, regs->r16 + 0x4);
 }
 
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 7811954..25f6e07 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -945,11 +945,20 @@
 store4:	sbi	r3, r4, 3;	/* Delay slot */
 ex_shw_vm:
 	/* Store the lower half-word, byte-by-byte into destination address */
+#ifdef __MICROBLAZEEL__
+	lbui	r3, r5, 0;
+store5:	sbi	r3, r4, 0;
+	lbui	r3, r5, 1;
+	brid	ret_from_exc;
+store6:	sbi	r3, r4, 1;	/* Delay slot */
+#else
 	lbui	r3, r5, 2;
 store5:	sbi	r3, r4, 0;
 	lbui	r3, r5, 3;
 	brid	ret_from_exc;
 store6:	sbi	r3, r4, 1;	/* Delay slot */
+#endif
+
 ex_sw_end_vm:			/* Exception handling of store word, ends. */
 
 /* We have to prevent cases that get/put_user macros get unaligned pointer
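
The hw_exception_handler.S hunk fixes the unaligned-store fixup for little-endian MicroBlaze builds: when the low half-word of a register image has to be written out byte by byte, bytes 2 and 3 of the in-memory word hold it on big-endian, but bytes 0 and 1 hold it on little-endian, hence the new __MICROBLAZEEL__ branch. The equivalent logic, written as portable C you can run anywhere (not the kernel code itself; the byte-order macros are the usual GCC/Clang predefines):

	#include <string.h>

	/*
	 * Store the low 16 bits of "val" to an unaligned address one byte at
	 * a time, by indexing into the in-memory image of the word, the way
	 * the exception handler does.  Which two bytes hold the low half-word
	 * depends on endianness.
	 */
	static void store_low_halfword(unsigned char *dst, unsigned int val)
	{
		unsigned char src[4];

		memcpy(src, &val, 4);		/* r5 points at such a buffer */
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		dst[0] = src[0];		/* LE: low half is bytes 0,1 */
		dst[1] = src[1];
	#else
		dst[0] = src[2];		/* BE: low half is bytes 2,3 */
		dst[1] = src[3];
	#endif
	}

	int main(void)
	{
		unsigned int val = 0x11223344;
		unsigned short ref = (unsigned short)val;
		unsigned char out[2];

		store_low_halfword(out, val);
		/* matches what an ordinary aligned 16-bit store would produce */
		return memcmp(out, &ref, 2) == 0 ? 0 : 1;
	}
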
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a105301..c881393 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -61,14 +61,12 @@
 	char *p;
 	int *addr;
 
-	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+	pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);
 
 /* find all serial nodes */
 	if (strncmp(uname, "serial", 6) != 0)
 		return 0;
 
-	early_init_dt_check_for_initrd(node);
-
 /* find compatible node with uartlite */
 	p = of_get_flat_dt_prop(node, "compatible", &l);
 	if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
index 99d9b61..9ae24f4 100644
--- a/arch/microblaze/kernel/prom_parse.c
+++ b/arch/microblaze/kernel/prom_parse.c
@@ -110,41 +110,3 @@
 	cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
 	*size = of_read_number(dma_window, cells);
 }
-
-/**
- * Search the device tree for the best MAC address to use.  'mac-address' is
- * checked first, because that is supposed to contain to "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address.  If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the device tree, but were not set by U-Boot.  For example, the
- * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
- * addresses.  Some older U-Boots only initialized 'local-mac-address'.  In
- * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
- * but is all zeros.
-*/
-const void *of_get_mac_address(struct device_node *np)
-{
-	struct property *pp;
-
-	pp = of_find_property(np, "mac-address", NULL);
-	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
-		return pp->value;
-
-	pp = of_find_property(np, "local-mac-address", NULL);
-	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
-		return pp->value;
-
-	pp = of_find_property(np, "address", NULL);
-	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
-		return pp->value;
-
-	return NULL;
-}
-EXPORT_SYMBOL(of_get_mac_address);
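
The prom_parse.c removal above (together with the prototype dropped from prom.h earlier) retires the microblaze-private of_get_mac_address(); drivers keep calling the same helper, which is now provided by the common device-tree code elsewhere in the tree. The lookup policy it implemented, first valid non-zero 6-byte property among "mac-address", "local-mac-address" and "address" wins, can be modelled stand-alone like this (a sketch of the policy only, not the shared implementation):

	#include <stdio.h>
	#include <string.h>

	struct toy_prop {
		const char *name;
		const unsigned char *value;
		int length;
	};

	static int toy_valid_ether(const unsigned char *a)
	{
		static const unsigned char zero[6];

		return memcmp(a, zero, 6) != 0;	/* real check also rejects multicast */
	}

	/* first valid 6-byte property in priority order wins */
	static const unsigned char *toy_get_mac(const struct toy_prop *props, int n)
	{
		static const char *const order[] = {
			"mac-address", "local-mac-address", "address"
		};
		int i, j;

		for (i = 0; i < 3; i++)
			for (j = 0; j < n; j++)
				if (!strcmp(props[j].name, order[i]) &&
				    props[j].length == 6 &&
				    toy_valid_ether(props[j].value))
					return props[j].value;
		return NULL;
	}

	int main(void)
	{
		static const unsigned char zeros[6], real[6] = { 2, 0, 0, 0, 0, 1 };
		struct toy_prop props[] = {
			{ "mac-address", zeros, 6 },		/* present but unset */
			{ "local-mac-address", real, 6 },	/* filled in by U-Boot */
		};
		const unsigned char *mac = toy_get_mac(props, 2);

		printf("%s\n", mac == real ? "local-mac-address" : "none");
		return 0;
	}
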
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 96a88c3..3451bde 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -123,20 +123,10 @@
 
 	__init_end_before_initramfs = .;
 
-	.init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
-		. = ALIGN(4);
-		LONG(0);
-/*
- * FIXME this can break initramfs for MMU.
- * Pad init.ramfs up to page boundary,
- * so that __init_end == __bss_start. This will make image.elf
- * consistent with the image.bin
- */
-		/* . = ALIGN(PAGE_SIZE); */
+	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+		INIT_RAM_FS
 	}
+
 	__init_end = .;
 
 	.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 123e361..810fd68 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -182,7 +182,7 @@
 			for (; c >= 4; c -= 4) {
 				value = *--i_src;
 				*--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
-				buf_hold = (value & 0xFFFFFF) << 8;;
+				buf_hold = (value & 0xFFFFFF) << 8;
 			}
 #endif
 			/* Realign the source */
diff --git a/arch/microblaze/lib/muldi3.S b/arch/microblaze/lib/muldi3.S
deleted file mode 100644
index ceeaa8c..0000000
--- a/arch/microblaze/lib/muldi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
-#include <linux/linkage.h>
-
-/*
- * Multiply operation for 64 bit integers, for devices with hard multiply
- *	Input :	Operand1[H] in Reg r5
- *		Operand1[L] in Reg r6
- *		Operand2[H] in Reg r7
- *		Operand2[L] in Reg r8
- *	Output: Result[H] in Reg r3
- *		Result[L] in Reg r4
- *
- * Explaination:
- *
- *	Both the input numbers are divided into 16 bit number as follows
- *		op1 = A B C D
- *		op2 = E F G H
- *	result = D * H
- *		 + (C * H + D * G) << 16
- *		 + (B * H + C * G + D * F) << 32
- *		 + (A * H + B * G + C * F + D * E) << 48
- *
- *	Only 64 bits of the output are considered
- */
-
-	.text
-	.globl	__muldi3
-	.type __muldi3, @function
-	.ent __muldi3
-
-__muldi3:
-	addi	r1, r1, -40
-
-/* Save the input operands on the caller's stack */
-	swi	r5, r1, 44
-	swi	r6, r1, 48
-	swi	r7, r1, 52
-	swi	r8, r1, 56
-
-/* Store all the callee saved registers */
-	sw	r20, r1, r0
-	swi	r21, r1, 4
-	swi	r22, r1, 8
-	swi	r23, r1, 12
-	swi	r24, r1, 16
-	swi	r25, r1, 20
-	swi	r26, r1, 24
-	swi	r27, r1, 28
-
-/* Load all the 16 bit values for A thru H */
-	lhui	r20, r1, 44 /* A */
-	lhui	r21, r1, 46 /* B */
-	lhui	r22, r1, 48 /* C */
-	lhui	r23, r1, 50 /* D */
-	lhui	r24, r1, 52 /* E */
-	lhui	r25, r1, 54 /* F */
-	lhui	r26, r1, 56 /* G */
-	lhui	r27, r1, 58 /* H */
-
-/* D * H ==> LSB of the result on stack ==> Store1 */
-	mul	r9, r23, r27
-	swi	r9, r1, 36 /* Pos2 and Pos3 */
-
-/* Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 */
-/* Store the carry generated in position 2 for Pos 3 */
-	lhui	r11, r1, 36 /* Pos2 */
-	mul	r9, r22, r27 /* C * H */
-	mul	r10, r23, r26 /* D * G */
-	add	r9, r9, r10
-	addc	r12, r0, r0
-	add	r9, r9, r11
-	addc	r12, r12, r0 /* Store the Carry */
-	shi	r9, r1, 36 /* Store Pos2 */
-	swi	r9, r1, 32
-	lhui	r11, r1, 32
-	shi	r11, r1, 34 /* Store Pos1 */
-
-/* Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 */
-	mul	r9, r21, r27 /* B * H */
-	mul	r10, r22, r26 /* C * G */
-	mul	r7, r23, r25 /* D * F */
-	add	r9, r9, r11
-	add	r9, r9, r10
-	add	r9, r9, r7
-	swi	r9, r1, 32 /* Pos0 and Pos1 */
-
-/* Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 */
-	lhui	r11, r1, 32 /* Pos0 */
-	mul	r9, r20, r27 /* A * H */
-	mul	r10, r21, r26 /* B * G */
-	mul	r7, r22, r25 /* C * F */
-	mul	r8, r23, r24 /* D * E */
-	add	r9, r9, r11
-	add 	r9, r9, r10
-	add	r9, r9, r7
-	add	r9, r9, r8
-	sext16	r9, r9 /* Sign extend the MSB */
-	shi	r9, r1, 32
-
-/* Move results to r3 and r4 */
-	lhui	r3, r1, 32
-	add	r3, r3, r12
-	shi	r3, r1, 32
-	lwi	r3, r1, 32 /* Hi Part */
-	lwi	r4, r1, 36 /* Lo Part */
-
-/* Restore Callee saved registers */
-	lw	r20, r1, r0
-	lwi	r21, r1, 4
-	lwi	r22, r1, 8
-	lwi	r23, r1, 12
-	lwi	r24, r1, 16
-	lwi	r25, r1, 20
-	lwi	r26, r1, 24
-	lwi	r27, r1, 28
-
-/* Restore Frame and return */
-	rtsd	r15, 8
-	addi	r1, r1, 40
-
-.size __muldi3, . - __muldi3
-.end __muldi3
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
new file mode 100644
index 0000000..d4860e1
--- /dev/null
+++ b/arch/microblaze/lib/muldi3.c
@@ -0,0 +1,60 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+#define DWtype long long
+#define UWtype unsigned long
+#define UHWtype unsigned short
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C.  */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v)						\
+	do {								\
+		UWtype __x0, __x1, __x2, __x3;				\
+		UHWtype __ul, __vl, __uh, __vh;				\
+									\
+		__ul = __ll_lowpart(u);					\
+		__uh = __ll_highpart(u);				\
+		__vl = __ll_lowpart(v);					\
+		__vh = __ll_highpart(v);				\
+									\
+		__x0 = (UWtype) __ul * __vl;				\
+		__x1 = (UWtype) __ul * __vh;				\
+		__x2 = (UWtype) __uh * __vl;				\
+		__x3 = (UWtype) __uh * __vh;				\
+									\
+		__x1 += __ll_highpart(__x0); /* this can't give carry */\
+		__x1 += __x2; /* but this indeed can */			\
+		if (__x1 < __x2) /* did we get it? */			\
+		__x3 += __ll_B; /* yes, add it in the proper pos */	\
+									\
+		(w1) = __x3 + __ll_highpart(__x1);			\
+		(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+	} while (0)
+#endif
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) ({				\
+	DWunion __w;					\
+	umul_ppmm(__w.s.high, __w.s.low, u, v);		\
+	__w.ll;						\
+	})
+#endif
+
+DWtype __muldi3(DWtype u, DWtype v)
+{
+	const DWunion uu = {.ll = u};
+	const DWunion vv = {.ll = v};
+	DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
+
+	w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+		+ (UWtype) uu.s.high * (UWtype) vv.s.low);
+
+	return w.ll;
+}
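
The removed assembly computed the 64-bit product from 16-bit limbs; the C
replacement does the same in two layers: __umulsidi3() builds a full
32x32->64 multiply from 16-bit halves via umul_ppmm(), and __muldi3() then
adds the two cross products into the high word (low*low is the only term
that reaches the low 32 bits).  A stand-alone user-space check of that
decomposition, for illustration only (not kernel code):

	/* Mirror the umul_ppmm() split and compare against a native
	 * 64-bit multiply. */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t umul32(uint32_t u, uint32_t v)
	{
		uint32_t ul = u & 0xffff, uh = u >> 16;
		uint32_t vl = v & 0xffff, vh = v >> 16;
		uint32_t x0 = ul * vl, x1 = ul * vh, x2 = uh * vl, x3 = uh * vh;
		uint32_t w1, w0;

		x1 += x0 >> 16;		/* cannot carry */
		x1 += x2;		/* may carry into the top partial product */
		if (x1 < x2)
			x3 += 1u << 16;

		w1 = x3 + (x1 >> 16);
		w0 = (x1 << 16) + (x0 & 0xffff);
		return ((uint64_t)w1 << 32) | w0;
	}

	int main(void)
	{
		uint32_t u = 0x89abcdef, v = 0x12345678;

		printf("split: %llx  native: %llx\n",
		       (unsigned long long)umul32(u, v),
		       (unsigned long long)((uint64_t)u * v));
		return 0;
	}

The C form also lets the compiler emit whatever multiply sequence suits the
configured core, which is presumably why the hand-written hard-multiply
assembly was dropped.
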
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0a9b5b8..f489ec3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2218,7 +2218,7 @@
 config USE_OF
 	bool "Flattened Device Tree support"
 	select OF
-	select OF_FLATTREE
+	select OF_EARLY_FLATTREE
 	help
 	  Include support for flattened device tree machine descriptions.
 
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index e5916a5..647e518 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -130,7 +130,7 @@
 	au_writel(sleep_usb[1], USBD_ENABLE);
 	au_sync();
 #else
-	/* enable accces to OTG memory */
+	/* enable access to OTG memory */
 	au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
 	au_sync();
 
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index 4bbd313..acaf91b 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -110,7 +110,7 @@
 
 }
 
-static struct platform_suspend_ops db1x_pm_ops = {
+static const struct platform_suspend_ops db1x_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 	.begin		= db1x_pm_begin,
 	.enter		= db1x_pm_enter,
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 02f505f..ea57f39 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -104,7 +104,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mips/include/asm/ioctls.h b/arch/mips/include/asm/ioctls.h
index d87cb04..d967b89 100644
--- a/arch/mips/include/asm/ioctls.h
+++ b/arch/mips/include/asm/ioctls.h
@@ -83,6 +83,7 @@
 #define TCSETSF2	_IOW('T', 0x2D, struct termios2)
 #define TIOCGPTN	_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T', 0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T', 0x36, int)  /* Generate signal on Pty slave */
 
 /* I hope the range from 0x5480 on is free ... */
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
index 076f2ee..c86ef09 100644
--- a/arch/mips/include/asm/mach-powertv/ioremap.h
+++ b/arch/mips/include/asm/mach-powertv/ioremap.h
@@ -88,7 +88,7 @@
 }
 
 /* These are not portable and should not be used in drivers. Drivers should
- * be using ioremap() and friends to map physical addreses to virtual
+ * be using ioremap() and friends to map physical addresses to virtual
  * addresses and dma_map*() and friends to map virtual addresses into DMA
  * addresses and back.
  */
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index 199b457..4a08dbe 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -66,7 +66,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h
index c892bfb..785b4ea 100644
--- a/arch/mips/include/asm/mman.h
+++ b/arch/mips/include/asm/mman.h
@@ -77,6 +77,9 @@
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 #define MADV_HWPOISON    100		/* poison a page for testing */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 5c0a357..2c0e107 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -65,7 +65,7 @@
 
 /* Early prototypes of the QI LB60 had only 1GB of NAND.
  * In order to support these devices aswell the partition and ecc layout is
- * initalized depending on the NAND size */
+ * initialized depending on the NAND size */
 static struct mtd_partition qi_lb60_partitions_1gb[] = {
 	{
 		.name = "NAND BOOT partition",
@@ -464,7 +464,7 @@
 	board_gpio_setup();
 
 	if (qi_lb60_init_platform_devices())
-		panic("Failed to initalize platform devices\n");
+		panic("Failed to initialize platform devices\n");
 
 	return 0;
 }
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 38f60f3..88e6aed 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -546,7 +546,7 @@
 	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
 		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
 
-	printk(KERN_INFO "JZ4740 GPIO initalized\n");
+	printk(KERN_INFO "JZ4740 GPIO initialized\n");
 
 	return 0;
 }
diff --git a/arch/mips/jz4740/pm.c b/arch/mips/jz4740/pm.c
index a999458..902d5b5 100644
--- a/arch/mips/jz4740/pm.c
+++ b/arch/mips/jz4740/pm.c
@@ -42,7 +42,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops jz4740_pm_ops = {
+static const struct platform_suspend_ops jz4740_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 	.enter		= jz4740_pm_enter,
 };
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 6f51dda..d87a72e 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -46,17 +46,9 @@
 void *module_alloc(unsigned long size)
 {
 #ifdef MODULE_START
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 #else
 	if (size == 0)
 		return NULL;
diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c
index 6c1fd90..f55e07a 100644
--- a/arch/mips/loongson/common/pm.c
+++ b/arch/mips/loongson/common/pm.c
@@ -147,7 +147,7 @@
 	}
 }
 
-static struct platform_suspend_ops loongson_pm_ops = {
+static const struct platform_suspend_ops loongson_pm_ops = {
 	.valid	= loongson_pm_valid_state,
 	.enter	= loongson_pm_enter,
 };
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index b27419c..a96d281 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -43,7 +43,7 @@
 static char *mtypes[3] = {
 	"Dont use memory",
 	"YAMON PROM memory",
-	"Free memmory",
+	"Free memory",
 };
 #endif
 
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 385f035..0583c46 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -900,7 +900,7 @@
 	mem_access_subid.s.ror = 0;
 	/* Disable Relaxed Ordering for Writes. */
 	mem_access_subid.s.row = 0;
-	/* PCIe Adddress Bits <63:34>. */
+	/* PCIe Address Bits <63:34>. */
 	mem_access_subid.s.ba = 0;
 
 	/*
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 73880ad..fb3d296 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -57,7 +57,7 @@
 unsigned long ptv_memsize;
 
 /*
- * struct low_mem_reserved - Items in low memmory that are reserved
+ * struct low_mem_reserved - Items in low memory that are reserved
  * @start:	Physical address of item
  * @size:	Size, in bytes, of this item
  * @is_aliased:	True if this is RAM aliased from another location. If false,
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 9a0be81..96e69a0 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -107,7 +107,7 @@
 
 /*
  * allocate pci_controller and resources.
- * mem_base, io_base: physical addresss.  0 for auto assignment.
+ * mem_base, io_base: physical address.  0 for auto assignment.
  * mem_size and io_size means max size on auto assignment.
  * pcic must be &txx9_primary_pcic or NULL.
  */
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 41ba385..8ed41cf 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -203,6 +203,7 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	default y
+	select USE_GENERIC_SMP_HELPERS
 	depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -226,11 +227,6 @@
 	depends on SMP
 	default "2"
 
-config USE_GENERIC_SMP_HELPERS
-	bool
-	depends on SMP
-	default y
-
 source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index e9e20f9..48d7058 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -89,7 +89,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/parisc/include/asm/ioctls.h b/arch/parisc/include/asm/ioctls.h
index 4e06144..6ba80d0 100644
--- a/arch/parisc/include/asm/ioctls.h
+++ b/arch/parisc/include/asm/ioctls.h
@@ -52,6 +52,7 @@
 #define TCSETSF2	_IOW('T',0x2D, struct termios2)
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T',0x32, int)  /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
 
 #define FIONCLEX	0x5450  /* these numbers need to be adjusted. */
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
index 9749c8a..f5b7bf5 100644
--- a/arch/parisc/include/asm/mman.h
+++ b/arch/parisc/include/asm/mman.h
@@ -59,6 +59,9 @@
 #define MADV_MERGEABLE   65		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	67		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	68		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 #define MAP_VARIABLE	0
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 66d1f17..11bdd68 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -92,8 +92,6 @@
 
 static struct timer_list pdc_console_timer;
 
-extern struct console * console_drivers;
-
 static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
 {
 
@@ -169,11 +167,13 @@
 	 * It is unregistered if the pdc console was not selected as the
 	 * primary console. */
 
-	struct console *tmp = console_drivers;
+	struct console *tmp;
 
-	for (tmp = console_drivers; tmp; tmp = tmp->next)
+	acquire_console_sem();
+	for_each_console(tmp)
 		if (tmp == &pdc_cons)
 			break;
+	release_console_sem();
 
 	if (!tmp) {
 		printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e625e9e..959f38c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -20,6 +20,9 @@
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool PPC64 || PHYS_64BIT
 
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool ARCH_PHYS_ADDR_T_64BIT
+
 config MMU
 	bool
 	default y
@@ -116,7 +119,7 @@
 	bool
 	default y
 	select OF
-	select OF_FLATTREE
+	select OF_EARLY_FLATTREE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
@@ -209,7 +212,7 @@
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-		   PPC_85xx || PPC_86xx || PPC_PSERIES
+		   PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
 	bool
@@ -595,13 +598,11 @@
 
 	  If unsure, leave blank
 
-if !44x || BROKEN
 config ARCH_WANTS_FREEZER_CONTROL
 	def_bool y
 	depends on ADB_PMU
 
 source kernel/power/Kconfig
-endif
 
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
@@ -682,6 +683,15 @@
 	  Freescale MPC85xx/MPC86xx power management controller support
 	  (suspend/resume). For MPC83xx see platforms/83xx/suspend.c
 
+config PPC4xx_CPM
+	bool
+	default y
+	depends on SUSPEND && (44x || 40x)
+	help
+	  PPC4xx Clock Power Management (CPM) support (suspend/resume).
+	  It also enables support for two different idle states (idle-wait
+	  and idle-doze).
+
 config 4xx_SOC
 	bool
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index fae8192..96deec6 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -35,7 +35,7 @@
 
 BOOTCFLAGS	+= -I$(obj) -I$(srctree)/$(obj)
 
-DTS_FLAGS	?= -p 1024
+DTC_FLAGS	?= -p 1024
 
 $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
 $(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
@@ -332,10 +332,8 @@
 	$(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb)
 
 # Rule to build device tree blobs
-DTC = $(objtree)/scripts/dtc/dtc
-
-$(obj)/%.dtb: $(dtstree)/%.dts
-	$(DTC) -O dtb -o $(obj)/$*.dtb -b 0 $(DTS_FLAGS) $(dtstree)/$*.dts
+$(obj)/%.dtb: $(src)/dts/%.dts
+	$(call cmd,dtc)
 
 # If there isn't a platform selected then just strip the vmlinux.
 ifeq (,$(image-y))
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 9bb3d72..2a56a0d 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -33,7 +33,7 @@
 	aliases {
 		ethernet0 = &EMAC0;
 		serial0 = &UART0;
-		serial1 = &UART1;
+		//serial1 = &UART1; --gcl missing UART1 label
 	};
 
 	cpus {
@@ -52,7 +52,7 @@
 			d-cache-size = <32768>;
 			dcr-controller;
 			dcr-access-method = "native";
-			next-level-cache = <&L2C0>;
+			//next-level-cache = <&L2C0>; --gcl missing L2C0 label
 		};
 	};
 
@@ -142,7 +142,7 @@
 					/*RXEOB*/ 0x7 0x4
 					/*SERR*/  0x3 0x4
 					/*TXDE*/  0x4 0x4
-					/*RXDE*/  0x5 0x4
+					/*RXDE*/  0x5 0x4>;
 		};
 
 		POB0: opb {
@@ -182,7 +182,7 @@
 						reg = <0x001a0000 0x00060000>;
 					};
 				};
-			}
+			};
 
 			UART0: serial@ef600300 {
 				device_type = "serial";
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index a303703..5b27a4b 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -105,6 +105,15 @@
 		dcr-reg = <0x00c 0x002>;
 	};
 
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff791d>;
+	};
+
 	L2C0: l2c {
 		compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
 		dcr-reg = <0x020 0x008		/* Internal SRAM DCR's */
@@ -270,28 +279,6 @@
 				interrupts = <0x1 0x4>;
 			};
 
-			UART2: serial@ef600500 {
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0xef600500 0x00000008>;
-				virtual-reg = <0xef600500>;
-				clock-frequency = <0>; /* Filled in by U-Boot */
-				current-speed = <0>; /* Filled in by U-Boot */
-				interrupt-parent = <&UIC1>;
-				interrupts = <28 0x4>;
-			};
-
-			UART3: serial@ef600600 {
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0xef600600 0x00000008>;
-				virtual-reg = <0xef600600>;
-				clock-frequency = <0>; /* Filled in by U-Boot */
-				current-speed = <0>; /* Filled in by U-Boot */
-				interrupt-parent = <&UIC1>;
-				interrupts = <29 0x4>;
-			};
-
 			IIC0: i2c@ef600700 {
 				compatible = "ibm,iic-460ex", "ibm,iic";
 				reg = <0xef600700 0x00000014>;
diff --git a/arch/powerpc/boot/dts/cm5200.dts b/arch/powerpc/boot/dts/cm5200.dts
index dd38608..ad3a4f4 100644
--- a/arch/powerpc/boot/dts/cm5200.dts
+++ b/arch/powerpc/boot/dts/cm5200.dts
@@ -10,220 +10,74 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "schindler,cm5200";
 	compatible = "schindler,cm5200";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;		// L1, 16K
-			i-cache-size = <0x4000>;		// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x00000000 0x04000000>;	// 64MB
-	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
 		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
+		can@900 {
+			status = "disabled";
 		};
 
-		timer@620 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
+		can@980 {
+			status = "disabled";
 		};
 
-		timer@630 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
-		};
-
-		timer@640 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
-		};
-
-		timer@650 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
-		};
-
-		timer@660 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
-		};
-
-		timer@670 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
-		};
-
-		rtc@800 {	// Real time clock
-			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-			reg = <0x800 0x100>;
-			interrupts = <1 5 0 1 6 0>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0xff>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
-		};
-
-		serial@2000 {		// PSC1
+		psc@2000 {		// PSC1
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
 		};
 
-		serial@2200 {		// PSC2
+		psc@2200 {		// PSC2
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2200 0x100>;
-			interrupts = <2 2 0>;
 		};
 
-		serial@2400 {		// PSC3
+		psc@2400 {		// PSC3
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2400 0x100>;
-			interrupts = <2 3 0>;
 		};
 
-		serial@2c00 {		// PSC6
+		psc@2600 {		// PSC4
+			status = "disabled";
+		};
+
+		psc@2800 {		// PSC5
+			status = "disabled";
+		};
+
+		psc@2c00 {		// PSC6
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2c00 0x100>;
-			interrupts = <2 4 0>;
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				reg = <0>;
 			};
 		};
 
-		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
+		ata@3a00 {
+			status = "disabled";
 		};
 
-		sram@8000 {
-			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-			reg = <0x8000 0x4000>;
+		i2c@3d00 {
+			status = "disabled";
 		};
+
+	};
+
+	pci@f0000d00 {
+		status = "disabled";
 	};
 
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","simple-bus";
-		#address-cells = <2>;
-		#size-cells = <1>;
-		ranges = <0 0 0xfc000000 0x2000000>;
-
 		// 16-bit flash device at LocalPlus Bus CS0
 		flash@0,0 {
 			compatible = "cfi-flash";
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index 8e9be6b..27bd267 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -11,195 +11,68 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "intercontrol,digsy-mtc";
 	compatible = "intercontrol,digsy-mtc";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;		// L1, 16K
-			i-cache-size = <0x4000>;		// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
 
 	memory {
-		device_type = "memory";
 		reg = <0x00000000 0x02000000>;	// 32MB
 	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
 		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
+		rtc@800 {
+			status = "disabled";
 		};
 
-		timer@620 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
+		can@900 {
+			status = "disabled";
 		};
 
-		timer@630 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
+		can@980 {
+			status = "disabled";
 		};
 
-		timer@640 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
+		psc@2000 {		// PSC1
+			status = "disabled";
 		};
 
-		timer@650 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
+		psc@2200 {		// PSC2
+			status = "disabled";
 		};
 
-		timer@660 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
+		psc@2400 {		// PSC3
+			status = "disabled";
 		};
 
-		timer@670 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0xff>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
-		};
-
-		serial@2600 {		// PSC4
+		psc@2600 {		// PSC4
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2600 0x100>;
-			interrupts = <2 11 0>;
 		};
 
-		serial@2800 {		// PSC5
+		psc@2800 {		// PSC5
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2800 0x100>;
-			interrupts = <2 12 0>;
+		};
+
+		psc@2c00 {		// PSC6
+			status = "disabled";
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
-		};
-
 		i2c@3d00 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d00 0x40>;
-			interrupts = <2 15 0>;
-
 			rtc@50 {
 				compatible = "at,24c08";
 				reg = <0x50>;
@@ -211,16 +84,16 @@
 			};
 		};
 
-		sram@8000 {
-			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-			reg = <0x8000 0x4000>;
+		i2c@3d40 {
+			status = "disabled";
 		};
 	};
 
-	lpb {
-		compatible = "fsl,mpc5200b-lpb","simple-bus";
-		#address-cells = <2>;
-		#size-cells = <1>;
+	pci@f0000d00 {
+		status = "disabled";
+	};
+
+	localbus {
 		ranges = <0 0 0xff000000 0x1000000>;
 
 		// 16-bit flash device at LocalPlus Bus CS0
diff --git a/arch/powerpc/boot/dts/hotfoot.dts b/arch/powerpc/boot/dts/hotfoot.dts
index cad9c38..71d3bb4 100644
--- a/arch/powerpc/boot/dts/hotfoot.dts
+++ b/arch/powerpc/boot/dts/hotfoot.dts
@@ -117,6 +117,8 @@
 			};
 
 			IIC: i2c@ef600500 {
+				#address-cells = <1>;
+				#size-cells = <0>;
 				compatible = "ibm,iic-405ep", "ibm,iic";
 				reg = <0xef600500 0x00000011>;
 				interrupt-parent = <&UIC0>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 083e68e..89edb16 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -82,6 +82,15 @@
 		interrupt-parent = <&UIC0>;
 	};
 
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x0b0 0x003>;
+		unused-units = <0x00000000>;
+		idle-doze = <0x02000000>;
+		standby = <0xe3e74800>;
+	};
+
 	plb {
 		compatible = "ibm,plb-405ex", "ibm,plb4";
 		#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 59702ac..fb288bb 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -10,256 +10,75 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "fsl,lite5200b";
 	compatible = "fsl,lite5200b";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;	// L1, 16K
-			i-cache-size = <0x4000>;	// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
 
 	memory {
-		device_type = "memory";
 		reg = <0x00000000 0x10000000>;	// 256MB
 	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
 		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
-		};
-
-		timer@620 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
-		};
-
-		timer@630 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
-		};
-
-		timer@640 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
-		};
-
-		timer@650 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
-		};
-
-		timer@660 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
-		};
-
-		timer@670 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
-		};
-
-		rtc@800 {	// Real time clock
-			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-			reg = <0x800 0x100>;
-			interrupts = <1 5 0 1 6 0>;
-		};
-
-		can@900 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 17 0>;
-			reg = <0x900 0x80>;
-		};
-
-		can@980 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 18 0>;
-			reg = <0x980 0x80>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0xff>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
-		};
-
-		serial@2000 {		// PSC1
+		psc@2000 {		// PSC1
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
 			cell-index = <0>;
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
+		};
+
+		psc@2200 {		// PSC2
+			status = "disabled";
+		};
+
+		psc@2400 {		// PSC3
+			status = "disabled";
+		};
+
+		psc@2600 {		// PSC4
+			status = "disabled";
+		};
+
+		psc@2800 {		// PSC5
+			status = "disabled";
+		};
+
+		psc@2c00 {		// PSC6
+			status = "disabled";
 		};
 
 		// PSC2 in ac97 mode example
 		//ac97@2200 {		// PSC2
 		//	compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97";
 		//	cell-index = <1>;
-		//	reg = <0x2200 0x100>;
-		//	interrupts = <2 2 0>;
 		//};
 
 		// PSC3 in CODEC mode example
 		//i2s@2400 {		// PSC3
 		//	compatible = "fsl,mpc5200b-psc-i2s"; //not 5200 compatible
 		//	cell-index = <2>;
-		//	reg = <0x2400 0x100>;
-		//	interrupts = <2 3 0>;
-		//};
-
-		// PSC4 in uart mode example
-		//serial@2600 {		// PSC4
-		//	compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-		//	cell-index = <3>;
-		//	reg = <0x2600 0x100>;
-		//	interrupts = <2 11 0>;
-		//};
-
-		// PSC5 in uart mode example
-		//serial@2800 {		// PSC5
-		//	compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-		//	cell-index = <4>;
-		//	reg = <0x2800 0x100>;
-		//	interrupts = <2 12 0>;
 		//};
 
 		// PSC6 in spi mode example
 		//spi@2c00 {		// PSC6
 		//	compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
 		//	cell-index = <5>;
-		//	reg = <0x2c00 0x100>;
-		//	interrupts = <2 4 0>;
 		//};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;	// fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
-		};
-
-		i2c@3d00 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d00 0x40>;
-			interrupts = <2 15 0>;
-		};
-
 		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
-
 			eeprom@50 {
 				compatible = "atmel,24c02";
 				reg = <0x50>;
@@ -273,12 +92,6 @@
 	};
 
 	pci@f0000d00 {
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		device_type = "pci";
-		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
-		reg = <0xf0000d00 0x100>;
 		interrupt-map-mask = <0xf800 0 0 7>;
 		interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
 				 0xc000 0 0 2 &mpc5200_pic 1 1 3
@@ -298,11 +111,6 @@
 	};
 
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-
 		ranges = <0 0 0xfe000000 0x02000000>;
 
 		flash@0,0 {
diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts
index 0c3902b..48d72f3 100644
--- a/arch/powerpc/boot/dts/media5200.dts
+++ b/arch/powerpc/boot/dts/media5200.dts
@@ -11,14 +11,11 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "fsl,media5200";
 	compatible = "fsl,media5200";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
 
 	aliases {
 		console = &console;
@@ -30,16 +27,7 @@
 	};
 
 	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
 		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;		// L1, 16K
-			i-cache-size = <0x4000>;		// L1, 16K
 			timebase-frequency = <33000000>;	// 33 MHz, these were configured by U-Boot
 			bus-frequency = <132000000>;		// 132 MHz
 			clock-frequency = <396000000>;		// 396 MHz
@@ -47,205 +35,57 @@
 	};
 
 	memory {
-		device_type = "memory";
 		reg = <0x00000000 0x08000000>;	// 128MB RAM
 	};
 
-	soc@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
+	soc5200@f0000000 {
 		bus-frequency = <132000000>;// 132 MHz
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
 
 		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
+		psc@2000 {	// PSC1
+			status = "disabled";
 		};
 
-		timer@620 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
+		psc@2200 {	// PSC2
+			status = "disabled";
 		};
 
-		timer@630 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
+		psc@2400 {	// PSC3
+			status = "disabled";
 		};
 
-		timer@640 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
+		psc@2600 {	// PSC4
+			status = "disabled";
 		};
 
-		timer@650 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
-		};
-
-		timer@660 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
-		};
-
-		timer@670 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
-		};
-
-		rtc@800 {	// Real time clock
-			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-			reg = <0x800 0x100>;
-			interrupts = <1 5 0 1 6 0>;
-		};
-
-		can@900 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 17 0>;
-			reg = <0x900 0x80>;
-		};
-
-		can@980 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 18 0>;
-			reg = <0x980 0x80>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0x100>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
+		psc@2800 {	// PSC5
+			status = "disabled";
 		};
 
 		// PSC6 in uart mode
-		console: serial@2c00 {		// PSC6
+		console: psc@2c00 {		// PSC6
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			cell-index = <5>;
-			port-number = <0>;  // Logical port assignment
-			reg = <0x2c00 0x100>;
-			interrupts = <2 4 0>;
 		};
 
-		eth0: ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
+		ethernet@3000 {
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;	// fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
-		};
-
-		i2c@3d00 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d00 0x40>;
-			interrupts = <2 15 0>;
-		};
-
-		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
-		};
-
-		sram@8000 {
-			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-			reg = <0x8000 0x4000>;
+		usb@1000 {
+			reg = <0x1000 0x100>;
 		};
 	};
 
 	pci@f0000d00 {
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		device_type = "pci";
-		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
-		reg = <0xf0000d00 0x100>;
 		interrupt-map-mask = <0xf800 0 0 7>;
 		interrupt-map = <0xc000 0 0 1 &media5200_fpga 0 2 // 1st slot
 				 0xc000 0 0 2 &media5200_fpga 0 3
@@ -262,37 +102,29 @@
 
 				 0xe000 0 0 1 &media5200_fpga 0 5 // CoralIP
 				>;
-		clock-frequency = <0>; // From boot loader
-		interrupts = <2 8 0 2 9 0 2 10 0>;
-		interrupt-parent = <&mpc5200_pic>;
-		bus-range = <0 0>;
 		ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
 			  0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
 			  0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+		interrupt-parent = <&mpc5200_pic>;
 	};
 
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","simple-bus";
-		#address-cells = <2>;
-		#size-cells = <1>;
-
 		ranges = < 0 0 0xfc000000 0x02000000
 			   1 0 0xfe000000 0x02000000
 			   2 0 0xf0010000 0x00010000
 			   3 0 0xf0020000 0x00010000 >;
-
 		flash@0,0 {
 			compatible = "amd,am29lv28ml", "cfi-flash";
-			reg = <0 0x0 0x2000000>;		// 32 MB
-			bank-width = <4>;			// Width in bytes of the flash bank
-			device-width = <2>;			// Two devices on each bank
+			reg = <0 0x0 0x2000000>;                // 32 MB
+			bank-width = <4>;                       // Width in bytes of the flash bank
+			device-width = <2>;                     // Two devices on each bank
 		};
 
 		flash@1,0 {
 			compatible = "amd,am29lv28ml", "cfi-flash";
-			reg = <1 0 0x2000000>;			// 32 MB
-			bank-width = <4>;			// Width in bytes of the flash bank
-			device-width = <2>;			// Two devices on each bank
+			reg = <1 0 0x2000000>;                  // 32 MB
+			bank-width = <4>;                       // Width in bytes of the flash bank
+			device-width = <2>;                     // Two devices on each bank
 		};
 
 		media5200_fpga: fpga@2,0 {
diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts
index 6ca4fc1..0b78e89 100644
--- a/arch/powerpc/boot/dts/motionpro.dts
+++ b/arch/powerpc/boot/dts/motionpro.dts
@@ -10,219 +10,73 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "promess,motionpro";
 	compatible = "promess,motionpro";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;	// L1, 16K
-			i-cache-size = <0x4000>;	// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x00000000 0x04000000>;	// 64MB
-	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
 		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
-		};
-
-		timer@620 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
-		};
-
-		timer@630 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
-		};
-
-		timer@640 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
-		};
-
-		timer@650 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
-		};
-
-		motionpro-led@660 {	// Motion-PRO status LED
+		timer@660 {	// Motion-PRO status LED
 			compatible = "promess,motionpro-led";
 			label = "motionpro-statusled";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
 			blink-delay = <100>; // 100 msec
 		};
 
-		motionpro-led@670 {	// Motion-PRO ready LED
+		timer@670 {	// Motion-PRO ready LED
 			compatible = "promess,motionpro-led";
 			label = "motionpro-readyled";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
 		};
 
-		rtc@800 {	// Real time clock
-			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-			reg = <0x800 0x100>;
-			interrupts = <1 5 0 1 6 0>;
+		can@900 {
+			status = "disabled";
 		};
 
-		can@980 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 18 0>;
-			reg = <0x980 0x80>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0xff>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
-		};
-
-		serial@2000 {		// PSC1
+		psc@2000 {		// PSC1
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
 		};
 
 		// PSC2 in spi master mode 
-		spi@2200 {		// PSC2
+		psc@2200 {		// PSC2
 			compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
 			cell-index = <1>;
-			reg = <0x2200 0x100>;
-			interrupts = <2 2 0>;
 		};
 
-		// PSC5 in uart mode
-		serial@2800 {		// PSC5
+		psc@2400 {		// PSC3
+			status = "disabled";
+		};
+
+		psc@2600 {		// PSC4
+			status = "disabled";
+		};
+
+		psc@2800 {		// PSC5
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2800 0x100>;
-			interrupts = <2 12 0>;
+		};
+
+		psc@2c00 {		// PSC6
+			status = "disabled";
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@2 {
 				reg = <2>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
+		i2c@3d00 {
+			status = "disabled";
 		};
 
 		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
-
 			rtc@68 {
 				compatible = "dallas,ds1339";
 				reg = <0x68>;
@@ -235,10 +89,11 @@
 		};
 	};
 
+	pci@f0000d00 {
+		status = "disabled";
+	};
+
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","simple-bus";
-		#address-cells = <2>;
-		#size-cells = <1>;
 		ranges = <0 0 0xff000000 0x01000000
 			  1 0 0x50000000 0x00010000
 			  2 0 0x50010000 0x00010000
@@ -280,5 +135,6 @@
 			#size-cells = <1>;
 			#address-cells = <1>;
 		};
+
 	};
 };
diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi
new file mode 100644
index 0000000..bc27548
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc5200b.dtsi
@@ -0,0 +1,275 @@
+/*
+ * base MPC5200b Device Tree Source
+ *
+ * Copyright (C) 2010 SecretLab
+ * Grant Likely <grant@secretlab.ca>
+ * John Bonesio <bones@secretlab.ca>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+	model = "fsl,mpc5200b";
+	compatible = "fsl,mpc5200b";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&mpc5200_pic>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		powerpc: PowerPC,5200@0 {
+			device_type = "cpu";
+			reg = <0>;
+			d-cache-line-size = <32>;
+			i-cache-line-size = <32>;
+			d-cache-size = <0x4000>;	// L1, 16K
+			i-cache-size = <0x4000>;	// L1, 16K
+			timebase-frequency = <0>;	// from bootloader
+			bus-frequency = <0>;		// from bootloader
+			clock-frequency = <0>;		// from bootloader
+		};
+	};
+
+	memory: memory {
+		device_type = "memory";
+		reg = <0x00000000 0x04000000>;	// 64MB
+	};
+
+	soc: soc5200@f0000000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "fsl,mpc5200b-immr";
+		ranges = <0 0xf0000000 0x0000c000>;
+		reg = <0xf0000000 0x00000100>;
+		bus-frequency = <0>;		// from bootloader
+		system-frequency = <0>;		// from bootloader
+
+		cdm@200 {
+			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
+			reg = <0x200 0x38>;
+		};
+
+		mpc5200_pic: interrupt-controller@500 {
+			// 5200 interrupts are encoded into two levels;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
+			reg = <0x500 0x80>;
+		};
+
+		timer@600 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x600 0x10>;
+			interrupts = <1 9 0>;
+		};
+
+		timer@610 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x610 0x10>;
+			interrupts = <1 10 0>;
+		};
+
+		timer@620 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x620 0x10>;
+			interrupts = <1 11 0>;
+		};
+
+		timer@630 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x630 0x10>;
+			interrupts = <1 12 0>;
+		};
+
+		timer@640 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x640 0x10>;
+			interrupts = <1 13 0>;
+		};
+
+		timer@650 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x650 0x10>;
+			interrupts = <1 14 0>;
+		};
+
+		timer@660 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x660 0x10>;
+			interrupts = <1 15 0>;
+		};
+
+		timer@670 {	// General Purpose Timer
+			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+			reg = <0x670 0x10>;
+			interrupts = <1 16 0>;
+		};
+
+		rtc@800 {	// Real time clock
+			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
+			reg = <0x800 0x100>;
+			interrupts = <1 5 0 1 6 0>;
+		};
+
+		can@900 {
+			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
+			interrupts = <2 17 0>;
+			reg = <0x900 0x80>;
+		};
+
+		can@980 {
+			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
+			interrupts = <2 18 0>;
+			reg = <0x980 0x80>;
+		};
+
+		gpio_simple: gpio@b00 {
+			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
+			reg = <0xb00 0x40>;
+			interrupts = <1 7 0>;
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		gpio_wkup: gpio@c00 {
+			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
+			reg = <0xc00 0x40>;
+			interrupts = <1 8 0 0 3 0>;
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		spi@f00 {
+			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
+			reg = <0xf00 0x20>;
+			interrupts = <2 13 0 2 14 0>;
+		};
+
+		usb: usb@1000 {
+			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
+			reg = <0x1000 0xff>;
+			interrupts = <2 6 0>;
+		};
+
+		dma-controller@1200 {
+			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
+			reg = <0x1200 0x80>;
+			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+			              3 4 0  3 5 0  3 6 0  3 7 0
+			              3 8 0  3 9 0  3 10 0  3 11 0
+			              3 12 0  3 13 0  3 14 0  3 15 0>;
+		};
+
+		xlb@1f00 {
+			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
+			reg = <0x1f00 0x100>;
+		};
+
+		psc1: psc@2000 {		// PSC1
+			compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc";
+			reg = <0x2000 0x100>;
+			interrupts = <2 1 0>;
+		};
+
+		psc2: psc@2200 {		// PSC2
+			compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc";
+			reg = <0x2200 0x100>;
+			interrupts = <2 2 0>;
+		};
+
+		psc3: psc@2400 {		// PSC3
+			compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc";
+			reg = <0x2400 0x100>;
+			interrupts = <2 3 0>;
+		};
+
+		psc4: psc@2600 {		// PSC4
+			compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc";
+			reg = <0x2600 0x100>;
+			interrupts = <2 11 0>;
+		};
+
+		psc5: psc@2800 {		// PSC5
+			compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc";
+			reg = <0x2800 0x100>;
+			interrupts = <2 12 0>;
+		};
+
+		psc6: psc@2c00 {		// PSC6
+			compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc";
+			reg = <0x2c00 0x100>;
+			interrupts = <2 4 0>;
+		};
+
+		eth0: ethernet@3000 {
+			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
+			reg = <0x3000 0x400>;
+			local-mac-address = [ 00 00 00 00 00 00 ];
+			interrupts = <2 5 0>;
+		};
+
+		mdio@3000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
+			reg = <0x3000 0x400>;	// fec range, since we need to setup fec interrupts
+			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
+		};
+
+		ata@3a00 {
+			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
+			reg = <0x3a00 0x100>;
+			interrupts = <2 7 0>;
+		};
+
+		i2c@3d00 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+			reg = <0x3d00 0x40>;
+			interrupts = <2 15 0>;
+		};
+
+		i2c@3d40 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+			reg = <0x3d40 0x40>;
+			interrupts = <2 16 0>;
+		};
+
+		sram@8000 {
+			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
+			reg = <0x8000 0x4000>;
+		};
+	};
+
+	pci: pci@f0000d00 {
+		#interrupt-cells = <1>;
+		#size-cells = <2>;
+		#address-cells = <3>;
+		device_type = "pci";
+		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
+		reg = <0xf0000d00 0x100>;
+		// interrupt-map-mask = need to add
+		// interrupt-map = need to add
+		clock-frequency = <0>; // From boot loader
+		interrupts = <2 8 0 2 9 0 2 10 0>;
+		bus-range = <0 0>;
+		// ranges = need to add
+	};
+
+	localbus: localbus {
+		compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0xfc000000 0x2000000>;
+	};
+};
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 05a76cc..697b3f6 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -297,6 +297,14 @@
 			interrupt-parent = < &ipic >;
 		};
 
+		dma@2c000 {
+			compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+			reg = <0x2c000 0x1800>;
+			interrupts = <3 0x8
+					94 0x8>;
+			interrupt-parent = < &ipic >;
+		};
+
 	};
 
 	pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a97eb2d..d3db02f 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -265,6 +265,14 @@
 			interrupt-parent = < &ipic >;
 		};
 
+		dma@2c000 {
+			compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+			reg = <0x2c000 0x1800>;
+			interrupts = <3 0x8
+					94 0x8>;
+			interrupt-parent = < &ipic >;
+		};
+
 	};
 
 	pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/mucmc52.dts b/arch/powerpc/boot/dts/mucmc52.dts
index b72a758..21d3472 100644
--- a/arch/powerpc/boot/dts/mucmc52.dts
+++ b/arch/powerpc/boot/dts/mucmc52.dts
@@ -11,172 +11,109 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "manroland,mucmc52";
 	compatible = "manroland,mucmc52";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;	// L1, 16K
-			i-cache-size = <0x4000>;	// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x00000000 0x04000000>;	// 64MB
-	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
 		gpt0: timer@600 {	// GPT 0 in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt1: timer@610 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt2: timer@620 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt3: timer@630 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
+		timer@640 {
+			status = "disabled";
 		};
 
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
+		timer@650 {
+			status = "disabled";
 		};
 
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
+		timer@660 {
+			status = "disabled";
 		};
 
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
+		timer@670 {
+			status = "disabled";
 		};
 
-		serial@2000 { /* PSC1 in UART mode */
+		rtc@800 {
+			status = "disabled";
+		};
+
+		can@900 {
+			status = "disabled";
+		};
+
+		can@980 {
+			status = "disabled";
+		};
+
+		spi@f00 {
+			status = "disabled";
+		};
+
+		usb@1000 {
+			status = "disabled";
+		};
+
+		psc@2000 {		// PSC1
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
 		};
 
-		serial@2200 { /* PSC2 in UART mode */
+		psc@2200 {		// PSC2
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2200 0x100>;
-			interrupts = <2 2 0>;
 		};
 
-		serial@2c00 { /* PSC6 in UART mode */
+		psc@2400 {		// PSC3
+			status = "disabled";
+		};
+
+		psc@2600 {		// PSC4
+			status = "disabled";
+		};
+
+		psc@2800 {		// PSC5
+			status = "disabled";
+		};
+
+		psc@2c00 {		// PSC6
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2c00 0x100>;
-			interrupts = <2 4 0>;
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>; 	// fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>; 	// these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				compatible = "intel,lxt971";
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
+		i2c@3d00 {
+			status = "disabled";
 		};
 
 		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
 			hwmon@2c {
 				compatible = "ad,adm9240";
 				reg = <0x2c>;
@@ -186,20 +123,9 @@
 				reg = <0x51>;
 			};
 		};
-
-		sram@8000 {
-			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-			reg = <0x8000 0x4000>;
-		};
 	};
 
 	pci@f0000d00 {
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		device_type = "pci";
-		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
-		reg = <0xf0000d00 0x100>;
 		interrupt-map-mask = <0xf800 0 0 7>;
 		interrupt-map = <
 				/* IDSEL 0x10 */
@@ -208,20 +134,12 @@
 				0x8000 0 0 3 &mpc5200_pic 0 2 3
 				0x8000 0 0 4 &mpc5200_pic 0 1 3
 				>;
-		clock-frequency = <0>; // From boot loader
-		interrupts = <2 8 0 2 9 0 2 10 0>;
-		bus-range = <0 0>;
 		ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000
 			  0x02000000 0 0x90000000 0x90000000 0 0x10000000
 			  0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
 	};
 
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-
 		ranges = <0 0 0xff800000 0x00800000
 			  1 0 0x80000000 0x00800000
 			  3 0 0x80000000 0x00800000>;
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index 8a4ec30..9e35499 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -12,246 +12,92 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "phytec,pcm030";
 	compatible = "phytec,pcm030";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;	// L1, 16K
-			i-cache-size = <0x4000>;	// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x00000000 0x04000000>;	// 64MB
-	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
-		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
+		timer@600 {		// General Purpose Timer
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
-		};
-
 		gpt2: timer@620 {	// General Purpose Timer in GPIO mode
 			compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt3: timer@630 {	// General Purpose Timer in GPIO mode
 			compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt4: timer@640 {	// General Purpose Timer in GPIO mode
 			compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt5: timer@650 {	// General Purpose Timer in GPIO mode
 			compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt6: timer@660 {	// General Purpose Timer in GPIO mode
 			compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt7: timer@670 {	// General Purpose Timer in GPIO mode
 			compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
-		rtc@800 {	// Real time clock
-			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-			reg = <0x800 0x100>;
-			interrupts = <1 5 0 1 6 0>;
-		};
-
-		can@900 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 17 0>;
-			reg = <0x900 0x80>;
-		};
-
-		can@980 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 18 0>;
-			reg = <0x980 0x80>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0xff>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
-		};
-
-		ac97@2000 { /* PSC1 in ac97 mode */
+		psc@2000 { /* PSC1 in ac97 mode */
 			compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
 			cell-index = <0>;
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
 		};
 
 		/* PSC2 port is used by CAN1/2 */
+		psc@2200 {
+			status = "disabled";
+		};
 
-		serial@2400 { /* PSC3 in UART mode */
+		psc@2400 { /* PSC3 in UART mode */
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			cell-index = <2>;
-			reg = <0x2400 0x100>;
-			interrupts = <2 3 0>;
 		};
 
 		/* PSC4 is ??? */
+		psc@2600 {
+			status = "disabled";
+		};
 
 		/* PSC5 is ??? */
+		psc@2800 {
+			status = "disabled";
+		};
 
-		serial@2c00 { /* PSC6 in UART mode */
+		psc@2c00 { /* PSC6 in UART mode */
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			cell-index = <5>;
-			reg = <0x2c00 0x100>;
-			interrupts = <2 4 0>;
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;	// fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
-		};
-
-		i2c@3d00 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d00 0x40>;
-			interrupts = <2 15 0>;
-		};
-
 		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
 			rtc@51 {
 				compatible = "nxp,pcf8563";
 				reg = <0x51>;
@@ -259,6 +105,7 @@
 			eeprom@52 {
 				compatible = "catalyst,24c32";
 				reg = <0x52>;
+				pagesize = <32>;
 			};
 		};
 
@@ -269,12 +116,6 @@
 	};
 
 	pci@f0000d00 {
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		device_type = "pci";
-		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
-		reg = <0xf0000d00 0x100>;
 		interrupt-map-mask = <0xf800 0 0 7>;
 		interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
 				 0xc000 0 0 2 &mpc5200_pic 1 1 3
@@ -285,11 +126,12 @@
 				 0xc800 0 0 2 &mpc5200_pic 1 2 3
 				 0xc800 0 0 3 &mpc5200_pic 1 3 3
 				 0xc800 0 0 4 &mpc5200_pic 0 0 3>;
-		clock-frequency = <0>; // From boot loader
-		interrupts = <2 8 0 2 9 0 2 10 0>;
-		bus-range = <0 0>;
 		ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
 			  0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
 			  0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
 	};
+
+	localbus {
+		status = "disabled";
+	};
 };
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
index 85d857a..1dd478b 100644
--- a/arch/powerpc/boot/dts/pcm032.dts
+++ b/arch/powerpc/boot/dts/pcm032.dts
@@ -12,99 +12,37 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "phytec,pcm032";
 	compatible = "phytec,pcm032";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;	// L1, 16K
-			i-cache-size = <0x4000>;	// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
 
 	memory {
-		device_type = "memory";
 		reg = <0x00000000 0x08000000>;	// 128MB
 	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
-		timer@600 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
+		timer@600 {		// General Purpose Timer
 			fsl,has-wdt;
 		};
 
-		timer@610 {	// General Purpose Timer
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
-		};
-
 		gpt2: timer@620 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt3: timer@630 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x630 0x10>;
-			interrupts = <1 12 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt4: timer@640 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt5: timer@650 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
@@ -118,163 +56,62 @@
 		};
 
 		gpt7: timer@670 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
-		rtc@800 {	// Real time clock
-			compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-			reg = <0x800 0x100>;
-			interrupts = <1 5 0 1 6 0>;
-		};
-
-		can@900 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 17 0>;
-			reg = <0x900 0x80>;
-		};
-
-		can@980 {
-			compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-			interrupts = <2 18 0>;
-			reg = <0x980 0x80>;
-		};
-
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		spi@f00 {
-			compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-			reg = <0xf00 0x20>;
-			interrupts = <2 13 0 2 14 0>;
-		};
-
-		usb@1000 {
-			compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-			reg = <0x1000 0xff>;
-			interrupts = <2 6 0>;
-		};
-
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
-		};
-
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
-		};
-
-		ac97@2000 {	/* PSC1 is ac97 */
+		psc@2000 {	/* PSC1 is ac97 */
 			compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97";
 			cell-index = <0>;
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
 		};
 
 		/* PSC2 port is used by CAN1/2 */
+		psc@2200 {
+			status = "disabled";
+		};
 
-		serial@2400 { /* PSC3 in UART mode */
+		psc@2400 { /* PSC3 in UART mode */
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			cell-index = <2>;
-			reg = <0x2400 0x100>;
-			interrupts = <2 3 0>;
 		};
 
 		/* PSC4 is ??? */
+		psc@2600 {
+			status = "disabled";
+		};
 
 		/* PSC5 is ??? */
+		psc@2800 {
+			status = "disabled";
+		};
 
-		serial@2c00 { /* PSC6 in UART mode */
+		psc@2c00 { /* PSC6 in UART mode */
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			cell-index = <5>;
-			reg = <0x2c00 0x100>;
-			interrupts = <2 4 0>;
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;	// fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
-		};
-
-		i2c@3d00 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d00 0x40>;
-			interrupts = <2 15 0>;
-		};
-
 		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
 			rtc@51 {
 				compatible = "nxp,pcf8563";
 				reg = <0x51>;
 			};
 			eeprom@52 {
-				compatible = "at24,24c32";
+				compatible = "catalyst,24c32";
 				reg = <0x52>;
+				pagesize = <32>;
 			};
 		};
-
-		sram@8000 {
-			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-			reg = <0x8000 0x4000>;
-		};
 	};
 
 	pci@f0000d00 {
-		#interrupt-cells = <1>;
-		#size-cells = <2>;
-		#address-cells = <3>;
-		device_type = "pci";
-		compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
-		reg = <0xf0000d00 0x100>;
 		interrupt-map-mask = <0xf800 0 0 7>;
 		interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
 				 0xc000 0 0 2 &mpc5200_pic 1 1 3
@@ -285,20 +122,12 @@
 				 0xc800 0 0 2 &mpc5200_pic 1 2 3
 				 0xc800 0 0 3 &mpc5200_pic 1 3 3
 				 0xc800 0 0 4 &mpc5200_pic 0 0 3>;
-		clock-frequency = <0>; // From boot loader
-		interrupts = <2 8 0 2 9 0 2 10 0>;
-		bus-range = <0 0>;
 		ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
 			  0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
 			  0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
 	};
 
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-
 		ranges = <0 0 0xfe000000 0x02000000
 			  1 0 0xfc000000 0x02000000
 			  2 0 0xfbe00000 0x00200000
@@ -351,40 +180,39 @@
 			bank-width = <2>;
 		};
 
-                /*
+		/*
 		 * example snippets for FPGA
 		 *
 		 * fpga@3,0 {
-		 *         compatible = "fpga_driver";
-		 *         reg = <3 0 0x02000000>;
-		 *         bank-width = <4>;
+		 *	 compatible = "fpga_driver";
+		 *	 reg = <3 0 0x02000000>;
+		 *	 bank-width = <4>;
 		 * };
 		 *
 		 * fpga@4,0 {
-		 *         compatible = "fpga_driver";
-		 *         reg = <4 0 0x02000000>;
-		 *         bank-width = <4>;
+		 *	 compatible = "fpga_driver";
+		 *	 reg = <4 0 0x02000000>;
+		 *	 bank-width = <4>;
 		 * };
-                 */
+		 */
 
-                /*
+		/*
 		 * example snippets for free chipselects
-                 *
+		 *
 		 * device@5,0 {
-		 *         compatible = "custom_driver";
-		 *         reg = <5 0 0x02000000>;
+		 *	 compatible = "custom_driver";
+		 *	 reg = <5 0 0x02000000>;
 		 * };
-                 *
+		 *
 		 * device@6,0 {
-		 *         compatible = "custom_driver";
-		 *         reg = <6 0 0x02000000>;
+		 *	 compatible = "custom_driver";
+		 *	 reg = <6 0 0x02000000>;
 		 * };
-                 *
+		 *
 		 * device@7,0 {
-		 *         compatible = "custom_driver";
-		 *         reg = <7 0 0x02000000>;
+		 *	 compatible = "custom_driver";
+		 *	 reg = <7 0 0x02000000>;
 		 * };
-                 */
+		 */
 	};
 };
-
diff --git a/arch/powerpc/boot/dts/uc101.dts b/arch/powerpc/boot/dts/uc101.dts
index 019264c..ba83d54 100644
--- a/arch/powerpc/boot/dts/uc101.dts
+++ b/arch/powerpc/boot/dts/uc101.dts
@@ -11,79 +11,24 @@
  * option) any later version.
  */
 
-/dts-v1/;
+/include/ "mpc5200b.dtsi"
 
 / {
 	model = "manroland,uc101";
 	compatible = "manroland,uc101";
-	#address-cells = <1>;
-	#size-cells = <1>;
-	interrupt-parent = <&mpc5200_pic>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		PowerPC,5200@0 {
-			device_type = "cpu";
-			reg = <0>;
-			d-cache-line-size = <32>;
-			i-cache-line-size = <32>;
-			d-cache-size = <0x4000>;	// L1, 16K
-			i-cache-size = <0x4000>;	// L1, 16K
-			timebase-frequency = <0>;	// from bootloader
-			bus-frequency = <0>;		// from bootloader
-			clock-frequency = <0>;		// from bootloader
-		};
-	};
-
-	memory {
-		device_type = "memory";
-		reg = <0x00000000 0x04000000>;	// 64MB
-	};
 
 	soc5200@f0000000 {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		compatible = "fsl,mpc5200b-immr";
-		ranges = <0 0xf0000000 0x0000c000>;
-		reg = <0xf0000000 0x00000100>;
-		bus-frequency = <0>;		// from bootloader
-		system-frequency = <0>;		// from bootloader
-
-		cdm@200 {
-			compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-			reg = <0x200 0x38>;
-		};
-
-		mpc5200_pic: interrupt-controller@500 {
-			// 5200 interrupts are encoded into two levels;
-			interrupt-controller;
-			#interrupt-cells = <3>;
-			compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-			reg = <0x500 0x80>;
-		};
-
 		gpt0: timer@600 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x600 0x10>;
-			interrupts = <1 9 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt1: timer@610 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x610 0x10>;
-			interrupts = <1 10 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt2: timer@620 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x620 0x10>;
-			interrupts = <1 11 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
@@ -97,118 +42,85 @@
 		};
 
 		gpt4: timer@640 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x640 0x10>;
-			interrupts = <1 13 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt5: timer@650 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x650 0x10>;
-			interrupts = <1 14 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt6: timer@660 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x660 0x10>;
-			interrupts = <1 15 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
 		gpt7: timer@670 {	// General Purpose Timer in GPIO mode
-			compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-			reg = <0x670 0x10>;
-			interrupts = <1 16 0>;
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
 
-		gpio_simple: gpio@b00 {
-			compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-			reg = <0xb00 0x40>;
-			interrupts = <1 7 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
+		rtc@800 {
+			status = "disabled";
 		};
 
-		gpio_wkup: gpio@c00 {
-			compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-			reg = <0xc00 0x40>;
-			interrupts = <1 8 0 0 3 0>;
-			gpio-controller;
-			#gpio-cells = <2>;
+		can@900 {
+			status = "disabled";
 		};
 
-		dma-controller@1200 {
-			compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-			reg = <0x1200 0x80>;
-			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
-			              3 4 0  3 5 0  3 6 0  3 7 0
-			              3 8 0  3 9 0  3 10 0  3 11 0
-			              3 12 0  3 13 0  3 14 0  3 15 0>;
+		can@980 {
+			status = "disabled";
 		};
 
-		xlb@1f00 {
-			compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-			reg = <0x1f00 0x100>;
+		spi@f00 {
+			status = "disabled";
 		};
 
-		serial@2000 { /* PSC1 in UART mode */
+		usb@1000 {
+			status = "disabled";
+		};
+
+		psc@2000 {	// PSC1
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2000 0x100>;
-			interrupts = <2 1 0>;
 		};
 
-		serial@2200 { /* PSC2 in UART mode */
+		psc@2200 {	// PSC2
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2200 0x100>;
-			interrupts = <2 2 0>;
 		};
 
-		serial@2c00 {		/* PSC6 in UART mode */
+		psc@2400 {	// PSC3
+			status = "disabled";
+		};
+
+		psc@2600 {	// PSC4
+			status = "disabled";
+		};
+
+		psc@2800 {	// PSC5
+			status = "disabled";
+		};
+
+		psc@2c00 {	// PSC6
 			compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
-			reg = <0x2c00 0x100>;
-			interrupts = <2 4 0>;
 		};
 
 		ethernet@3000 {
-			compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-			reg = <0x3000 0x400>;
-			local-mac-address = [ 00 00 00 00 00 00 ];
-			interrupts = <2 5 0>;
 			phy-handle = <&phy0>;
 		};
 
 		mdio@3000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-			reg = <0x3000 0x400>;	// fec range, since we need to setup fec interrupts
-			interrupts = <2 5 0>;	// these are for "mii command finished", not link changes & co.
-
 			phy0: ethernet-phy@0 {
 				compatible = "intel,lxt971";
 				reg = <0>;
 			};
 		};
 
-		ata@3a00 {
-			compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-			reg = <0x3a00 0x100>;
-			interrupts = <2 7 0>;
+		i2c@3d00 {
+			status = "disabled";
 		};
 
 		i2c@3d40 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-			reg = <0x3d40 0x40>;
-			interrupts = <2 16 0>;
 			fsl,preserve-clocking;
 			clock-frequency = <400000>;
 
@@ -221,19 +133,13 @@
 				reg = <0x51>;
 			};
 		};
+	};
 
-		sram@8000 {
-			compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-			reg = <0x8000 0x4000>;
-		};
+	pci@f0000d00 {
+		status = "disabled";
 	};
 
 	localbus {
-		compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus";
-
-		#address-cells = <2>;
-		#size-cells = <1>;
-
 		ranges = <0 0 0xff800000 0x00800000
 			  1 0 0x80000000 0x00800000
 			  3 0 0x80000000 0x00800000>;
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 4e19ee7..34b8c1a 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -12,6 +12,8 @@
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_KILAUEA=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_WALNUT is not set
 CONFIG_SPARSE_IRQ=y
 CONFIG_PCI=y
@@ -42,6 +44,9 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_NDFC=y
 CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=35000
 # CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 45c64d8..17e4dd9 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -42,6 +42,9 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_NDFC=y
 CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=35000
 # CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
index 4b0e152..6b6dc20 100644
--- a/arch/powerpc/include/asm/8xx_immap.h
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -93,7 +93,7 @@
 } memctl8xx_t;
 
 /*-----------------------------------------------------------------------
- * BR - Memory Controler: Base Register					16-9
+ * BR - Memory Controller: Base Register					16-9
  */
 #define BR_BA_MSK	0xffff8000	/* Base Address Mask			*/
 #define BR_AT_MSK	0x00007000	/* Address Type Mask			*/
@@ -110,7 +110,7 @@
 #define BR_V		0x00000001	/* Bank Valid				*/
 
 /*-----------------------------------------------------------------------
- * OR - Memory Controler: Option Register				16-11
+ * OR - Memory Controller: Option Register				16-11
  */
 #define OR_AM_MSK	0xffff8000	/* Address Mask Mask			*/
 #define OR_ATM_MSK	0x00007000	/* Address Type Mask Mask		*/
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 30964ae..8a7e9314 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -267,7 +267,16 @@
 #include <asm-generic/bitops/fls64.h>
 #endif /* __powerpc64__ */
 
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
 #include <asm-generic/bitops/hweight.h>
+#endif
+
 #include <asm-generic/bitops/find.h>
 
 /* Little-endian versions */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f3a1fdd..f0a211d 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -199,6 +199,8 @@
 #define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
 #define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0100000000000000)
 #define CPU_FTR_STCX_CHECKS_ADDRESS	LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_POPCNTB			LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_POPCNTD			LONG_ASM_CONST(0x0800000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -403,21 +405,22 @@
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
+	    CPU_FTR_POPCNTB)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e1844..f71bb4c 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@
 	return cpu_thread_mask_to_cores(cpu_online_map);
 }
 
-static inline int cpu_thread_to_core(int cpu)
-{
-	return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
 
 static inline int cpu_thread_in_core(int cpu)
 {
 	return cpu & (threads_per_core - 1);
 }
 
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_first_thread_sibling(int cpu)
 {
 	return cpu & ~(threads_per_core - 1);
 }
 
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
 {
 	return cpu | (threads_per_core - 1);
 }
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index a3954e4..16d25c0 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -9,6 +9,12 @@
 struct dma_map_ops;
 struct device_node;
 
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
 struct dev_archdata {
 	/* DMA operations on that device */
 	struct dma_map_ops	*dma_ops;
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 20778a4..4ef662e 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -46,6 +46,7 @@
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 #define FW_FEATURE_BEAT		ASM_CONST(0x0000000001000000)
 #define FW_FEATURE_CMO		ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN		ASM_CONST(0x0000000004000000)
 
 #ifndef __ASSEMBLY__
 
@@ -59,7 +60,7 @@
 		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
 		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
 		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
-		FW_FEATURE_CMO,
+		FW_FEATURE_CMO | FW_FEATURE_VPHN,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
 	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index de03ca5..ec089ac 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -232,7 +232,9 @@
 #define H_GET_EM_PARMS		0x2B8
 #define H_SET_MPP		0x2D0
 #define H_GET_MPP		0x2D4
-#define MAX_HCALL_OPCODE	H_GET_MPP
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_BEST_ENERGY		0x2F4
+#define MAX_HCALL_OPCODE	H_BEST_ENERGY
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
index 8519200..c7dc17c 100644
--- a/arch/powerpc/include/asm/ioctls.h
+++ b/arch/powerpc/include/asm/ioctls.h
@@ -94,6 +94,7 @@
 #define TIOCSRS485	0x542f
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
 
 #define TIOCSERCONFIG	0x5453
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 7f5e0fe..380d48b 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -62,7 +62,10 @@
 	volatile u32 dyn_pir;		// Dynamic ProcIdReg value	x20-x23
 	u32	dsei_data;           	// DSEI data                  	x24-x27
 	u64	sprg3;               	// SPRG3 value                	x28-x2F
-	u8	reserved3[80];		// Reserved			x30-x7F
+	u8	reserved3[40];		// Reserved			x30-x57
+	volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
+					// associativity change counters x58-x5F
+	u8	reserved4[32];		// Reserved			x60-x7F
 
 //=============================================================================
 // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d045b01..8433d36 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -27,9 +27,7 @@
 struct rtc_time;
 struct file;
 struct pci_controller;
-#ifdef CONFIG_KEXEC
 struct kimage;
-#endif
 
 #ifdef CONFIG_SMP
 struct smp_ops_t {
@@ -72,7 +70,7 @@
 					     int psize, int ssize);
 	void		(*flush_hash_range)(unsigned long number, int local);
 
-	/* special for kexec, to be called in real mode, linar mapping is
+	/* special for kexec, to be called in real mode, linear mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
 
@@ -324,8 +322,6 @@
 
 #endif /* CONFIG_PPC_PMAC */
 
-extern void setup_pci_ptrs(void);
-
 #ifdef CONFIG_SMP
 /* Poor default implementations */
 extern void __devinit smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index aac87cb..fd3fd58 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -33,6 +33,9 @@
 extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
 /*
@@ -42,6 +45,8 @@
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)
 
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 850b72f..92efe67 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -10,31 +10,7 @@
 #ifndef _ASM_POWERPC_NVRAM_H
 #define _ASM_POWERPC_NVRAM_H
 
-#include <linux/errno.h>
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0  0x74
-#define NVRAM_AS1  0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS	0x1FF9
-#define MOTO_RTC_MINUTES	0x1FFA
-#define MOTO_RTC_HOURS		0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK	0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH	0x1FFD
-#define MOTO_RTC_MONTH		0x1FFE
-#define MOTO_RTC_YEAR		0x1FFF
-#define MOTO_RTC_CONTROLA       0x1FF8
-#define MOTO_RTC_CONTROLB       0x1FF9
-
+/* Signatures for nvram partitions */
 #define NVRAM_SIG_SP	0x02	/* support processor */
 #define NVRAM_SIG_OF	0x50	/* open firmware config */
 #define NVRAM_SIG_FW	0x51	/* general firmware */
@@ -49,32 +25,19 @@
 #define NVRAM_SIG_OS	0xa0	/* OS defined */
 #define NVRAM_SIG_PANIC	0xa1	/* Apple OSX "panic" */
 
-/* If change this size, then change the size of NVNAME_LEN */
-struct nvram_header {
-	unsigned char signature;
-	unsigned char checksum;
-	unsigned short length;
-	char name[12];
-};
-
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
 #include <linux/list.h>
 
-struct nvram_partition {
-	struct list_head partition;
-	struct nvram_header header;
-	unsigned int index;
-};
-
-
+#ifdef CONFIG_PPC_PSERIES
 extern int nvram_write_error_log(char * buff, int length,
 					 unsigned int err_type, unsigned int err_seq);
 extern int nvram_read_error_log(char * buff, int length,
 					 unsigned int * err_type, unsigned int *err_seq);
 extern int nvram_clear_error_log(void);
-
 extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_MMIO_NVRAM
 extern int mmio_nvram_init(void);
@@ -85,6 +48,13 @@
 }
 #endif
 
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
 #endif /* __KERNEL__ */
 
 /* PowerMac specific nvram stuffs */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 43adc8b..1255569 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -36,6 +36,8 @@
 #define PPC_INST_NOP			0x60000000
 #define PPC_INST_POPCNTB		0x7c0000f4
 #define PPC_INST_POPCNTB_MASK		0xfc0007fe
+#define PPC_INST_POPCNTD		0x7c0003f4
+#define PPC_INST_POPCNTW		0x7c0002f4
 #define PPC_INST_RFCI			0x4c000066
 #define PPC_INST_RFDI			0x4c00004e
 #define PPC_INST_RFMCI			0x4c00004c
@@ -88,6 +90,12 @@
 					__PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					__PPC_RB(b))
+#define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s)	stringify_in_c(.long PPC_INST_POPCNTD | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s)	stringify_in_c(.long PPC_INST_POPCNTW | \
+					__PPC_RA(a) | __PPC_RS(s))
 #define PPC_RFCI		stringify_in_c(.long PPC_INST_RFCI)
 #define PPC_RFDI		stringify_in_c(.long PPC_INST_RFDI)
 #define PPC_RFMCI		stringify_in_c(.long PPC_INST_RFMCI)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 4c14187..de1967a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -122,7 +122,6 @@
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
 #endif
 
-#ifdef __KERNEL__
 #ifdef __powerpc64__
 
 #define STACK_TOP_USER64 TASK_SIZE_USER64
@@ -139,7 +138,6 @@
 #define STACK_TOP_MAX	STACK_TOP
 
 #endif /* __powerpc64__ */
-#endif /* __KERNEL__ */
 
 typedef struct {
 	unsigned long seg;
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index ae26f2e..d727575 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -42,7 +42,7 @@
 
 /* Translate a DMA address from device space to CPU space */
 extern u64 of_translate_dma_address(struct device_node *dev,
-				    const u32 *in_addr);
+				    const __be32 *in_addr);
 
 #ifdef CONFIG_PCI
 extern unsigned long pci_address_to_pio(phys_addr_t address);
@@ -63,9 +63,6 @@
 /* cache lookup */
 struct device_node *of_find_next_cache_node(struct device_node *np);
 
-/* Get the MAC address */
-extern const void *of_get_mac_address(struct device_node *np);
-
 #ifdef CONFIG_NUMA
 extern int of_node_to_nid(struct device_node *device);
 #else
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index afe4aaa..7ef0d90 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -106,9 +106,22 @@
 						int nid)
 {
 }
-
 #endif /* CONFIG_NUMA */
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+#else
+static inline int start_topology_update(void)
+{
+	return 0;
+}
+static inline int stop_topology_update(void)
+{
+	return 0;
+}
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 08679c5..25e3922 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -116,9 +116,7 @@
 
 #endif /* CONFIG_PPC64 */
 
-#ifdef __KERNEL__
 extern struct vdso_data *vdso_data;
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 36c30f3..3bb2a3e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,8 +29,10 @@
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
-				   signal.o sysfs.o cacheinfo.o
-obj-y				+= vdso32/
+				   signal.o sysfs.o cacheinfo.o time.o \
+				   prom.o traps.o setup-common.o \
+				   udbg.o misc.o io.o dma.o \
+				   misc_$(CONFIG_WORD_SIZE).o vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
@@ -80,9 +82,6 @@
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-y				+= vmlinux.lds
 
-obj-y				+= time.o prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o dma.o \
-				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bd0df2e..23e6a93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -209,7 +209,6 @@
 	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
 
 	/* Interrupt register frame */
-	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
 	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 96a908f..be5ab18 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -457,16 +457,26 @@
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.cpu_setup		= __setup_cpu_power7,
-		.cpu_restore		= __restore_cpu_power7,
 		.oprofile_cpu_type	= "ppc64/power7",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.oprofile_mmcra_sihv	= POWER6_MMCRA_SIHV,
-		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
-		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
-			POWER6_MMCRA_OTHER,
 		.platform		= "power7",
 	},
+	{	/* Power7+ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004A0000,
+		.cpu_name		= "POWER7+ (raw)",
+		.cpu_features		= CPU_FTRS_POWER7,
+		.cpu_user_features	= COMMON_USER_POWER7,
+		.mmu_features		= MMU_FTR_HPTE_TABLE |
+			MMU_FTR_TLBIE_206,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power7",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.platform		= "power7+",
+	},
 	{	/* Cell Broadband Engine */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00700000,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 8e05c16..0a2af50 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
 #include <asm/prom.h>
 #include <asm/firmware.h>
 #include <asm/uaccess.h>
+#include <asm/rtas.h>
 
 #ifdef DEBUG
 #include <asm/udbg.h>
@@ -141,3 +142,35 @@
 
 	return csize;
 }
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * The crashkernel region will almost always overlap the RTAS region, so
+ * we have to be careful when shrinking the crashkernel region.
+ */
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+	const u32 *basep, *sizep;
+	unsigned int rtas_start = 0, rtas_end = 0;
+
+	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
+	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
+
+	if (basep && sizep) {
+		rtas_start = *basep;
+		rtas_end = *basep + *sizep;
+	}
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		/* Does this page overlap with the RTAS region? */
+		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
+			continue;
+
+		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+		free_page((unsigned long)__va(addr));
+		totalram_pages++;
+	}
+}
+#endif
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 6e54a0f..e755415 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -19,7 +19,7 @@
 				      dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
-				    dma_handle, device_to_mask(dev), flag,
+				    dma_handle, dev->coherent_dma_mask, flag,
 				    dev_to_node(dev));
 }
 
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ed4aeb9..c22dc1e 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 #undef SHOW_SYSCALLS
 #undef SHOW_SYSCALLS_TASK
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f8b01d..8a81799 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
  */
 
 #include <asm/exception-64s.h>
+#include <asm/ptrace.h>
 
 /*
  * We layout physical memory as follows:
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index e86c040..de36955 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 #ifdef CONFIG_VSX
 #define REST_32FPVSRS(n,c,base)						\
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8278e8b..9dd21a8 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -40,6 +40,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* As with the other PowerPC ports, it is expected that when code
  * execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 562305b..cbb3436 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 #include <asm/synch.h>
 #include "head_booke.h"
 
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f0dd577..782f23d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -38,6 +38,7 @@
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/ptrace.h>
 
 /* The physical memory is layed out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -96,7 +97,7 @@
 	.llong hvReleaseData-KERNELBASE
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address.  This
 	 * is used by kexec-tools to keep the kdump kernel in the
@@ -384,12 +385,10 @@
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
-#ifdef CONFIG_CRASH_DUMP
 	lwz	r7,__run_at_load-_stext(r26)
-	cmplwi	cr0,r7,1	/* kdump kernel ? - stay where we are */
+	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
 	bne	1f
 	add	r25,r25,r26
-#endif
 1:	mr	r3,r25
 	bl	.relocate
 #endif
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 1f1a04b..1cbf64e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -29,6 +29,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 529b817..3e02710 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -41,6 +41,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
+#include <asm/ptrace.h>
 #include "head_booke.h"
 
 /* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d583917..961bb03 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -311,8 +311,9 @@
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
 			if (printk_ratelimit())
-				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
-				       " npages %lx\n", tbl, vaddr, npages);
+				dev_info(dev, "iommu_alloc failed, tbl %p "
+					 "vaddr %lx npages %lu\n", tbl, vaddr,
+					 npages);
 			goto failure;
 		}
 
@@ -579,9 +580,9 @@
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
-				printk(KERN_INFO "iommu_alloc failed, "
-						"tbl %p vaddr %p npages %d\n",
-						tbl, vaddr, npages);
+				dev_info(dev, "iommu_alloc failed, tbl %p "
+					 "vaddr %p npages %d\n", tbl, vaddr,
+					 npages);
 			}
 		} else
 			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
@@ -627,7 +628,8 @@
 	 * the tce tables.
 	 */
 	if (order >= IOMAP_MAX_ORDER) {
-		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
+		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
+			 size);
 		return NULL;
 	}
 
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752..b69463e 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -122,8 +122,3 @@
 	mtlr	r0
 	mr	r3,r4
 	blr
-
-_GLOBAL(__setup_cpu_power7)
-_GLOBAL(__restore_cpu_power7)
-	/* place holder */
-	blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index a7a570d..094bd98 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -30,6 +30,7 @@
 #include <asm/processor.h>
 #include <asm/kexec.h>
 #include <asm/bug.h>
+#include <asm/ptrace.h>
 
 	.text
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e514490..206a321 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -25,6 +25,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/kexec.h>
+#include <asm/ptrace.h>
 
 	.text
 
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 9cf197f..bb12b32 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -34,15 +34,26 @@
 
 #undef DEBUG_NVRAM
 
-static struct nvram_partition * nvram_part;
-static long nvram_error_log_index = -1;
-static long nvram_error_log_size = 0;
+#define NVRAM_HEADER_LEN	sizeof(struct nvram_header)
+#define NVRAM_BLOCK_LEN		NVRAM_HEADER_LEN
 
-struct err_log_info {
-	int error_type;
-	unsigned int seq_num;
+/* If you change this size, change NVNAME_LEN to match */
+struct nvram_header {
+	unsigned char signature;
+	unsigned char checksum;
+	unsigned short length;
+	/* Terminating null required only for names < 12 chars. */
+	char name[12];
 };
 
+struct nvram_partition {
+	struct list_head partition;
+	struct nvram_header header;
+	unsigned int index;
+};
+
+static LIST_HEAD(nvram_partitions);
+
 static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
 {
 	int size;
@@ -186,14 +197,12 @@
 #ifdef DEBUG_NVRAM
 static void __init nvram_print_partitions(char * label)
 {
-	struct list_head * p;
 	struct nvram_partition * tmp_part;
 	
 	printk(KERN_WARNING "--------%s---------\n", label);
 	printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
-	list_for_each(p, &nvram_part->partition) {
-		tmp_part = list_entry(p, struct nvram_partition, partition);
-		printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%s\n",
+	list_for_each_entry(tmp_part, &nvram_partitions, partition) {
+		printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%12s\n",
 		       tmp_part->index, tmp_part->header.signature,
 		       tmp_part->header.checksum, tmp_part->header.length,
 		       tmp_part->header.name);
@@ -228,95 +237,113 @@
 	return c_sum;
 }
 
-static int __init nvram_remove_os_partition(void)
+/**
+ * nvram_remove_partition - Remove one or more partitions in nvram
+ * @name: name of the partition to remove, or NULL for a
+ *        signature only match
+ * @sig: signature of the partition(s) to remove
+ */
+
+int __init nvram_remove_partition(const char *name, int sig)
 {
-	struct list_head *i;
-	struct list_head *j;
-	struct nvram_partition * part;
-	struct nvram_partition * cur_part;
+	struct nvram_partition *part, *prev, *tmp;
 	int rc;
 
-	list_for_each(i, &nvram_part->partition) {
-		part = list_entry(i, struct nvram_partition, partition);
-		if (part->header.signature != NVRAM_SIG_OS)
+	list_for_each_entry(part, &nvram_partitions, partition) {
+		if (part->header.signature != sig)
 			continue;
-		
-		/* Make os partition a free partition */
+		if (name && strncmp(name, part->header.name, 12))
+			continue;
+
+		/* Make partition a free partition */
 		part->header.signature = NVRAM_SIG_FREE;
-		sprintf(part->header.name, "wwwwwwwwwwww");
+		strncpy(part->header.name, "wwwwwwwwwwww", 12);
 		part->header.checksum = nvram_checksum(&part->header);
-
-		/* Merge contiguous free partitions backwards */
-		list_for_each_prev(j, &part->partition) {
-			cur_part = list_entry(j, struct nvram_partition, partition);
-			if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-				break;
-			}
-			
-			part->header.length += cur_part->header.length;
-			part->header.checksum = nvram_checksum(&part->header);
-			part->index = cur_part->index;
-
-			list_del(&cur_part->partition);
-			kfree(cur_part);
-			j = &part->partition; /* fixup our loop */
-		}
-		
-		/* Merge contiguous free partitions forwards */
-		list_for_each(j, &part->partition) {
-			cur_part = list_entry(j, struct nvram_partition, partition);
-			if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-				break;
-			}
-
-			part->header.length += cur_part->header.length;
-			part->header.checksum = nvram_checksum(&part->header);
-
-			list_del(&cur_part->partition);
-			kfree(cur_part);
-			j = &part->partition; /* fixup our loop */
-		}
-		
 		rc = nvram_write_header(part);
 		if (rc <= 0) {
-			printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc);
+			printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
 			return rc;
 		}
+	}
 
+	/* Merge contiguous ones */
+	prev = NULL;
+	list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
+		if (part->header.signature != NVRAM_SIG_FREE) {
+			prev = NULL;
+			continue;
+		}
+		if (prev) {
+			prev->header.length += part->header.length;
+			prev->header.checksum = nvram_checksum(&prev->header);
+			rc = nvram_write_header(prev);
+			if (rc <= 0) {
+				printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+				return rc;
+			}
+			list_del(&part->partition);
+			kfree(part);
+		} else
+			prev = part;
 	}
 	
 	return 0;
 }
 
-/* nvram_create_os_partition
+/**
+ * nvram_create_partition - Create a partition in nvram
+ * @name: name of the partition to create
+ * @sig: signature of the partition to create
+ * @req_size: size of data to allocate in bytes
+ * @min_size: minimum acceptable size (0 means req_size)
  *
- * Create a OS linux partition to buffer error logs.
- * Will create a partition starting at the first free
- * space found if space has enough room.
+ * Returns a negative error code or a positive nvram index
+ * of the beginning of the data area of the newly created
+ * partition. If you provided a min_size smaller than req_size
+ * you need to query for the actual size yourself after the
+ * call using nvram_get_partition_size().
  */
-static int __init nvram_create_os_partition(void)
+loff_t __init nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size)
 {
 	struct nvram_partition *part;
 	struct nvram_partition *new_part;
 	struct nvram_partition *free_part = NULL;
-	int seq_init[2] = { 0, 0 };
+	static char nv_init_vals[16];
 	loff_t tmp_index;
 	long size = 0;
 	int rc;
-	
+
+	/* Convert sizes from bytes to blocks */
+	req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+	min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+
+	/* If no minimum size specified, make it the same as the
+	 * requested size
+	 */
+	if (min_size == 0)
+		min_size = req_size;
+	if (min_size > req_size)
+		return -EINVAL;
+
+	/* Now add one block to each for the header */
+	req_size += 1;
+	min_size += 1;
+
 	/* Find a free partition that will give us the maximum needed size 
 	   If can't find one that will give us the minimum size needed */
-	list_for_each_entry(part, &nvram_part->partition, partition) {
+	list_for_each_entry(part, &nvram_partitions, partition) {
 		if (part->header.signature != NVRAM_SIG_FREE)
 			continue;
 
-		if (part->header.length >= NVRAM_MAX_REQ) {
-			size = NVRAM_MAX_REQ;
+		if (part->header.length >= req_size) {
+			size = req_size;
 			free_part = part;
 			break;
 		}
-		if (!size && part->header.length >= NVRAM_MIN_REQ) {
-			size = NVRAM_MIN_REQ;
+		if (part->header.length > size &&
+		    part->header.length >= min_size) {
+			size = part->header.length;
 			free_part = part;
 		}
 	}
@@ -326,136 +353,95 @@
 	/* Create our OS partition */
 	new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
 	if (!new_part) {
-		printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n");
+		pr_err("nvram_create_partition: kmalloc failed\n");
 		return -ENOMEM;
 	}
 
 	new_part->index = free_part->index;
-	new_part->header.signature = NVRAM_SIG_OS;
+	new_part->header.signature = sig;
 	new_part->header.length = size;
-	strcpy(new_part->header.name, "ppc64,linux");
+	strncpy(new_part->header.name, name, 12);
 	new_part->header.checksum = nvram_checksum(&new_part->header);
 
 	rc = nvram_write_header(new_part);
 	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-				"failed (%d)\n", rc);
-		return rc;
-	}
-
-	/* make sure and initialize to zero the sequence number and the error
-	   type logged */
-	tmp_index = new_part->index + NVRAM_HEADER_LEN;
-	rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write "
+		pr_err("nvram_create_partition: nvram_write_header "
 		       "failed (%d)\n", rc);
 		return rc;
 	}
-	
-	nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN;
-	nvram_error_log_size = ((part->header.length - 1) *
-				NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-	
 	list_add_tail(&new_part->partition, &free_part->partition);
 
-	if (free_part->header.length <= size) {
+	/* Adjust or remove the partition we stole the space from */
+	if (free_part->header.length > size) {
+		free_part->index += size * NVRAM_BLOCK_LEN;
+		free_part->header.length -= size;
+		free_part->header.checksum = nvram_checksum(&free_part->header);
+		rc = nvram_write_header(free_part);
+		if (rc <= 0) {
+			pr_err("nvram_create_partition: nvram_write_header "
+			       "failed (%d)\n", rc);
+			return rc;
+		}
+	} else {
 		list_del(&free_part->partition);
 		kfree(free_part);
-		return 0;
 	} 
 
-	/* Adjust the partition we stole the space from */
-	free_part->index += size * NVRAM_BLOCK_LEN;
-	free_part->header.length -= size;
-	free_part->header.checksum = nvram_checksum(&free_part->header);
-	
-	rc = nvram_write_header(free_part);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-		       "failed (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-
-/* nvram_setup_partition
- *
- * This will setup the partition we need for buffering the
- * error logs and cleanup partitions if needed.
- *
- * The general strategy is the following:
- * 1.) If there is ppc64,linux partition large enough then use it.
- * 2.) If there is not a ppc64,linux partition large enough, search
- * for a free partition that is large enough.
- * 3.) If there is not a free partition large enough remove 
- * _all_ OS partitions and consolidate the space.
- * 4.) Will first try getting a chunk that will satisfy the maximum
- * error log size (NVRAM_MAX_REQ).
- * 5.) If the max chunk cannot be allocated then try finding a chunk
- * that will satisfy the minum needed (NVRAM_MIN_REQ).
- */
-static int __init nvram_setup_partition(void)
-{
-	struct list_head * p;
-	struct nvram_partition * part;
-	int rc;
-
-	/* For now, we don't do any of this on pmac, until I
-	 * have figured out if it's worth killing some unused stuffs
-	 * in our nvram, as Apple defined partitions use pretty much
-	 * all of the space
-	 */
-	if (machine_is(powermac))
-		return -ENOSPC;
-
-	/* see if we have an OS partition that meets our needs.
-	   will try getting the max we need.  If not we'll delete
-	   partitions and try again. */
-	list_for_each(p, &nvram_part->partition) {
-		part = list_entry(p, struct nvram_partition, partition);
-		if (part->header.signature != NVRAM_SIG_OS)
-			continue;
-
-		if (strcmp(part->header.name, "ppc64,linux"))
-			continue;
-
-		if (part->header.length >= NVRAM_MIN_REQ) {
-			/* found our partition */
-			nvram_error_log_index = part->index + NVRAM_HEADER_LEN;
-			nvram_error_log_size = ((part->header.length - 1) *
-						NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-			return 0;
+	/* Clear the new partition */
+	for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
+	     tmp_index <  ((size - 1) * NVRAM_BLOCK_LEN);
+	     tmp_index += NVRAM_BLOCK_LEN) {
+		rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
+		if (rc <= 0) {
+			pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
+			return rc;
 		}
 	}
 	
-	/* try creating a partition with the free space we have */
-	rc = nvram_create_os_partition();
-	if (!rc) {
-		return 0;
-	}
-		
-	/* need to free up some space */
-	rc = nvram_remove_os_partition();
-	if (rc) {
-		return rc;
-	}
+	return new_part->index + NVRAM_HEADER_LEN;
+}
+
+/**
+ * nvram_get_partition_size - Get the data size of an nvram partition
+ * @data_index: This is the offset of the start of the data of
+ *              the partition. The same value that is returned by
+ *              nvram_create_partition().
+ */
+int nvram_get_partition_size(loff_t data_index)
+{
+	struct nvram_partition *part;
 	
-	/* create a partition in this new space */
-	rc = nvram_create_os_partition();
-	if (rc) {
-		printk(KERN_ERR "nvram_create_os_partition: Could not find a "
-		       "NVRAM partition large enough\n");
-		return rc;
+	list_for_each_entry(part, &nvram_partitions, partition) {
+		if (part->index + NVRAM_HEADER_LEN == data_index)
+			return (part->header.length - 1) * NVRAM_BLOCK_LEN;
 	}
-	
-	return 0;
+	return -1;
 }
 
 
-static int __init nvram_scan_partitions(void)
+/**
+ * nvram_find_partition - Find an nvram partition by signature and name
+ * @name: Name of the partition or NULL for any name
+ * @sig: Signature to test against
+ * @out_size: if non-NULL, returns the size of the data part of the partition
+ */
+loff_t nvram_find_partition(const char *name, int sig, int *out_size)
+{
+	struct nvram_partition *p;
+
+	list_for_each_entry(p, &nvram_partitions, partition) {
+		if (p->header.signature == sig &&
+		    (!name || !strncmp(p->header.name, name, 12))) {
+			if (out_size)
+				*out_size = (p->header.length - 1) *
+					NVRAM_BLOCK_LEN;
+			return p->index + NVRAM_HEADER_LEN;
+		}
+	}
+	return 0;
+}
+
+int __init nvram_scan_partitions(void)
 {
 	loff_t cur_index = 0;
 	struct nvram_header phead;
@@ -465,7 +451,7 @@
 	int total_size;
 	int err;
 
-	if (ppc_md.nvram_size == NULL)
+	if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
 		return -ENODEV;
 	total_size = ppc_md.nvram_size();
 	
@@ -512,12 +498,16 @@
 		
 		memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
 		tmp_part->index = cur_index;
-		list_add_tail(&tmp_part->partition, &nvram_part->partition);
+		list_add_tail(&tmp_part->partition, &nvram_partitions);
 		
 		cur_index += phead.length * NVRAM_BLOCK_LEN;
 	}
 	err = 0;
 
+#ifdef DEBUG_NVRAM
+	nvram_print_partitions("NVRAM Partitions");
+#endif
+
  out:
 	kfree(header);
 	return err;
@@ -525,9 +515,10 @@
 
 static int __init nvram_init(void)
 {
-	int error;
 	int rc;
 	
+	BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
 	if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
 		return  -ENODEV;
 
@@ -537,29 +528,6 @@
 		return rc;
 	}
   	
-  	/* initialize our anchor for the nvram partition list */
-  	nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
-  	if (!nvram_part) {
-  		printk(KERN_ERR "nvram_init: Failed kmalloc\n");
-  		return -ENOMEM;
-  	}
-  	INIT_LIST_HEAD(&nvram_part->partition);
-  
-  	/* Get all the NVRAM partitions */
-  	error = nvram_scan_partitions();
-  	if (error) {
-  		printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n");
-  		return error;
-  	}
-  		
-  	if(nvram_setup_partition()) 
-  		printk(KERN_WARNING "nvram_init: Could not find nvram partition"
-  		       " for nvram buffered error logging.\n");
-  
-#ifdef DEBUG_NVRAM
-	nvram_print_partitions("NVRAM Partitions");
-#endif
-
   	return rc;
 }
 
@@ -568,135 +536,6 @@
         misc_deregister( &nvram_dev );
 }
 
-
-#ifdef CONFIG_PPC_PSERIES
-
-/* nvram_write_error_log
- *
- * We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode.  If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk.  For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name       | data             |
- * |0          |1         |2      3|4         15|16        length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log                         |
- * |0            3|4          7|8            nvram_error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
- */
-int nvram_write_error_log(char * buff, int length,
-                          unsigned int err_type, unsigned int error_log_cnt)
-{
-	int rc;
-	loff_t tmp_index;
-	struct err_log_info info;
-	
-	if (nvram_error_log_index == -1) {
-		return -ESPIPE;
-	}
-
-	if (length > nvram_error_log_size) {
-		length = nvram_error_log_size;
-	}
-
-	info.error_type = err_type;
-	info.seq_num = error_log_cnt;
-
-	tmp_index = nvram_error_log_index;
-
-	rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-
-	rc = ppc_md.nvram_write(buff, length, &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-	
-	return 0;
-}
-
-/* nvram_read_error_log
- *
- * Reads nvram for error log for at most 'length'
- */
-int nvram_read_error_log(char * buff, int length,
-                         unsigned int * err_type, unsigned int * error_log_cnt)
-{
-	int rc;
-	loff_t tmp_index;
-	struct err_log_info info;
-	
-	if (nvram_error_log_index == -1)
-		return -1;
-
-	if (length > nvram_error_log_size)
-		length = nvram_error_log_size;
-
-	tmp_index = nvram_error_log_index;
-
-	rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-		return rc;
-	}
-
-	rc = ppc_md.nvram_read(buff, length, &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-		return rc;
-	}
-
-	*error_log_cnt = info.seq_num;
-	*err_type = info.error_type;
-
-	return 0;
-}
-
-/* This doesn't actually zero anything, but it sets the event_logged
- * word to tell that this event is safely in syslog.
- */
-int nvram_clear_error_log(void)
-{
-	loff_t tmp_index;
-	int clear_word = ERR_FLAG_ALREADY_LOGGED;
-	int rc;
-
-	if (nvram_error_log_index == -1)
-		return -1;
-
-	tmp_index = nvram_error_log_index;
-	
-	rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
 module_init(nvram_init);
 module_exit(nvram_cleanup);
 MODULE_LICENSE("GPL");
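
With the rework above, callers are expected to manage their own partitions through nvram_remove_partition(), nvram_create_partition(), nvram_get_partition_size() and nvram_find_partition(). A hedged sketch of how a platform init path might drive that API, assuming the declarations from this patch are visible (e.g. via asm/nvram.h); the partition name, the NVRAM_SIG_OS choice and the byte sizes are illustrative, not taken from this patch:

/* Illustrative only: one way a platform could obtain a log partition with
 * the reworked API.  The name, signature and sizes are assumptions. */
static int __init example_nvram_log_setup(void)
{
	loff_t data;	/* NVRAM offset of the partition's data area */
	int size;

	/* Reuse an existing OS partition if one is already there... */
	data = nvram_find_partition("ppc64,linux", NVRAM_SIG_OS, &size);
	if (data != 0)
		return 0;

	/* ...otherwise reclaim stale OS partitions and create a new one,
	 * accepting anything between 1K and 4K of data space. */
	nvram_remove_partition(NULL, NVRAM_SIG_OS);
	data = nvram_create_partition("ppc64,linux", NVRAM_SIG_OS, 4096, 1024);
	if (data < 0)
		return (int)data;

	size = nvram_get_partition_size(data);
	pr_info("nvram: log partition data at 0x%llx, %d bytes\n",
		(unsigned long long)data, size);
	return 0;
}
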
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d43fc65..8515776 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -193,8 +193,7 @@
 	hose->io_resource.start += io_virt_offset;
 	hose->io_resource.end += io_virt_offset;
 
-	pr_debug("  hose->io_resource=0x%016llx...0x%016llx\n",
-		 hose->io_resource.start, hose->io_resource.end);
+	pr_debug("  hose->io_resource=%pR\n", &hose->io_resource);
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392..ef3ef56 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -186,3 +186,10 @@
 EXPORT_SYMBOL(__mfdcr);
 #endif
 EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+#endif
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 5113bd2..e83ba3f 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /*
  * Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 88334af..c2b7a07 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -117,41 +117,3 @@
 	cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
 	*size = of_read_number(dma_window, cells);
 }
-
-/**
- * Search the device tree for the best MAC address to use.  'mac-address' is
- * checked first, because that is supposed to contain to "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address.  If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the device tree, but were not set by U-Boot.  For example, the
- * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
- * addresses.  Some older U-Boots only initialized 'local-mac-address'.  In
- * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
- * but is all zeros.
-*/
-const void *of_get_mac_address(struct device_node *np)
-{
-	struct property *pp;
-
-	pp = of_find_property(np, "mac-address", NULL);
-	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
-		return pp->value;
-
-	pp = of_find_property(np, "local-mac-address", NULL);
-	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
-		return pp->value;
-
-	pp = of_find_property(np, "address", NULL);
-	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
-		return pp->value;
-
-	return NULL;
-}
-EXPORT_SYMBOL(of_get_mac_address);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a9b3296..9065369 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1316,6 +1316,10 @@
 static long ppc_set_hwdebug(struct task_struct *child,
 		     struct ppc_hw_breakpoint *bp_info)
 {
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+	unsigned long dabr;
+#endif
+
 	if (bp_info->version != 1)
 		return -ENOTSUPP;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1353,11 +1357,10 @@
 	/*
 	 * We only support one data breakpoint
 	 */
-	if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
-	    ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
-	    (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
-	    (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
-	    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+	    bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
+	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
 		return -EINVAL;
 
 	if (child->thread.dabr)
@@ -1366,7 +1369,14 @@
 	if ((unsigned long)bp_info->addr >= TASK_SIZE)
 		return -EIO;
 
-	child->thread.dabr = (unsigned long)bp_info->addr;
+	dabr = (unsigned long)bp_info->addr & ~7UL;
+	dabr |= DABR_TRANSLATION;
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+		dabr |= DABR_DATA_READ;
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+		dabr |= DABR_DATA_WRITE;
+
+	child->thread.dabr = dabr;
 
 	return 1;
 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 8a6daf4..69c4be9 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -280,7 +280,11 @@
 		/* We only support one DABR and no IABRS at the moment */
 		if (addr > 0)
 			break;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+		ret = put_user(child->thread.dac1, (u32 __user *)data);
+#else
 		ret = put_user(child->thread.dabr, (u32 __user *)data);
+#endif
 		break;
 	}
 
@@ -312,6 +316,9 @@
 	case PTRACE_SET_DEBUGREG:
 	case PTRACE_SYSCALL:
 	case PTRACE_CONT:
+	case PPC_PTRACE_GETHWDBGINFO:
+	case PPC_PTRACE_SETHWDEBUG:
+	case PPC_PTRACE_DELHWDEBUG:
 		ret = arch_ptrace(child, request, addr, data);
 		break;
 
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8fe8bc6..2097f2b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -41,6 +41,7 @@
 #include <asm/atomic.h>
 #include <asm/time.h>
 #include <asm/mmu.h>
+#include <asm/topology.h>
 
 struct rtas_t rtas = {
 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@
 	int cpu;
 
 	slb_set_size(SLB_MIN_SIZE);
+	stop_topology_update();
 	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
 
 	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -728,6 +730,7 @@
 		rc = atomic_read(&data->error);
 
 	atomic_set(&data->error, rc);
+	start_topology_update();
 
 	if (wake_when_done) {
 		atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ce6f61c..5a0401f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -437,8 +437,8 @@
 	unsigned int i;
 
 	/*
-	 * interrupt stacks must be under 256MB, we cannot afford to take
-	 * SLB misses on them.
+	 * Interrupt stacks must be in the first segment since we
+	 * cannot afford to take SLB misses on them.
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bb..9813605 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@
 	return id;
 }
 
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+	return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+	return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_map,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
@@ -600,7 +613,7 @@
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
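
The two exported helpers added above are plain shifts by threads_shift (log2 of the number of threads per core). A tiny worked example in userspace C, assuming 4 threads per core so threads_shift is 2:

#include <stdio.h>

/* Userspace illustration of the new helpers; threads_shift is assumed
 * to be 2 (4 hardware threads per core) for this example. */
static const int threads_shift = 2;

static int cpu_core_index_of_thread(int cpu)  { return cpu >> threads_shift; }
static int cpu_first_thread_of_core(int core) { return core << threads_shift; }

int main(void)
{
	int cpu = 5;
	int core = cpu_core_index_of_thread(cpu);	/* 5 >> 2 = 1 */

	/* prints: cpu 5 -> core 1, first thread 4 */
	printf("cpu %d -> core %d, first thread %d\n",
	       cpu, core, cpu_first_thread_of_core(core));
	return 0;
}
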
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0104069..09e4dea 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -155,7 +155,7 @@
 
 static u64 tb_to_ns_scale __read_mostly;
 static unsigned tb_to_ns_shift __read_mostly;
-static unsigned long boot_tb __read_mostly;
+static u64 boot_tb __read_mostly;
 
 extern struct timezone sys_tz;
 static long timezone_offset;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index fe46048..9de6f39 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -5,6 +5,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 /*
  * load_up_altivec(unused, unused, tsk)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 441d2a7..1b695fd 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -600,6 +600,11 @@
 	vio_cmo_dealloc(viodev, alloc_size);
 }
 
+static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+        return dma_iommu_ops.dma_supported(dev, mask);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent  = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@
 	.unmap_sg       = vio_dma_iommu_unmap_sg,
 	.map_page       = vio_dma_iommu_map_page,
 	.unmap_page     = vio_dma_iommu_unmap_page,
+	.dma_supported  = vio_dma_iommu_dma_supported,
 
 };
 
@@ -858,8 +864,7 @@
 
 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
 {
-	vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
-	viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
 }
 
 /**
@@ -1244,7 +1249,7 @@
 	if (firmware_has_feature(FW_FEATURE_CMO))
 		vio_cmo_set_dma_ops(viodev);
 	else
-		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+		set_dma_ops(&viodev->dev, &dma_iommu_ops);
 	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
@@ -1252,6 +1257,10 @@
 	viodev->dev.parent = &vio_bus_device.dev;
 	viodev->dev.bus = &vio_bus_type;
 	viodev->dev.release = vio_dev_release;
+        /* needed to ensure proper operation of coherent allocations
+         * later, in case driver doesn't set it explicitly */
+        dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
+        dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
 
 	/* register with generic device framework */
 	if (device_register(&viodev->dev)) {
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e316847..badc983 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1307,12 +1307,10 @@
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
 		goto out;
 
-	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
-
 	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
 		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
 	if (!vcpu_book3s->shadow_vcpu)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 38f756f..9975846 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -145,18 +145,12 @@
 	*(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm;
-
-	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	if (!kvm)
-		return ERR_PTR(-ENOMEM);
-
-	return kvm;
+	return 0;
 }
 
-static void kvmppc_free_vcpus(struct kvm *kvm)
+void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	unsigned int i;
 	struct kvm_vcpu *vcpu;
@@ -176,14 +170,6 @@
 {
 }
 
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-	kvmppc_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
-}
-
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 889f2bc..166a6a0 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,7 +16,7 @@
 
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
-			   checksum_wrappers_64.o
+			   checksum_wrappers_64.o hweight_64.o
 obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
 obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
new file mode 100644
index 0000000..fda2786
--- /dev/null
+++ b/arch/powerpc/lib/hweight_64.S
@@ -0,0 +1,110 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2010
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+/* Note: This code relies on -mminimal-toc */
+
+_GLOBAL(__arch_hweight8)
+BEGIN_FTR_SECTION
+	b .__sw_hweight8
+	nop
+	nop
+FTR_SECTION_ELSE
+	PPC_POPCNTB(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight16)
+BEGIN_FTR_SECTION
+	b .__sw_hweight16
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(50)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(50)
+	clrlwi  r3,r3,16
+	PPC_POPCNTW(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight32)
+BEGIN_FTR_SECTION
+	b .__sw_hweight32
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(51)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,16
+	add	r3,r4,r3
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(51)
+	PPC_POPCNTW(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight64)
+BEGIN_FTR_SECTION
+	b .__sw_hweight64
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(52)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,32
+	add	r3,r4,r3
+	srdi	r4,r3,16
+	add	r3,r4,r3
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(52)
+	PPC_POPCNTD(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
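
On CPUs that only have POPCNTB, the routines above are left with a per-byte population count in each byte of the result, which they fold together with the srdi/add sequence before masking the low byte. A small C sketch of that folding, with helper names invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Per-byte popcounts, i.e. what the popcntb instruction produces. */
static uint64_t popcntb(uint64_t x)
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= (uint64_t)__builtin_popcountll((x >> (8 * i)) & 0xff)
			<< (8 * i);
	return r;
}

/* The srdi/add folding used by __arch_hweight64's POPCNTB path. */
static unsigned int fold64(uint64_t per_byte)
{
	per_byte += per_byte >> 32;
	per_byte += per_byte >> 16;
	per_byte += per_byte >> 8;
	return per_byte & 0xff;		/* clrldi r3,r3,64-8 */
}

int main(void)
{
	uint64_t x = 0xf0f0f0f0f0f0f0f0ULL;

	printf("%u\n", fold64(popcntb(x)));	/* prints 32 */
	return 0;
}
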
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index d7efdbf..fec1320 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -16,6 +16,16 @@
 
 #ifdef __HAVE_ARCH_PTE_SPECIAL
 
+static inline void get_huge_page_tail(struct page *page)
+{
+	/*
+	 * __split_huge_page_refcount() cannot run
+	 * from under us.
+	 */
+	VM_BUG_ON(atomic_read(&page->_count) < 0);
+	atomic_inc(&page->_count);
+}
+
 /*
  * The performance critical leaf functions are made noinline otherwise gcc
  * inlines everything into a single function which results in too much
@@ -47,6 +57,8 @@
 			put_page(page);
 			return 0;
 		}
+		if (PageTail(page))
+			get_huge_page_tail(page);
 		pages[*nr] = page;
 		(*nr)++;
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5e95844..a5991fa 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1070,7 +1070,7 @@
 		  unsigned long access, unsigned long trap)
 {
 	unsigned long vsid;
-	void *pgdir;
+	pgd_t *pgdir;
 	pte_t *ptep;
 	unsigned long flags;
 	int rc, ssize, local = 0;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce9984..c0aab52 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@
 		 * a core map instead but this will do for now.
 		 */
 		for_each_cpu(cpu, mm_cpumask(mm)) {
-			for (i = cpu_first_thread_in_core(cpu);
-			     i <= cpu_last_thread_in_core(cpu); i++)
+			for (i = cpu_first_thread_sibling(cpu);
+			     i <= cpu_last_thread_sibling(cpu); i++)
 				__set_bit(id, stale_map[i]);
 			cpu = i - 1;
 		}
@@ -264,14 +264,14 @@
 	 */
 	if (test_bit(id, stale_map[cpu])) {
 		pr_hardcont(" | stale flush %d [%d..%d]",
-			    id, cpu_first_thread_in_core(cpu),
-			    cpu_last_thread_in_core(cpu));
+			    id, cpu_first_thread_sibling(cpu),
+			    cpu_last_thread_sibling(cpu));
 
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		for (i = cpu_first_thread_in_core(cpu);
-		     i <= cpu_last_thread_in_core(cpu); i++) {
+		for (i = cpu_first_thread_sibling(cpu);
+		     i <= cpu_last_thread_sibling(cpu); i++) {
 			__clear_bit(id, stale_map[i]);
 		}
 	}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 74505b2..bf5cb91 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,10 +20,15 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
+#include <linux/cpuset.h>
+#include <linux/node.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/system.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
+#include <asm/paca.h>
+#include <asm/hvcall.h>
 
 static int numa_enabled = 1;
 
@@ -163,7 +168,7 @@
 	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
 }
 
-static void __cpuinit map_cpu_to_node(int cpu, int node)
+static void map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
 
@@ -173,7 +178,7 @@
 		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 static void unmap_cpu_from_node(unsigned long cpu)
 {
 	int node = numa_cpu_lookup_table[cpu];
@@ -187,7 +192,7 @@
 		       cpu, node);
 	}
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 
 /* must hold reference to node during call */
 static const int *of_get_associativity(struct device_node *dev)
@@ -246,32 +251,41 @@
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
-static int of_node_to_nid_single(struct device_node *device)
+static int associativity_to_nid(const unsigned int *associativity)
 {
 	int nid = -1;
-	const unsigned int *tmp;
 
 	if (min_common_depth == -1)
 		goto out;
 
-	tmp = of_get_associativity(device);
-	if (!tmp)
-		goto out;
-
-	if (tmp[0] >= min_common_depth)
-		nid = tmp[min_common_depth];
+	if (associativity[0] >= min_common_depth)
+		nid = associativity[min_common_depth];
 
 	/* POWER4 LPAR uses 0xffff as invalid node */
 	if (nid == 0xffff || nid >= MAX_NUMNODES)
 		nid = -1;
 
-	if (nid > 0 && tmp[0] >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, tmp);
+	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
+		initialize_distance_lookup_table(nid, associativity);
 
 out:
 	return nid;
 }
 
+/* Returns the nid associated with the given device tree node,
+ * or -1 if not found.
+ */
+static int of_node_to_nid_single(struct device_node *device)
+{
+	int nid = -1;
+	const unsigned int *tmp;
+
+	tmp = of_get_associativity(device);
+	if (tmp)
+		nid = associativity_to_nid(tmp);
+	return nid;
+}
+
 /* Walk the device tree upwards, looking for an associativity id */
 int of_node_to_nid(struct device_node *device)
 {
@@ -1247,4 +1261,275 @@
 	return nid;
 }
 
+static u64 hot_add_drconf_memory_max(void)
+{
+        struct device_node *memory = NULL;
+        unsigned int drconf_cell_cnt = 0;
+        u64 lmb_size = 0;
+        const u32 *dm = 0;
+
+        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+        if (memory) {
+                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+                lmb_size = of_get_lmb_size(memory);
+                of_node_put(memory);
+        }
+        return lmb_size * drconf_cell_cnt;
+}
+
+/*
+ * memory_hotplug_max - return max address of memory that may be added
+ *
+ * This is currently only used on systems that support drconfig memory
+ * hotplug.
+ */
+u64 memory_hotplug_max(void)
+{
+        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
+}
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+/* Virtual Processor Home Node (VPHN) support */
+#ifdef CONFIG_PPC_SPLPAR
+#define VPHN_NR_CHANGE_CTRS (8)
+static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static cpumask_t cpu_associativity_changes_mask;
+static int vphn_enabled;
+static void set_topology_timer(void);
+
+/*
+ * Cache the current values of the associativity change counters provided
+ * by the hypervisor.
+ */
+static void setup_cpu_associativity_change_counters(void)
+{
+	int cpu = 0;
+
+	for_each_possible_cpu(cpu) {
+		int i = 0;
+		u8 *counts = vphn_cpu_change_counts[cpu];
+		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+			counts[i] = hypervisor_counts[i];
+		}
+	}
+}
+
+/*
+ * The hypervisor maintains a set of 8 associativity change counters in
+ * the VPA of each cpu that correspond to the associativity levels in the
+ * ibm,associativity-reference-points property. When an associativity
+ * level changes, the corresponding counter is incremented.
+ *
+ * Set a bit in cpu_associativity_changes_mask for each cpu whose home
+ * node associativity levels have changed.
+ *
+ * Returns the number of cpus with unhandled associativity changes.
+ */
+static int update_cpu_associativity_changes_mask(void)
+{
+	int cpu = 0, nr_cpus = 0;
+	cpumask_t *changes = &cpu_associativity_changes_mask;
+
+	cpumask_clear(changes);
+
+	for_each_possible_cpu(cpu) {
+		int i, changed = 0;
+		u8 *counts = vphn_cpu_change_counts[cpu];
+		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+			if (hypervisor_counts[i] > counts[i]) {
+				counts[i] = hypervisor_counts[i];
+				changed = 1;
+			}
+		}
+		if (changed) {
+			cpumask_set_cpu(cpu, changes);
+			nr_cpus++;
+		}
+	}
+
+	return nr_cpus;
+}
+
+/* 6 64-bit registers unpacked into 12 32-bit associativity values */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+
+/*
+ * Convert the associativity domain numbers returned from the hypervisor
+ * to the sequence they would appear in the ibm,associativity property.
+ */
+static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
+{
+	int i = 0;
+	int nr_assoc_doms = 0;
+	const u16 *field = (const u16*) packed;
+
+#define VPHN_FIELD_UNUSED	(0xffff)
+#define VPHN_FIELD_MSB		(0x8000)
+#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
+
+	for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+		if (*field == VPHN_FIELD_UNUSED) {
+			/* All significant fields processed, and remaining
+			 * fields contain the reserved value of all 1's.
+			 * Just store them.
+			 */
+			unpacked[i] = *((u32*)field);
+			field += 2;
+		}
+		else if (*field & VPHN_FIELD_MSB) {
+			/* Data is in the lower 15 bits of this field */
+			unpacked[i] = *field & VPHN_FIELD_MASK;
+			field++;
+			nr_assoc_doms++;
+		}
+		else {
+			/* Data is in the lower 15 bits of this field
+			 * concatenated with the next 16 bit field
+			 */
+			unpacked[i] = *((u32*)field);
+			field += 2;
+			nr_assoc_doms++;
+		}
+	}
+
+	return nr_assoc_doms;
+}
+
+/*
+ * Retrieve the new associativity information for a virtual processor's
+ * home node.
+ */
+static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
+{
+	long rc = 0;
+	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+	u64 flags = 1;
+	int hwcpu = get_hard_smp_processor_id(cpu);
+
+	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
+	vphn_unpack_associativity(retbuf, associativity);
+
+	return rc;
+}
+
+static long vphn_get_associativity(unsigned long cpu,
+					unsigned int *associativity)
+{
+	long rc = 0;
+
+	rc = hcall_vphn(cpu, associativity);
+
+	switch (rc) {
+	case H_FUNCTION:
+		printk(KERN_INFO
+			"VPHN is not supported. Disabling polling...\n");
+		stop_topology_update();
+		break;
+	case H_HARDWARE:
+		printk(KERN_ERR
+			"hcall_vphn() experienced a hardware fault "
+			"preventing VPHN. Disabling polling...\n");
+		stop_topology_update();
+	}
+
+	return rc;
+}
+
+/*
+ * Update the node maps and sysfs entries for each cpu whose home node
+ * has changed.
+ */
+int arch_update_cpu_topology(void)
+{
+	int cpu = 0, nid = 0, old_nid = 0;
+	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	struct sys_device *sysdev = NULL;
+
+	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+		vphn_get_associativity(cpu, associativity);
+		nid = associativity_to_nid(associativity);
+
+		if (nid < 0 || !node_online(nid))
+			nid = first_online_node;
+
+		old_nid = numa_cpu_lookup_table[cpu];
+
+		/* Disable hotplug while we update the cpu
+		 * masks and sysfs.
+		 */
+		get_online_cpus();
+		unregister_cpu_under_node(cpu, old_nid);
+		unmap_cpu_from_node(cpu);
+		map_cpu_to_node(cpu, nid);
+		register_cpu_under_node(cpu, nid);
+		put_online_cpus();
+
+		sysdev = get_cpu_sysdev(cpu);
+		if (sysdev)
+			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+	}
+
+	return 1;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+	rebuild_sched_domains();
+}
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+void topology_schedule_update(void)
+{
+	schedule_work(&topology_work);
+}
+
+static void topology_timer_fn(unsigned long ignored)
+{
+	if (!vphn_enabled)
+		return;
+	if (update_cpu_associativity_changes_mask() > 0)
+		topology_schedule_update();
+	set_topology_timer();
+}
+static struct timer_list topology_timer =
+	TIMER_INITIALIZER(topology_timer_fn, 0, 0);
+
+static void set_topology_timer(void)
+{
+	topology_timer.data = 0;
+	topology_timer.expires = jiffies + 60 * HZ;
+	add_timer(&topology_timer);
+}
+
+/*
+ * Start polling for VPHN associativity changes.
+ */
+int start_topology_update(void)
+{
+	int rc = 0;
+
+	if (firmware_has_feature(FW_FEATURE_VPHN)) {
+		vphn_enabled = 1;
+		setup_cpu_associativity_change_counters();
+		init_timer_deferrable(&topology_timer);
+		set_topology_timer();
+		rc = 1;
+	}
+
+	return rc;
+}
+__initcall(start_topology_update);
+
+/*
+ * Disable polling for VPHN associativity changes.
+ */
+int stop_topology_update(void)
+{
+	vphn_enabled = 0;
+	return del_timer_sync(&topology_timer);
+}
+#endif /* CONFIG_PPC_SPLPAR */
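
The H_HOME_NODE_ASSOCIATIVITY buffer parsed by vphn_unpack_associativity() above is treated as a stream of big-endian 16-bit fields: 0xffff marks an unused field, a field with the top bit set carries a 15-bit domain number, and any other field is combined with the following field into a 32-bit domain number. A userspace sketch of that decode, with names and sample values made up for illustration (the kernel code simply reads the two halfwords as one big-endian u32):

#include <stdint.h>
#include <stdio.h>

#define VPHN_FIELD_UNUSED	0xffff
#define VPHN_FIELD_MSB		0x8000
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB & 0xffff)

/* Userspace sketch of the decode done by vphn_unpack_associativity():
 * walks 16-bit fields and emits 32-bit associativity values. */
static int unpack(const uint16_t *field, uint32_t *out, int nr_out)
{
	int i, doms = 0;

	for (i = 0; i < nr_out; i++) {
		if (field[0] == VPHN_FIELD_UNUSED) {
			/* reserved all-ones field: store it as-is */
			out[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
		} else if (field[0] & VPHN_FIELD_MSB) {
			out[i] = field[0] & VPHN_FIELD_MASK;	/* 15-bit value */
			field += 1;
			doms++;
		} else {
			/* value spans this field and the next one */
			out[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
			doms++;
		}
	}
	return doms;
}

int main(void)
{
	uint16_t in[] = { 0x8002, 0x0001, 0x2345, 0xffff, 0xffff, 0xffff };
	uint32_t out[3];
	int doms = unpack(in, out, 3);

	/* prints: 2 domains: 0x2 0x12345 0xffffffff */
	printf("%d domains: 0x%x 0x%x 0x%x\n", doms, out[0], out[1], out[2]);
	return 0;
}
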
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a87ead0..8dc41c0 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -78,7 +78,7 @@
 
 	/* pgdir take page or two with 4K pages and a page fraction otherwise */
 #ifndef CONFIG_PPC_4K_PAGES
-	ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
 #else
 	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 			PGDIR_ORDER - PAGE_SHIFT);
@@ -230,6 +230,7 @@
 		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
 			return NULL;
+		area->phys_addr = p;
 		v = (unsigned long) area->addr;
 	} else {
 		v = (ioremap_bot -= size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 21d6dfa..88927a0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -223,6 +223,8 @@
 					    caller);
 		if (area == NULL)
 			return NULL;
+
+		area->phys_addr = paligned;
 		ret = __ioremap_at(paligned, area->addr, size, flags);
 		if (!ret)
 			vunmap(area->addr);
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 7fd90d0..c4d2b71 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1469,7 +1469,7 @@
  * The pm_interval register is setup to write the SPU PC value into the
  * trace buffer at the maximum rate possible.  The trace buffer is configured
  * to store the PCs, wrapping when it is full.  The performance counter is
- * intialized to the max hardware count minus the number of events, N, between
+ * initialized to the max hardware count minus the number of events, N, between
 * samples.  Once the N events have occurred, a HW counter overflow occurs
  * causing the generation of a HW counter interrupt which also stops the
  * writing of the SPU PC values to the trace buffer.  Hence the last PC
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 546bbc2..2521d93 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -50,7 +50,7 @@
  * Again, if your board needs to do things differently then create a
  * board.c file for it rather than adding it to this list.
  */
-static char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"amcc,acadia",
 	"amcc,haleakala",
 	"amcc,kilauea",
@@ -60,14 +60,9 @@
 
 static int __init ppc40x_probe(void)
 {
-	unsigned long root = of_get_flat_dt_root();
-	int i = 0;
-
-	for (i = 0; i < ARRAY_SIZE(board); i++) {
-		if (of_flat_dt_is_compatible(root, board[i])) {
-			ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
-			return 1;
-		}
+	if (of_flat_dt_match(of_get_flat_dt_root(), board)) {
+		ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
+		return 1;
 	}
 
 	return 0;
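
The probe functions in this and the following board files all switch to the same pattern: a NULL-terminated list of compatible strings handed to of_flat_dt_match(). A userspace analogue of that matching step, using board names taken from the list above; the real helper additionally walks the flattened device tree's compatible property:

#include <stdio.h>
#include <string.h>

/* Userspace analogue of the probe pattern: nonzero when the board's
 * compatible string appears in a NULL-terminated list. */
static const char *board[] = {
	"amcc,acadia",
	"amcc,haleakala",
	"amcc,kilauea",
	NULL
};

static int flat_dt_match(const char *compatible, const char **list)
{
	while (*list) {
		if (!strcmp(compatible, *list))
			return 1;
		list++;
	}
	return 0;
}

int main(void)
{
	/* prints: 1 0 */
	printf("%d %d\n", flat_dt_match("amcc,kilauea", board),
	       flat_dt_match("fsl,mpc8308rdb", board));
	return 0;
}
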
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 82ff326..c04d16d 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,4 +1,7 @@
-obj-$(CONFIG_44x)	:= misc_44x.o idle.o
+obj-$(CONFIG_44x)	+= misc_44x.o
+ifneq ($(CONFIG_PPC4xx_CPM),y)
+obj-$(CONFIG_44x)	+= idle.o
+endif
 obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
 obj-$(CONFIG_EBONY)	+= ebony.o
 obj-$(CONFIG_SAM440EP) 	+= sam440ep.o
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc5121_generic.c
index e487eb0..926731f 100644
--- a/arch/powerpc/platforms/512x/mpc5121_generic.c
+++ b/arch/powerpc/platforms/512x/mpc5121_generic.c
@@ -26,7 +26,7 @@
 /*
  * list of supported boards
  */
-static char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"prt,prtlvt",
 	NULL
 };
@@ -36,16 +36,7 @@
  */
 static int __init mpc5121_generic_probe(void)
 {
-	unsigned long node = of_get_flat_dt_root();
-	int i = 0;
-
-	while (board[i]) {
-		if (of_flat_dt_is_compatible(node, board[i]))
-			break;
-		i++;
-	}
-
-	return board[i] != NULL;
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 define_machine(mpc5121_generic) {
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index de55bc0..01ffa64 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -172,20 +172,18 @@
 	mpc52xx_setup_pci();
 }
 
+static const char *board[] __initdata = {
+	"fsl,lite5200",
+	"fsl,lite5200b",
+	NULL,
+};
+
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
 static int __init lite5200_probe(void)
 {
-	unsigned long node = of_get_flat_dt_root();
-	const char *model = of_get_flat_dt_prop(node, "model", NULL);
-
-	if (!of_flat_dt_is_compatible(node, "fsl,lite5200") &&
-	    !of_flat_dt_is_compatible(node, "fsl,lite5200b"))
-		return 0;
-	pr_debug("%s board found\n", model ? model : "unknown");
-
-	return 1;
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 define_machine(lite5200) {
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index 80234e5..eda0fc2 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -232,7 +232,7 @@
 	lite5200_pm_target_state = PM_SUSPEND_ON;
 }
 
-static struct platform_suspend_ops lite5200_pm_ops = {
+static const struct platform_suspend_ops lite5200_pm_ops = {
 	.valid		= lite5200_pm_valid,
 	.begin		= lite5200_pm_begin,
 	.prepare	= lite5200_pm_prepare,
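
This hunk, like the later ones for mpc52xx_pm, 83xx suspend, pseries suspend and fsl_pmc, marks the platform_suspend_ops table const, which suspend_set_ops() now accepts. A minimal sketch of the registration pattern, with hypothetical example_* callbacks standing in for real platform hooks:

#include <linux/init.h>
#include <linux/suspend.h>

static int example_suspend_valid(suspend_state_t state)
{
	/* Only plain standby is claimed to work in this sketch. */
	return state == PM_SUSPEND_STANDBY;
}

static int example_suspend_enter(suspend_state_t state)
{
	/* Platform-specific low-power entry would go here. */
	return 0;
}

static const struct platform_suspend_ops example_suspend_ops = {
	.valid	= example_suspend_valid,
	.enter	= example_suspend_enter,
};

static int __init example_pm_init(void)
{
	suspend_set_ops(&example_suspend_ops);
	return 0;
}
arch_initcall(example_pm_init);
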
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 0bac3a3..2c7780c 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -239,7 +239,7 @@
 }
 
 /* list of the supported boards */
-static char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"fsl,media5200",
 	NULL
 };
@@ -249,16 +249,7 @@
  */
 static int __init media5200_probe(void)
 {
-	unsigned long node = of_get_flat_dt_root();
-	int i = 0;
-
-	while (board[i]) {
-		if (of_flat_dt_is_compatible(node, board[i]))
-			break;
-		i++;
-	}
-
-	return (board[i] != NULL);
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 define_machine(media5200_platform) {
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index d45be5b..e36d6e2 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -49,7 +49,7 @@
 }
 
 /* list of the supported boards */
-static char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"intercontrol,digsy-mtc",
 	"manroland,mucmc52",
 	"manroland,uc101",
@@ -66,16 +66,7 @@
  */
 static int __init mpc5200_simple_probe(void)
 {
-	unsigned long node = of_get_flat_dt_root();
-	int i = 0;
-
-	while (board[i]) {
-		if (of_flat_dt_is_compatible(node, board[i]))
-			break;
-		i++;
-	}
-	
-	return (board[i] != NULL);
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 define_machine(mpc5200_simple_platform) {
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index 568cef6..8310e8b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -186,7 +186,7 @@
 	iounmap(mbar);
 }
 
-static struct platform_suspend_ops mpc52xx_pm_ops = {
+static const struct platform_suspend_ops mpc52xx_pm_ops = {
 	.valid		= mpc52xx_pm_valid,
 	.prepare	= mpc52xx_pm_prepare,
 	.enter		= mpc52xx_pm_enter,
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 846831d..661d354 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -57,16 +57,19 @@
 	ipic_set_default_priority();
 }
 
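+static const char *board[] __initdata = {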
+	"MPC8308RDB",
+	"fsl,mpc8308rdb",
+	"denx,mpc8308_p1m",
+	NULL
+};
+
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
 static int __init mpc830x_rdb_probe(void)
 {
-	unsigned long root = of_get_flat_dt_root();
-
-	return of_flat_dt_is_compatible(root, "MPC8308RDB") ||
-	       of_flat_dt_is_compatible(root, "fsl,mpc8308rdb") ||
-	       of_flat_dt_is_compatible(root, "denx,mpc8308_p1m");
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index ae525e4..b54cd73 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -60,15 +60,18 @@
 	ipic_set_default_priority();
 }
 
+static const char *board[] __initdata = {
+	"MPC8313ERDB",
+	"fsl,mpc8315erdb",
+	NULL
+};
+
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
 static int __init mpc831x_rdb_probe(void)
 {
-	unsigned long root = of_get_flat_dt_root();
-
-	return of_flat_dt_is_compatible(root, "MPC8313ERDB") ||
-	       of_flat_dt_is_compatible(root, "fsl,mpc8315erdb");
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 910caa6..7bafbf2 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -101,17 +101,20 @@
 	ipic_set_default_priority();
 }
 
+static const char *board[] __initdata = {
+	"fsl,mpc8377rdb",
+	"fsl,mpc8378rdb",
+	"fsl,mpc8379rdb",
+	"fsl,mpc8377wlan",
+	NULL
+};
+
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
 static int __init mpc837x_rdb_probe(void)
 {
-	unsigned long root = of_get_flat_dt_root();
-
-	return of_flat_dt_is_compatible(root, "fsl,mpc8377rdb") ||
-	       of_flat_dt_is_compatible(root, "fsl,mpc8378rdb") ||
-	       of_flat_dt_is_compatible(root, "fsl,mpc8379rdb") ||
-	       of_flat_dt_is_compatible(root, "fsl,mpc8377wlan");
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 define_machine(mpc837x_rdb) {
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 1930543..3d1ecd2 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -231,7 +231,7 @@
 	ori	r4, r4, 0x002a
 	mtspr	SPRN_DBAT0L, r4
 	lis	r8, TMP_VIRT_IMMR@h
-	ori	r4, r8, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r8, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT0U, r4
 	isync
 
@@ -241,7 +241,7 @@
 	ori	r4, r4, 0x002a
 	mtspr	SPRN_DBAT1L, r4
 	lis	r9, (TMP_VIRT_IMMR + 0x01000000)@h
-	ori	r4, r9, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r9, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT1U, r4
 	isync
 
@@ -253,7 +253,7 @@
 	li	r4, 0x0002
 	mtspr	SPRN_DBAT2L, r4
 	lis	r4, KERNELBASE@h
-	ori	r4, r4, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r4, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT2U, r4
 	isync
 
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 75ae77f..fd4f2f2 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -311,7 +311,7 @@
 	return ret;
 }
 
-static struct platform_suspend_ops mpc83xx_suspend_ops = {
+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
 	.valid = mpc83xx_suspend_valid,
 	.begin = mpc83xx_suspend_begin,
 	.enter = mpc83xx_suspend_enter,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index aa34cac..747d1ee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -309,7 +309,7 @@
 			/* P1021 has pins muxed for QE and other functions. To
 			 * enable QE UEC mode, we need to set bit QE0 for UCC1
 			 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
-			 * and QE12 for QE MII management singals in PMUXCR
+			 * and QE12 for QE MII management signals in PMUXCR
 			 * register.
 			 */
 				setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 |
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 8f29bbc..5e847d0 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -186,21 +186,21 @@
 }
 machine_device_initcall(tqm85xx, declare_of_platform_devices);
 
+static const char *board[] __initdata = {
+	"tqc,tqm8540",
+	"tqc,tqm8541",
+	"tqc,tqm8548",
+	"tqc,tqm8555",
+	"tqc,tqm8560",
+	NULL
+};
+
 /*
  * Called very early, device-tree isn't unflattened
  */
 static int __init tqm85xx_probe(void)
 {
-	unsigned long root = of_get_flat_dt_root();
-
-	if ((of_flat_dt_is_compatible(root, "tqc,tqm8540")) ||
-	    (of_flat_dt_is_compatible(root, "tqc,tqm8541")) ||
-	    (of_flat_dt_is_compatible(root, "tqc,tqm8548")) ||
-	    (of_flat_dt_is_compatible(root, "tqc,tqm8555")) ||
-	    (of_flat_dt_is_compatible(root, "tqc,tqm8560")))
-		return 1;
-
-	return 0;
+	return of_flat_dt_match(of_get_flat_dt_root(), board);
 }
 
 define_machine(tqm85xx) {
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 956154f..2057682 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -313,13 +313,14 @@
 source "arch/powerpc/sysdev/bestcomm/Kconfig"
 
 config MPC8xxx_GPIO
-	bool "MPC8xxx GPIO support"
-	depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
+	bool "MPC512x/MPC8xxx GPIO support"
+	depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
+		   FSL_SOC_BOOKE || PPC_86xx
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Say Y here if you're going to use hardware that connects to the
-	  MPC831x/834x/837x/8572/8610 GPIOs.
+	  MPC512x/831x/834x/837x/8572/8610 GPIOs.
 
 config SIMPLE_GPIO
 	bool "Support for simple, memory-mapped GPIO controllers"
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index beec405..3ce6855 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -76,7 +76,7 @@
 
 static void celleb_dma_dev_setup(struct device *dev)
 {
-	dev->archdata.dma_ops = get_pci_dma_ops();
+	set_dma_ops(dev, &dma_direct_ops);
 	set_dma_offset(dev, celleb_dma_direct_offset);
 }
 
@@ -106,7 +106,6 @@
 static int __init celleb_init_iommu(void)
 {
 	celleb_init_direct_mapping();
-	set_pci_dma_ops(&dma_direct_ops);
 	ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
 	bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
 
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 3532b92..856e9c3 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,12 +71,18 @@
 	return &ei->vfs_inode;
 }
 
-static void
-spufs_destroy_inode(struct inode *inode)
+static void spufs_i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
 }
 
+static void spufs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, spufs_i_callback);
+}
+
 static void
 spufs_init_once(void *p)
 {
@@ -159,18 +165,18 @@
 
 	mutex_lock(&dir->d_inode->i_mutex);
 	list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
-		spin_lock(&dcache_lock);
 		spin_lock(&dentry->d_lock);
 		if (!(d_unhashed(dentry)) && dentry->d_inode) {
-			dget_locked(dentry);
+			dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
 			simple_unlink(dir->d_inode, dentry);
-			spin_unlock(&dcache_lock);
+			/* XXX: what was dcache_lock protecting here? Other
+			 * filesystems (IB, configfs) release dcache_lock
+			 * before unlink */
 			dput(dentry);
 		} else {
 			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
 		}
 	}
 	shrink_dcache_parent(dir);
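
The spufs change above defers freeing the inode to an RCU callback so lock-free path walkers can still dereference it safely, and drops the removed dcache_lock in favour of per-dentry locking. The deferred-free half follows a generic pattern; a hedged sketch for a hypothetical examplefs:

#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *examplefs_inode_cache;

static void examplefs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	/* i_rcu shares storage with i_dentry, so reinitialise it before
	 * the memory is released after the grace period. */
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(examplefs_inode_cache, inode);
}

static void examplefs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, examplefs_i_callback);
}
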
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index a101abf..3b894f5 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -36,10 +36,9 @@
 	struct spu_lscsa *lscsa;
 	unsigned char *p;
 
-	lscsa = vmalloc(sizeof(struct spu_lscsa));
+	lscsa = vzalloc(sizeof(struct spu_lscsa));
 	if (!lscsa)
 		return -ENOMEM;
-	memset(lscsa, 0, sizeof(struct spu_lscsa));
 	csa->lscsa = lscsa;
 
 	/* Set LS pages reserved to allow for user-space mapping. */
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 054dfe5..f803f4b 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -29,6 +29,10 @@
 
 extern spinlock_t rtc_lock;
 
+#define NVRAM_AS0  0x74
+#define NVRAM_AS1  0x75
+#define NVRAM_DATA 0x77
+
 static int nvram_as1 = NVRAM_AS1;
 static int nvram_as0 = NVRAM_AS0;
 static int nvram_data = NVRAM_DATA;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 42d0a88..b5e026b 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1045,71 +1045,9 @@
 	.write		= mf_side_proc_write,
 };
 
-#if 0
-static void mf_getSrcHistory(char *buffer, int size)
-{
-	struct IplTypeReturnStuff return_stuff;
-	struct pending_event *ev = new_pending_event();
-	int rc = 0;
-	char *pages[4];
-
-	pages[0] = kmalloc(4096, GFP_ATOMIC);
-	pages[1] = kmalloc(4096, GFP_ATOMIC);
-	pages[2] = kmalloc(4096, GFP_ATOMIC);
-	pages[3] = kmalloc(4096, GFP_ATOMIC);
-	if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
-			 || (pages[2] == NULL) || (pages[3] == NULL))
-		return -ENOMEM;
-
-	return_stuff.xType = 0;
-	return_stuff.xRc = 0;
-	return_stuff.xDone = 0;
-	ev->event.hp_lp_event.xSubtype = 6;
-	ev->event.hp_lp_event.x.xSubtypeData =
-		subtype_data('M', 'F', 'V', 'I');
-	ev->event.data.vsp_cmd.xEvent = &return_stuff;
-	ev->event.data.vsp_cmd.cmd = 4;
-	ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
-	ev->event.data.vsp_cmd.result_code = 0xFF;
-	ev->event.data.vsp_cmd.reserved = 0;
-	ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
-	ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
-	ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
-	ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
-	mb();
-	if (signal_event(ev) != 0)
-		return;
-
- 	while (return_stuff.xDone != 1)
- 		udelay(10);
- 	if (return_stuff.xRc == 0)
- 		memcpy(buffer, pages[0], size);
-	kfree(pages[0]);
-	kfree(pages[1]);
-	kfree(pages[2]);
-	kfree(pages[3]);
-}
-#endif
-
 static int mf_src_proc_show(struct seq_file *m, void *v)
 {
-#if 0
-	int len;
-
-	mf_getSrcHistory(page, count);
-	len = count;
-	len -= off;
-	if (len < count) {
-		*eof = 1;
-		if (len <= 0)
-			return 0;
-	} else
-		len = count;
-	*start = page + off;
-	return len;
-#else
 	return 0;
-#endif
 }
 
 static int mf_src_proc_open(struct inode *inode, struct file *file)
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 1f9fb2c..14943ef 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -156,20 +156,12 @@
 
 static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
 
 	if (!iommu_table_iobmap_inited) {
 		iommu_table_iobmap_inited = 1;
 		iommu_table_iobmap_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_iobmap;
-
 }
 
 
@@ -192,9 +184,6 @@
 	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
 }
 
-static void pci_dma_bus_setup_null(struct pci_bus *b) { }
-static void pci_dma_dev_setup_null(struct pci_dev *d) { }
-
 int __init iob_init(struct device_node *dn)
 {
 	unsigned long tmp;
@@ -251,14 +240,8 @@
 	iommu_off = of_chosen &&
 			of_get_property(of_chosen, "linux,iommu-off", NULL);
 #endif
-	if (iommu_off) {
-		/* Direct I/O, IOMMU off */
-		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
-		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
-		set_pci_dma_ops(&dma_direct_ops);
-
+	if (iommu_off)
 		return;
-	}
 
 	iob_init(NULL);
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 9deb274..d5aceb7 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -506,6 +506,15 @@
 		of_platform_device_create(np, "smu", NULL);
 		of_node_put(np);
 	}
+	np = of_find_node_by_type(NULL, "fcu");
+	if (np == NULL) {
+		/* Some machines have strangely broken device-tree */
+		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
+	}
+	if (np) {
+		of_platform_device_create(np, "temperature", NULL);
+		of_node_put(np);
+	}
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index b341018..6c4b583 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -566,10 +566,10 @@
 	case PS3_DEV_TYPE_STOR_DISK:
 		result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK);
 
-		/* Some devices are not accessable from the Other OS lpar. */
+		/* Some devices are not accessible from the Other OS lpar. */
 		if (result == -ENODEV) {
 			result = 0;
-			pr_debug("%s:%u: not accessable\n", __func__,
+			pr_debug("%s:%u: not accessible\n", __func__,
 				 __LINE__);
 		}
 
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 59d9712..92290ff 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -44,7 +44,7 @@
  * @lock:
  * @ipi_debug_brk_mask:
  *
- * The HV mantains per SMT thread mappings of HV outlet to HV plug on
+ * The HV maintains per SMT thread mappings of HV outlet to HV plug on
  * behalf of the guest.  These mappings are implemented as 256 bit guest
  * supplied bitmaps indexed by plug number.  The addresses of the bitmaps
  * are registered with the HV through lv1_configure_irq_state_bitmap().
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 3139814..5d1b743 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -33,6 +33,16 @@
        depends on PCI_MSI && EEH
        default y
 
+config PSERIES_ENERGY
+	tristate "pSeries energy management capabilities driver"
+	depends on PPC_PSERIES
+	default y
+	help
+	  Provides an interface to platform energy management capabilities
+	  on supported PSERIES platforms.
+	  Provides: /sys/devices/system/cpu/pseries_(de)activation_hint_list
+	  and /sys/devices/system/cpu/cpuN/pseries_(de)activation_hint
+
 config SCANLOG
 	tristate "Scanlog dump interface"
 	depends on RTAS_PROC && PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 59eb8bd..fc52378 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_KEXEC)	+= kexec.o
 obj-$(CONFIG_PCI)	+= pci.o pci_dlpar.o
 obj-$(CONFIG_PSERIES_MSI)	+= msi.o
+obj-$(CONFIG_PSERIES_ENERGY)	+= pseries_energy.o
 
 obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug-cpu.o
 obj-$(CONFIG_MEMORY_HOTPLUG)	+= hotplug-memory.o
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
index 15e13b5..23982c7 100644
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -25,7 +25,6 @@
 #include <linux/pci.h>
 #include <asm/ppc-pci.h>
 #include <asm/pci-bridge.h>
-#include <linux/kobject.h>
 
 /**
  * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 0a14d8c..0b0eff0 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -55,6 +55,7 @@
 	{FW_FEATURE_XDABR,		"hcall-xdabr"},
 	{FW_FEATURE_MULTITCE,		"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,		"hcall-splpar"},
+	{FW_FEATURE_VPHN,		"hcall-vphn"},
 };
 
 /* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 48d2057..fd05fde 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 	
 #define STK_PARM(i)     (48 + ((i)-3)*8)
 
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index e19ff02..f106662 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -55,7 +55,7 @@
 static int hc_show(struct seq_file *m, void *p)
 {
 	unsigned long h_num = (unsigned long)p;
-	struct hcall_stats *hs = (struct hcall_stats *)m->private;
+	struct hcall_stats *hs = m->private;
 
 	if (hs[h_num].num_calls) {
 		if (cpu_has_feature(CPU_FTR_PURR))
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a77bcae..edea60b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -140,7 +140,7 @@
 	return ret;
 }
 
-static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
+static DEFINE_PER_CPU(u64 *, tce_page);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				     long npages, unsigned long uaddr,
@@ -323,14 +323,13 @@
 static void iommu_table_setparms_lpar(struct pci_controller *phb,
 				      struct device_node *dn,
 				      struct iommu_table *tbl,
-				      const void *dma_window,
-				      int bussubno)
+				      const void *dma_window)
 {
 	unsigned long offset, size;
 
-	tbl->it_busno  = bussubno;
 	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
 
+	tbl->it_busno = phb->bus->number;
 	tbl->it_base   = 0;
 	tbl->it_blocksize  = 16;
 	tbl->it_type = TCE_PCI;
@@ -450,14 +449,10 @@
 	if (!ppci->iommu_table) {
 		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   ppci->phb->node);
-		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
-			bus->number);
+		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
 		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
 		pr_debug("  created table: %p\n", ppci->iommu_table);
 	}
-
-	if (pdn != dn)
-		PCI_DN(dn)->iommu_table = ppci->iommu_table;
 }
 
 
@@ -533,21 +528,11 @@
 	}
 	pr_debug("  parent is %s\n", pdn->full_name);
 
-	/* Check for parent == NULL so we don't try to setup the empty EADS
-	 * slots on POWER4 machines.
-	 */
-	if (dma_window == NULL || pdn->parent == NULL) {
-		pr_debug("  no dma window for device, linking to parent\n");
-		set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
-		return;
-	}
-
 	pci = PCI_DN(pdn);
 	if (!pci->iommu_table) {
 		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   pci->phb->node);
-		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
-			pci->phb->bus->number);
+		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
 		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
 		pr_debug("  created table: %p\n", pci->iommu_table);
 	} else {
@@ -571,8 +556,7 @@
 
 	switch (action) {
 	case PSERIES_RECONFIG_REMOVE:
-		if (pci && pci->iommu_table &&
-		    of_get_property(np, "ibm,dma-window", NULL))
+		if (pci && pci->iommu_table)
 			iommu_free_table(pci->iommu_table, np->full_name);
 		break;
 	default:
@@ -589,13 +573,8 @@
 /* These are called very early. */
 void iommu_init_early_pSeries(void)
 {
-	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) {
-		/* Direct I/O, IOMMU off */
-		ppc_md.pci_dma_dev_setup = NULL;
-		ppc_md.pci_dma_bus_setup = NULL;
-		set_pci_dma_ops(&dma_direct_ops);
+	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
 		return;
-	}
 
 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
@@ -622,3 +601,17 @@
 	set_pci_dma_ops(&dma_iommu_ops);
 }
 
+static int __init disable_multitce(char *str)
+{
+	if (strcmp(str, "off") == 0 &&
+	    firmware_has_feature(FW_FEATURE_LPAR) &&
+	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
+		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
+		ppc_md.tce_build = tce_build_pSeriesLP;
+		ppc_md.tce_free	 = tce_free_pSeriesLP;
+		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
+	}
+	return 1;
+}
+
+__setup("multitce=", disable_multitce);
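
The new multitce= handler (and the bulk_remove= handler added to lpar.c below) uses the standard __setup() early-parameter mechanism: the callback receives whatever follows the '=' on the kernel command line and returns non-zero once the option is consumed. A generic sketch with a hypothetical examplefeat= option:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>

static bool examplefeat_enabled = true;

static int __init examplefeat_setup(char *str)
{
	/* str points at the text after "examplefeat=" on the command line. */
	if (strcmp(str, "off") == 0) {
		examplefeat_enabled = false;
		printk(KERN_INFO "examplefeat disabled on command line\n");
	}
	return 1;
}
__setup("examplefeat=", examplefeat_setup);
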
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f129040..5d3ea9f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -627,6 +627,18 @@
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
 }
 
+static int __init disable_bulk_remove(char *str)
+{
+	if (strcmp(str, "off") == 0 &&
+	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+			printk(KERN_INFO "Disabling BULK_REMOVE firmware feature");
+			powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
+	}
+	return 1;
+}
+
+__setup("bulk_remove=", disable_bulk_remove);
+
 void __init hpte_init_lpar(void)
 {
 	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index bc3c7f2..7e828ba 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -22,11 +22,25 @@
 #include <asm/prom.h>
 #include <asm/machdep.h>
 
+/* Max bytes to read/write in one go */
+#define NVRW_CNT 0x20
+
 static unsigned int nvram_size;
 static int nvram_fetch, nvram_store;
 static char nvram_buf[NVRW_CNT];	/* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
+static long nvram_error_log_index = -1;
+static long nvram_error_log_size = 0;
+
+struct err_log_info {
+	int error_type;
+	unsigned int seq_num;
+};
+#define NVRAM_MAX_REQ		2079
+#define NVRAM_MIN_REQ		1055
+
+#define NVRAM_LOG_PART_NAME	"ibm,rtas-log"
 
 static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
 {
@@ -119,6 +133,197 @@
 	return nvram_size ? nvram_size : -ENODEV;
 }
 
+
+/* nvram_write_error_log
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode.  If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk.  For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state in which it would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer will look like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name       | data             |
+ * |0          |1         |2      3|4         15|16        length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log                         |
+ * |0            3|4          7|8            nvram_error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_error_log(char * buff, int length,
+                          unsigned int err_type, unsigned int error_log_cnt)
+{
+	int rc;
+	loff_t tmp_index;
+	struct err_log_info info;
+	
+	if (nvram_error_log_index == -1) {
+		return -ESPIPE;
+	}
+
+	if (length > nvram_error_log_size) {
+		length = nvram_error_log_size;
+	}
+
+	info.error_type = err_type;
+	info.seq_num = error_log_cnt;
+
+	tmp_index = nvram_error_log_index;
+
+	rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+
+	rc = ppc_md.nvram_write(buff, length, &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+	
+	return 0;
+}
+
+/* nvram_read_error_log
+ *
+ * Reads nvram for error log for at most 'length'
+ */
+int nvram_read_error_log(char * buff, int length,
+                         unsigned int * err_type, unsigned int * error_log_cnt)
+{
+	int rc;
+	loff_t tmp_index;
+	struct err_log_info info;
+	
+	if (nvram_error_log_index == -1)
+		return -1;
+
+	if (length > nvram_error_log_size)
+		length = nvram_error_log_size;
+
+	tmp_index = nvram_error_log_index;
+
+	rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+		return rc;
+	}
+
+	rc = ppc_md.nvram_read(buff, length, &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+		return rc;
+	}
+
+	*error_log_cnt = info.seq_num;
+	*err_type = info.error_type;
+
+	return 0;
+}
+
+/* This doesn't actually zero anything, but it sets the event_logged
+ * word to tell that this event is safely in syslog.
+ */
+int nvram_clear_error_log(void)
+{
+	loff_t tmp_index;
+	int clear_word = ERR_FLAG_ALREADY_LOGGED;
+	int rc;
+
+	if (nvram_error_log_index == -1)
+		return -1;
+
+	tmp_index = nvram_error_log_index;
+	
+	rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* pseries_nvram_init_log_partition
+ *
+ * This will setup the partition we need for buffering the
+ * error logs and cleanup partitions if needed.
+ *
+ * The general strategy is the following:
+ * 1.) If there is a log partition large enough then use it.
+ * 2.) If there is none large enough, search
+ * for a free partition that is large enough.
+ * 3.) If there is no free partition large enough, remove
+ * _all_ OS partitions and consolidate the space.
+ * 4.) First try getting a chunk that will satisfy the maximum
+ * error log size (NVRAM_MAX_REQ).
+ * 5.) If the max chunk cannot be allocated then try finding a chunk
+ * that will satisfy the minimum needed (NVRAM_MIN_REQ).
+ */
+static int __init pseries_nvram_init_log_partition(void)
+{
+	loff_t p;
+	int size;
+
+	/* Scan nvram for partitions */
+	nvram_scan_partitions();
+
+	/* Look for ours */
+	p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
+
+	/* Found one but too small, remove it */
+	if (p && size < NVRAM_MIN_REQ) {
+		pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition"
+			", removing it...");
+		nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
+		p = 0;
+	}
+
+	/* Create one if we didn't find */
+	if (!p) {
+		p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
+					   NVRAM_MAX_REQ, NVRAM_MIN_REQ);
+		/* No room for it, try to get rid of any OS partition
+		 * and try again
+		 */
+		if (p == -ENOSPC) {
+			pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
+				" partition, deleting all OS partitions...");
+			nvram_remove_partition(NULL, NVRAM_SIG_OS);
+			p = nvram_create_partition(NVRAM_LOG_PART_NAME,
+						   NVRAM_SIG_OS, NVRAM_MAX_REQ,
+						   NVRAM_MIN_REQ);
+		}
+	}
+
+	if (p <= 0) {
+		pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
+		       " partition, err %d\n", (int)p);
+		return 0;
+	}
+
+	nvram_error_log_index = p;
+	nvram_error_log_size = nvram_get_partition_size(p) -
+		sizeof(struct err_log_info);
+	
+	return 0;
+}
+machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
+
 int __init pSeries_nvram_init(void)
 {
 	struct device_node *nvram;
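
For context, a hedged sketch of how the new pseries error-log helpers fit together, following the partition layout described in the comment above: an 8-byte err_log_info header (event_logged word plus sequence number) followed by the raw log. The buffer contents and the type/sequence values below are made up for illustration.

#include <asm/nvram.h>

static void example_error_log_roundtrip(void)
{
	char buf[128] = "example RTAS event";
	unsigned int err_type = 0, seq = 0;

	/* Store the event; err_type and the sequence number land in the
	 * 8-byte header, the buffer in the data area that follows. */
	if (nvram_write_error_log(buf, sizeof(buf), 1, 42) != 0)
		return;

	/* Read it back: seq should come back as 42 and err_type as 1. */
	nvram_read_error_log(buf, sizeof(buf), &err_type, &seq);

	/* Mark the event as logged to syslog so it is not replayed. */
	nvram_clear_error_log();
}
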
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
new file mode 100644
index 0000000..c8b3c69
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -0,0 +1,326 @@
+/*
+ * POWER platform energy management driver
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This pseries platform device driver provides access to
+ * platform energy management capabilities.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+
+
+#define MODULE_VERS "1.0"
+#define MODULE_NAME "pseries_energy"
+
+/* Driver flags */
+
+static int sysfs_entries;
+
+/* Helper routines */
+
+/*
+ * Routine to detect firmware support for hcall
+ * return 1 if H_BEST_ENERGY is supported
+ * else return 0
+ */
+
+static int check_for_h_best_energy(void)
+{
+	struct device_node *rtas = NULL;
+	const char *hypertas, *s;
+	int length;
+	int rc = 0;
+
+	rtas = of_find_node_by_path("/rtas");
+	if (!rtas)
+		return 0;
+
+	hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
+	if (!hypertas) {
+		of_node_put(rtas);
+		return 0;
+	}
+
+	/* hypertas will have list of strings with hcall names */
+	for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
+		if (!strncmp("hcall-best-energy-1", s, 19)) {
+			rc = 1; /* Found the string */
+			break;
+		}
+	}
+	of_node_put(rtas);
+	return rc;
+}
+
+/* Helper Routines to convert between drc_index to cpu numbers */
+
+static u32 cpu_to_drc_index(int cpu)
+{
+	struct device_node *dn = NULL;
+	const int *indexes;
+	int i;
+	int rc = 1;
+	u32 ret = 0;
+
+	dn = of_find_node_by_path("/cpus");
+	if (dn == NULL)
+		goto err;
+	indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+	if (indexes == NULL)
+		goto err_of_node_put;
+	/* Convert logical cpu number to core number */
+	i = cpu_core_index_of_thread(cpu);
+	/*
+	 * The first element indexes[0] is the number of drc_indexes
+	 * returned in the list.  Hence i+1 will get the drc_index
+	 * corresponding to core number i.
+	 */
+	WARN_ON(i > indexes[0]);
+	ret = indexes[i + 1];
+	rc = 0;
+
+err_of_node_put:
+	of_node_put(dn);
+err:
+	if (rc)
+		printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
+	return ret;
+}
+
+static int drc_index_to_cpu(u32 drc_index)
+{
+	struct device_node *dn = NULL;
+	const int *indexes;
+	int i, cpu = 0;
+	int rc = 1;
+
+	dn = of_find_node_by_path("/cpus");
+	if (dn == NULL)
+		goto err;
+	indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+	if (indexes == NULL)
+		goto err_of_node_put;
+	/*
+	 * First element in the array is the number of drc_indexes
+	 * returned.  Search through the list to find the matching
+	 * drc_index and get the core number
+	 */
+	for (i = 0; i < indexes[0]; i++) {
+		if (indexes[i + 1] == drc_index)
+			break;
+	}
+	/* Convert core number to logical cpu number */
+	cpu = cpu_first_thread_of_core(i);
+	rc = 0;
+
+err_of_node_put:
+	of_node_put(dn);
+err:
+	if (rc)
+		printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index);
+	return cpu;
+}
+
+/*
+ * pseries hypervisor call H_BEST_ENERGY provides hints to OS on
+ * preferred logical cpus to activate or deactivate for optimized
+ * energy consumption.
+ */
+
+#define FLAGS_MODE1	0x004E200000080E01
+#define FLAGS_MODE2	0x004E200000080401
+#define FLAGS_ACTIVATE  0x100
+
+static ssize_t get_best_energy_list(char *page, int activate)
+{
+	int rc, cnt, i, cpu;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long flags = 0;
+	u32 *buf_page;
+	char *s = page;
+
+	buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
+	if (!buf_page)
+		return -ENOMEM;
+
+	flags = FLAGS_MODE1;
+	if (activate)
+		flags |= FLAGS_ACTIVATE;
+
+	rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
+				0, 0, 0, 0, 0, 0);
+	if (rc != H_SUCCESS) {
+		free_page((unsigned long) buf_page);
+		return -EINVAL;
+	}
+
+	cnt = retbuf[0];
+	for (i = 0; i < cnt; i++) {
+		cpu = drc_index_to_cpu(buf_page[2*i+1]);
+		if ((cpu_online(cpu) && !activate) ||
+		    (!cpu_online(cpu) && activate))
+			s += sprintf(s, "%d,", cpu);
+	}
+	if (s > page) { /* Something to show */
+		s--; /* Suppress last comma */
+		s += sprintf(s, "\n");
+	}
+
+	free_page((unsigned long) buf_page);
+	return s-page;
+}
+
+static ssize_t get_best_energy_data(struct sys_device *dev,
+					char *page, int activate)
+{
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long flags = 0;
+
+	flags = FLAGS_MODE2;
+	if (activate)
+		flags |= FLAGS_ACTIVATE;
+
+	rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
+				cpu_to_drc_index(dev->id),
+				0, 0, 0, 0, 0, 0, 0);
+
+	if (rc != H_SUCCESS)
+		return -EINVAL;
+
+	return sprintf(page, "%lu\n", retbuf[1] >> 32);
+}
+
+/* Wrapper functions */
+
+static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr, char *page)
+{
+	return get_best_energy_list(page, 1);
+}
+
+static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr, char *page)
+{
+	return get_best_energy_list(page, 0);
+}
+
+static ssize_t percpu_activate_hint_show(struct sys_device *dev,
+			struct sysdev_attribute *attr, char *page)
+{
+	return get_best_energy_data(dev, page, 1);
+}
+
+static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
+			struct sysdev_attribute *attr, char *page)
+{
+	return get_best_energy_data(dev, page, 0);
+}
+
+/*
+ * Create sysfs interface:
+ * /sys/devices/system/cpu/pseries_activate_hint_list
+ * /sys/devices/system/cpu/pseries_deactivate_hint_list
+ *	Comma separated list of cpus to activate or deactivate
+ * /sys/devices/system/cpu/cpuN/pseries_activate_hint
+ * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
+ *	Per-cpu value of the hint
+ */
+
+struct sysdev_class_attribute attr_cpu_activate_hint_list =
+		_SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
+		cpu_activate_hint_list_show, NULL);
+
+struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
+		_SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
+		cpu_deactivate_hint_list_show, NULL);
+
+struct sysdev_attribute attr_percpu_activate_hint =
+		_SYSDEV_ATTR(pseries_activate_hint, 0444,
+		percpu_activate_hint_show, NULL);
+
+struct sysdev_attribute attr_percpu_deactivate_hint =
+		_SYSDEV_ATTR(pseries_deactivate_hint, 0444,
+		percpu_deactivate_hint_show, NULL);
+
+static int __init pseries_energy_init(void)
+{
+	int cpu, err;
+	struct sys_device *cpu_sys_dev;
+
+	if (!check_for_h_best_energy()) {
+		printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
+		return 0;
+	}
+	/* Create the sysfs files */
+	err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_activate_hint_list.attr);
+	if (!err)
+		err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_deactivate_hint_list.attr);
+
+	if (err)
+		return err;
+	for_each_possible_cpu(cpu) {
+		cpu_sys_dev = get_cpu_sysdev(cpu);
+		err = sysfs_create_file(&cpu_sys_dev->kobj,
+				&attr_percpu_activate_hint.attr);
+		if (err)
+			break;
+		err = sysfs_create_file(&cpu_sys_dev->kobj,
+				&attr_percpu_deactivate_hint.attr);
+		if (err)
+			break;
+	}
+
+	if (err)
+		return err;
+
+	sysfs_entries = 1; /* Removed entries on cleanup */
+	return 0;
+
+}
+
+static void __exit pseries_energy_cleanup(void)
+{
+	int cpu;
+	struct sys_device *cpu_sys_dev;
+
+	if (!sysfs_entries)
+		return;
+
+	/* Remove the sysfs files */
+	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_activate_hint_list.attr);
+
+	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_deactivate_hint_list.attr);
+
+	for_each_possible_cpu(cpu) {
+		cpu_sys_dev = get_cpu_sysdev(cpu);
+		sysfs_remove_file(&cpu_sys_dev->kobj,
+				&attr_percpu_activate_hint.attr);
+		sysfs_remove_file(&cpu_sys_dev->kobj,
+				&attr_percpu_deactivate_hint.attr);
+	}
+}
+
+module_init(pseries_energy_init);
+module_exit(pseries_energy_cleanup);
+MODULE_DESCRIPTION("Driver for pSeries platform energy management");
+MODULE_AUTHOR("Vaidyanathan Srinivasan");
+MODULE_LICENSE("GPL");
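
A small userspace sketch of consuming the new interface, reading the activation hint list at the path given in the Kconfig help text; error handling kept minimal:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/pseries_activate_hint_list", "r");

	if (!f) {
		perror("pseries_activate_hint_list");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("cpus preferred for activation: %s", line);
	fclose(f);
	return 0;
}
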
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index ed72098..a8ca289 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -153,7 +153,7 @@
 	.name = "power",
 };
 
-static struct platform_suspend_ops pseries_suspend_ops = {
+static const struct platform_suspend_ops pseries_suspend_ops = {
 	.valid		= suspend_valid_only_mem,
 	.begin		= pseries_suspend_begin,
 	.prepare_late	= pseries_prepare_late,
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0bef9da..9c29734 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -41,6 +41,7 @@
 ifeq ($(CONFIG_PCI),y)
 obj-$(CONFIG_4xx)		+= ppc4xx_pci.o
 endif
+obj-$(CONFIG_PPC4xx_CPM)	+= ppc4xx_cpm.o
 obj-$(CONFIG_PPC4xx_GPIO)	+= ppc4xx_gpio.o
 
 obj-$(CONFIG_CPM)		+= cpm_common.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 17cf15e..8e9e06a 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -312,17 +312,10 @@
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	if (!iommu_table_dart_inited) {
 		iommu_table_dart_inited = 1;
 		iommu_table_dart_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
 static bool dart_device_on_pcie(struct device *dev)
@@ -373,7 +366,7 @@
 	if (dn == NULL) {
 		dn = of_find_compatible_node(NULL, "dart", "u4-dart");
 		if (dn == NULL)
-			goto bail;
+			return;	/* use default direct_dma_ops */
 		dart_is_u4 = 1;
 	}
 
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 44de855..e9381bf 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -53,7 +53,7 @@
 	return 1;
 }
 
-static struct platform_suspend_ops pmc_suspend_ops = {
+static const struct platform_suspend_ops pmc_suspend_ops = {
 	.valid = pmc_suspend_valid,
 	.enter = pmc_suspend_enter,
 };
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9725369..9f99bef 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -973,7 +973,6 @@
 	if (dsr & DOORBELL_DSR_QFI) {
 		pr_info("RIO: doorbell queue full\n");
 		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
-		goto out;
 	}
 
 	/* XXX Need to check/dispatch until queue empty */
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index c0ea05e..c48cd81 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -1,5 +1,5 @@
 /*
- * GPIOs on MPC8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610 and compatible
  *
  * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -26,6 +26,7 @@
 #define GPIO_IER		0x0c
 #define GPIO_IMR		0x10
 #define GPIO_ICR		0x14
+#define GPIO_ICR2		0x18
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
@@ -37,6 +38,7 @@
 	 */
 	u32 data;
 	struct irq_host *irq;
+	void *of_dev_id_data;
 };
 
 static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -215,6 +217,51 @@
 	return 0;
 }
 
+static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long gpio = virq_to_hw(virq);
+	void __iomem *reg;
+	unsigned int shift;
+	unsigned long flags;
+
+	if (gpio < 16) {
+		reg = mm->regs + GPIO_ICR;
+		shift = (15 - gpio) * 2;
+	} else {
+		reg = mm->regs + GPIO_ICR2;
+		shift = (15 - (gpio % 16)) * 2;
+	}
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_LEVEL_LOW:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 2 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 1 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrbits32(reg, 3 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct irq_chip mpc8xxx_irq_chip = {
 	.name		= "mpc8xxx-gpio",
 	.unmask		= mpc8xxx_irq_unmask,
@@ -226,6 +273,11 @@
 static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
 				irq_hw_number_t hw)
 {
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
+
+	if (mpc8xxx_gc->of_dev_id_data)
+		mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
+
 	set_irq_chip_data(virq, h->host_data);
 	set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
 	set_irq_type(virq, IRQ_TYPE_NONE);
@@ -253,11 +305,20 @@
 	.xlate	= mpc8xxx_gpio_irq_xlate,
 };
 
+static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+	{ .compatible = "fsl,mpc8349-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8610-gpio", },
+	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{}
+};
+
 static void __init mpc8xxx_add_controller(struct device_node *np)
 {
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
+	const struct of_device_id *id;
 	unsigned hwirq;
 	int ret;
 
@@ -297,6 +358,10 @@
 	if (!mpc8xxx_gc->irq)
 		goto skip_irq;
 
+	id = of_match_node(mpc8xxx_gpio_ids, np);
+	if (id)
+		mpc8xxx_gc->of_dev_id_data = id->data;
+
 	mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
 
 	/* ack and mask all irqs */
@@ -321,13 +386,7 @@
 {
 	struct device_node *np;
 
-	for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
+	for_each_matching_node(np, mpc8xxx_gpio_ids)
 		mpc8xxx_add_controller(np);
 
 	for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
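
The GPIO change above keys per-SoC behaviour off the .data field of the of_device_id match table (here, the MPC512x-specific set_type hook). A generic sketch of that pattern, with hypothetical acme compatibles and variant data:

#include <linux/errno.h>
#include <linux/of.h>

struct example_variant {
	int ngpios;
};

static struct example_variant variant_a = { .ngpios = 32 };
static struct example_variant variant_b = { .ngpios = 64 };

static struct of_device_id example_gpio_ids[] = {
	{ .compatible = "acme,gpio-a", .data = &variant_a, },
	{ .compatible = "acme,gpio-b", .data = &variant_b, },
	{}
};

static int example_gpio_count(struct device_node *np)
{
	const struct of_device_id *id = of_match_node(example_gpio_ids, np);

	if (!id)
		return -ENODEV;
	/* id->data points at the entry that matched this node. */
	return ((struct example_variant *)id->data)->ngpios;
}
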
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 1398bc4..feaee40 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -16,6 +16,7 @@
 #include <linux/mv643xx.h>
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
+#include <linux/of_net.h>
 #include <linux/dma-mapping.h>
 
 #include <asm/prom.h>
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
new file mode 100644
index 0000000..73b86cc
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -0,0 +1,346 @@
+/*
+ * PowerPC 4xx Clock and Power Management
+ *
+ * Copyright (C) 2010, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@apm.com)
+ *
+ * Based on arch/powerpc/platforms/44x/idle.c:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Copyright 2008 IBM Corp.
+ *
+ * Based on arch/powerpc/sysdev/fsl_pmc.c:
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Copyright 2009  MontaVista Software, Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/suspend.h>
+#include <asm/dcr.h>
+#include <asm/dcr-native.h>
+#include <asm/machdep.h>
+
+#define CPM_ER	0
+#define CPM_FR	1
+#define CPM_SR	2
+
+#define CPM_IDLE_WAIT	0
+#define CPM_IDLE_DOZE	1
+
+struct cpm {
+	dcr_host_t	dcr_host;
+	unsigned int	dcr_offset[3];
+	unsigned int	powersave_off;
+	unsigned int	unused;
+	unsigned int	idle_doze;
+	unsigned int	standby;
+	unsigned int	suspend;
+};
+
+static struct cpm cpm;
+
+struct cpm_idle_mode {
+	unsigned int enabled;
+	const char  *name;
+};
+
+static struct cpm_idle_mode idle_mode[] = {
+	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
+	[CPM_IDLE_DOZE] = { 0, "doze" },
+};
+
+static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
+{
+	unsigned int value;
+
+	/* CPM controller supports 3 different types of sleep interface
+	 * known as class 1, 2 and 3. For class 1 units, they are
+	 * unconditionally put to sleep when the corresponding CPM bit is
+	 * set. For class 2 and 3 units this is not the case; if they can be
+	 * put to sleep, they will. Here we do not verify, we just
+	 * set them and expect them to eventually go off when they can.
+	 */
+	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
+	dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);
+
+	/* return old state, to restore later if needed */
+	return value;
+}
+
+static void cpm_idle_wait(void)
+{
+	unsigned long msr_save;
+
+	/* save off initial state */
+	msr_save = mfmsr();
+	/* sync required when CPM0_ER[CPU] is set */
+	mb();
+	/* set wait state MSR */
+	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
+	isync();
+	/* return to initial state */
+	mtmsr(msr_save);
+	isync();
+}
+
+static void cpm_idle_sleep(unsigned int mask)
+{
+	unsigned int er_save;
+
+	/* update CPM_ER state */
+	er_save = cpm_set(CPM_ER, mask);
+
+	/* go to wait state so that CPM0_ER[CPU] can take effect */
+	cpm_idle_wait();
+
+	/* restore CPM_ER state */
+	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
+}
+
+static void cpm_idle_doze(void)
+{
+	cpm_idle_sleep(cpm.idle_doze);
+}
+
+static void cpm_idle_config(int mode)
+{
+	int i;
+
+	if (idle_mode[mode].enabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
+		idle_mode[i].enabled = 0;
+
+	idle_mode[mode].enabled = 1;
+}
+
+static ssize_t cpm_idle_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+		if (idle_mode[i].enabled)
+			s += sprintf(s, "[%s] ", idle_mode[i].name);
+		else
+			s += sprintf(s, "%s ", idle_mode[i].name);
+	}
+
+	*(s-1) = '\n'; /* convert the last space to a newline */
+
+	return s - buf;
+}
+
+static ssize_t cpm_idle_store(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      const char *buf, size_t n)
+{
+	int i;
+	char *p;
+	int len;
+
+	p = memchr(buf, '\n', n);
+	len = p ? p - buf : n;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+		if (strncmp(buf, idle_mode[i].name, len) == 0) {
+			cpm_idle_config(i);
+			return n;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static struct kobj_attribute cpm_idle_attr =
+	__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
+
+static void cpm_idle_config_sysfs(void)
+{
+	struct sys_device *sys_dev;
+	unsigned long ret;
+
+	sys_dev = get_cpu_sysdev(0);
+
+	ret = sysfs_create_file(&sys_dev->kobj,
+				&cpm_idle_attr.attr);
+	if (ret)
+		printk(KERN_WARNING
+		       "cpm: failed to create idle sysfs entry\n");
+}
+
+static void cpm_idle(void)
+{
+	if (idle_mode[CPM_IDLE_DOZE].enabled)
+		cpm_idle_doze();
+	else
+		cpm_idle_wait();
+}
+
+static int cpm_suspend_valid(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		return !!cpm.standby;
+	case PM_SUSPEND_MEM:
+		return !!cpm.suspend;
+	default:
+		return 0;
+	}
+}
+
+static void cpm_suspend_standby(unsigned int mask)
+{
+	unsigned long tcr_save;
+
+	/* disable decrement interrupt */
+	tcr_save = mfspr(SPRN_TCR);
+	mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);
+
+	/* go to sleep state */
+	cpm_idle_sleep(mask);
+
+	/* restore decrement interrupt */
+	mtspr(SPRN_TCR, tcr_save);
+}
+
+static int cpm_suspend_enter(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		cpm_suspend_standby(cpm.standby);
+		break;
+	case PM_SUSPEND_MEM:
+		cpm_suspend_standby(cpm.suspend);
+		break;
+	}
+
+	return 0;
+}
+
+static struct platform_suspend_ops cpm_suspend_ops = {
+	.valid		= cpm_suspend_valid,
+	.enter		= cpm_suspend_enter,
+};
+
+static int cpm_get_uint_property(struct device_node *np,
+				 const char *name)
+{
+	int len;
+	const unsigned int *prop = of_get_property(np, name, &len);
+
+	if (prop == NULL || len < sizeof(u32))
+		return 0;
+
+	return *prop;
+}
+
+static int __init cpm_init(void)
+{
+	struct device_node *np;
+	int dcr_base, dcr_len;
+	int ret = 0;
+
+	if (!cpm.powersave_off) {
+		cpm_idle_config(CPM_IDLE_WAIT);
+		ppc_md.power_save = &cpm_idle;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
+	if (!np) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dcr_base = dcr_resource_start(np, 0);
+	dcr_len = dcr_resource_len(np, 0);
+
+	if (dcr_base == 0 || dcr_len == 0) {
+		printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
+		       np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
+
+	if (!DCR_MAP_OK(cpm.dcr_host)) {
+		printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
+		       np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* All 4xx SoCs with a CPM controller have one of two
+	 * different orderings for the CPM registers. Some have the
+	 * CPM registers in the following order (ER,FR,SR). The
+	 * others have them in the following order (SR,ER,FR).
+	 */
+
+	if (cpm_get_uint_property(np, "er-offset") == 0) {
+		cpm.dcr_offset[CPM_ER] = 0;
+		cpm.dcr_offset[CPM_FR] = 1;
+		cpm.dcr_offset[CPM_SR] = 2;
+	} else {
+		cpm.dcr_offset[CPM_ER] = 1;
+		cpm.dcr_offset[CPM_FR] = 2;
+		cpm.dcr_offset[CPM_SR] = 0;
+	}
+
+	/* Now let's see what IPs to turn off for the following modes */
+
+	cpm.unused = cpm_get_uint_property(np, "unused-units");
+	cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
+	cpm.standby = cpm_get_uint_property(np, "standby");
+	cpm.suspend = cpm_get_uint_property(np, "suspend");
+
+	/* If some IPs are unused let's turn them off now */
+
+	if (cpm.unused) {
+		cpm_set(CPM_ER, cpm.unused);
+		cpm_set(CPM_FR, cpm.unused);
+	}
+
+	/* Now let's export interfaces */
+
+	if (!cpm.powersave_off && cpm.idle_doze)
+		cpm_idle_config_sysfs();
+
+	if (cpm.standby || cpm.suspend)
+		suspend_set_ops(&cpm_suspend_ops);
+out:
+	if (np)
+		of_node_put(np);
+	return ret;
+}
+
+late_initcall(cpm_init);
+
+static int __init cpm_powersave_off(char *arg)
+{
+	cpm.powersave_off = 1;
+	return 0;
+}
+__setup("powersave=off", cpm_powersave_off);
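
The new driver is built around the generic 4xx DCR accessors; a hedged sketch of that map and read-modify-write pattern, with a hypothetical "acme,pm" node and register offset:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <asm/dcr.h>

static int __init example_dcr_init(void)
{
	struct device_node *np;
	dcr_host_t host;
	unsigned int base, len, val;

	np = of_find_compatible_node(NULL, NULL, "acme,pm");
	if (!np)
		return -ENODEV;

	base = dcr_resource_start(np, 0);
	len  = dcr_resource_len(np, 0);
	host = dcr_map(np, base, len);
	of_node_put(np);
	if (!DCR_MAP_OK(host))
		return -EINVAL;

	/* Read-modify-write of the first register behind the mapping. */
	val = dcr_read(host, 0);
	dcr_write(host, 0, val | 0x1);
	return 0;
}
late_initcall(example_dcr_init);
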
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index d4d15aa..ee05680 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/of_net.h>
 #include <asm/tsi108.h>
 
 #include <asm/system.h>
@@ -83,8 +84,8 @@
 		memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
 
 		ret = of_address_to_resource(np, 0, &r[0]);
-		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-			__func__,r[0].name, r[0].start, r[0].end);
+		DBG("%s: name:start->end = %s:%pR\n",
+		    __func__, r[0].name, &r[0]);
 		if (ret)
 			goto err;
 
@@ -92,8 +93,8 @@
 		r[1].start = irq_of_parse_and_map(np, 0);
 		r[1].end = irq_of_parse_and_map(np, 0);
 		r[1].flags = IORESOURCE_IRQ;
-		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-			__func__,r[1].name, r[1].start, r[1].end);
+		DBG("%s: name:start->end = %s:%pR\n",
+			__func__, r[1].name, &r[1]);
 
 		tsi_eth_dev =
 		    platform_device_register_simple("tsi-ethernet", i++, &r[0],
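
The DBG() conversions above rely on the %pR printk extension, which formats a struct resource (range plus type flags) in a single argument instead of hand-printing start and end. A tiny hedged sketch:

#include <linux/ioport.h>
#include <linux/kernel.h>

static void example_print_resource(void)
{
	struct resource r = {
		.start	= 0x1000,
		.end	= 0x1fff,
		.flags	= IORESOURCE_MEM,
	};

	/* Prints something like: example: [mem 0x00001000-0x00001fff] */
	printk(KERN_DEBUG "example: %pR\n", &r);
}
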
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 6c6d7b3..ff19efdf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -1,13 +1,8 @@
-config SCHED_MC
-	def_bool y
-	depends on SMP
-
 config MMU
 	def_bool y
 
 config ZONE_DMA
-	def_bool y
-	depends on 64BIT
+	def_bool y if 64BIT
 
 config LOCKDEP_SUPPORT
 	def_bool y
@@ -25,12 +20,10 @@
 	def_bool y
 
 config ARCH_HAS_ILOG2_U32
-	bool
-	default n
+	def_bool n
 
 config ARCH_HAS_ILOG2_U64
-	bool
-	default n
+	def_bool n
 
 config GENERIC_HWEIGHT
 	def_bool y
@@ -42,9 +35,7 @@
 	def_bool y
 
 config GENERIC_BUG
-	bool
-	depends on BUG
-	default y
+	def_bool y if BUG
 
 config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y
@@ -59,13 +50,10 @@
 	def_bool 64BIT
 
 config GENERIC_LOCKBREAK
-	bool
-	default y
-	depends on SMP && PREEMPT
+	def_bool y if SMP && PREEMPT
 
 config PGSTE
-	bool
-	default y if KVM
+	def_bool y if KVM
 
 config VIRT_CPU_ACCOUNTING
 	def_bool y
@@ -85,7 +73,6 @@
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
@@ -130,8 +117,7 @@
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 
 config SCHED_OMIT_FRAME_POINTER
-	bool
-	default y
+	def_bool y
 
 source "init/Kconfig"
 
@@ -144,20 +130,21 @@
 source "kernel/time/Kconfig"
 
 config 64BIT
-	bool "64 bit kernel"
+	def_bool y
+	prompt "64 bit kernel"
 	help
 	  Select this option if you have an IBM z/Architecture machine
 	  and want to use the 64 bit addressing mode.
 
 config 32BIT
-	bool
-	default y if !64BIT
+	def_bool y if !64BIT
 
 config KTIME_SCALAR
 	def_bool 32BIT
 
 config SMP
-	bool "Symmetric multi-processing support"
+	def_bool y
+	prompt "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -189,10 +176,10 @@
 	  approximately sixteen kilobytes to the kernel image.
 
 config HOTPLUG_CPU
-	bool "Support for hot-pluggable CPUs"
+	def_bool y
+	prompt "Support for hot-pluggable CPUs"
 	depends on SMP
 	select HOTPLUG
-	default n
 	help
 	  Say Y here to be able to turn CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu/cpu#.
@@ -208,14 +195,16 @@
 	  increased overhead in some places.
 
 config SCHED_BOOK
-	bool "Book scheduler support"
+	def_bool y
+	prompt "Book scheduler support"
 	depends on SMP && SCHED_MC
 	help
 	  Book scheduler support improves the CPU scheduler's decision making
 	  when dealing with machines that have several books.
 
 config MATHEMU
-	bool "IEEE FPU emulation"
+	def_bool y
+	prompt "IEEE FPU emulation"
 	depends on MARCH_G5
 	help
 	  This option is required for IEEE compliant floating point arithmetic
@@ -223,7 +212,8 @@
 	  need this.
 
 config COMPAT
-	bool "Kernel support for 31 bit emulation"
+	def_bool y
+	prompt "Kernel support for 31 bit emulation"
 	depends on 64BIT
 	select COMPAT_BINFMT_ELF
 	help
@@ -233,16 +223,14 @@
 	  executing 31 bit applications.  It is safe to say "Y".
 
 config SYSVIPC_COMPAT
-	bool
-	depends on COMPAT && SYSVIPC
-	default y
+	def_bool y if COMPAT && SYSVIPC
 
 config AUDIT_ARCH
-	bool
-	default y
+	def_bool y
 
 config S390_EXEC_PROTECT
-	bool "Data execute protection"
+	def_bool y
+	prompt "Data execute protection"
 	help
 	  This option allows to enable a buffer overflow protection for user
 	  space programs and it also selects the addressing mode option above.
@@ -302,7 +290,8 @@
 endchoice
 
 config PACK_STACK
-	bool "Pack kernel stack"
+	def_bool y
+	prompt "Pack kernel stack"
 	help
 	  This option enables the compiler option -mkernel-backchain if it
 	  is available. If the option is available the compiler supports
@@ -315,7 +304,8 @@
 	  Say Y if you are unsure.
 
 config SMALL_STACK
-	bool "Use 8kb for kernel stack instead of 16kb"
+	def_bool n
+	prompt "Use 8kb for kernel stack instead of 16kb"
 	depends on PACK_STACK && 64BIT && !LOCKDEP
 	help
 	  If you say Y here and the compiler supports the -mkernel-backchain
@@ -327,7 +317,8 @@
 	  Say N if you are unsure.
 
 config CHECK_STACK
-	bool "Detect kernel stack overflow"
+	def_bool y
+	prompt "Detect kernel stack overflow"
 	help
 	  This option enables the compiler option -mstack-guard and
 	  -mstack-size if they are available. If the compiler supports them
@@ -351,7 +342,8 @@
 	  512 for 64 bit.
 
 config WARN_STACK
-	bool "Emit compiler warnings for function with broken stack usage"
+	def_bool n
+	prompt "Emit compiler warnings for function with broken stack usage"
 	help
 	  This option enables the compiler options -mwarn-framesize and
 	  -mwarn-dynamicstack. If the compiler supports these options it
@@ -386,24 +378,24 @@
 	def_bool y
 
 config ARCH_SELECT_MEMORY_MODEL
-       def_bool y
+	def_bool y
 
 config ARCH_ENABLE_MEMORY_HOTPLUG
-	def_bool y
-	depends on SPARSEMEM
+	def_bool y if SPARSEMEM
 
 config ARCH_ENABLE_MEMORY_HOTREMOVE
 	def_bool y
 
 config ARCH_HIBERNATION_POSSIBLE
-       def_bool y if 64BIT
+	def_bool y if 64BIT
 
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
 
 config QDIO
-	tristate "QDIO support"
+	def_tristate y
+	prompt "QDIO support"
 	---help---
 	  This driver provides the Queued Direct I/O base support for
 	  IBM System z.
@@ -414,7 +406,8 @@
 	  If unsure, say Y.
 
 config CHSC_SCH
-	tristate "Support for CHSC subchannels"
+	def_tristate y
+	prompt "Support for CHSC subchannels"
 	help
 	  This driver allows usage of CHSC subchannels. A CHSC subchannel
 	  is usually present on LPAR only.
@@ -432,7 +425,8 @@
 comment "Misc"
 
 config IPL
-	bool "Builtin IPL record support"
+	def_bool y
+	prompt "Builtin IPL record support"
 	help
 	  If you want to use the produced kernel to IPL directly from a
 	  device, you have to merge a bootsector specific to the device
@@ -464,7 +458,8 @@
 	default "9"
 
 config PFAULT
-	bool "Pseudo page fault support"
+	def_bool y
+	prompt "Pseudo page fault support"
 	help
 	  Select this option, if you want to use PFAULT pseudo page fault
 	  handling under VM. If running native or in LPAR, this option
@@ -476,7 +471,8 @@
 	  this option.
 
 config SHARED_KERNEL
-	bool "VM shared kernel support"
+	def_bool y
+	prompt "VM shared kernel support"
 	help
 	  Select this option, if you want to share the text segment of the
 	  Linux kernel between different VM guests. This reduces memory
@@ -487,7 +483,8 @@
 	  doing and want to exploit this feature.
 
 config CMM
-	tristate "Cooperative memory management"
+	def_tristate n
+	prompt "Cooperative memory management"
 	help
 	  Select this option, if you want to enable the kernel interface
 	  to reduce the memory size of the system. This is accomplished
@@ -499,14 +496,16 @@
 	  option.
 
 config CMM_IUCV
-	bool "IUCV special message interface to cooperative memory management"
+	def_bool y
+	prompt "IUCV special message interface to cooperative memory management"
 	depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
 	help
 	  Select this option to enable the special message interface to
 	  the cooperative memory management.
 
 config APPLDATA_BASE
-	bool "Linux - VM Monitor Stream, base infrastructure"
+	def_bool n
+	prompt "Linux - VM Monitor Stream, base infrastructure"
 	depends on PROC_FS
 	help
 	  This provides a kernel interface for creating and updating z/VM APPLDATA
@@ -521,7 +520,8 @@
 	  The /proc entries can also be read from, showing the current settings.
 
 config APPLDATA_MEM
-	tristate "Monitor memory management statistics"
+	def_tristate m
+	prompt "Monitor memory management statistics"
 	depends on APPLDATA_BASE && VM_EVENT_COUNTERS
 	help
 	  This provides memory management related data to the Linux - VM Monitor
@@ -537,7 +537,8 @@
 	  appldata_mem.o.
 
 config APPLDATA_OS
-	tristate "Monitor OS statistics"
+	def_tristate m
+	prompt "Monitor OS statistics"
 	depends on APPLDATA_BASE
 	help
 	  This provides OS related data to the Linux - VM Monitor Stream, like
@@ -551,7 +552,8 @@
 	  appldata_os.o.
 
 config APPLDATA_NET_SUM
-	tristate "Monitor overall network statistics"
+	def_tristate m
+	prompt "Monitor overall network statistics"
 	depends on APPLDATA_BASE && NET
 	help
 	  This provides network related data to the Linux - VM Monitor Stream,
@@ -568,30 +570,32 @@
 source kernel/Kconfig.hz
 
 config S390_HYPFS_FS
-	bool "s390 hypervisor file system support"
+	def_bool y
+	prompt "s390 hypervisor file system support"
 	select SYS_HYPERVISOR
-	default y
 	help
 	  This is a virtual file system intended to provide accounting
 	  information in an s390 hypervisor environment.
 
 config KEXEC
-	bool "kexec system call"
+	def_bool n
+	prompt "kexec system call"
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
 	  but is independent of hardware/microcode support.
 
 config ZFCPDUMP
-	bool "zfcpdump support"
+	def_bool n
+	prompt "zfcpdump support"
 	select SMP
-	default n
 	help
 	  Select this option if you want to build an zfcpdump enabled kernel.
 	  Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
 
 config S390_GUEST
-bool "s390 guest support for KVM (EXPERIMENTAL)"
+	def_bool y
+	prompt "s390 guest support for KVM (EXPERIMENTAL)"
 	depends on 64BIT && EXPERIMENTAL
 	select VIRTIO
 	select VIRTIO_RING
@@ -603,9 +607,9 @@
 	  the default console.
 
 config SECCOMP
-	bool "Enable seccomp to safely compute untrusted bytecode"
+	def_bool y
+	prompt "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
-	default y
 	help
 	  This kernel feature is useful for number crunching applications
 	  that may need to compute untrusted bytecode during their
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 05221b1..2b380df 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -1,8 +1,7 @@
 menu "Kernel hacking"
 
 config TRACE_IRQFLAGS_SUPPORT
-	bool
-	default y
+	def_bool y
 
 source "lib/Kconfig.debug"
 
@@ -19,7 +18,8 @@
 	  If you are unsure, say Y.
 
 config DEBUG_STRICT_USER_COPY_CHECKS
-	bool "Strict user copy size checks"
+	def_bool n
+	prompt "Strict user copy size checks"
 	---help---
 	  Enabling this option turns a certain set of sanity checks for user
 	  copy operations into compile time warnings.
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e40ac6e..29c82c6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -2,17 +2,24 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
+CONFIG_RCU_TRACE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -20,24 +27,14 @@
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=32
-CONFIG_COMPAT=y
-CONFIG_S390_EXEC_PROTECT=y
-CONFIG_PACK_STACK=y
-CONFIG_CHECK_STACK=y
 CONFIG_PREEMPT=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
-CONFIG_QDIO=y
-CONFIG_CHSC_SCH=m
-CONFIG_IPL=y
+CONFIG_KSM=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PFAULT=y
+CONFIG_CMM=m
 CONFIG_HZ_100=y
 CONFIG_KEXEC=y
-CONFIG_S390_GUEST=y
 CONFIG_PM=y
 CONFIG_HIBERNATION=y
 CONFIG_PACKET=y
@@ -46,16 +43,15 @@
 CONFIG_AFIUCV=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
+# CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NETFILTER_NETLINK_LOG=m
-CONFIG_NF_CONNTRACK=m
-# CONFIG_NF_CT_PROTO_SCTP is not set
+CONFIG_NET_SCTPPROBE=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_VLAN_8021Q=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_MULTIQ=y
 CONFIG_NET_SCH_RED=m
 CONFIG_NET_SCH_SFQ=m
 CONFIG_NET_SCH_TEQL=m
@@ -69,28 +65,14 @@
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
-CONFIG_NET_ACT_NAT=m
-CONFIG_CAN=m
-CONFIG_CAN_RAW=m
-CONFIG_CAN_BCM=m
-CONFIG_CAN_VCAN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_XIP=y
-CONFIG_BLK_DEV_XPRAM=m
-CONFIG_DASD=y
-CONFIG_DASD_PROFILE=y
-CONFIG_DASD_ECKD=y
-CONFIG_DASD_FBA=y
-CONFIG_DASD_DIAG=y
-CONFIG_DASD_EER=y
-CONFIG_VIRTIO_BLK=m
+CONFIG_VIRTIO_BLK=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -102,101 +84,93 @@
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_ZFCP=y
-CONFIG_SCSI_DH=m
-CONFIG_SCSI_DH_RDAC=m
-CONFIG_SCSI_DH_HP_SW=m
-CONFIG_SCSI_DH_EMC=m
-CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_SNAPSHOT=y
-CONFIG_DM_MIRROR=y
-CONFIG_DM_ZERO=y
-CONFIG_DM_MULTIPATH=m
+CONFIG_ZFCP_DIF=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
 CONFIG_BONDING=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
-CONFIG_VETH=m
 CONFIG_NET_ETHERNET=y
-CONFIG_LCS=m
-CONFIG_CTCM=m
-CONFIG_QETH=y
-CONFIG_QETH_L2=y
-CONFIG_QETH_L3=y
-CONFIG_VIRTIO_NET=m
-CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_VIRTIO_NET=y
 CONFIG_RAW_DRIVER=m
-CONFIG_TN3270=y
-CONFIG_TN3270_TTY=y
-CONFIG_TN3270_FS=m
-CONFIG_TN3270_CONSOLE=y
-CONFIG_TN3215=y
-CONFIG_TN3215_CONSOLE=y
-CONFIG_SCLP_TTY=y
-CONFIG_SCLP_CONSOLE=y
-CONFIG_SCLP_VT220_TTY=y
-CONFIG_SCLP_VT220_CONSOLE=y
-CONFIG_SCLP_CPI=m
-CONFIG_SCLP_ASYNC=m
-CONFIG_S390_TAPE=m
-CONFIG_S390_TAPE_BLOCK=y
-CONFIG_S390_TAPE_34XX=m
-CONFIG_ACCESSIBILITY=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
 CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
+CONFIG_TIMER_STATS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_NOTIFIERS=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_SAMPLES=y
-CONFIG_CRYPTO_FIPS=y
+CONFIG_DEBUG_PAGEALLOC=y
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRC_T10DIF=y
-CONFIG_CRC32=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRC7=m
-CONFIG_KVM=m
-CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_BALLOON=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index b08d2ab..2e671d5 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
 
-s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
+s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index fa487d4..80c1526 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -12,6 +12,8 @@
 #include <linux/fs.h>
 #include <linux/types.h>
 #include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
 
 #define REG_FILE_MODE    0440
 #define UPDATE_FILE_MODE 0220
@@ -38,6 +40,33 @@
 extern void hypfs_vm_exit(void);
 extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
 
-/* Directory for debugfs files */
-extern struct dentry *hypfs_dbfs_dir;
+/* debugfs interface */
+struct hypfs_dbfs_file;
+
+struct hypfs_dbfs_data {
+	void			*buf;
+	void			*buf_free_ptr;
+	size_t			size;
+	struct hypfs_dbfs_file	*dbfs_file;
+	struct kref		kref;
+};
+
+struct hypfs_dbfs_file {
+	const char	*name;
+	int		(*data_create)(void **data, void **data_free_ptr,
+				       size_t *size);
+	void		(*data_free)(const void *buf_free_ptr);
+
+	/* Private data for hypfs_dbfs.c */
+	struct hypfs_dbfs_data	*data;
+	struct delayed_work	data_free_work;
+	struct mutex		lock;
+	struct dentry		*dentry;
+};
+
+extern int hypfs_dbfs_init(void);
+extern void hypfs_dbfs_exit(void);
+extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
+extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
+
 #endif /* _HYPFS_H_ */
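For orientation, a hedged sketch of how a provider hooks into the interface declared above (the real conversions follow in the hypfs_diag.c and hypfs_vm.c hunks below; the my_* names and the kzalloc-based buffer are illustrative, not part of the patch):

/* Illustrative provider of one s390_hypfs debugfs file. */
static int my_data_create(void **data, void **data_free_ptr, size_t *size)
{
	void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* fill buf with a snapshot of whatever the file should expose */
	*data = buf;		/* start of the data returned by read() */
	*data_free_ptr = buf;	/* pointer later handed to data_free() */
	*size = PAGE_SIZE;
	return 0;
}

static void my_data_free(const void *buf)
{
	kfree(buf);
}

static struct hypfs_dbfs_file my_dbfs_file = {
	.name		= "my_diag",
	.data_create	= my_data_create,
	.data_free	= my_data_free,
};

/* init: hypfs_dbfs_create_file(&my_dbfs_file);
 * exit: hypfs_dbfs_remove_file(&my_dbfs_file); */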
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
new file mode 100644
index 0000000..b478013
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -0,0 +1,116 @@
+/*
+ * Hypervisor filesystem for Linux on s390 - debugfs interface
+ *
+ * Copyright (C) IBM Corp. 2010
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <linux/slab.h>
+#include "hypfs.h"
+
+static struct dentry *dbfs_dir;
+
+static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
+{
+	struct hypfs_dbfs_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+	kref_init(&data->kref);
+	data->dbfs_file = f;
+	return data;
+}
+
+static void hypfs_dbfs_data_free(struct kref *kref)
+{
+	struct hypfs_dbfs_data *data;
+
+	data = container_of(kref, struct hypfs_dbfs_data, kref);
+	data->dbfs_file->data_free(data->buf_free_ptr);
+	kfree(data);
+}
+
+static void data_free_delayed(struct work_struct *work)
+{
+	struct hypfs_dbfs_data *data;
+	struct hypfs_dbfs_file *df;
+
+	df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
+	mutex_lock(&df->lock);
+	data = df->data;
+	df->data = NULL;
+	mutex_unlock(&df->lock);
+	kref_put(&data->kref, hypfs_dbfs_data_free);
+}
+
+static ssize_t dbfs_read(struct file *file, char __user *buf,
+			 size_t size, loff_t *ppos)
+{
+	struct hypfs_dbfs_data *data;
+	struct hypfs_dbfs_file *df;
+	ssize_t rc;
+
+	if (*ppos != 0)
+		return 0;
+
+	df = file->f_path.dentry->d_inode->i_private;
+	mutex_lock(&df->lock);
+	if (!df->data) {
+		data = hypfs_dbfs_data_alloc(df);
+		if (!data) {
+			mutex_unlock(&df->lock);
+			return -ENOMEM;
+		}
+		rc = df->data_create(&data->buf, &data->buf_free_ptr,
+				     &data->size);
+		if (rc) {
+			mutex_unlock(&df->lock);
+			kfree(data);
+			return rc;
+		}
+		df->data = data;
+		schedule_delayed_work(&df->data_free_work, HZ);
+	}
+	data = df->data;
+	kref_get(&data->kref);
+	mutex_unlock(&df->lock);
+
+	rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
+	kref_put(&data->kref, hypfs_dbfs_data_free);
+	return rc;
+}
+
+static const struct file_operations dbfs_ops = {
+	.read		= dbfs_read,
+	.llseek		= no_llseek,
+};
+
+int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+{
+	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+					 &dbfs_ops);
+	if (IS_ERR(df->dentry))
+		return PTR_ERR(df->dentry);
+	mutex_init(&df->lock);
+	INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
+	return 0;
+}
+
+void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
+{
+	debugfs_remove(df->dentry);
+}
+
+int hypfs_dbfs_init(void)
+{
+	dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
+	if (IS_ERR(dbfs_dir))
+		return PTR_ERR(dbfs_dir);
+	return 0;
+}
+
+void hypfs_dbfs_exit(void)
+{
+	debugfs_remove(dbfs_dir);
+}
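The read path in hypfs_dbfs.c caches the generated buffer: the first read after an idle period calls data_create() and schedules data_free_work roughly one second (HZ jiffies) out, while every read in that window shares the buffer through the kref. A compressed, illustrative view of the per-reader reference handling (not code from the patch):

/* Illustrative only: one reference is held by df->data until
 * data_free_delayed() drops it, plus one per in-flight read. */
static ssize_t example_reader(struct hypfs_dbfs_data *data,
			      char __user *buf, size_t size, loff_t *ppos)
{
	ssize_t rc;

	kref_get(&data->kref);				/* reader's reference */
	rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
	kref_put(&data->kref, hypfs_dbfs_data_free);	/* may free the buffer */
	return rc;
}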
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index cd4a81b..6023c6d 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -555,81 +555,38 @@
 	char			buf[];	/* d204 buffer */
 } __attribute__ ((packed));
 
-struct dbfs_d204_private {
-	struct dbfs_d204	*d204;	/* Aligned d204 data with header */
-	void			*base;	/* Base pointer (needed for vfree) */
-};
-
-static int dbfs_d204_open(struct inode *inode, struct file *file)
+static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d204_private *data;
 	struct dbfs_d204 *d204;
 	int rc, buf_size;
+	void *base;
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
 	buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
-	data->base = vmalloc(buf_size);
-	if (!data->base) {
-		rc = -ENOMEM;
-		goto fail_kfree_data;
+	base = vmalloc(buf_size);
+	if (!base)
+		return -ENOMEM;
+	memset(base, 0, buf_size);
+	d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
+	rc = diag204_do_store(d204->buf, diag204_buf_pages);
+	if (rc) {
+		vfree(base);
+		return rc;
 	}
-	memset(data->base, 0, buf_size);
-	d204 = page_align_ptr(data->base + sizeof(d204->hdr))
-		- sizeof(d204->hdr);
-	rc = diag204_do_store(&d204->buf, diag204_buf_pages);
-	if (rc)
-		goto fail_vfree_base;
 	d204->hdr.version = DBFS_D204_HDR_VERSION;
 	d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
 	d204->hdr.sc = diag204_store_sc;
-	data->d204 = d204;
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-
-fail_vfree_base:
-	vfree(data->base);
-fail_kfree_data:
-	kfree(data);
-	return rc;
-}
-
-static int dbfs_d204_release(struct inode *inode, struct file *file)
-{
-	struct dbfs_d204_private *data = file->private_data;
-
-	vfree(data->base);
-	kfree(data);
+	*data = d204;
+	*data_free_ptr = base;
+	*size = d204->hdr.len + sizeof(struct dbfs_d204_hdr);
 	return 0;
 }
 
-static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
-			      size_t size, loff_t *ppos)
-{
-	struct dbfs_d204_private *data = file->private_data;
-
-	return simple_read_from_buffer(buf, size, ppos, data->d204,
-				       data->d204->hdr.len +
-				       sizeof(data->d204->hdr));
-}
-
-static const struct file_operations dbfs_d204_ops = {
-	.open		= dbfs_d204_open,
-	.read		= dbfs_d204_read,
-	.release	= dbfs_d204_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_d204 = {
+	.name		= "diag_204",
+	.data_create	= dbfs_d204_create,
+	.data_free	= vfree,
 };
 
-static int hypfs_dbfs_init(void)
-{
-	dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d204_ops);
-	if (IS_ERR(dbfs_d204_file))
-		return PTR_ERR(dbfs_d204_file);
-	return 0;
-}
-
 __init int hypfs_diag_init(void)
 {
 	int rc;
@@ -639,7 +596,7 @@
 		return -ENODATA;
 	}
 	if (diag204_info_type == INFO_EXT) {
-		rc = hypfs_dbfs_init();
+		rc = hypfs_dbfs_create_file(&dbfs_file_d204);
 		if (rc)
 			return rc;
 	}
@@ -660,6 +617,7 @@
 	debugfs_remove(dbfs_d204_file);
 	diag224_delete_name_table();
 	diag204_free_buffer();
+	hypfs_dbfs_remove_file(&dbfs_file_d204);
 }
 
 /*
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 26cf177..e547960 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -20,8 +20,6 @@
 static char all_guests[] = "*       ";
 static char *guest_query;
 
-static struct dentry *dbfs_d2fc_file;
-
 struct diag2fc_data {
 	__u32 version;
 	__u32 flags;
@@ -104,7 +102,7 @@
 	return data;
 }
 
-static void diag2fc_free(void *data)
+static void diag2fc_free(const void *data)
 {
 	vfree(data);
 }
@@ -239,43 +237,29 @@
 	char			buf[];	/* d2fc buffer */
 } __attribute__ ((packed));
 
-static int dbfs_d2fc_open(struct inode *inode, struct file *file)
+static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
 {
-	struct dbfs_d2fc *data;
+	struct dbfs_d2fc *d2fc;
 	unsigned int count;
 
-	data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
-	if (IS_ERR(data))
-		return PTR_ERR(data);
-	get_clock_ext(data->hdr.tod_ext);
-	data->hdr.len = count * sizeof(struct diag2fc_data);
-	data->hdr.version = DBFS_D2FC_HDR_VERSION;
-	data->hdr.count = count;
-	memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
-	file->private_data = data;
-	return nonseekable_open(inode, file);
-}
-
-static int dbfs_d2fc_release(struct inode *inode, struct file *file)
-{
-	diag2fc_free(file->private_data);
+	d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
+	if (IS_ERR(d2fc))
+		return PTR_ERR(d2fc);
+	get_clock_ext(d2fc->hdr.tod_ext);
+	d2fc->hdr.len = count * sizeof(struct diag2fc_data);
+	d2fc->hdr.version = DBFS_D2FC_HDR_VERSION;
+	d2fc->hdr.count = count;
+	memset(&d2fc->hdr.reserved, 0, sizeof(d2fc->hdr.reserved));
+	*data = d2fc;
+	*data_free_ptr = d2fc;
+	*size = d2fc->hdr.len + sizeof(struct dbfs_d2fc_hdr);
 	return 0;
 }
 
-static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
-				    size_t size, loff_t *ppos)
-{
-	struct dbfs_d2fc *data = file->private_data;
-
-	return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
-				       sizeof(struct dbfs_d2fc_hdr));
-}
-
-static const struct file_operations dbfs_d2fc_ops = {
-	.open		= dbfs_d2fc_open,
-	.read		= dbfs_d2fc_read,
-	.release	= dbfs_d2fc_release,
-	.llseek		= no_llseek,
+static struct hypfs_dbfs_file dbfs_file_2fc = {
+	.name		= "diag_2fc",
+	.data_create	= dbfs_diag2fc_create,
+	.data_free	= diag2fc_free,
 };
 
 int hypfs_vm_init(void)
@@ -288,18 +272,12 @@
 		guest_query = local_guest;
 	else
 		return -EACCES;
-
-	dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
-					     NULL, &dbfs_d2fc_ops);
-	if (IS_ERR(dbfs_d2fc_file))
-		return PTR_ERR(dbfs_d2fc_file);
-
-	return 0;
+	return hypfs_dbfs_create_file(&dbfs_file_2fc);
 }
 
 void hypfs_vm_exit(void)
 {
 	if (!MACHINE_IS_VM)
 		return;
-	debugfs_remove(dbfs_d2fc_file);
+	hypfs_dbfs_remove_file(&dbfs_file_2fc);
 }
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 47cc446..6fe874f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -46,8 +46,6 @@
 /* start of list of all dentries, which have to be deleted on update */
 static struct dentry *hypfs_last_dentry;
 
-struct dentry *hypfs_dbfs_dir;
-
 static void hypfs_update_update(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -471,13 +469,12 @@
 {
 	int rc;
 
-	hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
-	if (IS_ERR(hypfs_dbfs_dir))
-		return PTR_ERR(hypfs_dbfs_dir);
-
+	rc = hypfs_dbfs_init();
+	if (rc)
+		return rc;
 	if (hypfs_diag_init()) {
 		rc = -ENODATA;
-		goto fail_debugfs_remove;
+		goto fail_dbfs_exit;
 	}
 	if (hypfs_vm_init()) {
 		rc = -ENODATA;
@@ -499,9 +496,8 @@
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
-fail_debugfs_remove:
-	debugfs_remove(hypfs_dbfs_dir);
-
+fail_dbfs_exit:
+	hypfs_dbfs_exit();
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
@@ -510,7 +506,7 @@
 {
 	hypfs_diag_exit();
 	hypfs_vm_exit();
-	debugfs_remove(hypfs_dbfs_dir);
+	hypfs_dbfs_exit();
 	unregister_filesystem(&hypfs_type);
 	kobject_put(s390_kobj);
 }
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index e850111..ff6f62e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -204,6 +204,8 @@
 			    unsigned long, u8, int);
 int ccw_device_tm_intrg(struct ccw_device *cdev);
 
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask);
+
 extern int ccw_device_set_online(struct ccw_device *cdev);
 extern int ccw_device_set_offline(struct ccw_device *cdev);
 
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index a875c2f..da359ca 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -169,7 +169,7 @@
 
 static inline int is_compat_task(void)
 {
-	return test_thread_flag(TIF_31BIT);
+	return is_32bit_task();
 }
 
 #else
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 40e2ab0..0814348 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -202,7 +202,7 @@
 
 static inline int s390_nohz_delay(int cpu)
 {
-	return per_cpu(s390_idle, cpu).nohz_delay != 0;
+	return __get_cpu_var(s390_idle).nohz_delay != 0;
 }
 
 #define arch_needs_cpu(cpu) s390_nohz_delay(cpu)
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index b604a91..0be28ef 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -73,6 +73,7 @@
  * 0x02: use diag discipline (diag)
  * 0x04: set the device initially online (internal use only)
  * 0x08: enable ERP related logging
+ * 0x40: give access to raw eckd data
  */
 #define DASD_FEATURE_DEFAULT	     0x00
 #define DASD_FEATURE_READONLY	     0x01
@@ -80,6 +81,8 @@
 #define DASD_FEATURE_INITIAL_ONLINE  0x04
 #define DASD_FEATURE_ERPLOG	     0x08
 #define DASD_FEATURE_FAILFAST	     0x10
+#define DASD_FEATURE_FAILONSLCK      0x20
+#define DASD_FEATURE_USERAW	     0x40
 
 #define DASD_PARTN_BITS 2
 
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 354d426..10c029c 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -161,7 +161,9 @@
    use of this is to invoke "./ld.so someprog" to test out a new version of
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
-#define ELF_ET_DYN_BASE		(STACK_TOP / 3 * 2)
+
+extern unsigned long randomize_et_dyn(unsigned long base);
+#define ELF_ET_DYN_BASE		(randomize_et_dyn(STACK_TOP / 3 * 2))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
@@ -206,6 +208,8 @@
 	current->mm->context.noexec == 0;		\
 })
 
+#define STACK_RND_MASK	0x7ffUL
+
 #define ARCH_DLINFO							    \
 do {									    \
 	if (vdso_enabled)						    \
@@ -218,4 +222,7 @@
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 int arch_setup_additional_pages(struct linux_binprm *, int);
 
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
 #endif
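The hooks above let s390 randomize the ELF load base, brk and stack start. Their bodies live in arch/s390/kernel/process.c and are not part of this hunk; the helper below is only a hedged sketch of the conventional way a page mask such as STACK_RND_MASK is turned into a byte offset (0x7ff pages is just under 8 MiB with 4 KiB pages):

/* Hypothetical illustration, not the patch's implementation. */
static unsigned long example_random_offset(unsigned long page_mask,
					   unsigned long random_bits)
{
	/* keep only the allowed page bits, then scale to bytes */
	return (random_bits & page_mask) << 12;		/* PAGE_SHIFT == 12 */
}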
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 96c14a9..3c29be4 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -4,20 +4,17 @@
 #ifndef __ASSEMBLY__
 
 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;
 
 struct dyn_arch_ftrace { };
 
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET	  14
-#else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_INSN_SIZE  12
 #define MCOUNT_OFFSET	   8
+#else
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET	   4
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 881d945..e4155d3 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -21,20 +21,4 @@
 
 #define HARDIRQ_BITS	8
 
-void clock_comparator_work(void);
-
-static inline unsigned long long local_tick_disable(void)
-{
-	unsigned long long old;
-
-	old = S390_lowcore.clock_comparator;
-	S390_lowcore.clock_comparator = -1ULL;
-	return old;
-}
-
-static inline void local_tick_enable(unsigned long long comp)
-{
-	S390_lowcore.clock_comparator = comp;
-}
-
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 7da991a..db14a31 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,23 +1,33 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#ifdef __KERNEL__
 #include <linux/hardirq.h>
 
-/*
- * the definition of irqs has changed in 2.5.46:
- * NR_IRQS is no longer the number of i/o
- * interrupts (65536), but rather the number
- * of interrupt classes (2).
- * Only external and i/o interrupts make much sense here (CH).
- */
-
 enum interruption_class {
 	EXTERNAL_INTERRUPT,
 	IO_INTERRUPT,
-
+	EXTINT_CLK,
+	EXTINT_IPI,
+	EXTINT_TMR,
+	EXTINT_TLA,
+	EXTINT_PFL,
+	EXTINT_DSD,
+	EXTINT_VRT,
+	EXTINT_SCP,
+	EXTINT_IUC,
+	IOINT_QAI,
+	IOINT_QDI,
+	IOINT_DAS,
+	IOINT_C15,
+	IOINT_C70,
+	IOINT_TAP,
+	IOINT_VMR,
+	IOINT_LCS,
+	IOINT_CLW,
+	IOINT_CTC,
+	IOINT_APB,
+	NMI_NMI,
 	NR_IRQS,
 };
 
-#endif /* __KERNEL__ */
-#endif
+#endif /* _ASM_IRQ_H */
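Since NR_IRQS now counts interruption classes rather than individual interrupt numbers, the enum doubles as an index space, e.g. for per-class counters. A hedged sketch (the counter array is illustrative; the patch itself only defines the enum):

/* Illustrative only: index a per-class counter with the enum above. */
static unsigned int example_irq_count[NR_IRQS];

static void example_count_irq(enum interruption_class class)
{
	example_irq_count[class]++;
}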
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 330f68c..a231a94 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -31,7 +31,6 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
-#define  __ARCH_WANT_KPROBES_INSN_SLOT
 struct pt_regs;
 struct kprobe;
 
@@ -58,23 +57,12 @@
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t *insn;
-	int fixup;
-	int ilen;
-	int reg;
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
 };
 
-struct ins_replace_args {
-	kprobe_opcode_t *ptr;
-	kprobe_opcode_t old;
-	kprobe_opcode_t new;
-};
 struct prev_kprobe {
 	struct kprobe *kp;
 	unsigned long status;
-	unsigned long saved_psw;
-	unsigned long kprobe_saved_imask;
-	unsigned long kprobe_saved_ctl[3];
 };
 
 /* per-cpu kprobe control block */
@@ -82,17 +70,13 @@
 	unsigned long kprobe_status;
 	unsigned long kprobe_saved_imask;
 	unsigned long kprobe_saved_ctl[3];
-	struct pt_regs jprobe_saved_regs;
-	unsigned long jprobe_saved_r14;
-	unsigned long jprobe_saved_r15;
 	struct prev_kprobe prev_kprobe;
+	struct pt_regs jprobe_saved_regs;
 	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *p);
 void kretprobe_trampoline(void);
-int  is_prohibited_opcode(kprobe_opcode_t *instruction);
-void get_instruction_type(struct arch_specific_insn *ainsn);
 
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 int kprobe_exceptions_notify(struct notifier_block *self,
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 8d6f871..bf3de04 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -32,7 +32,6 @@
 }
 
 extern void s390_adjust_jiffies(void);
-extern void print_cpu_info(void);
 extern int get_cpu_capability(unsigned int *);
 
 /*
@@ -81,7 +80,8 @@
 	mm_segment_t mm_segment;
         unsigned long prot_addr;        /* address of protection-excep.     */
         unsigned int trap_no;
-        per_struct per_info;
+	struct per_regs per_user;	/* User specified PER registers */
+	struct per_event per_event;	/* Cause of the last PER trap */
         /* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 };
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index d9d42b1..9ad628a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -331,10 +331,60 @@
 	unsigned short ilc;
 	unsigned short svcnr;
 };
+
+/*
+ * Program event recording (PER) register set.
+ */
+struct per_regs {
+	unsigned long control;		/* PER control bits */
+	unsigned long start;		/* PER starting address */
+	unsigned long end;		/* PER ending address */
+};
+
+/*
+ * PER event contains information about the cause of the last PER exception.
+ */
+struct per_event {
+	unsigned short cause;		/* PER code, ATMID and AI */
+	unsigned long address;		/* PER address */
+	unsigned char paid;		/* PER access identification */
+};
+
+/*
+ * Simplified per_info structure used to decode the ptrace user space ABI.
+ */
+struct per_struct_kernel {
+	unsigned long cr9;		/* PER control bits */
+	unsigned long cr10;		/* PER starting address */
+	unsigned long cr11;		/* PER ending address */
+	unsigned long bits;		/* Obsolete software bits */
+	unsigned long starting_addr;	/* User specified start address */
+	unsigned long ending_addr;	/* User specified end address */
+	unsigned short perc_atmid;	/* PER trap ATMID */
+	unsigned long address;		/* PER trap instruction address */
+	unsigned char access_id;	/* PER trap access identification */
+};
+
+#define PER_EVENT_MASK			0xE9000000UL
+
+#define PER_EVENT_BRANCH		0x80000000UL
+#define PER_EVENT_IFETCH		0x40000000UL
+#define PER_EVENT_STORE			0x20000000UL
+#define PER_EVENT_STORE_REAL		0x08000000UL
+#define PER_EVENT_NULLIFICATION		0x01000000UL
+
+#define PER_CONTROL_MASK		0x00a00000UL
+
+#define PER_CONTROL_BRANCH_ADDRESS	0x00800000UL
+#define PER_CONTROL_ALTERATION		0x00200000UL
+
 #endif
 
 /*
- * Now for the program event recording (trace) definitions.
+ * Now for the user space program event recording (trace) definitions.
+ * The following structures are used only for the ptrace interface; don't
+ * touch or even look at them unless you intend to modify the user-space
+ * ptrace interface. In particular, stay away from them for in-kernel PER.
  */
 typedef struct
 {
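A hedged sketch of how the new in-kernel PER structures might be programmed to watch an address range for instruction fetches; the helper and its callers are illustrative, and the real consumers (update_per_regs() and the PER trap handling) are reworked elsewhere in this series:

/* Illustrative only: request instruction-fetch PER events for a range. */
static void example_per_ifetch(struct per_regs *regs,
			       unsigned long start, unsigned long end)
{
	regs->control = PER_EVENT_IFETCH;	/* event mask for cr9 */
	regs->start   = start;			/* cr10: first address */
	regs->end     = end;			/* cr11: last address */
}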
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 46e96bc..350e7ee 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -361,6 +361,7 @@
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
+	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
 	void **output_sbal_addr_array;
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
index 1a9307e..080876d 100644
--- a/arch/s390/include/asm/s390_ext.h
+++ b/arch/s390/include/asm/s390_ext.h
@@ -1,32 +1,17 @@
+/*
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ */
+
 #ifndef _S390_EXTINT_H
 #define _S390_EXTINT_H
 
-/*
- *  include/asm-s390/s390_ext.h
- *
- *  S390 version
- *    Copyright IBM Corp. 1999,2007
- *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-
 #include <linux/types.h>
 
 typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
 
-typedef struct ext_int_info_t {
-	struct ext_int_info_t *next;
-	ext_int_handler_t handler;
-	__u16 code;
-} ext_int_info_t;
-
-extern ext_int_info_t *ext_int_hash[];
-
 int register_external_interrupt(__u16 code, ext_int_handler_t handler);
-int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-				      ext_int_info_t *info);
 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
-int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-					ext_int_info_t *info);
 
-#endif
+#endif /* _S390_EXTINT_H */
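With the early/late split gone, registering an external interrupt handler is a single call. A hedged usage sketch (the handler body is a placeholder and 0x1004 is used here only as an example interruption code):

/* Illustrative only: hook an external interruption code. */
static void example_ext_handler(unsigned int ext_int_code,
				unsigned int param32, unsigned long param64)
{
	/* react to the external interruption */
}

static int __init example_ext_init(void)
{
	return register_external_interrupt(0x1004, example_ext_handler);
}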
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index edc03cb..045e009 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -20,7 +20,6 @@
 
 extern int __cpu_disable (void);
 extern void __cpu_die (unsigned int cpu);
-extern void cpu_die (void) __attribute__ ((noreturn));
 extern int __cpu_up (unsigned int cpu);
 
 extern struct mutex smp_cpu_state_mutex;
@@ -71,8 +70,10 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
+extern void __noreturn cpu_die(void);
 #else
 static inline int smp_rescan_cpus(void) { return 0; }
+static inline void cpu_die(void) { }
 #endif
 
 #endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3ad16db..8f8d759 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,6 +20,7 @@
 struct task_struct;
 
 extern struct task_struct *__switch_to(void *, void *);
+extern void update_per_regs(struct task_struct *task);
 
 static inline void save_fp_regs(s390_fp_regs *fpregs)
 {
@@ -93,6 +94,7 @@
 	if (next->mm) {							\
 		restore_fp_regs(&next->thread.fp_regs);			\
 		restore_access_regs(&next->thread.acrs[0]);		\
+		update_per_regs(next);					\
 	}								\
 	prev = __switch_to(prev,next);					\
 } while (0)
@@ -101,11 +103,9 @@
 extern void account_tick_vtime(struct task_struct *);
 
 #ifdef CONFIG_PFAULT
-extern void pfault_irq_init(void);
 extern int pfault_init(void);
 extern void pfault_fini(void);
 #else /* CONFIG_PFAULT */
-#define pfault_irq_init()	do { } while (0)
 #define pfault_init()		({-1;})
 #define pfault_fini()		do { } while (0)
 #endif /* CONFIG_PFAULT */
@@ -449,7 +449,7 @@
 extern void (*_machine_halt)(void);
 extern void (*_machine_power_off)(void);
 
-#define arch_align_stack(x) (x)
+extern unsigned long arch_align_stack(unsigned long sp);
 
 static inline int tprot(unsigned long addr)
 {
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 5baf023..ad1382f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -74,7 +74,7 @@
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
-	return (struct thread_info *)(S390_lowcore.kernel_stack - THREAD_SIZE);
+	return (struct thread_info *) S390_lowcore.thread_info;
 }
 
 #define THREAD_SIZE_ORDER THREAD_ORDER
@@ -88,7 +88,7 @@
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_RESTART_SVC		4	/* restart svc with new svc number */
-#define TIF_SINGLE_STEP		6	/* deliver sigtrap on return to user */
+#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
@@ -99,14 +99,15 @@
 #define TIF_31BIT		17	/* 32bit process */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
-#define TIF_FREEZE		20	/* thread is freezing for suspend */
+#define TIF_SINGLE_STEP		20	/* This task is single stepped */
+#define TIF_FREEZE		21	/* thread is freezing for suspend */
 
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_RESTART_SVC	(1<<TIF_RESTART_SVC)
-#define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
+#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
@@ -114,8 +115,15 @@
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
+#define _TIF_SINGLE_STEP	(1<<TIF_SINGLE_STEP)
 #define _TIF_FREEZE		(1<<TIF_FREEZE)
 
+#ifdef CONFIG_64BIT
+#define is_32bit_task()		(test_thread_flag(TIF_31BIT))
+#else
+#define is_32bit_task()		(1)
+#endif
+
 #endif /* __KERNEL__ */
 
 #define PREEMPT_ACTIVE		0x4000000
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 09d345a..88829a4 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -11,6 +11,8 @@
 #ifndef _ASM_S390_TIMEX_H
 #define _ASM_S390_TIMEX_H
 
+#include <asm/lowcore.h>
+
 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
 
@@ -49,6 +51,24 @@
 	asm volatile("stckc %0" : "=Q" (*time));
 }
 
+void clock_comparator_work(void);
+
+static inline unsigned long long local_tick_disable(void)
+{
+	unsigned long long old;
+
+	old = S390_lowcore.clock_comparator;
+	S390_lowcore.clock_comparator = -1ULL;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+	return old;
+}
+
+static inline void local_tick_enable(unsigned long long comp)
+{
+	S390_lowcore.clock_comparator = comp;
+	set_clock_comparator(S390_lowcore.clock_comparator);
+}
+
 #define CLOCK_TICK_RATE	1193180 /* Underlying HZ */
 
 typedef unsigned long long cycles_t;
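The two helpers above now also write the clock comparator immediately, so callers get a simple save/restore pair. A hedged sketch of the intended pattern (the critical-section callback is illustrative):

/* Illustrative only: run fn() with the clock comparator suppressed. */
static void example_without_tick(void (*fn)(void))
{
	unsigned long long saved;

	saved = local_tick_disable();	/* comparator parked at -1ULL */
	fn();
	local_tick_enable(saved);	/* restore previous comparator */
}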
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 33982e7..fe03c14 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,14 +23,16 @@
 {
 	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
 	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-	DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
 	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
 	BLANK();
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
-	DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
-	DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
+	DEFINE(__THREAD_per_cause,
+	       offsetof(struct task_struct, thread.per_event.cause));
+	DEFINE(__THREAD_per_address,
+	       offsetof(struct task_struct, thread.per_event.address));
+	DEFINE(__THREAD_per_paid,
+	       offsetof(struct task_struct, thread.per_event.paid));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
@@ -85,9 +87,9 @@
 	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
 	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
 	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
-	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid));
+	DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
 	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
-	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
+	DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
 	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
 	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
 	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 3141025..12b8238 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -4,40 +4,19 @@
 #include <asm/ptrace.h>    /* needed for NUM_CR_WORDS */
 #include "compat_linux.h"  /* needed for psw_compat_t */
 
-typedef struct {
-	__u32 cr[NUM_CR_WORDS];
-} per_cr_words32;
+struct compat_per_struct_kernel {
+	__u32 cr9;		/* PER control bits */
+	__u32 cr10;		/* PER starting address */
+	__u32 cr11;		/* PER ending address */
+	__u32 bits;		/* Obsolete software bits */
+	__u32 starting_addr;	/* User specified start address */
+	__u32 ending_addr;	/* User specified end address */
+	__u16 perc_atmid;	/* PER trap ATMID */
+	__u32 address;		/* PER trap instruction address */
+	__u8  access_id;	/* PER trap access identification */
+};
 
-typedef struct {
-	__u16          perc_atmid;          /* 0x096 */
-	__u32          address;             /* 0x098 */
-	__u8           access_id;           /* 0x0a1 */
-} per_lowcore_words32;
-
-typedef struct {
-	union {
-		per_cr_words32   words;
-	} control_regs;
-	/*
-	 * Use these flags instead of setting em_instruction_fetch
-	 * directly they are used so that single stepping can be
-	 * switched on & off while not affecting other tracing
-	 */
-	unsigned  single_step       : 1;
-	unsigned  instruction_fetch : 1;
-	unsigned                    : 30;
-	/*
-	 * These addresses are copied into cr10 & cr11 if single
-	 * stepping is switched off
-	 */
-	__u32     starting_addr;
-	__u32     ending_addr;
-	union {
-		per_lowcore_words32 words;
-	} lowcore; 
-} per_struct32;
-
-struct user_regs_struct32
+struct compat_user_regs_struct
 {
 	psw_compat_t psw;
 	u32 gprs[NUM_GPRS];
@@ -50,14 +29,14 @@
 	 * itself as there is no "official" ptrace interface for hardware
 	 * watchpoints. This is the way intel does it.
 	 */
-	per_struct32 per_info;
+	struct compat_per_struct_kernel per_info;
 	u32  ieee_instruction_pointer;	/* obsolete, always 0 */
 };
 
-struct user32 {
+struct compat_user {
 	/* We start with the registers, to mimic the way that "memory"
 	   is returned from the ptrace(3,...) function.  */
-	struct user_regs_struct32 regs; /* Where the registers are actually stored */
+	struct compat_user_regs_struct regs;
 	/* The rest of this junk is to help gdb figure out what goes where */
 	u32 u_tsize;		/* Text segment size (pages). */
 	u32 u_dsize;	        /* Data segment size (pages). */
@@ -79,6 +58,6 @@
 	__u32   len;
 	__u32   kernel_addr;
 	__u32   process_addr;
-} ptrace_area_emu31;
+} compat_ptrace_area;
 
 #endif /* _PTRACE32_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1ecc337..648f642 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,7 +9,6 @@
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/sys.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/cache.h>
@@ -49,7 +48,7 @@
 SP_SIZE      =	STACK_FRAME_OVERHEAD + __PT_SIZE
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -110,31 +109,36 @@
 1:	stm	%r10,%r11,\lc_sum
 	.endm
 
+	.macro	SAVE_ALL_SVC psworg,savearea
+	stm	%r12,%r15,\savearea
+	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
+	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
+	.endm
+
 	.macro	SAVE_ALL_BASE savearea
 	stm	%r12,%r15,\savearea
 	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 	.endm
 
-	.macro	SAVE_ALL_SVC psworg,savearea
-	la	%r12,\psworg
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	.endm
-
-	.macro	SAVE_ALL_SYNC psworg,savearea
-	la	%r12,\psworg
+	.macro	SAVE_ALL_PGM psworg,savearea
 	tm	\psworg+1,0x01		# test problem state bit
-	bz	BASED(2f)		# skip stack setup save
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
 #ifdef CONFIG_CHECK_STACK
-	b	BASED(3f)
-2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bz	BASED(stack_overflow)
-3:
+	bnz	BASED(1f)
+	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bnz	BASED(2f)
+	la	%r12,\psworg
+	b	BASED(stack_overflow)
+#else
+	bz	BASED(2f)
 #endif
-2:
+1:	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
 	.macro	SAVE_ALL_ASYNC psworg,savearea
+	stm	%r12,%r15,\savearea
+	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 	la	%r12,\psworg
 	tm	\psworg+1,0x01		# test problem state bit
 	bnz	BASED(1f)		# from user -> load async stack
@@ -149,27 +153,23 @@
 0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
 	slr	%r14,%r15
 	sra	%r14,STACK_SHIFT
-	be	BASED(2f)
-1:	l	%r15,__LC_ASYNC_STACK
 #ifdef CONFIG_CHECK_STACK
-	b	BASED(3f)
-2:	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bz	BASED(stack_overflow)
-3:
+	bnz	BASED(1f)
+	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
+	bnz	BASED(2f)
+	b	BASED(stack_overflow)
+#else
+	bz	BASED(2f)
 #endif
-2:
+1:	l	%r15,__LC_ASYNC_STACK
+2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
-	.macro	CREATE_STACK_FRAME psworg,savearea
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	.macro	CREATE_STACK_FRAME savearea
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	icm	%r12,12,__LC_SVC_ILC
-	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
-	st	%r12,SP_ILC(%r15)
 	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
-	la	%r12,0
-	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
+	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
 	.endm
 
 	.macro	RESTORE_ALL psworg,sync
@@ -188,6 +188,8 @@
 	ssm	__SF_EMPTY(%r15)
 	.endm
 
+	.section .kprobes.text, "ax"
+
 /*
  * Scheduler resume function, called by switch_to
  *  gpr2 = (task_struct *) prev
@@ -198,31 +200,21 @@
 	.globl	__switch_to
 __switch_to:
 	basr	%r1,0
-__switch_to_base:
-	tm	__THREAD_per(%r3),0xe8		# new process is using per ?
-	bz	__switch_to_noper-__switch_to_base(%r1)	# if not we're fine
-	stctl	%c9,%c11,__SF_EMPTY(%r15)	# We are using per stuff
-	clc	__THREAD_per(12,%r3),__SF_EMPTY(%r15)
-	be	__switch_to_noper-__switch_to_base(%r1)	# we got away w/o bashing TLB's
-	lctl	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
-__switch_to_noper:
-	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+0:	l	%r4,__THREAD_info(%r2)		# get thread_info of prev
+	l	%r5,__THREAD_info(%r3)		# get thread_info of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
-	bz	__switch_to_no_mcck-__switch_to_base(%r1)
-	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-	l	%r4,__THREAD_info(%r3)		# get thread_info of next
-	oi	__TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
-__switch_to_no_mcck:
-	stm	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
-	st	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
-	l	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
-	lm	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
-	st	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
-	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
-	l	%r3,__THREAD_info(%r3)	# load thread_info from task struct
-	st	%r3,__LC_THREAD_INFO
-	ahi	%r3,STACK_SIZE
-	st	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	bz	1f-0b(%r1)
+	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
+	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
+1:	stm	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	st	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
+	l	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	st	%r3,__LC_CURRENT		# store task struct of next
+	st	%r5,__LC_THREAD_INFO		# store thread info of next
+	ahi	%r5,STACK_SIZE			# end of kernel stack of next
+	st	%r5,__LC_KERNEL_STACK		# store end of kernel stack
 	br	%r14
 
 __critical_start:
@@ -235,10 +227,11 @@
 system_call:
 	stpt	__LC_SYNC_ENTER_TIMER
 sysc_saveall:
-	SAVE_ALL_BASE __LC_SAVE_AREA
 	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	lh	%r7,0x8a	  # get svc number from lowcore
+	CREATE_STACK_FRAME __LC_SAVE_AREA
+	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
+	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 sysc_vtime:
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 sysc_stime:
@@ -246,20 +239,20 @@
 sysc_update:
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 sysc_do_svc:
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	ltr	%r7,%r7			# test for svc 0
+	xr	%r7,%r7
+	icm	%r7,3,SP_SVCNR(%r15)	# load svc number and test for svc 0
 	bnz	BASED(sysc_nr_ok)	# svc number > 0
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_nr_ok)
+	sth	%r1,SP_SVCNR(%r15)
 	lr	%r7,%r1 	  # copy svc number to %r7
 sysc_nr_ok:
-	sth	%r7,SP_SVCNR(%r15)
 	sll	%r7,2		  # svc number *4
-	l	%r8,BASED(.Lsysc_table)
-	tm	__TI_flags+2(%r9),_TIF_SYSCALL
+	l	%r10,BASED(.Lsysc_table)
+	tm	__TI_flags+2(%r12),_TIF_SYSCALL
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
-	l	%r8,0(%r7,%r8)	  # get system call addr.
+	l	%r8,0(%r7,%r10)	  # get system call addr.
 	bnz	BASED(sysc_tracesys)
 	basr	%r14,%r8	  # call sys_xxxx
 	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
@@ -267,7 +260,7 @@
 sysc_return:
 	LOCKDEP_SYS_EXIT
 sysc_tif:
-	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
+	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
 sysc_restore:
 	RESTORE_ALL __LC_RETURN_PSW,1
@@ -284,17 +277,17 @@
 # One of the work bits is on. Find out which one.
 #
 sysc_work_tif:
-	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
+	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
-	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 	bo	BASED(sysc_reschedule)
-	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
 	bo	BASED(sysc_sigpending)
-	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
+	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	bo	BASED(sysc_notify_resume)
-	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
-	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 	bo	BASED(sysc_singlestep)
 	b	BASED(sysc_return)	# beware of critical section cleanup
 
@@ -318,13 +311,13 @@
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_signal)
 	basr	%r14,%r1		# call do_signal
-	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
+	tm	__TI_flags+3(%r12),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
-	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
+	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
 	bo	BASED(sysc_singlestep)
 	b	BASED(sysc_return)
 
@@ -342,23 +335,23 @@
 # _TIF_RESTART_SVC is set, set up registers and restart svc
 #
 sysc_restart:
-	ni	__TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+	ni	__TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
 	l	%r7,SP_R2(%r15) 	# load new svc number
 	mvc	SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
 	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
+	sth	%r7,SP_SVCNR(%r15)
 	b	BASED(sysc_nr_ok)	# restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-	ni	__TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
-	mvi	SP_SVCNR(%r15),0xff	# set trap indication to pgm check
-	mvi	SP_SVCNR+1(%r15),0xff
+	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
+	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
 	la	%r14,BASED(sysc_return)	# load adr. of system return
-	br	%r1			# branch to do_single_step
+	br	%r1			# branch to do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -368,15 +361,15 @@
 	l	%r1,BASED(.Ltrace_entry)
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	la	%r3,0
-	srl	%r7,2
-	st	%r7,SP_R2(%r15)
+	xr	%r0,%r0
+	icm	%r0,3,SP_SVCNR(%r15)
+	st	%r0,SP_R2(%r15)
 	basr	%r14,%r1
 	cl	%r2,BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
-	l	%r8,BASED(.Lsysc_table)
 	lr	%r7,%r2
 	sll	%r7,2			# svc number *4
-	l	%r8,0(%r7,%r8)
+	l	%r8,0(%r7,%r10)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
@@ -384,7 +377,7 @@
 	basr	%r14,%r8		# call sys_xxx
 	st	%r2,SP_R2(%r15)		# store return value
 sysc_tracenogo:
-	tm	__TI_flags+2(%r9),_TIF_SYSCALL
+	tm	__TI_flags+2(%r12),_TIF_SYSCALL
 	bz	BASED(sysc_return)
 	l	%r1,BASED(.Ltrace_exit)
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
@@ -397,7 +390,7 @@
 	.globl	ret_from_fork
 ret_from_fork:
 	l	%r13,__LC_SVC_NEW_PSW+4
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
 	bo	BASED(0f)
 	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
@@ -432,8 +425,8 @@
 0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
 	l	%r15,__LC_KERNEL_STACK	# load ksp
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	l	%r9,__LC_THREAD_INFO
 	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
+	l	%r12,__LC_THREAD_INFO
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	l	%r1,BASED(.Lexecve_tail)
@@ -463,26 +456,27 @@
 	SAVE_ALL_BASE __LC_SAVE_AREA
 	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
 	bnz	BASED(pgm_per)		# got per exception -> special case
-	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+	CREATE_STACK_FRAME __LC_SAVE_AREA
+	xc	SP_ILC(4,%r15),SP_ILC(%r15)
+	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(pgm_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime:
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	l	%r4,__LC_TRANS_EXC_CODE
 	REENABLE_IRQS
 	la	%r8,0x7f
 	nr	%r8,%r3
-pgm_do_call:
-	l	%r7,BASED(.Ljump_table)
 	sll	%r8,2
-	l	%r7,0(%r8,%r7)		# load address of handler routine
+	l	%r1,BASED(.Ljump_table)
+	l	%r1,0(%r8,%r1)		# load address of handler routine
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r7		# branch to interrupt-handler
+	basr	%r14,%r1		# branch to interrupt-handler
 pgm_exit:
 	b	BASED(sysc_return)
 
@@ -503,33 +497,34 @@
 # Normal per exception
 #
 pgm_per_std:
-	SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
+	CREATE_STACK_FRAME __LC_SAVE_AREA
+	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(pgm_no_vtime2)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime2:
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	l	%r1,__TI_task(%r9)
+	l	%r1,__TI_task(%r12)
 	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
 	bz	BASED(kernel_per)
-	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
-	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	l	%r4,__LC_TRANS_EXC_CODE
 	REENABLE_IRQS
 	la	%r8,0x7f
 	nr	%r8,%r3 		# clear per-event-bit and ilc
 	be	BASED(pgm_exit2)	# only per or per+check ?
-	l	%r7,BASED(.Ljump_table)
 	sll	%r8,2
-	l	%r7,0(%r8,%r7)		# load address of handler routine
+	l	%r1,BASED(.Ljump_table)
+	l	%r1,0(%r8,%r1)		# load address of handler routine
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r7		# branch to interrupt-handler
+	basr	%r14,%r1		# branch to interrupt-handler
 pgm_exit2:
 	b	BASED(sysc_return)
 
@@ -537,18 +532,19 @@
 # it was a single stepped SVC that is causing all the trouble
 #
 pgm_svcper:
-	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
+	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
+	CREATE_STACK_FRAME __LC_SAVE_AREA
+	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
+	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	lh	%r7,0x8a		# get svc number from lowcore
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	l	%r8,__TI_task(%r9)
-	mvc	__THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
-	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	l	%r8,__TI_task(%r12)
+	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(4,%r8),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	b	BASED(sysc_do_svc)
@@ -558,8 +554,7 @@
 #
 kernel_per:
 	REENABLE_IRQS
-	mvi	SP_SVCNR(%r15),0xff	# set trap indication to pgm check
-	mvi	SP_SVCNR+1(%r15),0xff
+	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
 	basr	%r14,%r1		# branch to do_single_step
@@ -573,9 +568,10 @@
 io_int_handler:
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
-	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
+	CREATE_STACK_FRAME __LC_SAVE_AREA+16
+	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(io_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -583,7 +579,6 @@
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 io_no_vtime:
 	TRACE_IRQS_OFF
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	basr	%r14,%r1		# branch to standard irq handler
@@ -591,7 +586,7 @@
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
 io_tif:
-	tm	__TI_flags+3(%r9),_TIF_WORK_INT
+	tm	__TI_flags+3(%r12),_TIF_WORK_INT
 	bnz	BASED(io_work)		# there is work to do (signals etc.)
 io_restore:
 	RESTORE_ALL __LC_RETURN_PSW,0
@@ -609,9 +604,9 @@
 	bo	BASED(io_work_user)	# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
-	icm	%r0,15,__TI_precount(%r9)
+	icm	%r0,15,__TI_precount(%r12)
 	bnz	BASED(io_restore)	# preemption disabled
-	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 	bno	BASED(io_restore)
 	# switch to kernel stack
 	l	%r1,SP_R15(%r15)
@@ -645,13 +640,13 @@
 #		and _TIF_MCCK_PENDING
 #
 io_work_tif:
-	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
+	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 	bo	BASED(io_mcck_pending)
-	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
+	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
 	bo	BASED(io_reschedule)
-	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
+	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
 	bo	BASED(io_sigpending)
-	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
+	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
 	bo	BASED(io_notify_resume)
 	b	BASED(io_return)	# beware of critical section cleanup
 
@@ -711,16 +706,16 @@
 ext_int_handler:
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
-	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
+	CREATE_STACK_FRAME __LC_SAVE_AREA+16
+	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
 	bz	BASED(ext_no_vtime)
 	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
 ext_no_vtime:
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	TRACE_IRQS_OFF
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
@@ -775,7 +770,10 @@
 	sra	%r14,PAGE_SHIFT
 	be	BASED(0f)
 	l	%r15,__LC_PANIC_STACK	# load panic stack
-0:	CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
+0:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
+	CREATE_STACK_FRAME __LC_SAVE_AREA+32
+	mvc	SP_PSW(8,%r15),0(%r12)
+	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	__LC_MCCK_CODE+2,0x08	# mwp of old psw valid?
 	bno	BASED(mcck_no_vtime)	# no -> skip cleanup critical
 	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
@@ -784,7 +782,6 @@
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
 mcck_no_vtime:
-	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ls390_mcck)
 	basr	%r14,%r1		# call machine check handler
@@ -796,7 +793,7 @@
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lr	%r15,%r1
 	stosm	__SF_EMPTY(%r15),0x04	# turn dat on
-	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
+	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
 	bno	BASED(mcck_return)
 	TRACE_IRQS_OFF
 	l	%r1,BASED(.Ls390_handle_mcck)
@@ -861,6 +858,8 @@
 restart_go:
 #endif
 
+	.section .kprobes.text, "ax"
+
 #ifdef CONFIG_CHECK_STACK
 /*
  * The synchronous or the asynchronous stack overflowed. We are dead.
@@ -943,12 +942,13 @@
 	bh	BASED(0f)
 	mvc	__LC_SAVE_AREA(16),0(%r12)
 0:	st	%r13,4(%r12)
-	st	%r12,__LC_SAVE_AREA+48	# argh
-	SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	l	%r12,__LC_SAVE_AREA+48	# argh
+	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
+	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	st	%r15,12(%r12)
-	lh	%r7,0x8a
+	CREATE_STACK_FRAME __LC_SAVE_AREA
+	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
+	mvc	SP_ILC(4,%r15),__LC_SVC_ILC
+	mvc	0(4,%r12),__LC_THREAD_INFO
 cleanup_vtime:
 	clc	__LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
 	bhe	BASED(cleanup_stime)
@@ -1046,7 +1046,7 @@
 .Ldo_signal:	.long	do_signal
 .Ldo_notify_resume:
 		.long	do_notify_resume
-.Lhandle_per:	.long	do_single_step
+.Lhandle_per:	.long	do_per_trap
 .Ldo_execve:	.long	do_execve
 .Lexecve_tail:	.long	execve_tail
 .Ljump_table:	.long	pgm_check_table
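
The entry.S rework above keeps the thread_info pointer cached in %r12 across the whole system-call and interrupt paths (it used to be reloaded into %r9), and renames the single-step flag from _TIF_SINGLE_STEP to _TIF_PER_TRAP. A rough standalone C sketch of what the repeated "tm __TI_flags+3(%r12),<flag>" tests amount to follows; it is not part of the patch, and the flag value is made up rather than the real _TIF_* bit assignment.

/* Illustration only, not part of the patch. "tm" tests selected bits of
 * one byte; __TI_flags+3 is the least significant byte of the 32-bit
 * flags word on the big-endian 31-bit kernel (entry64.S below tests
 * __TI_flags+7 of the 64-bit flags word instead).
 */
#include <stdio.h>

struct thread_info_sketch {
	unsigned int flags;			/* __TI_flags */
};

#define TIF_PER_TRAP_EXAMPLE	0x04		/* made-up bit, for illustration */

static int tif_test(const struct thread_info_sketch *ti, unsigned int mask)
{
	return (ti->flags & mask) != 0;		/* tm followed by bo/bnz */
}

int main(void)
{
	struct thread_info_sketch ti = { .flags = TIF_PER_TRAP_EXAMPLE };

	if (tif_test(&ti, TIF_PER_TRAP_EXAMPLE))
		printf("branch to sysc_singlestep\n");
	return 0;
}
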
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 95c1dfc..17a6f83 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -12,7 +12,7 @@
 
 extern int sysctl_userprocess_debug;
 
-void do_single_step(struct pt_regs *regs);
+void do_per_trap(struct pt_regs *regs);
 void syscall_trace(struct pt_regs *regs, int entryexit);
 void kernel_stack_overflow(struct pt_regs * regs);
 void do_signal(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 8f3e802..9d3603d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -51,7 +51,7 @@
 STACK_SIZE  = 1 << STACK_SHIFT
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
+		 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING)
 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
@@ -197,6 +197,8 @@
 	ssm	__SF_EMPTY(%r15)
 	.endm
 
+	.section .kprobes.text, "ax"
+
 /*
  * Scheduler resume function, called by switch_to
  *  gpr2 = (task_struct *) prev
@@ -206,30 +208,21 @@
  */
 	.globl	__switch_to
 __switch_to:
-	tm	__THREAD_per+4(%r3),0xe8 # is the new process using per ?
-	jz	__switch_to_noper		# if not we're fine
-	stctg	%c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
-	clc	__THREAD_per(24,%r3),__SF_EMPTY(%r15)
-	je	__switch_to_noper	     # we got away without bashing TLB's
-	lctlg	%c9,%c11,__THREAD_per(%r3)	# Nope we didn't
-__switch_to_noper:
-	lg	%r4,__THREAD_info(%r2)		    # get thread_info of prev
+	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
+	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
 	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
-	jz	__switch_to_no_mcck
-	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-	lg	%r4,__THREAD_info(%r3)		    # get thread_info of next
-	oi	__TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
-__switch_to_no_mcck:
-	stmg	%r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
-	stg	%r15,__THREAD_ksp(%r2)	# store kernel stack to prev->tss.ksp
-	lg	%r15,__THREAD_ksp(%r3)	# load kernel stack from next->tss.ksp
-	lmg	%r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
-	stg	%r3,__LC_CURRENT	# __LC_CURRENT = current task struct
-	lctl	%c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
-	lg	%r3,__THREAD_info(%r3)	# load thread_info from task struct
-	stg	%r3,__LC_THREAD_INFO
-	aghi	%r3,STACK_SIZE
-	stg	%r3,__LC_KERNEL_STACK	# __LC_KERNEL_STACK = new kernel stack
+	jz	0f
+	ni	__TI_flags+7(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
+	oi	__TI_flags+7(%r5),_TIF_MCCK_PENDING	# set it in next
+0:	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
+	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
+	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
+	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
+	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
+	stg	%r3,__LC_CURRENT		# store task struct of next
+	stg	%r5,__LC_THREAD_INFO		# store thread info of next
+	aghi	%r5,STACK_SIZE			# end of kernel stack of next
+	stg	%r5,__LC_KERNEL_STACK		# store end of kernel stack
 	br	%r14
 
 __critical_start:
@@ -309,7 +302,7 @@
 	jo	sysc_notify_resume
 	tm	__TI_flags+7(%r12),_TIF_RESTART_SVC
 	jo	sysc_restart
-	tm	__TI_flags+7(%r12),_TIF_SINGLE_STEP
+	tm	__TI_flags+7(%r12),_TIF_PER_TRAP
 	jo	sysc_singlestep
 	j	sysc_return		# beware of critical section cleanup
 
@@ -331,12 +324,12 @@
 # _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
-	ni	__TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	brasl	%r14,do_signal		# call do_signal
 	tm	__TI_flags+7(%r12),_TIF_RESTART_SVC
 	jo	sysc_restart
-	tm	__TI_flags+7(%r12),_TIF_SINGLE_STEP
+	tm	__TI_flags+7(%r12),_TIF_PER_TRAP
 	jo	sysc_singlestep
 	j	sysc_return
 
@@ -361,14 +354,14 @@
 	j	sysc_nr_ok		# restart svc
 
 #
-# _TIF_SINGLE_STEP is set, call do_single_step
+# _TIF_PER_TRAP is set, call do_per_trap
 #
 sysc_singlestep:
-	ni	__TI_flags+7(%r12),255-_TIF_SINGLE_STEP	# clear TIF_SINGLE_STEP
+	ni	__TI_flags+7(%r12),255-_TIF_PER_TRAP	# clear TIF_PER_TRAP
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)		# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	larl	%r14,sysc_return	# load adr. of system return
-	jg	do_single_step		# branch to do_sigtrap
+	jg	do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
@@ -524,10 +517,10 @@
 	lg	%r1,__TI_task(%r12)
 	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
 	jz	kernel_per
-	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
-	oi	__TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(8,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
+	oi	__TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	lgf	%r3,__LC_PGM_ILC	# load program interruption code
 	lg	%r4,__LC_TRANS_EXC_CODE
 	REENABLE_IRQS
@@ -556,10 +549,10 @@
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 	LAST_BREAK
 	lg	%r8,__TI_task(%r12)
-	mvc	__THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
-	mvc	__THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
-	oi	__TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
+	mvc	__THREAD_per_address(8,%r8),__LC_PER_ADDRESS
+	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
+	oi	__TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	lmg	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	j	sysc_do_svc
@@ -571,7 +564,7 @@
 	REENABLE_IRQS
 	xc	SP_SVCNR(2,%r15),SP_SVCNR(%r15)	# clear svc number
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	brasl	%r14,do_single_step
+	brasl	%r14,do_per_trap
 	j	pgm_exit
 
 /*
@@ -868,6 +861,8 @@
 restart_go:
 #endif
 
+	.section .kprobes.text, "ax"
+
 #ifdef CONFIG_CHECK_STACK
 /*
  * The synchronous or the asynchronous stack overflowed. We are dead.
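
Both entry.S and entry64.S now place the low-level entry code into the .kprobes.text section, which kprobes treats as off limits, so the entry paths cannot be probed recursively. A minimal standalone sketch of the same effect for C code, using the section attribute that backs the kernel's __kprobes annotation, is shown below; it is an illustration, not part of the patch.

/* Illustration only: put a function into .kprobes.text the way the
 * __kprobes annotation does; kprobes refuses to insert probes into
 * that section.
 */
#include <stdio.h>

#define my_kprobes __attribute__((__section__(".kprobes.text")))

static int my_kprobes never_probed(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", never_probed(41));
	return 0;
}
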
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 6a83d05..78bdf0e 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -4,7 +4,7 @@
  * Copyright IBM Corp. 2009
  *
  *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
+ *		Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/hardirq.h>
@@ -12,176 +12,144 @@
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 
+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET 12
+#else
+#define MCOUNT_OFFSET_RET 22
+#endif
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_disable_code(void);
-void ftrace_disable_return(void);
-void ftrace_call_code(void);
-void ftrace_nop_code(void);
-
-#define FTRACE_INSN_SIZE 4
+void ftrace_enable_insn(void);
 
 #ifdef CONFIG_64BIT
-
+/*
+ * The 64-bit mcount code looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	larl	%r1,<&counter>		# offset 6
+ * >	brasl	%r14,_mcount		# offset 12
+ *	lg	%r14,8(%r15)		# offset 18
+ * Total length is 24 bytes. The middle two instructions of the mcount
+ * block get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 64-bit enabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%r15)		# offset 18
+ * The return points of the mcount/ftrace function have the same offset 18.
+ * The 64-bit disabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	jg	.+18			# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%r15)		# offset 18
+ * The jg instruction branches to offset 24 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
-	"	j	0f\n"
-	"	.word	0x0024\n"
-	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	lg	%r14,8(15)\n"
+	"	jg	0f\n"
 	"	lgr	%r0,%r0\n"
-	"0:\n");
-
-asm(
+	"	basr	%r14,%r1\n"
+	"0:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");
 
-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	stg	%r14,8(%r15)\n");
+#define FTRACE_INSN_SIZE	6
 
 #else /* CONFIG_64BIT */
-
+/*
+ * The 31-bit mcount code looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	bras	%r1,0f			# offset 4
+ * >	.long	_mcount			# offset 8
+ * >	.long	<&counter>		# offset 12
+ * > 0:	l	%r14,0(%r1)		# offset 16
+ * >	l	%r1,4(%r1)		# offset 20
+ *	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * Total length is 30 bytes. The twenty bytes starting from offset 4
+ * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 31-bit enabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ *   0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * The return points of the mcount/ftrace function have the same offset 26.
+ * The 31-bit disabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	j	.+26			# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ *   0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * The j instruction branches to offset 30 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
+	"	j	1f\n"
 	"	j	0f\n"
-	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	l	%r14,4(%r15)\n"
-	"	j	0f\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"0:\n");
-
-asm(
+	"	.fill	12,1,0x07\n"
+	"0:	basr	%r14,%r14\n"
+	"1:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");
 
-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	st	%r14,4(%r15)\n");
+#define FTRACE_INSN_SIZE	4
 
 #endif /* CONFIG_64BIT */
 
-static int ftrace_modify_code(unsigned long ip,
-			      void *old_code, int old_size,
-			      void *new_code, int new_size)
-{
-	unsigned char replaced[MCOUNT_INSN_SIZE];
-
-	/*
-	 * Note: Due to modules code can disappear and change.
-	 *  We need to protect against faulting as well as code
-	 *  changing. We do this by using the probe_kernel_*
-	 *  functions.
-	 *  This however is just a simple sanity check.
-	 */
-	if (probe_kernel_read(replaced, (void *)ip, old_size))
-		return -EFAULT;
-	if (memcmp(replaced, old_code, old_size) != 0)
-		return -EINVAL;
-	if (probe_kernel_write((void *)ip, new_code, new_size))
-		return -EPERM;
-	return 0;
-}
-
-static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
-				   unsigned long addr)
-{
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_disable_code, MCOUNT_INSN_SIZE);
-}
 
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	if (addr == MCOUNT_ADDR)
-		return ftrace_make_initial_nop(mod, rec, addr);
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_nop_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
+			       MCOUNT_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	return ftrace_modify_code(rec->ip,
-				  ftrace_nop_code, FTRACE_INSN_SIZE,
-				  ftrace_call_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
+			       FTRACE_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	ftrace_dyn_func = (unsigned long)func;
 	return 0;
 }
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	*(unsigned long *)data = 0;
+	*(unsigned long *) data = 0;
 	return 0;
 }
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-/*
- * Patch the kernel code at ftrace_graph_caller location:
- * The instruction there is branch relative on condition. The condition mask
- * is either all ones (always branch aka disable ftrace_graph_caller) or all
- * zeroes (nop aka enable ftrace_graph_caller).
- * Instruction format for brc is a7m4xxxx where m is the condition mask.
- */
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa704;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa7f4;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - (ftrace_disable_return - ftrace_disable_code);
-}
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - MCOUNT_OFFSET_RET;
-}
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
 /*
  * Hook the return address and push it in the stack of return addresses
  * in current thread info.
  */
-unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
+					      unsigned long ip)
 {
 	struct ftrace_graph_ent trace;
 
@@ -189,14 +157,42 @@
 		goto out;
 	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
-	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
 	/* Only trace if the calling function expects to. */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
 		goto out;
 	}
-	parent = (unsigned long)return_to_handler;
+	parent = (unsigned long) return_to_handler;
 out:
 	return parent;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location. The instruction
+ * there is branch relative and save to prepare_ftrace_return. To disable
+ * the call to prepare_ftrace_return we patch the bras offset to point
+ * directly after the instructions. To enable the call we calculate
+ * the original offset to prepare_ftrace_return and put it back.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short offset;
+
+	offset = ((void *) prepare_ftrace_return -
+		  (void *) ftrace_graph_caller) / 2;
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	static unsigned short offset = 0x0002;
+
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
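
The reworked graph tracer hooks patch only the 16-bit immediate of the "bras %r14,..." instruction in mcount: ftrace_enable_ftrace_graph_caller() writes the halfword distance to prepare_ftrace_return, and ftrace_disable_ftrace_graph_caller() writes 0x0002 so the branch simply falls through to the next instruction. A small standalone sketch of that offset arithmetic follows; the addresses are invented for the example.

/* Illustration only: s390 relative branches encode their target as a
 * signed halfword count from the branch instruction itself, so the
 * immediate stored at ftrace_graph_caller + 2 is the byte distance to
 * prepare_ftrace_return divided by two.
 */
#include <stdio.h>

static unsigned short bras_immediate(unsigned long insn, unsigned long target)
{
	return (unsigned short)((target - insn) / 2);
}

int main(void)
{
	unsigned long graph_caller = 0x10000100UL;	/* hypothetical address */
	unsigned long prepare_ret  = 0x10000400UL;	/* hypothetical address */

	printf("enabled immediate:  0x%04x\n",
	       bras_immediate(graph_caller, prepare_ret));
	printf("disabled immediate: 0x%04x\n", 0x0002);	/* branch to next insn */
	return 0;
}
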
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 026a37a..ea5099c 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,7 +1,5 @@
 /*
- *  arch/s390/kernel/irq.c
- *
- *    Copyright IBM Corp. 2004,2007
+ *    Copyright IBM Corp. 2004,2010
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *		 Thomas Spatzier (tspat@de.ibm.com)
  *
@@ -17,12 +15,42 @@
 #include <linux/proc_fs.h>
 #include <linux/profile.h>
 
+struct irq_class {
+	char *name;
+	char *desc;
+};
+
+static const struct irq_class intrclass_names[] = {
+	{.name = "EXT" },
+	{.name = "I/O" },
+	{.name = "CLK", .desc = "[EXT] Clock Comparator" },
+	{.name = "IPI", .desc = "[EXT] Signal Processor" },
+	{.name = "TMR", .desc = "[EXT] CPU Timer" },
+	{.name = "TAL", .desc = "[EXT] Timing Alert" },
+	{.name = "PFL", .desc = "[EXT] Pseudo Page Fault" },
+	{.name = "DSD", .desc = "[EXT] DASD Diag" },
+	{.name = "VRT", .desc = "[EXT] Virtio" },
+	{.name = "SCP", .desc = "[EXT] Service Call" },
+	{.name = "IUC", .desc = "[EXT] IUCV" },
+	{.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
+	{.name = "QDI", .desc = "[I/O] QDIO Interrupt" },
+	{.name = "DAS", .desc = "[I/O] DASD" },
+	{.name = "C15", .desc = "[I/O] 3215" },
+	{.name = "C70", .desc = "[I/O] 3270" },
+	{.name = "TAP", .desc = "[I/O] Tape" },
+	{.name = "VMR", .desc = "[I/O] Unit Record Devices" },
+	{.name = "LCS", .desc = "[I/O] LCS" },
+	{.name = "CLW", .desc = "[I/O] CLAW" },
+	{.name = "CTC", .desc = "[I/O] CTC" },
+	{.name = "APB", .desc = "[I/O] AP Bus" },
+	{.name = "NMI", .desc = "[NMI] Machine Check" },
+};
+
 /*
  * show_interrupts is needed by /proc/interrupts.
  */
 int show_interrupts(struct seq_file *p, void *v)
 {
-	static const char *intrclass_names[] = { "EXT", "I/O", };
 	int i = *(loff_t *) v, j;
 
 	get_online_cpus();
@@ -34,15 +62,16 @@
 	}
 
 	if (i < NR_IRQS) {
-		seq_printf(p, "%s: ", intrclass_names[i]);
+		seq_printf(p, "%s: ", intrclass_names[i].name);
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
+		if (intrclass_names[i].desc)
+			seq_printf(p, "  %s", intrclass_names[i].desc);
                 seq_putc(p, '\n');
-
         }
 	put_online_cpus();
         return 0;
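
The /proc/interrupts change replaces the old two-entry name array with a table of short names plus optional descriptions, printed one line per interrupt class. A simplified standalone sketch of that table-driven output is shown below; the counters and the two sample rows are invented.

/* Illustration only: table-driven /proc/interrupts lines with an
 * optional description column, as introduced above.
 */
#include <stdio.h>

struct irq_class {
	const char *name;
	const char *desc;
};

static const struct irq_class classes[] = {
	{ "EXT", NULL },
	{ "CLK", "[EXT] Clock Comparator" },
};

int main(void)
{
	unsigned int counts[] = { 117, 42 };	/* invented counters */

	for (unsigned int i = 0; i < 2; i++) {
		printf("%s: %10u ", classes[i].name, counts[i]);
		if (classes[i].desc)
			printf("  %s", classes[i].desc);
		putchar('\n');
	}
	return 0;
}
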
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 2564793..1d05d66 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -32,34 +32,14 @@
 #include <linux/slab.h>
 #include <linux/hardirq.h>
 
-DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
-struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+struct kretprobe_blackpoint kretprobe_blacklist[] = { };
 
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
-	/* Make sure the probe isn't going on a difficult instruction */
-	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
-		return -EINVAL;
-
-	if ((unsigned long)p->addr & 0x01)
-		return -EINVAL;
-
-	/* Use the get_insn_slot() facility for correctness */
-	if (!(p->ainsn.insn = get_insn_slot()))
-		return -ENOMEM;
-
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-
-	get_instruction_type(&p->ainsn);
-	p->opcode = *p->addr;
-	return 0;
-}
-
-int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
-{
-	switch (*(__u8 *) instruction) {
+	switch (insn[0] >> 8) {
 	case 0x0c:	/* bassm */
 	case 0x0b:	/* bsm	 */
 	case 0x83:	/* diag  */
@@ -68,7 +48,7 @@
 	case 0xad:	/* stosm */
 		return -EINVAL;
 	}
-	switch (*(__u16 *) instruction) {
+	switch (insn[0]) {
 	case 0x0101:	/* pr	 */
 	case 0xb25a:	/* bsa	 */
 	case 0xb240:	/* bakr  */
@@ -81,93 +61,92 @@
 	return 0;
 }
 
-void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
+static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 {
 	/* default fixup method */
-	ainsn->fixup = FIXUP_PSW_NORMAL;
+	int fixup = FIXUP_PSW_NORMAL;
 
-	/* save r1 operand */
-	ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
-
-	/* save the instruction length (pop 5-5) in bytes */
-	switch (*(__u8 *) (ainsn->insn) >> 6) {
-	case 0:
-		ainsn->ilen = 2;
-		break;
-	case 1:
-	case 2:
-		ainsn->ilen = 4;
-		break;
-	case 3:
-		ainsn->ilen = 6;
-		break;
-	}
-
-	switch (*(__u8 *) ainsn->insn) {
+	switch (insn[0] >> 8) {
 	case 0x05:	/* balr	*/
 	case 0x0d:	/* basr */
-		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		fixup = FIXUP_RETURN_REGISTER;
 		/* if r2 = 0, no branch will be taken */
-		if ((*ainsn->insn & 0x0f) == 0)
-			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
+		if ((insn[0] & 0x0f) == 0)
+			fixup |= FIXUP_BRANCH_NOT_TAKEN;
 		break;
 	case 0x06:	/* bctr	*/
 	case 0x07:	/* bcr	*/
-		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		fixup = FIXUP_BRANCH_NOT_TAKEN;
 		break;
 	case 0x45:	/* bal	*/
 	case 0x4d:	/* bas	*/
-		ainsn->fixup = FIXUP_RETURN_REGISTER;
+		fixup = FIXUP_RETURN_REGISTER;
 		break;
 	case 0x47:	/* bc	*/
 	case 0x46:	/* bct	*/
 	case 0x86:	/* bxh	*/
 	case 0x87:	/* bxle	*/
-		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+		fixup = FIXUP_BRANCH_NOT_TAKEN;
 		break;
 	case 0x82:	/* lpsw	*/
-		ainsn->fixup = FIXUP_NOT_REQUIRED;
+		fixup = FIXUP_NOT_REQUIRED;
 		break;
 	case 0xb2:	/* lpswe */
-		if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
-			ainsn->fixup = FIXUP_NOT_REQUIRED;
-		}
+		if ((insn[0] & 0xff) == 0xb2)
+			fixup = FIXUP_NOT_REQUIRED;
 		break;
 	case 0xa7:	/* bras	*/
-		if ((*ainsn->insn & 0x0f) == 0x05) {
-			ainsn->fixup |= FIXUP_RETURN_REGISTER;
-		}
+		if ((insn[0] & 0x0f) == 0x05)
+			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xc0:
-		if ((*ainsn->insn & 0x0f) == 0x00  /* larl  */
-			|| (*ainsn->insn & 0x0f) == 0x05) /* brasl */
-		ainsn->fixup |= FIXUP_RETURN_REGISTER;
+		if ((insn[0] & 0x0f) == 0x00 ||	/* larl  */
+		    (insn[0] & 0x0f) == 0x05)	/* brasl */
+		fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xeb:
-		if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 ||	/* bxhg  */
-			*(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */
-			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
-		}
+		if ((insn[2] & 0xff) == 0x44 ||	/* bxhg  */
+		    (insn[2] & 0xff) == 0x45)	/* bxleg */
+			fixup = FIXUP_BRANCH_NOT_TAKEN;
 		break;
 	case 0xe3:	/* bctg	*/
-		if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
-			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
-		}
+		if ((insn[2] & 0xff) == 0x46)
+			fixup = FIXUP_BRANCH_NOT_TAKEN;
 		break;
 	}
+	return fixup;
 }
 
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	if ((unsigned long) p->addr & 0x01)
+		return -EINVAL;
+
+	/* Make sure the probe isn't going on a difficult instruction */
+	if (is_prohibited_opcode(p->addr))
+		return -EINVAL;
+
+	p->opcode = *p->addr;
+	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+
+	return 0;
+}
+
+struct ins_replace_args {
+	kprobe_opcode_t *ptr;
+	kprobe_opcode_t opcode;
+};
+
 static int __kprobes swap_instruction(void *aref)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	unsigned long status = kcb->kprobe_status;
 	struct ins_replace_args *args = aref;
-	int rc;
 
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+	probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode));
 	kcb->kprobe_status = status;
-	return rc;
+	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -175,8 +154,7 @@
 	struct ins_replace_args args;
 
 	args.ptr = p->addr;
-	args.old = p->opcode;
-	args.new = BREAKPOINT_INSTRUCTION;
+	args.opcode = BREAKPOINT_INSTRUCTION;
 	stop_machine(swap_instruction, &args, NULL);
 }
 
@@ -185,64 +163,69 @@
 	struct ins_replace_args args;
 
 	args.ptr = p->addr;
-	args.old = BREAKPOINT_INSTRUCTION;
-	args.new = p->opcode;
+	args.opcode = p->opcode;
 	stop_machine(swap_instruction, &args, NULL);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	if (p->ainsn.insn) {
-		free_insn_slot(p->ainsn.insn, 0);
-		p->ainsn.insn = NULL;
-	}
 }
 
-static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
+					struct pt_regs *regs,
+					unsigned long ip)
 {
-	per_cr_bits kprobe_per_regs[1];
+	struct per_regs per_kprobe;
 
-	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
-	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
+	/* Set up the PER control registers %cr9-%cr11 */
+	per_kprobe.control = PER_EVENT_IFETCH;
+	per_kprobe.start = ip;
+	per_kprobe.end = ip;
 
-	/* Set up the per control reg info, will pass to lctl */
-	kprobe_per_regs[0].em_instruction_fetch = 1;
-	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
-	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
+	/* Save control regs and psw mask */
+	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+	kcb->kprobe_saved_imask = regs->psw.mask &
+		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
 
-	/* Set the PER control regs, turns on single step for this address */
-	__ctl_load(kprobe_per_regs, 9, 11);
+	/* Set PER control regs, turns on single step for the given address */
+	__ctl_load(per_kprobe, 9, 11);
 	regs->psw.mask |= PSW_MASK_PER;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
+	regs->psw.addr = ip | PSW_ADDR_AMODE;
 }
 
-static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
+					 struct pt_regs *regs,
+					 unsigned long ip)
 {
-	kcb->prev_kprobe.kp = kprobe_running();
-	kcb->prev_kprobe.status = kcb->kprobe_status;
-	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
-	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
-					sizeof(kcb->kprobe_saved_ctl));
+	/* Restore control regs and psw mask, set new psw address */
+	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+	regs->psw.mask &= ~PSW_MASK_PER;
+	regs->psw.mask |= kcb->kprobe_saved_imask;
+	regs->psw.addr = ip | PSW_ADDR_AMODE;
 }
 
-static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+/*
+ * Activate a kprobe by storing its pointer to current_kprobe. The
+ * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
+ * two kprobes can be active, see KPROBE_REENTER.
+ */
+static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
+{
+	kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	__get_cpu_var(current_kprobe) = p;
+}
+
+/*
+ * Deactivate a kprobe by backing up to the previous state. If the
+ * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
+ * for any other state prev_kprobe.kp will be NULL.
+ */
+static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
-	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
-	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
-					sizeof(kcb->kprobe_saved_ctl));
-}
-
-static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
-						struct kprobe_ctlblk *kcb)
-{
-	__get_cpu_var(current_kprobe) = p;
-	/* Save the interrupt and per flags */
-	kcb->kprobe_saved_imask = regs->psw.mask &
-		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
-	/* Save the control regs that govern PER */
-	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
 }
 
 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
@@ -251,79 +234,104 @@
 	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
 
 	/* Replace the return addr with trampoline addr */
-	regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
+}
+
+static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
+					   struct kprobe *p)
+{
+	switch (kcb->kprobe_status) {
+	case KPROBE_HIT_SSDONE:
+	case KPROBE_HIT_ACTIVE:
+		kprobes_inc_nmissed_count(p);
+		break;
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+	default:
+		/*
+		 * A kprobe on the code path to single step an instruction
+		 * is a BUG. The code path resides in the .kprobes.text
+		 * section and is executed with interrupts disabled.
+		 */
+		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
+		dump_kprobe(p);
+		BUG();
+	}
 }
 
 static int __kprobes kprobe_handler(struct pt_regs *regs)
 {
-	struct kprobe *p;
-	int ret = 0;
-	unsigned long *addr = (unsigned long *)
-		((regs->psw.addr & PSW_ADDR_INSN) - 2);
 	struct kprobe_ctlblk *kcb;
+	struct kprobe *p;
 
 	/*
-	 * We don't want to be preempted for the entire
-	 * duration of kprobe processing
+	 * We want to disable preemption for the entire duration of kprobe
+	 * processing. That includes the calls to the pre/post handlers
+	 * and single stepping the kprobe instruction.
 	 */
 	preempt_disable();
 	kcb = get_kprobe_ctlblk();
+	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
 
-	/* Check we're not actually recursing */
-	if (kprobe_running()) {
-		p = get_kprobe(addr);
-		if (p) {
-			if (kcb->kprobe_status == KPROBE_HIT_SS &&
-			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
-				regs->psw.mask &= ~PSW_MASK_PER;
-				regs->psw.mask |= kcb->kprobe_saved_imask;
-				goto no_kprobe;
-			}
-			/* We have reentered the kprobe_handler(), since
-			 * another probe was hit while within the handler.
-			 * We here save the original kprobes variables and
-			 * just single step on the instruction of the new probe
-			 * without calling any user handlers.
+	if (p) {
+		if (kprobe_running()) {
+			/*
+			 * We have hit a kprobe while another is still
+			 * active. This can happen in the pre and post
+			 * handler. Single step the instruction of the
+			 * new probe but do not call any handler function
+			 * of this secondary kprobe.
+			 * push_kprobe and pop_kprobe saves and restores
+			 * the currently active kprobe.
 			 */
-			save_previous_kprobe(kcb);
-			set_current_kprobe(p, regs, kcb);
-			kprobes_inc_nmissed_count(p);
-			prepare_singlestep(p, regs);
+			kprobe_reenter_check(kcb, p);
+			push_kprobe(kcb, p);
 			kcb->kprobe_status = KPROBE_REENTER;
-			return 1;
 		} else {
-			p = __get_cpu_var(current_kprobe);
-			if (p->break_handler && p->break_handler(p, regs)) {
-				goto ss_probe;
-			}
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with single stepping. If we have a
+			 * pre-handler and it returned non-zero, it prepped
+			 * for calling the break_handler below on re-entry
+			 * for jprobe processing, so get out doing nothing
+			 * more here.
+			 */
+			push_kprobe(kcb, p);
+			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+			if (p->pre_handler && p->pre_handler(p, regs))
+				return 1;
+			kcb->kprobe_status = KPROBE_HIT_SS;
 		}
-		goto no_kprobe;
-	}
-
-	p = get_kprobe(addr);
-	if (!p)
-		/*
-		 * No kprobe at this address. The fault has not been
-		 * caused by a kprobe breakpoint. The race of breakpoint
-		 * vs. kprobe remove does not exist because on s390 we
-		 * use stop_machine to arm/disarm the breakpoints.
-		 */
-		goto no_kprobe;
-
-	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-	set_current_kprobe(p, regs, kcb);
-	if (p->pre_handler && p->pre_handler(p, regs))
-		/* handler has already set things up, so skip ss setup */
+		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
 		return 1;
-
-ss_probe:
-	prepare_singlestep(p, regs);
-	kcb->kprobe_status = KPROBE_HIT_SS;
-	return 1;
-
-no_kprobe:
+	} else if (kprobe_running()) {
+		p = __get_cpu_var(current_kprobe);
+		if (p->break_handler && p->break_handler(p, regs)) {
+			/*
+			 * Continuation after the jprobe completed and
+			 * caused the jprobe_return trap. The jprobe
+			 * break_handler "returns" to the original
+			 * function that still has the kprobe breakpoint
+			 * installed. We continue with single stepping.
+			 */
+			kcb->kprobe_status = KPROBE_HIT_SS;
+			enable_singlestep(kcb, regs,
+					  (unsigned long) p->ainsn.insn);
+			return 1;
+		} /* else:
+		   * No kprobe at this address and the current kprobe
+		   * has no break handler (no jprobe!). The kernel just
+		   * exploded, let the standard trap handler pick up the
+		   * pieces.
+		   */
+	} /* else:
+	   * No kprobe at this address and no active kprobe. The trap has
+	   * not been caused by a kprobe breakpoint. The race of breakpoint
+	   * vs. kprobe remove does not exist because on s390 we use
+	   * stop_machine to arm/disarm the breakpoints.
+	   */
 	preempt_enable_no_resched();
-	return ret;
+	return 0;
 }
 
 /*
@@ -344,12 +352,12 @@
 static int __kprobes trampoline_probe_handler(struct kprobe *p,
 					      struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri = NULL;
+	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
 	struct hlist_node *node, *tmp;
-	unsigned long flags, orig_ret_address = 0;
-	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
-	kprobe_opcode_t *correct_ret_addr = NULL;
+	unsigned long flags, orig_ret_address;
+	unsigned long trampoline_address;
+	kprobe_opcode_t *correct_ret_addr;
 
 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -367,12 +375,16 @@
 	 *	 real return address, and all the rest will point to
 	 *	 kretprobe_trampoline
 	 */
+	ri = NULL;
+	orig_ret_address = 0;
+	correct_ret_addr = NULL;
+	trampoline_address = (unsigned long) &kretprobe_trampoline;
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
+		orig_ret_address = (unsigned long) ri->ret_addr;
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -391,7 +403,7 @@
 			/* another task is sharing our hash bucket */
 			continue;
 
-		orig_ret_address = (unsigned long)ri->ret_addr;
+		orig_ret_address = (unsigned long) ri->ret_addr;
 
 		if (ri->rp && ri->rp->handler) {
 			ri->ret_addr = correct_ret_addr;
@@ -400,19 +412,18 @@
 
 		recycle_rp_inst(ri, &empty_rp);
 
-		if (orig_ret_address != trampoline_address) {
+		if (orig_ret_address != trampoline_address)
 			/*
 			 * This is the real return address. Any other
 			 * instances associated with this task are for
 			 * other calls deeper on the call stack
 			 */
 			break;
-		}
 	}
 
 	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
 
-	reset_current_kprobe();
+	pop_kprobe(get_kprobe_ctlblk());
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
@@ -439,55 +450,42 @@
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
+	int fixup = get_fixup_type(p->ainsn.insn);
 
-	regs->psw.addr &= PSW_ADDR_INSN;
+	if (fixup & FIXUP_PSW_NORMAL)
+		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
-	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
-		regs->psw.addr = (unsigned long)p->addr +
-				((unsigned long)regs->psw.addr -
-				 (unsigned long)p->ainsn.insn);
+	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
+		int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
+		if (ip - (unsigned long) p->ainsn.insn == ilen)
+			ip = (unsigned long) p->addr + ilen;
+	}
 
-	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
-		if ((unsigned long)regs->psw.addr -
-		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
-			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
+	if (fixup & FIXUP_RETURN_REGISTER) {
+		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
+		regs->gprs[reg] += (unsigned long) p->addr -
+				   (unsigned long) p->ainsn.insn;
+	}
 
-	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
-		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
-						(regs->gprs[p->ainsn.reg] -
-						(unsigned long)p->ainsn.insn))
-						| PSW_ADDR_AMODE;
-
-	regs->psw.addr |= PSW_ADDR_AMODE;
-	/* turn off PER mode */
-	regs->psw.mask &= ~PSW_MASK_PER;
-	/* Restore the original per control regs */
-	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
-	regs->psw.mask |= kcb->kprobe_saved_imask;
+	disable_singlestep(kcb, regs, ip);
 }
 
 static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
-	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe *p = kprobe_running();
 
-	if (!cur)
+	if (!p)
 		return 0;
 
-	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-		cur->post_handler(cur, regs, 0);
+		p->post_handler(p, regs, 0);
 	}
 
-	resume_execution(cur, regs);
-
-	/*Restore back the original saved kprobes variables and continue. */
-	if (kcb->kprobe_status == KPROBE_REENTER) {
-		restore_previous_kprobe(kcb);
-		goto out;
-	}
-	reset_current_kprobe();
-out:
+	resume_execution(p, regs);
+	pop_kprobe(kcb);
 	preempt_enable_no_resched();
 
 	/*
@@ -495,17 +493,16 @@
 	 * will have PER set, in which case, continue the remaining processing
 	 * of do_single_step, as if this is not a probe hit.
 	 */
-	if (regs->psw.mask & PSW_MASK_PER) {
+	if (regs->psw.mask & PSW_MASK_PER)
 		return 0;
-	}
 
 	return 1;
 }
 
 static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
 {
-	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe *p = kprobe_running();
 	const struct exception_table_entry *entry;
 
 	switch(kcb->kprobe_status) {
@@ -521,14 +518,8 @@
 		 * and allow the page fault handler to continue as a
 		 * normal page fault.
 		 */
-		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
-		regs->psw.mask &= ~PSW_MASK_PER;
-		regs->psw.mask |= kcb->kprobe_saved_imask;
-		if (kcb->kprobe_status == KPROBE_REENTER)
-			restore_previous_kprobe(kcb);
-		else {
-			reset_current_kprobe();
-		}
+		disable_singlestep(kcb, regs, (unsigned long) p->addr);
+		pop_kprobe(kcb);
 		preempt_enable_no_resched();
 		break;
 	case KPROBE_HIT_ACTIVE:
@@ -538,7 +529,7 @@
 		 * we can also use npre/npostfault count for accounting
 		 * these specific fault cases.
 		 */
-		kprobes_inc_nmissed_count(cur);
+		kprobes_inc_nmissed_count(p);
 
 		/*
 		 * We come here because instructions in the pre/post
@@ -547,7 +538,7 @@
 		 * copy_from_user(), get_user() etc. Let the
 		 * user-specified handler try to fix it first.
 		 */
-		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
 			return 1;
 
 		/*
@@ -589,7 +580,7 @@
 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 				       unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
+	struct die_args *args = (struct die_args *) data;
 	struct pt_regs *regs = args->regs;
 	int ret = NOTIFY_DONE;
 
@@ -598,16 +589,16 @@
 
 	switch (val) {
 	case DIE_BPT:
-		if (kprobe_handler(args->regs))
+		if (kprobe_handler(regs))
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_SSTEP:
-		if (post_kprobe_handler(args->regs))
+		if (post_kprobe_handler(regs))
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_TRAP:
 		if (!preemptible() && kprobe_running() &&
-		    kprobe_trap_handler(args->regs, args->trapnr))
+		    kprobe_trap_handler(regs, args->trapnr))
 			ret = NOTIFY_STOP;
 		break;
 	default:
@@ -623,23 +614,19 @@
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	unsigned long addr;
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	unsigned long stack;
 
 	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
 	/* setup return addr to the jprobe handler routine */
-	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
 	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 
-	/* r14 is the function return address */
-	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
 	/* r15 is the stack pointer */
-	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
-	addr = (unsigned long)kcb->jprobe_saved_r15;
+	stack = (unsigned long) regs->gprs[15];
 
-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
-	       MIN_STACK_SIZE(addr));
+	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
 	return 1;
 }
 
@@ -656,30 +643,29 @@
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+	unsigned long stack;
+
+	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];
 
 	/* Put the regs back */
 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
 	/* put the stack back */
-	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
-	       MIN_STACK_SIZE(stack_addr));
+	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
 	preempt_enable_no_resched();
 	return 1;
 }
 
-static struct kprobe trampoline_p = {
-	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
+static struct kprobe trampoline = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
 	.pre_handler = trampoline_probe_handler
 };
 
 int __init arch_init_kprobes(void)
 {
-	return register_kprobe(&trampoline_p);
+	return register_kprobe(&trampoline);
 }
 
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 {
-	if (p->addr == (kprobe_opcode_t *) & kretprobe_trampoline)
-		return 1;
-	return 0;
+	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
 }
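
arch_prepare_kprobe() and resume_execution() above both rely on the fact that the two most significant bits of an s390 instruction's first halfword encode its length (00: 2 bytes, 01/10: 4 bytes, 11: 6 bytes), which is what the expression ((opcode >> 14) + 3) & -2 computes. A tiny standalone check of that formula, not part of the patch:

/* Illustration only: the instruction-length derivation used by the
 * reworked kprobes code.
 */
#include <assert.h>
#include <stdio.h>

static int insn_length(unsigned short opcode)
{
	return ((opcode >> 14) + 3) & -2;
}

int main(void)
{
	assert(insn_length(0x0700) == 2);	/* top bits 00 -> 2 bytes */
	assert(insn_length(0xa705) == 4);	/* top bits 10 -> 4 bytes */
	assert(insn_length(0xc005) == 6);	/* top bits 11 -> 6 bytes */
	printf("length formula checks out\n");
	return 0;
}
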
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index dfe015d..1e6a557 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -7,6 +7,8 @@
 
 #include <asm/asm-offsets.h>
 
+	.section .kprobes.text, "ax"
+
 	.globl ftrace_stub
 ftrace_stub:
 	br	%r14
@@ -16,22 +18,12 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14
 
-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.long	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
 	stm	%r2,%r5,16(%r15)
 	bras	%r1,2f
-#ifdef CONFIG_DYNAMIC_FTRACE
-0:	.long	ftrace_dyn_func
-#else
 0:	.long	ftrace_trace_function
-#endif
 1:	.long	function_trace_stop
 2:	l	%r2,1b-0b(%r1)
 	icm	%r2,0xf,0(%r2)
@@ -47,21 +39,15 @@
 	l	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	l	%r2,100(%r15)
+	l	%r3,152(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	1f
-#endif
-	bras	%r1,0f
-	.long	prepare_ftrace_return
-0:	l	%r2,152(%r15)
-	l	%r4,0(%r1)
-	l	%r3,100(%r15)
-	basr	%r14,%r4
-	st	%r2,100(%r15)
-1:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	st	%r2,100(%r15)
 #endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index c37211c..e736672 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -7,6 +7,8 @@
 
 #include <asm/asm-offsets.h>
 
+	.section .kprobes.text, "ax"
+
 	.globl ftrace_stub
 ftrace_stub:
 	br	%r14
@@ -16,12 +18,6 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14
 
-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.quad	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
@@ -35,26 +31,19 @@
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r14
 	lg	%r3,168(%r15)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	larl	%r14,ftrace_dyn_func
-#else
 	larl	%r14,ftrace_trace_function
-#endif
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	lg	%r2,168(%r15)
+	lg	%r3,272(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	0f
-#endif
-	lg	%r2,272(%r15)
-	lg	%r3,168(%r15)
-	brasl	%r14,prepare_ftrace_return
-	stg	%r2,168(%r15)
-0:
+# The bras instruction gets runtime patched to call prepare_ftrace_return.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	stg	%r2,168(%r15)
 #endif
 	aghi	%r15,160
 	lmg	%r2,%r5,32(%r15)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 1995c17..fab8843 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -8,6 +8,7 @@
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/hardirq.h>
@@ -255,7 +256,7 @@
 	nmi_enter();
 	s390_idle_check(regs, S390_lowcore.mcck_clock,
 			S390_lowcore.mcck_enter_timer);
-
+	kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
 	mcck = &__get_cpu_var(cpu_mcck);
 	umode = user_mode(regs);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ec2e03b..a895e69 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -30,8 +30,11 @@
 #include <linux/tick.h>
 #include <linux/elfcore.h>
 #include <linux/kernel_stat.h>
+#include <linux/personality.h>
 #include <linux/syscalls.h>
 #include <linux/compat.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
 #include <asm/compat.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -41,6 +44,7 @@
 #include <asm/irq.h>
 #include <asm/timer.h>
 #include <asm/nmi.h>
+#include <asm/smp.h>
 #include "entry.h"
 
 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -75,13 +79,8 @@
  */
 static void default_idle(void)
 {
-	/* CPU is going idle. */
-#ifdef CONFIG_HOTPLUG_CPU
-	if (cpu_is_offline(smp_processor_id())) {
-		preempt_enable_no_resched();
+	if (cpu_is_offline(smp_processor_id()))
 		cpu_die();
-	}
-#endif
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
@@ -116,15 +115,17 @@
 	}
 }
 
-extern void kernel_thread_starter(void);
+extern void __kprobes kernel_thread_starter(void);
 
 asm(
-	".align 4\n"
+	".section .kprobes.text, \"ax\"\n"
+	".global kernel_thread_starter\n"
 	"kernel_thread_starter:\n"
 	"    la    2,0(10)\n"
 	"    basr  14,9\n"
 	"    la    2,0\n"
-	"    br    11\n");
+	"    br    11\n"
+	".previous\n");
 
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {
@@ -214,8 +215,10 @@
 	/* start new process with ar4 pointing to the correct address space */
 	p->thread.mm_segment = get_fs();
 	/* Don't copy debug registers */
-	memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
+	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(p, TIF_PER_TRAP);
 	/* Initialize per thread user and system timer values */
 	ti = task_thread_info(p);
 	ti->user_timer = 0;
@@ -331,3 +334,39 @@
 	}
 	return 0;
 }
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= get_random_int() & ~PAGE_MASK;
+	return sp & ~0xf;
+}
+
+static inline unsigned long brk_rnd(void)
+{
+	/* 8MB for 32bit, 1GB for 64bit */
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	else
+		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+
+	if (ret < mm->brk)
+		return mm->brk;
+	return ret;
+}
+
+unsigned long randomize_et_dyn(unsigned long base)
+{
+	unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+
+	if (!(current->flags & PF_RANDOMIZE))
+		return base;
+	if (ret < base)
+		return base;
+	return ret;
+}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 644548e..311e9d7 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,7 +13,7 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/delay.h>
-
+#include <linux/cpu.h>
 #include <asm/elf.h>
 #include <asm/lowcore.h>
 #include <asm/param.h>
@@ -35,17 +35,6 @@
 }
 
 /*
- * print_cpu_info - print basic information about a cpu
- */
-void __cpuinit print_cpu_info(void)
-{
-	struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
-
-	pr_info("Processor %d started, address %d, identification %06X\n",
-		S390_lowcore.cpu_nr, stap(), id->ident);
-}
-
-/*
  * show_cpuinfo - Get information on one CPU for use by procfs.
  */
 static int show_cpuinfo(struct seq_file *m, void *v)
@@ -57,9 +46,8 @@
 	unsigned long n = (unsigned long) v - 1;
 	int i;
 
-	s390_adjust_jiffies();
-	preempt_disable();
 	if (!n) {
+		s390_adjust_jiffies();
 		seq_printf(m, "vendor_id       : IBM/S390\n"
 			   "# processors    : %i\n"
 			   "bogomips per cpu: %lu.%02lu\n",
@@ -71,7 +59,7 @@
 				seq_printf(m, "%s ", hwcap_str[i]);
 		seq_puts(m, "\n");
 	}
-
+	get_online_cpus();
 	if (cpu_online(n)) {
 		struct cpuid *id = &per_cpu(cpu_id, n);
 		seq_printf(m, "processor %li: "
@@ -80,7 +68,7 @@
 			   "machine = %04X\n",
 			   n, id->version, id->ident, id->machine);
 	}
-	preempt_enable();
+	put_online_cpus();
 	return 0;
 }
 
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 019bb71..ef86ad2 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,25 +1,9 @@
 /*
- *  arch/s390/kernel/ptrace.c
+ *  Ptrace user space interface.
  *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Denis Joseph Barrow
  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- *  Based on PowerPC version 
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  Derived from "arch/m68k/kernel/ptrace.c"
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@cs.nmt.edu) 
- *
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file README.legal in the main directory of
- * this archive for more details.
  */
 
 #include <linux/kernel.h>
@@ -61,76 +45,58 @@
 	REGSET_GENERAL_EXTENDED,
 };
 
-static void
-FixPerRegisters(struct task_struct *task)
+void update_per_regs(struct task_struct *task)
 {
-	struct pt_regs *regs;
-	per_struct *per_info;
-	per_cr_words cr_words;
+	static const struct per_regs per_single_step = {
+		.control = PER_EVENT_IFETCH,
+		.start = 0,
+		.end = PSW_ADDR_INSN,
+	};
+	struct pt_regs *regs = task_pt_regs(task);
+	struct thread_struct *thread = &task->thread;
+	const struct per_regs *new;
+	struct per_regs old;
 
-	regs = task_pt_regs(task);
-	per_info = (per_struct *) &task->thread.per_info;
-	per_info->control_regs.bits.em_instruction_fetch =
-		per_info->single_step | per_info->instruction_fetch;
-	
-	if (per_info->single_step) {
-		per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_COMPAT
-		if (is_compat_task())
-			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
-		else
-#endif
-			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
-	} else {
-		per_info->control_regs.bits.starting_addr =
-			per_info->starting_addr;
-		per_info->control_regs.bits.ending_addr =
-			per_info->ending_addr;
-	}
-	/*
-	 * if any of the control reg tracing bits are on 
-	 * we switch on per in the psw
-	 */
-	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
-		regs->psw.mask |= PSW_MASK_PER;
-	else
+	/* TIF_SINGLE_STEP overrides the user specified PER registers. */
+	new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
+		&per_single_step : &thread->per_user;
+
+	/* Take care of the PER enablement bit in the PSW. */
+	if (!(new->control & PER_EVENT_MASK)) {
 		regs->psw.mask &= ~PSW_MASK_PER;
-
-	if (per_info->control_regs.bits.em_storage_alteration)
-		per_info->control_regs.bits.storage_alt_space_ctl = 1;
-	else
-		per_info->control_regs.bits.storage_alt_space_ctl = 0;
-
-	if (task == current) {
-		__ctl_store(cr_words, 9, 11);
-		if (memcmp(&cr_words, &per_info->control_regs.words,
-			   sizeof(cr_words)) != 0)
-			__ctl_load(per_info->control_regs.words, 9, 11);
+		return;
 	}
+	regs->psw.mask |= PSW_MASK_PER;
+	__ctl_store(old, 9, 11);
+	if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
+		__ctl_load(*new, 9, 11);
 }
 
 void user_enable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 1;
-	FixPerRegisters(task);
+	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
-	task->thread.per_info.single_step = 0;
-	FixPerRegisters(task);
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	if (task == current)
+		update_per_regs(task);
 }
 
 /*
  * Called by kernel/ptrace.c when detaching..
  *
- * Make sure single step bits etc are not set.
+ * Clear all debugging related fields.
  */
-void
-ptrace_disable(struct task_struct *child)
+void ptrace_disable(struct task_struct *task)
 {
-	/* make sure the single step bit is not set. */
-	user_disable_single_step(child);
+	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
+	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
+	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(task, TIF_PER_TRAP);
 }
 
 #ifndef CONFIG_64BIT
@@ -139,6 +105,47 @@
 # define __ADDR_MASK 7
 #endif
 
+static inline unsigned long __peek_user_per(struct task_struct *child,
+					    addr_t addr)
+{
+	struct per_struct_kernel *dummy = NULL;
+
+	if (addr == (addr_t) &dummy->cr9)
+		/* Control bits of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PER_EVENT_IFETCH : child->thread.per_user.control;
+	else if (addr == (addr_t) &dummy->cr10)
+		/* Start address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			0 : child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy->cr11)
+		/* End address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PSW_ADDR_INSN : child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy->bits)
+		/* Single-step bit. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			(1UL << (BITS_PER_LONG - 1)) : 0;
+	else if (addr == (addr_t) &dummy->starting_addr)
+		/* Start address of the user specified per set. */
+		return child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy->ending_addr)
+		/* End address of the user specified per set. */
+		return child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy->perc_atmid)
+		/* PER code, ATMID and AI of the last PER trap */
+		return (unsigned long)
+			child->thread.per_event.cause << (BITS_PER_LONG - 16);
+	else if (addr == (addr_t) &dummy->address)
+		/* Address of the last PER trap */
+		return child->thread.per_event.address;
+	else if (addr == (addr_t) &dummy->access_id)
+		/* Access id of the last PER trap */
+		return (unsigned long)
+			child->thread.per_event.paid << (BITS_PER_LONG - 8);
+	return 0;
+}
+
 /*
  * Read the word at offset addr from the user area of a process. The
  * trouble here is that the information is littered over different
@@ -204,10 +211,10 @@
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy->regs.per_info;
+		tmp = __peek_user_per(child, addr);
 
 	} else
 		tmp = 0;
@@ -237,6 +244,35 @@
 	return put_user(tmp, (addr_t __user *) data);
 }
 
+static inline void __poke_user_per(struct task_struct *child,
+				   addr_t addr, addr_t data)
+{
+	struct per_struct_kernel *dummy = NULL;
+
+	/*
+	 * There are only three fields in the per_info struct that the
+	 * debugger user can write to.
+	 * 1) cr9: the debugger wants to set a new PER event mask
+	 * 2) starting_addr: the debugger wants to set a new starting
+	 *    address to use with the PER event mask.
+	 * 3) ending_addr: the debugger wants to set a new ending
+	 *    address to use with the PER event mask.
+	 * The user specified PER event mask and the start and end
+	 * addresses are used only if single stepping is not in effect.
+	 * Writes to any other field in per_info are ignored.
+	 */
+	if (addr == (addr_t) &dummy->cr9)
+		/* PER event mask of the user specified per set. */
+		child->thread.per_user.control =
+			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+	else if (addr == (addr_t) &dummy->starting_addr)
+		/* Starting address of the user specified per set. */
+		child->thread.per_user.start = data;
+	else if (addr == (addr_t) &dummy->ending_addr)
+		/* Ending address of the user specified per set. */
+		child->thread.per_user.end = data;
+}
+
 /*
  * Write a word to the user area of a process at location addr. This
  * operation does have an additional problem compared to peek_user.
@@ -311,19 +347,17 @@
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure 
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy->regs.per_info;
-		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
+		addr -= (addr_t) &dummy->regs.per_info;
+		__poke_user_per(child, addr, data);
 
 	}
 
-	FixPerRegisters(child);
 	return 0;
 }
 
-static int
-poke_user(struct task_struct *child, addr_t addr, addr_t data)
+static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
 {
 	addr_t mask;
 
@@ -410,12 +444,53 @@
  */
 
 /*
+ * Same as peek_user_per but for a 31 bit program.
+ */
+static inline __u32 __peek_user_per_compat(struct task_struct *child,
+					   addr_t addr)
+{
+	struct compat_per_struct_kernel *dummy32 = NULL;
+
+	if (addr == (addr_t) &dummy32->cr9)
+		/* Control bits of the active per set. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			PER_EVENT_IFETCH : child->thread.per_user.control;
+	else if (addr == (addr_t) &dummy32->cr10)
+		/* Start address of the active per set. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			0 : child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy32->cr11)
+		/* End address of the active per set. */
+		return test_thread_flag(TIF_SINGLE_STEP) ?
+			PSW32_ADDR_INSN : child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy32->bits)
+		/* Single-step bit. */
+		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
+			0x80000000 : 0;
+	else if (addr == (addr_t) &dummy32->starting_addr)
+		/* Start address of the user specified per set. */
+		return (__u32) child->thread.per_user.start;
+	else if (addr == (addr_t) &dummy32->ending_addr)
+		/* End address of the user specified per set. */
+		return (__u32) child->thread.per_user.end;
+	else if (addr == (addr_t) &dummy32->perc_atmid)
+		/* PER code, ATMID and AI of the last PER trap */
+		return (__u32) child->thread.per_event.cause << 16;
+	else if (addr == (addr_t) &dummy32->address)
+		/* Address of the last PER trap */
+		return (__u32) child->thread.per_event.address;
+	else if (addr == (addr_t) &dummy32->access_id)
+		/* Access id of the last PER trap */
+		return (__u32) child->thread.per_event.paid << 24;
+	return 0;
+}
+
+/*
  * Same as peek_user but for a 31 bit program.
  */
 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	addr_t offset;
 	__u32 tmp;
 
@@ -465,19 +540,10 @@
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/* This is magic. See per_struct and per_struct32. */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
+		addr -= (addr_t) &dummy32->regs.per_info;
+		tmp = __peek_user_per_compat(child, addr);
 
 	} else
 		tmp = 0;
@@ -498,13 +564,32 @@
 }
 
 /*
+ * Same as poke_user_per but for a 31 bit program.
+ */
+static inline void __poke_user_per_compat(struct task_struct *child,
+					  addr_t addr, __u32 data)
+{
+	struct compat_per_struct_kernel *dummy32 = NULL;
+
+	if (addr == (addr_t) &dummy32->cr9)
+		/* PER event mask of the user specified per set. */
+		child->thread.per_user.control =
+			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
+	else if (addr == (addr_t) &dummy32->starting_addr)
+		/* Starting address of the user specified per set. */
+		child->thread.per_user.start = data;
+	else if (addr == (addr_t) &dummy32->ending_addr)
+		/* Ending address of the user specified per set. */
+		child->thread.per_user.end = data;
+}
+
+/*
  * Same as poke_user but for a 31 bit program.
  */
 static int __poke_user_compat(struct task_struct *child,
 			      addr_t addr, addr_t data)
 {
-	struct user32 *dummy32 = NULL;
-	per_struct32 *dummy_per32 = NULL;
+	struct compat_user *dummy32 = NULL;
 	__u32 tmp = (__u32) data;
 	addr_t offset;
 
@@ -561,37 +646,20 @@
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
-		 * per_info is found in the thread structure.
+		 * Handle access to the per_info structure.
 		 */
-		offset = addr - (addr_t) &dummy32->regs.per_info;
-		/*
-		 * This is magic. See per_struct and per_struct32.
-		 * By incident the offsets in per_struct are exactly
-		 * twice the offsets in per_struct32 for all fields.
-		 * The 8 byte fields need special handling though,
-		 * because the second half (bytes 4-7) is needed and
-		 * not the first half.
-		 */
-		if ((offset >= (addr_t) &dummy_per32->control_regs &&
-		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
-		    (offset >= (addr_t) &dummy_per32->starting_addr &&
-		     offset <= (addr_t) &dummy_per32->ending_addr) ||
-		    offset == (addr_t) &dummy_per32->lowcore.words.address)
-			offset = offset*2 + 4;
-		else
-			offset = offset*2;
-		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
-
+		addr -= (addr_t) &dummy32->regs.per_info;
+		__poke_user_per_compat(child, addr, data);
 	}
 
-	FixPerRegisters(child);
 	return 0;
 }
 
 static int poke_user_compat(struct task_struct *child,
 			    addr_t addr, addr_t data)
 {
-	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
+	if (!is_compat_task() || (addr & 3) ||
+	    addr > sizeof(struct compat_user) - 3)
 		return -EIO;
 
 	return __poke_user_compat(child, addr, data);
@@ -602,7 +670,7 @@
 {
 	unsigned long addr = caddr;
 	unsigned long data = cdata;
-	ptrace_area_emu31 parea; 
+	compat_ptrace_area parea;
 	int copied, ret;
 
 	switch (request) {
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bd1db50..1850299 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -1,33 +1,36 @@
 /*
- *  arch/s390/kernel/s390_ext.c
- *
- *  S390 version
- *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
- *               Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2010
+ *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/ftrace.h>
-#include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-#include <asm/cputime.h>
-#include <asm/lowcore.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
 #include <asm/s390_ext.h>
 #include <asm/irq_regs.h>
+#include <asm/cputime.h>
+#include <asm/lowcore.h>
 #include <asm/irq.h>
 #include "entry.h"
 
+struct ext_int_info {
+	struct ext_int_info *next;
+	ext_int_handler_t handler;
+	__u16 code;
+};
+
 /*
  * ext_int_hash[index] is the start of the list for all external interrupts
  * that hash to this index. With the current set of external interrupts 
  * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
  * iucv and 0x2603 pfault) this is always the first element. 
  */
-ext_int_info_t *ext_int_hash[256] = { NULL, };
+static struct ext_int_info *ext_int_hash[256];
 
 static inline int ext_hash(__u16 code)
 {
@@ -36,90 +39,53 @@
 
 int register_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-        ext_int_info_t *p;
-        int index;
+	struct ext_int_info *p;
+	int index;
 
-	p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
-        if (p == NULL)
-                return -ENOMEM;
-        p->code = code;
-        p->handler = handler;
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p)
+		return -ENOMEM;
+	p->code = code;
+	p->handler = handler;
 	index = ext_hash(code);
-        p->next = ext_int_hash[index];
-        ext_int_hash[index] = p;
-        return 0;
+	p->next = ext_int_hash[index];
+	ext_int_hash[index] = p;
+	return 0;
 }
-
-int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-				      ext_int_info_t *p)
-{
-        int index;
-
-        if (p == NULL)
-                return -EINVAL;
-        p->code = code;
-        p->handler = handler;
-	index = ext_hash(code);
-        p->next = ext_int_hash[index];
-        ext_int_hash[index] = p;
-        return 0;
-}
+EXPORT_SYMBOL(register_external_interrupt);
 
 int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
 {
-        ext_int_info_t *p, *q;
-        int index;
-
-	index = ext_hash(code);
-        q = NULL;
-        p = ext_int_hash[index];
-        while (p != NULL) {
-                if (p->code == code && p->handler == handler)
-                        break;
-                q = p;
-                p = p->next;
-        }
-        if (p == NULL)
-                return -ENOENT;
-        if (q != NULL)
-                q->next = p->next;
-        else
-                ext_int_hash[index] = p->next;
-	kfree(p);
-        return 0;
-}
-
-int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
-					ext_int_info_t *p)
-{
-	ext_int_info_t *q;
+	struct ext_int_info *p, *q;
 	int index;
 
-	if (p == NULL || p->code != code || p->handler != handler)
-		return -EINVAL;
 	index = ext_hash(code);
-	q = ext_int_hash[index];
-	if (p != q) {
-		while (q != NULL) {
-			if (q->next == p)
-				break;
-			q = q->next;
-		}
-		if (q == NULL)
-			return -ENOENT;
+	q = NULL;
+	p = ext_int_hash[index];
+	while (p) {
+		if (p->code == code && p->handler == handler)
+			break;
+		q = p;
+		p = p->next;
+	}
+	if (!p)
+		return -ENOENT;
+	if (q)
 		q->next = p->next;
-	} else
+	else
 		ext_int_hash[index] = p->next;
+	kfree(p);
 	return 0;
 }
+EXPORT_SYMBOL(unregister_external_interrupt);
 
 void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
 			   unsigned int param32, unsigned long param64)
 {
 	struct pt_regs *old_regs;
 	unsigned short code;
-        ext_int_info_t *p;
-        int index;
+	struct ext_int_info *p;
+	int index;
 
 	code = (unsigned short) ext_int_code;
 	old_regs = set_irq_regs(regs);
@@ -132,7 +98,7 @@
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	if (code != 0x1004)
 		__get_cpu_var(s390_idle).nohz_delay = 1;
-        index = ext_hash(code);
+	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
 		if (likely(p->code == code))
 			p->handler(ext_int_code, param32, param64);
@@ -140,6 +106,3 @@
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-EXPORT_SYMBOL(register_external_interrupt);
-EXPORT_SYMBOL(unregister_external_interrupt);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ee7ac8b..abbb3c3 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -505,7 +505,7 @@
 			 * Let tracing know that we've done the handler setup.
 			 */
 			tracehook_signal_handler(signr, &info, &ka, regs,
-					current->thread.per_info.single_step);
+					test_thread_flag(TIF_SINGLE_STEP));
 		}
 		return;
 	}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 94cf510..63a97db 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -23,6 +23,7 @@
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -161,6 +162,7 @@
 {
 	unsigned long bits;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
 	 *
@@ -469,25 +471,25 @@
 	ipi_call_unlock();
 	/* Switch on interrupts */
 	local_irq_enable();
-	/* Print info about this processor */
-	print_cpu_info();
 	/* cpu_idle will call schedule for us */
 	cpu_idle();
 	return 0;
 }
 
-static void __init smp_create_idle(unsigned int cpu)
-{
-	struct task_struct *p;
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
 
-	/*
-	 *  don't care about the psw and regs settings since we'll never
-	 *  reschedule the forked task.
-	 */
-	p = fork_idle(cpu);
-	if (IS_ERR(p))
-		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-	current_set[cpu] = p;
+static void __cpuinit smp_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle;
+
+	c_idle = container_of(work, struct create_idle, work);
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
 }
 
 static int __cpuinit smp_alloc_lowcore(int cpu)
@@ -551,6 +553,7 @@
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct _lowcore *cpu_lowcore;
+	struct create_idle c_idle;
 	struct task_struct *idle;
 	struct stack_frame *sf;
 	u32 lowcore;
@@ -558,6 +561,19 @@
 
 	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 		return -EIO;
+	idle = current_set[cpu];
+	if (!idle) {
+		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
+		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
+		c_idle.cpu = cpu;
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		if (IS_ERR(c_idle.idle))
+			return PTR_ERR(c_idle.idle);
+		idle = c_idle.idle;
+		current_set[cpu] = c_idle.idle;
+	}
+	init_idle(idle, cpu);
 	if (smp_alloc_lowcore(cpu))
 		return -ENOMEM;
 	do {
@@ -572,7 +588,6 @@
 	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 		udelay(10);
 
-	idle = current_set[cpu];
 	cpu_lowcore = lowcore_ptr[cpu];
 	cpu_lowcore->kernel_stack = (unsigned long)
 		task_stack_page(idle) + THREAD_SIZE;
@@ -664,7 +679,6 @@
 		udelay(10);
 	smp_free_lowcore(cpu);
 	atomic_dec(&init_mm.context.attach_count);
-	pr_info("Processor %d stopped\n", cpu);
 }
 
 void cpu_die(void)
@@ -684,14 +698,12 @@
 #endif
 	unsigned long async_stack, panic_stack;
 	struct _lowcore *lowcore;
-	unsigned int cpu;
 
 	smp_detect_cpus();
 
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	print_cpu_info();
 
 	/* Reallocate current lowcore, but keep its contents. */
 	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -719,9 +731,6 @@
 	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
 		BUG();
 #endif
-	for_each_possible_cpu(cpu)
-		if (cpu != smp_processor_id())
-			smp_create_idle(cpu);
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index f754a6d..9e7b039 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -15,6 +15,7 @@
 #define KMSG_COMPONENT "time"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -37,6 +38,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/gfp.h>
+#include <linux/kprobes.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
@@ -60,7 +62,7 @@
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long notrace sched_clock(void)
+unsigned long long notrace __kprobes sched_clock(void)
 {
 	return (get_clock_monotonic() * 125) >> 9;
 }
@@ -159,6 +161,7 @@
 				       unsigned int param32,
 				       unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
 	if (S390_lowcore.clock_comparator == -1ULL)
 		set_clock_comparator(S390_lowcore.clock_comparator);
 }
@@ -169,6 +172,7 @@
 static void timing_alert_interrupt(unsigned int ext_int_code,
 				   unsigned int param32, unsigned long param64)
 {
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
 	if (param32 & 0x00c40000)
 		etr_timing_alert((struct etr_irq_parm *) &param32);
 	if (param32 & 0x00038000)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 7064082..5eb78dd 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -365,12 +365,10 @@
 		((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
 }
 
-void __kprobes do_single_step(struct pt_regs *regs)
+void __kprobes do_per_trap(struct pt_regs *regs)
 {
-	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
-					SIGTRAP) == NOTIFY_STOP){
+	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
 		return;
-	}
 	if (tracehook_consider_fatal_signal(current, SIGTRAP))
 		force_sig(SIGTRAP, current);
 }
@@ -451,8 +449,8 @@
 		"floating point exception", regs, &si);
 }
 
-static void illegal_op(struct pt_regs *regs, long pgm_int_code,
-		       unsigned long trans_exc_code)
+static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
+				 unsigned long trans_exc_code)
 {
 	siginfo_t info;
         __u8 opcode[6];
@@ -688,7 +686,7 @@
 	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
 }
 
-asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
+asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs)
 {
 	bust_spinlocks(1);
 	printk("Kernel stack overflow.\n");
@@ -733,5 +731,6 @@
         pgm_check_table[0x15] = &operand_exception;
         pgm_check_table[0x1C] = &space_switch_exception;
         pgm_check_table[0x1D] = &hfp_sqrt_exception;
-	pfault_irq_init();
+	/* Enable machine checks early. */
+	local_mcck_enable();
 }
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index e3150dd..f438d74 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -203,7 +203,6 @@
 	if (!uses_interp)
 		return 0;
 
-	vdso_base = mm->mmap_base;
 #ifdef CONFIG_64BIT
 	vdso_pagelist = vdso64_pagelist;
 	vdso_pages = vdso64_pages;
@@ -233,8 +232,7 @@
 	 * fail and end up putting it elsewhere.
 	 */
 	down_write(&mm->mmap_sem);
-	vdso_base = get_unmapped_area(NULL, vdso_base,
-				      vdso_pages << PAGE_SHIFT, 0, 0);
+	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;
 		goto out_up;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7eff9b7..1ccdf4d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -20,6 +20,7 @@
 #include <linux/rcupdate.h>
 #include <linux/posix-timers.h>
 #include <linux/cpu.h>
+#include <linux/kprobes.h>
 
 #include <asm/s390_ext.h>
 #include <asm/timer.h>
@@ -122,7 +123,7 @@
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
+void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -162,7 +163,7 @@
 	idle->sequence++;
 }
 
-void vtime_stop_cpu(void)
+void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
@@ -323,6 +324,7 @@
 	struct list_head cb_list;	/* the callback queue */
 	__u64 elapsed, next;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
 	INIT_LIST_HEAD(&cb_list);
 	vq = &__get_cpu_var(virt_cpu_timer);
 
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index a725158..f66a1bd 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -4,8 +4,8 @@
 source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION
-	bool "Virtualization"
-	default y
+	def_bool y
+	prompt "Virtualization"
 	---help---
 	  Say Y here to get to see options for using your Linux host to run other
 	  operating systems inside virtual machines (guests).
@@ -16,7 +16,8 @@
 if VIRTUALIZATION
 
 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support"
+	def_tristate y
+	prompt "Kernel-based Virtual Machine (KVM) support"
 	depends on HAVE_KVM && EXPERIMENTAL
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 985d825..bade533 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -164,24 +164,18 @@
 	return r;
 }
 
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm;
 	int rc;
 	char debug_name[16];
 
 	rc = s390_enable_sie();
 	if (rc)
-		goto out_nokvm;
-
-	rc = -ENOMEM;
-	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	if (!kvm)
-		goto out_nokvm;
+		goto out_err;
 
 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
-		goto out_nosca;
+		goto out_err;
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
@@ -195,13 +189,11 @@
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
-	return kvm;
+	return 0;
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
-out_nosca:
-	kfree(kvm);
-out_nokvm:
-	return ERR_PTR(rc);
+out_err:
+	return rc;
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -240,11 +232,8 @@
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
 }
 
 /* Section: vcpu related */
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 7c37ec3..0f53110 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -47,7 +47,6 @@
 	lockdep_on();
 	__ctl_load(cr0_saved, 0, 0);
 	local_tick_enable(clock_saved);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 static void __udelay_enabled(unsigned long long usecs)
@@ -70,7 +69,6 @@
 		if (clock_saved)
 			local_tick_enable(clock_saved);
 	} while (get_clock() < end);
-	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
 /*
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe5701e..2c57806 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,6 +10,7 @@
  *    Copyright (C) 1995  Linus Torvalds
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -234,13 +235,13 @@
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
 
 	if (!rc && instruction == 0x0a77) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_sigreturn();
 		else
 			sys_sigreturn();
 	} else if (!rc && instruction == 0x0aad) {
-		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		clear_tsk_thread_flag(current, TIF_PER_TRAP);
 		if (is_compat_task())
 			sys32_rt_sigreturn();
 		else
@@ -378,7 +379,7 @@
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
+	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
 	fault = 0;
 out_up:
 	up_read(&mm->mmap_sem);
@@ -480,8 +481,7 @@
 /*
  * 'pfault' pseudo page faults routines.
  */
-static ext_int_info_t ext_int_pfault;
-static int pfault_disable = 0;
+static int pfault_disable;
 
 static int __init nopfault(char *str)
 {
@@ -543,6 +543,7 @@
 	struct task_struct *tsk;
 	__u16 subcode;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
 	/*
 	 * Get the external interruption subcode & pfault
 	 * initial/completion signal bit. VM stores this 
@@ -592,24 +593,28 @@
 	}
 }
 
-void __init pfault_irq_init(void)
+static int __init pfault_irq_init(void)
 {
-	if (!MACHINE_IS_VM)
-		return;
+	int rc;
 
+	if (!MACHINE_IS_VM)
+		return 0;
 	/*
 	 * Try to get pfault pseudo page faults going.
 	 */
-	if (register_early_external_interrupt(0x2603, pfault_interrupt,
-					      &ext_int_pfault) != 0)
-		panic("Couldn't request external interrupt 0x2603");
-
+	rc = register_external_interrupt(0x2603, pfault_interrupt);
+	if (rc) {
+		pfault_disable = 1;
+		return rc;
+	}
 	if (pfault_init() == 0)
-		return;
+		return 0;
 
 	/* Tough luck, no pfault. */
 	pfault_disable = 1;
-	unregister_early_external_interrupt(0x2603, pfault_interrupt,
-					    &ext_int_pfault);
+	unregister_external_interrupt(0x2603, pfault_interrupt);
+	return 0;
 }
+early_initcall(pfault_irq_init);
+
 #endif
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 869efba..c9a9f7f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -27,17 +27,44 @@
 #include <linux/personality.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <asm/pgalloc.h>
 #include <asm/compat.h>
 
+static unsigned long stack_maxrandom_size(void)
+{
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
+	if (current->personality & ADDR_NO_RANDOMIZE)
+		return 0;
+	return STACK_RND_MASK << PAGE_SHIFT;
+}
+
 /*
  * Top of mmap area (just below the process stack).
  *
- * Leave an at least ~128 MB hole.
+ * Leave at least a ~32 MB hole.
  */
-#define MIN_GAP (128*1024*1024)
+#define MIN_GAP (32*1024*1024)
 #define MAX_GAP (STACK_TOP/6*5)
 
+static inline int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
+	/* 8MB randomization for mmap_base */
+	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+}
+
 static inline unsigned long mmap_base(void)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);
@@ -46,22 +73,8 @@
 		gap = MIN_GAP;
 	else if (gap > MAX_GAP)
 		gap = MAX_GAP;
-
-	return STACK_TOP - (gap & PAGE_MASK);
-}
-
-static inline int mmap_is_legacy(void)
-{
-#ifdef CONFIG_64BIT
-	/*
-	 * Force standard allocation for 64 bit programs.
-	 */
-	if (!is_compat_task())
-		return 1;
-#endif
-	return sysctl_legacy_va_layout ||
-	    (current->personality & ADDR_COMPAT_LAYOUT) ||
-	    rlimit(RLIMIT_STACK) == RLIM_INFINITY;
+	gap &= PAGE_MASK;
+	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
 #ifndef CONFIG_64BIT
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e9e7112..fff2522 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -349,6 +349,7 @@
 	select CPU_HAS_DSP
 	select SYS_SUPPORTS_CMT
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select USB_ARCH_HAS_OHCI
 	help
 	  Select SH7720 if you have a SH3-DSP SH7720 CPU.
 
@@ -357,6 +358,7 @@
 	select CPU_SH3
 	select CPU_HAS_DSP
 	select SYS_SUPPORTS_CMT
+	select USB_ARCH_HAS_OHCI
 	help
 	  Select SH7721 if you have a SH3-DSP SH7721 CPU.
 
@@ -437,6 +439,7 @@
 config CPU_SUBTYPE_SH7763
 	bool "Support SH7763 processor"
 	select CPU_SH4A
+	select USB_ARCH_HAS_OHCI
 	help
 	  Select SH7763 if you have a SH4A SH7763(R5S77631) CPU.
 
@@ -463,6 +466,8 @@
 	select CPU_HAS_PTEAEX
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select USB_ARCH_HAS_OHCI
+	select USB_ARCH_HAS_EHCI
 
 config CPU_SUBTYPE_SHX3
 	bool "Support SH-X3 processor"
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 2018c7e..d893411 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -3,6 +3,9 @@
 config SOLUTION_ENGINE
 	bool
 
+config SH_ALPHA_BOARD
+	bool
+
 config SH_SOLUTION_ENGINE
 	bool "SolutionEngine"
 	select SOLUTION_ENGINE
@@ -320,6 +323,21 @@
 	  Compact Flash socket, two serial ports and PC-104 bus.
 	  More information at <http://sh2000.sh-linux.org>.
 
+config SH_APSH4A3A
+	bool "AP-SH4A-3A"
+	select SH_ALPHA_BOARD
+	depends on CPU_SUBTYPE_SH7785
+	help
+	  Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A.
+
+config SH_APSH4AD0A
+	bool "AP-SH4AD-0A"
+	select SH_ALPHA_BOARD
+	select SYS_SUPPORTS_PCI
+	depends on CPU_SUBTYPE_SH7786
+	help
+	  Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A.
+
 endmenu
 
 source "arch/sh/boards/mach-r2d/Kconfig"
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index be7d11d..975a0f6 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -13,3 +13,5 @@
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
 obj-$(CONFIG_SH_TITAN)		+= board-titan.o
 obj-$(CONFIG_SH_SH7757LCR)	+= board-sh7757lcr.o
+obj-$(CONFIG_SH_APSH4A3A)	+= board-apsh4a3a.o
+obj-$(CONFIG_SH_APSH4AD0A)	+= board-apsh4ad0a.o
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
new file mode 100644
index 0000000..8e2a270
--- /dev/null
+++ b/arch/sh/boards/board-apsh4a3a.c
@@ -0,0 +1,175 @@
+/*
+ * ALPHAPROJECT AP-SH4A-3A Support.
+ *
+ * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
+ * Copyright (C) 2008  Yoshihiro Shimoda
+ * Copyright (C) 2009  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/physmap.h>
+#include <linux/smsc911x.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+#include <asm/clock.h>
+
+static struct mtd_partition nor_flash_partitions[] = {
+	{
+		.name		= "loader",
+		.offset		= 0x00000000,
+		.size		= 512 * 1024,
+	},
+	{
+		.name		= "bootenv",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 512 * 1024,
+	},
+	{
+		.name		= "kernel",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 4 * 1024 * 1024,
+	},
+	{
+		.name		= "data",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+	},
+};
+
+static struct physmap_flash_data nor_flash_data = {
+	.width		= 4,
+	.parts		= nor_flash_partitions,
+	.nr_parts	= ARRAY_SIZE(nor_flash_partitions),
+};
+
+static struct resource nor_flash_resources[] = {
+	[0]	= {
+		.start	= 0x00000000,
+		.end	= 0x01000000 - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device nor_flash_device = {
+	.name		= "physmap-flash",
+	.dev		= {
+		.platform_data	= &nor_flash_data,
+	},
+	.num_resources	= ARRAY_SIZE(nor_flash_resources),
+	.resource	= nor_flash_resources,
+};
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0xA4000000,
+		.end		= 0xA4000000 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x200),
+		.end		= evt2irq(0x200),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_16BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct platform_device *apsh4a3a_devices[] __initdata = {
+	&nor_flash_device,
+	&smsc911x_device,
+};
+
+static int __init apsh4a3a_devices_setup(void)
+{
+	return platform_add_devices(apsh4a3a_devices,
+				    ARRAY_SIZE(apsh4a3a_devices));
+}
+device_initcall(apsh4a3a_devices_setup);
+
+static int apsh4a3a_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333000);
+	clk_put(clk);
+
+	return ret;
+}
+
+/* Initialize the board */
+static void __init apsh4a3a_setup(char **cmdline_p)
+{
+	printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
+}
+
+static void __init apsh4a3a_init_irq(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ7654);
+}
+
+/* Return the board specific boot mode pin configuration */
+static int apsh4a3a_mode_pins(void)
+{
+	int value = 0;
+
+	/* These are the factory default settings of SW1 and SW2.
+	 * If you change these dip switches then you will need to
+	 * adjust the values below as well.
+	 */
+	value &= ~MODE_PIN0;  /* Clock Mode 16 */
+	value &= ~MODE_PIN1;
+	value &= ~MODE_PIN2;
+	value &= ~MODE_PIN3;
+	value |=  MODE_PIN4;
+	value &= ~MODE_PIN5;  /* 16-bit Area0 bus width */
+	value |=  MODE_PIN6;  /* Area 0 SRAM interface */
+	value |=  MODE_PIN7;
+	value |=  MODE_PIN8;  /* Little Endian */
+	value |=  MODE_PIN9;  /* Master Mode */
+	value |=  MODE_PIN10; /* Crystal resonator */
+	value |=  MODE_PIN11; /* Display Unit */
+	value |=  MODE_PIN12;
+	value &= ~MODE_PIN13; /* 29-bit address mode */
+	value |=  MODE_PIN14; /* No PLL step-up */
+
+	return value;
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_apsh4a3a __initmv = {
+	.mv_name		= "AP-SH4A-3A",
+	.mv_setup		= apsh4a3a_setup,
+	.mv_clk_init		= apsh4a3a_clk_init,
+	.mv_init_irq		= apsh4a3a_init_irq,
+	.mv_mode_pins		= apsh4a3a_mode_pins,
+};
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
new file mode 100644
index 0000000..e2bd218
--- /dev/null
+++ b/arch/sh/boards/board-apsh4ad0a.c
@@ -0,0 +1,125 @@
+/*
+ * ALPHAPROJECT AP-SH4AD-0A Support.
+ *
+ * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
+ * Copyright (C) 2010  Matt Fleming
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/smsc911x.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+static struct resource smsc911x_resources[] = {
+	[0] = {
+		.name		= "smsc911x-memory",
+		.start		= 0xA4000000,
+		.end		= 0xA4000000 + SZ_256 - 1,
+		.flags		= IORESOURCE_MEM,
+	},
+	[1] = {
+		.name		= "smsc911x-irq",
+		.start		= evt2irq(0x200),
+		.end		= evt2irq(0x200),
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+	.flags		= SMSC911X_USE_16BIT,
+	.phy_interface	= PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+	.name		= "smsc911x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smsc911x_resources),
+	.resource	= smsc911x_resources,
+	.dev = {
+		.platform_data = &smsc911x_config,
+	},
+};
+
+static struct platform_device *apsh4ad0a_devices[] __initdata = {
+	&smsc911x_device,
+};
+
+static int __init apsh4ad0a_devices_setup(void)
+{
+	return platform_add_devices(apsh4ad0a_devices,
+				    ARRAY_SIZE(apsh4ad0a_devices));
+}
+device_initcall(apsh4ad0a_devices_setup);
+
+static int apsh4ad0a_mode_pins(void)
+{
+	int value = 0;
+
+	/* These are the factory default settings of SW1 and SW2.
+	 * If you change these dip switches then you will need to
+	 * adjust the values below as well.
+	 */
+	value |=  MODE_PIN0;  /* Clock Mode 3 */
+	value |=  MODE_PIN1;
+	value &= ~MODE_PIN2;
+	value &= ~MODE_PIN3;
+	value &= ~MODE_PIN4;  /* 16-bit Area0 bus width  */
+	value |=  MODE_PIN5;
+	value |=  MODE_PIN6;
+	value |=  MODE_PIN7;  /* Normal mode */
+	value |=  MODE_PIN8;  /* Little Endian */
+	value |=  MODE_PIN9;  /* Crystal resonator */
+	value &= ~MODE_PIN10; /* 29-bit address mode */
+	value &= ~MODE_PIN11; /* PCI-E Root port */
+	value &= ~MODE_PIN12; /* 4 lane + 1 lane */
+	value |=  MODE_PIN13; /* AUD Enable */
+	value &= ~MODE_PIN14; /* Normal Operation */
+
+	return value;
+}
+
+static int apsh4ad0a_clk_init(void)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = clk_get(NULL, "extal");
+	if (!clk || IS_ERR(clk))
+		return PTR_ERR(clk);
+	ret = clk_set_rate(clk, 33333000);
+	clk_put(clk);
+
+	return ret;
+}
+
+/* Initialize the board */
+static void __init apsh4ad0a_setup(char **cmdline_p)
+{
+	pr_info("Alpha Project AP-SH4AD-0A support:\n");
+}
+
+static void __init apsh4ad0a_init_irq(void)
+{
+	plat_irq_setup_pins(IRQ_MODE_IRQ3210);
+}
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_apsh4ad0a __initmv = {
+	.mv_name		= "AP-SH4AD-0A",
+	.mv_setup		= apsh4ad0a_setup,
+	.mv_mode_pins		= apsh4ad0a_mode_pins,
+	.mv_clk_init		= apsh4ad0a_clk_init,
+	.mv_init_irq		= apsh4ad0a_init_irq,
+};
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
index 4cb3bb7..541d8a2 100644
--- a/arch/sh/boards/board-edosk7705.c
+++ b/arch/sh/boards/board-edosk7705.c
@@ -66,7 +66,7 @@
 	return platform_add_devices(edosk7705_devices,
 				    ARRAY_SIZE(edosk7705_devices));
 }
-__initcall(init_edosk7705_devices);
+device_initcall(init_edosk7705_devices);
 
 /*
  * The Machine Vector
diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c
index 35dc099..f47ac82 100644
--- a/arch/sh/boards/board-edosk7760.c
+++ b/arch/sh/boards/board-edosk7760.c
@@ -182,7 +182,7 @@
 	return platform_add_devices(edosk7760_devices,
 				    ARRAY_SIZE(edosk7760_devices));
 }
-__initcall(init_edosk7760_devices);
+device_initcall(init_edosk7760_devices);
 
 /*
  * The Machine Vector
diff --git a/arch/sh/boards/board-sh7785lcr.c b/arch/sh/boards/board-sh7785lcr.c
index fe7e686..ee65ff0 100644
--- a/arch/sh/boards/board-sh7785lcr.c
+++ b/arch/sh/boards/board-sh7785lcr.c
@@ -284,7 +284,7 @@
 	return platform_add_devices(sh7785lcr_devices,
 				    ARRAY_SIZE(sh7785lcr_devices));
 }
-__initcall(sh7785lcr_devices_setup);
+device_initcall(sh7785lcr_devices_setup);
 
 /* Initialize IRQ setting */
 void __init init_sh7785lcr_IRQ(void)
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 07ea908..3e5fc3b 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -14,6 +14,8 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/sh_flctl.h>
 #include <linux/delay.h>
@@ -430,11 +432,18 @@
 	},
 };
 
+static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
+};
+
 static struct platform_device sdhi0_cn3_device = {
 	.name		= "sh_mobile_sdhi",
 	.id             = 0, /* "sdhi0" clock */
 	.num_resources	= ARRAY_SIZE(sdhi0_cn3_resources),
 	.resource	= sdhi0_cn3_resources,
+	.dev = {
+		.platform_data = &sdhi0_cn3_data,
+	},
 	.archdata = {
 		.hwblk_id = HWBLK_SDHI0,
 	},
@@ -453,11 +462,18 @@
 	},
 };
 
+static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
+};
+
 static struct platform_device sdhi1_cn7_device = {
 	.name		= "sh_mobile_sdhi",
 	.id             = 1, /* "sdhi1" clock */
 	.num_resources	= ARRAY_SIZE(sdhi1_cn7_resources),
 	.resource	= sdhi1_cn7_resources,
+	.dev = {
+		.platform_data = &sdhi1_cn7_data,
+	},
 	.archdata = {
 		.hwblk_id = HWBLK_SDHI1,
 	},
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c
index 7e8216a..e89e8e1 100644
--- a/arch/sh/boards/mach-cayman/setup.c
+++ b/arch/sh/boards/mach-cayman/setup.c
@@ -165,7 +165,7 @@
 
 	return 0;
 }
-__initcall(smsc_superio_setup);
+device_initcall(smsc_superio_setup);
 
 static void __iomem *cayman_ioport_map(unsigned long port, unsigned int len)
 {
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index f48c492..33b6629 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -473,6 +473,7 @@
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
 	.set_pwr	= sdhi0_set_pwr,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -511,6 +512,7 @@
 static struct sh_mobile_sdhi_info sdhi1_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
 	.set_pwr	= sdhi1_set_pwr,
 };
 
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index 4499a37..adc9b4b 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -143,7 +143,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops hp6x0_pm_ops = {
+static const struct platform_suspend_ops hp6x0_pm_ops = {
 	.enter		= hp6x0_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 9b60eaa..7504daa 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/onenand.h>
@@ -366,6 +367,7 @@
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device kfr2r09_sh_sdhi0_device = {
diff --git a/arch/sh/boards/mach-landisk/irq.c b/arch/sh/boards/mach-landisk/irq.c
index e79412a..c00ace3 100644
--- a/arch/sh/boards/mach-landisk/irq.c
+++ b/arch/sh/boards/mach-landisk/irq.c
@@ -1,9 +1,10 @@
 /*
- * arch/sh/boards/landisk/irq.c
+ * arch/sh/boards/mach-landisk/irq.c
  *
  * I-O DATA Device, Inc. LANDISK Support
  *
  * Copyright (C) 2005-2007 kogiidena
+ * Copyright (C) 2011 Nobuhiro Iwamatsu
  *
  * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
  * Based largely on io_se.c.
@@ -12,44 +13,54 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  */
+
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <mach-landisk/mach/iodata_landisk.h>
 
-static void disable_landisk_irq(struct irq_data *data)
-{
-	unsigned char mask = 0xff ^ (0x01 << (data->irq - 5));
+enum {
+	UNUSED = 0,
 
-	__raw_writeb(__raw_readb(PA_IMASK) & mask, PA_IMASK);
-}
-
-static void enable_landisk_irq(struct irq_data *data)
-{
-	unsigned char value = (0x01 << (data->irq - 5));
-
-	__raw_writeb(__raw_readb(PA_IMASK) | value, PA_IMASK);
-}
-
-static struct irq_chip landisk_irq_chip __read_mostly = {
-	.name		= "LANDISK",
-	.irq_mask	= disable_landisk_irq,
-	.irq_unmask	= enable_landisk_irq,
+	PCI_INTA, /* PCI int A */
+	PCI_INTB, /* PCI int B */
+	PCI_INTC, /* PCI int C */
+	PCI_INTD, /* PCI int D */
+	ATA,	  /* ATA */
+	FATA,	  /* CF */
+	POWER,	  /* Power switch */
+	BUTTON,	  /* Button switch */
 };
 
+/* Vectors for LANDISK */
+static struct intc_vect vectors_landisk[] __initdata = {
+	INTC_IRQ(PCI_INTA, IRQ_PCIINTA),
+	INTC_IRQ(PCI_INTB, IRQ_PCIINTB),
+	INTC_IRQ(PCI_INTC, IRQ_PCIINTC),
+	INTC_IRQ(PCI_INTD, IRQ_PCIINTD),
+	INTC_IRQ(ATA, IRQ_ATA),
+	INTC_IRQ(FATA, IRQ_FATA),
+	INTC_IRQ(POWER, IRQ_POWER),
+	INTC_IRQ(BUTTON, IRQ_BUTTON),
+};
+
+/* IRLMSK mask register layout for LANDISK */
+static struct intc_mask_reg mask_registers_landisk[] __initdata = {
+	{ PA_IMASK, 0, 8, /* IRLMSK */
+	  {  BUTTON, POWER, FATA, ATA,
+	     PCI_INTD, PCI_INTC, PCI_INTB, PCI_INTA,
+	  }
+	},
+};
+
+static DECLARE_INTC_DESC(intc_desc_landisk, "landisk", vectors_landisk, NULL,
+			mask_registers_landisk, NULL, NULL);
 /*
  * Initialize IRQ setting
  */
 void __init init_landisk_IRQ(void)
 {
-	int i;
-
-	for (i = 5; i < 14; i++) {
-		disable_irq_nosync(i);
-		set_irq_chip_and_handler_name(i, &landisk_irq_chip,
-					      handle_level_irq, "level");
-		enable_landisk_irq(irq_get_irq_data(i));
-	}
+	register_intc_controller(&intc_desc_landisk);
 	__raw_writeb(0x00, PA_PWRINT_CLR);
 }
diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c
index 50337acc..94186cf 100644
--- a/arch/sh/boards/mach-landisk/setup.c
+++ b/arch/sh/boards/mach-landisk/setup.c
@@ -21,8 +21,6 @@
 #include <mach-landisk/mach/iodata_landisk.h>
 #include <asm/io.h>
 
-void init_landisk_IRQ(void);
-
 static void landisk_power_off(void)
 {
         __raw_writeb(0x01, PA_SHUTDOWN);
@@ -83,7 +81,7 @@
 				    ARRAY_SIZE(landisk_devices));
 }
 
-__initcall(landisk_devices_setup);
+device_initcall(landisk_devices_setup);
 
 static void __init landisk_setup(char **cmdline_p)
 {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index c8acfec..03a7ffe 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -13,6 +13,7 @@
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/nand.h>
 #include <linux/i2c.h>
@@ -410,6 +411,7 @@
 static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi_cn9_device = {
diff --git a/arch/sh/boards/mach-r2d/setup.c b/arch/sh/boards/mach-r2d/setup.c
index b84df6a..4b98a52 100644
--- a/arch/sh/boards/mach-r2d/setup.c
+++ b/arch/sh/boards/mach-r2d/setup.c
@@ -258,7 +258,7 @@
 	return platform_add_devices(rts7751r2d_devices,
 				    ARRAY_SIZE(rts7751r2d_devices));
 }
-__initcall(rts7751r2d_devices_setup);
+device_initcall(rts7751r2d_devices_setup);
 
 static void rts7751r2d_power_off(void)
 {
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 75e4ddb..1521aa7 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -15,13 +15,13 @@
 #include <linux/i2c.h>
 #include <linux/irq.h>
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <mach/fpga.h>
 #include <mach/irq.h>
 #include <asm/machvec.h>
 #include <asm/heartbeat.h>
 #include <asm/sizes.h>
 #include <asm/clock.h>
-#include <asm/clkdev.h>
 #include <asm/reboot.h>
 #include <asm/smp-ops.h>
 
@@ -135,7 +135,7 @@
 
 	return sdk7786_i2c_setup();
 }
-__initcall(sdk7786_devices_setup);
+device_initcall(sdk7786_devices_setup);
 
 static int sdk7786_mode_pins(void)
 {
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 33039e0..8ab8330 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -77,7 +77,7 @@
 {
 	return platform_add_devices(se7206_devices, ARRAY_SIZE(se7206_devices));
 }
-__initcall(se7206_devices_setup);
+device_initcall(se7206_devices_setup);
 
 static int se7206_mode_pins(void)
 {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527a0cd..5276793 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/delay.h>
 #include <linux/smc91x.h>
@@ -318,6 +319,10 @@
 	},
 };
 
+static struct platform_device fsi_ak4642_device = {
+	.name		= "sh_fsi_a_ak4642",
+};
+
 /* KEYSC in SoC (Needs SW33-2 set to ON) */
 static struct sh_keysc_info keysc_info = {
 	.mode = SH_KEYSC_MODE_1,
@@ -467,6 +472,7 @@
 static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi0_cn7_device = {
@@ -498,6 +504,7 @@
 static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi1_cn8_device = {
@@ -590,6 +597,7 @@
 	&sh7724_usb0_host_device,
 	&sh7724_usb1_gadget_device,
 	&fsi_device,
+	&fsi_ak4642_device,
 	&sdhi0_cn7_device,
 	&sdhi1_cn8_device,
 	&irda_device,
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 9fbc51b..4ed60c5 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -48,7 +48,7 @@
 {
 	return platform_add_devices(se7751_devices, ARRAY_SIZE(se7751_devices));
 }
-__initcall(se7751_devices_setup);
+device_initcall(se7751_devices_setup);
 
 /*
  * The Machine Vector
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index 1b20099..f83ac79 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -108,7 +108,7 @@
 		__raw_writeb(real_minutes % 10, RTC_MIN1);
 		__raw_writeb(real_minutes / 10, RTC_MIN10);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c
index af4a0c0..d4f79b2 100644
--- a/arch/sh/boards/mach-sh03/setup.c
+++ b/arch/sh/boards/mach-sh03/setup.c
@@ -96,7 +96,7 @@
 
 	return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices));
 }
-__initcall(sh03_devices_setup);
+device_initcall(sh03_devices_setup);
 
 static struct sh_machine_vector mv_sh03 __initmv = {
 	.mv_name		= "Interface (CTP/PCI-SH03)",
diff --git a/arch/sh/boot/romimage/mmcif-sh7724.c b/arch/sh/boot/romimage/mmcif-sh7724.c
index 14863d7..c84e783 100644
--- a/arch/sh/boot/romimage/mmcif-sh7724.c
+++ b/arch/sh/boot/romimage/mmcif-sh7724.c
@@ -21,9 +21,6 @@
 #define HIZCRC		0xa405015c
 #define DRVCRA		0xa405018a
 
-enum { MMCIF_PROGRESS_ENTER, MMCIF_PROGRESS_INIT,
-       MMCIF_PROGRESS_LOAD, MMCIF_PROGRESS_DONE };
-
 /* SH7724 specific MMCIF loader
  *
  * loads the romImage from an MMC card starting from block 512
@@ -63,7 +60,9 @@
 	mmcif_update_progress(MMCIF_PROGRESS_LOAD);
 
 	/* load kernel via MMCIF interface */
-	sh_mmcif_boot_slurp(MMCIF_BASE, buf, no_bytes);
+	sh_mmcif_boot_do_read(MMCIF_BASE, 512,
+	                      (no_bytes + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS,
+			      buf);
 
 	/* disable clock to the MMCIF hardware block */
 	__raw_writel(__raw_readl(MSTPCR2) | 0x20000000, MSTPCR2);
diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig
new file mode 100644
index 0000000..6cb3279
--- /dev/null
+++ b/arch/sh/configs/apsh4a3a_defconfig
@@ -0,0 +1,102 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CPU_SUBTYPE_SH7785=y
+CONFIG_MEMORY_START=0x0C000000
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_SH_APSH4A3A=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEXEC=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_SH7785FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
new file mode 100644
index 0000000..e71a531
--- /dev/null
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -0,0 +1,133 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_RCU_TRACE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_BLK_CGROUP=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_CPU_SUBTYPE_SH7786=y
+CONFIG_MEMORY_SIZE=0x10000000
+CONFIG_HUGETLB_PAGE_SIZE_1MB=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_SH_STORE_QUEUES=y
+CONFIG_SH_APSH4AD0A=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_SH_CPU_FREQ=y
+CONFIG_KEXEC=y
+CONFIG_SECCOMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+CONFIG_PM_VERBOSE=y
+CONFIG_PM_RUNTIME=y
+CONFIG_CPU_IDLE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=6
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_SH7785FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DWARF_UNWINDER=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
index 273f3fa..5f7f667 100644
--- a/arch/sh/configs/sh7757lcr_defconfig
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -39,21 +39,15 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 # CONFIG_FW_LOADER is not set
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
 CONFIG_VITESSE_PHY=y
-CONFIG_MDIO_BITBANG=y
 CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
+CONFIG_SH_ETH=y
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_WLAN is not set
 # CONFIG_KEYBOARD_ATKBD is not set
 # CONFIG_MOUSE_PS2 is not set
 # CONFIG_SERIO is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=3
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
@@ -63,7 +57,6 @@
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_INOTIFY=y
 CONFIG_ISO9660_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_PROC_KCORE=y
@@ -76,10 +69,8 @@
 CONFIG_NLS_CODEPAGE_932=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_FTRACE is not set
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/sh/drivers/pci/fixups-landisk.c b/arch/sh/drivers/pci/fixups-landisk.c
index bb1a6bb..95c6e2d 100644
--- a/arch/sh/drivers/pci/fixups-landisk.c
+++ b/arch/sh/drivers/pci/fixups-landisk.c
@@ -1,9 +1,10 @@
 /*
- * arch/sh/drivers/pci/ops-landisk.c
+ * arch/sh/drivers/pci/fixups-landisk.c
  *
  * PCI initialization for the I-O DATA Device, Inc. LANDISK board
  *
  * Copyright (C) 2006 kogiidena
+ * Copyright (C) 2010 Nobuhiro Iwamatsu
  *
  * May be copied or modified under the terms of the GNU General Public
  * License.  See linux/COPYING for more information.
@@ -15,6 +16,9 @@
 #include <linux/pci.h>
 #include "pci-sh4.h"
 
+#define PCIMCR_MRSET_OFF	0xBFFFFFFF
+#define PCIMCR_RFSH_OFF		0xFFFFFFFB
+
 int pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
 {
 	/*
@@ -26,9 +30,29 @@
 	int irq = ((slot + pin - 1) & 0x3) + 5;
 
 	if ((slot | (pin - 1)) > 0x3) {
-		printk("PCI: Bad IRQ mapping request for slot %d pin %c\n",
+		printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n",
 		       slot, pin - 1 + 'A');
 		return -1;
 	}
 	return irq;
 }
+
+int pci_fixup_pcic(struct pci_channel *chan)
+{
+	unsigned long bcr1, mcr;
+
+	bcr1 = __raw_readl(SH7751_BCR1);
+	bcr1 |= 0x40080000;	/* Enable Bit 19 BREQEN, set PCIC to slave */
+	pci_write_reg(chan, bcr1, SH4_PCIBCR1);
+
+	mcr = __raw_readl(SH7751_MCR);
+	mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
+	pci_write_reg(chan, mcr, SH4_PCIMCR);
+
+	pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5);
+	pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6);
+	pci_write_reg(chan, 0x0c000000, SH4_PCILAR0);
+	pci_write_reg(chan, 0x00000000, SH4_PCILAR1);
+
+	return 0;
+}
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index 7b42c24..afc2455 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -107,7 +107,7 @@
 		device_remove_file(&pdev->dev, &dev_attr_switch);
 
 	platform_set_drvdata(pdev, NULL);
-	flush_scheduled_work();
+	flush_work_sync(&psw->work);
 	del_timer_sync(&psw->debounce);
 	free_irq(irq, pdev);
 
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 89ab2c5..28c5aa5 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -11,11 +11,6 @@
  *
  * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
  * automatically, there are also __raw versions, which do not.
- *
- * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
- * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
- * these have the same semantics as the __raw variants, and as such, all
- * new code should be using the __raw versions.
  */
 #include <linux/errno.h>
 #include <asm/cache.h>
@@ -231,52 +226,6 @@
 
 #endif
 
-/*
- * Legacy SuperH on-chip I/O functions
- *
- * These are all deprecated, all new (and especially cross-platform) code
- * should be using the __raw_xxx() routines directly.
- */
-static inline u8 __deprecated ctrl_inb(unsigned long addr)
-{
-	return __raw_readb(addr);
-}
-
-static inline u16 __deprecated ctrl_inw(unsigned long addr)
-{
-	return __raw_readw(addr);
-}
-
-static inline u32 __deprecated ctrl_inl(unsigned long addr)
-{
-	return __raw_readl(addr);
-}
-
-static inline u64 __deprecated ctrl_inq(unsigned long addr)
-{
-	return __raw_readq(addr);
-}
-
-static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
-{
-	__raw_writeb(v, addr);
-}
-
-static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
-{
-	__raw_writew(v, addr);
-}
-
-static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
-{
-	__raw_writel(v, addr);
-}
-
-static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
-{
-	__raw_writeq(v, addr);
-}
-
 #define IO_SPACE_LIMIT 0xffffffff
 
 /* synco on SH-4A, otherwise a nop */
@@ -341,7 +290,15 @@
 	 * mapping must be done by the PMB or by using page tables.
 	 */
 	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
 			return (void __iomem *)P1SEGADDR(offset);
 
 		return (void __iomem *)P2SEGADDR(offset);
diff --git a/arch/sh/include/asm/ioctls.h b/arch/sh/include/asm/ioctls.h
index eb6c4c6..84e85a7 100644
--- a/arch/sh/include/asm/ioctls.h
+++ b/arch/sh/include/asm/ioctls.h
@@ -85,6 +85,7 @@
 #define TCSETSF2	_IOW('T', 45, struct termios2)
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
 
 #define TIOCSERCONFIG	_IO('T', 83) /* 0x5453 */
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index dd5d6e5..57c5c3d 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -31,6 +31,7 @@
 	int (*mv_mode_pins)(void);
 
 	void (*mv_mem_init)(void);
+	void (*mv_mem_reserve)(void);
 };
 
 extern struct sh_machine_vector sh_mv;
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 43528ec..b799fe7 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -76,6 +76,10 @@
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
 
+#ifdef CONFIG_X2TLB
+#define _PAGE_PCC_MASK	0x00000000	/* No legacy PTEA support */
+#else
+
 /* software: moves to PTEA.TC (Timing Control) */
 #define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
 #define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */
@@ -89,7 +93,8 @@
 #define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
 #define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 16 bit bus */
 
-#ifndef CONFIG_X2TLB
+#define _PAGE_PCC_MASK	0xe0000001
+
 /* copy the ptea attributes */
 static inline unsigned long copy_ptea_attributes(unsigned long x)
 {
@@ -231,13 +236,7 @@
 					   _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_PCC(slot, type) \
-			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
-				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
-				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-					   _PAGE_EXT_KERN_WRITE | \
-					   _PAGE_EXT_KERN_EXEC) \
-				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
-				 (type))
+			__pgprot(0)
 
 #elif defined(CONFIG_MMU) /* SH-X TLB */
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index c9e7cbc..9c7bdfc 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -35,7 +35,7 @@
 	CPU_SH7723, CPU_SH7724, CPU_SH7757, CPU_SHX3,
 
 	/* SH4AL-DSP types */
-	CPU_SH7343, CPU_SH7722, CPU_SH7366,
+	CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
 
 	/* SH-5 types */
         CPU_SH5_101, CPU_SH5_103,
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e3c73cd..900f8d7 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -194,15 +194,17 @@
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
 #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)
+
 #define PREFETCH_STRIDE		L1_CACHE_BYTES
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW
-static inline void prefetch(void *x)
+
+static inline void prefetch(const void *x)
 {
 	__builtin_prefetch(x, 0, 3);
 }
 
-static inline void prefetchw(void *x)
+static inline void prefetchw(const void *x)
 {
 	__builtin_prefetch(x, 1, 3);
 }
diff --git a/arch/sh/include/mach-common/mach/romimage.h b/arch/sh/include/mach-common/mach/romimage.h
index 08fb422..3670455 100644
--- a/arch/sh/include/mach-common/mach/romimage.h
+++ b/arch/sh/include/mach-common/mach/romimage.h
@@ -4,7 +4,7 @@
 
 #else /* __ASSEMBLY__ */
 
-extern inline void mmcif_update_progress(int nr)
+static inline void mmcif_update_progress(int nr)
 {
 }
 
diff --git a/arch/sh/include/mach-ecovec24/mach/romimage.h b/arch/sh/include/mach-ecovec24/mach/romimage.h
index 1dcf5e6..d63ef51 100644
--- a/arch/sh/include/mach-ecovec24/mach/romimage.h
+++ b/arch/sh/include/mach-ecovec24/mach/romimage.h
@@ -35,7 +35,7 @@
 #define HIZCRA		0xa4050158
 #define PGDR		0xa405012c
 
-extern inline void mmcif_update_progress(int nr)
+static inline void mmcif_update_progress(int nr)
 {
 	/* disable Hi-Z for LED pins */
 	__raw_writew(__raw_readw(HIZCRA) & ~(1 << 1), HIZCRA);
diff --git a/arch/sh/include/mach-kfr2r09/mach/romimage.h b/arch/sh/include/mach-kfr2r09/mach/romimage.h
index 976256a..7a88316 100644
--- a/arch/sh/include/mach-kfr2r09/mach/romimage.h
+++ b/arch/sh/include/mach-kfr2r09/mach/romimage.h
@@ -23,7 +23,7 @@
 
 #else /* __ASSEMBLY__ */
 
-extern inline void mmcif_update_progress(int nr)
+static inline void mmcif_update_progress(int nr)
 {
 }
 
diff --git a/arch/sh/include/mach-landisk/mach/iodata_landisk.h b/arch/sh/include/mach-landisk/mach/iodata_landisk.h
index 6fb04ab..f432773 100644
--- a/arch/sh/include/mach-landisk/mach/iodata_landisk.h
+++ b/arch/sh/include/mach-landisk/mach/iodata_landisk.h
@@ -2,7 +2,7 @@
 #define __ASM_SH_IODATA_LANDISK_H
 
 /*
- * linux/include/asm-sh/landisk/iodata_landisk.h
+ * arch/sh/include/mach-landisk/mach/iodata_landisk.h
  *
  * Copyright (C) 2000  Atom Create Engineering Co., Ltd.
  *
@@ -27,7 +27,7 @@
 
 #define IRQ_PCIINTA	5		/* PCI INTA IRQ */
 #define IRQ_PCIINTB	6		/* PCI INTB IRQ */
-#define IRQ_PCIINDC	7		/* PCI INTC IRQ */
+#define IRQ_PCIINTC	7		/* PCI INTC IRQ */
 #define IRQ_PCIINTD	8		/* PCI INTD IRQ */
 #define IRQ_ATA		9		/* ATA IRQ */
 #define IRQ_FATA	10		/* FATA IRQ */
@@ -35,6 +35,8 @@
 #define IRQ_BUTTON	12		/* USL-5P Button IRQ */
 #define IRQ_FAULT	13		/* USL-5P Fault  IRQ */
 
+void init_landisk_IRQ(void);
+
 #define __IO_PREFIX landisk
 #include <asm/io_generic.h>
 
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
index e80a936..f47be87 100644
--- a/arch/sh/kernel/cpu/proc.c
+++ b/arch/sh/kernel/cpu/proc.c
@@ -25,7 +25,7 @@
 	[CPU_SH5_101]	= "SH5-101",	[CPU_SH5_103]	= "SH5-103",
 	[CPU_MXG]	= "MX-G",	[CPU_SH7723]	= "SH7723",
 	[CPU_SH7366]	= "SH7366",	[CPU_SH7724]	= "SH7724",
-	[CPU_SH_NONE]	= "Unknown"
+	[CPU_SH7372]	= "SH7372",	[CPU_SH_NONE]	= "Unknown"
 };
 
 const char *get_cpu_subtype(struct sh_cpuinfo *c)
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index c363851..0f8befc 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -62,6 +62,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xf8400000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 88, 88, 88, 88 },
 };
@@ -77,6 +79,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xf8410000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 92, 92, 92, 92 },
 };
@@ -92,6 +96,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xf8420000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 96, 96, 96, 96 },
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 6c96ea0..949bf2b 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -201,6 +201,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xff804000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 220, 220, 220, 220 },
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index d08bf4c..9df558d 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -180,6 +180,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffe8000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 180, 180, 180, 180 }
 };
@@ -195,6 +197,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xfffe8800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 184, 184, 184, 184 }
 };
@@ -210,6 +214,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfffe9000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 188, 188, 188, 188 }
 };
@@ -225,6 +231,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfffe9800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 192, 192, 192, 192 }
 };
@@ -240,6 +248,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xfffea000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 196, 196, 196, 196 }
 };
@@ -255,6 +265,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xfffea800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 200, 200, 200, 200 }
 };
@@ -270,6 +282,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xfffeb000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 204, 204, 204, 204 }
 };
@@ -285,6 +299,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xfffeb800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 208, 208, 208, 208 }
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 832f401..a43124e 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -176,6 +176,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffe8000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 192, 192, 192, 192 },
 };
@@ -191,6 +193,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xfffe8800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 196, 196, 196, 196 },
 };
@@ -206,6 +210,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfffe9000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 200, 200, 200, 200 },
 };
@@ -221,6 +227,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfffe9800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		=  { 204, 204, 204, 204 },
 };
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index dc47b04..5d14f84 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -136,6 +136,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffe8000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 240, 240, 240, 240 },
 };
@@ -151,6 +153,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xfffe8800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 244, 244, 244, 244 },
 };
@@ -166,6 +170,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfffe9000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 248, 248, 248, 248 },
 };
@@ -181,6 +187,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfffe9800,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 252, 252, 252, 252 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index baadd7f..cd2e702 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -70,6 +70,9 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xa4410000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TIE | SCSCR_RIE  | SCSCR_TE |
+			  SCSCR_RE  | SCSCR_CKE1 | SCSCR_CKE0,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { 56, 56, 56 },
 };
@@ -85,6 +88,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4400000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 52, 52 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 3cf8c8e..4551ad6 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -109,6 +109,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfffffe80,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCI,
 	.irqs		= { 23, 23, 23, 0 },
 };
@@ -126,6 +128,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4000150,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 56, 56, 56, 56 },
 };
@@ -143,6 +147,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xa4000140,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_IRDA,
 	.irqs		= { 52, 52, 52, 52 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index b0c2fb4..78f6b01 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -99,6 +99,9 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xa4400000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+			  SCSCR_CKE1 | SCSCR_CKE0,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 52, 52, 52 },
 };
@@ -114,6 +117,9 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4410000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+			  SCSCR_CKE1 | SCSCR_CKE0,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs           = { 56, 56, 56, 56 },
 };
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 24b1713..365b94a 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -1,5 +1,5 @@
 /*
- * SH7720 Setup
+ * Setup code for SH7720, SH7721.
  *
  *  Copyright (C) 2007  Markus Brunner, Mark Jonas
  *  Copyright (C) 2009  Paul Mundt
@@ -51,6 +51,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xa4430000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs		= { 80, 80, 80, 80 },
 };
@@ -66,6 +68,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xa4438000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_4,
 	.type		= PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index b93458f..971cf0f 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -151,8 +151,14 @@
 			boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
 			break;
 		case 0x10:
+		case 0x11:
 			boot_cpu_data.type = CPU_SH7757;
 			break;
+		case 0xd0:
+		case 0x40: /* yon-ten-go */
+			boot_cpu_data.type = CPU_SH7372;
+			break;
+
 		}
 		break;
 	case 0x4000:	/* 1st cut */
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index e916b18..5b28331 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -18,6 +18,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 41, 43, 42 },
 };
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 911d196..c2b0aaa 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/sh_timer.h>
 #include <linux/serial_sci.h>
+#include <asm/machtypes.h>
 
 static struct resource rtc_resources[] = {
 	[0] = {
@@ -35,33 +36,37 @@
 	.resource	= rtc_resources,
 };
 
-static struct plat_sci_port scif0_platform_data = {
+static struct plat_sci_port sci_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCI,
 	.irqs		= { 23, 23, 23, 0 },
 };
 
-static struct platform_device scif0_device = {
+static struct platform_device sci_device = {
 	.name		= "sh-sci",
 	.id		= 0,
 	.dev		= {
-		.platform_data	= &scif0_platform_data,
+		.platform_data	= &sci_platform_data,
 	},
 };
 
-static struct plat_sci_port scif1_platform_data = {
+static struct plat_sci_port scif_platform_data = {
 	.mapbase	= 0xffe80000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_TE | SCSCR_RE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
 
-static struct platform_device scif1_device = {
+static struct platform_device scif_device = {
 	.name		= "sh-sci",
 	.id		= 1,
 	.dev		= {
-		.platform_data	= &scif1_platform_data,
+		.platform_data	= &scif_platform_data,
 	},
 };
 
@@ -210,8 +215,6 @@
 #endif
 
 static struct platform_device *sh7750_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
 	&rtc_device,
 	&tmu0_device,
 	&tmu1_device,
@@ -226,14 +229,19 @@
 
 static int __init sh7750_devices_setup(void)
 {
+	if (mach_is_rts7751r2d()) {
+		platform_device_register(&scif_device);
+	} else {
+		platform_device_register(&sci_device);
+		platform_device_register(&scif_device);
+	}
+
 	return platform_add_devices(sh7750_devices,
 				    ARRAY_SIZE(sh7750_devices));
 }
 arch_initcall(sh7750_devices_setup);
 
 static struct platform_device *sh7750_early_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
 	&tmu0_device,
 	&tmu1_device,
 	&tmu2_device,
@@ -247,6 +255,14 @@
 
 void __init plat_early_device_setup(void)
 {
+	if (mach_is_rts7751r2d()) {
+		scif_platform_data.scscr |= SCSCR_CKE1;
+		early_platform_add_devices(&scif_device, 1);
+	} else {
+		early_platform_add_devices(&sci_device, 1);
+		early_platform_add_devices(&scif_device, 1);
+	}
+
 	early_platform_add_devices(sh7750_early_devices,
 				   ARRAY_SIZE(sh7750_early_devices));
 }
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 48ea8fe..78bbf23 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -129,6 +129,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xfe600000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 53, 55, 54 },
 };
@@ -145,6 +147,8 @@
 	.mapbase	= 0xfe610000,
 	.flags		= UPF_BOOT_AUTOCONF,
 	.type		= PORT_SCIF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.irqs		= { 72, 73, 75, 74 },
 };
 
@@ -159,6 +163,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfe620000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 77, 79, 78 },
 };
@@ -174,6 +180,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfe480000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCI,
 	.irqs		= { 80, 81, 82, 0 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 3681cafd..1b88483 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -19,6 +19,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -34,6 +36,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -49,6 +53,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -64,6 +70,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase        = 0xffe30000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 83, 83, 83, 83 },
 };
@@ -360,6 +368,8 @@
 
 enum {
 	UNUSED = 0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -375,15 +385,13 @@
 	I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
 	I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
 	SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
-	IRDA,
-	SDHI0, SDHI1, SDHI2, SDHI3,
-	CMT, TSIF, SIU,
+	IRDA, SDHI, CMT, TSIF, SIU,
 	TMU0, TMU1, TMU2,
 	JPU, LCDC,
 
 	/* interrupt groups */
 
-	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, SDHI, USB,
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
 };
 
 static struct intc_vect vectors[] __initdata = {
@@ -412,8 +420,8 @@
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
 	INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -431,7 +439,6 @@
 	INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
 	INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
 	INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 	INTC_GROUP(USB, USBI0, USBI1),
 };
 
@@ -452,7 +459,7 @@
 	  { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USBI1, USBI0 } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -488,9 +495,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7343", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7343",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 8dab9e1..82616af 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -21,6 +21,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 80, 80, 80, 80 },
 };
@@ -319,6 +321,8 @@
 
 enum {
 	UNUSED=0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -332,14 +336,13 @@
 	DENC, MSIOF,
 	FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
 	I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
-	SDHI0, SDHI1, SDHI2, SDHI3,
-	CMT, TSIF, SIU,
+	SDHI, CMT, TSIF, SIU,
 	TMU0, TMU1, TMU2,
 	VEU2, LCDC,
 
 	/* interrupt groups */
 
-	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
 };
 
 static struct intc_vect vectors[] __initdata = {
@@ -364,8 +367,8 @@
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
 	INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -381,7 +384,6 @@
 	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
 		   FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
 	INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 };
 
 static struct intc_mask_reg mask_registers[] __initdata = {
@@ -403,7 +405,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -441,9 +443,13 @@
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7366",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index d551ed8..5813d80 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -181,6 +181,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -196,6 +198,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -211,6 +215,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -699,7 +705,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 0eadefd..0723822 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -24,6 +24,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -39,6 +41,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -54,6 +58,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -69,6 +75,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase        = 0xa4e30000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 56, 56, 56, 56 },
 };
@@ -84,6 +92,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase        = 0xa4e40000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 88, 88, 88, 88 },
 };
@@ -99,6 +109,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase        = 0xa4e50000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 109, 109, 109, 109 },
 };
@@ -719,7 +731,7 @@
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, DISABLED, ENABLED, ENABLED } },
+	    0, ENABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
 	{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -736,7 +748,7 @@
 	  { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	    FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { 0, DISABLED, ENABLED, ENABLED,
+	  { 0, ENABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA_SCIFA2, SIU_SIUI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 828c965..0333fe9 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -257,6 +257,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase        = 0xffe00000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 80, 80, 80, 80 },
 };
@@ -272,6 +274,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase        = 0xffe10000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 81, 81, 81, 81 },
 };
@@ -287,6 +291,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase        = 0xffe20000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type           = PORT_SCIF,
 	.irqs           = { 82, 82, 82, 82 },
 };
@@ -302,6 +308,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase        = 0xa4e30000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 56, 56, 56, 56 },
 };
@@ -317,6 +325,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase        = 0xa4e40000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 88, 88, 88, 88 },
 };
@@ -332,6 +342,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase        = 0xa4e50000,
 	.flags          = UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE,
+	.scbrr_algo_id	= SCBRR_ALGO_3,
 	.type           = PORT_SCIFA,
 	.irqs           = { 109, 109, 109, 109 },
 };
@@ -1144,7 +1156,7 @@
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, DISABLED, ENABLED, ENABLED } },
+	    0, ENABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
 	    DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -1166,7 +1178,7 @@
 	  { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	    I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA5, FSI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 749c638..9c1de26 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -20,6 +20,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xfe4b0000,		/* SCIF2 */
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -35,6 +37,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xfe4c0000,		/* SCIF3 */
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 76, 76, 76 },
 };
@@ -50,6 +54,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xfe4d0000,		/* SCIF4 */
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 104, 104, 104, 104 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 5b5f6b0..593eca6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -19,6 +19,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -34,6 +36,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffe08000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 76, 76, 76 },
 };
@@ -49,6 +53,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffe10000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 104, 104, 104, 104 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 7270d7f..2c6aa22 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -17,6 +17,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xff923000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 61, 61, 61, 61 },
 };
@@ -32,6 +34,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xff924000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 62, 62, 62, 62 },
 };
@@ -47,6 +51,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xff925000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 63, 63, 63, 63 },
 };
@@ -62,6 +68,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xff926000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 64, 64, 64, 64 },
 };
@@ -77,6 +85,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xff927000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 65, 65, 65, 65 },
 };
@@ -92,6 +102,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xff928000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 66, 66, 66, 66 },
 };
@@ -107,6 +119,8 @@
 static struct plat_sci_port scif6_platform_data = {
 	.mapbase	= 0xff929000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 67, 67, 67, 67 },
 };
@@ -122,6 +136,8 @@
 static struct plat_sci_port scif7_platform_data = {
 	.mapbase	= 0xff92a000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 68, 68, 68, 68 },
 };
@@ -137,6 +153,8 @@
 static struct plat_sci_port scif8_platform_data = {
 	.mapbase	= 0xff92b000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 69, 69, 69, 69 },
 };
@@ -152,6 +170,8 @@
 static struct plat_sci_port scif9_platform_data = {
 	.mapbase	= 0xff92c000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 70, 70, 70, 70 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 0f41486..08add7f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -20,6 +20,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffe00000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -35,6 +37,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffe10000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 76, 76, 76, 76 },
 };
@@ -379,6 +383,7 @@
 				    ARRAY_SIZE(sh7780_devices));
 }
 arch_initcall(sh7780_devices_setup);
+
 static struct platform_device *sh7780_early_devices[] __initdata = {
 	&scif0_device,
 	&scif1_device,
@@ -392,6 +397,13 @@
 
 void __init plat_early_device_setup(void)
 {
+	if (mach_is_sh2007()) {
+		scif0_platform_data.scscr &= ~SCSCR_CKE1;
+		scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
+		scif1_platform_data.scscr &= ~SCSCR_CKE1;
+		scif1_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
+	}
+
 	early_platform_add_devices(sh7780_early_devices,
 				   ARRAY_SIZE(sh7780_early_devices));
 }
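
Editor's note: mach_is_sh2007() above comes from the generated mach-type predicates (the mach-types list is extended further down in this series), and the quirk simply drops the external-clock bit and switches to the matching baud-rate algorithm before the early SCIF devices are registered. A minimal sketch of the same pattern for an invented board; the board name and the assumption that it lacks an external SCIF clock are purely illustrative:

static void __init otherboard_fixup_scif(void)
{
	/* Hypothetical board with no external SCIF clock wired up: fall
	 * back to the internal clock and the matching baud-rate algorithm. */
	scif0_platform_data.scscr &= ~SCSCR_CKE1;
	scif0_platform_data.scbrr_algo_id = SCBRR_ALGO_2;
}

Such a helper would be called from plat_early_device_setup() under a mach_is_otherboard() check, exactly as the sh2007 case is handled above.
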
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index c9a572b..18d8fc1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -23,6 +23,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffea0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 40, 40, 40 },
 };
@@ -38,6 +40,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffeb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 44, 44, 44, 44 },
 };
@@ -53,6 +57,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffec0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 60, 60, 60, 60 },
 };
@@ -68,6 +74,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xffed0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 61, 61, 61, 61 },
 };
@@ -83,6 +91,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xffee0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 62, 62, 62, 62 },
 };
@@ -98,6 +108,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xffef0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 63, 63, 63, 63 },
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index c016c00..1656b8c 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -29,6 +29,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffea0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 41, 43, 42 },
 };
@@ -47,6 +49,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffeb0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 44, 44, 44, 44 },
 };
@@ -62,6 +66,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffec0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 50, 50, 50, 50 },
 };
@@ -77,6 +83,8 @@
 static struct plat_sci_port scif3_platform_data = {
 	.mapbase	= 0xffed0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 51, 51, 51, 51 },
 };
@@ -92,6 +100,8 @@
 static struct plat_sci_port scif4_platform_data = {
 	.mapbase	= 0xffee0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 52, 52, 52 },
 };
@@ -107,6 +117,8 @@
 static struct plat_sci_port scif5_platform_data = {
 	.mapbase	= 0xffef0000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+	.scbrr_algo_id	= SCBRR_ALGO_1,
 	.type		= PORT_SCIF,
 	.irqs		= { 53, 53, 53, 53 },
 };
@@ -522,10 +534,13 @@
 	},
 };
 
-static struct resource usb_ohci_resources[] = {
+#define USB_EHCI_START 0xffe70000
+#define USB_OHCI_START 0xffe70400
+
+static struct resource usb_ehci_resources[] = {
 	[0] = {
-		.start	= 0xffe70400,
-		.end	= 0xffe704ff,
+		.start	= USB_EHCI_START,
+		.end	= USB_EHCI_START + 0x3ff,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
@@ -535,12 +550,35 @@
 	},
 };
 
-static u64 usb_ohci_dma_mask = DMA_BIT_MASK(32);
+static struct platform_device usb_ehci_device = {
+	.name		= "sh_ehci",
+	.id		= -1,
+	.dev = {
+		.dma_mask		= &usb_ehci_device.dev.coherent_dma_mask,
+		.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
+	.num_resources	= ARRAY_SIZE(usb_ehci_resources),
+	.resource	= usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+	[0] = {
+		.start	= USB_OHCI_START,
+		.end	= USB_OHCI_START + 0x3ff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= 77,
+		.end	= 77,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
 static struct platform_device usb_ohci_device = {
 	.name		= "sh_ohci",
 	.id		= -1,
 	.dev = {
-		.dma_mask		= &usb_ohci_dma_mask,
+		.dma_mask		= &usb_ohci_device.dev.coherent_dma_mask,
 		.coherent_dma_mask	= DMA_BIT_MASK(32),
 	},
 	.num_resources	= ARRAY_SIZE(usb_ohci_resources),
@@ -570,6 +608,7 @@
 
 static struct platform_device *sh7786_devices[] __initdata = {
 	&dma0_device,
+	&usb_ehci_device,
 	&usb_ohci_device,
 };
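
Editor's note: besides adding the EHCI device, the hunk above drops the separate static u64 DMA mask and instead points dev.dma_mask at the device's own coherent_dma_mask. In isolation the pattern looks like the sketch below (device and driver names are illustrative, not from the patch):

static struct platform_device example_hcd_device = {
	.name	= "example-hcd",
	.id	= -1,
	.dev	= {
		/* Reuse the embedded coherent mask as the streaming mask,
		 * so no extra static u64 per device is needed. */
		.dma_mask		= &example_hcd_device.dev.coherent_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
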
 
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 013f0b1..bb20880 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -29,6 +29,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= 0xffc30000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 40, 41, 43, 42 },
 };
@@ -44,6 +46,8 @@
 static struct plat_sci_port scif1_platform_data = {
 	.mapbase	= 0xffc40000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 44, 45, 47, 46 },
 };
@@ -59,6 +63,8 @@
 static struct plat_sci_port scif2_platform_data = {
 	.mapbase	= 0xffc60000,
 	.flags		= UPF_BOOT_AUTOCONF,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 52, 53, 55, 54 },
 };
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index d910666..18419f1 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -19,6 +19,8 @@
 static struct plat_sci_port scif0_platform_data = {
 	.mapbase	= PHYS_PERIPHERAL_BLOCK + 0x01030000,
 	.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+	.scbrr_algo_id	= SCBRR_ALGO_2,
 	.type		= PORT_SCIF,
 	.irqs		= { 39, 40, 42, 0 },
 };
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 83972aa..c19e2a9 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -81,7 +81,6 @@
 	state->target_residency = 1 * 2;
 	state->power_usage = 3;
 	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_SHALLOW;
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;
 
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index e559687..a6f95ae 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -141,7 +141,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops sh_pm_ops = {
+static const struct platform_suspend_ops sh_pm_ops = {
 	.enter          = sh_pm_enter,
 	.valid          = suspend_valid_only_mem,
 };
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index eb4cc4e..d1bffbc 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -568,7 +568,7 @@
 }
 
 /*
- * Flush the range [start,end] of kernel virtual adddress space from
+ * Flush the range [start,end] of kernel virtual address space from
  * the I-cache.  The corresponding range must be purged from the
  * D-cache also because the SH-5 doesn't have cache snooping between
  * the caches.  The addresses will be visible through the superpage
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 9163db3..d776234 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -35,7 +35,7 @@
 		if (pud) {
 			pmd = pmd_alloc(mm, pud, addr);
 			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
+				pte = pte_alloc_map(mm, NULL, pmd, addr);
 		}
 	}
 
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 3385b28..0d3f912 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -2,7 +2,7 @@
  * linux/arch/sh/mm/init.c
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2002 - 2010  Paul Mundt
+ *  Copyright (C) 2002 - 2011  Paul Mundt
  *
  *  Based on linux/arch/i386/mm/init.c:
  *   Copyright (C) 1995  Linus Torvalds
@@ -325,11 +325,17 @@
 	int nid;
 
 	memblock_init();
-
 	sh_mv.mv_mem_init();
 
 	early_reserve_mem();
 
+	/*
+	 * Once the early reservations are out of the way, give the
+	 * platforms a chance to kick out some memory.
+	 */
+	if (sh_mv.mv_mem_reserve)
+		sh_mv.mv_mem_reserve();
+
 	memblock_enforce_memory_limit(memory_limit);
 	memblock_analyze();
 
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 0e68465..6dd56c4 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -9,6 +9,7 @@
 HIGHLANDER		SH_HIGHLANDER
 RTS7751R2D		SH_RTS7751R2D
 RSK			SH_RSK
+ALPHA_BOARD		SH_ALPHA_BOARD
 
 #
 # List of companion chips / MFDs.
@@ -61,3 +62,5 @@
 POLARIS			SH_POLARIS
 KFR2R09			SH_KFR2R09
 ECOVEC			SH_ECOVEC
+APSH4A3A		SH_APSH4A3A
+APSH4AD0A		SH_APSH4AD0A
diff --git a/arch/sparc/include/asm/ioctls.h b/arch/sparc/include/asm/ioctls.h
index 53f4ee0..ed3807b 100644
--- a/arch/sparc/include/asm/ioctls.h
+++ b/arch/sparc/include/asm/ioctls.h
@@ -19,6 +19,7 @@
 #define TCSETS2		_IOW('T', 13, struct termios2)
 #define TCSETSW2	_IOW('T', 14, struct termios2)
 #define TCSETSF2	_IOW('T', 15, struct termios2)
+#define TIOCGDEV	_IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 
 /* Note that all the ioctls that are not available in Linux have a 
  * double underscore on the front to: a) avoid some programs to
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 35f4883..8505e0a 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -121,7 +121,7 @@
 	node = prom_searchsiblings(node, "obio");
 	node = prom_getchild(node);
 	node = prom_searchsiblings(node, "power");
-	if (node == 0 || node == -1)
+	if (node == 0 || (s32)node == -1)
 		return;
 
 	/* Map the power control register. */
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index e447938..0dc714f 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -375,5 +375,5 @@
 	return 0;
 }
 
-arch_initcall(cpu_type_probe);
+early_initcall(cpu_type_probe);
 #endif
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index ee3c7dd..8d348c4 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -23,17 +23,11 @@
 
 static void *module_map(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size || size > MODULES_LEN)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 }
 
 static char *dot2underscore(char *name)
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index b87873c..ae96cf5 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -168,4 +168,4 @@
 	return err;
 }
 
-arch_initcall(pcr_arch_init);
+early_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c
index a4446c0..82281a5 100644
--- a/arch/sparc/kernel/starfire.c
+++ b/arch/sparc/kernel/starfire.c
@@ -24,7 +24,7 @@
 void check_if_starfire(void)
 {
 	phandle ssnode = prom_finddevice("/ssp-serial");
-	if (ssnode != 0 && ssnode != -1)
+	if (ssnode != 0 && (s32)ssnode != -1)
 		this_is_starfire = 1;
 }
 
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 42ad2ba..1e97709 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -622,7 +622,7 @@
 static const char CHAFSR_IERR_msg[] =
 	"Internal processor error";
 static const char CHAFSR_ISAP_msg[] =
-	"System request parity error on incoming addresss";
+	"System request parity error on incoming address";
 static const char CHAFSR_UCU_msg[] =
 	"Uncorrectable E-cache ECC error for ifetch/data";
 static const char CHAFSR_UCC_msg[] =
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
index 5edcac1..e6067b7 100644
--- a/arch/sparc/mm/generic_32.c
+++ b/arch/sparc/mm/generic_32.c
@@ -50,7 +50,7 @@
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
index 04f2bf4..3cb00df 100644
--- a/arch/sparc/mm/generic_64.c
+++ b/arch/sparc/mm/generic_64.c
@@ -92,7 +92,7 @@
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 5fdddf1..f4e9764 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -214,7 +214,7 @@
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
+			pte = pte_alloc_map(mm, NULL, pmd, addr);
 	}
 	return pte;
 }
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index d342dba..0a601b3 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -60,7 +60,7 @@
 	prom_nodeops = romvec->pv_nodeops;
 
 	prom_root_node = prom_getsibling(0);
-	if((prom_root_node == 0) || (prom_root_node == -1))
+	if ((prom_root_node == 0) || ((s32)prom_root_node == -1))
 		prom_halt();
 
 	if((((unsigned long) prom_nodeops) == 0) || 
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
index 9c6ac4b..5016c5e 100644
--- a/arch/sparc/prom/init_64.c
+++ b/arch/sparc/prom/init_64.c
@@ -35,13 +35,13 @@
 	prom_cif_init(cif_handler, cif_stack);
 
 	prom_chosen_node = prom_finddevice(prom_chosen_path);
-	if (!prom_chosen_node || prom_chosen_node == -1)
+	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
 		prom_halt();
 
 	prom_stdout = prom_getint(prom_chosen_node, "stdout");
 
 	node = prom_finddevice("/openprom");
-	if (!node || node == -1)
+	if (!node || (s32)node == -1)
 		prom_halt();
 
 	prom_getstring(node, "version", prom_version, sizeof(prom_version));
diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
index bc8e4cb..f30e8d0 100644
--- a/arch/sparc/prom/tree_32.c
+++ b/arch/sparc/prom/tree_32.c
@@ -40,11 +40,11 @@
 {
 	phandle cnode;
 
-	if (node == -1)
+	if ((s32)node == -1)
 		return 0;
 
 	cnode = __prom_getchild(node);
-	if (cnode == 0 || cnode == -1)
+	if (cnode == 0 || (s32)cnode == -1)
 		return 0;
 
 	return cnode;
@@ -72,11 +72,11 @@
 {
 	phandle sibnode;
 
-	if (node == -1)
+	if ((s32)node == -1)
 		return 0;
 
 	sibnode = __prom_getsibling(node);
-	if (sibnode == 0 || sibnode == -1)
+	if (sibnode == 0 || (s32)sibnode == -1)
 		return 0;
 
 	return sibnode;
@@ -219,7 +219,7 @@
  */
 char *prom_nextprop(phandle node, char *oprop, char *buffer)
 {
-	if (node == 0 || node == -1)
+	if (node == 0 || (s32)node == -1)
 		return "";
 
 	return __prom_nextprop(node, oprop);
@@ -253,7 +253,7 @@
 				if (d != s + 3 && (!*d || *d == '/')
 				    && d <= s + 3 + 8) {
 					node2 = node;
-					while (node2 && node2 != -1) {
+					while (node2 && (s32)node2 != -1) {
 						if (prom_getproperty (node2, "reg", (char *)reg, sizeof (reg)) > 0) {
 							if (which_io == reg[0].which_io && phys_addr == reg[0].phys_addr) {
 								node = node2;
@@ -261,7 +261,7 @@
 							}
 						}
 						node2 = prom_getsibling(node2);
-						if (!node2 || node2 == -1)
+						if (!node2 || (s32)node2 == -1)
 							break;
 						node2 = prom_searchsiblings(prom_getsibling(node2), nbuf);
 					}
@@ -303,6 +303,7 @@
 	node = (*romvec->pv_v2devops.v2_inst2pkg)(inst);
 	restore_current();
 	spin_unlock_irqrestore(&prom_lock, flags);
-	if (node == -1) return 0;
+	if ((s32)node == -1)
+		return 0;
 	return node;
 }
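
Editor's note: the (s32) casts in these prom hunks exist because phandle is an unsigned 32-bit type while the PROM signals failure with -1; comparing an unsigned value against a signed -1 literal happens to work through implicit conversion but draws sign-comparison warnings and hides the intent. A tiny standalone illustration (not kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t phandle;	/* unsigned, as in the sparc OF code */
typedef int32_t s32;

int main(void)
{
	phandle node = (phandle)-1;	/* PROM "no such node" return value */

	/* The explicit cast states that we are checking for the -1
	 * failure marker, with no mixed-signedness comparison. */
	if ((s32)node == -1)
		puts("failure value detected");
	return 0;
}
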
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index d936600..92204c3 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -43,10 +43,10 @@
 {
 	phandle cnode;
 
-	if (node == -1)
+	if ((s32)node == -1)
 		return 0;
 	cnode = __prom_getchild(node);
-	if (cnode == -1)
+	if ((s32)cnode == -1)
 		return 0;
 	return cnode;
 }
@@ -56,10 +56,10 @@
 {
 	phandle cnode;
 
-	if (node == -1)
+	if ((s32)node == -1)
 		return 0;
 	cnode = prom_node_to_node("parent", node);
-	if (cnode == -1)
+	if ((s32)cnode == -1)
 		return 0;
 	return cnode;
 }
@@ -76,10 +76,10 @@
 {
 	phandle sibnode;
 
-	if (node == -1)
+	if ((s32)node == -1)
 		return 0;
 	sibnode = __prom_getsibling(node);
-	if (sibnode == -1)
+	if ((s32)sibnode == -1)
 		return 0;
 
 	return sibnode;
@@ -240,7 +240,7 @@
 	unsigned long args[7];
 
 	*buffer = 0;
-	if (node == -1)
+	if ((s32)node == -1)
 		return buffer;
 
 	args[0] = (unsigned long) prom_nextprop_name;
@@ -266,7 +266,7 @@
 	unsigned long args[7];
 	char buf[32];
 
-	if (node == -1) {
+	if ((s32)node == -1) {
 		*buffer = 0;
 		return buffer;
 	}
@@ -369,7 +369,7 @@
 	p1275_cmd_direct(args);
 
 	node = (int) args[4];
-	if (node == -1)
+	if ((s32)node == -1)
 		return 0;
 	return node;
 }
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index 50d6aa2..f8d1d0d 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -131,7 +131,7 @@
 
 config HIGHMEM
 	bool "Highmem support (EXPERIMENTAL)"
-	depends on !64BIT && EXPERIMENTAL
+	depends on !64BIT && BROKEN
 	default n
 	help
 	  This was used to allow UML to run with big amounts of memory.
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 1664cce..050e4dd 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -821,12 +821,12 @@
 
 static void unregister_winch(struct tty_struct *tty)
 {
-	struct list_head *ele;
+	struct list_head *ele, *next;
 	struct winch *winch;
 
 	spin_lock(&winch_handler_lock);
 
-	list_for_each(ele, &winch_handlers) {
+	list_for_each_safe(ele, next, &winch_handlers) {
 		winch = list_entry(ele, struct winch, list);
 		if (winch->tty == tty) {
 			free_winch(winch, 1);
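
Editor's note: list_for_each_safe() is needed above because the loop body can free the current entry, so the iterator must cache the next pointer before the entry disappears. The idiom, reduced to a sketch (struct and function names are illustrative):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int key;
};

static void remove_matching(struct list_head *head, int key)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, head) {
		struct item *it = list_entry(pos, struct item, list);

		if (it->key == key) {
			list_del(&it->list);
			kfree(it);	/* 'pos' is now stale; 'next' is not */
		}
	}
}
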
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index 8501e7d..7e0619c 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -37,13 +37,7 @@
 	if (*ppos > mmapper_size)
 		return -EINVAL;
 
-	if (count > mmapper_size - *ppos)
-		count = mmapper_size - *ppos;
-
-	if (copy_from_user(&v_buf[*ppos], buf, count))
-		return -EFAULT;
-
-	return count;
+	return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count);
 }
 
 static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -137,3 +131,4 @@
 
 MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>");
 MODULE_DESCRIPTION("DSPLinux simulator mmapper driver");
+MODULE_LICENSE("GPL");
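
Editor's note: simple_write_to_buffer() folds the bounds clamp and copy_from_user() into one helper and also advances *ppos, which the removed open-coded copy did not. A sketch of a write handler built around it (the buffer and its size are placeholders):

#include <linux/fs.h>

static char example_buf[256];

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	/* Clamp to the buffer, copy from userspace, advance *ppos and
	 * return the number of bytes actually written. */
	return simple_write_to_buffer(example_buf, sizeof(example_buf),
				      ppos, buf, count);
}
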
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 3d099f9..1aee587 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -31,7 +31,7 @@
 	if (!pmd)
 		goto out_pmd;
 
-	pte = pte_alloc_map(mm, pmd, proc);
+	pte = pte_alloc_map(mm, NULL, pmd, proc);
 	if (!pte)
 		goto out_pte;
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b6fccb0..47ae4a7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -51,6 +51,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_XZ
 	select HAVE_KERNEL_LZO
 	select HAVE_HW_BREAKPOINT
 	select HAVE_MIXED_BREAKPOINTS_REGS
@@ -65,6 +66,7 @@
 	select HAVE_SPARSE_IRQ
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
@@ -203,10 +205,6 @@
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI
 
-config USE_GENERIC_SMP_HELPERS
-	def_bool y
-	depends on SMP
-
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP
@@ -1936,13 +1934,19 @@
 	depends on X86_64 && PCI && ACPI
 
 config PCI_CNB20LE_QUIRK
-	bool "Read CNB20LE Host Bridge Windows"
-	depends on PCI
+	bool "Read CNB20LE Host Bridge Windows" if EMBEDDED
+	default n
+	depends on PCI && EXPERIMENTAL
 	help
 	  Read the PCI windows out of the CNB20LE host bridge. This allows
 	  PCI hotplug to work on systems with the CNB20LE chipset which do
 	  not have ACPI.
 
+	  There's no public spec for this chipset, and this functionality
+	  is known to be incomplete.
+
+	  You should say N unless you know you need this.
+
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
 	depends on PCI_MSI && ACPI && EXPERIMENTAL
@@ -2070,7 +2074,7 @@
 
 config OLPC_XO1
 	tristate "OLPC XO-1 support"
-	depends on OLPC && PCI
+	depends on OLPC && MFD_CS5535
 	---help---
 	  Add support for non-essential features of the OLPC XO-1 laptop.
 
@@ -2078,11 +2082,17 @@
 	bool "Support for OLPC's Open Firmware"
 	depends on !X86_64 && !X86_PAE
 	default n
+	select OF
 	help
 	  This option adds support for the implementation of Open Firmware
 	  that is used on the OLPC XO-1 Children's Machine.
 	  If unsure, say N here.
 
+config OLPC_OPENFIRMWARE_DT
+	bool
+	default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE
+	select OF_PROMTREE
+
 endif # X86_32
 
 config AMD_NB
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2ac9069..15588a0 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -310,6 +310,9 @@
 config X86_CMPXCHG
 	def_bool X86_64 || (X86_32 && !M386)
 
+config CMPXCHG_LOCAL
+	def_bool X86_64 || (X86_32 && !M386)
+
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 0c22955..09664ef 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -49,12 +49,15 @@
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzo)
 
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
 suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
 suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_XZ)	:= xz
 suffix-$(CONFIG_KERNEL_LZO) 	:= lzo
 
 quiet_cmd_mkpiggy = MKPIGGY $@
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 325c052..3a19d04 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -139,6 +139,10 @@
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZO
 #include "../../../../lib/decompress_unlzo.c"
 #endif
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 5c22812..646aa78 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -74,7 +74,7 @@
 
 	offs = (olen > ilen) ? olen - ilen : 0;
 	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
-	offs += 32*1024 + 18;	/* Add 32K + 18 bytes slack */
+	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
 	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
 
 	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
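
Editor's note: the larger slack presumably covers the bigger in-place margin the XZ decompressor needs compared to gzip's 32K window. Plugging representative numbers into the patched formula (the sizes are made up; the arithmetic mirrors the code above):

#include <stdio.h>

int main(void)
{
	unsigned long olen = 4ul << 20;	/* 4 MiB decompressed kernel */
	unsigned long ilen = 3ul << 20;	/* 3 MiB compressed image    */
	unsigned long offs;

	offs  = (olen > ilen) ? olen - ilen : 0;	/* 1048576: growth          */
	offs += olen >> 12;				/* + 1024: per-block slack  */
	offs += 64 * 1024 + 128;			/* + 65664: decompressor    */
	offs  = (offs + 4095) & ~4095ul;		/* round up to 4 KiB        */

	printf("offset = %lu bytes\n", offs);		/* 1118208 (0x111000) */
	return 0;
}
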
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index ff16756..8fe2a49 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -9,6 +9,20 @@
  *            Vinodh Gopal <vinodh.gopal@intel.com>
  *            Kahraman Akdemir
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Erdinc Ozturk (erdinc.ozturk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             James Guilford (james.guilford@intel.com)
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Wajdi Feghali (wajdi.k.feghali@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
+ * Ported x86_64 version to x86:
+ *    Author: Mathias Krause <minipli@googlemail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -18,8 +32,62 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 
+#ifdef __x86_64__
+.data
+POLY:   .octa 0xC2000000000000000000000000000001
+TWOONE: .octa 0x00000001000000000000000000000001
+
+# order of these constants should not change.
+# more specifically, ALL_F should follow SHIFT_MASK,
+# and ZERO should follow ALL_F
+
+SHUF_MASK:  .octa 0x000102030405060708090A0B0C0D0E0F
+MASK1:      .octa 0x0000000000000000ffffffffffffffff
+MASK2:      .octa 0xffffffffffffffff0000000000000000
+SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
+ZERO:       .octa 0x00000000000000000000000000000000
+ONE:        .octa 0x00000000000000000000000000000001
+F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0
+dec:        .octa 0x1
+enc:        .octa 0x2
+
+
 .text
 
+
+#define	STACK_OFFSET    8*3
+#define	HashKey		16*0	// store HashKey <<1 mod poly here
+#define	HashKey_2	16*1	// store HashKey^2 <<1 mod poly here
+#define	HashKey_3	16*2	// store HashKey^3 <<1 mod poly here
+#define	HashKey_4	16*3	// store HashKey^4 <<1 mod poly here
+#define	HashKey_k	16*4	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey <<1 mod poly here
+				//(for Karatsuba purposes)
+#define	HashKey_2_k	16*5	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^2 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	HashKey_3_k	16*6	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^3 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	HashKey_4_k	16*7	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^4 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	VARIABLE_OFFSET	16*8
+
+#define arg1 rdi
+#define arg2 rsi
+#define arg3 rdx
+#define arg4 rcx
+#define arg5 r8
+#define arg6 r9
+#define arg7 STACK_OFFSET+8(%r14)
+#define arg8 STACK_OFFSET+16(%r14)
+#define arg9 STACK_OFFSET+24(%r14)
+#define arg10 STACK_OFFSET+32(%r14)
+#endif
+
+
 #define STATE1	%xmm0
 #define STATE2	%xmm4
 #define STATE3	%xmm5
@@ -32,12 +100,16 @@
 #define IN	IN1
 #define KEY	%xmm2
 #define IV	%xmm3
+
 #define BSWAP_MASK %xmm10
 #define CTR	%xmm11
 #define INC	%xmm12
 
+#ifdef __x86_64__
+#define AREG	%rax
 #define KEYP	%rdi
 #define OUTP	%rsi
+#define UKEYP	OUTP
 #define INP	%rdx
 #define LEN	%rcx
 #define IVP	%r8
@@ -46,6 +118,1588 @@
 #define TKEYP	T1
 #define T2	%r11
 #define TCTR_LOW T2
+#else
+#define AREG	%eax
+#define KEYP	%edi
+#define OUTP	AREG
+#define UKEYP	OUTP
+#define INP	%edx
+#define LEN	%esi
+#define IVP	%ebp
+#define KLEN	%ebx
+#define T1	%ecx
+#define TKEYP	T1
+#endif
+
+
+#ifdef __x86_64__
+/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+*
+*
+* Input: A and B (128-bits each, bit-reflected)
+* Output: C = A*B*x mod poly, (i.e. >>1 )
+* To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+* GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+*
+*/
+.macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
+	movdqa	  \GH, \TMP1
+	pshufd	  $78, \GH, \TMP2
+	pshufd	  $78, \HK, \TMP3
+	pxor	  \GH, \TMP2            # TMP2 = a1+a0
+	pxor	  \HK, \TMP3            # TMP3 = b1+b0
+	PCLMULQDQ 0x11, \HK, \TMP1     # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \HK, \GH       # GH = a0*b0
+	PCLMULQDQ 0x00, \TMP3, \TMP2   # TMP2 = (a0+a1)*(b1+b0)
+	pxor	  \GH, \TMP2
+	pxor	  \TMP1, \TMP2          # TMP2 = (a0*b0)+(a1*b0)
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3             # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2             # right shift TMP2 2 DWs
+	pxor	  \TMP3, \GH
+	pxor	  \TMP2, \TMP1          # TMP2:GH holds the result of GH*HK
+
+        # first phase of the reduction
+
+	movdqa    \GH, \TMP2
+	movdqa    \GH, \TMP3
+	movdqa    \GH, \TMP4            # copy GH into TMP2,TMP3 and TMP4
+					# in order to perform
+					# independent shifts
+	pslld     $31, \TMP2            # packed left shift <<31
+	pslld     $30, \TMP3            # packed left shift <<30
+	pslld     $25, \TMP4            # packed left shift <<25
+	pxor      \TMP3, \TMP2          # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5             # right shift TMP5 1 DW
+	pslldq    $12, \TMP2            # left shift TMP2 3 DWs
+	pxor      \TMP2, \GH
+
+        # second phase of the reduction
+
+	movdqa    \GH,\TMP2             # copy GH into TMP2,TMP3 and TMP4
+					# in order to perform
+					# independent shifts
+	movdqa    \GH,\TMP3
+	movdqa    \GH,\TMP4
+	psrld     $1,\TMP2              # packed right shift >>1
+	psrld     $2,\TMP3              # packed right shift >>2
+	psrld     $7,\TMP4              # packed right shift >>7
+	pxor      \TMP3,\TMP2		# xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \GH
+	pxor      \TMP1, \GH            # result is in GH
+.endm
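
Editor's note: for reference, the Karatsuba split that the three PCLMULQDQ instructions in GHASH_MUL implement, written out (addition is XOR, since this is carry-less arithmetic over GF(2)):

	A = A1*x^64 + A0,  B = B1*x^64 + B0

	A*B = A1*B1*x^128
	    + ((A1 + A0)*(B1 + B0) + A1*B1 + A0*B0)*x^64
	    + A0*B0

Only three 64x64-bit carry-less multiplies are needed instead of four; the two "phase of the reduction" blocks then fold the 256-bit product back into 128 bits modulo the GHASH polynomial, which in the bit-reflected form used here is x^128 + x^127 + x^126 + x^121 + 1 (the "(128,127,126,121,0)" mentioned in the macro header).
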
+
+/*
+* if a = number of total plaintext bytes
+* b = floor(a/16)
+* num_initial_blocks = b mod 4
+* encrypt the initial num_initial_blocks blocks and apply ghash on
+* the ciphertext
+* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* are clobbered
+* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+*/
+
+
+.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	mov	   arg7, %r10           # %r10 = AAD
+	mov	   arg8, %r12           # %r12 = aadLen
+	mov	   %r12, %r11
+	pxor	   %xmm\i, %xmm\i
+_get_AAD_loop\num_initial_blocks\operation:
+	movd	   (%r10), \TMP1
+	pslldq	   $12, \TMP1
+	psrldq	   $4, %xmm\i
+	pxor	   \TMP1, %xmm\i
+	add	   $4, %r10
+	sub	   $4, %r12
+	jne	   _get_AAD_loop\num_initial_blocks\operation
+	cmp	   $16, %r11
+	je	   _get_AAD_loop2_done\num_initial_blocks\operation
+	mov	   $16, %r12
+_get_AAD_loop2\num_initial_blocks\operation:
+	psrldq	   $4, %xmm\i
+	sub	   $4, %r12
+	cmp	   %r11, %r12
+	jne	   _get_AAD_loop2\num_initial_blocks\operation
+_get_AAD_loop2_done\num_initial_blocks\operation:
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+
+	xor	   %r11, %r11 # initialise the data pointer offset as zero
+
+        # start AES for num_initial_blocks blocks
+
+	mov	   %arg5, %rax                      # %rax = *Y0
+	movdqu	   (%rax), \XMM0                    # XMM0 = Y0
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, \XMM0
+
+.if (\i == 5) || (\i == 6) || (\i == 7)
+.irpc index, \i_seq
+	paddd	   ONE(%rip), \XMM0                 # INCR Y0
+	movdqa	   \XMM0, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\index      # perform a 16 byte swap
+
+.endr
+.irpc index, \i_seq
+	pxor	   16*0(%arg1), %xmm\index
+.endr
+.irpc index, \i_seq
+	movaps 0x10(%rdi), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 1
+.endr
+.irpc index, \i_seq
+	movaps 0x20(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 2
+.endr
+.irpc index, \i_seq
+	movaps 0x30(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 3
+.endr
+.irpc index, \i_seq
+	movaps 0x40(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 4
+.endr
+.irpc index, \i_seq
+	movaps 0x50(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 5
+.endr
+.irpc index, \i_seq
+	movaps 0x60(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 6
+.endr
+.irpc index, \i_seq
+	movaps 0x70(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 7
+.endr
+.irpc index, \i_seq
+	movaps 0x80(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 8
+.endr
+.irpc index, \i_seq
+	movaps 0x90(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 9
+.endr
+.irpc index, \i_seq
+	movaps 0xa0(%arg1), \TMP1
+	AESENCLAST \TMP1, %xmm\index         # Round 10
+.endr
+.irpc index, \i_seq
+	movdqu	   (%arg3 , %r11, 1), \TMP1
+	pxor	   \TMP1, %xmm\index
+	movdqu	   %xmm\index, (%arg2 , %r11, 1)
+	# write back plaintext/ciphertext for num_initial_blocks
+	add	   $16, %r11
+
+	movdqa     \TMP1, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM	   %xmm14, %xmm\index
+
+		# prepare plaintext/ciphertext for GHASH computation
+.endr
+.endif
+	GHASH_MUL  %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        # apply GHASH on num_initial_blocks blocks
+
+.if \i == 5
+        pxor       %xmm5, %xmm6
+	GHASH_MUL  %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 6
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 7
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.endif
+	cmp	   $64, %r13
+	jl	_initial_blocks_done\num_initial_blocks\operation
+	# no need for precomputed values
+/*
+*
+* Precomputations for HashKey parallel with encryption of first 4 blocks.
+* HashKey_i_k holds XORed values of the low and high parts of HashKey^i
+*/
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM1
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM2
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM3
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM4
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap
+
+	pxor	   16*0(%arg1), \XMM1
+	pxor	   16*0(%arg1), \XMM2
+	pxor	   16*0(%arg1), \XMM3
+	pxor	   16*0(%arg1), \XMM4
+	movdqa	   \TMP3, \TMP5
+	pshufd	   $78, \TMP3, \TMP1
+	pxor	   \TMP3, \TMP1
+	movdqa	   \TMP1, HashKey_k(%rsp)
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_2(%rsp)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_2_k(%rsp)
+.irpc index, 1234 # do 4 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_3(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_3_k(%rsp)
+.irpc index, 56789 # do next 5 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_4(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_4_k(%rsp)
+	movaps 0xa0(%arg1), \TMP2
+	AESENCLAST \TMP2, \XMM1
+	AESENCLAST \TMP2, \XMM2
+	AESENCLAST \TMP2, \XMM3
+	AESENCLAST \TMP2, \XMM4
+	movdqu	   16*0(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM1
+	movdqu	   \XMM1, 16*0(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM1
+	movdqu	   16*1(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM2
+	movdqu	   \XMM2, 16*1(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM2
+	movdqu	   16*2(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM3
+	movdqu	   \XMM3, 16*2(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM3
+	movdqu	   16*3(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM4
+	movdqu	   \XMM4, 16*3(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM4
+	add	   $64, %r11
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+	pxor	   \XMMDst, \XMM1
+# combine GHASHed value with the corresponding ciphertext
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+
+_initial_blocks_done\num_initial_blocks\operation:
+
+.endm
+
+
+/*
+* if a = number of total plaintext bytes
+* b = floor(a/16)
+* num_initial_blocks = b mod 4
+* encrypt the initial num_initial_blocks blocks and apply ghash on
+* the ciphertext
+* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* are clobbered
+* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+*/
+
+
+.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	mov	   arg7, %r10           # %r10 = AAD
+	mov	   arg8, %r12           # %r12 = aadLen
+	mov	   %r12, %r11
+	pxor	   %xmm\i, %xmm\i
+_get_AAD_loop\num_initial_blocks\operation:
+	movd	   (%r10), \TMP1
+	pslldq	   $12, \TMP1
+	psrldq	   $4, %xmm\i
+	pxor	   \TMP1, %xmm\i
+	add	   $4, %r10
+	sub	   $4, %r12
+	jne	   _get_AAD_loop\num_initial_blocks\operation
+	cmp	   $16, %r11
+	je	   _get_AAD_loop2_done\num_initial_blocks\operation
+	mov	   $16, %r12
+_get_AAD_loop2\num_initial_blocks\operation:
+	psrldq	   $4, %xmm\i
+	sub	   $4, %r12
+	cmp	   %r11, %r12
+	jne	   _get_AAD_loop2\num_initial_blocks\operation
+_get_AAD_loop2_done\num_initial_blocks\operation:
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+
+	xor	   %r11, %r11 # initialise the data pointer offset as zero
+
+        # start AES for num_initial_blocks blocks
+
+	mov	   %arg5, %rax                      # %rax = *Y0
+	movdqu	   (%rax), \XMM0                    # XMM0 = Y0
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, \XMM0
+
+.if (\i == 5) || (\i == 6) || (\i == 7)
+.irpc index, \i_seq
+	paddd	   ONE(%rip), \XMM0                 # INCR Y0
+	movdqa	   \XMM0, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\index      # perform a 16 byte swap
+
+.endr
+.irpc index, \i_seq
+	pxor	   16*0(%arg1), %xmm\index
+.endr
+.irpc index, \i_seq
+	movaps 0x10(%rdi), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 1
+.endr
+.irpc index, \i_seq
+	movaps 0x20(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 2
+.endr
+.irpc index, \i_seq
+	movaps 0x30(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 3
+.endr
+.irpc index, \i_seq
+	movaps 0x40(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 4
+.endr
+.irpc index, \i_seq
+	movaps 0x50(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 5
+.endr
+.irpc index, \i_seq
+	movaps 0x60(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 6
+.endr
+.irpc index, \i_seq
+	movaps 0x70(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 7
+.endr
+.irpc index, \i_seq
+	movaps 0x80(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 8
+.endr
+.irpc index, \i_seq
+	movaps 0x90(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 9
+.endr
+.irpc index, \i_seq
+	movaps 0xa0(%arg1), \TMP1
+	AESENCLAST \TMP1, %xmm\index         # Round 10
+.endr
+.irpc index, \i_seq
+	movdqu	   (%arg3 , %r11, 1), \TMP1
+	pxor	   \TMP1, %xmm\index
+	movdqu	   %xmm\index, (%arg2 , %r11, 1)
+	# write back plaintext/ciphertext for num_initial_blocks
+	add	   $16, %r11
+
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM	   %xmm14, %xmm\index
+
+		# prepare plaintext/ciphertext for GHASH computation
+.endr
+.endif
+	GHASH_MUL  %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        # apply GHASH on num_initial_blocks blocks
+
+.if \i == 5
+        pxor       %xmm5, %xmm6
+	GHASH_MUL  %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 6
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 7
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.endif
+	cmp	   $64, %r13
+	jl	_initial_blocks_done\num_initial_blocks\operation
+	# no need for precomputed values
+/*
+*
+* Precomputations for HashKey parallel with encryption of first 4 blocks.
+* HashKey_i_k holds XORed values of the low and high parts of HashKey^i
+*/
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM1
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM2
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM3
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM4
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap
+
+	pxor	   16*0(%arg1), \XMM1
+	pxor	   16*0(%arg1), \XMM2
+	pxor	   16*0(%arg1), \XMM3
+	pxor	   16*0(%arg1), \XMM4
+	movdqa	   \TMP3, \TMP5
+	pshufd	   $78, \TMP3, \TMP1
+	pxor	   \TMP3, \TMP1
+	movdqa	   \TMP1, HashKey_k(%rsp)
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_2(%rsp)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_2_k(%rsp)
+.irpc index, 1234 # do 4 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_3(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_3_k(%rsp)
+.irpc index, 56789 # do next 5 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_4(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_4_k(%rsp)
+	movaps 0xa0(%arg1), \TMP2
+	AESENCLAST \TMP2, \XMM1
+	AESENCLAST \TMP2, \XMM2
+	AESENCLAST \TMP2, \XMM3
+	AESENCLAST \TMP2, \XMM4
+	movdqu	   16*0(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM1
+	movdqu	   16*1(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM2
+	movdqu	   16*2(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM3
+	movdqu	   16*3(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM4
+	movdqu     \XMM1, 16*0(%arg2 , %r11 , 1)
+	movdqu     \XMM2, 16*1(%arg2 , %r11 , 1)
+	movdqu     \XMM3, 16*2(%arg2 , %r11 , 1)
+	movdqu     \XMM4, 16*3(%arg2 , %r11 , 1)
+
+	add	   $64, %r11
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+	pxor	   \XMMDst, \XMM1
+# combine GHASHed value with the corresponding ciphertext
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+
+_initial_blocks_done\num_initial_blocks\operation:
+
+.endm
+
+/*
+* encrypt 4 blocks at a time
+* ghash the 4 previously encrypted ciphertext blocks
+* arg1, %arg2, %arg3 are used as pointers only, not modified
+* %r11 is the data offset value
+*/
+.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+	movdqa	  \XMM1, \XMM5
+	movdqa	  \XMM2, \XMM6
+	movdqa	  \XMM3, \XMM7
+	movdqa	  \XMM4, \XMM8
+
+        movdqa    SHUF_MASK(%rip), %xmm15
+        # multiply TMP5 * HashKey using karatsuba
+
+	movdqa	  \XMM5, \TMP4
+	pshufd	  $78, \XMM5, \TMP6
+	pxor	  \XMM5, \TMP6
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
+	movdqa    \XMM0, \XMM1
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM2
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM3
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
+	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  (%arg1), \XMM1
+	pxor	  (%arg1), \XMM2
+	pxor	  (%arg1), \XMM3
+	pxor	  (%arg1), \XMM4
+	movdqa	  HashKey_4_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
+	movaps 0x10(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 1
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movaps 0x20(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 2
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movdqa	  \XMM6, \TMP1
+	pshufd	  $78, \XMM6, \TMP2
+	pxor	  \XMM6, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
+	movaps 0x30(%arg1), \TMP3
+	AESENC    \TMP3, \XMM1              # Round 3
+	AESENC    \TMP3, \XMM2
+	AESENC    \TMP3, \XMM3
+	AESENC    \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
+	movaps 0x40(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 4
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_3_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x50(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 5
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM6, \XMM5
+	pxor	  \TMP2, \TMP6
+	movdqa	  \XMM7, \TMP1
+	pshufd	  $78, \XMM7, \TMP2
+	pxor	  \XMM7, \TMP2
+	movdqa	  HashKey_2(%rsp ), \TMP5
+
+        # Multiply TMP5 * HashKey using karatsuba
+
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
+	movaps 0x60(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 6
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
+	movaps 0x70(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 7
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_2_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x80(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 8
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM7, \XMM5
+	pxor	  \TMP2, \TMP6
+
+        # Multiply XMM8 * HashKey
+        # XMM8 and TMP5 hold the values for the two operands
+
+	movdqa	  \XMM8, \TMP1
+	pshufd	  $78, \XMM8, \TMP2
+	pxor	  \XMM8, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
+	movaps 0x90(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1            # Round 9
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
+	movaps 0xa0(%arg1), \TMP3
+	AESENCLAST \TMP3, \XMM1           # Round 10
+	AESENCLAST \TMP3, \XMM2
+	AESENCLAST \TMP3, \XMM3
+	AESENCLAST \TMP3, \XMM4
+	movdqa    HashKey_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
+	movdqu	  (%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
+	movdqu	  16(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
+	movdqu	  32(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
+	movdqu	  48(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
+        movdqu    \XMM1, (%arg2,%r11,1)        # Write to the ciphertext buffer
+        movdqu    \XMM2, 16(%arg2,%r11,1)      # Write to the ciphertext buffer
+        movdqu    \XMM3, 32(%arg2,%r11,1)      # Write to the ciphertext buffer
+        movdqu    \XMM4, 48(%arg2,%r11,1)      # Write to the ciphertext buffer
+	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  \TMP4, \TMP1
+	pxor	  \XMM8, \XMM5
+	pxor	  \TMP6, \TMP2
+	pxor	  \TMP1, \TMP2
+	pxor	  \XMM5, \TMP2
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
+	pxor	  \TMP3, \XMM5
+	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5
+
+        # first phase of reduction
+
+	movdqa    \XMM5, \TMP2
+	movdqa    \XMM5, \TMP3
+	movdqa    \XMM5, \TMP4
+# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
+	pslld     $31, \TMP2                   # packed left shift << 31
+	pslld     $30, \TMP3                   # packed left shift << 30
+	pslld     $25, \TMP4                   # packed left shift << 25
+	pxor      \TMP3, \TMP2	               # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5                    # right shift T5 1 DW
+	pslldq    $12, \TMP2                   # left shift T2 3 DWs
+	pxor      \TMP2, \XMM5
+
+        # second phase of reduction
+
+	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
+	movdqa    \XMM5,\TMP3
+	movdqa    \XMM5,\TMP4
+	psrld     $1, \TMP2                    # packed right shift >>1
+	psrld     $2, \TMP3                    # packed right shift >>2
+	psrld     $7, \TMP4                    # packed right shift >>7
+	pxor      \TMP3,\TMP2		       # xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \XMM5
+	pxor      \TMP1, \XMM5                 # result is in XMM5
+
+	pxor	  \XMM5, \XMM1
+.endm
+
+/*
+* decrypt 4 blocks at a time
+* ghash the 4 previously decrypted ciphertext blocks
+* arg1, %arg2, %arg3 are used as pointers only, not modified
+* %r11 is the data offset value
+*/
+.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+	movdqa	  \XMM1, \XMM5
+	movdqa	  \XMM2, \XMM6
+	movdqa	  \XMM3, \XMM7
+	movdqa	  \XMM4, \XMM8
+
+        movdqa    SHUF_MASK(%rip), %xmm15
+        # multiply TMP5 * HashKey using karatsuba
+
+	movdqa	  \XMM5, \TMP4
+	pshufd	  $78, \XMM5, \TMP6
+	pxor	  \XMM5, \TMP6
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
+	movdqa    \XMM0, \XMM1
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM2
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM3
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
+	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  (%arg1), \XMM1
+	pxor	  (%arg1), \XMM2
+	pxor	  (%arg1), \XMM3
+	pxor	  (%arg1), \XMM4
+	movdqa	  HashKey_4_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
+	movaps 0x10(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 1
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movaps 0x20(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 2
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movdqa	  \XMM6, \TMP1
+	pshufd	  $78, \XMM6, \TMP2
+	pxor	  \XMM6, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
+	movaps 0x30(%arg1), \TMP3
+	AESENC    \TMP3, \XMM1              # Round 3
+	AESENC    \TMP3, \XMM2
+	AESENC    \TMP3, \XMM3
+	AESENC    \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
+	movaps 0x40(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 4
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_3_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x50(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 5
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM6, \XMM5
+	pxor	  \TMP2, \TMP6
+	movdqa	  \XMM7, \TMP1
+	pshufd	  $78, \XMM7, \TMP2
+	pxor	  \XMM7, \TMP2
+	movdqa	  HashKey_2(%rsp ), \TMP5
+
+        # Multiply TMP5 * HashKey using karatsuba
+
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
+	movaps 0x60(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 6
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
+	movaps 0x70(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 7
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_2_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x80(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 8
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM7, \XMM5
+	pxor	  \TMP2, \TMP6
+
+        # Multiply XMM8 * HashKey
+        # XMM8 and TMP5 hold the values for the two operands
+
+	movdqa	  \XMM8, \TMP1
+	pshufd	  $78, \XMM8, \TMP2
+	pxor	  \XMM8, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
+	movaps 0x90(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1            # Round 9
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
+	movaps 0xa0(%arg1), \TMP3
+	AESENCLAST \TMP3, \XMM1           # Round 10
+	AESENCLAST \TMP3, \XMM2
+	AESENCLAST \TMP3, \XMM3
+	AESENCLAST \TMP3, \XMM4
+	movdqa    HashKey_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
+	movdqu	  (%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM1, (%arg2,%r11,1)        # Write to plaintext buffer
+	movdqa    \TMP3, \XMM1
+	movdqu	  16(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM2, 16(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM2
+	movdqu	  32(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM3, 32(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM3
+	movdqu	  48(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM4, 48(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  \TMP4, \TMP1
+	pxor	  \XMM8, \XMM5
+	pxor	  \TMP6, \TMP2
+	pxor	  \TMP1, \TMP2
+	pxor	  \XMM5, \TMP2
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
+	pxor	  \TMP3, \XMM5
+	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5
+
+        # first phase of reduction
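+        # (the shift counts used here, 31/30/25, and 1/2/7 in the second
+        # phase, follow from the GCM polynomial
+        # x^128 + x^127 + x^126 + x^121 + 1 documented further below)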
+
+	movdqa    \XMM5, \TMP2
+	movdqa    \XMM5, \TMP3
+	movdqa    \XMM5, \TMP4
+# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
+	pslld     $31, \TMP2                   # packed left shift << 31
+	pslld     $30, \TMP3                   # packed left shift << 30
+	pslld     $25, \TMP4                   # packed left shift << 25
+	pxor      \TMP3, \TMP2	               # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5                    # right shift T5 1 DW
+	pslldq    $12, \TMP2                   # left shift T2 3 DWs
+	pxor      \TMP2, \XMM5
+
+        # second phase of reduction
+
+	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
+	movdqa    \XMM5,\TMP3
+	movdqa    \XMM5,\TMP4
+	psrld     $1, \TMP2                    # packed right shift >> 1
+	psrld     $2, \TMP3                    # packed right shift >> 2
+	psrld     $7, \TMP4                    # packed right shift >> 7
+	pxor      \TMP3,\TMP2		       # xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \XMM5
+	pxor      \TMP1, \XMM5                 # result is in XMM5
+
+	pxor	  \XMM5, \XMM1
+.endm
+
+/* GHASH the last 4 ciphertext blocks. */
+.macro	GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
+TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+
+        # Multiply TMP6 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM1, \TMP6
+	pshufd	  $78, \XMM1, \TMP2
+	pxor	  \XMM1, \TMP2
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP6       # TMP6 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM1       # XMM1 = a0*b0
+	movdqa	  HashKey_4_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	movdqa	  \XMM1, \XMMDst
+	movdqa	  \TMP2, \XMM1              # result in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM2, \TMP1
+	pshufd	  $78, \XMM2, \TMP2
+	pxor	  \XMM2, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM2       # XMM2 = a0*b0
+	movdqa	  HashKey_3_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM2, \XMMDst
+	pxor	  \TMP2, \XMM1
+# results accumulated in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM3, \TMP1
+	pshufd	  $78, \XMM3, \TMP2
+	pxor	  \XMM3, \TMP2
+	movdqa	  HashKey_2(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM3       # XMM3 = a0*b0
+	movdqa	  HashKey_2_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM3, \XMMDst
+	pxor	  \TMP2, \XMM1   # results accumulated in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+	movdqa	  \XMM4, \TMP1
+	pshufd	  $78, \XMM4, \TMP2
+	pxor	  \XMM4, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1	    # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM4       # XMM4 = a0*b0
+	movdqa	  HashKey_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM4, \XMMDst
+	pxor	  \XMM1, \TMP2
+	pxor	  \TMP6, \TMP2
+	pxor	  \XMMDst, \TMP2
+	# middle section of the temp results combined as in karatsuba algorithm
+	movdqa	  \TMP2, \TMP4
+	pslldq	  $8, \TMP4                 # left shift TMP4 2 DWs
+	psrldq	  $8, \TMP2                 # right shift TMP2 2 DWs
+	pxor	  \TMP4, \XMMDst
+	pxor	  \TMP2, \TMP6
+# TMP6:XMMDst holds the result of the accumulated carry-less multiplications
+	# first phase of the reduction
+	movdqa    \XMMDst, \TMP2
+	movdqa    \XMMDst, \TMP3
+	movdqa    \XMMDst, \TMP4
+# move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
+	pslld     $31, \TMP2                # packed left shifting << 31
+	pslld     $30, \TMP3                # packed left shifting << 30
+	pslld     $25, \TMP4                # packed left shifting << 25
+	pxor      \TMP3, \TMP2              # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP7
+	psrldq    $4, \TMP7                 # right shift TMP7 1 DW
+	pslldq    $12, \TMP2                # left shift TMP2 3 DWs
+	pxor      \TMP2, \XMMDst
+
+        # second phase of the reduction
+	movdqa    \XMMDst, \TMP2
+	# make 3 copies of XMMDst for doing 3 shift operations
+	movdqa    \XMMDst, \TMP3
+	movdqa    \XMMDst, \TMP4
+	psrld     $1, \TMP2                 # packed right shift >> 1
+	psrld     $2, \TMP3                 # packed right shift >> 2
+	psrld     $7, \TMP4                 # packed right shift >> 7
+	pxor      \TMP3, \TMP2              # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	pxor      \TMP7, \TMP2
+	pxor      \TMP2, \XMMDst
+	pxor      \TMP6, \XMMDst            # reduced result is in XMMDst
+.endm
+
+/* Encrypt a single block (10 AES rounds, i.e. AES-128) */
+.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
+
+	pxor	(%arg1), \XMM0
+        movaps 16(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 32(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 48(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 64(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 80(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 96(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 112(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 128(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 144(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 160(%arg1), \TMP1
+	AESENCLAST	\TMP1, \XMM0
+.endm
+
+
+/*****************************************************************************
+* void aesni_gcm_dec(void *aes_ctx,    // AES Key schedule. Starts on a 16 byte boundary.
+*                   u8 *out,           // Plaintext output. Decrypt in-place is allowed.
+*                   const u8 *in,      // Ciphertext input
+*                   u64 plaintext_len, // Length of data in bytes for decryption.
+*                   u8 *iv,            // Pre-counter block j0: 4 byte salt (from Security Association)
+*                                      // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+*                                      // concatenated with 0x00000001. 16-byte aligned pointer.
+*                   u8 *hash_subkey,   // H, the Hash sub key input. Data starts on a 16-byte boundary.
+*                   const u8 *aad,     // Additional Authentication Data (AAD)
+*                   u64 aad_len,       // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
+*                   u8  *auth_tag,     // Authenticated Tag output. The driver will compare this to the
+*                                      // given authentication tag and only return the plaintext if they match.
+*                   u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16
+*                                      // (most likely), 12 or 8.
+*
+* Assumptions:
+*
+* keys:
+*       keys are pre-expanded and aligned to 16 bytes. We are using the first
+*       set of 11 keys in the data structure void *aes_ctx
+*
+* iv:
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                             Salt  (From the SA)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     Initialization Vector                     |
+*       |         (This is the sequence number from IPSec header)       |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x1                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*
+*
+* AAD:
+*       AAD padded to 128 bits with 0
+*       for example, assume AAD is a u32 vector
+*
+*       if AAD is 8 bytes:
+*       AAD[3] = {A0, A1};
+*       padded AAD in xmm register = {A1 A0 0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A1)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     32-bit Sequence Number (A0)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                                       AAD Format with 32-bit Sequence Number
+*
+*       if AAD is 12 bytes:
+*       AAD[3] = {A0, A1, A2};
+*       padded AAD in xmm register = {A2 A1 A0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A2)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                 64-bit Extended Sequence Number {A1,A0}       |
+*       |                                                               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                        AAD Format with 64-bit Extended Sequence Number
+*
+* aadLen:
+*       from the definition of the spec, aadLen can only be 8 or 12 bytes.
+*       The code supports 16 too but for other sizes, the code will fail.
+*
+* TLen:
+*       from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+*       For other sizes, the code will fail.
+*
+* poly = x^128 + x^127 + x^126 + x^121 + 1
+*
+*****************************************************************************/
+
+ENTRY(aesni_gcm_dec)
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rsp, %r14
+/*
+* states of %xmm registers %xmm6:%xmm15 not saved
+* all %xmm registers are clobbered
+*/
+	sub	$VARIABLE_OFFSET, %rsp
+	and	$~63, %rsp                        # align rsp to 64 bytes
+	mov	%arg6, %r12
+	movdqu	(%r12), %xmm13			  # %xmm13 = HashKey
+        movdqa  SHUF_MASK(%rip), %xmm2
+	PSHUFB_XMM %xmm2, %xmm13
+
+
+# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH)
+
+	movdqa	%xmm13, %xmm2
+	psllq	$1, %xmm13
+	psrlq	$63, %xmm2
+	movdqa	%xmm2, %xmm1
+	pslldq	$8, %xmm2
+	psrldq	$8, %xmm1
+	por	%xmm2, %xmm13
+
+        # Reduction
+
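+	# if bit 127 of the original HashKey was shifted out above, xor POLY
+	# back in so %xmm13 becomes HashKey<<1 reduced modulo the GCM polynomial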
+	pshufd	$0x24, %xmm1, %xmm2
+	pcmpeqd TWOONE(%rip), %xmm2
+	pand	POLY(%rip), %xmm2
+	pxor	%xmm2, %xmm13     # %xmm13 holds the HashKey<<1 (mod poly)
+
+
+        # Decrypt first few blocks
+
+	movdqa %xmm13, HashKey(%rsp)           # store HashKey<<1 (mod poly)
+	mov %arg4, %r13    # save the number of bytes of plaintext/ciphertext
+	and $-16, %r13                      # %r13 = %r13 - (%r13 mod 16)
+	mov %r13, %r12
+	and $(3<<4), %r12
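+	# %r12 = (number of complete 16-byte blocks mod 4) * 16; this selects
+	# how many blocks to decrypt up front so the main loop below always
+	# works on multiples of 4 blocks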
+	jz _initial_num_blocks_is_0_decrypt
+	cmp $(2<<4), %r12
+	jb _initial_num_blocks_is_1_decrypt
+	je _initial_num_blocks_is_2_decrypt
+_initial_num_blocks_is_3_decrypt:
+	INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec
+	sub	$48, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_2_decrypt:
+	INITIAL_BLOCKS_DEC	2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec
+	sub	$32, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_1_decrypt:
+	INITIAL_BLOCKS_DEC	1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec
+	sub	$16, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_0_decrypt:
+	INITIAL_BLOCKS_DEC	0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec
+_initial_blocks_decrypted:
+	cmp	$0, %r13
+	je	_zero_cipher_left_decrypt
+	sub	$64, %r13
+	je	_four_cipher_left_decrypt
+_decrypt_by_4:
+	GHASH_4_ENCRYPT_4_PARALLEL_DEC	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
+%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec
+	add	$64, %r11
+	sub	$64, %r13
+	jne	_decrypt_by_4
+_four_cipher_left_decrypt:
+	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_decrypt:
+	mov	%arg4, %r13
+	and	$15, %r13				# %r13 = arg4 (mod 16)
+	je	_multiple_of_16_bytes_decrypt
+
+        # Handle the last <16 byte block separately
+
+	paddd ONE(%rip), %xmm0         # increment CNT to get Yn
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
+
+	ENCRYPT_SINGLE_BLOCK  %xmm0, %xmm1    # E(K, Yn)
+	sub $16, %r11
+	add %r13, %r11
+	movdqu (%arg3,%r11,1), %xmm1   # receive the last <16 byte block
+	lea SHIFT_MASK+16(%rip), %r12
+	sub %r13, %r12
+# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+# (%r13 is the number of bytes in plaintext mod 16)
+	movdqu (%r12), %xmm2           # get the appropriate shuffle mask
+	PSHUFB_XMM %xmm2, %xmm1            # right shift 16-%r13 bytes
+
+	movdqa  %xmm1, %xmm2
+	pxor %xmm1, %xmm0            # Ciphertext XOR E(K, Yn)
+	movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+	# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+	pand %xmm1, %xmm0            # mask out top 16-%r13 bytes of %xmm0
+	pand    %xmm1, %xmm2
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10 ,%xmm2
+
+	pxor %xmm2, %xmm8
+	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	          # GHASH computation for the last <16 byte block
+	sub %r13, %r11
+	add $16, %r11
+
+        # output %r13 bytes
+	MOVQ_R64_XMM	%xmm0, %rax
+	cmp	$8, %r13
+	jle	_less_than_8_bytes_left_decrypt
+	mov	%rax, (%arg2 , %r11, 1)
+	add	$8, %r11
+	psrldq	$8, %xmm0
+	MOVQ_R64_XMM	%xmm0, %rax
+	sub	$8, %r13
+_less_than_8_bytes_left_decrypt:
+	mov	%al,  (%arg2, %r11, 1)
+	add	$1, %r11
+	shr	$8, %rax
+	sub	$1, %r13
+	jne	_less_than_8_bytes_left_decrypt
+_multiple_of_16_bytes_decrypt:
+	mov	arg8, %r12		  # %r12 = aadLen (number of bytes)
+	shl	$3, %r12		  # convert into number of bits
+	movd	%r12d, %xmm15		  # len(A) in %xmm15
+	shl	$3, %arg4		  # len(C) in bits (len * 8)
+	MOVQ_R64_XMM	%arg4, %xmm1
+	pslldq	$8, %xmm15		  # %xmm15 = len(A)||0x0000000000000000
+	pxor	%xmm1, %xmm15		  # %xmm15 = len(A)||len(C)
+	pxor	%xmm15, %xmm8
+	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	         # final GHASH computation
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm8
+
+	mov	%arg5, %rax		  # %rax = *Y0
+	movdqu	(%rax), %xmm0		  # %xmm0 = Y0
+	ENCRYPT_SINGLE_BLOCK	%xmm0,  %xmm1	  # E(K, Y0)
+	pxor	%xmm8, %xmm0
+_return_T_decrypt:
+	mov	arg9, %r10                # %r10 = authTag
+	mov	arg10, %r11               # %r11 = auth_tag_len
+	cmp	$16, %r11
+	je	_T_16_decrypt
+	cmp	$12, %r11
+	je	_T_12_decrypt
+_T_8_decrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	jmp	_return_T_done_decrypt
+_T_12_decrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	psrldq	$8, %xmm0
+	movd	%xmm0, %eax
+	mov	%eax, 8(%r10)
+	jmp	_return_T_done_decrypt
+_T_16_decrypt:
+	movdqu	%xmm0, (%r10)
+_return_T_done_decrypt:
+	mov	%r14, %rsp
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	ret
+
+
+/*****************************************************************************
+* void aesni_gcm_enc(void *aes_ctx,      // AES Key schedule. Starts on a 16 byte boundary.
+*                    u8 *out,            // Ciphertext output. Encrypt in-place is allowed.
+*                    const u8 *in,       // Plaintext input
+*                    u64 plaintext_len,  // Length of data in bytes for encryption.
+*                    u8 *iv,             // Pre-counter block j0: 4 byte salt (from Security Association)
+*                                        // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+*                                        // concatenated with 0x00000001. 16-byte aligned pointer.
+*                    u8 *hash_subkey,    // H, the Hash sub key input. Data starts on a 16-byte boundary.
+*                    const u8 *aad,      // Additional Authentication Data (AAD)
+*                    u64 aad_len,        // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
+*                    u8 *auth_tag,       // Authenticated Tag output.
+*                    u64 auth_tag_len);  // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
+*                                        // 12 or 8.
+*
+* Assumptions:
+*
+* keys:
+*       keys are pre-expanded and aligned to 16 bytes. We are using the
+*       first set of 11 keys in the data structure void *aes_ctx
+*
+*
+* iv:
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                             Salt  (From the SA)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     Initialization Vector                     |
+*       |         (This is the sequence number from IPSec header)       |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x1                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*
+*
+* AAD:
+*       AAD padded to 128 bits with 0
+*       for example, assume AAD is a u32 vector
+*
+*       if AAD is 8 bytes:
+*       AAD[3] = {A0, A1};
+*       padded AAD in xmm register = {A1 A0 0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A1)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     32-bit Sequence Number (A0)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                                 AAD Format with 32-bit Sequence Number
+*
+*       if AAD is 12 bytes:
+*       AAD[3] = {A0, A1, A2};
+*       padded AAD in xmm register = {A2 A1 A0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A2)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                 64-bit Extended Sequence Number {A1,A0}       |
+*       |                                                               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                         AAD Format with 64-bit Extended Sequence Number
+*
+* aadLen:
+*       from the definition of the spec, aadLen can only be 8 or 12 bytes.
+*       The code supports 16 too but for other sizes, the code will fail.
+*
+* TLen:
+*       from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+*       For other sizes, the code will fail.
+*
+* poly = x^128 + x^127 + x^126 + x^121 + 1
+***************************************************************************/
+ENTRY(aesni_gcm_enc)
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rsp, %r14
+#
+# states of %xmm registers %xmm6:%xmm15 not saved
+# all %xmm registers are clobbered
+#
+	sub	$VARIABLE_OFFSET, %rsp
+	and	$~63, %rsp
+	mov	%arg6, %r12
+	movdqu	(%r12), %xmm13
+        movdqa  SHUF_MASK(%rip), %xmm2
+	PSHUFB_XMM %xmm2, %xmm13
+
+
+# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
+
+	movdqa	%xmm13, %xmm2
+	psllq	$1, %xmm13
+	psrlq	$63, %xmm2
+	movdqa	%xmm2, %xmm1
+	pslldq	$8, %xmm2
+	psrldq	$8, %xmm1
+	por	%xmm2, %xmm13
+
+        # reduce HashKey<<1
+
+	pshufd	$0x24, %xmm1, %xmm2
+	pcmpeqd TWOONE(%rip), %xmm2
+	pand	POLY(%rip), %xmm2
+	pxor	%xmm2, %xmm13
+	movdqa	%xmm13, HashKey(%rsp)          # store HashKey<<1 (mod poly)
+	mov	%arg4, %r13    # save the number of bytes of plaintext/ciphertext
+	and	$-16, %r13
+	mov	%r13, %r12
+
+        # Encrypt first few blocks
+
+	and	$(3<<4), %r12
+	jz	_initial_num_blocks_is_0_encrypt
+	cmp	$(2<<4), %r12
+	jb	_initial_num_blocks_is_1_encrypt
+	je	_initial_num_blocks_is_2_encrypt
+_initial_num_blocks_is_3_encrypt:
+	INITIAL_BLOCKS_ENC	3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc
+	sub	$48, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_2_encrypt:
+	INITIAL_BLOCKS_ENC	2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc
+	sub	$32, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_1_encrypt:
+	INITIAL_BLOCKS_ENC	1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc
+	sub	$16, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_0_encrypt:
+	INITIAL_BLOCKS_ENC	0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc
+_initial_blocks_encrypted:
+
+        # Main loop - Encrypt remaining blocks
+
+	cmp	$0, %r13
+	je	_zero_cipher_left_encrypt
+	sub	$64, %r13
+	je	_four_cipher_left_encrypt
+_encrypt_by_4_encrypt:
+	GHASH_4_ENCRYPT_4_PARALLEL_ENC	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
+%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc
+	add	$64, %r11
+	sub	$64, %r13
+	jne	_encrypt_by_4_encrypt
+_four_cipher_left_encrypt:
+	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_encrypt:
+	mov	%arg4, %r13
+	and	$15, %r13			# %r13 = arg4 (mod 16)
+	je	_multiple_of_16_bytes_encrypt
+
+         # Handle the last <16 byte block separately
+	paddd ONE(%rip), %xmm0                # INCR CNT to get Yn
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
+
+	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm1        # Encrypt(K, Yn)
+	sub $16, %r11
+	add %r13, %r11
+	movdqu (%arg3,%r11,1), %xmm1     # receive the last <16 byte blocks
+	lea SHIFT_MASK+16(%rip), %r12
+	sub %r13, %r12
+	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+	# (%r13 is the number of bytes in plaintext mod 16)
+	movdqu	(%r12), %xmm2           # get the appropriate shuffle mask
+	PSHUFB_XMM	%xmm2, %xmm1            # shift right 16-r13 bytes
+	pxor	%xmm1, %xmm0            # Plaintext XOR Encrypt(K, Yn)
+	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
+	# get the appropriate mask to mask out top 16-r13 bytes of xmm0
+	pand	%xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10,%xmm0
+
+	pxor	%xmm0, %xmm8
+	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	# GHASH computation for the last <16 byte block
+	sub	%r13, %r11
+	add	$16, %r11
+	PSHUFB_XMM %xmm10, %xmm1
+
+	# shuffle xmm0 back to output as ciphertext
+
+        # Output %r13 bytes
+	MOVQ_R64_XMM %xmm0, %rax
+	cmp $8, %r13
+	jle _less_than_8_bytes_left_encrypt
+	mov %rax, (%arg2 , %r11, 1)
+	add $8, %r11
+	psrldq $8, %xmm0
+	MOVQ_R64_XMM %xmm0, %rax
+	sub $8, %r13
+_less_than_8_bytes_left_encrypt:
+	mov %al,  (%arg2, %r11, 1)
+	add $1, %r11
+	shr $8, %rax
+	sub $1, %r13
+	jne _less_than_8_bytes_left_encrypt
+_multiple_of_16_bytes_encrypt:
+	mov	arg8, %r12    # %r12 = aadLen (number of bytes)
+	shl	$3, %r12
+	movd	%r12d, %xmm15       # len(A) in %xmm15
+	shl	$3, %arg4               # len(C) in bits (len * 8)
+	MOVQ_R64_XMM	%arg4, %xmm1
+	pslldq	$8, %xmm15          # %xmm15 = len(A)||0x0000000000000000
+	pxor	%xmm1, %xmm15       # %xmm15 = len(A)||len(C)
+	pxor	%xmm15, %xmm8
+	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	# final GHASH computation
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm8         # perform a 16 byte swap
+
+	mov	%arg5, %rax		       # %rax  = *Y0
+	movdqu	(%rax), %xmm0		       # %xmm0 = Y0
+	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm15         # Encrypt(K, Y0)
+	pxor	%xmm8, %xmm0
+_return_T_encrypt:
+	mov	arg9, %r10                     # %r10 = authTag
+	mov	arg10, %r11                    # %r11 = auth_tag_len
+	cmp	$16, %r11
+	je	_T_16_encrypt
+	cmp	$12, %r11
+	je	_T_12_encrypt
+_T_8_encrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	jmp	_return_T_done_encrypt
+_T_12_encrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	psrldq	$8, %xmm0
+	movd	%xmm0, %eax
+	mov	%eax, 8(%r10)
+	jmp	_return_T_done_encrypt
+_T_16_encrypt:
+	movdqu	%xmm0, (%r10)
+_return_T_done_encrypt:
+	mov	%r14, %rsp
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	ret
+
+#endif
+
 
 _key_expansion_128:
 _key_expansion_256a:
@@ -55,10 +1709,11 @@
 	shufps $0b10001100, %xmm0, %xmm4
 	pxor %xmm4, %xmm0
 	pxor %xmm1, %xmm0
-	movaps %xmm0, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm0, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
+.align 4
 _key_expansion_192a:
 	pshufd $0b01010101, %xmm1, %xmm1
 	shufps $0b00010000, %xmm0, %xmm4
@@ -76,12 +1731,13 @@
 
 	movaps %xmm0, %xmm1
 	shufps $0b01000100, %xmm0, %xmm6
-	movaps %xmm6, (%rcx)
+	movaps %xmm6, (TKEYP)
 	shufps $0b01001110, %xmm2, %xmm1
-	movaps %xmm1, 16(%rcx)
-	add $0x20, %rcx
+	movaps %xmm1, 0x10(TKEYP)
+	add $0x20, TKEYP
 	ret
 
+.align 4
 _key_expansion_192b:
 	pshufd $0b01010101, %xmm1, %xmm1
 	shufps $0b00010000, %xmm0, %xmm4
@@ -96,10 +1752,11 @@
 	pxor %xmm3, %xmm2
 	pxor %xmm5, %xmm2
 
-	movaps %xmm0, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm0, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
+.align 4
 _key_expansion_256b:
 	pshufd $0b10101010, %xmm1, %xmm1
 	shufps $0b00010000, %xmm2, %xmm4
@@ -107,8 +1764,8 @@
 	shufps $0b10001100, %xmm2, %xmm4
 	pxor %xmm4, %xmm2
 	pxor %xmm1, %xmm2
-	movaps %xmm2, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm2, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
 /*
@@ -116,17 +1773,23 @@
  *                   unsigned int key_len)
  */
 ENTRY(aesni_set_key)
-	movups (%rsi), %xmm0		# user key (first 16 bytes)
-	movaps %xmm0, (%rdi)
-	lea 0x10(%rdi), %rcx		# key addr
-	movl %edx, 480(%rdi)
+#ifndef __x86_64__
+	pushl KEYP
+	movl 8(%esp), KEYP		# ctx
+	movl 12(%esp), UKEYP		# in_key
+	movl 16(%esp), %edx		# key_len
+#endif
+	movups (UKEYP), %xmm0		# user key (first 16 bytes)
+	movaps %xmm0, (KEYP)
+	lea 0x10(KEYP), TKEYP		# key addr
+	movl %edx, 480(KEYP)
 	pxor %xmm4, %xmm4		# xmm4 is assumed 0 in _key_expansion_x
 	cmp $24, %dl
 	jb .Lenc_key128
 	je .Lenc_key192
-	movups 0x10(%rsi), %xmm2	# other user key
-	movaps %xmm2, (%rcx)
-	add $0x10, %rcx
+	movups 0x10(UKEYP), %xmm2	# other user key
+	movaps %xmm2, (TKEYP)
+	add $0x10, TKEYP
 	AESKEYGENASSIST 0x1 %xmm2 %xmm1		# round 1
 	call _key_expansion_256a
 	AESKEYGENASSIST 0x1 %xmm0 %xmm1
@@ -155,7 +1818,7 @@
 	call _key_expansion_256a
 	jmp .Ldec_key
 .Lenc_key192:
-	movq 0x10(%rsi), %xmm2		# other user key
+	movq 0x10(UKEYP), %xmm2		# other user key
 	AESKEYGENASSIST 0x1 %xmm2 %xmm1		# round 1
 	call _key_expansion_192a
 	AESKEYGENASSIST 0x2 %xmm2 %xmm1		# round 2
@@ -195,33 +1858,47 @@
 	AESKEYGENASSIST 0x36 %xmm0 %xmm1	# round 10
 	call _key_expansion_128
 .Ldec_key:
-	sub $0x10, %rcx
-	movaps (%rdi), %xmm0
-	movaps (%rcx), %xmm1
-	movaps %xmm0, 240(%rcx)
-	movaps %xmm1, 240(%rdi)
-	add $0x10, %rdi
-	lea 240-16(%rcx), %rsi
+	sub $0x10, TKEYP
+	movaps (KEYP), %xmm0
+	movaps (TKEYP), %xmm1
+	movaps %xmm0, 240(TKEYP)
+	movaps %xmm1, 240(KEYP)
+	add $0x10, KEYP
+	lea 240-16(TKEYP), UKEYP
 .align 4
 .Ldec_key_loop:
-	movaps (%rdi), %xmm0
+	movaps (KEYP), %xmm0
 	AESIMC %xmm0 %xmm1
-	movaps %xmm1, (%rsi)
-	add $0x10, %rdi
-	sub $0x10, %rsi
-	cmp %rcx, %rdi
+	movaps %xmm1, (UKEYP)
+	add $0x10, KEYP
+	sub $0x10, UKEYP
+	cmp TKEYP, KEYP
 	jb .Ldec_key_loop
-	xor %rax, %rax
+	xor AREG, AREG
+#ifndef __x86_64__
+	popl KEYP
+#endif
 	ret
 
 /*
  * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
  */
 ENTRY(aesni_enc)
+#ifndef __x86_64__
+	pushl KEYP
+	pushl KLEN
+	movl 12(%esp), KEYP
+	movl 16(%esp), OUTP
+	movl 20(%esp), INP
+#endif
 	movl 480(KEYP), KLEN		# key length
 	movups (INP), STATE		# input
 	call _aesni_enc1
 	movups STATE, (OUTP)		# output
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+#endif
 	ret
 
 /*
@@ -236,6 +1913,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_enc1:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -298,6 +1976,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_enc4:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -391,11 +2070,22 @@
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
  */
 ENTRY(aesni_dec)
+#ifndef __x86_64__
+	pushl KEYP
+	pushl KLEN
+	movl 12(%esp), KEYP
+	movl 16(%esp), OUTP
+	movl 20(%esp), INP
+#endif
 	mov 480(KEYP), KLEN		# key length
 	add $240, KEYP
 	movups (INP), STATE		# input
 	call _aesni_dec1
 	movups STATE, (OUTP)		#output
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+#endif
 	ret
 
 /*
@@ -410,6 +2100,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_dec1:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -472,6 +2163,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_dec4:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -566,6 +2258,15 @@
  *		      size_t len)
  */
 ENTRY(aesni_ecb_enc)
+#ifndef __x86_64__
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 16(%esp), KEYP
+	movl 20(%esp), OUTP
+	movl 24(%esp), INP
+	movl 28(%esp), LEN
+#endif
 	test LEN, LEN		# check length
 	jz .Lecb_enc_ret
 	mov 480(KEYP), KLEN
@@ -602,6 +2303,11 @@
 	cmp $16, LEN
 	jge .Lecb_enc_loop1
 .Lecb_enc_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+#endif
 	ret
 
 /*
@@ -609,6 +2315,15 @@
  *		      size_t len);
  */
 ENTRY(aesni_ecb_dec)
+#ifndef __x86_64__
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 16(%esp), KEYP
+	movl 20(%esp), OUTP
+	movl 24(%esp), INP
+	movl 28(%esp), LEN
+#endif
 	test LEN, LEN
 	jz .Lecb_dec_ret
 	mov 480(KEYP), KLEN
@@ -646,6 +2361,11 @@
 	cmp $16, LEN
 	jge .Lecb_dec_loop1
 .Lecb_dec_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+#endif
 	ret
 
 /*
@@ -653,6 +2373,17 @@
  *		      size_t len, u8 *iv)
  */
 ENTRY(aesni_cbc_enc)
+#ifndef __x86_64__
+	pushl IVP
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 20(%esp), KEYP
+	movl 24(%esp), OUTP
+	movl 28(%esp), INP
+	movl 32(%esp), LEN
+	movl 36(%esp), IVP
+#endif
 	cmp $16, LEN
 	jb .Lcbc_enc_ret
 	mov 480(KEYP), KLEN
@@ -670,6 +2401,12 @@
 	jge .Lcbc_enc_loop
 	movups STATE, (IVP)
 .Lcbc_enc_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+	popl IVP
+#endif
 	ret
 
 /*
@@ -677,6 +2414,17 @@
  *		      size_t len, u8 *iv)
  */
 ENTRY(aesni_cbc_dec)
+#ifndef __x86_64__
+	pushl IVP
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 20(%esp), KEYP
+	movl 24(%esp), OUTP
+	movl 28(%esp), INP
+	movl 32(%esp), LEN
+	movl 36(%esp), IVP
+#endif
 	cmp $16, LEN
 	jb .Lcbc_dec_just_ret
 	mov 480(KEYP), KLEN
@@ -690,16 +2438,30 @@
 	movaps IN1, STATE1
 	movups 0x10(INP), IN2
 	movaps IN2, STATE2
+#ifdef __x86_64__
 	movups 0x20(INP), IN3
 	movaps IN3, STATE3
 	movups 0x30(INP), IN4
 	movaps IN4, STATE4
+#else
+	movups 0x20(INP), IN1
+	movaps IN1, STATE3
+	movups 0x30(INP), IN2
+	movaps IN2, STATE4
+#endif
 	call _aesni_dec4
 	pxor IV, STATE1
+#ifdef __x86_64__
 	pxor IN1, STATE2
 	pxor IN2, STATE3
 	pxor IN3, STATE4
 	movaps IN4, IV
+#else
+	pxor (INP), STATE2
+	pxor 0x10(INP), STATE3
+	pxor IN1, STATE4
+	movaps IN2, IV
+#endif
 	movups STATE1, (OUTP)
 	movups STATE2, 0x10(OUTP)
 	movups STATE3, 0x20(OUTP)
@@ -727,8 +2489,15 @@
 .Lcbc_dec_ret:
 	movups IV, (IVP)
 .Lcbc_dec_just_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+	popl IVP
+#endif
 	ret
 
+#ifdef __x86_64__
 .align 16
 .Lbswap_mask:
 	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
@@ -744,6 +2513,7 @@
  *	INC:	== 1, in little endian
  *	BSWAP_MASK == endian swapping mask
  */
+.align 4
 _aesni_inc_init:
 	movaps .Lbswap_mask, BSWAP_MASK
 	movaps IV, CTR
@@ -768,6 +2538,7 @@
  *	CTR:	== output IV, in little endian
  *	TCTR_LOW: == lower qword of CTR
  */
+.align 4
 _aesni_inc:
 	paddq INC, CTR
 	add $1, TCTR_LOW
@@ -839,3 +2610,4 @@
 	movups IV, (IVP)
 .Lctr_enc_just_ret:
 	ret
+#endif
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2cb3dcc..e1e60c7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -5,6 +5,14 @@
  * Copyright (C) 2008, Intel Corp.
  *    Author: Huang Ying <ying.huang@intel.com>
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -21,6 +29,10 @@
 #include <crypto/ctr.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
 #define HAS_CTR
@@ -42,8 +54,31 @@
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
 
-#define AESNI_ALIGN	16
+/* This data is stored at the end of the crypto_tfm struct.
+ * It's a type of per "session" data storage location.
+ * This needs to be 16 byte aligned.
+ */
+struct aesni_rfc4106_gcm_ctx {
+	u8 hash_subkey[16];
+	struct crypto_aes_ctx aes_key_expanded;
+	u8 nonce[4];
+	struct cryptd_aead *cryptd_tfm;
+};
+
+struct aesni_gcm_set_hash_subkey_result {
+	int err;
+	struct completion completion;
+};
+
+struct aesni_hash_subkey_req_data {
+	u8 iv[16];
+	struct aesni_gcm_set_hash_subkey_result result;
+	struct scatterlist sg;
+};
+
+#define AESNI_ALIGN	(16)
 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
+#define RFC4106_HASH_SUBKEY_SIZE 16
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
@@ -59,9 +94,62 @@
 			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
+#ifdef CONFIG_X86_64
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
+/* asmlinkage void aesni_gcm_enc()
+ * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
+ * u8 *out, Ciphertext output. Encrypt in-place is allowed.
+ * const u8 *in, Plaintext input
+ * unsigned long plaintext_len, Length of data in bytes for encryption.
+ * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
+ *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
+ *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD)
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
+ *          is going to be 8 or 12 bytes
+ * u8 *auth_tag, Authenticated Tag output.
+ * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
+ *          Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
+			const u8 *in, unsigned long plaintext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+/* asmlinkage void aesni_gcm_dec()
+ * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * u8 *out, Plaintext output. Decrypt in-place is allowed.
+ * const u8 *in, Ciphertext input
+ * unsigned long ciphertext_len, Length of data in bytes for decryption.
+ * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
+ *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
+ *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD)
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
+ * to be 8 or 12 bytes
+ * u8 *auth_tag, Authenticated Tag output.
+ * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
+ * Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
+			const u8 *in, unsigned long ciphertext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+static inline struct aesni_rfc4106_gcm_ctx *
+aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
+{
+	return (struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
+			  AESNI_ALIGN);
+}
+#endif
+
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -324,6 +412,7 @@
 	},
 };
 
+#ifdef CONFIG_X86_64
 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 			    struct blkcipher_walk *walk)
 {
@@ -389,6 +478,7 @@
 		},
 	},
 };
+#endif
 
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
@@ -536,6 +626,7 @@
 	},
 };
 
+#ifdef CONFIG_X86_64
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
@@ -612,6 +703,7 @@
 	},
 };
 #endif
+#endif
 
 #ifdef HAS_LRW
 static int ablk_lrw_init(struct crypto_tfm *tfm)
@@ -730,6 +822,424 @@
 };
 #endif
 
+#ifdef CONFIG_X86_64
+static int rfc4106_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead *cryptd_tfm;
+	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_aead.reqsize = sizeof(struct aead_request)
+		+ crypto_aead_reqsize(&cryptd_tfm->base);
+	return 0;
+}
+
+static void rfc4106_exit(struct crypto_tfm *tfm)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx =
+		(struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	if (!IS_ERR(ctx->cryptd_tfm))
+		cryptd_free_aead(ctx->cryptd_tfm);
+	return;
+}
+
+static void
+rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
+{
+	struct aesni_gcm_set_hash_subkey_result *result = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	result->err = err;
+	complete(&result->completion);
+}
+
+static int
+rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
+{
+	struct crypto_ablkcipher *ctr_tfm;
+	struct ablkcipher_request *req;
+	int ret = -EINVAL;
+	struct aesni_hash_subkey_req_data *req_data;
+
+	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
+	if (IS_ERR(ctr_tfm))
+		return PTR_ERR(ctr_tfm);
+
+	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
+
+	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
+	if (ret) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return ret;
+	}
+
+	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
+	if (!req) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return -EINVAL;
+	}
+
+	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
+	if (!req_data) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return -ENOMEM;
+	}
+	memset(req_data->iv, 0, sizeof(req_data->iv));
+
+	/* Clear the data in the hash sub key container to zero.*/
+	/* We want to cipher all zeros to create the hash sub key. */
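+	/* (Per the GCM spec the hash subkey is H = E(K, 0^128); running
+	 * ctr(aes) with an all-zero counter block over an all-zero buffer
+	 * produces exactly that value.) */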
+	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
+
+	init_completion(&req_data->result.completion);
+	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
+	ablkcipher_request_set_tfm(req, ctr_tfm);
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+					CRYPTO_TFM_REQ_MAY_BACKLOG,
+					rfc4106_set_hash_subkey_done,
+					&req_data->result);
+
+	ablkcipher_request_set_crypt(req, &req_data->sg,
+		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
+
+	ret = crypto_ablkcipher_encrypt(req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible
+			(&req_data->result.completion);
+		if (!ret)
+			ret = req_data->result.err;
+	}
+	ablkcipher_request_free(req);
+	kfree(req_data);
+	crypto_free_ablkcipher(ctr_tfm);
+	return ret;
+}
+
+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
+						   unsigned int key_len)
+{
+	int ret = 0;
+	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	u8 *new_key_mem = NULL;
+
+	if (key_len < 4) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	/*Account for 4 byte nonce at the end.*/
+	key_len -= 4;
+	if (key_len != AES_KEYSIZE_128) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
+	/*This must be on a 16 byte boundary!*/
+	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
+		return -EINVAL;
+
+	if ((unsigned long)key % AESNI_ALIGN) {
+		/*key is not aligned: use an auxiliary aligned pointer*/
+		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
+		if (!new_key_mem)
+			return -ENOMEM;
+
+		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
+		memcpy(new_key_mem, key, key_len);
+		key = new_key_mem;
+	}
+
+	if (!irq_fpu_usable())
+		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
+		key, key_len);
+	else {
+		kernel_fpu_begin();
+		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
+		kernel_fpu_end();
+	}
+	/*This must be on a 16 byte boundary!*/
+	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+exit:
+	kfree(new_key_mem);
+	return ret;
+}
+
+/* This is the Integrity Check Value (aka the authentication tag) length and
+ * can be 8, 12 or 16 bytes long. */
+static int rfc4106_set_authsize(struct crypto_aead *parent,
+				unsigned int authsize)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	crypto_aead_crt(parent)->authsize = authsize;
+	crypto_aead_crt(cryptd_child)->authsize = authsize;
+	return 0;
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_encrypt(cryptd_req);
+	} else {
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.encrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_decrypt(cryptd_req);
+	} else {
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.decrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static struct crypto_alg rfc4106_alg = {
+	.cra_name = "rfc4106(gcm(aes))",
+	.cra_driver_name = "rfc4106-gcm-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_nivaead_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
+	.cra_init = rfc4106_init,
+	.cra_exit = rfc4106_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = rfc4106_set_key,
+			.setauthsize = rfc4106_set_authsize,
+			.encrypt = rfc4106_encrypt,
+			.decrypt = rfc4106_decrypt,
+			.geniv = "seqiv",
+			.ivsize = 8,
+			.maxauthsize = 16,
+		},
+	},
+};
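+
+/*
+ * Illustrative use (not part of this patch): callers reach this transform
+ * through the normal AEAD API, roughly
+ *
+ *	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
+ *	crypto_aead_setkey(tfm, key, 20);
+ *	crypto_aead_setauthsize(tfm, 16);
+ *
+ * where key is the 16 byte AES-128 key followed by the 4 byte nonce, and
+ * the 8 byte per-request IV is supplied through the aead_request.
+ */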
+
+static int __driver_rfc4106_encrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	__be32 counter = cpu_to_be32(1);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_tab[16+AESNI_ALIGN];
+	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	/* Assuming we are supporting rfc4106 64-bit extended sequence
+	 * numbers, the AAD length must be 8 or 12 bytes. */
+	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+		return -EINVAL;
+	/* Build the IV below */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
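+	/* iv now holds the pre-counter block j0: the 4 byte salt/nonce from
+	 * the SA, the 8 byte explicit IV, then 0x00000001 in big endian. */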
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
+			GFP_ATOMIC);
+		if (unlikely(!src))
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+					req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+		+ ((unsigned long)req->cryptlen), auth_tag_len);
+
+	/* The authTag (aka the Integrity Check Value) needs to be written
+	 * back to the packet. */
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0,
+			req->cryptlen + auth_tag_len, 1);
+		kfree(src);
+	}
+	return 0;
+}
+
+static int __driver_rfc4106_decrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	unsigned long tempCipherLen = 0;
+	__be32 counter = cpu_to_be32(1);
+	int retval = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_and_authTag[32+AESNI_ALIGN];
+	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
+	u8 *authTag = iv + 16;
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	if (unlikely((req->cryptlen < auth_tag_len) ||
+		(req->assoclen != 8 && req->assoclen != 12)))
+		return -EINVAL;
+	/* Assuming we are supporting rfc4106 64-bit extended sequence
+	 * numbers, the AAD length must be 8 or 12 bytes. */
+
+	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
+	/* Build the IV below */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+		if (!src)
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+			req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
+		authTag, auth_tag_len);
+
+	/* Compare generated tag with passed in tag. */
+	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
+		-EBADMSG : 0;
+
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+		kfree(src);
+	}
+	return retval;
+}
+
+static struct crypto_alg __rfc4106_alg = {
+	.cra_name		= "__gcm-aes-aesni",
+	.cra_driver_name	= "__driver-gcm-aes-aesni",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
+	.cra_blocksize		= 1,
+	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_aead_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
+	.cra_u = {
+		.aead = {
+			.encrypt	= __driver_rfc4106_encrypt,
+			.decrypt	= __driver_rfc4106_decrypt,
+		},
+	},
+};
+#endif
+
 static int __init aesni_init(void)
 {
 	int err;
@@ -738,6 +1248,7 @@
 		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
+
 	if ((err = crypto_register_alg(&aesni_alg)))
 		goto aes_err;
 	if ((err = crypto_register_alg(&__aesni_alg)))
@@ -746,18 +1257,24 @@
 		goto blk_ecb_err;
 	if ((err = crypto_register_alg(&blk_cbc_alg)))
 		goto blk_cbc_err;
-	if ((err = crypto_register_alg(&blk_ctr_alg)))
-		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ecb_alg)))
 		goto ablk_ecb_err;
 	if ((err = crypto_register_alg(&ablk_cbc_alg)))
 		goto ablk_cbc_err;
+#ifdef CONFIG_X86_64
+	if ((err = crypto_register_alg(&blk_ctr_alg)))
+		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ctr_alg)))
 		goto ablk_ctr_err;
+	if ((err = crypto_register_alg(&__rfc4106_alg)))
+		goto __aead_gcm_err;
+	if ((err = crypto_register_alg(&rfc4106_alg)))
+		goto aead_gcm_err;
 #ifdef HAS_CTR
 	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
 		goto ablk_rfc3686_ctr_err;
 #endif
+#endif
 #ifdef HAS_LRW
 	if ((err = crypto_register_alg(&ablk_lrw_alg)))
 		goto ablk_lrw_err;
@@ -770,7 +1287,6 @@
 	if ((err = crypto_register_alg(&ablk_xts_alg)))
 		goto ablk_xts_err;
 #endif
-
 	return err;
 
 #ifdef HAS_XTS
@@ -784,18 +1300,24 @@
 	crypto_unregister_alg(&ablk_lrw_alg);
 ablk_lrw_err:
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 ablk_rfc3686_ctr_err:
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+aead_gcm_err:
+	crypto_unregister_alg(&__rfc4106_alg);
+__aead_gcm_err:
 	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
 	crypto_unregister_alg(&ablk_ecb_alg);
 ablk_ecb_err:
-	crypto_unregister_alg(&blk_ctr_alg);
-blk_ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 blk_cbc_err:
 	crypto_unregister_alg(&blk_ecb_alg);
@@ -818,13 +1340,17 @@
 #ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+	crypto_unregister_alg(&__rfc4106_alg);
 	crypto_unregister_alg(&ablk_ctr_alg);
+	crypto_unregister_alg(&blk_ctr_alg);
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);
-	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);
 	crypto_unregister_alg(&__aesni_alg);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 55d106b..211ca3f 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -185,17 +185,16 @@
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
+extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_NUMA_EMU
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 				   int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes)
-{
-}
 #endif
+#endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6aee50d..64dc82e 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,16 +3,27 @@
 
 #include <linux/pci.h>
 
+struct amd_nb_bus_dev_range {
+	u8 bus;
+	u8 dev_base;
+	u8 dev_limit;
+};
+
 extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+#ifdef CONFIG_NUMA_EMU
+extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
+extern void amd_get_nodes(struct bootnode *nodes);
+#endif
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 };
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index cf12007..5e3969c 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -234,6 +234,7 @@
 extern void setup_local_APIC(void);
 extern void end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
+void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
@@ -244,7 +245,6 @@
  * On 32bit this is mach-xxx local
  */
 #ifdef CONFIG_X86_64
-extern void early_init_lapic_mapping(void);
 extern int apic_is_clustered_box(void);
 #else
 static inline int apic_is_clustered_box(void)
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3b62ab5..5e1a2ee 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -32,11 +32,7 @@
 #define BOOT_HEAP_SIZE             0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#ifdef CONFIG_X86_64
-#define BOOT_HEAP_SIZE	0x7000
-#else
-#define BOOT_HEAP_SIZE	0x4000
-#endif
+#define BOOT_HEAP_SIZE	0x8000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index b81002f..078ad0c 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -94,7 +94,7 @@
 
 static inline int hw_breakpoint_active(void)
 {
-	return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
 extern void aout_dump_debugregs(struct user *dump);
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0141b23..4729b2b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,11 +116,11 @@
 #endif
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
-	__end_of_permanent_fixed_addresses,
-
 #ifdef	CONFIG_X86_MRST
 	FIX_LNW_VRTC,
 #endif
+	__end_of_permanent_fixed_addresses,
+
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdf..91d915a 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@
 	return __gpio_cansleep(gpio);
 }
 
-/*
- * Not implemented, yet.
- */
 static inline int gpio_to_irq(unsigned int gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index ff2546c..7a15153 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,6 +20,9 @@
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
+#include <asm/kvm_para.h>
+#include <asm/xen/hypervisor.h>
+
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
 
@@ -47,4 +50,13 @@
 extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
 extern const struct hypervisor_x86 x86_hyper_xen_hvm;
 
+static inline bool hypervisor_x2apic_available(void)
+{
+	if (kvm_para_available())
+		return true;
+	if (xen_x2apic_para_available())
+		return true;
+	return false;
+}
+
 #endif
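Note: hypervisor_x2apic_available() simply ORs the two paravirt detection helpers, so a caller has a single check for "a friendly hypervisor is present" when deciding whether x2APIC may be enabled. A hedged sketch of such a caller (the surrounding policy is illustrative, not the actual apic.c logic):

static bool example_try_x2apic(bool intr_remapping_ok)
{
	/* under a known hypervisor the interrupt-remapping requirement
	 * can be relaxed; otherwise insist on it */
	if (hypervisor_x2apic_available())
		return true;
	return intr_remapping_ok;
}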
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 0c5ca4e..f327d38 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -169,6 +169,7 @@
 extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
 
 extern int get_nr_irqs_gsi(void);
+
 extern void setup_ioapic_ids_from_mpc(void);
 extern void setup_ioapic_ids_from_mpc_nocheck(void);
 
@@ -183,6 +184,8 @@
 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void __init pre_init_apic_IRQ0(void);
 
+extern void mp_save_irq(struct mpc_intsrc *m);
+
 #else  /* !CONFIG_X86_IO_APIC */
 
 #define io_apic_assign_pci_irqs 0
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb..c704b38 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -10,6 +10,9 @@
 #include <asm/apicdef.h>
 #include <asm/irq_vectors.h>
 
+/* Even though we don't support this, supply it to appease OF */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+
 static inline int irq_canonicalize(int irq)
 {
 	return ((irq == 2) ? 9 : irq);
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f23eb25..ca242d3 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -18,7 +18,6 @@
 	DIE_TRAP,
 	DIE_GPF,
 	DIE_CALL,
-	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 	DIE_NMIUNKNOWN,
 };
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b36c6b3..8e37deb 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -15,6 +15,14 @@
 
 struct x86_emulate_ctxt;
 
+struct x86_exception {
+	u8 vector;
+	bool error_code_valid;
+	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
+};
+
 /*
  * x86_emulate_ops:
  *
@@ -64,7 +72,8 @@
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			unsigned int bytes, struct kvm_vcpu *vcpu,
+			struct x86_exception *fault);
 
 	/*
 	 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -74,7 +83,8 @@
 	 *  @bytes: [IN ] Number of bytes to write to memory.
 	 */
 	int (*write_std)(unsigned long addr, void *val,
-			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			 unsigned int bytes, struct kvm_vcpu *vcpu,
+			 struct x86_exception *fault);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *        Used for instruction fetch.
@@ -83,7 +93,8 @@
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*fetch)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+		     unsigned int bytes, struct kvm_vcpu *vcpu,
+		     struct x86_exception *fault);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
@@ -94,7 +105,7 @@
 	int (*read_emulated)(unsigned long addr,
 			     void *val,
 			     unsigned int bytes,
-			     unsigned int *error,
+			     struct x86_exception *fault,
 			     struct kvm_vcpu *vcpu);
 
 	/*
@@ -107,7 +118,7 @@
 	int (*write_emulated)(unsigned long addr,
 			      const void *val,
 			      unsigned int bytes,
-			      unsigned int *error,
+			      struct x86_exception *fault,
 			      struct kvm_vcpu *vcpu);
 
 	/*
@@ -122,7 +133,7 @@
 				const void *old,
 				const void *new,
 				unsigned int bytes,
-				unsigned int *error,
+				struct x86_exception *fault,
 				struct kvm_vcpu *vcpu);
 
 	int (*pio_in_emulated)(int size, unsigned short port, void *val,
@@ -159,7 +170,10 @@
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
@@ -226,9 +240,8 @@
 
 	bool perm_ok; /* do not check permissions if true */
 
-	int exception; /* exception that happens during emulation or -1 */
-	u32 error_code; /* error code for exception */
-	bool error_code_valid;
+	bool have_exception;
+	struct x86_exception exception;
 
 	/* decode cache */
 	struct decode_cache decode;
@@ -252,7 +265,7 @@
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
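Note: with struct x86_exception, the emulator callbacks report faults by filling in an exception descriptor instead of a bare error code. A sketch of how a read_std-style callback might signal a guest page fault (PF_VECTOR, X86EMUL_CONTINUE and X86EMUL_PROPAGATE_FAULT are existing KVM definitions; guest_range_ok()/guest_ptr() are hypothetical helpers):

static int example_read_std(unsigned long addr, void *val, unsigned int bytes,
			    struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (!guest_range_ok(addr, bytes)) {		/* hypothetical check */
		fault->vector = PF_VECTOR;
		fault->error_code_valid = true;
		fault->error_code = 0;
		fault->nested_page_fault = false;
		fault->address = addr;			/* cr2 for the guest */
		return X86EMUL_PROPAGATE_FAULT;
	}
	memcpy(val, guest_ptr(addr), bytes);		/* hypothetical copy */
	return X86EMUL_CONTINUE;
}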
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f702f82..ffd7f8d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -83,11 +83,14 @@
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
+#define ASYNC_PF_PER_VCPU 64
+
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
 struct kvm;
+struct kvm_async_pf;
 
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
@@ -114,6 +117,7 @@
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
 };
 
 enum {
@@ -238,16 +242,18 @@
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
@@ -315,16 +321,6 @@
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64      address;
-		unsigned error_code;
-		bool     nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -412,6 +408,15 @@
 	u64 hv_vapic;
 
 	cpumask_var_t wbinvd_dirty_mask;
+
+	struct {
+		bool halted;
+		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		struct gfn_to_hva_cache data;
+		u64 msr_val;
+		u32 id;
+		bool send_user_only;
+	} apf;
 };
 
 struct kvm_arch {
@@ -456,6 +461,10 @@
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+	#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+	#endif
 };
 
 struct kvm_vm_stat {
@@ -529,6 +538,7 @@
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -582,9 +592,17 @@
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 	const struct trace_print_flags *exit_reasons_str;
 };
 
+struct kvm_arch_async_pf {
+	u32 token;
+	gfn_t gfn;
+	unsigned long cr3;
+	bool direct_map;
+};
+
 extern struct kvm_x86_ops *kvm_x86_ops;
 
 int kvm_mmu_module_init(void);
@@ -594,7 +612,6 @@
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
-void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -623,8 +640,15 @@
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, u16 error_code, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+			int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 
@@ -650,7 +674,7 @@
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
@@ -668,11 +692,11 @@
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
@@ -690,16 +714,21 @@
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
@@ -766,20 +795,25 @@
 #define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
+#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
 
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
  * Trap the fault and ignore the instruction if that happens.
  */
-asmlinkage void kvm_handle_fault_on_reboot(void);
+asmlinkage void kvm_spurious_fault(void);
+extern bool kvm_rebooting;
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	"666: " insn "\n\t" \
+	"668: \n\t"                           \
 	".pushsection .fixup, \"ax\" \n" \
 	"667: \n\t" \
+	"cmpb $0, kvm_rebooting \n\t"	      \
+	"jne 668b \n\t"      		      \
 	__ASM_SIZE(push) " $666b \n\t"	      \
-	"jmp kvm_handle_fault_on_reboot \n\t" \
+	"call kvm_spurious_fault \n\t"	      \
 	".popsection \n\t" \
 	".pushsection __ex_table, \"a\" \n\t" \
 	_ASM_PTR " 666b, 667b \n\t" \
@@ -788,6 +822,7 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
@@ -799,4 +834,15 @@
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work);
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 7b562b6..a427bf7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -20,6 +20,7 @@
  * are available. The use of 0x11 and 0x12 is deprecated
  */
 #define KVM_FEATURE_CLOCKSOURCE2        3
+#define KVM_FEATURE_ASYNC_PF		4
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -32,9 +33,13 @@
 /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
 #define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
 
 #define KVM_MAX_MMU_OP_BATCH           32
 
+#define KVM_ASYNC_PF_ENABLED			(1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS		(1 << 1)
+
 /* Operations for KVM_HC_MMU_OP */
 #define KVM_MMU_OP_WRITE_PTE            1
 #define KVM_MMU_OP_FLUSH_TLB	        2
@@ -61,10 +66,20 @@
 	__u64 pt_phys;
 };
 
+#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
+#define KVM_PV_REASON_PAGE_READY 2
+
+struct kvm_vcpu_pv_apf_data {
+	__u32 reason;
+	__u8 pad[60];
+	__u32 enabled;
+};
+
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
 extern void kvmclock_init(void);
+extern int kvm_register_clock(char *txt);
 
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
@@ -160,8 +175,17 @@
 
 #ifdef CONFIG_KVM_GUEST
 void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
 #else
 #define kvm_guest_init() do { } while (0)
+#define kvm_async_pf_task_wait(T) do { } while (0)
+#define kvm_async_pf_task_wake(T) do { } while (0)
+static inline u32 kvm_read_and_reset_pf_reason(void)
+{
+	return 0;
+}
 #endif
 
 #endif /* __KERNEL__ */
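Note: the guest enables asynchronous page faults by writing the physical address of its per-CPU kvm_vcpu_pv_apf_data area, together with the enable flag, into MSR_KVM_ASYNC_PF_EN. A sketch of that handshake, assuming a per-CPU apf_reason variable as the guest-side data block (the variable name is illustrative):

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);

static void example_enable_async_pf(void)
{
	u64 pa = __pa(&__get_cpu_var(apf_reason));

	/* the low bits of the address carry the enable flags */
	wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
}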
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f792060..72a8b52 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
 
 #include <asm/mc146818rtc.h>
 
+#define NMI_REASON_PORT		0x61
+
+#define NMI_REASON_SERR		0x80
+#define NMI_REASON_IOCHK	0x40
+#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR	0x04
+#define NMI_REASON_CLEAR_IOCHK	0x08
+#define NMI_REASON_CLEAR_MASK	0x0f
+
 static inline unsigned char get_nmi_reason(void)
 {
-	return inb(0x61);
+	return inb(NMI_REASON_PORT);
 }
 
 static inline void reassert_nmi(void)
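Note: the new NMI_REASON_* constants name the bits of system control port B (0x61): SERR and IOCHK flag the two external NMI sources, and the CLEAR bits are written back to the same port to acknowledge and re-arm the latches. A sketch of decoding the port with these masks (the reporting function itself is illustrative):

static void example_report_external_nmi(void)
{
	unsigned char reason = get_nmi_reason();

	if (!(reason & NMI_REASON_MASK))
		return;				/* not an SERR/IOCHK NMI */

	if (reason & NMI_REASON_SERR)
		pr_emerg("NMI: PCI system error (SERR)\n");
	if (reason & NMI_REASON_IOCHK)
		pr_emerg("NMI: I/O check error (IOCHK)\n");
}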
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c4021b9..c76f5b9 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -23,6 +23,26 @@
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also set up some default high/normal/low settings for
+ * subsystems to register with.  Using 4 bits to separate
+ * the priorities.  This can go a lot higher if need be.
+ */
+
+#define NMI_LOCAL_SHIFT		16	/* randomly picked */
+#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR		(1ULL << 8)
+#define NMI_NORMAL_PRIOR	(1ULL << 4)
+#define NMI_LOW_PRIOR		(1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+
 void stop_nmi(void);
 void restart_nmi(void);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070..5ae8728 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -38,7 +38,7 @@
 extern void __cpuinit numa_remove_cpu(int cpu);
 
 #ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	((u64)64 << 20)
+#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
 #endif /* CONFIG_NUMA_EMU */
 #else
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 42a978c..f482010 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -107,10 +107,14 @@
 /* GPIO assignments */
 
 #define OLPC_GPIO_MIC_AC	1
-#define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
+#define OLPC_GPIO_DCON_STAT0	5
+#define OLPC_GPIO_DCON_STAT1	6
+#define OLPC_GPIO_DCON_IRQ	7
 #define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
-#define OLPC_GPIO_SMB_CLK	geode_gpio(14)
-#define OLPC_GPIO_SMB_DATA	geode_gpio(15)
+#define OLPC_GPIO_DCON_LOAD    11
+#define OLPC_GPIO_DCON_BLANK   12
+#define OLPC_GPIO_SMB_CLK      14
+#define OLPC_GPIO_SMB_DATA     15
 #define OLPC_GPIO_WORKAUX	geode_gpio(24)
 #define OLPC_GPIO_LID		geode_gpio(26)
 #define OLPC_GPIO_ECSCI		geode_gpio(27)
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h
index 2a84781..641988e 100644
--- a/arch/x86/include/asm/olpc_ofw.h
+++ b/arch/x86/include/asm/olpc_ofw.h
@@ -8,6 +8,8 @@
 
 #ifdef CONFIG_OLPC_OPENFIRMWARE
 
+extern bool olpc_ofw_is_installed(void);
+
 /* run an OFW command by calling into the firmware */
 #define olpc_ofw(name, args, res) \
 	__olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res)
@@ -26,10 +28,17 @@
 
 #else /* !CONFIG_OLPC_OPENFIRMWARE */
 
+static inline bool olpc_ofw_is_installed(void) { return false; }
 static inline void olpc_ofw_detect(void) { }
 static inline void setup_olpc_ofw_pgd(void) { }
 static inline bool olpc_ofw_present(void) { return false; }
 
 #endif /* !CONFIG_OLPC_OPENFIRMWARE */
 
+#ifdef CONFIG_OLPC_OPENFIRMWARE_DT
+extern void olpc_dt_build_devicetree(void);
+#else
+static inline void olpc_dt_build_devicetree(void) { }
+#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */
+
 #endif /* _ASM_X86_OLPC_OFW_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7709c12..2071a8b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,6 +435,11 @@
 {
 	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
 }
+static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp)
+{
+	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
+}
 
 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
 				    pte_t *ptep)
@@ -442,6 +447,12 @@
 	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
 }
 
+static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
+				    pmd_t *pmdp)
+{
+	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
+}
+
 static inline pte_t __pte(pteval_t val)
 {
 	pteval_t ret;
@@ -543,6 +554,20 @@
 		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+#if PAGETABLE_LEVELS >= 3
+	if (sizeof(pmdval_t) > sizeof(long))
+		/* 5 arg words */
+		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
+	else
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
+#endif
+}
+#endif
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	pmdval_t val = native_pmd_val(pmd);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b82bac9..8288509 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -265,10 +265,16 @@
 	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
+			   pmd_t *pmdp, pmd_t pmdval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
 	void (*pte_update_defer)(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep);
+	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
+			   pmd_t *pmdp);
+	void (*pmd_update_defer)(struct mm_struct *mm,
+				 unsigned long addr, pmd_t *pmdp);
 
 	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f899e01..8ee4516 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -230,6 +230,125 @@
 })
 
 /*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)					\
+({									\
+	typeof(var) paro_ret__ = val;					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("xaddb %0, "__percpu_arg(1)				\
+			    : "+q" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 2:								\
+		asm("xaddw %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 4:								\
+		asm("xaddl %0, "__percpu_arg(1)				\
+			    : "+r" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	case 8:								\
+		asm("xaddq %0, "__percpu_arg(1)				\
+			    : "+re" (paro_ret__), "+m" (var)		\
+			    : : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	paro_ret__ += val;						\
+	paro_ret__;							\
+})
+
+/*
+ * percpu_xchg_op() is implemented with cmpxchg and no lock prefix: a plain
+ * xchg is expensive because of its implied lock prefix, and the processor
+ * cannot prefetch cachelines when xchg is used.
+ */
+#define percpu_xchg_op(var, nval)					\
+({									\
+	typeof(var) pxo_ret__;						\
+	typeof(var) pxo_new__ = (nval);					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("\n1:mov "__percpu_arg(1)",%%al"			\
+		    "\n\tcmpxchgb %2, "__percpu_arg(1)			\
+		    "\n\tjnz 1b"					\
+			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "q" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	case 2:								\
+		asm("\n1:mov "__percpu_arg(1)",%%ax"			\
+		    "\n\tcmpxchgw %2, "__percpu_arg(1)			\
+		    "\n\tjnz 1b"					\
+			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "r" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	case 4:								\
+		asm("\n1:mov "__percpu_arg(1)",%%eax"			\
+		    "\n\tcmpxchgl %2, "__percpu_arg(1)			\
+		    "\n\tjnz 1b"					\
+			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "r" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	case 8:								\
+		asm("\n1:mov "__percpu_arg(1)",%%rax"			\
+		    "\n\tcmpxchgq %2, "__percpu_arg(1)			\
+		    "\n\tjnz 1b"					\
+			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "r" (pxo_new__)				\
+			    : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	pxo_ret__;							\
+})
+
+/*
+ * cmpxchg has no such implied lock semantics; as a result it is much
+ * more efficient for cpu-local operations.
+ */
+#define percpu_cmpxchg_op(var, oval, nval)				\
+({									\
+	typeof(var) pco_ret__;						\
+	typeof(var) pco_old__ = (oval);					\
+	typeof(var) pco_new__ = (nval);					\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		asm("cmpxchgb %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "q" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	case 2:								\
+		asm("cmpxchgw %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "r" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	case 4:								\
+		asm("cmpxchgl %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "r" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	case 8:								\
+		asm("cmpxchgq %2, "__percpu_arg(1)			\
+			    : "=a" (pco_ret__), "+m" (var)		\
+			    : "r" (pco_new__), "0" (pco_old__)		\
+			    : "memory");				\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+	pco_ret__;							\
+})
+
+/*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
  * percpu_read_stable() is more efficient and can be used if its value
@@ -267,6 +386,12 @@
 #define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+/*
+ * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
+ * faster than an xchg with forced lock semantics.
+ */
+#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -286,6 +411,11 @@
 #define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -299,6 +429,31 @@
 #define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#endif /* !CONFIG_M386 */
 
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
@@ -311,6 +466,7 @@
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
@@ -318,12 +474,12 @@
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
-
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
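Note: the new percpu_add_return_op/percpu_xchg_op/percpu_cmpxchg_op macros back the generic this_cpu_add_return(), this_cpu_xchg() and this_cpu_cmpxchg() operations with single instructions on the local CPU's copy, so no preempt or irq juggling is needed around them. A small sketch of what that enables, using a hypothetical per-CPU sequence counter and state word:

static DEFINE_PER_CPU(unsigned int, example_seq);
static DEFINE_PER_CPU(unsigned long, example_state);

static unsigned int example_next_seq(void)
{
	/* xaddl under the hood: returns the post-increment value */
	return this_cpu_add_return(example_seq, 1);
}

static bool example_claim(void)
{
	/* cmpxchg on this CPU's copy, no lock prefix needed */
	return this_cpu_cmpxchg(example_state, 0, 1) == 0;
}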
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 295e2ff..e2f6a99 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -20,6 +20,9 @@
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)
 
+#define ARCH_P4_CNTRVAL_BITS	(40)
+#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
 #define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 271de94..b4389a4 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -92,7 +92,7 @@
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-				  unsigned long adddress)
+				  unsigned long address)
 {
 	___pmd_free_tlb(tlb, pmd);
 }
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 2334982..98391db 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -46,6 +46,15 @@
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+	return __pmd(xchg((pmdval_t *)xp, 0));
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 177b016..94b979d 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -104,6 +104,29 @@
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+union split_pmd {
+	struct {
+		u32 pmd_low;
+		u32 pmd_high;
+	};
+	pmd_t pmd;
+};
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	union split_pmd res, *orig = (union split_pmd *)pmdp;
+
+	/* xchg acts as a barrier before setting of the high bits */
+	res.pmd_low = xchg(&orig->pmd_low, 0);
+	res.pmd_high = orig->pmd_high;
+	orig->pmd_high = 0;
+
+	return res.pmd;
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index ada823a..18601c8 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -35,6 +35,7 @@
 #else  /* !CONFIG_PARAVIRT */
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
+#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
 
 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
@@ -59,6 +60,8 @@
 
 #define pte_update(mm, addr, ptep)              do { } while (0)
 #define pte_update_defer(mm, addr, ptep)        do { } while (0)
+#define pmd_update(mm, addr, ptep)              do { } while (0)
+#define pmd_update_defer(mm, addr, ptep)        do { } while (0)
 
 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -94,6 +97,11 @@
 	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
+static inline int pmd_young(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_RW;
@@ -142,6 +150,23 @@
 		(_PAGE_PSE | _PAGE_PRESENT);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_SPLITTING;
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) & _PAGE_PSE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return cpu_has_pse;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 {
 	pteval_t v = native_pte_val(pte);
@@ -216,6 +241,55 @@
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+	pmdval_t v = native_pmd_val(pmd);
+
+	return __pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	return pmd_set_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -256,6 +330,16 @@
 	return __pte(val);
 }
 
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	pmdval_t val = pmd_val(pmd);
+
+	val &= _HPAGE_CHG_MASK;
+	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+
+	return __pmd(val);
+}
+
 /* mprotect needs to preserve PAT bits when updating vm_page_prot */
 #define pgprot_modify pgprot_modify
 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
@@ -350,7 +434,7 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
 
 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -524,12 +608,26 @@
 	return res;
 }
 
+static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
+{
+	pmd_t res = *pmdp;
+
+	native_pmd_clear(pmdp);
+	return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 				     pte_t *ptep , pte_t pte)
 {
 	native_set_pte(ptep, pte);
 }
 
+static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
+				     pmd_t *pmdp , pmd_t pmd)
+{
+	native_set_pmd(pmdp, pmd);
+}
+
 #ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
@@ -607,6 +705,49 @@
 
 #define flush_tlb_fix_spurious_fault(vma, address)
 
+#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+	pmd_update(mm, addr, pmdp);
+	return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pmd_t *pmdp)
+{
+	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+	pmd_update(mm, addr, pmdp);
+}
+
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
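Note: the pmd_* helpers above mirror the long-standing pte_* ones so that transparent hugepage code can build and modify 2MB entries the same way it handles 4K PTEs. A hedged sketch of composing a writable, dirty huge pmd for a page (the surrounding fault path is omitted):

static pmd_t example_make_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;

	entry = mk_pmd(page, prot);		/* pfn + protection bits */
	entry = pmd_mkhuge(entry);		/* set _PAGE_PSE */
	entry = pmd_mkdirty(pmd_mkwrite(entry));
	return pmd_mkyoung(entry);
}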
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f86da20..975f709 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -59,6 +59,16 @@
 	native_set_pte(ptep, pte);
 }
 
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+
+static inline void native_pmd_clear(pmd_t *pmd)
+{
+	native_set_pmd(pmd, native_make_pmd(0));
+}
+
 static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 {
 #ifdef CONFIG_SMP
@@ -72,14 +82,17 @@
 #endif
 }
 
-static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
-	*pmdp = pmd;
-}
-
-static inline void native_pmd_clear(pmd_t *pmd)
-{
-	native_set_pmd(pmd, native_make_pmd(0));
+#ifdef CONFIG_SMP
+	return native_make_pmd(xchg(&xp->pmd, 0));
+#else
+	/* native_local_pmdp_get_and_clear,
+	   but duplicated because of cyclic dependency */
+	pmd_t ret = *xp;
+	native_pmd_clear(xp);
+	return ret;
+#endif
 }
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
@@ -168,6 +181,7 @@
 #define	kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
 
 #define __HAVE_ARCH_PTE_SAME
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index d1f4a76..7db7723 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -22,6 +22,7 @@
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
+#define _PAGE_BIT_SPLITTING	_PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
 #define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
 
 /* If _PAGE_BIT_PRESENT is clear, we use these: */
@@ -45,6 +46,7 @@
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
 #define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
 #define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define _PAGE_SPLITTING	(_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
 #define __HAVE_ARCH_PTE_SPECIAL
 
 #ifdef CONFIG_KMEMCHECK
@@ -70,6 +72,7 @@
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cae9c3c..45636ce 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -141,10 +141,9 @@
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
-#define current_cpu_data	__get_cpu_var(cpu_info)
 #else
+#define cpu_info		boot_cpu_data
 #define cpu_data(cpu)		boot_cpu_data
-#define current_cpu_data	boot_cpu_data
 #endif
 
 extern const struct seq_operations cpuinfo_op;
@@ -762,10 +761,11 @@
 extern void init_c1e_mask(void);
 
 extern unsigned long		boot_option_idle_override;
-extern unsigned long		idle_halt;
-extern unsigned long		idle_nomwait;
 extern bool			c1e_detected;
 
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+			 IDLE_POLL, IDLE_FORCE_MWAIT};
+
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
@@ -902,7 +902,7 @@
 /*
  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
  * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessable even if the CPU haven't stored the SS/ESP registers
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
  * on the stack (interrupt gate does not save these registers
  * when switching to the same priv ring).
  * Therefore beware: accessing the ss/esp fields of the
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
new file mode 100644
index 0000000..b4ec95f
--- /dev/null
+++ b/arch/x86/include/asm/prom.h
@@ -0,0 +1 @@
+/* dummy prom.h; here to make linux/of.h's #includes happy */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0e83105..f2b83bc 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -47,14 +47,13 @@
 	INTERCEPT_MONITOR,
 	INTERCEPT_MWAIT,
 	INTERCEPT_MWAIT_COND,
+	INTERCEPT_XSETBV,
 };
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 	u8 reserved_1[42];
@@ -81,14 +80,19 @@
 	u32 event_inj_err;
 	u64 nested_cr3;
 	u64 lbr_ctl;
-	u64 reserved_5;
+	u32 clean;
+	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
 #define TLB_CONTROL_DO_NOTHING 0
 #define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
 
 #define V_TPR_MASK 0x0f
 
@@ -204,19 +208,31 @@
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
 
-#define INTERCEPT_CR0_MASK 1
-#define INTERCEPT_CR3_MASK (1 << 3)
-#define INTERCEPT_CR4_MASK (1 << 4)
-#define INTERCEPT_CR8_MASK (1 << 8)
+#define INTERCEPT_CR0_READ	0
+#define INTERCEPT_CR3_READ	3
+#define INTERCEPT_CR4_READ	4
+#define INTERCEPT_CR8_READ	8
+#define INTERCEPT_CR0_WRITE	(16 + 0)
+#define INTERCEPT_CR3_WRITE	(16 + 3)
+#define INTERCEPT_CR4_WRITE	(16 + 4)
+#define INTERCEPT_CR8_WRITE	(16 + 8)
 
-#define INTERCEPT_DR0_MASK 1
-#define INTERCEPT_DR1_MASK (1 << 1)
-#define INTERCEPT_DR2_MASK (1 << 2)
-#define INTERCEPT_DR3_MASK (1 << 3)
-#define INTERCEPT_DR4_MASK (1 << 4)
-#define INTERCEPT_DR5_MASK (1 << 5)
-#define INTERCEPT_DR6_MASK (1 << 6)
-#define INTERCEPT_DR7_MASK (1 << 7)
+#define INTERCEPT_DR0_READ	0
+#define INTERCEPT_DR1_READ	1
+#define INTERCEPT_DR2_READ	2
+#define INTERCEPT_DR3_READ	3
+#define INTERCEPT_DR4_READ	4
+#define INTERCEPT_DR5_READ	5
+#define INTERCEPT_DR6_READ	6
+#define INTERCEPT_DR7_READ	7
+#define INTERCEPT_DR0_WRITE	(16 + 0)
+#define INTERCEPT_DR1_WRITE	(16 + 1)
+#define INTERCEPT_DR2_WRITE	(16 + 2)
+#define INTERCEPT_DR3_WRITE	(16 + 3)
+#define INTERCEPT_DR4_WRITE	(16 + 4)
+#define INTERCEPT_DR5_WRITE	(16 + 5)
+#define INTERCEPT_DR6_WRITE	(16 + 6)
+#define INTERCEPT_DR7_WRITE	(16 + 7)
 
 #define SVM_EVTINJ_VEC_MASK 0xff
 
@@ -246,6 +262,8 @@
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define	SVM_EXIT_READ_CR0 	0x000
 #define	SVM_EXIT_READ_CR3 	0x003
 #define	SVM_EXIT_READ_CR4 	0x004
@@ -316,6 +334,7 @@
 #define SVM_EXIT_MONITOR	0x08a
 #define SVM_EXIT_MWAIT		0x08b
 #define SVM_EXIT_MWAIT_COND	0x08c
+#define SVM_EXIT_XSETBV		0x08d
 #define SVM_EXIT_NPF  		0x400
 
 #define SVM_EXIT_ERR		-1
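Note: with intercept_cr and intercept_dr widened to u32, read intercepts live in bits 0-15 and the matching write intercepts at bit 16 + n, which is exactly what the INTERCEPT_*_READ/WRITE constants encode. A sketch of setting and testing an intercept under that layout (the helper names are hypothetical):

static inline void example_set_cr_write_intercept(struct vmcb_control_area *c,
						  int cr)
{
	c->intercept_cr |= 1U << (16 + cr);	/* e.g. INTERCEPT_CR3_WRITE */
}

static inline bool example_cr_read_intercepted(struct vmcb_control_area *c,
					       int cr)
{
	return c->intercept_cr & (1U << cr);	/* e.g. INTERCEPT_CR3_READ */
}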
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index f66cda5..0310da6 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -30,6 +30,7 @@
 asmlinkage void stack_segment(void);
 asmlinkage void general_protection(void);
 asmlinkage void page_fault(void);
+asmlinkage void async_page_fault(void);
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void coprocessor_error(void);
 asmlinkage void alignment_check(void);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9f0cbd9..84471b8 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -66,15 +66,23 @@
 #define PIN_BASED_NMI_EXITING                   0x00000008
 #define PIN_BASED_VIRTUAL_NMIS                  0x00000020
 
+#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000002
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
+#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
 #define VM_EXIT_SAVE_IA32_PAT			0x00040000
 #define VM_EXIT_LOAD_IA32_PAT			0x00080000
+#define VM_EXIT_SAVE_IA32_EFER                  0x00100000
+#define VM_EXIT_LOAD_IA32_EFER                  0x00200000
+#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
 
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000002
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
+#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
 #define VM_ENTRY_LOAD_IA32_PAT			0x00004000
+#define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
 
 /* VMCS Encodings */
 enum vmcs_field {
@@ -239,6 +247,7 @@
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
 #define EXIT_REASON_HLT                 12
+#define EXIT_REASON_INVD                13
 #define EXIT_REASON_INVLPG              14
 #define EXIT_REASON_RDPMC               15
 #define EXIT_REASON_RDTSC               16
@@ -296,6 +305,12 @@
 #define GUEST_INTR_STATE_SMI		0x00000004
 #define GUEST_INTR_STATE_NMI		0x00000008
 
+/* GUEST_ACTIVITY_STATE flags */
+#define GUEST_ACTIVITY_ACTIVE		0
+#define GUEST_ACTIVITY_HLT		1
+#define GUEST_ACTIVITY_SHUTDOWN		2
+#define GUEST_ACTIVITY_WAIT_SIPI	3
+
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 396ff4c..66d0fff 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,4 +37,39 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
+#include <asm/processor.h>
+
+static inline uint32_t xen_cpuid_base(void)
+{
+	uint32_t base, eax, ebx, ecx, edx;
+	char signature[13];
+
+	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+		cpuid(base, &eax, &ebx, &ecx, &edx);
+		*(uint32_t *)(signature + 0) = ebx;
+		*(uint32_t *)(signature + 4) = ecx;
+		*(uint32_t *)(signature + 8) = edx;
+		signature[12] = 0;
+
+		if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
+			return base;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_XEN
+extern bool xen_hvm_need_lapic(void);
+
+static inline bool xen_x2apic_para_available(void)
+{
+	return xen_hvm_need_lapic();
+}
+#else
+static inline bool xen_x2apic_para_available(void)
+{
+	return (xen_cpuid_base() != 0);
+}
+#endif
+
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
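Note: xen_cpuid_base() walks the hypervisor CPUID leaves at 0x40000000 in steps of 0x100 looking for the "XenVMMXenVMM" signature and requires at least two further leaves. Once the base is found, the Xen convention is that leaf base+1 reports the version in EAX; a hedged sketch of reading it (assuming the usual major<<16 | minor encoding):

static void example_print_xen_version(void)
{
	uint32_t base = xen_cpuid_base();
	uint32_t eax, ebx, ecx, edx;

	if (!base)
		return;				/* not running on Xen */

	cpuid(base + 1, &eax, &ebx, &ecx, &edx);
	printk(KERN_INFO "Running on Xen %u.%u\n", eax >> 16, eax & 0xffff);
}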
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8760cc6..f25bdf2 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -42,6 +42,11 @@
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
+extern int m2p_add_override(unsigned long mfn, struct page *page);
+extern int m2p_remove_override(struct page *page);
+extern struct page *m2p_find_override(unsigned long mfn);
+extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
+
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;
@@ -72,9 +77,6 @@
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-	if (unlikely((mfn >> machine_to_phys_order) != 0))
-		return ~0;
-
 	pfn = 0;
 	/*
 	 * The array access can fail (e.g., device space beyond end of RAM).
@@ -83,6 +85,14 @@
 	 */
 	__get_user(pfn, &machine_to_phys_mapping[mfn]);
 
+	/*
+	 * If this appears to be a foreign mfn (because the pfn
+	 * doesn't map back to the mfn), then check the local override
+	 * table to see if there's a better pfn to use.
+	 */
+	if (get_phys_to_machine(pfn) != mfn)
+		pfn = m2p_find_override_pfn(mfn, pfn);
+
 	return pfn;
 }
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 17c8090..b3a7113 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -509,6 +509,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
 {
@@ -852,18 +853,6 @@
  * returns 0 on success, < 0 on error
  */
 
-static void __init acpi_register_lapic_address(unsigned long address)
-{
-	mp_lapic_addr = address;
-
-	set_fixmap_nocache(FIX_APIC_BASE, address);
-	if (boot_cpu_physical_apicid == -1U) {
-		boot_cpu_physical_apicid  = read_apic_id();
-		apic_version[boot_cpu_physical_apicid] =
-			 GET_APIC_VERSION(apic_read(APIC_LVR));
-	}
-}
-
 static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
 {
 	int count;
@@ -885,7 +874,7 @@
 		return count;
 	}
 
-	acpi_register_lapic_address(acpi_lapic_addr);
+	register_lapic_address(acpi_lapic_addr);
 
 	return count;
 }
@@ -912,7 +901,7 @@
 		return count;
 	}
 
-	acpi_register_lapic_address(acpi_lapic_addr);
+	register_lapic_address(acpi_lapic_addr);
 
 	count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
 				      acpi_parse_sapic, MAX_LOCAL_APIC);
@@ -954,32 +943,6 @@
 extern int es7000_plat;
 #endif
 
-static void assign_to_mp_irq(struct mpc_intsrc *m,
-				    struct mpc_intsrc *mp_irq)
-{
-	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
-}
-
-static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
-				struct mpc_intsrc *m)
-{
-	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
-}
-
-static void save_mp_irq(struct mpc_intsrc *m)
-{
-	int i;
-
-	for (i = 0; i < mp_irq_entries; i++) {
-		if (!mp_irq_cmp(&mp_irqs[i], m))
-			return;
-	}
-
-	assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
-	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!!\n");
-}
-
 void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
 	int ioapic;
@@ -1010,7 +973,7 @@
 	mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
 	mp_irq.dstirq = pin;	/* INTIN# */
 
-	save_mp_irq(&mp_irq);
+	mp_save_irq(&mp_irq);
 
 	isa_irq_to_gsi[bus_irq] = gsi;
 }
@@ -1085,7 +1048,7 @@
 		mp_irq.srcbusirq = i; /* Identity mapped */
 		mp_irq.dstirq = pin;
 
-		save_mp_irq(&mp_irq);
+		mp_save_irq(&mp_irq);
 	}
 }
 
@@ -1122,7 +1085,7 @@
 	mp_irq.dstapic = mp_ioapics[ioapic].apicid;
 	mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
 
-	save_mp_irq(&mp_irq);
+	mp_save_irq(&mp_irq);
 #endif
 	return 0;
 }
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d2fdb08..57ca777 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1086,7 +1086,7 @@
 
 	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
-	/* Intialize the exclusion range if necessary */
+	/* Initialize the exclusion range if necessary */
 	for_each_iommu(iommu) {
 		if (iommu->exclusion_start &&
 		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
@@ -1353,7 +1353,7 @@
 
 /*
  * Allocates a new protection domain usable for the dma_ops functions.
- * It also intializes the page table and the address allocator data
+ * It also initializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
 static struct dma_ops_domain *dma_ops_domain_alloc(void)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index affacb5..0a99f71 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,13 @@
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
+	{ 0x00, 0x18, 0x20 },
+	{ 0xff, 0x00, 0x20 },
+	{ 0xfe, 0x00, 0x20 },
+	{ }
+};
+
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index dcd7c83..5955a78 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -39,18 +39,6 @@
 
 int fix_aperture __initdata = 1;
 
-struct bus_dev_range {
-	int bus;
-	int dev_base;
-	int dev_limit;
-};
-
-static struct bus_dev_range bus_dev_ranges[] __initdata = {
-	{ 0x00, 0x18, 0x20},
-	{ 0xff, 0x00, 0x20},
-	{ 0xfe, 0x00, 0x20}
-};
-
 static struct resource gart_resource = {
 	.name	= "GART",
 	.flags	= IORESOURCE_MEM,
@@ -294,13 +282,13 @@
 	search_agp_bridge(&agp_aper_order, &valid_agp);
 
 	fix = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -349,13 +337,13 @@
 		return;
 
 	/* disable them all at first */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -390,14 +378,14 @@
 
 	fix = 0;
 	node = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 		u32 ctl;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -505,7 +493,7 @@
 	}
 
 	/* Fix up the north bridges */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus, dev_base, dev_limit;
 
 		/*
@@ -514,9 +502,9 @@
 		 */
 		u32 ctl = DISTLBWALKPRB | aper_order << 1;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 879999a..06c196d 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -49,8 +49,8 @@
 #include <asm/mtrr.h>
 #include <asm/smp.h>
 #include <asm/mce.h>
-#include <asm/kvm_para.h>
 #include <asm/tsc.h>
+#include <asm/hypervisor.h>
 
 unsigned int num_processors;
 
@@ -516,7 +516,7 @@
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
-	if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+	if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
 		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
 		/* Make LAPIC timer preferrable over percpu HPET */
 		lapic_clockevent.rating = 150;
@@ -684,7 +684,7 @@
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
 				       lapic_clockevent.shift);
 	lapic_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 	lapic_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &lapic_clockevent);
 
@@ -1191,12 +1191,15 @@
 			oldvalue, value);
 }
 
-
 /**
  * setup_local_APIC - setup the local APIC
+ *
+ * Used to set up the local APIC while initializing the BSP or bringing up APs.
+ * Always called with preemption disabled.
  */
 void __cpuinit setup_local_APIC(void)
 {
+	int cpu = smp_processor_id();
 	unsigned int value, queued;
 	int i, j, acked = 0;
 	unsigned long long tsc = 0, ntsc;
@@ -1221,8 +1224,6 @@
 #endif
 	perf_events_lapic_init();
 
-	preempt_disable();
-
 	/*
 	 * Double-check whether this APIC is really registered.
 	 * This is meaningless in clustered apic mode, so we skip it.
@@ -1338,21 +1339,19 @@
 	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
 	 */
 	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
-	if (!smp_processor_id() && (pic_mode || !value)) {
+	if (!cpu && (pic_mode || !value)) {
 		value = APIC_DM_EXTINT;
-		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
-				smp_processor_id());
+		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
 	} else {
 		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
-		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
-				smp_processor_id());
+		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
 	}
 	apic_write(APIC_LVT0, value);
 
 	/*
 	 * only the BP should see the LINT1 NMI signal, obviously.
 	 */
-	if (!smp_processor_id())
+	if (!cpu)
 		value = APIC_DM_NMI;
 	else
 		value = APIC_DM_NMI | APIC_LVT_MASKED;
@@ -1360,11 +1359,9 @@
 		value |= APIC_LVT_LEVEL_TRIGGER;
 	apic_write(APIC_LVT1, value);
 
-	preempt_enable();
-
 #ifdef CONFIG_X86_MCE_INTEL
 	/* Recheck CMCI information after local APIC is up on CPU #0 */
-	if (smp_processor_id() == 0)
+	if (!cpu)
 		cmci_recheck();
 #endif
 }
@@ -1479,7 +1476,8 @@
 		/* IR is required if there is APIC ID > 255 even when running
 		 * under KVM
 		 */
-		if (max_physical_apicid > 255 || !kvm_para_available())
+		if (max_physical_apicid > 255 ||
+		    !hypervisor_x2apic_available())
 			goto nox2apic;
 		/*
 		 * without IR all CPUs can be addressed by IOAPIC/MSI
@@ -1633,28 +1631,6 @@
 }
 #endif
 
-#ifdef CONFIG_X86_64
-void __init early_init_lapic_mapping(void)
-{
-	/*
-	 * If no local APIC can be found then go out
-	 * : it means there is no mpatable and MADT
-	 */
-	if (!smp_found_config)
-		return;
-
-	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
-		    APIC_BASE, mp_lapic_addr);
-
-	/*
-	 * Fetch the APIC ID of the BSP in case we have a
-	 * default configuration (or the MP table is broken).
-	 */
-	boot_cpu_physical_apicid = read_apic_id();
-}
-#endif
-
 /**
  * init_apic_mappings - initialize APIC mappings
  */
@@ -1680,10 +1656,7 @@
 		 * acpi_register_lapic_address()
 		 */
 		if (!acpi_lapic && !smp_found_config)
-			set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-
-		apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
-					APIC_BASE, apic_phys);
+			register_lapic_address(apic_phys);
 	}
 
 	/*
@@ -1705,6 +1678,22 @@
 	}
 }
 
+void __init register_lapic_address(unsigned long address)
+{
+	mp_lapic_addr = address;
+
+	if (!x2apic_mode) {
+		set_fixmap_nocache(FIX_APIC_BASE, address);
+		apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+			    APIC_BASE, mp_lapic_addr);
+	}
+	if (boot_cpu_physical_apicid == -1U) {
+		boot_cpu_physical_apicid  = read_apic_id();
+		apic_version[boot_cpu_physical_apicid] =
+			 GET_APIC_VERSION(apic_read(APIC_LVR));
+	}
+}
+
 /*
  * This initializes the IO-APIC and APIC hardware if this is
  * a UP kernel.
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 72ec29e..79fd43c 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -68,7 +68,6 @@
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 
 	default:
@@ -96,7 +95,7 @@
 static __read_mostly struct notifier_block backtrace_notifier = {
 	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
 	.next                   = NULL,
-	.priority               = 1
+	.priority               = NMI_LOCAL_LOW_PRIOR,
 };
 
 static int __init register_trigger_all_cpu_backtrace(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index f6cd5b4..697dc34 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -125,6 +125,26 @@
 }
 early_param("noapic", parse_noapic);
 
+/* Will be called in mpparse/acpi/sfi code to save IRQ info */
+void mp_save_irq(struct mpc_intsrc *m)
+{
+	int i;
+
+	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
+		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
+		m->srcbusirq, m->dstapic, m->dstirq);
+
+	for (i = 0; i < mp_irq_entries; i++) {
+		if (!memcmp(&mp_irqs[i], m, sizeof(*m)))
+			return;
+	}
+
+	memcpy(&mp_irqs[mp_irq_entries], m, sizeof(*m));
+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
+		panic("Max # of irq sources exceeded!!\n");
+}
+
 struct irq_pin_list {
 	int apic, pin;
 	struct irq_pin_list *next;
@@ -135,6 +155,7 @@
 	return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
 }
 
+
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
@@ -2006,9 +2027,12 @@
 						= mp_ioapics[apic_id].apicid;
 
 		/*
-		 * Read the right value from the MPC table and
-		 * write it into the ID register.
+		 * Update the ID register with the right value from
+		 * the MPC table if they differ.
 		 */
+		if (mp_ioapics[apic_id].apicid == reg_00.bits.ID)
+			continue;
+
 		apic_printk(APIC_VERBOSE, KERN_INFO
 			"...changing IO-APIC physical APIC ID to %d ...",
 			mp_ioapics[apic_id].apicid);
@@ -2305,7 +2329,7 @@
 		unsigned int irr;
 		struct irq_desc *desc;
 		struct irq_cfg *cfg;
-		irq = __get_cpu_var(vector_irq)[vector];
+		irq = __this_cpu_read(vector_irq[vector]);
 
 		if (irq == -1)
 			continue;
@@ -2339,7 +2363,7 @@
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__get_cpu_var(vector_irq)[vector] = -1;
+		__this_cpu_write(vector_irq[vector], -1);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 2a3f2a7..bd16b58 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -120,8 +120,8 @@
 		else if (!strcmp(oem_table_id, "UVX"))
 			uv_system_type = UV_X2APIC;
 		else if (!strcmp(oem_table_id, "UVH")) {
-			__get_cpu_var(x2apic_extra_bits) =
-				pnodeid << uvh_apicid.s.pnode_shift;
+			__this_cpu_write(x2apic_extra_bits,
+				pnodeid << uvh_apicid.s.pnode_shift);
 			uv_system_type = UV_NON_UNIQUE_APIC;
 			uv_set_apicid_hibit();
 			return 1;
@@ -286,7 +286,7 @@
 	unsigned int id;
 
 	WARN_ON(preemptible() && num_online_cpus() > 1);
-	id = x | __get_cpu_var(x2apic_extra_bits);
+	id = x | __this_cpu_read(x2apic_extra_bits);
 
 	return id;
 }
@@ -378,7 +378,7 @@
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
+	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
 
 /*
@@ -641,7 +641,7 @@
  */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
-	if (reason != DIE_NMI_IPI)
+	if (reason != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	if (in_crash_kexec)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8..7c7bedb 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@
 
 bool cpu_has_amd_erratum(const int *erratum)
 {
-	struct cpuinfo_x86 *cpu = &current_cpu_data;
+	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
 	int osvw_id = *erratum++;
 	u32 range;
 	u32 ms;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977b..35c7e65 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@
 
 	*rc = -ENODEV;
 
-	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
 		return;
 
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
@@ -1377,7 +1377,7 @@
 static void query_values_on_cpu(void *_err)
 {
 	int *err = _err;
-	struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+	struct powernow_k8_data *data = __this_cpu_read(powernow_data);
 
 	*err = query_current_values_with_pending_wait(data);
 }
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 9ecf81f..7283e98 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -265,7 +265,7 @@
 		line_size = l2.line_size;
 		lines_per_tag = l2.lines_per_tag;
 		/* cpu_data has errata corrections for K7 applied */
-		size_in_kb = current_cpu_data.x86_cache_size;
+		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
 		break;
 	case 3:
 		if (!l3.val)
@@ -287,7 +287,7 @@
 	eax->split.type = types[leaf];
 	eax->split.level = levels[leaf];
 	eax->split.num_threads_sharing = 0;
-	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
 
 
 	if (assoc == 0xffff)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7..a779719 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
 #include <linux/gfp.h>
 #include <asm/mce.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 /* Update fake mce registers on current CPU. */
 static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@
 	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
+	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NOTIFY_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@
 
 static struct notifier_block mce_raise_nb = {
 	.notifier_call = mce_raise_notify,
-	.priority = 1000,
+	.priority = NMI_LOCAL_NORMAL_PRIOR,
 };
 
 /* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72..d916183 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@
 
 static int msr_to_offset(u32 msr)
 {
-	unsigned bank = __get_cpu_var(injectm.bank);
+	unsigned bank = __this_cpu_read(injectm.bank);
 
 	if (msr == rip_msr)
 		return offsetof(struct mce, ip);
@@ -346,7 +346,7 @@
 {
 	u64 v;
 
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
 		if (offset < 0)
@@ -369,7 +369,7 @@
 
 static void mce_wrmsrl(u32 msr, u64 v)
 {
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
 		if (offset >= 0)
@@ -1159,7 +1159,7 @@
 
 	WARN_ON(smp_processor_id() != data);
 
-	if (mce_available(&current_cpu_data)) {
+	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				&__get_cpu_var(mce_poll_banks));
 	}
@@ -1767,7 +1767,7 @@
 static int mce_resume(struct sys_device *dev)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(&current_cpu_data);
+	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
 
 	return 0;
 }
@@ -1775,7 +1775,7 @@
 static void mce_cpu_restart(void *data)
 {
 	del_timer_sync(&__get_cpu_var(mce_timer));
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -1790,7 +1790,7 @@
 /* Toggle features for corrected errors */
 static void mce_disable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	if (all)
 		del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1799,7 +1799,7 @@
 
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2022,7 +2022,7 @@
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2040,7 +2040,7 @@
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd093..8694ef56 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@
 	unsigned long flags;
 	int banks;
 
-	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
 	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d1..9d977a2 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -997,8 +997,7 @@
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	if (cpuc->enabled)
+	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw,
 				       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1268,11 +1267,10 @@
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
 			/* let the kernel handle the unknown nmi */
 			return NOTIFY_DONE;
 		/*
@@ -1296,8 +1294,8 @@
 	this_nmi = percpu_read(irq_stat.__nmi_count);
 	if ((handled > 1) ||
 		/* the next nmi could be a back-to-back nmi */
-	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
 		/*
 		 * We could have two subsequent back-to-back nmis: The
 		 * first handles more than one counter, the 2nd
@@ -1308,8 +1306,8 @@
 		 * handling more than one counter. We will mark the
 		 * next (3rd) and then drop it if unhandled.
 		 */
-		__get_cpu_var(pmu_nmi).marked	= this_nmi + 1;
-		__get_cpu_var(pmu_nmi).handled	= handled;
+		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+		__this_cpu_write(pmu_nmi.handled, handled);
 	}
 
 	return NOTIFY_STOP;
@@ -1318,7 +1316,7 @@
 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 	.notifier_call		= perf_event_nmi_handler,
 	.next			= NULL,
-	.priority		= 1
+	.priority		= NMI_LOCAL_LOW_PRIOR,
 };
 
 static struct event_constraint unconstrained;
@@ -1484,11 +1482,9 @@
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
 	perf_pmu_disable(pmu);
-	cpuc->group_flag |= PERF_EVENT_TXN;
-	cpuc->n_txn = 0;
+	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+	__this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1498,14 +1494,12 @@
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
 	/*
 	 * Truncate the collected events.
 	 */
-	cpuc->n_added -= cpuc->n_txn;
-	cpuc->n_events -= cpuc->n_txn;
+	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
 	perf_pmu_enable(pmu);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 24e390e..008835c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
-		if (!__get_cpu_var(cpu_hw_events).enabled)
+		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;
 
 		intel_pmu_enable_bts(hwc->config);
@@ -679,7 +679,7 @@
 
 static void intel_pmu_reset(void)
 {
-	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
 	unsigned long flags;
 	int idx;
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 81400b9..e56b9bf 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -753,19 +753,21 @@
 
 static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
 {
-	int overflow = 0;
-	u32 low, high;
+	u64 v;
 
-	rdmsr(hwc->config_base + hwc->idx, low, high);
-
-	/* we need to check high bit for unflagged overflows */
-	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
-		overflow = 1;
-		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
-			((u64)low) & ~P4_CCCR_OVF);
+	/* the official way to indicate an overflow */
+	rdmsrl(hwc->config_base + hwc->idx, v);
+	if (v & P4_CCCR_OVF) {
+		wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+		return 1;
 	}
 
-	return overflow;
+	/* it might be an unflagged overflow */
+	rdmsrl(hwc->event_base + hwc->idx, v);
+	if (!(v & ARCH_P4_CNTRVAL_MASK))
+		return 1;
+
+	return 0;
 }
 
 static void p4_pmu_disable_pebs(void)
@@ -1152,9 +1154,9 @@
 	 */
 	.num_counters		= ARCH_P4_MAX_CCCR,
 	.apic			= 1,
-	.cntval_bits		= 40,
-	.cntval_mask		= (1ULL << 40) - 1,
-	.max_period		= (1ULL << 39) - 1,
+	.cntval_bits		= ARCH_P4_CNTRVAL_BITS,
+	.cntval_mask		= ARCH_P4_CNTRVAL_MASK,
+	.max_period		= (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
 	.hw_config		= p4_hw_config,
 	.schedule_events	= p4_pmu_schedule_events,
 	/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8474c99..df20723 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -197,14 +197,8 @@
  */
 void dump_stack(void)
 {
-	unsigned long bp = 0;
 	unsigned long stack;
 
-#ifdef CONFIG_FRAME_POINTER
-	if (!bp)
-		get_bp(bp);
-#endif
-
 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
@@ -240,6 +234,7 @@
 	bust_spinlocks(1);
 	return flags;
 }
+EXPORT_SYMBOL_GPL(oops_begin);
 
 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 {
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0c2b7ef..294f26d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -14,6 +14,7 @@
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 #include <linux/firmware-map.h>
 #include <linux/memblock.h>
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 591e601..c8b4efa 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1406,6 +1406,16 @@
 	CFI_ENDPROC
 END(general_protection)
 
+#ifdef CONFIG_KVM_GUEST
+ENTRY(async_page_fault)
+	RING0_EC_FRAME
+	pushl $do_async_page_fault
+	CFI_ADJUST_CFA_OFFSET 4
+	jmp error_code
+	CFI_ENDPROC
+END(async_page_fault)
+#endif
+
 /*
  * End of kprobes section
  */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e3ba417..aed1ffb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -299,17 +299,21 @@
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
-	movq_cfi rdi, RDI+16-ARGOFFSET
-	movq_cfi rsi, RSI+16-ARGOFFSET
-	movq_cfi rdx, RDX+16-ARGOFFSET
-	movq_cfi rcx, RCX+16-ARGOFFSET
-	movq_cfi rax, RAX+16-ARGOFFSET
-	movq_cfi  r8,  R8+16-ARGOFFSET
-	movq_cfi  r9,  R9+16-ARGOFFSET
-	movq_cfi r10, R10+16-ARGOFFSET
-	movq_cfi r11, R11+16-ARGOFFSET
+	/*
+	 * start from rbp in pt_regs and jump over
+	 * return address.
+	 */
+	movq_cfi rdi, RDI+8-RBP
+	movq_cfi rsi, RSI+8-RBP
+	movq_cfi rdx, RDX+8-RBP
+	movq_cfi rcx, RCX+8-RBP
+	movq_cfi rax, RAX+8-RBP
+	movq_cfi  r8,  R8+8-RBP
+	movq_cfi  r9,  R9+8-RBP
+	movq_cfi r10, R10+8-RBP
+	movq_cfi r11, R11+8-RBP
 
-	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
+	leaq -RBP+8(%rsp),%rdi	/* arg1 for handler */
 	movq_cfi rbp, 8		/* push %rbp */
 	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
 	testl $3, CS(%rdi)
@@ -782,8 +786,9 @@
 
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
-	subq $ORIG_RAX-ARGOFFSET+8, %rsp
-	CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
+	/* reserve pt_regs for scratch regs and rbp */
+	subq $ORIG_RAX-RBP, %rsp
+	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
 	call save_args
 	PARTIAL_FRAME 0
 	call \func
@@ -808,9 +813,14 @@
 	TRACE_IRQS_OFF
 	decl PER_CPU_VAR(irq_count)
 	leaveq
+
 	CFI_RESTORE		rbp
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
+
+	/* we did not save rbx, restore only from ARGOFFSET */
+	addq $8, %rsp
+	CFI_ADJUST_CFA_OFFSET	-8
 exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
@@ -1319,6 +1329,9 @@
 #endif
 errorentry general_protection do_general_protection
 errorentry page_fault do_page_fault
+#ifdef CONFIG_KVM_GUEST
+errorentry async_page_fault do_async_page_fault
+#endif
 #ifdef CONFIG_X86_MCE
 paranoidzeroentry machine_check *machine_check_vector(%rip)
 #endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2984486..382eb29 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -170,9 +170,9 @@
 
 void ftrace_nmi_enter(void)
 {
-	__get_cpu_var(save_modifying_code) = modifying_code;
+	__this_cpu_write(save_modifying_code, modifying_code);
 
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -186,7 +186,7 @@
 
 void ftrace_nmi_exit(void)
 {
-	if (!__get_cpu_var(save_modifying_code))
+	if (!__this_cpu_read(save_modifying_code))
 		return;
 
 	/* Finish all executions before clearing nmi_running */
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9f54b20..fc293dc 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -126,7 +126,7 @@
 	movsl
 	movl pa(boot_params) + NEW_CL_POINTER,%esi
 	andl %esi,%esi
-	jz 1f			# No comand line
+	jz 1f			# No command line
 	movl $pa(boot_command_line),%edi
 	movl $(COMMAND_LINE_SIZE/4),%ecx
 	rep
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 42c5942..02f0763 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -122,7 +122,7 @@
 		return -EBUSY;
 
 	set_debugreg(info->address, i);
-	__get_cpu_var(cpu_debugreg[i]) = info->address;
+	__this_cpu_write(cpu_debugreg[i], info->address);
 
 	dr7 = &__get_cpu_var(cpu_dr7);
 	*dr7 |= encode_dr7(i, info->len, info->type);
@@ -397,12 +397,12 @@
 
 void hw_breakpoint_restore(void)
 {
-	set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
-	set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
-	set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
-	set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
+	set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
+	set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
+	set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
+	set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
 	set_debugreg(current->thread.debugreg6, 6);
-	set_debugreg(__get_cpu_var(cpu_dr7), 7);
+	set_debugreg(__this_cpu_read(cpu_dr7), 7);
 }
 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
 
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 58bb239..e60c38c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -169,6 +169,7 @@
 	set_stopped_child_used_math(tsk);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(init_fpu);
 
 /*
  * The xstateregs_active() routine is the same as the fpregs_active() routine,
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 83ec017..52945da 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/of.h>
 #include <linux/seq_file.h>
 #include <linux/smp.h>
 #include <linux/ftrace.h>
@@ -234,7 +235,7 @@
 	exit_idle();
 	irq_enter();
 
-	irq = __get_cpu_var(vector_irq)[vector];
+	irq = __this_cpu_read(vector_irq[vector]);
 
 	if (!handle_irq(irq, regs)) {
 		ack_APIC_irq();
@@ -275,6 +276,15 @@
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
+#ifdef CONFIG_OF
+unsigned int irq_create_of_mapping(struct device_node *controller,
+		const u32 *intspec, unsigned int intsize)
+{
+	return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+#endif
+
 #ifdef CONFIG_HOTPLUG_CPU
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
@@ -350,12 +360,12 @@
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
 
-		if (__get_cpu_var(vector_irq)[vector] < 0)
+		if (__this_cpu_read(vector_irq[vector]) < 0)
 			continue;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr  & (1 << (vector % 32))) {
-			irq = __get_cpu_var(vector_irq)[vector];
+			irq = __this_cpu_read(vector_irq[vector]);
 
 			data = irq_get_irq_data(irq);
 			raw_spin_lock(&desc->lock);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 96656f2..48ff6dc 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -79,7 +79,7 @@
 	u32 *isp, arg1, arg2;
 
 	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = __get_cpu_var(hardirq_ctx);
+	irqctx = __this_cpu_read(hardirq_ctx);
 
 	/*
 	 * this is where we switch to the IRQ stack. However, if we are
@@ -166,7 +166,7 @@
 
 	if (local_softirq_pending()) {
 		curctx = current_thread_info();
-		irqctx = __get_cpu_var(softirq_ctx);
+		irqctx = __this_cpu_read(softirq_ctx);
 		irqctx->tinfo.task = curctx->task;
 		irqctx->tinfo.previous_esp = current_stack_pointer;
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index cd21b65..a413000 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -48,6 +48,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
@@ -525,10 +526,6 @@
 		}
 		return NOTIFY_DONE;
 
-	case DIE_NMI_IPI:
-		/* Just ignore, we will handle the roundup on DIE_NMI. */
-		return NOTIFY_DONE;
-
 	case DIE_NMIUNKNOWN:
 		if (was_in_debug_nmi[raw_smp_processor_id()]) {
 			was_in_debug_nmi[raw_smp_processor_id()] = 0;
@@ -606,7 +603,7 @@
 	/*
 	 * Lowest-prio notifier priority, we want to be notified last:
 	 */
-	.priority	= -INT_MAX,
+	.priority	= NMI_LOCAL_LOW_PRIOR,
 };
 
 /**
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 5940282..d91c477 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
 	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -412,7 +412,7 @@
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
 	if (is_IF_modifier(p->ainsn.insn))
@@ -586,7 +586,7 @@
 		preempt_enable_no_resched();
 		return 1;
 	} else if (kprobe_running()) {
-		p = __get_cpu_var(current_kprobe);
+		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
 			return 1;
@@ -759,11 +759,11 @@
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}
 
 		recycle_rp_inst(ri, &empty_rp);
@@ -1202,10 +1202,10 @@
 		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
 		regs->orig_ax = ~0UL;
 
-		__get_cpu_var(current_kprobe) = &op->kp;
+		__this_cpu_write(current_kprobe, &op->kp);
 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 		opt_pre_handler(&op->kp, regs);
-		__get_cpu_var(current_kprobe) = NULL;
+		__this_cpu_write(current_kprobe, NULL);
 	}
 	preempt_enable_no_resched();
 }
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 63b0ec8..8dc4466 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -27,16 +27,37 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/hardirq.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/hash.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kprobes.h>
 #include <asm/timer.h>
+#include <asm/cpu.h>
+#include <asm/traps.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
 
 #define MMU_QUEUE_SIZE 1024
 
+static int kvmapf = 1;
+
+static int parse_no_kvmapf(char *arg)
+{
+        kvmapf = 0;
+        return 0;
+}
+
+early_param("no-kvmapf", parse_no_kvmapf);
+
 struct kvm_para_state {
 	u8 mmu_queue[MMU_QUEUE_SIZE];
 	int mmu_queue_len;
 };
 
 static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 
 static struct kvm_para_state *kvm_para_state(void)
 {
@@ -50,6 +71,195 @@
 {
 }
 
+#define KVM_TASK_SLEEP_HASHBITS 8
+#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
+
+struct kvm_task_sleep_node {
+	struct hlist_node link;
+	wait_queue_head_t wq;
+	u32 token;
+	int cpu;
+	bool halted;
+	struct mm_struct *mm;
+};
+
+static struct kvm_task_sleep_head {
+	spinlock_t lock;
+	struct hlist_head list;
+} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+
+static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
+						  u32 token)
+{
+	struct hlist_node *p;
+
+	hlist_for_each(p, &b->list) {
+		struct kvm_task_sleep_node *n =
+			hlist_entry(p, typeof(*n), link);
+		if (n->token == token)
+			return n;
+	}
+
+	return NULL;
+}
+
+void kvm_async_pf_task_wait(u32 token)
+{
+	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+	struct kvm_task_sleep_node n, *e;
+	DEFINE_WAIT(wait);
+	int cpu, idle;
+
+	cpu = get_cpu();
+	idle = idle_cpu(cpu);
+	put_cpu();
+
+	spin_lock(&b->lock);
+	e = _find_apf_task(b, token);
+	if (e) {
+		/* dummy entry exists -> wakeup was delivered ahead of PF */
+		hlist_del(&e->link);
+		kfree(e);
+		spin_unlock(&b->lock);
+		return;
+	}
+
+	n.token = token;
+	n.cpu = smp_processor_id();
+	n.mm = current->active_mm;
+	n.halted = idle || preempt_count() > 1;
+	atomic_inc(&n.mm->mm_count);
+	init_waitqueue_head(&n.wq);
+	hlist_add_head(&n.link, &b->list);
+	spin_unlock(&b->lock);
+
+	for (;;) {
+		if (!n.halted)
+			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (hlist_unhashed(&n.link))
+			break;
+
+		if (!n.halted) {
+			local_irq_enable();
+			schedule();
+			local_irq_disable();
+		} else {
+			/*
+			 * We cannot reschedule. So halt.
+			 */
+			native_safe_halt();
+			local_irq_disable();
+		}
+	}
+	if (!n.halted)
+		finish_wait(&n.wq, &wait);
+
+	return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
+
+static void apf_task_wake_one(struct kvm_task_sleep_node *n)
+{
+	hlist_del_init(&n->link);
+	if (!n->mm)
+		return;
+	mmdrop(n->mm);
+	if (n->halted)
+		smp_send_reschedule(n->cpu);
+	else if (waitqueue_active(&n->wq))
+		wake_up(&n->wq);
+}
+
+static void apf_task_wake_all(void)
+{
+	int i;
+
+	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+		struct hlist_node *p, *next;
+		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+		spin_lock(&b->lock);
+		hlist_for_each_safe(p, next, &b->list) {
+			struct kvm_task_sleep_node *n =
+				hlist_entry(p, typeof(*n), link);
+			if (n->cpu == smp_processor_id())
+				apf_task_wake_one(n);
+		}
+		spin_unlock(&b->lock);
+	}
+}
+
+void kvm_async_pf_task_wake(u32 token)
+{
+	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+	struct kvm_task_sleep_node *n;
+
+	if (token == ~0) {
+		apf_task_wake_all();
+		return;
+	}
+
+again:
+	spin_lock(&b->lock);
+	n = _find_apf_task(b, token);
+	if (!n) {
+		/*
+		 * async PF was not yet handled.
+		 * Add dummy entry for the token.
+		 */
+		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		if (!n) {
+			/*
+			 * Allocation failed! Busy wait while other cpu
+			 * handles async PF.
+			 */
+			spin_unlock(&b->lock);
+			cpu_relax();
+			goto again;
+		}
+		n->token = token;
+		n->cpu = smp_processor_id();
+		n->mm = NULL;
+		init_waitqueue_head(&n->wq);
+		hlist_add_head(&n->link, &b->list);
+	} else
+		apf_task_wake_one(n);
+	spin_unlock(&b->lock);
+	return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+
+u32 kvm_read_and_reset_pf_reason(void)
+{
+	u32 reason = 0;
+
+	if (__get_cpu_var(apf_reason).enabled) {
+		reason = __get_cpu_var(apf_reason).reason;
+		__get_cpu_var(apf_reason).reason = 0;
+	}
+
+	return reason;
+}
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
+
+dotraplinkage void __kprobes
+do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+	switch (kvm_read_and_reset_pf_reason()) {
+	default:
+		do_page_fault(regs, error_code);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		/* page is swapped out by the host. */
+		kvm_async_pf_task_wait((u32)read_cr2());
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		kvm_async_pf_task_wake((u32)read_cr2());
+		break;
+	}
+}
+
 static void kvm_mmu_op(void *buffer, unsigned len)
 {
 	int r;
@@ -231,10 +441,117 @@
 #endif
 }
 
-void __init kvm_guest_init(void)
+void __cpuinit kvm_guest_cpu_init(void)
 {
 	if (!kvm_para_available())
 		return;
 
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
+		u64 pa = __pa(&__get_cpu_var(apf_reason));
+
+#ifdef CONFIG_PREEMPT
+		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
+		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
+		__get_cpu_var(apf_reason).enabled = 1;
+		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
+		       smp_processor_id());
+	}
+}
+
+static void kvm_pv_disable_apf(void *unused)
+{
+	if (!__get_cpu_var(apf_reason).enabled)
+		return;
+
+	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
+	__get_cpu_var(apf_reason).enabled = 0;
+
+	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
+	       smp_processor_id());
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+				unsigned long code, void *unused)
+{
+	if (code == SYS_RESTART)
+		on_each_cpu(kvm_pv_disable_apf, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kvm_pv_reboot_nb = {
+	.notifier_call = kvm_pv_reboot_notify,
+};
+
+#ifdef CONFIG_SMP
+static void __init kvm_smp_prepare_boot_cpu(void)
+{
+#ifdef CONFIG_KVM_CLOCK
+	WARN_ON(kvm_register_clock("primary cpu clock"));
+#endif
+	kvm_guest_cpu_init();
+	native_smp_prepare_boot_cpu();
+}
+
+static void kvm_guest_cpu_online(void *dummy)
+{
+	kvm_guest_cpu_init();
+}
+
+static void kvm_guest_cpu_offline(void *dummy)
+{
+	kvm_pv_disable_apf(NULL);
+	apf_task_wake_all();
+}
+
+static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+        .notifier_call  = kvm_cpu_notify,
+};
+#endif
+
+static void __init kvm_apf_trap_init(void)
+{
+	set_intr_gate(14, &async_page_fault);
+}
+
+void __init kvm_guest_init(void)
+{
+	int i;
+
+	if (!kvm_para_available())
+		return;
+
 	paravirt_ops_setup();
+	register_reboot_notifier(&kvm_pv_reboot_nb);
+	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+		spin_lock_init(&async_pf_sleepers[i].lock);
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+		x86_init.irqs.trap_init = kvm_apf_trap_init;
+
+#ifdef CONFIG_SMP
+	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+	register_cpu_notifier(&kvm_cpu_notifier);
+#else
+	kvm_guest_cpu_init();
+#endif
 }
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ca43ce3..f98d3ea 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -125,7 +125,7 @@
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static int kvm_register_clock(char *txt)
+int kvm_register_clock(char *txt)
 {
 	int cpu = smp_processor_id();
 	int low, high, ret;
@@ -152,14 +152,6 @@
 }
 #endif
 
-#ifdef CONFIG_SMP
-static void __init kvm_smp_prepare_boot_cpu(void)
-{
-	WARN_ON(kvm_register_clock("primary cpu clock"));
-	native_smp_prepare_boot_cpu();
-}
-#endif
-
 /*
  * After the clock is registered, the host will keep writing to the
  * registered memory location. If the guest happens to shutdown, this memory
@@ -206,9 +198,6 @@
 	x86_cpuinit.setup_percpu_clockev =
 		kvm_setup_secondary_clock;
 #endif
-#ifdef CONFIG_SMP
-	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-#endif
 	machine_ops.shutdown  = kvm_shutdown;
 #ifdef CONFIG_KEXEC
 	machine_ops.crash_shutdown  = kvm_crash_shutdown;
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 8f29560..ab23f1a 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -37,20 +37,11 @@
 
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	if (!size)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	size = PAGE_ALIGN(size);
-	if (size > MODULES_LEN)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
-					PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+				-1, __builtin_return_address(0));
 }
 
 /* Free memory returned from module_alloc */
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 9af64d9..01b0f6d 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -118,21 +118,8 @@
 
 static void __init MP_ioapic_info(struct mpc_ioapic *m)
 {
-	if (!(m->flags & MPC_APIC_USABLE))
-		return;
-
-	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%X.\n",
-	       m->apicid, m->apicver, m->apicaddr);
-
-	mp_register_ioapic(m->apicid, m->apicaddr, gsi_top);
-}
-
-static void print_MP_intsrc_info(struct mpc_intsrc *m)
-{
-	apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
-		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-		m->irqtype, m->irqflag & 3, (m->irqflag >> 2) & 3, m->srcbus,
-		m->srcbusirq, m->dstapic, m->dstirq);
+	if (m->flags & MPC_APIC_USABLE)
+		mp_register_ioapic(m->apicid, m->apicaddr, gsi_top);
 }
 
 static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
@@ -144,73 +131,11 @@
 		mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
 }
 
-static void __init assign_to_mp_irq(struct mpc_intsrc *m,
-				    struct mpc_intsrc *mp_irq)
-{
-	mp_irq->dstapic = m->dstapic;
-	mp_irq->type = m->type;
-	mp_irq->irqtype = m->irqtype;
-	mp_irq->irqflag = m->irqflag;
-	mp_irq->srcbus = m->srcbus;
-	mp_irq->srcbusirq = m->srcbusirq;
-	mp_irq->dstirq = m->dstirq;
-}
-
-static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
-					struct mpc_intsrc *m)
-{
-	m->dstapic = mp_irq->dstapic;
-	m->type = mp_irq->type;
-	m->irqtype = mp_irq->irqtype;
-	m->irqflag = mp_irq->irqflag;
-	m->srcbus = mp_irq->srcbus;
-	m->srcbusirq = mp_irq->srcbusirq;
-	m->dstirq = mp_irq->dstirq;
-}
-
-static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
-					struct mpc_intsrc *m)
-{
-	if (mp_irq->dstapic != m->dstapic)
-		return 1;
-	if (mp_irq->type != m->type)
-		return 2;
-	if (mp_irq->irqtype != m->irqtype)
-		return 3;
-	if (mp_irq->irqflag != m->irqflag)
-		return 4;
-	if (mp_irq->srcbus != m->srcbus)
-		return 5;
-	if (mp_irq->srcbusirq != m->srcbusirq)
-		return 6;
-	if (mp_irq->dstirq != m->dstirq)
-		return 7;
-
-	return 0;
-}
-
-static void __init MP_intsrc_info(struct mpc_intsrc *m)
-{
-	int i;
-
-	print_MP_intsrc_info(m);
-
-	for (i = 0; i < mp_irq_entries; i++) {
-		if (!mp_irq_mpc_intsrc_cmp(&mp_irqs[i], m))
-			return;
-	}
-
-	assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
-	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!!\n");
-}
 #else /* CONFIG_X86_IO_APIC */
 static inline void __init MP_bus_info(struct mpc_bus *m) {}
 static inline void __init MP_ioapic_info(struct mpc_ioapic *m) {}
-static inline void __init MP_intsrc_info(struct mpc_intsrc *m) {}
 #endif /* CONFIG_X86_IO_APIC */
 
-
 static void __init MP_lintsrc_info(struct mpc_lintsrc *m)
 {
 	apic_printk(APIC_VERBOSE, "Lint: type %d, pol %d, trig %d, bus %02x,"
@@ -222,7 +147,6 @@
 /*
  * Read/parse the MPC
  */
-
 static int __init smp_check_mpc(struct mpc_table *mpc, char *oem, char *str)
 {
 
@@ -275,18 +199,6 @@
 
 void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { }
 
-static void __init smp_register_lapic_address(unsigned long address)
-{
-	mp_lapic_addr = address;
-
-	set_fixmap_nocache(FIX_APIC_BASE, address);
-	if (boot_cpu_physical_apicid == -1U) {
-		boot_cpu_physical_apicid  = read_apic_id();
-		apic_version[boot_cpu_physical_apicid] =
-			 GET_APIC_VERSION(apic_read(APIC_LVR));
-	}
-}
-
 static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
 {
 	char str[16];
@@ -301,17 +213,13 @@
 #ifdef CONFIG_X86_32
 	generic_mps_oem_check(mpc, oem, str);
 #endif
-	/* save the local APIC address, it might be non-default */
+	/* Initialize the lapic mapping */
 	if (!acpi_lapic)
-		mp_lapic_addr = mpc->lapic;
+		register_lapic_address(mpc->lapic);
 
 	if (early)
 		return 1;
 
-	/* Initialize the lapic mapping */
-	if (!acpi_lapic)
-		smp_register_lapic_address(mpc->lapic);
-
 	if (mpc->oemptr)
 		x86_init.mpparse.smp_read_mpc_oem(mpc);
 
@@ -337,7 +245,7 @@
 			skip_entry(&mpt, &count, sizeof(struct mpc_ioapic));
 			break;
 		case MP_INTSRC:
-			MP_intsrc_info((struct mpc_intsrc *)mpt);
+			mp_save_irq((struct mpc_intsrc *)mpt);
 			skip_entry(&mpt, &count, sizeof(struct mpc_intsrc));
 			break;
 		case MP_LINTSRC:
@@ -429,13 +337,13 @@
 
 		intsrc.srcbusirq = i;
 		intsrc.dstirq = i ? i : 2;	/* IRQ0 to INTIN2 */
-		MP_intsrc_info(&intsrc);
+		mp_save_irq(&intsrc);
 	}
 
 	intsrc.irqtype = mp_ExtINT;
 	intsrc.srcbusirq = 0;
 	intsrc.dstirq = 0;	/* 8259A to INTIN0 */
-	MP_intsrc_info(&intsrc);
+	mp_save_irq(&intsrc);
 }
 
 
@@ -784,11 +692,11 @@
 	int i;
 
 	apic_printk(APIC_VERBOSE, "OLD ");
-	print_MP_intsrc_info(m);
+	print_mp_irq_info(m);
 
 	i = get_MP_intsrc_index(m);
 	if (i > 0) {
-		assign_to_mpc_intsrc(&mp_irqs[i], m);
+		memcpy(m, &mp_irqs[i], sizeof(*m));
 		apic_printk(APIC_VERBOSE, "NEW ");
 		print_mp_irq_info(&mp_irqs[i]);
 		return;
@@ -875,14 +783,14 @@
 		if (nr_m_spare > 0) {
 			apic_printk(APIC_VERBOSE, "*NEW* found\n");
 			nr_m_spare--;
-			assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
+			memcpy(m_spare[nr_m_spare], &mp_irqs[i], sizeof(mp_irqs[i]));
 			m_spare[nr_m_spare] = NULL;
 		} else {
 			struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
 			count += sizeof(struct mpc_intsrc);
 			if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
 				goto out;
-			assign_to_mpc_intsrc(&mp_irqs[i], m);
+			memcpy(m, &mp_irqs[i], sizeof(*m));
 			mpc->length = count;
 			mpt += sizeof(struct mpc_intsrc);
 		}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c5b2500..869e1ae 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -421,8 +421,11 @@
 	.set_pte = native_set_pte,
 	.set_pte_at = native_set_pte_at,
 	.set_pmd = native_set_pmd,
+	.set_pmd_at = native_set_pmd_at,
 	.pte_update = paravirt_nop,
 	.pte_update_defer = paravirt_nop,
+	.pmd_update = paravirt_nop,
+	.pmd_update_defer = paravirt_nop,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c852041..d8286ed 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -22,11 +22,6 @@
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
@@ -327,7 +322,7 @@
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
@@ -386,6 +381,8 @@
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
@@ -443,10 +440,8 @@
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -462,7 +457,7 @@
 	if (!need_resched()) {
 		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 		trace_cpu_idle(1, smp_processor_id());
-		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -471,6 +466,8 @@
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
@@ -503,7 +500,6 @@
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO			0x05
 #define MWAIT_ECX_EXTENDED_INFO		0x01
@@ -513,7 +509,7 @@
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)
@@ -633,9 +629,10 @@
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3
@@ -644,8 +641,7 @@
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,
@@ -653,12 +649,10 @@
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 4b9befa..8d12878 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -57,8 +57,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 /*
@@ -113,8 +111,6 @@
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4c818a7..bd387e8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,8 +51,6 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
@@ -141,10 +139,6 @@
 			pm_idle();
 			start_critical_timings();
 
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT,
-				       smp_processor_id());
-
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index c495aa8..fc7aae1 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -18,6 +18,7 @@
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
 #include <asm/cpu.h>
+#include <asm/nmi.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/ctype.h>
@@ -747,7 +748,7 @@
 {
 	int cpu;
 
-	if (val != DIE_NMI_IPI)
+	if (val != DIE_NMI)
 		return NOTIFY_OK;
 
 	cpu = raw_smp_processor_id();
@@ -778,6 +779,8 @@
 
 static struct notifier_block crash_nmi_nb = {
 	.notifier_call = crash_nmi_callback,
+	/* we want to be the first one called */
+	.priority = NMI_LOCAL_HIGH_PRIOR+1,
 };
 
 /* Halt all other CPUs, calling the specified function on each of them
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 1cfbbfc..6f39cab 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -76,7 +76,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ee886fe..763df77 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,12 +97,12 @@
  */
 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
-void cpu_hotplug_driver_lock()
+void cpu_hotplug_driver_lock(void)
 {
         mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
-void cpu_hotplug_driver_unlock()
+void cpu_hotplug_driver_unlock(void)
 {
         mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
@@ -427,7 +427,7 @@
 
 	cpumask_set_cpu(cpu, c->llc_shared_map);
 
-	if (current_cpu_data.x86_max_cores == 1) {
+	if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
 		cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
 		c->booted_cores = 1;
 		return;
@@ -1089,7 +1089,7 @@
 
 	preempt_disable();
 	smp_cpu_index_default();
-	current_cpu_data = boot_cpu_data;
+	memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
 	cpumask_copy(cpu_callin_mask, cpumask_of(0));
 	mb();
 	/*
@@ -1383,7 +1383,7 @@
 
 	mb();
 	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
@@ -1403,11 +1403,11 @@
 	int i;
 	void *mwait_ptr;
 
-	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
 		return;
-	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
-	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
 		return;
 
 	eax = CPUID_MWAIT_LEAF;
@@ -1458,7 +1458,7 @@
 
 static inline void hlt_play_dead(void)
 {
-	if (current_cpu_data.x86 >= 4)
+	if (__this_cpu_read(cpu_info.x86) >= 4)
 		wbinvd();
 
 	while (1) {
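
The smpboot.c conversions away from current_cpu_data and __get_cpu_var() are part of the move to the this_cpu accessor family, which folds the per-CPU address calculation into the access itself (a single %gs-relative instruction on x86) and reads better when only one field is wanted. A minimal sketch of the three forms used above, with an invented per-CPU structure; like the boot and hot-unplug paths in the hunk, it assumes the caller cannot migrate (preemption disabled or a dedicated per-CPU context):

#include <linux/kernel.h>
#include <linux/percpu.h>

/* Invented per-CPU state, for illustration only. */
struct example_state {
	unsigned long	events;
	int		level;
};
static DEFINE_PER_CPU(struct example_state, example_state);

static void example_tick(void)
{
	/* read one field of this CPU's instance without forming a pointer */
	if (__this_cpu_read(example_state.level) == 0)
		__this_cpu_write(example_state.level, 1);

	/* per-CPU counter bump */
	__this_cpu_inc(example_state.events);

	/* the pointer form is still available when a whole struct is needed */
	pr_debug("events=%lu\n", __this_cpu_ptr(&example_state)->events);
}
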
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index c2f1b26..998e972 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -133,7 +133,7 @@
 	pmd = pmd_alloc(&tboot_mm, pud, vaddr);
 	if (!pmd)
 		return -1;
-	pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
+	pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
 	if (!pte)
 		return -1;
 	set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c76aaca..b9b6716 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -84,6 +84,11 @@
 static int ignore_nmis;
 
 int unknown_nmi_panic;
+/*
+ * Prevent the NMI reason port (0x61) from being accessed simultaneously;
+ * it can only be used from the NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -310,15 +315,15 @@
 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
 static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
 {
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-			reason, smp_processor_id());
+	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG
-		"You have some hardware problem, likely on the PCI bus.\n");
-
+	/*
+	 * On some machines, PCI SERR line is used to report memory
+	 * errors. EDAC makes use of it.
+	 */
 #if defined(CONFIG_EDAC)
 	if (edac_handler_set()) {
 		edac_atomic_assert_error();
@@ -329,11 +334,11 @@
 	if (panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 
-	/* Clear and disable the memory parity error line. */
-	reason = (reason & 0xf) | 4;
-	outb(reason, 0x61);
+	/* Clear and disable the PCI SERR error line. */
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -341,15 +346,17 @@
 {
 	unsigned long i;
 
-	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+	pr_emerg(
+	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 	show_registers(regs);
 
 	if (panic_on_io_nmi)
 		panic("NMI IOCK error: Not continuing");
 
 	/* Re-enable the IOCK line, wait for a few seconds */
-	reason = (reason & 0xf) | 8;
-	outb(reason, 0x61);
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 
 	i = 20000;
 	while (--i) {
@@ -357,8 +364,8 @@
 		udelay(100);
 	}
 
-	reason &= ~8;
-	outb(reason, 0x61);
+	reason &= ~NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -377,57 +384,50 @@
 		return;
 	}
 #endif
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-			reason, smp_processor_id());
+	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
+	pr_emerg("Do you have a strange power saving mode enabled?\n");
 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 }
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
-	int cpu;
 
-	cpu = smp_processor_id();
+	/*
+	 * CPU-specific NMI must be processed before non-CPU-specific
+	 * NMI, otherwise we may lose it, because the CPU-specific
+	 * NMI can not be detected/processed on other CPUs.
+	 */
+	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+		return;
 
-	/* Only the BSP gets external NMIs from the system. */
-	if (!cpu)
-		reason = get_nmi_reason();
+	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+	raw_spin_lock(&nmi_reason_lock);
+	reason = get_nmi_reason();
 
-	if (!(reason & 0xc0)) {
-		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-								== NOTIFY_STOP)
-			return;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-		if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-							== NOTIFY_STOP)
-			return;
+	if (reason & NMI_REASON_MASK) {
+		if (reason & NMI_REASON_SERR)
+			pci_serr_error(reason, regs);
+		else if (reason & NMI_REASON_IOCHK)
+			io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+		/*
+		 * Reassert NMI in case it became active
+		 * meanwhile as it's edge-triggered:
+		 */
+		reassert_nmi();
 #endif
-		unknown_nmi_error(reason, regs);
-
+		raw_spin_unlock(&nmi_reason_lock);
 		return;
 	}
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-		return;
+	raw_spin_unlock(&nmi_reason_lock);
 
-	/* AK: following checks seem to be broken on modern chipsets. FIXME */
-	if (reason & 0x80)
-		mem_parity_error(reason, regs);
-	if (reason & 0x40)
-		io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-	/*
-	 * Reassert NMI in case it became active meanwhile
-	 * as it's edge-triggered:
-	 */
-	reassert_nmi();
-#endif
+	unknown_nmi_error(reason, regs);
 }
 
 dotraplinkage notrace __kprobes void
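
In the traps.c rewrite, CPU-specific NMI sources are notified first and any CPU may then poke the NMI reason port, not just the boot CPU, so the port accesses are serialized with a raw spinlock (raw because this runs in NMI context and must never become a sleeping lock), and the 0x61 bit soup becomes named NMI_REASON_* constants. The control flow reduces to the shape below; the example_* names and stubbed port helpers are placeholders for get_nmi_reason() and the real handlers, and the bit values mirror the old open-coded 0x80 (SERR) and 0x40 (IOCHK).

#include <linux/spinlock.h>

#define EXAMPLE_REASON_SERR	0x80
#define EXAMPLE_REASON_IOCHK	0x40
#define EXAMPLE_REASON_MASK	(EXAMPLE_REASON_SERR | EXAMPLE_REASON_IOCHK)

static DEFINE_RAW_SPINLOCK(example_reason_lock);

/* Stand-ins for get_nmi_reason() and the SERR/IOCHK/unknown handlers. */
static unsigned char example_read_reason(void)		{ return 0; }
static void example_handle_serr(unsigned char r)	{ }
static void example_handle_iochk(unsigned char r)	{ }
static void example_handle_unknown(unsigned char r)	{ }

static void example_do_nmi(void)
{
	unsigned char reason;

	/* consult the shared reason port only under the lock */
	raw_spin_lock(&example_reason_lock);
	reason = example_read_reason();
	if (reason & EXAMPLE_REASON_MASK) {
		if (reason & EXAMPLE_REASON_SERR)
			example_handle_serr(reason);
		else if (reason & EXAMPLE_REASON_IOCHK)
			example_handle_iochk(reason);
		raw_spin_unlock(&example_reason_lock);
		return;
	}
	raw_spin_unlock(&example_reason_lock);

	/* nobody claimed it: fall through to the unknown-NMI path */
	example_handle_unknown(reason);
}
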
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 356a0d4..823f79a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -659,7 +659,7 @@
 
 	local_irq_save(flags);
 
-	__get_cpu_var(cyc2ns_offset) = 0;
+	__this_cpu_write(cyc2ns_offset, 0);
 	offset = cyc2ns_suspend - sched_clock();
 
 	for_each_possible_cpu(cpu)
@@ -965,7 +965,7 @@
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!cpu_has_tsc || tsc_disabled > 0)
+	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
 	if (tsc_clocksource_reliable)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 61fb985..863f875 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -179,6 +179,7 @@
 	if (pud_none_or_clear_bad(pud))
 		goto out;
 	pmd = pmd_offset(pud, 0xA0000);
+	split_huge_page_pmd(mm, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
 	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
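
The single added line in vm86_32.c is a transparent-hugepage guard: if the pmd covering 0xA0000 maps a huge page, there is no PTE page to walk, so the pmd is split back into ordinary PTEs before pte_offset_map_lock(). A hedged sketch of that walk order, assuming the caller holds mm->mmap_sem; the helper name is invented, and split_huge_page_pmd() is the same call the hunk uses (a no-op when no huge page is mapped there):

#include <linux/mm.h>

/* Walk to the PTE covering @addr, splitting a huge pmd if one is mapped.
 * Caller holds mm->mmap_sem and must pte_unmap_unlock() the result. */
static pte_t *example_walk_to_pte(struct mm_struct *mm, unsigned long addr,
				  spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	split_huge_page_pmd(mm, pmd);	/* no-op unless a THP is mapped here */
	if (pmd_none_or_clear_bad(pmd))
		return NULL;
	return pte_offset_map_lock(mm, pmd, addr, ptlp);
}
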
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ddc131f..50f6364 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,7 @@
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_EVENTFD
 	select KVM_APIC_ARCHITECTURE
+	select KVM_ASYNC_PF
 	select USER_RETURN_NOTIFIER
 	select KVM_MMIO
 	---help---
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 31a7035..f15501f 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
 
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
 
 CFLAGS_x86.o := -I.
 CFLAGS_svm.o := -I.
@@ -9,6 +9,7 @@
 				coalesced_mmio.o irq_comm.o eventfd.o \
 				assigned-dev.o)
 kvm-$(CONFIG_IOMMU_API)	+= $(addprefix ../../../virt/kvm/, iommu.o)
+kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
 			   i8254.o timer.o
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 38b6e8d..caf9667 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -20,16 +20,8 @@
  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  */
 
-#ifndef __KERNEL__
-#include <stdio.h>
-#include <stdint.h>
-#include <public/xen.h>
-#define DPRINTF(_f, _a ...) printf(_f , ## _a)
-#else
 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"
-#define DPRINTF(x...) do {} while (0)
-#endif
 #include <linux/module.h>
 #include <asm/kvm_emulate.h>
 
@@ -418,9 +410,9 @@
 }
 
 static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
 {
-	return base + address_mask(c, reg);
+	return address_mask(c, reg);
 }
 
 static inline void
@@ -452,60 +444,55 @@
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
-				       struct x86_emulate_ops *ops,
-				       struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops,
+			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, ops, c->seg_override);
+	return c->seg_override;
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+		    struct segmented_address addr)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_ES);
+	struct decode_cache *c = &ctxt->decode;
+	ulong la;
+
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	if (c->ad_bytes != 8)
+		la &= (u32)-1;
+	return la;
 }
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
+			     u32 error, bool valid)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_SS);
+	ctxt->exception.vector = vec;
+	ctxt->exception.error_code = error;
+	ctxt->exception.error_code_valid = valid;
+	return X86EMUL_PROPAGATE_FAULT;
 }
 
-static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
-				      u32 error, bool valid)
+static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 {
-	ctxt->exception = vec;
-	ctxt->error_code = error;
-	ctxt->error_code_valid = valid;
+	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
+static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, GP_VECTOR, err, true);
+	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 }
 
-static void emulate_pf(struct x86_emulate_ctxt *ctxt)
+static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 {
-	emulate_exception(ctxt, PF_VECTOR, 0, true);
-}
-
-static void emulate_ud(struct x86_emulate_ctxt *ctxt)
-{
-	emulate_exception(ctxt, UD_VECTOR, 0, false);
-}
-
-static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
-{
-	emulate_exception(ctxt, TS_VECTOR, err, true);
+	return emulate_exception(ctxt, TS_VECTOR, err, true);
 }
 
 static int emulate_de(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, DE_VECTOR, 0, false);
-	return X86EMUL_PROPAGATE_FAULT;
+	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -520,7 +507,7 @@
 		cur_size = fc->end - fc->start;
 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
 		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
-				size, ctxt->vcpu, NULL);
+				size, ctxt->vcpu, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		fc->end += size;
@@ -564,7 +551,7 @@
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
-			   ulong addr,
+			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
 	int rc;
@@ -572,10 +559,13 @@
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+			   ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+	addr.ea += 2;
+	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
+			   ctxt->vcpu, &ctxt->exception);
 	return rc;
 }
 
@@ -768,7 +758,7 @@
 			break;
 		}
 	}
-	op->addr.mem = modrm_ea;
+	op->addr.mem.ea = modrm_ea;
 done:
 	return rc;
 }
@@ -783,13 +773,13 @@
 	op->type = OP_MEM;
 	switch (c->ad_bytes) {
 	case 2:
-		op->addr.mem = insn_fetch(u16, 2, c->eip);
+		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
 		break;
 	case 4:
-		op->addr.mem = insn_fetch(u32, 4, c->eip);
+		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
 		break;
 	case 8:
-		op->addr.mem = insn_fetch(u64, 8, c->eip);
+		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
 		break;
 	}
 done:
@@ -808,7 +798,7 @@
 		else if (c->src.bytes == 4)
 			sv = (s32)c->src.val & (s32)mask;
 
-		c->dst.addr.mem += (sv >> 3);
+		c->dst.addr.mem.ea += (sv >> 3);
 	}
 
 	/* only subword offset */
@@ -821,7 +811,6 @@
 {
 	int rc;
 	struct read_cache *mc = &ctxt->decode.mem_read;
-	u32 err;
 
 	while (size) {
 		int n = min(size, 8u);
@@ -829,10 +818,8 @@
 		if (mc->pos < mc->end)
 			goto read_cached;
 
-		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
-					ctxt->vcpu);
-		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt);
+		rc = ops->read_emulated(addr, mc->data + mc->end, n,
+					&ctxt->exception, ctxt->vcpu);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		mc->end += n;
@@ -907,19 +894,15 @@
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
 	int ret;
-	u32 err;
 	ulong addr;
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 	addr = dt.address + index * 8;
-	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,  &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
+	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
+			    &ctxt->exception);
 
        return ret;
 }
@@ -931,21 +914,17 @@
 {
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
-	u32 err;
 	ulong addr;
 	int ret;
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 
 	addr = dt.address + index * 8;
-	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
+	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
+			     &ctxt->exception);
 
 	return ret;
 }
@@ -1092,7 +1071,6 @@
 {
 	int rc;
 	struct decode_cache *c = &ctxt->decode;
-	u32 err;
 
 	switch (c->dst.type) {
 	case OP_REG:
@@ -1101,21 +1079,19 @@
 	case OP_MEM:
 		if (c->lock_prefix)
 			rc = ops->cmpxchg_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.orig_val,
 					&c->dst.val,
 					c->dst.bytes,
-					&err,
+					&ctxt->exception,
 					ctxt->vcpu);
 		else
 			rc = ops->write_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.val,
 					c->dst.bytes,
-					&err,
+					&ctxt->exception,
 					ctxt->vcpu);
-		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
@@ -1137,8 +1113,8 @@
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
-					   c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.seg = VCPU_SREG_SS;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1147,10 +1123,11 @@
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
+	struct segmented_address addr;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1184,10 +1161,8 @@
 			change_mask |= EFLG_IF;
 		break;
 	case X86EMUL_MODE_VM86:
-		if (iopl < 3) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (iopl < 3)
+			return emulate_gp(ctxt, 0);
 		change_mask |= EFLG_IF;
 		break;
 	default: /* real mode */
@@ -1198,9 +1173,6 @@
 	*(unsigned long *)dest =
 		(ctxt->eflags & ~change_mask) | (val & change_mask);
 
-	if (rc == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
-
 	return rc;
 }
 
@@ -1287,7 +1259,6 @@
 	gva_t cs_addr;
 	gva_t eip_addr;
 	u16 cs, eip;
-	u32 err;
 
 	/* TODO: Add limit checks */
 	c->src.val = ctxt->eflags;
@@ -1317,11 +1288,11 @@
 	eip_addr = dt.address + (irq << 2);
 	cs_addr = dt.address + (irq << 2) + 2;
 
-	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
+	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
+	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1370,10 +1341,8 @@
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	if (temp_eip & ~0xffff) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (temp_eip & ~0xffff)
+		return emulate_gp(ctxt, 0);
 
 	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
 
@@ -1624,10 +1593,8 @@
 
 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1678,34 +1645,26 @@
 	u16 cs_sel, ss_sel;
 
 	/* inject #GP if in real mode */
-	if (ctxt->mode == X86EMUL_MODE_REAL) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_REAL)
+		return emulate_gp(ctxt, 0);
 
 	/* XXX sysenter/sysexit have not been tested in 64bit mode.
 	* Therefore, we inject an #UD.
 	*/
-	if (ctxt->mode == X86EMUL_MODE_PROT64) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	case X86EMUL_MODE_PROT64:
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	}
 
@@ -1745,10 +1704,8 @@
 
 	/* inject #GP if in real mode or Virtual 8086 mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_gp(ctxt, 0);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1763,18 +1720,14 @@
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = (u16)(msr_data + 24);
 		break;
 	case X86EMUL_MODE_PROT64:
 		cs_sel = (u16)(msr_data + 32);
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = cs_sel + 8;
 		cs.d = 0;
 		cs.l = 1;
@@ -1934,33 +1887,27 @@
 {
 	struct tss_segment_16 tss_seg;
 	int ret;
-	u32 err, new_tss_base = get_desc_base(new_desc);
+	u32 new_tss_base = get_desc_base(new_desc);
 
 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	save_state_to_tss16(ctxt, ops, &tss_seg);
 
 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			     &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			     &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
@@ -1968,12 +1915,10 @@
 		ret = ops->write_std(new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &err);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
+				     ctxt->vcpu, &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt);
 			return ret;
-		}
 	}
 
 	return load_state_from_tss16(ctxt, ops, &tss_seg);
@@ -2013,10 +1958,8 @@
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
+		return emulate_gp(ctxt, 0);
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
 	c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -2076,33 +2019,27 @@
 {
 	struct tss_segment_32 tss_seg;
 	int ret;
-	u32 err, new_tss_base = get_desc_base(new_desc);
+	u32 new_tss_base = get_desc_base(new_desc);
 
 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	save_state_to_tss32(ctxt, ops, &tss_seg);
 
 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			     &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			     &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
@@ -2110,12 +2047,10 @@
 		ret = ops->write_std(new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &err);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
+				     ctxt->vcpu, &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt);
 			return ret;
-		}
 	}
 
 	return load_state_from_tss32(ctxt, ops, &tss_seg);
@@ -2146,10 +2081,8 @@
 
 	if (reason != TASK_SWITCH_IRET) {
 		if ((tss_selector & 3) > next_tss_desc.dpl ||
-		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
+			return emulate_gp(ctxt, 0);
 	}
 
 	desc_limit = desc_limit_scaled(&next_tss_desc);
@@ -2231,14 +2164,15 @@
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 			    int reg, struct operand *op)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
 	register_address_increment(c, &c->regs[reg], df * op->bytes);
-	op->addr.mem = register_address(c,  base, c->regs[reg]);
+	op->addr.mem.ea = register_address(c, c->regs[reg]);
+	op->addr.mem.seg = seg;
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
@@ -2369,10 +2303,8 @@
 	struct decode_cache *c = &ctxt->decode;
 	u64 tsc = 0;
 
-	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
+		return emulate_gp(ctxt, 0);
 	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
 	c->regs[VCPU_REGS_RAX] = (u32)tsc;
 	c->regs[VCPU_REGS_RDX] = tsc >> 32;
@@ -2647,7 +2579,7 @@
 
 	op->type = OP_IMM;
 	op->bytes = size;
-	op->addr.mem = c->eip;
+	op->addr.mem.ea = c->eip;
 	/* NB. Immediates are sign-extended as necessary. */
 	switch (op->bytes) {
 	case 1:
@@ -2678,7 +2610,7 @@
 }
 
 int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
@@ -2689,7 +2621,10 @@
 	struct operand memop = { .type = OP_NONE };
 
 	c->eip = ctxt->eip;
-	c->fetch.start = c->fetch.end = c->eip;
+	c->fetch.start = c->eip;
+	c->fetch.end = c->fetch.start + insn_len;
+	if (insn_len > 0)
+		memcpy(c->fetch.data, insn, insn_len);
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
 	switch (mode) {
@@ -2803,10 +2738,8 @@
 	c->execute = opcode.u.execute;
 
 	/* Unrecognised? */
-	if (c->d == 0 || (c->d & Undefined)) {
-		DPRINTF("Cannot emulate %02x\n", c->b);
+	if (c->d == 0 || (c->d & Undefined))
 		return -1;
-	}
 
 	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
 		c->op_bytes = 8;
@@ -2831,14 +2764,13 @@
 	if (!c->has_seg_override)
 		set_seg_override(c, VCPU_SREG_DS);
 
-	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
-		memop.addr.mem += seg_override_base(ctxt, ops, c);
+	memop.addr.mem.seg = seg_override(ctxt, ops, c);
 
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
-		memop.addr.mem = (u32)memop.addr.mem;
+		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
 	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem += c->eip;
+		memop.addr.mem.ea += c->eip;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -2890,14 +2822,14 @@
 	case SrcSI:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->src.addr.mem =
-			register_address(c,  seg_override_base(ctxt, ops, c),
-					 c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
 		c->src.type = OP_IMM;
-		c->src.addr.mem = c->eip;
+		c->src.addr.mem.ea = c->eip;
 		c->src.bytes = c->op_bytes + 2;
 		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
 		break;
@@ -2944,7 +2876,7 @@
 		break;
 	case DstImmUByte:
 		c->dst.type = OP_IMM;
-		c->dst.addr.mem = c->eip;
+		c->dst.addr.mem.ea = c->eip;
 		c->dst.bytes = 1;
 		c->dst.val = insn_fetch(u8, 1, c->eip);
 		break;
@@ -2969,9 +2901,9 @@
 	case DstDI:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->dst.addr.mem =
-			register_address(c, es_base(ctxt, ops),
-					 c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
 	case ImplicitOps:
@@ -3020,24 +2952,24 @@
 	ctxt->decode.mem_read.pos = 0;
 
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* LOCK prefix is allowed only with some instructions */
 	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
+		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
 
@@ -3050,7 +2982,7 @@
 	}
 
 	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
-		rc = read_emulated(ctxt, ops, c->src.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
 					c->src.valptr, c->src.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3058,7 +2990,7 @@
 	}
 
 	if (c->src2.type == OP_MEM) {
-		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
 					&c->src2.val, c->src2.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3070,7 +3002,7 @@
 
 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
 		/* optimisation - avoid slow emulated read if Mov */
-		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
 				   &c->dst.val, c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3215,13 +3147,13 @@
 		break;
 	case 0x8c:  /* mov r/m, sreg */
 		if (c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x8d: /* lea r16/r32, m */
-		c->dst.val = c->src.addr.mem;
+		c->dst.val = c->src.addr.mem.ea;
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
@@ -3230,7 +3162,7 @@
 
 		if (c->modrm_reg == VCPU_SREG_CS ||
 		    c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 
@@ -3268,7 +3200,6 @@
 		break;
 	case 0xa6 ... 0xa7:	/* cmps */
 		c->dst.type = OP_NONE; /* Disable writeback. */
-		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
 		goto cmp;
 	case 0xa8 ... 0xa9:	/* test ax, imm */
 		goto test;
@@ -3363,7 +3294,7 @@
 	do_io_in:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
@@ -3377,7 +3308,7 @@
 		c->src.bytes = min(c->src.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->dst.val,
 					  c->src.bytes)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		ops->pio_out_emulated(c->src.bytes, c->dst.val,
@@ -3402,14 +3333,14 @@
 		break;
 	case 0xfa: /* cli */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else
 			ctxt->eflags &= ~X86_EFLAGS_IF;
 		break;
 	case 0xfb: /* sti */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else {
 			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
@@ -3449,11 +3380,11 @@
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
 				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
 				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
@@ -3482,6 +3413,8 @@
 	ctxt->eip = c->eip;
 
 done:
+	if (rc == X86EMUL_PROPAGATE_FAULT)
+		ctxt->have_exception = true;
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 
 twobyte_insn:
@@ -3544,9 +3477,11 @@
 			break;
 		case 5: /* not defined */
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		case 7: /* invlpg*/
-			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+			emulate_invlpg(ctxt->vcpu,
+				       linear(ctxt, c->src.addr.mem));
 			/* Disable writeback. */
 			c->dst.type = OP_NONE;
 			break;
@@ -3573,6 +3508,7 @@
 		case 5 ... 7:
 		case 9 ... 15:
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
@@ -3581,6 +3517,7 @@
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
@@ -3588,6 +3525,7 @@
 	case 0x22: /* mov reg, cr */
 		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		c->dst.type = OP_NONE;
@@ -3596,6 +3534,7 @@
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 
@@ -3604,6 +3543,7 @@
 				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
 			/* #UD condition is already handled by the code above */
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 
@@ -3615,6 +3555,7 @@
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -3623,6 +3564,7 @@
 		/* rdmsr */
 		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
@@ -3785,6 +3727,5 @@
 	goto writeback;
 
 cannot_emulate:
-	DPRINTF("Cannot emulate %02x\n", c->b);
 	return -1;
 }
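
Two themes run through the emulate.c diff: memory operands now carry a struct segmented_address (an effective address plus a segment index) and are only flattened into a linear address at access time by linear(), which truncates to 32 bits outside 64-bit mode; and the emulate_gp()/emulate_ud()/emulate_ts() helpers now return X86EMUL_PROPAGATE_FAULT, so call sites collapse to a single return statement. The address math in isolation looks like the standalone sketch below; the segment-base table is a stub, since the emulator reads the real bases from cached descriptors.

#include <stdint.h>
#include <stdio.h>

struct segmented_address {
	uint64_t ea;	/* effective address (offset within the segment) */
	unsigned seg;	/* segment register index */
};

/* Stub: in the emulator this comes from the cached segment descriptors. */
static uint64_t seg_base(unsigned seg)
{
	static const uint64_t bases[] = { 0x0, 0x10000, 0xb8000 };
	return bases[seg % 3];
}

/* Mirror of the linear() helper: base + offset, truncated outside 64-bit mode. */
static uint64_t linear(struct segmented_address addr, int ad_bytes)
{
	uint64_t la = seg_base(addr.seg) + addr.ea;

	if (ad_bytes != 8)		/* 16/32-bit address size: wrap at 4 GiB */
		la &= (uint32_t)-1;
	return la;
}

int main(void)
{
	struct segmented_address a = { .ea = 0xfffffff8, .seg = 1 };

	printf("64-bit: %#llx\n", (unsigned long long)linear(a, 8));
	printf("32-bit: %#llx\n", (unsigned long long)linear(a, 4));
	return 0;
}
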
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 975bb45..3377d53 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -73,6 +73,13 @@
 	return vcpu->arch.cr4 & mask;
 }
 
+static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
+{
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->decache_cr3(vcpu);
+	return vcpu->arch.cr3;
+}
+
 static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, ~0UL);
@@ -84,4 +91,19 @@
 		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
+static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags |= HF_GUEST_MASK;
+}
+
+static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags &= ~HF_GUEST_MASK;
+}
+
+static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_GUEST_MASK;
+}
+
 #endif
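
kvm_read_cr3() follows KVM's lazy register-caching pattern: vcpu->arch.cr3 is refreshed from the VMCS/VMCB (via decache_cr3) only when its bit in regs_avail is clear, so repeated reads between exits cost nothing, and the new hflags helpers are plain single-bit mode flags. A generic, userspace-sized sketch of the same idea, with made-up names and a pretend hardware read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { EXAMPLE_REG_CR3 = 0 };
#define EXAMPLE_GUEST_MODE	(1UL << 0)

struct example_vcpu {
	uint64_t	cr3;		/* cached value, maybe stale */
	unsigned long	regs_avail;	/* bit set: cache is valid */
	unsigned long	flags;
};

/* Stand-in for kvm_x86_ops->decache_cr3(): refresh the cache from hardware. */
static void example_decache_cr3(struct example_vcpu *v)
{
	v->cr3 = 0x1000;		/* pretend read from the VMCS */
	v->regs_avail |= 1UL << EXAMPLE_REG_CR3;
}

static uint64_t example_read_cr3(struct example_vcpu *v)
{
	if (!(v->regs_avail & (1UL << EXAMPLE_REG_CR3)))
		example_decache_cr3(v);	/* touch hardware only on a miss */
	return v->cr3;
}

static void example_enter_guest_mode(struct example_vcpu *v) { v->flags |= EXAMPLE_GUEST_MODE; }
static void example_leave_guest_mode(struct example_vcpu *v) { v->flags &= ~EXAMPLE_GUEST_MODE; }
static bool example_is_guest_mode(struct example_vcpu *v) { return v->flags & EXAMPLE_GUEST_MODE; }

int main(void)
{
	struct example_vcpu v = { .regs_avail = 0 };

	example_enter_guest_mode(&v);
	printf("cr3=%#llx guest=%d\n",
	       (unsigned long long)example_read_cr3(&v),
	       example_is_guest_mode(&v));
	example_leave_guest_mode(&v);
	return 0;
}
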
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 413f897..93cf9d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -277,7 +277,8 @@
 
 	if (old_ppr != ppr) {
 		apic_set_reg(apic, APIC_PROCPRI, ppr);
-		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+		if (ppr < old_ppr)
+			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 	}
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fbb04ae..f02b8ed 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,9 +18,11 @@
  *
  */
 
+#include "irq.h"
 #include "mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
+#include "x86.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -194,7 +196,6 @@
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
-static u64 __read_mostly shadow_base_present_pte;
 static u64 __read_mostly shadow_nx_mask;
 static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
@@ -213,12 +214,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
-void kvm_mmu_set_base_ptes(u64 base_pte)
-{
-	shadow_base_present_pte = base_pte;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
-
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
@@ -482,46 +477,46 @@
 }
 
 /*
- * Return the pointer to the largepage write count for a given
- * gfn, handling slots that are not large page aligned.
+ * Return the pointer to the large page information for a given gfn,
+ * handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn,
-			       struct kvm_memory_slot *slot,
-			       int level)
+static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
+					      struct kvm_memory_slot *slot,
+					      int level)
 {
 	unsigned long idx;
 
 	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
 	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-	return &slot->lpage_info[level - 2][idx].write_count;
+	return &slot->lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count   = slot_largepage_idx(gfn, slot, i);
-		*write_count += 1;
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count += 1;
 	}
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count   = slot_largepage_idx(gfn, slot, i);
-		*write_count -= 1;
-		WARN_ON(*write_count < 0);
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count -= 1;
+		WARN_ON(linfo->write_count < 0);
 	}
 }
 
@@ -530,12 +525,12 @@
 				int level)
 {
 	struct kvm_memory_slot *slot;
-	int *largepage_idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
-		largepage_idx = slot_largepage_idx(gfn, slot, level);
-		return *largepage_idx;
+		linfo = lpage_info_slot(gfn, slot, level);
+		return linfo->write_count;
 	}
 
 	return 1;
@@ -559,14 +554,18 @@
 	return ret;
 }
 
-static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 {
 	struct kvm_memory_slot *slot;
-	int host_level, level, max_level;
-
 	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
 	if (slot && slot->dirty_bitmap)
-		return PT_PAGE_TABLE_LEVEL;
+		return true;
+	return false;
+}
+
+static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+	int host_level, level, max_level;
 
 	host_level = host_mapping_level(vcpu->kvm, large_gfn);
 
@@ -590,16 +589,15 @@
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-		(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+	linfo = lpage_info_slot(gfn, slot, level);
 
-	return &slot->lpage_info[level - 2][idx].rmap_pde;
+	return &linfo->rmap_pde;
 }
 
 /*
@@ -887,19 +885,16 @@
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			gfn_t gfn = memslot->base_gfn + gfn_offset;
 
 			ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
 			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-				unsigned long idx;
-				int sh;
+				struct kvm_lpage_info *linfo;
 
-				sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
-				idx = ((memslot->base_gfn+gfn_offset) >> sh) -
-					(memslot->base_gfn >> sh);
-				ret |= handler(kvm,
-					&memslot->lpage_info[j][idx].rmap_pde,
-					data);
+				linfo = lpage_info_slot(gfn, memslot,
+							PT_DIRECTORY_LEVEL + j);
+				ret |= handler(kvm, &linfo->rmap_pde, data);
 			}
 			trace_kvm_age_page(hva, memslot, ret);
 			retval |= ret;
@@ -950,6 +945,35 @@
 	return young;
 }
 
+static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			      unsigned long data)
+{
+	u64 *spte;
+	int young = 0;
+
+	/*
+	 * If there's no access bit in the secondary pte set by the
+	 * hardware it's up to gup-fast/gup to set the access bit in
+	 * the primary pte or in the page structure.
+	 */
+	if (!shadow_accessed_mask)
+		goto out;
+
+	spte = rmap_next(kvm, rmapp, NULL);
+	while (spte) {
+		u64 _spte = *spte;
+		BUG_ON(!(_spte & PT_PRESENT_MASK));
+		young = _spte & PT_ACCESSED_MASK;
+		if (young) {
+			young = 1;
+			break;
+		}
+		spte = rmap_next(kvm, rmapp, spte);
+	}
+out:
+	return young;
+}
+
 #define RMAP_RECYCLE_THRESHOLD 1000
 
 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -970,6 +994,11 @@
 	return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
+}
+
 #ifdef MMU_DEBUG
 static int is_empty_shadow_page(u64 *spt)
 {
@@ -1161,7 +1190,7 @@
 }
 
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp, bool clear_unsync)
+			       struct kvm_mmu_page *sp)
 {
 	return 1;
 }
@@ -1291,7 +1320,7 @@
 	if (clear_unsync)
 		kvm_unlink_unsync_page(vcpu->kvm, sp);
 
-	if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
@@ -1332,12 +1361,12 @@
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_unlink_unsync_page(vcpu->kvm, s);
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-			(vcpu->arch.mmu.sync_page(vcpu, s, true))) {
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
 			continue;
 		}
-		kvm_unlink_unsync_page(vcpu->kvm, s);
 		flush = true;
 	}
 
@@ -1963,9 +1992,9 @@
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync, bool reset_host_protection)
+		    bool can_unsync, bool host_writable)
 {
-	u64 spte;
+	u64 spte, entry = *sptep;
 	int ret = 0;
 
 	/*
@@ -1973,7 +2002,7 @@
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte;
+	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
@@ -1990,8 +2019,10 @@
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
-	if (reset_host_protection)
+	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
+	else
+		pte_access &= ~ACC_WRITE_MASK;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -2036,6 +2067,14 @@
 
 set_pte:
 	update_spte(sptep, spte);
+	/*
+	 * If we overwrite a writable spte with a read-only one we
+	 * should flush remote TLBs. Otherwise rmap_write_protect
+	 * will find a read-only spte, even though the writable spte
+	 * might be cached on a CPU's TLB.
+	 */
+	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
+		kvm_flush_remote_tlbs(vcpu->kvm);
 done:
 	return ret;
 }
@@ -2045,7 +2084,7 @@
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int level, gfn_t gfn,
 			 pfn_t pfn, bool speculative,
-			 bool reset_host_protection)
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
@@ -2080,7 +2119,7 @@
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		      dirty, level, gfn, pfn, speculative, true,
-		      reset_host_protection)) {
+		      host_writable)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_mmu_flush_tlb(vcpu);
@@ -2211,7 +2250,8 @@
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int level, gfn_t gfn, pfn_t pfn)
+			int map_writable, int level, gfn_t gfn, pfn_t pfn,
+			bool prefault)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -2220,9 +2260,11 @@
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+			unsigned pte_access = ACC_ALL;
+
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
 				     0, write, 1, &pt_write,
-				     level, gfn, pfn, false, true);
+				     level, gfn, pfn, prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
 			++vcpu->stat.pf_fixed;
 			break;
@@ -2277,27 +2319,81 @@
 	return 1;
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+					gfn_t *gfnp, pfn_t *pfnp, int *levelp)
+{
+	pfn_t pfn = *pfnp;
+	gfn_t gfn = *gfnp;
+	int level = *levelp;
+
+	/*
+	 * Check if it's a transparent hugepage. If this were a
+	 * hugetlbfs page, level wouldn't be set to
+	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
+	 * here.
+	 */
+	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
+	    level == PT_PAGE_TABLE_LEVEL &&
+	    PageTransCompound(pfn_to_page(pfn)) &&
+	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
+		unsigned long mask;
+		/*
+		 * mmu_notifier_retry was successful and we hold the
+		 * mmu_lock here, so the pmd can't become splitting
+		 * from under us, and in turn
+		 * __split_huge_page_refcount() can't run from under
+		 * us and we can safely transfer the refcount from
+		 * PG_tail to PG_head as we switch the pfn to tail to
+		 * head.
+		 */
+		*levelp = level = PT_DIRECTORY_LEVEL;
+		mask = KVM_PAGES_PER_HPAGE(level) - 1;
+		VM_BUG_ON((gfn & mask) != (pfn & mask));
+		if (pfn & mask) {
+			gfn &= ~mask;
+			*gfnp = gfn;
+			kvm_release_pfn_clean(pfn);
+			pfn &= ~mask;
+			if (!get_page_unless_zero(pfn_to_page(pfn)))
+				BUG();
+			*pfnp = pfn;
+		}
+	}
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
+			 bool prefault)
 {
 	int r;
 	int level;
+	int force_pt_level;
 	pfn_t pfn;
 	unsigned long mmu_seq;
+	bool map_writable;
 
-	level = mapping_level(vcpu, gfn);
+	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+	if (likely(!force_pt_level)) {
+		level = mapping_level(vcpu, gfn);
+		/*
+		 * This path builds a PAE pagetable - so we can map
+		 * 2mb pages at maximum. Therefore check if the level
+		 * is larger than that.
+		 */
+		if (level > PT_DIRECTORY_LEVEL)
+			level = PT_DIRECTORY_LEVEL;
 
-	/*
-	 * This path builds a PAE pagetable - so we can map 2mb pages at
-	 * maximum. Therefore check if the level is larger than that.
-	 */
-	if (level > PT_DIRECTORY_LEVEL)
-		level = PT_DIRECTORY_LEVEL;
-
-	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	} else
+		level = PT_PAGE_TABLE_LEVEL;
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -2307,7 +2403,10 @@
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, level, gfn, pfn);
+	if (likely(!force_pt_level))
+		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
+			 prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 
@@ -2530,6 +2629,7 @@
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);
+		trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 		return;
 	}
 	for (i = 0; i < 4; ++i) {
@@ -2552,23 +2652,24 @@
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
-				  u32 access, u32 *error)
+				  u32 access, struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vaddr;
 }
 
 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
-					 u32 access, u32 *error)
+					 u32 access,
+					 struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code)
+				u32 error_code, bool prefault)
 {
 	gfn_t gfn;
 	int r;
@@ -2584,17 +2685,68 @@
 	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code & PFERR_WRITE_MASK, gfn);
+			     error_code & PFERR_WRITE_MASK, gfn, prefault);
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
-				u32 error_code)
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+{
+	struct kvm_arch_async_pf arch;
+
+	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+	arch.gfn = gfn;
+	arch.direct_map = vcpu->arch.mmu.direct_map;
+	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+
+	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
+}
+
+static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
+		     kvm_event_needs_reinjection(vcpu)))
+		return false;
+
+	return kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
+{
+	bool async;
+
+	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
+
+	if (!async)
+		return false; /* *pfn has correct page already */
+
+	put_page(pfn_to_page(*pfn));
+
+	if (!prefault && can_do_async_pf(vcpu)) {
+		trace_kvm_try_async_get_page(gva, gfn);
+		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+			trace_kvm_async_pf_doublefault(gva, gfn);
+			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+			return true;
+		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+			return true;
+	}
+
+	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
+
+	return false;
+}
+
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+			  bool prefault)
 {
 	pfn_t pfn;
 	int r;
 	int level;
+	int force_pt_level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
+	int write = error_code & PFERR_WRITE_MASK;
+	bool map_writable;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -2603,21 +2755,30 @@
 	if (r)
 		return r;
 
-	level = mapping_level(vcpu, gfn);
-
-	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
+	if (likely(!force_pt_level)) {
+		level = mapping_level(vcpu, gfn);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+	} else
+		level = PT_PAGE_TABLE_LEVEL;
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+		return 0;
+
+	/* mmio */
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 level, gfn, pfn);
+	if (likely(!force_pt_level))
+		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
+	r = __direct_map(vcpu, gpa, write, map_writable,
+			 level, gfn, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -2659,18 +2820,19 @@
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr3;
+	return kvm_read_cr3(vcpu);
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu);
+	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -2816,6 +2978,7 @@
 {
 	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
+	context->base_role.word = 0;
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
@@ -3008,9 +3171,6 @@
 		return;
         }
 
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
-		return;
-
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (!sp->role.cr4_pae)
 		paging32_update_pte(vcpu, sp, spte, new);
@@ -3264,12 +3424,13 @@
 	}
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+		       void *insn, int insn_len)
 {
 	int r;
 	enum emulation_result er;
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
 		goto out;
 
@@ -3282,7 +3443,7 @@
 	if (r)
 		goto out;
 
-	er = emulate_instruction(vcpu, cr2, error_code, 0);
+	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
@@ -3377,11 +3538,14 @@
 		if (!test_bit(slot, sp->slot_bitmap))
 			continue;
 
+		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+			continue;
+
 		pt = sp->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (is_writable_pte(pt[i]))
-				pt[i] &= ~PT_WRITABLE_MASK;
+				update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
 	}
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -3463,13 +3627,6 @@
 		kmem_cache_destroy(mmu_page_header_cache);
 }
 
-void kvm_mmu_module_exit(void)
-{
-	mmu_destroy_caches();
-	percpu_counter_destroy(&kvm_total_used_mmu_pages);
-	unregister_shrinker(&mmu_shrinker);
-}
-
 int kvm_mmu_module_init(void)
 {
 	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -3566,7 +3723,7 @@
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
 	return 1;
 }
 
@@ -3662,12 +3819,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
@@ -3675,5 +3826,18 @@
 	destroy_kvm_mmu(vcpu);
 	free_mmu_pages(vcpu);
 	mmu_free_memory_caches(vcpu);
+}
+
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void mmu_audit_disable(void) { }
+#endif
+
+void kvm_mmu_module_exit(void)
+{
+	mmu_destroy_caches();
+	percpu_counter_destroy(&kvm_total_used_mmu_pages);
+	unregister_shrinker(&mmu_shrinker);
 	mmu_audit_disable();
 }
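
Of the mmu.c changes, transparent_hugepage_adjust() is the most self-contained piece of arithmetic: when the backing host page turns out to be a transparent hugepage, the mapping level is promoted and both gfn and pfn are rounded down to the huge-page boundary, which is only legal because guest and host offsets inside the huge page must already agree (hence the VM_BUG_ON). That alignment step in isolation, with illustrative constants and sample frame numbers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_HPAGE		512ULL		/* 2 MiB / 4 KiB */

static void hugepage_adjust(uint64_t *gfn, uint64_t *pfn)
{
	uint64_t mask = PAGES_PER_HPAGE - 1;

	/* guest and host offsets inside the huge page must line up */
	assert((*gfn & mask) == (*pfn & mask));

	*gfn &= ~mask;				/* head of the guest huge frame */
	*pfn &= ~mask;				/* head of the host huge frame */
}

int main(void)
{
	uint64_t gfn = 0x12345, pfn = 0xabd45;	/* same offset 0x145 in the frame */

	hugepage_adjust(&gfn, &pfn);
	printf("gfn=%#llx pfn=%#llx\n",
	       (unsigned long long)gfn, (unsigned long long)pfn);
	return 0;
}
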
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index ba2bcdd..5f6223b 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -19,11 +19,9 @@
 
 #include <linux/ratelimit.h>
 
-static int audit_point;
-
-#define audit_printk(fmt, args...)		\
+#define audit_printk(kvm, fmt, args...)		\
 	printk(KERN_ERR "audit: (%s) error: "	\
-		fmt, audit_point_name[audit_point], ##args)
+		fmt, audit_point_name[kvm->arch.audit_point], ##args)
 
 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
 
@@ -97,18 +95,21 @@
 
 	if (sp->unsync) {
 		if (level != PT_PAGE_TABLE_LEVEL) {
-			audit_printk("unsync sp: %p level = %d\n", sp, level);
+			audit_printk(vcpu->kvm, "unsync sp: %p "
+				     "level = %d\n", sp, level);
 			return;
 		}
 
 		if (*sptep == shadow_notrap_nonpresent_pte) {
-			audit_printk("notrap spte in unsync sp: %p\n", sp);
+			audit_printk(vcpu->kvm, "notrap spte in unsync "
+				     "sp: %p\n", sp);
 			return;
 		}
 	}
 
 	if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-		audit_printk("notrap spte in direct sp: %p\n", sp);
+		audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
+			     sp);
 		return;
 	}
 
@@ -125,8 +126,9 @@
 
 	hpa =  pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-		audit_printk("levels %d pfn %llx hpa %llx ent %llxn",
-				   vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
+		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
+			     "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+			     hpa, *sptep);
 }
 
 static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
@@ -142,8 +144,8 @@
 	if (!gfn_to_memslot(kvm, gfn)) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no memslot for gfn %llx\n", gfn);
-		audit_printk("index %ld of sp (gfn=%llx)\n",
+		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
+		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
 		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
 		dump_stack();
 		return;
@@ -153,7 +155,8 @@
 	if (!*rmapp) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no rmap for writable spte %llx\n", *sptep);
+		audit_printk(kvm, "no rmap for writable spte %llx\n",
+			     *sptep);
 		dump_stack();
 	}
 }
@@ -168,8 +171,9 @@
 {
 	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
-	if (audit_point == AUDIT_POST_SYNC && sp->unsync)
-		audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
+		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
+			     "root.\n", sp);
 }
 
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -202,8 +206,9 @@
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		if (is_writable_pte(*spte))
-			audit_printk("shadow page has writable mappings: gfn "
-				     "%llx role %x\n", sp->gfn, sp->role.word);
+			audit_printk(kvm, "shadow page has writable "
+				     "mappings: gfn %llx role %x\n",
+				     sp->gfn, sp->role.word);
 		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
@@ -238,7 +243,7 @@
 	if (!__ratelimit(&ratelimit_state))
 		return;
 
-	audit_point = point;
+	vcpu->kvm->arch.audit_point = point;
 	audit_all_active_sps(vcpu->kvm);
 	audit_vcpu_spte(vcpu);
 }
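
The mmu_audit.c changes thread the struct kvm pointer into audit_printk() so the audit point becomes per-VM state (kvm->arch.audit_point) rather than a file-static. A small userspace sketch of the same context-carrying macro shape; the struct layout and the audit_point_name table are placeholders, not the kernel's.

    #include <stdio.h>

    struct kvm_arch { int audit_point; };
    struct kvm      { struct kvm_arch arch; };

    static const char *const audit_point_name[] = { "pre page fault", "post page fault" };

    /* the context arrives as the first macro argument instead of being read
     * from a global, so two VMs can audit different points concurrently */
    #define audit_printk(kvm, fmt, ...) \
        fprintf(stderr, "audit: (%s) error: " fmt, \
                audit_point_name[(kvm)->arch.audit_point], ##__VA_ARGS__)

    int main(void)
    {
        struct kvm vm = { .arch = { .audit_point = 1 } };

        audit_printk(&vm, "no rmap for writable spte %llx\n", 0x1234ULL);
        return 0;
    }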
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index cd7a833..6bccc24 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -72,7 +72,7 @@
 	unsigned pt_access;
 	unsigned pte_access;
 	gfn_t gfn;
-	u32 error_code;
+	struct x86_exception fault;
 };
 
 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
@@ -266,21 +266,23 @@
 	return 1;
 
 error:
-	walker->error_code = 0;
+	walker->fault.vector = PF_VECTOR;
+	walker->fault.error_code_valid = true;
+	walker->fault.error_code = 0;
 	if (present)
-		walker->error_code |= PFERR_PRESENT_MASK;
+		walker->fault.error_code |= PFERR_PRESENT_MASK;
 
-	walker->error_code |= write_fault | user_fault;
+	walker->fault.error_code |= write_fault | user_fault;
 
 	if (fetch_fault && mmu->nx)
-		walker->error_code |= PFERR_FETCH_MASK;
+		walker->fault.error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
-		walker->error_code |= PFERR_RSVD_MASK;
+		walker->fault.error_code |= PFERR_RSVD_MASK;
 
-	vcpu->arch.fault.address    = addr;
-	vcpu->arch.fault.error_code = walker->error_code;
+	walker->fault.address = addr;
+	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
-	trace_kvm_mmu_walker_error(walker->error_code);
+	trace_kvm_mmu_walker_error(walker->fault.error_code);
 	return 0;
 }
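
The walker error path now fills a struct x86_exception (vector, error code, faulting address, nested flag) instead of a bare u32, so the caller decides when and where to inject the fault. Below is a sketch of that record and of building the #PF error code from the individual fault conditions; the PFERR_* bit positions are stated as assumptions for illustration.

    #include <stdbool.h>
    #include <stdint.h>

    /* illustrative subset of the fault record the walker now fills in */
    struct x86_exception {
        uint8_t  vector;
        bool     error_code_valid;
        uint16_t error_code;
        bool     nested_page_fault;
        uint64_t address;
    };

    /* assumed bit layout of the x86 page-fault error code */
    #define PFERR_PRESENT_MASK  (1U << 0)
    #define PFERR_WRITE_MASK    (1U << 1)
    #define PFERR_USER_MASK     (1U << 2)
    #define PFERR_RSVD_MASK     (1U << 3)
    #define PFERR_FETCH_MASK    (1U << 4)
    #define PF_VECTOR           14

    static void fill_walker_fault(struct x86_exception *fault, uint64_t addr,
                                  bool present, bool write, bool user,
                                  bool rsvd, bool fetch, bool nested)
    {
        fault->vector = PF_VECTOR;
        fault->error_code_valid = true;
        fault->error_code = 0;
        if (present)
            fault->error_code |= PFERR_PRESENT_MASK;
        if (write)
            fault->error_code |= PFERR_WRITE_MASK;
        if (user)
            fault->error_code |= PFERR_USER_MASK;
        if (rsvd)
            fault->error_code |= PFERR_RSVD_MASK;
        if (fetch)
            fault->error_code |= PFERR_FETCH_MASK;
        fault->address = addr;
        fault->nested_page_fault = nested;
    }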
 
@@ -299,25 +301,42 @@
 					addr, access);
 }
 
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp, u64 *spte,
+				    pt_element_t gpte)
+{
+	u64 nonpresent = shadow_trap_nonpresent_pte;
+
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte)) {
+		if (!sp->unsync)
+			nonpresent = shadow_notrap_nonpresent_pte;
+		goto no_present;
+	}
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte, nonpresent);
+	return true;
+}
+
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
 	pt_element_t gpte;
 	unsigned pte_access;
 	pfn_t pfn;
-	u64 new_spte;
 
 	gpte = *(const pt_element_t *)pte;
-	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-		if (!is_present_gpte(gpte)) {
-			if (sp->unsync)
-				new_spte = shadow_trap_nonpresent_pte;
-			else
-				new_spte = shadow_notrap_nonpresent_pte;
-			__set_spte(spte, new_spte);
-		}
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return;
-	}
+
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
@@ -329,7 +348,7 @@
 		return;
 	kvm_get_pfn(pfn);
 	/*
-	 * we call mmu_set_spte() with reset_host_protection = true beacuse that
+	 * we call mmu_set_spte() with host_writable = true because
 	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -364,7 +383,6 @@
 				u64 *sptep)
 {
 	struct kvm_mmu_page *sp;
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
 	pt_element_t *gptep = gw->prefetch_ptes;
 	u64 *spte;
 	int i;
@@ -395,14 +413,7 @@
 
 		gpte = gptep[i];
 
-		if (!is_present_gpte(gpte) ||
-		      is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
-			if (!sp->unsync)
-				__set_spte(spte, shadow_notrap_nonpresent_pte);
-			continue;
-		}
-
-		if (!(gpte & PT_ACCESSED_MASK))
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
@@ -427,7 +438,8 @@
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int hlevel,
-			 int *ptwrite, pfn_t pfn)
+			 int *ptwrite, pfn_t pfn, bool map_writable,
+			 bool prefault)
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
@@ -501,7 +513,7 @@
 
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
-		     gw->gfn, pfn, false, true);
+		     gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return it.sptep;
@@ -527,8 +539,8 @@
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *           a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
-			       u32 error_code)
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+			     bool prefault)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
@@ -538,7 +550,9 @@
 	int r;
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
+	int force_pt_level;
 	unsigned long mmu_seq;
+	bool map_writable;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -556,19 +570,29 @@
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu);
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+		if (!prefault) {
+			inject_page_fault(vcpu, &walker.fault);
+			/* reset fork detector */
+			vcpu->arch.last_pt_write_count = 0;
+		}
 		return 0;
 	}
 
-	if (walker.level >= PT_DIRECTORY_LEVEL) {
+	if (walker.level >= PT_DIRECTORY_LEVEL)
+		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+	else
+		force_pt_level = 1;
+	if (!force_pt_level) {
 		level = min(walker.level, mapping_level(vcpu, walker.gfn));
 		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 	}
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
+
+	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
+			 &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -580,8 +604,10 @@
 
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
+	if (!force_pt_level)
+		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &write_pt, pfn);
+			     level, &write_pt, pfn, map_writable, prefault);
 	(void)sptep;
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 sptep, *sptep, write_pt);
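
The reworked fault path only maps large pages when neither the walker level nor dirty logging forces 4K mappings, and it fetches the pfn through try_async_pf() so a prefault can complete asynchronously. A sketch of the level-selection part, under assumed level constants and an invented helper name:

    #include <stdbool.h>

    enum { PT_PAGE_TABLE_LEVEL = 1, PT_DIRECTORY_LEVEL = 2 };

    /* dirty logging (or a 4K walker level) forces small pages; otherwise the
     * host mapping level caps what the guest mapping may use */
    static int pick_mapping_level(int walker_level, bool dirty_log_enabled,
                                  int host_level)
    {
        bool force_pt_level;

        if (walker_level >= PT_DIRECTORY_LEVEL)
            force_pt_level = dirty_log_enabled;
        else
            force_pt_level = true;

        if (force_pt_level)
            return PT_PAGE_TABLE_LEVEL;

        return walker_level < host_level ? walker_level : host_level;
    }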
@@ -661,7 +687,7 @@
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-			       u32 *error)
+			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -672,14 +698,15 @@
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception)
+		*exception = walker.fault;
 
 	return gpa;
 }
 
 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
-				      u32 access, u32 *error)
+				      u32 access,
+				      struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -690,8 +717,8 @@
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception)
+		*exception = walker.fault;
 
 	return gpa;
 }
@@ -730,12 +757,19 @@
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Note:
+ *   We should flush all TLBs if a spte is dropped, even though the guest is
+ *   responsible for it. Otherwise kvm_mmu_notifier_invalidate_page and
+ *   kvm_mmu_notifier_invalidate_range_start may see that the mapped page is
+ *   no longer used by the guest and skip the flush, leaving the guest able
+ *   to access the freed pages.
+ *   kvm->tlbs_dirty is increased instead, to delay the TLB flush in this case.
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    bool clear_unsync)
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
-	bool reset_host_protection;
+	bool host_writable;
 	gpa_t first_pte_gpa;
 
 	offset = nr_present = 0;
@@ -764,31 +798,27 @@
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
-		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
-		      || !(gpte & PT_ACCESSED_MASK)) {
-			u64 nonpresent;
 
-			if (is_present_gpte(gpte) || !clear_unsync)
-				nonpresent = shadow_trap_nonpresent_pte;
-			else
-				nonpresent = shadow_notrap_nonpresent_pte;
-			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+			vcpu->kvm->tlbs_dirty++;
+			continue;
+		}
+
+		if (gfn != sp->gfns[i]) {
+			drop_spte(vcpu->kvm, &sp->spt[i],
+				      shadow_trap_nonpresent_pte);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
-		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
-			pte_access &= ~ACC_WRITE_MASK;
-			reset_host_protection = 0;
-		} else {
-			reset_host_protection = 1;
-		}
+		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
+
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
 			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false,
-			 reset_host_protection);
+			 host_writable);
 	}
 
 	return !nr_present;
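
sync_page() now drops stale sptes without flushing immediately: it bumps kvm->tlbs_dirty so the flush can be batched, and any path that must not see stale translations flushes when the counter is non-zero. A minimal sketch of that deferred-flush pattern; the struct and function names are invented for the example.

    struct vm {
        int tlbs_dirty;     /* sptes dropped since the last remote flush */
    };

    static void remote_tlb_flush(struct vm *vm)
    {
        /* the real thing IPIs every vcpu here */
        vm->tlbs_dirty = 0;
    }

    /* record that a translation was dropped without flushing right away */
    static void note_dropped_spte(struct vm *vm)
    {
        vm->tlbs_dirty++;
    }

    /* callers that must guarantee no stale translations flush only if needed */
    static void flush_if_dirty(struct vm *vm)
    {
        if (vm->tlbs_dirty)
            remote_tlb_flush(vm);
    }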
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b81a9b7..25bd1bc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -31,6 +31,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
+#include <asm/kvm_para.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -50,6 +51,10 @@
 #define SVM_FEATURE_LBRV           (1 <<  1)
 #define SVM_FEATURE_SVML           (1 <<  2)
 #define SVM_FEATURE_NRIP           (1 <<  3)
+#define SVM_FEATURE_TSC_RATE       (1 <<  4)
+#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
+#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
+#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
@@ -97,10 +102,8 @@
 	unsigned long vmexit_rax;
 
 	/* cache for intercepts of the guest */
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 
@@ -123,7 +126,12 @@
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
-	u64 host_gs_base;
+	struct {
+		u16 fs;
+		u16 gs;
+		u16 ldt;
+		u64 gs_base;
+	} host;
 
 	u32 *msrpm;
 
@@ -133,6 +141,7 @@
 
 	unsigned int3_injected;
 	unsigned long int3_rip;
+	u32 apf_reason;
 };
 
 #define MSR_INVALID			0xffffffffU
@@ -180,14 +189,151 @@
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 				      bool has_error_code, u32 error_code);
 
+enum {
+	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
+			    pause filter count */
+	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
+	VMCB_ASID,	 /* ASID */
+	VMCB_INTR,	 /* int_ctl, int_vector */
+	VMCB_NPT,        /* npt_en, nCR3, gPAT */
+	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
+	VMCB_DR,         /* DR6, DR7 */
+	VMCB_DT,         /* GDT, IDT */
+	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
+	VMCB_CR2,        /* CR2 only */
+	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
+	VMCB_DIRTY_MAX,
+};
+
+/* TPR and CR2 are always written before VMRUN */
+#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
+
+static inline void mark_all_dirty(struct vmcb *vmcb)
+{
+	vmcb->control.clean = 0;
+}
+
+static inline void mark_all_clean(struct vmcb *vmcb)
+{
+	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+			       & ~VMCB_ALWAYS_DIRTY_MASK;
+}
+
+static inline void mark_dirty(struct vmcb *vmcb, int bit)
+{
+	vmcb->control.clean &= ~(1 << bit);
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
-static inline bool is_nested(struct vcpu_svm *svm)
+static void recalc_intercepts(struct vcpu_svm *svm)
 {
-	return svm->nested.vmcb;
+	struct vmcb_control_area *c, *h;
+	struct nested_state *g;
+
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
+	if (!is_guest_mode(&svm->vcpu))
+		return;
+
+	c = &svm->vmcb->control;
+	h = &svm->nested.hsave->control;
+	g = &svm->nested;
+
+	c->intercept_cr = h->intercept_cr | g->intercept_cr;
+	c->intercept_dr = h->intercept_dr | g->intercept_dr;
+	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+	c->intercept = h->intercept | g->intercept;
+}
+
+static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
+{
+	if (is_guest_mode(&svm->vcpu))
+		return svm->nested.hsave;
+	else
+		return svm->vmcb;
+}
+
+static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	return vmcb->control.intercept_cr & (1U << bit);
+}
+
+static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_dr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_dr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_exceptions |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_exceptions &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void set_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept |= (1ULL << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept &= ~(1ULL << bit);
+
+	recalc_intercepts(svm);
 }
 
 static inline void enable_gif(struct vcpu_svm *svm)
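
The VMCB "clean bits" introduced above let the CPU skip reloading field groups that have not changed since the last VMRUN: every write path calls mark_dirty() for its group, the run loop calls mark_all_clean() afterwards, and groups that are rewritten unconditionally (int_ctl, CR2) are never reported clean. A reduced sketch of the bookkeeping with an abbreviated group list:

    #include <stdint.h>

    /* abbreviated group list; the real enum covers every VMCB field group */
    enum { VMCB_INTERCEPTS, VMCB_ASID, VMCB_INTR, VMCB_CR, VMCB_CR2, VMCB_DIRTY_MAX };

    /* TPR/int_ctl and CR2 are rewritten before every VMRUN, so they may
     * never be marked clean */
    #define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))

    struct vmcb_control { uint32_t clean; };

    static void mark_all_dirty(struct vmcb_control *c)
    {
        c->clean = 0;                           /* nothing may be cached */
    }

    static void mark_all_clean(struct vmcb_control *c)
    {
        c->clean = ((1U << VMCB_DIRTY_MAX) - 1) & ~VMCB_ALWAYS_DIRTY_MASK;
    }

    static void mark_dirty(struct vmcb_control *c, int bit)
    {
        c->clean &= ~(1U << bit);               /* this group changed, reload it */
    }

A typical sequence is mark_dirty() on every register write, mark_all_clean() right after the guest has run, and mark_all_dirty() whenever the VMCB may have been touched behind the CPU's back, for example after migrating the vcpu to another physical CPU.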
@@ -264,11 +410,6 @@
 
 #define MAX_INST_SIZE 15
 
-static inline u32 svm_has(u32 feat)
-{
-	return svm_features & feat;
-}
-
 static inline void clgi(void)
 {
 	asm volatile (__ex(SVM_CLGI));
@@ -284,16 +425,6 @@
 	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
-static inline void force_new_asid(struct kvm_vcpu *vcpu)
-{
-	to_svm(vcpu)->asid_generation--;
-}
-
-static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
-{
-	force_new_asid(vcpu);
-}
-
 static int get_npt_level(void)
 {
 #ifdef CONFIG_X86_64
@@ -310,6 +441,7 @@
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
+	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
@@ -347,7 +479,7 @@
 		svm->next_rip = svm->vmcb->control.next_rip;
 
 	if (!svm->next_rip) {
-		if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
+		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
 				EMULATE_DONE)
 			printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
@@ -374,7 +506,7 @@
 	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
-	if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
+	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
 		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
 
 		/*
@@ -670,7 +802,7 @@
 
 	svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
-	if (!svm_has(SVM_FEATURE_NPT))
+	if (!boot_cpu_has(X86_FEATURE_NPT))
 		npt_enabled = false;
 
 	if (npt_enabled && !npt) {
@@ -725,13 +857,15 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 g_tsc_offset = 0;
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		g_tsc_offset = svm->vmcb->control.tsc_offset -
 			       svm->nested.hsave->control.tsc_offset;
 		svm->nested.hsave->control.tsc_offset = offset;
 	}
 
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -739,8 +873,9 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.tsc_offset += adjustment;
-	if (is_nested(svm))
+	if (is_guest_mode(vcpu))
 		svm->nested.hsave->control.tsc_offset += adjustment;
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void init_vmcb(struct vcpu_svm *svm)
@@ -749,62 +884,62 @@
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	svm->vcpu.fpu_active = 1;
+	svm->vcpu.arch.hflags = 0;
 
-	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK;
+	set_cr_intercept(svm, INTERCEPT_CR0_READ);
+	set_cr_intercept(svm, INTERCEPT_CR3_READ);
+	set_cr_intercept(svm, INTERCEPT_CR4_READ);
+	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
-	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK |
-					INTERCEPT_CR8_MASK;
+	set_dr_intercept(svm, INTERCEPT_DR0_READ);
+	set_dr_intercept(svm, INTERCEPT_DR1_READ);
+	set_dr_intercept(svm, INTERCEPT_DR2_READ);
+	set_dr_intercept(svm, INTERCEPT_DR3_READ);
+	set_dr_intercept(svm, INTERCEPT_DR4_READ);
+	set_dr_intercept(svm, INTERCEPT_DR5_READ);
+	set_dr_intercept(svm, INTERCEPT_DR6_READ);
+	set_dr_intercept(svm, INTERCEPT_DR7_READ);
 
-	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
-					INTERCEPT_DR1_MASK |
-					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK |
-					INTERCEPT_DR4_MASK |
-					INTERCEPT_DR5_MASK |
-					INTERCEPT_DR6_MASK |
-					INTERCEPT_DR7_MASK;
+	set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
 
-	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
-					INTERCEPT_DR1_MASK |
-					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK |
-					INTERCEPT_DR4_MASK |
-					INTERCEPT_DR5_MASK |
-					INTERCEPT_DR6_MASK |
-					INTERCEPT_DR7_MASK;
+	set_exception_intercept(svm, PF_VECTOR);
+	set_exception_intercept(svm, UD_VECTOR);
+	set_exception_intercept(svm, MC_VECTOR);
 
-	control->intercept_exceptions = (1 << PF_VECTOR) |
-					(1 << UD_VECTOR) |
-					(1 << MC_VECTOR);
-
-
-	control->intercept =	(1ULL << INTERCEPT_INTR) |
-				(1ULL << INTERCEPT_NMI) |
-				(1ULL << INTERCEPT_SMI) |
-				(1ULL << INTERCEPT_SELECTIVE_CR0) |
-				(1ULL << INTERCEPT_CPUID) |
-				(1ULL << INTERCEPT_INVD) |
-				(1ULL << INTERCEPT_HLT) |
-				(1ULL << INTERCEPT_INVLPG) |
-				(1ULL << INTERCEPT_INVLPGA) |
-				(1ULL << INTERCEPT_IOIO_PROT) |
-				(1ULL << INTERCEPT_MSR_PROT) |
-				(1ULL << INTERCEPT_TASK_SWITCH) |
-				(1ULL << INTERCEPT_SHUTDOWN) |
-				(1ULL << INTERCEPT_VMRUN) |
-				(1ULL << INTERCEPT_VMMCALL) |
-				(1ULL << INTERCEPT_VMLOAD) |
-				(1ULL << INTERCEPT_VMSAVE) |
-				(1ULL << INTERCEPT_STGI) |
-				(1ULL << INTERCEPT_CLGI) |
-				(1ULL << INTERCEPT_SKINIT) |
-				(1ULL << INTERCEPT_WBINVD) |
-				(1ULL << INTERCEPT_MONITOR) |
-				(1ULL << INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_INTR);
+	set_intercept(svm, INTERCEPT_NMI);
+	set_intercept(svm, INTERCEPT_SMI);
+	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+	set_intercept(svm, INTERCEPT_CPUID);
+	set_intercept(svm, INTERCEPT_INVD);
+	set_intercept(svm, INTERCEPT_HLT);
+	set_intercept(svm, INTERCEPT_INVLPG);
+	set_intercept(svm, INTERCEPT_INVLPGA);
+	set_intercept(svm, INTERCEPT_IOIO_PROT);
+	set_intercept(svm, INTERCEPT_MSR_PROT);
+	set_intercept(svm, INTERCEPT_TASK_SWITCH);
+	set_intercept(svm, INTERCEPT_SHUTDOWN);
+	set_intercept(svm, INTERCEPT_VMRUN);
+	set_intercept(svm, INTERCEPT_VMMCALL);
+	set_intercept(svm, INTERCEPT_VMLOAD);
+	set_intercept(svm, INTERCEPT_VMSAVE);
+	set_intercept(svm, INTERCEPT_STGI);
+	set_intercept(svm, INTERCEPT_CLGI);
+	set_intercept(svm, INTERCEPT_SKINIT);
+	set_intercept(svm, INTERCEPT_WBINVD);
+	set_intercept(svm, INTERCEPT_MONITOR);
+	set_intercept(svm, INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_XSETBV);
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __pa(svm->msrpm);
@@ -855,25 +990,27 @@
 	if (npt_enabled) {
 		/* Setup VMCB for Nested Paging */
 		control->nested_ctl = 1;
-		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
-					(1ULL << INTERCEPT_INVLPG));
-		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
-		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
+		clr_intercept(svm, INTERCEPT_TASK_SWITCH);
+		clr_intercept(svm, INTERCEPT_INVLPG);
+		clr_exception_intercept(svm, PF_VECTOR);
+		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
-	force_new_asid(&svm->vcpu);
+	svm->asid_generation = 0;
 
 	svm->nested.vmcb = 0;
 	svm->vcpu.arch.hflags = 0;
 
-	if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
 		control->pause_filter_count = 3000;
-		control->intercept |= (1ULL << INTERCEPT_PAUSE);
+		set_intercept(svm, INTERCEPT_PAUSE);
 	}
 
+	mark_all_dirty(svm->vmcb);
+
 	enable_gif(svm);
 }
 
@@ -990,8 +1127,16 @@
 
 	if (unlikely(cpu != vcpu->cpu)) {
 		svm->asid_generation = 0;
+		mark_all_dirty(svm->vmcb);
 	}
 
+#ifdef CONFIG_X86_64
+	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
+#endif
+	savesegment(fs, svm->host.fs);
+	savesegment(gs, svm->host.gs);
+	svm->host.ldt = kvm_read_ldt();
+
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1002,6 +1147,14 @@
 	int i;
 
 	++vcpu->stat.host_state_reload;
+	kvm_load_ldt(svm->host.ldt);
+#ifdef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
+	load_gs_index(svm->host.gs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+	loadsegment(gs, svm->host.gs);
+#endif
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1021,7 +1174,7 @@
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
 		BUG();
@@ -1030,12 +1183,12 @@
 
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
+	set_intercept(svm, INTERCEPT_VINTR);
 }
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+	clr_intercept(svm, INTERCEPT_VINTR);
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -1150,6 +1303,7 @@
 
 	svm->vmcb->save.idtr.limit = dt->size;
 	svm->vmcb->save.idtr.base = dt->address ;
+	mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1166,19 +1320,23 @@
 
 	svm->vmcb->save.gdtr.limit = dt->size;
 	svm->vmcb->save.gdtr.base = dt->address ;
+	mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void svm_decache_cr3(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
-	struct vmcb *vmcb = svm->vmcb;
 	ulong gcr0 = svm->vcpu.arch.cr0;
 	u64 *hcr0 = &svm->vmcb->save.cr0;
 
@@ -1188,27 +1346,14 @@
 		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
 			| (gcr0 & SVM_CR0_SELECTIVE_MASK);
 
+	mark_dirty(svm->vmcb, VMCB_CR);
 
 	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-		vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-		vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-		if (is_nested(svm)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-			vmcb->control.intercept_cr_read  |= svm->nested.intercept_cr_read;
-			vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
-		}
+		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	} else {
-		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		if (is_nested(svm)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		}
+		set_cr_intercept(svm, INTERCEPT_CR0_READ);
+		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	}
 }
 
@@ -1216,7 +1361,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		/*
 		 * We are here because we run in nested mode, the host kvm
 		 * intercepts cr0 writes but the l1 hypervisor does not.
@@ -1268,6 +1413,7 @@
 	 */
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
+	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
 }
 
@@ -1277,13 +1423,14 @@
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
 	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-		force_new_asid(vcpu);
+		svm_flush_tlb(vcpu);
 
 	vcpu->arch.cr4 = cr4;
 	if (!npt_enabled)
 		cr4 |= X86_CR4_PAE;
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
+	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -1312,26 +1459,25 @@
 			= (svm->vmcb->save.cs.attrib
 			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
+	mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
 static void update_db_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.intercept_exceptions &=
-		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+	clr_exception_intercept(svm, DB_VECTOR);
+	clr_exception_intercept(svm, BP_VECTOR);
 
 	if (svm->nmi_singlestep)
-		svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
+		set_exception_intercept(svm, DB_VECTOR);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
 		if (vcpu->guest_debug &
 		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-			svm->vmcb->control.intercept_exceptions |=
-				1 << DB_VECTOR;
+			set_exception_intercept(svm, DB_VECTOR);
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-			svm->vmcb->control.intercept_exceptions |=
-				1 << BP_VECTOR;
+			set_exception_intercept(svm, BP_VECTOR);
 	} else
 		vcpu->guest_debug = 0;
 }
@@ -1345,23 +1491,11 @@
 	else
 		svm->vmcb->save.dr7 = vcpu->arch.dr7;
 
+	mark_dirty(svm->vmcb, VMCB_DR);
+
 	update_db_intercept(vcpu);
 }
 
-static void load_host_msrs(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
-#endif
-}
-
-static void save_host_msrs(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
-#endif
-}
-
 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
 	if (sd->next_asid > sd->max_asid) {
@@ -1372,6 +1506,8 @@
 
 	svm->asid_generation = sd->asid_generation;
 	svm->vmcb->control.asid = sd->next_asid++;
+
+	mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
@@ -1379,20 +1515,40 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.dr7 = value;
+	mark_dirty(svm->vmcb, VMCB_DR);
 }
 
 static int pf_interception(struct vcpu_svm *svm)
 {
-	u64 fault_address;
+	u64 fault_address = svm->vmcb->control.exit_info_2;
 	u32 error_code;
+	int r = 1;
 
-	fault_address  = svm->vmcb->control.exit_info_2;
-	error_code = svm->vmcb->control.exit_info_1;
+	switch (svm->apf_reason) {
+	default:
+		error_code = svm->vmcb->control.exit_info_1;
 
-	trace_kvm_page_fault(fault_address, error_code);
-	if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
-		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		trace_kvm_page_fault(fault_address, error_code);
+		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
+			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+			svm->vmcb->control.insn_bytes,
+			svm->vmcb->control.insn_len);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wait(fault_address);
+		local_irq_enable();
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wake(fault_address);
+		local_irq_enable();
+		break;
+	}
+	return r;
 }
 
 static int db_interception(struct vcpu_svm *svm)
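
pf_interception() now checks the async-page-fault reason read back after the exit: an ordinary #PF goes to the MMU, while the two paravirtual tokens park or wake the faulting task. A compact sketch of that dispatch; the token values and the helper names are assumptions for the example.

    #include <stdint.h>
    #include <stdio.h>

    /* assumed token values delivered through the shared async-PF area */
    #define KVM_PV_REASON_PAGE_NOT_PRESENT  1
    #define KVM_PV_REASON_PAGE_READY        2

    static void mmu_page_fault(uint64_t addr) { printf("mmu fault %llx\n", (unsigned long long)addr); }
    static void async_pf_wait(uint64_t token) { printf("wait %llx\n", (unsigned long long)token); }
    static void async_pf_wake(uint64_t token) { printf("wake %llx\n", (unsigned long long)token); }

    /* same three-way dispatch the new pf_interception() performs */
    static void dispatch_pf(uint32_t apf_reason, uint64_t fault_address)
    {
        switch (apf_reason) {
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
            async_pf_wait(fault_address);   /* host is still paging it in */
            break;
        case KVM_PV_REASON_PAGE_READY:
            async_pf_wake(fault_address);   /* an earlier wait can resume */
            break;
        default:
            mmu_page_fault(fault_address);  /* ordinary #PF */
            break;
        }
    }

    int main(void)
    {
        dispatch_pf(0, 0x1000);
        dispatch_pf(KVM_PV_REASON_PAGE_READY, 0x1000);
        return 0;
    }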
@@ -1440,7 +1596,7 @@
 {
 	int er;
 
-	er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
+	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
@@ -1449,21 +1605,8 @@
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 excp;
 
-	if (is_nested(svm)) {
-		u32 h_excp, n_excp;
-
-		h_excp  = svm->nested.hsave->control.intercept_exceptions;
-		n_excp  = svm->nested.intercept_exceptions;
-		h_excp &= ~(1 << NM_VECTOR);
-		excp    = h_excp | n_excp;
-	} else {
-		excp  = svm->vmcb->control.intercept_exceptions;
-		excp &= ~(1 << NM_VECTOR);
-	}
-
-	svm->vmcb->control.intercept_exceptions = excp;
+	clr_exception_intercept(svm, NM_VECTOR);
 
 	svm->vcpu.fpu_active = 1;
 	update_cr0_intercept(svm);
@@ -1570,7 +1713,7 @@
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	if (string || in)
-		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -1624,17 +1767,19 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.nested_cr3 = root;
-	force_new_asid(vcpu);
+	mark_dirty(svm->vmcb, VMCB_NPT);
+	svm_flush_tlb(vcpu);
 }
 
-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+				       struct x86_exception *fault)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
-	svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+	svm->vmcb->control.exit_info_1 = fault->error_code;
+	svm->vmcb->control.exit_info_2 = fault->address;
 
 	nested_svm_vmexit(svm);
 }
@@ -1680,7 +1825,7 @@
 {
 	int vmexit;
 
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return 0;
 
 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
@@ -1698,7 +1843,7 @@
 /* This function returns true if it is safe to enable the irq window */
 static inline bool nested_svm_intr(struct vcpu_svm *svm)
 {
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return true;
 
 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
@@ -1737,7 +1882,7 @@
 /* This function returns true if it is safe to enable the nmi window */
 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
 {
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return true;
 
 	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
@@ -1836,8 +1981,8 @@
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
-		/* When we're shadowing, trap PFs */
-		if (!npt_enabled)
+		/* When we're shadowing, trap PFs, but not async PF */
+		if (!npt_enabled && svm->apf_reason == 0)
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
@@ -1865,27 +2010,15 @@
 	case SVM_EXIT_IOIO:
 		vmexit = nested_svm_intercept_ioio(svm);
 		break;
-	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
-		if (svm->nested.intercept_cr_read & cr_bits)
+	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
+		if (svm->nested.intercept_cr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
-	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
-		if (svm->nested.intercept_cr_write & cr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
-		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
-		if (svm->nested.intercept_dr_read & dr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
-		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
-		if (svm->nested.intercept_dr_write & dr_bits)
+	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
+		if (svm->nested.intercept_dr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
@@ -1893,6 +2026,10 @@
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
 		if (svm->nested.intercept_exceptions & excp_bits)
 			vmexit = NESTED_EXIT_DONE;
+		/* an async page fault always causes a vmexit */
+		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
+			 svm->apf_reason != 0)
+			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_ERR: {
@@ -1926,10 +2063,8 @@
 	struct vmcb_control_area *dst  = &dst_vmcb->control;
 	struct vmcb_control_area *from = &from_vmcb->control;
 
-	dst->intercept_cr_read    = from->intercept_cr_read;
-	dst->intercept_cr_write   = from->intercept_cr_write;
-	dst->intercept_dr_read    = from->intercept_dr_read;
-	dst->intercept_dr_write   = from->intercept_dr_write;
+	dst->intercept_cr         = from->intercept_cr;
+	dst->intercept_dr         = from->intercept_dr;
 	dst->intercept_exceptions = from->intercept_exceptions;
 	dst->intercept            = from->intercept;
 	dst->iopm_base_pa         = from->iopm_base_pa;
@@ -1970,7 +2105,8 @@
 	if (!nested_vmcb)
 		return 1;
 
-	/* Exit nested SVM mode */
+	/* Exit Guest-Mode */
+	leave_guest_mode(&svm->vcpu);
 	svm->nested.vmcb = 0;
 
 	/* Give the current vmcb to the guest */
@@ -1984,7 +2120,7 @@
 	nested_vmcb->save.idtr   = vmcb->save.idtr;
 	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
 	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
-	nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
+	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
 	nested_vmcb->save.cr2    = vmcb->save.cr2;
 	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
 	nested_vmcb->save.rflags = vmcb->save.rflags;
@@ -2061,6 +2197,8 @@
 	svm->vmcb->save.cpl = 0;
 	svm->vmcb->control.exit_int_info = 0;
 
+	mark_all_dirty(svm->vmcb);
+
 	nested_svm_unmap(page);
 
 	nested_svm_uninit_mmu_context(&svm->vcpu);
@@ -2148,8 +2286,8 @@
 			       nested_vmcb->control.event_inj,
 			       nested_vmcb->control.nested_ctl);
 
-	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
-				    nested_vmcb->control.intercept_cr_write,
+	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
+				    nested_vmcb->control.intercept_cr >> 16,
 				    nested_vmcb->control.intercept_exceptions,
 				    nested_vmcb->control.intercept);
 
@@ -2177,7 +2315,7 @@
 	if (npt_enabled)
 		hsave->save.cr3    = vmcb->save.cr3;
 	else
-		hsave->save.cr3    = svm->vcpu.arch.cr3;
+		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
 
 	copy_vmcb_control_area(hsave, vmcb);
 
@@ -2229,14 +2367,12 @@
 	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
 
 	/* cache intercepts */
-	svm->nested.intercept_cr_read    = nested_vmcb->control.intercept_cr_read;
-	svm->nested.intercept_cr_write   = nested_vmcb->control.intercept_cr_write;
-	svm->nested.intercept_dr_read    = nested_vmcb->control.intercept_dr_read;
-	svm->nested.intercept_dr_write   = nested_vmcb->control.intercept_dr_write;
+	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
+	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
 	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
 	svm->nested.intercept            = nested_vmcb->control.intercept;
 
-	force_new_asid(&svm->vcpu);
+	svm_flush_tlb(&svm->vcpu);
 	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -2245,29 +2381,12 @@
 
 	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
 		/* We only want the cr8 intercept bits of the guest */
-		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 	}
 
 	/* We don't want to see VMMCALLs from a nested guest */
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
-
-	/*
-	 * We don't want a nested guest to be more powerful than the guest, so
-	 * all intercepts are ORed
-	 */
-	svm->vmcb->control.intercept_cr_read |=
-		nested_vmcb->control.intercept_cr_read;
-	svm->vmcb->control.intercept_cr_write |=
-		nested_vmcb->control.intercept_cr_write;
-	svm->vmcb->control.intercept_dr_read |=
-		nested_vmcb->control.intercept_dr_read;
-	svm->vmcb->control.intercept_dr_write |=
-		nested_vmcb->control.intercept_dr_write;
-	svm->vmcb->control.intercept_exceptions |=
-		nested_vmcb->control.intercept_exceptions;
-
-	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+	clr_intercept(svm, INTERCEPT_VMMCALL);
 
 	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
@@ -2278,11 +2397,21 @@
 
 	nested_svm_unmap(page);
 
-	/* nested_vmcb is our indicator if nested SVM is activated */
+	/* Enter Guest-Mode */
+	enter_guest_mode(&svm->vcpu);
+
+	/*
+	 * Merge guest and host intercepts - must be called with vcpu in
+	 * guest-mode to take effect here
+	 */
+	recalc_intercepts(svm);
+
 	svm->nested.vmcb = vmcb_gpa;
 
 	enable_gif(svm);
 
+	mark_all_dirty(svm->vmcb);
+
 	return true;
 }
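
With the intercept vectors cached in svm->nested and the host copy kept in hsave, entering guest mode reduces to the OR-merge done by recalc_intercepts(), which guarantees that a nested guest can never switch off an intercept the host depends on. A sketch of that merge over an invented struct:

    #include <stdint.h>

    struct intercepts {
        uint32_t cr;
        uint32_t dr;
        uint32_t exceptions;
        uint64_t intercept;
    };

    /* the active set is always host | guest while in guest mode */
    static void merge_intercepts(struct intercepts *active,
                                 const struct intercepts *host,
                                 const struct intercepts *guest)
    {
        active->cr         = host->cr         | guest->cr;
        active->dr         = host->dr         | guest->dr;
        active->exceptions = host->exceptions | guest->exceptions;
        active->intercept  = host->intercept  | guest->intercept;
    }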
 
@@ -2400,6 +2529,8 @@
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 
+	mark_dirty(svm->vmcb, VMCB_INTR);
+
 	return 1;
 }
 
@@ -2426,6 +2557,19 @@
 	return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+		skip_emulated_instruction(&svm->vcpu);
+	}
+
+	return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
@@ -2507,19 +2651,92 @@
 static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+	clr_intercept(svm, INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
 	return 1;
 }
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+
+	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+	skip_emulated_instruction(&svm->vcpu);
+	return 1;
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+}
+
+#define CR_VALID (1ULL << 63)
+
+static int cr_interception(struct vcpu_svm *svm)
+{
+	int reg, cr;
+	unsigned long val;
+	int err;
+
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+	err = 0;
+	if (cr >= 16) { /* mov to cr */
+		cr -= 16;
+		val = kvm_register_read(&svm->vcpu, reg);
+		switch (cr) {
+		case 0:
+			err = kvm_set_cr0(&svm->vcpu, val);
+			break;
+		case 3:
+			err = kvm_set_cr3(&svm->vcpu, val);
+			break;
+		case 4:
+			err = kvm_set_cr4(&svm->vcpu, val);
+			break;
+		case 8:
+			err = kvm_set_cr8(&svm->vcpu, val);
+			break;
+		default:
+			WARN(1, "unhandled write to CR%d", cr);
+			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			return 1;
+		}
+	} else { /* mov from cr */
+		switch (cr) {
+		case 0:
+			val = kvm_read_cr0(&svm->vcpu);
+			break;
+		case 2:
+			val = svm->vcpu.arch.cr2;
+			break;
+		case 3:
+			val = kvm_read_cr3(&svm->vcpu);
+			break;
+		case 4:
+			val = kvm_read_cr4(&svm->vcpu);
+			break;
+		case 8:
+			val = kvm_get_cr8(&svm->vcpu);
+			break;
+		default:
+			WARN(1, "unhandled read from CR%d", cr);
+			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			return 1;
+		}
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+	kvm_complete_insn_gp(&svm->vcpu, err);
+
+	return 1;
 }
 
 static int cr0_write_interception(struct vcpu_svm *svm)
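
cr_interception() relies on the decode-assist feature: the hardware reports which general purpose register and which control register were involved, so no instruction emulation is needed on the fast path. A sketch of pulling the operands apart; the mask and the exit-code base are assumed values for illustration.

    #include <stdbool.h>
    #include <stdint.h>

    #define SVM_EXITINFO_REG_MASK  0x0f          /* assumed encoding */
    #define SVM_EXIT_READ_CR0      0x000         /* assumed exit-code base */
    #define CR_VALID               (1ULL << 63)

    struct cr_access {
        bool write;     /* mov to CRn (true) or mov from CRn (false) */
        int  cr;        /* which control register */
        int  reg;       /* which general purpose register */
    };

    /* returns false when the exit info is unusable and the caller must fall
     * back to instruction emulation */
    static bool decode_cr_access(uint64_t exit_code, uint64_t exit_info_1,
                                 struct cr_access *out)
    {
        if (!(exit_info_1 & CR_VALID))
            return false;

        out->reg   = (int)(exit_info_1 & SVM_EXITINFO_REG_MASK);
        out->cr    = (int)(exit_code - SVM_EXIT_READ_CR0);
        out->write = out->cr >= 16;     /* write exits are offset by 16 */
        if (out->write)
            out->cr -= 16;
        return true;
    }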
@@ -2527,7 +2744,7 @@
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	int r;
 
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 
 	if (svm->nested.vmexit_rip) {
 		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2536,22 +2753,47 @@
 		svm->nested.vmexit_rip = 0;
 	}
 
-	return r == EMULATE_DONE;
+	return r;
+}
+
+static int dr_interception(struct vcpu_svm *svm)
+{
+	int reg, dr;
+	unsigned long val;
+	int err;
+
+	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+	if (dr >= 16) { /* mov to DRn */
+		val = kvm_register_read(&svm->vcpu, reg);
+		kvm_set_dr(&svm->vcpu, dr - 16, val);
+	} else {
+		err = kvm_get_dr(&svm->vcpu, dr, &val);
+		if (!err)
+			kvm_register_write(&svm->vcpu, reg, val);
+	}
+
+	return 1;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
+	int r;
 
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
-		return 1;
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+		return r;
 	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-		return 1;
+		return r;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
 	return 0;
 }
@@ -2562,14 +2804,9 @@
 
 	switch (ecx) {
 	case MSR_IA32_TSC: {
-		u64 tsc_offset;
+		struct vmcb *vmcb = get_host_vmcb(svm);
 
-		if (is_nested(svm))
-			tsc_offset = svm->nested.hsave->control.tsc_offset;
-		else
-			tsc_offset = svm->vmcb->control.tsc_offset;
-
-		*data = tsc_offset + native_read_tsc();
+		*data = vmcb->control.tsc_offset + native_read_tsc();
 		break;
 	}
 	case MSR_STAR:
@@ -2714,7 +2951,7 @@
 		svm->vmcb->save.sysenter_esp = data;
 		break;
 	case MSR_IA32_DEBUGCTLMSR:
-		if (!svm_has(SVM_FEATURE_LBRV)) {
+		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
 			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
 					__func__, data);
 			break;
@@ -2723,6 +2960,7 @@
 			return 1;
 
 		svm->vmcb->save.dbgctl = data;
+		mark_dirty(svm->vmcb, VMCB_LBR);
 		if (data & (1ULL<<0))
 			svm_enable_lbrv(svm);
 		else
@@ -2775,6 +3013,7 @@
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+	mark_dirty(svm->vmcb, VMCB_INTR);
 	/*
 	 * If the user space waits to inject interrupts, exit as soon as
 	 * possible
@@ -2797,31 +3036,31 @@
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR0]			= cr_interception,
+	[SVM_EXIT_READ_CR3]			= cr_interception,
+	[SVM_EXIT_READ_CR4]			= cr_interception,
+	[SVM_EXIT_READ_CR8]			= cr_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
-	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR3]			= cr_interception,
+	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
-	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR0]			= dr_interception,
+	[SVM_EXIT_READ_DR1]			= dr_interception,
+	[SVM_EXIT_READ_DR2]			= dr_interception,
+	[SVM_EXIT_READ_DR3]			= dr_interception,
+	[SVM_EXIT_READ_DR4]			= dr_interception,
+	[SVM_EXIT_READ_DR5]			= dr_interception,
+	[SVM_EXIT_READ_DR6]			= dr_interception,
+	[SVM_EXIT_READ_DR7]			= dr_interception,
+	[SVM_EXIT_WRITE_DR0]			= dr_interception,
+	[SVM_EXIT_WRITE_DR1]			= dr_interception,
+	[SVM_EXIT_WRITE_DR2]			= dr_interception,
+	[SVM_EXIT_WRITE_DR3]			= dr_interception,
+	[SVM_EXIT_WRITE_DR4]			= dr_interception,
+	[SVM_EXIT_WRITE_DR5]			= dr_interception,
+	[SVM_EXIT_WRITE_DR6]			= dr_interception,
+	[SVM_EXIT_WRITE_DR7]			= dr_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
@@ -2854,6 +3093,7 @@
 	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
 	[SVM_EXIT_MONITOR]			= invalid_op_interception,
 	[SVM_EXIT_MWAIT]			= invalid_op_interception,
+	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
@@ -2864,10 +3104,10 @@
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	pr_err("VMCB Control Area:\n");
-	pr_err("cr_read:            %04x\n", control->intercept_cr_read);
-	pr_err("cr_write:           %04x\n", control->intercept_cr_write);
-	pr_err("dr_read:            %04x\n", control->intercept_dr_read);
-	pr_err("dr_write:           %04x\n", control->intercept_dr_write);
+	pr_err("cr_read:            %04x\n", control->intercept_cr & 0xffff);
+	pr_err("cr_write:           %04x\n", control->intercept_cr >> 16);
+	pr_err("dr_read:            %04x\n", control->intercept_dr & 0xffff);
+	pr_err("dr_write:           %04x\n", control->intercept_dr >> 16);
 	pr_err("exceptions:         %08x\n", control->intercept_exceptions);
 	pr_err("intercepts:         %016llx\n", control->intercept);
 	pr_err("pause filter count: %d\n", control->pause_filter_count);
@@ -2950,15 +3190,23 @@
 
 }
 
+static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+	*info1 = control->exit_info_1;
+	*info2 = control->exit_info_2;
+}
+
 static int handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	trace_kvm_exit(exit_code, vcpu);
+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
@@ -2970,7 +3218,7 @@
 		return 1;
 	}
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		int vmexit;
 
 		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
@@ -3033,7 +3281,6 @@
 
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	/* FIXME: handle wraparound of asid_generation */
 	if (svm->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
@@ -3045,7 +3292,7 @@
 
 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
 	vcpu->arch.hflags |= HF_NMI_MASK;
-	svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+	set_intercept(svm, INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
 }
 
@@ -3058,6 +3305,7 @@
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+	mark_dirty(svm->vmcb, VMCB_INTR);
 }
 
 static void svm_set_irq(struct kvm_vcpu *vcpu)
@@ -3077,14 +3325,14 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
 	if (irr == -1)
 		return;
 
 	if (tpr >= irr)
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -3112,10 +3360,10 @@
 
 	if (masked) {
 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
-		svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+		set_intercept(svm, INTERCEPT_IRET);
 	} else {
 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-		svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+		clr_intercept(svm, INTERCEPT_IRET);
 	}
 }
 
@@ -3131,7 +3379,7 @@
 
 	ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
 
-	if (is_nested(svm))
+	if (is_guest_mode(vcpu))
 		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
 
 	return ret;
@@ -3177,7 +3425,12 @@
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	force_new_asid(vcpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+	else
+		svm->asid_generation--;
 }
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
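
svm_flush_tlb() now prefers the flush-by-ASID facility and only falls back to retiring the current ASID (by aging the generation counter) on CPUs without it. A sketch of the choice; the tlb_ctl encoding is an assumption for the example.

    #include <stdbool.h>
    #include <stdint.h>

    #define TLB_CONTROL_DO_NOTHING   0
    #define TLB_CONTROL_FLUSH_ASID   3  /* assumed encoding */

    struct vcpu_state {
        uint8_t  tlb_ctl;           /* written into the VMCB before VMRUN */
        uint32_t asid_generation;   /* forces a new ASID when stale */
    };

    /* the two flush strategies the new svm_flush_tlb() picks between */
    static void flush_guest_tlb(struct vcpu_state *v, bool cpu_has_flush_by_asid)
    {
        if (cpu_has_flush_by_asid)
            v->tlb_ctl = TLB_CONTROL_FLUSH_ASID;  /* flush only this guest */
        else
            v->asid_generation--;   /* stale generation -> new ASID next entry */
    }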
@@ -3188,10 +3441,10 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
 		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
 		kvm_set_cr8(vcpu, cr8);
 	}
@@ -3202,7 +3455,7 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 cr8;
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
 	cr8 = kvm_get_cr8(vcpu);
@@ -3289,9 +3542,6 @@
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u16 fs_selector;
-	u16 gs_selector;
-	u16 ldt_selector;
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
@@ -3308,10 +3558,6 @@
 
 	sync_lapic_to_cr8(vcpu);
 
-	save_host_msrs(vcpu);
-	savesegment(fs, fs_selector);
-	savesegment(gs, gs_selector);
-	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
@@ -3389,19 +3635,10 @@
 #endif
 		);
 
-	vcpu->arch.cr2 = svm->vmcb->save.cr2;
-	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
-
-	load_host_msrs(vcpu);
-	kvm_load_ldt(ldt_selector);
-	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
-	load_gs_index(gs_selector);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
-	loadsegment(gs, gs_selector);
+	loadsegment(fs, svm->host.fs);
 #endif
 
 	reload_tss(vcpu);
@@ -3410,10 +3647,21 @@
 
 	stgi();
 
+	vcpu->arch.cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+
 	sync_cr8_to_lapic(vcpu);
 
 	svm->next_rip = 0;
 
+	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
+	/* if exit due to PF check for async PF */
+	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
+		svm->apf_reason = kvm_read_and_reset_pf_reason();
+
 	if (npt_enabled) {
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
@@ -3426,6 +3674,8 @@
 	if (unlikely(svm->vmcb->control.exit_code ==
 		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
 		svm_handle_mce(svm);
+
+	mark_all_clean(svm->vmcb);
 }
 
 #undef R
@@ -3435,7 +3685,8 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.cr3 = root;
-	force_new_asid(vcpu);
+	mark_dirty(svm->vmcb, VMCB_CR);
+	svm_flush_tlb(vcpu);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3443,11 +3694,13 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.nested_cr3 = root;
+	mark_dirty(svm->vmcb, VMCB_NPT);
 
 	/* Also sync guest cr3 here in case we live migrate */
-	svm->vmcb->save.cr3 = vcpu->arch.cr3;
+	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
+	mark_dirty(svm->vmcb, VMCB_CR);
 
-	force_new_asid(vcpu);
+	svm_flush_tlb(vcpu);
 }
 
 static int is_disabled(void)
@@ -3494,10 +3747,6 @@
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
-	case 0x00000001:
-		/* Mask out xsave bit as long as it is not supported by SVM */
-		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -3511,7 +3760,7 @@
 				   additional features */
 
 		/* Support next_rip if host supports it */
-		if (svm_has(SVM_FEATURE_NRIP))
+		if (boot_cpu_has(X86_FEATURE_NRIPS))
 			entry->edx |= SVM_FEATURE_NRIP;
 
 		/* Support NPT for the guest if enabled */
@@ -3571,6 +3820,7 @@
 	{ SVM_EXIT_WBINVD,			"wbinvd" },
 	{ SVM_EXIT_MONITOR,			"monitor" },
 	{ SVM_EXIT_MWAIT,			"mwait" },
+	{ SVM_EXIT_XSETBV,			"xsetbv" },
 	{ SVM_EXIT_NPF,				"npf" },
 	{ -1, NULL }
 };
@@ -3594,9 +3844,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
-	if (is_nested(svm))
-		svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+	set_exception_intercept(svm, NM_VECTOR);
 	update_cr0_intercept(svm);
 }
 
@@ -3627,6 +3875,7 @@
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
+	.decache_cr3 = svm_decache_cr3,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
@@ -3667,7 +3916,9 @@
 	.get_tdp_level = get_npt_level,
 	.get_mt_mask = svm_get_mt_mask,
 
+	.get_exit_info = svm_get_exit_info,
 	.exit_reasons_str = svm_exit_reasons_str,
+
 	.get_lpage_level = svm_get_lpage_level,
 
 	.cpuid_update = svm_cpuid_update,
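
The svm.c hunks above call mark_dirty() and mark_all_clean() helpers and use the VMCB_CR/VMCB_NPT bit names, but the helpers themselves are introduced earlier in the patch and are not visible in this excerpt. A minimal sketch of the clean-bits idea, assuming a "clean" bitmask in the VMCB control area (the field name is an assumption; only the helper and bit names come from the hunks above): after a VMRUN all groups are marked clean, and any software write to a VMCB field clears the matching bit so the CPU reloads that group on the next VMRUN.

/*
 * Sketch of VMCB clean-bit bookkeeping; the "clean" field location is
 * assumed, not quoted from this patch.
 */
enum {
	VMCB_CR,	/* CR0, CR3, CR4, EFER */
	VMCB_NPT,	/* nested page table pointer */
	/* further register groups elided */
	VMCB_DIRTY_MAX,
};

static inline void mark_all_clean(struct vmcb *vmcb)
{
	/* hardware may cache every group until software dirties it again */
	vmcb->control.clean = (1 << VMCB_DIRTY_MAX) - 1;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	/* force the CPU to re-read this group on the next VMRUN */
	vmcb->control.clean &= ~(1 << bit);
}
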
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a6544b8..1357d7c 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -178,27 +178,36 @@
 #define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 #define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 
+#define KVM_ISA_VMX   1
+#define KVM_ISA_SVM   2
+
 /*
  * Tracepoint for kvm guest exit:
  */
 TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
-	TP_ARGS(exit_reason, vcpu),
+	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
+	TP_ARGS(exit_reason, vcpu, isa),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	exit_reason	)
 		__field(	unsigned long,	guest_rip	)
+		__field(	u32,	        isa             )
+		__field(	u64,	        info1           )
+		__field(	u64,	        info2           )
 	),
 
 	TP_fast_assign(
 		__entry->exit_reason	= exit_reason;
 		__entry->guest_rip	= kvm_rip_read(vcpu);
+		__entry->isa            = isa;
+		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
+					   &__entry->info2);
 	),
 
-	TP_printk("reason %s rip 0x%lx",
+	TP_printk("reason %s rip 0x%lx info %llx %llx",
 		 ftrace_print_symbols_seq(p, __entry->exit_reason,
 					  kvm_x86_ops->exit_reasons_str),
-		 __entry->guest_rip)
+		 __entry->guest_rip, __entry->info1, __entry->info2)
 );
 
 /*
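
The widened kvm_exit tracepoint above pulls two backend-specific exit-info words through the new kvm_x86_ops->get_exit_info() hook. The VMX implementation (vmx_get_exit_info) appears later in this diff, while the SVM side is only referenced from the ops table, so the following is a rough sketch of what it would look like; the exit_info_1/exit_info_2 field names are assumed from the SVM VMCB layout rather than quoted from this patch.

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* EXITINFO1/EXITINFO2 as saved by hardware on #VMEXIT (assumed names) */
	*info1 = svm->vmcb->control.exit_info_1;
	*info2 = svm->vmcb->control.exit_info_2;
}
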
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 81fcbe9..bf89ec2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -69,6 +69,9 @@
 static int __read_mostly vmm_exclusive = 1;
 module_param(vmm_exclusive, bool, S_IRUGO);
 
+static int __read_mostly yield_on_hlt = 1;
+module_param(yield_on_hlt, bool, S_IRUGO);
+
 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
 	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
 #define KVM_GUEST_CR0_MASK						\
@@ -177,6 +180,7 @@
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -188,6 +192,8 @@
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
+static bool cpu_has_load_ia32_efer;
+
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
 
@@ -472,7 +478,7 @@
 	u8 error;
 
 	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
-		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
 		      : "cc", "memory");
 	if (error)
 		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
@@ -485,7 +491,7 @@
 	u8 error;
 
 	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-			: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
 			: "cc", "memory");
 	if (error)
 		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
@@ -565,10 +571,10 @@
 
 static unsigned long vmcs_readl(unsigned long field)
 {
-	unsigned long value;
+	unsigned long value = 0;
 
 	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
-		      : "=a"(value) : "d"(field) : "cc");
+		      : "+a"(value) : "d"(field) : "cc");
 	return value;
 }
 
@@ -661,6 +667,12 @@
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -680,6 +692,14 @@
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_write64(GUEST_IA32_EFER, guest_val);
+		vmcs_write64(HOST_IA32_EFER, host_val);
+		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -1009,6 +1029,17 @@
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
+{
+	/* Ensure that we clear the HLT state in the VMCS.  We don't need to
+	 * explicitly skip the instruction because if the HLT state is set, then
+	 * the instruction is already executing and RIP has already been
+	 * advanced. */
+	if (!yield_on_hlt &&
+	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
+		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -1035,6 +1066,7 @@
 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
+	vmx_clear_hlt(vcpu);
 }
 
 static bool vmx_rdtscp_supported(void)
@@ -1305,8 +1337,11 @@
 			&& tboot_enabled())
 			return 1;
 		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
-			&& !tboot_enabled())
+			&& !tboot_enabled()) {
+			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
+				"activate TXT before enabling KVM\n");
 			return 1;
+		}
 	}
 
 	return 0;
@@ -1400,6 +1435,14 @@
 	return 0;
 }
 
+static __init bool allow_1_setting(u32 msr, u32 ctl)
+{
+	u32 vmx_msr_low, vmx_msr_high;
+
+	rdmsr(msr, vmx_msr_low, vmx_msr_high);
+	return vmx_msr_high & ctl;
+}
+
 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 {
 	u32 vmx_msr_low, vmx_msr_high;
@@ -1416,7 +1459,7 @@
 				&_pin_based_exec_control) < 0)
 		return -EIO;
 
-	min = CPU_BASED_HLT_EXITING |
+	min =
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
 	      CPU_BASED_CR8_STORE_EXITING |
@@ -1429,6 +1472,10 @@
 	      CPU_BASED_MWAIT_EXITING |
 	      CPU_BASED_MONITOR_EXITING |
 	      CPU_BASED_INVLPG_EXITING;
+
+	if (yield_on_hlt)
+		min |= CPU_BASED_HLT_EXITING;
+
 	opt = CPU_BASED_TPR_SHADOW |
 	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -1510,6 +1557,12 @@
 	vmcs_conf->vmexit_ctrl         = _vmexit_control;
 	vmcs_conf->vmentry_ctrl        = _vmentry_control;
 
+	cpu_has_load_ia32_efer =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_EFER)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_EFER);
+
 	return 0;
 }
 
@@ -1683,9 +1736,13 @@
 	save->limit = vmcs_read32(sf->limit);
 	save->ar = vmcs_read32(sf->ar_bytes);
 	vmcs_write16(sf->selector, save->base >> 4);
-	vmcs_write32(sf->base, save->base & 0xfffff);
+	vmcs_write32(sf->base, save->base & 0xffff0);
 	vmcs_write32(sf->limit, 0xffff);
 	vmcs_write32(sf->ar_bytes, 0xf3);
+	if (save->base & 0xf)
+		printk_once(KERN_WARNING "kvm: segment base is not paragraph"
+			    " aligned when entering protected mode (seg=%d)\n",
+			    seg);
 }
 
 static void enter_rmode(struct kvm_vcpu *vcpu)
@@ -1814,6 +1871,13 @@
 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
 }
 
+static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept && is_paging(vcpu))
+		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
@@ -1857,6 +1921,7 @@
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
+	vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1937,7 +2002,7 @@
 	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
+		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
 			vcpu->kvm->arch.ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
@@ -2725,7 +2790,7 @@
 	vmcs_writel(GUEST_IDTR_BASE, 0);
 	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
 
-	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
+	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
 	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
 
@@ -2787,6 +2852,10 @@
 		return;
 	}
 
+	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+		enable_irq_window(vcpu);
+		return;
+	}
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
@@ -2814,6 +2883,7 @@
 	} else
 		intr |= INTR_TYPE_EXT_INTR;
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
+	vmx_clear_hlt(vcpu);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
@@ -2841,6 +2911,7 @@
 	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
+	vmx_clear_hlt(vcpu);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -2849,7 +2920,8 @@
 		return 0;
 
 	return	!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-			(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
+		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
+		   | GUEST_INTR_STATE_NMI));
 }
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -2910,7 +2982,7 @@
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
-		if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
+		if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
 			return 1;
 	/*
 	 * Forward all other exceptions that are valid in real mode.
@@ -3007,7 +3079,7 @@
 	}
 
 	if (is_invalid_opcode(intr_info)) {
-		er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
+		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 		if (er != EMULATE_DONE)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
@@ -3026,7 +3098,7 @@
 
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
-		return kvm_mmu_page_fault(vcpu, cr2, error_code);
+		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
 	}
 
 	if (vmx->rmode.vm86_active &&
@@ -3098,7 +3170,7 @@
 	++vcpu->stat.io_exits;
 
 	if (string || in)
-		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
@@ -3118,14 +3190,6 @@
 	hypercall[2] = 0xc1;
 }
 
-static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
-{
-	if (err)
-		kvm_inject_gp(vcpu, 0);
-	else
-		skip_emulated_instruction(vcpu);
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3143,21 +3207,21 @@
 		switch (cr) {
 		case 0:
 			err = kvm_set_cr0(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
 			err = kvm_set_cr3(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
 			err = kvm_set_cr4(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
 				u8 cr8 = kvm_register_read(vcpu, reg);
-				kvm_set_cr8(vcpu, cr8);
-				skip_emulated_instruction(vcpu);
+				err = kvm_set_cr8(vcpu, cr8);
+				kvm_complete_insn_gp(vcpu, err);
 				if (irqchip_in_kernel(vcpu->kvm))
 					return 1;
 				if (cr8_prev <= cr8)
@@ -3176,8 +3240,9 @@
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-			trace_kvm_cr_read(cr, vcpu->arch.cr3);
+			val = kvm_read_cr3(vcpu);
+			kvm_register_write(vcpu, reg, val);
+			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
@@ -3349,6 +3414,11 @@
 	return 1;
 }
 
+static int handle_invd(struct kvm_vcpu *vcpu)
+{
+	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
 static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3377,7 +3447,7 @@
 
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
-	return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_task_switch(struct kvm_vcpu *vcpu)
@@ -3476,7 +3546,7 @@
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
-	return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
+	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
 }
 
 static u64 ept_rsvd_mask(u64 spte, int level)
@@ -3592,7 +3662,7 @@
 		    && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
 			return handle_interrupt_window(&vmx->vcpu);
 
-		err = emulate_instruction(vcpu, 0, 0, 0);
+		err = emulate_instruction(vcpu, 0);
 
 		if (err == EMULATE_DO_MMIO) {
 			ret = 0;
@@ -3649,6 +3719,7 @@
 	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
 	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
 	[EXIT_REASON_HLT]                     = handle_halt,
+	[EXIT_REASON_INVD]		      = handle_invd,
 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
 	[EXIT_REASON_VMCLEAR]	              = handle_vmx_insn,
@@ -3676,6 +3747,12 @@
 static const int kvm_vmx_max_exit_handlers =
 	ARRAY_SIZE(kvm_vmx_exit_handlers);
 
+static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	*info1 = vmcs_readl(EXIT_QUALIFICATION);
+	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+}
+
 /*
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
@@ -3686,17 +3763,12 @@
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, vcpu);
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);
 
-	/* Access CR3 don't cause VMExit in paging mode, so we need
-	 * to sync with guest real CR3. */
-	if (enable_ept && is_paging(vcpu))
-		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-
 	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -4013,7 +4085,8 @@
 	      );
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-				  | (1 << VCPU_EXREG_PDPTR));
+				  | (1 << VCPU_EXREG_PDPTR)
+				  | (1 << VCPU_EXREG_CR3));
 	vcpu->arch.regs_dirty = 0;
 
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
@@ -4280,6 +4353,7 @@
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
+	.decache_cr3 = vmx_decache_cr3,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
@@ -4320,7 +4394,9 @@
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask = vmx_get_mt_mask,
 
+	.get_exit_info = vmx_get_exit_info,
 	.exit_reasons_str = vmx_exit_reasons_str,
+
 	.get_lpage_level = vmx_get_lpage_level,
 
 	.cpuid_update = vmx_cpuid_update,
@@ -4396,8 +4472,6 @@
 
 	if (enable_ept) {
 		bypass_guest_pf = 0;
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 				VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b989e1f..bcc0efc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
+#include <linux/hash.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -155,6 +156,13 @@
 
 u64 __read_mostly host_xcr0;
 
+static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
+{
+	int i;
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+		vcpu->arch.apf.gfns[i] = ~0;
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -326,23 +334,28 @@
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
-	unsigned error_code = vcpu->arch.fault.error_code;
+	if (err)
+		kvm_inject_gp(vcpu, 0);
+	else
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+{
 	++vcpu->stat.pf_guest;
-	vcpu->arch.cr2 = vcpu->arch.fault.address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+	vcpu->arch.cr2 = fault->address;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu);
-
-	vcpu->arch.fault.nested = false;
+		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -460,8 +473,8 @@
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
 
-	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
@@ -506,12 +519,15 @@
 		} else
 #endif
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-						 vcpu->arch.cr3))
+						 kvm_read_cr3(vcpu)))
 			return 1;
 	}
 
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 
+	if ((cr0 ^ old_cr0) & X86_CR0_PG)
+		kvm_clear_async_pf_completion_queue(vcpu);
+
 	if ((cr0 ^ old_cr0) & update_bits)
 		kvm_mmu_reset_context(vcpu);
 	return 0;
@@ -595,7 +611,8 @@
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+				   kvm_read_cr3(vcpu)))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -615,7 +632,7 @@
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
 		return 0;
@@ -650,12 +667,13 @@
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
 		return 1;
 	vcpu->arch.cr3 = cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 	vcpu->arch.mmu.new_cr3(vcpu);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS)
 		return 1;
@@ -665,12 +683,6 @@
 		vcpu->arch.cr8 = cr8;
 	return 0;
 }
-
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-	if (__kvm_set_cr8(vcpu, cr8))
-		kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@ -775,12 +787,12 @@
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	7
+#define KVM_SAVE_MSRS_BEGIN	8
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_APIC_ASSIST_PAGE,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -830,7 +842,6 @@
 	kvm_x86_ops->set_efer(vcpu, efer);
 
 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-	kvm_mmu_reset_context(vcpu);
 
 	/* Update reserved bits */
 	if ((efer ^ old_efer) & EFER_NX)
@@ -976,7 +987,7 @@
 	if (kvm_tsc_changes_freq())
 		printk_once(KERN_WARNING
 		 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-	ret = nsec * __get_cpu_var(cpu_tsc_khz);
+	ret = nsec * __this_cpu_read(cpu_tsc_khz);
 	do_div(ret, USEC_PER_SEC);
 	return ret;
 }
@@ -1061,7 +1072,7 @@
 	local_irq_save(flags);
 	kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
 	kernel_ns = get_kernel_ns();
-	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+	this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 
 	if (unlikely(this_tsc_khz == 0)) {
 		local_irq_restore(flags);
@@ -1418,6 +1429,30 @@
 	return 0;
 }
 
+static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+{
+	gpa_t gpa = data & ~0x3f;
+
+	/* Bits 2:5 are reserved, should be zero */
+	if (data & 0x3c)
+		return 1;
+
+	vcpu->arch.apf.msr_val = data;
+
+	if (!(data & KVM_ASYNC_PF_ENABLED)) {
+		kvm_clear_async_pf_completion_queue(vcpu);
+		kvm_async_pf_hash_reset(vcpu);
+		return 0;
+	}
+
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+		return 1;
+
+	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+	kvm_async_pf_wakeup_all(vcpu);
+	return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
@@ -1499,6 +1534,10 @@
 		}
 		break;
 	}
+	case MSR_KVM_ASYNC_PF_EN:
+		if (kvm_pv_enable_async_pf(vcpu, data))
+			return 1;
+		break;
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1775,6 +1814,9 @@
 	case MSR_KVM_SYSTEM_TIME_NEW:
 		data = vcpu->arch.time;
 		break;
+	case MSR_KVM_ASYNC_PF_EN:
+		data = vcpu->arch.apf.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -1904,6 +1946,7 @@
 	case KVM_CAP_NOP_IO_DELAY:
 	case KVM_CAP_MP_STATE:
 	case KVM_CAP_SYNC_MMU:
+	case KVM_CAP_USER_NMI:
 	case KVM_CAP_REINJECT_CONTROL:
 	case KVM_CAP_IRQ_INJECT_STATUS:
 	case KVM_CAP_ASSIGN_DEV_IRQ:
@@ -1922,6 +1965,7 @@
 	case KVM_CAP_DEBUGREGS:
 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
 	case KVM_CAP_XSAVE:
+	case KVM_CAP_ASYNC_PF:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -2185,6 +2229,11 @@
 	return r;
 }
 
+static void cpuid_mask(u32 *word, int wordnum)
+{
+	*word &= boot_cpu_data.x86_capability[wordnum];
+}
+
 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			   u32 index)
 {
@@ -2259,7 +2308,9 @@
 		break;
 	case 1:
 		entry->edx &= kvm_supported_word0_x86_features;
+		cpuid_mask(&entry->edx, 0);
 		entry->ecx &= kvm_supported_word4_x86_features;
+		cpuid_mask(&entry->ecx, 4);
 		/* we support x2apic emulation even if host does not support
 		 * it since we emulate x2apic in software */
 		entry->ecx |= F(X2APIC);
@@ -2350,7 +2401,9 @@
 		break;
 	case 0x80000001:
 		entry->edx &= kvm_supported_word1_x86_features;
+		cpuid_mask(&entry->edx, 1);
 		entry->ecx &= kvm_supported_word6_x86_features;
+		cpuid_mask(&entry->ecx, 6);
 		break;
 	}
 
@@ -3169,20 +3222,18 @@
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		r = -ENOMEM;
-		dirty_bitmap = vmalloc(n);
-		if (!dirty_bitmap)
-			goto out;
+		dirty_bitmap = memslot->dirty_bitmap_head;
+		if (memslot->dirty_bitmap == dirty_bitmap)
+			dirty_bitmap += n / sizeof(long);
 		memset(dirty_bitmap, 0, n);
 
 		r = -ENOMEM;
 		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots) {
-			vfree(dirty_bitmap);
+		if (!slots)
 			goto out;
-		}
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+		slots->generation++;
 
 		old_slots = kvm->memslots;
 		rcu_assign_pointer(kvm->memslots, slots);
@@ -3195,11 +3246,8 @@
 		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-			vfree(dirty_bitmap);
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
 			goto out;
-		}
-		vfree(dirty_bitmap);
 	} else {
 		r = -EFAULT;
 		if (clear_user(log->dirty_bitmap, n))
@@ -3266,8 +3314,10 @@
 		if (vpic) {
 			r = kvm_ioapic_init(kvm);
 			if (r) {
+				mutex_lock(&kvm->slots_lock);
 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
 							  &vpic->dev);
+				mutex_unlock(&kvm->slots_lock);
 				kfree(vpic);
 				goto create_irqchip_unlock;
 			}
@@ -3278,10 +3328,12 @@
 		smp_wmb();
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			mutex_lock(&kvm->irq_lock);
 			kvm_ioapic_destroy(kvm);
 			kvm_destroy_pic(kvm);
 			mutex_unlock(&kvm->irq_lock);
+			mutex_unlock(&kvm->slots_lock);
 		}
 	create_irqchip_unlock:
 		mutex_unlock(&kvm->lock);
@@ -3557,63 +3609,63 @@
 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
 	gpa_t t_gpa;
-	u32 error;
+	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
-	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.nested = true;
+	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
 
 	return t_gpa;
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
- gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+ gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 				      struct kvm_vcpu *vcpu, u32 access,
-				      u32 *error)
+				      struct x86_exception *exception)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA) {
-			r = X86EMUL_PROPAGATE_FAULT;
-			goto out;
-		}
+		if (gpa == UNMAPPED_GVA)
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3630,31 +3682,35 @@
 
 /* used for instruction fetching */
 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-				struct kvm_vcpu *vcpu, u32 *error)
+				struct kvm_vcpu *vcpu,
+				struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-					  access | PFERR_FETCH_MASK, error);
+					  access | PFERR_FETCH_MASK,
+					  exception);
 }
 
 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu, u32 *error)
+			       struct kvm_vcpu *vcpu,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-					  error);
+					  exception);
 }
 
 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu, u32 *error)
+				      struct kvm_vcpu *vcpu,
+				      struct x86_exception *exception)
 {
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
 static int kvm_write_guest_virt_system(gva_t addr, void *val,
 				       unsigned int bytes,
 				       struct kvm_vcpu *vcpu,
-				       u32 *error)
+				       struct x86_exception *exception)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
@@ -3662,15 +3718,13 @@
 	while (bytes) {
 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
 							     PFERR_WRITE_MASK,
-							     error);
+							     exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA) {
-			r = X86EMUL_PROPAGATE_FAULT;
-			goto out;
-		}
+		if (gpa == UNMAPPED_GVA)
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3688,7 +3742,7 @@
 static int emulator_read_emulated(unsigned long addr,
 				  void *val,
 				  unsigned int bytes,
-				  unsigned int *error_code,
+				  struct x86_exception *exception,
 				  struct kvm_vcpu *vcpu)
 {
 	gpa_t                 gpa;
@@ -3701,7 +3755,7 @@
 		return X86EMUL_CONTINUE;
 	}
 
-	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
@@ -3710,8 +3764,8 @@
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto mmio;
 
-	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
-				== X86EMUL_CONTINUE)
+	if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
+	    == X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
 mmio:
@@ -3735,7 +3789,7 @@
 }
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			  const void *val, int bytes)
+			const void *val, int bytes)
 {
 	int ret;
 
@@ -3749,12 +3803,12 @@
 static int emulator_write_emulated_onepage(unsigned long addr,
 					   const void *val,
 					   unsigned int bytes,
-					   unsigned int *error_code,
+					   struct x86_exception *exception,
 					   struct kvm_vcpu *vcpu)
 {
 	gpa_t                 gpa;
 
-	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
+	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
@@ -3787,7 +3841,7 @@
 int emulator_write_emulated(unsigned long addr,
 			    const void *val,
 			    unsigned int bytes,
-			    unsigned int *error_code,
+			    struct x86_exception *exception,
 			    struct kvm_vcpu *vcpu)
 {
 	/* Crossing a page boundary? */
@@ -3795,7 +3849,7 @@
 		int rc, now;
 
 		now = -addr & ~PAGE_MASK;
-		rc = emulator_write_emulated_onepage(addr, val, now, error_code,
+		rc = emulator_write_emulated_onepage(addr, val, now, exception,
 						     vcpu);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
@@ -3803,7 +3857,7 @@
 		val += now;
 		bytes -= now;
 	}
-	return emulator_write_emulated_onepage(addr, val, bytes, error_code,
+	return emulator_write_emulated_onepage(addr, val, bytes, exception,
 					       vcpu);
 }
 
@@ -3821,7 +3875,7 @@
 				     const void *old,
 				     const void *new,
 				     unsigned int bytes,
-				     unsigned int *error_code,
+				     struct x86_exception *exception,
 				     struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
@@ -3879,7 +3933,7 @@
 emul_write:
 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
-	return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
+	return emulator_write_emulated(addr, new, bytes, exception, vcpu);
 }
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@ -3904,7 +3958,7 @@
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
-	trace_kvm_pio(0, port, size, 1);
+	trace_kvm_pio(0, port, size, count);
 
 	vcpu->arch.pio.port = port;
 	vcpu->arch.pio.in = 1;
@@ -3932,7 +3986,7 @@
 			      const void *val, unsigned int count,
 			      struct kvm_vcpu *vcpu)
 {
-	trace_kvm_pio(1, port, size, 1);
+	trace_kvm_pio(1, port, size, count);
 
 	vcpu->arch.pio.port = port;
 	vcpu->arch.pio.in = 0;
@@ -3973,13 +4027,15 @@
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
-		preempt_disable();
+		int cpu = get_cpu();
+
+		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
-		preempt_enable();
+		put_cpu();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-	}
-	wbinvd();
+	} else
+		wbinvd();
 	return X86EMUL_CONTINUE;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -4019,7 +4075,7 @@
 		value = vcpu->arch.cr2;
 		break;
 	case 3:
-		value = vcpu->arch.cr3;
+		value = kvm_read_cr3(vcpu);
 		break;
 	case 4:
 		value = kvm_read_cr4(vcpu);
@@ -4053,7 +4109,7 @@
 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
 		break;
 	case 8:
-		res = __kvm_set_cr8(vcpu, val & 0xfUL);
+		res = kvm_set_cr8(vcpu, val);
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@ -4206,12 +4262,13 @@
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	if (ctxt->exception == PF_VECTOR)
-		kvm_propagate_fault(vcpu);
-	else if (ctxt->error_code_valid)
-		kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
+	if (ctxt->exception.vector == PF_VECTOR)
+		kvm_propagate_fault(vcpu, &ctxt->exception);
+	else if (ctxt->exception.error_code_valid)
+		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
+				      ctxt->exception.error_code);
 	else
-		kvm_queue_exception(vcpu, ctxt->exception);
+		kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -4267,13 +4324,19 @@
 
 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 {
+	int r = EMULATE_DONE;
+
 	++vcpu->stat.insn_emulation_fail;
 	trace_kvm_emulate_insn_failed(vcpu);
-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-	vcpu->run->internal.ndata = 0;
+	if (!is_guest_mode(vcpu)) {
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		vcpu->run->internal.ndata = 0;
+		r = EMULATE_FAIL;
+	}
 	kvm_queue_exception(vcpu, UD_VECTOR);
-	return EMULATE_FAIL;
+
+	return r;
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
@@ -4302,10 +4365,11 @@
 	return false;
 }
 
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2,
-			u16 error_code,
-			int emulation_type)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+			    unsigned long cr2,
+			    int emulation_type,
+			    void *insn,
+			    int insn_len)
 {
 	int r;
 	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4323,10 +4387,10 @@
 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
 		init_emulate_ctxt(vcpu);
 		vcpu->arch.emulate_ctxt.interruptibility = 0;
-		vcpu->arch.emulate_ctxt.exception = -1;
+		vcpu->arch.emulate_ctxt.have_exception = false;
 		vcpu->arch.emulate_ctxt.perm_ok = false;
 
-		r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
 		if (r == X86EMUL_PROPAGATE_FAULT)
 			goto done;
 
@@ -4389,7 +4453,7 @@
 	}
 
 done:
-	if (vcpu->arch.emulate_ctxt.exception >= 0) {
+	if (vcpu->arch.emulate_ctxt.have_exception) {
 		inject_emulated_exception(vcpu);
 		r = EMULATE_DONE;
 	} else if (vcpu->arch.pio.count) {
@@ -4413,7 +4477,7 @@
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(emulate_instruction);
+EXPORT_SYMBOL_GPL(x86_emulate_instruction);
 
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 {
@@ -4427,7 +4491,7 @@
 
 static void tsc_bad(void *info)
 {
-	__get_cpu_var(cpu_tsc_khz) = 0;
+	__this_cpu_write(cpu_tsc_khz, 0);
 }
 
 static void tsc_khz_changed(void *data)
@@ -4441,7 +4505,7 @@
 		khz = cpufreq_quick_get(raw_smp_processor_id());
 	if (!khz)
 		khz = tsc_khz;
-	__get_cpu_var(cpu_tsc_khz) = khz;
+	__this_cpu_write(cpu_tsc_khz, khz);
 }
 
 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -4653,7 +4717,6 @@
 
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
 			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
@@ -5116,6 +5179,12 @@
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
 		}
+		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+			/* Page is swapped out. Do synthetic halt */
+			vcpu->arch.apf.halted = true;
+			r = 1;
+			goto out;
+		}
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -5244,7 +5313,8 @@
 
 	r = 1;
 	while (r > 0) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		    !vcpu->arch.apf.halted)
 			r = vcpu_enter_guest(vcpu);
 		else {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -5257,6 +5327,7 @@
 					vcpu->arch.mp_state =
 						KVM_MP_STATE_RUNNABLE;
 				case KVM_MP_STATE_RUNNABLE:
+					vcpu->arch.apf.halted = false;
 					break;
 				case KVM_MP_STATE_SIPI_RECEIVED:
 				default:
@@ -5278,6 +5349,9 @@
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
+
+		kvm_check_async_pf_completion(vcpu);
+
 		if (signal_pending(current)) {
 			r = -EINTR;
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
@@ -5302,6 +5376,9 @@
 	int r;
 	sigset_t sigsaved;
 
+	if (!tsk_used_math(current) && init_fpu(current))
+		return -ENOMEM;
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -5313,8 +5390,12 @@
 	}
 
 	/* re-sync apic's tpr */
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_set_cr8(vcpu, kvm_run->cr8);
+	if (!irqchip_in_kernel(vcpu->kvm)) {
+		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+			r = -EINVAL;
+			goto out;
+		}
+	}
 
 	if (vcpu->arch.pio.count || vcpu->mmio_needed) {
 		if (vcpu->mmio_needed) {
@@ -5323,7 +5404,7 @@
 			vcpu->mmio_needed = 0;
 		}
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+		r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 		if (r != EMULATE_DONE) {
 			r = 0;
@@ -5436,7 +5517,7 @@
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
-	sregs->cr3 = vcpu->arch.cr3;
+	sregs->cr3 = kvm_read_cr3(vcpu);
 	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.efer;
@@ -5504,8 +5585,9 @@
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
-	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -5522,7 +5604,7 @@
 	if (sregs->cr4 & X86_CR4_OSXSAVE)
 		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		mmu_reset_needed = 1;
 	}
 
@@ -5773,6 +5855,8 @@
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.apf.msr_val = 0;
+
 	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
@@ -5792,6 +5876,11 @@
 	vcpu->arch.dr7 = DR7_FIXED_1;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	vcpu->arch.apf.msr_val = 0;
+
+	kvm_clear_async_pf_completion_queue(vcpu);
+	kvm_async_pf_hash_reset(vcpu);
+	vcpu->arch.apf.halted = false;
 
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
@@ -5881,6 +5970,8 @@
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	kvm_async_pf_hash_reset(vcpu);
+
 	return 0;
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
@@ -5906,13 +5997,8 @@
 	free_page((unsigned long)vcpu->arch.pio_data);
 }
 
-struct  kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
-	if (!kvm)
-		return ERR_PTR(-ENOMEM);
-
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5921,7 +6007,7 @@
 
 	spin_lock_init(&kvm->arch.tsc_write_lock);
 
-	return kvm;
+	return 0;
 }
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -5939,8 +6025,10 @@
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	kvm_for_each_vcpu(i, vcpu, kvm)
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_clear_async_pf_completion_queue(vcpu);
 		kvm_unload_vcpu_mmu(vcpu);
+	}
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_arch_vcpu_free(vcpu);
 
@@ -5964,13 +6052,10 @@
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
 	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
 	if (kvm->arch.apic_access_page)
 		put_page(kvm->arch.apic_access_page);
 	if (kvm->arch.ept_identity_pagetable)
 		put_page(kvm->arch.ept_identity_pagetable);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -6051,7 +6136,9 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted)
+		|| !list_empty_careful(&vcpu->async_pf.done)
 		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
 		|| vcpu->arch.nmi_pending ||
 		(kvm_arch_interrupt_allowed(vcpu) &&
@@ -6110,6 +6197,147 @@
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+	int r;
+
+	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+	      is_error_page(work->page))
+		return;
+
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return;
+
+	if (!vcpu->arch.mmu.direct_map &&
+	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+		return;
+
+	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
+static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
+{
+	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
+}
+
+static inline u32 kvm_async_pf_next_probe(u32 key)
+{
+	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+}
+
+static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	while (vcpu->arch.apf.gfns[key] != ~0)
+		key = kvm_async_pf_next_probe(key);
+
+	vcpu->arch.apf.gfns[key] = gfn;
+}
+
+static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	int i;
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+		     (vcpu->arch.apf.gfns[key] != gfn &&
+		      vcpu->arch.apf.gfns[key] != ~0); i++)
+		key = kvm_async_pf_next_probe(key);
+
+	return key;
+}
+
+bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
+}
+
+static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 i, j, k;
+
+	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
+	while (true) {
+		vcpu->arch.apf.gfns[i] = ~0;
+		do {
+			j = kvm_async_pf_next_probe(j);
+			if (vcpu->arch.apf.gfns[j] == ~0)
+				return;
+			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
+			/*
+			 * k lies cyclically in ]i,j]
+			 * |    i.k.j |
+			 * |....j i.k.| or  |.k..j i...|
+			 */
+		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
+		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
+		i = j;
+	}
+}
+
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+				      sizeof(val));
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work)
+{
+	struct x86_exception fault;
+
+	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+	    (vcpu->arch.apf.send_user_only &&
+	     kvm_x86_ops->get_cpl(vcpu) == 0))
+		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
+	}
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work)
+{
+	struct x86_exception fault;
+
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+	if (is_error_page(work->page))
+		work->arch.token = ~0; /* broadcast wakeup */
+	else
+		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
+	}
+	vcpu->arch.apf.halted = false;
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+		return true;
+	else
+		return !kvm_event_needs_reinjection(vcpu) &&
+			kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
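
kvm_pv_enable_async_pf() above expects the guest to hand over a 64-byte-aligned shared area via MSR_KVM_ASYNC_PF_EN, with bit 0 acting as the enable flag and bits 2:5 required to be zero. A hedged sketch of the guest-side handshake follows; the per-CPU variable and function names are illustrative only, while the MSR and flag names are taken from the hunks above.

/* Guest-side sketch: publish a per-CPU reason word to the hypervisor. */
static DEFINE_PER_CPU(u32, apf_reason) __aligned(64);

/* Would be called on each CPU during bringup, with preemption disabled. */
static void kvm_guest_enable_async_pf(void)
{
	u64 pa = __pa(this_cpu_ptr(&apf_reason));

	/* KVM_ASYNC_PF_SEND_ALWAYS could be ORed in to also get events in CPL 0 */
	wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
}
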
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index ff485d3..fc45ba8 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -121,7 +121,7 @@
 	asm("mull %%edx"
 		:"=d" (xloops), "=&a" (d0)
 		:"1" (xloops), "0"
-		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
+		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));
 
 	__delay(++xloops);
 }
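
Several hunks in this merge (cpu_tsc_khz in x86.c, loops_per_jiffy here) replace __get_cpu_var() and cpu_data(raw_smp_processor_id()) reads with the this_cpu accessors, which compile to a single segment-relative load instead of computing the per-CPU address separately. The pattern, shown with an illustrative wrapper:

DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static unsigned long sample_tsc_khz(void)
{
	/* old style: __get_cpu_var(cpu_tsc_khz), which forms the address first */
	return __this_cpu_read(cpu_tsc_khz);	/* single %gs-relative load */
}
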
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 51fae9c..f21962c 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -27,6 +27,7 @@
 #include <asm/amd_nb.h>
 
 static struct bootnode __initdata nodes[8];
+static unsigned char __initdata nodeids[8];
 static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
 
 static __init int find_northbridge(void)
@@ -66,20 +67,6 @@
 	if (smp_found_config)
 		early_get_smp_config();
 #endif
-	early_init_lapic_mapping();
-}
-
-int __init amd_get_nodes(struct bootnode *physnodes)
-{
-	int i;
-	int ret = 0;
-
-	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
-	}
-	return ret;
 }
 
 int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
@@ -114,7 +101,7 @@
 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
-		nodeid = limit & 7;
+		nodeids[i] = nodeid = limit & 7;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				pr_info("Skipping disabled node %d\n", i);
@@ -194,6 +181,76 @@
 	return 0;
 }
 
+#ifdef CONFIG_NUMA_EMU
+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
+void __init amd_get_nodes(struct bootnode *physnodes)
+{
+	int i;
+
+	for_each_node_mask(i, nodes_parsed) {
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
+	}
+}
+
+static int __init find_node_by_addr(unsigned long addr)
+{
+	int ret = NUMA_NO_NODE;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		if (addr >= nodes[i].start && addr < nodes[i].end) {
+			ret = i;
+			break;
+		}
+	return ret;
+}
+
+/*
+ * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
+ * setup to represent the physical topology but reflect the emulated
+ * environment.  For each emulated node, the real node which it appears on is
+ * found and a fake pxm to nid mapping is created which mirrors the actual
+ * locality.  node_distance() then represents the correct distances between
+ * emulated nodes by using the fake acpi mappings to pxms.
+ */
+void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
+{
+	unsigned int bits;
+	unsigned int cores;
+	unsigned int apicid_base = 0;
+	int i;
+
+	bits = boot_cpu_data.x86_coreid_bits;
+	cores = 1 << bits;
+	early_get_boot_cpu_id();
+	if (boot_cpu_physical_apicid > 0)
+		apicid_base = boot_cpu_physical_apicid;
+
+	for (i = 0; i < nr_nodes; i++) {
+		int index;
+		int nid;
+		int j;
+
+		nid = find_node_by_addr(nodes[i].start);
+		if (nid == NUMA_NO_NODE)
+			continue;
+
+		index = nodeids[nid] << bits;
+		if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
+			for (j = apicid_base; j < cores + apicid_base; j++)
+				fake_apicid_to_node[index + j] = i;
+#ifdef CONFIG_ACPI_NUMA
+		__acpi_map_pxm_to_node(nid, i);
+#endif
+	}
+	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+}
+#endif /* CONFIG_NUMA_EMU */
+
 int __init amd_scan_nodes(void)
 {
 	unsigned int bits;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 738e659..dbe34b9 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -8,6 +8,7 @@
 #include <linux/mm.h>
 #include <linux/vmstat.h>
 #include <linux/highmem.h>
+#include <linux/swap.h>
 
 #include <asm/pgtable.h>
 
@@ -89,6 +90,7 @@
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 		get_page(page);
+		SetPageReferenced(page);
 		pages[*nr] = page;
 		(*nr)++;
 
@@ -103,6 +105,17 @@
 	VM_BUG_ON(page != compound_head(page));
 	VM_BUG_ON(page_count(page) == 0);
 	atomic_add(nr, &page->_count);
+	SetPageReferenced(page);
+}
+
+static inline void get_huge_page_tail(struct page *page)
+{
+	/*
+	 * __split_huge_page_refcount() cannot run
+	 * from under us.
+	 */
+	VM_BUG_ON(atomic_read(&page->_count) < 0);
+	atomic_inc(&page->_count);
 }
 
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
@@ -128,6 +141,8 @@
 	do {
 		VM_BUG_ON(compound_head(page) != head);
 		pages[*nr] = page;
+		if (PageTail(page))
+			get_huge_page_tail(page);
 		(*nr)++;
 		page++;
 		refs++;
@@ -148,7 +163,18 @@
 		pmd_t pmd = *pmdp;
 
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		/*
+		 * The pmd_trans_splitting() check below explains why
+		 * pmdp_splitting_flush has to flush the tlb, to stop
+		 * this gup-fast code from running while we set the
+		 * splitting bit in the pmd. Returning zero will take
+		 * the slow path that will call wait_split_huge_page()
+		 * if the pmd is still in splitting state. gup-fast
+		 * can't because it has irq disabled and
+		 * wait_split_huge_page() would never return as the
+		 * tlb flush IPI wouldn't run.
+		 */
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f89b5bb..c821074 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -45,6 +45,7 @@
 #include <asm/bugs.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/olpc_ofw.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/paravirt.h>
@@ -715,6 +716,7 @@
 	/*
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
+	olpc_dt_build_devicetree();
 	sparse_init();
 	zone_sizes_init();
 }
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7762a51..1e72102 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -260,30 +260,30 @@
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __initdata;
+static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
 static char *cmdline __initdata;
 
 static int __init setup_physnodes(unsigned long start, unsigned long end,
 					int acpi, int amd)
 {
-	int nr_nodes = 0;
 	int ret = 0;
 	int i;
 
+	memset(physnodes, 0, sizeof(physnodes));
 #ifdef CONFIG_ACPI_NUMA
 	if (acpi)
-		nr_nodes = acpi_get_nodes(physnodes);
+		acpi_get_nodes(physnodes, start, end);
 #endif
 #ifdef CONFIG_AMD_NUMA
 	if (amd)
-		nr_nodes = amd_get_nodes(physnodes);
+		amd_get_nodes(physnodes);
 #endif
 	/*
 	 * Basic sanity checking on the physical node map: there may be errors
 	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
 	 * kernel parameter is used.
 	 */
-	for (i = 0; i < nr_nodes; i++) {
+	for (i = 0; i < MAX_NUMNODES; i++) {
 		if (physnodes[i].start == physnodes[i].end)
 			continue;
 		if (physnodes[i].start > end) {
@@ -298,17 +298,6 @@
 			physnodes[i].start = start;
 		if (physnodes[i].end > end)
 			physnodes[i].end = end;
-	}
-
-	/*
-	 * Remove all nodes that have no memory or were truncated because of the
-	 * limited address range.
-	 */
-	for (i = 0; i < nr_nodes; i++) {
-		if (physnodes[i].start == physnodes[i].end)
-			continue;
-		physnodes[ret].start = physnodes[i].start;
-		physnodes[ret].end = physnodes[i].end;
 		ret++;
 	}
 
@@ -324,6 +313,24 @@
 	return ret;
 }
 
+static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
+{
+	int i;
+
+	BUG_ON(acpi && amd);
+#ifdef CONFIG_ACPI_NUMA
+	if (acpi)
+		acpi_fake_nodes(nodes, nr_nodes);
+#endif
+#ifdef CONFIG_AMD_NUMA
+	if (amd)
+		amd_fake_nodes(nodes, nr_nodes);
+#endif
+	if (!acpi && !amd)
+		for (i = 0; i < nr_cpu_ids; i++)
+			numa_set_node(i, 0);
+}
+
 /*
  * Setups up nid to range from addr to addr + size.  If the end
  * boundary is greater than max_addr, then max_addr is used instead.
@@ -352,8 +359,7 @@
  * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
  * to max_addr.  The return value is the number of nodes allocated.
  */
-static int __init split_nodes_interleave(u64 addr, u64 max_addr,
-						int nr_phys_nodes, int nr_nodes)
+static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
 	u64 size;
@@ -384,7 +390,7 @@
 		return -1;
 	}
 
-	for (i = 0; i < nr_phys_nodes; i++)
+	for (i = 0; i < MAX_NUMNODES; i++)
 		if (physnodes[i].start != physnodes[i].end)
 			node_set(i, physnode_mask);
 
@@ -553,11 +559,9 @@
 {
 	u64 addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = last_pfn << PAGE_SHIFT;
-	int num_phys_nodes;
 	int num_nodes;
 	int i;
 
-	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size.  Otherwise, if it is just a single number N,
@@ -572,7 +576,7 @@
 		unsigned long n;
 
 		n = simple_strtoul(cmdline, NULL, 0);
-		num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
+		num_nodes = split_nodes_interleave(addr, max_addr, n);
 	}
 
 	if (num_nodes < 0)
@@ -595,7 +599,8 @@
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
-	acpi_fake_nodes(nodes, num_nodes);
+	setup_physnodes(addr, max_addr, acpi, amd);
+	fake_physnodes(acpi, amd, num_nodes);
 	numa_init_array();
 	return 0;
 }
@@ -610,8 +615,12 @@
 	nodes_clear(node_online_map);
 
 #ifdef CONFIG_NUMA_EMU
+	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+			acpi, amd);
 	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
 		return;
+	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+			acpi, amd);
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 #endif
@@ -767,6 +776,7 @@
 
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
 
+#ifndef CONFIG_NUMA_EMU
 void __cpuinit numa_add_cpu(int cpu)
 {
 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
@@ -776,34 +786,115 @@
 {
 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
+#else
+void __cpuinit numa_add_cpu(int cpu)
+{
+	unsigned long addr;
+	u16 apicid;
+	int physnid;
+	int nid = NUMA_NO_NODE;
+
+	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+	if (apicid != BAD_APICID)
+		nid = apicid_to_node[apicid];
+	if (nid == NUMA_NO_NODE)
+		nid = early_cpu_to_node(cpu);
+	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
+
+	/*
+	 * Use the starting address of the emulated node to find which physical
+	 * node it is allocated on.
+	 */
+	addr = node_start_pfn(nid) << PAGE_SHIFT;
+	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
+		if (addr >= physnodes[physnid].start &&
+		    addr < physnodes[physnid].end)
+			break;
+
+	/*
+	 * Map the cpu to each emulated node that is allocated on the physical
+	 * node of the cpu's apic id.
+	 */
+	for_each_online_node(nid) {
+		addr = node_start_pfn(nid) << PAGE_SHIFT;
+		if (addr >= physnodes[physnid].start &&
+		    addr < physnodes[physnid].end)
+			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+	}
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	int i;
+
+	for_each_online_node(i)
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
+}
+#endif /* !CONFIG_NUMA_EMU */
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
 {
 	int node = early_cpu_to_node(cpu);
 	struct cpumask *mask;
 	char buf[64];
 
 	mask = node_to_cpumask_map[node];
-	if (mask == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
+	if (!mask) {
+		pr_err("node_to_cpumask_map[%i] NULL\n", node);
 		dump_stack();
-		return;
+		return NULL;
 	}
 
+	cpulist_scnprintf(buf, sizeof(buf), mask);
+	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+		enable ? "numa_add_cpu" : "numa_remove_cpu",
+		cpu, node, buf);
+	return mask;
+}
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+#ifndef CONFIG_NUMA_EMU
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	struct cpumask *mask;
+
+	mask = debug_cpumask_set_cpu(cpu, enable);
+	if (!mask)
+		return;
+
 	if (enable)
 		cpumask_set_cpu(cpu, mask);
 	else
 		cpumask_clear_cpu(cpu, mask);
-
-	cpulist_scnprintf(buf, sizeof(buf), mask);
-	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
 }
+#else
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	int node = early_cpu_to_node(cpu);
+	struct cpumask *mask;
+	int i;
+
+	for_each_online_node(i) {
+		unsigned long addr;
+
+		addr = node_start_pfn(i) << PAGE_SHIFT;
+		if (addr < physnodes[node].start ||
+					addr >= physnodes[node].end)
+			continue;
+		mask = debug_cpumask_set_cpu(cpu, enable);
+		if (!mask)
+			return;
+
+		if (enable)
+			cpumask_set_cpu(cpu, mask);
+		else
+			cpumask_clear_cpu(cpu, mask);
+	}
+}
+#endif /* CONFIG_NUMA_EMU */
 
 void __cpuinit numa_add_cpu(int cpu)
 {
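Aside: the emulated numa_add_cpu()/numa_set_cpumask() logic in the hunk above reduces to an interval-containment test — an emulated node belongs to whichever physical node's [start, end) range contains its start address. The following is only an illustrative userspace sketch of that test with made-up ranges and a hypothetical phys_node_of() helper, not the kernel's bootnode/physnodes structures:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's physical node ranges. */
struct range { unsigned long long start, end; };

/* Return the index of the physical node whose range contains addr, or -1. */
static int phys_node_of(unsigned long long addr,
                        const struct range *phys, int nr_phys)
{
	for (int i = 0; i < nr_phys; i++)
		if (addr >= phys[i].start && addr < phys[i].end)
			return i;
	return -1;
}

int main(void)
{
	/* Two physical nodes, four emulated nodes interleaved across them. */
	const struct range phys[] = { { 0x0ULL, 0x40000000ULL },
				      { 0x40000000ULL, 0x80000000ULL } };
	const unsigned long long emu_start[] = { 0x0ULL, 0x20000000ULL,
						 0x40000000ULL, 0x60000000ULL };

	for (int nid = 0; nid < 4; nid++)
		printf("emulated node %d -> physical node %d\n",
		       nid, phys_node_of(emu_start[nid], phys, 2));
	return 0;
}

With these sample ranges, emulated nodes 0 and 1 map to physical node 0 and nodes 2 and 3 to physical node 1, which is the grouping the CPU-to-node masks above are built from.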
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8be8c7d..500242d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -320,6 +320,25 @@
 	return changed;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+	int changed = !pmd_same(*pmdp, entry);
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	if (changed && dirty) {
+		*pmdp = entry;
+		pmd_update_defer(vma->vm_mm, address, pmdp);
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+
+	return changed;
+}
+#endif
+
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
 			      unsigned long addr, pte_t *ptep)
 {
@@ -335,6 +354,23 @@
 	return ret;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+			      unsigned long addr, pmd_t *pmdp)
+{
+	int ret = 0;
+
+	if (pmd_young(*pmdp))
+		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+					 (unsigned long *)pmdp);
+
+	if (ret)
+		pmd_update(vma->vm_mm, addr, pmdp);
+
+	return ret;
+}
+#endif
+
 int ptep_clear_flush_young(struct vm_area_struct *vma,
 			   unsigned long address, pte_t *ptep)
 {
@@ -347,6 +383,36 @@
 	return young;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp)
+{
+	int young;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	young = pmdp_test_and_clear_young(vma, address, pmdp);
+	if (young)
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	return young;
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp)
+{
+	int set;
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
+				(unsigned long *)pmdp);
+	if (set) {
+		pmd_update(vma->vm_mm, address, pmdp);
+		/* need tlb flush only to serialize against gup-fast */
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+}
+#endif
+
 /**
  * reserve_top_address - reserves a hole in the top of kernel address space
  * @reserve - size of hole to reserve
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 171a0aa..603d285 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -349,18 +349,19 @@
 
 void __init acpi_numa_arch_fixup(void) {}
 
-int __init acpi_get_nodes(struct bootnode *physnodes)
+#ifdef CONFIG_NUMA_EMU
+void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end)
 {
 	int i;
-	int ret = 0;
 
 	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
+		cutoff_node(i, start, end);
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
 	}
-	return ret;
 }
+#endif /* CONFIG_NUMA_EMU */
 
 /* Use the information discovered above to actually set up the nodes. */
 int __init acpi_scan_nodes(unsigned long start, unsigned long end)
@@ -505,8 +506,6 @@
 {
 	int i, j;
 
-	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
-			 "topology.\n");
 	for (i = 0; i < num_nodes; i++) {
 		int nid, pxm;
 
@@ -526,6 +525,17 @@
 			    fake_apicid_to_node[j] == NUMA_NO_NODE)
 				fake_apicid_to_node[j] = i;
 	}
+
+	/*
+	 * If there are apicid-to-node mappings for physical nodes that do not
+	 * have a corresponding emulated node, it should default to a guaranteed
+	 * value.
+	 */
+	for (i = 0; i < MAX_LOCAL_APIC; i++)
+		if (apicid_to_node[i] != NUMA_NO_NODE &&
+		    fake_apicid_to_node[i] == NUMA_NO_NODE)
+			fake_apicid_to_node[i] = 0;
+
 	for (i = 0; i < num_nodes; i++)
 		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
 	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 358c8b9..e2b7b0c 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -65,7 +65,6 @@
 
 	switch (val) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		if (ctr_running)
 			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
 		else if (!nmi_enabled)
@@ -143,7 +142,7 @@
 
 inline int op_x86_phys_to_virt(int phys)
 {
-	return __get_cpu_var(switch_index) + phys;
+	return __this_cpu_read(switch_index) + phys;
 }
 
 inline int op_x86_virt_to_phys(int virt)
@@ -361,7 +360,7 @@
 static struct notifier_block profile_exceptions_nb = {
 	.notifier_call = profile_exceptions_notify,
 	.next = NULL,
-	.priority = 2
+	.priority = NMI_LOCAL_LOW_PRIOR,
 };
 
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 0636dd9..720bf5a 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -38,7 +38,7 @@
 static struct notifier_block profile_timer_exceptions_nb = {
 	.notifier_call = profile_timer_exceptions_notify,
 	.next = NULL,
-	.priority = 0
+	.priority = NMI_LOW_PRIOR,
 };
 
 static int timer_start(void)
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index d769cda..94b7450 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -95,8 +95,8 @@
 		 * counter width:
 		 */
 		if (!(eax.split.version_id == 0 &&
-			current_cpu_data.x86 == 6 &&
-				current_cpu_data.x86_model == 15)) {
+			__this_cpu_read(cpu_info.x86) == 6 &&
+				__this_cpu_read(cpu_info.x86_model) == 15)) {
 
 			if (counter_width < eax.split.bit_width)
 				counter_width = eax.split.bit_width;
@@ -235,8 +235,8 @@
 	eax.full = cpuid_eax(0xa);
 
 	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
-	if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
-		current_cpu_data.x86_model == 15) {
+	if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
+		__this_cpu_read(cpu_info.x86_model) == 15) {
 		eax.split.version_id = 2;
 		eax.split.num_counters = 2;
 		eax.split.bit_width = 40;
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index fc1e8fe..e27dffb 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/range.h>
 
+#include <asm/amd_nb.h>
 #include <asm/pci_x86.h>
 
 #include <asm/pci-direct.h>
@@ -378,6 +379,34 @@
 	.notifier_call	= amd_cpu_notify,
 };
 
+static void __init pci_enable_pci_io_ecs(void)
+{
+#ifdef CONFIG_AMD_NB
+	unsigned int i, n;
+
+	for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
+		u8 bus = amd_nb_bus_dev_ranges[i].bus;
+		u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
+		u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;
+
+		for (; slot < limit; ++slot) {
+			u32 val = read_pci_config(bus, slot, 3, 0);
+
+			if (!early_is_amd_nb(val))
+				continue;
+
+			val = read_pci_config(bus, slot, 3, 0x8c);
+			if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
+				val |= ENABLE_CF8_EXT_CFG >> 32;
+				write_pci_config(bus, slot, 3, 0x8c, val);
+			}
+			++n;
+		}
+	}
+	pr_info("Extended Config Space enabled on %u nodes\n", n);
+#endif
+}
+
 static int __init pci_io_ecs_init(void)
 {
 	int cpu;
@@ -386,6 +415,10 @@
         if (boot_cpu_data.x86 < 0x10)
 		return 0;
 
+	/* Try the PCI method first. */
+	if (early_pci_allowed())
+		pci_enable_pci_io_ecs();
+
 	register_cpu_notifier(&amd_cpu_notifier);
 	for_each_online_cpu(cpu)
 		amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 0846a5b..ab8269b 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -9,6 +9,7 @@
  * option) any later version.
  */
 
+#include <linux/acpi.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/pci.h>
@@ -25,12 +26,14 @@
 	u8 fbus, lbus;
 	int i;
 
+#ifdef CONFIG_ACPI
 	/*
-	 * The x86_pci_root_bus_res_quirks() function already refuses to use
-	 * this information if ACPI _CRS was used. Therefore, we don't bother
-	 * checking if ACPI is enabled, and just generate the information
-	 * for both the ACPI _CRS and no ACPI cases.
+	 * We should get host bridge information from ACPI unless the BIOS
+	 * doesn't support it.
 	 */
+	if (acpi_os_get_root_pointer())
+		return;
+#endif
 
 	info = &pci_root_info[pci_root_num];
 	pci_root_num++;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index f7c8a39..5fe7502 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -22,6 +22,7 @@
 
 unsigned int pci_early_dump_regs;
 static int pci_bf_sort;
+static int smbios_type_b1_flag;
 int pci_routeirq;
 int noioapicquirk;
 #ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -185,6 +186,39 @@
 	return 0;
 }
 
+static void __devinit read_dmi_type_b1(const struct dmi_header *dm,
+				       void *private_data)
+{
+	u8 *d = (u8 *)dm + 4;
+
+	if (dm->type != 0xB1)
+		return;
+	switch (((*(u32 *)d) >> 9) & 0x03) {
+	case 0x00:
+		printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n");
+		break;
+	case 0x01: /* set pci=bfsort */
+		smbios_type_b1_flag = 1;
+		break;
+	case 0x02: /* do not set pci=bfsort */
+		smbios_type_b1_flag = 2;
+		break;
+	default:
+		break;
+	}
+}
+
+static int __devinit find_sort_method(const struct dmi_system_id *d)
+{
+	dmi_walk(read_dmi_type_b1, NULL);
+
+	if (smbios_type_b1_flag == 1) {
+		set_bf_sort(d);
+		return 0;
+	}
+	return -1;
+}
+
 /*
  * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
  */
@@ -213,6 +247,13 @@
 	},
 #endif		/* __i386__ */
 	{
+		.callback = find_sort_method,
+		.ident = "Dell System",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+		},
+	},
+	{
 		.callback = set_bf_sort,
 		.ident = "Dell PowerEdge 1950",
 		.matches = {
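Aside: the bit layout assumed by read_dmi_type_b1() above is that the sort flag sits in bits 10:9 of the 32-bit word at offset 4 of the OEM type-0xB1 record, so a value of 0x0200 there selects pci=bfsort and 0x0400 disables it. A standalone sketch of the same decode (the sample record bytes are hypothetical; byte order is little-endian as on x86):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Decode bits 10:9 of the dword at offset 4 of a type-0xB1 DMI record. */
static int bfsort_flag(const uint8_t *record)
{
	uint32_t dword;

	memcpy(&dword, record + 4, sizeof(dword)); /* avoid unaligned access */
	return (dword >> 9) & 0x03;   /* 0 = unknown, 1 = bfsort, 2 = no bfsort */
}

int main(void)
{
	/* type/length/handle header followed by the flag dword 0x00000200 */
	uint8_t rec[] = { 0xB1, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00 };

	printf("flag = %d\n", bfsort_flag(rec));  /* prints 1 -> pci=bfsort */
	return 0;
}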
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 9f9bfb7..87e6c83 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -589,7 +589,8 @@
 	case PCI_DEVICE_ID_INTEL_ICH10_1:
 	case PCI_DEVICE_ID_INTEL_ICH10_2:
 	case PCI_DEVICE_ID_INTEL_ICH10_3:
-	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
+	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0:
+	case PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1:
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 65df603..25bfdbb 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -103,7 +103,7 @@
 static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
 
 static u32 *pclk_spi0;
-/* Always contains an accessable address, start with 0 */
+/* Always contains an accessible address, start with 0 */
 static struct dw_spi_reg *pspi;
 
 static struct kmsg_dumper dw_dumper;
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index fee0b49..ea6529e 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -71,32 +71,6 @@
 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
 int sfi_mrtc_num;
 
-static inline void assign_to_mp_irq(struct mpc_intsrc *m,
-				    struct mpc_intsrc *mp_irq)
-{
-	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
-}
-
-static inline int mp_irq_cmp(struct mpc_intsrc *mp_irq,
-				struct mpc_intsrc *m)
-{
-	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
-}
-
-static void save_mp_irq(struct mpc_intsrc *m)
-{
-	int i;
-
-	for (i = 0; i < mp_irq_entries; i++) {
-		if (!mp_irq_cmp(&mp_irqs[i], m))
-			return;
-	}
-
-	assign_to_mp_irq(m, &mp_irqs[mp_irq_entries]);
-	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-		panic("Max # of irq sources exceeded!!\n");
-}
-
 /* parse all the mtimer info to a static mtimer array */
 static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 {
@@ -130,7 +104,7 @@
 			mp_irq.srcbusirq = pentry->irq;	/* IRQ */
 			mp_irq.dstapic = MP_APIC_ALL;
 			mp_irq.dstirq = pentry->irq;
-			save_mp_irq(&mp_irq);
+			mp_save_irq(&mp_irq);
 	}
 
 	return 0;
@@ -200,7 +174,7 @@
 		mp_irq.srcbusirq = pentry->irq;	/* IRQ */
 		mp_irq.dstapic = MP_APIC_ALL;
 		mp_irq.dstirq = pentry->irq;
-		save_mp_irq(&mp_irq);
+		mp_save_irq(&mp_irq);
 	}
 	return 0;
 }
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
index c31b8fc..e797428 100644
--- a/arch/x86/platform/olpc/Makefile
+++ b/arch/x86/platform/olpc/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_OLPC)		+= olpc.o
 obj-$(CONFIG_OLPC_XO1)		+= olpc-xo1.o
 obj-$(CONFIG_OLPC_OPENFIRMWARE)	+= olpc_ofw.o
+obj-$(CONFIG_OLPC_OPENFIRMWARE_DT)	+= olpc_dt.o
diff --git a/arch/x86/platform/olpc/olpc-xo1.c b/arch/x86/platform/olpc/olpc-xo1.c
index f5442c0..1277756 100644
--- a/arch/x86/platform/olpc/olpc-xo1.c
+++ b/arch/x86/platform/olpc/olpc-xo1.c
@@ -1,6 +1,7 @@
 /*
  * Support for features of the OLPC XO-1 laptop
  *
+ * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
  * Copyright (C) 2010 One Laptop per Child
  * Copyright (C) 2006 Red Hat, Inc.
  * Copyright (C) 2006 Advanced Micro Devices, Inc.
@@ -12,8 +13,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
 
@@ -22,9 +21,6 @@
 
 #define DRV_NAME "olpc-xo1"
 
-#define PMS_BAR		4
-#define ACPI_BAR	5
-
 /* PMC registers (PMS block) */
 #define PM_SCLK		0x10
 #define PM_IN_SLPCTL	0x20
@@ -57,65 +53,67 @@
 	outl(0x00002000, acpi_base + PM1_CNT);
 }
 
-/* Read the base addresses from the PCI BAR info */
-static int __devinit setup_bases(struct pci_dev *pdev)
-{
-	int r;
-
-	r = pci_enable_device_io(pdev);
-	if (r) {
-		dev_err(&pdev->dev, "can't enable device IO\n");
-		return r;
-	}
-
-	r = pci_request_region(pdev, ACPI_BAR, DRV_NAME);
-	if (r) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", ACPI_BAR);
-		return r;
-	}
-
-	r = pci_request_region(pdev, PMS_BAR, DRV_NAME);
-	if (r) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", PMS_BAR);
-		pci_release_region(pdev, ACPI_BAR);
-		return r;
-	}
-
-	acpi_base = pci_resource_start(pdev, ACPI_BAR);
-	pms_base = pci_resource_start(pdev, PMS_BAR);
-
-	return 0;
-}
-
 static int __devinit olpc_xo1_probe(struct platform_device *pdev)
 {
-	struct pci_dev *pcidev;
-	int r;
+	struct resource *res;
 
-	pcidev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
-				NULL);
-	if (!pdev)
+	/* don't run on non-XOs */
+	if (!machine_is_olpc())
 		return -ENODEV;
 
-	r = setup_bases(pcidev);
-	if (r)
-		return r;
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
+		return -EIO;
+	}
 
-	pm_power_off = xo1_power_off;
+	if (!request_region(res->start, resource_size(res), DRV_NAME)) {
+		dev_err(&pdev->dev, "can't request region\n");
+		return -EIO;
+	}
 
-	printk(KERN_INFO "OLPC XO-1 support registered\n");
+	if (strcmp(pdev->name, "cs5535-pms") == 0)
+		pms_base = res->start;
+	else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+		acpi_base = res->start;
+
+	/* If we have both addresses, we can override the poweroff hook */
+	if (pms_base && acpi_base) {
+		pm_power_off = xo1_power_off;
+		printk(KERN_INFO "OLPC XO-1 support registered\n");
+	}
+
 	return 0;
 }
 
 static int __devexit olpc_xo1_remove(struct platform_device *pdev)
 {
+	struct resource *r;
+
+	r = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	release_region(r->start, resource_size(r));
+
+	if (strcmp(pdev->name, "cs5535-pms") == 0)
+		pms_base = 0;
+	else if (strcmp(pdev->name, "cs5535-acpi") == 0)
+		acpi_base = 0;
+
 	pm_power_off = NULL;
 	return 0;
 }
 
-static struct platform_driver olpc_xo1_driver = {
+static struct platform_driver cs5535_pms_drv = {
 	.driver = {
-		.name = DRV_NAME,
+		.name = "cs5535-pms",
+		.owner = THIS_MODULE,
+	},
+	.probe = olpc_xo1_probe,
+	.remove = __devexit_p(olpc_xo1_remove),
+};
+
+static struct platform_driver cs5535_acpi_drv = {
+	.driver = {
+		.name = "cs5535-acpi",
 		.owner = THIS_MODULE,
 	},
 	.probe = olpc_xo1_probe,
@@ -124,12 +122,23 @@
 
 static int __init olpc_xo1_init(void)
 {
-	return platform_driver_register(&olpc_xo1_driver);
+	int r;
+
+	r = platform_driver_register(&cs5535_pms_drv);
+	if (r)
+		return r;
+
+	r = platform_driver_register(&cs5535_acpi_drv);
+	if (r)
+		platform_driver_unregister(&cs5535_pms_drv);
+
+	return r;
 }
 
 static void __exit olpc_xo1_exit(void)
 {
-	platform_driver_unregister(&olpc_xo1_driver);
+	platform_driver_unregister(&cs5535_acpi_drv);
+	platform_driver_unregister(&cs5535_pms_drv);
 }
 
 MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
new file mode 100644
index 0000000..dab8746
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -0,0 +1,183 @@
+/*
+ * OLPC-specific OFW device tree support code.
+ *
+ * Paul Mackerras	August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com
+ *
+ *  Adapted for sparc by David S. Miller davem@davemloft.net
+ *  Adapted for x86/OLPC by Andres Salomon <dilinger@queued.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_pdt.h>
+#include <asm/olpc_ofw.h>
+
+static phandle __init olpc_dt_getsibling(phandle node)
+{
+	const void *args[] = { (void *)node };
+	void *res[] = { &node };
+
+	if ((s32)node == -1)
+		return 0;
+
+	if (olpc_ofw("peer", args, res) || (s32)node == -1)
+		return 0;
+
+	return node;
+}
+
+static phandle __init olpc_dt_getchild(phandle node)
+{
+	const void *args[] = { (void *)node };
+	void *res[] = { &node };
+
+	if ((s32)node == -1)
+		return 0;
+
+	if (olpc_ofw("child", args, res) || (s32)node == -1) {
+		pr_err("PROM: %s: fetching child failed!\n", __func__);
+		return 0;
+	}
+
+	return node;
+}
+
+static int __init olpc_dt_getproplen(phandle node, const char *prop)
+{
+	const void *args[] = { (void *)node, prop };
+	int len;
+	void *res[] = { &len };
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("getproplen", args, res)) {
+		pr_err("PROM: %s: getproplen failed!\n", __func__);
+		return -1;
+	}
+
+	return len;
+}
+
+static int __init olpc_dt_getproperty(phandle node, const char *prop,
+		char *buf, int bufsize)
+{
+	int plen;
+
+	plen = olpc_dt_getproplen(node, prop);
+	if (plen > bufsize || plen < 1) {
+		return -1;
+	} else {
+		const void *args[] = { (void *)node, prop, buf, (void *)plen };
+		void *res[] = { &plen };
+
+		if (olpc_ofw("getprop", args, res)) {
+			pr_err("PROM: %s: getprop failed!\n", __func__);
+			return -1;
+		}
+	}
+
+	return plen;
+}
+
+static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
+{
+	const void *args[] = { (void *)node, prev, buf };
+	int success;
+	void *res[] = { &success };
+
+	buf[0] = '\0';
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("nextprop", args, res) || success != 1)
+		return -1;
+
+	return 0;
+}
+
+static int __init olpc_dt_pkg2path(phandle node, char *buf,
+		const int buflen, int *len)
+{
+	const void *args[] = { (void *)node, buf, (void *)buflen };
+	void *res[] = { len };
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("package-to-path", args, res) || *len < 1)
+		return -1;
+
+	return 0;
+}
+
+static unsigned int prom_early_allocated __initdata;
+
+void * __init prom_early_alloc(unsigned long size)
+{
+	static u8 *mem;
+	static size_t free_mem;
+	void *res;
+
+	if (free_mem < size) {
+		const size_t chunk_size = max(PAGE_SIZE, size);
+
+		/*
+		 * To minimize the number of allocations, grab at least
+		 * PAGE_SIZE of memory (that's an arbitrary choice that's
+		 * fast enough on the platforms we care about while minimizing
+		 * wasted bootmem) and hand off chunks of it to callers.
+		 */
+		res = alloc_bootmem(chunk_size);
+		if (!res)
+			return NULL;
+		prom_early_allocated += chunk_size;
+		memset(res, 0, chunk_size);
+		free_mem = chunk_size;
+		mem = res;
+	}
+
+	/* allocate from the local cache */
+	free_mem -= size;
+	res = mem;
+	mem += size;
+	return res;
+}
+
+static struct of_pdt_ops prom_olpc_ops __initdata = {
+	.nextprop = olpc_dt_nextprop,
+	.getproplen = olpc_dt_getproplen,
+	.getproperty = olpc_dt_getproperty,
+	.getchild = olpc_dt_getchild,
+	.getsibling = olpc_dt_getsibling,
+	.pkg2path = olpc_dt_pkg2path,
+};
+
+void __init olpc_dt_build_devicetree(void)
+{
+	phandle root;
+
+	if (!olpc_ofw_is_installed())
+		return;
+
+	root = olpc_dt_getsibling(0);
+	if (!root) {
+		pr_err("PROM: unable to get root node from OFW!\n");
+		return;
+	}
+	of_pdt_build_devicetree(root, &prom_olpc_ops);
+
+	pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
+			prom_early_allocated);
+}
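Aside: prom_early_alloc() above is a chunked bump allocator — it grabs at least a page of zeroed bootmem at a time and hands out sub-allocations until the chunk is exhausted. A minimal userspace sketch of the same pattern, using calloc in place of alloc_bootmem purely for illustration:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 4096  /* stands in for PAGE_SIZE */

/* Hand out sub-allocations from a zeroed chunk; grab a new chunk when empty. */
static void *early_alloc(size_t size)
{
	static unsigned char *mem;
	static size_t free_mem;
	void *res;

	if (free_mem < size) {
		size_t chunk = size > CHUNK_SIZE ? size : CHUNK_SIZE;

		res = calloc(1, chunk);  /* alloc_bootmem() analogue, pre-zeroed */
		if (!res)
			return NULL;
		free_mem = chunk;
		mem = res;
	}

	free_mem -= size;
	res = mem;
	mem += size;
	return res;
}

int main(void)
{
	char *a = early_alloc(100);
	char *b = early_alloc(200);

	/* Both come out of the same 4096-byte chunk, 100 bytes apart. */
	printf("a=%p b=%p delta=%ld\n", (void *)a, (void *)b, (long)(b - a));
	return 0;
}

Nothing is ever freed; as the comment in the code notes, the goal is simply to minimize the number of bootmem allocations while the device tree is built once at boot.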
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 7873204..e7604f6 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -110,3 +110,8 @@
 			(unsigned long)olpc_ofw_cif, (-start) >> 20);
 	reserve_top_address(-start);
 }
+
+bool __init olpc_ofw_is_installed(void)
+{
+	return olpc_ofw_cif != NULL;
+}
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
index ca54875..7785b72 100644
--- a/arch/x86/platform/sfi/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
@@ -34,17 +34,6 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 
-static void __init mp_sfi_register_lapic_address(unsigned long address)
-{
-	mp_lapic_addr = address;
-
-	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
-	if (boot_cpu_physical_apicid == -1U)
-		boot_cpu_physical_apicid = read_apic_id();
-
-	pr_info("Boot CPU = %d\n", boot_cpu_physical_apicid);
-}
-
 /* All CPUs enumerated by SFI must be present and enabled */
 static void __cpuinit mp_sfi_register_lapic(u8 id)
 {
@@ -110,7 +99,7 @@
 int __init sfi_platform_init(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-	mp_sfi_register_lapic_address(sfi_lapic_addr);
+	register_lapic_address(sfi_lapic_addr);
 	sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, sfi_parse_cpus);
 #endif
 #ifdef CONFIG_X86_IO_APIC
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7793851..17c565d 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -12,7 +12,8 @@
 
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
-			grant-table.o suspend.o platform-pci-unplug.o
+			grant-table.o suspend.o platform-pci-unplug.o \
+			p2m.o
 
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44dcad4..7e8d3bc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -574,8 +574,8 @@
 
 	preempt_disable();
 
-	start = __get_cpu_var(idt_desc).address;
-	end = start + __get_cpu_var(idt_desc).size + 1;
+	start = __this_cpu_read(idt_desc.address);
+	end = start + __this_cpu_read(idt_desc.size) + 1;
 
 	xen_mc_flush();
 
@@ -1174,6 +1174,15 @@
 
 	xen_smp_init();
 
+#ifdef CONFIG_ACPI_NUMA
+	/*
+	 * The pages we get from Xen are not related to machine pages, so
+	 * any NUMA information the kernel tries to get from ACPI will
+	 * be meaningless.  Prevent it from trying.
+	 */
+	acpi_numa = -1;
+#endif
+
 	pgd = (pgd_t *)xen_start_info->pt_base;
 
 	if (!xen_initial_domain())
@@ -1256,25 +1265,6 @@
 #endif
 }
 
-static uint32_t xen_cpuid_base(void)
-{
-	uint32_t base, eax, ebx, ecx, edx;
-	char signature[13];
-
-	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
-		cpuid(base, &eax, &ebx, &ecx, &edx);
-		*(uint32_t *)(signature + 0) = ebx;
-		*(uint32_t *)(signature + 4) = ecx;
-		*(uint32_t *)(signature + 8) = edx;
-		signature[12] = 0;
-
-		if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
-			return base;
-	}
-
-	return 0;
-}
-
 static int init_hvm_pv_info(int *major, int *minor)
 {
 	uint32_t eax, ebx, ecx, edx, pages, msr, base;
@@ -1384,6 +1374,18 @@
 	return true;
 }
 
+bool xen_hvm_need_lapic(void)
+{
+	if (xen_pv_domain())
+		return false;
+	if (!xen_hvm_domain())
+		return false;
+	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+		return false;
+	return true;
+}
+EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
 const __refconst struct hypervisor_x86 x86_hyper_xen_hvm = {
 	.name			= "Xen HVM",
 	.detect			= xen_hvm_platform,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 44924e5..5e92b61 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -173,371 +173,6 @@
  */
 #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
-/*
- * Xen leaves the responsibility for maintaining p2m mappings to the
- * guests themselves, but it must also access and update the p2m array
- * during suspend/resume when all the pages are reallocated.
- *
- * The p2m table is logically a flat array, but we implement it as a
- * three-level tree to allow the address space to be sparse.
- *
- *                               Xen
- *                                |
- *     p2m_top              p2m_top_mfn
- *       /  \                   /   \
- * p2m_mid p2m_mid	p2m_mid_mfn p2m_mid_mfn
- *    / \      / \         /           /
- *  p2m p2m p2m p2m p2m p2m p2m ...
- *
- * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
- *
- * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
- * maximum representable pseudo-physical address space is:
- *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
- *
- * P2M_PER_PAGE depends on the architecture, as a mfn is always
- * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
- * 512 and 1024 entries respectively. 
- */
-
-unsigned long xen_max_p2m_pfn __read_mostly;
-
-#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
-#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
-#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
-
-#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-
-/* Placeholders for holes in the address space */
-static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
-
-static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
-static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
-
-RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
-
-static inline unsigned p2m_top_index(unsigned long pfn)
-{
-	BUG_ON(pfn >= MAX_P2M_PFN);
-	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
-}
-
-static inline unsigned p2m_mid_index(unsigned long pfn)
-{
-	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
-}
-
-static inline unsigned p2m_index(unsigned long pfn)
-{
-	return pfn % P2M_PER_PAGE;
-}
-
-static void p2m_top_init(unsigned long ***top)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = p2m_mid_missing;
-}
-
-static void p2m_top_mfn_init(unsigned long *top)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
-}
-
-static void p2m_top_mfn_p_init(unsigned long **top)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
-		top[i] = p2m_mid_missing_mfn;
-}
-
-static void p2m_mid_init(unsigned long **mid)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		mid[i] = p2m_missing;
-}
-
-static void p2m_mid_mfn_init(unsigned long *mid)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		mid[i] = virt_to_mfn(p2m_missing);
-}
-
-static void p2m_init(unsigned long *p2m)
-{
-	unsigned i;
-
-	for (i = 0; i < P2M_MID_PER_PAGE; i++)
-		p2m[i] = INVALID_P2M_ENTRY;
-}
-
-/*
- * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
- *
- * This is called both at boot time, and after resuming from suspend:
- * - At boot time we're called very early, and must use extend_brk()
- *   to allocate memory.
- *
- * - After resume we're called from within stop_machine, but the mfn
- *   tree should alreay be completely allocated.
- */
-void xen_build_mfn_list_list(void)
-{
-	unsigned long pfn;
-
-	/* Pre-initialize p2m_top_mfn to be completely missing */
-	if (p2m_top_mfn == NULL) {
-		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_mid_mfn_init(p2m_mid_missing_mfn);
-
-		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_top_mfn_p_init(p2m_top_mfn_p);
-
-		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		p2m_top_mfn_init(p2m_top_mfn);
-	} else {
-		/* Reinitialise, mfn's all change after migration */
-		p2m_mid_mfn_init(p2m_mid_missing_mfn);
-	}
-
-	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned mididx = p2m_mid_index(pfn);
-		unsigned long **mid;
-		unsigned long *mid_mfn_p;
-
-		mid = p2m_top[topidx];
-		mid_mfn_p = p2m_top_mfn_p[topidx];
-
-		/* Don't bother allocating any mfn mid levels if
-		 * they're just missing, just update the stored mfn,
-		 * since all could have changed over a migrate.
-		 */
-		if (mid == p2m_mid_missing) {
-			BUG_ON(mididx);
-			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
-			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
-			continue;
-		}
-
-		if (mid_mfn_p == p2m_mid_missing_mfn) {
-			/*
-			 * XXX boot-time only!  We should never find
-			 * missing parts of the mfn tree after
-			 * runtime.  extend_brk() will BUG if we call
-			 * it too late.
-			 */
-			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_mid_mfn_init(mid_mfn_p);
-
-			p2m_top_mfn_p[topidx] = mid_mfn_p;
-		}
-
-		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
-	}
-}
-
-void xen_setup_mfn_list_list(void)
-{
-	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
-
-	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-		virt_to_mfn(p2m_top_mfn);
-	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
-}
-
-/* Set up p2m_top to point to the domain-builder provided p2m pages */
-void __init xen_build_dynamic_phys_to_machine(void)
-{
-	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
-	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
-	unsigned long pfn;
-
-	xen_max_p2m_pfn = max_pfn;
-
-	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_init(p2m_missing);
-
-	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_mid_init(p2m_mid_missing);
-
-	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
-	p2m_top_init(p2m_top);
-
-	/*
-	 * The domain builder gives us a pre-constructed p2m array in
-	 * mfn_list for all the pages initially given to us, so we just
-	 * need to graft that into our tree structure.
-	 */
-	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned mididx = p2m_mid_index(pfn);
-
-		if (p2m_top[topidx] == p2m_mid_missing) {
-			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_mid_init(mid);
-
-			p2m_top[topidx] = mid;
-		}
-
-		p2m_top[topidx][mididx] = &mfn_list[pfn];
-	}
-}
-
-unsigned long get_phys_to_machine(unsigned long pfn)
-{
-	unsigned topidx, mididx, idx;
-
-	if (unlikely(pfn >= MAX_P2M_PFN))
-		return INVALID_P2M_ENTRY;
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	return p2m_top[topidx][mididx][idx];
-}
-EXPORT_SYMBOL_GPL(get_phys_to_machine);
-
-static void *alloc_p2m_page(void)
-{
-	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
-}
-
-static void free_p2m_page(void *p)
-{
-	free_page((unsigned long)p);
-}
-
-/* 
- * Fully allocate the p2m structure for a given pfn.  We need to check
- * that both the top and mid levels are allocated, and make sure the
- * parallel mfn tree is kept in sync.  We may race with other cpus, so
- * the new pages are installed with cmpxchg; if we lose the race then
- * simply free the page we allocated and use the one that's there.
- */
-static bool alloc_p2m(unsigned long pfn)
-{
-	unsigned topidx, mididx;
-	unsigned long ***top_p, **mid;
-	unsigned long *top_mfn_p, *mid_mfn;
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-
-	top_p = &p2m_top[topidx];
-	mid = *top_p;
-
-	if (mid == p2m_mid_missing) {
-		/* Mid level is missing, allocate a new one */
-		mid = alloc_p2m_page();
-		if (!mid)
-			return false;
-
-		p2m_mid_init(mid);
-
-		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
-			free_p2m_page(mid);
-	}
-
-	top_mfn_p = &p2m_top_mfn[topidx];
-	mid_mfn = p2m_top_mfn_p[topidx];
-
-	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
-
-	if (mid_mfn == p2m_mid_missing_mfn) {
-		/* Separately check the mid mfn level */
-		unsigned long missing_mfn;
-		unsigned long mid_mfn_mfn;
-
-		mid_mfn = alloc_p2m_page();
-		if (!mid_mfn)
-			return false;
-
-		p2m_mid_mfn_init(mid_mfn);
-
-		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
-		mid_mfn_mfn = virt_to_mfn(mid_mfn);
-		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
-			free_p2m_page(mid_mfn);
-		else
-			p2m_top_mfn_p[topidx] = mid_mfn;
-	}
-
-	if (p2m_top[topidx][mididx] == p2m_missing) {
-		/* p2m leaf page is missing */
-		unsigned long *p2m;
-
-		p2m = alloc_p2m_page();
-		if (!p2m)
-			return false;
-
-		p2m_init(p2m);
-
-		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
-			free_p2m_page(p2m);
-		else
-			mid_mfn[mididx] = virt_to_mfn(p2m);
-	}
-
-	return true;
-}
-
-/* Try to install p2m mapping; fail if intermediate bits missing */
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	unsigned topidx, mididx, idx;
-
-	if (unlikely(pfn >= MAX_P2M_PFN)) {
-		BUG_ON(mfn != INVALID_P2M_ENTRY);
-		return true;
-	}
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	if (p2m_top[topidx][mididx] == p2m_missing)
-		return mfn == INVALID_P2M_ENTRY;
-
-	p2m_top[topidx][mididx][idx] = mfn;
-
-	return true;
-}
-
-bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
-		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-		return true;
-	}
-
-	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
-		if (!alloc_p2m(pfn))
-			return false;
-
-		if (!__set_phys_to_machine(pfn, mfn))
-			return false;
-	}
-
-	return true;
-}
-
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
 {
 	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
@@ -566,6 +201,7 @@
 	offset = address & ~PAGE_MASK;
 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
+EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
 void make_lowmem_page_readonly(void *vaddr)
 {
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 9e565da..4ec8035 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -22,7 +22,7 @@
 	unsigned long flags;
 	/* need to disable interrupts until this entry is complete */
 	local_irq_save(flags);
-	__get_cpu_var(xen_mc_irq_flags) = flags;
+	__this_cpu_write(xen_mc_irq_flags, flags);
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
new file mode 100644
index 0000000..8f2251d
--- /dev/null
+++ b/arch/x86/xen/p2m.c
@@ -0,0 +1,510 @@
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                               Xen
+ *                                |
+ *     p2m_top              p2m_top_mfn
+ *       /  \                   /   \
+ * p2m_mid p2m_mid	p2m_mid_mfn p2m_mid_mfn
+ *    / \      / \         /           /
+ *  p2m p2m p2m p2m p2m p2m p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as a mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
+ * 512 and 1024 entries respectively. 
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/sched.h>
+
+#include <asm/cache.h>
+#include <asm/setup.h>
+
+#include <asm/xen/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include "xen-ops.h"
+
+static void __init m2p_override_init(void);
+
+unsigned long xen_max_p2m_pfn __read_mostly;
+
+#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
+
+#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
+
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
+
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
+
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+
+static inline unsigned p2m_top_index(unsigned long pfn)
+{
+	BUG_ON(pfn >= MAX_P2M_PFN);
+	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
+}
+
+static inline unsigned p2m_index(unsigned long pfn)
+{
+	return pfn % P2M_PER_PAGE;
+}
+
+static void p2m_top_init(unsigned long ***top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		mid[i] = virt_to_mfn(p2m_missing);
+}
+
+static void p2m_init(unsigned long *p2m)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
+void xen_build_mfn_list_list(void)
+{
+	unsigned long pfn;
+
+	/* Pre-initialize p2m_top_mfn to be completely missing */
+	if (p2m_top_mfn == NULL) {
+		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+
+		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_p_init(p2m_top_mfn_p);
+
+		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_init(p2m_top_mfn);
+	} else {
+		/* Reinitialise, mfn's all change after migration */
+		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+	}
+
+	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned mididx = p2m_mid_index(pfn);
+		unsigned long **mid;
+		unsigned long *mid_mfn_p;
+
+		mid = p2m_top[topidx];
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+
+		/* Don't bother allocating any mfn mid levels if
+		 * they're just missing, just update the stored mfn,
+		 * since all could have changed over a migrate.
+		 */
+		if (mid == p2m_mid_missing) {
+			BUG_ON(mididx);
+			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+			continue;
+		}
+
+		if (mid_mfn_p == p2m_mid_missing_mfn) {
+			/*
+			 * XXX boot-time only!  We should never find
+			 * missing parts of the mfn tree after
+			 * runtime.  extend_brk() will BUG if we call
+			 * it too late.
+			 */
+			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_mfn_init(mid_mfn_p);
+
+			p2m_top_mfn_p[topidx] = mid_mfn_p;
+		}
+
+		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
+	}
+}
+
+void xen_setup_mfn_list_list(void)
+{
+	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
+	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
+		virt_to_mfn(p2m_top_mfn);
+	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+}
+
+/* Set up p2m_top to point to the domain-builder provided p2m pages */
+void __init xen_build_dynamic_phys_to_machine(void)
+{
+	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
+	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
+	unsigned long pfn;
+
+	xen_max_p2m_pfn = max_pfn;
+
+	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_init(p2m_missing);
+
+	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_mid_init(p2m_mid_missing);
+
+	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_top_init(p2m_top);
+
+	/*
+	 * The domain builder gives us a pre-constructed p2m array in
+	 * mfn_list for all the pages initially given to us, so we just
+	 * need to graft that into our tree structure.
+	 */
+	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned mididx = p2m_mid_index(pfn);
+
+		if (p2m_top[topidx] == p2m_mid_missing) {
+			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_init(mid);
+
+			p2m_top[topidx] = mid;
+		}
+
+		p2m_top[topidx][mididx] = &mfn_list[pfn];
+	}
+
+	m2p_override_init();
+}
+
+unsigned long get_phys_to_machine(unsigned long pfn)
+{
+	unsigned topidx, mididx, idx;
+
+	if (unlikely(pfn >= MAX_P2M_PFN))
+		return INVALID_P2M_ENTRY;
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+	idx = p2m_index(pfn);
+
+	return p2m_top[topidx][mididx][idx];
+}
+EXPORT_SYMBOL_GPL(get_phys_to_machine);
+
+static void *alloc_p2m_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static void free_p2m_page(void *p)
+{
+	free_page((unsigned long)p);
+}
+
+/* 
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+	unsigned topidx, mididx;
+	unsigned long ***top_p, **mid;
+	unsigned long *top_mfn_p, *mid_mfn;
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+
+	top_p = &p2m_top[topidx];
+	mid = *top_p;
+
+	if (mid == p2m_mid_missing) {
+		/* Mid level is missing, allocate a new one */
+		mid = alloc_p2m_page();
+		if (!mid)
+			return false;
+
+		p2m_mid_init(mid);
+
+		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+			free_p2m_page(mid);
+	}
+
+	top_mfn_p = &p2m_top_mfn[topidx];
+	mid_mfn = p2m_top_mfn_p[topidx];
+
+	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+	if (mid_mfn == p2m_mid_missing_mfn) {
+		/* Separately check the mid mfn level */
+		unsigned long missing_mfn;
+		unsigned long mid_mfn_mfn;
+
+		mid_mfn = alloc_p2m_page();
+		if (!mid_mfn)
+			return false;
+
+		p2m_mid_mfn_init(mid_mfn);
+
+		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+		mid_mfn_mfn = virt_to_mfn(mid_mfn);
+		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+			free_p2m_page(mid_mfn);
+		else
+			p2m_top_mfn_p[topidx] = mid_mfn;
+	}
+
+	if (p2m_top[topidx][mididx] == p2m_missing) {
+		/* p2m leaf page is missing */
+		unsigned long *p2m;
+
+		p2m = alloc_p2m_page();
+		if (!p2m)
+			return false;
+
+		p2m_init(p2m);
+
+		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+			free_p2m_page(p2m);
+		else
+			mid_mfn[mididx] = virt_to_mfn(p2m);
+	}
+
+	return true;
+}
+
+/* Try to install p2m mapping; fail if intermediate bits missing */
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	unsigned topidx, mididx, idx;
+
+	if (unlikely(pfn >= MAX_P2M_PFN)) {
+		BUG_ON(mfn != INVALID_P2M_ENTRY);
+		return true;
+	}
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+	idx = p2m_index(pfn);
+
+	if (p2m_top[topidx][mididx] == p2m_missing)
+		return mfn == INVALID_P2M_ENTRY;
+
+	p2m_top[topidx][mididx][idx] = mfn;
+
+	return true;
+}
+
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+		return true;
+	}
+
+	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
+		if (!alloc_p2m(pfn))
+			return false;
+
+		if (!__set_phys_to_machine(pfn, mfn))
+			return false;
+	}
+
+	return true;
+}
+
+#define M2P_OVERRIDE_HASH_SHIFT	10
+#define M2P_OVERRIDE_HASH	(1 << M2P_OVERRIDE_HASH_SHIFT)
+
+static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
+static DEFINE_SPINLOCK(m2p_override_lock);
+
+static void __init m2p_override_init(void)
+{
+	unsigned i;
+
+	m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
+				   sizeof(unsigned long));
+
+	for (i = 0; i < M2P_OVERRIDE_HASH; i++)
+		INIT_LIST_HEAD(&m2p_overrides[i]);
+}
+
+static unsigned long mfn_hash(unsigned long mfn)
+{
+	return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
+}
+
+/* Add an MFN override for a particular page */
+int m2p_add_override(unsigned long mfn, struct page *page)
+{
+	unsigned long flags;
+	unsigned long pfn;
+	unsigned long address;
+	unsigned level;
+	pte_t *ptep = NULL;
+
+	pfn = page_to_pfn(page);
+	if (!PageHighMem(page)) {
+		address = (unsigned long)__va(pfn << PAGE_SHIFT);
+		ptep = lookup_address(address, &level);
+
+		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
+					"m2p_add_override: pfn %lx not mapped", pfn))
+			return -EINVAL;
+	}
+
+	page->private = mfn;
+	page->index = pfn_to_mfn(pfn);
+
+	__set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+	if (!PageHighMem(page))
+		/* Just zap old mapping for now */
+		pte_clear(&init_mm, address, ptep);
+
+	spin_lock_irqsave(&m2p_override_lock, flags);
+	list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
+	spin_unlock_irqrestore(&m2p_override_lock, flags);
+
+	return 0;
+}
+
+int m2p_remove_override(struct page *page)
+{
+	unsigned long flags;
+	unsigned long mfn;
+	unsigned long pfn;
+	unsigned long address;
+	unsigned level;
+	pte_t *ptep = NULL;
+
+	pfn = page_to_pfn(page);
+	mfn = get_phys_to_machine(pfn);
+	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
+		return -EINVAL;
+
+	if (!PageHighMem(page)) {
+		address = (unsigned long)__va(pfn << PAGE_SHIFT);
+		ptep = lookup_address(address, &level);
+
+		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
+					"m2p_remove_override: pfn %lx not mapped", pfn))
+			return -EINVAL;
+	}
+
+	spin_lock_irqsave(&m2p_override_lock, flags);
+	list_del(&page->lru);
+	spin_unlock_irqrestore(&m2p_override_lock, flags);
+	__set_phys_to_machine(pfn, page->index);
+
+	if (!PageHighMem(page))
+		set_pte_at(&init_mm, address, ptep,
+				pfn_pte(pfn, PAGE_KERNEL));
+		/* No tlb flush necessary because the caller already
+		 * left the pte unmapped. */
+
+	return 0;
+}
+
+struct page *m2p_find_override(unsigned long mfn)
+{
+	unsigned long flags;
+	struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
+	struct page *p, *ret;
+
+	ret = NULL;
+
+	spin_lock_irqsave(&m2p_override_lock, flags);
+
+	list_for_each_entry(p, bucket, lru) {
+		if (p->private == mfn) {
+			ret = p;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&m2p_override_lock, flags);
+
+	return ret;
+}
+
+unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
+{
+	struct page *p = m2p_find_override(mfn);
+	unsigned long ret = pfn;
+
+	if (p)
+		ret = page_to_pfn(p);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
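Aside: to make the three-level p2m layout described at the top of p2m.c concrete, here is a worked decomposition of a pfn into (topidx, mididx, idx), assuming a 64-bit build with 4 KiB pages so that P2M_PER_PAGE = P2M_MID_PER_PAGE = P2M_TOP_PER_PAGE = 512. This is only an illustrative standalone sketch of the same index arithmetic, not kernel code:

#include <stdio.h>

#define P2M_PER_PAGE     512UL  /* 4096 / sizeof(unsigned long) on 64-bit */
#define P2M_MID_PER_PAGE 512UL
#define P2M_TOP_PER_PAGE 512UL

int main(void)
{
	unsigned long pfn = 0x123456;  /* arbitrary sample pfn */
	unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
	unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
	unsigned long idx    = pfn % P2M_PER_PAGE;

	/* 0x123456 = 1193046 -> topidx 4, mididx 282, idx 86 */
	printf("pfn %#lx -> top %lu, mid %lu, leaf %lu\n",
	       pfn, topidx, mididx, idx);

	/* 512^3 = 134217728 pfns, i.e. 512 GB of pseudo-physical space */
	printf("max representable pfns: %lu\n",
	       P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE);
	return 0;
}

get_phys_to_machine() and __set_phys_to_machine() above perform exactly this decomposition before indexing p2m_top[topidx][mididx][idx].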
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23e061b..cc9b1e1 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@
 {
 	struct xen_spinlock *prev;
 
-	prev = __get_cpu_var(lock_spinners);
-	__get_cpu_var(lock_spinners) = xl;
+	prev = __this_cpu_read(lock_spinners);
+	__this_cpu_write(lock_spinners, xl);
 
 	wmb();			/* set lock of interest before count */
 
@@ -179,14 +179,14 @@
 	asm(LOCK_PREFIX " decw %0"
 	    : "+m" (xl->spinners) : : "memory");
 	wmb();			/* decrement count before restoring lock */
-	__get_cpu_var(lock_spinners) = prev;
+	__this_cpu_write(lock_spinners, prev);
 }
 
 static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
-	int irq = __get_cpu_var(lock_kicker_irq);
+	int irq = __this_cpu_read(lock_kicker_irq);
 	int ret;
 	u64 start;
 
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 5da5e53..067759e 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -135,24 +135,24 @@
 
 	/* Add the appropriate number of ticks of stolen time,
 	   including any left-overs from last time. */
-	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
+	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
 
 	if (stolen < 0)
 		stolen = 0;
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-	__get_cpu_var(xen_residual_stolen) = stolen;
+	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
 	   including any left-overs from last time. */
-	blocked += __get_cpu_var(xen_residual_blocked);
+	blocked += __this_cpu_read(xen_residual_blocked);
 
 	if (blocked < 0)
 		blocked = 0;
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__get_cpu_var(xen_residual_blocked) = blocked;
+	__this_cpu_write(xen_residual_blocked, blocked);
 	account_idle_ticks(ticks);
 }
 
diff --git a/arch/xtensa/include/asm/ioctls.h b/arch/xtensa/include/asm/ioctls.h
index ab18000..ccf1800 100644
--- a/arch/xtensa/include/asm/ioctls.h
+++ b/arch/xtensa/include/asm/ioctls.h
@@ -98,6 +98,7 @@
 #define TCSETSF2	_IOW('T', 45, struct termios2)
 #define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG		_IOW('T',0x36, int)  /* Generate signal on Pty slave */
 
 #define TIOCSERCONFIG	_IO('T', 83)
diff --git a/arch/xtensa/include/asm/mman.h b/arch/xtensa/include/asm/mman.h
index fca4db4..3078901 100644
--- a/arch/xtensa/include/asm/mman.h
+++ b/arch/xtensa/include/asm/mman.h
@@ -83,6 +83,9 @@
 #define MADV_MERGEABLE   12		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b1febd0..455768a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1452,10 +1452,6 @@
 		goto done;
 	}
 
-	/* Currently we do not support hierarchy deeper than two level (0,1) */
-	if (parent != cgroup->top_cgroup)
-		return ERR_PTR(-EPERM);
-
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 	if (!blkcg)
 		return ERR_PTR(-ENOMEM);
diff --git a/block/blk-core.c b/block/blk-core.c
index 4ce953f..2f4002f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,7 +33,7 @@
 
 #include "blk.h"
 
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
@@ -64,13 +64,27 @@
 		return;
 
 	cpu = part_stat_lock();
-	part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
-	if (!new_io)
+	if (!new_io) {
+		part = rq->part;
 		part_stat_inc(cpu, part, merges[rw]);
-	else {
+	} else {
+		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
+		if (!hd_struct_try_get(part)) {
+			/*
+			 * The partition is already being removed,
+			 * the request will be accounted on the disk only
+			 *
+			 * We take a reference on disk->part0 although that
+			 * partition will never be deleted, so we can treat
+			 * it as any other partition.
+			 */
+			part = &rq->rq_disk->part0;
+			hd_struct_get(part);
+		}
 		part_round_stats(cpu, part);
 		part_inc_in_flight(part, rw);
+		rq->part = part;
 	}
 
 	part_stat_unlock();
@@ -128,6 +142,7 @@
 	rq->ref_count = 1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
+	rq->part = NULL;
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -1329,9 +1344,9 @@
 		bio->bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
-		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
-				    bdev->bd_dev,
-				    bio->bi_sector - p->start_sect);
+		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
+				      bdev->bd_dev,
+				      bio->bi_sector - p->start_sect);
 	}
 }
 
@@ -1500,7 +1515,7 @@
 			goto end_io;
 
 		if (old_sector != -1)
-			trace_block_remap(q, bio, old_dev, old_sector);
+			trace_block_bio_remap(q, bio, old_dev, old_sector);
 
 		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
@@ -1776,7 +1791,7 @@
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
 		part_stat_unlock();
 	}
@@ -1796,13 +1811,14 @@
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rw);
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
@@ -2606,7 +2622,9 @@
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
 			sizeof(((struct request *)0)->cmd_flags));
 
-	kblockd_workqueue = create_workqueue("kblockd");
+	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
+	kblockd_workqueue = alloc_workqueue("kblockd",
+					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 3c7a339..b791022 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -64,7 +64,7 @@
 	rcu_read_unlock();
 }
 
-/* Called by the exitting task */
+/* Called by the exiting task */
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
@@ -74,10 +74,9 @@
 	task->io_context = NULL;
 	task_unlock(task);
 
-	if (atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (atomic_dec_and_test(&ioc->nr_tasks))
 		cfq_exit(ioc);
 
-	}
 	put_io_context(ioc);
 }
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 74bc4a7..ea85e20 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,11 +351,12 @@
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
+		part = req->part;
 
 		part_round_stats(cpu, part);
 		part_dec_in_flight(part, rq_data_dir(req));
 
+		hd_struct_put(part);
 		part_stat_unlock();
 	}
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cd59b0..501ffdf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -87,7 +87,6 @@
 	unsigned count;
 	unsigned total_weight;
 	u64 min_vdisktime;
-	struct rb_node *active;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
 			.count = 0, .min_vdisktime = 0, }
@@ -97,7 +96,7 @@
  */
 struct cfq_queue {
 	/* reference count */
-	atomic_t ref;
+	int ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -180,7 +179,6 @@
 	/* group service_tree key */
 	u64 vdisktime;
 	unsigned int weight;
-	bool on_st;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -209,7 +207,7 @@
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -563,11 +561,6 @@
 	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
-	if (st->active) {
-		cfqg = rb_entry_cfqg(st->active);
-		vdisktime = cfqg->vdisktime;
-	}
-
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
 		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
@@ -605,8 +598,8 @@
 	return cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline unsigned
+cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -632,6 +625,14 @@
 				    low_slice);
 		}
 	}
+	return slice;
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
+
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
 	cfqq->allocated_slice = slice;
@@ -646,11 +647,11 @@
 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
-		return 0;
+		return false;
 	if (time_before(jiffies, cfqq->slice_end))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
@@ -869,7 +870,7 @@
 	struct rb_node *n;
 
 	cfqg->nr_cfqq++;
-	if (cfqg->on_st)
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		return;
 
 	/*
@@ -885,7 +886,6 @@
 		cfqg->vdisktime = st->min_vdisktime;
 
 	__cfq_group_service_tree_add(st, cfqg);
-	cfqg->on_st = true;
 	st->total_weight += cfqg->weight;
 }
 
@@ -894,9 +894,6 @@
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	if (st->active == &cfqg->rb_node)
-		st->active = NULL;
-
 	BUG_ON(cfqg->nr_cfqq < 1);
 	cfqg->nr_cfqq--;
 
@@ -905,7 +902,6 @@
 		return;
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-	cfqg->on_st = false;
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
@@ -1026,11 +1022,11 @@
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
-	 * not initiliazed yet. Initialize this new group without major
+	 * not initialized yet. Initialize this new group without major
 	 * and minor info and this info will be filled in once a new thread
 	 * comes for IO. See code above.
 	 */
@@ -1071,7 +1067,7 @@
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1083,7 +1079,7 @@
 
 	cfqq->cfqg = cfqg;
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1091,11 +1087,12 @@
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
-		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
+		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	kfree(cfqg);
 }
 
@@ -1200,7 +1197,7 @@
 			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation
 		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -1672,8 +1669,11 @@
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
-	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
-		cfqq->slice_resid = cfqq->slice_end - jiffies;
+	if (timed_out) {
+		if (cfq_cfqq_slice_new(cfqq))
+			cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
+		else
+			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
 	}
 
@@ -1687,9 +1687,6 @@
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
 
-	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
-		cfqd->grp_service_tree.active = NULL;
-
 	if (cfqd->active_cic) {
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
@@ -1901,10 +1898,10 @@
 	 * in their service tree.
 	 */
 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
 			service_tree->count);
-	return 0;
+	return false;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -2040,7 +2037,7 @@
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	process_refs = cfqq->ref - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
@@ -2080,10 +2077,10 @@
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		atomic_add(process_refs, &new_cfqq->ref);
+		new_cfqq->ref += process_refs;
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		atomic_add(new_process_refs, &cfqq->ref);
+		cfqq->ref += new_process_refs;
 	}
 }
 
@@ -2116,12 +2113,7 @@
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
-
-	if (!cfqg) {
-		cfqd->serving_prio = IDLE_WORKLOAD;
-		cfqd->workload_expires = jiffies + 1;
-		return;
-	}
+	enum wl_prio_t original_prio = cfqd->serving_prio;
 
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
@@ -2134,6 +2126,9 @@
 		return;
 	}
 
+	if (original_prio != cfqd->serving_prio)
+		goto new_workload;
+
 	/*
 	 * For RT and BE, we have to choose also the type
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
@@ -2148,6 +2143,7 @@
 	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
+new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_type =
 		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
@@ -2199,7 +2195,6 @@
 	if (RB_EMPTY_ROOT(&st->rb))
 		return NULL;
 	cfqg = cfq_rb_first_group(st);
-	st->active = &cfqg->rb_node;
 	update_min_vdisktime(st);
 	return cfqg;
 }
@@ -2293,6 +2288,17 @@
 		goto keep_queue;
 	}
 
+	/*
+	 * This is a deep seek queue, but the device is much faster than
+	 * the queue can deliver; don't idle
+	 */
+	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+	    (cfq_cfqq_slice_new(cfqq) ||
+	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+		cfq_clear_cfqq_deep(cfqq);
+		cfq_clear_cfqq_idle_window(cfqq);
+	}
+
 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
@@ -2367,12 +2373,12 @@
 {
 	/* the queue hasn't finished any request, can't estimate */
 	if (cfq_cfqq_slice_new(cfqq))
-		return 1;
+		return true;
 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
 		cfqq->slice_end))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
@@ -2538,9 +2544,10 @@
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(atomic_read(&cfqq->ref) <= 0);
+	BUG_ON(cfqq->ref <= 0);
 
-	if (!atomic_dec_and_test(&cfqq->ref))
+	cfqq->ref--;
+	if (cfqq->ref)
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2843,7 +2850,7 @@
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	atomic_set(&cfqq->ref, 0);
+	cfqq->ref = 0;
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2979,11 +2986,11 @@
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		atomic_inc(&cfqq->ref);
+		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
 
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 	return cfqq;
 }
 
@@ -3265,6 +3272,10 @@
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 		return true;
 
+	/* An idle queue should not be idle now for some reason */
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+		return true;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return false;
 
@@ -3284,10 +3295,19 @@
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+	struct cfq_queue *old_cfqq = cfqd->active_queue;
+
 	cfq_log_cfqq(cfqd, cfqq, "preempt");
 	cfq_slice_expired(cfqd, 1);
 
 	/*
+	 * If the workload type has changed, don't save the slice;
+	 * otherwise the preemption doesn't happen
+	 */
+	if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+		cfqq->cfqg->saved_workload_slice = 0;
+
+	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
 	 */
@@ -3681,13 +3701,13 @@
 	}
 
 	cfqq->allocated[rw]++;
-	atomic_inc(&cfqq->ref);
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
+	cfqq->ref++;
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
 	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	return 0;
 
 queue_fail:
@@ -3862,6 +3882,10 @@
 	if (!cfqd)
 		return NULL;
 
+	/*
+	 * We don't need to take queue_lock in this routine, since we are
+	 * initializing the ioscheduler, and nobody is using cfqd
+	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3881,7 +3905,7 @@
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
@@ -3901,7 +3925,7 @@
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	atomic_inc(&cfqd->oom_cfqq.ref);
+	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
diff --git a/block/genhd.c b/block/genhd.c
index 5fa2b44..6a5b772 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -18,6 +18,7 @@
 #include <linux/buffer_head.h>
 #include <linux/mutex.h>
 #include <linux/idr.h>
+#include <linux/log2.h>
 
 #include "blk.h"
 
@@ -35,6 +36,10 @@
 
 static struct device_type disk_type;
 
+static void disk_add_events(struct gendisk *disk);
+static void disk_del_events(struct gendisk *disk);
+static void disk_release_events(struct gendisk *disk);
+
 /**
  * disk_get_part - get partition
  * @disk: disk to look partition from
@@ -239,7 +244,7 @@
 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
 
 /* index in the above - for now: assume no multimajor ranges */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % BLKDEV_MAJOR_HASH_SIZE;
 }
@@ -502,6 +507,64 @@
 	return 0;
 }
 
+void register_disk(struct gendisk *disk)
+{
+	struct device *ddev = disk_to_dev(disk);
+	struct block_device *bdev;
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+	int err;
+
+	ddev->parent = disk->driverfs_dev;
+
+	dev_set_name(ddev, disk->disk_name);
+
+	/* delay uevents, until we scanned partition table */
+	dev_set_uevent_suppress(ddev, 1);
+
+	if (device_add(ddev))
+		return;
+	if (!sysfs_deprecated) {
+		err = sysfs_create_link(block_depr, &ddev->kobj,
+					kobject_name(&ddev->kobj));
+		if (err) {
+			device_del(ddev);
+			return;
+		}
+	}
+	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
+	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+
+	/* No minors to use for partitions */
+	if (!disk_partitionable(disk))
+		goto exit;
+
+	/* No such device (e.g., media were just removed) */
+	if (!get_capacity(disk))
+		goto exit;
+
+	bdev = bdget_disk(disk, 0);
+	if (!bdev)
+		goto exit;
+
+	bdev->bd_invalidated = 1;
+	err = blkdev_get(bdev, FMODE_READ, NULL);
+	if (err < 0)
+		goto exit;
+	blkdev_put(bdev, FMODE_READ);
+
+exit:
+	/* announce disk after possible partitions are created */
+	dev_set_uevent_suppress(ddev, 0);
+	kobject_uevent(&ddev->kobj, KOBJ_ADD);
+
+	/* announce possible partitions */
+	disk_part_iter_init(&piter, disk, 0);
+	while ((part = disk_part_iter_next(&piter)))
+		kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
+	disk_part_iter_exit(&piter);
+}
+
 /**
  * add_disk - add partitioning information to kernel list
  * @disk: per-device partitioning information
@@ -551,18 +614,48 @@
 	retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
 				   "bdi");
 	WARN_ON(retval);
+
+	disk_add_events(disk);
 }
-
 EXPORT_SYMBOL(add_disk);
-EXPORT_SYMBOL(del_gendisk);	/* in partitions/check.c */
 
-void unlink_gendisk(struct gendisk *disk)
+void del_gendisk(struct gendisk *disk)
 {
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+
+	disk_del_events(disk);
+
+	/* invalidate stuff */
+	disk_part_iter_init(&piter, disk,
+			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
+	while ((part = disk_part_iter_next(&piter))) {
+		invalidate_partition(disk, part->partno);
+		delete_partition(disk, part->partno);
+	}
+	disk_part_iter_exit(&piter);
+
+	invalidate_partition(disk, 0);
+	blk_free_devt(disk_to_dev(disk)->devt);
+	set_capacity(disk, 0);
+	disk->flags &= ~GENHD_FL_UP;
+
 	sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
 	bdi_unregister(&disk->queue->backing_dev_info);
 	blk_unregister_queue(disk);
 	blk_unregister_region(disk_devt(disk), disk->minors);
+
+	part_stat_set_all(&disk->part0, 0);
+	disk->part0.stamp = 0;
+
+	kobject_put(disk->part0.holder_dir);
+	kobject_put(disk->slave_dir);
+	disk->driverfs_dev = NULL;
+	if (!sysfs_deprecated)
+		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+	device_del(disk_to_dev(disk));
 }
+EXPORT_SYMBOL(del_gendisk);
 
 /**
  * get_gendisk - get partitioning information for a given device
@@ -735,7 +828,7 @@
 	static void *p;
 
 	p = disk_seqf_start(seqf, pos);
-	if (!IS_ERR(p) && p && !*pos)
+	if (!IS_ERR_OR_NULL(p) && !*pos)
 		seq_puts(seqf, "major minor  #blocks  name\n\n");
 	return p;
 }
@@ -1005,6 +1098,7 @@
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
+	disk_release_events(disk);
 	kfree(disk->random);
 	disk_replace_part_tbl(disk, NULL);
 	free_part_stats(&disk->part0);
@@ -1110,29 +1204,6 @@
 module_init(proc_genhd_init);
 #endif /* CONFIG_PROC_FS */
 
-static void media_change_notify_thread(struct work_struct *work)
-{
-	struct gendisk *gd = container_of(work, struct gendisk, async_notify);
-	char event[] = "MEDIA_CHANGE=1";
-	char *envp[] = { event, NULL };
-
-	/*
-	 * set enviroment vars to indicate which event this is for
-	 * so that user space will know to go check the media status.
-	 */
-	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
-	put_device(gd->driverfs_dev);
-}
-
-#if 0
-void genhd_media_change_notify(struct gendisk *disk)
-{
-	get_device(disk->driverfs_dev);
-	schedule_work(&disk->async_notify);
-}
-EXPORT_SYMBOL_GPL(genhd_media_change_notify);
-#endif  /*  0  */
-
 dev_t blk_lookup_devt(const char *name, int partno)
 {
 	dev_t devt = MKDEV(0, 0);
@@ -1193,13 +1264,13 @@
 		}
 		disk->part_tbl->part[0] = &disk->part0;
 
+		hd_ref_init(&disk->part0);
+
 		disk->minors = minors;
 		rand_initialize_disk(disk);
 		disk_to_dev(disk)->class = &block_class;
 		disk_to_dev(disk)->type = &disk_type;
 		device_initialize(disk_to_dev(disk));
-		INIT_WORK(&disk->async_notify,
-			media_change_notify_thread);
 	}
 	return disk;
 }
@@ -1291,3 +1362,422 @@
 }
 
 EXPORT_SYMBOL(invalidate_partition);
+
+/*
+ * Disk events - monitor disk events like media change and eject request.
+ */
+struct disk_events {
+	struct list_head	node;		/* all disk_event's */
+	struct gendisk		*disk;		/* the associated disk */
+	spinlock_t		lock;
+
+	int			block;		/* event blocking depth */
+	unsigned int		pending;	/* events already sent out */
+	unsigned int		clearing;	/* events being cleared */
+
+	long			poll_msecs;	/* interval, -1 for default */
+	struct delayed_work	dwork;
+};
+
+static const char *disk_events_strs[] = {
+	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
+	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
+};
+
+static char *disk_uevents[] = {
+	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
+	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
+};
+
+/* list of all disk_events */
+static DEFINE_MUTEX(disk_events_mutex);
+static LIST_HEAD(disk_events);
+
+/* disable in-kernel polling by default */
+static unsigned long disk_events_dfl_poll_msecs	= 0;
+
+static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
+{
+	struct disk_events *ev = disk->ev;
+	long intv_msecs = 0;
+
+	/*
+	 * If device-specific poll interval is set, always use it.  If
+	 * the default is being used, poll iff there are events which
+	 * can't be monitored asynchronously.
+	 */
+	if (ev->poll_msecs >= 0)
+		intv_msecs = ev->poll_msecs;
+	else if (disk->events & ~disk->async_events)
+		intv_msecs = disk_events_dfl_poll_msecs;
+
+	return msecs_to_jiffies(intv_msecs);
+}
+
+static void __disk_block_events(struct gendisk *disk, bool sync)
+{
+	struct disk_events *ev = disk->ev;
+	unsigned long flags;
+	bool cancel;
+
+	spin_lock_irqsave(&ev->lock, flags);
+	cancel = !ev->block++;
+	spin_unlock_irqrestore(&ev->lock, flags);
+
+	if (cancel) {
+		if (sync)
+			cancel_delayed_work_sync(&disk->ev->dwork);
+		else
+			cancel_delayed_work(&disk->ev->dwork);
+	}
+}
+
+static void __disk_unblock_events(struct gendisk *disk, bool check_now)
+{
+	struct disk_events *ev = disk->ev;
+	unsigned long intv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ev->lock, flags);
+
+	if (WARN_ON_ONCE(ev->block <= 0))
+		goto out_unlock;
+
+	if (--ev->block)
+		goto out_unlock;
+
+	/*
+	 * Not exactly a latency critical operation, set poll timer
+	 * slack to 25% and kick event check.
+	 */
+	intv = disk_events_poll_jiffies(disk);
+	set_timer_slack(&ev->dwork.timer, intv / 4);
+	if (check_now)
+		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	else if (intv)
+		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+out_unlock:
+	spin_unlock_irqrestore(&ev->lock, flags);
+}
+
+/**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events().  Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void disk_block_events(struct gendisk *disk)
+{
+	if (disk->ev)
+		__disk_block_events(disk, true);
+}
+
+/**
+ * disk_unblock_events - unblock disk event checking
+ * @disk: disk to unblock events for
+ *
+ * Undo disk_block_events().  When the block count reaches zero, it
+ * starts events polling if configured.
+ *
+ * CONTEXT:
+ * Don't care.  Safe to call from irq context.
+ */
+void disk_unblock_events(struct gendisk *disk)
+{
+	if (disk->ev)
+		__disk_unblock_events(disk, true);
+}
+
+/**
+ * disk_check_events - schedule immediate event checking
+ * @disk: disk to check events for
+ *
+ * Schedule immediate event checking on @disk if not blocked.
+ *
+ * CONTEXT:
+ * Don't care.  Safe to call from irq context.
+ */
+void disk_check_events(struct gendisk *disk)
+{
+	if (disk->ev) {
+		__disk_block_events(disk, false);
+		__disk_unblock_events(disk, true);
+	}
+}
+EXPORT_SYMBOL_GPL(disk_check_events);
+
+/**
+ * disk_clear_events - synchronously check, clear and return pending events
+ * @disk: disk to fetch and clear events from
+ * @mask: mask of events to be fetched and cleared
+ *
+ * Disk events are synchronously checked and pending events in @mask
+ * are cleared and returned.  This ignores the block count.
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
+{
+	const struct block_device_operations *bdops = disk->fops;
+	struct disk_events *ev = disk->ev;
+	unsigned int pending;
+
+	if (!ev) {
+		/* for drivers still using the old ->media_changed method */
+		if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
+		    bdops->media_changed && bdops->media_changed(disk))
+			return DISK_EVENT_MEDIA_CHANGE;
+		return 0;
+	}
+
+	/* tell the workfn about the events being cleared */
+	spin_lock_irq(&ev->lock);
+	ev->clearing |= mask;
+	spin_unlock_irq(&ev->lock);
+
+	/* unconditionally schedule event check and wait for it to finish */
+	__disk_block_events(disk, true);
+	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	flush_delayed_work(&ev->dwork);
+	__disk_unblock_events(disk, false);
+
+	/* then, fetch and clear pending events */
+	spin_lock_irq(&ev->lock);
+	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
+	pending = ev->pending & mask;
+	ev->pending &= ~mask;
+	spin_unlock_irq(&ev->lock);
+
+	return pending;
+}
+
+static void disk_events_workfn(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+	struct gendisk *disk = ev->disk;
+	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
+	unsigned int clearing = ev->clearing;
+	unsigned int events;
+	unsigned long intv;
+	int nr_events = 0, i;
+
+	/* check events */
+	events = disk->fops->check_events(disk, clearing);
+
+	/* accumulate pending events and schedule next poll if necessary */
+	spin_lock_irq(&ev->lock);
+
+	events &= ~ev->pending;
+	ev->pending |= events;
+	ev->clearing &= ~clearing;
+
+	intv = disk_events_poll_jiffies(disk);
+	if (!ev->block && intv)
+		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+
+	spin_unlock_irq(&ev->lock);
+
+	/* tell userland about new events */
+	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
+		if (events & (1 << i))
+			envp[nr_events++] = disk_uevents[i];
+
+	if (nr_events)
+		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+}
+
+/*
+ * A disk events enabled device has the following sysfs nodes under
+ * its /sys/block/X/ directory.
+ *
+ * events		: list of all supported events
+ * events_async		: list of events which can be detected w/o polling
+ * events_poll_msecs	: polling interval, 0: disable, -1: system default
+ */
+static ssize_t __disk_events_show(unsigned int events, char *buf)
+{
+	const char *delim = "";
+	ssize_t pos = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
+		if (events & (1 << i)) {
+			pos += sprintf(buf + pos, "%s%s",
+				       delim, disk_events_strs[i]);
+			delim = " ";
+		}
+	if (pos)
+		pos += sprintf(buf + pos, "\n");
+	return pos;
+}
+
+static ssize_t disk_events_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return __disk_events_show(disk->events, buf);
+}
+
+static ssize_t disk_events_async_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return __disk_events_show(disk->async_events, buf);
+}
+
+static ssize_t disk_events_poll_msecs_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
+}
+
+static ssize_t disk_events_poll_msecs_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	long intv;
+
+	if (!count || !sscanf(buf, "%ld", &intv))
+		return -EINVAL;
+
+	if (intv < 0 && intv != -1)
+		return -EINVAL;
+
+	__disk_block_events(disk, true);
+	disk->ev->poll_msecs = intv;
+	__disk_unblock_events(disk, true);
+
+	return count;
+}
+
+static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
+static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
+static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
+			 disk_events_poll_msecs_show,
+			 disk_events_poll_msecs_store);
+
+static const struct attribute *disk_events_attrs[] = {
+	&dev_attr_events.attr,
+	&dev_attr_events_async.attr,
+	&dev_attr_events_poll_msecs.attr,
+	NULL,
+};
+
+/*
+ * The default polling interval can be specified by the kernel
+ * parameter block.events_dfl_poll_msecs which defaults to 0
+ * (disable).  This can also be modified at runtime by writing to
+ * /sys/module/block/parameters/events_dfl_poll_msecs.
+ */
+static int disk_events_set_dfl_poll_msecs(const char *val,
+					  const struct kernel_param *kp)
+{
+	struct disk_events *ev;
+	int ret;
+
+	ret = param_set_ulong(val, kp);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&disk_events_mutex);
+
+	list_for_each_entry(ev, &disk_events, node)
+		disk_check_events(ev->disk);
+
+	mutex_unlock(&disk_events_mutex);
+
+	return 0;
+}
+
+static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
+	.set	= disk_events_set_dfl_poll_msecs,
+	.get	= param_get_ulong,
+};
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX	"block."
+
+module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
+		&disk_events_dfl_poll_msecs, 0644);
+
+/*
+ * disk_{add|del|release}_events - initialize and destroy disk_events.
+ */
+static void disk_add_events(struct gendisk *disk)
+{
+	struct disk_events *ev;
+
+	if (!disk->fops->check_events || !(disk->events | disk->async_events))
+		return;
+
+	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev) {
+		pr_warn("%s: failed to initialize events\n", disk->disk_name);
+		return;
+	}
+
+	if (sysfs_create_files(&disk_to_dev(disk)->kobj,
+			       disk_events_attrs) < 0) {
+		pr_warn("%s: failed to create sysfs files for events\n",
+			disk->disk_name);
+		kfree(ev);
+		return;
+	}
+
+	disk->ev = ev;
+
+	INIT_LIST_HEAD(&ev->node);
+	ev->disk = disk;
+	spin_lock_init(&ev->lock);
+	ev->block = 1;
+	ev->poll_msecs = -1;
+	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
+
+	mutex_lock(&disk_events_mutex);
+	list_add_tail(&ev->node, &disk_events);
+	mutex_unlock(&disk_events_mutex);
+
+	/*
+	 * Block count is initialized to 1 and the following initial
+	 * unblock kicks it into action.
+	 */
+	__disk_unblock_events(disk, true);
+}
+
+static void disk_del_events(struct gendisk *disk)
+{
+	if (!disk->ev)
+		return;
+
+	__disk_block_events(disk, true);
+
+	mutex_lock(&disk_events_mutex);
+	list_del_init(&disk->ev->node);
+	mutex_unlock(&disk_events_mutex);
+
+	sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
+}
+
+static void disk_release_events(struct gendisk *disk)
+{
+	/* the block count should be 1 from disk_del_events() */
+	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
+	kfree(disk->ev);
+}
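The disk events code above replaces the old media-change notification path: a driver advertises which events it can report in gendisk->events (and which of those it can report without polling in ->async_events) and implements the ->check_events() hook added to block_device_operations elsewhere in this series, returning the events that are currently pending. A rough, hypothetical sketch of a driver hooking in (my_drive_media_changed() and my_setup_events() are made-up names standing in for real driver code):

	#include <linux/module.h>
	#include <linux/genhd.h>
	#include <linux/blkdev.h>

	/* hypothetical hardware query, stands in for real driver logic */
	static bool my_drive_media_changed(void *drive)
	{
		return false;
	}

	static unsigned int my_check_events(struct gendisk *disk,
					    unsigned int clearing)
	{
		/* @clearing names the events the caller is about to consume */
		if (my_drive_media_changed(disk->private_data))
			return DISK_EVENT_MEDIA_CHANGE;
		return 0;
	}

	static const struct block_device_operations my_fops = {
		.owner		= THIS_MODULE,
		.check_events	= my_check_events,
	};

	/* hypothetical probe-time setup, run before add_disk() */
	static void my_setup_events(struct gendisk *gd)
	{
		gd->fops = &my_fops;
		gd->events = DISK_EVENT_MEDIA_CHANGE;	/* supported events */
		gd->async_events = 0;			/* none are async, so the
							 * core may poll if configured */
	}

With a setup like this, the events, events_async and events_poll_msecs attributes described in the comment block above appear under /sys/block/<disk>/, and polling stays off unless block.events_dfl_poll_msecs or the per-disk events_poll_msecs is set.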
diff --git a/block/ioctl.c b/block/ioctl.c
index a9a302e..9049d46 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,11 +294,12 @@
 			return -EINVAL;
 		if (get_user(n, (int __user *) arg))
 			return -EFAULT;
-		if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
+		if (!(mode & FMODE_EXCL) &&
+		    blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
 			return -EBUSY;
 		ret = set_blocksize(bdev, n);
 		if (!(mode & FMODE_EXCL))
-			bd_release(bdev);
+			blkdev_put(bdev, mode | FMODE_EXCL);
 		return ret;
 	case BLKPG:
 		ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e4bac29..4b7cb0e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -110,7 +110,6 @@
 
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
 	help
 	  Efficient table driven implementation of multiplications in the
 	  field GF(2^128).  This is needed by some cypher modes. This
@@ -539,8 +538,9 @@
 
 config CRYPTO_AES_NI_INTEL
 	tristate "AES cipher algorithms (AES-NI)"
-	depends on (X86 || UML_X86) && 64BIT
-	select CRYPTO_AES_X86_64
+	depends on (X86 || UML_X86)
+	select CRYPTO_AES_X86_64 if 64BIT
+	select CRYPTO_AES_586 if !64BIT
 	select CRYPTO_CRYPTD
 	select CRYPTO_ALGAPI
 	select CRYPTO_FPU
@@ -563,9 +563,10 @@
 
 	  See <http://csrc.nist.gov/encryption/aes/> for more information.
 
-	  In addition to AES cipher algorithm support, the
-	  acceleration for some popular block cipher mode is supported
-	  too, including ECB, CBC, CTR, LRW, PCBC, XTS.
+	  In addition to AES cipher algorithm support, the acceleration
+	  for some popular block cipher modes is supported too, including
+	  ECB, CBC, LRW, PCBC and XTS. The 64-bit version has additional
+	  acceleration for CTR.
 
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
@@ -841,6 +842,27 @@
 	  ANSI X9.31 A.2.4. Note that this option must be enabled if
 	  CRYPTO_FIPS is selected
 
+config CRYPTO_USER_API
+	tristate
+
+config CRYPTO_USER_API_HASH
+	tristate "User-space interface for hash algorithms"
+	depends on NET
+	select CRYPTO_HASH
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-space interface for hash
+	  algorithms.
+
+config CRYPTO_USER_API_SKCIPHER
+	tristate "User-space interface for symmetric key cipher algorithms"
+	depends on NET
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-space interface for symmetric
+	  key cipher algorithms.
+
 source "drivers/crypto/Kconfig"
 
 endif	# if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index 423b7de..e9a399c 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,32 +3,32 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
-crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
+crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
 obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
 
 obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
 
-crypto_blkcipher-objs := ablkcipher.o
-crypto_blkcipher-objs += blkcipher.o
+crypto_blkcipher-y := ablkcipher.o
+crypto_blkcipher-y += blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 
-crypto_hash-objs += ahash.o
-crypto_hash-objs += shash.o
+crypto_hash-y += ahash.o
+crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 
-cryptomgr-objs := algboss.o testmgr.o
+cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
@@ -85,6 +85,9 @@
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
+obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
+obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
new file mode 100644
index 0000000..940d70c
--- /dev/null
+++ b/crypto/af_alg.c
@@ -0,0 +1,483 @@
+/*
+ * af_alg: User-space algorithm interface
+ *
+ * This file provides the user-space API for algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/atomic.h>
+#include <crypto/if_alg.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/rwsem.h>
+
+struct alg_type_list {
+	const struct af_alg_type *type;
+	struct list_head list;
+};
+
+static atomic_long_t alg_memory_allocated;
+
+static struct proto alg_proto = {
+	.name			= "ALG",
+	.owner			= THIS_MODULE,
+	.memory_allocated	= &alg_memory_allocated,
+	.obj_size		= sizeof(struct alg_sock),
+};
+
+static LIST_HEAD(alg_types);
+static DECLARE_RWSEM(alg_types_sem);
+
+static const struct af_alg_type *alg_get_type(const char *name)
+{
+	const struct af_alg_type *type = ERR_PTR(-ENOENT);
+	struct alg_type_list *node;
+
+	down_read(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (strcmp(node->type->name, name))
+			continue;
+
+		if (try_module_get(node->type->owner))
+			type = node->type;
+		break;
+	}
+	up_read(&alg_types_sem);
+
+	return type;
+}
+
+int af_alg_register_type(const struct af_alg_type *type)
+{
+	struct alg_type_list *node;
+	int err = -EEXIST;
+
+	down_write(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (!strcmp(node->type->name, type->name))
+			goto unlock;
+	}
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!node)
+		goto unlock;
+
+	type->ops->owner = THIS_MODULE;
+	node->type = type;
+	list_add(&node->list, &alg_types);
+	err = 0;
+
+unlock:
+	up_write(&alg_types_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_register_type);
+
+int af_alg_unregister_type(const struct af_alg_type *type)
+{
+	struct alg_type_list *node;
+	int err = -ENOENT;
+
+	down_write(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (strcmp(node->type->name, type->name))
+			continue;
+
+		list_del(&node->list);
+		kfree(node);
+		err = 0;
+		break;
+	}
+	up_write(&alg_types_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_unregister_type);
+
+static void alg_do_release(const struct af_alg_type *type, void *private)
+{
+	if (!type)
+		return;
+
+	type->release(private);
+	module_put(type->owner);
+}
+
+int af_alg_release(struct socket *sock)
+{
+	if (sock->sk)
+		sock_put(sock->sk);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_release);
+
+static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct sockaddr_alg *sa = (void *)uaddr;
+	const struct af_alg_type *type;
+	void *private;
+
+	if (sock->state == SS_CONNECTED)
+		return -EINVAL;
+
+	if (addr_len != sizeof(*sa))
+		return -EINVAL;
+
+	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+	sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
+
+	type = alg_get_type(sa->salg_type);
+	if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
+		request_module("algif-%s", sa->salg_type);
+		type = alg_get_type(sa->salg_type);
+	}
+
+	if (IS_ERR(type))
+		return PTR_ERR(type);
+
+	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+	if (IS_ERR(private)) {
+		module_put(type->owner);
+		return PTR_ERR(private);
+	}
+
+	lock_sock(sk);
+
+	swap(ask->type, type);
+	swap(ask->private, private);
+
+	release_sock(sk);
+
+	alg_do_release(type, private);
+
+	return 0;
+}
+
+static int alg_setkey(struct sock *sk, char __user *ukey,
+		      unsigned int keylen)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type = ask->type;
+	u8 *key;
+	int err;
+
+	key = sock_kmalloc(sk, keylen, GFP_KERNEL);
+	if (!key)
+		return -ENOMEM;
+
+	err = -EFAULT;
+	if (copy_from_user(key, ukey, keylen))
+		goto out;
+
+	err = type->setkey(ask->private, key, keylen);
+
+out:
+	sock_kfree_s(sk, key, keylen);
+
+	return err;
+}
+
+static int alg_setsockopt(struct socket *sock, int level, int optname,
+			  char __user *optval, unsigned int optlen)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type;
+	int err = -ENOPROTOOPT;
+
+	lock_sock(sk);
+	type = ask->type;
+
+	if (level != SOL_ALG || !type)
+		goto unlock;
+
+	switch (optname) {
+	case ALG_SET_KEY:
+		if (sock->state == SS_CONNECTED)
+			goto unlock;
+		if (!type->setkey)
+			goto unlock;
+
+		err = alg_setkey(sk, optval, optlen);
+	}
+
+unlock:
+	release_sock(sk);
+
+	return err;
+}
+
+int af_alg_accept(struct sock *sk, struct socket *newsock)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type;
+	struct sock *sk2;
+	int err;
+
+	lock_sock(sk);
+	type = ask->type;
+
+	err = -EINVAL;
+	if (!type)
+		goto unlock;
+
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+	err = -ENOMEM;
+	if (!sk2)
+		goto unlock;
+
+	sock_init_data(newsock, sk2);
+	sock_graft(sk2, newsock);
+
+	err = type->accept(ask->private, sk2);
+	if (err) {
+		sk_free(sk2);
+		goto unlock;
+	}
+
+	sk2->sk_family = PF_ALG;
+
+	sock_hold(sk);
+	alg_sk(sk2)->parent = sk;
+	alg_sk(sk2)->type = type;
+
+	newsock->ops = type->ops;
+	newsock->state = SS_CONNECTED;
+
+	err = 0;
+
+unlock:
+	release_sock(sk);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_accept);
+
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	return af_alg_accept(sock->sk, newsock);
+}
+
+static const struct proto_ops alg_proto_ops = {
+	.family		=	PF_ALG,
+	.owner		=	THIS_MODULE,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.sendpage	=	sock_no_sendpage,
+	.sendmsg	=	sock_no_sendmsg,
+	.recvmsg	=	sock_no_recvmsg,
+	.poll		=	sock_no_poll,
+
+	.bind		=	alg_bind,
+	.release	=	af_alg_release,
+	.setsockopt	=	alg_setsockopt,
+	.accept		=	alg_accept,
+};
+
+static void alg_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+
+	alg_do_release(ask->type, ask->private);
+}
+
+static int alg_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
+{
+	struct sock *sk;
+	int err;
+
+	if (sock->type != SOCK_SEQPACKET)
+		return -ESOCKTNOSUPPORT;
+	if (protocol != 0)
+		return -EPROTONOSUPPORT;
+
+	err = -ENOMEM;
+	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+	if (!sk)
+		goto out;
+
+	sock->ops = &alg_proto_ops;
+	sock_init_data(sock, sk);
+
+	sk->sk_family = PF_ALG;
+	sk->sk_destruct = alg_sock_destruct;
+
+	return 0;
+out:
+	return err;
+}
+
+static const struct net_proto_family alg_family = {
+	.family	=	PF_ALG,
+	.create	=	alg_create,
+	.owner	=	THIS_MODULE,
+};
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+		   int write)
+{
+	unsigned long from = (unsigned long)addr;
+	unsigned long npages;
+	unsigned off;
+	int err;
+	int i;
+
+	err = -EFAULT;
+	if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
+		goto out;
+
+	off = from & ~PAGE_MASK;
+	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (npages > ALG_MAX_PAGES)
+		npages = ALG_MAX_PAGES;
+
+	err = get_user_pages_fast(from, npages, write, sgl->pages);
+	if (err < 0)
+		goto out;
+
+	npages = err;
+	err = -EINVAL;
+	if (WARN_ON(npages == 0))
+		goto out;
+
+	err = 0;
+
+	sg_init_table(sgl->sg, npages);
+
+	for (i = 0; i < npages; i++) {
+		int plen = min_t(int, len, PAGE_SIZE - off);
+
+		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
+
+		off = 0;
+		len -= plen;
+		err += plen;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_make_sg);
+
+void af_alg_free_sg(struct af_alg_sgl *sgl)
+{
+	int i;
+
+	i = 0;
+	do {
+		put_page(sgl->pages[i]);
+	} while (!sg_is_last(sgl->sg + (i++)));
+}
+EXPORT_SYMBOL_GPL(af_alg_free_sg);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+{
+	struct cmsghdr *cmsg;
+
+	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+		if (cmsg->cmsg_level != SOL_ALG)
+			continue;
+
+		switch(cmsg->cmsg_type) {
+		case ALG_SET_IV:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
+				return -EINVAL;
+			con->iv = (void *)CMSG_DATA(cmsg);
+			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
+						      sizeof(*con->iv)))
+				return -EINVAL;
+			break;
+
+		case ALG_SET_OP:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
+				return -EINVAL;
+			con->op = *(u32 *)CMSG_DATA(cmsg);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
+{
+	switch (err) {
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&completion->completion);
+		INIT_COMPLETION(completion->completion);
+		err = completion->err;
+		break;
+	};
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
+
+void af_alg_complete(struct crypto_async_request *req, int err)
+{
+	struct af_alg_completion *completion = req->data;
+
+	completion->err = err;
+	complete(&completion->completion);
+}
+EXPORT_SYMBOL_GPL(af_alg_complete);
+
+static int __init af_alg_init(void)
+{
+	int err = proto_register(&alg_proto, 0);
+
+	if (err)
+		goto out;
+
+	err = sock_register(&alg_family);
+	if (err != 0)
+		goto out_unregister_proto;
+
+out:
+	return err;
+
+out_unregister_proto:
+	proto_unregister(&alg_proto);
+	goto out;
+}
+
+static void __exit af_alg_exit(void)
+{
+	sock_unregister(PF_ALG);
+	proto_unregister(&alg_proto);
+}
+
+module_init(af_alg_init);
+module_exit(af_alg_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_ALG);
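From user space, the code above is driven through a two-level socket API: a program opens an AF_ALG socket, bind()s it to a (salg_type, salg_name) pair, optionally installs a key with setsockopt(SOL_ALG, ALG_SET_KEY, ...), and then accept()s one or more operation sockets on which the actual requests are issued. A minimal sketch of that skeleton, assuming the new <linux/if_alg.h> user-space header from this series is installed (the fallback defines mirror the protocol constants used in the kernel at this point):

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	#ifndef AF_ALG
	#define AF_ALG	38
	#endif
	#ifndef SOL_ALG
	#define SOL_ALG	279
	#endif

	/* returns an operation fd for the given algorithm, or -1 on error */
	static int alg_open(const char *type, const char *name,
			    const void *key, int keylen)
	{
		struct sockaddr_alg sa = { .salg_family = AF_ALG };
		int tfmfd, opfd;

		strncpy((char *)sa.salg_type, type, sizeof(sa.salg_type) - 1);
		strncpy((char *)sa.salg_name, name, sizeof(sa.salg_name) - 1);

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		if (tfmfd < 0)
			return -1;
		if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
		    (key && setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY,
				       key, keylen) < 0)) {
			close(tfmfd);
			return -1;
		}
		/* each accept() yields an independent operation context */
		opfd = accept(tfmfd, NULL, 0);
		close(tfmfd);
		return opfd;
	}

Two details of the code above show up here: alg_setsockopt() only accepts ALG_SET_KEY on the parent (unconnected) socket, so the key must be installed before accept(); and closing tfmfd right after accept() is fine because af_alg_accept() makes the operation socket hold a reference on its parent, though keeping it open would allow further accept() calls or key changes.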
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
new file mode 100644
index 0000000..62122a1
--- /dev/null
+++ b/crypto/algif_hash.c
@@ -0,0 +1,319 @@
+/*
+ * algif_hash: User-space interface for hash algorithms
+ *
+ * This file provides the user-space API for hash algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct hash_ctx {
+	struct af_alg_sgl sgl;
+
+	u8 *result;
+
+	struct af_alg_completion completion;
+
+	unsigned int len;
+	bool more;
+
+	struct ahash_request req;
+};
+
+static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
+			struct msghdr *msg, size_t ignored)
+{
+	int limit = ALG_MAX_PAGES * PAGE_SIZE;
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	unsigned long iovlen;
+	struct iovec *iov;
+	long copied = 0;
+	int err;
+
+	if (limit > sk->sk_sndbuf)
+		limit = sk->sk_sndbuf;
+
+	lock_sock(sk);
+	if (!ctx->more) {
+		err = crypto_ahash_init(&ctx->req);
+		if (err)
+			goto unlock;
+	}
+
+	ctx->more = 0;
+
+	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+	     iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+
+		while (seglen) {
+			int len = min_t(unsigned long, seglen, limit);
+			int newlen;
+
+			newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
+			if (newlen < 0)
+				goto unlock;
+
+			ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
+						newlen);
+
+			err = af_alg_wait_for_completion(
+				crypto_ahash_update(&ctx->req),
+				&ctx->completion);
+
+			af_alg_free_sg(&ctx->sgl);
+
+			if (err)
+				goto unlock;
+
+			seglen -= newlen;
+			from += newlen;
+			copied += newlen;
+		}
+	}
+
+	err = 0;
+
+	ctx->more = msg->msg_flags & MSG_MORE;
+	if (!ctx->more) {
+		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+						 &ctx->completion);
+	}
+
+unlock:
+	release_sock(sk);
+
+	return err ?: copied;
+}
+
+static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+			     int offset, size_t size, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	int err;
+
+	lock_sock(sk);
+	sg_init_table(ctx->sgl.sg, 1);
+	sg_set_page(ctx->sgl.sg, page, size, offset);
+
+	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+
+	if (!(flags & MSG_MORE)) {
+		if (ctx->more)
+			err = crypto_ahash_finup(&ctx->req);
+		else
+			err = crypto_ahash_digest(&ctx->req);
+	} else {
+		if (!ctx->more) {
+			err = crypto_ahash_init(&ctx->req);
+			if (err)
+				goto unlock;
+		}
+
+		err = crypto_ahash_update(&ctx->req);
+	}
+
+	err = af_alg_wait_for_completion(err, &ctx->completion);
+	if (err)
+		goto unlock;
+
+	ctx->more = flags & MSG_MORE;
+
+unlock:
+	release_sock(sk);
+
+	return err ?: size;
+}
+
+static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+			struct msghdr *msg, size_t len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	int err;
+
+	if (len > ds)
+		len = ds;
+	else if (len < ds)
+		msg->msg_flags |= MSG_TRUNC;
+
+	lock_sock(sk);
+	if (ctx->more) {
+		ctx->more = 0;
+		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+						 &ctx->completion);
+		if (err)
+			goto unlock;
+	}
+
+	err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
+
+unlock:
+	release_sock(sk);
+
+	return err ?: len;
+}
+
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	struct ahash_request *req = &ctx->req;
+	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+	struct sock *sk2;
+	struct alg_sock *ask2;
+	struct hash_ctx *ctx2;
+	int err;
+
+	err = crypto_ahash_export(req, state);
+	if (err)
+		return err;
+
+	err = af_alg_accept(ask->parent, newsock);
+	if (err)
+		return err;
+
+	sk2 = newsock->sk;
+	ask2 = alg_sk(sk2);
+	ctx2 = ask2->private;
+	ctx2->more = 1;
+
+	err = crypto_ahash_import(&ctx2->req, state);
+	if (err) {
+		sock_orphan(sk2);
+		sock_put(sk2);
+	}
+
+	return err;
+}
+
+static struct proto_ops algif_hash_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.setsockopt	=	sock_no_setsockopt,
+	.poll		=	sock_no_poll,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	hash_sendmsg,
+	.sendpage	=	hash_sendpage,
+	.recvmsg	=	hash_recvmsg,
+	.accept		=	hash_accept,
+};
+
+static void *hash_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_ahash(name, type, mask);
+}
+
+static void hash_release(void *private)
+{
+	crypto_free_ahash(private);
+}
+
+static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+	return crypto_ahash_setkey(private, key, keylen);
+}
+
+static void hash_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+
+	sock_kfree_s(sk, ctx->result,
+		     crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+	struct hash_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+	unsigned ds = crypto_ahash_digestsize(private);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result) {
+		sock_kfree_s(sk, ctx, len);
+		return -ENOMEM;
+	}
+
+	memset(ctx->result, 0, ds);
+
+	ctx->len = len;
+	ctx->more = 0;
+	af_alg_init_completion(&ctx->completion);
+
+	ask->private = ctx;
+
+	ahash_request_set_tfm(&ctx->req, private);
+	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   af_alg_complete, &ctx->completion);
+
+	sk->sk_destruct = hash_sock_destruct;
+
+	return 0;
+}
+
+static const struct af_alg_type algif_type_hash = {
+	.bind		=	hash_bind,
+	.release	=	hash_release,
+	.setkey		=	hash_setkey,
+	.accept		=	hash_accept_parent,
+	.ops		=	&algif_hash_ops,
+	.name		=	"hash",
+	.owner		=	THIS_MODULE
+};
+
+static int __init algif_hash_init(void)
+{
+	return af_alg_register_type(&algif_type_hash);
+}
+
+static void __exit algif_hash_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_hash);
+	BUG_ON(err);
+}
+
+module_init(algif_hash_init);
+module_exit(algif_hash_exit);
+MODULE_LICENSE("GPL");
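Tying the hash interface together end to end: the parent socket is bound to type "hash" plus an algorithm name, data is pushed into the accepted operation socket with send() (MSG_MORE keeps the hash state open across calls, mirroring ctx->more above), and the digest is then read() back. A small, self-contained sketch computing a SHA-1 digest, assuming AF_ALG and <linux/if_alg.h> are available to user space and skipping error handling for brevity (the 20-byte buffer is specific to SHA-1):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family	= AF_ALG,
			.salg_type	= "hash",
			.salg_name	= "sha1",
		};
		unsigned char digest[20];	/* SHA-1 digest size */
		int tfmfd, opfd, i;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		/* passing MSG_MORE here would keep the hash open for more data */
		send(opfd, "abc", 3, 0);
		read(opfd, digest, sizeof(digest));

		for (i = 0; i < (int)sizeof(digest); i++)
			printf("%02x", digest[i]);
		printf("\n");

		close(opfd);
		close(tfmfd);
		return 0;
	}

For keyed hashes such as hmac(sha1), the only difference is a setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen) on the parent socket before accept().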
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
new file mode 100644
index 0000000..6a6dfc0
--- /dev/null
+++ b/crypto/algif_skcipher.c
@@ -0,0 +1,632 @@
+/*
+ * algif_skcipher: User-space interface for skcipher algorithms
+ *
+ * This file provides the user-space API for symmetric key ciphers.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct skcipher_sg_list {
+	struct list_head list;
+
+	int cur;
+
+	struct scatterlist sg[0];
+};
+
+struct skcipher_ctx {
+	struct list_head tsgl;
+	struct af_alg_sgl rsgl;
+
+	void *iv;
+
+	struct af_alg_completion completion;
+
+	unsigned used;
+
+	unsigned int len;
+	bool more;
+	bool merge;
+	bool enc;
+
+	struct ablkcipher_request req;
+};
+
+#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
+		      sizeof(struct scatterlist) - 1)
+
+static inline int skcipher_sndbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->used, 0);
+}
+
+static inline bool skcipher_writable(struct sock *sk)
+{
+	return PAGE_SIZE <= skcipher_sndbuf(sk);
+}
+
+static int skcipher_alloc_sgl(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg = NULL;
+
+	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+	if (!list_empty(&ctx->tsgl))
+		sg = sgl->sg;
+
+	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+		sgl = sock_kmalloc(sk, sizeof(*sgl) +
+				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+				   GFP_KERNEL);
+		if (!sgl)
+			return -ENOMEM;
+
+		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+		sgl->cur = 0;
+
+		if (sg)
+			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+		list_add_tail(&sgl->list, &ctx->tsgl);
+	}
+
+	return 0;
+}
+
+static void skcipher_pull_sgl(struct sock *sk, int used)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	int i;
+
+	while (!list_empty(&ctx->tsgl)) {
+		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
+				       list);
+		sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			int plen = min_t(int, used, sg[i].length);
+
+			if (!sg_page(sg + i))
+				continue;
+
+			sg[i].length -= plen;
+			sg[i].offset += plen;
+
+			used -= plen;
+			ctx->used -= plen;
+
+			if (sg[i].length)
+				return;
+
+			put_page(sg_page(sg + i));
+			sg_assign_page(sg + i, NULL);
+		}
+
+		list_del(&sgl->list);
+		sock_kfree_s(sk, sgl,
+			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
+					    (MAX_SGL_ENTS + 1));
+	}
+
+	if (!ctx->used)
+		ctx->merge = 0;
+}
+
+static void skcipher_free_sgl(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	skcipher_pull_sgl(sk, ctx->used);
+}
+
+static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
+{
+	long timeout;
+	DEFINE_WAIT(wait);
+	int err = -ERESTARTSYS;
+
+	if (flags & MSG_DONTWAIT)
+		return -EAGAIN;
+
+	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
+			err = 0;
+			break;
+		}
+	}
+	finish_wait(sk_sleep(sk), &wait);
+
+	return err;
+}
+
+static void skcipher_wmem_wakeup(struct sock *sk)
+{
+	struct socket_wq *wq;
+
+	if (!skcipher_writable(sk))
+		return;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+							   POLLRDNORM |
+							   POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	rcu_read_unlock();
+}
+
+static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	long timeout;
+	DEFINE_WAIT(wait);
+	int err = -ERESTARTSYS;
+
+	if (flags & MSG_DONTWAIT) {
+		return -EAGAIN;
+	}
+
+	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, ctx->used)) {
+			err = 0;
+			break;
+		}
+	}
+	finish_wait(sk_sleep(sk), &wait);
+
+	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+	return err;
+}
+
+static void skcipher_data_wakeup(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct socket_wq *wq;
+
+	if (!ctx->used)
+		return;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+							   POLLRDNORM |
+							   POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	rcu_read_unlock();
+}
+
+static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t size)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct skcipher_sg_list *sgl;
+	struct af_alg_control con = {};
+	long copied = 0;
+	bool enc = 0;
+	int err;
+	int i;
+
+	if (msg->msg_controllen) {
+		err = af_alg_cmsg_send(msg, &con);
+		if (err)
+			return err;
+
+		switch (con.op) {
+		case ALG_OP_ENCRYPT:
+			enc = 1;
+			break;
+		case ALG_OP_DECRYPT:
+			enc = 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (con.iv && con.iv->ivlen != ivsize)
+			return -EINVAL;
+	}
+
+	err = -EINVAL;
+
+	lock_sock(sk);
+	if (!ctx->more && ctx->used)
+		goto unlock;
+
+	if (!ctx->used) {
+		ctx->enc = enc;
+		if (con.iv)
+			memcpy(ctx->iv, con.iv->iv, ivsize);
+	}
+
+	while (size) {
+		struct scatterlist *sg;
+		unsigned long len = size;
+		int plen;
+
+		if (ctx->merge) {
+			sgl = list_entry(ctx->tsgl.prev,
+					 struct skcipher_sg_list, list);
+			sg = sgl->sg + sgl->cur - 1;
+			len = min_t(unsigned long, len,
+				    PAGE_SIZE - sg->offset - sg->length);
+
+			err = memcpy_fromiovec(page_address(sg_page(sg)) +
+					       sg->offset + sg->length,
+					       msg->msg_iov, len);
+			if (err)
+				goto unlock;
+
+			sg->length += len;
+			ctx->merge = (sg->offset + sg->length) &
+				     (PAGE_SIZE - 1);
+
+			ctx->used += len;
+			copied += len;
+			size -= len;
+			continue;
+		}
+
+		if (!skcipher_writable(sk)) {
+			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
+			if (err)
+				goto unlock;
+		}
+
+		len = min_t(unsigned long, len, skcipher_sndbuf(sk));
+
+		err = skcipher_alloc_sgl(sk);
+		if (err)
+			goto unlock;
+
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+		sg = sgl->sg;
+		do {
+			i = sgl->cur;
+			plen = min_t(int, len, PAGE_SIZE);
+
+			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+			err = -ENOMEM;
+			if (!sg_page(sg + i))
+				goto unlock;
+
+			err = memcpy_fromiovec(page_address(sg_page(sg + i)),
+					       msg->msg_iov, plen);
+			if (err) {
+				__free_page(sg_page(sg + i));
+				sg_assign_page(sg + i, NULL);
+				goto unlock;
+			}
+
+			sg[i].length = plen;
+			len -= plen;
+			ctx->used += plen;
+			copied += plen;
+			size -= plen;
+			sgl->cur++;
+		} while (len && sgl->cur < MAX_SGL_ENTS);
+
+		ctx->merge = plen & (PAGE_SIZE - 1);
+	}
+
+	err = 0;
+
+	ctx->more = msg->msg_flags & MSG_MORE;
+	if (!ctx->more && !list_empty(&ctx->tsgl))
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+	skcipher_data_wakeup(sk);
+	release_sock(sk);
+
+	return copied ?: err;
+}
+
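+/* sendpage: attach a reference to the caller's page directly to the tx scatterlist, avoiding a copy. */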
+static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+				 int offset, size_t size, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	int err = -EINVAL;
+
+	lock_sock(sk);
+	if (!ctx->more && ctx->used)
+		goto unlock;
+
+	if (!size)
+		goto done;
+
+	if (!skcipher_writable(sk)) {
+		err = skcipher_wait_for_wmem(sk, flags);
+		if (err)
+			goto unlock;
+	}
+
+	err = skcipher_alloc_sgl(sk);
+	if (err)
+		goto unlock;
+
+	ctx->merge = 0;
+	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+	get_page(page);
+	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+	sgl->cur++;
+	ctx->used += size;
+
+done:
+	ctx->more = flags & MSG_MORE;
+	if (!ctx->more && !list_empty(&ctx->tsgl))
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+	skcipher_data_wakeup(sk);
+	release_sock(sk);
+
+	return err ?: size;
+}
+
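+/*
+ * recvmsg: run the ablkcipher over the queued tx data, writing the result
+ * into the caller's iovec pages and releasing consumed tx buffers as it goes.
+ */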
+static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t ignored, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
+		&ctx->req));
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	unsigned long iovlen;
+	struct iovec *iov;
+	int err = -EAGAIN;
+	int used;
+	long copied = 0;
+
+	lock_sock(sk);
+	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+	     iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+
+		while (seglen) {
+			sgl = list_first_entry(&ctx->tsgl,
+					       struct skcipher_sg_list, list);
+			sg = sgl->sg;
+
+			while (!sg->length)
+				sg++;
+
+			used = ctx->used;
+			if (!used) {
+				err = skcipher_wait_for_data(sk, flags);
+				if (err)
+					goto unlock;
+			}
+
+			used = min_t(unsigned long, used, seglen);
+
+			used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
+			err = used;
+			if (err < 0)
+				goto unlock;
+
+			if (ctx->more || used < ctx->used)
+				used -= used % bs;
+
+			err = -EINVAL;
+			if (!used)
+				goto free;
+
+			ablkcipher_request_set_crypt(&ctx->req, sg,
+						     ctx->rsgl.sg, used,
+						     ctx->iv);
+
+			err = af_alg_wait_for_completion(
+				ctx->enc ?
+					crypto_ablkcipher_encrypt(&ctx->req) :
+					crypto_ablkcipher_decrypt(&ctx->req),
+				&ctx->completion);
+
+free:
+			af_alg_free_sg(&ctx->rsgl);
+
+			if (err)
+				goto unlock;
+
+			copied += used;
+			from += used;
+			seglen -= used;
+			skcipher_pull_sgl(sk, used);
+		}
+	}
+
+	err = 0;
+
+unlock:
+	skcipher_wmem_wakeup(sk);
+	release_sock(sk);
+
+	return copied ?: err;
+}
+
+static unsigned int skcipher_poll(struct file *file, struct socket *sock,
+				  poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned int mask;
+
+	sock_poll_wait(file, sk_sleep(sk), wait);
+	mask = 0;
+
+	if (ctx->used)
+		mask |= POLLIN | POLLRDNORM;
+
+	if (skcipher_writable(sk))
+		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+	return mask;
+}
+
+static struct proto_ops algif_skcipher_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	skcipher_sendmsg,
+	.sendpage	=	skcipher_sendpage,
+	.recvmsg	=	skcipher_recvmsg,
+	.poll		=	skcipher_poll,
+};
+
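+/* af_alg type hooks: allocate, set the key on, and free the underlying ablkcipher. */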
+static void *skcipher_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_ablkcipher(name, type, mask);
+}
+
+static void skcipher_release(void *private)
+{
+	crypto_free_ablkcipher(private);
+}
+
+static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+	return crypto_ablkcipher_setkey(private, key, keylen);
+}
+
+static void skcipher_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+
+	skcipher_free_sgl(sk);
+	sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
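+/*
+ * Set up the context for a newly accepted operation socket: allocate the
+ * ablkcipher request and a zeroed IV, and initialise the (empty) tx list.
+ */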
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+	struct skcipher_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
+			       GFP_KERNEL);
+	if (!ctx->iv) {
+		sock_kfree_s(sk, ctx, len);
+		return -ENOMEM;
+	}
+
+	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+
+	INIT_LIST_HEAD(&ctx->tsgl);
+	ctx->len = len;
+	ctx->used = 0;
+	ctx->more = 0;
+	ctx->merge = 0;
+	ctx->enc = 0;
+	af_alg_init_completion(&ctx->completion);
+
+	ask->private = ctx;
+
+	ablkcipher_request_set_tfm(&ctx->req, private);
+	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					af_alg_complete, &ctx->completion);
+
+	sk->sk_destruct = skcipher_sock_destruct;
+
+	return 0;
+}
+
+static const struct af_alg_type algif_type_skcipher = {
+	.bind		=	skcipher_bind,
+	.release	=	skcipher_release,
+	.setkey		=	skcipher_setkey,
+	.accept		=	skcipher_accept_parent,
+	.ops		=	&algif_skcipher_ops,
+	.name		=	"skcipher",
+	.owner		=	THIS_MODULE
+};
+
+static int __init algif_skcipher_init(void)
+{
+	return af_alg_register_type(&algif_type_skcipher);
+}
+
+static void __exit algif_skcipher_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_skcipher);
+	BUG_ON(err);
+}
+
+module_init(algif_skcipher_init);
+module_exit(algif_skcipher_exit);
+MODULE_LICENSE("GPL");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a5a22cf..5ef7ba6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -107,20 +107,6 @@
 	goto out;
 }
 
-static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
-			  int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
 					    int err)
 {
@@ -345,7 +331,7 @@
 	if (ivsize) {
 		sg_init_table(cipher, 2);
 		sg_set_buf(cipher, iv, ivsize);
-		authenc_chain(cipher, dst, vdst == iv + ivsize);
+		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
 		dst = cipher;
 		cryptlen += ivsize;
 	}
@@ -354,7 +340,7 @@
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		authenc_chain(asg, dst, 0);
+		scatterwalk_crypto_chain(asg, dst, 0, 2);
 		dst = asg;
 		cryptlen += req->assoclen;
 	}
@@ -499,7 +485,7 @@
 	if (ivsize) {
 		sg_init_table(cipher, 2);
 		sg_set_buf(cipher, iv, ivsize);
-		authenc_chain(cipher, src, vsrc == iv + ivsize);
+		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
 		src = cipher;
 		cryptlen += ivsize;
 	}
@@ -508,7 +494,7 @@
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		authenc_chain(asg, src, 0);
+		scatterwalk_crypto_chain(asg, src, 0, 2);
 		src = asg;
 		cryptlen += req->assoclen;
 	}
diff --git a/crypto/cast5.c b/crypto/cast5.c
index a1d2294..4a230dd 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -604,36 +604,23 @@
 	 * Rounds 3, 6, 9, 12, and 15 use f function Type 3.
 	 */
 
+	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
+	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 	if (!(c->rr)) {
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
 		t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
-	} else {
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 	}
 
 	/* c1...c64 <-- (R16,L16).  (Exchange final blocks L16, R16 and
@@ -663,32 +650,19 @@
 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-	} else {
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
 	}
+	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
+	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
 
 	dst[0] = cpu_to_be32(r);
 	dst[1] = cpu_to_be32(l);
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
index fdcf624..b980ee1 100644
--- a/crypto/crypto_wq.c
+++ b/crypto/crypto_wq.c
@@ -20,7 +20,8 @@
 
 static int __init crypto_wq_init(void)
 {
-	kcrypto_wq = create_workqueue("crypto");
+	kcrypto_wq = alloc_workqueue("crypto",
+				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (unlikely(!kcrypto_wq))
 		return -ENOMEM;
 	return 0;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 463dc85..cbc7a33 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,12 +48,11 @@
 	int ret = 0;
 	struct z_stream_s *stream = &ctx->comp_stream;
 
-	stream->workspace = vmalloc(zlib_deflate_workspacesize());
+	stream->workspace = vzalloc(zlib_deflate_workspacesize());
 	if (!stream->workspace) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(stream->workspace, 0, zlib_deflate_workspacesize());
 	ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
 	                        -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
 	                        Z_DEFAULT_STRATEGY);
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 3ca3b66..42ce9f5 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -62,20 +62,6 @@
 	skcipher_givcrypt_complete(req, err);
 }
 
-static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
-			 int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
 {
 	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -124,13 +110,13 @@
 
 	sg_init_table(reqctx->src, 2);
 	sg_set_buf(reqctx->src, giv, ivsize);
-	eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
+	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
 
 	dst = reqctx->src;
 	if (osrc != odst) {
 		sg_init_table(reqctx->dst, 2);
 		sg_set_buf(reqctx->dst, giv, ivsize);
-		eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
+		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
 
 		dst = reqctx->dst;
 	}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 2f5fbba..1a25263 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1102,21 +1102,6 @@
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
 
-/* this is the same as crypto_authenc_chain */
-static void crypto_rfc4543_chain(struct scatterlist *head,
-				 struct scatterlist *sg, int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
 						 int enc)
 {
@@ -1154,13 +1139,13 @@
 
 	sg_init_table(payload, 2);
 	sg_set_buf(payload, req->iv, 8);
-	crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
 	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
 
 	sg_init_table(assoc, 2);
 	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
 		    req->assoc->offset);
-	crypto_rfc4543_chain(assoc, payload, 0);
+	scatterwalk_crypto_chain(assoc, payload, 0, 2);
 
 	aead_request_set_tfm(subreq, ctx->child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 75586f1..29a89da 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -455,7 +455,8 @@
 
 	get_online_cpus();
 
-	pcrypt->wq = create_workqueue(name);
+	pcrypt->wq = alloc_workqueue(name,
+				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (!pcrypt->wq)
 		goto err;
 
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 1ceb673..8a0f68b 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -325,4 +325,5 @@
 module_exit(rmd128_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 472261f..525d7bb 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -369,4 +369,5 @@
 module_exit(rmd160_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 72eafa8..69293d9 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -344,4 +344,5 @@
 module_exit(rmd256_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 86becab..09f97df 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -393,4 +393,5 @@
 module_exit(rmd320_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
diff --git a/crypto/shash.c b/crypto/shash.c
index 22fd943..76f74b9 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -310,7 +310,13 @@
 
 static int shash_async_import(struct ahash_request *req, const void *in)
 {
-	return crypto_shash_import(ahash_request_ctx(req), in);
+	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct shash_desc *desc = ahash_request_ctx(req);
+
+	desc->tfm = *ctx;
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
 }
 
 static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 3ca68f9..9aac5e5 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -8,6 +8,13 @@
  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
  * Copyright (c) 2007 Nokia Siemens Networks
  *
+ * Updated RFC4106 AES-GCM testing.
+ *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -980,6 +987,10 @@
 		ret += tcrypt_test("ansi_cprng");
 		break;
 
+	case 151:
+		ret += tcrypt_test("rfc4106(gcm(aes))");
+		break;
+
 	case 200:
 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index fa8c8f7..27ea9fe 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -6,6 +6,13 @@
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Updated RFC4106 AES-GCM testing.
+ *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -2242,6 +2249,23 @@
 			}
 		}
 	}, {
+		.alg = "rfc4106(gcm(aes))",
+		.test = alg_test_aead,
+		.suite = {
+			.aead = {
+				.enc = {
+					.vecs = aes_gcm_rfc4106_enc_tv_template,
+					.count = AES_GCM_4106_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = aes_gcm_rfc4106_dec_tv_template,
+					.count = AES_GCM_4106_DEC_TEST_VECTORS
+				}
+			}
+		}
+	}, {
+
+
 		.alg = "rfc4309(ccm(aes))",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 74e3537..834af7f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6,6 +6,15 @@
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Updated RFC4106 AES-GCM testing. Some test vectors were taken from
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/
+ * gcm/gcm-test-vectors.tar.gz
+ *     Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *              Adrian Hoban <adrian.hoban@intel.com>
+ *              Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *              Tadeusz Struk (tadeusz.struk@intel.com)
+ *     Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -2947,6 +2956,8 @@
 #define AES_CTR_3686_DEC_TEST_VECTORS 6
 #define AES_GCM_ENC_TEST_VECTORS 9
 #define AES_GCM_DEC_TEST_VECTORS 8
+#define AES_GCM_4106_ENC_TEST_VECTORS 7
+#define AES_GCM_4106_DEC_TEST_VECTORS 7
 #define AES_CCM_ENC_TEST_VECTORS 7
 #define AES_CCM_DEC_TEST_VECTORS 7
 #define AES_CCM_4309_ENC_TEST_VECTORS 7
@@ -5829,6 +5840,356 @@
 	}
 };
 
+static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+        { /* Generated using Crypto++ */
+		.key    = zeroed_string,
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = zeroed_string,
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+		.rlen	= 32,
+        },{
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = zeroed_string,
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+		.rlen	= 32,
+
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 64,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+		.rlen	= 80,
+        }, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+                          "\x00\x00\x00\x00",
+                .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff",
+                .ilen   = 192,
+                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                          "\xaa\xaa\xaa\xaa",
+                .alen   = 12,
+		.result	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+			  "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+			  "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+			  "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+			  "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+			  "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+			  "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+			  "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+			  "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+			  "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+			  "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+			  "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+			  "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+			  "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+			  "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+			  "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+			  "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+			  "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+			  "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+			  "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+			  "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+			  "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+		.rlen	= 208,
+	}
+};
+
+static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
+        { /* Generated using Crypto++ */
+		.key    = zeroed_string,
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = zeroed_string,
+                .rlen   = 16,
+
+        },{
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = zeroed_string,
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+		.ilen	= 32,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+		.ilen	= 32,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+		.ilen	= 80,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 64,
+        }, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+			  "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+			  "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+			  "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+			  "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+			  "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+			  "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+			  "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+			  "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+			  "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+			  "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+			  "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+			  "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+			  "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+			  "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+			  "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+			  "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+			  "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+			  "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+			  "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+			  "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+			  "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+		.ilen	= 208,
+                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                          "\xaa\xaa\xaa\xaa",
+                .alen   = 12,
+                .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff",
+                .rlen   = 192,
+
+	}
+};
+
 static struct aead_testvec aes_ccm_enc_tv_template[] = {
 	{ /* From RFC 3610 */
 		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c3015733..739b8fc 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -95,11 +95,10 @@
 	zlib_comp_exit(ctx);
 
 	workspacesize = zlib_deflate_workspacesize();
-	stream->workspace = vmalloc(workspacesize);
+	stream->workspace = vzalloc(workspacesize);
 	if (!stream->workspace)
 		return -ENOMEM;
 
-	memset(stream->workspace, 0, workspacesize);
 	ret = zlib_deflateInit2(stream,
 				tb[ZLIB_COMP_LEVEL]
 					? nla_get_u32(tb[ZLIB_COMP_LEVEL])
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3d93b3a..9bfb71f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -26,6 +26,8 @@
 
 source "drivers/md/Kconfig"
 
+source "drivers/target/Kconfig"
+
 source "drivers/message/fusion/Kconfig"
 
 source "drivers/firewire/Kconfig"
@@ -88,6 +90,8 @@
 
 source "drivers/leds/Kconfig"
 
+source "drivers/nfc/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index bf15ce7..7eb35f4 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -40,12 +40,13 @@
 
 obj-y				+= serial/
 obj-$(CONFIG_PARPORT)		+= parport/
-obj-y				+= base/ block/ misc/ mfd/
+obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-$(CONFIG_ATA)		+= ata/
+obj-$(CONFIG_TARGET_CORE)	+= target/
 obj-$(CONFIG_MTD)		+= mtd/
 obj-$(CONFIG_SPI)		+= spi/
 obj-y				+= net/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3f3489c..10c7ad5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -51,12 +51,7 @@
 	  For backwards compatibility, this option allows
 	  deprecated /proc/acpi/ files to exist, even when
 	  they have been replaced by functions in /sys.
-	  The deprecated files (and their replacements) include:
 
-	  /proc/acpi/processor/*/throttling (/sys/class/thermal/
-		cooling_device*/*)
-	  /proc/acpi/video/*/brightness (/sys/class/backlight/)
-	  /proc/acpi/thermal_zone/*/* (/sys/class/thermal/)
 	  This option has no effect on /proc/acpi/ files
 	  and functions which do not yet exist in /sys.
 
@@ -74,6 +69,8 @@
 	  /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
 	  This option has no effect on /proc/acpi/ directories
 	  and functions, which do not yet exist in /sys
+	  This option, together with the proc directories, will be
+	  deleted in 2.6.39.
 
 	  Say N to delete power /proc/acpi/ directories that have moved to /sys/
 
@@ -209,6 +206,17 @@
 
 	  To compile this driver as a module, choose M here:
 	  the module will be called processor.
+config ACPI_IPMI
+	tristate "IPMI"
+	depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER
+	default n
+	help
+	  This driver enables ACPI to access the BMC controller. It uses
+	  IPMI request/response messages to communicate with the BMC
+	  controller, which can be found on the server.
+
+	  To compile this driver as a module, choose M here:
+	  the module will be called acpi_ipmi.
 
 config ACPI_HOTPLUG_CPU
 	bool
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3d031d0..d113fa5 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@
 # sleep related files
 acpi-y				+= wakeup.o
 acpi-y				+= sleep.o
-acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o
+acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o nvs.o
 
 
 #
@@ -69,5 +69,6 @@
 processor-$(CONFIG_CPU_FREQ)	+= processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+obj-$(CONFIG_ACPI_IPMI)		+= acpi_ipmi.o
 
 obj-$(CONFIG_ACPI_APEI)		+= apei/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 25d3aae..58c3f74 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -197,7 +197,8 @@
 {
 	struct proc_dir_entry *entry = NULL;
 
-
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
 	if (!acpi_device_dir(device)) {
 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
 						     acpi_ac_dir);
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
new file mode 100644
index 0000000..f40acef
--- /dev/null
+++ b/drivers/acpi/acpi_ipmi.c
@@ -0,0 +1,525 @@
+/*
+ *  acpi_ipmi.c - ACPI IPMI opregion
+ *
+ *  Copyright (C) 2010 Intel Corporation
+ *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or (at
+ *  your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/ipmi.h>
+#include <linux/device.h>
+#include <linux/pnp.h>
+
+MODULE_AUTHOR("Zhao Yakui");
+MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+MODULE_LICENSE("GPL");
+
+#define IPMI_FLAGS_HANDLER_INSTALL	0
+
+#define ACPI_IPMI_OK			0
+#define ACPI_IPMI_TIMEOUT		0x10
+#define ACPI_IPMI_UNKNOWN		0x07
+/* the IPMI timeout is 5s */
+#define IPMI_TIMEOUT			(5 * HZ)
+
+struct acpi_ipmi_device {
+	/* the device list attached to driver_data.ipmi_devices */
+	struct list_head head;
+	/* the IPMI request message list */
+	struct list_head tx_msg_list;
+	struct mutex	tx_msg_lock;
+	acpi_handle handle;
+	struct pnp_dev *pnp_dev;
+	ipmi_user_t	user_interface;
+	int ipmi_ifnum; /* IPMI interface number */
+	long curr_msgid;
+	unsigned long flags;
+	struct ipmi_smi_info smi_data;
+};
+
+struct ipmi_driver_data {
+	struct list_head	ipmi_devices;
+	struct ipmi_smi_watcher	bmc_events;
+	struct ipmi_user_hndl	ipmi_hndlrs;
+	struct mutex		ipmi_lock;
+};
+
+struct acpi_ipmi_msg {
+	struct list_head head;
+	/*
+	 * Generally speaking the addr type should be SI_ADDR_TYPE and
+	 * the addr channel should be BMC.
+	 * It could also be of IPMB type, but then we would have to parse
+	 * it from the Netfn command buffer. That is complex enough that
+	 * it is skipped here.
+	 */
+	struct ipmi_addr addr;
+	long tx_msgid;
+	/* it is used to track whether the IPMI message is finished */
+	struct completion tx_complete;
+	struct kernel_ipmi_msg tx_message;
+	int	msg_done;
+	/* tx data, copied from the ACPI object buffer */
+	u8	tx_data[64];
+	int	tx_len;
+	u8	rx_data[64];
+	int	rx_len;
+	struct acpi_ipmi_device *device;
+};
+
+/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
+struct acpi_ipmi_buffer {
+	u8 status;
+	u8 length;
+	u8 data[64];
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev);
+static void ipmi_bmc_gone(int iface);
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
+
+static struct ipmi_driver_data driver_data = {
+	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
+	.bmc_events = {
+		.owner = THIS_MODULE,
+		.new_smi = ipmi_register_bmc,
+		.smi_gone = ipmi_bmc_gone,
+	},
+	.ipmi_hndlrs = {
+		.ipmi_recv_hndl = ipmi_msg_handler,
+	},
+};
+
+static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+{
+	struct acpi_ipmi_msg *ipmi_msg;
+	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
+	if (!ipmi_msg)	{
+		dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+		return NULL;
+	}
+	init_completion(&ipmi_msg->tx_complete);
+	INIT_LIST_HEAD(&ipmi_msg->head);
+	ipmi_msg->device = ipmi;
+	return ipmi_msg;
+}
+
+#define		IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
+#define		IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
+static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+				acpi_physical_address address,
+				acpi_integer *value)
+{
+	struct kernel_ipmi_msg *msg;
+	struct acpi_ipmi_buffer *buffer;
+	struct acpi_ipmi_device *device;
+
+	msg = &tx_msg->tx_message;
+	/*
+	 * IPMI network function and command are encoded in the address
+	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
+	 */
+	msg->netfn = IPMI_OP_RGN_NETFN(address);
+	msg->cmd = IPMI_OP_RGN_CMD(address);
+	msg->data = tx_msg->tx_data;
+	/*
+	 * value is the parameter passed by the IPMI opregion space handler.
+	 * It points to the IPMI request message buffer
+	 */
+	buffer = (struct acpi_ipmi_buffer *)value;
+	/* copy the tx message data */
+	msg->data_len = buffer->length;
+	memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+	/*
+	 * now the default type is SYSTEM_INTERFACE and channel type is BMC.
+	 * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
+	 * the addr type should be changed to IPMB. Then we will have to parse
+	 * the IPMI request message buffer to get the IPMB address.
+	 * If so, please fix me.
+	 */
+	tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+	tx_msg->addr.channel = IPMI_BMC_CHANNEL;
+	tx_msg->addr.data[0] = 0;
+
+	/* Get the msgid */
+	device = tx_msg->device;
+	mutex_lock(&device->tx_msg_lock);
+	device->curr_msgid++;
+	tx_msg->tx_msgid = device->curr_msgid;
+	mutex_unlock(&device->tx_msg_lock);
+}
+
+static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+		acpi_integer *value, int rem_time)
+{
+	struct acpi_ipmi_buffer *buffer;
+
+	/*
+	 * value is also used as an output parameter. It holds the response
+	 * IPMI message returned by the IPMI command.
+	 */
+	buffer = (struct acpi_ipmi_buffer *)value;
+	if (!rem_time && !msg->msg_done) {
+		buffer->status = ACPI_IPMI_TIMEOUT;
+		return;
+	}
+	/*
+	 * If the msg_done flag is not set or the receive length is zero, the
+	 * IPMI command was not executed correctly.
+	 * The status code will be ACPI_IPMI_UNKNOWN.
+	 */
+	if (!msg->msg_done || !msg->rx_len) {
+		buffer->status = ACPI_IPMI_UNKNOWN;
+		return;
+	}
+	/*
+	 * If the IPMI response message is obtained correctly, the status code
+	 * will be ACPI_IPMI_OK
+	 */
+	buffer->status = ACPI_IPMI_OK;
+	buffer->length = msg->rx_len;
+	memcpy(buffer->data, msg->rx_data, msg->rx_len);
+}
+
+static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
+{
+	struct acpi_ipmi_msg *tx_msg, *temp;
+	int count = HZ / 10;
+	struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+	list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+		/* wake up any thread sleeping on this Tx msg */
+		complete(&tx_msg->tx_complete);
+	}
+
+	/* wait for about 100ms to flush the tx message list */
+	while (count--) {
+		if (list_empty(&ipmi->tx_msg_list))
+			break;
+		schedule_timeout(1);
+	}
+	if (!list_empty(&ipmi->tx_msg_list))
+		dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+}
+
+static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+{
+	struct acpi_ipmi_device *ipmi_device = user_msg_data;
+	int msg_found = 0;
+	struct acpi_ipmi_msg *tx_msg;
+	struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+
+	if (msg->user != ipmi_device->user_interface) {
+		dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+			"returned user %p, expected user %p\n",
+			msg->user, ipmi_device->user_interface);
+		ipmi_free_recv_msg(msg);
+		return;
+	}
+	mutex_lock(&ipmi_device->tx_msg_lock);
+	list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+		if (msg->msgid == tx_msg->tx_msgid) {
+			msg_found = 1;
+			break;
+		}
+	}
+
+	mutex_unlock(&ipmi_device->tx_msg_lock);
+	if (!msg_found) {
+		dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+			"returned.\n", msg->msgid);
+		ipmi_free_recv_msg(msg);
+		return;
+	}
+
+	if (msg->msg.data_len) {
+		/* copy the response data to Rx_data buffer */
+		memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
+		tx_msg->rx_len = msg->msg.data_len;
+		tx_msg->msg_done = 1;
+	}
+	complete(&tx_msg->tx_complete);
+	ipmi_free_recv_msg(msg);
+};
+
+static void ipmi_register_bmc(int iface, struct device *dev)
+{
+	struct acpi_ipmi_device *ipmi_device, *temp;
+	struct pnp_dev *pnp_dev;
+	ipmi_user_t		user;
+	int err;
+	struct ipmi_smi_info smi_data;
+	acpi_handle handle;
+
+	err = ipmi_get_smi_info(iface, &smi_data);
+
+	if (err)
+		return;
+
+	if (smi_data.addr_src != SI_ACPI) {
+		put_device(smi_data.dev);
+		return;
+	}
+
+	handle = smi_data.addr_info.acpi_info.acpi_handle;
+
+	mutex_lock(&driver_data.ipmi_lock);
+	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
+		/*
+		 * if the corresponding ACPI handle is already added
+		 * to the device list, don't add it again.
+		 */
+		if (temp->handle == handle)
+			goto out;
+	}
+
+	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+
+	if (!ipmi_device)
+		goto out;
+
+	pnp_dev = to_pnp_dev(smi_data.dev);
+	ipmi_device->handle = handle;
+	ipmi_device->pnp_dev = pnp_dev;
+
+	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+					ipmi_device, &user);
+	if (err) {
+		dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
+		kfree(ipmi_device);
+		goto out;
+	}
+	acpi_add_ipmi_device(ipmi_device);
+	ipmi_device->user_interface = user;
+	ipmi_device->ipmi_ifnum = iface;
+	mutex_unlock(&driver_data.ipmi_lock);
+	memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+	return;
+
+out:
+	mutex_unlock(&driver_data.ipmi_lock);
+	put_device(smi_data.dev);
+	return;
+}
+
+static void ipmi_bmc_gone(int iface)
+{
+	struct acpi_ipmi_device *ipmi_device, *temp;
+
+	mutex_lock(&driver_data.ipmi_lock);
+	list_for_each_entry_safe(ipmi_device, temp,
+				&driver_data.ipmi_devices, head) {
+		if (ipmi_device->ipmi_ifnum != iface)
+			continue;
+
+		acpi_remove_ipmi_device(ipmi_device);
+		put_device(ipmi_device->smi_data.dev);
+		kfree(ipmi_device);
+		break;
+	}
+	mutex_unlock(&driver_data.ipmi_lock);
+}
+/* --------------------------------------------------------------------------
+ *			Address Space Management
+ * -------------------------------------------------------------------------- */
+/*
+ * This is the IPMI opregion space handler.
+ * @function: indicates read or write. Since the IPMI message is driven
+ * by commands, only write access is meaningful.
+ * @address: This contains the netfn/command of IPMI request message.
+ * @bits   : not used.
+ * @value  : it is an in/out parameter. It points to the IPMI message buffer.
+ *	     Before the IPMI message is sent, it represents the actual request
+ *	     IPMI message. After the IPMI message is finished, it represents
+ *	     the response IPMI message returned by IPMI command.
+ * @handler_context: IPMI device context.
+ */
+
+static acpi_status
+acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+		      u32 bits, acpi_integer *value,
+		      void *handler_context, void *region_context)
+{
+	struct acpi_ipmi_msg *tx_msg;
+	struct acpi_ipmi_device *ipmi_device = handler_context;
+	int err, rem_time;
+	acpi_status status;
+	/*
+	 * IPMI opregion message.
+	 * The IPMI message is first written to the BMC and system software
+	 * then reads the response, so read access to the IPMI opregion
+	 * itself is meaningless.
+	 */
+	if ((function & ACPI_IO_MASK) == ACPI_READ)
+		return AE_TYPE;
+
+	if (!ipmi_device->user_interface)
+		return AE_NOT_EXIST;
+
+	tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
+	if (!tx_msg)
+		return AE_NO_MEMORY;
+
+	acpi_format_ipmi_msg(tx_msg, address, value);
+	mutex_lock(&ipmi_device->tx_msg_lock);
+	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+	mutex_unlock(&ipmi_device->tx_msg_lock);
+	err = ipmi_request_settime(ipmi_device->user_interface,
+					&tx_msg->addr,
+					tx_msg->tx_msgid,
+					&tx_msg->tx_message,
+					NULL, 0, 0, 0);
+	if (err) {
+		status = AE_ERROR;
+		goto end_label;
+	}
+	rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
+					IPMI_TIMEOUT);
+	acpi_format_ipmi_response(tx_msg, value, rem_time);
+	status = AE_OK;
+
+end_label:
+	mutex_lock(&ipmi_device->tx_msg_lock);
+	list_del(&tx_msg->head);
+	mutex_unlock(&ipmi_device->tx_msg_lock);
+	kfree(tx_msg);
+	return status;
+}
+
+static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
+{
+	if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+		return;
+
+	acpi_remove_address_space_handler(ipmi->handle,
+				ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
+
+	clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+}
+
+static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+{
+	acpi_status status;
+
+	if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+		return 0;
+
+	status = acpi_install_address_space_handler(ipmi->handle,
+						    ACPI_ADR_SPACE_IPMI,
+						    &acpi_ipmi_space_handler,
+						    NULL, ipmi);
+	if (ACPI_FAILURE(status)) {
+		struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+		dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
+			"handler\n");
+		return -EINVAL;
+	}
+	set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
+	return 0;
+}
+
+static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+
+	INIT_LIST_HEAD(&ipmi_device->head);
+
+	mutex_init(&ipmi_device->tx_msg_lock);
+	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+	ipmi_install_space_handler(ipmi_device);
+
+	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
+}
+
+static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+{
+	/*
+	 * If the IPMI user interface is created, it should be
+	 * destroyed.
+	 */
+	if (ipmi_device->user_interface) {
+		ipmi_destroy_user(ipmi_device->user_interface);
+		ipmi_device->user_interface = NULL;
+	}
+	/* flush the Tx_msg list */
+	if (!list_empty(&ipmi_device->tx_msg_list))
+		ipmi_flush_tx_msg(ipmi_device);
+
+	list_del(&ipmi_device->head);
+	ipmi_remove_space_handler(ipmi_device);
+}
+
+static int __init acpi_ipmi_init(void)
+{
+	int result = 0;
+
+	if (acpi_disabled)
+		return result;
+
+	mutex_init(&driver_data.ipmi_lock);
+
+	result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+
+	return result;
+}
+
+static void __exit acpi_ipmi_exit(void)
+{
+	struct acpi_ipmi_device *ipmi_device, *temp;
+
+	if (acpi_disabled)
+		return;
+
+	ipmi_smi_watcher_unregister(&driver_data.bmc_events);
+
+	/*
+	 * When an smi_watcher is unregistered, it is only deleted
+	 * from the smi_watcher list; the smi_gone callback is not
+	 * invoked. So explicitly uninstall the ACPI IPMI opregion
+	 * handler here and free it.
+	 */
+	mutex_lock(&driver_data.ipmi_lock);
+	list_for_each_entry_safe(ipmi_device, temp,
+				&driver_data.ipmi_devices, head) {
+		acpi_remove_ipmi_device(ipmi_device);
+		put_device(ipmi_device->smi_data.dev);
+		kfree(ipmi_device);
+	}
+	mutex_unlock(&driver_data.ipmi_lock);
+}
+
+module_init(acpi_ipmi_init);
+module_exit(acpi_ipmi_exit);
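
For orientation (not part of the patch), a minimal sketch of the ipmi_smi_watcher pattern that acpi_ipmi_init() above relies on. The example_* names are hypothetical and the callbacks only log; the driver's real ipmi_register_bmc()/ipmi_bmc_gone() additionally bind the BMC to its ACPI handle and install the IPMI opregion handler.

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ipmi.h>

/* Hypothetical watcher callbacks; invoked for existing and new interfaces. */
static void example_new_smi(int iface, struct device *dev)
{
	dev_info(dev, "IPMI interface %d registered\n", iface);
}

static void example_smi_gone(int iface)
{
	pr_info("IPMI interface %d removed\n", iface);
}

static struct ipmi_smi_watcher example_watcher = {
	.owner    = THIS_MODULE,
	.new_smi  = example_new_smi,
	.smi_gone = example_smi_gone,
};

static int __init example_init(void)
{
	return ipmi_smi_watcher_register(&example_watcher);
}

static void __exit example_exit(void)
{
	ipmi_smi_watcher_unregister(&example_watcher);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
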
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a7e1d1a..eec2ead 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@
 
 acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
 	 evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
-	 evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o
+	 evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o evxfgpe.o
 
 acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
 	 exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index a6f99cc..70e0b28 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -51,8 +51,6 @@
 
 acpi_status acpi_ev_install_xrupt_handlers(void);
 
-acpi_status acpi_ev_install_fadt_gpes(void);
-
 u32 acpi_ev_fixed_event_detect(void);
 
 /*
@@ -82,9 +80,9 @@
 
 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
 
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
 
 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
 						       u32 gpe_number);
@@ -93,6 +91,8 @@
 						     struct acpi_gpe_block_info
 						     *gpe_block);
 
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info);
+
 /*
  * evgpeblk - Upper-level GPE block support
  */
@@ -107,12 +107,13 @@
 acpi_status
 acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 			     struct acpi_gpe_block_info *gpe_block,
-			     void *ignored);
+			     void *context);
 
 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block);
 
 u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+		     struct acpi_gpe_event_info *gpe_event_info,
 		     u32 gpe_number);
 
 /*
@@ -126,10 +127,6 @@
 acpi_ev_match_gpe_method(acpi_handle obj_handle,
 			 u32 level, void *context, void **return_value);
 
-acpi_status
-acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
-			  u32 level, void *context, void **return_value);
-
 /*
  * evgpeutil - GPE utilities
  */
@@ -138,6 +135,10 @@
 
 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
 
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context);
+
 struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
 
 acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ad88fca..0e4dba0 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -146,6 +146,9 @@
 
 extern u32 acpi_gbl_nesting_level;
 
+ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+
 /* Support for dynamic control method tracing mechanism */
 
 ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
@@ -225,8 +228,10 @@
  */
 ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
 ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
 #define acpi_gbl_gpe_lock	&_acpi_gbl_gpe_lock
 #define acpi_gbl_hardware_lock	&_acpi_gbl_hardware_lock
+#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
 
 /*****************************************************************************
  *
@@ -370,7 +375,9 @@
 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
 ACPI_EXTERN struct acpi_gpe_block_info
 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN u8 acpi_all_gpes_initialized;
+ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
+ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
+ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
 
 /*****************************************************************************
  *
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 167470a..258d628 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -94,7 +94,7 @@
 			     struct acpi_gpe_register_info *gpe_register_info);
 
 acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action);
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action);
 
 acpi_status
 acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2ceb0c0..74000f5 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -408,17 +408,18 @@
 
 /* Dispatch info for each GPE -- either a method or handler, cannot be both */
 
-struct acpi_handler_info {
-	acpi_event_handler address;	/* Address of handler, if any */
+struct acpi_gpe_handler_info {
+	acpi_gpe_handler address;	/* Address of handler, if any */
 	void *context;		/* Context to be passed to handler */
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level (saved) */
-	u8 orig_flags;		/* Original misc info about this GPE */
-	u8 orig_enabled;	/* Set if the GPE was originally enabled */
+	u8 original_flags;      /* Original (pre-handler) GPE info */
+	u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
 union acpi_gpe_dispatch_info {
 	struct acpi_namespace_node *method_node;	/* Method node for this GPE level */
-	struct acpi_handler_info *handler;
+	struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
+	struct acpi_namespace_node *device_node;        /* Parent _PRW device for implicit notify */
 };
 
 /*
@@ -458,7 +459,7 @@
 	u32 register_count;	/* Number of register pairs in block */
 	u16 gpe_count;		/* Number of individual GPEs in block */
 	u8 block_base_number;	/* Base GPE number for this block */
-	u8 initialized;         /* If set, the GPE block has been initialized */
+	u8 initialized;         /* TRUE if this block is initialized */
 };
 
 /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index bdbfaf22b..962a3cc 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -93,7 +93,7 @@
 
 #define AOPOBJ_AML_CONSTANT         0x01	/* Integer is an AML constant */
 #define AOPOBJ_STATIC_POINTER       0x02	/* Data is part of an ACPI table, don't delete */
-#define AOPOBJ_DATA_VALID           0x04	/* Object is intialized and data is valid */
+#define AOPOBJ_DATA_VALID           0x04	/* Object is initialized and data is valid */
 #define AOPOBJ_OBJECT_INITIALIZED   0x08	/* Region is initialized, _REG was run */
 #define AOPOBJ_SETUP_COMPLETE       0x10	/* Region setup is complete */
 #define AOPOBJ_INVALID              0x20	/* Host OS won't allow a Region address */
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c61c303..e5e313c 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -217,9 +217,17 @@
 		     status_bit_mask)
 		    && (fixed_enable & acpi_gbl_fixed_event_info[i].
 			enable_bit_mask)) {
+			/*
+			 * Found an active (signalled) event. Invoke global event
+			 * handler if present.
+			 */
+			acpi_fixed_event_count[i]++;
+			if (acpi_gbl_global_event_handler) {
+				acpi_gbl_global_event_handler
+				    (ACPI_EVENT_TYPE_FIXED, NULL, i,
+				     acpi_gbl_global_event_handler_context);
+			}
 
-			/* Found an active (signalled) event */
-			acpi_os_fixed_event_count(i);
 			int_status |= acpi_ev_fixed_event_dispatch(i);
 		}
 	}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f226eac..7c339d3 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -52,6 +52,8 @@
 /* Local prototypes */
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
 
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_update_gpe_enable_mask
@@ -102,7 +104,7 @@
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Clear the given GPE from stale events and enable it.
+ * DESCRIPTION: Clear a GPE of stale events and enable it.
  *
  ******************************************************************************/
 acpi_status
@@ -113,12 +115,13 @@
 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
 	/*
-	 * We will only allow a GPE to be enabled if it has either an
-	 * associated method (_Lxx/_Exx) or a handler. Otherwise, the
-	 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
-	 * first time it fires.
+	 * We will only allow a GPE to be enabled if it has either an associated
+	 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
+	 * feature. Otherwise, the GPE will be immediately disabled by
+	 * acpi_ev_gpe_dispatch the first time it fires.
 	 */
-	if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
+	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+	    ACPI_GPE_DISPATCH_NONE) {
 		return_ACPI_STATUS(AE_NO_HANDLER);
 	}
 
@@ -137,9 +140,9 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_raw_enable_gpe
+ * FUNCTION:    acpi_ev_add_gpe_reference
  *
- * PARAMETERS:  gpe_event_info  - GPE to enable
+ * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
  *
  * RETURN:      Status
  *
@@ -148,16 +151,21 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 {
 	acpi_status status = AE_OK;
 
+	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
+
 	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
 		return_ACPI_STATUS(AE_LIMIT);
 	}
 
 	gpe_event_info->runtime_count++;
 	if (gpe_event_info->runtime_count == 1) {
+
+		/* Enable on first reference */
+
 		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
 		if (ACPI_SUCCESS(status)) {
 			status = acpi_ev_enable_gpe(gpe_event_info);
@@ -173,9 +181,9 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_raw_disable_gpe
+ * FUNCTION:    acpi_ev_remove_gpe_reference
  *
- * PARAMETERS:  gpe_event_info  - GPE to disable
+ * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
  *
  * RETURN:      Status
  *
@@ -184,16 +192,21 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 {
 	acpi_status status = AE_OK;
 
+	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
+
 	if (!gpe_event_info->runtime_count) {
 		return_ACPI_STATUS(AE_LIMIT);
 	}
 
 	gpe_event_info->runtime_count--;
 	if (!gpe_event_info->runtime_count) {
+
+		/* Disable on last reference */
+
 		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
 		if (ACPI_SUCCESS(status)) {
 			status = acpi_hw_low_set_gpe(gpe_event_info,
@@ -379,7 +392,7 @@
 			}
 
 			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
-					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+					  "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
 					  gpe_register_info->base_gpe_number,
 					  status_reg, enable_reg));
 
@@ -405,7 +418,9 @@
 					 * or method.
 					 */
 					int_status |=
-					    acpi_ev_gpe_dispatch(&gpe_block->
+					    acpi_ev_gpe_dispatch(gpe_block->
+								 node,
+								 &gpe_block->
 						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
 				}
 			}
@@ -435,17 +450,25 @@
  *              an interrupt handler.
  *
  ******************************************************************************/
-static void acpi_ev_asynch_enable_gpe(void *context);
 
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
-	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+	struct acpi_gpe_event_info *gpe_event_info = context;
 	acpi_status status;
-	struct acpi_gpe_event_info local_gpe_event_info;
+	struct acpi_gpe_event_info *local_gpe_event_info;
 	struct acpi_evaluate_info *info;
 
 	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
+	/* Allocate a local GPE block */
+
+	local_gpe_event_info =
+	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
+	if (!local_gpe_event_info) {
+		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
+		return_VOID;
+	}
+
 	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
 	if (ACPI_FAILURE(status)) {
 		return_VOID;
@@ -462,7 +485,7 @@
 	 * Take a snapshot of the GPE info for this level - we copy the info to
 	 * prevent a race condition with remove_handler/remove_block.
 	 */
-	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+	ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
 		    sizeof(struct acpi_gpe_event_info));
 
 	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -470,12 +493,26 @@
 		return_VOID;
 	}
 
-	/*
-	 * Must check for control method type dispatch one more time to avoid a
-	 * race with ev_gpe_install_handler
-	 */
-	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
-	    ACPI_GPE_DISPATCH_METHOD) {
+	/* Do the correct dispatch - normal method or implicit notify */
+
+	switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+	case ACPI_GPE_DISPATCH_NOTIFY:
+
+		/*
+		 * Implicit notify.
+		 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
+		 * NOTE: the request is queued for execution after this method
+		 * completes. The notify handlers are NOT invoked synchronously
+		 * from this thread -- because handlers may in turn run other
+		 * control methods.
+		 */
+		status =
+		    acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
+						 device_node,
+						 ACPI_NOTIFY_DEVICE_WAKE);
+		break;
+
+	case ACPI_GPE_DISPATCH_METHOD:
 
 		/* Allocate the evaluation information block */
 
@@ -488,7 +525,7 @@
 			 * control method that corresponds to this GPE
 			 */
 			info->prefix_node =
-			    local_gpe_event_info.dispatch.method_node;
+			    local_gpe_event_info->dispatch.method_node;
 			info->flags = ACPI_IGNORE_RETURN_VALUE;
 
 			status = acpi_ns_evaluate(info);
@@ -499,46 +536,98 @@
 			ACPI_EXCEPTION((AE_INFO, status,
 					"while evaluating GPE method [%4.4s]",
 					acpi_ut_get_node_name
-					(local_gpe_event_info.dispatch.
+					(local_gpe_event_info->dispatch.
 					 method_node)));
 		}
+
+		break;
+
+	default:
+		return_VOID;    /* Should never happen */
 	}
+
 	/* Defer enabling of GPE until all notify handlers are done */
-	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
-				gpe_event_info);
+
+	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+				 acpi_ev_asynch_enable_gpe,
+				 local_gpe_event_info);
+	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(local_gpe_event_info);
+	}
 	return_VOID;
 }
 
-static void acpi_ev_asynch_enable_gpe(void *context)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_asynch_enable_gpe
+ *
+ * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
+ *              Callback from acpi_os_execute
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
+ *              complete (i.e., finish execution of Notify)
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
 {
 	struct acpi_gpe_event_info *gpe_event_info = context;
+
+	(void)acpi_ev_finish_gpe(gpe_event_info);
+
+	ACPI_FREE(gpe_event_info);
+	return;
+}
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_finish_gpe
+ *
+ * PARAMETERS:  gpe_event_info      - Info for this GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
+ *              of a GPE method or a synchronous or asynchronous GPE handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
 	acpi_status status;
+
 	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
 	    ACPI_GPE_LEVEL_TRIGGERED) {
 		/*
-		 * GPE is level-triggered, we clear the GPE status bit after handling
-		 * the event.
+		 * GPE is level-triggered, we clear the GPE status bit after
+		 * handling the event.
 		 */
 		status = acpi_hw_clear_gpe(gpe_event_info);
 		if (ACPI_FAILURE(status)) {
-			return_VOID;
+			return (status);
 		}
 	}
 
 	/*
-	 * Enable this GPE, conditionally. This means that the GPE will only be
-	 * physically enabled if the enable_for_run bit is set in the event_info
+	 * Enable this GPE, conditionally. This means that the GPE will
+	 * only be physically enabled if the enable_for_run bit is set
+	 * in the event_info.
 	 */
-	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE);
-
-	return_VOID;
+	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
+	return (AE_OK);
 }
 
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_dispatch
  *
- * PARAMETERS:  gpe_event_info  - Info for this GPE
+ * PARAMETERS:  gpe_device      - Device node. NULL for GPE0/GPE1
+ *              gpe_event_info  - Info for this GPE
  *              gpe_number      - Number relative to the parent GPE block
  *
  * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
@@ -551,13 +640,22 @@
  ******************************************************************************/
 
 u32
-acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
+		    struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
 {
 	acpi_status status;
+	u32 return_value;
 
 	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
 
-	acpi_os_gpe_count(gpe_number);
+	/* Invoke global event handler if present */
+
+	acpi_gpe_count++;
+	if (acpi_gbl_global_event_handler) {
+		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
+					      gpe_number,
+					      acpi_gbl_global_event_handler_context);
+	}
 
 	/*
 	 * If edge-triggered, clear the GPE status bit now. Note that
@@ -568,59 +666,55 @@
 		status = acpi_hw_clear_gpe(gpe_event_info);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to clear GPE[0x%2X]",
-					gpe_number));
+					"Unable to clear GPE%02X", gpe_number));
 			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
 		}
 	}
 
 	/*
-	 * Dispatch the GPE to either an installed handler, or the control method
-	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
-	 * it and do not attempt to run the method. If there is neither a handler
-	 * nor a method, we disable this GPE to prevent further such pointless
-	 * events from firing.
+	 * Always disable the GPE so that it does not keep firing before
+	 * any asynchronous activity completes (either from the execution
+	 * of a GPE method or an asynchronous GPE handler.)
+	 *
+	 * If there is no handler or method to run, just disable the
+	 * GPE and leave it disabled permanently to prevent further such
+	 * pointless events from firing.
+	 */
+	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+	if (ACPI_FAILURE(status)) {
+		ACPI_EXCEPTION((AE_INFO, status,
+				"Unable to disable GPE%02X", gpe_number));
+		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+	}
+
+	/*
+	 * Dispatch the GPE to either an installed handler or the control
+	 * method associated with this GPE (_Lxx or _Exx). If a handler
+	 * exists, we invoke it and do not attempt to run the method.
+	 * If there is neither a handler nor a method, leave the GPE
+	 * disabled.
 	 */
 	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
 	case ACPI_GPE_DISPATCH_HANDLER:
 
-		/*
-		 * Invoke the installed handler (at interrupt level)
-		 * Ignore return status for now.
-		 * TBD: leave GPE disabled on error?
-		 */
-		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
-								dispatch.
-								handler->
-								context);
+		/* Invoke the installed handler (at interrupt level) */
 
-		/* It is now safe to clear level-triggered events. */
+		return_value =
+		    gpe_event_info->dispatch.handler->address(gpe_device,
+							      gpe_number,
+							      gpe_event_info->
+							      dispatch.handler->
+							      context);
 
-		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
-		    ACPI_GPE_LEVEL_TRIGGERED) {
-			status = acpi_hw_clear_gpe(gpe_event_info);
-			if (ACPI_FAILURE(status)) {
-				ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to clear GPE[0x%2X]",
-						gpe_number));
-				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-			}
+		/* If requested, clear (if level-triggered) and reenable the GPE */
+
+		if (return_value & ACPI_REENABLE_GPE) {
+			(void)acpi_ev_finish_gpe(gpe_event_info);
 		}
 		break;
 
 	case ACPI_GPE_DISPATCH_METHOD:
-
-		/*
-		 * Disable the GPE, so it doesn't keep firing before the method has a
-		 * chance to run (it runs asynchronously with interrupts enabled).
-		 */
-		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to disable GPE[0x%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
+	case ACPI_GPE_DISPATCH_NOTIFY:
 
 		/*
 		 * Execute the method associated with the GPE
@@ -631,7 +725,7 @@
 					 gpe_event_info);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to queue handler for GPE[0x%2X] - event disabled",
+					"Unable to queue handler for GPE%02X - event disabled",
 					gpe_number));
 		}
 		break;
@@ -644,20 +738,9 @@
 		 * a GPE to be enabled if it has no handler or method.
 		 */
 		ACPI_ERROR((AE_INFO,
-			    "No handler or method for GPE[0x%2X], disabling event",
+			    "No handler or method for GPE%02X, disabling event",
 			    gpe_number));
 
-		/*
-		 * Disable the GPE. The GPE will remain disabled a handler
-		 * is installed or ACPICA is restarted.
-		 */
-		status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
-		if (ACPI_FAILURE(status)) {
-			ACPI_EXCEPTION((AE_INFO, status,
-					"Unable to disable GPE[0x%2X]",
-					gpe_number));
-			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
-		}
 		break;
 	}
 
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 020add3..9acb869 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -361,9 +361,9 @@
 
 	gpe_block->node = gpe_device;
 	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
+	gpe_block->initialized = FALSE;
 	gpe_block->register_count = register_count;
 	gpe_block->block_base_number = gpe_block_base_number;
-	gpe_block->initialized = FALSE;
 
 	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
 		    sizeof(struct acpi_generic_address));
@@ -386,7 +386,7 @@
 		return_ACPI_STATUS(status);
 	}
 
-	acpi_all_gpes_initialized = FALSE;
+	acpi_gbl_all_gpes_initialized = FALSE;
 
 	/* Find all GPE methods (_Lxx or_Exx) for this block */
 
@@ -423,14 +423,12 @@
  *
  * FUNCTION:    acpi_ev_initialize_gpe_block
  *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE block
- *              gpe_block           - Gpe Block info
+ * PARAMETERS:  acpi_gpe_callback
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Initialize and enable a GPE block. First find and run any
- *              _PRT methods associated with the block, then enable the
- *              appropriate GPEs.
+ * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
+ *              associated methods.
  *              Note: Assumes namespace is locked.
  *
  ******************************************************************************/
@@ -450,8 +448,8 @@
 	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
 
 	/*
-	 * Ignore a null GPE block (e.g., if no GPE block 1 exists) and
-	 * GPE blocks that have been initialized already.
+	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
+	 * any GPE blocks that have been initialized already.
 	 */
 	if (!gpe_block || gpe_block->initialized) {
 		return_ACPI_STATUS(AE_OK);
@@ -459,8 +457,8 @@
 
 	/*
 	 * Enable all GPEs that have a corresponding method and have the
-	 * ACPI_GPE_CAN_WAKE flag unset.  Any other GPEs within this block must
-	 * be enabled via the acpi_enable_gpe() interface.
+	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
+	 * must be enabled via the acpi_enable_gpe() interface.
 	 */
 	gpe_enabled_count = 0;
 
@@ -472,14 +470,19 @@
 			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
 			gpe_event_info = &gpe_block->event_info[gpe_index];
 
-			/* Ignore GPEs that have no corresponding _Lxx/_Exx method */
-
-			if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)
+			/*
+			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
+			 * and GPEs that are used to wake the system
+			 */
+			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+			     ACPI_GPE_DISPATCH_NONE)
+			    || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+				== ACPI_GPE_DISPATCH_HANDLER)
 			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
 				continue;
 			}
 
-			status = acpi_raw_enable_gpe(gpe_event_info);
+			status = acpi_ev_add_gpe_reference(gpe_event_info);
 			if (ACPI_FAILURE(status)) {
 				ACPI_EXCEPTION((AE_INFO, status,
 					"Could not enable GPE 0x%02X",
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 4c8dea5..c59dc23 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -45,11 +45,27 @@
 #include "accommon.h"
 #include "acevents.h"
 #include "acnamesp.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evgpeinit")
 
+/*
+ * Note: History of _PRW support in ACPICA
+ *
+ * Originally (2000 - 2010), the GPE initialization code performed a walk of
+ * the entire namespace to execute the _PRW methods and detect all GPEs
+ * capable of waking the system.
+ *
+ * As of 10/2010, the _PRW method execution has been removed since it is
+ * actually unnecessary. The host OS must in fact execute all _PRW methods
+ * in order to identify the device/power-resource dependencies. We now put
+ * the onus on the host OS to identify the wake GPEs as part of this process
+ * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
+ * not only reduces the complexity of the ACPICA initialization code, but in
+ * some cases (on systems with very large namespaces) it should reduce the
+ * kernel boot time as well.
+ */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_gpe_initialize
@@ -222,7 +238,7 @@
 	acpi_status status = AE_OK;
 
 	/*
-	 * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
+	 * Find any _Lxx/_Exx GPE methods that have just been loaded.
 	 *
 	 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately
 	 * enabled.
@@ -235,9 +251,9 @@
 		return;
 	}
 
+	walk_info.count = 0;
 	walk_info.owner_id = table_owner_id;
 	walk_info.execute_by_owner_id = TRUE;
-	walk_info.count = 0;
 
 	/* Walk the interrupt level descriptor list */
 
@@ -298,7 +314,7 @@
  *                  xx     - is the GPE number [in HEX]
  *
  * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
- *    with that owner.
+ * with that owner.
  *
  ******************************************************************************/
 
@@ -415,6 +431,7 @@
 	 * Add the GPE information from above to the gpe_event_info block for
 	 * use during dispatch of this GPE.
 	 */
+	gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
 	gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
 	gpe_event_info->dispatch.method_node = method_node;
 
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 19a0e51..10e4774 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -154,6 +154,45 @@
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ev_get_gpe_device
+ *
+ * PARAMETERS:  GPE_WALK_CALLBACK
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
+ *              block device. NULL if the GPE is one of the FADT-defined GPEs.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+		       struct acpi_gpe_block_info *gpe_block, void *context)
+{
+	struct acpi_gpe_device_info *info = context;
+
+	/* Increment Index by the number of GPEs in this block */
+
+	info->next_block_base_index += gpe_block->gpe_count;
+
+	if (info->index < info->next_block_base_index) {
+		/*
+		 * The GPE index is within this block, get the node. Leave the node
+		 * NULL for the FADT-defined GPEs
+		 */
+		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
+			info->gpe_device = gpe_block->node;
+		}
+
+		info->status = AE_OK;
+		return (AE_CTRL_END);
+	}
+
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
  *
  * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index fcaed9f..38bba66 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -284,41 +284,39 @@
  * RETURN:      ACPI_INTERRUPT_HANDLED
  *
  * DESCRIPTION: Invoked directly from the SCI handler when a global lock
- *              release interrupt occurs. Attempt to acquire the global lock,
- *              if successful, signal the thread waiting for the lock.
+ *              release interrupt occurs.  If there's a thread waiting for
+ *              the global lock, signal it.
  *
  * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
  * this is not possible for some reason, a separate thread will have to be
  * scheduled to do this.
  *
  ******************************************************************************/
+static u8 acpi_ev_global_lock_pending;
 
 static u32 acpi_ev_global_lock_handler(void *context)
 {
-	u8 acquired = FALSE;
+	acpi_status status;
+	acpi_cpu_flags flags;
 
-	/*
-	 * Attempt to get the lock.
-	 *
-	 * If we don't get it now, it will be marked pending and we will
-	 * take another interrupt when it becomes free.
-	 */
-	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-	if (acquired) {
+	flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-		/* Got the lock, now wake all threads waiting for it */
-
-		acpi_gbl_global_lock_acquired = TRUE;
-		/* Send a unit to the semaphore */
-
-		if (ACPI_FAILURE
-		    (acpi_os_signal_semaphore
-		     (acpi_gbl_global_lock_semaphore, 1))) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not signal Global Lock semaphore"));
-		}
+	if (!acpi_ev_global_lock_pending) {
+		goto out;
 	}
 
+	/* Send a unit to the semaphore */
+
+	status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
+	}
+
+	acpi_ev_global_lock_pending = FALSE;
+
+ out:
+	acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+
 	return (ACPI_INTERRUPT_HANDLED);
 }
 
@@ -415,6 +413,7 @@
 
 acpi_status acpi_ev_acquire_global_lock(u16 timeout)
 {
+	acpi_cpu_flags flags;
 	acpi_status status = AE_OK;
 	u8 acquired = FALSE;
 
@@ -467,32 +466,47 @@
 		return_ACPI_STATUS(AE_OK);
 	}
 
-	/* Attempt to acquire the actual hardware lock */
+	flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-	ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
-	if (acquired) {
+	do {
 
-		/* We got the lock */
+		/* Attempt to acquire the actual hardware lock */
 
+		ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
+		if (acquired) {
+			acpi_gbl_global_lock_acquired = TRUE;
+
+			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+					  "Acquired hardware Global Lock\n"));
+			break;
+		}
+
+		acpi_ev_global_lock_pending = TRUE;
+
+		acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
+
+		/*
+		 * Did not get the lock. The pending bit was set above, and we
+		 * must wait until we get the global lock released interrupt.
+		 */
 		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-				  "Acquired hardware Global Lock\n"));
+				  "Waiting for hardware Global Lock\n"));
 
-		acpi_gbl_global_lock_acquired = TRUE;
-		return_ACPI_STATUS(AE_OK);
-	}
+		/*
+		 * Wait for handshake with the global lock interrupt handler.
+		 * This interface releases the interpreter if we must wait.
+		 */
+		status = acpi_ex_system_wait_semaphore(
+						acpi_gbl_global_lock_semaphore,
+						ACPI_WAIT_FOREVER);
 
-	/*
-	 * Did not get the lock. The pending bit was set above, and we must now
-	 * wait until we get the global lock released interrupt.
-	 */
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+		flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
 
-	/*
-	 * Wait for handshake with the global lock interrupt handler.
-	 * This interface releases the interpreter if we must wait.
-	 */
-	status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
-					       ACPI_WAIT_FOREVER);
+	} while (ACPI_SUCCESS(status));
+
+	acpi_ev_global_lock_pending = FALSE;
+
+	acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
 
 	return_ACPI_STATUS(status);
 }
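
The reworked interrupt-side logic above only signals the semaphore when a waiter has set acpi_ev_global_lock_pending; from a caller's point of view the public interfaces behave as before. A minimal sketch of that caller-visible contract (the example_ name is a placeholder):

#include <acpi/acpi.h>

static acpi_status example_use_global_lock(void)
{
	u32 handle;
	acpi_status status;

	/* Blocks (looping inside acpi_ev_acquire_global_lock) until owned. */
	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &handle);
	if (ACPI_FAILURE(status))
		return status;

	/* ... access hardware shared with the firmware ... */

	return acpi_release_global_lock(handle);
}
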
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 36af222..1226689 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -92,6 +92,57 @@
 
 ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
 #endif				/*  ACPI_FUTURE_USAGE  */
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_global_event_handler
+ *
+ * PARAMETERS:  Handler         - Pointer to the global event handler function
+ *              Context         - Value passed to the handler on each event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function. The global handler
+ *              is invoked upon each incoming GPE and Fixed Event. It is
+ *              invoked at interrupt level at the time of the event dispatch.
+ *              Can be used to update event counters, etc.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
+
+	/* Parameter validation */
+
+	if (!handler) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Don't allow two handlers. */
+
+	if (acpi_gbl_global_event_handler) {
+		status = AE_ALREADY_EXISTS;
+		goto cleanup;
+	}
+
+	acpi_gbl_global_event_handler = handler;
+	acpi_gbl_global_event_handler_context = context;
+
+      cleanup:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
+
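
A minimal usage sketch for the interface just added (not part of the patch); the example_ names are hypothetical. The handler runs at interrupt level for every GPE and fixed event, mirroring the acpi_gpe_count/acpi_fixed_event_count bookkeeping done internally:

#include <acpi/acpi.h>

static u32 example_counts[2];	/* [0] = GPEs, [1] = fixed events */

static void example_global_event_handler(u32 event_type, acpi_handle device,
					 u32 event_number, void *context)
{
	u32 *counts = context;

	if (event_type == ACPI_EVENT_TYPE_GPE)
		counts[0]++;
	else if (event_type == ACPI_EVENT_TYPE_FIXED)
		counts[1]++;
}

static acpi_status example_register(void)
{
	return acpi_install_global_event_handler(example_global_event_handler,
						 example_counts);
}
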
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_fixed_event_handler
@@ -671,10 +722,10 @@
 acpi_status
 acpi_install_gpe_handler(acpi_handle gpe_device,
 			 u32 gpe_number,
-			 u32 type, acpi_event_handler address, void *context)
+			 u32 type, acpi_gpe_handler address, void *context)
 {
 	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_handler_info *handler;
+	struct acpi_gpe_handler_info *handler;
 	acpi_status status;
 	acpi_cpu_flags flags;
 
@@ -693,7 +744,7 @@
 
 	/* Allocate memory for the handler object */
 
-	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
 	if (!handler) {
 		status = AE_NO_MEMORY;
 		goto unlock_and_exit;
@@ -722,7 +773,7 @@
 	handler->address = address;
 	handler->context = context;
 	handler->method_node = gpe_event_info->dispatch.method_node;
-	handler->orig_flags = gpe_event_info->flags &
+	handler->original_flags = gpe_event_info->flags &
 			(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
 
 	/*
@@ -731,10 +782,10 @@
 	 * disabled now to avoid spurious execution of the handler.
 	 */
 
-	if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
+	if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
 	    && gpe_event_info->runtime_count) {
-		handler->orig_enabled = 1;
-		(void)acpi_raw_disable_gpe(gpe_event_info);
+		handler->originally_enabled = 1;
+		(void)acpi_ev_remove_gpe_reference(gpe_event_info);
 	}
 
 	/* Install the handler */
@@ -777,10 +828,10 @@
  ******************************************************************************/
 acpi_status
 acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_event_handler address)
+			u32 gpe_number, acpi_gpe_handler address)
 {
 	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_handler_info *handler;
+	struct acpi_gpe_handler_info *handler;
 	acpi_status status;
 	acpi_cpu_flags flags;
 
@@ -835,7 +886,7 @@
 	gpe_event_info->dispatch.method_node = handler->method_node;
 	gpe_event_info->flags &=
 		~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
-	gpe_event_info->flags |= handler->orig_flags;
+	gpe_event_info->flags |= handler->original_flags;
 
 	/*
 	 * If the GPE was previously associated with a method and it was
@@ -843,9 +894,9 @@
 	 * post-initialization configuration.
 	 */
 
-	if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD)
-	    && handler->orig_enabled)
-		(void)acpi_raw_enable_gpe(gpe_event_info);
+	if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
+	    && handler->originally_enabled)
+		(void)acpi_ev_add_gpe_reference(gpe_event_info);
 
 	/* Now we can free the handler object */
 
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index a1dabe3..90488c1 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -43,18 +43,11 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "acevents.h"
-#include "acnamesp.h"
 #include "actables.h"
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfevnt")
 
-/* Local prototypes */
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-		       struct acpi_gpe_block_info *gpe_block, void *context);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_enable
@@ -213,185 +206,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_gpe_wakeup
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *              Action          - Enable or Disable
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit.
- *
- ******************************************************************************/
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_cpu_flags flags;
-	u32 register_bit;
-
-	ACPI_FUNCTION_TRACE(acpi_gpe_wakeup);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	gpe_register_info = gpe_event_info->register_info;
-	if (!gpe_register_info) {
-		status = AE_NOT_EXIST;
-		goto unlock_and_exit;
-	}
-
-	register_bit =
-	    acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
-
-	/* Perform the action */
-
-	switch (action) {
-	case ACPI_GPE_ENABLE:
-		ACPI_SET_BIT(gpe_register_info->enable_for_wake,
-			     (u8)register_bit);
-		break;
-
-	case ACPI_GPE_DISABLE:
-		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
-			       (u8)register_bit);
-		break;
-
-	default:
-		ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
-		status = AE_BAD_PARAMETER;
-		break;
-	}
-
-unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_enable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
- *              hardware-enabled.
- *
- ******************************************************************************/
-acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		status = acpi_raw_enable_gpe(gpe_event_info);
-	}
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_disable_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a reference to a GPE. When the last reference is
- *              removed, only then is the GPE disabled (for runtime GPEs), or
- *              the GPE mask bit disabled (for wake GPEs)
- *
- ******************************************************************************/
-acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_BAD_PARAMETER;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		status = acpi_raw_disable_gpe(gpe_event_info) ;
-	}
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_gpe_can_wake
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE.  If the GPE
- *              has a corresponding method and is currently enabled, disable it
- *              (GPEs with corresponding methods are enabled unconditionally
- *              during initialization, but GPEs that can wake up are expected
- *              to be initially disabled).
- *
- ******************************************************************************/
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_gpe_can_wake);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-	} else {
-		status = AE_BAD_PARAMETER;
-	}
-
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake)
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_disable_event
  *
 * PARAMETERS:  Event           - The fixed event to be disabled
@@ -483,44 +297,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_clear_gpe
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear an ACPI event (general purpose)
- *
- ******************************************************************************/
-acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	status = acpi_hw_clear_gpe(gpe_event_info);
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
-/*******************************************************************************
- *
  * FUNCTION:    acpi_get_event_status
  *
  * PARAMETERS:  Event           - The fixed event
@@ -575,379 +351,3 @@
 }
 
 ACPI_EXPORT_SYMBOL(acpi_get_event_status)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_status
- *
- * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
- *              gpe_number      - GPE level within the GPE block
- *              event_status    - Where the current status of the event will
- *                                be returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Get status of an event (general purpose)
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_status(acpi_handle gpe_device,
-		    u32 gpe_number, acpi_event_status *event_status)
-{
-	acpi_status status = AE_OK;
-	struct acpi_gpe_event_info *gpe_event_info;
-	acpi_cpu_flags flags;
-
-	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
-
-	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
-
-	/* Ensure that we have a valid GPE number */
-
-	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (!gpe_event_info) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Obtain status on the requested GPE number */
-
-	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
-
-	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
-		*event_status |= ACPI_EVENT_FLAG_HANDLE;
-
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_install_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *              gpe_block_address   - Address and space_iD
- *              register_count      - Number of GPE register pairs in the block
- *              interrupt_number    - H/W interrupt for the block
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create and Install a block of GPE registers
- *
- ******************************************************************************/
-acpi_status
-acpi_install_gpe_block(acpi_handle gpe_device,
-		       struct acpi_generic_address *gpe_block_address,
-		       u32 register_count, u32 interrupt_number)
-{
-	acpi_status status = AE_OK;
-	union acpi_operand_object *obj_desc;
-	struct acpi_namespace_node *node;
-	struct acpi_gpe_block_info *gpe_block;
-
-	ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
-
-	if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_validate_handle(gpe_device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/*
-	 * For user-installed GPE Block Devices, the gpe_block_base_number
-	 * is always zero
-	 */
-	status =
-	    acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
-				     interrupt_number, &gpe_block);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Install block in the device_object attached to the node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc) {
-
-		/*
-		 * No object, create a new one (Device nodes do not always have
-		 * an attached object)
-		 */
-		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
-		if (!obj_desc) {
-			status = AE_NO_MEMORY;
-			goto unlock_and_exit;
-		}
-
-		status =
-		    acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
-
-		/* Remove local reference to the object */
-
-		acpi_ut_remove_reference(obj_desc);
-
-		if (ACPI_FAILURE(status)) {
-			goto unlock_and_exit;
-		}
-	}
-
-	/* Now install the GPE block in the device_object */
-
-	obj_desc->device.gpe_block = gpe_block;
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_remove_gpe_block
- *
- * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Remove a previously installed block of GPE registers
- *
- ******************************************************************************/
-acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
-{
-	union acpi_operand_object *obj_desc;
-	acpi_status status;
-	struct acpi_namespace_node *node;
-
-	ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
-
-	if (!gpe_device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_validate_handle(gpe_device);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Get the device_object attached to the node */
-
-	obj_desc = acpi_ns_get_attached_object(node);
-	if (!obj_desc || !obj_desc->device.gpe_block) {
-		return_ACPI_STATUS(AE_NULL_OBJECT);
-	}
-
-	/* Delete the GPE block (but not the device_object) */
-
-	status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
-	if (ACPI_SUCCESS(status)) {
-		obj_desc->device.gpe_block = NULL;
-	}
-
-      unlock_and_exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_gpe_device
- *
- * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
- *              gpe_device          - Where the parent GPE Device is returned
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
- *              gpe device indicates that the gpe number is contained in one of
- *              the FADT-defined gpe blocks. Otherwise, the GPE block device.
- *
- ******************************************************************************/
-acpi_status
-acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
-{
-	struct acpi_gpe_device_info info;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
-
-	if (!gpe_device) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (index >= acpi_current_gpe_count) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
-	}
-
-	/* Setup and walk the GPE list */
-
-	info.index = index;
-	info.status = AE_NOT_EXIST;
-	info.gpe_device = NULL;
-	info.next_block_base_index = 0;
-
-	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	*gpe_device = info.gpe_device;
-	return_ACPI_STATUS(info.status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_get_gpe_device
- *
- * PARAMETERS:  GPE_WALK_CALLBACK
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
- *              block device. NULL if the GPE is one of the FADT-defined GPEs.
- *
- ******************************************************************************/
-static acpi_status
-acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
-		       struct acpi_gpe_block_info *gpe_block, void *context)
-{
-	struct acpi_gpe_device_info *info = context;
-
-	/* Increment Index by the number of GPEs in this block */
-
-	info->next_block_base_index += gpe_block->gpe_count;
-
-	if (info->index < info->next_block_base_index) {
-		/*
-		 * The GPE index is within this block, get the node. Leave the node
-		 * NULL for the FADT-defined GPEs
-		 */
-		if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
-			info->gpe_device = gpe_block->node;
-		}
-
-		info->status = AE_OK;
-		return (AE_CTRL_END);
-	}
-
-	return (AE_OK);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_disable_all_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_disable_all_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_disable_all_gpes();
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_enable_all_runtime_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
- *
- ******************************************************************************/
-
-acpi_status acpi_enable_all_runtime_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	}
-
-	status = acpi_hw_enable_all_runtime_gpes();
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-	return_ACPI_STATUS(status);
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_update_gpes
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and
- *              are not pointed to by any device _PRW methods indicating that
- *              these GPEs are generally intended for system or device wakeup
- *              (such GPEs have to be enabled directly when the devices whose
- *              _PRW methods point to them are set up for wakeup signaling).
- *
- ******************************************************************************/
-
-acpi_status acpi_update_gpes(void)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_update_gpes);
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
-	} else if (acpi_all_gpes_initialized) {
-		goto unlock;
-	}
-
-	status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
-	if (ACPI_SUCCESS(status)) {
-		acpi_all_gpes_initialized = TRUE;
-	}
-
-unlock:
-	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-
-	return_ACPI_STATUS(status);
-}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
new file mode 100644
index 0000000..416845b
--- /dev/null
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -0,0 +1,669 @@
+/******************************************************************************
+ *
+ * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2010, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acevents.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_EVENTS
+ACPI_MODULE_NAME("evxfgpe")
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_update_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Complete GPE initialization and enable all GPEs that have
+ *              associated _Lxx or _Exx methods and are not pointed to by any
+ *              device _PRW methods (this indicates that these GPEs are
+ *              generally intended for system or device wakeup. Such GPEs
+ *              have to be enabled directly when the devices whose _PRW
+ *              methods point to them are set up for wakeup signaling.)
+ *
+ * NOTE: Should be called after any GPEs are added to the system. Primarily,
+ * after the system _PRW methods have been run, but also after a GPE Block
+ * Device has been added or if any new GPE methods have been added via a
+ * dynamic table load.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_update_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_update_all_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	if (acpi_gbl_all_gpes_initialized) {
+		goto unlock_and_exit;
+	}
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL);
+	if (ACPI_SUCCESS(status)) {
+		acpi_gbl_all_gpes_initialized = TRUE;
+	}
+
+unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
+ *              hardware-enabled.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (gpe_event_info) {
+		status = acpi_ev_add_gpe_reference(gpe_event_info);
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a reference to a GPE. When the last reference is
+ *              removed, only then is the GPE disabled (for runtime GPEs), or
+ *              the GPE mask bit disabled (for wake GPEs)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (gpe_event_info) {
+		status = acpi_ev_remove_gpe_reference(gpe_event_info);
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_setup_gpe_for_wake
+ *
+ * PARAMETERS:  wake_device         - Device associated with the GPE (via _PRW)
+ *              gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number          - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. This
+ *              interface is intended to be used as the host executes the
+ *              _PRW methods (Power Resources for Wake) in the system tables.
+ *              Each _PRW appears under a Device Object (The wake_device), and
+ *              contains the info for the wake GPE associated with the
+ *              wake_device.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle wake_device,
+			acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_namespace_node *device_node;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
+
+	/* Parameter Validation */
+
+	if (!wake_device) {
+		/*
+		 * By forcing wake_device to be valid, we automatically enable the
+		 * implicit notify feature on all hosts.
+		 */
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Validate wake_device is of type Device */
+
+	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+	if (device_node->type != ACPI_TYPE_DEVICE) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (gpe_event_info) {
+		/*
+		 * If there is no method or handler for this GPE, then the
+		 * wake_device will be notified whenever this GPE fires (aka
+		 * "implicit notify") Note: The GPE is assumed to be
+		 * level-triggered (for windows compatibility).
+		 */
+		if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+		    ACPI_GPE_DISPATCH_NONE) {
+			gpe_event_info->flags =
+			    (ACPI_GPE_DISPATCH_NOTIFY |
+			     ACPI_GPE_LEVEL_TRIGGERED);
+			gpe_event_info->dispatch.device_node = device_node;
+		}
+
+		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+		status = AE_OK;
+	}
+
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_gpe_wake_mask
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *              Action          - Enable or Disable
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must
+ *              already be marked as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+	struct acpi_gpe_register_info *gpe_register_info;
+	acpi_cpu_flags flags;
+	u32 register_bit;
+
+	ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/*
+	 * Ensure that we have a valid GPE number and that this GPE is in
+	 * fact a wake GPE
+	 */
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
+		status = AE_TYPE;
+		goto unlock_and_exit;
+	}
+
+	gpe_register_info = gpe_event_info->register_info;
+	if (!gpe_register_info) {
+		status = AE_NOT_EXIST;
+		goto unlock_and_exit;
+	}
+
+	register_bit =
+	    acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
+	/* Perform the action */
+
+	switch (action) {
+	case ACPI_GPE_ENABLE:
+		ACPI_SET_BIT(gpe_register_info->enable_for_wake,
+			     (u8)register_bit);
+		break;
+
+	case ACPI_GPE_DISABLE:
+		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+			       (u8)register_bit);
+		break;
+
+	default:
+		ACPI_ERROR((AE_INFO, "%u, Invalid action", action));
+		status = AE_BAD_PARAMETER;
+		break;
+	}
+
+unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_clear_gpe
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	status = acpi_hw_clear_gpe(gpe_event_info);
+
+unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_status
+ *
+ * PARAMETERS:  gpe_device      - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number      - GPE level within the GPE block
+ *              event_status    - Where the current status of the event will
+ *                                be returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+		    u32 gpe_number, acpi_event_status *event_status)
+{
+	acpi_status status = AE_OK;
+	struct acpi_gpe_event_info *gpe_event_info;
+	acpi_cpu_flags flags;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+	/* Ensure that we have a valid GPE number */
+
+	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+	if (!gpe_event_info) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Obtain status on the requested GPE number */
+
+	status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+	if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+		*event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+unlock_and_exit:
+	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_disable_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Disable and clear all GPEs in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_disable_all_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_disable_all_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_disable_all_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_enable_all_runtime_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks
+ *
+ ******************************************************************************/
+
+acpi_status acpi_enable_all_runtime_gpes(void)
+{
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes);
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	status = acpi_hw_enable_all_runtime_gpes();
+	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *              gpe_block_address   - Address and space_ID
+ *              register_count      - Number of GPE register pairs in the block
+ *              interrupt_number    - H/W interrupt for the block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not
+ *              enabled here.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+		       struct acpi_generic_address *gpe_block_address,
+		       u32 register_count, u32 interrupt_number)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node;
+	struct acpi_gpe_block_info *gpe_block;
+
+	ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+	if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_validate_handle(gpe_device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * For user-installed GPE Block Devices, the gpe_block_base_number
+	 * is always zero
+	 */
+	status =
+	    acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+				     interrupt_number, &gpe_block);
+	if (ACPI_FAILURE(status)) {
+		goto unlock_and_exit;
+	}
+
+	/* Install block in the device_object attached to the node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+
+		/*
+		 * No object, create a new one (Device nodes do not always have
+		 * an attached object)
+		 */
+		obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+		if (!obj_desc) {
+			status = AE_NO_MEMORY;
+			goto unlock_and_exit;
+		}
+
+		status =
+		    acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+		/* Remove local reference to the object */
+
+		acpi_ut_remove_reference(obj_desc);
+
+		if (ACPI_FAILURE(status)) {
+			goto unlock_and_exit;
+		}
+	}
+
+	/* Now install the GPE block in the device_object */
+
+	obj_desc->device.gpe_block = gpe_block;
+
+unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_gpe_block
+ *
+ * PARAMETERS:  gpe_device          - Handle to the parent GPE Block Device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+	union acpi_operand_object *obj_desc;
+	acpi_status status;
+	struct acpi_namespace_node *node;
+
+	ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
+	node = acpi_ns_validate_handle(gpe_device);
+	if (!node) {
+		status = AE_BAD_PARAMETER;
+		goto unlock_and_exit;
+	}
+
+	/* Get the device_object attached to the node */
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc || !obj_desc->device.gpe_block) {
+		return_ACPI_STATUS(AE_NULL_OBJECT);
+	}
+
+	/* Delete the GPE block (but not the device_object) */
+
+	status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+	if (ACPI_SUCCESS(status)) {
+		obj_desc->device.gpe_block = NULL;
+	}
+
+unlock_and_exit:
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_gpe_device
+ *
+ * PARAMETERS:  Index               - System GPE index (0-current_gpe_count)
+ *              gpe_device          - Where the parent GPE Device is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL
+ *              gpe_device indicates that the GPE number is contained in one of
+ *              the FADT-defined GPE blocks; otherwise, the handle of the GPE
+ *              block device is returned.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+{
+	struct acpi_gpe_device_info info;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_get_gpe_device);
+
+	if (!gpe_device) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (index >= acpi_current_gpe_count) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Setup and walk the GPE list */
+
+	info.index = index;
+	info.status = AE_NOT_EXIST;
+	info.gpe_device = NULL;
+	info.next_block_base_index = 0;
+
+	status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	*gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device);
+	return_ACPI_STATUS(info.status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
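
For orientation, a minimal sketch of how a host driver might use the reference-counted GPE interfaces introduced by the new evxfgpe.c above. The device handle and GPE number below are hypothetical placeholders, not values taken from this patch; only the function names, signatures and constants come from the file itself.

#include <acpi/acpi.h>

/* Hypothetical example: hook up GPE 0x16 from a device's _PRW for wakeup. */
static acpi_status example_setup_wake_gpe(acpi_handle wake_device)
{
	acpi_status status;

	/* Mark the GPE as wake-capable and bind it to the wake device */
	status = acpi_setup_gpe_for_wake(wake_device, NULL, 0x16);
	if (ACPI_FAILURE(status))
		return status;

	/* First reference hardware-enables the GPE for runtime use */
	status = acpi_enable_gpe(NULL, 0x16);
	if (ACPI_FAILURE(status))
		return status;

	/* Allow the GPE to wake the system across a sleep state */
	return acpi_set_gpe_wake_mask(NULL, 0x16, ACPI_GPE_ENABLE);
}

A matching acpi_disable_gpe(NULL, 0x16) call drops the reference, and the GPE is only disabled in hardware once the last reference is gone, which is the behaviour implemented by the acpi_ev_add_gpe_reference()/acpi_ev_remove_gpe_reference() pair used above.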
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 14750db..85c3cbd 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -62,10 +62,10 @@
  * PARAMETERS:	gpe_event_info	    - Info block for the GPE
  *		gpe_register_info   - Info block for the GPE register
  *
- * RETURN:	Status
+ * RETURN:	Register mask with a one in the GPE bit position
  *
- * DESCRIPTION:	Compute GPE enable mask with one bit corresponding to the given
- *		GPE set.
+ * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the
+ *              correct position for the input GPE.
  *
  ******************************************************************************/
 
@@ -85,12 +85,12 @@
  *
  * RETURN:	Status
  *
- * DESCRIPTION: Enable or disable a single GPE in its enable register.
+ * DESCRIPTION: Enable or disable a single GPE in the parent enable register.
  *
  ******************************************************************************/
 
 acpi_status
-acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action)
+acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
 {
 	struct acpi_gpe_register_info *gpe_register_info;
 	acpi_status status;
@@ -113,14 +113,20 @@
 		return (status);
 	}
 
-	/* Set ot clear just the bit that corresponds to this GPE */
+	/* Set or clear just the bit that corresponds to this GPE */
 
 	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
 						gpe_register_info);
 	switch (action) {
-	case ACPI_GPE_COND_ENABLE:
-		if (!(register_bit & gpe_register_info->enable_for_run))
+	case ACPI_GPE_CONDITIONAL_ENABLE:
+
+		/* Only enable if the enable_for_run bit is set */
+
+		if (!(register_bit & gpe_register_info->enable_for_run)) {
 			return (AE_BAD_PARAMETER);
+		}
+
+		/*lint -fallthrough */
 
 	case ACPI_GPE_ENABLE:
 		ACPI_SET_BIT(enable_mask, register_bit);
@@ -131,7 +137,7 @@
 		break;
 
 	default:
-		ACPI_ERROR((AE_INFO, "Invalid action\n"));
+		ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action));
 		return (AE_BAD_PARAMETER);
 	}
 
@@ -168,13 +174,13 @@
 		return (AE_NOT_EXIST);
 	}
 
-	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
-						gpe_register_info);
-
 	/*
 	 * Write a one to the appropriate bit in the status register to
 	 * clear this GPE.
 	 */
+	register_bit =
+	    acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info);
+
 	status = acpi_hw_write(register_bit,
 			       &gpe_register_info->status_address);
 
@@ -201,8 +207,8 @@
 	u32 in_byte;
 	u32 register_bit;
 	struct acpi_gpe_register_info *gpe_register_info;
-	acpi_status status;
 	acpi_event_status local_event_status = 0;
+	acpi_status status;
 
 	ACPI_FUNCTION_ENTRY();
 
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index e87bc67..508537f 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -768,7 +768,7 @@
 	acpi_gbl_gpe_fadt_blocks[0] = NULL;
 	acpi_gbl_gpe_fadt_blocks[1] = NULL;
 	acpi_current_gpe_count = 0;
-	acpi_all_gpes_initialized = FALSE;
+	acpi_gbl_all_gpes_initialized = FALSE;
 
 	/* Global handlers */
 
@@ -778,6 +778,7 @@
 	acpi_gbl_init_handler = NULL;
 	acpi_gbl_table_handler = NULL;
 	acpi_gbl_interface_handler = NULL;
+	acpi_gbl_global_event_handler = NULL;
 
 	/* Global Lock support */
 
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index d9efa49..199528f 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -85,6 +85,7 @@
 
 	spin_lock_init(acpi_gbl_gpe_lock);
 	spin_lock_init(acpi_gbl_hardware_lock);
+	spin_lock_init(acpi_ev_global_lock_pending_lock);
 
 	/* Mutex for _OSI support */
 	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 18df1e9..ef0581f 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -109,6 +109,8 @@
 		return sizeof(*estatus) + estatus->data_length;
 }
 
+void apei_estatus_print(const char *pfx,
+			const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
 int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
 #endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index f4cf2fc..31464a0 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -46,6 +46,317 @@
 }
 EXPORT_SYMBOL_GPL(cper_next_record_id);
 
+static const char *cper_severity_strs[] = {
+	"recoverable",
+	"fatal",
+	"corrected",
+	"info",
+};
+
+static const char *cper_severity_str(unsigned int severity)
+{
+	return severity < ARRAY_SIZE(cper_severity_strs) ?
+		cper_severity_strs[severity] : "unknown";
+}
+
+/*
+ * cper_print_bits - print strings for set bits
+ * @pfx: prefix for each line, including log level and prefix string
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: number of strings in the @strs array
+ *
+ * For each set bit in @bits, print the corresponding string in @strs.
+ * If the output length is longer than 80, multiple lines will be
+ * printed, with @pfx printed at the beginning of each line.
+ */
+static void cper_print_bits(const char *pfx, unsigned int bits,
+			    const char *strs[], unsigned int strs_size)
+{
+	int i, len = 0;
+	const char *str;
+	char buf[84];
+
+	for (i = 0; i < strs_size; i++) {
+		if (!(bits & (1U << i)))
+			continue;
+		str = strs[i];
+		if (len && len + strlen(str) + 2 > 80) {
+			printk("%s\n", buf);
+			len = 0;
+		}
+		if (!len)
+			len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
+		else
+			len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
+	}
+	if (len)
+		printk("%s\n", buf);
+}
+
+static const char *cper_proc_type_strs[] = {
+	"IA32/X64",
+	"IA64",
+};
+
+static const char *cper_proc_isa_strs[] = {
+	"IA32",
+	"IA64",
+	"X64",
+};
+
+static const char *cper_proc_error_type_strs[] = {
+	"cache error",
+	"TLB error",
+	"bus error",
+	"micro-architectural error",
+};
+
+static const char *cper_proc_op_strs[] = {
+	"unknown or generic",
+	"data read",
+	"data write",
+	"instruction execution",
+};
+
+static const char *cper_proc_flag_strs[] = {
+	"restartable",
+	"precise IP",
+	"overflow",
+	"corrected",
+};
+
+static void cper_print_proc_generic(const char *pfx,
+				    const struct cper_sec_proc_generic *proc)
+{
+	if (proc->validation_bits & CPER_PROC_VALID_TYPE)
+		printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
+		       proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
+		       cper_proc_type_strs[proc->proc_type] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_ISA)
+		printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
+		       proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
+		       cper_proc_isa_strs[proc->proc_isa] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
+		printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
+		cper_print_bits(pfx, proc->proc_error_type,
+				cper_proc_error_type_strs,
+				ARRAY_SIZE(cper_proc_error_type_strs));
+	}
+	if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
+		printk("%s""operation: %d, %s\n", pfx, proc->operation,
+		       proc->operation < ARRAY_SIZE(cper_proc_op_strs) ?
+		       cper_proc_op_strs[proc->operation] : "unknown");
+	if (proc->validation_bits & CPER_PROC_VALID_FLAGS) {
+		printk("%s""flags: 0x%02x\n", pfx, proc->flags);
+		cper_print_bits(pfx, proc->flags, cper_proc_flag_strs,
+				ARRAY_SIZE(cper_proc_flag_strs));
+	}
+	if (proc->validation_bits & CPER_PROC_VALID_LEVEL)
+		printk("%s""level: %d\n", pfx, proc->level);
+	if (proc->validation_bits & CPER_PROC_VALID_VERSION)
+		printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version);
+	if (proc->validation_bits & CPER_PROC_VALID_ID)
+		printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id);
+	if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS)
+		printk("%s""target_address: 0x%016llx\n",
+		       pfx, proc->target_addr);
+	if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID)
+		printk("%s""requestor_id: 0x%016llx\n",
+		       pfx, proc->requestor_id);
+	if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID)
+		printk("%s""responder_id: 0x%016llx\n",
+		       pfx, proc->responder_id);
+	if (proc->validation_bits & CPER_PROC_VALID_IP)
+		printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
+}
+
+static const char *cper_mem_err_type_strs[] = {
+	"unknown",
+	"no error",
+	"single-bit ECC",
+	"multi-bit ECC",
+	"single-symbol chipkill ECC",
+	"multi-symbol chipkill ECC",
+	"master abort",
+	"target abort",
+	"parity error",
+	"watchdog timeout",
+	"invalid address",
+	"mirror Broken",
+	"memory sparing",
+	"scrub corrected error",
+	"scrub uncorrected error",
+};
+
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+{
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+	if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)
+		printk("%s""physical_address: 0x%016llx\n",
+		       pfx, mem->physical_addr);
+	if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK)
+		printk("%s""physical_address_mask: 0x%016llx\n",
+		       pfx, mem->physical_addr_mask);
+	if (mem->validation_bits & CPER_MEM_VALID_NODE)
+		printk("%s""node: %d\n", pfx, mem->node);
+	if (mem->validation_bits & CPER_MEM_VALID_CARD)
+		printk("%s""card: %d\n", pfx, mem->card);
+	if (mem->validation_bits & CPER_MEM_VALID_MODULE)
+		printk("%s""module: %d\n", pfx, mem->module);
+	if (mem->validation_bits & CPER_MEM_VALID_BANK)
+		printk("%s""bank: %d\n", pfx, mem->bank);
+	if (mem->validation_bits & CPER_MEM_VALID_DEVICE)
+		printk("%s""device: %d\n", pfx, mem->device);
+	if (mem->validation_bits & CPER_MEM_VALID_ROW)
+		printk("%s""row: %d\n", pfx, mem->row);
+	if (mem->validation_bits & CPER_MEM_VALID_COLUMN)
+		printk("%s""column: %d\n", pfx, mem->column);
+	if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION)
+		printk("%s""bit_position: %d\n", pfx, mem->bit_pos);
+	if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID)
+		printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id);
+	if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID)
+		printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id);
+	if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID)
+		printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id);
+	if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) {
+		u8 etype = mem->error_type;
+		printk("%s""error_type: %d, %s\n", pfx, etype,
+		       etype < ARRAY_SIZE(cper_mem_err_type_strs) ?
+		       cper_mem_err_type_strs[etype] : "unknown");
+	}
+}
+
+static const char *cper_pcie_port_type_strs[] = {
+	"PCIe end point",
+	"legacy PCI end point",
+	"unknown",
+	"unknown",
+	"root port",
+	"upstream switch port",
+	"downstream switch port",
+	"PCIe to PCI/PCI-X bridge",
+	"PCI/PCI-X to PCIe bridge",
+	"root complex integrated endpoint device",
+	"root complex event collector",
+};
+
+static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie)
+{
+	if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
+		printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
+		       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
+		       cper_pcie_port_type_strs[pcie->port_type] : "unknown");
+	if (pcie->validation_bits & CPER_PCIE_VALID_VERSION)
+		printk("%s""version: %d.%d\n", pfx,
+		       pcie->version.major, pcie->version.minor);
+	if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS)
+		printk("%s""command: 0x%04x, status: 0x%04x\n", pfx,
+		       pcie->command, pcie->status);
+	if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) {
+		const __u8 *p;
+		printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx,
+		       pcie->device_id.segment, pcie->device_id.bus,
+		       pcie->device_id.device, pcie->device_id.function);
+		printk("%s""slot: %d\n", pfx,
+		       pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT);
+		printk("%s""secondary_bus: 0x%02x\n", pfx,
+		       pcie->device_id.secondary_bus);
+		printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
+		       pcie->device_id.vendor_id, pcie->device_id.device_id);
+		p = pcie->device_id.class_code;
+		printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+	}
+	if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
+		printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
+		       pcie->serial_number.lower, pcie->serial_number.upper);
+	if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS)
+		printk(
+	"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+	pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+}
+
+static const char *apei_estatus_section_flag_strs[] = {
+	"primary",
+	"containment warning",
+	"reset",
+	"threshold exceeded",
+	"resource not accessible",
+	"latent error",
+};
+
+static void apei_estatus_print_section(
+	const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
+{
+	uuid_le *sec_type = (uuid_le *)gdata->section_type;
+	__u16 severity;
+
+	severity = gdata->error_severity;
+	printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity,
+	       cper_severity_str(severity));
+	printk("%s""flags: 0x%02x\n", pfx, gdata->flags);
+	cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs,
+			ARRAY_SIZE(apei_estatus_section_flag_strs));
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+		printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id);
+	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+		printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
+
+	if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) {
+		struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1);
+		printk("%s""section_type: general processor error\n", pfx);
+		if (gdata->error_data_length >= sizeof(*proc_err))
+			cper_print_proc_generic(pfx, proc_err);
+		else
+			goto err_section_too_small;
+	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+		struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
+		printk("%s""section_type: memory error\n", pfx);
+		if (gdata->error_data_length >= sizeof(*mem_err))
+			cper_print_mem(pfx, mem_err);
+		else
+			goto err_section_too_small;
+	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+		struct cper_sec_pcie *pcie = (void *)(gdata + 1);
+		printk("%s""section_type: PCIe error\n", pfx);
+		if (gdata->error_data_length >= sizeof(*pcie))
+			cper_print_pcie(pfx, pcie);
+		else
+			goto err_section_too_small;
+	} else
+		printk("%s""section type: unknown, %pUl\n", pfx, sec_type);
+
+	return;
+
+err_section_too_small:
+	pr_err(FW_WARN "error section length is too small\n");
+}
+
+void apei_estatus_print(const char *pfx,
+			const struct acpi_hest_generic_status *estatus)
+{
+	struct acpi_hest_generic_data *gdata;
+	unsigned int data_len, gedata_len;
+	int sec_no = 0;
+	__u16 severity;
+
+	printk("%s""APEI generic hardware error status\n", pfx);
+	severity = estatus->error_severity;
+	printk("%s""severity: %d, %s\n", pfx, severity,
+	       cper_severity_str(severity));
+	data_len = estatus->data_length;
+	gdata = (struct acpi_hest_generic_data *)(estatus + 1);
+	while (data_len > sizeof(*gdata)) {
+		gedata_len = gdata->error_data_length;
+		apei_estatus_print_section(pfx, gdata, sec_no);
+		data_len -= gedata_len + sizeof(*gdata);
+		sec_no++;
+	}
+}
+EXPORT_SYMBOL_GPL(apei_estatus_print);
+
 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
 {
 	if (estatus->data_length &&
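
To make the wrapping behaviour of the new cper_print_bits() helper concrete, a small illustrative call follows. The prefix string and bit mask are made up for the example, and the call would have to sit inside cper.c itself, since the helper and the string tables are static there.

	/*
	 * bits = 0x05 sets bit 0 ("restartable") and bit 2 ("overflow") in
	 * cper_proc_flag_strs, so this prints a single line at warning level:
	 *
	 *   HW flags: restartable, overflow
	 *
	 * Had the joined strings exceeded 80 characters, the buffer would be
	 * flushed and a new line started with the same prefix.
	 */
	cper_print_bits(KERN_WARNING "HW flags: ", 0x05,
			cper_proc_flag_strs, ARRAY_SIZE(cper_proc_flag_strs));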
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index cf29df6..096aebf 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -39,7 +39,7 @@
 #define EINJ_PFX "EINJ: "
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 
 /*
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 5850d32..cf6db6b 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -53,7 +53,7 @@
 				     sizeof(struct acpi_table_erst)))
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 #define FIRMWARE_MAX_STALL	50			/* 50us */
 
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d505e5..d1d484d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,10 +12,6 @@
  * For more information about Generic Hardware Error Source, please
  * refer to ACPI Specification version 4.0, section 17.3.2.6
  *
- * Now, only SCI notification type and memory errors are
- * supported. More notification type and hardware error type will be
- * added later.
- *
  * Copyright 2010 Intel Corp.
  *   Author: Huang Ying <ying.huang@intel.com>
  *
@@ -39,14 +35,18 @@
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
+#include <linux/timer.h>
 #include <linux/cper.h>
 #include <linux/kdebug.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
 #include <acpi/apei.h>
 #include <acpi/atomicio.h>
 #include <acpi/hed.h>
 #include <asm/mce.h>
+#include <asm/tlbflush.h>
 
 #include "apei-internal.h"
 
@@ -55,42 +55,131 @@
 #define GHES_ESTATUS_MAX_SIZE		65536
 
 /*
- * One struct ghes is created for each generic hardware error
- * source.
- *
+ * One struct ghes is created for each generic hardware error source.
  * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
- * handler. Handler for one generic hardware error source is only
- * triggered after the previous one is done. So handler can uses
- * struct ghes without locking.
+ * handler.
  *
  * estatus: memory buffer for error status block, allocated during
  * HEST parsing.
  */
 #define GHES_TO_CLEAR		0x0001
+#define GHES_EXITING		0x0002
 
 struct ghes {
 	struct acpi_hest_generic *generic;
 	struct acpi_hest_generic_status *estatus;
-	struct list_head list;
 	u64 buffer_paddr;
 	unsigned long flags;
+	union {
+		struct list_head list;
+		struct timer_list timer;
+		unsigned int irq;
+	};
 };
 
+static int ghes_panic_timeout	__read_mostly = 30;
+
 /*
- * Error source lists, one list for each notification method. The
- * members in lists are struct ghes.
+ * All error sources notified with SCI share one notifier function,
+ * so they need to be linked and checked one by one. The same applies
+ * to NMI.
  *
- * The list members are only added in HEST parsing and deleted during
- * module_exit, that is, single-threaded. So no lock is needed for
- * that.
- *
- * But the mutual exclusion is needed between members adding/deleting
- * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
- * used for that.
+ * RCU is used for these lists, so ghes_list_mutex is only used for
+ * list changing, not for traversing.
  */
 static LIST_HEAD(ghes_sci);
+static LIST_HEAD(ghes_nmi);
 static DEFINE_MUTEX(ghes_list_mutex);
 
+/*
+ * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
+ * mutual exclusion.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
+
+/*
+ * Because the memory area used to transfer hardware error information
+ * from BIOS to Linux can be determined only in NMI, IRQ or timer
+ * handler, but general ioremap can not be used in atomic context, so
+ * a special version of atomic ioremap is implemented for that.
+ */
+
+/*
+ * Two virtual pages are used, one for NMI context, the other for
+ * IRQ/PROCESS context
+ */
+#define GHES_IOREMAP_PAGES		2
+#define GHES_IOREMAP_NMI_PAGE(base)	(base)
+#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
+
+/* virtual memory area for atomic ioremap */
+static struct vm_struct *ghes_ioremap_area;
+/*
+ * These two spinlocks are used to prevent the atomic ioremap virtual
+ * memory areas from being mapped simultaneously.
+ */
+static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
+static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+
+static int ghes_ioremap_init(void)
+{
+	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
+		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
+	if (!ghes_ioremap_area) {
+		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void ghes_ioremap_exit(void)
+{
+	free_vm_area(ghes_ioremap_area);
+}
+
+static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
+{
+	unsigned long vaddr;
+
+	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
+	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+			   pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+	return (void __iomem *)vaddr;
+}
+
+static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
+{
+	unsigned long vaddr;
+
+	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
+	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
+			   pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+	return (void __iomem *)vaddr;
+}
+
+static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+{
+	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+	void *base = ghes_ioremap_area->addr;
+
+	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
+	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+	__flush_tlb_one(vaddr);
+}
+
+static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+{
+	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
+	void *base = ghes_ioremap_area->addr;
+
+	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
+	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
+	__flush_tlb_one(vaddr);
+}
+
 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 {
 	struct ghes *ghes;
@@ -101,7 +190,6 @@
 	if (!ghes)
 		return ERR_PTR(-ENOMEM);
 	ghes->generic = generic;
-	INIT_LIST_HEAD(&ghes->list);
 	rc = acpi_pre_map_gar(&generic->error_status_address);
 	if (rc)
 		goto err_free;
@@ -158,22 +246,41 @@
 	}
 }
 
-/* SCI handler run in work queue, so ioremap can be used here */
-static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
-				 int from_phys)
+static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
+				  int from_phys)
 {
-	void *vaddr;
+	void __iomem *vaddr;
+	unsigned long flags = 0;
+	int in_nmi = in_nmi();
+	u64 offset;
+	u32 trunk;
 
-	vaddr = ioremap_cache(paddr, len);
-	if (!vaddr)
-		return -ENOMEM;
-	if (from_phys)
-		memcpy(buffer, vaddr, len);
-	else
-		memcpy(vaddr, buffer, len);
-	iounmap(vaddr);
-
-	return 0;
+	while (len > 0) {
+		offset = paddr - (paddr & PAGE_MASK);
+		if (in_nmi) {
+			raw_spin_lock(&ghes_ioremap_lock_nmi);
+			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
+		} else {
+			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
+			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
+		}
+		trunk = PAGE_SIZE - offset;
+		trunk = min(trunk, len);
+		if (from_phys)
+			memcpy_fromio(buffer, vaddr + offset, trunk);
+		else
+			memcpy_toio(vaddr + offset, buffer, trunk);
+		len -= trunk;
+		paddr += trunk;
+		buffer += trunk;
+		if (in_nmi) {
+			ghes_iounmap_nmi(vaddr);
+			raw_spin_unlock(&ghes_ioremap_lock_nmi);
+		} else {
+			ghes_iounmap_irq(vaddr);
+			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
+		}
+	}
 }
 
 static int ghes_read_estatus(struct ghes *ghes, int silent)
@@ -194,10 +301,8 @@
 	if (!buf_paddr)
 		return -ENOENT;
 
-	rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
-				   sizeof(*ghes->estatus), 1);
-	if (rc)
-		return rc;
+	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
+			      sizeof(*ghes->estatus), 1);
 	if (!ghes->estatus->block_status)
 		return -ENOENT;
 
@@ -212,17 +317,15 @@
 		goto err_read_block;
 	if (apei_estatus_check_header(ghes->estatus))
 		goto err_read_block;
-	rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
-				   buf_paddr + sizeof(*ghes->estatus),
-				   len - sizeof(*ghes->estatus), 1);
-	if (rc)
-		return rc;
+	ghes_copy_tofrom_phys(ghes->estatus + 1,
+			      buf_paddr + sizeof(*ghes->estatus),
+			      len - sizeof(*ghes->estatus), 1);
 	if (apei_estatus_check(ghes->estatus))
 		goto err_read_block;
 	rc = 0;
 
 err_read_block:
-	if (rc && !silent)
+	if (rc && !silent && printk_ratelimit())
 		pr_warning(FW_WARN GHES_PFX
 			   "Failed to read error status block!\n");
 	return rc;
@@ -255,11 +358,26 @@
 		}
 #endif
 	}
+}
 
-	if (!processed && printk_ratelimit())
-		pr_warning(GHES_PFX
-		"Unknown error record from generic hardware error source: %d\n",
-			   ghes->generic->header.source_id);
+static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+{
+	/* Not more than 2 messages every 5 seconds */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
+
+	if (pfx == NULL) {
+		if (ghes_severity(ghes->estatus->error_severity) <=
+		    GHES_SEV_CORRECTED)
+			pfx = KERN_WARNING HW_ERR;
+		else
+			pfx = KERN_ERR HW_ERR;
+	}
+	if (__ratelimit(&ratelimit)) {
+		printk(
+	"%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+	pfx, ghes->generic->header.source_id);
+		apei_estatus_print(pfx, ghes->estatus);
+	}
 }
 
 static int ghes_proc(struct ghes *ghes)
@@ -269,6 +387,7 @@
 	rc = ghes_read_estatus(ghes, 0);
 	if (rc)
 		goto out;
+	ghes_print_estatus(NULL, ghes);
 	ghes_do_proc(ghes);
 
 out:
@@ -276,6 +395,42 @@
 	return 0;
 }
 
+static void ghes_add_timer(struct ghes *ghes)
+{
+	struct acpi_hest_generic *g = ghes->generic;
+	unsigned long expire;
+
+	if (!g->notify.poll_interval) {
+		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
+			   g->header.source_id);
+		return;
+	}
+	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
+	ghes->timer.expires = round_jiffies_relative(expire);
+	add_timer(&ghes->timer);
+}
+
+static void ghes_poll_func(unsigned long data)
+{
+	struct ghes *ghes = (void *)data;
+
+	ghes_proc(ghes);
+	if (!(ghes->flags & GHES_EXITING))
+		ghes_add_timer(ghes);
+}
+
+static irqreturn_t ghes_irq_func(int irq, void *data)
+{
+	struct ghes *ghes = data;
+	int rc;
+
+	rc = ghes_proc(ghes);
+	if (rc)
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
 static int ghes_notify_sci(struct notifier_block *this,
 				  unsigned long event, void *data)
 {
@@ -292,10 +447,63 @@
 	return ret;
 }
 
+static int ghes_notify_nmi(struct notifier_block *this,
+				  unsigned long cmd, void *data)
+{
+	struct ghes *ghes, *ghes_global = NULL;
+	int sev, sev_global = -1;
+	int ret = NOTIFY_DONE;
+
+	if (cmd != DIE_NMI)
+		return ret;
+
+	raw_spin_lock(&ghes_nmi_lock);
+	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+		if (ghes_read_estatus(ghes, 1)) {
+			ghes_clear_estatus(ghes);
+			continue;
+		}
+		sev = ghes_severity(ghes->estatus->error_severity);
+		if (sev > sev_global) {
+			sev_global = sev;
+			ghes_global = ghes;
+		}
+		ret = NOTIFY_STOP;
+	}
+
+	if (ret == NOTIFY_DONE)
+		goto out;
+
+	if (sev_global >= GHES_SEV_PANIC) {
+		oops_begin();
+		ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+		/* reboot to log the error! */
+		if (panic_timeout == 0)
+			panic_timeout = ghes_panic_timeout;
+		panic("Fatal hardware error!");
+	}
+
+	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+		if (!(ghes->flags & GHES_TO_CLEAR))
+			continue;
+		/* Do not print estatus because printk is not NMI safe */
+		ghes_do_proc(ghes);
+		ghes_clear_estatus(ghes);
+	}
+
+out:
+	raw_spin_unlock(&ghes_nmi_lock);
+	return ret;
+}
+
 static struct notifier_block ghes_notifier_sci = {
 	.notifier_call = ghes_notify_sci,
 };
 
+static struct notifier_block ghes_notifier_nmi = {
+	.notifier_call = ghes_notify_nmi,
+};
+
 static int __devinit ghes_probe(struct platform_device *ghes_dev)
 {
 	struct acpi_hest_generic *generic;
@@ -306,18 +514,27 @@
 	if (!generic->enabled)
 		return -ENODEV;
 
-	if (generic->error_block_length <
-	    sizeof(struct acpi_hest_generic_status)) {
-		pr_warning(FW_BUG GHES_PFX
-"Invalid error block length: %u for generic hardware error source: %d\n",
-			   generic->error_block_length,
+	switch (generic->notify.type) {
+	case ACPI_HEST_NOTIFY_POLLED:
+	case ACPI_HEST_NOTIFY_EXTERNAL:
+	case ACPI_HEST_NOTIFY_SCI:
+	case ACPI_HEST_NOTIFY_NMI:
+		break;
+	case ACPI_HEST_NOTIFY_LOCAL:
+		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
 			   generic->header.source_id);
 		goto err;
+	default:
+		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
+			   generic->notify.type, generic->header.source_id);
+		goto err;
 	}
-	if (generic->records_to_preallocate == 0) {
-		pr_warning(FW_BUG GHES_PFX
-"Invalid records to preallocate: %u for generic hardware error source: %d\n",
-			   generic->records_to_preallocate,
+
+	rc = -EIO;
+	if (generic->error_block_length <
+	    sizeof(struct acpi_hest_generic_status)) {
+		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
+			   generic->error_block_length,
 			   generic->header.source_id);
 		goto err;
 	}
@@ -327,38 +544,43 @@
 		ghes = NULL;
 		goto err;
 	}
-	if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) {
+	switch (generic->notify.type) {
+	case ACPI_HEST_NOTIFY_POLLED:
+		ghes->timer.function = ghes_poll_func;
+		ghes->timer.data = (unsigned long)ghes;
+		init_timer_deferrable(&ghes->timer);
+		ghes_add_timer(ghes);
+		break;
+	case ACPI_HEST_NOTIFY_EXTERNAL:
+		/* External interrupt vector is GSI */
+		if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
+			       generic->header.source_id);
+			goto err;
+		}
+		if (request_irq(ghes->irq, ghes_irq_func,
+				0, "GHES IRQ", ghes)) {
+			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
+			       generic->header.source_id);
+			goto err;
+		}
+		break;
+	case ACPI_HEST_NOTIFY_SCI:
 		mutex_lock(&ghes_list_mutex);
 		if (list_empty(&ghes_sci))
 			register_acpi_hed_notifier(&ghes_notifier_sci);
 		list_add_rcu(&ghes->list, &ghes_sci);
 		mutex_unlock(&ghes_list_mutex);
-	} else {
-		unsigned char *notify = NULL;
-
-		switch (generic->notify.type) {
-		case ACPI_HEST_NOTIFY_POLLED:
-			notify = "POLL";
-			break;
-		case ACPI_HEST_NOTIFY_EXTERNAL:
-		case ACPI_HEST_NOTIFY_LOCAL:
-			notify = "IRQ";
-			break;
-		case ACPI_HEST_NOTIFY_NMI:
-			notify = "NMI";
-			break;
-		}
-		if (notify) {
-			pr_warning(GHES_PFX
-"Generic hardware error source: %d notified via %s is not supported!\n",
-				   generic->header.source_id, notify);
-		} else {
-			pr_warning(FW_WARN GHES_PFX
-"Unknown notification type: %u for generic hardware error source: %d\n",
-			generic->notify.type, generic->header.source_id);
-		}
-		rc = -ENODEV;
-		goto err;
+		break;
+	case ACPI_HEST_NOTIFY_NMI:
+		mutex_lock(&ghes_list_mutex);
+		if (list_empty(&ghes_nmi))
+			register_die_notifier(&ghes_notifier_nmi);
+		list_add_rcu(&ghes->list, &ghes_nmi);
+		mutex_unlock(&ghes_list_mutex);
+		break;
+	default:
+		BUG();
 	}
 	platform_set_drvdata(ghes_dev, ghes);
 
@@ -379,7 +601,14 @@
 	ghes = platform_get_drvdata(ghes_dev);
 	generic = ghes->generic;
 
+	ghes->flags |= GHES_EXITING;
 	switch (generic->notify.type) {
+	case ACPI_HEST_NOTIFY_POLLED:
+		del_timer_sync(&ghes->timer);
+		break;
+	case ACPI_HEST_NOTIFY_EXTERNAL:
+		free_irq(ghes->irq, ghes);
+		break;
 	case ACPI_HEST_NOTIFY_SCI:
 		mutex_lock(&ghes_list_mutex);
 		list_del_rcu(&ghes->list);
@@ -387,12 +616,23 @@
 			unregister_acpi_hed_notifier(&ghes_notifier_sci);
 		mutex_unlock(&ghes_list_mutex);
 		break;
+	case ACPI_HEST_NOTIFY_NMI:
+		mutex_lock(&ghes_list_mutex);
+		list_del_rcu(&ghes->list);
+		if (list_empty(&ghes_nmi))
+			unregister_die_notifier(&ghes_notifier_nmi);
+		mutex_unlock(&ghes_list_mutex);
+		/*
+		 * To synchronize with NMI handler, ghes can only be
+		 * freed after NMI handler finishes.
+		 */
+		synchronize_rcu();
+		break;
 	default:
 		BUG();
 		break;
 	}
 
-	synchronize_rcu();
 	ghes_fini(ghes);
 	kfree(ghes);
 
@@ -412,6 +652,8 @@
 
 static int __init ghes_init(void)
 {
+	int rc;
+
 	if (acpi_disabled)
 		return -ENODEV;
 
@@ -420,12 +662,25 @@
 		return -EINVAL;
 	}
 
-	return platform_driver_register(&ghes_platform_driver);
+	rc = ghes_ioremap_init();
+	if (rc)
+		goto err;
+
+	rc = platform_driver_register(&ghes_platform_driver);
+	if (rc)
+		goto err_ioremap_exit;
+
+	return 0;
+err_ioremap_exit:
+	ghes_ioremap_exit();
+err:
+	return rc;
 }
 
 static void __exit ghes_exit(void)
 {
 	platform_driver_unregister(&ghes_platform_driver);
+	ghes_ioremap_exit();
 }
 
 module_init(ghes_init);
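
The chunking arithmetic in the reworked ghes_copy_tofrom_phys() above is easiest to follow with concrete numbers. The addresses in the trace below are hypothetical and assume 4 KiB pages.

	/*
	 * ghes_copy_tofrom_phys(buf, 0x12345f00, 0x300, 1):
	 *
	 *   pass 1: offset = 0x12345f00 - 0x12345000 = 0xf00
	 *           trunk  = min(0x1000 - 0xf00, 0x300) = 0x100
	 *           -> map pfn 0x12345 into the NMI or IRQ window and
	 *              copy 0x100 bytes from window + 0xf00
	 *   pass 2: paddr = 0x12346000, len = 0x200, offset = 0
	 *           trunk  = min(0x1000, 0x200) = 0x200
	 *           -> map pfn 0x12346 and copy the remaining 0x200 bytes
	 *
	 * Each pass maps exactly one page through the fixed per-context
	 * virtual window and unmaps it again, so the copy never needs a
	 * sleeping ioremap and is safe from NMI, IRQ and timer context.
	 */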
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index daa7bc6..4ee58e7 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -195,24 +195,24 @@
 
 __setup("hest_disable", setup_hest_disable);
 
-static int __init hest_init(void)
+void __init acpi_hest_init(void)
 {
 	acpi_status status;
 	int rc = -ENODEV;
 	unsigned int ghes_count = 0;
 
 	if (acpi_disabled)
-		goto err;
+		return;
 
 	if (hest_disable) {
-		pr_info(HEST_PFX "HEST tabling parsing is disabled.\n");
-		goto err;
+		pr_info(HEST_PFX "Table parsing disabled.\n");
+		return;
 	}
 
 	status = acpi_get_table(ACPI_SIG_HEST, 0,
 				(struct acpi_table_header **)&hest_tab);
 	if (status == AE_NOT_FOUND) {
-		pr_info(HEST_PFX "Table is not found!\n");
+		pr_info(HEST_PFX "Table not found.\n");
 		goto err;
 	} else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
@@ -226,15 +226,11 @@
 		goto err;
 
 	rc = hest_ghes_dev_register(ghes_count);
-	if (rc)
-		goto err;
+	if (!rc) {
+		pr_info(HEST_PFX "Table parsing has been initialized.\n");
+		return;
+	}
 
-	pr_info(HEST_PFX "HEST table parsing is initialized.\n");
-
-	return 0;
 err:
 	hest_disable = 1;
-	return rc;
 }
-
-subsys_initcall(hest_init);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 95649d3..68bc227 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -631,6 +631,17 @@
 	return result;
 }
 
+static void acpi_battery_refresh(struct acpi_battery *battery)
+{
+	if (!battery->bat.dev)
+		return;
+
+	acpi_battery_get_info(battery);
+	/* The battery may have changed its reporting units. */
+	sysfs_remove_battery(battery);
+	sysfs_add_battery(battery);
+}
+
 /* --------------------------------------------------------------------------
                               FS Interface (/proc)
    -------------------------------------------------------------------------- */
@@ -868,6 +879,8 @@
 	struct proc_dir_entry *entry = NULL;
 	int i;
 
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
 	if (!acpi_device_dir(device)) {
 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
 						     acpi_battery_dir);
@@ -914,6 +927,8 @@
 	if (!battery)
 		return;
 	old = battery->bat.dev;
+	if (event == ACPI_BATTERY_NOTIFY_INFO)
+		acpi_battery_refresh(battery);
 	acpi_battery_update(battery);
 	acpi_bus_generate_proc_event(device, event,
 				     acpi_battery_present(battery));
@@ -983,6 +998,7 @@
 	if (!device)
 		return -EINVAL;
 	battery = acpi_driver_data(device);
+	acpi_battery_refresh(battery);
 	battery->update_time = 0;
 	acpi_battery_update(battery);
 	return 0;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d68bd61..7ced61f 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -52,22 +52,6 @@
 
 #define STRUCT_TO_INT(s)	(*((int*)&s))
 
-static int set_power_nocheck(const struct dmi_system_id *id)
-{
-	printk(KERN_NOTICE PREFIX "%s detected - "
-		"disable power check in power transition\n", id->ident);
-	acpi_power_nocheck = 1;
-	return 0;
-}
-static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
-	{
-	set_power_nocheck, "HP Pavilion 05", {
-	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-	DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"),
-	DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL},
-	{},
-};
-
 
 #ifdef CONFIG_X86
 static int set_copy_dsdt(const struct dmi_system_id *id)
@@ -196,33 +180,24 @@
                                  Power Management
    -------------------------------------------------------------------------- */
 
-int acpi_bus_get_power(acpi_handle handle, int *state)
+static int __acpi_bus_get_power(struct acpi_device *device, int *state)
 {
 	int result = 0;
 	acpi_status status = 0;
-	struct acpi_device *device = NULL;
 	unsigned long long psc = 0;
 
-
-	result = acpi_bus_get_device(handle, &device);
-	if (result)
-		return result;
+	if (!device || !state)
+		return -EINVAL;
 
 	*state = ACPI_STATE_UNKNOWN;
 
-	if (!device->flags.power_manageable) {
-		/* TBD: Non-recursive algorithm for walking up hierarchy */
-		if (device->parent)
-			*state = device->parent->power.state;
-		else
-			*state = ACPI_STATE_D0;
-	} else {
+	if (device->flags.power_manageable) {
 		/*
 		 * Get the device's power state either directly (via _PSC) or
 		 * indirectly (via power resources).
 		 */
 		if (device->power.flags.power_resources) {
-			result = acpi_power_get_inferred_state(device);
+			result = acpi_power_get_inferred_state(device, state);
 			if (result)
 				return result;
 		} else if (device->power.flags.explicit_get) {
@@ -230,59 +205,33 @@
 						       NULL, &psc);
 			if (ACPI_FAILURE(status))
 				return -ENODEV;
-			device->power.state = (int)psc;
+			*state = (int)psc;
 		}
-
-		*state = device->power.state;
+	} else {
+		/* TBD: Non-recursive algorithm for walking up hierarchy. */
+		*state = device->parent ?
+			device->parent->power.state : ACPI_STATE_D0;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
-			  device->pnp.bus_id, device->power.state));
+			  device->pnp.bus_id, *state));
 
 	return 0;
 }
 
-EXPORT_SYMBOL(acpi_bus_get_power);
 
-int acpi_bus_set_power(acpi_handle handle, int state)
+static int __acpi_bus_set_power(struct acpi_device *device, int state)
 {
 	int result = 0;
 	acpi_status status = AE_OK;
-	struct acpi_device *device = NULL;
 	char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
 
-
-	result = acpi_bus_get_device(handle, &device);
-	if (result)
-		return result;
-
-	if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
+	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
 		return -EINVAL;
 
 	/* Make sure this is a valid target state */
 
-	if (!device->flags.power_manageable) {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
-				kobject_name(&device->dev.kobj)));
-		return -ENODEV;
-	}
-	/*
-	 * Get device's current power state
-	 */
-	if (!acpi_power_nocheck) {
-		/*
-		 * Maybe the incorrect power state is returned on the bogus
-		 * bios, which is different with the real power state.
-		 * For example: the bios returns D0 state and the real power
-		 * state is D3. OS expects to set the device to D0 state. In
-		 * such case if OS uses the power state returned by the BIOS,
-		 * the device can't be transisted to the correct power state.
-		 * So if the acpi_power_nocheck is set, it is unnecessary to
-		 * get the power state by calling acpi_bus_get_power.
-		 */
-		acpi_bus_get_power(device->handle, &device->power.state);
-	}
-	if ((state == device->power.state) && !device->flags.force_power_state) {
+	if (state == device->power.state) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
 				  state));
 		return 0;
@@ -351,8 +300,75 @@
 	return result;
 }
 
+
+int acpi_bus_set_power(acpi_handle handle, int state)
+{
+	struct acpi_device *device;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	if (result)
+		return result;
+
+	if (!device->flags.power_manageable) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"Device [%s] is not power manageable\n",
+				dev_name(&device->dev)));
+		return -ENODEV;
+	}
+
+	return __acpi_bus_set_power(device, state);
+}
 EXPORT_SYMBOL(acpi_bus_set_power);
 
+
+int acpi_bus_init_power(struct acpi_device *device)
+{
+	int state;
+	int result;
+
+	if (!device)
+		return -EINVAL;
+
+	device->power.state = ACPI_STATE_UNKNOWN;
+
+	result = __acpi_bus_get_power(device, &state);
+	if (result)
+		return result;
+
+	if (device->power.flags.power_resources)
+		result = acpi_power_on_resources(device, state);
+
+	if (!result)
+		device->power.state = state;
+
+	return result;
+}
+
+
+int acpi_bus_update_power(acpi_handle handle, int *state_p)
+{
+	struct acpi_device *device;
+	int state;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	if (result)
+		return result;
+
+	result = __acpi_bus_get_power(device, &state);
+	if (result)
+		return result;
+
+	result = __acpi_bus_set_power(device, state);
+	if (!result && state_p)
+		*state_p = state;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_update_power);
+
+
 bool acpi_bus_power_manageable(acpi_handle handle)
 {
 	struct acpi_device *device;
@@ -1023,15 +1039,8 @@
 	if (acpi_disabled)
 		return result;
 
-	/*
-	 * If the laptop falls into the DMI check table, the power state check
-	 * will be disabled in the course of device power transition.
-	 */
-	dmi_check_system(power_nocheck_dmi_table);
-
 	acpi_scan_init();
 	acpi_ec_init();
-	acpi_power_init();
 	acpi_debugfs_init();
 	acpi_sleep_proc_init();
 	acpi_wakeup_device_init();
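For drivers, the visible result of the bus.c rework is the new acpi_bus_update_power() export (the fan and thermal hunks further down switch to it): it reads the device's current ACPI power state, re-applies it so the power-resource bookkeeping stays consistent, and optionally reports the resulting D-state. A hedged sketch of typical caller code; the surrounding device and log messages are illustrative only:

	int state;

	if (acpi_bus_update_power(device->handle, &state))
		dev_warn(&device->dev, "Cannot update power state\n");
	else
		dev_dbg(&device->dev, "Device is in D%d\n", state);
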
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 71ef9cd..76bbb78 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -279,6 +279,9 @@
 	input_report_switch(button->input, SW_LID, !state);
 	input_sync(button->input);
 
+	if (state)
+		pm_wakeup_event(&device->dev, 0);
+
 	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
 	if (ret == NOTIFY_DONE)
 		ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
@@ -314,6 +317,8 @@
 			input_sync(input);
 			input_report_key(input, keycode, 0);
 			input_sync(input);
+
+			pm_wakeup_event(&device->dev, 0);
 		}
 
 		acpi_bus_generate_proc_event(device, event, ++button->pushed);
@@ -426,7 +431,7 @@
 		acpi_enable_gpe(device->wakeup.gpe_device,
 				device->wakeup.gpe_number);
 		device->wakeup.run_wake_count++;
-		device->wakeup.state.enabled = 1;
+		device_set_wakeup_enable(&device->dev, true);
 	}
 
 	printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
@@ -449,7 +454,7 @@
 		acpi_disable_gpe(device->wakeup.gpe_device,
 				device->wakeup.gpe_number);
 		device->wakeup.run_wake_count--;
-		device->wakeup.state.enabled = 0;
+		device_set_wakeup_enable(&device->dev, false);
 	}
 
 	acpi_button_remove_fs(device);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 81514a4..1864ad3 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -725,7 +725,7 @@
 			complete_dock(ds);
 			dock_event(ds, event, DOCK_EVENT);
 			dock_lock(ds, 1);
-			acpi_update_gpes();
+			acpi_update_all_gpes();
 			break;
 		}
 		if (dock_present(ds) || dock_in_progress(ds))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 302b31e..fa848c4 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -606,7 +606,8 @@
 	return 0;
 }
 
-static u32 acpi_ec_gpe_handler(void *data)
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+	u32 gpe_number, void *data)
 {
 	struct acpi_ec *ec = data;
 
@@ -618,7 +619,7 @@
 		wake_up(&ec->wait);
 		ec_check_sci(ec, acpi_ec_read_status(ec));
 	}
-	return ACPI_INTERRUPT_HANDLED;
+	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
 }
 
 /* --------------------------------------------------------------------------
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 6004908..467479f 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -86,7 +86,7 @@
 	if (!device)
 		return -EINVAL;
 
-	result = acpi_bus_get_power(device->handle, &acpi_state);
+	result = acpi_bus_update_power(device->handle, &acpi_state);
 	if (result)
 		return result;
 
@@ -123,7 +123,6 @@
 static int acpi_fan_add(struct acpi_device *device)
 {
 	int result = 0;
-	int state = 0;
 	struct thermal_cooling_device *cdev;
 
 	if (!device)
@@ -132,16 +131,12 @@
 	strcpy(acpi_device_name(device), "Fan");
 	strcpy(acpi_device_class(device), ACPI_FAN_CLASS);
 
-	result = acpi_bus_get_power(device->handle, &state);
+	result = acpi_bus_update_power(device->handle, NULL);
 	if (result) {
-		printk(KERN_ERR PREFIX "Reading power state\n");
+		printk(KERN_ERR PREFIX "Setting initial power state\n");
 		goto end;
 	}
 
-	device->flags.force_power_state = 1;
-	acpi_bus_set_power(device->handle, state);
-	device->flags.force_power_state = 0;
-
 	cdev = thermal_cooling_device_register("Fan", device,
 						&fan_cooling_ops);
 	if (IS_ERR(cdev)) {
@@ -200,22 +195,14 @@
 
 static int acpi_fan_resume(struct acpi_device *device)
 {
-	int result = 0;
-	int power_state = 0;
+	int result;
 
 	if (!device)
 		return -EINVAL;
 
-	result = acpi_bus_get_power(device->handle, &power_state);
-	if (result) {
-		printk(KERN_ERR PREFIX
-				  "Error reading fan power state\n");
-		return result;
-	}
-
-	device->flags.force_power_state = 1;
-	acpi_bus_set_power(device->handle, power_state);
-	device->flags.force_power_state = 0;
+	result = acpi_bus_update_power(device->handle, NULL);
+	if (result)
+		printk(KERN_ERR PREFIX "Error updating fan power state\n");
 
 	return result;
 }
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 78b0164..7c47ed5 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -167,11 +167,8 @@
 				"firmware_node");
 		ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
 				"physical_node");
-		if (acpi_dev->wakeup.flags.valid) {
+		if (acpi_dev->wakeup.flags.valid)
 			device_set_wakeup_capable(dev, true);
-			device_set_wakeup_enable(dev,
-						acpi_dev->wakeup.state.enabled);
-		}
 	}
 
 	return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index a212bfe..b1cc81a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,9 +41,10 @@
 int acpi_power_init(void);
 int acpi_device_sleep_wake(struct acpi_device *dev,
                            int enable, int sleep_state, int dev_state);
-int acpi_power_get_inferred_state(struct acpi_device *device);
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
+int acpi_power_on_resources(struct acpi_device *device, int state);
 int acpi_power_transition(struct acpi_device *device, int state);
-extern int acpi_power_nocheck;
+int acpi_bus_init_power(struct acpi_device *device);
 
 int acpi_wakeup_device_init(void);
 void acpi_early_processor_set_pdc(void);
@@ -82,8 +83,16 @@
 
 #ifdef CONFIG_ACPI_SLEEP
 int acpi_sleep_proc_init(void);
+int suspend_nvs_alloc(void);
+void suspend_nvs_free(void);
+int suspend_nvs_save(void);
+void suspend_nvs_restore(void);
 #else
 static inline int acpi_sleep_proc_init(void) { return 0; }
+static inline int suspend_nvs_alloc(void) { return 0; }
+static inline void suspend_nvs_free(void) {}
+static inline int suspend_nvs_save(void) { return 0; }
+static inline void suspend_nvs_restore(void) {}
 #endif
 
 #endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
new file mode 100644
index 0000000..54b6ab8
--- /dev/null
+++ b/drivers/acpi/nvs.c
@@ -0,0 +1,144 @@
+/*
+ * nvs.c - Routines for saving and restoring ACPI NVS memory region
+ *
+ * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <acpi/acpiosxf.h>
+
+/*
+ * Platform firmware, such as ACPI, may want us to save some memory it uses
+ * during suspend and to restore the contents of that memory during the
+ * subsequent resume.  The code below implements a mechanism allowing us to
+ * do that.
+ */
+
+struct nvs_page {
+	unsigned long phys_start;
+	unsigned int size;
+	void *kaddr;
+	void *data;
+	struct list_head node;
+};
+
+static LIST_HEAD(nvs_list);
+
+/**
+ *	suspend_nvs_register - register platform NVS memory region to save
+ *	@start - physical address of the region
+ *	@size - size of the region
+ *
+ *	The NVS region need not be page-aligned (both ends) and we arrange
+ *	things so that the data from page-aligned addresses in this region will
+ *	be copied into separate RAM pages.
+ */
+int suspend_nvs_register(unsigned long start, unsigned long size)
+{
+	struct nvs_page *entry, *next;
+
+	while (size > 0) {
+		unsigned int nr_bytes;
+
+		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
+		if (!entry)
+			goto Error;
+
+		list_add_tail(&entry->node, &nvs_list);
+		entry->phys_start = start;
+		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
+		entry->size = (size < nr_bytes) ? size : nr_bytes;
+
+		start += entry->size;
+		size -= entry->size;
+	}
+	return 0;
+
+ Error:
+	list_for_each_entry_safe(entry, next, &nvs_list, node) {
+		list_del(&entry->node);
+		kfree(entry);
+	}
+	return -ENOMEM;
+}
+
+/**
+ *	suspend_nvs_free - free data pages allocated for saving NVS regions
+ */
+void suspend_nvs_free(void)
+{
+	struct nvs_page *entry;
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data) {
+			free_page((unsigned long)entry->data);
+			entry->data = NULL;
+			if (entry->kaddr) {
+				acpi_os_unmap_memory(entry->kaddr, entry->size);
+				entry->kaddr = NULL;
+			}
+		}
+}
+
+/**
+ *	suspend_nvs_alloc - allocate memory necessary for saving NVS regions
+ */
+int suspend_nvs_alloc(void)
+{
+	struct nvs_page *entry;
+
+	list_for_each_entry(entry, &nvs_list, node) {
+		entry->data = (void *)__get_free_page(GFP_KERNEL);
+		if (!entry->data) {
+			suspend_nvs_free();
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
+ *	suspend_nvs_save - save NVS memory regions
+ */
+int suspend_nvs_save(void)
+{
+	struct nvs_page *entry;
+
+	printk(KERN_INFO "PM: Saving platform NVS memory\n");
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data) {
+			entry->kaddr = acpi_os_map_memory(entry->phys_start,
+							  entry->size);
+			if (!entry->kaddr) {
+				suspend_nvs_free();
+				return -ENOMEM;
+			}
+			memcpy(entry->data, entry->kaddr, entry->size);
+		}
+
+	return 0;
+}
+
+/**
+ *	suspend_nvs_restore - restore NVS memory regions
+ *
+ *	This function is going to be called with interrupts disabled, so it
+ *	cannot iounmap the virtual addresses used to access the NVS region.
+ */
+void suspend_nvs_restore(void)
+{
+	struct nvs_page *entry;
+
+	printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+
+	list_for_each_entry(entry, &nvs_list, node)
+		if (entry->data)
+			memcpy(entry->kaddr, entry->data, entry->size);
+}
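The helpers in the new nvs.c are intended to be called in a fixed order across a suspend/resume cycle; the sleep.c hunk later in this diff wires suspend_nvs_save() into acpi_pm_pre_suspend(). A simplified sketch of the overall sequence, with illustrative register arguments and error handling trimmed:

	/* Boot: record each firmware NVS region that must be preserved. */
	suspend_nvs_register(nvs_start, nvs_size);

	/* Before suspend: allocate backing pages and copy the regions out. */
	if (!suspend_nvs_alloc())
		suspend_nvs_save();

	/* After resume: copy the saved data back (reusing the mappings set
	 * up by suspend_nvs_save(), since interrupts may still be off),
	 * then release the backing pages. */
	suspend_nvs_restore();
	suspend_nvs_free();
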
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 055d7b7..e2dd6de 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -320,7 +320,7 @@
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-	virt = ioremap(pg_off, pg_sz);
+	virt = ioremap_cache(pg_off, pg_sz);
 	if (!virt) {
 		kfree(map);
 		return NULL;
@@ -642,7 +642,7 @@
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
 	rcu_read_unlock();
 	if (!virt_addr) {
-		virt_addr = ioremap(phys_addr, size);
+		virt_addr = ioremap_cache(phys_addr, size);
 		unmap = 1;
 	}
 	if (!value)
@@ -678,7 +678,7 @@
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
 	rcu_read_unlock();
 	if (!virt_addr) {
-		virt_addr = ioremap(phys_addr, size);
+		virt_addr = ioremap_cache(phys_addr, size);
 		unmap = 1;
 	}
 
@@ -1233,8 +1233,7 @@
 int acpi_check_resource_conflict(const struct resource *res)
 {
 	struct acpi_res_list *res_list_elem;
-	int ioport;
-	int clash = 0;
+	int ioport = 0, clash = 0;
 
 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
 		return 0;
@@ -1264,9 +1263,13 @@
 	if (clash) {
 		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
 			printk(KERN_WARNING "ACPI: resource %s %pR"
-			       " conflicts with ACPI region %s %pR\n",
+			       " conflicts with ACPI region %s "
+			       "[%s 0x%zx-0x%zx]\n",
 			       res->name, res, res_list_elem->name,
-			       res_list_elem);
+			       (res_list_elem->resource_type ==
+				ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
+			       (size_t) res_list_elem->start,
+			       (size_t) res_list_elem->end);
 			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
 				printk(KERN_NOTICE "ACPI: This conflict may"
 				       " cause random problems and system"
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 96668ad..d976679 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
+#include <acpi/apei.h>
 
 #define PREFIX "ACPI: "
 
@@ -47,6 +48,11 @@
 static int acpi_pci_root_remove(struct acpi_device *device, int type);
 static int acpi_pci_root_start(struct acpi_device *device);
 
+#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
+				| OSC_ACTIVE_STATE_PWR_SUPPORT \
+				| OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
+				| OSC_MSI_SUPPORT)
+
 static const struct acpi_device_id root_device_ids[] = {
 	{"PNP0A03", 0},
 	{"", 0},
@@ -566,6 +572,33 @@
 	if (flags != base_flags)
 		acpi_pci_osc_support(root, flags);
 
+	if (!pcie_ports_disabled
+	    && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
+		flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+			| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+			| OSC_PCI_EXPRESS_PME_CONTROL;
+
+		if (pci_aer_available()) {
+			if (aer_acpi_firmware_first())
+				dev_dbg(root->bus->bridge,
+					"PCIe errors handled by BIOS.\n");
+			else
+				flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+		}
+
+		dev_info(root->bus->bridge,
+			"Requesting ACPI _OSC control (0x%02x)\n", flags);
+
+		status = acpi_pci_osc_control_set(device->handle, &flags,
+					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+		if (ACPI_SUCCESS(status))
+			dev_info(root->bus->bridge,
+				"ACPI _OSC control (0x%02x) granted\n", flags);
+		else
+			dev_dbg(root->bus->bridge,
+				"ACPI _OSC request failed (code %d)\n", status);
+	}
+
 	pci_acpi_add_bus_pm_notifier(device, root->bus);
 	if (device->wakeup.flags.run_wake)
 		device_set_run_wake(root->bus->bridge, true);
@@ -603,6 +636,8 @@
 	if (acpi_pci_disabled)
 		return 0;
 
+	acpi_hest_init();
+
 	pci_acpi_crs_quirks();
 	if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
 		return -ENODEV;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 4c9c2fb..9ac2a9f 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -56,9 +56,6 @@
 #define ACPI_POWER_RESOURCE_STATE_ON	0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
 
-int acpi_power_nocheck;
-module_param_named(power_nocheck, acpi_power_nocheck, bool, 000);
-
 static int acpi_power_add(struct acpi_device *device);
 static int acpi_power_remove(struct acpi_device *device, int type);
 static int acpi_power_resume(struct acpi_device *device);
@@ -148,9 +145,8 @@
 
 static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
 {
-	int result = 0, state1;
-	u32 i = 0;
-
+	int cur_state;
+	int i = 0;
 
 	if (!list || !state)
 		return -EINVAL;
@@ -158,25 +154,33 @@
 	/* The state of the list is 'on' IFF all resources are 'on'. */
 
 	for (i = 0; i < list->count; i++) {
-		/*
-		 * The state of the power resource can be obtained by
-		 * using the ACPI handle. In such case it is unnecessary to
-		 * get the Power resource first and then get its state again.
-		 */
-		result = acpi_power_get_state(list->handles[i], &state1);
+		struct acpi_power_resource *resource;
+		acpi_handle handle = list->handles[i];
+		int result;
+
+		result = acpi_power_get_context(handle, &resource);
 		if (result)
 			return result;
 
-		*state = state1;
+		mutex_lock(&resource->resource_lock);
 
-		if (*state != ACPI_POWER_RESOURCE_STATE_ON)
+		result = acpi_power_get_state(handle, &cur_state);
+
+		mutex_unlock(&resource->resource_lock);
+
+		if (result)
+			return result;
+
+		if (cur_state != ACPI_POWER_RESOURCE_STATE_ON)
 			break;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n",
-			  *state ? "on" : "off"));
+			  cur_state ? "on" : "off"));
 
-	return result;
+	*state = cur_state;
+
+	return 0;
 }
 
 static int __acpi_power_on(struct acpi_power_resource *resource)
@@ -222,7 +226,7 @@
 	return result;
 }
 
-static int acpi_power_off_device(acpi_handle handle)
+static int acpi_power_off(acpi_handle handle)
 {
 	int result = 0;
 	acpi_status status = AE_OK;
@@ -266,6 +270,35 @@
 	return result;
 }
 
+static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res)
+{
+	int i;
+
+	for (i = num_res - 1; i >= 0 ; i--)
+		acpi_power_off(list->handles[i]);
+}
+
+static void acpi_power_off_list(struct acpi_handle_list *list)
+{
+	__acpi_power_off_list(list, list->count);
+}
+
+static int acpi_power_on_list(struct acpi_handle_list *list)
+{
+	int result = 0;
+	int i;
+
+	for (i = 0; i < list->count; i++) {
+		result = acpi_power_on(list->handles[i]);
+		if (result) {
+			__acpi_power_off_list(list, i);
+			break;
+		}
+	}
+
+	return result;
+}
+
 /**
  * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
  *                          ACPI 3.0) _PSW (Power State Wake)
@@ -404,8 +437,7 @@
 
 	/* Close power resource */
 	for (i = 0; i < dev->wakeup.resources.count; i++) {
-		int ret = acpi_power_off_device(
-				dev->wakeup.resources.handles[i]);
+		int ret = acpi_power_off(dev->wakeup.resources.handles[i]);
 		if (ret) {
 			printk(KERN_ERR PREFIX "Transition power state\n");
 			dev->wakeup.flags.valid = 0;
@@ -423,19 +455,16 @@
                              Device Power Management
    -------------------------------------------------------------------------- */
 
-int acpi_power_get_inferred_state(struct acpi_device *device)
+int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
 {
 	int result = 0;
 	struct acpi_handle_list *list = NULL;
 	int list_state = 0;
 	int i = 0;
 
-
-	if (!device)
+	if (!device || !state)
 		return -EINVAL;
 
-	device->power.state = ACPI_STATE_UNKNOWN;
-
 	/*
 	 * We know a device's inferred power state when all the resources
 	 * required for a given D-state are 'on'.
@@ -450,22 +479,26 @@
 			return result;
 
 		if (list_state == ACPI_POWER_RESOURCE_STATE_ON) {
-			device->power.state = i;
+			*state = i;
 			return 0;
 		}
 	}
 
-	device->power.state = ACPI_STATE_D3;
-
+	*state = ACPI_STATE_D3;
 	return 0;
 }
 
+int acpi_power_on_resources(struct acpi_device *device, int state)
+{
+	if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3)
+		return -EINVAL;
+
+	return acpi_power_on_list(&device->power.states[state].resources);
+}
+
 int acpi_power_transition(struct acpi_device *device, int state)
 {
-	int result = 0;
-	struct acpi_handle_list *cl = NULL;	/* Current Resources */
-	struct acpi_handle_list *tl = NULL;	/* Target Resources */
-	int i = 0;
+	int result;
 
 	if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3))
 		return -EINVAL;
@@ -477,37 +510,20 @@
 	    || (device->power.state > ACPI_STATE_D3))
 		return -ENODEV;
 
-	cl = &device->power.states[device->power.state].resources;
-	tl = &device->power.states[state].resources;
-
 	/* TBD: Resources must be ordered. */
 
 	/*
 	 * First we reference all power resources required in the target list
-	 * (e.g. so the device doesn't lose power while transitioning).
+	 * (e.g. so the device doesn't lose power while transitioning).  Then,
+	 * we dereference all power resources used in the current list.
 	 */
-	for (i = 0; i < tl->count; i++) {
-		result = acpi_power_on(tl->handles[i]);
-		if (result)
-			goto end;
-	}
+	result = acpi_power_on_list(&device->power.states[state].resources);
+	if (!result)
+		acpi_power_off_list(
+			&device->power.states[device->power.state].resources);
 
-	/*
-	 * Then we dereference all power resources used in the current list.
-	 */
-	for (i = 0; i < cl->count; i++) {
-		result = acpi_power_off_device(cl->handles[i]);
-		if (result)
-			goto end;
-	}
-
-     end:
-	if (result)
-		device->power.state = ACPI_STATE_UNKNOWN;
-	else {
-	/* We shouldn't change the state till all above operations succeed */
-		device->power.state = state;
-	}
+	/* We shouldn't change the state unless the above operations succeed. */
+	device->power.state = result ? ACPI_STATE_UNKNOWN : state;
 
 	return result;
 }
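The rewritten acpi_power_transition() keeps the ordering that matters when two D-states share power resources: the target list is referenced before the current list is dereferenced. An illustrative scenario (resource names are made up):

	/*
	 * Example: D0 needs { PWRA, PWRB }, D1 needs { PWRA }.
	 * Transitioning D0 -> D1, the on-before-off order means PWRA is
	 * (re)turned on for D1 before the D0 list is turned off, so the
	 * device is never left with all of its resources off in between.
	 * Reversing the two calls could briefly cut power entirely.
	 */
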
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index afad677..f5f9869 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,7 +311,9 @@
 			   dev->pnp.bus_id,
 			   (u32) dev->wakeup.sleep_state,
 			   dev->wakeup.flags.run_wake ? '*' : ' ',
-			   dev->wakeup.state.enabled ? "enabled" : "disabled");
+			   (device_may_wakeup(&dev->dev)
+			     || (ldev && device_may_wakeup(ldev))) ?
+			       "enabled" : "disabled");
 		if (ldev)
 			seq_printf(seq, "%s:%s",
 				   ldev->bus ? ldev->bus->name : "no-bus",
@@ -328,8 +330,10 @@
 {
 	struct device *dev = acpi_get_physical_device(adev->handle);
 
-	if (dev && device_can_wakeup(dev))
-		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
+	if (dev && device_can_wakeup(dev)) {
+		bool enable = !device_may_wakeup(dev);
+		device_set_wakeup_enable(dev, enable);
+	}
 }
 
 static ssize_t
@@ -341,7 +345,6 @@
 	char strbuf[5];
 	char str[5] = "";
 	unsigned int len = count;
-	struct acpi_device *found_dev = NULL;
 
 	if (len > 4)
 		len = 4;
@@ -361,33 +364,13 @@
 			continue;
 
 		if (!strncmp(dev->pnp.bus_id, str, 4)) {
-			dev->wakeup.state.enabled =
-			    dev->wakeup.state.enabled ? 0 : 1;
-			found_dev = dev;
-			break;
-		}
-	}
-	if (found_dev) {
-		physical_device_enable_wakeup(found_dev);
-		list_for_each_safe(node, next, &acpi_wakeup_device_list) {
-			struct acpi_device *dev = container_of(node,
-							       struct
-							       acpi_device,
-							       wakeup_list);
-
-			if ((dev != found_dev) &&
-			    (dev->wakeup.gpe_number ==
-			     found_dev->wakeup.gpe_number)
-			    && (dev->wakeup.gpe_device ==
-				found_dev->wakeup.gpe_device)) {
-				printk(KERN_WARNING
-				       "ACPI: '%s' and '%s' have the same GPE, "
-				       "can't disable/enable one separately\n",
-				       dev->pnp.bus_id, found_dev->pnp.bus_id);
-				dev->wakeup.state.enabled =
-				    found_dev->wakeup.state.enabled;
+			if (device_can_wakeup(&dev->dev)) {
+				bool enable = !device_may_wakeup(&dev->dev);
+				device_set_wakeup_enable(&dev->dev, enable);
+			} else {
 				physical_device_enable_wakeup(dev);
 			}
+			break;
 		}
 	}
 	mutex_unlock(&acpi_device_lock);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index bec561c..3c1a2fe 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -23,7 +23,7 @@
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
 		"disabling mwait for CPU C-states\n", id->ident);
-	idle_nomwait = 1;
+	boot_option_idle_override = IDLE_NOMWAIT;
 	return 0;
 }
 
@@ -283,7 +283,7 @@
 {
 	acpi_status status = AE_OK;
 
-	if (idle_nomwait) {
+	if (boot_option_idle_override == IDLE_NOMWAIT) {
 		/*
 		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
 		 * mode will be disabled in the parameter of _PDC object.
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 85e4804..360a74e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -40,10 +40,6 @@
 #include <linux/pm.h>
 #include <linux/cpufreq.h>
 #include <linux/cpu.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/cpuidle.h>
@@ -246,53 +242,6 @@
 	return result;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
-static struct proc_dir_entry *acpi_processor_dir = NULL;
-
-static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
-{
-	struct proc_dir_entry *entry = NULL;
-
-
-	if (!acpi_device_dir(device)) {
-		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-						     acpi_processor_dir);
-		if (!acpi_device_dir(device))
-			return -ENODEV;
-	}
-
-	/* 'throttling' [R/W] */
-	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
-				 S_IFREG | S_IRUGO | S_IWUSR,
-				 acpi_device_dir(device),
-				 &acpi_processor_throttling_fops,
-				 acpi_driver_data(device));
-	if (!entry)
-		return -EIO;
-	return 0;
-}
-static int acpi_processor_remove_fs(struct acpi_device *device)
-{
-
-	if (acpi_device_dir(device)) {
-		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
-				  acpi_device_dir(device));
-		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
-		acpi_device_dir(device) = NULL;
-	}
-
-	return 0;
-}
-#else
-static inline int acpi_processor_add_fs(struct acpi_device *device)
-{
-	return 0;
-}
-static inline int acpi_processor_remove_fs(struct acpi_device *device)
-{
-	return 0;
-}
-#endif
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -478,8 +427,13 @@
 	if (action == CPU_ONLINE && pr) {
 		acpi_processor_ppc_has_changed(pr, 0);
 		acpi_processor_cst_has_changed(pr);
+		acpi_processor_reevaluate_tstate(pr, action);
 		acpi_processor_tstate_has_changed(pr);
 	}
+	if (action == CPU_DEAD && pr) {
+		/* Invalidate flags.throttling after one CPU goes offline. */
+		acpi_processor_reevaluate_tstate(pr, action);
+	}
 	return NOTIFY_OK;
 }
 
@@ -537,14 +491,10 @@
 
 	per_cpu(processors, pr->id) = pr;
 
-	result = acpi_processor_add_fs(device);
-	if (result)
-		goto err_free_cpumask;
-
 	sysdev = get_cpu_sysdev(pr->id);
 	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
 		result = -EFAULT;
-		goto err_remove_fs;
+		goto err_free_cpumask;
 	}
 
 #ifdef CONFIG_CPU_FREQ
@@ -590,8 +540,6 @@
 	thermal_cooling_device_unregister(pr->cdev);
 err_power_exit:
 	acpi_processor_power_exit(pr, device);
-err_remove_fs:
-	acpi_processor_remove_fs(device);
 err_free_cpumask:
 	free_cpumask_var(pr->throttling.shared_cpu_map);
 
@@ -620,8 +568,6 @@
 
 	sysfs_remove_link(&device->dev.kobj, "sysdev");
 
-	acpi_processor_remove_fs(device);
-
 	if (pr->cdev) {
 		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
 		sysfs_remove_link(&pr->cdev->device.kobj, "device");
@@ -854,12 +800,6 @@
 
 	memset(&errata, 0, sizeof(errata));
 
-#ifdef CONFIG_ACPI_PROCFS
-	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-	if (!acpi_processor_dir)
-		return -ENOMEM;
-#endif
-
 	if (!cpuidle_register_driver(&acpi_idle_driver)) {
 		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
 			acpi_idle_driver.name);
@@ -885,10 +825,6 @@
 out_cpuidle:
 	cpuidle_unregister_driver(&acpi_idle_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
 	return result;
 }
 
@@ -907,10 +843,6 @@
 
 	cpuidle_unregister_driver(&acpi_idle_driver);
 
-#ifdef CONFIG_ACPI_PROCFS
-	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
-#endif
-
 	return;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index dcb38f8..d615b7d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -79,6 +79,13 @@
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static int disabled_by_idle_boot_param(void)
+{
+	return boot_option_idle_override == IDLE_POLL ||
+		boot_option_idle_override == IDLE_FORCE_MWAIT ||
+		boot_option_idle_override == IDLE_HALT;
+}
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
@@ -455,7 +462,7 @@
 				continue;
 			}
 			if (cx.type == ACPI_STATE_C1 &&
-					(idle_halt || idle_nomwait)) {
+			    (boot_option_idle_override == IDLE_NOMWAIT)) {
 				/*
 				 * In most cases the C1 space_id obtained from
 				 * _CST object is FIXED_HARDWARE access mode.
@@ -746,7 +753,7 @@
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
-	pr = __get_cpu_var(processors);
+	pr = __this_cpu_read(processors);
 
 	if (unlikely(!pr))
 		return 0;
@@ -787,7 +794,7 @@
 	s64 idle_time_ns;
 	s64 idle_time;
 
-	pr = __get_cpu_var(processors);
+	pr = __this_cpu_read(processors);
 
 	if (unlikely(!pr))
 		return 0;
@@ -864,7 +871,7 @@
 	s64 idle_time;
 
 
-	pr = __get_cpu_var(processors);
+	pr = __this_cpu_read(processors);
 
 	if (unlikely(!pr))
 		return 0;
@@ -1016,7 +1023,6 @@
 		state->flags = 0;
 		switch (cx->type) {
 			case ACPI_STATE_C1:
-			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			if (cx->entry_method == ACPI_CSTATE_FFH)
 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
@@ -1025,16 +1031,13 @@
 			break;
 
 			case ACPI_STATE_C2:
-			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
 			dev->safe_state = state;
 			break;
 
 			case ACPI_STATE_C3:
-			state->flags |= CPUIDLE_FLAG_DEEP;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
-			state->flags |= CPUIDLE_FLAG_CHECK_BM;
 			state->enter = pr->flags.bm_check ?
 					acpi_idle_enter_bm :
 					acpi_idle_enter_simple;
@@ -1058,7 +1061,7 @@
 {
 	int ret = 0;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!pr)
@@ -1089,19 +1092,10 @@
 	acpi_status status = 0;
 	static int first_run;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!first_run) {
-		if (idle_halt) {
-			/*
-			 * When the boot option of "idle=halt" is added, halt
-			 * is used for CPU IDLE.
-			 * In such case C2/C3 is meaningless. So the max_cstate
-			 * is set to one.
-			 */
-			max_cstate = 1;
-		}
 		dmi_check_system(processor_power_dmi_table);
 		max_cstate = acpi_processor_cstate_check(max_cstate);
 		if (max_cstate < ACPI_C_STATES_MAX)
@@ -1142,7 +1136,7 @@
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	cpuidle_unregister_device(&pr->power.dev);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ff36327..fa84e97 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -32,10 +32,6 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/cpufreq.h>
-#ifdef CONFIG_ACPI_PROCFS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -370,6 +366,58 @@
 }
 
 /*
+ * This function reevaluates whether the T-state is valid after a CPU
+ * is onlined or offlined.
+ * Note that it does not reevaluate the following T-state properties:
+ *	1. Control method.
+ *	2. The number of supported T-states.
+ *	3. TSD domain.
+ */
+void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+					unsigned long action)
+{
+	int result = 0;
+
+	if (action == CPU_DEAD) {
+		/* When a CPU goes offline, its T-state throttling
+		 * is invalidated.
+		 */
+		pr->flags.throttling = 0;
+		return;
+	}
+	/* Recheck whether the T-state is valid for the CPU that has just
+	 * come online.
+	 */
+	if (!pr->throttling.state_count) {
+		/* If no T-states are reported, throttling is
+		 * invalidated.
+		 */
+		pr->flags.throttling = 0;
+		return;
+	}
+	pr->flags.throttling = 1;
+
+	/* Disable throttling (if enabled).  We'll let subsequent
+	 * policy (e.g. thermal) decide to lower performance if it
+	 * so chooses, but for now we'll crank up the speed.
+	 */
+
+	result = acpi_processor_get_throttling(pr);
+	if (result)
+		goto end;
+
+	if (pr->throttling.state) {
+		result = acpi_processor_set_throttling(pr, 0, false);
+		if (result)
+			goto end;
+	}
+
+end:
+	if (result)
+		pr->flags.throttling = 0;
+}
+/*
  * _PTC - Processor Throttling Control (and status) register location
  */
 static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
@@ -876,7 +924,11 @@
 	 */
 	cpumask_copy(saved_mask, &current->cpus_allowed);
 	/* FIXME: use work_on_cpu() */
-	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+		/* Can't migrate to the target pr->id CPU. Exit */
+		free_cpumask_var(saved_mask);
+		return -ENODEV;
+	}
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
 	set_cpus_allowed_ptr(current, saved_mask);
@@ -1051,6 +1103,14 @@
 		return -ENOMEM;
 	}
 
+	if (cpu_is_offline(pr->id)) {
+		/*
+		 * The CPU referred to by pr->id is offline; there is no
+		 * point in changing its throttling state any more.
+		 */
+		return -ENODEV;
+	}
+
 	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
@@ -1074,7 +1134,11 @@
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
 		/* FIXME: use work_on_cpu() */
-		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
+		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+			/* Can't migrate to the pr->id CPU. Exit */
+			ret = -ENODEV;
+			goto exit;
+		}
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 						t_state.target_state, force);
 	} else {
@@ -1106,7 +1170,8 @@
 			}
 			t_state.cpu = i;
 			/* FIXME: use work_on_cpu() */
-			set_cpus_allowed_ptr(current, cpumask_of(i));
+			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
+				continue;
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state, force);
@@ -1126,6 +1191,7 @@
 	/* restore the previous state */
 	/* FIXME: use work_on_cpu() */
 	set_cpus_allowed_ptr(current, saved_mask);
+exit:
 	free_cpumask_var(online_throttling_cpus);
 	free_cpumask_var(saved_mask);
 	return ret;
@@ -1216,113 +1282,3 @@
 	return result;
 }
 
-#ifdef CONFIG_ACPI_PROCFS
-/* proc interface */
-static int acpi_processor_throttling_seq_show(struct seq_file *seq,
-					      void *offset)
-{
-	struct acpi_processor *pr = seq->private;
-	int i = 0;
-	int result = 0;
-
-	if (!pr)
-		goto end;
-
-	if (!(pr->throttling.state_count > 0)) {
-		seq_puts(seq, "<not supported>\n");
-		goto end;
-	}
-
-	result = acpi_processor_get_throttling(pr);
-
-	if (result) {
-		seq_puts(seq,
-			 "Could not determine current throttling state.\n");
-		goto end;
-	}
-
-	seq_printf(seq, "state count:             %d\n"
-		   "active state:            T%d\n"
-		   "state available: T%d to T%d\n",
-		   pr->throttling.state_count, pr->throttling.state,
-		   pr->throttling_platform_limit,
-		   pr->throttling.state_count - 1);
-
-	seq_puts(seq, "states:\n");
-	if (pr->throttling.acpi_processor_get_throttling ==
-			acpi_processor_get_throttling_fadt) {
-		for (i = 0; i < pr->throttling.state_count; i++)
-			seq_printf(seq, "   %cT%d:                  %02d%%\n",
-				   (i == pr->throttling.state ? '*' : ' '), i,
-				   (pr->throttling.states[i].performance ? pr->
-				    throttling.states[i].performance / 10 : 0));
-	} else {
-		for (i = 0; i < pr->throttling.state_count; i++)
-			seq_printf(seq, "   %cT%d:                  %02d%%\n",
-				   (i == pr->throttling.state ? '*' : ' '), i,
-				   (int)pr->throttling.states_tss[i].
-				   freqpercentage);
-	}
-
-      end:
-	return 0;
-}
-
-static int acpi_processor_throttling_open_fs(struct inode *inode,
-					     struct file *file)
-{
-	return single_open(file, acpi_processor_throttling_seq_show,
-			   PDE(inode)->data);
-}
-
-static ssize_t acpi_processor_write_throttling(struct file *file,
-					       const char __user * buffer,
-					       size_t count, loff_t * data)
-{
-	int result = 0;
-	struct seq_file *m = file->private_data;
-	struct acpi_processor *pr = m->private;
-	char state_string[5] = "";
-	char *charp = NULL;
-	size_t state_val = 0;
-	char tmpbuf[5] = "";
-
-	if (!pr || (count > sizeof(state_string) - 1))
-		return -EINVAL;
-
-	if (copy_from_user(state_string, buffer, count))
-		return -EFAULT;
-
-	state_string[count] = '\0';
-	if ((count > 0) && (state_string[count-1] == '\n'))
-		state_string[count-1] = '\0';
-
-	charp = state_string;
-	if ((state_string[0] == 't') || (state_string[0] == 'T'))
-		charp++;
-
-	state_val = simple_strtoul(charp, NULL, 0);
-	if (state_val >= pr->throttling.state_count)
-		return -EINVAL;
-
-	snprintf(tmpbuf, 5, "%zu", state_val);
-
-	if (strcmp(tmpbuf, charp) != 0)
-		return -EINVAL;
-
-	result = acpi_processor_set_throttling(pr, state_val, false);
-	if (result)
-		return result;
-
-	return count;
-}
-
-const struct file_operations acpi_processor_throttling_fops = {
-	.owner = THIS_MODULE,
-	.open = acpi_processor_throttling_open_fs,
-	.read = seq_read,
-	.write = acpi_processor_write_throttling,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-#endif
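Combined with the processor_driver.c hunk earlier, the new reevaluation hook hangs off the CPU hotplug notifier; a condensed sketch of that flow (simplified, not a literal copy of the notifier):

	/* In the CPU hotplug notifier callback: */
	if (action == CPU_ONLINE && pr) {
		/* Revalidate throttling for the newly onlined CPU and,
		 * if a non-zero T-state was left behind, reset it to T0. */
		acpi_processor_reevaluate_tstate(pr, action);
	}
	if (action == CPU_DEAD && pr) {
		/* The CPU is gone; mark its throttling support invalid. */
		acpi_processor_reevaluate_tstate(pr, action);
	}
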
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index e5dbedb..51ae379 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -484,6 +484,8 @@
 		const struct file_operations *state_fops,
 		const struct file_operations *alarm_fops, void *data)
 {
+	printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
+			" please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
 	if (!*dir) {
 		*dir = proc_mkdir(dir_name, parent_dir);
 		if (!*dir) {
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 29ef505..b99e624 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -778,7 +778,7 @@
 		wakeup->resources.handles[i] = element->reference.handle;
 	}
 
-	acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number);
+	acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
 
  out:
 	kfree(buffer.pointer);
@@ -803,7 +803,7 @@
 	/* Power button, Lid switch always enable wakeup */
 	if (!acpi_match_device_ids(device, button_device_ids)) {
 		device->wakeup.flags.run_wake = 1;
-		device->wakeup.flags.always_enabled = 1;
+		device_set_wakeup_capable(&device->dev, true);
 		return;
 	}
 
@@ -815,16 +815,22 @@
 				!!(event_status & ACPI_EVENT_FLAG_HANDLE);
 }
 
-static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
+static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
 {
+	acpi_handle temp;
 	acpi_status status = 0;
 	int psw_error;
 
+	/* Presence of _PRW indicates wake capable */
+	status = acpi_get_handle(device->handle, "_PRW", &temp);
+	if (ACPI_FAILURE(status))
+		return;
+
 	status = acpi_bus_extract_wakeup_device_power_package(device->handle,
 							      &device->wakeup);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
-		goto end;
+		return;
 	}
 
 	device->wakeup.flags.valid = 1;
@@ -840,13 +846,10 @@
 	if (psw_error)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 				"error in _DSW or _PSW evaluation\n"));
-
-end:
-	if (ACPI_FAILURE(status))
-		device->flags.wake_capable = 0;
-	return 0;
 }
 
+static void acpi_bus_add_power_resource(acpi_handle handle);
+
 static int acpi_bus_get_power_flags(struct acpi_device *device)
 {
 	acpi_status status = 0;
@@ -875,8 +878,12 @@
 		acpi_evaluate_reference(device->handle, object_name, NULL,
 					&ps->resources);
 		if (ps->resources.count) {
+			int j;
+
 			device->power.flags.power_resources = 1;
 			ps->flags.valid = 1;
+			for (j = 0; j < ps->resources.count; j++)
+				acpi_bus_add_power_resource(ps->resources.handles[j]);
 		}
 
 		/* Evaluate "_PSx" to see if we can do explicit sets */
@@ -901,10 +908,7 @@
 	device->power.states[ACPI_STATE_D3].flags.valid = 1;
 	device->power.states[ACPI_STATE_D3].power = 0;
 
-	/* TBD: System wake support and resource requirements. */
-
-	device->power.state = ACPI_STATE_UNKNOWN;
-	acpi_bus_get_power(device->handle, &(device->power.state));
+	acpi_bus_init_power(device);
 
 	return 0;
 }
@@ -947,11 +951,6 @@
 	if (ACPI_SUCCESS(status))
 		device->flags.power_manageable = 1;
 
-	/* Presence of _PRW indicates wake capable */
-	status = acpi_get_handle(device->handle, "_PRW", &temp);
-	if (ACPI_SUCCESS(status))
-		device->flags.wake_capable = 1;
-
 	/* TBD: Performance management */
 
 	return 0;
@@ -1278,11 +1277,7 @@
 	 * Wakeup device management
 	 *-----------------------
 	 */
-	if (device->flags.wake_capable) {
-		result = acpi_bus_get_wakeup_device_flags(device);
-		if (result)
-			goto end;
-	}
+	acpi_bus_get_wakeup_device_flags(device);
 
 	/*
 	 * Performance Management
@@ -1326,6 +1321,20 @@
 #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
 			  ACPI_STA_DEVICE_UI      | ACPI_STA_DEVICE_FUNCTIONING)
 
+static void acpi_bus_add_power_resource(acpi_handle handle)
+{
+	struct acpi_bus_ops ops = {
+		.acpi_op_add = 1,
+		.acpi_op_start = 1,
+	};
+	struct acpi_device *device = NULL;
+
+	acpi_bus_get_device(handle, &device);
+	if (!device)
+		acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER,
+					ACPI_STA_DEFAULT, &ops);
+}
+
 static int acpi_bus_type_and_status(acpi_handle handle, int *type,
 				    unsigned long long *sta)
 {
@@ -1371,7 +1380,6 @@
 	struct acpi_bus_ops *ops = context;
 	int type;
 	unsigned long long sta;
-	struct acpi_device_wakeup wakeup;
 	struct acpi_device *device;
 	acpi_status status;
 	int result;
@@ -1382,7 +1390,13 @@
 
 	if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
 	    !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
-		acpi_bus_extract_wakeup_device_power_package(handle, &wakeup);
+		struct acpi_device_wakeup wakeup;
+		acpi_handle temp;
+
+		status = acpi_get_handle(handle, "_PRW", &temp);
+		if (ACPI_SUCCESS(status))
+			acpi_bus_extract_wakeup_device_power_package(handle,
+								     &wakeup);
 		return AE_CTRL_DEPTH;
 	}
 
@@ -1467,7 +1481,7 @@
 
 	result = acpi_bus_scan(device->handle, &ops, NULL);
 
-	acpi_update_gpes();
+	acpi_update_all_gpes();
 
 	return result;
 }
@@ -1573,6 +1587,8 @@
 		printk(KERN_ERR PREFIX "Could not register bus type\n");
 	}
 
+	acpi_power_init();
+
 	/*
 	 * Enumerate devices in the ACPI namespace.
 	 */
@@ -1584,7 +1600,7 @@
 	if (result)
 		acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
 	else
-		acpi_update_gpes();
+		acpi_update_all_gpes();
 
 	return result;
 }
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index febb153b..fdd3aee 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -124,8 +124,7 @@
 static int acpi_pm_pre_suspend(void)
 {
 	acpi_pm_freeze();
-	suspend_nvs_save();
-	return 0;
+	return suspend_nvs_save();
 }
 
 /**
@@ -151,7 +150,7 @@
 {
 	int error = __acpi_pm_prepare();
 	if (!error)
-		acpi_pm_pre_suspend();
+		error = acpi_pm_pre_suspend();
 
 	return error;
 }
@@ -319,7 +318,7 @@
 	}
 }
 
-static struct platform_suspend_ops acpi_suspend_ops = {
+static const struct platform_suspend_ops acpi_suspend_ops = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin,
 	.prepare_late = acpi_pm_prepare,
@@ -347,7 +346,7 @@
  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
  * been requested.
  */
-static struct platform_suspend_ops acpi_suspend_ops_old = {
+static const struct platform_suspend_ops acpi_suspend_ops_old = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin_old,
 	.prepare_late = acpi_pm_pre_suspend,
@@ -435,6 +434,14 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
 		},
 	},
+	{
+	.callback = init_nvs_nosave,
+	.ident = "Averatec AV1020-ED2",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
@@ -506,7 +513,7 @@
 	acpi_enable_all_runtime_gpes();
 }
 
-static struct platform_hibernation_ops acpi_hibernation_ops = {
+static const struct platform_hibernation_ops acpi_hibernation_ops = {
 	.begin = acpi_hibernation_begin,
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_pm_prepare,
@@ -549,7 +556,7 @@
  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
  * been requested.
  */
-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
 	.begin = acpi_hibernation_begin_old,
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_pm_pre_suspend,
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index f8588f8..61891e7 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -438,7 +438,7 @@
 	return;
 }
 
-void acpi_os_gpe_count(u32 gpe_number)
+static void gpe_count(u32 gpe_number)
 {
 	acpi_gpe_count++;
 
@@ -454,7 +454,7 @@
 	return;
 }
 
-void acpi_os_fixed_event_count(u32 event_number)
+static void fixed_event_count(u32 event_number)
 {
 	if (!all_counters)
 		return;
@@ -468,6 +468,16 @@
 	return;
 }
 
+static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
+	u32 event_number, void *context)
+{
+	if (event_type == ACPI_EVENT_TYPE_GPE)
+		gpe_count(event_number);
+
+	if (event_type == ACPI_EVENT_TYPE_FIXED)
+		fixed_event_count(event_number);
+}
+
 static int get_status(u32 index, acpi_event_status *status,
 		      acpi_handle *handle)
 {
@@ -601,6 +611,7 @@
 
 void acpi_irq_stats_init(void)
 {
+	acpi_status status;
 	int i;
 
 	if (all_counters)
@@ -619,6 +630,10 @@
 	if (all_counters == NULL)
 		goto fail;
 
+	status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
+	if (ACPI_FAILURE(status))
+		goto fail;
+
 	counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
 				GFP_KERNEL);
 	if (counter_attrs == NULL)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5a27b0a..2607e17 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1059,8 +1059,9 @@
 			break;
 		tz->trips.active[i].flags.enabled = 1;
 		for (j = 0; j < tz->trips.active[i].devices.count; j++) {
-			result = acpi_bus_get_power(tz->trips.active[i].devices.
-			    handles[j], &power_state);
+			result = acpi_bus_update_power(
+					tz->trips.active[i].devices.handles[j],
+					&power_state);
 			if (result || (power_state != ACPI_STATE_D0)) {
 				tz->trips.active[i].flags.enabled = 0;
 				break;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5cd0228..90f8f76 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -33,7 +33,6 @@
 #include <linux/input.h>
 #include <linux/backlight.h>
 #include <linux/thermal.h>
-#include <linux/video_output.h>
 #include <linux/sort.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
@@ -81,6 +80,13 @@
 static int allow_duplicates;
 module_param(allow_duplicates, bool, 0644);
 
+/*
+ * Some BIOSes claim they use the minimum backlight level at boot,
+ * which may leave the screen dimmed after boot.
+ */
+static int use_bios_initial_backlight = 1;
+module_param(use_bios_initial_backlight, bool, 0644);
+
 static int register_count = 0;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -172,9 +178,6 @@
 	u8 _BQC:1;		/* Get current brightness level */
 	u8 _BCQ:1;		/* Some buggy BIOS uses _BCQ instead of _BQC */
 	u8 _DDC:1;		/*Return the EDID for this device */
-	u8 _DCS:1;		/*Return status of output device */
-	u8 _DGS:1;		/*Query graphics state */
-	u8 _DSS:1;		/*Device state set */
 };
 
 struct acpi_video_brightness_flags {
@@ -202,7 +205,6 @@
 	struct acpi_video_device_brightness *brightness;
 	struct backlight_device *backlight;
 	struct thermal_cooling_device *cooling_dev;
-	struct output_device *output_dev;
 };
 
 static const char device_decode[][30] = {
@@ -226,10 +228,6 @@
 				     u32 level_current, u32 event);
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
 					 int event);
-static int acpi_video_device_get_state(struct acpi_video_device *device,
-			    unsigned long long *state);
-static int acpi_video_output_get(struct output_device *od);
-static int acpi_video_device_set_state(struct acpi_video_device *device, int state);
 
 /*backlight device sysfs support*/
 static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -260,35 +258,11 @@
 				vd->brightness->levels[request_level]);
 }
 
-static struct backlight_ops acpi_backlight_ops = {
+static const struct backlight_ops acpi_backlight_ops = {
 	.get_brightness = acpi_video_get_brightness,
 	.update_status  = acpi_video_set_brightness,
 };
 
-/*video output device sysfs support*/
-static int acpi_video_output_get(struct output_device *od)
-{
-	unsigned long long state;
-	struct acpi_video_device *vd =
-		(struct acpi_video_device *)dev_get_drvdata(&od->dev);
-	acpi_video_device_get_state(vd, &state);
-	return (int)state;
-}
-
-static int acpi_video_output_set(struct output_device *od)
-{
-	unsigned long state = od->request_state;
-	struct acpi_video_device *vd=
-		(struct acpi_video_device *)dev_get_drvdata(&od->dev);
-	return acpi_video_device_set_state(vd, state);
-}
-
-static struct output_properties acpi_output_properties = {
-	.set_state = acpi_video_output_set,
-	.get_status = acpi_video_output_get,
-};
-
-
 /* thermal cooling device callbacks */
 static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned
 			       long *state)
@@ -344,34 +318,6 @@
                                Video Management
    -------------------------------------------------------------------------- */
 
-/* device */
-
-static int
-acpi_video_device_get_state(struct acpi_video_device *device,
-			    unsigned long long *state)
-{
-	int status;
-
-	status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state);
-
-	return status;
-}
-
-static int
-acpi_video_device_set_state(struct acpi_video_device *device, int state)
-{
-	int status;
-	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-	struct acpi_object_list args = { 1, &arg0 };
-	unsigned long long ret;
-
-
-	arg0.integer.value = state;
-	status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret);
-
-	return status;
-}
-
 static int
 acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
 				   union acpi_object **levels)
@@ -766,9 +712,11 @@
 		 * when invoked for the first time, i.e. level_old is invalid.
 		 * set the backlight to max_level in this case
 		 */
-		for (i = 2; i < br->count; i++)
-			if (level_old == br->levels[i])
-				level = level_old;
+		if (use_bios_initial_backlight) {
+			for (i = 2; i < br->count; i++)
+				if (level_old == br->levels[i])
+					level = level_old;
+		}
 		goto set_level;
 	}
 
@@ -831,15 +779,6 @@
 	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
 		device->cap._DDC = 1;
 	}
-	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) {
-		device->cap._DCS = 1;
-	}
-	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) {
-		device->cap._DGS = 1;
-	}
-	if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) {
-		device->cap._DSS = 1;
-	}
 
 	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
@@ -904,21 +843,6 @@
 			printk(KERN_ERR PREFIX "Create sysfs link\n");
 
 	}
-
-	if (acpi_video_display_switch_support()) {
-
-		if (device->cap._DCS && device->cap._DSS) {
-			static int count;
-			char *name;
-			name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
-			if (!name)
-				return;
-			count++;
-			device->output_dev = video_output_register(name,
-					NULL, device, &acpi_output_properties);
-			kfree(name);
-		}
-	}
 }
 
 /*
@@ -1360,6 +1284,9 @@
 		if (!video_device)
 			continue;
 
+		if (!video_device->cap._DDC)
+			continue;
+
 		if (type) {
 			switch (type) {
 			case ACPI_VIDEO_DISPLAY_CRT:
@@ -1452,7 +1379,6 @@
 		thermal_cooling_device_unregister(device->cooling_dev);
 		device->cooling_dev = NULL;
 	}
-	video_output_unregister(device->output_dev);
 
 	return 0;
 }
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b836761..42d3d72 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -17,15 +17,14 @@
  * capabilities the graphics cards plugged in support. The check for general
  * video capabilities will be triggered by the first caller of
  * acpi_video_get_capabilities(NULL); which will happen when the first
- * backlight (or display output) switching supporting driver calls:
+ * driver that supports backlight switching calls:
  * acpi_video_backlight_support();
  *
  * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B)
  * are available, video.ko should be used to handle the device.
  *
  * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi,
- * sony_acpi,... can take care about backlight brightness and display output
- * switching.
+ * sony_acpi,... can take care of backlight brightness.
  *
  * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
  * this file will not be compiled, acpi_video_get_capabilities() and
@@ -83,11 +82,6 @@
 	if (!device)
 		return 0;
 
-	/* Is this device able to support video switching ? */
-	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
-	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
-		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
-
 	/* Is this device able to retrieve a video ROM ? */
 	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
 		video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
@@ -161,8 +155,6 @@
 		 *
 		 *   if (dmi_name_in_vendors("XY")) {
 		 *	acpi_video_support |=
-		 *		ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR;
-		 *	acpi_video_support |=
 		 *		ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
 		 *}
 		 */
@@ -212,33 +204,8 @@
 EXPORT_SYMBOL(acpi_video_backlight_support);
 
 /*
- * Returns true if video.ko can do display output switching.
- * This does not work well/at all with binary graphics drivers
- * which disable system io ranges and do it on their own.
- */
-int acpi_video_display_switch_support(void)
-{
-	if (!acpi_video_caps_checked)
-		acpi_video_get_capabilities(NULL);
-
-	if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR)
-		return 0;
-	else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO)
-		return 1;
-
-	if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR)
-		return 0;
-	else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO)
-		return 1;
-
-	return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING;
-}
-EXPORT_SYMBOL(acpi_video_display_switch_support);
-
-/*
- * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video
- * To force that backlight or display output switching is processed by vendor
- * specific acpi drivers or video.ko driver.
+ * Use acpi_backlight=vendor/video to force backlight switching to be
+ * handled by the vendor-specific ACPI drivers or by the video.ko driver.
  */
 static int __init acpi_backlight(char *str)
 {
@@ -255,19 +222,3 @@
 	return 1;
 }
 __setup("acpi_backlight=", acpi_backlight);
-
-static int __init acpi_display_output(char *str)
-{
-	if (str == NULL || *str == '\0')
-		return 1;
-	else {
-		if (!strcmp("vendor", str))
-			acpi_video_support |=
-				ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR;
-		if (!strcmp("video", str))
-			acpi_video_support |=
-				ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO;
-	}
-	return 1;
-}
-__setup("acpi_display_output=", acpi_display_output);
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index f62a50c..ed65014 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -37,15 +37,16 @@
 			container_of(node, struct acpi_device, wakeup_list);
 
 		if (!dev->wakeup.flags.valid
-		    || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
-		    || sleep_state > (u32) dev->wakeup.sleep_state)
+		    || sleep_state > (u32) dev->wakeup.sleep_state
+		    || !(device_may_wakeup(&dev->dev)
+		        || dev->wakeup.prepare_count))
 			continue;
 
-		if (dev->wakeup.state.enabled)
+		if (device_may_wakeup(&dev->dev))
 			acpi_enable_wakeup_device_power(dev, sleep_state);
 
 		/* The wake-up power should have been enabled already. */
-		acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+		acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
 				ACPI_GPE_ENABLE);
 	}
 }
@@ -63,14 +64,15 @@
 			container_of(node, struct acpi_device, wakeup_list);
 
 		if (!dev->wakeup.flags.valid
-		    || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count)
-		    || (sleep_state > (u32) dev->wakeup.sleep_state))
+		    || sleep_state > (u32) dev->wakeup.sleep_state
+		    || !(device_may_wakeup(&dev->dev)
+		        || dev->wakeup.prepare_count))
 			continue;
 
-		acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
+		acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number,
 				ACPI_GPE_DISABLE);
 
-		if (dev->wakeup.state.enabled)
+		if (device_may_wakeup(&dev->dev))
 			acpi_disable_wakeup_device_power(dev);
 	}
 }
@@ -84,8 +86,8 @@
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
 						       wakeup_list);
-		if (dev->wakeup.flags.always_enabled)
-			dev->wakeup.state.enabled = 1;
+		if (device_can_wakeup(&dev->dev))
+			device_set_wakeup_enable(&dev->dev, true);
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
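The wakeup.c hunks above replace the private dev->wakeup.state.enabled flag with the generic device_may_wakeup() test. As a rough illustration (not from the patch), this is the same check an ordinary driver makes in its suspend path before arming a wake source; my_arm_wake_irq() is a hypothetical helper.

static int my_driver_suspend(struct device *dev)
{
	if (device_may_wakeup(dev))
		my_arm_wake_irq(dev);	/* hypothetical wake-source setup */

	return 0;
}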
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 36e2319..c6b298d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -2,6 +2,14 @@
 # SATA/PATA driver configuration
 #
 
+config HAVE_PATA_PLATFORM
+	bool
+	help
+	  This is an internal configuration node for any machine that
+	  uses pata-platform driver to enable the relevant driver in the
+	  configuration structure without having to submit endless patches
+	  to update the PATA_PLATFORM entry.
+
 menuconfig ATA
 	tristate "Serial ATA and Parallel ATA drivers"
 	depends on HAS_IOMEM
@@ -90,6 +98,14 @@
 	help
 	  This option enables support for Initio 162x Serial ATA.
 
+config SATA_ACARD_AHCI
+	tristate "ACard AHCI variant (ATP 8620)"
+	depends on PCI
+	help
+	  This option enables support for ACard ATP 8620 based AHCI controllers.
+
+	  If unsure, say N.
+
 config SATA_SIL24
 	tristate "Silicon Image 3124/3132 SATA support"
 	depends on PCI
@@ -400,11 +416,11 @@
 	  If unsure, say N.
 
 config PATA_HPT3X2N
-	tristate "HPT 372N/302N PATA support"
+	tristate "HPT 371N/372N/302N PATA support"
 	depends on PCI
 	help
 	  This option enables support for the N variant HPT PATA
-	  controllers via the new ATA layer
+	  controllers via the new ATA layer.
 
 	  If unsure, say N.
 
@@ -765,14 +781,6 @@
 
 	  If unsure, say N.
 
-config HAVE_PATA_PLATFORM
-	bool
-	help
-	  This is an internal configuration node for any machine that
-	  uses pata-platform driver to enable the relevant driver in the
-	  configuration structure without having to submit endless patches
-	  to update the PATA_PLATFORM entry.
-
 config PATA_PLATFORM
 	tristate "Generic platform device PATA support"
 	depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 2b67c90..27291aa 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -3,6 +3,7 @@
 
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)		+= ahci.o libahci.o
+obj-$(CONFIG_SATA_ACARD_AHCI)	+= acard-ahci.o libahci.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
 obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
new file mode 100644
index 0000000..339c210
--- /dev/null
+++ b/drivers/ata/acard-ahci.c
@@ -0,0 +1,528 @@
+
+/*
+ *  acard-ahci.c - ACard AHCI SATA support
+ *
+ *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2010 Red Hat, Inc.
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * AHCI hardware documentation:
+ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
+ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/libata.h>
+#include "ahci.h"
+
+#define DRV_NAME	"acard-ahci"
+#define DRV_VERSION	"1.0"
+
+/*
+ * Received FIS structure limited to 80h.
+ */
+
+#define ACARD_AHCI_RX_FIS_SZ 128
+
+enum {
+	AHCI_PCI_BAR		= 5,
+};
+
+enum board_ids {
+	board_acard_ahci,
+};
+
+struct acard_sg {
+	__le32			addr;
+	__le32			addr_hi;
+	__le32			reserved;
+	__le32			size;	 /* bit 31 (EOT) max==0x10000 (64k) */
+};
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static int acard_ahci_port_start(struct ata_port *ap);
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+#ifdef CONFIG_PM
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
+#endif
+
+static struct scsi_host_template acard_ahci_sht = {
+	AHCI_SHT("acard-ahci"),
+};
+
+static struct ata_port_operations acard_ops = {
+	.inherits		= &ahci_ops,
+	.qc_prep		= acard_ahci_qc_prep,
+	.qc_fill_rtf		= acard_ahci_qc_fill_rtf,
+	.port_start             = acard_ahci_port_start,
+};
+
+#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
+
+static const struct ata_port_info acard_ahci_port_info[] = {
+	[board_acard_ahci] =
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &acard_ops,
+	},
+};
+
+static const struct pci_device_id acard_ahci_pci_tbl[] = {
+	/* ACard */
+	{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */
+
+	{ }    /* terminate list */
+};
+
+static struct pci_driver acard_ahci_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= acard_ahci_pci_tbl,
+	.probe			= acard_ahci_init_one,
+	.remove			= ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend		= acard_ahci_pci_device_suspend,
+	.resume			= acard_ahci_pci_device_resume,
+#endif
+};
+
+#ifdef CONFIG_PM
+static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+
+	if (mesg.event & PM_EVENT_SUSPEND &&
+	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "BIOS update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	if (mesg.event & PM_EVENT_SLEEP) {
+		/* AHCI spec rev1.1 section 8.3.3:
+		 * Software must disable interrupts prior to requesting a
+		 * transition of the HBA to D3 state.
+		 */
+		ctl = readl(mmio + HOST_CTL);
+		ctl &= ~HOST_IRQ_EN;
+		writel(ctl, mmio + HOST_CTL);
+		readl(mmio + HOST_CTL); /* flush */
+	}
+
+	return ata_pci_device_suspend(pdev, mesg);
+}
+
+static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
+{
+	struct ata_host *host = dev_get_drvdata(&pdev->dev);
+	int rc;
+
+	rc = ata_pci_device_do_resume(pdev);
+	if (rc)
+		return rc;
+
+	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+{
+	int rc;
+
+	if (using_dac &&
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (rc) {
+			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+			if (rc) {
+				dev_printk(KERN_ERR, &pdev->dev,
+					   "64-bit DMA enable failed\n");
+				return rc;
+			}
+		}
+	} else {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit DMA enable failed\n");
+			return rc;
+		}
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "32-bit consistent DMA enable failed\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void acard_ahci_pci_print_info(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	u16 cc;
+	const char *scc_s;
+
+	pci_read_config_word(pdev, 0x0a, &cc);
+	if (cc == PCI_CLASS_STORAGE_IDE)
+		scc_s = "IDE";
+	else if (cc == PCI_CLASS_STORAGE_SATA)
+		scc_s = "SATA";
+	else if (cc == PCI_CLASS_STORAGE_RAID)
+		scc_s = "RAID";
+	else
+		scc_s = "unknown";
+
+	ahci_print_info(host, scc_s);
+}
+
+static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+{
+	struct scatterlist *sg;
+	struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
+	unsigned int si, last_si = 0;
+
+	VPRINTK("ENTER\n");
+
+	/*
+	 * Next, the S/G list.
+	 */
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		dma_addr_t addr = sg_dma_address(sg);
+		u32 sg_len = sg_dma_len(sg);
+
+		/*
+		 * ACard note:
+		 * We must set an end-of-table (EOT) bit,
+		 * and the segment cannot exceed 64k (0x10000)
+		 */
+		acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
+		acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
+		acard_sg[si].size = cpu_to_le32(sg_len);
+		last_si = si;
+	}
+
+	acard_sg[last_si].size |= cpu_to_le32(1 << 31);	/* set EOT */
+
+	return si;
+}
+
+static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ahci_port_priv *pp = ap->private_data;
+	int is_atapi = ata_is_atapi(qc->tf.protocol);
+	void *cmd_tbl;
+	u32 opts;
+	const u32 cmd_fis_len = 5; /* five dwords */
+	unsigned int n_elem;
+
+	/*
+	 * Fill in command table information.  First, the header,
+	 * a SATA Register - Host to Device command FIS.
+	 */
+	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+
+	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
+	if (is_atapi) {
+		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
+		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
+	}
+
+	n_elem = 0;
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		n_elem = acard_ahci_fill_sg(qc, cmd_tbl);
+
+	/*
+	 * Fill in command slot information.
+	 *
+	 * ACard note: prd table length not filled in
+	 */
+	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		opts |= AHCI_CMD_WRITE;
+	if (is_atapi)
+		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+
+	ahci_fill_cmd_slot(pp, qc->tag, opts);
+}
+
+static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ahci_port_priv *pp = qc->ap->private_data;
+	u8 *rx_fis = pp->rx_fis;
+
+	if (pp->fbs_enabled)
+		rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;
+
+	/*
+	 * After a successful execution of an ATA PIO data-in command,
+	 * the device doesn't send D2H Reg FIS to update the TF and
+	 * the host should take TF and E_Status from the preceding PIO
+	 * Setup FIS.
+	 */
+	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
+	    !(qc->flags & ATA_QCFLAG_FAILED)) {
+		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
+		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
+	} else
+		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+
+	return true;
+}
+
+static int acard_ahci_port_start(struct ata_port *ap)
+{
+	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct device *dev = ap->host->dev;
+	struct ahci_port_priv *pp;
+	void *mem;
+	dma_addr_t mem_dma;
+	size_t dma_sz, rx_fis_sz;
+
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+
+	/* check FBS capability */
+	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+		void __iomem *port_mmio = ahci_port_base(ap);
+		u32 cmd = readl(port_mmio + PORT_CMD);
+		if (cmd & PORT_CMD_FBSCP)
+			pp->fbs_supported = true;
+		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+			dev_printk(KERN_INFO, dev,
+				   "port %d can do FBS, forcing FBSCP\n",
+				   ap->port_no);
+			pp->fbs_supported = true;
+		} else
+			dev_printk(KERN_WARNING, dev,
+				   "port %d is not capable of FBS\n",
+				   ap->port_no);
+	}
+
+	if (pp->fbs_supported) {
+		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
+	} else {
+		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
+	}
+
+	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+	memset(mem, 0, dma_sz);
+
+	/*
+	 * First item in chunk of DMA memory: 32-slot command table,
+	 * 32 bytes each in size
+	 */
+	pp->cmd_slot = mem;
+	pp->cmd_slot_dma = mem_dma;
+
+	mem += AHCI_CMD_SLOT_SZ;
+	mem_dma += AHCI_CMD_SLOT_SZ;
+
+	/*
+	 * Second item: Received-FIS area
+	 */
+	pp->rx_fis = mem;
+	pp->rx_fis_dma = mem_dma;
+
+	mem += rx_fis_sz;
+	mem_dma += rx_fis_sz;
+
+	/*
+	 * Third item: data area for storing a single command
+	 * and its scatter-gather table
+	 */
+	pp->cmd_tbl = mem;
+	pp->cmd_tbl_dma = mem_dma;
+
+	/*
+	 * Save off initial list of interrupts to be enabled.
+	 * This could be changed later
+	 */
+	pp->intr_mask = DEF_PORT_IRQ;
+
+	ap->private_data = pp;
+
+	/* engage engines, captain */
+	return ahci_port_resume(ap);
+}
+
+static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version;
+	unsigned int board_id = ent->driver_data;
+	struct ata_port_info pi = acard_ahci_port_info[board_id];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device *dev = &pdev->dev;
+	struct ahci_host_priv *hpriv;
+	struct ata_host *host;
+	int n_ports, i, rc;
+
+	VPRINTK("ENTER\n");
+
+	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
+
+	if (!printed_version++)
+		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+
+	/* acquire resources */
+	rc = pcim_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	/* AHCI controllers often implement an SFF-compatible interface.
+	 * Grab all PCI BARs just in case.
+	 */
+	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+	if (rc == -EBUSY)
+		pcim_pin_device(pdev);
+	if (rc)
+		return rc;
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		return -ENOMEM;
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
+		pci_enable_msi(pdev);
+
+	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+
+	/* save initial config */
+	ahci_save_initial_config(&pdev->dev, hpriv, 0, 0);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ)
+		pi.flags |= ATA_FLAG_NCQ;
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	ahci_set_em_messages(hpriv, &pi);
+
+	/* CAP.NP sometimes indicates the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+	if (!host)
+		return -ENOMEM;
+	host->private_data = hpriv;
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+	else
+		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+				   0x100 + ap->port_no * 0x80, "port");
+
+		/* set initial link pm policy */
+		/*
+		ap->pm_policy = NOT_AVAILABLE;
+		*/
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	/* initialize adapter */
+	rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+	if (rc)
+		return rc;
+
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
+
+	ahci_init_controller(host);
+	acard_ahci_pci_print_info(host);
+
+	pci_set_master(pdev);
+	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
+				 &acard_ahci_sht);
+}
+
+static int __init acard_ahci_init(void)
+{
+	return pci_register_driver(&acard_ahci_pci_driver);
+}
+
+static void __exit acard_ahci_exit(void)
+{
+	pci_unregister_driver(&acard_ahci_pci_driver);
+}
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
+MODULE_VERSION(DRV_VERSION);
+
+module_init(acard_ahci_init);
+module_exit(acard_ahci_exit);
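A side note on acard_ahci_fill_sg() above: the high word of each address is produced with (addr >> 16) >> 16 rather than addr >> 32, because dma_addr_t may be only 32 bits wide and a shift by the full width of a 32-bit value is undefined in C. A minimal illustration of the idiom (not part of the patch, assuming the usual kernel types):

static inline void split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = addr & 0xffffffff;
	*hi = (addr >> 16) >> 16;	/* evaluates to 0 when dma_addr_t is 32-bit */
}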
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 329cbbb..3e606c3 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -311,6 +311,8 @@
 
 extern struct ata_port_operations ahci_ops;
 
+void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			u32 opts);
 void ahci_save_initial_config(struct device *dev,
 			      struct ahci_host_priv *hpriv,
 			      unsigned int force_port_map,
@@ -326,6 +328,7 @@
 void ahci_start_engine(struct ata_port *ap);
 int ahci_check_ready(struct ata_link *link);
 int ahci_kick_engine(struct ata_port *ap);
+int ahci_port_resume(struct ata_port *ap);
 void ahci_set_em_messages(struct ahci_host_priv *hpriv,
 			  struct ata_port_info *pi);
 int ahci_reset_em(struct ata_host *host);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index ebc08d6..26d4523 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -87,10 +87,7 @@
 static void ahci_postreset(struct ata_link *link, unsigned int *class);
 static void ahci_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
-static int ahci_port_resume(struct ata_port *ap);
 static void ahci_dev_config(struct ata_device *dev);
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
-			       u32 opts);
 #ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
 #endif
@@ -1133,8 +1130,8 @@
 	return ata_dev_classify(&tf);
 }
 
-static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
-			       u32 opts)
+void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
+			u32 opts)
 {
 	dma_addr_t cmd_tbl_dma;
 
@@ -1145,6 +1142,7 @@
 	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
 }
+EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
 
 int ahci_kick_engine(struct ata_port *ap)
 {
@@ -1918,7 +1916,7 @@
 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
 }
 
-static int ahci_port_resume(struct ata_port *ap)
+int ahci_port_resume(struct ata_port *ap)
 {
 	ahci_power_up(ap);
 	ahci_start_port(ap);
@@ -1930,6 +1928,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ahci_port_resume);
 
 #ifdef CONFIG_PM
 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f23d6d4..a31fe96 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2240,7 +2240,7 @@
 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
 				ata_dev_printk(dev, KERN_WARNING,
 					       "supports DRM functions and may "
-					       "not be fully accessable.\n");
+					       "not be fully accessible.\n");
 			snprintf(revbuf, 7, "CFA");
 		} else {
 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
@@ -2248,7 +2248,7 @@
 			if (ata_id_has_tpm(id))
 				ata_dev_printk(dev, KERN_WARNING,
 					       "supports DRM functions and may "
-					       "not be fully accessable.\n");
+					       "not be fully accessible.\n");
 		}
 
 		dev->n_sectors = ata_id_n_sectors(id);
@@ -6128,7 +6128,7 @@
 	/* it better be dead now */
 	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
 
-	cancel_rearming_delayed_work(&ap->hotplug_task);
+	cancel_delayed_work_sync(&ap->hotplug_task);
 
  skip_eh:
 	if (ap->pmp_link) {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 66aa4be..5defc74 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -346,12 +346,11 @@
 };
 EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
 
-static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
-				   void (*done)(struct scsi_cmnd *))
+static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
 {
 	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
-	done(cmd);
+	cmd->scsi_done(cmd);
 }
 
 /**
@@ -719,7 +718,6 @@
  *	ata_scsi_qc_new - acquire new ata_queued_cmd reference
  *	@dev: ATA device to which the new command is attached
  *	@cmd: SCSI command that originated this ATA command
- *	@done: SCSI command completion function
  *
  *	Obtain a reference to an unused ata_queued_cmd structure,
  *	which is the basic libata structure representing a single
@@ -736,21 +734,20 @@
  *	Command allocated, or %NULL if none available.
  */
 static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
-					      struct scsi_cmnd *cmd,
-					      void (*done)(struct scsi_cmnd *))
+					      struct scsi_cmnd *cmd)
 {
 	struct ata_queued_cmd *qc;
 
 	qc = ata_qc_new_init(dev);
 	if (qc) {
 		qc->scsicmd = cmd;
-		qc->scsidone = done;
+		qc->scsidone = cmd->scsi_done;
 
 		qc->sg = scsi_sglist(cmd);
 		qc->n_elem = scsi_sg_count(cmd);
 	} else {
 		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
-		done(cmd);
+		cmd->scsi_done(cmd);
 	}
 
 	return qc;
@@ -1735,7 +1732,6 @@
  *	ata_scsi_translate - Translate then issue SCSI command to ATA device
  *	@dev: ATA device to which the command is addressed
  *	@cmd: SCSI command to execute
- *	@done: SCSI command completion function
  *	@xlat_func: Actor which translates @cmd to an ATA taskfile
  *
  *	Our ->queuecommand() function has decided that the SCSI
@@ -1759,7 +1755,6 @@
  *	needs to be deferred.
  */
 static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
-			      void (*done)(struct scsi_cmnd *),
 			      ata_xlat_func_t xlat_func)
 {
 	struct ata_port *ap = dev->link->ap;
@@ -1768,7 +1763,7 @@
 
 	VPRINTK("ENTER\n");
 
-	qc = ata_scsi_qc_new(dev, cmd, done);
+	qc = ata_scsi_qc_new(dev, cmd);
 	if (!qc)
 		goto err_mem;
 
@@ -1804,14 +1799,14 @@
 
 early_finish:
 	ata_qc_free(qc);
-	qc->scsidone(cmd);
+	cmd->scsi_done(cmd);
 	DPRINTK("EXIT - early finish (good or error)\n");
 	return 0;
 
 err_did:
 	ata_qc_free(qc);
 	cmd->result = (DID_ERROR << 16);
-	qc->scsidone(cmd);
+	cmd->scsi_done(cmd);
 err_mem:
 	DPRINTK("EXIT - internal\n");
 	return 0;
@@ -3116,7 +3111,6 @@
 }
 
 static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
-				      void (*done)(struct scsi_cmnd *),
 				      struct ata_device *dev)
 {
 	u8 scsi_op = scmd->cmnd[0];
@@ -3150,9 +3144,9 @@
 	}
 
 	if (xlat_func)
-		rc = ata_scsi_translate(dev, scmd, done, xlat_func);
+		rc = ata_scsi_translate(dev, scmd, xlat_func);
 	else
-		ata_scsi_simulate(dev, scmd, done);
+		ata_scsi_simulate(dev, scmd);
 
 	return rc;
 
@@ -3160,7 +3154,7 @@
 	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
 		scmd->cmd_len, scsi_op, dev->cdb_len);
 	scmd->result = DID_ERROR << 16;
-	done(scmd);
+	scmd->scsi_done(scmd);
 	return 0;
 }
 
@@ -3199,7 +3193,7 @@
 
 	dev = ata_scsi_find_dev(ap, scsidev);
 	if (likely(dev))
-		rc = __ata_scsi_queuecmd(cmd, cmd->scsi_done, dev);
+		rc = __ata_scsi_queuecmd(cmd, dev);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
 		cmd->scsi_done(cmd);
@@ -3214,7 +3208,6 @@
  *	ata_scsi_simulate - simulate SCSI command on ATA device
  *	@dev: the target device
  *	@cmd: SCSI command being sent to device.
- *	@done: SCSI command completion function.
  *
  *	Interprets and directly executes a select list of SCSI commands
  *	that can be handled internally.
@@ -3223,8 +3216,7 @@
  *	spin_lock_irqsave(host lock)
  */
 
-void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
-		      void (*done)(struct scsi_cmnd *))
+void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
 {
 	struct ata_scsi_args args;
 	const u8 *scsicmd = cmd->cmnd;
@@ -3233,17 +3225,17 @@
 	args.dev = dev;
 	args.id = dev->id;
 	args.cmd = cmd;
-	args.done = done;
+	args.done = cmd->scsi_done;
 
 	switch(scsicmd[0]) {
 	/* TODO: worth improving? */
 	case FORMAT_UNIT:
-		ata_scsi_invalid_field(cmd, done);
+		ata_scsi_invalid_field(cmd);
 		break;
 
 	case INQUIRY:
 		if (scsicmd[1] & 2)	           /* is CmdDt set?  */
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 		else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
 		else switch (scsicmd[2]) {
@@ -3269,7 +3261,7 @@
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
 			break;
 		default:
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 			break;
 		}
 		break;
@@ -3281,7 +3273,7 @@
 
 	case MODE_SELECT:	/* unconditionally return */
 	case MODE_SELECT_10:	/* bad-field-in-cdb */
-		ata_scsi_invalid_field(cmd, done);
+		ata_scsi_invalid_field(cmd);
 		break;
 
 	case READ_CAPACITY:
@@ -3292,7 +3284,7 @@
 		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
 			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
 		else
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 		break;
 
 	case REPORT_LUNS:
@@ -3302,7 +3294,7 @@
 	case REQUEST_SENSE:
 		ata_scsi_set_sense(cmd, 0, 0, 0);
 		cmd->result = (DRIVER_SENSE << 24);
-		done(cmd);
+		cmd->scsi_done(cmd);
 		break;
 
 	/* if we reach this, then writeback caching is disabled,
@@ -3324,14 +3316,14 @@
 		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
 			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
 		else
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 		break;
 
 	/* all other commands */
 	default:
 		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
 		/* "Invalid command operation code" */
-		done(cmd);
+		cmd->scsi_done(cmd);
 		break;
 	}
 }
@@ -3858,7 +3850,6 @@
 /**
  *	ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
  *	@cmd: SCSI command to be sent
- *	@done: Completion function, called when command is complete
  *	@ap:	ATA port to which the command is being sent
  *
  *	RETURNS:
@@ -3866,18 +3857,17 @@
  *	0 otherwise.
  */
 
-int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
-		     struct ata_port *ap)
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
 {
 	int rc = 0;
 
 	ata_scsi_dump_cdb(ap, cmd);
 
 	if (likely(ata_dev_enabled(ap->link.device)))
-		rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
+		rc = __ata_scsi_queuecmd(cmd, ap->link.device);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
-		done(cmd);
+		cmd->scsi_done(cmd);
 	}
 	return rc;
 }
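The libata-scsi.c changes above drop the explicit completion callback: ata_scsi_simulate(), __ata_scsi_queuecmd() and ata_sas_queuecmd() now complete commands through cmd->scsi_done. A hedged sketch of what a SAS LLD's queuecommand wrapper looks like after the conversion; my_cmd_to_ata_port() is a hypothetical lookup helper and the two-argument queuecommand prototype is assumed.

static int my_sas_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct ata_port *ap = my_cmd_to_ata_port(cmd);	/* hypothetical */

	/* completion is taken from cmd->scsi_done; no callback argument */
	return ata_sas_queuecmd(cmd, ap);
}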
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 484697f..af6141b 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1320,7 +1320,7 @@
 {
 	DPRINTK("ENTER\n");
 
-	cancel_rearming_delayed_work(&ap->sff_pio_task);
+	cancel_delayed_work_sync(&ap->sff_pio_task);
 	ap->hsm_task_state = HSM_ST_IDLE;
 
 	if (ata_msg_ctl(ap))
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 7688868..d7e57db 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt366"
-#define DRV_VERSION	"0.6.8"
+#define DRV_VERSION	"0.6.9"
 
 struct hpt_clock {
 	u8	xfer_mode;
@@ -110,18 +110,23 @@
 	{	0,		0x01208585	}
 };
 
-static const char *bad_ata33[] = {
-	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
-	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
-	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+static const char * const bad_ata33[] = {
+	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
+	"Maxtor 90845U3", "Maxtor 90650U2",
+	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
+	"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
+	"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
 	"Maxtor 90510D4",
 	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
-	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
-	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
+	"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
+	"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
 	NULL
 };
 
-static const char *bad_ata66_4[] = {
+static const char * const bad_ata66_4[] = {
 	"IBM-DTLA-307075",
 	"IBM-DTLA-307060",
 	"IBM-DTLA-307045",
@@ -140,12 +145,13 @@
 	NULL
 };
 
-static const char *bad_ata66_3[] = {
+static const char * const bad_ata66_3[] = {
 	"WDC AC310200R",
 	NULL
 };
 
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
+			       const char * const list[])
 {
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 	int i = 0;
@@ -288,6 +294,7 @@
 static void hpt36x_init_chipset(struct pci_dev *dev)
 {
 	u8 drive_fast;
+
 	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
 	pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
@@ -349,16 +356,16 @@
 
 	/* PCI clocking determines the ATA timing values to use */
 	/* info_hpt366 is safe against re-entry so we can scribble on it */
-	switch((reg1 & 0x700) >> 8) {
-		case 9:
-			hpriv = &hpt366_40;
-			break;
-		case 5:
-			hpriv = &hpt366_25;
-			break;
-		default:
-			hpriv = &hpt366_33;
-			break;
+	switch ((reg1 & 0x700) >> 8) {
+	case 9:
+		hpriv = &hpt366_40;
+		break;
+	case 5:
+		hpriv = &hpt366_25;
+		break;
+	default:
+		hpriv = &hpt366_33;
+		break;
 	}
 	/* Now kick off ATA set up */
 	return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
@@ -385,9 +392,9 @@
 };
 
 static struct pci_driver hpt36x_pci_driver = {
-	.name 		= DRV_NAME,
+	.name		= DRV_NAME,
 	.id_table	= hpt36x,
-	.probe 		= hpt36x_init_one,
+	.probe		= hpt36x_init_one,
 	.remove		= ata_pci_remove_one,
 #ifdef CONFIG_PM
 	.suspend	= ata_pci_device_suspend,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 9ae4c08..efdd18b 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,7 +8,7 @@
  * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
  * Portions Copyright (C) 2003		Red Hat Inc
- * Portions Copyright (C) 2005-2009	MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2010	MontaVista Software, Inc.
  *
  * TODO
  *	Look into engine reset on timeout errors. Should not be	required.
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt37x"
-#define DRV_VERSION	"0.6.15"
+#define DRV_VERSION	"0.6.18"
 
 struct hpt_clock {
 	u8	xfer_speed;
@@ -210,7 +210,7 @@
 {
 	struct hpt_clock *clocks = ap->host->private_data;
 
-	while(clocks->xfer_speed) {
+	while (clocks->xfer_speed) {
 		if (clocks->xfer_speed == speed)
 			return clocks->timing;
 		clocks++;
@@ -219,7 +219,8 @@
 	return 0xffffffffU;	/* silence compiler warning */
 }
 
-static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
+static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
+			       const char * const list[])
 {
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
 	int i = 0;
@@ -237,18 +238,23 @@
 	return 0;
 }
 
-static const char *bad_ata33[] = {
-	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
-	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
-	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
+static const char * const bad_ata33[] = {
+	"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
+	"Maxtor 90845U3", "Maxtor 90650U2",
+	"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5",
+	"Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
+	"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6",
+	"Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
 	"Maxtor 90510D4",
 	"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
-	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
-	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
+	"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7",
+	"Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
+	"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5",
+	"Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
 	NULL
 };
 
-static const char *bad_ata100_5[] = {
+static const char * const bad_ata100_5[] = {
 	"IBM-DTLA-307075",
 	"IBM-DTLA-307060",
 	"IBM-DTLA-307045",
@@ -302,6 +308,22 @@
 }
 
 /**
+ *	hpt372_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	The Marvell bridge chips used on the HighPoint SATA cards do not seem
+ *	to support UltraDMA modes 1, 2, and 3, nor any of the MWDMA modes...
+ */
+static unsigned long hpt372_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (ata_id_is_sata(adev->id))
+		mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
+
+	return mask;
+}
+
+/**
  *	hpt37x_cable_detect	-	Detect the cable type
  *	@ap: ATA port to detect on
  *
@@ -373,6 +395,7 @@
 		{ 0x50, 1, 0x04, 0x04 },
 		{ 0x54, 1, 0x04, 0x04 }
 	};
+
 	if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
 		return -ENOENT;
 
@@ -586,11 +609,11 @@
 };
 
 /*
- *	Configuration for HPT372, HPT371, HPT302. Slightly different PIO
- *	and DMA mode setting functionality.
+ *	Configuration for HPT371 and HPT302. Slightly different PIO and DMA
+ *	mode setting functionality.
  */
 
-static struct ata_port_operations hpt372_port_ops = {
+static struct ata_port_operations hpt302_port_ops = {
 	.inherits	= &ata_bmdma_port_ops,
 
 	.bmdma_stop	= hpt37x_bmdma_stop,
@@ -602,7 +625,17 @@
 };
 
 /*
- *	Configuration for HPT374. Mode setting works like 372 and friends
+ *	Configuration for HPT372. Mode setting works like 371 and 302
+ *	but we have a mode filter.
+ */
+
+static struct ata_port_operations hpt372_port_ops = {
+	.inherits	= &hpt302_port_ops,
+	.mode_filter	= hpt372_filter,
+};
+
+/*
+ *	Configuration for HPT374. Mode setting and filtering works like 372
  *	but we have a different cable detection procedure for function 1.
  */
 
@@ -647,12 +680,12 @@
 	u32 reg5c;
 	int tries;
 
-	for(tries = 0; tries < 0x5000; tries++) {
+	for (tries = 0; tries < 0x5000; tries++) {
 		udelay(50);
 		pci_read_config_byte(dev, 0x5b, &reg5b);
 		if (reg5b & 0x80) {
 			/* See if it stays set */
-			for(tries = 0; tries < 0x1000; tries ++) {
+			for (tries = 0; tries < 0x1000; tries++) {
 				pci_read_config_byte(dev, 0x5b, &reg5b);
 				/* Failed ? */
 				if ((reg5b & 0x80) == 0)
@@ -660,7 +693,7 @@
 			}
 			/* Turn off tuning, we have the DPLL set */
 			pci_read_config_dword(dev, 0x5c, &reg5c);
-			pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
+			pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
 			return 1;
 		}
 	}
@@ -672,6 +705,7 @@
 {
 	u32 freq;
 	unsigned long io_base = pci_resource_start(pdev, 4);
+
 	if (PCI_FUNC(pdev->devfn) & 1) {
 		struct pci_dev *pdev_0;
 
@@ -737,23 +771,23 @@
 		.udma_mask = ATA_UDMA5,
 		.port_ops = &hpt370a_port_ops
 	};
-	/* HPT370 - UDMA100 */
+	/* HPT370 - UDMA66 */
 	static const struct ata_port_info info_hpt370_33 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
-		.udma_mask = ATA_UDMA5,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &hpt370_port_ops
 	};
-	/* HPT370A - UDMA100 */
+	/* HPT370A - UDMA66 */
 	static const struct ata_port_info info_hpt370a_33 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
-		.udma_mask = ATA_UDMA5,
+		.udma_mask = ATA_UDMA4,
 		.port_ops = &hpt370a_port_ops
 	};
-	/* HPT371, 372 and friends - UDMA133 */
+	/* HPT372 - UDMA133 */
 	static const struct ata_port_info info_hpt372 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = ATA_PIO4,
@@ -761,6 +795,14 @@
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt372_port_ops
 	};
+	/* HPT371, 302 - UDMA133 */
+	static const struct ata_port_info info_hpt302 = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &hpt302_port_ops
+	};
 	/* HPT374 - UDMA100, function 1 uses different prereset method */
 	static const struct ata_port_info info_hpt374_fn0 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
@@ -805,64 +847,68 @@
 		if (rev == 6)
 			return -ENODEV;
 
-		switch(rev) {
-			case 3:
-				ppi[0] = &info_hpt370;
-				chip_table = &hpt370;
-				prefer_dpll = 0;
-				break;
-			case 4:
-				ppi[0] = &info_hpt370a;
-				chip_table = &hpt370a;
-				prefer_dpll = 0;
-				break;
-			case 5:
-				ppi[0] = &info_hpt372;
-				chip_table = &hpt372;
-				break;
-			default:
-				printk(KERN_ERR "pata_hpt37x: Unknown HPT366 "
-				       "subtype, please report (%d).\n", rev);
-				return -ENODEV;
+		switch (rev) {
+		case 3:
+			ppi[0] = &info_hpt370;
+			chip_table = &hpt370;
+			prefer_dpll = 0;
+			break;
+		case 4:
+			ppi[0] = &info_hpt370a;
+			chip_table = &hpt370a;
+			prefer_dpll = 0;
+			break;
+		case 5:
+			ppi[0] = &info_hpt372;
+			chip_table = &hpt372;
+			break;
+		default:
+			printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, "
+			       "please report (%d).\n", rev);
+			return -ENODEV;
 		}
 	} else {
-		switch(dev->device) {
-			case PCI_DEVICE_ID_TTI_HPT372:
-				/* 372N if rev >= 2*/
-				if (rev >= 2)
-					return -ENODEV;
-				ppi[0] = &info_hpt372;
-				chip_table = &hpt372a;
-				break;
-			case PCI_DEVICE_ID_TTI_HPT302:
-				/* 302N if rev > 1 */
-				if (rev > 1)
-					return -ENODEV;
-				ppi[0] = &info_hpt372;
-				/* Check this */
-				chip_table = &hpt302;
-				break;
-			case PCI_DEVICE_ID_TTI_HPT371:
-				if (rev > 1)
-					return -ENODEV;
-				ppi[0] = &info_hpt372;
-				chip_table = &hpt371;
-				/* Single channel device, master is not present
-				   but the BIOS (or us for non x86) must mark it
-				   absent */
-				pci_read_config_byte(dev, 0x50, &mcr1);
-				mcr1 &= ~0x04;
-				pci_write_config_byte(dev, 0x50, mcr1);
-				break;
-			case PCI_DEVICE_ID_TTI_HPT374:
-				chip_table = &hpt374;
-				if (!(PCI_FUNC(dev->devfn) & 1))
-					*ppi = &info_hpt374_fn0;
-				else
-					*ppi = &info_hpt374_fn1;
-				break;
-			default:
-				printk(KERN_ERR "pata_hpt37x: PCI table is bogus please report (%d).\n", dev->device);
+		switch (dev->device) {
+		case PCI_DEVICE_ID_TTI_HPT372:
+			/* 372N if rev >= 2 */
+			if (rev >= 2)
+				return -ENODEV;
+			ppi[0] = &info_hpt372;
+			chip_table = &hpt372a;
+			break;
+		case PCI_DEVICE_ID_TTI_HPT302:
+			/* 302N if rev > 1 */
+			if (rev > 1)
+				return -ENODEV;
+			ppi[0] = &info_hpt302;
+			/* Check this */
+			chip_table = &hpt302;
+			break;
+		case PCI_DEVICE_ID_TTI_HPT371:
+			if (rev > 1)
+				return -ENODEV;
+			ppi[0] = &info_hpt302;
+			chip_table = &hpt371;
+			/*
+			 * Single channel device, master is not present
+			 * but the BIOS (or us for non x86) must mark it
+			 * absent
+			 */
+			pci_read_config_byte(dev, 0x50, &mcr1);
+			mcr1 &= ~0x04;
+			pci_write_config_byte(dev, 0x50, mcr1);
+			break;
+		case PCI_DEVICE_ID_TTI_HPT374:
+			chip_table = &hpt374;
+			if (!(PCI_FUNC(dev->devfn) & 1))
+				*ppi = &info_hpt374_fn0;
+			else
+				*ppi = &info_hpt374_fn1;
+			break;
+		default:
+			printk(KERN_ERR
+			       "pata_hpt37x: PCI table is bogus, please report (%d).\n",
+			       dev->device);
 				return -ENODEV;
 		}
 	}
@@ -893,9 +939,11 @@
 	if (chip_table == &hpt372a)
 		outb(0x0e, iobase + 0x9c);
 
-	/* Some devices do not let this value be accessed via PCI space
-	   according to the old driver. In addition we must use the value
-	   from FN 0 on the HPT374 */
+	/*
+	 * Some devices do not let this value be accessed via PCI space
+	 * according to the old driver. In addition we must use the value
+	 * from FN 0 on the HPT374.
+	 */
 
 	if (chip_table == &hpt374) {
 		freq = hpt374_read_freq(dev);
@@ -909,10 +957,11 @@
 		u8 sr;
 		u32 total = 0;
 
-		printk(KERN_WARNING "pata_hpt37x: BIOS has not set timing clocks.\n");
+		printk(KERN_WARNING
+		       "pata_hpt37x: BIOS has not set timing clocks.\n");
 
 		/* This is the process the HPT371 BIOS is reported to use */
-		for(i = 0; i < 128; i++) {
+		for (i = 0; i < 128; i++) {
 			pci_read_config_byte(dev, 0x78, &sr);
 			total += sr & 0x1FF;
 			udelay(15);
@@ -947,17 +996,22 @@
 
 		/* Select the DPLL clock. */
 		pci_write_config_byte(dev, 0x5b, 0x21);
-		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+		pci_write_config_dword(dev, 0x5C,
+				       (f_high << 16) | f_low | 0x100);
 
-		for(adjust = 0; adjust < 8; adjust++) {
+		for (adjust = 0; adjust < 8; adjust++) {
 			if (hpt37x_calibrate_dpll(dev))
 				break;
-			/* See if it'll settle at a fractionally different clock */
+			/*
+			 * See if it'll settle at a fractionally
+			 * different clock
+			 */
 			if (adjust & 1)
 				f_low -= adjust >> 1;
 			else
 				f_high += adjust >> 1;
-			pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
+			pci_write_config_dword(dev, 0x5C,
+					       (f_high << 16) | f_low | 0x100);
 		}
 		if (adjust == 8) {
 			printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
@@ -976,7 +1030,7 @@
 		 *	Perform a final fixup. Note that we will have used the
 		 *	DPLL on the HPT372 which means we don't have to worry
 		 *	about lack of UDMA133 support on lower clocks
- 		 */
+		 */
 
 		if (clock_slot < 2 && ppi[0] == &info_hpt370)
 			ppi[0] = &info_hpt370_33;
@@ -1001,9 +1055,9 @@
 };
 
 static struct pci_driver hpt37x_pci_driver = {
-	.name 		= DRV_NAME,
+	.name		= DRV_NAME,
 	.id_table	= hpt37x,
-	.probe 		= hpt37x_init_one,
+	.probe		= hpt37x_init_one,
 	.remove		= ata_pci_remove_one
 };
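For reference on hpt372_filter() above (and the matching hpt372n_filter() in pata_hpt3x2n.c below): 0xE << ATA_SHIFT_UDMA covers UDMA1-UDMA3, so the filter leaves UDMA0 and UDMA4-UDMA6 intact, while ATA_MASK_MWDMA strips every MWDMA mode. A standalone restatement of the mask arithmetic, for illustration only:

static unsigned long marvell_bridge_filter(unsigned long mask)
{
	/* drop UDMA1-3 (bits 1-3 of the UDMA field) and all MWDMA modes */
	return mask & ~((0xEUL << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
}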
 
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 32f3463..d2239bb 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -1,5 +1,5 @@
 /*
- * Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers.
+ * Libata driver for the HighPoint 371N, 372N, and 302N UDMA66 ATA controllers.
  *
  * This driver is heavily based upon:
  *
@@ -8,7 +8,7 @@
  * Copyright (C) 1999-2003		Andre Hedrick <andre@linux-ide.org>
  * Portions Copyright (C) 2001	        Sun Microsystems, Inc.
  * Portions Copyright (C) 2003		Red Hat Inc
- * Portions Copyright (C) 2005-2009	MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2010	MontaVista Software, Inc.
  *
  *
  * TODO
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x2n"
-#define DRV_VERSION	"0.3.10"
+#define DRV_VERSION	"0.3.13"
 
 enum {
 	HPT_PCI_FAST	=	(1 << 31),
@@ -103,7 +103,7 @@
 {
 	struct hpt_clock *clocks = hpt3x2n_clocks;
 
-	while(clocks->xfer_speed) {
+	while (clocks->xfer_speed) {
 		if (clocks->xfer_speed == speed)
 			return clocks->timing;
 		clocks++;
@@ -113,6 +113,22 @@
 }
 
 /**
+ *	hpt372n_filter	-	mode selection filter
+ *	@adev: ATA device
+ *	@mask: mode mask
+ *
+ *	The Marvell bridge chips used on the HighPoint SATA cards do not seem
+ *	to support UltraDMA modes 1, 2, and 3, nor any of the MWDMA modes...
+ */
+static unsigned long hpt372n_filter(struct ata_device *adev, unsigned long mask)
+{
+	if (ata_id_is_sata(adev->id))
+		mask &= ~((0xE << ATA_SHIFT_UDMA) | ATA_MASK_MWDMA);
+
+	return mask;
+}
+
+/**
  *	hpt3x2n_cable_detect	-	Detect the cable type
  *	@ap: ATA port to detect on
  *
@@ -153,6 +169,7 @@
 {
 	struct ata_port *ap = link->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
 	/* Reset the state machine */
 	pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
 	udelay(100);
@@ -328,10 +345,10 @@
 };
 
 /*
- *	Configuration for HPT3x2n.
+ *	Configuration for HPT302N/371N.
  */
 
-static struct ata_port_operations hpt3x2n_port_ops = {
+static struct ata_port_operations hpt3xxn_port_ops = {
 	.inherits	= &ata_bmdma_port_ops,
 
 	.bmdma_stop	= hpt3x2n_bmdma_stop,
@@ -345,6 +362,15 @@
 	.prereset	= hpt3x2n_pre_reset,
 };
 
+/*
+ *	Configuration for HPT372N. Same as 302N/371N but we have a mode filter.
+ */
+
+static struct ata_port_operations hpt372n_port_ops = {
+	.inherits	= &hpt3xxn_port_ops,
+	.mode_filter	= &hpt372n_filter,
+};
+
 /**
  *	hpt3xn_calibrate_dpll		-	Calibrate the DPLL loop
  *	@dev: PCI device
@@ -359,12 +385,12 @@
 	u32 reg5c;
 	int tries;
 
-	for(tries = 0; tries < 0x5000; tries++) {
+	for (tries = 0; tries < 0x5000; tries++) {
 		udelay(50);
 		pci_read_config_byte(dev, 0x5b, &reg5b);
 		if (reg5b & 0x80) {
 			/* See if it stays set */
-			for(tries = 0; tries < 0x1000; tries ++) {
+			for (tries = 0; tries < 0x1000; tries++) {
 				pci_read_config_byte(dev, 0x5b, &reg5b);
 				/* Failed ? */
 				if ((reg5b & 0x80) == 0)
@@ -372,7 +398,7 @@
 			}
 			/* Turn off tuning, we have the DPLL set */
 			pci_read_config_dword(dev, 0x5c, &reg5c);
-			pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
+			pci_write_config_dword(dev, 0x5c, reg5c & ~0x100);
 			return 1;
 		}
 	}
@@ -388,8 +414,19 @@
 
 	fcnt = inl(iobase + 0x90);	/* Not PCI readable for some chips */
 	if ((fcnt >> 12) != 0xABCDE) {
-		printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
-		return 33;	/* Not BIOS set */
+		int i;
+		u16 sr;
+		u32 total = 0;
+
+		printk(KERN_WARNING "pata_hpt3x2n: BIOS clock data not set.\n");
+
+		/* This is the process the HPT371 BIOS is reported to use */
+		for (i = 0; i < 128; i++) {
+			pci_read_config_word(pdev, 0x78, &sr);
+			total += sr & 0x1FF;
+			udelay(15);
+		}
+		fcnt = total / 128;
 	}
 	fcnt &= 0x1FF;
 
@@ -431,21 +468,27 @@
  *	HPT372N			9 (HPT372N)	*	UDMA133
  *
  *	(1) UDMA133 support depends on the bus clock
- *
- *	To pin down		HPT371N
  */
 
 static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	/* HPT372N and friends - UDMA133 */
-	static const struct ata_port_info info = {
+	/* HPT372N - UDMA133 */
+	static const struct ata_port_info info_hpt372n = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = ATA_PIO4,
 		.mwdma_mask = ATA_MWDMA2,
 		.udma_mask = ATA_UDMA6,
-		.port_ops = &hpt3x2n_port_ops
+		.port_ops = &hpt372n_port_ops
 	};
-	const struct ata_port_info *ppi[] = { &info, NULL };
+	/* HPT302N and HPT371N - UDMA133 */
+	static const struct ata_port_info info_hpt3xxn = {
+		.flags = ATA_FLAG_SLAVE_POSS,
+		.pio_mask = ATA_PIO4,
+		.mwdma_mask = ATA_MWDMA2,
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &hpt3xxn_port_ops
+	};
+	const struct ata_port_info *ppi[] = { &info_hpt3xxn, NULL };
 	u8 rev = dev->revision;
 	u8 irqmask;
 	unsigned int pci_mhz;
@@ -459,30 +502,36 @@
 	if (rc)
 		return rc;
 
-	switch(dev->device) {
-		case PCI_DEVICE_ID_TTI_HPT366:
-			if (rev < 6)
-				return -ENODEV;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT371:
-			if (rev < 2)
-				return -ENODEV;
-			/* 371N if rev > 1 */
-			break;
-		case PCI_DEVICE_ID_TTI_HPT372:
-			/* 372N if rev >= 2*/
-			if (rev < 2)
-				return -ENODEV;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT302:
-			if (rev < 2)
-				return -ENODEV;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT372N:
-			break;
-		default:
-			printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
+	switch (dev->device) {
+	case PCI_DEVICE_ID_TTI_HPT366:
+		/* 372N if rev >= 6 */
+		if (rev < 6)
 			return -ENODEV;
+		goto hpt372n;
+	case PCI_DEVICE_ID_TTI_HPT371:
+		/* 371N if rev >= 2 */
+		if (rev < 2)
+			return -ENODEV;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372:
+		/* 372N if rev >= 2 */
+		if (rev < 2)
+			return -ENODEV;
+		goto hpt372n;
+	case PCI_DEVICE_ID_TTI_HPT302:
+		/* 302N if rev >= 2 */
+		if (rev < 2)
+			return -ENODEV;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372N:
+hpt372n:
+		ppi[0] = &info_hpt372n;
+		break;
+	default:
+		printk(KERN_ERR
+		       "pata_hpt3x2n: PCI table is bogus, please report (%d).\n",
+		       dev->device);
+		return -ENODEV;
 	}
 
 	/* Ok so this is a chip we support */
@@ -509,8 +558,10 @@
 		pci_write_config_byte(dev, 0x50, mcr1);
 	}
 
-	/* Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
-	   50 for UDMA100. Right now we always use 66 */
+	/*
+	 * Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
+	 * 50 for UDMA100. Right now we always use 66
+	 */
 
 	pci_mhz = hpt3x2n_pci_clock(dev);
 
@@ -522,7 +573,7 @@
 	pci_write_config_byte(dev, 0x5B, 0x21);
 
 	/* Unlike the 37x we don't try jiggling the frequency */
-	for(adjust = 0; adjust < 8; adjust++) {
+	for (adjust = 0; adjust < 8; adjust++) {
 		if (hpt3xn_calibrate_dpll(dev))
 			break;
 		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
@@ -534,8 +585,11 @@
 
 	printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
 	       pci_mhz);
-	/* Set our private data up. We only need a few flags so we use
-	   it directly */
+
+	/*
+	 * Set our private data up. We only need a few flags
+	 * so we use it directly.
+	 */
 	if (pci_mhz > 60)
 		hpriv = (void *)(PCI66 | USE_DPLL);
 
@@ -562,9 +616,9 @@
 };
 
 static struct pci_driver hpt3x2n_pci_driver = {
-	.name 		= DRV_NAME,
+	.name		= DRV_NAME,
 	.id_table	= hpt3x2n,
-	.probe 		= hpt3x2n_init_one,
+	.probe		= hpt3x2n_init_one,
 	.remove		= ata_pci_remove_one
 };
 
@@ -579,7 +633,7 @@
 }
 
 MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
+MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3xxN");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, hpt3x2n);
 MODULE_VERSION(DRV_VERSION);
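
The revision checks above reduce to a small dispatch from PCI device ID and revision to the matching ata_port_info. A condensed, illustrative sketch of that mapping (the helper is invented for illustration and is not part of the driver):

static const struct ata_port_info *
demo_hpt_pick_info(struct pci_dev *dev,
		   const struct ata_port_info *info_372n,
		   const struct ata_port_info *info_3xxn)
{
	u8 rev = dev->revision;

	switch (dev->device) {
	case PCI_DEVICE_ID_TTI_HPT366:		/* 372N when rev >= 6 */
		return rev >= 6 ? info_372n : NULL;
	case PCI_DEVICE_ID_TTI_HPT372:		/* 372N when rev >= 2 */
		return rev >= 2 ? info_372n : NULL;
	case PCI_DEVICE_ID_TTI_HPT372N:
		return info_372n;
	case PCI_DEVICE_ID_TTI_HPT371:		/* 371N when rev >= 2 */
	case PCI_DEVICE_ID_TTI_HPT302:		/* 302N when rev >= 2 */
		return rev >= 2 ? info_3xxn : NULL;
	default:
		return NULL;			/* bogus PCI table entry */
	}
}
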
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index b777176..e079cf2 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -370,7 +370,7 @@
 	if (pci_resource_len(pdev, 0) == 0)
 		return -ENODEV;
 
-	/* map IO regions and intialize host accordingly */
+	/* map IO regions and initialize host accordingly */
 	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
 	if (rc == -EBUSY)
 		pcim_pin_device(pdev);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b65..9f47e86 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
+  const char *errmsg = 0;
   int res;
-  
+
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
   if (res) {
     PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@
   /* First record contains just the start address */
   rec = (const struct ihex_binrec *)fw->data;
   if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
-    PRINTK (KERN_ERR, "Bad microcode data (no start record)");
-    return -EINVAL;
+    errmsg = "no start record";
+    goto fail;
   }
   start_address = be32_to_cpup((__be32 *)rec->data);
 
@@ -1950,12 +1951,12 @@
     PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
 	    be16_to_cpu(rec->len));
     if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
-	    PRINTK (KERN_ERR, "Bad microcode data (record too long)");
-	    return -EINVAL;
+	    errmsg = "record too long";
+	    goto fail;
     }
     if (be16_to_cpu(rec->len) & 3) {
-	    PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
-	    return -EINVAL;
+	    errmsg = "odd number of bytes";
+	    goto fail;
     }
     res = loader_write(lb, dev, rec);
     if (res)
@@ -1970,6 +1971,10 @@
     res = loader_start(lb, dev, start_address);
 
   return res;
+fail:
+  release_firmware(fw);
+  PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+  return -EINVAL;
 }
 
 /********** give adapter parameters **********/
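
The rework above funnels the three duplicated error returns through one fail: label that both logs the reason and releases the firmware that the early returns used to leak. The same shape as a self-contained sketch, with hypothetical names:

#include <linux/firmware.h>
#include <linux/device.h>

static int demo_load_ucode(struct device *dev)
{
	const struct firmware *fw;
	const char *errmsg = NULL;
	int res;

	res = request_firmware(&fw, "demo.fw", dev);
	if (res)
		return res;

	if (fw->size < 4) {		/* too short to hold a start record */
		errmsg = "no start record";
		goto fail;
	}
	/* ... parse and upload fw->data ... */
	release_firmware(fw);
	return 0;

fail:
	release_firmware(fw);
	dev_err(dev, "bad microcode data (%s)\n", errmsg);
	return -EINVAL;
}
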
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 5042bb2..f53a43a 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -572,7 +572,7 @@
 #define SAR_STAT_TSQF       0x00001000 /* Transmit Status Queue full      */
 #define SAR_STAT_TMROF      0x00000800 /* Timer overflow                  */
 #define SAR_STAT_PHYI       0x00000400 /* PHY device Interrupt flag       */
-#define SAR_STAT_CMDBZ      0x00000200 /* ABR SAR Comand Busy Flag        */
+#define SAR_STAT_CMDBZ      0x00000200 /* ABR SAR Command Busy Flag       */
 #define SAR_STAT_FBQ3A      0x00000100 /* Free Buffer Queue 3 Attention   */
 #define SAR_STAT_FBQ2A      0x00000080 /* Free Buffer Queue 2 Attention   */
 #define SAR_STAT_RSQF       0x00000040 /* Receive Status Queue full       */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7292540..d80d51b 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2063,7 +2063,7 @@
 		- UBR Table size is 4K  
 		- UBR wait queue is 4K  
 	   since the table and wait queues are contiguous, all the bytes   
-	   can be initialized by one memeset.  
+	   can be initialized by one memset.
 	*/  
         
         vcsize_sel = 0;
@@ -2089,7 +2089,7 @@
 		- ABR Table size is 2K  
 		- ABR wait queue is 2K  
 	   since the table and wait queues are contiguous, all the bytes   
-	   can be intialized by one memeset.  
+	   can be initialized by one memset.
 	*/  
         i = ABR_SCHED_TABLE * iadev->memSize;
         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2ca7f5b..19f49e4 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,31 +1,46 @@
 
 /**
- * struct bus_type_private - structure to hold the private to the driver core portions of the bus_type structure.
+ * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
  *
- * @subsys - the struct kset that defines this bus.  This is the main kobject
- * @drivers_kset - the list of drivers associated with this bus
- * @devices_kset - the list of devices associated with this bus
+ * @subsys - the struct kset that defines this subsystem
+ * @devices_kset - the list of devices associated
+ *
+ * @drivers_kset - the list of drivers associated
  * @klist_devices - the klist to iterate over the @devices_kset
  * @klist_drivers - the klist to iterate over the @drivers_kset
  * @bus_notifier - the bus notifier list for anything that cares about things
- * on this bus.
+ *                 on this bus.
  * @bus - pointer back to the struct bus_type that this structure is associated
- * with.
+ *        with.
+ *
+ * @class_interfaces - list of class_interfaces associated
+ * @glue_dirs - "glue" directory to put in-between the parent device to
+ *              avoid namespace conflicts
+ * @class_mutex - mutex to protect the children, devices, and interfaces lists.
+ * @class - pointer back to the struct class that this structure is associated
+ *          with.
  *
  * This structure is the one that is the actual kobject allowing struct
- * bus_type to be statically allocated safely.  Nothing outside of the driver
- * core should ever touch these fields.
+ * bus_type/class to be statically allocated safely.  Nothing outside of the
+ * driver core should ever touch these fields.
  */
-struct bus_type_private {
+struct subsys_private {
 	struct kset subsys;
-	struct kset *drivers_kset;
 	struct kset *devices_kset;
+
+	struct kset *drivers_kset;
 	struct klist klist_devices;
 	struct klist klist_drivers;
 	struct blocking_notifier_head bus_notifier;
 	unsigned int drivers_autoprobe:1;
 	struct bus_type *bus;
+
+	struct list_head class_interfaces;
+	struct kset glue_dirs;
+	struct mutex class_mutex;
+	struct class *class;
 };
+#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
 
 struct driver_private {
 	struct kobject kobj;
@@ -36,33 +51,6 @@
 };
 #define to_driver(obj) container_of(obj, struct driver_private, kobj)
 
-
-/**
- * struct class_private - structure to hold the private to the driver core portions of the class structure.
- *
- * @class_subsys - the struct kset that defines this class.  This is the main kobject
- * @class_devices - list of devices associated with this class
- * @class_interfaces - list of class_interfaces associated with this class
- * @class_dirs - "glue" directory for virtual devices associated with this class
- * @class_mutex - mutex to protect the children, devices, and interfaces lists.
- * @class - pointer back to the struct class that this structure is associated
- * with.
- *
- * This structure is the one that is the actual kobject allowing struct
- * class to be statically allocated safely.  Nothing outside of the driver
- * core should ever touch these fields.
- */
-struct class_private {
-	struct kset class_subsys;
-	struct klist class_devices;
-	struct list_head class_interfaces;
-	struct kset class_dirs;
-	struct mutex class_mutex;
-	struct class *class;
-};
-#define to_class(obj)	\
-	container_of(obj, struct class_private, class_subsys.kobj)
-
 /**
  * struct device_private - structure to hold the private to the driver core portions of the device structure.
  *
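
struct subsys_private now backs both buses and classes: the kobject embedded in its kset is what sysfs hands back to the show/store callbacks, and to_subsys_private() recovers the container from it. A minimal illustration (the function is hypothetical):

static struct bus_type *demo_kobj_to_bus(struct kobject *kobj)
{
	/* kobj must be &priv->subsys.kobj, as in the sysfs show/store paths */
	struct subsys_private *priv = to_subsys_private(kobj);

	return priv->bus;	/* NULL when the subsystem is a class */
}
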
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 33c270a..000e7b2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -20,7 +20,6 @@
 #include "power/power.h"
 
 #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
-#define to_bus(obj) container_of(obj, struct bus_type_private, subsys.kobj)
 
 /*
  * sysfs bindings for drivers
@@ -96,11 +95,11 @@
 			     char *buf)
 {
 	struct bus_attribute *bus_attr = to_bus_attr(attr);
-	struct bus_type_private *bus_priv = to_bus(kobj);
+	struct subsys_private *subsys_priv = to_subsys_private(kobj);
 	ssize_t ret = 0;
 
 	if (bus_attr->show)
-		ret = bus_attr->show(bus_priv->bus, buf);
+		ret = bus_attr->show(subsys_priv->bus, buf);
 	return ret;
 }
 
@@ -108,11 +107,11 @@
 			      const char *buf, size_t count)
 {
 	struct bus_attribute *bus_attr = to_bus_attr(attr);
-	struct bus_type_private *bus_priv = to_bus(kobj);
+	struct subsys_private *subsys_priv = to_subsys_private(kobj);
 	ssize_t ret = 0;
 
 	if (bus_attr->store)
-		ret = bus_attr->store(bus_priv->bus, buf, count);
+		ret = bus_attr->store(subsys_priv->bus, buf, count);
 	return ret;
 }
 
@@ -858,9 +857,9 @@
 int bus_register(struct bus_type *bus)
 {
 	int retval;
-	struct bus_type_private *priv;
+	struct subsys_private *priv;
 
-	priv = kzalloc(sizeof(struct bus_type_private), GFP_KERNEL);
+	priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
@@ -976,7 +975,7 @@
 EXPORT_SYMBOL_GPL(bus_get_device_klist);
 
 /*
- * Yes, this forcably breaks the klist abstraction temporarily.  It
+ * Yes, this forcibly breaks the klist abstraction temporarily.  It
  * just wants to sort the klist, not change reference counts and
  * take/drop locks rapidly in the process.  It does all this while
  * holding the lock for the list, so objects can't otherwise be
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9c63a56..4f1df2e 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -27,7 +27,7 @@
 			       char *buf)
 {
 	struct class_attribute *class_attr = to_class_attr(attr);
-	struct class_private *cp = to_class(kobj);
+	struct subsys_private *cp = to_subsys_private(kobj);
 	ssize_t ret = -EIO;
 
 	if (class_attr->show)
@@ -39,7 +39,7 @@
 				const char *buf, size_t count)
 {
 	struct class_attribute *class_attr = to_class_attr(attr);
-	struct class_private *cp = to_class(kobj);
+	struct subsys_private *cp = to_subsys_private(kobj);
 	ssize_t ret = -EIO;
 
 	if (class_attr->store)
@@ -49,7 +49,7 @@
 
 static void class_release(struct kobject *kobj)
 {
-	struct class_private *cp = to_class(kobj);
+	struct subsys_private *cp = to_subsys_private(kobj);
 	struct class *class = cp->class;
 
 	pr_debug("class '%s': release.\n", class->name);
@@ -65,7 +65,7 @@
 
 static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
 {
-	struct class_private *cp = to_class(kobj);
+	struct subsys_private *cp = to_subsys_private(kobj);
 	struct class *class = cp->class;
 
 	return class->ns_type;
@@ -82,7 +82,7 @@
 	.child_ns_type	= class_child_ns_type,
 };
 
-/* Hotplug events for classes go to the class class_subsys */
+/* Hotplug events for classes go to the class subsys */
 static struct kset *class_kset;
 
 
@@ -90,7 +90,7 @@
 {
 	int error;
 	if (cls)
-		error = sysfs_create_file(&cls->p->class_subsys.kobj,
+		error = sysfs_create_file(&cls->p->subsys.kobj,
 					  &attr->attr);
 	else
 		error = -EINVAL;
@@ -100,20 +100,20 @@
 void class_remove_file(struct class *cls, const struct class_attribute *attr)
 {
 	if (cls)
-		sysfs_remove_file(&cls->p->class_subsys.kobj, &attr->attr);
+		sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr);
 }
 
 static struct class *class_get(struct class *cls)
 {
 	if (cls)
-		kset_get(&cls->p->class_subsys);
+		kset_get(&cls->p->subsys);
 	return cls;
 }
 
 static void class_put(struct class *cls)
 {
 	if (cls)
-		kset_put(&cls->p->class_subsys);
+		kset_put(&cls->p->subsys);
 }
 
 static int add_class_attrs(struct class *cls)
@@ -162,7 +162,7 @@
 
 int __class_register(struct class *cls, struct lock_class_key *key)
 {
-	struct class_private *cp;
+	struct subsys_private *cp;
 	int error;
 
 	pr_debug("device class '%s': registering\n", cls->name);
@@ -170,11 +170,11 @@
 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
 	if (!cp)
 		return -ENOMEM;
-	klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
+	klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
 	INIT_LIST_HEAD(&cp->class_interfaces);
-	kset_init(&cp->class_dirs);
+	kset_init(&cp->glue_dirs);
 	__mutex_init(&cp->class_mutex, "struct class mutex", key);
-	error = kobject_set_name(&cp->class_subsys.kobj, "%s", cls->name);
+	error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
 	if (error) {
 		kfree(cp);
 		return error;
@@ -187,15 +187,15 @@
 #if defined(CONFIG_BLOCK)
 	/* let the block class directory show up in the root of sysfs */
 	if (!sysfs_deprecated || cls != &block_class)
-		cp->class_subsys.kobj.kset = class_kset;
+		cp->subsys.kobj.kset = class_kset;
 #else
-	cp->class_subsys.kobj.kset = class_kset;
+	cp->subsys.kobj.kset = class_kset;
 #endif
-	cp->class_subsys.kobj.ktype = &class_ktype;
+	cp->subsys.kobj.ktype = &class_ktype;
 	cp->class = cls;
 	cls->p = cp;
 
-	error = kset_register(&cp->class_subsys);
+	error = kset_register(&cp->subsys);
 	if (error) {
 		kfree(cp);
 		return error;
@@ -210,7 +210,7 @@
 {
 	pr_debug("device class '%s': unregistering\n", cls->name);
 	remove_class_attrs(cls);
-	kset_unregister(&cls->p->class_subsys);
+	kset_unregister(&cls->p->subsys);
 }
 
 static void class_create_release(struct class *cls)
@@ -295,7 +295,7 @@
 
 	if (start)
 		start_knode = &start->knode_class;
-	klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
+	klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
 	iter->type = type;
 }
 EXPORT_SYMBOL_GPL(class_dev_iter_init);
@@ -482,8 +482,8 @@
 	class_put(parent);
 }
 
-ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
-                        	char *buf)
+ssize_t show_class_attr_string(struct class *class,
+			       struct class_attribute *attr, char *buf)
 {
 	struct class_attribute_string *cs;
 	cs = container_of(attr, struct class_attribute_string, attr);
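
With the class's device list now living in subsys_private.klist_devices, callers still walk it through the unchanged iterator API. A brief usage sketch (the printing is only for illustration):

static void demo_list_class_devices(struct class *cls)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, cls, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		pr_info("%s: %s\n", cls->name, dev_name(dev));
	class_dev_iter_exit(&iter);
}
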
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6ed6454..080e9ca 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -338,6 +338,35 @@
 			device_remove_file(dev, &attrs[i]);
 }
 
+static int device_add_bin_attributes(struct device *dev,
+				     struct bin_attribute *attrs)
+{
+	int error = 0;
+	int i;
+
+	if (attrs) {
+		for (i = 0; attr_name(attrs[i]); i++) {
+			error = device_create_bin_file(dev, &attrs[i]);
+			if (error)
+				break;
+		}
+		if (error)
+			while (--i >= 0)
+				device_remove_bin_file(dev, &attrs[i]);
+	}
+	return error;
+}
+
+static void device_remove_bin_attributes(struct device *dev,
+					 struct bin_attribute *attrs)
+{
+	int i;
+
+	if (attrs)
+		for (i = 0; attr_name(attrs[i]); i++)
+			device_remove_bin_file(dev, &attrs[i]);
+}
+
 static int device_add_groups(struct device *dev,
 			     const struct attribute_group **groups)
 {
@@ -378,12 +407,15 @@
 		error = device_add_attributes(dev, class->dev_attrs);
 		if (error)
 			return error;
+		error = device_add_bin_attributes(dev, class->dev_bin_attrs);
+		if (error)
+			goto err_remove_class_attrs;
 	}
 
 	if (type) {
 		error = device_add_groups(dev, type->groups);
 		if (error)
-			goto err_remove_class_attrs;
+			goto err_remove_class_bin_attrs;
 	}
 
 	error = device_add_groups(dev, dev->groups);
@@ -395,6 +427,9 @@
  err_remove_type_groups:
 	if (type)
 		device_remove_groups(dev, type->groups);
+ err_remove_class_bin_attrs:
+	if (class)
+		device_remove_bin_attributes(dev, class->dev_bin_attrs);
  err_remove_class_attrs:
 	if (class)
 		device_remove_attributes(dev, class->dev_attrs);
@@ -412,8 +447,10 @@
 	if (type)
 		device_remove_groups(dev, type->groups);
 
-	if (class)
+	if (class) {
 		device_remove_attributes(dev, class->dev_attrs);
+		device_remove_bin_attributes(dev, class->dev_bin_attrs);
+	}
 }
 
 
@@ -610,7 +647,7 @@
 	dir->class = class;
 	kobject_init(&dir->kobj, &class_dir_ktype);
 
-	dir->kobj.kset = &class->p->class_dirs;
+	dir->kobj.kset = &class->p->glue_dirs;
 
 	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
 	if (retval < 0) {
@@ -635,7 +672,7 @@
 		if (sysfs_deprecated && dev->class == &block_class) {
 			if (parent && parent->class == &block_class)
 				return &parent->kobj;
-			return &block_class.p->class_subsys.kobj;
+			return &block_class.p->subsys.kobj;
 		}
 #endif
 
@@ -654,13 +691,13 @@
 		mutex_lock(&gdp_mutex);
 
 		/* find our class-directory at the parent and reference it */
-		spin_lock(&dev->class->p->class_dirs.list_lock);
-		list_for_each_entry(k, &dev->class->p->class_dirs.list, entry)
+		spin_lock(&dev->class->p->glue_dirs.list_lock);
+		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
 			if (k->parent == parent_kobj) {
 				kobj = kobject_get(k);
 				break;
 			}
-		spin_unlock(&dev->class->p->class_dirs.list_lock);
+		spin_unlock(&dev->class->p->glue_dirs.list_lock);
 		if (kobj) {
 			mutex_unlock(&gdp_mutex);
 			return kobj;
@@ -682,7 +719,7 @@
 {
 	/* see if we live in a "glue" directory */
 	if (!glue_dir || !dev->class ||
-	    glue_dir->kset != &dev->class->p->class_dirs)
+	    glue_dir->kset != &dev->class->p->glue_dirs)
 		return;
 
 	kobject_put(glue_dir);
@@ -709,7 +746,7 @@
 		return 0;
 
 	error = sysfs_create_link(&dev->kobj,
-				  &dev->class->p->class_subsys.kobj,
+				  &dev->class->p->subsys.kobj,
 				  "subsystem");
 	if (error)
 		goto out;
@@ -728,7 +765,7 @@
 #endif
 
 	/* link in the class directory pointing to the device */
-	error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
+	error = sysfs_create_link(&dev->class->p->subsys.kobj,
 				  &dev->kobj, dev_name(dev));
 	if (error)
 		goto out_device;
@@ -756,7 +793,7 @@
 	if (sysfs_deprecated && dev->class == &block_class)
 		return;
 #endif
-	sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev));
+	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
 }
 
 /**
@@ -947,7 +984,7 @@
 		mutex_lock(&dev->class->p->class_mutex);
 		/* tie the class to the device */
 		klist_add_tail(&dev->knode_class,
-			       &dev->class->p->class_devices);
+			       &dev->class->p->klist_devices);
 
 		/* notify any interfaces that the device is here */
 		list_for_each_entry(class_intf,
@@ -1513,6 +1550,8 @@
  * exclusion between two different calls of device_rename
  * on the same device to ensure that new_name is valid and
  * won't conflict with other devices.
+ *
+ * "Never use this function, bad things will happen" - gregkh
  */
 int device_rename(struct device *dev, const char *new_name)
 {
@@ -1535,7 +1574,7 @@
 	}
 
 	if (dev->class) {
-		error = sysfs_rename_link(&dev->class->p->class_subsys.kobj,
+		error = sysfs_rename_link(&dev->class->p->subsys.kobj,
 			&dev->kobj, old_device_name, new_name);
 		if (error)
 			goto out;
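
device_add_bin_attributes()/device_remove_bin_attributes() mirror the existing dev_attrs handling for a NULL-name-terminated array of binary attributes supplied by the class. Assuming the dev_bin_attrs field referenced above is present in struct class, a class providing such attributes might look roughly like this (all names and sizes are made up):

static ssize_t demo_eeprom_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	/* stub: a real driver would copy device data into buf and
	 * return the number of bytes copied */
	return 0;
}

static struct bin_attribute demo_dev_bin_attrs[] = {
	{
		.attr = { .name = "eeprom", .mode = 0444 },
		.size = 256,
		.read = demo_eeprom_read,
	},
	{ }	/* attr_name() == NULL terminates the loops above */
};

static struct class demo_class = {
	.name		= "demo",
	.dev_bin_attrs	= demo_dev_bin_attrs,
};
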
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ce012a9..36b4305 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -117,12 +117,21 @@
 		       "Node %d WritebackTmp:   %8lu kB\n"
 		       "Node %d Slab:           %8lu kB\n"
 		       "Node %d SReclaimable:   %8lu kB\n"
-		       "Node %d SUnreclaim:     %8lu kB\n",
+		       "Node %d SUnreclaim:     %8lu kB\n"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		       "Node %d AnonHugePages:  %8lu kB\n"
+#endif
+			,
 		       nid, K(node_page_state(nid, NR_FILE_DIRTY)),
 		       nid, K(node_page_state(nid, NR_WRITEBACK)),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
+			HPAGE_PMD_NR
+#endif
+		       ),
 		       nid, K(node_page_state(nid, NR_SHMEM)),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
@@ -133,7 +142,13 @@
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
 				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+			, nid,
+			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
+			HPAGE_PMD_NR)
+#endif
+		       );
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
 }
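
The new AnonHugePages line counts each transparent hugepage as HPAGE_PMD_NR base pages, so on x86 with 4 KiB pages (HPAGE_PMD_NR = 512) each hugepage contributes 2048 kB. The same arithmetic as a one-line helper (name hypothetical):

static unsigned long demo_anon_huge_kb(unsigned long nr_thp)
{
	/* matches K(nr_thp * HPAGE_PMD_NR) in the meminfo output above */
	return nr_thp * HPAGE_PMD_NR * (PAGE_SIZE >> 10);
}
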
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 81f2c84..42f97f9 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -39,7 +39,7 @@
  *
  * If PM operations are defined for the @dev's driver and they include
  * ->runtime_suspend(), execute it and return its error code.  Otherwise,
- * return -EINVAL.
+ * return 0.
  */
 int pm_generic_runtime_suspend(struct device *dev)
 {
@@ -58,7 +58,7 @@
  *
  * If PM operations are defined for the @dev's driver and they include
  * ->runtime_resume(), execute it and return its error code.  Otherwise,
- * return -EINVAL.
+ * return 0.
  */
 int pm_generic_runtime_resume(struct device *dev)
 {
@@ -185,7 +185,7 @@
 		return 0;
 
 	ret = callback(dev);
-	if (!ret) {
+	if (!ret && pm_runtime_enabled(dev)) {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ead3e79..8340497 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -8,7 +8,7 @@
  *
  *
  * The driver model core calls device_pm_add() when a device is registered.
- * This will intialize the embedded device_pm_info object in the device
+ * This will initialize the embedded device_pm_info object in the device
  * and add it to the list of power-controlled devices. sysfs entries for
  * controlling device power management will also be added.
  *
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/async.h>
+#include <linux/suspend.h>
 
 #include "../base.h"
 #include "power.h"
@@ -41,16 +42,13 @@
  */
 
 LIST_HEAD(dpm_list);
+LIST_HEAD(dpm_prepared_list);
+LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_noirq_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
-/*
- * Set once the preparation of devices for a PM transition has started, reset
- * before starting to resume devices.  Protected by dpm_list_mtx.
- */
-static bool transition_started;
-
 static int async_error;
 
 /**
@@ -59,7 +57,7 @@
  */
 void device_pm_init(struct device *dev)
 {
-	dev->power.status = DPM_ON;
+	dev->power.in_suspend = false;
 	init_completion(&dev->power.completion);
 	complete_all(&dev->power.completion);
 	dev->power.wakeup = NULL;
@@ -90,22 +88,11 @@
 void device_pm_add(struct device *dev)
 {
 	pr_debug("PM: Adding info for %s:%s\n",
-		 dev->bus ? dev->bus->name : "No Bus",
-		 kobject_name(&dev->kobj));
+		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	mutex_lock(&dpm_list_mtx);
-	if (dev->parent) {
-		if (dev->parent->power.status >= DPM_SUSPENDING)
-			dev_warn(dev, "parent %s should not be sleeping\n",
-				 dev_name(dev->parent));
-	} else if (transition_started) {
-		/*
-		 * We refuse to register parentless devices while a PM
-		 * transition is in progress in order to avoid leaving them
-		 * unhandled down the road
-		 */
-		dev_WARN(dev, "Parentless device registered during a PM transaction\n");
-	}
-
+	if (dev->parent && dev->parent->power.in_suspend)
+		dev_warn(dev, "parent %s should not be sleeping\n",
+			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 }
@@ -117,8 +104,7 @@
 void device_pm_remove(struct device *dev)
 {
 	pr_debug("PM: Removing info for %s:%s\n",
-		 dev->bus ? dev->bus->name : "No Bus",
-		 kobject_name(&dev->kobj));
+		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
@@ -135,10 +121,8 @@
 void device_pm_move_before(struct device *deva, struct device *devb)
 {
 	pr_debug("PM: Moving %s:%s before %s:%s\n",
-		 deva->bus ? deva->bus->name : "No Bus",
-		 kobject_name(&deva->kobj),
-		 devb->bus ? devb->bus->name : "No Bus",
-		 kobject_name(&devb->kobj));
+		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 	/* Delete deva from dpm_list and reinsert before devb. */
 	list_move_tail(&deva->power.entry, &devb->power.entry);
 }
@@ -151,10 +135,8 @@
 void device_pm_move_after(struct device *deva, struct device *devb)
 {
 	pr_debug("PM: Moving %s:%s after %s:%s\n",
-		 deva->bus ? deva->bus->name : "No Bus",
-		 kobject_name(&deva->kobj),
-		 devb->bus ? devb->bus->name : "No Bus",
-		 kobject_name(&devb->kobj));
+		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 	/* Delete deva from dpm_list and reinsert after devb. */
 	list_move(&deva->power.entry, &devb->power.entry);
 }
@@ -166,8 +148,7 @@
 void device_pm_move_last(struct device *dev)
 {
 	pr_debug("PM: Moving %s:%s to end of list\n",
-		 dev->bus ? dev->bus->name : "No Bus",
-		 kobject_name(&dev->kobj));
+		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	list_move_tail(&dev->power.entry, &dpm_list);
 }
 
@@ -303,7 +284,7 @@
 			pm_message_t state)
 {
 	int error = 0;
-	ktime_t calltime, delta, rettime;
+	ktime_t calltime = ktime_set(0, 0), delta, rettime;
 
 	if (initcall_debug) {
 		pr_info("calling  %s+ @ %i, parent: %s\n",
@@ -405,7 +386,7 @@
 			int error)
 {
 	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
-		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
+		dev_name(dev), pm_verb(state.event), info, error);
 }
 
 static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
@@ -475,33 +456,24 @@
  */
 void dpm_resume_noirq(pm_message_t state)
 {
-	struct list_head list;
 	ktime_t starttime = ktime_get();
 
-	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	transition_started = false;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.next);
+	while (!list_empty(&dpm_noirq_list)) {
+		struct device *dev = to_device(dpm_noirq_list.next);
+		int error;
 
 		get_device(dev);
-		if (dev->power.status > DPM_OFF) {
-			int error;
+		list_move_tail(&dev->power.entry, &dpm_suspended_list);
+		mutex_unlock(&dpm_list_mtx);
 
-			dev->power.status = DPM_OFF;
-			mutex_unlock(&dpm_list_mtx);
+		error = device_resume_noirq(dev, state);
+		if (error)
+			pm_dev_err(dev, state, " early", error);
 
-			error = device_resume_noirq(dev, state);
-
-			mutex_lock(&dpm_list_mtx);
-			if (error)
-				pm_dev_err(dev, state, " early", error);
-		}
-		if (!list_empty(&dev->power.entry))
-			list_move_tail(&dev->power.entry, &list);
+		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
-	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	dpm_show_time(starttime, state, "early");
 	resume_device_irqs();
@@ -544,7 +516,7 @@
 	dpm_wait(dev->parent, async);
 	device_lock(dev);
 
-	dev->power.status = DPM_RESUMING;
+	dev->power.in_suspend = false;
 
 	if (dev->bus) {
 		if (dev->bus->pm) {
@@ -610,19 +582,14 @@
  */
 static void dpm_resume(pm_message_t state)
 {
-	struct list_head list;
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
-	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
 
-	list_for_each_entry(dev, &dpm_list, power.entry) {
-		if (dev->power.status < DPM_OFF)
-			continue;
-
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 		INIT_COMPLETION(dev->power.completion);
 		if (is_async(dev)) {
 			get_device(dev);
@@ -630,28 +597,24 @@
 		}
 	}
 
-	while (!list_empty(&dpm_list)) {
-		dev = to_device(dpm_list.next);
+	while (!list_empty(&dpm_suspended_list)) {
+		dev = to_device(dpm_suspended_list.next);
 		get_device(dev);
-		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
+		if (!is_async(dev)) {
 			int error;
 
 			mutex_unlock(&dpm_list_mtx);
 
 			error = device_resume(dev, state, false);
-
-			mutex_lock(&dpm_list_mtx);
 			if (error)
 				pm_dev_err(dev, state, "", error);
-		} else if (dev->power.status == DPM_SUSPENDING) {
-			/* Allow new children of the device to be registered */
-			dev->power.status = DPM_RESUMING;
+
+			mutex_lock(&dpm_list_mtx);
 		}
 		if (!list_empty(&dev->power.entry))
-			list_move_tail(&dev->power.entry, &list);
+			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 		put_device(dev);
 	}
-	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, NULL);
@@ -697,22 +660,18 @@
 
 	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	transition_started = false;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.prev);
+	while (!list_empty(&dpm_prepared_list)) {
+		struct device *dev = to_device(dpm_prepared_list.prev);
 
 		get_device(dev);
-		if (dev->power.status > DPM_ON) {
-			dev->power.status = DPM_ON;
-			mutex_unlock(&dpm_list_mtx);
+		dev->power.in_suspend = false;
+		list_move(&dev->power.entry, &list);
+		mutex_unlock(&dpm_list_mtx);
 
-			device_complete(dev, state);
-			pm_runtime_put_sync(dev);
+		device_complete(dev, state);
+		pm_runtime_put_sync(dev);
 
-			mutex_lock(&dpm_list_mtx);
-		}
-		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &list);
+		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	list_splice(&list, &dpm_list);
@@ -802,15 +761,13 @@
  */
 int dpm_suspend_noirq(pm_message_t state)
 {
-	struct list_head list;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
-	INIT_LIST_HEAD(&list);
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.prev);
+	while (!list_empty(&dpm_suspended_list)) {
+		struct device *dev = to_device(dpm_suspended_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
@@ -823,12 +780,10 @@
 			put_device(dev);
 			break;
 		}
-		dev->power.status = DPM_OFF_IRQ;
 		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &list);
+			list_move(&dev->power.entry, &dpm_noirq_list);
 		put_device(dev);
 	}
-	list_splice_tail(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	if (error)
 		dpm_resume_noirq(resume_event(state));
@@ -876,6 +831,11 @@
 	if (async_error)
 		goto End;
 
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto End;
+	}
+
 	if (dev->class) {
 		if (dev->class->pm) {
 			pm_dev_dbg(dev, state, "class ");
@@ -907,9 +867,6 @@
 		}
 	}
 
-	if (!error)
-		dev->power.status = DPM_OFF;
-
  End:
 	device_unlock(dev);
 	complete_all(&dev->power.completion);
@@ -951,16 +908,14 @@
  */
 static int dpm_suspend(pm_message_t state)
 {
-	struct list_head list;
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
-	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
-	while (!list_empty(&dpm_list)) {
-		struct device *dev = to_device(dpm_list.prev);
+	while (!list_empty(&dpm_prepared_list)) {
+		struct device *dev = to_device(dpm_prepared_list.prev);
 
 		get_device(dev);
 		mutex_unlock(&dpm_list_mtx);
@@ -974,12 +929,11 @@
 			break;
 		}
 		if (!list_empty(&dev->power.entry))
-			list_move(&dev->power.entry, &list);
+			list_move(&dev->power.entry, &dpm_suspended_list);
 		put_device(dev);
 		if (async_error)
 			break;
 	}
-	list_splice(&list, dpm_list.prev);
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	if (!error)
@@ -1038,22 +992,20 @@
  */
 static int dpm_prepare(pm_message_t state)
 {
-	struct list_head list;
 	int error = 0;
 
-	INIT_LIST_HEAD(&list);
 	mutex_lock(&dpm_list_mtx);
-	transition_started = true;
 	while (!list_empty(&dpm_list)) {
 		struct device *dev = to_device(dpm_list.next);
 
 		get_device(dev);
-		dev->power.status = DPM_PREPARING;
 		mutex_unlock(&dpm_list_mtx);
 
 		pm_runtime_get_noresume(dev);
-		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
-			/* Wake-up requested during system sleep transition. */
+		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+			pm_wakeup_event(dev, 0);
+
+		if (pm_wakeup_pending()) {
 			pm_runtime_put_sync(dev);
 			error = -EBUSY;
 		} else {
@@ -1062,24 +1014,22 @@
 
 		mutex_lock(&dpm_list_mtx);
 		if (error) {
-			dev->power.status = DPM_ON;
 			if (error == -EAGAIN) {
 				put_device(dev);
 				error = 0;
 				continue;
 			}
-			printk(KERN_ERR "PM: Failed to prepare device %s "
-				"for power transition: error %d\n",
-				kobject_name(&dev->kobj), error);
+			printk(KERN_INFO "PM: Device %s not prepared "
+				"for power transition: code %d\n",
+				dev_name(dev), error);
 			put_device(dev);
 			break;
 		}
-		dev->power.status = DPM_SUSPENDING;
+		dev->power.in_suspend = true;
 		if (!list_empty(&dev->power.entry))
-			list_move_tail(&dev->power.entry, &list);
+			list_move_tail(&dev->power.entry, &dpm_prepared_list);
 		put_device(dev);
 	}
-	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
 	return error;
 }
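
The per-device power.status state machine is gone; devices now advance through four lists as a transition progresses, and power.in_suspend only records that dpm_prepare() has run. Roughly, with locking and error handling elided:

/*
 * dpm_prepare():        dpm_list           -> dpm_prepared_list
 * dpm_suspend():        dpm_prepared_list  -> dpm_suspended_list
 * dpm_suspend_noirq():  dpm_suspended_list -> dpm_noirq_list
 * ...and the resume paths walk the lists back in the opposite direction.
 */
static void demo_advance(struct device *dev, struct list_head *next_stage)
{
	/* caller holds dpm_list_mtx, as in the loops above */
	if (!list_empty(&dev->power.entry))
		list_move_tail(&dev->power.entry, next_stage);
}
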
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 02c652b..656493a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,13 +250,16 @@
 	if (!cb)
 		return -ENOSYS;
 
-	spin_unlock_irq(&dev->power.lock);
+	if (dev->power.irq_safe) {
+		retval = cb(dev);
+	} else {
+		spin_unlock_irq(&dev->power.lock);
 
-	retval = cb(dev);
+		retval = cb(dev);
 
-	spin_lock_irq(&dev->power.lock);
+		spin_lock_irq(&dev->power.lock);
+	}
 	dev->power.runtime_error = retval;
-
 	return retval;
 }
 
@@ -404,7 +407,7 @@
 		goto out;
 	}
 
-	if (parent && !parent->power.ignore_children) {
+	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 		spin_unlock_irq(&dev->power.lock);
 
 		pm_request_idle(parent);
@@ -527,10 +530,13 @@
 
 	if (!parent && dev->parent) {
 		/*
-		 * Increment the parent's resume counter and resume it if
-		 * necessary.
+		 * Increment the parent's usage counter and resume it if
+		 * necessary.  Not needed if dev is irq-safe; then the
+		 * parent is permanently resumed.
 		 */
 		parent = dev->parent;
+		if (dev->power.irq_safe)
+			goto skip_parent;
 		spin_unlock(&dev->power.lock);
 
 		pm_runtime_get_noresume(parent);
@@ -553,6 +559,7 @@
 			goto out;
 		goto repeat;
 	}
+ skip_parent:
 
 	if (dev->power.no_callbacks)
 		goto no_callback;	/* Assume success. */
@@ -584,7 +591,7 @@
 		rpm_idle(dev, RPM_ASYNC);
 
  out:
-	if (parent) {
+	if (parent && !dev->power.irq_safe) {
 		spin_unlock_irq(&dev->power.lock);
 
 		pm_runtime_put(parent);
@@ -1065,7 +1072,6 @@
  * Set the power.no_callbacks flag, which tells the PM core that this
  * device is power-managed through its parent and has no run-time PM
  * callbacks of its own.  The run-time sysfs attributes will be removed.
- *
  */
 void pm_runtime_no_callbacks(struct device *dev)
 {
@@ -1078,6 +1084,27 @@
 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
 
 /**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled.  It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+	spin_lock_irq(&dev->power.lock);
+	dev->power.irq_safe = 1;
+	spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
+
+/**
  * update_autosuspend - Handle a change to a device's autosuspend settings.
  * @dev: Device to handle.
  * @old_delay: The former autosuspend_delay value.
@@ -1199,4 +1226,6 @@
 	/* Change the status back to 'suspended' to match the initial status. */
 	if (dev->power.runtime_status == RPM_ACTIVE)
 		pm_runtime_set_suspended(dev);
+	if (dev->power.irq_safe && dev->parent)
+		pm_runtime_put_sync(dev->parent);
 }
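
pm_runtime_irq_safe() is what allows the synchronous runtime-PM helpers to be called from interrupt context, at the cost of keeping the parent permanently active. A hypothetical driver using it might be wired up like this (the device and handler names are invented):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	pm_runtime_irq_safe(&pdev->dev);  /* callbacks run with IRQs disabled */
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static irqreturn_t demo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* legal here only because of irq_safe */
	/* ... service the hardware ... */
	pm_runtime_put(dev);
	return IRQ_HANDLED;
}
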
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 71c5528..8ec406d 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -542,26 +542,26 @@
 }
 
 /**
- * pm_check_wakeup_events - Check for new wakeup events.
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
  *
  * Compare the current number of registered wakeup events with its preserved
- * value from the past to check if new wakeup events have been registered since
- * the old value was stored.  Check if the current number of wakeup events being
- * processed is zero.
+ * value from the past and return true if new wakeup events have been registered
+ * since the old value was stored.  Also return true if the current number of
+ * wakeup events being processed is different from zero.
  */
-bool pm_check_wakeup_events(void)
+bool pm_wakeup_pending(void)
 {
 	unsigned long flags;
-	bool ret = true;
+	bool ret = false;
 
 	spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
-		ret = ((unsigned int)atomic_read(&event_count) == saved_count)
-			&& !atomic_read(&events_in_progress);
-		events_check_enabled = ret;
+		ret = ((unsigned int)atomic_read(&event_count) != saved_count)
+			|| atomic_read(&events_in_progress);
+		events_check_enabled = !ret;
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
-	if (!ret)
+	if (ret)
 		pm_wakeup_update_hit_counts();
 	return ret;
 }
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4b9359a..83c32cb 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -464,6 +464,7 @@
 	tristate "Xen virtual block device support"
 	depends on XEN
 	default y
+	select XEN_XENBUS_FRONTEND
 	help
 	  This driver implements the front-end of the Xen virtual
 	  block device driver.  It communicates with a back-end driver
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8e0f925..516d5bb 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -238,9 +238,9 @@
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  */
-static inline void addQ(struct hlist_head *list, CommandList_struct *c)
+static inline void addQ(struct list_head *list, CommandList_struct *c)
 {
-	hlist_add_head(&c->list, list);
+	list_add_tail(&c->list, list);
 }
 
 static inline void removeQ(CommandList_struct *c)
@@ -253,12 +253,12 @@
 	 * them off as 'stale' to prevent the driver from
 	 * falling over.
 	 */
-	if (WARN_ON(hlist_unhashed(&c->list))) {
+	if (WARN_ON(list_empty(&c->list))) {
 		c->cmd_type = CMD_MSG_STALE;
 		return;
 	}
 
-	hlist_del_init(&c->list);
+	list_del_init(&c->list);
 }
 
 static void enqueue_cmd_and_start_io(ctlr_info_t *h,
@@ -905,7 +905,7 @@
 
 	c->cmdindex = i;
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (__u32) cmd_dma_handle;
 	temp64.val = (__u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -944,7 +944,7 @@
 	}
 	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (__u32) cmd_dma_handle;
 	temp64.val = (__u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2888,8 +2888,8 @@
 {
 	CommandList_struct *c;
 
-	while (!hlist_empty(&h->reqQ)) {
-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
+	while (!list_empty(&h->reqQ)) {
+		c = list_entry(h->reqQ.next, CommandList_struct, list);
 		/* can't do anything if fifo is full */
 		if ((h->access.fifo_full(h))) {
 			dev_warn(&h->pdev->dev, "fifo full\n");
@@ -3402,11 +3402,10 @@
 {
 	u32 tag;
 	CommandList_struct *c = NULL;
-	struct hlist_node *tmp;
 	__u32 busaddr_masked, tag_masked;
 
 	tag = cciss_tag_discard_error_bits(raw_tag);
-	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+	list_for_each_entry(c, &h->cmpQ, list) {
 		busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
 		tag_masked = cciss_tag_discard_error_bits(tag);
 		if (busaddr_masked == tag_masked) {
@@ -4572,8 +4571,8 @@
 	h = hba[i];
 	h->pdev = pdev;
 	h->busy_initializing = 1;
-	INIT_HLIST_HEAD(&h->cmpQ);
-	INIT_HLIST_HEAD(&h->reqQ);
+	INIT_LIST_HEAD(&h->cmpQ);
+	INIT_LIST_HEAD(&h->reqQ);
 	mutex_init(&h->busy_shutting_down);
 
 	if (cciss_pci_init(h) != 0)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 4b8933d..579f749 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -103,8 +103,8 @@
 	struct access_method access;
 
 	/* queue and queue Info */ 
-	struct hlist_head reqQ;
-	struct hlist_head cmpQ;
+	struct list_head reqQ;
+	struct list_head cmpQ;
 	unsigned int Qdepth;
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index eb060f1..35463d2 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -195,7 +195,7 @@
   int			   ctlr;
   int			   cmd_type; 
   long			   cmdindex;
-  struct hlist_node list;
+  struct list_head list;
   struct request *	   rq;
   struct completion *waiting;
   int	 retry_count;
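
Switching the cciss queues from hlist to the regular list primitives drops the extra struct hlist_node cursor from lookups and lets removal use list_del_init() on the same member. The lookup pattern reduced to its essentials (types and names hypothetical):

struct demo_cmd {
	struct list_head list;
	u32 tag;
};

static struct demo_cmd *demo_find_cmd(struct list_head *cmpQ, u32 tag)
{
	struct demo_cmd *c;

	list_for_each_entry(c, cmpQ, list)	/* no separate hlist_node cursor */
		if (c->tag == tag)
			return c;
	return NULL;
}
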
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 1ea1a34..3803a03 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -911,8 +911,6 @@
 struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
-	struct file *lo_file;
-	struct file *md_file;
 	struct drbd_md md;
 	struct disk_conf dc; /* The user provided config... */
 	sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6be5401..29cd0dc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3372,11 +3372,8 @@
 	if (ldev == NULL)
 		return;
 
-	bd_release(ldev->backing_bdev);
-	bd_release(ldev->md_bdev);
-
-	fput(ldev->lo_file);
-	fput(ldev->md_file);
+	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 
 	kfree(ldev);
 }
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 29e5c70..8cbfaa6 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -855,7 +855,7 @@
 	sector_t max_possible_sectors;
 	sector_t min_md_device_sectors;
 	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
-	struct inode *inode, *inode2;
+	struct block_device *bdev;
 	struct lru_cache *resync_lru = NULL;
 	union drbd_state ns, os;
 	unsigned int max_seg_s;
@@ -907,46 +907,40 @@
 		}
 	}
 
-	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
-	if (IS_ERR(nbc->lo_file)) {
+	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
+	if (IS_ERR(bdev)) {
 		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
-		    PTR_ERR(nbc->lo_file));
-		nbc->lo_file = NULL;
+			PTR_ERR(bdev));
 		retcode = ERR_OPEN_DISK;
 		goto fail;
 	}
+	nbc->backing_bdev = bdev;
 
-	inode = nbc->lo_file->f_dentry->d_inode;
-
-	if (!S_ISBLK(inode->i_mode)) {
-		retcode = ERR_DISK_NOT_BDEV;
-		goto fail;
-	}
-
-	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
-	if (IS_ERR(nbc->md_file)) {
+	/*
+	 * meta_dev_idx >= 0: external fixed size, possibly multiple
+	 * drbd sharing one meta device.  TODO in that case, paranoia
+	 * check that [md_bdev, meta_dev_idx] is not yet used by some
+	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
+	 * should check it for you already; but if you don't, or
+	 * someone fooled it, we need to double check here)
+	 */
+	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
+				  (nbc->dc.meta_dev_idx < 0) ?
+				  (void *)mdev : (void *)drbd_m_holder);
+	if (IS_ERR(bdev)) {
 		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
-		    PTR_ERR(nbc->md_file));
-		nbc->md_file = NULL;
+			PTR_ERR(bdev));
 		retcode = ERR_OPEN_MD_DISK;
 		goto fail;
 	}
+	nbc->md_bdev = bdev;
 
-	inode2 = nbc->md_file->f_dentry->d_inode;
-
-	if (!S_ISBLK(inode2->i_mode)) {
-		retcode = ERR_MD_NOT_BDEV;
-		goto fail;
-	}
-
-	nbc->backing_bdev = inode->i_bdev;
-	if (bd_claim(nbc->backing_bdev, mdev)) {
-		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
-		       nbc->backing_bdev, mdev,
-		       nbc->backing_bdev->bd_holder,
-		       nbc->backing_bdev->bd_contains->bd_holder,
-		       nbc->backing_bdev->bd_holders);
-		retcode = ERR_BDCLAIM_DISK;
+	if ((nbc->backing_bdev == nbc->md_bdev) !=
+	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+		retcode = ERR_MD_IDX_INVALID;
 		goto fail;
 	}
 
@@ -955,28 +949,7 @@
 			offsetof(struct bm_extent, lce));
 	if (!resync_lru) {
 		retcode = ERR_NOMEM;
-		goto release_bdev_fail;
-	}
-
-	/* meta_dev_idx >= 0: external fixed size,
-	 * possibly multiple drbd sharing one meta device.
-	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
-	 * not yet used by some other drbd minor!
-	 * (if you use drbd.conf + drbdadm,
-	 * that should check it for you already; but if you don't, or someone
-	 * fooled it, we need to double check here) */
-	nbc->md_bdev = inode2->i_bdev;
-	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
-				: (void *) drbd_m_holder)) {
-		retcode = ERR_BDCLAIM_MD_DISK;
-		goto release_bdev_fail;
-	}
-
-	if ((nbc->backing_bdev == nbc->md_bdev) !=
-	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
-	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
-		retcode = ERR_MD_IDX_INVALID;
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
@@ -987,7 +960,7 @@
 			(unsigned long long) drbd_get_max_capacity(nbc),
 			(unsigned long long) nbc->dc.disk_size);
 		retcode = ERR_DISK_TO_SMALL;
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	if (nbc->dc.meta_dev_idx < 0) {
@@ -1004,7 +977,7 @@
 		dev_warn(DEV, "refusing attach: md-device too small, "
 		     "at least %llu sectors needed for this meta-disk type\n",
 		     (unsigned long long) min_md_device_sectors);
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	/* Make sure the new disk is big enough
@@ -1012,7 +985,7 @@
 	if (drbd_get_max_capacity(nbc) <
 	    drbd_get_capacity(mdev->this_bdev)) {
 		retcode = ERR_DISK_TO_SMALL;
-		goto release_bdev2_fail;
+		goto fail;
 	}
 
 	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
@@ -1035,7 +1008,7 @@
 	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
 	drbd_resume_io(mdev);
 	if (retcode < SS_SUCCESS)
-		goto release_bdev2_fail;
+		goto fail;
 
 	if (!get_ldev_if_state(mdev, D_ATTACHING))
 		goto force_diskless;
@@ -1269,18 +1242,14 @@
  force_diskless:
 	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
- release_bdev2_fail:
-	if (nbc)
-		bd_release(nbc->md_bdev);
- release_bdev_fail:
-	if (nbc)
-		bd_release(nbc->backing_bdev);
  fail:
 	if (nbc) {
-		if (nbc->lo_file)
-			fput(nbc->lo_file);
-		if (nbc->md_file)
-			fput(nbc->md_file);
+		if (nbc->backing_bdev)
+			blkdev_put(nbc->backing_bdev,
+				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+		if (nbc->md_bdev)
+			blkdev_put(nbc->md_bdev,
+				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 		kfree(nbc);
 	}
 	lc_destroy(resync_lru);
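
Here, and in pktcdvd further below, the filp_open()/bd_claim() pairs give way to blkdev_get_by_path()/blkdev_get(), where FMODE_EXCL plus a holder cookie expresses the exclusive claim and a single blkdev_put() with the same mode drops it. The open/close pairing in isolation (path and holder are placeholders):

static struct block_device *demo_open_exclusive(const char *path, void *holder)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
	return bdev;		/* ERR_PTR() on failure */
}

static void demo_close_exclusive(struct block_device *bdev)
{
	/* the mode must match the one used at open time */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
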
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3951020..b9ba04f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -597,6 +597,11 @@
 static unsigned char in_sector_offset;	/* offset within physical sector,
 					 * expressed in units of 512 bytes */
 
+static inline bool drive_no_geom(int drive)
+{
+	return !current_type[drive] && !ITYPE(UDRS->fd_device);
+}
+
 #ifndef fd_eject
 static inline int fd_eject(int drive)
 {
@@ -3782,7 +3787,7 @@
 	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
 	    test_bit(drive, &fake_change) ||
-	    (!ITYPE(UDRS->fd_device) && !current_type[drive]))
+	    drive_no_geom(drive))
 		return 1;
 	return 0;
 }
@@ -3848,13 +3853,13 @@
 static int floppy_revalidate(struct gendisk *disk)
 {
 	int drive = (long)disk->private_data;
-#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device))
 	int cf;
 	int res = 0;
 
 	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
-	    test_bit(drive, &fake_change) || NO_GEOM) {
+	    test_bit(drive, &fake_change) ||
+	    drive_no_geom(drive)) {
 		if (WARN(atomic_read(&usage_count) == 0,
 			 "VFS: revalidate called on non-open device.\n"))
 			return -EFAULT;
@@ -3862,7 +3867,7 @@
 		lock_fdc(drive, false);
 		cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 		      test_bit(FD_VERIFY_BIT, &UDRS->flags));
-		if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) {
+		if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
 			process_fd_request();	/*already done by another thread */
 			return 0;
 		}
@@ -3874,7 +3879,7 @@
 		clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
 		if (cf)
 			UDRS->generation++;
-		if (NO_GEOM) {
+		if (drive_no_geom(drive)) {
 			/* auto-sensing */
 			res = __floppy_read_block_0(opened_bdev[drive]);
 		} else {
@@ -4352,7 +4357,7 @@
 out_unreg_platform_dev:
 	platform_device_unregister(&floppy_device[drive]);
 out_flush_work:
-	flush_scheduled_work();
+	flush_work_sync(&floppy_work);
 	if (atomic_read(&usage_count))
 		floppy_release_irq_and_dma();
 out_unreg_region:
@@ -4422,7 +4427,7 @@
 	 * We might have scheduled a free_irq(), wait it to
 	 * drain first:
 	 */
-	flush_scheduled_work();
+	flush_work_sync(&floppy_work);
 
 	if (fd_request_irq()) {
 		DPRINT("Unable to grab IRQ%d for the floppy driver\n",
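
The flush_scheduled_work() calls here, and in the xen-blkfront and gdrom hunks below, become flush_work_sync() on the driver's own work item, so teardown no longer drains the whole shared workqueue. The idiom in isolation (the structure is hypothetical):

struct demo_dev {
	struct work_struct work;
	/* ... */
};

static void demo_teardown(struct demo_dev *dd)
{
	/* was: flush_scheduled_work(); now wait only for our own item */
	flush_work_sync(&dd->work);
}
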
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7ea0bea..44e18c0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -395,11 +395,7 @@
 	struct loop_device *lo = p->lo;
 	struct page *page = buf->page;
 	sector_t IV;
-	int size, ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
+	int size;
 
 	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) +
 							(buf->offset >> 9);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 19b3568..77d70ee 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2296,15 +2296,12 @@
 	 * so bdget() can't fail.
 	 */
 	bdget(pd->bdev->bd_dev);
-	if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
+	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
 		goto out;
 
-	if ((ret = bd_claim(pd->bdev, pd)))
-		goto out_putdev;
-
 	if ((ret = pkt_get_last_written(pd, &lba))) {
 		printk(DRIVER_NAME": pkt_get_last_written failed\n");
-		goto out_unclaim;
+		goto out_putdev;
 	}
 
 	set_capacity(pd->disk, lba << 2);
@@ -2314,7 +2311,7 @@
 	q = bdev_get_queue(pd->bdev);
 	if (write) {
 		if ((ret = pkt_open_write(pd)))
-			goto out_unclaim;
+			goto out_putdev;
 		/*
 		 * Some CDRW drives can not handle writes larger than one packet,
 		 * even if the size is a multiple of the packet size.
@@ -2329,23 +2326,21 @@
 	}
 
 	if ((ret = pkt_set_segment_merging(pd, q)))
-		goto out_unclaim;
+		goto out_putdev;
 
 	if (write) {
 		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
 			printk(DRIVER_NAME": not enough memory for buffers\n");
 			ret = -ENOMEM;
-			goto out_unclaim;
+			goto out_putdev;
 		}
 		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
 	}
 
 	return 0;
 
-out_unclaim:
-	bd_release(pd->bdev);
 out_putdev:
-	blkdev_put(pd->bdev, FMODE_READ);
+	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
 out:
 	return ret;
 }
@@ -2362,8 +2357,7 @@
 	pkt_lock_door(pd, 0);
 
 	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
-	bd_release(pd->bdev);
-	blkdev_put(pd->bdev, FMODE_READ);
+	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
 
 	pkt_shrink_pktlist(pd);
 }
@@ -2733,7 +2727,7 @@
 	bdev = bdget(dev);
 	if (!bdev)
 		return -ENOMEM;
-	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
+	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 008d4a0..e1e38b1 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1790,18 +1790,29 @@
 
 	rc = rbd_bus_add_dev(rbd_dev);
 	if (rc)
-		goto err_out_disk;
+		goto err_out_blkdev;
+
 	/* set up and announce blkdev mapping */
 	rc = rbd_init_disk(rbd_dev);
 	if (rc)
-		goto err_out_blkdev;
+		goto err_out_bus;
 
 	return count;
 
+err_out_bus:
+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+	list_del_init(&rbd_dev->node);
+	mutex_unlock(&ctl_mutex);
+
+	/* this will also clean up rest of rbd_dev stuff */
+
+	rbd_bus_del_dev(rbd_dev);
+	kfree(options);
+	kfree(mon_dev_name);
+	return rc;
+
 err_out_blkdev:
 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
-err_out_disk:
-	rbd_free_disk(rbd_dev);
 err_out_client:
 	rbd_put_client(rbd_dev);
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 657873e..d7aa39e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -547,7 +547,7 @@
 	spin_unlock_irqrestore(&blkif_io_lock, flags);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_scheduled_work();
+	flush_work_sync(&info->work);
 
 	del_gendisk(info->gd);
 
@@ -596,7 +596,7 @@
 	spin_unlock_irq(&blkif_io_lock);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_scheduled_work();
+	flush_work_sync(&info->work);
 
 	/* Free resources associated with old device channel. */
 	if (info->ring_ref != GRANT_INVALID_REF) {
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index af13c62..14033a3 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1348,7 +1348,10 @@
 	if (!CDROM_CAN(CDC_SELECT_DISC))
 		return -EDRIVE_CANT_DO_THIS;
 
-	(void) cdi->ops->media_changed(cdi, slot);
+	if (cdi->ops->check_events)
+		cdi->ops->check_events(cdi, 0, slot);
+	else
+		cdi->ops->media_changed(cdi, slot);
 
 	if (slot == CDSL_NONE) {
 		/* set media changed bits, on both queues */
@@ -1392,6 +1395,42 @@
 	return slot;
 }
 
+/*
+ * As cdrom implements an extra ioctl consumer for media changed
+ * event, it needs to buffer ->check_events() output, such that event
+ * is not lost for both the usual VFS and ioctl paths.
+ * cdi->{vfs|ioctl}_events are used to buffer pending events for each
+ * path.
+ *
+ * XXX: Locking is non-existent.  cdi->ops->check_events() can be
+ * called in parallel and buffering fields are accessed without any
+ * exclusion.  The original media_changed code had the same problem.
+ * It might be better to simply deprecate CDROM_MEDIA_CHANGED ioctl
+ * and remove this cruft altogether.  It doesn't have much usefulness
+ * at this point.
+ */
+static void cdrom_update_events(struct cdrom_device_info *cdi,
+				unsigned int clearing)
+{
+	unsigned int events;
+
+	events = cdi->ops->check_events(cdi, clearing, CDSL_CURRENT);
+	cdi->vfs_events |= events;
+	cdi->ioctl_events |= events;
+}
+
+unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
+				unsigned int clearing)
+{
+	unsigned int events;
+
+	cdrom_update_events(cdi, clearing);
+	events = cdi->vfs_events;
+	cdi->vfs_events = 0;
+	return events;
+}
+EXPORT_SYMBOL(cdrom_check_events);
+
 /* We want to make media_changed accessible to the user through an
  * ioctl. The main problem now is that we must double-buffer the
  * low-level implementation, to assure that the VFS and the user both
@@ -1403,15 +1442,26 @@
 {
 	unsigned int mask = (1 << (queue & 1));
 	int ret = !!(cdi->mc_flags & mask);
+	bool changed;
 
 	if (!CDROM_CAN(CDC_MEDIA_CHANGED))
-	    return ret;
+		return ret;
+
 	/* changed since last call? */
-	if (cdi->ops->media_changed(cdi, CDSL_CURRENT)) {
+	if (cdi->ops->check_events) {
+		BUG_ON(!queue);	/* shouldn't be called from VFS path */
+		cdrom_update_events(cdi, DISK_EVENT_MEDIA_CHANGE);
+		changed = cdi->ioctl_events & DISK_EVENT_MEDIA_CHANGE;
+		cdi->ioctl_events = 0;
+	} else
+		changed = cdi->ops->media_changed(cdi, CDSL_CURRENT);
+
+	if (changed) {
 		cdi->mc_flags = 0x3;    /* set bit on both queues */
 		ret |= 1;
 		cdi->media_written = 0;
 	}
+
 	cdi->mc_flags &= ~mask;         /* clear bit */
 	return ret;
 }
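
cdrom_check_events() lets the block layer's media polling and the legacy CDROM_MEDIA_CHANGED ioctl share a single ->check_events() callback, each consuming its own buffered copy of the events. A driver-side callback can be as small as this (demo_media_changed() is an invented helper):

static unsigned int demo_check_events(struct cdrom_device_info *cdi,
				      unsigned int clearing, int slot)
{
	if (slot != CDSL_CURRENT)
		return 0;	/* per-slot changers handled elsewhere */

	return demo_media_changed(cdi) ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static struct cdrom_device_ops demo_cdrom_ops = {
	/* ... */
	.check_events	= demo_check_events,
	.capability	= CDC_MEDIA_CHANGED,
};
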
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index de65915..64a2146 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -837,7 +837,7 @@
 
 static int __devexit remove_gdrom(struct platform_device *devptr)
 {
-	flush_scheduled_work();
+	flush_work_sync(&work);
 	blk_cleanup_queue(gd.gdrom_rq);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 43d3395..0f175a8 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -682,6 +682,15 @@
        select HVC_DRIVER
        default n
 
+config HVC_DCC
+       bool "ARM JTAG DCC console"
+       depends on ARM
+       select HVC_DRIVER
+       help
+         This console uses the JTAG DCC (Debug Communications Channel) on ARM
+         to provide a console under the HVC driver. It is only reachable via a
+         JTAG debugger; if you don't have one, you probably don't want this.
+
 config VIRTIO_CONSOLE
 	tristate "Virtio console"
 	depends on VIRTIO
@@ -1038,15 +1047,6 @@
 	  pc8736x_gpio drivers.  If those drivers are built as
 	  modules, this one will be too, named nsc_gpio
 
-config CS5535_GPIO
-	tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
-	depends on X86_32
-	help
-	  Give userspace access to the GPIO pins on the AMD CS5535 and
-	  CS5536 Geode companion devices.
-
-	  If compiled as a module, it will be called cs5535_gpio.
-
 config RAW_DRIVER
 	tristate "RAW driver (/dev/raw/rawN)"
 	depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index ba53ec9..1e9dffb 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
 obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o
+obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o
 obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
 obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_HVC_IRQ)		+= hvc_irq.o
@@ -81,7 +82,6 @@
 obj-$(CONFIG_SCx200_GPIO)	+= scx200_gpio.o
 obj-$(CONFIG_PC8736x_GPIO)	+= pc8736x_gpio.o
 obj-$(CONFIG_NSC_GPIO)		+= nsc_gpio.o
-obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio.o
 obj-$(CONFIG_GPIO_TB0219)	+= tb0219.o
 obj-$(CONFIG_TELCLOCK)		+= tlclk.o
 
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065..3e67ddd 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@
 	void (*agp_destroy_page)(struct page *, int flags);
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
-	void (*chipset_flush)(struct agp_bridge_data *);
 };
 
 struct agp_bridge_data {
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a..a48e05b 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@
 		break;
 
 	case AGPIOC_CHIPSET_FLUSH32:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
 		break;
 	}
 
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678a..f30e0fd 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@
 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
 struct agp_memory *agp_find_mem_by_key(int key);
 struct agp_client *agp_find_client_by_pid(pid_t id);
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
 
 #endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539..2e04433 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@
 	return agp_unbind_memory(memory);
 }
 
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
-{
-	DBG("");
-	agp_flush_chipset(agp_bridge);
-	return 0;
-}
-
 static long agp_ioctl(struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
@@ -1039,7 +1032,6 @@
 		break;
 	       
 	case AGPIOC_CHIPSET_FLUSH:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
 		break;
 	}
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c..012cba0 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@
 	return -1;
 }
 
-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
-	if (bridge->driver->chipset_flush)
-		bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
 /*
  * Use kmalloc if possible for the page list. Otherwise fall back to
  * vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@
 }
 EXPORT_SYMBOL(agp_unbind_memory);
 
-/**
- *	agp_rebind_emmory  -  Rewrite the entire GATT, useful on resume
- */
-int agp_rebind_memory(void)
-{
-	struct agp_memory *curr;
-	int ret_val = 0;
-
-	spin_lock(&agp_bridge->mapped_lock);
-	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
-		ret_val = curr->bridge->driver->insert_memory(curr,
-							      curr->pg_start,
-							      curr->type);
-		if (ret_val != 0)
-			break;
-	}
-	spin_unlock(&agp_bridge->mapped_lock);
-	return ret_val;
-}
-EXPORT_SYMBOL(agp_rebind_memory);
 
 /* End - Routines for handling swapping of agp_memory into the GATT */
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d..857df10 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -717,8 +717,8 @@
 	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
 	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
 	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
 	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
 	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
 	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
@@ -828,14 +828,9 @@
 static int agp_intel_resume(struct pci_dev *pdev)
 {
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-	int ret_val;
 
 	bridge->driver->configure();
 
-	ret_val = agp_rebind_memory();
-	if (ret_val != 0)
-		return ret_val;
-
 	return 0;
 }
 #endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df..c195bfe 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
 #define I810_GMS_DISABLE	0x00000000
 #define I810_PGETBL_CTL		0x2020
 #define I810_PGETBL_ENABLED	0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2	0x20c4
 #define I965_PGETBL_SIZE_MASK	0x0000000e
 #define I965_PGETBL_SIZE_512KB	(0 << 1)
 #define I965_PGETBL_SIZE_256KB	(1 << 1)
@@ -82,9 +84,17 @@
 #define I965_PGETBL_SIZE_1MB	(3 << 1)
 #define I965_PGETBL_SIZE_2MB	(4 << 1)
 #define I965_PGETBL_SIZE_1_5MB	(5 << 1)
-#define G33_PGETBL_SIZE_MASK    (3 << 8)
-#define G33_PGETBL_SIZE_1M      (1 << 8)
-#define G33_PGETBL_SIZE_2M      (2 << 8)
+#define G33_GMCH_SIZE_MASK	(3 << 8)
+#define G33_GMCH_SIZE_1M	(1 << 8)
+#define G33_GMCH_SIZE_2M	(2 << 8)
+#define G4x_GMCH_SIZE_MASK	(0xf << 8)
+#define G4x_GMCH_SIZE_1M	(0x1 << 8)
+#define G4x_GMCH_SIZE_2M	(0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M	(0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
+
+#define GFX_FLSH_CNTL		0x2170 /* 915+ */
 
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 29ac6d4..826ab09 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -24,7 +24,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
 #include <drm/intel-gtt.h>
 
 /*
@@ -39,40 +38,12 @@
 #define USE_PCI_DMA_API 0
 #endif
 
-/* Max amount of stolen space, anything above will be returned to Linux */
-int intel_max_stolen = 32 * 1024 * 1024;
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
-	{64, 16384, 4},
-	/* The 32M mode still requires a 64k gatt */
-	{32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY	1
-#define AGP_PHYS_MEMORY		2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
-	 .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-#define INTEL_AGP_UNCACHED_MEMORY              0
-#define INTEL_AGP_CACHED_MEMORY_LLC            1
-#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
-
 struct intel_gtt_driver {
 	unsigned int gen : 8;
 	unsigned int is_g33 : 1;
 	unsigned int is_pineview : 1;
 	unsigned int is_ironlake : 1;
+	unsigned int has_pgtbl_enable : 1;
 	unsigned int dma_mask_size : 8;
 	/* Chipset specific GTT setup */
 	int (*setup)(void);
@@ -95,13 +66,14 @@
 	u8 __iomem *registers;
 	phys_addr_t gtt_bus_addr;
 	phys_addr_t gma_bus_addr;
-	phys_addr_t pte_bus_addr;
+	u32 PGETBL_save;
 	u32 __iomem *gtt;		/* I915G */
 	int num_dcache_entries;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
 	};
+	char *i81x_gtt_table;
 	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
@@ -113,42 +85,31 @@
 #define IS_G33		intel_private.driver->is_g33
 #define IS_PINEVIEW	intel_private.driver->is_pineview
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
-	struct sg_table st;
-
-	st.sgl = mem->sg_list;
-	st.orig_nents = st.nents = mem->page_count;
-
-	sg_free_table(&st);
-
-	mem->sg_list = NULL;
-	mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg)
 {
 	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (mem->sg_list)
+	if (*sg_list)
 		return 0; /* already mapped (for e.g. resume */
 
-	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
 		goto err;
 
-	mem->sg_list = sg = st.sgl;
+	*sg_list = sg = st.sgl;
 
-	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
-		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
-				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!mem->num_sg))
+	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+				 num_entries, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(!*num_sg))
 		goto err;
 
 	return 0;
@@ -157,90 +118,22 @@
 	sg_free_table(&st);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(intel_gtt_map_memory);
 
-static void intel_agp_unmap_memory(struct agp_memory *mem)
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
+	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
 
-	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
-		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	intel_agp_free_sglist(mem);
+	pci_unmap_sg(intel_private.pcidev, sg_list,
+		     num_sg, PCI_DMA_BIDIRECTIONAL);
+
+	st.sgl = sg_list;
+	st.orig_nents = st.nents = num_sg;
+
+	sg_free_table(&st);
 }
-
-static int intel_i810_fetch_size(void)
-{
-	u32 smram_miscc;
-	struct aper_size_info_fixed *values;
-
-	pci_read_config_dword(intel_private.bridge_dev,
-			      I810_SMRAM_MISCC, &smram_miscc);
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
-		return 0;
-	}
-	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-		agp_bridge->current_size = (void *) (values + 1);
-		agp_bridge->aperture_size_idx = 1;
-		return values[1].size;
-	} else {
-		agp_bridge->current_size = (void *) (values);
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	return 0;
-}
-
-static int intel_i810_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-	if (!intel_private.registers) {
-		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-		temp &= 0xfff80000;
-
-		intel_private.registers = ioremap(temp, 128 * 4096);
-		if (!intel_private.registers) {
-			dev_err(&intel_private.pcidev->dev,
-				"can't remap memory\n");
-			return -ENOMEM;
-		}
-	}
-
-	if ((readl(intel_private.registers+I810_DRAM_CTL)
-		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-		/* This will need to be dynamically assigned */
-		dev_info(&intel_private.pcidev->dev,
-			 "detected 4MB dedicated video ram\n");
-		intel_private.num_dcache_entries = 1024;
-	}
-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = 0; i < current_size->num_entries; i++) {
-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
-	}
-	global_cache_flush();
-	return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-	writel(0, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers);	/* PCI Posting. */
-	iounmap(intel_private.registers);
-}
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -277,80 +170,64 @@
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
 
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
-				int type)
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
 {
-	int i, j, num_entries;
-	void *temp;
-	int ret = -EINVAL;
-	int mask_type;
+	u32 reg_addr;
+	char *gtt_table;
 
-	if (mem->page_count == 0)
-		goto out;
+	/* i81x does not preallocate the gtt. It's always 64kb in size. */
+	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+	if (gtt_table == NULL)
+		return -ENOMEM;
+	intel_private.i81x_gtt_table = gtt_table;
 
-	temp = agp_bridge->current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
+	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+	reg_addr &= 0xfff80000;
 
-	if ((pg_start + mem->page_count) > num_entries)
-		goto out_err;
+	intel_private.registers = ioremap(reg_addr, KB(64));
+	if (!intel_private.registers)
+		return -ENOMEM;
 
+	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+	       intel_private.registers+I810_PGETBL_CTL);
 
-	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
-			ret = -EBUSY;
-			goto out_err;
-		}
+	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+
+	if ((readl(intel_private.registers+I810_DRAM_CTL)
+		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		dev_info(&intel_private.pcidev->dev,
+			 "detected 4MB dedicated video ram\n");
+		intel_private.num_dcache_entries = 1024;
 	}
 
-	if (type != mem->type)
-		goto out_err;
-
-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-	switch (mask_type) {
-	case AGP_DCACHE_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
-			       intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-		break;
-	case AGP_PHYS_MEMORY:
-	case AGP_NORMAL_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					page_to_phys(mem->pages[i]), mask_type),
-			       intel_private.registers+I810_PTE_BASE+(j*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-		break;
-	default:
-		goto out_err;
-	}
-
-out:
-	ret = 0;
-out_err:
-	mem->is_flushed = true;
-	return ret;
+	return 0;
 }
 
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
-				int type)
+static void i810_cleanup(void)
+{
+	writel(0, intel_private.registers+I810_PGETBL_CTL);
+	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+				      int type)
 {
 	int i;
 
-	if (mem->page_count == 0)
-		return 0;
+	if ((pg_start + mem->page_count)
+			> intel_private.num_dcache_entries)
+		return -EINVAL;
 
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+	if (!mem->is_flushed)
+		global_cache_flush();
+
+	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+		dma_addr_t addr = i << PAGE_SHIFT;
+		intel_private.driver->write_entry(addr,
+						  i, type);
 	}
-	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+	readl(intel_private.gtt+i-1);
 
 	return 0;
 }
@@ -397,29 +274,6 @@
 	return new;
 }
 
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-	struct agp_memory *new;
-
-	if (type == AGP_DCACHE_MEMORY) {
-		if (pg_count != intel_private.num_dcache_entries)
-			return NULL;
-
-		new = agp_create_memory(1);
-		if (new == NULL)
-			return NULL;
-
-		new->type = AGP_DCACHE_MEMORY;
-		new->page_count = pg_count;
-		new->num_scratch_pages = 0;
-		agp_free_page_array(new);
-		return new;
-	}
-	if (type == AGP_PHYS_MEMORY)
-		return alloc_agpphysmem_i8xx(pg_count, type);
-	return NULL;
-}
-
 static void intel_i810_free_by_type(struct agp_memory *curr)
 {
 	agp_free_key(curr->key);
@@ -437,13 +291,6 @@
 	kfree(curr);
 }
 
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-					    dma_addr_t addr, int type)
-{
-	/* Type checking must be done elsewhere */
-	return addr | bridge->driver->masks[type].mask;
-}
-
 static int intel_gtt_setup_scratch_page(void)
 {
 	struct page *page;
@@ -455,7 +302,7 @@
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+	if (intel_private.base.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +317,45 @@
 	return 0;
 }
 
-static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+			     unsigned int flags)
+{
+	u32 pte_flags = I810_PTE_VALID;
+
+	switch (flags) {
+	case AGP_DCACHE_MEMORY:
+		pte_flags |= I810_PTE_LOCAL;
+		break;
+	case AGP_USER_CACHED_MEMORY:
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+		break;
+	}
+
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+	{32, 8192, 3},
+	{64, 16384, 4},
 	{128, 32768, 5},
-	/* The 64M mode still requires a 128k gatt */
-	{64, 16384, 5},
 	{256, 65536, 6},
 	{512, 131072, 7},
 };
 
-static unsigned int intel_gtt_stolen_entries(void)
+static unsigned int intel_gtt_stolen_size(void)
 {
 	u16 gmch_ctrl;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
-	unsigned int overhead_entries, stolen_entries;
 	unsigned int stolen_size = 0;
 
+	if (INTEL_GTT_GEN == 1)
+		return 0; /* no stolen mem on i81x */
+
 	pci_read_config_word(intel_private.bridge_dev,
 			     I830_GMCH_CTRL, &gmch_ctrl);
 
-	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
-		overhead_entries = 0;
-	else
-		overhead_entries = intel_private.base.gtt_mappable_entries
-			/ 1024;
-
-	overhead_entries += 1; /* BIOS popup */
-
 	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
 	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +481,7 @@
 		}
 	}
 
-	if (!local && stolen_size > intel_max_stolen) {
-		dev_info(&intel_private.bridge_dev->dev,
-			 "detected %dK stolen memory, trimming to %dK\n",
-			 stolen_size / KB(1), intel_max_stolen / KB(1));
-		stolen_size = intel_max_stolen;
-	} else if (stolen_size > 0) {
+	if (stolen_size > 0) {
 		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
 		       stolen_size / KB(1), local ? "local" : "stolen");
 	} else {
@@ -637,46 +490,88 @@
 		stolen_size = 0;
 	}
 
-	stolen_entries = stolen_size/KB(4) - overhead_entries;
+	return stolen_size;
+}
 
-	return stolen_entries;
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+	u32 pgetbl_ctl, pgetbl_ctl2;
+
+	/* ensure that ppgtt is disabled */
+	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+	/* write the new ggtt size */
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+	pgetbl_ctl |= size_flag;
+	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
+}
+
+static unsigned int i965_gtt_total_entries(void)
+{
+	int size;
+	u32 pgetbl_ctl;
+	u16 gmch_ctl;
+
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctl);
+
+	if (INTEL_GTT_GEN == 5) {
+		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+		case G4x_GMCH_SIZE_1M:
+		case G4x_GMCH_SIZE_VT_1M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
+			break;
+		case G4x_GMCH_SIZE_VT_1_5M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
+			break;
+		case G4x_GMCH_SIZE_2M:
+		case G4x_GMCH_SIZE_VT_2M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
+			break;
+		}
+	}
+
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+	case I965_PGETBL_SIZE_128KB:
+		size = KB(128);
+		break;
+	case I965_PGETBL_SIZE_256KB:
+		size = KB(256);
+		break;
+	case I965_PGETBL_SIZE_512KB:
+		size = KB(512);
+		break;
+	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+	case I965_PGETBL_SIZE_1MB:
+		size = KB(1024);
+		break;
+	case I965_PGETBL_SIZE_2MB:
+		size = KB(2048);
+		break;
+	case I965_PGETBL_SIZE_1_5MB:
+		size = KB(1024 + 512);
+		break;
+	default:
+		dev_info(&intel_private.pcidev->dev,
+			 "unknown page table size, assuming 512KB\n");
+		size = KB(512);
+	}
+
+	return size/4;
 }
 
 static unsigned int intel_gtt_total_entries(void)
 {
 	int size;
 
-	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
-		u32 pgetbl_ctl;
-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-		case I965_PGETBL_SIZE_128KB:
-			size = KB(128);
-			break;
-		case I965_PGETBL_SIZE_256KB:
-			size = KB(256);
-			break;
-		case I965_PGETBL_SIZE_512KB:
-			size = KB(512);
-			break;
-		case I965_PGETBL_SIZE_1MB:
-			size = KB(1024);
-			break;
-		case I965_PGETBL_SIZE_2MB:
-			size = KB(2048);
-			break;
-		case I965_PGETBL_SIZE_1_5MB:
-			size = KB(1024 + 512);
-			break;
-		default:
-			dev_info(&intel_private.pcidev->dev,
-				 "unknown page table size, assuming 512KB\n");
-			size = KB(512);
-		}
-
-		return size/4;
-	} else if (INTEL_GTT_GEN == 6) {
+	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+		return i965_gtt_total_entries();
+	else if (INTEL_GTT_GEN == 6) {
 		u16 snb_gmch_ctl;
 
 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +601,18 @@
 {
 	unsigned int aperture_size;
 
-	if (INTEL_GTT_GEN == 2) {
+	if (INTEL_GTT_GEN == 1) {
+		u32 smram_miscc;
+
+		pci_read_config_dword(intel_private.bridge_dev,
+				      I810_SMRAM_MISCC, &smram_miscc);
+
+		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+				== I810_GFX_MEM_WIN_32M)
+			aperture_size = MB(32);
+		else
+			aperture_size = MB(64);
+	} else if (INTEL_GTT_GEN == 2) {
 		u16 gmch_ctrl;
 
 		pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +645,7 @@
 
 	iounmap(intel_private.gtt);
 	iounmap(intel_private.registers);
-	
+
 	intel_gtt_teardown_scratch_page();
 }
 
@@ -755,6 +661,14 @@
 	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
 	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
 
+	/* save the PGETBL reg for resume */
+	intel_private.PGETBL_save =
+		readl(intel_private.registers+I810_PGETBL_CTL)
+			& ~I810_PGETBL_ENABLED;
+	/* we only ever restore the register when enabling the PGTBL... */
+	if (HAS_PGTBL_EN)
+		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
 	dev_info(&intel_private.bridge_dev->dev,
 			"detected gtt size: %dK total, %dK mappable\n",
 			intel_private.base.gtt_total_entries * 4,
@@ -772,14 +686,9 @@
 
 	global_cache_flush();   /* FIXME: ? */
 
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
-	if (intel_private.base.gtt_stolen_entries == 0) {
-		intel_private.driver->cleanup();
-		iounmap(intel_private.registers);
-		iounmap(intel_private.gtt);
-		return -ENOMEM;
-	}
+	intel_private.base.stolen_size = intel_gtt_stolen_size();
+
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -862,25 +771,19 @@
 			     unsigned int flags)
 {
 	u32 pte_flags = I810_PTE_VALID;
-	
-	switch (flags) {
-	case AGP_DCACHE_MEMORY:
-		pte_flags |= I810_PTE_LOCAL;
-		break;
-	case AGP_USER_CACHED_MEMORY:
+
+	if (flags ==  AGP_USER_CACHED_MEMORY)
 		pte_flags |= I830_PTE_SYSTEM_CACHED;
-		break;
-	}
 
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static void intel_enable_gtt(void)
+static bool intel_enable_gtt(void)
 {
 	u32 gma_addr;
-	u16 gmch_ctrl;
+	u8 __iomem *reg;
 
-	if (INTEL_GTT_GEN == 2)
+	if (INTEL_GTT_GEN <= 2)
 		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
 				      &gma_addr);
 	else
@@ -889,13 +792,47 @@
 
 	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
-	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+	if (INTEL_GTT_GEN >= 6)
+		return true;
 
-	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
-	       intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+	if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		gmch_ctrl |= I830_GMCH_ENABLED;
+		pci_write_config_word(intel_private.bridge_dev,
+				      I830_GMCH_CTRL, gmch_ctrl);
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+			dev_err(&intel_private.pcidev->dev,
+				"failed to enable the GTT: GMCH_CTRL=%x\n",
+				gmch_ctrl);
+			return false;
+		}
+	}
+
+	/* On the resume path we may be adjusting the PGTBL value, so
+	 * be paranoid and flush all chipset write buffers...
+	 */
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+	reg = intel_private.registers+I810_PGETBL_CTL;
+	writel(intel_private.PGETBL_save, reg);
+	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+		dev_err(&intel_private.pcidev->dev,
+			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
+			readl(reg), intel_private.PGETBL_save);
+		return false;
+	}
+
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+	return true;
 }
 
 static int i830_setup(void)
@@ -910,8 +847,6 @@
 		return -ENOMEM;
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
-	intel_private.pte_bus_addr =
-		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 
 	intel_i830_setup_flush();
 
@@ -936,12 +871,12 @@
 {
 	int i;
 
-	intel_enable_gtt();
+	if (!intel_enable_gtt())
+		return -EIO;
 
 	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-	for (i = intel_private.base.gtt_stolen_entries;
-			i < intel_private.base.gtt_total_entries; i++) {
+	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
@@ -965,10 +900,10 @@
 	return false;
 }
 
-static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-					unsigned int sg_len,
-					unsigned int pg_start,
-					unsigned int flags)
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags)
 {
 	struct scatterlist *sg;
 	unsigned int len, m;
@@ -989,27 +924,34 @@
 	}
 	readl(intel_private.gtt+j-1);
 }
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags)
+{
+	int i, j;
+
+	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+		dma_addr_t addr = page_to_phys(pages[i]);
+		intel_private.driver->write_entry(addr,
+						  j, flags);
+	}
+	readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
 {
-	int i, j;
 	int ret = -EINVAL;
 
+	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+		return i810_insert_dcache_entries(mem, pg_start, type);
+
 	if (mem->page_count == 0)
 		goto out;
 
-	if (pg_start < intel_private.base.gtt_stolen_entries) {
-		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
-			   pg_start, intel_private.base.gtt_stolen_entries);
-
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to insert into local/stolen memory\n");
-		goto out_err;
-	}
-
-	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
 		goto out_err;
 
 	if (type != mem->type)
@@ -1021,21 +963,17 @@
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
-		ret = intel_agp_map_memory(mem);
+	if (intel_private.base.needs_dmar) {
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+					   &mem->sg_list, &mem->num_sg);
 		if (ret != 0)
 			return ret;
 
 		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
 					    pg_start, type);
-	} else {
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			dma_addr_t addr = page_to_phys(mem->pages[i]);
-			intel_private.driver->write_entry(addr,
-							  j, type);
-		}
-		readl(intel_private.gtt+j-1);
-	}
+	} else
+		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+				       type);
 
 out:
 	ret = 0;
@@ -1044,40 +982,54 @@
 	return ret;
 }
 
-static int intel_fake_agp_remove_entries(struct agp_memory *mem,
-					 off_t pg_start, int type)
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 {
-	int i;
+	unsigned int i;
 
-	if (mem->page_count == 0)
-		return 0;
-
-	if (pg_start < intel_private.base.gtt_stolen_entries) {
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to disable local/stolen memory\n");
-		return -EINVAL;
-	}
-
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
-		intel_agp_unmap_memory(mem);
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+	for (i = first_entry; i < (first_entry + num_entries); i++) {
 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+					 off_t pg_start, int type)
+{
+	if (mem->page_count == 0)
+		return 0;
+
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
+	if (intel_private.base.needs_dmar) {
+		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+		mem->sg_list = NULL;
+		mem->num_sg = 0;
+	}
 
 	return 0;
 }
 
-static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
-{
-	intel_private.driver->chipset_flush();
-}
-
 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
 						       int type)
 {
+	struct agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+		if (pg_count != intel_private.num_dcache_entries)
+			return NULL;
+
+		new = agp_create_memory(1);
+		if (new == NULL)
+			return NULL;
+
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		agp_free_page_array(new);
+		return new;
+	}
 	if (type == AGP_PHYS_MEMORY)
 		return alloc_agpphysmem_i8xx(pg_count, type);
 	/* always return NULL for other allocation types for now */
@@ -1274,40 +1226,11 @@
 		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
 	}
 
-	intel_private.pte_bus_addr =
-		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-
 	intel_i9xx_setup_flush();
 
 	return 0;
 }
 
-static const struct agp_bridge_driver intel_810_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i810_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 2,
-	.needs_scratch_page	= true,
-	.configure		= intel_i810_configure,
-	.fetch_size		= intel_i810_fetch_size,
-	.cleanup		= intel_i810_cleanup,
-	.mask_memory		= intel_i810_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_fake_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= agp_generic_create_gatt_table,
-	.free_gatt_table	= agp_generic_free_gatt_table,
-	.insert_memory		= intel_i810_insert_entries,
-	.remove_memory		= intel_i810_remove_entries,
-	.alloc_by_type		= intel_i810_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
-};
-
 static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.owner			= THIS_MODULE,
 	.size_type		= FIXED_APER_SIZE,
@@ -1328,15 +1251,20 @@
 	.agp_alloc_pages        = agp_generic_alloc_pages,
 	.agp_destroy_page	= agp_generic_destroy_page,
 	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.chipset_flush		= intel_fake_agp_chipset_flush,
 };
 
 static const struct intel_gtt_driver i81x_gtt_driver = {
 	.gen = 1,
+	.has_pgtbl_enable = 1,
 	.dma_mask_size = 32,
+	.setup = i810_setup,
+	.cleanup = i810_cleanup,
+	.check_flags = i830_check_flags,
+	.write_entry = i810_write_entry,
 };
 static const struct intel_gtt_driver i8xx_gtt_driver = {
 	.gen = 2,
+	.has_pgtbl_enable = 1,
 	.setup = i830_setup,
 	.cleanup = i830_cleanup,
 	.write_entry = i830_write_entry,
@@ -1346,10 +1274,11 @@
 };
 static const struct intel_gtt_driver i915_gtt_driver = {
 	.gen = 3,
+	.has_pgtbl_enable = 1,
 	.setup = i9xx_setup,
 	.cleanup = i9xx_cleanup,
 	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
-	.write_entry = i830_write_entry, 
+	.write_entry = i830_write_entry,
 	.dma_mask_size = 32,
 	.check_flags = i830_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
@@ -1376,6 +1305,7 @@
 };
 static const struct intel_gtt_driver i965_gtt_driver = {
 	.gen = 4,
+	.has_pgtbl_enable = 1,
 	.setup = i9xx_setup,
 	.cleanup = i9xx_cleanup,
 	.write_entry = i965_write_entry,
@@ -1419,93 +1349,92 @@
 static const struct intel_gtt_driver_description {
 	unsigned int gmch_chip_id;
 	char *name;
-	const struct agp_bridge_driver *gmch_driver;
 	const struct intel_gtt_driver *gtt_driver;
 } intel_gtt_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
 		&i81x_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
+	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-		&intel_fake_agp_driver, &g33_gtt_driver },
+		&g33_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-		&intel_fake_agp_driver, &g33_gtt_driver },
+		&g33_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-		&intel_fake_agp_driver, &g33_gtt_driver },
+		&g33_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-		&intel_fake_agp_driver, &pineview_gtt_driver },
+		&pineview_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-		&intel_fake_agp_driver, &pineview_gtt_driver },
+		&pineview_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+	    "HD Graphics", &ironlake_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+	    "HD Graphics", &ironlake_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
@@ -1530,21 +1459,20 @@
 				      struct agp_bridge_data *bridge)
 {
 	int i, mask;
-	bridge->driver = NULL;
+	intel_private.driver = NULL;
 
 	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
 		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
-			bridge->driver =
-				intel_gtt_chipsets[i].gmch_driver;
-			intel_private.driver = 
+			intel_private.driver =
 				intel_gtt_chipsets[i].gtt_driver;
 			break;
 		}
 	}
 
-	if (!bridge->driver)
+	if (!intel_private.driver)
 		return 0;
 
+	bridge->driver = &intel_fake_agp_driver;
 	bridge->dev_private_data = &intel_private;
 	bridge->dev = pdev;
 
@@ -1560,8 +1488,8 @@
 		pci_set_consistent_dma_mask(intel_private.pcidev,
 					    DMA_BIT_MASK(mask));
 
-	if (bridge->driver == &intel_810_driver)
-		return 1;
+	/*if (bridge->driver == &intel_810_driver)
+		return 1;*/
 
 	if (intel_gtt_init() != 0)
 		return 0;
@@ -1570,12 +1498,19 @@
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-struct intel_gtt *intel_gtt_get(void)
+const struct intel_gtt *intel_gtt_get(void)
 {
 	return &intel_private.base;
 }
 EXPORT_SYMBOL(intel_gtt_get);
 
+void intel_gtt_chipset_flush(void)
+{
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_chipset_flush);
+
 void intel_gmch_remove(struct pci_dev *pdev)
 {
 	if (intel_private.pcidev)
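A sketch of how a GTT client such as a DRM driver might use the helpers exported above; the example_* names are hypothetical, error handling is trimmed, and on hardware where needs_dmar is false intel_gtt_insert_pages() would replace the scatterlist path:

static int example_bind_pages(struct page **pages, unsigned int num_pages,
			      unsigned int first_entry, unsigned int flags)
{
	struct scatterlist *sg_list = NULL;
	int num_sg, ret;

	ret = intel_gtt_map_memory(pages, num_pages, &sg_list, &num_sg);
	if (ret)
		return ret;

	intel_gtt_insert_sg_entries(sg_list, num_sg, first_entry, flags);
	intel_gtt_chipset_flush();	/* takes over from agp_flush_chipset() */
	return 0;
}

static void example_unbind_pages(struct scatterlist *sg_list, int num_sg,
				 unsigned int first_entry,
				 unsigned int num_pages)
{
	intel_gtt_clear_range(first_entry, num_pages);
	intel_gtt_unmap_memory(sg_list, num_sg);
}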
diff --git a/drivers/char/hvc_dcc.c b/drivers/char/hvc_dcc.c
new file mode 100644
index 0000000..6470f63
--- /dev/null
+++ b/drivers/char/hvc_dcc.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+
+#include "hvc_console.h"
+
+/* DCC Status Bits */
+#define DCC_STATUS_RX		(1 << 30)
+#define DCC_STATUS_TX		(1 << 29)
+
+static inline u32 __dcc_getstatus(void)
+{
+	u32 __ret;
+
+	asm("mrc p14, 0, %0, c0, c1, 0	@ read comms ctrl reg"
+		: "=r" (__ret) : : "cc");
+
+	return __ret;
+}
+
+
+#if defined(CONFIG_CPU_V7)
+static inline char __dcc_getchar(void)
+{
+	char __c;
+
+	asm("get_wait:	mrc p14, 0, pc, c0, c1, 0                          \n\
+			bne get_wait                                       \n\
+			mrc p14, 0, %0, c0, c5, 0	@ read comms data reg"
+		: "=r" (__c) : : "cc");
+
+	return __c;
+}
+#else
+static inline char __dcc_getchar(void)
+{
+	char __c;
+
+	asm("mrc p14, 0, %0, c0, c5, 0	@ read comms data reg"
+		: "=r" (__c));
+
+	return __c;
+}
+#endif
+
+#if defined(CONFIG_CPU_V7)
+static inline void __dcc_putchar(char c)
+{
+	asm("put_wait:	mrc p14, 0, pc, c0, c1, 0                 \n\
+			bcs put_wait                              \n\
+			mcr p14, 0, %0, c0, c5, 0                   "
+	: : "r" (c) : "cc");
+}
+#else
+static inline void __dcc_putchar(char c)
+{
+	asm("mcr p14, 0, %0, c0, c5, 0	@ write a char"
+		: /* no output register */
+		: "r" (c));
+}
+#endif
+
+static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		while (__dcc_getstatus() & DCC_STATUS_TX)
+			cpu_relax();
+
+		__dcc_putchar((char)(buf[i] & 0xFF));
+	}
+
+	return count;
+}
+
+static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
+{
+	int i;
+
+	for (i = 0; i < count; ++i) {
+		int c = -1;
+
+		if (__dcc_getstatus() & DCC_STATUS_RX)
+			c = __dcc_getchar();
+		if (c < 0)
+			break;
+		buf[i] = c;
+	}
+
+	return i;
+}
+
+static const struct hv_ops hvc_dcc_get_put_ops = {
+	.get_chars = hvc_dcc_get_chars,
+	.put_chars = hvc_dcc_put_chars,
+};
+
+static int __init hvc_dcc_console_init(void)
+{
+	hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);
+	return 0;
+}
+console_initcall(hvc_dcc_console_init);
+
+static int __init hvc_dcc_init(void)
+{
+	hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
+	return 0;
+}
+device_initcall(hvc_dcc_init);
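A brief usage note, assuming the usual HVC conventions: with CONFIG_HVC_DCC=y the DCC channel registers through the generic HVC layer, so it is normally picked as the boot console with a command line such as

	console=hvc0

and is later reachable from userspace as /dev/hvc0, with the far end being the JTAG debugger's DCC terminal.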
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 27370e9..5e2f52b 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@
 
 #include "hvc_console.h"
 
-char hvc_driver_name[] = "hvc_console";
+static const char hvc_driver_name[] = "hvc_console";
 
 static struct vio_device_id hvc_driver_table[] __devinitdata = {
 	{"serial", "hvterm1"},
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index a2bc885..67a75a5 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -850,8 +850,8 @@
 	wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT);
 
 	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
-	cancel_delayed_work(&hp->writer);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&hp->writer);
+	flush_work_sync(&hp->handshaker);
 
 	/*
 	 * it's also possible that our timeout expired and hvsi_write_worker
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 794aacb..d0387a8 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -24,6 +24,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
@@ -34,7 +35,6 @@
 #include <asm/i387.h>
 
 
-#define PFX	KBUILD_MODNAME ": "
 
 
 enum {
@@ -81,8 +81,7 @@
 	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
-		:"=m"(*addr), "=a"(eax_out)
-		:"D"(addr), "d"(edx_in));
+		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
 
 	irq_ts_restore(ts_state);
 	return eax_out;
@@ -90,8 +89,10 @@
 
 static int via_rng_data_present(struct hwrng *rng, int wait)
 {
+	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	u32 bytes_out;
-	u32 *via_rng_datum = (u32 *)(&rng->priv);
 	int i;
 
 	/* We choose the recommended 1-byte-per-instruction RNG rate,
@@ -115,6 +116,7 @@
 			break;
 		udelay(10);
 	}
+	rng->priv = *via_rng_datum;
 	return bytes_out ? 1 : 0;
 }
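A generic sketch of the aligned-scratch-buffer trick via_rng_data_present() adopts above: reserve slack in the buffer and round the pointer up so the datum lands on the required boundary regardless of how the stack frame is aligned; the 16-byte figure here is illustrative:

static u32 example_aligned_read(void)
{
	char buf[sizeof(u32) + 16 - 1];
	u32 *datum = PTR_ALIGN((u32 *)buf, 16);

	*datum = 0;		/* hardware would store through datum */
	return *datum;
}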
 
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index fcd02ba..c3a0253 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -3224,7 +3224,7 @@
 
 MODULE_LICENSE("GPL");
 
-static struct pci_device_id ip2main_pci_tbl[] __devinitdata = {
+static struct pci_device_id ip2main_pci_tbl[] __devinitdata __used = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_IP2EX) },
 	{ }
 };
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 2fe72f8..38223e9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -970,6 +970,33 @@
 }
 EXPORT_SYMBOL(ipmi_create_user);
 
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+	int           rv = 0;
+	ipmi_smi_t    intf;
+	struct ipmi_smi_handlers *handlers;
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		if (intf->intf_num == if_num)
+			goto found;
+	}
+	/* Not found, return an error */
+	rv = -EINVAL;
+	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
+
+found:
+	handlers = intf->handlers;
+	rv = -ENOSYS;
+	if (handlers->get_smi_info)
+		rv = handlers->get_smi_info(intf->send_info, data);
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
 static void free_user(struct kref *ref)
 {
 	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
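A sketch of an ipmi_get_smi_info() consumer, assuming the ipmi_smi_info layout used by this series (addr_src, dev, addr_info) and trimming error handling; the device reference taken by get_smi_info() must be dropped by the caller:

static void example_inspect_smi(int if_num)
{
	struct ipmi_smi_info info;

	if (ipmi_get_smi_info(if_num, &info))
		return;

	if (info.addr_src == SI_ACPI) {
		/* info.addr_info.acpi_info.acpi_handle is valid here */
	}

	put_device(info.dev);	/* balance the get_device() in get_smi_info() */
}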
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 035da9e..b6ae6e9 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -57,6 +57,7 @@
 #include <asm/irq.h>
 #include <linux/interrupt.h>
 #include <linux/rcupdate.h>
+#include <linux/ipmi.h>
 #include <linux/ipmi_smi.h>
 #include <asm/io.h>
 #include "ipmi_si_sm.h"
@@ -69,6 +70,8 @@
 #ifdef CONFIG_PPC_OF
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #endif
 
 #define PFX "ipmi_si: "
@@ -107,10 +110,6 @@
 };
 static char *si_to_str[] = { "kcs", "smic", "bt" };
 
-enum ipmi_addr_src {
-	SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
-	SI_PCI,	SI_DEVICETREE, SI_DEFAULT
-};
 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
 					"ACPI", "SMBIOS", "PCI",
 					"device-tree", "default" };
@@ -291,6 +290,7 @@
 	struct task_struct *thread;
 
 	struct list_head link;
+	union ipmi_smi_info_union addr_info;
 };
 
 #define smi_inc_stat(smi, stat) \
@@ -1186,6 +1186,18 @@
 	return 0;
 }
 
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+	struct smi_info *smi = send_info;
+
+	data->addr_src = smi->addr_source;
+	data->dev = smi->dev;
+	data->addr_info = smi->addr_info;
+	get_device(smi->dev);
+
+	return 0;
+}
+
 static void set_maintenance_mode(void *send_info, int enable)
 {
 	struct smi_info   *smi_info = send_info;
@@ -1197,6 +1209,7 @@
 static struct ipmi_smi_handlers handlers = {
 	.owner                  = THIS_MODULE,
 	.start_processing       = smi_start_processing,
+	.get_smi_info		= get_smi_info,
 	.sender			= sender,
 	.request_events		= request_events,
 	.set_maintenance_mode   = set_maintenance_mode,
@@ -1928,7 +1941,8 @@
 static int acpi_failure;
 
 /* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(void *context)
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+	u32 gpe_number, void *context)
 {
 	struct smi_info *smi_info = context;
 	unsigned long   flags;
@@ -2156,6 +2170,7 @@
 	printk(KERN_INFO PFX "probing via ACPI\n");
 
 	handle = acpi_dev->handle;
+	info->addr_info.acpi_info.acpi_handle = handle;
 
 	/* _IFT tells us the interface type: KCS, BT, etc */
 	status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
@@ -2546,7 +2561,7 @@
 {
 	struct smi_info *info;
 	struct resource resource;
-	const int *regsize, *regspacing, *regshift;
+	const __be32 *regsize, *regspacing, *regshift;
 	struct device_node *np = dev->dev.of_node;
 	int ret;
 	int proplen;
@@ -2599,9 +2614,9 @@
 
 	info->io.addr_data	= resource.start;
 
-	info->io.regsize	= regsize ? *regsize : DEFAULT_REGSIZE;
-	info->io.regspacing	= regspacing ? *regspacing : DEFAULT_REGSPACING;
-	info->io.regshift	= regshift ? *regshift : 0;
+	info->io.regsize	= regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+	info->io.regspacing	= regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+	info->io.regshift	= regshift ? be32_to_cpup(regshift) : 0;
 
 	info->irq		= irq_of_parse_and_map(dev->dev.of_node, 0);
 	info->dev		= &dev->dev;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f..320668f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@
 {
 	struct die_args *args = data;
 
-	if (val != DIE_NMI)
+	if (val != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	/* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
index 99cffda..0aeb5a3 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.c
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -1729,7 +1729,7 @@
 
 	ipwireless_stop_interrupts(hw);
 
-	flush_scheduled_work();
+	flush_work_sync(&hw->work_rx);
 
 	for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
 		if (hw->packet_assembler[i] != NULL)
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
index 9fe5383..f7daeea 100644
--- a/drivers/char/pcmcia/ipwireless/network.c
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -430,7 +430,8 @@
 	network->shutting_down = 1;
 
 	ipwireless_ppp_close(network);
-	flush_scheduled_work();
+	flush_work_sync(&network->work_go_online);
+	flush_work_sync(&network->work_go_offline);
 
 	ipwireless_stop_interrupts(network->hardware);
 	ipwireless_associate_network(network->hardware, NULL);
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 1a2c2c3..f5eb28b 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -577,7 +577,7 @@
 				mutex_unlock(&ttyj->ipw_tty_mutex);
 				tty_hangup(ttyj->linux_tty);
 				/* Wait till the tty_hangup has completed */
-				flush_scheduled_work();
+				flush_work_sync(&ttyj->linux_tty->hangup_work);
 				/* FIXME: Exactly how is the tty object locked here
 				   against a parallel ioctl etc */
 				mutex_lock(&ttyj->ipw_tty_mutex);
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index d3d63be..1a9f5f6 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -30,7 +30,7 @@
 
 #define RAMOOPS_KERNMSG_HDR "===="
 
-#define RECORD_SIZE 4096
+#define RECORD_SIZE 4096UL
 
 static ulong mem_address;
 module_param(mem_address, ulong, 0400);
@@ -68,11 +68,16 @@
 	char *buf, *buf_orig;
 	struct timeval timestamp;
 
+	if (reason != KMSG_DUMP_OOPS &&
+	    reason != KMSG_DUMP_PANIC &&
+	    reason != KMSG_DUMP_KEXEC)
+		return;
+
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
 
-	buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+	buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
 	buf_orig = buf;
 
 	memset(buf, '\0', RECORD_SIZE);
@@ -83,8 +88,8 @@
 	buf += res;
 
 	hdr_size = buf - buf_orig;
-	l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
-	l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
+	l2_cpy = min(l2, RECORD_SIZE - hdr_size);
+	l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
 
 	s2_start = l2 - l2_cpy;
 	s1_start = l1 - l1_cpy;
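The UL suffix on RECORD_SIZE is what allows the explicit casts to go away: the kernel's min() macro refuses operands of mismatched types. A quick illustration:

#define RECORD_SIZE_INT	4096	/* int: min(l2, RECORD_SIZE_INT - hdr_size) trips the type check */
#define RECORD_SIZE_UL	4096UL	/* unsigned long: matches l1/l2, so min() works without casts */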
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5a1aa64..72a4fcb 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -626,7 +626,7 @@
 	preempt_disable();
 	/* if over the trickle threshold, use only 1 in 4096 samples */
 	if (input_pool.entropy_count > trickle_thresh &&
-	    (__get_cpu_var(trickle_count)++ & 0xfff))
+	    ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
 		goto out;
 
 	sample.jiffies = jiffies;
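A sketch of the per-cpu counter idiom adopted here (and again in cn_proc.c below); the caller must already have preemption disabled, as the preempt_disable() visible in the hunk above guarantees:

DEFINE_PER_CPU(__u32, example_count);

static __u32 example_next_seq(void)
{
	/* bump and read back in one step; -1 mimics the old postfix ++ */
	return __this_cpu_inc_return(example_count) - 1;
}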
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index bfe25ea..b4b9d5a 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -65,15 +65,12 @@
 	if (!bdev)
 		goto out;
 	igrab(bdev->bd_inode);
-	err = blkdev_get(bdev, filp->f_mode);
+	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
 	if (err)
 		goto out;
-	err = bd_claim(bdev, raw_open);
-	if (err)
-		goto out1;
 	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
 	if (err)
-		goto out2;
+		goto out1;
 	filp->f_flags |= O_DIRECT;
 	filp->f_mapping = bdev->bd_inode->i_mapping;
 	if (++raw_devices[minor].inuse == 1)
@@ -83,10 +80,8 @@
 	mutex_unlock(&raw_mutex);
 	return 0;
 
-out2:
-	bd_release(bdev);
 out1:
-	blkdev_put(bdev, filp->f_mode);
+	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
 out:
 	mutex_unlock(&raw_mutex);
 	return err;
@@ -110,8 +105,7 @@
 	}
 	mutex_unlock(&raw_mutex);
 
-	bd_release(bdev);
-	blkdev_put(bdev, filp->f_mode);
+	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
 	return 0;
 }
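A sketch of the exclusive-open convention raw.c switches to here: passing FMODE_EXCL plus a holder cookie to blkdev_get() replaces the old blkdev_get() + bd_claim() pair, and blkdev_put() must pass FMODE_EXCL again to drop the claim. example_holder is any unique tag, typically a pointer identifying the claimer:

static int example_claim_bdev(struct block_device *bdev, void *example_holder)
{
	int err;

	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
			 example_holder);
	if (err)
		return err;

	/* ... exclusive access to the device ... */

	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	return 0;
}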
 
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 86308830..3e4e73a 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -1764,7 +1764,7 @@
 
 #ifdef CONFIG_PCI
 
-static struct pci_device_id __devinitdata rocket_pci_ids[] = {
+static struct pci_device_id __devinitdata __used rocket_pci_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_RP, PCI_ANY_ID) },
 	{ }
 };
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
index 4be62ed..e8c52c8 100644
--- a/drivers/char/snsc.h
+++ b/drivers/char/snsc.h
@@ -19,7 +19,6 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
-#include <linux/kobject.h>
 #include <linux/fs.h>
 #include <linux/cdev.h>
 #include <linux/semaphore.h>
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 73f66d0..79e36c8 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1434,7 +1434,7 @@
 	sonypi_disable();
 
 	synchronize_irq(sonypi_device.irq);
-	flush_scheduled_work();
+	flush_work_sync(&sonypi_device.input_work);
 
 	if (useinput) {
 		input_unregister_device(sonypi_device.input_key_dev);
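[Editor's note: illustrative sketch.] sonypi is one of several drivers in this merge (ipwireless and tpm are others) converted from flush_scheduled_work() to flush_work_sync() on the one work item the driver owns, as part of retiring flushes of the entire system workqueue. A sketch of the teardown pattern with hypothetical names:

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct input_work;
	/* ... */
};

static void my_dev_teardown(struct my_dev *dev)
{
	/*
	 * Wait only for our own pending work instead of draining every
	 * item on the system workqueue; the old flush_scheduled_work()
	 * could deadlock if unrelated work happened to block on us.
	 */
	flush_work_sync(&dev->input_work);
}
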
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index a7616d2..c2bca3f 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2355,7 +2355,7 @@
 	func_exit();
 }
 
-static struct pci_device_id specialx_pci_tbl[] __devinitdata = {
+static struct pci_device_id specialx_pci_tbl[] __devinitdata __used = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_IO8) },
 	{ }
 };
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 7c41335..1f46f1c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -736,7 +736,7 @@
 	if (chip == NULL)
 		return -ENODEV;
 	rc = __tpm_pcr_read(chip, pcr_idx, res_buf);
-	module_put(chip->dev->driver->owner);
+	tpm_chip_put(chip);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(tpm_pcr_read);
@@ -775,11 +775,27 @@
 	rc = transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
 			  "attempting extend a PCR value");
 
-	module_put(chip->dev->driver->owner);
+	tpm_chip_put(chip);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(tpm_pcr_extend);
 
+int tpm_send(u32 chip_num, void *cmd, size_t buflen)
+{
+	struct tpm_chip *chip;
+	int rc;
+
+	chip = tpm_chip_find_get(chip_num);
+	if (chip == NULL)
+		return -ENODEV;
+
+	rc = transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
+
+	tpm_chip_put(chip);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_send);
+
 ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
 		      char *buf)
 {
@@ -986,7 +1002,7 @@
 	struct tpm_chip *chip = file->private_data;
 
 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_scheduled_work();
+	flush_work_sync(&chip->work);
 	file->private_data = NULL;
 	atomic_set(&chip->data_pending, 0);
 	kfree(chip->data_buffer);
@@ -1038,7 +1054,7 @@
 	ssize_t ret_size;
 
 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_scheduled_work();
+	flush_work_sync(&chip->work);
 	ret_size = atomic_read(&chip->data_pending);
 	atomic_set(&chip->data_pending, 0);
 	if (ret_size > 0) {	/* relay data */
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 792868d..72ddb03 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -113,6 +113,11 @@
 
 #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
 
+static inline void tpm_chip_put(struct tpm_chip *chip)
+{
+	module_put(chip->dev->driver->owner);
+}
+
 static inline int tpm_read_index(int base, int index)
 {
 	outb(index, base);
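[Editor's note: illustrative sketch.] tpm_chip_put() simply drops the module reference taken by tpm_chip_find_get(), and the new tpm_send() export follows the same get/transmit/put shape as tpm_pcr_read() and tpm_pcr_extend(). A hedged sketch of a caller of the new export, assuming its declaration lands in include/linux/tpm.h next to tpm_pcr_read()/tpm_pcr_extend() and using the TPM_ANY_NUM selector from that header; the command blob is built elsewhere and the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/tpm.h>

/* hypothetical helper: push a pre-built command blob to any TPM chip */
static int send_tpm_blob(void *cmd, size_t len)
{
	int rc;

	rc = tpm_send(TPM_ANY_NUM, cmd, len);
	if (rc)
		pr_err("tpm_send failed: %d\n", rc);
	return rc;
}
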
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index a7f046b..2b46a7e 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -43,9 +43,10 @@
 
 static inline void get_seq(__u32 *ts, int *cpu)
 {
-	*ts = get_cpu_var(proc_event_counts)++;
+	preempt_disable();
+	*ts = __this_cpu_inc_return(proc_event_counts) - 1;
 	*cpu = smp_processor_id();
-	put_cpu_var(proc_event_counts);
+	preempt_enable();
 }
 
 void proc_fork_connector(struct task_struct *task)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 08d5f05..bf50924 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -49,7 +49,7 @@
  */
 static void cpuidle_idle_call(void)
 {
-	struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
+	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_state *target_state;
 	int next_state;
 
@@ -96,7 +96,15 @@
 
 	/* enter the state and update stats */
 	dev->last_state = target_state;
+
+	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
+	trace_cpu_idle(next_state, dev->cpu);
+
 	dev->last_residency = target_state->enter(dev, target_state);
+
+	trace_power_end(dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+
 	if (dev->last_state)
 		target_state = dev->last_state;
 
@@ -106,8 +114,6 @@
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /**
@@ -155,6 +161,45 @@
 
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+	ktime_t	t1, t2;
+	s64 diff;
+	int ret;
+
+	t1 = ktime_get();
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+
+	t2 = ktime_get();
+	diff = ktime_to_us(ktime_sub(t2, t1));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	ret = (int) diff;
+	return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+	struct cpuidle_state *state = &dev->states[0];
+
+	cpuidle_set_statedata(state, NULL);
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->flags = 0;
+	state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
  * cpuidle_enable_device - enables idle PM for a CPU
  * @dev: the CPU
@@ -179,6 +224,8 @@
 			return ret;
 	}
 
+	poll_idle_init(dev);
+
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
@@ -233,45 +280,6 @@
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
-{
-	ktime_t	t1, t2;
-	s64 diff;
-	int ret;
-
-	t1 = ktime_get();
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-
-	t2 = ktime_get();
-	diff = ktime_to_us(ktime_sub(t2, t1));
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	ret = (int) diff;
-	return ret;
-}
-
-static void poll_idle_init(struct cpuidle_device *dev)
-{
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
-	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-	state->exit_latency = 0;
-	state->target_residency = 0;
-	state->power_usage = -1;
-	state->flags = CPUIDLE_FLAG_POLL;
-	state->enter = poll_idle;
-}
-#else
-static void poll_idle_init(struct cpuidle_device *dev) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
 /**
  * __cpuidle_register_device - internal register function called before register
  * and enable routines
@@ -292,8 +300,6 @@
 
 	init_completion(&dev->kobj_unregister);
 
-	poll_idle_init(dev);
-
 	/*
 	 * cpuidle driver should set the dev->power_specified bit
 	 * before registering the device if the driver provides
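[Editor's note: condensed restatement, not literal kernel code.] The cpuidle hunks do two things: poll_idle()/poll_idle_init() move above cpuidle_enable_device() so the polling state is (re)initialised whenever a device is enabled, and the power tracepoints now bracket the ->enter() callback so the trace covers the time actually spent in the low-power state. An illustrative view of the resulting ordering:

#include <linux/cpuidle.h>
#include <trace/events/power.h>

/* sketch: tracepoints now surround the state entry itself */
static void enter_idle_state(struct cpuidle_device *dev,
			     struct cpuidle_state *target, int next_state)
{
	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	dev->last_residency = target->enter(dev, target);	/* CPU idles here */

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
}
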
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e5..c99305a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -857,7 +857,7 @@
 			printk(KERN_WARNING MV_CESA
 			       "Base driver '%s' could not be loaded!\n",
 			       base_hash_name);
-			err = PTR_ERR(fallback_tfm);
+			err = PTR_ERR(base_hash);
 			goto err_bad_base;
 		}
 	}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 7614126..80dc094 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1542,7 +1542,7 @@
 	return err;
 }
 
-static void __exit n2_unregister_algs(void)
+static void __devexit n2_unregister_algs(void)
 {
 	mutex_lock(&spu_lock);
 	if (!--algs_registered)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca51..add2a1a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
 #define FLAGS_CBC		BIT(1)
 #define FLAGS_GIV		BIT(2)
 
-#define FLAGS_NEW_KEY		BIT(4)
-#define FLAGS_NEW_IV		BIT(5)
-#define FLAGS_INIT		BIT(6)
-#define FLAGS_FAST		BIT(7)
-#define FLAGS_BUSY		8
+#define FLAGS_INIT		BIT(4)
+#define FLAGS_FAST		BIT(5)
+#define FLAGS_BUSY		BIT(6)
 
 struct omap_aes_ctx {
 	struct omap_aes_dev *dd;
@@ -98,19 +96,18 @@
 struct omap_aes_dev {
 	struct list_head	list;
 	unsigned long		phys_base;
-	void __iomem 		*io_base;
+	void __iomem		*io_base;
 	struct clk		*iclk;
 	struct omap_aes_ctx	*ctx;
 	struct device		*dev;
 	unsigned long		flags;
+	int			err;
 
-	u32			*iv;
-	u32			ctrl;
+	spinlock_t		lock;
+	struct crypto_queue	queue;
 
-	spinlock_t			lock;
-	struct crypto_queue		queue;
-
-	struct tasklet_struct		task;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
 
 	struct ablkcipher_request	*req;
 	size_t				total;
@@ -179,9 +176,13 @@
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-	int err = 0;
-
+	/*
+	 * Clocks are enabled when a request starts and disabled when it
+	 * finishes. There may be long delays between requests, and the
+	 * device might go to off mode to save power.
+	 */
 	clk_enable(dd->iclk);
+
 	if (!(dd->flags & FLAGS_INIT)) {
 		/* is it necessary to reset before every operation? */
 		omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,39 +194,26 @@
 		__asm__ __volatile__("nop");
 		__asm__ __volatile__("nop");
 
-		err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
-				AES_REG_SYSSTATUS_RESETDONE);
-		if (!err)
-			dd->flags |= FLAGS_INIT;
+		if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+				AES_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
 	}
 
-	return err;
+	return 0;
 }
 
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
-	clk_disable(dd->iclk);
-}
-
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
 	unsigned int key32;
-	int i;
+	int i, err;
 	u32 val, mask;
 
-	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
-	if (dd->flags & FLAGS_CBC)
-		val |= AES_REG_CTRL_CBC;
-	if (dd->flags & FLAGS_ENCRYPT)
-		val |= AES_REG_CTRL_DIRECTION;
-
-	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
-		   !(dd->ctx->flags & FLAGS_NEW_KEY))
-		goto out;
-
-	/* only need to write control registers for new settings */
-
-	dd->ctrl = val;
+	err = omap_aes_hw_init(dd);
+	if (err)
+		return err;
 
 	val = 0;
 	if (dd->dma_lch_out >= 0)
@@ -237,30 +225,43 @@
 
 	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
 
-	pr_debug("Set key\n");
 	key32 = dd->ctx->keylen / sizeof(u32);
-	/* set a key */
+
+	/* it seems a key should always be set even if it has not changed */
 	for (i = 0; i < key32; i++) {
 		omap_aes_write(dd, AES_REG_KEY(i),
 			__le32_to_cpu(dd->ctx->key[i]));
 	}
-	dd->ctx->flags &= ~FLAGS_NEW_KEY;
 
-	if (dd->flags & FLAGS_NEW_IV) {
-		pr_debug("Set IV\n");
-		omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
-		dd->flags &= ~FLAGS_NEW_IV;
-	}
+	if ((dd->flags & FLAGS_CBC) && dd->req->info)
+		omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+
+	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+	if (dd->flags & FLAGS_CBC)
+		val |= AES_REG_CTRL_CBC;
+	if (dd->flags & FLAGS_ENCRYPT)
+		val |= AES_REG_CTRL_DIRECTION;
 
 	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
 			AES_REG_CTRL_KEY_SIZE;
 
-	omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+	omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
 
-out:
-	/* start DMA or disable idle mode */
-	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
-			    AES_REG_MASK_START);
+	/* IN */
+	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+				 dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+	/* OUT */
+	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+				dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
+	return 0;
 }
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -288,8 +289,16 @@
 {
 	struct omap_aes_dev *dd = data;
 
-	if (lch == dd->dma_lch_out)
-		tasklet_schedule(&dd->task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	} else if (lch == dd->dma_lch_in) {
+		return;
+	}
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -339,18 +348,6 @@
 		goto err_dma_out;
 	}
 
-	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
-				 dd->phys_base + AES_REG_DATA, 0, 4);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
-	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
-				dd->phys_base + AES_REG_DATA, 0, 4);
-
-	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
 	return 0;
 
 err_dma_out:
@@ -406,6 +403,11 @@
 		if (!count)
 			return off;
 
+		/*
+		 * buflen and total are AES_BLOCK_SIZE aligned,
+		 * so count should also be aligned
+		 */
+
 		sg_copy_buf(buf + off, *sg, *offset, count, out);
 
 		off += count;
@@ -461,7 +463,9 @@
 	omap_start_dma(dd->dma_lch_in);
 	omap_start_dma(dd->dma_lch_out);
 
-	omap_aes_write_ctrl(dd);
+	/* start DMA or disable idle mode */
+	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+			    AES_REG_MASK_START);
 
 	return 0;
 }
@@ -488,8 +492,10 @@
 		count = min(dd->total, sg_dma_len(dd->in_sg));
 		count = min(count, sg_dma_len(dd->out_sg));
 
-		if (count != dd->total)
+		if (count != dd->total) {
+			pr_err("request length != buffer length\n");
 			return -EINVAL;
+		}
 
 		pr_debug("fast\n");
 
@@ -525,23 +531,25 @@
 
 	dd->total -= count;
 
-	err = omap_aes_hw_init(dd);
-
 	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+	if (err) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
 
 	return err;
 }
 
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
-	struct omap_aes_ctx *ctx;
+	struct ablkcipher_request *req = dd->req;
 
 	pr_debug("err: %d\n", err);
 
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
-	if (!dd->total)
-		dd->req->base.complete(&dd->req->base, err);
+	req->base.complete(&req->base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -553,8 +561,6 @@
 
 	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-	omap_aes_hw_cleanup(dd);
-
 	omap_stop_dma(dd->dma_lch_in);
 	omap_stop_dma(dd->dma_lch_out);
 
@@ -574,40 +580,39 @@
 		}
 	}
 
-	if (err || !dd->total)
-		omap_aes_finish_req(dd, err);
-
 	return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+			       struct ablkcipher_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_aes_ctx *ctx;
 	struct omap_aes_reqctx *rctx;
-	struct ablkcipher_request *req;
 	unsigned long flags;
-
-	if (dd->total)
-		goto start;
+	int err, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 
 	req = ablkcipher_request_cast(async_req);
 
-	pr_debug("get new req\n");
-
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
@@ -621,27 +626,22 @@
 	rctx->mode &= FLAGS_MODE_MASK;
 	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-	dd->iv = req->info;
-	if ((dd->flags & FLAGS_CBC) && dd->iv)
-		dd->flags |= FLAGS_NEW_IV;
-	else
-		dd->flags &= ~FLAGS_NEW_IV;
-
+	dd->ctx = ctx;
 	ctx->dd = dd;
-	if (dd->ctx != ctx) {
-		/* assign new context to device */
-		dd->ctx = ctx;
-		ctx->flags |= FLAGS_NEW_KEY;
+
+	err = omap_aes_write_ctrl(dd);
+	if (!err)
+		err = omap_aes_crypt_dma_start(dd);
+	if (err) {
+		/* aes_task will not finish it, so do it here */
+		omap_aes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
 	}
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
-		pr_err("request size is not exact amount of AES blocks\n");
-
-start:
-	return omap_aes_crypt_dma_start(dd);
+	return ret; /* return ret, which is enqueue return value */
 }
 
-static void omap_aes_task(unsigned long data)
+static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
 	int err;
@@ -650,40 +650,50 @@
 
 	err = omap_aes_crypt_dma_stop(dd);
 
-	err = omap_aes_handle_req(dd);
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		err = omap_aes_crypt_dma_start(dd);
+		if (!err)
+			return; /* DMA started. Not finishing. */
+	}
+
+	omap_aes_finish_req(dd, err);
+	omap_aes_handle_queue(dd, NULL);
 
 	pr_debug("exit\n");
 }
 
+static void omap_aes_queue_task(unsigned long data)
+{
+	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+	omap_aes_handle_queue(dd, NULL);
+}
+
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 			crypto_ablkcipher_reqtfm(req));
 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct omap_aes_dev *dd;
-	unsigned long flags;
-	int err;
 
 	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		pr_err("request size is not exact amount of AES blocks\n");
+		return -EINVAL;
+	}
+
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
 
 	rctx->mode = mode;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ablkcipher_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		omap_aes_handle_req(dd);
-
-	pr_debug("exit\n");
-
-	return err;
+	return omap_aes_handle_queue(dd, req);
 }
 
 /* ********************** ALG API ************************************ */
@@ -701,7 +711,6 @@
 
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
-	ctx->flags |= FLAGS_NEW_KEY;
 
 	return 0;
 }
@@ -750,7 +759,7 @@
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask	 	= 0,
+	.cra_alignmask		= 0,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= omap_aes_cra_init,
@@ -770,7 +779,7 @@
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask	 	= 0,
+	.cra_alignmask		= 0,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= omap_aes_cra_init,
@@ -849,7 +858,8 @@
 		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
 	clk_disable(dd->iclk);
 
-	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
 	err = omap_aes_dma_init(dd);
 	if (err)
@@ -876,7 +886,8 @@
 		crypto_unregister_alg(&algs[j]);
 	omap_aes_dma_cleanup(dd);
 err_dma:
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	iounmap(dd->io_base);
 err_io:
 	clk_put(dd->iclk);
@@ -903,7 +914,8 @@
 	for (i = 0; i < ARRAY_SIZE(algs); i++)
 		crypto_unregister_alg(&algs[i]);
 
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	omap_aes_dma_cleanup(dd);
 	iounmap(dd->io_base);
 	clk_put(dd->iclk);
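[Editor's note: illustrative sketch.] The omap-aes rework splits the single tasklet into done/queue tasklets and centralises request dispatch in omap_aes_handle_queue(), which both enqueues and, when the engine is idle, starts the next request under one lock. A hedged sketch of that dispatch shape; the struct, flag value, and function names are hypothetical stand-ins, only the crypto queue helpers are real:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>

#define FLAGS_BUSY	BIT(0)

struct my_dd {
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;
};

/* sketch: enqueue a request and, if the engine is idle, start the next one */
static int handle_queue(struct my_dd *dd, struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & FLAGS_BUSY) {		/* engine already running */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;			/* -EINPROGRESS or -EBUSY */
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	/* program keys/IV and kick DMA for async_req here ... */
	return ret;
}
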
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c..2e71123 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL	HZ
 
-#define FLAGS_FIRST		0x0001
 #define FLAGS_FINUP		0x0002
 #define FLAGS_FINAL		0x0004
-#define FLAGS_FAST		0x0008
+#define FLAGS_SG		0x0008
 #define FLAGS_SHA1		0x0010
 #define FLAGS_DMA_ACTIVE	0x0020
 #define FLAGS_OUTPUT_READY	0x0040
@@ -83,13 +82,17 @@
 #define FLAGS_INIT		0x0100
 #define FLAGS_CPU		0x0200
 #define FLAGS_HMAC		0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY		16
+#define FLAGS_ERROR		0x0800
+#define FLAGS_BUSY		0x1000
 
 #define OP_UPDATE	1
 #define OP_FINAL	2
 
+#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
+#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN		PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -97,8 +100,8 @@
 	unsigned long		flags;
 	unsigned long		op;
 
+	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
 	size_t			digcnt;
-	u8			*buffer;
 	size_t			bufcnt;
 	size_t			buflen;
 	dma_addr_t		dma_addr;
@@ -107,6 +110,8 @@
 	struct scatterlist	*sg;
 	unsigned int		offset;	/* offset in current sg */
 	unsigned int		total;	/* total request */
+
+	u8			buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@
 	int			irq;
 	struct clk		*iclk;
 	spinlock_t		lock;
+	int			err;
 	int			dma;
 	int			dma_lch;
 	struct tasklet_struct	done_task;
@@ -194,53 +200,68 @@
 static void omap_sham_copy_hash(struct ahash_request *req, int out)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	/* MD5 is rarely used; copy SHA1-sized digest to simplify the code */
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+		if (out)
+			hash[i] = omap_sham_read(ctx->dd,
+						SHA_REG_DIGEST(i));
+		else
+			omap_sham_write(ctx->dd,
+					SHA_REG_DIGEST(i), hash[i]);
+	}
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *in = (u32 *)ctx->digest;
 	u32 *hash = (u32 *)req->result;
 	int i;
 
+	if (!hash)
+		return;
+
 	if (likely(ctx->flags & FLAGS_SHA1)) {
 		/* SHA1 results are in big endian */
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-							cpu_to_be32(hash[i]));
+			hash[i] = be32_to_cpu(in[i]);
 	} else {
 		/* MD5 results are in little endian */
 		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-							cpu_to_le32(hash[i]));
+			hash[i] = le32_to_cpu(in[i]);
 	}
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
+{
+	clk_enable(dd->iclk);
+
+	if (!(dd->flags & FLAGS_INIT)) {
+		omap_sham_write_mask(dd, SHA_REG_MASK,
+			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+					SHA_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 				 int final, int dma)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	u32 val = length << 5, mask;
 
-	if (unlikely(!ctx->digcnt)) {
-
-		clk_enable(dd->iclk);
-
-		if (!(dd->flags & FLAGS_INIT)) {
-			omap_sham_write_mask(dd, SHA_REG_MASK,
-				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
-
-			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-						SHA_REG_SYSSTATUS_RESETDONE))
-				return -ETIMEDOUT;
-
-			dd->flags |= FLAGS_INIT;
-		}
-	} else {
+	if (likely(ctx->digcnt))
 		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-	}
 
 	omap_sham_write_mask(dd, SHA_REG_MASK,
 		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@
 			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-	return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, count, len32;
+	int count, len32;
 	const u32 *buffer = (const u32 *)buf;
 
 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	err = omap_sham_write_ctrl(dd, length, final, 0);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 0);
+
+	/* should be non-zero before next lines to disable clocks later */
+	ctx->digcnt += length;
 
 	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
 		return -ETIMEDOUT;
 
-	ctx->digcnt += length;
-
 	if (final)
 		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
 
@@ -298,16 +316,11 @@
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, len32;
+	int len32;
 
 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	/* flush cache entries related to our page */
-	if (dma_addr == ctx->dma_addr)
-		dma_sync_single_for_device(dd->dev, dma_addr, length,
-					   DMA_TO_DEVICE);
-
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 				dma_addr, 0, 0);
 
-	err = omap_sham_write_ctrl(dd, length, final, 1);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 1);
 
 	ctx->digcnt += length;
 
@@ -371,15 +382,29 @@
 	return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+					struct omap_sham_reqctx *ctx,
+					size_t length, int final)
+{
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+				       DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+		return -EINVAL;
+	}
+
+	ctx->flags &= ~FLAGS_SG;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	unsigned int final;
 	size_t count;
 
-	if (!ctx->total)
-		return 0;
-
 	omap_sham_append_sg(ctx);
 
 	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@
 	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 		count = ctx->bufcnt;
 		ctx->bufcnt = 0;
-		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+		return omap_sham_xmit_dma_map(dd, ctx, count, final);
 	}
 
 	return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int length;
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
 
-	ctx->flags |= FLAGS_FAST;
+	if (!ctx->total)
+		return 0;
 
-	length = min(ctx->total, sg_dma_len(ctx->sg));
-	ctx->total = length;
+	if (ctx->bufcnt || ctx->offset)
+		return omap_sham_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+			ctx->digcnt, ctx->bufcnt, ctx->total);
+
+	sg = ctx->sg;
+
+	if (!SG_AA(sg))
+		return omap_sham_update_dma_slow(dd);
+
+	if (!sg_is_last(sg) && !SG_SA(sg))
+		/* size is not SHA1_BLOCK_SIZE aligned */
+		return omap_sham_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & FLAGS_FINUP)) {
+			/* a non-last sg must be SHA1_MD5_BLOCK_SIZE aligned */
+			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+			/* without finup() we need one block to close hash */
+			if (!tail)
+				tail = SHA1_MD5_BLOCK_SIZE;
+			length -= tail;
+		}
+	}
 
 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 		dev_err(dd->dev, "dma_map_sg  error\n");
 		return -EINVAL;
 	}
 
-	ctx->total -= length;
+	ctx->flags |= FLAGS_SG;
 
-	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+	ctx->total -= length;
+	ctx->offset = length; /* offset where to start slow */
+
+	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_FAST)
+	if (ctx->flags & FLAGS_SG) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+	} else {
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+				 DMA_TO_DEVICE);
+	}
 
 	return 0;
 }
@@ -454,14 +526,7 @@
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (ctx->digcnt)
-		clk_disable(dd->iclk);
-
-	if (ctx->dma_addr)
-		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-				 DMA_TO_DEVICE);
-
-	if (ctx->buffer)
-		free_page((unsigned long)ctx->buffer);
+		omap_sham_copy_ready_hash(req);
 
 	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 }
@@ -489,8 +554,6 @@
 
 	ctx->flags = 0;
 
-	ctx->flags |= FLAGS_FIRST;
-
 	dev_dbg(dd->dev, "init: digest size: %d\n",
 		crypto_ahash_digestsize(tfm));
 
@@ -499,21 +562,7 @@
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
-
-	ctx->buflen = PAGE_SIZE;
-	ctx->buffer = (void *)__get_free_page(
-				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-				GFP_KERNEL : GFP_ATOMIC);
-	if (!ctx->buffer)
-		return -ENOMEM;
-
-	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-					DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-		free_page((unsigned long)ctx->buffer);
-		return -EINVAL;
-	}
+	ctx->buflen = BUFLEN;
 
 	if (tctx->flags & FLAGS_HMAC) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@
 
 	if (ctx->flags & FLAGS_CPU)
 		err = omap_sham_update_cpu(dd);
-	else if (ctx->flags & FLAGS_FAST)
-		err = omap_sham_update_dma_fast(dd);
 	else
-		err = omap_sham_update_dma_slow(dd);
+		err = omap_sham_update_dma_start(dd);
 
 	/* wait for dma completion before can take more data */
 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@
 		use_dma = 0;
 
 	if (use_dma)
-		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
 	else
 		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
 	ctx->bufcnt = 0;
 
-	if (err != -EINPROGRESS)
-		omap_sham_cleanup(req);
-
 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
 
 	return err;
@@ -576,6 +620,7 @@
 
 static int omap_sham_finish_req_hmac(struct ahash_request *req)
 {
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_hmac_ctx *bctx = tctx->base;
 	int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@
 
 	return crypto_shash_init(&desc.shash) ?:
 	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
 
 	if (!err) {
 		omap_sham_copy_hash(ctx->dd->req, 1);
 		if (ctx->flags & FLAGS_HMAC)
 			err = omap_sham_finish_req_hmac(req);
+	} else {
+		ctx->flags |= FLAGS_ERROR;
 	}
 
-	if (ctx->flags & FLAGS_FINAL)
+	if ((ctx->flags & FLAGS_FINAL) || err)
 		omap_sham_cleanup(req);
 
-	clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
 }
 
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+				  struct ahash_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_sham_reqctx *ctx;
-	struct ahash_request *req, *prev_req;
+	struct ahash_request *prev_req;
 	unsigned long flags;
-	int err = 0;
-
-	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		return 0;
+	int err = 0, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 						ctx->op, req->nbytes);
 
-	if (req != prev_req && ctx->digcnt)
+
+	err = omap_sham_hw_init(dd);
+	if (err)
+		goto err1;
+
+	omap_set_dma_dest_params(dd->dma_lch, 0,
+			OMAP_DMA_AMODE_CONSTANT,
+			dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_16);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_4);
+
+	if (ctx->digcnt)
 		/* request has changed - restore hash */
 		omap_sham_copy_hash(req, 0);
 
@@ -658,7 +726,7 @@
 	} else if (ctx->op == OP_FINAL) {
 		err = omap_sham_final_req(dd);
 	}
-
+err1:
 	if (err != -EINPROGRESS) {
 		/* done_task will not finish it, so do it here */
 		omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@
 
 	dev_dbg(dd->dev, "exit, err: %d\n", err);
 
-	return err;
+	return ret;
 }
 
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_dev *dd = tctx->dd;
-	unsigned long flags;
-	int err;
 
 	ctx->op = op;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ahash_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	omap_sham_handle_queue(dd);
-
-	return err;
+	return omap_sham_handle_queue(dd, req);
 }
 
 static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@
 			*/
 			omap_sham_append_sg(ctx);
 			return 0;
-		} else if (ctx->bufcnt + ctx->total <= 64) {
+		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+			/*
+			 * faster to use CPU for short transfers
+			 */
 			ctx->flags |= FLAGS_CPU;
-		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-			/* may be can use faster functions */
-			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-								sizeof(u32));
-
-			if (aligned && (ctx->flags & FLAGS_FIRST))
-				/* digest: first and final */
-				ctx->flags |= FLAGS_FAST;
-
-			ctx->flags &= ~FLAGS_FIRST;
 		}
-	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-		/* if not finaup -> not fast */
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
 		return 0;
 	}
@@ -761,12 +813,14 @@
 
 	ctx->flags |= FLAGS_FINUP;
 
-	/* OMAP HW accel works only with buffers >= 9 */
-	/* HMAC is always >= 9 because of ipad */
-	if ((ctx->digcnt + ctx->bufcnt) < 9)
-		err = omap_sham_final_shash(req);
-	else if (ctx->bufcnt)
-		return omap_sham_enqueue(req, OP_FINAL);
+	if (!(ctx->flags & FLAGS_ERROR)) {
+		/* OMAP HW accel works only with buffers >= 9 */
+		/* HMAC is always >= 9 because of ipad */
+		if ((ctx->digcnt + ctx->bufcnt) < 9)
+			err = omap_sham_final_shash(req);
+		else if (ctx->bufcnt)
+			return omap_sham_enqueue(req, OP_FINAL);
+	}
 
 	omap_sham_cleanup(req);
 
@@ -836,6 +890,8 @@
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 	const char *alg_name = crypto_tfm_alg_name(tfm);
 
+	pr_info("enter\n");
+
 	/* Allocate a fallback and abort if it failed. */
 	tctx->fallback = crypto_alloc_shash(alg_name, 0,
 					    CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@
 	}
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct omap_sham_reqctx));
+				 sizeof(struct omap_sham_reqctx) + BUFLEN);
 
 	if (alg_base) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -956,7 +1012,7 @@
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_sha1_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -980,7 +1036,7 @@
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_md5_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -993,7 +1049,7 @@
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 	struct ahash_request *req = dd->req;
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int ready = 1;
+	int ready = 0, err = 0;
 
 	if (ctx->flags & FLAGS_OUTPUT_READY) {
 		ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@
 	if (dd->flags & FLAGS_DMA_ACTIVE) {
 		dd->flags &= ~FLAGS_DMA_ACTIVE;
 		omap_sham_update_dma_stop(dd);
-		omap_sham_update_dma_slow(dd);
+		if (!dd->err)
+			err = omap_sham_update_dma_start(dd);
 	}
 
-	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
-		dev_dbg(dd->dev, "update done\n");
+	err = dd->err ? : err;
+
+	if (err != -EINPROGRESS && (ready || err)) {
+		dev_dbg(dd->dev, "update done: err: %d\n", err);
 		/* finish curent request */
-		omap_sham_finish_req(req, 0);
+		omap_sham_finish_req(req, err);
 		/* start new request */
-		omap_sham_handle_queue(dd);
+		omap_sham_handle_queue(dd, NULL);
 	}
 }
 
@@ -1019,7 +1078,7 @@
 {
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 
-	omap_sham_handle_queue(dd);
+	omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@
 	omap_sham_read(dd, SHA_REG_CTRL);
 
 	ctx->flags |= FLAGS_OUTPUT_READY;
+	dd->err = 0;
 	tasklet_schedule(&dd->done_task);
 
 	return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@
 {
 	struct omap_sham_dev *dd = data;
 
-	if (likely(lch == dd->dma_lch))
-		tasklet_schedule(&dd->done_task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	}
+
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@
 		dev_err(dd->dev, "Unable to request DMA channel\n");
 		return err;
 	}
-	omap_set_dma_dest_params(dd->dma_lch, 0,
-			OMAP_DMA_AMODE_CONSTANT,
-			dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_16);
-
-	omap_set_dma_src_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_4);
 
 	return 0;
 }
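[Editor's note: illustrative sketch.] omap_sham_update_dma_start() chooses between the buffered "slow" path and direct scatterlist DMA based on two alignment checks, wrapped in the SG_AA/SG_SA macros above. A compact restatement of that decision with the checks spelled out; constant and function names here are illustrative:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

#define HASH_BLOCK_SIZE	64	/* SHA1/MD5 block size */

/* sketch: can this sg entry be fed to the DMA engine directly? */
static bool sg_is_dma_friendly(struct scatterlist *sg)
{
	/* start address must be 32-bit aligned for the DMA controller */
	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return false;
	/* a non-last entry must end on a hash block boundary */
	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, HASH_BLOCK_SIZE))
		return false;
	return true;
}
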
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a515ba..db33d30 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,6 +9,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -21,7 +22,6 @@
 #include <asm/byteorder.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
-#include "padlock.h"
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d3a27e0..adf075b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -13,6 +13,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -22,13 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
-#include "padlock.h"
-
-#ifdef CONFIG_64BIT
-#define STACK_ALIGN 16
-#else
-#define STACK_ALIGN 4
-#endif
 
 struct padlock_sha_desc {
 	struct shash_desc fallback;
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
deleted file mode 100644
index b728e45..0000000
--- a/drivers/crypto/padlock.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Driver for VIA PadLock
- *
- * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option) 
- * any later version.
- *
- */
-
-#ifndef _CRYPTO_PADLOCK_H
-#define _CRYPTO_PADLOCK_H
-
-#define PADLOCK_ALIGNMENT 16
-
-#define PFX	"padlock: "
-
-#define PADLOCK_CRA_PRIORITY	300
-#define PADLOCK_COMPOSITE_PRIORITY 400
-
-#endif	/* _CRYPTO_PADLOCK_H */
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index b98c676..c461eda 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -110,8 +110,6 @@
 
 	/* at this point only one domain in the list is expected */
 	domain = list_first_entry(&dca_domains, struct dca_domain, node);
-	if (!domain)
-		return;
 
 	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
 		list_del(&dca->node);
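[Editor's note: illustrative sketch.] The removed NULL check was dead code: list_first_entry() is just container_of() on the list head's ->next pointer, so it never yields NULL; the "only one domain" assumption is carried by the comment above it instead. A small sketch (hypothetical types) of why the check can never fire:

#include <linux/list.h>

struct dom {
	struct list_head node;
	int id;
};

static struct dom *first_dom(struct list_head *domains)
{
	/*
	 * Computes domains->next minus offsetof(struct dom, node); even on
	 * an empty list this is a non-NULL (if bogus) pointer, so checking
	 * the result against NULL proves nothing -- use list_empty() instead.
	 */
	return list_first_entry(domains, struct dom, node);
}
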
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee2359..ef13873 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
-	depends on PPC_MPC512x
+	depends on PPC_MPC512x || PPC_MPC831x
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3109bd9..7826638 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1060,8 +1060,8 @@
  * mid_setup_dma -	Setup the DMA controller
  * @pdev: Controller PCI device structure
  *
- * Initilize the DMA controller, channels, registers with DMA engine,
- * ISR. Initilize DMA controller channels.
+ * Initialize the DMA controller, channels, registers with DMA engine,
+ * ISR. Initialize DMA controller channels.
  */
 static int mid_setup_dma(struct pci_dev *pdev)
 {
@@ -1217,7 +1217,7 @@
  * @pdev: Controller PCI device structure
  * @id: pci device id structure
  *
- * Initilize the PCI device, map BARs, query driver data.
+ * Initialize the PCI device, map BARs, query driver data.
  * Call setup_dma to complete contoller and chan initilzation
  */
 static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf3..59c2701 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
 #define MPC_DMA_DMAES_SBE	(1 << 1)
 #define MPC_DMA_DMAES_DBE	(1 << 0)
 
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
+
 #define MPC_DMA_TSIZE_1		0x00
 #define MPC_DMA_TSIZE_2		0x01
 #define MPC_DMA_TSIZE_4		0x02
@@ -104,7 +107,10 @@
 	/* 0x30 */
 	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
 	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
-	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
+	union {
+		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
+		u32 dmagpor;	/* (General purpose register on MPC8308) */
+	};
 	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
 	/* 0x40 ~ 0xff */
 	u32 reserve0[48];	/* Reserved */
@@ -195,7 +201,9 @@
 	struct mpc_dma_regs __iomem	*regs;
 	struct mpc_dma_tcd __iomem	*tcd;
 	int				irq;
+	int				irq2;
 	uint				error_status;
+	int				is_mpc8308;
 
 	/* Lock for error_status field in this structure */
 	spinlock_t			error_status_lock;
@@ -252,11 +260,13 @@
 		prev = mdesc;
 	}
 
-	prev->tcd->start = 0;
 	prev->tcd->int_maj = 1;
 
 	/* Send first descriptor in chain into hardware */
 	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+
+	if (first != prev)
+		mdma->tcd[cid].e_sg = 1;
 	out_8(&mdma->regs->dmassrt, cid);
 }
 
@@ -274,6 +284,9 @@
 
 		spin_lock(&mchan->lock);
 
+		out_8(&mdma->regs->dmacint, ch + off);
+		out_8(&mdma->regs->dmacerr, ch + off);
+
 		/* Check error status */
 		if (es & (1 << ch))
 			list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@
 	spin_unlock(&mdma->error_status_lock);
 
 	/* Handle interrupt on each channel */
-	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+	if (mdma->dma.chancnt > 32) {
+		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
 					in_be32(&mdma->regs->dmaerrh), 32);
+	}
 	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
 					in_be32(&mdma->regs->dmaerrl), 0);
 
-	/* Ack interrupt on all channels */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
 	/* Schedule tasklet */
 	tasklet_schedule(&mdma->tasklet);
 
 	return IRQ_HANDLED;
 }
 
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* process completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
 {
-	struct mpc_dma *mdma = (void *)data;
 	dma_cookie_t last_cookie = 0;
 	struct mpc_dma_chan *mchan;
 	struct mpc_dma_desc *mdesc;
 	struct dma_async_tx_descriptor *desc;
 	unsigned long flags;
 	LIST_HEAD(list);
-	uint es;
 	int i;
 
+	for (i = 0; i < mdma->dma.chancnt; i++) {
+		mchan = &mdma->channels[i];
+
+		/* Get all completed descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (!list_empty(&mchan->completed))
+			list_splice_tail_init(&mchan->completed, &list);
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		if (list_empty(&list))
+			continue;
+
+		/* Execute callbacks and run dependencies */
+		list_for_each_entry(mdesc, &list, node) {
+			desc = &mdesc->desc;
+
+			if (desc->callback)
+				desc->callback(desc->callback_param);
+
+			last_cookie = desc->cookie;
+			dma_run_dependencies(desc);
+		}
+
+		/* Free descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		list_splice_tail_init(&list, &mchan->free);
+		mchan->completed_cookie = last_cookie;
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+	struct mpc_dma *mdma = (void *)data;
+	unsigned long flags;
+	uint es;
+
 	spin_lock_irqsave(&mdma->error_status_lock, flags);
 	es = mdma->error_status;
 	mdma->error_status = 0;
@@ -370,35 +415,7 @@
 			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
 	}
 
-	for (i = 0; i < mdma->dma.chancnt; i++) {
-		mchan = &mdma->channels[i];
-
-		/* Get all completed descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		if (!list_empty(&mchan->completed))
-			list_splice_tail_init(&mchan->completed, &list);
-		spin_unlock_irqrestore(&mchan->lock, flags);
-
-		if (list_empty(&list))
-			continue;
-
-		/* Execute callbacks and run dependencies */
-		list_for_each_entry(mdesc, &list, node) {
-			desc = &mdesc->desc;
-
-			if (desc->callback)
-				desc->callback(desc->callback_param);
-
-			last_cookie = desc->cookie;
-			dma_run_dependencies(desc);
-		}
-
-		/* Free descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		list_splice_tail_init(&list, &mchan->free);
-		mchan->completed_cookie = last_cookie;
-		spin_unlock_irqrestore(&mchan->lock, flags);
-	}
+	mpc_dma_process_completed(mdma);
 }
 
 /* Submit descriptor to hardware */
@@ -563,6 +580,7 @@
 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 					size_t len, unsigned long flags)
 {
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	struct mpc_dma_desc *mdesc = NULL;
 	struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@
 	}
 	spin_unlock_irqrestore(&mchan->lock, iflags);
 
-	if (!mdesc)
+	if (!mdesc) {
+		/* try to free completed descriptors */
+		mpc_dma_process_completed(mdma);
 		return NULL;
+	}
 
 	mdesc->error = 0;
 	tcd = mdesc->tcd;
@@ -591,7 +612,8 @@
 		tcd->dsize = MPC_DMA_TSIZE_32;
 		tcd->soff = 32;
 		tcd->doff = 32;
-	} else if (IS_ALIGNED(src | dst | len, 16)) {
+	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+		/* MPC8308 doesn't support 16 byte transfers */
 		tcd->ssize = MPC_DMA_TSIZE_16;
 		tcd->dsize = MPC_DMA_TSIZE_16;
 		tcd->soff = 16;
@@ -651,6 +673,15 @@
 		return -EINVAL;
 	}
 
+	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+		mdma->is_mpc8308 = 1;
+		mdma->irq2 = irq_of_parse_and_map(dn, 1);
+		if (mdma->irq2 == NO_IRQ) {
+			dev_err(dev, "Error mapping IRQ!\n");
+			return -EINVAL;
+		}
+	}
+
 	retval = of_address_to_resource(dn, 0, &res);
 	if (retval) {
 		dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@
 		return -EINVAL;
 	}
 
+	if (mdma->is_mpc8308) {
+		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
+				DRV_NAME, mdma);
+		if (retval) {
+			dev_err(dev, "Error requesting IRQ2!\n");
+			return -EINVAL;
+		}
+	}
+
 	spin_lock_init(&mdma->error_status_lock);
 
 	dma = &mdma->dma;
 	dma->dev = dev;
-	dma->chancnt = MPC_DMA_CHANNELS;
+	if (!mdma->is_mpc8308)
+		dma->chancnt = MPC_DMA_CHANNELS;
+	else
+		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@
 	 * - Round-robin group arbitration,
 	 * - Round-robin channel arbitration.
 	 */
-	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
-				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+	if (!mdma->is_mpc8308) {
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
 
-	/* Disable hardware DMA requests */
-	out_be32(&mdma->regs->dmaerqh, 0);
-	out_be32(&mdma->regs->dmaerql, 0);
+		/* Disable hardware DMA requests */
+		out_be32(&mdma->regs->dmaerqh, 0);
+		out_be32(&mdma->regs->dmaerql, 0);
 
-	/* Disable error interrupts */
-	out_be32(&mdma->regs->dmaeeih, 0);
-	out_be32(&mdma->regs->dmaeeil, 0);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeih, 0);
+		out_be32(&mdma->regs->dmaeeil, 0);
 
-	/* Clear interrupts status */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
 
-	/* Route interrupts to IPIC */
-	out_be32(&mdma->regs->dmaihsa, 0);
-	out_be32(&mdma->regs->dmailsa, 0);
+		/* Route interrupts to IPIC */
+		out_be32(&mdma->regs->dmaihsa, 0);
+		out_be32(&mdma->regs->dmailsa, 0);
+	} else {
+		/* MPC8308 has 16 channels and lacks some registers */
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+		/* enable snooping */
+		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmaintl, 0xFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+	}
 
 	/* Register DMA engine */
 	dev_set_drvdata(dev, mdma);
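[Editor's note: condensation of the hunks above, using the driver's own register and flag names; error-interrupt and request-disable writes are omitted.] MPC8308 support is keyed off the "fsl,mpc8308-dma" compatible: that controller has only 16 channels, a second interrupt line, no "high"/err-high registers, and a general-purpose register reused as the snoop enable. The init-time split roughly reduces to:

/* sketch: conditional controller init depending on the detected flavour */
static void init_regs(struct mpc_dma *mdma)
{
	if (!mdma->is_mpc8308) {
		/* MPC512x: 64 channels, group arbitration, IPIC routing */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
			 MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
	} else {
		/* MPC8308: 16 channels, no "high" registers, snooping on */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
	}
}
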
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index a0069c1..28720d3 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1110,11 +1110,6 @@
 	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
 
-	/* Wire up NMI handling before bringing the controller online */
-	err = register_die_notifier(&sh_dmae_nmi_notifier);
-	if (err)
-		goto notifier_err;
-
 	/* reset dma controller */
 	err = sh_dmae_rst(shdev);
 	if (err)
@@ -1218,8 +1213,6 @@
 eirq_err:
 #endif
 rst_err:
-	unregister_die_notifier(&sh_dmae_nmi_notifier);
-notifier_err:
 	spin_lock_irqsave(&sh_dmae_lock, flags);
 	list_del_rcu(&shdev->node);
 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
@@ -1252,8 +1245,6 @@
 	if (errirq > 0)
 		free_irq(errirq, shdev);
 
-	unregister_die_notifier(&sh_dmae_nmi_notifier);
-
 	spin_lock_irqsave(&sh_dmae_lock, flags);
 	list_del_rcu(&shdev->node);
 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
@@ -1296,6 +1287,11 @@
 
 static int __init sh_dmae_init(void)
 {
+	/* Wire up NMI handling */
+	int err = register_die_notifier(&sh_dmae_nmi_notifier);
+	if (err)
+		return err;
+
 	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
 }
 module_init(sh_dmae_init);
@@ -1303,6 +1299,8 @@
 static void __exit sh_dmae_exit(void)
 {
 	platform_driver_unregister(&sh_dmae_driver);
+
+	unregister_die_notifier(&sh_dmae_nmi_notifier);
 }
 module_exit(sh_dmae_exit);
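[Editor's note: illustrative sketch.] The NMI die-notifier is now registered once in sh_dmae_init() and unregistered in sh_dmae_exit(), instead of per device in probe/remove, which avoids double registration when more than one controller probes. A generic sketch of the module-scope pairing with hypothetical names (this version also unwinds on failure, which the patch itself does not need to):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>

static int my_nmi_handler(struct notifier_block *nb, unsigned long cmd, void *v)
{
	return NOTIFY_DONE;
}

static struct notifier_block my_nmi_notifier = {
	.notifier_call = my_nmi_handler,
};

static struct platform_driver my_driver;	/* filled in elsewhere */

/* register the notifier once per module, not once per probed device */
static int __init my_init(void)
{
	int err = register_die_notifier(&my_nmi_notifier);

	if (err)
		return err;

	err = platform_driver_register(&my_driver);
	if (err)
		unregister_die_notifier(&my_nmi_notifier);
	return err;
}

static void __exit my_exit(void)
{
	platform_driver_unregister(&my_driver);
	unregister_die_notifier(&my_nmi_notifier);
}

module_init(my_init);
module_exit(my_exit);
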
 
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index f436a2f..fe70a34 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -75,11 +75,11 @@
 	bool
 
 config EDAC_AMD64
-	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
-	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE
+	tristate "AMD64 (Opteron, Athlon64) K8, F10h"
+	depends on EDAC_MM_EDAC && AMD_NB && X86_64 && EDAC_DECODE_MCE
 	help
-	  Support for error detection and correction on the AMD 64
-	  Families of Memory Controllers (K8, F10h and F11h)
+	  Support for error detection and correction of DRAM ECC errors on
+	  the AMD64 families of memory controllers (K8 and F10h)
 
 config EDAC_AMD64_ERROR_INJECTION
 	bool "Sysfs HW Error injection facilities"
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index df21118..4a5ecc5 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,10 +15,14 @@
 
 static struct msr __percpu *msrs;
 
-/* Lookup table for all possible MC control instances */
-struct amd64_pvt;
-static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
-static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
+/*
+ * count successfully initialized driver instances for setup_pci_device()
+ */
+static atomic_t drv_instances = ATOMIC_INIT(0);
+
+/* Per-node driver instances */
+static struct mem_ctl_info **mcis;
+static struct ecc_settings **ecc_stngs;
 
 /*
  * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
@@ -62,7 +66,7 @@
 			   [5 ... 6]	= 1024,
 			   [7 ... 8]	= 2048,
 			   [9 ... 10]	= 4096,
-			   [11]	= 8192,
+			   [11]		= 8192,
 };
 
 /*
@@ -73,7 +77,11 @@
  *FIXME: Produce a better mapping/linearisation.
  */
 
-struct scrubrate scrubrates[] = {
+
+struct scrubrate {
+	u32 scrubval;		/* bit pattern for scrub rate */
+	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
+} scrubrates[] = {
 	{ 0x01, 1600000000UL},
 	{ 0x02, 800000000UL},
 	{ 0x03, 400000000UL},
@@ -117,8 +125,7 @@
  * scan the scrub rate mapping table for a close or matching bandwidth value to
  * issue. If requested is too big, then use last maximum value found.
  */
-static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
-				       u32 min_scrubrate)
+static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 {
 	u32 scrubval;
 	int i;
@@ -134,7 +141,7 @@
 		 * skip scrub rates which aren't recommended
 		 * (see F10 BKDG, F3x58)
 		 */
-		if (scrubrates[i].scrubval < min_scrubrate)
+		if (scrubrates[i].scrubval < min_rate)
 			continue;
 
 		if (scrubrates[i].bandwidth <= new_bw)
@@ -148,64 +155,41 @@
 	}
 
 	scrubval = scrubrates[i].scrubval;
-	if (scrubval)
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Setting scrub rate bandwidth: %u\n",
-			    scrubrates[i].bandwidth);
-	else
-		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
 
 	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
 
+	if (scrubval)
+		return scrubrates[i].bandwidth;
+
 	return 0;
 }
 
-static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
+static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
-	u32 min_scrubrate = 0x0;
 
-	switch (boot_cpu_data.x86) {
-	case 0xf:
-		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
-		break;
-	case 0x10:
-		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
-		break;
-	case 0x11:
-		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
-		break;
-
-	default:
-		amd64_printk(KERN_ERR, "Unsupported family!\n");
-		return -EINVAL;
-	}
-	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth,
-					   min_scrubrate);
+	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
 }
 
-static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i;
+	int i, retval = -EINVAL;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
+	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
-	edac_printk(KERN_DEBUG, EDAC_MC,
-		    "pci-read, sdram scrub control value: %d \n", scrubval);
+	amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
 
 	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
 		if (scrubrates[i].scrubval == scrubval) {
-			*bw = scrubrates[i].bandwidth;
-			status = 0;
+			retval = scrubrates[i].bandwidth;
 			break;
 		}
 	}
-
-	return status;
+	return retval;
 }
 
 /* Map from a CSROW entry to the mask entry that operates on it */
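
The __amd64_set_scrub_rate() hunk above walks the scrubrates[] table for the closest bandwidth at or below the request, skips scrub values under the per-family minimum, writes the selected value to the scrub control register, and now returns the bandwidth actually in effect rather than a bare 0. A standalone sketch of that selection logic follows; only a few table entries are reproduced, the trailing "scrubbing off" entry is an assumption based on the driver's fallback behaviour, and the register write is a printf.

#include <stdint.h>
#include <stdio.h>

struct scrubrate {
	uint32_t scrubval;	/* bit pattern for scrub rate */
	uint32_t bandwidth;	/* bandwidth consumed (bytes/sec) */
};

/* A few entries copied from the hunk above, plus an assumed "off" fallback. */
static const struct scrubrate scrubrates[] = {
	{ 0x01, 1600000000UL },
	{ 0x02,  800000000UL },
	{ 0x03,  400000000UL },
	{ 0x00,           0UL },
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Returns the bandwidth actually selected; 0 means scrubbing is turned off. */
static uint32_t set_scrub_rate(uint32_t new_bw, uint32_t min_rate)
{
	size_t i, chosen = ARRAY_SIZE(scrubrates) - 1;	/* default: off */

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/* skip scrub rates below the per-family minimum */
		if (scrubrates[i].scrubval && scrubrates[i].scrubval < min_rate)
			continue;

		/* entries are sorted by descending bandwidth: first match wins */
		if (scrubrates[i].bandwidth <= new_bw) {
			chosen = i;
			break;
		}
	}

	printf("mock register write: scrubval 0x%02x\n",
	       scrubrates[chosen].scrubval);

	return scrubrates[chosen].scrubval ? scrubrates[chosen].bandwidth : 0;
}

int main(void)
{
	printf("requested 900 MB/s -> %u bytes/sec\n",
	       set_scrub_rate(900000000UL, 0x01));
	printf("requested 1 byte/s -> %u bytes/sec\n",
	       set_scrub_rate(1UL, 0x01));
	return 0;
}
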
@@ -314,9 +298,7 @@
 	if (unlikely((intlv_en != 0x01) &&
 		     (intlv_en != 0x03) &&
 		     (intlv_en != 0x07))) {
-		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
-			     "IntlvEn field of DRAM Base Register for node 0: "
-			     "this probably indicates a BIOS bug.\n", intlv_en);
+		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
 		return NULL;
 	}
 
@@ -332,11 +314,9 @@
 
 	/* sanity test for sys_addr */
 	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
-		amd64_printk(KERN_WARNING,
-			     "%s(): sys_addr 0x%llx falls outside base/limit "
-			     "address range for node %d with node interleaving "
-			     "enabled.\n",
-			     __func__, sys_addr, node_id);
+		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
+			   "range for node %d with node interleaving enabled.\n",
+			   __func__, sys_addr, node_id);
 		return NULL;
 	}
 
@@ -788,9 +768,8 @@
 	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
 
 	if (csrow == -1)
-		amd64_mc_printk(mci, KERN_ERR,
-			     "Failed to translate InputAddr to csrow for "
-			     "address 0x%lx\n", (unsigned long)sys_addr);
+		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
+				  "address 0x%lx\n", (unsigned long)sys_addr);
 	return csrow;
 }
 
@@ -801,21 +780,6 @@
 	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
 }
 
-static void amd64_cpu_display_info(struct amd64_pvt *pvt)
-{
-	if (boot_cpu_data.x86 == 0x11)
-		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
-	else if (boot_cpu_data.x86 == 0x10)
-		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
-	else if (boot_cpu_data.x86 == 0xf)
-		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-			(pvt->ext_model >= K8_REV_F) ?
-			"Rev F or later" : "Rev E or earlier");
-	else
-		/* we'll hardly ever ever get here */
-		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
-}
-
 /*
  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
  * are ECC capable.
@@ -893,8 +857,7 @@
 		return;
 	}
 
-	amd64_printk(KERN_INFO, "using %s syndromes.\n",
-		     ((pvt->syn_type == 8) ? "x8" : "x4"));
+	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
@@ -915,10 +878,10 @@
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
+	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
 
 	if (boot_cpu_data.x86 >= 0x10)
-		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
+		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -965,14 +928,8 @@
 		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
 		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
-
-		if (boot_cpu_data.x86 == 0x11) {
-			pvt->cs_count = 4;
-			pvt->num_dcsm = 2;
-		} else {
-			pvt->cs_count = 8;
-			pvt->num_dcsm = 4;
-		}
+		pvt->cs_count		= 8;
+		pvt->num_dcsm		= 4;
 	}
 }
 
@@ -987,14 +944,14 @@
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
+		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
 			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+			if (!amd64_read_pci_cfg(pvt->F2, reg,
 						&pvt->dcsb1[cs]))
 				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
@@ -1005,14 +962,14 @@
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
+		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
 			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+			if (!amd64_read_pci_cfg(pvt->F2, reg,
 						&pvt->dcsm1[cs]))
 				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
@@ -1022,7 +979,7 @@
 	}
 }
 
-static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
+static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
 {
 	enum mem_type type;
 
@@ -1035,7 +992,7 @@
 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
 	}
 
-	debugf1("  Memory type is: %s\n", edac_mem_types[type]);
+	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
 
 	return type;
 }
@@ -1053,17 +1010,16 @@
 {
 	int flag, err = 0;
 
-	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
 	if (err)
 		return err;
 
-	if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
+	if (pvt->ext_model >= K8_REV_F)
 		/* RevF (NPT) and later */
 		flag = pvt->dclr0 & F10_WIDTH_128;
-	} else {
+	else
 		/* RevE and earlier */
 		flag = pvt->dclr0 & REVE_WIDTH_128;
-	}
 
 	/* not used */
 	pvt->dclr1 = 0;
@@ -1090,14 +1046,14 @@
 	u32 low;
 	u32 off = dram << 3;	/* 8 bytes between DRAM entries */
 
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
+	amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
 
 	/*
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1127,9 +1083,8 @@
 			 * 2 DIMMs is in error. So we need to ID 'both' of them
 			 * as suspect.
 			 */
-			amd64_mc_printk(mci, KERN_WARNING,
-					"unknown syndrome 0x%04x - possible "
-					"error reporting race\n", syndrome);
+			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
+					   "error reporting race\n", syndrome);
 			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 			return;
 		}
@@ -1151,8 +1106,7 @@
 	 */
 	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
-		amd64_mc_printk(mci, KERN_ERR,
-			     "failed to map error address 0x%lx to a node\n",
+		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
 			     (unsigned long)sys_addr);
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
@@ -1220,7 +1174,7 @@
 	 * both controllers since DIMMs can be placed in either one.
 	 */
 	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
-		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
+		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
 			goto err_reg;
 
 		for (j = 0; j < 4; j++) {
@@ -1234,7 +1188,7 @@
 	if (channels > 2)
 		channels = 2;
 
-	debugf0("MCT channel count: %d\n", channels);
+	amd64_info("MCT channel count: %d\n", channels);
 
 	return channels;
 
@@ -1255,31 +1209,6 @@
 	return dbam_map[cs_mode];
 }
 
-/* Enable extended configuration access via 0xCF8 feature */
-static void amd64_setup(struct amd64_pvt *pvt)
-{
-	u32 reg;
-
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
-
-	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
-	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
-}
-
-/* Restore the extended configuration access via 0xCF8 feature */
-static void amd64_teardown(struct amd64_pvt *pvt)
-{
-	u32 reg;
-
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
-
-	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	if (pvt->flags.cf8_extcfg)
-		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
-}
-
 static u64 f10_get_error_address(struct mem_ctl_info *mci,
 			struct err_regs *info)
 {
@@ -1301,10 +1230,8 @@
 	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
 
 	/* read the 'raw' DRAM BASE Address register */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
-
-	/* Read from the ECS data register */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
+	amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
+	amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
 
 	/* Extract parts into separate data entries */
 	pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1321,10 +1248,8 @@
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
 
 	/* read the 'raw' LIMIT registers */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
-
-	/* Read from the ECS data register for the HIGH portion */
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
+	amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
+	amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
 
 	pvt->dram_DstNode[dram] = (low_limit & 0x7);
 	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1341,7 +1266,7 @@
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
-	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
 				&pvt->dram_ctl_select_low)) {
 		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
 			"High range addresses at: 0x%x\n",
@@ -1367,7 +1292,7 @@
 			dct_sel_interleave_addr(pvt));
 	}
 
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
 			   &pvt->dram_ctl_select_high);
 }
 
@@ -1496,7 +1421,7 @@
 	int cs_found = -EINVAL;
 	int csrow;
 
-	mci = mci_lookup[nid];
+	mci = mcis[nid];
 	if (!mci)
 		return cs_found;
 
@@ -1738,28 +1663,17 @@
 		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
 			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
-		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
-			    dimm * 2,     size0 << factor,
-			    dimm * 2 + 1, size1 << factor);
+		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+				dimm * 2,     size0 << factor,
+				dimm * 2 + 1, size1 << factor);
 	}
 }
 
-/*
- * There currently are 3 types type of MC devices for AMD Athlon/Opterons
- * (as per PCI DEVICE_IDs):
- *
- * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
- * DEVICE ID, even though there is differences between the different Revisions
- * (CG,D,E,F).
- *
- * Family F10h and F11h.
- *
- */
 static struct amd64_family_type amd64_family_types[] = {
 	[K8_CPUS] = {
-		.ctl_name = "RevF",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+		.ctl_name = "K8",
+		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
+		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
 		.ops = {
 			.early_channel_count	= k8_early_channel_count,
 			.get_error_address	= k8_get_error_address,
@@ -1769,22 +1683,9 @@
 		}
 	},
 	[F10_CPUS] = {
-		.ctl_name = "Family 10h",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
-		.ops = {
-			.early_channel_count	= f10_early_channel_count,
-			.get_error_address	= f10_get_error_address,
-			.read_dram_base_limit	= f10_read_dram_base_limit,
-			.read_dram_ctl_register	= f10_read_dram_ctl_register,
-			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
-			.dbam_to_cs		= f10_dbam_to_chip_select,
-		}
-	},
-	[F11_CPUS] = {
-		.ctl_name = "Family 11h",
-		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
-		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
+		.ctl_name = "F10h",
+		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
 		.ops = {
 			.early_channel_count	= f10_early_channel_count,
 			.get_error_address	= f10_get_error_address,
@@ -1970,8 +1871,7 @@
 					  ARRAY_SIZE(x4_vectors),
 					  pvt->syn_type);
 	else {
-		amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n",
-					   __func__, pvt->syn_type);
+		amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
 		return err_sym;
 	}
 
@@ -1989,17 +1889,15 @@
 	u64 sys_addr;
 
 	/* Ensure that the Error Address is VALID */
-	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-		amd64_mc_printk(mci, KERN_ERR,
-			"HW has no ERROR_ADDRESS available\n");
+	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
 	}
 
 	sys_addr = pvt->ops->get_error_address(mci, info);
 
-	amd64_mc_printk(mci, KERN_ERR,
-		"CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
+	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
 
 	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
 }
@@ -2016,9 +1914,8 @@
 
 	log_mci = mci;
 
-	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-		amd64_mc_printk(mci, KERN_CRIT,
-			"HW has no ERROR_ADDRESS available\n");
+	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 		return;
 	}
@@ -2031,9 +1928,8 @@
 	 */
 	src_mci = find_mc_by_sys_addr(mci, sys_addr);
 	if (!src_mci) {
-		amd64_mc_printk(mci, KERN_CRIT,
-			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
-			(unsigned long)sys_addr);
+		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
+				  (unsigned long)sys_addr);
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 		return;
 	}
@@ -2042,9 +1938,8 @@
 
 	csrow = sys_addr_to_csrow(log_mci, sys_addr);
 	if (csrow < 0) {
-		amd64_mc_printk(mci, KERN_CRIT,
-			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
-			(unsigned long)sys_addr);
+		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
+				  (unsigned long)sys_addr);
 		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
 	} else {
 		error_address_to_page_and_offset(sys_addr, &page, &offset);
@@ -2055,8 +1950,8 @@
 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
 					    struct err_regs *info)
 {
-	u32 ec  = ERROR_CODE(info->nbsl);
-	u32 xec = EXT_ERROR_CODE(info->nbsl);
+	u16 ec = EC(info->nbsl);
+	u8 xec = XEC(info->nbsl, 0x1f);
 	int ecc_type = (info->nbsh >> 13) & 0x3;
 
 	/* Bail early out if this was an 'observed' error */
@@ -2075,7 +1970,7 @@
 
 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
 {
-	struct mem_ctl_info *mci = mci_lookup[node_id];
+	struct mem_ctl_info *mci = mcis[node_id];
 	struct err_regs regs;
 
 	regs.nbsl  = (u32) m->status;
@@ -2099,75 +1994,50 @@
 }
 
 /*
- * Input:
- *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
- *	2) AMD Family index value
- *
- * Ouput:
- *	Upon return of 0, the following filled in:
- *
- *		struct pvt->addr_f1_ctl
- *		struct pvt->misc_f3_ctl
- *
- *	Filled in with related device funcitions of 'dram_f2_ctl'
- *	These devices are "reserved" via the pci_get_device()
- *
- *	Upon return of 1 (error status):
- *
- *		Nothing reserved
+ * Use pvt->F2 which contains the F2 CPU PCI device to get the related
+ * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
  */
-static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
+static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
 {
-	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
-
 	/* Reserve the ADDRESS MAP Device */
-	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-						    amd64_dev->addr_f1_ctl,
-						    pvt->dram_f2_ctl);
-
-	if (!pvt->addr_f1_ctl) {
-		amd64_printk(KERN_ERR, "error address map device not found: "
-			     "vendor %x device 0x%x (broken BIOS?)\n",
-			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
-		return 1;
+	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
+	if (!pvt->F1) {
+		amd64_err("error address map device not found: "
+			  "vendor %x device 0x%x (broken BIOS?)\n",
+			  PCI_VENDOR_ID_AMD, f1_id);
+		return -ENODEV;
 	}
 
 	/* Reserve the MISC Device */
-	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-						    amd64_dev->misc_f3_ctl,
-						    pvt->dram_f2_ctl);
+	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
+	if (!pvt->F3) {
+		pci_dev_put(pvt->F1);
+		pvt->F1 = NULL;
 
-	if (!pvt->misc_f3_ctl) {
-		pci_dev_put(pvt->addr_f1_ctl);
-		pvt->addr_f1_ctl = NULL;
+		amd64_err("error F3 device not found: "
+			  "vendor %x device 0x%x (broken BIOS?)\n",
+			  PCI_VENDOR_ID_AMD, f3_id);
 
-		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
-			     "vendor %x device 0x%x (broken BIOS?)\n",
-			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
-		return 1;
+		return -ENODEV;
 	}
-
-	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
-		pci_name(pvt->addr_f1_ctl));
-	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
-		pci_name(pvt->dram_f2_ctl));
-	debugf1("    Misc device PCI Bus ID:\t%s\n",
-		pci_name(pvt->misc_f3_ctl));
+	debugf1("F1: %s\n", pci_name(pvt->F1));
+	debugf1("F2: %s\n", pci_name(pvt->F2));
+	debugf1("F3: %s\n", pci_name(pvt->F3));
 
 	return 0;
 }
 
-static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
+static void free_mc_sibling_devs(struct amd64_pvt *pvt)
 {
-	pci_dev_put(pvt->addr_f1_ctl);
-	pci_dev_put(pvt->misc_f3_ctl);
+	pci_dev_put(pvt->F1);
+	pci_dev_put(pvt->F3);
 }
 
 /*
  * Retrieve the hardware registers of the memory controller (this includes the
  * 'Address Map' and 'Misc' device regs)
  */
-static void amd64_read_mc_registers(struct amd64_pvt *pvt)
+static void read_mc_regs(struct amd64_pvt *pvt)
 {
 	u64 msr_val;
 	u32 tmp;
@@ -2188,9 +2058,7 @@
 	} else
 		debugf0("  TOP_MEM2 disabled.\n");
 
-	amd64_cpu_display_info(pvt);
-
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
+	amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
 
 	if (pvt->ops->read_dram_ctl_register)
 		pvt->ops->read_dram_ctl_register(pvt);
@@ -2227,21 +2095,20 @@
 
 	amd64_read_dct_base_mask(pvt);
 
-	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
+	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
 	amd64_read_dbam_reg(pvt);
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl,
-			   F10_ONLINE_SPARE, &pvt->online_spare);
+	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
 
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
-	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
+	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
+	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
 
 	if (boot_cpu_data.x86 >= 0x10) {
 		if (!dct_ganging_enabled(pvt)) {
-			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
-			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
+			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
+			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
 		}
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp);
+		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
 	}
 
 	if (boot_cpu_data.x86 == 0x10 &&
@@ -2321,21 +2188,22 @@
  * Initialize the array of csrow attribute instances, based on the values
  * from pci config hardware registers.
  */
-static int amd64_init_csrows(struct mem_ctl_info *mci)
+static int init_csrows(struct mem_ctl_info *mci)
 {
 	struct csrow_info *csrow;
-	struct amd64_pvt *pvt;
+	struct amd64_pvt *pvt = mci->pvt_info;
 	u64 input_addr_min, input_addr_max, sys_addr;
+	u32 val;
 	int i, empty = 1;
 
-	pvt = mci->pvt_info;
+	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
+	pvt->nbcfg = val;
+	pvt->ctl_error_info.nbcfg = val;
 
-	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
-		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
-		);
+	debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+		pvt->mc_node_id, val,
+		!!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
 
 	for (i = 0; i < pvt->cs_count; i++) {
 		csrow = &mci->csrows[i];
@@ -2359,7 +2227,7 @@
 		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
 		/* 8 bytes of resolution */
 
-		csrow->mtype = amd64_determine_memory_type(pvt);
+		csrow->mtype = amd64_determine_memory_type(pvt, i);
 
 		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
 		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
@@ -2404,8 +2272,7 @@
 	bool ret = false;
 
 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
-		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-			     __func__);
+		amd64_warn("%s: Error allocating mask\n", __func__);
 		return false;
 	}
 
@@ -2431,18 +2298,17 @@
 	return ret;
 }
 
-static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
 {
 	cpumask_var_t cmask;
 	int cpu;
 
 	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
-		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
-			     __func__);
+		amd64_warn("%s: error allocating mask\n", __func__);
 		return false;
 	}
 
-	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+	get_cpus_on_this_dct_cpumask(cmask, nid);
 
 	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
 
@@ -2452,14 +2318,14 @@
 
 		if (on) {
 			if (reg->l & K8_MSR_MCGCTL_NBE)
-				pvt->flags.nb_mce_enable = 1;
+				s->flags.nb_mce_enable = 1;
 
 			reg->l |= K8_MSR_MCGCTL_NBE;
 		} else {
 			/*
 			 * Turn off NB MCE reporting only when it was off before
 			 */
-			if (!pvt->flags.nb_mce_enable)
+			if (!s->flags.nb_mce_enable)
 				reg->l &= ~K8_MSR_MCGCTL_NBE;
 		}
 	}
@@ -2470,92 +2336,92 @@
 	return 0;
 }
 
-static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+				       struct pci_dev *F3)
 {
-	struct amd64_pvt *pvt = mci->pvt_info;
+	bool ret = true;
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	if (toggle_ecc_err_reporting(s, nid, ON)) {
+		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
+		return false;
+	}
 
-	/* turn on UECCn and CECCEn bits */
-	pvt->old_nbctl = value & mask;
-	pvt->nbctl_mcgctl_saved = 1;
+	amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+
+	/* turn on UECCEn and CECCEn bits */
+	s->old_nbctl   = value & mask;
+	s->nbctl_valid = true;
 
 	value |= mask;
-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	pci_write_config_dword(F3, K8_NBCTL, value);
 
-	if (amd64_toggle_ecc_err_reporting(pvt, ON))
-		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
-					   "MCGCTL!\n");
+	amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
-
-	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
-		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+	debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+		nid, value,
+		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
 
 	if (!(value & K8_NBCFG_ECC_ENABLE)) {
-		amd64_printk(KERN_WARNING,
-			"This node reports that DRAM ECC is "
-			"currently Disabled; ENABLING now\n");
+		amd64_warn("DRAM ECC disabled on this node, enabling...\n");
 
-		pvt->flags.nb_ecc_prev = 0;
+		s->flags.nb_ecc_prev = 0;
 
 		/* Attempt to turn on DRAM ECC Enable */
 		value |= K8_NBCFG_ECC_ENABLE;
-		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+		pci_write_config_dword(F3, K8_NBCFG, value);
 
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
 		if (!(value & K8_NBCFG_ECC_ENABLE)) {
-			amd64_printk(KERN_WARNING,
-				"Hardware rejects Enabling DRAM ECC checking\n"
-				"Check memory DIMM configuration\n");
+			amd64_warn("Hardware rejected DRAM ECC enable,"
+				   "check memory DIMM configuration.\n");
+			ret = false;
 		} else {
-			amd64_printk(KERN_DEBUG,
-				"Hardware accepted DRAM ECC Enable\n");
+			amd64_info("Hardware accepted DRAM ECC Enable\n");
 		}
 	} else {
-		pvt->flags.nb_ecc_prev = 1;
+		s->flags.nb_ecc_prev = 1;
 	}
 
-	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
-		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+	debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+		nid, value,
+		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
 
-	pvt->ctl_error_info.nbcfg = value;
+	return ret;
 }
 
-static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
+static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+					struct pci_dev *F3)
 {
 	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
-	if (!pvt->nbctl_mcgctl_saved)
+	if (!s->nbctl_valid)
 		return;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
+	amd64_read_pci_cfg(F3, K8_NBCTL, &value);
 	value &= ~mask;
-	value |= pvt->old_nbctl;
+	value |= s->old_nbctl;
 
-	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	pci_write_config_dword(F3, K8_NBCTL, value);
 
-	/* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
-	if (!pvt->flags.nb_ecc_prev) {
-		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
+	if (!s->flags.nb_ecc_prev) {
+		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 		value &= ~K8_NBCFG_ECC_ENABLE;
-		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+		pci_write_config_dword(F3, K8_NBCFG, value);
 	}
 
 	/* restore the NB Enable MCGCTL bit */
-	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
-		amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
+	if (toggle_ecc_err_reporting(s, nid, OFF))
+		amd64_warn("Error restoring NB MCGCTL settings!\n");
 }
 
 /*
- * EDAC requires that the BIOS have ECC enabled before taking over the
- * processing of ECC errors. This is because the BIOS can properly initialize
- * the memory system completely. A command line option allows to force-enable
- * hardware ECC later in amd64_enable_ecc_error_reporting().
+ * EDAC requires that the BIOS have ECC enabled before
+ * taking over the processing of ECC errors. A command line
+ * option allows forcing hardware ECC on later in
+ * enable_ecc_error_reporting().
  */
 static const char *ecc_msg =
 	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
@@ -2563,38 +2429,28 @@
 	"'ecc_enable_override'.\n"
 	" (Note that use of the override may cause unknown side effects.)\n";
 
-static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+static bool ecc_enabled(struct pci_dev *F3, u8 nid)
 {
 	u32 value;
-	u8 ecc_enabled = 0;
+	u8 ecc_en = 0;
 	bool nb_mce_en = false;
 
-	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+	amd64_read_pci_cfg(F3, K8_NBCFG, &value);
 
-	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
-	if (!ecc_enabled)
-		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
-			     "is currently disabled, set F3x%x[22] (%s).\n",
-			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
-	else
-		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
+	ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
 
-	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
+	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
 	if (!nb_mce_en)
-		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
+		amd64_notice("NB MCE bank disabled, set MSR "
 			     "0x%08x[4] on node %d to enable.\n",
-			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
+			     MSR_IA32_MCG_CTL, nid);
 
-	if (!ecc_enabled || !nb_mce_en) {
-		if (!ecc_enable_override) {
-			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
-			return -ENODEV;
-		} else {
-			amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
-		}
+	if (!ecc_en || !nb_mce_en) {
+		amd64_notice("%s", ecc_msg);
+		return false;
 	}
-
-	return 0;
+	return true;
 }
 
 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
@@ -2603,22 +2459,23 @@
 
 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
 
-static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
 {
 	unsigned int i = 0, j = 0;
 
 	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
 		sysfs_attrs[i] = amd64_dbg_attrs[i];
 
-	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
-		sysfs_attrs[i] = amd64_inj_attrs[j];
+	if (boot_cpu_data.x86 >= 0x10)
+		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
+			sysfs_attrs[i] = amd64_inj_attrs[j];
 
 	sysfs_attrs[i] = terminator;
 
 	mci->mc_driver_sysfs_attributes = sysfs_attrs;
 }
 
-static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 
@@ -2634,8 +2491,8 @@
 	mci->edac_cap		= amd64_determine_edac_cap(pvt);
 	mci->mod_name		= EDAC_MOD_STR;
 	mci->mod_ver		= EDAC_AMD64_VERSION;
-	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
-	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
+	mci->ctl_name		= pvt->ctl_name;
+	mci->dev_name		= pci_name(pvt->F2);
 	mci->ctl_page_to_phys	= NULL;
 
 	/* memory scrubber interface */
@@ -2644,111 +2501,94 @@
 }
 
 /*
- * Init stuff for this DRAM Controller device.
- *
- * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
- * Space feature MUST be enabled on ALL Processors prior to actually reading
- * from the ECS registers. Since the loading of the module can occur on any
- * 'core', and cores don't 'see' all the other processors ECS data when the
- * others are NOT enabled. Our solution is to first enable ECS access in this
- * routine on all processors, gather some data in a amd64_pvt structure and
- * later come back in a finish-setup function to perform that final
- * initialization. See also amd64_init_2nd_stage() for that.
+ * returns a pointer to the family descriptor on success, NULL otherwise.
  */
-static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
-				    int mc_type_index)
+static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
+{
+	u8 fam = boot_cpu_data.x86;
+	struct amd64_family_type *fam_type = NULL;
+
+	switch (fam) {
+	case 0xf:
+		fam_type		= &amd64_family_types[K8_CPUS];
+		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
+		pvt->ctl_name		= fam_type->ctl_name;
+		pvt->min_scrubrate	= K8_MIN_SCRUB_RATE_BITS;
+		break;
+	case 0x10:
+		fam_type		= &amd64_family_types[F10_CPUS];
+		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
+		pvt->ctl_name		= fam_type->ctl_name;
+		pvt->min_scrubrate	= F10_MIN_SCRUB_RATE_BITS;
+		break;
+
+	default:
+		amd64_err("Unsupported family!\n");
+		return NULL;
+	}
+
+	pvt->ext_model = boot_cpu_data.x86_model >> 4;
+
+	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+		     (fam == 0xf ?
+				(pvt->ext_model >= K8_REV_F  ? "revF or later "
+							     : "revE or earlier ")
+				 : ""), pvt->mc_node_id);
+	return fam_type;
+}
+
+static int amd64_init_one_instance(struct pci_dev *F2)
 {
 	struct amd64_pvt *pvt = NULL;
+	struct amd64_family_type *fam_type = NULL;
+	struct mem_ctl_info *mci = NULL;
 	int err = 0, ret;
+	u8 nid = get_node_id(F2);
 
 	ret = -ENOMEM;
 	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
 	if (!pvt)
-		goto err_exit;
+		goto err_ret;
 
-	pvt->mc_node_id = get_node_id(dram_f2_ctl);
+	pvt->mc_node_id	= nid;
+	pvt->F2 = F2;
 
-	pvt->dram_f2_ctl	= dram_f2_ctl;
-	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
-	pvt->mc_type_index	= mc_type_index;
-	pvt->ops		= family_ops(mc_type_index);
+	ret = -EINVAL;
+	fam_type = amd64_per_family_init(pvt);
+	if (!fam_type)
+		goto err_free;
 
-	/*
-	 * We have the dram_f2_ctl device as an argument, now go reserve its
-	 * sibling devices from the PCI system.
-	 */
 	ret = -ENODEV;
-	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
+	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
 	if (err)
 		goto err_free;
 
-	ret = -EINVAL;
-	err = amd64_check_ecc_enabled(pvt);
-	if (err)
-		goto err_put;
-
-	/*
-	 * Key operation here: setup of HW prior to performing ops on it. Some
-	 * setup is required to access ECS data. After this is performed, the
-	 * 'teardown' function must be called upon error and normal exit paths.
-	 */
-	if (boot_cpu_data.x86 >= 0x10)
-		amd64_setup(pvt);
-
-	/*
-	 * Save the pointer to the private data for use in 2nd initialization
-	 * stage
-	 */
-	pvt_lookup[pvt->mc_node_id] = pvt;
-
-	return 0;
-
-err_put:
-	amd64_free_mc_sibling_devices(pvt);
-
-err_free:
-	kfree(pvt);
-
-err_exit:
-	return ret;
-}
-
-/*
- * This is the finishing stage of the init code. Needs to be performed after all
- * MCs' hardware have been prepped for accessing extended config space.
- */
-static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
-{
-	int node_id = pvt->mc_node_id;
-	struct mem_ctl_info *mci;
-	int ret = -ENODEV;
-
-	amd64_read_mc_registers(pvt);
+	read_mc_regs(pvt);
 
 	/*
 	 * We need to determine how many memory channels there are. Then use
 	 * that information for calculating the size of the dynamic instance
-	 * tables in the 'mci' structure
+	 * tables in the 'mci' structure.
 	 */
+	ret = -EINVAL;
 	pvt->channel_count = pvt->ops->early_channel_count(pvt);
 	if (pvt->channel_count < 0)
-		goto err_exit;
+		goto err_siblings;
 
 	ret = -ENOMEM;
-	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
+	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
 	if (!mci)
-		goto err_exit;
+		goto err_siblings;
 
 	mci->pvt_info = pvt;
+	mci->dev = &pvt->F2->dev;
 
-	mci->dev = &pvt->dram_f2_ctl->dev;
-	amd64_setup_mci_misc_attributes(mci);
+	setup_mci_misc_attrs(mci);
 
-	if (amd64_init_csrows(mci))
+	if (init_csrows(mci))
 		mci->edac_cap = EDAC_FLAG_NONE;
 
-	amd64_enable_ecc_error_reporting(mci);
-	amd64_set_mc_sysfs_attributes(mci);
+	set_mc_sysfs_attrs(mci);
 
 	ret = -ENODEV;
 	if (edac_mc_add_mc(mci)) {
@@ -2756,54 +2596,77 @@
 		goto err_add_mc;
 	}
 
-	mci_lookup[node_id] = mci;
-	pvt_lookup[node_id] = NULL;
-
 	/* register stuff with EDAC MCE */
 	if (report_gart_errors)
 		amd_report_gart_errors(true);
 
 	amd_register_ecc_decoder(amd64_decode_bus_error);
 
+	mcis[nid] = mci;
+
+	atomic_inc(&drv_instances);
+
 	return 0;
 
 err_add_mc:
 	edac_mc_free(mci);
 
-err_exit:
-	debugf0("failure to init 2nd stage: ret=%d\n", ret);
+err_siblings:
+	free_mc_sibling_devs(pvt);
 
-	amd64_restore_ecc_error_reporting(pvt);
+err_free:
+	kfree(pvt);
 
-	if (boot_cpu_data.x86 > 0xf)
-		amd64_teardown(pvt);
-
-	amd64_free_mc_sibling_devices(pvt);
-
-	kfree(pvt_lookup[pvt->mc_node_id]);
-	pvt_lookup[node_id] = NULL;
-
+err_ret:
 	return ret;
 }
 
-
-static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
-				 const struct pci_device_id *mc_type)
+static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
+					     const struct pci_device_id *mc_type)
 {
+	u8 nid = get_node_id(pdev);
+	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+	struct ecc_settings *s;
 	int ret = 0;
 
-	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
-		get_amd_family_name(mc_type->driver_data));
-
 	ret = pci_enable_device(pdev);
-	if (ret < 0)
-		ret = -EIO;
-	else
-		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
-
-	if (ret < 0)
+	if (ret < 0) {
 		debugf0("ret=%d\n", ret);
+		return -EIO;
+	}
 
+	ret = -ENOMEM;
+	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
+	if (!s)
+		goto err_out;
+
+	ecc_stngs[nid] = s;
+
+	if (!ecc_enabled(F3, nid)) {
+		ret = -ENODEV;
+
+		if (!ecc_enable_override)
+			goto err_enable;
+
+		amd64_warn("Forcing ECC on!\n");
+
+		if (!enable_ecc_error_reporting(s, nid, F3))
+			goto err_enable;
+	}
+
+	ret = amd64_init_one_instance(pdev);
+	if (ret < 0) {
+		amd64_err("Error probing instance: %d\n", nid);
+		restore_ecc_error_reporting(s, nid, F3);
+	}
+
+	return ret;
+
+err_enable:
+	kfree(s);
+	ecc_stngs[nid] = NULL;
+
+err_out:
 	return ret;
 }
 
@@ -2811,6 +2674,9 @@
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
+	u8 nid = get_node_id(pdev);
+	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+	struct ecc_settings *s = ecc_stngs[nid];
 
 	/* Remove from EDAC CORE tracking list */
 	mci = edac_mc_del_mc(&pdev->dev);
@@ -2819,20 +2685,20 @@
 
 	pvt = mci->pvt_info;
 
-	amd64_restore_ecc_error_reporting(pvt);
+	restore_ecc_error_reporting(s, nid, F3);
 
-	if (boot_cpu_data.x86 > 0xf)
-		amd64_teardown(pvt);
-
-	amd64_free_mc_sibling_devices(pvt);
+	free_mc_sibling_devs(pvt);
 
 	/* unregister from EDAC MCE */
 	amd_report_gart_errors(false);
 	amd_unregister_ecc_decoder(amd64_decode_bus_error);
 
+	kfree(ecc_stngs[nid]);
+	ecc_stngs[nid] = NULL;
+
 	/* Free the EDAC CORE resources */
 	mci->pvt_info = NULL;
-	mci_lookup[pvt->mc_node_id] = NULL;
+	mcis[nid] = NULL;
 
 	kfree(pvt);
 	edac_mc_free(mci);
@@ -2851,7 +2717,6 @@
 		.subdevice	= PCI_ANY_ID,
 		.class		= 0,
 		.class_mask	= 0,
-		.driver_data	= K8_CPUS
 	},
 	{
 		.vendor		= PCI_VENDOR_ID_AMD,
@@ -2860,16 +2725,6 @@
 		.subdevice	= PCI_ANY_ID,
 		.class		= 0,
 		.class_mask	= 0,
-		.driver_data	= F10_CPUS
-	},
-	{
-		.vendor		= PCI_VENDOR_ID_AMD,
-		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.class		= 0,
-		.class_mask	= 0,
-		.driver_data	= F11_CPUS
 	},
 	{0, }
 };
@@ -2877,12 +2732,12 @@
 
 static struct pci_driver amd64_pci_driver = {
 	.name		= EDAC_MOD_STR,
-	.probe		= amd64_init_one_instance,
+	.probe		= amd64_probe_one_instance,
 	.remove		= __devexit_p(amd64_remove_one_instance),
 	.id_table	= amd64_pci_table,
 };
 
-static void amd64_setup_pci_device(void)
+static void setup_pci_device(void)
 {
 	struct mem_ctl_info *mci;
 	struct amd64_pvt *pvt;
@@ -2890,13 +2745,12 @@
 	if (amd64_ctl_pci)
 		return;
 
-	mci = mci_lookup[0];
+	mci = mcis[0];
 	if (mci) {
 
 		pvt = mci->pvt_info;
 		amd64_ctl_pci =
-			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
-						    EDAC_MOD_STR);
+			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
 
 		if (!amd64_ctl_pci) {
 			pr_warning("%s(): Unable to create PCI control\n",
@@ -2910,8 +2764,7 @@
 
 static int __init amd64_edac_init(void)
 {
-	int nb, err = -ENODEV;
-	bool load_ok = false;
+	int err = -ENODEV;
 
 	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
 
@@ -2920,41 +2773,41 @@
 	if (amd_cache_northbridges() < 0)
 		goto err_ret;
 
+	err = -ENOMEM;
+	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
+	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
+	if (!(mcis && ecc_stngs))
+		goto err_ret;
+
 	msrs = msrs_alloc();
 	if (!msrs)
-		goto err_ret;
+		goto err_free;
 
 	err = pci_register_driver(&amd64_pci_driver);
 	if (err)
 		goto err_pci;
 
-	/*
-	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
-	 * amd64_pvt structs. These will be used in the 2nd stage init function
-	 * to finish initialization of the MC instances.
-	 */
 	err = -ENODEV;
-	for (nb = 0; nb < amd_nb_num(); nb++) {
-		if (!pvt_lookup[nb])
-			continue;
+	if (!atomic_read(&drv_instances))
+		goto err_no_instances;
 
-		err = amd64_init_2nd_stage(pvt_lookup[nb]);
-		if (err)
-			goto err_2nd_stage;
+	setup_pci_device();
+	return 0;
 
-		load_ok = true;
-	}
-
-	if (load_ok) {
-		amd64_setup_pci_device();
-		return 0;
-	}
-
-err_2nd_stage:
+err_no_instances:
 	pci_unregister_driver(&amd64_pci_driver);
+
 err_pci:
 	msrs_free(msrs);
 	msrs = NULL;
+
+err_free:
+	kfree(mcis);
+	mcis = NULL;
+
+	kfree(ecc_stngs);
+	ecc_stngs = NULL;
+
 err_ret:
 	return err;
 }
@@ -2966,6 +2819,12 @@
 
 	pci_unregister_driver(&amd64_pci_driver);
 
+	kfree(ecc_stngs);
+	ecc_stngs = NULL;
+
+	kfree(mcis);
+	mcis = NULL;
+
 	msrs_free(msrs);
 	msrs = NULL;
 }
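
amd64_edac_init() above now sizes the per-node mcis[] and ecc_stngs[] pointer arrays from amd_nb_num() instead of a fixed EDAC_MAX_NUMNODES table, and frees both on every failure path. Here is a small userspace sketch of that allocate-two-parallel-arrays / unwind-on-error pattern; the node count and the structures are stand-ins, not the EDAC types.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct mem_ctl_info { int node; };
struct ecc_settings { int old_nbctl; };

/* Per-node driver instances, sized at init time (mirrors mcis/ecc_stngs). */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/* Stand-in for amd_nb_num(): how many northbridge nodes were found. */
static int nb_num(void) { return 4; }

static int example_init(void)
{
	int err = -ENOMEM;

	mcis      = calloc(nb_num(), sizeof(mcis[0]));
	ecc_stngs = calloc(nb_num(), sizeof(ecc_stngs[0]));
	if (!(mcis && ecc_stngs))
		goto err_free;

	/* ... register the PCI driver, probe instances, etc. ... */
	printf("init: %d per-node slots ready\n", nb_num());
	return 0;

err_free:
	/* unwind both arrays; free(NULL) is a no-op, so partial failure is fine */
	free(mcis);
	mcis = NULL;
	free(ecc_stngs);
	ecc_stngs = NULL;
	return err;
}

static void example_exit(void)
{
	free(ecc_stngs);
	ecc_stngs = NULL;
	free(mcis);
	mcis = NULL;
}

int main(void)
{
	if (example_init())
		return 1;
	example_exit();
	return 0;
}
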
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 044aee4..613ec72 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -74,11 +74,26 @@
 #include "edac_core.h"
 #include "mce_amd.h"
 
-#define amd64_printk(level, fmt, arg...) \
-	edac_printk(level, "amd64", fmt, ##arg)
+#define amd64_debug(fmt, arg...) \
+	edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)
 
-#define amd64_mc_printk(mci, level, fmt, arg...) \
-	edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+#define amd64_info(fmt, arg...) \
+	edac_printk(KERN_INFO, "amd64", fmt, ##arg)
+
+#define amd64_notice(fmt, arg...) \
+	edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)
+
+#define amd64_warn(fmt, arg...) \
+	edac_printk(KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_err(fmt, arg...) \
+	edac_printk(KERN_ERR, "amd64", fmt, ##arg)
+
+#define amd64_mc_warn(mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)
+
+#define amd64_mc_err(mci, fmt, arg...) \
+	edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)
 
 /*
  * Throughout the comments in this code, the following terms are used:
@@ -129,11 +144,9 @@
  *         sections 3.5.4 and 3.5.5 for more information.
  */
 
-#define EDAC_AMD64_VERSION		" Ver: 3.3.0 " __DATE__
+#define EDAC_AMD64_VERSION		"v3.3.0"
 #define EDAC_MOD_STR			"amd64_edac"
 
-#define EDAC_MAX_NUMNODES		8
-
 /* Extended Model from CPUID, for CPU Revision numbers */
 #define K8_REV_D			1
 #define K8_REV_E			2
@@ -322,9 +335,6 @@
 #define K8_SCRCTRL			0x58
 
 #define F10_NB_CFG_LOW			0x88
-#define	F10_NB_CFG_LOW_ENABLE_EXT_CFG	BIT(14)
-
-#define F10_NB_CFG_HIGH			0x8C
 
 #define F10_ONLINE_SPARE		0xB0
 #define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
@@ -373,7 +383,6 @@
 enum amd64_chipset_families {
 	K8_CPUS = 0,
 	F10_CPUS,
-	F11_CPUS,
 };
 
 /* Error injection control structure */
@@ -384,16 +393,13 @@
 };
 
 struct amd64_pvt {
+	struct low_ops *ops;
+
 	/* pci_device handles which we utilize */
-	struct pci_dev *addr_f1_ctl;
-	struct pci_dev *dram_f2_ctl;
-	struct pci_dev *misc_f3_ctl;
+	struct pci_dev *F1, *F2, *F3;
 
 	int mc_node_id;		/* MC index of this MC node */
 	int ext_model;		/* extended model value of this node */
-
-	struct low_ops *ops;	/* pointer to per PCI Device ID func table */
-
 	int channel_count;
 
 	/* Raw registers */
@@ -455,27 +461,27 @@
 	/* place to store error injection parameters prior to issue */
 	struct error_injection injection;
 
-	/* Save old hw registers' values before we modified them */
-	u32 nbctl_mcgctl_saved;		/* When true, following 2 are valid */
+	/* DCT per-family scrubrate setting */
+	u32 min_scrubrate;
+
+	/* family name this instance is running on */
+	const char *ctl_name;
+
+};
+
+/*
+ * per-node ECC settings descriptor
+ */
+struct ecc_settings {
 	u32 old_nbctl;
+	bool nbctl_valid;
 
-	/* MC Type Index value: socket F vs Family 10h */
-	u32 mc_type_index;
-
-	/* misc settings */
 	struct flags {
-		unsigned long cf8_extcfg:1;
 		unsigned long nb_mce_enable:1;
 		unsigned long nb_ecc_prev:1;
 	} flags;
 };
 
-struct scrubrate {
-       u32 scrubval;           /* bit pattern for scrub rate */
-       u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
-};
-
-extern struct scrubrate scrubrates[23];
 extern const char *tt_msgs[4];
 extern const char *ll_msgs[4];
 extern const char *rrrr_msgs[16];
@@ -517,23 +523,10 @@
 
 struct amd64_family_type {
 	const char *ctl_name;
-	u16 addr_f1_ctl;
-	u16 misc_f3_ctl;
+	u16 f1_id, f3_id;
 	struct low_ops ops;
 };
 
-static struct amd64_family_type amd64_family_types[];
-
-static inline const char *get_amd_family_name(int index)
-{
-	return amd64_family_types[index].ctl_name;
-}
-
-static inline struct low_ops *family_ops(int index)
-{
-	return &amd64_family_types[index].ops;
-}
-
 static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
 					   u32 *val, const char *func)
 {
@@ -541,8 +534,8 @@
 
 	err = pci_read_config_dword(pdev, offset, val);
 	if (err)
-		amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
-			     func, PCI_FUNC(pdev->devfn), offset);
+		amd64_warn("%s: error reading F%dx%x.\n",
+			   func, PCI_FUNC(pdev->devfn), offset);
 
 	return err;
 }
@@ -556,7 +549,6 @@
  */
 #define K8_MIN_SCRUB_RATE_BITS	0x0
 #define F10_MIN_SCRUB_RATE_BITS	0x5
-#define F11_MIN_SCRUB_RATE_BITS	0x6
 
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 			     u64 *hole_offset, u64 *hole_size);
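
The header hunk above replaces the generic amd64_printk(level, ...) macro with per-level wrappers so call sites no longer spell out the KERN_* level each time. The same variadic-macro pattern in a standalone userspace form, with fprintf standing in for edac_printk:

#include <stdio.h>

/* fprintf stands in for edac_printk(level, "amd64", ...). */
#define amd64_printk(level, fmt, arg...) \
	fprintf(stderr, "%s: amd64: " fmt, level, ##arg)

/* Level-specific wrappers: call sites no longer repeat the level. */
#define amd64_info(fmt, arg...)	amd64_printk("INFO", fmt, ##arg)
#define amd64_warn(fmt, arg...)	amd64_printk("WARNING", fmt, ##arg)
#define amd64_err(fmt, arg...)	amd64_printk("ERR", fmt, ##arg)

int main(void)
{
	amd64_info("DRAM ECC %s.\n", "enabled");
	amd64_warn("Forcing ECC on!\n");	/* works with no varargs too */
	amd64_err("error F3 device not found: vendor %x device 0x%x\n",
		  0x1022, 0x1203);
	return 0;
}
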
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 29f1f7a..688478d 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -23,9 +23,7 @@
 	if (ret != -EINVAL) {
 
 		if (value > 3) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid section 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
 			return -EINVAL;
 		}
 
@@ -58,9 +56,7 @@
 	if (ret != -EINVAL) {
 
 		if (value > 8) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid word 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
 			return -EINVAL;
 		}
 
@@ -92,9 +88,8 @@
 	if (ret != -EINVAL) {
 
 		if (value & 0xFFFF0000) {
-			amd64_printk(KERN_WARNING,
-				     "%s: invalid EccVector: 0x%lx\n",
-				     __func__, value);
+			amd64_warn("%s: invalid EccVector: 0x%lx\n",
+				   __func__, value);
 			return -EINVAL;
 		}
 
@@ -122,15 +117,13 @@
 		/* Form value to choose 16-byte section of cacheline */
 		section = F10_NB_ARRAY_DRAM_ECC |
 				SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-		pci_write_config_dword(pvt->misc_f3_ctl,
-					F10_NB_ARRAY_ADDR, section);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 		word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
 						pvt->injection.bit_map);
 
 		/* Issue 'word' and 'bit' along with the READ request */
-		pci_write_config_dword(pvt->misc_f3_ctl,
-					F10_NB_ARRAY_DATA, word_bits);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
 
@@ -157,15 +150,13 @@
 		/* Form value to choose 16-byte section of cacheline */
 		section = F10_NB_ARRAY_DRAM_ECC |
 				SET_NB_ARRAY_ADDRESS(pvt->injection.section);
-		pci_write_config_dword(pvt->misc_f3_ctl,
-					F10_NB_ARRAY_ADDR, section);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
 
 		word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
 						pvt->injection.bit_map);
 
 		/* Issue 'word' and 'bit' along with the READ request */
-		pci_write_config_dword(pvt->misc_f3_ctl,
-					F10_NB_ARRAY_DATA, word_bits);
+		pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
 
 		debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
 
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
index 60e0d1c..6f8b071 100644
--- a/drivers/edac/amd8131_edac.h
+++ b/drivers/edac/amd8131_edac.h
@@ -99,7 +99,7 @@
 
 /*
  * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
- * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are
+ * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
  * four PCIX Bridges on ATCA-6101 altogether.
  *
  * These PCIX Bridges share the same PCI Device ID and are all of
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index c973004..db1df59 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -47,7 +47,7 @@
 	offset = address & ~PAGE_MASK;
 	syndrome = (ar & 0x000000001fe00000ul) >> 21;
 
-	/* TODO: Decoding of the error addresss */
+	/* TODO: Decoding of the error address */
 	edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
 			  syndrome, 0, chan, "");
 }
@@ -68,7 +68,7 @@
 	pfn = address >> PAGE_SHIFT;
 	offset = address & ~PAGE_MASK;
 
-	/* TODO: Decoding of the error addresss */
+	/* TODO: Decoding of the error address */
 	edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
 }
 
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 1609a19..b9a781c 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -818,9 +818,10 @@
 }
 
 /* Convert current back-ground scrub rate into byte/sec bandwith */
-static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
+	int bw;
 	u32 mscr;
 	u8 si;
 
@@ -832,11 +833,11 @@
 	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
 	    (si == 0)) {
 		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
-		*bw = 0;
+		bw = 0;
 	} else
-		*bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
+		bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
 
-	return 0;
+	return bw;
 }
 
 /* Return 0 for single channel; 1 for dual channel */
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 073f5a0..ec302d4 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -983,11 +983,11 @@
 
 	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
 
-	return 0;
+	return scrubrates[i].bandwidth;
 }
 
 /* Convert current scrub rate value into byte/sec bandwidth */
-static int get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
+static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
 {
 	const struct scrubrate *scrubrates;
 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -1013,10 +1013,8 @@
 			"Invalid sdram scrub control value: 0x%x\n", scrubval);
 		return -1;
 	}
+	return scrubrates[i].bandwidth;
 
-	*bw = scrubrates[i].bandwidth;
-
-	return 0;
 }
 
 /* Return 1 if dual channel mode is active.  Else return 0. */
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 251440c..3d96534 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -68,9 +68,10 @@
 #define EDAC_PCI "PCI"
 #define EDAC_DEBUG "DEBUG"
 
+extern const char *edac_mem_types[];
+
 #ifdef CONFIG_EDAC_DEBUG
 extern int edac_debug_level;
-extern const char *edac_mem_types[];
 
 #define edac_debug_printk(level, fmt, arg...)                           \
 	do {                                                            \
@@ -258,7 +259,7 @@
  *			for single channel are 64 bits, for dual channel 128
  *			bits.
  *
- * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memmory.
+ * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
  *			Motherboards commonly drive two chip-select pins to
  *			a memory stick. A single-ranked stick, will occupy
  *			only one of those rows. The other will be unused.
@@ -386,7 +387,7 @@
 	   representation and converts it to the closest matching
 	   bandwith in bytes/sec.
 	 */
-	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
+	int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
 
 
 	/* pointer to edac checking routine */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 795ea69..a4e9db2 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -76,6 +76,8 @@
 	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
 }
 
+#endif				/* CONFIG_EDAC_DEBUG */
+
 /*
  * keep those in sync with the enum mem_type
  */
@@ -100,8 +102,6 @@
 };
 EXPORT_SYMBOL_GPL(edac_mem_types);
 
-#endif				/* CONFIG_EDAC_DEBUG */
-
 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
  * Adjust 'ptr' so that its alignment is at least as stringent as what the
  * compiler would provide for X and return the aligned result.
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index dce61f7..39d97cf 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -436,56 +436,55 @@
 	return count;
 }
 
-/* memory scrubbing */
+/* Memory scrubbing interface:
+ *
+ * An MC driver can limit the scrubbing bandwidth based on the CPU type.
+ * Therefore, ->set_sdram_scrub_rate should return the actual bandwidth that
+ * was accepted, or 0 when scrubbing is to be disabled.
+ *
+ * A negative value still means that an error has occurred while setting
+ * the scrub rate.
+ */
 static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
 					  const char *data, size_t count)
 {
 	unsigned long bandwidth = 0;
-	int err;
+	int new_bw = 0;
 
-	if (!mci->set_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate setting not implemented!\n");
+	if (!mci->set_sdram_scrub_rate)
 		return -EINVAL;
-	}
 
 	if (strict_strtoul(data, 10, &bandwidth) < 0)
 		return -EINVAL;
 
-	err = mci->set_sdram_scrub_rate(mci, (u32)bandwidth);
-	if (err) {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Failed setting scrub rate to %lu\n", bandwidth);
-		return -EINVAL;
-	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Scrub rate set to: %lu\n", bandwidth);
+	new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
+	if (new_bw >= 0) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw);
 		return count;
 	}
+
+	edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth);
+	return -EINVAL;
 }
 
+/*
+ * ->get_sdram_scrub_rate() return value semantics same as above.
+ */
 static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
 {
-	u32 bandwidth = 0;
-	int err;
+	int bandwidth = 0;
 
-	if (!mci->get_sdram_scrub_rate) {
-		edac_printk(KERN_WARNING, EDAC_MC,
-			    "Memory scrub rate reading not implemented\n");
+	if (!mci->get_sdram_scrub_rate)
 		return -EINVAL;
+
+	bandwidth = mci->get_sdram_scrub_rate(mci);
+	if (bandwidth < 0) {
+		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
+		return bandwidth;
 	}
 
-	err = mci->get_sdram_scrub_rate(mci, &bandwidth);
-	if (err) {
-		edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
-		return err;
-	}
-	else {
-		edac_printk(KERN_DEBUG, EDAC_MC,
-			    "Read scrub rate: %d\n", bandwidth);
-		return sprintf(data, "%d\n", bandwidth);
-	}
+	edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth);
+	return sprintf(data, "%d\n", bandwidth);
 }
 
 /* default attribute files for the MCI object */
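
The edac_mc_sysfs.c hunk above collapses each scrub-rate hook to a single return value: the bandwidth actually in effect (0 when scrubbing is off) or a negative errno, replacing the old status-plus-out-parameter form. A userspace mock of that calling convention follows; the mock_mci structure and the two hooks are illustrative stand-ins, not the EDAC API.

#include <errno.h>
#include <stdio.h>

/* Mock of the two ops as the patch now expects them to behave. */
struct mock_mci {
	int current_bw;
	int (*set_sdram_scrub_rate)(struct mock_mci *mci, unsigned int bw);
	int (*get_sdram_scrub_rate)(struct mock_mci *mci);
};

/* Accepts the request and returns the bandwidth actually programmed. */
static int mock_set(struct mock_mci *mci, unsigned int bw)
{
	if (bw > 1600000000U)
		return -EINVAL;		/* negative errno on failure */
	mci->current_bw = (int)bw;
	return mci->current_bw;		/* >= 0: bandwidth in effect, 0 = off */
}

static int mock_get(struct mock_mci *mci)
{
	return mci->current_bw;
}

/* Mirrors the error handling in mci_sdram_scrub_rate_store()/..._show(). */
static void store_then_show(struct mock_mci *mci, unsigned int requested)
{
	int new_bw = mci->set_sdram_scrub_rate(mci, requested);

	if (new_bw < 0) {
		printf("Error setting scrub rate to: %u\n", requested);
		return;
	}
	printf("Scrub rate set to %d\n", new_bw);
	printf("Read scrub rate: %d\n", mci->get_sdram_scrub_rate(mci));
}

int main(void)
{
	struct mock_mci mci = { 0, mock_set, mock_get };

	store_then_show(&mci, 800000000U);
	store_then_show(&mci, 4000000000U);	/* rejected: over the mock limit */
	return 0;
}
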
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index f459a6c..0448da0 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -611,20 +611,17 @@
 
 	bandwidth = 5900000 * i5100_mc_scrben(dw);
 
-	return 0;
+	return bandwidth;
 }
 
-static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
-				u32 *bandwidth)
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
 {
 	struct i5100_priv *priv = mci->pvt_info;
 	u32 dw;
 
 	pci_read_config_dword(priv->mc, I5100_MC, &dw);
 
-	*bandwidth = 5900000 * i5100_mc_scrben(dw);
-
-	return 0;
+	return 5900000 * i5100_mc_scrben(dw);
 }
 
 static struct pci_dev *pci_get_device_func(unsigned vendor,
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 362861c..81154ab 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1,6 +1,6 @@
 /* Intel i7 core/Nehalem Memory Controller kernel module
  *
- * This driver supports yhe memory controllers found on the Intel
+ * This driver supports the memory controllers found on the Intel
  * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
  * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
  * and Westmere-EP.
@@ -1271,7 +1271,7 @@
 	int i;
 
 	/*
-	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
+	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
 	 * aren't announced by acpi. So, we need to use a legacy scan probing
 	 * to detect them
 	 */
@@ -1864,7 +1864,7 @@
 	if (mce->mcgstatus & 1)
 		i7core_check_error(mci);
 
-	/* Advice mcelog that the error were handled */
+	/* Advise mcelog that the errors were handled */
 	return 1;
 }
 
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index c018109..f6cf73d 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -5,6 +5,7 @@
 
 static struct amd_decoder_ops *fam_ops;
 
+static u8 xec_mask	 = 0xf;
 static u8 nb_err_cpumask = 0xf;
 
 static bool report_gart_errors;
@@ -74,57 +75,104 @@
 	"ECC Error in the Probe Filter directory"
 };
 
-static bool f12h_dc_mce(u16 ec)
+static const char * const f15h_ic_mce_desc[] = {
+	"UC during a demand linefill from L2",
+	"Parity error during data load from IC",
+	"Parity error for IC valid bit",
+	"Main tag parity error",
+	"Parity error in prediction queue",
+	"PFB data/address parity error",
+	"Parity error in the branch status reg",
+	"PFB promotion address error",
+	"Tag error during probe/victimization",
+	"Parity error for IC probe tag valid bit",
+	"PFB non-cacheable bit parity error",
+	"PFB valid bit parity error",			/* xec = 0xd */
+	"patch RAM",					/* xec = 010 */
+	"uop queue",
+	"insn buffer",
+	"predecode buffer",
+	"fetch address FIFO"
+};
+
+static const char * const f15h_cu_mce_desc[] = {
+	"Fill ECC error on data fills",			/* xec = 0x4 */
+	"Fill parity error on insn fills",
+	"Prefetcher request FIFO parity error",
+	"PRQ address parity error",
+	"PRQ data parity error",
+	"WCC Tag ECC error",
+	"WCC Data ECC error",
+	"WCB Data parity error",
+	"VB Data/ECC error",
+	"L2 Tag ECC error",				/* xec = 0x10 */
+	"Hard L2 Tag ECC error",
+	"Multiple hits on L2 tag",
+	"XAB parity error",
+	"PRB address parity error"
+};
+
+static const char * const fr_ex_mce_desc[] = {
+	"CPU Watchdog timer expire",
+	"Wakeup array dest tag",
+	"AG payload array",
+	"EX payload array",
+	"IDRF array",
+	"Retire dispatch queue",
+	"Mapper checkpoint array",
+	"Physical register file EX0 port",
+	"Physical register file EX1 port",
+	"Physical register file AG0 port",
+	"Physical register file AG1 port",
+	"Flag register file",
+	"DE correctable error could not be corrected"
+};
+
+static bool f12h_dc_mce(u16 ec, u8 xec)
 {
 	bool ret = false;
 
 	if (MEM_ERROR(ec)) {
-		u8 ll = ec & 0x3;
+		u8 ll = LL(ec);
 		ret = true;
 
 		if (ll == LL_L2)
 			pr_cont("during L1 linefill from L2.\n");
 		else if (ll == LL_L1)
-			pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec));
+			pr_cont("Data/Tag %s error.\n", R4_MSG(ec));
 		else
 			ret = false;
 	}
 	return ret;
 }
 
-static bool f10h_dc_mce(u16 ec)
+static bool f10h_dc_mce(u16 ec, u8 xec)
 {
-	u8 r4  = (ec >> 4) & 0xf;
-	u8 ll  = ec & 0x3;
-
-	if (r4 == R4_GEN && ll == LL_L1) {
+	if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
 		pr_cont("during data scrub.\n");
 		return true;
 	}
-	return f12h_dc_mce(ec);
+	return f12h_dc_mce(ec, xec);
 }
 
-static bool k8_dc_mce(u16 ec)
+static bool k8_dc_mce(u16 ec, u8 xec)
 {
 	if (BUS_ERROR(ec)) {
 		pr_cont("during system linefill.\n");
 		return true;
 	}
 
-	return f10h_dc_mce(ec);
+	return f10h_dc_mce(ec, xec);
 }
 
-static bool f14h_dc_mce(u16 ec)
+static bool f14h_dc_mce(u16 ec, u8 xec)
 {
-	u8 r4	 = (ec >> 4) & 0xf;
-	u8 ll	 = ec & 0x3;
-	u8 tt	 = (ec >> 2) & 0x3;
-	u8 ii	 = tt;
+	u8 r4	 = R4(ec);
 	bool ret = true;
 
 	if (MEM_ERROR(ec)) {
 
-		if (tt != TT_DATA || ll != LL_L1)
+		if (TT(ec) != TT_DATA || LL(ec) != LL_L1)
 			return false;
 
 		switch (r4) {
@@ -144,7 +192,7 @@
 		}
 	} else if (BUS_ERROR(ec)) {
 
-		if ((ii != II_MEM && ii != II_IO) || ll != LL_LG)
+		if ((II(ec) != II_MEM && II(ec) != II_IO) || LL(ec) != LL_LG)
 			return false;
 
 		pr_cont("System read data error on a ");
@@ -169,39 +217,78 @@
 	return ret;
 }
 
+static bool f15h_dc_mce(u16 ec, u8 xec)
+{
+	bool ret = true;
+
+	if (MEM_ERROR(ec)) {
+
+		switch (xec) {
+		case 0x0:
+			pr_cont("Data Array access error.\n");
+			break;
+
+		case 0x1:
+			pr_cont("UC error during a linefill from L2/NB.\n");
+			break;
+
+		case 0x2:
+		case 0x11:
+			pr_cont("STQ access error.\n");
+			break;
+
+		case 0x3:
+			pr_cont("SCB access error.\n");
+			break;
+
+		case 0x10:
+			pr_cont("Tag error.\n");
+			break;
+
+		case 0x12:
+			pr_cont("LDQ access error.\n");
+			break;
+
+		default:
+			ret = false;
+		}
+	} else if (BUS_ERROR(ec)) {
+
+		if (!xec)
+			pr_cont("during system linefill.\n");
+		else
+			pr_cont(" Internal %s condition.\n",
+				((xec == 1) ? "livelock" : "deadlock"));
+	} else
+		ret = false;
+
+	return ret;
+}
+
 static void amd_decode_dc_mce(struct mce *m)
 {
-	u16 ec = m->status & 0xffff;
-	u8 xec = (m->status >> 16) & 0xf;
+	u16 ec = EC(m->status);
+	u8 xec = XEC(m->status, xec_mask);
 
 	pr_emerg(HW_ERR "Data Cache Error: ");
 
 	/* TLB error signatures are the same across families */
 	if (TLB_ERROR(ec)) {
-		u8 tt = (ec >> 2) & 0x3;
-
-		if (tt == TT_DATA) {
+		if (TT(ec) == TT_DATA) {
 			pr_cont("%s TLB %s.\n", LL_MSG(ec),
-				(xec ? "multimatch" : "parity error"));
+				((xec == 2) ? "locked miss"
+					    : (xec ? "multimatch" : "parity")));
 			return;
 		}
-		else
-			goto wrong_dc_mce;
-	}
-
-	if (!fam_ops->dc_mce(ec))
-		goto wrong_dc_mce;
-
-	return;
-
-wrong_dc_mce:
-	pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
+	} else if (fam_ops->dc_mce(ec, xec))
+		;
+	else
+		pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
 }
 
-static bool k8_ic_mce(u16 ec)
+static bool k8_ic_mce(u16 ec, u8 xec)
 {
-	u8 ll	 = ec & 0x3;
-	u8 r4	 = (ec >> 4) & 0xf;
+	u8 ll	 = LL(ec);
 	bool ret = true;
 
 	if (!MEM_ERROR(ec))
@@ -210,7 +297,7 @@
 	if (ll == 0x2)
 		pr_cont("during a linefill from L2.\n");
 	else if (ll == 0x1) {
-		switch (r4) {
+		switch (R4(ec)) {
 		case R4_IRD:
 			pr_cont("Parity error during data load.\n");
 			break;
@@ -233,15 +320,13 @@
 	return ret;
 }
 
-static bool f14h_ic_mce(u16 ec)
+static bool f14h_ic_mce(u16 ec, u8 xec)
 {
-	u8 ll    = ec & 0x3;
-	u8 tt    = (ec >> 2) & 0x3;
-	u8 r4  = (ec >> 4) & 0xf;
+	u8 r4    = R4(ec);
 	bool ret = true;
 
 	if (MEM_ERROR(ec)) {
-		if (tt != 0 || ll != 1)
+		if (TT(ec) != 0 || LL(ec) != 1)
 			ret = false;
 
 		if (r4 == R4_IRD)
@@ -254,10 +339,36 @@
 	return ret;
 }
 
+static bool f15h_ic_mce(u16 ec, u8 xec)
+{
+	bool ret = true;
+
+	if (!MEM_ERROR(ec))
+		return false;
+
+	switch (xec) {
+	case 0x0 ... 0xa:
+		pr_cont("%s.\n", f15h_ic_mce_desc[xec]);
+		break;
+
+	case 0xd:
+		pr_cont("%s.\n", f15h_ic_mce_desc[xec-2]);
+		break;
+
+	case 0x10 ... 0x14:
+		pr_cont("Decoder %s parity error.\n", f15h_ic_mce_desc[xec-4]);
+		break;
+
+	default:
+		ret = false;
+	}
+	return ret;
+}
+
 static void amd_decode_ic_mce(struct mce *m)
 {
-	u16 ec = m->status & 0xffff;
-	u8 xec = (m->status >> 16) & 0xf;
+	u16 ec = EC(m->status);
+	u8 xec = XEC(m->status, xec_mask);
 
 	pr_emerg(HW_ERR "Instruction Cache Error: ");
 
@@ -268,7 +379,7 @@
 		bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
 
 		pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
-	} else if (fam_ops->ic_mce(ec))
+	} else if (fam_ops->ic_mce(ec, xec))
 		;
 	else
 		pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
@@ -276,8 +387,8 @@
 
 static void amd_decode_bu_mce(struct mce *m)
 {
-	u32 ec = m->status & 0xffff;
-	u32 xec = (m->status >> 16) & 0xf;
+	u16 ec = EC(m->status);
+	u8 xec = XEC(m->status, xec_mask);
 
 	pr_emerg(HW_ERR "Bus Unit Error");
 
@@ -286,23 +397,23 @@
 	else if (xec == 0x3)
 		pr_cont(" in the victim data buffers.\n");
 	else if (xec == 0x2 && MEM_ERROR(ec))
-		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
+		pr_cont(": %s error in the L2 cache tags.\n", R4_MSG(ec));
 	else if (xec == 0x0) {
 		if (TLB_ERROR(ec))
 			pr_cont(": %s error in a Page Descriptor Cache or "
 				"Guest TLB.\n", TT_MSG(ec));
 		else if (BUS_ERROR(ec))
 			pr_cont(": %s/ECC error in data read from NB: %s.\n",
-				RRRR_MSG(ec), PP_MSG(ec));
+				R4_MSG(ec), PP_MSG(ec));
 		else if (MEM_ERROR(ec)) {
-			u8 rrrr = (ec >> 4) & 0xf;
+			u8 r4 = R4(ec);
 
-			if (rrrr >= 0x7)
+			if (r4 >= 0x7)
 				pr_cont(": %s error during data copyback.\n",
-					RRRR_MSG(ec));
-			else if (rrrr <= 0x1)
+					R4_MSG(ec));
+			else if (r4 <= 0x1)
 				pr_cont(": %s parity/ECC error during data "
-					"access from L2.\n", RRRR_MSG(ec));
+					"access from L2.\n", R4_MSG(ec));
 			else
 				goto wrong_bu_mce;
 		} else
@@ -316,12 +427,52 @@
 	pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
 }
 
+static void amd_decode_cu_mce(struct mce *m)
+{
+	u16 ec = EC(m->status);
+	u8 xec = XEC(m->status, xec_mask);
+
+	pr_emerg(HW_ERR "Combined Unit Error: ");
+
+	if (TLB_ERROR(ec)) {
+		if (xec == 0x0)
+			pr_cont("Data parity TLB read error.\n");
+		else if (xec == 0x1)
+			pr_cont("Poison data provided for TLB fill.\n");
+		else
+			goto wrong_cu_mce;
+	} else if (BUS_ERROR(ec)) {
+		if (xec > 2)
+			goto wrong_cu_mce;
+
+		pr_cont("Error during attempted NB data read.\n");
+	} else if (MEM_ERROR(ec)) {
+		switch (xec) {
+		case 0x4 ... 0xc:
+			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x4]);
+			break;
+
+		case 0x10 ... 0x14:
+			pr_cont("%s.\n", f15h_cu_mce_desc[xec - 0x7]);
+			break;
+
+		default:
+			goto wrong_cu_mce;
+		}
+	}
+
+	return;
+
+wrong_cu_mce:
+	pr_emerg(HW_ERR "Corrupted CU MCE info?\n");
+}
+
 static void amd_decode_ls_mce(struct mce *m)
 {
-	u16 ec = m->status & 0xffff;
-	u8 xec = (m->status >> 16) & 0xf;
+	u16 ec = EC(m->status);
+	u8 xec = XEC(m->status, xec_mask);
 
-	if (boot_cpu_data.x86 == 0x14) {
+	if (boot_cpu_data.x86 >= 0x14) {
 		pr_emerg("You shouldn't be seeing an LS MCE on this cpu family,"
 			 " please report on LKML.\n");
 		return;
@@ -330,12 +481,12 @@
 	pr_emerg(HW_ERR "Load Store Error");
 
 	if (xec == 0x0) {
-		u8 r4 = (ec >> 4) & 0xf;
+		u8 r4 = R4(ec);
 
 		if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR))
 			goto wrong_ls_mce;
 
-		pr_cont(" during %s.\n", RRRR_MSG(ec));
+		pr_cont(" during %s.\n", R4_MSG(ec));
 	} else
 		goto wrong_ls_mce;
 
@@ -410,6 +561,15 @@
 		goto out;
 		break;
 
+	case 0x19:
+		if (boot_cpu_data.x86 == 0x15)
+			pr_cont("Compute Unit Data Error.\n");
+		else
+			ret = false;
+
+		goto out;
+		break;
+
 	case 0x1c ... 0x1f:
 		offset = 24;
 		break;
@@ -434,27 +594,30 @@
 
 void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
 {
-	u8 xec   = (m->status >> 16) & 0x1f;
-	u16 ec   = m->status & 0xffff;
+	u16 ec   = EC(m->status);
+	u8 xec   = XEC(m->status, 0x1f);
 	u32 nbsh = (u32)(m->status >> 32);
+	int core = -1;
 
-	pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id);
+	pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
 
-	/*
-	 * F10h, revD can disable ErrCpu[3:0] so check that first and also the
-	 * value encoding has changed so interpret those differently
-	 */
+	/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
 	if ((boot_cpu_data.x86 == 0x10) &&
 	    (boot_cpu_data.x86_model > 7)) {
 		if (nbsh & K8_NBSH_ERR_CPU_VAL)
-			pr_cont(", core: %u", (u8)(nbsh & nb_err_cpumask));
+			core = nbsh & nb_err_cpumask;
 	} else {
 		u8 assoc_cpus = nbsh & nb_err_cpumask;
 
 		if (assoc_cpus > 0)
-			pr_cont(", core: %d", fls(assoc_cpus) - 1);
+			core = fls(assoc_cpus) - 1;
 	}
 
+	if (core >= 0)
+		pr_cont(", core %d): ", core);
+	else
+		pr_cont("): ");
+
 	switch (xec) {
 	case 0x2:
 		pr_cont("Sync error (sync packets on HT link detected).\n");
@@ -496,35 +659,89 @@
 
 static void amd_decode_fr_mce(struct mce *m)
 {
-	if (boot_cpu_data.x86 == 0xf ||
-	    boot_cpu_data.x86 == 0x11)
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+	u8 xec = XEC(m->status, xec_mask);
+
+	if (c->x86 == 0xf || c->x86 == 0x11)
 		goto wrong_fr_mce;
 
-	/* we have only one error signature so match all fields at once. */
-	if ((m->status & 0xffff) == 0x0f0f) {
-		pr_emerg(HW_ERR "FR Error: CPU Watchdog timer expire.\n");
-		return;
-	}
+	if (c->x86 != 0x15 && xec != 0x0)
+		goto wrong_fr_mce;
+
+	pr_emerg(HW_ERR "%s Error: ",
+		 (c->x86 == 0x15 ? "Execution Unit" : "FIROB"));
+
+	if (xec == 0x0 || xec == 0xc)
+		pr_cont("%s.\n", fr_ex_mce_desc[xec]);
+	else if (xec < 0xd)
+		pr_cont("%s parity error.\n", fr_ex_mce_desc[xec]);
+	else
+		goto wrong_fr_mce;
+
+	return;
 
 wrong_fr_mce:
 	pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
 }
 
+static void amd_decode_fp_mce(struct mce *m)
+{
+	u8 xec = XEC(m->status, xec_mask);
+
+	pr_emerg(HW_ERR "Floating Point Unit Error: ");
+
+	switch (xec) {
+	case 0x1:
+		pr_cont("Free List");
+		break;
+
+	case 0x2:
+		pr_cont("Physical Register File");
+		break;
+
+	case 0x3:
+		pr_cont("Retire Queue");
+		break;
+
+	case 0x4:
+		pr_cont("Scheduler table");
+		break;
+
+	case 0x5:
+		pr_cont("Status Register File");
+		break;
+
+	default:
+		goto wrong_fp_mce;
+		break;
+	}
+
+	pr_cont(" parity error.\n");
+
+	return;
+
+wrong_fp_mce:
+	pr_emerg(HW_ERR "Corrupted FP MCE info?\n");
+}
+
 static inline void amd_decode_err_code(u16 ec)
 {
-	if (TLB_ERROR(ec)) {
-		pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
-			 TT_MSG(ec), LL_MSG(ec));
-	} else if (MEM_ERROR(ec)) {
-		pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
-			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
-	} else if (BUS_ERROR(ec)) {
-		pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
-			 "Participating Processor: %s\n",
-			  RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
-			  PP_MSG(ec));
-	} else
-		pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
+
+	pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
+
+	if (BUS_ERROR(ec))
+		pr_cont(", mem/io: %s", II_MSG(ec));
+	else
+		pr_cont(", tx: %s", TT_MSG(ec));
+
+	if (MEM_ERROR(ec) || BUS_ERROR(ec)) {
+		pr_cont(", mem-tx: %s", R4_MSG(ec));
+
+		if (BUS_ERROR(ec))
+			pr_cont(", part-proc: %s (%s)", PP_MSG(ec), TO_MSG(ec));
+	}
+
+	pr_cont("\n");
 }
 
 /*
@@ -546,25 +763,32 @@
 int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
 {
 	struct mce *m = (struct mce *)data;
+	struct cpuinfo_x86 *c = &boot_cpu_data;
 	int node, ecc;
 
 	if (amd_filter_mce(m))
 		return NOTIFY_STOP;
 
-	pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);
+	pr_emerg(HW_ERR "MC%d_STATUS[%s|%s|%s|%s|%s",
+		m->bank,
+		((m->status & MCI_STATUS_OVER)	? "Over"  : "-"),
+		((m->status & MCI_STATUS_UC)	? "UE"	  : "CE"),
+		((m->status & MCI_STATUS_MISCV)	? "MiscV" : "-"),
+		((m->status & MCI_STATUS_PCC)	? "PCC"	  : "-"),
+		((m->status & MCI_STATUS_ADDRV)	? "AddrV" : "-"));
 
-	pr_cont("%sorrected error, other errors lost: %s, "
-		 "CPU context corrupt: %s",
-		 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
-		 ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
-		 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
+	if (c->x86 == 0x15)
+		pr_cont("|%s|%s",
+			((m->status & BIT_64(44)) ? "Deferred" : "-"),
+			((m->status & BIT_64(43)) ? "Poison"   : "-"));
 
 	/* do the two bits[14:13] together */
 	ecc = (m->status >> 45) & 0x3;
 	if (ecc)
-		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
+		pr_cont("|%sECC", ((ecc == 2) ? "C" : "U"));
 
-	pr_cont("\n");
+	pr_cont("]: 0x%016llx\n", m->status);
+
 
 	switch (m->bank) {
 	case 0:
@@ -576,7 +800,10 @@
 		break;
 
 	case 2:
-		amd_decode_bu_mce(m);
+		if (c->x86 == 0x15)
+			amd_decode_cu_mce(m);
+		else
+			amd_decode_bu_mce(m);
 		break;
 
 	case 3:
@@ -592,6 +819,10 @@
 		amd_decode_fr_mce(m);
 		break;
 
+	case 6:
+		amd_decode_fp_mce(m);
+		break;
+
 	default:
 		break;
 	}
@@ -608,18 +839,21 @@
 
 static int __init mce_amd_init(void)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (c->x86_vendor != X86_VENDOR_AMD)
 		return 0;
 
-	if ((boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x12) &&
-	    (boot_cpu_data.x86 != 0x14 || boot_cpu_data.x86_model > 0xf))
+	if ((c->x86 < 0xf || c->x86 > 0x12) &&
+	    (c->x86 != 0x14 || c->x86_model > 0xf) &&
+	    (c->x86 != 0x15 || c->x86_model > 0xf))
 		return 0;
 
 	fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
 	if (!fam_ops)
 		return -ENOMEM;
 
-	switch (boot_cpu_data.x86) {
+	switch (c->x86) {
 	case 0xf:
 		fam_ops->dc_mce = k8_dc_mce;
 		fam_ops->ic_mce = k8_ic_mce;
@@ -651,9 +885,15 @@
 		fam_ops->nb_mce = nb_noop_mce;
 		break;
 
+	case 0x15:
+		xec_mask = 0x1f;
+		fam_ops->dc_mce = f15h_dc_mce;
+		fam_ops->ic_mce = f15h_ic_mce;
+		fam_ops->nb_mce = f10h_nb_mce;
+		break;
+
 	default:
-		printk(KERN_WARNING "Huh? What family is that: %d?!\n",
-				    boot_cpu_data.x86);
+		printk(KERN_WARNING "Huh? What family is that: %d?!\n", c->x86);
 		kfree(fam_ops);
 		return -EINVAL;
 	}
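
One non-obvious detail in the new family-15h tables is the index arithmetic:
amd_decode_cu_mce() above uses f15h_cu_mce_desc[xec - 0x4] for memory errors 0x4..0xc and
[xec - 0x7] for 0x10..0x14, because the first nine strings cover xec 0x4..0xc and the
next block starts at table index 0x9. A small standalone check of that mapping follows;
the table strings are abbreviated here, the real array is the one added above.

#include <stdio.h>

/* Abbreviated stand-in for f15h_cu_mce_desc[], just to check the offsets;
 * only the entries needed for the example keep their real text. */
static const char * const cu_desc[] = {
	"Fill ECC error on data fills",		/* xec = 0x4  -> index 0x0 */
	"...", "...", "...", "...", "...", "...", "...",
	"VB Data/ECC error",			/* xec = 0xc  -> index 0x8 */
	"L2 Tag ECC error",			/* xec = 0x10 -> index 0x9 */
	"...", "...", "...",
	"PRB address parity error",		/* xec = 0x14 -> index 0xd */
};

static const char *cu_mce_desc(unsigned int xec)
{
	if (xec >= 0x4 && xec <= 0xc)
		return cu_desc[xec - 0x4];	/* 0x4..0xc   -> 0x0..0x8 */
	if (xec >= 0x10 && xec <= 0x14)
		return cu_desc[xec - 0x7];	/* 0x10..0x14 -> 0x9..0xd */
	return "unknown";
}

int main(void)
{
	printf("xec 0x0c -> %s\n", cu_mce_desc(0x0c));	/* VB Data/ECC error */
	printf("xec 0x10 -> %s\n", cu_mce_desc(0x10));	/* L2 Tag ECC error  */
	return 0;
}
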
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 35f6e0e..45dda47 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -7,8 +7,8 @@
 
 #define BIT_64(n)			(U64_C(1) << (n))
 
-#define ERROR_CODE(x)			((x) & 0xffff)
-#define EXT_ERROR_CODE(x)		(((x) >> 16) & 0x1f)
+#define EC(x)				((x) & 0xffff)
+#define XEC(x, mask)			(((x) >> 16) & mask)
 
 #define LOW_SYNDROME(x)			(((x) >> 15) & 0xff)
 #define HIGH_SYNDROME(x)		(((x) >> 24) & 0xff)
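
The EC()/XEC() renames above, together with the LL()/TT()/R4() helpers that follow in
this header, make the decoders read as direct field extractions from MCi_STATUS. A
standalone illustration of that split is shown below; the masks are copied from this
header, while the sample status value and the family-15h xec_mask width are made up for
the example.

#include <stdio.h>

int main(void)
{
	unsigned long long status = 0x9400410000000136ULL; /* hypothetical MCi_STATUS */
	unsigned int xec_mask = 0x1f;	/* 5-bit extended error code, as on family 0x15 */

	unsigned int ec  = status & 0xffff;		/* EC(status)            */
	unsigned int xec = (status >> 16) & xec_mask;	/* XEC(status, xec_mask) */

	printf("ll=%u tt=%u r4=%u xec=%#x\n",
	       ec & 0x3,		/* LL(ec): cache level             */
	       (ec >> 2) & 0x3,		/* TT(ec): transaction type        */
	       (ec >> 4) & 0xf,		/* R4(ec): memory-transaction type */
	       xec);
	return 0;
}
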
@@ -21,15 +21,15 @@
 #define TT_MSG(x)			tt_msgs[TT(x)]
 #define II(x)				(((x) >> 2) & 0x3)
 #define II_MSG(x)			ii_msgs[II(x)]
-#define LL(x)				(((x) >> 0) & 0x3)
+#define LL(x)				((x) & 0x3)
 #define LL_MSG(x)			ll_msgs[LL(x)]
 #define TO(x)				(((x) >> 8) & 0x1)
 #define TO_MSG(x)			to_msgs[TO(x)]
 #define PP(x)				(((x) >> 9) & 0x3)
 #define PP_MSG(x)			pp_msgs[PP(x)]
 
-#define RRRR(x)				(((x) >> 4) & 0xf)
-#define RRRR_MSG(x)			((RRRR(x) < 9) ?  rrrr_msgs[RRRR(x)] : "Wrong R4!")
+#define R4(x)				(((x) >> 4) & 0xf)
+#define R4_MSG(x)			((R4(x) < 9) ?  rrrr_msgs[R4(x)] : "Wrong R4!")
 
 #define K8_NBSH				0x4C
 
@@ -100,8 +100,8 @@
  * per-family decoder ops
  */
 struct amd_decoder_ops {
-	bool (*dc_mce)(u16);
-	bool (*ic_mce)(u16);
+	bool (*dc_mce)(u16, u8);
+	bool (*ic_mce)(u16, u8);
 	bool (*nb_mce)(u16, u8);
 };
 
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 39faded..733a7e7a 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -88,10 +88,11 @@
 		return -EINVAL;
 	}
 
-	if (value > 5) {
-		printk(KERN_ERR "Non-existant MCE bank: %lu\n", value);
-		return -EINVAL;
-	}
+	if (value > 5)
+		if (boot_cpu_data.x86 != 0x15 || value > 6) {
+			printk(KERN_ERR "Non-existant MCE bank: %lu\n", value);
+			return -EINVAL;
+		}
 
 	i_mce.bank = value;
 
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 070cea4..b9f0c20 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -873,7 +873,7 @@
 }
 
 /**
- * ppc4xx_edac_init_csrows - intialize driver instance rows
+ * ppc4xx_edac_init_csrows - initialize driver instance rows
  * @mci: A pointer to the EDAC memory controller instance
  *       associated with the ibm,sdram-4xx-ddr2 controller for which
  *       the csrows (i.e. banks/ranks) are being initialized.
@@ -881,7 +881,7 @@
  *          currently set for the controller, from which bank width
  *          and memory typ information is derived.
  *
- * This routine intializes the virtual "chip select rows" associated
+ * This routine initializes the virtual "chip select rows" associated
  * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
  * controller bank/rank is mapped to a row.
  *
@@ -992,7 +992,7 @@
 }
 
 /**
- * ppc4xx_edac_mc_init - intialize driver instance
+ * ppc4xx_edac_mc_init - initialize driver instance
  * @mci: A pointer to the EDAC memory controller instance being
  *       initialized.
  * @op: A pointer to the OpenFirmware device tree node associated
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 40a222e..68f942c 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -19,7 +19,7 @@
 
 config FIREWIRE_OHCI
 	tristate "OHCI-1394 controllers"
-	depends on PCI && FIREWIRE
+	depends on PCI && FIREWIRE && MMU
 	help
 	  Enable this driver if you have a FireWire controller based
 	  on the OHCI specification.  For all practical purposes, this
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14bb7b7..48ae712 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1501,9 +1501,10 @@
 	e->client		= client;
 	e->p.speed		= SCODE_100;
 	e->p.generation		= a->generation;
-	e->p.header[0]		= a->data[0];
-	e->p.header[1]		= a->data[1];
-	e->p.header_length	= 8;
+	e->p.header[0]		= TCODE_LINK_INTERNAL << 4;
+	e->p.header[1]		= a->data[0];
+	e->p.header[2]		= a->data[1];
+	e->p.header_length	= 12;
 	e->p.callback		= outbound_phy_packet_callback;
 	e->phy_packet.closure	= a->closure;
 	e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index b42a0bd..d00f8ce 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -72,6 +72,15 @@
 #define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
 #define PHY_IDENTIFIER(id)		((id) << 30)
 
+/* returns 0 if the split timeout handler is already running */
+static int try_cancel_split_timeout(struct fw_transaction *t)
+{
+	if (t->is_split_transaction)
+		return del_timer(&t->split_timeout_timer);
+	else
+		return 1;
+}
+
 static int close_transaction(struct fw_transaction *transaction,
 			     struct fw_card *card, int rcode)
 {
@@ -81,7 +90,7 @@
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t == transaction) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
@@ -141,16 +150,28 @@
 	card->tlabel_mask &= ~(1ULL << t->tlabel);
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	card->driver->cancel_packet(card, &t->packet);
-
-	/*
-	 * At this point cancel_packet will never call the transaction
-	 * callback, since we just took the transaction out of the list.
-	 * So do it here.
-	 */
 	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
 }
 
+static void start_split_transaction_timeout(struct fw_transaction *t,
+					    struct fw_card *card)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+
+	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
+		spin_unlock_irqrestore(&card->lock, flags);
+		return;
+	}
+
+	t->is_split_transaction = true;
+	mod_timer(&t->split_timeout_timer,
+		  jiffies + card->split_timeout_jiffies);
+
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
 static void transmit_complete_callback(struct fw_packet *packet,
 				       struct fw_card *card, int status)
 {
@@ -162,7 +183,7 @@
 		close_transaction(t, card, RCODE_COMPLETE);
 		break;
 	case ACK_PENDING:
-		t->timestamp = packet->timestamp;
+		start_split_transaction_timeout(t, card);
 		break;
 	case ACK_BUSY_X:
 	case ACK_BUSY_A:
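
The net effect of the hunks above is that the split-transaction timer no longer starts in
fw_send_request() but only once the request has actually been ack'ed as pending, and the
close/response paths cancel it through try_cancel_split_timeout(), which treats a unified
transaction as trivially cancellable. A condensed sketch of that lifecycle; the struct
fw_transaction fields and the locking context are the ones from this file and are not
reproduced here.

/* 1. fw_send_request(): prepare the timer but do not arm it yet. */
t->is_split_transaction = false;
setup_timer(&t->split_timeout_timer,
	    split_transaction_timeout_callback, (unsigned long)t);

/* 2. transmit_complete_callback(), ACK_PENDING: the target will answer later
 *    with a separate response packet, so start the split timeout now (under
 *    card->lock, and only while the transaction is still on the list). */
t->is_split_transaction = true;
mod_timer(&t->split_timeout_timer, jiffies + card->split_timeout_jiffies);

/* 3. Response received or transaction closed: cancel the timer only if it was
 *    ever started; del_timer() returning 0 means the timeout handler is
 *    already running and owns the transaction. */
if (!try_cancel_split_timeout(t))
	goto timed_out;
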
@@ -250,7 +271,7 @@
 		break;
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
  common:
 	packet->speed = speed;
@@ -349,11 +370,9 @@
 	t->node_id = destination_id;
 	t->tlabel = tlabel;
 	t->card = card;
+	t->is_split_transaction = false;
 	setup_timer(&t->split_timeout_timer,
 		    split_transaction_timeout_callback, (unsigned long)t);
-	/* FIXME: start this timer later, relative to t->timestamp */
-	mod_timer(&t->split_timeout_timer,
-		  jiffies + card->split_timeout_jiffies);
 	t->callback = callback;
 	t->callback_data = callback_data;
 
@@ -423,7 +442,8 @@
 }
 
 static struct fw_packet phy_config_packet = {
-	.header_length	= 8,
+	.header_length	= 12,
+	.header[0]	= TCODE_LINK_INTERNAL << 4,
 	.payload_length	= 0,
 	.speed		= SCODE_100,
 	.callback	= transmit_phy_packet_callback,
@@ -451,8 +471,8 @@
 
 	mutex_lock(&phy_config_mutex);
 
-	phy_config_packet.header[0] = data;
-	phy_config_packet.header[1] = ~data;
+	phy_config_packet.header[1] = data;
+	phy_config_packet.header[2] = ~data;
 	phy_config_packet.generation = generation;
 	INIT_COMPLETION(phy_config_done);
 
@@ -638,7 +658,7 @@
 		}
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 		return 0;
 	}
 }
@@ -694,7 +714,7 @@
 		break;
 
 	default:
-		WARN(1, "wrong tcode %d", tcode);
+		WARN(1, "wrong tcode %d\n", tcode);
 	}
 
 	response->payload_mapped = false;
@@ -925,7 +945,7 @@
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t->node_id == source && t->tlabel == tlabel) {
-			if (!del_timer(&t->split_timeout_timer)) {
+			if (!try_cancel_split_timeout(t)) {
 				spin_unlock_irqrestore(&card->lock, flags);
 				goto timed_out;
 			}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index e6239f9..f8dfcf1 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -215,9 +215,11 @@
 
 /* -transaction */
 
+#define TCODE_LINK_INTERNAL		0xe
+
 #define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
 #define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
-#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == 0xe)
+#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
 #define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
 #define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
 #define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
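
Naming the link-internal tcode also reflects the new in-stack representation used
throughout this series: PHY packets now carry a three-quadlet header whose first quadlet
holds only the tcode, followed by the PHY data and its ones' complement. A minimal sketch
of filling such a packet is shown below; the struct fw_packet fields are the ones from
<linux/firewire.h>, and the data value is an arbitrary example.

struct fw_packet p;
u32 data = 0x00800000;				/* arbitrary example PHY quadlet */

p.header[0]	 = TCODE_LINK_INTERNAL << 4;	/* tcode-only quadlet      */
p.header[1]	 = data;			/* PHY packet data         */
p.header[2]	 = ~data;			/* ones' complement check  */
p.header_length	 = 12;				/* three quadlets, was 8   */
p.payload_length = 0;
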
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 1a467a9..c2e194c 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -9,6 +9,7 @@
 #include <linux/bug.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/ethtool.h>
 #include <linux/firewire.h>
 #include <linux/firewire-constants.h>
 #include <linux/highmem.h>
@@ -179,6 +180,7 @@
 	/* Number of tx datagrams that have been queued but not yet acked */
 	int queued_datagrams;
 
+	int peer_count;
 	struct list_head peer_list;
 	struct fw_card *card;
 	struct net_device *netdev;
@@ -996,15 +998,23 @@
 static void fwnet_write_complete(struct fw_card *card, int rcode,
 				 void *payload, size_t length, void *data)
 {
-	struct fwnet_packet_task *ptask;
-
-	ptask = data;
+	struct fwnet_packet_task *ptask = data;
+	static unsigned long j;
+	static int last_rcode, errors_skipped;
 
 	if (rcode == RCODE_COMPLETE) {
 		fwnet_transmit_packet_done(ptask);
 	} else {
-		fw_error("fwnet_write_complete: failed: %x\n", rcode);
 		fwnet_transmit_packet_failed(ptask);
+
+		if (printk_timed_ratelimit(&j,  1000) || rcode != last_rcode) {
+			fw_error("fwnet_write_complete: "
+				"failed: %x (skipped %d)\n", rcode, errors_skipped);
+
+			errors_skipped = 0;
+			last_rcode = rcode;
+		} else
+			errors_skipped++;
 	}
 }
 
@@ -1213,6 +1223,14 @@
 	return retval;
 }
 
+static void set_carrier_state(struct fwnet_device *dev)
+{
+	if (dev->peer_count > 1)
+		netif_carrier_on(dev->netdev);
+	else
+		netif_carrier_off(dev->netdev);
+}
+
 /* ifup */
 static int fwnet_open(struct net_device *net)
 {
@@ -1226,6 +1244,10 @@
 	}
 	netif_start_queue(net);
 
+	spin_lock_irq(&dev->lock);
+	set_carrier_state(dev);
+	spin_unlock_irq(&dev->lock);
+
 	return 0;
 }
 
@@ -1397,6 +1419,10 @@
 	return 0;
 }
 
+static const struct ethtool_ops fwnet_ethtool_ops = {
+	.get_link	= ethtool_op_get_link,
+};
+
 static const struct net_device_ops fwnet_netdev_ops = {
 	.ndo_open       = fwnet_open,
 	.ndo_stop	= fwnet_stop,
@@ -1415,6 +1441,7 @@
 	net->hard_header_len	= FWNET_HLEN;
 	net->type		= ARPHRD_IEEE1394;
 	net->tx_queue_len	= FWNET_TX_QUEUE_LEN;
+	net->ethtool_ops	= &fwnet_ethtool_ops;
 }
 
 /* caller must hold fwnet_device_mutex */
@@ -1455,6 +1482,8 @@
 
 	spin_lock_irq(&dev->lock);
 	list_add_tail(&peer->peer_link, &dev->peer_list);
+	dev->peer_count++;
+	set_carrier_state(dev);
 	spin_unlock_irq(&dev->lock);
 
 	return 0;
@@ -1535,13 +1564,15 @@
 	return ret;
 }
 
-static void fwnet_remove_peer(struct fwnet_peer *peer)
+static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
 {
 	struct fwnet_partial_datagram *pd, *pd_next;
 
-	spin_lock_irq(&peer->dev->lock);
+	spin_lock_irq(&dev->lock);
 	list_del(&peer->peer_link);
-	spin_unlock_irq(&peer->dev->lock);
+	dev->peer_count--;
+	set_carrier_state(dev);
+	spin_unlock_irq(&dev->lock);
 
 	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
 		fwnet_pd_delete(pd);
@@ -1558,7 +1589,7 @@
 
 	mutex_lock(&fwnet_device_mutex);
 
-	fwnet_remove_peer(peer);
+	fwnet_remove_peer(peer, dev);
 
 	if (list_empty(&dev->peer_list)) {
 		net = dev->netdev;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index bf184fb..0618145 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -302,7 +302,7 @@
 
 	file->private_data = client;
 
-	return 0;
+	return nonseekable_open(inode, file);
 fail:
 	kfree(client);
 	lynx_put(lynx);
@@ -405,7 +405,6 @@
 	.poll =			nosy_poll,
 	.open =			nosy_open,
 	.release =		nosy_release,
-	.llseek =		noop_llseek,
 };
 
 #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index e3c8b60..bd3c61b 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -18,6 +18,7 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
@@ -40,6 +41,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/time.h>
+#include <linux/vmalloc.h>
 
 #include <asm/byteorder.h>
 #include <asm/page.h>
@@ -80,17 +82,23 @@
 #define COMMAND_PTR(regs)	((regs) + 12)
 #define CONTEXT_MATCH(regs)	((regs) + 16)
 
-struct ar_buffer {
-	struct descriptor descriptor;
-	struct ar_buffer *next;
-	__le32 data[0];
-};
+#define AR_BUFFER_SIZE	(32*1024)
+#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
+/* we need at least two pages for proper list management */
+#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
+
+#define MAX_ASYNC_PAYLOAD	4096
+#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
+#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
 
 struct ar_context {
 	struct fw_ohci *ohci;
-	struct ar_buffer *current_buffer;
-	struct ar_buffer *last_buffer;
+	struct page *pages[AR_BUFFERS];
+	void *buffer;
+	struct descriptor *descriptors;
+	dma_addr_t descriptors_bus;
 	void *pointer;
+	unsigned int last_buffer_index;
 	u32 regs;
 	struct tasklet_struct tasklet;
 };
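
These constants define the new AR buffer geometry: AR_BUFFERS single pages form a
descriptor ring, and the first AR_WRAPAROUND_PAGES of them are mapped a second time right
behind the ring (via vm_map_ram() further down) so that a packet crossing the ring end
stays virtually contiguous. A worked example of the numbers for a 4 KiB page size; the
PAGE_SIZE value here is an assumption, the formulas are the ones above.

#include <stdio.h>

#define PAGE_SIZE		4096UL		/* assumed for the example */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define AR_BUFFER_SIZE		(32 * 1024UL)
#define AR_BUFFERS_MIN		DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
#define AR_BUFFERS		(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096UL
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

int main(void)
{
	/* 8 ring pages, with the first 2 mapped again behind the ring so a
	 * packet straddling the ring end stays contiguous in the mapping. */
	printf("ring pages: %lu, wraparound pages: %lu, mapping: %lu pages\n",
	       AR_BUFFERS, AR_WRAPAROUND_PAGES, AR_BUFFERS + AR_WRAPAROUND_PAGES);
	return 0;
}
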
@@ -117,6 +125,8 @@
 	struct fw_ohci *ohci;
 	u32 regs;
 	int total_allocation;
+	bool running;
+	bool flushing;
 
 	/*
 	 * List of page-sized buffers for storing DMA descriptors.
@@ -161,6 +171,9 @@
 	int excess_bytes;
 	void *header;
 	size_t header_length;
+
+	u8 sync;
+	u8 tags;
 };
 
 #define CONFIG_ROM_SIZE 1024
@@ -177,7 +190,8 @@
 	u32 bus_time;
 	bool is_root;
 	bool csr_state_setclear_abdicate;
-
+	int n_ir;
+	int n_it;
 	/*
 	 * Spinlock for accessing fw_ohci data.  Never call out of
 	 * this driver with this lock held.
@@ -186,6 +200,9 @@
 
 	struct mutex phy_reg_mutex;
 
+	void *misc_buffer;
+	dma_addr_t misc_buffer_bus;
+
 	struct ar_context ar_request_ctx;
 	struct ar_context ar_response_ctx;
 	struct context at_request_ctx;
@@ -411,10 +428,6 @@
 	[0xc] = "-reserved-",		[0xd] = "-reserved-",
 	[0xe] = "link internal",	[0xf] = "-reserved-",
 };
-static const char *phys[] = {
-	[0x0] = "phy config packet",	[0x1] = "link-on packet",
-	[0x2] = "self-id packet",	[0x3] = "-reserved-",
-};
 
 static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
 {
@@ -433,12 +446,6 @@
 		return;
 	}
 
-	if (header[0] == ~header[1]) {
-		fw_notify("A%c %s, %s, %08x\n",
-		    dir, evts[evt], phys[header[0] >> 30 & 0x3], header[0]);
-		return;
-	}
-
 	switch (tcode) {
 	case 0x0: case 0x6: case 0x8:
 		snprintf(specific, sizeof(specific), " = %08x",
@@ -453,9 +460,13 @@
 	}
 
 	switch (tcode) {
-	case 0xe: case 0xa:
+	case 0xa:
 		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
 		break;
+	case 0xe:
+		fw_notify("A%c %s, PHY %08x %08x\n",
+			  dir, evts[evt], header[1], header[2]);
+		break;
 	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
 		fw_notify("A%c spd %x tl %02x, "
 		    "%04x -> %04x, %s, "
@@ -594,59 +605,150 @@
 	return ret;
 }
 
-static void ar_context_link_page(struct ar_context *ctx,
-				 struct ar_buffer *ab, dma_addr_t ab_bus)
+static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
 {
-	size_t offset;
+	return page_private(ctx->pages[i]);
+}
 
-	ab->next = NULL;
-	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
-	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
-						    DESCRIPTOR_STATUS |
-						    DESCRIPTOR_BRANCH_ALWAYS);
-	offset = offsetof(struct ar_buffer, data);
-	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
-	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
-	ab->descriptor.branch_address = 0;
+static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
+{
+	struct descriptor *d;
+
+	d = &ctx->descriptors[index];
+	d->branch_address  &= cpu_to_le32(~0xf);
+	d->res_count       =  cpu_to_le16(PAGE_SIZE);
+	d->transfer_status =  0;
 
 	wmb(); /* finish init of new descriptors before branch_address update */
-	ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1);
-	ctx->last_buffer->next = ab;
-	ctx->last_buffer = ab;
+	d = &ctx->descriptors[ctx->last_buffer_index];
+	d->branch_address  |= cpu_to_le32(1);
+
+	ctx->last_buffer_index = index;
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
-{
-	struct device *dev = ctx->ohci->card.device;
-	struct ar_buffer *ab;
-	dma_addr_t uninitialized_var(ab_bus);
-
-	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-	if (ab == NULL)
-		return -ENOMEM;
-
-	ar_context_link_page(ctx, ab, ab_bus);
-
-	return 0;
-}
-
 static void ar_context_release(struct ar_context *ctx)
 {
-	struct ar_buffer *ab, *ab_next;
-	size_t offset;
-	dma_addr_t ab_bus;
+	unsigned int i;
 
-	for (ab = ctx->current_buffer; ab; ab = ab_next) {
-		ab_next = ab->next;
-		offset = offsetof(struct ar_buffer, data);
-		ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		dma_free_coherent(ctx->ohci->card.device, PAGE_SIZE,
-				  ab, ab_bus);
+	if (ctx->buffer)
+		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		if (ctx->pages[i]) {
+			dma_unmap_page(ctx->ohci->card.device,
+				       ar_buffer_bus(ctx, i),
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			__free_page(ctx->pages[i]);
+		}
+}
+
+static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
+{
+	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
+		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+		flush_writes(ctx->ohci);
+
+		fw_error("AR error: %s; DMA stopped\n", error_msg);
 	}
+	/* FIXME: restart? */
+}
+
+static inline unsigned int ar_next_buffer_index(unsigned int index)
+{
+	return (index + 1) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_prev_buffer_index(unsigned int index)
+{
+	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
+}
+
+static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
+{
+	return ar_next_buffer_index(ctx->last_buffer_index);
+}
+
+/*
+ * We search for the buffer that contains the last AR packet DMA data written
+ * by the controller.
+ */
+static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
+						 unsigned int *buffer_offset)
+{
+	unsigned int i, next_i, last = ctx->last_buffer_index;
+	__le16 res_count, next_res_count;
+
+	i = ar_first_buffer_index(ctx);
+	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+
+	/* A buffer that is not yet completely filled must be the last one. */
+	while (i != last && res_count == 0) {
+
+		/* Peek at the next descriptor. */
+		next_i = ar_next_buffer_index(i);
+		rmb(); /* read descriptors in order */
+		next_res_count = ACCESS_ONCE(
+				ctx->descriptors[next_i].res_count);
+		/*
+		 * If the next descriptor is still empty, we must stop at this
+		 * descriptor.
+		 */
+		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
+			/*
+			 * The exception is when the DMA data for one packet is
+			 * split over three buffers; in this case, the middle
+			 * buffer's descriptor might never be updated by the
+			 * controller and still look empty, and we have to peek
+			 * at the third one.
+			 */
+			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
+				next_i = ar_next_buffer_index(next_i);
+				rmb();
+				next_res_count = ACCESS_ONCE(
+					ctx->descriptors[next_i].res_count);
+				if (next_res_count != cpu_to_le16(PAGE_SIZE))
+					goto next_buffer_is_active;
+			}
+
+			break;
+		}
+
+next_buffer_is_active:
+		i = next_i;
+		res_count = next_res_count;
+	}
+
+	rmb(); /* read res_count before the DMA data */
+
+	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
+	if (*buffer_offset > PAGE_SIZE) {
+		*buffer_offset = 0;
+		ar_context_abort(ctx, "corrupted descriptor");
+	}
+
+	return i;
+}
+
+static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
+				    unsigned int end_buffer_index,
+				    unsigned int end_buffer_offset)
+{
+	unsigned int i;
+
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer_index) {
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					PAGE_SIZE, DMA_FROM_DEVICE);
+		i = ar_next_buffer_index(i);
+	}
+	if (end_buffer_offset > 0)
+		dma_sync_single_for_cpu(ctx->ohci->card.device,
+					ar_buffer_bus(ctx, i),
+					end_buffer_offset, DMA_FROM_DEVICE);
 }
 
 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
@@ -689,6 +791,10 @@
 		p.header[3] = cond_le32_to_cpu(buffer[3]);
 		p.header_length = 16;
 		p.payload_length = p.header[3] >> 16;
+		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
+			ar_context_abort(ctx, "invalid packet length");
+			return NULL;
+		}
 		break;
 
 	case TCODE_WRITE_RESPONSE:
@@ -699,9 +805,8 @@
 		break;
 
 	default:
-		/* FIXME: Stop context, discard everything, and restart? */
-		p.header_length = 0;
-		p.payload_length = 0;
+		ar_context_abort(ctx, "invalid tcode");
+		return NULL;
 	}
 
 	p.payload = (void *) buffer + p.header_length;
@@ -751,121 +856,147 @@
 	return buffer + length + 1;
 }
 
-static void ar_context_tasklet(unsigned long data)
+static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
 {
-	struct ar_context *ctx = (struct ar_context *)data;
-	struct ar_buffer *ab;
-	struct descriptor *d;
-	void *buffer, *end;
-	__le16 res_count;
+	void *next;
 
-	ab = ctx->current_buffer;
-	d = &ab->descriptor;
+	while (p < end) {
+		next = handle_ar_packet(ctx, p);
+		if (!next)
+			return p;
+		p = next;
+	}
 
-	res_count = ACCESS_ONCE(d->res_count);
-	if (res_count == 0) {
-		size_t size, size2, rest, pktsize, size3, offset;
-		dma_addr_t start_bus;
-		void *start;
+	return p;
+}
 
-		/*
-		 * This descriptor is finished and we may have a
-		 * packet split across this and the next buffer. We
-		 * reuse the page for reassembling the split packet.
-		 */
+static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
+{
+	unsigned int i;
 
-		offset = offsetof(struct ar_buffer, data);
-		start = ab;
-		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
-		buffer = ab->data;
-
-		ab = ab->next;
-		d = &ab->descriptor;
-		size = start + PAGE_SIZE - ctx->pointer;
-		/* valid buffer data in the next page */
-		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
-		/* what actually fits in this page */
-		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
-		memmove(buffer, ctx->pointer, size);
-		memcpy(buffer + size, ab->data, size2);
-
-		while (size > 0) {
-			void *next = handle_ar_packet(ctx, buffer);
-			pktsize = next - buffer;
-			if (pktsize >= size) {
-				/*
-				 * We have handled all the data that was
-				 * originally in this page, so we can now
-				 * continue in the next page.
-				 */
-				buffer = next;
-				break;
-			}
-			/* move the next packet to the start of the buffer */
-			memmove(buffer, next, size + size2 - pktsize);
-			size -= pktsize;
-			/* fill up this page again */
-			size3 = min(rest - size2,
-				    (size_t)PAGE_SIZE - offset - size - size2);
-			memcpy(buffer + size + size2,
-			       (void *) ab->data + size2, size3);
-			size2 += size3;
-		}
-
-		if (rest > 0) {
-			/* handle the packets that are fully in the next page */
-			buffer = (void *) ab->data +
-					(buffer - (start + offset + size));
-			end = (void *) ab->data + rest;
-
-			while (buffer < end)
-				buffer = handle_ar_packet(ctx, buffer);
-
-			ctx->current_buffer = ab;
-			ctx->pointer = end;
-
-			ar_context_link_page(ctx, start, start_bus);
-		} else {
-			ctx->pointer = start + PAGE_SIZE;
-		}
-	} else {
-		buffer = ctx->pointer;
-		ctx->pointer = end =
-			(void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
-
-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
+	i = ar_first_buffer_index(ctx);
+	while (i != end_buffer) {
+		dma_sync_single_for_device(ctx->ohci->card.device,
+					   ar_buffer_bus(ctx, i),
+					   PAGE_SIZE, DMA_FROM_DEVICE);
+		ar_context_link_page(ctx, i);
+		i = ar_next_buffer_index(i);
 	}
 }
 
-static int ar_context_init(struct ar_context *ctx,
-			   struct fw_ohci *ohci, u32 regs)
+static void ar_context_tasklet(unsigned long data)
 {
-	struct ar_buffer ab;
+	struct ar_context *ctx = (struct ar_context *)data;
+	unsigned int end_buffer_index, end_buffer_offset;
+	void *p, *end;
+
+	p = ctx->pointer;
+	if (!p)
+		return;
+
+	end_buffer_index = ar_search_last_active_buffer(ctx,
+							&end_buffer_offset);
+	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
+	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
+
+	if (end_buffer_index < ar_first_buffer_index(ctx)) {
+		/*
+		 * The filled part of the overall buffer wraps around; handle
+		 * all packets up to the buffer end here.  If the last packet
+		 * wraps around, its tail will be visible after the buffer end
+		 * because the buffer start pages are mapped there again.
+		 */
+		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
+		p = handle_ar_packets(ctx, p, buffer_end);
+		if (p < buffer_end)
+			goto error;
+		/* adjust p to point back into the actual buffer */
+		p -= AR_BUFFERS * PAGE_SIZE;
+	}
+
+	p = handle_ar_packets(ctx, p, end);
+	if (p != end) {
+		if (p > end)
+			ar_context_abort(ctx, "inconsistent descriptor");
+		goto error;
+	}
+
+	ctx->pointer = p;
+	ar_recycle_buffers(ctx, end_buffer_index);
+
+	return;
+
+error:
+	ctx->pointer = NULL;
+}
+
+static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
+			   unsigned int descriptors_offset, u32 regs)
+{
+	unsigned int i;
+	dma_addr_t dma_addr;
+	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
+	struct descriptor *d;
 
 	ctx->regs        = regs;
 	ctx->ohci        = ohci;
-	ctx->last_buffer = &ab;
 	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
 
-	ar_context_add_page(ctx);
-	ar_context_add_page(ctx);
-	ctx->current_buffer = ab.next;
-	ctx->pointer = ctx->current_buffer->data;
+	for (i = 0; i < AR_BUFFERS; i++) {
+		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
+		if (!ctx->pages[i])
+			goto out_of_memory;
+		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
+					0, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ohci->card.device, dma_addr)) {
+			__free_page(ctx->pages[i]);
+			ctx->pages[i] = NULL;
+			goto out_of_memory;
+		}
+		set_page_private(ctx->pages[i], dma_addr);
+	}
+
+	for (i = 0; i < AR_BUFFERS; i++)
+		pages[i]              = ctx->pages[i];
+	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
+		pages[AR_BUFFERS + i] = ctx->pages[i];
+	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
+				 -1, PAGE_KERNEL);
+	if (!ctx->buffer)
+		goto out_of_memory;
+
+	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
+	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
+
+	for (i = 0; i < AR_BUFFERS; i++) {
+		d = &ctx->descriptors[i];
+		d->req_count      = cpu_to_le16(PAGE_SIZE);
+		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
+						DESCRIPTOR_STATUS |
+						DESCRIPTOR_BRANCH_ALWAYS);
+		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
+		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
+			ar_next_buffer_index(i) * sizeof(struct descriptor));
+	}
 
 	return 0;
+
+out_of_memory:
+	ar_context_release(ctx);
+
+	return -ENOMEM;
 }
 
 static void ar_context_run(struct ar_context *ctx)
 {
-	struct ar_buffer *ab = ctx->current_buffer;
-	dma_addr_t ab_bus;
-	size_t offset;
+	unsigned int i;
 
-	offset = offsetof(struct ar_buffer, data);
-	ab_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+	for (i = 0; i < AR_BUFFERS; i++)
+		ar_context_link_page(ctx, i);
 
-	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab_bus | 1);
+	ctx->pointer = ctx->buffer;
+
+	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
 	flush_writes(ctx->ohci);
 }
@@ -1042,6 +1173,7 @@
 		  le32_to_cpu(ctx->last->branch_address));
 	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
 	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
+	ctx->running = true;
 	flush_writes(ohci);
 }
 
@@ -1069,6 +1201,7 @@
 	int i;
 
 	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
+	ctx->running = false;
 	flush_writes(ctx->ohci);
 
 	for (i = 0; i < 10; i++) {
@@ -1099,7 +1232,6 @@
 	struct descriptor *d, *last;
 	__le32 *header;
 	int z, tcode;
-	u32 reg;
 
 	d = context_get_descriptors(ctx, 4, &d_bus);
 	if (d == NULL) {
@@ -1113,21 +1245,27 @@
 	/*
 	 * The DMA format for asyncronous link packets is different
 	 * from the IEEE1394 layout, so shift the fields around
-	 * accordingly.  If header_length is 8, it's a PHY packet, to
-	 * which we need to prepend an extra quadlet.
+	 * accordingly.
 	 */
 
+	tcode = (packet->header[0] >> 4) & 0x0f;
 	header = (__le32 *) &d[1];
-	switch (packet->header_length) {
-	case 16:
-	case 12:
+	switch (tcode) {
+	case TCODE_WRITE_QUADLET_REQUEST:
+	case TCODE_WRITE_BLOCK_REQUEST:
+	case TCODE_WRITE_RESPONSE:
+	case TCODE_READ_QUADLET_REQUEST:
+	case TCODE_READ_BLOCK_REQUEST:
+	case TCODE_READ_QUADLET_RESPONSE:
+	case TCODE_READ_BLOCK_RESPONSE:
+	case TCODE_LOCK_REQUEST:
+	case TCODE_LOCK_RESPONSE:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
 					(packet->header[0] & 0xffff0000));
 		header[2] = cpu_to_le32(packet->header[2]);
 
-		tcode = (packet->header[0] >> 4) & 0x0f;
 		if (TCODE_IS_BLOCK_PACKET(tcode))
 			header[3] = cpu_to_le32(packet->header[3]);
 		else
@@ -1136,18 +1274,18 @@
 		d[0].req_count = cpu_to_le16(packet->header_length);
 		break;
 
-	case 8:
+	case TCODE_LINK_INTERNAL:
 		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
 					(packet->speed << 16));
-		header[1] = cpu_to_le32(packet->header[0]);
-		header[2] = cpu_to_le32(packet->header[1]);
+		header[1] = cpu_to_le32(packet->header[1]);
+		header[2] = cpu_to_le32(packet->header[2]);
 		d[0].req_count = cpu_to_le16(12);
 
-		if (is_ping_packet(packet->header))
+		if (is_ping_packet(&packet->header[1]))
 			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
 		break;
 
-	case 4:
+	case TCODE_STREAM_DATA:
 		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
 					(packet->speed << 16));
 		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
@@ -1197,6 +1335,8 @@
 	 * some controllers (like a JMicron JMB381 PCI-e) misbehave and wind
 	 * up stalling out.  So we just bail out in software and try again
 	 * later, and everyone is happy.
+	 * FIXME: Test of IntEvent.busReset may no longer be necessary since we
+	 *        flush AT queues in bus_reset_tasklet.
 	 * FIXME: Document how the locking works.
 	 */
 	if (ohci->generation != packet->generation ||
@@ -1210,14 +1350,23 @@
 
 	context_append(ctx, d, z, 4 - z);
 
-	/* If the context isn't already running, start it up. */
-	reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
-	if ((reg & CONTEXT_RUN) == 0)
+	if (!ctx->running)
 		context_run(ctx, 0);
 
 	return 0;
 }
 
+static void at_context_flush(struct context *ctx)
+{
+	tasklet_disable(&ctx->tasklet);
+
+	ctx->flushing = true;
+	context_tasklet((unsigned long)ctx);
+	ctx->flushing = false;
+
+	tasklet_enable(&ctx->tasklet);
+}
+
 static int handle_at_packet(struct context *context,
 			    struct descriptor *d,
 			    struct descriptor *last)
@@ -1227,7 +1376,7 @@
 	struct fw_ohci *ohci = context->ohci;
 	int evt;
 
-	if (last->transfer_status == 0)
+	if (last->transfer_status == 0 && !context->flushing)
 		/* This descriptor isn't done yet, stop iteration. */
 		return 0;
 
@@ -1261,11 +1410,15 @@
 		break;
 
 	case OHCI1394_evt_missing_ack:
-		/*
-		 * Using a valid (current) generation count, but the
-		 * node is not on the bus or not sending acks.
-		 */
-		packet->ack = RCODE_NO_ACK;
+		if (context->flushing)
+			packet->ack = RCODE_GENERATION;
+		else {
+			/*
+			 * Using a valid (current) generation count, but the
+			 * node is not on the bus or not sending acks.
+			 */
+			packet->ack = RCODE_NO_ACK;
+		}
 		break;
 
 	case ACK_COMPLETE + 0x10:
@@ -1278,6 +1431,13 @@
 		packet->ack = evt - 0x10;
 		break;
 
+	case OHCI1394_evt_no_status:
+		if (context->flushing) {
+			packet->ack = RCODE_GENERATION;
+			break;
+		}
+		/* fall through */
+
 	default:
 		packet->ack = RCODE_SEND_ERROR;
 		break;
@@ -1583,9 +1743,23 @@
 	/* FIXME: Document how the locking works. */
 	spin_lock_irqsave(&ohci->lock, flags);
 
-	ohci->generation = generation;
+	ohci->generation = -1; /* prevent AT packet queueing */
 	context_stop(&ohci->at_request_ctx);
 	context_stop(&ohci->at_response_ctx);
+
+	spin_unlock_irqrestore(&ohci->lock, flags);
+
+	/*
+	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
+	 * packets in the AT queues and software needs to drain them.
+	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
+	 */
+	at_context_flush(&ohci->at_request_ctx);
+	at_context_flush(&ohci->at_response_ctx);
+
+	spin_lock_irqsave(&ohci->lock, flags);
+
+	ohci->generation = generation;
 	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
 
 	if (ohci->quirks & QUIRK_RESET_PACKET)
@@ -1653,8 +1827,12 @@
 	if (!event || !~event)
 		return IRQ_NONE;
 
-	/* busReset must not be cleared yet, see OHCI 1.1 clause 7.2.3.2 */
-	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
+	/*
+	 * busReset and postedWriteErr must not be cleared yet
+	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
+	 */
+	reg_write(ohci, OHCI1394_IntEventClear,
+		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
 	log_irqs(event);
 
 	if (event & OHCI1394_selfIDComplete)
@@ -1672,30 +1850,41 @@
 	if (event & OHCI1394_respTxComplete)
 		tasklet_schedule(&ohci->at_response_ctx.tasklet);
 
-	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
-	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
+	if (event & OHCI1394_isochRx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
+		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
 
-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->ir_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}
 
-	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
-	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
+	if (event & OHCI1394_isochTx) {
+		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
+		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
 
-	while (iso_event) {
-		i = ffs(iso_event) - 1;
-		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
-		iso_event &= ~(1 << i);
+		while (iso_event) {
+			i = ffs(iso_event) - 1;
+			tasklet_schedule(
+				&ohci->it_context_list[i].context.tasklet);
+			iso_event &= ~(1 << i);
+		}
 	}
 
 	if (unlikely(event & OHCI1394_regAccessFail))
 		fw_error("Register access failure - "
 			 "please notify linux1394-devel@lists.sf.net\n");
 
-	if (unlikely(event & OHCI1394_postedWriteErr))
+	if (unlikely(event & OHCI1394_postedWriteErr)) {
+		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
+		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
+		reg_write(ohci, OHCI1394_IntEventClear,
+			  OHCI1394_postedWriteErr);
 		fw_error("PCI posted write error\n");
+	}
 
 	if (unlikely(event & OHCI1394_cycleTooLong)) {
 		if (printk_ratelimit())
@@ -1719,7 +1908,8 @@
 		spin_lock(&ohci->lock);
 		update_bus_time(ohci);
 		spin_unlock(&ohci->lock);
-	}
+	} else
+		flush_writes(ohci);
 
 	return IRQ_HANDLED;
 }
@@ -2495,6 +2685,10 @@
 		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
 		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
 		context_run(&ctx->context, control);
+
+		ctx->sync = sync;
+		ctx->tags = tags;
+
 		break;
 	}
 
@@ -2592,6 +2786,26 @@
 	return ret;
 }
 
+#ifdef CONFIG_PM
+static void ohci_resume_iso_dma(struct fw_ohci *ohci)
+{
+	int i;
+	struct iso_context *ctx;
+
+	for (i = 0 ; i < ohci->n_ir ; i++) {
+		ctx = &ohci->ir_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+
+	for (i = 0 ; i < ohci->n_it ; i++) {
+		ctx = &ohci->it_context_list[i];
+		if (ctx->context.running)
+			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
+	}
+}
+#endif
+
 static int queue_iso_transmit(struct iso_context *ctx,
 			      struct fw_iso_packet *packet,
 			      struct fw_iso_buffer *buffer,
@@ -2901,7 +3115,7 @@
 	struct fw_ohci *ohci;
 	u32 bus_options, max_receive, link_speed, version;
 	u64 guid;
-	int i, err, n_ir, n_it;
+	int i, err;
 	size_t size;
 
 	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2955,31 +3169,55 @@
 	if (param_quirks)
 		ohci->quirks = param_quirks;
 
-	ar_context_init(&ohci->ar_request_ctx, ohci,
-			OHCI1394_AsReqRcvContextControlSet);
+	/*
+	 * Because dma_alloc_coherent() allocates at least one page,
+	 * we save space by using a common buffer for the AR request/
+	 * response descriptors and the self IDs buffer.
+	 */
+	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
+	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
+	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
+					       PAGE_SIZE,
+					       &ohci->misc_buffer_bus,
+					       GFP_KERNEL);
+	if (!ohci->misc_buffer) {
+		err = -ENOMEM;
+		goto fail_iounmap;
+	}
 
-	ar_context_init(&ohci->ar_response_ctx, ohci,
-			OHCI1394_AsRspRcvContextControlSet);
+	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
+			      OHCI1394_AsReqRcvContextControlSet);
+	if (err < 0)
+		goto fail_misc_buf;
 
-	context_init(&ohci->at_request_ctx, ohci,
-		     OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
+			      OHCI1394_AsRspRcvContextControlSet);
+	if (err < 0)
+		goto fail_arreq_ctx;
 
-	context_init(&ohci->at_response_ctx, ohci,
-		     OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+	err = context_init(&ohci->at_request_ctx, ohci,
+			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
+	if (err < 0)
+		goto fail_arrsp_ctx;
+
+	err = context_init(&ohci->at_response_ctx, ohci,
+			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
+	if (err < 0)
+		goto fail_atreq_ctx;
 
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
 	ohci->ir_context_channels = ~0ULL;
 	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
-	n_ir = hweight32(ohci->ir_context_mask);
-	size = sizeof(struct iso_context) * n_ir;
+	ohci->n_ir = hweight32(ohci->ir_context_mask);
+	size = sizeof(struct iso_context) * ohci->n_ir;
 	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
 
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
 	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
-	n_it = hweight32(ohci->it_context_mask);
-	size = sizeof(struct iso_context) * n_it;
+	ohci->n_it = hweight32(ohci->it_context_mask);
+	size = sizeof(struct iso_context) * ohci->n_it;
 	ohci->it_context_list = kzalloc(size, GFP_KERNEL);
 
 	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
@@ -2987,15 +3225,8 @@
 		goto fail_contexts;
 	}
 
-	/* self-id dma buffer allocation */
-	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
-					       SELF_ID_BUF_SIZE,
-					       &ohci->self_id_bus,
-					       GFP_KERNEL);
-	if (ohci->self_id_cpu == NULL) {
-		err = -ENOMEM;
-		goto fail_contexts;
-	}
+	ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
+	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
 
 	bus_options = reg_read(ohci, OHCI1394_BusOptions);
 	max_receive = (bus_options >> 12) & 0xf;
@@ -3005,26 +3236,30 @@
 
 	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
 	if (err)
-		goto fail_self_id;
+		goto fail_contexts;
 
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
 	fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
 		  "%d IR + %d IT contexts, quirks 0x%x\n",
 		  dev_name(&dev->dev), version >> 16, version & 0xff,
-		  n_ir, n_it, ohci->quirks);
+		  ohci->n_ir, ohci->n_it, ohci->quirks);
 
 	return 0;
 
- fail_self_id:
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
  fail_contexts:
 	kfree(ohci->ir_context_list);
 	kfree(ohci->it_context_list);
 	context_release(&ohci->at_response_ctx);
+ fail_atreq_ctx:
 	context_release(&ohci->at_request_ctx);
+ fail_arrsp_ctx:
 	ar_context_release(&ohci->ar_response_ctx);
+ fail_arreq_ctx:
 	ar_context_release(&ohci->ar_request_ctx);
+ fail_misc_buf:
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
+ fail_iounmap:
 	pci_iounmap(dev, ohci->registers);
  fail_iomem:
 	pci_release_region(dev, 0);
@@ -3063,10 +3298,10 @@
 	if (ohci->config_rom)
 		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
 				  ohci->config_rom, ohci->config_rom_bus);
-	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
-			  ohci->self_id_cpu, ohci->self_id_bus);
 	ar_context_release(&ohci->ar_request_ctx);
 	ar_context_release(&ohci->ar_response_ctx);
+	dma_free_coherent(ohci->card.device, PAGE_SIZE,
+			  ohci->misc_buffer, ohci->misc_buffer_bus);
 	context_release(&ohci->at_request_ctx);
 	context_release(&ohci->at_response_ctx);
 	kfree(ohci->it_context_list);
@@ -3117,7 +3352,20 @@
 		return err;
 	}
 
-	return ohci_enable(&ohci->card, NULL, 0);
+	/* Some systems don't set up the GUID register on resume from RAM */
+	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
+					!reg_read(ohci, OHCI1394_GUIDHi)) {
+		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
+		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
+	}
+
+	err = ohci_enable(&ohci->card, NULL, 0);
+	if (err)
+		return err;
+
+	ohci_resume_iso_dma(ohci);
+
+	return 0;
 }
 #endif
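
Note on the fw-ohci hunks above: the three per-device DMA allocations (AR request
descriptors, AR response descriptors, self-ID buffer) are folded into one coherent
page, split at PAGE_SIZE/4 and PAGE_SIZE/2 as enforced by the two BUILD_BUG_ON()s.
A standalone sanity-check sketch of that layout follows; the AR_BUFFERS,
descriptor-size and SELF_ID_BUF_SIZE values are assumptions for illustration, not
taken from this diff.

#include <assert.h>

#define PAGE_SIZE		4096	/* typical page size on the affected systems */
#define AR_BUFFERS		32	/* assumed value, defined elsewhere in ohci.c */
#define DESCRIPTOR_SIZE		16	/* assumed sizeof(struct descriptor) */
#define SELF_ID_BUF_SIZE	2048	/* assumed value, defined elsewhere in ohci.c */

int main(void)
{
	/* AR request descriptors live at offset 0, AR response descriptors at
	 * PAGE_SIZE/4, the self-ID buffer at PAGE_SIZE/2, mirroring the
	 * ar_context_init() offsets and self_id_cpu/self_id_bus assignments. */
	assert(AR_BUFFERS * DESCRIPTOR_SIZE <= PAGE_SIZE / 4);
	assert(SELF_ID_BUF_SIZE <= PAGE_SIZE / 2);
	return 0;
}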
 
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 082495b..664660e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -118,7 +118,7 @@
 
 config GPIO_VX855
 	tristate "VIA VX855/VX875 GPIO"
-	depends on GPIOLIB
+	depends on GPIOLIB && MFD_SUPPORT && PCI
 	select MFD_CORE
 	select MFD_VX855
 	help
@@ -295,7 +295,7 @@
 
 config GPIO_CS5535
 	tristate "AMD CS5535/CS5536 GPIO support"
-	depends on PCI && !CS5535_GPIO
+	depends on PCI && X86 && !CS5535_GPIO
 	help
 	  The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
 	  can be used for quite a number of things.  The CS5535/6 is found on
@@ -333,6 +333,15 @@
 	  which is an IOH(Input/Output Hub) for x86 embedded processor.
 	  This driver can access PCH GPIO device.
 
+config GPIO_ML_IOH
+	tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+	depends on PCI
+	help
+	  ML7213 is a companion chip for the Intel Atom E6xx series.
+	  This driver can be used for the OKI SEMICONDUCTOR ML7213 IOH
+	  (Input/Output Hub), which is used for IVI (In-Vehicle Infotainment).
+	  This driver can access the IOH's GPIO device.
+
 config GPIO_TIMBERDALE
 	bool "Support for timberdale GPIO IP"
 	depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -342,6 +351,7 @@
 config GPIO_RDC321X
 	tristate "RDC R-321x GPIO support"
 	depends on PCI && GPIOLIB
+	select MFD_SUPPORT
 	select MFD_CORE
 	select MFD_RDC321X
 	help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 39bfd7a..3351cf8 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -41,3 +41,4 @@
 obj-$(CONFIG_GPIO_JANZ_TTL)	+= janz-ttl.o
 obj-$(CONFIG_GPIO_SX150X)	+= sx150x.o
 obj-$(CONFIG_GPIO_VX855)	+= vx855_gpio.o
+obj-$(CONFIG_GPIO_ML_IOH)	+= ml_ioh_gpio.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 0871f78..33fc685 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -146,9 +146,10 @@
 	return dev->irq_base + off;
 }
 
-static void adp5588_irq_bus_lock(unsigned int irq)
+static void adp5588_irq_bus_lock(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+
 	mutex_lock(&dev->irq_lock);
 }
 
@@ -160,9 +161,9 @@
   * and unlocks the bus.
   */
 
-static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
 	int i;
 
 	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
@@ -175,31 +176,31 @@
 	mutex_unlock(&dev->irq_lock);
 }
 
-static void adp5588_irq_mask(unsigned int irq)
+static void adp5588_irq_mask(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	unsigned gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	unsigned gpio = d->irq - dev->irq_base;
 
 	dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
 }
 
-static void adp5588_irq_unmask(unsigned int irq)
+static void adp5588_irq_unmask(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	unsigned gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	unsigned gpio = d->irq - dev->irq_base;
 
 	dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
 }
 
-static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	uint16_t gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	uint16_t gpio = d->irq - dev->irq_base;
 	unsigned bank, bit;
 
 	if ((type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -222,11 +223,11 @@
 
 static struct irq_chip adp5588_irq_chip = {
 	.name			= "adp5588",
-	.mask			= adp5588_irq_mask,
-	.unmask			= adp5588_irq_unmask,
-	.bus_lock		= adp5588_irq_bus_lock,
-	.bus_sync_unlock	= adp5588_irq_bus_sync_unlock,
-	.set_type		= adp5588_irq_set_type,
+	.irq_mask		= adp5588_irq_mask,
+	.irq_unmask		= adp5588_irq_unmask,
+	.irq_bus_lock		= adp5588_irq_bus_lock,
+	.irq_bus_sync_unlock	= adp5588_irq_bus_sync_unlock,
+	.irq_set_type		= adp5588_irq_set_type,
 };
 
 static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
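
The adp5588 conversion above, like the langwell, max732x, pca953x, pl061, stmpe,
sx150x, tc3589x, timbgpio and vr41xx conversions further down, is the same
mechanical genirq change: irq_chip callbacks now receive a struct irq_data pointer
instead of a bare interrupt number, and the struct irq_chip members gain an irq_
prefix. A condensed sketch of the pattern, using a made-up "foo" driver rather than
any driver in this merge:

#include <linux/irq.h>
#include <linux/bitops.h>

struct foo_chip {			/* hypothetical driver state */
	unsigned long mask;
	int irq_base;
};

/* old style: callback gets a bare irq number and looks the chip data up */
static void foo_irq_mask_old(unsigned int irq)
{
	struct foo_chip *chip = get_irq_chip_data(irq);

	chip->mask &= ~BIT(irq - chip->irq_base);
}

/* new style: struct irq_data carries both the irq number and the chip data */
static void foo_irq_mask_new(struct irq_data *d)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(d);

	chip->mask &= ~BIT(d->irq - chip->irq_base);
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask_new,	/* was .mask = foo_irq_mask_old */
};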
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index d3e55a0..0d05ea7 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -11,13 +11,13 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/cs5535.h>
+#include <asm/msr.h>
 
 #define DRV_NAME "cs5535-gpio"
-#define GPIO_BAR 1
 
 /*
  * Some GPIO pins
@@ -46,7 +46,7 @@
 	struct gpio_chip chip;
 	resource_size_t base;
 
-	struct pci_dev *pdev;
+	struct platform_device *pdev;
 	spinlock_t lock;
 } cs5535_gpio_chip;
 
@@ -144,6 +144,57 @@
 }
 EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
 
+int cs5535_gpio_set_irq(unsigned group, unsigned irq)
+{
+	uint32_t lo, hi;
+
+	if (group > 7 || irq > 15)
+		return -EINVAL;
+
+	rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+
+	lo &= ~(0xF << (group * 4));
+	lo |= (irq & 0xF) << (group * 4);
+
+	wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_set_irq);
+
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme)
+{
+	struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+	uint32_t shift = (offset % 8) * 4;
+	unsigned long flags;
+	uint32_t val;
+
+	if (offset >= 24)
+		offset = GPIO_MAP_W;
+	else if (offset >= 16)
+		offset = GPIO_MAP_Z;
+	else if (offset >= 8)
+		offset = GPIO_MAP_Y;
+	else
+		offset = GPIO_MAP_X;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	val = inl(chip->base + offset);
+
+	/* Clear whatever was there before */
+	val &= ~(0xF << shift);
+
+	/* Set the new value */
+	val |= ((pair & 7) << shift);
+
+	/* Set the PME bit if this is a PME event */
+	if (pme)
+		val |= (1 << (shift + 3));
+
+	outl(val, chip->base + offset);
+	spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_setup_event);
+
 /*
  * Generic gpio_chip API support.
  */
@@ -249,10 +300,10 @@
 	},
 };
 
-static int __init cs5535_gpio_probe(struct pci_dev *pdev,
-		const struct pci_device_id *pci_id)
+static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
 {
-	int err;
+	struct resource *res;
+	int err = -EIO;
 	ulong mask_orig = mask;
 
 	/* There are two ways to get the GPIO base address; one is by
@@ -262,25 +313,23 @@
 	 * it turns out to be unreliable in the face of crappy BIOSes, we
 	 * can always go back to using MSRs.. */
 
-	err = pci_enable_device_io(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "can't enable device IO\n");
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
 		goto done;
 	}
 
-	err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
+	if (!request_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "can't request region\n");
 		goto done;
 	}
 
 	/* set up the driver-specific struct */
-	cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
+	cs5535_gpio_chip.base = res->start;
 	cs5535_gpio_chip.pdev = pdev;
 	spin_lock_init(&cs5535_gpio_chip.lock);
 
-	dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
-			(unsigned long long) cs5535_gpio_chip.base);
+	dev_info(&pdev->dev, "reserved resource region %pR\n", res);
 
 	/* mask out reserved pins */
 	mask &= 0x1F7FFFFF;
@@ -298,78 +347,49 @@
 	if (err)
 		goto release_region;
 
-	dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
+	dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
 	return 0;
 
 release_region:
-	pci_release_region(pdev, GPIO_BAR);
+	release_region(res->start, resource_size(res));
 done:
 	return err;
 }
 
-static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
+static int __devexit cs5535_gpio_remove(struct platform_device *pdev)
 {
+	struct resource *r;
 	int err;
 
 	err = gpiochip_remove(&cs5535_gpio_chip.chip);
 	if (err) {
 		/* uhh? */
 		dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
+		return err;
 	}
-	pci_release_region(pdev, GPIO_BAR);
+
+	r = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	release_region(r->start, resource_size(r));
+	return 0;
 }
 
-static struct pci_device_id cs5535_gpio_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
-	{ 0, },
+static struct platform_driver cs5535_gpio_drv = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = cs5535_gpio_probe,
+	.remove = __devexit_p(cs5535_gpio_remove),
 };
-MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
-
-/*
- * We can't use the standard PCI driver registration stuff here, since
- * that allows only one driver to bind to each PCI device (and we want
- * multiple drivers to be able to bind to the device).  Instead, manually
- * scan for the PCI device, request a single region, and keep track of the
- * devices that we're using.
- */
-
-static int __init cs5535_gpio_scan_pci(void)
-{
-	struct pci_dev *pdev;
-	int err = -ENODEV;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
-		pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
-				cs5535_gpio_pci_tbl[i].device, NULL);
-		if (pdev) {
-			err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
-			if (err)
-				pci_dev_put(pdev);
-
-			/* we only support a single CS5535/6 southbridge */
-			break;
-		}
-	}
-
-	return err;
-}
-
-static void __exit cs5535_gpio_free_pci(void)
-{
-	cs5535_gpio_remove(cs5535_gpio_chip.pdev);
-	pci_dev_put(cs5535_gpio_chip.pdev);
-}
 
 static int __init cs5535_gpio_init(void)
 {
-	return cs5535_gpio_scan_pci();
+	return platform_driver_register(&cs5535_gpio_drv);
 }
 
 static void __exit cs5535_gpio_exit(void)
 {
-	cs5535_gpio_free_pci();
+	platform_driver_unregister(&cs5535_gpio_drv);
 }
 
 module_init(cs5535_gpio_init);
@@ -378,3 +398,4 @@
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
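
The cs5535-gpio rework above also exports two new helpers, cs5535_gpio_set_irq()
and cs5535_gpio_setup_event(). A hypothetical caller, not part of this merge, might
use them as below; the prototypes match the definitions in the hunk, while the
group, IRQ, pin and pair numbers are arbitrary examples (the driver itself is now
bound through a "cs5535-gpio" platform device, presumably provided by an MFD core
driver):

/* Prototypes as defined in cs5535-gpio.c above; normally these would come
 * from a header such as <linux/cs5535.h> (assumed). */
extern int cs5535_gpio_set_irq(unsigned group, unsigned irq);
extern void cs5535_gpio_setup_event(unsigned offset, int pair, int pme);

static int example_wire_cs5535_events(void)
{
	int err;

	/* Route filter/event group 1 to legacy IRQ 5 (a nibble of MSR_PIC_ZSEL_HIGH). */
	err = cs5535_gpio_set_irq(1, 5);
	if (err)
		return err;

	/* Map GPIO 26 to event pair 2, not flagged as a PME event. */
	cs5535_gpio_setup_event(26, 2, 0);

	return 0;
}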
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 64db9dc..d81cc74 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -134,10 +134,10 @@
 	return lnw->irq_base + offset;
 }
 
-static int lnw_irq_type(unsigned irq, unsigned type)
+static int lnw_irq_type(struct irq_data *d, unsigned type)
 {
-	struct lnw_gpio *lnw = get_irq_chip_data(irq);
-	u32 gpio = irq - lnw->irq_base;
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = d->irq - lnw->irq_base;
 	unsigned long flags;
 	u32 value;
 	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
@@ -162,19 +162,19 @@
 	return 0;
 }
 
-static void lnw_irq_unmask(unsigned irq)
+static void lnw_irq_unmask(struct irq_data *d)
 {
 }
 
-static void lnw_irq_mask(unsigned irq)
+static void lnw_irq_mask(struct irq_data *d)
 {
 }
 
 static struct irq_chip lnw_irqchip = {
 	.name		= "LNW-GPIO",
-	.mask		= lnw_irq_mask,
-	.unmask		= lnw_irq_unmask,
-	.set_type	= lnw_irq_type,
+	.irq_mask	= lnw_irq_mask,
+	.irq_unmask	= lnw_irq_unmask,
+	.irq_set_type	= lnw_irq_type,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {   /* pin number */
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 9cad60f..9e1d01f 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -327,40 +327,40 @@
 	return chip->irq_base + off;
 }
 
-static void max732x_irq_mask(unsigned int irq)
+static void max732x_irq_mask(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+	chip->irq_mask_cur &= ~(1 << (d->irq - chip->irq_base));
 }
 
-static void max732x_irq_unmask(unsigned int irq)
+static void max732x_irq_unmask(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+	chip->irq_mask_cur |= 1 << (d->irq - chip->irq_base);
 }
 
-static void max732x_irq_bus_lock(unsigned int irq)
+static void max732x_irq_bus_lock(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&chip->irq_lock);
 	chip->irq_mask_cur = chip->irq_mask;
 }
 
-static void max732x_irq_bus_sync_unlock(unsigned int irq)
+static void max732x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	max732x_irq_update_mask(chip);
 	mutex_unlock(&chip->irq_lock);
 }
 
-static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
-	uint16_t off = irq - chip->irq_base;
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+	uint16_t off = d->irq - chip->irq_base;
 	uint16_t mask = 1 << off;
 
 	if (!(mask & chip->dir_input)) {
@@ -371,7 +371,7 @@
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -390,11 +390,11 @@
 
 static struct irq_chip max732x_irq_chip = {
 	.name			= "max732x",
-	.mask			= max732x_irq_mask,
-	.unmask			= max732x_irq_unmask,
-	.bus_lock		= max732x_irq_bus_lock,
-	.bus_sync_unlock	= max732x_irq_bus_sync_unlock,
-	.set_type		= max732x_irq_set_type,
+	.irq_mask		= max732x_irq_mask,
+	.irq_unmask		= max732x_irq_unmask,
+	.irq_bus_lock		= max732x_irq_bus_lock,
+	.irq_bus_sync_unlock	= max732x_irq_bus_sync_unlock,
+	.irq_set_type		= max732x_irq_set_type,
 };
 
 static uint8_t max732x_irq_pending(struct max732x_chip *chip)
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
new file mode 100644
index 0000000..cead8e6
--- /dev/null
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCI_VENDOR_ID_ROHM             0x10DB
+
+struct ioh_reg_comn {
+	u32	ien;
+	u32	istatus;
+	u32	idisp;
+	u32	iclr;
+	u32	imask;
+	u32	imaskclr;
+	u32	po;
+	u32	pi;
+	u32	pm;
+	u32	im_0;
+	u32	im_1;
+	u32	reserved;
+};
+
+struct ioh_regs {
+	struct ioh_reg_comn regs[8];
+	u32 reserve1[16];
+	u32 ioh_sel_reg[4];
+	u32 reserve2[11];
+	u32 srst;
+};
+
+/**
+ * struct ioh_gpio_reg_data - The register store data.
+ * @po_reg:	To store contents of PO register.
+ * @pm_reg:	To store contents of PM register.
+ */
+struct ioh_gpio_reg_data {
+	u32 po_reg;
+	u32 pm_reg;
+};
+
+/**
+ * struct ioh_gpio - GPIO private data structure.
+ * @base:			PCI base address of the memory-mapped I/O registers.
+ * @reg:			Memory-mapped IOH GPIO register list.
+ * @dev:			Pointer to the device structure.
+ * @gpio:			Data for the GPIO infrastructure.
+ * @ioh_gpio_reg:		Memory-mapped register data is saved here
+ *				during suspend.
+ * @ch:				Indicates the GPIO channel.
+ */
+struct ioh_gpio {
+	void __iomem *base;
+	struct ioh_regs __iomem *reg;
+	struct device *dev;
+	struct gpio_chip gpio;
+	struct ioh_gpio_reg_data ioh_gpio_reg;
+	struct mutex lock;
+	int ch;
+};
+
+static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
+
+static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+	u32 reg_val;
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+
+	mutex_lock(&chip->lock);
+	reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+	if (val)
+		reg_val |= (1 << nr);
+	else
+		reg_val &= ~(1 << nr);
+
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+	mutex_unlock(&chip->lock);
+}
+
+static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+
+	return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr);
+}
+
+static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+				     int val)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+	u32 pm;
+	u32 reg_val;
+
+	mutex_lock(&chip->lock);
+	pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+					((1 << num_ports[chip->ch]) - 1);
+	pm |= (1 << nr);
+	iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+
+	reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+	if (val)
+		reg_val |= (1 << nr);
+	else
+		reg_val &= ~(1 << nr);
+
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+	u32 pm;
+
+	mutex_lock(&chip->lock);
+	pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+				((1 << num_ports[chip->ch]) - 1);
+	pm &= ~(1 << nr);
+	iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+/*
+ * Save the register configuration of the GPIO device.
+ */
+static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+{
+	chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po);
+	chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+{
+	/* restore the saved contents of the PO register */
+	iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po);
+	/* restore the saved contents of the PM register */
+	iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
+}
+
+static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
+{
+	struct gpio_chip *gpio = &chip->gpio;
+
+	gpio->label = dev_name(chip->dev);
+	gpio->owner = THIS_MODULE;
+	gpio->direction_input = ioh_gpio_direction_input;
+	gpio->get = ioh_gpio_get;
+	gpio->direction_output = ioh_gpio_direction_output;
+	gpio->set = ioh_gpio_set;
+	gpio->dbg_show = NULL;
+	gpio->base = -1;
+	gpio->ngpio = num_port;
+	gpio->can_sleep = 0;
+}
+
+static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *id)
+{
+	int ret;
+	int i;
+	struct ioh_gpio *chip;
+	void __iomem *base;
+	struct ioh_gpio *chip_save;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__);
+		goto err_pci_enable;
+	}
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_request_regions failed-%d", ret);
+		goto err_request_regions;
+	}
+
+	base = pci_iomap(pdev, 1, 0);
+	if (base == 0) {
+		dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
+		ret = -ENOMEM;
+		goto err_iomap;
+	}
+
+	chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL);
+	if (chip_save == NULL) {
+		dev_err(&pdev->dev, "%s : kzalloc failed", __func__);
+		ret = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	chip = chip_save;
+	for (i = 0; i < 8; i++, chip++) {
+		chip->dev = &pdev->dev;
+		chip->base = base;
+		chip->reg = chip->base;
+		chip->ch = i;
+		mutex_init(&chip->lock);
+		ioh_gpio_setup(chip, num_ports[i]);
+		ret = gpiochip_add(&chip->gpio);
+		if (ret) {
+			dev_err(&pdev->dev, "IOH gpio: Failed to register GPIO\n");
+			goto err_gpiochip_add;
+		}
+	}
+
+	chip = chip_save;
+	pci_set_drvdata(pdev, chip);
+
+	return 0;
+
+err_gpiochip_add:
+	for (; i != 0; i--) {
+		chip--;
+		ret = gpiochip_remove(&chip->gpio);
+		if (ret)
+			dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
+	}
+	kfree(chip_save);
+
+err_kzalloc:
+	pci_iounmap(pdev, base);
+
+err_iomap:
+	pci_release_regions(pdev);
+
+err_request_regions:
+	pci_disable_device(pdev);
+
+err_pci_enable:
+
+	dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+	return ret;
+}
+
+static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
+{
+	int err;
+	int i;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+	struct ioh_gpio *chip_save;
+
+	chip_save = chip;
+	for (i = 0; i < 8; i++, chip++) {
+		err = gpiochip_remove(&chip->gpio);
+		if (err)
+			dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+	}
+
+	chip = chip_save;
+	pci_iounmap(pdev, chip->base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	s32 ret;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+	ioh_gpio_save_reg_conf(chip);
+	ioh_gpio_restore_reg_conf(chip);
+
+	ret = pci_save_state(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+		return ret;
+	}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_enable_wake(pdev, PCI_D0, 1);
+	if (ret)
+		dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+	return 0;
+}
+
+static int ioh_gpio_resume(struct pci_dev *pdev)
+{
+	s32 ret;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+	ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+		return ret;
+	}
+	pci_restore_state(pdev);
+
+	iowrite32(0x01, &chip->reg->srst);
+	iowrite32(0x00, &chip->reg->srst);
+	ioh_gpio_restore_reg_conf(chip);
+
+	return 0;
+}
+#else
+#define ioh_gpio_suspend NULL
+#define ioh_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
+	{ 0, }
+};
+
+static struct pci_driver ioh_gpio_driver = {
+	.name = "ml_ioh_gpio",
+	.id_table = ioh_gpio_pcidev_id,
+	.probe = ioh_gpio_probe,
+	.remove = __devexit_p(ioh_gpio_remove),
+	.suspend = ioh_gpio_suspend,
+	.resume = ioh_gpio_resume
+};
+
+static int __init ioh_gpio_pci_init(void)
+{
+	return pci_register_driver(&ioh_gpio_driver);
+}
+module_init(ioh_gpio_pci_init);
+
+static void __exit ioh_gpio_pci_exit(void)
+{
+	pci_unregister_driver(&ioh_gpio_driver);
+}
+module_exit(ioh_gpio_pci_exit);
+
+MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver");
+MODULE_LICENSE("GPL");
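
Each of the eight ML7213 IOH banks registers as an ordinary gpio_chip with a
dynamically assigned base (gpio->base = -1), so consumers use the generic gpiolib
calls of this kernel. A hypothetical consumer sketch, with an assumed base number
and an arbitrary pin:

#include <linux/gpio.h>

#define IOH_GPIO_BASE	200	/* assumed dynamic base assigned by gpiochip_add() */

static int example_drive_ioh_pin(void)
{
	unsigned gpio = IOH_GPIO_BASE + 3;	/* arbitrary pin in the first bank */
	int err;

	err = gpio_request(gpio, "ml_ioh example");
	if (err)
		return err;

	/* Ends up in ioh_gpio_direction_output(): sets the PM bit, then PO. */
	err = gpio_direction_output(gpio, 1);
	if (!err)
		gpio_set_value(gpio, 0);	/* ends up in ioh_gpio_set() */

	gpio_free(gpio);
	return err;
}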
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 5018666..a261972 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -228,30 +228,30 @@
 	return chip->irq_base + off;
 }
 
-static void pca953x_irq_mask(unsigned int irq)
+static void pca953x_irq_mask(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask &= ~(1 << (irq - chip->irq_base));
+	chip->irq_mask &= ~(1 << (d->irq - chip->irq_base));
 }
 
-static void pca953x_irq_unmask(unsigned int irq)
+static void pca953x_irq_unmask(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask |= 1 << (irq - chip->irq_base);
+	chip->irq_mask |= 1 << (d->irq - chip->irq_base);
 }
 
-static void pca953x_irq_bus_lock(unsigned int irq)
+static void pca953x_irq_bus_lock(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 	uint16_t new_irqs;
 	uint16_t level;
 
@@ -268,15 +268,15 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
-	uint16_t level = irq - chip->irq_base;
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+	uint16_t level = d->irq - chip->irq_base;
 	uint16_t mask = 1 << level;
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -295,11 +295,11 @@
 
 static struct irq_chip pca953x_irq_chip = {
 	.name			= "pca953x",
-	.mask			= pca953x_irq_mask,
-	.unmask			= pca953x_irq_unmask,
-	.bus_lock		= pca953x_irq_bus_lock,
-	.bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
-	.set_type		= pca953x_irq_set_type,
+	.irq_mask		= pca953x_irq_mask,
+	.irq_unmask		= pca953x_irq_unmask,
+	.irq_bus_lock		= pca953x_irq_bus_lock,
+	.irq_bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
+	.irq_set_type		= pca953x_irq_set_type,
 };
 
 static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 5005990..2975d22 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -129,10 +129,10 @@
 /*
  * PL061 GPIO IRQ
  */
-static void pl061_irq_disable(unsigned irq)
+static void pl061_irq_disable(struct irq_data *d)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpioie;
 
@@ -143,10 +143,10 @@
 	spin_unlock_irqrestore(&chip->irq_lock, flags);
 }
 
-static void pl061_irq_enable(unsigned irq)
+static void pl061_irq_enable(struct irq_data *d)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpioie;
 
@@ -157,10 +157,10 @@
 	spin_unlock_irqrestore(&chip->irq_lock, flags);
 }
 
-static int pl061_irq_type(unsigned irq, unsigned trigger)
+static int pl061_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpiois, gpioibe, gpioiev;
 
@@ -203,9 +203,9 @@
 
 static struct irq_chip pl061_irqchip = {
 	.name		= "GPIO",
-	.enable		= pl061_irq_enable,
-	.disable	= pl061_irq_disable,
-	.set_type	= pl061_irq_type,
+	.irq_enable	= pl061_irq_enable,
+	.irq_disable	= pl061_irq_disable,
+	.irq_set_type	= pl061_irq_type,
 };
 
 static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -214,7 +214,7 @@
 	struct list_head *ptr;
 	struct pl061_gpio *chip;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	list_for_each(ptr, chip_list) {
 		unsigned long pending;
 		int offset;
@@ -229,7 +229,7 @@
 		for_each_set_bit(offset, &pending, PL061_GPIO_NR)
 			generic_handle_irq(pl061_to_irq(&chip->gc, offset));
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static int pl061_probe(struct amba_device *dev, struct amba_id *id)
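
The pl061 hunks above (and the timbgpio hunks below) also convert the
parent-interrupt side: a chained handler now acks and unmasks through
desc->irq_data rather than through the old desc->chip methods. A stripped-down
sketch with a made-up "foo" demux handler:

#include <linux/irq.h>

static void foo_demux_handler(unsigned irq, struct irq_desc *desc)
{
	/* was: desc->chip->ack(irq); */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	/* ... read the controller's pending bits and call generic_handle_irq()
	 * for each set child interrupt, as pl061_irq_handler() does ... */

	/* was: desc->chip->unmask(irq); */
	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}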
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
index 7c9e6a0..eb2901f 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/stmpe-gpio.c
@@ -122,10 +122,10 @@
 	.can_sleep		= 1,
 };
 
-static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -145,16 +145,16 @@
 	return 0;
 }
 
-static void stmpe_gpio_irq_lock(unsigned int irq)
+static void stmpe_gpio_irq_lock(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&stmpe_gpio->irq_lock);
 }
 
-static void stmpe_gpio_irq_sync_unlock(unsigned int irq)
+static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
 	struct stmpe *stmpe = stmpe_gpio->stmpe;
 	int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
 	static const u8 regmap[] = {
@@ -180,20 +180,20 @@
 	mutex_unlock(&stmpe_gpio->irq_lock);
 }
 
-static void stmpe_gpio_irq_mask(unsigned int irq)
+static void stmpe_gpio_irq_mask(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
 
-static void stmpe_gpio_irq_unmask(unsigned int irq)
+static void stmpe_gpio_irq_unmask(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -202,11 +202,11 @@
 
 static struct irq_chip stmpe_gpio_irq_chip = {
 	.name			= "stmpe-gpio",
-	.bus_lock		= stmpe_gpio_irq_lock,
-	.bus_sync_unlock	= stmpe_gpio_irq_sync_unlock,
-	.mask			= stmpe_gpio_irq_mask,
-	.unmask			= stmpe_gpio_irq_unmask,
-	.set_type		= stmpe_gpio_irq_set_type,
+	.irq_bus_lock		= stmpe_gpio_irq_lock,
+	.irq_bus_sync_unlock	= stmpe_gpio_irq_sync_unlock,
+	.irq_mask		= stmpe_gpio_irq_mask,
+	.irq_unmask		= stmpe_gpio_irq_unmask,
+	.irq_set_type		= stmpe_gpio_irq_set_type,
 };
 
 static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index 823559a..e60be00 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -304,36 +304,36 @@
 	return chip->irq_base + offset;
 }
 
-static void sx150x_irq_mask(unsigned int irq)
+static void sx150x_irq_mask(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
 	sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
 }
 
-static void sx150x_irq_unmask(unsigned int irq)
+static void sx150x_irq_unmask(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
 	sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
 			 chip->irq_sense >> (n * 2));
 }
 
-static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n, val = 0;
 
@@ -341,7 +341,7 @@
 		return -EINVAL;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	if (flow_type & IRQ_TYPE_EDGE_RISING)
 		val |= 0x1;
@@ -386,9 +386,9 @@
 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
 }
 
-static void sx150x_irq_bus_lock(unsigned int irq)
+static void sx150x_irq_bus_lock(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
@@ -396,9 +396,9 @@
 	mutex_lock(&chip->lock);
 }
 
-static void sx150x_irq_bus_sync_unlock(unsigned int irq)
+static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
@@ -437,16 +437,16 @@
 	if (pdata->oscio_is_gpo)
 		++chip->gpio_chip.ngpio;
 
-	chip->irq_chip.name            = client->name;
-	chip->irq_chip.mask            = sx150x_irq_mask;
-	chip->irq_chip.unmask          = sx150x_irq_unmask;
-	chip->irq_chip.set_type        = sx150x_irq_set_type;
-	chip->irq_chip.bus_lock        = sx150x_irq_bus_lock;
-	chip->irq_chip.bus_sync_unlock = sx150x_irq_bus_sync_unlock;
-	chip->irq_summary              = -1;
-	chip->irq_base                 = -1;
-	chip->irq_sense                = 0;
-	chip->irq_set_type_pending     = 0;
+	chip->irq_chip.name                = client->name;
+	chip->irq_chip.irq_mask            = sx150x_irq_mask;
+	chip->irq_chip.irq_unmask          = sx150x_irq_unmask;
+	chip->irq_chip.irq_set_type        = sx150x_irq_set_type;
+	chip->irq_chip.irq_bus_lock        = sx150x_irq_bus_lock;
+	chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+	chip->irq_summary                  = -1;
+	chip->irq_base                     = -1;
+	chip->irq_sense                    = 0;
+	chip->irq_set_type_pending         = 0;
 }
 
 static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c
index 180d584..27200af 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/tc3589x-gpio.c
@@ -110,10 +110,10 @@
 	.can_sleep		= 1,
 };
 
-static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -137,16 +137,16 @@
 	return 0;
 }
 
-static void tc3589x_gpio_irq_lock(unsigned int irq)
+static void tc3589x_gpio_irq_lock(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&tc3589x_gpio->irq_lock);
 }
 
-static void tc3589x_gpio_irq_sync_unlock(unsigned int irq)
+static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	static const u8 regmap[] = {
 		[REG_IBE]	= TC3589x_GPIOIBE0,
@@ -172,20 +172,20 @@
 	mutex_unlock(&tc3589x_gpio->irq_lock);
 }
 
-static void tc3589x_gpio_irq_mask(unsigned int irq)
+static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
 
-static void tc3589x_gpio_irq_unmask(unsigned int irq)
+static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -194,11 +194,11 @@
 
 static struct irq_chip tc3589x_gpio_irq_chip = {
 	.name			= "tc3589x-gpio",
-	.bus_lock		= tc3589x_gpio_irq_lock,
-	.bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
-	.mask			= tc3589x_gpio_irq_mask,
-	.unmask			= tc3589x_gpio_irq_unmask,
-	.set_type		= tc3589x_gpio_irq_set_type,
+	.irq_bus_lock		= tc3589x_gpio_irq_lock,
+	.irq_bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
+	.irq_mask		= tc3589x_gpio_irq_mask,
+	.irq_unmask		= tc3589x_gpio_irq_unmask,
+	.irq_set_type		= tc3589x_gpio_irq_set_type,
 };
 
 static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 4529366..58c8f30 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -109,10 +109,10 @@
 /*
  * GPIO IRQ
  */
-static void timbgpio_irq_disable(unsigned irq)
+static void timbgpio_irq_disable(struct irq_data *d)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgpio->lock, flags);
@@ -121,10 +121,10 @@
 	spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
-static void timbgpio_irq_enable(unsigned irq)
+static void timbgpio_irq_enable(struct irq_data *d)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgpio->lock, flags);
@@ -133,10 +133,10 @@
 	spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 	u32 lvr, flr, bflr = 0;
 	u32 ver;
@@ -199,7 +199,7 @@
 	unsigned long ipr;
 	int offset;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
 	ipr = ioread32(tgpio->membase + TGPIO_IPR);
 	iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
@@ -217,9 +217,9 @@
 
 static struct irq_chip timbgpio_irqchip = {
 	.name		= "GPIO",
-	.enable		= timbgpio_irq_enable,
-	.disable	= timbgpio_irq_disable,
-	.set_type	= timbgpio_irq_type,
+	.irq_enable	= timbgpio_irq_enable,
+	.irq_disable	= timbgpio_irq_disable,
+	.irq_set_type	= timbgpio_irq_type,
 };
 
 static int __devinit timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index b16c9a8..cffa3bd 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -111,69 +111,69 @@
 	return data;
 }
 
-static void ack_giuint_low(unsigned int irq)
+static void ack_giuint_low(struct irq_data *d)
 {
-	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
-static void mask_giuint_low(unsigned int irq)
+static void mask_giuint_low(struct irq_data *d)
 {
-	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
-static void mask_ack_giuint_low(unsigned int irq)
+static void mask_ack_giuint_low(struct irq_data *d)
 {
 	unsigned int pin;
 
-	pin = GPIO_PIN_OF_IRQ(irq);
+	pin = GPIO_PIN_OF_IRQ(d->irq);
 	giu_clear(GIUINTENL, 1 << pin);
 	giu_write(GIUINTSTATL, 1 << pin);
 }
 
-static void unmask_giuint_low(unsigned int irq)
+static void unmask_giuint_low(struct irq_data *d)
 {
-	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
 static struct irq_chip giuint_low_irq_chip = {
 	.name		= "GIUINTL",
-	.ack		= ack_giuint_low,
-	.mask		= mask_giuint_low,
-	.mask_ack	= mask_ack_giuint_low,
-	.unmask		= unmask_giuint_low,
+	.irq_ack	= ack_giuint_low,
+	.irq_mask	= mask_giuint_low,
+	.irq_mask_ack	= mask_ack_giuint_low,
+	.irq_unmask	= unmask_giuint_low,
 };
 
-static void ack_giuint_high(unsigned int irq)
+static void ack_giuint_high(struct irq_data *d)
 {
 	giu_write(GIUINTSTATH,
-		  1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+		  1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
-static void mask_giuint_high(unsigned int irq)
+static void mask_giuint_high(struct irq_data *d)
 {
-	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
-static void mask_ack_giuint_high(unsigned int irq)
+static void mask_ack_giuint_high(struct irq_data *d)
 {
 	unsigned int pin;
 
-	pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+	pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
 	giu_clear(GIUINTENH, 1 << pin);
 	giu_write(GIUINTSTATH, 1 << pin);
 }
 
-static void unmask_giuint_high(unsigned int irq)
+static void unmask_giuint_high(struct irq_data *d)
 {
-	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
 static struct irq_chip giuint_high_irq_chip = {
 	.name		= "GIUINTH",
-	.ack		= ack_giuint_high,
-	.mask		= mask_giuint_high,
-	.mask_ack	= mask_ack_giuint_high,
-	.unmask		= unmask_giuint_high,
+	.irq_ack	= ack_giuint_high,
+	.irq_mask	= mask_giuint_high,
+	.irq_mask_ack	= mask_ack_giuint_high,
+	.irq_unmask	= unmask_giuint_high,
 };
 
 static int giu_get_irq(unsigned int irq)
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 618398e..c822baa 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -35,6 +35,29 @@
 	return container_of(chip, struct wm8994_gpio, gpio_chip);
 }
 
+static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+	struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+	switch (wm8994->type) {
+	case WM8958:
+		switch (offset) {
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+		case 6:
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
 	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -136,6 +159,7 @@
 static struct gpio_chip template_chip = {
 	.label			= "wm8994",
 	.owner			= THIS_MODULE,
+	.request		= wm8994_gpio_request,
 	.direction_input	= wm8994_gpio_direction_in,
 	.get			= wm8994_gpio_get,
 	.direction_output	= wm8994_gpio_direction_out,
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7af4436..64828a7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -107,7 +107,6 @@
 	select FB_CFB_IMAGEBLIT
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb9..0cb2ba5 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@
 }
 EXPORT_SYMBOL(drm_agp_bind_pages);
 
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
-	agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
 #endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2d4e17a..952b3d4 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,7 +336,7 @@
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
@@ -350,6 +350,7 @@
 	if (!crtc->enabled)
 		return true;
 
+	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -427,11 +428,21 @@
 
 	}
 
+	/* Store real post-adjustment hardware mode. */
+	crtc->hwmode = *adjusted_mode;
+
+	/* Calculate and store various constants which
+	 * are later needed by vblank and swap-completion
+	 * timestamping. They are derived from true hwmode.
+	 */
+	drm_calc_timestamping_constants(crtc);
+
 	/* XXX free adjustedmode */
 	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
 	if (!ret) {
+		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
@@ -650,6 +661,7 @@
 						      old_fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
+				set->crtc->fb = old_fb;
 				ret = -EINVAL;
 				goto fail;
 			}
@@ -664,8 +676,10 @@
 			set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
 						set->x, set->y, old_fb);
-		if (ret != 0)
+		if (ret != 0) {
+			set->crtc->fb = old_fb;
 			goto fail;
+		}
 	}
 	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
 	for (i = 0; i < set->num_connectors; i++) {
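
The saved hwmode and the drm_calc_timestamping_constants() call added above feed
the vblank timestamping code introduced in drm_irq.c later in this diff. The
constants are straightforward to compute by hand; the standalone sketch below does
the arithmetic for a common 1920x1080@60 CEA timing (148.5 MHz dot clock, htotal
2200, vtotal 1125), which is an example and not a value from the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dotclock = 148500000ULL;	/* hwmode.clock * 1000, in Hz */
	uint64_t htotal = 2200, vtotal = 1125;

	/* Same integer math as drm_calc_timestamping_constants(). */
	int64_t pixeldur_ns = 1000000000ULL / dotclock;			/* 6 ns */
	int64_t linedur_ns  = htotal * 1000000000ULL / dotclock;	/* 14814 ns */
	int64_t framedur_ns = (int64_t)vtotal * linedur_ns;		/* 16665750 ns */

	printf("pixel %lld ns, line %lld ns, frame %lld ns (~%.2f Hz)\n",
	       (long long)pixeldur_ns, (long long)linedur_ns,
	       (long long)framedur_ns, 1e9 / (double)framedur_ns);
	return 0;
}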
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4..5c4f9b9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -985,6 +985,8 @@
 	info->fix.type = FB_TYPE_PACKED_PIXELS;
 	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
 		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
 	info->fix.type_aux = 0;
 	info->fix.xpanstep = 1; /* doing it in hw */
 	info->fix.ypanstep = 1; /* doing it in hw */
@@ -1005,6 +1007,7 @@
 	info->var.xres_virtual = fb->width;
 	info->var.yres_virtual = fb->height;
 	info->var.bits_per_pixel = fb->bits_per_pixel;
+	info->var.accel_flags = FB_ACCELF_TEXT;
 	info->var.xoffset = 0;
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1533,24 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+ * but the module doesn't depend on any fb console symbols.  At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+static int __init drm_fb_helper_modinit(void)
+{
+	const char *name = "fbcon";
+	struct module *fbcon;
+
+	mutex_lock(&module_mutex);
+	fbcon = find_module(name);
+	mutex_unlock(&module_mutex);
+
+	if (!fbcon)
+		request_module_nowait(name);
+	return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794b..2ec7d48 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@
 		return -EBUSY;	/* No exclusive opens */
 	if (!drm_cpu_valid())
 		return -EINVAL;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return -EINVAL;
 
 	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 16d5155..0054e95 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
 #include <linux/slab.h>
 
 #include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+	((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
 /**
  * Get interrupt from bus id.
  *
@@ -77,6 +93,87 @@
 	return 0;
 }
 
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	u32 vblcount;
+	s64 diff_ns;
+	int vblrc;
+	struct timeval tvblank;
+
+	/* Prevent vblank irq processing while disabling vblank irqs,
+	 * so no updates of timestamps or count can happen after we've
+	 * disabled. Needed to prevent races in case of delayed irq's.
+	 * Disable preemption, so vblank_time_lock is held as short as
+	 * possible, even under a kernel with PREEMPT_RT patches.
+	 */
+	preempt_disable();
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	dev->driver->disable_vblank(dev, crtc);
+	dev->vblank_enabled[crtc] = 0;
+
+	/* No further vblank irq's will be processed after
+	 * this point. Get current hardware vblank count and
+	 * vblank timestamp, repeat until they are consistent.
+	 *
+	 * FIXME: There is still a race condition here and in
+	 * drm_update_vblank_count() which can cause off-by-one
+	 * reinitialization of software vblank counter. If gpu
+	 * vblank counter doesn't increment exactly at the leading
+	 * edge of a vblank interval, then we can lose 1 count if
+	 * we happen to execute between start of vblank and the
+	 * delayed gpu counter increment.
+	 */
+	do {
+		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Compute time difference to stored timestamp of last vblank
+	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
+	 */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* If there is at least 1 msec difference between the last stored
+	 * timestamp and tvblank, then we are currently executing our
+	 * disable inside a new vblank interval, the tvblank timestamp
+	 * corresponds to this new vblank interval and the irq handler
+	 * for this vblank didn't run yet and won't run due to our disable.
+	 * Therefore we need to do the job of drm_handle_vblank() and
+	 * increment the vblank counter by one to account for this vblank.
+	 *
+	 * Skip this step if there isn't any high precision timestamp
+	 * available. In that case we can't account for this and just
+	 * hope for the best.
+	 */
+	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+		atomic_inc(&dev->_vblank_count[crtc]);
+
+	/* Invalidate all timestamps while vblank irq's are off. */
+	clear_vblank_timestamps(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	preempt_enable();
+}
+
 static void vblank_disable_fn(unsigned long arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +188,7 @@
 		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
 		    dev->vblank_enabled[i]) {
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
-			dev->last_vblank[i] =
-				dev->driver->get_vblank_counter(dev, i);
-			dev->driver->disable_vblank(dev, i);
-			dev->vblank_enabled[i] = 0;
+			vblank_disable_and_save(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 	}
@@ -117,6 +211,7 @@
 	kfree(dev->last_vblank);
 	kfree(dev->last_vblank_wait);
 	kfree(dev->vblank_inmodeset);
+	kfree(dev->_vblank_time);
 
 	dev->num_crtcs = 0;
 }
@@ -129,6 +224,8 @@
 	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
 		    (unsigned long)dev);
 	spin_lock_init(&dev->vbl_lock);
+	spin_lock_init(&dev->vblank_time_lock);
+
 	dev->num_crtcs = num_crtcs;
 
 	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@
 	if (!dev->vblank_inmodeset)
 		goto err;
 
+	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+				    sizeof(struct timeval), GFP_KERNEL);
+	if (!dev->_vblank_time)
+		goto err;
+
+	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+	/* Driver specific high-precision vblank timestamping supported? */
+	if (dev->driver->get_vblank_timestamp)
+		DRM_INFO("Driver supports precise vblank timestamp query.\n");
+	else
+		DRM_INFO("No driver support for vblank timestamp query.\n");
+
 	/* Zero per-crtc vblank stuff */
 	for (i = 0; i < num_crtcs; i++) {
 		init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +389,7 @@
  *
  * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
  */
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
 	int irq_enabled, i;
@@ -335,7 +445,9 @@
 {
 	struct drm_control *ctl = data;
 
-	/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+	/* if we don't have an irq we fall back for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h
+	 */
 
 
 	switch (ctl->func) {
@@ -360,6 +472,287 @@
 }
 
 /**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	u64 dotclock;
+
+	/* Dot clock in Hz: */
+	dotclock = (u64) crtc->hwmode.clock * 1000;
+
+	/* Valid dotclock? */
+	if (dotclock > 0) {
+		/* Convert scanline length in pixels and video dot clock to
+		 * line duration, frame duration and pixel duration in
+		 * nanoseconds:
+		 */
+		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+					      1000000000), dotclock);
+		framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+	} else
+		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+			  crtc->base.id);
+
+	crtc->pixeldur_ns = pixeldur_ns;
+	crtc->linedur_ns  = linedur_ns;
+	crtc->framedur_ns = framedur_ns;
+
+	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+		  crtc->base.id, crtc->hwmode.crtc_htotal,
+		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+		  (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
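The constants are plain integer arithmetic on the hwmode fields. As a worked example (mode values chosen purely for illustration): a 1920x1080 mode with crtc_htotal 2200, crtc_vtotal 1125 and a 148500 kHz dot clock gives about 6 ns per pixel, roughly 14814 ns per line and roughly 16.67 ms per frame, i.e. close to 60 Hz. A small stand-alone sketch of the same calculation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative mode: 1920x1080, htotal 2200, vtotal 1125,
	 * dot clock 148500 kHz (148.5 MHz); not taken from the patch. */
	uint64_t dotclock = 148500ULL * 1000;	/* Hz */
	uint64_t htotal = 2200, vtotal = 1125;

	uint64_t pixeldur_ns = 1000000000ULL / dotclock;		/* ~6 ns */
	uint64_t linedur_ns  = htotal * 1000000000ULL / dotclock;	/* ~14814 ns */
	uint64_t framedur_ns = vtotal * linedur_ns;			/* ~16.67 ms */

	printf("pixel %llu ns, line %llu ns, frame %llu ns\n",
	       (unsigned long long)pixeldur_ns,
	       (unsigned long long)linedur_ns,
	       (unsigned long long)framedur_ns);
	return 0;
}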
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as a no-op if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ *             On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL   - Invalid crtc.
+ * -EAGAIN   - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO      - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+					  int *max_error,
+					  struct timeval *vblank_time,
+					  unsigned flags,
+					  struct drm_crtc *refcrtc)
+{
+	struct timeval stime, raw_time;
+	struct drm_display_mode *mode;
+	int vbl_status, vtotal, vdisplay;
+	int vpos, hpos, i;
+	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	bool invbl;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Scanout position query not supported? Should not happen. */
+	if (!dev->driver->get_scanout_position) {
+		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+		return -EIO;
+	}
+
+	mode = &refcrtc->hwmode;
+	vtotal = mode->crtc_vtotal;
+	vdisplay = mode->crtc_vdisplay;
+
+	/* Durations of frames, lines, pixels in nanoseconds. */
+	framedur_ns = refcrtc->framedur_ns;
+	linedur_ns  = refcrtc->linedur_ns;
+	pixeldur_ns = refcrtc->pixeldur_ns;
+
+	/* If mode timing undefined, just return as no-op:
+	 * Happens during initial modesetting of a crtc.
+	 */
+	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		return -EAGAIN;
+	}
+
+	/* Don't know yet how to handle interlaced or
+	 * double scan modes. Just no-op for now.
+	 */
+	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+		return -ENOTSUPP;
+	}
+
+	/* Get current scanout position with system timestamp.
+	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+	 * if single query takes longer than max_error nanoseconds.
+	 *
+	 * This guarantees a tight bound on maximum error if
+	 * code gets preempted or delayed for some reason.
+	 */
+	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+		/* Disable preemption to make it very likely to
+		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+
+		/* Get system timestamp before query. */
+		do_gettimeofday(&stime);
+
+		/* Get vertical and horizontal scanout pos. vpos, hpos. */
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+		/* Get system timestamp after query. */
+		do_gettimeofday(&raw_time);
+
+		preempt_enable();
+
+		/* Return as no-op if scanout query unsupported or failed. */
+		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+				  crtc, vbl_status);
+			return -EIO;
+		}
+
+		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+		/* Accept result with < max_error nsecs timing uncertainty. */
+		if (duration_ns <= (s64) *max_error)
+			break;
+	}
+
+	/* Noisy system timing? */
+	if (i == DRM_TIMESTAMP_MAXRETRIES) {
+		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+			  crtc, (int) duration_ns/1000, *max_error/1000, i);
+	}
+
+	/* Return upper bound of timestamp precision error. */
+	*max_error = (int) duration_ns;
+
+	/* Check if in vblank area:
+	 * vpos is >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+	/* Convert scanout position into elapsed time at raw_time query
+	 * since start of scanout at first display scanline. delta_ns
+	 * can be negative if start of scanout hasn't happened yet.
+	 */
+	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+	    ((vdisplay - vpos) < vtotal / 100)) {
+		delta_ns = delta_ns - framedur_ns;
+
+		/* Signal this correction as "applied". */
+		vbl_status |= 0x8;
+	}
+
+	/* Subtract time delta from raw timestamp to get final
+	 * vblank_time timestamp for end of vblank.
+	 */
+	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+		  crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+		  raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+		  (int) duration_ns/1000, i);
+
+	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+	if (invbl)
+		vbl_status |= DRM_VBLANKTIME_INVBL;
+
+	return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
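A kms driver that provides get_scanout_position() typically implements its get_vblank_timestamp() hook as a thin wrapper: look up the drm_crtc for the requested pipe and forward everything to this helper. A hedged sketch of such a hook; my_driver_crtc_from_pipe() is a made-up placeholder for whatever pipe-to-crtc mapping the driver already has, and the usual kernel/drm headers are assumed:

/* Kernel context: assumes drmP.h and the driver's own headers. */
static int my_driver_get_vblank_timestamp(struct drm_device *dev, int crtc,
					  int *max_error,
					  struct timeval *vblank_time,
					  unsigned flags)
{
	/* Hypothetical helper: map pipe index to the driver's drm_crtc. */
	struct drm_crtc *refcrtc = my_driver_crtc_from_pipe(dev, crtc);

	if (!refcrtc)
		return -EINVAL;

	/* All the heavy lifting is done by the core helper above. */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     refcrtc);
}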
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+			      struct timeval *tvblank, unsigned flags)
+{
+	int ret = 0;
+
+	/* Define requested maximum error on timestamps (nanoseconds). */
+	int max_error = (int) drm_timestamp_precision * 1000;
+
+	/* Query driver if possible and precision timestamping enabled. */
+	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+							tvblank, flags);
+		if (ret > 0)
+			return (u32) ret;
+	}
+
+	/* GPU high precision timestamp query unsupported or failed.
+	 * Return gettimeofday timestamp as best estimate.
+	 */
+	do_gettimeofday(tvblank);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
  * drm_vblank_count - retrieve "cooked" vblank counter value
  * @dev: DRM device
  * @crtc: which counter to retrieve
@@ -375,6 +768,40 @@
 EXPORT_SYMBOL(drm_vblank_count);
 
 /**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter
+ * value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+			      struct timeval *vblanktime)
+{
+	u32 cur_vblank;
+
+	/* Read timestamp from slot of _vblank_time ringbuffer
+	 * that corresponds to current vblank count. Retry if
+	 * count has incremented during readout. This works like
+	 * a seqlock.
+	 */
+	do {
+		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		smp_rmb();
+	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+	return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
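The retry loop pairs with the writer in drm_handle_vblank() below: the irq handler fills the ringbuffer slot for count + 1, issues smp_wmb(), and only then increments the count, so a reader that observes the same count before and after copying the slot has necessarily read a fully published timestamp. A user-space flavoured sketch of the same publish/consume pattern using C11 atomics in place of the kernel primitives (names are illustrative, not DRM API):

#include <stdatomic.h>
#include <stdint.h>

#define RB_SIZE 16

struct stamp_rb {
	_Atomic uint32_t count;
	uint64_t slot[RB_SIZE];
};

/* Single writer: fill the slot for the *next* count, then publish it by
 * storing count + 1 with release semantics (kernel: smp_wmb + atomic_inc). */
static void publish(struct stamp_rb *rb, uint64_t ts)
{
	uint32_t c = atomic_load_explicit(&rb->count, memory_order_relaxed);

	rb->slot[(c + 1) % RB_SIZE] = ts;
	atomic_store_explicit(&rb->count, c + 1, memory_order_release);
}

/* Reader: retry until count is stable around the slot copy
 * (kernel: the do/while with smp_rmb above). */
static uint64_t consume(struct stamp_rb *rb, uint32_t *count_out)
{
	uint32_t c;
	uint64_t ts;

	do {
		c = atomic_load_explicit(&rb->count, memory_order_acquire);
		ts = rb->slot[c % RB_SIZE];
		/* Order the slot read before the re-check, like smp_rmb(). */
		atomic_thread_fence(memory_order_acquire);
	} while (c != atomic_load_explicit(&rb->count, memory_order_relaxed));

	*count_out = c;
	return ts;
}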
+
+/**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
  * @crtc: counter to update
@@ -392,7 +819,8 @@
  */
 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 {
-	u32 cur_vblank, diff;
+	u32 cur_vblank, diff, tslot, rc;
+	struct timeval t_vblank;
 
 	/*
 	 * Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@
 	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
 	 * here if the register is small or we had vblank interrupts off for
 	 * a long time.
+	 *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races between the gpu
+	 * updating its hardware counter and us retrieving the
+	 * corresponding vblank timestamp.
 	 */
-	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	do {
+		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Deal with counter wrap */
 	diff = cur_vblank - dev->last_vblank[crtc];
 	if (cur_vblank < dev->last_vblank[crtc]) {
 		diff += dev->max_vblank_count;
@@ -413,6 +851,16 @@
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
 		  crtc, diff);
 
+	/* Reinitialize corresponding vblank timestamp if high-precision query
+	 * available. Skip this step if query unsupported or failed. It
+	 * will be reinitialized at the next vblank interrupt in that case.
+	 */
+	if (rc) {
+		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		vblanktimestamp(dev, crtc, tslot) = t_vblank;
+		smp_wmb();
+	}
+
 	atomic_add(diff, &dev->_vblank_count[crtc]);
 }
 
@@ -429,15 +877,27 @@
  */
 int drm_vblank_get(struct drm_device *dev, int crtc)
 {
-	unsigned long irqflags;
+	unsigned long irqflags, irqflags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		/* Disable preemption while holding vblank_time_lock. Do
+		 * it explicitly to guard against PREEMPT_RT kernels.
+		 */
+		preempt_disable();
+		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
 		if (!dev->vblank_enabled[crtc]) {
+			/* Enable vblank irqs under vblank_time_lock protection.
+			 * All vblank count & timestamp updates are held off
+			 * until we are done reinitializing master counter and
+			 * timestamps. Filtercode in drm_handle_vblank() will
+			 * prevent double-accounting of same vblank interval.
+			 */
 			ret = dev->driver->enable_vblank(dev, crtc);
-			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+				  crtc, ret);
 			if (ret)
 				atomic_dec(&dev->vblank_refcount[crtc]);
 			else {
@@ -445,6 +905,8 @@
 				drm_update_vblank_count(dev, crtc);
 			}
 		}
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+		preempt_enable();
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@
  * @crtc: which counter to give up
  *
  * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
 
 	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
-		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	    (drm_vblank_offdelay > 0))
+		mod_timer(&dev->vblank_disable_timer,
+			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
 }
 EXPORT_SYMBOL(drm_vblank_put);
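With this change the disable no longer happens a fixed 5 seconds after the last reference is dropped; it is scheduled drm_vblank_offdelay milliseconds later (default 5000, see the drm_stub.c hunk below), and a value of 0 keeps vblank irqs permanently enabled because the timer is never armed. The expression (drm_vblank_offdelay * DRM_HZ) / 1000 is just a millisecond-to-jiffies conversion; a tiny sketch of the equivalent arithmetic (HZ value illustrative):

/* Roughly what the mod_timer() argument computes: milliseconds to
 * jiffies, ignoring rounding (msecs_to_jiffies() additionally rounds up). */
static unsigned long offdelay_to_jiffies(unsigned int offdelay_ms,
					 unsigned long hz)
{
	return (offdelay_ms * hz) / 1000;	/* e.g. 5000 ms at HZ=250 -> 1250 */
}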
 
@@ -480,10 +944,8 @@
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	dev->driver->disable_vblank(dev, crtc);
+	vblank_disable_and_save(dev, crtc);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
-	dev->vblank_enabled[crtc] = 0;
-	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@
 	e->base.file_priv = file_priv;
 	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
-	do_gettimeofday(&now);
 	spin_lock_irqsave(&dev->event_lock, flags);
 
 	if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@
 	}
 
 	file_priv->event_space -= sizeof e->event;
-	seq = drm_vblank_count(dev, pipe);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
+
 	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1 << 23)) {
 		vblwait->request.sequence = seq + 1;
@@ -626,15 +1088,18 @@
 
 	e->event.sequence = vblwait->request.sequence;
 	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.sequence = seq;
 		e->event.tv_sec = now.tv_sec;
 		e->event.tv_usec = now.tv_usec;
 		drm_vblank_put(dev, pipe);
 		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
+		vblwait->reply.sequence = seq;
 		trace_drm_vblank_event_delivered(current->pid, pipe,
 						 vblwait->request.sequence);
 	} else {
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
+		vblwait->reply.sequence = vblwait->request.sequence;
 	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1192,10 @@
 	if (ret != -EINTR) {
 		struct timeval now;
 
-		do_gettimeofday(&now);
-
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
-		vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
 		DRM_DEBUG("returning %d to client\n",
 			  vblwait->reply.sequence);
 	} else {
@@ -750,8 +1214,7 @@
 	unsigned long flags;
 	unsigned int seq;
 
-	do_gettimeofday(&now);
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count_and_time(dev, crtc, &now);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -789,11 +1252,64 @@
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
+	u32 vblcount;
+	s64 diff_ns;
+	struct timeval tvblank;
+	unsigned long irqflags;
+
 	if (!dev->num_crtcs)
 		return;
 
-	atomic_inc(&dev->_vblank_count[crtc]);
+	/* Need timestamp lock to prevent concurrent execution with
+	 * vblank enable/disable, as this would cause inconsistent
+	 * or corrupted timestamps and vblank counts.
+	 */
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	/* Vblank irq handling disabled. Nothing to do. */
+	if (!dev->vblank_enabled[crtc]) {
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+		return;
+	}
+
+	/* Fetch corresponding timestamp for this vblank interval from
+	 * driver and store it in proper slot of timestamp ringbuffer.
+	 */
+
+	/* Get current timestamp and count. */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+	/* Compute time difference to timestamp of last vblank */
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* Update vblank timestamp and count if at least
+	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+	 * difference between last stored timestamp and current
+	 * timestamp. A smaller difference means basically
+	 * identical timestamps. Happens if this vblank has
+	 * been already processed and this is a redundant call,
+	 * e.g., due to spurious vblank interrupts. We need to
+	 * ignore those for accounting.
+	 */
+	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+		/* Store new timestamp in ringbuffer. */
+		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+		smp_wmb();
+
+		/* Increment cooked vblank count. This also atomically commits
+		 * the timestamp computed above.
+		 */
+		atomic_inc(&dev->_vblank_count[crtc]);
+	} else {
+		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+			  crtc, (int) diff_ns);
+	}
+
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
 	drm_handle_vblank_events(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
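The threshold test in drm_handle_vblank() is what keeps a spurious or double-delivered vblank interrupt (or the deliberate re-enable path through drm_vblank_get() and drm_update_vblank_count()) from bumping the cooked count twice for the same interval: two interrupts belonging to the same vblank produce nearly identical timestamps, so their delta stays below DRM_REDUNDANT_VBLIRQ_THRESH_NS and the later one is ignored. A toy sketch of the same deduplicate-by-timestamp idea (the 1 ms threshold is illustrative, not the DRM constant):

#include <stdbool.h>
#include <stdint.h>

/* Accept an event only if its timestamp is far enough from the last
 * accepted one; near-duplicates belong to the same interval. */
static bool accept_event(int64_t *last_ns, int64_t now_ns)
{
	int64_t diff_ns = now_ns - *last_ns;

	if (diff_ns < 0)
		diff_ns = -diff_ns;
	if (diff_ns <= 1000000)		/* illustrative 1 ms threshold */
		return false;		/* redundant: same interval */

	*last_ns = now_ns;
	return true;
}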
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc30..c59515b 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@
 	mm->scanned_blocks = 0;
 	mm->scan_hit_start = 0;
 	mm->scan_hit_size = 0;
+	mm->scan_check_range = 0;
 }
 EXPORT_SYMBOL(drm_mm_init_scan);
 
 /**
+ * Initialize lru scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+				 unsigned alignment,
+				 unsigned long start,
+				 unsigned long end)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+	mm->scan_start = start;
+	mm->scan_end = end;
+	mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
+/**
  * Add a node to the scan list that might be freed to make space for the desired
  * hole.
  *
@@ -406,6 +432,8 @@
 	struct drm_mm *mm = node->mm;
 	struct list_head *prev_free, *next_free;
 	struct drm_mm_node *prev_node, *next_node;
+	unsigned long adj_start;
+	unsigned long adj_end;
 
 	mm->scanned_blocks++;
 
@@ -452,7 +480,17 @@
 	node->free_stack.prev = prev_free;
 	node->free_stack.next = next_free;
 
-	if (check_free_hole(node->start, node->start + node->size,
+	if (mm->scan_check_range) {
+		adj_start = node->start < mm->scan_start ?
+			mm->scan_start : node->start;
+		adj_end = node->start + node->size > mm->scan_end ?
+			mm->scan_end : node->start + node->size;
+	} else {
+		adj_start = node->start;
+		adj_end = node->start + node->size;
+	}
+
+	if (check_free_hole(adj_start, adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = node->start;
 		mm->scan_hit_size = node->size;
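The only difference from the unrestricted scan is that each candidate hole is first clamped to the [scan_start, scan_end) window before check_free_hole() sees it, so a block straddling the boundary only contributes the part that lies inside the range. The clamp is the usual interval intersection; a tiny stand-alone sketch (names illustrative):

/* Intersect [start, end) with [range_start, range_end); the result may
 * be empty (start >= end), in which case the hole cannot be used. */
static void clamp_to_range(unsigned long *start, unsigned long *end,
			   unsigned long range_start, unsigned long range_end)
{
	if (*start < range_start)
		*start = range_start;
	if (*end > range_end)
		*end = range_end;
}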
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee..d59edc1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 
 module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 
 struct idr drm_minors_idr;
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d..0ae6a7c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_evict.o \
+	  i915_gem_execbuffer.o \
+	  i915_gem_gtt.o \
 	  i915_gem_tiling.o \
 	  i915_trace_points.o \
 	  intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ce..3601466 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "intel_drv.h"
+#include "intel_ringbuffer.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -72,7 +73,6 @@
 	B(is_broadwater);
 	B(is_crestline);
 	B(has_fbc);
-	B(has_rc6);
 	B(has_pipe_cxsr);
 	B(has_hotplug);
 	B(cursor_needs_physical);
@@ -86,19 +86,19 @@
 	return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj_priv->user_pin_count > 0)
+	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj_priv->pin_count > 0)
+	else if (obj->pin_count > 0)
 		return "p";
 	else
 		return " ";
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-    switch (obj_priv->tiling_mode) {
+    switch (obj->tiling_mode) {
     default:
     case I915_TILING_NONE: return " ";
     case I915_TILING_X: return "X";
@@ -106,10 +106,19 @@
     }
 }
 
+static const char *agp_type_str(int type)
+{
+	switch (type) {
+	case 0: return " uncached";
+	case 1: return " snooped";
+	default: return "";
+	}
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -117,6 +126,8 @@
 		   obj->base.read_domains,
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
+		   obj->last_fenced_seqno,
+		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -124,7 +135,17 @@
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
-		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+		seq_printf(m, " (gtt offset: %08x, size: %08x)",
+			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
 	if (obj->ring != NULL)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
@@ -136,7 +157,7 @@
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
 	int count, ret;
 
@@ -171,12 +192,12 @@
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, mm_list) {
+	list_for_each_entry(obj, head, mm_list) {
 		seq_printf(m, "   ");
-		describe_obj(m, obj_priv);
+		describe_obj(m, obj);
 		seq_printf(m, "\n");
-		total_obj_size += obj_priv->base.size;
-		total_gtt_size += obj_priv->gtt_space->size;
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
 		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -186,30 +207,116 @@
 	return 0;
 }
 
+#define count_objects(list, member) do { \
+	list_for_each_entry(obj, list, member) { \
+		size += obj->gtt_space->size; \
+		++count; \
+		if (obj->map_and_fenceable) { \
+			mappable_size += obj->gtt_space->size; \
+			++mappable_count; \
+		} \
+	} \
+} while(0)
+
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 count, mappable_count;
+	size_t size, mappable_size;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
-	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
-	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
-	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
-	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
-	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
-	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+	seq_printf(m, "%u objects, %zu bytes\n",
+		   dev_priv->mm.object_count,
+		   dev_priv->mm.object_memory);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.active_list, mm_list);
+	count_objects(&dev_priv->mm.flushing_list, mm_list);
+	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.pinned_list, mm_list);
+	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->fault_mappable) {
+			size += obj->gtt_space->size;
+			++count;
+		}
+		if (obj->pin_mappable) {
+			mappable_size += obj->gtt_space->size;
+			++mappable_count;
+		}
+	}
+	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+		   mappable_count, mappable_size);
+	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+		   count, size);
+
+	seq_printf(m, "%zu [%zu] gtt total\n",
+		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
 
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -243,14 +350,14 @@
 			seq_printf(m, "%d prepares\n", work->pending);
 
 			if (work->old_fb_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-				if(obj_priv)
-					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->old_fb_obj;
+				if (obj)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 			if (work->pending_flip_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-				if(obj_priv)
-					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->pending_flip_obj;
+				if (obj)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +372,80 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
-	int ret;
+	int ret, count;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
-			list) {
-		seq_printf(m, "    %d @ %d\n",
-			   gem_request->seqno,
-			   (int) (jiffies - gem_request->emitted_jiffies));
+	count = 0;
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+		seq_printf(m, "Render requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[RCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+		seq_printf(m, "BSD requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[VCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+		seq_printf(m, "BLT requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[BCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
 
+	if (count == 0)
+		seq_printf(m, "No requests\n");
+
 	return 0;
 }
 
+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_ring_buffer *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %d\n",
+			   ring->name, ring->get_seqno(ring));
+		seq_printf(m, "Waiter sequence (%s):  %d\n",
+			   ring->name, ring->waiting_seqno);
+		seq_printf(m, "IRQ sequence (%s):     %d\n",
+			   ring->name, ring->irq_seqno);
+	}
+}
+
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
-	}
-	seq_printf(m, "Waiter sequence:  %d\n",
-			dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -315,7 +458,7 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -354,16 +497,14 @@
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence:    %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence:    hws uninitialized\n");
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 	}
-	seq_printf(m, "Waiter sequence:     %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence:        %d\n",
-		   dev_priv->mm.irq_gem_seqno);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -383,29 +524,17 @@
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		if (obj == NULL) {
-			seq_printf(m, "Fenced object[%2d] = unused\n", i);
-		} else {
-			struct drm_i915_gem_object *obj_priv;
-
-			obj_priv = to_intel_bo(obj);
-			seq_printf(m, "Fenced object[%2d] = %p: %s "
-				   "%08x %08zx %08x %s %08x %08x %d",
-				   i, obj, get_pin_flag(obj_priv),
-				   obj_priv->gtt_offset,
-				   obj->size, obj_priv->stride,
-				   get_tiling_flag(obj_priv),
-				   obj->read_domains, obj->write_domain,
-				   obj_priv->last_rendering_seqno);
-			if (obj->name)
-				seq_printf(m, " (name: %d)", obj->name);
-			seq_printf(m, "\n");
-		}
+		seq_printf(m, "Fenced object[%2d] = ", i);
+		if (obj == NULL)
+			seq_printf(m, "unused");
+		else
+			describe_obj(m, obj);
+		seq_printf(m, "\n");
 	}
-	mutex_unlock(&dev->struct_mutex);
 
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -414,10 +543,12 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	struct intel_ring_buffer *ring;
 	volatile u32 *hws;
+	int i;
 
-	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = (volatile u32 *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -431,14 +562,14 @@
 
 static void i915_dump_object(struct seq_file *m,
 			     struct io_mapping *mapping,
-			     struct drm_i915_gem_object *obj_priv)
+			     struct drm_i915_gem_object *obj)
 {
 	int page, page_count, i;
 
-	page_count = obj_priv->base.size / PAGE_SIZE;
+	page_count = obj->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
 		u32 *mem = io_mapping_map_wc(mapping,
-					     obj_priv->gtt_offset + page * PAGE_SIZE);
+					     obj->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
 		io_mapping_unmap(mem);
@@ -450,25 +581,21 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		obj = &obj_priv->base;
-		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-		    seq_printf(m, "--- gtt_offset = 0x%08x\n",
-			       obj_priv->gtt_offset);
-		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 		}
 	}
 
 	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
 
@@ -477,19 +604,21 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	if (!dev_priv->render_ring.gem_object) {
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
-		u8 *virt = dev_priv->render_ring.virtual_start;
+		u8 *virt = ring->virtual_start;
 		uint32_t off;
 
-		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+		for (off = 0; off < ring->size; off += 4) {
 			uint32_t *ptr = (uint32_t *)(virt + off);
 			seq_printf(m, "%08x :  %08x\n", off, *ptr);
 		}
@@ -504,19 +633,38 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned int head, tail;
+	struct intel_ring_buffer *ring;
 
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (ring->size == 0)
+		return 0;
 
-	seq_printf(m, "RingHead :  %08x\n", head);
-	seq_printf(m, "RingTail :  %08x\n", tail);
-	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Ring %s:\n", ring->name);
+	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+	seq_printf(m, "  Size :    %08x\n", ring->size);
+	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
+	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
+	if (IS_GEN6(dev)) {
+		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
+		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
+	}
+	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
+	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
 
 	return 0;
 }
 
+static const char *ring_str(int ring)
+{
+	switch (ring) {
+	case RING_RENDER: return " render";
+	case RING_BSD: return " bsd";
+	case RING_BLT: return " blt";
+	default: return "";
+	}
+}
+
 static const char *pin_flag(int pinned)
 {
 	if (pinned > 0)
@@ -547,6 +695,37 @@
 	return purgeable ? " purgeable" : "";
 }
 
+static void print_error_buffers(struct seq_file *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
+{
+	seq_printf(m, "%s [%d]:\n", name, count);
+
+	while (count--) {
+		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+			   err->gtt_offset,
+			   err->size,
+			   err->read_domains,
+			   err->write_domain,
+			   err->seqno,
+			   pin_flag(err->pinned),
+			   tiling_flag(err->tiling),
+			   dirty_flag(err->dirty),
+			   purgeable_flag(err->purgeable),
+			   ring_str(err->ring),
+			   agp_type_str(err->agp_type));
+
+		if (err->name)
+			seq_printf(m, " (name: %d)", err->name);
+		if (err->fence_reg != I915_FENCE_REG_NONE)
+			seq_printf(m, " (fence: %d)", err->fence_reg);
+
+		seq_printf(m, "\n");
+		err++;
+	}
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,47 +747,54 @@
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
-	seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "ERROR: 0x%08x\n", error->error);
+		seq_printf(m, "Blitter command stream:\n");
+		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
+		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
+		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
+		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
+		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
+		seq_printf(m, "Video (BSD) command stream:\n");
+		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
+		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
+		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
+		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
+		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
+	}
+	seq_printf(m, "Render command stream:\n");
+	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
 	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
-	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 	}
-	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
 
-	if (error->active_bo_count) {
-		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+	for (i = 0; i < 16; i++)
+		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
-		for (i = 0; i < error->active_bo_count; i++) {
-			seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
-				   error->active_bo[i].gtt_offset,
-				   error->active_bo[i].size,
-				   error->active_bo[i].read_domains,
-				   error->active_bo[i].write_domain,
-				   error->active_bo[i].seqno,
-				   pin_flag(error->active_bo[i].pinned),
-				   tiling_flag(error->active_bo[i].tiling),
-				   dirty_flag(error->active_bo[i].dirty),
-				   purgeable_flag(error->active_bo[i].purgeable));
+	if (error->active_bo)
+		print_error_buffers(m, "Active",
+				    error->active_bo,
+				    error->active_bo_count);
 
-			if (error->active_bo[i].name)
-				seq_printf(m, " (name: %d)", error->active_bo[i].name);
-			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
-				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
-			seq_printf(m, "\n");
-		}
-	}
+	if (error->pinned_bo)
+		print_error_buffers(m, "Pinned",
+				    error->pinned_bo,
+				    error->pinned_bo_count);
 
 	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -635,6 +821,9 @@
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
 
+	if (error->display)
+		intel_display_print_error_state(m, dev, error->display);
+
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
@@ -658,15 +847,51 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
-	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
-	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
-	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
-		   MEMSTAT_VID_SHIFT);
-	seq_printf(m, "Current P-state: %d\n",
-		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	if (IS_GEN5(dev)) {
+		u16 rgvswctl = I915_READ16(MEMSWCTL);
+		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_GEN6(dev)) {
+		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		int max_freq;
+
+		/* RPSTAT1 is in the GT power well */
+		__gen6_force_wake_get(dev_priv);
+
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & 0xff00) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+
+		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = rp_state_cap & 0xff;
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		__gen6_force_wake_put(dev_priv);
+	} else {
+		seq_printf(m, "no P-state info available\n");
+	}
 
 	return 0;
 }
@@ -715,7 +940,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -738,6 +963,30 @@
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
 
 	return 0;
 }
@@ -794,7 +1043,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_GEN5(dev))
+	if (HAS_PCH_SPLIT(dev))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1135,7 @@
 		   fb->base.height,
 		   fb->base.depth,
 		   fb->base.bits_per_pixel);
-	describe_obj(m, to_intel_bo(fb->obj));
+	describe_obj(m, fb->obj);
 	seq_printf(m, "\n");
 
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1147,7 @@
 			   fb->base.height,
 			   fb->base.depth,
 			   fb->base.bits_per_pixel);
-		describe_obj(m, to_intel_bo(fb->obj));
+		describe_obj(m, fb->obj);
 		seq_printf(m, "\n");
 	}
 
@@ -943,7 +1192,6 @@
 		  loff_t *ppos)
 {
 	struct drm_device *dev = filp->private_data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[20];
 	int val = 1;
 
@@ -959,12 +1207,7 @@
 	}
 
 	DRM_INFO("Manually setting wedged to %d\n", val);
-
-	atomic_set(&dev_priv->mm.wedged, val);
-	if (val) {
-		wake_up_all(&dev_priv->irq_queue);
-		queue_work(dev_priv->wq, &dev_priv->error_work);
-	}
+	i915_handle_error(dev, val);
 
 	return cnt;
 }
@@ -1018,6 +1261,7 @@
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -1028,9 +1272,15 @@
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cb900dc..844f3c9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,8 @@
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,11 +60,10 @@
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->render_ring.status_page.page_addr
-		= dev_priv->status_page_dmah->vaddr;
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -80,13 +81,15 @@
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->render_ring.status_page.gfx_addr) {
-		dev_priv->render_ring.status_page.gfx_addr = 0;
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -98,7 +101,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -107,8 +110,8 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
 		ring->space += ring->size;
@@ -124,6 +127,8 @@
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -132,9 +137,8 @@
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -148,6 +152,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -158,24 +163,24 @@
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.gem_object != NULL) {
+		if (ring->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->render_ring.size = init->ring_size;
+		ring->size = init->ring_size;
 
-		dev_priv->render_ring.map.offset = init->ring_start;
-		dev_priv->render_ring.map.size = init->ring_size;
-		dev_priv->render_ring.map.type = 0;
-		dev_priv->render_ring.map.flags = 0;
-		dev_priv->render_ring.map.mtrr = 0;
+		ring->map.offset = init->ring_start;
+		ring->map.size = init->ring_size;
+		ring->map.type = 0;
+		ring->map.flags = 0;
+		ring->map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+		drm_core_ioremap_wc(&ring->map, dev);
 
-		if (dev_priv->render_ring.map.handle == NULL) {
+		if (ring->map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -183,7 +188,7 @@
 		}
 	}
 
-	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+	ring->virtual_start = ring->map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -202,12 +207,10 @@
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	ring = &dev_priv->render_ring;
-
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -222,7 +225,7 @@
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 				ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
-		intel_ring_setup_status_page(dev, ring);
+		intel_ring_setup_status_page(ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -264,7 +267,7 @@
  * instruction detected will be given a size of zero, which is a
  * signal to abort the rest of the buffer.
  */
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
 {
 	switch (((cmd >> 29) & 0x7)) {
 	case 0x0:
@@ -322,40 +325,27 @@
 	return 0;
 }
 
-static int validate_cmd(int cmd)
-{
-	int ret = do_validate_cmd(cmd);
-
-/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
-	return ret;
-}
-
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;
 
-	BEGIN_LP_RING((dwords+1)&~1);
-
 	for (i = 0; i < dwords;) {
-		int cmd, sz;
-
-		cmd = buffer[i];
-
-		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+		int sz = validate_cmd(buffer[i]);
+		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
-
-		OUT_RING(cmd);
-
-		while (++i, --sz) {
-			OUT_RING(buffer[i]);
-		}
+		i += sz;
 	}
 
+	ret = BEGIN_LP_RING((dwords+1)&~1);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dwords; i++)
+		OUT_RING(buffer[i]);
 	if (dwords & 1)
 		OUT_RING(0);
 
@@ -366,34 +356,41 @@
 
 int
 i915_emit_box(struct drm_device *dev,
-	      struct drm_clip_rect *boxes,
-	      int i, int DR1, int DR4)
+	      struct drm_clip_rect *box,
+	      int DR1, int DR4)
 {
-	struct drm_clip_rect box = boxes[i];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+	    box->y2 <= 0 || box->x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
-			  box.x1, box.y1, box.x2, box.y2);
+			  box->x1, box->y1, box->x2, box->y2);
 		return -EINVAL;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		BEGIN_LP_RING(4);
+		ret = BEGIN_LP_RING(4);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
-		ADVANCE_LP_RING();
 	} else {
-		BEGIN_LP_RING(6);
+		ret = BEGIN_LP_RING(6);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO);
 		OUT_RING(DR1);
-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
 		OUT_RING(0);
-		ADVANCE_LP_RING();
 	}
+	ADVANCE_LP_RING();
 
 	return 0;
 }
@@ -413,12 +410,13 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -440,7 +438,7 @@
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			ret = i915_emit_box(dev, cliprects, i,
+			ret = i915_emit_box(dev, &cliprects[i],
 					    cmd->DR1, cmd->DR4);
 			if (ret)
 				return ret;
@@ -459,8 +457,9 @@
 				     drm_i915_batchbuffer_t * batch,
 				     struct drm_clip_rect *cliprects)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
-	int i = 0, count;
+	int i, count, ret;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -470,17 +469,19 @@
 	i915_kernel_lost_context(dev);
 
 	count = nbox ? nbox : 1;
-
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						batch->DR1, batch->DR4);
+			ret = i915_emit_box(dev, &cliprects[i],
+					    batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
 		}
 
 		if (!IS_I830(dev) && !IS_845G(dev)) {
-			BEGIN_LP_RING(2);
+			ret = BEGIN_LP_RING(2);
+			if (ret)
+				return ret;
+
 			if (INTEL_INFO(dev)->gen >= 4) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 				OUT_RING(batch->start);
@@ -488,26 +489,29 @@
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
 				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			}
-			ADVANCE_LP_RING();
 		} else {
-			BEGIN_LP_RING(4);
+			ret = BEGIN_LP_RING(4);
+			if (ret)
+				return ret;
+
 			OUT_RING(MI_BATCH_BUFFER);
 			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			OUT_RING(batch->start + batch->used - 4);
 			OUT_RING(0);
-			ADVANCE_LP_RING();
 		}
+		ADVANCE_LP_RING();
 	}
 
 
 	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		BEGIN_LP_RING(2);
-		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
+		if (BEGIN_LP_RING(2) == 0) {
+			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+			OUT_RING(MI_NOOP);
+			ADVANCE_LP_RING();
+		}
 	}
-	i915_emit_breadcrumb(dev);
 
+	i915_emit_breadcrumb(dev);
 	return 0;
 }
 
@@ -516,6 +520,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
+	int ret;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -527,12 +532,13 @@
 
 	i915_kernel_lost_context(dev);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(10);
+	if (ret)
+		return ret;
+
 	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(6);
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
 	if (dev_priv->current_page == 0) {
@@ -543,33 +549,32 @@
 		dev_priv->current_page = 0;
 	}
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(2);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
 	OUT_RING(0);
+
 	ADVANCE_LP_RING();
 
 	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 
 	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 	return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
-				      dev_priv->render_ring.size - 8);
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -768,9 +773,15 @@
 	case I915_PARAM_HAS_BLT:
 		value = HAS_BLT(dev);
 		break;
+	case I915_PARAM_HAS_RELAXED_FENCING:
+		value = 1;
+		break;
 	case I915_PARAM_HAS_COHERENT_RINGS:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXEC_CONSTANTS:
+		value = INTEL_INFO(dev)->gen >= 4;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -826,7 +837,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -1005,73 +1016,47 @@
 #define PTE_VALID			(1 << 0)
 
 /**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ *                       a physical one
  * @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
  *
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question.  We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
  */
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
-				      unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
 {
-	unsigned long *gtt;
-	unsigned long entry, phys;
-	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
-	int gtt_offset, gtt_size;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->bridge_dev;
+	u32 base;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
-			gtt_offset = 2*1024*1024;
-			gtt_size = 2*1024*1024;
-		} else {
-			gtt_offset = 512*1024;
-			gtt_size = 512*1024;
-		}
+#if 0
+	/* On the machines I have tested the Graphics Base of Stolen Memory
+	 * is unreliable, so compute the base by subtracting the stolen memory
+	 * from the Top of Low Usable DRAM which is where the BIOS places
+	 * the graphics stolen memory.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* top 32bits are reserved = 0 */
+		pci_read_config_dword(pdev, 0xA4, &base);
 	} else {
-		gtt_bar = 3;
-		gtt_offset = 0;
-		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+		/* XXX presume 8xx is the same as i915 */
+		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
 	}
-
-	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
-			 gtt_size);
-	if (!gtt) {
-		DRM_ERROR("ioremap of GTT failed\n");
-		return 0;
+#else
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		u16 val;
+		pci_read_config_word(pdev, 0xb0, &val);
+		base = val >> 4 << 20;
+	} else {
+		u8 val;
+		pci_read_config_byte(pdev, 0x9c, &val);
+		base = val >> 3 << 27;
 	}
+	base -= dev_priv->mm.gtt->stolen_size;
+#endif
 
-	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
-	/* Mask out these reserved bits on this hardware. */
-	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
-		entry &= ~PTE_ADDRESS_MASK_HIGH;
-
-	/* If it's not a mapping type we know, then bail. */
-	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
-	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED)	{
-		iounmap(gtt);
-		return 0;
-	}
-
-	if (!(entry & PTE_VALID)) {
-		DRM_ERROR("bad GTT entry in stolen space\n");
-		iounmap(gtt);
-		return 0;
-	}
-
-	iounmap(gtt);
-
-	phys =(entry & PTE_ADDRESS_MASK) |
-		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
-	return phys;
+	return base + offset;
 }
 
 static void i915_warn_stolen(struct drm_device *dev)
@@ -1087,54 +1072,35 @@
 	unsigned long cfb_base;
 	unsigned long ll_base = 0;
 
-	/* Leave 1M for line length buffer & misc. */
-	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
-	if (!compressed_fb) {
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		i915_warn_stolen(dev);
-		return;
-	}
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	if (compressed_fb)
+		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb)
+		goto err;
 
-	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
-	if (!compressed_fb) {
-		i915_warn_stolen(dev);
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		return;
-	}
+	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+	if (!cfb_base)
+		goto err_fb;
 
-	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
-	if (!cfb_base) {
-		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-		drm_mm_put_block(compressed_fb);
-	}
+	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+						    4096, 4096, 0);
+		if (compressed_llb)
+			compressed_llb = drm_mm_get_block(compressed_llb,
+							  4096, 4096);
+		if (!compressed_llb)
+			goto err_fb;
 
-	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
-						    4096, 0);
-		if (!compressed_llb) {
-			i915_warn_stolen(dev);
-			return;
-		}
-
-		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
-		if (!compressed_llb) {
-			i915_warn_stolen(dev);
-			return;
-		}
-
-		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
-		if (!ll_base) {
-			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-			drm_mm_put_block(compressed_fb);
-			drm_mm_put_block(compressed_llb);
-		}
+		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+		if (!ll_base)
+			goto err_llb;
 	}
 
 	dev_priv->cfb_size = size;
 
 	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
-	if (IS_IRONLAKE_M(dev))
+	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
 	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1144,8 +1110,17 @@
 		dev_priv->compressed_llb = compressed_llb;
 	}
 
-	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
-		  ll_base, size >> 20);
+	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+		      cfb_base, ll_base, size >> 20);
+	return;
+
+err_llb:
+	drm_mm_put_block(compressed_llb);
+err_fb:
+	drm_mm_put_block(compressed_fb);
+err:
+	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+	i915_warn_stolen(dev);
 }
 
 static void i915_cleanup_compression(struct drm_device *dev)
@@ -1176,12 +1151,16 @@
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_INFO "i915: switched on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
 		i915_resume(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		printk(KERN_ERR "i915: switched off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		i915_suspend(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
@@ -1196,17 +1175,20 @@
 	return can_switch;
 }
 
-static int i915_load_modeset_init(struct drm_device *dev,
-				  unsigned long prealloc_size,
-				  unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long prealloc_size, gtt_size, mappable_size;
 	int ret = 0;
 
-	/* Basic memrange allocator for stolen space (aka mm.vram) */
-	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+	prealloc_size = dev_priv->mm.gtt->stolen_size;
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-	/* Let GEM Manage from end of prealloc space to end of aperture.
+	/* Basic memrange allocator for stolen space */
+	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+	/* Let GEM Manage all of the aperture.
 	 *
 	 * However, leave one page at the end still bound to the scratch page.
 	 * There are a number of places where the hardware apparently
@@ -1215,7 +1197,7 @@
 	 * at the last page of the aperture.  One page should be enough to
 	 * keep any prefetching inside of the aperture.
 	 */
-	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_init_ringbuffer(dev);
@@ -1227,16 +1209,17 @@
 	if (I915_HAS_FBC(dev) && i915_powersave) {
 		int cfb_size;
 
-		/* Try to get an 8M buffer... */
-		if (prealloc_size > (9*1024*1024))
-			cfb_size = 8*1024*1024;
+		/* Leave 1M for line length buffer & misc. */
+
+		/* Try to get a 32M buffer... */
+		if (prealloc_size > (36*1024*1024))
+			cfb_size = 32*1024*1024;
 		else /* fall back to 7/8 of the stolen space */
 			cfb_size = prealloc_size * 7 / 8;
 		i915_setup_compression(dev, cfb_size);
 	}
 
-	/* Allow hardware batchbuffers unless told otherwise.
-	 */
+	/* Allow hardware batchbuffers unless told otherwise. */
 	dev_priv->allow_batchbuffer = 1;
 
 	ret = intel_parse_bios(dev);
@@ -1252,6 +1235,7 @@
 
 	ret = vga_switcheroo_register_client(dev->pdev,
 					     i915_switcheroo_set_state,
+					     NULL,
 					     i915_switcheroo_can_switch);
 	if (ret)
 		goto cleanup_vga_client;
@@ -1426,152 +1410,12 @@
 	}
 }
 
-struct v_table {
-	u8 vid;
-	unsigned long vd; /* in .1 mil */
-	unsigned long vm; /* in .1 mil */
-	u8 pvid;
-};
-
-static struct v_table v_table[] = {
-	{ 0, 16125, 15000, 0x7f, },
-	{ 1, 16000, 14875, 0x7e, },
-	{ 2, 15875, 14750, 0x7d, },
-	{ 3, 15750, 14625, 0x7c, },
-	{ 4, 15625, 14500, 0x7b, },
-	{ 5, 15500, 14375, 0x7a, },
-	{ 6, 15375, 14250, 0x79, },
-	{ 7, 15250, 14125, 0x78, },
-	{ 8, 15125, 14000, 0x77, },
-	{ 9, 15000, 13875, 0x76, },
-	{ 10, 14875, 13750, 0x75, },
-	{ 11, 14750, 13625, 0x74, },
-	{ 12, 14625, 13500, 0x73, },
-	{ 13, 14500, 13375, 0x72, },
-	{ 14, 14375, 13250, 0x71, },
-	{ 15, 14250, 13125, 0x70, },
-	{ 16, 14125, 13000, 0x6f, },
-	{ 17, 14000, 12875, 0x6e, },
-	{ 18, 13875, 12750, 0x6d, },
-	{ 19, 13750, 12625, 0x6c, },
-	{ 20, 13625, 12500, 0x6b, },
-	{ 21, 13500, 12375, 0x6a, },
-	{ 22, 13375, 12250, 0x69, },
-	{ 23, 13250, 12125, 0x68, },
-	{ 24, 13125, 12000, 0x67, },
-	{ 25, 13000, 11875, 0x66, },
-	{ 26, 12875, 11750, 0x65, },
-	{ 27, 12750, 11625, 0x64, },
-	{ 28, 12625, 11500, 0x63, },
-	{ 29, 12500, 11375, 0x62, },
-	{ 30, 12375, 11250, 0x61, },
-	{ 31, 12250, 11125, 0x60, },
-	{ 32, 12125, 11000, 0x5f, },
-	{ 33, 12000, 10875, 0x5e, },
-	{ 34, 11875, 10750, 0x5d, },
-	{ 35, 11750, 10625, 0x5c, },
-	{ 36, 11625, 10500, 0x5b, },
-	{ 37, 11500, 10375, 0x5a, },
-	{ 38, 11375, 10250, 0x59, },
-	{ 39, 11250, 10125, 0x58, },
-	{ 40, 11125, 10000, 0x57, },
-	{ 41, 11000, 9875, 0x56, },
-	{ 42, 10875, 9750, 0x55, },
-	{ 43, 10750, 9625, 0x54, },
-	{ 44, 10625, 9500, 0x53, },
-	{ 45, 10500, 9375, 0x52, },
-	{ 46, 10375, 9250, 0x51, },
-	{ 47, 10250, 9125, 0x50, },
-	{ 48, 10125, 9000, 0x4f, },
-	{ 49, 10000, 8875, 0x4e, },
-	{ 50, 9875, 8750, 0x4d, },
-	{ 51, 9750, 8625, 0x4c, },
-	{ 52, 9625, 8500, 0x4b, },
-	{ 53, 9500, 8375, 0x4a, },
-	{ 54, 9375, 8250, 0x49, },
-	{ 55, 9250, 8125, 0x48, },
-	{ 56, 9125, 8000, 0x47, },
-	{ 57, 9000, 7875, 0x46, },
-	{ 58, 8875, 7750, 0x45, },
-	{ 59, 8750, 7625, 0x44, },
-	{ 60, 8625, 7500, 0x43, },
-	{ 61, 8500, 7375, 0x42, },
-	{ 62, 8375, 7250, 0x41, },
-	{ 63, 8250, 7125, 0x40, },
-	{ 64, 8125, 7000, 0x3f, },
-	{ 65, 8000, 6875, 0x3e, },
-	{ 66, 7875, 6750, 0x3d, },
-	{ 67, 7750, 6625, 0x3c, },
-	{ 68, 7625, 6500, 0x3b, },
-	{ 69, 7500, 6375, 0x3a, },
-	{ 70, 7375, 6250, 0x39, },
-	{ 71, 7250, 6125, 0x38, },
-	{ 72, 7125, 6000, 0x37, },
-	{ 73, 7000, 5875, 0x36, },
-	{ 74, 6875, 5750, 0x35, },
-	{ 75, 6750, 5625, 0x34, },
-	{ 76, 6625, 5500, 0x33, },
-	{ 77, 6500, 5375, 0x32, },
-	{ 78, 6375, 5250, 0x31, },
-	{ 79, 6250, 5125, 0x30, },
-	{ 80, 6125, 5000, 0x2f, },
-	{ 81, 6000, 4875, 0x2e, },
-	{ 82, 5875, 4750, 0x2d, },
-	{ 83, 5750, 4625, 0x2c, },
-	{ 84, 5625, 4500, 0x2b, },
-	{ 85, 5500, 4375, 0x2a, },
-	{ 86, 5375, 4250, 0x29, },
-	{ 87, 5250, 4125, 0x28, },
-	{ 88, 5125, 4000, 0x27, },
-	{ 89, 5000, 3875, 0x26, },
-	{ 90, 4875, 3750, 0x25, },
-	{ 91, 4750, 3625, 0x24, },
-	{ 92, 4625, 3500, 0x23, },
-	{ 93, 4500, 3375, 0x22, },
-	{ 94, 4375, 3250, 0x21, },
-	{ 95, 4250, 3125, 0x20, },
-	{ 96, 4125, 3000, 0x1f, },
-	{ 97, 4125, 3000, 0x1e, },
-	{ 98, 4125, 3000, 0x1d, },
-	{ 99, 4125, 3000, 0x1c, },
-	{ 100, 4125, 3000, 0x1b, },
-	{ 101, 4125, 3000, 0x1a, },
-	{ 102, 4125, 3000, 0x19, },
-	{ 103, 4125, 3000, 0x18, },
-	{ 104, 4125, 3000, 0x17, },
-	{ 105, 4125, 3000, 0x16, },
-	{ 106, 4125, 3000, 0x15, },
-	{ 107, 4125, 3000, 0x14, },
-	{ 108, 4125, 3000, 0x13, },
-	{ 109, 4125, 3000, 0x12, },
-	{ 110, 4125, 3000, 0x11, },
-	{ 111, 4125, 3000, 0x10, },
-	{ 112, 4125, 3000, 0x0f, },
-	{ 113, 4125, 3000, 0x0e, },
-	{ 114, 4125, 3000, 0x0d, },
-	{ 115, 4125, 3000, 0x0c, },
-	{ 116, 4125, 3000, 0x0b, },
-	{ 117, 4125, 3000, 0x0a, },
-	{ 118, 4125, 3000, 0x09, },
-	{ 119, 4125, 3000, 0x08, },
-	{ 120, 1125, 0, 0x07, },
-	{ 121, 1000, 0, 0x06, },
-	{ 122, 875, 0, 0x05, },
-	{ 123, 750, 0, 0x04, },
-	{ 124, 625, 0, 0x03, },
-	{ 125, 500, 0, 0x02, },
-	{ 126, 375, 0, 0x01, },
-	{ 127, 0, 0, 0x00, },
-};
-
-struct cparams {
-	int i;
-	int t;
-	int m;
-	int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
 	{ 1, 1333, 301, 28664 },
 	{ 1, 1066, 294, 24460 },
 	{ 1, 800, 294, 25192 },
@@ -1637,21 +1481,145 @@
 	return ((m * x) / 127) - b;
 }
 
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 {
-	unsigned long val = 0;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
-		if (v_table[i].pvid == pxvid) {
-			if (IS_MOBILE(dev_priv->dev))
-				val = v_table[i].vm;
-			else
-				val = v_table[i].vd;
-		}
-	}
-
-	return val;
+	static const struct v_table {
+		u16 vd; /* in .1 mil */
+		u16 vm; /* in .1 mil */
+	} v_table[] = {
+		{ 0, 0, },
+		{ 375, 0, },
+		{ 500, 0, },
+		{ 625, 0, },
+		{ 750, 0, },
+		{ 875, 0, },
+		{ 1000, 0, },
+		{ 1125, 0, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4250, 3125, },
+		{ 4375, 3250, },
+		{ 4500, 3375, },
+		{ 4625, 3500, },
+		{ 4750, 3625, },
+		{ 4875, 3750, },
+		{ 5000, 3875, },
+		{ 5125, 4000, },
+		{ 5250, 4125, },
+		{ 5375, 4250, },
+		{ 5500, 4375, },
+		{ 5625, 4500, },
+		{ 5750, 4625, },
+		{ 5875, 4750, },
+		{ 6000, 4875, },
+		{ 6125, 5000, },
+		{ 6250, 5125, },
+		{ 6375, 5250, },
+		{ 6500, 5375, },
+		{ 6625, 5500, },
+		{ 6750, 5625, },
+		{ 6875, 5750, },
+		{ 7000, 5875, },
+		{ 7125, 6000, },
+		{ 7250, 6125, },
+		{ 7375, 6250, },
+		{ 7500, 6375, },
+		{ 7625, 6500, },
+		{ 7750, 6625, },
+		{ 7875, 6750, },
+		{ 8000, 6875, },
+		{ 8125, 7000, },
+		{ 8250, 7125, },
+		{ 8375, 7250, },
+		{ 8500, 7375, },
+		{ 8625, 7500, },
+		{ 8750, 7625, },
+		{ 8875, 7750, },
+		{ 9000, 7875, },
+		{ 9125, 8000, },
+		{ 9250, 8125, },
+		{ 9375, 8250, },
+		{ 9500, 8375, },
+		{ 9625, 8500, },
+		{ 9750, 8625, },
+		{ 9875, 8750, },
+		{ 10000, 8875, },
+		{ 10125, 9000, },
+		{ 10250, 9125, },
+		{ 10375, 9250, },
+		{ 10500, 9375, },
+		{ 10625, 9500, },
+		{ 10750, 9625, },
+		{ 10875, 9750, },
+		{ 11000, 9875, },
+		{ 11125, 10000, },
+		{ 11250, 10125, },
+		{ 11375, 10250, },
+		{ 11500, 10375, },
+		{ 11625, 10500, },
+		{ 11750, 10625, },
+		{ 11875, 10750, },
+		{ 12000, 10875, },
+		{ 12125, 11000, },
+		{ 12250, 11125, },
+		{ 12375, 11250, },
+		{ 12500, 11375, },
+		{ 12625, 11500, },
+		{ 12750, 11625, },
+		{ 12875, 11750, },
+		{ 13000, 11875, },
+		{ 13125, 12000, },
+		{ 13250, 12125, },
+		{ 13375, 12250, },
+		{ 13500, 12375, },
+		{ 13625, 12500, },
+		{ 13750, 12625, },
+		{ 13875, 12750, },
+		{ 14000, 12875, },
+		{ 14125, 13000, },
+		{ 14250, 13125, },
+		{ 14375, 13250, },
+		{ 14500, 13375, },
+		{ 14625, 13500, },
+		{ 14750, 13625, },
+		{ 14875, 13750, },
+		{ 15000, 13875, },
+		{ 15125, 14000, },
+		{ 15250, 14125, },
+		{ 15375, 14250, },
+		{ 15500, 14375, },
+		{ 15625, 14500, },
+		{ 15750, 14625, },
+		{ 15875, 14750, },
+		{ 16000, 14875, },
+		{ 16125, 15000, },
+	};
+	if (dev_priv->info->is_mobile)
+		return v_table[pxvid].vm;
+	else
+		return v_table[pxvid].vd;
 }
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -1905,9 +1873,9 @@
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv;
-	resource_size_t base, size;
 	int ret = 0, mmio_bar;
-	uint32_t agp_size, prealloc_size;
+	uint32_t agp_size;
+
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1923,11 +1891,6 @@
 	dev_priv->dev = dev;
 	dev_priv->info = (struct intel_device_info *) flags;
 
-	/* Add register map (needed for suspend/resume) */
-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	base = pci_resource_start(dev->pdev, mmio_bar);
-	size = pci_resource_len(dev->pdev, mmio_bar);
-
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1937,16 +1900,25 @@
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
-	dev_priv->regs = ioremap(base, size);
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
 		ret = -EIO;
 		goto put_bridge;
 	}
 
+	dev_priv->mm.gtt = intel_gtt_get();
+	if (!dev_priv->mm.gtt) {
+		DRM_ERROR("Failed to initialize GTT\n");
+		ret = -ENODEV;
+		goto out_iomapfree;
+	}
+
+	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
         dev_priv->mm.gtt_mapping =
-		io_mapping_create_wc(dev->agp->base,
-				     dev->agp->agp_info.aper_size * 1024*1024);
+		io_mapping_create_wc(dev->agp->base, agp_size);
 	if (dev_priv->mm.gtt_mapping == NULL) {
 		ret = -EIO;
 		goto out_rmmap;
@@ -1958,24 +1930,13 @@
 	 * MTRR if present.  Even if a UC MTRR isn't present.
 	 */
 	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
-					 dev->agp->agp_info.aper_size *
-					 1024 * 1024,
+					 agp_size,
 					 MTRR_TYPE_WRCOMB, 1);
 	if (dev_priv->mm.gtt_mtrr < 0) {
 		DRM_INFO("MTRR allocation failed.  Graphics "
 			 "performance may suffer.\n");
 	}
 
-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto out_iomapfree;
-	}
-
-	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
-	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
 	 * by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1983,7 +1944,7 @@
 	 * bo.
 	 *
 	 * It is also used for periodic low-priority events, such as
-	 * idle-timers and hangcheck.
+	 * idle-timers and recording error state.
 	 *
 	 * All tasks on the workqueue are expected to acquire the dev mutex
 	 * so there is no point in running more than one instance of the
@@ -2001,22 +1962,6 @@
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
-	if (prealloc_size > agp_size * 3 / 4) {
-		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
-			  "memory stolen.\n",
-			  prealloc_size / 1024, agp_size / 1024);
-		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
-			  "updating the BIOS to fix).\n");
-		dev_priv->has_gem = 0;
-	}
-
-	if (dev_priv->has_gem == 0 &&
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
-		ret = -ENODEV;
-		goto out_iomapfree;
-	}
-
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2037,8 +1982,8 @@
 	/* Init HWS */
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = i915_init_phys_hws(dev);
-		if (ret != 0)
-			goto out_workqueue_free;
+		if (ret)
+			goto out_gem_unload;
 	}
 
 	if (IS_PINEVIEW(dev))
@@ -2060,16 +2005,13 @@
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	spin_lock_init(&dev_priv->user_irq_lock);
+	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
-	if (ret) {
-		(void) i915_driver_unload(dev);
-		return ret;
-	}
+	if (ret)
+		goto out_gem_unload;
 
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;
@@ -2077,10 +2019,10 @@
 	intel_detect_pch(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_workqueue_free;
+			goto out_gem_unload;
 		}
 	}
 
@@ -2100,12 +2042,17 @@
 
 	return 0;
 
-out_workqueue_free:
+out_gem_unload:
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	intel_teardown_gmbus(dev);
+	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
-	iounmap(dev_priv->regs);
+	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -2122,6 +2069,9 @@
 	i915_mch_dev = NULL;
 	spin_unlock(&mchdev_lock);
 
+	if (dev_priv->mm.inactive_shrinker.shrink)
+		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_gpu_idle(dev);
 	if (ret)
@@ -2179,7 +2129,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		if (I915_HAS_FBC(dev) && i915_powersave)
 			i915_cleanup_compression(dev);
-		drm_mm_takedown(&dev_priv->mm.vram);
+		drm_mm_takedown(&dev_priv->mm.stolen);
 
 		intel_cleanup_overlay(dev);
 
@@ -2188,7 +2138,7 @@
 	}
 
 	if (dev_priv->regs != NULL)
-		iounmap(dev_priv->regs);
+		pci_iounmap(dev->pdev, dev_priv->regs);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960..72fea2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,12 @@
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+unsigned int i915_panel_use_ssc = 1;
+module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -111,7 +117,7 @@
 
 static const struct intel_device_info intel_i965gm_info = {
 	.gen = 4, .is_crestline = 1,
-	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
 };
@@ -130,7 +136,7 @@
 
 static const struct intel_device_info intel_gm45_info = {
 	.gen = 4, .is_g4x = 1,
-	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
 	.has_bsd_ring = 1,
@@ -150,7 +156,7 @@
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 0, /* disabled due to buggy hardware */
 	.has_bsd_ring = 1,
 };
@@ -165,6 +171,7 @@
 static const struct intel_device_info intel_sandybridge_m_info = {
 	.gen = 6, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 };
@@ -244,10 +251,34 @@
 	}
 }
 
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+		udelay(10);
+
+	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	POSTING_READ(FORCEWAKE);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+		udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	POSTING_READ(FORCEWAKE);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	drm_kms_helper_poll_disable(dev);
+
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
@@ -284,7 +315,9 @@
 	if (state.event == PM_EVENT_PRETHAW)
 		return 0;
 
-	drm_kms_helper_poll_disable(dev);
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
 
 	error = i915_drm_freeze(dev);
 	if (error)
@@ -304,6 +337,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
@@ -319,6 +358,9 @@
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
+
+		if (dev_priv->renderctx && dev_priv->pwrctx)
+			ironlake_enable_rc6(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -332,6 +374,9 @@
 {
 	int ret;
 
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 
@@ -405,6 +450,14 @@
 	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+	return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
+
 /**
  * i965_reset - reset chip after a hang
  * @dev: drm device to reset
@@ -431,7 +484,11 @@
 	bool need_display = true;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!i915_try_reset)
+		return 0;
+
+	if (!mutex_trylock(&dev->struct_mutex))
+		return -EBUSY;
 
 	i915_gem_reset(dev);
 
@@ -439,6 +496,9 @@
 	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	} else switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		ret = gen6_do_reset(dev, flags);
+		break;
 	case 5:
 		ret = ironlake_do_reset(dev, flags);
 		break;
@@ -472,9 +532,14 @@
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
-		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-		ring->init(dev, ring);
+
+		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+		if (HAS_BSD(dev))
+		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+		if (HAS_BLT(dev))
+		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
@@ -523,6 +588,9 @@
 		return -ENODEV;
 	}
 
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	error = i915_drm_freeze(drm_dev);
 	if (error)
 		return error;
@@ -606,6 +674,8 @@
 	.device_is_agp = i915_driver_device_is_agp,
 	.enable_vblank = i915_enable_vblank,
 	.disable_vblank = i915_disable_vblank,
+	.get_vblank_timestamp = i915_get_vblank_timestamp,
+	.get_scanout_position = i915_get_crtc_scanoutpos,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +731,6 @@
 
 	driver.num_ioctls = i915_max_ioctl;
 
-	i915_gem_shrinker_init();
-
 	/*
 	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 	 * explicitly disabled with the module parameter.
@@ -684,17 +752,11 @@
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-	if (!(driver.driver_features & DRIVER_MODESET)) {
-		driver.suspend = i915_suspend;
-		driver.resume = i915_resume;
-	}
-
 	return drm_init(&driver);
 }
 
 static void __exit i915_exit(void)
 {
-	i915_gem_shrinker_exit();
 	drm_exit(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826d..5969f46 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@
 	int id;
 	struct page **page_list;
 	drm_dma_handle_t *handle;
-	struct drm_gem_object *cur_obj;
+	struct drm_i915_gem_object *cur_obj;
 };
 
 struct mem_block {
@@ -124,9 +124,9 @@
 #define I915_FENCE_REG_NONE -1
 
 struct drm_i915_fence_reg {
-	struct drm_gem_object *obj;
 	struct list_head lru_list;
-	bool gpu;
+	struct drm_i915_gem_object *obj;
+	uint32_t setup_seqno;
 };
 
 struct sdvo_device_mapping {
@@ -139,6 +139,8 @@
 	u8 ddc_pin;
 };
 
+struct intel_display_error_state;
+
 struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
@@ -148,32 +150,47 @@
 	u32 ipehr;
 	u32 instdone;
 	u32 acthd;
+	u32 error; /* gen6+ */
+	u32 bcs_acthd; /* gen6+ blt engine */
+	u32 bcs_ipehr;
+	u32 bcs_ipeir;
+	u32 bcs_instdone;
+	u32 bcs_seqno;
+	u32 vcs_acthd; /* gen6+ bsd engine */
+	u32 vcs_ipehr;
+	u32 vcs_ipeir;
+	u32 vcs_instdone;
+	u32 vcs_seqno;
 	u32 instpm;
 	u32 instps;
 	u32 instdone1;
 	u32 seqno;
 	u64 bbaddr;
+	u64 fence[16];
 	struct timeval time;
 	struct drm_i915_error_object {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[2];
+	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
-		size_t size;
+		u32 size;
 		u32 name;
 		u32 seqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		u32 fence_reg;
+		s32 fence_reg:5;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
 		u32 purgeable:1;
-	} *active_bo;
-	u32 active_bo_count;
+		u32 ring:4;
+		u32 agp_type:1;
+	} *active_bo, *pinned_bo;
+	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
 };
 
 struct drm_i915_display_funcs {
@@ -207,7 +224,6 @@
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
 	u8 has_fbc : 1;
-	u8 has_rc6 : 1;
 	u8 has_pipe_cxsr : 1;
 	u8 has_hotplug : 1;
 	u8 cursor_needs_physical : 1;
@@ -243,6 +259,7 @@
 	const struct intel_device_info *info;
 
 	int has_gem;
+	int relative_constants_mode;
 
 	void __iomem *regs;
 
@@ -253,20 +270,15 @@
 	} *gmbus;
 
 	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer render_ring;
-	struct intel_ring_buffer bsd_ring;
-	struct intel_ring_buffer blt_ring;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
-	void *seqno_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
-	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
-	struct drm_gem_object *seqno_obj;
-	struct drm_gem_object *pwrctx;
-	struct drm_gem_object *renderctx;
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
 
 	struct resource mch_res;
 
@@ -275,25 +287,17 @@
 	int front_offset;
 	int current_page;
 	int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
-	unsigned long debug_flags;
 
-	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
-	/** Protects user_irq_refcount and irq_mask_reg */
-	spinlock_t user_irq_lock;
 	u32 trace_irq_seqno;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on Ironlake,
-	    irq_mask_reg is still used for display irq. */
-	u32 gt_irq_mask_reg;
-	u32 gt_irq_enable_reg;
-	u32 de_irq_enable_reg;
-	u32 pch_irq_mask_reg;
-	u32 pch_irq_enable_reg;
+	u32 irq_mask;
+	u32 gt_irq_mask;
+	u32 pch_irq_mask;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -306,7 +310,7 @@
 	int num_pipe;
 
 	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd;
@@ -329,6 +333,7 @@
 
 	/* LVDS info */
 	int backlight_level;  /* restore backlight to this value */
+	bool backlight_enabled;
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -530,23 +535,21 @@
 
 	struct {
 		/** Bridge to intel-gtt-ko */
-		struct intel_gtt *gtt;
+		const struct intel_gtt *gtt;
 		/** Memory allocator for GTT stolen memory */
-		struct drm_mm vram;
+		struct drm_mm stolen;
 		/** Memory allocator for GTT */
 		struct drm_mm gtt_space;
+		/** List of all objects in gtt_space. Used to restore gtt
+		 * mappings on resume */
+		struct list_head gtt_list;
+		/** End of mappable part of GTT */
+		unsigned long gtt_mappable_end;
 
 		struct io_mapping *gtt_mapping;
 		int gtt_mtrr;
 
-		/**
-		 * Membership on list of all loaded devices, used to evict
-		 * inactive buffers under memory pressure.
-		 *
-		 * Modifications should only be done whilst holding the
-		 * shrink_list_lock spinlock.
-		 */
-		struct list_head shrink_list;
+		struct shrinker inactive_shrinker;
 
 		/**
 		 * List of objects currently involved in rendering.
@@ -609,16 +612,6 @@
 		struct delayed_work retire_work;
 
 		/**
-		 * Waiting sequence number, if any
-		 */
-		uint32_t waiting_gem_seqno;
-
-		/**
-		 * Last seq seen at irq time
-		 */
-		uint32_t irq_gem_seqno;
-
-		/**
 		 * Flag if the X Server, and thus DRM, is not currently in
 		 * control of the device.
 		 *
@@ -645,16 +638,11 @@
 		/* storage for physical objects */
 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
-		uint32_t flush_rings;
-
 		/* accounting, useful for userland debugging */
-		size_t object_memory;
-		size_t pin_memory;
-		size_t gtt_memory;
 		size_t gtt_total;
+		size_t mappable_gtt_total;
+		size_t object_memory;
 		u32 object_count;
-		u32 pin_count;
-		u32 gtt_count;
 	} mm;
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +676,14 @@
 	u8 fmax;
 	u8 fstart;
 
- 	u64 last_count1;
- 	unsigned long last_time1;
- 	u64 last_count2;
- 	struct timespec last_time2;
- 	unsigned long gfx_power;
- 	int c_m;
- 	int r_t;
- 	u8 corr;
+	u64 last_count1;
+	unsigned long last_time1;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	int c_m;
+	int r_t;
+	u8 corr;
 	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
@@ -709,20 +697,20 @@
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
-/** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
+	struct list_head gtt_list;
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
-	/** This object's place on eviction list */
-	struct list_head evict_list;
+	/** This object's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
 
 	/**
 	 * This is set if the object is on the active or flushing lists
@@ -738,6 +726,12 @@
 	unsigned int dirty : 1;
 
 	/**
+	 * This is set if the object has been written to since the last
+	 * GPU flush.
+	 */
+	unsigned int pending_gpu_write : 1;
+
+	/**
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -747,29 +741,15 @@
 	signed int fence_reg : 5;
 
 	/**
-	 * Used for checking the object doesn't appear more than once
-	 * in an execbuffer object list.
-	 */
-	unsigned int in_execbuffer : 1;
-
-	/**
 	 * Advice: are the backing pages purgeable?
 	 */
 	unsigned int madv : 2;
 
 	/**
-	 * Refcount for the pages array. With the current locking scheme, there
-	 * are at most two concurrent users: Binding a bo to the gtt and
-	 * pwrite/pread using physical addresses. So two bits for a maximum
-	 * of two users are enough.
-	 */
-	unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
-	/**
 	 * Current tiling mode for the object.
 	 */
 	unsigned int tiling_mode : 2;
+	unsigned int tiling_changed : 1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +763,55 @@
 	unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
-	/** AGP memory structure for our GTT binding. */
-	DRM_AGP_MEM *agp_mem;
+	/**
+	 * Is the object at the current location in the gtt mappable and
+	 * fenceable? Used to avoid costly recalculations.
+	 */
+	unsigned int map_and_fenceable : 1;
+
+	/**
+	 * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+	 * accurate mappable working set.
+	 */
+	unsigned int fault_mappable : 1;
+	unsigned int pin_mappable : 1;
+
+	/*
+	 * Is the GPU currently using a fence to access this buffer,
+	 */
+	unsigned int pending_fenced_gpu_access:1;
+	unsigned int fenced_gpu_access:1;
 
 	struct page **pages;
 
 	/**
+	 * DMAR support
+	 */
+	struct scatterlist *sg_list;
+	int num_sg;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
 	 * Current offset of the object in GTT space.
 	 *
 	 * This is the same as gtt_space->start
 	 */
 	uint32_t gtt_offset;
 
-	/* Which ring is refering to is this object */
-	struct intel_ring_buffer *ring;
-
-	/**
-	 * Fake offset for use by mmap(2)
-	 */
-	uint64_t mmap_offset;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
+	struct intel_ring_buffer *ring;
+
+	/** Breadcrumb of last fenced GPU access to the buffer. */
+	uint32_t last_fenced_seqno;
+	struct intel_ring_buffer *last_fenced_ring;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
@@ -880,11 +887,74 @@
 	CHIP_I965 = 0x08,
 };
 
+#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev)		((dev)->pci_device == 0x3577)
+#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
+#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+						      IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
 extern unsigned int i915_lvds_downclock;
+extern unsigned int i915_panel_use_ssc;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -907,8 +977,8 @@
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 extern int i915_emit_box(struct drm_device *dev,
-			 struct drm_clip_rect *boxes,
-			 int i, int DR1, int DR4);
+			 struct drm_clip_rect *box,
+			 int DR1, int DR4);
 extern int i915_reset(struct drm_device *dev, u8 flags);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +988,7 @@
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -939,12 +1010,6 @@
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-		u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-		u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -953,6 +1018,13 @@
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+			      int *max_error,
+			      struct timeval *vblank_time,
+			      unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1089,28 @@
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-					      size_t size);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+				     uint32_t alignment,
+				     bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+						bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+				    struct intel_ring_buffer *ring,
+				    u32 seqno);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1035,73 +1120,88 @@
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-				  bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-				  bool interruptible);
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+					   struct intel_ring_buffer *pipelined,
+					   bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
-			       uint32_t read_domains,
-			       uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-			      bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+					    uint32_t read_domains,
+					    uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+					   bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
-		     unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev,
-			  struct drm_file *file_priv,
-			  struct drm_i915_gem_request *request,
-			  struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
-			 uint32_t seqno,
-			 bool interruptible,
-			 struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+		      unsigned long start,
+		      unsigned long mappable_end,
+		      unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_i915_gem_request *request,
+				  struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+				      uint32_t seqno,
+				      bool interruptible,
+				      struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
-				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-					 bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+				  bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_gem_object *obj,
+				struct drm_i915_gem_object *obj,
 				int id,
 				int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_gem_object *obj);
+				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+/* i915_gem_gtt.c */
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+					  unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+					   bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+					 bool purgeable_only);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
-		    int tiling_mode);
-bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
-				     int tiling_mode);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
 #else
 #define i915_verify_lists(dev) 0
 #endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+				     int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 
 /* i915_debugfs.c */
@@ -1163,6 +1263,8 @@
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
@@ -1170,79 +1272,120 @@
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+					    struct drm_device *dev,
+					    struct intel_display_error_state *error);
 #endif
 
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+	intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(LP_RING(dev_priv))
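+
+/*
+ * Illustrative usage only (not introduced by this change): with a local
+ * "dev_priv" in scope, a caller might emit commands as
+ *
+ *	if (BEGIN_LP_RING(2) == 0) {
+ *		OUT_RING(MI_NOOP);
+ *		OUT_RING(MI_NOOP);
+ *		ADVANCE_LP_RING();
+ *	}
+ *
+ * all of which now operates on LP_RING(dev_priv), i.e. &dev_priv->ring[RCS].
+ */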
+
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
  * In that case, we don't need to do it when GEM is initialized as nobody else
  * has access to the ring.
  */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
-	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
-			== NULL)					\
-		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
+	if (LP_RING(dev->dev_private)->obj == NULL)			\
+		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
-static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+
+#define __i915_read(x, y) \
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+	u##x val = read##y(dev_priv->regs + reg); \
+	trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+	return val; \
+}
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+	trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+	write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
+
+#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg)	readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val)	writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg)		i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg)		readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val)	writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg)	i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
+
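+/*
+ * For illustration only (no functional change implied): with a local
+ * "dev_priv", I915_READ(reg) expands to i915_read32(dev_priv, (reg)), so
+ * regular 8/16/32/64-bit MMIO accesses are funnelled through the generated
+ * i915_read##x/i915_write##x helpers and hence trace_i915_reg_rw(), while
+ * the *_NOTRACE variants and POSTING_READ() bypass the tracepoint.
+ */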
+
+/* On SNB platforms, the forcewake bit must be set before reading ring
+ * registers, to prevent the GT core from powering down and returning
+ * stale values.
+ */
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
-	val = readl(dev_priv->regs + reg);
-	if (dev_priv->debug_flags & I915_DEBUG_READ)
-		printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+	if (dev_priv->info->gen >= 6) {
+		__gen6_force_wake_get(dev_priv);
+		val = I915_READ(reg);
+		__gen6_force_wake_put(dev_priv);
+	} else
+		val = I915_READ(reg);
+
 	return val;
 }
 
-static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
-			      u32 val)
+static inline void
+i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
-	writel(val, dev_priv->regs + reg);
-	if (dev_priv->debug_flags & I915_DEBUG_WRITE)
-		printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+	/* Trace the write before the real MMIO write is performed */
+	trace_i915_reg_rw('W', reg, val, len);
+	switch (len) {
+	case 8:
+		writeq(val, dev_priv->regs + reg);
+		break;
+	case 4:
+		writel(val, dev_priv->regs + reg);
+		break;
+	case 2:
+		writew(val, dev_priv->regs + reg);
+		break;
+	case 1:
+		writeb(val, dev_priv->regs + reg);
+		break;
+	}
 }
 
-#define I915_READ(reg)          i915_read(dev_priv, (reg))
-#define I915_WRITE(reg, val)    i915_write(dev_priv, (reg), (val))
-#define I915_READ16(reg)	readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg)		readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg)	readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg)	(void)I915_READ(reg)
-#define POSTING_READ16(reg)	(void)I915_READ16(reg)
-
-#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
-				I915_DEBUG_WRITE)
-#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
-							    I915_DEBUG_WRITE))
-
-#define I915_VERBOSE 0
-
-#define BEGIN_LP_RING(n)  do { \
-	drm_i915_private_t *dev_priv__ = dev->dev_private;                \
-	if (I915_VERBOSE)						\
-		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
-	intel_ring_begin(dev, &dev_priv__->render_ring, (n));		\
-} while (0)
-
-
-#define OUT_RING(x) do {						\
-	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
-	if (I915_VERBOSE)						\
-		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
-	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
-} while (0)
-
-#define ADVANCE_LP_RING() do {						\
-	drm_i915_private_t *dev_priv__ = dev->dev_private;                \
-	if (I915_VERBOSE)						\
-		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
-				dev_priv__->render_ring.tail);		\
-	intel_ring_advance(dev, &dev_priv__->render_ring);		\
-} while(0)
-
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1402,9 @@
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
 #define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
-			(dev_priv->render_ring.status_page.page_addr))[reg])
+			(LP_RING(dev_priv)->status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
 #define I915_BREADCRUMB_INDEX		0x21
 
-#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev)		((dev)->pci_device == 0x3577)
-#define IS_845G(dev)		((dev)->pci_device == 0x2562)
-#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
-#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
-
-#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
-
-#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
-#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
-						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 275ec6e..3dfc848 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,38 +34,31 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
-#include <linux/intel-gtt.h>
 
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
-					     int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
-						     uint64_t offset,
-						     uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
-					  bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
-					   unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								  uint64_t offset,
+								  uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+				     struct drm_i915_fence_reg *reg);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+				struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-			  gfp_t gfpmask);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+				    int nr_to_scan,
+				    gfp_t gfp_mask);
 
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -82,34 +75,6 @@
 	dev_priv->mm.object_memory -= size;
 }
 
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
-				  size_t size)
-{
-	dev_priv->mm.gtt_count++;
-	dev_priv->mm.gtt_memory += size;
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
-				     size_t size)
-{
-	dev_priv->mm.gtt_count--;
-	dev_priv->mm.gtt_memory -= size;
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
-				  size_t size)
-{
-	dev_priv->mm.pin_count++;
-	dev_priv->mm.pin_memory += size;
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
-				     size_t size)
-{
-	dev_priv->mm.pin_count--;
-	dev_priv->mm.pin_memory -= size;
-}
-
 int
 i915_gem_check_is_wedged(struct drm_device *dev)
 {
@@ -140,7 +105,7 @@
 	return -EIO;
 }
 
-static int i915_mutex_lock_interruptible(struct drm_device *dev)
+int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -163,75 +128,76 @@
 }
 
 static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return obj_priv->gtt_space &&
-		!obj_priv->active &&
-		obj_priv->pin_count == 0;
+	return obj->gtt_space && !obj->active && obj->pin_count == 0;
 }
 
-int i915_gem_do_init(struct drm_device *dev,
-		     unsigned long start,
-		     unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+		      unsigned long start,
+		      unsigned long mappable_end,
+		      unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (start >= end ||
-	    (start & (PAGE_SIZE - 1)) != 0 ||
-	    (end & (PAGE_SIZE - 1)) != 0) {
-		return -EINVAL;
-	}
-
 	drm_mm_init(&dev_priv->mm.gtt_space, start,
 		    end - start);
 
 	dev_priv->mm.gtt_total = end - start;
-
-	return 0;
+	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
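+	/*
+	 * Illustrative note: only [start, mappable_end) is reachable through
+	 * the CPU-visible aperture; objects bound beyond mappable_end remain
+	 * usable by the GPU but are rejected for GTT mmap (see the
+	 * gtt_mappable_end checks in the mmap paths below).
+	 */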
 }
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+		    struct drm_file *file)
 {
 	struct drm_i915_gem_init *args = data;
-	int ret;
+
+	if (args->gtt_start >= args->gtt_end ||
+	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_get_aperture *args = data;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	mutex_lock(&dev->struct_mutex);
-	args->aper_size = dev_priv->mm.gtt_total;
-	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_get_aperture *args = data;
+	struct drm_i915_gem_object *obj;
+	size_t pinned;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	pinned = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+		pinned += obj->gtt_space->size;
+	mutex_unlock(&dev->struct_mutex);
+
+	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_available_size = args->aper_size - pinned;
+
+	return 0;
+}
 
 /**
  * Creates a new mm object and returns a handle to it.
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+		      struct drm_file *file)
 {
 	struct drm_i915_gem_create *args = data;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret;
 	u32 handle;
 
@@ -242,45 +208,28 @@
 	if (obj == NULL)
 		return -ENOMEM;
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	if (ret) {
-		drm_gem_object_release(obj);
-		i915_gem_info_remove_obj(dev->dev_private, obj->size);
+		drm_gem_object_release(&obj->base);
+		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
 		kfree(obj);
 		return ret;
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	trace_i915_gem_object_create(obj);
 
 	args->handle = handle;
 	return 0;
 }
 
-static inline int
-fast_shmem_read(struct page **pages,
-		loff_t page_base, int page_offset,
-		char __user *data,
-		int length)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	char *vaddr;
-	int ret;
-
-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-	kunmap_atomic(vaddr);
-
-	return ret;
-}
-
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = obj->dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj_priv->tiling_mode != I915_TILING_NONE;
+		obj->tiling_mode != I915_TILING_NONE;
 }
 
 static inline void
@@ -356,38 +305,51 @@
  * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
  */
 static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+			  struct drm_i915_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file_priv)
+			  struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
-	loff_t offset, page_base;
+	loff_t offset;
 	char __user *user_data;
 	int page_offset, page_length;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
+		struct page *page;
+		char *vaddr;
+		int ret;
+
 		/* Operation in this page
 		 *
-		 * page_base = page offset within aperture
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = (offset & ~(PAGE_SIZE-1));
 		page_offset = offset & (PAGE_SIZE-1);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		if (fast_shmem_read(obj_priv->pages,
-				    page_base, page_offset,
-				    user_data, page_length))
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		vaddr = kmap_atomic(page);
+		ret = __copy_to_user_inatomic(user_data,
+					      vaddr + page_offset,
+					      page_length);
+		kunmap_atomic(vaddr);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+		if (ret)
 			return -EFAULT;
 
 		remain -= page_length;
@@ -398,30 +360,6 @@
 	return 0;
 }
 
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
-	int ret;
-
-	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
-	/* If we've insufficient memory to map in the pages, attempt
-	 * to make some space by throwing out some old buffers.
-	 */
-	if (ret == -ENOMEM) {
-		struct drm_device *dev = obj->dev;
-
-		ret = i915_gem_evict_something(dev, obj->size,
-					       i915_gem_get_gtt_alignment(obj));
-		if (ret)
-			return ret;
-
-		ret = i915_gem_object_get_pages(obj, 0);
-	}
-
-	return ret;
-}
-
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -429,18 +367,19 @@
  * and not take page faults.
  */
 static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+			  struct drm_i915_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file_priv)
+			  struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
 	loff_t offset, pinned_pages, i;
 	loff_t first_data_page, last_data_page, num_pages;
-	int shmem_page_index, shmem_page_offset;
-	int data_page_index,  data_page_offset;
+	int shmem_page_offset;
+	int data_page_index, data_page_offset;
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
@@ -479,19 +418,18 @@
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
+		struct page *page;
+
 		/* Operation in this page
 		 *
-		 * shmem_page_index = page number within shmem file
 		 * shmem_page_offset = offset within page in shmem file
 		 * data_page_index = page number in get_user_pages return
 		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
-		shmem_page_index = offset / PAGE_SIZE;
 		shmem_page_offset = offset & ~PAGE_MASK;
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 		data_page_offset = data_ptr & ~PAGE_MASK;
@@ -502,8 +440,13 @@
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
 		if (do_bit17_swizzling) {
-			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(page,
 					      shmem_page_offset,
 					      user_pages[data_page_index],
 					      data_page_offset,
@@ -512,11 +455,14 @@
 		} else {
 			slow_shmem_copy(user_pages[data_page_index],
 					data_page_offset,
-					obj_priv->pages[shmem_page_index],
+					page,
 					shmem_page_offset,
 					page_length);
 		}
 
+		mark_page_accessed(page);
+		page_cache_release(page);
+
 		remain -= page_length;
 		data_ptr += page_length;
 		offset += page_length;
@@ -525,6 +471,7 @@
 out:
 	for (i = 0; i < pinned_pages; i++) {
 		SetPageDirty(user_pages[i]);
+		mark_page_accessed(user_pages[i]);
 		page_cache_release(user_pages[i]);
 	}
 	drm_free_large(user_pages);
@@ -539,11 +486,10 @@
  */
 int
 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_i915_gem_pread *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
 	if (args->size == 0)
@@ -563,39 +509,33 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check source.  */
-	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	ret = i915_gem_object_get_pages_or_evict(obj);
-	if (ret)
-		goto out;
-
 	ret = i915_gem_object_set_cpu_read_domain_range(obj,
 							args->offset,
 							args->size);
 	if (ret)
-		goto out_put;
+		goto out;
 
 	ret = -EFAULT;
 	if (!i915_gem_object_needs_bit17_swizzle(obj))
-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
 	if (ret == -EFAULT)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
 
-out_put:
-	i915_gem_object_put_pages(obj);
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -645,32 +585,16 @@
 	io_mapping_unmap(dst_vaddr);
 }
 
-static inline int
-fast_shmem_write(struct page **pages,
-		 loff_t page_base, int page_offset,
-		 char __user *data,
-		 int length)
-{
-	char *vaddr;
-	int ret;
-
-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-	kunmap_atomic(vaddr);
-
-	return ret;
-}
-
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
-			 struct drm_file *file_priv)
+			 struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t offset, page_base;
@@ -680,8 +604,7 @@
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	obj_priv = to_intel_bo(obj);
-	offset = obj_priv->gtt_offset + args->offset;
+	offset = obj->gtt_offset + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -721,11 +644,11 @@
  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
  */
 static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
-			 struct drm_file *file_priv)
+			 struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t gtt_page_base, offset;
@@ -762,12 +685,15 @@
 		goto out_unpin_pages;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin_pages;
 
-	obj_priv = to_intel_bo(obj);
-	offset = obj_priv->gtt_offset + args->offset;
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		goto out_unpin_pages;
+
+	offset = obj->gtt_offset + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -813,39 +739,58 @@
  * copy_from_user into the kmapped pages backing the object.
  */
 static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+			   struct drm_i915_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
-			   struct drm_file *file_priv)
+			   struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
-	loff_t offset, page_base;
+	loff_t offset;
 	char __user *user_data;
 	int page_offset, page_length;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
-	obj_priv->dirty = 1;
+	obj->dirty = 1;
 
 	while (remain > 0) {
+		struct page *page;
+		char *vaddr;
+		int ret;
+
 		/* Operation in this page
 		 *
-		 * page_base = page offset within aperture
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = (offset & ~(PAGE_SIZE-1));
 		page_offset = offset & (PAGE_SIZE-1);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		if (fast_shmem_write(obj_priv->pages,
-				       page_base, page_offset,
-				       user_data, page_length))
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		vaddr = kmap_atomic(page, KM_USER0);
+		ret = __copy_from_user_inatomic(vaddr + page_offset,
+						user_data,
+						page_length);
+		kunmap_atomic(vaddr, KM_USER0);
+
+		set_page_dirty(page);
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (ret)
 			return -EFAULT;
 
 		remain -= page_length;
@@ -864,17 +809,18 @@
  * struct_mutex is held.
  */
 static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+			   struct drm_i915_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
-			   struct drm_file *file_priv)
+			   struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
 	loff_t offset, pinned_pages, i;
 	loff_t first_data_page, last_data_page, num_pages;
-	int shmem_page_index, shmem_page_offset;
+	int shmem_page_offset;
 	int data_page_index,  data_page_offset;
 	int page_length;
 	int ret;
@@ -912,20 +858,19 @@
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
-	obj_priv->dirty = 1;
+	obj->dirty = 1;
 
 	while (remain > 0) {
+		struct page *page;
+
 		/* Operation in this page
 		 *
-		 * shmem_page_index = page number within shmem file
 		 * shmem_page_offset = offset within page in shmem file
 		 * data_page_index = page number in get_user_pages return
 		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
-		shmem_page_index = offset / PAGE_SIZE;
 		shmem_page_offset = offset & ~PAGE_MASK;
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 		data_page_offset = data_ptr & ~PAGE_MASK;
@@ -936,21 +881,32 @@
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto out;
+		}
+
 		if (do_bit17_swizzling) {
-			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(page,
 					      shmem_page_offset,
 					      user_pages[data_page_index],
 					      data_page_offset,
 					      page_length,
 					      0);
 		} else {
-			slow_shmem_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_copy(page,
 					shmem_page_offset,
 					user_pages[data_page_index],
 					data_page_offset,
 					page_length);
 		}
 
+		set_page_dirty(page);
+		mark_page_accessed(page);
+		page_cache_release(page);
+
 		remain -= page_length;
 		data_ptr += page_length;
 		offset += page_length;
@@ -974,8 +930,7 @@
 		      struct drm_file *file)
 {
 	struct drm_i915_gem_pwrite *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	if (args->size == 0)
@@ -995,15 +950,15 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check destination. */
-	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1014,16 +969,19 @@
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj_priv->phys_obj)
+	if (obj->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
-	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 obj_priv->gtt_space &&
-		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
-		ret = i915_gem_object_pin(obj, 0);
+	else if (obj->gtt_space &&
+		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		ret = i915_gem_object_pin(obj, 0, true);
 		if (ret)
 			goto out;
 
-		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret)
+			goto out_unpin;
+
+		ret = i915_gem_object_put_fence(obj);
 		if (ret)
 			goto out_unpin;
 
@@ -1034,26 +992,19 @@
 out_unpin:
 		i915_gem_object_unpin(obj);
 	} else {
-		ret = i915_gem_object_get_pages_or_evict(obj);
-		if (ret)
-			goto out;
-
 		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 		if (ret)
-			goto out_put;
+			goto out;
 
 		ret = -EFAULT;
 		if (!i915_gem_object_needs_bit17_swizzle(obj))
 			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
 		if (ret == -EFAULT)
 			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
-		i915_gem_object_put_pages(obj);
 	}
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1065,12 +1016,10 @@
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
+			  struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_set_domain *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	uint32_t read_domains = args->read_domains;
 	uint32_t write_domain = args->write_domain;
 	int ret;
@@ -1095,28 +1044,15 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
-
-	intel_mark_busy(dev, obj);
 
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
-		/* Update the LRU on the fence for the CPU access that's
-		 * about to occur.
-		 */
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-			struct drm_i915_fence_reg *reg =
-				&dev_priv->fence_regs[obj_priv->fence_reg];
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-		}
-
 		/* Silently promote "you're not bound, there was nothing to do"
 		 * to success, since the client was just asking us to
 		 * make sure everything was done.
@@ -1127,11 +1063,7 @@
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
-	/* Maintain LRU order of "inactive" objects */
-	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1142,10 +1074,10 @@
  */
 int
 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+			 struct drm_file *file)
 {
 	struct drm_i915_gem_sw_finish *args = data;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1155,17 +1087,17 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if (to_intel_bo(obj)->pin_count)
+	if (obj->pin_count)
 		i915_gem_object_flush_cpu_write_domain(obj);
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1180,8 +1112,9 @@
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		    struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
 	loff_t offset;
@@ -1190,10 +1123,15 @@
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 
+	if (obj->size > dev_priv->mm.gtt_mappable_end) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -E2BIG;
+	}
+
 	offset = args->offset;
 
 	down_write(&current->mm->mmap_sem);
@@ -1228,10 +1166,9 @@
  */
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
@@ -1243,27 +1180,35 @@
 
 	/* Now bind it into the GTT if needed */
 	mutex_lock(&dev->struct_mutex);
-	if (!obj_priv->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0);
+
+	if (!obj->map_and_fenceable) {
+		ret = i915_gem_object_unbind(obj);
 		if (ret)
 			goto unlock;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	}
+	if (!obj->gtt_space) {
+		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
 		if (ret)
 			goto unlock;
 	}
 
-	/* Need a new fence register? */
-	if (obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj, true);
-		if (ret)
-			goto unlock;
-	}
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unlock;
 
-	if (i915_gem_object_is_inactive(obj_priv))
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+	if (obj->tiling_mode == I915_TILING_NONE)
+		ret = i915_gem_object_put_fence(obj);
+	else
+		ret = i915_gem_object_get_fence(obj, NULL, true);
+	if (ret)
+		goto unlock;
 
-	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	obj->fault_mappable = true;
+
+	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
 
 	/* Finally, remap it using the new GTT offset */
@@ -1272,11 +1217,12 @@
 	mutex_unlock(&dev->struct_mutex);
 
 	switch (ret) {
+	case -EAGAIN:
+		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
-	case -EAGAIN:
 		return VM_FAULT_OOM;
 	default:
 		return VM_FAULT_SIGBUS;
@@ -1295,37 +1241,39 @@
  * This routine allocates and attaches a fake offset for @obj.
  */
 static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_map_list *list;
 	struct drm_local_map *map;
 	int ret = 0;
 
 	/* Set the object up for mmap'ing */
-	list = &obj->map_list;
+	list = &obj->base.map_list;
 	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
 	if (!list->map)
 		return -ENOMEM;
 
 	map = list->map;
 	map->type = _DRM_GEM;
-	map->size = obj->size;
+	map->size = obj->base.size;
 	map->handle = obj;
 
 	/* Get a DRM GEM mmap offset allocated... */
 	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-						    obj->size / PAGE_SIZE, 0, 0);
+						    obj->base.size / PAGE_SIZE,
+						    0, 0);
 	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		DRM_ERROR("failed to allocate offset for bo %d\n",
+			  obj->base.name);
 		ret = -ENOSPC;
 		goto out_free_list;
 	}
 
 	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-						  obj->size / PAGE_SIZE, 0);
+						  obj->base.size / PAGE_SIZE,
+						  0);
 	if (!list->file_offset_node) {
 		ret = -ENOMEM;
 		goto out_free_list;
@@ -1338,16 +1286,13 @@
 		goto out_free_mm;
 	}
 
-	/* By now we should be all set, any drm_mmap request on the offset
-	 * below will get to our mmap & fault handler */
-	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
 	return 0;
 
 out_free_mm:
 	drm_mm_put_block(list->file_offset_node);
 out_free_list:
 	kfree(list->map);
+	list->map = NULL;
 
 	return ret;
 }
@@ -1367,38 +1312,51 @@
  * fixup by i915_gem_fault().
  */
 void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	if (!obj->fault_mappable)
+		return;
 
-	if (dev->dev_mapping)
-		unmap_mapping_range(dev->dev_mapping,
-				    obj_priv->mmap_offset, obj->size, 1);
+	unmap_mapping_range(obj->base.dev->dev_mapping,
+			    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+			    obj->base.size, 1);
+
+	obj->fault_mappable = false;
 }
 
 static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
+	struct drm_map_list *list = &obj->base.map_list;
 
-	list = &obj->map_list;
 	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	drm_mm_put_block(list->file_offset_node);
+	kfree(list->map);
+	list->map = NULL;
+}
 
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	uint32_t size;
 
-	if (list->map) {
-		kfree(list->map);
-		list->map = NULL;
-	}
+	if (INTEL_INFO(dev)->gen >= 4 ||
+	    obj->tiling_mode == I915_TILING_NONE)
+		return obj->base.size;
 
-	obj_priv->mmap_offset = 0;
+	/* Previous chips need a power-of-two fence region when tiling */
+	if (INTEL_INFO(dev)->gen == 3)
+		size = 1024*1024;
+	else
+		size = 512*1024;
+
+	while (size < obj->base.size)
+		size <<= 1;
+
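+	/*
+	 * Worked example (illustrative): a 300KiB tiled object therefore
+	 * reports a 1MiB fence size on gen3 and 512KiB on gen2.
+	 */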
+	return size;
 }
 
 /**
@@ -1406,42 +1364,68 @@
  * @obj: object to check
  *
  * Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int start, i;
+	struct drm_device *dev = obj->base.dev;
 
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+	if (INTEL_INFO(dev)->gen >= 4 ||
+	    obj->tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	if (INTEL_INFO(dev)->gen == 3)
-		start = 1024*1024;
+	return i915_gem_get_gtt_size(obj);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ *					 unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	int tile_height;
+
+	/*
+	 * Minimum alignment is 4k (GTT page size) for sane hw.
+	 */
+	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+	    obj->tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Older chips need unfenced tiled buffers to be aligned to the left
+	 * edge of an even tile row (where tile rows are counted as if the bo is
+	 * placed in a fenced gtt region).
+	 */
+	if (IS_GEN2(dev) ||
+	    (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_height = 32;
 	else
-		start = 512*1024;
+		tile_height = 8;
 
-	for (i = start; i < obj->size; i <<= 1)
-		;
-
-	return i;
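+	/*
+	 * Worked example (illustrative): an unfenced X-tiled gen3 surface
+	 * with a 2048-byte stride needs 8 * 2048 * 2 = 32KiB alignment.
+	 */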
+	return tile_height * obj->stride * 2;
 }
 
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
  * @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
  *
  * Simply returns the fake offset to userspace so it can mmap it.
  * The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1454,11 +1438,11 @@
  */
 int
 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
+			struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap_gtt *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1468,130 +1452,196 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+		ret = -E2BIG;
+		goto unlock;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (!obj_priv->mmap_offset) {
+	if (!obj->base.map_list.map) {
 		ret = i915_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
 
-	args->offset = obj_priv->mmap_offset;
-
-	/*
-	 * Pull it into the GTT so that we have a page list (makes the
-	 * initial fault faster and any subsequent flushing possible).
-	 */
-	if (!obj_priv->agp_mem) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0);
-		if (ret)
-			goto out;
-	}
+	args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+			      gfp_t gfpmask)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count = obj->size / PAGE_SIZE;
+	int page_count, i;
+	struct address_space *mapping;
+	struct inode *inode;
+	struct page *page;
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 */
+	page_count = obj->base.size / PAGE_SIZE;
+	BUG_ON(obj->pages != NULL);
+	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+	if (obj->pages == NULL)
+		return -ENOMEM;
+
+	inode = obj->base.filp->f_path.dentry->d_inode;
+	mapping = inode->i_mapping;
+	for (i = 0; i < page_count; i++) {
+		page = read_cache_page_gfp(mapping, i,
+					   GFP_HIGHUSER |
+					   __GFP_COLD |
+					   __GFP_RECLAIMABLE |
+					   gfpmask);
+		if (IS_ERR(page))
+			goto err_pages;
+
+		obj->pages[i] = page;
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	return 0;
+
+err_pages:
+	while (i--)
+		page_cache_release(obj->pages[i]);
+
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+	return PTR_ERR(page);
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
-	BUG_ON(obj_priv->pages_refcount == 0);
-	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
-	if (--obj_priv->pages_refcount != 0)
-		return;
-
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
+	if (obj->tiling_mode != I915_TILING_NONE)
 		i915_gem_object_save_bit_17_swizzle(obj);
 
-	if (obj_priv->madv == I915_MADV_DONTNEED)
-		obj_priv->dirty = 0;
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
 
 	for (i = 0; i < page_count; i++) {
-		if (obj_priv->dirty)
-			set_page_dirty(obj_priv->pages[i]);
+		if (obj->dirty)
+			set_page_dirty(obj->pages[i]);
 
-		if (obj_priv->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj_priv->pages[i]);
+		if (obj->madv == I915_MADV_WILLNEED)
+			mark_page_accessed(obj->pages[i]);
 
-		page_cache_release(obj_priv->pages[i]);
+		page_cache_release(obj->pages[i]);
 	}
-	obj_priv->dirty = 0;
+	obj->dirty = 0;
 
-	drm_free_large(obj_priv->pages);
-	obj_priv->pages = NULL;
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
 }
 
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *ring,
+			       u32 seqno)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	ring->outstanding_lazy_request = true;
-	return dev_priv->next_seqno;
-}
-
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
-			       struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
 	BUG_ON(ring == NULL);
-	obj_priv->ring = ring;
+	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
-	if (!obj_priv->active) {
-		drm_gem_object_reference(obj);
-		obj_priv->active = 1;
+	if (!obj->active) {
+		drm_gem_object_reference(&obj->base);
+		obj->active = 1;
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
-	list_move_tail(&obj_priv->ring_list, &ring->active_list);
-	obj_priv->last_rendering_seqno = seqno;
+	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj->ring_list, &ring->active_list);
+
+	obj->last_rendering_seqno = seqno;
+	if (obj->fenced_gpu_access) {
+		struct drm_i915_fence_reg *reg;
+
+		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+		obj->last_fenced_seqno = seqno;
+		obj->last_fenced_ring = ring;
+
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	}
 }
 
 static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	list_del_init(&obj->ring_list);
+	obj->last_rendering_seqno = 0;
+}
 
-	BUG_ON(!obj_priv->active);
-	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
-	list_del_init(&obj_priv->ring_list);
-	obj_priv->last_rendering_seqno = 0;
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	BUG_ON(!obj->active);
+	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+	i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (obj->pin_count != 0)
+		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+	else
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	BUG_ON(!list_empty(&obj->gpu_write_list));
+	BUG_ON(!obj->active);
+	obj->ring = NULL;
+
+	i915_gem_object_move_off_active(obj);
+	obj->fenced_gpu_access = false;
+
+	obj->active = 0;
+	obj->pending_gpu_write = false;
+	drm_gem_object_unreference(&obj->base);
+
+	WARN_ON(i915_verify_lists(dev));
 }
 
 /* Immediately discard the backing storage */
 static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct inode *inode;
 
 	/* Our goal here is to return as much of the memory as
@@ -1600,42 +1650,18 @@
 	 * backing pages, *now*. Here we mirror the actions taken
 	 * by shmem_delete_inode() to release the backing store.
 	 */
-	inode = obj->filp->f_path.dentry->d_inode;
+	inode = obj->base.filp->f_path.dentry->d_inode;
 	truncate_inode_pages(inode->i_mapping, 0);
 	if (inode->i_op->truncate_range)
 		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
 
-	obj_priv->madv = __I915_MADV_PURGED;
+	obj->madv = __I915_MADV_PURGED;
 }
 
 static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 {
-	return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (obj_priv->pin_count != 0)
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
-	else
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-	list_del_init(&obj_priv->ring_list);
-
-	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
-	obj_priv->last_rendering_seqno = 0;
-	obj_priv->ring = NULL;
-	if (obj_priv->active) {
-		obj_priv->active = 0;
-		drm_gem_object_unreference(obj);
-	}
-	WARN_ON(i915_verify_lists(dev));
+	return obj->madv == I915_MADV_DONTNEED;
 }
 
 static void
@@ -1643,37 +1669,27 @@
 			       uint32_t flush_domains,
 			       struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv, *next;
+	struct drm_i915_gem_object *obj, *next;
 
-	list_for_each_entry_safe(obj_priv, next,
+	list_for_each_entry_safe(obj, next,
 				 &ring->gpu_write_list,
 				 gpu_write_list) {
-		struct drm_gem_object *obj = &obj_priv->base;
+		if (obj->base.write_domain & flush_domains) {
+			uint32_t old_write_domain = obj->base.write_domain;
 
-		if (obj->write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->write_domain;
-
-			obj->write_domain = 0;
-			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring);
-
-			/* update the fence lru list */
-			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-				struct drm_i915_fence_reg *reg =
-					&dev_priv->fence_regs[obj_priv->fence_reg];
-				list_move_tail(&reg->lru_list,
-						&dev_priv->mm.fence_list);
-			}
+			obj->base.write_domain = 0;
+			list_del_init(&obj->gpu_write_list);
+			i915_gem_object_move_to_active(obj, ring,
+						       i915_gem_next_request_seqno(dev, ring));
 
 			trace_i915_gem_object_change_domain(obj,
-							    obj->read_domains,
+							    obj->base.read_domains,
 							    old_write_domain);
 		}
 	}
 }
 
-uint32_t
+int
 i915_add_request(struct drm_device *dev,
 		 struct drm_file *file,
 		 struct drm_i915_gem_request *request,
@@ -1683,17 +1699,17 @@
 	struct drm_i915_file_private *file_priv = NULL;
 	uint32_t seqno;
 	int was_empty;
+	int ret;
+
+	BUG_ON(request == NULL);
 
 	if (file != NULL)
 		file_priv = file->driver_priv;
 
-	if (request == NULL) {
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return 0;
-	}
+	ret = ring->add_request(ring, &seqno);
+	if (ret)
+		return ret;
 
-	seqno = ring->add_request(dev, ring, 0);
 	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
@@ -1717,26 +1733,7 @@
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
 	}
-	return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static void
-i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
-	uint32_t flush_domains = 0;
-
-	/* The sampler always gets flushed on i965 (sigh) */
-	if (INTEL_INFO(dev)->gen >= 4)
-		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
-	ring->flush(dev, ring,
-			I915_GEM_DOMAIN_COMMAND, flush_domains);
+	return 0;
 }
 
 static inline void
@@ -1769,62 +1766,76 @@
 	}
 
 	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_i915_gem_object *obj;
 
-		obj_priv = list_first_entry(&ring->active_list,
-					    struct drm_i915_gem_object,
-					    ring_list);
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
 
-		obj_priv->base.write_domain = 0;
-		list_del_init(&obj_priv->gpu_write_list);
-		i915_gem_object_move_to_inactive(&obj_priv->base);
+		obj->base.write_domain = 0;
+		list_del_init(&obj->gpu_write_list);
+		i915_gem_object_move_to_inactive(obj);
+	}
+}
+
+static void i915_gem_reset_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+		struct drm_i915_gem_object *obj = reg->obj;
+
+		if (!obj)
+			continue;
+
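+		/* Zap any GTT mmaps of tiled objects so that the next CPU
+		 * access faults in and picks up a fresh fence setup.
+		 */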
+		if (obj->tiling_mode)
+			i915_gem_release_mmap(obj);
+
+		reg->obj->fence_reg = I915_FENCE_REG_NONE;
+		reg->obj->fenced_gpu_access = false;
+		reg->obj->last_fenced_seqno = 0;
+		reg->obj->last_fenced_ring = NULL;
+		i915_gem_clear_fence_reg(dev, reg);
 	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int i;
 
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
 
 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
 	 * lost bo to the inactive list.
 	 */
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-					    struct drm_i915_gem_object,
-					    mm_list);
+		obj = list_first_entry(&dev_priv->mm.flushing_list,
+				       struct drm_i915_gem_object,
+				       mm_list);
 
-		obj_priv->base.write_domain = 0;
-		list_del_init(&obj_priv->gpu_write_list);
-		i915_gem_object_move_to_inactive(&obj_priv->base);
+		obj->base.write_domain = 0;
+		list_del_init(&obj->gpu_write_list);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
-	list_for_each_entry(obj_priv,
+	list_for_each_entry(obj,
 			    &dev_priv->mm.inactive_list,
 			    mm_list)
 	{
-		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
 	/* The fence registers are invalidated so clear them out */
-	for (i = 0; i < 16; i++) {
-		struct drm_i915_fence_reg *reg;
-
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			continue;
-
-		i915_gem_clear_fence_reg(reg->obj);
-	}
+	i915_gem_reset_fences(dev);
 }
 
 /**
@@ -1836,6 +1847,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
+	int i;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
@@ -1843,7 +1855,12 @@
 
 	WARN_ON(i915_verify_lists(dev));
 
-	seqno = ring->get_seqno(dev, ring);
+	seqno = ring->get_seqno(ring);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		if (seqno >= ring->sync_seqno[i])
+			ring->sync_seqno[i] = 0;
+
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1865,18 +1882,16 @@
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	while (!list_empty(&ring->active_list)) {
-		struct drm_gem_object *obj;
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_i915_gem_object *obj;
 
-		obj_priv = list_first_entry(&ring->active_list,
-					    struct drm_i915_gem_object,
-					    ring_list);
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
 
-		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
 			break;
 
-		obj = &obj_priv->base;
-		if (obj->write_domain != 0)
+		if (obj->base.write_domain != 0)
 			i915_gem_object_move_to_flushing(obj);
 		else
 			i915_gem_object_move_to_inactive(obj);
@@ -1884,7 +1899,7 @@
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		ring->user_irq_put(dev, ring);
+		ring->irq_put(ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 
@@ -1895,24 +1910,24 @@
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
 	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-	    struct drm_i915_gem_object *obj_priv, *tmp;
+	    struct drm_i915_gem_object *obj, *next;
 
 	    /* We must be careful that during unbind() we do not
 	     * accidentally infinitely recurse into retire requests.
 	     * Currently:
 	     *   retire -> free -> unbind -> wait -> retire_ring
 	     */
-	    list_for_each_entry_safe(obj_priv, tmp,
+	    list_for_each_entry_safe(obj, next,
 				     &dev_priv->mm.deferred_free_list,
 				     mm_list)
-		    i915_gem_free_object_tail(&obj_priv->base);
+		    i915_gem_free_object_tail(obj);
 	}
 
-	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
 }
 
 static void
@@ -1920,6 +1935,8 @@
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	bool idle;
+	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
 				mm.retire_work.work);
@@ -1933,11 +1950,31 @@
 
 	i915_gem_retire_requests(dev);
 
-	if (!dev_priv->mm.suspended &&
-		(!list_empty(&dev_priv->render_ring.request_list) ||
-		 !list_empty(&dev_priv->bsd_ring.request_list) ||
-		 !list_empty(&dev_priv->blt_ring.request_list)))
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (!list_empty(&ring->gpu_write_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			ret = i915_gem_flush_ring(dev, ring, 0,
+						  I915_GEM_GPU_DOMAINS);
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
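+			/* If the flush failed or the request could not be
+			 * allocated or emitted, it is unused; free it.
+			 */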
+			if (ret || request == NULL ||
+			    i915_add_request(dev, NULL, request, ring))
+				kfree(request);
+		}
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -1954,14 +1991,23 @@
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EAGAIN;
 
-	if (ring->outstanding_lazy_request) {
-		seqno = i915_add_request(dev, NULL, NULL, ring);
-		if (seqno == 0)
-			return -ENOMEM;
-	}
-	BUG_ON(seqno == dev_priv->next_seqno);
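+	/* The seqno may belong to a request that has only been queued
+	 * lazily; emit a real request now so there is something to wait upon.
+	 */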
+	if (seqno == ring->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
 
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(dev, NULL, request, ring);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1975,21 +2021,23 @@
 
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
-		ring->waiting_gem_seqno = seqno;
-		ring->user_irq_get(dev, ring);
-		if (interruptible)
-			ret = wait_event_interruptible(ring->irq_queue,
-				i915_seqno_passed(
-					ring->get_seqno(dev, ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
-		else
-			wait_event(ring->irq_queue,
-				i915_seqno_passed(
-					ring->get_seqno(dev, ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
+		ring->waiting_seqno = seqno;
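+		/* Sleep on the seqno interrupt if the ring can supply one,
+		 * otherwise fall back to polling with a 3 second timeout.
+		 */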
+		if (ring->irq_get(ring)) {
+			if (interruptible)
+				ret = wait_event_interruptible(ring->irq_queue,
+							       i915_seqno_passed(ring->get_seqno(ring), seqno)
+							       || atomic_read(&dev_priv->mm.wedged));
+			else
+				wait_event(ring->irq_queue,
+					   i915_seqno_passed(ring->get_seqno(ring), seqno)
+					   || atomic_read(&dev_priv->mm.wedged));
 
-		ring->user_irq_put(dev, ring);
-		ring->waiting_gem_seqno = 0;
+			ring->irq_put(ring);
+		} else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+						      seqno) ||
+				    atomic_read(&dev_priv->mm.wedged), 3000))
+			ret = -EBUSY;
+		ring->waiting_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
@@ -1998,7 +2046,7 @@
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-			  __func__, ret, seqno, ring->get_seqno(dev, ring),
+			  __func__, ret, seqno, ring->get_seqno(ring),
 			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring.  While we have the work queue
@@ -2023,70 +2071,30 @@
 	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-		    struct drm_file *file_priv,
-		    struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	ring->flush(dev, ring, invalidate_domains, flush_domains);
-	i915_gem_process_flushing_list(dev, flush_domains, ring);
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
-	       struct drm_file *file_priv,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains,
-	       uint32_t flush_rings)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-
-	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		if (flush_rings & RING_RENDER)
-			i915_gem_flush_ring(dev, file_priv,
-					    &dev_priv->render_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BSD)
-			i915_gem_flush_ring(dev, file_priv,
-					    &dev_priv->bsd_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BLT)
-			i915_gem_flush_ring(dev, file_priv,
-					    &dev_priv->blt_ring,
-					    invalidate_domains, flush_domains);
-	}
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool interruptible)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_device *dev = obj->base.dev;
 	int ret;
 
 	/* This function only exists to support waiting for existing rendering,
 	 * not for emitting required flushes.
 	 */
-	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
 	 */
-	if (obj_priv->active) {
+	if (obj->active) {
 		ret = i915_do_wait_request(dev,
-					   obj_priv->last_rendering_seqno,
+					   obj->last_rendering_seqno,
 					   interruptible,
-					   obj_priv->ring);
+					   obj->ring);
 		if (ret)
 			return ret;
 	}
@@ -2098,17 +2106,14 @@
  * Unbinds an object from the GTT aperture.
  */
 int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return 0;
 
-	if (obj_priv->pin_count != 0) {
+	if (obj->pin_count != 0) {
 		DRM_ERROR("Attempting to unbind pinned buffer\n");
 		return -EINVAL;
 	}
@@ -2131,27 +2136,27 @@
 	 */
 	if (ret) {
 		i915_gem_clflush_object(obj);
-		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
 	/* release the fence reg _after_ flushing */
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-		i915_gem_clear_fence_reg(obj);
+	ret = i915_gem_object_put_fence(obj);
+	if (ret == -ERESTARTSYS)
+		return ret;
 
-	drm_unbind_agp(obj_priv->agp_mem);
-	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+	i915_gem_gtt_unbind_object(obj);
+	i915_gem_object_put_pages_gtt(obj);
 
-	i915_gem_object_put_pages(obj);
-	BUG_ON(obj_priv->pages_refcount);
+	list_del_init(&obj->gtt_list);
+	list_del_init(&obj->mm_list);
+	/* Avoid an unnecessary call to unbind on rebind. */
+	obj->map_and_fenceable = true;
 
-	i915_gem_info_remove_gtt(dev_priv, obj->size);
-	list_del_init(&obj_priv->mm_list);
+	drm_mm_put_block(obj->gtt_space);
+	obj->gtt_space = NULL;
+	obj->gtt_offset = 0;
 
-	drm_mm_put_block(obj_priv->gtt_space);
-	obj_priv->gtt_space = NULL;
-	obj_priv->gtt_offset = 0;
-
-	if (i915_gem_object_is_purgeable(obj_priv))
+	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
 	trace_i915_gem_object_unbind(obj);
@@ -2159,14 +2164,37 @@
 	return ret;
 }
 
+int
+i915_gem_flush_ring(struct drm_device *dev,
+		    struct intel_ring_buffer *ring,
+		    uint32_t invalidate_domains,
+		    uint32_t flush_domains)
+{
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
+}
+
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	i915_gem_flush_ring(dev, NULL, ring,
-			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
+				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2177,7 +2205,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	int ret;
+	int ret, i;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
@@ -2185,258 +2213,305 @@
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	ret = i915_ring_idle(dev, &dev_priv->render_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
-	if (ret)
-		return ret;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-			  gfp_t gfpmask)
+static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+				       struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count, i;
-	struct address_space *mapping;
-	struct inode *inode;
-	struct page *page;
-
-	BUG_ON(obj_priv->pages_refcount
-			== DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
-	if (obj_priv->pages_refcount++ != 0)
-		return 0;
-
-	/* Get the list of pages out of our struct file.  They'll be pinned
-	 * at this point until we release them.
-	 */
-	page_count = obj->size / PAGE_SIZE;
-	BUG_ON(obj_priv->pages != NULL);
-	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
-	if (obj_priv->pages == NULL) {
-		obj_priv->pages_refcount--;
-		return -ENOMEM;
-	}
-
-	inode = obj->filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
-
-		obj_priv->pages[i] = page;
-	}
-
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
-		i915_gem_object_do_bit_17_swizzle(obj);
-
-	return 0;
-
-err_pages:
-	while (i--)
-		page_cache_release(obj_priv->pages[i]);
-
-	drm_free_large(obj_priv->pages);
-	obj_priv->pages = NULL;
-	obj_priv->pages_refcount--;
-	return PTR_ERR(page);
-}
-
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
-{
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
-		    0xfffff000) << 32;
-	val |= obj_priv->gtt_offset & 0xfffff000;
-	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+	val = (uint64_t)((obj->gtt_offset + size - 4096) &
+			 0xfffff000) << 32;
+	val |= obj->gtt_offset & 0xfffff000;
+	val |= (uint64_t)((obj->stride / 128) - 1) <<
 		SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 	val |= I965_FENCE_REG_VALID;
 
-	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
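+	/* Either write the fence register directly via MMIO, or queue the
+	 * update on the given ring using MI_LOAD_REGISTER_IMM so that it
+	 * takes effect in order with the pipelined commands.
+	 */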
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 6);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+		intel_ring_emit(pipelined, (u32)val);
+		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+		intel_ring_emit(pipelined, (u32)(val >> 32));
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+	return 0;
 }
 
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
+				struct intel_ring_buffer *pipelined)
 {
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+	val = (uint64_t)((obj->gtt_offset + size - 4096) &
 		    0xfffff000) << 32;
-	val |= obj_priv->gtt_offset & 0xfffff000;
-	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	val |= obj->gtt_offset & 0xfffff000;
+	val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 	val |= I965_FENCE_REG_VALID;
 
-	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 6);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+		intel_ring_emit(pipelined, (u32)val);
+		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+		intel_ring_emit(pipelined, (u32)(val >> 32));
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+	return 0;
 }
 
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
+				struct intel_ring_buffer *pipelined)
 {
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	u32 fence_reg, val, pitch_val;
 	int tile_width;
-	uint32_t fence_reg, val;
-	uint32_t pitch_val;
 
-	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
-	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
-		     __func__, obj_priv->gtt_offset, obj->size);
-		return;
-	}
+	if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		 (size & -size) != size ||
+		 (obj->gtt_offset & (size - 1)),
+		 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		 obj->gtt_offset, obj->map_and_fenceable, size))
+		return -EINVAL;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y &&
-	    HAS_128_BYTE_Y_TILING(dev))
+	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 		tile_width = 128;
 	else
 		tile_width = 512;
 
 	/* Note: pitch better be a power of two tile widths */
-	pitch_val = obj_priv->stride / tile_width;
+	pitch_val = obj->stride / tile_width;
 	pitch_val = ffs(pitch_val) - 1;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y &&
-	    HAS_128_BYTE_Y_TILING(dev))
-		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
-	else
-		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
-	val = obj_priv->gtt_offset;
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	val = obj->gtt_offset;
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I915_FENCE_SIZE_BITS(obj->size);
+	val |= I915_FENCE_SIZE_BITS(size);
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
 
-	if (regnum < 8)
-		fence_reg = FENCE_REG_830_0 + (regnum * 4);
+	fence_reg = obj->fence_reg;
+	if (fence_reg < 8)
+		fence_reg = FENCE_REG_830_0 + fence_reg * 4;
 	else
-		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
-	I915_WRITE(fence_reg, val);
+		fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(pipelined, fence_reg);
+		intel_ring_emit(pipelined, val);
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE(fence_reg, val);
+
+	return 0;
 }
 
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
+				struct intel_ring_buffer *pipelined)
 {
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	int regnum = obj->fence_reg;
 	uint32_t val;
 	uint32_t pitch_val;
-	uint32_t fence_size_bits;
 
-	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
-	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
-		     __func__, obj_priv->gtt_offset);
-		return;
-	}
+	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		 (size & -size) != size ||
+		 (obj->gtt_offset & (size - 1)),
+		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+		 obj->gtt_offset, size))
+		return -EINVAL;
 
-	pitch_val = obj_priv->stride / 128;
+	pitch_val = obj->stride / 128;
 	pitch_val = ffs(pitch_val) - 1;
-	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
 
-	val = obj_priv->gtt_offset;
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	val = obj->gtt_offset;
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
-	WARN_ON(fence_size_bits & ~0x00000f00);
-	val |= fence_size_bits;
+	val |= I830_FENCE_SIZE_BITS(size);
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
 
-	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+		intel_ring_emit(pipelined, val);
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+	return 0;
 }
 
-static int i915_find_fence_reg(struct drm_device *dev,
-			       bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct drm_i915_fence_reg *reg = NULL;
-	struct drm_i915_gem_object *obj_priv = NULL;
+	return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
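+/* Flush and wait for any outstanding fenced GPU access to the object
+ * before its fence register is changed or released.
+ */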
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+			    struct intel_ring_buffer *pipelined,
+			    bool interruptible)
+{
+	int ret;
+
+	if (obj->fenced_gpu_access) {
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
+
+		obj->fenced_gpu_access = false;
+	}
+
+	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+		if (!ring_passed_seqno(obj->last_fenced_ring,
+				       obj->last_fenced_seqno)) {
+			ret = i915_do_wait_request(obj->base.dev,
+						   obj->last_fenced_seqno,
+						   interruptible,
+						   obj->last_fenced_ring);
+			if (ret)
+				return ret;
+		}
+
+		obj->last_fenced_seqno = 0;
+		obj->last_fenced_ring = NULL;
+	}
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
+	return 0;
+}
+
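+/* Release the fence register held by the object, draining fenced GPU
+ * access and zapping stale GTT mmaps of tiled objects first.
+ */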
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	ret = i915_gem_object_flush_fence(obj, NULL, true);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		i915_gem_clear_fence_reg(obj->base.dev,
+					 &dev_priv->fence_regs[obj->fence_reg]);
+
+		obj->fence_reg = I915_FENCE_REG_NONE;
+	}
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+		    struct intel_ring_buffer *pipelined)
+{
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj = NULL;
-	int i, avail, ret;
+	struct drm_i915_fence_reg *reg, *first, *avail;
+	int i;
 
 	/* First try to find a free reg */
-	avail = 0;
+	avail = NULL;
 	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
 		reg = &dev_priv->fence_regs[i];
 		if (!reg->obj)
-			return i;
+			return reg;
 
-		obj_priv = to_intel_bo(reg->obj);
-		if (!obj_priv->pin_count)
-		    avail++;
+		if (!reg->obj->pin_count)
+			avail = reg;
 	}
 
-	if (avail == 0)
-		return -ENOSPC;
+	if (avail == NULL)
+		return NULL;
 
 	/* None available, try to steal one or wait for a user to finish */
-	i = I915_FENCE_REG_NONE;
-	list_for_each_entry(reg, &dev_priv->mm.fence_list,
-			    lru_list) {
-		obj = reg->obj;
-		obj_priv = to_intel_bo(obj);
-
-		if (obj_priv->pin_count)
+	avail = first = NULL;
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->obj->pin_count)
 			continue;
 
-		/* found one! */
-		i = obj_priv->fence_reg;
-		break;
+		if (first == NULL)
+			first = reg;
+
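+		/* Prefer a fence last used on the same ring as the pipelined
+		 * request (or on no ring at all), so that stealing it does
+		 * not require synchronising with another ring.
+		 */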
+		if (!pipelined ||
+		    !reg->obj->last_fenced_ring ||
+		    reg->obj->last_fenced_ring == pipelined) {
+			avail = reg;
+			break;
+		}
 	}
 
-	BUG_ON(i == I915_FENCE_REG_NONE);
+	if (avail == NULL)
+		avail = first;
 
-	/* We only have a reference on obj from the active list. put_fence_reg
-	 * might drop that one, causing a use-after-free in it. So hold a
-	 * private reference to obj like the other callers of put_fence_reg
-	 * (set_tiling ioctl) do. */
-	drm_gem_object_reference(obj);
-	ret = i915_gem_object_put_fence_reg(obj, interruptible);
-	drm_gem_object_unreference(obj);
-	if (ret != 0)
-		return ret;
-
-	return i;
+	return avail;
 }
 
 /**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
  * @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: must we wait uninterruptibly for the register to retire?
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -2448,72 +2523,141 @@
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-			      bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+			  struct intel_ring_buffer *pipelined,
+			  bool interruptible)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	struct drm_i915_fence_reg *reg = NULL;
+	struct drm_i915_fence_reg *reg;
 	int ret;
 
-	/* Just update our place in the LRU if our fence is getting used. */
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+	/* XXX disable pipelining. There are bugs. Shocking. */
+	pipelined = NULL;
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
 		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+			pipelined = NULL;
+
+		if (!pipelined) {
+			if (reg->setup_seqno) {
+				if (!ring_passed_seqno(obj->last_fenced_ring,
+						       reg->setup_seqno)) {
+					ret = i915_do_wait_request(obj->base.dev,
+								   reg->setup_seqno,
+								   interruptible,
+								   obj->last_fenced_ring);
+					if (ret)
+						return ret;
+				}
+
+				reg->setup_seqno = 0;
+			}
+		} else if (obj->last_fenced_ring &&
+			   obj->last_fenced_ring != pipelined) {
+			ret = i915_gem_object_flush_fence(obj,
+							  pipelined,
+							  interruptible);
+			if (ret)
+				return ret;
+		} else if (obj->tiling_changed) {
+			if (obj->fenced_gpu_access) {
+				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+					ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+								  0, obj->base.write_domain);
+					if (ret)
+						return ret;
+				}
+
+				obj->fenced_gpu_access = false;
+			}
+		}
+
+		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+			pipelined = NULL;
+		BUG_ON(!pipelined && reg->setup_seqno);
+
+		if (obj->tiling_changed) {
+			if (pipelined) {
+				reg->setup_seqno =
+					i915_gem_next_request_seqno(dev, pipelined);
+				obj->last_fenced_seqno = reg->setup_seqno;
+				obj->last_fenced_ring = pipelined;
+			}
+			goto update;
+		}
+
 		return 0;
 	}
 
-	switch (obj_priv->tiling_mode) {
-	case I915_TILING_NONE:
-		WARN(1, "allocating a fence for non-tiled object?\n");
-		break;
-	case I915_TILING_X:
-		if (!obj_priv->stride)
-			return -EINVAL;
-		WARN((obj_priv->stride & (512 - 1)),
-		     "object 0x%08x is X tiled but has non-512B pitch\n",
-		     obj_priv->gtt_offset);
-		break;
-	case I915_TILING_Y:
-		if (!obj_priv->stride)
-			return -EINVAL;
-		WARN((obj_priv->stride & (128 - 1)),
-		     "object 0x%08x is Y tiled but has non-128B pitch\n",
-		     obj_priv->gtt_offset);
-		break;
-	}
+	reg = i915_find_fence_reg(dev, pipelined);
+	if (reg == NULL)
+		return -ENOSPC;
 
-	ret = i915_find_fence_reg(dev, interruptible);
-	if (ret < 0)
+	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+	if (ret)
 		return ret;
 
-	obj_priv->fence_reg = ret;
-	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	if (reg->obj) {
+		struct drm_i915_gem_object *old = reg->obj;
+
+		drm_gem_object_reference(&old->base);
+
+		if (old->tiling_mode)
+			i915_gem_release_mmap(old);
+
+		ret = i915_gem_object_flush_fence(old,
+						  pipelined,
+						  interruptible);
+		if (ret) {
+			drm_gem_object_unreference(&old->base);
+			return ret;
+		}
+
+		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+			pipelined = NULL;
+
+		old->fence_reg = I915_FENCE_REG_NONE;
+		old->last_fenced_ring = pipelined;
+		old->last_fenced_seqno =
+			pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+		drm_gem_object_unreference(&old->base);
+	} else if (obj->last_fenced_seqno == 0)
+		pipelined = NULL;
 
 	reg->obj = obj;
+	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	obj->fence_reg = reg - dev_priv->fence_regs;
+	obj->last_fenced_ring = pipelined;
 
+	reg->setup_seqno =
+		pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+	obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+	obj->tiling_changed = false;
 	switch (INTEL_INFO(dev)->gen) {
 	case 6:
-		sandybridge_write_fence_reg(reg);
+		ret = sandybridge_write_fence_reg(obj, pipelined);
 		break;
 	case 5:
 	case 4:
-		i965_write_fence_reg(reg);
+		ret = i965_write_fence_reg(obj, pipelined);
 		break;
 	case 3:
-		i915_write_fence_reg(reg);
+		ret = i915_write_fence_reg(obj, pipelined);
 		break;
 	case 2:
-		i830_write_fence_reg(reg);
+		ret = i830_write_fence_reg(obj, pipelined);
 		break;
 	}
 
-	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
-			obj_priv->tiling_mode);
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -2521,154 +2665,125 @@
  * @obj: object to clear
  *
  * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
  */
 static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+			 struct drm_i915_fence_reg *reg)
 {
-	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	struct drm_i915_fence_reg *reg =
-		&dev_priv->fence_regs[obj_priv->fence_reg];
-	uint32_t fence_reg;
+	uint32_t fence_reg = reg - dev_priv->fence_regs;
 
 	switch (INTEL_INFO(dev)->gen) {
 	case 6:
-		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
-			     (obj_priv->fence_reg * 8), 0);
+		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
 		break;
 	case 5:
 	case 4:
-		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
 		break;
 	case 3:
-		if (obj_priv->fence_reg >= 8)
-			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+		if (fence_reg >= 8)
+			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
 		else
 	case 2:
-			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+			fence_reg = FENCE_REG_830_0 + fence_reg * 4;
 
 		I915_WRITE(fence_reg, 0);
 		break;
 	}
 
-	reg->obj = NULL;
-	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-			      bool interruptible)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	struct drm_i915_fence_reg *reg;
-
-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
-		return 0;
-
-	/* If we've changed tiling, GTT-mappings of the object
-	 * need to re-fault to ensure that the correct fence register
-	 * setup is in place.
-	 */
-	i915_gem_release_mmap(obj);
-
-	/* On the i915, GPU access to tiled buffers is via a fence,
-	 * therefore we must wait for any outstanding access to complete
-	 * before clearing the fence.
-	 */
-	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	if (reg->gpu) {
-		int ret;
-
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		if (ret)
-			return ret;
-
-		ret = i915_gem_object_wait_rendering(obj, interruptible);
-		if (ret)
-			return ret;
-
-		reg->gpu = false;
-	}
-
-	i915_gem_object_flush_gtt_write_domain(obj);
-	i915_gem_clear_fence_reg(obj);
-
-	return 0;
+	reg->obj = NULL;
+	reg->setup_seqno = 0;
 }
 
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+			    unsigned alignment,
+			    bool map_and_fenceable)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_mm_node *free_space;
-	gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	bool mappable, fenceable;
 	int ret;
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to bind a purgeable object\n");
 		return -EINVAL;
 	}
 
+	fence_size = i915_gem_get_gtt_size(obj);
+	fence_alignment = i915_gem_get_gtt_alignment(obj);
+	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+
 	if (alignment == 0)
-		alignment = i915_gem_get_gtt_alignment(obj);
-	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+		alignment = map_and_fenceable ? fence_alignment :
+						unfenced_alignment;
+	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
 		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
 
+	size = map_and_fenceable ? fence_size : obj->base.size;
+
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->size > dev_priv->mm.gtt_total) {
+	if (obj->base.size >
+	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
 		return -E2BIG;
 	}
 
  search_free:
-	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-					obj->size, alignment, 0);
-	if (free_space != NULL)
-		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
-						       alignment);
-	if (obj_priv->gtt_space == NULL) {
+	if (map_and_fenceable)
+		free_space =
+			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+						    size, alignment, 0,
+						    dev_priv->mm.gtt_mappable_end,
+						    0);
+	else
+		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+						size, alignment, 0);
+
+	if (free_space != NULL) {
+		if (map_and_fenceable)
+			obj->gtt_space =
+				drm_mm_get_block_range_generic(free_space,
+							       size, alignment, 0,
+							       dev_priv->mm.gtt_mappable_end,
+							       0);
+		else
+			obj->gtt_space =
+				drm_mm_get_block(free_space, size, alignment);
+	}
+	if (obj->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
-		ret = i915_gem_evict_something(dev, obj->size, alignment);
+		ret = i915_gem_evict_something(dev, size, alignment,
+					       map_and_fenceable);
 		if (ret)
 			return ret;
 
 		goto search_free;
 	}
 
-	ret = i915_gem_object_get_pages(obj, gfpmask);
+	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
 	if (ret) {
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
+		drm_mm_put_block(obj->gtt_space);
+		obj->gtt_space = NULL;
 
 		if (ret == -ENOMEM) {
-			/* first try to clear up some space from the GTT */
-			ret = i915_gem_evict_something(dev, obj->size,
-						       alignment);
+			/* first try to reclaim some memory by clearing the GTT */
+			ret = i915_gem_evict_everything(dev, false);
 			if (ret) {
 				/* now try to shrink everyone else */
 				if (gfpmask) {
@@ -2676,7 +2791,7 @@
 					goto search_free;
 				}
 
-				return ret;
+				return -ENOMEM;
 			}
 
 			goto search_free;
@@ -2685,122 +2800,116 @@
 		return ret;
 	}
 
-	/* Create an AGP memory structure pointing at our pages, and bind it
-	 * into the GTT.
-	 */
-	obj_priv->agp_mem = drm_agp_bind_pages(dev,
-					       obj_priv->pages,
-					       obj->size >> PAGE_SHIFT,
-					       obj_priv->gtt_space->start,
-					       obj_priv->agp_type);
-	if (obj_priv->agp_mem == NULL) {
-		i915_gem_object_put_pages(obj);
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
+	ret = i915_gem_gtt_bind_object(obj);
+	if (ret) {
+		i915_gem_object_put_pages_gtt(obj);
+		drm_mm_put_block(obj->gtt_space);
+		obj->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, obj->size, alignment);
-		if (ret)
+		if (i915_gem_evict_everything(dev, false))
 			return ret;
 
 		goto search_free;
 	}
 
-	/* keep track of bounds object by adding it to the inactive list */
-	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-	i915_gem_info_add_gtt(dev_priv, obj->size);
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
 	 */
-	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	obj_priv->gtt_offset = obj_priv->gtt_space->start;
-	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+	obj->gtt_offset = obj->gtt_space->start;
 
+	fenceable =
+		obj->gtt_space->size == fence_size &&
+		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
+
+	mappable =
+		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+
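+	/* Remember whether this placement satisfies both the fence
+	 * restrictions and the CPU-mappable aperture limit.
+	 */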
+	obj->map_and_fenceable = mappable && fenceable;
+
+	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
 	return 0;
 }
 
 void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
-
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj_priv->pages == NULL)
+	if (obj->pages == NULL)
 		return;
 
 	trace_i915_gem_object_clflush(obj);
 
-	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	uint32_t old_write_domain;
+	struct drm_device *dev = obj->base.dev;
 
-	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	old_write_domain = obj->write_domain;
-	i915_gem_flush_ring(dev, NULL,
-			    to_intel_bo(obj)->ring,
-			    0, obj->write_domain);
-	BUG_ON(obj->write_domain);
-
-	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
-					    old_write_domain);
-
-	return 0;
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 {
 	uint32_t old_write_domain;
 
-	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
-	/* No actual flushing is required for the GTT write domain.   Writes
+	/* No actual flushing is required for the GTT write domain.  Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
 	 */
-	old_write_domain = obj->write_domain;
-	obj->write_domain = 0;
+	wmb();
+
+	i915_gem_release_mmap(obj);
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
 
 	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
+					    obj->base.read_domains,
 					    old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
 
-	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
 	i915_gem_clflush_object(obj);
-	drm_agp_chipset_flush(dev);
-	old_write_domain = obj->write_domain;
-	obj->write_domain = 0;
+	intel_gtt_chipset_flush();
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
 
 	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
+					    obj->base.read_domains,
 					    old_write_domain);
 }
 
@@ -2811,37 +2920,39 @@
  * flushes to occur.
  */
 int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
-		return ret;
-	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
 
+	if (obj->pending_gpu_write || write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_write_domain = obj->write_domain;
-	old_read_domains = obj->read_domains;
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
-		obj->read_domains = I915_GEM_DOMAIN_GTT;
-		obj->write_domain = I915_GEM_DOMAIN_GTT;
-		obj_priv->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+		obj->dirty = 1;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2856,23 +2967,23 @@
  * wait, as in the modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-				     bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
 	/* Currently, we are always called from a non-interruptible context. */
-	if (!pipelined) {
+	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj, false);
 		if (ret)
 			return ret;
@@ -2880,12 +2991,12 @@
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_read_domains = obj->read_domains;
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	old_read_domains = obj->base.read_domains;
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->write_domain);
+					    obj->base.write_domain);
 
 	return 0;
 }
@@ -2894,14 +3005,19 @@
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
-	return i915_gem_object_wait_rendering(&obj->base, interruptible);
+	return i915_gem_object_wait_rendering(obj, interruptible);
 }
 
 /**
@@ -2911,14 +3027,15 @@
  * flushes to occur.
  */
 static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
+	if (ret)
 		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -2930,27 +3047,27 @@
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-	old_write_domain = obj->write_domain;
-	old_read_domains = obj->read_domains;
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
 
 	/* Flush the CPU cache if it's still invalid. */
-	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj);
 
-		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains = I915_GEM_DOMAIN_CPU;
-		obj->write_domain = I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2960,184 +3077,6 @@
 	return 0;
 }
 
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  struct intel_ring_buffer *ring)
-{
-	struct drm_device		*dev = obj->dev;
-	struct drm_i915_private		*dev_priv = dev->dev_private;
-	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
-	uint32_t			invalidate_domains = 0;
-	uint32_t			flush_domains = 0;
-	uint32_t			old_read_domains;
-
-	intel_mark_busy(dev, obj);
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->pending_write_domain == 0)
-		obj->pending_read_domains |= obj->read_domains;
-	else
-		obj_priv->dirty = 1;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->write_domain &&
-	    (obj->write_domain != obj->pending_read_domains ||
-	     obj_priv->ring != ring)) {
-		flush_domains |= obj->write_domain;
-		invalidate_domains |=
-			obj->pending_read_domains & ~obj->write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	old_read_domains = obj->read_domains;
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->pending_write_domain == 0)
-		obj->pending_write_domain = obj->write_domain;
-	obj->read_domains = obj->pending_read_domains;
-
-	dev->invalidate_domains |= invalidate_domains;
-	dev->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		dev_priv->mm.flush_rings |= obj_priv->ring->id;
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		dev_priv->mm.flush_rings |= ring->id;
-
-	trace_i915_gem_object_change_domain(obj,
-					    old_read_domains,
-					    obj->write_domain);
-}
-
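The comment block removed above describes the (read_domains, write_domain) bookkeeping in prose. Below is a minimal user-space model of that rule -- flush the old write domain when the new readers differ from it, and invalidate any newly added read domain. The DOMAIN_* values, struct toy_bo and compute_domain_change() are illustrative stand-ins, not the driver's types, and the per-ring check the real code also makes is omitted.

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU	(1u << 0)
#define DOMAIN_RENDER	(1u << 1)
#define DOMAIN_COMMAND	(1u << 2)

struct toy_bo {
	uint32_t read_domains;
	uint32_t write_domain;
};

/* Work out what must be flushed/invalidated to move @bo to (new_read, new_write). */
static void compute_domain_change(struct toy_bo *bo,
				  uint32_t new_read, uint32_t new_write,
				  uint32_t *flush, uint32_t *invalidate)
{
	/* With no new writer, the object may keep its existing read domains too. */
	if (new_write == 0)
		new_read |= bo->read_domains;

	/* Flush the current write domain if the new readers differ from it. */
	*flush = 0;
	if (bo->write_domain && bo->write_domain != new_read)
		*flush = bo->write_domain;

	/* Invalidate any newly added read cache, which may hold stale data. */
	*invalidate = new_read & ~bo->read_domains;

	bo->read_domains = new_read;
	bo->write_domain = new_write;
}

int main(void)
{
	/* "Case 1" above, after step 2: written by the CPU, so (CPU, CPU). */
	struct toy_bo batch = { DOMAIN_CPU, DOMAIN_CPU };
	uint32_t flush, invalidate;

	/* Step 4: the GPU command streamer reads the batch. */
	compute_domain_change(&batch, DOMAIN_COMMAND, 0, &flush, &invalidate);
	printf("flush %#x invalidate %#x\n",
	       (unsigned)flush, (unsigned)invalidate);
	/* Prints "flush 0x1 invalidate 0x4": clflush the CPU cache, then MI_FLUSH. */
	return 0;
}

Running it for step 4 of "Case 1" reproduces the flush-CPU / invalidate-COMMAND outcome the removed comment walks through.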
 /**
  * Moves the object from a partially CPU read to a full one.
  *
@@ -3145,30 +3084,28 @@
  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
 static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (!obj_priv->page_cpu_valid)
+	if (!obj->page_cpu_valid)
 		return;
 
 	/* If we're partially in the CPU read domain, finish moving it in.
 	 */
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+	if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
 		int i;
 
-		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
-			if (obj_priv->page_cpu_valid[i])
+		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+			if (obj->page_cpu_valid[i])
 				continue;
-			drm_clflush_pages(obj_priv->pages + i, 1);
+			drm_clflush_pages(obj->pages + i, 1);
 		}
 	}
 
 	/* Free the page_cpu_valid mappings which are now stale, whether
 	 * or not we've got I915_GEM_DOMAIN_CPU.
 	 */
-	kfree(obj_priv->page_cpu_valid);
-	obj_priv->page_cpu_valid = NULL;
+	kfree(obj->page_cpu_valid);
+	obj->page_cpu_valid = NULL;
 }
 
 /**
@@ -3184,19 +3121,19 @@
  * flushes to occur.
  */
 static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int i, ret;
 
-	if (offset == 0 && size == obj->size)
+	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
+	if (ret)
 		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3204,457 +3141,45 @@
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're already fully in the CPU read domain, we're done. */
-	if (obj_priv->page_cpu_valid == NULL &&
-	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+	if (obj->page_cpu_valid == NULL &&
+	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
 		return 0;
 
 	/* Otherwise, create/clear the per-page CPU read domain flag if we're
 	 * newly adding I915_GEM_DOMAIN_CPU
 	 */
-	if (obj_priv->page_cpu_valid == NULL) {
-		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
-						   GFP_KERNEL);
-		if (obj_priv->page_cpu_valid == NULL)
+	if (obj->page_cpu_valid == NULL) {
+		obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+					      GFP_KERNEL);
+		if (obj->page_cpu_valid == NULL)
 			return -ENOMEM;
-	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
-		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
 	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
 	     i++) {
-		if (obj_priv->page_cpu_valid[i])
+		if (obj->page_cpu_valid[i])
 			continue;
 
-		drm_clflush_pages(obj_priv->pages + i, 1);
+		drm_clflush_pages(obj->pages + i, 1);
 
-		obj_priv->page_cpu_valid[i] = 1;
+		obj->page_cpu_valid[i] = 1;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	old_read_domains = obj->read_domains;
-	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	old_read_domains = obj->base.read_domains;
+	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->write_domain);
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct drm_file *file_priv,
-				   struct drm_i915_gem_exec_object2 *entry,
-				   struct drm_i915_gem_relocation_entry *reloc)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_gem_object *target_obj;
-	uint32_t target_offset;
-	int ret = -EINVAL;
-
-	target_obj = drm_gem_object_lookup(dev, file_priv,
-					   reloc->target_handle);
-	if (target_obj == NULL)
-		return -ENOENT;
-
-	target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
-	DRM_INFO("%s: obj %p offset %08x target %d "
-		 "read %08x write %08x gtt %08x "
-		 "presumed %08x delta %08x\n",
-		 __func__,
-		 obj,
-		 (int) reloc->offset,
-		 (int) reloc->target_handle,
-		 (int) reloc->read_domains,
-		 (int) reloc->write_domain,
-		 (int) target_offset,
-		 (int) reloc->presumed_offset,
-		 reloc->delta);
-#endif
-
-	/* The target buffer should have appeared before us in the
-	 * exec_object list, so it should have a GTT space bound by now.
-	 */
-	if (target_offset == 0) {
-		DRM_ERROR("No GTT space found for object %d\n",
-			  reloc->target_handle);
-		goto err;
-	}
-
-	/* Validate that the target is in a valid r/w GPU domain */
-	if (reloc->write_domain & (reloc->write_domain - 1)) {
-		DRM_ERROR("reloc with multiple write domains: "
-			  "obj %p target %d offset %d "
-			  "read %08x write %08x",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->read_domains,
-			  reloc->write_domain);
-		goto err;
-	}
-	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
-	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
-		DRM_ERROR("reloc with read/write CPU domains: "
-			  "obj %p target %d offset %d "
-			  "read %08x write %08x",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->read_domains,
-			  reloc->write_domain);
-		goto err;
-	}
-	if (reloc->write_domain && target_obj->pending_write_domain &&
-	    reloc->write_domain != target_obj->pending_write_domain) {
-		DRM_ERROR("Write domain conflict: "
-			  "obj %p target %d offset %d "
-			  "new %08x old %08x\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->write_domain,
-			  target_obj->pending_write_domain);
-		goto err;
-	}
-
-	target_obj->pending_read_domains |= reloc->read_domains;
-	target_obj->pending_write_domain |= reloc->write_domain;
-
-	/* If the relocation already has the right value in it, no
-	 * more work needs to be done.
-	 */
-	if (target_offset == reloc->presumed_offset)
-		goto out;
-
-	/* Check that the relocation address is valid... */
-	if (reloc->offset > obj->base.size - 4) {
-		DRM_ERROR("Relocation beyond object bounds: "
-			  "obj %p target %d offset %d size %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  (int) obj->base.size);
-		goto err;
-	}
-	if (reloc->offset & 3) {
-		DRM_ERROR("Relocation not 4-byte aligned: "
-			  "obj %p target %d offset %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset);
-		goto err;
-	}
-
-	/* and points to somewhere within the target object. */
-	if (reloc->delta >= target_obj->size) {
-		DRM_ERROR("Relocation beyond target object bounds: "
-			  "obj %p target %d delta %d size %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->delta,
-			  (int) target_obj->size);
-		goto err;
-	}
-
-	reloc->delta += target_offset;
-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
-		char *vaddr;
-
-		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
-		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
-		kunmap_atomic(vaddr);
-	} else {
-		struct drm_i915_private *dev_priv = dev->dev_private;
-		uint32_t __iomem *reloc_entry;
-		void __iomem *reloc_page;
-
-		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-		if (ret)
-			goto err;
-
-		/* Map the page containing the relocation we're going to perform.  */
-		reloc->offset += obj->gtt_offset;
-		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-						      reloc->offset & PAGE_MASK);
-		reloc_entry = (uint32_t __iomem *)
-			(reloc_page + (reloc->offset & ~PAGE_MASK));
-		iowrite32(reloc->delta, reloc_entry);
-		io_mapping_unmap_atomic(reloc_page);
-	}
-
-	/* and update the user's relocation entry */
-	reloc->presumed_offset = target_offset;
-
-out:
-	ret = 0;
-err:
-	drm_gem_object_unreference(target_obj);
-	return ret;
-}
-
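The relocation helper removed above validates a relocation and then writes target_offset + delta into the batch at reloc->offset, either through a CPU map or through the GTT. The core patch-and-update-presumed-offset step, reduced to plain memory, is sketched here; struct toy_reloc and apply_reloc() are made-up stand-ins for the uapi entry and the kernel helper, and the domain checks plus the GTT write path are left out.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_reloc {
	uint64_t offset;		/* where in the batch to patch */
	uint32_t delta;			/* addend within the target object */
	uint64_t presumed_offset;	/* userspace's guess of the target address */
};

static int apply_reloc(uint8_t *batch, size_t batch_size,
		       struct toy_reloc *reloc, uint32_t target_gtt_offset)
{
	uint32_t value;

	/* Nothing to do if userspace already guessed the right address. */
	if (reloc->presumed_offset == target_gtt_offset)
		return 0;

	/* The same bounds and alignment checks the removed helper performs. */
	if (reloc->offset > batch_size - 4)
		return -1;
	if (reloc->offset & 3)
		return -1;

	value = target_gtt_offset + reloc->delta;
	memcpy(batch + reloc->offset, &value, sizeof(value));
	reloc->presumed_offset = target_gtt_offset;
	return 0;
}

int main(void)
{
	uint8_t batch[4096] = {0};
	struct toy_reloc r = { .offset = 16, .delta = 0x40, .presumed_offset = 0 };
	uint32_t patched;

	if (apply_reloc(batch, sizeof(batch), &r, 0x00100000))
		return 1;
	memcpy(&patched, batch + 16, sizeof(patched));
	printf("patched dword: %#x\n", (unsigned)patched);	/* 0x100040 */
	return 0;
}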
-static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct drm_file *file_priv,
-				    struct drm_i915_gem_exec_object2 *entry)
-{
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	int i, ret;
-
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
-
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc)))
-			return -EFAULT;
-
-		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
-		if (ret)
-			return ret;
-
-		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset)))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct drm_file *file_priv,
-					 struct drm_i915_gem_exec_object2 *entry,
-					 struct drm_i915_gem_relocation_entry *relocs)
-{
-	int i, ret;
-
-	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
-			     struct drm_file *file,
-			     struct drm_gem_object **object_list,
-			     struct drm_i915_gem_exec_object2 *exec_list,
-			     int count)
-{
-	int i, ret;
-
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate_object(obj, file,
-							  &exec_list[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_reserve(struct drm_device *dev,
-			    struct drm_file *file,
-			    struct drm_gem_object **object_list,
-			    struct drm_i915_gem_exec_object2 *exec_list,
-			    int count)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret, i, retry;
-
-	/* attempt to pin all of the buffers into the GTT */
-	for (retry = 0; retry < 2; retry++) {
-		ret = 0;
-		for (i = 0; i < count; i++) {
-			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
-			struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
-			bool need_fence =
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-
-			/* Check fence reg constraints and rebind if necessary */
-			if (need_fence &&
-			    !i915_gem_object_fence_offset_ok(&obj->base,
-							     obj->tiling_mode)) {
-				ret = i915_gem_object_unbind(&obj->base);
-				if (ret)
-					break;
-			}
-
-			ret = i915_gem_object_pin(&obj->base, entry->alignment);
-			if (ret)
-				break;
-
-			/*
-			 * Pre-965 chips need a fence register set up in order
-			 * to properly handle blits to/from tiled surfaces.
-			 */
-			if (need_fence) {
-				ret = i915_gem_object_get_fence_reg(&obj->base, true);
-				if (ret) {
-					i915_gem_object_unpin(&obj->base);
-					break;
-				}
-
-				dev_priv->fence_regs[obj->fence_reg].gpu = true;
-			}
-
-			entry->offset = obj->gtt_offset;
-		}
-
-		while (i--)
-			i915_gem_object_unpin(object_list[i]);
-
-		if (ret == 0)
-			break;
-
-		if (ret != -ENOSPC || retry)
-			return ret;
-
-		ret = i915_gem_evict_everything(dev);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
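i915_gem_execbuffer_reserve(), removed above, pins every buffer and, if the aperture is exhausted, evicts everything once and retries. The retry skeleton with the aperture reduced to a byte counter is sketched below; toy_pin(), toy_evict_everything() and APERTURE_SIZE are invented for the sketch, and the fence handling plus the unpin-after-reserve bookkeeping of the real function are omitted.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define APERTURE_SIZE	(8u << 20)	/* pretend 8MiB of mappable space */

static size_t aperture_used;

static int toy_pin(size_t size)
{
	if (aperture_used + size > APERTURE_SIZE)
		return -ENOSPC;
	aperture_used += size;
	return 0;
}

static void toy_evict_everything(void)
{
	aperture_used = 0;	/* pretend every idle buffer was unbound */
}

static int reserve_all(const size_t *sizes, int count)
{
	int retry, i, ret = 0;

	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			ret = toy_pin(sizes[i]);
			if (ret)
				break;
		}
		if (ret == 0)
			return 0;

		/* Drop whatever this pass managed to pin before retrying. */
		while (i--)
			aperture_used -= sizes[i];

		if (ret != -ENOSPC || retry)
			return ret;

		toy_evict_everything();
	}
	return ret;
}

int main(void)
{
	size_t sizes[] = { 4u << 20, 2u << 20, 1u << 20 };

	aperture_used = 6u << 20;	/* leftovers from earlier work */
	printf("reserve_all: %d\n", reserve_all(sizes, 3));	/* 0 after one eviction */
	return 0;
}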
-static int
-i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
-				  struct drm_file *file,
-				  struct drm_gem_object **object_list,
-				  struct drm_i915_gem_exec_object2 *exec_list,
-				  int count)
-{
-	struct drm_i915_gem_relocation_entry *reloc;
-	int i, total, ret;
-
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->in_execbuffer = false;
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	total = 0;
-	for (i = 0; i < count; i++)
-		total += exec_list[i].relocation_count;
-
-	reloc = drm_malloc_ab(total, sizeof(*reloc));
-	if (reloc == NULL) {
-		mutex_lock(&dev->struct_mutex);
-		return -ENOMEM;
-	}
-
-	total = 0;
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_relocation_entry __user *user_relocs;
-
-		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
-		if (copy_from_user(reloc+total, user_relocs,
-				   exec_list[i].relocation_count *
-				   sizeof(*reloc))) {
-			ret = -EFAULT;
-			mutex_lock(&dev->struct_mutex);
-			goto err;
-		}
-
-		total += exec_list[i].relocation_count;
-	}
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		goto err;
-	}
-
-	ret = i915_gem_execbuffer_reserve(dev, file,
-					  object_list, exec_list,
-					  count);
-	if (ret)
-		goto err;
-
-	total = 0;
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
-							       &exec_list[i],
-							       reloc + total);
-		if (ret)
-			goto err;
-
-		total += exec_list[i].relocation_count;
-	}
-
-	/* Leave the user relocations as are, this is the painfully slow path,
-	 * and we want to avoid the complication of dropping the lock whilst
-	 * having buffers reserved in the aperture and so causing spurious
-	 * ENOSPC for random operations.
-	 */
-
-err:
-	drm_free_large(reloc);
-	return ret;
-}
-
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
-				struct drm_file *file,
-				struct intel_ring_buffer *ring,
-				struct drm_gem_object **objects,
-				int count)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret, i;
-
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-	for (i = 0; i < count; i++)
-		i915_gem_object_set_to_gpu_domain(objects[i], ring);
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
-
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
-		/* XXX replace with semaphores */
-		if (obj->ring && ring != obj->ring) {
-			ret = i915_gem_object_wait_rendering(&obj->base, true);
-			if (ret)
-				return ret;
-		}
-	}
+					    obj->base.write_domain);
 
 	return 0;
 }
@@ -3694,20 +3219,21 @@
 		return 0;
 
 	ret = 0;
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		/* And wait for the seqno passing without holding any locks and
 		 * causing extra latency for others. This is safe as the irq
 		 * generation is designed to be run atomically and so is
 		 * lockless.
 		 */
-		ring->user_irq_get(dev, ring);
-		ret = wait_event_interruptible(ring->irq_queue,
-					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
-					       || atomic_read(&dev_priv->mm.wedged));
-		ring->user_irq_put(dev, ring);
+		if (ring->irq_get(ring)) {
+			ret = wait_event_interruptible(ring->irq_queue,
+						       i915_seqno_passed(ring->get_seqno(ring), seqno)
+						       || atomic_read(&dev_priv->mm.wedged));
+			ring->irq_put(ring);
 
-		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-			ret = -EIO;
+			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+				ret = -EIO;
+		}
 	}
 
 	if (ret == 0)
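The wait above now brackets wait_event_interruptible() with ring->irq_get()/irq_put() and polls i915_seqno_passed(). The wraparound-safe comparison such a check is built on can be shown on its own; seqno_passed() below is a simplified stand-in, not the driver's inline.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seq1 is at or after seq2, even across a u32 wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));		/* 1 */
	printf("%d\n", seqno_passed(5, 10));		/* 0 */
	printf("%d\n", seqno_passed(3, 0xfffffffeu));	/* 1: counter wrapped */
	return 0;
}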
@@ -3716,577 +3242,106 @@
 	return ret;
 }
 
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
-			  uint64_t exec_offset)
-{
-	uint32_t exec_start, exec_len;
-
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
-
-	if ((exec_start | exec_len) & 0x7)
-		return -EINVAL;
-
-	if (!exec_start)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
-		   int count)
-{
-	int i;
-
-	for (i = 0; i < count; i++) {
-		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-		int length; /* limited by fault_in_pages_readable() */
-
-		/* First check for malicious input causing overflow */
-		if (exec[i].relocation_count >
-		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
-			return -EINVAL;
-
-		length = exec[i].relocation_count *
-			sizeof(struct drm_i915_gem_relocation_entry);
-		if (!access_ok(VERIFY_READ, ptr, length))
-			return -EFAULT;
-
-		/* we may also need to update the presumed offsets */
-		if (!access_ok(VERIFY_WRITE, ptr, length))
-			return -EFAULT;
-
-		if (fault_in_pages_readable(ptr, length))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
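validate_exec_list(), removed above, rejects a user-supplied relocation_count whose byte length would overflow before it ever reaches access_ok(). The overflow guard on its own, with a 32-byte stand-in struct in place of the uapi relocation entry:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* 32-byte stand-in for the uapi relocation entry. */
struct toy_reloc_entry { uint64_t a, b, c, d; };

static int reloc_length_ok(uint32_t relocation_count, size_t *length)
{
	/* Reject counts whose byte length would not fit in an int. */
	if (relocation_count > INT_MAX / sizeof(struct toy_reloc_entry))
		return 0;

	*length = relocation_count * sizeof(struct toy_reloc_entry);
	return 1;
}

int main(void)
{
	size_t len;

	if (reloc_length_ok(1000, &len))
		printf("1000 relocations -> %zu bytes\n", len);
	if (!reloc_length_ok(UINT32_MAX, &len))
		printf("UINT32_MAX relocations rejected\n");
	return 0;
}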
-static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-		       struct drm_file *file,
-		       struct drm_i915_gem_execbuffer2 *args,
-		       struct drm_i915_gem_exec_object2 *exec_list)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object **object_list = NULL;
-	struct drm_gem_object *batch_obj;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_request *request = NULL;
-	int ret, i, flips;
-	uint64_t exec_offset;
-
-	struct intel_ring_buffer *ring = NULL;
-
-	ret = i915_gem_check_is_wedged(dev);
-	if (ret)
-		return ret;
-
-	ret = validate_exec_list(exec_list, args->buffer_count);
-	if (ret)
-		return ret;
-
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-	switch (args->flags & I915_EXEC_RING_MASK) {
-	case I915_EXEC_DEFAULT:
-	case I915_EXEC_RENDER:
-		ring = &dev_priv->render_ring;
-		break;
-	case I915_EXEC_BSD:
-		if (!HAS_BSD(dev)) {
-			DRM_ERROR("execbuf with invalid ring (BSD)\n");
-			return -EINVAL;
-		}
-		ring = &dev_priv->bsd_ring;
-		break;
-	case I915_EXEC_BLT:
-		if (!HAS_BLT(dev)) {
-			DRM_ERROR("execbuf with invalid ring (BLT)\n");
-			return -EINVAL;
-		}
-		ring = &dev_priv->blt_ring;
-		break;
-	default:
-		DRM_ERROR("execbuf with unknown ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
-
-	if (args->buffer_count < 1) {
-		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-		return -EINVAL;
-	}
-	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (object_list == NULL) {
-		DRM_ERROR("Failed to allocate object list for %d buffers\n",
-			  args->buffer_count);
-		ret = -ENOMEM;
-		goto pre_mutex_err;
-	}
-
-	if (args->num_cliprects != 0) {
-		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
-				    GFP_KERNEL);
-		if (cliprects == NULL) {
-			ret = -ENOMEM;
-			goto pre_mutex_err;
-		}
-
-		ret = copy_from_user(cliprects,
-				     (struct drm_clip_rect __user *)
-				     (uintptr_t) args->cliprects_ptr,
-				     sizeof(*cliprects) * args->num_cliprects);
-		if (ret != 0) {
-			DRM_ERROR("copy %d cliprects failed: %d\n",
-				  args->num_cliprects, ret);
-			ret = -EFAULT;
-			goto pre_mutex_err;
-		}
-	}
-
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL) {
-		ret = -ENOMEM;
-		goto pre_mutex_err;
-	}
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto pre_mutex_err;
-
-	if (dev_priv->mm.suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -EBUSY;
-		goto pre_mutex_err;
-	}
-
-	/* Look up object handles */
-	for (i = 0; i < args->buffer_count; i++) {
-		object_list[i] = drm_gem_object_lookup(dev, file,
-						       exec_list[i].handle);
-		if (object_list[i] == NULL) {
-			DRM_ERROR("Invalid object handle %d at index %d\n",
-				   exec_list[i].handle, i);
-			/* prevent error path from reading uninitialized data */
-			args->buffer_count = i + 1;
-			ret = -ENOENT;
-			goto err;
-		}
-
-		obj_priv = to_intel_bo(object_list[i]);
-		if (obj_priv->in_execbuffer) {
-			DRM_ERROR("Object %p appears more than once in object list\n",
-				   object_list[i]);
-			/* prevent error path from reading uninitialized data */
-			args->buffer_count = i + 1;
-			ret = -EINVAL;
-			goto err;
-		}
-		obj_priv->in_execbuffer = true;
-	}
-
-	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_reserve(dev, file,
-					  object_list, exec_list,
-					  args->buffer_count);
-	if (ret)
-		goto err;
-
-	/* The objects are in their final locations, apply the relocations. */
-	ret = i915_gem_execbuffer_relocate(dev, file,
-					   object_list, exec_list,
-					   args->buffer_count);
-	if (ret) {
-		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, file,
-								object_list,
-								exec_list,
-								args->buffer_count);
-			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-		}
-		if (ret)
-			goto err;
-	}
-
-	/* Set the pending read domains for the batch buffer to COMMAND */
-	batch_obj = object_list[args->buffer_count-1];
-	if (batch_obj->pending_write_domain) {
-		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
-		ret = -EINVAL;
-		goto err;
-	}
-	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
-	/* Sanity check the batch buffer */
-	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
-	ret = i915_gem_check_execbuffer(args, exec_offset);
-	if (ret != 0) {
-		DRM_ERROR("execbuf with invalid offset/length\n");
-		goto err;
-	}
-
-	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
-					      object_list, args->buffer_count);
-	if (ret)
-		goto err;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-		uint32_t old_write_domain = obj->write_domain;
-		obj->write_domain = obj->pending_write_domain;
-		trace_i915_gem_object_change_domain(obj,
-						    obj->read_domains,
-						    old_write_domain);
-	}
-
-#if WATCH_COHERENCY
-	for (i = 0; i < args->buffer_count; i++) {
-		i915_gem_object_check_coherency(object_list[i],
-						exec_list[i].handle);
-	}
-#endif
-
-#if WATCH_EXEC
-	i915_gem_dump_object(batch_obj,
-			      args->batch_len,
-			      __func__,
-			      ~0);
-#endif
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-	flips = 0;
-	for (i = 0; i < args->buffer_count; i++) {
-		if (object_list[i]->write_domain)
-			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
-	}
-	if (flips) {
-		int plane, flip_mask;
-
-		for (plane = 0; flips >> plane; plane++) {
-			if (((flips >> plane) & 1) == 0)
-				continue;
-
-			if (plane)
-				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-			else
-				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-			intel_ring_begin(dev, ring, 2);
-			intel_ring_emit(dev, ring,
-					MI_WAIT_FOR_EVENT | flip_mask);
-			intel_ring_emit(dev, ring, MI_NOOP);
-			intel_ring_advance(dev, ring);
-		}
-	}
-
-	/* Exec the batchbuffer */
-	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
-					    cliprects, exec_offset);
-	if (ret) {
-		DRM_ERROR("dispatch failed %d\n", ret);
-		goto err;
-	}
-
-	/*
-	 * Ensure that the commands in the batch buffer are
-	 * finished before the interrupt fires
-	 */
-	i915_retire_commands(dev, ring);
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		i915_gem_object_move_to_active(obj, ring);
-		if (obj->write_domain)
-			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
-				       &ring->gpu_write_list);
-	}
-
-	i915_add_request(dev, file, request, ring);
-	request = NULL;
-
-err:
-	for (i = 0; i < args->buffer_count; i++) {
-		if (object_list[i]) {
-			obj_priv = to_intel_bo(object_list[i]);
-			obj_priv->in_execbuffer = false;
-		}
-		drm_gem_object_unreference(object_list[i]);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
-	drm_free_large(object_list);
-	kfree(cliprects);
-	kfree(request);
-
-	return ret;
-}
-
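Inside the removed i915_gem_do_execbuffer(), the pending-flip handling collects a bitmask of planes and emits one wait per set bit before dispatching the batch. The bit-walking loop in isolation, with a stub in place of the MI_WAIT_FOR_EVENT emission (emit_wait_for_plane() is invented for the sketch):

#include <stdio.h>

/* The driver would emit MI_WAIT_FOR_EVENT | <plane flip mask> here. */
static void emit_wait_for_plane(int plane)
{
	printf("wait for pending flip on plane %d\n", plane);
}

static void wait_for_pending_flips(unsigned int flips)
{
	int plane;

	/* Visit each set bit; the loop ends once no higher bits remain. */
	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;
		emit_wait_for_plane(plane);
	}
}

int main(void)
{
	wait_for_pending_flips(0x5);	/* pending flips on planes 0 and 2 */
	return 0;
}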
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    uint32_t alignment,
+		    bool map_and_fenceable)
 {
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_execbuffer2 exec2;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
-	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
-	int ret, i;
-
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
-	if (args->buffer_count < 1) {
-		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-		return -EINVAL;
-	}
-
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
-	if (exec_list == NULL || exec2_list == NULL) {
-		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
-			  args->buffer_count);
-		drm_free_large(exec_list);
-		drm_free_large(exec2_list);
-		return -ENOMEM;
-	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		drm_free_large(exec_list);
-		drm_free_large(exec2_list);
-		return -EFAULT;
-	}
-
-	for (i = 0; i < args->buffer_count; i++) {
-		exec2_list[i].handle = exec_list[i].handle;
-		exec2_list[i].relocation_count = exec_list[i].relocation_count;
-		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
-		exec2_list[i].alignment = exec_list[i].alignment;
-		exec2_list[i].offset = exec_list[i].offset;
-		if (INTEL_INFO(dev)->gen < 4)
-			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
-		else
-			exec2_list[i].flags = 0;
-	}
-
-	exec2.buffers_ptr = args->buffers_ptr;
-	exec2.buffer_count = args->buffer_count;
-	exec2.batch_start_offset = args->batch_start_offset;
-	exec2.batch_len = args->batch_len;
-	exec2.DR1 = args->DR1;
-	exec2.DR4 = args->DR4;
-	exec2.num_cliprects = args->num_cliprects;
-	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = I915_EXEC_RENDER;
-
-	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
-	drm_free_large(exec_list);
-	drm_free_large(exec2_list);
-	return ret;
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct drm_i915_gem_execbuffer2 *args = data;
-	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
-	int ret;
-
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
-	if (args->buffer_count < 1) {
-		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
-		return -EINVAL;
-	}
-
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
-	if (exec2_list == NULL) {
-		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
-			  args->buffer_count);
-		return -ENOMEM;
-	}
-	ret = copy_from_user(exec2_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec2_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		drm_free_large(exec2_list);
-		return -EFAULT;
-	}
-
-	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
-	drm_free_large(exec2_list);
-	return ret;
-}
-
-int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
-{
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
-	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
 	WARN_ON(i915_verify_lists(dev));
 
-	if (obj_priv->gtt_space != NULL) {
-		if (alignment == 0)
-			alignment = i915_gem_get_gtt_alignment(obj);
-		if (obj_priv->gtt_offset & (alignment - 1)) {
-			WARN(obj_priv->pin_count,
-			     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
-			     obj_priv->gtt_offset, alignment);
+	if (obj->gtt_space != NULL) {
+		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+		    (map_and_fenceable && !obj->map_and_fenceable)) {
+			WARN(obj->pin_count,
+			     "bo is already pinned with incorrect alignment:"
+			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " obj->map_and_fenceable=%d\n",
+			     obj->gtt_offset, alignment,
+			     map_and_fenceable,
+			     obj->map_and_fenceable);
 			ret = i915_gem_object_unbind(obj);
 			if (ret)
 				return ret;
 		}
 	}
 
-	if (obj_priv->gtt_space == NULL) {
-		ret = i915_gem_object_bind_to_gtt(obj, alignment);
+	if (obj->gtt_space == NULL) {
+		ret = i915_gem_object_bind_to_gtt(obj, alignment,
+						  map_and_fenceable);
 		if (ret)
 			return ret;
 	}
 
-	obj_priv->pin_count++;
-
-	/* If the object is not active and not pending a flush,
-	 * remove it from the inactive list
-	 */
-	if (obj_priv->pin_count == 1) {
-		i915_gem_info_add_pin(dev_priv, obj->size);
-		if (!obj_priv->active)
-			list_move_tail(&obj_priv->mm_list,
+	if (obj->pin_count++ == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
+	obj->pin_mappable |= map_and_fenceable;
 
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
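The rebind test added to i915_gem_object_pin() above uses "offset & (alignment - 1)" to detect a misaligned binding, which presumes a power-of-two alignment. The idiom on its own; misaligned() is an illustrative helper, not a driver function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* alignment == 0 means "no requirement", mirroring the caller above. */
static bool misaligned(uint32_t offset, uint32_t alignment)
{
	return alignment && (offset & (alignment - 1));
}

int main(void)
{
	printf("%d\n", misaligned(0x10000, 0x1000));	/* 0: 4KiB aligned */
	printf("%d\n", misaligned(0x10800, 0x1000));	/* 1: not 4KiB aligned */
	return 0;
}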
 
 void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	WARN_ON(i915_verify_lists(dev));
-	obj_priv->pin_count--;
-	BUG_ON(obj_priv->pin_count < 0);
-	BUG_ON(obj_priv->gtt_space == NULL);
+	BUG_ON(obj->pin_count == 0);
+	BUG_ON(obj->gtt_space == NULL);
 
-	/* If the object is no longer pinned, and is
-	 * neither active nor being flushed, then stick it on
-	 * the inactive list
-	 */
-	if (obj_priv->pin_count == 0) {
-		if (!obj_priv->active)
-			list_move_tail(&obj_priv->mm_list,
+	if (--obj->pin_count == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.inactive_list);
-		i915_gem_info_remove_pin(dev_priv, obj->size);
+		obj->pin_mappable = false;
 	}
 	WARN_ON(i915_verify_lists(dev));
 }
 
 int
 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_pin *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+	if (obj->pin_filp != NULL && obj->pin_filp != file) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
 
-	obj_priv->user_pin_count++;
-	obj_priv->pin_filp = file_priv;
-	if (obj_priv->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args->alignment);
+	obj->user_pin_count++;
+	obj->pin_filp = file;
+	if (obj->user_pin_count == 1) {
+		ret = i915_gem_object_pin(obj, args->alignment, true);
 		if (ret)
 			goto out;
 	}
@@ -4295,9 +3350,9 @@
 	 * as the X server doesn't manage domains yet
 	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
-	args->offset = obj_priv->gtt_offset;
+	args->offset = obj->gtt_offset;
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4305,38 +3360,36 @@
 
 int
 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_i915_gem_pin *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->pin_filp != file_priv) {
+	if (obj->pin_filp != file) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
-	obj_priv->user_pin_count--;
-	if (obj_priv->user_pin_count == 0) {
-		obj_priv->pin_filp = NULL;
+	obj->user_pin_count--;
+	if (obj->user_pin_count == 0) {
+		obj->pin_filp = NULL;
 		i915_gem_object_unpin(obj);
 	}
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4344,48 +3397,50 @@
 
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+		    struct drm_file *file)
 {
 	struct drm_i915_gem_busy *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Count all active objects as busy, even if they are currently not used
 	 * by the gpu. Users of this interface expect objects to eventually
 	 * become non-busy without any further actions, therefore emit any
 	 * necessary flushes here.
 	 */
-	args->busy = obj_priv->active;
+	args->busy = obj->active;
 	if (args->busy) {
 		/* Unconditionally flush objects, even when the gpu still uses this
 		 * object. Userspace calling this function indicates that it wants to
 		 * use this buffer rather sooner than later, so issuing the required
 		 * flush earlier is beneficial.
 		 */
-		if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
-			i915_gem_flush_ring(dev, file_priv,
-					    obj_priv->ring,
-					    0, obj->write_domain);
-		} else if (obj_priv->ring->outstanding_lazy_request) {
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(dev, obj->ring,
+						  0, obj->base.write_domain);
+		} else if (obj->ring->outstanding_lazy_request ==
+			   obj->last_rendering_seqno) {
+			struct drm_i915_gem_request *request;
+
 			/* This ring is not being cleared by active usage,
 			 * so emit a request to do so.
 			 */
-			u32 seqno = i915_add_request(dev,
-						     NULL, NULL,
-						     obj_priv->ring);
-			if (seqno == 0)
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (request)
+				ret = i915_add_request(dev,
+						       NULL, request,
+						       obj->ring);
+			else
 				ret = -ENOMEM;
 		}
 
@@ -4394,12 +3449,12 @@
 		 * are actually unmasked, and our working set ends up being
 		 * larger than required.
 		 */
-		i915_gem_retire_requests_ring(dev, obj_priv->ring);
+		i915_gem_retire_requests_ring(dev, obj->ring);
 
-		args->busy = obj_priv->active;
+		args->busy = obj->active;
 	}
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
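The busy ioctl above leans on request retirement so that an idle object eventually reports !busy. Retirement itself is just "pop every request whose seqno the hardware has passed"; a toy version of that loop follows, where toy_request and retire_requests() are stand-ins, not the driver's request structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_request {
	uint32_t seqno;
	struct toy_request *next;
};

static bool seqno_passed(uint32_t hw, uint32_t want)
{
	return (int32_t)(hw - want) >= 0;
}

/* Unlink and count every request the hardware has already completed. */
static int retire_requests(struct toy_request **list, uint32_t hw_seqno)
{
	int retired = 0;

	while (*list && seqno_passed(hw_seqno, (*list)->seqno)) {
		*list = (*list)->next;
		retired++;
	}
	return retired;
}

int main(void)
{
	struct toy_request r3 = { 3, NULL }, r2 = { 2, &r3 }, r1 = { 1, &r2 };
	struct toy_request *list = &r1;

	printf("retired %d requests\n", retire_requests(&list, 2));	/* 2 */
	return 0;
}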
@@ -4417,8 +3472,7 @@
 		       struct drm_file *file_priv)
 {
 	struct drm_i915_gem_madvise *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	switch (args->madv) {
@@ -4433,37 +3487,36 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->pin_count) {
+	if (obj->pin_count) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (obj_priv->madv != __I915_MADV_PURGED)
-		obj_priv->madv = args->madv;
+	if (obj->madv != __I915_MADV_PURGED)
+		obj->madv = args->madv;
 
 	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj_priv) &&
-	    obj_priv->gtt_space == NULL)
+	if (i915_gem_object_is_purgeable(obj) &&
+	    obj->gtt_space == NULL)
 		i915_gem_object_truncate(obj);
 
-	args->retained = obj_priv->madv != __I915_MADV_PURGED;
+	args->retained = obj->madv != __I915_MADV_PURGED;
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-					      size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
@@ -4486,11 +3539,15 @@
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
 	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
 	INIT_LIST_HEAD(&obj->gpu_write_list);
 	obj->madv = I915_MADV_WILLNEED;
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
 
-	return &obj->base;
+	return obj;
 }
 
 int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4500,42 +3557,41 @@
 	return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	ret = i915_gem_object_unbind(obj);
 	if (ret == -ERESTARTSYS) {
-		list_move(&obj_priv->mm_list,
+		list_move(&obj->mm_list,
 			  &dev_priv->mm.deferred_free_list);
 		return;
 	}
 
-	if (obj_priv->mmap_offset)
+	if (obj->base.map_list.map)
 		i915_gem_free_mmap_offset(obj);
 
-	drm_gem_object_release(obj);
-	i915_gem_info_remove_obj(dev_priv, obj->size);
+	drm_gem_object_release(&obj->base);
+	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
-	kfree(obj_priv->page_cpu_valid);
-	kfree(obj_priv->bit_17);
-	kfree(obj_priv);
+	kfree(obj->page_cpu_valid);
+	kfree(obj->bit_17);
+	kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	struct drm_device *dev = obj->base.dev;
 
 	trace_i915_gem_object_destroy(obj);
 
-	while (obj_priv->pin_count > 0)
+	while (obj->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
-	if (obj_priv->phys_obj)
+	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
 	i915_gem_free_object_tail(obj);
@@ -4562,13 +3618,15 @@
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_inactive(dev);
+		ret = i915_gem_evict_inactive(dev, false);
 		if (ret) {
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
 	}
 
+	i915_gem_reset_fences(dev);
+
 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
 	 * And not confound mm.suspended!
@@ -4587,82 +3645,15 @@
 	return 0;
 }
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-static int
-i915_gem_init_pipe_control(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	int ret;
-
-	obj = i915_gem_alloc_object(dev, 4096);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate seqno page\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-	ret = i915_gem_object_pin(obj, 4096);
-	if (ret)
-		goto err_unref;
-
-	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
-	dev_priv->seqno_page =  kmap(obj_priv->pages[0]);
-	if (dev_priv->seqno_page == NULL)
-		goto err_unpin;
-
-	dev_priv->seqno_obj = obj;
-	memset(dev_priv->seqno_page, 0, PAGE_SIZE);
-
-	return 0;
-
-err_unpin:
-	i915_gem_object_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(obj);
-err:
-	return ret;
-}
-
-
-static void
-i915_gem_cleanup_pipe_control(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-
-	obj = dev_priv->seqno_obj;
-	obj_priv = to_intel_bo(obj);
-	kunmap(obj_priv->pages[0]);
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
-	dev_priv->seqno_obj = NULL;
-
-	dev_priv->seqno_page = NULL;
-}
-
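The removed pipe-control setup above is a standard goto-unwind sequence: allocate the seqno object, pin it, map page 0, and back out in reverse order on any failure. The same shape with malloc() standing in for the GEM and kmap calls; toy_status_page and its init function are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_status_page {
	void *obj;	/* stands in for the pinned GEM object */
	void *page;	/* stands in for the kmap()ed seqno page */
};

static int toy_status_page_init(struct toy_status_page *sp)
{
	sp->obj = malloc(4096);		/* "allocate and pin the object" */
	if (sp->obj == NULL)
		goto err;

	sp->page = malloc(4096);	/* "kmap page 0" */
	if (sp->page == NULL)
		goto err_free_obj;

	memset(sp->page, 0, 4096);	/* start with a clean seqno page */
	return 0;

err_free_obj:
	free(sp->obj);
	sp->obj = NULL;
err:
	return -1;
}

int main(void)
{
	struct toy_status_page sp;

	if (toy_status_page_init(&sp) == 0) {
		printf("status page ready\n");
		free(sp.page);
		free(sp.obj);
	}
	return 0;
}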
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	if (HAS_PIPE_CONTROL(dev)) {
-		ret = i915_gem_init_pipe_control(dev);
-		if (ret)
-			return ret;
-	}
-
 	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
-		goto cleanup_pipe_control;
+		return ret;
 
 	if (HAS_BSD(dev)) {
 		ret = intel_init_bsd_ring_buffer(dev);
@@ -4681,12 +3672,9 @@
 	return 0;
 
 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
 cleanup_render_ring:
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-cleanup_pipe_control:
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
+	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
 	return ret;
 }
 
@@ -4694,12 +3682,10 @@
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 }
 
 int
@@ -4707,7 +3693,7 @@
 		       struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
@@ -4727,14 +3713,12 @@
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+	}
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -4796,17 +3780,14 @@
 	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	init_ring_lists(&dev_priv->render_ring);
-	init_ring_lists(&dev_priv->bsd_ring);
-	init_ring_lists(&dev_priv->blt_ring);
+	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
 	init_completion(&dev_priv->error_completion);
-	spin_lock(&shrink_list_lock);
-	list_add(&dev_priv->mm.shrink_list, &shrink_list);
-	spin_unlock(&shrink_list_lock);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
 	if (IS_GEN3(dev)) {
@@ -4818,6 +3799,8 @@
 		}
 	}
 
+	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->fence_reg_start = 3;
@@ -4849,6 +3832,10 @@
 	}
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
 /*
@@ -4918,47 +3905,47 @@
 }
 
 void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_gem_object *obj)
+				 struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv;
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	char *vaddr;
 	int i;
-	int ret;
 	int page_count;
 
-	obj_priv = to_intel_bo(obj);
-	if (!obj_priv->phys_obj)
+	if (!obj->phys_obj)
 		return;
+	vaddr = obj->phys_obj->handle->vaddr;
 
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret)
-		goto out;
-
-	page_count = obj->size / PAGE_SIZE;
-
+	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		char *dst = kmap_atomic(obj_priv->pages[i]);
-		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+		struct page *page = read_cache_page_gfp(mapping, i,
+							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (!IS_ERR(page)) {
+			char *dst = kmap_atomic(page);
+			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+			kunmap_atomic(dst);
 
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst);
+			drm_clflush_pages(&page, 1);
+
+			set_page_dirty(page);
+			mark_page_accessed(page);
+			page_cache_release(page);
+		}
 	}
-	drm_clflush_pages(obj_priv->pages, page_count);
-	drm_agp_chipset_flush(dev);
+	intel_gtt_chipset_flush();
 
-	i915_gem_object_put_pages(obj);
-out:
-	obj_priv->phys_obj->cur_obj = NULL;
-	obj_priv->phys_obj = NULL;
+	obj->phys_obj->cur_obj = NULL;
+	obj->phys_obj = NULL;
 }
 
 int
 i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_gem_object *obj,
+			    struct drm_i915_gem_object *obj,
 			    int id,
 			    int align)
 {
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 	int page_count;
 	int i;
@@ -4966,10 +3953,8 @@
 	if (id > I915_MAX_PHYS_OBJECT)
 		return -EINVAL;
 
-	obj_priv = to_intel_bo(obj);
-
-	if (obj_priv->phys_obj) {
-		if (obj_priv->phys_obj->id == id)
+	if (obj->phys_obj) {
+		if (obj->phys_obj->id == id)
 			return 0;
 		i915_gem_detach_phys_object(dev, obj);
 	}
@@ -4977,51 +3962,50 @@
 	/* create a new object */
 	if (!dev_priv->mm.phys_objs[id - 1]) {
 		ret = i915_gem_init_phys_object(dev, id,
-						obj->size, align);
+						obj->base.size, align);
 		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
-			goto out;
+			DRM_ERROR("failed to init phys object %d size: %zu\n",
+				  id, obj->base.size);
+			return ret;
 		}
 	}
 
 	/* bind to the object */
-	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj_priv->phys_obj->cur_obj = obj;
+	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+	obj->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret) {
-		DRM_ERROR("failed to get page list\n");
-		goto out;
-	}
-
-	page_count = obj->size / PAGE_SIZE;
+	page_count = obj->base.size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *src = kmap_atomic(obj_priv->pages[i]);
-		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+		struct page *page;
+		char *dst, *src;
 
+		page = read_cache_page_gfp(mapping, i,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		src = kmap_atomic(page);
+		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 		memcpy(dst, src, PAGE_SIZE);
 		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
 	}
 
-	i915_gem_object_put_pages(obj);
-
 	return 0;
-out:
-	return ret;
 }
 
 static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
 	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
 
@@ -5036,7 +4020,7 @@
 			return -EFAULT;
 	}
 
-	drm_agp_chipset_flush(dev);
+	intel_gtt_chipset_flush();
 	return 0;
 }
 
@@ -5074,144 +4058,68 @@
 }
 
 static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+			 int nr_to_scan,
+			 gfp_t gfp_mask)
 {
-	drm_i915_private_t *dev_priv, *next_dev;
-	struct drm_i915_gem_object *obj_priv, *next_obj;
-	int cnt = 0;
-	int would_deadlock = 1;
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker,
+			     struct drm_i915_private,
+			     mm.inactive_shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_i915_gem_object *obj, *next;
+	int cnt;
+
+	if (!mutex_trylock(&dev->struct_mutex))
+		return 0;
 
 	/* "fast-path" to count number of available objects */
 	if (nr_to_scan == 0) {
-		spin_lock(&shrink_list_lock);
-		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
-			struct drm_device *dev = dev_priv->dev;
-
-			if (mutex_trylock(&dev->struct_mutex)) {
-				list_for_each_entry(obj_priv,
-						    &dev_priv->mm.inactive_list,
-						    mm_list)
-					cnt++;
-				mutex_unlock(&dev->struct_mutex);
-			}
-		}
-		spin_unlock(&shrink_list_lock);
-
-		return (cnt / 100) * sysctl_vfs_cache_pressure;
+		cnt = 0;
+		list_for_each_entry(obj,
+				    &dev_priv->mm.inactive_list,
+				    mm_list)
+			cnt++;
+		mutex_unlock(&dev->struct_mutex);
+		return cnt / 100 * sysctl_vfs_cache_pressure;
 	}
 
-	spin_lock(&shrink_list_lock);
-
 rescan:
 	/* first scan for clean buffers */
-	list_for_each_entry_safe(dev_priv, next_dev,
-				 &shrink_list, mm.shrink_list) {
-		struct drm_device *dev = dev_priv->dev;
+	i915_gem_retire_requests(dev);
 
-		if (! mutex_trylock(&dev->struct_mutex))
-			continue;
-
-		spin_unlock(&shrink_list_lock);
-		i915_gem_retire_requests(dev);
-
-		list_for_each_entry_safe(obj_priv, next_obj,
-					 &dev_priv->mm.inactive_list,
-					 mm_list) {
-			if (i915_gem_object_is_purgeable(obj_priv)) {
-				i915_gem_object_unbind(&obj_priv->base);
-				if (--nr_to_scan <= 0)
-					break;
-			}
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (i915_gem_object_is_purgeable(obj)) {
+			if (i915_gem_object_unbind(obj) == 0 &&
+			    --nr_to_scan == 0)
+				break;
 		}
-
-		spin_lock(&shrink_list_lock);
-		mutex_unlock(&dev->struct_mutex);
-
-		would_deadlock = 0;
-
-		if (nr_to_scan <= 0)
-			break;
 	}
 
 	/* second pass, evict/count anything still on the inactive list */
-	list_for_each_entry_safe(dev_priv, next_dev,
-				 &shrink_list, mm.shrink_list) {
-		struct drm_device *dev = dev_priv->dev;
-
-		if (! mutex_trylock(&dev->struct_mutex))
-			continue;
-
-		spin_unlock(&shrink_list_lock);
-
-		list_for_each_entry_safe(obj_priv, next_obj,
-					 &dev_priv->mm.inactive_list,
-					 mm_list) {
-			if (nr_to_scan > 0) {
-				i915_gem_object_unbind(&obj_priv->base);
-				nr_to_scan--;
-			} else
-				cnt++;
-		}
-
-		spin_lock(&shrink_list_lock);
-		mutex_unlock(&dev->struct_mutex);
-
-		would_deadlock = 0;
+	cnt = 0;
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (nr_to_scan &&
+		    i915_gem_object_unbind(obj) == 0)
+			nr_to_scan--;
+		else
+			cnt++;
 	}
 
-	if (nr_to_scan) {
-		int active = 0;
-
+	if (nr_to_scan && i915_gpu_is_active(dev)) {
 		/*
 		 * We are desperate for pages, so as a last resort, wait
 		 * for the GPU to finish and discard whatever we can.
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
-			struct drm_device *dev = dev_priv->dev;
-
-			if (!mutex_trylock(&dev->struct_mutex))
-				continue;
-
-			spin_unlock(&shrink_list_lock);
-
-			if (i915_gpu_is_active(dev)) {
-				i915_gpu_idle(dev);
-				active++;
-			}
-
-			spin_lock(&shrink_list_lock);
-			mutex_unlock(&dev->struct_mutex);
-		}
-
-		if (active)
+		if (i915_gpu_idle(dev) == 0)
 			goto rescan;
 	}
-
-	spin_unlock(&shrink_list_lock);
-
-	if (would_deadlock)
-		return -1;
-	else if (cnt > 0)
-		return (cnt / 100) * sysctl_vfs_cache_pressure;
-	else
-		return 0;
-}
-
-static struct shrinker shrinker = {
-	.shrink = i915_gem_shrink,
-	.seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
-    register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
-    unregister_shrinker(&shrinker);
+	mutex_unlock(&dev->struct_mutex);
+	return cnt / 100 * sysctl_vfs_cache_pressure;
 }
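
The rewritten shrinker above no longer walks a global shrink_list under a spinlock; each device embeds a struct shrinker in dev_priv->mm and the callback recovers the owning device with container_of(), returning early if struct_mutex cannot be taken. Below is a minimal stand-alone C sketch of that container_of() pattern only — the structure layout and names are invented for illustration, not taken from the driver.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's container_of(): recover the address of the
 * enclosing structure from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct shrinker { int seeks; };

struct dev_private {
	int id;
	struct shrinker inactive_shrinker;	/* embedded, as in dev_priv->mm */
};

static int shrink(struct shrinker *s)
{
	/* The callback only receives the shrinker; walk back to the device. */
	struct dev_private *priv =
		container_of(s, struct dev_private, inactive_shrinker);
	return priv->id;
}

int main(void)
{
	struct dev_private priv = { .id = 42 };
	printf("recovered id = %d\n", shrink(&priv.inactive_shrinker));
	return 0;
}
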
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b8..29d014c 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@
 }
 
 void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 		     const char *where, uint32_t mark)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page;
 
-	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+	DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
 	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
 		int page_len, chunk, chunk_len;
 
@@ -170,9 +169,9 @@
 			chunk_len = page_len - chunk;
 			if (chunk_len > 128)
 				chunk_len = 128;
-			i915_gem_dump_page(obj_priv->pages[page],
+			i915_gem_dump_page(obj->pages[page],
 					   chunk, chunk + chunk_len,
-					   obj_priv->gtt_offset +
+					   obj->gtt_offset +
 					   page * PAGE_SIZE,
 					   mark);
 		}
@@ -182,21 +181,19 @@
 
 #if WATCH_COHERENCY
 void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_device *dev = obj->base.dev;
 	int page;
 	uint32_t *gtt_mapping;
 	uint32_t *backing_map = NULL;
 	int bad_count = 0;
 
 	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
-		 __func__, obj, obj_priv->gtt_offset, handle,
+		 __func__, obj, obj->gtt_offset, handle,
 		 obj->size / 1024);
 
-	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
-			      obj->size);
+	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
 	if (gtt_mapping == NULL) {
 		DRM_ERROR("failed to map GTT space\n");
 		return;
@@ -205,7 +202,7 @@
 	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
 		int i;
 
-		backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+		backing_map = kmap_atomic(obj->pages[page], KM_USER0);
 
 		if (backing_map == NULL) {
 			DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@
 			if (cpuval != gttval) {
 				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
 					 "0x%08x vs 0x%08x\n",
-					 (int)(obj_priv->gtt_offset +
+					 (int)(obj->gtt_offset +
 					       page * PAGE_SIZE + i * 4),
 					 cpuval, gttval);
 				if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1..3d39005 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
 #include "i915_drm.h"
 
 static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
-	   struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
-	list_add(&obj_priv->evict_list, unwind);
-	drm_gem_object_reference(&obj_priv->base);
-	return drm_mm_scan_add_block(obj_priv->gtt_space);
+	list_add(&obj->exec_list, unwind);
+	drm_gem_object_reference(&obj->base);
+	return drm_mm_scan_add_block(obj->gtt_space);
 }
 
 int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+			 unsigned alignment, bool mappable)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
 	i915_gem_retire_requests(dev);
 
 	/* Re-check for free space after retiring requests */
-	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
-			       min_size, alignment, 0))
-		return 0;
+	if (mappable) {
+		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+						min_size, alignment, 0,
+						dev_priv->mm.gtt_mappable_end,
+						0))
+			return 0;
+	} else {
+		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+				       min_size, alignment, 0))
+			return 0;
+	}
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,56 @@
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+	if (mappable)
+		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+					    alignment, 0,
+					    dev_priv->mm.gtt_mappable_end);
+	else
+		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-		if (mark_free(obj_priv, &unwind_list))
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		/* Does the object require an outstanding flush? */
-		if (obj_priv->base.write_domain || obj_priv->pin_count)
+		if (obj->base.write_domain || obj->pin_count)
 			continue;
 
-		if (mark_free(obj_priv, &unwind_list))
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
 	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-		if (obj_priv->pin_count)
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+		if (obj->pin_count)
 			continue;
 
-		if (mark_free(obj_priv, &unwind_list))
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		if (! obj_priv->base.write_domain || obj_priv->pin_count)
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (! obj->base.write_domain || obj->pin_count)
 			continue;
 
-		if (mark_free(obj_priv, &unwind_list))
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
-		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+
+		ret = drm_mm_scan_remove_block(obj->gtt_space);
 		BUG_ON(ret);
-		drm_gem_object_unreference(&obj_priv->base);
+
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +150,34 @@
 	 * temporary list. */
 	INIT_LIST_HEAD(&eviction_list);
 	while (!list_empty(&unwind_list)) {
-		obj_priv = list_first_entry(&unwind_list,
-					    struct drm_i915_gem_object,
-					    evict_list);
-		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-			list_move(&obj_priv->evict_list, &eviction_list);
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+			list_move(&obj->exec_list, &eviction_list);
 			continue;
 		}
-		list_del(&obj_priv->evict_list);
-		drm_gem_object_unreference(&obj_priv->base);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	/* Unbinding will emit any required flushes */
 	while (!list_empty(&eviction_list)) {
-		obj_priv = list_first_entry(&eviction_list,
-					    struct drm_i915_gem_object,
-					    evict_list);
+		obj = list_first_entry(&eviction_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
 		if (ret == 0)
-			ret = i915_gem_object_unbind(&obj_priv->base);
-		list_del(&obj_priv->evict_list);
-		drm_gem_object_unreference(&obj_priv->base);
+			ret = i915_gem_object_unbind(obj);
+
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	return ret;
 }
 
 int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
@@ -176,36 +196,22 @@
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
-	ret = i915_gem_evict_inactive(dev);
-	if (ret)
-		return ret;
-
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!lists_empty);
-
-	return 0;
+	return i915_gem_evict_inactive(dev, purgeable_only);
 }
 
 /** Unbinds all inactive objects. */
 int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
 
-	while (!list_empty(&dev_priv->mm.inactive_list)) {
-		struct drm_gem_object *obj;
-		int ret;
-
-		obj = &list_first_entry(&dev_priv->mm.inactive_list,
-					struct drm_i915_gem_object,
-					mm_list)->base;
-
-		ret = i915_gem_object_unbind(obj);
-		if (ret != 0) {
-			DRM_ERROR("Error unbinding object: %d\n", ret);
-			return ret;
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list, mm_list) {
+		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+			int ret = i915_gem_object_unbind(obj);
+			if (ret)
+				return ret;
 		}
 	}
 
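
The eviction rewrite above feeds objects to the drm_mm scan helpers in LRU order (inactive first, then active objects without pending writes, then the flushing list) and unwinds whatever the scan did not need. The fragment below is only a loose user-space model of that ordering under simplifying assumptions: it replaces drm_mm_scan_add_block() and the intrusive lists with a plain array and a byte budget, keeping only the rule that pinned objects are never eviction candidates.

#include <stdbool.h>
#include <stdio.h>

/* Toy object record; field names are illustrative, not the driver's. */
struct obj { unsigned size; bool pinned; };

/* Accumulate unpinned objects in LRU order until the freed size covers the
 * request, roughly mirroring the candidate selection in
 * i915_gem_evict_something(). */
static unsigned plan_eviction(const struct obj *lru, int n, unsigned need)
{
	unsigned freed = 0;

	for (int i = 0; i < n && freed < need; i++) {
		if (lru[i].pinned)
			continue;
		freed += lru[i].size;
		printf("would evict object %d (%u bytes)\n", i, lru[i].size);
	}
	return freed;
}

int main(void)
{
	struct obj lru[] = {
		{  4096, false },
		{  8192, true  },	/* pinned: skipped */
		{ 16384, false },
	};

	unsigned freed = plan_eviction(lru, 3, 12288);
	printf("planned %u of 12288 bytes\n", freed);
	return 0;
}
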
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 0000000..dcfdf41
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1377 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+struct change_domains {
+	uint32_t invalidate_domains;
+	uint32_t flush_domains;
+	uint32_t flush_rings;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Mapped to GTT
+ *	4. Read by GPU
+ *	5. Unmapped from GTT
+ *	6. Freed
+ *
+ *	Let's take these a step at a time
+ *
+ *	1. Allocated
+ *		Pages allocated from the kernel may still have
+ *		cache contents, so we set them to (CPU, CPU) always.
+ *	2. Written by CPU (using pwrite)
+ *		The pwrite function calls set_domain (CPU, CPU) and
+ *		this function does nothing (as nothing changes)
+ *	3. Mapped by GTT
+ *		This function asserts that the object is not
+ *		currently in any GPU-based read or write domains
+ *	4. Read by GPU
+ *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ *		As write_domain is zero, this function adds in the
+ *		current read domains (CPU+COMMAND, 0).
+ *		flush_domains is set to CPU.
+ *		invalidate_domains is set to COMMAND
+ *		clflush is run to get data out of the CPU caches
+ *		then i915_dev_set_domain calls i915_gem_flush to
+ *		emit an MI_FLUSH and drm_agp_chipset_flush
+ *	5. Unmapped from GTT
+ *		i915_gem_object_unbind calls set_domain (CPU, CPU)
+ *		flush_domains and invalidate_domains end up both zero
+ *		so no flushing/invalidating happens
+ *	6. Freed
+ *		yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ *	1. Allocated
+ *	2. Mapped to GTT
+ *	3. Read/written by GPU
+ *	4. set_domain to (CPU,CPU)
+ *	5. Read/written by CPU
+ *	6. Read/written by GPU
+ *
+ *	1. Allocated
+ *		Same as last example, (CPU, CPU)
+ *	2. Mapped to GTT
+ *		Nothing changes (assertions find that it is not in the GPU)
+ *	3. Read/written by GPU
+ *		execbuffer calls set_domain (RENDER, RENDER)
+ *		flush_domains gets CPU
+ *		invalidate_domains gets GPU
+ *		clflush (obj)
+ *		MI_FLUSH and drm_agp_chipset_flush
+ *	4. set_domain (CPU, CPU)
+ *		flush_domains gets GPU
+ *		invalidate_domains gets CPU
+ *		wait_rendering (obj) to make sure all drawing is complete.
+ *		This will include an MI_FLUSH to get the data from GPU
+ *		to memory
+ *		clflush (obj) to invalidate the CPU cache
+ *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ *	5. Read/written by CPU
+ *		cache lines are loaded and dirtied
+ *	6. Read/written by GPU
+ *		Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Read by GPU
+ *	4. Updated (written) by CPU again
+ *	5. Read by GPU
+ *
+ *	1. Allocated
+ *		(CPU, CPU)
+ *	2. Written by CPU
+ *		(CPU, CPU)
+ *	3. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ *	4. Updated (written) by CPU again
+ *		(CPU, CPU)
+ *		flush_domains = 0 (no previous write domain)
+ *		invalidate_domains = 0 (no new read domains)
+ *	5. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+				  struct intel_ring_buffer *ring,
+				  struct change_domains *cd)
+{
+	uint32_t invalidate_domains = 0, flush_domains = 0;
+
+	/*
+	 * If the object isn't moving to a new write domain,
+	 * let the object stay in multiple read domains
+	 */
+	if (obj->base.pending_write_domain == 0)
+		obj->base.pending_read_domains |= obj->base.read_domains;
+
+	/*
+	 * Flush the current write domain if
+	 * the new read domains don't match. Invalidate
+	 * any read domains which differ from the old
+	 * write domain
+	 */
+	if (obj->base.write_domain &&
+	    (((obj->base.write_domain != obj->base.pending_read_domains ||
+	       obj->ring != ring)) ||
+	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+		flush_domains |= obj->base.write_domain;
+		invalidate_domains |=
+			obj->base.pending_read_domains & ~obj->base.write_domain;
+	}
+	/*
+	 * Invalidate any read caches which may have
+	 * stale data. That is, any new read domains.
+	 */
+	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+		i915_gem_clflush_object(obj);
+
+	/* blow away mappings if mapped through GTT */
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+		i915_gem_release_mmap(obj);
+
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains).  So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+		obj->base.pending_write_domain = obj->base.write_domain;
+
+	cd->invalidate_domains |= invalidate_domains;
+	cd->flush_domains |= flush_domains;
+	if (flush_domains & I915_GEM_GPU_DOMAINS)
+		cd->flush_rings |= obj->ring->id;
+	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+		cd->flush_rings |= ring->id;
+}
+
+struct eb_objects {
+	int and;
+	struct hlist_head buckets[0];
+};
+
+static struct eb_objects *
+eb_create(int size)
+{
+	struct eb_objects *eb;
+	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+	while (count > size)
+		count >>= 1;
+	eb = kzalloc(count*sizeof(struct hlist_head) +
+		     sizeof(struct eb_objects),
+		     GFP_KERNEL);
+	if (eb == NULL)
+		return eb;
+
+	eb->and = count - 1;
+	return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+	hlist_add_head(&obj->exec_node,
+		       &eb->buckets[obj->exec_handle & eb->and]);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct drm_i915_gem_object *obj;
+
+	head = &eb->buckets[handle & eb->and];
+	hlist_for_each(node, head) {
+		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+		if (obj->exec_handle == handle)
+			return obj;
+	}
+
+	return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+	kfree(eb);
+}
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct eb_objects *eb,
+				   struct drm_i915_gem_relocation_entry *reloc)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_gem_object *target_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
+
+	/* we already hold a reference to all valid objects */
+	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+	if (unlikely(target_obj == NULL))
+		return -ENOENT;
+
+	target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+	DRM_INFO("%s: obj %p offset %08x target %d "
+		 "read %08x write %08x gtt %08x "
+		 "presumed %08x delta %08x\n",
+		 __func__,
+		 obj,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
+		 (int) target_offset,
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
+#endif
+
+	/* The target buffer should have appeared before us in the
+	 * exec_object list, so it should have a GTT space bound by now.
+	 */
+	if (unlikely(target_offset == 0)) {
+		DRM_ERROR("No GTT space found for object %d\n",
+			  reloc->target_handle);
+		return ret;
+	}
+
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return ret;
+	}
+	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+		DRM_ERROR("reloc with read/write CPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return ret;
+	}
+	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+		     reloc->write_domain != target_obj->pending_write_domain)) {
+		DRM_ERROR("Write domain conflict: "
+			  "obj %p target %d offset %d "
+			  "new %08x old %08x\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
+			  target_obj->pending_write_domain);
+		return ret;
+	}
+
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
+
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		return 0;
+
+	/* Check that the relocation address is valid... */
+	if (unlikely(reloc->offset > obj->base.size - 4)) {
+		DRM_ERROR("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		return ret;
+	}
+	if (unlikely(reloc->offset & 3)) {
+		DRM_ERROR("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		return ret;
+	}
+
+	/* and points to somewhere within the target object. */
+	if (unlikely(reloc->delta >= target_obj->size)) {
+		DRM_ERROR("Relocation beyond target object bounds: "
+			  "obj %p target %d delta %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->delta,
+			  (int) target_obj->size);
+		return ret;
+	}
+
+	reloc->delta += target_offset;
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
+
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret)
+			return ret;
+
+		/* Map the page containing the relocation we're going to perform.  */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
+
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct eb_objects *eb)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
+		if (ret)
+			return ret;
+
+		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+					    &reloc.presumed_offset,
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct eb_objects *eb,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct eb_objects *eb,
+			     struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		ret = i915_gem_execbuffer_relocate_object(obj, eb);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+			    struct drm_file *file,
+			    struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	int ret, retry;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	struct list_head ordered_objects;
+
+	INIT_LIST_HEAD(&ordered_objects);
+	while (!list_empty(objects)) {
+		struct drm_i915_gem_exec_object2 *entry;
+		bool need_fence, need_mappable;
+
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		entry = obj->exec_entry;
+
+		need_fence =
+			has_fenced_gpu_access &&
+			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			obj->tiling_mode != I915_TILING_NONE;
+		need_mappable =
+			entry->relocation_count ? true : need_fence;
+
+		if (need_mappable)
+			list_move(&obj->exec_list, &ordered_objects);
+		else
+			list_move_tail(&obj->exec_list, &ordered_objects);
+
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+	}
+	list_splice(&ordered_objects, objects);
+
+	/* Attempt to pin all of the buffers into the GTT.
+	 * This is done in 3 phases:
+	 *
+	 * 1a. Unbind all objects that do not match the GTT constraints for
+	 *     the execbuffer (fenceable, mappable, alignment etc).
+	 * 1b. Increment pin count for already bound objects.
+	 * 2.  Bind new objects.
+	 * 3.  Decrement pin count.
+	 *
+	 * This avoids unnecessary unbinding of later objects in order to make
+	 * room for the earlier objects *unless* we need to defragment.
+	 */
+	retry = 0;
+	do {
+		ret = 0;
+
+		/* Unbind any ill-fitting objects or pin. */
+		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+			bool need_fence, need_mappable;
+			if (!obj->gtt_space)
+				continue;
+
+			need_fence =
+				has_fenced_gpu_access &&
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+			need_mappable =
+				entry->relocation_count ? true : need_fence;
+
+			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+			    (need_mappable && !obj->map_and_fenceable))
+				ret = i915_gem_object_unbind(obj);
+			else
+				ret = i915_gem_object_pin(obj,
+							  entry->alignment,
+							  need_mappable);
+			if (ret)
+				goto err;
+
+			entry++;
+		}
+
+		/* Bind fresh objects */
+		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+			bool need_fence;
+
+			need_fence =
+				has_fenced_gpu_access &&
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+
+			if (!obj->gtt_space) {
+				bool need_mappable =
+					entry->relocation_count ? true : need_fence;
+
+				ret = i915_gem_object_pin(obj,
+							  entry->alignment,
+							  need_mappable);
+				if (ret)
+					break;
+			}
+
+			if (has_fenced_gpu_access) {
+				if (need_fence) {
+					ret = i915_gem_object_get_fence(obj, ring, 1);
+					if (ret)
+						break;
+				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+					   obj->tiling_mode == I915_TILING_NONE) {
+					/* XXX pipelined! */
+					ret = i915_gem_object_put_fence(obj);
+					if (ret)
+						break;
+				}
+				obj->pending_fenced_gpu_access = need_fence;
+			}
+
+			entry->offset = obj->gtt_offset;
+		}
+
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list) {
+			if (obj->gtt_space)
+				i915_gem_object_unpin(obj);
+		}
+
+		if (ret != -ENOSPC || retry > 1)
+			return ret;
+
+		/* First attempt, just clear anything that is purgeable.
+		 * Second attempt, clear the entire GTT.
+		 */
+		ret = i915_gem_evict_everything(ring->dev, retry == 0);
+		if (ret)
+			return ret;
+
+		retry++;
+	} while (1);
+
+err:
+	obj = list_entry(obj->exec_list.prev,
+			 struct drm_i915_gem_object,
+			 exec_list);
+	while (objects != &obj->exec_list) {
+		if (obj->gtt_space)
+			i915_gem_object_unpin(obj);
+
+		obj = list_entry(obj->exec_list.prev,
+				 struct drm_i915_gem_object,
+				 exec_list);
+	}
+
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct intel_ring_buffer *ring,
+				  struct list_head *objects,
+				  struct eb_objects *eb,
+				  struct drm_i915_gem_exec_object2 *exec,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	struct drm_i915_gem_object *obj;
+	int *reloc_offset;
+	int i, total, ret;
+
+	/* We may process another execbuffer during the unlock... */
+	while (!list_empty(objects)) {
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec[i].relocation_count;
+
+	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL || reloc_offset == NULL) {
+		drm_free_large(reloc);
+		drm_free_large(reloc_offset);
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec[i].relocation_count * sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		reloc_offset[i] = total;
+		total += exec[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	/* reacquire the objects */
+	eb_reset(eb);
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+							exec[i].handle));
+		if (obj == NULL) {
+			DRM_ERROR("Invalid object handle %d at index %d\n",
+				   exec[i].handle, i);
+			ret = -ENOENT;
+			goto err;
+		}
+
+		list_add_tail(&obj->exec_list, objects);
+		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
+		eb_add_object(eb, obj);
+	}
+
+	ret = i915_gem_execbuffer_reserve(ring, file, objects);
+	if (ret)
+		goto err;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		int offset = obj->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+							       reloc + reloc_offset[offset]);
+		if (ret)
+			goto err;
+	}
+
+	/* Leave the user relocations as they are; this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	drm_free_large(reloc_offset);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_flush(struct drm_device *dev,
+			  uint32_t invalidate_domains,
+			  uint32_t flush_domains,
+			  uint32_t flush_rings)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
+	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *to)
+{
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
+	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+		return i915_gem_object_wait_rendering(obj, true);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_rendering_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
+	if (seqno == from->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(obj->base.dev, NULL, request, from);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	from->sync_seqno[idx] = seqno;
+	return intel_ring_sync(to, from, seqno - 1);
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+				struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	struct change_domains cd;
+	int ret;
+
+	cd.invalidate_domains = 0;
+	cd.flush_domains = 0;
+	cd.flush_rings = 0;
+	list_for_each_entry(obj, objects, exec_list)
+		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+	if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 cd.invalidate_domains,
+			 cd.flush_domains);
+#endif
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(obj, objects, exec_list) {
+		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+		   int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+		int length; /* limited by fault_in_pages_readable() */
+
+		/* First check for malicious input causing overflow */
+		if (exec[i].relocation_count >
+		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+			return -EINVAL;
+
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
+		if (!access_ok(VERIFY_READ, ptr, length))
+			return -EFAULT;
+
+		/* we may also need to update the presumed offsets */
+		if (!access_ok(VERIFY_WRITE, ptr, length))
+			return -EFAULT;
+
+		if (fault_in_pages_readable(ptr, length))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
+				   struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	int flips;
+
+	/* Check for any pending flips. As we only maintain a flip queue depth
+	 * of 1, we can simply insert a WAIT for the next display flip prior
+	 * to executing the batch and avoid stalling the CPU.
+	 */
+	flips = 0;
+	list_for_each_entry(obj, objects, exec_list) {
+		if (obj->base.write_domain)
+			flips |= atomic_read(&obj->pending_flip);
+	}
+	if (flips) {
+		int plane, flip_mask, ret;
+
+		for (plane = 0; flips >> plane; plane++) {
+			if (((flips >> plane) & 1) == 0)
+				continue;
+
+			if (plane)
+				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+			else
+				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+			ret = intel_ring_begin(ring, 2);
+			if (ret)
+				return ret;
+
+			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
+		}
+	}
+
+	return 0;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+				   struct intel_ring_buffer *ring,
+				   u32 seqno)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		obj->base.read_domains = obj->base.pending_read_domains;
+		obj->base.write_domain = obj->base.pending_write_domain;
+		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+		i915_gem_object_move_to_active(obj, ring, seqno);
+		if (obj->base.write_domain) {
+			obj->dirty = 1;
+			obj->pending_gpu_write = true;
+			list_move_tail(&obj->gpu_write_list,
+				       &ring->gpu_write_list);
+			intel_mark_busy(ring->dev, obj);
+		}
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->base.read_domains,
+						    obj->base.write_domain);
+	}
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+				    struct drm_file *file,
+				    struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_request *request;
+	u32 invalidate;
+
+	/*
+	 * Ensure that the commands in the batch buffer are
+	 * finished before the interrupt fires.
+	 *
+	 * The sampler always gets flushed on i965 (sigh).
+	 */
+	invalidate = I915_GEM_DOMAIN_COMMAND;
+	if (INTEL_INFO(dev)->gen >= 4)
+		invalidate |= I915_GEM_DOMAIN_SAMPLER;
+	if (ring->flush(ring, invalidate, 0)) {
+		i915_gem_next_request_seqno(dev, ring);
+		return;
+	}
+
+	/* Add a breadcrumb for the completion of the batch buffer */
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL || i915_add_request(dev, file, request, ring)) {
+		i915_gem_next_request_seqno(dev, ring);
+		kfree(request);
+	}
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct list_head objects;
+	struct eb_objects *eb;
+	struct drm_i915_gem_object *batch_obj;
+	struct drm_clip_rect *cliprects = NULL;
+	struct intel_ring_buffer *ring;
+	u32 exec_start, exec_len;
+	u32 seqno;
+	int ret, mode, i;
+
+	if (!i915_gem_check_execbuffer(args)) {
+		DRM_ERROR("execbuf with invalid offset/length\n");
+		return -EINVAL;
+	}
+
+	ret = validate_exec_list(exec, args->buffer_count);
+	if (ret)
+		return ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+	switch (args->flags & I915_EXEC_RING_MASK) {
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		ring = &dev_priv->ring[RCS];
+		break;
+	case I915_EXEC_BSD:
+		if (!HAS_BSD(dev)) {
+			DRM_ERROR("execbuf with invalid ring (BSD)\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->ring[VCS];
+		break;
+	case I915_EXEC_BLT:
+		if (!HAS_BLT(dev)) {
+			DRM_ERROR("execbuf with invalid ring (BLT)\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->ring[BCS];
+		break;
+	default:
+		DRM_ERROR("execbuf with unknown ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
+
+	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	switch (mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (ring == &dev_priv->ring[RCS] &&
+		    mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4)
+				return -EINVAL;
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+				return -EINVAL;
+
+			ret = intel_ring_begin(ring, 4);
+			if (ret)
+				return ret;
+
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit(ring, INSTPM);
+			intel_ring_emit(ring,
+					I915_EXEC_CONSTANTS_MASK << 16 | mode);
+			intel_ring_advance(ring);
+
+			dev_priv->relative_constants_mode = mode;
+		}
+		break;
+	default:
+		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+		return -EINVAL;
+	}
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	if (args->num_cliprects != 0) {
+		if (ring != &dev_priv->ring[RCS]) {
+			DRM_ERROR("clip rectangles are only valid with the render ring\n");
+			return -EINVAL;
+		}
+
+		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+				    GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto pre_mutex_err;
+		}
+
+		if (copy_from_user(cliprects,
+				     (struct drm_clip_rect __user *)(uintptr_t)
+				     args->cliprects_ptr,
+				     sizeof(*cliprects)*args->num_cliprects)) {
+			ret = -EFAULT;
+			goto pre_mutex_err;
+		}
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto pre_mutex_err;
+
+	if (dev_priv->mm.suspended) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -EBUSY;
+		goto pre_mutex_err;
+	}
+
+	eb = eb_create(args->buffer_count);
+	if (eb == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+
+	/* Look up object handles */
+	INIT_LIST_HEAD(&objects);
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+							exec[i].handle));
+		if (obj == NULL) {
+			DRM_ERROR("Invalid object handle %d at index %d\n",
+				   exec[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			ret = -ENOENT;
+			goto err;
+		}
+
+		if (!list_empty(&obj->exec_list)) {
+			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+				   obj, exec[i].handle, i);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		list_add_tail(&obj->exec_list, &objects);
+		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
+		eb_add_object(eb, obj);
+	}
+
+	/* take note of the batch buffer before we might reorder the lists */
+	batch_obj = list_entry(objects.prev,
+			       struct drm_i915_gem_object,
+			       exec_list);
+
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+	if (ret)
+		goto err;
+
+	/* The objects are in their final locations, apply the relocations. */
+	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+								&objects, eb,
+								exec,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
+		if (ret)
+			goto err;
+	}
+
+	/* Set the pending read domains for the batch buffer to COMMAND */
+	if (batch_obj->base.pending_write_domain) {
+		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+	if (ret)
+		goto err;
+
+	ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
+	if (ret)
+		goto err;
+
+	seqno = i915_gem_next_request_seqno(dev, ring);
+	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+		if (seqno < ring->sync_seqno[i]) {
+			/* The GPU can not handle its semaphore value wrapping,
+			 * so every billion or so execbuffers, we need to stall
+			 * the GPU in order to reset the counters.
+			 */
+			ret = i915_gpu_idle(dev);
+			if (ret)
+				goto err;
+
+			BUG_ON(ring->sync_seqno[i]);
+		}
+	}
+
+	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_len = args->batch_len;
+	if (cliprects) {
+		for (i = 0; i < args->num_cliprects; i++) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    args->DR1, args->DR4);
+			if (ret)
+				goto err;
+
+			ret = ring->dispatch_execbuffer(ring,
+							exec_start, exec_len);
+			if (ret)
+				goto err;
+		}
+	} else {
+		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+		if (ret)
+			goto err;
+	}
+
+	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+	i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+	eb_destroy(eb);
+	while (!list_empty(&objects)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+	kfree(cliprects);
+	return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (INTEL_INFO(dev)->gen < 4)
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = I915_EXEC_RENDER;
+
+	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
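
eb_create(), eb_add_object() and eb_get_object() above give execbuffer a small handle-to-object map: a power-of-two number of hash buckets derived from the buffer count, with the user handle masked by (count - 1) to select a bucket. The stand-alone sketch below mimics that shape with invented names; unlike the kernel version it chains heap-allocated nodes instead of embedding hlist nodes in the object, so treat it as an analogy rather than the driver's data structure.

#include <stdio.h>
#include <stdlib.h>

struct node { unsigned long handle; void *obj; struct node *next; };

struct eb { unsigned long mask; struct node *buckets[]; };

/* Pick a power-of-two bucket count no larger than a fixed cap, shrinking
 * toward the expected number of objects (cf. eb_create() above). */
static struct eb *eb_create(unsigned hint)
{
	unsigned count = 64;
	struct eb *eb;

	while (count > hint && count > 1)
		count >>= 1;

	eb = calloc(1, sizeof(*eb) + count * sizeof(struct node *));
	if (eb)
		eb->mask = count - 1;
	return eb;
}

static int eb_add(struct eb *eb, unsigned long handle, void *obj)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->handle = handle;
	n->obj = obj;
	n->next = eb->buckets[handle & eb->mask];	/* chain on collision */
	eb->buckets[handle & eb->mask] = n;
	return 0;
}

static void *eb_get(struct eb *eb, unsigned long handle)
{
	for (struct node *n = eb->buckets[handle & eb->mask]; n; n = n->next)
		if (n->handle == handle)
			return n->obj;
	return NULL;
}

int main(void)
{
	struct eb *eb = eb_create(8);
	int a = 1, b = 2;

	if (!eb)
		return 1;
	eb_add(eb, 3, &a);
	eb_add(eb, 3 + eb->mask + 1, &b);	/* deliberately collides with 3 */
	printf("%d %d\n", *(int *)eb_get(eb, 3),
	       *(int *)eb_get(eb, 3 + eb->mask + 1));
	return 0;
}
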
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 0000000..70433ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		i915_gem_clflush_object(obj);
+
+		if (dev_priv->mm.gtt->needs_dmar) {
+			BUG_ON(!obj->sg_list);
+
+			intel_gtt_insert_sg_entries(obj->sg_list,
+						    obj->num_sg,
+						    obj->gtt_space->start
+							>> PAGE_SHIFT,
+						    obj->agp_type);
+		} else
+			intel_gtt_insert_pages(obj->gtt_space->start
+						   >> PAGE_SHIFT,
+					       obj->base.size >> PAGE_SHIFT,
+					       obj->pages,
+					       obj->agp_type);
+	}
+
+	intel_gtt_chipset_flush();
+}
+
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev_priv->mm.gtt->needs_dmar) {
+		ret = intel_gtt_map_memory(obj->pages,
+					   obj->base.size >> PAGE_SHIFT,
+					   &obj->sg_list,
+					   &obj->num_sg);
+		if (ret != 0)
+			return ret;
+
+		intel_gtt_insert_sg_entries(obj->sg_list,
+					    obj->num_sg,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    obj->agp_type);
+	} else
+		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+				       obj->base.size >> PAGE_SHIFT,
+				       obj->pages,
+				       obj->agp_type);
+
+	return 0;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+			      obj->base.size >> PAGE_SHIFT);
+
+	if (obj->sg_list) {
+		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+		obj->sg_list = NULL;
+	}
+}
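
Both helpers above convert the object's GTT offset and size from bytes to page-granular entry indices before calling into the intel-gtt layer (intel_gtt_insert_pages(), intel_gtt_insert_sg_entries(), intel_gtt_clear_range()). The toy program below only demonstrates that PAGE_SHIFT arithmetic with made-up values; it does not talk to any real GTT.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

int main(void)
{
	unsigned long gtt_start = 3 * PAGE_SIZE;	/* like obj->gtt_space->start */
	unsigned long obj_size  = 5 * PAGE_SIZE;	/* like obj->base.size */

	unsigned long first_entry = gtt_start >> PAGE_SHIFT;
	unsigned long num_entries = obj_size >> PAGE_SHIFT;

	/* The bind path would populate exactly these GTT entries. */
	printf("entries [%lu, %lu)\n", first_entry, first_entry + num_entries);
	return 0;
}
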
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de..22a32b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@
 }
 
 /* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
 	int tile_width;
@@ -232,32 +232,44 @@
 	return true;
 }
 
-bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (obj_priv->gtt_space == NULL)
-		return true;
+	u32 size;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_INFO(obj->base.dev)->gen >= 4)
 		return true;
 
-	if (obj_priv->gtt_offset & (obj->size - 1))
-		return false;
-
-	if (IS_GEN3(dev)) {
-		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
 			return false;
 	}
 
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	if (INTEL_INFO(obj->base.dev)->gen == 3)
+		size = 1024*1024;
+	else
+		size = 512*1024;
+
+	while (size < obj->base.size)
+		size <<= 1;
+
+	if (obj->gtt_space->size != size)
+		return false;
+
+	if (obj->gtt_offset & (size - 1))
+		return false;
+
 	return true;
 }
 
@@ -267,30 +279,29 @@
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_set_tiling *args = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_gem_check_is_wedged(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL)
 		return -ENOENT;
-	obj_priv = to_intel_bo(obj);
 
-	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
-		drm_gem_object_unreference_unlocked(obj);
+	if (!i915_tiling_ok(dev,
+			    args->stride, obj->base.size, args->tiling_mode)) {
+		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EINVAL;
 	}
 
-	if (obj_priv->pin_count) {
-		drm_gem_object_unreference_unlocked(obj);
+	if (obj->pin_count) {
+		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
 
@@ -324,34 +335,28 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	if (args->tiling_mode != obj_priv->tiling_mode ||
-	    args->stride != obj_priv->stride) {
+	if (args->tiling_mode != obj->tiling_mode ||
+	    args->stride != obj->stride) {
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
 		 * need to ensure that any fence register is cleared.
 		 */
-		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
-			ret = i915_gem_object_unbind(obj);
-		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			ret = i915_gem_object_put_fence_reg(obj, true);
-		else
-			i915_gem_release_mmap(obj);
+		i915_gem_release_mmap(obj);
 
-		if (ret != 0) {
-			args->tiling_mode = obj_priv->tiling_mode;
-			args->stride = obj_priv->stride;
-			goto err;
-		}
+		obj->map_and_fenceable =
+			obj->gtt_space == NULL ||
+			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj_priv->tiling_mode = args->tiling_mode;
-		obj_priv->stride = args->stride;
+		obj->tiling_changed = true;
+		obj->tiling_mode = args->tiling_mode;
+		obj->stride = args->stride;
 	}
-err:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -359,22 +364,20 @@
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_get_tiling *args = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL)
 		return -ENOENT;
-	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 
-	args->tiling_mode = obj_priv->tiling_mode;
-	switch (obj_priv->tiling_mode) {
+	args->tiling_mode = obj->tiling_mode;
+	switch (obj->tiling_mode) {
 	case I915_TILING_X:
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
 		break;
@@ -394,7 +397,7 @@
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -424,46 +427,44 @@
 }
 
 void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count = obj->size >> PAGE_SHIFT;
+	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
 		return;
 
-	if (obj_priv->bit_17 == NULL)
+	if (obj->bit_17 == NULL)
 		return;
 
 	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
 		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj_priv->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj_priv->pages[i]);
-			set_page_dirty(obj_priv->pages[i]);
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(obj->pages[i]);
+			set_page_dirty(obj->pages[i]);
 		}
 	}
 }
 
 void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count = obj->size >> PAGE_SHIFT;
+	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
 		return;
 
-	if (obj_priv->bit_17 == NULL) {
-		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
 					   sizeof(long), GFP_KERNEL);
-		if (obj_priv->bit_17 == NULL) {
+		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
 			return;
@@ -471,9 +472,9 @@
 	}
 
 	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
-			__set_bit(i, obj_priv->bit_17);
+		if (page_to_phys(obj->pages[i]) & (1 << 17))
+			__set_bit(i, obj->bit_17);
 		else
-			__clear_bit(i, obj_priv->bit_17);
+			__clear_bit(i, obj->bit_17);
 	}
 }
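Aside: the bit-17 bookkeeping above reduces to one bit per page — remember bit 17 of each page's physical address when the pages are saved, and re-swizzle any page whose bit has flipped when they come back. A minimal standalone sketch of that bookkeeping, using plain arrays and stand-in physical addresses instead of struct page, purely for illustration:

/* Minimal sketch of the bit-17 bookkeeping, using plain arrays. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NPAGES 4

/* Save phase: remember bit 17 of each page's physical address. */
static void save_bit_17(const uint64_t phys[NPAGES], bool bit_17[NPAGES])
{
	for (int i = 0; i < NPAGES; i++)
		bit_17[i] = (phys[i] >> 17) & 1;
}

/* Restore phase: a page needs re-swizzling if bit 17 changed. */
static bool needs_swizzle(uint64_t new_phys, bool saved_bit_17)
{
	return ((new_phys >> 17) & 1) != saved_bit_17;
}

int main(void)
{
	uint64_t before[NPAGES] = { 0x00000, 0x20000, 0x40000, 0x60000 };
	uint64_t after[NPAGES]  = { 0x20000, 0x20000, 0x40000, 0x40000 };
	bool bit_17[NPAGES];

	save_bit_17(before, bit_17);

	/* Pages 0 and 3 moved across a bit-17 boundary. */
	assert(needs_swizzle(after[0], bit_17[0]));
	assert(!needs_swizzle(after[1], bit_17[1]));
	assert(!needs_swizzle(after[2], bit_17[2]));
	assert(needs_swizzle(after[3], bit_17[3]));
	return 0;
}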
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c..b8e509a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,64 +64,24 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
 					 DRM_I915_VBLANK_PIPE_B)
 
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
-		dev_priv->gt_irq_mask_reg &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-		(void) I915_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
-		dev_priv->gt_irq_mask_reg |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-		(void) I915_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(DEIMR);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
 	}
 }
 
 static inline void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(DEIMR);
-	}
-}
-
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(IMR);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
 	}
 }
 
@@ -144,7 +104,7 @@
 		dev_priv->pipestat[pipe] |= mask;
 		/* Enable the interrupt, clear any pending status */
 		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
-		(void) I915_READ(reg);
+		POSTING_READ(reg);
 	}
 }
 
@@ -156,16 +116,19 @@
 
 		dev_priv->pipestat[pipe] &= ~mask;
 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
-		(void) I915_READ(reg);
+		POSTING_READ(reg);
 	}
 }
 
 /**
  * intel_enable_asle - enable ASLE interrupt for OpRegion
  */
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +139,8 @@
 			i915_enable_pipestat(dev_priv, 0,
 					     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 /**
@@ -243,6 +208,92 @@
 	return I915_READ(reg);
 }
 
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, htotal, vtotal;
+	bool in_vbl = true;
+	int ret = 0;
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+					"pipe %d\n", pipe);
+		return 0;
+	}
+
+	/* Get vtotal. */
+	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* No obvious pixelcount register. Only query vertical
+		 * scanout position from Display scan line register.
+		 */
+		position = I915_READ(PIPEDSL(pipe));
+
+		/* Decode into vertical scanout position. Don't have
+		 * horizontal scanout position.
+		 */
+		*vpos = position & 0x1fff;
+		*hpos = 0;
+	} else {
+		/* Have access to pixelcount since start of frame.
+		 * We can split this into vertical and horizontal
+		 * scanout position.
+		 */
+		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
+
+	/* Query vblank area. */
+	vbl = I915_READ(VBLANK(pipe));
+
+	/* Test position against vblank region. */
+	vbl_start = vbl & 0x1fff;
+	vbl_end = (vbl >> 16) & 0x1fff;
+
+	if ((*vpos < vbl_start) || (*vpos > vbl_end))
+		in_vbl = false;
+
+	/* Inside "upper part" of vblank area? Apply corrective offset: */
+	if (in_vbl && (*vpos >= vbl_start))
+		*vpos = *vpos - vtotal;
+
+	/* Readouts valid? */
+	if (vbl > 0)
+		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
+
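Aside: on pre-gen4 parts the scanout position above is recovered from a single running pixel counter — dividing by the horizontal total gives the line, the remainder gives the pixel within it, and lines inside the vblank window are reported relative to the end of the frame. A self-contained sketch of just that arithmetic, with the register masks hard-coded as in the hunk and made-up timing values:

/* Sketch of the pre-gen4 pixelcount -> (vpos, hpos) decode above. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct scanout { int vpos, hpos; bool in_vbl; };

static struct scanout decode_pixelcount(uint32_t pixel, uint32_t htotal_reg,
					uint32_t vtotal_reg, uint32_t vbl_reg)
{
	struct scanout s;
	int htotal = 1 + ((htotal_reg >> 16) & 0x1fff);
	int vtotal = 1 + ((vtotal_reg >> 16) & 0x1fff);
	int vbl_start = vbl_reg & 0x1fff;
	int vbl_end = (vbl_reg >> 16) & 0x1fff;

	/* Split the running pixel counter into line and pixel-in-line. */
	s.vpos = pixel / htotal;
	s.hpos = pixel - s.vpos * htotal;

	/* Inside the vblank window? Report relative to end of frame. */
	s.in_vbl = (s.vpos >= vbl_start && s.vpos <= vbl_end);
	if (s.in_vbl)
		s.vpos -= vtotal;

	return s;
}

int main(void)
{
	/* 800x525 total raster (e.g. 640x480), vblank lines 490..524. */
	uint32_t htotal = (799u << 16), vtotal = (524u << 16);
	uint32_t vbl = (524u << 16) | 490u;

	struct scanout s = decode_pixelcount(100 * 800 + 40, htotal, vtotal, vbl);
	assert(s.vpos == 100 && s.hpos == 40 && !s.in_vbl);

	s = decode_pixelcount(500 * 800, htotal, vtotal, vbl);
	assert(s.in_vbl && s.vpos == 500 - 525);
	return 0;
}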
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+			      int *max_error,
+			      struct timeval *vblank_time,
+			      unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get drm_crtc to timestamp: */
+	drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags, drmcrtc);
+}
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
@@ -297,20 +348,105 @@
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(dev, ring);
-	ring->irq_gem_seqno = seqno;
+	u32 seqno = ring->get_seqno(ring);
+
 	trace_i915_gem_request_complete(dev, seqno);
+
+	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
+
 	dev_priv->hangcheck_count = 0;
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 }
 
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u8 new_delay = dev_priv->cur_delay;
+	u32 pm_iir;
+
+	pm_iir = I915_READ(GEN6_PMIIR);
+	if (!pm_iir)
+		return;
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+		if (dev_priv->cur_delay != dev_priv->max_delay)
+			new_delay = dev_priv->cur_delay + 1;
+		if (new_delay > dev_priv->max_delay)
+			new_delay = dev_priv->max_delay;
+	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+		if (dev_priv->cur_delay != dev_priv->min_delay)
+			new_delay = dev_priv->cur_delay - 1;
+		if (new_delay < dev_priv->min_delay) {
+			new_delay = dev_priv->min_delay;
+			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+				   ((new_delay << 16) & 0x3f0000));
+		} else {
+			/* Make sure we continue to get down interrupts
+			 * until we hit the minimum frequency */
+			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+		}
+
+	}
+
+	gen6_set_rps(dev, new_delay);
+	dev_priv->cur_delay = new_delay;
+
+	I915_WRITE(GEN6_PMIIR, pm_iir);
+}
+
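Aside: the GEN6 turbo handler above implements a simple policy — nudge the current frequency step up or down by one and clamp it to the [min, max] window. A standalone sketch of that policy with the interrupt bits and register writes omitted, so illustrative only:

/* Sketch of the one-step-up / one-step-down RPS policy above. */
#include <assert.h>
#include <stdbool.h>

struct rps { int cur, min, max; };

static int next_delay(const struct rps *r, bool busy)
{
	int new_delay = r->cur;

	if (busy) {
		/* Up-threshold crossed: step towards the max. */
		if (r->cur != r->max)
			new_delay = r->cur + 1;
		if (new_delay > r->max)
			new_delay = r->max;
	} else {
		/* Down-threshold/timeout: step towards the min. */
		if (r->cur != r->min)
			new_delay = r->cur - 1;
		if (new_delay < r->min)
			new_delay = r->min;
	}
	return new_delay;
}

int main(void)
{
	struct rps r = { .cur = 5, .min = 3, .max = 7 };

	assert(next_delay(&r, true) == 6);	/* busy: one step up */
	assert(next_delay(&r, false) == 4);	/* idle: one step down */

	r.cur = r.max;
	assert(next_delay(&r, true) == r.max);	/* clamped at the top */
	r.cur = r.min;
	assert(next_delay(&r, false) == r.min);	/* clamped at the bottom */
	return 0;
}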
+static void pch_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 pch_iir;
+
+	pch_iir = I915_READ(SDEIIR);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_GMBUS)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK) {
+		u32 fdia, fdib;
+
+		fdia = I915_READ(FDI_RXA_IIR);
+		fdib = I915_READ(FDI_RXB_IIR);
+		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+	}
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
 	u32 hotplug_mask;
 	struct drm_i915_master_private *master_priv;
 	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +457,15 @@
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-	(void)I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
 	pch_iir = I915_READ(SDEIIR);
+	pm_iir = I915_READ(GEN6_PMIIR);
 
-	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+	    (!IS_GEN6(dev) || pm_iir == 0))
 		goto done;
 
 	if (HAS_PCH_CPT(dev))
@@ -344,12 +482,12 @@
 				READ_BREADCRUMB(dev_priv);
 	}
 
-	if (gt_iir & GT_PIPE_NOTIFY)
-		notify_ring(dev, &dev_priv->render_ring);
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->ring[RCS]);
 	if (gt_iir & bsd_usr_interrupt)
-		notify_ring(dev, &dev_priv->bsd_ring);
-	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->blt_ring);
+		notify_ring(dev, &dev_priv->ring[VCS]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[BCS]);
 
 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
@@ -371,14 +509,20 @@
 		drm_handle_vblank(dev, 1);
 
 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	if (de_iir & DE_PCH_EVENT) {
+		if (pch_iir & hotplug_mask)
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		pch_irq_handler(dev);
+	}
 
 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 		i915_handle_rps_change(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_pm_irq_handler(dev);
+
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +530,7 @@
 
 done:
 	I915_WRITE(DEIER, de_ier);
-	(void)I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	return ret;
 }
@@ -422,29 +566,23 @@
 
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
-			 struct drm_gem_object *src)
+i915_error_object_create(struct drm_i915_private *dev_priv,
+			 struct drm_i915_gem_object *src)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
-	struct drm_i915_gem_object *src_priv;
 	int page, page_count;
 	u32 reloc_offset;
 
-	if (src == NULL)
+	if (src == NULL || src->pages == NULL)
 		return NULL;
 
-	src_priv = to_intel_bo(src);
-	if (src_priv->pages == NULL)
-		return NULL;
-
-	page_count = src->size / PAGE_SIZE;
+	page_count = src->base.size / PAGE_SIZE;
 
 	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
-	reloc_offset = src_priv->gtt_offset;
+	reloc_offset = src->gtt_offset;
 	for (page = 0; page < page_count; page++) {
 		unsigned long flags;
 		void __iomem *s;
@@ -466,7 +604,7 @@
 		reloc_offset += PAGE_SIZE;
 	}
 	dst->page_count = page_count;
-	dst->gtt_offset = src_priv->gtt_offset;
+	dst->gtt_offset = src->gtt_offset;
 
 	return dst;
 
@@ -503,53 +641,98 @@
 	kfree(error);
 }
 
-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+			   int count,
+			   struct list_head *head)
 {
-	u32 cmd;
+	struct drm_i915_gem_object *obj;
+	int i = 0;
 
-	if (IS_I830(dev) || IS_845G(dev))
-		cmd = MI_BATCH_BUFFER;
-	else if (INTEL_INFO(dev)->gen >= 4)
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
-		       MI_BATCH_NON_SECURE_I965);
-	else
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+	list_for_each_entry(obj, head, mm_list) {
+		err->size = obj->base.size;
+		err->name = obj->base.name;
+		err->seqno = obj->last_rendering_seqno;
+		err->gtt_offset = obj->gtt_offset;
+		err->read_domains = obj->base.read_domains;
+		err->write_domain = obj->base.write_domain;
+		err->fence_reg = obj->fence_reg;
+		err->pinned = 0;
+		if (obj->pin_count > 0)
+			err->pinned = 1;
+		if (obj->user_pin_count > 0)
+			err->pinned = -1;
+		err->tiling = obj->tiling_mode;
+		err->dirty = obj->dirty;
+		err->purgeable = obj->madv != I915_MADV_WILLNEED;
+		err->ring = obj->ring ? obj->ring->id : 0;
+		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
 
-	return ring[0] == cmd ? ring[1] : 0;
+		if (++i == count)
+			break;
+
+		err++;
+	}
+
+	return i;
 }
 
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+static void i915_gem_record_fences(struct drm_device *dev,
+				   struct drm_i915_error_state *error)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 head, bbaddr;
-	u32 *ring;
+	int i;
 
-	/* Locate the current position in the ringbuffer and walk back
-	 * to find the most recently dispatched batch buffer.
-	 */
-	bbaddr = 0;
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+	case 2:
+		for (i = 0; i < 8; i++)
+			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
 
-	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, ring);
-		if (bbaddr)
-			break;
+	}
+}
+
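Aside: the error-capture and suspend paths both walk a generation-dependent fence register file — sixteen 64-bit registers on gen4/5/6, eight 32-bit registers on gen2, and eight more on the 945/G33 variants of gen3 (hence the deliberate fall-through above). A small sketch summarising that layout; fence_layout() is a made-up helper, not a driver function:

/* Sketch of the per-generation fence register layout walked above. */
#include <assert.h>

/* Number of fence registers and their width, by GPU generation. */
static void fence_layout(int gen, int is_945_or_g33, int *count, int *width)
{
	switch (gen) {
	case 6:
	case 5:
	case 4:
		*count = 16;	/* 64-bit fence registers */
		*width = 8;
		break;
	case 3:
		*count = is_945_or_g33 ? 16 : 8;	/* 8 extra on 945/G33 */
		*width = 4;
		break;
	default:	/* gen2 */
		*count = 8;	/* 32-bit fence registers */
		*width = 4;
		break;
	}
}

int main(void)
{
	int count, width;

	fence_layout(6, 0, &count, &width);
	assert(count == 16 && width == 8);
	fence_layout(3, 1, &count, &width);
	assert(count == 16 && width == 4);
	fence_layout(2, 0, &count, &width);
	assert(count == 8 && width == 4);
	return 0;
}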
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	seqno = ring->get_seqno(ring);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
 	}
 
-	if (bbaddr == 0) {
-		ring = (u32 *)(dev_priv->render_ring.virtual_start
-				+ dev_priv->render_ring.size);
-		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
-			bbaddr = i915_get_bbaddr(dev, ring);
-			if (bbaddr)
-				break;
-		}
-	}
-
-	return bbaddr;
+	return NULL;
 }
 
 /**
@@ -564,12 +747,10 @@
 static void i915_capture_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
-	struct drm_gem_object *batchbuffer[2];
 	unsigned long flags;
-	u32 bbaddr;
-	int count;
+	int i;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -585,20 +766,33 @@
 
 	DRM_DEBUG_DRIVER("generating error event\n");
 
-	error->seqno =
-		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
 	error->pipebstat = I915_READ(PIPEBSTAT);
 	error->instpm = I915_READ(INSTPM);
-	if (INTEL_INFO(dev)->gen < 4) {
-		error->ipeir = I915_READ(IPEIR);
-		error->ipehr = I915_READ(IPEHR);
-		error->instdone = I915_READ(INSTDONE);
-		error->acthd = I915_READ(ACTHD);
-		error->bbaddr = 0;
-	} else {
+	error->error = 0;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->error = I915_READ(ERROR_GEN6);
+
+		error->bcs_acthd = I915_READ(BCS_ACTHD);
+		error->bcs_ipehr = I915_READ(BCS_IPEHR);
+		error->bcs_ipeir = I915_READ(BCS_IPEIR);
+		error->bcs_instdone = I915_READ(BCS_INSTDONE);
+		error->bcs_seqno = 0;
+		if (dev_priv->ring[BCS].get_seqno)
+			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+		error->vcs_acthd = I915_READ(VCS_ACTHD);
+		error->vcs_ipehr = I915_READ(VCS_IPEHR);
+		error->vcs_ipeir = I915_READ(VCS_IPEIR);
+		error->vcs_instdone = I915_READ(VCS_INSTDONE);
+		error->vcs_seqno = 0;
+		if (dev_priv->ring[VCS].get_seqno)
+			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+	}
+	if (INTEL_INFO(dev)->gen >= 4) {
 		error->ipeir = I915_READ(IPEIR_I965);
 		error->ipehr = I915_READ(IPEHR_I965);
 		error->instdone = I915_READ(INSTDONE_I965);
@@ -606,118 +800,61 @@
 		error->instdone1 = I915_READ(INSTDONE1);
 		error->acthd = I915_READ(ACTHD_I965);
 		error->bbaddr = I915_READ64(BB_ADDR);
+	} else {
+		error->ipeir = I915_READ(IPEIR);
+		error->ipehr = I915_READ(IPEHR);
+		error->instdone = I915_READ(INSTDONE);
+		error->acthd = I915_READ(ACTHD);
+		error->bbaddr = 0;
 	}
+	i915_gem_record_fences(dev, error);
 
-	bbaddr = i915_ringbuffer_last_batch(dev);
-
-	/* Grab the current batchbuffer, most likely to have crashed. */
-	batchbuffer[0] = NULL;
-	batchbuffer[1] = NULL;
-	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		struct drm_gem_object *obj = &obj_priv->base;
-
-		if (batchbuffer[0] == NULL &&
-		    bbaddr >= obj_priv->gtt_offset &&
-		    bbaddr < obj_priv->gtt_offset + obj->size)
-			batchbuffer[0] = obj;
-
-		if (batchbuffer[1] == NULL &&
-		    error->acthd >= obj_priv->gtt_offset &&
-		    error->acthd < obj_priv->gtt_offset + obj->size)
-			batchbuffer[1] = obj;
-
-		count++;
-	}
-	/* Scan the other lists for completeness for those bizarre errors. */
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj_priv->gtt_offset &&
-			    bbaddr < obj_priv->gtt_offset + obj->size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj_priv->gtt_offset &&
-			    error->acthd < obj_priv->gtt_offset + obj->size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj_priv->gtt_offset &&
-			    bbaddr < obj_priv->gtt_offset + obj->size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj_priv->gtt_offset &&
-			    error->acthd < obj_priv->gtt_offset + obj->size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-
-	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userspace.
-	 */
-	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	if (batchbuffer[1] != batchbuffer[0])
-		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
-	else
-		error->batchbuffer[1] = NULL;
+	/* Record the active batchbuffers */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		error->batchbuffer[i] =
+			i915_error_first_batchbuffer(dev_priv,
+						     &dev_priv->ring[i]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev,
-			dev_priv->render_ring.gem_object);
+	error->ringbuffer = i915_error_object_create(dev_priv,
+						     dev_priv->ring[RCS].obj);
 
-	/* Record buffers on the active list. */
+	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
-	error->active_bo_count = 0;
+	error->pinned_bo = NULL;
 
-	if (count)
-		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+		i++;
+	error->pinned_bo_count = i - error->active_bo_count;
+
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
-
-	if (error->active_bo) {
-		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			error->active_bo[i].size = obj->size;
-			error->active_bo[i].name = obj->name;
-			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
-			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
-			error->active_bo[i].read_domains = obj->read_domains;
-			error->active_bo[i].write_domain = obj->write_domain;
-			error->active_bo[i].fence_reg = obj_priv->fence_reg;
-			error->active_bo[i].pinned = 0;
-			if (obj_priv->pin_count > 0)
-				error->active_bo[i].pinned = 1;
-			if (obj_priv->user_pin_count > 0)
-				error->active_bo[i].pinned = -1;
-			error->active_bo[i].tiling = obj_priv->tiling_mode;
-			error->active_bo[i].dirty = obj_priv->dirty;
-			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
-			if (++i == count)
-				break;
-		}
-		error->active_bo_count = i;
+		if (error->active_bo)
+			error->pinned_bo =
+				error->active_bo + error->active_bo_count;
 	}
 
+	if (error->active_bo)
+		error->active_bo_count =
+			capture_bo_list(error->active_bo,
+					error->active_bo_count,
+					&dev_priv->mm.active_list);
+
+	if (error->pinned_bo)
+		error->pinned_bo_count =
+			capture_bo_list(error->pinned_bo,
+					error->pinned_bo_count,
+					&dev_priv->mm.pinned_list);
+
 	do_gettimeofday(&error->time);
 
 	error->overlay = intel_overlay_capture_error_state(dev);
+	error->display = intel_display_capture_error_state(dev);
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (dev_priv->first_error == NULL) {
@@ -775,7 +912,7 @@
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
-			(void)I915_READ(IPEIR_I965);
+			POSTING_READ(IPEIR_I965);
 		}
 		if (eir & GM45_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +920,7 @@
 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
 			       pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
-			(void)I915_READ(PGTBL_ER);
+			POSTING_READ(PGTBL_ER);
 		}
 	}
 
@@ -794,7 +931,7 @@
 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
 			       pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
-			(void)I915_READ(PGTBL_ER);
+			POSTING_READ(PGTBL_ER);
 		}
 	}
 
@@ -825,7 +962,7 @@
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD));
 			I915_WRITE(IPEIR, ipeir);
-			(void)I915_READ(IPEIR);
+			POSTING_READ(IPEIR);
 		} else {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
@@ -842,12 +979,12 @@
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
-			(void)I915_READ(IPEIR_I965);
+			POSTING_READ(IPEIR_I965);
 		}
 	}
 
 	I915_WRITE(EIR, eir);
-	(void)I915_READ(EIR);
+	POSTING_READ(EIR);
 	eir = I915_READ(EIR);
 	if (eir) {
 		/*
@@ -870,7 +1007,7 @@
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -884,11 +1021,11 @@
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		wake_up_all(&dev_priv->render_ring.irq_queue);
+		wake_up_all(&dev_priv->ring[RCS].irq_queue);
 		if (HAS_BSD(dev))
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[VCS].irq_queue);
 		if (HAS_BLT(dev))
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[BCS].irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1036,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct intel_unpin_work *work;
 	unsigned long flags;
 	bool stall_detected;
@@ -918,13 +1055,13 @@
 	}
 
 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
-	obj_priv = to_intel_bo(work->pending_flip_obj);
+	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
-		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
 	} else {
 		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
-		stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
 							crtc->y * crtc->fb->pitch +
 							crtc->x * crtc->fb->bits_per_pixel/8);
 	}
@@ -970,7 +1107,7 @@
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
-		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		pipea_stats = I915_READ(PIPEASTAT);
 		pipeb_stats = I915_READ(PIPEBSTAT);
 
@@ -993,7 +1130,7 @@
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
-		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		if (!irq_received)
 			break;
@@ -1026,9 +1163,9 @@
 		}
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->render_ring);
-		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			notify_ring(dev, &dev_priv->bsd_ring);
+			notify_ring(dev, &dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[VCS]);
 
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
 			intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1238,13 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
 
 	return dev_priv->counter;
 }
@@ -1114,12 +1252,11 @@
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	if (dev_priv->trace_irq_seqno == 0)
-		render_ring->user_irq_get(dev, render_ring);
-
-	dev_priv->trace_irq_seqno = seqno;
+	if (dev_priv->trace_irq_seqno == 0 &&
+	    ring->irq_get(ring))
+		dev_priv->trace_irq_seqno = seqno;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1264,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1278,12 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	render_ring->user_irq_get(dev, render_ring);
-	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
-		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	render_ring->user_irq_put(dev, render_ring);
+	ret = -ENODEV;
+	if (ring->irq_get(ring)) {
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+		ring->irq_put(ring);
+	}
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1302,7 @@
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
@@ -1209,9 +1348,9 @@
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else if (INTEL_INFO(dev)->gen >= 4)
 		i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1358,7 @@
 	else
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	return 0;
 }
 
@@ -1231,15 +1370,15 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
 					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else
 		i915_disable_pipestat(dev_priv, pipe,
 				      PIPE_VBLANK_INTERRUPT_ENABLE |
 				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1445,50 @@
 	return -EINVAL;
 }
 
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->render_ring.request_list.prev,
-			struct drm_i915_gem_request, list);
+	return list_entry(ring->request_list.prev,
+			  struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+	if (list_empty(&ring->request_list) ||
+	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+		/* Issue a wake-up to catch stuck h/w. */
+		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+				  ring->name,
+				  ring->waiting_seqno,
+				  ring->get_seqno(ring));
+			wake_up_all(&ring->irq_queue);
+			*err = true;
+		}
+		return true;
+	}
+	return false;
+}
+
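Aside: the idle test above leans on i915_seqno_passed(), which is not part of this hunk; by the driver's convention it treats 32-bit seqnos as wrapping and compares them via signed subtraction. A minimal sketch of that convention, assuming exactly that definition:

/* Sketch of the wrap-safe seqno comparison the hangcheck code relies on. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* "seq1 has passed seq2" if the signed distance is non-negative. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 5));		/* plainly later */
	assert(!seqno_passed(5, 10));		/* plainly earlier */
	/* Works across the 32-bit wrap: 3 is "after" 0xfffffffe. */
	assert(seqno_passed(3, 0xfffffffeu));
	assert(!seqno_passed(0xfffffffeu, 3));
	return 0;
}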
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT) {
+		DRM_ERROR("Kicking stuck wait on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	if (IS_GEN6(dev) &&
+	    (tmp & RING_WAIT_SEMAPHORE)) {
+		DRM_ERROR("Kicking stuck semaphore on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	return false;
 }
 
 /**
@@ -1325,6 +1502,17 @@
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t acthd, instdone, instdone1;
+	bool err = false;
+
+	/* If all work is done then ACTHD clearly hasn't advanced. */
+	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+		dev_priv->hangcheck_count = 0;
+		if (err)
+			goto repeat;
+		return;
+	}
 
 	if (INTEL_INFO(dev)->gen < 4) {
 		acthd = I915_READ(ACTHD);
@@ -1336,38 +1524,6 @@
 		instdone1 = I915_READ(INSTDONE1);
 	}
 
-	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->render_ring.request_list) ||
-		i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
-				  i915_get_tail_request(dev)->seqno)) {
-		bool missed_wakeup = false;
-
-		dev_priv->hangcheck_count = 0;
-
-		/* Issue a wake-up to catch stuck h/w. */
-		if (dev_priv->render_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
-			wake_up_all(&dev_priv->render_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (dev_priv->bsd_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (dev_priv->blt_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (missed_wakeup)
-			DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-		return;
-	}
-
 	if (dev_priv->last_acthd == acthd &&
 	    dev_priv->last_instdone == instdone &&
 	    dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1536,17 @@
 				 * and break the hang. This should work on
 				 * all but the second generation chipsets.
 				 */
-				u32 tmp = I915_READ(PRB0_CTL);
-				if (tmp & RING_WAIT) {
-					I915_WRITE(PRB0_CTL, tmp);
-					POSTING_READ(PRB0_CTL);
-					goto out;
-				}
+
+				if (kick_ring(&dev_priv->ring[RCS]))
+					goto repeat;
+
+				if (HAS_BSD(dev) &&
+				    kick_ring(&dev_priv->ring[VCS]))
+					goto repeat;
+
+				if (HAS_BLT(dev) &&
+				    kick_ring(&dev_priv->ring[BCS]))
+					goto repeat;
 			}
 
 			i915_handle_error(dev, true);
@@ -1399,7 +1560,7 @@
 		dev_priv->last_instdone1 = instdone1;
 	}
 
-out:
+repeat:
 	/* Reset timer case chip hangs without another request being added */
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1578,17 @@
 
 	I915_WRITE(DEIMR, 0xffffffff);
 	I915_WRITE(DEIER, 0x0);
-	(void) I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	/* and GT */
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
-	(void) I915_READ(GTIER);
+	POSTING_READ(GTIER);
 
 	/* south display irq */
 	I915_WRITE(SDEIMR, 0xffffffff);
 	I915_WRITE(SDEIER, 0x0);
-	(void) I915_READ(SDEIER);
+	POSTING_READ(SDEIER);
 }
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1597,34 @@
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+	u32 render_irqs;
 	u32 hotplug_mask;
 
-	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+	dev_priv->irq_mask = ~display_mask;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
-	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
-	(void) I915_READ(DEIER);
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+	POSTING_READ(DEIER);
 
-	if (IS_GEN6(dev)) {
-		render_mask =
-			GT_PIPE_NOTIFY |
-			GT_GEN6_BSD_USER_INTERRUPT |
-			GT_BLT_USER_INTERRUPT;
-	}
-
-	dev_priv->gt_irq_mask_reg = ~render_mask;
-	dev_priv->gt_irq_enable_reg = render_mask;
+	dev_priv->gt_irq_mask = ~0;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
-		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-	}
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
-	(void) I915_READ(GTIER);
+	if (IS_GEN6(dev))
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GT_GEN6_BSD_USER_INTERRUPT |
+			GT_BLT_USER_INTERRUPT;
+	else
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GT_PIPE_NOTIFY |
+			GT_BSD_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
 
 	if (HAS_PCH_CPT(dev)) {
 		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
@@ -1475,15 +1632,17 @@
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
+		I915_WRITE(FDI_RXA_IMR, 0);
+		I915_WRITE(FDI_RXB_IMR, 0);
 	}
 
-	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
-	dev_priv->pch_irq_enable_reg = hotplug_mask;
+	dev_priv->pch_irq_mask = ~hotplug_mask;
 
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
-	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
-	(void) I915_READ(SDEIER);
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+	I915_WRITE(SDEIER, hotplug_mask);
+	POSTING_READ(SDEIER);
 
 	if (IS_IRONLAKE_M(dev)) {
 		/* Clear & enable PCU event interrupts */
@@ -1519,7 +1678,7 @@
 	I915_WRITE(PIPEBSTAT, 0);
 	I915_WRITE(IMR, 0xffffffff);
 	I915_WRITE(IER, 0x0);
-	(void) I915_READ(IER);
+	POSTING_READ(IER);
 }
 
 /*
@@ -1532,11 +1691,11 @@
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 	u32 error_mask;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
 	if (HAS_BSD(dev))
-		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
 	if (HAS_BLT(dev))
-		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
@@ -1544,7 +1703,7 @@
 		return ironlake_irq_postinstall(dev);
 
 	/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
 
 	dev_priv->pipestat[0] = 0;
 	dev_priv->pipestat[1] = 0;
@@ -1553,7 +1712,7 @@
 		/* Enable in IER... */
 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
 		/* and unmask in IMR */
-		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
 	}
 
 	/*
@@ -1571,9 +1730,9 @@
 	}
 	I915_WRITE(EMR, error_mask);
 
-	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IMR, dev_priv->irq_mask);
 	I915_WRITE(IER, enable_mask);
-	(void) I915_READ(IER);
+	POSTING_READ(IER);
 
 	if (I915_HAS_HOTPLUG(dev)) {
 		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cb8f434..40a407f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
 #define  GRDOM_RENDER	(1<<2)
 #define  GRDOM_MEDIA	(3<<2)
 
+#define GEN6_GDRST	0x941c
+#define  GEN6_GRDOM_FULL		(1 << 0)
+#define  GEN6_GRDOM_RENDER		(1 << 1)
+#define  GEN6_GRDOM_MEDIA		(1 << 2)
+#define  GEN6_GRDOM_BLT			(1 << 3)
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -139,6 +145,8 @@
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
 #define   MI_OVERLAY_CONTINUE	(0x0<<21)
@@ -153,17 +161,29 @@
 #define   MI_MM_SPACE_PHYSICAL		(0<<8)
 #define   MI_SAVE_EXT_STATE_EN		(1<<3)
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue an MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: simply issue x
+ *   address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define  MI_SEMAPHORE_UPDATE	    (1<<21)
+#define  MI_SEMAPHORE_COMPARE	    (1<<20)
+#define  MI_SEMAPHORE_REGISTER	    (1<<18)
 /*
  * 3D instructions used by the kernel
  */
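Aside: the MI_LOAD_REGISTER_IMM comment earlier in this hunk prescribes an MI_NOOP first and a length field of 2*x-1 for x address/value pairs. A userspace sketch of emitting that sequence into a plain dword array, assuming the driver's usual MI_INSTR(opcode, flags) expansion of ((opcode) << 23 | (flags)); the emit_lri() helper is invented here:

/* Sketch of emitting the MI_NOOP + MI_LOAD_REGISTER_IMM(x) sequence the
 * comment above prescribes, into a plain dword array. Illustrative only. */
#include <assert.h>
#include <stdint.h>

#define MI_INSTR(opcode, flags)	(((opcode) << 23) | (flags))
#define MI_NOOP			MI_INSTR(0, 0)
/* Length field is total dwords minus two: 1 header + 2*x payload dwords. */
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)

static int emit_lri(uint32_t *cs, int x,
		    const uint32_t *regs, const uint32_t *vals)
{
	int n = 0;

	cs[n++] = MI_NOOP;			/* required before LRI */
	cs[n++] = MI_LOAD_REGISTER_IMM(x);	/* x must stay <= 16 */
	for (int i = 0; i < x; i++) {
		cs[n++] = regs[i];		/* register offset */
		cs[n++] = vals[i];		/* value to load */
	}
	return n;				/* dwords emitted */
}

int main(void)
{
	uint32_t cs[8];
	uint32_t regs[2] = { 0x2090, 0x209c };	/* arbitrary offsets */
	uint32_t vals[2] = { 0x1, 0x2 };

	assert(emit_lri(cs, 2, regs, vals) == 6);
	assert(cs[1] == ((0x22 << 23) | 3));	/* 2*x-1 == 3 for x == 2 */
	return 0;
}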
@@ -256,10 +276,6 @@
  * Instruction and interrupt control regs
  */
 #define PGTBL_ER	0x02024
-#define PRB0_TAIL	0x02030
-#define PRB0_HEAD	0x02034
-#define PRB0_START	0x02038
-#define PRB0_CTL	0x0203c
 #define RENDER_RING_BASE	0x02000
 #define BSD_RING_BASE		0x04000
 #define GEN6_BSD_RING_BASE	0x12000
@@ -268,9 +284,14 @@
 #define RING_HEAD(base)		((base)+0x34)
 #define RING_START(base)	((base)+0x38)
 #define RING_CTL(base)		((base)+0x3c)
+#define RING_SYNC_0(base)	((base)+0x40)
+#define RING_SYNC_1(base)	((base)+0x44)
+#define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RING_ACTHD(base)	((base)+0x74)
+#define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -285,10 +306,17 @@
 #define   RING_INVALID		0x00000000
 #define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
 #define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL	0x02030
+#define PRB0_HEAD	0x02034
+#define PRB0_START	0x02038
+#define PRB0_CTL	0x0203c
 #define PRB1_TAIL	0x02040 /* 915+ only */
 #define PRB1_HEAD	0x02044 /* 915+ only */
 #define PRB1_START	0x02048 /* 915+ only */
 #define PRB1_CTL	0x0204c /* 915+ only */
+#endif
 #define IPEIR_I965	0x02064
 #define IPEHR_I965	0x02068
 #define INSTDONE_I965	0x0206c
@@ -305,11 +333,42 @@
 #define INSTDONE	0x02090
 #define NOPID		0x02094
 #define HWSTAM		0x02098
+#define VCS_INSTDONE	0x1206C
+#define VCS_IPEIR	0x12064
+#define VCS_IPEHR	0x12068
+#define VCS_ACTHD	0x12074
+#define BCS_INSTDONE	0x2206C
+#define BCS_IPEIR	0x22064
+#define BCS_IPEHR	0x22068
+#define BCS_ACTHD	0x22074
+
+#define ERROR_GEN6	0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior.  The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN	0x02084
+#define _3D_CHICKEN2	0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
+#define _3D_CHICKEN3	0x02090
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
 # define MI_FLUSH_ENABLE				(1 << 11)
 
+#define GFX_MODE	0x02520
+#define   GFX_RUN_LIST_ENABLE		(1<<15)
+#define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
+#define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
+#define   GFX_REPLAY_MODE		(1<<11)
+#define   GFX_PSMI_GRANULARITY		(1<<10)
+#define   GFX_PPGTT_ENABLE		(1<<9)
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -461,7 +520,7 @@
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR			(1 << 3)
 
 #define GEN6_BSD_IMR			0x120a8
-#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
+#define   GEN6_BSD_USER_INTERRUPT	(1 << 12)
 
 #define GEN6_BSD_RNCID			0x12198
 
@@ -541,6 +600,18 @@
 
 #define ILK_DISPLAY_CHICKEN1	0x42000
 #define   ILK_FBCQ_DIS		(1<<22)
+#define   ILK_PABSTRETCH_DIS 	(1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA		0x100100
+#define   SNB_CPU_FENCE_ENABLE	(1<<29)
+#define DPFC_CPU_FENCE_OFFSET	0x100104
+
 
 /*
  * GPIO regs
@@ -900,6 +971,8 @@
  */
 #define MCHBAR_MIRROR_BASE	0x10000
 
+#define MCHBAR_MIRROR_BASE_SNB	0x140000
+
 /** 915-945 and GM965 MCH register controlling DRAM channel access */
 #define DCC			0x10200
 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
@@ -1061,9 +1134,50 @@
 #define RCBMINAVG		0x111a0
 #define RCUPEI			0x111b0
 #define RCDNEI			0x111b4
-#define MCHBAR_RENDER_STANDBY		0x111b8
-#define   RCX_SW_EXIT		(1<<23)
-#define   RSX_STATUS_MASK	0x00700000
+#define RSTDBYCTL		0x111b8
+#define   RS1EN			(1<<31)
+#define   RS2EN			(1<<30)
+#define   RS3EN			(1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define   SWPROMORSX		(1<<27) /* RSx promotion timers ignored */
+#define   RCWAKERW		(1<<26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN		(1<<25) /* Fast voltage ramp enable */
+#define   GFXTGHYST		(1<<24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT		(1<<23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK	(7<<20)
+#define   RSX_STATUS_ON		(0<<20)
+#define   RSX_STATUS_RC1	(1<<20)
+#define   RSX_STATUS_RC1E	(2<<20)
+#define   RSX_STATUS_RS1	(3<<20)
+#define   RSX_STATUS_RS2	(4<<20) /* aka rc6 */
+#define   RSX_STATUS_RSVD	(5<<20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3	(6<<20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2	(7<<20)
+#define   UWRCRSXE		(1<<19) /* wake counter limit prevents rsx */
+#define   RSCRP			(1<<18) /* rs requests control on rs1/2 reqs */
+#define   JRSC			(1<<17) /* rsx coupled to cpu c-state */
+#define   RS2INC0		(1<<16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK	(3<<14)
+#define   RS1CONTSAV_NO_RS1	(0<<14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD	(1<<14)
+#define   RS1CONTSAV_SAVE_RS1	(2<<14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1	(3<<14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK	(3<<12)
+#define   SLOW_RS123		(0<<12)
+#define   SLOW_RS23		(1<<12)
+#define   SLOW_RS3		(2<<12)
+#define   NORMAL_RS123		(3<<12)
+#define   RCMODE_TIMEOUT	(1<<11) /* 0 is eval interval method */
+#define   IMPROMOEN		(1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC		(1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK		(1<<7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK	(3<<4)
+#define   RS_CSTATE_C367_RS1	(0<<4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define   RS_CSTATE_RSVD	(2<<4)
+#define   RS_CSTATE_C367_RS2	(3<<4)
+#define   REDSAVES		(1<<3) /* no context save if was idle during rs0 */
+#define   REDRESTORES		(1<<2) /* no restore if was idle during rs0 */
 #define VIDCTL			0x111c0
 #define VIDSTS			0x111c8
 #define VIDSTART		0x111cc /* 8 bits */
@@ -1119,6 +1233,10 @@
 #define DDRMPLL1		0X12c20
 #define PEG_BAND_GAP_DATA	0x14d68
 
+#define GEN6_GT_PERF_STATUS	0x145948
+#define GEN6_RP_STATE_LIMITS	0x145994
+#define GEN6_RP_STATE_CAP	0x145998
+
 /*
  * Logical Context regs
  */
@@ -1168,7 +1286,6 @@
 #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
 #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
 #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
 #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
 
 /* VGA port control */
@@ -2182,8 +2299,10 @@
 #define   PIPE_6BPC				(2 << 5)
 #define   PIPE_12BPC				(3 << 5)
 
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
 #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
 #define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
+#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
 
 #define DSPARB			0x70030
 #define   DSPARB_CSTART_MASK	(0x7f << 7)
@@ -2271,8 +2390,13 @@
 
 /* Memory latency timer register */
 #define MLTR_ILK		0x11222
+#define  MLTR_WM1_SHIFT		0
+#define  MLTR_WM2_SHIFT		8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define  ILK_SRLT_MASK		0x3f
+#define ILK_LATENCY(shift)	(I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY()	ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY()	ILK_LATENCY(MLTR_WM2_SHIFT)
 
 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO	128
@@ -2291,6 +2415,40 @@
 
 #define ILK_FIFO_LINE_SIZE	64
 
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO	128
+#define SNB_DISPLAY_MAXWM	0x7f	/* bit 16:22 */
+#define SNB_DISPLAY_DFTWM	8
+#define SNB_CURSOR_FIFO		32
+#define SNB_CURSOR_MAXWM	0x1f	/* bit 4:0 */
+#define SNB_CURSOR_DFTWM	8
+
+#define SNB_DISPLAY_SR_FIFO	512
+#define SNB_DISPLAY_MAX_SRWM	0x1ff	/* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM	0x3f
+#define SNB_CURSOR_SR_FIFO	64
+#define SNB_CURSOR_MAX_SRWM	0x3f	/* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM	8
+
+#define SNB_FBC_MAX_SRWM	0xf	/* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE	64
+
+
+/* the address where we read all kinds of latency values */
+#define SSKPD			0x5d10
+#define SSKPD_WM_MASK		0x3f
+#define SSKPD_WM0_SHIFT		0
+#define SSKPD_WM1_SHIFT		8
+#define SSKPD_WM2_SHIFT		16
+#define SSKPD_WM3_SHIFT		24
+
+#define SNB_LATENCY(shift)	(I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY()		SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY()		SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY()		SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY()		SNB_LATENCY(SSKPD_WM3_SHIFT)
+
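Aside: the SNB_LATENCY() helpers above simply pick 6-bit fields out of the SSKPD readout at shifts 0, 8, 16 and 24. A standalone sketch of that field extraction, using a hypothetical register value and making no claim about the units of the fields:

/* Sketch of the SSKPD watermark-latency field extraction above. */
#include <assert.h>
#include <stdint.h>

#define SSKPD_WM_MASK	0x3f
#define SSKPD_WM0_SHIFT	0
#define SSKPD_WM1_SHIFT	8
#define SSKPD_WM2_SHIFT	16
#define SSKPD_WM3_SHIFT	24

/* Mirror of SNB_LATENCY(): shift the field down, keep the low 6 bits. */
static uint32_t snb_latency(uint32_t sskpd, int shift)
{
	return (sskpd >> shift) & SSKPD_WM_MASK;
}

int main(void)
{
	/* Hypothetical SSKPD readout: WM3..WM0 packed as 0x20, 0x18, 0x0c, 0x04. */
	uint32_t sskpd = (0x20u << 24) | (0x18u << 16) | (0x0cu << 8) | 0x04u;

	assert(snb_latency(sskpd, SSKPD_WM0_SHIFT) == 0x04);
	assert(snb_latency(sskpd, SSKPD_WM1_SHIFT) == 0x0c);
	assert(snb_latency(sskpd, SSKPD_WM2_SHIFT) == 0x18);
	assert(snb_latency(sskpd, SSKPD_WM3_SHIFT) == 0x20);
	return 0;
}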
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2509,10 @@
 #define CURBBASE		0x700c4
 #define CURBPOS			0x700c8
 
+#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+
 /* Display A control */
 #define DSPACNTR                0x70180
 #define   DISPLAY_PLANE_ENABLE			(1<<31)
@@ -2589,6 +2751,8 @@
 #define GTIER   0x4401c
 
 #define ILK_DISPLAY_CHICKEN2	0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define  ILK_ELPIN_409_SELECT	(1 << 25)
 #define  ILK_DPARB_GATE	(1<<22)
 #define  ILK_VSDPFD_FULL	(1<<21)
 #define ILK_DISPLAY_CHICKEN_FUSES	0x42014
@@ -2600,6 +2764,8 @@
 #define  ILK_DESKTOP			(1<<23)
 #define ILK_DSPCLK_GATE		0x42020
 #define  ILK_DPARB_CLK_GATE	(1<<5)
+#define  ILK_DPFD_CLK_GATE	(1<<7)
+
 /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
 #define   ILK_CLK_FBC		(1<<7)
 #define   ILK_DPFC_DIS1		(1<<8)
@@ -2612,12 +2778,41 @@
 /* PCH */
 
 /* south display engine interrupt */
+#define SDE_AUDIO_POWER_D	(1 << 27)
+#define SDE_AUDIO_POWER_C	(1 << 26)
+#define SDE_AUDIO_POWER_B	(1 << 25)
+#define SDE_AUDIO_POWER_SHIFT	(25)
+#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS		(1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB	(1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA	(1 << 22)
+#define SDE_AUDIO_HDCP_MASK	(3 << 22)
+#define SDE_AUDIO_TRANSB	(1 << 21)
+#define SDE_AUDIO_TRANSA	(1 << 20)
+#define SDE_AUDIO_TRANS_MASK	(3 << 20)
+#define SDE_POISON		(1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB		(1 << 17)
+#define SDE_FDI_RXA		(1 << 16)
+#define SDE_FDI_MASK		(3 << 16)
+#define SDE_AUXD		(1 << 15)
+#define SDE_AUXC		(1 << 14)
+#define SDE_AUXB		(1 << 13)
+#define SDE_AUX_MASK		(7 << 13)
+/* 12 reserved */
 #define SDE_CRT_HOTPLUG         (1 << 11)
 #define SDE_PORTD_HOTPLUG       (1 << 10)
 #define SDE_PORTC_HOTPLUG       (1 << 9)
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
 #define SDE_HOTPLUG_MASK	(0xf << 8)
+#define SDE_TRANSB_CRC_DONE	(1 << 5)
+#define SDE_TRANSB_CRC_ERR	(1 << 4)
+#define SDE_TRANSB_FIFO_UNDER	(1 << 3)
+#define SDE_TRANSA_CRC_DONE	(1 << 2)
+#define SDE_TRANSA_CRC_ERR	(1 << 1)
+#define SDE_TRANSA_FIFO_UNDER	(1 << 0)
+#define SDE_TRANS_MASK		(0x3f)
 /* CPT */
 #define SDE_CRT_HOTPLUG_CPT	(1 << 19)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
@@ -2679,6 +2874,7 @@
 #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
 
 #define PCH_FPA0                0xc6040
+#define  FP_CB_TUNE		(0x3<<22)
 #define PCH_FPA1                0xc6044
 #define PCH_FPB0                0xc6048
 #define PCH_FPB1                0xc604c
@@ -3057,10 +3253,74 @@
 #define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
 #define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
 /* SNB B-stepping */
-#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
-#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
-#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
-#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
 
+#define  FORCEWAKE				0xA18C
+#define  FORCEWAKE_ACK				0x130090
+
+#define GEN6_RPNSWREQ				0xA008
+#define   GEN6_TURBO_DISABLE			(1<<31)
+#define   GEN6_FREQUENCY(x)			((x)<<25)
+#define   GEN6_OFFSET(x)			((x)<<19)
+#define   GEN6_AGGRESSIVE_TURBO			(0<<15)
+#define GEN6_RC_VIDEO_FREQ			0xA00C
+#define GEN6_RC_CONTROL				0xA090
+#define   GEN6_RC_CTL_RC6pp_ENABLE		(1<<16)
+#define   GEN6_RC_CTL_RC6p_ENABLE		(1<<17)
+#define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
+#define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
+#define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
+#define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
+#define GEN6_RP_DOWN_TIMEOUT			0xA010
+#define GEN6_RP_INTERRUPT_LIMITS		0xA014
+#define GEN6_RPSTAT1				0xA01C
+#define GEN6_RP_CONTROL				0xA024
+#define   GEN6_RP_MEDIA_TURBO			(1<<11)
+#define   GEN6_RP_USE_NORMAL_FREQ		(1<<9)
+#define   GEN6_RP_MEDIA_IS_GFX			(1<<8)
+#define   GEN6_RP_ENABLE			(1<<7)
+#define   GEN6_RP_UP_BUSY_MAX			(0x2<<3)
+#define   GEN6_RP_DOWN_BUSY_MIN			(0x2<<0)
+#define GEN6_RP_UP_THRESHOLD			0xA02C
+#define GEN6_RP_DOWN_THRESHOLD			0xA030
+#define GEN6_RP_UP_EI				0xA068
+#define GEN6_RP_DOWN_EI				0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS			0xA070
+#define GEN6_RC_STATE				0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT		0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT		0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT		0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL		0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS			0xA0AC
+#define GEN6_RC_SLEEP				0xA0B0
+#define GEN6_RC1e_THRESHOLD			0xA0B4
+#define GEN6_RC6_THRESHOLD			0xA0B8
+#define GEN6_RC6p_THRESHOLD			0xA0BC
+#define GEN6_RC6pp_THRESHOLD			0xA0C0
+#define GEN6_PMINTRMSK				0xA168
+
+#define GEN6_PMISR				0x44020
+#define GEN6_PMIMR				0x44024
+#define GEN6_PMIIR				0x44028
+#define GEN6_PMIER				0x4402C
+#define  GEN6_PM_MBOX_EVENT			(1<<25)
+#define  GEN6_PM_THERMAL_EVENT			(1<<24)
+#define  GEN6_PM_RP_DOWN_TIMEOUT		(1<<6)
+#define  GEN6_PM_RP_UP_THRESHOLD		(1<<5)
+#define  GEN6_PM_RP_DOWN_THRESHOLD		(1<<4)
+#define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
+#define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)
+
+#define GEN6_PCODE_MAILBOX			0x138124
+#define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_READ_OC_PARAMS			0xc
+#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x9
+#define GEN6_PCODE_DATA				0x138128
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 42729d2..0521ecf2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,6 +235,7 @@
 static void i915_save_modeset_reg(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
@@ -367,6 +368,28 @@
 	}
 	i915_save_palette(dev, PIPE_B);
 	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+	case 2:
+		for (i = 0; i < 8; i++)
+			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
+	}
+
 	return;
 }
 
@@ -375,10 +398,33 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int dpll_a_reg, fpa0_reg, fpa1_reg;
 	int dpll_b_reg, fpb0_reg, fpb1_reg;
+	int i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+		break;
+	case 3:
+	case 2:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+		for (i = 0; i < 8; i++)
+			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+		break;
+	}
+
+
 	if (HAS_PCH_SPLIT(dev)) {
 		dpll_a_reg = PCH_DPLL_A;
 		dpll_b_reg = PCH_DPLL_B;
@@ -694,7 +740,7 @@
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
-		I915_WRITE(MCHBAR_RENDER_STANDBY,
+		I915_WRITE(RSTDBYCTL,
 			   dev_priv->saveMCHBAR_RENDER_STANDBY);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -765,14 +811,16 @@
 		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
 		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
 		dev_priv->saveMCHBAR_RENDER_STANDBY =
-			I915_READ(MCHBAR_RENDER_STANDBY);
+			I915_READ(RSTDBYCTL);
 	} else {
 		dev_priv->saveIER = I915_READ(IER);
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
+	if (IS_GEN6(dev))
+		gen6_disable_rps(dev);
 
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +836,6 @@
 	for (i = 0; i < 3; i++)
 		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 6:
-		for (i = 0; i < 16; i++)
-			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-		break;
-	case 3:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-	case 2:
-		for (i = 0; i < 8; i++)
-			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-		break;
-
-	}
-
 	return 0;
 }
 
@@ -823,27 +849,6 @@
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 6:
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
-		break;
-	case 3:
-	case 2:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
-		for (i = 0; i < 8; i++)
-			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
-		break;
-	}
-
 	i915_restore_display(dev);
 
 	/* Interrupt state */
@@ -860,13 +865,16 @@
 	}
 
 	/* Clock gating state */
-	intel_init_clock_gating(dev);
+	intel_enable_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
 		intel_init_emon(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_enable_rps(dev_priv);
+
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a2..7f0fc3e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
 #include <linux/tracepoint.h>
 
 #include <drm/drmP.h>
+#include "i915_drv.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
 
 TRACE_EVENT(i915_gem_object_create,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     __field(u32, size)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->size = obj->size;
+			   __entry->size = obj->base.size;
 			   ),
 
 	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@
 
 TRACE_EVENT(i915_gem_object_bind,
 
-	    TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
 
-	    TP_ARGS(obj, gtt_offset),
+	    TP_ARGS(obj, gtt_offset, mappable),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     __field(u32, gtt_offset)
+			     __field(bool, mappable)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
 			   __entry->gtt_offset = gtt_offset;
+			   __entry->mappable = mappable;
 			   ),
 
-	    TP_printk("obj=%p, gtt_offset=%08x",
-		      __entry->obj, __entry->gtt_offset)
+	    TP_printk("obj=%p, gtt_offset=%08x%s",
+		      __entry->obj, __entry->gtt_offset,
+		      __entry->mappable ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_object_change_domain,
 
-	    TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+	    TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
 
 	    TP_ARGS(obj, old_read_domains, old_write_domain),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     __field(u32, read_domains)
 			     __field(u32, write_domain)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->read_domains = obj->read_domains | (old_read_domains << 16);
-			   __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+			   __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+			   __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
 			   ),
 
 	    TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@
 		      __entry->read_domains, __entry->write_domain)
 );
 
-TRACE_EVENT(i915_gem_object_get_fence,
-
-	    TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
-	    TP_ARGS(obj, fence, tiling_mode),
-
-	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
-			     __field(int, fence)
-			     __field(int, tiling_mode)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->obj = obj;
-			   __entry->fence = fence;
-			   __entry->tiling_mode = tiling_mode;
-			   ),
-
-	    TP_printk("obj=%p, fence=%d, tiling=%d",
-		      __entry->obj, __entry->fence, __entry->tiling_mode)
-);
-
 DECLARE_EVENT_CLASS(i915_gem_object,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     ),
 
 	    TP_fast_assign(
@@ -117,21 +99,21 @@
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj)
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj)
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj)
 );
@@ -263,13 +245,13 @@
 );
 
 TRACE_EVENT(i915_flip_request,
-	    TP_PROTO(int plane, struct drm_gem_object *obj),
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(plane, obj),
 
 	    TP_STRUCT__entry(
 		    __field(int, plane)
-		    __field(struct drm_gem_object *, obj)
+		    __field(struct drm_i915_gem_object *, obj)
 		    ),
 
 	    TP_fast_assign(
@@ -281,13 +263,13 @@
 );
 
 TRACE_EVENT(i915_flip_complete,
-	    TP_PROTO(int plane, struct drm_gem_object *obj),
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(plane, obj),
 
 	    TP_STRUCT__entry(
 		    __field(int, plane)
-		    __field(struct drm_gem_object *, obj)
+		    __field(struct drm_i915_gem_object *, obj)
 		    ),
 
 	    TP_fast_assign(
@@ -298,6 +280,29 @@
 	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
 );
 
+TRACE_EVENT(i915_reg_rw,
+           TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+
+           TP_ARGS(cmd, reg, val, len),
+
+           TP_STRUCT__entry(
+                   __field(int, cmd)
+                   __field(uint32_t, reg)
+                   __field(uint64_t, val)
+                   __field(int, len)
+                   ),
+
+           TP_fast_assign(
+                   __entry->cmd = cmd;
+                   __entry->reg = reg;
+                   __entry->val = (uint64_t)val;
+                   __entry->len = len;
+                   ),
+
+           TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
+                     __entry->cmd, __entry->reg, __entry->val, __entry->len)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b0b1200..0b44956 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -264,17 +264,12 @@
 		dev_priv->int_crt_support = general->int_crt_support;
 		dev_priv->lvds_use_ssc = general->enable_ssc;
 
-		if (dev_priv->lvds_use_ssc) {
-			if (IS_I85X(dev))
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 66 : 48;
-			else if (IS_GEN5(dev) || IS_GEN6(dev))
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 100 : 120;
-			else
-				dev_priv->lvds_ssc_freq =
-					general->ssc_freq ? 100 : 96;
-		}
+		if (IS_I85X(dev))
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
+		else if (IS_GEN5(dev) || IS_GEN6(dev))
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
+		else
+			dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8df5743..17035b8 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -287,8 +288,9 @@
 	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct intel_crt *crt)
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
 	/* CRT should always be at 0, but check anyway */
@@ -301,8 +303,26 @@
 	}
 
 	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
-		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-		return true;
+		struct edid *edid;
+		bool is_digital = false;
+
+		edid = drm_get_edid(connector,
+			&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (edid != NULL) {
+			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		}
+
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			return true;
+		}
 	}
 
 	return false;
@@ -458,7 +478,7 @@
 		}
 	}
 
-	if (intel_crt_detect_ddc(crt))
+	if (intel_crt_detect_ddc(connector))
 		return connector_status_connected;
 
 	if (!force)
@@ -472,7 +492,7 @@
 		crtc = intel_get_load_detect_pipe(&crt->base, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			if (intel_crt_detect_ddc(crt))
+			if (intel_crt_detect_ddc(connector))
 				status = connector_status_connected;
 			else
 				status = intel_crt_load_detect(crtc, crt);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fca5232..98967f3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@
         .find_pll = intel_find_pll_ironlake_dp,
 };
 
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+						int refclk)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	int refclk = 120;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
-			refclk = 100;
-
 		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
 		    LVDS_CLKB_POWER_UP) {
 			/* LVDS dual channel */
-			if (refclk == 100)
+			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
 			else
 				limit = &intel_limits_ironlake_dual_lvds;
 		} else {
-			if (refclk == 100)
+			if (refclk == 100000)
 				limit = &intel_limits_ironlake_single_lvds_100m;
 			else
 				limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@
 	return limit;
 }
 
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
 {
 	struct drm_device *dev = crtc->dev;
 	const intel_limit_t *limit;
 
 	if (HAS_PCH_SPLIT(dev))
-		limit = intel_ironlake_limit(crtc);
+		limit = intel_ironlake_limit(crtc, refclk);
 	else if (IS_G4X(dev)) {
 		limit = intel_g4x_limit(crtc);
 	} else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@
  * the given connectors.
  */
 
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+			       const intel_limit_t *limit,
+			       const intel_clock_t *clock)
 {
-	const intel_limit_t *limit = intel_limit (crtc);
-	struct drm_device *dev = crtc->dev;
-
 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 		INTELPllInvalid ("p1 out of range\n");
 	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
@@ -849,8 +845,8 @@
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
-
-					if (!intel_PLL_is_valid(crtc, &clock))
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
 						continue;
 
 					this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
-					if (!intel_PLL_is_valid(crtc, &clock))
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
 						continue;
-					this_err = abs(clock.dot - target) ;
+
+					this_err = abs(clock.dot - target);
 					if (this_err < err_most) {
 						*best_clock = clock;
 						err_most = this_err;
@@ -1066,13 +1064,13 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;
 
 	if (fb->pitch == dev_priv->cfb_pitch &&
-	    obj_priv->fence_reg == dev_priv->cfb_fence &&
+	    obj->fence_reg == dev_priv->cfb_fence &&
 	    intel_crtc->plane == dev_priv->cfb_plane &&
 	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
 		return;
@@ -1086,7 +1084,7 @@
 
 	/* FBC_CTL wants 64B units */
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-	dev_priv->cfb_fence = obj_priv->fence_reg;
+	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
 	plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
@@ -1096,7 +1094,7 @@
 
 	/* Set it up... */
 	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
+	if (obj->tiling_mode != I915_TILING_NONE)
 		fbc_ctl2 |= FBC_CTL_CPU_FENCE;
 	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
 	I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
+	if (obj->tiling_mode != I915_TILING_NONE)
 		fbc_ctl |= dev_priv->cfb_fence;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
@@ -1150,7 +1148,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 	unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-		    dev_priv->cfb_fence == obj_priv->fence_reg &&
+		    dev_priv->cfb_fence == obj->fence_reg &&
 		    dev_priv->cfb_plane == intel_crtc->plane &&
 		    dev_priv->cfb_y == crtc->y)
 			return;
@@ -1170,12 +1168,12 @@
 	}
 
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-	dev_priv->cfb_fence = obj_priv->fence_reg;
+	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
 	dev_priv->cfb_y = crtc->y;
 
 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+	if (obj->tiling_mode != I915_TILING_NONE) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
 		I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 	} else {
@@ -1221,7 +1219,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 	unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-		    dev_priv->cfb_fence == obj_priv->fence_reg &&
+		    dev_priv->cfb_fence == obj->fence_reg &&
 		    dev_priv->cfb_plane == intel_crtc->plane &&
-		    dev_priv->cfb_offset == obj_priv->gtt_offset &&
+		    dev_priv->cfb_offset == obj->gtt_offset &&
 		    dev_priv->cfb_y == crtc->y)
 			return;
 
@@ -1242,14 +1240,14 @@
 	}
 
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-	dev_priv->cfb_fence = obj_priv->fence_reg;
+	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
-	dev_priv->cfb_offset = obj_priv->gtt_offset;
+	dev_priv->cfb_offset = obj->gtt_offset;
 	dev_priv->cfb_y = crtc->y;
 
 	dpfc_ctl &= DPFC_RESERVED;
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+	if (obj->tiling_mode != I915_TILING_NONE) {
 		dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
 		I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 	} else {
@@ -1260,10 +1258,16 @@
 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-	I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
+	if (IS_GEN6(dev)) {
+		I915_WRITE(SNB_DPFC_CTL_SA,
+			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+	}
+
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 }
 
@@ -1345,7 +1349,7 @@
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -1384,9 +1388,9 @@
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
-	obj_priv = to_intel_bo(intel_fb->obj);
+	obj = intel_fb->obj;
 
-	if (intel_fb->obj->size > dev_priv->cfb_size) {
+	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
 		DRM_DEBUG_KMS("framebuffer too large, disabling "
 			      "compression\n");
 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@
 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
 		goto out_disable;
 	}
-	if (obj_priv->tiling_mode != I915_TILING_X) {
+	if (obj->tiling_mode != I915_TILING_X) {
 		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
 		dev_priv->no_fbc_reason = FBC_NOT_TILED;
 		goto out_disable;
@@ -1433,14 +1437,13 @@
 
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
-			   struct drm_gem_object *obj,
-			   bool pipelined)
+			   struct drm_i915_gem_object *obj,
+			   struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
 	int ret;
 
-	switch (obj_priv->tiling_mode) {
+	switch (obj->tiling_mode) {
 	case I915_TILING_NONE:
 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 			alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@
 		BUG();
 	}
 
-	ret = i915_gem_object_pin(obj, alignment);
+	ret = i915_gem_object_pin(obj, alignment, true);
 	if (ret)
 		return ret;
 
@@ -1474,9 +1477,8 @@
 	 * framebuffer compression.  For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-	    obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj, false);
+	if (obj->tiling_mode != I915_TILING_NONE) {
+		ret = i915_gem_object_get_fence(obj, pipelined, false);
 		if (ret)
 			goto err_unpin;
 	}
@@ -1497,8 +1499,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int plane = intel_crtc->plane;
 	unsigned long Start, Offset;
 	u32 dspcntr;
@@ -1515,7 +1516,6 @@
 
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
-	obj_priv = to_intel_bo(obj);
 
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@
 		return -EINVAL;
 	}
 	if (INTEL_INFO(dev)->gen >= 4) {
-		if (obj_priv->tiling_mode != I915_TILING_NONE)
+		if (obj->tiling_mode != I915_TILING_NONE)
 			dspcntr |= DISPPLANE_TILED;
 		else
 			dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@
 
 	I915_WRITE(reg, dspcntr);
 
-	Start = obj_priv->gtt_offset;
+	Start = obj->gtt_offset;
 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev,
 					 to_intel_framebuffer(crtc->fb)->obj,
-					 false);
+					 NULL);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -1606,18 +1606,17 @@
 
 	if (old_fb) {
 		struct drm_i915_private *dev_priv = dev->dev_private;
-		struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
-			   atomic_read(&obj_priv->pending_flip) == 0);
+			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
 		 */
-		ret = i915_gem_object_flush_gpu(obj_priv, false);
+		ret = i915_gem_object_flush_gpu(obj, false);
 		if (ret) {
 			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
 			mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@
 		return ret;
 	}
 
-	if (old_fb)
+	if (old_fb) {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+	}
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1996,31 +1997,31 @@
 static void intel_clear_scanline_wait(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	u32 tmp;
 
 	if (IS_GEN2(dev))
 		/* Can't break the hang on i8xx */
 		return;
 
-	tmp = I915_READ(PRB0_CTL);
-	if (tmp & RING_WAIT) {
-		I915_WRITE(PRB0_CTL, tmp);
-		POSTING_READ(PRB0_CTL);
-	}
+	ring = LP_RING(dev_priv);
+	tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT)
+		I915_WRITE_CTL(ring, tmp);
 }
 
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_i915_private *dev_priv;
 
 	if (crtc->fb == NULL)
 		return;
 
-	obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+	obj = to_intel_framebuffer(crtc->fb)->obj;
 	dev_priv = crtc->dev->dev_private;
 	wait_event(dev_priv->pending_flip_queue,
-		   atomic_read(&obj_priv->pending_flip) == 0);
+		   atomic_read(&obj->pending_flip) == 0);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2850,6 +2851,39 @@
 	ILK_FIFO_LINE_SIZE
 };
 
+static struct intel_watermark_params sandybridge_display_wm_info = {
+	SNB_DISPLAY_FIFO,
+	SNB_DISPLAY_MAXWM,
+	SNB_DISPLAY_DFTWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_wm_info = {
+	SNB_CURSOR_FIFO,
+	SNB_CURSOR_MAXWM,
+	SNB_CURSOR_DFTWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_display_srwm_info = {
+	SNB_DISPLAY_SR_FIFO,
+	SNB_DISPLAY_MAX_SRWM,
+	SNB_DISPLAY_DFT_SRWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+	SNB_CURSOR_SR_FIFO,
+	SNB_CURSOR_MAX_SRWM,
+	SNB_CURSOR_DFT_SRWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
@@ -3383,12 +3417,17 @@
 
 static bool ironlake_compute_wm0(struct drm_device *dev,
 				 int pipe,
+				 const struct intel_watermark_params *display,
+				 int display_latency_ns,
+				 const struct intel_watermark_params *cursor,
+				 int cursor_latency_ns,
 				 int *plane_wm,
 				 int *cursor_wm)
 {
 	struct drm_crtc *crtc;
-	int htotal, hdisplay, clock, pixel_size = 0;
-	int line_time_us, line_count, entries;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;
 
 	crtc = intel_get_crtc_for_pipe(dev, pipe);
 	if (crtc->fb == NULL || !crtc->enabled)
@@ -3400,37 +3439,141 @@
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
-	entries = DIV_ROUND_UP(entries,
-			       ironlake_display_wm_info.cacheline_size);
-	*plane_wm = entries + ironlake_display_wm_info.guard_size;
-	if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
-		*plane_wm = ironlake_display_wm_info.max_wm;
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, display->cacheline_size);
+	*plane_wm = entries + display->guard_size;
+	if (*plane_wm > (int)display->max_wm)
+		*plane_wm = display->max_wm;
 
 	/* Use the large buffer method to calculate cursor watermark */
 	line_time_us = ((htotal * 1000) / clock);
-	line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
 	entries = line_count * 64 * pixel_size;
-	entries = DIV_ROUND_UP(entries,
-			       ironlake_cursor_wm_info.cacheline_size);
-	*cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
-	if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
-		*cursor_wm = ironlake_cursor_wm_info.max_wm;
+	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+	if (*cursor_wm > (int)cursor->max_wm)
+		*cursor_wm = (int)cursor->max_wm;
 
 	return true;
 }
 
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+				int fbc_wm, int display_wm, int cursor_wm,
+				const struct intel_watermark_params *display,
+				const struct intel_watermark_params *cursor)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+	if (fbc_wm > SNB_FBC_MAX_SRWM) {
+		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+			      fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* fbc has its own way to disable FBC WM */
+		I915_WRITE(DISP_ARB_CTL,
+			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+		return false;
+	}
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+		return false;
+	}
+
+	if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Compute the watermark values for WM[1-3].
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level,
+				  int hdisplay, int htotal,
+				  int pixel_size, int clock, int latency_ns,
+				  const struct intel_watermark_params *display,
+				  const struct intel_watermark_params *cursor,
+				  int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*fbc_wm = *display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/*
+	 * Spec says:
+	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+	 */
+	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return ironlake_check_srwm(dev, level,
+				   *fbc_wm, *display_wm, *cursor_wm,
+				   display, cursor);
+}
+
 static void ironlake_update_wm(struct drm_device *dev,
 			       int planea_clock, int planeb_clock,
-			       int sr_hdisplay, int sr_htotal,
+			       int hdisplay, int htotal,
 			       int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int plane_wm, cursor_wm, enabled;
-	int tmp;
+	int fbc_wm, plane_wm, cursor_wm, enabled;
+	int clock;
 
 	enabled = 0;
-	if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+	if (ironlake_compute_wm0(dev, 0,
+				 &ironlake_display_wm_info,
+				 ILK_LP0_PLANE_LATENCY,
+				 &ironlake_cursor_wm_info,
+				 ILK_LP0_CURSOR_LATENCY,
+				 &plane_wm, &cursor_wm)) {
 		I915_WRITE(WM0_PIPEA_ILK,
 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3439,7 +3582,12 @@
 		enabled++;
 	}
 
-	if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+	if (ironlake_compute_wm0(dev, 1,
+				 &ironlake_display_wm_info,
+				 ILK_LP0_PLANE_LATENCY,
+				 &ironlake_cursor_wm_info,
+				 ILK_LP0_CURSOR_LATENCY,
+				 &plane_wm, &cursor_wm)) {
 		I915_WRITE(WM0_PIPEB_ILK,
 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3452,57 +3600,151 @@
 	 * Calculate and update the self-refresh watermark only when one
 	 * display plane is used.
 	 */
-	tmp = 0;
-	if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
-		unsigned long line_time_us;
-		int small, large, plane_fbc;
-		int sr_clock, entries;
-		int line_count, line_size;
-		/* Read the self-refresh latency. The unit is 0.5us */
-		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = (sr_htotal * 1000) / sr_clock;
+	if (enabled != 1)
+		return;
 
-		/* Use ns/us then divide to preserve precision */
-		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
-			/ 1000;
-		line_size = sr_hdisplay * pixel_size;
+	clock = planea_clock ? planea_clock : planeb_clock;
 
-		/* Use the minimum of the small and large buffer method for primary */
-		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
-		large = line_count * line_size;
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM1_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
 
-		entries = DIV_ROUND_UP(min(small, large),
-				       ironlake_display_srwm_info.cacheline_size);
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 
-		plane_fbc = entries * 64;
-		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM2_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
 
-		plane_wm = entries + ironlake_display_srwm_info.guard_size;
-		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
-			plane_wm = ironlake_display_srwm_info.max_wm;
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 
-		/* calculate the self-refresh watermark for display cursor */
-		entries = line_count * pixel_size * 64;
-		entries = DIV_ROUND_UP(entries,
-				       ironlake_cursor_srwm_info.cacheline_size);
+	/*
+	 * WM3 is unsupported on ILK, probably because we don't have latency
+	 * data for that power state
+	 */
+}
 
-		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
-		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
-			cursor_wm = ironlake_cursor_srwm_info.max_wm;
+static void sandybridge_update_wm(struct drm_device *dev,
+			       int planea_clock, int planeb_clock,
+			       int hdisplay, int htotal,
+			       int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	int fbc_wm, plane_wm, cursor_wm, enabled;
+	int clock;
 
-		/* configure watermark and enable self-refresh */
-		tmp = (WM1_LP_SR_EN |
-		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-		       (plane_fbc << WM1_LP_FBC_SHIFT) |
-		       (plane_wm << WM1_LP_SR_SHIFT) |
-		       cursor_wm);
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
-			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+	enabled = 0;
+	if (ironlake_compute_wm0(dev, 0,
+				 &sandybridge_display_wm_info, latency,
+				 &sandybridge_cursor_wm_info, latency,
+				 &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEA_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+			      " plane %d, " "cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled++;
 	}
-	I915_WRITE(WM1_LP_ILK, tmp);
-	/* XXX setup WM2 and WM3 */
+
+	if (ironlake_compute_wm0(dev, 1,
+				 &sandybridge_display_wm_info, latency,
+				 &sandybridge_cursor_wm_info, latency,
+				 &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEB_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled++;
+	}
+
+	/*
+	 * Calculate and update the self-refresh watermark only when one
+	 * display plane is used.
+	 *
+	 * SNB supports 3 levels of watermarks.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
+	 * and disabled in descending order.
+	 *
+	 */
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	if (enabled != 1)
+		return;
+
+	clock = planea_clock ? planea_clock : planeb_clock;
+
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM1_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM2_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM3 */
+	if (!ironlake_compute_srwm(dev, 3,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM3_LP_ILK,
+		   WM3_LP_EN |
+		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 }
 
 /**
@@ -3580,6 +3822,11 @@
 				    sr_hdisplay, sr_htotal, pixel_size);
 }
 
+static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+}
+
 static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode,
@@ -3642,7 +3889,7 @@
 		num_connectors++;
 	}
 
-	if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
+	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
 		refclk = dev_priv->lvds_ssc_freq * 1000;
 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
 			      refclk / 1000);
@@ -3660,7 +3907,7 @@
 	 * refclk, or FALSE.  The returned values represent the clock equation:
 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 	 */
-	limit = intel_limit(crtc);
+	limit = intel_limit(crtc, refclk);
 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3714,7 +3961,7 @@
 		int lane = 0, link_bw, bpp;
 		/* CPU eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
-		if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 			target_clock = mode->clock;
 			intel_edp_link_config(has_edp_encoder,
 					      &lane, &link_bw);
@@ -3817,7 +4064,7 @@
 		udelay(200);
 
 		if (has_edp_encoder) {
-			if (dev_priv->lvds_use_ssc) {
+			if (intel_panel_use_ssc(dev_priv)) {
 				temp |= DREF_SSC1_ENABLE;
 				I915_WRITE(PCH_DREF_CONTROL, temp);
 
@@ -3828,13 +4075,13 @@
 
 			/* Enable CPU source on CPU attached eDP */
 			if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-				if (dev_priv->lvds_use_ssc)
+				if (intel_panel_use_ssc(dev_priv))
 					temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
 				else
 					temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
 			} else {
 				/* Enable SSC on PCH eDP if needed */
-				if (dev_priv->lvds_use_ssc) {
+				if (intel_panel_use_ssc(dev_priv)) {
 					DRM_ERROR("enabling SSC on PCH\n");
 					temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
 				}
@@ -3857,6 +4104,22 @@
 				reduced_clock.m2;
 	}
 
+	/* Enable autotuning of the PLL clock (if permissible) */
+	if (HAS_PCH_SPLIT(dev)) {
+		int factor = 21;
+
+		if (is_lvds) {
+			if ((intel_panel_use_ssc(dev_priv) &&
+			     dev_priv->lvds_ssc_freq == 100) ||
+			    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+				factor = 25;
+		} else if (is_sdvo && is_tv)
+			factor = 20;
+
+		if (clock.m1 < factor * clock.n)
+			fp |= FP_CB_TUNE;
+	}
+
 	dpll = 0;
 	if (!HAS_PCH_SPLIT(dev))
 		dpll = DPLL_VGA_MODE_DIS;
@@ -3925,7 +4188,7 @@
 		/* XXX: just matching BIOS for now */
 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
 		dpll |= 3;
-	else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
+	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -4071,7 +4334,6 @@
 	}
 
 	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll);
 
 		/* Wait for the clocks to stabilize. */
@@ -4089,13 +4351,13 @@
 			}
 			I915_WRITE(DPLL_MD(pipe), temp);
 		} else {
-			/* write it again -- the BIOS does, after all */
+			/* The pixel multiplier can only be updated once the
+			 * DPLL is enabled and the clocks are stable.
+			 *
+			 * So write it again.
+			 */
 			I915_WRITE(dpll_reg, dpll);
 		}
-
-		/* Wait for the clocks to stabilize. */
-		POSTING_READ(dpll_reg);
-		udelay(150);
 	}
 
 	intel_crtc->lowfreq_avail = false;
@@ -4331,15 +4593,14 @@
 }
 
 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
-				 struct drm_file *file_priv,
+				 struct drm_file *file,
 				 uint32_t handle,
 				 uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_gem_object *bo;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	uint32_t addr;
 	int ret;
 
@@ -4349,7 +4610,7 @@
 	if (!handle) {
 		DRM_DEBUG_KMS("cursor off\n");
 		addr = 0;
-		bo = NULL;
+		obj = NULL;
 		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
@@ -4360,13 +4621,11 @@
 		return -EINVAL;
 	}
 
-	bo = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!bo)
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	if (!obj)
 		return -ENOENT;
 
-	obj_priv = to_intel_bo(bo);
-
-	if (bo->size < width * height * 4) {
+	if (obj->base.size < width * height * 4) {
 		DRM_ERROR("buffer is to small\n");
 		ret = -ENOMEM;
 		goto fail;
@@ -4375,29 +4634,41 @@
 	/* we only need to pin inside GTT if cursor is non-phy */
 	mutex_lock(&dev->struct_mutex);
 	if (!dev_priv->info->cursor_needs_physical) {
-		ret = i915_gem_object_pin(bo, PAGE_SIZE);
+		if (obj->tiling_mode) {
+			DRM_ERROR("cursor cannot be tiled\n");
+			ret = -EINVAL;
+			goto fail_locked;
+		}
+
+		ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
 			goto fail_locked;
 		}
 
-		ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+		ret = i915_gem_object_set_to_gtt_domain(obj, 0);
 		if (ret) {
 			DRM_ERROR("failed to move cursor bo into the GTT\n");
 			goto fail_unpin;
 		}
 
-		addr = obj_priv->gtt_offset;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret) {
+			DRM_ERROR("failed to move cursor bo into the GTT\n");
+			goto fail_unpin;
+		}
+
+		addr = obj->gtt_offset;
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, bo,
+		ret = i915_gem_attach_phys_object(dev, obj,
 						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
 						  align);
 		if (ret) {
 			DRM_ERROR("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj_priv->phys_obj->handle->busaddr;
+		addr = obj->phys_obj->handle->busaddr;
 	}
 
 	if (IS_GEN2(dev))
@@ -4406,17 +4677,17 @@
  finish:
 	if (intel_crtc->cursor_bo) {
 		if (dev_priv->info->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != bo)
+			if (intel_crtc->cursor_bo != obj)
 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
 			i915_gem_object_unpin(intel_crtc->cursor_bo);
-		drm_gem_object_unreference(intel_crtc->cursor_bo);
+		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc->cursor_addr = addr;
-	intel_crtc->cursor_bo = bo;
+	intel_crtc->cursor_bo = obj;
 	intel_crtc->cursor_width = width;
 	intel_crtc->cursor_height = height;
 
@@ -4424,11 +4695,11 @@
 
 	return 0;
 fail_unpin:
-	i915_gem_object_unpin(bo);
+	i915_gem_object_unpin(obj);
 fail_locked:
 	mutex_unlock(&dev->struct_mutex);
 fail:
-	drm_gem_object_unreference_unlocked(bo);
+	drm_gem_object_unreference_unlocked(&obj->base);
 	return ret;
 }
 
@@ -4739,8 +5010,14 @@
 	struct drm_device *dev = (struct drm_device *)arg;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	dev_priv->busy = false;
+	if (!list_empty(&dev_priv->mm.active_list)) {
+		/* Still processing requests, so just re-arm the timer. */
+		mod_timer(&dev_priv->idle_timer, jiffies +
+			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+		return;
+	}
 
+	dev_priv->busy = false;
 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
@@ -4751,9 +5028,17 @@
 	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+	struct intel_framebuffer *intel_fb;
+
+	intel_fb = to_intel_framebuffer(crtc->fb);
+	if (intel_fb && intel_fb->obj->active) {
+		/* The framebuffer is still being accessed by the GPU. */
+		mod_timer(&intel_crtc->idle_timer, jiffies +
+			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+		return;
+	}
 
 	intel_crtc->busy = false;
-
 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
@@ -4763,8 +5048,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dpll = I915_READ(dpll_reg);
+	int dpll_reg = DPLL(pipe);
+	int dpll;
 
 	if (HAS_PCH_SPLIT(dev))
 		return;
@@ -4772,17 +5057,19 @@
 	if (!dev_priv->lvds_downclock_avail)
 		return;
 
+	dpll = I915_READ(dpll_reg);
 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
 		/* Unlock panel regs */
-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
-			   PANEL_UNLOCK_REGS);
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
 
 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
-		dpll = I915_READ(dpll_reg);
+		POSTING_READ(dpll_reg);
 		intel_wait_for_vblank(dev, pipe);
+
 		dpll = I915_READ(dpll_reg);
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4888,7 +5175,7 @@
  * buffer), we'll also mark the display as busy, so we know to increase its
  * clock frequency.
  */
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
@@ -4969,8 +5256,9 @@
 
 	mutex_lock(&work->dev->struct_mutex);
 	i915_gem_object_unpin(work->old_fb_obj);
-	drm_gem_object_unreference(work->pending_flip_obj);
-	drm_gem_object_unreference(work->old_fb_obj);
+	drm_gem_object_unreference(&work->pending_flip_obj->base);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -4981,15 +5269,17 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_pending_vblank_event *e;
-	struct timeval now;
+	struct timeval tnow, tvbl;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
 	if (intel_crtc == NULL)
 		return;
 
+	do_gettimeofday(&tnow);
+
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
@@ -4998,26 +5288,49 @@
 	}
 
 	intel_crtc->unpin_work = NULL;
-	drm_vblank_put(dev, intel_crtc->pipe);
 
 	if (work->event) {
 		e = work->event;
-		do_gettimeofday(&now);
-		e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+		/* Called before vblank count and timestamps have
+		 * been updated for the vblank interval of flip
+		 * completion? Need to increment vblank count and
+		 * add one video refresh duration to returned timestamp
+		 * to account for this. We assume this happened if we
+		 * get called over 0.9 frame durations after the last
+		 * timestamped vblank.
+		 *
+		 * This calculation cannot be used with vrefresh rates
+		 * below 5Hz (10Hz to be on the safe side) without
+		 * promoting to 64-bit integers.
+		 */
+		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+		    9 * crtc->framedur_ns) {
+			e->event.sequence++;
+			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+					     crtc->framedur_ns);
+		}
+
+		e->event.tv_sec = tvbl.tv_sec;
+		e->event.tv_usec = tvbl.tv_usec;
+
 		list_add_tail(&e->base.link,
 			      &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
 	}
 
+	drm_vblank_put(dev, intel_crtc->pipe);
+
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = to_intel_bo(work->old_fb_obj);
+	obj = work->old_fb_obj;
+
 	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj_priv->pending_flip.counter);
-	if (atomic_read(&obj_priv->pending_flip) == 0)
+			  &obj->pending_flip.counter);
+	if (atomic_read(&obj->pending_flip) == 0)
 		wake_up(&dev_priv->pending_flip_queue);
+
 	schedule_work(&work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5063,8 +5376,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags, offset;
@@ -5098,13 +5410,13 @@
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 	if (ret)
 		goto cleanup_work;
 
 	/* Reference the objects for the scheduled work. */
-	drm_gem_object_reference(work->old_fb_obj);
-	drm_gem_object_reference(obj);
+	drm_gem_object_reference(&work->old_fb_obj->base);
+	drm_gem_object_reference(&obj->base);
 
 	crtc->fb = fb;
 
@@ -5112,22 +5424,16 @@
 	if (ret)
 		goto cleanup_objs;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane,
-		   &to_intel_bo(work->old_fb_obj)->pending_flip);
-
-	work->pending_flip_obj = obj;
-	obj_priv = to_intel_bo(obj);
-
 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;
 
 		/* Can't queue multiple flips, so wait for the previous
 		 * one to finish before executing the next.
 		 */
-		BEGIN_LP_RING(2);
+		ret = BEGIN_LP_RING(2);
+		if (ret)
+			goto cleanup_objs;
+
 		if (intel_crtc->plane)
 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 		else
@@ -5137,18 +5443,28 @@
 		ADVANCE_LP_RING();
 	}
 
+	work->pending_flip_obj = obj;
+
 	work->enable_stall_check = true;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
-	BEGIN_LP_RING(4);
-	switch(INTEL_INFO(dev)->gen) {
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto cleanup_objs;
+
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+	switch (INTEL_INFO(dev)->gen) {
 	case 2:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset + offset);
+		OUT_RING(obj->gtt_offset + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -5156,7 +5472,7 @@
 		OUT_RING(MI_DISPLAY_FLIP_I915 |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset + offset);
+		OUT_RING(obj->gtt_offset + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -5169,7 +5485,7 @@
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+		OUT_RING(obj->gtt_offset | obj->tiling_mode);
 
 		/* XXX Enabling the panel-fitter across page-flip is so far
 		 * untested on non-native modes, so ignore it for now.
@@ -5183,8 +5499,8 @@
 	case 6:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch | obj_priv->tiling_mode);
-		OUT_RING(obj_priv->gtt_offset);
+		OUT_RING(fb->pitch | obj->tiling_mode);
+		OUT_RING(obj->gtt_offset);
 
 		pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5200,8 +5516,8 @@
 	return 0;
 
 cleanup_objs:
-	drm_gem_object_unreference(work->old_fb_obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+	drm_gem_object_unreference(&obj->base);
 cleanup_work:
 	mutex_unlock(&dev->struct_mutex);
 
@@ -5338,7 +5654,7 @@
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5498,6 +5814,8 @@
 		encoder->base.possible_clones =
 			intel_encoder_clones(dev, encoder->clone_mask);
 	}
+
+	intel_panel_setup_backlight(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -5505,19 +5823,19 @@
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
 	drm_framebuffer_cleanup(fb);
-	drm_gem_object_unreference_unlocked(intel_fb->obj);
+	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
 
 	kfree(intel_fb);
 }
 
 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-						struct drm_file *file_priv,
+						struct drm_file *file,
 						unsigned int *handle)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_gem_object *object = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 
-	return drm_gem_handle_create(file_priv, object, handle);
+	return drm_gem_handle_create(file, &obj->base, handle);
 }
 
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5528,12 +5846,11 @@
 int intel_framebuffer_init(struct drm_device *dev,
 			   struct intel_framebuffer *intel_fb,
 			   struct drm_mode_fb_cmd *mode_cmd,
-			   struct drm_gem_object *obj)
+			   struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	if (obj->tiling_mode == I915_TILING_Y)
 		return -EINVAL;
 
 	if (mode_cmd->pitch & 63)
@@ -5565,11 +5882,11 @@
 			      struct drm_file *filp,
 			      struct drm_mode_fb_cmd *mode_cmd)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct intel_framebuffer *intel_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
 	if (!obj)
 		return ERR_PTR(-ENOENT);
 
@@ -5577,10 +5894,9 @@
 	if (!intel_fb)
 		return ERR_PTR(-ENOMEM);
 
-	ret = intel_framebuffer_init(dev, intel_fb,
-				     mode_cmd, obj);
+	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
 	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_unreference_unlocked(&obj->base);
 		kfree(intel_fb);
 		return ERR_PTR(ret);
 	}
@@ -5593,10 +5909,10 @@
 	.output_poll_changed = intel_fb_output_poll_changed,
 };
 
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
 intel_alloc_context_page(struct drm_device *dev)
 {
-	struct drm_gem_object *ctx;
+	struct drm_i915_gem_object *ctx;
 	int ret;
 
 	ctx = i915_gem_alloc_object(dev, 4096);
@@ -5606,7 +5922,7 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_object_pin(ctx, 4096);
+	ret = i915_gem_object_pin(ctx, 4096, true);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
@@ -5624,7 +5940,7 @@
 err_unpin:
 	i915_gem_object_unpin(ctx);
 err_unref:
-	drm_gem_object_unreference(ctx);
+	drm_gem_object_unreference(&ctx->base);
 	mutex_unlock(&dev->struct_mutex);
 	return NULL;
 }
@@ -5736,6 +6052,25 @@
 
 }
 
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 swreq;
+
+	swreq = (val & 0x3ff) << 25;
+	I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, 0);
+	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
 static unsigned long intel_pxfreq(u32 vidfreq)
 {
 	unsigned long freq;
@@ -5822,7 +6157,123 @@
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 pcu_mbox;
+	int cur_freq, min_freq, max_freq;
+	int i;
+
+	/* Here begins a magic sequence of register writes to enable
+	 * auto-downclocking.
+	 *
+	 * Perhaps there might be some value in exposing these to
+	 * userspace...
+	 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+	__gen6_force_wake_get(dev_priv);
+
+	/* disable the counters and set deterministic thresholds */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	I915_WRITE(GEN6_RC_CONTROL,
+		   GEN6_RC_CTL_RC6p_ENABLE |
+		   GEN6_RC_CTL_RC6_ENABLE |
+		   GEN6_RC_CTL_EI_MODE(1) |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(10) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   GEN6_FREQUENCY(12));
+
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   18 << 24 |
+		   6 << 16);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RP_UP_EI, 100000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_USE_NORMAL_FREQ |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_MAX |
+		   GEN6_RP_DOWN_BUSY_MIN);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+	I915_WRITE(GEN6_PCODE_MAILBOX,
+		   GEN6_PCODE_READY |
+		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+	min_freq = (rp_state_cap & 0xff0000) >> 16;
+	max_freq = rp_state_cap & 0xff;
+	cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+	/* Check for overclock support */
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+	if (pcu_mbox & (1<<31)) { /* OC supported */
+		max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 100);
+	}
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = max_freq;
+	dev_priv->min_delay = min_freq;
+	dev_priv->cur_delay = cur_freq;
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER,
+		   GEN6_PM_MBOX_EVENT |
+		   GEN6_PM_THERMAL_EVENT |
+		   GEN6_PM_RP_DOWN_TIMEOUT |
+		   GEN6_PM_RP_UP_THRESHOLD |
+		   GEN6_PM_RP_DOWN_THRESHOLD |
+		   GEN6_PM_RP_UP_EI_EXPIRED |
+		   GEN6_PM_RP_DOWN_EI_EXPIRED);
+	I915_WRITE(GEN6_PMIMR, 0);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	__gen6_force_wake_put(dev_priv);
+}
+
+void intel_enable_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -5872,9 +6323,9 @@
 			I915_WRITE(DISP_ARB_CTL,
 					(I915_READ(DISP_ARB_CTL) |
 						DISP_FBC_WM_DIS));
-		I915_WRITE(WM3_LP_ILK, 0);
-		I915_WRITE(WM2_LP_ILK, 0);
-		I915_WRITE(WM1_LP_ILK, 0);
+			I915_WRITE(WM3_LP_ILK, 0);
+			I915_WRITE(WM2_LP_ILK, 0);
+			I915_WRITE(WM1_LP_ILK, 0);
 		}
 		/*
 		 * Based on the document from hardware guys the following bits
@@ -5896,7 +6347,49 @@
 				   ILK_DPFC_DIS2 |
 				   ILK_CLK_FBC);
 		}
-		return;
+
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_ELPIN_409_SELECT);
+
+		if (IS_GEN5(dev)) {
+			I915_WRITE(_3D_CHICKEN2,
+				   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+				   _3D_CHICKEN2_WM_READ_PIPELINED);
+		}
+
+		if (IS_GEN6(dev)) {
+			I915_WRITE(WM3_LP_ILK, 0);
+			I915_WRITE(WM2_LP_ILK, 0);
+			I915_WRITE(WM1_LP_ILK, 0);
+
+			/*
+			 * According to the spec the following bits should be
+			 * set in order to enable memory self-refresh and fbc:
+			 * The bit21 and bit22 of 0x42000
+			 * The bit21 and bit22 of 0x42004
+			 * The bit5 and bit7 of 0x42020
+			 * The bit14 of 0x70180
+			 * The bit14 of 0x71180
+			 */
+			I915_WRITE(ILK_DISPLAY_CHICKEN1,
+				   I915_READ(ILK_DISPLAY_CHICKEN1) |
+				   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+			I915_WRITE(ILK_DISPLAY_CHICKEN2,
+				   I915_READ(ILK_DISPLAY_CHICKEN2) |
+				   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+			I915_WRITE(ILK_DSPCLK_GATE,
+				   I915_READ(ILK_DSPCLK_GATE) |
+				   ILK_DPARB_CLK_GATE  |
+				   ILK_DPFD_CLK_GATE);
+
+			I915_WRITE(DSPACNTR,
+				   I915_READ(DSPACNTR) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
+			I915_WRITE(DSPBCNTR,
+				   I915_READ(DSPBCNTR) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
+		}
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
 		I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5934,55 +6427,84 @@
 	} else if (IS_I830(dev)) {
 		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 	}
+}
+
+void intel_disable_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx) {
+		struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+		I915_WRITE(CCID, 0);
+		POSTING_READ(CCID);
+
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(&obj->base);
+		dev_priv->renderctx = NULL;
+	}
+
+	if (dev_priv->pwrctx) {
+		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(&obj->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+		 10);
+	POSTING_READ(CCID);
+	I915_WRITE(PWRCTXA, 0);
+	POSTING_READ(PWRCTXA);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+	POSTING_READ(RSTDBYCTL);
+	i915_gem_object_unpin(dev_priv->renderctx);
+	drm_gem_object_unreference(&dev_priv->renderctx->base);
+	dev_priv->renderctx = NULL;
+	i915_gem_object_unpin(dev_priv->pwrctx);
+	drm_gem_object_unreference(&dev_priv->pwrctx->base);
+	dev_priv->pwrctx = NULL;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	if (IS_IRONLAKE_M(dev)) {
-		if (dev_priv->renderctx == NULL)
-			dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (dev_priv->renderctx) {
-			struct drm_i915_gem_object *obj_priv;
-			obj_priv = to_intel_bo(dev_priv->renderctx);
-			if (obj_priv) {
-				BEGIN_LP_RING(4);
-				OUT_RING(MI_SET_CONTEXT);
-				OUT_RING(obj_priv->gtt_offset |
-						MI_MM_SPACE_GTT |
-						MI_SAVE_EXT_STATE_EN |
-						MI_RESTORE_EXT_STATE_EN |
-						MI_RESTORE_INHIBIT);
-				OUT_RING(MI_NOOP);
-				OUT_RING(MI_FLUSH);
-				ADVANCE_LP_RING();
-			}
-		} else
-			DRM_DEBUG_KMS("Failed to allocate render context."
-				       "Disable RC6\n");
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		ironlake_disable_rc6(dev);
+		return;
 	}
+	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	OUT_RING(MI_SET_CONTEXT);
+	OUT_RING(dev_priv->renderctx->gtt_offset |
+		 MI_MM_SPACE_GTT |
+		 MI_SAVE_EXT_STATE_EN |
+		 MI_RESTORE_EXT_STATE_EN |
+		 MI_RESTORE_INHIBIT);
+	OUT_RING(MI_SUSPEND_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_FLUSH);
+	ADVANCE_LP_RING();
 
-	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
-		struct drm_i915_gem_object *obj_priv = NULL;
-
-		if (dev_priv->pwrctx) {
-			obj_priv = to_intel_bo(dev_priv->pwrctx);
-		} else {
-			struct drm_gem_object *pwrctx;
-
-			pwrctx = intel_alloc_context_page(dev);
-			if (pwrctx) {
-				dev_priv->pwrctx = pwrctx;
-				obj_priv = to_intel_bo(pwrctx);
-			}
-		}
-
-		if (obj_priv) {
-			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-			I915_WRITE(MCHBAR_RENDER_STANDBY,
-				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
-		}
-	}
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
 /* Set up chip specific display functions */
@@ -5997,7 +6519,7 @@
 		dev_priv->display.dpms = i9xx_crtc_dpms;
 
 	if (I915_HAS_FBC(dev)) {
-		if (IS_IRONLAKE_M(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 			dev_priv->display.enable_fbc = ironlake_enable_fbc;
 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -6046,6 +6568,14 @@
 					      "Disable CxSR\n");
 				dev_priv->display.update_wm = NULL;
 			}
+		} else if (IS_GEN6(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_PINEVIEW(dev)) {
@@ -6191,12 +6721,7 @@
 		dev->mode_config.max_width = 8192;
 		dev->mode_config.max_height = 8192;
 	}
-
-	/* set memory base */
-	if (IS_GEN2(dev))
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
-	else
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+	dev->mode_config.fb_base = dev->agp->base;
 
 	if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
@@ -6211,7 +6736,7 @@
 
 	intel_setup_outputs(dev);
 
-	intel_init_clock_gating(dev);
+	intel_enable_clock_gating(dev);
 
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
@@ -6221,6 +6746,24 @@
 		intel_init_emon(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_enable_rps(dev_priv);
+
+	if (IS_IRONLAKE_M(dev)) {
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+		if (!dev_priv->renderctx)
+			goto skip_rc6;
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+		if (!dev_priv->pwrctx) {
+			i915_gem_object_unpin(dev_priv->renderctx);
+			drm_gem_object_unreference(&dev_priv->renderctx->base);
+			dev_priv->renderctx = NULL;
+			goto skip_rc6;
+		}
+		ironlake_enable_rc6(dev);
+	}
+
+skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
@@ -6252,28 +6795,13 @@
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
-	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = to_intel_bo(dev_priv->renderctx);
-		I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
-		I915_READ(CCID);
-		i915_gem_object_unpin(dev_priv->renderctx);
-		drm_gem_object_unreference(dev_priv->renderctx);
-	}
-
-	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = to_intel_bo(dev_priv->pwrctx);
-		I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
-		I915_READ(PWRCTXA);
-		i915_gem_object_unpin(dev_priv->pwrctx);
-		drm_gem_object_unreference(dev_priv->pwrctx);
-	}
-
 	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
+	if (IS_GEN6(dev))
+		gen6_disable_rps(dev);
+
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_rc6(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -6325,3 +6853,113 @@
 	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
 	return 0;
 }
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+	struct intel_cursor_error_state {
+		u32 control;
+		u32 position;
+		u32 base;
+		u32 size;
+	} cursor[2];
+
+	struct intel_pipe_error_state {
+		u32 conf;
+		u32 source;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} pipe[2];
+
+	struct intel_plane_error_state {
+		u32 control;
+		u32 stride;
+		u32 size;
+		u32 pos;
+		u32 addr;
+		u32 surface;
+		u32 tile_offset;
+	} plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_display_error_state *error;
+	int i;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	for (i = 0; i < 2; i++) {
+		error->cursor[i].control = I915_READ(CURCNTR(i));
+		error->cursor[i].position = I915_READ(CURPOS(i));
+		error->cursor[i].base = I915_READ(CURBASE(i));
+
+		error->plane[i].control = I915_READ(DSPCNTR(i));
+		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+		error->plane[i].size = I915_READ(DSPSIZE(i));
+		error->plane[i].pos = I915_READ(DSPPOS(i));
+		error->plane[i].addr = I915_READ(DSPADDR(i));
+		if (INTEL_INFO(dev)->gen >= 4) {
+			error->plane[i].surface = I915_READ(DSPSURF(i));
+			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+		}
+
+		error->pipe[i].conf = I915_READ(PIPECONF(i));
+		error->pipe[i].source = I915_READ(PIPESRC(i));
+		error->pipe[i].htotal = I915_READ(HTOTAL(i));
+		error->pipe[i].hblank = I915_READ(HBLANK(i));
+		error->pipe[i].hsync = I915_READ(HSYNC(i));
+		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+		error->pipe[i].vblank = I915_READ(VBLANK(i));
+		error->pipe[i].vsync = I915_READ(VSYNC(i));
+	}
+
+	return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+				struct drm_device *dev,
+				struct intel_display_error_state *error)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		seq_printf(m, "Pipe [%d]:\n", i);
+		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
+		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
+		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
+		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
+		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
+		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
+		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
+
+		seq_printf(m, "Plane [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+		}
+
+		seq_printf(m, "Cursor [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+	}
+}
+#endif
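
The CONFIG_DEBUG_FS block above only defines the capture and print halves; wiring them together is left to the error-state code elsewhere in the driver. A hypothetical caller, just to show the intended split between atomic-time capture and later formatting (the snapshot variable and both example_* functions are invented for this sketch, not actual i915 hooks):

/*
 * Illustrative only: capture a snapshot when an error or hang is
 * noticed (GFP_ATOMIC allocation, so this is safe from interrupt
 * context) and dump it later from a debugfs read.
 */
static struct intel_display_error_state *display_snapshot;

static void example_capture_on_error(struct drm_device *dev)
{
	display_snapshot = intel_display_capture_error_state(dev);
}

static void example_dump(struct seq_file *m, struct drm_device *dev)
{
	if (display_snapshot)
		intel_display_print_error_state(m, dev, display_snapshot);
}
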
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 864417c..1f4242b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1153,18 +1153,27 @@
 static uint32_t
 intel_gen6_edp_signal_levels(uint8_t train_set)
 {
-	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
 	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
 	}
 }
 
@@ -1334,17 +1343,24 @@
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
-	int tries;
+	int tries, cr_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
 	tries = 0;
+	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
 		uint32_t    signal_levels;
 
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			intel_dp_link_down(intel_dp);
+			break;
+		}
+
 		if (IS_GEN6(dev) && is_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
@@ -1367,14 +1383,26 @@
 		if (!intel_dp_get_link_status(intel_dp))
 			break;
 
+		/* Make sure clock is still ok */
+		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			intel_dp_start_link_train(intel_dp);
+			cr_tries++;
+			continue;
+		}
+
 		if (intel_channel_eq_ok(intel_dp)) {
 			channel_eq = true;
 			break;
 		}
 
-		/* Try 5 times */
-		if (tries > 5)
-			break;
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_down(intel_dp);
+			intel_dp_start_link_train(intel_dp);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
 
 		/* Compute new intel_dp->train_set as requested by target */
 		intel_get_adjust_train(intel_dp);
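
The cr_tries changes above add a second level of retry around channel equalization. Below is a condensed, self-contained sketch of that control flow; the fake_link struct and the two *_ok() stubs only simulate link status and stand in for reading the DPCD:

#include <stdbool.h>

/*
 * Condensed model of the retry policy added above: retry channel
 * equalization up to 5 times, fall back to redoing clock recovery when
 * CR is lost or EQ keeps failing, and give up after 5 clock-recovery
 * restarts.  fake_link merely simulates hardware that succeeds after a
 * given number of attempts.
 */
struct fake_link {
	int cr_ok_after, eq_ok_after, attempts;
};

static bool clock_recovery_ok(const struct fake_link *l)
{
	return l->attempts >= l->cr_ok_after;
}

static bool channel_eq_ok(const struct fake_link *l)
{
	return l->attempts >= l->eq_ok_after;
}

static bool train_link(struct fake_link *link)
{
	int tries = 0, cr_tries = 0;

	for (;;) {
		if (cr_tries > 5)
			return false;	/* training failed, abort */

		link->attempts++;	/* stands in for writing a training pattern */

		if (!clock_recovery_ok(link)) {
			cr_tries++;	/* CR lost: start link training over */
			continue;
		}
		if (channel_eq_ok(link))
			return true;	/* link trained */
		if (++tries > 5) {
			tries = 0;	/* EQ stuck: redo clock recovery */
			cr_tries++;
		}
	}
}
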
@@ -1442,8 +1470,7 @@
 		/* Changes to enable or select take place the vblank
 		 * after being written.
 		 */
-		intel_wait_for_vblank(intel_dp->base.base.dev,
-				      intel_crtc->pipe);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 	}
 
 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e52c612..74db255 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@
 
 struct intel_framebuffer {
 	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 };
 
 struct intel_fbdev {
@@ -166,7 +166,7 @@
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
 
-	struct drm_gem_object *cursor_bo;
+	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
 	int16_t cursor_x, cursor_y;
 	int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *old_fb_obj;
-	struct drm_gem_object *pending_flip_obj;
+	struct drm_i915_gem_object *old_fb_obj;
+	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 	bool enable_stall_check;
@@ -236,7 +236,8 @@
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev,
+			    struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
@@ -256,6 +257,9 @@
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
 
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -293,19 +297,22 @@
 				    u16 blue, int regno);
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
 extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_gem_object *obj,
-				      bool pipelined);
+				      struct drm_i915_gem_object *obj,
+				      struct intel_ring_buffer *pipelined);
 
 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd *mode_cmd,
-				  struct drm_gem_object *obj);
+				  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index af2a1dd..5127827 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -62,13 +62,13 @@
 			  struct drm_fb_helper_surface_size *sizes)
 {
 	struct drm_device *dev = ifbdev->helper.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd mode_cmd;
-	struct drm_gem_object *fbo = NULL;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct device *device = &dev->pdev->dev;
-	int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	int size, ret;
 
 	/* we don't do packed 24bpp */
 	if (sizes->surface_bpp == 24)
@@ -78,23 +78,22 @@
 	mode_cmd.height = sizes->surface_height;
 
 	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
 	mode_cmd.depth = sizes->surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
-	fbo = i915_gem_alloc_object(dev, size);
-	if (!fbo) {
+	obj = i915_gem_alloc_object(dev, size);
+	if (!obj) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = -ENOMEM;
 		goto out;
 	}
-	obj_priv = to_intel_bo(fbo);
 
 	mutex_lock(&dev->struct_mutex);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
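
The pitch change in the hunk above, from (bpp + 1) / 8 to (bpp + 7) / 8, is the usual round-bits-up-to-bytes idiom; the old expression under-allocates whenever bpp is more than one bit short of a byte boundary. A quick standalone check:

#include <stdio.h>

/* Compare the old and new bytes-per-pixel rounding for a few depths. */
int main(void)
{
	int bpp;

	for (bpp = 8; bpp <= 32; bpp += 2)
		printf("bpp=%2d  old=(bpp+1)/8 -> %d  new=(bpp+7)/8 -> %d\n",
		       bpp, (bpp + 1) / 8, (bpp + 7) / 8);
	return 0;
}
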
@@ -108,7 +107,7 @@
 
 	info->par = ifbdev;
 
-	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
 	if (ret)
 		goto out_unpin;
 
@@ -122,6 +121,11 @@
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {
@@ -129,26 +133,17 @@
 		goto out_unpin;
 	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	if (!IS_GEN2(dev))
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
-	else
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+	info->apertures->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-	info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
 	info->fix.smem_len = size;
 
-	info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
-				       size);
+	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
 		goto out_unpin;
 	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	info->screen_size = size;
 
 //	memset(info->screen_base, 0, size);
@@ -156,10 +151,6 @@
 	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
-	info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
 	info->pixmap.size = 64*1024;
 	info->pixmap.buf_align = 8;
 	info->pixmap.access_align = 32;
@@ -168,7 +159,7 @@
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
 		      fb->width, fb->height,
-		      obj_priv->gtt_offset, fbo);
+		      obj->gtt_offset, obj);
 
 
 	mutex_unlock(&dev->struct_mutex);
@@ -176,9 +167,9 @@
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(fbo);
+	i915_gem_object_unpin(obj);
 out_unref:
-	drm_gem_object_unreference(fbo);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
@@ -225,7 +216,7 @@
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_unreference_unlocked(ifb->obj);
+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
 		ifb->obj = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086..58040f6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@
 
 	/* On most chips, these bits must be preserved in software. */
 	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+		reserved = I915_READ_NOTRACE(gpio->reg) &
+					     (GPIO_DATA_PULLUP_DISABLE |
+					      GPIO_CLOCK_PULLUP_DISABLE);
 
 	return reserved;
 }
@@ -96,9 +97,9 @@
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)
@@ -106,9 +107,9 @@
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			GPIO_CLOCK_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | clock_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
 	POSTING_READ(gpio->reg);
 }
 
@@ -141,7 +142,7 @@
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			GPIO_DATA_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | data_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
 	POSTING_READ(gpio->reg);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 25bcedf..ace8d5d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
 	POSTING_READ(lvds_reg);
 
-	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	intel_panel_enable_backlight(dev);
 }
 
 static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -123,8 +123,7 @@
 		lvds_reg = LVDS;
 	}
 
-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-	intel_panel_set_backlight(dev, 0);
+	intel_panel_disable_backlight(dev);
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 
@@ -304,14 +303,13 @@
 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
-			pfit_control |= PFIT_ENABLE;
 			/* 965+ is easy, it does everything in hw */
 			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_SCALING_PILLAR;
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
 			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_SCALING_LETTER;
-			else
-				pfit_control |= PFIT_SCALING_AUTO;
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+			else if (adjusted_mode->hdisplay != mode->hdisplay)
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
 		} else {
 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
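
The pillarbox/letterbox selection in the hunk above compares aspect ratios by cross-multiplying (panel width * mode height versus mode width * panel height), which avoids any division. A tiny worked example with hypothetical mode and panel sizes:

#include <stdio.h>

/*
 * Hypothetical numbers: a 1024x768 (4:3) mode on a 1280x800 (16:10)
 * panel.  Cross-multiplying compares 1280/800 with 1024/768 without
 * dividing.
 */
int main(void)
{
	unsigned int panel_w = 1280, panel_h = 800;	/* adjusted_mode */
	unsigned int mode_w = 1024, mode_h = 768;	/* requested mode */
	unsigned int scaled_width = panel_w * mode_h;	/* 983040 */
	unsigned int scaled_height = mode_w * panel_h;	/* 819200 */

	if (scaled_width > scaled_height)
		printf("pillarbox (bars left/right)\n");
	else if (scaled_width < scaled_height)
		printf("letterbox (bars top/bottom)\n");
	else
		printf("aspect ratios match, no scaling borders\n");
	return 0;
}
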
@@ -358,13 +356,17 @@
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		pfit_control |= PFIT_ENABLE;
-		if (INTEL_INFO(dev)->gen >= 4)
-			pfit_control |= PFIT_SCALING_AUTO;
-		else
-			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-					 VERT_INTERP_BILINEAR |
-					 HORIZ_INTERP_BILINEAR);
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_AUTO_SCALE |
+						 HORIZ_INTERP_BILINEAR);
+		}
 		break;
 
 	default:
@@ -372,6 +374,10 @@
 	}
 
 out:
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;
@@ -395,8 +401,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
 	/* We try to do the minimum that is necessary in order to unlock
 	 * the registers for mode setting.
 	 *
@@ -427,9 +431,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-	if (dev_priv->backlight_level == 0)
-		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
-
 	/* Undo any unlocking done in prepare to prevent accidental
 	 * adjustment of the registers.
 	 */
@@ -703,6 +704,14 @@
 	},
 	{
 		.callback = intel_no_lvds_dmi_callback,
+		.ident = "AOpen i915GMm-HFS",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+			DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
+		},
+	},
+	{
+		.callback = intel_no_lvds_dmi_callback,
 		.ident = "Aopen i945GTt-VFA",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
@@ -914,6 +923,8 @@
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 1);
+	if (INTEL_INFO(dev)->gen >= 5)
+		intel_encoder->crtc_mask |= (1 << 0);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1019,10 +1030,18 @@
 out:
 	if (HAS_PCH_SPLIT(dev)) {
 		u32 pwm;
-		/* make sure PWM is enabled */
+
+		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+		/* make sure PWM is enabled and locked to the LVDS pipe */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
-		pwm |= (PWM_ENABLE | PWM_PIPE_B);
-		I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+		if (pipe == 0 && (pwm & PWM_PIPE_B))
+			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+		if (pipe)
+			pwm |= PWM_PIPE_B;
+		else
+			pwm &= ~PWM_PIPE_B;
+		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
 
 		pwm = I915_READ(BLC_PWM_PCH_CTL1);
 		pwm |= PWM_PCH_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a8..f295a7a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 
 	if (asle) {
-		if (IS_MOBILE(dev)) {
-			unsigned long irqflags;
-
-			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
-			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-					       irqflags);
-		}
 
 		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
 			ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a4..3fbb98b 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-	if (overlay->last_flip_req == 0)
-		return -ENOMEM;
-
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
 	ret = i915_do_wait_request(dev,
 				   overlay->last_flip_req, true,
-				   &dev_priv->render_ring);
+				   LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -289,6 +290,7 @@
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -308,7 +310,12 @@
 		goto out;
 	}
 
-	BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(4);
+	if (ret) {
+		kfree(request);
+		goto out;
+	}
+
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -347,36 +355,44 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	overlay->last_flip_req = request->seqno;
 	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
 }
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	overlay->vid_bo = NULL;
 
 	overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@
 			     bool interruptible)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -404,7 +422,11 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	BEGIN_LP_RING(6);
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	/* wait for overlay to go idle */
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
@@ -432,7 +454,7 @@
 		return 0;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, &dev_priv->render_ring);
+				   interruptible, LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -467,7 +489,12 @@
 		if (request == NULL)
 			return -ENOMEM;
 
-		BEGIN_LP_RING(2);
+		ret = BEGIN_LP_RING(2);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
 		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
@@ -736,13 +763,12 @@
 }
 
 static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-				      struct drm_gem_object *new_bo,
+				      struct drm_i915_gem_object *new_bo,
 				      struct put_image_params *params)
 {
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
 	if (ret != 0)
 		return ret;
 
@@ -761,6 +787,10 @@
 	if (ret != 0)
 		goto out_unpin;
 
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
 	if (!overlay->active) {
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {
@@ -797,7 +827,7 @@
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@
 				      params->src_w/uv_hscale);
 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
 		regs->OSTRIDE |= params->stride_UV << 16;
 	}
 
@@ -829,7 +859,7 @@
 		goto out_unpin;
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = to_intel_bo(new_bo);
+	overlay->vid_bo = new_bo;
 
 	return 0;
 
@@ -942,7 +972,7 @@
 
 static int check_overlay_src(struct drm_device *dev,
 			     struct drm_intel_overlay_put_image *rec,
-			     struct drm_gem_object *new_bo)
+			     struct drm_i915_gem_object *new_bo)
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@
 			return -EINVAL;
 
 		tmp = rec->stride_Y*rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 
@@ -1038,12 +1068,12 @@
 			return -EINVAL;
 
 		tmp = rec->stride_Y * rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 
 		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-		if (rec->offset_U + tmp > new_bo->size ||
-		    rec->offset_V + tmp > new_bo->size)
+		if (rec->offset_U + tmp > new_bo->base.size ||
+		    rec->offset_V + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 	}
@@ -1086,7 +1116,7 @@
 	struct intel_overlay *overlay;
 	struct drm_mode_object *drmmode_obj;
 	struct intel_crtc *crtc;
-	struct drm_gem_object *new_bo;
+	struct drm_i915_gem_object *new_bo;
 	struct put_image_params *params;
 	int ret;
 
@@ -1125,8 +1155,8 @@
 	}
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
-	new_bo = drm_gem_object_lookup(dev, file_priv,
-				       put_image_rec->bo_handle);
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
 	if (!new_bo) {
 		ret = -ENOENT;
 		goto out_free;
@@ -1135,6 +1165,12 @@
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->struct_mutex);
 
+	if (new_bo->tiling_mode) {
+		DRM_ERROR("buffer used for overlay image cannot be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	ret = intel_overlay_recover_from_interrupt(overlay, true);
 	if (ret != 0)
 		goto out_unlock;
@@ -1217,7 +1253,7 @@
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_gem_object_unreference_unlocked(new_bo);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
 out_free:
 	kfree(params);
 
@@ -1370,7 +1406,7 @@
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct drm_gem_object *reg_bo;
+	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers *regs;
 	int ret;
 
@@ -1385,7 +1421,7 @@
 	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = to_intel_bo(reg_bo);
+	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
 		ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@
                         DRM_ERROR("failed to attach phys overlay regs\n");
                         goto out_free_bo;
                 }
-		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
 		if (ret) {
                         DRM_ERROR("failed to pin overlay register bo\n");
                         goto out_free_bo;
                 }
-		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+		overlay->flip_addr = reg_bo->gtt_offset;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1434,7 +1470,7 @@
 out_unpin_bo:
 	i915_gem_object_unpin(reg_bo);
 out_free_bo:
-	drm_gem_object_unreference(reg_bo);
+	drm_gem_object_unreference(&reg_bo->base);
 out_free:
 	kfree(overlay);
 	return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f3..c65992d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@
 	return 0;
 }
 
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	/* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2);
+		if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+			dev_priv->saveBLC_PWM_CTL2 = val;
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_PCH_CTL2,
+				   dev_priv->saveBLC_PWM_CTL);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	} else {
+		val = I915_READ(BLC_PWM_CTL);
+		if (dev_priv->saveBLC_PWM_CTL == 0) {
+			dev_priv->saveBLC_PWM_CTL = val;
+			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_CTL,
+				   dev_priv->saveBLC_PWM_CTL);
+			I915_WRITE(BLC_PWM_CTL2,
+				   dev_priv->saveBLC_PWM_CTL2);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	}
+
+	return val;
+}
+
 u32 intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
+	max = i915_read_blc_pwm_ctl(dev_priv);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+		return 1;
+	}
+
 	if (HAS_PCH_SPLIT(dev)) {
-		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+		max >>= 16;
 	} else {
-		max = I915_READ(BLC_PWM_CTL);
 		if (IS_PINEVIEW(dev)) {
 			max >>= 17;
 		} else {
@@ -146,14 +186,6 @@
 			max *= 0xff;
 	}
 
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		DRM_ERROR("fixme: max PWM is zero.\n");
-		max = 1;
-	}
-
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 	return max;
 }
@@ -218,3 +250,34 @@
 		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_enabled) {
+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+		dev_priv->backlight_enabled = false;
+	}
+
+	intel_panel_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_level == 0)
+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	dev_priv->backlight_enabled = true;
+}
+
+void intel_panel_setup_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
+	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
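
The i915_read_blc_pwm_ctl() helper added earlier in this file follows a simple save-and-restore pattern around a register that can be wiped by a GPU reset. A stripped-down model of that pattern, with a plain variable standing in for the MMIO register:

/*
 * Model of the BLC_PWM_CTL save/restore above: remember the first
 * non-zero value seen, and write it back if a later read returns zero
 * (as it can after a GPU reset).  "reg" is just memory here, not MMIO.
 */
static unsigned int saved_pwm_ctl;

static unsigned int read_pwm_ctl(volatile unsigned int *reg)
{
	unsigned int val = *reg;

	if (saved_pwm_ctl == 0)
		saved_pwm_ctl = val;		/* first good read: save it */
	else if (val == 0)
		*reg = val = saved_pwm_ctl;	/* lost after reset: restore */

	return val;
}
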
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e3..03e3370 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,14 +48,15 @@
 	return seqno;
 }
 
-static void
-render_ring_flush(struct drm_device *dev,
-		  struct intel_ring_buffer *ring,
+static int
+render_ring_flush(struct intel_ring_buffer *ring,
 		  u32	invalidate_domains,
 		  u32	flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -109,49 +110,54 @@
 		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 			cmd |= MI_EXE_FLUSH;
 
+		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+		    (IS_G4X(dev) || IS_GEN5(dev)))
+			cmd |= MI_INVALIDATE_ISP;
+
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, cmd);
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 
-static void ring_write_tail(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
 			RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = ring->obj;
 	u32 head;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(dev, ring, 0);
+	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +184,13 @@
 	}
 
 	I915_WRITE_CTL(ring,
-			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_REPORT_64K | RING_VALID);
 
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
+	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+	    I915_READ_START(ring) != obj->gtt_offset ||
+	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
@@ -194,8 +201,8 @@
 		return -EIO;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
 	else {
 		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +210,562 @@
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
+
 	return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret = init_ring_common(dev, ring);
-	int mode;
+	struct pipe_control *pc;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (ring->private)
+		return 0;
+
+	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+	if (!pc)
+		return -ENOMEM;
+
+	obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096, true);
+	if (ret)
+		goto err_unref;
+
+	pc->gtt_offset = obj->gtt_offset;
+	pc->cpu_page = kmap(obj->pages[0]);
+	if (pc->cpu_page == NULL)
+		goto err_unpin;
+
+	pc->obj = obj;
+	ring->private = pc;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+err:
+	kfree(pc);
+	return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	struct drm_i915_gem_object *obj;
+
+	if (!ring->private)
+		return;
+
+	obj = pc->obj;
+	kunmap(obj->pages[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	kfree(pc);
+	ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (IS_GEN5(dev)) {
+		ret = init_pipe_control(ring);
+		if (ret)
+			return ret;
+	}
+
 	return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr)					\
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int id;
+
+	/*
+	 * cs -> 1 = vcs, 0 = bcs
+	 * vcs -> 1 = bcs, 0 = cs,
+	 * bcs -> 1 = cs, 0 = vcs.
+	 */
+	id = ring - dev_priv->ring;
+	id += 2 - i;
+	id %= 3;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring,
+			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
+{
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+	update_semaphore(ring, 0, seqno);
+	update_semaphore(ring, 1, seqno);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+		struct intel_ring_buffer *to,
+		u32 seqno)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			intel_ring_sync_index(ring, to) << 17 |
+			MI_SEMAPHORE_COMPARE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
 do {									\
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
 		 PIPE_CONTROL_DEPTH_STALL | 2);				\
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
-	OUT_RING(0);							\
-	OUT_RING(0);							\
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
+	intel_ring_emit(ring__, 0);							\
+	intel_ring_emit(ring__, 0);							\
 } while (0)
 
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+		      u32 *result)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno;
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
 
-	seqno = i915_gem_get_seqno(dev);
+	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+	 * incoherent with writes to memory, i.e. completely fubar,
+	 * so we need to use PIPE_NOTIFY instead.
+	 *
+	 * However, we also need to workaround the qword write
+	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+	 * memory before requesting an interrupt.
+	 */
+	ret = intel_ring_begin(ring, 32);
+	if (ret)
+		return ret;
 
-	if (IS_GEN6(dev)) {
-		BEGIN_LP_RING(6);
-		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-		OUT_RING(PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
+	*result = seqno;
+	return 0;
+}
 
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-	return seqno;
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+			u32 *result)
+{
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
 }
 
 static u32
-render_ring_get_seqno(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
-static void
-render_ring_get_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
-		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-static void
-render_ring_put_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
-		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (IS_GEN6(dev)) {
-		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
-	} else {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
-	}
-
-}
-
-static void
-bsd_ring_flush(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		u32     invalidate_domains,
-		u32     flush_domains)
-{
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_FLUSH);
-	intel_ring_emit(dev, ring, MI_NOOP);
-	intel_ring_advance(dev, ring);
-}
-
-static int init_bsd_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	return init_ring_common(dev, ring);
-}
-
-static u32
-ring_add_request(struct drm_device *dev,
-		 struct intel_ring_buffer *ring,
-		 u32 flush_domains)
-{
-	u32 seqno;
-
-	seqno = i915_gem_get_seqno(dev);
-
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
-
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-
-	return seqno;
-}
-
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
-{
-	/* do nothing */
-}
-static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
-{
-	/* do nothing */
-}
-
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct intel_ring_buffer *ring,
-			     struct drm_i915_gem_execbuffer2 *exec,
-			     struct drm_clip_rect *cliprects,
-			     uint64_t exec_offset)
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
 {
-	uint32_t exec_start;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-			(2 << 6) | MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+	struct pipe_control *pc = ring->private;
+	return pc->cpu_page[0];
+}
+
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_enable_irq(dev_priv,
+					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+		else
+			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_disable_irq(dev_priv,
+					     GT_USER_INTERRUPT |
+					     GT_PIPE_NOTIFY);
+		else
+			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	}
+	spin_unlock(&ring->irq_lock);
+}
+
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 mmio = IS_GEN6(ring->dev) ?
+		RING_HWS_PGA_GEN6(ring->mmio_base) :
+		RING_HWS_PGA(ring->mmio_base);
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
+}
+
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+	       u32     invalidate_domains,
+	       u32     flush_domains)
+{
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				    struct intel_ring_buffer *ring,
-				    struct drm_i915_gem_execbuffer2 *exec,
-				    struct drm_clip_rect *cliprects,
-				    uint64_t exec_offset)
+ring_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
 {
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	*result = seqno;
+	return 0;
+}
+
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+
+static int
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | (2 << 6) |
+			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
 
 	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
-	count = nbox ? nbox : 1;
+	if (IS_I830(dev) || IS_845G(dev)) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, 0);
+	} else {
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
 
-		if (IS_I830(dev) || IS_845G(dev)) {
-			intel_ring_begin(dev, ring, 4);
-			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-			intel_ring_emit(dev, ring,
-					exec_start | MI_BATCH_NON_SECURE);
-			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-			intel_ring_emit(dev, ring, 0);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6) |
+					MI_BATCH_NON_SECURE_I965);
+			intel_ring_emit(ring, offset);
 		} else {
-			intel_ring_begin(dev, ring, 2);
-			if (INTEL_INFO(dev)->gen >= 4) {
-				intel_ring_emit(dev, ring,
-						MI_BATCH_BUFFER_START | (2 << 6)
-						| MI_BATCH_NON_SECURE_I965);
-				intel_ring_emit(dev, ring, exec_start);
-			} else {
-				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
-						| (2 << 6));
-				intel_ring_emit(dev, ring, exec_start |
-						MI_BATCH_NON_SECURE);
-			}
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6));
+			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 		}
-		intel_ring_advance(dev, ring);
 	}
-
-	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, MI_FLUSH |
-				MI_NO_WRITE_FLUSH |
-				MI_INVALIDATE_ISP );
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
-	}
-	/* XXX breadcrumb */
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-				struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
 	if (obj == NULL)
 		return;
-	obj_priv = to_intel_bo(obj);
 
-	kunmap(obj_priv->pages[0]);
+	kunmap(obj->pages[0]);
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +774,15 @@
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
 
-	ret = i915_gem_object_pin(obj, 4096);
+	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj_priv->gtt_offset;
-	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
@@ -557,7 +790,7 @@
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(dev, ring);
+	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -566,7 +799,7 @@
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 err:
 	return ret;
 }
@@ -574,9 +807,7 @@
 int intel_init_ring_buffer(struct drm_device *dev,
 			   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ring->dev = dev;
@@ -584,8 +815,11 @@
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(dev, ring);
+		ret = init_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -597,15 +831,14 @@
 		goto err_hws;
 	}
 
-	ring->gem_object = obj;
+	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE);
+	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
 	if (ret)
 		goto err_unref;
 
-	obj_priv = to_intel_bo(obj);
 	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+	ring->map.offset = dev->agp->base + obj->gtt_offset;
 	ring->map.type = 0;
 	ring->map.flags = 0;
 	ring->map.mtrr = 0;
@@ -618,60 +851,64 @@
 	}
 
 	ring->virtual_start = ring->map.handle;
-	ret = ring->init(dev, ring);
+	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
-	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
-	}
-	return ret;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	return 0;
 
 err_unmap:
 	drm_core_ioremapfree(&ring->map, dev);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(obj);
-	ring->gem_object = NULL;
+	drm_gem_object_unreference(&obj->base);
+	ring->obj = NULL;
 err_hws:
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 	return ret;
 }
 
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-	if (ring->gem_object == NULL)
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	if (ring->obj == NULL)
 		return;
 
-	drm_core_ioremapfree(&ring->map, dev);
+	/* Disable the ring buffer. The ring must be idle at this point */
+	dev_priv = ring->dev->dev_private;
+	ret = intel_wait_ring_buffer(ring, ring->size - 8);
+	I915_WRITE_CTL(ring, 0);
 
-	i915_gem_object_unpin(ring->gem_object);
-	drm_gem_object_unreference(ring->gem_object);
-	ring->gem_object = NULL;
+	drm_core_ioremapfree(&ring->map, ring->dev);
+
+	i915_gem_object_unpin(ring->obj);
+	drm_gem_object_unreference(&ring->obj->base);
+	ring->obj = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
 
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(dev, ring, rem);
+		int ret = intel_wait_ring_buffer(ring, rem);
 		if (ret)
 			return ret;
 	}
@@ -689,11 +926,11 @@
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 head;
 
 	trace_i915_ring_wait_begin (dev);
@@ -711,7 +948,7 @@
 		if (ring->space < 0)
 			ring->space += ring->size;
 		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
+			trace_i915_ring_wait_end(dev);
 			return 0;
 		}
 
@@ -722,29 +959,39 @@
 		}
 
 		msleep(1);
+		if (atomic_read(&dev_priv->mm.wedged))
+			return -EAGAIN;
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring,
-		      int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+		     int num_dwords)
 {
 	int n = 4*num_dwords;
-	if (unlikely(ring->tail + n > ring->size))
-		intel_wrap_ring_buffer(dev, ring);
-	if (unlikely(ring->space < n))
-		intel_wait_ring_buffer(dev, ring, n);
+	int ret;
+
+	if (unlikely(ring->tail + n > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < n)) {
+		ret = intel_wait_ring_buffer(ring, n);
+		if (unlikely(ret))
+			return ret;
+	}
 
 	ring->space -= n;
+	return 0;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-			struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
 {
 	ring->tail &= ring->size - 1;
-	ring->write_tail(dev, ring, ring->tail);
+	ring->write_tail(ring, ring->tail);
 }
 
 static const struct intel_ring_buffer render_ring = {
@@ -756,10 +1003,11 @@
 	.write_tail		= ring_write_tail,
 	.flush			= render_ring_flush,
 	.add_request		= render_ring_add_request,
-	.get_seqno		= render_ring_get_seqno,
-	.user_irq_get		= render_ring_get_user_irq,
-	.user_irq_put		= render_ring_put_user_irq,
-	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= render_ring_get_irq,
+	.irq_put		= render_ring_put_irq,
+	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
+	.cleanup		= render_ring_cleanup,
 };
 
 /* ring buffer for bit-stream decoder */
@@ -769,22 +1017,21 @@
 	.id			= RING_BSD,
 	.mmio_base		= BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= init_bsd_ring,
+	.init			= init_ring_common,
 	.write_tail		= ring_write_tail,
 	.flush			= bsd_ring_flush,
 	.add_request		= ring_add_request,
-	.get_seqno		= ring_status_page_get_seqno,
-	.user_irq_get		= bsd_ring_get_user_irq,
-	.user_irq_put		= bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= bsd_ring_get_irq,
+	.irq_put		= bsd_ring_put_irq,
+	.dispatch_execbuffer	= ring_dispatch_execbuffer,
 };
 
 
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +1050,109 @@
 	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
-			    u32 invalidate_domains,
-			    u32 flush_domains)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
 {
-       intel_ring_begin(dev, ring, 4);
-       intel_ring_emit(dev, ring, MI_FLUSH_DW);
-       intel_ring_emit(dev, ring, 0);
-       intel_ring_emit(dev, ring, 0);
-       intel_ring_emit(dev, ring, 0);
-       intel_ring_advance(dev, ring);
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring,
-				  struct drm_i915_gem_execbuffer2 *exec,
-				  struct drm_clip_rect *cliprects,
-				  uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len)
 {
-       uint32_t exec_start;
+       int ret;
 
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+	       return ret;
 
-       intel_ring_begin(dev, ring, 2);
-       intel_ring_emit(dev, ring,
-		       MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(dev, ring, exec_start);
-       intel_ring_advance(dev, ring);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
+}
+
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
-       .name			= "gen6 bsd ring",
-       .id			= RING_BSD,
-       .mmio_base		= GEN6_BSD_RING_BASE,
-       .size			= 32 * PAGE_SIZE,
-       .init			= init_bsd_ring,
-       .write_tail		= gen6_bsd_ring_write_tail,
-       .flush			= gen6_ring_flush,
-       .add_request		= ring_add_request,
-       .get_seqno		= ring_status_page_get_seqno,
-       .user_irq_get		= bsd_ring_get_user_irq,
-       .user_irq_put		= bsd_ring_put_user_irq,
-       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+	.name			= "gen6 bsd ring",
+	.id			= RING_BSD,
+	.mmio_base		= GEN6_BSD_RING_BASE,
+	.size			= 32 * PAGE_SIZE,
+	.init			= init_ring_common,
+	.write_tail		= gen6_bsd_ring_write_tail,
+	.flush			= gen6_ring_flush,
+	.add_request		= gen6_add_request,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= gen6_bsd_ring_get_irq,
+	.irq_put		= gen6_bsd_ring_put_irq,
+	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 };
 
 /* Blitter support (SandyBridge+) */
 
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
+
 static void
-blt_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
@@ -883,32 +1170,31 @@
 	return ring->private;
 }
 
-static int blt_ring_init(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
 {
-	if (NEED_BLT_WORKAROUND(dev)) {
+	if (NEED_BLT_WORKAROUND(ring->dev)) {
 		struct drm_i915_gem_object *obj;
-		u32 __iomem *ptr;
+		u32 *ptr;
 		int ret;
 
-		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		obj = i915_gem_alloc_object(ring->dev, 4096);
 		if (obj == NULL)
 			return -ENOMEM;
 
-		ret = i915_gem_object_pin(&obj->base, 4096);
+		ret = i915_gem_object_pin(obj, 4096, true);
 		if (ret) {
 			drm_gem_object_unreference(&obj->base);
 			return ret;
 		}
 
 		ptr = kmap(obj->pages[0]);
-		iowrite32(MI_BATCH_BUFFER_END, ptr);
-		iowrite32(MI_NOOP, ptr+1);
+		*ptr++ = MI_BATCH_BUFFER_END;
+		*ptr++ = MI_NOOP;
 		kunmap(obj->pages[0]);
 
-		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
 		if (ret) {
-			i915_gem_object_unpin(&obj->base);
+			i915_gem_object_unpin(obj);
 			drm_gem_object_unreference(&obj->base);
 			return ret;
 		}
@@ -916,51 +1202,44 @@
 		ring->private = obj;
 	}
 
-	return init_ring_common(dev, ring);
+	return init_ring_common(ring);
 }
 
-static void blt_ring_begin(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
 			  int num_dwords)
 {
 	if (ring->private) {
-		intel_ring_begin(dev, ring, num_dwords+2);
-		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
-		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+		int ret = intel_ring_begin(ring, num_dwords+2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+		return 0;
 	} else
-		intel_ring_begin(dev, ring, 4);
+		return intel_ring_begin(ring, 4);
 }
 
-static void blt_ring_flush(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+static int blt_ring_flush(struct intel_ring_buffer *ring,
 			   u32 invalidate_domains,
 			   u32 flush_domains)
 {
-	blt_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_FLUSH_DW);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_advance(dev, ring);
-}
+	int ret;
 
-static u32
-blt_ring_add_request(struct drm_device *dev,
-		     struct intel_ring_buffer *ring,
-		     u32 flush_domains)
-{
-	u32 seqno = i915_gem_get_seqno(dev);
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
 
-	blt_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-	return seqno;
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1260,56 @@
        .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
        .flush			= blt_ring_flush,
-       .add_request		= blt_ring_add_request,
-       .get_seqno		= ring_status_page_get_seqno,
-       .user_irq_get		= blt_ring_get_user_irq,
-       .user_irq_put		= blt_ring_put_user_irq,
-       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+       .add_request		= gen6_add_request,
+       .get_seqno		= ring_get_seqno,
+       .irq_get			= blt_ring_get_irq,
+       .irq_put			= blt_ring_put_irq,
+       .dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
        .cleanup			= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-	dev_priv->render_ring = render_ring;
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->render_ring.status_page.page_addr
-			= dev_priv->status_page_dmah->vaddr;
-		memset(dev_priv->render_ring.status_page.page_addr,
-				0, PAGE_SIZE);
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
 	}
 
-	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	}
+
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
 	if (IS_GEN6(dev))
-		dev_priv->bsd_ring = gen6_bsd_ring;
+		*ring = gen6_bsd_ring;
 	else
-		dev_priv->bsd_ring = bsd_ring;
+		*ring = bsd_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-	dev_priv->blt_ring = gen6_blt_ring;
+	*ring = gen6_blt_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
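
The intel_ringbuffer.c rewrite above threads the ring pointer through every vfunc (dropping the drm_device argument) and turns intel_ring_begin() and the add_request()/flush() hooks into error-returning calls instead of ones that wait unconditionally. A minimal sketch of the resulting emit pattern, using only functions and opcodes that appear in this patch (the helper name itself is made up for illustration):

static int emit_flush_example(struct intel_ring_buffer *ring)
{
	int ret;

	/* Reserve space for two dwords; this can now fail, e.g. with
	 * -EAGAIN once the GPU is wedged or -EBUSY on a ring-wait timeout. */
	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);

	/* Publish the new tail to the hardware via ring->write_tail(). */
	intel_ring_advance(ring);
	return 0;
}
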
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d2cd0f1..be9087e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,40 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
-struct  intel_hw_status_page {
-	void		*page_addr;
-	unsigned int	gfx_addr;
-	struct		drm_gem_object *obj;
+enum {
+    RCS = 0x0,
+    VCS,
+    BCS,
+    I915_NUM_RINGS,
 };
 
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+struct  intel_hw_status_page {
+	u32	__iomem	*page_addr;
+	unsigned int	gfx_addr;
+	struct		drm_i915_gem_object *obj;
+};
 
-struct drm_i915_gem_execbuffer2;
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
+
 struct  intel_ring_buffer {
 	const char	*name;
 	enum intel_ring_id {
@@ -25,45 +43,39 @@
 		RING_BLT = 0x4,
 	} id;
 	u32		mmio_base;
-	unsigned long	size;
 	void		*virtual_start;
 	struct		drm_device *dev;
-	struct		drm_gem_object *gem_object;
+	struct		drm_i915_gem_object *obj;
 
 	u32		actual_head;
 	u32		head;
 	u32		tail;
 	int		space;
+	int		size;
+	int		effective_size;
 	struct intel_hw_status_page status_page;
 
-	u32		irq_gem_seqno;		/* last seq seem at irq time */
-	u32		waiting_gem_seqno;
-	int		user_irq_refcount;
-	void		(*user_irq_get)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void		(*user_irq_put)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	spinlock_t	irq_lock;
+	u32		irq_refcount;
+	u32		irq_mask;
+	u32		irq_seqno;		/* last seq seen at irq time */
+	u32		waiting_seqno;
+	u32		sync_seqno[I915_NUM_RINGS-1];
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+	void		(*irq_put)(struct intel_ring_buffer *ring);
 
-	int		(*init)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	int		(*init)(struct intel_ring_buffer *ring);
 
-	void		(*write_tail)(struct drm_device *dev,
-				      struct intel_ring_buffer *ring,
+	void		(*write_tail)(struct intel_ring_buffer *ring,
 				      u32 value);
-	void		(*flush)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32	invalidate_domains,
-			u32	flush_domains);
-	u32		(*add_request)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 flush_domains);
-	u32		(*get_seqno)(struct drm_device *dev,
-				     struct intel_ring_buffer *ring);
-	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			struct drm_i915_gem_execbuffer2 *exec,
-			struct drm_clip_rect *cliprects,
-			uint64_t exec_offset);
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
+				  u32	invalidate_domains,
+				  u32	flush_domains);
+	int		(*add_request)(struct intel_ring_buffer *ring,
+				       u32 *seqno);
+	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
@@ -96,7 +108,7 @@
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	bool outstanding_lazy_request;
+	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
@@ -105,44 +117,54 @@
 };
 
 static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
-		int reg)
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+		      struct intel_ring_buffer *other)
 {
-	u32 *regs = ring->status_page.page_addr;
-	return regs[reg];
+	int idx;
+
+	/*
+	 * cs -> 0 = vcs, 1 = bcs
+	 * vcs -> 0 = bcs, 1 = cs,
+	 * bcs -> 0 = cs, 1 = vcs.
+	 */
+
+	idx = (other - ring) - 1;
+	if (idx < 0)
+		idx += I915_NUM_RINGS;
+
+	return idx;
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
-				   struct intel_ring_buffer *ring,
-				   unsigned int data)
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+		       int reg)
 {
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
+	return ioread32(ring->status_page.page_addr + reg);
+}
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+				   u32 data)
+{
+	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+		    struct intel_ring_buffer *to,
+		    u32 seqno);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
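
The new intel_ring_sync_index() above turns a pointer difference within the dev_priv->ring[] array into a per-ring semaphore slot: idx = (other - ring) - 1, wrapped by I915_NUM_RINGS. Worked out for the three rings defined by this patch (RCS = 0, VCS = 1, BCS = 2):

/*
 * ring = RCS: other = VCS -> (1 - 0) - 1 = 0;  other = BCS -> (2 - 0) - 1 = 1
 * ring = VCS: other = BCS -> (2 - 1) - 1 = 0;  other = RCS -> (0 - 1) - 1 = -2, +3 = 1
 * ring = BCS: other = RCS -> (0 - 2) - 1 = -3, +3 = 0;  other = VCS -> (1 - 2) - 1 = -2, +3 = 1
 */

which matches the cs/vcs/bcs table in the comment; update_semaphore() in intel_ringbuffer.c uses the companion mapping when writing the seqno into the other rings' sync registers.
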
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6bc42fa..45cd376 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1024,9 +1024,13 @@
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (intel_sdvo->has_hdmi_monitor &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
-		return;
+	if (intel_sdvo->has_hdmi_monitor) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo);
+	} else
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
 	if (intel_sdvo->is_tv &&
 	    !intel_sdvo_set_tv_format(intel_sdvo))
@@ -1045,7 +1049,9 @@
 
 	/* Set the SDVO control regs. */
 	if (INTEL_INFO(dev)->gen >= 4) {
-		sdvox = SDVO_BORDER_ENABLE;
+		sdvox = 0;
+		if (INTEL_INFO(dev)->gen < 5)
+			sdvox |= SDVO_BORDER_ENABLE;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 			sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1075,7 +1081,8 @@
 		sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
-	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+	    INTEL_INFO(dev)->gen < 5)
 		sdvox |= SDVO_STALL_SELECT;
 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
@@ -1395,6 +1402,9 @@
 
 	intel_sdvo->attached_output = response;
 
+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
 	else if (response & SDVO_TMDS_MASK)
@@ -1919,20 +1929,7 @@
 static bool
 intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	int is_hdmi;
-
-	if (!intel_sdvo_check_supp_encode(intel_sdvo))
-		return false;
-
-	if (!intel_sdvo_set_target_output(intel_sdvo,
-					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
-		return false;
-
-	is_hdmi = 0;
-	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
-		return false;
-
-	return !!is_hdmi;
+	return intel_sdvo_check_supp_encode(intel_sdvo);
 }
 
 static u8
@@ -2034,12 +2031,7 @@
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
 	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
-		/* enable hdmi encoding mode if supported */
-		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
-		intel_sdvo_set_colorimetry(intel_sdvo,
-					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-
 		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
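
The intel_sdvo.c hunks move the HDMI/DVI encode selection from connector setup into mode set and clear the cached has_hdmi_monitor/has_hdmi_audio flags on every detect, so the encoder tracks whatever monitor is currently attached rather than what was plugged in at init. Roughly (simplified from the hunks above, not additional code):

	if (intel_sdvo->has_hdmi_monitor) {	/* set by the detect path */
		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
		intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256);
		intel_sdvo_set_avi_infoframe(intel_sdvo);
	} else {
		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
	}
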
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f76819..93206e4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, 0,
+			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
 			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, 0,
+			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
 			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return type;
 }
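
The intel_tv.c hunks only track the rename of dev_priv->user_irq_lock to dev_priv->irq_lock; the per-ring interrupt handling that motivated it lives in intel_ringbuffer.c above, where irq_get() now reports whether the interrupt could actually be armed. A rough sketch of how a wait-for-seqno caller is expected to use the new hooks (the wait loop itself is elided and is not part of this patch):

	if (ring->irq_get(ring)) {
		/* sleep on ring->irq_queue until ring->get_seqno(ring)
		 * passes the value being waited for, then drop the ref */
		ring->irq_put(ring);
	} else {
		/* dev->irq_enabled was clear: fall back to polling the seqno */
	}
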
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 72730e9..21d6c29 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,7 +10,7 @@
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
-	select ACPI_VIDEO if ACPI
+	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	help
 	  Choose this option for open-source nVidia support.
 
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d..e12c97f 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,27 +5,32 @@
 ccflags-y := -Iinclude/drm
 nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
              nouveau_dp.o nouveau_ramht.o \
 	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+	     nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
-             nv40_grctx.o nv50_grctx.o \
+             nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+             nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_crtc.o nv50_dac.o nv50_sor.o \
-             nv50_cursor.o nv50_display.o nv50_fbcon.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_cursor.o nv50_display.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-             nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+             nv04_crtc.o nv04_display.o nv04_cursor.o \
+             nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
 	     nv50_calc.o \
-	     nv04_pm.o nv50_pm.o nva3_pm.o
+	     nv04_pm.o nv50_pm.o nva3_pm.o \
+	     nv50_vram.o nvc0_vram.o \
+	     nv50_vm.o nvc0_vm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1191526..a542380 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -130,10 +130,15 @@
 
 static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
 {
-	if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+	/* easy option one - intel vendor ID means Integrated */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 		return VGA_SWITCHEROO_IGD;
-	else
-		return VGA_SWITCHEROO_DIS;
+
+	/* is this device on Bus 0? - this may need improving */
+	if (pdev->bus->number == 0)
+		return VGA_SWITCHEROO_IGD;
+
+	return VGA_SWITCHEROO_DIS;
 }
 
 static struct vga_switcheroo_handler nouveau_dsm_handler = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index b14c811..d3a9c6e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -59,7 +59,7 @@
 	return 0;
 }
 
-static struct backlight_ops nv40_bl_ops = {
+static const struct backlight_ops nv40_bl_ops = {
 	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = nv40_get_intensity,
 	.update_status = nv40_set_intensity,
@@ -82,7 +82,7 @@
 	return 0;
 }
 
-static struct backlight_ops nv50_bl_ops = {
+static const struct backlight_ops nv50_bl_ops = {
 	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = nv50_get_intensity,
 	.update_status = nv50_set_intensity,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b229357..2aef5cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1927,7 +1927,7 @@
 	 * offset      (8  bit): opcode
 	 * offset + 1  (16 bit): time
 	 *
-	 * Sleep for "time" miliseconds.
+	 * Sleep for "time" milliseconds.
 	 */
 
 	unsigned time = ROM16(bios->data[offset + 1]);
@@ -1935,7 +1935,7 @@
 	if (!iexec->execute)
 		return 3;
 
-	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n",
+	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
 		offset, time);
 
 	msleep(time);
@@ -6053,52 +6053,17 @@
 	return entry;
 }
 
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
 {
 	struct dcb_entry *entry = new_dcb_entry(dcb);
 
-	entry->type = 0;
+	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	entry->location = DCB_LOC_ON_CHIP;
-	entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 2;
-	entry->i2c_index = LEGACY_I2C_PANEL;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
-	entry->or = 1;	/* means |0x10 gets set on CRE_LCD__INDEX */
-	entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
-	/*
-	 * For dvi-a either crtc probably works, but my card appears to only
-	 * support dvi-d.  "nvidia" still attempts to program it for dvi-a,
-	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
-	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
-	 * the monitor picks up the mode res ok and lights up, but no pixel
-	 * data appears, so the board manufacturer probably connected up the
-	 * sync lines, but missed the video traces / components
-	 *
-	 * with this introduction, dvi-a left as an exercise for the reader.
-	 */
-	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 1;
-	entry->i2c_index = LEGACY_I2C_TV;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
+	if (type != OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	entry->or = or;
 }
 
 static bool
@@ -6365,8 +6330,36 @@
 	return true;
 }
 
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+				     all_heads, 1);
+}
+
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@
 
 	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
-		NV_INFO(dev, "Assuming a CRT output exists\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6451,21 +6439,7 @@
 		 */
 		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
 				  "adding all possible outputs\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		/*
-		 * Attempt to detect TV before DVI because the test
-		 * for the former is more accurate and it rules the
-		 * latter out.
-		 */
-		if (nv04_tv_identify(dev,
-				     bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
-		else if (bios->tmds.output0_script_ptr ||
-			 bios->tmds.output1_script_ptr)
-			fabricate_dvi_i_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6859,7 +6833,7 @@
 	if (ret)
 		return ret;
 
-	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
 
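
Note on the nouveau_bios.c hunks above: the three type-specific fabricate_*_output() helpers are folded into a single fabricate_dcb_output(), and both "no usable DCB" fallback paths now share fabricate_dcb_encoder_table(), which also gains an Apple iMac G4 (PowerMac4,5) quirk. A rough mapping of the removed call sites onto the new helper, as a sketch only -- the numeric types 0/1/2 in the removed code correspond to OUTPUT_ANALOG/OUTPUT_TV/OUTPUT_TMDS, and the TV/TMDS 'or' values shown are the ones the new table builder passes (the old helpers' values are not visible in these hunks):

	/*
	 * fabricate_vga_output(dcb, i2c, heads)
	 *   -> fabricate_dcb_output(dcb, OUTPUT_ANALOG, i2c, heads, 1)
	 * fabricate_tv_output(dcb, twoHeads)
	 *   -> fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
	 *                           twoHeads ? 3 : 1, 0)
	 * fabricate_dvi_i_output(dcb, twoHeads)
	 *   -> fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
	 *                           twoHeads ? 3 : 1, 1)
	 */
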
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c2..a7fae26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -46,82 +48,51 @@
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nouveau_vm_put(&nvbo->vma);
 	kfree(nvbo);
 }
 
 static void
-nouveau_bo_fixup_align(struct drm_device *dev,
-		       uint32_t tile_mode, uint32_t tile_flags,
-		       int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+		       int *page_shift)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Align the size to the
-	 * appropriate boundaries. This does imply that sizes are rounded up
-	 * 3-7 pages, so be aware of this and do not waste memory by allocating
-	 * many small buffers.
-	 */
-	if (dev_priv->card_type == NV_50) {
-		uint32_t block_size = dev_priv->vram_size >> 15;
-		int i;
-
-		switch (tile_flags) {
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7a00:
-			if (is_power_of_2(block_size)) {
-				for (i = 1; i < 10; i++) {
-					*align = 12 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			} else {
-				for (i = 1; i < 10; i++) {
-					*align = 8 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			}
-			*size = roundup(*size, *align);
-			break;
-		default:
-			break;
-		}
-
-	} else {
-		if (tile_mode) {
+	if (dev_priv->card_type < NV_50) {
+		if (nvbo->tile_mode) {
 			if (dev_priv->chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup(*size, 32 * tile_mode);
+				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
 		}
+	} else {
+		if (likely(dev_priv->chan_vm)) {
+			if (*size > 256 * 1024)
+				*page_shift = dev_priv->chan_vm->lpg_shift;
+			else
+				*page_shift = dev_priv->chan_vm->spg_shift;
+		} else {
+			*page_shift = 12;
+		}
+
+		*size = roundup(*size, (1 << *page_shift));
+		*align = max((1 << *page_shift), *align);
 	}
 
-	/* ALIGN works only on powers of two. */
 	*size = roundup(*size, PAGE_SIZE);
-
-	if (dev_priv->card_type == NV_50) {
-		*size = roundup(*size, 65536);
-		*align = max(65536, *align);
-	}
 }
 
 int
@@ -132,7 +103,7 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0;
+	int ret = 0, page_shift = 0;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
@@ -145,10 +116,18 @@
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
-			       &align, &size);
+	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
+	if (!nvbo->no_vm && dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+				     NV_MEM_ACCESS_RW, &nvbo->vma);
+		if (ret) {
+			kfree(nvbo);
+			return ret;
+		}
+	}
+
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -161,6 +140,11 @@
 	}
 	nvbo->channel = NULL;
 
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -244,7 +228,7 @@
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -280,7 +264,7 @@
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -319,6 +303,25 @@
 		ttm_bo_kunmap(&nvbo->kmap);
 }
 
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+		    bool no_wait_reserve, bool no_wait_gpu)
+{
+	int ret;
+
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+			      no_wait_reserve, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
+	return 0;
+}
+
 u16
 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
 {
@@ -410,37 +413,40 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		man->func = &ttm_bo_manager_func;
+		if (dev_priv->card_type >= NV_50) {
+			man->func = &nouveau_vram_manager;
+			man->io_reserve_fastpath = false;
+			man->use_io_reserve_lru = true;
+		} else {
+			man->func = &ttm_bo_manager_func;
+		}
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		if (dev_priv->card_type == NV_50)
-			man->gpu_offset = 0x40000000;
-		else
-			man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED;
-			man->default_caching = TTM_PL_FLAG_UNCACHED;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
 			break;
 		case NOUVEAU_GART_SGDMA:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@
 	if (ret)
 		return ret;
 
-	if (nvbo->channel) {
-		ret = nouveau_fence_sync(fence, nvbo->channel);
-		if (ret)
-			goto out;
-	}
-
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
 					no_wait_reserve, no_wait_gpu, new_mem);
-out:
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 	return ret;
 }
 
@@ -516,6 +515,58 @@
 }
 
 static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	u64 src_offset = old_mem->start << PAGE_SHIFT;
+	u64 dst_offset = new_mem->start << PAGE_SHIFT;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	if (!nvbo->no_vm) {
+		if (old_mem->mem_type == TTM_PL_VRAM)
+			src_offset  = nvbo->vma.offset;
+		else
+			src_offset += dev_priv->gart_info.aper_base;
+
+		if (new_mem->mem_type == TTM_PL_VRAM)
+			dst_offset  = nvbo->vma.offset;
+		else
+			dst_offset += dev_priv->gart_info.aper_base;
+	}
+
+	page_count = new_mem->num_pages;
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		ret = RING_SPACE(chan, 12);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* line_length */
+		OUT_RING  (chan, line_count);
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+		OUT_RING  (chan, 0x00100110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -529,14 +580,14 @@
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (!nvbo->no_vm) {
 		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset += dev_priv->vm_vram_base;
+			src_offset  = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;
 
 		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset += dev_priv->vm_vram_base;
+			dst_offset  = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
@@ -683,17 +734,27 @@
 	int ret;
 
 	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm)
+	if (!chan || nvbo->no_vm) {
 		chan = dev_priv->channel;
+		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+	}
 
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
+	if (dev_priv->card_type < NV_C0)
 		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	if (ret)
-		return ret;
+	else
+		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	if (ret == 0) {
+		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+						    no_wait_reserve,
+						    no_wait_gpu, new_mem);
+	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	if (chan == dev_priv->channel)
+		mutex_unlock(&chan->mutex);
+	return ret;
 }
 
 static int
@@ -771,7 +832,6 @@
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	uint64_t offset;
-	int ret;
 
 	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
 		/* Nothing to do. */
@@ -781,18 +841,12 @@
 
 	offset = new_mem->start << PAGE_SHIFT;
 
-	if (dev_priv->card_type == NV_50) {
-		ret = nv50_mem_vm_bind_linear(dev,
-					      offset + dev_priv->vm_vram_base,
-					      new_mem->size,
-					      nouveau_bo_tile_layout(nvbo),
-					      offset);
-		if (ret)
-			return ret;
-
+	if (dev_priv->chan_vm) {
+		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
 	} else if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
-						nvbo->tile_mode);
+						nvbo->tile_mode,
+						nvbo->tile_flags);
 	}
 
 	return 0;
@@ -808,9 +862,7 @@
 
 	if (dev_priv->card_type >= NV_10 &&
 	    dev_priv->card_type < NV_50) {
-		if (*old_tile)
-			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
 		*old_tile = new_tile;
 	}
 }
@@ -879,6 +931,7 @@
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
 	struct drm_device *dev = dev_priv->dev;
+	int ret;
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -901,9 +954,40 @@
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
+	{
+		struct nouveau_vram *vram = mem->mm_node;
+		u8 page_shift;
+
+		if (!dev_priv->bar1_vm) {
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = pci_resource_start(dev->pdev, 1);
+			mem->bus.is_iomem = true;
+			break;
+		}
+
+		if (dev_priv->card_type == NV_C0)
+			page_shift = vram->page_shift;
+		else
+			page_shift = 12;
+
+		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
+				     page_shift, NV_MEM_ACCESS_RW,
+				     &vram->bar_vma);
+		if (ret)
+			return ret;
+
+		nouveau_vm_map(&vram->bar_vma, vram);
+		if (ret) {
+			nouveau_vm_put(&vram->bar_vma);
+			return ret;
+		}
+
+		mem->bus.offset = vram->bar_vma.offset;
+		if (dev_priv->card_type == NV_50) /*XXX*/
+			mem->bus.offset -= 0x0020000000ULL;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
+	}
 		break;
 	default:
 		return -EINVAL;
@@ -914,6 +998,17 @@
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct nouveau_vram *vram = mem->mm_node;
+
+	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
+		return;
+
+	if (!vram->bar_vma.node)
+		return;
+
+	nouveau_vm_unmap(&vram->bar_vma);
+	nouveau_vm_put(&vram->bar_vma);
 }
 
 static int
@@ -939,7 +1034,23 @@
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
 	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
-	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
+	return nouveau_bo_validate(nvbo, false, true, false);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+	struct nouveau_fence *old_fence;
+
+	if (likely(fence))
+		nouveau_fence_ref(fence);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +1060,11 @@
 	.evict_flags = nouveau_bo_evict_flags,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = nouveau_fence_signalled,
-	.sync_obj_wait = nouveau_fence_wait,
-	.sync_obj_flush = nouveau_fence_flush,
-	.sync_obj_unref = nouveau_fence_unref,
-	.sync_obj_ref = nouveau_fence_ref,
+	.sync_obj_signaled = __nouveau_fence_signalled,
+	.sync_obj_wait = __nouveau_fence_wait,
+	.sync_obj_flush = __nouveau_fence_flush,
+	.sync_obj_unref = __nouveau_fence_unref,
+	.sync_obj_ref = __nouveau_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
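
Note on nouveau_bo.c: NV50+ buffer objects move onto the new VM code. Each BO now gets a nouveau_vma from dev_priv->chan_vm at creation, nouveau_bo_validate() wraps ttm_bo_validate() so bo.offset tracks vma.offset for VRAM placements, the copy paths use VM offsets (with a new NVC0 M2MF variant), and TTM fencing goes through the renamed __nouveau_fence_* hooks. A worked example of the new NV50+ branch of nouveau_bo_fixup_align(), assuming chan_vm->lpg_shift == 16 and chan_vm->spg_shift == 12 (those shift values are not visible in this diff and are an assumption):

	/*
	 * size = 300 KiB: 300 KiB > 256 KiB, so page_shift = lpg_shift = 16
	 *   size  = roundup(307200, 1 << 16) = 327680  (320 KiB)
	 *   align = max(1 << 16, align)                 (at least 64 KiB)
	 *
	 * size = 100 KiB: 100 KiB <= 256 KiB, so page_shift = spg_shift = 12
	 *   size  = roundup(102400, 1 << 12) = 102400  (unchanged)
	 *   align = max(1 << 12, align)                 (at least 4 KiB)
	 */
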
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e..3960d66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@
 	int ret;
 
 	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_AGP, &pushbuf);
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_dma_new(chan,
+						     NV_CLASS_DMA_IN_MEMORY, 0,
+						     (1ULL << 40),
+						     NV_MEM_ACCESS_RO,
+						     NV_MEM_TARGET_VM,
+						     &pushbuf);
+		}
 		chan->pushbuf_base = pb->bo.offset;
 	} else
 	if (pb->bo.mem.mem_type == TTM_PL_TT) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RO, &pushbuf,
-						  NULL);
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+					     dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_GART, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_VIDMEM, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VRAM, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,17 +67,16 @@
 		 * VRAM.
 		 */
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     pci_resource_start(dev->pdev,
-					     1),
+					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_PCI, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_PCI, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
 	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
 	nouveau_gpuobj_ref(NULL, &pushbuf);
-	return 0;
+	return ret;
 }
 
 static struct nouveau_bo *
@@ -100,6 +104,13 @@
 		return NULL;
 	}
 
+	ret = nouveau_bo_map(pushbuf);
+	if (ret) {
+		nouveau_bo_unpin(pushbuf);
+		nouveau_bo_ref(NULL, &pushbuf);
+		return NULL;
+	}
+
 	return pushbuf;
 }
 
@@ -107,74 +118,59 @@
 int
 nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t tt_handle)
+		      uint32_t vram_handle, uint32_t gart_handle)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
-	int channel, user;
+	unsigned long flags;
 	int ret;
 
-	/*
-	 * Alright, here is the full story
-	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
-	 * no complicated crash-prone context switches)
-	 * We allocate a new context for each app and let it write to it
-	 * directly (woo, full userspace command submission !)
-	 * When there are no more contexts, you lost
-	 */
-	for (channel = 0; channel < pfifo->channels; channel++) {
-		if (dev_priv->fifos[channel] == NULL)
-			break;
-	}
-
-	/* no more fifos. you lost. */
-	if (channel == pfifo->channels)
-		return -EINVAL;
-
-	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
-					   GFP_KERNEL);
-	if (!dev_priv->fifos[channel])
+	/* allocate and lock channel structure */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
 		return -ENOMEM;
-	chan = dev_priv->fifos[channel];
-	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
-	INIT_LIST_HEAD(&chan->fence.pending);
 	chan->dev = dev;
-	chan->id = channel;
 	chan->file_priv = file_priv;
 	chan->vram_handle = vram_handle;
-	chan->gart_handle = tt_handle;
+	chan->gart_handle = gart_handle;
 
-	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+	kref_init(&chan->ref);
+	atomic_set(&chan->users, 1);
+	mutex_init(&chan->mutex);
+	mutex_lock(&chan->mutex);
+
+	/* allocate hw channel id */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+		if (!dev_priv->channels.ptr[chan->id]) {
+			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	if (chan->id == pfifo->channels) {
+		mutex_unlock(&chan->mutex);
+		kfree(chan);
+		return -ENODEV;
+	}
+
+	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
+	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->nvsw.flip);
+	INIT_LIST_HEAD(&chan->fence.pending);
 
 	/* Allocate DMA push buffer */
 	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
 	if (!chan->pushbuf_bo) {
 		ret = -ENOMEM;
 		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_dma_pre_init(chan);
-
-	/* Locate channel's user control regs */
-	if (dev_priv->card_type < NV_40)
-		user = NV03_USER(channel);
-	else
-	if (dev_priv->card_type < NV_50)
-		user = NV40_USER(channel);
-	else
-		user = NV50_USER(channel);
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
-								PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "ioremap of regs failed.\n");
-		nouveau_channel_free(chan);
-		return -ENOMEM;
-	}
 	chan->user_put = 0x40;
 	chan->user_get = 0x44;
 
@@ -182,15 +178,15 @@
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
 		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
 	if (ret) {
 		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -198,24 +194,17 @@
 	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
 	if (ret) {
 		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* disable the fifo caches */
 	pfifo->reassign(dev, false);
 
-	/* Create a graphics context for new channel */
-	ret = pgraph->create_context(chan);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
-
 	/* Construct inital RAMFC for new channel */
 	ret = pfifo->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -225,83 +214,111 @@
 	if (!ret)
 		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_debugfs_channel_init(chan);
 
-	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
 	*chan_ret = chan;
 	return 0;
 }
 
-/* stops a fifo */
-void
-nouveau_channel_free(struct nouveau_channel *chan)
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
 {
-	struct drm_device *dev = chan->dev;
+	struct nouveau_channel *chan = NULL;
+
+	if (likely(ref && atomic_inc_not_zero(&ref->users)))
+		nouveau_channel_ref(ref, &chan);
+
+	return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
 	unsigned long flags;
-	int ret;
 
-	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+		return ERR_PTR(-EINVAL);
 
-	nouveau_debugfs_channel_fini(chan);
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* Give outstanding push buffers a chance to complete */
-	nouveau_fence_update(chan);
-	if (chan->fence.sequence != chan->fence.sequence_ack) {
-		struct nouveau_fence *fence = NULL;
+	if (unlikely(!chan))
+		return ERR_PTR(-EINVAL);
 
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret)
-			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	if (unlikely(file_priv && chan->file_priv != file_priv)) {
+		nouveau_channel_put_unlocked(&chan);
+		return ERR_PTR(-EINVAL);
 	}
 
-	/* Ensure all outstanding fences are signaled.  They should be if the
+	mutex_lock(&chan->mutex);
+	return chan;
+}
+
+void
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan = *pchan;
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+	unsigned long flags;
+
+	/* decrement the refcount, and we're done if there's still refs */
+	if (likely(!atomic_dec_and_test(&chan->users))) {
+		nouveau_channel_ref(NULL, pchan);
+		return;
+	}
+
+	/* no one wants the channel anymore */
+	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
+	nouveau_debugfs_channel_fini(chan);
+
+	/* give it chance to idle */
+	nouveau_channel_idle(chan);
+
+	/* ensure all outstanding fences are signaled.  they should be if the
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
 	nouveau_fence_channel_fini(chan);
 
-	/* This will prevent pfifo from switching channels. */
+	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
-	/* We want to give pgraph a chance to idle and get rid of all potential
-	 * errors. We need to do this before the lock, otherwise the irq handler
-	 * is unable to process them.
+	/* We want to give pgraph a chance to idle and get rid of all
+	 * potential errors. We need to do this without the context
+	 * switch lock held, otherwise the irq handler is unable to
+	 * process them.
 	 */
 	if (pgraph->channel(dev) == chan)
 		nouveau_wait_for_idle(dev);
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	pgraph->fifo_access(dev, false);
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
-	pgraph->destroy_context(chan);
-	pgraph->fifo_access(dev, true);
-
-	if (pfifo->channel_id(dev) == chan->id) {
-		pfifo->disable(dev);
-		pfifo->unload_context(dev);
-		pfifo->enable(dev);
-	}
+	/* destroy the engine specific contexts */
 	pfifo->destroy_context(chan);
+	pgraph->destroy_context(chan);
+	if (pcrypt->destroy_context)
+		pcrypt->destroy_context(chan);
 
 	pfifo->reassign(dev, true);
 
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	/* aside from its resources, the channel should now be dead,
+	 * remove it from the channel list
+	 */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* Release the channel's resources */
+	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
 		nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +327,80 @@
 	}
 	nouveau_gpuobj_channel_takedown(chan);
 	nouveau_notifier_takedown_channel(chan);
-	if (chan->user)
-		iounmap(chan->user);
 
-	dev_priv->fifos[chan->id] = NULL;
+	nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+	mutex_unlock(&(*pchan)->mutex);
+	nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+	struct nouveau_channel *chan =
+		container_of(ref, struct nouveau_channel, ref);
+
 	kfree(chan);
 }
 
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+		    struct nouveau_channel **pchan)
+{
+	if (chan)
+		kref_get(&chan->ref);
+
+	if (*pchan)
+		kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+	*pchan = chan;
+}
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	nouveau_fence_update(chan);
+
+	if (chan->fence.sequence != chan->fence.sequence_ack) {
+		ret = nouveau_fence_new(chan, &fence, true);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
+
+		if (ret)
+			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	}
+}
+
 /* cleans up all the fifos from file_priv */
 void
 nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_channel *chan;
 	int i;
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		chan = nouveau_channel_get(dev, file_priv, i);
+		if (IS_ERR(chan))
+			continue;
 
-		if (chan && chan->file_priv == file_priv)
-			nouveau_channel_free(chan);
+		atomic_dec(&chan->users);
+		nouveau_channel_put(&chan);
 	}
 }
 
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
-		      int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	if (channel >= engine->fifo.channels)
-		return 0;
-	if (dev_priv->fifos[channel] == NULL)
-		return 0;
-
-	return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
 
 /***********************************
  * ioctls wrapping the functions
@@ -383,36 +436,44 @@
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	init->subchan[0].handle = NvM2MF;
-	if (dev_priv->card_type < NV_50)
-		init->subchan[0].grclass = 0x0039;
-	else
-		init->subchan[0].grclass = 0x5039;
-	init->subchan[1].handle = NvSw;
-	init->subchan[1].grclass = NV_SW;
-	init->nr_subchan = 2;
+	if (dev_priv->card_type < NV_C0) {
+		init->subchan[0].handle = NvM2MF;
+		if (dev_priv->card_type < NV_50)
+			init->subchan[0].grclass = 0x0039;
+		else
+			init->subchan[0].grclass = 0x5039;
+		init->subchan[1].handle = NvSw;
+		init->subchan[1].grclass = NV_SW;
+		init->nr_subchan = 2;
+	} else {
+		init->subchan[0].handle  = 0x9039;
+		init->subchan[0].grclass = 0x9039;
+		init->nr_subchan = 1;
+	}
 
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 				    &init->notifier_handle);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
 
-	return 0;
+	if (ret == 0)
+		atomic_inc(&chan->users); /* userspace reference */
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 static int
 nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_nouveau_channel_free *cfree = data;
+	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	nouveau_channel_free(chan);
+	atomic_dec(&chan->users);
+	nouveau_channel_put(&chan);
 	return 0;
 }
 
@@ -421,18 +482,18 @@
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
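
Note on the channel rework above: the dev_priv->fifos[] array becomes dev_priv->channels.ptr[] guarded by channels.lock, and every channel now carries a kref (structure lifetime), an atomic 'users' count (hardware context lifetime) and its own mutex. Callers switch from the removed NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro to nouveau_channel_get()/nouveau_channel_put(); the consumer pattern, condensed from nouveau_ioctl_fifo_free() above:

	chan = nouveau_channel_get(dev, file_priv, id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... operate on the channel; chan->mutex is held here ... */

	nouveau_channel_put(&chan);	/* drops the mutex and the reference */
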
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e..a21e000 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
 #include "nouveau_connector.h"
 #include "nouveau_hw.h"
 
+static void nouveau_connector_hotplug(void *, int);
+
 static struct nouveau_encoder *
 find_encoder_by_type(struct drm_connector *connector, int type)
 {
@@ -94,22 +96,30 @@
 }
 
 static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
 {
-	struct nouveau_connector *nv_connector =
-		nouveau_connector(drm_connector);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_gpio_engine *pgpio;
 	struct drm_device *dev;
 
 	if (!nv_connector)
 		return;
 
 	dev = nv_connector->base.dev;
+	dev_priv = dev->dev_private;
 	NV_DEBUG_KMS(dev, "\n");
 
+	pgpio = &dev_priv->engine.gpio;
+	if (pgpio->irq_unregister) {
+		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+				      nouveau_connector_hotplug, connector);
+	}
+
 	kfree(nv_connector->edid);
-	drm_sysfs_connector_remove(drm_connector);
-	drm_connector_cleanup(drm_connector);
-	kfree(drm_connector);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
 }
 
 static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
 	struct nouveau_connector *nv_connector = NULL;
 	struct dcb_connector_table_entry *dcb = NULL;
 	struct drm_connector *connector;
@@ -876,6 +887,11 @@
 		break;
 	}
 
+	if (pgpio->irq_register) {
+		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+				    nouveau_connector_hotplug, connector);
+	}
+
 	drm_sysfs_connector_add(connector);
 	dcb->drm = connector;
 	return dcb->drm;
@@ -886,3 +902,29 @@
 	return ERR_PTR(ret);
 
 }
+
+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+	struct drm_connector *connector = data;
+	struct drm_device *dev = connector->dev;
+
+	NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+		drm_get_connector_name(connector));
+
+	if (connector->encoder && connector->encoder->crtc &&
+	    connector->encoder->crtc->enabled) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+		struct drm_encoder_helper_funcs *helper =
+			connector->encoder->helper_private;
+
+		if (nv_encoder->dcb->type == OUTPUT_DP) {
+			if (plugged)
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+			else
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		}
+	}
+
+	drm_helper_hpd_irq_event(dev);
+}
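
Note on nouveau_connector.c: connectors now wire up GPIO-driven hotplug directly, with registration at connector creation and the matching teardown in nouveau_connector_destroy(), replacing the hpd workqueue state removed from nouveau_drv.h further down. The handler cycles DP encoders through DPMS on plug/unplug and then calls drm_helper_hpd_irq_event(). The pairing, condensed from the two hunks above:

	if (pgpio->irq_register)
		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
				    nouveau_connector_hotplug, connector);
	/* ... connector lifetime ... */
	if (pgpio->irq_unregister)
		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
				      nouveau_connector_hotplug, connector);
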
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd6..505c6bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
 
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@
 	.output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+			    NV_PCRTC_INTR_0_VBLANK);
+
+	return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+			  struct nouveau_bo *new_bo)
+{
+	int ret;
+
+	ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail;
+
+	ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail_unreserve;
+
+	return 0;
+
+fail_unreserve:
+	ttm_bo_unreserve(&new_bo->bo);
+fail:
+	nouveau_bo_unpin(new_bo);
+	return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+			    struct nouveau_bo *new_bo,
+			    struct nouveau_fence *fence)
+{
+	nouveau_bo_fence(new_bo, fence);
+	ttm_bo_unreserve(&new_bo->bo);
+
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
+
+	nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+		       struct nouveau_bo *old_bo,
+		       struct nouveau_bo *new_bo,
+		       struct nouveau_page_flip_state *s,
+		       struct nouveau_fence **pfence)
+{
+	struct drm_device *dev = chan->dev;
+	unsigned long flags;
+	int ret;
+
+	/* Queue it to the pending list */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&s->head, &chan->nvsw.flip);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* Synchronize with the old framebuffer */
+	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	if (ret)
+		goto fail;
+
+	/* Emit the pageflip */
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		goto fail;
+
+	BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING(chan, 0);
+	FIRE_RING(chan);
+
+	ret = nouveau_fence_new(chan, pfence, true);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_del(&s->head);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+	struct nouveau_page_flip_state *s;
+	struct nouveau_channel *chan;
+	struct nouveau_fence *fence;
+	int ret;
+
+	if (dev_priv->engine.graph.accel_blocked)
+		return -ENODEV;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	/* Don't let the buffers go away while we flip */
+	ret = nouveau_page_flip_reserve(old_bo, new_bo);
+	if (ret)
+		goto fail_free;
+
+	/* Initialize a page flip struct */
+	*s = (struct nouveau_page_flip_state)
+		{ { }, event, nouveau_crtc(crtc)->index,
+		  fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+		  new_bo->bo.offset };
+
+	/* Choose the channel the flip will be handled in */
+	chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+	if (!chan)
+		chan = nouveau_channel_get_unlocked(dev_priv->channel);
+	mutex_lock(&chan->mutex);
+
+	/* Emit a page flip */
+	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+	nouveau_channel_put(&chan);
+	if (ret)
+		goto fail_unreserve;
+
+	/* Update the crtc struct and cleanup */
+	crtc->fb = fb;
+
+	nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+	nouveau_fence_unref(&fence);
+	return 0;
+
+fail_unreserve:
+	nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+	kfree(s);
+	return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+			 struct nouveau_page_flip_state *ps)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state *s;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (list_empty(&chan->nvsw.flip)) {
+		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EINVAL;
+	}
+
+	s = list_first_entry(&chan->nvsw.flip,
+			     struct nouveau_page_flip_state, head);
+	if (s->event) {
+		struct drm_pending_vblank_event *e = s->event;
+		struct timeval now;
+
+		do_gettimeofday(&now);
+		e->event.sequence = 0;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	list_del(&s->head);
+	*ps = *s;
+	kfree(s);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return 0;
+}
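
Note on nouveau_display.c: it gains the vblank enable/disable hooks wired into the driver structure further down, plus a software page-flip path. The flow, condensed from the new functions above:

	/*
	 * nouveau_crtc_page_flip()
	 *   nouveau_page_flip_reserve(old, new)  pin the new fb, reserve both BOs
	 *   nouveau_page_flip_emit(chan, ...)    queue the state on chan->nvsw.flip,
	 *                                        sync against the old fb's fence,
	 *                                        emit NV_SW_PAGE_FLIP, create a
	 *                                        completion fence
	 *   nouveau_page_flip_unreserve(...)     fence and unreserve both BOs,
	 *                                        unpin the old fb
	 *
	 * nouveau_finish_page_flip()             pops the oldest pending state and
	 *                                        delivers the vblank event to
	 *                                        userspace
	 */
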
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e6..65699bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -36,7 +36,7 @@
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
 
-	if (dev_priv->card_type == NV_50) {
+	if (dev_priv->card_type >= NV_50) {
 		const int ib_size = pushbuf->bo.mem.size / 2;
 
 		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -59,17 +59,26 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
 	int ret, i;
 
-	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
-	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
-				    0x0039 : 0x5039, &obj);
-	if (ret)
-		return ret;
+	if (dev_priv->card_type >= NV_C0) {
+		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+		if (ret)
+			return ret;
 
-	ret = nouveau_ramht_insert(chan, NvM2MF, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+		OUT_RING  (chan, 0x00009039);
+		FIRE_RING (chan);
+		return 0;
+	}
+
+	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+				    0x0039 : 0x5039);
 	if (ret)
 		return ret;
 
@@ -78,11 +87,6 @@
 	if (ret)
 		return ret;
 
-	/* Map push buffer */
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret)
-		return ret;
-
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index d578c21..c36f176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -77,7 +77,8 @@
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
 	NvEvoFB16	= 0x01000001,
-	NvEvoFB32	= 0x01000002
+	NvEvoFB32	= 0x01000002,
+	NvEvoVRAM_LP	= 0x01000003
 };
 
 #define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
@@ -125,6 +126,12 @@
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
 
 static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+{
+	OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
 BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
 {
 	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
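
Note on the new Fermi submission macro: BEGIN_NVC0() sits alongside the older BEGIN_RING() encoding. A worked example using the call from nvc0_bo_move_m2mf() earlier in this diff, assuming NvSubM2MF is subchannel 0 (the enum value is not shown in these hunks):

	/*
	 * BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2)
	 *   -> (2 << 28) | (2 << 16) | (0 << 13) | (0x0238 >> 2)
	 *    = 0x20000000 | 0x00020000 | 0x00000000 | 0x0000008e
	 *    = 0x2002008e
	 *
	 * The method offset is stored shifted right by two, and the size field
	 * moves from bits 18+ (BEGIN_RING) to bits 16+ (BEGIN_NVC0).
	 */
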
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f30..38d5995 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@
 	struct bit_displayport_encoder_table *dpe;
 	int dpe_headerlen;
 	uint8_t config[4], status[3];
-	bool cr_done, cr_max_vs, eq_done;
+	bool cr_done, cr_max_vs, eq_done, hpd_state;
 	int ret = 0, i, tries, voltage;
 
 	NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@
 	/* disable hotplug detect, this flips around on some panels during
 	 * link training.
 	 */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+	hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
 
 	if (dpe->script0) {
 		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@
 	}
 
 	/* re-enable hotplug detect */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
 
 	return eq_done;
 }
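
Note on the DP link-training change: pgpio->irq_enable() now returns the previous hotplug-detect state (see the prototype change to bool in nouveau_drv.h below), so link training saves and restores it instead of unconditionally re-enabling:

	hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
	/* ... link training ... */
	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
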
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9087549..13bb672 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@
 int nouveau_perflvl_wr;
 module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
 
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -167,6 +171,9 @@
 	if (pm_state.event == PM_EVENT_PRETHAW)
 		return 0;
 
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	NV_INFO(dev, "Disabling fbcon acceleration...\n");
 	nouveau_fbcon_save_disable_accel(dev);
 
@@ -193,23 +200,10 @@
 
 	NV_INFO(dev, "Idling channels...\n");
 	for (i = 0; i < pfifo->channels; i++) {
-		struct nouveau_fence *fence = NULL;
+		chan = dev_priv->channels.ptr[i];
 
-		chan = dev_priv->fifos[i];
-		if (!chan || (dev_priv->card_type >= NV_50 &&
-			      chan == dev_priv->fifos[0]))
-			continue;
-
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret) {
-			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
-				 chan->id);
-		}
+		if (chan && chan->pushbuf_bo)
+			nouveau_channel_idle(chan);
 	}
 
 	pgraph->fifo_access(dev, false);
@@ -219,17 +213,17 @@
 	pfifo->unload_context(dev);
 	pgraph->unload_context(dev);
 
-	NV_INFO(dev, "Suspending GPU objects...\n");
-	ret = nouveau_gpuobj_suspend(dev);
+	ret = pinstmem->suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
 		goto out_abort;
 	}
 
-	ret = pinstmem->suspend(dev);
+	NV_INFO(dev, "Suspending GPU objects...\n");
+	ret = nouveau_gpuobj_suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
-		nouveau_gpuobj_suspend_cleanup(dev);
+		pinstmem->resume(dev);
 		goto out_abort;
 	}
 
@@ -263,6 +257,9 @@
 	struct drm_crtc *crtc;
 	int ret, i;
 
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	nouveau_fbcon_save_disable_accel(dev);
 
 	NV_INFO(dev, "We're back, enabling device...\n");
@@ -294,17 +291,18 @@
 		}
 	}
 
+	NV_INFO(dev, "Restoring GPU objects...\n");
+	nouveau_gpuobj_resume(dev);
+
 	NV_INFO(dev, "Reinitialising engines...\n");
 	engine->instmem.resume(dev);
 	engine->mc.init(dev);
 	engine->timer.init(dev);
 	engine->fb.init(dev);
 	engine->graph.init(dev);
+	engine->crypt.init(dev);
 	engine->fifo.init(dev);
 
-	NV_INFO(dev, "Restoring GPU objects...\n");
-	nouveau_gpuobj_resume(dev);
-
 	nouveau_irq_postinstall(dev);
 
 	/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +311,7 @@
 		int j;
 
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			chan = dev_priv->fifos[i];
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;
 
@@ -347,13 +345,11 @@
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
 
-		nv_crtc->cursor.set_offset(nv_crtc,
-					nv_crtc->cursor.nvbo->bo.offset -
-					dev_priv->vm_vram_base);
-
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
 		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-			nv_crtc->cursor_saved_y);
+						 nv_crtc->cursor_saved_y);
 	}
 
 	/* Force CLUT to get re-loaded during modeset */
@@ -393,6 +389,9 @@
 	.irq_postinstall = nouveau_irq_postinstall,
 	.irq_uninstall = nouveau_irq_uninstall,
 	.irq_handler = nouveau_irq_handler,
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = nouveau_vblank_enable,
+	.disable_vblank = nouveau_vblank_disable,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = nouveau_ioctls,
 	.fops = {
@@ -403,6 +402,7 @@
 		.mmap = nouveau_ttm_mmap,
 		.poll = drm_poll,
 		.fasync = drm_fasync,
+		.read = drm_read,
 #if defined(CONFIG_COMPAT)
 		.compat_ioctl = nouveau_compat_ioctl,
 #endif
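
Note on the suspend/resume changes in nouveau_drv.c: channels are idled through the shared nouveau_channel_idle() helper, the save/restore of GPU objects is reordered relative to PINSTMEM and the engines, both paths are skipped when the device is already switched off (DRM_SWITCH_POWER_OFF), and an integer 'msi' module parameter is added (default off, e.g. loaded with msi=1) along with the drm_read/vblank hooks. The new ordering, condensed from the hunks above:

	/*
	 * suspend: idle channels -> unload PFIFO/PGRAPH contexts
	 *          -> pinstmem->suspend() -> nouveau_gpuobj_suspend()
	 * resume:  nouveau_gpuobj_resume() -> instmem/mc/timer/fb/graph/
	 *          crypt/fifo init -> rewrite NOUVEAU_DMA_SKIPS
	 *          -> restore cursor offsets/positions
	 * (previously the objects were suspended before instmem, and restored
	 *  only after the engines had been reinitialised)
	 */
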
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64..46e3257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,37 @@
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
 #include "nouveau_bios.h"
+#include "nouveau_util.h"
+
 struct nouveau_grctx;
+struct nouveau_vram;
+#include "nouveau_vm.h"
 
 #define MAX_NUM_DCB_ENTRIES 16
 
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
 
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK    (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_vram {
+	struct drm_device *dev;
+
+	struct nouveau_vma bar_vma;
+	u8  page_shift;
+
+	struct list_head regions;
+	u32 memtype;
+	u64 offset;
+	u64 size;
+};
 
 struct nouveau_tile_reg {
-	struct nouveau_fence *fence;
-	uint32_t addr;
-	uint32_t size;
 	bool used;
+	uint32_t addr;
+	uint32_t limit;
+	uint32_t pitch;
+	uint32_t zcomp;
+	struct drm_mm_node *tag_mem;
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_bo {
@@ -88,6 +103,7 @@
 
 	struct nouveau_channel *channel;
 
+	struct nouveau_vma vma;
 	bool mappable;
 	bool no_vm;
 
@@ -96,7 +112,6 @@
 	struct nouveau_tile_reg *tile;
 
 	struct drm_gem_object *gem;
-	struct drm_file *cpu_filp;
 	int pin_refcnt;
 };
 
@@ -133,20 +148,28 @@
 
 #define NVOBJ_ENGINE_SW		0
 #define NVOBJ_ENGINE_GR		1
-#define NVOBJ_ENGINE_DISPLAY	2
+#define NVOBJ_ENGINE_PPP	2
+#define NVOBJ_ENGINE_COPY	3
+#define NVOBJ_ENGINE_VP		4
+#define NVOBJ_ENGINE_CRYPT      5
+#define NVOBJ_ENGINE_BSP	6
+#define NVOBJ_ENGINE_DISPLAY	0xcafe0001
 #define NVOBJ_ENGINE_INT	0xdeadbeef
 
+#define NVOBJ_FLAG_DONT_MAP             (1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
+#define NVOBJ_FLAG_VM			(1 << 3)
+
+#define NVOBJ_CINST_GLOBAL	0xdeadbeef
+
 struct nouveau_gpuobj {
 	struct drm_device *dev;
 	struct kref refcount;
 	struct list_head list;
 
-	struct drm_mm_node *im_pramin;
-	struct nouveau_bo *im_backing;
-	uint32_t *im_backing_suspend;
-	int im_bound;
+	void *node;
+	u32 *suspend;
 
 	uint32_t flags;
 
@@ -162,10 +185,29 @@
 	void *priv;
 };
 
+struct nouveau_page_flip_state {
+	struct list_head head;
+	struct drm_pending_vblank_event *event;
+	int crtc, bpp, pitch, x, y;
+	uint64_t offset;
+};
+
+enum nouveau_channel_mutex_class {
+	NOUVEAU_UCHANNEL_MUTEX,
+	NOUVEAU_KCHANNEL_MUTEX
+};
+
 struct nouveau_channel {
 	struct drm_device *dev;
 	int id;
 
+	/* references to the channel data structure */
+	struct kref ref;
+	/* users of the hardware channel resources, the hardware
+	 * context will be kicked off when it reaches zero. */
+	atomic_t users;
+	struct mutex mutex;
+
 	/* owner of this fifo */
 	struct drm_file *file_priv;
 	/* mapping of the fifo itself */
@@ -198,16 +240,17 @@
 	/* PFIFO context */
 	struct nouveau_gpuobj *ramfc;
 	struct nouveau_gpuobj *cache;
+	void *fifo_priv;
 
 	/* PGRAPH context */
 	/* XXX may be merge 2 pointers as private data ??? */
 	struct nouveau_gpuobj *ramin_grctx;
+	struct nouveau_gpuobj *crypt_ctx;
 	void *pgraph_ctx;
 
 	/* NV50 VM */
+	struct nouveau_vm     *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;
-	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 
 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +281,11 @@
 
 	struct {
 		struct nouveau_gpuobj *vblsem;
+		uint32_t vblsem_head;
 		uint32_t vblsem_offset;
 		uint32_t vblsem_rval;
 		struct list_head vbl_wait;
+		struct list_head flip;
 	} nvsw;
 
 	struct {
@@ -258,11 +303,11 @@
 	int	(*suspend)(struct drm_device *dev);
 	void	(*resume)(struct drm_device *dev);
 
-	int	(*populate)(struct drm_device *, struct nouveau_gpuobj *,
-			    uint32_t *size);
-	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
-	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
-	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+	int	(*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+	void	(*put)(struct nouveau_gpuobj *);
+	int	(*map)(struct nouveau_gpuobj *);
+	void	(*unmap)(struct nouveau_gpuobj *);
+
 	void	(*flush)(struct drm_device *);
 };
 
@@ -279,15 +324,21 @@
 
 struct nouveau_fb_engine {
 	int num_tiles;
+	struct drm_mm tag_heap;
+	void *priv;
 
 	int  (*init)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				 uint32_t size, uint32_t pitch);
+	void (*init_tile_region)(struct drm_device *dev, int i,
+				 uint32_t addr, uint32_t size,
+				 uint32_t pitch, uint32_t flags);
+	void (*set_tile_region)(struct drm_device *dev, int i);
+	void (*free_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_fifo_engine {
+	void *priv;
 	int  channels;
 
 	struct nouveau_gpuobj *playlist[2];
@@ -310,22 +361,11 @@
 	void (*tlb_flush)(struct drm_device *dev);
 };
 
-struct nouveau_pgraph_object_method {
-	int id;
-	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
-		      uint32_t data);
-};
-
-struct nouveau_pgraph_object_class {
-	int id;
-	bool software;
-	struct nouveau_pgraph_object_method *methods;
-};
-
 struct nouveau_pgraph_engine {
-	struct nouveau_pgraph_object_class *grclass;
 	bool accel_blocked;
+	bool registered;
 	int grctx_size;
+	void *priv;
 
 	/* NV2x/NV3x context table (0x400780) */
 	struct nouveau_gpuobj *ctx_table;
@@ -342,8 +382,7 @@
 	int  (*unload_context)(struct drm_device *);
 	void (*tlb_flush)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				  uint32_t size, uint32_t pitch);
+	void (*set_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_display_engine {
@@ -355,13 +394,19 @@
 };
 
 struct nouveau_gpio_engine {
+	void *priv;
+
 	int  (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *);
 
 	int  (*get)(struct drm_device *, enum dcb_gpio_tag);
 	int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
 
-	void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+	int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+			     void (*)(void *, int), void *);
+	void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+			       void (*)(void *, int), void *);
+	bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
 };
 
 struct nouveau_pm_voltage_level {
@@ -437,6 +482,7 @@
 	struct nouveau_pm_level *cur;
 
 	struct device *hwmon;
+	struct notifier_block acpi_nb;
 
 	int (*clock_get)(struct drm_device *, u32 id);
 	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +495,25 @@
 	int (*temp_get)(struct drm_device *);
 };
 
+struct nouveau_crypt_engine {
+	bool registered;
+
+	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *);
+	int  (*create_context)(struct nouveau_channel *);
+	void (*destroy_context)(struct nouveau_channel *);
+	void (*tlb_flush)(struct drm_device *dev);
+};
+
+struct nouveau_vram_engine {
+	int  (*init)(struct drm_device *);
+	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
+		    u32 type, struct nouveau_vram **);
+	void (*put)(struct drm_device *, struct nouveau_vram **);
+
+	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+};
+
 struct nouveau_engine {
 	struct nouveau_instmem_engine instmem;
 	struct nouveau_mc_engine      mc;
@@ -459,6 +524,8 @@
 	struct nouveau_display_engine display;
 	struct nouveau_gpio_engine    gpio;
 	struct nouveau_pm_engine      pm;
+	struct nouveau_crypt_engine   crypt;
+	struct nouveau_vram_engine    vram;
 };
 
 struct nouveau_pll_vals {
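
The hunks above grow the per-chipset "engine" tables: nouveau_crypt_engine and nouveau_vram_engine are plain structs of function pointers that each hardware generation fills in at init time, and instmem trades populate/clear/bind/unbind for get/put/map/unmap. A minimal standalone sketch of that struct-of-function-pointers dispatch pattern, with all names invented for illustration (this is not the driver code):

/*
 * Illustrative sketch only: common code calls through a table of
 * function pointers without knowing which hardware generation filled
 * the table in.  demo_device, demo_vram_engine and fake_vram_* are
 * made-up names for the example.
 */
#include <stdio.h>

struct demo_device;

struct demo_vram_engine {
	int  (*init)(struct demo_device *);
	void (*takedown)(struct demo_device *);
};

struct demo_device {
	const char *name;
	struct demo_vram_engine vram;
};

/* one possible backend, the way a single-generation implementation might look */
static int fake_vram_init(struct demo_device *dev)
{
	printf("%s: vram init\n", dev->name);
	return 0;
}

static void fake_vram_takedown(struct demo_device *dev)
{
	printf("%s: vram takedown\n", dev->name);
}

int main(void)
{
	struct demo_device dev = {
		.name = "demo",
		.vram = {
			.init     = fake_vram_init,
			.takedown = fake_vram_takedown,
		},
	};

	/* generic code only ever calls through the table */
	if (dev.vram.init(&dev) == 0)
		dev.vram.takedown(&dev);
	return 0;
}
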
@@ -577,18 +644,15 @@
 	bool ramin_available;
 	struct drm_mm ramin_heap;
 	struct list_head gpuobj_list;
+	struct list_head classes;
 
 	struct nouveau_bo *vga_ram;
 
+	/* interrupt handling */
+	void (*irq_handler[32])(struct drm_device *);
+	bool msi_enabled;
 	struct workqueue_struct *wq;
 	struct work_struct irq_work;
-	struct work_struct hpd_work;
-
-	struct {
-		spinlock_t lock;
-		uint32_t hpd0_bits;
-		uint32_t hpd1_bits;
-	} hpd_state;
 
 	struct list_head vbl_waiting;
 
@@ -605,8 +669,10 @@
 		struct nouveau_bo *bo;
 	} fence;
 
-	int fifo_alloc_count;
-	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+	struct {
+		spinlock_t lock;
+		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+	} channels;
 
 	struct nouveau_engine engine;
 	struct nouveau_channel *channel;
@@ -632,12 +698,14 @@
 		uint64_t aper_free;
 
 		struct nouveau_gpuobj *sg_ctxdma;
-		struct page *sg_dummy_page;
-		dma_addr_t sg_dummy_bus;
+		struct nouveau_vma vma;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
-	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+	struct {
+		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+		spinlock_t lock;
+	} tile;
 
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
@@ -650,14 +718,12 @@
 	uint64_t fb_aper_free;
 	int fb_mtrr;
 
+	/* BAR control (NV50-) */
+	struct nouveau_vm *bar1_vm;
+	struct nouveau_vm *bar3_vm;
+
 	/* G8x/G9x virtual address space */
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;
-	uint64_t vm_vram_base;
-	uint64_t vm_vram_size;
-	uint64_t vm_end;
-	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
-	int vm_vram_pt_nr;
+	struct nouveau_vm *chan_vm;
 
 	struct nvbios vbios;
 
@@ -674,6 +740,7 @@
 	struct backlight_device *backlight;
 
 	struct nouveau_channel *evo;
+	u32 evo_alloc;
 	struct {
 		struct dcb_entry *dcb;
 		u16 script;
@@ -686,6 +753,8 @@
 
 	struct nouveau_fbdev *nfbdev;
 	struct apertures_struct *apertures;
+
+	bool powered_down;
 };
 
 static inline struct drm_nouveau_private *
@@ -719,16 +788,6 @@
 	return 0;
 }
 
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
-	struct drm_nouveau_private *nv = dev->dev_private;       \
-	if (!nouveau_channel_owner(dev, (cl), (id))) {           \
-		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
-			 DRM_CURRENTPID, (id));                  \
-		return -EPERM;                                   \
-	}                                                        \
-	(ch) = nv->fifos[(id)];                                  \
-} while (0)
-
 /* nouveau_drv.c */
 extern int nouveau_agpmode;
 extern int nouveau_duallink;
@@ -748,6 +807,7 @@
 extern int nouveau_override_conntype;
 extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
+extern int nouveau_msi;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +822,10 @@
 				   struct drm_file *);
 extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
 				   struct drm_file *);
-extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
-			       uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
+			    uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
+			    uint32_t reg, uint32_t mask, uint32_t val);
 extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
@@ -775,18 +837,18 @@
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
-						    uint32_t addr,
-						    uint32_t size,
-						    uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
-				   struct nouveau_tile_reg *tile,
-				   struct nouveau_fence *fence);
-extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
-				    uint32_t size, uint32_t flags,
-				    uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
-			       uint32_t size);
+extern int  nouveau_mem_detect(struct drm_device *);
+extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+	struct drm_device *dev, uint32_t addr, uint32_t size,
+	uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+				     struct nouveau_tile_reg *tile,
+				     struct nouveau_fence *fence);
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+
+/* nvc0_vram.c */
+extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
 
 /* nouveau_notifier.c */
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +865,44 @@
 extern struct drm_ioctl_desc nouveau_ioctls[];
 extern int nouveau_max_ioctl;
 extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int  nouveau_channel_owner(struct drm_device *, struct drm_file *,
-				  int channel);
 extern int  nouveau_channel_alloc(struct drm_device *dev,
 				  struct nouveau_channel **chan,
 				  struct drm_file *file_priv,
 				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
+extern void nouveau_channel_put(struct nouveau_channel **);
+extern void nouveau_channel_ref(struct nouveau_channel *chan,
+				struct nouveau_channel **pchan);
+extern void nouveau_channel_idle(struct nouveau_channel *chan);
 
 /* nouveau_object.c */
+#define NVOBJ_CLASS(d,c,e) do {                                                \
+	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e);        \
+	if (ret)                                                               \
+		return ret;                                                    \
+} while(0)
+
+#define NVOBJ_MTHD(d,c,m,e) do {                                               \
+	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e));                 \
+	if (ret)                                                               \
+		return ret;                                                    \
+} while(0)
+
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_init(struct drm_device *);
 extern void nouveau_gpuobj_takedown(struct drm_device *);
 extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
 extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int  nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
+extern int  nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
+				    int (*exec)(struct nouveau_channel *,
+					        u32 class, u32 mthd, u32 data));
+extern int  nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
+extern int  nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
 extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
 				       uint32_t vram_h, uint32_t tt_h);
 extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
@@ -832,21 +917,25 @@
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
 				  uint64_t offset, uint64_t size, int access,
 				  int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
-				       uint64_t offset, uint64_t size,
-				       int access, struct nouveau_gpuobj **,
-				       uint32_t *o_ret);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
-				 struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
-				 struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+			       u64 size, int target, int access, u32 type,
+			       u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+				 int class, u64 base, u64 size, int target,
+				 int access, u32 type, u32 comp);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 				     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
 				     struct drm_file *);
 
 /* nouveau_irq.c */
+extern int         nouveau_irq_init(struct drm_device *);
+extern void        nouveau_irq_fini(struct drm_device *);
 extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void        nouveau_irq_register(struct drm_device *, int status_bit,
+					void (*)(struct drm_device *));
+extern void        nouveau_irq_unregister(struct drm_device *, int status_bit);
 extern void        nouveau_irq_preinstall(struct drm_device *);
 extern int         nouveau_irq_postinstall(struct drm_device *);
 extern void        nouveau_irq_uninstall(struct drm_device *);
@@ -854,8 +943,8 @@
 /* nouveau_sgdma.c */
 extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
-extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
-				  uint32_t *page);
+extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+					   uint32_t offset);
 extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
 
 /* nouveau_debugfs.c */
@@ -966,18 +1055,25 @@
 /* nv10_fb.c */
 extern int  nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv30_fb.c */
 extern int  nv30_fb_init(struct drm_device *);
 extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv40_fb.c */
 extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
 /* nv50_fb.c */
 extern int  nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1085,7 @@
 
 /* nv04_fifo.c */
 extern int  nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_fini(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1095,18 @@
 extern void nv04_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv04_fifo_load_context(struct nouveau_channel *);
 extern int  nv04_fifo_unload_context(struct drm_device *);
+extern void nv04_fifo_isr(struct drm_device *);
 
 /* nv10_fifo.c */
 extern int  nv10_fifo_init(struct drm_device *);
 extern int  nv10_fifo_channel_id(struct drm_device *);
 extern int  nv10_fifo_create_context(struct nouveau_channel *);
-extern void nv10_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv10_fifo_load_context(struct nouveau_channel *);
 extern int  nv10_fifo_unload_context(struct drm_device *);
 
 /* nv40_fifo.c */
 extern int  nv40_fifo_init(struct drm_device *);
 extern int  nv40_fifo_create_context(struct nouveau_channel *);
-extern void nv40_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv40_fifo_load_context(struct nouveau_channel *);
 extern int  nv40_fifo_unload_context(struct drm_device *);
 
@@ -1038,7 +1134,6 @@
 extern int  nvc0_fifo_unload_context(struct drm_device *);
 
 /* nv04_graph.c */
-extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
 extern int  nv04_graph_init(struct drm_device *);
 extern void nv04_graph_takedown(struct drm_device *);
 extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1142,11 @@
 extern void nv04_graph_destroy_context(struct nouveau_channel *);
 extern int  nv04_graph_load_context(struct nouveau_channel *);
 extern int  nv04_graph_unload_context(struct drm_device *);
-extern void nv04_graph_context_switch(struct drm_device *);
+extern int  nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+				      u32 class, u32 mthd, u32 data);
+extern struct nouveau_bitfield nv04_graph_nsource[];
 
 /* nv10_graph.c */
-extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
 extern int  nv10_graph_init(struct drm_device *);
 extern void nv10_graph_takedown(struct drm_device *);
 extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1154,11 @@
 extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int  nv10_graph_load_context(struct nouveau_channel *);
 extern int  nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
+extern struct nouveau_bitfield nv10_graph_intr[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
 
 /* nv20_graph.c */
-extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
-extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
 extern int  nv20_graph_create_context(struct nouveau_channel *);
 extern void nv20_graph_destroy_context(struct nouveau_channel *);
 extern int  nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1166,9 @@
 extern int  nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int  nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
-extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
 extern int  nv40_graph_init(struct drm_device *);
 extern void nv40_graph_takedown(struct drm_device *);
 extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1177,9 @@
 extern int  nv40_graph_load_context(struct nouveau_channel *);
 extern int  nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_graph.c */
-extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
 extern int  nv50_graph_init(struct drm_device *);
 extern void nv50_graph_takedown(struct drm_device *);
 extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,10 +1188,10 @@
 extern void nv50_graph_destroy_context(struct nouveau_channel *);
 extern int  nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
-extern void nv50_graph_context_switch(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
 extern void nv50_graph_tlb_flush(struct drm_device *dev);
 extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern struct nouveau_enum nv50_data_error_names[];
 
 /* nvc0_graph.c */
 extern int  nvc0_graph_init(struct drm_device *);
@@ -1113,16 +1203,22 @@
 extern int  nvc0_graph_load_context(struct nouveau_channel *);
 extern int  nvc0_graph_unload_context(struct drm_device *);
 
+/* nv84_crypt.c */
+extern int  nv84_crypt_init(struct drm_device *dev);
+extern void nv84_crypt_fini(struct drm_device *dev);
+extern int  nv84_crypt_create_context(struct nouveau_channel *);
+extern void nv84_crypt_destroy_context(struct nouveau_channel *);
+extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+
 /* nv04_instmem.c */
 extern int  nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int  nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int  nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
@@ -1130,26 +1226,18 @@
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int  nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int  nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
 
 /* nvc0_instmem.c */
 extern int  nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int  nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int  nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nvc0_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
 extern int  nv04_mc_init(struct drm_device *);
@@ -1219,6 +1307,9 @@
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+			       bool no_wait_reserve, bool no_wait_gpu);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
@@ -1234,12 +1325,35 @@
 			       void (*work)(void *priv, bool signalled),
 			       void *priv);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-extern bool nouveau_fence_signalled(void *obj, void *arg);
-extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+
+extern bool __nouveau_fence_signalled(void *obj, void *arg);
+extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int __nouveau_fence_flush(void *obj, void *arg);
+extern void __nouveau_fence_unref(void **obj);
+extern void *__nouveau_fence_ref(void *obj);
+
+static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_signalled(obj, NULL);
+}
+static inline int
+nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
+{
+	return __nouveau_fence_wait(obj, NULL, lazy, intr);
+}
 extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-extern int nouveau_fence_flush(void *obj, void *arg);
-extern void nouveau_fence_unref(void **obj);
-extern void *nouveau_fence_ref(void *obj);
+static inline int nouveau_fence_flush(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_flush(obj, NULL);
+}
+static inline void nouveau_fence_unref(struct nouveau_fence **obj)
+{
+	__nouveau_fence_unref((void **)obj);
+}
+static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_ref(obj);
+}
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1373,28 @@
 extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
+/* nouveau_display.c */
+int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			   struct drm_pending_vblank_event *event);
+int nouveau_finish_page_flip(struct nouveau_channel *,
+			     struct nouveau_page_flip_state *);
+
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 
 /* nv50_gpio.c */
 int nv50_gpio_init(struct drm_device *dev);
+void nv50_gpio_fini(struct drm_device *dev);
 int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+			    void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+			      void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
 
 /* nv50_calc. */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1461,9 @@
 }
 
 #define nv_wait(dev, reg, mask, val) \
-	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+	nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_ne(dev, reg, mask, val) \
+	nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
 
 /* PRAMIN access */
 static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1576,23 @@
 		dev->pdev->subsystem_device == sub_device;
 }
 
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO  1
+#define NV_MEM_ACCESS_WO  2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM  8
+
+#define NV_MEM_TARGET_VRAM        0
+#define NV_MEM_TARGET_PCI         1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM          3
+#define NV_MEM_TARGET_GART        4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+/* NV_SW object class */
 #define NV_SW                                                        0x0000506e
 #define NV_SW_DMA_SEMAPHORE                                          0x00000060
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
@@ -1457,5 +1603,6 @@
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
 #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
 #define NV_SW_VBLSEM_RELEASE                                         0x00000408
+#define NV_SW_PAGE_FLIP                                              0x00000500
 
 #endif /* __NOUVEAU_DRV_H__ */
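
The header diff above converts the old void*-based fence helpers into typed static inline wrappers around double-underscore variants, so the generic buffer-manager callbacks can keep their void* signatures while driver callers get type checking. A minimal standalone sketch of that wrapper pattern, assuming made-up names (demo_fence, __demo_fence_*), not the driver's actual API:

/*
 * Sketch only: keep a void*-based "backend" entry point for a generic
 * framework, and expose typed static inline wrappers to normal callers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_fence {
	bool signalled;
};

/* void*-based entry points, as a generic framework would call them */
static bool __demo_fence_signalled(void *obj, void *arg)
{
	(void)arg;
	return ((struct demo_fence *)obj)->signalled;
}

static void *__demo_fence_ref(void *obj)
{
	return obj;	/* refcounting elided in this sketch */
}

/* typed wrappers, as driver-internal callers would use them */
static inline bool demo_fence_signalled(struct demo_fence *fence)
{
	return __demo_fence_signalled(fence, NULL);
}

static inline struct demo_fence *demo_fence_ref(struct demo_fence *fence)
{
	return __demo_fence_ref(fence);
}

int main(void)
{
	struct demo_fence f = { .signalled = true };

	printf("signalled: %d\n", demo_fence_signalled(demo_fence_ref(&f)));
	return 0;
}
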
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1f..6d56a54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,102 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_dma.h"
 
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_fillrect(info, rect);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_fillrect(info, rect);
+		else
+			ret = nvc0_fbcon_fillrect(info, rect);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_copyarea(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_copyarea(info, image);
+		else
+			ret = nvc0_fbcon_copyarea(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_imageblit(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_imageblit(info, image);
+		else
+			ret = nvc0_fbcon_imageblit(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_imageblit(info, image);
+}
+
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
@@ -58,22 +154,36 @@
 	struct nouveau_channel *chan = dev_priv->channel;
 	int ret, i;
 
-	if (!chan || !chan->accel_done ||
+	if (!chan || !chan->accel_done || in_interrupt() ||
 	    info->state != FBINFO_STATE_RUNNING ||
 	    info->flags & FBINFO_HWACCEL_DISABLED)
 		return 0;
 
-	if (RING_SPACE(chan, 4)) {
+	if (!mutex_trylock(&chan->mutex))
+		return 0;
+
+	ret = RING_SPACE(chan, 4);
+	if (ret) {
+		mutex_unlock(&chan->mutex);
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
 
-	BEGIN_RING(chan, 0, 0x0104, 1);
-	OUT_RING(chan, 0);
-	BEGIN_RING(chan, 0, 0x0100, 1);
-	OUT_RING(chan, 0);
+	if (dev_priv->card_type >= NV_C0) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+		OUT_RING  (chan, 0);
+	} else {
+		BEGIN_RING(chan, 0, 0x0104, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_RING(chan, 0, 0x0100, 1);
+		OUT_RING  (chan, 0);
+	}
+
 	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
 	FIRE_RING(chan);
+	mutex_unlock(&chan->mutex);
 
 	ret = -EBUSY;
 	for (i = 0; i < 100000; i++) {
@@ -97,40 +207,24 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = nouveau_fbcon_fillrect,
+	.fb_copyarea = nouveau_fbcon_copyarea,
+	.fb_imageblit = nouveau_fbcon_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static struct fb_ops nouveau_fbcon_sw_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv04_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv04_fbcon_fillrect,
-	.fb_copyarea = nv04_fbcon_copyarea,
-	.fb_imageblit = nv04_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv50_fbcon_fillrect,
-	.fb_copyarea = nv50_fbcon_copyarea,
-	.fb_imageblit = nv50_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,9 +351,9 @@
 			      FBINFO_HWACCEL_FILLRECT |
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
-	info->fbops = &nouveau_fbcon_ops;
-	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
-			       dev_priv->vm_vram_base;
+	info->fbops = &nouveau_fbcon_sw_ops;
+	info->fix.smem_start = dev->mode_config.fb_base +
+			       (nvbo->bo.mem.start << PAGE_SHIFT);
 	info->fix.smem_len = size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
@@ -268,10 +362,6 @@
 	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(pdev, 1);
-	info->fix.mmio_len = pci_resource_len(pdev, 1);
-
 	/* Set aperture base/size for vesafb takeover */
 	info->apertures = dev_priv->apertures;
 	if (!info->apertures) {
@@ -285,19 +375,20 @@
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
 
+	mutex_unlock(&dev->struct_mutex);
+
 	if (dev_priv->channel && !nouveau_nofbaccel) {
-		switch (dev_priv->card_type) {
-		case NV_C0:
-			break;
-		case NV_50:
-			nv50_fbcon_accel_init(info);
-			info->fbops = &nv50_fbcon_ops;
-			break;
-		default:
-			nv04_fbcon_accel_init(info);
-			info->fbops = &nv04_fbcon_ops;
-			break;
-		};
+		ret = -ENODEV;
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_accel_init(info);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_accel_init(info);
+		else
+			ret = nvc0_fbcon_accel_init(info);
+
+		if (ret == 0)
+			info->fbops = &nouveau_fbcon_ops;
 	}
 
 	nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +399,6 @@
 						nouveau_fb->base.height,
 						nvbo->bo.offset, nvbo);
 
-	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e1268..b73c29f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,15 +40,21 @@
 
 void nouveau_fbcon_restore(void);
 
-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 
 int nouveau_fbcon_init(struct drm_device *dev);
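
The fbcon changes above replace the per-generation fb_ops tables with common wrappers that attempt the accelerated path only when the channel mutex can be taken without blocking, and fall back to the cfb_* software routines otherwise (which is why the per-chip entry points now return int). A hedged, standalone illustration of that "try fast path under trylock, else software fallback" shape, using pthreads and invented names rather than the driver's code:

/*
 * Sketch only: hw_fillrect/sw_fillrect and channel_mutex are made up.
 * Compile with -lpthread.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t channel_mutex = PTHREAD_MUTEX_INITIALIZER;

static int hw_fillrect(int x, int y, int w, int h)
{
	/* stands in for pushing commands to the GPU; may report failure */
	printf("hw fill %dx%d at (%d,%d)\n", w, h, x, y);
	return 0;
}

static void sw_fillrect(int x, int y, int w, int h)
{
	printf("sw fill %dx%d at (%d,%d)\n", w, h, x, y);
}

static void fillrect(int x, int y, int w, int h)
{
	int ret = -ENODEV;

	if (pthread_mutex_trylock(&channel_mutex) == 0) {
		ret = hw_fillrect(x, y, w, h);
		pthread_mutex_unlock(&channel_mutex);
	}

	if (ret)		/* lock contended or HW path failed */
		sw_fillrect(x, y, w, h);
}

int main(void)
{
	fillrect(0, 0, 640, 480);
	return 0;
}

The design point is that console output must never block or deadlock on GPU state, so a contended lock is treated the same as a hardware failure: draw in software and move on.
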
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfb..221b846 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,7 +32,8 @@
 #include "nouveau_dma.h"
 
 #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
+		       nouveau_private(dev)->card_type < NV_C0)
 
 struct nouveau_fence {
 	struct nouveau_channel *channel;
@@ -64,6 +65,7 @@
 	struct nouveau_fence *fence =
 		container_of(ref, struct nouveau_fence, refcount);
 
+	nouveau_channel_ref(NULL, &fence->channel);
 	kfree(fence);
 }
 
@@ -76,14 +78,17 @@
 
 	spin_lock(&chan->fence.lock);
 
-	if (USE_REFCNT(dev))
-		sequence = nvchan_rd32(chan, 0x48);
-	else
-		sequence = atomic_read(&chan->fence.last_sequence_irq);
+	/* Fetch the last sequence if the channel is still up and running */
+	if (likely(!list_empty(&chan->fence.pending))) {
+		if (USE_REFCNT(dev))
+			sequence = nvchan_rd32(chan, 0x48);
+		else
+			sequence = atomic_read(&chan->fence.last_sequence_irq);
 
-	if (chan->fence.sequence_ack == sequence)
-		goto out;
-	chan->fence.sequence_ack = sequence;
+		if (chan->fence.sequence_ack == sequence)
+			goto out;
+		chan->fence.sequence_ack = sequence;
+	}
 
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		sequence = fence->sequence;
@@ -113,13 +118,13 @@
 	if (!fence)
 		return -ENOMEM;
 	kref_init(&fence->refcount);
-	fence->channel = chan;
+	nouveau_channel_ref(chan, &fence->channel);
 
 	if (emit)
 		ret = nouveau_fence_emit(fence);
 
 	if (ret)
-		nouveau_fence_unref((void *)&fence);
+		nouveau_fence_unref(&fence);
 	*pfence = fence;
 	return ret;
 }
@@ -127,7 +132,7 @@
 struct nouveau_channel *
 nouveau_fence_channel(struct nouveau_fence *fence)
 {
-	return fence ? fence->channel : NULL;
+	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
 }
 
 int
@@ -135,6 +140,7 @@
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int ret;
 
 	ret = RING_SPACE(chan, 2);
@@ -155,8 +161,15 @@
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock(&chan->fence.lock);
 
-	BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
-	OUT_RING(chan, fence->sequence);
+	if (USE_REFCNT(dev)) {
+		if (dev_priv->card_type < NV_C0)
+			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+		else
+			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
+	} else {
+		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+	}
+	OUT_RING (chan, fence->sequence);
 	FIRE_RING(chan);
 
 	return 0;
@@ -182,7 +195,7 @@
 }
 
 void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
 
@@ -192,7 +205,7 @@
 }
 
 void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 
@@ -201,7 +214,7 @@
 }
 
 bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
@@ -214,13 +227,14 @@
 }
 
 int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 {
 	unsigned long timeout = jiffies + (3 * DRM_HZ);
+	unsigned long sleep_time = jiffies + 1;
 	int ret = 0;
 
 	while (1) {
-		if (nouveau_fence_signalled(sync_obj, sync_arg))
+		if (__nouveau_fence_signalled(sync_obj, sync_arg))
 			break;
 
 		if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +244,7 @@
 
 		__set_current_state(intr ? TASK_INTERRUPTIBLE
 			: TASK_UNINTERRUPTIBLE);
-		if (lazy)
+		if (lazy && time_after_eq(jiffies, sleep_time))
 			schedule_timeout(1);
 
 		if (intr && signal_pending(current)) {
@@ -368,7 +382,7 @@
 
 	kref_get(&sema->ref);
 	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 
 	return 0;
 }
@@ -380,33 +394,49 @@
 	struct nouveau_channel *chan = nouveau_fence_channel(fence);
 	struct drm_device *dev = wchan->dev;
 	struct nouveau_semaphore *sema;
-	int ret;
+	int ret = 0;
 
-	if (likely(!fence || chan == wchan ||
-		   nouveau_fence_signalled(fence, NULL)))
-		return 0;
+	if (likely(!chan || chan == wchan ||
+		   nouveau_fence_signalled(fence)))
+		goto out;
 
 	sema = alloc_semaphore(dev);
 	if (!sema) {
 		/* Early card or broken userspace, fall back to
 		 * software sync. */
-		return nouveau_fence_wait(fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out;
+	}
+
+	/* Try to take chan's mutex; if we can't take it right away
+	 * we have to fall back to software sync to prevent locking
+	 * order issues.
+	 */
+	if (!mutex_trylock(&chan->mutex)) {
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out_unref;
 	}
 
 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
 	if (ret)
-		goto out;
+		goto out_unlock;
 
 	/* Signal the semaphore from chan */
 	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+	mutex_unlock(&chan->mutex);
+out_unref:
 	kref_put(&sema->ref, free_semaphore);
+out:
+	if (chan)
+		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }
 
 int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 	return 0;
 }
@@ -420,30 +450,27 @@
 	int ret;
 
 	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
 	if (ret)
 		return ret;
 
-	ret = nouveau_ramht_insert(chan, NvSw, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret)
-		return ret;
-
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-	BEGIN_RING(chan, NvSubSw, 0, 1);
-	OUT_RING(chan, NvSw);
+	/* we leave subchannel empty for nvc0 */
+	if (dev_priv->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubSw, 0, 1);
+		OUT_RING(chan, NvSw);
+	}
 
 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
-		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
-					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     mem->size, NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
 
@@ -473,6 +500,8 @@
 {
 	struct nouveau_fence *tmp, *fence;
 
+	spin_lock(&chan->fence.lock);
+
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -482,6 +511,8 @@
 
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
+
+	spin_unlock(&chan->fence.lock);
 }
 
 int
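
The fence changes above bound __nouveau_fence_wait at a hard timeout and, when waiting lazily, only start sleeping between polls after a one-jiffy grace period, so fast completions are caught by busy-polling while long waits still yield the CPU. A minimal userspace sketch of that loop shape, under the assumption of invented names (condition_met, wait_for_condition); it is not the kernel implementation:

/*
 * Sketch only: poll a condition, give up after a hard timeout, and only
 * sleep between polls once a short grace period has elapsed.
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static bool condition_met(void)
{
	return false;	/* stand-in for the fence-signalled check */
}

static double now_sec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

static int wait_for_condition(double timeout_sec, double grace_sec)
{
	double start = now_sec();

	while (!condition_met()) {
		double elapsed = now_sec() - start;

		if (elapsed >= timeout_sec)
			return -EBUSY;		/* hard timeout */

		if (elapsed >= grace_sec) {	/* past the grace period: sleep */
			struct timespec ts = { 0, 1000000 };	/* 1 ms */
			nanosleep(&ts, NULL);
		}
		/* otherwise keep busy-polling to catch fast completions */
	}

	return 0;
}

int main(void)
{
	return wait_for_condition(0.05, 0.01) == -EBUSY ? 0 : 1;
}
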
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf..506c508 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@
 		return;
 	nvbo->gem = NULL;
 
-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
-
 	if (unlikely(nvbo->pin_refcnt)) {
 		nvbo->pin_refcnt = 1;
 		nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@
 	return 0;
 }
 
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->card_type >= NV_50) {
-		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-		case 0x0000:
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7000:
-		case 0x7400:
-		case 0x7a00:
-		case 0xe000:
-			return true;
-		}
-	} else {
-		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-			return true;
-	}
-
-	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-	return false;
-}
-
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
@@ -146,11 +117,6 @@
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
 
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@
 	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
 		flags |= TTM_PL_FLAG_SYSTEM;
 
-	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
+	}
+
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}
 
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
 
@@ -231,15 +207,8 @@
 
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
-		if (likely(fence)) {
-			struct nouveau_fence *prev_fence;
 
-			spin_lock(&nvbo->bo.lock);
-			prev_fence = nvbo->bo.sync_obj;
-			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.lock);
-			nouveau_fence_unref((void *)&prev_fence);
-		}
+		nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@
 			return -EINVAL;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (ret == -EAGAIN)
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+			if (unlikely(ret == -EAGAIN))
+				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
 			drm_gem_object_unreference_unlocked(gem);
-			if (ret) {
-				NV_ERROR(dev, "fail reserve\n");
+			if (unlikely(ret)) {
+				if (ret != -ERESTARTSYS)
+					NV_ERROR(dev, "fail reserve\n");
 				return ret;
 			}
 			goto retry;
@@ -331,25 +301,6 @@
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
-
-		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
-			validate_fini(op, NULL);
-
-			if (nvbo->cpu_filp == file_priv) {
-				NV_ERROR(dev, "bo %p mapped by process trying "
-					      "to validate it!\n", nvbo);
-				return -EINVAL;
-			}
-
-			mutex_unlock(&drm_global_mutex);
-			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			mutex_lock(&drm_global_mutex);
-			if (ret) {
-				NV_ERROR(dev, "fail wait_cpu\n");
-				return ret;
-			}
-			goto retry;
-		}
 	}
 
 	return 0;
@@ -383,11 +334,11 @@
 		}
 
 		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
-		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false, false);
+		ret = nouveau_bo_validate(nvbo, true, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail ttm_validate\n");
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
 		}
 
@@ -439,13 +390,15 @@
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
-		NV_ERROR(dev, "validate_init\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate vram_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -453,7 +406,8 @@
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate gart_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -461,7 +415,8 @@
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate both_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -557,9 +512,9 @@
 				data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -585,7 +540,9 @@
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
 
@@ -639,7 +602,8 @@
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
-		NV_ERROR(dev, "validate: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate: %d\n", ret);
 		goto out;
 	}
 
@@ -732,7 +696,7 @@
 
 out:
 	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
+	nouveau_fence_unref(&fence);
 	kfree(bo);
 	kfree(push);
 
@@ -750,6 +714,7 @@
 		req->suffix1 = 0x00000000;
 	}
 
+	nouveau_channel_put(&chan);
 	return ret;
 }
 
@@ -781,26 +746,9 @@
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	if (nvbo->cpu_filp) {
-		if (nvbo->cpu_filp == file_priv)
-			goto out;
-
-		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
-		if (ret)
-			goto out;
-	}
-
-	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.lock);
-	} else {
-		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
-		if (ret == 0)
-			nvbo->cpu_filp = file_priv;
-	}
-
-out:
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
@@ -809,26 +757,7 @@
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
-	struct drm_nouveau_gem_cpu_prep *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -ENOENT;
-	nvbo = nouveau_gem_object(gem);
-
-	if (nvbo->cpu_filp != file_priv)
-		goto out;
-	nvbo->cpu_filp = NULL;
-
-	ttm_bo_synccpu_write_release(&nvbo->bo);
-	ret = 0;
-
-out:
-	drm_gem_object_unreference_unlocked(gem);
-	return ret;
+	return 0;
 }
 
 int
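
The gem ioctl paths above switch from the removed channel macro to nouveau_channel_get()/nouveau_channel_put(), returning PTR_ERR() on failure and dropping the reference on every exit path, while error printing is suppressed for -ERESTARTSYS so interrupted-and-restarted syscalls do not spam the log. A small standalone sketch of the pointer-encoded error convention involved (it mimics the kernel's IS_ERR/PTR_ERR helpers in plain C; all other names are invented):

/*
 * Sketch only: a lookup either returns a valid object or an errno value
 * packed into the pointer, and every early-exit path must put the
 * reference it took.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct channel { int id; };

static struct channel chan0 = { .id = 0 };

static struct channel *channel_get(int id)
{
	if (id != 0)
		return ERR_PTR(-EINVAL);	/* no such channel */
	return &chan0;				/* would take a reference here */
}

static void channel_put(struct channel **pchan)
{
	*pchan = NULL;				/* would drop the reference here */
}

static int do_ioctl(int id)
{
	struct channel *chan = channel_get(id);

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	printf("using channel %d\n", chan->id);
	channel_put(&chan);			/* every exit path must put */
	return 0;
}

int main(void)
{
	return do_ioctl(0) || do_ioctl(7) != -EINVAL;
}
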
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a0..053edf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@
 			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
 
 			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
-			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+			if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
 				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
 			else
 				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@
 		if (dev_priv->card_type == NV_10) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
 
-	/* Setting 1 on this value gives you interrupts for every vblank period. */
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+	/* Enable vblank interrupts. */
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+		    (dev->vblank_enabled[head] ? 1 : 0));
 	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6..2ba7265 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
-	return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
 
 void
 nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@
 	/* Master disable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 
-	if (dev_priv->card_type >= NV_50) {
-		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
-		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
-		spin_lock_init(&dev_priv->hpd_state.lock);
-		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
-	}
+	INIT_LIST_HEAD(&dev_priv->vbl_waiting);
 }
 
 int
 nouveau_irq_postinstall(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
 	/* Master enable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
+
 	return 0;
 }
 
@@ -80,1178 +69,83 @@
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 }
 
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_pgraph_object_method *grm;
-	struct nouveau_pgraph_object_class *grc;
-
-	grc = dev_priv->engine.graph.grclass;
-	while (grc->id) {
-		if (grc->id == class)
-			break;
-		grc++;
-	}
-
-	if (grc->id != class || !grc->methods)
-		return -ENOENT;
-
-	grm = grc->methods;
-	while (grm->id) {
-		if (grm->id == mthd)
-			return grm->exec(chan, class, mthd, data);
-		grm++;
-	}
-
-	return -ENOENT;
-}
-
-static bool
-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
-{
-	struct drm_device *dev = chan->dev;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
-
-	if (mthd == 0x0000) {
-		struct nouveau_gpuobj *gpuobj;
-
-		gpuobj = nouveau_ramht_find(chan, data);
-		if (!gpuobj)
-			return false;
-
-		if (gpuobj->engine != NVOBJ_ENGINE_SW)
-			return false;
-
-		chan->sw_subchannel[subc] = gpuobj->class;
-		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
-			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
-		return true;
-	}
-
-	/* hw object */
-	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
-		return false;
-
-	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
-		return false;
-
-	return true;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t status, reassign;
-	int cnt = 0;
-
-	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
-	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
-		struct nouveau_channel *chan = NULL;
-		uint32_t chid, get;
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-		chid = engine->fifo.channel_id(dev);
-		if (chid >= 0 && chid < engine->fifo.channels)
-			chan = dev_priv->fifos[chid];
-		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
-		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
-			uint32_t mthd, data;
-			int ptr;
-
-			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
-			 * wrapping on my G80 chips, but CACHE1 isn't big
-			 * enough for this much data.. Tests show that it
-			 * wraps around to the start at GET=0x800.. No clue
-			 * as to why..
-			 */
-			ptr = (get & 0x7ff) >> 2;
-
-			if (dev_priv->card_type < NV_40) {
-				mthd = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_DATA(ptr));
-			} else {
-				mthd = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_DATA(ptr));
-			}
-
-			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
-				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
-					     "Mthd 0x%04x Data 0x%08x\n",
-					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
-					data);
-			}
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-						NV_PFIFO_INTR_CACHE_ERROR);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
-				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
-			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
-		}
-
-		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			u32 dma_get = nv_rd32(dev, 0x003244);
-			u32 dma_put = nv_rd32(dev, 0x003240);
-			u32 push = nv_rd32(dev, 0x003220);
-			u32 state = nv_rd32(dev, 0x003228);
-
-			if (dev_priv->card_type == NV_50) {
-				u32 ho_get = nv_rd32(dev, 0x003328);
-				u32 ho_put = nv_rd32(dev, 0x003320);
-				u32 ib_get = nv_rd32(dev, 0x003334);
-				u32 ib_put = nv_rd32(dev, 0x003330);
-
-				if (nouveau_ratelimit())
-					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					     "State 0x%08x Push 0x%08x\n",
-						chid, ho_get, dma_get, ho_put,
-						dma_put, ib_get, ib_put, state,
-						push);
-
-				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-				nv_wr32(dev, 0x003364, 0x00000000);
-				if (dma_get != dma_put || ho_get != ho_put) {
-					nv_wr32(dev, 0x003244, dma_put);
-					nv_wr32(dev, 0x003328, ho_put);
-				} else
-				if (ib_get != ib_put) {
-					nv_wr32(dev, 0x003334, ib_put);
-				}
-			} else {
-				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
-					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
-					chid, dma_get, dma_put, state, push);
-
-				if (dma_get != dma_put)
-					nv_wr32(dev, 0x003244, dma_put);
-			}
-
-			nv_wr32(dev, 0x003228, 0x00000000);
-			nv_wr32(dev, 0x003220, 0x00000001);
-			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
-			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-		}
-
-		if (status & NV_PFIFO_INTR_SEMAPHORE) {
-			uint32_t sem;
-
-			status &= ~NV_PFIFO_INTR_SEMAPHORE;
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				NV_PFIFO_INTR_SEMAPHORE);
-
-			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
-			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-		}
-
-		if (dev_priv->card_type == NV_50) {
-			if (status & 0x00000010) {
-				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
-				status &= ~0x00000010;
-				nv_wr32(dev, 0x002100, 0x00000010);
-			}
-		}
-
-		if (status) {
-			if (nouveau_ratelimit())
-				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-					status, chid);
-			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
-			status = 0;
-		}
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
-	}
-
-	if (status) {
-		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
-		nv_wr32(dev, 0x2140, 0);
-		nv_wr32(dev, 0x140, 0);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-struct nouveau_bitfield_names {
-	uint32_t mask;
-	const char *name;
-};
-
-static struct nouveau_bitfield_names nstatus_names[] =
-{
-	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nstatus_names_nv10[] =
-{
-	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nsource_names[] =
-{
-	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
-	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
-	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
-	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
-	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
-	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
-	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
-};
-
-static void
-nouveau_print_bitfield_names_(uint32_t value,
-				const struct nouveau_bitfield_names *namelist,
-				const int namelist_len)
-{
-	/*
-	 * Caller must have already printed the KERN_* log level for us.
-	 * Also the caller is responsible for adding the newline.
-	 */
-	int i;
-	for (i = 0; i < namelist_len; ++i) {
-		uint32_t mask = namelist[i].mask;
-		if (value & mask) {
-			printk(" %s", namelist[i].name);
-			value &= ~mask;
-		}
-	}
-	if (value)
-		printk(" (unknown bits 0x%08x)", value);
-}
-#define nouveau_print_bitfield_names(val, namelist) \
-	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-struct nouveau_enum_names {
-	uint32_t value;
-	const char *name;
-};
-
-static void
-nouveau_print_enum_names_(uint32_t value,
-				const struct nouveau_enum_names *namelist,
-				const int namelist_len)
-{
-	/*
-	 * Caller must have already printed the KERN_* log level for us.
-	 * Also the caller is responsible for adding the newline.
-	 */
-	int i;
-	for (i = 0; i < namelist_len; ++i) {
-		if (value == namelist[i].value) {
-			printk("%s", namelist[i].name);
-			return;
-		}
-	}
-	printk("unknown value 0x%08x", value);
-}
-#define nouveau_print_enum_names(val, namelist) \
-	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-static int
-nouveau_graph_chid_from_grctx(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	if (dev_priv->card_type < NV_40)
-		return dev_priv->engine.fifo.channels;
-	else
-	if (dev_priv->card_type < NV_50) {
-		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
-
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
-			if (!chan || !chan->ramin_grctx)
-				continue;
-
-			if (inst == chan->ramin_grctx->pinst)
-				break;
-		}
-	} else {
-		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
-
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
-			if (!chan || !chan->ramin)
-				continue;
-
-			if (inst == chan->ramin->vinst)
-				break;
-		}
-	}
-
-
-	return i;
-}
-
-static int
-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	int channel;
-
-	if (dev_priv->card_type < NV_10)
-		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
-	else
-	if (dev_priv->card_type < NV_40)
-		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	else
-		channel = nouveau_graph_chid_from_grctx(dev);
-
-	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
-		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
-		return -EINVAL;
-	}
-
-	*channel_ret = channel;
-	return 0;
-}
-
-struct nouveau_pgraph_trap {
-	int channel;
-	int class;
-	int subc, mthd, size;
-	uint32_t data, data2;
-	uint32_t nsource, nstatus;
-};
-
-static void
-nouveau_graph_trap_info(struct drm_device *dev,
-			struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t address;
-
-	trap->nsource = trap->nstatus = 0;
-	if (dev_priv->card_type < NV_50) {
-		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-	}
-
-	if (nouveau_graph_trapped_channel(dev, &trap->channel))
-		trap->channel = -1;
-	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-
-	trap->mthd = address & 0x1FFC;
-	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-	if (dev_priv->card_type < NV_10) {
-		trap->subc  = (address >> 13) & 0x7;
-	} else {
-		trap->subc  = (address >> 16) & 0x7;
-		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
-	}
-
-	if (dev_priv->card_type < NV_10)
-		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
-	else if (dev_priv->card_type < NV_40)
-		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
-	else if (dev_priv->card_type < NV_50)
-		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
-	else
-		trap->class = nv_rd32(dev, 0x400814);
-}
-
-static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
-			     struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
-
-	if (dev_priv->card_type < NV_50) {
-		NV_INFO(dev, "%s - nSource:", id);
-		nouveau_print_bitfield_names(nsource, nsource_names);
-		printk(", nStatus:");
-		if (dev_priv->card_type < NV_10)
-			nouveau_print_bitfield_names(nstatus, nstatus_names);
-		else
-			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
-		printk("\n");
-	}
-
-	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
-					"Data 0x%08x:0x%08x\n",
-					id, trap->channel, trap->subc,
-					trap->class, trap->mthd,
-					trap->data2, trap->data);
-}
-
-static int
-nouveau_pgraph_intr_swmthd(struct drm_device *dev,
-			   struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (trap->channel < 0 ||
-	    trap->channel >= dev_priv->engine.fifo.channels ||
-	    !dev_priv->fifos[trap->channel])
-		return -ENODEV;
-
-	return nouveau_call_method(dev_priv->fifos[trap->channel],
-				   trap->class, trap->mthd, trap->data);
-}
-
-static inline void
-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled)
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
-
-
-static inline void
-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-	trap.nsource = nsource;
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-		uint32_t v = nv_rd32(dev, 0x402000);
-		nv_wr32(dev, 0x402000, v);
-
-		/* dump the error anyway for now: it's useful for
-		   Gallium development */
-		unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled && nouveau_ratelimit())
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
-}
-
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t chid;
-
-	chid = engine->fifo.channel_id(dev);
-	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
-
-	switch (dev_priv->card_type) {
-	case NV_04:
-		nv04_graph_context_switch(dev);
-		break;
-	case NV_10:
-		nv10_graph_context_switch(dev);
-		break;
-	default:
-		NV_ERROR(dev, "Context switch not implemented\n");
-		break;
-	}
-}
-
-static void
-nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
-	uint32_t status;
-
-	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
-		if (status & NV_PGRAPH_INTR_NOTIFY) {
-			nouveau_pgraph_intr_notify(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_NOTIFY;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
-		}
-
-		if (status & NV_PGRAPH_INTR_ERROR) {
-			nouveau_pgraph_intr_error(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_ERROR;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
-		}
-
-		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv_wr32(dev, NV03_PGRAPH_INTR,
-				 NV_PGRAPH_INTR_CONTEXT_SWITCH);
-
-			nouveau_pgraph_intr_context_switch(dev);
-		}
-
-		if (status) {
-			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
-			nv_wr32(dev, NV03_PGRAPH_INTR, status);
-		}
-
-		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
-			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-}
-
-static struct nouveau_enum_names nv50_mp_exec_error_names[] =
-{
-	{ 3, "STACK_UNDERFLOW" },
-	{ 4, "QUADON_ACTIVE" },
-	{ 8, "TIMEOUT" },
-	{ 0x10, "INVALID_OPCODE" },
-	{ 0x40, "BREAKPOINT" },
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	uint32_t addr, mp10, status, pc, oplow, ophigh;
-	int i;
-	int mps = 0;
-	for (i = 0; i < 4; i++) {
-		if (!(units & 1 << (i+24)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			addr = 0x408200 + (tpid << 12) + (i << 7);
-		else
-			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(dev, addr + 0x10);
-		status = nv_rd32(dev, addr + 0x14);
-		if (!status)
-			continue;
-		if (display) {
-			nv_rd32(dev, addr + 0x20);
-			pc = nv_rd32(dev, addr + 0x24);
-			oplow = nv_rd32(dev, addr + 0x70);
-			ophigh= nv_rd32(dev, addr + 0x74);
-			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
-					"TP %d MP %d: ", tpid, i);
-			nouveau_print_enum_names(status,
-					nv50_mp_exec_error_names);
-			printk(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
-		}
-		nv_wr32(dev, addr + 0x10, mp10);
-		nv_wr32(dev, addr + 0x14, 0);
-		mps++;
-	}
-	if (!mps && display)
-		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
-				"No MPs claiming errors?\n", tpid);
-}
-
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
-		uint32_t ustatus_new, int display, const char *name)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int tps = 0;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	int i, r;
-	uint32_t ustatus_addr, ustatus;
-	for (i = 0; i < 16; i++) {
-		if (!(units & (1 << i)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			ustatus_addr = ustatus_old + (i << 12);
-		else
-			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
-		if (!ustatus)
-			continue;
-		tps++;
-		switch (type) {
-		case 6: /* texture error... unknown for now */
-			nv50_fb_vm_trap(dev, display, name);
-			if (display) {
-				NV_ERROR(dev, "magic set %d:\n", i);
-				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(dev, r));
-			}
-			break;
-		case 7: /* MP error */
-			if (ustatus & 0x00010000) {
-				nv50_pgraph_mp_trap(dev, i, display);
-				ustatus &= ~0x00010000;
-			}
-			break;
-		case 8: /* TPDMA error */
-			{
-			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
-			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
-			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
-			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
-			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
-			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
-			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
-			nv50_fb_vm_trap(dev, display, name);
-			/* 2d engine destination */
-			if (ustatus & 0x00000010) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000010;
-			}
-			/* Render target */
-			if (ustatus & 0x00000040) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000040;
-			}
-			/* CUDA memory: l[], g[] or stack. */
-			if (ustatus & 0x00000080) {
-				if (display) {
-					if (e18 & 0x80000000) {
-						/* g[] read fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 24) & 0x1f));
-						e18 &= ~0x1f000000;
-					} else if (e18 & 0xc) {
-						/* g[] write fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 7) & 0x1f));
-						e18 &= ~0x00000f80;
-					} else {
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
-								i, e14, e10);
-					}
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000080;
-			}
-			}
-			break;
-		}
-		if (ustatus) {
-			if (display)
-				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
-		}
-		nv_wr32(dev, ustatus_addr, 0xc0000000);
-	}
-
-	if (!tps && display)
-		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static void
-nv50_pgraph_trap_handler(struct drm_device *dev)
-{
-	struct nouveau_pgraph_trap trap;
-	uint32_t status = nv_rd32(dev, 0x400108);
-	uint32_t ustatus;
-	int display = nouveau_ratelimit();
-
-
-	if (!status && display) {
-		nouveau_graph_trap_info(dev, &trap);
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
-		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
-	}
-
-	/* DISPATCH: Relays commands to other units and handles NOTIFY,
-	 * COND, QUERY. If you get a trap from it, the command is still stuck
-	 * in DISPATCH and you need to do something about it. */
-	if (status & 0x001) {
-		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
-		}
-
-		/* Known to be triggered by screwed up NOTIFY and COND... */
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
-			nv_wr32(dev, 0x400500, 0);
-			if (nv_rd32(dev, 0x400808) & 0x80000000) {
-				if (display) {
-					if (nouveau_graph_trapped_channel(dev, &trap.channel))
-						trap.channel = -1;
-					trap.class = nv_rd32(dev, 0x400814);
-					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
-					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
-					trap.data = nv_rd32(dev, 0x40080c);
-					trap.data2 = nv_rd32(dev, 0x400810);
-					nouveau_graph_dump_trap_info(dev,
-							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
-				}
-				nv_wr32(dev, 0x400808, 0);
-			} else if (display) {
-				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
-			}
-			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
-			nv_wr32(dev, 0x400848, 0);
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus & 0x00000002) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
-			nv_wr32(dev, 0x400500, 0);
-			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
-				if (display) {
-					if (nouveau_graph_trapped_channel(dev, &trap.channel))
-						trap.channel = -1;
-					trap.class = nv_rd32(dev, 0x400814);
-					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
-					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
-					trap.data = nv_rd32(dev, 0x40085c);
-					trap.data2 = 0;
-					nouveau_graph_dump_trap_info(dev,
-							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
-				}
-				nv_wr32(dev, 0x40084c, 0);
-			} else if (display) {
-				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
-			}
-			ustatus &= ~0x00000002;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x400804, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x001);
-		status &= ~0x001;
-	}
-
-	/* TRAPs other than dispatch use the "normal" trap regs. */
-	if (status && display) {
-		nouveau_graph_trap_info(dev, &trap);
-		nouveau_graph_dump_trap_info(dev,
-				"PGRAPH_TRAP", &trap);
-	}
-
-	/* M2MF: Memory to memory copy engine. */
-	if (status & 0x002) {
-		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus & 0x00000002) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
-			ustatus &= ~0x00000002;
-		}
-		if (ustatus & 0x00000004) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
-			ustatus &= ~0x00000004;
-		}
-		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
-				nv_rd32(dev, 0x406804),
-				nv_rd32(dev, 0x406808),
-				nv_rd32(dev, 0x40680c),
-				nv_rd32(dev, 0x406810));
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 2);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x406800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x002);
-		status &= ~0x002;
-	}
-
-	/* VFETCH: Fetches data from vertex buffers. */
-	if (status & 0x004) {
-		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x400c00),
-					nv_rd32(dev, 0x400c08),
-					nv_rd32(dev, 0x400c0c),
-					nv_rd32(dev, 0x400c10));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x400c04, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x004);
-		status &= ~0x004;
-	}
-
-	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
-	if (status & 0x008) {
-		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x401804),
-					nv_rd32(dev, 0x401808),
-					nv_rd32(dev, 0x40180c),
-					nv_rd32(dev, 0x401810));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 0x80);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x401800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x008);
-		status &= ~0x008;
-	}
-
-	/* CCACHE: Handles code and c[] caches and fills them. */
-	if (status & 0x010) {
-		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x405800),
-					nv_rd32(dev, 0x405804),
-					nv_rd32(dev, 0x405808),
-					nv_rd32(dev, 0x40580c),
-					nv_rd32(dev, 0x405810),
-					nv_rd32(dev, 0x405814),
-					nv_rd32(dev, 0x40581c));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x405018, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x010);
-		status &= ~0x010;
-	}
-
-	/* Unknown, not seen yet... 0x402000 is the only trap status reg
-	 * remaining, so try to handle it anyway. Perhaps related to that
-	 * unknown DMA slot on tesla? */
-	if (status & 0x20) {
-		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
-		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
-		if (display)
-			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x402000, 0xc0000000);
-		/* no status modifiction on purpose */
-	}
-
-	/* TEXTURE: CUDA texturing units */
-	if (status & 0x040) {
-		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
-				"PGRAPH_TRAP_TEXTURE");
-		nv_wr32(dev, 0x400108, 0x040);
-		status &= ~0x040;
-	}
-
-	/* MP: CUDA execution engines. */
-	if (status & 0x080) {
-		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
-				"PGRAPH_TRAP_MP");
-		nv_wr32(dev, 0x400108, 0x080);
-		status &= ~0x080;
-	}
-
-	/* TPDMA:  Handles TP-initiated uncached memory accesses:
-	 * l[], g[], stack, 2d surfaces, render targets. */
-	if (status & 0x100) {
-		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
-				"PGRAPH_TRAP_TPDMA");
-		nv_wr32(dev, 0x400108, 0x100);
-		status &= ~0x100;
-	}
-
-	if (status) {
-		if (display)
-			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
-				status);
-		nv_wr32(dev, 0x400108, status);
-	}
-}
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-static struct nouveau_enum_names nv50_data_error_names[] =
-{
-	{ 4,	"INVALID_VALUE" },
-	{ 5,	"INVALID_ENUM" },
-	{ 8,	"INVALID_OBJECT" },
-	{ 0xc,	"INVALID_BITFIELD" },
-	{ 0x28,	"MP_NO_REG_SPACE" },
-	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
-};
-
-static void
-nv50_pgraph_irq_handler(struct drm_device *dev)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-	uint32_t status;
-
-	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		/* NOTIFY: You've set a NOTIFY an a command and it's done. */
-		if (status & 0x00000001) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_NOTIFY", &trap);
-			status &= ~0x00000001;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
-		}
-
-		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
-		 * when you write 0x200 to 0x50c0 method 0x31c. */
-		if (status & 0x00000002) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_COMPUTE_QUERY", &trap);
-			status &= ~0x00000002;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
-		}
-
-		/* Unknown, never seen: 0x4 */
-
-		/* ILLEGAL_MTHD: You used a wrong method for this class. */
-		if (status & 0x00000010) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_pgraph_intr_swmthd(dev, &trap))
-				unhandled = 1;
-			if (unhandled && nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_ILLEGAL_MTHD", &trap);
-			status &= ~0x00000010;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
-		}
-
-		/* ILLEGAL_CLASS: You used a wrong class. */
-		if (status & 0x00000020) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_ILLEGAL_CLASS", &trap);
-			status &= ~0x00000020;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
-		}
-
-		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
-		if (status & 0x00000040) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_DOUBLE_NOTIFY", &trap);
-			status &= ~0x00000040;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
-		}
-
-		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
-		if (status & 0x00001000) {
-			nv_wr32(dev, 0x400500, 0x00000000);
-			nv_wr32(dev, NV03_PGRAPH_INTR,
-				NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-				NV40_PGRAPH_INTR_EN) &
-				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			nv_wr32(dev, 0x400500, 0x00010001);
-
-			nv50_graph_context_switch(dev);
-
-			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		}
-
-		/* BUFFER_NOTIFY: Your m2mf transfer finished */
-		if (status & 0x00010000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_BUFFER_NOTIFY", &trap);
-			status &= ~0x00010000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
-		}
-
-		/* DATA_ERROR: Invalid value for this method, or invalid
-		 * state in current PGRAPH context for this operation */
-		if (status & 0x00100000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit()) {
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_DATA_ERROR", &trap);
-				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
-				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
-						nv50_data_error_names);
-				printk("\n");
-			}
-			status &= ~0x00100000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
-		}
-
-		/* TRAP: Something bad happened in the middle of command
-		 * execution.  Has a billion types, subtypes, and even
-		 * subsubtypes. */
-		if (status & 0x00200000) {
-			nv50_pgraph_trap_handler(dev);
-			status &= ~0x00200000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
-		}
-
-		/* Unknown, never seen: 0x00400000 */
-
-		/* SINGLE_STEP: Happens on every method if you turned on
-		 * single stepping in 40008c */
-		if (status & 0x01000000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_SINGLE_STEP", &trap);
-			status &= ~0x01000000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
-		}
-
-		/* 0x02000000 happens when you pause a ctxprog...
-		 * but the only way this can happen that I know is by
-		 * poking the relevant MMIO register, and we don't
-		 * do that. */
-
-		if (status) {
-			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
-				status);
-			nv_wr32(dev, NV03_PGRAPH_INTR, status);
-		}
-
-		{
-			const int isb = (1 << 16) | (1 << 0);
-
-			if ((nv_rd32(dev, 0x400500) & isb) != isb)
-				nv_wr32(dev, 0x400500,
-					nv_rd32(dev, 0x400500) | isb);
-		}
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-	if (nv_rd32(dev, 0x400824) & (1 << 31))
-		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
-}
-
-static void
-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
-{
-	if (crtc & 1)
-		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
-
-	if (crtc & 2)
-		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
-}
-
 irqreturn_t
 nouveau_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status;
 	unsigned long flags;
+	u32 stat;
+	int i;
 
-	status = nv_rd32(dev, NV03_PMC_INTR_0);
-	if (!status)
+	stat = nv_rd32(dev, NV03_PMC_INTR_0);
+	if (!stat)
 		return IRQ_NONE;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	for (i = 0; i < 32 && stat; i++) {
+		if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+			continue;
 
-	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
-		nouveau_fifo_irq_handler(dev);
-		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+		dev_priv->irq_handler[i](dev);
+		stat &= ~(1 << i);
 	}
 
-	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
-		if (dev_priv->card_type >= NV_50)
-			nv50_pgraph_irq_handler(dev);
-		else
-			nouveau_pgraph_irq_handler(dev);
-
-		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
-	}
-
-	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
-		nouveau_crtc_irq_handler(dev, (status>>24)&3);
-		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
-	}
-
-	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
-		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
-		nv50_display_irq_handler(dev);
-		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
-			    NV_PMC_INTR_0_NV50_I2C_PENDING);
-	}
-
-	if (status)
-		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
-
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
+	if (stat && nouveau_ratelimit())
+		NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
 	return IRQ_HANDLED;
 }
+
+int
+nouveau_irq_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+		ret = pci_enable_msi(dev->pdev);
+		if (ret == 0) {
+			NV_INFO(dev, "enabled MSI\n");
+			dev_priv->msi_enabled = true;
+		}
+	}
+
+	return drm_irq_install(dev);
+}
+
+void
+nouveau_irq_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	drm_irq_uninstall(dev);
+	if (dev_priv->msi_enabled)
+		pci_disable_msi(dev->pdev);
+}
+
+void
+nouveau_irq_register(struct drm_device *dev, int status_bit,
+		     void (*handler)(struct drm_device *))
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	dev_priv->irq_handler[status_bit] = handler;
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
+
+void
+nouveau_irq_unregister(struct drm_device *dev, int status_bit)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	dev_priv->irq_handler[status_bit] = NULL;
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
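For context, a minimal sketch of how an engine could plug into the new per-bit dispatch table introduced above — the handler body and the choice of PMC_INTR_0 bit 8 for PFIFO are illustrative assumptions, not part of this hunk:

	/* Illustrative only: register a PFIFO service routine on PMC
	 * status bit 8 at engine init, and drop it again on takedown. */
	static void
	nv04_fifo_isr(struct drm_device *dev)
	{
		u32 stat = nv_rd32(dev, NV03_PFIFO_INTR_0);
		/* ... service the PFIFO interrupt sources ... */
		nv_wr32(dev, NV03_PFIFO_INTR_0, stat);	/* acknowledge */
	}

	int
	nv04_fifo_init(struct drm_device *dev)
	{
		nouveau_irq_register(dev, 8, nv04_fifo_isr);	/* bit 8 = PFIFO (assumed) */
		return 0;
	}

	void
	nv04_fifo_takedown(struct drm_device *dev)
	{
		nouveau_irq_unregister(dev, 8);
	}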
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30d..69044eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 /*
  * NV10-NV40 tiling helpers
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			   uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+			    struct nouveau_tile_reg *tile, uint32_t addr,
+			    uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+	int i = tile - dev_priv->tile.reg;
+	unsigned long save;
 
-	tile->addr = addr;
-	tile->size = size;
-	tile->used = !!pitch;
-	nouveau_fence_unref((void **)&tile->fence);
+	nouveau_fence_unref(&tile->fence);
 
+	if (tile->pitch)
+		pfb->free_tile_region(dev, i);
+
+	if (pitch)
+		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
 	pfifo->reassign(dev, false);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->set_region_tiling(dev, i, addr, size, pitch);
-	pfb->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_tile_region(dev, i);
+	pgraph->set_tile_region(dev, i);
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+}
+
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	spin_lock(&dev_priv->tile.lock);
+
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
+
+	spin_unlock(&dev_priv->tile.lock);
+	return tile;
+}
+
+void
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+			 struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (tile) {
+		spin_lock(&dev_priv->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
+		}
+
+		tile->used = false;
+		spin_unlock(&dev_priv->tile.lock);
+	}
 }
 
 struct nouveau_tile_reg *
 nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch)
+		    uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *found = NULL;
-	unsigned long i, flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	for (i = 0; i < pfb->num_tiles; i++) {
-		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-		if (tile->used)
-			/* Tile region in use. */
-			continue;
-
-		if (tile->fence &&
-		    !nouveau_fence_signalled(tile->fence, NULL))
-			/* Pending tile region. */
-			continue;
-
-		if (max(tile->addr, addr) <
-		    min(tile->addr + tile->size, addr + size))
-			/* Kill an intersecting tile region. */
-			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
-		if (pitch && !found) {
-			/* Free tile region. */
-			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = tile;
-		}
-	}
-
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	return found;
-}
-
-void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-		       struct nouveau_fence *fence)
-{
-	if (fence) {
-		/* Mark it as pending. */
-		tile->fence = fence;
-		nouveau_fence_ref(fence);
-	}
-
-	tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
-			uint32_t flags, uint64_t phys)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned block;
+	struct nouveau_tile_reg *tile, *found = NULL;
 	int i;
 
-	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
-	size = (size >> 16) << 1;
+	for (i = 0; i < pfb->num_tiles; i++) {
+		tile = nv10_mem_get_tile_region(dev, i);
 
-	phys |= ((uint64_t)flags << 32);
-	phys |= 1;
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
-		phys |= 0x30;
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && tile->pitch) {
+			/* Kill an unused tile region. */
+			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_mem_put_tile_region(dev, tile, NULL);
 	}
 
-	while (size) {
-		unsigned offset_h = upper_32_bits(phys);
-		unsigned offset_l = lower_32_bits(phys);
-		unsigned pte, end;
-
-		for (i = 7; i >= 0; i--) {
-			block = 1 << (i + 1);
-			if (size >= block && !(virt & (block - 1)))
-				break;
-		}
-		offset_l |= (i << 7);
-
-		phys += block << 15;
-		size -= block;
-
-		while (block) {
-			pgt = dev_priv->vm_vram_pt[virt >> 14];
-			pte = virt & 0x3ffe;
-
-			end = pte + block;
-			if (end > 16384)
-				end = 16384;
-			block -= (end - pte);
-			virt  += (end - pte);
-
-			while (pte < end) {
-				nv_wo32(pgt, (pte * 4) + 0, offset_l);
-				nv_wo32(pgt, (pte * 4) + 4, offset_h);
-				pte += 2;
-			}
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
-	return 0;
-}
-
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned pages, pte, end;
-
-	virt -= dev_priv->vm_vram_base;
-	pages = (size >> 16) << 1;
-
-	while (pages) {
-		pgt = dev_priv->vm_vram_pt[virt >> 29];
-		pte = (virt & 0x1ffe0000ULL) >> 15;
-
-		end = pte + pages;
-		if (end > 16384)
-			end = 16384;
-		pages -= (end - pte);
-		virt  += (end - pte) << 15;
-
-		while (pte < end) {
-			nv_wo32(pgt, (pte * 4), 0);
-			pte++;
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	if (found)
+		nv10_mem_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
 }
 
 /*
@@ -312,62 +241,7 @@
 	return 0;
 }
 
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, parts, colbits, rowbitsa, rowbitsb, banks;
-	u64 rowsize, predicted;
-	u32 r0, r4, rt, ru;
-
-	r0 = nv_rd32(dev, 0x100200);
-	r4 = nv_rd32(dev, 0x100204);
-	rt = nv_rd32(dev, 0x100250);
-	ru = nv_rd32(dev, 0x001540);
-	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-	for (i = 0, parts = 0; i < 8; i++) {
-		if (ru & (0x00010000 << i))
-			parts++;
-	}
-
-	colbits  =  (r4 & 0x0000f000) >> 12;
-	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-	banks    = ((r4 & 0x01000000) ? 8 : 4);
-
-	rowsize = parts * banks * (1 << colbits) * 8;
-	predicted = rowsize << rowbitsa;
-	if (r0 & 0x00000004)
-		predicted += rowsize << rowbitsb;
-
-	if (predicted != dev_priv->vram_size) {
-		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
-			(u32)(dev_priv->vram_size >> 20));
-		NV_WARN(dev, "we calculated %dMiB VRAM\n",
-			(u32)(predicted >> 20));
-	}
-
-	dev_priv->vram_rblock_size = rowsize >> 12;
-	if (rt & 1)
-		dev_priv->vram_rblock_size *= 3;
-
-	NV_DEBUG(dev, "rblock %lld bytes\n",
-		 (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/* To our knowledge, there's no large scale reordering of pages
-	 * that occurs on IGP chipsets.
-	 */
-	dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
 nouveau_mem_detect(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +255,6 @@
 	if (dev_priv->card_type < NV_50) {
 		dev_priv->vram_size  = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-	} else
-	if (dev_priv->card_type < NV_C0) {
-		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
-		dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
-		dev_priv->vram_size &= 0xffffffff00ll;
-
-		switch (dev_priv->chipset) {
-		case 0xaa:
-		case 0xac:
-		case 0xaf:
-			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
-			dev_priv->vram_sys_base <<= 12;
-			nvaa_vram_preinit(dev);
-			break;
-		default:
-			nv50_vram_preinit(dev);
-			break;
-		}
-	} else {
-		dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
-		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
-	}
-
-	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
-	if (dev_priv->vram_sys_base) {
-		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
-			dev_priv->vram_sys_base);
 	}
 
 	if (dev_priv->vram_size)
@@ -415,6 +262,15 @@
 	return -ENOMEM;
 }
 
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+		return true;
+
+	return false;
+}
+
 #if __OS_HAS_AGP
 static unsigned long
 get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +403,6 @@
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_detect(dev);
-	if (ret)
-		return ret;
-
 	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
 	ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@
 		return ret;
 	}
 
-	dev_priv->fb_available_size = dev_priv->vram_size;
-	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
-		dev_priv->fb_mappable_pages =
-			pci_resource_len(dev->pdev, 1);
-	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
 	/* reserve space at end of VRAM for PRAMIN */
 	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
 	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@
 	else
 		dev_priv->ramin_rsvd_vram = (512 * 1024);
 
+	ret = dev_priv->engine.vram.init(dev);
+	if (ret)
+		return ret;
+
+	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+	if (dev_priv->vram_sys_base) {
+		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+			dev_priv->vram_sys_base);
+	}
+
+	dev_priv->fb_available_size = dev_priv->vram_size;
+	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
 	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
 	dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
@@ -799,3 +660,118 @@
 
 	kfree(mem->timing);
 }
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_mm *mm;
+	u32 b_size;
+	int ret;
+
+	p_size = (p_size << PAGE_SHIFT) >> 12;
+	b_size = dev_priv->vram_rblock_size >> 12;
+
+	ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+	if (ret)
+		return ret;
+
+	man->priv = mm;
+	return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+	struct nouveau_mm *mm = man->priv;
+	int ret;
+
+	ret = nouveau_mm_fini(&mm);
+	if (ret)
+		return ret;
+
+	man->priv = NULL;
+	return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+
+	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vram *node;
+	u32 size_nc = 0;
+	int ret;
+
+	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+		size_nc = 1 << nvbo->vma.node->type;
+
+	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+			mem->page_alignment << PAGE_SHIFT, size_nc,
+			(nvbo->tile_flags >> 8) & 0xff, &node);
+	if (ret)
+		return ret;
+
+	node->page_shift = 12;
+	if (nvbo->vma.node)
+		node->page_shift = nvbo->vma.node->type;
+
+	mem->mm_node = node;
+	mem->start   = node->offset >> PAGE_SHIFT;
+	return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
+	int i;
+
+	mutex_lock(&mm->mutex);
+	list_for_each_entry(r, &mm->nodes, nl_entry) {
+		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
+		       prefix, r->free ? "free" : "used", r->type,
+		       ((u64)r->offset << 12),
+		       (((u64)r->offset + r->length) << 12));
+		total += r->length;
+		ttotal[r->type] += r->length;
+		if (r->free)
+			tfree[r->type] += r->length;
+		else
+			tused[r->type] += r->length;
+	}
+	mutex_unlock(&mm->mutex);
+
+	printk(KERN_DEBUG "%s  total: 0x%010llx\n", prefix, total << 12);
+	for (i = 0; i < 3; i++) {
+		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
+				  "used 0x%010llx, free 0x%010llx\n", prefix,
+		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
+	}
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+	nouveau_vram_manager_init,
+	nouveau_vram_manager_fini,
+	nouveau_vram_manager_new,
+	nouveau_vram_manager_del,
+	nouveau_vram_manager_debug
+};
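A hedged sketch of how this func table is expected to be consumed: the actual hookup lives in the driver's TTM init_mem_type callback (nouveau_bo.c, outside this hunk), so the function name and surrounding code below are illustrative:

	static int
	nouveau_bo_init_mem_type_sketch(struct ttm_bo_device *bdev, uint32_t type,
					struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* hand VRAM over to the nouveau_mm-backed manager above */
			man->func = &nouveau_vram_manager;
			break;
		default:
			break;
		}
		return 0;
	}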
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 0000000..cdbb11e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static inline void
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+{
+	list_del(&a->nl_entry);
+	list_del(&a->fl_entry);
+	kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	b->offset = a->offset;
+	b->length = size;
+	b->free   = a->free;
+	b->type   = a->type;
+	a->offset += size;
+	a->length -= size;
+	list_add_tail(&b->nl_entry, &a->nl_entry);
+	if (b->free)
+		list_add_tail(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
+static struct nouveau_mm_node *
+nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+	struct nouveau_mm_node *prev, *next;
+
+	/* try to merge with free adjacent entries of same type */
+	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
+	if (this->nl_entry.prev != &rmm->nodes) {
+		if (prev->free && prev->type == this->type) {
+			prev->length += this->length;
+			region_put(rmm, this);
+			this = prev;
+		}
+	}
+
+	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+	if (this->nl_entry.next != &rmm->nodes) {
+		if (next->free && next->type == this->type) {
+			next->offset  = this->offset;
+			next->length += this->length;
+			region_put(rmm, this);
+			this = next;
+		}
+	}
+
+	return this;
+}
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+	u32 block_s, block_l;
+
+	this->free = true;
+	list_add(&this->fl_entry, &rmm->free);
+	this = nouveau_mm_merge(rmm, this);
+
+	/* any entirely free blocks now?  we'll want to remove typing
+	 * on them so they can be used for any memory allocation
+	 */
+	block_s = roundup(this->offset, rmm->block_size);
+	if (block_s + rmm->block_size > this->offset + this->length)
+		return;
+
+	/* split off any still-typed region at the start */
+	if (block_s != this->offset) {
+		if (!region_split(rmm, this, block_s - this->offset))
+			return;
+	}
+
+	/* split off the soon-to-be-untyped block(s) */
+	block_l = rounddown(this->length, rmm->block_size);
+	if (block_l != this->length) {
+		this = region_split(rmm, this, block_l);
+		if (!this)
+			return;
+	}
+
+	/* mark as having no type, and retry merge with any adjacent
+	 * untyped blocks
+	 */
+	this->type = 0;
+	nouveau_mm_merge(rmm, this);
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+	       u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *this, *tmp, *next;
+	u32 splitoff, avail, alloc;
+
+	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
+		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+		if (this->nl_entry.next == &rmm->nodes)
+			next = NULL;
+
+		/* skip wrongly typed blocks */
+		if (this->type && this->type != type)
+			continue;
+
+		/* account for alignment */
+		splitoff = this->offset & (align - 1);
+		if (splitoff)
+			splitoff = align - splitoff;
+
+		if (this->length <= splitoff)
+			continue;
+
+		/* determine total memory available from this, and
+		 * the next block (if appropriate)
+		 */
+		avail = this->length;
+		if (next && next->free && (!next->type || next->type == type))
+			avail += next->length;
+
+		avail -= splitoff;
+
+		/* determine allocation size */
+		if (size_nc) {
+			alloc = min(avail, size);
+			alloc = rounddown(alloc, size_nc);
+			if (alloc == 0)
+				continue;
+		} else {
+			alloc = size;
+			if (avail < alloc)
+				continue;
+		}
+
+		/* untyped block, split off a chunk that's a multiple
+		 * of block_size and type it
+		 */
+		if (!this->type) {
+			u32 block = roundup(alloc + splitoff, rmm->block_size);
+			if (this->length < block)
+				continue;
+
+			this = region_split(rmm, this, block);
+			if (!this)
+				return -ENOMEM;
+
+			this->type = type;
+		}
+
+		/* stealing memory from adjacent block */
+		if (alloc > this->length) {
+			u32 amount = alloc - (this->length - splitoff);
+
+			if (!next->type) {
+				amount = roundup(amount, rmm->block_size);
+
+				next = region_split(rmm, next, amount);
+				if (!next)
+					return -ENOMEM;
+
+				next->type = type;
+			}
+
+			this->length += amount;
+			next->offset += amount;
+			next->length -= amount;
+			if (!next->length) {
+				list_del(&next->nl_entry);
+				list_del(&next->fl_entry);
+				kfree(next);
+			}
+		}
+
+		if (splitoff) {
+			if (!region_split(rmm, this, splitoff))
+				return -ENOMEM;
+		}
+
+		this = region_split(rmm, this, alloc);
+		if (this == NULL)
+			return -ENOMEM;
+
+		this->free = false;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+	struct nouveau_mm *rmm;
+	struct nouveau_mm_node *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return -ENOMEM;
+	heap->free = true;
+	heap->offset = roundup(offset, block);
+	heap->length = rounddown(offset + length, block) - heap->offset;
+
+	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+	if (!rmm) {
+		kfree(heap);
+		return -ENOMEM;
+	}
+	rmm->block_size = block;
+	mutex_init(&rmm->mutex);
+	INIT_LIST_HEAD(&rmm->nodes);
+	INIT_LIST_HEAD(&rmm->free);
+	list_add(&heap->nl_entry, &rmm->nodes);
+	list_add(&heap->fl_entry, &rmm->free);
+
+	*prmm = rmm;
+	return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm **prmm)
+{
+	struct nouveau_mm *rmm = *prmm;
+	struct nouveau_mm_node *heap =
+		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+	if (!list_is_singular(&rmm->nodes))
+		return -EBUSY;
+
+	kfree(heap);
+	kfree(rmm);
+	*prmm = NULL;
+	return 0;
+}
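A hedged usage sketch of the allocator above; sizes are in the same 4KiB units that nouveau_vram_manager_init passes down, and the specific numbers are illustrative only:

	static int
	nouveau_mm_example(void)
	{
		struct nouveau_mm *mm;
		struct nouveau_mm_node *node;
		int ret;

		/* 256MiB heap in 4KiB units, typed in 64KiB blocks */
		ret = nouveau_mm_init(&mm, 0, 256 << 8, 16);
		if (ret)
			return ret;

		/* 1MiB allocation of memory type 1, 64KiB-aligned, contiguous */
		ret = nouveau_mm_get(mm, 1, 1 << 8, 0, 16, &node);
		if (ret == 0)
			nouveau_mm_put(mm, node);

		return nouveau_mm_fini(&mm);
	}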
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 0000000..af38449
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+	struct list_head nl_entry;
+	struct list_head fl_entry;
+	struct list_head rl_entry;
+
+	bool free;
+	int  type;
+
+	u32 offset;
+	u32 length;
+};
+
+struct nouveau_mm {
+	struct list_head nodes;
+	struct list_head free;
+
+	struct mutex mutex;
+
+	u32 block_size;
+};
+
+int  nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm **);
+int  nouveau_mm_pre(struct nouveau_mm *);
+int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+		    u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
+
+int  nv50_vram_init(struct drm_device *);
+int  nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+		    u32 memtype, struct nouveau_vram **);
+void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+int  nvc0_vram_init(struct drm_device *);
+int  nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+		    u32 memtype, struct nouveau_vram **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8..fe29d60 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@
 		       int size, uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *nobj = NULL;
 	struct drm_mm_node *mem;
 	uint32_t offset;
@@ -113,31 +112,15 @@
 		return -ENOMEM;
 	}
 
-	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
-		target = NV_DMA_TARGET_VIDMEM;
-	} else
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
-		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
-		    dev_priv->card_type < NV_50) {
-			ret = nouveau_sgdma_get_page(dev, offset, &offset);
-			if (ret)
-				return ret;
-			target = NV_DMA_TARGET_PCI;
-		} else {
-			target = NV_DMA_TARGET_AGP;
-			if (dev_priv->card_type >= NV_50)
-				offset += dev_priv->vm_gart_base;
-		}
-	} else {
-		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
-			 chan->notifier_bo->bo.mem.mem_type);
-		return -EINVAL;
-	}
+	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+		target = NV_MEM_TARGET_VRAM;
+	else
+		target = NV_MEM_TARGET_GART;
+	offset  = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	offset += mem->start;
 
 	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
-				     mem->size, NV_DMA_ACCESS_RW, target,
+				     mem->size, NV_MEM_ACCESS_RW, target,
 				     &nobj);
 	if (ret) {
 		drm_mm_put_block(mem);
@@ -181,15 +164,20 @@
 nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_notifierobj_alloc *na = data;
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+	/* completely unnecessary for these chipsets... */
+	if (unlikely(dev_priv->card_type >= NV_C0))
+		return -EINVAL;
+
+	chan = nouveau_channel_get(dev, file_priv, na->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
-	if (ret)
-		return ret;
-
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }
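
The reworked nouveau_notifier_alloc() above now forms the ctxdma base by shifting the backing buffer's start page by PAGE_SHIFT and adding the drm_mm sub-allocation offset, picking NV_MEM_TARGET_VRAM or NV_MEM_TARGET_GART from the placement. A small sketch of that arithmetic, with hypothetical inputs, is below.

/*
 * Illustrative only: in the driver, bo_start_page comes from
 * bo.mem.start and sub_offset from the drm_mm node allocated for the
 * notifier block; the page shift value is an assumption for the sketch.
 */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	uint64_t bo_start_page = 0x100;	/* hypothetical BO placement, in pages */
	uint32_t sub_offset    = 0x40;	/* hypothetical drm_mm node start, in bytes */

	uint64_t offset = (bo_start_page << SKETCH_PAGE_SHIFT) + sub_offset;
	printf("notifier ctxdma base = 0x%llx\n", (unsigned long long)offset);
	return 0;
}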
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572ad..30b6544 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nouveau_gpuobj_method {
+	struct list_head head;
+	u32 mthd;
+	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
+};
+
+struct nouveau_gpuobj_class {
+	struct list_head head;
+	struct list_head methods;
+	u32 id;
+	u32 engine;
+};
+
+int
+nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_class *oc;
+
+	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
+	if (!oc)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&oc->methods);
+	oc->id = class;
+	oc->engine = engine;
+	list_add(&oc->head, &dev_priv->classes);
+	return 0;
+}
+
+int
+nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
+			int (*exec)(struct nouveau_channel *, u32, u32, u32))
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_method *om;
+	struct nouveau_gpuobj_class *oc;
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id == class)
+			goto found;
+	}
+
+	return -EINVAL;
+
+found:
+	om = kzalloc(sizeof(*om), GFP_KERNEL);
+	if (!om)
+		return -ENOMEM;
+
+	om->mthd = mthd;
+	om->exec = exec;
+	list_add(&om->head, &oc->methods);
+	return 0;
+}
+
+int
+nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_gpuobj_method *om;
+	struct nouveau_gpuobj_class *oc;
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id != class)
+			continue;
+
+		list_for_each_entry(om, &oc->methods, head) {
+			if (om->mthd == mthd)
+				return om->exec(chan, class, mthd, data);
+		}
+	}
+
+	return -ENOENT;
+}
+
+int
+nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+		chan = dev_priv->channels.ptr[chid];
+	if (chan)
+		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return ret;
+}
 
 /* NVidia uses context objects to drive drawing operations.
 
@@ -73,17 +169,14 @@
 		   struct nouveau_gpuobj **gpuobj_ret)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *gpuobj;
 	struct drm_mm_node *ramin = NULL;
-	int ret;
+	int ret, i;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
 		 chan ? chan->id : -1, size, align, flags);
 
-	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -98,88 +191,41 @@
 	spin_unlock(&dev_priv->ramin_lock);
 
 	if (chan) {
-		NV_DEBUG(dev, "channel heap\n");
-
 		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
 		if (ramin)
 			ramin = drm_mm_get_block(ramin, size, align);
-
 		if (!ramin) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return -ENOMEM;
 		}
-	} else {
-		NV_DEBUG(dev, "global heap\n");
 
-		/* allocate backing pages, sets vinst */
-		ret = engine->instmem.populate(dev, gpuobj, &size);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-
-		/* try and get aperture space */
-		do {
-			if (drm_mm_pre_get(&dev_priv->ramin_heap))
-				return -ENOMEM;
-
-			spin_lock(&dev_priv->ramin_lock);
-			ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-						   align, 0);
-			if (ramin == NULL) {
-				spin_unlock(&dev_priv->ramin_lock);
-				nouveau_gpuobj_ref(NULL, &gpuobj);
-				return -ENOMEM;
-			}
-
-			ramin = drm_mm_get_block_atomic(ramin, size, align);
-			spin_unlock(&dev_priv->ramin_lock);
-		} while (ramin == NULL);
-
-		/* on nv50 it's ok to fail, we have a fallback path */
-		if (!ramin && dev_priv->card_type < NV_50) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return -ENOMEM;
-		}
-	}
-
-	/* if we got a chunk of the aperture, map pages into it */
-	gpuobj->im_pramin = ramin;
-	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-		ret = engine->instmem.bind(dev, gpuobj);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-	}
-
-	/* calculate the various different addresses for the object */
-	if (chan) {
 		gpuobj->pinst = chan->ramin->pinst;
 		if (gpuobj->pinst != ~0)
-			gpuobj->pinst += gpuobj->im_pramin->start;
+			gpuobj->pinst += ramin->start;
 
-		if (dev_priv->card_type < NV_50) {
-			gpuobj->cinst = gpuobj->pinst;
-		} else {
-			gpuobj->cinst = gpuobj->im_pramin->start;
-			gpuobj->vinst = gpuobj->im_pramin->start +
-					chan->ramin->vinst;
-		}
+		gpuobj->cinst = ramin->start;
+		gpuobj->vinst = ramin->start + chan->ramin->vinst;
+		gpuobj->node  = ramin;
 	} else {
-		if (gpuobj->im_pramin)
-			gpuobj->pinst = gpuobj->im_pramin->start;
-		else
+		ret = instmem->get(gpuobj, size, align);
+		if (ret) {
+			nouveau_gpuobj_ref(NULL, &gpuobj);
+			return ret;
+		}
+
+		ret = -ENOSYS;
+		if (!(flags & NVOBJ_FLAG_DONT_MAP))
+			ret = instmem->map(gpuobj);
+		if (ret)
 			gpuobj->pinst = ~0;
-		gpuobj->cinst = 0xdeadbeef;
+
+		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		int i;
-
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 
@@ -195,6 +241,7 @@
 	NV_DEBUG(dev, "\n");
 
 	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+	INIT_LIST_HEAD(&dev_priv->classes);
 	spin_lock_init(&dev_priv->ramin_lock);
 	dev_priv->ramin_base = ~0;
 
@@ -205,9 +252,20 @@
 nouveau_gpuobj_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_method *om, *tm;
+	struct nouveau_gpuobj_class *oc, *tc;
 
 	NV_DEBUG(dev, "\n");
 
+	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
+		list_for_each_entry_safe(om, tm, &oc->methods, head) {
+			list_del(&om->head);
+			kfree(om);
+		}
+		list_del(&oc->head);
+		kfree(oc);
+	}
+
 	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
 }
 
@@ -219,26 +277,34 @@
 		container_of(ref, struct nouveau_gpuobj, refcount);
 	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int i;
 
 	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 	if (gpuobj->dtor)
 		gpuobj->dtor(dev, gpuobj);
 
-	if (gpuobj->im_backing)
-		engine->instmem.clear(dev, gpuobj);
+	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+		if (gpuobj->node) {
+			instmem->unmap(gpuobj);
+			instmem->put(gpuobj);
+		}
+	} else {
+		if (gpuobj->node) {
+			spin_lock(&dev_priv->ramin_lock);
+			drm_mm_put_block(gpuobj->node);
+			spin_unlock(&dev_priv->ramin_lock);
+		}
+	}
 
 	spin_lock(&dev_priv->ramin_lock);
-	if (gpuobj->im_pramin)
-		drm_mm_put_block(gpuobj->im_pramin);
 	list_del(&gpuobj->list);
 	spin_unlock(&dev_priv->ramin_lock);
 
@@ -278,7 +344,7 @@
 	kref_init(&gpuobj->refcount);
 	gpuobj->size  = size;
 	gpuobj->pinst = pinst;
-	gpuobj->cinst = 0xdeadbeef;
+	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	gpuobj->vinst = vinst;
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,115 +401,152 @@
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
-		       uint64_t offset, uint64_t size, int access,
-		       int target, struct nouveau_gpuobj **gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	int ret;
 
-	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
-		 chan->id, class, offset, size);
-	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+		     u64 base, u64 size, int target, int access,
+		     u32 type, u32 comp)
+{
+	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	u32 flags0;
+
+	flags0  = (comp << 29) | (type << 22) | class;
+	flags0 |= 0x00100000;
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+	case NV_MEM_ACCESS_RW:
+	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+	default:
+		break;
+	}
 
 	switch (target) {
-	case NV_DMA_TARGET_AGP:
-		offset += dev_priv->gart_info.aper_base;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	case NV_MEM_TARGET_GART:
+		base += dev_priv->gart_info.aper_base;
+	default:
+		flags0 &= ~0x00100000;
+		break;
+	}
+
+	/* convert to base + limit */
+	size = (base + size) - 1;
+
+	nv_wo32(obj, offset + 0x00, flags0);
+	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+				    upper_32_bits(base));
+	nv_wo32(obj, offset + 0x10, 0x00000000);
+	nv_wo32(obj, offset + 0x14, 0x00000000);
+
+	pinstmem->flush(obj->dev);
+}
+
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+		    int target, int access, u32 type, u32 comp,
+		    struct nouveau_gpuobj **pobj)
+{
+	struct drm_device *dev = chan->dev;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
+	if (ret)
+		return ret;
+
+	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+			     access, type, comp);
+	return 0;
+}
+
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+		       u64 size, int access, int target,
+		       struct nouveau_gpuobj **pobj)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *obj;
+	u32 flags0, flags2;
+	int ret;
+
+	if (dev_priv->card_type >= NV_50) {
+		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+		return nv50_gpuobj_dma_new(chan, class, base, size,
+					   target, access, type, comp, pobj);
+	}
+
+	if (target == NV_MEM_TARGET_GART) {
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			target = NV_MEM_TARGET_PCI_NOSNOOP;
+			base  += dev_priv->gart_info.aper_base;
+		} else
+		if (base != 0) {
+			base = nouveau_sgdma_get_physical(dev, base);
+			target = NV_MEM_TARGET_PCI;
+		} else {
+			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+			return 0;
+		}
+	}
+
+	flags0  = class;
+	flags0 |= 0x00003000; /* PT present, PT linear */
+	flags2  = 0;
+
+	switch (target) {
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
 		break;
 	default:
 		break;
 	}
 
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+	switch (access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
+	default:
+		flags2 |= 0x00000002;
+		break;
+	}
+
+	flags0 |= (base & 0x00000fff) << 20;
+	flags2 |= (base & 0xfffff000);
+
+	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
 		return ret;
-	}
 
-	if (dev_priv->card_type < NV_50) {
-		uint32_t frame, adjust, pte_flags = 0;
+	nv_wo32(obj, 0x00, flags0);
+	nv_wo32(obj, 0x04, size - 1);
+	nv_wo32(obj, 0x08, flags2);
+	nv_wo32(obj, 0x0c, flags2);
 
-		if (access != NV_DMA_ACCESS_RO)
-			pte_flags |= (1<<1);
-		adjust = offset &  0x00000fff;
-		frame  = offset & ~0x00000fff;
-
-		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
-				      (access << 14) | (target << 16) |
-				      class));
-		nv_wo32(*gpuobj,  4, size - 1);
-		nv_wo32(*gpuobj,  8, frame | pte_flags);
-		nv_wo32(*gpuobj, 12, frame | pte_flags);
-	} else {
-		uint64_t limit = offset + size - 1;
-		uint32_t flags0, flags5;
-
-		if (target == NV_DMA_TARGET_VIDMEM) {
-			flags0 = 0x00190000;
-			flags5 = 0x00010000;
-		} else {
-			flags0 = 0x7fc00000;
-			flags5 = 0x00080000;
-		}
-
-		nv_wo32(*gpuobj,  0, flags0 | class);
-		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
-		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
-		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
-				      (upper_32_bits(offset) & 0xff));
-		nv_wo32(*gpuobj, 20, flags5);
-	}
-
-	instmem->flush(dev);
-
-	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
-	(*gpuobj)->class  = class;
+	obj->engine = NVOBJ_ENGINE_SW;
+	obj->class  = class;
+	*pobj = obj;
 	return 0;
 }
 
-int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
-			    uint64_t offset, uint64_t size, int access,
-			    struct nouveau_gpuobj **gpuobj,
-			    uint32_t *o_ret)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
-	    (dev_priv->card_type >= NV_50 &&
-	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     offset + dev_priv->vm_gart_base,
-					     size, access, NV_DMA_TARGET_AGP,
-					     gpuobj);
-		if (o_ret)
-			*o_ret = 0;
-	} else
-	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
-		if (offset & ~0xffffffffULL) {
-			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
-			return -EINVAL;
-		}
-		if (o_ret)
-			*o_ret = (uint32_t)offset;
-		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
-	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
 /* Context objects in the instance RAM have the following structure.
  * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
 
@@ -495,68 +598,13 @@
    entry[5]:
    set to 0?
 */
-int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
-		      struct nouveau_gpuobj **gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
-
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16,
-				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
-				 gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
-		return ret;
-	}
-
-	if (dev_priv->card_type >= NV_50) {
-		nv_wo32(*gpuobj,  0, class);
-		nv_wo32(*gpuobj, 20, 0x00010000);
-	} else {
-		switch (class) {
-		case NV_CLASS_NULL:
-			nv_wo32(*gpuobj, 0, 0x00001030);
-			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
-			break;
-		default:
-			if (dev_priv->card_type >= NV_40) {
-				nv_wo32(*gpuobj, 0, class);
-#ifdef __BIG_ENDIAN
-				nv_wo32(*gpuobj, 8, 0x01000000);
-#endif
-			} else {
-#ifdef __BIG_ENDIAN
-				nv_wo32(*gpuobj, 0, class | 0x00080000);
-#else
-				nv_wo32(*gpuobj, 0, class);
-#endif
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
-	(*gpuobj)->class  = class;
-	return 0;
-}
-
-int
+static int
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
 		      struct nouveau_gpuobj **gpuobj_ret)
 {
-	struct drm_nouveau_private *dev_priv;
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
 
-	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-	dev_priv = chan->dev->dev_private;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -573,6 +621,109 @@
 	return 0;
 }
 
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj_class *oc;
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id == class)
+			goto found;
+	}
+
+	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+	return -EINVAL;
+
+found:
+	switch (oc->engine) {
+	case NVOBJ_ENGINE_SW:
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+			if (ret)
+				return ret;
+			goto insert;
+		}
+		break;
+	case NVOBJ_ENGINE_GR:
+		if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
+		    (dev_priv->card_type  < NV_20 && !chan->pgraph_ctx)) {
+			struct nouveau_pgraph_engine *pgraph =
+				&dev_priv->engine.graph;
+
+			ret = pgraph->create_context(chan);
+			if (ret)
+				return ret;
+		}
+		break;
+	case NVOBJ_ENGINE_CRYPT:
+		if (!chan->crypt_ctx) {
+			struct nouveau_crypt_engine *pcrypt =
+				&dev_priv->engine.crypt;
+
+			ret = pcrypt->create_context(chan);
+			if (ret)
+				return ret;
+		}
+		break;
+	}
+
+	/* we're done if this is fermi */
+	if (dev_priv->card_type >= NV_C0)
+		return 0;
+
+	ret = nouveau_gpuobj_new(dev, chan,
+				 nouveau_gpuobj_class_instmem_size(dev, class),
+				 16,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 &gpuobj);
+	if (ret) {
+		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
+		return ret;
+	}
+
+	if (dev_priv->card_type >= NV_50) {
+		nv_wo32(gpuobj,  0, class);
+		nv_wo32(gpuobj, 20, 0x00010000);
+	} else {
+		switch (class) {
+		case NV_CLASS_NULL:
+			nv_wo32(gpuobj, 0, 0x00001030);
+			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
+			break;
+		default:
+			if (dev_priv->card_type >= NV_40) {
+				nv_wo32(gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+				nv_wo32(gpuobj, 8, 0x01000000);
+#endif
+			} else {
+#ifdef __BIG_ENDIAN
+				nv_wo32(gpuobj, 0, class | 0x00080000);
+#else
+				nv_wo32(gpuobj, 0, class);
+#endif
+			}
+		}
+	}
+	dev_priv->engine.instmem.flush(dev);
+
+	gpuobj->engine = oc->engine;
+	gpuobj->class  = oc->id;
+
+insert:
+	ret = nouveau_ramht_insert(chan, handle, gpuobj);
+	if (ret)
+		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
+	nouveau_gpuobj_ref(NULL, &gpuobj);
+	return ret;
+}
+
 static int
 nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 {
@@ -585,7 +736,7 @@
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	/* Base amount for object storage (4KiB enough?) */
-	size = 0x1000;
+	size = 0x2000;
 	base = 0;
 
 	/* PGRAPH context */
@@ -624,12 +775,30 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
-	int ret, i;
+	int ret;
 
 	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
 
+	if (dev_priv->card_type == NV_C0) {
+		struct nouveau_vm *vm = dev_priv->chan_vm;
+		struct nouveau_vm_pgd *vpgd;
+
+		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+					 &chan->ramin);
+		if (ret)
+			return ret;
+
+		nouveau_vm_ref(vm, &chan->vm, NULL);
+
+		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+		return 0;
+	}
+
 	/* Allocate a chunk of memory for per-channel object storage */
 	ret = nouveau_gpuobj_channel_init_pramin(chan);
 	if (ret) {
@@ -639,14 +808,12 @@
 
 	/* NV50 VM
 	 *  - Allocate per-channel page-directory
-	 *  - Map GART and VRAM into the channel's address space at the
-	 *    locations determined during init.
+	 *  - Link with shared channel VM
 	 */
-	if (dev_priv->card_type >= NV_50) {
+	if (dev_priv->chan_vm) {
 		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
 		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
 		u32 vm_pinst = chan->ramin->pinst;
-		u32 pde;
 
 		if (vm_pinst != ~0)
 			vm_pinst += pgd_offs;
@@ -655,29 +822,8 @@
 					      0, &chan->vm_pd);
 		if (ret)
 			return ret;
-		for (i = 0; i < 0x4000; i += 8) {
-			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
-			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
-		}
 
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
-				   &chan->vm_gart_pt);
-		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
-		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
-		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
-		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
-		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
-					   &chan->vm_vram_pt[i]);
-
-			nv_wo32(chan->vm_pd, pde + 0,
-				chan->vm_vram_pt[i]->vinst | 0x61);
-			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-			pde += 8;
-		}
-
-		instmem->flush(dev);
+		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
 	}
 
 	/* RAMHT */
@@ -700,9 +846,8 @@
 	/* VRAM ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &vram);
+					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -710,8 +855,8 @@
 	} else {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -728,21 +873,13 @@
 	/* TT memory ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &tt);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	} else
-	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RW, &tt, NULL);
+					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &tt);
 	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		ret = -EINVAL;
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     0, dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_GART, &tt);
 	}
 
 	if (ret) {
@@ -763,21 +900,14 @@
 void
 nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
-	int i;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	if (!chan->ramht)
-		return;
-
 	nouveau_ramht_ref(NULL, &chan->ramht, chan);
 
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
-	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +921,91 @@
 	struct nouveau_gpuobj *gpuobj;
 	int i;
 
-	if (dev_priv->card_type < NV_50) {
-		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
-		if (!dev_priv->susres.ramin_copy)
-			return -ENOMEM;
-
-		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
-			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
-		return 0;
-	}
-
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing)
+		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
 			continue;
 
-		gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
-		if (!gpuobj->im_backing_suspend) {
+		gpuobj->suspend = vmalloc(gpuobj->size);
+		if (!gpuobj->suspend) {
 			nouveau_gpuobj_resume(dev);
 			return -ENOMEM;
 		}
 
 		for (i = 0; i < gpuobj->size; i += 4)
-			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
+			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
 	}
 
 	return 0;
 }
 
 void
-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj;
-
-	if (dev_priv->card_type < NV_50) {
-		vfree(dev_priv->susres.ramin_copy);
-		dev_priv->susres.ramin_copy = NULL;
-		return;
-	}
-
-	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing_suspend)
-			continue;
-
-		vfree(gpuobj->im_backing_suspend);
-		gpuobj->im_backing_suspend = NULL;
-	}
-}
-
-void
 nouveau_gpuobj_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
 	int i;
 
-	if (dev_priv->card_type < NV_50) {
-		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
-			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
-		nouveau_gpuobj_suspend_cleanup(dev);
-		return;
-	}
-
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing_suspend)
+		if (!gpuobj->suspend)
 			continue;
 
 		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
-		dev_priv->engine.instmem.flush(dev);
+			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
+
+		vfree(gpuobj->suspend);
+		gpuobj->suspend = NULL;
 	}
 
-	nouveau_gpuobj_suspend_cleanup(dev);
+	dev_priv->engine.instmem.flush(dev);
 }
 
 int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_pgraph_object_class *grc;
-	struct nouveau_gpuobj *gr = NULL;
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
 	if (init->handle == ~0)
 		return -EINVAL;
 
-	grc = pgraph->grclass;
-	while (grc->id) {
-		if (grc->id == init->class)
-			break;
-		grc++;
+	chan = nouveau_channel_get(dev, file_priv, init->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	if (nouveau_ramht_find(chan, init->handle)) {
+		ret = -EEXIST;
+		goto out;
 	}
 
-	if (!grc->id) {
-		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
-		return -EPERM;
-	}
-
-	if (nouveau_ramht_find(chan, init->handle))
-		return -EEXIST;
-
-	if (!grc->software)
-		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
-	else
-		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
 	if (ret) {
 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
 	}
 
-	ret = nouveau_ramht_insert(chan, init->handle, gr);
-	nouveau_gpuobj_ref(NULL, &gr);
-	if (ret) {
-		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
-			 ret, init->channel, init->handle);
-		return ret;
-	}
-
-	return 0;
+out:
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_nouveau_gpuobj_free *objfree = data;
-	struct nouveau_gpuobj *gpuobj;
 	struct nouveau_channel *chan;
+	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	gpuobj = nouveau_ramht_find(chan, objfree->handle);
-	if (!gpuobj)
-		return -ENOENT;
+	/* Synchronize with the user channel */
+	nouveau_channel_idle(chan);
 
-	nouveau_ramht_remove(chan, objfree->handle);
-	return 0;
+	ret = nouveau_ramht_remove(chan, objfree->handle);
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 u32
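
The new class/method registry in nouveau_object.c (nouveau_gpuobj_class_new, nouveau_gpuobj_mthd_new, nouveau_gpuobj_mthd_call) keeps a list of classes, each with its own list of method handlers, and dispatches to the first matching handler. The standalone sketch below mirrors that shape with plain singly linked lists; the names and the example handler are hypothetical and this is not the kernel code, which keys off dev_priv->classes and struct list_head.

/* Illustrative only: a userspace analogue of the two-level lookup. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct method {
	struct method *next;
	uint32_t mthd;
	int (*exec)(uint32_t class, uint32_t mthd, uint32_t data);
};

struct class_entry {
	struct class_entry *next;
	struct method *methods;
	uint32_t id;
};

static struct class_entry *classes;	/* leaked at exit; fine for a sketch */

static struct class_entry *class_new(uint32_t id)
{
	struct class_entry *oc = calloc(1, sizeof(*oc));
	if (!oc)
		return NULL;
	oc->id = id;
	oc->next = classes;
	classes = oc;
	return oc;
}

static int mthd_new(uint32_t class, uint32_t mthd,
		    int (*exec)(uint32_t, uint32_t, uint32_t))
{
	struct class_entry *oc;
	struct method *om;

	for (oc = classes; oc; oc = oc->next)
		if (oc->id == class)
			break;
	if (!oc)
		return -1;	/* class not registered */

	om = calloc(1, sizeof(*om));
	if (!om)
		return -1;
	om->mthd = mthd;
	om->exec = exec;
	om->next = oc->methods;
	oc->methods = om;
	return 0;
}

static int mthd_call(uint32_t class, uint32_t mthd, uint32_t data)
{
	struct class_entry *oc;
	struct method *om;

	for (oc = classes; oc; oc = oc->next) {
		if (oc->id != class)
			continue;
		for (om = oc->methods; om; om = om->next)
			if (om->mthd == mthd)
				return om->exec(class, mthd, data);
	}
	return -2;	/* no handler; the kernel returns -ENOENT here */
}

static int example_handler(uint32_t class, uint32_t mthd, uint32_t data)
{
	printf("class 0x%04x mthd 0x%04x data 0x%08x\n",
	       (unsigned)class, (unsigned)mthd, (unsigned)data);
	return 0;
}

int main(void)
{
	class_new(0x506e);			/* hypothetical class id */
	mthd_new(0x506e, 0x0500, example_handler);
	return mthd_call(0x506e, 0x0500, 0xdeadbeef) ? 1 : 0;
}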
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158..fb846a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
 
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
@@ -418,8 +422,7 @@
 		return ret;
 	}
 	dev_set_drvdata(hwmon_dev, dev);
-	ret = sysfs_create_group(&hwmon_dev->kobj,
-					&hwmon_attrgroup);
+	ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
 	if (ret) {
 		NV_ERROR(dev,
 			"Unable to create hwmon sysfs file: %d\n", ret);
@@ -446,6 +449,25 @@
 #endif
 }
 
+#ifdef CONFIG_ACPI
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct drm_nouveau_private *dev_priv =
+		container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
+	struct drm_device *dev = dev_priv->dev;
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, "ac_adapter") == 0) {
+		bool ac = power_supply_is_system_supplied();
+
+		NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+	}
+
+	return NOTIFY_OK;
+}
+#endif
+
 int
 nouveau_pm_init(struct drm_device *dev)
 {
@@ -485,6 +507,10 @@
 
 	nouveau_sysfs_init(dev);
 	nouveau_hwmon_init(dev);
+#ifdef CONFIG_ACPI
+	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+	register_acpi_notifier(&pm->acpi_nb);
+#endif
 
 	return 0;
 }
@@ -503,6 +529,9 @@
 	nouveau_perf_fini(dev);
 	nouveau_volt_fini(dev);
 
+#ifdef CONFIG_ACPI
+	unregister_acpi_notifier(&pm->acpi_nb);
+#endif
 	nouveau_hwmon_fini(dev);
 	nouveau_sysfs_fini(dev);
 }
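
nouveau_pm_acpi_event() above hooks the ACPI bus notifier chain and only reacts to events whose device_class is "ac_adapter", logging whether the system is on AC or DC power. A userspace sketch of that filtering follows; the stand-in event structure and the on_ac flag are assumptions, with the real values coming from struct acpi_bus_event and power_supply_is_system_supplied().

/* Illustrative only: the device_class filtering done by the notifier. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct fake_acpi_event {
	const char *device_class;
};

static void pm_acpi_event(const struct fake_acpi_event *ev, bool on_ac)
{
	if (strcmp(ev->device_class, "ac_adapter") == 0)
		printf("power supply changed: %s\n", on_ac ? "AC" : "DC");
}

int main(void)
{
	struct fake_acpi_event ev = { .device_class = "ac_adapter" };

	pm_acpi_event(&ev, true);
	return 0;
}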
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d85809..bef3e69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@
 	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
 
 	if (dev_priv->card_type < NV_40) {
-		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
 		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else
 	if (dev_priv->card_type < NV_50) {
-		ctx = (gpuobj->cinst >> 4) |
+		ctx = (gpuobj->pinst >> 4) |
 		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else {
 		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-			ctx = (gpuobj->cinst << 10) | 2;
+			ctx = (gpuobj->cinst << 10) | chan->id;
 		} else {
 			ctx = (gpuobj->cinst >> 4) |
 			      ((gpuobj->engine <<
@@ -214,18 +214,19 @@
 	spin_unlock_irqrestore(&chan->ramht->lock, flags);
 }
 
-void
+int
 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
 {
 	struct nouveau_ramht_entry *entry;
 
 	entry = nouveau_ramht_remove_entry(chan, handle);
 	if (!entry)
-		return;
+		return -ENOENT;
 
 	nouveau_ramht_remove_hash(chan, entry->handle);
 	nouveau_gpuobj_ref(NULL, &entry->gpuobj);
 	kfree(entry);
+	return 0;
 }
 
 struct nouveau_gpuobj *
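
For pre-NV50 parts the RAMHT change above packs the context word from the object's pinst (previously cinst), the channel id and the engine number. The sketch below shows that packing; NV40_RAMHT_CONTEXT_ENGINE_SHIFT (20) is taken from the nouveau_reg.h hunk later in this patch, while the channel shift is an assumed placeholder, not a value from the patch.

/* Illustrative only: NV40-style RAMHT context word packing. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_ENGINE_SHIFT	20	/* NV40_RAMHT_CONTEXT_ENGINE_SHIFT */
#define SKETCH_CHANNEL_SHIFT	23	/* assumption for the sketch */

int main(void)
{
	uint32_t pinst  = 0x00012340;	/* hypothetical PRAMIN byte offset */
	uint32_t chan   = 3;		/* hypothetical channel id */
	uint32_t engine = 1;		/* hypothetical engine number */

	uint32_t ctx = (pinst >> 4) |
		       (chan << SKETCH_CHANNEL_SHIFT) |
		       (engine << SKETCH_ENGINE_SHIFT);

	printf("RAMHT ctx = 0x%08x\n", (unsigned)ctx);
	return 0;
}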
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e..c82de98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@
 
 extern int  nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
 				 struct nouveau_gpuobj *);
-extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern int  nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
 extern struct nouveau_gpuobj *
 nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541..04e8fb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
 #	define NV04_PFB_REF_CMD_REFRESH				(1 << 0)
 #define NV04_PFB_PRE						0x001002d4
 #	define NV04_PFB_PRE_CMD_PRECHARGE			(1 << 0)
+#define NV20_PFB_ZCOMP(i)                              (0x00100300 + 4*(i))
+#	define NV20_PFB_ZCOMP_MODE_32				(4 << 24)
+#	define NV20_PFB_ZCOMP_EN				(1 << 31)
+#	define NV25_PFB_ZCOMP_MODE_16				(1 << 20)
+#	define NV25_PFB_ZCOMP_MODE_32				(2 << 20)
 #define NV10_PFB_CLOSE_PAGE2					0x0010033c
 #define NV04_PFB_SCRAMBLE(i)                         (0x00100400 + 4 * (i))
 #define NV40_PFB_TILE(i)                              (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
 #    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
 #    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
 
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI    2
-#define NV_DMA_TARGET_AGP    3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
 /* Some object classes we care about in the drm */
 #define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
 #define NV_CLASS_DMA_TO_MEMORY                             0x00000003
@@ -332,6 +326,7 @@
 #define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
 #define NV03_PGRAPH_STATUS                                 0x004006B0
 #define NV04_PGRAPH_STATUS                                 0x00400700
+#    define NV40_PGRAPH_STATUS_SYNC_STALL                  0x00004000
 #define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
 #define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
 #define NV04_PGRAPH_SURFACE                                0x0040070C
@@ -378,6 +373,7 @@
 #define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
 #define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
 #define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
 #define NV50_PDISPLAY_INTR_1_CLK_UNK10                               0x00000010
 #define NV50_PDISPLAY_INTR_1_CLK_UNK20                               0x00000020
 #define NV50_PDISPLAY_INTR_1_CLK_UNK40                               0x00000040
-#define NV50_PDISPLAY_INTR_EN                                        0x0061002c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC                            0x0000000c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0                          0x00000004
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1                          0x00000008
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10                              0x00000010
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20                              0x00000020
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40                              0x00000040
+#define NV50_PDISPLAY_INTR_EN_0                                      0x00610028
+#define NV50_PDISPLAY_INTR_EN_1                                      0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC                          0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n)                 (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0                        0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1                        0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10                            0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20                            0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40                            0x00000040
 #define NV50_PDISPLAY_UNK30_CTRL                                     0x00610030
 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0                        0x00000200
 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1                        0x00000400
 #define NV50_PDISPLAY_UNK30_CTRL_PENDING                             0x80000000
-#define NV50_PDISPLAY_TRAPPED_ADDR                                   0x00610080
-#define NV50_PDISPLAY_TRAPPED_DATA                                   0x00610084
-#define NV50_PDISPLAY_CHANNEL_STAT(i)                  ((i) * 0x10 + 0x00610200)
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA                               0x00000010
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED                      0x00000000
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED                       0x00000010
-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i)                ((i) * 0x10 + 0x00610204)
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION                        0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM                   0x00000000
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM                 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID                           0x00000001
-#define NV50_PDISPLAY_CHANNEL_UNK2(i)                  ((i) * 0x10 + 0x00610208)
-#define NV50_PDISPLAY_CHANNEL_UNK3(i)                  ((i) * 0x10 + 0x0061020c)
+#define NV50_PDISPLAY_TRAPPED_ADDR(i)                  ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i)                  ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i)                      ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA                                   0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED                          0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED                           0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i)                    ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION                            0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM                       0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM                     0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID                               0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i)                      ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i)                  ((i) * 0x10 + 0x0061020c)
 
 #define NV50_PDISPLAY_CURSOR                                         0x00610270
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)           ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS                     0x00030000
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE              0x00010000
 
-#define NV50_PDISPLAY_CTRL_STATE                                     0x00610300
-#define NV50_PDISPLAY_CTRL_STATE_PENDING                             0x80000000
-#define NV50_PDISPLAY_CTRL_STATE_METHOD                              0x00001ffc
-#define NV50_PDISPLAY_CTRL_STATE_ENABLE                              0x00000001
-#define NV50_PDISPLAY_CTRL_VAL                                       0x00610304
-#define NV50_PDISPLAY_UNK_380                                        0x00610380
-#define NV50_PDISPLAY_RAM_AMOUNT                                     0x00610384
-#define NV50_PDISPLAY_UNK_388                                        0x00610388
-#define NV50_PDISPLAY_UNK_38C                                        0x0061038c
+#define NV50_PDISPLAY_PIO_CTRL                                       0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING                               0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD                                  0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED                               0x00000001
+#define NV50_PDISPLAY_PIO_DATA                                       0x00610304
 
 #define NV50_PDISPLAY_CRTC_P(i, r)        ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
 #define NV50_PDISPLAY_CRTC_C(i, r)    (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
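
The renamed display registers above are strided arrays, e.g. NV50_PDISPLAY_EVO_CTRL(i) expands to (i) * 0x10 + 0x00610200 and NV50_PDISPLAY_TRAPPED_ADDR(i) to (i) * 0x08 + 0x00610080. The short program below expands a few of them to concrete addresses; only the macro bodies come from the hunk, the main() driver is just for the sketch.

/* Illustrative only: expanding the strided register macros. */
#include <stdio.h>

#define NV50_PDISPLAY_EVO_CTRL(i)      ((i) * 0x10 + 0x00610200)
#define NV50_PDISPLAY_TRAPPED_ADDR(i)  ((i) * 0x08 + 0x00610080)
#define NV20_PFB_ZCOMP(i)              (0x00100300 + 4 * (i))

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("EVO_CTRL(%d)=0x%08x TRAPPED_ADDR(%d)=0x%08x ZCOMP(%d)=0x%08x\n",
		       i, NV50_PDISPLAY_EVO_CTRL(i),
		       i, NV50_PDISPLAY_TRAPPED_ADDR(i),
		       i, NV20_PFB_ZCOMP(i));
	return 0;
}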
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac970..9a250eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
@@ -74,18 +74,6 @@
 	}
 }
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
 
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
-
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
 
 	nvbe->bound = true;
 	return 0;
@@ -142,28 +115,10 @@
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-
-			dma_offset += NV_CTXDMA_PAGE_SIZE;
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -186,6 +141,35 @@
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate		= nouveau_sgdma_populate,
 	.clear			= nouveau_sgdma_clear,
@@ -194,23 +178,30 @@
 	.destroy		= nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func	= &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
@@ -218,7 +209,6 @@
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -231,68 +221,40 @@
 
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
 
-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				      NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					      NVOBJ_FLAG_ZERO_ALLOC |
+					      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
 
-	dev_priv->gart_info.sg_dummy_page =
-		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
-	if (!dev_priv->gart_info.sg_dummy_page) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -ENOMEM;
-	}
-
-	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
-	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
-			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -EFAULT;
-	}
-
-	if (dev_priv->card_type < NV_50) {
-		/* special case, allocated from global instmem heap so
-		 * cinst is invalid, we use it on all channels though so
-		 * cinst needs to be valid, set it the same as pinst
-		 */
-		gpuobj->cinst = gpuobj->pinst;
-
-		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
-		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
-		 * on those cards? */
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
-				   (NV_DMA_ACCESS_RW  << 14) |
-				   (NV_DMA_TARGET_PCI << 16));
+				   (0 << 14) /* RW */ |
+				   (2 << 16) /* PCI */);
 		nv_wo32(gpuobj, 4, aper_size - 1);
-		for (i = 2; i < 2 + (aper_size >> 12); i++) {
-			nv_wo32(gpuobj, i * 4,
-				dev_priv->gart_info.sg_dummy_bus | 3);
-		}
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+		for (i = 2; i < 2 + (aper_size >> 12); i++)
+			nv_wo32(gpuobj, i * 4, 0x00000000);
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
 
@@ -301,31 +263,19 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->gart_info.sg_dummy_page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
-			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		unlock_page(dev_priv->gart_info.sg_dummy_page);
-		__free_page(dev_priv->gart_info.sg_dummy_page);
-		dev_priv->gart_info.sg_dummy_page = NULL;
-		dev_priv->gart_info.sg_dummy_bus = 0;
-	}
-
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	int pte;
+	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 
-	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
-	if (dev_priv->card_type < NV_50) {
-		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
-		return 0;
-	}
+	BUG_ON(dev_priv->card_type >= NV_50);
 
-	NV_ERROR(dev, "Unimplemented on NV50\n");
-	return -EINVAL;
+	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+		(offset & NV_CTXDMA_PAGE_MASK);
 }
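
nouveau_sgdma_get_physical() above indexes the ctxdma page table at (offset >> NV_CTXDMA_PAGE_SHIFT) + 2 (two header words precede the PTEs) and splices the stored page frame back together with the low offset bits. The sketch below assumes 4 KiB ctxdma pages and fakes the nv_ro32() reads with an array; the constant names and example values are placeholders.

/* Illustrative only: the address math of the GART offset -> physical lookup. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_CTXDMA_PAGE_SHIFT 12
#define SKETCH_CTXDMA_PAGE_SIZE  (1u << SKETCH_CTXDMA_PAGE_SHIFT)
#define SKETCH_CTXDMA_PAGE_MASK  (SKETCH_CTXDMA_PAGE_SIZE - 1)

int main(void)
{
	/* fake ctxdma: two header words, then PTEs of "bus address | 3" */
	uint32_t ctxdma[6] = {
		0, 0,			/* header */
		0x80000003, 0x80001003,	/* pages 0 and 1 */
		0x80002003, 0x80003003,	/* pages 2 and 3 */
	};
	uint32_t offset = 0x1234;	/* hypothetical GART offset */

	uint32_t pte  = (offset >> SKETCH_CTXDMA_PAGE_SHIFT) + 2;
	uint32_t phys = (ctxdma[pte] & ~SKETCH_CTXDMA_PAGE_MASK) |
			(offset & SKETCH_CTXDMA_PAGE_MASK);

	printf("offset 0x%x -> pte %u -> physical 0x%x\n",
	       (unsigned)offset, (unsigned)pte, (unsigned)phys);
	return 0;
}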
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755..a54fc43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -65,7 +65,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv04_fb_init;
 		engine->fb.takedown		= nv04_fb_takedown;
-		engine->graph.grclass		= nv04_graph_grclass;
 		engine->graph.init		= nv04_graph_init;
 		engine->graph.takedown		= nv04_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -76,7 +75,7 @@
 		engine->graph.unload_context	= nv04_graph_unload_context;
 		engine->fifo.channels		= 16;
 		engine->fifo.init		= nv04_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
@@ -99,16 +98,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x10:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -117,8 +120,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv10_graph_grclass;
+		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
 		engine->graph.init		= nv10_graph_init;
 		engine->graph.takedown		= nv10_graph_takedown;
 		engine->graph.channel		= nv10_graph_channel;
@@ -127,17 +131,17 @@
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv10_graph_load_context;
 		engine->graph.unload_context	= nv10_graph_unload_context;
-		engine->graph.set_region_tiling	= nv10_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv10_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -153,16 +157,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x20:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -171,8 +179,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv20_graph_grclass;
+		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
 		engine->graph.init		= nv20_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
 		engine->graph.channel		= nv10_graph_channel;
@@ -181,17 +190,17 @@
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
-		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv20_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -207,16 +216,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x30:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -225,8 +238,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv30_fb_init;
 		engine->fb.takedown		= nv30_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv30_graph_grclass;
+		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
 		engine->graph.init		= nv30_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -235,17 +249,17 @@
 		engine->graph.destroy_context	= nv20_graph_destroy_context;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
-		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv20_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -263,6 +277,10 @@
 		engine->pm.clock_set		= nv04_pm_clock_set;
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x40:
 	case 0x60:
@@ -270,10 +288,10 @@
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv40_mc_init;
 		engine->mc.takedown		= nv40_mc_takedown;
@@ -282,8 +300,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv40_fb_init;
 		engine->fb.takedown		= nv40_fb_takedown;
-		engine->fb.set_region_tiling	= nv40_fb_set_region_tiling;
-		engine->graph.grclass		= nv40_graph_grclass;
+		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv40_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
 		engine->graph.init		= nv40_graph_init;
 		engine->graph.takedown		= nv40_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -292,17 +311,17 @@
 		engine->graph.destroy_context	= nv40_graph_destroy_context;
 		engine->graph.load_context	= nv40_graph_load_context;
 		engine->graph.unload_context	= nv40_graph_unload_context;
-		engine->graph.set_region_tiling	= nv40_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv40_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv40_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv40_fifo_create_context;
-		engine->fifo.destroy_context	= nv40_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv40_fifo_load_context;
 		engine->fifo.unload_context	= nv40_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -321,6 +340,10 @@
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
 		engine->pm.temp_get		= nv40_temp_get;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x50:
 	case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@
 		engine->instmem.takedown	= nv50_instmem_takedown;
 		engine->instmem.suspend		= nv50_instmem_suspend;
 		engine->instmem.resume		= nv50_instmem_resume;
-		engine->instmem.populate	= nv50_instmem_populate;
-		engine->instmem.clear		= nv50_instmem_clear;
-		engine->instmem.bind		= nv50_instmem_bind;
-		engine->instmem.unbind		= nv50_instmem_unbind;
+		engine->instmem.get		= nv50_instmem_get;
+		engine->instmem.put		= nv50_instmem_put;
+		engine->instmem.map		= nv50_instmem_map;
+		engine->instmem.unmap		= nv50_instmem_unmap;
 		if (dev_priv->chipset == 0x50)
 			engine->instmem.flush	= nv50_instmem_flush;
 		else
@@ -345,7 +368,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv50_fb_init;
 		engine->fb.takedown		= nv50_fb_takedown;
-		engine->graph.grclass		= nv50_graph_grclass;
 		engine->graph.init		= nv50_graph_init;
 		engine->graph.takedown		= nv50_graph_takedown;
 		engine->graph.fifo_access	= nv50_graph_fifo_access;
@@ -381,24 +403,32 @@
 		engine->display.init		= nv50_display_init;
 		engine->display.destroy		= nv50_display_destroy;
 		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.takedown		= nouveau_stub_takedown;
+		engine->gpio.takedown		= nv50_gpio_fini;
 		engine->gpio.get		= nv50_gpio_get;
 		engine->gpio.set		= nv50_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
 		switch (dev_priv->chipset) {
-		case 0xa3:
-		case 0xa5:
-		case 0xa8:
-		case 0xaf:
-			engine->pm.clock_get	= nva3_pm_clock_get;
-			engine->pm.clock_pre	= nva3_pm_clock_pre;
-			engine->pm.clock_set	= nva3_pm_clock_set;
-			break;
-		default:
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0x98:
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+		case 0x50:
 			engine->pm.clock_get	= nv50_pm_clock_get;
 			engine->pm.clock_pre	= nv50_pm_clock_pre;
 			engine->pm.clock_set	= nv50_pm_clock_set;
 			break;
+		default:
+			engine->pm.clock_get	= nva3_pm_clock_get;
+			engine->pm.clock_pre	= nva3_pm_clock_pre;
+			engine->pm.clock_set	= nva3_pm_clock_set;
+			break;
 		}
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
@@ -406,17 +436,39 @@
 			engine->pm.temp_get	= nv84_temp_get;
 		else
 			engine->pm.temp_get	= nv40_temp_get;
+		switch (dev_priv->chipset) {
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0xa0:
+			engine->crypt.init	= nv84_crypt_init;
+			engine->crypt.takedown	= nv84_crypt_fini;
+			engine->crypt.create_context = nv84_crypt_create_context;
+			engine->crypt.destroy_context = nv84_crypt_destroy_context;
+			engine->crypt.tlb_flush	= nv84_crypt_tlb_flush;
+			break;
+		default:
+			engine->crypt.init	= nouveau_stub_init;
+			engine->crypt.takedown	= nouveau_stub_takedown;
+			break;
+		}
+		engine->vram.init		= nv50_vram_init;
+		engine->vram.get		= nv50_vram_new;
+		engine->vram.put		= nv50_vram_del;
+		engine->vram.flags_valid	= nv50_vram_flags_valid;
 		break;
 	case 0xC0:
 		engine->instmem.init		= nvc0_instmem_init;
 		engine->instmem.takedown	= nvc0_instmem_takedown;
 		engine->instmem.suspend		= nvc0_instmem_suspend;
 		engine->instmem.resume		= nvc0_instmem_resume;
-		engine->instmem.populate	= nvc0_instmem_populate;
-		engine->instmem.clear		= nvc0_instmem_clear;
-		engine->instmem.bind		= nvc0_instmem_bind;
-		engine->instmem.unbind		= nvc0_instmem_unbind;
-		engine->instmem.flush		= nvc0_instmem_flush;
+		engine->instmem.get		= nv50_instmem_get;
+		engine->instmem.put		= nv50_instmem_put;
+		engine->instmem.map		= nv50_instmem_map;
+		engine->instmem.unmap		= nv50_instmem_unmap;
+		engine->instmem.flush		= nv84_instmem_flush;
 		engine->mc.init			= nv50_mc_init;
 		engine->mc.takedown		= nv50_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -424,7 +476,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nvc0_fb_init;
 		engine->fb.takedown		= nvc0_fb_takedown;
-		engine->graph.grclass		= NULL;  //nvc0_graph_grclass;
 		engine->graph.init		= nvc0_graph_init;
 		engine->graph.takedown		= nvc0_graph_takedown;
 		engine->graph.fifo_access	= nvc0_graph_fifo_access;
@@ -453,7 +504,15 @@
 		engine->gpio.takedown		= nouveau_stub_takedown;
 		engine->gpio.get		= nv50_gpio_get;
 		engine->gpio.set		= nv50_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nvc0_vram_init;
+		engine->vram.get		= nvc0_vram_new;
+		engine->vram.put		= nv50_vram_del;
+		engine->vram.flags_valid	= nvc0_vram_flags_valid;
 		break;
 	default:
 		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -493,9 +552,13 @@
 	if (ret)
 		return ret;
 
+	/* no dma objects on fermi... */
+	if (dev_priv->card_type >= NV_C0)
+		goto out_done;
+
 	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
 				     0, dev_priv->vram_size,
-				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
 				     &gpuobj);
 	if (ret)
 		goto out_err;
@@ -505,9 +568,10 @@
 	if (ret)
 		goto out_err;
 
-	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
-					  dev_priv->gart_info.aper_size,
-					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
+	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+				     0, dev_priv->gart_info.aper_size,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+				     &gpuobj);
 	if (ret)
 		goto out_err;
 
@@ -516,11 +580,12 @@
 	if (ret)
 		goto out_err;
 
+out_done:
+	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;
 
 out_err:
-	nouveau_channel_free(dev_priv->channel);
-	dev_priv->channel = NULL;
+	nouveau_channel_put(&dev_priv->channel);
 	return ret;
 }
 
@@ -531,15 +596,25 @@
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		nouveau_pci_resume(pdev);
 		drm_kms_helper_poll_enable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		drm_kms_helper_poll_disable(dev);
 		nouveau_pci_suspend(pdev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
+static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	nouveau_fbcon_output_poll_changed(dev);
+}
+
 static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -560,6 +635,7 @@
 
 	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
 	vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+				       nouveau_switcheroo_reprobe,
 				       nouveau_switcheroo_can_switch);
 
 	/* Initialise internal driver API hooks */
@@ -567,6 +643,8 @@
 	if (ret)
 		goto out;
 	engine = &dev_priv->engine;
+	spin_lock_init(&dev_priv->channels.lock);
+	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
@@ -625,26 +703,28 @@
 		if (ret)
 			goto out_fb;
 
+		/* PCRYPT */
+		ret = engine->crypt.init(dev);
+		if (ret)
+			goto out_graph;
+
 		/* PFIFO */
 		ret = engine->fifo.init(dev);
 		if (ret)
-			goto out_graph;
+			goto out_crypt;
 	}
 
 	ret = engine->display.create(dev);
 	if (ret)
 		goto out_fifo;
 
-	/* this call irq_preinstall, register irq handler and
-	 * call irq_postinstall
-	 */
-	ret = drm_irq_install(dev);
+	ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
 	if (ret)
-		goto out_display;
+		goto out_vblank;
 
-	ret = drm_vblank_init(dev, 0);
+	ret = nouveau_irq_init(dev);
 	if (ret)
-		goto out_irq;
+		goto out_vblank;
 
 	/* what about PVIDEO/PCRTC/PRAMDAC etc? */
 
@@ -669,12 +749,16 @@
 out_fence:
 	nouveau_fence_fini(dev);
 out_irq:
-	drm_irq_uninstall(dev);
-out_display:
+	nouveau_irq_fini(dev);
+out_vblank:
+	drm_vblank_cleanup(dev);
 	engine->display.destroy(dev);
 out_fifo:
 	if (!nouveau_noaccel)
 		engine->fifo.takedown(dev);
+out_crypt:
+	if (!nouveau_noaccel)
+		engine->crypt.takedown(dev);
 out_graph:
 	if (!nouveau_noaccel)
 		engine->graph.takedown(dev);
@@ -713,12 +797,12 @@
 
 	if (!engine->graph.accel_blocked) {
 		nouveau_fence_fini(dev);
-		nouveau_channel_free(dev_priv->channel);
-		dev_priv->channel = NULL;
+		nouveau_channel_put_unlocked(&dev_priv->channel);
 	}
 
 	if (!nouveau_noaccel) {
 		engine->fifo.takedown(dev);
+		engine->crypt.takedown(dev);
 		engine->graph.takedown(dev);
 	}
 	engine->fb.takedown(dev);
@@ -737,7 +821,8 @@
 	nouveau_gpuobj_takedown(dev);
 	nouveau_mem_vram_fini(dev);
 
-	drm_irq_uninstall(dev);
+	nouveau_irq_fini(dev);
+	drm_vblank_cleanup(dev);
 
 	nouveau_pm_fini(dev);
 	nouveau_bios_takedown(dev);
@@ -980,6 +1065,7 @@
 
 void nouveau_lastclose(struct drm_device *dev)
 {
+	vga_switcheroo_process_delayed_switch();
 }
 
 int nouveau_unload(struct drm_device *dev)
@@ -1024,21 +1110,6 @@
 		else
 			getparam->value = NV_PCI;
 		break;
-	case NOUVEAU_GETPARAM_FB_PHYSICAL:
-		getparam->value = dev_priv->fb_phys;
-		break;
-	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
-		getparam->value = dev_priv->gart_info.aper_base;
-		break;
-	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
-		if (dev->sg) {
-			getparam->value = (unsigned long)dev->sg->virtual;
-		} else {
-			NV_ERROR(dev, "Requested PCIGART address, "
-					"while no PCIGART was created\n");
-			return -EINVAL;
-		}
-		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
 		getparam->value = dev_priv->fb_available_size;
 		break;
@@ -1046,7 +1117,7 @@
 		getparam->value = dev_priv->gart_info.aper_size;
 		break;
 	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
-		getparam->value = dev_priv->vm_vram_base;
+		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
 		getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1125,9 @@
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
 		break;
+	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+		getparam->value = (dev_priv->card_type < NV_50);
+		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
 		/* NV40 and NV50 versions are quite different, but register
 		 * address is the same. User is supposed to know the card
@@ -1087,8 +1161,9 @@
 }
 
 /* Wait until (value(reg) & mask) == val, up until timeout has hit */
-bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
-			uint32_t reg, uint32_t mask, uint32_t val)
+bool
+nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
+		uint32_t reg, uint32_t mask, uint32_t val)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1177,33 @@
 	return false;
 }
 
+/* Wait until (value(reg) & mask) != val, up until timeout has hit */
+bool
+nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
+		uint32_t reg, uint32_t mask, uint32_t val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	uint64_t start = ptimer->read(dev);
+
+	do {
+		if ((nv_rd32(dev, reg) & mask) != val)
+			return true;
+	} while (ptimer->read(dev) - start < timeout);
+
+	return false;
+}
+
 /* Waits for PGRAPH to go completely idle */
 bool nouveau_wait_for_idle(struct drm_device *dev)
 {
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t mask = ~0;
+
+	if (dev_priv->card_type == NV_40)
+		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+	if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
 		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
 			 nv_rd32(dev, NV04_PGRAPH_STATUS));
 		return false;
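
The nouveau_wait_eq()/nouveau_wait_ne() helpers added above share one pattern: read a register, compare (value & mask) against val, and give up once the PTIMER-based deadline has passed. A minimal, self-contained sketch of that pattern follows; fake_reg, the spin count and the bit-clearing simulation are illustrative stand-ins, not driver code.

/* Poll until (value & mask) == val, or a crude "timeout" expires. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg = 0x00000001;	/* stand-in for an MMIO register */

static bool
wait_eq(volatile uint32_t *reg, uint32_t mask, uint32_t val, unsigned long spins)
{
	while (spins--) {
		if ((*reg & mask) == val)
			return true;		/* condition met before the deadline */
		if (spins == 1000)
			*reg = 0;		/* simulate the hardware clearing the bit */
	}
	return false;				/* timed out */
}

int
main(void)
{
	/* wait for bit 0 to clear, as the DAC load-detect path does via nouveau_wait_eq() */
	printf("bit cleared: %s\n",
	       wait_eq(&fake_reg, 0x00000001, 0x00000000, 100000) ? "yes" : "no");
	return 0;
}
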
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 0000000..fbe0fb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/ratelimit.h>
+
+#include "nouveau_util.h"
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+	while (bf->name) {
+		if (value & bf->mask) {
+			printk(" %s", bf->name);
+			value &= ~bf->mask;
+		}
+
+		bf++;
+	}
+
+	if (value)
+		printk(" (unknown bits 0x%08x)", value);
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+	while (en->name) {
+		if (value == en->value) {
+			printk("%s", en->name);
+			return;
+		}
+
+		en++;
+	}
+
+	printk("(unknown enum 0x%08x)", value);
+}
+
+int
+nouveau_ratelimit(void)
+{
+	return __ratelimit(&nouveau_ratelimit_state);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 0000000..d9ceaea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+struct nouveau_bitfield {
+	u32 mask;
+	const char *name;
+};
+
+struct nouveau_enum {
+	u32 value;
+	const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+int nouveau_ratelimit(void);
+
+#endif
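
The helpers in nouveau_util.c walk NULL-name-terminated tables of struct nouveau_bitfield / struct nouveau_enum entries. The standalone sketch below shows the intended table shape and the decode behaviour; the structs are re-declared only so the example compiles on its own, and the status bit names are invented for illustration.

#include <stdio.h>
#include <stdint.h>

struct nouveau_bitfield {
	uint32_t mask;
	const char *name;
};

static const struct nouveau_bitfield example_status[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00100000, "ERROR" },
	{ 0, NULL }			/* NULL name terminates the walk */
};

/* same logic as nouveau_bitfield_print() above */
static void
bitfield_print(const struct nouveau_bitfield *bf, uint32_t value)
{
	while (bf->name) {
		if (value & bf->mask) {
			printf(" %s", bf->name);
			value &= ~bf->mask;
		}
		bf++;
	}
	if (value)
		printf(" (unknown bits 0x%08x)", value);
}

int
main(void)
{
	bitfield_print(example_status, 0x00100003);
	printf("\n");			/* -> " NOTIFY ERROR (unknown bits 0x00000002)" */
	return 0;
}
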
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 0000000..97d82ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_mm_node *r;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	list_for_each_entry(r, &vram->regions, rl_entry) {
+		u64 phys = (u64)r->offset << 12;
+		u32 num  = r->length >> bits;
+
+		while (num) {
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+			end = (pte + num);
+			if (unlikely(end >= max))
+				end = max;
+			len = end - pte;
+
+			vm->map(vma, pgt, vram, pte, len, phys);
+
+			num -= len;
+			pte += len;
+			if (unlikely(end >= max)) {
+				pde++;
+				pte = 0;
+			}
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+	nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+		  dma_addr_t *list)
+{
+	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->map_sg(vma, pgt, pte, list, len);
+
+		num  -= len;
+		pte  += len;
+		list += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->unmap(pgt, pte, len);
+
+		num -= len;
+		pte += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_vm_pgt *vpgt;
+	struct nouveau_gpuobj *pgt;
+	u32 pde;
+
+	for (pde = fpde; pde <= lpde; pde++) {
+		vpgt = &vm->pgt[pde - vm->fpde];
+		if (--vpgt->refcount[big])
+			continue;
+
+		pgt = vpgt->obj[big];
+		vpgt->obj[big] = NULL;
+
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		}
+
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+	}
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_gpuobj *pgt;
+	int big = (type != vm->spg_shift);
+	u32 pgt_size;
+	int ret;
+
+	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
+	pgt_size *= 8;
+
+	mutex_unlock(&vm->mm->mutex);
+	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+	mutex_lock(&vm->mm->mutex);
+	if (unlikely(ret))
+		return ret;
+
+	/* someone beat us to filling the PDE while we didn't have the lock */
+	if (unlikely(vpgt->refcount[big]++)) {
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+		return 0;
+	}
+
+	vpgt->obj[big] = pgt;
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+	}
+
+	return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+	       u32 access, struct nouveau_vma *vma)
+{
+	u32 align = (1 << page_shift) >> 12;
+	u32 msize = size >> 12;
+	u32 fpde, lpde, pde;
+	int ret;
+
+	mutex_lock(&vm->mm->mutex);
+	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	if (unlikely(ret != 0)) {
+		mutex_unlock(&vm->mm->mutex);
+		return ret;
+	}
+
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	for (pde = fpde; pde <= lpde; pde++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		int big = (vma->node->type != vm->spg_shift);
+
+		if (likely(vpgt->refcount[big])) {
+			vpgt->refcount[big]++;
+			continue;
+		}
+
+		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+		if (ret) {
+			if (pde != fpde)
+				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+			nouveau_mm_put(vm->mm, vma->node);
+			mutex_unlock(&vm->mm->mutex);
+			vma->node = NULL;
+			return ret;
+		}
+	}
+	mutex_unlock(&vm->mm->mutex);
+
+	vma->vm     = vm;
+	vma->offset = (u64)vma->node->offset << 12;
+	vma->access = access;
+	return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 fpde, lpde;
+
+	if (unlikely(vma->node == NULL))
+		return;
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+	mutex_lock(&vm->mm->mutex);
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
+	nouveau_mm_put(vm->mm, vma->node);
+	vma->node = NULL;
+	mutex_unlock(&vm->mm->mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+	       struct nouveau_vm **pvm)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vm *vm;
+	u64 mm_length = (offset + length) - mm_offset;
+	u32 block, pgt_bits;
+	int ret;
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return -ENOMEM;
+
+	if (dev_priv->card_type == NV_50) {
+		vm->map_pgt = nv50_vm_map_pgt;
+		vm->map = nv50_vm_map;
+		vm->map_sg = nv50_vm_map_sg;
+		vm->unmap = nv50_vm_unmap;
+		vm->flush = nv50_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 16;
+
+		pgt_bits = 29;
+		block = (1 << pgt_bits);
+		if (length < block)
+			block = length;
+
+	} else
+	if (dev_priv->card_type == NV_C0) {
+		vm->map_pgt = nvc0_vm_map_pgt;
+		vm->map = nvc0_vm_map;
+		vm->map_sg = nvc0_vm_map_sg;
+		vm->unmap = nvc0_vm_unmap;
+		vm->flush = nvc0_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 17;
+		pgt_bits = 27;
+
+		/* Should be 4096 everywhere; this is a hack that's
+		 * currently necessary to avoid an elusive bug that
+		 * causes corruption when mixing small/large pages.
+		 */
+		if (length < (1ULL << 40))
+			block = 4096;
+		else {
+			block = (1 << pgt_bits);
+			if (length < block)
+				block = length;
+		}
+	} else {
+		kfree(vm);
+		return -ENOSYS;
+	}
+
+	vm->fpde   = offset >> pgt_bits;
+	vm->lpde   = (offset + length - 1) >> pgt_bits;
+	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	if (!vm->pgt) {
+		kfree(vm);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->dev = dev;
+	vm->refcount = 1;
+	vm->pgt_bits = pgt_bits - 12;
+
+	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+			      block >> 12);
+	if (ret) {
+		kfree(vm);
+		return ret;
+	}
+
+	*pvm = vm;
+	return 0;
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd;
+	int i;
+
+	if (!pgd)
+		return 0;
+
+	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+	if (!vpgd)
+		return -ENOMEM;
+
+	nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+	mutex_lock(&vm->mm->mutex);
+	for (i = vm->fpde; i <= vm->lpde; i++)
+		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+	list_add(&vpgd->head, &vm->pgd_list);
+	mutex_unlock(&vm->mm->mutex);
+	return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	if (!pgd)
+		return;
+
+	mutex_lock(&vm->mm->mutex);
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		if (vpgd->obj != pgd)
+			continue;
+
+		list_del(&vpgd->head);
+		nouveau_gpuobj_ref(NULL, &vpgd->obj);
+		kfree(vpgd);
+	}
+	mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		nouveau_vm_unlink(vm, vpgd->obj);
+	}
+	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+	kfree(vm->pgt);
+	kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+	       struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm *vm;
+	int ret;
+
+	vm = ref;
+	if (vm) {
+		ret = nouveau_vm_link(vm, pgd);
+		if (ret)
+			return ret;
+
+		vm->refcount++;
+	}
+
+	vm = *ptr;
+	*ptr = ref;
+
+	if (vm) {
+		nouveau_vm_unlink(vm, pgd);
+
+		if (--vm->refcount == 0)
+			nouveau_vm_del(vm);
+	}
+
+	return 0;
+}
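
The index arithmetic repeated in nouveau_vm_map_at(), nouveau_vm_map_sg() and nouveau_vm_unmap_at() splits an offset (kept in 4KiB units) into a page-directory index and a page-table index. The standalone example below works the numbers with the NV50 values chosen in nouveau_vm_new() (pgt_bits = 29 - 12, 64KiB large pages); the address is arbitrary, and the driver additionally subtracts vm->fpde so pde indexes from the start of the managed range.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	const uint32_t pgt_bits   = 29 - 12;	/* each PDE spans 2^29 bytes; offsets are in 4KiB units */
	const uint32_t page_shift = 16;		/* large (64KiB) pages */
	const uint32_t bits       = page_shift - 12;

	uint64_t addr   = 0x30120000ULL;	/* 64KiB-aligned VM address, illustrative only */
	uint32_t offset = addr >> 12;		/* convert to 4KiB units, as nouveau_mm node offsets are */

	uint32_t pde = offset >> pgt_bits;			  /* which page directory entry */
	uint32_t pte = (offset & ((1 << pgt_bits) - 1)) >> bits; /* index within that page table */
	uint32_t max = 1 << (pgt_bits - bits);			  /* PTEs per table at this page size */

	printf("pde=%u pte=%u (of %u per table)\n", pde, pte, max);	/* -> pde=1 pte=4114 (of 8192) */
	return 0;
}
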
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 0000000..e119351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+	struct nouveau_gpuobj *obj[2];
+	u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+	struct list_head head;
+	struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+	struct nouveau_vm *vm;
+	struct nouveau_mm_node *node;
+	u64 offset;
+	u32 access;
+};
+
+struct nouveau_vm {
+	struct drm_device *dev;
+	struct nouveau_mm *mm;
+	int refcount;
+
+	struct list_head pgd_list;
+	atomic_t pgraph_refs;
+	atomic_t pcrypt_refs;
+
+	struct nouveau_vm_pgt *pgt;
+	u32 fpde;
+	u32 lpde;
+
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+			struct nouveau_gpuobj *pgt[2]);
+	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		       u32 pte, dma_addr_t *, u32 cnt);
+	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+	void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int  nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+		    struct nouveau_vm **);
+int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+		    struct nouveau_gpuobj *pgd);
+int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+		    u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+		       dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
+#endif
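
A sketch of how the API declared above is meant to be used from elsewhere in the driver: reserve a virtual range, back it with VRAM, and tear it down again. This is driver-context code, not a standalone program; NV_MEM_ACCESS_RW mirrors the access flag used in the DMA-object path earlier in this series and is assumed, not confirmed, to be the constant the VM layer expects.

#include "nouveau_vm.h"

static int
example_map_vram(struct nouveau_vm *vm, struct nouveau_vram *vram,
		 u64 size, struct nouveau_vma *vma)
{
	int ret;

	/* carve out a virtual range using small (4KiB) pages */
	ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	/* point the PTEs at the VRAM regions backing the allocation */
	nouveau_vm_map(vma, vram);
	return 0;
}

static void
example_unmap_vram(struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);	/* clear the PTEs and flush */
	nouveau_vm_put(vma);	/* release the range and drop page table references */
}
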
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e1807..297505e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@
 	if (dev_priv->card_type >= NV_30)
 		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
 
-	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	if (dev_priv->card_type >= NV_10)
+		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	else
+		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 
 	/* Some misc regs */
 	if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@
 	if (nv_two_heads(dev))
 		NVSetOwner(dev, nv_crtc->index);
 
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
 	NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@
 #endif
 
 	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
 }
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@
 	.cursor_move = nv04_crtc_cursor_move,
 	.gamma_set = nv_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv_crtc_destroy,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f..e000455 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000000))
 			return -EBUSY;
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000001))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000001))
 			return -EBUSY;
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000000))
 			return -EBUSY;
 
 		udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf7..1715e14 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
+static void nv04_vblank_crtc0_isr(struct drm_device *);
+static void nv04_vblank_crtc1_isr(struct drm_device *);
+
 static void
 nv04_display_store_initial_head_owner(struct drm_device *dev)
 {
@@ -197,6 +200,8 @@
 		func->save(encoder);
 	}
 
+	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
+	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
 	return 0;
 }
 
@@ -208,6 +213,9 @@
 
 	NV_DEBUG_KMS(dev, "\n");
 
+	nouveau_irq_unregister(dev, 24);
+	nouveau_irq_unregister(dev, 25);
+
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@
 	return 0;
 }
 
+static void
+nv04_vblank_crtc0_isr(struct drm_device *dev)
+{
+	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+	drm_handle_vblank(dev, 0);
+}
+
+static void
+nv04_vblank_crtc1_isr(struct drm_device *dev)
+{
+	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+	drm_handle_vblank(dev, 1);
+}
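
The nouveau_irq_register(dev, 24, ...) / nouveau_irq_register(dev, 25, ...) calls above hook the CRTC0/CRTC1 vblank bits of the PMC interrupt status word. A speculative sketch of the per-bit dispatch model such an API implies; the handler array and the loop are illustrative assumptions, not the actual nouveau_irq.c implementation.

#include <stdint.h>

struct drm_device;				/* driver context assumed */

typedef void (*example_irq_fn)(struct drm_device *);
static example_irq_fn example_handlers[32];	/* filled by nouveau_irq_register()-style calls */

static void
example_irq_dispatch(struct drm_device *dev, uint32_t pmc_intr_status)
{
	int bit;

	for (bit = 0; bit < 32; bit++) {
		if ((pmc_intr_status & (1u << bit)) && example_handlers[bit])
			example_handlers[bit](dev);	/* e.g. nv04_vblank_crtc0_isr for bit 24 */
	}
}
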
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c93..7a11893 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
-void
+int
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_copyarea(info, region);
-		return;
-	}
+	ret = RING_SPACE(chan, 4);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
 	OUT_RING(chan, (region->sy << 16) | region->sx);
 	OUT_RING(chan, (region->dy << 16) | region->dx);
 	OUT_RING(chan, (region->height << 16) | region->width);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_fillrect(info, rect);
-		return;
-	}
+	ret = RING_SPACE(chan, 7);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
 	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@
 	OUT_RING(chan, (rect->dx << 16) | rect->dy);
 	OUT_RING(chan, (rect->width << 16) | rect->height);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@
 	uint32_t dsize;
 	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 8);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 8);
 	dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;
 
-		if (RING_SPACE(chan, iter_len + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, iter_len + 1);
+		if (ret)
+			return ret;
 
 		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
 		OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@
 	}
 
 	FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	return 0;
 }
 
 int
@@ -214,29 +176,31 @@
 		return -EINVAL;
 	}
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
-				   0x0062 : 0x0042, NvCtxSurf2D);
+	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+				    dev_priv->card_type >= NV_10 ?
+				    0x0062 : 0x0042);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
-				   0x009f : 0x005f, NvImageBlit);
+	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+				    dev_priv->chipset >= 0x11 ?
+				    0x009f : 0x005f);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b..f89d104 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_util.h"
 
 #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
 #define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	/* Setup initial state */
@@ -151,10 +157,31 @@
 nv04_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	unsigned long flags;
 
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
 
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
+	/* Keep it from being rescheduled */
+	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
@@ -208,7 +235,7 @@
 	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -267,6 +294,7 @@
 static void
 nv04_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -289,7 +317,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
@@ -298,3 +326,207 @@
 	return 0;
 }
 
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	struct nouveau_gpuobj *obj;
+	unsigned long flags;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+	bool handled = false;
+	u32 engine;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+		chan = dev_priv->channels.ptr[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	switch (mthd) {
+	case 0x0000: /* bind object to subchannel */
+		obj = nouveau_ramht_find(chan, data);
+		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+			break;
+
+		chan->sw_subchannel[subc] = obj->class;
+		engine = 0x0000000f << (subc * 4);
+
+		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+		handled = true;
+		break;
+	default:
+		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+			break;
+
+		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+					      mthd, data))
+			handled = true;
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return handled;
+}
+
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+		chid = engine->fifo.channel_id(dev);
+		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.. Tests show that it
+			 * wraps around to the start at GET=0x800.. No clue
+			 * as to why..
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (dev_priv->card_type < NV_40) {
+				mthd = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+					     "Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+						NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			u32 dma_get = nv_rd32(dev, 0x003244);
+			u32 dma_put = nv_rd32(dev, 0x003240);
+			u32 push = nv_rd32(dev, 0x003220);
+			u32 state = nv_rd32(dev, 0x003228);
+
+			if (dev_priv->card_type == NV_50) {
+				u32 ho_get = nv_rd32(dev, 0x003328);
+				u32 ho_put = nv_rd32(dev, 0x003320);
+				u32 ib_get = nv_rd32(dev, 0x003334);
+				u32 ib_put = nv_rd32(dev, 0x003330);
+
+				if (nouveau_ratelimit())
+					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+					     "State 0x%08x Push 0x%08x\n",
+						chid, ho_get, dma_get, ho_put,
+						dma_put, ib_get, ib_put, state,
+						push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(dev, 0x003364, 0x00000000);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, dma_put);
+					nv_wr32(dev, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(dev, 0x003334, ib_put);
+				}
+			} else {
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
+					chid, dma_get, dma_put, state, push);
+
+				if (dma_get != dma_put)
+					nv_wr32(dev, 0x003244, dma_put);
+			}
+
+			nv_wr32(dev, 0x003228, 0x00000000);
+			nv_wr32(dev, 0x003220, 0x00000001);
+			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (dev_priv->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+				status &= ~0x00000010;
+				nv_wr32(dev, 0x002100, 0x00000010);
+			}
+		}
+
+		if (status) {
+			if (nouveau_ratelimit())
+				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+					status, chid);
+			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+		nv_wr32(dev, 0x2140, 0);
+		nv_wr32(dev, 0x140, 0);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
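
nouveau_fifo_swmthd() above decodes the trapped PFIFO address into a subchannel (bits 15:13) and a method offset (bits 12:2, hence the 0x1ffc mask). A tiny standalone illustration of that decode, with a made-up sample address:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t addr = (3 << 13) | 0x0060;	/* subchannel 3, method 0x0060 */
	int subc = (addr >> 13) & 0x7;
	int mthd = addr & 0x1ffc;

	printf("subc=%d mthd=0x%04x\n", subc, mthd);	/* -> subc=3 mthd=0x0060 */
	return 0;
}
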
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c897342..af75015 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int  nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
 
 static uint32_t nv04_graph_ctx_regs[] = {
 	0x0040053c,
@@ -357,10 +362,10 @@
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
-void
+static void
 nv04_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@
 
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@
 
 void nv04_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
+	int ret;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv04_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* Enable PGRAPH interrupts */
+	nouveau_irq_register(dev, 12, nv04_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -510,6 +533,8 @@
 
 void nv04_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -524,13 +549,27 @@
 }
 
 static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
-			int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+			u32 class, u32 mthd, u32 data)
 {
 	atomic_set(&chan->fence.last_sequence_irq, data);
 	return 0;
 }
 
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state s;
+
+	if (!nouveau_finish_page_flip(chan, &s))
+		nv_set_crtc_base(dev, s.crtc,
+				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+	return 0;
+}
+
 /*
  * Software methods, why they are needed, and how they all work:
  *
@@ -606,12 +645,12 @@
  */
 
 static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
 	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp  = nv_ri32(dev, instance);
 	tmp &= ~mask;
@@ -623,11 +662,11 @@
 }
 
 static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	uint32_t tmp, ctx1;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 tmp, ctx1;
 	int class, op, valid = 1;
 
 	ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@
 }
 
 static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (data > 5)
 		return 1;
 	/* Old versions of the objects only accept first three operations. */
-	if (data > 2 && grclass < 0x40)
+	if (data > 2 && class < 0x40)
 		return 1;
 	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -706,8 +745,8 @@
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -725,8 +764,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -742,8 +781,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+				    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -763,8 +802,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -778,8 +817,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -793,8 +832,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -808,8 +847,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -823,8 +862,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -838,8 +877,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -853,8 +892,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -868,8 +907,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+				u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -883,8 +922,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -898,8 +937,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -913,8 +952,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -930,194 +969,346 @@
 	return 1;
 }
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
-	{ 0x0150, nv04_graph_mthd_set_ref },
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	/* dvd subpicture */
+	NVOBJ_CLASS(dev, 0x0038, GR);
+
+	/* m2mf */
+	NVOBJ_CLASS(dev, 0x0039, GR);
+
+	/* nv03 gdirect */
+	NVOBJ_CLASS(dev, 0x004b, GR);
+	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 gdirect */
+	NVOBJ_CLASS(dev, 0x004a, GR);
+	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 imageblit */
+	NVOBJ_CLASS(dev, 0x001f, GR);
+	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 imageblit */
+	NVOBJ_CLASS(dev, 0x005f, GR);
+	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 iifc */
+	NVOBJ_CLASS(dev, 0x0060, GR);
+	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+	/* nv05 iifc */
+	NVOBJ_CLASS(dev, 0x0064, GR);
+
+	/* nv01 ifc */
+	NVOBJ_CLASS(dev, 0x0021, GR);
+	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 ifc */
+	NVOBJ_CLASS(dev, 0x0061, GR);
+	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 ifc */
+	NVOBJ_CLASS(dev, 0x0065, GR);
+
+	/* nv03 sifc */
+	NVOBJ_CLASS(dev, 0x0036, GR);
+	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifc */
+	NVOBJ_CLASS(dev, 0x0076, GR);
+	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 sifc */
+	NVOBJ_CLASS(dev, 0x0066, GR);
+
+	/* nv03 sifm */
+	NVOBJ_CLASS(dev, 0x0037, GR);
+	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifm */
+	NVOBJ_CLASS(dev, 0x0077, GR);
+	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* null */
+	NVOBJ_CLASS(dev, 0x0030, GR);
+
+	/* surf2d */
+	NVOBJ_CLASS(dev, 0x0042, GR);
+
+	/* rop */
+	NVOBJ_CLASS(dev, 0x0043, GR);
+
+	/* beta1 */
+	NVOBJ_CLASS(dev, 0x0012, GR);
+
+	/* beta4 */
+	NVOBJ_CLASS(dev, 0x0072, GR);
+
+	/* cliprect */
+	NVOBJ_CLASS(dev, 0x0019, GR);
+
+	/* nv01 pattern */
+	NVOBJ_CLASS(dev, 0x0018, GR);
+
+	/* nv04 pattern */
+	NVOBJ_CLASS(dev, 0x0044, GR);
+
+	/* swzsurf */
+	NVOBJ_CLASS(dev, 0x0052, GR);
+
+	/* surf3d */
+	NVOBJ_CLASS(dev, 0x0053, GR);
+	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+	/* nv03 tex_tri */
+	NVOBJ_CLASS(dev, 0x0048, GR);
+	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+	/* tex_tri */
+	NVOBJ_CLASS(dev, 0x0054, GR);
+
+	/* multitex_tri */
+	NVOBJ_CLASS(dev, 0x0055, GR);
+
+	/* nv01 chroma */
+	NVOBJ_CLASS(dev, 0x0017, GR);
+
+	/* nv04 chroma */
+	NVOBJ_CLASS(dev, 0x0057, GR);
+
+	/* surf_dst */
+	NVOBJ_CLASS(dev, 0x0058, GR);
+
+	/* surf_src */
+	NVOBJ_CLASS(dev, 0x0059, GR);
+
+	/* surf_color */
+	NVOBJ_CLASS(dev, 0x005a, GR);
+
+	/* surf_zeta */
+	NVOBJ_CLASS(dev, 0x005b, GR);
+
+	/* nv01 line */
+	NVOBJ_CLASS(dev, 0x001c, GR);
+	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 line */
+	NVOBJ_CLASS(dev, 0x005c, GR);
+	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 tri */
+	NVOBJ_CLASS(dev, 0x001d, GR);
+	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 tri */
+	NVOBJ_CLASS(dev, 0x005d, GR);
+	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 rect */
+	NVOBJ_CLASS(dev, 0x001e, GR);
+	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 rect */
+	NVOBJ_CLASS(dev, 0x005e, GR);
+	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+};
+
+static struct nouveau_bitfield nv04_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{}
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
-	{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0188, nv04_graph_mthd_bind_rop },
-	{ 0x018c, nv04_graph_mthd_bind_beta1 },
-	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x019c, nv04_graph_mthd_bind_surf_src },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
-	{ 0x0188, nv04_graph_mthd_bind_chroma },
-	{ 0x018c, nv04_graph_mthd_bind_clip },
-	{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0194, nv04_graph_mthd_bind_rop },
-	{ 0x0198, nv04_graph_mthd_bind_beta1 },
-	{ 0x019c, nv04_graph_mthd_bind_beta4 },
-	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_surf_color },
-	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
-	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
-	{},
-};
-
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0038, false, NULL }, /* dvd subpicture */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
-	{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
-	{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
-	{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
-	{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
-	{ 0x0064, false, NULL }, /* nv05 iifc */
-	{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
-	{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
-	{ 0x0065, false, NULL }, /* nv05 ifc */
-	{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
-	{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
-	{ 0x0066, false, NULL }, /* nv05 sifc */
-	{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
-	{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0042, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0018, false, NULL }, /* nv01 pattern */
-	{ 0x0044, false, NULL }, /* nv04 pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
-	{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
-	{ 0x0054, false, NULL }, /* tex_tri */
-	{ 0x0055, false, NULL }, /* multitex_tri */
-	{ 0x0017, false, NULL }, /* nv01 chroma */
-	{ 0x0057, false, NULL }, /* nv04 chroma */
-	{ 0x0058, false, NULL }, /* surf_dst */
-	{ 0x0059, false, NULL }, /* surf_src */
-	{ 0x005a, false, NULL }, /* surf_color */
-	{ 0x005b, false, NULL }, /* surf_zeta */
-	{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
-	{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
-	{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
-	{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
-	{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
-	{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
-	{ 0x506e, true, nv04_graph_mthds_sw },
+static struct nouveau_bitfield nv04_graph_nstatus[] =
+{
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
 	{}
 };
 
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
+	{}
+};
+
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x0f000000) >> 24;
+		u32 subc = (addr & 0x0000e000) >> 13;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_NOTIFY) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_NOTIFY;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv04_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv04_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae29..b8e3edb 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,35 +98,6 @@
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
-{
-	return 0;
-}
-
-void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
-
-int
 nv04_instmem_suspend(struct drm_device *dev)
 {
 	return 0;
@@ -137,3 +108,56 @@
 {
 }
 
+int
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct drm_mm_node *ramin = NULL;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, size, align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	gpuobj->node  = ramin;
+	gpuobj->vinst = ramin->start;
+	return 0;
+}
+
+void
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(gpuobj->node);
+	gpuobj->node = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
+}
+
+int
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+	gpuobj->pinst = gpuobj->vinst;
+	return 0;
+}
+
+void
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+}
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda4..f78181a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
-void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct drm_mm_node *mem;
+	int ret;
 
-	if (pitch) {
-		if (dev_priv->card_type >= NV_20)
-			addr |= 1;
-		else
-			addr |= 1 << 31;
+	ret = drm_mm_pre_get(&pfb->tag_heap);
+	if (ret)
+		return NULL;
+
+	spin_lock(&dev_priv->tile.lock);
+	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+	if (mem)
+		mem = drm_mm_get_block_atomic(mem, size, 0);
+	spin_unlock(&dev_priv->tile.lock);
+
+	return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->tile.lock);
+	drm_mm_put_block(mem);
+	spin_unlock(&dev_priv->tile.lock);
+}
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+	tile->addr = addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+
+	if (dev_priv->card_type == NV_20) {
+		if (flags & NOUVEAU_GEM_TILE_ZETA) {
+			/*
+			 * Allocate some of the on-die tag memory,
+			 * used to store Z compression meta-data (most
+			 * likely just a bitmap determining if a given
+			 * tile is compressed or not).
+			 */
+			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+			if (tile->tag_mem) {
+				/* Enable Z compression */
+				if (dev_priv->chipset >= 0x25)
+					tile->zcomp = tile->tag_mem->start |
+						(bpp == 16 ?
+						 NV25_PFB_ZCOMP_MODE_16 :
+						 NV25_PFB_ZCOMP_MODE_32);
+				else
+					tile->zcomp = tile->tag_mem->start |
+						NV20_PFB_ZCOMP_EN |
+						(bpp == 16 ? 0 :
+						 NV20_PFB_ZCOMP_MODE_32);
+			}
+
+			tile->addr |= 3;
+		} else {
+			tile->addr |= 1;
+		}
+
+	} else {
+		tile->addr |= 1 << 31;
+	}
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	if (tile->tag_mem) {
+		nv20_fb_free_tag(dev, tile->tag_mem);
+		tile->tag_mem = NULL;
 	}
 
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+	if (dev_priv->card_type == NV_20)
+		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
 }
 
 int
@@ -31,9 +117,14 @@
 
 	pfb->num_tiles = NV10_PFB_TILE__SIZE;
 
+	if (dev_priv->card_type == NV_20)
+		drm_mm_init(&pfb->tag_heap, 0,
+			    (dev_priv->chipset >= 0x25 ?
+			     64 * 1024 : 32 * 1024));
+
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
@@ -41,4 +132,13 @@
 void
 nv10_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	int i;
+
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->free_tile_region(dev, i);
+
+	if (dev_priv->card_type == NV_20)
+		drm_mm_takedown(&pfb->tag_heap);
 }
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad..d2ecbff 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	/* Fill entries that are seen filled in dumps of nvidia driver just
 	 * after channel's is put into DMA mode
 	 */
@@ -73,17 +78,6 @@
 	return 0;
 }
 
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-			nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -219,6 +213,7 @@
 static void
 nv10_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -241,7 +236,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c97..8c92edb 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int  nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
 
 #define NV10_FIFO_NUMBER 32
 
@@ -786,15 +790,13 @@
 	return 0;
 }
 
-void
+static void
 nv10_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@
 
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 #define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@
 
 void nv10_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1 << 31;
-
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
-	int i;
+	int ret, i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv10_graph_register(dev);
+	if (ret)
+		return ret;
+
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -928,7 +945,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@
 
 void nv10_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
 	int i;
@@ -1025,18 +1042,14 @@
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
 static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
 	nouveau_wait_for_idle(dev);
 
@@ -1045,40 +1058,118 @@
 	nv_wr32(dev, 0x004006b0,
 		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
-	{ 0x1638, nv17_graph_mthd_lma_window },
-	{ 0x163c, nv17_graph_mthd_lma_window },
-	{ 0x1640, nv17_graph_mthd_lma_window },
-	{ 0x1644, nv17_graph_mthd_lma_window },
-	{ 0x1658, nv17_graph_mthd_lma_enable },
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celcius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+struct nouveau_bitfield nv10_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
 	{}
 };
 
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x005f, false, NULL }, /* imageblit */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0093, false, NULL }, /* surf3d */
-	{ 0x0094, false, NULL }, /* tex_tri */
-	{ 0x0095, false, NULL }, /* multitex_tri */
-	{ 0x0056, false, NULL }, /* celcius (nv10) */
-	{ 0x0096, false, NULL }, /* celcius (nv11) */
-	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] =
+{
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
 	{}
 };
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv10_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd..8464b76 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
 #define NV34_GRCTX_SIZE    (18140)
 #define NV35_36_GRCTX_SIZE (22396)
 
+static int nv20_graph_register(struct drm_device *);
+static int nv30_graph_register(struct drm_device *);
+static void nv20_graph_isr(struct drm_device *);
+
 static void
 nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
@@ -425,9 +429,21 @@
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	unsigned long flags;
 
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
 	nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
+	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
 int
@@ -496,24 +512,27 @@
 }
 
 void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1;
-
-	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+	if (dev_priv->card_type == NV_20) {
+		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	}
 }
 
 int
@@ -560,6 +579,13 @@
 
 	nv20_graph_rdi(dev);
 
+	ret = nv20_graph_register(dev);
+	if (ret) {
+		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+		return ret;
+	}
+
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -571,16 +597,17 @@
 	nv_wr32(dev, 0x40009C           , 0x00000040);
 
 	if (dev_priv->chipset >= 0x25) {
-		nv_wr32(dev, 0x400890, 0x00080000);
+		nv_wr32(dev, 0x400890, 0x00a8cfff);
 		nv_wr32(dev, 0x400610, 0x304B1FB6);
-		nv_wr32(dev, 0x400B80, 0x18B82880);
+		nv_wr32(dev, 0x400B80, 0x1cbd3883);
 		nv_wr32(dev, 0x400B84, 0x44000000);
 		nv_wr32(dev, 0x400098, 0x40000080);
 		nv_wr32(dev, 0x400B88, 0x000000ff);
+
 	} else {
-		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+		nv_wr32(dev, 0x400880, 0x0008c7df);
 		nv_wr32(dev, 0x400094, 0x00000005);
-		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+		nv_wr32(dev, 0x400B80, 0x45eae20e);
 		nv_wr32(dev, 0x400B84, 0x24000000);
 		nv_wr32(dev, 0x400098, 0x00000040);
 		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
-	for (i = 0; i < 8; i++) {
-		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-					nv_rd32(dev, 0x100300 + i * 4));
-	}
 	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
 	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
+
 	nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
 }
 
@@ -684,9 +708,16 @@
 			return ret;
 	}
 
+	ret = nv30_graph_register(dev);
+	if (ret) {
+		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+		return ret;
+	}
+
 	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
 		     pgraph->ctx_table->pinst >> 4);
 
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -724,7 +755,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
@@ -744,46 +775,125 @@
 	return 0;
 }
 
-struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x009e, false, NULL }, /* swzsurf */
-	{ 0x0096, false, NULL }, /* celcius */
-	{ 0x0097, false, NULL }, /* kelvin (nv20) */
-	{ 0x0597, false, NULL }, /* kelvin (nv25) */
-	{}
-};
+static int
+nv20_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x038a, false, NULL }, /* ifc (nv30) */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0389, false, NULL }, /* sifm (nv30) */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0362, false, NULL }, /* surf2d (nv30) */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x039e, false, NULL }, /* swzsurf */
-	{ 0x0397, false, NULL }, /* rankine (nv30) */
-	{ 0x0497, false, NULL }, /* rankine (nv35) */
-	{ 0x0697, false, NULL }, /* rankine (nv34) */
-	{}
-};
+	if (dev_priv->engine.graph.registered)
+		return 0;
 
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
+
+	/* kelvin */
+	if (dev_priv->chipset < 0x25)
+		NVOBJ_CLASS(dev, 0x0097, GR);
+	else
+		NVOBJ_CLASS(dev, 0x0597, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static int
+nv30_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+	/* rankine */
+	if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0397, GR);
+	else
+	if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0697, GR);
+	else
+	if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0497, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f0..e0135f0 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr | 1;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
 static int
 calc_bias(struct drm_device *dev, int k, int i, int j)
 {
@@ -65,7 +86,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8..f3d9c05 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
 #include "nouveau_drm.h"
 
 void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x40:
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
 		break;
 	}
 }
@@ -64,7 +60,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b..49b9a35 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV40_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	nv_wi32(dev, fc +  0, chan->pushbuf_base);
@@ -59,7 +64,6 @@
 			      NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 			      0x30000000 /* no idea.. */);
-	nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
 	nv_wi32(dev, fc + 60, 0x0001FFFF);
 
 	/* enable the fifo dma operation */
@@ -70,17 +74,6 @@
 	return 0;
 }
 
-void
-nv40_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -279,6 +272,7 @@
 static void
 nv40_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -301,7 +295,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91..19ef92a 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
 
+static int nv40_graph_register(struct drm_device *);
+static void nv40_graph_isr(struct drm_device *);
+
 struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
@@ -42,7 +45,7 @@
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin_grctx &&
 		    chan->ramin_grctx->pinst == inst)
@@ -59,6 +62,7 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_grctx ctx = {};
+	unsigned long flags;
 	int ret;
 
 	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
@@ -73,12 +77,39 @@
 	nv40_grctx_init(&ctx);
 
 	nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
+
+	/* init grctx pointer in ramfc, and on PFIFO if channel is
+	 * already active there
+	 */
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+		nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 	return 0;
 }
 
 void
 nv40_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
@@ -174,43 +205,39 @@
 }
 
 void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x44:
 	case 0x4a:
 	case 0x4e:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
 
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 	}
 }
@@ -232,7 +259,7 @@
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_grctx ctx = {};
 	uint32_t vramsz, *cp;
-	int i, j;
+	int ret, i, j;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +283,14 @@
 
 	kfree(cp);
 
+	ret = nv40_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* No context present currently */
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
+	nouveau_irq_register(dev, 12, nv40_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -347,7 +379,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv40_graph_set_tile_region(dev, i);
 
 	/* begin RAM config */
 	vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +422,111 @@
 
 void nv40_graph_takedown(struct drm_device *dev)
 {
+	nouveau_irq_unregister(dev, 12);
 }
 
-struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x3089, false, NULL }, /* sifm (nv40) */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x3062, false, NULL }, /* surf2d (nv40) */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x309e, false, NULL }, /* swzsurf */
-	{ 0x4097, false, NULL }, /* curie (nv40) */
-	{ 0x4497, false, NULL }, /* curie (nv44) */
-	{}
-};
+static int
+nv40_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+	/* curie */
+	if (dev_priv->chipset >= 0x60 ||
+	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x4497, GR);
+	else
+		NVOBJ_CLASS(dev, 0x4097, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin_grctx)
+			continue;
+
+		if (inst == chan->ramin_grctx->pinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+		u32 chid = nv40_graph_isr_chid(dev, inst);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			} else
+			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+				nv_mask(dev, 0x402000, 0, 0);
+			}
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+				chid, inst, subc, class, mthd, data);
+		}
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0..9023c4d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -115,15 +115,16 @@
 		OUT_RING(evo, 0);
 		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
 		if (dev_priv->chipset != 0x50)
-			if (nv_crtc->fb.tile_flags == 0x7a00)
+			if (nv_crtc->fb.tile_flags == 0x7a00 ||
+			    nv_crtc->fb.tile_flags == 0xfe00)
 				OUT_RING(evo, NvEvoFB32);
 			else
 			if (nv_crtc->fb.tile_flags == 0x7000)
 				OUT_RING(evo, NvEvoFB16);
 			else
-				OUT_RING(evo, NvEvoVRAM);
+				OUT_RING(evo, NvEvoVRAM_LP);
 		else
-			OUT_RING(evo, NvEvoVRAM);
+			OUT_RING(evo, NvEvoVRAM_LP);
 	}
 
 	nv_crtc->fb.blanked = blanked;
@@ -345,7 +346,6 @@
 		     uint32_t buffer_handle, uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_bo *cursor = NULL;
 	struct drm_gem_object *gem;
@@ -374,8 +374,7 @@
 
 	nouveau_bo_unmap(cursor);
 
-	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
-					    dev_priv->vm_vram_base);
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
 	nv_crtc->cursor.show(nv_crtc, true);
 
 out:
@@ -437,6 +436,7 @@
 	.cursor_move = nv50_crtc_cursor_move,
 	.gamma_set = nv50_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv50_crtc_destroy,
 };
 
@@ -453,6 +453,7 @@
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	nv50_crtc_blank(nv_crtc, true);
 }
 
@@ -468,6 +469,7 @@
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
 	nv50_crtc_blank(nv_crtc, false);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
 
 	ret = RING_SPACE(evo, 2);
 	if (ret) {
@@ -545,7 +547,7 @@
 		 return -EINVAL;
 	}
 
-	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+	nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
 	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
 	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
 	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -554,13 +556,14 @@
 			return ret;
 
 		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-		if (nv_crtc->fb.tile_flags == 0x7a00)
+		if (nv_crtc->fb.tile_flags == 0x7a00 ||
+		    nv_crtc->fb.tile_flags == 0xfe00)
 			OUT_RING(evo, NvEvoFB32);
 		else
 		if (nv_crtc->fb.tile_flags == 0x7000)
 			OUT_RING(evo, NvEvoFB16);
 		else
-			OUT_RING(evo, NvEvoVRAM);
+			OUT_RING(evo, NvEvoVRAM_LP);
 	}
 
 	ret = RING_SPACE(evo, 12);
@@ -574,8 +577,10 @@
 	if (!nv_crtc->fb.tile_flags) {
 		OUT_RING(evo, drm_fb->pitch | (1 << 20));
 	} else {
-		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
-				  fb->nvbo->tile_mode);
+		u32 tile_mode = fb->nvbo->tile_mode;
+		if (dev_priv->card_type >= NV_C0)
+			tile_mode >>= 4;
+		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
 	}
 	if (dev_priv->chipset == 0x50)
 		OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c61..7cc94ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
 #include "nouveau_ramht.h"
 #include "drm_crtc_helper.h"
 
+static void nv50_display_isr(struct drm_device *);
+
 static inline int
 nv50_sor_nr(struct drm_device *dev)
 {
@@ -46,159 +48,6 @@
 	return 4;
 }
 
-static void
-nv50_evo_channel_del(struct nouveau_channel **pchan)
-{
-	struct nouveau_channel *chan = *pchan;
-
-	if (!chan)
-		return;
-	*pchan = NULL;
-
-	nouveau_gpuobj_channel_takedown(chan);
-	nouveau_bo_unmap(chan->pushbuf_bo);
-	nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-
-	if (chan->user)
-		iounmap(chan->user);
-
-	kfree(chan);
-}
-
-static int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
-		    uint32_t tile_flags, uint32_t magic_flags,
-		    uint32_t offset, uint32_t limit)
-{
-	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
-	struct drm_device *dev = evo->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = NVOBJ_ENGINE_DISPLAY;
-
-	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
-	nv_wo32(obj,  4, limit);
-	nv_wo32(obj,  8, offset);
-	nv_wo32(obj, 12, 0x00000000);
-	nv_wo32(obj, 16, 0x00000000);
-	if (dev_priv->card_type < NV_C0)
-		nv_wo32(obj, 20, 0x00010000);
-	else
-		nv_wo32(obj, 20, 0x00020000);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nouveau_ramht_insert(evo, name, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret) {
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramht = NULL;
-	struct nouveau_channel *chan;
-	int ret;
-
-	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
-	if (!chan)
-		return -ENOMEM;
-	*pchan = chan;
-
-	chan->id = -1;
-	chan->dev = dev;
-	chan->user_get = 4;
-	chan->user_put = 0;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
-	if (ret) {
-		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
-	if (ret) {
-		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
-	if (ret) {
-		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
-	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret) {
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	if (dev_priv->chipset != 0x50) {
-		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
-					  0, 0xffffffff);
-		if (ret) {
-			nv50_evo_channel_del(pchan);
-			return ret;
-		}
-
-
-		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
-					  0, 0xffffffff);
-		if (ret) {
-			nv50_evo_channel_del(pchan);
-			return ret;
-		}
-	}
-
-	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
-				  0, dev_priv->vram_size);
-	if (ret) {
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
-			     false, true, &chan->pushbuf_bo);
-	if (ret == 0)
-		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret) {
-		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-					NV50_PDISPLAY_USER(0), PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "Error mapping EVO control regs.\n");
-		nv50_evo_channel_del(pchan);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
 int
 nv50_display_early_init(struct drm_device *dev)
 {
@@ -214,17 +63,16 @@
 nv50_display_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
 	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct nouveau_channel *evo = dev_priv->evo;
 	struct drm_connector *connector;
-	uint32_t val, ram_amount;
-	uint64_t start;
+	struct nouveau_channel *evo;
 	int ret, i;
+	u32 val;
 
 	NV_DEBUG_KMS(dev, "\n");
 
 	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+
 	/*
 	 * I think the 0x006101XX range is some kind of main control area
 	 * that enables things.
@@ -240,16 +88,19 @@
 		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
 		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
 	}
+
 	/* DAC */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
 		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
 	}
+
 	/* SOR */
 	for (i = 0; i < nv50_sor_nr(dev); i++) {
 		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
 		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
 	}
+
 	/* EXT */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@
 		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
 	}
 
-	/* This used to be in crtc unblank, but seems out of place there. */
-	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
-	/* RAM is clamped to 256 MiB. */
-	ram_amount = dev_priv->vram_size;
-	NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
-	if (ram_amount > 256*1024*1024)
-		ram_amount = 256*1024*1024;
-	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
-	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
-	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
-
 	/* The precise purpose is unknown, i suspect it has something to do
 	 * with text mode.
 	 */
@@ -287,37 +127,6 @@
 		}
 	}
 
-	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
-	 * stuck in some unspecified state
-	 */
-	start = ptimer->read(dev);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
-	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
-		if ((val & 0x9f0000) == 0x20000)
-			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-							val | 0x800000);
-
-		if ((val & 0x3f0000) == 0x30000)
-			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-							val | 0x200000);
-
-		if (ptimer->read(dev) - start > 1000000000ULL) {
-			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
-			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
-	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-		     0x40000000, 0x40000000)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n",
-			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
-		return -EBUSY;
-	}
-
 	for (i = 0; i < 2; i++) {
 		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
 		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+	nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
 
-	/* initialise fifo */
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
-		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
-		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
-	if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
-		return -EBUSY;
+	/* enable hotplug interrupts */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+
+		if (conn->dcb->gpio_tag == 0xff)
+			continue;
+
+		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
 	}
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
-		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
-	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
-		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
-	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
 
-	evo->dma.max = (4096/4) - 2;
-	evo->dma.put = 0;
-	evo->dma.cur = evo->dma.put;
-	evo->dma.free = evo->dma.max - evo->dma.cur;
-
-	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	ret = nv50_evo_init(dev);
 	if (ret)
 		return ret;
+	evo = dev_priv->evo;
 
-	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-		OUT_RING(evo, 0);
+	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
 	ret = RING_SPACE(evo, 11);
 	if (ret)
@@ -393,21 +194,6 @@
 	if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
 		NV_ERROR(dev, "evo pushbuf stalled\n");
 
-	/* enable clock change interrupts. */
-	nv_wr32(dev, 0x610028, 0x00010001);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
-					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
-					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));
-
-	/* enable hotplug interrupts */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct nouveau_connector *conn = nouveau_connector(connector);
-
-		if (conn->dcb->gpio_tag == 0xff)
-			continue;
-
-		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
-	}
 
 	return 0;
 }
@@ -452,13 +238,7 @@
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
-	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
-	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n",
-			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
-	}
+	nv50_evo_fini(dev);
 
 	for (i = 0; i < 3; i++) {
 		if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@
 	}
 
 	/* disable interrupts. */
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
 
 	/* disable hotplug interrupts */
 	nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@
 
 	dev->mode_config.fb_base = dev_priv->fb_phys;
 
-	/* Create EVO channel */
-	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
-	if (ret) {
-		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
-		return ret;
-	}
-
 	/* Create CRTC objects */
 	for (i = 0; i < 2; i++)
 		nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@
 		}
 	}
 
+	INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+	nouveau_irq_register(dev, 26, nv50_display_isr);
+
 	ret = nv50_display_init(dev);
 	if (ret) {
 		nv50_display_destroy(dev);
@@ -569,14 +345,12 @@
 void
 nv50_display_destroy(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
 	NV_DEBUG_KMS(dev, "\n");
 
 	drm_mode_config_cleanup(dev);
 
 	nv50_display_disable(dev);
-	nv50_evo_channel_del(&dev_priv->evo);
+	nouveau_irq_unregister(dev, 26);
 }
 
 static u16
@@ -660,32 +434,32 @@
 nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	struct list_head *entry, *tmp;
+	struct nouveau_channel *chan, *tmp;
 
-	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
-		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+	list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
+				 nvsw.vbl_wait) {
+		if (chan->nvsw.vblsem_head != crtc)
+			continue;
 
 		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
 						chan->nvsw.vblsem_rval);
 		list_del(&chan->nvsw.vbl_wait);
+		drm_vblank_put(dev, crtc);
 	}
+
+	drm_handle_vblank(dev, crtc);
 }
 
 static void
 nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
 {
-	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-
 	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
 		nv50_display_vblank_crtc_handler(dev, 0);
 
 	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
 		nv50_display_vblank_crtc_handler(dev, 1);
 
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
-		     NV50_PDISPLAY_INTR_EN) & ~intr);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
 }
 
 static void
@@ -1011,108 +785,31 @@
 static void
 nv50_display_error_handler(struct drm_device *dev)
 {
-	uint32_t addr, data;
+	u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+	u32 addr, data;
+	int chid;
 
-	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
-	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
-	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
-
-	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
-		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
-
-	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
-}
-
-void
-nv50_display_irq_hotplug_bh(struct work_struct *work)
-{
-	struct drm_nouveau_private *dev_priv =
-		container_of(work, struct drm_nouveau_private, hpd_work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_connector *connector;
-	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
-	uint32_t unplug_mask, plug_mask, change_mask;
-	uint32_t hpd0, hpd1;
-
-	spin_lock_irq(&dev_priv->hpd_state.lock);
-	hpd0 = dev_priv->hpd_state.hpd0_bits;
-	dev_priv->hpd_state.hpd0_bits = 0;
-	hpd1 = dev_priv->hpd_state.hpd1_bits;
-	dev_priv->hpd_state.hpd1_bits = 0;
-	spin_unlock_irq(&dev_priv->hpd_state.lock);
-
-	hpd0 &= nv_rd32(dev, 0xe050);
-	if (dev_priv->chipset >= 0x90)
-		hpd1 &= nv_rd32(dev, 0xe070);
-
-	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
-	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
-	change_mask = plug_mask | unplug_mask;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_encoder_helper_funcs *helper;
-		struct nouveau_connector *nv_connector =
-			nouveau_connector(connector);
-		struct nouveau_encoder *nv_encoder;
-		struct dcb_gpio_entry *gpio;
-		uint32_t reg;
-		bool plugged;
-
-		if (!nv_connector->dcb)
+	for (chid = 0; chid < 5; chid++) {
+		if (!(channels & (1 << chid)))
 			continue;
 
-		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
-		if (!gpio || !(change_mask & (1 << gpio->line)))
-			continue;
+		nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+		addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+		data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
+		NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+			      "(0x%04x 0x%02x)\n", chid,
+			 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
 
-		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
-		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
-		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
-			drm_get_connector_name(connector)) ;
-
-		if (!connector->encoder || !connector->encoder->crtc ||
-		    !connector->encoder->crtc->enabled)
-			continue;
-		nv_encoder = nouveau_encoder(connector->encoder);
-		helper = connector->encoder->helper_private;
-
-		if (nv_encoder->dcb->type != OUTPUT_DP)
-			continue;
-
-		if (plugged)
-			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
-		else
-			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
 	}
-
-	drm_helper_hpd_irq_event(dev);
 }
 
-void
-nv50_display_irq_handler(struct drm_device *dev)
+static void
+nv50_display_isr(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t delayed = 0;
 
-	if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
-		uint32_t hpd0_bits, hpd1_bits = 0;
-
-		hpd0_bits = nv_rd32(dev, 0xe054);
-		nv_wr32(dev, 0xe054, hpd0_bits);
-
-		if (dev_priv->chipset >= 0x90) {
-			hpd1_bits = nv_rd32(dev, 0xe074);
-			nv_wr32(dev, 0xe074, hpd1_bits);
-		}
-
-		spin_lock(&dev_priv->hpd_state.lock);
-		dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
-		dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
-		spin_unlock(&dev_priv->hpd_state.lock);
-
-		queue_work(dev_priv->wq, &dev_priv->hpd_work);
-	}
-
 	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
 		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
 		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@
 		if (!intr0 && !(intr1 & ~delayed))
 			break;
 
-		if (intr0 & 0x00010000) {
+		if (intr0 & 0x001f0000) {
 			nv50_display_error_handler(dev);
-			intr0 &= ~0x00010000;
+			intr0 &= ~0x001f0000;
 		}
 
 		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@
 		}
 	}
 }
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b..f0e30b78 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
 #include "nouveau_crtc.h"
 #include "nv50_evo.h"
 
-void nv50_display_irq_handler(struct drm_device *dev);
 void nv50_display_irq_handler_bh(struct work_struct *work);
-void nv50_display_irq_hotplug_bh(struct work_struct *work);
 int nv50_display_early_init(struct drm_device *dev);
 void nv50_display_late_takedown(struct drm_device *dev);
 int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 0000000..14e24e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pevo)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_channel *evo = *pevo;
+
+	if (!evo)
+		return;
+	*pevo = NULL;
+
+	dev_priv = evo->dev->dev_private;
+	dev_priv->evo_alloc &= ~(1 << evo->id);
+
+	nouveau_gpuobj_channel_takedown(evo);
+	nouveau_bo_unmap(evo->pushbuf_bo);
+	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+
+	if (evo->user)
+		iounmap(evo->user);
+
+	kfree(evo);
+}
+
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
+		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
+		    u32 flags5)
+{
+	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+	struct drm_device *dev = evo->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+	if (ret)
+		return ret;
+	obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
+	nv_wo32(obj,  4, limit);
+	nv_wo32(obj,  8, offset);
+	nv_wo32(obj, 12, 0x00000000);
+	nv_wo32(obj, 16, 0x00000000);
+	nv_wo32(obj, 20, flags5);
+	dev_priv->engine.instmem.flush(dev);
+
+	ret = nouveau_ramht_insert(evo, name, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	if (ret) {
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo;
+	int ret;
+
+	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+	if (!evo)
+		return -ENOMEM;
+	*pevo = evo;
+
+	for (evo->id = 0; evo->id < 5; evo->id++) {
+		if (dev_priv->evo_alloc & (1 << evo->id))
+			continue;
+
+		dev_priv->evo_alloc |= (1 << evo->id);
+		break;
+	}
+
+	if (evo->id == 5) {
+		kfree(evo);
+		return -ENODEV;
+	}
+
+	evo->dev = dev;
+	evo->user_get = 4;
+	evo->user_put = 0;
+
+	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+			     false, true, &evo->pushbuf_bo);
+	if (ret == 0)
+		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pevo);
+		return ret;
+	}
+
+	ret = nouveau_bo_map(evo->pushbuf_bo);
+	if (ret) {
+		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pevo);
+		return ret;
+	}
+
+	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
+	if (!evo->user) {
+		NV_ERROR(dev, "Error mapping EVO control regs.\n");
+		nv50_evo_channel_del(pevo);
+		return -ENOMEM;
+	}
+
+	/* bind primary evo channel's ramht to the channel */
+	if (dev_priv->evo && evo != dev_priv->evo)
+		nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+
+	return 0;
+}
+
+static int
+nv50_evo_channel_init(struct nouveau_channel *evo)
+{
+	struct drm_device *dev = evo->dev;
+	int id = evo->id, ret, i;
+	u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+	u32 tmp;
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	if ((tmp & 0x009f0000) == 0x00020000)
+		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	if ((tmp & 0x003f0000) == 0x00030000)
+		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+
+	/* initialise fifo */
+	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
+		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+
+	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
+			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+		return -EBUSY;
+	}
+
+	/* enable error reporting on the channel */
+	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+
+	evo->dma.max = (4096/4) - 2;
+	evo->dma.put = 0;
+	evo->dma.cur = evo->dma.put;
+	evo->dma.free = evo->dma.max - evo->dma.cur;
+
+	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(evo, 0);
+
+	return 0;
+}
+
+static void
+nv50_evo_channel_fini(struct nouveau_channel *evo)
+{
+	struct drm_device *dev = evo->dev;
+	int id = evo->id;
+
+	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
+			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+	}
+}
+
+static int
+nv50_evo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramht = NULL;
+	struct nouveau_channel *evo;
+	int ret;
+
+	/* create primary evo channel, the one we use for modesetting
+	 * purposes
+	 */
+	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+	if (ret)
+		return ret;
+	evo = dev_priv->evo;
+
+	/* setup object management on it, any other evo channel will
+	 * use this also as there's no per-channel support on the
+	 * hardware
+	 */
+	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
+				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+	if (ret) {
+		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+	if (ret) {
+		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
+	if (ret) {
+		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
+	nouveau_gpuobj_ref(NULL, &ramht);
+	if (ret) {
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	/* create some default objects for the scanout memtypes we support */
+	if (dev_priv->card_type >= NV_C0) {
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
+					  0, 0xffffffff, 0x00000000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00020000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00000000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+	} else
+	if (dev_priv->chipset != 0x50) {
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
+					  0, 0xffffffff, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
+					  0, 0xffffffff, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int
+nv50_evo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!dev_priv->evo) {
+		ret = nv50_evo_create(dev);
+		if (ret)
+			return ret;
+	}
+
+	return nv50_evo_channel_init(dev_priv->evo);
+}
+
+void
+nv50_evo_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->evo) {
+		nv50_evo_channel_fini(dev_priv->evo);
+		nv50_evo_channel_del(&dev_priv->evo);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae1334..aa4f0d3 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
  *
  */
 
+#ifndef __NV50_EVO_H__
+#define __NV50_EVO_H__
+
+int  nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
+			 u32 tile_flags, u32 magic_flags,
+			 u32 offset, u32 limit, u32 flags5);
+
 #define NV50_EVO_UPDATE                                              0x00000080
 #define NV50_EVO_UNK84                                               0x00000084
 #define NV50_EVO_UNK84_NOTIFY                                        0x40000000
@@ -111,3 +120,4 @@
 #define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
 #define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
 
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b..50290de 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+struct nv50_fb_priv {
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!priv->r100c08_page) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+		__free_page(priv->r100c08_page);
+		kfree(priv);
+		return -EFAULT;
+	}
+
+	dev_priv->engine.fb.priv = priv;
+	return 0;
+}
+
 int
 nv50_fb_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+	int ret;
+
+	if (!dev_priv->engine.fb.priv) {
+		ret = nv50_fb_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = dev_priv->engine.fb.priv;
 
 	/* Not a clue what this is exactly.  Without pointing it at a
 	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
 	 * cause IOMMU "read from address 0" errors (rh#561267)
 	 */
-	nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+	nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
 	switch (dev_priv->chipset) {
 	case 0x50:
-		nv_wr32(dev, 0x100c90, 0x0707ff);
+		nv_wr32(dev, 0x100c90, 0x000707ff);
 		break;
 	case 0xa3:
 	case 0xa5:
 	case 0xa8:
-		nv_wr32(dev, 0x100c90, 0x0d0fff);
+		nv_wr32(dev, 0x100c90, 0x000d0fff);
+		break;
+	case 0xaf:
+		nv_wr32(dev, 0x100c90, 0x089d1fff);
 		break;
 	default:
-		nv_wr32(dev, 0x100c90, 0x1d07ff);
+		nv_wr32(dev, 0x100c90, 0x001d07ff);
 		break;
 	}
 
@@ -36,12 +81,25 @@
 void
 nv50_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+
+	priv = dev_priv->engine.fb.priv;
+	if (!priv)
+		return;
+	dev_priv->engine.fb.priv = NULL;
+
+	pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+		       PCI_DMA_BIDIRECTIONAL);
+	__free_page(priv->r100c08_page);
+	kfree(priv);
 }
 
 void
 nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 	u32 trap[6], idx, chinst;
 	int i, ch;
 
@@ -60,8 +118,10 @@
 		return;
 
 	chinst = (trap[2] << 16) | trap[1];
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-		struct nouveau_channel *chan = dev_priv->fifos[ch];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
 			continue;
@@ -69,6 +129,7 @@
 		if (chinst == chan->ramin->vinst >> 12)
 			break;
 	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
 	NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
 		     "channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048..791ded1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,29 +1,46 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
 
-void
+int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_fillrect(info, rect);
-		return;
-	}
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
 
 	if (rect->rop != ROP_COPY) {
 		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +62,21 @@
 		OUT_RING(chan, 3);
 	}
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_copyarea(info, region);
-		return;
-	}
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
 	OUT_RING(chan, 0);
@@ -80,9 +91,10 @@
 	OUT_RING(chan, 0);
 	OUT_RING(chan, region->sy);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +104,14 @@
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 32);
 	dwords = (width * image->height) >> 5;
@@ -134,11 +137,9 @@
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
-		if (RING_SPACE(chan, push + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
 
 		dwords -= push;
 
@@ -148,6 +149,7 @@
 	}
 
 	FIRE_RING(chan);
+	return 0;
 }
 
 int
@@ -157,12 +159,9 @@
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
-	struct nouveau_gpuobj *eng2d = NULL;
-	uint64_t fb;
+	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
 	int ret, format;
 
-	fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
-
 	switch (info->var.bits_per_pixel) {
 	case 8:
 		format = 0xf3;
@@ -190,12 +189,7 @@
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
-	nouveau_gpuobj_ref(NULL, &eng2d);
+	ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
 	if (ret)
 		return ret;
 
@@ -253,8 +247,8 @@
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb));
-	OUT_RING(chan, lower_32_bits(fb));
+	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
@@ -262,8 +256,8 @@
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb));
-	OUT_RING(chan, lower_32_bits(fb));
+	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd..8dd04c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
 
 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@
 
 	/* We never schedule channel 0 or 127 */
 	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+		if (dev_priv->channels.ptr[i] &&
+		    dev_priv->channels.ptr[i]->ramfc) {
 			nv_wo32(cur, (nr * 4), i);
 			nr++;
 		}
@@ -60,7 +62,7 @@
 nv50_fifo_channel_enable(struct drm_device *dev, int channel)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[channel];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
 	uint32_t inst;
 
 	NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@
 {
 	NV_DEBUG(dev, "\n");
 
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
 }
@@ -118,7 +121,7 @@
 	NV_DEBUG(dev, "\n");
 
 	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
-		if (dev_priv->fifos[i])
+		if (dev_priv->channels.ptr[i])
 			nv50_fifo_channel_enable(dev, i);
 		else
 			nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@
 	if (!pfifo->playlist[0])
 		return;
 
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+
 	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
 	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
 }
@@ -256,6 +262,11 @@
 	}
 	ramfc = chan->ramfc;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV50_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_gpuobj *ramfc = NULL;
+	unsigned long flags;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
 	/* This will ensure the channel is seen as disabled. */
 	nouveau_gpuobj_ref(chan->ramfc, &ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@
 		nv50_fifo_channel_disable(dev, 127);
 	nv50_fifo_playlist_update(dev);
 
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->cache);
 }
@@ -392,7 +424,7 @@
 	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -467,5 +499,5 @@
 void
 nv50_fifo_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 5);
+	nv50_vm_flush_engine(dev, 5);
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2b..6b149c0 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
 
+#include "nv50_display.h"
+
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+	struct list_head handlers;
+	spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+	struct drm_device *dev;
+	struct list_head head;
+	struct work_struct work;
+	bool inhibit;
+
+	struct dcb_gpio_entry *gpio;
+
+	void (*handler)(void *data, int state);
+	void *data;
+};
+
 static int
 nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
 {
@@ -75,29 +97,123 @@
 	return 0;
 }
 
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+		       void (*handler)(void *, int), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh;
+	struct dcb_gpio_entry *gpio;
+	unsigned long flags;
+
+	gpio = nouveau_bios_gpio_entry(dev, tag);
+	if (!gpio)
+		return -ENOENT;
+
+	gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+	if (!gpioh)
+		return -ENOMEM;
+
+	INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+	gpioh->dev  = dev;
+	gpioh->gpio = gpio;
+	gpioh->handler = handler;
+	gpioh->data = data;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_add(&gpioh->head, &priv->handlers);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
 void
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+			 void (*handler)(void *, int), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh, *tmp;
+	struct dcb_gpio_entry *gpio;
+	unsigned long flags;
+
+	gpio = nouveau_bios_gpio_entry(dev, tag);
+	if (!gpio)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+		if (gpioh->gpio != gpio ||
+		    gpioh->handler != handler ||
+		    gpioh->data != data)
+			continue;
+		list_del(&gpioh->head);
+		kfree(gpioh);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+bool
 nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
 {
 	struct dcb_gpio_entry *gpio;
 	u32 reg, mask;
 
 	gpio = nouveau_bios_gpio_entry(dev, tag);
-	if (!gpio) {
-		NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
-		return;
-	}
+	if (!gpio)
+		return false;
 
 	reg  = gpio->line < 16 ? 0xe050 : 0xe070;
 	mask = 0x00010001 << (gpio->line & 0xf);
 
 	nv_wr32(dev, reg + 4, mask);
-	nv_mask(dev, reg + 0, mask, on ? mask : 0);
+	reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+	return (reg & mask) == mask;
+}
+
+static int
+nv50_gpio_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->handlers);
+	spin_lock_init(&priv->lock);
+	pgpio->priv = priv;
+	return 0;
+}
+
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+	kfree(pgpio->priv);
+	pgpio->priv = NULL;
 }
 
 int
 nv50_gpio_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv;
+	int ret;
+
+	if (!pgpio->priv) {
+		ret = nv50_gpio_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pgpio->priv;
 
 	/* disable, and ack any pending gpio interrupts */
 	nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@
 		nv_wr32(dev, 0xe074, 0xffffffff);
 	}
 
+	nouveau_irq_register(dev, 21, nv50_gpio_isr);
 	return 0;
 }
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nv_wr32(dev, 0xe050, 0x00000000);
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe070, 0x00000000);
+	nouveau_irq_unregister(dev, 21);
+
+	nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+	struct nv50_gpio_handler *gpioh =
+		container_of(work, struct nv50_gpio_handler, work);
+	struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	unsigned long flags;
+	int state;
+
+	state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+	if (state < 0)
+		return;
+
+	gpioh->handler(gpioh->data, state);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	gpioh->inhibit = false;
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh;
+	u32 intr0, intr1 = 0;
+	u32 hi, lo, ch;
+
+	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+	if (dev_priv->chipset >= 0x90)
+		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
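+	/* fold both per-bank status registers into 32-bit masks; a set bit
+	 * in "ch" means that GPIO line raised an interrupt
+	 */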
+	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+	ch = hi | lo;
+
+	nv_wr32(dev, 0xe054, intr0);
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe074, intr1);
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(gpioh, &priv->handlers, head) {
+		if (!(ch & (1 << gpioh->gpio->line)))
+			continue;
+
+		if (gpioh->inhibit)
+			continue;
+		gpioh->inhibit = true;
+
+		queue_work(dev_priv->wq, &gpioh->work);
+	}
+	spin_unlock(&priv->lock);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0..2d7ea75 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
+#include "nouveau_dma.h"
+#include "nouveau_vm.h"
+#include "nv50_evo.h"
+
+static int  nv50_graph_register(struct drm_device *);
+static void nv50_graph_isr(struct drm_device *);
 
 static void
 nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@
 {
 	NV_DEBUG(dev, "\n");
 
+	nouveau_irq_register(dev, 12, nv50_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
 	nv_wr32(dev, 0x400138, 0xffffffff);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@
 	nv50_graph_init_reset(dev);
 	nv50_graph_init_regs__nv(dev);
 	nv50_graph_init_regs(dev);
-	nv50_graph_init_intr(dev);
 
 	ret = nv50_graph_init_ctxctl(dev);
 	if (ret)
 		return ret;
 
+	ret = nv50_graph_register(dev);
+	if (ret)
+		return ret;
+	nv50_graph_init_intr(dev);
 	return 0;
 }
 
@@ -158,6 +168,8 @@
 nv50_graph_takedown(struct drm_device *dev)
 {
 	NV_DEBUG(dev, "\n");
+	nv_wr32(dev, 0x40013c, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -190,7 +202,7 @@
 	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin && chan->ramin->vinst == inst)
 			return chan;
@@ -211,7 +223,7 @@
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
 				 NVOBJ_FLAG_ZERO_ALLOC |
 				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
 	if (ret)
@@ -234,6 +246,7 @@
 	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
 
 	dev_priv->engine.instmem.flush(dev);
+	atomic_inc(&chan->vm->pgraph_refs);
 	return 0;
 }
 
@@ -242,18 +255,31 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+	unsigned long flags;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	if (!chan->ramin)
 		return;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+	atomic_dec(&chan->vm->pgraph_refs);
 }
 
 static int
@@ -306,7 +332,7 @@
 	return 0;
 }
 
-void
+static void
 nv50_graph_context_switch(struct drm_device *dev)
 {
 	uint32_t inst;
@@ -322,8 +348,8 @@
 }
 
 static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct nouveau_gpuobj *gpuobj;
 
@@ -340,8 +366,8 @@
 }
 
 static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
 		return -ERANGE;
@@ -351,16 +377,16 @@
 }
 
 static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
-				   int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
+				   u32 class, u32 mthd, u32 data)
 {
 	chan->nvsw.vblsem_rval = data;
 	return 0;
 }
 
 static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@
 	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
 		return -EINVAL;
 
-	if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
-		      NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
-		nv_wr32(dev, NV50_PDISPLAY_INTR_1,
-			NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
-		nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
-			NV50_PDISPLAY_INTR_EN) |
-			NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
-	}
+	drm_vblank_get(dev, data);
 
+	chan->nvsw.vblsem_head = data;
 	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
-	{ 0x018c, nv50_graph_nvsw_dma_vblsem },
-	{ 0x0400, nv50_graph_nvsw_vblsem_offset },
-	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },
-	{ 0x0408, nv50_graph_nvsw_vblsem_release },
-	{}
-};
+static int
+nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
+{
+	struct nouveau_page_flip_state s;
 
-struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
-	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x5039, false, NULL }, /* m2mf */
-	{ 0x502d, false, NULL }, /* 2d */
-	{ 0x50c0, false, NULL }, /* compute */
-	{ 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
-	{ 0x5097, false, NULL }, /* tesla (nv50) */
-	{ 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
-	{ 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
-	{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
-	{}
-};
+	if (!nouveau_finish_page_flip(chan, &s)) {
+		/* XXX - Do something here */
+	}
+
+	return 0;
+}
+
+static int
+nv50_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+	/* tesla */
+	if (dev_priv->chipset == 0x50)
+		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+	else
+	if (dev_priv->chipset < 0xa0)
+		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+	else {
+		switch (dev_priv->chipset) {
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+			NVOBJ_CLASS(dev, 0x8397, GR);
+			break;
+		case 0xa3:
+		case 0xa5:
+		case 0xa8:
+			NVOBJ_CLASS(dev, 0x8597, GR);
+			break;
+		case 0xaf:
+			NVOBJ_CLASS(dev, 0x8697, GR);
+			break;
+		}
+	}
+
+	/* compute */
+	NVOBJ_CLASS(dev, 0x50c0, GR);
+	if (dev_priv->chipset  > 0xa0 &&
+	    dev_priv->chipset != 0xaa &&
+	    dev_priv->chipset != 0xac)
+		NVOBJ_CLASS(dev, 0x85c0, GR);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
 
 void
 nv50_graph_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 }
 
 void
@@ -449,8 +515,535 @@
 			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
 	}
 
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 
 	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
+
+static struct nouveau_enum nv50_mp_exec_error_names[] = {
+	{ 3, "STACK_UNDERFLOW" },
+	{ 4, "QUADON_ACTIVE" },
+	{ 8, "TIMEOUT" },
+	{ 0x10, "INVALID_OPCODE" },
+	{ 0x40, "BREAKPOINT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "IN" },
+	{ 0x00000004, "OUT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+struct nouveau_enum nv50_data_error_names[] = {
+	{ 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
+	{ 0x00000004, "INVALID_VALUE" },
+	{ 0x00000005, "INVALID_ENUM" },
+	{ 0x00000008, "INVALID_OBJECT" },
+	{ 0x00000009, "READ_ONLY_OBJECT" },
+	{ 0x0000000a, "SUPERVISOR_OBJECT" },
+	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
+	{ 0x0000000c, "INVALID_BITFIELD" },
+	{ 0x0000000d, "BEGIN_END_ACTIVE" },
+	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
+	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
+	{ 0x00000010, "RT_DOUBLE_BIND" },
+	{ 0x00000011, "RT_TYPES_MISMATCH" },
+	{ 0x00000012, "RT_LINEAR_WITH_ZETA" },
+	{ 0x00000015, "FP_TOO_FEW_REGS" },
+	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
+	{ 0x00000017, "RT_LINEAR_WITH_MSAA" },
+	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
+	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
+	{ 0x0000001a, "RT_INVALID_ALIGNMENT" },
+	{ 0x0000001b, "SAMPLER_OVER_LIMIT" },
+	{ 0x0000001c, "TEXTURE_OVER_LIMIT" },
+	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
+	{ 0x0000001f, "RT_BPP128_WITH_MS8" },
+	{ 0x00000021, "Z_OUT_OF_BOUNDS" },
+	{ 0x00000023, "XY_OUT_OF_BOUNDS" },
+	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
+	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
+	{ 0x00000029, "CP_NO_REG_SPACE_PACKED" },
+	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
+	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
+	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
+	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
+	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
+	{ 0x00000031, "ENG2D_FORMAT_MISMATCH" },
+	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
+	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
+	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
+	{ 0x00000046, "LAYER_ID_NEEDS_GP" },
+	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
+	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "COMPUTE_QUERY" },
+	{ 0x00000010, "ILLEGAL_MTHD" },
+	{ 0x00000020, "ILLEGAL_CLASS" },
+	{ 0x00000040, "DOUBLE_NOTIFY" },
+	{ 0x00001000, "CONTEXT_SWITCH" },
+	{ 0x00010000, "BUFFER_NOTIFY" },
+	{ 0x00100000, "DATA_ERROR" },
+	{ 0x00200000, "TRAP" },
+	{ 0x01000000, "SINGLE_STEP" },
+	{}
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	uint32_t addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & 1 << (i+24)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(dev, addr + 0x10);
+		status = nv_rd32(dev, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(dev, addr + 0x20);
+			pc = nv_rd32(dev, addr + 0x24);
+			oplow = nv_rd32(dev, addr + 0x70);
+			ophigh = nv_rd32(dev, addr + 0x74);
+			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_enum_print(nv50_mp_exec_error_names, status);
+			printk(" at %06x warp %d, opcode %08x %08x\n",
+					pc & 0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(dev, addr + 0x10, mp10);
+		nv_wr32(dev, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+		uint32_t ustatus_new, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int tps = 0;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	int i, r;
+	uint32_t ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			nv50_fb_vm_trap(dev, display, name);
+			if (display) {
+				NV_ERROR(dev, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(dev, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x00010000) {
+				nv50_pgraph_mp_trap(dev, i, display);
+				ustatus &= ~0x00010000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+			nv50_fb_vm_trap(dev, display, name);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(dev, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+	u32 status = nv_rd32(dev, 0x400108);
+	u32 ustatus;
+
+	if (!status && display) {
+		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+		return 1;
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+		}
+
+		nv_wr32(dev, 0x400500, 0x00000000);
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			u32 addr = nv_rd32(dev, 0x400808);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 datal = nv_rd32(dev, 0x40080c);
+			u32 datah = nv_rd32(dev, 0x400810);
+			u32 class = nv_rd32(dev, 0x400814);
+			u32 r848 = nv_rd32(dev, 0x400848);
+
+			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+			if (display && (addr & 0x80000000)) {
+				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x%08x "
+					     "400808 0x%08x 400848 0x%08x\n",
+					chid, inst, subc, class, mthd, datah,
+					datal, addr, r848);
+			} else
+			if (display) {
+				NV_INFO(dev, "PGRAPH - no stuck command?\n");
+			}
+
+			nv_wr32(dev, 0x400808, 0);
+			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+			nv_wr32(dev, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+
+		if (ustatus & 0x00000002) {
+			u32 addr = nv_rd32(dev, 0x40084c);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 data = nv_rd32(dev, 0x40085c);
+			u32 class = nv_rd32(dev, 0x400814);
+
+			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+			if (display && (addr & 0x80000000)) {
+				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x 40084c 0x%08x\n",
+					chid, inst, subc, class, mthd,
+					data, addr);
+			} else
+			if (display) {
+				NV_INFO(dev, "PGRAPH - no stuck command?\n");
+			}
+
+			nv_wr32(dev, 0x40084c, 0);
+			ustatus &= ~0x00000002;
+		}
+
+		if (ustatus && display) {
+			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+				      "0x%08x)\n", ustatus);
+		}
+
+		nv_wr32(dev, 0x400804, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x001);
+		status &= ~0x001;
+		if (!status)
+			return 0;
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 2);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x406800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+		}
+
+		nv_wr32(dev, 0x400c04, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 0x80);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x401800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+				     " %08x %08x %08x\n",
+				nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+				nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+				nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+				nv_rd32(dev, 0x40581c));
+
+		}
+
+		nv_wr32(dev, 0x405018, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+		if (display)
+			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+				    "PGRAPH - TRAP_TEXTURE");
+		nv_wr32(dev, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+				    "PGRAPH - TRAP_MP");
+		nv_wr32(dev, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+				    "PGRAPH - TRAP_TPDMA");
+		nv_wr32(dev, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+		nv_wr32(dev, 0x400108, status);
+	}
+
+	return 1;
+}
+
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
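+	/* find the channel whose instance memory matches the trapping context */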
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin)
+			continue;
+
+		if (inst == chan->ramin->vinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, 0x400100))) {
+		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+		u32 chid = nv50_graph_isr_chid(dev, inst);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400814);
+		u32 show = stat;
+
+		if (stat & 0x00000010) {
+			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+						       mthd, data))
+				show &= ~0x00000010;
+		}
+
+		if (stat & 0x00001000) {
+			nv_wr32(dev, 0x400500, 0x00000000);
+			nv_wr32(dev, 0x400100, 0x00001000);
+			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+			nv50_graph_context_switch(dev);
+			stat &= ~0x00001000;
+			show &= ~0x00001000;
+		}
+
+		show = (show && nouveau_ratelimit()) ? show : 0;
+
+		if (show & 0x00100000) {
+			u32 ecode = nv_rd32(dev, 0x400110);
+			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+			nouveau_enum_print(nv50_data_error_names, ecode);
+			printk("\n");
+		}
+
+		if (stat & 0x00200000) {
+			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+				show &= ~0x00200000;
+		}
+
+		nv_wr32(dev, 0x400100, stat);
+		nv_wr32(dev, 0x400500, 0x00010001);
+
+		if (show) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv50_graph_intr, show);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+				chid, inst, subc, class, mthd, data);
+		}
+	}
+
+	if (nv_rd32(dev, 0x400824) & (1 << 31))
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229..2e1b1cd 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
 
 #include "drmP.h"
 #include "drm.h"
+
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
 
 struct nv50_instmem_priv {
 	uint32_t save1700[5]; /* 0x1700->0x1710 */
 
-	struct nouveau_gpuobj *pramin_pt;
-	struct nouveau_gpuobj *pramin_bar;
-	struct nouveau_gpuobj *fb_bar;
+	struct nouveau_gpuobj *bar1_dmaobj;
+	struct nouveau_gpuobj *bar3_dmaobj;
 };
 
 static void
@@ -48,6 +54,7 @@
 		return;
 
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@
 }
 
 static int
-nv50_channel_new(struct drm_device *dev, u32 size,
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
 		 struct nouveau_channel **pchan)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
 	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
 	struct nouveau_channel *chan;
-	int ret;
+	int ret, i;
 
 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 	if (!chan)
@@ -92,6 +99,17 @@
 		return ret;
 	}
 
+	for (i = 0; i < 0x4000; i += 8) {
+		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+	}
+
+	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+	if (ret) {
+		nv50_channel_del(&chan);
+		return ret;
+	}
+
 	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
 				      chan->ramin->pinst + fc,
 				      chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv;
 	struct nouveau_channel *chan;
+	struct nouveau_vm *vm;
 	int ret, i;
 	u32 tmp;
 
@@ -127,112 +146,87 @@
 	ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
 	if (ret) {
 		NV_ERROR(dev, "Failed to init RAMIN heap\n");
-		return -ENOMEM;
+		goto error;
 	}
 
-	/* we need a channel to plug into the hw to control the BARs */
-	ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+	/* BAR3 */
+	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+			     &dev_priv->bar3_vm);
 	if (ret)
-		return ret;
-	chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+		goto error;
 
-	/* allocate page table for PRAMIN BAR */
-	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
-				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
-				 &priv->pramin_pt);
+	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+				 0x1000, NVOBJ_FLAG_DONT_MAP |
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &dev_priv->bar3_vm->pgt[0].obj[0]);
 	if (ret)
-		return ret;
+		goto error;
+	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
 
-	nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
-	nv_wo32(chan->vm_pd, 0x0004, 0);
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
 
-	/* DMA object for PRAMIN BAR */
-	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
+	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
 	if (ret)
-		return ret;
-	nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
-	nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
-	nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
+		goto error;
+	dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
 
-	/* map channel into PRAMIN, gpuobj didn't do it for us */
-	ret = nv50_instmem_bind(dev, chan->ramin);
+	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+				  &priv->bar3_dmaobj);
 	if (ret)
-		return ret;
+		goto error;
 
-	/* poke regs... */
 	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
 	nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
-	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
-
-	tmp = nv_ri32(dev, 0);
-	nv_wi32(dev, 0, ~tmp);
-	if (nv_ri32(dev, 0) != ~tmp) {
-		NV_ERROR(dev, "PRAMIN readback failed\n");
-		return -EIO;
-	}
-	nv_wi32(dev, 0, tmp);
-
-	dev_priv->ramin_available = true;
-
-	/* Determine VM layout */
-	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
-	dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
-	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-	dev_priv->vm_vram_size = dev_priv->vram_size;
-	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
-		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
-	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
-	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
-	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
-	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
-		 dev_priv->vm_gart_base,
-		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
-	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
-		 dev_priv->vm_vram_base,
-		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
-	/* VRAM page table(s), mapped into VM at +1GiB  */
-	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-		ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
-					 0, NVOBJ_FLAG_ZERO_ALLOC,
-					 &chan->vm_vram_pt[i]);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
-			dev_priv->vm_vram_pt_nr = i;
-			return ret;
-		}
-		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
-
-		nv_wo32(chan->vm_pd, 0x10 + (i*8),
-			chan->vm_vram_pt[i]->vinst | 0x61);
-		nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
-	}
-
-	/* DMA object for FB BAR */
-	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
-	if (ret)
-		return ret;
-	nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
-	nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
-				    pci_resource_len(dev->pdev, 1) - 1);
-	nv_wo32(priv->fb_bar, 0x08, 0x40000000);
-	nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
-	nv_wo32(priv->fb_bar, 0x10, 0x00000000);
-	nv_wo32(priv->fb_bar, 0x14, 0x00000000);
+	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
 
 	dev_priv->engine.instmem.flush(dev);
+	dev_priv->ramin_available = true;
 
-	nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
+	tmp = nv_ro32(chan->ramin, 0);
+	nv_wo32(chan->ramin, 0, ~tmp);
+	if (nv_ro32(chan->ramin, 0) != ~tmp) {
+		NV_ERROR(dev, "PRAMIN readback failed\n");
+		ret = -EIO;
+		goto error;
+	}
+	nv_wo32(chan->ramin, 0, tmp);
+
+	/* BAR1 */
+	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+				  &priv->bar1_dmaobj);
+	if (ret)
+		goto error;
+
+	nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
 
+	/* Create shared channel VM; space is reserved at the beginning
+	 * to catch "NULL pointer" references
+	 */
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+			     &dev_priv->chan_vm);
+	if (ret)
+		return ret;
+
 	return 0;
+
+error:
+	nv50_instmem_takedown(dev);
+	return ret;
 }
 
 void
@@ -240,7 +234,7 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
 	NV_DEBUG(dev, "\n");
@@ -250,23 +244,23 @@
 
 	dev_priv->ramin_available = false;
 
-	/* Restore state from before init */
+	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
 
-	nouveau_gpuobj_ref(NULL, &priv->fb_bar);
-	nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
-	nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
+	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
 
-	/* Destroy dummy channel */
-	if (chan) {
-		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-			nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
-		dev_priv->vm_vram_pt_nr = 0;
+	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+	dev_priv->channels.ptr[127] = 0;
+	nv50_channel_del(&dev_priv->channels.ptr[0]);
 
-		nv50_channel_del(&dev_priv->fifos[0]);
-		dev_priv->fifos[127] = NULL;
-	}
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+	if (dev_priv->ramin_heap.free_stack.next)
+		drm_mm_takedown(&dev_priv->ramin_heap);
 
 	dev_priv->engine.instmem.priv = NULL;
 	kfree(priv);
@@ -276,16 +270,8 @@
 nv50_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	int i;
 
-	ramin->im_backing_suspend = vmalloc(ramin->size);
-	if (!ramin->im_backing_suspend)
-		return -ENOMEM;
-
-	for (i = 0; i < ramin->size; i += 4)
-		ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+	dev_priv->ramin_available = false;
 	return 0;
 }
 
@@ -294,146 +280,121 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
-	struct nouveau_gpuobj *ramin = chan->ramin;
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
-	dev_priv->ramin_available = false;
-	dev_priv->ramin_base = ~0;
-	for (i = 0; i < ramin->size; i += 4)
-		nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
-	dev_priv->ramin_available = true;
-	vfree(ramin->im_backing_suspend);
-	ramin->im_backing_suspend = NULL;
-
 	/* Poke the relevant regs, and pray it works :) */
 	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
 	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
 	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
 					 NV50_PUNK_BAR_CFG_BASE_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
+	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
 					NV50_PUNK_BAR1_CTXDMA_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
+	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
 					NV50_PUNK_BAR3_CTXDMA_VALID);
 
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
+
+	dev_priv->ramin_available = true;
 }
 
+struct nv50_gpuobj_node {
+	struct nouveau_vram *vram;
+	struct nouveau_vma chan_vma;
+	u32 align;
+};
+
 int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_device *dev = gpuobj->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nv50_gpuobj_node *node = NULL;
 	int ret;
 
-	if (gpuobj->im_backing)
-		return -EINVAL;
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->align = align;
 
-	*sz = ALIGN(*sz, 4096);
-	if (*sz == 0)
-		return -EINVAL;
+	size  = (size + 4095) & ~4095;
+	align = max(align, (u32)4096);
 
-	ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
-			     true, false, &gpuobj->im_backing);
+	ret = vram->get(dev, size, align, 0, 0, &node->vram);
 	if (ret) {
-		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+		kfree(node);
 		return ret;
 	}
 
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		return ret;
+	gpuobj->vinst = node->vram->offset;
+
+	if (gpuobj->flags & NVOBJ_FLAG_VM) {
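+	/* objects destined for the shared channel VM also get mapped there,
+	 * in which case vinst holds the virtual offset rather than the raw
+	 * VRAM address
+	 */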
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
+				     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+				     &node->chan_vma);
+		if (ret) {
+			vram->put(dev, &node->vram);
+			kfree(node);
+			return ret;
+		}
+
+		nouveau_vm_map(&node->chan_vma, node->vram);
+		gpuobj->vinst = node->chan_vma.offset;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+	gpuobj->size = size;
+	gpuobj->node = node;
 	return 0;
 }
 
 void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
+	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nv50_gpuobj_node *node;
 
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
+	node = gpuobj->node;
+	gpuobj->node = NULL;
+
+	if (node->chan_vma.node) {
+		nouveau_vm_unmap(&node->chan_vma);
+		nouveau_vm_put(&node->chan_vma);
 	}
+	vram->put(dev, &node->vram);
+	kfree(node);
 }
 
 int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
-	uint32_t pte, pte_end;
-	uint64_t vram;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nv50_gpuobj_node *node = gpuobj->node;
+	int ret;
 
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
+	ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+			     NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+	if (ret)
+		return ret;
 
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-	vram    = gpuobj->vinst;
-
-	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
-	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
-	vram |= 1;
-	if (dev_priv->vram_sys_base) {
-		vram += dev_priv->vram_sys_base;
-		vram |= 0x30;
-	}
-
-	while (pte < pte_end) {
-		nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
-		nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
-		vram += 0x1000;
-		pte += 2;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	nv50_vm_flush(dev, 6);
-
-	gpuobj->im_bound = 1;
+	nouveau_vm_map(&node->vram->bar_vma, node->vram);
+	gpuobj->pinst = node->vram->bar_vma.offset;
 	return 0;
 }
 
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end;
+	struct nv50_gpuobj_node *node = gpuobj->node;
 
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
-
-	/* can happen during late takedown */
-	if (unlikely(!dev_priv->ramin_available))
-		return 0;
-
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-
-	while (pte < pte_end) {
-		nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
-		nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
-		pte += 2;
+	if (node->vram->bar_vma.node) {
+		nouveau_vm_unmap(&node->vram->bar_vma);
+		nouveau_vm_put(&node->vram->bar_vma);
 	}
-	dev_priv->engine.instmem.flush(dev);
-
-	gpuobj->im_bound = 0;
-	return 0;
 }
 
 void
@@ -452,11 +413,3 @@
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 0000000..38e523e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		struct nouveau_gpuobj *pgt[2])
+{
+	struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+	u64 phys = 0xdeadcafe00000000ULL;
+	u32 coverage = 0;
+
+	if (pgt[0]) {
+		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+		coverage = (pgt[0]->size >> 3) << 12;
+	} else
+	if (pgt[1]) {
+		phys = 0x00000001 | pgt[1]->vinst; /* present */
+		coverage = (pgt[1]->size >> 3) << 16;
+	}
+
+	if (phys & 1) {
+		if (dev_priv->vram_sys_base) {
+			phys += dev_priv->vram_sys_base;
+			phys |= 0x30;
+		}
+
+		if (coverage <= 32 * 1024 * 1024)
+			phys |= 0x60;
+		else if (coverage <= 64 * 1024 * 1024)
+			phys |= 0x40;
+		else if (coverage < 128 * 1024 * 1024)
+			phys |= 0x20;
+	}
+
+	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	     u64 phys, u32 memtype, u32 target)
+{
+	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+	phys |= 1; /* present */
+	phys |= (u64)memtype << 40;
+
+	/* IGPs don't have real VRAM; re-target to stolen system memory */
+	if (target == 0 && dev_priv->vram_sys_base) {
+		phys  += dev_priv->vram_sys_base;
+		target = 3;
+	}
+
+	phys |= target << 4;
+
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= (1 << 6);
+
+	if (!(vma->access & NV_MEM_ACCESS_WO))
+		phys |= (1 << 3);
+
+	return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+	u32 block;
+	int i;
+
+	phys  = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+	pte <<= 3;
+	cnt <<= 3;
+
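+	/* write the mapping in the largest naturally aligned runs possible;
+	 * the size of each run (i, a power-of-two number of PTEs) is encoded
+	 * alongside the low half of the address
+	 */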
+	while (cnt) {
+		u32 offset_h = upper_32_bits(phys);
+		u32 offset_l = lower_32_bits(phys);
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 3);
+			if (cnt >= block && !(pte & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
+
+		phys += block << (vma->node->type - 3);
+		cnt  -= block;
+
+		while (block) {
+			nv_wo32(pgt, pte + 0, offset_l);
+			nv_wo32(pgt, pte + 4, offset_h);
+			pte += 8;
+			block -= 8;
+		}
+	}
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       u32 pte, dma_addr_t *list, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+	pinstmem->flush(vm->dev);
+
+	/* BAR */
+	if (vm != dev_priv->chan_vm) {
+		nv50_vm_flush_engine(vm->dev, 6);
+		return;
+	}
+
+	pfifo->tlb_flush(vm->dev);
+
+	if (atomic_read(&vm->pgraph_refs))
+		pgraph->tlb_flush(vm->dev);
+	if (atomic_read(&vm->pcrypt_refs))
+		pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 0000000..58e98ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static int types[0x80] = {
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+	if (likely(type < ARRAY_SIZE(types) && types[type]))
+		return true;
+	return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *this;
+	struct nouveau_vram *vram;
+
+	vram = *pvram;
+	*pvram = NULL;
+	if (unlikely(vram == NULL))
+		return;
+
+	mutex_lock(&mm->mutex);
+	while (!list_empty(&vram->regions)) {
+		this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+		list_del(&this->rl_entry);
+		nouveau_mm_put(mm, this);
+	}
+	mutex_unlock(&mm->mutex);
+
+	kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+	      u32 type, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	struct nouveau_vram *vram;
+	int ret;
+
+	if (!types[type])
+		return -EINVAL;
+	size >>= 12;
+	align >>= 12;
+	size_nc >>= 12;
+
+	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+	if (!vram)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vram->regions);
+	vram->dev = dev_priv->dev;
+	vram->memtype = type;
+	vram->size = size;
+
+	mutex_lock(&mm->mutex);
+	do {
+		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+		if (ret) {
+			mutex_unlock(&mm->mutex);
+			nv50_vram_del(dev, &vram);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &vram->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&mm->mutex);
+
+	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+	vram->offset = (u64)r->offset << 12;
+	*pvram = vram;
+	return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru, rblock_size;
+
+	r0 = nv_rd32(dev, 0x100200);
+	r4 = nv_rd32(dev, 0x100204);
+	rt = nv_rd32(dev, 0x100250);
+	ru = nv_rd32(dev, 0x001540);
+	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
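+	/* count the memory partitions enabled in 0x001540 (bits 16..23, one per partition) */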
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = ((r4 & 0x01000000) ? 8 : 4);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != dev_priv->vram_size) {
+		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+			(u32)(dev_priv->vram_size >> 20));
+		NV_WARN(dev, "we calculated %dMiB VRAM\n",
+			(u32)(predicted >> 20));
+	}
+
+	rblock_size = rowsize;
+	if (rt & 1)
+		rblock_size *= 3;
+
+	NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+	return rblock_size;
+}
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
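+	/* 0x10020c: bits 31:8 give the low part of the VRAM size, bits 7:0
+	 * apparently hold bits 39:32 (see the shuffle below)
+	 */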
+	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
+	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+	dev_priv->vram_size &= 0xffffffff00ULL;
+
+	switch (dev_priv->chipset) {
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+		dev_priv->vram_rblock_size = 4096;
+		break;
+	default:
+		dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 0000000..ec18ae1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+
+static void nv84_crypt_isr(struct drm_device *);
+
+int
+nv84_crypt_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramin = chan->ramin;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	ret = nouveau_gpuobj_new(dev, chan, 256, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 &chan->crypt_ctx);
+	if (ret)
+		return ret;
+
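+	/* point the channel's RAMIN at the new context: 0xa4/0xa8 hold what
+	 * appear to be the context limit (base + 0xff) and base address
+	 */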
+	nv_wo32(ramin, 0xa0, 0x00190000);
+	nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
+	nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
+	nv_wo32(ramin, 0xac, 0);
+	nv_wo32(ramin, 0xb0, 0);
+	nv_wo32(ramin, 0xb4, 0);
+
+	dev_priv->engine.instmem.flush(dev);
+	atomic_inc(&chan->vm->pcrypt_refs);
+	return 0;
+}
+
+void
+nv84_crypt_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	u32 inst;
+
+	if (!chan->crypt_ctx)
+		return;
+
+	inst  = (chan->ramin->vinst >> 12);
+	inst |= 0x80000000;
+
+	/* Mark the context as invalid if it's still on the hardware;
+	 * not doing this causes issues the next time PCRYPT is used,
+	 * unsurprisingly :)
+	 */
+	nv_wr32(dev, 0x10200c, 0x00000000);
+	if (nv_rd32(dev, 0x102188) == inst)
+		nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+	if (nv_rd32(dev, 0x10218c) == inst)
+		nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+	nv_wr32(dev, 0x10200c, 0x00000010);
+
+	nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+	atomic_dec(&chan->vm->pcrypt_refs);
+}
+
+void
+nv84_crypt_tlb_flush(struct drm_device *dev)
+{
+	nv50_vm_flush_engine(dev, 0x0a);
+}
+
+int
+nv84_crypt_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+	if (!pcrypt->registered) {
+		NVOBJ_CLASS(dev, 0x74c1, CRYPT);
+		pcrypt->registered = true;
+	}
+
+	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+	nouveau_irq_register(dev, 14, nv84_crypt_isr);
+	nv_wr32(dev, 0x102130, 0xffffffff);
+	nv_wr32(dev, 0x102140, 0xffffffbf);
+
+	nv_wr32(dev, 0x10200c, 0x00000010);
+	return 0;
+}
+
+void
+nv84_crypt_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x102140, 0x00000000);
+	nouveau_irq_unregister(dev, 14);
+}
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+	u32 stat = nv_rd32(dev, 0x102130);
+	u32 mthd = nv_rd32(dev, 0x102190);
+	u32 data = nv_rd32(dev, 0x102194);
+	u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+	int show = nouveau_ratelimit();
+
+	if (show) {
+		NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			     stat, mthd, data, inst);
+	}
+
+	nv_wr32(dev, 0x102130, stat);
+	nv_wr32(dev, 0x10200c, 0x10);
+
+	nv50_fb_vm_trap(dev, show, "PCRYPT");
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 0000000..fa5d4c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING  (chan, 1);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING  (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING  (chan, rect->color);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+	OUT_RING  (chan, rect->dx);
+	OUT_RING  (chan, rect->dy);
+	OUT_RING  (chan, rect->dx + rect->width);
+	OUT_RING  (chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING  (chan, 3);
+	}
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+	OUT_RING  (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+	OUT_RING  (chan, region->dx);
+	OUT_RING  (chan, region->dy);
+	OUT_RING  (chan, region->width);
+	OUT_RING  (chan, region->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, region->sx);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, region->sy);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+	int ret;
+
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
+
+	width = ALIGN(image->width, 32);
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING  (chan, palette[image->bg_color] | mask);
+		OUT_RING  (chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING  (chan, image->bg_color);
+		OUT_RING  (chan, image->fg_color);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+	OUT_RING  (chan, image->width);
+	OUT_RING  (chan, image->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, image->dx);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, image->dy);
+
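+	/* push the 1bpp image data; 2047 dwords per submission is presumably
+	 * the limit of the method count field
+	 */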
+	while (dwords) {
+		int push = dwords > 2047 ? 2047 : dwords;
+
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
+
+		dwords -= push;
+
+		BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+		OUT_RINGp(chan, data, push);
+		data += push;
+	}
+
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+	int ret, format;
+
+	ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+	if (ret)
+		return ret;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		format = 0xf3;
+		break;
+	case 15:
+		format = 0xf8;
+		break;
+	case 16:
+		format = 0xe8;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32, just use 24.. */
+			format = 0xe6;
+			break;
+		case 2: /* depth 30 */
+			format = 0xd1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = RING_SPACE(chan, 60);
+	if (ret) {
+		WARN_ON(1);
+		nouveau_fbcon_gpu_lockup(info);
+		return ret;
+	}
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+	OUT_RING  (chan, 0x0000902d);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+	OUT_RING  (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+	OUT_RING  (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+	OUT_RING  (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+	OUT_RING  (chan, 3);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+	OUT_RING  (chan, 0x55);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+	OUT_RING  (chan, 4);
+	OUT_RING  (chan, format);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+	OUT_RING  (chan, 2);
+	OUT_RING  (chan, 1);
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+	OUT_RING  (chan, format);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+	OUT_RING  (chan, format);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, info->fix.line_length);
+	OUT_RING  (chan, info->var.xres_virtual);
+	OUT_RING  (chan, info->var.yres_virtual);
+	OUT_RING  (chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING  (chan, lower_32_bits(nvbo->vma.offset));
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+	OUT_RING  (chan, format);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, info->fix.line_length);
+	OUT_RING  (chan, info->var.xres_virtual);
+	OUT_RING  (chan, info->var.yres_virtual);
+	OUT_RING  (chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING  (chan, lower_32_bits(nvbo->vma.offset));
+	FIRE_RING (chan);
+
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 890c2b9..e6f92c54 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -25,6 +25,49 @@
 #include "drmP.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static void nvc0_fifo_isr(struct drm_device *);
+
+struct nvc0_fifo_priv {
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+	struct nouveau_vma user_vma;
+	int spoon_nr;
+};
+
+struct nvc0_fifo_chan {
+	struct nouveau_bo *user;
+	struct nouveau_gpuobj *ramfc;
+};
+
+static void
+nvc0_fifo_playlist_update(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv = pfifo->priv;
+	struct nouveau_gpuobj *cur;
+	int i, p;
+
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
+
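+	/* write an entry for every channel whose enable bit is set in
+	 * 0x003004 + (chid * 8), then point the hardware at the new list
+	 */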
+	for (i = 0, p = 0; i < 128; i++) {
+		if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000004);
+		p += 8;
+	}
+	pinstmem->flush(dev);
+
+	nv_wr32(dev, 0x002270, cur->vinst >> 12);
+	nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
+	if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
+		NV_ERROR(dev, "PFIFO - playlist update failed\n");
+}
 
 void
 nvc0_fifo_disable(struct drm_device *dev)
@@ -57,12 +100,135 @@
 int
 nvc0_fifo_create_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv = pfifo->priv;
+	struct nvc0_fifo_chan *fifoch;
+	u64 ib_virt, user_vinst;
+	int ret;
+
+	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
+	if (!chan->fifo_priv)
+		return -ENOMEM;
+	fifoch = chan->fifo_priv;
+
+	/* allocate vram for control regs, map into polling area */
+	ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
+			     0, 0, true, true, &fifoch->user);
+	if (ret)
+		goto error;
+
+	ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		nouveau_bo_ref(NULL, &fifoch->user);
+		goto error;
+	}
+
+	user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
+
+	ret = nouveau_bo_map(fifoch->user);
+	if (ret) {
+		nouveau_bo_unpin(fifoch->user);
+		nouveau_bo_ref(NULL, &fifoch->user);
+		goto error;
+	}
+
+	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+			  fifoch->user->bo.mem.mm_node);
+
+	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+				priv->user_vma.offset + (chan->id * 0x1000),
+				PAGE_SIZE);
+	if (!chan->user) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+
+	/* zero channel regs */
+	nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
+
+	/* ramfc */
+	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+				      chan->ramin->vinst, 0x100,
+				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+	if (ret)
+		goto error;
+
+	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
+	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
+	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
+	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
+	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
+	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+				   upper_32_bits(ib_virt));
+	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
+	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
+	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
+	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
+	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
+	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
+	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
+	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
+	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+	pinstmem->flush(dev);
+
+	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
+						(chan->ramin->vinst >> 12));
+	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
+	nvc0_fifo_playlist_update(dev);
 	return 0;
+
+error:
+	pfifo->destroy_context(chan);
+	return ret;
 }
 
 void
 nvc0_fifo_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct nvc0_fifo_chan *fifoch;
+
+	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+	nv_wr32(dev, 0x002634, chan->id);
+	if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
+		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+
+	nvc0_fifo_playlist_update(dev);
+
+	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
+
+	fifoch = chan->fifo_priv;
+	chan->fifo_priv = NULL;
+	if (!fifoch)
+		return;
+
+	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
+	if (fifoch->user) {
+		nouveau_bo_unmap(fifoch->user);
+		nouveau_bo_unpin(fifoch->user);
+		nouveau_bo_ref(NULL, &fifoch->user);
+	}
+	kfree(fifoch);
 }
 
 int
@@ -77,14 +243,213 @@
 	return 0;
 }
 
+static void
+nvc0_fifo_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv;
+
+	priv = pfifo->priv;
+	if (!priv)
+		return;
+
+	nouveau_vm_put(&priv->user_vma);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+	kfree(priv);
+}
+
 void
 nvc0_fifo_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, 0x002140, 0x00000000);
+	nvc0_fifo_destroy(dev);
+}
+
+static int
+nvc0_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	pfifo->priv = priv;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+				 &priv->playlist[0]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+				 &priv->playlist[1]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
+			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
+	if (ret)
+		goto error;
+
+	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	return 0;
+
+error:
+	nvc0_fifo_destroy(dev);
+	return ret;
 }
 
 int
 nvc0_fifo_init(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv;
+	int ret, i;
+
+	if (!pfifo->priv) {
+		ret = nvc0_fifo_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pfifo->priv;
+
+	/* reset PFIFO, enable all available PSUBFIFO areas */
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+	nv_wr32(dev, 0x000204, 0xffffffff);
+	nv_wr32(dev, 0x002204, 0xffffffff);
+
+	priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
+	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+	/* assign engines to subfifos */
+	if (priv->spoon_nr >= 3) {
+		nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
+		nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
+		nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
+		nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
+		nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
+	}
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < 3; i++) {
+		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+	}
+
+	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
+	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
+
+	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xbfffffff);
 	return 0;
 }
 
+struct nouveau_enum nvc0_fifo_fault_unit[] = {
+	{ 0, "PGRAPH" },
+	{ 3, "PEEPHOLE" },
+	{ 4, "BAR1" },
+	{ 5, "BAR3" },
+	{ 7, "PFIFO" },
+	{}
+};
+
+struct nouveau_enum nvc0_fifo_fault_reason[] = {
+	{ 0, "PT_NOT_PRESENT" },
+	{ 1, "PT_TOO_SHORT" },
+	{ 2, "PAGE_NOT_PRESENT" },
+	{ 3, "VM_LIMIT_EXCEEDED" },
+	{}
+};
+
+struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/*	{ 0x00008000, "" }	seen with null ib push */
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+
+	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+	printk("] from ");
+	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+
+	NV_INFO(dev, "PSUBFIFO %d:", unit);
+	nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
+	printk("\n");
+	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+		unit, chid, subc, mthd, data);
+
+	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_isr(struct drm_device *dev)
+{
+	u32 stat = nv_rd32(dev, 0x002100);
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(dev, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_vm_fault(dev, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(dev, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(dev, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_subfifo_intr(dev, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(dev, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+		nv_wr32(dev, 0x002100, stat);
+	}
+
+	nv_wr32(dev, 0x2140, 0);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 717a517..5feacd5 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -22,9 +22,16 @@
  * Authors: Ben Skeggs
  */
 
+#include <linux/firmware.h>
+
 #include "drmP.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
+static void nvc0_graph_isr(struct drm_device *);
+static int  nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
 
 void
 nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@@ -37,39 +44,735 @@
 	return NULL;
 }
 
+static int
+nvc0_graph_construct_context(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int ret, i;
+	u32 *ctx;
+
+	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
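+	/* load this channel's context, run nvc0_grctx_generate() to produce
+	 * its initial state, then unload it and keep a copy; the copy is
+	 * reused as the template for every subsequent context
+	 */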
+	nvc0_graph_load_context(chan);
+
+	nv_wo32(grch->grctx, 0x1c, 1);
+	nv_wo32(grch->grctx, 0x20, 0);
+	nv_wo32(grch->grctx, 0x28, 0);
+	nv_wo32(grch->grctx, 0x2c, 0);
+	dev_priv->engine.instmem.flush(dev);
+
+	ret = nvc0_grctx_generate(chan);
+	if (ret) {
+		kfree(ctx);
+		return ret;
+	}
+
+	ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+	if (ret) {
+		kfree(ctx);
+		return ret;
+	}
+
+	for (i = 0; i < priv->grctx_size; i += 4)
+		ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+	priv->grctx_vals = ctx;
+	return 0;
+}
+
+static int
+nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int i = 0, gpc, tp, ret;
+	u32 magic;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+				 &grch->unk408004);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+				 &grch->unk40800c);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+				 &grch->unk418810);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+				 &grch->mmio);
+	if (ret)
+		return ret;
+
+
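+	/* build a list of (register, value) pairs in the mmio object; its
+	 * length and address are handed to the context via grctx +0x10 and
+	 * +0x14/0x18 in nvc0_graph_create_context() below
+	 */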
+	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000000);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
+	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
+	nv_wo32(grch->mmio, i++ * 4, 0x00000000);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+	magic = 0x02180000;
+	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+	nv_wo32(grch->mmio, i++ * 4, magic);
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+			nv_wo32(grch->mmio, i++ * 4, reg);
+			nv_wo32(grch->mmio, i++ * 4, magic);
+		}
+	}
+
+	grch->mmio_nr = i / 2;
+	return 0;
+}
+
 int
 nvc0_graph_create_context(struct nouveau_channel *chan)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv = pgraph->priv;
+	struct nvc0_graph_chan *grch;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *grctx;
+	int ret, i;
+
+	chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
+	if (!chan->pgraph_ctx)
+		return -ENOMEM;
+	grch = chan->pgraph_ctx;
+
+	ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+				 &grch->grctx);
+	if (ret)
+		goto error;
+	chan->ramin_grctx = grch->grctx;
+	grctx = grch->grctx;
+
+	ret = nvc0_graph_create_context_mmio_list(chan);
+	if (ret)
+		goto error;
+
+	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
+	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+	pinstmem->flush(dev);
+
+	if (!priv->grctx_vals) {
+		ret = nvc0_graph_construct_context(chan);
+		if (ret)
+			goto error;
+	}
+
+	for (i = 0; i < priv->grctx_size; i += 4)
+		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+
+	nv_wo32(grctx, 0xf4, 0);
+	nv_wo32(grctx, 0xf8, 0);
+	nv_wo32(grctx, 0x10, grch->mmio_nr);
+	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
+	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
+	nv_wo32(grctx, 0x1c, 1);
+	nv_wo32(grctx, 0x20, 0);
+	nv_wo32(grctx, 0x28, 0);
+	nv_wo32(grctx, 0x2c, 0);
+	pinstmem->flush(dev);
 	return 0;
+
+error:
+	pgraph->destroy_context(chan);
+	return ret;
 }
 
 void
 nvc0_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct nvc0_graph_chan *grch;
+
+	grch = chan->pgraph_ctx;
+	chan->pgraph_ctx = NULL;
+	if (!grch)
+		return;
+
+	nouveau_gpuobj_ref(NULL, &grch->mmio);
+	nouveau_gpuobj_ref(NULL, &grch->unk418810);
+	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+	nouveau_gpuobj_ref(NULL, &grch->unk408004);
+	nouveau_gpuobj_ref(NULL, &grch->grctx);
+	chan->ramin_grctx = NULL;
 }
 
 int
 nvc0_graph_load_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+
+	nv_wr32(dev, 0x409840, 0x00000030);
+	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+	nv_wr32(dev, 0x409504, 0x00000003);
+	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+	return 0;
+}
+
+static int
+nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+	nv_wr32(dev, 0x409840, 0x00000003);
+	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+	nv_wr32(dev, 0x409504, 0x00000009);
+	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+		return -EBUSY;
+	}
+
 	return 0;
 }
 
 int
 nvc0_graph_unload_context(struct drm_device *dev)
 {
-	return 0;
+	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+	return nvc0_graph_unload_context_to(dev, inst);
+}
+
+static void
+nvc0_graph_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv;
+
+	priv = pgraph->priv;
+	if (!priv)
+		return;
+
+	nouveau_irq_unregister(dev, 12);
+
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+	kfree(priv->grctx_vals);
+	kfree(priv);
 }
 
 void
 nvc0_graph_takedown(struct drm_device *dev)
 {
+	nvc0_graph_destroy(dev);
+}
+
+static int
+nvc0_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv;
+	int ret, gpc, i;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	pgraph->priv = priv;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+	if (ret)
+		goto error;
+
+	for (i = 0; i < 0x1000; i += 4) {
+		nv_wo32(priv->unk4188b4, i, 0x00000010);
+		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	}
+
+	priv->gpc_nr  =  nv_rd32(dev, 0x409604) & 0x0000001f;
+	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+		priv->tp_total += priv->tp_nr[gpc];
+	}
+
+	/*XXX: these need figuring out... */
+	switch (dev_priv->chipset) {
+	case 0xc0:
+		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
+			priv->magic_not_rop_nr = 0x07;
+			/* filled values up to tp_total, the rest 0 */
+			priv->magicgpc980[0]   = 0x22111000;
+			priv->magicgpc980[1]   = 0x00000233;
+			priv->magicgpc980[2]   = 0x00000000;
+			priv->magicgpc980[3]   = 0x00000000;
+			priv->magicgpc918      = 0x000ba2e9;
+		} else
+		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
+			priv->magic_not_rop_nr = 0x05;
+			priv->magicgpc980[0]   = 0x11110000;
+			priv->magicgpc980[1]   = 0x00233222;
+			priv->magicgpc980[2]   = 0x00000000;
+			priv->magicgpc980[3]   = 0x00000000;
+			priv->magicgpc918      = 0x00092493;
+		} else
+		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
+			priv->magic_not_rop_nr = 0x06;
+			priv->magicgpc980[0]   = 0x11110000;
+			priv->magicgpc980[1]   = 0x03332222;
+			priv->magicgpc980[2]   = 0x00000000;
+			priv->magicgpc980[3]   = 0x00000000;
+			priv->magicgpc918      = 0x00088889;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		priv->magic_not_rop_nr = 0x03;
+		priv->magicgpc980[0]   = 0x00003210;
+		priv->magicgpc980[1]   = 0x00000000;
+		priv->magicgpc980[2]   = 0x00000000;
+		priv->magicgpc980[3]   = 0x00000000;
+		priv->magicgpc918      = 0x00200000;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		priv->magic_not_rop_nr = 0x01;
+		priv->magicgpc980[0]   = 0x02321100;
+		priv->magicgpc980[1]   = 0x00000000;
+		priv->magicgpc980[2]   = 0x00000000;
+		priv->magicgpc980[3]   = 0x00000000;
+		priv->magicgpc918      = 0x00124925;
+		break;
+	}
+
+	if (!priv->magic_not_rop_nr) {
+		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+			 priv->tp_nr[3], priv->rop_nr);
+		/* use 0xc3's values... */
+		priv->magic_not_rop_nr = 0x03;
+		priv->magicgpc980[0]   = 0x00003210;
+		priv->magicgpc980[1]   = 0x00000000;
+		priv->magicgpc980[2]   = 0x00000000;
+		priv->magicgpc980[3]   = 0x00000000;
+		priv->magicgpc918      = 0x00200000;
+	}
+
+	nouveau_irq_register(dev, 12, nvc0_graph_isr);
+	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
+	return 0;
+
+error:
+	nvc0_graph_destroy(dev);
+	return ret;
+}
+
+static void
+nvc0_graph_init_obj418880(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv = pgraph->priv;
+	int i;
+
+	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x400080, 0x003083c2);
+	nv_wr32(dev, 0x400088, 0x00006fe7);
+	nv_wr32(dev, 0x40008c, 0x00000000);
+	nv_wr32(dev, 0x400090, 0x00000030);
+	nv_wr32(dev, 0x40013c, 0x013901f7);
+	nv_wr32(dev, 0x400140, 0x00000100);
+	nv_wr32(dev, 0x400144, 0x00000000);
+	nv_wr32(dev, 0x400148, 0x00000110);
+	nv_wr32(dev, 0x400138, 0x00000000);
+	nv_wr32(dev, 0x400130, 0x00000000);
+	nv_wr32(dev, 0x400134, 0x00000000);
+	nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	int gpc;
+
+	/*
+	 *           TP      ROP  UNKVAL (magic_not_rop_nr)
+	 *      450: 4/0/0/0 2    3
+	 *      460: 3/4/0/0 4    1
+	 *      465: 3/4/4/0 4    7
+	 *      470: 3/3/4/4 5    5
+	 *      480: 3/4/4/4 6    6
+	 *
+	 * magicgpc918:
+	 *      450: 00200000 00000000001000000000000000000000
+	 *      460: 00124925 00000000000100100100100100100101
+	 *      465: 000ba2e9 00000000000010111010001011101001
+	 *      470: 00092493 00000000000010010010010010010011
+	 *      480: 00088889 00000000000010001000100010001001
+	 *
+	 * magicgpc980[] (filled values up to tp_total, remainder 0):
+	 *      450: 00003210 00000000 00000000 00000000
+	 *      460: 02321100 00000000 00000000 00000000
+	 *      465: 22111000 00000233 00000000 00000000
+	 *      470: 11110000 00233222 00000000 00000000
+	 *      480: 11110000 03332222 00000000 00000000
+	 */
+
+	nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
+	nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
+	nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
+	nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+						  priv->tp_nr[gpc]);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+	}
+
+	nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+	nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
+}
+
+static void
+nvc0_graph_init_units(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x409c24, 0x000f0000);
+	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
+	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
+	nv_wr32(dev, 0x408030, 0xc0000000);
+	nv_wr32(dev, 0x40601c, 0xc0000000);
+	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
+	nv_wr32(dev, 0x406018, 0xc0000000);
+	nv_wr32(dev, 0x405840, 0xc0000000);
+	nv_wr32(dev, 0x405844, 0x00ffffff);
+	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	int gpc, tp;
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+		}
+		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+}
+
+static void
+nvc0_graph_init_rop(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	int rop;
+
+	for (rop = 0; rop < priv->rop_nr; rop++) {
+		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+}
+
+static int
+nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
+		 const char *code_fw, const char *data_fw)
+{
+	const struct firmware *fw;
+	char name[32];
+	int ret, i;
+
+	snprintf(name, sizeof(name), "nouveau/%s", data_fw);
+	ret = request_firmware(&fw, name, &dev->pdev->dev);
+	if (ret) {
+		NV_ERROR(dev, "failed to load %s\n", data_fw);
+		return ret;
+	}
+
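+	/* upload the data segment through 0x1c0/0x1c4 (0x01000000 presumably
+	 * resets/enables the auto-incrementing upload port)
+	 */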
+	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+	for (i = 0; i < fw->size / 4; i++)
+		nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
+	release_firmware(fw);
+
+	snprintf(name, sizeof(name), "nouveau/%s", code_fw);
+	ret = request_firmware(&fw, name, &dev->pdev->dev);
+	if (ret) {
+		NV_ERROR(dev, "failed to load %s\n", code_fw);
+		return ret;
+	}
+
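+	/* upload the code segment; 0x188 selects the 256-byte block (64
+	 * dwords) currently being written through 0x184
+	 */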
+	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+	for (i = 0; i < fw->size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+		nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
+	}
+	release_firmware(fw);
+
+	return 0;
+}
+
+static int
+nvc0_graph_init_ctxctl(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	u32 r000260;
+	int ret;
+
+	/* load fuc microcode */
+	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+	ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
+	if (ret == 0)
+		ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
+	nv_wr32(dev, 0x000260, r000260);
+
+	if (ret)
+		return ret;
+
+	/* start both of them running */
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x41a10c, 0x00000000);
+	nv_wr32(dev, 0x40910c, 0x00000000);
+	nv_wr32(dev, 0x41a100, 0x00000002);
+	nv_wr32(dev, 0x409100, 0x00000002);
+	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+		NV_INFO(dev, "0x409800 wait failed\n");
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x7fffffff);
+	nv_wr32(dev, 0x409504, 0x00000021);
+
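+	/* request 0x10: ask the firmware for the size of the context image,
+	 * returned in 0x409800
+	 */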
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x00000000);
+	nv_wr32(dev, 0x409504, 0x00000010);
+	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+		return -EBUSY;
+	}
+	priv->grctx_size = nv_rd32(dev, 0x409800);
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x00000000);
+	nv_wr32(dev, 0x409504, 0x00000016);
+	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+		return -EBUSY;
+	}
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x00000000);
+	nv_wr32(dev, 0x409504, 0x00000025);
+	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
 int
 nvc0_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv;
+	int ret;
+
 	dev_priv->engine.graph.accel_blocked = true;
+
+	switch (dev_priv->chipset) {
+	case 0xc0:
+	case 0xc3:
+	case 0xc4:
+		break;
+	default:
+		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+		if (nouveau_noaccel != 0)
+			return 0;
+		break;
+	}
+
+	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+	if (!pgraph->priv) {
+		ret = nvc0_graph_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pgraph->priv;
+
+	nvc0_graph_init_obj418880(dev);
+	nvc0_graph_init_regs(dev);
+	//nvc0_graph_init_unimplemented_magics(dev);
+	nvc0_graph_init_gpc_0(dev);
+	//nvc0_graph_init_unimplemented_c242(dev);
+
+	nv_wr32(dev, 0x400500, 0x00010001);
+	nv_wr32(dev, 0x400100, 0xffffffff);
+	nv_wr32(dev, 0x40013c, 0xffffffff);
+
+	nvc0_graph_init_units(dev);
+	nvc0_graph_init_gpc_1(dev);
+	nvc0_graph_init_rop(dev);
+
+	nv_wr32(dev, 0x400108, 0xffffffff);
+	nv_wr32(dev, 0x400138, 0xffffffff);
+	nv_wr32(dev, 0x400118, 0xffffffff);
+	nv_wr32(dev, 0x400130, 0xffffffff);
+	nv_wr32(dev, 0x40011c, 0xffffffff);
+	nv_wr32(dev, 0x400134, 0xffffffff);
+	nv_wr32(dev, 0x400054, 0x34ce3464);
+
+	ret = nvc0_graph_init_ctxctl(dev);
+	if (ret == 0)
+		dev_priv->engine.graph.accel_blocked = false;
 	return 0;
 }
 
+static int
+nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin)
+			continue;
+
+		if (inst == chan->ramin->vinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+	u32 chid = nvc0_graph_isr_chid(dev, inst);
+	u32 stat = nv_rd32(dev, 0x400100);
+	u32 addr = nv_rd32(dev, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(dev, 0x400708);
+	u32 code = nv_rd32(dev, 0x400110);
+	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+	if (stat & 0x00000010) {
+		NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00000020) {
+		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00000020);
+		stat &= ~0x00000020;
+	}
+
+	if (stat & 0x00100000) {
+		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+		nouveau_enum_print(nv50_data_error_names, code);
+		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+		       "mthd 0x%04x data 0x%08x\n",
+		       chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00200000) {
+		u32 trap = nv_rd32(dev, 0x400108);
+		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
+		nv_wr32(dev, 0x400108, trap);
+		nv_wr32(dev, 0x400100, 0x00200000);
+		stat &= ~0x00200000;
+	}
+
+	if (stat & 0x00080000) {
+		u32 ustat = nv_rd32(dev, 0x409c18);
+
+		NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+		nv_wr32(dev, 0x409c20, ustat);
+		nv_wr32(dev, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+		nv_wr32(dev, 0x400100, stat);
+	}
+
+	nv_wr32(dev, 0x400500, 0x00010001);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 0000000..40e26f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
+#define ROP_BCAST(r)   (0x408800 + (r))
+#define ROP_UNIT(u,r)  (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r)   (0x418000 + (r))
+#define GPC_UNIT(t,r)  (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_priv {
+	u8 gpc_nr;
+	u8 rop_nr;
+	u8 tp_nr[GPC_MAX];
+	u8 tp_total;
+
+	u32  grctx_size;
+	u32 *grctx_vals;
+	struct nouveau_gpuobj *unk4188b4;
+	struct nouveau_gpuobj *unk4188b8;
+
+	u8  magic_not_rop_nr;
+	u32 magicgpc980[4];
+	u32 magicgpc918;
+};
+
+struct nvc0_graph_chan {
+	struct nouveau_gpuobj *grctx;
+	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+	struct nouveau_gpuobj *mmio;
+	int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 0000000..b9e68b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2874 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
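+/*
+ * Small helpers used while generating the initial context image: nv_icmd()
+ * issues one init command through 0x400200/0x400204 and busy-waits for it
+ * to complete, nv_mthd() submits a method to a class through
+ * 0x404488/0x40448c.
+ */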
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+	nv_wr32(dev, 0x400204, data);
+	nv_wr32(dev, 0x400200, icmd);
+	while (nv_rd32(dev, 0x400700) & 2) {}
+}
+
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+	nv_wr32(dev, 0x40448c, data);
+	nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
+static void
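+/* write the default state for every method of the 3D class (0x9097) */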
+nvc0_grctx_generate_9097(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
+	nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
+	nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
+	nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
+	nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
+	nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
+	nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
+	nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
+	nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
+	nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
+	nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
+	nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
+	nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
+	nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
+	nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
+	nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
+	nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
+	nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
+	nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
+	nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
+	nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
+	nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
+	nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
+	nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
+	nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
+	nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
+	nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
+	nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
+	nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
+	nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
+	nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
+	nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
+	nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
+	nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
+	nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
+	nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
+	nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
+	nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
+	nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
+	nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
+	nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
+	nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
+	nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
+	nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
+	nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
+	nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
+	nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
+	nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
+	nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
+	nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
+	nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
+	nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
+	nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
+	nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+	nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+	nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+	nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+	nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+	nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+	nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+	nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+	nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+	nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+	/* in the trace this write appears right after the 0x90c0 methods, not here */
+	nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
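+/* initial method state for the Fermi 2D class (0x902d) */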
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
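+/* initial method state for the M2MF (memory-to-memory format) class (0x9039) */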
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
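+/* initial method state for the Fermi compute class (0x90c0) */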
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
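+/* from here on, raw PGRAPH context register defaults (nv_wr32 rather than
+ * per-class methods): DISPATCH unit, 0x4040xx range */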
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+	int i;
+
+	nv_wr32(dev, 0x404004, 0x00000000);
+	nv_wr32(dev, 0x404008, 0x00000000);
+	nv_wr32(dev, 0x40400c, 0x00000000);
+	nv_wr32(dev, 0x404010, 0x00000000);
+	nv_wr32(dev, 0x404014, 0x00000000);
+	nv_wr32(dev, 0x404018, 0x00000000);
+	nv_wr32(dev, 0x40401c, 0x00000000);
+	nv_wr32(dev, 0x404020, 0x00000000);
+	nv_wr32(dev, 0x404024, 0x00000000);
+	nv_wr32(dev, 0x404028, 0x00000000);
+	nv_wr32(dev, 0x40402c, 0x00000000);
+	nv_wr32(dev, 0x404044, 0x00000000);
+	nv_wr32(dev, 0x404094, 0x00000000);
+	nv_wr32(dev, 0x404098, 0x00000000);
+	nv_wr32(dev, 0x40409c, 0x00000000);
+	nv_wr32(dev, 0x4040a0, 0x00000000);
+	nv_wr32(dev, 0x4040a4, 0x00000000);
+	nv_wr32(dev, 0x4040a8, 0x00000000);
+	nv_wr32(dev, 0x4040ac, 0x00000000);
+	nv_wr32(dev, 0x4040b0, 0x00000000);
+	nv_wr32(dev, 0x4040b4, 0x00000000);
+	nv_wr32(dev, 0x4040b8, 0x00000000);
+	nv_wr32(dev, 0x4040bc, 0x00000000);
+	nv_wr32(dev, 0x4040c0, 0x00000000);
+	nv_wr32(dev, 0x4040c4, 0x00000000);
+	nv_wr32(dev, 0x4040c8, 0xf0000087);
+	nv_wr32(dev, 0x4040d4, 0x00000000);
+	nv_wr32(dev, 0x4040d8, 0x00000000);
+	nv_wr32(dev, 0x4040dc, 0x00000000);
+	nv_wr32(dev, 0x4040e0, 0x00000000);
+	nv_wr32(dev, 0x4040e4, 0x00000000);
+	nv_wr32(dev, 0x4040e8, 0x00001000);
+	nv_wr32(dev, 0x4040f8, 0x00000000);
+	nv_wr32(dev, 0x404130, 0x00000000);
+	nv_wr32(dev, 0x404134, 0x00000000);
+	nv_wr32(dev, 0x404138, 0x20000040);
+	nv_wr32(dev, 0x404150, 0x0000002e);
+	nv_wr32(dev, 0x404154, 0x00000400);
+	nv_wr32(dev, 0x404158, 0x00000200);
+	nv_wr32(dev, 0x404164, 0x00000055);
+	nv_wr32(dev, 0x404168, 0x00000000);
+	nv_wr32(dev, 0x404174, 0x00000000);
+	nv_wr32(dev, 0x404178, 0x00000000);
+	nv_wr32(dev, 0x40417c, 0x00000000);
+	for (i = 0; i < 8; i++)
+		nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
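+/* MACRO unit context registers (0x4044xx) */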
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404404, 0x00000000);
+	nv_wr32(dev, 0x404408, 0x00000000);
+	nv_wr32(dev, 0x40440c, 0x00000000);
+	nv_wr32(dev, 0x404410, 0x00000000);
+	nv_wr32(dev, 0x404414, 0x00000000);
+	nv_wr32(dev, 0x404418, 0x00000000);
+	nv_wr32(dev, 0x40441c, 0x00000000);
+	nv_wr32(dev, 0x404420, 0x00000000);
+	nv_wr32(dev, 0x404424, 0x00000000);
+	nv_wr32(dev, 0x404428, 0x00000000);
+	nv_wr32(dev, 0x40442c, 0x00000000);
+	nv_wr32(dev, 0x404430, 0x00000000);
+	nv_wr32(dev, 0x404434, 0x00000000);
+	nv_wr32(dev, 0x404438, 0x00000000);
+	nv_wr32(dev, 0x404460, 0x00000000);
+	nv_wr32(dev, 0x404464, 0x00000000);
+	nv_wr32(dev, 0x404468, 0x00ffffff);
+	nv_wr32(dev, 0x40446c, 0x00000000);
+	nv_wr32(dev, 0x404480, 0x00000001);
+	nv_wr32(dev, 0x404498, 0x00000001);
+}
+
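+/* M2MF unit context registers (0x4046xx) */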
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404604, 0x00000015);
+	nv_wr32(dev, 0x404608, 0x00000000);
+	nv_wr32(dev, 0x40460c, 0x00002e00);
+	nv_wr32(dev, 0x404610, 0x00000100);
+	nv_wr32(dev, 0x404618, 0x00000000);
+	nv_wr32(dev, 0x40461c, 0x00000000);
+	nv_wr32(dev, 0x404620, 0x00000000);
+	nv_wr32(dev, 0x404624, 0x00000000);
+	nv_wr32(dev, 0x404628, 0x00000000);
+	nv_wr32(dev, 0x40462c, 0x00000000);
+	nv_wr32(dev, 0x404630, 0x00000000);
+	nv_wr32(dev, 0x404634, 0x00000000);
+	nv_wr32(dev, 0x404638, 0x00000004);
+	nv_wr32(dev, 0x40463c, 0x00000000);
+	nv_wr32(dev, 0x404640, 0x00000000);
+	nv_wr32(dev, 0x404644, 0x00000000);
+	nv_wr32(dev, 0x404648, 0x00000000);
+	nv_wr32(dev, 0x40464c, 0x00000000);
+	nv_wr32(dev, 0x404650, 0x00000000);
+	nv_wr32(dev, 0x404654, 0x00000000);
+	nv_wr32(dev, 0x404658, 0x00000000);
+	nv_wr32(dev, 0x40465c, 0x007f0100);
+	nv_wr32(dev, 0x404660, 0x00000000);
+	nv_wr32(dev, 0x404664, 0x00000000);
+	nv_wr32(dev, 0x404668, 0x00000000);
+	nv_wr32(dev, 0x40466c, 0x00000000);
+	nv_wr32(dev, 0x404670, 0x00000000);
+	nv_wr32(dev, 0x404674, 0x00000000);
+	nv_wr32(dev, 0x404678, 0x00000000);
+	nv_wr32(dev, 0x40467c, 0x00000002);
+	nv_wr32(dev, 0x404680, 0x00000000);
+	nv_wr32(dev, 0x404684, 0x00000000);
+	nv_wr32(dev, 0x404688, 0x00000000);
+	nv_wr32(dev, 0x40468c, 0x00000000);
+	nv_wr32(dev, 0x404690, 0x00000000);
+	nv_wr32(dev, 0x404694, 0x00000000);
+	nv_wr32(dev, 0x404698, 0x00000000);
+	nv_wr32(dev, 0x40469c, 0x00000000);
+	nv_wr32(dev, 0x4046a0, 0x007f0080);
+	nv_wr32(dev, 0x4046a4, 0x00000000);
+	nv_wr32(dev, 0x4046a8, 0x00000000);
+	nv_wr32(dev, 0x4046ac, 0x00000000);
+	nv_wr32(dev, 0x4046b0, 0x00000000);
+	nv_wr32(dev, 0x4046b4, 0x00000000);
+	nv_wr32(dev, 0x4046b8, 0x00000000);
+	nv_wr32(dev, 0x4046bc, 0x00000000);
+	nv_wr32(dev, 0x4046c0, 0x00000000);
+	nv_wr32(dev, 0x4046c4, 0x00000000);
+	nv_wr32(dev, 0x4046c8, 0x00000000);
+	nv_wr32(dev, 0x4046cc, 0x00000000);
+	nv_wr32(dev, 0x4046d0, 0x00000000);
+	nv_wr32(dev, 0x4046d4, 0x00000000);
+	nv_wr32(dev, 0x4046d8, 0x00000000);
+	nv_wr32(dev, 0x4046dc, 0x00000000);
+	nv_wr32(dev, 0x4046e0, 0x00000000);
+	nv_wr32(dev, 0x4046e4, 0x00000000);
+	nv_wr32(dev, 0x4046e8, 0x00000000);
+	nv_wr32(dev, 0x4046f0, 0x00000000);
+	nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404700, 0x00000000);
+	nv_wr32(dev, 0x404704, 0x00000000);
+	nv_wr32(dev, 0x404708, 0x00000000);
+	nv_wr32(dev, 0x40470c, 0x00000000);
+	nv_wr32(dev, 0x404710, 0x00000000);
+	nv_wr32(dev, 0x404714, 0x00000000);
+	nv_wr32(dev, 0x404718, 0x00000000);
+	nv_wr32(dev, 0x40471c, 0x00000000);
+	nv_wr32(dev, 0x404720, 0x00000000);
+	nv_wr32(dev, 0x404724, 0x00000000);
+	nv_wr32(dev, 0x404728, 0x00000000);
+	nv_wr32(dev, 0x40472c, 0x00000000);
+	nv_wr32(dev, 0x404730, 0x00000000);
+	nv_wr32(dev, 0x404734, 0x00000100);
+	nv_wr32(dev, 0x404738, 0x00000000);
+	nv_wr32(dev, 0x40473c, 0x00000000);
+	nv_wr32(dev, 0x404740, 0x00000000);
+	nv_wr32(dev, 0x404744, 0x00000000);
+	nv_wr32(dev, 0x404748, 0x00000000);
+	nv_wr32(dev, 0x40474c, 0x00000000);
+	nv_wr32(dev, 0x404750, 0x00000000);
+	nv_wr32(dev, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x405800, 0x078000bf);
+	nv_wr32(dev, 0x405830, 0x02180000);
+	nv_wr32(dev, 0x405834, 0x00000000);
+	nv_wr32(dev, 0x405838, 0x00000000);
+	nv_wr32(dev, 0x405854, 0x00000000);
+	nv_wr32(dev, 0x405870, 0x00000001);
+	nv_wr32(dev, 0x405874, 0x00000001);
+	nv_wr32(dev, 0x405878, 0x00000001);
+	nv_wr32(dev, 0x40587c, 0x00000001);
+	nv_wr32(dev, 0x405a00, 0x00000000);
+	nv_wr32(dev, 0x405a04, 0x00000000);
+	nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x406020, 0x000103c1);
+	nv_wr32(dev, 0x406028, 0x00000001);
+	nv_wr32(dev, 0x40602c, 0x00000001);
+	nv_wr32(dev, 0x406030, 0x00000001);
+	nv_wr32(dev, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x4064a8, 0x00000000);
+	nv_wr32(dev, 0x4064ac, 0x00003fff);
+	nv_wr32(dev, 0x4064b4, 0x00000000);
+	nv_wr32(dev, 0x4064b8, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x407804, 0x00000023);
+	nv_wr32(dev, 0x40780c, 0x0a418820);
+	nv_wr32(dev, 0x407810, 0x062080e6);
+	nv_wr32(dev, 0x407814, 0x020398a4);
+	nv_wr32(dev, 0x407818, 0x0e629062);
+	nv_wr32(dev, 0x40781c, 0x0a418820);
+	nv_wr32(dev, 0x407820, 0x000000e6);
+	nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x408000, 0x00000000);
+	nv_wr32(dev, 0x408004, 0x00000000);
+	nv_wr32(dev, 0x408008, 0x00000018);
+	nv_wr32(dev, 0x40800c, 0x00000000);
+	nv_wr32(dev, 0x408010, 0x00000000);
+	nv_wr32(dev, 0x408014, 0x00000069);
+	nv_wr32(dev, 0x408018, 0xe100e100);
+	nv_wr32(dev, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* ROPC_BROADCAST */
+	nv_wr32(dev, 0x408800, 0x02802a3c);
+	nv_wr32(dev, 0x408804, 0x00000040);
+	nv_wr32(dev, 0x408808, 0x0003e00d);
+	switch (dev_priv->chipset) {
+	case 0xc0:
+		nv_wr32(dev, 0x408900, 0x0080b801);
+		break;
+	case 0xc3:
+	case 0xc4:
+		nv_wr32(dev, 0x408900, 0x3080b801);
+		break;
+	}
+	nv_wr32(dev, 0x408904, 0x02000001);
+	nv_wr32(dev, 0x408908, 0x00c80929);
+	nv_wr32(dev, 0x40890c, 0x00000000);
+	nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+	int i;
+
+	/* GPC_BROADCAST */
+	nv_wr32(dev, 0x418380, 0x00000016);
+	nv_wr32(dev, 0x418400, 0x38004e00);
+	nv_wr32(dev, 0x418404, 0x71e0ffff);
+	nv_wr32(dev, 0x418408, 0x00000000);
+	nv_wr32(dev, 0x41840c, 0x00001008);
+	nv_wr32(dev, 0x418410, 0x0fff0fff);
+	nv_wr32(dev, 0x418414, 0x00200fff);
+	nv_wr32(dev, 0x418450, 0x00000000);
+	nv_wr32(dev, 0x418454, 0x00000000);
+	nv_wr32(dev, 0x418458, 0x00000000);
+	nv_wr32(dev, 0x41845c, 0x00000000);
+	nv_wr32(dev, 0x418460, 0x00000000);
+	nv_wr32(dev, 0x418464, 0x00000000);
+	nv_wr32(dev, 0x418468, 0x00000001);
+	nv_wr32(dev, 0x41846c, 0x00000000);
+	nv_wr32(dev, 0x418470, 0x00000000);
+	nv_wr32(dev, 0x418600, 0x0000001f);
+	nv_wr32(dev, 0x418684, 0x0000000f);
+	nv_wr32(dev, 0x418700, 0x00000002);
+	nv_wr32(dev, 0x418704, 0x00000080);
+	nv_wr32(dev, 0x418708, 0x00000000);
+	nv_wr32(dev, 0x41870c, 0x07c80000);
+	nv_wr32(dev, 0x418710, 0x00000000);
+	nv_wr32(dev, 0x418800, 0x0006860a);
+	nv_wr32(dev, 0x418808, 0x00000000);
+	nv_wr32(dev, 0x41880c, 0x00000000);
+	nv_wr32(dev, 0x418810, 0x00000000);
+	nv_wr32(dev, 0x418828, 0x00008442);
+	nv_wr32(dev, 0x418830, 0x00000001);
+	nv_wr32(dev, 0x4188d8, 0x00000008);
+	nv_wr32(dev, 0x4188e0, 0x01000000);
+	nv_wr32(dev, 0x4188e8, 0x00000000);
+	nv_wr32(dev, 0x4188ec, 0x00000000);
+	nv_wr32(dev, 0x4188f0, 0x00000000);
+	nv_wr32(dev, 0x4188f4, 0x00000000);
+	nv_wr32(dev, 0x4188f8, 0x00000000);
+	nv_wr32(dev, 0x4188fc, 0x00100000);
+	nv_wr32(dev, 0x41891c, 0x00ff00ff);
+	nv_wr32(dev, 0x418924, 0x00000000);
+	nv_wr32(dev, 0x418928, 0x00ffff00);
+	nv_wr32(dev, 0x41892c, 0x0000ff00);
+	for (i = 0; i < 8; i++) {
+		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+	}
+	nv_wr32(dev, 0x418b00, 0x00000000);
+	nv_wr32(dev, 0x418b08, 0x0a418820);
+	nv_wr32(dev, 0x418b0c, 0x062080e6);
+	nv_wr32(dev, 0x418b10, 0x020398a4);
+	nv_wr32(dev, 0x418b14, 0x0e629062);
+	nv_wr32(dev, 0x418b18, 0x0a418820);
+	nv_wr32(dev, 0x418b1c, 0x000000e6);
+	nv_wr32(dev, 0x418bb8, 0x00000103);
+	nv_wr32(dev, 0x418c08, 0x00000001);
+	nv_wr32(dev, 0x418c10, 0x00000000);
+	nv_wr32(dev, 0x418c14, 0x00000000);
+	nv_wr32(dev, 0x418c18, 0x00000000);
+	nv_wr32(dev, 0x418c1c, 0x00000000);
+	nv_wr32(dev, 0x418c20, 0x00000000);
+	nv_wr32(dev, 0x418c24, 0x00000000);
+	nv_wr32(dev, 0x418c28, 0x00000000);
+	nv_wr32(dev, 0x418c2c, 0x00000000);
+	nv_wr32(dev, 0x418c80, 0x20200004);
+	nv_wr32(dev, 0x418c8c, 0x00000001);
+	nv_wr32(dev, 0x419000, 0x00000780);
+	nv_wr32(dev, 0x419004, 0x00000000);
+	nv_wr32(dev, 0x419008, 0x00000000);
+	nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+	nv_wr32(dev, 0x419848, 0x00000000);
+	nv_wr32(dev, 0x419864, 0x0000012a);
+	nv_wr32(dev, 0x419888, 0x00000000);
+	nv_wr32(dev, 0x419a00, 0x000001f0);
+	nv_wr32(dev, 0x419a04, 0x00000001);
+	nv_wr32(dev, 0x419a08, 0x00000023);
+	nv_wr32(dev, 0x419a0c, 0x00020000);
+	nv_wr32(dev, 0x419a10, 0x00000000);
+	nv_wr32(dev, 0x419a14, 0x00000200);
+	nv_wr32(dev, 0x419a1c, 0x00000000);
+	nv_wr32(dev, 0x419a20, 0x00000800);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
+	nv_wr32(dev, 0x419b00, 0x0a418820);
+	nv_wr32(dev, 0x419b04, 0x062080e6);
+	nv_wr32(dev, 0x419b08, 0x020398a4);
+	nv_wr32(dev, 0x419b0c, 0x0e629062);
+	nv_wr32(dev, 0x419b10, 0x0a418820);
+	nv_wr32(dev, 0x419b14, 0x000000e6);
+	nv_wr32(dev, 0x419bd0, 0x00900103);
+	nv_wr32(dev, 0x419be0, 0x00000001);
+	nv_wr32(dev, 0x419be4, 0x00000000);
+	nv_wr32(dev, 0x419c00, 0x00000002);
+	nv_wr32(dev, 0x419c04, 0x00000006);
+	nv_wr32(dev, 0x419c08, 0x00000002);
+	nv_wr32(dev, 0x419c20, 0x00000000);
+	nv_wr32(dev, 0x419cbc, 0x28137606);
+	nv_wr32(dev, 0x419ce8, 0x00000000);
+	nv_wr32(dev, 0x419cf4, 0x00000183);
+	nv_wr32(dev, 0x419d20, 0x02180000);
+	nv_wr32(dev, 0x419d24, 0x00001fff);
+	nv_wr32(dev, 0x419e04, 0x00000000);
+	nv_wr32(dev, 0x419e08, 0x00000000);
+	nv_wr32(dev, 0x419e0c, 0x00000000);
+	nv_wr32(dev, 0x419e10, 0x00000002);
+	nv_wr32(dev, 0x419e44, 0x001beff2);
+	nv_wr32(dev, 0x419e48, 0x00000000);
+	nv_wr32(dev, 0x419e4c, 0x0000000f);
+	nv_wr32(dev, 0x419e50, 0x00000000);
+	nv_wr32(dev, 0x419e54, 0x00000000);
+	nv_wr32(dev, 0x419e58, 0x00000000);
+	nv_wr32(dev, 0x419e5c, 0x00000000);
+	nv_wr32(dev, 0x419e60, 0x00000000);
+	nv_wr32(dev, 0x419e64, 0x00000000);
+	nv_wr32(dev, 0x419e68, 0x00000000);
+	nv_wr32(dev, 0x419e6c, 0x00000000);
+	nv_wr32(dev, 0x419e70, 0x00000000);
+	nv_wr32(dev, 0x419e74, 0x00000000);
+	nv_wr32(dev, 0x419e78, 0x00000000);
+	nv_wr32(dev, 0x419e7c, 0x00000000);
+	nv_wr32(dev, 0x419e80, 0x00000000);
+	nv_wr32(dev, 0x419e84, 0x00000000);
+	nv_wr32(dev, 0x419e88, 0x00000000);
+	nv_wr32(dev, 0x419e8c, 0x00000000);
+	nv_wr32(dev, 0x419e90, 0x00000000);
+	nv_wr32(dev, 0x419e98, 0x00000000);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419ee0, 0x00011110);
+	nv_wr32(dev, 0x419f50, 0x00000000);
+	nv_wr32(dev, 0x419f54, 0x00000000);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int i, gpc, tp, id;
+	u32 r000260, tmp;
+
+	r000260 = nv_rd32(dev, 0x000260);
+	nv_wr32(dev, 0x000260, r000260 & ~1);
+	nv_wr32(dev, 0x400208, 0x00000000);
+
+	nvc0_grctx_generate_dispatch(dev);
+	nvc0_grctx_generate_macro(dev);
+	nvc0_grctx_generate_m2mf(dev);
+	nvc0_grctx_generate_unk47xx(dev);
+	nvc0_grctx_generate_shaders(dev);
+	nvc0_grctx_generate_unk60xx(dev);
+	nvc0_grctx_generate_unk64xx(dev);
+	nvc0_grctx_generate_tpbus(dev);
+	nvc0_grctx_generate_ccache(dev);
+	nvc0_grctx_generate_rop(dev);
+	nvc0_grctx_generate_gpc(dev);
+	nvc0_grctx_generate_tp(dev);
+
+	nv_wr32(dev, 0x404154, 0x00000000);
+
+	/* fuc "mmio list" writes */
+	for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+		u32 reg = nv_ro32(grch->mmio, i + 0);
+		nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+	}
+
+	for (tp = 0, id = 0; tp < 4; tp++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tp <= priv->tp_nr[gpc]) {
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+				id++;
+			}
+
+			nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+			nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+		}
+	}
+
+	tmp = 0;
+	for (i = 0; i < priv->gpc_nr; i++)
+		tmp |= priv->tp_nr[i] << (i * 4);
+	nv_wr32(dev, 0x406028, tmp);
+	nv_wr32(dev, 0x405870, tmp);
+
+	nv_wr32(dev, 0x40602c, 0x00000000);
+	nv_wr32(dev, 0x405874, 0x00000000);
+	nv_wr32(dev, 0x406030, 0x00000000);
+	nv_wr32(dev, 0x405878, 0x00000000);
+	nv_wr32(dev, 0x406034, 0x00000000);
+	nv_wr32(dev, 0x40587c, 0x00000000);
+
+	if (1) {
+		const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+		u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+		u8 tpnr[GPC_MAX];
+		u8 data[32];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+		memset(data, 0x1f, sizeof(data));
+
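+		/* assign TPs to GPCs round-robin, data[tp] = owning GPC */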
+		gpc = -1;
+		for (tp = 0; tp < priv->tp_total; tp++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tpnr[gpc]--;
+			data[tp] = gpc;
+		}
+
+		for (i = 0; i < max / 4; i++)
+			nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+	}
+
+	if (1) {
+		u32 data[6] = {}, data2[2] = {};
+		u8 tpnr[GPC_MAX];
+		u8 shift, ntpcv;
+
+		/* calculate first set of magics */
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+		for (tp = 0; tp < priv->tp_total; tp++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tpnr[gpc]--;
+
+			data[tp / 6] |= gpc << ((tp % 6) * 5);
+		}
+
+		for (; tp < 32; tp++)
+			data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+		/* and the second... */
+		shift = 0;
+		ntpcv = priv->tp_total;
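+		/* scale ntpcv up until bit 4 is set; shift counts doublings */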
+		while (!(ntpcv & (1 << 4))) {
+			ntpcv <<= 1;
+			shift++;
+		}
+
+		data2[0]  = (ntpcv << 16);
+		data2[0] |= (shift << 21);
+		data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+		for (i = 1; i < 7; i++)
+			data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+		/* GPC_BROADCAST */
+		nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+		/* GPC_BROADCAST.TP_BROADCAST */
+		nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+				       priv->magic_not_rop_nr |
+				       data2[0]);
+		nv_wr32(dev, 0x419be4, data2[1]);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+		/* UNK78xx */
+		nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+	}
+
+	if (1) {
+		u32 tp_mask = 0, tp_set = 0;
+		u8  tpnr[GPC_MAX];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+			tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
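+		/* write tp_set and its complement (in tp_mask) per slot */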
+		gpc = -1;
+		for (i = 0; i < 32; i++) {
+			int ltp = i * (priv->tp_total - 1) / 32;
+
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+			tp_set |= 1 << ((gpc * 8) + tp);
+
+			do {
+				nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+				tp_set ^= tp_mask;
+				nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+				tp_set ^= tp_mask;
+			} while (ltp == (++i * (priv->tp_total - 1) / 32));
+			i--;
+		}
+	}
+
+	nv_wr32(dev, 0x400208, 0x80000000);
+
+	nv_icmd(dev, 0x00001000, 0x00000004);
+	nv_icmd(dev, 0x000000a9, 0x0000ffff);
+	nv_icmd(dev, 0x00000038, 0x0fac6881);
+	nv_icmd(dev, 0x0000003d, 0x00000001);
+	nv_icmd(dev, 0x000000e8, 0x00000400);
+	nv_icmd(dev, 0x000000e9, 0x00000400);
+	nv_icmd(dev, 0x000000ea, 0x00000400);
+	nv_icmd(dev, 0x000000eb, 0x00000400);
+	nv_icmd(dev, 0x000000ec, 0x00000400);
+	nv_icmd(dev, 0x000000ed, 0x00000400);
+	nv_icmd(dev, 0x000000ee, 0x00000400);
+	nv_icmd(dev, 0x000000ef, 0x00000400);
+	nv_icmd(dev, 0x00000078, 0x00000300);
+	nv_icmd(dev, 0x00000079, 0x00000300);
+	nv_icmd(dev, 0x0000007a, 0x00000300);
+	nv_icmd(dev, 0x0000007b, 0x00000300);
+	nv_icmd(dev, 0x0000007c, 0x00000300);
+	nv_icmd(dev, 0x0000007d, 0x00000300);
+	nv_icmd(dev, 0x0000007e, 0x00000300);
+	nv_icmd(dev, 0x0000007f, 0x00000300);
+	nv_icmd(dev, 0x00000050, 0x00000011);
+	nv_icmd(dev, 0x00000058, 0x00000008);
+	nv_icmd(dev, 0x00000059, 0x00000008);
+	nv_icmd(dev, 0x0000005a, 0x00000008);
+	nv_icmd(dev, 0x0000005b, 0x00000008);
+	nv_icmd(dev, 0x0000005c, 0x00000008);
+	nv_icmd(dev, 0x0000005d, 0x00000008);
+	nv_icmd(dev, 0x0000005e, 0x00000008);
+	nv_icmd(dev, 0x0000005f, 0x00000008);
+	nv_icmd(dev, 0x00000208, 0x00000001);
+	nv_icmd(dev, 0x00000209, 0x00000001);
+	nv_icmd(dev, 0x0000020a, 0x00000001);
+	nv_icmd(dev, 0x0000020b, 0x00000001);
+	nv_icmd(dev, 0x0000020c, 0x00000001);
+	nv_icmd(dev, 0x0000020d, 0x00000001);
+	nv_icmd(dev, 0x0000020e, 0x00000001);
+	nv_icmd(dev, 0x0000020f, 0x00000001);
+	nv_icmd(dev, 0x00000081, 0x00000001);
+	nv_icmd(dev, 0x00000085, 0x00000004);
+	nv_icmd(dev, 0x00000088, 0x00000400);
+	nv_icmd(dev, 0x00000090, 0x00000300);
+	nv_icmd(dev, 0x00000098, 0x00001001);
+	nv_icmd(dev, 0x000000e3, 0x00000001);
+	nv_icmd(dev, 0x000000da, 0x00000001);
+	nv_icmd(dev, 0x000000f8, 0x00000003);
+	nv_icmd(dev, 0x000000fa, 0x00000001);
+	nv_icmd(dev, 0x0000009f, 0x0000ffff);
+	nv_icmd(dev, 0x000000a0, 0x0000ffff);
+	nv_icmd(dev, 0x000000a1, 0x0000ffff);
+	nv_icmd(dev, 0x000000a2, 0x0000ffff);
+	nv_icmd(dev, 0x000000b1, 0x00000001);
+	nv_icmd(dev, 0x000000b2, 0x00000000);
+	nv_icmd(dev, 0x000000b3, 0x00000000);
+	nv_icmd(dev, 0x000000b4, 0x00000000);
+	nv_icmd(dev, 0x000000b5, 0x00000000);
+	nv_icmd(dev, 0x000000b6, 0x00000000);
+	nv_icmd(dev, 0x000000b7, 0x00000000);
+	nv_icmd(dev, 0x000000b8, 0x00000000);
+	nv_icmd(dev, 0x000000b9, 0x00000000);
+	nv_icmd(dev, 0x000000ba, 0x00000000);
+	nv_icmd(dev, 0x000000bb, 0x00000000);
+	nv_icmd(dev, 0x000000bc, 0x00000000);
+	nv_icmd(dev, 0x000000bd, 0x00000000);
+	nv_icmd(dev, 0x000000be, 0x00000000);
+	nv_icmd(dev, 0x000000bf, 0x00000000);
+	nv_icmd(dev, 0x000000c0, 0x00000000);
+	nv_icmd(dev, 0x000000c1, 0x00000000);
+	nv_icmd(dev, 0x000000c2, 0x00000000);
+	nv_icmd(dev, 0x000000c3, 0x00000000);
+	nv_icmd(dev, 0x000000c4, 0x00000000);
+	nv_icmd(dev, 0x000000c5, 0x00000000);
+	nv_icmd(dev, 0x000000c6, 0x00000000);
+	nv_icmd(dev, 0x000000c7, 0x00000000);
+	nv_icmd(dev, 0x000000c8, 0x00000000);
+	nv_icmd(dev, 0x000000c9, 0x00000000);
+	nv_icmd(dev, 0x000000ca, 0x00000000);
+	nv_icmd(dev, 0x000000cb, 0x00000000);
+	nv_icmd(dev, 0x000000cc, 0x00000000);
+	nv_icmd(dev, 0x000000cd, 0x00000000);
+	nv_icmd(dev, 0x000000ce, 0x00000000);
+	nv_icmd(dev, 0x000000cf, 0x00000000);
+	nv_icmd(dev, 0x000000d0, 0x00000000);
+	nv_icmd(dev, 0x000000d1, 0x00000000);
+	nv_icmd(dev, 0x000000d2, 0x00000000);
+	nv_icmd(dev, 0x000000d3, 0x00000000);
+	nv_icmd(dev, 0x000000d4, 0x00000000);
+	nv_icmd(dev, 0x000000d5, 0x00000000);
+	nv_icmd(dev, 0x000000d6, 0x00000000);
+	nv_icmd(dev, 0x000000d7, 0x00000000);
+	nv_icmd(dev, 0x000000d8, 0x00000000);
+	nv_icmd(dev, 0x000000d9, 0x00000000);
+	nv_icmd(dev, 0x00000210, 0x00000040);
+	nv_icmd(dev, 0x00000211, 0x00000040);
+	nv_icmd(dev, 0x00000212, 0x00000040);
+	nv_icmd(dev, 0x00000213, 0x00000040);
+	nv_icmd(dev, 0x00000214, 0x00000040);
+	nv_icmd(dev, 0x00000215, 0x00000040);
+	nv_icmd(dev, 0x00000216, 0x00000040);
+	nv_icmd(dev, 0x00000217, 0x00000040);
+	nv_icmd(dev, 0x00000218, 0x0000c080);
+	nv_icmd(dev, 0x00000219, 0x0000c080);
+	nv_icmd(dev, 0x0000021a, 0x0000c080);
+	nv_icmd(dev, 0x0000021b, 0x0000c080);
+	nv_icmd(dev, 0x0000021c, 0x0000c080);
+	nv_icmd(dev, 0x0000021d, 0x0000c080);
+	nv_icmd(dev, 0x0000021e, 0x0000c080);
+	nv_icmd(dev, 0x0000021f, 0x0000c080);
+	nv_icmd(dev, 0x000000ad, 0x0000013e);
+	nv_icmd(dev, 0x000000e1, 0x00000010);
+	nv_icmd(dev, 0x00000290, 0x00000000);
+	nv_icmd(dev, 0x00000291, 0x00000000);
+	nv_icmd(dev, 0x00000292, 0x00000000);
+	nv_icmd(dev, 0x00000293, 0x00000000);
+	nv_icmd(dev, 0x00000294, 0x00000000);
+	nv_icmd(dev, 0x00000295, 0x00000000);
+	nv_icmd(dev, 0x00000296, 0x00000000);
+	nv_icmd(dev, 0x00000297, 0x00000000);
+	nv_icmd(dev, 0x00000298, 0x00000000);
+	nv_icmd(dev, 0x00000299, 0x00000000);
+	nv_icmd(dev, 0x0000029a, 0x00000000);
+	nv_icmd(dev, 0x0000029b, 0x00000000);
+	nv_icmd(dev, 0x0000029c, 0x00000000);
+	nv_icmd(dev, 0x0000029d, 0x00000000);
+	nv_icmd(dev, 0x0000029e, 0x00000000);
+	nv_icmd(dev, 0x0000029f, 0x00000000);
+	nv_icmd(dev, 0x000003b0, 0x00000000);
+	nv_icmd(dev, 0x000003b1, 0x00000000);
+	nv_icmd(dev, 0x000003b2, 0x00000000);
+	nv_icmd(dev, 0x000003b3, 0x00000000);
+	nv_icmd(dev, 0x000003b4, 0x00000000);
+	nv_icmd(dev, 0x000003b5, 0x00000000);
+	nv_icmd(dev, 0x000003b6, 0x00000000);
+	nv_icmd(dev, 0x000003b7, 0x00000000);
+	nv_icmd(dev, 0x000003b8, 0x00000000);
+	nv_icmd(dev, 0x000003b9, 0x00000000);
+	nv_icmd(dev, 0x000003ba, 0x00000000);
+	nv_icmd(dev, 0x000003bb, 0x00000000);
+	nv_icmd(dev, 0x000003bc, 0x00000000);
+	nv_icmd(dev, 0x000003bd, 0x00000000);
+	nv_icmd(dev, 0x000003be, 0x00000000);
+	nv_icmd(dev, 0x000003bf, 0x00000000);
+	nv_icmd(dev, 0x000002a0, 0x00000000);
+	nv_icmd(dev, 0x000002a1, 0x00000000);
+	nv_icmd(dev, 0x000002a2, 0x00000000);
+	nv_icmd(dev, 0x000002a3, 0x00000000);
+	nv_icmd(dev, 0x000002a4, 0x00000000);
+	nv_icmd(dev, 0x000002a5, 0x00000000);
+	nv_icmd(dev, 0x000002a6, 0x00000000);
+	nv_icmd(dev, 0x000002a7, 0x00000000);
+	nv_icmd(dev, 0x000002a8, 0x00000000);
+	nv_icmd(dev, 0x000002a9, 0x00000000);
+	nv_icmd(dev, 0x000002aa, 0x00000000);
+	nv_icmd(dev, 0x000002ab, 0x00000000);
+	nv_icmd(dev, 0x000002ac, 0x00000000);
+	nv_icmd(dev, 0x000002ad, 0x00000000);
+	nv_icmd(dev, 0x000002ae, 0x00000000);
+	nv_icmd(dev, 0x000002af, 0x00000000);
+	nv_icmd(dev, 0x00000420, 0x00000000);
+	nv_icmd(dev, 0x00000421, 0x00000000);
+	nv_icmd(dev, 0x00000422, 0x00000000);
+	nv_icmd(dev, 0x00000423, 0x00000000);
+	nv_icmd(dev, 0x00000424, 0x00000000);
+	nv_icmd(dev, 0x00000425, 0x00000000);
+	nv_icmd(dev, 0x00000426, 0x00000000);
+	nv_icmd(dev, 0x00000427, 0x00000000);
+	nv_icmd(dev, 0x00000428, 0x00000000);
+	nv_icmd(dev, 0x00000429, 0x00000000);
+	nv_icmd(dev, 0x0000042a, 0x00000000);
+	nv_icmd(dev, 0x0000042b, 0x00000000);
+	nv_icmd(dev, 0x0000042c, 0x00000000);
+	nv_icmd(dev, 0x0000042d, 0x00000000);
+	nv_icmd(dev, 0x0000042e, 0x00000000);
+	nv_icmd(dev, 0x0000042f, 0x00000000);
+	nv_icmd(dev, 0x000002b0, 0x00000000);
+	nv_icmd(dev, 0x000002b1, 0x00000000);
+	nv_icmd(dev, 0x000002b2, 0x00000000);
+	nv_icmd(dev, 0x000002b3, 0x00000000);
+	nv_icmd(dev, 0x000002b4, 0x00000000);
+	nv_icmd(dev, 0x000002b5, 0x00000000);
+	nv_icmd(dev, 0x000002b6, 0x00000000);
+	nv_icmd(dev, 0x000002b7, 0x00000000);
+	nv_icmd(dev, 0x000002b8, 0x00000000);
+	nv_icmd(dev, 0x000002b9, 0x00000000);
+	nv_icmd(dev, 0x000002ba, 0x00000000);
+	nv_icmd(dev, 0x000002bb, 0x00000000);
+	nv_icmd(dev, 0x000002bc, 0x00000000);
+	nv_icmd(dev, 0x000002bd, 0x00000000);
+	nv_icmd(dev, 0x000002be, 0x00000000);
+	nv_icmd(dev, 0x000002bf, 0x00000000);
+	nv_icmd(dev, 0x00000430, 0x00000000);
+	nv_icmd(dev, 0x00000431, 0x00000000);
+	nv_icmd(dev, 0x00000432, 0x00000000);
+	nv_icmd(dev, 0x00000433, 0x00000000);
+	nv_icmd(dev, 0x00000434, 0x00000000);
+	nv_icmd(dev, 0x00000435, 0x00000000);
+	nv_icmd(dev, 0x00000436, 0x00000000);
+	nv_icmd(dev, 0x00000437, 0x00000000);
+	nv_icmd(dev, 0x00000438, 0x00000000);
+	nv_icmd(dev, 0x00000439, 0x00000000);
+	nv_icmd(dev, 0x0000043a, 0x00000000);
+	nv_icmd(dev, 0x0000043b, 0x00000000);
+	nv_icmd(dev, 0x0000043c, 0x00000000);
+	nv_icmd(dev, 0x0000043d, 0x00000000);
+	nv_icmd(dev, 0x0000043e, 0x00000000);
+	nv_icmd(dev, 0x0000043f, 0x00000000);
+	nv_icmd(dev, 0x000002c0, 0x00000000);
+	nv_icmd(dev, 0x000002c1, 0x00000000);
+	nv_icmd(dev, 0x000002c2, 0x00000000);
+	nv_icmd(dev, 0x000002c3, 0x00000000);
+	nv_icmd(dev, 0x000002c4, 0x00000000);
+	nv_icmd(dev, 0x000002c5, 0x00000000);
+	nv_icmd(dev, 0x000002c6, 0x00000000);
+	nv_icmd(dev, 0x000002c7, 0x00000000);
+	nv_icmd(dev, 0x000002c8, 0x00000000);
+	nv_icmd(dev, 0x000002c9, 0x00000000);
+	nv_icmd(dev, 0x000002ca, 0x00000000);
+	nv_icmd(dev, 0x000002cb, 0x00000000);
+	nv_icmd(dev, 0x000002cc, 0x00000000);
+	nv_icmd(dev, 0x000002cd, 0x00000000);
+	nv_icmd(dev, 0x000002ce, 0x00000000);
+	nv_icmd(dev, 0x000002cf, 0x00000000);
+	nv_icmd(dev, 0x000004d0, 0x00000000);
+	nv_icmd(dev, 0x000004d1, 0x00000000);
+	nv_icmd(dev, 0x000004d2, 0x00000000);
+	nv_icmd(dev, 0x000004d3, 0x00000000);
+	nv_icmd(dev, 0x000004d4, 0x00000000);
+	nv_icmd(dev, 0x000004d5, 0x00000000);
+	nv_icmd(dev, 0x000004d6, 0x00000000);
+	nv_icmd(dev, 0x000004d7, 0x00000000);
+	nv_icmd(dev, 0x000004d8, 0x00000000);
+	nv_icmd(dev, 0x000004d9, 0x00000000);
+	nv_icmd(dev, 0x000004da, 0x00000000);
+	nv_icmd(dev, 0x000004db, 0x00000000);
+	nv_icmd(dev, 0x000004dc, 0x00000000);
+	nv_icmd(dev, 0x000004dd, 0x00000000);
+	nv_icmd(dev, 0x000004de, 0x00000000);
+	nv_icmd(dev, 0x000004df, 0x00000000);
+	nv_icmd(dev, 0x00000720, 0x00000000);
+	nv_icmd(dev, 0x00000721, 0x00000000);
+	nv_icmd(dev, 0x00000722, 0x00000000);
+	nv_icmd(dev, 0x00000723, 0x00000000);
+	nv_icmd(dev, 0x00000724, 0x00000000);
+	nv_icmd(dev, 0x00000725, 0x00000000);
+	nv_icmd(dev, 0x00000726, 0x00000000);
+	nv_icmd(dev, 0x00000727, 0x00000000);
+	nv_icmd(dev, 0x00000728, 0x00000000);
+	nv_icmd(dev, 0x00000729, 0x00000000);
+	nv_icmd(dev, 0x0000072a, 0x00000000);
+	nv_icmd(dev, 0x0000072b, 0x00000000);
+	nv_icmd(dev, 0x0000072c, 0x00000000);
+	nv_icmd(dev, 0x0000072d, 0x00000000);
+	nv_icmd(dev, 0x0000072e, 0x00000000);
+	nv_icmd(dev, 0x0000072f, 0x00000000);
+	nv_icmd(dev, 0x000008c0, 0x00000000);
+	nv_icmd(dev, 0x000008c1, 0x00000000);
+	nv_icmd(dev, 0x000008c2, 0x00000000);
+	nv_icmd(dev, 0x000008c3, 0x00000000);
+	nv_icmd(dev, 0x000008c4, 0x00000000);
+	nv_icmd(dev, 0x000008c5, 0x00000000);
+	nv_icmd(dev, 0x000008c6, 0x00000000);
+	nv_icmd(dev, 0x000008c7, 0x00000000);
+	nv_icmd(dev, 0x000008c8, 0x00000000);
+	nv_icmd(dev, 0x000008c9, 0x00000000);
+	nv_icmd(dev, 0x000008ca, 0x00000000);
+	nv_icmd(dev, 0x000008cb, 0x00000000);
+	nv_icmd(dev, 0x000008cc, 0x00000000);
+	nv_icmd(dev, 0x000008cd, 0x00000000);
+	nv_icmd(dev, 0x000008ce, 0x00000000);
+	nv_icmd(dev, 0x000008cf, 0x00000000);
+	nv_icmd(dev, 0x00000890, 0x00000000);
+	nv_icmd(dev, 0x00000891, 0x00000000);
+	nv_icmd(dev, 0x00000892, 0x00000000);
+	nv_icmd(dev, 0x00000893, 0x00000000);
+	nv_icmd(dev, 0x00000894, 0x00000000);
+	nv_icmd(dev, 0x00000895, 0x00000000);
+	nv_icmd(dev, 0x00000896, 0x00000000);
+	nv_icmd(dev, 0x00000897, 0x00000000);
+	nv_icmd(dev, 0x00000898, 0x00000000);
+	nv_icmd(dev, 0x00000899, 0x00000000);
+	nv_icmd(dev, 0x0000089a, 0x00000000);
+	nv_icmd(dev, 0x0000089b, 0x00000000);
+	nv_icmd(dev, 0x0000089c, 0x00000000);
+	nv_icmd(dev, 0x0000089d, 0x00000000);
+	nv_icmd(dev, 0x0000089e, 0x00000000);
+	nv_icmd(dev, 0x0000089f, 0x00000000);
+	nv_icmd(dev, 0x000008e0, 0x00000000);
+	nv_icmd(dev, 0x000008e1, 0x00000000);
+	nv_icmd(dev, 0x000008e2, 0x00000000);
+	nv_icmd(dev, 0x000008e3, 0x00000000);
+	nv_icmd(dev, 0x000008e4, 0x00000000);
+	nv_icmd(dev, 0x000008e5, 0x00000000);
+	nv_icmd(dev, 0x000008e6, 0x00000000);
+	nv_icmd(dev, 0x000008e7, 0x00000000);
+	nv_icmd(dev, 0x000008e8, 0x00000000);
+	nv_icmd(dev, 0x000008e9, 0x00000000);
+	nv_icmd(dev, 0x000008ea, 0x00000000);
+	nv_icmd(dev, 0x000008eb, 0x00000000);
+	nv_icmd(dev, 0x000008ec, 0x00000000);
+	nv_icmd(dev, 0x000008ed, 0x00000000);
+	nv_icmd(dev, 0x000008ee, 0x00000000);
+	nv_icmd(dev, 0x000008ef, 0x00000000);
+	nv_icmd(dev, 0x000008a0, 0x00000000);
+	nv_icmd(dev, 0x000008a1, 0x00000000);
+	nv_icmd(dev, 0x000008a2, 0x00000000);
+	nv_icmd(dev, 0x000008a3, 0x00000000);
+	nv_icmd(dev, 0x000008a4, 0x00000000);
+	nv_icmd(dev, 0x000008a5, 0x00000000);
+	nv_icmd(dev, 0x000008a6, 0x00000000);
+	nv_icmd(dev, 0x000008a7, 0x00000000);
+	nv_icmd(dev, 0x000008a8, 0x00000000);
+	nv_icmd(dev, 0x000008a9, 0x00000000);
+	nv_icmd(dev, 0x000008aa, 0x00000000);
+	nv_icmd(dev, 0x000008ab, 0x00000000);
+	nv_icmd(dev, 0x000008ac, 0x00000000);
+	nv_icmd(dev, 0x000008ad, 0x00000000);
+	nv_icmd(dev, 0x000008ae, 0x00000000);
+	nv_icmd(dev, 0x000008af, 0x00000000);
+	nv_icmd(dev, 0x000008f0, 0x00000000);
+	nv_icmd(dev, 0x000008f1, 0x00000000);
+	nv_icmd(dev, 0x000008f2, 0x00000000);
+	nv_icmd(dev, 0x000008f3, 0x00000000);
+	nv_icmd(dev, 0x000008f4, 0x00000000);
+	nv_icmd(dev, 0x000008f5, 0x00000000);
+	nv_icmd(dev, 0x000008f6, 0x00000000);
+	nv_icmd(dev, 0x000008f7, 0x00000000);
+	nv_icmd(dev, 0x000008f8, 0x00000000);
+	nv_icmd(dev, 0x000008f9, 0x00000000);
+	nv_icmd(dev, 0x000008fa, 0x00000000);
+	nv_icmd(dev, 0x000008fb, 0x00000000);
+	nv_icmd(dev, 0x000008fc, 0x00000000);
+	nv_icmd(dev, 0x000008fd, 0x00000000);
+	nv_icmd(dev, 0x000008fe, 0x00000000);
+	nv_icmd(dev, 0x000008ff, 0x00000000);
+	nv_icmd(dev, 0x0000094c, 0x000000ff);
+	nv_icmd(dev, 0x0000094d, 0xffffffff);
+	nv_icmd(dev, 0x0000094e, 0x00000002);
+	nv_icmd(dev, 0x000002ec, 0x00000001);
+	nv_icmd(dev, 0x00000303, 0x00000001);
+	nv_icmd(dev, 0x000002e6, 0x00000001);
+	nv_icmd(dev, 0x00000466, 0x00000052);
+	nv_icmd(dev, 0x00000301, 0x3f800000);
+	nv_icmd(dev, 0x00000304, 0x30201000);
+	nv_icmd(dev, 0x00000305, 0x70605040);
+	nv_icmd(dev, 0x00000306, 0xb8a89888);
+	nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
+	nv_icmd(dev, 0x0000030a, 0x00ffff00);
+	nv_icmd(dev, 0x0000030b, 0x0000001a);
+	nv_icmd(dev, 0x0000030c, 0x00000001);
+	nv_icmd(dev, 0x00000318, 0x00000001);
+	nv_icmd(dev, 0x00000340, 0x00000000);
+	nv_icmd(dev, 0x00000375, 0x00000001);
+	nv_icmd(dev, 0x00000351, 0x00000100);
+	nv_icmd(dev, 0x0000037d, 0x00000006);
+	nv_icmd(dev, 0x000003a0, 0x00000002);
+	nv_icmd(dev, 0x000003aa, 0x00000001);
+	nv_icmd(dev, 0x000003a9, 0x00000001);
+	nv_icmd(dev, 0x00000380, 0x00000001);
+	nv_icmd(dev, 0x00000360, 0x00000040);
+	nv_icmd(dev, 0x00000366, 0x00000000);
+	nv_icmd(dev, 0x00000367, 0x00000000);
+	nv_icmd(dev, 0x00000368, 0x00001fff);
+	nv_icmd(dev, 0x00000370, 0x00000000);
+	nv_icmd(dev, 0x00000371, 0x00000000);
+	nv_icmd(dev, 0x00000372, 0x003fffff);
+	nv_icmd(dev, 0x0000037a, 0x00000012);
+	nv_icmd(dev, 0x000005e0, 0x00000022);
+	nv_icmd(dev, 0x000005e1, 0x00000022);
+	nv_icmd(dev, 0x000005e2, 0x00000022);
+	nv_icmd(dev, 0x000005e3, 0x00000022);
+	nv_icmd(dev, 0x000005e4, 0x00000022);
+	nv_icmd(dev, 0x00000619, 0x00000003);
+	nv_icmd(dev, 0x00000811, 0x00000003);
+	nv_icmd(dev, 0x00000812, 0x00000004);
+	nv_icmd(dev, 0x00000813, 0x00000006);
+	nv_icmd(dev, 0x00000814, 0x00000008);
+	nv_icmd(dev, 0x00000815, 0x0000000b);
+	nv_icmd(dev, 0x00000800, 0x00000001);
+	nv_icmd(dev, 0x00000801, 0x00000001);
+	nv_icmd(dev, 0x00000802, 0x00000001);
+	nv_icmd(dev, 0x00000803, 0x00000001);
+	nv_icmd(dev, 0x00000804, 0x00000001);
+	nv_icmd(dev, 0x00000805, 0x00000001);
+	nv_icmd(dev, 0x00000632, 0x00000001);
+	nv_icmd(dev, 0x00000633, 0x00000002);
+	nv_icmd(dev, 0x00000634, 0x00000003);
+	nv_icmd(dev, 0x00000635, 0x00000004);
+	nv_icmd(dev, 0x00000654, 0x3f800000);
+	nv_icmd(dev, 0x00000657, 0x3f800000);
+	nv_icmd(dev, 0x00000655, 0x3f800000);
+	nv_icmd(dev, 0x00000656, 0x3f800000);
+	nv_icmd(dev, 0x000006cd, 0x3f800000);
+	nv_icmd(dev, 0x000007f5, 0x3f800000);
+	nv_icmd(dev, 0x000007dc, 0x39291909);
+	nv_icmd(dev, 0x000007dd, 0x79695949);
+	nv_icmd(dev, 0x000007de, 0xb9a99989);
+	nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
+	nv_icmd(dev, 0x000007e8, 0x00003210);
+	nv_icmd(dev, 0x000007e9, 0x00007654);
+	nv_icmd(dev, 0x000007ea, 0x00000098);
+	nv_icmd(dev, 0x000007ec, 0x39291909);
+	nv_icmd(dev, 0x000007ed, 0x79695949);
+	nv_icmd(dev, 0x000007ee, 0xb9a99989);
+	nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
+	nv_icmd(dev, 0x000007f0, 0x00003210);
+	nv_icmd(dev, 0x000007f1, 0x00007654);
+	nv_icmd(dev, 0x000007f2, 0x00000098);
+	nv_icmd(dev, 0x000005a5, 0x00000001);
+	nv_icmd(dev, 0x00000980, 0x00000000);
+	nv_icmd(dev, 0x00000981, 0x00000000);
+	nv_icmd(dev, 0x00000982, 0x00000000);
+	nv_icmd(dev, 0x00000983, 0x00000000);
+	nv_icmd(dev, 0x00000984, 0x00000000);
+	nv_icmd(dev, 0x00000985, 0x00000000);
+	nv_icmd(dev, 0x00000986, 0x00000000);
+	nv_icmd(dev, 0x00000987, 0x00000000);
+	nv_icmd(dev, 0x00000988, 0x00000000);
+	nv_icmd(dev, 0x00000989, 0x00000000);
+	nv_icmd(dev, 0x0000098a, 0x00000000);
+	nv_icmd(dev, 0x0000098b, 0x00000000);
+	nv_icmd(dev, 0x0000098c, 0x00000000);
+	nv_icmd(dev, 0x0000098d, 0x00000000);
+	nv_icmd(dev, 0x0000098e, 0x00000000);
+	nv_icmd(dev, 0x0000098f, 0x00000000);
+	nv_icmd(dev, 0x00000990, 0x00000000);
+	nv_icmd(dev, 0x00000991, 0x00000000);
+	nv_icmd(dev, 0x00000992, 0x00000000);
+	nv_icmd(dev, 0x00000993, 0x00000000);
+	nv_icmd(dev, 0x00000994, 0x00000000);
+	nv_icmd(dev, 0x00000995, 0x00000000);
+	nv_icmd(dev, 0x00000996, 0x00000000);
+	nv_icmd(dev, 0x00000997, 0x00000000);
+	nv_icmd(dev, 0x00000998, 0x00000000);
+	nv_icmd(dev, 0x00000999, 0x00000000);
+	nv_icmd(dev, 0x0000099a, 0x00000000);
+	nv_icmd(dev, 0x0000099b, 0x00000000);
+	nv_icmd(dev, 0x0000099c, 0x00000000);
+	nv_icmd(dev, 0x0000099d, 0x00000000);
+	nv_icmd(dev, 0x0000099e, 0x00000000);
+	nv_icmd(dev, 0x0000099f, 0x00000000);
+	nv_icmd(dev, 0x000009a0, 0x00000000);
+	nv_icmd(dev, 0x000009a1, 0x00000000);
+	nv_icmd(dev, 0x000009a2, 0x00000000);
+	nv_icmd(dev, 0x000009a3, 0x00000000);
+	nv_icmd(dev, 0x000009a4, 0x00000000);
+	nv_icmd(dev, 0x000009a5, 0x00000000);
+	nv_icmd(dev, 0x000009a6, 0x00000000);
+	nv_icmd(dev, 0x000009a7, 0x00000000);
+	nv_icmd(dev, 0x000009a8, 0x00000000);
+	nv_icmd(dev, 0x000009a9, 0x00000000);
+	nv_icmd(dev, 0x000009aa, 0x00000000);
+	nv_icmd(dev, 0x000009ab, 0x00000000);
+	nv_icmd(dev, 0x000009ac, 0x00000000);
+	nv_icmd(dev, 0x000009ad, 0x00000000);
+	nv_icmd(dev, 0x000009ae, 0x00000000);
+	nv_icmd(dev, 0x000009af, 0x00000000);
+	nv_icmd(dev, 0x000009b0, 0x00000000);
+	nv_icmd(dev, 0x000009b1, 0x00000000);
+	nv_icmd(dev, 0x000009b2, 0x00000000);
+	nv_icmd(dev, 0x000009b3, 0x00000000);
+	nv_icmd(dev, 0x000009b4, 0x00000000);
+	nv_icmd(dev, 0x000009b5, 0x00000000);
+	nv_icmd(dev, 0x000009b6, 0x00000000);
+	nv_icmd(dev, 0x000009b7, 0x00000000);
+	nv_icmd(dev, 0x000009b8, 0x00000000);
+	nv_icmd(dev, 0x000009b9, 0x00000000);
+	nv_icmd(dev, 0x000009ba, 0x00000000);
+	nv_icmd(dev, 0x000009bb, 0x00000000);
+	nv_icmd(dev, 0x000009bc, 0x00000000);
+	nv_icmd(dev, 0x000009bd, 0x00000000);
+	nv_icmd(dev, 0x000009be, 0x00000000);
+	nv_icmd(dev, 0x000009bf, 0x00000000);
+	nv_icmd(dev, 0x000009c0, 0x00000000);
+	nv_icmd(dev, 0x000009c1, 0x00000000);
+	nv_icmd(dev, 0x000009c2, 0x00000000);
+	nv_icmd(dev, 0x000009c3, 0x00000000);
+	nv_icmd(dev, 0x000009c4, 0x00000000);
+	nv_icmd(dev, 0x000009c5, 0x00000000);
+	nv_icmd(dev, 0x000009c6, 0x00000000);
+	nv_icmd(dev, 0x000009c7, 0x00000000);
+	nv_icmd(dev, 0x000009c8, 0x00000000);
+	nv_icmd(dev, 0x000009c9, 0x00000000);
+	nv_icmd(dev, 0x000009ca, 0x00000000);
+	nv_icmd(dev, 0x000009cb, 0x00000000);
+	nv_icmd(dev, 0x000009cc, 0x00000000);
+	nv_icmd(dev, 0x000009cd, 0x00000000);
+	nv_icmd(dev, 0x000009ce, 0x00000000);
+	nv_icmd(dev, 0x000009cf, 0x00000000);
+	nv_icmd(dev, 0x000009d0, 0x00000000);
+	nv_icmd(dev, 0x000009d1, 0x00000000);
+	nv_icmd(dev, 0x000009d2, 0x00000000);
+	nv_icmd(dev, 0x000009d3, 0x00000000);
+	nv_icmd(dev, 0x000009d4, 0x00000000);
+	nv_icmd(dev, 0x000009d5, 0x00000000);
+	nv_icmd(dev, 0x000009d6, 0x00000000);
+	nv_icmd(dev, 0x000009d7, 0x00000000);
+	nv_icmd(dev, 0x000009d8, 0x00000000);
+	nv_icmd(dev, 0x000009d9, 0x00000000);
+	nv_icmd(dev, 0x000009da, 0x00000000);
+	nv_icmd(dev, 0x000009db, 0x00000000);
+	nv_icmd(dev, 0x000009dc, 0x00000000);
+	nv_icmd(dev, 0x000009dd, 0x00000000);
+	nv_icmd(dev, 0x000009de, 0x00000000);
+	nv_icmd(dev, 0x000009df, 0x00000000);
+	nv_icmd(dev, 0x000009e0, 0x00000000);
+	nv_icmd(dev, 0x000009e1, 0x00000000);
+	nv_icmd(dev, 0x000009e2, 0x00000000);
+	nv_icmd(dev, 0x000009e3, 0x00000000);
+	nv_icmd(dev, 0x000009e4, 0x00000000);
+	nv_icmd(dev, 0x000009e5, 0x00000000);
+	nv_icmd(dev, 0x000009e6, 0x00000000);
+	nv_icmd(dev, 0x000009e7, 0x00000000);
+	nv_icmd(dev, 0x000009e8, 0x00000000);
+	nv_icmd(dev, 0x000009e9, 0x00000000);
+	nv_icmd(dev, 0x000009ea, 0x00000000);
+	nv_icmd(dev, 0x000009eb, 0x00000000);
+	nv_icmd(dev, 0x000009ec, 0x00000000);
+	nv_icmd(dev, 0x000009ed, 0x00000000);
+	nv_icmd(dev, 0x000009ee, 0x00000000);
+	nv_icmd(dev, 0x000009ef, 0x00000000);
+	nv_icmd(dev, 0x000009f0, 0x00000000);
+	nv_icmd(dev, 0x000009f1, 0x00000000);
+	nv_icmd(dev, 0x000009f2, 0x00000000);
+	nv_icmd(dev, 0x000009f3, 0x00000000);
+	nv_icmd(dev, 0x000009f4, 0x00000000);
+	nv_icmd(dev, 0x000009f5, 0x00000000);
+	nv_icmd(dev, 0x000009f6, 0x00000000);
+	nv_icmd(dev, 0x000009f7, 0x00000000);
+	nv_icmd(dev, 0x000009f8, 0x00000000);
+	nv_icmd(dev, 0x000009f9, 0x00000000);
+	nv_icmd(dev, 0x000009fa, 0x00000000);
+	nv_icmd(dev, 0x000009fb, 0x00000000);
+	nv_icmd(dev, 0x000009fc, 0x00000000);
+	nv_icmd(dev, 0x000009fd, 0x00000000);
+	nv_icmd(dev, 0x000009fe, 0x00000000);
+	nv_icmd(dev, 0x000009ff, 0x00000000);
+	nv_icmd(dev, 0x00000468, 0x00000004);
+	nv_icmd(dev, 0x0000046c, 0x00000001);
+	nv_icmd(dev, 0x00000470, 0x00000000);
+	nv_icmd(dev, 0x00000471, 0x00000000);
+	nv_icmd(dev, 0x00000472, 0x00000000);
+	nv_icmd(dev, 0x00000473, 0x00000000);
+	nv_icmd(dev, 0x00000474, 0x00000000);
+	nv_icmd(dev, 0x00000475, 0x00000000);
+	nv_icmd(dev, 0x00000476, 0x00000000);
+	nv_icmd(dev, 0x00000477, 0x00000000);
+	nv_icmd(dev, 0x00000478, 0x00000000);
+	nv_icmd(dev, 0x00000479, 0x00000000);
+	nv_icmd(dev, 0x0000047a, 0x00000000);
+	nv_icmd(dev, 0x0000047b, 0x00000000);
+	nv_icmd(dev, 0x0000047c, 0x00000000);
+	nv_icmd(dev, 0x0000047d, 0x00000000);
+	nv_icmd(dev, 0x0000047e, 0x00000000);
+	nv_icmd(dev, 0x0000047f, 0x00000000);
+	nv_icmd(dev, 0x00000480, 0x00000000);
+	nv_icmd(dev, 0x00000481, 0x00000000);
+	nv_icmd(dev, 0x00000482, 0x00000000);
+	nv_icmd(dev, 0x00000483, 0x00000000);
+	nv_icmd(dev, 0x00000484, 0x00000000);
+	nv_icmd(dev, 0x00000485, 0x00000000);
+	nv_icmd(dev, 0x00000486, 0x00000000);
+	nv_icmd(dev, 0x00000487, 0x00000000);
+	nv_icmd(dev, 0x00000488, 0x00000000);
+	nv_icmd(dev, 0x00000489, 0x00000000);
+	nv_icmd(dev, 0x0000048a, 0x00000000);
+	nv_icmd(dev, 0x0000048b, 0x00000000);
+	nv_icmd(dev, 0x0000048c, 0x00000000);
+	nv_icmd(dev, 0x0000048d, 0x00000000);
+	nv_icmd(dev, 0x0000048e, 0x00000000);
+	nv_icmd(dev, 0x0000048f, 0x00000000);
+	nv_icmd(dev, 0x00000490, 0x00000000);
+	nv_icmd(dev, 0x00000491, 0x00000000);
+	nv_icmd(dev, 0x00000492, 0x00000000);
+	nv_icmd(dev, 0x00000493, 0x00000000);
+	nv_icmd(dev, 0x00000494, 0x00000000);
+	nv_icmd(dev, 0x00000495, 0x00000000);
+	nv_icmd(dev, 0x00000496, 0x00000000);
+	nv_icmd(dev, 0x00000497, 0x00000000);
+	nv_icmd(dev, 0x00000498, 0x00000000);
+	nv_icmd(dev, 0x00000499, 0x00000000);
+	nv_icmd(dev, 0x0000049a, 0x00000000);
+	nv_icmd(dev, 0x0000049b, 0x00000000);
+	nv_icmd(dev, 0x0000049c, 0x00000000);
+	nv_icmd(dev, 0x0000049d, 0x00000000);
+	nv_icmd(dev, 0x0000049e, 0x00000000);
+	nv_icmd(dev, 0x0000049f, 0x00000000);
+	nv_icmd(dev, 0x000004a0, 0x00000000);
+	nv_icmd(dev, 0x000004a1, 0x00000000);
+	nv_icmd(dev, 0x000004a2, 0x00000000);
+	nv_icmd(dev, 0x000004a3, 0x00000000);
+	nv_icmd(dev, 0x000004a4, 0x00000000);
+	nv_icmd(dev, 0x000004a5, 0x00000000);
+	nv_icmd(dev, 0x000004a6, 0x00000000);
+	nv_icmd(dev, 0x000004a7, 0x00000000);
+	nv_icmd(dev, 0x000004a8, 0x00000000);
+	nv_icmd(dev, 0x000004a9, 0x00000000);
+	nv_icmd(dev, 0x000004aa, 0x00000000);
+	nv_icmd(dev, 0x000004ab, 0x00000000);
+	nv_icmd(dev, 0x000004ac, 0x00000000);
+	nv_icmd(dev, 0x000004ad, 0x00000000);
+	nv_icmd(dev, 0x000004ae, 0x00000000);
+	nv_icmd(dev, 0x000004af, 0x00000000);
+	nv_icmd(dev, 0x000004b0, 0x00000000);
+	nv_icmd(dev, 0x000004b1, 0x00000000);
+	nv_icmd(dev, 0x000004b2, 0x00000000);
+	nv_icmd(dev, 0x000004b3, 0x00000000);
+	nv_icmd(dev, 0x000004b4, 0x00000000);
+	nv_icmd(dev, 0x000004b5, 0x00000000);
+	nv_icmd(dev, 0x000004b6, 0x00000000);
+	nv_icmd(dev, 0x000004b7, 0x00000000);
+	nv_icmd(dev, 0x000004b8, 0x00000000);
+	nv_icmd(dev, 0x000004b9, 0x00000000);
+	nv_icmd(dev, 0x000004ba, 0x00000000);
+	nv_icmd(dev, 0x000004bb, 0x00000000);
+	nv_icmd(dev, 0x000004bc, 0x00000000);
+	nv_icmd(dev, 0x000004bd, 0x00000000);
+	nv_icmd(dev, 0x000004be, 0x00000000);
+	nv_icmd(dev, 0x000004bf, 0x00000000);
+	nv_icmd(dev, 0x000004c0, 0x00000000);
+	nv_icmd(dev, 0x000004c1, 0x00000000);
+	nv_icmd(dev, 0x000004c2, 0x00000000);
+	nv_icmd(dev, 0x000004c3, 0x00000000);
+	nv_icmd(dev, 0x000004c4, 0x00000000);
+	nv_icmd(dev, 0x000004c5, 0x00000000);
+	nv_icmd(dev, 0x000004c6, 0x00000000);
+	nv_icmd(dev, 0x000004c7, 0x00000000);
+	nv_icmd(dev, 0x000004c8, 0x00000000);
+	nv_icmd(dev, 0x000004c9, 0x00000000);
+	nv_icmd(dev, 0x000004ca, 0x00000000);
+	nv_icmd(dev, 0x000004cb, 0x00000000);
+	nv_icmd(dev, 0x000004cc, 0x00000000);
+	nv_icmd(dev, 0x000004cd, 0x00000000);
+	nv_icmd(dev, 0x000004ce, 0x00000000);
+	nv_icmd(dev, 0x000004cf, 0x00000000);
+	nv_icmd(dev, 0x00000510, 0x3f800000);
+	nv_icmd(dev, 0x00000511, 0x3f800000);
+	nv_icmd(dev, 0x00000512, 0x3f800000);
+	nv_icmd(dev, 0x00000513, 0x3f800000);
+	nv_icmd(dev, 0x00000514, 0x3f800000);
+	nv_icmd(dev, 0x00000515, 0x3f800000);
+	nv_icmd(dev, 0x00000516, 0x3f800000);
+	nv_icmd(dev, 0x00000517, 0x3f800000);
+	nv_icmd(dev, 0x00000518, 0x3f800000);
+	nv_icmd(dev, 0x00000519, 0x3f800000);
+	nv_icmd(dev, 0x0000051a, 0x3f800000);
+	nv_icmd(dev, 0x0000051b, 0x3f800000);
+	nv_icmd(dev, 0x0000051c, 0x3f800000);
+	nv_icmd(dev, 0x0000051d, 0x3f800000);
+	nv_icmd(dev, 0x0000051e, 0x3f800000);
+	nv_icmd(dev, 0x0000051f, 0x3f800000);
+	nv_icmd(dev, 0x00000520, 0x000002b6);
+	nv_icmd(dev, 0x00000529, 0x00000001);
+	nv_icmd(dev, 0x00000530, 0xffff0000);
+	nv_icmd(dev, 0x00000531, 0xffff0000);
+	nv_icmd(dev, 0x00000532, 0xffff0000);
+	nv_icmd(dev, 0x00000533, 0xffff0000);
+	nv_icmd(dev, 0x00000534, 0xffff0000);
+	nv_icmd(dev, 0x00000535, 0xffff0000);
+	nv_icmd(dev, 0x00000536, 0xffff0000);
+	nv_icmd(dev, 0x00000537, 0xffff0000);
+	nv_icmd(dev, 0x00000538, 0xffff0000);
+	nv_icmd(dev, 0x00000539, 0xffff0000);
+	nv_icmd(dev, 0x0000053a, 0xffff0000);
+	nv_icmd(dev, 0x0000053b, 0xffff0000);
+	nv_icmd(dev, 0x0000053c, 0xffff0000);
+	nv_icmd(dev, 0x0000053d, 0xffff0000);
+	nv_icmd(dev, 0x0000053e, 0xffff0000);
+	nv_icmd(dev, 0x0000053f, 0xffff0000);
+	nv_icmd(dev, 0x00000585, 0x0000003f);
+	nv_icmd(dev, 0x00000576, 0x00000003);
+	nv_icmd(dev, 0x00000586, 0x00000040);
+	nv_icmd(dev, 0x00000582, 0x00000080);
+	nv_icmd(dev, 0x00000583, 0x00000080);
+	nv_icmd(dev, 0x000005c2, 0x00000001);
+	nv_icmd(dev, 0x00000638, 0x00000001);
+	nv_icmd(dev, 0x00000639, 0x00000001);
+	nv_icmd(dev, 0x0000063a, 0x00000002);
+	nv_icmd(dev, 0x0000063b, 0x00000001);
+	nv_icmd(dev, 0x0000063c, 0x00000001);
+	nv_icmd(dev, 0x0000063d, 0x00000002);
+	nv_icmd(dev, 0x0000063e, 0x00000001);
+	nv_icmd(dev, 0x000008b8, 0x00000001);
+	nv_icmd(dev, 0x000008b9, 0x00000001);
+	nv_icmd(dev, 0x000008ba, 0x00000001);
+	nv_icmd(dev, 0x000008bb, 0x00000001);
+	nv_icmd(dev, 0x000008bc, 0x00000001);
+	nv_icmd(dev, 0x000008bd, 0x00000001);
+	nv_icmd(dev, 0x000008be, 0x00000001);
+	nv_icmd(dev, 0x000008bf, 0x00000001);
+	nv_icmd(dev, 0x00000900, 0x00000001);
+	nv_icmd(dev, 0x00000901, 0x00000001);
+	nv_icmd(dev, 0x00000902, 0x00000001);
+	nv_icmd(dev, 0x00000903, 0x00000001);
+	nv_icmd(dev, 0x00000904, 0x00000001);
+	nv_icmd(dev, 0x00000905, 0x00000001);
+	nv_icmd(dev, 0x00000906, 0x00000001);
+	nv_icmd(dev, 0x00000907, 0x00000001);
+	nv_icmd(dev, 0x00000908, 0x00000002);
+	nv_icmd(dev, 0x00000909, 0x00000002);
+	nv_icmd(dev, 0x0000090a, 0x00000002);
+	nv_icmd(dev, 0x0000090b, 0x00000002);
+	nv_icmd(dev, 0x0000090c, 0x00000002);
+	nv_icmd(dev, 0x0000090d, 0x00000002);
+	nv_icmd(dev, 0x0000090e, 0x00000002);
+	nv_icmd(dev, 0x0000090f, 0x00000002);
+	nv_icmd(dev, 0x00000910, 0x00000001);
+	nv_icmd(dev, 0x00000911, 0x00000001);
+	nv_icmd(dev, 0x00000912, 0x00000001);
+	nv_icmd(dev, 0x00000913, 0x00000001);
+	nv_icmd(dev, 0x00000914, 0x00000001);
+	nv_icmd(dev, 0x00000915, 0x00000001);
+	nv_icmd(dev, 0x00000916, 0x00000001);
+	nv_icmd(dev, 0x00000917, 0x00000001);
+	nv_icmd(dev, 0x00000918, 0x00000001);
+	nv_icmd(dev, 0x00000919, 0x00000001);
+	nv_icmd(dev, 0x0000091a, 0x00000001);
+	nv_icmd(dev, 0x0000091b, 0x00000001);
+	nv_icmd(dev, 0x0000091c, 0x00000001);
+	nv_icmd(dev, 0x0000091d, 0x00000001);
+	nv_icmd(dev, 0x0000091e, 0x00000001);
+	nv_icmd(dev, 0x0000091f, 0x00000001);
+	nv_icmd(dev, 0x00000920, 0x00000002);
+	nv_icmd(dev, 0x00000921, 0x00000002);
+	nv_icmd(dev, 0x00000922, 0x00000002);
+	nv_icmd(dev, 0x00000923, 0x00000002);
+	nv_icmd(dev, 0x00000924, 0x00000002);
+	nv_icmd(dev, 0x00000925, 0x00000002);
+	nv_icmd(dev, 0x00000926, 0x00000002);
+	nv_icmd(dev, 0x00000927, 0x00000002);
+	nv_icmd(dev, 0x00000928, 0x00000001);
+	nv_icmd(dev, 0x00000929, 0x00000001);
+	nv_icmd(dev, 0x0000092a, 0x00000001);
+	nv_icmd(dev, 0x0000092b, 0x00000001);
+	nv_icmd(dev, 0x0000092c, 0x00000001);
+	nv_icmd(dev, 0x0000092d, 0x00000001);
+	nv_icmd(dev, 0x0000092e, 0x00000001);
+	nv_icmd(dev, 0x0000092f, 0x00000001);
+	nv_icmd(dev, 0x00000648, 0x00000001);
+	nv_icmd(dev, 0x00000649, 0x00000001);
+	nv_icmd(dev, 0x0000064a, 0x00000001);
+	nv_icmd(dev, 0x0000064b, 0x00000001);
+	nv_icmd(dev, 0x0000064c, 0x00000001);
+	nv_icmd(dev, 0x0000064d, 0x00000001);
+	nv_icmd(dev, 0x0000064e, 0x00000001);
+	nv_icmd(dev, 0x0000064f, 0x00000001);
+	nv_icmd(dev, 0x00000650, 0x00000001);
+	nv_icmd(dev, 0x00000658, 0x0000000f);
+	nv_icmd(dev, 0x000007ff, 0x0000000a);
+	nv_icmd(dev, 0x0000066a, 0x40000000);
+	nv_icmd(dev, 0x0000066b, 0x10000000);
+	nv_icmd(dev, 0x0000066c, 0xffff0000);
+	nv_icmd(dev, 0x0000066d, 0xffff0000);
+	nv_icmd(dev, 0x000007af, 0x00000008);
+	nv_icmd(dev, 0x000007b0, 0x00000008);
+	nv_icmd(dev, 0x000007f6, 0x00000001);
+	nv_icmd(dev, 0x000006b2, 0x00000055);
+	nv_icmd(dev, 0x000007ad, 0x00000003);
+	nv_icmd(dev, 0x00000937, 0x00000001);
+	nv_icmd(dev, 0x00000971, 0x00000008);
+	nv_icmd(dev, 0x00000972, 0x00000040);
+	nv_icmd(dev, 0x00000973, 0x0000012c);
+	nv_icmd(dev, 0x0000097c, 0x00000040);
+	nv_icmd(dev, 0x00000979, 0x00000003);
+	nv_icmd(dev, 0x00000975, 0x00000020);
+	nv_icmd(dev, 0x00000976, 0x00000001);
+	nv_icmd(dev, 0x00000977, 0x00000020);
+	nv_icmd(dev, 0x00000978, 0x00000001);
+	nv_icmd(dev, 0x00000957, 0x00000003);
+	nv_icmd(dev, 0x0000095e, 0x20164010);
+	nv_icmd(dev, 0x0000095f, 0x00000020);
+	nv_icmd(dev, 0x00000683, 0x00000006);
+	nv_icmd(dev, 0x00000685, 0x003fffff);
+	nv_icmd(dev, 0x00000687, 0x00000c48);
+	nv_icmd(dev, 0x000006a0, 0x00000005);
+	nv_icmd(dev, 0x00000840, 0x00300008);
+	nv_icmd(dev, 0x00000841, 0x04000080);
+	nv_icmd(dev, 0x00000842, 0x00300008);
+	nv_icmd(dev, 0x00000843, 0x04000080);
+	nv_icmd(dev, 0x00000818, 0x00000000);
+	nv_icmd(dev, 0x00000819, 0x00000000);
+	nv_icmd(dev, 0x0000081a, 0x00000000);
+	nv_icmd(dev, 0x0000081b, 0x00000000);
+	nv_icmd(dev, 0x0000081c, 0x00000000);
+	nv_icmd(dev, 0x0000081d, 0x00000000);
+	nv_icmd(dev, 0x0000081e, 0x00000000);
+	nv_icmd(dev, 0x0000081f, 0x00000000);
+	nv_icmd(dev, 0x00000848, 0x00000000);
+	nv_icmd(dev, 0x00000849, 0x00000000);
+	nv_icmd(dev, 0x0000084a, 0x00000000);
+	nv_icmd(dev, 0x0000084b, 0x00000000);
+	nv_icmd(dev, 0x0000084c, 0x00000000);
+	nv_icmd(dev, 0x0000084d, 0x00000000);
+	nv_icmd(dev, 0x0000084e, 0x00000000);
+	nv_icmd(dev, 0x0000084f, 0x00000000);
+	nv_icmd(dev, 0x00000850, 0x00000000);
+	nv_icmd(dev, 0x00000851, 0x00000000);
+	nv_icmd(dev, 0x00000852, 0x00000000);
+	nv_icmd(dev, 0x00000853, 0x00000000);
+	nv_icmd(dev, 0x00000854, 0x00000000);
+	nv_icmd(dev, 0x00000855, 0x00000000);
+	nv_icmd(dev, 0x00000856, 0x00000000);
+	nv_icmd(dev, 0x00000857, 0x00000000);
+	nv_icmd(dev, 0x00000738, 0x00000000);
+	nv_icmd(dev, 0x000006aa, 0x00000001);
+	nv_icmd(dev, 0x000006ab, 0x00000002);
+	nv_icmd(dev, 0x000006ac, 0x00000080);
+	nv_icmd(dev, 0x000006ad, 0x00000100);
+	nv_icmd(dev, 0x000006ae, 0x00000100);
+	nv_icmd(dev, 0x000006b1, 0x00000011);
+	nv_icmd(dev, 0x000006bb, 0x000000cf);
+	nv_icmd(dev, 0x000006ce, 0x2a712488);
+	nv_icmd(dev, 0x00000739, 0x4085c000);
+	nv_icmd(dev, 0x0000073a, 0x00000080);
+	nv_icmd(dev, 0x00000786, 0x80000100);
+	nv_icmd(dev, 0x0000073c, 0x00010100);
+	nv_icmd(dev, 0x0000073d, 0x02800000);
+	nv_icmd(dev, 0x00000787, 0x000000cf);
+	nv_icmd(dev, 0x0000078c, 0x00000008);
+	nv_icmd(dev, 0x00000792, 0x00000001);
+	nv_icmd(dev, 0x00000794, 0x00000001);
+	nv_icmd(dev, 0x00000795, 0x00000001);
+	nv_icmd(dev, 0x00000796, 0x00000001);
+	nv_icmd(dev, 0x00000797, 0x000000cf);
+	nv_icmd(dev, 0x00000836, 0x00000001);
+	nv_icmd(dev, 0x0000079a, 0x00000002);
+	nv_icmd(dev, 0x00000833, 0x04444480);
+	nv_icmd(dev, 0x000007a1, 0x00000001);
+	nv_icmd(dev, 0x000007a3, 0x00000001);
+	nv_icmd(dev, 0x000007a4, 0x00000001);
+	nv_icmd(dev, 0x000007a5, 0x00000001);
+	nv_icmd(dev, 0x00000831, 0x00000004);
+	nv_icmd(dev, 0x0000080c, 0x00000002);
+	nv_icmd(dev, 0x0000080d, 0x00000100);
+	nv_icmd(dev, 0x0000080e, 0x00000100);
+	nv_icmd(dev, 0x0000080f, 0x00000001);
+	nv_icmd(dev, 0x00000823, 0x00000002);
+	nv_icmd(dev, 0x00000824, 0x00000100);
+	nv_icmd(dev, 0x00000825, 0x00000100);
+	nv_icmd(dev, 0x00000826, 0x00000001);
+	nv_icmd(dev, 0x0000095d, 0x00000001);
+	nv_icmd(dev, 0x0000082b, 0x00000004);
+	nv_icmd(dev, 0x00000942, 0x00010001);
+	nv_icmd(dev, 0x00000943, 0x00000001);
+	nv_icmd(dev, 0x00000944, 0x00000022);
+	nv_icmd(dev, 0x000007c5, 0x00010001);
+	nv_icmd(dev, 0x00000834, 0x00000001);
+	nv_icmd(dev, 0x000007c7, 0x00000001);
+	nv_icmd(dev, 0x0000c1b0, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b1, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b2, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b3, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b4, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b5, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b6, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b7, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
+	nv_icmd(dev, 0x0000c1b9, 0x00fac688);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_icmd(dev, 0x00001000, 0x00000002);
+	nv_icmd(dev, 0x000006aa, 0x00000001);
+	nv_icmd(dev, 0x000006ad, 0x00000100);
+	nv_icmd(dev, 0x000006ae, 0x00000100);
+	nv_icmd(dev, 0x000006b1, 0x00000011);
+	nv_icmd(dev, 0x0000078c, 0x00000008);
+	nv_icmd(dev, 0x00000792, 0x00000001);
+	nv_icmd(dev, 0x00000794, 0x00000001);
+	nv_icmd(dev, 0x00000795, 0x00000001);
+	nv_icmd(dev, 0x00000796, 0x00000001);
+	nv_icmd(dev, 0x00000797, 0x000000cf);
+	nv_icmd(dev, 0x0000079a, 0x00000002);
+	nv_icmd(dev, 0x00000833, 0x04444480);
+	nv_icmd(dev, 0x000007a1, 0x00000001);
+	nv_icmd(dev, 0x000007a3, 0x00000001);
+	nv_icmd(dev, 0x000007a4, 0x00000001);
+	nv_icmd(dev, 0x000007a5, 0x00000001);
+	nv_icmd(dev, 0x00000831, 0x00000004);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_icmd(dev, 0x00001000, 0x00000014);
+	nv_icmd(dev, 0x00000351, 0x00000100);
+	nv_icmd(dev, 0x00000957, 0x00000003);
+	nv_icmd(dev, 0x0000095d, 0x00000001);
+	nv_icmd(dev, 0x0000082b, 0x00000004);
+	nv_icmd(dev, 0x00000942, 0x00010001);
+	nv_icmd(dev, 0x00000943, 0x00000001);
+	nv_icmd(dev, 0x000007c5, 0x00010001);
+	nv_icmd(dev, 0x00000834, 0x00000001);
+	nv_icmd(dev, 0x000007c7, 0x00000001);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_icmd(dev, 0x00001000, 0x00000001);
+	nv_icmd(dev, 0x0000080c, 0x00000002);
+	nv_icmd(dev, 0x0000080d, 0x00000100);
+	nv_icmd(dev, 0x0000080e, 0x00000100);
+	nv_icmd(dev, 0x0000080f, 0x00000001);
+	nv_icmd(dev, 0x00000823, 0x00000002);
+	nv_icmd(dev, 0x00000824, 0x00000100);
+	nv_icmd(dev, 0x00000825, 0x00000100);
+	nv_icmd(dev, 0x00000826, 0x00000001);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_wr32(dev, 0x400208, 0x00000000);
+	nv_wr32(dev, 0x404154, 0x00000400);
+
+	nvc0_grctx_generate_9097(dev);
+	nvc0_grctx_generate_902d(dev);
+	nvc0_grctx_generate_9039(dev);
+	nvc0_grctx_generate_90c0(dev);
+
+	nv_wr32(dev, 0x000260, r000260);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78..c090917 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -25,132 +25,22 @@
 #include "drmP.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"
 
-int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *size)
-{
-	int ret;
-
-	*size = ALIGN(*size, 4096);
-	if (*size == 0)
-		return -EINVAL;
-
-	ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
-			     true, false, &gpuobj->im_backing);
-	if (ret) {
-		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
-		return ret;
-	}
-
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		return ret;
-	}
-
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
-	return 0;
-}
-
-void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
-	}
-}
-
-int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
-	uint64_t vram;
-
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
-
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
-	vram    = gpuobj->vinst;
-
-	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
-	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
-	while (pte < pte_end) {
-		nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
-		nv_wr32(dev, 0x702004 + (pte * 8), 0);
-		vram += 4096;
-		pte++;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	if (1) {
-		u32 chan = nv_rd32(dev, 0x1700) << 16;
-		nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
-		nv_wr32(dev, 0x100cbc, 0x80000005);
-	}
-
-	gpuobj->im_bound = 1;
-	return 0;
-}
-
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
-
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
-
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
-	while (pte < pte_end) {
-		nv_wr32(dev, 0x702000 + (pte * 8), 0);
-		nv_wr32(dev, 0x702004 + (pte * 8), 0);
-		pte++;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	gpuobj->im_bound = 0;
-	return 0;
-}
-
-void
-nvc0_instmem_flush(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x070000, 1);
-	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
-		NV_ERROR(dev, "PRAMIN flush timeout\n");
-}
+struct nvc0_instmem_priv {
+	struct nouveau_gpuobj  *bar1_pgd;
+	struct nouveau_channel *bar1;
+	struct nouveau_gpuobj  *bar3_pgd;
+	struct nouveau_channel *bar3;
+	struct nouveau_gpuobj  *chan_pgd;
+};
 
 int
 nvc0_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 *buf;
-	int i;
 
-	dev_priv->susres.ramin_copy = vmalloc(65536);
-	if (!dev_priv->susres.ramin_copy)
-		return -ENOMEM;
-	buf = dev_priv->susres.ramin_copy;
-
-	for (i = 0; i < 65536; i += 4)
-		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+	dev_priv->ramin_available = false;
 	return 0;
 }
 
@@ -158,73 +48,184 @@
 nvc0_instmem_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 *buf = dev_priv->susres.ramin_copy;
-	u64 chan;
-	int i;
+	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
 
-	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
-	nv_wr32(dev, 0x001700, chan >> 16);
+	nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+	nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+	nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+	dev_priv->ramin_available = true;
+}
 
-	for (i = 0; i < 65536; i += 4)
-		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
-	vfree(dev_priv->susres.ramin_copy);
-	dev_priv->susres.ramin_copy = NULL;
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan;
 
-	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
+	chan = *pchan;
+	*pchan = NULL;
+	if (!chan)
+		return;
+
+	nouveau_vm_ref(NULL, &chan->vm, NULL);
+	if (chan->ramin_heap.free_stack.next)
+		drm_mm_takedown(&chan->ramin_heap);
+	nouveau_gpuobj_ref(NULL, &chan->ramin);
+	kfree(chan);
+}
+
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+		 struct nouveau_channel **pchan,
+		 struct nouveau_gpuobj *pgd, u64 vm_size)
+{
+	struct nouveau_channel *chan;
+	int ret;
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+	chan->dev = dev;
+
+	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+	if (ret) {
+		nvc0_channel_del(&chan);
+		return ret;
+	}
+
+	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+	if (ret) {
+		nvc0_channel_del(&chan);
+		return ret;
+	}
+
+	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+	if (ret) {
+		nvc0_channel_del(&chan);
+		return ret;
+	}
+
+	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
+
+	*pchan = chan;
+	return 0;
 }
 
 int
 nvc0_instmem_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
-	int ret, i;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct pci_dev *pdev = dev->pdev;
+	struct nvc0_instmem_priv *priv;
+	struct nouveau_vm *vm = NULL;
+	int ret;
 
-	dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
-	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
-	imem = 4096 + 4096 + 32768;
-
-	nv_wr32(dev, 0x001700, chan >> 16);
-
-	/* channel setup */
-	nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
-	nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
-	nv_wr32(dev, 0x700208, lower_32_bits(lim3));
-	nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
-
-	/* point pgd -> pgt */
-	nv_wr32(dev, 0x701000, 0);
-	nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
-
-	/* point pgt -> physical vram for channel */
-	pgt3 = 0x2000;
-	for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
-		nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
-		nv_wr32(dev, 0x700004 + pgt3, 0);
-	}
-
-	/* clear rest of pgt */
-	for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
-		nv_wr32(dev, 0x700000 + pgt3, 0);
-		nv_wr32(dev, 0x700004 + pgt3, 0);
-	}
-
-	/* point bar3 at the channel */
-	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-
-	/* Global PRAMIN heap */
-	ret = drm_mm_init(&dev_priv->ramin_heap, imem,
-			  dev_priv->ramin_size - imem);
-	if (ret) {
-		NV_ERROR(dev, "Failed to init RAMIN heap\n");
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
 		return -ENOMEM;
-	}
+	pinstmem->priv = priv;
 
+	/* BAR3 VM */
+	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+			     &dev_priv->bar3_vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL,
+				 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+				 NVOBJ_FLAG_DONT_MAP |
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &dev_priv->bar3_vm->pgt[0].obj[0]);
+	if (ret)
+		goto error;
+	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+			       priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+	if (ret)
+		goto error;
+
+	/* BAR1 VM */
+	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+			       priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+	if (ret)
+		goto error;
+
+	/* channel vm */
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
+	if (ret)
+		goto error;
+
+	nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	nvc0_instmem_resume(dev);
 	return 0;
+error:
+	nvc0_instmem_takedown(dev);
+	return ret;
 }
 
 void
 nvc0_instmem_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+	struct nouveau_vm *vm = NULL;
+
+	nvc0_instmem_suspend(dev);
+
+	nv_wr32(dev, 0x1704, 0x00000000);
+	nv_wr32(dev, 0x1714, 0x00000000);
+
+	nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
+	nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
+
+	nvc0_channel_del(&priv->bar1);
+	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
+
+	nvc0_channel_del(&priv->bar3);
+	nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+	nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+	dev_priv->engine.instmem.priv = NULL;
+	kfree(priv);
 }
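
A note on the instance-block layout established by nvc0_channel_new() above: the page-directory address is written at +0x200/+0x204 and the VM limit (vm_size - 1) at +0x208/+0x20c, and nvc0_instmem_resume() then points BAR1/BAR3 at these channels by writing their instance addresses, shifted right by 12 and ORed with enable flags, into 0x001704/0x001714. A minimal sketch of that layout, with hypothetical names, purely for illustration:

#include <stdint.h>

/* Illustrative only -- offsets mirror the nv_wo32() calls in
 * nvc0_channel_new(); the struct and helper names are hypothetical. */
struct chan_instblk_layout {
	uint32_t pd_lo;     /* +0x200: low 32 bits of page-directory address  */
	uint32_t pd_hi;     /* +0x204: high 32 bits of page-directory address */
	uint32_t limit_lo;  /* +0x208: low 32 bits of (vm_size - 1)           */
	uint32_t limit_hi;  /* +0x20c: high 32 bits of (vm_size - 1)          */
};

static void fill_instblk(struct chan_instblk_layout *ib,
			 uint64_t pgd_vinst, uint64_t vm_size)
{
	ib->pd_lo    = (uint32_t)pgd_vinst;
	ib->pd_hi    = (uint32_t)(pgd_vinst >> 32);
	ib->limit_lo = (uint32_t)(vm_size - 1);
	ib->limit_hi = (uint32_t)((vm_size - 1) >> 32);
}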
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
new file mode 100644
index 0000000..4b9251b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+		struct nouveau_gpuobj *pgt[2])
+{
+	u32 pde[2] = { 0, 0 };
+
+	if (pgt[0])
+		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+	if (pgt[1])
+		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+
+	nv_wo32(pgd, (index * 8) + 0, pde[0]);
+	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+	phys >>= 8;
+
+	phys |= 0x00000001; /* present */
+//	if (vma->access & NV_MEM_ACCESS_SYS)
+//		phys |= 0x00000002;
+
+	phys |= ((u64)target  << 32);
+	phys |= ((u64)memtype << 36);
+
+	return phys;
+}
+
+void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+	u32 next = 1 << (vma->node->type - 8);
+
+	phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		phys += next;
+		pte  += 8;
+	}
+}
+
+void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       u32 pte, dma_addr_t *list, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct drm_device *dev = vm->dev;
+	struct nouveau_vm_pgd *vpgd;
+	u32 r100c80, engine;
+
+	pinstmem->flush(vm->dev);
+
+	if (vm == dev_priv->chan_vm)
+		engine = 1;
+	else
+		engine = 5;
+
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		r100c80 = nv_rd32(dev, 0x100c80);
+		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
+		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
+		if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
+			NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+	}
+}
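
For reference, the page-table entry format built by nvc0_vm_addr() and written by nvc0_vm_map() above packs the physical address in 256-byte units together with a present bit, a target field and a memory-type field; the per-page increment in nvc0_vm_map() is therefore 1 << (type - 8). A hedged sketch (helper name hypothetical):

#include <stdint.h>

/* Minimal sketch of the PTE packing done by nvc0_vm_addr() above. */
static uint64_t nvc0_pte(uint64_t phys, uint32_t memtype, uint32_t target)
{
	uint64_t pte = phys >> 8;        /* address stored in 256-byte units */

	pte |= 0x00000001;               /* bit 0: present                   */
	pte |= (uint64_t)target  << 32;  /* target field at bit 32 (0 = VRAM) */
	pte |= (uint64_t)memtype << 36;  /* memory/storage type at bit 36     */
	return pte;
}

/* Because the address field is pre-shifted by 8, stepping to the next page
 * of a 2^type-byte mapping adds 1 << (type - 8), e.g. 16 for 4 KiB
 * (type == 12) pages -- exactly the "next" computed in nvc0_vm_map(). */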
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
new file mode 100644
index 0000000..858eda5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+	case 0x0000:
+	case 0xfe00:
+	case 0xdb00:
+	case 0x1100:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+	      u32 type, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	struct nouveau_vram *vram;
+	int ret;
+
+	size  >>= 12;
+	align >>= 12;
+	ncmin >>= 12;
+
+	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+	if (!vram)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vram->regions);
+	vram->dev = dev_priv->dev;
+	vram->memtype = type;
+	vram->size = size;
+
+	mutex_lock(&mm->mutex);
+	do {
+		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+		if (ret) {
+			mutex_unlock(&mm->mutex);
+			nv50_vram_del(dev, &vram);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &vram->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&mm->mutex);
+
+	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+	vram->offset = (u64)r->offset << 12;
+	*pvram = vram;
+	return 0;
+}
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
+	dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+	dev_priv->vram_rblock_size = 4096;
+	return 0;
+}
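
nvc0_vram_new() above works in 4 KiB pages internally: size, align and ncmin are shifted down by 12 before calling nouveau_mm_get(), and the first node's offset is shifted back up when filling in vram->offset. A small illustrative sketch, with arbitrary values:

#include <stdint.h>

/* 64 MiB request with 128 KiB alignment, expressed in pages for the
 * allocator and converted back to a byte offset afterwards.  Sketch only. */
static uint64_t vram_units_example(uint64_t node_offset_pages /* from allocator */)
{
	uint64_t size_pages  = (64ull << 20) >> 12;   /* 16384 pages */
	uint32_t align_pages = (128u << 10) >> 12;    /* 32 pages    */

	(void)size_pages;
	(void)align_pages;
	return node_offset_pages << 12;               /* byte offset */
}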
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a5..fe0f253 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
 #define NV_PCRTC_START					0x00600800
 #define NV_PCRTC_CONFIG					0x00600804
 #	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA		(1 << 0)
-#	define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
+#	define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC		(4 << 0)
+#	define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
 #define NV_PCRTC_CURSOR_CONFIG				0x00600810
 #	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE		(1 << 0)
 #	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE	(1 << 4)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 6cae4f2..e47eecf 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,10 +65,13 @@
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
-	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
+	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+	radeon_trace_points.o ni.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
 radeon-$(CONFIG_ACPI) += radeon_acpi.o
 
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index c714179..c61c3fe 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -37,6 +37,8 @@
 #define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
 #define GRAPH_OBJECT_TYPE_ROUTER                  0x4
 /* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH            0x6  
+#define GRAPH_OBJECT_TYPE_GENERIC                 0x7
 
 /****************************************************/
 /* Encoder Object ID Definition                     */
@@ -64,6 +66,9 @@
 #define ENCODER_OBJECT_ID_VT1623                  0x10
 #define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
 #define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+#define ENCODER_OBJECT_ID_ALMOND                  0x22
+#define ENCODER_OBJECT_ID_TRAVIS                  0x23
+#define ENCODER_OBJECT_ID_NUTMEG                  0x22
 /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
@@ -108,6 +113,7 @@
 #define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
 
 /* deleted */
 
@@ -124,6 +130,7 @@
 #define GENERIC_OBJECT_ID_GLSYNC                  0x01
 #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
 #define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN              0x04        //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
 
 /****************************************************/
 /* Graphics Object ENUM ID Definition               */
@@ -360,6 +367,26 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
 
+#define ENCODER_ALMOND_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
@@ -421,6 +448,14 @@
                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -512,6 +547,7 @@
 #define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
@@ -593,6 +629,14 @@
                                                  GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
 
+#define CONNECTOR_LVDS_eDP_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Router Object ID definition - Shared with BIOS   */
 /****************************************************/
@@ -621,6 +665,10 @@
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
 
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1        (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Object Cap definition - Shared with BIOS         */
 /****************************************************/
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 05efb5b..258fa5e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -734,16 +734,16 @@
 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
 {
 	uint8_t attr = U8((*ptr)++);
-	uint32_t dst, src1, src2, saved;
+	uint32_t dst, mask, src, saved;
 	int dptr = *ptr;
 	SDEBUG("   dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
-	SDEBUG("   src1: ");
-	src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
-	SDEBUG("   src2: ");
-	src2 = atom_get_src(ctx, attr, ptr);
-	dst &= src1;
-	dst |= src2;
+	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+	SDEBUG("   mask: 0x%08x", mask);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= mask;
+	dst |= src;
 	SDEBUG("   dst: ");
 	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 }
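
The rename above makes the mask op's semantics explicit: the destination is ANDed with an immediate mask and then ORed with the source operand. A worked example with arbitrary values:

#include <stdint.h>

static uint32_t atom_mask_example(void)
{
	uint32_t dst  = 0x12345678;
	uint32_t mask = 0xffff0000;   /* keep the high half of dst */
	uint32_t src  = 0x0000abcd;   /* merge in a new low half   */

	dst &= mask;                  /* 0x12340000 */
	dst |= src;                   /* 0x1234abcd */
	return dst;
}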
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index fe359a2..04b269d 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -73,8 +73,18 @@
 #define ATOM_PPLL1            0
 #define ATOM_PPLL2            1
 #define ATOM_DCPLL            2
+#define ATOM_PPLL0            2
+#define ATOM_EXT_PLL1         8
+#define ATOM_EXT_PLL2         9
+#define ATOM_EXT_CLOCK        10
 #define ATOM_PPLL_INVALID     0xFF
 
+#define ENCODER_REFCLK_SRC_P1PLL       0       
+#define ENCODER_REFCLK_SRC_P2PLL       1
+#define ENCODER_REFCLK_SRC_DCPLL       2
+#define ENCODER_REFCLK_SRC_EXTCLK      3
+#define ENCODER_REFCLK_SRC_INVALID     0xFF
+
 #define ATOM_SCALER1          0
 #define ATOM_SCALER2          1
 
@@ -192,6 +202,9 @@
                                   /*Image can't be updated, while Driver needs to carry the new table! */
 }ATOM_COMMON_TABLE_HEADER;
 
+/****************************************************************************/	
+// Structure stores the ROM header.
+/****************************************************************************/	
 typedef struct _ATOM_ROM_HEADER
 {
   ATOM_COMMON_TABLE_HEADER		sHeader;
@@ -221,6 +234,9 @@
 	#define	USHORT	void*
 #endif
 
+/****************************************************************************/	
+// Structures used in Command.mtb 
+/****************************************************************************/	
 typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
   USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
@@ -312,6 +328,7 @@
 #define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
 #define HPDInterruptService                      ReadHWAssistedI2CStatus
 #define EnableVGA_Access                         GetSCLKOverMCLKRatio
+#define GetDispObjectInfo                        EnableYUV 
 
 typedef struct _ATOM_MASTER_COMMAND_TABLE
 {
@@ -357,6 +374,24 @@
 /****************************************************************************/	
 #define COMPUTE_MEMORY_PLL_PARAM        1
 #define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/	
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/	
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
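
For clarity, the little-endian layout of ATOM_ADJUST_MEMORY_CLOCK_FREQ above places the clock (10 kHz units) in bits 23:0, the memory-module number in bits 30:24 and the pointer-return flag in bit 31, which is why POINTER_RETURN_FLAG is 0x80 in the top byte. A hedged packing sketch (helper name hypothetical):

#include <stdint.h>

static uint32_t pack_adjust_mclk(uint32_t clk_10khz, uint32_t module, int ret_ptr)
{
	return (clk_10khz & 0xffffff) |           /* bits 23:0  clock        */
	       ((module & 0x7f) << 24) |          /* bits 30:24 module number */
	       ((uint32_t)(ret_ptr ? 1 : 0) << 31); /* bit 31   return-pointer flag */
}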
 
 typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
 {
@@ -440,6 +475,26 @@
 #endif
 }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
 
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  union
+  {
+    UCHAR   ucCntlFlag;                       //Output Flags
+    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR   ucReserved;                       
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
 typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
 {
   ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -583,6 +638,7 @@
 #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK				0x01
 #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ		0x00
 #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ		0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ		0x02
 #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK				  0x04
 #define ATOM_ENCODER_CONFIG_LINKA								  0x00
 #define ATOM_ENCODER_CONFIG_LINKB								  0x04
@@ -608,6 +664,9 @@
 #define ATOM_ENCODER_MODE_TV											13
 #define ATOM_ENCODER_MODE_CV											14
 #define ATOM_ENCODER_MODE_CRT											15
+#define ATOM_ENCODER_MODE_DVO											16
+#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP    // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                  5                       // For DP1.2
 
 typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
 {
@@ -661,6 +720,7 @@
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
 #define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
 #define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
@@ -671,24 +731,34 @@
 #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
 #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
 
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
 // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
 typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
 {
 #if ATOM_BIG_ENDIAN
     UCHAR ucReserved1:1;
-    UCHAR ucDigSel:3;             // =0: DIGA/B/C/D/E/F
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
     UCHAR ucReserved:3;
     UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
 #else
     UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
     UCHAR ucReserved:3;
-    UCHAR ucDigSel:3;             // =0: DIGA/B/C/D/E/F
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
     UCHAR ucReserved1:1;
 #endif
 }ATOM_DIG_ENCODER_CONFIG_V3;
 
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
 #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL					  0x70
-
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER					  0x50
 
 typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
 {
@@ -707,6 +777,56 @@
   UCHAR ucReserved;
 }DIG_ENCODER_CONTROL_PARAMETERS_V3;
 
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI           
+// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:2;
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed comparing to previous version
+#else
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed comparing to previous version
+    UCHAR ucReserved:2;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ		  0x02
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER					  0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;      // in 10KHz; for bios convenient
+  union{
+  ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;                              
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucHPD_ID;           // HPD ID (1-6). =0 means to skip HPD programming. New comparing to previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
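
As a quick illustration of the V4 config byte defined above (little-endian bitfield order): the DP link rate occupies bits 1:0 and the DIG selector bits 6:4, matching the ATOM_ENCODER_CONFIG_V4_* masks. Sketch only; the helper name is hypothetical:

#include <stdint.h>

static uint8_t encoder_cfg_v4(uint8_t dig /* 0..5 */, uint8_t dp_rate /* 0..2 */)
{
	return (uint8_t)((dp_rate & 0x03) |     /* bits 1:0 DP link rate */
			 ((dig & 0x07) << 4));  /* bits 6:4 DIG selector */
}

/* e.g. encoder_cfg_v4(2, 2) == 0x22: DIG2 at 5.4 GHz, i.e.
 * ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER | ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ. */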
 
 // define ucBitPerColor: 
 #define PANEL_BPC_UNDEFINE                               0x00
@@ -893,6 +1013,7 @@
 #endif
 }ATOM_DIG_TRANSMITTER_CONFIG_V3;
 
+
 typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
 {
 	union
@@ -936,6 +1057,149 @@
 #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2           	0x40	//CD
 #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3           	0x80	//EF
 
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+ 	union
+ 	{  
+ 	  UCHAR ucLaneSet;
+ 	  struct {
+#if ATOM_BIG_ENDIAN
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+#else
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+#endif
+ 		};
+ 	}; 
+}ATOM_DP_VS_MODE_V4;
+ 
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;		// in 10KHz; for bios convenient
+    USHORT usInitInfo;			// when init uniphy,lower 8bit is used for connector type defined in objectid.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode     Redefined comparing to previous version
+  };
+  union
+  {
+  ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;				    // define as ATOM_TRANSMITER_ACTION_XXX	                        
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR			0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT				          0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA  			            0x00			
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB				            0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER		          0x00				 
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER		          0x08				
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL		                0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL		                0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // Changed comparing to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3           	0x80	//EF
+
+
+/****************************************************************************/	
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+  USHORT usPixelClock;      // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT 
+  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;          // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucAction;          // 
+  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucBitPerColor;     // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+  UCHAR  ucReserved;        
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ		  0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK		    0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1		            0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2		            0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3		            0x20
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+
 /****************************************************************************/	
 // Structures used by DAC1OuputControlTable
 //                    DAC2OuputControlTable
@@ -1142,6 +1406,7 @@
 #define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
 #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
 
+
 typedef struct _PIXEL_CLOCK_PARAMETERS_V3
 {
   USHORT usPixelClock;                // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@ -1202,6 +1467,55 @@
 #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
 #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
 
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
+#else
+  ULONG  ulPixelClock:24;     // target the pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+  union{
+    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency 
+    ULONG ulDispEngClkFreq;                  // dispclk frequency
+  };
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL 
+                             // bit[1]= when VGA timing is used. 
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+                             // bit[4]= RefClock source for PPLL. 
+                             // =0: XTLAIN( default mode )
+	                           // =1: other external clock source, which is pre-defined                                            
+                             //     by VBIOS depend on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
+
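For reference, CRTC_PIXEL_CLOCK_FREQ above widens the pixel clock to 24 bits (10 kHz units) and carries the CRTC id in the top byte. An illustrative packing helper (name hypothetical):

#include <stdint.h>

static uint32_t crtc_pclk_freq(uint32_t pclk_10khz, uint8_t crtc_id)
{
	return (pclk_10khz & 0xffffff) |        /* bits 23:0  pixel clock */
	       ((uint32_t)crtc_id << 24);       /* bits 31:24 CRTC id     */
}

/* e.g. a 148.5 MHz pixel clock on CRTC0: crtc_pclk_freq(14850, 0). */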
 typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
 {
   PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@ -1241,10 +1555,11 @@
 typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
 {
 	USHORT usPixelClock;                    // target pixel clock
-	UCHAR ucTransmitterID;                  // transmitter id defined in objectid.h
+	UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
 	UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
   UCHAR ucDispPllConfig;                 // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
-	UCHAR ucReserved[3];
+  UCHAR ucExtTransmitterID;               // external encoder id.
+	UCHAR ucReserved[2];
 }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
 
 // usDispPllConfig v1.2 for RoadRunner
@@ -1314,7 +1629,7 @@
 typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
 {
   USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
-  USHORT    usVRAMAddress;      //Adress in Frame Buffer where to pace raw EDID
+  USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
   USHORT    usStatus;           //When use output: lower byte EDID checksum, high byte hardware status
                                 //WHen use input:  lower byte as 'byte to read':currently limited to 128byte or 1byte
   UCHAR     ucSlaveAddr;        //Read from which slave
@@ -1358,6 +1673,7 @@
 /**************************************************************************/
 #define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
 
+
 /****************************************************************************/	
 // Structures used by PowerConnectorDetectionTable
 /****************************************************************************/	
@@ -1438,6 +1754,31 @@
 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
 
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+    
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
+
 #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
 
 /**************************************************************************/
@@ -1706,7 +2047,7 @@
   USHORT        StandardVESA_Timing;      // Only used by Bios
   USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
   USHORT        DAC_Info;                 // Will be obsolete from R600
-  USHORT        LVDS_Info;                // Shared by various SW components,latest version 1.1 
+  USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info 
   USHORT        TMDS_Info;                // Will be obsolete from R600
   USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
   USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
@@ -1736,12 +2077,16 @@
 	USHORT				PowerSourceInfo;					// Shared by various SW components, latest versoin 1.1
 }ATOM_MASTER_LIST_OF_DATA_TABLES;
 
+// For backward compatible 
+#define LVDS_Info                LCD_Info
+
 typedef struct _ATOM_MASTER_DATA_TABLE
 { 
   ATOM_COMMON_TABLE_HEADER sHeader;  
   ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
 }ATOM_MASTER_DATA_TABLE;
 
+
 /****************************************************************************/	
 // Structure used in MultimediaCapabilityInfoTable
 /****************************************************************************/	
@@ -1776,6 +2121,7 @@
   UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
 }ATOM_MULTIMEDIA_CONFIG_INFO;
 
+
 /****************************************************************************/	
 // Structures used in FirmwareInfoTable
 /****************************************************************************/	
@@ -2031,8 +2377,47 @@
   UCHAR                           ucReserved4[3];
 }ATOM_FIRMWARE_INFO_V2_1;
 
+//the structure below to be used from NI
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved[2];
+  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock  ?
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.          
+  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulReserved5;                //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+  UCHAR                           ucReserved9[3];
+  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  USHORT                          usReserved12;
+  ULONG                           ulReserved10[3];            // New added comparing to previous version
+}ATOM_FIRMWARE_INFO_V2_2;
 
-#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_1
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
 
 /****************************************************************************/	
 // Structures used in IntegratedSystemInfoTable
@@ -2212,7 +2597,7 @@
 ucDockingPinBit:     which bit in this register to read the pin status;
 ucDockingPinPolarity:Polarity of the pin when docked;
 
-ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
 
 usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
 
@@ -2250,6 +2635,14 @@
 usMinDownStreamHTLinkWidth:  same as above.
 */
 
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition 
+#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH    // this define reflects the max defined CPU code
 
 #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
 #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
@@ -2778,8 +3171,88 @@
 #define PANEL_RANDOM_DITHER   0x80
 #define PANEL_RANDOM_DITHER_MASK   0x80
 
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this 
 
-#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12
+/****************************************************************************/	
+// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families:  NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+typedef struct _ATOM_LCD_INFO_V13
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  ULONG               ulReserved0;
+  UCHAR               ucLCD_Misc;                // Reorganized in V13
+                                                 // Bit0: {=0:single, =1:dual},
+                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
+                                                 // Bit3:2: {Grey level}
+                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) 
+                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?  
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13 
+                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+                                                 // Bit7-3: Reserved 
+  UCHAR               ucPanelInfoSize;					 //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
+
+  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+
+  UCHAR               ucOffDelay_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
+  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
+  UCHAR               ucReserved1;
+
+  ULONG               ulReserved[4];
+}ATOM_LCD_INFO_V13;  
+
+#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+ 
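A small decoding sketch for the colour-depth field of ucLCD_Misc (bits 6:4), following the EDID 1.4 table above; the helper name is hypothetical and codes 0 and 7 are treated as undefined/reserved:

#include <stdint.h>

static unsigned lcd_misc_bpc(uint8_t lcd_misc)
{
	unsigned code = (lcd_misc & 0x70) >> 4;	/* ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK */

	return (code >= 1 && code <= 6) ? 4 + 2 * code : 0;
}

/* e.g. ATOM_PANEL_MISC_V13_8BIT_PER_COLOR (0x20) -> code 2 -> 8 bits per primary colour. */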
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. 
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL 
+#define	LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID no change comparing to previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define	LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define	LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP no change comparing to previous version
 
 typedef struct  _ATOM_PATCH_RECORD_MODE
 {
@@ -2944,9 +3417,9 @@
 #define MAX_DTD_MODE_IN_VRAM            6
 #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT) 
 #define ATOM_STD_MODE_SUPPORT_TBL_SIZE  32*8                         //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
-#define DFP_ENCODER_TYPE_OFFSET					0x80
-#define DP_ENCODER_LANE_NUM_OFFSET			0x84
-#define DP_ENCODER_LINK_RATE_OFFSET			0x88
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)    
+#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )        
 
 #define ATOM_HWICON1_SURFACE_ADDR       0
 #define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
@@ -2997,14 +3470,16 @@
 #define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
 #define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
 
-#define ATOM_DP_TRAINING_TBL_ADDR				(ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)       
+#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
 
-#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR+256)       
-#define ATOM_STACK_STORAGE_END          ATOM_STACK_STORAGE_START+512        
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)       
+#define ATOM_STACK_STORAGE_END          ATOM_STACK_STORAGE_START + 512        
 
 //The size below is in Kb!
 #define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
    
+#define ATOM_VRAM_RESERVE_V2_SIZE      32
+
 #define	ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
 #define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
 #define	ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
@@ -3206,6 +3681,15 @@
   USHORT    usGraphicObjIds[1];                             //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
 }ATOM_DISPLAY_OBJECT_PATH;
 
+typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder 
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
 typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
 {
   UCHAR                           ucNumOfDispPath;
@@ -3261,6 +3745,47 @@
 #define EXT_AUXDDC_LUTINDEX_7                   7
 #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
 
+//ucChannelMapping are defined as following
+//for DP connector, eDP, DP to VGA/LVDS 
+//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI, in dual link case, both links have to have same mapping. 
+//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
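The two mapping structures above pack one 2-bit source selector per lane into a single byte; ucChannelMapping == 0 means the default one-to-one mapping. As a minimal illustrative sketch (assuming a little-endian build; the helper name is made up and is not part of this patch), the same information can be pulled out of the raw byte directly:

static inline unsigned int dp_lane_source(unsigned char channel_mapping,
					  unsigned int lane)
{
	/* DP_Lane0 lives in bits [1:0], DP_Lane1 in [3:2], and so on */
	return (channel_mapping >> (lane * 2)) & 0x3;	/* 0..3 selects GPU pin TX0..TX3 */
}
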
 typedef struct _EXT_DISPLAY_PATH
 {
   USHORT  usDeviceTag;                    //A bit vector to show what devices are supported 
@@ -3269,7 +3794,13 @@
   UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
   UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
   USHORT  usExtEncoderObjId;              //external encoder object id
-  USHORT  usReserved[3]; 
+  union{
+    UCHAR   ucChannelMapping;                  // if ucChannelMapping=0, use the default one-to-one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR   ucReserved;
+  USHORT  usReserved[2]; 
 }EXT_DISPLAY_PATH;
    
 #define NUMBER_OF_UCHAR_FOR_GUID          16
@@ -3281,7 +3812,8 @@
   UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16 byte long string
   EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
   UCHAR                    ucChecksum;                            // a  simple Checksum of the sum of whole structure equal to 0x0. 
-  UCHAR                    Reserved [7];                          // for potential expansion
+  UCHAR                    uc3DStereoPinId;                       // use for eDP panel
+  UCHAR                    Reserved [6];                          // for potential expansion
 }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
 //Related definitions, all records are differnt but they have a commond header
@@ -3311,10 +3843,11 @@
 #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
 #define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
 #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
 
 
 //Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
 
 typedef struct  _ATOM_I2C_RECORD
 {
@@ -3441,6 +3974,26 @@
   UCHAR                       ucPadding[2];
 }ATOM_ENCODER_DVO_CF_RECORD;
 
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2     0x01         // DP1.2 HBR2 is supported by this path
+
+typedef struct  _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;         
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:15;        // Bit1-15 may be defined for other capability in future
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+#else
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+      USHORT                  usReserved:15;        // Bit1-15 may be defined for other capability in future
+#endif
+    };
+  }; 
+}ATOM_ENCODER_CAP_RECORD;                             
+
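ATOM_ENCODER_CAP_RECORD exposes the DP 1.2 HBR2 capability both as the named ATOM_ENCODER_CAP_RECORD_HBR2 bit in usEncoderCap and as the usHBR2Cap bitfield. A hedged sketch of consuming it while walking object records follows; the 'record' pointer, the surrounding walk, and the assumption that the common record header carries a ucRecordType field are not shown in this patch.

/* 'record' is assumed to point at an ATOM_COMMON_RECORD_HEADER obtained
 * while iterating an object's record list.
 */
bool supports_hbr2 = false;

if (record->ucRecordType == ATOM_ENCODER_CAP_RECORD_TYPE) {
	ATOM_ENCODER_CAP_RECORD *cap = (ATOM_ENCODER_CAP_RECORD *)record;

	/* the named bit and the usHBR2Cap bitfield describe the same capability */
	supports_hbr2 = !!(le16_to_cpu(cap->usEncoderCap) & ATOM_ENCODER_CAP_RECORD_HBR2);
}
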
 // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
@@ -3580,6 +4133,11 @@
 #define	VOLTAGE_CONTROL_ID_DAC								0x02									//I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
 #define	VOLTAGE_CONTROL_ID_VT116xM						0x03									//I2C control, used for R6xx Core Voltage
 #define VOLTAGE_CONTROL_ID_DS4402							0x04									
+#define VOLTAGE_CONTROL_ID_UP6266 						0x05									
+#define VOLTAGE_CONTROL_ID_SCORPIO						0x06
+#define	VOLTAGE_CONTROL_ID_VT1556M						0x07									
+#define	VOLTAGE_CONTROL_ID_CHL822x						0x08									
+#define	VOLTAGE_CONTROL_ID_VT1586M						0x09
 
 typedef struct  _ATOM_VOLTAGE_OBJECT
 {
@@ -3670,66 +4228,157 @@
 #define POWER_SENSOR_GPIO								0x01
 #define POWER_SENSOR_I2C								0x02
 
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table        
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
+  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK  
+  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK 
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
 typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
   ULONG  ulBootUpEngineClock;
   ULONG  ulDentistVCOFreq;          
   ULONG  ulBootUpUMAClock;          
-  ULONG  ulReserved1[8];            
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];            
   ULONG  ulBootUpReqDisplayVector;
   ULONG  ulOtherDisplayMisc;
   ULONG  ulGPUCapInfo;
-  ULONG  ulReserved2[3];            
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;   
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;           
   ULONG  ulSystemConfig;            
   ULONG  ulCPUCapInfo;              
-  USHORT usMaxNBVoltage;  
-  USHORT usMinNBVoltage;  
-  USHORT usBootUpNBVoltage;         
-  USHORT usExtDispConnInfoOffset;  
-  UCHAR  ucHtcTmpLmt;   
-  UCHAR  ucTjOffset;    
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
   UCHAR  ucMemoryType;  
   UCHAR  ucUMAChannelNumber;
   ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];  
   ULONG  ulCSR_M3_ARB_CNTL_UVD[10]; 
   ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
-  ULONG  ulReserved3[42]; 
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulReserved3[21]; 
   ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
 }ATOM_INTEGRATED_SYSTEM_INFO_V6;   
 
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT                       0x01
+
+
 /**********************************************************************************************************************
-// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
-//ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. 
-//ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit. 
-//ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit. 
-//ulReserved1[8]                    Reserved by now, must be 0x0. 
-//ulBootUpReqDisplayVector	        VBIOS boot up display IDs
-//                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
-//                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
-//                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
-//                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
-//                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
-//                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
-//                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
-//                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
-//                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
-//ulOtherDisplayMisc      	        Other display related flags, not defined yet. 
-//ulGPUCapInfo                      TBD
-//ulReserved2[3]                    must be 0x0 for the reserved.
-//ulSystemConfig                    TBD
-//ulCPUCapInfo                      TBD
-//usMaxNBVoltage                    High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. 
-//usMinNBVoltage                    Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usBootUpNBVoltage                 Boot up NB voltage in unit of mv.
-//ucHtcTmpLmt                       Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
-//ucTjOffset                        Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
-//ucMemoryType                      [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
-//ucUMAChannelNumber      	        System memory channel numbers. 
-//usExtDispConnectionInfoOffset     ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. 
-//ulCSR_M3_ARB_CNTL_DEFAULT[10]     Arrays with values for CSR M3 arbiter for default
-//ulCSR_M3_ARB_CNTL_UVD[10]         Arrays with values for CSR M3 arbiter for UVD playback.
-//ulCSR_M3_ARB_CNTL_FS3D[10]        Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. If it is equal to 0, the VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit. 
+ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit. 
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following devices are supported in Llano/Ontario projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:      	        Other display related flags, not defined yet. 
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[3]=0: Enable HW AUX mode detection logic
+                                        =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). 
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+                                  
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  Changing BL using VBIOS function is functional in both driver and non-driver present environment; 
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
+                                  that BL control from GPU is expected.
+                                  VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments, but
+                                  this is platform dependent,
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt. 
+                                  Used to calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usBootUpNBVoltage:                Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:      	        System memory channel numbers. 
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high.
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz. 
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in units of 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel (not including eDP) Spread Spectrum Percentage in units of 0.01%; =0, use VBIOS default setting. 
+usLvdsSSpreadRateIn10Hz:          LVDS panel (not including eDP) Spread Spectrum frequency in units of 10Hz; =0, use VBIOS default setting. 
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting. 
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in units of 10Hz; =0, use VBIOS default setting. 
+usDVISSPercentage:                DVI Spread Spectrum Percentage in units of 0.01%; 100 means 1%; =0, use VBIOS default setting. 
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in units of 10Hz; =0, use VBIOS default setting. 
 **********************************************************************************************************************/
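Most of the clock fields described above are in 10 kHz units, the spread-spectrum percentages in 0.01% units, and the spread rates in 10 Hz units. A small illustrative sketch of the conversions (the local names and the 'info' pointer are made up for this example, not part of the patch):

u32 boot_engine_khz = le32_to_cpu(info->ulBootUpEngineClock) * 10;	/* 10 kHz units -> kHz */
u32 min_engine_khz  = le32_to_cpu(info->ulMinEngineClock) * 10;

/* 0.01% units: a raw value of 100 means 1.00% spread */
u16 pcie_ss_raw   = le16_to_cpu(info->usPCIEClkSSPercentage);
u32 pcie_ss_whole = pcie_ss_raw / 100;
u32 pcie_ss_frac  = pcie_ss_raw % 100;

/* spread-spectrum modulation frequencies are given in 10 Hz units */
u32 lvds_ss_hz = le16_to_cpu(info->usLvdsSSpreadRateIn10Hz) * 10;
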
 
 /**************************************************************************/
@@ -3790,6 +4439,7 @@
 #define ASIC_INTERNAL_SS_ON_LVDS    6
 #define ASIC_INTERNAL_SS_ON_DP      7
 #define ASIC_INTERNAL_SS_ON_DCPLL   8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
 
 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
 {
@@ -3903,6 +4553,7 @@
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
 
 //Byte aligned defintion for BIOS usage
 #define ATOM_S0_CRT1_MONOb0             0x01
@@ -4529,7 +5180,8 @@
 #define INDEX_ACCESS_RANGE_BEGIN	    (VALUE_DWORD + 1)
 #define INDEX_ACCESS_RANGE_END		    (INDEX_ACCESS_RANGE_BEGIN + 1)
 #define VALUE_INDEX_ACCESS_SINGLE	    (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
+#define ACCESS_PLACEHOLDER             0x80
 
 typedef struct _ATOM_MC_INIT_PARAM_TABLE
 { 
@@ -4554,6 +5206,10 @@
 #define _32Mx32             0x33
 #define _64Mx8              0x41
 #define _64Mx16             0x42
+#define _64Mx32             0x43
+#define _128Mx8             0x51
+#define _128Mx16            0x52
+#define _256Mx8             0x61
 
 #define SAMSUNG             0x1
 #define INFINEON            0x2
@@ -4569,10 +5225,11 @@
 #define QIMONDA             INFINEON
 #define PROMOS              MOSEL
 #define KRETON              INFINEON
+#define ELIXIR              NANYA
 
 /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
 
-#define UCODE_ROM_START_ADDRESS		0x1c000
+#define UCODE_ROM_START_ADDRESS		0x1b800
 #define	UCODE_SIGNATURE			0x4375434d // 'MCuC' - MC uCode
 
 //uCode block header for reference
@@ -4903,7 +5560,34 @@
   ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
 }ATOM_VRAM_MODULE_V6;
 
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+  ULONG	  ulChannelMapCfg;	                // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR	  ucReserve;                        // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+  UCHAR	  ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR	  ucVREFI;                          // Not used.
+  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+  UCHAR	  ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value. 
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;                      // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  char    strMemPNString[20];               // part number string, ends with '0'. 
+}ATOM_VRAM_MODULE_V7;
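ucMemorySize in the module above is expressed in 16 MB units and the vendor ID byte packs a revision and a vendor code; a tiny hedged sketch of decoding them (the 'vram_module' pointer is assumed):

u64 module_bytes = (u64)vram_module->ucMemorySize << 24;	/* 16 MB units -> bytes */
u8  vendor_code  = vram_module->ucMemoryVenderID & 0x0f;	/* [3:0] Vendor code  */
u8  vendor_rev   = vram_module->ucMemoryVenderID >> 4;		/* [7:4] Revision     */
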
 
 typedef struct _ATOM_VRAM_INFO_V2
 {
@@ -4942,6 +5626,20 @@
 																																						 //	ATOM_INIT_REG_BLOCK				 aMemAdjust;
 }ATOM_VRAM_INFO_V4;
 
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+	USHORT										 usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+	USHORT										 usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+	USHORT										 usReserved[4];
+  UCHAR                      ucNumOfVRAMModule;                              // indicates the number of VRAM modules
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicates the ATOM_VRAM_MODULE version
+  UCHAR                      ucReserved; 
+  ATOM_VRAM_MODULE_V7		     aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+
 typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
@@ -5182,6 +5880,16 @@
 	UCHAR  ucReserved;
 }ASIC_TRANSMITTER_INFO;
 
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
+
 typedef struct _ASIC_ENCODER_INFO
 {
 	UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@
 /* /obselete */
 #define DP_ENCODER_SERVICE_PS_ALLOCATION				WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
 
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+	USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;          // Input and output parameters. 
+  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+	UCHAR  ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE							0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION			    0x02
+
+
 // DP_TRAINING_TABLE
 #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR				ATOM_DP_TRAINING_TBL_ADDR		
 #define DPCD_SET_SS_CNTL_TBL_ADDR													(ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@
 #define SELECT_DCIO_IMPCAL            4
 #define SELECT_DCIO_DIG               6
 #define SELECT_CRTC_PIXEL_RATE        7
+#define SELECT_VGA_BLK                8
 
 /****************************************************************************/	
 //Portion VI: Definitinos for vbios MC scratch registers that driver used
@@ -5744,7 +6475,17 @@
 #define ATOM_PP_THERMALCONTROLLER_ADT7473   9
 #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
 #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
 #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
 
 typedef struct _ATOM_PPLIB_STATE
 {
@@ -5841,6 +6582,29 @@
     USHORT                     usExtendendedHeaderOffset;
 } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
 
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usReserved[2];  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // TBD, this parameter is still under discussion.  Change to ulReserved if not needed.
+    ULONG                      ulReserved;
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
 //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
 #define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
@@ -5864,6 +6628,10 @@
 #define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
 #define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
 
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+
 //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
 #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
 #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
@@ -5896,9 +6664,21 @@
 #define ATOM_PPLIB_M3ARB_MASK                       0x00060000
 #define ATOM_PPLIB_M3ARB_SHIFT                      17
 
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
 // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
 typedef struct _ATOM_PPLIB_NONCLOCK_INFO
 {
       USHORT usClassification;
@@ -5906,15 +6686,15 @@
       UCHAR  ucMaxTemperature;
       ULONG  ulCapsAndSettings;
       UCHAR  ucRequiredPower;
-      UCHAR  ucUnused1[3];
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
 } ATOM_PPLIB_NONCLOCK_INFO;
 
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
 // referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
-
 typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
 {
       USHORT usEngineClockLow;
@@ -5985,6 +6765,93 @@
 #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
 #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
 
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  //clock frequency & 0xFFFF. The unit is 10kHz.
+      UCHAR  ucEngineClockHigh; //clock frequency >> 16. 
+      UCHAR  vddcIndex;         //2-bit vddc index;
+      UCHAR  leakage;          //please use 8-bit absolute value, not the 6-bit % value 
+      //please initialize to 0
+      UCHAR  rsv;
+      //please initialize to 0
+      USHORT rsv1;
+      //please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      //number of valid dpm levels in this state; Driver uses it to calculate the whole 
+      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+      
+      //an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      /**
+      * Driver will read the first ucNumDPMLevels in this array
+      */
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    //this is for Sumo
+    ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
+}ClockInfoArray;
+
+typedef struct NonClockInfoArray{
+
+    //how many non-clock levels we have; normally the same as the number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
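The V2 power-state layout above is variable length: each ATOM_PPLIB_STATE_V2 carries ucNumDPMLevels clock-info indices, so consumers must advance by the computed size rather than by sizeof() alone. A minimal walking sketch under that assumption (the 'state_array' pointer and what is done with each level are not part of this patch):

/* 'state_array' is assumed to point at the StateArray inside the
 * PowerPlay table; each entry is variable sized.
 */
u8 *entry = (u8 *)&state_array->states[0];
int i, j;

for (i = 0; i < state_array->ucNumEntries; i++) {
	ATOM_PPLIB_STATE_V2 *state = (ATOM_PPLIB_STATE_V2 *)entry;

	for (j = 0; j < state->ucNumDPMLevels; j++) {
		u8 clock_index = state->clockInfoIndex[j];
		/* clock_index selects an ucEntrySize-byte entry in ClockInfoArray */
		(void)clock_index;
	}

	/* advance by sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) */
	entry += sizeof(ATOM_PPLIB_STATE_V2) +
		 (state->ucNumDPMLevels - 1) * sizeof(UCHAR);
}
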
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
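The dependency and limit tables above split each clock into a 16-bit low part and an 8-bit high part. A hedged sketch of reassembling the clock and picking the voltage needed for a target SCLK (the function name and the low-to-high ordering assumption are illustrative, not defined by this header):

static u16 voltage_for_sclk(ATOM_PPLIB_Clock_Voltage_Dependency_Table *table,
			    u32 target_sclk /* 10 kHz units */)
{
	u16 voltage = 0;
	int i;

	for (i = 0; i < table->ucNumEntries; i++) {
		ATOM_PPLIB_Clock_Voltage_Dependency_Record *rec = &table->entries[i];
		u32 sclk = le16_to_cpu(rec->usClockLow) |
			   ((u32)rec->ucClockHigh << 16);

		voltage = le16_to_cpu(rec->usVoltage);
		if (sclk >= target_sclk)
			break;	/* entries assumed ordered from low to high clock */
	}
	return voltage;
}
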
 /**************************************************************************/
 
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9fbabaa..b0ab185 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -403,6 +403,7 @@
 	ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
 	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
 	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
 };
 
 static void atombios_crtc_program_ss(struct drm_crtc *crtc,
@@ -417,7 +418,30 @@
 
 	memset(&args, 0, sizeof(args));
 
-	if (ASIC_IS_DCE4(rdev)) {
+	if (ASIC_IS_DCE5(rdev)) {
+		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.ucSpreadSpectrumType = ss->type;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+			args.v3.usSpreadSpectrumAmount = ss->amount;
+			args.v3.usSpreadSpectrumStep = ss->step;
+			break;
+		case ATOM_PPLL2:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+			args.v3.usSpreadSpectrumAmount = ss->amount;
+			args.v3.usSpreadSpectrumStep = ss->step;
+			break;
+		case ATOM_DCPLL:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+			args.v3.usSpreadSpectrumAmount = 0;
+			args.v3.usSpreadSpectrumStep = 0;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v2.ucEnable = enable;
+	} else if (ASIC_IS_DCE4(rdev)) {
 		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
 		args.v2.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
@@ -673,9 +697,14 @@
 	PIXEL_CLOCK_PARAMETERS_V2 v2;
 	PIXEL_CLOCK_PARAMETERS_V3 v3;
 	PIXEL_CLOCK_PARAMETERS_V5 v5;
+	PIXEL_CLOCK_PARAMETERS_V6 v6;
 };
 
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+				    u32 dispclk)
 {
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -698,9 +727,16 @@
 			 * SetPixelClock provides the dividers
 			 */
 			args.v5.ucCRTC = ATOM_CRTC_INVALID;
-			args.v5.usPixelClock = rdev->clock.default_dispclk;
+			args.v5.usPixelClock = dispclk;
 			args.v5.ucPpll = ATOM_DCPLL;
 			break;
+		case 6:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v6.ulDispEngClkFreq = dispclk;
+			args.v6.ucPpll = ATOM_DCPLL;
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
 			return;
@@ -784,6 +820,18 @@
 			args.v5.ucEncoderMode = encoder_mode;
 			args.v5.ucPpll = pll_id;
 			break;
+		case 6:
+			args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
+			args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+			args.v6.ucRefDiv = ref_div;
+			args.v6.usFbDiv = cpu_to_le16(fb_div);
+			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v6.ucPostDiv = post_div;
+			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+			args.v6.ucTransmitterID = encoder_id;
+			args.v6.ucEncoderMode = encoder_mode;
+			args.v6.ucPpll = pll_id;
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
 			return;
@@ -1377,7 +1425,8 @@
 								   rdev->clock.default_dispclk);
 		if (ss_enabled)
 			atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
-		atombios_crtc_set_dcpll(crtc);
+		/* XXX: DCE5, make sure voltage, dispclk is high enough */
+		atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
 		if (ss_enabled)
 			atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
 	}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7b337c3..7fe8ebd 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,62 @@
 
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
 
 /* get temperature in millidegrees */
 u32 evergreen_get_temp(struct radeon_device *rdev)
@@ -57,6 +113,14 @@
 	return actual_temp * 1000;
 }
 
+u32 sumo_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+	u32 actual_temp = (temp >> 1) & 0xff;
+
+	return actual_temp * 1000;
+}
+
 void evergreen_pm_misc(struct radeon_device *rdev)
 {
 	int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -337,16 +401,28 @@
 	case 0:
 	case 4:
 	default:
-		return 3840 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 4096 * 2;
+		else
+			return 3840 * 2;
 	case 1:
 	case 5:
-		return 5760 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 6144 * 2;
+		else
+			return 5760 * 2;
 	case 2:
 	case 6:
-		return 7680 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 8192 * 2;
+		else
+			return 7680 * 2;
 	case 3:
 	case 7:
-		return 1920 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 2048 * 2;
+		else
+			return 1920 * 2;
 	}
 }
 
@@ -890,31 +966,39 @@
 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
 	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
-	save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
-	save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
-	save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
 
 	/* Stop all video */
 	WREG32(VGA_RENDER_CONTROL, 0);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	}
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	WREG32(D1VGA_CONTROL, 0);
 	WREG32(D2VGA_CONTROL, 0);
@@ -944,41 +1028,43 @@
 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
 	       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+	}
 
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
@@ -994,22 +1080,28 @@
 	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	}
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+	}
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
@@ -1057,11 +1149,17 @@
 			rdev->mc.vram_end >> 12);
 	}
 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+	if (rdev->flags & RADEON_IS_IGP) {
+		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+	}
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
-	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
@@ -1285,11 +1383,15 @@
 	switch (rdev->family) {
 	case CHIP_CEDAR:
 	case CHIP_REDWOOD:
+	case CHIP_PALM:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
 		force_no_swizzle = false;
 		break;
 	case CHIP_CYPRESS:
 	case CHIP_HEMLOCK:
 	case CHIP_JUNIPER:
+	case CHIP_BARTS:
 	default:
 		force_no_swizzle = true;
 		break;
@@ -1384,6 +1486,46 @@
 	return backend_map;
 }
 
+static void evergreen_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	}
+
+	switch (rdev->family) {
+	case CHIP_HEMLOCK:
+	case CHIP_CYPRESS:
+	case CHIP_BARTS:
+		tcp_chan_steer_lo = 0x54763210;
+		tcp_chan_steer_hi = 0x0000ba98;
+		break;
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+	default:
+		tcp_chan_steer_lo = 0x76543210;
+		tcp_chan_steer_hi = 0x0000ba98;
+		break;
+	}
+
+	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
 	u32 cc_rb_backend_disable = 0;
@@ -1495,6 +1637,90 @@
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
 		break;
+	case CHIP_PALM:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_BARTS:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 7;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_TURKS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 6;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_CAICOS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
 	}
 
 	/* Initialize HDP */
@@ -1636,6 +1862,7 @@
 		switch (rdev->family) {
 		case CHIP_CYPRESS:
 		case CHIP_HEMLOCK:
+		case CHIP_BARTS:
 			gb_backend_map = 0x66442200;
 			break;
 		case CHIP_JUNIPER:
@@ -1687,6 +1914,8 @@
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
+	evergreen_program_channel_remap(rdev);
+
 	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
 	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 
@@ -1769,9 +1998,16 @@
 		      GS_PRIO(2) |
 		      ES_PRIO(3));
 
-	if (rdev->family == CHIP_CEDAR)
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_CAICOS:
 		/* no vertex cache */
 		sq_config &= ~VC_ENABLE;
+		break;
+	default:
+		break;
+	}
 
 	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
 
@@ -1783,10 +2019,15 @@
 	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
 	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
 
-	if (rdev->family == CHIP_CEDAR)
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
 		ps_thread_count = 96;
-	else
+		break;
+	default:
 		ps_thread_count = 128;
+		break;
+	}
 
 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
 	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
@@ -1817,10 +2058,16 @@
 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
 					  FORCE_EOV_MAX_REZ_CNT(255)));
 
-	if (rdev->family == CHIP_CEDAR)
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_CAICOS:
 		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
-	else
+		break;
+	default:
 		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+		break;
+	}
 	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 
@@ -1904,12 +2151,18 @@
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	/* Setup GPU memory space */
-	/* size in MB on evergreen */
-	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* size in bytes on fusion */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	} else {
+		/* size in MB on evergreen */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	}
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
-	r600_vram_gtt_location(rdev, &rdev->mc);
+	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
@@ -1917,8 +2170,30 @@
 
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 {
-	/* FIXME: implement for evergreen */
-	return false;
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status_se0, grbm_status_se1;
+	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
+	int r;
+
+	srbm_status = RREG32(SRBM_STATUS);
+	grbm_status = RREG32(GRBM_STATUS);
+	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+	if (!(grbm_status & GUI_ACTIVE)) {
+		r100_gpu_lockup_update(lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2011,17 +2286,21 @@
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2047,6 +2326,7 @@
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2072,27 +2352,33 @@
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
 		crtc1 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
 		crtc2 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[2]) {
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    rdev->irq.pflip[2]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
 		crtc3 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[3]) {
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    rdev->irq.pflip[3]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
 		crtc4 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[4]) {
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    rdev->irq.pflip[4]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
 		crtc5 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[5]) {
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    rdev->irq.pflip[5]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
 		crtc6 |= VBLANK_INT_MASK;
 	}
@@ -2130,10 +2416,19 @@
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
-	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
-	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
-	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
-	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
 
 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
 	WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2145,79 +2440,92 @@
 	return 0;
 }
 
-static inline void evergreen_irq_ack(struct radeon_device *rdev,
-				     u32 *disp_int,
-				     u32 *disp_int_cont,
-				     u32 *disp_int_cont2,
-				     u32 *disp_int_cont3,
-				     u32 *disp_int_cont4,
-				     u32 *disp_int_cont5)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-	*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-	*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
-	*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
-	*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
-	*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
 
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
 		tmp = RREG32(DC_HPD1_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD1_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
 		tmp = RREG32(DC_HPD2_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD2_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
 		tmp = RREG32(DC_HPD3_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD3_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
 		tmp = RREG32(DC_HPD5_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
 		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2226,14 +2534,10 @@
 
 void evergreen_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
-			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_irq_ack(rdev);
 	evergreen_disable_interrupt_state(rdev);
 }
 
@@ -2273,8 +2577,6 @@
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -2295,8 +2597,7 @@
 
 restart_ih:
 	/* display interrupts */
-	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
-			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_irq_ack(rdev);
 
 	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
@@ -2309,17 +2610,21 @@
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D1_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D1 vline\n");
 				}
 				break;
@@ -2331,17 +2636,21 @@
 		case 2: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
 				break;
 			case 1: /* D2 vline */
-				if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-					disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D2 vline\n");
 				}
 				break;
@@ -2353,17 +2662,21 @@
 		case 3: /* D3 vblank/vline */
 			switch (src_data) {
 			case 0: /* D3 vblank */
-				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 2);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[2])
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
 				}
 				break;
 			case 1: /* D3 vline */
-				if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-					disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D3 vline\n");
 				}
 				break;
@@ -2375,17 +2688,21 @@
 		case 4: /* D4 vblank/vline */
 			switch (src_data) {
 			case 0: /* D4 vblank */
-				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 3);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[3])
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
 				}
 				break;
 			case 1: /* D4 vline */
-				if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-					disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D4 vline\n");
 				}
 				break;
@@ -2397,17 +2714,21 @@
 		case 5: /* D5 vblank/vline */
 			switch (src_data) {
 			case 0: /* D5 vblank */
-				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 4);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[4])
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
 				}
 				break;
 			case 1: /* D5 vline */
-				if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-					disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D5 vline\n");
 				}
 				break;
@@ -2419,17 +2740,21 @@
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 5);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[5])
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
 				}
 				break;
 			case 1: /* D6 vline */
-				if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D6 vline\n");
 				}
 				break;
@@ -2441,43 +2766,43 @@
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (disp_int & DC_HPD1_INTERRUPT) {
-					disp_int &= ~DC_HPD1_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD1\n");
 				}
 				break;
 			case 1:
-				if (disp_int_cont & DC_HPD2_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD2\n");
 				}
 				break;
 			case 2:
-				if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD3\n");
 				}
 				break;
 			case 3:
-				if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD4\n");
 				}
 				break;
 			case 4:
-				if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 5:
-				if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
@@ -2516,7 +2841,7 @@
 	if (wptr != rdev->ih.wptr)
 		goto restart_ih;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -2527,12 +2852,31 @@
 {
 	int r;
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
+	/* enable pcie gen2 link */
+	if (!ASIC_IS_DCE5(rdev))
+		evergreen_pcie_gen2_enable(rdev);
+
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+		r = btc_mc_load_microcode(rdev);
 		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
+			DRM_ERROR("Failed to load MC firmware!\n");
 			return r;
 		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
 	}
 
 	evergreen_mc_program(rdev);
@@ -2551,6 +2895,11 @@
 		rdev->asic->copy = NULL;
 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
 	}
+	/* XXX: ontario has problems blitting to gart at the moment */
+	if (rdev->family == CHIP_PALM) {
+		rdev->asic->copy = NULL;
+		rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	}
 
 	/* allocate wb buffer */
 	r = radeon_wb_init(rdev);
@@ -2658,12 +3007,16 @@
 	u32 reg;
 
 	/* first check CRTCs */
-	reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	if (rdev->flags & RADEON_IS_IGP)
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	else
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
 	if (reg & EVERGREEN_CRTC_MASTER_EN)
 		return true;
 
@@ -2800,3 +3153,52 @@
 	rdev->bios = NULL;
 	radeon_dummy_page_fini(rdev);
 }
+
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, speed_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index e0e5901..b758dc7 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -147,7 +147,9 @@
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
 
-	if (rdev->family == CHIP_CEDAR)
+	if ((rdev->family == CHIP_CEDAR) ||
+	    (rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_CAICOS))
 		cp_set_surface_sync(rdev,
 				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
 	else
@@ -331,9 +333,95 @@
 		num_hs_stack_entries = 85;
 		num_ls_stack_entries = 85;
 		break;
+	case CHIP_PALM:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 96;
+		num_vs_threads = 16;
+		num_gs_threads = 16;
+		num_es_threads = 16;
+		num_hs_threads = 16;
+		num_ls_threads = 16;
+		num_ps_stack_entries = 42;
+		num_vs_stack_entries = 42;
+		num_gs_stack_entries = 42;
+		num_es_stack_entries = 42;
+		num_hs_stack_entries = 42;
+		num_ls_stack_entries = 42;
+		break;
+	case CHIP_BARTS:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 128;
+		num_vs_threads = 20;
+		num_gs_threads = 20;
+		num_es_threads = 20;
+		num_hs_threads = 20;
+		num_ls_threads = 20;
+		num_ps_stack_entries = 85;
+		num_vs_stack_entries = 85;
+		num_gs_stack_entries = 85;
+		num_es_stack_entries = 85;
+		num_hs_stack_entries = 85;
+		num_ls_stack_entries = 85;
+		break;
+	case CHIP_TURKS:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 128;
+		num_vs_threads = 20;
+		num_gs_threads = 20;
+		num_es_threads = 20;
+		num_hs_threads = 20;
+		num_ls_threads = 20;
+		num_ps_stack_entries = 42;
+		num_vs_stack_entries = 42;
+		num_gs_stack_entries = 42;
+		num_es_stack_entries = 42;
+		num_hs_stack_entries = 42;
+		num_ls_stack_entries = 42;
+		break;
+	case CHIP_CAICOS:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 128;
+		num_vs_threads = 10;
+		num_gs_threads = 10;
+		num_es_threads = 10;
+		num_hs_threads = 10;
+		num_ls_threads = 10;
+		num_ps_stack_entries = 42;
+		num_vs_stack_entries = 42;
+		num_gs_stack_entries = 42;
+		num_es_stack_entries = 42;
+		num_hs_stack_entries = 42;
+		num_ls_stack_entries = 42;
+		break;
 	}
 
-	if (rdev->family == CHIP_CEDAR)
+	if ((rdev->family == CHIP_CEDAR) ||
+	    (rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_CAICOS))
 		sq_config = 0;
 	else
 		sq_config = VC_ENABLE;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a36..c781c92 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
 #define EVERGREEN_GRPH_Y_START                          0x6830
 #define EVERGREEN_GRPH_X_END                            0x6834
 #define EVERGREEN_GRPH_Y_END                            0x6838
+#define EVERGREEN_GRPH_UPDATE                           0x6844
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#       define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x6848
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
 
 /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
 #define EVERGREEN_CUR_CONTROL                           0x6998
@@ -178,6 +183,7 @@
 #       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 
 #define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a73b53c..36d32d8 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -164,11 +164,13 @@
 #define		SE_SC_BUSY					(1 << 29)
 #define		SE_DB_BUSY					(1 << 30)
 #define		SE_CB_BUSY					(1 << 31)
-
+/* evergreen */
 #define	CG_MULT_THERMAL_STATUS				0x740
 #define		ASIC_T(x)			        ((x) << 16)
 #define		ASIC_T_MASK			        0x7FF0000
 #define		ASIC_T_SHIFT			        16
+/* APU */
+#define	CG_THERMAL_STATUS			        0x678
 
 #define	HDP_HOST_PATH_CNTL				0x2C00
 #define	HDP_NONSURFACE_BASE				0x2C04
@@ -181,6 +183,7 @@
 #define MC_SHARED_CHMAP						0x2004
 #define		NOOFCHAN_SHIFT					12
 #define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
 
 #define	MC_ARB_RAMCFG					0x2760
 #define		NOOFBANK_SHIFT					0
@@ -200,6 +203,7 @@
 #define	MC_VM_AGP_BOT					0x202C
 #define	MC_VM_AGP_BASE					0x2030
 #define	MC_VM_FB_LOCATION				0x2024
+#define	MC_FUS_VM_FB_OFFSET				0x2898
 #define	MC_VM_MB_L1_TLB0_CNTL				0x2234
 #define	MC_VM_MB_L1_TLB1_CNTL				0x2238
 #define	MC_VM_MB_L1_TLB2_CNTL				0x223C
@@ -349,6 +353,9 @@
 #define		SYNC_WALKER					(1 << 25)
 #define		SYNC_ALIGNER					(1 << 26)
 
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
 #define	VGT_CACHE_INVALIDATION				0x88C4
 #define		CACHE_INVALIDATION(x)				((x) << 0)
 #define			VC_ONLY						0
@@ -574,6 +581,44 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
 /*
  * PM4
  */
@@ -603,7 +648,7 @@
 #define	PACKET3_NOP					0x10
 #define	PACKET3_SET_BASE				0x11
 #define	PACKET3_CLEAR_STATE				0x12
-#define	PACKET3_INDIRECT_BUFFER_SIZE			0x13
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
 #define	PACKET3_DISPATCH_DIRECT				0x15
 #define	PACKET3_DISPATCH_INDIRECT			0x16
 #define	PACKET3_INDIRECT_BUFFER_END			0x17
@@ -644,14 +689,14 @@
 #              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
 #              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
 #              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
-#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
 #              define PACKET3_FULL_CACHE_ENA       (1 << 20)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
 #              define PACKET3_VC_ACTION_ENA        (1 << 24)
 #              define PACKET3_CB_ACTION_ENA        (1 << 25)
 #              define PACKET3_DB_ACTION_ENA        (1 << 26)
 #              define PACKET3_SH_ACTION_ENA        (1 << 27)
-#              define PACKET3_SMX_ACTION_ENA       (1 << 28)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
 #define	PACKET3_ME_INITIALIZE				0x44
 #define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define	PACKET3_COND_WRITE				0x45
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 0000000..5e0bef8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_drm.h"
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+
+#define BTC_IO_MC_REGS_SIZE 29
+
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00916a00}
+};
+
+int btc_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 mem_type, running, blackout = 0;
+	u32 *io_mc_regs;
+	int i;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		io_mc_regs = (u32 *)&barts_io_mc_regs;
+		break;
+	case CHIP_TURKS:
+		io_mc_regs = (u32 *)&turks_io_mc_regs;
+		break;
+	case CHIP_CAICOS:
+	default:
+		io_mc_regs = (u32 *)&caicos_io_mc_regs;
+		break;
+	}
+
+	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
+			udelay(10);
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		chip_name = "BARTS";
+		rlc_chip_name = "BTC";
+		break;
+	case CHIP_TURKS:
+		chip_name = "TURKS";
+		rlc_chip_name = "BTC";
+		break;
+	case CHIP_CAICOS:
+		chip_name = "CAICOS";
+		rlc_chip_name = "BTC";
+		break;
+	default: BUG();
+	}
+
+	pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+	me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+	rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	mc_req_size = BTC_MC_UCODE_SIZE * 4;
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		printk(KERN_ERR
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		printk(KERN_ERR
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		printk(KERN_ERR
+		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->mc_fw->size != mc_req_size) {
+		printk(KERN_ERR
+		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->mc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+out:
+	platform_device_unregister(pdev);
+
+	if (err) {
+		if (err != -EINVAL)
+			printk(KERN_ERR
+			       "ni_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+	}
+	return err;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 0000000..5db7b7d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL                         0x6840
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x68b4
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x68c4
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL                           0x68d4
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x68f0
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x6960
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x6964
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x6a80
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 0000000..f7b4453
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+#define MC_SEQ_MISC0           				0x2a00
+#define		MC_SEQ_MISC0_GDDR5_SHIFT      		28
+#define		MC_SEQ_MISC0_GDDR5_MASK      		0xf0000000
+#define		MC_SEQ_MISC0_GDDR5_VALUE      		5
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#endif
+
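
The MC_SEQ_MISC0 define above packs the memory type into bits 31:28. As a rough
illustration (not part of this patch) of how the shift/mask/value defines fit
together, a driver-side GDDR5 check could look like the sketch below; it assumes
the usual radeon RREG32() MMIO accessor.

/* Hypothetical helper, shown only to illustrate the MC_SEQ_MISC0 defines;
 * not part of the patch itself.
 */
static bool ni_vram_is_gddr5(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SEQ_MISC0);

	return ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >>
		MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
}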
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9..f637595 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,56 @@
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure the flip happens during vblank rather than hblank */
+	tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+	tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+	/* make sure pending bit is asserted */
+	tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen as late as possible in the vblank interval.
+	 * same field for crtc1/2
+	 */
+	tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+	tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
 {
 	int i;
@@ -526,10 +576,12 @@
 	if (rdev->irq.gui_idle) {
 		tmp |= RADEON_GUI_IDLE_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		tmp |= RADEON_CRTC2_VBLANK_MASK;
 	}
 	if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@
 		}
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
-			drm_handle_vblank(rdev->ddev, 0);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[0])
+				radeon_crtc_handle_flip(rdev, 0);
 		}
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
-			drm_handle_vblank(rdev->ddev, 1);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[1])
+				radeon_crtc_handle_flip(rdev, 1);
 		}
 		if (status & RADEON_FP_DETECT_STAT) {
 			queue_hotplug = true;
@@ -622,7 +682,7 @@
 	/* reset gui idle ack.  the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
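
r100_page_flip() above follows a lock/program/poll/unlock sequence: take the CRTC
offset lock, write the new scanout base, spin until GUI_TRIG_OFFSET (update
pending) is raised, then drop the lock. The sketch below is illustrative only and
not part of this patch; it shows the same sequence with the unbounded busy-wait
replaced by a bounded poll, where the use of rdev->usec_timeout and udelay() is
an assumption made for the sketch.

/* Illustrative sketch only (not part of this patch): the same
 * program-and-poll sequence as r100_page_flip() above, with a bounded poll.
 */
static u32 r100_page_flip_bounded(struct radeon_device *rdev, int crtc_id,
				  u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* program the new scanout base while the offset lock is held */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* wait (bounded) for the flip to be latched */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		    RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}

	/* release the lock so double buffering resumes at vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* report the current update_pending state, as the real hook does */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}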
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index b121b6c..eab9176 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -551,7 +551,7 @@
 #define   S_000360_CUR2_LOCK(x)                        (((x) & 0x1) << 31)
 #define   G_000360_CUR2_LOCK(x)                        (((x) >> 31) & 0x1)
 #define   C_000360_CUR2_LOCK                           0x7FFFFFFF
-#define R_0003C2_GENMO_WT                            0x0003C0
+#define R_0003C2_GENMO_WT                            0x0003C2
 #define   S_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
 #define   G_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
 #define   C_0003C2_GENMO_MONO_ADDRESS_B                0xFE
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cde1d34..fae5e70 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -558,10 +558,7 @@
 
 	/* FIXME wait for idle */
 
-	if (rdev->family < CHIP_R600)
-		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-	else
-		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
 	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
@@ -745,6 +742,11 @@
 		break;
 	case 0x4E00:
 		/* RB3D_CCTL */
+		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+		    p->rdev->cmask_filp != p->filp) {
+			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+			return -EINVAL;
+		}
 		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
 		break;
 	case 0x4E38:
@@ -787,6 +789,13 @@
 		case 15:
 			track->cb[i].cpp = 2;
 			break;
+		case 5:
+			if (p->rdev->family < CHIP_RV515) {
+				DRM_ERROR("Invalid color buffer format (%d)!\n",
+					  ((idx_value >> 21) & 0xF));
+				return -EINVAL;
+			}
+			/* fall through */
 		case 6:
 			track->cb[i].cpp = 4;
 			break;
@@ -1199,6 +1208,10 @@
 		if (p->rdev->hyperz_filp != p->filp)
 			return -EINVAL;
 		break;
+	case PACKET3_3D_CLEAR_CMASK:
+		if (p->rdev->cmask_filp != p->filp)
+			return -EINVAL;
+		break;
 	case PACKET3_NOP:
 		break;
 	default:
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 0c036c6..1f519a5 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -54,6 +54,7 @@
 #define		PACKET3_3D_DRAW_IMMD_2		0x35
 #define		PACKET3_3D_DRAW_INDX_2		0x36
 #define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_3D_CLEAR_CMASK		0x38
 #define		PACKET3_BITBLT_MULTI		0x9B
 
 #define PACKET0(reg, n)	(CP_PACKET0 |					\
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f60..fc43705 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
 #define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
 #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
 
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+
 /* master controls */
 #define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
 #define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
@@ -409,8 +411,10 @@
 #define AVIVO_D1GRPH_X_END                                      0x6134
 #define AVIVO_D1GRPH_Y_END                                      0x6138
 #define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING              (1 << 2)
 #       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
 #define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN         (1 << 0)
 
 #define AVIVO_D1CUR_CONTROL                     0x6400
 #       define AVIVO_D1CURSOR_EN                (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9c92db7..6b50716 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@
 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
@@ -91,6 +94,7 @@
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 
 /* get temperature in millidegrees */
 u32 rv6xx_get_temp(struct radeon_device *rdev)
@@ -1164,7 +1168,7 @@
  * Note: GTT start, end, size should be initialized before calling this
  * function on AGP platform.
  */
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
 	u64 size_bf, size_af;
 
@@ -2009,6 +2013,10 @@
 		chip_name = "CYPRESS";
 		rlc_chip_name = "CYPRESS";
 		break;
+	case CHIP_PALM:
+		chip_name = "PALM";
+		rlc_chip_name = "SUMO";
+		break;
 	default: BUG();
 	}
 
@@ -2372,6 +2380,9 @@
 {
 	int r;
 
+	/* enable pcie gen2 link */
+	r600_pcie_gen2_enable(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -2874,6 +2885,8 @@
 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
 		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2998,6 +3011,7 @@
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi1, hdmi2;
+	u32 d1grph = 0, d2grph = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,11 +3048,13 @@
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("r600_irq_set: vblank 0\n");
 		mode_int |= D1MODE_VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("r600_irq_set: vblank 1\n");
 		mode_int |= D2MODE_VBLANK_INT_MASK;
 	}
@@ -3081,6 +3097,8 @@
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
 	if (ASIC_IS_DCE3(rdev)) {
@@ -3103,32 +3121,35 @@
 	return 0;
 }
 
-static inline void r600_irq_ack(struct radeon_device *rdev,
-				u32 *disp_int,
-				u32 *disp_int_cont,
-				u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
 	if (ASIC_IS_DCE3(rdev)) {
-		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
 	} else {
-		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = 0;
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
 	}
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
 
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
 		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
 		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD1_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3160,7 @@
 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD2_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3150,7 +3171,7 @@
 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD3_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3161,18 +3182,18 @@
 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD5_INT_CONTROL, tmp);
 		}
-		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3194,12 +3215,10 @@
 
 void r600_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 	r600_disable_interrupt_state(rdev);
 }
 
@@ -3262,7 +3281,7 @@
 	u32 wptr = r600_get_ih_wptr(rdev);
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
-	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -3283,7 +3302,7 @@
 
 restart_ih:
 	/* display interrupts */
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 
 	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
@@ -3296,17 +3315,21 @@
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D1_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D1 vline\n");
 				}
 				break;
@@ -3318,17 +3341,21 @@
 		case 5: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D2_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D2 vline\n");
 				}
 				break;
@@ -3340,43 +3367,43 @@
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
 			case 0:
-				if (disp_int & DC_HPD1_INTERRUPT) {
-					disp_int &= ~DC_HPD1_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD1\n");
 				}
 				break;
 			case 1:
-				if (disp_int & DC_HPD2_INTERRUPT) {
-					disp_int &= ~DC_HPD2_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD2\n");
 				}
 				break;
 			case 4:
-				if (disp_int_cont & DC_HPD3_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD3\n");
 				}
 				break;
 			case 5:
-				if (disp_int_cont & DC_HPD4_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD4\n");
 				}
 				break;
 			case 10:
-				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 12:
-				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
@@ -3419,7 +3446,7 @@
 	if (wptr != rdev->ih.wptr)
 		goto restart_ih;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3508,3 +3535,219 @@
 	} else
 		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 }
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	u32 link_width_cntl, mask, target_reg;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* FIXME wait for idle */
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+	default:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	}
+
+	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+		return;
+
+	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+		return;
+
+	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+			     RADEON_PCIE_LC_RECONFIG_NOW |
+			     R600_PCIE_LC_RENEGOTIATE_EN |
+			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+	link_width_cntl |= mask;
+
+	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+	/* some northbridges can renegotiate the link rather than requiring
+	 * a complete re-config.
+	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+	 */
+	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+	else
+		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+						       RADEON_PCIE_LC_RECONFIG_NOW));
+
+	if (rdev->family >= CHIP_RV770)
+		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+	else
+		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+	/* wait for lane set to complete */
+	link_width_cntl = RREG32(target_reg);
+	while (link_width_cntl == 0xffffffff)
+		link_width_cntl = RREG32(target_reg);
+
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+	u16 link_cntl2;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* only RV6xx+ chips are supported */
+	if (rdev->family <= CHIP_R600)
+		return;
+
+	/* 55 nm r6xx asics */
+	if ((rdev->family == CHIP_RV670) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RV635)) {
+		/* advertise upconfig capability */
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+					     LC_RECONFIG_ARC_MISSING_ESCAPE);
+			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		} else {
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		}
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		/* 55 nm r6xx asics */
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			WREG32(MM_CFGREGS_CNTL, 0x8);
+			link_cntl2 = RREG32(0x4088);
+			WREG32(MM_CFGREGS_CNTL, 0);
+			/* not supported yet */
+			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+				return;
+		}
+
+		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+			training_cntl &= ~LC_POINT_7_PLUS_EN;
+			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+		} else {
+			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+		}
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
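
r600_set_pcie_lanes() and r600_pcie_gen2_enable() above both rely on
read-modify-write access to the PCIE_P (PCIE port) register space through
RREG32_PCIE_P()/WREG32_PCIE_P(). As a minimal sketch of that pattern in
isolation (not part of the patch), toggling the gen2 strap bit added to
r600d.h below could look like:

/* Minimal RMW sketch, illustrative only; the register and bit names are
 * the ones added to r600d.h in this patch.
 */
static void r600_pcie_set_gen2_strap(struct radeon_device *rdev, bool enable)
{
	u32 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);

	if (enable)
		speed_cntl |= LC_GEN2_EN_STRAP;
	else
		speed_cntl &= ~LC_GEN2_EN_STRAP;
	WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
}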
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4..a5d898b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -728,6 +728,54 @@
 /* DCE 3.2 */
 #       define DC_HPDx_EN                                 (1 << 28)
 
+#define D1GRPH_INTERRUPT_STATUS                           0x6158
+#define D2GRPH_INTERRUPT_STATUS                           0x6958
+#       define DxGRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define DxGRPH_PFLIP_INT_CLEAR                     (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL                          0x615c
+#define D2GRPH_INTERRUPT_CONTROL                          0x695c
+#       define DxGRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define DxGRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#       define LC_POINT_7_PLUS_EN                         (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
 /*
  * PM4
  */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a70957..e948663 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
 
 #include "radeon_family.h"
 #include "radeon_mode.h"
@@ -180,6 +181,7 @@
 extern u32 rv6xx_get_temp(struct radeon_device *rdev);
 extern u32 rv770_get_temp(struct radeon_device *rdev);
 extern u32 evergreen_get_temp(struct radeon_device *rdev);
+extern u32 sumo_get_temp(struct radeon_device *rdev);
 
 /*
  * Fences.
@@ -259,13 +261,12 @@
 };
 
 struct radeon_bo_list {
-	struct list_head	list;
+	struct ttm_validate_buffer tv;
 	struct radeon_bo	*bo;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
 	unsigned		wdomain;
 	u32			tiling_flags;
-	bool			reserved;
 };
 
 /*
@@ -377,11 +378,56 @@
 /*
  * IRQS.
  */
+
+struct radeon_unpin_work {
+	struct work_struct work;
+	struct radeon_device *rdev;
+	int crtc_id;
+	struct radeon_fence *fence;
+	struct drm_pending_vblank_event *event;
+	struct radeon_bo *old_rbo;
+	u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+	u32 disp_int;
+};
+
+struct r600_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 d1grph_int;
+	u32 d2grph_int;
+};
+
+struct evergreen_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 disp_int_cont3;
+	u32 disp_int_cont4;
+	u32 disp_int_cont5;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
+};
+
+union radeon_irq_stat_regs {
+	struct r500_irq_stat_regs r500;
+	struct r600_irq_stat_regs r600;
+	struct evergreen_irq_stat_regs evergreen;
+};
+
 struct radeon_irq {
 	bool		installed;
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool		crtc_vblank_int[6];
+	bool		pflip[6];
 	wait_queue_head_t	vblank_queue;
 	/* FIXME: use defines for max hpd/dacs */
 	bool            hpd[6];
@@ -392,12 +438,17 @@
 	bool		hdmi[2];
 	spinlock_t sw_lock;
 	int sw_refcount;
+	union radeon_irq_stat_regs stat_regs;
+	spinlock_t pflip_lock[6];
+	int pflip_refcount[6];
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 /*
  * CP & ring.
@@ -687,6 +738,8 @@
 	THERMAL_TYPE_RV6XX,
 	THERMAL_TYPE_RV770,
 	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
 };
 
 struct radeon_voltage {
@@ -770,6 +823,9 @@
 	u32                     current_sclk;
 	u32                     current_mclk;
 	u32                     current_vddc;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	u32                     default_vddc;
 	struct radeon_i2c_chan *i2c_bus;
 	/* selected pm method */
 	enum radeon_pm_method     pm_method;
@@ -881,6 +937,10 @@
 	void (*pm_finish)(struct radeon_device *rdev);
 	void (*pm_init_profile)(struct radeon_device *rdev);
 	void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+	/* pageflipping */
+	void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+	u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+	void (*post_page_flip)(struct radeon_device *rdev, int crtc);
 };
 
 /*
@@ -975,6 +1035,7 @@
 	unsigned tiling_npipes;
 	unsigned tiling_group_size;
 	unsigned tile_config;
+	struct r100_gpu_lockup	lockup;
 };
 
 union radeon_asic_config {
@@ -1091,11 +1152,11 @@
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
 	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+	const struct firmware *mc_fw;	/* NI MC firmware */
 	struct r600_blit r600_blit;
 	struct r700_vram_scratch vram_scratch;
 	int msi_enabled; /* msi enabled */
 	struct r600_ih ih; /* r6/700 interrupt ring */
-	struct workqueue_struct *wq;
 	struct work_struct hotplug_work;
 	int num_crtc; /* number of crtcs */
 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -1110,10 +1171,10 @@
 	uint8_t			audio_status_bits;
 	uint8_t			audio_category_code;
 
-	bool powered_down;
 	struct notifier_block acpi_nb;
-	/* only one userspace can use Hyperz features at a time */
+	/* only one userspace can use Hyperz features or CMASK at a time */
 	struct drm_file *hyperz_filp;
+	struct drm_file *cmask_filp;
 	/* i2c buses */
 	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
 };
@@ -1188,6 +1249,8 @@
  */
 #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
 #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
 #define RREG32(reg) r100_mm_rreg(rdev, (reg))
 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
 #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
@@ -1261,6 +1324,14 @@
 		(rdev->family == CHIP_RV410) ||			\
 		(rdev->family == CHIP_RS400) ||			\
 		(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+		(rdev->ddev->pdev->device == 0x9443) || \
+		(rdev->ddev->pdev->device == 0x944B) || \
+		(rdev->ddev->pdev->device == 0x9506) || \
+		(rdev->ddev->pdev->device == 0x9509) || \
+		(rdev->ddev->pdev->device == 0x950F) || \
+		(rdev->ddev->pdev->device == 0x689C) || \
+		(rdev->ddev->pdev->device == 0x689D))
 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
 #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
 			    (rdev->family == CHIP_RS690)  ||	\
@@ -1269,6 +1340,9 @@
 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
 
 /*
  * BIOS helpers.
@@ -1344,6 +1418,9 @@
 #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
 
 /* Common functions */
 /* AGP */
@@ -1372,67 +1449,7 @@
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
-/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-
-/* rv200,rv250,rv280 */
-extern void r200_set_safe_registers(struct radeon_device *rdev);
-
-/* r300,r350,rv350,rv370,rv380 */
-extern void r300_set_reg_safe(struct radeon_device *rdev);
-extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_mc_init(struct radeon_device *rdev);
-extern void r300_clock_startup(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
-extern int rv370_pcie_gart_init(struct radeon_device *rdev);
-extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
-extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
-extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-
-/* r420,r423,rv410 */
-extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
-extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
-extern void r420_pipes_init(struct radeon_device *rdev);
-
-/* rv515 */
-struct rv515_mc_save {
-	u32 d1vga_control;
-	u32 d2vga_control;
-	u32 vga_render_control;
-	u32 vga_hdp_control;
-	u32 d1crtc_control;
-	u32 d2crtc_control;
-};
-extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
-extern void rv515_vga_render_disable(struct radeon_device *rdev);
-extern void rv515_set_safe_registers(struct radeon_device *rdev);
-extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_clock_startup(struct radeon_device *rdev);
-extern void rv515_debugfs(struct radeon_device *rdev);
-extern int rv515_suspend(struct radeon_device *rdev);
-
-/* rs400 */
-extern int rs400_gart_init(struct radeon_device *rdev);
-extern int rs400_gart_enable(struct radeon_device *rdev);
-extern void rs400_gart_adjust_size(struct radeon_device *rdev);
-extern void rs400_gart_disable(struct radeon_device *rdev);
-extern void rs400_gart_fini(struct radeon_device *rdev);
-
-/* rs600 */
-extern void rs600_set_safe_registers(struct radeon_device *rdev);
-extern int rs600_irq_set(struct radeon_device *rdev);
-extern void rs600_irq_disable(struct radeon_device *rdev);
-
-/* rs690, rs740 */
-extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
-					struct drm_display_mode *mode1,
-					struct drm_display_mode *mode2);
-
 /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
 extern int r600_cp_start(struct radeon_device *rdev);
@@ -1478,6 +1495,7 @@
 extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
 
+extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern void r700_cp_stop(struct radeon_device *rdev);
 extern void r700_cp_fini(struct radeon_device *rdev);
 extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -1485,6 +1503,9 @@
 extern int evergreen_blit_init(struct radeon_device *rdev);
 extern void evergreen_blit_fini(struct radeon_device *rdev);
 
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int btc_mc_load_microcode(struct radeon_device *rdev);
+
 /* radeon_acpi.c */ 
 #if defined(CONFIG_ACPI) 
 extern int radeon_acpi_init(struct radeon_device *rdev); 
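
The new pre_page_flip/page_flip/post_page_flip asic hooks are reached through
the wrapper macros added above. The actual display-side caller is not part of
this excerpt, so the following is only a rough, hypothetical sketch of the
intended calling sequence, not the real implementation.

/* Rough, hypothetical sketch of the calling sequence for the new hooks;
 * the real caller lives in the display code, which is not shown here.
 */
static void radeon_flip_sequence_example(struct radeon_device *rdev,
					 int crtc_id, u64 new_crtc_base)
{
	/* enable the pflip interrupt and pick a late flip point */
	radeon_pre_page_flip(rdev, crtc_id);

	/* program the new scanout base; returns the update_pending state */
	radeon_page_flip(rdev, crtc_id, new_crtc_base);

	/* once the flip has completed (reported via the vblank/pflip
	 * interrupt and radeon_crtc_handle_flip()), drop the interrupt
	 * reference again
	 */
	radeon_post_page_flip(rdev, crtc_id);
}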
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89e..3a1b161 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@
 		rdev->mc_rreg = &rs600_mc_rreg;
 		rdev->mc_wreg = &rs600_mc_wreg;
 	}
-	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
 		rdev->pciep_rreg = &r600_pciep_rreg;
 		rdev->pciep_wreg = &r600_pciep_wreg;
 	}
@@ -171,6 +171,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic r600_asic = {
@@ -601,8 +631,8 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = NULL,
+	.get_pcie_lanes = &r600_get_pcie_lanes,
+	.set_pcie_lanes = &r600_set_pcie_lanes,
 	.set_clock_gating = NULL,
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
@@ -618,6 +648,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r600_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &rs780_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rv770_asic = {
@@ -689,8 +725,8 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = NULL,
+	.get_pcie_lanes = &r600_get_pcie_lanes,
+	.set_pcie_lanes = &r600_set_pcie_lanes,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
@@ -706,6 +742,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r600_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rv770_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic evergreen_asic = {
@@ -733,6 +772,95 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &r600_get_pcie_lanes,
+	.set_pcie_lanes = &r600_set_pcie_lanes,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &evergreen_bandwidth_update,
+	.hpd_init = &evergreen_hpd_init,
+	.hpd_fini = &evergreen_hpd_fini,
+	.hpd_sense = &evergreen_hpd_sense,
+	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &evergreen_pm_misc,
+	.pm_prepare = &evergreen_pm_prepare,
+	.pm_finish = &evergreen_pm_finish,
+	.pm_init_profile = &r600_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &evergreen_pre_page_flip,
+	.page_flip = &evergreen_page_flip,
+	.post_page_flip = &evergreen_post_page_flip,
+};
+
+static struct radeon_asic sumo_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.copy_blit = &evergreen_copy_blit,
+	.copy_dma = &evergreen_copy_blit,
+	.copy = &evergreen_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = NULL,
+	.set_memory_clock = NULL,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &evergreen_bandwidth_update,
+	.hpd_init = &evergreen_hpd_init,
+	.hpd_fini = &evergreen_hpd_fini,
+	.hpd_sense = &evergreen_hpd_sense,
+	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &evergreen_pm_misc,
+	.pm_prepare = &evergreen_pm_prepare,
+	.pm_finish = &evergreen_pm_finish,
+	.pm_init_profile = &rs780_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic btc_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.copy_blit = &evergreen_copy_blit,
+	.copy_dma = &evergreen_copy_blit,
+	.copy = &evergreen_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.get_pcie_lanes = NULL,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = NULL,
@@ -749,6 +877,9 @@
 	.pm_finish = &evergreen_pm_finish,
 	.pm_init_profile = &r600_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &evergreen_pre_page_flip,
+	.page_flip = &evergreen_page_flip,
+	.post_page_flip = &evergreen_post_page_flip,
 };
 
 int radeon_asic_init(struct radeon_device *rdev)
@@ -835,6 +966,14 @@
 	case CHIP_HEMLOCK:
 		rdev->asic = &evergreen_asic;
 		break;
+	case CHIP_PALM:
+		rdev->asic = &sumo_asic;
+		break;
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		rdev->asic = &btc_asic;
+		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
@@ -849,7 +988,9 @@
 	if (rdev->flags & RADEON_SINGLE_CRTC)
 		rdev->num_crtc = 1;
 	else {
-		if (ASIC_IS_DCE4(rdev))
+		if (ASIC_IS_DCE41(rdev))
+			rdev->num_crtc = 2;
+		else if (ASIC_IS_DCE4(rdev))
 			rdev->num_crtc = 6;
 		else
 			rdev->num_crtc = 2;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 7409882..e01f077 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -102,6 +102,11 @@
 void r100_pci_gart_disable(struct radeon_device *rdev);
 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
+			    struct radeon_cp *cp);
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
+			   struct r100_gpu_lockup *lockup,
+			   struct radeon_cp *cp);
 void r100_ib_fini(struct radeon_device *rdev);
 int r100_ib_init(struct radeon_device *rdev);
 void r100_irq_disable(struct radeon_device *rdev);
@@ -130,15 +135,19 @@
 extern void r100_pm_finish(struct radeon_device *rdev);
 extern void r100_pm_init_profile(struct radeon_device *rdev);
 extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 
 /*
  * r200,rv250,rs300,rv280
  */
 extern int r200_copy_dma(struct radeon_device *rdev,
-			uint64_t src_offset,
-			uint64_t dst_offset,
-			unsigned num_pages,
+			 uint64_t src_offset,
+			 uint64_t dst_offset,
+			 unsigned num_pages,
 			 struct radeon_fence *fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
  * r300,r350,rv350,rv380
@@ -159,6 +168,15 @@
 extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
 
 /*
  * r420,r423,rv410
@@ -168,6 +186,10 @@
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
 extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
 
 /*
  * rs400,rs480
@@ -180,6 +202,12 @@
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+
 
 /*
  * rs600.
@@ -191,6 +219,7 @@
 extern int rs600_resume(struct radeon_device *rdev);
 int rs600_irq_set(struct radeon_device *rdev);
 int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -205,6 +234,11 @@
 extern void rs600_pm_misc(struct radeon_device *rdev);
 extern void rs600_pm_prepare(struct radeon_device *rdev);
 extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+
 
 /*
  * rs690,rs740
@@ -216,10 +250,21 @@
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+					struct drm_display_mode *mode1,
+					struct drm_display_mode *mode2);
 
 /*
  * rv515
  */
+struct rv515_mc_save {
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	u32 d1crtc_control;
+	u32 d2crtc_control;
+};
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -230,6 +275,14 @@
 void rv515_bandwidth_update(struct radeon_device *rdev);
 int rv515_resume(struct radeon_device *rdev);
 int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+
 
 /*
  * r520,rv530,rv560,rv570,r580
@@ -278,6 +331,8 @@
 extern void r600_pm_init_profile(struct radeon_device *rdev);
 extern void rs780_pm_init_profile(struct radeon_device *rdev);
 extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -287,6 +342,7 @@
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
 extern void rv770_pm_misc(struct radeon_device *rdev);
+extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 
 /*
  * evergreen
@@ -314,5 +370,8 @@
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bc5a2c3..1573202 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -37,7 +37,7 @@
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
-			uint32_t supported_device);
+			uint32_t supported_device, u16 caps);
 
 /* from radeon_connector.c */
 extern void
@@ -313,7 +313,6 @@
 				     uint16_t *line_mux,
 				     struct radeon_hpd *hpd)
 {
-	struct radeon_device *rdev = dev->dev_private;
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
 	if ((dev->pdev->device == 0x791e) &&
@@ -388,6 +387,17 @@
 			*line_mux = 0x90;
 	}
 
+	/* mac rv630 */
+	if ((dev->pdev->device == 0x9588) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x00a6)) {
+		if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+		    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+			*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+			*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+		}
+	}
+
 	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
 	if ((dev->pdev->device == 0x9598) &&
 	    (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -425,21 +435,23 @@
 		}
 	}
 
-	/* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+	/* Acer laptop (Acer TravelMate 5730G) has an HDMI port
+	 * on the laptop and a DVI port on the docking station and
+	 * both share the same encoder, hpd pin, and ddc line.
+	 * So while the bios table is technically correct,
+	 * we drop the DVI port here since xrandr has no concept of
+	 * encoders and will try to drive both connectors
+	 * with different crtcs, which isn't possible on the hardware
+	 * side and leaves no crtcs for LVDS or VGA.
+	 */
 	if ((dev->pdev->device == 0x95c4) &&
 	    (dev->pdev->subsystem_vendor == 0x1025) &&
 	    (dev->pdev->subsystem_device == 0x013c)) {
-		struct radeon_gpio_rec gpio;
-
 		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
 		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
-			gpio = radeon_lookup_gpio(rdev, 6);
-			*hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+			/* actually it's a DVI-D port not DVI-I */
 			*connector_type = DRM_MODE_CONNECTOR_DVID;
-		} else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
-			   (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
-			gpio = radeon_lookup_gpio(rdev, 7);
-			*hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+			return false;
 		}
 	}
 
@@ -525,6 +537,7 @@
 	u16 size, data_offset;
 	u8 frev, crev;
 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
 	ATOM_OBJECT_TABLE *router_obj;
 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
 	ATOM_OBJECT_HEADER *obj_header;
@@ -549,6 +562,9 @@
 	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
 	    (ctx->bios + data_offset +
 	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
 	router_obj = (ATOM_OBJECT_TABLE *)
 		(ctx->bios + data_offset +
 		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
@@ -654,14 +670,35 @@
 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-					u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
+					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+							ATOM_ENCODER_CAP_RECORD *cap_record;
+							u16 caps = 0;
 
-					radeon_add_atom_encoder(dev,
-								encoder_obj,
-								le16_to_cpu
-								(path->
-								 usDeviceTag));
-
+							while (record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_ENCODER_CAP_RECORD_TYPE:
+									cap_record = (ATOM_ENCODER_CAP_RECORD *)
+										record;
+									caps = le16_to_cpu(cap_record->usEncoderCap);
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+							radeon_add_atom_encoder(dev,
+										encoder_obj,
+										le16_to_cpu
+										(path->
+										 usDeviceTag),
+										caps);
+						}
+					}
 				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
 					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
 						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
@@ -995,7 +1032,8 @@
 						radeon_get_encoder_enum(dev,
 								      (1 << i),
 								      dac),
-						(1 << i));
+						(1 << i),
+						0);
 		else
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_enum(dev,
@@ -1074,6 +1112,7 @@
 	ATOM_FIRMWARE_INFO_V1_3 info_13;
 	ATOM_FIRMWARE_INFO_V1_4 info_14;
 	ATOM_FIRMWARE_INFO_V2_1 info_21;
+	ATOM_FIRMWARE_INFO_V2_2 info_22;
 };
 
 bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -1148,8 +1187,12 @@
 		*p2pll = *p1pll;
 
 		/* system clock */
-		spll->reference_freq =
-		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		if (ASIC_IS_DCE4(rdev))
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+		else
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
 		spll->reference_div = 0;
 
 		spll->pll_out_min =
@@ -1171,8 +1214,12 @@
 		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
 
 		/* memory clock */
-		mpll->reference_freq =
-		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		if (ASIC_IS_DCE4(rdev))
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+		else
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
 		mpll->reference_div = 0;
 
 		mpll->pll_out_min =
@@ -1201,8 +1248,12 @@
 		if (ASIC_IS_DCE4(rdev)) {
 			rdev->clock.default_dispclk =
 				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-			if (rdev->clock.default_dispclk == 0)
-				rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+			if (rdev->clock.default_dispclk == 0) {
+				if (ASIC_IS_DCE5(rdev))
+					rdev->clock.default_dispclk = 54000; /* 540 MHz */
+				else
+					rdev->clock.default_dispclk = 60000; /* 600 MHz */
+			}
 			rdev->clock.dp_extclk =
 				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 		}
@@ -1337,6 +1388,43 @@
 	return false;
 }
 
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+						 struct radeon_atom_ss *ss,
+						 int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+	u8 frev, crev;
+	u16 percentage = 0, rate = 0;
+
+	/* get any igp specific overrides */
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (id) {
+		case ASIC_INTERNAL_SS_ON_TMDS:
+			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
+			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+			break;
+		case ASIC_INTERNAL_SS_ON_HDMI:
+			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
+			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+			break;
+		case ASIC_INTERNAL_SS_ON_LVDS:
+			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
+			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+			break;
+		}
+		if (percentage)
+			ss->percentage = percentage;
+		if (rate)
+			ss->rate = rate;
+	}
+}
+
 union asic_ss_info {
 	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
 	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
@@ -1401,6 +1489,8 @@
 						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
 					ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+					if (rdev->flags & RADEON_IS_IGP)
+						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
 					return true;
 				}
 			}
@@ -1477,6 +1567,9 @@
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
+		lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
+		lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 
@@ -1489,6 +1582,59 @@
 		else
 			lvds->linkb = false;
 
+		/* parse the lcd record table */
+		if (lvds_info->info.usModePatchTableOffset) {
+			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+			bool bad_record = false;
+			u8 *record = (u8 *)(mode_info->atom_context->bios +
+					    data_offset +
+					    lvds_info->info.usModePatchTableOffset);
+			while (*record != ATOM_RECORD_END_TYPE) {
+				switch (*record) {
+				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+					record += sizeof(ATOM_PATCH_RECORD_MODE);
+					break;
+				case LCD_RTS_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_RTS_RECORD);
+					break;
+				case LCD_CAP_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+					break;
+				case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+					fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+					if (fake_edid_record->ucFakeEDIDLength) {
+						struct edid *edid;
+						int edid_size =
+							max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+						edid = kmalloc(edid_size, GFP_KERNEL);
+						if (edid) {
+							memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+							       fake_edid_record->ucFakeEDIDLength);
+
+							if (drm_edid_is_valid(edid))
+								rdev->mode_info.bios_hardcoded_edid = edid;
+							else
+								kfree(edid);
+						}
+					}
+					record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					break;
+				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+					lvds->native_mode.width_mm = panel_res_record->usHSize;
+					lvds->native_mode.height_mm = panel_res_record->usVSize;
+					record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+					break;
+				default:
+					DRM_ERROR("Bad LCD record %d\n", *record);
+					bad_record = true;
+					break;
+				}
+				if (bad_record)
+					break;
+			}
+		}
 	}
 	return lvds;
 }
@@ -1740,495 +1886,613 @@
 	"RV6xx",
 	"RV770",
 	"adt7473",
+	"NONE",
 	"External GPIO",
 	"Evergreen",
-	"adt7473 with internal",
-
+	"emc2103",
+	"Sumo",
+	"Northern Islands",
 };
 
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
-	struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
 };
 
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+						 int state_index,
+						 u32 misc, u32 misc2)
+{
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	/* order matters! */
+	if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_POWERSAVE;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	}
+	if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[0];
+	} else if (state_index == 0) {
+		rdev->pm.power_state[state_index].clock_info[0].flags |=
+			RADEON_PM_MODE_NO_DISPLAY;
+	}
+}
+
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	u32 misc, misc2 = 0;
+	int num_modes = 0, i;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	/* add the i2c bus for thermal/fan chip */
+	if (power_info->info.ucOverdriveThermalController > 0) {
+		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
+			 power_info->info.ucOverdriveControllerAddress >> 1);
+		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		if (rdev->pm.i2c_bus) {
+			struct i2c_board_info info = { };
+			const char *name = thermal_controller_names[power_info->info.
+								    ucOverdriveThermalController];
+			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+			strlcpy(info.type, name, sizeof(info.type));
+			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+		}
+	}
+	num_modes = power_info->info.ucNumOfPowerModeEntries;
+	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	/* last mode is usually default, array is low to high */
+	for (i = 0; i < num_modes; i++) {
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+		switch (frev) {
+		case 1:
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+			state_index++;
+			break;
+		case 2:
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		case 3:
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+						true;
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+				}
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		}
+	}
+	/* last mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[state_index - 1].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index - 1;
+		rdev->pm.power_state[state_index - 1].default_clock_mode =
+			&rdev->pm.power_state[state_index - 1].clock_info[0];
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+		rdev->pm.power_state[state_index].misc = 0;
+		rdev->pm.power_state[state_index].misc2 = 0;
+	}
+	return state_index;
+}
+
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+							 ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* add the i2c bus for thermal/fan chip */
+	if (controller->ucType > 0) {
+		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+		} else if ((controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+			DRM_INFO("Special thermal controller config\n");
+		} else {
+			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+				 pp_lib_thermal_controller_names[controller->ucType],
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = pp_lib_thermal_controller_names[controller->ucType];
+				info.addr = controller->ucI2cAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		}
+	}
+}
+
+static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+	u16 vddc = 0;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+	}
+
+	return vddc;
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+						       int state_index, int mode_index,
+						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+	int j;
+	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+	u16 vddc = radeon_atombios_get_default_vddc(rdev);
+
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	rdev->pm.power_state[state_index].pcie_lanes =
+		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	}
+	rdev->pm.power_state[state_index].flags = 0;
+	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		rdev->pm.power_state[state_index].flags |=
+			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+		if (ASIC_IS_DCE5(rdev)) {
+			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+		} else {
+			/* patch the table values with the default sclk/mclk from firmware info */
+			for (j = 0; j < mode_index; j++) {
+				rdev->pm.power_state[state_index].clock_info[j].mclk =
+					rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[j].sclk =
+					rdev->clock.default_sclk;
+				if (vddc)
+					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+						vddc;
+			}
+		}
+	}
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+						   int state_index, int mode_index,
+						   union pplib_clock_info *clock_info)
+{
+	u32 sclk, mclk;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family >= CHIP_PALM) {
+			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		} else {
+			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		}
+	} else if (ASIC_IS_DCE4(rdev)) {
+		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			clock_info->evergreen.usVDDC;
+	} else {
+		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+		sclk |= clock_info->r600.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			clock_info->r600.usVDDC;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* skip invalid modes */
+		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+			return false;
+	} else {
+		/* skip invalid modes */
+		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+			return false;
+	}
+	return true;
+}
+
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	/* first mode is usually default, followed by low to high */
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+			clock_info = (union pplib_clock_info *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+				 (power_state->v1.ucClockStateIndices[j] *
+				  power_info->pplib.ucClockInfoSize));
+			valid = radeon_atombios_parse_pplib_clock_info(rdev,
+								       state_index, mode_index,
+								       clock_info);
+			if (valid)
+				mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, non_clock_array_index, clock_array_index;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	struct StateArray *state_array;
+	struct ClockInfoArray *clock_info_array;
+	struct NonClockInfoArray *non_clock_info_array;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	state_array = (struct StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 power_info->pplib.usStateArrayOffset);
+	clock_info_array = (struct ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 power_info->pplib.usClockInfoArrayOffset);
+	non_clock_info_array = (struct NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 power_info->pplib.usNonClockInfoArrayOffset);
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)&state_array->states[i];
+		/* XXX this might be an inagua bug... */
+		non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = power_state->v2.clockInfoIndex[j];
+			/* XXX this might be an inagua bug... */
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			clock_info = (union pplib_clock_info *)
+				&clock_info_array->clockInfo[clock_array_index];
+			valid = radeon_atombios_parse_pplib_clock_info(rdev,
+								       state_index, mode_index,
+								       clock_info);
+			if (valid)
+				mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
 void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 {
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 	u16 data_offset;
 	u8 frev, crev;
-	u32 misc, misc2 = 0, sclk, mclk;
-	union power_info *power_info;
-	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
-	struct _ATOM_PPLIB_STATE *power_state;
-	int num_modes = 0, i, j;
-	int state_index = 0, mode_index = 0;
-	struct radeon_i2c_bus_rec i2c_bus;
+	int state_index = 0;
 
 	rdev->pm.default_power_state_index = -1;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
-		power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-		if (frev < 4) {
-			/* add the i2c bus for thermal/fan chip */
-			if (power_info->info.ucOverdriveThermalController > 0) {
-				DRM_INFO("Possible %s thermal controller at 0x%02x\n",
-					 thermal_controller_names[power_info->info.ucOverdriveThermalController],
-					 power_info->info.ucOverdriveControllerAddress >> 1);
-				i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
-				rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
-				if (rdev->pm.i2c_bus) {
-					struct i2c_board_info info = { };
-					const char *name = thermal_controller_names[power_info->info.
-										    ucOverdriveThermalController];
-					info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
-					strlcpy(info.type, name, sizeof(info.type));
-					i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
-				}
-			}
-			num_modes = power_info->info.ucNumOfPowerModeEntries;
-			if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
-				num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
-			/* last mode is usually default, array is low to high */
-			for (i = 0; i < num_modes; i++) {
-				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-				switch (frev) {
-				case 1:
-					rdev->pm.power_state[state_index].num_clock_modes = 1;
-					rdev->pm.power_state[state_index].clock_info[0].mclk =
-						le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
-					rdev->pm.power_state[state_index].clock_info[0].sclk =
-						le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
-					/* skip invalid modes */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
-						continue;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
-					misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
-					if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
-					    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_GPIO;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-							radeon_lookup_gpio(rdev,
-							power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
-						if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								true;
-						else
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								false;
-					} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_VDDC;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
-							power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
-					}
-					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					rdev->pm.power_state[state_index].misc = misc;
-					/* order matters! */
-					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_POWERSAVE;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					}
-					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[0];
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					} else if (state_index == 0) {
-						rdev->pm.power_state[state_index].clock_info[0].flags |=
-							RADEON_PM_MODE_NO_DISPLAY;
-					}
-					state_index++;
-					break;
-				case 2:
-					rdev->pm.power_state[state_index].num_clock_modes = 1;
-					rdev->pm.power_state[state_index].clock_info[0].mclk =
-						le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
-					rdev->pm.power_state[state_index].clock_info[0].sclk =
-						le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
-					/* skip invalid modes */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
-						continue;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
-					misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
-					misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
-					if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
-					    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_GPIO;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-							radeon_lookup_gpio(rdev,
-							power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
-						if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								true;
-						else
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								false;
-					} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_VDDC;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
-							power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
-					}
-					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					rdev->pm.power_state[state_index].misc = misc;
-					rdev->pm.power_state[state_index].misc2 = misc2;
-					/* order matters! */
-					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_POWERSAVE;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					}
-					if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[0];
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					} else if (state_index == 0) {
-						rdev->pm.power_state[state_index].clock_info[0].flags |=
-							RADEON_PM_MODE_NO_DISPLAY;
-					}
-					state_index++;
-					break;
-				case 3:
-					rdev->pm.power_state[state_index].num_clock_modes = 1;
-					rdev->pm.power_state[state_index].clock_info[0].mclk =
-						le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
-					rdev->pm.power_state[state_index].clock_info[0].sclk =
-						le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
-					/* skip invalid modes */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
-						continue;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
-					misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
-					misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
-					if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
-					    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_GPIO;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-							radeon_lookup_gpio(rdev,
-							power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
-						if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								true;
-						else
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								false;
-					} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_VDDC;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
-							power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
-						if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
-							rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
-								true;
-							rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
-							power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
-						}
-					}
-					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					rdev->pm.power_state[state_index].misc = misc;
-					rdev->pm.power_state[state_index].misc2 = misc2;
-					/* order matters! */
-					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_POWERSAVE;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					}
-					if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[0];
-					} else if (state_index == 0) {
-						rdev->pm.power_state[state_index].clock_info[0].flags |=
-							RADEON_PM_MODE_NO_DISPLAY;
-					}
-					state_index++;
-					break;
-				}
-			}
-			/* last mode is usually default */
-			if (rdev->pm.default_power_state_index == -1) {
-				rdev->pm.power_state[state_index - 1].type =
-					POWER_STATE_TYPE_DEFAULT;
-				rdev->pm.default_power_state_index = state_index - 1;
-				rdev->pm.power_state[state_index - 1].default_clock_mode =
-					&rdev->pm.power_state[state_index - 1].clock_info[0];
-				rdev->pm.power_state[state_index].flags &=
-					~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-				rdev->pm.power_state[state_index].misc = 0;
-				rdev->pm.power_state[state_index].misc2 = 0;
-			}
-		} else {
-			int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-			uint8_t fw_frev, fw_crev;
-			uint16_t fw_data_offset, vddc = 0;
-			union firmware_info *firmware_info;
-			ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
-
-			if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
-						   &fw_frev, &fw_crev, &fw_data_offset)) {
-				firmware_info =
-					(union firmware_info *)(mode_info->atom_context->bios +
-								fw_data_offset);
-				vddc = firmware_info->info_14.usBootUpVDDCVoltage;
-			}
-
-			/* add the i2c bus for thermal/fan chip */
-			if (controller->ucType > 0) {
-				if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
-					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
-				} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
-					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
-				} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
-					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
-				} else if ((controller->ucType ==
-					    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-					   (controller->ucType ==
-					    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
-					DRM_INFO("Special thermal controller config\n");
-				} else {
-					DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
-						 pp_lib_thermal_controller_names[controller->ucType],
-						 controller->ucI2cAddress >> 1,
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
-					rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
-					if (rdev->pm.i2c_bus) {
-						struct i2c_board_info info = { };
-						const char *name = pp_lib_thermal_controller_names[controller->ucType];
-						info.addr = controller->ucI2cAddress >> 1;
-						strlcpy(info.type, name, sizeof(info.type));
-						i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
-					}
-
-				}
-			}
-			/* first mode is usually default, followed by low to high */
-			for (i = 0; i < power_info->info_4.ucNumStates; i++) {
-				mode_index = 0;
-				power_state = (struct _ATOM_PPLIB_STATE *)
-					(mode_info->atom_context->bios +
-					 data_offset +
-					 le16_to_cpu(power_info->info_4.usStateArrayOffset) +
-					 i * power_info->info_4.ucStateEntrySize);
-				non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
-					(mode_info->atom_context->bios +
-					 data_offset +
-					 le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
-					 (power_state->ucNonClockStateIndex *
-					  power_info->info_4.ucNonClockSize));
-				for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
-					if (rdev->flags & RADEON_IS_IGP) {
-						struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
-							(struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
-							(mode_info->atom_context->bios +
-							 data_offset +
-							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
-							 (power_state->ucClockStateIndices[j] *
-							  power_info->info_4.ucClockInfoSize));
-						sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
-						sclk |= clock_info->ucLowEngineClockHigh << 16;
-						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
-						/* skip invalid modes */
-						if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
-							continue;
-						/* voltage works differently on IGPs */
-						mode_index++;
-					} else if (ASIC_IS_DCE4(rdev)) {
-						struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
-							(struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
-							(mode_info->atom_context->bios +
-							 data_offset +
-							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
-							 (power_state->ucClockStateIndices[j] *
-							  power_info->info_4.ucClockInfoSize));
-						sclk = le16_to_cpu(clock_info->usEngineClockLow);
-						sclk |= clock_info->ucEngineClockHigh << 16;
-						mclk = le16_to_cpu(clock_info->usMemoryClockLow);
-						mclk |= clock_info->ucMemoryClockHigh << 16;
-						rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
-						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
-						/* skip invalid modes */
-						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
-						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
-							continue;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
-							VOLTAGE_SW;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-							clock_info->usVDDC;
-						/* XXX usVDDCI */
-						mode_index++;
-					} else {
-						struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
-							(struct _ATOM_PPLIB_R600_CLOCK_INFO *)
-							(mode_info->atom_context->bios +
-							 data_offset +
-							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
-							 (power_state->ucClockStateIndices[j] *
-							  power_info->info_4.ucClockInfoSize));
-						sclk = le16_to_cpu(clock_info->usEngineClockLow);
-						sclk |= clock_info->ucEngineClockHigh << 16;
-						mclk = le16_to_cpu(clock_info->usMemoryClockLow);
-						mclk |= clock_info->ucMemoryClockHigh << 16;
-						rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
-						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
-						/* skip invalid modes */
-						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
-						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
-							continue;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
-							VOLTAGE_SW;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-							clock_info->usVDDC;
-						mode_index++;
-					}
-				}
-				rdev->pm.power_state[state_index].num_clock_modes = mode_index;
-				if (mode_index) {
-					misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
-					misc2 = le16_to_cpu(non_clock_info->usClassification);
-					rdev->pm.power_state[state_index].misc = misc;
-					rdev->pm.power_state[state_index].misc2 = misc2;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
-						ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
-					switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
-					case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-						break;
-					case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-						break;
-					case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						break;
-					case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
-						if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
-							rdev->pm.power_state[state_index].type =
-								POWER_STATE_TYPE_PERFORMANCE;
-						break;
-					}
-					rdev->pm.power_state[state_index].flags = 0;
-					if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
-						rdev->pm.power_state[state_index].flags |=
-							RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
-						/* patch the table values with the default slck/mclk from firmware info */
-						for (j = 0; j < mode_index; j++) {
-							rdev->pm.power_state[state_index].clock_info[j].mclk =
-								rdev->clock.default_mclk;
-							rdev->pm.power_state[state_index].clock_info[j].sclk =
-								rdev->clock.default_sclk;
-							if (vddc)
-								rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
-									vddc;
-						}
-					}
-					state_index++;
-				}
-			}
-			/* if multiple clock modes, mark the lowest as no display */
-			for (i = 0; i < state_index; i++) {
-				if (rdev->pm.power_state[i].num_clock_modes > 1)
-					rdev->pm.power_state[i].clock_info[0].flags |=
-						RADEON_PM_MODE_NO_DISPLAY;
-			}
-			/* first mode is usually default */
-			if (rdev->pm.default_power_state_index == -1) {
-				rdev->pm.power_state[0].type =
-					POWER_STATE_TYPE_DEFAULT;
-				rdev->pm.default_power_state_index = 0;
-				rdev->pm.power_state[0].default_clock_mode =
-					&rdev->pm.power_state[0].clock_info[0];
-			}
+		switch (frev) {
+		case 1:
+		case 2:
+		case 3:
+			state_index = radeon_atombios_parse_power_table_1_3(rdev);
+			break;
+		case 4:
+		case 5:
+			state_index = radeon_atombios_parse_power_table_4_5(rdev);
+			break;
+		case 6:
+			state_index = radeon_atombios_parse_power_table_6(rdev);
+			break;
+		default:
+			break;
 		}
 	} else {
 		/* add the default mode */
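Note on the hunk above: the monolithic AtomBIOS power-state parser is replaced by a dispatch on the PowerPlay table's format revision, with radeon_atombios_parse_power_table_1_3/_4_5/_6 each expected to return the number of power states they populated. A trivial, self-contained model of that revision-keyed dispatch follows; the parser bodies are stubs and the model_* names are illustrative, not driver API.

#include <stdio.h>

static int parse_table_1_3(void) { return 2; }   /* stub: pretend 2 states */
static int parse_table_4_5(void) { return 4; }   /* stub */
static int parse_table_6(void)   { return 6; }   /* stub */

/* Pick the parser matching the table's format revision (frev). */
static int parse_power_table(int frev)
{
	switch (frev) {
	case 1:
	case 2:
	case 3:
		return parse_table_1_3();
	case 4:
	case 5:
		return parse_table_4_5();
	case 6:
		return parse_table_6();
	default:
		return 0;	/* unknown revision: no states parsed */
	}
}

int main(void)
{
	for (int frev = 1; frev <= 7; frev++)
		printf("frev %d -> %d state(s)\n", frev, parse_power_table(frev));
	return 0;
}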
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8f2c7b5..1aba85c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -131,6 +131,45 @@
 	return true;
 }
 
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
 static bool r700_read_disabled_bios(struct radeon_device *rdev)
 {
 	uint32_t viph_control;
@@ -416,6 +455,8 @@
 {
 	if (rdev->flags & RADEON_IS_IGP)
 		return igp_read_bios_from_vram(rdev);
+	else if (rdev->family >= CHIP_BARTS)
+		return ni_read_disabled_bios(rdev);
 	else if (rdev->family >= CHIP_RV770)
 		return r700_read_disabled_bios(rdev);
 	else if (rdev->family >= CHIP_R600)
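The new ni_read_disabled_bios() above follows the same discipline as the older ASIC variants: snapshot the bus, VGA and ROM control registers, temporarily enable the ROM and disable VGA decoding, read the BIOS image, then restore every register unconditionally. Below is a minimal generic sketch of that save-modify-restore pattern with a stubbed register file; the register indices and bit masks here are placeholders, not real hardware values.

#include <stdbool.h>
#include <stdio.h>

/* Tiny fake register file standing in for MMIO. */
static unsigned int regs[4] = { 0x11, 0x22, 0x33, 0x44 };
#define RD(r)    (regs[(r)])
#define WR(r, v) (regs[(r)] = (v))

enum { REG_BUS, REG_VGA1, REG_VGA2, REG_ROM };
#define BUS_ROM_DISABLE  (1u << 0)	/* placeholder bit */
#define VGA_ENABLE       (1u << 1)	/* placeholder bit */
#define ROM_SCK_OVERRIDE (1u << 2)	/* placeholder bit */

static bool model_read_rom(void)
{
	printf("reading ROM with bus=%#x vga1=%#x rom=%#x\n",
	       RD(REG_BUS), RD(REG_VGA1), RD(REG_ROM));
	return true;
}

static bool model_read_disabled_bios(void)
{
	unsigned int bus = RD(REG_BUS), vga1 = RD(REG_VGA1);
	unsigned int vga2 = RD(REG_VGA2), rom = RD(REG_ROM);
	bool ok;

	/* enable the ROM, disable VGA decoding, force the ROM clock */
	WR(REG_BUS, bus & ~BUS_ROM_DISABLE);
	WR(REG_VGA1, vga1 & ~VGA_ENABLE);
	WR(REG_VGA2, vga2 & ~VGA_ENABLE);
	WR(REG_ROM, rom | ROM_SCK_OVERRIDE);

	ok = model_read_rom();

	/* restore everything exactly as it was */
	WR(REG_BUS, bus);
	WR(REG_VGA1, vga1);
	WR(REG_VGA2, vga2);
	WR(REG_ROM, rom);
	return ok;
}

int main(void)
{
	model_read_disabled_bios();
	printf("restored: bus=%#x vga1=%#x vga2=%#x rom=%#x\n",
	       RD(REG_BUS), RD(REG_VGA1), RD(REG_VGA2), RD(REG_ROM));
	return 0;
}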
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 137b807..591fcae 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -471,8 +471,9 @@
 	return true;
 }
 
+/* this is used for atom LCDs as well */
 struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
 {
 	if (rdev->mode_info.bios_hardcoded_edid)
 		return rdev->mode_info.bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8afaf7a..22b7e3d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -472,6 +472,9 @@
 	if (mode) {
 		ret = 1;
 		drm_mode_probed_add(connector, mode);
+		/* add the width/height from vbios tables if available */
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
 		/* add scaled modes */
 		radeon_add_common_modes(encoder, connector);
 	}
@@ -1216,7 +1219,7 @@
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_AUTO);
+						      UNDERSCAN_OFF);
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
@@ -1256,7 +1259,7 @@
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_AUTO);
+						      UNDERSCAN_OFF);
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
@@ -1299,7 +1302,7 @@
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_AUTO);
+						      UNDERSCAN_OFF);
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a27..35b5eb8 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@
 			p->relocs_ptr[i] = &p->relocs[i];
 			p->relocs[i].robj = p->relocs[i].gobj->driver_private;
 			p->relocs[i].lobj.bo = p->relocs[i].robj;
-			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.wdomain = r->write_domain;
+			p->relocs[i].lobj.rdomain = r->read_domains;
+			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
-			INIT_LIST_HEAD(&p->relocs[i].lobj.list);
 			radeon_bo_list_add_object(&p->relocs[i].lobj,
-						&p->validated);
+						  &p->validated);
 		}
 	}
 	return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@
 {
 	unsigned i;
 
-	if (!error && parser->ib) {
-		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
-	}
-	radeon_bo_list_unreserve(&parser->validated);
+
+	if (!error && parser->ib)
+		ttm_eu_fence_buffer_objects(&parser->validated,
+					    parser->ib->fence);
+	else
+		ttm_eu_backoff_reservation(&parser->validated);
+
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
 			if (parser->relocs[i].gobj)
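The radeon_cs.c changes above move the command-stream parser onto the TTM execbuf utilities: on success every buffer on the validation list is fenced in a single call, and on error all reservations are backed off, replacing the driver-local list reserve/unreserve/fence helpers. The following is a small self-contained model of that all-or-nothing pattern over a plain array standing in for the validation list; it does not use the real TTM API and the model_* names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct model_bo {
	const char *name;
	bool reserved;
	int fence;		/* 0 = no fence attached */
};

/* Reserve every buffer, or roll back the ones already taken. */
static bool model_reserve_all(struct model_bo *bos, int n)
{
	for (int i = 0; i < n; i++) {
		if (bos[i].reserved)	/* stand-in for a contention failure */
			goto backoff;
		bos[i].reserved = true;
	}
	return true;
backoff:
	for (int i = 0; i < n; i++)
		bos[i].reserved = false;
	return false;
}

/* On success: attach the fence and release; on error: just release. */
static void model_finish(struct model_bo *bos, int n, bool error, int fence)
{
	for (int i = 0; i < n; i++) {
		if (!error)
			bos[i].fence = fence;
		bos[i].reserved = false;
	}
}

int main(void)
{
	struct model_bo bos[2] = {
		{ "vertex",  false, 0 },
		{ "texture", false, 0 },
	};

	if (model_reserve_all(bos, 2)) {
		/* ... command submission would happen here ... */
		model_finish(bos, 2, false, 42);
	}
	for (int i = 0; i < 2; i++)
		printf("%s: fence=%d reserved=%d\n",
		       bos[i].name, bos[i].fence, bos[i].reserved);
	return 0;
}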
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 501966a..26091d6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -81,6 +81,10 @@
 	"JUNIPER",
 	"CYPRESS",
 	"HEMLOCK",
+	"PALM",
+	"BARTS",
+	"TURKS",
+	"CAICOS",
 	"LAST",
 };
 
@@ -224,6 +228,11 @@
 				rdev->wb.use_event = true;
 		}
 	}
+	/* always use writeback/events on NI */
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->wb.enabled = true;
+		rdev->wb.use_event = true;
+	}
 
 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
 
@@ -335,7 +344,12 @@
 	uint32_t reg;
 
 	/* first check CRTCs */
-	if (ASIC_IS_DCE4(rdev)) {
+	if (ASIC_IS_DCE41(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_DCE4(rdev)) {
 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
@@ -636,20 +650,20 @@
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct radeon_device *rdev = dev->dev_private;
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_INFO "radeon: switched on\n");
 		/* don't suspend or resume card normally */
-		rdev->powered_down = false;
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		radeon_resume_kms(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_INFO "radeon: switched off\n");
 		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		radeon_suspend_kms(dev, pmm);
-		/* don't suspend or resume card normally */
-		rdev->powered_down = true;
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
@@ -704,11 +718,6 @@
 	init_waitqueue_head(&rdev->irq.vblank_queue);
 	init_waitqueue_head(&rdev->irq.idle_queue);
 
-	/* setup workqueue */
-	rdev->wq = create_workqueue("radeon");
-	if (rdev->wq == NULL)
-		return -ENOMEM;
-
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r)
@@ -773,6 +782,7 @@
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 	vga_switcheroo_register_client(rdev->pdev,
 				       radeon_switcheroo_set_state,
+				       NULL,
 				       radeon_switcheroo_can_switch);
 
 	r = radeon_init(rdev);
@@ -806,7 +816,6 @@
 	/* evict vram memory */
 	radeon_bo_evict_vram(rdev);
 	radeon_fini(rdev);
-	destroy_workqueue(rdev->wq);
 	vga_switcheroo_unregister_client(rdev->pdev);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	if (rdev->rio_mem)
@@ -835,7 +844,7 @@
 	}
 	rdev = dev->dev_private;
 
-	if (rdev->powered_down)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	/* turn off display hw */
@@ -893,7 +902,7 @@
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
 
-	if (rdev->powered_down)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	acquire_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6..d26dabf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,7 +68,7 @@
 	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
 }
 
-static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -98,6 +98,66 @@
 	}
 }
 
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       (radeon_crtc->lut_r[i] << 20) |
+		       (radeon_crtc->lut_g[i] << 10) |
+		       (radeon_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+
+}
+
 static void legacy_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -130,8 +190,10 @@
 	if (!crtc->enabled)
 		return;
 
-	if (ASIC_IS_DCE4(rdev))
-		evergreen_crtc_load_lut(crtc);
+	if (ASIC_IS_DCE5(rdev))
+		dce5_crtc_load_lut(crtc);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_crtc_load_lut(crtc);
 	else if (ASIC_IS_AVIVO(rdev))
 		avivo_crtc_load_lut(crtc);
 	else
@@ -183,12 +245,272 @@
 	kfree(radeon_crtc);
 }
 
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+	struct radeon_unpin_work *work =
+		container_of(__work, struct radeon_unpin_work, work);
+	int r;
+
+	/* unpin of the old buffer */
+	r = radeon_bo_reserve(work->old_rbo, false);
+	if (likely(r == 0)) {
+		r = radeon_bo_unpin(work->old_rbo);
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to unpin buffer after flip\n");
+		}
+		radeon_bo_unreserve(work->old_rbo);
+	} else
+		DRM_ERROR("failed to reserve buffer after flip\n");
+	kfree(work);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	struct radeon_unpin_work *work;
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	u32 update_pending;
+	int vpos, hpos;
+
+	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+	work = radeon_crtc->unpin_work;
+	if (work == NULL ||
+	    !radeon_fence_signaled(work->fence)) {
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+	/* New pageflip, or just completion of a previous one? */
+	if (!radeon_crtc->deferred_flip_completion) {
+		/* do the flip (mmio) */
+		update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+	} else {
+		/* This is just a completion of a flip queued in crtc
+		 * at last invocation. Make sure we go directly to
+		 * completion routine.
+		 */
+		update_pending = 0;
+		radeon_crtc->deferred_flip_completion = 0;
+	}
+
+	/* Has the pageflip already completed in crtc, or is it certain
+	 * to complete in this vblank?
+	 */
+	if (update_pending &&
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+							       &vpos, &hpos)) &&
+	    (vpos >=0) &&
+	    (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
+		/* crtc didn't flip in this target vblank interval,
+		 * but flip is pending in crtc. It will complete it
+		 * in next vblank interval, so complete the flip at
+		 * next vblank irq.
+		 */
+		radeon_crtc->deferred_flip_completion = 1;
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+
+	/* Pageflip (will be) certainly completed in this vblank. Clean up. */
+	radeon_crtc->unpin_work = NULL;
+
+	/* wakeup userspace */
+	if (work->event) {
+		e = work->event;
+		e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+	radeon_fence_unref(&work->fence);
+	radeon_post_page_flip(work->rdev, work->crtc_id);
+	schedule_work(&work->work);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *old_radeon_fb;
+	struct radeon_framebuffer *new_radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct radeon_fence *fence;
+	struct radeon_unpin_work *work;
+	unsigned long flags;
+	u32 tiling_flags, pitch_pixels;
+	u64 base;
+	int r;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	r = radeon_fence_create(rdev, &fence);
+	if (unlikely(r != 0)) {
+		kfree(work);
+		DRM_ERROR("flip queue: failed to create fence.\n");
+		return -ENOMEM;
+	}
+	work->event = event;
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	work->fence = radeon_fence_ref(fence);
+	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+	new_radeon_fb = to_radeon_framebuffer(fb);
+	/* schedule unpin of the old buffer */
+	obj = old_radeon_fb->obj;
+	rbo = obj->driver_private;
+	work->old_rbo = rbo;
+	INIT_WORK(&work->work, radeon_unpin_work_func);
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (radeon_crtc->unpin_work) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+		radeon_fence_unref(&fence);
+
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		return -EBUSY;
+	}
+	radeon_crtc->unpin_work = work;
+	radeon_crtc->deferred_flip_completion = 0;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* pin the new buffer */
+	obj = new_radeon_fb->obj;
+	rbo = obj->driver_private;
+
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+			 work->old_rbo, rbo);
+
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		/* crtc offset is from display base addr not FB location */
+		base -= radeon_crtc->legacy_display_base_addr;
+		pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+
+		if (tiling_flags & RADEON_TILING_MACRO) {
+			if (ASIC_IS_R300(rdev)) {
+				base &= ~0x7ff;
+			} else {
+				int byteshift = fb->bits_per_pixel >> 4;
+				int tile_addr = (((crtc->y >> 3) * pitch_pixels +  crtc->x) >> (8 - byteshift)) << 11;
+				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+			}
+		} else {
+			int offset = crtc->y * pitch_pixels + crtc->x;
+			switch (fb->bits_per_pixel) {
+			case 8:
+			default:
+				offset *= 1;
+				break;
+			case 15:
+			case 16:
+				offset *= 2;
+				break;
+			case 24:
+				offset *= 3;
+				break;
+			case 32:
+				offset *= 4;
+				break;
+			}
+			base += offset;
+		}
+		base &= ~7;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work->new_crtc_base = base;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* update crtc fb */
+	crtc->fb = fb;
+
+	r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+	if (r) {
+		DRM_ERROR("failed to get vblank before flip\n");
+		goto pflip_cleanup1;
+	}
+
+	/* 32 ought to cover us */
+	r = radeon_ring_lock(rdev, 32);
+	if (r) {
+		DRM_ERROR("failed to lock the ring before flip\n");
+		goto pflip_cleanup2;
+	}
+
+	/* emit the fence */
+	radeon_fence_emit(rdev, fence);
+	/* set the proper interrupt */
+	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+	/* fire the ring */
+	radeon_ring_unlock_commit(rdev);
+
+	return 0;
+
+pflip_cleanup2:
+	drm_vblank_put(dev, radeon_crtc->crtc_id);
+
+pflip_cleanup1:
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo in error path\n");
+		goto pflip_cleanup;
+	}
+	r = radeon_bo_unpin(rbo);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to unpin new rbo in error path\n");
+		goto pflip_cleanup;
+	}
+	radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	radeon_crtc->unpin_work = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	radeon_fence_unref(&fence);
+	kfree(work);
+
+	return r;
+}
+
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
 	.cursor_set = radeon_crtc_cursor_set,
 	.cursor_move = radeon_crtc_cursor_move,
 	.gamma_set = radeon_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = radeon_crtc_destroy,
+	.page_flip = radeon_crtc_page_flip,
 };
 
 static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +547,7 @@
 		radeon_legacy_init_crtc(dev, radeon_crtc);
 }
 
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -260,6 +582,8 @@
 	"INTERNAL_KLDSCP_LVTMA",
 	"INTERNAL_UNIPHY1",
 	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
 };
 
 static const char *connector_names[15] = {
@@ -417,9 +741,17 @@
 	if (!radeon_connector->edid) {
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
 	}
-	/* some servers provide a hardcoded edid in rom for KVMs */
-	if (!radeon_connector->edid)
-		radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
+
+	if (!radeon_connector->edid) {
+		if (rdev->is_atom_bios) {
+			/* some laptops provide a hardcoded edid in rom for LCDs */
+			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		} else
+			/* some servers provide a hardcoded edid in rom for KVMs */
+			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+	}
 	if (radeon_connector->edid) {
 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -849,7 +1181,10 @@
 
 	rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
 
-	if (ASIC_IS_AVIVO(rdev)) {
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->ddev->mode_config.max_width = 16384;
+		rdev->ddev->mode_config.max_height = 16384;
+	} else if (ASIC_IS_AVIVO(rdev)) {
 		rdev->ddev->mode_config.max_width = 8192;
 		rdev->ddev->mode_config.max_height = 8192;
 	} else {
@@ -1019,7 +1354,7 @@
 /*
  * Retrieve current video scanout position of crtc on a given gpu.
  *
- * \param rdev Device to query.
+ * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1366,74 @@
  *
  * \return Flags, or'ed together as follows:
  *
- * RADEON_SCANOUTPOS_VALID = Query successfull.
- * RADEON_SCANOUTPOS_INVBL = Inside vblank.
- * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
  * this flag means that returned position may be offset by a constant but
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
 {
 	u32 stat_crtc = 0, vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
 	bool in_vbl = true;
 
+	struct radeon_device *rdev = dev->dev_private;
+
 	if (ASIC_IS_DCE4(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC0_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC0_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 1) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC1_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC1_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 2) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC2_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC2_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 3) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC3_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC3_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 4) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC4_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC4_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 5) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC5_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC5_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 	} else if (ASIC_IS_AVIVO(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
 			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 1) {
 			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
 			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 	} else {
 		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
@@ -1112,7 +1449,7 @@
 			if (!(stat_crtc & 1))
 				in_vbl = false;
 
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 1) {
 			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
@@ -1122,7 +1459,7 @@
 			if (!(stat_crtc & 1))
 				in_vbl = false;
 
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 	}
 
@@ -1133,13 +1470,13 @@
 	/* Valid vblank area boundaries from gpu retrieved? */
 	if (vbl > 0) {
 		/* Yes: Decode. */
-		ret |= RADEON_SCANOUTPOS_ACCURATE;
+		ret |= DRM_SCANOUTPOS_ACCURATE;
 		vbl_start = vbl & 0x1fff;
 		vbl_end = (vbl >> 16) & 0x1fff;
 	}
 	else {
 		/* No: Fake something reasonable which gives at least ok results. */
-		vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
 		vbl_end = 0;
 	}
 
@@ -1155,7 +1492,7 @@
 
 	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
 	if (in_vbl && (*vpos >= vbl_start)) {
-		vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
 		*vpos = *vpos - vtotal;
 	}
 
@@ -1164,7 +1501,7 @@
 
 	/* In vblank? */
 	if (in_vbl)
-		ret |= RADEON_SCANOUTPOS_INVBL;
+		ret |= DRM_SCANOUTPOS_INVBL;
 
 	return ret;
 }
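The page-flip support added to radeon_display.c above queues the MMIO flip from the vblank handler and then decides whether the flip will complete in the current vblank: if the hardware still reports the flip as pending and the scanout position is below roughly 99% of the active vertical display, completion is deferred to the next vblank interrupt. Here is a minimal standalone sketch of that decision, assuming a simplified CRTC state; the model_* names are illustrative and not driver API.

#include <stdbool.h>
#include <stdio.h>

/* Flag mirroring the DRM scanout-position query result. */
#define MODEL_SCANOUTPOS_VALID (1 << 0)

/*
 * Decide whether a queued page flip can be completed in the current
 * vblank or must be deferred to the next one.  'update_pending' is the
 * hardware's "flip not latched yet" status, 'vpos' the current vertical
 * scanout position, 'vdisplay' the active vertical resolution.
 */
static bool model_defer_flip(unsigned int flags, bool update_pending,
			     int vpos, int vdisplay)
{
	if (!update_pending)
		return false;		/* already latched, complete now */
	if (!(flags & MODEL_SCANOUTPOS_VALID))
		return false;		/* no position info, assume done */
	/* Still inside ~99% of the active area: the flip will only be
	 * latched at the next vblank, so defer the completion event. */
	return vpos >= 0 && vpos < (99 * vdisplay) / 100;
}

int main(void)
{
	printf("near top of frame:    defer=%d\n",
	       model_defer_flip(MODEL_SCANOUTPOS_VALID, true, 100, 1080));
	printf("almost in vblank:     defer=%d\n",
	       model_defer_flip(MODEL_SCANOUTPOS_VALID, true, 1075, 1080));
	printf("flip already latched: defer=%d\n",
	       model_defer_flip(MODEL_SCANOUTPOS_VALID, false, 100, 1080));
	return 0;
}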
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 60e689f..be5cb4f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
  *   2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	7
+#define KMS_DRIVER_MINOR	8
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -66,6 +67,10 @@
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags);
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -74,6 +79,8 @@
 			 struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
 extern struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -296,6 +303,8 @@
 	.get_vblank_counter = radeon_get_vblank_counter_kms,
 	.enable_vblank = radeon_enable_vblank_kms,
 	.disable_vblank = radeon_disable_vblank_kms,
+	.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+	.get_scanout_position = radeon_get_crtc_scanoutpos,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = radeon_debugfs_init,
 	.debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 041943d..8fd1842 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid)) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
-		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid)) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		else if (drm_detect_monitor_audio(radeon_connector->edid)) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -712,8 +712,8 @@
  * - 2 DIG encoder blocks.
  * DIG1/2 can drive UNIPHY0/1/2 link A or link B
  *
- * DCE 4.0
- * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
  * Supports up to 6 digital outputs
  * - 6 DIG encoder blocks.
  * - DIG to PHY mapping is hardcoded
@@ -724,6 +724,12 @@
  * DIG5 drives UNIPHY2 link A, A+B
  * DIG6 drives UNIPHY2 link B
  *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
  * Routing
  * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
  * Examples:
@@ -737,6 +743,7 @@
 	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
 	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
 	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
 };
 
 void
@@ -752,6 +759,7 @@
 	uint8_t frev, crev;
 	int dp_clock = 0;
 	int dp_lane_count = 0;
+	int hpd_id = RADEON_HPD_NONE;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -760,6 +768,7 @@
 
 		dp_clock = dig_connector->dp_clock;
 		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = radeon_connector->hpd.hpd;
 	}
 
 	/* no dig encoder assigned */
@@ -784,19 +793,36 @@
 	args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 	args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
-	if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-		if (dp_clock == 270000)
-			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+	if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+	    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
 		args.v1.ucLaneNum = dp_lane_count;
-	} else if (radeon_encoder->pixel_clock > 165000)
+	else if (radeon_encoder->pixel_clock > 165000)
 		args.v1.ucLaneNum = 8;
 	else
 		args.v1.ucLaneNum = 4;
 
-	if (ASIC_IS_DCE4(rdev)) {
+	if (ASIC_IS_DCE5(rdev)) {
+		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+		    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
+			if (dp_clock == 270000)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+			else if (dp_clock == 540000)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+		}
+		args.v4.acConfig.ucDigSel = dig->dig_encoder;
+		args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+		if (hpd_id == RADEON_HPD_NONE)
+			args.v4.ucHPD_ID = 0;
+		else
+			args.v4.ucHPD_ID = hpd_id + 1;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
 		args.v3.acConfig.ucDigSel = dig->dig_encoder;
 		args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
 	} else {
+		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -823,6 +849,7 @@
 	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
 };
 
 void
@@ -917,10 +944,18 @@
 			struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 			pll_id = radeon_crtc->pll_id;
 		}
-		if (is_dp && rdev->clock.dp_extclk)
-			args.v3.acConfig.ucRefClkSource = 2; /* external src */
-		else
-			args.v3.acConfig.ucRefClkSource = pll_id;
+
+		if (ASIC_IS_DCE5(rdev)) {
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v4.acConfig.ucRefClkSource = 3; /* external src */
+			else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+		} else {
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+		}
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -1044,6 +1079,7 @@
 
 union external_encoder_control {
 	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
 };
 
 static void
@@ -1054,6 +1090,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
 	union external_encoder_control args;
 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1061,6 +1098,7 @@
 	int dp_clock = 0;
 	int dp_lane_count = 0;
 	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1099,6 +1137,37 @@
 			else
 				args.v1.sDigEncoder.ucLaneNum = 4;
 			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = connector_object_id;
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_encoder->pixel_clock > 165000)
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+			break;
 		default:
 			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
 			return;
@@ -1158,6 +1227,8 @@
 	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
 	int index = 0;
 	bool is_dig = false;
+	bool is_dce5_dac = false;
+	bool is_dce5_dvo = false;
 
 	memset(&args, 0, sizeof(args));
 
@@ -1180,7 +1251,9 @@
 		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-		if (ASIC_IS_DCE3(rdev))
+		if (ASIC_IS_DCE5(rdev))
+			is_dce5_dvo = true;
+		else if (ASIC_IS_DCE3(rdev))
 			is_dig = true;
 		else
 			index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
@@ -1196,12 +1269,16 @@
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
-		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		if (ASIC_IS_DCE5(rdev))
+			is_dce5_dac = true;
+		else {
+			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+				index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+			else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+				index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+			else
+				index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		}
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
@@ -1260,6 +1337,28 @@
 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
 			break;
 		}
+	} else if (is_dce5_dac) {
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			atombios_dac_setup(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			atombios_dac_setup(encoder, ATOM_DISABLE);
+			break;
+		}
+	} else if (is_dce5_dvo) {
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			atombios_dvo_setup(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			atombios_dvo_setup(encoder, ATOM_DISABLE);
+			break;
+		}
 	} else {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
@@ -1289,12 +1388,18 @@
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
 		default:
-			action = ATOM_ENABLE;
+			if (ASIC_IS_DCE41(rdev))
+				action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
+			else
+				action = ATOM_ENABLE;
 			break;
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:
 		case DRM_MODE_DPMS_OFF:
-			action = ATOM_DISABLE;
+			if (ASIC_IS_DCE41(rdev))
+				action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
+			else
+				action = ATOM_DISABLE;
 			break;
 		}
 		atombios_external_encoder_setup(encoder, ext_encoder, action);
@@ -1483,27 +1588,35 @@
 	struct radeon_encoder_atom_dig *dig;
 	uint32_t dig_enc_in_use = 0;
 
+	/* DCE4/5 */
 	if (ASIC_IS_DCE4(rdev)) {
 		dig = radeon_encoder->enc_priv;
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		if (ASIC_IS_DCE41(rdev)) {
 			if (dig->linkb)
 				return 1;
 			else
 				return 0;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-			if (dig->linkb)
-				return 3;
-			else
-				return 2;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			if (dig->linkb)
-				return 5;
-			else
-				return 4;
-			break;
+		} else {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					return 3;
+				else
+					return 2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					return 5;
+				else
+					return 4;
+				break;
+			}
 		}
 	}
 
@@ -1610,7 +1723,13 @@
 	}
 
 	if (ext_encoder) {
-		atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+		if (ASIC_IS_DCE41(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
 	}
 
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1927,7 +2046,10 @@
 }
 
 void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev,
+			uint32_t encoder_enum,
+			uint32_t supported_device,
+			u16 caps)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_encoder *encoder;
@@ -1970,6 +2092,7 @@
 	radeon_encoder->rmx_type = RMX_OFF;
 	radeon_encoder->underscan_type = UNDERSCAN_OFF;
 	radeon_encoder->is_ext_encoder = false;
+	radeon_encoder->caps = caps;
 
 	switch (radeon_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -2029,6 +2152,8 @@
 	case ENCODER_OBJECT_ID_TITFP513:
 	case ENCODER_OBJECT_ID_VT1623:
 	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
 		/* these are handled by the primary encoders */
 		radeon_encoder->is_ext_encoder = true;
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066..1ca55eb 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,10 @@
 	CHIP_JUNIPER,
 	CHIP_CYPRESS,
 	CHIP_HEMLOCK,
+	CHIP_PALM,
+	CHIP_BARTS,
+	CHIP_TURKS,
+	CHIP_CAICOS,
 	CHIP_LAST,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6abea32..66324b5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -247,8 +247,6 @@
 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = rdev->mc.aper_size;
 
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
 	info->pixmap.size = 64*1024;
 	info->pixmap.buf_align = 8;
 	info->pixmap.access_align = 32;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb28..171b0b2 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
@@ -57,6 +58,7 @@
 	} else
 		radeon_fence_ring_emit(rdev, fence);
 
+	trace_radeon_fence_emit(rdev->ddev, fence->seq);
 	fence->emited = true;
 	list_del(&fence->list);
 	list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@
 retry:
 	/* save current sequence used to check for GPU lockup */
 	seq = rdev->fence_drv.last_seq;
+	trace_radeon_fence_wait_begin(rdev->ddev, seq);
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@
 			 radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
+	trace_radeon_fence_wait_end(rdev->ddev, seq);
 	if (unlikely(!radeon_fence_signaled(fence))) {
 		/* we were interrupted for some reason and fence isn't
 		 * isn't signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7e..a289646 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -64,15 +64,15 @@
 	struct radeon_device *rdev = dev->dev_private;
 	unsigned i;
 
-	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
-
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++) {
 		rdev->irq.hpd[i] = false;
+		rdev->irq.pflip[i] = false;
+	}
 	radeon_irq_set(rdev);
 	/* Clear bits */
 	radeon_irq_process(rdev);
@@ -101,8 +101,10 @@
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++) {
 		rdev->irq.hpd[i] = false;
+		rdev->irq.pflip[i] = false;
+	}
 	radeon_irq_set(rdev);
 }
 
@@ -110,6 +112,8 @@
 {
 	int r = 0;
 
+	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
 	spin_lock_init(&rdev->irq.sw_lock);
 	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
@@ -121,7 +125,7 @@
 	 * chips.  Disable MSI on them for now.
 	 */
 	if ((rdev->family >= CHIP_RV380) &&
-	    (!(rdev->flags & RADEON_IS_IGP)) &&
+	    ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
 	    (!(rdev->flags & RADEON_IS_AGP))) {
 		int ret = pci_enable_msi(rdev->pdev);
 		if (!ret) {
@@ -148,6 +152,7 @@
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
 	}
+	flush_work_sync(&rdev->hotplug_work);
 }
 
 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
@@ -175,3 +180,34 @@
 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
 }
 
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+	if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
+		rdev->irq.pflip[crtc] = true;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
+	if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
+		rdev->irq.pflip[crtc] = false;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
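The new radeon_irq_kms_pflip_irq_get/put pair above keeps a per-CRTC reference count so the page-flip interrupt is programmed only on the 0-to-1 transition and disabled again when the last user drops its reference, all under a per-CRTC spinlock. A standalone, single-threaded model of those refcount transitions follows (the locking is therefore omitted); the model_* names are illustrative.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_NUM_CRTC 6

static int pflip_refcount[MODEL_NUM_CRTC];
static bool pflip_enabled[MODEL_NUM_CRTC];

static void model_pflip_irq_get(int crtc)
{
	if (crtc < 0 || crtc >= MODEL_NUM_CRTC)
		return;
	/* enable the interrupt only on the first reference */
	if (++pflip_refcount[crtc] == 1) {
		pflip_enabled[crtc] = true;
		printf("crtc %d: pflip irq enabled\n", crtc);
	}
}

static void model_pflip_irq_put(int crtc)
{
	if (crtc < 0 || crtc >= MODEL_NUM_CRTC)
		return;
	assert(pflip_refcount[crtc] > 0);
	/* disable again when the last user drops its reference */
	if (--pflip_refcount[crtc] == 0) {
		pflip_enabled[crtc] = false;
		printf("crtc %d: pflip irq disabled\n", crtc);
	}
}

int main(void)
{
	model_pflip_irq_get(0);	/* enables the irq */
	model_pflip_irq_get(0);	/* refcount only */
	model_pflip_irq_put(0);	/* still enabled */
	model_pflip_irq_put(0);	/* disables the irq */
	return 0;
}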
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8fbbe1c..28a53e4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -96,9 +96,27 @@
 	return r;
 }
 
+static void radeon_set_filp_rights(struct drm_device *dev,
+				   struct drm_file **owner,
+				   struct drm_file *applier,
+				   uint32_t *value)
+{
+	mutex_lock(&dev->struct_mutex);
+	if (*value == 1) {
+		/* wants rights */
+		if (!*owner)
+			*owner = applier;
+	} else if (*value == 0) {
+		/* revokes rights */
+		if (*owner == applier)
+			*owner = NULL;
+	}
+	*value = *owner == applier ? 1 : 0;
+	mutex_unlock(&dev->struct_mutex);
+}
 
 /*
- * Userspace get informations ioctl
+ * Userspace get information ioctl
  */
 int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
@@ -173,18 +191,15 @@
 			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
 			return -EINVAL;
 		}
-		mutex_lock(&dev->struct_mutex);
-		if (value == 1) {
-			/* wants hyper-z */
-			if (!rdev->hyperz_filp)
-				rdev->hyperz_filp = filp;
-		} else if (value == 0) {
-			/* revokes hyper-z */
-			if (rdev->hyperz_filp == filp)
-				rdev->hyperz_filp = NULL;
+		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+		break;
+	case RADEON_INFO_WANT_CMASK:
+		/* The same logic as Hyper-Z. */
+		if (value >= 2) {
+			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+			return -EINVAL;
 		}
-		value = rdev->hyperz_filp == filp ?  1 : 0;
-		mutex_unlock(&dev->struct_mutex);
+		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
 		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
@@ -203,10 +218,6 @@
  */
 int radeon_driver_firstopen_kms(struct drm_device *dev)
 {
-	struct radeon_device *rdev = dev->dev_private;
-
-	if (rdev->powered_down)
-		return -EINVAL;
 	return 0;
 }
 
@@ -277,6 +288,27 @@
 	radeon_irq_set(rdev);
 }
 
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get associated drm_crtc: */
+	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags,
+						     drmcrtc);
+}
 
 /*
  * IOCTL.
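The radeon_set_filp_rights() helper introduced above centralizes the grant/revoke logic previously open-coded for Hyper-Z and now shared with the new CMASK ownership: a value of 1 requests exclusive rights (granted only if currently unowned), 0 releases them (only by the current owner), and the value is rewritten to report whether the caller ends up owning the resource. The real helper serializes this under dev->struct_mutex; the userspace model below skips the locking and uses illustrative names.

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the exclusive-rights arbitration: 'owner' points at the slot
 * holding the current owner token, 'applier' identifies the caller and
 * '*value' is 1 to request ownership, 0 to drop it.  On return, *value
 * reports whether the caller owns the resource.
 */
static void model_set_rights(const void **owner, const void *applier,
			     uint32_t *value)
{
	if (*value == 1) {		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {	/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = (*owner == applier) ? 1 : 0;
}

int main(void)
{
	const void *owner = NULL;
	int a, b;
	uint32_t v;

	v = 1; model_set_rights(&owner, &a, &v);
	printf("A requests: granted=%u\n", v);	/* 1 */
	v = 1; model_set_rights(&owner, &b, &v);
	printf("B requests: granted=%u\n", v);	/* 0, A still owns it */
	v = 0; model_set_rights(&owner, &a, &v);
	printf("A releases: owns=%u\n", v);	/* 0 */
	return 0;
}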
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e301c6f..12bdeab 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -277,6 +277,9 @@
 	fixed20_12 hsc;
 	struct drm_display_mode native_mode;
 	int pll_id;
+	/* page flipping */
+	struct radeon_unpin_work *unpin_work;
+	int deferred_flip_completion;
 };
 
 struct radeon_encoder_primary_dac {
@@ -376,6 +379,7 @@
 	int hdmi_audio_workaround;
 	int hdmi_buffer_status;
 	bool is_ext_encoder;
+	u16 caps;
 };
 
 struct radeon_connector_atom_dig {
@@ -442,10 +446,6 @@
 	struct drm_gem_object *obj;
 };
 
-/* radeon_get_crtc_scanoutpos() return flags */
-#define RADEON_SCANOUTPOS_VALID        (1 << 0)
-#define RADEON_SCANOUTPOS_INVBL        (1 << 1)
-#define RADEON_SCANOUTPOS_ACCURATE     (1 << 2)
 
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -562,11 +562,12 @@
 extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 				   int x, int y);
 
-extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
 
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
 extern bool radeon_atom_get_clock_info(struct drm_device *dev);
 extern bool radeon_combios_get_clock_info(struct drm_device *dev);
 extern struct radeon_encoder_atom_dig *
@@ -662,4 +663,7 @@
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a598d00..7d6b8e8 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
 #include <drm/drmP.h>
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 
 int radeon_ttm_init(struct radeon_device *rdev);
@@ -146,6 +147,7 @@
 		list_add_tail(&bo->list, &rdev->gem.objects);
 		mutex_unlock(&bo->rdev->gem.mutex);
 	}
+	trace_radeon_bo_create(bo);
 	return 0;
 }
 
@@ -302,34 +304,9 @@
 				struct list_head *head)
 {
 	if (lobj->wdomain) {
-		list_add(&lobj->list, head);
+		list_add(&lobj->tv.head, head);
 	} else {
-		list_add_tail(&lobj->list, head);
-	}
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-	int r;
-
-	list_for_each_entry(lobj, head, list){
-		r = radeon_bo_reserve(lobj->bo, false);
-		if (unlikely(r != 0))
-			return r;
-		lobj->reserved = true;
-	}
-	return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-
-	list_for_each_entry(lobj, head, list) {
-		/* only unreserve object we successfully reserved */
-		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
-			radeon_bo_unreserve(lobj->bo);
+		list_add_tail(&lobj->tv.head, head);
 	}
 }
 
@@ -340,14 +317,11 @@
 	u32 domain;
 	int r;
 
-	list_for_each_entry(lobj, head, list) {
-		lobj->reserved = false;
-	}
-	r = radeon_bo_list_reserve(head);
+	r = ttm_eu_reserve_buffers(head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
-	list_for_each_entry(lobj, head, list) {
+	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -370,25 +344,6 @@
 	return 0;
 }
 
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
-	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
-
-	list_for_each_entry(lobj, head, list) {
-		bo = lobj->bo;
-		spin_lock(&bo->tbo.lock);
-		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-		bo->tbo.sync_obj = radeon_fence_ref(fence);
-		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.lock);
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
-	}
-}
-
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			     struct vm_area_struct *vma)
 {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702..22d4c23 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
 	if (unlikely(r != 0))
 		return r;
-	spin_lock(&bo->tbo.lock);
+	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
 		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.lock);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
@@ -152,10 +152,7 @@
 extern void radeon_bo_fini(struct radeon_device *rdev);
 extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
-extern int radeon_bo_list_reserve(struct list_head *head);
-extern void radeon_bo_list_unreserve(struct list_head *head);
 extern int radeon_bo_list_validate(struct list_head *head);
-extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c9b2ef..3b1b2bf 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -167,13 +167,13 @@
 	if (radeon_gui_idle(rdev)) {
 		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 			clock_info[rdev->pm.requested_clock_mode_index].sclk;
-		if (sclk > rdev->clock.default_sclk)
-			sclk = rdev->clock.default_sclk;
+		if (sclk > rdev->pm.default_sclk)
+			sclk = rdev->pm.default_sclk;
 
 		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 			clock_info[rdev->pm.requested_clock_mode_index].mclk;
-		if (mclk > rdev->clock.default_mclk)
-			mclk = rdev->clock.default_mclk;
+		if (mclk > rdev->pm.default_mclk)
+			mclk = rdev->pm.default_mclk;
 
 		/* upvolt before raising clocks, downvolt after lowering clocks */
 		if (sclk < rdev->pm.current_sclk)
@@ -405,20 +405,13 @@
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 		mutex_unlock(&rdev->pm.mutex);
 	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
-		bool flush_wq = false;
-
 		mutex_lock(&rdev->pm.mutex);
-		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
-			flush_wq = true;
-		}
 		/* disable dynpm */
 		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 		rdev->pm.pm_method = PM_METHOD_PROFILE;
 		mutex_unlock(&rdev->pm.mutex);
-		if (flush_wq)
-			flush_workqueue(rdev->wq);
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 	} else {
 		DRM_ERROR("invalid power method!\n");
 		goto fail;
@@ -447,8 +440,12 @@
 		temp = rv770_get_temp(rdev);
 		break;
 	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
 		temp = evergreen_get_temp(rdev);
 		break;
+	case THERMAL_TYPE_SUMO:
+		temp = sumo_get_temp(rdev);
+		break;
 	default:
 		temp = 0;
 		break;
@@ -487,6 +484,7 @@
 	case THERMAL_TYPE_RV6XX:
 	case THERMAL_TYPE_RV770:
 	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_SUMO:
 		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
 		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
 			err = PTR_ERR(rdev->pm.int_hwmon_dev);
@@ -520,34 +518,39 @@
 
 void radeon_pm_suspend(struct radeon_device *rdev)
 {
-	bool flush_wq = false;
-
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
 		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
 			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
-		flush_wq = true;
 	}
 	mutex_unlock(&rdev->pm.mutex);
-	if (flush_wq)
-		flush_workqueue(rdev->wq);
+
+	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 }
 
 void radeon_pm_resume(struct radeon_device *rdev)
 {
+	/* set up the default clocks if the MC ucode is loaded */
+	if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
 	/* asic init will reset the default power state */
 	mutex_lock(&rdev->pm.mutex);
 	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
 	rdev->pm.current_clock_mode_index = 0;
-	rdev->pm.current_sclk = rdev->clock.default_sclk;
-	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->pm.default_sclk;
+	rdev->pm.current_mclk = rdev->pm.default_mclk;
 	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM
 	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
 		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
-		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	radeon_pm_compute_clocks(rdev);
@@ -564,6 +567,8 @@
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 	rdev->pm.dynpm_can_upclock = true;
 	rdev->pm.dynpm_can_downclock = true;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
 	rdev->pm.current_sclk = rdev->clock.default_sclk;
 	rdev->pm.current_mclk = rdev->clock.default_mclk;
 	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
@@ -575,12 +580,24 @@
 			radeon_combios_get_power_modes(rdev);
 		radeon_pm_print_states(rdev);
 		radeon_pm_init_profile(rdev);
+		/* set up the default clocks if the MC ucode is loaded */
+		if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
 	}
 
 	/* set up the internal thermal sensor if applicable */
 	ret = radeon_hwmon_init(rdev);
 	if (ret)
 		return ret;
+
+	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
 	if (rdev->pm.num_power_states > 1) {
 		/* where's the best place to put these? */
 		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -594,8 +611,6 @@
 		rdev->acpi_nb.notifier_call = radeon_acpi_event;
 		register_acpi_notifier(&rdev->acpi_nb);
 #endif
-		INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
-
 		if (radeon_debugfs_pm_init(rdev)) {
 			DRM_ERROR("Failed to register debugfs file for PM!\n");
 		}
@@ -609,25 +624,20 @@
 void radeon_pm_fini(struct radeon_device *rdev)
 {
 	if (rdev->pm.num_power_states > 1) {
-		bool flush_wq = false;
-
 		mutex_lock(&rdev->pm.mutex);
 		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 			rdev->pm.profile = PM_PROFILE_DEFAULT;
 			radeon_pm_update_profile(rdev);
 			radeon_pm_set_clocks(rdev);
 		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-			/* cancel work */
-			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
-			flush_wq = true;
 			/* reset default clocks */
 			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 			radeon_pm_set_clocks(rdev);
 		}
 		mutex_unlock(&rdev->pm.mutex);
-		if (flush_wq)
-			flush_workqueue(rdev->wq);
+
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 
 		device_remove_file(rdev->dev, &dev_attr_power_profile);
 		device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -686,12 +696,12 @@
 					radeon_pm_get_dynpm_state(rdev);
 					radeon_pm_set_clocks(rdev);
 
-					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
 					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
-					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
 				}
 			} else { /* count == 0 */
@@ -720,9 +730,9 @@
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
-			if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
-			    !(vbl_status & RADEON_SCANOUTPOS_INVBL))
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
 				in_vbl = false;
 		}
 	}
@@ -796,8 +806,8 @@
 			radeon_pm_set_clocks(rdev);
 		}
 
-		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
@@ -814,9 +824,9 @@
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
 	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
-	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
 	if (rdev->asic->get_memory_clock)
 		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
 	if (rdev->pm.current_vddc)
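Note: the radeon_pm.c hunks above replace each cancel_delayed_work()/flush_workqueue(rdev->wq) pair with cancel_delayed_work_sync() on the system workqueue. A minimal sketch of the idiom in isolation; the idle_work and idle_handler names are illustrative only:

	#include <linux/workqueue.h>

	static struct delayed_work idle_work;

	static void idle_handler(struct work_struct *work)
	{
		/* ... do the periodic work, optionally re-arm with schedule_delayed_work() ... */
	}

	static void pm_setup(void)
	{
		INIT_DELAYED_WORK(&idle_work, idle_handler);
		schedule_delayed_work(&idle_work, msecs_to_jiffies(100));
	}

	static void pm_teardown(void)
	{
		/* cancels a pending run and waits for a handler that is already
		 * executing, which is what the old cancel_delayed_work() +
		 * flush_workqueue() combination was approximating */
		cancel_delayed_work_sync(&idle_work);
	}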
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6492881..3cd4dac 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -55,6 +55,7 @@
 #include "r500_reg.h"
 #include "r600_reg.h"
 #include "evergreen_reg.h"
+#include "ni_reg.h"
 
 #define RADEON_MC_AGP_LOCATION		0x014c
 #define		RADEON_MC_AGP_START_MASK	0x0000FFFF
@@ -320,6 +321,15 @@
 #       define RADEON_PCIE_LC_RECONFIG_NOW         (1 << 8)
 #       define RADEON_PCIE_LC_RECONFIG_LATER       (1 << 9)
 #       define RADEON_PCIE_LC_SHORT_RECONFIG_EN    (1 << 10)
+#       define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE   (1 << 7)
+#       define R600_PCIE_LC_RENEGOTIATION_SUPPORT  (1 << 9)
+#       define R600_PCIE_LC_RENEGOTIATE_EN         (1 << 10)
+#       define R600_PCIE_LC_SHORT_RECONFIG_EN      (1 << 11)
+#       define R600_PCIE_LC_UPCONFIGURE_SUPPORT    (1 << 12)
+#       define R600_PCIE_LC_UPCONFIGURE_DIS        (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX      0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX      0x66c
 
 #define RADEON_CACHE_CNTL                   0x1724
 #define RADEON_CACHE_LINE                   0x0f0c /* PCI */
@@ -422,6 +432,7 @@
 #       define RADEON_CRTC_CSYNC_EN         (1 <<  4)
 #       define RADEON_CRTC_ICON_EN          (1 << 15)
 #       define RADEON_CRTC_CUR_EN           (1 << 16)
+#       define RADEON_CRTC_VSTAT_MODE_MASK  (3 << 17)
 #       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
 #       define RADEON_CRTC_CUR_MODE_SHIFT   20
 #       define RADEON_CRTC_CUR_MODE_MONO    0
@@ -509,6 +520,8 @@
 #       define RADEON_CRTC_TILE_EN                      (1 << 15)
 #       define RADEON_CRTC_OFFSET_FLIP_CNTL             (1 << 16)
 #       define RADEON_CRTC_STEREO_OFFSET_EN             (1 << 17)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN      (1 << 28)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN     (1 << 29)
 
 #define R300_CRTC_TILE_X0_Y0	            0x0350
 #define R300_CRTC2_TILE_X0_Y0	            0x0358
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 0000000..eafd816
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+	    TP_PROTO(struct radeon_bo *bo),
+	    TP_ARGS(bo),
+	    TP_STRUCT__entry(
+			     __field(struct radeon_bo *, bo)
+			     __field(u32, pages)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo;
+			   __entry->pages = bo->tbo.num_pages;
+			   ),
+	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 0000000..8175993
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
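Note: radeon_trace.h declares the tracepoints and radeon_trace_points.c expands them exactly once via CREATE_TRACE_POINTS; callers then invoke the generated trace_<event>() helpers. A hedged sketch of typical call sites (the actual hook-up is outside these hunks, and fence->seq is assumed to be the fence's sequence number):

	#include "radeon_trace.h"

	/* once a buffer object has been fully constructed */
	trace_radeon_bo_create(bo);

	/* on the fence path, e.g. right after a fence is emitted */
	trace_radeon_fence_emit(rdev->ddev, fence->seq);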
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index b3f9f1d..ef422bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -304,6 +304,22 @@
 0x4630 US_CODE_ADDR
 0x4634 US_CODE_RANGE
 0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
 0x46A4 US_OUT_FMT_0
 0x46A8 US_OUT_FMT_1
 0x46AC US_OUT_FMT_2
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02..b4192ac 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
 void rs600_pm_misc(struct radeon_device *rdev)
 {
 	int requested_index = rdev->pm.requested_power_state_index;
@@ -515,10 +565,12 @@
 	if (rdev->irq.gui_idle) {
 		tmp |= S_000040_GUI_IDLE(1);
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
 	if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@
 	return 0;
 }
 
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@
 	}
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
-		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006534_D1MODE_VBLANK_STATUS,
 				S_006534_D1MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
 			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
 			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
 			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
 			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	} else {
-		*r500_disp_int = 0;
+		rdev->irq.stat_regs.r500.disp_int = 0;
 	}
 
 	if (irqs) {
@@ -578,32 +630,30 @@
 
 void rs600_irq_disable(struct radeon_device *rdev)
 {
-	u32 tmp;
-
 	WREG32(R_000040_GEN_INT_CNTL, 0);
 	WREG32(R_006540_DxMODE_INT_MASK, 0);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	rs600_irq_ack(rdev, &tmp);
+	rs600_irq_ack(rdev);
 }
 
 int rs600_irq_process(struct radeon_device *rdev)
 {
-	uint32_t status, msi_rearm;
-	uint32_t r500_disp_int;
+	u32 status, msi_rearm;
 	bool queue_hotplug = false;
 
 	/* reset gui idle ack.  the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 
-	status = rs600_irq_ack(rdev, &r500_disp_int);
-	if (!status && !r500_disp_int) {
+	status = rs600_irq_ack(rdev);
+	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
 		return IRQ_NONE;
 	}
-	while (status || r500_disp_int) {
+	while (status || rdev->irq.stat_regs.r500.disp_int) {
 		/* SW interrupt */
-		if (G_000044_SW_INT(status))
+		if (G_000044_SW_INT(status)) {
 			radeon_fence_process(rdev);
+		}
 		/* GUI idle */
 		if (G_000040_GUI_IDLE(status)) {
 			rdev->irq.gui_idle_acked = true;
@@ -611,30 +661,38 @@
 			wake_up(&rdev->irq.idle_queue);
 		}
 		/* Vertical blank interrupts */
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 0);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[0])
+				radeon_crtc_handle_flip(rdev, 0);
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 1);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[1])
+				radeon_crtc_handle_flip(rdev, 1);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD1\n");
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD2\n");
 		}
-		status = rs600_irq_ack(rdev, &r500_disp_int);
+		status = rs600_irq_ack(rdev);
 	}
 	/* reset gui idle ack.  the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8..3a264aa 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -41,6 +41,41 @@
 
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	if (radeon_crtc->crtc_id) {
+		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	} else {
+		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	}
+	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
 
 /* get temperature in millidegrees */
 u32 rv770_get_temp(struct radeon_device *rdev)
@@ -489,6 +524,49 @@
 	return backend_map;
 }
 
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer, mc_shared_chremap, tmp;
+	bool force_no_swizzle;
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+		force_no_swizzle = false;
+		break;
+	case CHIP_RV710:
+	case CHIP_RV740:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	case 2:
+	case 3:
+		if (force_no_swizzle)
+			mc_shared_chremap = 0x00fac688;
+		else
+			mc_shared_chremap = 0x00bbc298;
+		break;
+	}
+
+	if (rdev->family == CHIP_RV740)
+		tcp_chan_steer = 0x00ef2a60;
+	else
+		tcp_chan_steer = 0x00fac688;
+
+	WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
 	int i, j, num_qd_pipes;
@@ -688,6 +766,8 @@
 	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
+	rv770_program_channel_remap(rdev);
+
 	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
 	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
 	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -956,6 +1036,45 @@
 	radeon_bo_unref(&rdev->vram_scratch.robj);
 }
 
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
 int rv770_mc_init(struct radeon_device *rdev)
 {
 	u32 tmp;
@@ -996,7 +1115,7 @@
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
-	r600_vram_gtt_location(rdev, &rdev->mc);
+	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
@@ -1006,6 +1125,9 @@
 {
 	int r;
 
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -1244,3 +1366,75 @@
 	rdev->bios = NULL;
 	radeon_dummy_page_fini(rdev);
 }
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20..abc8cf5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -138,6 +138,7 @@
 #define MC_SHARED_CHMAP						0x2004
 #define		NOOFCHAN_SHIFT					12
 #define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
 
 #define	MC_ARB_RAMCFG					0x2760
 #define		NOOFBANK_SHIFT					0
@@ -303,6 +304,7 @@
 #define		BILINEAR_PRECISION_8_BIT			(1 << 31)
 
 #define	TCP_CNTL					0x9610
+#define	TCP_CHAN_STEER					0x9614
 
 #define	VGT_CACHE_INVALIDATION				0x88C4
 #define		CACHE_INVALIDATION(x)				((x)<<0)
@@ -351,4 +353,49 @@
 
 #define	SRBM_STATUS				        0x0E50
 
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 148a322..af61fc2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@
 	}
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	int put_count = 0;
 
@@ -227,9 +223,18 @@
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
-		if (use_sequence && bo->seq_valid &&
-			(sequence - bo->val_seq < (1 << 31))) {
-			return -EAGAIN;
+		if (use_sequence && bo->seq_valid) {
+			/**
+			 * We've already reserved this one.
+			 */
+			if (unlikely(sequence == bo->val_seq))
+				return -EDEADLK;
+			/**
+			 * Already reserved by a thread that will not back
+			 * off for us. We need to back off.
+			 */
+			if (unlikely(sequence - bo->val_seq < (1 << 31)))
+				return -EAGAIN;
 		}
 
 		if (no_wait)
@@ -267,6 +272,13 @@
 	BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	kref_sub(&bo->list_kref, count,
+		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,20 +294,24 @@
 		put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return ret;
 }
 
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+	ttm_bo_add_to_lru(bo);
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+}
+
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ttm_bo_unreserve_locked(bo);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
@@ -416,11 +437,9 @@
 	}
 
 	if (bo->mem.mm_node) {
-		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
-		spin_unlock(&bo->lock);
 	} else
 		bo->offset = 0;
 
@@ -452,7 +471,6 @@
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@
 	int put_count;
 	int ret;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
 
 		/**
-		 * Lock inversion between bo::reserve and bo::lock here,
+		 * Lock inversion between bo::reserve and bdev::fence_lock here,
 		 * but that's OK, since we're only trylocking.
 		 */
 
@@ -490,14 +508,13 @@
 		if (unlikely(ret == -EBUSY))
 			goto queue;
 
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
-		while (put_count--)
-			kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
 	} else {
@@ -512,7 +529,7 @@
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@
 			       bool no_wait_reserve,
 			       bool no_wait_gpu)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
 
 retry:
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		return ret;
@@ -580,8 +598,7 @@
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return 0;
 }
@@ -652,6 +669,7 @@
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -802,8 +824,7 @@
 
 	BUG_ON(ret != 0);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
 
 	BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1044,15 +1066,16 @@
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1163,7 +1186,6 @@
 	}
 	bo->destroy = destroy;
 
-	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1181,7 +1204,8 @@
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1472,8 +1500,7 @@
 	list_del(&bdev->device_list);
 	mutex_unlock(&glob->device_list_mutex);
 
-	if (!cancel_delayed_work(&bdev->wq))
-		flush_scheduled_work();
+	cancel_delayed_work_sync(&bdev->wq);
 
 	while (ttm_bo_delayed_delete(bdev, true))
 		;
@@ -1527,7 +1554,8 @@
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-
+	bdev->val_seq = 0;
+	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1561,7 +1589,7 @@
 	return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1570,8 +1598,20 @@
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
 }
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1651,6 +1691,7 @@
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
@@ -1664,9 +1705,9 @@
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			continue;
 		}
 
@@ -1675,29 +1716,29 @@
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			return ret;
 		}
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		} else {
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		}
 	}
 	return 0;
@@ -1706,6 +1747,7 @@
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1715,9 +1757,9 @@
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1783,16 +1825,15 @@
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	/**
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5b..77dbf40 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
 	int ret;
 
-	if (!mem->bus.io_reserved) {
-		mem->bus.io_reserved = true;
-		ret = bdev->driver->io_mem_reserve(bdev, mem);
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
 		if (unlikely(ret != 0))
 			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
 	}
 	return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-	if (bdev->driver->io_mem_reserve) {
-		if (mem->bus.io_reserved) {
-			mem->bus.io_reserved = false;
-			bdev->driver->io_mem_free(bdev, mem);
-		}
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
 	}
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -117,7 +203,9 @@
 		else
 			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -134,7 +222,9 @@
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_mem_reg old_copy;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -280,8 +370,7 @@
 	}
 	mb();
 out2:
-	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
@@ -292,9 +381,10 @@
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+	ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -453,6 +543,8 @@
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -467,7 +559,9 @@
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
-		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -500,6 +597,9 @@
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
@@ -520,7 +620,7 @@
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -529,7 +629,7 @@
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -552,7 +652,7 @@
 		 */
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77..221b924 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@
 	int i;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@
 	 * move.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	} else
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 
-
-	ret = ttm_mem_io_reserve(bdev, &bo->mem);
-	if (ret) {
-		retval = VM_FAULT_SIGBUS;
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		goto out_io_unlock;
 	}
 
 	/*
@@ -182,7 +188,7 @@
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
 				retval = VM_FAULT_OOM;
-				goto out_unlock;
+				goto out_io_unlock;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -200,14 +206,15 @@
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-			goto out_unlock;
+			goto out_io_unlock;
 		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-
+out_io_unlock:
+	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
 	return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c29..3832fe1 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -41,10 +41,77 @@
 		if (!entry->reserved)
 			continue;
 
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+		}
 		entry->reserved = false;
-		ttm_bo_unreserve(bo);
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
 	}
 }
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
+	}
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+					 struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int ret;
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ret = ttm_bo_wait_unreserved(bo, true);
+	spin_lock(&glob->lru_lock);
+	if (unlikely(ret != 0))
+		ttm_eu_backoff_reservation_locked(list);
+	return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_bo_global *glob;
+
+	if (list_empty(list))
+		return;
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+	spin_lock(&glob->lru_lock);
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+}
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 /*
@@ -59,37 +126,76 @@
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
+	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
+	uint32_t val_seq;
+
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
 
 retry:
+	spin_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		entry->reserved = false;
-		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-		if (ret != 0) {
-			ttm_eu_backoff_reservation(list);
-			if (ret == -EAGAIN) {
-				ret = ttm_bo_wait_unreserved(bo, true);
-				if (unlikely(ret != 0))
-					return ret;
-				goto retry;
-			} else
+retry_this_bo:
+		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ret = ttm_eu_wait_unreserved_locked(list, bo);
+			if (unlikely(ret != 0)) {
+				spin_unlock(&glob->lru_lock);
+				ttm_eu_list_ref_sub(list);
 				return ret;
+			}
+			goto retry_this_bo;
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_wait_unreserved(bo, true);
+			if (unlikely(ret != 0))
+				return ret;
+			goto retry;
+		default:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			return ret;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation(list);
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
 			ret = ttm_bo_wait_cpu(bo, false);
 			if (ret)
 				return ret;
 			goto retry;
 		}
 	}
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@
 void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	struct ttm_bo_driver *driver;
+
+	if (list_empty(list))
+		return;
+
+	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+	bdev = bo->bdev;
+	driver = bdev->driver;
+	glob = bo->glob;
+
+	spin_lock(&bdev->fence_lock);
+	spin_lock(&glob->lru_lock);
 
 	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-		struct ttm_bo_driver *driver = bo->bdev->driver;
-		void *old_sync_obj;
-
-		spin_lock(&bo->lock);
-		old_sync_obj = bo->sync_obj;
+		bo = entry->bo;
+		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		spin_unlock(&bo->lock);
-		ttm_bo_unreserve(bo);
+		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
-		if (old_sync_obj)
-			driver->sync_obj_unref(&old_sync_obj);
+	}
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bdev->fence_lock);
+
+	list_for_each_entry(entry, list, head) {
+		if (entry->old_sync_obj)
+			driver->sync_obj_unref(&entry->old_sync_obj);
 	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
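Note: the reworked ttm_execbuf_util API above drops the caller-supplied sequence number (it is now taken from bo->bdev->val_seq under the LRU lock). A minimal caller-side sketch, assuming a driver-provided my_driver_validate_buffers() helper and a validate list already populated with struct ttm_validate_buffer entries (bo and new_sync_obj_arg filled in); this illustrates the intended usage pattern and is not code from this patch:

static int my_driver_submit(struct list_head *validate_list, void *sync_obj)
{
	int ret;

	/* reserves every buffer on the list; on error nothing stays reserved */
	ret = ttm_eu_reserve_buffers(validate_list);
	if (unlikely(ret != 0))
		return ret;

	/* hypothetical driver step: validate/move the reserved buffers */
	ret = my_driver_validate_buffers(validate_list);
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(validate_list);
		return ret;
	}

	/* attaches sync_obj to each buffer and unreserves the whole list */
	ttm_eu_fence_buffer_objects(validate_list, sync_obj);
	return 0;
}

The vmwgfx hunk below shows the same change on a real caller: the dev_priv->val_seq++ argument simply goes away.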
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d0..10fc01f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@
 	 */
 
 	struct vmw_sw_context ctx;
-	uint32_t val_seq;
 	struct mutex cmdbuf_mutex;
 
 	/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3..41b95ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@
 	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
-	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
-				     dev_priv->val_seq++);
+	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 41d9a5b..bfab60c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -480,9 +480,6 @@
 	info->fix.smem_start = 0;
 	info->fix.smem_len = fb_size;
 
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-
 	info->pseudo_palette = par->pseudo_palette;
 	info->screen_base = par->vmalloc;
 	info->screen_size = fb_size;
@@ -659,7 +656,7 @@
 	par->dirty.active = false;
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 
-	flush_scheduled_work();
+	flush_delayed_work_sync(&info->deferred_work);
 
 	par->bo_ptr = NULL;
 	ttm_bo_kunmap(&par->map);
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 0e1edd7..09aea5f 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,7 +3,6 @@
 	depends on PCI
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index c8768f3..e01cacb 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -33,6 +33,7 @@
 	struct fb_info *fb_info;
 	int pwr_state;
 	void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
+	void (*reprobe)(struct pci_dev *pdev);
 	bool (*can_switch)(struct pci_dev *pdev);
 	int id;
 	bool active;
@@ -103,6 +104,7 @@
 
 int vga_switcheroo_register_client(struct pci_dev *pdev,
 				   void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
+				   void (*reprobe)(struct pci_dev *pdev),
 				   bool (*can_switch)(struct pci_dev *pdev))
 {
 	int index;
@@ -117,6 +119,7 @@
 	vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
 	vgasr_priv.clients[index].pdev = pdev;
 	vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
+	vgasr_priv.clients[index].reprobe = reprobe;
 	vgasr_priv.clients[index].can_switch = can_switch;
 	vgasr_priv.clients[index].id = -1;
 	if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
@@ -174,7 +177,8 @@
 	int i;
 	mutex_lock(&vgasr_mutex);
 	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		seq_printf(m, "%d:%c:%s:%s\n", i,
+		seq_printf(m, "%d:%s:%c:%s:%s\n", i,
+			   vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
 			   vgasr_priv.clients[i].active ? '+' : ' ',
 			   vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
 			   pci_name(vgasr_priv.clients[i].pdev));
@@ -190,9 +194,8 @@
 
 static int vga_switchon(struct vga_switcheroo_client *client)
 {
-	int ret;
-
-	ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+	if (vgasr_priv.handler->power_state)
+		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
 	/* call the driver callback to turn on device */
 	client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
 	client->pwr_state = VGA_SWITCHEROO_ON;
@@ -203,12 +206,14 @@
 {
 	/* call the driver callback to turn off device */
 	client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
-	vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+	if (vgasr_priv.handler->power_state)
+		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
 	client->pwr_state = VGA_SWITCHEROO_OFF;
 	return 0;
 }
 
-static int vga_switchto(struct vga_switcheroo_client *new_client)
+/* stage one happens before delay */
+static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
 {
 	int ret;
 	int i;
@@ -235,10 +240,28 @@
 		vga_switchon(new_client);
 
 	/* swap shadow resource to denote boot VGA device has changed so X starts on new device */
-	active->active = false;
-
 	active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
 	new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+	return 0;
+}
+
+/* post delay */
+static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+{
+	int ret;
+	int i;
+	struct vga_switcheroo_client *active = NULL;
+
+	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+		if (vgasr_priv.clients[i].active == true) {
+			active = &vgasr_priv.clients[i];
+			break;
+		}
+	}
+	if (!active)
+		return 0;
+
+	active->active = false;
 
 	if (new_client->fb_info) {
 		struct fb_event event;
@@ -250,6 +273,9 @@
 	if (ret)
 		return ret;
 
+	if (new_client->reprobe)
+		new_client->reprobe(new_client->pdev);
+
 	if (active->pwr_state == VGA_SWITCHEROO_ON)
 		vga_switchoff(active);
 
@@ -265,6 +291,7 @@
 	const char *pdev_name;
 	int i, ret;
 	bool delay = false, can_switch;
+	bool just_mux = false;
 	int client_id = -1;
 	struct vga_switcheroo_client *client = NULL;
 
@@ -319,6 +346,15 @@
 	if (strncmp(usercmd, "DIS", 3) == 0)
 		client_id = VGA_SWITCHEROO_DIS;
 
+	if (strncmp(usercmd, "MIGD", 4) == 0) {
+		just_mux = true;
+		client_id = VGA_SWITCHEROO_IGD;
+	}
+	if (strncmp(usercmd, "MDIS", 4) == 0) {
+		just_mux = true;
+		client_id = VGA_SWITCHEROO_DIS;
+	}
+
 	if (client_id == -1)
 		goto out;
 
@@ -330,6 +366,12 @@
 	}
 
 	vgasr_priv.delayed_switch_active = false;
+
+	if (just_mux) {
+		ret = vgasr_priv.handler->switchto(client_id);
+		goto out;
+	}
+
 	/* okay we want a switch - test if devices are willing to switch */
 	can_switch = true;
 	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
@@ -345,18 +387,22 @@
 
 	if (can_switch == true) {
 		pdev_name = pci_name(client->pdev);
-		ret = vga_switchto(client);
+		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
+			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+
+		ret = vga_switchto_stage2(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+
 	} else {
 		printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
 		vgasr_priv.delayed_switch_active = true;
 		vgasr_priv.delayed_client_id = client_id;
 
-		/* we should at least power up the card to
-		   make the switch faster */
-		if (client->pwr_state == VGA_SWITCHEROO_OFF)
-			vga_switchon(client);
+		ret = vga_switchto_stage1(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
 	}
 
 out:
@@ -438,9 +484,9 @@
 		goto err;
 
 	pdev_name = pci_name(client->pdev);
-	ret = vga_switchto(client);
+	ret = vga_switchto_stage2(client);
 	if (ret)
-		printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
+		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
 
 	vgasr_priv.delayed_switch_active = false;
 	err = 0;
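Note: with the reprobe hook added above, a client GPU driver now registers one extra callback. A hedged sketch of the new call signature only; the mydrv_* functions are placeholders, not taken from any in-tree driver:

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void mydrv_set_gpu_state(struct pci_dev *pdev,
				enum vga_switcheroo_state state)
{
	/* power the GPU up or down on behalf of the switcheroo core */
}

static void mydrv_reprobe(struct pci_dev *pdev)
{
	/* called after the mux has switched to this GPU (stage 2) */
}

static bool mydrv_can_switch(struct pci_dev *pdev)
{
	return true;	/* e.g. refuse while userspace still holds the device */
}

static int mydrv_register_switcheroo(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, mydrv_set_gpu_state,
					      mydrv_reprobe, mydrv_can_switch);
}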
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 3052e29..24cca2f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,11 +150,22 @@
 	Say Y here if you want to enable force feedback support for DragonRise Inc.
 	game controllers.
 
+config HID_EMS_FF
+	tristate "EMS Production Inc. force feedback support"
+	depends on USB_HID
+	select INPUT_FF_MEMLESS
+	---help---
+	Say Y here if you want to enable force feedback support for devices by
+	EMS Production Ltd.
+	Currently the following devices are known to be supported:
+	 - Trio Linker Plus II
+
 config HID_EGALAX
 	tristate "eGalax multi-touch panel"
 	depends on USB_HID
 	---help---
-	Support for the eGalax dual-touch panel.
+	Support for the eGalax dual-touch panels, including the
+	JooJoo and WeTab tablets.
 
 config HID_ELECOM
 	tristate "ELECOM BM084 bluetooth mouse"
@@ -284,6 +295,23 @@
 	---help---
 	Support for Monterey Genius KB29E.
 
+config HID_MULTITOUCH
+	tristate "HID Multitouch panels"
+	depends on USB_HID
+	---help---
+	  Generic support for HID multitouch panels.
+
+	  Say Y here if you have one of the following devices:
+	  - Cypress TrueTouch panels
+	  - Hanvon dual touch panels
+	  - Pixcir dual touch panels
+	  - 'Sensing Win7-TwoFinger' panel by GeneralTouch
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hid-multitouch.
+
 config HID_NTRIG
 	tristate "N-Trig touch screen"
 	depends on USB_HID
@@ -396,6 +424,13 @@
 	---help---
 	Support for Roccat Kone mouse.
 
+config HID_ROCCAT_KONEPLUS
+	tristate "Roccat Kone[+] mouse support"
+	depends on USB_HID
+	select HID_ROCCAT
+	---help---
+	Support for Roccat Kone[+] mouse.
+
 config HID_ROCCAT_PYRA
 	tristate "Roccat Pyra mouse support"
 	depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index c335605..6efc2a0 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the HID driver
 #
-hid-objs			:= hid-core.o hid-input.o
+hid-y			:= hid-core.o hid-input.o
 
 ifdef CONFIG_DEBUG_FS
 	hid-objs		+= hid-debug.o
@@ -11,18 +11,18 @@
 
 hid-$(CONFIG_HIDRAW)		+= hidraw.o
 
-hid-logitech-objs		:= hid-lg.o
+hid-logitech-y		:= hid-lg.o
 ifdef CONFIG_LOGITECH_FF
-	hid-logitech-objs	+= hid-lgff.o
+	hid-logitech-y	+= hid-lgff.o
 endif
 ifdef CONFIG_LOGIRUMBLEPAD2_FF
-	hid-logitech-objs	+= hid-lg2ff.o
+	hid-logitech-y	+= hid-lg2ff.o
 endif
 ifdef CONFIG_LOGIG940_FF
-	hid-logitech-objs	+= hid-lg3ff.o
+	hid-logitech-y	+= hid-lg3ff.o
 endif
 ifdef CONFIG_LOGIWII_FF
-	hid-logitech-objs	+= hid-lg4ff.o
+	hid-logitech-y	+= hid-lg4ff.o
 endif
 
 obj-$(CONFIG_HID_3M_PCT)	+= hid-3m-pct.o
@@ -35,6 +35,7 @@
 obj-$(CONFIG_HID_CHICONY)	+= hid-chicony.o
 obj-$(CONFIG_HID_CYPRESS)	+= hid-cypress.o
 obj-$(CONFIG_HID_DRAGONRISE)	+= hid-drff.o
+obj-$(CONFIG_HID_EMS_FF)	+= hid-emsff.o
 obj-$(CONFIG_HID_EGALAX)	+= hid-egalax.o
 obj-$(CONFIG_HID_ELECOM)	+= hid-elecom.o
 obj-$(CONFIG_HID_EZKEY)		+= hid-ezkey.o
@@ -46,6 +47,7 @@
 obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o
 obj-$(CONFIG_HID_MOSART)	+= hid-mosart.o
+obj-$(CONFIG_HID_MULTITOUCH)	+= hid-multitouch.o
 obj-$(CONFIG_HID_NTRIG)		+= hid-ntrig.o
 obj-$(CONFIG_HID_ORTEK)		+= hid-ortek.o
 obj-$(CONFIG_HID_PRODIKEYS)	+= hid-prodikeys.o
@@ -55,6 +57,7 @@
 obj-$(CONFIG_HID_PICOLCD)	+= hid-picolcd.o
 obj-$(CONFIG_HID_ROCCAT)	+= hid-roccat.o
 obj-$(CONFIG_HID_ROCCAT_KONE)	+= hid-roccat-kone.o
+obj-$(CONFIG_HID_ROCCAT_KONEPLUS)	+= hid-roccat-koneplus.o
 obj-$(CONFIG_HID_ROCCAT_PYRA)	+= hid-roccat-pyra.o
 obj-$(CONFIG_HID_SAMSUNG)	+= hid-samsung.o
 obj-$(CONFIG_HID_SMARTJOYPLUS)	+= hid-sjoy.o
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
index 02d8cd3..5243ae2 100644
--- a/drivers/hid/hid-3m-pct.c
+++ b/drivers/hid/hid-3m-pct.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
+#include <linux/input/mt.h>
 
 MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
 MODULE_DESCRIPTION("3M PCT multitouch panels");
@@ -27,8 +28,6 @@
 #include "hid-ids.h"
 
 #define MAX_SLOTS		60
-#define MAX_TRKID		USHRT_MAX
-#define MAX_EVENTS		360
 
 /* estimated signal-to-noise ratios */
 #define SN_MOVE			2048
@@ -36,14 +35,11 @@
 
 struct mmm_finger {
 	__s32 x, y, w, h;
-	__u16 id;
-	bool prev_touch;
 	bool touch, valid;
 };
 
 struct mmm_data {
 	struct mmm_finger f[MAX_SLOTS];
-	__u16 id;
 	__u8 curid;
 	__u8 nexp, nreal;
 	bool touch, valid;
@@ -117,14 +113,7 @@
 					0, 1, 0, 0);
 			return 1;
 		case HID_DG_CONTACTID:
-			field->logical_maximum = MAX_TRKID;
-			hid_map_usage(hi, usage, bit, max,
-					EV_ABS, ABS_MT_TRACKING_ID);
-			input_set_abs_params(hi->input, ABS_MT_TRACKING_ID,
-					     0, MAX_TRKID, 0, 0);
-			if (!hi->input->mt)
-				input_mt_create_slots(hi->input, MAX_SLOTS);
-			input_set_events_per_packet(hi->input, MAX_EVENTS);
+			input_mt_init_slots(hi->input, MAX_SLOTS);
 			return 1;
 		}
 		/* let hid-input decide for the others */
@@ -154,7 +143,6 @@
  */
 static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
 {
-	struct mmm_finger *oldest = 0;
 	int i;
 	for (i = 0; i < MAX_SLOTS; ++i) {
 		struct mmm_finger *f = &md->f[i];
@@ -163,6 +151,7 @@
 			continue;
 		}
 		input_mt_slot(input, i);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER, f->touch);
 		if (f->touch) {
 			/* this finger is on the screen */
 			int wide = (f->w > f->h);
@@ -170,33 +159,16 @@
 			int major = max(f->w, f->h) >> 1;
 			int minor = min(f->w, f->h) >> 1;
 
-			if (!f->prev_touch)
-				f->id = md->id++;
-			input_event(input, EV_ABS, ABS_MT_TRACKING_ID, f->id);
 			input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
 			input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
 			input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
 			input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
 			input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
-			/* touchscreen emulation: pick the oldest contact */
-			if (!oldest || ((f->id - oldest->id) & (SHRT_MAX + 1)))
-				oldest = f;
-		} else {
-			/* this finger took off the screen */
-			input_event(input, EV_ABS, ABS_MT_TRACKING_ID, -1);
 		}
-		f->prev_touch = f->touch;
 		f->valid = 0;
 	}
 
-	/* touchscreen emulation */
-	if (oldest) {
-		input_event(input, EV_KEY, BTN_TOUCH, 1);
-		input_event(input, EV_ABS, ABS_X, oldest->x);
-		input_event(input, EV_ABS, ABS_Y, oldest->y);
-	} else {
-		input_event(input, EV_KEY, BTN_TOUCH, 0);
-	}
+	input_mt_report_pointer_emulation(input, true);
 	input_sync(input);
 }
 
@@ -274,7 +246,7 @@
 
 	md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL);
 	if (!md) {
-		dev_err(&hdev->dev, "cannot allocate 3M data\n");
+		hid_err(hdev, "cannot allocate 3M data\n");
 		return -ENOMEM;
 	}
 	hid_set_drvdata(hdev, md);
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 1666c16..902d1df 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -93,7 +93,7 @@
 
 	a4 = kzalloc(sizeof(*a4), GFP_KERNEL);
 	if (a4 == NULL) {
-		dev_err(&hdev->dev, "can't alloc device descriptor\n");
+		hid_err(hdev, "can't alloc device descriptor\n");
 		ret = -ENOMEM;
 		goto err_free;
 	}
@@ -104,13 +104,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index eaeca56..61aa712 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -16,6 +16,8 @@
  * any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/hid.h>
 #include <linux/module.h>
@@ -59,6 +61,27 @@
 	u8 flags;
 };
 
+static const struct apple_key_translation macbookair_fn_keys[] = {
+	{ KEY_BACKSPACE, KEY_DELETE },
+	{ KEY_ENTER,	KEY_INSERT },
+	{ KEY_F1,	KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
+	{ KEY_F2,	KEY_BRIGHTNESSUP,   APPLE_FLAG_FKEY },
+	{ KEY_F3,	KEY_SCALE,          APPLE_FLAG_FKEY },
+	{ KEY_F4,	KEY_DASHBOARD,      APPLE_FLAG_FKEY },
+	{ KEY_F6,	KEY_PREVIOUSSONG,   APPLE_FLAG_FKEY },
+	{ KEY_F7,	KEY_PLAYPAUSE,      APPLE_FLAG_FKEY },
+	{ KEY_F8,	KEY_NEXTSONG,       APPLE_FLAG_FKEY },
+	{ KEY_F9,	KEY_MUTE,           APPLE_FLAG_FKEY },
+	{ KEY_F10,	KEY_VOLUMEDOWN,     APPLE_FLAG_FKEY },
+	{ KEY_F11,	KEY_VOLUMEUP,       APPLE_FLAG_FKEY },
+	{ KEY_F12,	KEY_EJECTCD,        APPLE_FLAG_FKEY },
+	{ KEY_UP,	KEY_PAGEUP },
+	{ KEY_DOWN,	KEY_PAGEDOWN },
+	{ KEY_LEFT,	KEY_HOME },
+	{ KEY_RIGHT,	KEY_END },
+	{ }
+};
+
 static const struct apple_key_translation apple_fn_keys[] = {
 	{ KEY_BACKSPACE, KEY_DELETE },
 	{ KEY_ENTER,	KEY_INSERT },
@@ -146,7 +169,7 @@
 		struct hid_usage *usage, __s32 value)
 {
 	struct apple_sc *asc = hid_get_drvdata(hid);
-	const struct apple_key_translation *trans;
+	const struct apple_key_translation *trans, *table;
 
 	if (usage->code == KEY_FN) {
 		asc->fn_on = !!value;
@@ -157,10 +180,16 @@
 	if (fnmode) {
 		int do_translate;
 
-		trans = apple_find_translation((hid->product < 0x21d ||
-					hid->product >= 0x300) ?
-					powerbook_fn_keys : apple_fn_keys,
-					usage->code);
+		if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+				hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
+			table = macbookair_fn_keys;
+		else if (hid->product < 0x21d || hid->product >= 0x300)
+			table = powerbook_fn_keys;
+		else
+			table = apple_fn_keys;
+
+		trans = apple_find_translation(table, usage->code);
+
 		if (trans) {
 			if (test_bit(usage->code, asc->pressed_fn))
 				do_translate = 1;
@@ -253,8 +282,8 @@
 
 	if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
 			rdesc[53] == 0x65 && rdesc[59] == 0x65) {
-		dev_info(&hdev->dev, "fixing up MacBook JIS keyboard report "
-				"descriptor\n");
+		hid_info(hdev,
+			 "fixing up MacBook JIS keyboard report descriptor\n");
 		rdesc[53] = rdesc[59] = 0xe7;
 	}
 	return rdesc;
@@ -324,7 +353,7 @@
 
 	asc = kzalloc(sizeof(*asc), GFP_KERNEL);
 	if (asc == NULL) {
-		dev_err(&hdev->dev, "can't alloc apple descriptor\n");
+		hid_err(hdev, "can't alloc apple descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -334,7 +363,7 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
@@ -345,7 +374,7 @@
 
 	ret = hid_hw_start(hdev, connect_mask);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
@@ -440,6 +469,18 @@
 		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
 		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
+		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
+		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
+		.driver_data = APPLE_HAS_FN },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
+		.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
+		.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
 		.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
@@ -473,7 +514,7 @@
 
 	ret = hid_register_driver(&apple_driver);
 	if (ret)
-		printk(KERN_ERR "can't register apple driver\n");
+		pr_err("can't register apple driver\n");
 
 	return ret;
 }
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index f42ee14..e5b961d 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -73,14 +73,14 @@
 	int error;
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output reports found\n");
+		hid_err(hid, "no output reports found\n");
 		return -ENODEV;
 	}
 
 	report = list_first_entry(report_list, struct hid_report, list);
 
 	if (report->maxfield < 4) {
-		dev_err(&hid->dev, "no fields in the report: %d\n", report->maxfield);
+		hid_err(hid, "no fields in the report: %d\n", report->maxfield);
 		return -ENODEV;
 	}
 
@@ -101,7 +101,7 @@
 	axff->report->field[3]->value[0] = 0x00;
 	usbhid_submit_report(hid, axff->report, USB_DIR_OUT);
 
-	dev_info(&hid->dev, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
+	hid_info(hid, "Force Feedback for ACRUX game controllers by Sergei Kolzun<x0r@dv-life.ru>\n");
 
 	return 0;
 
@@ -114,17 +114,17 @@
 {
 	int error;
 
-	dev_dbg(&hdev->dev, "ACRUX HID hardware probe...");
+	dev_dbg(&hdev->dev, "ACRUX HID hardware probe...\n");
 
 	error = hid_parse(hdev);
 	if (error) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		return error;
 	}
 
 	error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (error) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		return error;
 	}
 
@@ -134,7 +134,7 @@
 		 * Do not fail device initialization completely as device
 		 * may still be partially operable, just warn.
 		 */
-		dev_warn(&hdev->dev,
+		hid_warn(hdev,
 			 "Failed to enable force feedback support, error: %d\n",
 			 error);
 	}
diff --git a/drivers/hid/hid-belkin.c b/drivers/hid/hid-belkin.c
index 4ce7aa3..a1a765a 100644
--- a/drivers/hid/hid-belkin.c
+++ b/drivers/hid/hid-belkin.c
@@ -56,14 +56,14 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
 		((quirks & BELKIN_HIDDEV) ? HID_CONNECT_HIDDEV_FORCE : 0));
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
index 5925bdc..1ea066c 100644
--- a/drivers/hid/hid-cando.c
+++ b/drivers/hid/hid-cando.c
@@ -207,7 +207,7 @@
 
 	td = kmalloc(sizeof(struct cando_data), GFP_KERNEL);
 	if (!td) {
-		dev_err(&hdev->dev, "cannot allocate Cando Touch data\n");
+		hid_err(hdev, "cannot allocate Cando Touch data\n");
 		return -ENOMEM;
 	}
 	hid_set_drvdata(hdev, td);
@@ -236,6 +236,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 		USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c
index e880086..888ece6 100644
--- a/drivers/hid/hid-cherry.c
+++ b/drivers/hid/hid-cherry.c
@@ -30,8 +30,7 @@
 		unsigned int *rsize)
 {
 	if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
-		dev_info(&hdev->dev, "fixing up Cherry Cymotion report "
-				"descriptor\n");
+		hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
 		rdesc[11] = rdesc[16] = 0xff;
 		rdesc[12] = rdesc[17] = 0x03;
 	}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 88cb04e..d678cf3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -14,6 +14,8 @@
  * any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -59,7 +61,8 @@
 	if (report_enum->report_id_hash[id])
 		return report_enum->report_id_hash[id];
 
-	if (!(report = kzalloc(sizeof(struct hid_report), GFP_KERNEL)))
+	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
+	if (!report)
 		return NULL;
 
 	if (id != 0)
@@ -90,8 +93,11 @@
 		return NULL;
 	}
 
-	if (!(field = kzalloc(sizeof(struct hid_field) + usages * sizeof(struct hid_usage)
-		+ values * sizeof(unsigned), GFP_KERNEL))) return NULL;
+	field = kzalloc((sizeof(struct hid_field) +
+			 usages * sizeof(struct hid_usage) +
+			 values * sizeof(unsigned)), GFP_KERNEL);
+	if (!field)
+		return NULL;
 
 	field->index = report->maxfield++;
 	report->field[field->index] = field;
@@ -172,10 +178,14 @@
 
 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
 {
+	struct hid_collection *collection = parser->device->collection;
 	int n;
-	for (n = parser->collection_stack_ptr - 1; n >= 0; n--)
-		if (parser->device->collection[parser->collection_stack[n]].type == type)
-			return parser->device->collection[parser->collection_stack[n]].usage;
+
+	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
+		unsigned index = parser->collection_stack[n];
+		if (collection[index].type == type)
+			return collection[index].usage;
+	}
 	return 0; /* we know nothing about this usage type */
 }
 
@@ -209,7 +219,8 @@
 	unsigned offset;
 	int i;
 
-	if (!(report = hid_register_report(parser->device, report_type, parser->global.report_id))) {
+	report = hid_register_report(parser->device, report_type, parser->global.report_id);
+	if (!report) {
 		dbg_hid("hid_register_report failed\n");
 		return -1;
 	}
@@ -227,7 +238,8 @@
 
 	usages = max_t(int, parser->local.usage_index, parser->global.report_count);
 
-	if ((field = hid_register_field(report, usages, parser->global.report_count)) == NULL)
+	field = hid_register_field(report, usages, parser->global.report_count);
+	if (!field)
 		return 0;
 
 	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
@@ -652,13 +664,12 @@
 		return -ENOMEM;
 	device->rsize = size;
 
-	parser = vmalloc(sizeof(struct hid_parser));
+	parser = vzalloc(sizeof(struct hid_parser));
 	if (!parser) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	memset(parser, 0, sizeof(struct hid_parser));
 	parser->device = device;
 
 	end = start + size;
@@ -672,7 +683,8 @@
 
 		if (dispatch_type[item.type](parser, &item)) {
 			dbg_hid("item %u %u %u %u parsing failed\n",
-				item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
+				item.format, (unsigned)item.size,
+				(unsigned)item.type, (unsigned)item.tag);
 			goto err;
 		}
 
@@ -737,13 +749,14 @@
  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
  */
 
-static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
+static __u32 extract(const struct hid_device *hid, __u8 *report,
+		     unsigned offset, unsigned n)
 {
 	u64 x;
 
 	if (n > 32)
-		printk(KERN_WARNING "HID: extract() called with n (%d) > 32! (%s)\n",
-				n, current->comm);
+		hid_warn(hid, "extract() called with n (%d) > 32! (%s)\n",
+			 n, current->comm);
 
 	report += offset >> 3;  /* adjust byte index */
 	offset &= 7;            /* now only need bit offset into one byte */
@@ -760,18 +773,19 @@
  * endianness of register values by considering a register
  * a "cached" copy of the little endiad bit stream.
  */
-static __inline__ void implement(__u8 *report, unsigned offset, unsigned n, __u32 value)
+static void implement(const struct hid_device *hid, __u8 *report,
+		      unsigned offset, unsigned n, __u32 value)
 {
 	u64 x;
 	u64 m = (1ULL << n) - 1;
 
 	if (n > 32)
-		printk(KERN_WARNING "HID: implement() called with n (%d) > 32! (%s)\n",
-				n, current->comm);
+		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
+			 __func__, n, current->comm);
 
 	if (value > m)
-		printk(KERN_WARNING "HID: implement() called with too large value %d! (%s)\n",
-				value, current->comm);
+		hid_warn(hid, "%s() called with too large value %d! (%s)\n",
+			 __func__, value, current->comm);
 	WARN_ON(value > m);
 	value &= m;
 
@@ -788,7 +802,7 @@
  * Search an array for a value.
  */
 
-static __inline__ int search(__s32 *array, __s32 value, unsigned n)
+static int search(__s32 *array, __s32 value, unsigned n)
 {
 	while (n--) {
 		if (*array++ == value)
@@ -887,18 +901,22 @@
 	__s32 max = field->logical_maximum;
 	__s32 *value;
 
-	if (!(value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC)))
+	value = kmalloc(sizeof(__s32) * count, GFP_ATOMIC);
+	if (!value)
 		return;
 
 	for (n = 0; n < count; n++) {
 
-			value[n] = min < 0 ? snto32(extract(data, offset + n * size, size), size) :
-						    extract(data, offset + n * size, size);
+		value[n] = min < 0 ?
+			snto32(extract(hid, data, offset + n * size, size),
+			       size) :
+			extract(hid, data, offset + n * size, size);
 
-			if (!(field->flags & HID_MAIN_ITEM_VARIABLE) /* Ignore report if ErrorRollOver */
-			    && value[n] >= min && value[n] <= max
-			    && field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
-				goto exit;
+		/* Ignore report if ErrorRollOver */
+		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
+		    value[n] >= min && value[n] <= max &&
+		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
+			goto exit;
 	}
 
 	for (n = 0; n < count; n++) {
@@ -928,7 +946,8 @@
  * Output the field into the report.
  */
 
-static void hid_output_field(struct hid_field *field, __u8 *data)
+static void hid_output_field(const struct hid_device *hid,
+			     struct hid_field *field, __u8 *data)
 {
 	unsigned count = field->report_count;
 	unsigned offset = field->report_offset;
@@ -937,9 +956,11 @@
 
 	for (n = 0; n < count; n++) {
 		if (field->logical_minimum < 0)	/* signed values */
-			implement(data, offset + n * size, size, s32ton(field->value[n], size));
+			implement(hid, data, offset + n * size, size,
+				  s32ton(field->value[n], size));
 		else				/* unsigned values */
-			implement(data, offset + n * size, size, field->value[n]);
+			implement(hid, data, offset + n * size, size,
+				  field->value[n]);
 	}
 }
 
@@ -956,7 +977,7 @@
 
 	memset(data, 0, ((report->size - 1) >> 3) + 1);
 	for (n = 0; n < report->maxfield; n++)
-		hid_output_field(report->field[n], data);
+		hid_output_field(report->device, report->field[n], data);
 }
 EXPORT_SYMBOL_GPL(hid_output_report);
 
@@ -1169,8 +1190,7 @@
 		hdev->claimed |= HID_CLAIMED_HIDRAW;
 
 	if (!hdev->claimed) {
-		dev_err(&hdev->dev, "claimed by neither input, hiddev nor "
-				"hidraw\n");
+		hid_err(hdev, "claimed by neither input, hiddev nor hidraw\n");
 		return -ENODEV;
 	}
 
@@ -1210,9 +1230,9 @@
 		bus = "<UNKNOWN>";
 	}
 
-	dev_info(&hdev->dev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
-			buf, bus, hdev->version >> 8, hdev->version & 0xff,
-			type, hdev->name, hdev->phys);
+	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
+		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
+		 type, hdev->name, hdev->phys);
 
 	return 0;
 }
@@ -1230,7 +1250,7 @@
 EXPORT_SYMBOL_GPL(hid_disconnect);
 
 /* a list of devices for which there is a specialized driver on HID bus */
-static const struct hid_device_id hid_blacklist[] = {
+static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
@@ -1276,6 +1296,12 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1286,29 +1312,39 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1369,6 +1405,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
@@ -1390,6 +1427,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -1496,9 +1534,9 @@
 	if (!hid_match_device(hdev, hdrv))
 		return 0;
 
-	/* generic wants all non-blacklisted */
+	/* generic wants all that don't have specialized driver */
 	if (!strncmp(hdrv->name, "generic-", 8))
-		return !hid_match_id(hdev, hid_blacklist);
+		return !hid_match_id(hdev, hid_have_special_driver);
 
 	return 1;
 }
@@ -1604,10 +1642,10 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
@@ -1757,6 +1795,12 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
 	{ }
@@ -1948,12 +1992,12 @@
 	int ret;
 
 	if (hid_debug)
-		printk(KERN_WARNING "HID: hid_debug is now used solely for parser and driver debugging.\n"
-				"HID: debugfs is now used for inspecting the device (report descriptor, reports)\n");
+		pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
+			"debugfs is now used for inspecting the device (report descriptor, reports)\n");
 
 	ret = bus_register(&hid_bus_type);
 	if (ret) {
-		printk(KERN_ERR "HID: can't register hid bus\n");
+		pr_err("can't register hid bus\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 4cd0e23..2f0be4c 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -107,13 +107,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 75c5e23..555382f 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -26,6 +26,8 @@
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/sched.h>
@@ -393,7 +395,7 @@
 
 	buf = resolv_usage_page(usage >> 16, f);
 	if (IS_ERR(buf)) {
-		printk(KERN_ERR "error allocating HID debug buffer\n");
+		pr_err("error allocating HID debug buffer\n");
 		return NULL;
 	}
 
diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-drff.c
index 968b04f..afcf3d6 100644
--- a/drivers/hid/hid-drff.c
+++ b/drivers/hid/hid-drff.c
@@ -96,18 +96,18 @@
 	int error;
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output reports found\n");
+		hid_err(hid, "no output reports found\n");
 		return -ENODEV;
 	}
 
 	report = list_first_entry(report_list, struct hid_report, list);
 	if (report->maxfield < 1) {
-		dev_err(&hid->dev, "no fields in the report\n");
+		hid_err(hid, "no fields in the report\n");
 		return -ENODEV;
 	}
 
 	if (report->field[0]->report_count < 7) {
-		dev_err(&hid->dev, "not enough values in the field\n");
+		hid_err(hid, "not enough values in the field\n");
 		return -ENODEV;
 	}
 
@@ -133,8 +133,8 @@
 	drff->report->field[0]->value[6] = 0x00;
 	usbhid_submit_report(hid, drff->report, USB_DIR_OUT);
 
-	dev_info(&hid->dev, "Force Feedback for DragonRise Inc. game "
-	       "controllers by Richard Walmsley <richwalm@gmail.com>\n");
+	hid_info(hid, "Force Feedback for DragonRise Inc. "
+		 "game controllers by Richard Walmsley <richwalm@gmail.com>\n");
 
 	return 0;
 }
@@ -153,13 +153,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index 5a1b52e..03bee19 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -2,6 +2,8 @@
  *  HID driver for eGalax dual-touch panels
  *
  *  Copyright (c) 2010 Stephane Chatty <chatty@enac.fr>
+ *  Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se>
+ *  Copyright (c) 2010 Canonical, Ltd.
  *
  */
 
@@ -16,6 +18,7 @@
 #include <linux/hid.h>
 #include <linux/module.h>
 #include <linux/usb.h>
+#include <linux/input/mt.h>
 #include <linux/slab.h>
 #include "usbhid/usbhid.h"
 
@@ -25,38 +28,53 @@
 
 #include "hid-ids.h"
 
+#define MAX_SLOTS		2
+
+/* estimated signal-to-noise ratios */
+#define SN_MOVE			4096
+#define SN_PRESSURE		32
+
 struct egalax_data {
-	__u16 x, y, z;
-	__u8 id;
-	bool first;		/* is this the first finger in the frame? */
-	bool valid;		/* valid finger data, or just placeholder? */
-	bool activity;		/* at least one active finger previously? */
-	__u16 lastx, lasty, lastz;	/* latest valid (x, y, z) in the frame */
+	int valid;
+	int slot;
+	int touch;
+	int x, y, z;
 };
 
+static void set_abs(struct input_dev *input, unsigned int code,
+		    struct hid_field *field, int snratio)
+{
+	int fmin = field->logical_minimum;
+	int fmax = field->logical_maximum;
+	int fuzz = snratio ? (fmax - fmin) / snratio : 0;
+	input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+}
+
 static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
+	struct input_dev *input = hi->input;
+
 	switch (usage->hid & HID_USAGE_PAGE) {
 
 	case HID_UP_GENDESK:
 		switch (usage->hid) {
 		case HID_GD_X:
+			field->logical_maximum = 32760;
 			hid_map_usage(hi, usage, bit, max,
 					EV_ABS, ABS_MT_POSITION_X);
+			set_abs(input, ABS_MT_POSITION_X, field, SN_MOVE);
 			/* touchscreen emulation */
-			input_set_abs_params(hi->input, ABS_X,
-						field->logical_minimum,
-						field->logical_maximum, 0, 0);
+			set_abs(input, ABS_X, field, SN_MOVE);
 			return 1;
 		case HID_GD_Y:
+			field->logical_maximum = 32760;
 			hid_map_usage(hi, usage, bit, max,
 					EV_ABS, ABS_MT_POSITION_Y);
+			set_abs(input, ABS_MT_POSITION_Y, field, SN_MOVE);
 			/* touchscreen emulation */
-			input_set_abs_params(hi->input, ABS_Y,
-						field->logical_minimum,
-						field->logical_maximum, 0, 0);
+			set_abs(input, ABS_Y, field, SN_MOVE);
 			return 1;
 		}
 		return 0;
@@ -66,6 +84,7 @@
 		case HID_DG_TIPSWITCH:
 			/* touchscreen emulation */
 			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			input_set_capability(input, EV_KEY, BTN_TOUCH);
 			return 1;
 		case HID_DG_INRANGE:
 		case HID_DG_CONFIDENCE:
@@ -73,16 +92,15 @@
 		case HID_DG_CONTACTMAX:
 			return -1;
 		case HID_DG_CONTACTID:
-			hid_map_usage(hi, usage, bit, max,
-					EV_ABS, ABS_MT_TRACKING_ID);
+			input_mt_init_slots(input, MAX_SLOTS);
 			return 1;
 		case HID_DG_TIPPRESSURE:
+			field->logical_minimum = 0;
 			hid_map_usage(hi, usage, bit, max,
 					EV_ABS, ABS_MT_PRESSURE);
+			set_abs(input, ABS_MT_PRESSURE, field, SN_PRESSURE);
 			/* touchscreen emulation */
-			input_set_abs_params(hi->input, ABS_PRESSURE,
-						field->logical_minimum,
-						field->logical_maximum, 0, 0);
+			set_abs(input, ABS_PRESSURE, field, SN_PRESSURE);
 			return 1;
 		}
 		return 0;
@@ -96,10 +114,10 @@
 		struct hid_field *field, struct hid_usage *usage,
 		unsigned long **bit, int *max)
 {
+	/* tell hid-input to skip setup of these event types */
 	if (usage->type == EV_KEY || usage->type == EV_ABS)
-		clear_bit(usage->code, *bit);
-
-	return 0;
+		set_bit(usage->type, hi->input->evbit);
+	return -1;
 }
 
 /*
@@ -108,58 +126,16 @@
  */
 static void egalax_filter_event(struct egalax_data *td, struct input_dev *input)
 {
-	td->first = !td->first; /* touchscreen emulation */
-
-	if (td->valid) {
-		/* emit multitouch events */
-		input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
-		input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x >> 3);
-		input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y >> 3);
+	input_mt_slot(input, td->slot);
+	input_mt_report_slot_state(input, MT_TOOL_FINGER, td->touch);
+	if (td->touch) {
+		input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
+		input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
 		input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z);
-
-		input_mt_sync(input);
-
-		/*
-		 * touchscreen emulation: store (x, y) as
-		 * the last valid values in this frame
-		 */
-		td->lastx = td->x;
-		td->lasty = td->y;
-		td->lastz = td->z;
 	}
-
-	/*
-	 * touchscreen emulation: if this is the second finger and at least
-	 * one in this frame is valid, the latest valid in the frame is
-	 * the oldest on the panel, the one we want for single touch
-	 */
-	if (!td->first && td->activity) {
-		input_event(input, EV_ABS, ABS_X, td->lastx >> 3);
-		input_event(input, EV_ABS, ABS_Y, td->lasty >> 3);
- 		input_event(input, EV_ABS, ABS_PRESSURE, td->lastz);
-	}
-
-	if (!td->valid) {
-		/*
-		 * touchscreen emulation: if the first finger is invalid
-		 * and there previously was finger activity, this is a release
-		 */ 
-		if (td->first && td->activity) {
-			input_event(input, EV_KEY, BTN_TOUCH, 0);
-			td->activity = false;
-		}
-		return;
-	}
-
-
-	/* touchscreen emulation: if no previous activity, emit touch event */
-	if (!td->activity) {
-		input_event(input, EV_KEY, BTN_TOUCH, 1);
-		td->activity = true;
-	}
+	input_mt_report_pointer_emulation(input, true);
 }
 
-
 static int egalax_event(struct hid_device *hid, struct hid_field *field,
 				struct hid_usage *usage, __s32 value)
 {
@@ -169,25 +145,26 @@
 	 * uses a standard parallel multitouch protocol (product ID ==
 	 * 48xx).  The second is capacitive and uses an unusual "serial"
 	 * protocol with a different message for each multitouch finger
-	 * (product ID == 72xx).  We do not yet generate a correct event
-	 * sequence for the capacitive/serial protocol.
+	 * (product ID == 72xx).
 	 */
 	if (hid->claimed & HID_CLAIMED_INPUT) {
 		struct input_dev *input = field->hidinput->input;
 
 		switch (usage->hid) {
 		case HID_DG_INRANGE:
+			td->valid = value;
+			break;
 		case HID_DG_CONFIDENCE:
 			/* avoid interference from generic hidinput handling */
 			break;
 		case HID_DG_TIPSWITCH:
-			td->valid = value;
+			td->touch = value;
 			break;
 		case HID_DG_TIPPRESSURE:
 			td->z = value;
 			break;
 		case HID_DG_CONTACTID:
-			td->id = value;
+			td->slot = clamp_val(value, 0, MAX_SLOTS - 1);
 			break;
 		case HID_GD_X:
 			td->x = value;
@@ -195,11 +172,11 @@
 		case HID_GD_Y:
 			td->y = value;
 			/* this is the last field in a finger */
-			egalax_filter_event(td, input);
+			if (td->valid)
+				egalax_filter_event(td, input);
 			break;
 		case HID_DG_CONTACTCOUNT:
 			/* touch emulation: this is the last field in a frame */
-			td->first = false;
 			break;
 
 		default:
@@ -223,7 +200,7 @@
 
 	td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL);
 	if (!td) {
-		dev_err(&hdev->dev, "cannot allocate eGalax data\n");
+		hid_err(hdev, "cannot allocate eGalax data\n");
 		return -ENOMEM;
 	}
 	hid_set_drvdata(hdev, td);
@@ -261,6 +238,12 @@
 			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
 			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, egalax_devices);
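Note: both panel drivers above are converted to the slotted multitouch protocol from <linux/input/mt.h> instead of hand-rolled tracking IDs and touchscreen emulation. A rough per-frame sketch of that pattern, with struct my_contact and nr_slots as illustrative placeholders (the real drivers also report pressure or touch major/minor axes):

#include <linux/input.h>
#include <linux/input/mt.h>

struct my_contact {
	bool touch;
	int x, y;
};

/* report one frame of contacts, one slot per contact */
static void my_report_frame(struct input_dev *input,
			    const struct my_contact *c, int nr_slots)
{
	int i;

	for (i = 0; i < nr_slots; i++) {
		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, c[i].touch);
		if (c[i].touch) {
			input_event(input, EV_ABS, ABS_MT_POSITION_X, c[i].x);
			input_event(input, EV_ABS, ABS_MT_POSITION_Y, c[i].y);
		}
	}
	/* derive single-touch BTN_TOUCH/ABS_X/ABS_Y emulation from the slots */
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);
}

(input_mt_init_slots(input, nr_slots) must have been called once at setup, as both drivers now do from their CONTACTID mapping callbacks.)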
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 6e31f30..79d0c61 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -24,8 +24,7 @@
 		unsigned int *rsize)
 {
 	if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) {
-		dev_info(&hdev->dev, "Fixing up Elecom BM084 "
-				"report descriptor.\n");
+		hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n");
 		rdesc[47] = 0x00;
 	}
     return rdesc;
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
new file mode 100644
index 0000000..81877c6
--- /dev/null
+++ b/drivers/hid/hid-emsff.c
@@ -0,0 +1,161 @@
+/*
+ *  Force feedback support for EMS Trio Linker Plus II
+ *
+ *  Copyright (c) 2010 Ignaz Forster <ignaz.forster@gmx.de>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+struct emsff_device {
+	struct hid_report *report;
+};
+
+static int emsff_play(struct input_dev *dev, void *data,
+			 struct ff_effect *effect)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct emsff_device *emsff = data;
+	int weak, strong;
+
+	weak = effect->u.rumble.weak_magnitude;
+	strong = effect->u.rumble.strong_magnitude;
+
+	dbg_hid("called with 0x%04x 0x%04x\n", strong, weak);
+
+	weak = weak * 0xff / 0xffff;
+	strong = strong * 0xff / 0xffff;
+
+	emsff->report->field[0]->value[1] = weak;
+	emsff->report->field[0]->value[2] = strong;
+
+	dbg_hid("running with 0x%02x 0x%02x\n", strong, weak);
+	usbhid_submit_report(hid, emsff->report, USB_DIR_OUT);
+
+	return 0;
+}
+
+static int emsff_init(struct hid_device *hid)
+{
+	struct emsff_device *emsff;
+	struct hid_report *report;
+	struct hid_input *hidinput = list_first_entry(&hid->inputs,
+						struct hid_input, list);
+	struct list_head *report_list =
+			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
+	struct input_dev *dev = hidinput->input;
+	int error;
+
+	if (list_empty(report_list)) {
+		hid_err(hid, "no output reports found\n");
+		return -ENODEV;
+	}
+
+	report = list_first_entry(report_list, struct hid_report, list);
+	if (report->maxfield < 1) {
+		hid_err(hid, "no fields in the report\n");
+		return -ENODEV;
+	}
+
+	if (report->field[0]->report_count < 7) {
+		hid_err(hid, "not enough values in the field\n");
+		return -ENODEV;
+	}
+
+	emsff = kzalloc(sizeof(struct emsff_device), GFP_KERNEL);
+	if (!emsff)
+		return -ENOMEM;
+
+	set_bit(FF_RUMBLE, dev->ffbit);
+
+	error = input_ff_create_memless(dev, emsff, emsff_play);
+	if (error) {
+		kfree(emsff);
+		return error;
+	}
+
+	emsff->report = report;
+	emsff->report->field[0]->value[0] = 0x01;
+	emsff->report->field[0]->value[1] = 0x00;
+	emsff->report->field[0]->value[2] = 0x00;
+	emsff->report->field[0]->value[3] = 0x00;
+	emsff->report->field[0]->value[4] = 0x00;
+	emsff->report->field[0]->value[5] = 0x00;
+	emsff->report->field[0]->value[6] = 0x00;
+	usbhid_submit_report(hid, emsff->report, USB_DIR_OUT);
+
+	hid_info(hid, "force feedback for EMS based devices by Ignaz Forster <ignaz.forster@gmx.de>\n");
+
+	return 0;
+}
+
+static int ems_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		hid_err(hdev, "parse failed\n");
+		goto err;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+	if (ret) {
+		hid_err(hdev, "hw start failed\n");
+		goto err;
+	}
+
+	emsff_init(hdev);
+
+	return 0;
+err:
+	return ret;
+}
+
+static const struct hid_device_id ems_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, ems_devices);
+
+static struct hid_driver ems_driver = {
+	.name = "hkems",
+	.id_table = ems_devices,
+	.probe = ems_probe,
+};
+
+static int ems_init(void)
+{
+	return hid_register_driver(&ems_driver);
+}
+
+static void ems_exit(void)
+{
+	hid_unregister_driver(&ems_driver);
+}
+
+module_init(ems_init);
+module_exit(ems_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 88dfcf4..279ba53 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -87,7 +87,7 @@
 	int error;
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output reports found\n");
+		hid_err(hid, "no output reports found\n");
 		return -ENODEV;
 	}
 
@@ -95,12 +95,12 @@
 
 	report = list_entry(report_ptr, struct hid_report, list);
 	if (report->maxfield < 1) {
-		dev_err(&hid->dev, "no fields in the report\n");
+		hid_err(hid, "no fields in the report\n");
 		return -ENODEV;
 	}
 
 	if (report->field[0]->report_count < 6) {
-		dev_err(&hid->dev, "not enough values in the field\n");
+		hid_err(hid, "not enough values in the field\n");
 		return -ENODEV;
 	}
 
@@ -128,8 +128,7 @@
 
 	usbhid_submit_report(hid, gaff->report, USB_DIR_OUT);
 
-	dev_info(&hid->dev, "Force Feedback for GreenAsia 0x12"
-	       " devices by Lukasz Lubojanski <lukasz@lubojanski.info>\n");
+	hid_info(hid, "Force Feedback for GreenAsia 0x12 devices by Lukasz Lubojanski <lukasz@lubojanski.info>\n");
 
 	return 0;
 }
@@ -148,13 +147,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3341baa..92a0d61 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -97,6 +97,12 @@
 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI	0x0236
 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO	0x0237
 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS	0x0238
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI	0x023f
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO	0x0240
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS	0x0241
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI	0x0242
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO	0x0243
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS	0x0244
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
@@ -134,7 +140,9 @@
 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2	0x5577
 
 #define USB_VENDOR_ID_CANDO		0x2087
+#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH	0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
 
@@ -156,6 +164,7 @@
 #define USB_VENDOR_ID_CHICONY		0x04f2
 #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD	0x0418
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH	0xb19d
+#define USB_DEVICE_ID_CHICONY_WIRELESS	0x0618
 
 #define USB_VENDOR_ID_CIDC		0x1677
 
@@ -179,6 +188,7 @@
 #define USB_DEVICE_ID_CYPRESS_BARCODE_1	0xde61
 #define USB_DEVICE_ID_CYPRESS_BARCODE_2	0xde64
 #define USB_DEVICE_ID_CYPRESS_BARCODE_3	0xbca1
+#define USB_DEVICE_ID_CYPRESS_TRUETOUCH	0xc001
 
 #define USB_VENDOR_ID_DEALEXTREAME	0x10c5
 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701	0x819a
@@ -196,13 +206,21 @@
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH	0x480d
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1	0x720c
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2	0x72a1
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3	0x480e
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4	0x726b
 
 #define USB_VENDOR_ID_ELECOM		0x056e
 #define USB_DEVICE_ID_ELECOM_BM084	0x0061
 
+#define USB_VENDOR_ID_DREAM_CHEEKY	0x1d34
+
 #define USB_VENDOR_ID_ELO		0x04E7
 #define USB_DEVICE_ID_ELO_TS2700	0x0020
 
+#define USB_VENDOR_ID_EMS		0x2006
+#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118
+
 #define USB_VENDOR_ID_ESSENTIAL_REALITY	0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
 
@@ -221,6 +239,7 @@
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR	0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH	0x0dfc
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
 
 #define USB_VENDOR_ID_GLAB		0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30	0x0038
@@ -303,6 +322,9 @@
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST	0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST	0x8fff
 
+#define USB_VENDOR_ID_HANVON		0x20b3
+#define USB_DEVICE_ID_HANVON_MULTITOUCH	0x0a18
+
 #define USB_VENDOR_ID_HAPP		0x078b
 #define USB_DEVICE_ID_UGCI_DRIVING	0x0010
 #define USB_DEVICE_ID_UGCI_FLYING	0x0020
@@ -475,6 +497,7 @@
 
 #define USB_VENDOR_ID_ROCCAT		0x1e7d
 #define USB_DEVICE_ID_ROCCAT_KONE	0x2ced
+#define USB_DEVICE_ID_ROCCAT_KONEPLUS	0x2d51
 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRED	0x2c24
 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS	0x2cf6
 
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d8d372b..7f552bf 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -290,6 +290,14 @@
 		goto ignore;
 	}
 
+	if (field->report_type == HID_FEATURE_REPORT) {
+		if (device->driver->feature_mapping) {
+			device->driver->feature_mapping(device, hidinput, field,
+				usage);
+		}
+		goto ignore;
+	}
+
 	if (device->driver->input_mapping) {
 		int ret = device->driver->input_mapping(device, hidinput, field,
 				usage, &bit, &max);
@@ -319,21 +327,21 @@
 
 		switch (field->application) {
 		case HID_GD_MOUSE:
-		case HID_GD_POINTER:  code += 0x110; break;
+		case HID_GD_POINTER:  code += BTN_MOUSE; break;
 		case HID_GD_JOYSTICK:
 				if (code <= 0xf)
 					code += BTN_JOYSTICK;
 				else
 					code += BTN_TRIGGER_HAPPY;
 				break;
-		case HID_GD_GAMEPAD:  code += 0x130; break;
+		case HID_GD_GAMEPAD:  code += BTN_GAMEPAD; break;
 		default:
 			switch (field->physical) {
 			case HID_GD_MOUSE:
-			case HID_GD_POINTER:  code += 0x110; break;
-			case HID_GD_JOYSTICK: code += 0x120; break;
-			case HID_GD_GAMEPAD:  code += 0x130; break;
-			default:              code += 0x100;
+			case HID_GD_POINTER:  code += BTN_MOUSE; break;
+			case HID_GD_JOYSTICK: code += BTN_JOYSTICK; break;
+			case HID_GD_GAMEPAD:  code += BTN_GAMEPAD; break;
+			default:              code += BTN_MISC;
 			}
 		}
 
@@ -817,14 +825,14 @@
 {
 	struct hid_device *hid = input_get_drvdata(dev);
 
-	return hid->ll_driver->open(hid);
+	return hid_hw_open(hid);
 }
 
 static void hidinput_close(struct input_dev *dev)
 {
 	struct hid_device *hid = input_get_drvdata(dev);
 
-	hid->ll_driver->close(hid);
+	hid_hw_close(hid);
 }
 
 /*
@@ -839,7 +847,6 @@
 	struct hid_input *hidinput = NULL;
 	struct input_dev *input_dev;
 	int i, j, k;
-	int max_report_type = HID_OUTPUT_REPORT;
 
 	INIT_LIST_HEAD(&hid->inputs);
 
@@ -856,10 +863,11 @@
 			return -1;
 	}
 
-	if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
-		max_report_type = HID_INPUT_REPORT;
+	for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) {
+		if (k == HID_OUTPUT_REPORT &&
+			hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
+			continue;
 
-	for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
 		list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
 
 			if (!report->maxfield)
@@ -871,7 +879,7 @@
 				if (!hidinput || !input_dev) {
 					kfree(hidinput);
 					input_free_device(input_dev);
-					err_hid("Out of memory during hid input probe");
+					hid_err(hid, "Out of memory during hid input probe\n");
 					goto out_unwind;
 				}
 
@@ -912,6 +920,7 @@
 				hidinput = NULL;
 			}
 		}
+	}
 
 	if (hidinput && input_register_device(hidinput->input))
 		goto out_cleanup;
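For reference, the hid-input.c hunk above swaps the bare button offsets for the named bases from the input event header. A quick standalone check (plain C against the userspace <linux/input.h>, not part of the patch) that the names keep the old values:

#include <assert.h>
#include <linux/input.h>	/* BTN_MISC, BTN_MOUSE, BTN_JOYSTICK, BTN_GAMEPAD */

int main(void)
{
	/* the hunk replaces 0x100, 0x110, 0x120 and 0x130 with these names */
	assert(BTN_MISC     == 0x100);
	assert(BTN_MOUSE    == 0x110);
	assert(BTN_JOYSTICK == 0x120);
	assert(BTN_GAMEPAD  == 0x130);
	return 0;
}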
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 817247e..f2ba9ef 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -32,8 +32,8 @@
 		rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
 		rdesc[71] == 0x75 && rdesc[72] == 0x08 &&
 		rdesc[73] == 0x95 && rdesc[74] == 0x01) {
-		dev_info(&hdev->dev, "fixing up Kye/Genius Ergo Mouse report "
-				"descriptor\n");
+		hid_info(hdev,
+			 "fixing up Kye/Genius Ergo Mouse report descriptor\n");
 		rdesc[62] = 0x09;
 		rdesc[64] = 0x04;
 		rdesc[66] = 0x07;
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index b629fba..aef4104 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -53,23 +53,22 @@
 
 	if ((quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
 			rdesc[84] == 0x8c && rdesc[85] == 0x02) {
-		dev_info(&hdev->dev, "fixing up Logitech keyboard report "
-				"descriptor\n");
+		hid_info(hdev,
+			 "fixing up Logitech keyboard report descriptor\n");
 		rdesc[84] = rdesc[89] = 0x4d;
 		rdesc[85] = rdesc[90] = 0x10;
 	}
 	if ((quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
 			rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
 			rdesc[49] == 0x81 && rdesc[50] == 0x06) {
-		dev_info(&hdev->dev, "fixing up rel/abs in Logitech "
-				"report descriptor\n");
+		hid_info(hdev,
+			 "fixing up rel/abs in Logitech report descriptor\n");
 		rdesc[33] = rdesc[50] = 0x02;
 	}
 	if ((quirks & LG_FF4) && *rsize >= 101 &&
 			rdesc[41] == 0x95 && rdesc[42] == 0x0B &&
 			rdesc[47] == 0x05 && rdesc[48] == 0x09) {
-		dev_info(&hdev->dev, "fixing up Logitech Speed Force Wireless "
-			"button descriptor\n");
+		hid_info(hdev, "fixing up Logitech Speed Force Wireless button descriptor\n");
 		rdesc[41] = 0x05;
 		rdesc[42] = 0x09;
 		rdesc[47] = 0x95;
@@ -288,7 +287,7 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
@@ -297,7 +296,7 @@
 
 	ret = hid_hw_start(hdev, connect_mask);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 4258253c..3c31bc6 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -72,18 +72,18 @@
 	int error;
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output report found\n");
+		hid_err(hid, "no output report found\n");
 		return -ENODEV;
 	}
 
 	report = list_entry(report_list->next, struct hid_report, list);
 
 	if (report->maxfield < 1) {
-		dev_err(&hid->dev, "output report is empty\n");
+		hid_err(hid, "output report is empty\n");
 		return -ENODEV;
 	}
 	if (report->field[0]->report_count < 7) {
-		dev_err(&hid->dev, "not enough values in the field\n");
+		hid_err(hid, "not enough values in the field\n");
 		return -ENODEV;
 	}
 
@@ -110,8 +110,7 @@
 
 	usbhid_submit_report(hid, report, USB_DIR_OUT);
 
-	dev_info(&hid->dev, "Force feedback for Logitech RumblePad/Rumblepad 2 by "
-	       "Anssi Hannula <anssi.hannula@gmail.com>\n");
+	hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
 	return 0;
 }
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index 4002832..f98644c 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -141,20 +141,20 @@
 
 	/* Find the report to use */
 	if (list_empty(report_list)) {
-		err_hid("No output report found");
+		hid_err(hid, "No output report found\n");
 		return -1;
 	}
 
 	/* Check that the report looks ok */
 	report = list_entry(report_list->next, struct hid_report, list);
 	if (!report) {
-		err_hid("NULL output report");
+		hid_err(hid, "NULL output report\n");
 		return -1;
 	}
 
 	field = report->field[0];
 	if (!field) {
-		err_hid("NULL field");
+		hid_err(hid, "NULL field\n");
 		return -1;
 	}
 
@@ -169,8 +169,7 @@
 	if (test_bit(FF_AUTOCENTER, dev->ffbit))
 		dev->ff->set_autocenter = hid_lg3ff_set_autocenter;
 
-	dev_info(&hid->dev, "Force feedback for Logitech Flight System G940 by "
-			"Gary Stein <LordCnidarian@gmail.com>\n");
+	hid_info(hid, "Force feedback for Logitech Flight System G940 by Gary Stein <LordCnidarian@gmail.com>\n");
 	return 0;
 }
 
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 7eef5a2..fa550c8 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -101,20 +101,20 @@
 
 	/* Find the report to use */
 	if (list_empty(report_list)) {
-		err_hid("No output report found");
+		hid_err(hid, "No output report found\n");
 		return -1;
 	}
 
 	/* Check that the report looks ok */
 	report = list_entry(report_list->next, struct hid_report, list);
 	if (!report) {
-		err_hid("NULL output report");
+		hid_err(hid, "NULL output report\n");
 		return -1;
 	}
 
 	field = report->field[0];
 	if (!field) {
-		err_hid("NULL field");
+		hid_err(hid, "NULL field\n");
 		return -1;
 	}
 
@@ -129,8 +129,7 @@
 	if (test_bit(FF_AUTOCENTER, dev->ffbit))
 		dev->ff->set_autocenter = hid_lg4ff_set_autocenter;
 
-	dev_info(&hid->dev, "Force feedback for Logitech Speed Force Wireless by "
-			"Simon Wood <simon@mungewell.org>\n");
+	hid_info(hid, "Force feedback for Logitech Speed Force Wireless by Simon Wood <simon@mungewell.org>\n");
 	return 0;
 }
 
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index 61142b7..90d0ef2 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -27,6 +27,8 @@
  * e-mail - mail your message to <johann.deneux@it.uu.se>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/input.h>
 #include <linux/usb.h>
 #include <linux/hid.h>
@@ -146,7 +148,7 @@
 
 	/* Find the report to use */
 	if (list_empty(report_list)) {
-		err_hid("No output report found");
+		hid_err(hid, "No output report found\n");
 		return -1;
 	}
 
@@ -154,7 +156,7 @@
 	report = list_entry(report_list->next, struct hid_report, list);
 	field = report->field[0];
 	if (!field) {
-		err_hid("NULL field");
+		hid_err(hid, "NULL field\n");
 		return -1;
 	}
 
@@ -176,7 +178,7 @@
 	if ( test_bit(FF_AUTOCENTER, dev->ffbit) )
 		dev->ff->set_autocenter = hid_lgff_set_autocenter;
 
-	printk(KERN_INFO "Force feedback for Logitech force feedback devices by Johann Deneux <johann.deneux@it.uu.se>\n");
+	pr_info("Force feedback for Logitech force feedback devices by Johann Deneux <johann.deneux@it.uu.se>\n");
 
 	return 0;
 }
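The pr_fmt() define added at the top of hid-lgff.c above is what gives the new pr_info() call a module-name prefix without repeating it in every string. A rough userspace illustration of the macro mechanics (the printf-based pr_info below is only a stand-in for the kernel helper):

#include <stdio.h>

#define KBUILD_MODNAME "hid-lgff"
/* define pr_fmt() before any printing helper is expanded, as in the patch */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Force feedback enabled for %s\n", "a demo device");
	/* prints: hid-lgff: Force feedback enabled for a demo device */
	return 0;
}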
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index e6dc151..698e645 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -12,6 +12,8 @@
  * any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/hid.h>
 #include <linux/module.h>
@@ -433,6 +435,11 @@
 	if (!msc->input)
 		msc->input = hi->input;
 
+	/* Magic Trackpad does not give relative data after switching to MT */
+	if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
+	    field->flags & HID_MAIN_ITEM_RELATIVE)
+		return -1;
+
 	return 0;
 }
 
@@ -446,7 +453,7 @@
 
 	msc = kzalloc(sizeof(*msc), GFP_KERNEL);
 	if (msc == NULL) {
-		dev_err(&hdev->dev, "can't alloc magicmouse descriptor\n");
+		hid_err(hdev, "can't alloc magicmouse descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -459,13 +466,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "magicmouse hid parse failed\n");
+		hid_err(hdev, "magicmouse hid parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "magicmouse hw start failed\n");
+		hid_err(hdev, "magicmouse hw start failed\n");
 		goto err_free;
 	}
 
@@ -486,7 +493,7 @@
 	}
 
 	if (!report) {
-		dev_err(&hdev->dev, "unable to register touch report\n");
+		hid_err(hdev, "unable to register touch report\n");
 		ret = -ENOMEM;
 		goto err_stop_hw;
 	}
@@ -495,8 +502,7 @@
 	ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
 			HID_FEATURE_REPORT);
 	if (ret != sizeof(feature)) {
-		dev_err(&hdev->dev, "unable to request touch data (%d)\n",
-				ret);
+		hid_err(hdev, "unable to request touch data (%d)\n", ret);
 		goto err_stop_hw;
 	}
 
@@ -540,7 +546,7 @@
 
 	ret = hid_register_driver(&magicmouse_driver);
 	if (ret)
-		printk(KERN_ERR "can't register magicmouse driver\n");
+		pr_err("can't register magicmouse driver\n");
 
 	return ret;
 }
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index dc618c3..0f6fc54 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -40,8 +40,7 @@
 
 	if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
 			rdesc[559] == 0x29) {
-		dev_info(&hdev->dev, "fixing up Microsoft Wireless Receiver "
-				"Model 1028 report descriptor\n");
+		hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
 		rdesc[557] = 0x35;
 		rdesc[559] = 0x45;
 	}
@@ -155,14 +154,14 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | ((quirks & MS_HIDINPUT) ?
 				HID_CONNECT_HIDINPUT_FORCE : 0));
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c
index c95c31e..dedf757 100644
--- a/drivers/hid/hid-monterey.c
+++ b/drivers/hid/hid-monterey.c
@@ -26,8 +26,7 @@
 		unsigned int *rsize)
 {
 	if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
-		dev_info(&hdev->dev, "fixing up button/consumer in HID report "
-				"descriptor\n");
+		hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
 		rdesc[30] = 0x0c;
 	}
 	return rdesc;
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index ac5421d..aed7ffe 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -90,6 +90,10 @@
 	case 0xff000000:
 		/* ignore HID features */
 		return -1;
+
+	case HID_UP_BUTTON:
+		/* ignore buttons */
+		return -1;
 	}
 
 	return 0;
@@ -199,7 +203,7 @@
 
 	td = kmalloc(sizeof(struct mosart_data), GFP_KERNEL);
 	if (!td) {
-		dev_err(&hdev->dev, "cannot allocate MosArt data\n");
+		hid_err(hdev, "cannot allocate MosArt data\n");
 		return -ENOMEM;
 	}
 	td->valid = false;
@@ -230,6 +234,19 @@
 	return ret;
 }
 
+#ifdef CONFIG_PM
+static int mosart_reset_resume(struct hid_device *hdev)
+{
+	struct hid_report_enum *re = hdev->report_enum
+						+ HID_FEATURE_REPORT;
+	struct hid_report *r = re->report_id_hash[7];
+
+	r->field[0]->value[0] = 0x02;
+	usbhid_submit_report(hdev, r, USB_DIR_OUT);
+	return 0;
+}
+#endif
+
 static void mosart_remove(struct hid_device *hdev)
 {
 	hid_hw_stop(hdev);
@@ -240,6 +257,7 @@
 static const struct hid_device_id mosart_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, mosart_devices);
@@ -258,6 +276,9 @@
 	.input_mapped = mosart_input_mapped,
 	.usage_table = mosart_grabbed_usages,
 	.event = mosart_event,
+#ifdef CONFIG_PM
+	.reset_resume = mosart_reset_resume,
+#endif
 };
 
 static int __init mosart_init(void)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
new file mode 100644
index 0000000..07d3183
--- /dev/null
+++ b/drivers/hid/hid-multitouch.c
@@ -0,0 +1,516 @@
+/*
+ *  HID driver for multitouch panels
+ *
+ *  Copyright (c) 2010-2011 Stephane Chatty <chatty@enac.fr>
+ *  Copyright (c) 2010-2011 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ *  Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/input/mt.h>
+#include "usbhid/usbhid.h"
+
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("HID multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+/* quirks to control the device */
+#define MT_QUIRK_NOT_SEEN_MEANS_UP	(1 << 0)
+#define MT_QUIRK_SLOT_IS_CONTACTID	(1 << 1)
+#define MT_QUIRK_CYPRESS		(1 << 2)
+#define MT_QUIRK_SLOT_IS_CONTACTNUMBER	(1 << 3)
+#define MT_QUIRK_VALID_IS_INRANGE	(1 << 4)
+#define MT_QUIRK_VALID_IS_CONFIDENCE	(1 << 5)
+
+struct mt_slot {
+	__s32 x, y, p, w, h;
+	__s32 contactid;	/* the device ContactID assigned to this slot */
+	bool touch_state;	/* is the touch valid? */
+	bool seen_in_this_frame;/* has this slot been updated */
+};
+
+struct mt_device {
+	struct mt_slot curdata;	/* placeholder of incoming data */
+	struct mt_class *mtclass;	/* our mt device class */
+	unsigned last_field_index;	/* last field index of the report */
+	unsigned last_slot_field;	/* the last field of a slot */
+	__s8 inputmode;		/* InputMode HID feature, -1 if non-existent */
+	__u8 num_received;	/* how many contacts we received */
+	__u8 num_expected;	/* expected last contact index */
+	bool curvalid;		/* is the current contact valid? */
+	struct mt_slot slots[0];	/* first slot */
+};
+
+struct mt_class {
+	__s32 name;	/* MT_CLS */
+	__s32 quirks;
+	__s32 sn_move;	/* Signal/noise ratio for move events */
+	__s32 sn_pressure;	/* Signal/noise ratio for pressure events */
+	__u8 maxcontacts;
+};
+
+/* classes of device behavior */
+#define MT_CLS_DEFAULT	1
+#define MT_CLS_DUAL1	2
+#define MT_CLS_DUAL2	3
+#define MT_CLS_CYPRESS	4
+
+/*
+ * these device-dependent functions determine what slot corresponds
+ * to a valid contact that was just read.
+ */
+
+static int cypress_compute_slot(struct mt_device *td)
+{
+	if (td->curdata.contactid != 0 || td->num_received == 0)
+		return td->curdata.contactid;
+	else
+		return -1;
+}
+
+static int find_slot_from_contactid(struct mt_device *td)
+{
+	int i;
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		if (td->slots[i].contactid == td->curdata.contactid &&
+			td->slots[i].touch_state)
+			return i;
+	}
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		if (!td->slots[i].seen_in_this_frame &&
+			!td->slots[i].touch_state)
+			return i;
+	}
+	/* should not occur. If it does, the device sent more touches
+	 * than it declared in its report descriptor; the extra contact
+	 * is ignored. */
+	return -1;
+}
+
+struct mt_class mt_classes[] = {
+	{ .name = MT_CLS_DEFAULT,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE,
+		.maxcontacts = 10 },
+	{ .name = MT_CLS_DUAL1,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE |
+			MT_QUIRK_SLOT_IS_CONTACTID,
+		.maxcontacts = 2 },
+	{ .name = MT_CLS_DUAL2,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE |
+			MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+		.maxcontacts = 2 },
+	{ .name = MT_CLS_CYPRESS,
+		.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+			MT_QUIRK_CYPRESS,
+		.maxcontacts = 10 },
+
+	{ }
+};
+
+static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage)
+{
+	if (usage->hid == HID_DG_INPUTMODE) {
+		struct mt_device *td = hid_get_drvdata(hdev);
+		td->inputmode = field->report->id;
+	}
+}
+
+static void set_abs(struct input_dev *input, unsigned int code,
+		struct hid_field *field, int snratio)
+{
+	int fmin = field->logical_minimum;
+	int fmax = field->logical_maximum;
+	int fuzz = snratio ? (fmax - fmin) / snratio : 0;
+	input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+}
+
+static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	struct mt_class *cls = td->mtclass;
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			set_abs(hi->input, ABS_MT_POSITION_X, field,
+				cls->sn_move);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_X, field, cls->sn_move);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			set_abs(hi->input, ABS_MT_POSITION_Y, field,
+				cls->sn_move);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_Y, field, cls->sn_move);
+			td->last_slot_field = usage->hid;
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONFIDENCE:
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_TIPSWITCH:
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONTACTID:
+			input_mt_init_slots(hi->input,
+					td->mtclass->maxcontacts);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_WIDTH:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MAJOR);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_HEIGHT:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MINOR);
+			field->logical_maximum = 1;
+			field->logical_minimum = 0;
+			set_abs(hi->input, ABS_MT_ORIENTATION, field, 0);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_TIPPRESSURE:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_PRESSURE);
+			set_abs(hi->input, ABS_MT_PRESSURE, field,
+				cls->sn_pressure);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_PRESSURE, field,
+				cls->sn_pressure);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONTACTCOUNT:
+			td->last_field_index = field->report->maxfield - 1;
+			return 1;
+		case HID_DG_CONTACTMAX:
+			/* we don't set td->last_slot_field because contactcount
+			 * and contact max are global to the report */
+			return -1;
+		}
+		/* let hid-input decide for the others */
+		return 0;
+
+	case 0xff000000:
+		/* we do not want to map these: no input-oriented meaning */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		set_bit(usage->type, hi->input->evbit);
+
+	return -1;
+}
+
+static int mt_compute_slot(struct mt_device *td)
+{
+	__s32 quirks = td->mtclass->quirks;
+
+	if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
+		return td->curdata.contactid;
+
+	if (quirks & MT_QUIRK_CYPRESS)
+		return cypress_compute_slot(td);
+
+	if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER)
+		return td->num_received;
+
+	return find_slot_from_contactid(td);
+}
+
+/*
+ * this function is called when a whole contact has been processed,
+ * so that it can assign it to a slot and store the data there
+ */
+static void mt_complete_slot(struct mt_device *td)
+{
+	td->curdata.seen_in_this_frame = true;
+	if (td->curvalid) {
+		int slotnum = mt_compute_slot(td);
+
+		if (slotnum >= 0 && slotnum < td->mtclass->maxcontacts)
+			td->slots[slotnum] = td->curdata;
+	}
+	td->num_received++;
+}
+
+
+/*
+ * this function is called when a whole packet has been received and processed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mt_emit_event(struct mt_device *td, struct input_dev *input)
+{
+	int i;
+
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		struct mt_slot *s = &(td->slots[i]);
+		if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+			!s->seen_in_this_frame) {
+			s->touch_state = false;
+		}
+
+		input_mt_slot(input, i);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER,
+			s->touch_state);
+		if (s->touch_state) {
+			input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
+			input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
+			input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, s->w);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, s->h);
+		}
+		s->seen_in_this_frame = false;
+
+	}
+
+	input_mt_report_pointer_emulation(input, true);
+	input_sync(input);
+	td->num_received = 0;
+}
+
+
+
+static int mt_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct mt_device *td = hid_get_drvdata(hid);
+	__s32 quirks = td->mtclass->quirks;
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+				td->curvalid = value;
+			break;
+		case HID_DG_TIPSWITCH:
+			if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+				td->curvalid = value;
+			td->curdata.touch_state = value;
+			break;
+		case HID_DG_CONFIDENCE:
+			if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+				td->curvalid = value;
+			break;
+		case HID_DG_CONTACTID:
+			td->curdata.contactid = value;
+			break;
+		case HID_DG_TIPPRESSURE:
+			td->curdata.p = value;
+			break;
+		case HID_GD_X:
+			td->curdata.x = value;
+			break;
+		case HID_GD_Y:
+			td->curdata.y = value;
+			break;
+		case HID_DG_WIDTH:
+			td->curdata.w = value;
+			break;
+		case HID_DG_HEIGHT:
+			td->curdata.h = value;
+			break;
+		case HID_DG_CONTACTCOUNT:
+			/*
+			 * Includes multi-packet support where subsequent
+			 * packets are sent with zero contactcount.
+			 */
+			if (value)
+				td->num_expected = value;
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+
+		if (usage->hid == td->last_slot_field)
+			mt_complete_slot(td);
+
+		if (field->index == td->last_field_index
+			&& td->num_received >= td->num_expected)
+			mt_emit_event(td, field->hidinput->input);
+
+	}
+
+	/* the hidinput part is handled; hiddev still needs the event */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static void mt_set_input_mode(struct hid_device *hdev)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	struct hid_report *r;
+	struct hid_report_enum *re;
+
+	if (td->inputmode < 0)
+		return;
+
+	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+	r = re->report_id_hash[td->inputmode];
+	if (r) {
+		r->field[0]->value[0] = 0x02;
+		usbhid_submit_report(hdev, r, USB_DIR_OUT);
+	}
+}
+
+static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret, i;
+	struct mt_device *td;
+	struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+
+	for (i = 0; mt_classes[i].name ; i++) {
+		if (id->driver_data == mt_classes[i].name) {
+			mtclass = &(mt_classes[i]);
+			break;
+		}
+	}
+
+	/* This allows the driver to correctly support devices
+	 * that emit events over several HID messages.
+	 */
+	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+	td = kzalloc(sizeof(struct mt_device) +
+				mtclass->maxcontacts * sizeof(struct mt_slot),
+				GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate multitouch data\n");
+		return -ENOMEM;
+	}
+	td->mtclass = mtclass;
+	td->inputmode = -1;
+	hid_set_drvdata(hdev, td);
+
+	ret = hid_parse(hdev);
+	if (ret != 0)
+		goto fail;
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret)
+		goto fail;
+
+	mt_set_input_mode(hdev);
+
+	return 0;
+
+fail:
+	kfree(td);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int mt_reset_resume(struct hid_device *hdev)
+{
+	mt_set_input_mode(hdev);
+	return 0;
+}
+#endif
+
+static void mt_remove(struct hid_device *hdev)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	hid_hw_stop(hdev);
+	kfree(td);
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mt_devices[] = {
+
+	/* Cypress panel */
+	{ .driver_data = MT_CLS_CYPRESS,
+		HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS,
+			USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+
+	/* GeneralTouch panel */
+	{ .driver_data = MT_CLS_DUAL2,
+		HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
+
+	/* PixCir-based panels */
+	{ .driver_data = MT_CLS_DUAL1,
+		HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
+			USB_DEVICE_ID_HANVON_MULTITOUCH) },
+	{ .driver_data = MT_CLS_DUAL1,
+		HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, mt_devices);
+
+static const struct hid_usage_id mt_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mt_driver = {
+	.name = "hid-multitouch",
+	.id_table = mt_devices,
+	.probe = mt_probe,
+	.remove = mt_remove,
+	.input_mapping = mt_input_mapping,
+	.input_mapped = mt_input_mapped,
+	.feature_mapping = mt_feature_mapping,
+	.usage_table = mt_grabbed_usages,
+	.event = mt_event,
+#ifdef CONFIG_PM
+	.reset_resume = mt_reset_resume,
+#endif
+};
+
+static int __init mt_init(void)
+{
+	return hid_register_driver(&mt_driver);
+}
+
+static void __exit mt_exit(void)
+{
+	hid_unregister_driver(&mt_driver);
+}
+
+module_init(mt_init);
+module_exit(mt_exit);
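The least obvious part of the new hid-multitouch.c driver above is the slot bookkeeping: when none of the MT_QUIRK_* shortcuts applies, find_slot_from_contactid() first reuses the slot already tracking the same ContactID and only then takes a free slot that has not been updated in the current frame. A standalone sketch of that lookup (plain C, hypothetical demo_* names, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define MAXCONTACTS 10

struct demo_slot {
	int contactid;
	bool touch_state;
	bool seen_in_this_frame;
};

/* mirrors find_slot_from_contactid(): reuse first, then take a free slot */
static int demo_find_slot(const struct demo_slot *slots, int contactid)
{
	int i;

	for (i = 0; i < MAXCONTACTS; i++)
		if (slots[i].touch_state && slots[i].contactid == contactid)
			return i;
	for (i = 0; i < MAXCONTACTS; i++)
		if (!slots[i].touch_state && !slots[i].seen_in_this_frame)
			return i;
	return -1;	/* more contacts than the report descriptor announced */
}

int main(void)
{
	struct demo_slot slots[MAXCONTACTS] = {
		{ .contactid = 7, .touch_state = true },
	};

	printf("contact 7 -> slot %d\n", demo_find_slot(slots, 7)); /* 0, reused */
	printf("contact 3 -> slot %d\n", demo_find_slot(slots, 3)); /* 1, first free */
	return 0;
}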
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 69169ef..beb4034 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -130,8 +130,7 @@
 	if (ret == 8) {
 		ret = ntrig_version_string(&data[2], buf);
 
-		dev_info(&hdev->dev,
-			 "Firmware version: %s (%02x%02x %02x%02x)\n",
+		hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n",
 			 buf, data[2], data[3], data[4], data[5]);
 	}
 
@@ -831,7 +830,7 @@
 
 	nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
 	if (!nd) {
-		dev_err(&hdev->dev, "cannot allocate N-Trig data\n");
+		hid_err(hdev, "cannot allocate N-Trig data\n");
 		return -ENOMEM;
 	}
 
@@ -850,13 +849,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 2e79716..e90edfc 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -23,8 +23,7 @@
 		unsigned int *rsize)
 {
 	if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
-		dev_info(&hdev->dev, "Fixing up Ortek WKB-2000 "
-				"report descriptor.\n");
+		hid_info(hdev, "Fixing up Ortek WKB-2000 report descriptor\n");
 		rdesc[55] = 0x92;
 	}
 	return rdesc;
diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c
index 308d6ae..f1ea3ff 100644
--- a/drivers/hid/hid-petalynx.c
+++ b/drivers/hid/hid-petalynx.c
@@ -29,8 +29,7 @@
 	if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
 			rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
 			rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
-		dev_info(&hdev->dev, "fixing up Petalynx Maxter Remote report "
-				"descriptor\n");
+		hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
 		rdesc[60] = 0xfa;
 		rdesc[40] = 0xfa;
 	}
@@ -77,13 +76,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index bc2e077..de9cf21b 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -253,7 +253,7 @@
 		if (report->id == id)
 			return report;
 	}
-	dev_warn(&hdev->dev, "No report with id 0x%x found\n", id);
+	hid_warn(hdev, "No report with id 0x%x found\n", id);
 	return NULL;
 }
 
@@ -1329,7 +1329,7 @@
 
 	verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0);
 	if (!verinfo) {
-		dev_err(&hdev->dev, "no version response from PicoLCD");
+		hid_err(hdev, "no version response from PicoLCD\n");
 		return -ENODEV;
 	}
 
@@ -1337,14 +1337,14 @@
 		data->version[0] = verinfo->raw_data[1];
 		data->version[1] = verinfo->raw_data[0];
 		if (data->status & PICOLCD_BOOTLOADER) {
-			dev_info(&hdev->dev, "PicoLCD, bootloader version %d.%d\n",
-					verinfo->raw_data[1], verinfo->raw_data[0]);
+			hid_info(hdev, "PicoLCD, bootloader version %d.%d\n",
+				 verinfo->raw_data[1], verinfo->raw_data[0]);
 		} else {
-			dev_info(&hdev->dev, "PicoLCD, firmware version %d.%d\n",
-					verinfo->raw_data[1], verinfo->raw_data[0]);
+			hid_info(hdev, "PicoLCD, firmware version %d.%d\n",
+				 verinfo->raw_data[1], verinfo->raw_data[0]);
 		}
 	} else {
-		dev_err(&hdev->dev, "confused, got unexpected version response from PicoLCD\n");
+		hid_err(hdev, "confused, got unexpected version response from PicoLCD\n");
 		ret = -EINVAL;
 	}
 	kfree(verinfo);
@@ -1544,7 +1544,7 @@
 
 	/* prepare buffer with info about what we want to read (addr & len) */
 	raw_data[0] = *off & 0xff;
-	raw_data[1] = (*off >> 8) && 0xff;
+	raw_data[1] = (*off >> 8) & 0xff;
 	raw_data[2] = s < 20 ? s : 20;
 	if (*off + raw_data[2] > 0xff)
 		raw_data[2] = 0x100 - *off;
@@ -1583,7 +1583,7 @@
 
 	memset(raw_data, 0, sizeof(raw_data));
 	raw_data[0] = *off & 0xff;
-	raw_data[1] = (*off >> 8) && 0xff;
+	raw_data[1] = (*off >> 8) & 0xff;
 	raw_data[2] = s < 20 ? s : 20;
 	if (*off + raw_data[2] > 0xff)
 		raw_data[2] = 0x100 - *off;
@@ -1867,6 +1867,7 @@
 			report->id, raw_size);
 	hid_debug_event(hdev, buff);
 	if (raw_size + 5 > sizeof(raw_data)) {
+		kfree(buff);
 		hid_debug_event(hdev, " TOO BIG\n");
 		return;
 	} else {
@@ -2328,8 +2329,7 @@
 			(flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0),
 			hdev->debug_dir, data, &picolcd_debug_flash_fops);
 	} else if (flash_r || flash_w)
-		dev_warn(&hdev->dev, "Unexpected FLASH access reports, "
-				"please submit rdesc for review\n");
+		hid_warn(hdev, "Unexpected FLASH access reports, please submit rdesc for review\n");
 }
 
 static void picolcd_exit_devfs(struct picolcd_data *data)
@@ -2457,13 +2457,13 @@
 		return -ENODEV;
 	if (report->maxfield != 1 || report->field[0]->report_count != 2 ||
 			report->field[0]->report_size != 8) {
-		dev_err(&hdev->dev, "unsupported KEY_STATE report");
+		hid_err(hdev, "unsupported KEY_STATE report\n");
 		return -EINVAL;
 	}
 
 	idev = input_allocate_device();
 	if (idev == NULL) {
-		dev_err(&hdev->dev, "failed to allocate input device");
+		hid_err(hdev, "failed to allocate input device\n");
 		return -ENOMEM;
 	}
 	input_set_drvdata(idev, hdev);
@@ -2485,7 +2485,7 @@
 		input_set_capability(idev, EV_KEY, data->keycode[i]);
 	error = input_register_device(idev);
 	if (error) {
-		dev_err(&hdev->dev, "error registering the input device");
+		hid_err(hdev, "error registering the input device\n");
 		input_free_device(idev);
 		return error;
 	}
@@ -2522,9 +2522,8 @@
 		return error;
 
 	if (data->version[0] != 0 && data->version[1] != 3)
-		dev_info(&hdev->dev, "Device with untested firmware revision, "
-				"please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
-				dev_name(&hdev->dev));
+		hid_info(hdev, "Device with untested firmware revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+			 dev_name(&hdev->dev));
 
 	/* Setup keypad input device */
 	error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev));
@@ -2581,9 +2580,8 @@
 		return error;
 
 	if (data->version[0] != 1 && data->version[1] != 0)
-		dev_info(&hdev->dev, "Device with untested bootloader revision, "
-				"please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
-				dev_name(&hdev->dev));
+		hid_info(hdev, "Device with untested bootloader revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n",
+			 dev_name(&hdev->dev));
 
 	picolcd_init_devfs(data, NULL, NULL,
 			picolcd_out_report(REPORT_BL_READ_MEMORY, hdev),
@@ -2605,7 +2603,7 @@
 	 */
 	data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL);
 	if (data == NULL) {
-		dev_err(&hdev->dev, "can't allocate space for Minibox PicoLCD device data\n");
+		hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n");
 		error = -ENOMEM;
 		goto err_no_cleanup;
 	}
@@ -2621,7 +2619,7 @@
 	/* Parse the device reports and start it up */
 	error = hid_parse(hdev);
 	if (error) {
-		dev_err(&hdev->dev, "device report parse failed\n");
+		hid_err(hdev, "device report parse failed\n");
 		goto err_cleanup_data;
 	}
 
@@ -2631,25 +2629,25 @@
 	error = hid_hw_start(hdev, 0);
 	hdev->claimed = 0;
 	if (error) {
-		dev_err(&hdev->dev, "hardware start failed\n");
+		hid_err(hdev, "hardware start failed\n");
 		goto err_cleanup_data;
 	}
 
-	error = hdev->ll_driver->open(hdev);
+	error = hid_hw_open(hdev);
 	if (error) {
-		dev_err(&hdev->dev, "failed to open input interrupt pipe for key and IR events\n");
+		hid_err(hdev, "failed to open input interrupt pipe for key and IR events\n");
 		goto err_cleanup_hid_hw;
 	}
 
 	error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay);
 	if (error) {
-		dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+		hid_err(hdev, "failed to create sysfs attributes\n");
 		goto err_cleanup_hid_ll;
 	}
 
 	error = device_create_file(&hdev->dev, &dev_attr_operation_mode);
 	if (error) {
-		dev_err(&hdev->dev, "failed to create sysfs attributes\n");
+		hid_err(hdev, "failed to create sysfs attributes\n");
 		goto err_cleanup_sysfs1;
 	}
 
@@ -2668,7 +2666,7 @@
 err_cleanup_sysfs1:
 	device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
 err_cleanup_hid_ll:
-	hdev->ll_driver->close(hdev);
+	hid_hw_close(hdev);
 err_cleanup_hid_hw:
 	hid_hw_stop(hdev);
 err_cleanup_data:
@@ -2699,7 +2697,7 @@
 	picolcd_exit_devfs(data);
 	device_remove_file(&hdev->dev, &dev_attr_operation_mode);
 	device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay);
-	hdev->ll_driver->close(hdev);
+	hid_hw_close(hdev);
 	hid_hw_stop(hdev);
 	hid_set_drvdata(hdev, NULL);
 
@@ -2753,7 +2751,7 @@
 {
 	hid_unregister_driver(&picolcd_driver);
 #ifdef CONFIG_HID_PICOLCD_FB
-	flush_scheduled_work();
+	flush_work_sync(&picolcd_fb_cleanup);
 	WARN_ON(fb_pending);
 #endif
 }
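Among the picolcd changes above, the two hunks that turn "(*off >> 8) && 0xff" into "(*off >> 8) & 0xff" are a behavioural fix rather than a cleanup: the logical AND collapses the high address byte to 0 or 1 instead of extracting it. A standalone illustration with an arbitrary example offset:

#include <stdio.h>

int main(void)
{
	unsigned int off = 0x1234;	/* arbitrary example offset */

	unsigned char wrong_hi = (off >> 8) && 0xff;	/* logical AND: 1 */
	unsigned char right_hi = (off >> 8) & 0xff;	/* bitwise AND: 0x12 */
	unsigned char lo       = off & 0xff;		/* 0x34 */

	printf("wrong: 0x%02x  fixed: 0x%02x  low: 0x%02x\n",
	       wrong_hi, right_hi, lo);
	return 0;
}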
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 9f41e2b..06e5300 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -103,7 +103,7 @@
 	*/
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output reports found\n");
+		hid_err(hid, "no output reports found\n");
 		return -ENODEV;
 	}
 
@@ -112,14 +112,13 @@
 		report_ptr = report_ptr->next;
 
 		if (report_ptr == report_list) {
-			dev_err(&hid->dev, "required output report is "
-					"missing\n");
+			hid_err(hid, "required output report is missing\n");
 			return -ENODEV;
 		}
 
 		report = list_entry(report_ptr, struct hid_report, list);
 		if (report->maxfield < 1) {
-			dev_err(&hid->dev, "no fields in the report\n");
+			hid_err(hid, "no fields in the report\n");
 			return -ENODEV;
 		}
 
@@ -137,7 +136,7 @@
 			weak = &report->field[3]->value[0];
 			debug("detected 4-field device");
 		} else {
-			dev_err(&hid->dev, "not enough fields or values\n");
+			hid_err(hid, "not enough fields or values\n");
 			return -ENODEV;
 		}
 
@@ -164,8 +163,7 @@
 		usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
 	}
 
-	dev_info(&hid->dev, "Force feedback for PantherLord/GreenAsia "
-	       "devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+	hid_info(hid, "Force feedback for PantherLord/GreenAsia devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
 	return 0;
 }
@@ -185,13 +183,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 48eab84..ab19f29 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -16,6 +16,8 @@
  * any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/usb.h>
@@ -130,7 +132,7 @@
 	return -EINVAL;
 }
 
-static DEVICE_ATTR(channel, S_IRUGO | S_IWUGO, show_channel,
+static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel,
 		store_channel);
 
 static struct device_attribute *sysfs_device_attr_channel = {
@@ -169,7 +171,7 @@
 	return -EINVAL;
 }
 
-static DEVICE_ATTR(sustain, S_IRUGO | S_IWUGO, show_sustain,
+static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain,
 		store_sustain);
 
 static struct device_attribute *sysfs_device_attr_sustain = {
@@ -207,7 +209,7 @@
 	return -EINVAL;
 }
 
-static DEVICE_ATTR(octave, S_IRUGO | S_IWUGO, show_octave,
+static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave,
 		store_octave);
 
 static struct device_attribute *sysfs_device_attr_octave = {
@@ -285,11 +287,11 @@
 			continue;
 
 		if (report->maxfield < 1) {
-			dev_err(&hdev->dev, "output report is empty\n");
+			hid_err(hdev, "output report is empty\n");
 			break;
 		}
 		if (report->field[0]->report_count != 2) {
-			dev_err(&hdev->dev, "field count too low\n");
+			hid_err(hdev, "field count too low\n");
 			break;
 		}
 		pm->pcmidi_report6 = report;
@@ -746,8 +748,8 @@
 	if (*rsize == 178 &&
 	      rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
 	      rdesc[113] == 0xff) {
-		dev_info(&hdev->dev, "fixing up pc-midi keyboard report "
-			"descriptor\n");
+		hid_info(hdev,
+			 "fixing up pc-midi keyboard report descriptor\n");
 
 		rdesc[144] = 0x18; /* report 4: was 0x10 report count */
 	}
@@ -805,7 +807,7 @@
 
 	pk = kzalloc(sizeof(*pk), GFP_KERNEL);
 	if (pk == NULL) {
-		dev_err(&hdev->dev, "prodikeys: can't alloc descriptor\n");
+		hid_err(hdev, "can't alloc descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -813,8 +815,7 @@
 
 	pm = kzalloc(sizeof(*pm), GFP_KERNEL);
 	if (pm == NULL) {
-		dev_err(&hdev->dev,
-			"prodikeys: can't alloc descriptor\n");
+		hid_err(hdev, "can't alloc descriptor\n");
 		ret = -ENOMEM;
 		goto err_free;
 	}
@@ -827,7 +828,7 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "prodikeys: hid parse failed\n");
+		hid_err(hdev, "hid parse failed\n");
 		goto err_free;
 	}
 
@@ -837,7 +838,7 @@
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "prodikeys: hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
@@ -896,7 +897,7 @@
 
 	ret = hid_register_driver(&pk_driver);
 	if (ret)
-		printk(KERN_ERR "can't register prodikeys driver\n");
+		pr_err("can't register prodikeys driver\n");
 
 	return ret;
 }
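The DEVICE_ATTR() permission changes in hid-prodikeys.c above replace S_IWUGO with S_IWUSR | S_IWGRP, i.e. they drop the world-writable bit from the channel, sustain and octave sysfs attributes (mode 0666 becomes 0664). A quick standalone check using the ordinary <sys/stat.h> bits (S_IRUGO/S_IWUGO are kernel-only shorthands, so they are spelled out here):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned int old_mode = S_IRUSR | S_IRGRP | S_IROTH |	/* S_IRUGO */
				S_IWUSR | S_IWGRP | S_IWOTH;	/* S_IWUGO */
	unsigned int new_mode = S_IRUSR | S_IRGRP | S_IROTH |
				S_IWUSR | S_IWGRP;

	printf("old: %04o  new: %04o\n", old_mode, new_mode);
	/* prints: old: 0666  new: 0664 */
	return 0;
}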
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
index 54d3db5..87a54df 100644
--- a/drivers/hid/hid-quanta.c
+++ b/drivers/hid/hid-quanta.c
@@ -195,7 +195,7 @@
 
 	td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
 	if (!td) {
-		dev_err(&hdev->dev, "cannot allocate Quanta Touch data\n");
+		hid_err(hdev, "cannot allocate Quanta Touch data\n");
 		return -ENOMEM;
 	}
 	td->valid = false;
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index f776957..cbd8cc4 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -35,6 +35,11 @@
 #include "hid-roccat.h"
 #include "hid-roccat-kone.h"
 
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+/* kone_class is used for creating sysfs attributes via roccat char device */
+static struct class *kone_class;
+
 static void kone_set_settings_checksum(struct kone_settings *settings)
 {
 	uint16_t checksum = 0;
@@ -90,8 +95,7 @@
 		kfree(data);
 		return 0;
 	} else { /* unknown answer */
-		dev_err(&usb_dev->dev, "got retval %d when checking write\n",
-				*data);
+		hid_err(usb_dev, "got retval %d when checking write\n", *data);
 		kfree(data);
 		return -EIO;
 	}
@@ -262,7 +266,8 @@
 static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
 
 	if (off >= sizeof(struct kone_settings))
@@ -286,7 +291,8 @@
 static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
 		struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count) {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
 	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
 	int retval = 0, difference;
@@ -319,10 +325,11 @@
 	return sizeof(struct kone_settings);
 }
 
-static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count, int number) {
-	struct device *dev = container_of(kobj, struct device, kobj);
+static ssize_t kone_sysfs_read_profilex(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count) {
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
 
 	if (off >= sizeof(struct kone_profile))
@@ -332,47 +339,18 @@
 		count = sizeof(struct kone_profile) - off;
 
 	mutex_lock(&kone->kone_lock);
-	memcpy(buf, ((char const *)&kone->profiles[number - 1]) + off, count);
+	memcpy(buf, ((char const *)&kone->profiles[*(uint *)(attr->private)]) + off, count);
 	mutex_unlock(&kone->kone_lock);
 
 	return count;
 }
 
-static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
-}
-
-static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
-}
-
-static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
-}
-
-static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
-}
-
-static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
-}
-
 /* Writes data only if different to stored data */
-static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count, int number) {
-	struct device *dev = container_of(kobj, struct device, kobj);
+static ssize_t kone_sysfs_write_profilex(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr,
+		char *buf, loff_t off, size_t count) {
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
 	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
 	struct kone_profile *profile;
@@ -382,13 +360,14 @@
 	if (off != 0 || count != sizeof(struct kone_profile))
 		return -EINVAL;
 
-	profile = &kone->profiles[number - 1];
+	profile = &kone->profiles[*(uint *)(attr->private)];
 
 	mutex_lock(&kone->kone_lock);
 	difference = memcmp(buf, profile, sizeof(struct kone_profile));
 	if (difference) {
 		retval = kone_set_profile(usb_dev,
-				(struct kone_profile const *)buf, number);
+				(struct kone_profile const *)buf,
+				*(uint *)(attr->private) + 1);
 		if (!retval)
 			memcpy(profile, buf, sizeof(struct kone_profile));
 	}
@@ -400,47 +379,19 @@
 	return sizeof(struct kone_profile);
 }
 
-static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
-}
-
-static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
-}
-
-static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
-}
-
-static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
-}
-
-static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
-		struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count) {
-	return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
-}
-
 static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct kone_device *kone =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
 }
 
 static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct kone_device *kone =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
 }
 
@@ -448,11 +399,15 @@
 static ssize_t kone_sysfs_show_weight(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	struct kone_device *kone;
+	struct usb_device *usb_dev;
 	int weight = 0;
 	int retval;
 
+	dev = dev->parent->parent;
+	kone = hid_get_drvdata(dev_get_drvdata(dev));
+	usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
 	mutex_lock(&kone->kone_lock);
 	retval = kone_get_weight(usb_dev, &weight);
 	mutex_unlock(&kone->kone_lock);
@@ -465,14 +420,16 @@
 static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct kone_device *kone =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
 }
 
 static ssize_t kone_sysfs_show_tcu(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct kone_device *kone =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.tcu);
 }
 
@@ -504,11 +461,15 @@
 static ssize_t kone_sysfs_set_tcu(struct device *dev,
 		struct device_attribute *attr, char const *buf, size_t size)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	struct kone_device *kone;
+	struct usb_device *usb_dev;
 	int retval;
 	unsigned long state;
 
+	dev = dev->parent->parent;
+	kone = hid_get_drvdata(dev_get_drvdata(dev));
+	usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
 	retval = strict_strtoul(buf, 10, &state);
 	if (retval)
 		return retval;
@@ -556,7 +517,7 @@
 
 		retval = kone_set_settings(usb_dev, &kone->settings);
 		if (retval) {
-			dev_err(&usb_dev->dev, "couldn't set tcu state\n");
+			hid_err(usb_dev, "couldn't set tcu state\n");
 			/*
 			 * try to reread valid settings into buffer overwriting
 			 * first error code
@@ -570,7 +531,7 @@
 
 	retval = size;
 exit_no_settings:
-	dev_err(&usb_dev->dev, "couldn't read settings\n");
+	hid_err(usb_dev, "couldn't read settings\n");
 exit_unlock:
 	mutex_unlock(&kone->kone_lock);
 	return retval;
@@ -579,18 +540,23 @@
 static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
+	struct kone_device *kone =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", kone->settings.startup_profile);
 }
 
 static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
 		struct device_attribute *attr, char const *buf, size_t size)
 {
-	struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
-	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	struct kone_device *kone;
+	struct usb_device *usb_dev;
 	int retval;
 	unsigned long new_startup_profile;
 
+	dev = dev->parent->parent;
+	kone = hid_get_drvdata(dev_get_drvdata(dev));
+	usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
 	retval = strict_strtoul(buf, 10, &new_startup_profile);
 	if (retval)
 		return retval;
@@ -617,160 +583,92 @@
 	return size;
 }
 
-/*
- * Read actual dpi settings.
- * Returns raw value for further processing. Refer to enum kone_polling_rates to
- * get real value.
- */
-static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
+static struct device_attribute kone_attributes[] = {
+	/*
+	 * Read actual dpi settings.
+	 * Returns raw value for further processing. Refer to enum
+	 * kone_polling_rates to get real value.
+	 */
+	__ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL),
+	__ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL),
 
-static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
+	/*
+	 * The mouse can be equipped with one of four supplied weights from 5
+	 * to 20 grams which are recognized and its value can be read out.
+	 * This returns the raw value reported by the mouse for easy evaluation
+	 * by software. Refer to enum kone_weights to get corresponding real
+	 * weight.
+	 */
+	__ATTR(weight, 0440, kone_sysfs_show_weight, NULL),
 
-/*
- * The mouse can be equipped with one of four supplied weights from 5 to 20
- * grams which are recognized and its value can be read out.
- * This returns the raw value reported by the mouse for easy evaluation by
- * software. Refer to enum kone_weights to get corresponding real weight.
- */
-static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
+	/*
+	 * Prints firmware version stored in mouse as integer.
+	 * The raw value reported by the mouse is returned for easy evaluation,
+	 * to get the real version number the decimal point has to be shifted 2
+	 * positions to the left. E.g. a value of 138 means 1.38.
+	 */
+	__ATTR(firmware_version, 0440,
+			kone_sysfs_show_firmware_version, NULL),
 
-/*
- * Prints firmware version stored in mouse as integer.
- * The raw value reported by the mouse is returned for easy evaluation, to get
- * the real version number the decimal point has to be shifted 2 positions to
- * the left. E.g. a value of 138 means 1.38.
- */
-static DEVICE_ATTR(firmware_version, 0440,
-		kone_sysfs_show_firmware_version, NULL);
+	/*
+	 * Prints state of Tracking Control Unit as number where 0 = off and
+	 * 1 = on. Writing 0 deactivates tcu and writing 1 calibrates and
+	 * activates the tcu
+	 */
+	__ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu),
 
-/*
- * Prints state of Tracking Control Unit as number where 0 = off and 1 = on
- * Writing 0 deactivates tcu and writing 1 calibrates and activates the tcu
- */
-static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
-
-/* Prints and takes the number of the profile the mouse starts with */
-static DEVICE_ATTR(startup_profile, 0660,
-		kone_sysfs_show_startup_profile,
-		kone_sysfs_set_startup_profile);
-
-static struct attribute *kone_attributes[] = {
-		&dev_attr_actual_dpi.attr,
-		&dev_attr_actual_profile.attr,
-		&dev_attr_weight.attr,
-		&dev_attr_firmware_version.attr,
-		&dev_attr_tcu.attr,
-		&dev_attr_startup_profile.attr,
-		NULL
+	/* Prints and takes the number of the profile the mouse starts with */
+	__ATTR(startup_profile, 0660,
+			kone_sysfs_show_startup_profile,
+			kone_sysfs_set_startup_profile),
+	__ATTR_NULL
 };
 
-static struct attribute_group kone_attribute_group = {
-		.attrs = kone_attributes
+static struct bin_attribute kone_bin_attributes[] = {
+	{
+		.attr = { .name = "settings", .mode = 0660 },
+		.size = sizeof(struct kone_settings),
+		.read = kone_sysfs_read_settings,
+		.write = kone_sysfs_write_settings
+	},
+	{
+		.attr = { .name = "profile1", .mode = 0660 },
+		.size = sizeof(struct kone_profile),
+		.read = kone_sysfs_read_profilex,
+		.write = kone_sysfs_write_profilex,
+		.private = &profile_numbers[0]
+	},
+	{
+		.attr = { .name = "profile2", .mode = 0660 },
+		.size = sizeof(struct kone_profile),
+		.read = kone_sysfs_read_profilex,
+		.write = kone_sysfs_write_profilex,
+		.private = &profile_numbers[1]
+	},
+	{
+		.attr = { .name = "profile3", .mode = 0660 },
+		.size = sizeof(struct kone_profile),
+		.read = kone_sysfs_read_profilex,
+		.write = kone_sysfs_write_profilex,
+		.private = &profile_numbers[2]
+	},
+	{
+		.attr = { .name = "profile4", .mode = 0660 },
+		.size = sizeof(struct kone_profile),
+		.read = kone_sysfs_read_profilex,
+		.write = kone_sysfs_write_profilex,
+		.private = &profile_numbers[3]
+	},
+	{
+		.attr = { .name = "profile5", .mode = 0660 },
+		.size = sizeof(struct kone_profile),
+		.read = kone_sysfs_read_profilex,
+		.write = kone_sysfs_write_profilex,
+		.private = &profile_numbers[4]
+	},
+	__ATTR_NULL
 };
 
-static struct bin_attribute kone_settings_attr = {
-	.attr = { .name = "settings", .mode = 0660 },
-	.size = sizeof(struct kone_settings),
-	.read = kone_sysfs_read_settings,
-	.write = kone_sysfs_write_settings
-};
-
-static struct bin_attribute kone_profile1_attr = {
-	.attr = { .name = "profile1", .mode = 0660 },
-	.size = sizeof(struct kone_profile),
-	.read = kone_sysfs_read_profile1,
-	.write = kone_sysfs_write_profile1
-};
-
-static struct bin_attribute kone_profile2_attr = {
-	.attr = { .name = "profile2", .mode = 0660 },
-	.size = sizeof(struct kone_profile),
-	.read = kone_sysfs_read_profile2,
-	.write = kone_sysfs_write_profile2
-};
-
-static struct bin_attribute kone_profile3_attr = {
-	.attr = { .name = "profile3", .mode = 0660 },
-	.size = sizeof(struct kone_profile),
-	.read = kone_sysfs_read_profile3,
-	.write = kone_sysfs_write_profile3
-};
-
-static struct bin_attribute kone_profile4_attr = {
-	.attr = { .name = "profile4", .mode = 0660 },
-	.size = sizeof(struct kone_profile),
-	.read = kone_sysfs_read_profile4,
-	.write = kone_sysfs_write_profile4
-};
-
-static struct bin_attribute kone_profile5_attr = {
-	.attr = { .name = "profile5", .mode = 0660 },
-	.size = sizeof(struct kone_profile),
-	.read = kone_sysfs_read_profile5,
-	.write = kone_sysfs_write_profile5
-};
-
-static int kone_create_sysfs_attributes(struct usb_interface *intf)
-{
-	int retval;
-
-	retval = sysfs_create_group(&intf->dev.kobj, &kone_attribute_group);
-	if (retval)
-		goto exit_1;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_settings_attr);
-	if (retval)
-		goto exit_2;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile1_attr);
-	if (retval)
-		goto exit_3;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile2_attr);
-	if (retval)
-		goto exit_4;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile3_attr);
-	if (retval)
-		goto exit_5;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile4_attr);
-	if (retval)
-		goto exit_6;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj, &kone_profile5_attr);
-	if (retval)
-		goto exit_7;
-
-	return 0;
-
-exit_7:
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
-exit_6:
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
-exit_5:
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
-exit_4:
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
-exit_3:
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
-exit_2:
-	sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
-exit_1:
-	return retval;
-}
-
-static void kone_remove_sysfs_attributes(struct usb_interface *intf)
-{
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile5_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile4_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile3_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile2_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_profile1_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &kone_settings_attr);
-	sysfs_remove_group(&intf->dev.kobj, &kone_attribute_group);
-}
-
 static int kone_init_kone_device_struct(struct usb_device *usb_dev,
 		struct kone_device *kone)
 {
@@ -818,32 +716,25 @@
 
 		kone = kzalloc(sizeof(*kone), GFP_KERNEL);
 		if (!kone) {
-			dev_err(&hdev->dev, "can't alloc device descriptor\n");
+			hid_err(hdev, "can't alloc device descriptor\n");
 			return -ENOMEM;
 		}
 		hid_set_drvdata(hdev, kone);
 
 		retval = kone_init_kone_device_struct(usb_dev, kone);
 		if (retval) {
-			dev_err(&hdev->dev,
-					"couldn't init struct kone_device\n");
+			hid_err(hdev, "couldn't init struct kone_device\n");
 			goto exit_free;
 		}
 
-		retval = roccat_connect(hdev);
+		retval = roccat_connect(kone_class, hdev);
 		if (retval < 0) {
-			dev_err(&hdev->dev, "couldn't init char dev\n");
+			hid_err(hdev, "couldn't init char dev\n");
 			/* be tolerant about not getting chrdev */
 		} else {
 			kone->roccat_claimed = 1;
 			kone->chrdev_minor = retval;
 		}
-
-		retval = kone_create_sysfs_attributes(intf);
-		if (retval) {
-			dev_err(&hdev->dev, "cannot create sysfs files\n");
-			goto exit_free;
-		}
 	} else {
 		hid_set_drvdata(hdev, NULL);
 	}
@@ -854,7 +745,6 @@
 	return retval;
 }
 
-
 static void kone_remove_specials(struct hid_device *hdev)
 {
 	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
@@ -862,7 +752,6 @@
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			== USB_INTERFACE_PROTOCOL_MOUSE) {
-		kone_remove_sysfs_attributes(intf);
 		kone = hid_get_drvdata(hdev);
 		if (kone->roccat_claimed)
 			roccat_disconnect(kone->chrdev_minor);
@@ -876,19 +765,19 @@
 
 	retval = hid_parse(hdev);
 	if (retval) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto exit;
 	}
 
 	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (retval) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto exit;
 	}
 
 	retval = kone_init_specials(hdev);
 	if (retval) {
-		dev_err(&hdev->dev, "couldn't install mouse\n");
+		hid_err(hdev, "couldn't install mouse\n");
 		goto exit_stop;
 	}
 
@@ -1006,11 +895,24 @@
 
 static int __init kone_init(void)
 {
-	return hid_register_driver(&kone_driver);
+	int retval;
+
+	/* class name has to be same as driver name */
+	kone_class = class_create(THIS_MODULE, "kone");
+	if (IS_ERR(kone_class))
+		return PTR_ERR(kone_class);
+	kone_class->dev_attrs = kone_attributes;
+	kone_class->dev_bin_attrs = kone_bin_attributes;
+
+	retval = hid_register_driver(&kone_driver);
+	if (retval)
+		class_destroy(kone_class);
+	return retval;
 }
 
 static void __exit kone_exit(void)
 {
+	class_destroy(kone_class);
 	hid_unregister_driver(&kone_driver);
 }
 
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 130d656..64abb5b 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -14,14 +14,11 @@
 
 #include <linux/types.h>
 
-#pragma pack(push)
-#pragma pack(1)
-
 struct kone_keystroke {
 	uint8_t key;
 	uint8_t action;
 	uint16_t period; /* in milliseconds */
-};
+} __attribute__ ((__packed__));
 
 enum kone_keystroke_buttons {
 	kone_keystroke_button_1 = 0xf0, /* left mouse button */
@@ -44,7 +41,7 @@
 	uint8_t macro_name[16]; /* can be max 15 chars long */
 	uint8_t count;
 	struct kone_keystroke keystrokes[20];
-};
+} __attribute__ ((__packed__));
 
 enum kone_button_info_types {
 	/* valid button types until firmware 1.32 */
@@ -95,7 +92,7 @@
 	uint8_t red;   /* range 0x00-0xff */
 	uint8_t green; /* range 0x00-0xff */
 	uint8_t blue;  /* range 0x00-0xff */
-};
+} __attribute__ ((__packed__));
 
 struct kone_profile {
 	uint16_t size; /* always 975 */
@@ -130,7 +127,7 @@
 	struct kone_button_info button_infos[8];
 
 	uint16_t checksum; /* \brief holds checksum of struct */
-};
+} __attribute__ ((__packed__));
 
 enum kone_polling_rates {
 	kone_polling_rate_125 = 1,
@@ -147,7 +144,7 @@
 	uint8_t  calibration_data[4];
 	uint8_t  unknown3[2];
 	uint16_t checksum;
-};
+} __attribute__ ((__packed__));
 
 /*
  * 12 byte mouse event read by interrupt_read
@@ -163,7 +160,7 @@
 	uint8_t event;
 	uint8_t value; /* press = 0, release = 1 */
 	uint8_t macro_key; /* 0 to 8 */
-};
+} __attribute__ ((__packed__));
 
 enum kone_mouse_events {
 	/* osd events are thought to be display on screen */
@@ -191,9 +188,7 @@
 	uint8_t event;
 	uint8_t value; /* holds dpi or profile value */
 	uint8_t key; /* macro key on overlong macro execution */
-};
-
-#pragma pack(pop)
+} __attribute__ ((__packed__));
 
 struct kone_device {
 	/*
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
new file mode 100644
index 0000000..1608c8d
--- /dev/null
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -0,0 +1,837 @@
+/*
+ * Roccat Kone[+] driver for Linux
+ *
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Kone[+] is an updated/improved version of the Kone with more memory
+ * and functionality and without the non-standard behaviours the Kone had.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "hid-ids.h"
+#include "hid-roccat.h"
+#include "hid-roccat-koneplus.h"
+
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
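+/* koneplus_class is used for creating sysfs attributes via roccat chardev */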
+static struct class *koneplus_class;
+
+static void koneplus_profile_activated(struct koneplus_device *koneplus,
+		uint new_profile)
+{
+	koneplus->actual_profile = new_profile;
+}
+
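+/*
+ * Writes the control report; for profile requests, value is the profile
+ * number (0-4) the following settings/buttons transfer refers to.
+ */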
+static int koneplus_send_control(struct usb_device *usb_dev, uint value,
+		enum koneplus_control_requests request)
+{
+	int len;
+	struct koneplus_control *control;
+
+	if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
+			request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
+			value > 4)
+		return -EINVAL;
+
+	control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
+	if (!control)
+		return -ENOMEM;
+
+	control->command = KONEPLUS_COMMAND_CONTROL;
+	control->value = value;
+	control->request = request;
+
+	len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+			USB_REQ_SET_CONFIGURATION,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+			KONEPLUS_USB_COMMAND_CONTROL, 0, control,
+			sizeof(struct koneplus_control),
+			USB_CTRL_SET_TIMEOUT);
+
+	kfree(control);
+
+	if (len != sizeof(struct koneplus_control))
+		return len;
+
+	return 0;
+}
+
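+/*
+ * Reads a report via a class control transfer; bRequest 0x01
+ * (USB_REQ_CLEAR_FEATURE) equals the HID GET_REPORT request number.
+ */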
+static int koneplus_receive(struct usb_device *usb_dev, uint usb_command,
+		void *buf, uint size)
+{
+	int len;
+
+	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+			USB_REQ_CLEAR_FEATURE,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+			usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+	return (len != size) ? -EIO : 0;
+}
+
+static int koneplus_receive_control_status(struct usb_device *usb_dev)
+{
+	int retval;
+	struct koneplus_control *control;
+
+	control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL);
+	if (!control)
+		return -ENOMEM;
+
+	do {
+		retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+				control, sizeof(struct koneplus_control));
+
+		/* check if we get a completely wrong answer */
+		if (retval)
+			goto out;
+
+		if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OK) {
+			retval = 0;
+			goto out;
+		}
+
+		/*
+		 * indicates that hardware needs some more time
+		 * to complete action
+		 */
+		if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
+			msleep(500); /* windows driver uses 1000 */
+			continue;
+		}
+
+		/* seems to be critical - replug necessary */
+		if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) {
+			retval = -EINVAL;
+			goto out;
+		}
+
+		dev_err(&usb_dev->dev, "koneplus_receive_control_status: "
+				"unknown response value 0x%x\n", control->value);
+		retval = -EINVAL;
+		goto out;
+
+	} while (1);
+out:
+	kfree(control);
+	return retval;
+}
+
+static int koneplus_send(struct usb_device *usb_dev, uint command,
+		void *buf, uint size)
+{
+	int len;
+
+	len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+			USB_REQ_SET_CONFIGURATION,
+			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+			command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+	if (len != size)
+		return -EIO;
+
+	if (koneplus_receive_control_status(usb_dev))
+		return -EIO;
+
+	return 0;
+}
+
+static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
+		enum koneplus_control_requests request)
+{
+	int retval;
+
+	retval = koneplus_send_control(usb_dev, number, request);
+	if (retval)
+		return retval;
+
+	/* allow time to settle things - windows driver uses 500 */
+	msleep(100);
+
+	retval = koneplus_receive_control_status(usb_dev);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+static int koneplus_get_info(struct usb_device *usb_dev,
+		struct koneplus_info *buf)
+{
+	return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
+			buf, sizeof(struct koneplus_info));
+}
+
+static int koneplus_get_profile_settings(struct usb_device *usb_dev,
+		struct koneplus_profile_settings *buf, uint number)
+{
+	int retval;
+
+	retval = koneplus_select_profile(usb_dev, number,
+			KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
+	if (retval)
+		return retval;
+
+	return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+			buf, sizeof(struct koneplus_profile_settings));
+}
+
+static int koneplus_set_profile_settings(struct usb_device *usb_dev,
+		struct koneplus_profile_settings const *settings)
+{
+	return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+			(void *)settings, sizeof(struct koneplus_profile_settings));
+}
+
+static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
+		struct koneplus_profile_buttons *buf, int number)
+{
+	int retval;
+
+	retval = koneplus_select_profile(usb_dev, number,
+			KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
+	if (retval)
+		return retval;
+
+	return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+			buf, sizeof(struct koneplus_profile_buttons));
+}
+
+static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
+		struct koneplus_profile_buttons const *buttons)
+{
+	return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+			(void *)buttons, sizeof(struct koneplus_profile_buttons));
+}
+
+/* retval is 0-4 on success, < 0 on error */
+static int koneplus_get_startup_profile(struct usb_device *usb_dev)
+{
+	struct koneplus_startup_profile *buf;
+	int retval;
+
+	buf = kmalloc(sizeof(struct koneplus_startup_profile), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
+			buf, sizeof(struct koneplus_startup_profile));
+
+	if (retval)
+		goto out;
+
+	retval = buf->startup_profile;
+out:
+	kfree(buf);
+	return retval;
+}
+
+static int koneplus_set_startup_profile(struct usb_device *usb_dev,
+		int startup_profile)
+{
+	struct koneplus_startup_profile buf;
+
+	buf.command = KONEPLUS_COMMAND_STARTUP_PROFILE;
+	buf.size = sizeof(struct koneplus_startup_profile);
+	buf.startup_profile = startup_profile;
+
+	return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE,
+			(char *)&buf, sizeof(struct koneplus_startup_profile));
+}
+
+static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
+		char *buf, loff_t off, size_t count,
+		size_t real_size, uint command)
+{
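+	/*
+	 * kobj belongs to the roccat chardev; its parent is the hid device
+	 * and its grandparent the usb interface whose drvdata is the hid
+	 * device.
+	 */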
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval;
+
+	if (off != 0 || count != real_size)
+		return -EINVAL;
+
+	mutex_lock(&koneplus->koneplus_lock);
+	retval = koneplus_receive(usb_dev, command, buf, real_size);
+	mutex_unlock(&koneplus->koneplus_lock);
+
+	if (retval)
+		return retval;
+
+	return real_size;
+}
+
+static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
+		void const *buf, loff_t off, size_t count,
+		size_t real_size, uint command)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval;
+
+	if (off != 0 || count != real_size)
+		return -EINVAL;
+
+	mutex_lock(&koneplus->koneplus_lock);
+	retval = koneplus_send(usb_dev, command, (void *)buf, real_size);
+	mutex_unlock(&koneplus->koneplus_lock);
+
+	if (retval)
+		return retval;
+
+	return real_size;
+}
+
+static ssize_t koneplus_sysfs_write_macro(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	return koneplus_sysfs_write(fp, kobj, buf, off, count,
+			sizeof(struct koneplus_macro), KONEPLUS_USB_COMMAND_MACRO);
+}
+
+static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	return koneplus_sysfs_read(fp, kobj, buf, off, count,
+			sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+}
+
+static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	return koneplus_sysfs_write(fp, kobj, buf, off, count,
+			sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+}
+
+static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	return koneplus_sysfs_write(fp, kobj, buf, off, count,
+			sizeof(struct koneplus_tcu), KONEPLUS_USB_COMMAND_TCU);
+}
+
+static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	return koneplus_sysfs_read(fp, kobj, buf, off, count,
+			sizeof(struct koneplus_tcu_image), KONEPLUS_USB_COMMAND_TCU);
+}
+
+static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+	if (off >= sizeof(struct koneplus_profile_settings))
+		return 0;
+
+	if (off + count > sizeof(struct koneplus_profile_settings))
+		count = sizeof(struct koneplus_profile_settings) - off;
+
+	mutex_lock(&koneplus->koneplus_lock);
+	memcpy(buf, ((void const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
+			count);
+	mutex_unlock(&koneplus->koneplus_lock);
+
+	return count;
+}
+
+static ssize_t koneplus_sysfs_write_profile_settings(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval = 0;
+	int difference;
+	int profile_number;
+	struct koneplus_profile_settings *profile_settings;
+
+	if (off != 0 || count != sizeof(struct koneplus_profile_settings))
+		return -EINVAL;
+
+	profile_number = ((struct koneplus_profile_settings const *)buf)->number;
+	profile_settings = &koneplus->profile_settings[profile_number];
+
+	mutex_lock(&koneplus->koneplus_lock);
+	difference = memcmp(buf, profile_settings,
+			sizeof(struct koneplus_profile_settings));
+	if (difference) {
+		retval = koneplus_set_profile_settings(usb_dev,
+				(struct koneplus_profile_settings const *)buf);
+		if (!retval)
+			memcpy(profile_settings, buf,
+					sizeof(struct koneplus_profile_settings));
+	}
+	mutex_unlock(&koneplus->koneplus_lock);
+
+	if (retval)
+		return retval;
+
+	return sizeof(struct koneplus_profile_settings);
+}
+
+static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+
+	if (off >= sizeof(struct koneplus_profile_buttons))
+		return 0;
+
+	if (off + count > sizeof(struct koneplus_profile_buttons))
+		count = sizeof(struct koneplus_profile_buttons) - off;
+
+	mutex_lock(&koneplus->koneplus_lock);
+	memcpy(buf, ((void const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
+			count);
+	mutex_unlock(&koneplus->koneplus_lock);
+
+	return count;
+}
+
+static ssize_t koneplus_sysfs_write_profile_buttons(struct file *fp,
+		struct kobject *kobj, struct bin_attribute *attr, char *buf,
+		loff_t off, size_t count)
+{
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
+	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+	int retval = 0;
+	int difference;
+	uint profile_number;
+	struct koneplus_profile_buttons *profile_buttons;
+
+	if (off != 0 || count != sizeof(struct koneplus_profile_buttons))
+		return -EINVAL;
+
+	profile_number = ((struct koneplus_profile_buttons const *)buf)->number;
+	profile_buttons = &koneplus->profile_buttons[profile_number];
+
+	mutex_lock(&koneplus->koneplus_lock);
+	difference = memcmp(buf, profile_buttons,
+			sizeof(struct koneplus_profile_buttons));
+	if (difference) {
+		retval = koneplus_set_profile_buttons(usb_dev,
+				(struct koneplus_profile_buttons const *)buf);
+		if (!retval)
+			memcpy(profile_buttons, buf,
+					sizeof(struct koneplus_profile_buttons));
+	}
+	mutex_unlock(&koneplus->koneplus_lock);
+
+	if (retval)
+		return retval;
+
+	return sizeof(struct koneplus_profile_buttons);
+}
+
+static ssize_t koneplus_sysfs_show_startup_profile(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct koneplus_device *koneplus =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+	return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->startup_profile);
+}
+
+static ssize_t koneplus_sysfs_set_startup_profile(struct device *dev,
+		struct device_attribute *attr, char const *buf, size_t size)
+{
+	struct koneplus_device *koneplus;
+	struct usb_device *usb_dev;
+	unsigned long profile;
+	int retval;
+
+	dev = dev->parent->parent;
+	koneplus = hid_get_drvdata(dev_get_drvdata(dev));
+	usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+	retval = strict_strtoul(buf, 10, &profile);
+	if (retval)
+		return retval;
+
+	mutex_lock(&koneplus->koneplus_lock);
+	retval = koneplus_set_startup_profile(usb_dev, profile);
+	mutex_unlock(&koneplus->koneplus_lock);
+	if (retval)
+		return retval;
+
+	return size;
+}
+
+static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct koneplus_device *koneplus =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+	return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->actual_profile);
+}
+
+static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct koneplus_device *koneplus =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+	return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->info.firmware_version);
+}
+
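+/*
+ * Registered as koneplus_class->dev_attrs in koneplus_init(), so these
+ * files are created automatically for every device added to the class.
+ */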
+static struct device_attribute koneplus_attributes[] = {
+	__ATTR(startup_profile, 0660,
+			koneplus_sysfs_show_startup_profile,
+			koneplus_sysfs_set_startup_profile),
+	__ATTR(actual_profile, 0440,
+			koneplus_sysfs_show_actual_profile, NULL),
+	__ATTR(firmware_version, 0440,
+			koneplus_sysfs_show_firmware_version, NULL),
+	__ATTR_NULL
+};
+
+static struct bin_attribute koneplus_bin_attributes[] = {
+	{
+		.attr = { .name = "sensor", .mode = 0220 },
+		.size = sizeof(struct koneplus_sensor),
+		.read = koneplus_sysfs_read_sensor,
+		.write = koneplus_sysfs_write_sensor
+	},
+	{
+		.attr = { .name = "tcu", .mode = 0220 },
+		.size = sizeof(struct koneplus_tcu),
+		.write = koneplus_sysfs_write_tcu
+	},
+	{
+		.attr = { .name = "tcu_image", .mode = 0440 },
+		.size = sizeof(struct koneplus_tcu_image),
+		.read = koneplus_sysfs_read_tcu_image
+	},
+	{
+		.attr = { .name = "profile_settings", .mode = 0220 },
+		.size = sizeof(struct koneplus_profile_settings),
+		.write = koneplus_sysfs_write_profile_settings
+	},
+	{
+		.attr = { .name = "profile1_settings", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_settings),
+		.read = koneplus_sysfs_read_profilex_settings,
+		.private = &profile_numbers[0]
+	},
+	{
+		.attr = { .name = "profile2_settings", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_settings),
+		.read = koneplus_sysfs_read_profilex_settings,
+		.private = &profile_numbers[1]
+	},
+	{
+		.attr = { .name = "profile3_settings", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_settings),
+		.read = koneplus_sysfs_read_profilex_settings,
+		.private = &profile_numbers[2]
+	},
+	{
+		.attr = { .name = "profile4_settings", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_settings),
+		.read = koneplus_sysfs_read_profilex_settings,
+		.private = &profile_numbers[3]
+	},
+	{
+		.attr = { .name = "profile5_settings", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_settings),
+		.read = koneplus_sysfs_read_profilex_settings,
+		.private = &profile_numbers[4]
+	},
+	{
+		.attr = { .name = "profile_buttons", .mode = 0220 },
+		.size = sizeof(struct koneplus_profile_buttons),
+		.write = koneplus_sysfs_write_profile_buttons
+	},
+	{
+		.attr = { .name = "profile1_buttons", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_buttons),
+		.read = koneplus_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[0]
+	},
+	{
+		.attr = { .name = "profile2_buttons", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_buttons),
+		.read = koneplus_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[1]
+	},
+	{
+		.attr = { .name = "profile3_buttons", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_buttons),
+		.read = koneplus_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[2]
+	},
+	{
+		.attr = { .name = "profile4_buttons", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_buttons),
+		.read = koneplus_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[3]
+	},
+	{
+		.attr = { .name = "profile5_buttons", .mode = 0440 },
+		.size = sizeof(struct koneplus_profile_buttons),
+		.read = koneplus_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[4]
+	},
+	{
+		.attr = { .name = "macro", .mode = 0220 },
+		.size = sizeof(struct koneplus_macro),
+		.write = koneplus_sysfs_write_macro
+	},
+	__ATTR_NULL
+};
+
+static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
+		struct koneplus_device *koneplus)
+{
+	int retval, i;
+	static uint wait = 70; /* device will freeze with just 60 */
+
+	mutex_init(&koneplus->koneplus_lock);
+
+	koneplus->startup_profile = koneplus_get_startup_profile(usb_dev);
+
+	msleep(wait);
+	retval = koneplus_get_info(usb_dev, &koneplus->info);
+	if (retval)
+		return retval;
+
+	for (i = 0; i < 5; ++i) {
+		msleep(wait);
+		retval = koneplus_get_profile_settings(usb_dev,
+				&koneplus->profile_settings[i], i);
+		if (retval)
+			return retval;
+
+		msleep(wait);
+		retval = koneplus_get_profile_buttons(usb_dev,
+				&koneplus->profile_buttons[i], i);
+		if (retval)
+			return retval;
+	}
+
+	koneplus_profile_activated(koneplus, koneplus->startup_profile);
+
+	return 0;
+}
+
+static int koneplus_init_specials(struct hid_device *hdev)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct usb_device *usb_dev = interface_to_usbdev(intf);
+	struct koneplus_device *koneplus;
+	int retval;
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			== USB_INTERFACE_PROTOCOL_MOUSE) {
+
+		koneplus = kzalloc(sizeof(*koneplus), GFP_KERNEL);
+		if (!koneplus) {
+			dev_err(&hdev->dev, "can't alloc device descriptor\n");
+			return -ENOMEM;
+		}
+		hid_set_drvdata(hdev, koneplus);
+
+		retval = koneplus_init_koneplus_device_struct(usb_dev, koneplus);
+		if (retval) {
+			dev_err(&hdev->dev,
+					"couldn't init struct koneplus_device\n");
+			goto exit_free;
+		}
+
+		retval = roccat_connect(koneplus_class, hdev);
+		if (retval < 0) {
+			dev_err(&hdev->dev, "couldn't init char dev\n");
+		} else {
+			koneplus->chrdev_minor = retval;
+			koneplus->roccat_claimed = 1;
+		}
+	} else {
+		hid_set_drvdata(hdev, NULL);
+	}
+
+	return 0;
+exit_free:
+	kfree(koneplus);
+	return retval;
+}
+
+static void koneplus_remove_specials(struct hid_device *hdev)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct koneplus_device *koneplus;
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			== USB_INTERFACE_PROTOCOL_MOUSE) {
+		koneplus = hid_get_drvdata(hdev);
+		if (koneplus->roccat_claimed)
+			roccat_disconnect(koneplus->chrdev_minor);
+		kfree(koneplus);
+	}
+}
+
+static int koneplus_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int retval;
+
+	retval = hid_parse(hdev);
+	if (retval) {
+		dev_err(&hdev->dev, "parse failed\n");
+		goto exit;
+	}
+
+	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (retval) {
+		dev_err(&hdev->dev, "hw start failed\n");
+		goto exit;
+	}
+
+	retval = koneplus_init_specials(hdev);
+	if (retval) {
+		dev_err(&hdev->dev, "couldn't install mouse\n");
+		goto exit_stop;
+	}
+
+	return 0;
+
+exit_stop:
+	hid_hw_stop(hdev);
+exit:
+	return retval;
+}
+
+static void koneplus_remove(struct hid_device *hdev)
+{
+	koneplus_remove_specials(hdev);
+	hid_hw_stop(hdev);
+}
+
+static void koneplus_keep_values_up_to_date(struct koneplus_device *koneplus,
+		u8 const *data)
+{
+	struct koneplus_mouse_report_button const *button_report;
+
+	switch (data[0]) {
+	case KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON:
+		button_report = (struct koneplus_mouse_report_button const *)data;
+		switch (button_report->type) {
+		case KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE:
+			koneplus_profile_activated(koneplus, button_report->data1 - 1);
+			break;
+		}
+		break;
+	}
+}
+
+static void koneplus_report_to_chrdev(struct koneplus_device const *koneplus,
+		u8 const *data)
+{
+	struct koneplus_roccat_report roccat_report;
+	struct koneplus_mouse_report_button const *button_report;
+
+	if (data[0] != KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON)
+		return;
+
+	button_report = (struct koneplus_mouse_report_button const *)data;
+
+	if ((button_report->type == KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH ||
+			button_report->type == KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER) &&
+			button_report->data2 != KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS)
+		return;
+
+	roccat_report.type = button_report->type;
+	roccat_report.data1 = button_report->data1;
+	roccat_report.data2 = button_report->data2;
+	roccat_report.profile = koneplus->actual_profile + 1;
+	roccat_report_event(koneplus->chrdev_minor,
+			(uint8_t const *)&roccat_report,
+			sizeof(struct koneplus_roccat_report));
+}
+
+static int koneplus_raw_event(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+	struct koneplus_device *koneplus = hid_get_drvdata(hdev);
+
+	if (intf->cur_altsetting->desc.bInterfaceProtocol
+			!= USB_INTERFACE_PROTOCOL_MOUSE)
+		return 0;
+
+	koneplus_keep_values_up_to_date(koneplus, data);
+
+	if (koneplus->roccat_claimed)
+		koneplus_report_to_chrdev(koneplus, data);
+
+	return 0;
+}
+
+static const struct hid_device_id koneplus_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(hid, koneplus_devices);
+
+static struct hid_driver koneplus_driver = {
+		.name = "koneplus",
+		.id_table = koneplus_devices,
+		.probe = koneplus_probe,
+		.remove = koneplus_remove,
+		.raw_event = koneplus_raw_event
+};
+
+static int __init koneplus_init(void)
+{
+	int retval;
+
+	/* class name has to be same as driver name */
+	koneplus_class = class_create(THIS_MODULE, "koneplus");
+	if (IS_ERR(koneplus_class))
+		return PTR_ERR(koneplus_class);
+	koneplus_class->dev_attrs = koneplus_attributes;
+	koneplus_class->dev_bin_attrs = koneplus_bin_attributes;
+
+	retval = hid_register_driver(&koneplus_driver);
+	if (retval)
+		class_destroy(koneplus_class);
+	return retval;
+}
+
+static void __exit koneplus_exit(void)
+{
+	class_destroy(koneplus_class);
+	hid_unregister_driver(&koneplus_driver);
+}
+
+module_init(koneplus_init);
+module_exit(koneplus_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Kone[+] driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
new file mode 100644
index 0000000..57a5c1a
--- /dev/null
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -0,0 +1,224 @@
+#ifndef __HID_ROCCAT_KONEPLUS_H
+#define __HID_ROCCAT_KONEPLUS_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+/*
+ * Example exchange: writing request 0x80 (profile settings) and reading
+ * back value 1 (status ok).
+ */
+struct koneplus_control {
+	uint8_t command; /* KONEPLUS_COMMAND_CONTROL */
+	/*
+	 * value is profile number in range 0-4 for requesting settings and buttons
+	 * 1 if status ok for requesting status
+	 */
+	uint8_t value;
+	uint8_t request;
+} __attribute__ ((__packed__));
+
+enum koneplus_control_requests {
+	KONEPLUS_CONTROL_REQUEST_STATUS = 0x00,
+	KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
+	KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x90,
+};
+
+enum koneplus_control_values {
+	KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0,
+	KONEPLUS_CONTROL_REQUEST_STATUS_OK = 1,
+	KONEPLUS_CONTROL_REQUEST_STATUS_WAIT = 3,
+};
+
+struct koneplus_startup_profile {
+	uint8_t command; /* KONEPLUS_COMMAND_STARTUP_PROFILE */
+	uint8_t size; /* always 3 */
+	uint8_t startup_profile; /* Range 0-4! */
+} __attribute__ ((__packed__));
+
+struct koneplus_profile_settings {
+	uint8_t command; /* KONEPLUS_COMMAND_PROFILE_SETTINGS */
+	uint8_t size; /* always 43 */
+	uint8_t number; /* range 0-4 */
+	uint8_t advanced_sensitivity;
+	uint8_t sensitivity_x;
+	uint8_t sensitivity_y;
+	uint8_t cpi_levels_enabled;
+	uint8_t cpi_levels_x[5];
+	uint8_t cpi_startup_level; /* range 0-4 */
+	uint8_t cpi_levels_y[5]; /* range 1-60 means 100-6000 cpi */
+	uint8_t unknown1;
+	uint8_t polling_rate;
+	uint8_t lights_enabled;
+	uint8_t light_effect_mode;
+	uint8_t color_flow_effect;
+	uint8_t light_effect_type;
+	uint8_t light_effect_speed;
+	uint8_t lights[16];
+	uint16_t checksum;
+} __attribute__ ((__packed__));
+
+struct koneplus_profile_buttons {
+	uint8_t command; /* KONEPLUS_COMMAND_PROFILE_BUTTONS */
+	uint8_t size; /* always 77 */
+	uint8_t number; /* range 0-4 */
+	uint8_t data[72];
+	uint16_t checksum;
+} __attribute__ ((__packed__));
+
+struct koneplus_macro {
+	uint8_t command; /* KONEPLUS_COMMAND_MACRO */
+	uint16_t size; /* always 0x822 little endian */
+	uint8_t profile; /* range 0-4 */
+	uint8_t button; /* range 0-23 */
+	uint8_t data[2075];
+	uint16_t checksum;
+} __attribute__ ((__packed__));
+
+struct koneplus_info {
+	uint8_t command; /* KONEPLUS_COMMAND_INFO */
+	uint8_t size; /* always 6 */
+	uint8_t firmware_version;
+	uint8_t unknown[3];
+} __attribute__ ((__packed__));
+
+struct koneplus_e {
+	uint8_t command; /* KONEPLUS_COMMAND_E */
+	uint8_t size; /* always 3 */
+	uint8_t unknown; /* TODO 1; 0 before firmware update */
+} __attribute__ ((__packed__));
+
+struct koneplus_sensor {
+	uint8_t command;  /* KONEPLUS_COMMAND_SENSOR */
+	uint8_t size; /* always 6 */
+	uint8_t data[4];
+} __attribute__ ((__packed__));
+
+struct koneplus_firmware_write {
+	uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE */
+	uint8_t unknown[1025];
+} __attribute__ ((__packed__));
+
+struct koneplus_firmware_write_control {
+	uint8_t command; /* KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL */
+	/*
+	 * value is 1 on success
+	 * 3 means "not finished yet"
+	 */
+	uint8_t value;
+	uint8_t unknown; /* always 0x75 */
+} __attribute__ ((__packed__));
+
+struct koneplus_tcu {
+	uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
+	uint8_t data[2];
+} __attribute__ ((__packed__));
+
+struct koneplus_tcu_image {
+	uint16_t usb_command; /* KONEPLUS_USB_COMMAND_TCU */
+	uint8_t data[1024];
+	uint16_t checksum;
+} __attribute__ ((__packed__));
+
+enum koneplus_commands {
+	KONEPLUS_COMMAND_CONTROL = 0x4,
+	KONEPLUS_COMMAND_STARTUP_PROFILE = 0x5,
+	KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
+	KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
+	KONEPLUS_COMMAND_MACRO = 0x8,
+	KONEPLUS_COMMAND_INFO = 0x9,
+	KONEPLUS_COMMAND_E = 0xe,
+	KONEPLUS_COMMAND_SENSOR = 0xf,
+	KONEPLUS_COMMAND_FIRMWARE_WRITE = 0x1b,
+	KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
+};
+
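+/*
+ * wValue for the control transfers: high byte 0x03 marks a HID feature
+ * report, low byte is the report id (the command numbers above).
+ */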
+enum koneplus_usb_commands {
+	KONEPLUS_USB_COMMAND_CONTROL = 0x304,
+	KONEPLUS_USB_COMMAND_STARTUP_PROFILE = 0x305,
+	KONEPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
+	KONEPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
+	KONEPLUS_USB_COMMAND_MACRO = 0x308,
+	KONEPLUS_USB_COMMAND_INFO = 0x309,
+	KONEPLUS_USB_COMMAND_TCU = 0x30c,
+	KONEPLUS_USB_COMMAND_E = 0x30e,
+	KONEPLUS_USB_COMMAND_SENSOR = 0x30f,
+	KONEPLUS_USB_COMMAND_FIRMWARE_WRITE = 0x31b,
+	KONEPLUS_USB_COMMAND_FIRMWARE_WRITE_CONTROL = 0x31c,
+};
+
+enum koneplus_mouse_report_numbers {
+	KONEPLUS_MOUSE_REPORT_NUMBER_HID = 1,
+	KONEPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
+	KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3,
+};
+
+struct koneplus_mouse_report_button {
+	uint8_t report_number; /* always KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON */
+	uint8_t zero1;
+	uint8_t type;
+	uint8_t data1;
+	uint8_t data2;
+	uint8_t zero2;
+	uint8_t unknown[2];
+} __attribute__ ((__packed__));
+
+enum koneplus_mouse_report_button_types {
+	/* data1 = new profile range 1-5 */
+	KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE = 0x20,
+
+	/* data1 = button number range 1-24; data2 = action */
+	KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+
+	/* data1 = button number range 1-24; data2 = action */
+	KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+
+	/* data1 = setting number range 1-5 */
+	KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+
+	/* data1 and data2 = range 0x1-0xb */
+	KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+
+	/*
+	 * data1 = 22 = next track...
+	 * data2 = action
+	 */
+	KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+enum koneplus_mouse_report_button_action {
+	KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS = 0,
+	KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_RELEASE = 1,
+};
+
+struct koneplus_roccat_report {
+	uint8_t type;
+	uint8_t data1;
+	uint8_t data2;
+	uint8_t profile;
+} __attribute__ ((__packed__));
+
+struct koneplus_device {
+	int actual_profile;
+
+	int roccat_claimed;
+	int chrdev_minor;
+
+	struct mutex koneplus_lock;
+
+	int startup_profile;
+	struct koneplus_info info;
+	struct koneplus_profile_settings profile_settings[5];
+	struct koneplus_profile_buttons profile_buttons[5];
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 9bf2304..02c58e0 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -27,6 +27,11 @@
 #include "hid-roccat.h"
 #include "hid-roccat-pyra.h"
 
+static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+
+/* pyra_class is used for creating sysfs attributes via roccat char device */
+static struct class *pyra_class;
+
 static void profile_activated(struct pyra_device *pyra,
 		unsigned int new_profile)
 {
@@ -87,9 +92,8 @@
 			control.value == 1)
 			return 0;
 	else {
-		dev_err(&usb_dev->dev, "receive control status: "
-				"unknown response 0x%x 0x%x\n",
-				control.request, control.value);
+		hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
+			control.request, control.value);
 		return -EINVAL;
 	}
 }
@@ -221,9 +225,10 @@
 
 static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
 		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count, int number)
+		loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
 
 	if (off >= sizeof(struct pyra_profile_settings))
@@ -233,58 +238,19 @@
 		count = sizeof(struct pyra_profile_settings) - off;
 
 	mutex_lock(&pyra->pyra_lock);
-	memcpy(buf, ((char const *)&pyra->profile_settings[number]) + off,
+	memcpy(buf, ((char const *)&pyra->profile_settings[*(uint *)(attr->private)]) + off,
 			count);
 	mutex_unlock(&pyra->pyra_lock);
 
 	return count;
 }
 
-static ssize_t pyra_sysfs_read_profile1_settings(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_settings(fp, kobj,
-			attr, buf, off, count, 0);
-}
-
-static ssize_t pyra_sysfs_read_profile2_settings(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_settings(fp, kobj,
-			attr, buf, off, count, 1);
-}
-
-static ssize_t pyra_sysfs_read_profile3_settings(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_settings(fp, kobj,
-			attr, buf, off, count, 2);
-}
-
-static ssize_t pyra_sysfs_read_profile4_settings(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_settings(fp, kobj,
-			attr, buf, off, count, 3);
-}
-
-static ssize_t pyra_sysfs_read_profile5_settings(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_settings(fp, kobj,
-			attr, buf, off, count, 4);
-}
-
 static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
 		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count, int number)
+		loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
 
 	if (off >= sizeof(struct pyra_profile_buttons))
@@ -294,58 +260,19 @@
 		count = sizeof(struct pyra_profile_buttons) - off;
 
 	mutex_lock(&pyra->pyra_lock);
-	memcpy(buf, ((char const *)&pyra->profile_buttons[number]) + off,
+	memcpy(buf, ((char const *)&pyra->profile_buttons[*(uint *)(attr->private)]) + off,
 			count);
 	mutex_unlock(&pyra->pyra_lock);
 
 	return count;
 }
 
-static ssize_t pyra_sysfs_read_profile1_buttons(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_buttons(fp, kobj,
-			attr, buf, off, count, 0);
-}
-
-static ssize_t pyra_sysfs_read_profile2_buttons(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_buttons(fp, kobj,
-			attr, buf, off, count, 1);
-}
-
-static ssize_t pyra_sysfs_read_profile3_buttons(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_buttons(fp, kobj,
-			attr, buf, off, count, 2);
-}
-
-static ssize_t pyra_sysfs_read_profile4_buttons(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_buttons(fp, kobj,
-			attr, buf, off, count, 3);
-}
-
-static ssize_t pyra_sysfs_read_profile5_buttons(struct file *fp,
-		struct kobject *kobj, struct bin_attribute *attr, char *buf,
-		loff_t off, size_t count)
-{
-	return pyra_sysfs_read_profilex_buttons(fp, kobj,
-			attr, buf, off, count, 4);
-}
-
 static ssize_t pyra_sysfs_write_profile_settings(struct file *fp,
 		struct kobject *kobj, struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
 	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
 	int retval = 0;
@@ -381,7 +308,8 @@
 		struct kobject *kobj, struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
 	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
 	int retval = 0;
@@ -417,7 +345,8 @@
 		struct kobject *kobj, struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
 
 	if (off >= sizeof(struct pyra_settings))
@@ -437,7 +366,8 @@
 		struct kobject *kobj, struct bin_attribute *attr, char *buf,
 		loff_t off, size_t count)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev =
+			container_of(kobj, struct device, kobj)->parent->parent;
 	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
 	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
 	int retval = 0;
@@ -469,255 +399,125 @@
 static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+	struct pyra_device *pyra =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi);
 }
 
 static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+	struct pyra_device *pyra =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_profile);
 }
 
 static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+	struct pyra_device *pyra =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->firmware_version);
 }
 
 static ssize_t pyra_sysfs_show_startup_profile(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct pyra_device *pyra = hid_get_drvdata(dev_get_drvdata(dev));
+	struct pyra_device *pyra =
+			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
 	return snprintf(buf, PAGE_SIZE, "%d\n", pyra->settings.startup_profile);
 }
 
-static DEVICE_ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL);
-
-static DEVICE_ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
-
-static DEVICE_ATTR(firmware_version, 0440,
-		pyra_sysfs_show_firmware_version, NULL);
-
-static DEVICE_ATTR(startup_profile, 0440,
-		pyra_sysfs_show_startup_profile, NULL);
-
-static struct attribute *pyra_attributes[] = {
-		&dev_attr_actual_cpi.attr,
-		&dev_attr_actual_profile.attr,
-		&dev_attr_firmware_version.attr,
-		&dev_attr_startup_profile.attr,
-		NULL
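+/* exposed via pyra_class->dev_attrs on every roccat chardev in the class */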
+static struct device_attribute pyra_attributes[] = {
+	__ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL),
+	__ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL),
+	__ATTR(firmware_version, 0440,
+			pyra_sysfs_show_firmware_version, NULL),
+	__ATTR(startup_profile, 0440,
+			pyra_sysfs_show_startup_profile, NULL),
+	__ATTR_NULL
 };
 
-static struct attribute_group pyra_attribute_group = {
-		.attrs = pyra_attributes
-};
-
-static struct bin_attribute pyra_profile_settings_attr = {
+static struct bin_attribute pyra_bin_attributes[] = {
+	{
 		.attr = { .name = "profile_settings", .mode = 0220 },
 		.size = sizeof(struct pyra_profile_settings),
 		.write = pyra_sysfs_write_profile_settings
-};
-
-static struct bin_attribute pyra_profile1_settings_attr = {
+	},
+	{
 		.attr = { .name = "profile1_settings", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_settings),
-		.read = pyra_sysfs_read_profile1_settings
-};
-
-static struct bin_attribute pyra_profile2_settings_attr = {
+		.read = pyra_sysfs_read_profilex_settings,
+		.private = &profile_numbers[0]
+	},
+	{
 		.attr = { .name = "profile2_settings", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_settings),
-		.read = pyra_sysfs_read_profile2_settings
-};
-
-static struct bin_attribute pyra_profile3_settings_attr = {
+		.read = pyra_sysfs_read_profilex_settings,
+		.private = &profile_numbers[1]
+	},
+	{
 		.attr = { .name = "profile3_settings", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_settings),
-		.read = pyra_sysfs_read_profile3_settings
-};
-
-static struct bin_attribute pyra_profile4_settings_attr = {
+		.read = pyra_sysfs_read_profilex_settings,
+		.private = &profile_numbers[2]
+	},
+	{
 		.attr = { .name = "profile4_settings", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_settings),
-		.read = pyra_sysfs_read_profile4_settings
-};
-
-static struct bin_attribute pyra_profile5_settings_attr = {
+		.read = pyra_sysfs_read_profilex_settings,
+		.private = &profile_numbers[3]
+	},
+	{
 		.attr = { .name = "profile5_settings", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_settings),
-		.read = pyra_sysfs_read_profile5_settings
-};
-
-static struct bin_attribute pyra_profile_buttons_attr = {
+		.read = pyra_sysfs_read_profilex_settings,
+		.private = &profile_numbers[4]
+	},
+	{
 		.attr = { .name = "profile_buttons", .mode = 0220 },
 		.size = sizeof(struct pyra_profile_buttons),
 		.write = pyra_sysfs_write_profile_buttons
-};
-
-static struct bin_attribute pyra_profile1_buttons_attr = {
+	},
+	{
 		.attr = { .name = "profile1_buttons", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_buttons),
-		.read = pyra_sysfs_read_profile1_buttons
-};
-
-static struct bin_attribute pyra_profile2_buttons_attr = {
+		.read = pyra_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[0]
+	},
+	{
 		.attr = { .name = "profile2_buttons", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_buttons),
-		.read = pyra_sysfs_read_profile2_buttons
-};
-
-static struct bin_attribute pyra_profile3_buttons_attr = {
+		.read = pyra_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[1]
+	},
+	{
 		.attr = { .name = "profile3_buttons", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_buttons),
-		.read = pyra_sysfs_read_profile3_buttons
-};
-
-static struct bin_attribute pyra_profile4_buttons_attr = {
+		.read = pyra_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[2]
+	},
+	{
 		.attr = { .name = "profile4_buttons", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_buttons),
-		.read = pyra_sysfs_read_profile4_buttons
-};
-
-static struct bin_attribute pyra_profile5_buttons_attr = {
+		.read = pyra_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[3]
+	},
+	{
 		.attr = { .name = "profile5_buttons", .mode = 0440 },
 		.size = sizeof(struct pyra_profile_buttons),
-		.read = pyra_sysfs_read_profile5_buttons
-};
-
-static struct bin_attribute pyra_settings_attr = {
+		.read = pyra_sysfs_read_profilex_buttons,
+		.private = &profile_numbers[4]
+	},
+	{
 		.attr = { .name = "settings", .mode = 0660 },
 		.size = sizeof(struct pyra_settings),
 		.read = pyra_sysfs_read_settings,
 		.write = pyra_sysfs_write_settings
+	},
+	__ATTR_NULL
 };
 
-static int pyra_create_sysfs_attributes(struct usb_interface *intf)
-{
-	int retval;
-
-	retval = sysfs_create_group(&intf->dev.kobj, &pyra_attribute_group);
-	if (retval)
-		goto exit_1;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile_settings_attr);
-	if (retval)
-		goto exit_2;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile1_settings_attr);
-	if (retval)
-		goto exit_3;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile2_settings_attr);
-	if (retval)
-		goto exit_4;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile3_settings_attr);
-	if (retval)
-		goto exit_5;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile4_settings_attr);
-	if (retval)
-		goto exit_6;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile5_settings_attr);
-	if (retval)
-		goto exit_7;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile_buttons_attr);
-	if (retval)
-		goto exit_8;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile1_buttons_attr);
-	if (retval)
-		goto exit_9;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile2_buttons_attr);
-	if (retval)
-		goto exit_10;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile3_buttons_attr);
-	if (retval)
-		goto exit_11;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile4_buttons_attr);
-	if (retval)
-		goto exit_12;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_profile5_buttons_attr);
-	if (retval)
-		goto exit_13;
-
-	retval = sysfs_create_bin_file(&intf->dev.kobj,
-			&pyra_settings_attr);
-	if (retval)
-		goto exit_14;
-
-	return 0;
-
-exit_14:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_buttons_attr);
-exit_13:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_buttons_attr);
-exit_12:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_buttons_attr);
-exit_11:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_buttons_attr);
-exit_10:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_buttons_attr);
-exit_9:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_buttons_attr);
-exit_8:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_settings_attr);
-exit_7:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_settings_attr);
-exit_6:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_settings_attr);
-exit_5:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_settings_attr);
-exit_4:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_settings_attr);
-exit_3:
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_settings_attr);
-exit_2:
-	sysfs_remove_group(&intf->dev.kobj, &pyra_attribute_group);
-exit_1:
-	return retval;
-}
-
-static void pyra_remove_sysfs_attributes(struct usb_interface *intf)
-{
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_settings_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_buttons_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_buttons_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_buttons_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_buttons_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_buttons_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_buttons_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile5_settings_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile4_settings_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile3_settings_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile2_settings_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile1_settings_attr);
-	sysfs_remove_bin_file(&intf->dev.kobj, &pyra_profile_settings_attr);
-	sysfs_remove_group(&intf->dev.kobj, &pyra_attribute_group);
-}
-
 static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
 		struct pyra_device *pyra)
 {
@@ -770,31 +570,24 @@
 
 		pyra = kzalloc(sizeof(*pyra), GFP_KERNEL);
 		if (!pyra) {
-			dev_err(&hdev->dev, "can't alloc device descriptor\n");
+			hid_err(hdev, "can't alloc device descriptor\n");
 			return -ENOMEM;
 		}
 		hid_set_drvdata(hdev, pyra);
 
 		retval = pyra_init_pyra_device_struct(usb_dev, pyra);
 		if (retval) {
-			dev_err(&hdev->dev,
-					"couldn't init struct pyra_device\n");
+			hid_err(hdev, "couldn't init struct pyra_device\n");
 			goto exit_free;
 		}
 
-		retval = roccat_connect(hdev);
+		retval = roccat_connect(pyra_class, hdev);
 		if (retval < 0) {
-			dev_err(&hdev->dev, "couldn't init char dev\n");
+			hid_err(hdev, "couldn't init char dev\n");
 		} else {
 			pyra->chrdev_minor = retval;
 			pyra->roccat_claimed = 1;
 		}
-
-		retval = pyra_create_sysfs_attributes(intf);
-		if (retval) {
-			dev_err(&hdev->dev, "cannot create sysfs files\n");
-			goto exit_free;
-		}
 	} else {
 		hid_set_drvdata(hdev, NULL);
 	}
@@ -812,7 +605,6 @@
 
 	if (intf->cur_altsetting->desc.bInterfaceProtocol
 			== USB_INTERFACE_PROTOCOL_MOUSE) {
-		pyra_remove_sysfs_attributes(intf);
 		pyra = hid_get_drvdata(hdev);
 		if (pyra->roccat_claimed)
 			roccat_disconnect(pyra->chrdev_minor);
@@ -826,19 +618,19 @@
 
 	retval = hid_parse(hdev);
 	if (retval) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto exit;
 	}
 
 	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (retval) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto exit;
 	}
 
 	retval = pyra_init_specials(hdev);
 	if (retval) {
-		dev_err(&hdev->dev, "couldn't install mouse\n");
+		hid_err(hdev, "couldn't install mouse\n");
 		goto exit_stop;
 	}
 	return 0;
@@ -952,11 +744,24 @@
 
 static int __init pyra_init(void)
 {
-	return hid_register_driver(&pyra_driver);
+	int retval;
+
+	/* the class name has to be the same as the driver name */
+	pyra_class = class_create(THIS_MODULE, "pyra");
+	if (IS_ERR(pyra_class))
+		return PTR_ERR(pyra_class);
+	pyra_class->dev_attrs = pyra_attributes;
+	pyra_class->dev_bin_attrs = pyra_bin_attributes;
+
+	retval = hid_register_driver(&pyra_driver);
+	if (retval)
+		class_destroy(pyra_class);
+	return retval;
 }
 
 static void __exit pyra_exit(void)
 {
+	class_destroy(pyra_class);
 	hid_unregister_driver(&pyra_driver);
 }
 
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 22f80a8..14cbbe1 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -14,14 +14,11 @@
 
 #include <linux/types.h>
 
-#pragma pack(push)
-#pragma pack(1)
-
 struct pyra_b {
 	uint8_t command; /* PYRA_COMMAND_B */
 	uint8_t size; /* always 3 */
 	uint8_t unknown; /* 1 */
-};
+} __attribute__ ((__packed__));
 
 struct pyra_control {
 	uint8_t command; /* PYRA_COMMAND_CONTROL */
@@ -31,7 +28,7 @@
 	 */
 	uint8_t value; /* Range 0-4 */
 	uint8_t request;
-};
+} __attribute__ ((__packed__));
 
 enum pyra_control_requests {
 	PYRA_CONTROL_REQUEST_STATUS = 0x00,
@@ -43,7 +40,7 @@
 	uint8_t command; /* PYRA_COMMAND_SETTINGS */
 	uint8_t size; /* always 3 */
 	uint8_t startup_profile; /* Range 0-4! */
-};
+} __attribute__ ((__packed__));
 
 struct pyra_profile_settings {
 	uint8_t command; /* PYRA_COMMAND_PROFILE_SETTINGS */
@@ -58,7 +55,7 @@
 	uint8_t light_effect;
 	uint8_t handedness;
 	uint16_t checksum; /* byte sum */
-};
+} __attribute__ ((__packed__));
 
 struct pyra_profile_buttons {
 	uint8_t command; /* PYRA_COMMAND_PROFILE_BUTTONS */
@@ -66,7 +63,7 @@
 	uint8_t number; /* Range 0-4 */
 	uint8_t buttons[14];
 	uint16_t checksum; /* byte sum */
-};
+} __attribute__ ((__packed__));
 
 struct pyra_info {
 	uint8_t command; /* PYRA_COMMAND_INFO */
@@ -75,7 +72,7 @@
 	uint8_t unknown1; /* always 0 */
 	uint8_t unknown2; /* always 1 */
 	uint8_t unknown3; /* always 0 */
-};
+} __attribute__ ((__packed__));
 
 enum pyra_commands {
 	PYRA_COMMAND_CONTROL = 0x4,
@@ -107,13 +104,13 @@
 	uint8_t type;
 	uint8_t data1;
 	uint8_t data2;
-};
+} __attribute__ ((__packed__));
 
 struct pyra_mouse_event_audio {
 	uint8_t report_number; /* always 2 */
 	uint8_t type;
 	uint8_t unused; /* always 0 */
-};
+} __attribute__ ((__packed__));
 
 /* hid audio controls */
 enum pyra_mouse_event_audio_types {
@@ -167,9 +164,7 @@
 	uint8_t type;
 	uint8_t value;
 	uint8_t key;
-};
-
-#pragma pack(pop)
+} __attribute__ ((__packed__));
 
 struct pyra_device {
 	int actual_profile;
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 5a6879e23..a14c579 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -21,6 +21,8 @@
  * It is inspired by hidraw, but uses only one circular buffer for all readers.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
@@ -65,7 +67,6 @@
 };
 
 static int roccat_major;
-static struct class *roccat_class;
 static struct cdev roccat_cdev;
 
 static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
@@ -165,27 +166,22 @@
 	mutex_lock(&device->readers_lock);
 
 	if (!device) {
-		printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
-				minor);
+		pr_emerg("roccat device with minor %d doesn't exist\n", minor);
 		error = -ENODEV;
 		goto exit_err;
 	}
 
 	if (!device->open++) {
 		/* power on device on adding first reader */
-		if (device->hid->ll_driver->power) {
-			error = device->hid->ll_driver->power(device->hid,
-					PM_HINT_FULLON);
-			if (error < 0) {
-				--device->open;
-				goto exit_err;
-			}
-		}
-		error = device->hid->ll_driver->open(device->hid);
+		error = hid_hw_power(device->hid, PM_HINT_FULLON);
 		if (error < 0) {
-			if (device->hid->ll_driver->power)
-				device->hid->ll_driver->power(device->hid,
-						PM_HINT_NORMAL);
+			--device->open;
+			goto exit_err;
+		}
+
+		error = hid_hw_open(device->hid);
+		if (error < 0) {
+			hid_hw_power(device->hid, PM_HINT_NORMAL);
 			--device->open;
 			goto exit_err;
 		}
@@ -218,8 +214,7 @@
 	device = devices[minor];
 	if (!device) {
 		mutex_unlock(&devices_lock);
-		printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
-				minor);
+		pr_emerg("roccat device with minor %d doesn't exist\n", minor);
 		return -ENODEV;
 	}
 
@@ -231,10 +226,8 @@
 	if (!--device->open) {
 		/* removing last reader */
 		if (device->exist) {
-			if (device->hid->ll_driver->power)
-				device->hid->ll_driver->power(device->hid,
-						PM_HINT_NORMAL);
-			device->hid->ll_driver->close(device->hid);
+			hid_hw_power(device->hid, PM_HINT_NORMAL);
+			hid_hw_close(device->hid);
 		} else {
 			kfree(device);
 		}
@@ -295,12 +288,14 @@
 
 /*
  * roccat_connect() - create a char device for special event output
+ * @class: the class that is used to create the device. Meant to hold
+ * device-specific sysfs attributes.
  * @hid: the hid device the char device should be connected to.
  *
  * Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on
  * success, a negative error code on failure.
  */
-int roccat_connect(struct hid_device *hid)
+int roccat_connect(struct class *klass, struct hid_device *hid)
 {
 	unsigned int minor;
 	struct roccat_device *device;
@@ -326,7 +321,7 @@
 		return -EINVAL;
 	}
 
-	device->dev = device_create(roccat_class, &hid->dev,
+	device->dev = device_create(klass, &hid->dev,
 			MKDEV(roccat_major, minor), NULL,
 			"%s%s%d", "roccat", hid->driver->name, minor);
 
@@ -367,10 +362,10 @@
 
 	device->exist = 0; /* TODO exist maybe not needed */
 
-	device_destroy(roccat_class, MKDEV(roccat_major, minor));
+	device_destroy(device->dev->class, MKDEV(roccat_major, minor));
 
 	if (device->open) {
-		device->hid->ll_driver->close(device->hid);
+		hid_hw_close(device->hid);
 		wake_up_interruptible(&device->wait);
 	} else {
 		kfree(device);
@@ -398,14 +393,7 @@
 	roccat_major = MAJOR(dev_id);
 
 	if (retval < 0) {
-		printk(KERN_WARNING "roccat: can't get major number\n");
-		return retval;
-	}
-
-	roccat_class = class_create(THIS_MODULE, "roccat");
-	if (IS_ERR(roccat_class)) {
-		retval = PTR_ERR(roccat_class);
-		unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
+		pr_warn("can't get major number\n");
 		return retval;
 	}
 
@@ -420,7 +408,6 @@
 	dev_t dev_id = MKDEV(roccat_major, 0);
 
 	cdev_del(&roccat_cdev);
-	class_destroy(roccat_class);
 	unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
 }
 
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
index 09e864e..5784281 100644
--- a/drivers/hid/hid-roccat.h
+++ b/drivers/hid/hid-roccat.h
@@ -16,11 +16,12 @@
 #include <linux/types.h>
 
 #if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE)
-int roccat_connect(struct hid_device *hid);
+int roccat_connect(struct class *klass, struct hid_device *hid);
 void roccat_disconnect(int minor);
 int roccat_report_event(int minor, u8 const *data, int len);
 #else
-static inline int roccat_connect(struct hid_device *hid) { return -1; }
+static inline int roccat_connect(struct class *klass,
+		struct hid_device *hid) { return -1; }
 static inline void roccat_disconnect(int minor) {}
 static inline int roccat_report_event(int minor, u8 const *data, int len)
 {
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 3589444..3c1fd8a 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -57,8 +57,8 @@
 static inline void samsung_irda_dev_trace(struct hid_device *hdev,
 		unsigned int rsize)
 {
-	dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
-			"descriptor\n", rsize);
+	hid_info(hdev, "fixing up Samsung IrDA %d byte report descriptor\n",
+		 rsize);
 }
 
 static __u8 *samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -160,7 +160,7 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
@@ -174,7 +174,7 @@
 
 	ret = hid_hw_start(hdev, cmask);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-sjoy.c b/drivers/hid/hid-sjoy.c
index e10a768..16f7caf 100644
--- a/drivers/hid/hid-sjoy.c
+++ b/drivers/hid/hid-sjoy.c
@@ -74,26 +74,25 @@
 	int error;
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output reports found\n");
+		hid_err(hid, "no output reports found\n");
 		return -ENODEV;
 	}
 
 	report_ptr = report_ptr->next;
 
 	if (report_ptr == report_list) {
-		dev_err(&hid->dev, "required output report is "
-				"missing\n");
+		hid_err(hid, "required output report is missing\n");
 		return -ENODEV;
 	}
 
 	report = list_entry(report_ptr, struct hid_report, list);
 	if (report->maxfield < 1) {
-		dev_err(&hid->dev, "no fields in the report\n");
+		hid_err(hid, "no fields in the report\n");
 		return -ENODEV;
 	}
 
 	if (report->field[0]->report_count < 3) {
-		dev_err(&hid->dev, "not enough values in the field\n");
+		hid_err(hid, "not enough values in the field\n");
 		return -ENODEV;
 	}
 
@@ -117,8 +116,7 @@
 	sjoyff->report->field[0]->value[2] = 0x00;
 	usbhid_submit_report(hid, sjoyff->report, USB_DIR_OUT);
 
-	dev_info(&hid->dev,
-		"Force feedback for SmartJoy PLUS PS2/USB adapter\n");
+	hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n");
 
 	return 0;
 }
@@ -135,13 +133,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 677bb3d..68d7b36 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -40,8 +40,7 @@
 
 	if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
 			*rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
-		dev_info(&hdev->dev, "Fixing up Sony Vaio VGX report "
-				"descriptor\n");
+		hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
 		rdesc[55] = 0x06;
 	}
 	return rdesc;
@@ -89,7 +88,7 @@
 				 (3 << 8) | 0xf2, ifnum, buf, 17,
 				 USB_CTRL_GET_TIMEOUT);
 	if (ret < 0)
-		dev_err(&hdev->dev, "can't set operational mode\n");
+		hid_err(hdev, "can't set operational mode\n");
 
 	kfree(buf);
 
@@ -110,7 +109,7 @@
 
 	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
 	if (sc == NULL) {
-		dev_err(&hdev->dev, "can't alloc sony descriptor\n");
+		hid_err(hdev, "can't alloc sony descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -119,14 +118,14 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
 			HID_CONNECT_HIDDEV_FORCE);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
index 3171be2..b2be1d1 100644
--- a/drivers/hid/hid-stantum.c
+++ b/drivers/hid/hid-stantum.c
@@ -222,7 +222,7 @@
 
 	sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL);
 	if (!sd) {
-		dev_err(&hdev->dev, "cannot allocate Stantum data\n");
+		hid_err(hdev, "cannot allocate Stantum data\n");
 		return -ENOMEM;
 	}
 	sd->valid = false;
diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c
index 164ed56..d484a00 100644
--- a/drivers/hid/hid-sunplus.c
+++ b/drivers/hid/hid-sunplus.c
@@ -27,8 +27,7 @@
 {
 	if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
 			rdesc[106] == 0x03) {
-		dev_info(&hdev->dev, "fixing up Sunplus Wireless Desktop "
-				"report descriptor\n");
+		hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
 		rdesc[105] = rdesc[110] = 0x03;
 		rdesc[106] = rdesc[111] = 0x21;
 	}
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 25be4e1..575862b 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -151,28 +151,23 @@
 			switch (field->usage[0].hid) {
 			case THRUSTMASTER_USAGE_FF:
 				if (field->report_count < 2) {
-					dev_warn(&hid->dev, "ignoring FF field "
-						"with report_count < 2\n");
+					hid_warn(hid, "ignoring FF field with report_count < 2\n");
 					continue;
 				}
 
 				if (field->logical_maximum ==
 						field->logical_minimum) {
-					dev_warn(&hid->dev, "ignoring FF field "
-							"with logical_maximum "
-							"== logical_minimum\n");
+					hid_warn(hid, "ignoring FF field with logical_maximum == logical_minimum\n");
 					continue;
 				}
 
 				if (tmff->report && tmff->report != report) {
-					dev_warn(&hid->dev, "ignoring FF field "
-							"in other report\n");
+					hid_warn(hid, "ignoring FF field in other report\n");
 					continue;
 				}
 
 				if (tmff->ff_field && tmff->ff_field != field) {
-					dev_warn(&hid->dev, "ignoring "
-							"duplicate FF field\n");
+					hid_warn(hid, "ignoring duplicate FF field\n");
 					continue;
 				}
 
@@ -185,16 +180,15 @@
 				break;
 
 			default:
-				dev_warn(&hid->dev, "ignoring unknown output "
-						"usage %08x\n",
-						field->usage[0].hid);
+				hid_warn(hid, "ignoring unknown output usage %08x\n",
+					 field->usage[0].hid);
 				continue;
 			}
 		}
 	}
 
 	if (!tmff->report) {
-		dev_err(&hid->dev, "can't find FF field in output reports\n");
+		hid_err(hid, "can't find FF field in output reports\n");
 		error = -ENODEV;
 		goto fail;
 	}
@@ -203,8 +197,7 @@
 	if (error)
 		goto fail;
 
-	dev_info(&hid->dev, "force feedback for ThrustMaster devices by Zinx "
-			"Verituse <zinx@epicsol.org>");
+	hid_info(hid, "force feedback for ThrustMaster devices by Zinx Verituse <zinx@epicsol.org>\n");
 	return 0;
 
 fail:
@@ -224,13 +217,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 956ed9a..613ff7b 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -66,6 +66,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ts_devices);
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 724f46e..0688832 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -18,6 +18,8 @@
  * any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/hid.h>
 #include <linux/module.h>
@@ -141,8 +143,8 @@
 	 * Note that if the raw queries fail, it's not a hard failure and it
 	 * is safe to continue
 	 */
-	dev_warn(&hdev->dev, "failed to poke device, command %d, err %d\n",
-				rep_data[0], ret);
+	hid_warn(hdev, "failed to poke device, command %d, err %d\n",
+		 rep_data[0], ret);
 	return;
 }
 
@@ -172,7 +174,7 @@
 		return -EINVAL;
 }
 
-static DEVICE_ATTR(speed, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
 		wacom_show_speed, wacom_store_speed);
 
 static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
@@ -312,7 +314,7 @@
 
 	wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
 	if (wdata == NULL) {
-		dev_err(&hdev->dev, "can't alloc wacom descriptor\n");
+		hid_err(hdev, "can't alloc wacom descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -321,20 +323,20 @@
 	/* Parse the HID report now */
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
 	ret = device_create_file(&hdev->dev, &dev_attr_speed);
 	if (ret)
-		dev_warn(&hdev->dev,
-			"can't create sysfs speed attribute err: %d\n", ret);
+		hid_warn(hdev,
+			 "can't create sysfs speed attribute err: %d\n", ret);
 
 	/* Set Wacom mode 2 with high reporting speed */
 	wacom_poke(hdev, 1);
@@ -349,8 +351,8 @@
 
 	ret = power_supply_register(&hdev->dev, &wdata->battery);
 	if (ret) {
-		dev_warn(&hdev->dev,
-			"can't create sysfs battery attribute, err: %d\n", ret);
+		hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
+			 ret);
 		/*
 		 * battery attribute is not critical for the tablet, but if it
 		 * failed then there is no need to create ac attribute
@@ -367,8 +369,8 @@
 
 	ret = power_supply_register(&hdev->dev, &wdata->ac);
 	if (ret) {
-		dev_warn(&hdev->dev,
-			"can't create ac battery attribute, err: %d\n", ret);
+		hid_warn(hdev,
+			 "can't create ac battery attribute, err: %d\n", ret);
 		/*
 		 * ac attribute is not critical for the tablet, but if it
 		 * failed then we don't want to battery attribute to exist
@@ -454,7 +456,7 @@
 
 	ret = hid_register_driver(&wacom_driver);
 	if (ret)
-		printk(KERN_ERR "can't register wacom driver\n");
+		pr_err("can't register wacom driver\n");
 	return ret;
 }
 
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index b7accea..f31fab0 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -75,14 +75,14 @@
 	int error;
 
 	if (list_empty(report_list)) {
-		dev_err(&hid->dev, "no output report found\n");
+		hid_err(hid, "no output report found\n");
 		return -ENODEV;
 	}
 
 	report = list_entry(report_list->next, struct hid_report, list);
 
 	if (report->maxfield < 4) {
-		dev_err(&hid->dev, "not enough fields in report\n");
+		hid_err(hid, "not enough fields in report\n");
 		return -ENODEV;
 	}
 
@@ -105,8 +105,7 @@
 	zpff->report->field[3]->value[0] = 0x00;
 	usbhid_submit_report(hid, zpff->report, USB_DIR_OUT);
 
-	dev_info(&hid->dev, "force feedback for Zeroplus based devices by "
-	       "Anssi Hannula <anssi.hannula@gmail.com>\n");
+	hid_info(hid, "force feedback for Zeroplus based devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
 	return 0;
 }
@@ -123,13 +122,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
 	if (ret) {
-		dev_err(&hdev->dev, "hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err;
 	}
 
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index aac1f92..e903715 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -34,9 +34,8 @@
 		rdesc[0x96] == 0xbc && rdesc[0x97] == 0xff &&
 		rdesc[0xca] == 0xbc && rdesc[0xcb] == 0xff &&
 		rdesc[0xe1] == 0xbc && rdesc[0xe2] == 0xff) {
-			dev_info(&hdev->dev,
-				"fixing up zydacron remote control report "
-				"descriptor\n");
+			hid_info(hdev,
+				"fixing up zydacron remote control report descriptor\n");
 			rdesc[0x96] = rdesc[0xca] = rdesc[0xe1] = 0x0c;
 			rdesc[0x97] = rdesc[0xcb] = rdesc[0xe2] = 0x00;
 		}
@@ -172,7 +171,7 @@
 
 	zc = kzalloc(sizeof(*zc), GFP_KERNEL);
 	if (zc == NULL) {
-		dev_err(&hdev->dev, "zydacron: can't alloc descriptor\n");
+		hid_err(hdev, "can't alloc descriptor\n");
 		return -ENOMEM;
 	}
 
@@ -180,13 +179,13 @@
 
 	ret = hid_parse(hdev);
 	if (ret) {
-		dev_err(&hdev->dev, "zydacron: parse failed\n");
+		hid_err(hdev, "parse failed\n");
 		goto err_free;
 	}
 
 	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 	if (ret) {
-		dev_err(&hdev->dev, "zydacron: hw start failed\n");
+		hid_err(hdev, "hw start failed\n");
 		goto err_free;
 	}
 
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index e1f0748..468e87b 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -19,6 +19,8 @@
  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -122,15 +124,15 @@
 	}
 
 	if (count > HID_MAX_BUFFER_SIZE) {
-		printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
-				task_pid_nr(current));
+		hid_warn(dev, "pid %d passed too large report\n",
+			 task_pid_nr(current));
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (count < 2) {
-		printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
-				task_pid_nr(current));
+		hid_warn(dev, "pid %d passed too short report\n",
+			 task_pid_nr(current));
 		ret = -EINVAL;
 		goto out;
 	}
@@ -192,15 +194,13 @@
 
 	dev = hidraw_table[minor];
 	if (!dev->open++) {
-		if (dev->hid->ll_driver->power) {
-			err = dev->hid->ll_driver->power(dev->hid, PM_HINT_FULLON);
-			if (err < 0)
-				goto out_unlock;
-		}
-		err = dev->hid->ll_driver->open(dev->hid);
+		err = hid_hw_power(dev->hid, PM_HINT_FULLON);
+		if (err < 0)
+			goto out_unlock;
+
+		err = hid_hw_open(dev->hid);
 		if (err < 0) {
-			if (dev->hid->ll_driver->power)
-				dev->hid->ll_driver->power(dev->hid, PM_HINT_NORMAL);
+			hid_hw_power(dev->hid, PM_HINT_NORMAL);
 			dev->open--;
 		}
 	}
@@ -229,9 +229,8 @@
 	dev = hidraw_table[minor];
 	if (!--dev->open) {
 		if (list->hidraw->exist) {
-			if (dev->hid->ll_driver->power)
-				dev->hid->ll_driver->power(dev->hid, PM_HINT_NORMAL);
-			dev->hid->ll_driver->close(dev->hid);
+			hid_hw_power(dev->hid, PM_HINT_NORMAL);
+			hid_hw_close(dev->hid);
 		} else {
 			kfree(list->hidraw);
 		}
@@ -345,6 +344,9 @@
 	.open =         hidraw_open,
 	.release =      hidraw_release,
 	.unlocked_ioctl = hidraw_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = hidraw_ioctl,
+#endif
 	.llseek =	noop_llseek,
 };
 
@@ -433,7 +435,7 @@
 	device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
 
 	if (hidraw->open) {
-		hid->ll_driver->close(hid);
+		hid_hw_close(hid);
 		wake_up_interruptible(&hidraw->wait);
 	} else {
 		kfree(hidraw);
@@ -452,7 +454,7 @@
 	hidraw_major = MAJOR(dev_id);
 
 	if (result < 0) {
-		printk(KERN_WARNING "hidraw: can't get major number\n");
+		pr_warn("can't get major number\n");
 		result = 0;
 		goto out;
 	}
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 1329ecb..db3cf31 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -3,15 +3,15 @@
 #
 
 # Multipart objects.
-usbhid-objs	:= hid-core.o hid-quirks.o
+usbhid-y	:= hid-core.o hid-quirks.o
 
 # Optional parts of multipart objects.
 
 ifeq ($(CONFIG_USB_HIDDEV),y)
-	usbhid-objs	+= hiddev.o
+	usbhid-y	+= hiddev.o
 endif
 ifeq ($(CONFIG_HID_PID),y)
-	usbhid-objs	+= hid-pidff.o
+	usbhid-y	+= hid-pidff.o
 endif
 
 obj-$(CONFIG_USB_HID)		+= usbhid.o
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 5489eab..b336dd8 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -67,7 +67,6 @@
  * Input submission and I/O error handler.
  */
 static DEFINE_MUTEX(hid_open_mut);
-static struct workqueue_struct *resumption_waker;
 
 static void hid_io_error(struct hid_device *hid);
 static int hid_submit_out(struct hid_device *hid);
@@ -136,10 +135,10 @@
 			hid_io_error(hid);
 		break;
 	default:
-		err_hid("can't reset device, %s-%s/input%d, status %d",
-				hid_to_usb_dev(hid)->bus->bus_name,
-				hid_to_usb_dev(hid)->devpath,
-				usbhid->ifnum, rc);
+		hid_err(hid, "can't reset device, %s-%s/input%d, status %d\n",
+			hid_to_usb_dev(hid)->bus->bus_name,
+			hid_to_usb_dev(hid)->devpath,
+			usbhid->ifnum, rc);
 		/* FALLTHROUGH */
 	case -EHOSTUNREACH:
 	case -ENODEV:
@@ -278,18 +277,18 @@
 		hid_io_error(hid);
 		return;
 	default:		/* error */
-		dev_warn(&urb->dev->dev, "input irq status %d  "
-				"received\n", urb->status);
+		hid_warn(urb->dev, "input irq status %d received\n",
+			 urb->status);
 	}
 
 	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status) {
 		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
 		if (status != -EPERM) {
-			err_hid("can't resubmit intr, %s-%s/input%d, status %d",
-					hid_to_usb_dev(hid)->bus->bus_name,
-					hid_to_usb_dev(hid)->devpath,
-					usbhid->ifnum, status);
+			hid_err(hid, "can't resubmit intr, %s-%s/input%d, status %d\n",
+				hid_to_usb_dev(hid)->bus->bus_name,
+				hid_to_usb_dev(hid)->devpath,
+				usbhid->ifnum, status);
 			hid_io_error(hid);
 		}
 	}
@@ -300,10 +299,19 @@
 	struct hid_report *report;
 	char *raw_report;
 	struct usbhid_device *usbhid = hid->driver_data;
+	int r;
 
 	report = usbhid->out[usbhid->outtail].report;
 	raw_report = usbhid->out[usbhid->outtail].raw_report;
 
+	r = usb_autopm_get_interface_async(usbhid->intf);
+	if (r < 0)
+		return -1;
+
+	/*
+	 * if the device hasn't been woken, we leave the output
+	 * to resume()
+	 */
 	if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
 		usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
 		usbhid->urbout->dev = hid_to_usb_dev(hid);
@@ -313,17 +321,11 @@
 		dbg_hid("submitting out urb\n");
 
 		if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
-			err_hid("usb_submit_urb(out) failed");
+			hid_err(hid, "usb_submit_urb(out) failed\n");
+			usb_autopm_put_interface_async(usbhid->intf);
 			return -1;
 		}
 		usbhid->last_out = jiffies;
-	} else {
-		/*
-		 * queue work to wake up the device.
-		 * as the work queue is freezeable, this is safe
-		 * with respect to STD and STR
-		 */
-		queue_work(resumption_waker, &usbhid->restart_work);
 	}
 
 	return 0;
@@ -334,13 +336,16 @@
 	struct hid_report *report;
 	unsigned char dir;
 	char *raw_report;
-	int len;
+	int len, r;
 	struct usbhid_device *usbhid = hid->driver_data;
 
 	report = usbhid->ctrl[usbhid->ctrltail].report;
 	raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
 	dir = usbhid->ctrl[usbhid->ctrltail].dir;
 
+	r = usb_autopm_get_interface_async(usbhid->intf);
+	if (r < 0)
+		return -1;
 	if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
 		len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
 		if (dir == USB_DIR_OUT) {
@@ -375,17 +380,11 @@
 			usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
 
 		if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
-			err_hid("usb_submit_urb(ctrl) failed");
+			usb_autopm_put_interface_async(usbhid->intf);
+			hid_err(hid, "usb_submit_urb(ctrl) failed\n");
 			return -1;
 		}
 		usbhid->last_ctrl = jiffies;
-	} else {
-		/*
-		 * queue work to wake up the device.
-		 * as the work queue is freezeable, this is safe
-		 * with respect to STD and STR
-		 */
-		queue_work(resumption_waker, &usbhid->restart_work);
 	}
 
 	return 0;
@@ -413,8 +412,8 @@
 	case -ENOENT:
 		break;
 	default:		/* error */
-		dev_warn(&urb->dev->dev, "output irq status %d "
-				"received\n", urb->status);
+		hid_warn(urb->dev, "output irq status %d received\n",
+			 urb->status);
 	}
 
 	spin_lock_irqsave(&usbhid->lock, flags);
@@ -435,6 +434,7 @@
 
 	clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
 	spin_unlock_irqrestore(&usbhid->lock, flags);
+	usb_autopm_put_interface_async(usbhid->intf);
 	wake_up(&usbhid->wait);
 }
 
@@ -466,8 +466,7 @@
 	case -EPIPE:		/* report not available */
 		break;
 	default:		/* error */
-		dev_warn(&urb->dev->dev, "ctrl urb status %d "
-				"received\n", status);
+		hid_warn(urb->dev, "ctrl urb status %d received\n", status);
 	}
 
 	if (unplug)
@@ -481,11 +480,13 @@
 			wake_up(&usbhid->wait);
 		}
 		spin_unlock(&usbhid->lock);
+		usb_autopm_put_interface_async(usbhid->intf);
 		return;
 	}
 
 	clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
 	spin_unlock(&usbhid->lock);
+	usb_autopm_put_interface_async(usbhid->intf);
 	wake_up(&usbhid->wait);
 }
 
@@ -501,13 +502,13 @@
 
 	if (usbhid->urbout && dir == USB_DIR_OUT && report->type == HID_OUTPUT_REPORT) {
 		if ((head = (usbhid->outhead + 1) & (HID_OUTPUT_FIFO_SIZE - 1)) == usbhid->outtail) {
-			dev_warn(&hid->dev, "output queue full\n");
+			hid_warn(hid, "output queue full\n");
 			return;
 		}
 
 		usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
 		if (!usbhid->out[usbhid->outhead].raw_report) {
-			dev_warn(&hid->dev, "output queueing failed\n");
+			hid_warn(hid, "output queueing failed\n");
 			return;
 		}
 		hid_output_report(report, usbhid->out[usbhid->outhead].raw_report);
@@ -532,14 +533,14 @@
 	}
 
 	if ((head = (usbhid->ctrlhead + 1) & (HID_CONTROL_FIFO_SIZE - 1)) == usbhid->ctrltail) {
-		dev_warn(&hid->dev, "control queue full\n");
+		hid_warn(hid, "control queue full\n");
 		return;
 	}
 
 	if (dir == USB_DIR_OUT) {
 		usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
 		if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
-			dev_warn(&hid->dev, "control queueing failed\n");
+			hid_warn(hid, "control queueing failed\n");
 			return;
 		}
 		hid_output_report(report, usbhid->ctrl[usbhid->ctrlhead].raw_report);
@@ -590,7 +591,7 @@
 		return -1;
 
 	if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
-		dev_warn(&dev->dev, "event field not found\n");
+		hid_warn(dev, "event field not found\n");
 		return -1;
 	}
 
@@ -656,7 +657,7 @@
 	mutex_lock(&hid_open_mut);
 	if (!hid->open++) {
 		res = usb_autopm_get_interface(usbhid->intf);
-		/* the device must be awake to reliable request remote wakeup */
+		/* the device must be awake to reliably request remote wakeup */
 		if (res < 0) {
 			hid->open--;
 			mutex_unlock(&hid_open_mut);
@@ -722,7 +723,7 @@
 	}
 
 	if (err)
-		dev_warn(&hid->dev, "timeout initializing reports\n");
+		hid_warn(hid, "timeout initializing reports\n");
 }
 
 /*
@@ -857,18 +858,6 @@
 	usbhid_restart_ctrl_queue(usbhid);
 }
 
-static void __usbhid_restart_queues(struct work_struct *work)
-{
-	struct usbhid_device *usbhid =
-		container_of(work, struct usbhid_device, restart_work);
-	int r;
-
-	r = usb_autopm_get_interface(usbhid->intf);
-	if (r < 0)
-		return;
-	usb_autopm_put_interface(usbhid->intf);
-}
-
 static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
@@ -1140,8 +1129,7 @@
 		if (usb_endpoint_is_int_in(&interface->endpoint[n].desc))
 			has_in++;
 	if (!has_in) {
-		dev_err(&intf->dev, "couldn't find an input interrupt "
-				"endpoint\n");
+		hid_err(intf, "couldn't find an input interrupt endpoint\n");
 		return -ENODEV;
 	}
 
@@ -1206,14 +1194,13 @@
 
 	init_waitqueue_head(&usbhid->wait);
 	INIT_WORK(&usbhid->reset_work, hid_reset);
-	INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
 	setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
 	spin_lock_init(&usbhid->lock);
 
 	ret = hid_add_device(hid);
 	if (ret) {
 		if (ret != -ENODEV)
-			dev_err(&intf->dev, "can't add hid device: %d\n", ret);
+			hid_err(intf, "can't add hid device: %d\n", ret);
 		goto err_free;
 	}
 
@@ -1241,7 +1228,6 @@
 static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
 {
 	del_timer_sync(&usbhid->io_retry);
-	cancel_work_sync(&usbhid->restart_work);
 	cancel_work_sync(&usbhid->reset_work);
 }
 
@@ -1262,7 +1248,6 @@
 	spin_lock_irq(&usbhid->lock);
 	set_bit(HID_RESET_PENDING, &usbhid->iofl);
 	spin_unlock_irq(&usbhid->lock);
-	cancel_work_sync(&usbhid->restart_work);
 	hid_cease_io(usbhid);
 
 	return 0;
@@ -1461,9 +1446,6 @@
 {
 	int retval = -ENOMEM;
 
-	resumption_waker = create_freezeable_workqueue("usbhid_resumer");
-	if (!resumption_waker)
-		goto no_queue;
 	retval = hid_register_driver(&hid_usb_driver);
 	if (retval)
 		goto hid_register_fail;
@@ -1481,8 +1463,6 @@
 usbhid_quirks_init_fail:
 	hid_unregister_driver(&hid_usb_driver);
 hid_register_fail:
-	destroy_workqueue(resumption_waker);
-no_queue:
 	return retval;
 }
 
@@ -1491,7 +1471,6 @@
 	usb_deregister(&hid_driver);
 	usbhid_quirks_exit();
 	hid_unregister_driver(&hid_usb_driver);
-	destroy_workqueue(resumption_waker);
 }
 
 module_init(hid_init);
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index ef381d7..f91c136 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -22,7 +22,7 @@
 
 /* #define DEBUG */
 
-#define debug(format, arg...) pr_debug("hid-pidff: " format "\n" , ## arg)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/input.h>
 #include <linux/slab.h>
@@ -220,7 +220,7 @@
 static void pidff_set(struct pidff_usage *usage, u16 value)
 {
 	usage->value[0] = pidff_rescale(value, 0xffff, usage->field);
-	debug("calculated from %d to %d", value, usage->value[0]);
+	pr_debug("calculated from %d to %d\n", value, usage->value[0]);
 }
 
 static void pidff_set_signed(struct pidff_usage *usage, s16 value)
@@ -235,7 +235,7 @@
 			usage->value[0] =
 			    pidff_rescale(value, 0x7fff, usage->field);
 	}
-	debug("calculated from %d to %d", value, usage->value[0]);
+	pr_debug("calculated from %d to %d\n", value, usage->value[0]);
 }
 
 /*
@@ -259,8 +259,9 @@
 	pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length;
 	pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length;
 
-	debug("attack %u => %d", envelope->attack_level,
-	      pidff->set_envelope[PID_ATTACK_LEVEL].value[0]);
+	hid_dbg(pidff->hid, "attack %u => %d\n",
+		envelope->attack_level,
+		pidff->set_envelope[PID_ATTACK_LEVEL].value[0]);
 
 	usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_ENVELOPE],
 			  USB_DIR_OUT);
@@ -466,33 +467,33 @@
 	pidff->create_new_effect_type->value[0] = efnum;
 	usbhid_submit_report(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
 			  USB_DIR_OUT);
-	debug("create_new_effect sent, type: %d", efnum);
+	hid_dbg(pidff->hid, "create_new_effect sent, type: %d\n", efnum);
 
 	pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0;
 	pidff->block_load_status->value[0] = 0;
 	usbhid_wait_io(pidff->hid);
 
 	for (j = 0; j < 60; j++) {
-		debug("pid_block_load requested");
+		hid_dbg(pidff->hid, "pid_block_load requested\n");
 		usbhid_submit_report(pidff->hid, pidff->reports[PID_BLOCK_LOAD],
 				  USB_DIR_IN);
 		usbhid_wait_io(pidff->hid);
 		if (pidff->block_load_status->value[0] ==
 		    pidff->status_id[PID_BLOCK_LOAD_SUCCESS]) {
-			debug("device reported free memory: %d bytes",
-			      pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
-				pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
+			hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
+				 pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
+				 pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
 			return 0;
 		}
 		if (pidff->block_load_status->value[0] ==
 		    pidff->status_id[PID_BLOCK_LOAD_FULL]) {
-			debug("not enough memory free: %d bytes",
-			      pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
+			hid_dbg(pidff->hid, "not enough memory free: %d bytes\n",
+				pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
 				pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
 			return -ENOSPC;
 		}
 	}
-	printk(KERN_ERR "hid-pidff: pid_block_load failed 60 times\n");
+	hid_err(pidff->hid, "pid_block_load failed 60 times\n");
 	return -EIO;
 }
 
@@ -546,7 +547,8 @@
 	struct pidff_device *pidff = dev->ff->private;
 	int pid_id = pidff->pid_id[effect_id];
 
-	debug("starting to erase %d/%d", effect_id, pidff->pid_id[effect_id]);
+	hid_dbg(pidff->hid, "starting to erase %d/%d\n",
+		effect_id, pidff->pid_id[effect_id]);
 	/* Wait for the queue to clear. We do not want a full fifo to
 	   prevent the effect removal. */
 	usbhid_wait_io(pidff->hid);
@@ -604,8 +606,7 @@
 				type_id = PID_SAW_DOWN;
 				break;
 			default:
-				printk(KERN_ERR
-				       "hid-pidff: invalid waveform\n");
+				hid_err(pidff->hid, "invalid waveform\n");
 				return -EINVAL;
 			}
 
@@ -696,7 +697,7 @@
 		break;
 
 	default:
-		printk(KERN_ERR "hid-pidff: invalid type\n");
+		hid_err(pidff->hid, "invalid type\n");
 		return -EINVAL;
 	}
 
@@ -704,7 +705,7 @@
 		pidff->pid_id[effect->id] =
 		    pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
 
-	debug("uploaded");
+	hid_dbg(pidff->hid, "uploaded\n");
 
 	return 0;
 }
@@ -770,14 +771,14 @@
 		for (i = 0; i < report->maxfield; i++) {
 			if (report->field[i]->maxusage !=
 			    report->field[i]->report_count) {
-				debug("maxusage and report_count do not match, "
-				      "skipping");
+				pr_debug("maxusage and report_count do not match, skipping\n");
 				continue;
 			}
 			for (j = 0; j < report->field[i]->maxusage; j++) {
 				if (report->field[i]->usage[j].hid ==
 				    (HID_UP_PID | table[k])) {
-					debug("found %d at %d->%d", k, i, j);
+					pr_debug("found %d at %d->%d\n",
+						 k, i, j);
 					usage[k].field = report->field[i];
 					usage[k].value =
 						&report->field[i]->value[j];
@@ -789,7 +790,7 @@
 				break;
 		}
 		if (!found && strict) {
-			debug("failed to locate %d", k);
+			pr_debug("failed to locate %d\n", k);
 			return -1;
 		}
 	}
@@ -826,8 +827,8 @@
 			continue;
 		ret = pidff_check_usage(report->field[0]->logical);
 		if (ret != -1) {
-			debug("found usage 0x%02x from field->logical",
-			      pidff_reports[ret]);
+			hid_dbg(hid, "found usage 0x%02x from field->logical\n",
+				pidff_reports[ret]);
 			pidff->reports[ret] = report;
 			continue;
 		}
@@ -845,8 +846,9 @@
 			continue;
 		ret = pidff_check_usage(hid->collection[i - 1].usage);
 		if (ret != -1 && !pidff->reports[ret]) {
-			debug("found usage 0x%02x from collection array",
-			      pidff_reports[ret]);
+			hid_dbg(hid,
+				"found usage 0x%02x from collection array\n",
+				pidff_reports[ret]);
 			pidff->reports[ret] = report;
 		}
 	}
@@ -861,7 +863,7 @@
 
 	for (i = 0; i <= PID_REQUIRED_REPORTS; i++) {
 		if (!pidff->reports[i]) {
-			debug("%d missing", i);
+			hid_dbg(pidff->hid, "%d missing\n", i);
 			return 0;
 		}
 	}
@@ -884,8 +886,7 @@
 			    report->field[i]->logical_minimum == 1)
 				return report->field[i];
 			else {
-				printk(KERN_ERR "hid-pidff: logical_minimum "
-					"is not 1 as it should be\n");
+				pr_err("logical_minimum is not 1 as it should be\n");
 				return NULL;
 			}
 		}
@@ -924,7 +925,7 @@
  */
 static int pidff_find_special_fields(struct pidff_device *pidff)
 {
-	debug("finding special fields");
+	hid_dbg(pidff->hid, "finding special fields\n");
 
 	pidff->create_new_effect_type =
 		pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT],
@@ -945,32 +946,30 @@
 		pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION],
 					 0x78, 1);
 
-	debug("search done");
+	hid_dbg(pidff->hid, "search done\n");
 
 	if (!pidff->create_new_effect_type || !pidff->set_effect_type) {
-		printk(KERN_ERR "hid-pidff: effect lists not found\n");
+		hid_err(pidff->hid, "effect lists not found\n");
 		return -1;
 	}
 
 	if (!pidff->effect_direction) {
-		printk(KERN_ERR "hid-pidff: direction field not found\n");
+		hid_err(pidff->hid, "direction field not found\n");
 		return -1;
 	}
 
 	if (!pidff->device_control) {
-		printk(KERN_ERR "hid-pidff: device control field not found\n");
+		hid_err(pidff->hid, "device control field not found\n");
 		return -1;
 	}
 
 	if (!pidff->block_load_status) {
-		printk(KERN_ERR
-		       "hid-pidff: block load status field not found\n");
+		hid_err(pidff->hid, "block load status field not found\n");
 		return -1;
 	}
 
 	if (!pidff->effect_operation_status) {
-		printk(KERN_ERR
-		       "hid-pidff: effect operation field not found\n");
+		hid_err(pidff->hid, "effect operation field not found\n");
 		return -1;
 	}
 
@@ -982,23 +981,22 @@
 
 	if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type,
 				     effect_types)) {
-		printk(KERN_ERR "hid-pidff: no effect types found\n");
+		hid_err(pidff->hid, "no effect types found\n");
 		return -1;
 	}
 
 	if (PIDFF_FIND_SPECIAL_KEYS(status_id, block_load_status,
 				    block_load_status) !=
 			sizeof(pidff_block_load_status)) {
-		printk(KERN_ERR
-		       "hidpidff: block load status identifiers not found\n");
+		hid_err(pidff->hid,
+			"block load status identifiers not found\n");
 		return -1;
 	}
 
 	if (PIDFF_FIND_SPECIAL_KEYS(operation_id, effect_operation_status,
 				    effect_operation_status) !=
 			sizeof(pidff_effect_operation_status)) {
-		printk(KERN_ERR
-		       "hidpidff: effect operation identifiers not found\n");
+		hid_err(pidff->hid, "effect operation identifiers not found\n");
 		return -1;
 	}
 
@@ -1017,8 +1015,8 @@
 		int pidff_type = pidff->type_id[i];
 		if (pidff->set_effect_type->usage[pidff_type].hid !=
 		    pidff->create_new_effect_type->usage[pidff_type].hid) {
-			printk(KERN_ERR "hid-pidff: "
-			       "effect type number %d is invalid\n", i);
+			hid_err(pidff->hid,
+				"effect type number %d is invalid\n", i);
 			return -1;
 		}
 	}
@@ -1073,27 +1071,23 @@
 	int envelope_ok = 0;
 
 	if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
-		printk(KERN_ERR
-		       "hid-pidff: unknown set_effect report layout\n");
+		hid_err(pidff->hid, "unknown set_effect report layout\n");
 		return -ENODEV;
 	}
 
 	PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
 	if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
-		printk(KERN_ERR
-		       "hid-pidff: unknown pid_block_load report layout\n");
+		hid_err(pidff->hid, "unknown pid_block_load report layout\n");
 		return -ENODEV;
 	}
 
 	if (PIDFF_FIND_FIELDS(effect_operation, PID_EFFECT_OPERATION, 1)) {
-		printk(KERN_ERR
-		       "hid-pidff: unknown effect_operation report layout\n");
+		hid_err(pidff->hid, "unknown effect_operation report layout\n");
 		return -ENODEV;
 	}
 
 	if (PIDFF_FIND_FIELDS(block_free, PID_BLOCK_FREE, 1)) {
-		printk(KERN_ERR
-		       "hid-pidff: unknown pid_block_free report layout\n");
+		hid_err(pidff->hid, "unknown pid_block_free report layout\n");
 		return -ENODEV;
 	}
 
@@ -1105,27 +1099,26 @@
 
 	if (!envelope_ok) {
 		if (test_and_clear_bit(FF_CONSTANT, dev->ffbit))
-			printk(KERN_WARNING "hid-pidff: "
-			       "has constant effect but no envelope\n");
+			hid_warn(pidff->hid,
+				 "has constant effect but no envelope\n");
 		if (test_and_clear_bit(FF_RAMP, dev->ffbit))
-			printk(KERN_WARNING "hid-pidff: "
-				"has ramp effect but no envelope\n");
+			hid_warn(pidff->hid,
+				 "has ramp effect but no envelope\n");
 
 		if (test_and_clear_bit(FF_PERIODIC, dev->ffbit))
-			printk(KERN_WARNING "hid-pidff: "
-				"has periodic effect but no envelope\n");
+			hid_warn(pidff->hid,
+				 "has periodic effect but no envelope\n");
 	}
 
 	if (test_bit(FF_CONSTANT, dev->ffbit) &&
 	    PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1)) {
-		printk(KERN_WARNING
-		       "hid-pidff: unknown constant effect layout\n");
+		hid_warn(pidff->hid, "unknown constant effect layout\n");
 		clear_bit(FF_CONSTANT, dev->ffbit);
 	}
 
 	if (test_bit(FF_RAMP, dev->ffbit) &&
 	    PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1)) {
-		printk(KERN_WARNING "hid-pidff: unknown ramp effect layout\n");
+		hid_warn(pidff->hid, "unknown ramp effect layout\n");
 		clear_bit(FF_RAMP, dev->ffbit);
 	}
 
@@ -1134,8 +1127,7 @@
 	     test_bit(FF_FRICTION, dev->ffbit) ||
 	     test_bit(FF_INERTIA, dev->ffbit)) &&
 	    PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
-		printk(KERN_WARNING
-		       "hid-pidff: unknown condition effect layout\n");
+		hid_warn(pidff->hid, "unknown condition effect layout\n");
 		clear_bit(FF_SPRING, dev->ffbit);
 		clear_bit(FF_DAMPER, dev->ffbit);
 		clear_bit(FF_FRICTION, dev->ffbit);
@@ -1144,8 +1136,7 @@
 
 	if (test_bit(FF_PERIODIC, dev->ffbit) &&
 	    PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1)) {
-		printk(KERN_WARNING
-		       "hid-pidff: unknown periodic effect layout\n");
+		hid_warn(pidff->hid, "unknown periodic effect layout\n");
 		clear_bit(FF_PERIODIC, dev->ffbit);
 	}
 
@@ -1184,12 +1175,12 @@
 	if (pidff->pool[PID_SIMULTANEOUS_MAX].value) {
 		while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) {
 			if (i++ > 20) {
-				printk(KERN_WARNING "hid-pidff: device reports "
-				       "%d simultaneous effects\n",
-				       pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+				hid_warn(pidff->hid,
+					 "device reports %d simultaneous effects\n",
+					 pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
 				break;
 			}
-			debug("pid_pool requested again");
+			hid_dbg(pidff->hid, "pid_pool requested again\n");
 			usbhid_submit_report(hid, pidff->reports[PID_POOL],
 					  USB_DIR_IN);
 			usbhid_wait_io(hid);
@@ -1215,7 +1206,7 @@
 
 	error = pidff_request_effect_upload(pidff, 1);
 	if (error) {
-		printk(KERN_ERR "hid-pidff: upload request failed\n");
+		hid_err(pidff->hid, "upload request failed\n");
 		return error;
 	}
 
@@ -1224,8 +1215,8 @@
 		pidff_autocenter(pidff, 0xffff);
 		set_bit(FF_AUTOCENTER, dev->ffbit);
 	} else {
-		printk(KERN_NOTICE "hid-pidff: "
-		       "device has unknown autocenter control method\n");
+		hid_notice(pidff->hid,
+			   "device has unknown autocenter control method\n");
 	}
 
 	pidff_erase_pid(pidff,
@@ -1248,10 +1239,10 @@
 	int max_effects;
 	int error;
 
-	debug("starting pid init");
+	hid_dbg(hid, "starting pid init\n");
 
 	if (list_empty(&hid->report_enum[HID_OUTPUT_REPORT].report_list)) {
-		debug("not a PID device, no output report");
+		hid_dbg(hid, "not a PID device, no output report\n");
 		return -ENODEV;
 	}
 
@@ -1265,7 +1256,7 @@
 	pidff_find_reports(hid, HID_FEATURE_REPORT, pidff);
 
 	if (!pidff_reports_ok(pidff)) {
-		debug("reports not ok, aborting");
+		hid_dbg(hid, "reports not ok, aborting\n");
 		error = -ENODEV;
 		goto fail;
 	}
@@ -1278,8 +1269,8 @@
 
 	if (test_bit(FF_GAIN, dev->ffbit)) {
 		pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff);
-		usbhid_submit_report(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
-				  USB_DIR_OUT);
+		usbhid_submit_report(hid, pidff->reports[PID_DEVICE_GAIN],
+				     USB_DIR_OUT);
 	}
 
 	error = pidff_check_autocenter(pidff, dev);
@@ -1290,23 +1281,23 @@
 	    pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_maximum -
 	    pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum +
 	    1;
-	debug("max effects is %d", max_effects);
+	hid_dbg(hid, "max effects is %d\n", max_effects);
 
 	if (max_effects > PID_EFFECTS_MAX)
 		max_effects = PID_EFFECTS_MAX;
 
 	if (pidff->pool[PID_SIMULTANEOUS_MAX].value)
-		debug("max simultaneous effects is %d",
-		      pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+		hid_dbg(hid, "max simultaneous effects is %d\n",
+			pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
 
 	if (pidff->pool[PID_RAM_POOL_SIZE].value)
-		debug("device memory size is %d bytes",
-		      pidff->pool[PID_RAM_POOL_SIZE].value[0]);
+		hid_dbg(hid, "device memory size is %d bytes\n",
+			pidff->pool[PID_RAM_POOL_SIZE].value[0]);
 
 	if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
 	    pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
-		printk(KERN_NOTICE "hid-pidff: "
-		       "device does not support device managed pool\n");
+		hid_notice(hid,
+			   "device does not support device managed pool\n");
 		goto fail;
 	}
 
@@ -1322,8 +1313,7 @@
 	ff->set_autocenter = pidff_set_autocenter;
 	ff->playback = pidff_playback;
 
-	printk(KERN_INFO "Force feedback for USB HID PID devices by "
-	       "Anssi Hannula <anssi.hannula@gmail.com>\n");
+	hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
 	return 0;
 
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2c18547..9a94b64 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -35,7 +35,6 @@
 	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
-	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -85,7 +84,7 @@
 	{ USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
 
 	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
-
+	{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
 	{ 0, 0 }
 };
 
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 984feb3..af0a7c1 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -585,163 +585,168 @@
 {
 	struct hiddev_list *list = file->private_data;
 	struct hiddev *hiddev = list->hiddev;
-	struct hid_device *hid = hiddev->hid;
-	struct usb_device *dev;
+	struct hid_device *hid;
 	struct hiddev_collection_info cinfo;
 	struct hiddev_report_info rinfo;
 	struct hiddev_field_info finfo;
 	struct hiddev_devinfo dinfo;
 	struct hid_report *report;
 	struct hid_field *field;
-	struct usbhid_device *usbhid = hid->driver_data;
 	void __user *user_arg = (void __user *)arg;
-	int i, r;
-	
+	int i, r = -EINVAL;
+
 	/* Called without BKL by compat methods so no BKL taken */
 
-	/* FIXME: Who or what stop this racing with a disconnect ?? */
-	if (!hiddev->exist || !hid)
-		return -EIO;
+	mutex_lock(&hiddev->existancelock);
+	if (!hiddev->exist) {
+		r = -ENODEV;
+		goto ret_unlock;
+	}
 
-	dev = hid_to_usb_dev(hid);
+	hid = hiddev->hid;
 
 	switch (cmd) {
 
 	case HIDIOCGVERSION:
-		return put_user(HID_VERSION, (int __user *)arg);
+		r = put_user(HID_VERSION, (int __user *)arg) ?
+			-EFAULT : 0;
+		break;
 
 	case HIDIOCAPPLICATION:
 		if (arg < 0 || arg >= hid->maxapplication)
-			return -EINVAL;
+			break;
 
 		for (i = 0; i < hid->maxcollection; i++)
 			if (hid->collection[i].type ==
 			    HID_COLLECTION_APPLICATION && arg-- == 0)
 				break;
 
-		if (i == hid->maxcollection)
-			return -EINVAL;
-
-		return hid->collection[i].usage;
+		if (i < hid->maxcollection)
+			r = hid->collection[i].usage;
+		break;
 
 	case HIDIOCGDEVINFO:
-		dinfo.bustype = BUS_USB;
-		dinfo.busnum = dev->bus->busnum;
-		dinfo.devnum = dev->devnum;
-		dinfo.ifnum = usbhid->ifnum;
-		dinfo.vendor = le16_to_cpu(dev->descriptor.idVendor);
-		dinfo.product = le16_to_cpu(dev->descriptor.idProduct);
-		dinfo.version = le16_to_cpu(dev->descriptor.bcdDevice);
-		dinfo.num_applications = hid->maxapplication;
-		if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
-			return -EFAULT;
+		{
+			struct usb_device *dev = hid_to_usb_dev(hid);
+			struct usbhid_device *usbhid = hid->driver_data;
 
-		return 0;
+			dinfo.bustype = BUS_USB;
+			dinfo.busnum = dev->bus->busnum;
+			dinfo.devnum = dev->devnum;
+			dinfo.ifnum = usbhid->ifnum;
+			dinfo.vendor = le16_to_cpu(dev->descriptor.idVendor);
+			dinfo.product = le16_to_cpu(dev->descriptor.idProduct);
+			dinfo.version = le16_to_cpu(dev->descriptor.bcdDevice);
+			dinfo.num_applications = hid->maxapplication;
+
+			r = copy_to_user(user_arg, &dinfo, sizeof(dinfo)) ?
+				-EFAULT : 0;
+			break;
+		}
 
 	case HIDIOCGFLAG:
-		if (put_user(list->flags, (int __user *)arg))
-			return -EFAULT;
-
-		return 0;
+		r = put_user(list->flags, (int __user *)arg) ?
+			-EFAULT : 0;
+		break;
 
 	case HIDIOCSFLAG:
 		{
 			int newflags;
-			if (get_user(newflags, (int __user *)arg))
-				return -EFAULT;
+
+			if (get_user(newflags, (int __user *)arg)) {
+				r = -EFAULT;
+				break;
+			}
 
 			if ((newflags & ~HIDDEV_FLAGS) != 0 ||
 			    ((newflags & HIDDEV_FLAG_REPORT) != 0 &&
 			     (newflags & HIDDEV_FLAG_UREF) == 0))
-				return -EINVAL;
+				break;
 
 			list->flags = newflags;
 
-			return 0;
+			r = 0;
+			break;
 		}
 
 	case HIDIOCGSTRING:
-		mutex_lock(&hiddev->existancelock);
-		if (hiddev->exist)
-			r = hiddev_ioctl_string(hiddev, cmd, user_arg);
-		else
-			r = -ENODEV;
-		mutex_unlock(&hiddev->existancelock);
-		return r;
+		r = hiddev_ioctl_string(hiddev, cmd, user_arg);
+		break;
 
 	case HIDIOCINITREPORT:
-		mutex_lock(&hiddev->existancelock);
-		if (!hiddev->exist) {
-			mutex_unlock(&hiddev->existancelock);
-			return -ENODEV;
-		}
 		usbhid_init_reports(hid);
-		mutex_unlock(&hiddev->existancelock);
-
-		return 0;
+		r = 0;
+		break;
 
 	case HIDIOCGREPORT:
-		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))
-			return -EFAULT;
+		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) {
+			r = -EFAULT;
+			break;
+		}
 
 		if (rinfo.report_type == HID_REPORT_TYPE_OUTPUT)
-			return -EINVAL;
+			break;
 
-		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
-			return -EINVAL;
+		report = hiddev_lookup_report(hid, &rinfo);
+		if (report == NULL)
+			break;
 
-		mutex_lock(&hiddev->existancelock);
-		if (hiddev->exist) {
-			usbhid_submit_report(hid, report, USB_DIR_IN);
-			usbhid_wait_io(hid);
-		}
-		mutex_unlock(&hiddev->existancelock);
+		usbhid_submit_report(hid, report, USB_DIR_IN);
+		usbhid_wait_io(hid);
 
-		return 0;
+		r = 0;
+		break;
 
 	case HIDIOCSREPORT:
-		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))
-			return -EFAULT;
+		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) {
+			r = -EFAULT;
+			break;
+		}
 
 		if (rinfo.report_type == HID_REPORT_TYPE_INPUT)
-			return -EINVAL;
+			break;
 
-		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
-			return -EINVAL;
+		report = hiddev_lookup_report(hid, &rinfo);
+		if (report == NULL)
+			break;
 
-		mutex_lock(&hiddev->existancelock);
-		if (hiddev->exist) {
-			usbhid_submit_report(hid, report, USB_DIR_OUT);
-			usbhid_wait_io(hid);
-		}
-		mutex_unlock(&hiddev->existancelock);
+		usbhid_submit_report(hid, report, USB_DIR_OUT);
+		usbhid_wait_io(hid);
 
-		return 0;
+		r = 0;
+		break;
 
 	case HIDIOCGREPORTINFO:
-		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo)))
-			return -EFAULT;
+		if (copy_from_user(&rinfo, user_arg, sizeof(rinfo))) {
+			r = -EFAULT;
+			break;
+		}
 
-		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
-			return -EINVAL;
+		report = hiddev_lookup_report(hid, &rinfo);
+		if (report == NULL)
+			break;
 
 		rinfo.num_fields = report->maxfield;
 
-		if (copy_to_user(user_arg, &rinfo, sizeof(rinfo)))
-			return -EFAULT;
-
-		return 0;
+		r = copy_to_user(user_arg, &rinfo, sizeof(rinfo)) ?
+			-EFAULT : 0;
+		break;
 
 	case HIDIOCGFIELDINFO:
-		if (copy_from_user(&finfo, user_arg, sizeof(finfo)))
-			return -EFAULT;
+		if (copy_from_user(&finfo, user_arg, sizeof(finfo))) {
+			r = -EFAULT;
+			break;
+		}
+
 		rinfo.report_type = finfo.report_type;
 		rinfo.report_id = finfo.report_id;
-		if ((report = hiddev_lookup_report(hid, &rinfo)) == NULL)
-			return -EINVAL;
+
+		report = hiddev_lookup_report(hid, &rinfo);
+		if (report == NULL)
+			break;
 
 		if (finfo.field_index >= report->maxfield)
-			return -EINVAL;
+			break;
 
 		field = report->field[finfo.field_index];
 		memset(&finfo, 0, sizeof(finfo));
@@ -760,10 +765,9 @@
 		finfo.unit_exponent = field->unit_exponent;
 		finfo.unit = field->unit;
 
-		if (copy_to_user(user_arg, &finfo, sizeof(finfo)))
-			return -EFAULT;
-
-		return 0;
+		r = copy_to_user(user_arg, &finfo, sizeof(finfo)) ?
+			-EFAULT : 0;
+		break;
 
 	case HIDIOCGUCODE:
 		/* fall through */
@@ -772,57 +776,66 @@
 	case HIDIOCGUSAGES:
 	case HIDIOCSUSAGES:
 	case HIDIOCGCOLLECTIONINDEX:
-		mutex_lock(&hiddev->existancelock);
-		if (hiddev->exist)
-			r = hiddev_ioctl_usage(hiddev, cmd, user_arg);
-		else
-			r = -ENODEV;
-		mutex_unlock(&hiddev->existancelock);
-		return r;
+		r = hiddev_ioctl_usage(hiddev, cmd, user_arg);
+		break;
 
 	case HIDIOCGCOLLECTIONINFO:
-		if (copy_from_user(&cinfo, user_arg, sizeof(cinfo)))
-			return -EFAULT;
+		if (copy_from_user(&cinfo, user_arg, sizeof(cinfo))) {
+			r = -EFAULT;
+			break;
+		}
 
 		if (cinfo.index >= hid->maxcollection)
-			return -EINVAL;
+			break;
 
 		cinfo.type = hid->collection[cinfo.index].type;
 		cinfo.usage = hid->collection[cinfo.index].usage;
 		cinfo.level = hid->collection[cinfo.index].level;
 
-		if (copy_to_user(user_arg, &cinfo, sizeof(cinfo)))
-			return -EFAULT;
-		return 0;
+		r = copy_to_user(user_arg, &cinfo, sizeof(cinfo)) ?
+			-EFAULT : 0;
+		break;
 
 	default:
-
 		if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ)
-			return -EINVAL;
+			break;
 
 		if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGNAME(0))) {
 			int len;
-			if (!hid->name)
-				return 0;
+
+			if (!hid->name) {
+				r = 0;
+				break;
+			}
+
 			len = strlen(hid->name) + 1;
 			if (len > _IOC_SIZE(cmd))
 				 len = _IOC_SIZE(cmd);
-			return copy_to_user(user_arg, hid->name, len) ?
+			r = copy_to_user(user_arg, hid->name, len) ?
 				-EFAULT : len;
+			break;
 		}
 
 		if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGPHYS(0))) {
 			int len;
-			if (!hid->phys)
-				return 0;
+
+			if (!hid->phys) {
+				r = 0;
+				break;
+			}
+
 			len = strlen(hid->phys) + 1;
 			if (len > _IOC_SIZE(cmd))
 				len = _IOC_SIZE(cmd);
-			return copy_to_user(user_arg, hid->phys, len) ?
+			r = copy_to_user(user_arg, hid->phys, len) ?
 				-EFAULT : len;
+			break;
 		}
 	}
-	return -EINVAL;
+
+ret_unlock:
+	mutex_unlock(&hiddev->existancelock);
+	return r;
 }
 
 #ifdef CONFIG_COMPAT
@@ -892,7 +905,7 @@
 	hiddev->exist = 1;
 	retval = usb_register_dev(usbhid->intf, &hiddev_class);
 	if (retval) {
-		err_hid("Not able to get a minor for this device.");
+		hid_err(hid, "Not able to get a minor for this device\n");
 		hid->hiddev = NULL;
 		kfree(hiddev);
 		return -1;
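
The hiddev_ioctl() rework above takes hiddev->existancelock once, defaults the return value r to -EINVAL, and lets every case break out to the common ret_unlock label, so each command no longer needs its own lock/unlock pair and existence check. The stand-alone sketch below illustrates that single-exit pattern with a pthread mutex standing in for existancelock; the names and commands are illustrative, not the driver's API.

/* Sketch of the single-exit ioctl pattern used above: take the lock once,
 * default the result to -EINVAL, and let every case fall through to one
 * unlock point.  Hypothetical user-space analog, not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t existancelock = PTHREAD_MUTEX_INITIALIZER;
static int device_exists = 1;

static long fake_ioctl(unsigned int cmd, int arg)
{
	long r = -EINVAL;		/* default for unknown/invalid requests */

	pthread_mutex_lock(&existancelock);
	if (!device_exists) {
		r = -ENODEV;		/* one existence check covers every command */
		goto ret_unlock;
	}

	switch (cmd) {
	case 1:				/* a "get flag" style request */
		r = (arg >= 0) ? 0 : -EFAULT;
		break;
	case 2:				/* a "set flag" style request */
		if (arg != 0 && arg != 1)
			break;		/* r stays -EINVAL */
		r = 0;
		break;
	}

ret_unlock:
	pthread_mutex_unlock(&existancelock);
	return r;
}

int main(void)
{
	printf("%ld %ld %ld\n", fake_ioctl(1, 5), fake_ioctl(2, 7), fake_ioctl(3, 0));
	return 0;
}
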
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 89d2e84..1673cac 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -95,7 +95,6 @@
 	unsigned long stop_retry;                                       /* Time to give up, in jiffies */
 	unsigned int retry_delay;                                       /* Delay length in ms */
 	struct work_struct reset_work;                                  /* Task context for resets */
-	struct work_struct restart_work;				/* waking up for output to be done in a task */
 	wait_queue_head_t wait;						/* For sleeping */
 	int ledcount;							/* counting the number of active leds */
 };
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index a948605..0658173 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -24,6 +24,8 @@
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -104,16 +106,18 @@
 			if (usb_kbd_keycode[kbd->old[i]])
 				input_report_key(kbd->dev, usb_kbd_keycode[kbd->old[i]], 0);
 			else
-				dev_info(&urb->dev->dev,
-						"Unknown key (scancode %#x) released.\n", kbd->old[i]);
+				hid_info(urb->dev,
+					 "Unknown key (scancode %#x) released.\n",
+					 kbd->old[i]);
 		}
 
 		if (kbd->new[i] > 3 && memscan(kbd->old + 2, kbd->new[i], 6) == kbd->old + 8) {
 			if (usb_kbd_keycode[kbd->new[i]])
 				input_report_key(kbd->dev, usb_kbd_keycode[kbd->new[i]], 1);
 			else
-				dev_info(&urb->dev->dev,
-						"Unknown key (scancode %#x) released.\n", kbd->new[i]);
+				hid_info(urb->dev,
+					 "Unknown key (scancode %#x) pressed.\n",
+					 kbd->new[i]);
 		}
 	}
 
@@ -124,9 +128,9 @@
 resubmit:
 	i = usb_submit_urb (urb, GFP_ATOMIC);
 	if (i)
-		err_hid ("can't resubmit intr, %s-%s/input0, status %d",
-				kbd->usbdev->bus->bus_name,
-				kbd->usbdev->devpath, i);
+		hid_err(urb->dev, "can't resubmit intr, %s-%s/input0, status %d",
+			kbd->usbdev->bus->bus_name,
+			kbd->usbdev->devpath, i);
 }
 
 static int usb_kbd_event(struct input_dev *dev, unsigned int type,
@@ -150,7 +154,7 @@
 	*(kbd->leds) = kbd->newleds;
 	kbd->led->dev = kbd->usbdev;
 	if (usb_submit_urb(kbd->led, GFP_ATOMIC))
-		err_hid("usb_submit_urb(leds) failed");
+		pr_err("usb_submit_urb(leds) failed\n");
 
 	return 0;
 }
@@ -160,7 +164,7 @@
 	struct usb_kbd *kbd = urb->context;
 
 	if (urb->status)
-		dev_warn(&urb->dev->dev, "led urb status %d received\n",
+		hid_warn(urb->dev, "led urb status %d received\n",
 			 urb->status);
 
 	if (*(kbd->leds) == kbd->newleds)
@@ -169,7 +173,7 @@
 	*(kbd->leds) = kbd->newleds;
 	kbd->led->dev = kbd->usbdev;
 	if (usb_submit_urb(kbd->led, GFP_ATOMIC))
-		err_hid("usb_submit_urb(leds) failed");
+		hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
 }
 
 static int usb_kbd_open(struct input_dev *dev)
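
usbkbd.c (and, further down, abituguru, abituguru3, adt7470 and applesmc) now defines pr_fmt() before the includes so that pr_err()/pr_warn()/pr_info() prepend the module name automatically; that is what lets the long printk(KERN_ERR ABIT_UGURU_NAME ": ...") call sites collapse to plain pr_err() later in this series. The snippet below is a rough user-space analog of that convention; the fprintf-based helpers are stand-ins for the kernel macros, not their real definitions.

/* User-space analog of the kernel's pr_fmt() convention: defining pr_fmt
 * before the printing helpers prepends a module prefix to every message,
 * so call sites no longer repeat it.  Illustrative sketch only. */
#include <stdio.h>

#define KBUILD_MODNAME "abituguru"		/* normally provided by Kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the helpers */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("Device allocation failed\n");	/* "abituguru: Device allocation failed" */
	pr_info("found Abit uGuru\n");		/* "abituguru: found Abit uGuru" */
	return 0;
}
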
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index a56f6ad..35f00da 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -274,6 +274,16 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called atxp1.
 
+config SENSORS_DS620
+	tristate "Dallas Semiconductor DS620"
+	depends on I2C
+	help
+	  If you say yes here you get support for the Dallas Semiconductor
+	  DS620 sensor chip.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called ds620.
+
 config SENSORS_DS1621
 	tristate "Dallas Semiconductor DS1621 and DS1625"
 	depends on I2C
@@ -734,6 +744,16 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called sht15.
 
+config SENSORS_SHT21
+	tristate "Sensirion humidity and temperature sensors. SHT21 and compat."
+	depends on I2C
+	help
+	  If you say yes here you get support for the Sensirion SHT21, SHT25
+	  humidity and temperature sensors.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called sht21.
+
 config SENSORS_S3C
 	tristate "Samsung built-in ADC"
 	depends on S3C_ADC
@@ -789,10 +809,10 @@
 	  will be called dme1737.
 
 config SENSORS_EMC1403
-	tristate "SMSC EMC1403 thermal sensor"
+	tristate "SMSC EMC1403/23 thermal sensor"
 	depends on I2C
 	help
-	  If you say yes here you get support for the SMSC EMC1403
+	  If you say yes here you get support for the SMSC EMC1403/23
 	  temperature monitoring chip.
 
 	  Threshold values can be configured using sysfs.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 2479b3d..dde02d9 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -41,6 +41,7 @@
 obj-$(CONFIG_SENSORS_CORETEMP)	+= coretemp.o
 obj-$(CONFIG_SENSORS_PKGTEMP)	+= pkgtemp.o
 obj-$(CONFIG_SENSORS_DME1737)	+= dme1737.o
+obj-$(CONFIG_SENSORS_DS620)	+= ds620.o
 obj-$(CONFIG_SENSORS_DS1621)	+= ds1621.o
 obj-$(CONFIG_SENSORS_EMC1403)	+= emc1403.o
 obj-$(CONFIG_SENSORS_EMC2103)	+= emc2103.o
@@ -90,6 +91,7 @@
 obj-$(CONFIG_SENSORS_PCF8591)	+= pcf8591.o
 obj-$(CONFIG_SENSORS_S3C)	+= s3c-hwmon.o
 obj-$(CONFIG_SENSORS_SHT15)	+= sht15.o
+obj-$(CONFIG_SENSORS_SHT21)	+= sht21.o
 obj-$(CONFIG_SENSORS_SIS5595)	+= sis5595.o
 obj-$(CONFIG_SENSORS_SMM665)	+= smm665.o
 obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 03694cc..8f07a9d 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -20,6 +20,9 @@
     the custom Abit uGuru chip found on Abit uGuru motherboards. Note: because
     of lack of specs the CPU/RAM voltage & frequency control is not supported!
 */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -220,6 +223,10 @@
 	u8 pwm_settings[ABIT_UGURU_MAX_PWMS][5];
 };
 
+static const char *never_happen = "This should never happen.";
+static const char *report_this =
+	"Please report this to the abituguru maintainer (see MAINTAINERS)";
+
 /* wait till the uguru is in the specified state */
 static int abituguru_wait(struct abituguru_data *data, u8 state)
 {
@@ -438,8 +445,7 @@
 
 	/* Test val is sane / usable for sensor type detection. */
 	if ((val < 10u) || (val > 250u)) {
-		printk(KERN_WARNING ABIT_UGURU_NAME
-			": bank1-sensor: %d reading (%d) too close to limits, "
+		pr_warn("bank1-sensor: %d reading (%d) too close to limits, "
 			"unable to determine sensor type, skipping sensor\n",
 			(int)sensor_addr, (int)val);
 		/* assume no sensor is there for sensors for which we can't
@@ -535,10 +541,8 @@
 				3) == 3)
 			break;
 	if (i == 3) {
-		printk(KERN_ERR ABIT_UGURU_NAME
-			": Fatal error could not restore original settings. "
-			"This should never happen please report this to the "
-			"abituguru maintainer (see MAINTAINERS)\n");
+		pr_err("Fatal error could not restore original settings. %s %s\n",
+		       never_happen, report_this);
 		return -ENODEV;
 	}
 	return ret;
@@ -1268,14 +1272,12 @@
 	}
 	/* Fail safe check, this should never happen! */
 	if (sysfs_names_free < 0) {
-		printk(KERN_ERR ABIT_UGURU_NAME ": Fatal error ran out of "
-		       "space for sysfs attr names. This should never "
-		       "happen please report to the abituguru maintainer "
-		       "(see MAINTAINERS)\n");
+		pr_err("Fatal error ran out of space for sysfs attr names. %s %s\n",
+		       never_happen, report_this);
 		res = -ENAMETOOLONG;
 		goto abituguru_probe_error;
 	}
-	printk(KERN_INFO ABIT_UGURU_NAME ": found Abit uGuru\n");
+	pr_info("found Abit uGuru\n");
 
 	/* Register sysfs hooks */
 	for (i = 0; i < sysfs_attr_i; i++)
@@ -1432,8 +1434,7 @@
 		"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
 
 	if (force) {
-		printk(KERN_INFO ABIT_UGURU_NAME ": Assuming Abit uGuru is "
-				"present because of \"force\" parameter\n");
+		pr_info("Assuming Abit uGuru is present because of \"force\" parameter\n");
 		return ABIT_UGURU_BASE;
 	}
 
@@ -1467,8 +1468,7 @@
 
 	abituguru_pdev = platform_device_alloc(ABIT_UGURU_NAME, address);
 	if (!abituguru_pdev) {
-		printk(KERN_ERR ABIT_UGURU_NAME
-			": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		err = -ENOMEM;
 		goto exit_driver_unregister;
 	}
@@ -1479,15 +1479,13 @@
 
 	err = platform_device_add_resources(abituguru_pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR ABIT_UGURU_NAME
-			": Device resource addition failed (%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(abituguru_pdev);
 	if (err) {
-		printk(KERN_ERR ABIT_UGURU_NAME
-			": Device addition failed (%d)\n", err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 3cf28af..48d21e2 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -23,6 +23,9 @@
     chip found on newer Abit uGuru motherboards. Note: because of lack of specs
     only reading the sensors and their settings is supported.
 */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -608,6 +611,9 @@
 module_param(verbose, bool, 0644);
 MODULE_PARM_DESC(verbose, "Enable/disable verbose error reporting");
 
+static const char *never_happen = "This should never happen.";
+static const char *report_this =
+	"Please report this to the abituguru3 maintainer (see MAINTAINERS)";
 
 /* wait while the uguru is busy (usually after a write) */
 static int abituguru3_wait_while_busy(struct abituguru3_data *data)
@@ -940,15 +946,13 @@
 		if (abituguru3_motherboards[i].id == id)
 			break;
 	if (!abituguru3_motherboards[i].id) {
-		printk(KERN_ERR ABIT_UGURU3_NAME ": error unknown motherboard "
-			"ID: %04X. Please report this to the abituguru3 "
-			"maintainer (see MAINTAINERS)\n", (unsigned int)id);
+		pr_err("error unknown motherboard ID: %04X. %s\n",
+		       (unsigned int)id, report_this);
 		goto abituguru3_probe_error;
 	}
 	data->sensors = abituguru3_motherboards[i].sensors;
 
-	printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
-		"ID: %04X\n", (unsigned int)id);
+	pr_info("found Abit uGuru3, motherboard ID: %04X\n", (unsigned int)id);
 
 	/* Fill the sysfs attr array */
 	sysfs_attr_i = 0;
@@ -957,11 +961,8 @@
 	for (i = 0; data->sensors[i].name; i++) {
 		/* Fail safe check, this should never happen! */
 		if (i >= ABIT_UGURU3_MAX_NO_SENSORS) {
-			printk(KERN_ERR ABIT_UGURU3_NAME
-				": Fatal error motherboard has more sensors "
-				"then ABIT_UGURU3_MAX_NO_SENSORS. This should "
-				"never happen please report to the abituguru3 "
-				"maintainer (see MAINTAINERS)\n");
+			pr_err("Fatal error motherboard has more sensors than ABIT_UGURU3_MAX_NO_SENSORS. %s %s\n",
+			       never_happen, report_this);
 			res = -ENAMETOOLONG;
 			goto abituguru3_probe_error;
 		}
@@ -983,10 +984,8 @@
 	}
 	/* Fail safe check, this should never happen! */
 	if (sysfs_names_free < 0) {
-		printk(KERN_ERR ABIT_UGURU3_NAME
-			": Fatal error ran out of space for sysfs attr names. "
-			"This should never happen please report to the "
-			"abituguru3 maintainer (see MAINTAINERS)\n");
+		pr_err("Fatal error ran out of space for sysfs attr names. %s %s\n",
+		       never_happen, report_this);
 		res = -ENAMETOOLONG;
 		goto abituguru3_probe_error;
 	}
@@ -1189,8 +1188,7 @@
 		"0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
 
 	if (force) {
-		printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
-				"present because of \"force\" parameter\n");
+		pr_info("Assuming Abit uGuru3 is present because of \"force\" parameter\n");
 		return 0;
 	}
 
@@ -1219,10 +1217,8 @@
 			return err;
 
 #ifdef CONFIG_DMI
-		printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was "
-			"not detected using DMI. Please send the output of "
-			"\"dmidecode\" to the abituguru3 maintainer "
-			"(see MAINTAINERS)\n");
+		pr_warn("this motherboard was not detected using DMI. "
+			"Please send the output of \"dmidecode\" to the abituguru3 maintainer (see MAINTAINERS)\n");
 #endif
 	}
 
@@ -1233,8 +1229,7 @@
 	abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME,
 						ABIT_UGURU3_BASE);
 	if (!abituguru3_pdev) {
-		printk(KERN_ERR ABIT_UGURU3_NAME
-			": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		err = -ENOMEM;
 		goto exit_driver_unregister;
 	}
@@ -1245,15 +1240,13 @@
 
 	err = platform_device_add_resources(abituguru3_pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR ABIT_UGURU3_NAME
-			": Device resource addition failed (%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(abituguru3_pdev);
 	if (err) {
-		printk(KERN_ERR ABIT_UGURU3_NAME
-			": Device addition failed (%d)\n", err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 0727ad2..9e234b9 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -20,7 +20,7 @@
  * Alarms	16-bit map of active alarms
  * Analog Out	0..1250 mV output
  *
- * Chassis Intrusion: clear CI latch with 'echo 1 > chassis_clear'
+ * Chassis Intrusion: clear CI latch with 'echo 0 > intrusion0_alarm'
  *
  * Test hardware: Intel SE440BX-2 desktop motherboard --Grant
  *
@@ -476,13 +476,16 @@
 static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
 
 /* chassis_clear */
-static ssize_t chassis_clear(struct device *dev,
+static ssize_t chassis_clear_legacy(struct device *dev,
 		struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	unsigned long val = simple_strtol(buf, NULL, 10);
 
+	dev_warn(dev, "Attribute chassis_clear is deprecated, "
+		 "use intrusion0_alarm instead\n");
+
 	if (val == 1) {
 		i2c_smbus_write_byte_data(client,
 				ADM9240_REG_CHASSIS_CLEAR, 0x80);
@@ -490,7 +493,29 @@
 	}
 	return count;
 }
-static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear);
+static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear_legacy);
+
+static ssize_t chassis_clear(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adm9240_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80);
+	data->valid = 0;		/* Force cache refresh */
+	mutex_unlock(&data->update_lock);
+	dev_dbg(&client->dev, "chassis intrusion latch cleared\n");
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
+		chassis_clear, 12);
 
 static struct attribute *adm9240_attributes[] = {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -532,6 +557,7 @@
 	&dev_attr_alarms.attr,
 	&dev_attr_aout_output.attr,
 	&dev_attr_chassis_clear.attr,
+	&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
 	&dev_attr_cpu0_vid.attr,
 	NULL
 };
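
The new intrusion0_alarm store above, like the applesmc conversions later in this series, switches from simple_strtol()/simple_strtoul() to strict_strtoul(), which fails on trailing garbage instead of silently parsing a prefix. The user-space sketch below uses strtoul() plus an end-pointer check as a stand-in for strict_strtoul() to show the difference; it is an assumed analog, not the kernel implementation.

/* Contrast between a "simple" parse that ignores trailing junk and a strict
 * parse that rejects it; this is why the driver can insist on exactly
 * "echo 0 > intrusion0_alarm".  Illustrative stand-in only. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int strict_parse_ul(const char *buf, unsigned long *val)
{
	char *end;

	errno = 0;
	*val = strtoul(buf, &end, 10);
	if (errno || end == buf)
		return -EINVAL;
	if (*end == '\n')	/* allow the newline "echo" appends */
		end++;
	if (*end != '\0')
		return -EINVAL;	/* trailing garbage */
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("simple: \"0junk\" -> %lu (accepted)\n", strtoul("0junk", NULL, 10));
	printf("strict: \"0junk\" -> %s\n",
	       strict_parse_ul("0junk", &v) ? "rejected" : "accepted");
	printf("strict: \"0\\n\"   -> %s\n",
	       strict_parse_ul("0\n", &v) ? "rejected" : "accepted");
	return 0;
}
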
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index aac85f3..c42c5a6 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -4,7 +4,7 @@
 
 	This driver is based on the lm75 and other lm_sensors/hwmon drivers
 
-	Written by Steve Hardy <steve@linuxrealtime.co.uk>
+	Written by Steve Hardy <shardy@redhat.com>
 
 	Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf
 
@@ -271,7 +271,7 @@
 	i2c_del_driver(&ads7828_driver);
 }
 
-MODULE_AUTHOR("Steve Hardy <steve@linuxrealtime.co.uk>");
+MODULE_AUTHOR("Steve Hardy <shardy@redhat.com>");
 MODULE_DESCRIPTION("ADS7828 driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 87d92a5..c6d1ce0 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -19,6 +19,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/i2c.h>
@@ -274,7 +276,7 @@
 	i2c_smbus_write_byte_data(client, ADT7470_REG_PWM_CFG(2), pwm_cfg[1]);
 
 	if (res) {
-		printk(KERN_ERR "ha ha, interrupted");
+		pr_err("ha ha, interrupted\n");
 		return -EAGAIN;
 	}
 
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index b6598aa..ce0372f 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -4,6 +4,7 @@
  * computers.
  *
  * Copyright (C) 2007 Nicolas Boichat <nicolas@boichat.ch>
+ * Copyright (C) 2010 Henrik Rydberg <rydberg@euromail.se>
  *
  * Based on hdaps.c driver:
  * Copyright (C) 2005 Robert Love <rml@novell.com>
@@ -26,10 +27,13 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/platform_device.h>
 #include <linux/input-polldev.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/timer.h>
 #include <linux/dmi.h>
@@ -49,6 +53,7 @@
 
 #define APPLESMC_MAX_DATA_LENGTH 32
 
+/* wait up to 32 ms for a status change. */
 #define APPLESMC_MIN_WAIT	0x0040
 #define APPLESMC_MAX_WAIT	0x8000
 
@@ -73,104 +78,15 @@
 
 #define FANS_COUNT		"FNum" /* r-o ui8 */
 #define FANS_MANUAL		"FS! " /* r-w ui16 */
-#define FAN_ACTUAL_SPEED	"F0Ac" /* r-o fpe2 (2 bytes) */
-#define FAN_MIN_SPEED		"F0Mn" /* r-o fpe2 (2 bytes) */
-#define FAN_MAX_SPEED		"F0Mx" /* r-o fpe2 (2 bytes) */
-#define FAN_SAFE_SPEED		"F0Sf" /* r-o fpe2 (2 bytes) */
-#define FAN_TARGET_SPEED	"F0Tg" /* r-w fpe2 (2 bytes) */
-#define FAN_POSITION		"F0ID" /* r-o char[16] */
-
-/*
- * Temperature sensors keys (sp78 - 2 bytes).
- */
-static const char *temperature_sensors_sets[][41] = {
-/* Set 0: Macbook Pro */
-	{ "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H",
-	  "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL },
-/* Set 1: Macbook2 set */
-	{ "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "TTF0", "Th0H",
-	  "Th0S", "Th1H", NULL },
-/* Set 2: Macbook set */
-	{ "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "Th0H", "Th0S",
-	  "Th1H", "Ts0P", NULL },
-/* Set 3: Macmini set */
-	{ "TC0D", "TC0P", NULL },
-/* Set 4: Mac Pro (2 x Quad-Core) */
-	{ "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
-	  "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "THTG", "TH0P",
-	  "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S",
-	  "TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P",
-	  "TM9S", "TN0H", "TS0C", NULL },
-/* Set 5: iMac */
-	{ "TC0D", "TA0P", "TG0P", "TG0D", "TG0H", "TH0P", "Tm0P", "TO0P",
-	  "Tp0C", NULL },
-/* Set 6: Macbook3 set */
-	{ "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H",
-	  "Th0S", "Th1H", NULL },
-/* Set 7: Macbook Air */
-	{ "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TC0P", "TCFP",
-	  "TTF0", "TW0P", "Th0H", "Tp0P", "TpFP", "Ts0P", "Ts0S", NULL },
-/* Set 8: Macbook Pro 4,1 (Penryn) */
-	{ "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P", "Th0H",
-	  "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
-/* Set 9: Macbook Pro 3,1 (Santa Rosa) */
-	{ "TALP", "TB0T", "TC0D", "TC0P", "TG0D", "TG0H", "TTF0", "TW0P",
-	  "Th0H", "Th1H", "Th2H", "Tm0P", "Ts0P", NULL },
-/* Set 10: iMac 5,1 */
-	{ "TA0P", "TC0D", "TC0P", "TG0D", "TH0P", "TO0P", "Tm0P", NULL },
-/* Set 11: Macbook 5,1 */
-	{ "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0P", "TN0D", "TN0P",
-	  "TTF0", "Th0H", "Th1H", "ThFH", "Ts0P", "Ts0S", NULL },
-/* Set 12: Macbook Pro 5,1 */
-	{ "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
-	  "TG0F", "TG0H", "TG0P", "TG0T", "TG1H", "TN0D", "TN0P", "TTF0",
-	  "Th2H", "Tm0P", "Ts0P", "Ts0S", NULL },
-/* Set 13: iMac 8,1 */
-	{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
-	  "TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
-/* Set 14: iMac 6,1 */
-	{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
-	  "TO0P", "Tp0P", NULL },
-/* Set 15: MacBook Air 2,1 */
-	{ "TB0T", "TB1S", "TB1T", "TB2S", "TB2T", "TC0D", "TN0D", "TTF0",
-	  "TV0P", "TVFP", "TW0P", "Th0P", "Tp0P", "Tp1P", "TpFP", "Ts0P",
-	  "Ts0S", NULL },
-/* Set 16: Mac Pro 3,1 (2 x Quad-Core) */
-	{ "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
-	  "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "TH0P", "TH1P",
-	  "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", "TM1P",
-	  "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", "TM9S",
-	  "TN0C", "TN0D", "TN0H", "TS0C", "Tp0C", "Tp1C", "Tv0S", "Tv1S",
-	  NULL },
-/* Set 17: iMac 9,1 */
-	{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TH0P", "TL0P",
-	  "TN0D", "TN0H", "TN0P", "TO0P", "Tm0P", "Tp0P", NULL },
-/* Set 18: MacBook Pro 2,2 */
-	{ "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
-	  "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
-/* Set 19: Macbook Pro 5,3 */
-	{ "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
-	  "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
-	  "Tm0P", "Ts0P", "Ts0S", NULL },
-/* Set 20: MacBook Pro 5,4 */
-	{ "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
-	  "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
-/* Set 21: MacBook Pro 6,2 */
-	{ "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
-	  "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
-	  "Ts0P", "Ts0S", NULL },
-/* Set 22: MacBook Pro 7,1 */
-	{ "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
-	  "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
-};
+#define FAN_ID_FMT		"F%dID" /* r-o char[16] */
 
 /* List of keys used to read/write fan speeds */
-static const char* fan_speed_keys[] = {
-	FAN_ACTUAL_SPEED,
-	FAN_MIN_SPEED,
-	FAN_MAX_SPEED,
-	FAN_SAFE_SPEED,
-	FAN_TARGET_SPEED
+static const char *const fan_speed_fmt[] = {
+	"F%dAc",		/* actual speed */
+	"F%dMn",		/* minimum speed (rw) */
+	"F%dMx",		/* maximum speed */
+	"F%dSf",		/* safe speed - not all models */
+	"F%dTg",		/* target speed (manual: rw) */
 };
 
 #define INIT_TIMEOUT_MSECS	5000	/* wait up to 5s for device init ... */
@@ -184,14 +100,48 @@
 #define SENSOR_Y 1
 #define SENSOR_Z 2
 
-/* Structure to be passed to DMI_MATCH function */
-struct dmi_match_data {
-/* Indicates whether this computer has an accelerometer. */
-	int accelerometer;
-/* Indicates whether this computer has light sensors and keyboard backlight. */
-	int light;
-/* Indicates which temperature sensors set to use. */
-	int temperature_set;
+#define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff)
+#define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16)
+
+/* Dynamic device node attributes */
+struct applesmc_dev_attr {
+	struct sensor_device_attribute sda;	/* hwmon attributes */
+	char name[32];				/* room for node file name */
+};
+
+/* Dynamic device node group */
+struct applesmc_node_group {
+	char *format;				/* format string */
+	void *show;				/* show function */
+	void *store;				/* store function */
+	int option;				/* function argument */
+	struct applesmc_dev_attr *nodes;	/* dynamic node array */
+};
+
+/* AppleSMC entry - cached register information */
+struct applesmc_entry {
+	char key[5];		/* four-letter key code */
+	u8 valid;		/* set when entry is successfully read once */
+	u8 len;			/* bounded by APPLESMC_MAX_DATA_LENGTH */
+	char type[5];		/* four-letter type code */
+	u8 flags;		/* 0x10: func; 0x40: write; 0x80: read */
+};
+
+/* Register lookup and registers common to all SMCs */
+static struct applesmc_registers {
+	struct mutex mutex;		/* register read/write mutex */
+	unsigned int key_count;		/* number of SMC registers */
+	unsigned int fan_count;		/* number of fans */
+	unsigned int temp_count;	/* number of temperature registers */
+	unsigned int temp_begin;	/* temperature lower index bound */
+	unsigned int temp_end;		/* temperature upper index bound */
+	int num_light_sensors;		/* number of light sensors */
+	bool has_accelerometer;		/* has motion sensor */
+	bool has_key_backlight;		/* has keyboard backlight */
+	bool init_complete;		/* true when fully initialized */
+	struct applesmc_entry *cache;	/* cached key entries */
+} smcreg = {
+	.mutex = __MUTEX_INITIALIZER(smcreg.mutex),
 };
 
 static const int debug;
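
The to_index()/to_option() macros above pack two values into one sensor attribute index: the low 16 bits select the fan or temperature slot and the high bits select which register (actual, min, max, safe, target) to address via fan_speed_fmt. The stand-alone check below shows how the packed value turns into a four-letter SMC key such as "F0Mn"; the pack() helper is hypothetical, in the driver the index is filled in when the dynamic nodes are created.

/* Demonstrates the (option << 16 | index) packing behind to_index() and
 * to_option(), and how fan_speed_fmt turns it into a key like "F0Mn".
 * Stand-alone sketch, not driver code. */
#include <stdio.h>

#define to_index(packed)  ((packed) & 0xffff)
#define to_option(packed) ((packed) >> 16)

static const char *const fan_speed_fmt[] = {
	"F%dAc",	/* actual speed */
	"F%dMn",	/* minimum speed (rw) */
	"F%dMx",	/* maximum speed */
	"F%dSf",	/* safe speed - not all models */
	"F%dTg",	/* target speed (manual: rw) */
};

static int pack(int option, int index)	/* hypothetical helper */
{
	return (option << 16) | index;
}

int main(void)
{
	char key[5];
	int attr = pack(1, 0);	/* option 1 = minimum speed, fan 0 */

	sprintf(key, fan_speed_fmt[to_option(attr)], to_index(attr));
	printf("packed=%#x index=%d option=%d key=%s\n",
	       attr, to_index(attr), to_option(attr), key);
	return 0;
}
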
@@ -203,20 +153,6 @@
 static struct device *hwmon_dev;
 static struct input_polled_dev *applesmc_idev;
 
-/* Indicates whether this computer has an accelerometer. */
-static unsigned int applesmc_accelerometer;
-
-/* Indicates whether this computer has light sensors and keyboard backlight. */
-static unsigned int applesmc_light;
-
-/* The number of fans handled by the driver */
-static unsigned int fans_handled;
-
-/* Indicates which temperature sensors set to use. */
-static unsigned int applesmc_temperature_set;
-
-static DEFINE_MUTEX(applesmc_lock);
-
 /*
  * Last index written to key_at_index sysfs file, and value to use for all other
  * key_at_index_* sysfs files.
@@ -238,18 +174,10 @@
 
 	for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
 		udelay(us);
-		if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val) {
-			if (debug)
-				printk(KERN_DEBUG
-					"Waited %d us for status %x\n",
-					2 * us - APPLESMC_MIN_WAIT, val);
+		if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val)
 			return 0;
-		}
 	}
 
-	printk(KERN_WARNING "applesmc: wait status failed: %x != %x\n",
-						val, inb(APPLESMC_CMD_PORT));
-
 	return -EIO;
 }
 
@@ -267,159 +195,242 @@
 		if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c)
 			return 0;
 	}
-	printk(KERN_WARNING "applesmc: command failed: %x -> %x\n",
-		cmd, inb(APPLESMC_CMD_PORT));
 	return -EIO;
 }
 
-/*
- * applesmc_read_key - reads len bytes from a given key, and put them in buffer.
- * Returns zero on success or a negative error on failure. Callers must
- * hold applesmc_lock.
- */
-static int applesmc_read_key(const char* key, u8* buffer, u8 len)
+static int send_argument(const char *key)
 {
 	int i;
 
-	if (len > APPLESMC_MAX_DATA_LENGTH) {
-		printk(KERN_ERR	"applesmc_read_key: cannot read more than "
-					"%d bytes\n", APPLESMC_MAX_DATA_LENGTH);
-		return -EINVAL;
-	}
-
-	if (send_command(APPLESMC_READ_CMD))
-		return -EIO;
-
 	for (i = 0; i < 4; i++) {
 		outb(key[i], APPLESMC_DATA_PORT);
 		if (__wait_status(0x04))
 			return -EIO;
 	}
-	if (debug)
-		printk(KERN_DEBUG "<%s", key);
+	return 0;
+}
+
+static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+{
+	int i;
+
+	if (send_command(cmd) || send_argument(key)) {
+		pr_warn("%s: read arg fail\n", key);
+		return -EIO;
+	}
 
 	outb(len, APPLESMC_DATA_PORT);
-	if (debug)
-		printk(KERN_DEBUG ">%x", len);
 
 	for (i = 0; i < len; i++) {
-		if (__wait_status(0x05))
+		if (__wait_status(0x05)) {
+			pr_warn("%s: read data fail\n", key);
 			return -EIO;
+		}
 		buffer[i] = inb(APPLESMC_DATA_PORT);
-		if (debug)
-			printk(KERN_DEBUG "<%x", buffer[i]);
 	}
-	if (debug)
-		printk(KERN_DEBUG "\n");
 
 	return 0;
 }
 
-/*
- * applesmc_write_key - writes len bytes from buffer to a given key.
- * Returns zero on success or a negative error on failure. Callers must
- * hold applesmc_lock.
- */
-static int applesmc_write_key(const char* key, u8* buffer, u8 len)
+static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
 {
 	int i;
 
-	if (len > APPLESMC_MAX_DATA_LENGTH) {
-		printk(KERN_ERR	"applesmc_write_key: cannot write more than "
-					"%d bytes\n", APPLESMC_MAX_DATA_LENGTH);
-		return -EINVAL;
-	}
-
-	if (send_command(APPLESMC_WRITE_CMD))
+	if (send_command(cmd) || send_argument(key)) {
+		pr_warn("%s: write arg fail\n", key);
 		return -EIO;
-
-	for (i = 0; i < 4; i++) {
-		outb(key[i], APPLESMC_DATA_PORT);
-		if (__wait_status(0x04))
-			return -EIO;
 	}
 
 	outb(len, APPLESMC_DATA_PORT);
 
 	for (i = 0; i < len; i++) {
-		if (__wait_status(0x04))
+		if (__wait_status(0x04)) {
+			pr_warn("%s: write data fail\n", key);
 			return -EIO;
+		}
 		outb(buffer[i], APPLESMC_DATA_PORT);
 	}
 
 	return 0;
 }
 
-/*
- * applesmc_get_key_at_index - get key at index, and put the result in key
- * (char[6]). Returns zero on success or a negative error on failure. Callers
- * must hold applesmc_lock.
- */
-static int applesmc_get_key_at_index(int index, char* key)
+static int read_register_count(unsigned int *count)
 {
-	int i;
-	u8 readkey[4];
-	readkey[0] = index >> 24;
-	readkey[1] = index >> 16;
-	readkey[2] = index >> 8;
-	readkey[3] = index;
+	__be32 be;
+	int ret;
 
-	if (send_command(APPLESMC_GET_KEY_BY_INDEX_CMD))
-		return -EIO;
+	ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4);
+	if (ret)
+		return ret;
 
-	for (i = 0; i < 4; i++) {
-		outb(readkey[i], APPLESMC_DATA_PORT);
-		if (__wait_status(0x04))
-			return -EIO;
-	}
-
-	outb(4, APPLESMC_DATA_PORT);
-
-	for (i = 0; i < 4; i++) {
-		if (__wait_status(0x05))
-			return -EIO;
-		key[i] = inb(APPLESMC_DATA_PORT);
-	}
-	key[4] = 0;
-
+	*count = be32_to_cpu(be);
 	return 0;
 }
 
 /*
- * applesmc_get_key_type - get key type, and put the result in type (char[6]).
- * Returns zero on success or a negative error on failure. Callers must
- * hold applesmc_lock.
+ * Serialized I/O
+ *
+ * Returns zero on success or a negative error on failure.
+ * All functions below are concurrency safe - callers should NOT hold lock.
  */
-static int applesmc_get_key_type(char* key, char* type)
+
+static int applesmc_read_entry(const struct applesmc_entry *entry,
+			       u8 *buf, u8 len)
 {
-	int i;
+	int ret;
 
-	if (send_command(APPLESMC_GET_KEY_TYPE_CMD))
-		return -EIO;
+	if (entry->len != len)
+		return -EINVAL;
+	mutex_lock(&smcreg.mutex);
+	ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len);
+	mutex_unlock(&smcreg.mutex);
 
-	for (i = 0; i < 4; i++) {
-		outb(key[i], APPLESMC_DATA_PORT);
-		if (__wait_status(0x04))
-			return -EIO;
+	return ret;
+}
+
+static int applesmc_write_entry(const struct applesmc_entry *entry,
+				const u8 *buf, u8 len)
+{
+	int ret;
+
+	if (entry->len != len)
+		return -EINVAL;
+	mutex_lock(&smcreg.mutex);
+	ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len);
+	mutex_unlock(&smcreg.mutex);
+	return ret;
+}
+
+static const struct applesmc_entry *applesmc_get_entry_by_index(int index)
+{
+	struct applesmc_entry *cache = &smcreg.cache[index];
+	u8 key[4], info[6];
+	__be32 be;
+	int ret = 0;
+
+	if (cache->valid)
+		return cache;
+
+	mutex_lock(&smcreg.mutex);
+
+	if (cache->valid)
+		goto out;
+	be = cpu_to_be32(index);
+	ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4);
+	if (ret)
+		goto out;
+	ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6);
+	if (ret)
+		goto out;
+
+	memcpy(cache->key, key, 4);
+	cache->len = info[0];
+	memcpy(cache->type, &info[1], 4);
+	cache->flags = info[5];
+	cache->valid = 1;
+
+out:
+	mutex_unlock(&smcreg.mutex);
+	if (ret)
+		return ERR_PTR(ret);
+	return cache;
+}
+
+static int applesmc_get_lower_bound(unsigned int *lo, const char *key)
+{
+	int begin = 0, end = smcreg.key_count;
+	const struct applesmc_entry *entry;
+
+	while (begin != end) {
+		int middle = begin + (end - begin) / 2;
+		entry = applesmc_get_entry_by_index(middle);
+		if (IS_ERR(entry))
+			return PTR_ERR(entry);
+		if (strcmp(entry->key, key) < 0)
+			begin = middle + 1;
+		else
+			end = middle;
 	}
 
-	outb(6, APPLESMC_DATA_PORT);
+	*lo = begin;
+	return 0;
+}
 
-	for (i = 0; i < 6; i++) {
-		if (__wait_status(0x05))
-			return -EIO;
-		type[i] = inb(APPLESMC_DATA_PORT);
+static int applesmc_get_upper_bound(unsigned int *hi, const char *key)
+{
+	int begin = 0, end = smcreg.key_count;
+	const struct applesmc_entry *entry;
+
+	while (begin != end) {
+		int middle = begin + (end - begin) / 2;
+		entry = applesmc_get_entry_by_index(middle);
+		if (IS_ERR(entry))
+			return PTR_ERR(entry);
+		if (strcmp(key, entry->key) < 0)
+			end = middle;
+		else
+			begin = middle + 1;
 	}
-	type[5] = 0;
 
+	*hi = begin;
+	return 0;
+}
+
+static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key)
+{
+	int begin, end;
+	int ret;
+
+	ret = applesmc_get_lower_bound(&begin, key);
+	if (ret)
+		return ERR_PTR(ret);
+	ret = applesmc_get_upper_bound(&end, key);
+	if (ret)
+		return ERR_PTR(ret);
+	if (end - begin != 1)
+		return ERR_PTR(-EINVAL);
+
+	return applesmc_get_entry_by_index(begin);
+}
+
+static int applesmc_read_key(const char *key, u8 *buffer, u8 len)
+{
+	const struct applesmc_entry *entry;
+
+	entry = applesmc_get_entry_by_key(key);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	return applesmc_read_entry(entry, buffer, len);
+}
+
+static int applesmc_write_key(const char *key, const u8 *buffer, u8 len)
+{
+	const struct applesmc_entry *entry;
+
+	entry = applesmc_get_entry_by_key(key);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	return applesmc_write_entry(entry, buffer, len);
+}
+
+static int applesmc_has_key(const char *key, bool *value)
+{
+	const struct applesmc_entry *entry;
+
+	entry = applesmc_get_entry_by_key(key);
+	if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL)
+		return PTR_ERR(entry);
+
+	*value = !IS_ERR(entry);
 	return 0;
 }
 
 /*
- * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z). Callers must
- * hold applesmc_lock.
+ * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z).
  */
-static int applesmc_read_motion_sensor(int index, s16* value)
+static int applesmc_read_motion_sensor(int index, s16 *value)
 {
 	u8 buffer[2];
 	int ret;
@@ -444,69 +455,120 @@
 }
 
 /*
- * applesmc_device_init - initialize the accelerometer.  Returns zero on success
- * and negative error code on failure.  Can sleep.
+ * applesmc_device_init - initialize the accelerometer.  Can sleep.
  */
-static int applesmc_device_init(void)
+static void applesmc_device_init(void)
 {
-	int total, ret = -ENXIO;
+	int total;
 	u8 buffer[2];
 
-	if (!applesmc_accelerometer)
-		return 0;
-
-	mutex_lock(&applesmc_lock);
+	if (!smcreg.has_accelerometer)
+		return;
 
 	for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
-		if (debug)
-			printk(KERN_DEBUG "applesmc try %d\n", total);
 		if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) &&
-				(buffer[0] != 0x00 || buffer[1] != 0x00)) {
-			if (total == INIT_TIMEOUT_MSECS) {
-				printk(KERN_DEBUG "applesmc: device has"
-						" already been initialized"
-						" (0x%02x, 0x%02x).\n",
-						buffer[0], buffer[1]);
-			} else {
-				printk(KERN_DEBUG "applesmc: device"
-						" successfully initialized"
-						" (0x%02x, 0x%02x).\n",
-						buffer[0], buffer[1]);
-			}
-			ret = 0;
-			goto out;
-		}
+				(buffer[0] != 0x00 || buffer[1] != 0x00))
+			return;
 		buffer[0] = 0xe0;
 		buffer[1] = 0x00;
 		applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2);
 		msleep(INIT_WAIT_MSECS);
 	}
 
-	printk(KERN_WARNING "applesmc: failed to init the device\n");
-
-out:
-	mutex_unlock(&applesmc_lock);
-	return ret;
+	pr_warn("failed to init the device\n");
 }
 
 /*
- * applesmc_get_fan_count - get the number of fans. Callers must NOT hold
- * applesmc_lock.
+ * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent.
  */
-static int applesmc_get_fan_count(void)
+static int applesmc_init_smcreg_try(void)
 {
+	struct applesmc_registers *s = &smcreg;
+	bool left_light_sensor, right_light_sensor;
+	u8 tmp[1];
 	int ret;
-	u8 buffer[1];
 
-	mutex_lock(&applesmc_lock);
+	if (s->init_complete)
+		return 0;
 
-	ret = applesmc_read_key(FANS_COUNT, buffer, 1);
-
-	mutex_unlock(&applesmc_lock);
+	ret = read_register_count(&s->key_count);
 	if (ret)
 		return ret;
-	else
-		return buffer[0];
+
+	if (!s->cache)
+		s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
+	if (!s->cache)
+		return -ENOMEM;
+
+	ret = applesmc_read_key(FANS_COUNT, tmp, 1);
+	if (ret)
+		return ret;
+	s->fan_count = tmp[0];
+
+	ret = applesmc_get_lower_bound(&s->temp_begin, "T");
+	if (ret)
+		return ret;
+	ret = applesmc_get_lower_bound(&s->temp_end, "U");
+	if (ret)
+		return ret;
+	s->temp_count = s->temp_end - s->temp_begin;
+
+	ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor);
+	if (ret)
+		return ret;
+	ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor);
+	if (ret)
+		return ret;
+	ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer);
+	if (ret)
+		return ret;
+	ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight);
+	if (ret)
+		return ret;
+
+	s->num_light_sensors = left_light_sensor + right_light_sensor;
+	s->init_complete = true;
+
+	pr_info("key=%d fan=%d temp=%d acc=%d lux=%d kbd=%d\n",
+	       s->key_count, s->fan_count, s->temp_count,
+	       s->has_accelerometer,
+	       s->num_light_sensors,
+	       s->has_key_backlight);
+
+	return 0;
+}
+
+/*
+ * applesmc_init_smcreg - Initialize register cache.
+ *
+ * Retries until initialization is successful, or the operation times out.
+ *
+ */
+static int applesmc_init_smcreg(void)
+{
+	int ms, ret;
+
+	for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) {
+		ret = applesmc_init_smcreg_try();
+		if (!ret) {
+			if (ms)
+				pr_info("init_smcreg() took %d ms\n", ms);
+			return 0;
+		}
+		msleep(INIT_WAIT_MSECS);
+	}
+
+	kfree(smcreg.cache);
+	smcreg.cache = NULL;
+
+	return ret;
+}
+
+static void applesmc_destroy_smcreg(void)
+{
+	kfree(smcreg.cache);
+	smcreg.cache = NULL;
+	smcreg.init_complete = false;
 }
 
 /* Device model stuff */
@@ -514,30 +576,27 @@
 {
 	int ret;
 
-	ret = applesmc_device_init();
+	ret = applesmc_init_smcreg();
 	if (ret)
 		return ret;
 
-	printk(KERN_INFO "applesmc: device successfully initialized.\n");
+	applesmc_device_init();
+
 	return 0;
 }
 
 /* Synchronize device with memorized backlight state */
 static int applesmc_pm_resume(struct device *dev)
 {
-	mutex_lock(&applesmc_lock);
-	if (applesmc_light)
+	if (smcreg.has_key_backlight)
 		applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2);
-	mutex_unlock(&applesmc_lock);
 	return 0;
 }
 
 /* Reinitialize device on resume from hibernation */
 static int applesmc_pm_restore(struct device *dev)
 {
-	int ret = applesmc_device_init();
-	if (ret)
-		return ret;
+	applesmc_device_init();
 	return applesmc_pm_resume(dev);
 }
 
@@ -571,20 +630,15 @@
 	struct input_dev *idev = dev->input;
 	s16 x, y;
 
-	mutex_lock(&applesmc_lock);
-
 	if (applesmc_read_motion_sensor(SENSOR_X, &x))
-		goto out;
+		return;
 	if (applesmc_read_motion_sensor(SENSOR_Y, &y))
-		goto out;
+		return;
 
 	x = -x;
 	input_report_abs(idev, ABS_X, x - rest_x);
 	input_report_abs(idev, ABS_Y, y - rest_y);
 	input_sync(idev);
-
-out:
-	mutex_unlock(&applesmc_lock);
 }
 
 /* Sysfs Files */
@@ -601,8 +655,6 @@
 	int ret;
 	s16 x, y, z;
 
-	mutex_lock(&applesmc_lock);
-
 	ret = applesmc_read_motion_sensor(SENSOR_X, &x);
 	if (ret)
 		goto out;
@@ -614,7 +666,6 @@
 		goto out;
 
 out:
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -624,20 +675,20 @@
 static ssize_t applesmc_light_show(struct device *dev,
 				struct device_attribute *attr, char *sysfsbuf)
 {
+	const struct applesmc_entry *entry;
 	static int data_length;
 	int ret;
 	u8 left = 0, right = 0;
-	u8 buffer[10], query[6];
-
-	mutex_lock(&applesmc_lock);
+	u8 buffer[10];
 
 	if (!data_length) {
-		ret = applesmc_get_key_type(LIGHT_SENSOR_LEFT_KEY, query);
-		if (ret)
-			goto out;
-		data_length = clamp_val(query[0], 0, 10);
-		printk(KERN_INFO "applesmc: light sensor data length set to "
-			"%d\n", data_length);
+		entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY);
+		if (IS_ERR(entry))
+			return PTR_ERR(entry);
+		if (entry->len > 10)
+			return -ENXIO;
+		data_length = entry->len;
+		pr_info("light sensor data length set to %d\n", data_length);
 	}
 
 	ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
@@ -653,7 +704,6 @@
 	right = buffer[2];
 
 out:
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -664,36 +714,44 @@
 static ssize_t applesmc_show_sensor_label(struct device *dev,
 			struct device_attribute *devattr, char *sysfsbuf)
 {
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	const char *key =
-		temperature_sensors_sets[applesmc_temperature_set][attr->index];
+	int index = smcreg.temp_begin + to_index(devattr);
+	const struct applesmc_entry *entry;
 
-	return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
+	entry = applesmc_get_entry_by_index(index);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+
+	return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key);
 }
 
 /* Displays degree Celsius * 1000 */
 static ssize_t applesmc_show_temperature(struct device *dev,
 			struct device_attribute *devattr, char *sysfsbuf)
 {
+	int index = smcreg.temp_begin + to_index(devattr);
+	const struct applesmc_entry *entry;
 	int ret;
 	u8 buffer[2];
 	unsigned int temp;
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-	const char* key =
-		temperature_sensors_sets[applesmc_temperature_set][attr->index];
 
-	mutex_lock(&applesmc_lock);
+	entry = applesmc_get_entry_by_index(index);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+	if (entry->len > 2)
+		return -EINVAL;
 
-	ret = applesmc_read_key(key, buffer, 2);
-	temp = buffer[0]*1000;
-	temp += (buffer[1] >> 6) * 250;
-
-	mutex_unlock(&applesmc_lock);
-
+	ret = applesmc_read_entry(entry, buffer, entry->len);
 	if (ret)
 		return ret;
-	else
-		return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp);
+
+	if (entry->len == 2) {
+		temp = buffer[0] * 1000;
+		temp += (buffer[1] >> 6) * 250;
+	} else {
+		temp = buffer[0] * 4000;
+	}
+
+	return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp);
 }
 
 static ssize_t applesmc_show_fan_speed(struct device *dev,
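
The conversion in applesmc_show_temperature() above reports millidegrees Celsius: a two-byte sp78 reading contributes buffer[0] whole degrees plus quarter degrees from the top two bits of buffer[1], while a one-byte reading is simply scaled by 4000. A worked check of that arithmetic, using made-up raw bytes:

/* Reproduces the millidegree conversion from the hunk above; the raw byte
 * values are invented for the example. */
#include <stdio.h>

static unsigned int to_millicelsius(const unsigned char *buffer, int len)
{
	unsigned int temp;

	if (len == 2) {
		temp = buffer[0] * 1000;		/* whole degrees */
		temp += (buffer[1] >> 6) * 250;		/* quarter degrees */
	} else {
		temp = buffer[0] * 4000;		/* one-byte reading */
	}
	return temp;
}

int main(void)
{
	unsigned char two[2] = { 42, 0x80 };	/* 42.5 C  -> 42500 */
	unsigned char one[1] = { 13 };		/* 13*4000 -> 52000 */

	printf("%u %u\n", to_millicelsius(two, 2), to_millicelsius(one, 1));
	return 0;
}
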
@@ -703,21 +761,12 @@
 	unsigned int speed = 0;
 	char newkey[5];
 	u8 buffer[2];
-	struct sensor_device_attribute_2 *sensor_attr =
-						to_sensor_dev_attr_2(attr);
 
-	newkey[0] = fan_speed_keys[sensor_attr->nr][0];
-	newkey[1] = '0' + sensor_attr->index;
-	newkey[2] = fan_speed_keys[sensor_attr->nr][2];
-	newkey[3] = fan_speed_keys[sensor_attr->nr][3];
-	newkey[4] = 0;
-
-	mutex_lock(&applesmc_lock);
+	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
 
 	ret = applesmc_read_key(newkey, buffer, 2);
 	speed = ((buffer[0] << 8 | buffer[1]) >> 2);
 
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -729,30 +778,19 @@
 					const char *sysfsbuf, size_t count)
 {
 	int ret;
-	u32 speed;
+	unsigned long speed;
 	char newkey[5];
 	u8 buffer[2];
-	struct sensor_device_attribute_2 *sensor_attr =
-						to_sensor_dev_attr_2(attr);
 
-	speed = simple_strtoul(sysfsbuf, NULL, 10);
+	if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
+		return -EINVAL;		/* Bigger than a 14-bit value */
 
-	if (speed > 0x4000) /* Bigger than a 14-bit value */
-		return -EINVAL;
-
-	newkey[0] = fan_speed_keys[sensor_attr->nr][0];
-	newkey[1] = '0' + sensor_attr->index;
-	newkey[2] = fan_speed_keys[sensor_attr->nr][2];
-	newkey[3] = fan_speed_keys[sensor_attr->nr][3];
-	newkey[4] = 0;
-
-	mutex_lock(&applesmc_lock);
+	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
 
 	buffer[0] = (speed >> 6) & 0xff;
 	buffer[1] = (speed << 2) & 0xff;
 	ret = applesmc_write_key(newkey, buffer, 2);
 
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -760,19 +798,15 @@
 }
 
 static ssize_t applesmc_show_fan_manual(struct device *dev,
-			struct device_attribute *devattr, char *sysfsbuf)
+			struct device_attribute *attr, char *sysfsbuf)
 {
 	int ret;
 	u16 manual = 0;
 	u8 buffer[2];
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
-	mutex_lock(&applesmc_lock);
 
 	ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
-	manual = ((buffer[0] << 8 | buffer[1]) >> attr->index) & 0x01;
+	manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
 
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -780,18 +814,16 @@
 }
 
 static ssize_t applesmc_store_fan_manual(struct device *dev,
-					 struct device_attribute *devattr,
+					 struct device_attribute *attr,
 					 const char *sysfsbuf, size_t count)
 {
 	int ret;
 	u8 buffer[2];
-	u32 input;
+	unsigned long input;
 	u16 val;
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-	input = simple_strtoul(sysfsbuf, NULL, 10);
-
-	mutex_lock(&applesmc_lock);
+	if (strict_strtoul(sysfsbuf, 10, &input) < 0)
+		return -EINVAL;
 
 	ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
 	val = (buffer[0] << 8 | buffer[1]);
@@ -799,9 +831,9 @@
 		goto out;
 
 	if (input)
-		val = val | (0x01 << attr->index);
+		val = val | (0x01 << to_index(attr));
 	else
-		val = val & ~(0x01 << attr->index);
+		val = val & ~(0x01 << to_index(attr));
 
 	buffer[0] = (val >> 8) & 0xFF;
 	buffer[1] = val & 0xFF;
@@ -809,7 +841,6 @@
 	ret = applesmc_write_key(FANS_MANUAL, buffer, 2);
 
 out:
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -822,21 +853,12 @@
 	int ret;
 	char newkey[5];
 	u8 buffer[17];
-	struct sensor_device_attribute_2 *sensor_attr =
-						to_sensor_dev_attr_2(attr);
 
-	newkey[0] = FAN_POSITION[0];
-	newkey[1] = '0' + sensor_attr->index;
-	newkey[2] = FAN_POSITION[2];
-	newkey[3] = FAN_POSITION[3];
-	newkey[4] = 0;
-
-	mutex_lock(&applesmc_lock);
+	sprintf(newkey, FAN_ID_FMT, to_index(attr));
 
 	ret = applesmc_read_key(newkey, buffer, 16);
 	buffer[16] = 0;
 
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -852,18 +874,14 @@
 static ssize_t applesmc_calibrate_store(struct device *dev,
 	struct device_attribute *attr, const char *sysfsbuf, size_t count)
 {
-	mutex_lock(&applesmc_lock);
 	applesmc_calibrate();
-	mutex_unlock(&applesmc_lock);
 
 	return count;
 }
 
 static void applesmc_backlight_set(struct work_struct *work)
 {
-	mutex_lock(&applesmc_lock);
 	applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2);
-	mutex_unlock(&applesmc_lock);
 }
 static DECLARE_WORK(backlight_work, &applesmc_backlight_set);
 
@@ -886,13 +904,10 @@
 	u8 buffer[4];
 	u32 count;
 
-	mutex_lock(&applesmc_lock);
-
 	ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
 	count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
 						((u32)buffer[2]<<8) + buffer[3];
 
-	mutex_unlock(&applesmc_lock);
 	if (ret)
 		return ret;
 	else
@@ -902,113 +917,53 @@
 static ssize_t applesmc_key_at_index_read_show(struct device *dev,
 				struct device_attribute *attr, char *sysfsbuf)
 {
-	char key[5];
-	char info[6];
+	const struct applesmc_entry *entry;
 	int ret;
 
-	mutex_lock(&applesmc_lock);
-
-	ret = applesmc_get_key_at_index(key_at_index, key);
-
-	if (ret || !key[0]) {
-		mutex_unlock(&applesmc_lock);
-
-		return -EINVAL;
-	}
-
-	ret = applesmc_get_key_type(key, info);
-
-	if (ret) {
-		mutex_unlock(&applesmc_lock);
-
+	entry = applesmc_get_entry_by_index(key_at_index);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
+	ret = applesmc_read_entry(entry, sysfsbuf, entry->len);
+	if (ret)
 		return ret;
-	}
 
-	/*
-	 * info[0] maximum value (APPLESMC_MAX_DATA_LENGTH) is much lower than
-	 * PAGE_SIZE, so we don't need any checks before writing to sysfsbuf.
-	 */
-	ret = applesmc_read_key(key, sysfsbuf, info[0]);
-
-	mutex_unlock(&applesmc_lock);
-
-	if (!ret) {
-		return info[0];
-	} else {
-		return ret;
-	}
+	return entry->len;
 }
 
 static ssize_t applesmc_key_at_index_data_length_show(struct device *dev,
 				struct device_attribute *attr, char *sysfsbuf)
 {
-	char key[5];
-	char info[6];
-	int ret;
+	const struct applesmc_entry *entry;
 
-	mutex_lock(&applesmc_lock);
+	entry = applesmc_get_entry_by_index(key_at_index);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
 
-	ret = applesmc_get_key_at_index(key_at_index, key);
-
-	if (ret || !key[0]) {
-		mutex_unlock(&applesmc_lock);
-
-		return -EINVAL;
-	}
-
-	ret = applesmc_get_key_type(key, info);
-
-	mutex_unlock(&applesmc_lock);
-
-	if (!ret)
-		return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", info[0]);
-	else
-		return ret;
+	return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", entry->len);
 }
 
 static ssize_t applesmc_key_at_index_type_show(struct device *dev,
 				struct device_attribute *attr, char *sysfsbuf)
 {
-	char key[5];
-	char info[6];
-	int ret;
+	const struct applesmc_entry *entry;
 
-	mutex_lock(&applesmc_lock);
+	entry = applesmc_get_entry_by_index(key_at_index);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
 
-	ret = applesmc_get_key_at_index(key_at_index, key);
-
-	if (ret || !key[0]) {
-		mutex_unlock(&applesmc_lock);
-
-		return -EINVAL;
-	}
-
-	ret = applesmc_get_key_type(key, info);
-
-	mutex_unlock(&applesmc_lock);
-
-	if (!ret)
-		return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", info+1);
-	else
-		return ret;
+	return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->type);
 }
 
 static ssize_t applesmc_key_at_index_name_show(struct device *dev,
 				struct device_attribute *attr, char *sysfsbuf)
 {
-	char key[5];
-	int ret;
+	const struct applesmc_entry *entry;
 
-	mutex_lock(&applesmc_lock);
+	entry = applesmc_get_entry_by_index(key_at_index);
+	if (IS_ERR(entry))
+		return PTR_ERR(entry);
 
-	ret = applesmc_get_key_at_index(key_at_index, key);
-
-	mutex_unlock(&applesmc_lock);
-
-	if (!ret && key[0])
-		return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
-	else
-		return -EINVAL;
+	return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key);
 }
 
 static ssize_t applesmc_key_at_index_show(struct device *dev,
@@ -1020,12 +975,13 @@
 static ssize_t applesmc_key_at_index_store(struct device *dev,
 	struct device_attribute *attr, const char *sysfsbuf, size_t count)
 {
-	mutex_lock(&applesmc_lock);
+	unsigned long newkey;
 
-	key_at_index = simple_strtoul(sysfsbuf, NULL, 10);
+	if (strict_strtoul(sysfsbuf, 10, &newkey) < 0
+	    || newkey >= smcreg.key_count)
+		return -EINVAL;
 
-	mutex_unlock(&applesmc_lock);
-
+	key_at_index = newkey;
 	return count;
 }
 
@@ -1035,387 +991,101 @@
 	.brightness_set		= applesmc_brightness_set,
 };
 
-static DEVICE_ATTR(name, 0444, applesmc_name_show, NULL);
-
-static DEVICE_ATTR(position, 0444, applesmc_position_show, NULL);
-static DEVICE_ATTR(calibrate, 0644,
-			applesmc_calibrate_show, applesmc_calibrate_store);
-
-static struct attribute *accelerometer_attributes[] = {
-	&dev_attr_position.attr,
-	&dev_attr_calibrate.attr,
-	NULL
+static struct applesmc_node_group info_group[] = {
+	{ "name", applesmc_name_show },
+	{ "key_count", applesmc_key_count_show },
+	{ "key_at_index", applesmc_key_at_index_show, applesmc_key_at_index_store },
+	{ "key_at_index_name", applesmc_key_at_index_name_show },
+	{ "key_at_index_type", applesmc_key_at_index_type_show },
+	{ "key_at_index_data_length", applesmc_key_at_index_data_length_show },
+	{ "key_at_index_data", applesmc_key_at_index_read_show },
+	{ }
 };
 
-static const struct attribute_group accelerometer_attributes_group =
-	{ .attrs = accelerometer_attributes };
-
-static DEVICE_ATTR(light, 0444, applesmc_light_show, NULL);
-
-static DEVICE_ATTR(key_count, 0444, applesmc_key_count_show, NULL);
-static DEVICE_ATTR(key_at_index, 0644,
-		applesmc_key_at_index_show, applesmc_key_at_index_store);
-static DEVICE_ATTR(key_at_index_name, 0444,
-					applesmc_key_at_index_name_show, NULL);
-static DEVICE_ATTR(key_at_index_type, 0444,
-					applesmc_key_at_index_type_show, NULL);
-static DEVICE_ATTR(key_at_index_data_length, 0444,
-				applesmc_key_at_index_data_length_show, NULL);
-static DEVICE_ATTR(key_at_index_data, 0444,
-				applesmc_key_at_index_read_show, NULL);
-
-static struct attribute *key_enumeration_attributes[] = {
-	&dev_attr_key_count.attr,
-	&dev_attr_key_at_index.attr,
-	&dev_attr_key_at_index_name.attr,
-	&dev_attr_key_at_index_type.attr,
-	&dev_attr_key_at_index_data_length.attr,
-	&dev_attr_key_at_index_data.attr,
-	NULL
+static struct applesmc_node_group accelerometer_group[] = {
+	{ "position", applesmc_position_show },
+	{ "calibrate", applesmc_calibrate_show, applesmc_calibrate_store },
+	{ }
 };
 
-static const struct attribute_group key_enumeration_group =
-	{ .attrs = key_enumeration_attributes };
-
-/*
- * Macro defining SENSOR_DEVICE_ATTR for a fan sysfs entries.
- *  - show actual speed
- *  - show/store minimum speed
- *  - show maximum speed
- *  - show safe speed
- *  - show/store target speed
- *  - show/store manual mode
- */
-#define sysfs_fan_speeds_offset(offset) \
-static SENSOR_DEVICE_ATTR_2(fan##offset##_input, S_IRUGO, \
-			applesmc_show_fan_speed, NULL, 0, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_min, S_IRUGO | S_IWUSR, \
-	applesmc_show_fan_speed, applesmc_store_fan_speed, 1, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_max, S_IRUGO, \
-			applesmc_show_fan_speed, NULL, 2, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_safe, S_IRUGO, \
-			applesmc_show_fan_speed, NULL, 3, offset-1); \
-\
-static SENSOR_DEVICE_ATTR_2(fan##offset##_output, S_IRUGO | S_IWUSR, \
-	applesmc_show_fan_speed, applesmc_store_fan_speed, 4, offset-1); \
-\
-static SENSOR_DEVICE_ATTR(fan##offset##_manual, S_IRUGO | S_IWUSR, \
-	applesmc_show_fan_manual, applesmc_store_fan_manual, offset-1); \
-\
-static SENSOR_DEVICE_ATTR(fan##offset##_label, S_IRUGO, \
-	applesmc_show_fan_position, NULL, offset-1); \
-\
-static struct attribute *fan##offset##_attributes[] = { \
-	&sensor_dev_attr_fan##offset##_input.dev_attr.attr, \
-	&sensor_dev_attr_fan##offset##_min.dev_attr.attr, \
-	&sensor_dev_attr_fan##offset##_max.dev_attr.attr, \
-	&sensor_dev_attr_fan##offset##_safe.dev_attr.attr, \
-	&sensor_dev_attr_fan##offset##_output.dev_attr.attr, \
-	&sensor_dev_attr_fan##offset##_manual.dev_attr.attr, \
-	&sensor_dev_attr_fan##offset##_label.dev_attr.attr, \
-	NULL \
+static struct applesmc_node_group light_sensor_group[] = {
+	{ "light", applesmc_light_show },
+	{ }
 };
 
-/*
- * Create the needed functions for each fan using the macro defined above
- * (4 fans are supported)
- */
-sysfs_fan_speeds_offset(1);
-sysfs_fan_speeds_offset(2);
-sysfs_fan_speeds_offset(3);
-sysfs_fan_speeds_offset(4);
-
-static const struct attribute_group fan_attribute_groups[] = {
-	{ .attrs = fan1_attributes },
-	{ .attrs = fan2_attributes },
-	{ .attrs = fan3_attributes },
-	{ .attrs = fan4_attributes },
+static struct applesmc_node_group fan_group[] = {
+	{ "fan%d_label", applesmc_show_fan_position },
+	{ "fan%d_input", applesmc_show_fan_speed, NULL, 0 },
+	{ "fan%d_min", applesmc_show_fan_speed, applesmc_store_fan_speed, 1 },
+	{ "fan%d_max", applesmc_show_fan_speed, NULL, 2 },
+	{ "fan%d_safe", applesmc_show_fan_speed, NULL, 3 },
+	{ "fan%d_output", applesmc_show_fan_speed, applesmc_store_fan_speed, 4 },
+	{ "fan%d_manual", applesmc_show_fan_manual, applesmc_store_fan_manual },
+	{ }
 };
 
-/*
- * Temperature sensors sysfs entries.
- */
-static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 15);
-static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 16);
-static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 18);
-static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 19);
-static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 20);
-static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 21);
-static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 22);
-static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 24);
-static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 25);
-static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 26);
-static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 27);
-static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 28);
-static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 29);
-static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 30);
-static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 31);
-static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 32);
-static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 33);
-static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 34);
-static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 35);
-static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 36);
-static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 37);
-static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 38);
-static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
-					applesmc_show_sensor_label, NULL, 39);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp9_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp10_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp11_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp12_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp13_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp14_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp15_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp16_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 15);
-static SENSOR_DEVICE_ATTR(temp17_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 16);
-static SENSOR_DEVICE_ATTR(temp18_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp19_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 18);
-static SENSOR_DEVICE_ATTR(temp20_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 19);
-static SENSOR_DEVICE_ATTR(temp21_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 20);
-static SENSOR_DEVICE_ATTR(temp22_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 21);
-static SENSOR_DEVICE_ATTR(temp23_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 22);
-static SENSOR_DEVICE_ATTR(temp24_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp25_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 24);
-static SENSOR_DEVICE_ATTR(temp26_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 25);
-static SENSOR_DEVICE_ATTR(temp27_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 26);
-static SENSOR_DEVICE_ATTR(temp28_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 27);
-static SENSOR_DEVICE_ATTR(temp29_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 28);
-static SENSOR_DEVICE_ATTR(temp30_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 29);
-static SENSOR_DEVICE_ATTR(temp31_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 30);
-static SENSOR_DEVICE_ATTR(temp32_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 31);
-static SENSOR_DEVICE_ATTR(temp33_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 32);
-static SENSOR_DEVICE_ATTR(temp34_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 33);
-static SENSOR_DEVICE_ATTR(temp35_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 34);
-static SENSOR_DEVICE_ATTR(temp36_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 35);
-static SENSOR_DEVICE_ATTR(temp37_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 36);
-static SENSOR_DEVICE_ATTR(temp38_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 37);
-static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 38);
-static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
-					applesmc_show_temperature, NULL, 39);
-
-static struct attribute *label_attributes[] = {
-	&sensor_dev_attr_temp1_label.dev_attr.attr,
-	&sensor_dev_attr_temp2_label.dev_attr.attr,
-	&sensor_dev_attr_temp3_label.dev_attr.attr,
-	&sensor_dev_attr_temp4_label.dev_attr.attr,
-	&sensor_dev_attr_temp5_label.dev_attr.attr,
-	&sensor_dev_attr_temp6_label.dev_attr.attr,
-	&sensor_dev_attr_temp7_label.dev_attr.attr,
-	&sensor_dev_attr_temp8_label.dev_attr.attr,
-	&sensor_dev_attr_temp9_label.dev_attr.attr,
-	&sensor_dev_attr_temp10_label.dev_attr.attr,
-	&sensor_dev_attr_temp11_label.dev_attr.attr,
-	&sensor_dev_attr_temp12_label.dev_attr.attr,
-	&sensor_dev_attr_temp13_label.dev_attr.attr,
-	&sensor_dev_attr_temp14_label.dev_attr.attr,
-	&sensor_dev_attr_temp15_label.dev_attr.attr,
-	&sensor_dev_attr_temp16_label.dev_attr.attr,
-	&sensor_dev_attr_temp17_label.dev_attr.attr,
-	&sensor_dev_attr_temp18_label.dev_attr.attr,
-	&sensor_dev_attr_temp19_label.dev_attr.attr,
-	&sensor_dev_attr_temp20_label.dev_attr.attr,
-	&sensor_dev_attr_temp21_label.dev_attr.attr,
-	&sensor_dev_attr_temp22_label.dev_attr.attr,
-	&sensor_dev_attr_temp23_label.dev_attr.attr,
-	&sensor_dev_attr_temp24_label.dev_attr.attr,
-	&sensor_dev_attr_temp25_label.dev_attr.attr,
-	&sensor_dev_attr_temp26_label.dev_attr.attr,
-	&sensor_dev_attr_temp27_label.dev_attr.attr,
-	&sensor_dev_attr_temp28_label.dev_attr.attr,
-	&sensor_dev_attr_temp29_label.dev_attr.attr,
-	&sensor_dev_attr_temp30_label.dev_attr.attr,
-	&sensor_dev_attr_temp31_label.dev_attr.attr,
-	&sensor_dev_attr_temp32_label.dev_attr.attr,
-	&sensor_dev_attr_temp33_label.dev_attr.attr,
-	&sensor_dev_attr_temp34_label.dev_attr.attr,
-	&sensor_dev_attr_temp35_label.dev_attr.attr,
-	&sensor_dev_attr_temp36_label.dev_attr.attr,
-	&sensor_dev_attr_temp37_label.dev_attr.attr,
-	&sensor_dev_attr_temp38_label.dev_attr.attr,
-	&sensor_dev_attr_temp39_label.dev_attr.attr,
-	&sensor_dev_attr_temp40_label.dev_attr.attr,
-	NULL
-};
-
-static struct attribute *temperature_attributes[] = {
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_temp2_input.dev_attr.attr,
-	&sensor_dev_attr_temp3_input.dev_attr.attr,
-	&sensor_dev_attr_temp4_input.dev_attr.attr,
-	&sensor_dev_attr_temp5_input.dev_attr.attr,
-	&sensor_dev_attr_temp6_input.dev_attr.attr,
-	&sensor_dev_attr_temp7_input.dev_attr.attr,
-	&sensor_dev_attr_temp8_input.dev_attr.attr,
-	&sensor_dev_attr_temp9_input.dev_attr.attr,
-	&sensor_dev_attr_temp10_input.dev_attr.attr,
-	&sensor_dev_attr_temp11_input.dev_attr.attr,
-	&sensor_dev_attr_temp12_input.dev_attr.attr,
-	&sensor_dev_attr_temp13_input.dev_attr.attr,
-	&sensor_dev_attr_temp14_input.dev_attr.attr,
-	&sensor_dev_attr_temp15_input.dev_attr.attr,
-	&sensor_dev_attr_temp16_input.dev_attr.attr,
-	&sensor_dev_attr_temp17_input.dev_attr.attr,
-	&sensor_dev_attr_temp18_input.dev_attr.attr,
-	&sensor_dev_attr_temp19_input.dev_attr.attr,
-	&sensor_dev_attr_temp20_input.dev_attr.attr,
-	&sensor_dev_attr_temp21_input.dev_attr.attr,
-	&sensor_dev_attr_temp22_input.dev_attr.attr,
-	&sensor_dev_attr_temp23_input.dev_attr.attr,
-	&sensor_dev_attr_temp24_input.dev_attr.attr,
-	&sensor_dev_attr_temp25_input.dev_attr.attr,
-	&sensor_dev_attr_temp26_input.dev_attr.attr,
-	&sensor_dev_attr_temp27_input.dev_attr.attr,
-	&sensor_dev_attr_temp28_input.dev_attr.attr,
-	&sensor_dev_attr_temp29_input.dev_attr.attr,
-	&sensor_dev_attr_temp30_input.dev_attr.attr,
-	&sensor_dev_attr_temp31_input.dev_attr.attr,
-	&sensor_dev_attr_temp32_input.dev_attr.attr,
-	&sensor_dev_attr_temp33_input.dev_attr.attr,
-	&sensor_dev_attr_temp34_input.dev_attr.attr,
-	&sensor_dev_attr_temp35_input.dev_attr.attr,
-	&sensor_dev_attr_temp36_input.dev_attr.attr,
-	&sensor_dev_attr_temp37_input.dev_attr.attr,
-	&sensor_dev_attr_temp38_input.dev_attr.attr,
-	&sensor_dev_attr_temp39_input.dev_attr.attr,
-	&sensor_dev_attr_temp40_input.dev_attr.attr,
-	NULL
-};
-
-static const struct attribute_group temperature_attributes_group =
-	{ .attrs = temperature_attributes };
-
-static const struct attribute_group label_attributes_group = {
-	.attrs = label_attributes
+static struct applesmc_node_group temp_group[] = {
+	{ "temp%d_label", applesmc_show_sensor_label },
+	{ "temp%d_input", applesmc_show_temperature },
+	{ }
 };
 
 /* Module stuff */
 
 /*
- * applesmc_dmi_match - found a match.  return one, short-circuiting the hunt.
+ * applesmc_destroy_nodes - remove files and free associated memory
  */
-static int applesmc_dmi_match(const struct dmi_system_id *id)
+static void applesmc_destroy_nodes(struct applesmc_node_group *groups)
 {
-	int i = 0;
-	struct dmi_match_data* dmi_data = id->driver_data;
-	printk(KERN_INFO "applesmc: %s detected:\n", id->ident);
-	applesmc_accelerometer = dmi_data->accelerometer;
-	printk(KERN_INFO "applesmc:  - Model %s accelerometer\n",
-				applesmc_accelerometer ? "with" : "without");
-	applesmc_light = dmi_data->light;
-	printk(KERN_INFO "applesmc:  - Model %s light sensors and backlight\n",
-					applesmc_light ? "with" : "without");
+	struct applesmc_node_group *grp;
+	struct applesmc_dev_attr *node;
 
-	applesmc_temperature_set =  dmi_data->temperature_set;
-	while (temperature_sensors_sets[applesmc_temperature_set][i] != NULL)
-		i++;
-	printk(KERN_INFO "applesmc:  - Model with %d temperature sensors\n", i);
-	return 1;
+	for (grp = groups; grp->nodes; grp++) {
+		for (node = grp->nodes; node->sda.dev_attr.attr.name; node++)
+			sysfs_remove_file(&pdev->dev.kobj,
+					  &node->sda.dev_attr.attr);
+		kfree(grp->nodes);
+		grp->nodes = NULL;
+	}
+}
+
+/*
+ * applesmc_create_nodes - create a two-dimensional group of sysfs files
+ */
+static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
+{
+	struct applesmc_node_group *grp;
+	struct applesmc_dev_attr *node;
+	struct attribute *attr;
+	int ret, i;
+
+	for (grp = groups; grp->format; grp++) {
+		grp->nodes = kcalloc(num + 1, sizeof(*node), GFP_KERNEL);
+		if (!grp->nodes) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < num; i++) {
+			node = &grp->nodes[i];
+			sprintf(node->name, grp->format, i + 1);
+			node->sda.index = (grp->option << 16) | (i & 0xffff);
+			node->sda.dev_attr.show = grp->show;
+			node->sda.dev_attr.store = grp->store;
+			attr = &node->sda.dev_attr.attr;
+			attr->name = node->name;
+			attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0);
+			ret = sysfs_create_file(&pdev->dev.kobj, attr);
+			if (ret) {
+				attr->name = NULL;
+				goto out;
+			}
+		}
+	}
+
+	return 0;
+out:
+	applesmc_destroy_nodes(groups);
+	return ret;
 }
 
 /* Create accelerometer resources */
@@ -1424,8 +1094,10 @@
 	struct input_dev *idev;
 	int ret;
 
-	ret = sysfs_create_group(&pdev->dev.kobj,
-					&accelerometer_attributes_group);
+	if (!smcreg.has_accelerometer)
+		return 0;
+
+	ret = applesmc_create_nodes(accelerometer_group, 1);
 	if (ret)
 		goto out;
 
@@ -1462,184 +1134,96 @@
 	input_free_polled_device(applesmc_idev);
 
 out_sysfs:
-	sysfs_remove_group(&pdev->dev.kobj, &accelerometer_attributes_group);
+	applesmc_destroy_nodes(accelerometer_group);
 
 out:
-	printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret);
+	pr_warn("driver init failed (ret=%d)!\n", ret);
 	return ret;
 }
 
 /* Release all resources used by the accelerometer */
 static void applesmc_release_accelerometer(void)
 {
+	if (!smcreg.has_accelerometer)
+		return;
 	input_unregister_polled_device(applesmc_idev);
 	input_free_polled_device(applesmc_idev);
-	sysfs_remove_group(&pdev->dev.kobj, &accelerometer_attributes_group);
+	applesmc_destroy_nodes(accelerometer_group);
 }
 
-static __initdata struct dmi_match_data applesmc_dmi_data[] = {
-/* MacBook Pro: accelerometer, backlight and temperature set 0 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 0 },
-/* MacBook2: accelerometer and temperature set 1 */
-	{ .accelerometer = 1, .light = 0, .temperature_set = 1 },
-/* MacBook: accelerometer and temperature set 2 */
-	{ .accelerometer = 1, .light = 0, .temperature_set = 2 },
-/* MacMini: temperature set 3 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 3 },
-/* MacPro: temperature set 4 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 4 },
-/* iMac: temperature set 5 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 5 },
-/* MacBook3, MacBook4: accelerometer and temperature set 6 */
-	{ .accelerometer = 1, .light = 0, .temperature_set = 6 },
-/* MacBook Air: accelerometer, backlight and temperature set 7 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 7 },
-/* MacBook Pro 4: accelerometer, backlight and temperature set 8 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 8 },
-/* MacBook Pro 3: accelerometer, backlight and temperature set 9 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 9 },
-/* iMac 5: light sensor only, temperature set 10 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 10 },
-/* MacBook 5: accelerometer, backlight and temperature set 11 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 11 },
-/* MacBook Pro 5: accelerometer, backlight and temperature set 12 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 12 },
-/* iMac 8: light sensor only, temperature set 13 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 13 },
-/* iMac 6: light sensor only, temperature set 14 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 14 },
-/* MacBook Air 2,1: accelerometer, backlight and temperature set 15 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 15 },
-/* MacPro3,1: temperature set 16 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 16 },
-/* iMac 9,1: light sensor only, temperature set 17 */
-	{ .accelerometer = 0, .light = 0, .temperature_set = 17 },
-/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 18 },
-/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 19 },
-/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 20 },
-/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 21 },
-/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
-	{ .accelerometer = 1, .light = 1, .temperature_set = 22 },
-};
+static int applesmc_create_light_sensor(void)
+{
+	if (!smcreg.num_light_sensors)
+		return 0;
+	return applesmc_create_nodes(light_sensor_group, 1);
+}
+
+static void applesmc_release_light_sensor(void)
+{
+	if (!smcreg.num_light_sensors)
+		return;
+	applesmc_destroy_nodes(light_sensor_group);
+}
+
+static int applesmc_create_key_backlight(void)
+{
+	if (!smcreg.has_key_backlight)
+		return 0;
+	applesmc_led_wq = create_singlethread_workqueue("applesmc-led");
+	if (!applesmc_led_wq)
+		return -ENOMEM;
+	return led_classdev_register(&pdev->dev, &applesmc_backlight);
+}
+
+static void applesmc_release_key_backlight(void)
+{
+	if (!smcreg.has_key_backlight)
+		return;
+	led_classdev_unregister(&applesmc_backlight);
+	destroy_workqueue(applesmc_led_wq);
+}
+
+static int applesmc_dmi_match(const struct dmi_system_id *id)
+{
+	return 1;
+}
 
 /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
  * So we need to put "Apple MacBook Pro" before "Apple MacBook". */
 static __initdata struct dmi_system_id applesmc_whitelist[] = {
-	{ applesmc_dmi_match, "Apple MacBook Air 2", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2") },
-		&applesmc_dmi_data[15]},
 	{ applesmc_dmi_match, "Apple MacBook Air", {
 	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
-		&applesmc_dmi_data[7]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 7", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
-		&applesmc_dmi_data[22]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 5,4", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
-		&applesmc_dmi_data[20]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 5,3", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
-		&applesmc_dmi_data[19]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 6", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
-		&applesmc_dmi_data[21]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 5", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
-		&applesmc_dmi_data[12]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 4", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4") },
-		&applesmc_dmi_data[8]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 3", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3") },
-		&applesmc_dmi_data[9]},
-	{ applesmc_dmi_match, "Apple MacBook Pro 2,2", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple Computer, Inc."),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2") },
-		&applesmc_dmi_data[18]},
+	},
 	{ applesmc_dmi_match, "Apple MacBook Pro", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
-		&applesmc_dmi_data[0]},
-	{ applesmc_dmi_match, "Apple MacBook (v2)", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
-		&applesmc_dmi_data[1]},
-	{ applesmc_dmi_match, "Apple MacBook (v3)", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
-		&applesmc_dmi_data[6]},
-	{ applesmc_dmi_match, "Apple MacBook 4", {
 	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4") },
-		&applesmc_dmi_data[6]},
-	{ applesmc_dmi_match, "Apple MacBook 5", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5") },
-		&applesmc_dmi_data[11]},
+	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro") },
+	},
 	{ applesmc_dmi_match, "Apple MacBook", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
-		&applesmc_dmi_data[2]},
-	{ applesmc_dmi_match, "Apple Macmini", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"Macmini") },
-		&applesmc_dmi_data[3]},
-	{ applesmc_dmi_match, "Apple MacPro2", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
-		&applesmc_dmi_data[4]},
-	{ applesmc_dmi_match, "Apple MacPro3", {
 	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacPro3") },
-		&applesmc_dmi_data[16]},
+	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
+	},
+	{ applesmc_dmi_match, "Apple Macmini", {
+	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+	  DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") },
+	},
 	{ applesmc_dmi_match, "Apple MacPro", {
 	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
 	  DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
-		&applesmc_dmi_data[4]},
-	{ applesmc_dmi_match, "Apple iMac 9,1", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1") },
-		&applesmc_dmi_data[17]},
-	{ applesmc_dmi_match, "Apple iMac 8", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
-		&applesmc_dmi_data[13]},
-	{ applesmc_dmi_match, "Apple iMac 6", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") },
-		&applesmc_dmi_data[14]},
-	{ applesmc_dmi_match, "Apple iMac 5", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },
-		&applesmc_dmi_data[10]},
+	},
 	{ applesmc_dmi_match, "Apple iMac", {
-	  DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
-		&applesmc_dmi_data[5]},
+	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+	  DMI_MATCH(DMI_PRODUCT_NAME, "iMac") },
+	},
 	{ .ident = NULL }
 };
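
As the comment above the table notes, DMI product names are matched as substrings, so "MacBookPro1,1" would also satisfy a bare "MacBook" pattern; keeping the longer patterns first is what makes the classification unambiguous. A standalone sketch of that ordering rule (this mimics the substring matching described in the comment, not the real dmi_check_system() internals):

#include <stdio.h>
#include <string.h>

/* First-match lookup over patterns listed in the whitelist's order. */
static const char *classify(const char *product)
{
	static const char * const patterns[] = {
		"MacBookAir", "MacBookPro", "MacBook", "Macmini",
		"MacPro", "iMac", NULL
	};
	int i;

	for (i = 0; patterns[i]; i++)
		if (strstr(product, patterns[i]))
			return patterns[i];
	return "no match";
}

int main(void)
{
	/* The longer pattern has to be tried first to win here: */
	printf("MacBookPro1,1 -> %s\n", classify("MacBookPro1,1"));
	printf("MacBook2,1    -> %s\n", classify("MacBook2,1"));
	return 0;
}

Swapping "MacBook" ahead of "MacBookPro" in such a table would make the first lookup report the shorter match.
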
 
 static int __init applesmc_init(void)
 {
 	int ret;
-	int count;
-	int i;
 
 	if (!dmi_check_system(applesmc_whitelist)) {
-		printk(KERN_WARNING "applesmc: supported laptop not found!\n");
+		pr_warn("supported laptop not found!\n");
 		ret = -ENODEV;
 		goto out;
 	}
@@ -1661,83 +1245,34 @@
 		goto out_driver;
 	}
 
-	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_name.attr);
+	/* create register cache */
+	ret = applesmc_init_smcreg();
 	if (ret)
 		goto out_device;
 
-	/* Create key enumeration sysfs files */
-	ret = sysfs_create_group(&pdev->dev.kobj, &key_enumeration_group);
+	ret = applesmc_create_nodes(info_group, 1);
 	if (ret)
-		goto out_name;
+		goto out_smcreg;
 
-	/* create fan files */
-	count = applesmc_get_fan_count();
-	if (count < 0)
-		printk(KERN_ERR "applesmc: Cannot get the number of fans.\n");
-	else
-		printk(KERN_INFO "applesmc: %d fans found.\n", count);
+	ret = applesmc_create_nodes(fan_group, smcreg.fan_count);
+	if (ret)
+		goto out_info;
 
-	if (count > 4) {
-		count = 4;
-		printk(KERN_WARNING "applesmc: More than 4 fans found,"
-		       " but at most 4 fans are supported"
-		       " by the driver.\n");
-	}
+	ret = applesmc_create_nodes(temp_group, smcreg.temp_count);
+	if (ret)
+		goto out_fans;
 
-	while (fans_handled < count) {
-		ret = sysfs_create_group(&pdev->dev.kobj,
-					 &fan_attribute_groups[fans_handled]);
-		if (ret)
-			goto out_fans;
-		fans_handled++;
-	}
+	ret = applesmc_create_accelerometer();
+	if (ret)
+		goto out_temperature;
 
-	for (i = 0;
-	     temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
-	     i++) {
-		if (temperature_attributes[i] == NULL ||
-		    label_attributes[i] == NULL) {
-			printk(KERN_ERR "applesmc: More temperature sensors "
-				"in temperature_sensors_sets (at least %i)"
-				"than available sysfs files in "
-				"temperature_attributes (%i), please report "
-				"this bug.\n", i, i-1);
-			goto out_temperature;
-		}
-		ret = sysfs_create_file(&pdev->dev.kobj,
-						temperature_attributes[i]);
-		if (ret)
-			goto out_temperature;
-		ret = sysfs_create_file(&pdev->dev.kobj,
-						label_attributes[i]);
-		if (ret)
-			goto out_temperature;
-	}
+	ret = applesmc_create_light_sensor();
+	if (ret)
+		goto out_accelerometer;
 
-	if (applesmc_accelerometer) {
-		ret = applesmc_create_accelerometer();
-		if (ret)
-			goto out_temperature;
-	}
-
-	if (applesmc_light) {
-		/* Add light sensor file */
-		ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_light.attr);
-		if (ret)
-			goto out_accelerometer;
-
-		/* Create the workqueue */
-		applesmc_led_wq = create_singlethread_workqueue("applesmc-led");
-		if (!applesmc_led_wq) {
-			ret = -ENOMEM;
-			goto out_light_sysfs;
-		}
-
-		/* register as a led device */
-		ret = led_classdev_register(&pdev->dev, &applesmc_backlight);
-		if (ret < 0)
-			goto out_light_wq;
-	}
+	ret = applesmc_create_key_backlight();
+	if (ret)
+		goto out_light_sysfs;
 
 	hwmon_dev = hwmon_device_register(&pdev->dev);
 	if (IS_ERR(hwmon_dev)) {
@@ -1745,32 +1280,22 @@
 		goto out_light_ledclass;
 	}
 
-	printk(KERN_INFO "applesmc: driver successfully loaded.\n");
-
 	return 0;
 
 out_light_ledclass:
-	if (applesmc_light)
-		led_classdev_unregister(&applesmc_backlight);
-out_light_wq:
-	if (applesmc_light)
-		destroy_workqueue(applesmc_led_wq);
+	applesmc_release_key_backlight();
 out_light_sysfs:
-	if (applesmc_light)
-		sysfs_remove_file(&pdev->dev.kobj, &dev_attr_light.attr);
+	applesmc_release_light_sensor();
 out_accelerometer:
-	if (applesmc_accelerometer)
-		applesmc_release_accelerometer();
+	applesmc_release_accelerometer();
 out_temperature:
-	sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
-	sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
+	applesmc_destroy_nodes(temp_group);
 out_fans:
-	while (fans_handled)
-		sysfs_remove_group(&pdev->dev.kobj,
-				   &fan_attribute_groups[--fans_handled]);
-	sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
-out_name:
-	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
+	applesmc_destroy_nodes(fan_group);
+out_info:
+	applesmc_destroy_nodes(info_group);
+out_smcreg:
+	applesmc_destroy_smcreg();
 out_device:
 	platform_device_unregister(pdev);
 out_driver:
@@ -1778,32 +1303,23 @@
 out_region:
 	release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS);
 out:
-	printk(KERN_WARNING "applesmc: driver init failed (ret=%d)!\n", ret);
+	pr_warn("driver init failed (ret=%d)!\n", ret);
 	return ret;
 }
 
 static void __exit applesmc_exit(void)
 {
 	hwmon_device_unregister(hwmon_dev);
-	if (applesmc_light) {
-		led_classdev_unregister(&applesmc_backlight);
-		destroy_workqueue(applesmc_led_wq);
-		sysfs_remove_file(&pdev->dev.kobj, &dev_attr_light.attr);
-	}
-	if (applesmc_accelerometer)
-		applesmc_release_accelerometer();
-	sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
-	sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
-	while (fans_handled)
-		sysfs_remove_group(&pdev->dev.kobj,
-				   &fan_attribute_groups[--fans_handled]);
-	sysfs_remove_group(&pdev->dev.kobj, &key_enumeration_group);
-	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_name.attr);
+	applesmc_release_key_backlight();
+	applesmc_release_light_sensor();
+	applesmc_release_accelerometer();
+	applesmc_destroy_nodes(temp_group);
+	applesmc_destroy_nodes(fan_group);
+	applesmc_destroy_nodes(info_group);
+	applesmc_destroy_smcreg();
 	platform_device_unregister(pdev);
 	platform_driver_unregister(&applesmc_driver);
 	release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS);
-
-	printk(KERN_INFO "applesmc: driver unloaded.\n");
 }
 
 module_init(applesmc_init);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 7dada55..c02a052 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -36,6 +36,8 @@
     asb100	7	3	1	4	0x31	0x0694	yes	no
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
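
This series also switches several hwmon drivers from hand-prefixed printk() calls to pr_err()/pr_info()/pr_debug() with a local pr_fmt(), so "asb100.o: "-style prefixes can be dropped from the format strings. The define has to appear before the #include block because <linux/printk.h> only supplies a default pr_fmt when none is defined yet. A minimal userspace mock-up of the mechanism (KBUILD_MODNAME is normally provided by the kernel build system and pr_info() by <linux/printk.h>; both are faked here purely to show the expansion):

#include <stdio.h>

#define KBUILD_MODNAME		"asb100"
#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...)	printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "asb100: detect failed, bad chip id 0x42!" */
	pr_info("detect failed, bad chip id 0x%02x!\n", 0x42);
	return 0;
}
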
@@ -701,8 +703,7 @@
 	int val1, val2;
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
-		pr_debug("asb100.o: detect failed, "
-				"smbus byte data not supported!\n");
+		pr_debug("detect failed, smbus byte data not supported!\n");
 		return -ENODEV;
 	}
 
@@ -715,7 +716,7 @@
 			(((!(val1 & 0x80)) && (val2 != 0x94)) ||
 			/* Check for ASB100 ID (high byte ) */
 			((val1 & 0x80) && (val2 != 0x06)))) {
-		pr_debug("asb100: detect failed, bad chip id 0x%02x!\n", val2);
+		pr_debug("detect failed, bad chip id 0x%02x!\n", val2);
 		return -ENODEV;
 	}
 
@@ -744,7 +745,7 @@
 
 	data = kzalloc(sizeof(struct asb100_data), GFP_KERNEL);
 	if (!data) {
-		pr_debug("asb100.o: probe failed, kzalloc failed!\n");
+		pr_debug("probe failed, kzalloc failed!\n");
 		err = -ENOMEM;
 		goto ERROR0;
 	}
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 23b8555..2d68cf3 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -5,6 +5,8 @@
  * See COPYING in the top level directory of the kernel tree.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/hwmon.h>
@@ -1414,14 +1416,13 @@
 
 	/* Make sure it's safe to access the device through ACPI */
 	if (!acpi_resources_are_enforced()) {
-		pr_err("atk: Resources not safely usable due to "
-		       "acpi_enforce_resources kernel parameter\n");
+		pr_err("Resources not safely usable due to acpi_enforce_resources kernel parameter\n");
 		return -EBUSY;
 	}
 
 	ret = acpi_bus_register_driver(&atk_driver);
 	if (ret)
-		pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
+		pr_info("acpi_bus_register_driver failed: %d\n", ret);
 
 	return ret;
 }
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 42de98d..194ca0a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -20,6 +20,8 @@
  * 02110-1301 USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -445,8 +447,8 @@
 	 * without thermal sensors will be filtered out.
 	 */
 	if (!cpu_has(c, X86_FEATURE_DTS)) {
-		printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
-		       " has no thermal sensor.\n", c->x86_model);
+		pr_info("CPU (model=0x%x) has no thermal sensor\n",
+			c->x86_model);
 		return 0;
 	}
 
@@ -466,7 +468,7 @@
 	pdev = platform_device_alloc(DRVNAME, cpu);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -478,8 +480,7 @@
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_free;
 	}
 
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 980c17d..d9c5927 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -25,6 +25,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -75,12 +77,14 @@
  * in4   +12V
  * in5   VTR   (+3.3V stby)
  * in6   Vbat
+ * in7   Vtrip (sch5127 only)
  *
  * --------------------------------------------------------------------- */
 
-/* Voltages (in) numbered 0-6 (ix) */
-#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) \
-						  : 0x94 + (ix))
+/* Voltages (in) numbered 0-7 (ix) */
+#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) : \
+					 (ix) < 7 ? 0x94 + (ix) : \
+						    0x1f)
 #define	DME1737_REG_IN_MIN(ix)		((ix) < 5 ? 0x44 + (ix) * 2 \
 						  : 0x91 + (ix) * 2)
 #define	DME1737_REG_IN_MAX(ix)		((ix) < 5 ? 0x45 + (ix) * 2 \
@@ -99,10 +103,11 @@
  *    IN_TEMP_LSB(1) = [temp3, temp1]
  *    IN_TEMP_LSB(2) = [in4, temp2]
  *    IN_TEMP_LSB(3) = [in3, in0]
- *    IN_TEMP_LSB(4) = [in2, in1] */
+ *    IN_TEMP_LSB(4) = [in2, in1]
+ *    IN_TEMP_LSB(5) = [res, in7] */
 #define DME1737_REG_IN_TEMP_LSB(ix)	(0x84 + (ix))
-static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0};
-static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4};
+static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0, 5};
+static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4, 4};
 static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1};
 static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
 
@@ -143,7 +148,7 @@
 #define DME1737_REG_ALARM1		0x41
 #define DME1737_REG_ALARM2		0x42
 #define DME1737_REG_ALARM3		0x83
-static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17};
+static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17, 18};
 static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6};
 static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
 
@@ -188,6 +193,7 @@
 #define HAS_PWM_MIN		(1 << 4)		/* bit 4 */
 #define HAS_FAN(ix)		(1 << ((ix) + 5))	/* bits 5-10 */
 #define HAS_PWM(ix)		(1 << ((ix) + 11))	/* bits 11-16 */
+#define HAS_IN7			(1 << 17)		/* bit 17 */
 
 /* ---------------------------------------------------------------------
  * Data structures and manipulation thereof
@@ -211,9 +217,9 @@
 	u32 has_features;
 
 	/* Register values */
-	u16 in[7];
-	u8  in_min[7];
-	u8  in_max[7];
+	u16 in[8];
+	u8  in_min[8];
+	u8  in_max[8];
 	s16 temp[3];
 	s8  temp_min[3];
 	s8  temp_max[3];
@@ -245,7 +251,7 @@
 static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
 					 3300};
 static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
-					 3300};
+					 3300, 1500};
 #define IN_NOMINAL(type)	((type) == sch311x ? IN_NOMINAL_SCH311x : \
 				 (type) == sch5027 ? IN_NOMINAL_SCH5027 : \
 				 (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
@@ -578,7 +584,7 @@
 {
 	struct dme1737_data *data = dev_get_drvdata(dev);
 	int ix;
-	u8 lsb[5];
+	u8 lsb[6];
 
 	mutex_lock(&data->update_lock);
 
@@ -601,6 +607,9 @@
 			/* Voltage inputs are stored as 16 bit values even
 			 * though they have only 12 bits resolution. This is
 			 * to make it consistent with the temp inputs. */
+			if (ix == 7 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			data->in[ix] = dme1737_read(data,
 					DME1737_REG_IN(ix)) << 8;
 			data->in_min[ix] = dme1737_read(data,
@@ -633,10 +642,16 @@
 		 * which the registers are read (MSB first, then LSB) is
 		 * important! */
 		for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) {
+			if (ix == 5 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			lsb[ix] = dme1737_read(data,
 					DME1737_REG_IN_TEMP_LSB(ix));
 		}
 		for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+			if (ix == 7 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] <<
 					DME1737_REG_IN_LSB_SHL[ix]) & 0xf0;
 		}
@@ -760,7 +775,7 @@
 
 /* ---------------------------------------------------------------------
  * Voltage sysfs attributes
- * ix = [0-5]
+ * ix = [0-7]
  * --------------------------------------------------------------------- */
 
 #define SYS_IN_INPUT	0
@@ -1437,7 +1452,7 @@
  * Sysfs device attribute defines and structs
  * --------------------------------------------------------------------- */
 
-/* Voltages 0-6 */
+/* Voltages 0-7 */
 
 #define SENSOR_DEVICE_ATTR_IN(ix) \
 static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \
@@ -1456,6 +1471,7 @@
 SENSOR_DEVICE_ATTR_IN(4);
 SENSOR_DEVICE_ATTR_IN(5);
 SENSOR_DEVICE_ATTR_IN(6);
+SENSOR_DEVICE_ATTR_IN(7);
 
 /* Temperatures 1-3 */
 
@@ -1574,7 +1590,7 @@
  * created unconditionally. The attributes that need modification of their
  * permissions are created read-only and write permissions are added or removed
  * on the fly when required */
-static struct attribute *dme1737_attr[] ={
+static struct attribute *dme1737_attr[] = {
 	/* Voltages */
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1679,7 +1695,7 @@
 };
 
 
-/* The following struct holds temp zone hysteresis  related attributes, which
+/* The following struct holds temp zone hysteresis related attributes, which
  * are not available in all chips. The following chips support them:
  * DME1737, SCH311x */
 static struct attribute *dme1737_zone_hyst_attr[] = {
@@ -1693,6 +1709,21 @@
 	.attrs = dme1737_zone_hyst_attr,
 };
 
+/* The following struct holds voltage in7 related attributes, which
+ * are not available in all chips. The following chips support them:
+ * SCH5127 */
+static struct attribute *dme1737_in7_attr[] = {
+	&sensor_dev_attr_in7_input.dev_attr.attr,
+	&sensor_dev_attr_in7_min.dev_attr.attr,
+	&sensor_dev_attr_in7_max.dev_attr.attr,
+	&sensor_dev_attr_in7_alarm.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group dme1737_in7_group = {
+	.attrs = dme1737_in7_attr,
+};
+
 /* The following structs hold the PWM attributes, some of which are optional.
  * Their creation depends on the chip configuration which is determined during
  * module load. */
@@ -1984,6 +2015,9 @@
 	if (data->has_features & HAS_ZONE_HYST) {
 		sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
 	}
+	if (data->has_features & HAS_IN7) {
+		sysfs_remove_group(&dev->kobj, &dme1737_in7_group);
+	}
 	sysfs_remove_group(&dev->kobj, &dme1737_group);
 
 	if (!data->client) {
@@ -1997,43 +2031,58 @@
 	int err, ix;
 
 	/* Create a name attribute for ISA devices */
-	if (!data->client &&
-	    (err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr))) {
-		goto exit;
+	if (!data->client) {
+		err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr);
+		if (err) {
+			goto exit;
+		}
 	}
 
 	/* Create standard sysfs attributes */
-	if ((err = sysfs_create_group(&dev->kobj, &dme1737_group))) {
+	err = sysfs_create_group(&dev->kobj, &dme1737_group);
+	if (err) {
 		goto exit_remove;
 	}
 
 	/* Create chip-dependent sysfs attributes */
-	if ((data->has_features & HAS_TEMP_OFFSET) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_temp_offset_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_TEMP_OFFSET) {
+		err = sysfs_create_group(&dev->kobj,
+					 &dme1737_temp_offset_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_VID) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_vid_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_VID) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_vid_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_ZONE3) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_zone3_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_ZONE3) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_zone3_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_ZONE_HYST) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_zone_hyst_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_ZONE_HYST) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_zone_hyst_group);
+		if (err) {
+			goto exit_remove;
+		}
+	}
+	if (data->has_features & HAS_IN7) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_in7_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
 
 	/* Create fan sysfs attributes */
 	for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
 		if (data->has_features & HAS_FAN(ix)) {
-			if ((err = sysfs_create_group(&dev->kobj,
-						&dme1737_fan_group[ix]))) {
+			err = sysfs_create_group(&dev->kobj,
+						 &dme1737_fan_group[ix]);
+			if (err) {
 				goto exit_remove;
 			}
 		}
@@ -2042,14 +2091,17 @@
 	/* Create PWM sysfs attributes */
 	for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
 		if (data->has_features & HAS_PWM(ix)) {
-			if ((err = sysfs_create_group(&dev->kobj,
-						&dme1737_pwm_group[ix]))) {
+			err = sysfs_create_group(&dev->kobj,
+						 &dme1737_pwm_group[ix]);
+			if (err) {
 				goto exit_remove;
 			}
-			if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
-			    (err = sysfs_create_file(&dev->kobj,
-					dme1737_auto_pwm_min_attr[ix]))) {
-				goto exit_remove;
+			if ((data->has_features & HAS_PWM_MIN) && (ix < 3)) {
+				err = sysfs_create_file(&dev->kobj,
+						dme1737_auto_pwm_min_attr[ix]);
+				if (err) {
+					goto exit_remove;
+				}
 			}
 		}
 	}
@@ -2186,7 +2238,7 @@
 		data->has_features |= HAS_ZONE3;
 		break;
 	case sch5127:
-		data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+		data->has_features |= HAS_FAN(2) | HAS_PWM(2) | HAS_IN7;
 		break;
 	default:
 		break;
@@ -2279,8 +2331,9 @@
 	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
 
 	/* Get the base address of the runtime registers */
-	if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
-		      dme1737_sio_inb(sio_cip, 0x61))) {
+	addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		dme1737_sio_inb(sio_cip, 0x61);
+	if (!addr) {
 		err = -ENODEV;
 		goto exit;
 	}
@@ -2361,13 +2414,15 @@
 	mutex_init(&data->update_lock);
 
 	/* Initialize the DME1737 chip */
-	if ((err = dme1737_init_device(dev))) {
+	err = dme1737_init_device(dev);
+	if (err) {
 		dev_err(dev, "Failed to initialize device.\n");
 		goto exit_kfree;
 	}
 
 	/* Create sysfs files */
-	if ((err = dme1737_create_files(dev))) {
+	err = dme1737_create_files(dev);
+	if (err) {
 		dev_err(dev, "Failed to create sysfs files.\n");
 		goto exit_kfree;
 	}
@@ -2444,9 +2499,10 @@
 	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
 
 	/* Get the base address of the runtime registers */
-	if (!(base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
-			   dme1737_sio_inb(sio_cip, 0x61))) {
-		printk(KERN_ERR "dme1737: Base address not set.\n");
+	base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		     dme1737_sio_inb(sio_cip, 0x61);
+	if (!base_addr) {
+		pr_err("Base address not set\n");
 		err = -ENODEV;
 		goto exit;
 	}
@@ -2474,21 +2530,22 @@
 	if (err)
 		goto exit;
 
-	if (!(pdev = platform_device_alloc("dme1737", addr))) {
-		printk(KERN_ERR "dme1737: Failed to allocate device.\n");
+	pdev = platform_device_alloc("dme1737", addr);
+	if (!pdev) {
+		pr_err("Failed to allocate device\n");
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	if ((err = platform_device_add_resources(pdev, &res, 1))) {
-		printk(KERN_ERR "dme1737: Failed to add device resource "
-		       "(err = %d).\n", err);
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
+		pr_err("Failed to add device resource (err = %d)\n", err);
 		goto exit_device_put;
 	}
 
-	if ((err = platform_device_add(pdev))) {
-		printk(KERN_ERR "dme1737: Failed to add device (err = %d).\n",
-		       err);
+	err = platform_device_add(pdev);
+	if (err) {
+		pr_err("Failed to add device (err = %d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -2514,11 +2571,12 @@
 		dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
 			(unsigned short)res->start,
 			(unsigned short)res->start + DME1737_EXTENT - 1);
-                err = -EBUSY;
-                goto exit;
-        }
+		err = -EBUSY;
+		goto exit;
+	}
 
-	if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
+	data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL);
+	if (!data) {
 		err = -ENOMEM;
 		goto exit_release_region;
 	}
@@ -2565,13 +2623,15 @@
 		 data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
 
 	/* Initialize the chip */
-	if ((err = dme1737_init_device(dev))) {
+	err = dme1737_init_device(dev);
+	if (err) {
 		dev_err(dev, "Failed to initialize device.\n");
 		goto exit_kfree;
 	}
 
 	/* Create sysfs files */
-	if ((err = dme1737_create_files(dev))) {
+	err = dme1737_create_files(dev);
+	if (err) {
 		dev_err(dev, "Failed to create sysfs files.\n");
 		goto exit_kfree;
 	}
@@ -2628,7 +2688,8 @@
 	int err;
 	unsigned short addr;
 
-	if ((err = i2c_add_driver(&dme1737_i2c_driver))) {
+	err = i2c_add_driver(&dme1737_i2c_driver);
+	if (err) {
 		goto exit;
 	}
 
@@ -2641,12 +2702,14 @@
 		return 0;
 	}
 
-	if ((err = platform_driver_register(&dme1737_isa_driver))) {
+	err = platform_driver_register(&dme1737_isa_driver);
+	if (err) {
 		goto exit_del_i2c_driver;
 	}
 
 	/* Sets global pdev as a side effect */
-	if ((err = dme1737_isa_device_add(addr))) {
+	err = dme1737_isa_device_add(addr);
+	if (err) {
 		goto exit_del_isa_driver;
 	}
 
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
new file mode 100644
index 0000000..257957c
--- /dev/null
+++ b/drivers/hwmon/ds620.c
@@ -0,0 +1,337 @@
+/*
+ *  ds620.c - Support for temperature sensor and thermostat DS620
+ *
+ *  Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ *  based on ds1621.c by Christian W. Zuckschwerdt  <zany@triq.net>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/i2c/ds620.h>
+
+/*
+ * Many DS620 constants specified below
+ *  15   14   13   12   11   10   09    08
+ * |Done|NVB |THF |TLF |R1  |R0  |AUTOC|1SHOT|
+ *
+ *  07   06   05   04   03   02   01    00
+ * |PO2 |PO1 |A2  |A1  |A0  |    |     |     |
+ */
+#define DS620_REG_CONFIG_DONE		0x8000
+#define DS620_REG_CONFIG_NVB		0x4000
+#define DS620_REG_CONFIG_THF		0x2000
+#define DS620_REG_CONFIG_TLF		0x1000
+#define DS620_REG_CONFIG_R1		0x0800
+#define DS620_REG_CONFIG_R0		0x0400
+#define DS620_REG_CONFIG_AUTOC		0x0200
+#define DS620_REG_CONFIG_1SHOT		0x0100
+#define DS620_REG_CONFIG_PO2		0x0080
+#define DS620_REG_CONFIG_PO1		0x0040
+#define DS620_REG_CONFIG_A2		0x0020
+#define DS620_REG_CONFIG_A1		0x0010
+#define DS620_REG_CONFIG_A0		0x0008
+
+/* The DS620 registers */
+static const u8 DS620_REG_TEMP[3] = {
+	0xAA,			/* input, word, RO */
+	0xA2,			/* min, word, RW */
+	0xA0,			/* max, word, RW */
+};
+
+#define DS620_REG_CONF		0xAC	/* word, RW */
+#define DS620_COM_START		0x51	/* no data */
+#define DS620_COM_STOP		0x22	/* no data */
+
+/* Each client has this additional data */
+struct ds620_data {
+	struct device *hwmon_dev;
+	struct mutex update_lock;
+	char valid;		/* !=0 if following fields are valid */
+	unsigned long last_updated;	/* In jiffies */
+
+	u16 temp[3];		/* Register values, word */
+};
+
+/*
+ *  Temperature registers are word-sized.
+ *  DS620 uses a high-byte first convention, which is exactly opposite to
+ *  the SMBus standard.
+ */
+static int ds620_read_temp(struct i2c_client *client, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_word_data(client, reg);
+	if (ret < 0)
+		return ret;
+	return swab16(ret);
+}
+
+static int ds620_write_temp(struct i2c_client *client, u8 reg, u16 value)
+{
+	return i2c_smbus_write_word_data(client, reg, swab16(value));
+}
+
+static void ds620_init_client(struct i2c_client *client)
+{
+	struct ds620_platform_data *ds620_info = client->dev.platform_data;
+	u16 conf, new_conf;
+
+	new_conf = conf =
+	    swab16(i2c_smbus_read_word_data(client, DS620_REG_CONF));
+
+	/* switch to continuous conversion mode */
+	new_conf &= ~DS620_REG_CONFIG_1SHOT;
+	/* already high at power-on, but don't trust the BIOS! */
+	new_conf |= DS620_REG_CONFIG_PO2;
+	/* thermostat mode according to platform data */
+	if (ds620_info && ds620_info->pomode == 1)
+		new_conf &= ~DS620_REG_CONFIG_PO1; /* PO_LOW */
+	else if (ds620_info && ds620_info->pomode == 2)
+		new_conf |= DS620_REG_CONFIG_PO1; /* PO_HIGH */
+	else
+		new_conf &= ~DS620_REG_CONFIG_PO2; /* always low */
+	/* with highest precision */
+	new_conf |= DS620_REG_CONFIG_R1 | DS620_REG_CONFIG_R0;
+
+	if (conf != new_conf)
+		i2c_smbus_write_word_data(client, DS620_REG_CONF,
+					  swab16(new_conf));
+
+	/* start conversion */
+	i2c_smbus_write_byte(client, DS620_COM_START);
+}
+
+static struct ds620_data *ds620_update_client(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds620_data *data = i2c_get_clientdata(client);
+	struct ds620_data *ret = data;
+
+	mutex_lock(&data->update_lock);
+
+	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+	    || !data->valid) {
+		int i;
+		int res;
+
+		dev_dbg(&client->dev, "Starting ds620 update\n");
+
+		for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
+			res = ds620_read_temp(client,
+					      DS620_REG_TEMP[i]);
+			if (res < 0) {
+				ret = ERR_PTR(res);
+				goto abort;
+			}
+
+			data->temp[i] = res;
+		}
+
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+abort:
+	mutex_unlock(&data->update_lock);
+
+	return ret;
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+			 char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct ds620_data *data = ds620_update_client(dev);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	return sprintf(buf, "%d\n", ((data->temp[attr->index] / 8) * 625) / 10);
+}
+
+static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+			const char *buf, size_t count)
+{
+	int res;
+	long val;
+
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds620_data *data = i2c_get_clientdata(client);
+
+	res = strict_strtol(buf, 10, &val);
+
+	if (res)
+		return res;
+
+	val = (val * 10 / 625) * 8;
+
+	mutex_lock(&data->update_lock);
+	data->temp[attr->index] = val;
+	ds620_write_temp(client, DS620_REG_TEMP[attr->index],
+			 data->temp[attr->index]);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+			  char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct ds620_data *data = ds620_update_client(dev);
+	struct i2c_client *client = to_i2c_client(dev);
+	u16 conf, new_conf;
+	int res;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	/* reset alarms if necessary */
+	res = i2c_smbus_read_word_data(client, DS620_REG_CONF);
+	if (res < 0)
+		return res;
+
+	conf = swab16(res);
+	new_conf = conf;
+	new_conf &= ~attr->index;
+	if (conf != new_conf) {
+		res = i2c_smbus_write_word_data(client, DS620_REG_CONF,
+						swab16(new_conf));
+		if (res < 0)
+			return res;
+	}
+
+	return sprintf(buf, "%d\n", !!(conf & attr->index));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp, set_temp, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp, set_temp, 2);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL,
+			  DS620_REG_CONFIG_TLF);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL,
+			  DS620_REG_CONFIG_THF);
+
+static struct attribute *ds620_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_min.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group ds620_group = {
+	.attrs = ds620_attributes,
+};
+
+static int ds620_probe(struct i2c_client *client,
+		       const struct i2c_device_id *id)
+{
+	struct ds620_data *data;
+	int err;
+
+	data = kzalloc(sizeof(struct ds620_data), GFP_KERNEL);
+	if (!data) {
+		err = -ENOMEM;
+		goto exit;
+	}
+
+	i2c_set_clientdata(client, data);
+	mutex_init(&data->update_lock);
+
+	/* Initialize the DS620 chip */
+	ds620_init_client(client);
+
+	/* Register sysfs hooks */
+	err = sysfs_create_group(&client->dev.kobj, &ds620_group);
+	if (err)
+		goto exit_free;
+
+	data->hwmon_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(data->hwmon_dev)) {
+		err = PTR_ERR(data->hwmon_dev);
+		goto exit_remove_files;
+	}
+
+	dev_info(&client->dev, "temperature sensor found\n");
+
+	return 0;
+
+exit_remove_files:
+	sysfs_remove_group(&client->dev.kobj, &ds620_group);
+exit_free:
+	kfree(data);
+exit:
+	return err;
+}
+
+static int ds620_remove(struct i2c_client *client)
+{
+	struct ds620_data *data = i2c_get_clientdata(client);
+
+	hwmon_device_unregister(data->hwmon_dev);
+	sysfs_remove_group(&client->dev.kobj, &ds620_group);
+
+	kfree(data);
+
+	return 0;
+}
+
+static const struct i2c_device_id ds620_id[] = {
+	{"ds620", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ds620_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ds620_driver = {
+	.class = I2C_CLASS_HWMON,
+	.driver = {
+		   .name = "ds620",
+	},
+	.probe = ds620_probe,
+	.remove = ds620_remove,
+	.id_table = ds620_id,
+};
+
+static int __init ds620_init(void)
+{
+	return i2c_add_driver(&ds620_driver);
+}
+
+static void __exit ds620_exit(void)
+{
+	i2c_del_driver(&ds620_driver);
+}
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("DS620 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ds620_init);
+module_exit(ds620_exit);
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 8dee3f3..5dea9fa 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -269,23 +269,30 @@
 			struct i2c_board_info *info)
 {
 	int id;
-	/* Check if thermal chip is SMSC and EMC1403 */
+	/* Check if thermal chip is SMSC and EMC1403 or EMC1423 */
 
 	id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
 	if (id != 0x5d)
 		return -ENODEV;
 
+	id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+	switch (id) {
+	case 0x21:
+		strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+		break;
+	case 0x23:
+		strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
+		break;
 	/* Note: 0x25 is the 1404 which is very similar and this
 	   driver could be extended */
-	id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
-	if (id != 0x21)
+	default:
 		return -ENODEV;
+	}
 
 	id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
 	if (id != 0x01)
 		return -ENODEV;
 
-	strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
 	return 0;
 }
 
@@ -342,6 +349,7 @@
 
 static const struct i2c_device_id emc1403_idtable[] = {
 	{ "emc1403", 0 },
+	{ "emc1423", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 525a00b..92f9497 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -28,6 +28,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1309,7 +1311,7 @@
 
 	if (!(data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL))) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Out of memory\n");
+		pr_err("Out of memory\n");
 		goto exit;
 	}
 
@@ -1451,7 +1453,7 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -1462,22 +1464,20 @@
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct f71805f_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1516,30 +1516,27 @@
 		sio_data->fnsel1 = superio_inb(sioaddr, SIO_REG_FNSEL1);
 		break;
 	default:
-		printk(KERN_INFO DRVNAME ": Unsupported Fintek device, "
-		       "skipping\n");
+		pr_info("Unsupported Fintek device, skipping\n");
 		goto exit;
 	}
 
 	superio_select(sioaddr, F71805F_LD_HWM);
 	if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
-		printk(KERN_WARNING DRVNAME ": Device not activated, "
-		       "skipping\n");
+		pr_warn("Device not activated, skipping\n");
 		goto exit;
 	}
 
 	*address = superio_inw(sioaddr, SIO_REG_ADDR);
 	if (*address == 0) {
-		printk(KERN_WARNING DRVNAME ": Base address not set, "
-		       "skipping\n");
+		pr_warn("Base address not set, skipping\n");
 		goto exit;
 	}
 	*address &= ~(REGION_LENGTH - 1);	/* Ignore 3 LSB */
 
 	err = 0;
-	printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %u\n",
-	       names[sio_data->kind], *address,
-	       superio_inb(sioaddr, SIO_REG_DEVREV));
+	pr_info("Found %s chip at %#x, revision %u\n",
+		names[sio_data->kind], *address,
+		superio_inb(sioaddr, SIO_REG_DEVREV));
 
 exit:
 	superio_exit(sioaddr);
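
The f71805f hunks above, like most of the conversions that follow, define pr_fmt() before the first include and then replace printk(KERN_ERR DRVNAME ": ...") calls with pr_err() and friends; the pr_* macros paste pr_fmt() around the format string, so the module-name prefix no longer has to be repeated at every call site. A minimal userspace sketch of the same macro mechanism (fprintf stands in for printk, purely for illustration):

#include <stdio.h>

#define KBUILD_MODNAME "f71805f"		/* normally provided by kbuild */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	int err = -12;

	/* Prints: "f71805f: Device resource addition failed (-12)" */
	pr_err("Device resource addition failed (%d)\n", err);
	return 0;
}
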
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 75afb3b..3f49dd3 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -18,6 +18,8 @@
  *   59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.             *
  ***************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -865,8 +867,7 @@
 {
 	/* Don't step on other drivers' I/O space by accident */
 	if (!request_muxed_region(base, 2, DRVNAME)) {
-		printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
-				base);
+		pr_err("I/O address 0x%04x already in use\n", base);
 		return -EBUSY;
 	}
 
@@ -2192,7 +2193,7 @@
 
 	devid = superio_inw(sioaddr, SIO_REG_MANID);
 	if (devid != SIO_FINTEK_ID) {
-		pr_debug(DRVNAME ": Not a Fintek device\n");
+		pr_debug("Not a Fintek device\n");
 		err = -ENODEV;
 		goto exit;
 	}
@@ -2215,8 +2216,8 @@
 		sio_data->type = f8000;
 		break;
 	default:
-		printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
-		       (unsigned int)devid);
+		pr_info("Unsupported Fintek device: %04x\n",
+			(unsigned int)devid);
 		err = -ENODEV;
 		goto exit;
 	}
@@ -2227,21 +2228,21 @@
 		superio_select(sioaddr, SIO_F71882FG_LD_HWM);
 
 	if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
-		printk(KERN_WARNING DRVNAME ": Device not activated\n");
+		pr_warn("Device not activated\n");
 		err = -ENODEV;
 		goto exit;
 	}
 
 	*address = superio_inw(sioaddr, SIO_REG_ADDR);
 	if (*address == 0) {
-		printk(KERN_WARNING DRVNAME ": Base address not set\n");
+		pr_warn("Base address not set\n");
 		err = -ENODEV;
 		goto exit;
 	}
 	*address &= ~(REGION_LENGTH - 1);	/* Ignore 3 LSB */
 
 	err = 0;
-	printk(KERN_INFO DRVNAME ": Found %s chip at %#x, revision %d\n",
+	pr_info("Found %s chip at %#x, revision %d\n",
 		f71882fg_names[sio_data->type],	(unsigned int)*address,
 		(int)superio_inb(sioaddr, SIO_REG_DEVREV));
 exit:
@@ -2270,20 +2271,20 @@
 
 	err = platform_device_add_resources(f71882fg_pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed\n");
+		pr_err("Device resource addition failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(f71882fg_pdev, sio_data,
 				       sizeof(struct f71882fg_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(f71882fg_pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed\n");
+		pr_err("Device addition failed\n");
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index d4d4ca6..aa6d8b6 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -49,7 +49,6 @@
 #include <linux/kref.h>
 
 /* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
 static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
 
 /* Insmod parameters */
@@ -850,7 +849,7 @@
 
 static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-	static struct watchdog_info ident = {
+	struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
 				WDIOF_CARDRESET,
 		.identity = "FSC watchdog"
@@ -858,7 +857,6 @@
 	int i, ret = 0;
 	struct fschmd_data *data = filp->private_data;
 
-	mutex_lock(&watchdog_mutex);
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		ident.firmware_version = data->revision;
@@ -915,7 +913,6 @@
 	default:
 		ret = -ENOTTY;
 	}
-	mutex_unlock(&watchdog_mutex);
 	return ret;
 }
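
The fschmd change above drops watchdog_mutex and, just as importantly, makes ident an automatic variable: a static local is a single object shared by every caller, so writing the per-device data->revision into it needed the lock, whereas a per-call copy does not. A small standalone sketch of the difference (the revision values are made up for illustration):

#include <stdio.h>

struct watchdog_info {
	int firmware_version;
	const char *identity;
};

/* One shared object for all callers -- concurrent callers would race on it. */
static struct watchdog_info *shared_ident(int revision)
{
	static struct watchdog_info ident = { .identity = "FSC watchdog" };

	ident.firmware_version = revision;
	return &ident;
}

int main(void)
{
	struct watchdog_info *a = shared_ident(1);
	struct watchdog_info *b = shared_ident(2);

	/* Both pointers alias the same storage, so the first result is lost. */
	printf("a=%d b=%d same_object=%d\n",
	       a->firmware_version, b->firmware_version, a == b);
	return 0;
}
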
 
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index a56a784..3d21fa2 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -20,6 +20,8 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/dmi.h>
@@ -147,7 +149,7 @@
 static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
 {
 	lis3_dev.ac = *((union axis_conversion *)dmi->driver_data);
-	printk(KERN_INFO DRIVER_NAME ": hardware type %s found.\n", dmi->ident);
+	pr_info("hardware type %s found\n", dmi->ident);
 
 	return 1;
 }
@@ -303,11 +305,10 @@
 
 	/* If possible use a "standard" axes order */
 	if (lis3_dev.ac.x && lis3_dev.ac.y && lis3_dev.ac.z) {
-		printk(KERN_INFO DRIVER_NAME ": Using custom axes %d,%d,%d\n",
-		       lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
+		pr_info("Using custom axes %d,%d,%d\n",
+			lis3_dev.ac.x, lis3_dev.ac.y, lis3_dev.ac.z);
 	} else if (dmi_check_system(lis3lv02d_dmi_ids) == 0) {
-		printk(KERN_INFO DRIVER_NAME ": laptop model unknown, "
-				 "using default axes configuration\n");
+		pr_info("laptop model unknown, using default axes configuration\n");
 		lis3_dev.ac = lis3lv02d_axis_normal;
 	}
 
@@ -385,7 +386,7 @@
 	if (ret < 0)
 		return ret;
 
-	printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
+	pr_info("driver loaded\n");
 
 	return 0;
 }
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 2b2ca16..2582bfe 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -22,6 +22,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hwmon-vid.h>
@@ -146,8 +148,8 @@
 		return(val > 0x77 ? 0 : (1500000 - (val * 12500) + 500) / 1000);
 	default:		/* report 0 for unknown */
 		if (vrm)
-			printk(KERN_WARNING "hwmon-vid: Requested unsupported "
-			       "VRM version (%u)\n", (unsigned int)vrm);
+			pr_warn("Requested unsupported VRM version (%u)\n",
+				(unsigned int)vrm);
 		return 0;
 	}
 }
@@ -246,8 +248,7 @@
 	}
 	vrm_ret = find_vrm(eff_family, eff_model, eff_stepping, c->x86_vendor);
 	if (vrm_ret == 0)
-		printk(KERN_INFO "hwmon-vid: Unknown VRM version of your "
-		       "x86 CPU\n");
+		pr_info("Unknown VRM version of your x86 CPU\n");
 	return vrm_ret;
 }
 
@@ -255,7 +256,7 @@
 #else
 u8 vid_which_vrm(void)
 {
-	printk(KERN_INFO "hwmon-vid: Unknown VRM version of your CPU\n");
+	pr_info("Unknown VRM version of your CPU\n");
 	return 0;
 }
 #endif
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 29ea675..a61e781 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -10,6 +10,8 @@
     the Free Software Foundation; version 2 of the License.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -119,7 +121,7 @@
 
 	hwmon_class = class_create(THIS_MODULE, "hwmon");
 	if (IS_ERR(hwmon_class)) {
-		printk(KERN_ERR "hwmon.c: couldn't create sysfs class\n");
+		pr_err("couldn't create sysfs class\n");
 		return PTR_ERR(hwmon_class);
 	}
 	return 0;
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index eaee546..bc6e2ab 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -20,6 +20,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/ipmi.h>
 #include <linux/module.h>
 #include <linux/hwmon.h>
@@ -1090,7 +1092,7 @@
 
 	res = driver_register(&aem_driver.driver);
 	if (res) {
-		printk(KERN_ERR "Can't register aem driver\n");
+		pr_err("Can't register aem driver\n");
 		return res;
 	}
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index a428a92..316b648 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -38,6 +38,8 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1570,26 +1572,25 @@
 	case 0xffff:	/* No device at all */
 		goto exit;
 	default:
-		pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n",
-			 chip_type);
+		pr_debug("Unsupported chip (DEVID=0x%x)\n", chip_type);
 		goto exit;
 	}
 
 	superio_select(PME);
 	if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
-		pr_info("it87: Device not activated, skipping\n");
+		pr_info("Device not activated, skipping\n");
 		goto exit;
 	}
 
 	*address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
 	if (*address == 0) {
-		pr_info("it87: Base address not set, skipping\n");
+		pr_info("Base address not set, skipping\n");
 		goto exit;
 	}
 
 	err = 0;
 	sio_data->revision = superio_inb(DEVREV) & 0x0f;
-	pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n",
+	pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
 		chip_type, *address, sio_data->revision);
 
 	/* in8 (Vbat) is always internal */
@@ -1615,7 +1616,7 @@
 		} else {
 			/* We need at least 4 VID pins */
 			if (reg & 0x0f) {
-				pr_info("it87: VID is disabled (pins used for GPIO)\n");
+				pr_info("VID is disabled (pins used for GPIO)\n");
 				sio_data->skip_vid = 1;
 			}
 		}
@@ -1651,7 +1652,7 @@
 		if (sio_data->type == it8720 && !(reg & (1 << 1))) {
 			reg |= (1 << 1);
 			superio_outb(IT87_SIO_PINX2_REG, reg);
-			pr_notice("it87: Routing internal VCCH to in7\n");
+			pr_notice("Routing internal VCCH to in7\n");
 		}
 		if (reg & (1 << 0))
 			sio_data->internal |= (1 << 0);
@@ -1661,7 +1662,7 @@
 		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	}
 	if (sio_data->beep_pin)
-		pr_info("it87: Beeping is supported\n");
+		pr_info("Beeping is supported\n");
 
 	/* Disable specific features based on DMI strings */
 	board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -1675,8 +1676,7 @@
 			   the PWM2 duty cycle, so we disable it.
 			   I use the board name string as the trigger in case
 			   the same board is ever used in other systems. */
-			pr_info("it87: Disabling pwm2 due to "
-				"hardware constraints\n");
+			pr_info("Disabling pwm2 due to hardware constraints\n");
 			sio_data->skip_pwm = (1 << 1);
 		}
 	}
@@ -2189,28 +2189,26 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct it87_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 0cee73a..1b674b7 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -20,6 +20,8 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/dmi.h>
@@ -860,8 +862,7 @@
 					(p->irq_flags2 & IRQF_TRIGGER_MASK),
 					DRIVER_NAME, &lis3_dev);
 		if (err < 0)
-			printk(KERN_ERR DRIVER_NAME
-				"No second IRQ. Limited functionality\n");
+			pr_err("No second IRQ. Limited functionality\n");
 	}
 }
 
@@ -879,7 +880,7 @@
 
 	switch (dev->whoami) {
 	case WAI_12B:
-		printk(KERN_INFO DRIVER_NAME ": 12 bits sensor found\n");
+		pr_info("12 bits sensor found\n");
 		dev->read_data = lis3lv02d_read_12;
 		dev->mdps_max_val = 2048;
 		dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
@@ -890,7 +891,7 @@
 		dev->regs_size = ARRAY_SIZE(lis3_wai12_regs);
 		break;
 	case WAI_8B:
-		printk(KERN_INFO DRIVER_NAME ": 8 bits sensor found\n");
+		pr_info("8 bits sensor found\n");
 		dev->read_data = lis3lv02d_read_8;
 		dev->mdps_max_val = 128;
 		dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
@@ -901,7 +902,7 @@
 		dev->regs_size = ARRAY_SIZE(lis3_wai8_regs);
 		break;
 	case WAI_3DC:
-		printk(KERN_INFO DRIVER_NAME ": 8 bits 3DC sensor found\n");
+		pr_info("8 bits 3DC sensor found\n");
 		dev->read_data = lis3lv02d_read_8;
 		dev->mdps_max_val = 128;
 		dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
@@ -910,8 +911,7 @@
 		dev->scale = LIS3_SENSITIVITY_8B;
 		break;
 	default:
-		printk(KERN_ERR DRIVER_NAME
-			": unknown sensor type 0x%X\n", dev->whoami);
+		pr_err("unknown sensor type 0x%X\n", dev->whoami);
 		return -EINVAL;
 	}
 
@@ -935,7 +935,7 @@
 	}
 
 	if (lis3lv02d_joystick_enable())
-		printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
+		pr_err("joystick initialization failed\n");
 
 	/* passing in platform specific data is purely optional and only
 	 * used by the SPI transport layer at the moment */
@@ -957,8 +957,7 @@
 
 	/* bail if we did not get an IRQ from the bus layer */
 	if (!dev->irq) {
-		printk(KERN_ERR DRIVER_NAME
-			": No IRQ. Disabling /dev/freefall\n");
+		pr_err("No IRQ. Disabling /dev/freefall\n");
 		goto out;
 	}
 
@@ -985,12 +984,12 @@
 				DRIVER_NAME, &lis3_dev);
 
 	if (err < 0) {
-		printk(KERN_ERR DRIVER_NAME "Cannot get IRQ\n");
+		pr_err("Cannot get IRQ\n");
 		goto out;
 	}
 
 	if (misc_register(&lis3lv02d_misc_device))
-		printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
+		pr_err("misc_register failed\n");
 out:
 	return 0;
 }
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index fd108cf..3b84fb5 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -24,6 +24,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -67,8 +69,7 @@
 	 */
 	status = spi_write_then_read(spi, NULL, 0, &rxbuf[0], 2);
 	if (status < 0) {
-		printk(KERN_WARNING
-		"spi_write_then_read failed with status %d\n", status);
+		pr_warn("spi_write_then_read failed with status %d\n", status);
 		goto out;
 	}
 	raw = (rxbuf[0] << 8) + rxbuf[1];
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 72ff2c4..4cb24ea 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -19,6 +19,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -858,7 +860,7 @@
 	 * individually for the probing phase. */
 	for (port = address; port < address + LM78_EXTENT; port++) {
 		if (!request_region(port, 1, "lm78")) {
-			pr_debug("lm78: Failed to request port 0x%x\n", port);
+			pr_debug("Failed to request port 0x%x\n", port);
 			goto release;
 		}
 	}
@@ -920,7 +922,7 @@
 		found = 1;
 
 	if (found)
-		pr_info("lm78: Found an %s chip at %#x\n",
+		pr_info("Found an %s chip at %#x\n",
 			val & 0x80 ? "LM79" : "LM78", (int)address);
 
  release:
@@ -942,21 +944,19 @@
 	pdev = platform_device_alloc("lm78", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "lm78: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "lm78: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "lm78: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 4546d82..1a6dfb6 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -1,13 +1,9 @@
 /*
- * lm95241.c - Part of lm_sensors, Linux kernel modules for hardware
- *             monitoring
- * Copyright (C) 2008 Davide Rizzo <elpa-rizzo@gmail.com>
+ * Copyright (C) 2008, 2010 Davide Rizzo <elpa.rizzo@gmail.com>
  *
- * Based on the max1619 driver. The LM95241 is a sensor chip made by National
- *   Semiconductors.
- * It reports up to three temperatures (its own plus up to
- * two external ones). Complete datasheet can be
- * obtained from National's website at:
+ * The LM95241 is a sensor chip made by National Semiconductors.
+ * It reports up to three temperatures (its own plus up to two external ones).
+ * Complete datasheet can be obtained from National's website at:
  *   http://www.national.com/ds.cgi/LM/LM95241.pdf
  *
  * This program is free software; you can redistribute it and/or modify
@@ -36,8 +32,10 @@
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
 
+#define DEVNAME "lm95241"
+
 static const unsigned short normal_i2c[] = {
-	0x19, 0x2a, 0x2b, I2C_CLIENT_END};
+	0x19, 0x2a, 0x2b, I2C_CLIENT_END };
 
 /* LM95241 registers */
 #define LM95241_REG_R_MAN_ID		0xFE
@@ -46,7 +44,7 @@
 #define LM95241_REG_RW_CONFIG		0x03
 #define LM95241_REG_RW_REM_FILTER	0x06
 #define LM95241_REG_RW_TRUTHERM		0x07
-#define LM95241_REG_W_ONE_SHOT  	0x0F
+#define LM95241_REG_W_ONE_SHOT		0x0F
 #define LM95241_REG_R_LOCAL_TEMPH	0x10
 #define LM95241_REG_R_REMOTE1_TEMPH	0x11
 #define LM95241_REG_R_REMOTE2_TEMPH	0x12
@@ -79,52 +77,205 @@
 #define MANUFACTURER_ID 0x01
 #define DEFAULT_REVISION 0xA4
 
-/* Conversions and various macros */
-#define TEMP_FROM_REG(val_h, val_l) (((val_h) & 0x80 ? (val_h) - 0x100 : \
-    (val_h)) * 1000 + (val_l) * 1000 / 256)
-
-/* Functions declaration */
-static void lm95241_init_client(struct i2c_client *client);
-static struct lm95241_data *lm95241_update_device(struct device *dev);
+static const u8 lm95241_reg_address[] = {
+	LM95241_REG_R_LOCAL_TEMPH,
+	LM95241_REG_R_LOCAL_TEMPL,
+	LM95241_REG_R_REMOTE1_TEMPH,
+	LM95241_REG_R_REMOTE1_TEMPL,
+	LM95241_REG_R_REMOTE2_TEMPH,
+	LM95241_REG_R_REMOTE2_TEMPL
+};
 
 /* Client data (each client gets its own) */
 struct lm95241_data {
 	struct device *hwmon_dev;
 	struct mutex update_lock;
-	unsigned long last_updated, interval; /* in jiffies */
-	char valid; /* zero until following fields are valid */
+	unsigned long last_updated, interval;	/* in jiffies */
+	char valid;		/* zero until following fields are valid */
 	/* registers values */
-	u8 local_h, local_l; /* local */
-	u8 remote1_h, remote1_l; /* remote1 */
-	u8 remote2_h, remote2_l; /* remote2 */
+	u8 temp[ARRAY_SIZE(lm95241_reg_address)];
 	u8 config, model, trutherm;
 };
 
-/* Sysfs stuff */
-#define show_temp(value) \
-static ssize_t show_##value(struct device *dev, \
-    struct device_attribute *attr, char *buf) \
-{ \
-	struct lm95241_data *data = lm95241_update_device(dev); \
-	snprintf(buf, PAGE_SIZE - 1, "%d\n", \
-		TEMP_FROM_REG(data->value##_h, data->value##_l)); \
-	return strlen(buf); \
+/* Conversions */
+static int TempFromReg(u8 val_h, u8 val_l)
+{
+	int temp = val_h & 0x80 ? val_h - 0x100 : val_h;
+
+	return temp * 1000 + val_l * 1000 / 256;
+}
-show_temp(local);
-show_temp(remote1);
-show_temp(remote2);
 
-static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
-			 char *buf)
+static struct lm95241_data *lm95241_update_device(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+
+	mutex_lock(&data->update_lock);
+
+	if (time_after(jiffies, data->last_updated + data->interval) ||
+	    !data->valid) {
+		int i;
+
+		dev_dbg(&client->dev, "Updating lm95241 data.\n");
+		for (i = 0; i < ARRAY_SIZE(lm95241_reg_address); i++)
+			data->temp[i]
+			  = i2c_smbus_read_byte_data(client,
+						     lm95241_reg_address[i]);
+		data->last_updated = jiffies;
+		data->valid = 1;
+	}
+
+	mutex_unlock(&data->update_lock);
+
+	return data;
+}
+
+/* Sysfs stuff */
+static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+			  char *buf)
 {
 	struct lm95241_data *data = lm95241_update_device(dev);
 
-	snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
-	return strlen(buf);
+	return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+		TempFromReg(data->temp[to_sensor_dev_attr(attr)->index],
+			    data->temp[to_sensor_dev_attr(attr)->index + 1]));
+}
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+
+	return snprintf(buf, PAGE_SIZE - 1,
+		data->model & to_sensor_dev_attr(attr)->index ? "1\n" : "2\n");
+}
+
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+	int shift;
+	u8 mask = to_sensor_dev_attr(attr)->index;
+
+	if (strict_strtoul(buf, 10, &val) < 0)
+		return -EINVAL;
+	if (val != 1 && val != 2)
+		return -EINVAL;
+
+	shift = mask == R1MS_MASK ? TT1_SHIFT : TT2_SHIFT;
+
+	mutex_lock(&data->update_lock);
+
+	data->trutherm &= ~(TT_MASK << shift);
+	if (val == 1) {
+		data->model |= mask;
+		data->trutherm |= (TT_ON << shift);
+	} else {
+		data->model &= ~mask;
+		data->trutherm |= (TT_OFF << shift);
+	}
+	data->valid = 0;
+
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
+				  data->model);
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
+				  data->trutherm);
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_min(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+
+	return snprintf(buf, PAGE_SIZE - 1,
+			data->config & to_sensor_dev_attr(attr)->index ?
+			"-127000\n" : "0\n");
+}
+
+static ssize_t set_min(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+	long val;
+
+	if (strict_strtol(buf, 10, &val) < 0)
+		return -EINVAL;
+	if (val < -128000)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+
+	if (val < 0)
+		data->config |= to_sensor_dev_attr(attr)->index;
+	else
+		data->config &= ~to_sensor_dev_attr(attr)->index;
+	data->valid = 0;
+
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_max(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+
+	return snprintf(buf, PAGE_SIZE - 1,
+			data->config & to_sensor_dev_attr(attr)->index ?
+			"127000\n" : "255000\n");
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lm95241_data *data = i2c_get_clientdata(client);
+	long val;
+
+	if (strict_strtol(buf, 10, &val) < 0)
+		return -EINVAL;
+	if (val >= 256000)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+
+	if (val <= 127000)
+		data->config |= to_sensor_dev_attr(attr)->index;
+	else
+		data->config &= ~to_sensor_dev_attr(attr)->index;
+	data->valid = 0;
+
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct lm95241_data *data = lm95241_update_device(dev);
+
+	return snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval
+			/ HZ);
 }
 
 static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
-			const char *buf, size_t count)
+			    const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lm95241_data *data = i2c_get_clientdata(client);
@@ -138,176 +289,34 @@
 	return count;
 }
 
-#define show_type(flag) \
-static ssize_t show_type##flag(struct device *dev, \
-				   struct device_attribute *attr, char *buf) \
-{ \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct lm95241_data *data = i2c_get_clientdata(client); \
-\
-	snprintf(buf, PAGE_SIZE - 1, \
-		data->model & R##flag##MS_MASK ? "1\n" : "2\n"); \
-	return strlen(buf); \
-}
-show_type(1);
-show_type(2);
-
-#define show_min(flag) \
-static ssize_t show_min##flag(struct device *dev, \
-    struct device_attribute *attr, char *buf) \
-{ \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct lm95241_data *data = i2c_get_clientdata(client); \
-\
-	snprintf(buf, PAGE_SIZE - 1, \
-		data->config & R##flag##DF_MASK ?	\
-		"-127000\n" : "0\n"); \
-	return strlen(buf); \
-}
-show_min(1);
-show_min(2);
-
-#define show_max(flag) \
-static ssize_t show_max##flag(struct device *dev, \
-    struct device_attribute *attr, char *buf) \
-{ \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct lm95241_data *data = i2c_get_clientdata(client); \
-\
-	snprintf(buf, PAGE_SIZE - 1, \
-		data->config & R##flag##DF_MASK ? \
-		"127000\n" : "255000\n"); \
-	return strlen(buf); \
-}
-show_max(1);
-show_max(2);
-
-#define set_type(flag) \
-static ssize_t set_type##flag(struct device *dev, \
-				  struct device_attribute *attr, \
-				  const char *buf, size_t count) \
-{ \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct lm95241_data *data = i2c_get_clientdata(client); \
-\
-	long val; \
-\
-	if (strict_strtol(buf, 10, &val) < 0) \
-		return -EINVAL; \
-\
-	if ((val == 1) || (val == 2)) { \
-\
-		mutex_lock(&data->update_lock); \
-\
-		data->trutherm &= ~(TT_MASK << TT##flag##_SHIFT); \
-		if (val == 1) { \
-			data->model |= R##flag##MS_MASK; \
-			data->trutherm |= (TT_ON << TT##flag##_SHIFT); \
-		} \
-		else { \
-			data->model &= ~R##flag##MS_MASK; \
-			data->trutherm |= (TT_OFF << TT##flag##_SHIFT); \
-		} \
-\
-		data->valid = 0; \
-\
-		i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL, \
-					  data->model); \
-		i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM, \
-					  data->trutherm); \
-\
-		mutex_unlock(&data->update_lock); \
-\
-	} \
-	return count; \
-}
-set_type(1);
-set_type(2);
-
-#define set_min(flag) \
-static ssize_t set_min##flag(struct device *dev, \
-	struct device_attribute *devattr, const char *buf, size_t count) \
-{ \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct lm95241_data *data = i2c_get_clientdata(client); \
-\
-	long val; \
-\
-	if (strict_strtol(buf, 10, &val) < 0) \
-		return -EINVAL;\
-\
-	mutex_lock(&data->update_lock); \
-\
-	if (val < 0) \
-		data->config |= R##flag##DF_MASK; \
-	else \
-		data->config &= ~R##flag##DF_MASK; \
-\
-	data->valid = 0; \
-\
-	i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, \
-		data->config); \
-\
-	mutex_unlock(&data->update_lock); \
-\
-	return count; \
-}
-set_min(1);
-set_min(2);
-
-#define set_max(flag) \
-static ssize_t set_max##flag(struct device *dev, \
-	struct device_attribute *devattr, const char *buf, size_t count) \
-{ \
-	struct i2c_client *client = to_i2c_client(dev); \
-	struct lm95241_data *data = i2c_get_clientdata(client); \
-\
-	long val; \
-\
-	if (strict_strtol(buf, 10, &val) < 0) \
-		return -EINVAL; \
-\
-	mutex_lock(&data->update_lock); \
-\
-	if (val <= 127000) \
-		data->config |= R##flag##DF_MASK; \
-	else \
-		data->config &= ~R##flag##DF_MASK; \
-\
-	data->valid = 0; \
-\
-	i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, \
-		data->config); \
-\
-	mutex_unlock(&data->update_lock); \
-\
-	return count; \
-}
-set_max(1);
-set_max(2);
-
-static DEVICE_ATTR(temp1_input, S_IRUGO, show_local, NULL);
-static DEVICE_ATTR(temp2_input, S_IRUGO, show_remote1, NULL);
-static DEVICE_ATTR(temp3_input, S_IRUGO, show_remote2, NULL);
-static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type1, set_type1);
-static DEVICE_ATTR(temp3_type, S_IWUSR | S_IRUGO, show_type2, set_type2);
-static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
-static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
-static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
-static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type,
+			  R1MS_MASK);
+static SENSOR_DEVICE_ATTR(temp3_type, S_IWUSR | S_IRUGO, show_type, set_type,
+			  R2MS_MASK);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min,
+			  R1DF_MASK);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min,
+			  R2DF_MASK);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max,
+			  R1DF_MASK);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max,
+			  R2DF_MASK);
 static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
 		   set_interval);
 
 static struct attribute *lm95241_attributes[] = {
-	&dev_attr_temp1_input.attr,
-	&dev_attr_temp2_input.attr,
-	&dev_attr_temp3_input.attr,
-	&dev_attr_temp2_type.attr,
-	&dev_attr_temp3_type.attr,
-	&dev_attr_temp2_min.attr,
-	&dev_attr_temp3_min.attr,
-	&dev_attr_temp2_max.attr,
-	&dev_attr_temp3_max.attr,
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_type.dev_attr.attr,
+	&sensor_dev_attr_temp3_type.dev_attr.attr,
+	&sensor_dev_attr_temp2_min.dev_attr.attr,
+	&sensor_dev_attr_temp3_min.dev_attr.attr,
+	&sensor_dev_attr_temp2_max.dev_attr.attr,
+	&sensor_dev_attr_temp3_max.dev_attr.attr,
 	&dev_attr_update_interval.attr,
 	NULL
 };
@@ -329,9 +338,9 @@
 
 	if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
 	     == MANUFACTURER_ID)
-	 && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
-	     >= DEFAULT_REVISION)) {
-		name = "lm95241";
+	    && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
+		>= DEFAULT_REVISION)) {
+		name = DEVNAME;
 	} else {
 		dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
 			address);
@@ -343,6 +352,25 @@
 	return 0;
 }
 
+static void lm95241_init_client(struct i2c_client *client)
+{
+	struct lm95241_data *data = i2c_get_clientdata(client);
+
+	data->interval = HZ;	/* 1 sec default */
+	data->valid = 0;
+	data->config = CFG_CR0076;
+	data->model = 0;
+	data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
+
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG, data->config);
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_REM_FILTER,
+				  R1FE_MASK | R2FE_MASK);
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
+				  data->trutherm);
+	i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
+				  data->model);
+}
+
 static int lm95241_probe(struct i2c_client *new_client,
 			 const struct i2c_device_id *id)
 {
@@ -382,26 +410,6 @@
 	return err;
 }
 
-static void lm95241_init_client(struct i2c_client *client)
-{
-	struct lm95241_data *data = i2c_get_clientdata(client);
-
-	data->interval = HZ;    /* 1 sec default */
-	data->valid = 0;
-	data->config = CFG_CR0076;
-	data->model = 0;
-	data->trutherm = (TT_OFF << TT1_SHIFT) | (TT_OFF << TT2_SHIFT);
-
-	i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
-				  data->config);
-	i2c_smbus_write_byte_data(client, LM95241_REG_RW_REM_FILTER,
-				  R1FE_MASK | R2FE_MASK);
-	i2c_smbus_write_byte_data(client, LM95241_REG_RW_TRUTHERM,
-				  data->trutherm);
-	i2c_smbus_write_byte_data(client, LM95241_REG_RW_REMOTE_MODEL,
-				  data->model);
-}
-
 static int lm95241_remove(struct i2c_client *client)
 {
 	struct lm95241_data *data = i2c_get_clientdata(client);
@@ -413,46 +421,9 @@
 	return 0;
 }
 
-static struct lm95241_data *lm95241_update_device(struct device *dev)
-{
-	struct i2c_client *client = to_i2c_client(dev);
-	struct lm95241_data *data = i2c_get_clientdata(client);
-
-	mutex_lock(&data->update_lock);
-
-	if (time_after(jiffies, data->last_updated + data->interval) ||
-	    !data->valid) {
-		dev_dbg(&client->dev, "Updating lm95241 data.\n");
-		data->local_h =
-			i2c_smbus_read_byte_data(client,
-						 LM95241_REG_R_LOCAL_TEMPH);
-		data->local_l =
-			i2c_smbus_read_byte_data(client,
-						 LM95241_REG_R_LOCAL_TEMPL);
-		data->remote1_h =
-			i2c_smbus_read_byte_data(client,
-						 LM95241_REG_R_REMOTE1_TEMPH);
-		data->remote1_l =
-			i2c_smbus_read_byte_data(client,
-						 LM95241_REG_R_REMOTE1_TEMPL);
-		data->remote2_h =
-			i2c_smbus_read_byte_data(client,
-						 LM95241_REG_R_REMOTE2_TEMPH);
-		data->remote2_l =
-			i2c_smbus_read_byte_data(client,
-						 LM95241_REG_R_REMOTE2_TEMPL);
-		data->last_updated = jiffies;
-		data->valid = 1;
-	}
-
-	mutex_unlock(&data->update_lock);
-
-	return data;
-}
-
 /* Driver data (common to all clients) */
 static const struct i2c_device_id lm95241_id[] = {
-	{ "lm95241", 0 },
+	{ DEVNAME, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, lm95241_id);
@@ -460,7 +431,7 @@
 static struct i2c_driver lm95241_driver = {
 	.class		= I2C_CLASS_HWMON,
 	.driver = {
-		.name   = "lm95241",
+		.name	= DEVNAME,
 	},
 	.probe		= lm95241_probe,
 	.remove		= lm95241_remove,
@@ -479,7 +450,7 @@
 	i2c_del_driver(&lm95241_driver);
 }
 
-MODULE_AUTHOR("Davide Rizzo <elpa-rizzo@gmail.com>");
+MODULE_AUTHOR("Davide Rizzo <elpa.rizzo@gmail.com>");
 MODULE_DESCRIPTION("LM95241 sensor driver");
 MODULE_LICENSE("GPL");
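
For reference, TempFromReg() above keeps the semantics of the TEMP_FROM_REG macro it replaces: the high byte is a signed two's-complement integer part, the low byte contributes 1/256 degree steps, and the result is scaled to millidegrees for sysfs. A standalone sketch with two made-up register pairs shows the arithmetic:

#include <stdio.h>

static int temp_from_reg(unsigned char val_h, unsigned char val_l)
{
	/* Sign-extend the high byte, then scale to millidegrees Celsius. */
	int temp = (val_h & 0x80) ? val_h - 0x100 : val_h;

	return temp * 1000 + val_l * 1000 / 256;
}

int main(void)
{
	printf("%d\n", temp_from_reg(0x19, 0x80));	/* 25.5 C  -> 25500  */
	printf("%d\n", temp_from_reg(0xE7, 0x00));	/* -25.0 C -> -25000 */
	return 0;
}
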
 
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 68e69a4..3d99b88 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -33,6 +33,8 @@
  *  the standard Super-I/O addresses is used (0x2E/0x2F or 0x4E/0x4F).
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1031,16 +1033,15 @@
 
 		val = superio_inb(sioaddr, ACT);
 		if (!(val & 0x01)) {
-			printk(KERN_INFO "pc87360: Device 0x%02x not "
-			       "activated\n", logdev[i]);
+			pr_info("Device 0x%02x not activated\n", logdev[i]);
 			continue;
 		}
 
 		val = (superio_inb(sioaddr, BASE) << 8)
 		    | superio_inb(sioaddr, BASE + 1);
 		if (!val) {
-			printk(KERN_INFO "pc87360: Base address not set for "
-			       "device 0x%02x\n", logdev[i]);
+			pr_info("Base address not set for device 0x%02x\n",
+				logdev[i]);
 			continue;
 		}
 
@@ -1050,17 +1051,15 @@
 			confreg[0] = superio_inb(sioaddr, 0xF0);
 			confreg[1] = superio_inb(sioaddr, 0xF1);
 
-#ifdef DEBUG
-			printk(KERN_DEBUG "pc87360: Fan 1: mon=%d "
-			       "ctrl=%d inv=%d\n", (confreg[0]>>2)&1,
-			       (confreg[0]>>3)&1, (confreg[0]>>4)&1);
-			printk(KERN_DEBUG "pc87360: Fan 2: mon=%d "
-			       "ctrl=%d inv=%d\n", (confreg[0]>>5)&1,
-			       (confreg[0]>>6)&1, (confreg[0]>>7)&1);
-			printk(KERN_DEBUG "pc87360: Fan 3: mon=%d "
-			       "ctrl=%d inv=%d\n", confreg[1]&1,
-			       (confreg[1]>>1)&1, (confreg[1]>>2)&1);
-#endif
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
+				 (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
+				 (confreg[0] >> 4) & 1);
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
+				 (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
+				 (confreg[0] >> 7) & 1);
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
+				 confreg[1] & 1, (confreg[1] >> 1) & 1,
+				 (confreg[1] >> 2) & 1);
 		} else if (i==1) { /* Voltages */
 			/* Are we using thermistors? */
 			if (*devid == 0xE9) { /* PC87366 */
@@ -1071,14 +1070,12 @@
 				confreg[3] = superio_inb(sioaddr, 0x25);
 
 				if (confreg[2] & 0x40) {
-					printk(KERN_INFO "pc87360: Using "
-					       "thermistors for temperature "
-					       "monitoring\n");
+					pr_info("Using thermistors for "
+						"temperature monitoring\n");
 				}
 				if (confreg[3] & 0xE0) {
-					printk(KERN_INFO "pc87360: VID "
-					       "inputs routed (mode %u)\n",
-					       confreg[3] >> 5);
+					pr_info("VID inputs routed (mode %u)\n",
+						confreg[3] >> 5);
 				}
 			}
 		}
@@ -1616,7 +1613,7 @@
 	pdev = platform_device_alloc("pc87360", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "pc87360: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -1639,15 +1636,13 @@
 
 	err = platform_device_add_resources(pdev, res, res_count);
 	if (err) {
-		printk(KERN_ERR "pc87360: Device resources addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resources addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "pc87360: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1666,8 +1661,7 @@
 
 	if (pc87360_find(0x2e, &devid, extra_isa)
 	 && pc87360_find(0x4e, &devid, extra_isa)) {
-		printk(KERN_WARNING "pc87360: PC8736x not detected, "
-		       "module not inserted.\n");
+		pr_warn("PC8736x not detected, module not inserted\n");
 		return -ENODEV;
 	}
 
@@ -1680,8 +1674,7 @@
 	}
 
 	if (address == 0x0000) {
-		printk(KERN_WARNING "pc87360: No active logical device, "
-		       "module not inserted.\n");
+		pr_warn("No active logical device, module not inserted\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 9ec4daa..8da2181 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -22,6 +22,8 @@
  *  mode, and voltages aren't supported at all.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1077,7 +1079,7 @@
 	data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
 	if (!data) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Out of memory\n");
+		pr_err("Out of memory\n");
 		goto exit;
 	}
 
@@ -1196,28 +1198,26 @@
 	pdev = platform_device_alloc(DRVNAME, res[0].start);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, res, res_count);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct pc87427_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1249,23 +1249,23 @@
 
 		val = superio_inb(sioaddr, SIOREG_ACT);
 		if (!(val & 0x01)) {
-			printk(KERN_INFO DRVNAME ": Logical device 0x%02x "
-			       "not activated\n", logdev[i]);
+			pr_info("Logical device 0x%02x not activated\n",
+				logdev[i]);
 			continue;
 		}
 
 		val = superio_inb(sioaddr, SIOREG_MAP);
 		if (val & 0x01) {
-			printk(KERN_WARNING DRVNAME ": Logical device 0x%02x "
-			       "is memory-mapped, can't use\n", logdev[i]);
+			pr_warn("Logical device 0x%02x is memory-mapped, "
+				"can't use\n", logdev[i]);
 			continue;
 		}
 
 		val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8)
 		    | superio_inb(sioaddr, SIOREG_IOBASE + 1);
 		if (!val) {
-			printk(KERN_INFO DRVNAME ": I/O base address not set "
-			       "for logical device 0x%02x\n", logdev[i]);
+			pr_info("I/O base address not set for logical device "
+				"0x%02x\n", logdev[i]);
 			continue;
 		}
 		sio_data->address[i] = val;
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index dc7259d..731b09a 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -18,6 +18,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -290,8 +292,7 @@
 static int __init pcf8591_init(void)
 {
 	if (input_mode < 0 || input_mode > 3) {
-		printk(KERN_WARNING "pcf8591: invalid input_mode (%d)\n",
-		       input_mode);
+		pr_warn("invalid input_mode (%d)\n", input_mode);
 		input_mode = 0;
 	}
 	return i2c_add_driver(&pcf8591_driver);
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 0798210..21c817d 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -20,6 +20,8 @@
  * 02110-1301 USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -303,7 +305,7 @@
 	pdev = platform_device_alloc(DRVNAME, cpu);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -315,8 +317,7 @@
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_free;
 	}
 
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
new file mode 100644
index 0000000..1c8c981
--- /dev/null
+++ b/drivers/hwmon/sht21.c
@@ -0,0 +1,307 @@
+/* Sensirion SHT21 humidity and temperature sensor driver
+ *
+ * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Data sheet available (5/2010) at
+ * http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+/* I2C command bytes */
+#define SHT21_TRIG_T_MEASUREMENT_HM  0xe3
+#define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5
+
+/**
+ * struct sht21 - SHT21 device specific data
+ * @hwmon_dev: device registered with hwmon
+ * @lock: mutex to protect measurement values
+ * @valid: only 0 before first measurement is taken
+ * @last_update: time of last update (jiffies)
+ * @temperature: cached temperature measurement value
+ * @humidity: cached humidity measurement value
+ */
+struct sht21 {
+	struct device *hwmon_dev;
+	struct mutex lock;
+	char valid;
+	unsigned long last_update;
+	int temperature;
+	int humidity;
+};
+
+/**
+ * sht21_temp_ticks_to_millicelsius() - convert raw temperature ticks to
+ * milli celsius
+ * @ticks: temperature ticks value received from sensor
+ */
+static inline int sht21_temp_ticks_to_millicelsius(int ticks)
+{
+	ticks &= ~0x0003; /* clear status bits */
+	/*
+	 * Formula T = -46.85 + 175.72 * ST / 2^16 from data sheet 6.2,
+	 * optimized for integer fixed point (3 digits) arithmetic
+	 */
+	return ((21965 * ticks) >> 13) - 46850;
+}
+
+/**
+ * sht21_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to
+ * one-thousandths of a percent relative humidity
+ * @ticks: humidity ticks value received from sensor
+ */
+static inline int sht21_rh_ticks_to_per_cent_mille(int ticks)
+{
+	ticks &= ~0x0003; /* clear status bits */
+	/*
+	 * Formula RH = -6 + 125 * SRH / 2^16 from data sheet 6.1,
+	 * optimized for integer fixed point (3 digits) arithmetic
+	 */
+	return ((15625 * ticks) >> 13) - 6000;
+}
+
+/**
+ * sht21_read_word_data() - read word from register
+ * @client: I2C client device
+ * @reg: I2C command byte
+ *
+ * Returns value, negative errno on error.
+ */
+static inline int sht21_read_word_data(struct i2c_client *client, u8 reg)
+{
+	int ret = i2c_smbus_read_word_data(client, reg);
+	if (ret < 0)
+		return ret;
+	/*
+	 * SMBus specifies low byte first, but the SHT21 returns MSB
+	 * first, so we have to swab16 the values
+	 */
+	return swab16(ret);
+}
+
+/**
+ * sht21_update_measurements() - get updated measurements from device
+ * @client: I2C client device
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int sht21_update_measurements(struct i2c_client *client)
+{
+	int ret = 0;
+	struct sht21 *sht21 = i2c_get_clientdata(client);
+
+	mutex_lock(&sht21->lock);
+	/*
+	 * Data sheet 2.4:
+	 * SHT2x should not be active for more than 10% of the time - e.g.
+	 * maximum two measurements per second at 12bit accuracy shall be made.
+	 */
+	if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) {
+		ret = sht21_read_word_data(client, SHT21_TRIG_T_MEASUREMENT_HM);
+		if (ret < 0)
+			goto out;
+		sht21->temperature = sht21_temp_ticks_to_millicelsius(ret);
+		ret = sht21_read_word_data(client,
+					SHT21_TRIG_RH_MEASUREMENT_HM);
+		if (ret < 0)
+			goto out;
+		sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
+		sht21->last_update = jiffies;
+		sht21->valid = 1;
+	}
+out:
+	mutex_unlock(&sht21->lock);
+
+	return ret >= 0 ? 0 : ret;
+}
+
+/**
+ * sht21_show_temperature() - show temperature measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to temp1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t sht21_show_temperature(struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct sht21 *sht21 = i2c_get_clientdata(client);
+	int ret = sht21_update_measurements(client);
+	if (ret < 0)
+		return ret;
+	return sprintf(buf, "%d\n", sht21->temperature);
+}
+
+/**
+ * sht21_show_humidity() - show humidity measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to humidity1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t sht21_show_humidity(struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct sht21 *sht21 = i2c_get_clientdata(client);
+	int ret = sht21_update_measurements(client);
+	if (ret < 0)
+		return ret;
+	return sprintf(buf, "%d\n", sht21->humidity);
+}
+
+/* sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
+	NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
+	NULL, 0);
+
+static struct attribute *sht21_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_humidity1_input.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group sht21_attr_group = {
+	.attrs = sht21_attributes,
+};
+
+/**
+ * sht21_probe() - probe device
+ * @client: I2C client device
+ * @id: device ID
+ *
+ * Called by the I2C core when an entry in the ID table matches a
+ * device's name.
+ * Returns 0 on success.
+ */
+static int __devinit sht21_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	struct sht21 *sht21;
+	int err;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_WORD_DATA)) {
+		dev_err(&client->dev,
+			"adapter does not support SMBus word transactions\n");
+		return -ENODEV;
+	}
+
+	sht21 = kzalloc(sizeof(*sht21), GFP_KERNEL);
+	if (!sht21) {
+		dev_dbg(&client->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+	i2c_set_clientdata(client, sht21);
+
+	mutex_init(&sht21->lock);
+
+	err = sysfs_create_group(&client->dev.kobj, &sht21_attr_group);
+	if (err) {
+		dev_dbg(&client->dev, "could not create sysfs files\n");
+		goto fail_free;
+	}
+	sht21->hwmon_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(sht21->hwmon_dev)) {
+		dev_dbg(&client->dev, "unable to register hwmon device\n");
+		err = PTR_ERR(sht21->hwmon_dev);
+		goto fail_remove_sysfs;
+	}
+
+	dev_info(&client->dev, "initialized\n");
+
+	return 0;
+
+fail_remove_sysfs:
+	sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
+fail_free:
+	kfree(sht21);
+
+	return err;
+}
+
+/**
+ * sht21_remove() - remove device
+ * @client: I2C client device
+ */
+static int __devexit sht21_remove(struct i2c_client *client)
+{
+	struct sht21 *sht21 = i2c_get_clientdata(client);
+
+	hwmon_device_unregister(sht21->hwmon_dev);
+	sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
+	kfree(sht21);
+
+	return 0;
+}
+
+/* Device ID table */
+static const struct i2c_device_id sht21_id[] = {
+	{ "sht21", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, sht21_id);
+
+static struct i2c_driver sht21_driver = {
+	.driver.name = "sht21",
+	.probe       = sht21_probe,
+	.remove      = __devexit_p(sht21_remove),
+	.id_table    = sht21_id,
+};
+
+/**
+ * sht21_init() - initialize driver
+ *
+ * Called when kernel is booted or module is inserted.
+ * Returns 0 on success.
+ */
+static int __init sht21_init(void)
+{
+	return i2c_add_driver(&sht21_driver);
+}
+module_init(sht21_init);
+
+/**
+ * sht21_exit() - clean up driver
+ *
+ * Called when module is removed.
+ */
+static void __exit sht21_exit(void)
+{
+	i2c_del_driver(&sht21_driver);
+}
+module_exit(sht21_exit);
+
+MODULE_AUTHOR("Urs Fleisch <urs.fleisch@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SHT21 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
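
The conversion helpers in sht21.c encode the data-sheet formulas T = -46.85 + 175.72 * St / 2^16 and RH = -6 + 125 * Srh / 2^16 in fixed point: expressed in millidegrees and milli-percent, the scale factors reduce exactly to 175720/65536 = 21965/8192 and 125000/65536 = 15625/8192, which is why the driver computes ((21965 * t) >> 13) - 46850 and ((15625 * t) >> 13) - 6000. A standalone check of the temperature case against the floating-point formula, using an arbitrary example tick value:

#include <stdio.h>

int main(void)
{
	int ticks = 26060 & ~0x0003;	/* arbitrary raw value, status bits cleared */
	int fixed = ((21965 * ticks) >> 13) - 46850;
	double exact = -46850.0 + 175720.0 * ticks / 65536.0;

	/* The shifted result only floors the exact quotient, so the two
	 * values agree to within one millidegree. */
	printf("fixed=%d mC  exact=%.2f mC\n", fixed, exact);
	return 0;
}
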
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 79c2931..47d7ce9 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -50,6 +50,8 @@
 	 735		0008		0735
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/ioport.h>
@@ -735,21 +737,19 @@
 	pdev = platform_device_alloc("sis5595", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "sis5595: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "sis5595: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "sis5595: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index f46d936..9fb7516 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -26,6 +26,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/ioport.h>
@@ -311,21 +313,19 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -367,8 +367,7 @@
 	*addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
 		 |  superio_inb(SUPERIO_REG_BASE_LSB);
 
-	printk(KERN_INFO DRVNAME ": found SMSC %s "
-		"(base address 0x%04x, revision %u)\n",
+	pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
 		name, *addr, rev);
 
 	superio_exit();
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 8fa462f..f44a89a 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -26,6 +26,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/ioport.h>
@@ -435,30 +437,29 @@
 	 */
 	switch (val) {
 	case 0x51:
-		pr_info(DRVNAME ": Found SMSC LPC47B27x\n");
+		pr_info("Found SMSC LPC47B27x\n");
 		sio_data->type = smsc47m1;
 		break;
 	case 0x59:
-		pr_info(DRVNAME ": Found SMSC LPC47M10x/LPC47M112/LPC47M13x\n");
+		pr_info("Found SMSC LPC47M10x/LPC47M112/LPC47M13x\n");
 		sio_data->type = smsc47m1;
 		break;
 	case 0x5F:
-		pr_info(DRVNAME ": Found SMSC LPC47M14x\n");
+		pr_info("Found SMSC LPC47M14x\n");
 		sio_data->type = smsc47m1;
 		break;
 	case 0x60:
-		pr_info(DRVNAME ": Found SMSC LPC47M15x/LPC47M192/LPC47M997\n");
+		pr_info("Found SMSC LPC47M15x/LPC47M192/LPC47M997\n");
 		sio_data->type = smsc47m1;
 		break;
 	case 0x6B:
 		if (superio_inb(SUPERIO_REG_DEVREV) & 0x80) {
-			pr_debug(DRVNAME ": "
-				 "Found SMSC LPC47M233, unsupported\n");
+			pr_debug("Found SMSC LPC47M233, unsupported\n");
 			superio_exit();
 			return -ENODEV;
 		}
 
-		pr_info(DRVNAME ": Found SMSC LPC47M292\n");
+		pr_info("Found SMSC LPC47M292\n");
 		sio_data->type = smsc47m2;
 		break;
 	default:
@@ -470,7 +471,7 @@
 	*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
 	      |  superio_inb(SUPERIO_REG_BASE + 1);
 	if (*addr == 0) {
-		pr_info(DRVNAME ": Device address not set, will not use\n");
+		pr_info("Device address not set, will not use\n");
 		superio_exit();
 		return -ENODEV;
 	}
@@ -479,7 +480,7 @@
 	 * Compaq Presario S4000NX) */
 	sio_data->activate = superio_inb(SUPERIO_REG_ACT);
 	if ((sio_data->activate & 0x01) == 0) {
-		pr_info(DRVNAME ": Enabling device\n");
+		pr_info("Enabling device\n");
 		superio_outb(SUPERIO_REG_ACT, sio_data->activate | 0x01);
 	}
 
@@ -494,7 +495,7 @@
 		superio_enter();
 		superio_select();
 
-		pr_info(DRVNAME ": Disabling device\n");
+		pr_info("Disabling device\n");
 		superio_outb(SUPERIO_REG_ACT, sio_data->activate);
 
 		superio_exit();
@@ -823,28 +824,26 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct smsc47m1_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ec7fad7..0d18de4 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -21,6 +21,8 @@
  * 02110-1301 USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -202,7 +204,7 @@
 	pdev = platform_device_alloc(DRVNAME, cpu);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -214,8 +216,7 @@
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_free;
 	}
 
@@ -237,13 +238,16 @@
 
 static void __cpuinit via_cputemp_device_remove(unsigned int cpu)
 {
-	struct pdev_entry *p, *n;
+	struct pdev_entry *p;
+
 	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
+	list_for_each_entry(p, &pdev_list, list) {
 		if (p->cpu == cpu) {
 			platform_device_unregister(p->pdev);
 			list_del(&p->list);
+			mutex_unlock(&pdev_list_mutex);
 			kfree(p);
+			return;
 		}
 	}
 	mutex_unlock(&pdev_list_mutex);
@@ -273,7 +277,6 @@
 static int __init via_cputemp_init(void)
 {
 	int i, err;
-	struct pdev_entry *p, *n;
 
 	if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
 		printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
@@ -295,33 +298,27 @@
 			continue;
 
 		if (c->x86_model > 0x0f) {
-			printk(KERN_WARNING DRVNAME ": Unknown CPU "
-				"model 0x%x\n", c->x86_model);
+			pr_warn("Unknown CPU model 0x%x\n", c->x86_model);
 			continue;
 		}
 
-		err = via_cputemp_device_add(i);
-		if (err)
-			goto exit_devices_unreg;
+		via_cputemp_device_add(i);
 	}
+
+#ifndef CONFIG_HOTPLUG_CPU
 	if (list_empty(&pdev_list)) {
 		err = -ENODEV;
 		goto exit_driver_unreg;
 	}
+#endif
 
 	register_hotcpu_notifier(&via_cputemp_cpu_notifier);
 	return 0;
 
-exit_devices_unreg:
-	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
-		platform_device_unregister(p->pdev);
-		list_del(&p->list);
-		kfree(p);
-	}
-	mutex_unlock(&pdev_list_mutex);
+#ifndef CONFIG_HOTPLUG_CPU
 exit_driver_unreg:
 	platform_driver_unregister(&via_cputemp_driver);
+#endif
 exit:
 	return err;
 }
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index f397ce7..25e9166 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -30,6 +30,8 @@
     Warning - only supports a single device.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
@@ -687,6 +689,13 @@
 	return 0;
 }
 
+static void via686a_update_fan_div(struct via686a_data *data)
+{
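+	/* Fan clock divisors share one register: bits 4-5 select fan1, bits 6-7 fan2 */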
+	int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+	data->fan_div[0] = (reg >> 4) & 0x03;
+	data->fan_div[1] = reg >> 6;
+}
+
 static void __devinit via686a_init_device(struct via686a_data *data)
 {
 	u8 reg;
@@ -700,6 +709,9 @@
 	via686a_write_value(data, VIA686A_REG_TEMP_MODE,
 			    (reg & ~VIA686A_TEMP_MODE_MASK)
 			    | VIA686A_TEMP_MODE_CONTINUOUS);
+
+	/* Pre-read fan clock divisor values */
+	via686a_update_fan_div(data);
 }
 
 static struct via686a_data *via686a_update_device(struct device *dev)
@@ -751,9 +763,7 @@
 		    (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
 		     0xc0) >> 6;
 
-		i = via686a_read_value(data, VIA686A_REG_FANDIV);
-		data->fan_div[0] = (i >> 4) & 0x03;
-		data->fan_div[1] = i >> 6;
+		via686a_update_fan_div(data);
 		data->alarms =
 		    via686a_read_value(data,
 				       VIA686A_REG_ALARM1) |
@@ -791,21 +801,19 @@
 	pdev = platform_device_alloc("via686a", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "via686a: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "via686a: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "via686a: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index ae33bbb..49163d4 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -21,6 +21,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1254,8 +1256,7 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed (%d)\n",
-		       err);
+		pr_err("Device allocation failed (%d)\n", err);
 		goto EXIT;
 	}
 
@@ -1266,15 +1267,13 @@
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto EXIT_DEV_PUT;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto EXIT_DEV_PUT;
 	}
 
@@ -1301,23 +1300,20 @@
 	superio_select(sio_cip, SIO_VT1211_LDN_HWMON);
 
 	if ((superio_inb(sio_cip, SIO_VT1211_ACTIVE) & 1) == 0) {
-		printk(KERN_WARNING DRVNAME ": HW monitor is disabled, "
-		       "skipping\n");
+		pr_warn("HW monitor is disabled, skipping\n");
 		goto EXIT;
 	}
 
 	*address = ((superio_inb(sio_cip, SIO_VT1211_BADDR) << 8) |
 		    (superio_inb(sio_cip, SIO_VT1211_BADDR + 1))) & 0xff00;
 	if (*address == 0) {
-		printk(KERN_WARNING DRVNAME ": Base address is not set, "
-		       "skipping\n");
+		pr_warn("Base address is not set, skipping\n");
 		goto EXIT;
 	}
 
 	err = 0;
-	printk(KERN_INFO DRVNAME ": Found VT1211 chip at 0x%04x, "
-	       "revision %u\n", *address,
-	       superio_inb(sio_cip, SIO_VT1211_DEVREV));
+	pr_info("Found VT1211 chip at 0x%04x, revision %u\n",
+		*address, superio_inb(sio_cip, SIO_VT1211_DEVREV));
 
 EXIT:
 	superio_exit(sio_cip);
@@ -1336,15 +1332,15 @@
 
 	if ((uch_config < -1) || (uch_config > 31)) {
 		err = -EINVAL;
-		printk(KERN_WARNING DRVNAME ": Invalid UCH configuration %d. "
-		       "Choose a value between 0 and 31.\n", uch_config);
+		pr_warn("Invalid UCH configuration %d. "
+			"Choose a value between 0 and 31.\n", uch_config);
 	  goto EXIT;
 	}
 
 	if ((int_mode < -1) || (int_mode > 0)) {
 		err = -EINVAL;
-		printk(KERN_WARNING DRVNAME ": Invalid interrupt mode %d. "
-		       "Only mode 0 is supported.\n", int_mode);
+		pr_warn("Invalid interrupt mode %d. "
+			"Only mode 0 is supported.\n", int_mode);
 	  goto EXIT;
 	}
 
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index e6078c9..db3b2e8 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -24,6 +24,8 @@
 /* Supports VIA VT8231 South Bridge embedded sensors
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -902,21 +904,19 @@
 	pdev = platform_device_alloc("vt8231", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "vt8231: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "vt8231: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "vt8231: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 072c580..073eabe 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -42,6 +42,8 @@
     w83667hg-b   9      5       3       3      0xb350 0xc1    0x5ca3
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1668,8 +1670,7 @@
 		break;
 	default:
 		if (val != 0xffff)
-			pr_debug(DRVNAME ": unsupported chip ID: 0x%04x\n",
-				 val);
+			pr_debug("unsupported chip ID: 0x%04x\n", val);
 		superio_exit(sioaddr);
 		return -ENODEV;
 	}
@@ -1680,8 +1681,7 @@
 	    | superio_inb(sioaddr, SIO_REG_ADDR + 1);
 	*addr = val & IOREGION_ALIGNMENT;
 	if (*addr == 0) {
-		printk(KERN_ERR DRVNAME ": Refusing to enable a Super-I/O "
-		       "device with a base I/O port 0.\n");
+		pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
 		superio_exit(sioaddr);
 		return -ENODEV;
 	}
@@ -1689,13 +1689,12 @@
 	/* Activate logical device if needed */
 	val = superio_inb(sioaddr, SIO_REG_ENABLE);
 	if (!(val & 0x01)) {
-		printk(KERN_WARNING DRVNAME ": Forcibly enabling Super-I/O. "
-		       "Sensor is probably unusable.\n");
+		pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
 		superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
 	}
 
 	superio_exit(sioaddr);
-	pr_info(DRVNAME ": Found %s chip at %#x\n", sio_name, *addr);
+	pr_info("Found %s chip at %#x\n", sio_name, *addr);
 	sio_data->sioreg = sioaddr;
 
 	return 0;
@@ -1729,14 +1728,14 @@
 
 	if (!(pdev = platform_device_alloc(DRVNAME, address))) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit_unregister;
 	}
 
 	err = platform_device_add_data(pdev, &sio_data,
 				       sizeof(struct w83627ehf_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
@@ -1752,16 +1751,14 @@
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	/* platform_device_add calls probe() */
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 38e2805..bde50e3 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -39,6 +39,8 @@
     supported yet.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1166,14 +1168,13 @@
 	       superio_inb(sio_data, WINB_BASE_REG + 1);
 	*addr = val & WINB_ALIGNMENT;
 	if (*addr == 0) {
-		printk(KERN_WARNING DRVNAME ": Base address not set, "
-		       "skipping\n");
+		pr_warn("Base address not set, skipping\n");
 		goto exit;
 	}
 
 	val = superio_inb(sio_data, WINB_ACT_REG);
 	if (!(val & 0x01)) {
-		printk(KERN_WARNING DRVNAME ": Enabling HWM logical device\n");
+		pr_warn("Enabling HWM logical device\n");
 		superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
 	}
 
@@ -1789,28 +1790,26 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct w83627hf_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index c84b9b4..eed43a0 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -33,6 +33,8 @@
 
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1798,8 +1800,7 @@
 	 * individually for the probing phase. */
 	for (port = address; port < address + W83781D_EXTENT; port++) {
 		if (!request_region(port, 1, "w83781d")) {
-			pr_debug("w83781d: Failed to request port 0x%x\n",
-				 port);
+			pr_debug("Failed to request port 0x%x\n", port);
 			goto release;
 		}
 	}
@@ -1811,7 +1812,7 @@
 	if (inb_p(address + 2) != val
 	 || inb_p(address + 3) != val
 	 || inb_p(address + 7) != val) {
-		pr_debug("w83781d: Detection failed at step 1\n");
+		pr_debug("Detection failed at step %d\n", 1);
 		goto release;
 	}
 #undef REALLY_SLOW_IO
@@ -1820,14 +1821,14 @@
 	   MSB (busy flag) should be clear initially, set after the write. */
 	save = inb_p(address + W83781D_ADDR_REG_OFFSET);
 	if (save & 0x80) {
-		pr_debug("w83781d: Detection failed at step 2\n");
+		pr_debug("Detection failed at step %d\n", 2);
 		goto release;
 	}
 	val = ~save & 0x7f;
 	outb_p(val, address + W83781D_ADDR_REG_OFFSET);
 	if (inb_p(address + W83781D_ADDR_REG_OFFSET) != (val | 0x80)) {
 		outb_p(save, address + W83781D_ADDR_REG_OFFSET);
-		pr_debug("w83781d: Detection failed at step 3\n");
+		pr_debug("Detection failed at step %d\n", 3);
 		goto release;
 	}
 
@@ -1835,7 +1836,7 @@
 	outb_p(W83781D_REG_CONFIG, address + W83781D_ADDR_REG_OFFSET);
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if (val & 0x80) {
-		pr_debug("w83781d: Detection failed at step 4\n");
+		pr_debug("Detection failed at step %d\n", 4);
 		goto release;
 	}
 	outb_p(W83781D_REG_BANK, address + W83781D_ADDR_REG_OFFSET);
@@ -1844,19 +1845,19 @@
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if ((!(save & 0x80) && (val != 0xa3))
 	 || ((save & 0x80) && (val != 0x5c))) {
-		pr_debug("w83781d: Detection failed at step 5\n");
+		pr_debug("Detection failed at step %d\n", 5);
 		goto release;
 	}
 	outb_p(W83781D_REG_I2C_ADDR, address + W83781D_ADDR_REG_OFFSET);
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if (val < 0x03 || val > 0x77) {	/* Not a valid I2C address */
-		pr_debug("w83781d: Detection failed at step 6\n");
+		pr_debug("Detection failed at step %d\n", 6);
 		goto release;
 	}
 
 	/* The busy flag should be clear again */
 	if (inb_p(address + W83781D_ADDR_REG_OFFSET) & 0x80) {
-		pr_debug("w83781d: Detection failed at step 7\n");
+		pr_debug("Detection failed at step %d\n", 7);
 		goto release;
 	}
 
@@ -1871,7 +1872,7 @@
 		found = 1;
 
 	if (found)
-		pr_info("w83781d: Found a %s chip at %#x\n",
+		pr_info("Found a %s chip at %#x\n",
 			val == 0x30 ? "W83782D" : "W83781D", (int)address);
 
  release:
@@ -1894,21 +1895,19 @@
 	pdev = platform_device_alloc("w83781d", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "w83781d: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "w83781d: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "w83781d: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 679718e..63841f8 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -691,7 +691,7 @@
 }
 
 static ssize_t
-show_regs_chassis(struct device *dev, struct device_attribute *attr,
+show_chassis(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	struct w83792d_data *data = w83792d_update_device(dev);
@@ -699,6 +699,16 @@
 }
 
 static ssize_t
+show_regs_chassis(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	dev_warn(dev,
+		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+		 "chassis");
+	return show_chassis(dev, attr, buf);
+}
+
+static ssize_t
 show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct w83792d_data *data = w83792d_update_device(dev);
@@ -706,7 +716,7 @@
 }
 
 static ssize_t
-store_chassis_clear(struct device *dev, struct device_attribute *attr,
+store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
@@ -714,6 +724,10 @@
 	u32 val;
 	u8 temp1 = 0, temp2 = 0;
 
+	dev_warn(dev,
+		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+		 "chassis_clear");
+
 	val = simple_strtoul(buf, NULL, 10);
 	mutex_lock(&data->update_lock);
 	data->chassis_clear = SENSORS_LIMIT(val, 0 ,1);
@@ -726,6 +740,27 @@
 	return count;
 }
 
+static ssize_t
+store_chassis_clear(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct w83792d_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+	u8 reg;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
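+	/* Setting bit 7 of the chassis clear register clears the intrusion alarm */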
+	reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
+	w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
+	data->valid = 0;		/* Force cache refresh */
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
 /* For Smart Fan I / Thermal Cruise */
 static ssize_t
 show_thermal_cruise(struct device *dev, struct device_attribute *attr,
@@ -1012,7 +1047,9 @@
 static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
 static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL);
 static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR,
-			show_chassis_clear, store_chassis_clear);
+			show_chassis_clear, store_chassis_clear_legacy);
+static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
+			show_chassis, store_chassis_clear);
 static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
@@ -1214,6 +1251,7 @@
 	&dev_attr_alarms.attr,
 	&dev_attr_chassis.attr,
 	&dev_attr_chassis_clear.attr,
+	&dev_attr_intrusion0_alarm.attr,
 	&sensor_dev_attr_tolerance1.dev_attr.attr,
 	&sensor_dev_attr_thermal_cruise1.dev_attr.attr,
 	&sensor_dev_attr_tolerance2.dev_attr.attr,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 8e540ad..e3bdedf 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -51,7 +51,6 @@
 #define WATCHDOG_TIMEOUT 2	/* 2 minute default timeout */
 
 /* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
 static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
 						I2C_CLIENT_END };
 
@@ -421,18 +420,43 @@
 
 /* Write any value to clear chassis alarm */
 static ssize_t
+store_chassis_clear_legacy(struct device *dev,
+			   struct device_attribute *attr, const char *buf,
+			   size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct w83793_data *data = i2c_get_clientdata(client);
+	u8 val;
+
+	dev_warn(dev, "Attribute chassis is deprecated, "
+		 "use intrusion0_alarm instead\n");
+
+	mutex_lock(&data->update_lock);
+	val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+	val |= 0x80;
+	w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+/* Write 0 to clear chassis alarm */
+static ssize_t
 store_chassis_clear(struct device *dev,
 		    struct device_attribute *attr, const char *buf,
 		    size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct w83793_data *data = i2c_get_clientdata(client);
-	u8 val;
+	unsigned long val;
+	u8 reg;
+
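+	/* Only an explicit 0 clears the alarm; any other input is rejected */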
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
-	val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
-	val |= 0x80;
-	w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
+	reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+	w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
+	data->valid = 0;		/* Force cache refresh */
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -1102,6 +1126,8 @@
 
 static struct sensor_device_attribute_2 sda_single_files[] = {
 	SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
+		      store_chassis_clear_legacy, ALARM_STATUS, 30),
+	SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
 		      store_chassis_clear, ALARM_STATUS, 30),
 	SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
 		      store_beep_enable, NOT_USED, NOT_USED),
@@ -1323,7 +1349,7 @@
 static long watchdog_ioctl(struct file *filp, unsigned int cmd,
 			   unsigned long arg)
 {
-	static struct watchdog_info ident = {
+	struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING |
 			   WDIOF_SETTIMEOUT |
 			   WDIOF_CARDRESET,
@@ -1333,7 +1359,6 @@
 	int val, ret = 0;
 	struct w83793_data *data = filp->private_data;
 
-	mutex_lock(&watchdog_mutex);
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		if (!nowayout)
@@ -1387,7 +1412,6 @@
 	default:
 		ret = -ENOTTY;
 	}
-	mutex_unlock(&watchdog_mutex);
 	return ret;
 }
 
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index cdbc744..845232d 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -458,6 +458,7 @@
 {
 	struct w83795_data *data = i2c_get_clientdata(client);
 	int i, limit;
+	u8 lsb;
 
 	/* Read the voltage limits */
 	for (i = 0; i < ARRAY_SIZE(data->in); i++) {
@@ -479,9 +480,8 @@
 	}
 
 	/* Read the fan limits */
+	lsb = 0; /* Silence a false gcc warning */
 	for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
-		u8 lsb;
-
 		/* Each register contains LSB for 2 fans, but we want to
 		 * read it only once to save time */
 		if ((i & 1) == 0 && (data->has_fan & (3 << i)))
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index a39e6cf..38319a6 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -600,12 +600,14 @@
 /*
  * registering functions to load algorithms at runtime
  */
-static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
+static int __i2c_bit_add_bus(struct i2c_adapter *adap,
+			     int (*add_adapter)(struct i2c_adapter *))
 {
 	struct i2c_algo_bit_data *bit_adap = adap->algo_data;
+	int ret;
 
 	if (bit_test) {
-		int ret = test_bus(bit_adap, adap->name);
+		ret = test_bus(bit_adap, adap->name);
 		if (ret < 0)
 			return -ENODEV;
 	}
@@ -614,30 +616,27 @@
 	adap->algo = &i2c_bit_algo;
 	adap->retries = 3;
 
+	ret = add_adapter(adap);
+	if (ret < 0)
+		return ret;
+
+	/* Complain if SCL can't be read */
+	if (bit_adap->getscl == NULL) {
+		dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
+		dev_warn(&adap->dev, "Bus may be unreliable\n");
+	}
 	return 0;
 }
 
 int i2c_bit_add_bus(struct i2c_adapter *adap)
 {
-	int err;
-
-	err = i2c_bit_prepare_bus(adap);
-	if (err)
-		return err;
-
-	return i2c_add_adapter(adap);
+	return __i2c_bit_add_bus(adap, i2c_add_adapter);
 }
 EXPORT_SYMBOL(i2c_bit_add_bus);
 
 int i2c_bit_add_numbered_bus(struct i2c_adapter *adap)
 {
-	int err;
-
-	err = i2c_bit_prepare_bus(adap);
-	if (err)
-		return err;
-
-	return i2c_add_numbered_adapter(adap);
+	return __i2c_bit_add_bus(adap, i2c_add_numbered_adapter);
 }
 EXPORT_SYMBOL(i2c_bit_add_numbered_bus);
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3a6321c..113505a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -638,6 +638,14 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called xilinx_i2c.
 
+config I2C_EG20T
+        tristate "PCH I2C of Intel EG20T"
+        depends on PCI
+        help
+          This driver is for the PCH (Platform Controller Hub) I2C of the
+          EG20T, which is an IOH (Input/Output Hub) for x86 embedded
+          processors. This driver provides access to the PCH I2C bus devices.
+
 comment "External I2C/SMBus adapter drivers"
 
 config I2C_PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 84cb16a..9d2d0ec 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -61,6 +61,7 @@
 obj-$(CONFIG_I2C_VERSATILE)	+= i2c-versatile.o
 obj-$(CONFIG_I2C_OCTEON)	+= i2c-octeon.o
 obj-$(CONFIG_I2C_XILINX)	+= i2c-xiic.o
+obj-$(CONFIG_I2C_EG20T)	+= i2c-eg20t.o
 
 # External I2C/SMBus adapter drivers
 obj-$(CONFIG_I2C_PARPORT)	+= i2c-parport.o
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index fb26e5c..52b545a 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -20,6 +20,7 @@
 #include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/delay.h>
 
 #include <asm/blackfin.h>
 #include <asm/portmux.h>
@@ -159,6 +160,27 @@
 		if (mast_stat & BUFWRERR)
 			dev_dbg(&iface->adap.dev, "Buffer Write Error\n");
 
+		/* Faulty slave devices may drive SDA low after a transfer
+		 * finishes. To release the bus this code generates up to 9
+		 * extra clocks until SDA is released.
+		 */
+
+		if (read_MASTER_STAT(iface) & SDASEN) {
+			int cnt = 9;
+			do {
+				write_MASTER_CTL(iface, SCLOVR);
+				udelay(6);
+				write_MASTER_CTL(iface, 0);
+				udelay(6);
+			} while ((read_MASTER_STAT(iface) & SDASEN) && cnt--);
+
+			write_MASTER_CTL(iface, SDAOVR | SCLOVR);
+			udelay(6);
+			write_MASTER_CTL(iface, SDAOVR);
+			udelay(6);
+			write_MASTER_CTL(iface, 0);
+		}
+
 		/* If it is a quick transfer, only address without data,
 		 * not an err, return 1.
 		 */
@@ -760,7 +782,7 @@
 	platform_driver_unregister(&i2c_bfin_twi_driver);
 }
 
-module_init(i2c_bfin_twi_init);
+subsys_initcall(i2c_bfin_twi_init);
 module_exit(i2c_bfin_twi_exit);
 
 MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
new file mode 100644
index 0000000..2e067dd
--- /dev/null
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/ktime.h>
+
+#define PCH_EVENT_SET	0	/* I2C Interrupt Event Set Status */
+#define PCH_EVENT_NONE	1	/* I2C Interrupt Event Clear Status */
+#define PCH_MAX_CLK		100000	/* Maximum clock speed in KHz */
+#define PCH_BUFFER_MODE_ENABLE	0x0002	/* flag for Buffer mode enable */
+#define PCH_EEPROM_SW_RST_MODE_ENABLE	0x0008	/* EEPROM SW RST enable flag */
+
+#define PCH_I2CSADR	0x00	/* I2C slave address register */
+#define PCH_I2CCTL	0x04	/* I2C control register */
+#define PCH_I2CSR	0x08	/* I2C status register */
+#define PCH_I2CDR	0x0C	/* I2C data register */
+#define PCH_I2CMON	0x10	/* I2C bus monitor register */
+#define PCH_I2CBC	0x14	/* I2C bus transfer rate setup counter */
+#define PCH_I2CMOD	0x18	/* I2C mode register */
+#define PCH_I2CBUFSLV	0x1C	/* I2C buffer mode slave address register */
+#define PCH_I2CBUFSUB	0x20	/* I2C buffer mode subaddress register */
+#define PCH_I2CBUFFOR	0x24	/* I2C buffer mode format register */
+#define PCH_I2CBUFCTL	0x28	/* I2C buffer mode control register */
+#define PCH_I2CBUFMSK	0x2C	/* I2C buffer mode interrupt mask register */
+#define PCH_I2CBUFSTA	0x30	/* I2C buffer mode status register */
+#define PCH_I2CBUFLEV	0x34	/* I2C buffer mode level register */
+#define PCH_I2CESRFOR	0x38	/* EEPROM software reset mode format register */
+#define PCH_I2CESRCTL	0x3C	/* EEPROM software reset mode ctrl register */
+#define PCH_I2CESRMSK	0x40	/* EEPROM software reset mode */
+#define PCH_I2CESRSTA	0x44	/* EEPROM software reset mode status register */
+#define PCH_I2CTMR	0x48	/* I2C timer register */
+#define PCH_I2CSRST	0xFC	/* I2C reset register */
+#define PCH_I2CNF	0xF8	/* I2C noise filter register */
+
+#define BUS_IDLE_TIMEOUT	20
+#define PCH_I2CCTL_I2CMEN	0x0080
+#define TEN_BIT_ADDR_DEFAULT	0xF000
+#define TEN_BIT_ADDR_MASK	0xF0
+#define PCH_START		0x0020
+#define PCH_ESR_START		0x0001
+#define PCH_BUFF_START		0x1
+#define PCH_REPSTART		0x0004
+#define PCH_ACK			0x0008
+#define PCH_GETACK		0x0001
+#define CLR_REG			0x0
+#define I2C_RD			0x1
+#define I2CMCF_BIT		0x0080
+#define I2CMIF_BIT		0x0002
+#define I2CMAL_BIT		0x0010
+#define I2CBMFI_BIT		0x0001
+#define I2CBMAL_BIT		0x0002
+#define I2CBMNA_BIT		0x0004
+#define I2CBMTO_BIT		0x0008
+#define I2CBMIS_BIT		0x0010
+#define I2CESRFI_BIT		0X0001
+#define I2CESRTO_BIT		0x0002
+#define I2CESRFIIE_BIT		0x1
+#define I2CESRTOIE_BIT		0x2
+#define I2CBMDZ_BIT		0x0040
+#define I2CBMAG_BIT		0x0020
+#define I2CMBB_BIT		0x0020
+#define BUFFER_MODE_MASK	(I2CBMFI_BIT | I2CBMAL_BIT | I2CBMNA_BIT | \
+				I2CBMTO_BIT | I2CBMIS_BIT)
+#define I2C_ADDR_MSK		0xFF
+#define I2C_MSB_2B_MSK		0x300
+#define FAST_MODE_CLK		400
+#define FAST_MODE_EN		0x0001
+#define SUB_ADDR_LEN_MAX	4
+#define BUF_LEN_MAX		32
+#define PCH_BUFFER_MODE		0x1
+#define EEPROM_SW_RST_MODE	0x0002
+#define NORMAL_INTR_ENBL	0x0300
+#define EEPROM_RST_INTR_ENBL	(I2CESRFIIE_BIT | I2CESRTOIE_BIT)
+#define EEPROM_RST_INTR_DISBL	0x0
+#define BUFFER_MODE_INTR_ENBL	0x001F
+#define BUFFER_MODE_INTR_DISBL	0x0
+#define NORMAL_MODE		0x0
+#define BUFFER_MODE		0x1
+#define EEPROM_SR_MODE		0x2
+#define I2C_TX_MODE		0x0010
+#define PCH_BUF_TX		0xFFF7
+#define PCH_BUF_RD		0x0008
+#define I2C_ERROR_MASK	(I2CESRTO_EVENT | I2CBMIS_EVENT | I2CBMTO_EVENT | \
+			I2CBMNA_EVENT | I2CBMAL_EVENT | I2CMAL_EVENT)
+#define I2CMAL_EVENT		0x0001
+#define I2CMCF_EVENT		0x0002
+#define I2CBMFI_EVENT		0x0004
+#define I2CBMAL_EVENT		0x0008
+#define I2CBMNA_EVENT		0x0010
+#define I2CBMTO_EVENT		0x0020
+#define I2CBMIS_EVENT		0x0040
+#define I2CESRFI_EVENT		0x0080
+#define I2CESRTO_EVENT		0x0100
+#define PCI_DEVICE_ID_PCH_I2C	0x8817
+
+#define pch_dbg(adap, fmt, arg...)  \
+	dev_dbg(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg)
+
+#define pch_err(adap, fmt, arg...)  \
+	dev_err(adap->pch_adapter.dev.parent, "%s :" fmt, __func__, ##arg)
+
+#define pch_pci_err(pdev, fmt, arg...)  \
+	dev_err(&pdev->dev, "%s :" fmt, __func__, ##arg)
+
+#define pch_pci_dbg(pdev, fmt, arg...)  \
+	dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
+
+/**
+ * struct i2c_algo_pch_data - for I2C driver functionalities
+ * @pch_adapter:		stores the reference to i2c_adapter structure
+ * @p_adapter_info:		stores the reference to adapter_info structure
+ * @pch_base_address:		specifies the remapped base address
+ * @pch_buff_mode_en:		specifies if buffer mode is enabled
+ * @pch_event_flag:		specifies occurrence of interrupt events
+ * @pch_i2c_xfer_in_progress:	specifies whether a transfer is in progress
+ */
+struct i2c_algo_pch_data {
+	struct i2c_adapter pch_adapter;
+	struct adapter_info *p_adapter_info;
+	void __iomem *pch_base_address;
+	int pch_buff_mode_en;
+	u32 pch_event_flag;
+	bool pch_i2c_xfer_in_progress;
+};
+
+/**
+ * struct adapter_info - This structure holds the adapter information for the
+ *			  PCH I2C controller
+ * @pch_data:		stores a list of i2c_algo_pch_data
+ * @pch_i2c_suspended:	specifies whether the I2C controller is suspended
+ *
+ * pch_data has as many elements as maximum I2C channels
+ */
+struct adapter_info {
+	struct i2c_algo_pch_data pch_data;
+	bool pch_i2c_suspended;
+};
+
+
+static int pch_i2c_speed = 100; /* I2C bus speed in Kbps */
+static int pch_clk = 50000;	/* specifies I2C clock speed in KHz */
+static wait_queue_head_t pch_event;
+static DEFINE_MUTEX(pch_mutex);
+
+static struct pci_device_id __devinitdata pch_pcidev_id[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)},
+	{0,}
+};
+
+static irqreturn_t pch_i2c_handler(int irq, void *pData);
+
+static inline void pch_setbit(void __iomem *addr, u32 offset, u32 bitmask)
+{
+	u32 val;
+	val = ioread32(addr + offset);
+	val |= bitmask;
+	iowrite32(val, addr + offset);
+}
+
+static inline void pch_clrbit(void __iomem *addr, u32 offset, u32 bitmask)
+{
+	u32 val;
+	val = ioread32(addr + offset);
+	val &= (~bitmask);
+	iowrite32(val, addr + offset);
+}
+
+/**
+ * pch_i2c_init() - hardware initialization of I2C module
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_init(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	u32 pch_i2cbc;
+	u32 pch_i2ctmr;
+	u32 reg_value;
+
+	/* reset I2C controller */
+	iowrite32(0x01, p + PCH_I2CSRST);
+	msleep(20);
+	iowrite32(0x0, p + PCH_I2CSRST);
+
+	/* Initialize I2C registers */
+	iowrite32(0x21, p + PCH_I2CNF);
+
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL,
+			  PCH_I2CCTL_I2CMEN);
+
+	if (pch_i2c_speed != 400)
+		pch_i2c_speed = 100;
+
+	reg_value = PCH_I2CCTL_I2CMEN;
+	if (pch_i2c_speed == FAST_MODE_CLK) {
+		reg_value |= FAST_MODE_EN;
+		pch_dbg(adap, "Fast mode enabled\n");
+	}
+
+	if (pch_clk > PCH_MAX_CLK)
+		pch_clk = 62500;
+
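+	/* Derive the bus rate counter from the input clock and requested speed (KHz) */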
+	pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+	/* Set transfer speed in I2CBC */
+	iowrite32(pch_i2cbc, p + PCH_I2CBC);
+
+	pch_i2ctmr = (pch_clk) / 8;
+	iowrite32(pch_i2ctmr, p + PCH_I2CTMR);
+
+	reg_value |= NORMAL_INTR_ENBL;	/* Enable interrupts in normal mode */
+	iowrite32(reg_value, p + PCH_I2CCTL);
+
+	pch_dbg(adap,
+		"I2CCTL=%x pch_i2cbc=%x pch_i2ctmr=%x Enable interrupts\n",
+		ioread32(p + PCH_I2CCTL), pch_i2cbc, pch_i2ctmr);
+
+	init_waitqueue_head(&pch_event);
+}
+
+static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
+{
+	return cmp1.tv64 < cmp2.tv64;
+}
+
+/**
+ * pch_i2c_wait_for_bus_idle() - wait for the bus to become idle
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ * @timeout:	maximum wait time (ms).
+ */
+static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
+				 s32 timeout)
+{
+	void __iomem *p = adap->pch_base_address;
+
+	/* Maximum wait is 'timeout' ms, i.e. timeout * 1000 * 1000 ns */
+	ktime_t ns_val = ktime_add_ns(ktime_get(), timeout*1000*1000);
+	do {
+		if ((ioread32(p + PCH_I2CSR) & I2CMBB_BIT) == 0)
+			break;
+		msleep(20);
+	} while (ktime_lt(ktime_get(), ns_val));
+
+	pch_dbg(adap, "I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+
+	if (timeout == 0) {
+		pch_err(adap, "%s: timeout error, return %d\n", __func__, -ETIME);
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+/**
+ * pch_i2c_start() - Generate I2C start condition in normal mode.
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ *
+ * Generate I2C start condition in normal mode by setting I2CCTL.I2CMSTA to 1.
+ */
+static void pch_i2c_start(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
+}
+
+/**
+ * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
+{
+	s32 ret;
+	ret = wait_event_timeout(pch_event,
+			(adap->pch_event_flag != 0), msecs_to_jiffies(50));
+	if (ret < 0) {
+		pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+		return ret;
+	}
+
+	if (ret == 0) {
+		pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+		return -ETIMEDOUT;
+	}
+
+	if (adap->pch_event_flag & I2C_ERROR_MASK) {
+		pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
+		return -EIO;
+	}
+
+	adap->pch_event_flag = 0;
+
+	return 0;
+}
+
+/**
+ * pch_i2c_getack() - to confirm ACK/NACK
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static s32 pch_i2c_getack(struct i2c_algo_pch_data *adap)
+{
+	u32 reg_val;
+	void __iomem *p = adap->pch_base_address;
+	reg_val = ioread32(p + PCH_I2CSR) & PCH_GETACK;
+
+	if (reg_val != 0) {
+		pch_err(adap, "return %d\n", -EPROTO);
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
+/**
+ * pch_i2c_stop() - generate stop condition in normal mode.
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	/* clear the start bit */
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
+}
+
+/**
+ * pch_i2c_repstart() - generate repeated start condition in normal mode
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_repstart(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_REPSTART);
+}
+
+/**
+ * pch_i2c_writebytes() - write data to I2C bus in normal mode
+ * @i2c_adap:	Pointer to the struct i2c_adapter.
+ * @last:	specifies whether last message or not.
+ *		In the case of compound mode it will be 1 for last message,
+ *		otherwise 0.
+ * @first:	specifies whether first message or not.
+ *		1 for first message otherwise 0.
+ */
+static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
+			      struct i2c_msg *msgs, u32 last, u32 first)
+{
+	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+	u8 *buf;
+	u32 length;
+	u32 addr;
+	u32 addr_2_msb;
+	u32 addr_8_lsb;
+	s32 wrcount;
+	void __iomem *p = adap->pch_base_address;
+
+	length = msgs->len;
+	buf = msgs->buf;
+	addr = msgs->addr;
+
+	/* enable master tx */
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
+
+	pch_dbg(adap, "I2CCTL = %x msgs->len = %d\n", ioread32(p + PCH_I2CCTL),
+		length);
+
+	if (first) {
+		if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME)
+			return -ETIME;
+	}
+
+	if (msgs->flags & I2C_M_TEN) {
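+		/* 10-bit addressing: the first byte carries the 11110 marker plus the two MSBs */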
+		addr_2_msb = ((addr & I2C_MSB_2B_MSK) >> 7);
+		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+		if (first)
+			pch_i2c_start(adap);
+		if (pch_i2c_wait_for_xfer_complete(adap) == 0 &&
+		    pch_i2c_getack(adap) == 0) {
+			addr_8_lsb = (addr & I2C_ADDR_MSK);
+			iowrite32(addr_8_lsb, p + PCH_I2CDR);
+		} else {
+			pch_i2c_stop(adap);
+			return -ETIME;
+		}
+	} else {
+		/* set 7 bit slave address and R/W bit as 0 */
+		iowrite32(addr << 1, p + PCH_I2CDR);
+		if (first)
+			pch_i2c_start(adap);
+	}
+
+	if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
+	    (pch_i2c_getack(adap) == 0)) {
+		for (wrcount = 0; wrcount < length; ++wrcount) {
+			/* write buffer value to I2C data register */
+			iowrite32(buf[wrcount], p + PCH_I2CDR);
+			pch_dbg(adap, "writing %x to Data register\n",
+				buf[wrcount]);
+
+			if (pch_i2c_wait_for_xfer_complete(adap) != 0)
+				return -ETIME;
+
+			if (pch_i2c_getack(adap))
+				return -EIO;
+		}
+
+		/* check if this is the last message */
+		if (last)
+			pch_i2c_stop(adap);
+		else
+			pch_i2c_repstart(adap);
+	} else {
+		pch_i2c_stop(adap);
+		return -EIO;
+	}
+
+	pch_dbg(adap, "return=%d\n", wrcount);
+
+	return wrcount;
+}
+
+/**
+ * pch_i2c_sendack() - send ACK
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_sendack(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK);
+}
+
+/**
+ * pch_i2c_sendnack() - send NACK
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+	pch_dbg(adap, "I2CCTL = %x\n", ioread32(p + PCH_I2CCTL));
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_ACK);
+}
+
+/**
+ * pch_i2c_readbytes() - read data from I2C bus in normal mode.
+ * @i2c_adap:	Pointer to the struct i2c_adapter.
+ * @msgs:	Pointer to i2c_msg structure.
+ * @last:	specifies whether last message or not.
+ * @first:	specifies whether first message or not.
+ */
+s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+		  u32 last, u32 first)
+{
+	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+
+	u8 *buf;
+	u32 count;
+	u32 length;
+	u32 addr;
+	u32 addr_2_msb;
+	void __iomem *p = adap->pch_base_address;
+
+	length = msgs->len;
+	buf = msgs->buf;
+	addr = msgs->addr;
+
+	/* enable master reception */
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, I2C_TX_MODE);
+
+	if (first) {
+		if (pch_i2c_wait_for_bus_idle(adap, BUS_IDLE_TIMEOUT) == -ETIME)
+			return -ETIME;
+	}
+
+	if (msgs->flags & I2C_M_TEN) {
+		addr_2_msb = (((addr & I2C_MSB_2B_MSK) >> 7) | (I2C_RD));
+		iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
+
+	} else {
+		/* 7 address bits + R/W bit */
+		addr = (((addr) << 1) | (I2C_RD));
+		iowrite32(addr, p + PCH_I2CDR);
+	}
+
+	/* check if it is the first message */
+	if (first)
+		pch_i2c_start(adap);
+
+	if ((pch_i2c_wait_for_xfer_complete(adap) == 0) &&
+	    (pch_i2c_getack(adap) == 0)) {
+		pch_dbg(adap, "return %d\n", 0);
+
+		if (length == 0) {
+			pch_i2c_stop(adap);
+			ioread32(p + PCH_I2CDR); /* Dummy read needed */
+
+			count = length;
+		} else {
+			int read_index;
+			int loop;
+			pch_i2c_sendack(adap);
+
+			/* First read is a dummy; received data lags one read behind */
+			for (loop = 1, read_index = 0; loop < length; loop++) {
+				buf[read_index] = ioread32(p + PCH_I2CDR);
+
+				if (loop != 1)
+					read_index++;
+
+				if (pch_i2c_wait_for_xfer_complete(adap) != 0) {
+					pch_i2c_stop(adap);
+					return -ETIME;
+				}
+			}	/* end for */
+
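+			/* NACK the last byte so the slave releases SDA afterwards */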
+			pch_i2c_sendnack(adap);
+
+			buf[read_index] = ioread32(p + PCH_I2CDR);
+
+			if (length != 1)
+				read_index++;
+
+			if (pch_i2c_wait_for_xfer_complete(adap) == 0) {
+				if (last)
+					pch_i2c_stop(adap);
+				else
+					pch_i2c_repstart(adap);
+
+				buf[read_index++] = ioread32(p + PCH_I2CDR);
+				count = read_index;
+			} else {
+				count = -ETIME;
+			}
+
+		}
+	} else {
+		count = -ETIME;
+		pch_i2c_stop(adap);
+	}
+
+	return count;
+}
+
+/**
+ * pch_i2c_cb_ch0() - interrupt handler callback function
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
+{
+	u32 sts;
+	void __iomem *p = adap->pch_base_address;
+
+	sts = ioread32(p + PCH_I2CSR);
+	sts &= (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT);
+	if (sts & I2CMAL_BIT)
+		adap->pch_event_flag |= I2CMAL_EVENT;
+
+	if (sts & I2CMCF_BIT)
+		adap->pch_event_flag |= I2CMCF_EVENT;
+
+	/* clear the applicable bits */
+	pch_clrbit(adap->pch_base_address, PCH_I2CSR, sts);
+
+	pch_dbg(adap, "PCH_I2CSR = %x\n", ioread32(p + PCH_I2CSR));
+
+	wake_up(&pch_event);
+}
+
+/**
+ * pch_i2c_handler() - interrupt handler for the PCH I2C controller
+ * @irq:	irq number.
+ * @pData:	cookie passed back to the handler function.
+ */
+static irqreturn_t pch_i2c_handler(int irq, void *pData)
+{
+	s32 reg_val;
+
+	struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData;
+	void __iomem *p = adap_data->pch_base_address;
+	u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE);
+
+	if (mode != NORMAL_MODE) {
+		pch_err(adap_data, "I2C mode is not supported\n");
+		return IRQ_NONE;
+	}
+
+	reg_val = ioread32(p + PCH_I2CSR);
+	if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
+		pch_i2c_cb_ch0(adap_data);
+	else
+		return IRQ_NONE;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * pch_i2c_xfer() - Read and write data through the I2C bus
+ * @i2c_adap:	Pointer to the struct i2c_adapter.
+ * @msgs:	Pointer to i2c_msg structure.
+ * @num:	number of messages.
+ */
+static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
+		    struct i2c_msg *msgs, s32 num)
+{
+	struct i2c_msg *pmsg;
+	u32 i = 0;
+	u32 status;
+	u32 msglen;
+	u32 subaddrlen;
+	s32 ret;
+
+	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
+
+	ret = mutex_lock_interruptible(&pch_mutex);
+	if (ret)
+		return -ERESTARTSYS;
+
+	if (adap->p_adapter_info->pch_i2c_suspended) {
+		mutex_unlock(&pch_mutex);
+		return -EBUSY;
+	}
+
+	pch_dbg(adap, "adap->p_adapter_info->pch_i2c_suspended is %d\n",
+		adap->p_adapter_info->pch_i2c_suspended);
+	/* transfer not completed */
+	adap->pch_i2c_xfer_in_progress = true;
+
+	pmsg = &msgs[0];
+	pmsg->flags |= adap->pch_buff_mode_en;
+	status = pmsg->flags;
+	pch_dbg(adap,
+		"After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
+	/* calculate sub address length and message length */
+	/* these are applicable only for buffer mode */
+	subaddrlen = pmsg->buf[0];
+	/* calculate actual message length excluding
+	 * the sub address fields */
+	msglen = (pmsg->len) - (subaddrlen + 1);
+	if (status & (I2C_M_RD)) {
+		pch_dbg(adap, "invoking pch_i2c_readbytes\n");
+		ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
+				   (i == 0));
+	} else {
+		pch_dbg(adap, "invoking pch_i2c_writebytes\n");
+		ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
+				    (i == 0));
+	}
+
+	adap->pch_i2c_xfer_in_progress = false;	/* transfer completed */
+
+	mutex_unlock(&pch_mutex);
+
+	return ret;
+}
+
+/**
+ * pch_i2c_func() - return the functionality of the I2C driver
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static u32 pch_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
+}
+
+static struct i2c_algorithm pch_algorithm = {
+	.master_xfer = pch_i2c_xfer,
+	.functionality = pch_i2c_func
+};
+
+/**
+ * pch_i2c_disbl_int() - Disable PCH I2C interrupts
+ * @adap:	Pointer to struct i2c_algo_pch_data.
+ */
+static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
+{
+	void __iomem *p = adap->pch_base_address;
+
+	pch_clrbit(adap->pch_base_address, PCH_I2CCTL, NORMAL_INTR_ENBL);
+
+	iowrite32(EEPROM_RST_INTR_DISBL, p + PCH_I2CESRMSK);
+
+	iowrite32(BUFFER_MODE_INTR_DISBL, p + PCH_I2CBUFMSK);
+}
+
+static int __devinit pch_i2c_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *id)
+{
+	void __iomem *base_addr;
+	s32 ret;
+	struct adapter_info *adap_info;
+
+	pch_pci_dbg(pdev, "Entered.\n");
+
+	adap_info = kzalloc((sizeof(struct adapter_info)), GFP_KERNEL);
+	if (adap_info == NULL) {
+		pch_pci_err(pdev, "Memory allocation FAILED\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		pch_pci_err(pdev, "pci_enable_device FAILED\n");
+		goto err_pci_enable;
+	}
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		pch_pci_err(pdev, "pci_request_regions FAILED\n");
+		goto err_pci_req;
+	}
+
+	base_addr = pci_iomap(pdev, 1, 0);
+
+	if (base_addr == NULL) {
+		pch_pci_err(pdev, "pci_iomap FAILED\n");
+		ret = -ENOMEM;
+		goto err_pci_iomap;
+	}
+
+	adap_info->pch_i2c_suspended = false;
+
+	adap_info->pch_data.p_adapter_info = adap_info;
+
+	adap_info->pch_data.pch_adapter.owner = THIS_MODULE;
+	adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
+	strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
+	adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
+	adap_info->pch_data.pch_adapter.algo_data =
+						&adap_info->pch_data;
+
+	/* (i * 0x80) + base_addr; */
+	adap_info->pch_data.pch_base_address = base_addr;
+
+	adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev;
+
+	ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter));
+
+	if (ret) {
+		pch_pci_err(pdev, "i2c_add_adapter FAILED\n");
+		goto err_i2c_add_adapter;
+	}
+
+	pch_i2c_init(&adap_info->pch_data);
+	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+		  KBUILD_MODNAME, &adap_info->pch_data);
+	if (ret) {
+		pch_pci_err(pdev, "request_irq FAILED\n");
+		goto err_request_irq;
+	}
+
+	pci_set_drvdata(pdev, adap_info);
+	pch_pci_dbg(pdev, "returns %d.\n", ret);
+	return 0;
+
+err_request_irq:
+	i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+err_i2c_add_adapter:
+	pci_iounmap(pdev, base_addr);
+err_pci_iomap:
+	pci_release_regions(pdev);
+err_pci_req:
+	pci_disable_device(pdev);
+err_pci_enable:
+	kfree(adap_info);
+	return ret;
+}
+
+static void __devexit pch_i2c_remove(struct pci_dev *pdev)
+{
+	struct adapter_info *adap_info = pci_get_drvdata(pdev);
+
+	pch_i2c_disbl_int(&adap_info->pch_data);
+	free_irq(pdev->irq, &adap_info->pch_data);
+	i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+
+	if (adap_info->pch_data.pch_base_address) {
+		pci_iounmap(pdev, adap_info->pch_data.pch_base_address);
+		adap_info->pch_data.pch_base_address = 0;
+	}
+
+	pci_set_drvdata(pdev, NULL);
+
+	pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	kfree(adap_info);
+}
+
+#ifdef CONFIG_PM
+static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int ret;
+	struct adapter_info *adap_info = pci_get_drvdata(pdev);
+	void __iomem *p = adap_info->pch_data.pch_base_address;
+
+	adap_info->pch_i2c_suspended = true;
+
+	while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) {
+		/* Wait until all channel transfers are completed */
+		msleep(20);
+	}
+	/* Disable the i2c interrupts */
+	pch_i2c_disbl_int(&adap_info->pch_data);
+
+	pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
+		"invoked function pch_i2c_disbl_int successfully\n",
+		ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA),
+		ioread32(p + PCH_I2CESRSTA));
+
+	ret = pci_save_state(pdev);
+
+	if (ret) {
+		pch_pci_err(pdev, "pci_save_state\n");
+		return ret;
+	}
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int pch_i2c_resume(struct pci_dev *pdev)
+{
+	struct adapter_info *adap_info = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	if (pci_enable_device(pdev) < 0) {
+		pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n");
+		return -EIO;
+	}
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+
+	pch_i2c_init(&adap_info->pch_data);
+
+	adap_info->pch_i2c_suspended = false;
+
+	return 0;
+}
+#else
+#define pch_i2c_suspend NULL
+#define pch_i2c_resume NULL
+#endif
+
+static struct pci_driver pch_pcidriver = {
+	.name = KBUILD_MODNAME,
+	.id_table = pch_pcidev_id,
+	.probe = pch_i2c_probe,
+	.remove = __devexit_p(pch_i2c_remove),
+	.suspend = pch_i2c_suspend,
+	.resume = pch_i2c_resume
+};
+
+static int __init pch_pci_init(void)
+{
+	return pci_register_driver(&pch_pcidriver);
+}
+module_init(pch_pci_init);
+
+static void __exit pch_pci_exit(void)
+{
+	pci_unregister_driver(&pch_pcidriver);
+}
+module_exit(pch_pci_exit);
+
+MODULE_DESCRIPTION("PCH I2C PCI Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
+module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 02835ce..7979aef7e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/dmi.h>
+#include <linux/slab.h>
 
 /* I801 SMBus address offsets */
 #define SMBHSTSTS(p)	(0 + (p)->smba)
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 112c61f..f09c931 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -409,7 +409,7 @@
 		IOP3XX_ICR_RXFULL_IE | IOP3XX_ICR_TXEMPTY_IE);
 	__raw_writel(cr, adapter_data->ioaddr + CR_OFFSET);
 
-	iounmap((void __iomem*)adapter_data->ioaddr);
+	iounmap(adapter_data->ioaddr);
 	release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
 	kfree(adapter_data);
 	kfree(padapter);
@@ -453,7 +453,7 @@
 	/* set the adapter enumeration # */
 	adapter_data->id = i2c_id++;
 
-	adapter_data->ioaddr = (u32)ioremap(res->start, IOP3XX_I2C_IO_SIZE);
+	adapter_data->ioaddr = ioremap(res->start, IOP3XX_I2C_IO_SIZE);
 	if (!adapter_data->ioaddr) {
 		ret = -ENOMEM;
 		goto release_region;
@@ -498,7 +498,7 @@
 	return 0;
 
 unmap:
-	iounmap((void __iomem*)adapter_data->ioaddr);
+	iounmap(adapter_data->ioaddr);
 
 release_region:
 	release_mem_region(res->start, IOP3XX_I2C_IO_SIZE);
diff --git a/drivers/i2c/busses/i2c-iop3xx.h b/drivers/i2c/busses/i2c-iop3xx.h
index 8485861..097e270 100644
--- a/drivers/i2c/busses/i2c-iop3xx.h
+++ b/drivers/i2c/busses/i2c-iop3xx.h
@@ -97,7 +97,7 @@
 #define	IOP3XX_I2C_IO_SIZE	0x18
 
 struct i2c_algo_iop3xx_data {
-	u32 ioaddr;
+	void __iomem *ioaddr;
 	wait_queue_head_t waitq;
 	spinlock_t lock;
 	u32 SR_enabled, SR_received;
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 1624206..a9941c6 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -59,6 +59,7 @@
 	MV64XXX_I2C_STATE_INVALID,
 	MV64XXX_I2C_STATE_IDLE,
 	MV64XXX_I2C_STATE_WAITING_FOR_START_COND,
+	MV64XXX_I2C_STATE_WAITING_FOR_RESTART,
 	MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK,
 	MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK,
 	MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK,
@@ -70,6 +71,7 @@
 	MV64XXX_I2C_ACTION_INVALID,
 	MV64XXX_I2C_ACTION_CONTINUE,
 	MV64XXX_I2C_ACTION_SEND_START,
+	MV64XXX_I2C_ACTION_SEND_RESTART,
 	MV64XXX_I2C_ACTION_SEND_ADDR_1,
 	MV64XXX_I2C_ACTION_SEND_ADDR_2,
 	MV64XXX_I2C_ACTION_SEND_DATA,
@@ -91,6 +93,7 @@
 	u32			addr2;
 	u32			bytes_left;
 	u32			byte_posn;
+	u32			send_stop;
 	u32			block;
 	int			rc;
 	u32			freq_m;
@@ -159,8 +162,15 @@
 		if ((drv_data->bytes_left == 0)
 				|| (drv_data->aborting
 					&& (drv_data->byte_posn != 0))) {
-			drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
-			drv_data->state = MV64XXX_I2C_STATE_IDLE;
+			if (drv_data->send_stop) {
+				drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
+				drv_data->state = MV64XXX_I2C_STATE_IDLE;
+			} else {
+				drv_data->action =
+					MV64XXX_I2C_ACTION_SEND_RESTART;
+				drv_data->state =
+					MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
+			}
 		} else {
 			drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA;
 			drv_data->state =
@@ -228,6 +238,15 @@
 mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
 {
 	switch(drv_data->action) {
+	case MV64XXX_I2C_ACTION_SEND_RESTART:
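+		/* Re-arm START with the interrupt masked and unblock the waiter so it can queue the next message */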
+		drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
+		drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
+		writel(drv_data->cntl_bits,
+			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+		drv_data->block = 0;
+		wake_up_interruptible(&drv_data->waitq);
+		break;
+
 	case MV64XXX_I2C_ACTION_CONTINUE:
 		writel(drv_data->cntl_bits,
 			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
@@ -386,7 +405,8 @@
 }
 
 static int
-mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg)
+mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
+				int is_first, int is_last)
 {
 	unsigned long	flags;
 
@@ -406,10 +426,18 @@
 			drv_data->bytes_left--;
 		}
 	} else {
-		drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
-		drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+		if (is_first) {
+			drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+			drv_data->state =
+				MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+		} else {
+			drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1;
+			drv_data->state =
+				MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK;
+		}
 	}
 
+	drv_data->send_stop = is_last;
 	drv_data->block = 1;
 	mv64xxx_i2c_do_action(drv_data);
 	spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -437,9 +465,12 @@
 	struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
 	int	i, rc;
 
-	for (i=0; i<num; i++)
-		if ((rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i])) < 0)
+	for (i = 0; i < num; i++) {
+		rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i],
+						i == 0, i + 1 == num);
+		if (rc < 0)
 			return rc;
+	}
 
 	return num;
 }
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index a605a50..ff1e127 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -432,7 +432,7 @@
 
 static void __devexit nforce2_remove(struct pci_dev *dev)
 {
-	struct nforce2_smbus *smbuses = (void*) pci_get_drvdata(dev);
+	struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
 
 	nforce2_set_reference(NULL);
 	if (smbuses[0].base) {
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index c9fffd0..594ed50 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -434,7 +434,7 @@
 	}
 
 	if (timeout == 0) {
-		/* controler has timedout, re-init the h/w */
+		/* controller has timed out, re-init the h/w */
 		dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
 		(void) init_hw(dev);
 		status = -ETIMEDOUT;
@@ -498,7 +498,7 @@
 	}
 
 	if (timeout == 0) {
-		/* controler has timedout, re-init the h/w */
+		/* controller has timed out, re-init the h/w */
 		dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
 		(void) init_hw(dev);
 		status = -ETIMEDOUT;
@@ -872,6 +872,8 @@
 	adap->owner	= THIS_MODULE;
 	adap->class	= I2C_CLASS_HWMON | I2C_CLASS_SPD;
 	adap->algo	= &nmk_i2c_algo;
+	snprintf(adap->name, sizeof(adap->name),
+		 "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
 
 	/* fetch the controller id */
 	adap->nr	= pdev->id;
@@ -891,8 +893,8 @@
 		goto err_init_hw;
 	}
 
-	dev_dbg(&pdev->dev, "initialize I2C%d bus on virtual "
-		"base %p\n", pdev->id, dev->virtbase);
+	dev_info(&pdev->dev, "initialize %s on virtual "
+		"base %p\n", adap->name, dev->virtbase);
 
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret) {
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 0070371..ef3bcb1 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -9,6 +9,41 @@
  * kind, whether express or implied.
  */
 
+/*
+ * Device tree configuration:
+ *
+ * Required properties:
+ * - compatible      : "opencores,i2c-ocores"
+ * - reg             : bus address start and address range size of device
+ * - interrupts      : interrupt number
+ * - regstep         : size of device registers in bytes
+ * - clock-frequency : frequency of bus clock in Hz
+ *
+ * Example:
+ *
+ *  i2c0: ocores@a0000000 {
+ *              compatible = "opencores,i2c-ocores";
+ *              reg = <0xa0000000 0x8>;
+ *              interrupts = <10>;
+ *
+ *              regstep = <1>;
+ *              clock-frequency = <20000000>;
+ *
+ * -- Devices connected on this I2C bus get
+ * -- defined here; address- and size-cells
+ * -- apply to these child devices
+ *
+ *              #address-cells = <1>;
+ *              #size-cells = <0>;
+ *
+ *              dummy@60 {
+ *                     compatible = "dummy";
+ *                     reg = <60>;
+ *              };
+ *  };
+ *
+ */
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -210,6 +245,32 @@
 	.algo		= &ocores_algorithm,
 };
 
+#ifdef CONFIG_OF
+static int ocores_i2c_of_probe(struct platform_device* pdev,
+				struct ocores_i2c* i2c)
+{
+	__be32* val;
+
+	val = of_get_property(pdev->dev.of_node, "regstep", NULL);
+	if (!val) {
+		dev_err(&pdev->dev, "Missing required parameter 'regstep'");
+		return -ENODEV;
+	}
+	i2c->regstep = be32_to_cpup(val);
+
+	val = of_get_property(pdev->dev.of_node, "clock-frequency", NULL);
+	if (!val) {
+		dev_err(&pdev->dev,
+			"Missing required parameter 'clock-frequency'");
+		return -ENODEV;
+	}
+	i2c->clock_khz = be32_to_cpup(val) / 1000;
+
+	return 0;
+}
+#else
+#define ocores_i2c_of_probe(pdev, i2c) -ENODEV
+#endif
 
 static int __devinit ocores_i2c_probe(struct platform_device *pdev)
 {
@@ -227,37 +288,41 @@
 	if (!res2)
 		return -ENODEV;
 
-	pdata = (struct ocores_i2c_platform_data*) pdev->dev.platform_data;
-	if (!pdata)
-		return -ENODEV;
-
-	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
 	if (!i2c)
 		return -ENOMEM;
 
-	if (!request_mem_region(res->start, resource_size(res),
-				pdev->name)) {
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				     resource_size(res), pdev->name)) {
 		dev_err(&pdev->dev, "Memory region busy\n");
-		ret = -EBUSY;
-		goto request_mem_failed;
+		return -EBUSY;
 	}
 
-	i2c->base = ioremap(res->start, resource_size(res));
+	i2c->base = devm_ioremap_nocache(&pdev->dev, res->start,
+					 resource_size(res));
 	if (!i2c->base) {
 		dev_err(&pdev->dev, "Unable to map registers\n");
-		ret = -EIO;
-		goto map_failed;
+		return -EIO;
 	}
 
-	i2c->regstep = pdata->regstep;
-	i2c->clock_khz = pdata->clock_khz;
+	pdata = pdev->dev.platform_data;
+	if (pdata) {
+		i2c->regstep = pdata->regstep;
+		i2c->clock_khz = pdata->clock_khz;
+	} else {
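+		/* No platform data: fall back to device tree properties */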
+		ret = ocores_i2c_of_probe(pdev, i2c);
+		if (ret)
+			return ret;
+	}
+
 	ocores_init(i2c);
 
 	init_waitqueue_head(&i2c->wait);
-	ret = request_irq(res2->start, ocores_isr, 0, pdev->name, i2c);
+	ret = devm_request_irq(&pdev->dev, res2->start, ocores_isr, 0,
+			       pdev->name, i2c);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot claim IRQ\n");
-		goto request_irq_failed;
+		return ret;
 	}
 
 	/* hook up driver to tree */
@@ -265,36 +330,29 @@
 	i2c->adap = ocores_adapter;
 	i2c_set_adapdata(&i2c->adap, i2c);
 	i2c->adap.dev.parent = &pdev->dev;
+#ifdef CONFIG_OF
+	i2c->adap.dev.of_node = pdev->dev.of_node;
+#endif
 
 	/* add i2c adapter to i2c tree */
 	ret = i2c_add_adapter(&i2c->adap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add adapter\n");
-		goto add_adapter_failed;
+		return ret;
 	}
 
 	/* add in known devices to the bus */
-	for (i = 0; i < pdata->num_devices; i++)
-		i2c_new_device(&i2c->adap, pdata->devices + i);
+	if (pdata) {
+		for (i = 0; i < pdata->num_devices; i++)
+			i2c_new_device(&i2c->adap, pdata->devices + i);
+	}
 
 	return 0;
-
-add_adapter_failed:
-	free_irq(res2->start, i2c);
-request_irq_failed:
-	iounmap(i2c->base);
-map_failed:
-	release_mem_region(res->start, resource_size(res));
-request_mem_failed:
-	kfree(i2c);
-
-	return ret;
 }
 
 static int __devexit ocores_i2c_remove(struct platform_device* pdev)
 {
 	struct ocores_i2c *i2c = platform_get_drvdata(pdev);
-	struct resource *res;
 
 	/* disable i2c logic */
 	oc_setreg(i2c, OCI2C_CONTROL, oc_getreg(i2c, OCI2C_CONTROL)
@@ -304,18 +362,6 @@
 	i2c_del_adapter(&i2c->adap);
 	platform_set_drvdata(pdev, NULL);
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res)
-		free_irq(res->start, i2c);
-
-	iounmap(i2c->base);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
-
-	kfree(i2c);
-
 	return 0;
 }
 
@@ -344,6 +390,16 @@
 #define ocores_i2c_resume	NULL
 #endif
 
+#ifdef CONFIG_OF
+static struct of_device_id ocores_i2c_match[] = {
+	{
+		.compatible = "opencores,i2c-ocores",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ocores_i2c_match);
+#endif
+
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:ocores-i2c");
 
@@ -355,6 +411,9 @@
 	.driver  = {
 		.owner = THIS_MODULE,
 		.name = "ocores-i2c",
+#ifdef CONFIG_OF
+		.of_match_table = ocores_i2c_match,
+#endif
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b33c785..b605ff3 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -39,6 +39,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
+#include <linux/pm_runtime.h>
 
 /* I2C controller revisions */
 #define OMAP_I2C_REV_2			0x20
@@ -175,8 +176,6 @@
 	void __iomem		*base;		/* virtual */
 	int			irq;
 	int			reg_shift;      /* bit shift for I2C register addresses */
-	struct clk		*iclk;		/* Interface clock */
-	struct clk		*fclk;		/* Functional clock */
 	struct completion	cmd_complete;
 	struct resource		*ioarea;
 	u32			latency;	/* maximum mpu wkup latency */
@@ -265,45 +264,18 @@
 				(i2c_dev->regs[reg] << i2c_dev->reg_shift));
 }
 
-static int __init omap_i2c_get_clocks(struct omap_i2c_dev *dev)
-{
-	int ret;
-
-	dev->iclk = clk_get(dev->dev, "ick");
-	if (IS_ERR(dev->iclk)) {
-		ret = PTR_ERR(dev->iclk);
-		dev->iclk = NULL;
-		return ret;
-	}
-
-	dev->fclk = clk_get(dev->dev, "fck");
-	if (IS_ERR(dev->fclk)) {
-		ret = PTR_ERR(dev->fclk);
-		if (dev->iclk != NULL) {
-			clk_put(dev->iclk);
-			dev->iclk = NULL;
-		}
-		dev->fclk = NULL;
-		return ret;
-	}
-
-	return 0;
-}
-
-static void omap_i2c_put_clocks(struct omap_i2c_dev *dev)
-{
-	clk_put(dev->fclk);
-	dev->fclk = NULL;
-	clk_put(dev->iclk);
-	dev->iclk = NULL;
-}
-
 static void omap_i2c_unidle(struct omap_i2c_dev *dev)
 {
+	struct platform_device *pdev;
+	struct omap_i2c_bus_platform_data *pdata;
+
 	WARN_ON(!dev->idle);
 
-	clk_enable(dev->iclk);
-	clk_enable(dev->fclk);
+	pdev = to_platform_device(dev->dev);
+	pdata = pdev->dev.platform_data;
+
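+	/* Runtime PM now handles the interface/functional clocks formerly enabled with clk_enable() */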
+	pm_runtime_get_sync(&pdev->dev);
+
 	if (cpu_is_omap34xx()) {
 		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 		omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
@@ -326,10 +298,15 @@
 
 static void omap_i2c_idle(struct omap_i2c_dev *dev)
 {
+	struct platform_device *pdev;
+	struct omap_i2c_bus_platform_data *pdata;
 	u16 iv;
 
 	WARN_ON(dev->idle);
 
+	pdev = to_platform_device(dev->dev);
+	pdata = pdev->dev.platform_data;
+
 	dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
 	if (dev->rev >= OMAP_I2C_REV_ON_4430)
 		omap_i2c_write_reg(dev, OMAP_I2C_IRQENABLE_CLR, 1);
@@ -345,8 +322,8 @@
 		omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
 	}
 	dev->idle = 1;
-	clk_disable(dev->fclk);
-	clk_disable(dev->iclk);
+
+	pm_runtime_put_sync(&pdev->dev);
 }
 
 static int omap_i2c_init(struct omap_i2c_dev *dev)
@@ -356,6 +333,7 @@
 	unsigned long fclk_rate = 12000000;
 	unsigned long timeout;
 	unsigned long internal_clk = 0;
+	struct clk *fclk;
 
 	if (dev->rev >= OMAP_I2C_REV_2) {
 		/* Disable I2C controller before soft reset */
@@ -414,7 +392,9 @@
 		 * always returns 12MHz for the functional clock, we can
 		 * do this bit unconditionally.
 		 */
-		fclk_rate = clk_get_rate(dev->fclk);
+		fclk = clk_get(dev->dev, "fck");
+		fclk_rate = clk_get_rate(fclk);
+		clk_put(fclk);
 
 		/* TRM for 5912 says the I2C clock must be prescaled to be
 		 * between 7 - 12 MHz. The XOR input clock is typically
@@ -443,7 +423,9 @@
 			internal_clk = 9600;
 		else
 			internal_clk = 4000;
-		fclk_rate = clk_get_rate(dev->fclk) / 1000;
+		fclk = clk_get(dev->dev, "fck");
+		fclk_rate = clk_get_rate(fclk) / 1000;
+		clk_put(fclk);
 
 		/* Compute prescaler divisor */
 		psc = fclk_rate / internal_clk;
@@ -616,12 +598,8 @@
 	 * REVISIT: We should abort the transfer on signals, but the bus goes
 	 * into arbitration and we're currently unable to recover from it.
 	 */
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->set_mpu_wkup_lat(dev->dev, dev->latency);
 	r = wait_for_completion_timeout(&dev->cmd_complete,
 					OMAP_I2C_TIMEOUT);
-	if (dev->set_mpu_wkup_lat != NULL)
-		dev->set_mpu_wkup_lat(dev->dev, -1);
 	dev->buf_len = 0;
 	if (r < 0)
 		return r;
@@ -672,12 +650,18 @@
 	if (r < 0)
 		goto out;
 
+	if (dev->set_mpu_wkup_lat != NULL)
+		dev->set_mpu_wkup_lat(dev->dev, dev->latency);
+
 	for (i = 0; i < num; i++) {
 		r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
 		if (r != 0)
 			break;
 	}
 
+	if (dev->set_mpu_wkup_lat != NULL)
+		dev->set_mpu_wkup_lat(dev->dev, -1);
+
 	if (r == 0)
 		r = num;
 
@@ -1048,14 +1032,12 @@
 	else
 		dev->reg_shift = 2;
 
-	if ((r = omap_i2c_get_clocks(dev)) != 0)
-		goto err_iounmap;
-
 	if (cpu_is_omap44xx())
 		dev->regs = (u8 *) omap4_reg_map;
 	else
 		dev->regs = (u8 *) reg_map;
 
+	pm_runtime_enable(&pdev->dev);
 	omap_i2c_unidle(dev);
 
 	dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
@@ -1127,8 +1109,6 @@
 err_unuse_clocks:
 	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 	omap_i2c_idle(dev);
-	omap_i2c_put_clocks(dev);
-err_iounmap:
 	iounmap(dev->base);
 err_free_mem:
 	platform_set_drvdata(pdev, NULL);
@@ -1150,7 +1130,6 @@
 	free_irq(dev->irq, dev);
 	i2c_del_adapter(&dev->adapter);
 	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
-	omap_i2c_put_clocks(dev);
 	iounmap(dev->base);
 	kfree(dev);
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1162,7 +1141,7 @@
 	.probe		= omap_i2c_probe,
 	.remove		= omap_i2c_remove,
 	.driver		= {
-		.name	= "i2c_omap",
+		.name	= "omap_i2c",
 		.owner	= THIS_MODULE,
 	},
 };
@@ -1184,4 +1163,4 @@
 MODULE_AUTHOR("MontaVista Software, Inc. (and others)");
 MODULE_DESCRIPTION("TI OMAP I2C bus adapter");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:i2c_omap");
+MODULE_ALIAS("platform:omap_i2c");
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 53fab51..986e5f6 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -40,6 +41,7 @@
 
 MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
 MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver");
+MODULE_ALIAS("platform:cs5535-smb");
 MODULE_LICENSE("GPL");
 
 #define MAX_DEVICES 4
@@ -84,10 +86,6 @@
 	u8 *ptr;
 	char needs_reset;
 	unsigned len;
-
-	/* PCI device info */
-	struct pci_dev *pdev;
-	int bar;
 };
 
 /* Register Definitions */
@@ -391,7 +389,7 @@
 static struct scx200_acb_iface *scx200_acb_list;
 static DEFINE_MUTEX(scx200_acb_list_mutex);
 
-static __init int scx200_acb_probe(struct scx200_acb_iface *iface)
+static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface)
 {
 	u8 val;
 
@@ -427,7 +425,7 @@
 	return 0;
 }
 
-static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
+static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text,
 		struct device *dev, int index)
 {
 	struct scx200_acb_iface *iface;
@@ -452,7 +450,7 @@
 	return iface;
 }
 
-static int __init scx200_acb_create(struct scx200_acb_iface *iface)
+static int __devinit scx200_acb_create(struct scx200_acb_iface *iface)
 {
 	struct i2c_adapter *adapter;
 	int rc;
@@ -472,67 +470,31 @@
 		return -ENODEV;
 	}
 
-	mutex_lock(&scx200_acb_list_mutex);
-	iface->next = scx200_acb_list;
-	scx200_acb_list = iface;
-	mutex_unlock(&scx200_acb_list_mutex);
+	if (!adapter->dev.parent) {
+		/* If there's no dev, we're tracking (ISA) ifaces manually */
+		mutex_lock(&scx200_acb_list_mutex);
+		iface->next = scx200_acb_list;
+		scx200_acb_list = iface;
+		mutex_unlock(&scx200_acb_list_mutex);
+	}
 
 	return 0;
 }
 
-static __init int scx200_create_pci(const char *text, struct pci_dev *pdev,
-		int bar)
+static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text,
+		unsigned long base, int index, struct device *dev)
 {
 	struct scx200_acb_iface *iface;
 	int rc;
 
-	iface = scx200_create_iface(text, &pdev->dev, 0);
+	iface = scx200_create_iface(text, dev, index);
 
 	if (iface == NULL)
-		return -ENOMEM;
-
-	iface->pdev = pdev;
-	iface->bar = bar;
-
-	rc = pci_enable_device_io(iface->pdev);
-	if (rc)
-		goto errout_free;
-
-	rc = pci_request_region(iface->pdev, iface->bar, iface->adapter.name);
-	if (rc) {
-		printk(KERN_ERR NAME ": can't allocate PCI BAR %d\n",
-				iface->bar);
-		goto errout_free;
-	}
-
-	iface->base = pci_resource_start(iface->pdev, iface->bar);
-	rc = scx200_acb_create(iface);
-
-	if (rc == 0)
-		return 0;
-
-	pci_release_region(iface->pdev, iface->bar);
-	pci_dev_put(iface->pdev);
- errout_free:
-	kfree(iface);
-	return rc;
-}
-
-static int __init scx200_create_isa(const char *text, unsigned long base,
-		int index)
-{
-	struct scx200_acb_iface *iface;
-	int rc;
-
-	iface = scx200_create_iface(text, NULL, index);
-
-	if (iface == NULL)
-		return -ENOMEM;
+		return NULL;
 
 	if (!request_region(base, 8, iface->adapter.name)) {
 		printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n",
 		       base, base + 8 - 1);
-		rc = -EBUSY;
 		goto errout_free;
 	}
 
@@ -540,115 +502,113 @@
 	rc = scx200_acb_create(iface);
 
 	if (rc == 0)
-		return 0;
+		return iface;
 
 	release_region(base, 8);
  errout_free:
 	kfree(iface);
-	return rc;
+	return NULL;
 }
 
-/* Driver data is an index into the scx200_data array that indicates
- * the name and the BAR where the I/O address resource is located.  ISA
- * devices are flagged with a bar value of -1 */
+static int __devinit scx200_probe(struct platform_device *pdev)
+{
+	struct scx200_acb_iface *iface;
+	struct resource *res;
 
-static const struct pci_device_id scx200_pci[] __initconst = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE),
-	  .driver_data = 0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE),
-	  .driver_data = 0 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA),
-	  .driver_data = 1 },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA),
-	  .driver_data = 2 },
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
+		return -ENODEV;
+	}
+
+	iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev);
+	if (!iface)
+		return -EIO;
+
+	dev_info(&pdev->dev, "SCx200 device '%s' registered\n",
+			iface->adapter.name);
+	platform_set_drvdata(pdev, iface);
+
+	return 0;
+}
+
+static void __devexit scx200_cleanup_iface(struct scx200_acb_iface *iface)
+{
+	i2c_del_adapter(&iface->adapter);
+	release_region(iface->base, 8);
+	kfree(iface);
+}
+
+static int __devexit scx200_remove(struct platform_device *pdev)
+{
+	struct scx200_acb_iface *iface;
+
+	iface = platform_get_drvdata(pdev);
+	platform_set_drvdata(pdev, NULL);
+	scx200_cleanup_iface(iface);
+
+	return 0;
+}
+
+static struct platform_driver scx200_pci_drv = {
+	.driver = {
+		.name = "cs5535-smb",
+		.owner = THIS_MODULE,
+	},
+	.probe = scx200_probe,
+	.remove = __devexit_p(scx200_remove),
+};
+
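+/* Bridges whose ACCESS.bus controller sits behind ISA I/O ports supplied via the "base" module parameter */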
+static const struct pci_device_id scx200_isa[] __initconst = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
 	{ 0, }
 };
 
-static struct {
-	const char *name;
-	int bar;
-} scx200_data[] = {
-	{ "SCx200", -1 },
-	{ "CS5535",  0 },
-	{ "CS5536",  0 }
-};
-
-static __init int scx200_scan_pci(void)
+static __init void scx200_scan_isa(void)
 {
-	int data, dev;
-	int rc = -ENODEV;
-	struct pci_dev *pdev;
+	int i;
 
-	for(dev = 0; dev < ARRAY_SIZE(scx200_pci); dev++) {
-		pdev = pci_get_device(scx200_pci[dev].vendor,
-				scx200_pci[dev].device, NULL);
+	if (!pci_dev_present(scx200_isa))
+		return;
 
-		if (pdev == NULL)
+	for (i = 0; i < MAX_DEVICES; ++i) {
+		if (base[i] == 0)
 			continue;
 
-		data = scx200_pci[dev].driver_data;
-
-		/* if .bar is greater or equal to zero, this is a
-		 * PCI device - otherwise, we assume
-		   that the ports are ISA based
-		*/
-
-		if (scx200_data[data].bar >= 0)
-			rc = scx200_create_pci(scx200_data[data].name, pdev,
-					scx200_data[data].bar);
-		else {
-			int i;
-
-			pci_dev_put(pdev);
-			for (i = 0; i < MAX_DEVICES; ++i) {
-				if (base[i] == 0)
-					continue;
-
-				rc = scx200_create_isa(scx200_data[data].name,
-						base[i],
-						i);
-			}
-		}
-
-		break;
+		/* XXX: should we care about failures? */
+		scx200_create_dev("SCx200", base[i], i, NULL);
 	}
-
-	return rc;
 }
 
 static int __init scx200_acb_init(void)
 {
-	int rc;
-
 	pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n");
 
-	rc = scx200_scan_pci();
+	/* First scan for ISA-based devices */
+	scx200_scan_isa();	/* XXX: should we care about errors? */
 
 	/* If at least one bus was created, init must succeed */
 	if (scx200_acb_list)
 		return 0;
-	return rc;
+
+	/* No ISA devices; register the platform driver for PCI-based devices */
+	return platform_driver_register(&scx200_pci_drv);
 }
 
 static void __exit scx200_acb_cleanup(void)
 {
 	struct scx200_acb_iface *iface;
 
+	platform_driver_unregister(&scx200_pci_drv);
+
 	mutex_lock(&scx200_acb_list_mutex);
 	while ((iface = scx200_acb_list) != NULL) {
 		scx200_acb_list = iface->next;
 		mutex_unlock(&scx200_acb_list_mutex);
 
-		i2c_del_adapter(&iface->adapter);
+		scx200_cleanup_iface(iface);
 
-		if (iface->pdev) {
-			pci_release_region(iface->pdev, iface->bar);
-			pci_dev_put(iface->pdev);
-		}
-		else
-			release_region(iface->base, 8);
-
-		kfree(iface);
 		mutex_lock(&scx200_acb_list_mutex);
 	}
 	mutex_unlock(&scx200_acb_list_mutex);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6b4cc56..f0bd5bc 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -196,88 +196,60 @@
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->suspend ? pm->suspend(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_SUSPEND);
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_SUSPEND);
 }
 
 static int i2c_device_pm_resume(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
 
 	if (pm)
-		ret = pm->resume ? pm->resume(dev) : 0;
+		return pm_generic_resume(dev);
 	else
-		ret = i2c_legacy_resume(dev);
-
-	return ret;
+		return i2c_legacy_resume(dev);
 }
 
 static int i2c_device_pm_freeze(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->freeze ? pm->freeze(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_FREEZE);
+	if (pm)
+		return pm_generic_freeze(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_FREEZE);
 }
 
 static int i2c_device_pm_thaw(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->thaw ? pm->thaw(dev) : 0;
-	}
-
-	return i2c_legacy_resume(dev);
+	if (pm)
+		return pm_generic_thaw(dev);
+	else
+		return i2c_legacy_resume(dev);
 }
 
 static int i2c_device_pm_poweroff(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-	if (pm) {
-		if (pm_runtime_suspended(dev))
-			return 0;
-		else
-			return pm->poweroff ? pm->poweroff(dev) : 0;
-	}
-
-	return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
+	if (pm)
+		return pm_generic_poweroff(dev);
+	else
+		return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
 }
 
 static int i2c_device_pm_restore(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
 
 	if (pm)
-		ret = pm->restore ? pm->restore(dev) : 0;
+		return pm_generic_restore(dev);
 	else
-		ret = i2c_legacy_resume(dev);
-
-	if (!ret) {
-		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
-		pm_runtime_enable(dev);
-	}
-
-	return ret;
+		return i2c_legacy_resume(dev);
 }
 #else /* !CONFIG_PM_SLEEP */
 #define i2c_device_pm_suspend	NULL
@@ -1021,6 +993,14 @@
 static int __unregister_client(struct device *dev, void *dummy)
 {
 	struct i2c_client *client = i2c_verify_client(dev);
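+	/* First pass: skip "dummy" clients so the devices that created them can clean them up */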
+	if (client && strcmp(client->name, "dummy"))
+		i2c_unregister_device(client);
+	return 0;
+}
+
+static int __unregister_dummy(struct device *dev, void *dummy)
+{
+	struct i2c_client *client = i2c_verify_client(dev);
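+	/* Second pass: remove the remaining dummy clients */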
 	if (client)
 		i2c_unregister_device(client);
 	return 0;
@@ -1075,8 +1055,12 @@
 	mutex_unlock(&adap->userspace_clients_lock);
 
 	/* Detach any active clients. This can't fail, thus we do not
-	   checking the returned value. */
+	 * check the returned value. This is a two-pass process, because
+	 * we can't remove the dummy devices during the first pass: they
+	 * could have been instantiated by real devices wishing to clean
+	 * them up properly, so we give them a chance to do that first. */
 	res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+	res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
 
 #ifdef CONFIG_I2C_COMPAT
 	class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
@@ -1140,6 +1124,14 @@
 	if (res)
 		return res;
 
+	/* Drivers should switch to dev_pm_ops instead. */
+	if (driver->suspend)
+		pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
+			driver->driver.name);
+	if (driver->resume)
+		pr_warn("i2c-core: driver [%s] using legacy resume method\n",
+			driver->driver.name);
+
 	pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
 	INIT_LIST_HEAD(&driver->clients);
@@ -1362,7 +1354,7 @@
  *
  * Returns negative errno, or else the number of bytes written.
  */
-int i2c_master_send(struct i2c_client *client, const char *buf, int count)
+int i2c_master_send(const struct i2c_client *client, const char *buf, int count)
 {
 	int ret;
 	struct i2c_adapter *adap = client->adapter;
@@ -1389,7 +1381,7 @@
  *
  * Returns negative errno, or else the number of bytes read.
  */
-int i2c_master_recv(struct i2c_client *client, char *buf, int count)
+int i2c_master_recv(const struct i2c_client *client, char *buf, int count)
 {
 	struct i2c_adapter *adap = client->adapter;
 	struct i2c_msg msg;
@@ -1679,7 +1671,7 @@
  * This executes the SMBus "receive byte" protocol, returning negative errno
  * else the byte received from the device.
  */
-s32 i2c_smbus_read_byte(struct i2c_client *client)
+s32 i2c_smbus_read_byte(const struct i2c_client *client)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1699,7 +1691,7 @@
  * This executes the SMBus "send byte" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value)
 {
 	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
 	                      I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
@@ -1714,7 +1706,7 @@
  * This executes the SMBus "read byte" protocol, returning negative errno
  * else a data byte received from the device.
  */
-s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1735,7 +1727,8 @@
  * This executes the SMBus "write byte" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command,
+			      u8 value)
 {
 	union i2c_smbus_data data;
 	data.byte = value;
@@ -1753,7 +1746,7 @@
  * This executes the SMBus "read word" protocol, returning negative errno
  * else a 16-bit unsigned "word" received from the device.
  */
-s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1774,7 +1767,8 @@
  * This executes the SMBus "write word" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
+			      u16 value)
 {
 	union i2c_smbus_data data;
 	data.word = value;
@@ -1793,7 +1787,8 @@
  * This executes the SMBus "process call" protocol, returning negative errno
  * else a 16-bit unsigned "word" received from the device.
  */
-s32 i2c_smbus_process_call(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
+			   u16 value)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1821,7 +1816,7 @@
  * support this; its emulation through I2C messaging relies on a specific
  * mechanism (I2C_M_RECV_LEN) which may not be implemented.
  */
-s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command,
 			      u8 *values)
 {
 	union i2c_smbus_data data;
@@ -1848,7 +1843,7 @@
  * This executes the SMBus "block write" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command,
 			       u8 length, const u8 *values)
 {
 	union i2c_smbus_data data;
@@ -1864,7 +1859,7 @@
 EXPORT_SYMBOL(i2c_smbus_write_block_data);
 
 /* Returns the number of read bytes */
-s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command,
 				  u8 length, u8 *values)
 {
 	union i2c_smbus_data data;
@@ -1884,7 +1879,7 @@
 }
 EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data);
 
-s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command,
 				   u8 length, const u8 *values)
 {
 	union i2c_smbus_data data;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 4d91d80..90b7a01 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,18 @@
 menu "Multiplexer I2C Chip support"
 	depends on I2C_MUX
 
+config I2C_MUX_GPIO
+	tristate "GPIO-based I2C multiplexer"
+	depends on GENERIC_GPIO
+	help
+	  If you say yes to this option, support will be included for a
+	  GPIO-based I2C multiplexer. This driver provides access to
+	  I2C busses connected through a MUX, which is controlled
+	  through GPIO pins.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called gpio-i2cmux.
+
 config I2C_MUX_PCA9541
 	tristate "NXP PCA9541 I2C Master Selector"
 	depends on EXPERIMENTAL
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index d743806..4640436 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for multiplexer I2C chip drivers.
 
+obj-$(CONFIG_I2C_MUX_GPIO)	+= gpio-i2cmux.o
 obj-$(CONFIG_I2C_MUX_PCA9541)	+= pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)	+= pca954x.o
 
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
new file mode 100644
index 0000000..7b6ce62
--- /dev/null
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -0,0 +1,184 @@
+/*
+ * I2C multiplexer using GPIO API
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+struct gpiomux {
+	struct i2c_adapter *parent;
+	struct i2c_adapter **adap; /* child busses */
+	struct gpio_i2cmux_platform_data data;
+};
+
+static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+{
+	int i;
+
+	for (i = 0; i < mux->data.n_gpios; i++)
+		gpio_set_value(mux->data.gpios[i], val & (1 << i));
+}
+
+static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+	struct gpiomux *mux = data;
+
+	gpiomux_set(mux, mux->data.values[chan]);
+
+	return 0;
+}
+
+static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+{
+	struct gpiomux *mux = data;
+
+	gpiomux_set(mux, mux->data.idle);
+
+	return 0;
+}
+
+static int __devinit gpiomux_probe(struct platform_device *pdev)
+{
+	struct gpiomux *mux;
+	struct gpio_i2cmux_platform_data *pdata;
+	struct i2c_adapter *parent;
+	int (*deselect) (struct i2c_adapter *, void *, u32);
+	unsigned initial_state;
+	int i, ret;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "Missing platform data\n");
+		return -ENODEV;
+	}
+
+	parent = i2c_get_adapter(pdata->parent);
+	if (!parent) {
+		dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+			pdata->parent);
+		return -ENODEV;
+	}
+
+	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+	if (!mux) {
+		ret = -ENOMEM;
+		goto alloc_failed;
+	}
+
+	mux->parent = parent;
+	mux->data = *pdata;
+	mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
+			    GFP_KERNEL);
+	if (!mux->adap) {
+		ret = -ENOMEM;
+		goto alloc_failed2;
+	}
+
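+	/* With no idle state, skip the deselect callback and start out on the first channel's value */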
+	if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+		initial_state = pdata->idle;
+		deselect = gpiomux_deselect;
+	} else {
+		initial_state = pdata->values[0];
+		deselect = NULL;
+	}
+
+	for (i = 0; i < pdata->n_gpios; i++) {
+		ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+		if (ret)
+			goto err_request_gpio;
+		gpio_direction_output(pdata->gpios[i],
+				      initial_state & (1 << i));
+	}
+
+	for (i = 0; i < pdata->n_values; i++) {
+		u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
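+		/* nr == 0 asks the I2C core for a dynamically assigned bus number */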
+
+		mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
+						   gpiomux_select, deselect);
+		if (!mux->adap[i]) {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+			goto add_adapter_failed;
+		}
+	}
+
+	dev_info(&pdev->dev, "%d port mux on %s adapter\n",
+		 pdata->n_values, parent->name);
+
+	platform_set_drvdata(pdev, mux);
+
+	return 0;
+
+add_adapter_failed:
+	for (; i > 0; i--)
+		i2c_del_mux_adapter(mux->adap[i - 1]);
+	i = pdata->n_gpios;
+err_request_gpio:
+	for (; i > 0; i--)
+		gpio_free(pdata->gpios[i - 1]);
+	kfree(mux->adap);
+alloc_failed2:
+	kfree(mux);
+alloc_failed:
+	i2c_put_adapter(parent);
+
+	return ret;
+}
+
+static int __devexit gpiomux_remove(struct platform_device *pdev)
+{
+	struct gpiomux *mux = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < mux->data.n_values; i++)
+		i2c_del_mux_adapter(mux->adap[i]);
+
+	for (i = 0; i < mux->data.n_gpios; i++)
+		gpio_free(mux->data.gpios[i]);
+
+	platform_set_drvdata(pdev, NULL);
+	i2c_put_adapter(mux->parent);
+	kfree(mux->adap);
+	kfree(mux);
+
+	return 0;
+}
+
+static struct platform_driver gpiomux_driver = {
+	.probe	= gpiomux_probe,
+	.remove	= __devexit_p(gpiomux_remove),
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "gpio-i2cmux",
+	},
+};
+
+static int __init gpiomux_init(void)
+{
+	return platform_driver_register(&gpiomux_driver);
+}
+
+static void __exit gpiomux_exit(void)
+{
+	platform_driver_unregister(&gpiomux_driver);
+}
+
+module_init(gpiomux_init);
+module_exit(gpiomux_exit);
+
+MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
+MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-i2cmux");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 56ac09d..7acb32e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -59,6 +59,8 @@
 #include <linux/hrtimer.h>	/* ktime_get_real() */
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <asm/mwait.h>
 
 #define INTEL_IDLE_VERSION "0.4"
@@ -73,6 +75,7 @@
 
 static unsigned int mwait_substates;
 
+#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
 /* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
 static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */
 
@@ -82,6 +85,14 @@
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Set this flag for states where the HW flushes the TLB for us
+ * and so we don't need cross-calls to keep it consistent.
+ * If this flag is set, SW flushes the TLB, so even if the
+ * HW doesn't do the flushing, this flag is safe to use.
+ */
+#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
+
+/*
  * States are indexed by the cstate number,
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
@@ -122,7 +133,7 @@
 		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
-		.target_residency = 4,
+		.target_residency = 1,
 		.enter = &intel_idle },
 	{ /* MWAIT C2 */
 		.name = "SNB-C3",
@@ -130,7 +141,7 @@
 		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
-		.target_residency = 160,
+		.target_residency = 211,
 		.enter = &intel_idle },
 	{ /* MWAIT C3 */
 		.name = "SNB-C6",
@@ -138,7 +149,7 @@
 		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
-		.target_residency = 208,
+		.target_residency = 345,
 		.enter = &intel_idle },
 	{ /* MWAIT C4 */
 		.name = "SNB-C7",
@@ -146,7 +157,7 @@
 		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
-		.target_residency = 300,
+		.target_residency = 345,
 		.enter = &intel_idle },
 };
 
@@ -220,8 +231,6 @@
 	kt_before = ktime_get_real();
 
 	stop_critical_timings();
-	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
-	trace_cpu_idle((eax >> 4) + 1, cpu);
 	if (!need_resched()) {
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -243,6 +252,39 @@
 	return usec_delta;
 }
 
+static void __setup_broadcast_timer(void *arg)
+{
+	unsigned long reason = (unsigned long)arg;
+	int cpu = smp_processor_id();
+
+	reason = reason ?
+		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+	clockevents_notify(reason, &cpu);
+}
+
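+/* Hotplug notifier: enable the broadcast timer on CPUs coming online, disable it before they go down */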
+static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	int hotcpu = (unsigned long)hcpu;
+
+	switch (action & 0xf) {
+	case CPU_ONLINE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+			(void *)true, 1);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+			(void *)false, 1);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata setup_broadcast_notifier = {
+	.notifier_call = setup_broadcast_cpuhp_notify,
+};
+
 /*
  * intel_idle_probe()
  */
@@ -305,7 +347,11 @@
 	}
 
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
-		lapic_timer_reliable_states = 0xFFFFFFFF;
+		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+	else {
+		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		register_cpu_notifier(&setup_broadcast_notifier);
+	}
 
 	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
 		" model 0x%X\n", boot_cpu_data.x86_model);
@@ -403,6 +449,10 @@
 {
 	int retval;
 
+	/* Do not load intel_idle at all for now if idle= is passed */
+	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
 	retval = intel_idle_probe();
 	if (retval)
 		return retval;
@@ -428,6 +478,11 @@
 	intel_idle_cpuidle_devices_uninit();
 	cpuidle_unregister_driver(&intel_idle_driver);
 
+	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		unregister_cpu_notifier(&setup_broadcast_notifier);
+	}
+
 	return;
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b..c3f5aca 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 4bb997a..83d2e19 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -689,7 +689,7 @@
  * A T3 WQ implements both the SQ and RQ.
  */
 struct t3_wq {
-	union t3_wr *queue;		/* DMA accessable memory */
+	union t3_wr *queue;		/* DMA accessible memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
 	DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49..c5406da 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137..1b4cd09 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cd..2fe19ec 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -46,7 +46,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <linux/kfifo.h>
-#include <linux/mutex.h>
 
 #include <asm/byteorder.h>
 
@@ -760,7 +759,6 @@
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb25..2080090 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc..b33f045 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
 
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 8c8afc7..31ae1b1 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,18 +277,14 @@
 		goto bail;
 	}
 
-	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 		simple_unlink(parent->d_inode, tmp);
-	} else {
+	} else
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
-	}
 
 	ret = 0;
 bail:
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2..e8df155 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf      = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
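+		/* Free the old CQ buffer only after dropping the lock, as freeing may sleep */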
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd6..57ffa50 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659..03a59534 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c..5a4c364 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@
 					nesvnic->nic_index &&
 					mc_index < max_pft_entries_avaiable) {
 						nes_debug(NES_DBG_NIC_RX,
-					"mc_index=%d skipping nic_index=%d,\
-					used for=%d \n", mc_index,
+					"mc_index=%d skipping nic_index=%d, "
+					"used for=%d \n", mc_index,
 					nesvnic->nic_index,
 					nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d..73225ee 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf8..5246aa4 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	    (cq->notify == IB_CQ_SOLICITED &&
+	     (solicited || entry->status != IB_WC_SUCCESS))) {
 		cq->notify = IB_CQ_NONE;
 		cq->triggered++;
 		/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd1936..23e584f 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs preemptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid  = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@
 		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971..75bfad1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@
 		/* find device (with ACTIVE ports) with fewest ctxts in use */
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
-			unsigned cused = 0, cfree = 0;
+			unsigned cused = 0, cfree = 0, pusable = 0;
 			if (!dd)
 				continue;
 			if (port && port <= dd->num_pports &&
 			    usable(dd->pport + port - 1))
-				dusable = 1;
+				pusable = 1;
 			else
 				for (i = 0; i < dd->num_pports; i++)
 					if (usable(dd->pport + i))
-						dusable++;
-			if (!dusable)
+						pusable++;
+			if (!pusable)
 				continue;
 			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 			     ctxt++)
@@ -1397,7 +1397,7 @@
 					cused++;
 				else
 					cfree++;
-			if (cfree && cused < inuse) {
+			if (pusable && cfree && cused < inuse) {
 				udd = dd;
 				inuse = cused;
 			}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index f99bddc..df7fa25 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -453,17 +453,14 @@
 		goto bail;
 	}
 
-	spin_lock(&dcache_lock);
 	spin_lock(&tmp->d_lock);
 	if (!(d_unhashed(tmp) && tmp->d_inode)) {
-		dget_locked(tmp);
+		dget_dlock(tmp);
 		__d_drop(tmp);
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 		simple_unlink(parent->d_inode, tmp);
 	} else {
 		spin_unlock(&tmp->d_lock);
-		spin_unlock(&dcache_lock);
 	}
 
 	ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29db..774dea8 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74..127a0d5 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443..abd409d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@
 
 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -111,6 +114,21 @@
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 /* for read back, default index is ~5m copper cable */
 static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -314,7 +332,7 @@
 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
 
 /*
- * Per-context kernel registers.  Acess only with qib_read_kreg_ctxt()
+ * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
  * or qib_write_kreg_ctxt()
  */
 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
@@ -544,6 +562,7 @@
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+	u8 bounced;
 };
 
 static struct {
@@ -1677,6 +1697,8 @@
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
+		if (!ppd->dd->cspec->r1)
+			serdes_7322_los_enable(ppd, 0);
 	} else if (ppd->cpspec->qdr_reforce &&
 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@
 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
 		adj_tx_serdes(ppd);
 
-	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
-	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
-		ppd->cpspec->qdr_dfe_on = 1;
-		ppd->cpspec->qdr_dfe_time = 0;
-		/* On link down, reenable QDR adaptation */
-		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
-			ppd->dd->cspec->r1 ?
-				    QDR_STATIC_ADAPT_DOWN_R1 :
-				    QDR_STATIC_ADAPT_DOWN);
+	if (ibclt != IB_7322_LT_STATE_LINKUP) {
+		u8 ltstate = qib_7322_phys_portstate(ibcst);
+		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+					  LinkTrainingState);
+		if (!ppd->dd->cspec->r1 &&
+		    pibclt == IB_7322_LT_STATE_LINKUP &&
+		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+			/* If the link went down (but not into recovery),
+			 * turn LOS back on */
+			serdes_7322_los_enable(ppd, 1);
+		if (!ppd->cpspec->qdr_dfe_on &&
+		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+			ppd->cpspec->qdr_dfe_on = 1;
+			ppd->cpspec->qdr_dfe_time = 0;
+			/* On link down, reenable QDR adaptation */
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_DOWN_R1 :
+					    QDR_STATIC_ADAPT_DOWN);
+			printk(KERN_INFO QIB_DRV_NAME
+				" IB%u:%u re-enabled QDR adaptation "
+				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+		}
 	}
 }
 
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
 /*
  * This is per-pport error handling.
  * will likely get it's own MSIx interrupt (one for each port,
@@ -1840,7 +1881,23 @@
 		    IB_PHYSPORTSTATE_DISABLED)
 			qib_set_ib_7322_lstate(ppd, 0,
 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-		else
+		else {
+			u32 lstate;
+			/*
+			 * We need the current logical link state before
+			 * lflags are set in handle_e_ibstatuschanged.
+			 */
+			lstate = qib_7322_iblink_state(ibcs);
+
+			if (IS_QMH(dd) && !ppd->cpspec->bounced &&
+			    ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			    (lstate >= IB_PORT_INIT &&
+				lstate <= IB_PORT_ACTIVE)) {
+				ppd->cpspec->bounced = 1;
+				qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+					IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+			}
+
 			/*
 			 * Since going into a recovery state causes the link
 			 * state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 				qib_handle_e_ibstatuschanged(ppd, ibcs);
+		}
 	}
 	if (*msg && iserr)
 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@
 				ctxtrbits &= ~rmask;
 				if (dd->rcd[i]) {
 					qib_kreceive(dd->rcd[i], NULL, &npkts);
-					adjust_rcv_timeout(dd->rcd[i], npkts);
 				}
 			}
 			rmask <<= 1;
@@ -2835,7 +2892,6 @@
 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
 
 	qib_kreceive(rcd, NULL, &npkts);
-	adjust_rcv_timeout(rcd, npkts);
 
 	return IRQ_HANDLED;
 }
@@ -3157,6 +3213,10 @@
 	case BOARD_QME7342:
 		n = "InfiniPath_QME7342";
 		break;
+	case 8:
+		n = "InfiniPath_QME7362";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
 	case 15:
 		n = "InfiniPath_QLE7342_TEST";
 		dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@
 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
-		/*
-		 * Set the mask for which bits from the QPN are used
-		 * to select a context number.
-		 */
-		dd->qpn_mask = 0x3f;
 		dd->first_user_ctxt = NUM_IB_PORTS +
 			(qib_n_krcv_queues - 1) * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@
 
 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
-				dd->num_pports > 1 ? 1024U : 2048U);
+	if (qib_rcvhdrcnt)
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+	else
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+				    dd->num_pports > 1 ? 1024U : 2048U);
 }
 
 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@
 }
 
 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
+	/*
+	 * Need to write timeout register before updating rcvhdrhead to ensure
+	 * that the timer is enabled on reception of a packet.
+	 */
+	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+		adjust_rcv_timeout(rcd, npkts);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -5522,7 +5586,7 @@
 		u64 now = get_jiffies_64();
 		if (time_after64(now, pwrup))
 			break;
-		msleep(1);
+		msleep(20);
 	}
 	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 	/*
@@ -5579,6 +5643,7 @@
 	u32 pidx, unit, port, deflt, h1;
 	unsigned long val;
 	int any = 0, seth1;
+	int txdds_size;
 
 	str = txselect_list;
 
@@ -5587,6 +5652,10 @@
 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
 		dd->pport[pidx].cpspec->no_eep = deflt;
 
+	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+	if (IS_QME(dd) || IS_QMH(dd))
+		txdds_size += TXDDS_MFG_SZ;
+
 	while (*nxt && nxt[1]) {
 		str = ++nxt;
 		unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@
 				;
 			continue;
 		}
-		if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+		if (val >= txdds_size)
 			continue;
 		seth1 = 0;
 		h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@
 		return -ENOSPC;
 	}
 	val = simple_strtoul(str, &n, 0);
-	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+				TXDDS_MFG_SZ)) {
 		printk(KERN_INFO QIB_DRV_NAME
 		       "txselect_values must start with a number < %d\n",
-			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
 		return -EINVAL;
 	}
 	strcpy(txselect_list, str);
@@ -5810,7 +5880,8 @@
 		unsigned n, regno;
 		unsigned long flags;
 
-		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+		if (dd->n_krcv_queues < 2 ||
+			!dd->pport[pidx].link_speed_supported)
 			continue;
 
 		ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@
 		ppd++;
 	}
 
-	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
-	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rcvhdrentsize = qib_rcvhdrentsize ?
+		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = qib_rcvhdrsize ?
+		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
 
 	/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@
 		/* make sure we see an updated copy next time around */
 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
 		sleeps++;
-		msleep(1);
+		msleep(20);
 	}
 
 	switch (which) {
@@ -6993,6 +7066,12 @@
 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
 };
 
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+	/* amp, pre, main, post */
+	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
+	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
+};
+
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
 					       unsigned atten)
 {
@@ -7066,6 +7145,16 @@
 		*sdr_dds = &txdds_extra_sdr[idx];
 		*ddr_dds = &txdds_extra_ddr[idx];
 		*qdr_dds = &txdds_extra_qdr[idx];
+	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+					  TXDDS_MFG_SZ)) {
+		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+		printk(KERN_INFO QIB_DRV_NAME
+			" IB%u:%u use idx %u into txdds_mfg\n",
+			ppd->dd->unit, ppd->port, idx);
+		*sdr_dds = &txdds_extra_mfg[idx];
+		*ddr_dds = &txdds_extra_mfg[idx];
+		*qdr_dds = &txdds_extra_mfg[idx];
 	} else {
 		/* this shouldn't happen, it's range checked */
 		*sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7299,30 @@
 	}
 }
 
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
+		ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
+	if (enable)
+		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	else
+		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
-	u64 data;
+	int ret = 0;
+	if (ppd->dd->cspec->r1)
+		ret = serdes_7322_init_old(ppd);
+	else
+		ret = serdes_7322_init_new(ppd);
+	return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
 	u32 le_val;
 
 	/*
@@ -7270,11 +7380,7 @@
 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 
-	data = qib_read_kreg_port(ppd, krp_serdesctrl);
-	/* Turn off IB latency mode */
-	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
-	qib_write_kreg_port(ppd, krp_serdesctrl, data |
-		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+	serdes_7322_los_enable(ppd, 1);
 
 	/* rxbistena; set 0 to avoid effects of it switch later */
 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@
 	return 0;
 }
 
+static int serdes_7322_init_new(struct qib_pportdata *ppd)
+{
+	u64 tstart;
+	u32 le_val, rxcaldone;
+	int chan, chan_done = (1 << SERDES_CHANS) - 1;
+
+	/*
+	 * Initialize the Tx DDS tables.  Also done on every QSFP event,
+	 * for adapters with QSFP.
+	 */
+	init_txdds_table(ppd, 0);
+
+	/* Clear cmode-override, may be set from older driver */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+	/* ensure no tx overrides from earlier driver loads */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
+	/* START OF LSI SUGGESTED SERDES BRINGUP */
+	/* Reset - Calibration Setup */
+	/*       Stop DFE adaptation */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
+	/*       Disable LE1 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
+	/*       Disable autoadapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
+	/*       Disable LE2 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
+	/*       Disable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	/*       Disable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
+	/*       Disable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
+	/*       Disable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
+	/*       Disable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
+	/*       Disable RX Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	/*       Disable RX Offset Calibration */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
+	/*       Select BB CDR */
+	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
+	/*       CDR Step Size */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
+	/*       Enable phase Calibration */
+	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
+	/*       DFE Bandwidth [2:14-12] */
+	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
+	/*       DFE Config (4 taps only) */
+	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
+	/*       Gain Loop Bandwidth */
+	if (!ppd->dd->cspec->r1) {
+		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
+		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
+	} else {
+		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
+	}
+	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
+	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
+	/*       Data Rate Select [5:7-6] (leave as default) */
+	/*       RX Parallel Word Width [3:10-8] (leave as default) */
+
+	/* RX RESET */
+	/*       Single- or Multi-channel reset */
+	/*       RX Analog reset */
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
+	msleep(20);
+	/*       RX Analog reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
+	msleep(20);
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
+	msleep(20);
+
+	/* setup LoS params; these are subsystem, so chan == 5 */
+	/* LoS filter threshold_count on, ch 0-3, set to 8 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+	/* LoS filter threshold_count off, ch 0-3, set to 4 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+	/* LoS filter select enabled */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
+	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+	/* Turn on LOS on initial SERDES init */
+	serdes_7322_los_enable(ppd, 1);
+	/* FLoop LOS gate: PPM filter  enabled */
+	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+	/* RX LATCH CALIBRATION */
+	/*       Enable Eyefinder Phase Calibration latch */
+	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
+	/*       Enable RX Offset Calibration latch */
+	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
+	msleep(20);
+	/*       Start Calibration */
+	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
+	tstart = get_jiffies_64();
+	while (chan_done &&
+	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+		msleep(20);
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
+			    (~chan_done & (1 << chan)) == 0)
+				chan_done &= ~(1 << chan);
+		}
+	}
+	if (chan_done) {
+		printk(KERN_INFO QIB_DRV_NAME
+			 " Serdes %d calibration not done after .5 sec: 0x%x\n",
+			 IBSD(ppd->hw_pidx), chan_done);
+	} else {
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
+				printk(KERN_INFO QIB_DRV_NAME
+					 " Serdes %d chan %d calibration "
+					 "failed\n", IBSD(ppd->hw_pidx), chan);
+		}
+	}
+
+	/*       Turn off Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	msleep(20);
+
+	/* BRING RX UP */
+	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
+	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+	/*       Set LE2 Loop bandwidth */
+	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
+	/*       Enable LE2 */
+	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
+	msleep(20);
+	/*       Enable H0 only */
+	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
+	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+	/*       Enable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	msleep(20);
+	/*       Set Frequency Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+	/*       Enable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
+	/*       Set Timing Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+	/*       Enable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
+	msleep(50);
+	/*       Enable DFE
+	 *       Set receive adaptation mode.  SDR and DDR adaptation are
+	 *       always on, and QDR is initially enabled; later disabled.
+	 */
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+			    ppd->dd->cspec->r1 ?
+			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+	ppd->cpspec->qdr_dfe_on = 1;
+	/*       Disable LE1  */
+	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
+	/*       Disable auto adapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
+	msleep(20);
+	/*       Enable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
+	/*       Enable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
+	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+	/* VGA output common mode */
+	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+
+	return 0;
+}
+
 /* start adjust QMH serdes parameters */
 
 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b5039..7896afb 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -92,9 +92,11 @@
 /* set number of contexts we'll actually use */
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
-	if (!qib_cfgctxts)
+	if (!qib_cfgctxts) {
 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
-	else if (qib_cfgctxts < dd->num_pports)
+		if (dd->cfgctxts > dd->ctxtcnt)
+			dd->cfgctxts = dd->ctxtcnt;
+	} else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
 		dd->cfgctxts = qib_cfgctxts;
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a4082..a693c56 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@
 			/* start a 75msec timer to clear symbol errors */
 			mod_timer(&ppd->symerr_clear_timer,
 				  msecs_to_jiffies(75));
-		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			   !(ppd->lflags & QIBL_LINKACTIVE)) {
 			/* active, but not active deferred */
 			qib_hol_up(ppd); /* useful only for 6120 now */
 			*ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb1..8fd19a4 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,6 +151,8 @@
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
@@ -170,19 +171,34 @@
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		page sizes are uniform power of 2 so no loop is necessary
+		entries_spanned_by_off is the number of times the loop below
+		would have executed.
+		*/
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	isge->mr = mr;
 	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
@@ -214,7 +230,6 @@
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -231,6 +246,8 @@
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
@@ -248,19 +265,34 @@
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		page sizes are uniform power of 2 so no loop is necessary
+		entries_spanned_by_off is the number of times the loop below
+		would have executed.
+		*/
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	sge->mr = mr;
 	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@
 	sge->m = m;
 	sge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f..5ad224e 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
 	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
 		if (ppd->lid != lid)
 			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
 		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@
 	msl = pip->neighbormtu_mastersmsl & 0xF;
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
 		spin_lock_irqsave(&ibp->lock, flags);
 		if (ibp->sm_ah) {
 			if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@
 		if (lwe == 0xFF)
 			lwe = ppd->link_width_supported;
 		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
-			goto err;
-		set_link_width_enabled(ppd, lwe);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lwe != ppd->link_width_enabled)
+			set_link_width_enabled(ppd, lwe);
 	}
 
 	lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@
 		if (lse == 15)
 			lse = ppd->link_speed_supported;
 		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
-			goto err;
-		set_link_speed_enabled(ppd, lse);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lse != ppd->link_speed_enabled)
+			set_link_speed_enabled(ppd, lse);
 	}
 
 	/* Set link down default state. */
@@ -738,7 +740,7 @@
 					IB_LINKINITCMD_POLL);
 		break;
 	default:
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@
 
 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
-		goto err;
-	qib_set_mtu(ppd, mtu);
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else
+		qib_set_mtu(ppd, mtu);
 
 	/* Set operational VLs */
 	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
 	if (vls) {
 		if (vls > ppd->vls_supported)
-			goto err;
-		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else
+			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
 	}
 
 	if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@
 
 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	if (set_overrunthreshold(ppd, (ore & 0xF)))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 
@@ -792,7 +796,7 @@
 	state = pip->linkspeed_portstate & 0xF;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	/*
 	 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@
 			lstate = QIB_IB_LINKDOWN;
 		else if (lstate == 3)
 			lstate = QIB_IB_LINKDOWN_DISABLE;
-		else
-			goto err;
+		else {
+			smp->status |= IB_SMP_INVALID_FIELD;
+			break;
+		}
 		spin_lock_irqsave(&ppd->lflags_lock, flags);
 		ppd->lflags &= ~QIBL_LINKV;
 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@
 		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
 		break;
 	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ret = subn_get_portinfo(smp, ibdev, port);
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f..08944e2 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
 /* Fast memory region */
 struct qib_fmr {
 	struct ib_fmr ibfmr;
-	u8 page_shift;
 	struct qib_mregion mr;        /* must be last */
 };
 
@@ -107,6 +106,7 @@
 			goto bail;
 	}
 	mr->mr.mapsz = m;
+	mr->mr.page_shift = 0;
 	mr->mr.max_segs = count;
 
 	/*
@@ -231,6 +231,8 @@
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
 
+	if (is_power_of_2(umem->page_size))
+		mr->mr.page_shift = ilog2(umem->page_size);
 	m = 0;
 	n = 0;
 	list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@
 	fmr->mr.offset = 0;
 	fmr->mr.access_flags = mr_access_flags;
 	fmr->mr.max_segs = fmr_attr->max_pages;
-	fmr->page_shift = fmr_attr->page_shift;
+	fmr->mr.page_shift = fmr_attr->page_shift;
 
 	atomic_set(&fmr->mr.refcount, 0);
 	ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@
 	spin_lock_irqsave(&rkt->lock, flags);
 	fmr->mr.user_base = iova;
 	fmr->mr.iova = iova;
-	ps = 1 << fmr->page_shift;
+	ps = 1 << fmr->mr.page_shift;
 	fmr->mr.length = list_len * ps;
 	m = 0;
 	n = 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851..e16751f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 					struct qpn_map *map, unsigned off,
-					unsigned r)
+					unsigned n)
 {
 	if (qpt->mask) {
 		off++;
-		if ((off & qpt->mask) >> 1 != r)
-			off = ((off & qpt->mask) ?
-				(off | qpt->mask) + 1 : off) | (r << 1);
+		if (((off & qpt->mask) >> 1) >= n)
+			off = (off | qpt->mask) + 2;
 	} else
 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
 	return off;
@@ -123,7 +122,6 @@
 	u32 i, offset, max_scan, qpn;
 	struct qpn_map *map;
 	u32 ret;
-	int r;
 
 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -139,15 +137,11 @@
 		goto bail;
 	}
 
-	r = smp_processor_id();
-	if (r >= dd->n_krcv_queues)
-		r %= dd->n_krcv_queues;
-	qpn = qpt->last + 1;
+	qpn = qpt->last + 2;
 	if (qpn >= QPN_MAX)
 		qpn = 2;
-	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
-		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
-			(r << 1);
+	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+		qpn = (qpn | qpt->mask) + 2;
 	offset = qpn & BITS_PER_PAGE_MASK;
 	map = &qpt->map[qpn / BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@
 				ret = qpn;
 				goto bail;
 			}
-			offset = find_next_offset(qpt, map, offset, r);
+			offset = find_next_offset(qpt, map, offset,
+				dd->n_krcv_queues);
 			qpn = mk_qpn(qpt, map, offset);
 			/*
 			 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@
 			if (qpt->nmaps == QPNMAP_ENTRIES)
 				break;
 			map = &qpt->map[qpt->nmaps++];
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else if (map < &qpt->map[qpt->nmaps]) {
 			++map;
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else {
 			map = &qpt->map[0];
-			offset = qpt->mask ? (r << 1) : 2;
+			offset = 2;
 		}
 		qpn = mk_qpn(qpt, map, offset);
 	}
@@ -468,6 +463,10 @@
 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}
+
+	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
+		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@
 		}
 		qp->ibqp.qp_num = err;
 		qp->port_num = init_attr->port_num;
-		qp->processor_id = smp_processor_id();
 		qib_reset_qp(qp, init_attr->qp_type);
 		break;
 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb71..8245237 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@
 			    struct qib_ctxtdata *rcd)
 {
 	struct qib_swqe *wqe;
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	enum ib_wc_status status;
 	unsigned long flags;
 	int diff;
@@ -1414,6 +1415,29 @@
 	u32 aeth;
 	u64 val;
 
+	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+		/*
+		 * If the ACK'd PSN is on the SDMA busy list, try to make
+		 * progress to reclaim SDMA credits.
+		 */
+		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+			/*
+			 * If the send tasklet is not running, attempt to
+			 * make progress on the SDMA queue.
+			 */
+			if (!(qp->s_flags & QIB_S_BUSY)) {
+				/* Acquire SDMA Lock */
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				/* Invoke sdma make progress */
+				qib_sdma_make_progress(ppd);
+				/* Release SDMA Lock */
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&qp->s_lock, flags);
 
 	/* Ignore invalid responses. */
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2..4a51fd1 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@
 	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
 	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
 
-	/* Get the number of bytes the message was padded by. */
+	/*
+	 * Get the number of bytes the message was padded by
+	 * and drop incomplete packets.
+	 */
 	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-	if (unlikely(tlen < (hdrsize + pad + 4))) {
-		/* Drop incomplete packets. */
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	if (unlikely(tlen < (hdrsize + pad + 4)))
+		goto drop;
+
 	tlen -= hdrsize + pad + 4;
 
 	/*
@@ -460,10 +461,8 @@
 	 */
 	if (qp->ibqp.qp_num) {
 		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			     hdr->lrh[3] == IB_LID_PERMISSIVE))
+			goto drop;
 		if (qp->ibqp.qp_num > 1) {
 			u16 pkey1, pkey2;
 
@@ -476,7 +475,7 @@
 						0xF,
 					      src_qp, qp->ibqp.qp_num,
 					      hdr->lrh[3], hdr->lrh[1]);
-				goto bail;
+				return;
 			}
 		}
 		if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@
 				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
 				      src_qp, qp->ibqp.qp_num,
 				      hdr->lrh[3], hdr->lrh[1]);
-			goto bail;
+			return;
 		}
 		/* Drop invalid MAD packets (see 13.5.3.1). */
 		if (unlikely(qp->ibqp.qp_num == 1 &&
 			     (tlen != 256 ||
-			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+			goto drop;
 	} else {
 		struct ib_smp *smp;
 
 		/* Drop invalid MAD packets (see 13.5.3.1). */
-		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+			goto drop;
 		smp = (struct ib_smp *) data;
 		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
 		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
-		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			goto drop;
 	}
 
 	/*
@@ -519,14 +512,12 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		hdrsize += sizeof(u32);
+		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
-	} else {
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	} else
+		goto drop;
 
 	/*
 	 * A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
-		ibp->n_pkt_drops++;
-		return;
+		goto drop;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+	return;
+
+drop:
+	ibp->n_pkt_drops++;
 }
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06..66208bc 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@
 
 		kmem_cache_free(pq->pkt_slab, pkt);
 	}
+	INIT_LIST_HEAD(list);
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c12..63b22a9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@
 	int access_flags;
 	u32 max_segs;           /* number of qib_segs in all the arrays */
 	u32 mapsz;              /* size of the map array */
+	u8  page_shift;         /* 0 - non-uniform or non-power-of-2 sizes */
 	atomic_t refcount;
 	struct qib_segarray *map[0];    /* the segments */
 };
@@ -435,7 +436,6 @@
 	spinlock_t r_lock;      /* used for APM */
 	spinlock_t s_lock;
 	atomic_t s_dma_busy;
-	unsigned processor_id;	/* Processor ID QP is bound to */
 	u32 s_flags;
 	u32 s_cur_size;         /* size of send packet in bytes */
 	u32 s_len;              /* total length of s_sge */
@@ -813,13 +813,8 @@
  */
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
-	if (qib_send_ok(qp)) {
-		if (qp->processor_id == smp_processor_id())
-			queue_work(qib_wq, &qp->s_work);
-		else
-			queue_work_on(qp->processor_id,
-				      qib_wq, &qp->s_work);
-	}
+	if (qib_send_ok(qp))
+		queue_work(qib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc..55855ee 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
 config INFINIBAND_IPOIB
 	tristate "IP-over-InfiniBand"
 	depends on NETDEVICES && INET && (IPV6 || IPV6=n)
-	select INET_LRO
 	---help---
 	  Support for the IP-over-InfiniBand protocol (IPoIB). This
 	  transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983..ab97f92 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>
 
 /* constants */
 
@@ -100,9 +100,6 @@
 	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,
 
-	IPOIB_MAX_LRO_DESCRIPTORS = 8,
-	IPOIB_LRO_MAX_AGGR 	  = 64,
-
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
 };
@@ -262,11 +259,6 @@
 	u16     max_coalesced_frames;
 };
 
-struct ipoib_lro {
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else.  lock nests inside
@@ -352,8 +344,6 @@
 	int	hca_caps;
 	struct ipoib_ethtool_st ethtool;
 	struct timer_list poll_timer;
-
-	struct ipoib_lro lro;
 };
 
 struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb10041..c1c49f2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@
 
 		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
 			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			priv->dev->features |= NETIF_F_GRO;
 			if (priv->hca_caps & IB_DEVICE_UD_TSO)
 				dev->features |= NETIF_F_TSO;
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c..19f7f52 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@
 	return 0;
 }
 
-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
-	"LRO aggregated", "LRO flushed",
-	"LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-	switch (stringset) {
-	case ETH_SS_STATS:
-		memcpy(data, *ipoib_stats_keys,	sizeof(ipoib_stats_keys));
-		break;
-	}
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return ARRAY_SIZE(ipoib_stats_keys);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
-				struct ethtool_stats *stats, uint64_t *data)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int index = 0;
-
-	/* Get LRO statistics */
-	data[index++] = priv->lro.lro_mgr.stats.aggregated;
-	data[index++] = priv->lro.lro_mgr.stats.flushed;
-	if (priv->lro.lro_mgr.stats.flushed)
-		data[index++] = priv->lro.lro_mgr.stats.aggregated /
-				priv->lro.lro_mgr.stats.flushed;
-	else
-		data[index++] = 0;
-	data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
-	return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_rx_csum		= ipoib_get_rx_csum,
 	.set_tso		= ipoib_set_tso,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
-	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ipoib_set_flags,
-	.get_strings		= ipoib_get_strings,
-	.get_sset_count		= ipoib_get_sset_count,
-	.get_ethtool_stats	= ipoib_get_ethtool_stats,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa7190..806d029 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@
 	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	if (dev->features & NETIF_F_LRO)
-		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
-	else
-		netif_receive_skb(skb);
+	napi_gro_receive(&priv->napi, skb);
 
 repost:
 	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@
 	}
 
 	if (done < budget) {
-		if (dev->features & NETIF_F_LRO)
-			lro_flush_all(&priv->lro.lro_mgr);
-
 		napi_complete(napi);
 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc7..7a07a72 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro,  "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -976,54 +967,6 @@
 	.create	= ipoib_hard_header,
 };
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev		 = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open		 = ipoib_open,
 	.ndo_stop		 = ipoib_stop,
@@ -1067,8 +1010,6 @@
 
 	priv->dev = dev;
 
-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);
 
 	mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}
 
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	priv->dev->features |= NETIF_F_GRO;
 
 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347..4b62105 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@
 	wait_for_completion(&target->done);
 }
 
+static bool srp_change_state(struct srp_target_port *target,
+			    enum srp_target_state old,
+			    enum srp_target_state new)
+{
+	bool changed = false;
+
+	spin_lock_irq(&target->lock);
+	if (target->state == old) {
+		target->state = new;
+		changed = true;
+	}
+	spin_unlock_irq(&target->lock);
+	return changed;
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
 		container_of(work, struct srp_target_port, work);
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_DEAD) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 		return;
-	}
-	target->state = SRP_TARGET_REMOVED;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
@@ -539,33 +549,34 @@
 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
+	unsigned long flags;
+
 	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
+	spin_lock_irqsave(&target->lock, flags);
+	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
+	list_add_tail(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
 	req->scmnd->result = DID_RESET << 16;
 	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req);
+	srp_remove_req(target, req, 0);
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req, *tmp;
 	struct ib_wc wc;
-	int ret;
+	int i, ret;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_LIVE) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
-	}
-	target->state = SRP_TARGET_CONNECTING;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	srp_disconnect_target(target);
 	/*
@@ -590,27 +601,23 @@
 	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		srp_reset_req(target, req);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd)
+			srp_reset_req(target, req);
+	}
 
-	target->rx_head	 = 0;
-	target->tx_head	 = 0;
-	target->tx_tail  = 0;
+	INIT_LIST_HEAD(&target->free_tx);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		ret = 0;
-		target->state = SRP_TARGET_LIVE;
-	} else
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 		ret = -EAGAIN;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	return ret;
 
@@ -620,17 +627,20 @@
 
 	/*
 	 * We couldn't reconnect, so kill our target port off.
-	 * However, we have to defer the real removal because we might
-	 * be in the context of the SCSI error handler now, which
-	 * would deadlock if we call scsi_remove_host().
+	 * However, we have to defer the real removal because we
+	 * are in the context of the SCSI error handler now, which
+	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -758,7 +768,7 @@
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@
 			buf->desc_list[i].va  =
 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
-				cpu_to_be32(dev->mr->rkey);
+				cpu_to_be32(target->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
 			datalen += dma_len;
 		}
@@ -796,7 +806,7 @@
 		buf->table_desc.va  =
 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+			cpu_to_be32(target->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
@@ -812,9 +822,23 @@
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possibly a credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@
 
 	srp_send_completion(target->send_cq, target);
 
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+	if (list_empty(&target->free_tx))
 		return NULL;
 
 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}
 
-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
+	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+	list_del(&iu->list);
 	return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
 
 	list.addr   = iu->dma;
 	list.length = len;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next       = NULL;
-	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+	wr.wr_id      = (uintptr_t) iu;
 	wr.sg_list    = &list;
 	wr.num_sge    = 1;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
 {
-	unsigned long flags;
-	struct srp_iu *iu;
-	struct ib_sge list;
 	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	next	 = target->rx_head & SRP_RQ_MASK;
-	wr.wr_id = next;
-	iu	 = target->rx_ring[next];
+	struct ib_sge list;
 
 	list.addr   = iu->dma;
 	list.length = iu->size;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next     = NULL;
+	wr.wr_id    = (uintptr_t) iu;
 	wr.sg_list  = &list;
 	wr.num_sge  = 1;
 
-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
+	return ib_post_recv(target->qp, &wr, &bad_wr);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@
 	struct srp_request *req;
 	struct scsi_cmnd *scmnd;
 	unsigned long flags;
-	s32 delta;
-
-	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	target->req_lim += delta;
-
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		spin_lock_irqsave(&target->lock, flags);
+		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+		spin_unlock_irqrestore(&target->lock, flags);
+
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
 	}
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
+	int err;
 
-	dev = target->srp_host->srp_dev->dev;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}
 
-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }
 
@@ -1032,14 +1020,11 @@
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
-	struct ib_device *dev;
-	struct srp_iu *iu;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
+	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
 	int res;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id];
-
-	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
 				   DMA_FROM_DEVICE);
 
@@ -1080,7 +1065,7 @@
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);
 
-	res = srp_post_recv(target);
+	res = srp_post_recv(target, iu);
 	if (res != 0)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@
 			break;
 		}
 
-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-			    void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				      list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu)
 		goto err;
 
@@ -1151,11 +1146,8 @@
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done     = done;
 	scmnd->result        = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@
 
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1216,6 +1209,8 @@
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}
 
 	return 0;
@@ -1377,7 +1372,8 @@
 			break;
 
 		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			target->status = srp_post_recv(target);
+			struct srp_iu *iu = target->rx_ring[i];
+			target->status = srp_post_recv(target, iu);
 			if (target->status)
 				break;
 		}
@@ -1442,25 +1438,24 @@
 }
 
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
-		goto out;
-	}
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;
 
-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);
 
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(&target->lock);
+
 	if (!iu)
-		goto out;
+		return -1;
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
-	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag 	= req->index;
+	tsk_mgmt->task_tag	= req_tag;
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+		return -1;
+	}
 
-	req->tsk_mgmt = iu;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
-
-	if (!wait_for_completion_timeout(&req->done,
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
 	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
-		return -1;
-
-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
-	return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (target->qp_in_error)
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
-		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
 
 	return ret;
 }
@@ -1539,26 +1510,23 @@
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req, *tmp;
+	int i;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
-		return FAILED;
-	if (req->tsk_status)
+	if (target->tsk_mgmt_status)
 		return FAILED;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	}
 
 	return SUCCESS;
 }
@@ -1987,9 +1955,12 @@
 	target->io_class   = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host  = target_host;
 	target->srp_host   = host;
+	target->lkey	   = host->srp_dev->mr->lkey;
+	target->rkey	   = host->srp_dev->mr->rkey;
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@
 		 */
 		spin_lock(&host->target_lock);
 		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(target->scsi_host->host_lock);
+			spin_lock_irq(&target->lock);
 			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(target->scsi_host->host_lock);
+			spin_unlock_irq(&target->lock);
 		}
 		spin_unlock(&host->target_lock);
 
@@ -2258,8 +2229,7 @@
 {
 	int ret;
 
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
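
The ib_srp.c changes above replace the tx_head/tx_tail ring indices with a free list of IUs guarded by the new per-target spinlock, and fold SRP credit (req_lim) accounting into IU allocation and release. For readers unfamiliar with the pattern, a minimal generic sketch of the same idea follows; struct buf_pool, pool_get() and pool_put() are illustrative names, not the driver's real identifiers.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct buf {
	struct list_head list;
};

struct buf_pool {
	spinlock_t	 lock;
	struct list_head free;		/* unused buffers */
	int		 credits;	/* remote request limit */
};

/* Take a buffer; consume a credit unless this is a response. */
static struct buf *pool_get(struct buf_pool *p, bool is_response)
{
	struct buf *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	if (!list_empty(&p->free) && (is_response || p->credits > 0)) {
		b = list_first_entry(&p->free, struct buf, list);
		list_del(&b->list);
		if (!is_response)
			--p->credits;
	}
	spin_unlock_irqrestore(&p->lock, flags);
	return b;
}

/* Return an unsent buffer; give the credit back unless it was a response. */
static void pool_put(struct buf_pool *p, struct buf *b, bool is_response)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	list_add(&b->list, &p->free);
	if (!is_response)
		++p->credits;
	spin_unlock_irqrestore(&p->lock, flags);
}
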
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9..9dc6fc3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@
 
 	SRP_RQ_SHIFT    	= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-	SRP_RQ_MASK		= SRP_RQ_SIZE - 1,
 
 	SRP_SQ_SIZE		= SRP_RQ_SIZE,
-	SRP_SQ_MASK		= SRP_SQ_SIZE - 1,
 	SRP_RSP_SQ_SIZE		= 1,
 	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE	= 1,
 	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ		= ~0U,
+	SRP_TAG_TSK_MGMT	= 1U << 31,
 
 	SRP_FMR_SIZE		= 256,
 	SRP_FMR_POOL_SIZE	= 1024,
@@ -113,15 +112,29 @@
 	struct list_head	list;
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
-	struct srp_iu	       *tsk_mgmt;
 	struct ib_pool_fmr     *fmr;
-	struct completion	done;
 	short			index;
-	u8			cmd_done;
-	u8			tsk_status;
 };
 
 struct srp_target_port {
+	/* These are RW in the hot path, and commonly used together */
+	struct list_head	free_tx;
+	struct list_head	free_reqs;
+	spinlock_t		lock;
+	s32			req_lim;
+
+	/* These are read-only in the hot path */
+	struct ib_cq	       *send_cq ____cacheline_aligned_in_smp;
+	struct ib_cq	       *recv_cq;
+	struct ib_qp	       *qp;
+	u32			lkey;
+	u32			rkey;
+	enum srp_target_state	state;
+
+	/* Everything above this point is used in the hot path of
+	 * command processing. Try to keep them packed into cachelines.
+	 */
+
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
@@ -138,24 +151,13 @@
 	int			path_query_id;
 
 	struct ib_cm_id	       *cm_id;
-	struct ib_cq	       *recv_cq;
-	struct ib_cq	       *send_cq;
-	struct ib_qp	       *qp;
 
 	int			max_ti_iu_len;
-	s32			req_lim;
 
 	int			zero_req_lim;
 
-	unsigned		rx_head;
-	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
-
-	unsigned		tx_head;
-	unsigned		tx_tail;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
-
-	struct list_head	free_reqs;
-	struct list_head	req_queue;
+	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct	work;
@@ -163,16 +165,18 @@
 	struct list_head	list;
 	struct completion	done;
 	int			status;
-	enum srp_target_state	state;
 	int			qp_in_error;
+
+	struct completion	tsk_mgmt_done;
+	u8			tsk_mgmt_status;
 };
 
 struct srp_iu {
+	struct list_head	list;
 	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
-	enum srp_iu_type	type;
 };
 
 #endif /* IB_SRP_H */
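
The ib_srp.h reshuffle above groups the fields that are read-write in the I/O fast path at the front of struct srp_target_port and starts the read-mostly group on a fresh cache line. A generic illustration of that layout technique, with invented field names:

#include <linux/cache.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct hotpath_example {
	/* Read-write in the hot path; keeping the lock next to the data
	 * it protects lets one cache line cover both. */
	spinlock_t		lock;
	struct list_head	free_items;
	int			credits;

	/* Read-only in the hot path; starting a new cache line stops
	 * writes to the fields above from bouncing this line around. */
	void		       *queue ____cacheline_aligned_in_smp;
	u32			key;

	/* Cold configuration and teardown state can follow. */
	char			name[32];
};
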
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 7ad212d..09614ce 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -5,7 +5,7 @@
 # Each configuration option enables a list of files.
 
 obj-$(CONFIG_INPUT)		+= input-core.o
-input-core-objs := input.o input-compat.o ff-core.o
+input-core-y := input.o input-compat.o input-mt.o ff-core.o
 
 obj-$(CONFIG_INPUT_FF_MEMLESS)	+= ff-memless.o
 obj-$(CONFIG_INPUT_POLLDEV)	+= input-polldev.o
diff --git a/drivers/input/apm-power.c b/drivers/input/apm-power.c
index 7d61a96..e90ee3d 100644
--- a/drivers/input/apm-power.c
+++ b/drivers/input/apm-power.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/slab.h>
@@ -23,8 +25,7 @@
 	switch (keycode) {
 	case KEY_SUSPEND:
 		apm_queue_event(APM_USER_SUSPEND);
-
-		printk(KERN_INFO "apm-power: Requesting system suspend...\n");
+		pr_info("Requesting system suspend...\n");
 		break;
 	default:
 		break;
@@ -65,18 +66,15 @@
 
 	error = input_register_handle(handle);
 	if (error) {
-		printk(KERN_ERR
-			"apm-power: Failed to register input power handler, "
-			"error %d\n", error);
+		pr_err("Failed to register input power handler, error %d\n",
+		       error);
 		kfree(handle);
 		return error;
 	}
 
 	error = input_open_device(handle);
 	if (error) {
-		printk(KERN_ERR
-			"apm-power: Failed to open input power device, "
-			"error %d\n", error);
+		pr_err("Failed to open input power device, error %d\n", error);
 		input_unregister_handle(handle);
 		kfree(handle);
 		return error;
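
The apm-power.c conversion above (and the similar ones below) relies on the kernel's pr_fmt() hook: when a file defines pr_fmt() before its includes, every pr_err()/pr_info()/pr_debug() in that file picks up the prefix automatically, so the hand-written "apm-power: " strings can go. A tiny hypothetical module showing the effect:

/* Must be defined before any #include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init prefix_demo_init(void)
{
	pr_info("loaded\n");	/* printed as e.g. "prefix_demo: loaded" */
	return 0;
}

static void __exit prefix_demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(prefix_demo_init);
module_exit(prefix_demo_exit);
MODULE_LICENSE("GPL");
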
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index f7c5c14..cd4e667 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -26,6 +26,8 @@
  * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/input.h>
@@ -38,8 +40,8 @@
 
 static void evbug_event(struct input_handle *handle, unsigned int type, unsigned int code, int value)
 {
-	printk(KERN_DEBUG "evbug.c: Event. Dev: %s, Type: %d, Code: %d, Value: %d\n",
-		dev_name(&handle->dev->dev), type, code, value);
+	printk(KERN_DEBUG pr_fmt("Event. Dev: %s, Type: %d, Code: %d, Value: %d\n"),
+	       dev_name(&handle->dev->dev), type, code, value);
 }
 
 static int evbug_connect(struct input_handler *handler, struct input_dev *dev,
@@ -64,10 +66,10 @@
 	if (error)
 		goto err_unregister_handle;
 
-	printk(KERN_DEBUG "evbug.c: Connected device: %s (%s at %s)\n",
-		dev_name(&dev->dev),
-		dev->name ?: "unknown",
-		dev->phys ?: "unknown");
+	printk(KERN_DEBUG pr_fmt("Connected device: %s (%s at %s)\n"),
+	       dev_name(&dev->dev),
+	       dev->name ?: "unknown",
+	       dev->phys ?: "unknown");
 
 	return 0;
 
@@ -80,8 +82,8 @@
 
 static void evbug_disconnect(struct input_handle *handle)
 {
-	printk(KERN_DEBUG "evbug.c: Disconnected device: %s\n",
-		dev_name(&handle->dev->dev));
+	printk(KERN_DEBUG pr_fmt("Disconnected device: %s\n"),
+	       dev_name(&handle->dev->dev));
 
 	input_close_device(handle);
 	input_unregister_handle(handle);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 68f09a8..c8471a2 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -8,6 +8,8 @@
  * the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define EVDEV_MINOR_BASE	64
 #define EVDEV_MINORS		32
 #define EVDEV_MIN_BUFFER_SIZE	64U
@@ -522,12 +524,11 @@
 	if (type == EV_KEY && size == OLD_KEY_MAX) {
 		len = OLD_KEY_MAX;
 		if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
-			printk(KERN_WARNING
-				"evdev.c(EVIOCGBIT): Suspicious buffer size %u, "
-				"limiting output to %zu bytes. See "
-				"http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
-				OLD_KEY_MAX,
-				BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
+			pr_warning("(EVIOCGBIT): Suspicious buffer size %u, "
+				   "limiting output to %zu bytes. See "
+				   "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
+				   OLD_KEY_MAX,
+				   BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
 	}
 
 	return bits_to_user(bits, len, size, p, compat_mode);
@@ -686,6 +687,10 @@
 #define EVIOC_MASK_SIZE(nr)	((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
 	switch (EVIOC_MASK_SIZE(cmd)) {
 
+	case EVIOCGPROP(0):
+		return bits_to_user(dev->propbit, INPUT_PROP_MAX,
+				    size, p, compat_mode);
+
 	case EVIOCGKEY(0):
 		return bits_to_user(dev->key, KEY_MAX, size, p, compat_mode);
 
@@ -897,7 +902,7 @@
 			break;
 
 	if (minor == EVDEV_MINORS) {
-		printk(KERN_ERR "evdev: no more free evdev devices\n");
+		pr_err("no more free evdev devices\n");
 		return -ENFILE;
 	}
 
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 03078c0..3367f76 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -23,7 +23,7 @@
 
 /* #define DEBUG */
 
-#define debug(format, arg...) pr_debug("ff-core: " format "\n", ## arg)
+#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
 
 #include <linux/input.h>
 #include <linux/module.h>
@@ -116,7 +116,7 @@
 
 	if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
 	    !test_bit(effect->type, dev->ffbit)) {
-		debug("invalid or not supported effect type in upload");
+		pr_debug("invalid or not supported effect type in upload\n");
 		return -EINVAL;
 	}
 
@@ -124,7 +124,7 @@
 	    (effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
 	     effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
 	     !test_bit(effect->u.periodic.waveform, dev->ffbit))) {
-		debug("invalid or not supported wave form in upload");
+		pr_debug("invalid or not supported wave form in upload\n");
 		return -EINVAL;
 	}
 
@@ -246,7 +246,7 @@
 	struct ff_device *ff = dev->ff;
 	int i;
 
-	debug("flushing now");
+	pr_debug("flushing now\n");
 
 	mutex_lock(&ff->mutex);
 
@@ -315,8 +315,7 @@
 	int i;
 
 	if (!max_effects) {
-		printk(KERN_ERR
-		       "ff-core: cannot allocate device without any effects\n");
+		pr_err("cannot allocate device without any effects\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 1d881c9..117a59a 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -23,7 +23,7 @@
 
 /* #define DEBUG */
 
-#define debug(format, arg...) pr_debug("ff-memless: " format "\n", ## arg)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/slab.h>
 #include <linux/input.h>
@@ -129,7 +129,7 @@
 	int events = 0;
 	int i;
 
-	debug("calculating next timer");
+	pr_debug("calculating next timer\n");
 
 	for (i = 0; i < FF_MEMLESS_EFFECTS; i++) {
 
@@ -149,10 +149,10 @@
 	}
 
 	if (!events) {
-		debug("no actions");
+		pr_debug("no actions\n");
 		del_timer(&ml->timer);
 	} else {
-		debug("timer set");
+		pr_debug("timer set\n");
 		mod_timer(&ml->timer, earliest);
 	}
 }
@@ -173,8 +173,8 @@
 	if (envelope->attack_length &&
 	    time_before(now,
 			state->play_at + msecs_to_jiffies(envelope->attack_length))) {
-		debug("value = 0x%x, attack_level = 0x%x", value,
-		      envelope->attack_level);
+		pr_debug("value = 0x%x, attack_level = 0x%x\n",
+			 value, envelope->attack_level);
 		time_from_level = jiffies_to_msecs(now - state->play_at);
 		time_of_envelope = envelope->attack_length;
 		envelope_level = min_t(__s16, envelope->attack_level, 0x7fff);
@@ -191,13 +191,13 @@
 
 	difference = abs(value) - envelope_level;
 
-	debug("difference = %d", difference);
-	debug("time_from_level = 0x%x", time_from_level);
-	debug("time_of_envelope = 0x%x", time_of_envelope);
+	pr_debug("difference = %d\n", difference);
+	pr_debug("time_from_level = 0x%x\n", time_from_level);
+	pr_debug("time_of_envelope = 0x%x\n", time_of_envelope);
 
 	difference = difference * time_from_level / time_of_envelope;
 
-	debug("difference = %d", difference);
+	pr_debug("difference = %d\n", difference);
 
 	return value < 0 ?
 		-(difference + envelope_level) : (difference + envelope_level);
@@ -215,8 +215,7 @@
 	if (effect_type == FF_PERIODIC && test_bit(FF_RUMBLE, ff->ffbit))
 		return FF_RUMBLE;
 
-	printk(KERN_ERR
-	       "ff-memless: invalid type in get_compatible_type()\n");
+	pr_err("invalid type in get_compatible_type()\n");
 
 	return 0;
 }
@@ -312,7 +311,7 @@
 		break;
 
 	default:
-		printk(KERN_ERR "ff-memless: invalid type in ml_combine_effects()\n");
+		pr_err("invalid type in ml_combine_effects()\n");
 		break;
 	}
 
@@ -406,7 +405,7 @@
 	struct ml_device *ml = dev->ff->private;
 	unsigned long flags;
 
-	debug("timer: updating effects");
+	pr_debug("timer: updating effects\n");
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	ml_play_effects(ml);
@@ -438,7 +437,7 @@
 	struct ml_effect_state *state = &ml->states[effect_id];
 
 	if (value > 0) {
-		debug("initiated play");
+		pr_debug("initiated play\n");
 
 		__set_bit(FF_EFFECT_STARTED, &state->flags);
 		state->count = value;
@@ -449,7 +448,7 @@
 		state->adj_at = state->play_at;
 
 	} else {
-		debug("initiated stop");
+		pr_debug("initiated stop\n");
 
 		if (test_bit(FF_EFFECT_PLAYING, &state->flags))
 			__set_bit(FF_EFFECT_ABORTING, &state->flags);
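
The ff-memless.c hunks only switch the logging helpers, but the envelope arithmetic they trace is easy to misread, so a standalone restatement with a worked example may help: during the attack phase the magnitude is interpolated linearly from attack_level up to the programmed value over attack_length milliseconds.

/*
 * Plain-C restatement of the attack-phase interpolation; the demo_ name
 * is invented.  Example: value = 0x7fff, attack_level = 0x1000,
 * attack_length = 100 ms; 50 ms in, the result is
 * 0x1000 + (0x7fff - 0x1000) * 50 / 100 = 0x47ff, i.e. roughly half way up.
 */
static int demo_attack_level(int value, int attack_level,
			     int ms_elapsed, int attack_length)
{
	int magnitude = value < 0 ? -value : value;
	int difference = magnitude - attack_level;

	difference = difference * ms_elapsed / attack_length;

	return value < 0 ? -(difference + attack_level)
			 : difference + attack_level;
}
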
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 46239e4..23cf8fc 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -18,13 +18,11 @@
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/gameport.h>
-#include <linux/wait.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/sched.h>	/* HZ */
 #include <linux/mutex.h>
-#include <linux/freezer.h>
 
 /*#include <asm/io.h>*/
 
@@ -123,7 +121,7 @@
 	}
 
 	gameport_close(gameport);
-	return (cpu_data(raw_smp_processor_id()).loops_per_jiffy *
+	return (this_cpu_read(cpu_info.loops_per_jiffy) *
 		(unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
 
 #else
@@ -234,58 +232,22 @@
 
 static DEFINE_SPINLOCK(gameport_event_lock);	/* protects gameport_event_list */
 static LIST_HEAD(gameport_event_list);
-static DECLARE_WAIT_QUEUE_HEAD(gameport_wait);
-static struct task_struct *gameport_task;
 
-static int gameport_queue_event(void *object, struct module *owner,
-				enum gameport_event_type event_type)
+static struct gameport_event *gameport_get_event(void)
 {
+	struct gameport_event *event = NULL;
 	unsigned long flags;
-	struct gameport_event *event;
-	int retval = 0;
 
 	spin_lock_irqsave(&gameport_event_lock, flags);
 
-	/*
-	 * Scan event list for the other events for the same gameport port,
-	 * starting with the most recent one. If event is the same we
-	 * do not need add new one. If event is of different type we
-	 * need to add this event and should not look further because
-	 * we need to preseve sequence of distinct events.
-	 */
-	list_for_each_entry_reverse(event, &gameport_event_list, node) {
-		if (event->object == object) {
-			if (event->type == event_type)
-				goto out;
-			break;
-		}
+	if (!list_empty(&gameport_event_list)) {
+		event = list_first_entry(&gameport_event_list,
+					 struct gameport_event, node);
+		list_del_init(&event->node);
 	}
 
-	event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC);
-	if (!event) {
-		pr_err("Not enough memory to queue event %d\n", event_type);
-		retval = -ENOMEM;
-		goto out;
-	}
-
-	if (!try_module_get(owner)) {
-		pr_warning("Can't get module reference, dropping event %d\n",
-			   event_type);
-		kfree(event);
-		retval = -EINVAL;
-		goto out;
-	}
-
-	event->type = event_type;
-	event->object = object;
-	event->owner = owner;
-
-	list_add_tail(&event->node, &gameport_event_list);
-	wake_up(&gameport_wait);
-
-out:
 	spin_unlock_irqrestore(&gameport_event_lock, flags);
-	return retval;
+	return event;
 }
 
 static void gameport_free_event(struct gameport_event *event)
@@ -319,24 +281,8 @@
 	spin_unlock_irqrestore(&gameport_event_lock, flags);
 }
 
-static struct gameport_event *gameport_get_event(void)
-{
-	struct gameport_event *event = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&gameport_event_lock, flags);
-
-	if (!list_empty(&gameport_event_list)) {
-		event = list_first_entry(&gameport_event_list,
-					 struct gameport_event, node);
-		list_del_init(&event->node);
-	}
-
-	spin_unlock_irqrestore(&gameport_event_lock, flags);
-	return event;
-}
-
-static void gameport_handle_event(void)
+static void gameport_handle_events(struct work_struct *work)
 {
 	struct gameport_event *event;
 
@@ -368,6 +314,59 @@
 	mutex_unlock(&gameport_mutex);
 }
 
+static DECLARE_WORK(gameport_event_work, gameport_handle_events);
+
+static int gameport_queue_event(void *object, struct module *owner,
+				enum gameport_event_type event_type)
+{
+	unsigned long flags;
+	struct gameport_event *event;
+	int retval = 0;
+
+	spin_lock_irqsave(&gameport_event_lock, flags);
+
+	/*
+	 * Scan the event list for other events for the same gameport port,
+	 * starting with the most recent one. If the event is the same we
+	 * do not need to add a new one. If the event is of a different type
+	 * we need to add this event and should not look further, because
+	 * we need to preserve the sequence of distinct events.
+	 */
+	list_for_each_entry_reverse(event, &gameport_event_list, node) {
+		if (event->object == object) {
+			if (event->type == event_type)
+				goto out;
+			break;
+		}
+	}
+
+	event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC);
+	if (!event) {
+		pr_err("Not enough memory to queue event %d\n", event_type);
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (!try_module_get(owner)) {
+		pr_warning("Can't get module reference, dropping event %d\n",
+			   event_type);
+		kfree(event);
+		retval = -EINVAL;
+		goto out;
+	}
+
+	event->type = event_type;
+	event->object = object;
+	event->owner = owner;
+
+	list_add_tail(&event->node, &gameport_event_list);
+	schedule_work(&gameport_event_work);
+
+out:
+	spin_unlock_irqrestore(&gameport_event_lock, flags);
+	return retval;
+}
+
 /*
  * Remove all events that have been submitted for a given object,
  * be it a gameport port or a driver.
@@ -419,19 +418,6 @@
 	return child;
 }
 
-static int gameport_thread(void *nothing)
-{
-	set_freezable();
-	do {
-		gameport_handle_event();
-		wait_event_freezable(gameport_wait,
-			kthread_should_stop() || !list_empty(&gameport_event_list));
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
-
 /*
  * Gameport port operations
  */
@@ -814,13 +800,6 @@
 		return error;
 	}
 
-	gameport_task = kthread_run(gameport_thread, NULL, "kgameportd");
-	if (IS_ERR(gameport_task)) {
-		bus_unregister(&gameport_bus);
-		error = PTR_ERR(gameport_task);
-		pr_err("Failed to start kgameportd, error: %d\n", error);
-		return error;
-	}
 
 	return 0;
 }
@@ -828,7 +807,12 @@
 static void __exit gameport_exit(void)
 {
 	bus_unregister(&gameport_bus);
-	kthread_stop(gameport_task);
+
+	/*
+	 * There should not be any outstanding events but work may
+	 * still be scheduled so simply cancel it.
+	 */
+	cancel_work_sync(&gameport_event_work);
 }
 
 subsys_initcall(gameport_init);
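
The gameport.c change above retires the dedicated kgameportd kthread in favour of a regular work item that drains the event list in process context. The general shape of that conversion, with invented names, looks like this:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_event {
	struct list_head node;
	int type;
};

static LIST_HEAD(demo_event_list);
static DEFINE_SPINLOCK(demo_event_lock);

static void demo_handle_events(struct work_struct *work)
{
	struct demo_event *event;
	unsigned long flags;

	for (;;) {
		spin_lock_irqsave(&demo_event_lock, flags);
		if (list_empty(&demo_event_list)) {
			spin_unlock_irqrestore(&demo_event_lock, flags);
			break;
		}
		event = list_first_entry(&demo_event_list,
					 struct demo_event, node);
		list_del_init(&event->node);
		spin_unlock_irqrestore(&demo_event_lock, flags);

		/* handle the event in sleepable process context here */

		kfree(event);
	}
}

static DECLARE_WORK(demo_event_work, demo_handle_events);

static int demo_queue_event(int type)
{
	struct demo_event *event;
	unsigned long flags;

	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;
	event->type = type;

	spin_lock_irqsave(&demo_event_lock, flags);
	list_add_tail(&event->node, &demo_event_list);
	spin_unlock_irqrestore(&demo_event_lock, flags);

	schedule_work(&demo_event_work);
	return 0;
}

/* Module teardown would call cancel_work_sync(&demo_event_work). */
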
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
new file mode 100644
index 0000000..c48c81f
--- /dev/null
+++ b/drivers/input/input-mt.c
@@ -0,0 +1,170 @@
+/*
+ * Input Multitouch Library
+ *
+ * Copyright (c) 2008-2010 Henrik Rydberg
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/input/mt.h>
+#include <linux/slab.h>
+
+#define TRKID_SGN	((TRKID_MAX + 1) >> 1)
+
+/**
+ * input_mt_init_slots() - initialize MT input slots
+ * @dev: input device supporting MT events and finger tracking
+ * @num_slots: number of slots used by the device
+ *
+ * This function allocates all necessary memory for MT slot handling
+ * in the input device, prepares the ABS_MT_SLOT and
+ * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers.
+ * May be called repeatedly. Returns -EINVAL if attempting to
+ * reinitialize with a different number of slots.
+ */
+int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots)
+{
+	int i;
+
+	if (!num_slots)
+		return 0;
+	if (dev->mt)
+		return dev->mtsize != num_slots ? -EINVAL : 0;
+
+	dev->mt = kcalloc(num_slots, sizeof(struct input_mt_slot), GFP_KERNEL);
+	if (!dev->mt)
+		return -ENOMEM;
+
+	dev->mtsize = num_slots;
+	input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
+	input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0, TRKID_MAX, 0, 0);
+	input_set_events_per_packet(dev, 6 * num_slots);
+
+	/* Mark slots as 'unused' */
+	for (i = 0; i < num_slots; i++)
+		input_mt_set_value(&dev->mt[i], ABS_MT_TRACKING_ID, -1);
+
+	return 0;
+}
+EXPORT_SYMBOL(input_mt_init_slots);
+
+/**
+ * input_mt_destroy_slots() - frees the MT slots of the input device
+ * @dev: input device with allocated MT slots
+ *
+ * This function is only needed in the error path, as the input core will
+ * automatically free the MT slots when the device is destroyed.
+ */
+void input_mt_destroy_slots(struct input_dev *dev)
+{
+	kfree(dev->mt);
+	dev->mt = NULL;
+	dev->mtsize = 0;
+	dev->slot = 0;
+	dev->trkid = 0;
+}
+EXPORT_SYMBOL(input_mt_destroy_slots);
+
+/**
+ * input_mt_report_slot_state() - report contact state
+ * @dev: input device with allocated MT slots
+ * @tool_type: the tool type to use in this slot
+ * @active: true if contact is active, false otherwise
+ *
+ * Reports a contact via ABS_MT_TRACKING_ID, and optionally
+ * ABS_MT_TOOL_TYPE. If active is true and the slot is currently
+ * inactive, or if the tool type is changed, a new tracking id is
+ * assigned to the slot. The tool type is only reported if the
+ * corresponding absbit field is set.
+ */
+void input_mt_report_slot_state(struct input_dev *dev,
+				unsigned int tool_type, bool active)
+{
+	struct input_mt_slot *mt;
+	int id;
+
+	if (!dev->mt || !active) {
+		input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+		return;
+	}
+
+	mt = &dev->mt[dev->slot];
+	id = input_mt_get_value(mt, ABS_MT_TRACKING_ID);
+	if (id < 0 || input_mt_get_value(mt, ABS_MT_TOOL_TYPE) != tool_type)
+		id = input_mt_new_trkid(dev);
+
+	input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
+	input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+}
+EXPORT_SYMBOL(input_mt_report_slot_state);
+
+/**
+ * input_mt_report_finger_count() - report contact count
+ * @dev: input device with allocated MT slots
+ * @count: the number of contacts
+ *
+ * Reports the contact count via BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP,
+ * BTN_TOOL_TRIPLETAP and BTN_TOOL_QUADTAP.
+ *
+ * The input core ensures only the KEY events already setup for
+ * this device will produce output.
+ */
+void input_mt_report_finger_count(struct input_dev *dev, int count)
+{
+	input_event(dev, EV_KEY, BTN_TOOL_FINGER, count == 1);
+	input_event(dev, EV_KEY, BTN_TOOL_DOUBLETAP, count == 2);
+	input_event(dev, EV_KEY, BTN_TOOL_TRIPLETAP, count == 3);
+	input_event(dev, EV_KEY, BTN_TOOL_QUADTAP, count == 4);
+}
+EXPORT_SYMBOL(input_mt_report_finger_count);
+
+/**
+ * input_mt_report_pointer_emulation() - common pointer emulation
+ * @dev: input device with allocated MT slots
+ * @use_count: report number of active contacts as finger count
+ *
+ * Performs legacy pointer emulation via BTN_TOUCH, ABS_X, ABS_Y and
+ * ABS_PRESSURE. Touchpad finger count is emulated if use_count is true.
+ *
+ * The input core ensures only the KEY and ABS axes already setup for
+ * this device will produce output.
+ */
+void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
+{
+	struct input_mt_slot *oldest = 0;
+	int oldid = dev->trkid;
+	int count = 0;
+	int i;
+
+	for (i = 0; i < dev->mtsize; ++i) {
+		struct input_mt_slot *ps = &dev->mt[i];
+		int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+		if (id < 0)
+			continue;
+		if ((id - oldid) & TRKID_SGN) {
+			oldest = ps;
+			oldid = id;
+		}
+		count++;
+	}
+
+	input_event(dev, EV_KEY, BTN_TOUCH, count > 0);
+	if (use_count)
+		input_mt_report_finger_count(dev, count);
+
+	if (oldest) {
+		int x = input_mt_get_value(oldest, ABS_MT_POSITION_X);
+		int y = input_mt_get_value(oldest, ABS_MT_POSITION_Y);
+		int p = input_mt_get_value(oldest, ABS_MT_PRESSURE);
+
+		input_event(dev, EV_ABS, ABS_X, x);
+		input_event(dev, EV_ABS, ABS_Y, y);
+		input_event(dev, EV_ABS, ABS_PRESSURE, p);
+	} else {
+		input_event(dev, EV_ABS, ABS_PRESSURE, 0);
+	}
+}
+EXPORT_SYMBOL(input_mt_report_pointer_emulation);
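
The new input-mt.c above collects the MT slot helpers into one library. For context, a driver built on this API typically looks roughly like the following sketch; DEMO_MAX_CONTACTS, the axis ranges and the demo_* names are invented, and the calls assume the helpers declared in linux/input/mt.h as introduced here.

#include <linux/input/mt.h>

#define DEMO_MAX_CONTACTS	2

/* Probe time: declare the axes and allocate the slot state. */
static int demo_init_input(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);

	return input_mt_init_slots(dev, DEMO_MAX_CONTACTS);
}

/* Per frame: report each contact in its slot, then emulate a pointer. */
static void demo_report_frame(struct input_dev *dev,
			      const int *x, const int *y, const bool *down)
{
	int i;

	for (i = 0; i < DEMO_MAX_CONTACTS; i++) {
		input_mt_slot(dev, i);
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, down[i]);
		if (down[i]) {
			input_report_abs(dev, ABS_MT_POSITION_X, x[i]);
			input_report_abs(dev, ABS_MT_POSITION_Y, y[i]);
		}
	}

	/* BTN_TOUCH/ABS_X/ABS_Y (plus finger count) for legacy userspace. */
	input_mt_report_pointer_emulation(dev, true);
	input_sync(dev);
}
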
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 10c9b0a..0559e30 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -8,6 +8,8 @@
  * the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/jiffies.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
@@ -33,8 +35,7 @@
 	if (!polldev_users) {
 		polldev_wq = create_singlethread_workqueue("ipolldevd");
 		if (!polldev_wq) {
-			printk(KERN_ERR "input-polldev: failed to create "
-				"ipolldevd workqueue\n");
+			pr_err("failed to create ipolldevd workqueue\n");
 			retval = -ENOMEM;
 			goto out;
 		}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index db409d6..7985114 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -10,9 +10,11 @@
  * the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/input.h>
+#include <linux/input/mt.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/random.h>
@@ -958,10 +960,8 @@
 
 	error = handler->connect(handler, dev, id);
 	if (error && error != -ENODEV)
-		printk(KERN_ERR
-			"input: failed to attach handler %s to device %s, "
-			"error: %d\n",
-			handler->name, kobject_name(&dev->dev.kobj), error);
+		pr_err("failed to attach handler %s to device %s, error: %d\n",
+		       handler->name, kobject_name(&dev->dev.kobj), error);
 
 	return error;
 }
@@ -1109,6 +1109,8 @@
 		seq_printf(seq, "%s ", handle->name);
 	seq_putc(seq, '\n');
 
+	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
+
 	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
 	if (test_bit(EV_KEY, dev->evbit))
 		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
@@ -1332,11 +1334,26 @@
 }
 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
 
+static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
+			      int max, int add_cr);
+
+static ssize_t input_dev_show_properties(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct input_dev *input_dev = to_input_dev(dev);
+	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
+				     INPUT_PROP_MAX, true);
+	return min_t(int, len, PAGE_SIZE);
+}
+static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
+
 static struct attribute *input_dev_attrs[] = {
 	&dev_attr_name.attr,
 	&dev_attr_phys.attr,
 	&dev_attr_uniq.attr,
 	&dev_attr_modalias.attr,
+	&dev_attr_properties.attr,
 	NULL
 };
 
@@ -1470,7 +1487,7 @@
 {
 	int len;
 
-	if (add_uevent_var(env, "%s=", name))
+	if (add_uevent_var(env, "%s", name))
 		return -ENOMEM;
 
 	len = input_print_bitmap(&env->buf[env->buflen - 1],
@@ -1536,6 +1553,8 @@
 	if (dev->uniq)
 		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
 
+	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
+
 	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
 	if (test_bit(EV_KEY, dev->evbit))
 		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
@@ -1725,52 +1744,6 @@
 EXPORT_SYMBOL(input_free_device);
 
 /**
- * input_mt_create_slots() - create MT input slots
- * @dev: input device supporting MT events and finger tracking
- * @num_slots: number of slots used by the device
- *
- * This function allocates all necessary memory for MT slot handling in the
- * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
- * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
- */
-int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
-{
-	int i;
-
-	if (!num_slots)
-		return 0;
-
-	dev->mt = kcalloc(num_slots, sizeof(struct input_mt_slot), GFP_KERNEL);
-	if (!dev->mt)
-		return -ENOMEM;
-
-	dev->mtsize = num_slots;
-	input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
-
-	/* Mark slots as 'unused' */
-	for (i = 0; i < num_slots; i++)
-		dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
-
-	return 0;
-}
-EXPORT_SYMBOL(input_mt_create_slots);
-
-/**
- * input_mt_destroy_slots() - frees the MT slots of the input device
- * @dev: input device with allocated MT slots
- *
- * This function is only needed in error path as the input core will
- * automatically free the MT slots when the device is destroyed.
- */
-void input_mt_destroy_slots(struct input_dev *dev)
-{
-	kfree(dev->mt);
-	dev->mt = NULL;
-	dev->mtsize = 0;
-}
-EXPORT_SYMBOL(input_mt_destroy_slots);
-
-/**
  * input_set_capability - mark device as capable of a certain event
  * @dev: device that is capable of emitting or accepting event
  * @type: type of the event (EV_KEY, EV_REL, etc...)
@@ -1819,9 +1792,8 @@
 		break;
 
 	default:
-		printk(KERN_ERR
-			"input_set_capability: unknown type %u (code %u)\n",
-			type, code);
+		pr_err("input_set_capability: unknown type %u (code %u)\n",
+		       type, code);
 		dump_stack();
 		return;
 	}
@@ -1903,8 +1875,9 @@
 		return error;
 
 	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
-	printk(KERN_INFO "input: %s as %s\n",
-		dev->name ? dev->name : "Unspecified device", path ? path : "N/A");
+	pr_info("%s as %s\n",
+		dev->name ? dev->name : "Unspecified device",
+		path ? path : "N/A");
 	kfree(path);
 
 	error = mutex_lock_interruptible(&input_mutex);
@@ -2186,7 +2159,7 @@
 
 	err = class_register(&input_class);
 	if (err) {
-		printk(KERN_ERR "input: unable to register input_dev class\n");
+		pr_err("unable to register input_dev class\n");
 		return err;
 	}
 
@@ -2196,7 +2169,7 @@
 
 	err = register_chrdev(INPUT_MAJOR, "input", &input_fops);
 	if (err) {
-		printk(KERN_ERR "input: unable to register char major %d", INPUT_MAJOR);
+		pr_err("unable to register char major %d", INPUT_MAJOR);
 		goto fail2;
 	}
 
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 9d424ceb..3182c9c 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -10,6 +10,8 @@
  * (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <asm/io.h>
 #include <asm/system.h>
 #include <linux/delay.h>
@@ -806,7 +808,7 @@
 			break;
 
 	if (minor == JOYDEV_MINORS) {
-		printk(KERN_ERR "joydev: no more free joydev devices\n");
+		pr_err("no more free joydev devices\n");
 		return -ENFILE;
 	}
 
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 5b59616..56eb471 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -255,6 +255,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called amijoy.
 
+config JOYSTICK_AS5011
+	tristate "Austria Microsystem AS5011 joystick"
+	depends on I2C
+	help
+	  Say Y here if you have an AS5011 digital joystick connected to your
+	  system.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called as5011.
+
 config JOYSTICK_JOYDUMP
 	tristate "Gameport data dumper"
 	select GAMEPORT
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index f3a8cbe..92dc0de 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_JOYSTICK_A3D)		+= a3d.o
 obj-$(CONFIG_JOYSTICK_ADI)		+= adi.o
 obj-$(CONFIG_JOYSTICK_AMIGA)		+= amijoy.o
+obj-$(CONFIG_JOYSTICK_AS5011)		+= as5011.o
 obj-$(CONFIG_JOYSTICK_ANALOG)		+= analog.o
 obj-$(CONFIG_JOYSTICK_COBRA)		+= cobra.o
 obj-$(CONFIG_JOYSTICK_DB9)		+= db9.o
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
new file mode 100644
index 0000000..f6732b5
--- /dev/null
+++ b/drivers/input/joystick/as5011.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ * Sponsored by ARMadeus Systems
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver for Austria Microsystems joysticks AS5011
+ *
+ * TODO:
+ *	- Power on the chip when open() and power down when close()
+ *	- Manage power mode
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/as5011.h>
+#include <linux/slab.h>
+
+#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
+#define MODULE_DEVICE_ALIAS "as5011"
+
+MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/* registers */
+#define AS5011_CTRL1		0x76
+#define AS5011_CTRL2		0x75
+#define AS5011_XP		0x43
+#define AS5011_XN		0x44
+#define AS5011_YP		0x53
+#define AS5011_YN		0x54
+#define AS5011_X_REG		0x41
+#define AS5011_Y_REG		0x42
+#define AS5011_X_RES_INT	0x51
+#define AS5011_Y_RES_INT	0x52
+
+/* CTRL1 bits */
+#define AS5011_CTRL1_LP_PULSED		0x80
+#define AS5011_CTRL1_LP_ACTIVE		0x40
+#define AS5011_CTRL1_LP_CONTINUE	0x20
+#define AS5011_CTRL1_INT_WUP_EN		0x10
+#define AS5011_CTRL1_INT_ACT_EN		0x08
+#define AS5011_CTRL1_EXT_CLK_EN		0x04
+#define AS5011_CTRL1_SOFT_RST		0x02
+#define AS5011_CTRL1_DATA_VALID		0x01
+
+/* CTRL2 bits */
+#define AS5011_CTRL2_EXT_SAMPLE_EN	0x08
+#define AS5011_CTRL2_RC_BIAS_ON		0x04
+#define AS5011_CTRL2_INV_SPINNING	0x02
+
+#define AS5011_MAX_AXIS	80
+#define AS5011_MIN_AXIS	(-80)
+#define AS5011_FUZZ	8
+#define AS5011_FLAT	40
+
+struct as5011_device {
+	struct input_dev *input_dev;
+	struct i2c_client *i2c_client;
+	unsigned int button_gpio;
+	unsigned int button_irq;
+	unsigned int axis_irq;
+};
+
+static int as5011_i2c_write(struct i2c_client *client,
+			    uint8_t aregaddr,
+			    uint8_t avalue)
+{
+	uint8_t data[2] = { aregaddr, avalue };
+	struct i2c_msg msg = {
+		client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+	};
+	int error;
+
+	error = i2c_transfer(client->adapter, &msg, 1);
+	return error < 0 ? error : 0;
+}
+
+static int as5011_i2c_read(struct i2c_client *client,
+			   uint8_t aregaddr, signed char *value)
+{
+	uint8_t data[2] = { aregaddr };
+	struct i2c_msg msg_set[2] = {
+		{ client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
+		{ client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+	};
+	int error;
+
+	error = i2c_transfer(client->adapter, msg_set, 2);
+	if (error < 0)
+		return error;
+
+	*value = data[0] & 0x80 ? -1 * (1 + ~data[0]) : data[0];
+	return 0;
+}
+
+static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
+{
+	struct as5011_device *as5011 = dev_id;
+	int val = gpio_get_value_cansleep(as5011->button_gpio);
+
+	input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
+	input_sync(as5011->input_dev);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id)
+{
+	struct as5011_device *as5011 = dev_id;
+	int error;
+	signed char x, y;
+
+	error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x);
+	if (error < 0)
+		goto out;
+
+	error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y);
+	if (error < 0)
+		goto out;
+
+	input_report_abs(as5011->input_dev, ABS_X, x);
+	input_report_abs(as5011->input_dev, ABS_Y, y);
+	input_sync(as5011->input_dev);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+				const struct as5011_platform_data *plat_dat)
+{
+	struct i2c_client *client = as5011->i2c_client;
+	int error;
+	signed char value;
+
+	/* chip soft reset */
+	error = as5011_i2c_write(client, AS5011_CTRL1,
+				 AS5011_CTRL1_SOFT_RST);
+	if (error < 0) {
+		dev_err(&client->dev, "Soft reset failed\n");
+		return error;
+	}
+
+	mdelay(10);
+
+	error = as5011_i2c_write(client, AS5011_CTRL1,
+				 AS5011_CTRL1_LP_PULSED |
+				 AS5011_CTRL1_LP_ACTIVE |
+				 AS5011_CTRL1_INT_ACT_EN);
+	if (error < 0) {
+		dev_err(&client->dev, "Power config failed\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_CTRL2,
+				 AS5011_CTRL2_INV_SPINNING);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't invert spinning\n");
+		return error;
+	}
+
+	/* write threshold */
+	error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	/* to free irq gpio in chip */
+	error = as5011_i2c_read(client, AS5011_X_RES_INT, &value);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't read i2c X resolution value\n");
+		return error;
+	}
+
+	return 0;
+}
+
+static int __devinit as5011_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	const struct as5011_platform_data *plat_data;
+	struct as5011_device *as5011;
+	struct input_dev *input_dev;
+	int irq;
+	int error;
+
+	plat_data = client->dev.platform_data;
+	if (!plat_data)
+		return -EINVAL;
+
+	if (!plat_data->axis_irq) {
+		dev_err(&client->dev, "No axis IRQ?\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_PROTOCOL_MANGLING)) {
+		dev_err(&client->dev,
+			"need i2c bus that supports protocol mangling\n");
+		return -ENODEV;
+	}
+
+	as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!as5011 || !input_dev) {
+		dev_err(&client->dev,
+			"Can't allocate memory for device structure\n");
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	as5011->i2c_client = client;
+	as5011->input_dev = input_dev;
+	as5011->button_gpio = plat_data->button_gpio;
+	as5011->axis_irq = plat_data->axis_irq;
+
+	input_dev->name = "Austria Microsystem as5011 joystick";
+	input_dev->id.bustype = BUS_I2C;
+	input_dev->dev.parent = &client->dev;
+
+	__set_bit(EV_KEY, input_dev->evbit);
+	__set_bit(EV_ABS, input_dev->evbit);
+	__set_bit(BTN_JOYSTICK, input_dev->keybit);
+
+	input_set_abs_params(input_dev, ABS_X,
+		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+	input_set_abs_params(as5011->input_dev, ABS_Y,
+		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+
+	error = gpio_request(as5011->button_gpio, "AS5011 button");
+	if (error < 0) {
+		dev_err(&client->dev, "Failed to request button gpio\n");
+		goto err_free_mem;
+	}
+
+	irq = gpio_to_irq(as5011->button_gpio);
+	if (irq < 0) {
+		dev_err(&client->dev,
+			"Failed to get irq number for button gpio\n");
+		goto err_free_button_gpio;
+	}
+
+	as5011->button_irq = irq;
+
+	error = request_threaded_irq(as5011->button_irq,
+				     NULL, as5011_button_interrupt,
+				     IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				     "as5011_button", as5011);
+	if (error < 0) {
+		dev_err(&client->dev,
+			"Can't allocate button irq %d\n", as5011->button_irq);
+		goto err_free_button_gpio;
+	}
+
+	error = as5011_configure_chip(as5011, plat_data);
+	if (error)
+		goto err_free_button_irq;
+
+	error = request_threaded_irq(as5011->axis_irq, NULL,
+				     as5011_axis_interrupt,
+				     plat_data->axis_irqflags,
+				     "as5011_joystick", as5011);
+	if (error) {
+		dev_err(&client->dev,
+			"Can't allocate axis irq %d\n", plat_data->axis_irq);
+		goto err_free_button_irq;
+	}
+
+	error = input_register_device(as5011->input_dev);
+	if (error) {
+		dev_err(&client->dev, "Failed to register input device\n");
+		goto err_free_axis_irq;
+	}
+
+	i2c_set_clientdata(client, as5011);
+
+	return 0;
+
+err_free_axis_irq:
+	free_irq(as5011->axis_irq, as5011);
+err_free_button_irq:
+	free_irq(as5011->button_irq, as5011);
+err_free_button_gpio:
+	gpio_free(as5011->button_gpio);
+err_free_mem:
+	input_free_device(input_dev);
+	kfree(as5011);
+
+	return error;
+}
+
+static int __devexit as5011_remove(struct i2c_client *client)
+{
+	struct as5011_device *as5011 = i2c_get_clientdata(client);
+
+	free_irq(as5011->axis_irq, as5011);
+	free_irq(as5011->button_irq, as5011);
+	gpio_free(as5011->button_gpio);
+
+	input_unregister_device(as5011->input_dev);
+	kfree(as5011);
+
+	return 0;
+}
+
+static const struct i2c_device_id as5011_id[] = {
+	{ MODULE_DEVICE_ALIAS, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, as5011_id);
+
+static struct i2c_driver as5011_driver = {
+	.driver = {
+		.name = "as5011",
+	},
+	.probe		= as5011_probe,
+	.remove		= __devexit_p(as5011_remove),
+	.id_table	= as5011_id,
+};
+
+static int __init as5011_init(void)
+{
+	return i2c_add_driver(&as5011_driver);
+}
+module_init(as5011_init);
+
+static void __exit as5011_exit(void)
+{
+	i2c_del_driver(&as5011_driver);
+}
+module_exit(as5011_exit);
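
The new as5011.c driver above expects its thresholds, button GPIO and axis interrupt to arrive as platform data attached to the I2C client. Board code would supply them roughly as below; the GPIO/IRQ numbers, I2C address and threshold values are placeholders, and the exact struct as5011_platform_data layout lives in linux/input/as5011.h, which is not part of these hunks.

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/input/as5011.h>
#include <linux/interrupt.h>

/* Placeholder numbers; a real board uses its own GPIO/IRQ/address. */
#define DEMO_AS5011_BUTTON_GPIO	42
#define DEMO_AS5011_AXIS_IRQ	96

static struct as5011_platform_data demo_as5011_pdata = {
	.button_gpio	= DEMO_AS5011_BUTTON_GPIO,
	.axis_irq	= DEMO_AS5011_AXIS_IRQ,
	.axis_irqflags	= IRQF_TRIGGER_FALLING,
	.xp		= 0x10,		/* arbitrary demo thresholds */
	.xn		= -0x10,
	.yp		= 0x10,
	.yn		= -0x10,
};

static struct i2c_board_info demo_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("as5011", 0x40),
		.platform_data	= &demo_as5011_pdata,
	},
};

/*
 * Board init would register this with something like:
 *	i2c_register_board_info(0, demo_i2c_devices,
 *				ARRAY_SIZE(demo_i2c_devices));
 */
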
diff --git a/drivers/input/joystick/iforce/Makefile b/drivers/input/joystick/iforce/Makefile
index 74daff4..bc5bda2 100644
--- a/drivers/input/joystick/iforce/Makefile
+++ b/drivers/input/joystick/iforce/Makefile
@@ -4,17 +4,8 @@
 # By Johann Deneux <johann.deneux@gmail.com>
 #
 
-# Goal definition
-iforce-objs	:= iforce-ff.o iforce-main.o iforce-packets.o
-
 obj-$(CONFIG_JOYSTICK_IFORCE)	+= iforce.o
 
-ifeq ($(CONFIG_JOYSTICK_IFORCE_232),y)
-	iforce-objs += iforce-serio.o
-endif
-
-ifeq ($(CONFIG_JOYSTICK_IFORCE_USB),y)
-	iforce-objs += iforce-usb.o
-endif
-
-EXTRA_CFLAGS = -Werror-implicit-function-declaration
+iforce-y := iforce-ff.o iforce-main.o iforce-packets.o
+iforce-$(CONFIG_JOYSTICK_IFORCE_232)	+= iforce-serio.o
+iforce-$(CONFIG_JOYSTICK_IFORCE_USB)	+= iforce-usb.o
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index f9fb7fa..56abf3d 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -543,21 +543,25 @@
 static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
 {
 	struct usb_endpoint_descriptor *ep_irq_out;
-	int error = -ENOMEM;
+	int error;
 
 	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX)
 		return 0;
 
 	xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
 					 GFP_KERNEL, &xpad->odata_dma);
-	if (!xpad->odata)
+	if (!xpad->odata) {
+		error = -ENOMEM;
 		goto fail1;
+	}
 
 	mutex_init(&xpad->odata_mutex);
 
 	xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
-	if (!xpad->irq_out)
+	if (!xpad->irq_out) {
+		error = -ENOMEM;
 		goto fail2;
+	}
 
 	ep_irq_out = &intf->cur_altsetting->endpoint[1].desc;
 	usb_fill_int_urb(xpad->irq_out, xpad->udev,
@@ -728,7 +732,7 @@
 
 	if (xpad_led) {
 		led_classdev_unregister(&xpad_led->led_cdev);
-		kfree(xpad_led->name);
+		kfree(xpad_led);
 	}
 }
 #else
@@ -756,8 +760,9 @@
 {
 	struct usb_xpad *xpad = input_get_drvdata(dev);
 
-	if(xpad->xtype != XTYPE_XBOX360W)
+	if (xpad->xtype != XTYPE_XBOX360W)
 		usb_kill_urb(xpad->irq_in);
+
 	xpad_stop_output(xpad);
 }
 
@@ -789,8 +794,7 @@
 	struct usb_xpad *xpad;
 	struct input_dev *input_dev;
 	struct usb_endpoint_descriptor *ep_irq_in;
-	int i;
-	int error = -ENOMEM;
+	int i, error;
 
 	for (i = 0; xpad_device[i].idVendor; i++) {
 		if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
@@ -800,17 +804,23 @@
 
 	xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
 	input_dev = input_allocate_device();
-	if (!xpad || !input_dev)
+	if (!xpad || !input_dev) {
+		error = -ENOMEM;
 		goto fail1;
+	}
 
 	xpad->idata = usb_alloc_coherent(udev, XPAD_PKT_LEN,
 					 GFP_KERNEL, &xpad->idata_dma);
-	if (!xpad->idata)
+	if (!xpad->idata) {
+		error = -ENOMEM;
 		goto fail1;
+	}
 
 	xpad->irq_in = usb_alloc_urb(0, GFP_KERNEL);
-	if (!xpad->irq_in)
+	if (!xpad->irq_in) {
+		error = -ENOMEM;
 		goto fail2;
+	}
 
 	xpad->udev = udev;
 	xpad->mapping = xpad_device[i].mapping;
@@ -887,15 +897,15 @@
 
 	error = xpad_init_output(intf, xpad);
 	if (error)
-		goto fail2;
+		goto fail3;
 
 	error = xpad_init_ff(xpad);
 	if (error)
-		goto fail3;
+		goto fail4;
 
 	error = xpad_led_probe(xpad);
 	if (error)
-		goto fail3;
+		goto fail5;
 
 	ep_irq_in = &intf->cur_altsetting->endpoint[0].desc;
 	usb_fill_int_urb(xpad->irq_in, udev,
@@ -907,34 +917,26 @@
 
 	error = input_register_device(xpad->dev);
 	if (error)
-		goto fail4;
+		goto fail6;
 
 	usb_set_intfdata(intf, xpad);
 
-	/*
-	 * Submit the int URB immediatly rather than waiting for open
-	 * because we get status messages from the device whether
-	 * or not any controllers are attached.  In fact, it's
-	 * exactly the message that a controller has arrived that
-	 * we're waiting for.
-	 */
 	if (xpad->xtype == XTYPE_XBOX360W) {
-		xpad->irq_in->dev = xpad->udev;
-		error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
-		if (error)
-			goto fail4;
-
 		/*
 		 * Setup the message to set the LEDs on the
 		 * controller when it shows up
 		 */
 		xpad->bulk_out = usb_alloc_urb(0, GFP_KERNEL);
-		if(!xpad->bulk_out)
-			goto fail5;
+		if (!xpad->bulk_out) {
+			error = -ENOMEM;
+			goto fail7;
+		}
 
 		xpad->bdata = kzalloc(XPAD_PKT_LEN, GFP_KERNEL);
-		if(!xpad->bdata)
-			goto fail6;
+		if (!xpad->bdata) {
+			error = -ENOMEM;
+			goto fail8;
+		}
 
 		xpad->bdata[2] = 0x08;
 		switch (intf->cur_altsetting->desc.bInterfaceNumber) {
@@ -955,14 +957,31 @@
 		usb_fill_bulk_urb(xpad->bulk_out, udev,
 				usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress),
 				xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
+
+		/*
+		 * Submit the int URB immediately rather than waiting for open
+		 * because we get status messages from the device whether
+		 * or not any controllers are attached.  In fact, it's
+		 * exactly the message that a controller has arrived that
+		 * we're waiting for.
+		 */
+		xpad->irq_in->dev = xpad->udev;
+		error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
+		if (error)
+			goto fail9;
 	}
 
 	return 0;
 
- fail6:	usb_free_urb(xpad->bulk_out);
- fail5:	usb_kill_urb(xpad->irq_in);
- fail4:	usb_free_urb(xpad->irq_in);
- fail3:	xpad_deinit_output(xpad);
+ fail9:	kfree(xpad->bdata);
+ fail8:	usb_free_urb(xpad->bulk_out);
+ fail7:	input_unregister_device(input_dev);
+	input_dev = NULL;
+ fail6:	xpad_led_disconnect(xpad);
+ fail5:	if (input_dev)
+		input_ff_destroy(input_dev);
+ fail4:	xpad_deinit_output(xpad);
+ fail3:	usb_free_urb(xpad->irq_in);
  fail2:	usb_free_coherent(udev, XPAD_PKT_LEN, xpad->idata, xpad->idata_dma);
  fail1:	input_free_device(input_dev);
 	kfree(xpad);
@@ -974,21 +993,24 @@
 {
 	struct usb_xpad *xpad = usb_get_intfdata (intf);
 
-	usb_set_intfdata(intf, NULL);
-	if (xpad) {
-		xpad_led_disconnect(xpad);
-		input_unregister_device(xpad->dev);
-		xpad_deinit_output(xpad);
-		if (xpad->xtype == XTYPE_XBOX360W) {
-			usb_kill_urb(xpad->bulk_out);
-			usb_free_urb(xpad->bulk_out);
-			usb_kill_urb(xpad->irq_in);
-		}
-		usb_free_urb(xpad->irq_in);
-		usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
-				xpad->idata, xpad->idata_dma);
-		kfree(xpad);
+	xpad_led_disconnect(xpad);
+	input_unregister_device(xpad->dev);
+	xpad_deinit_output(xpad);
+
+	if (xpad->xtype == XTYPE_XBOX360W) {
+		usb_kill_urb(xpad->bulk_out);
+		usb_free_urb(xpad->bulk_out);
+		usb_kill_urb(xpad->irq_in);
 	}
+
+	usb_free_urb(xpad->irq_in);
+	usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
+			xpad->idata, xpad->idata_dma);
+
+	kfree(xpad->bdata);
+	kfree(xpad);
+
+	usb_set_intfdata(intf, NULL);
 }
 
 static struct usb_driver xpad_driver = {
@@ -1000,10 +1022,7 @@
 
 static int __init usb_xpad_init(void)
 {
-	int result = usb_register(&xpad_driver);
-	if (result == 0)
-		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
-	return result;
+	return usb_register(&xpad_driver);
 }
 
 static void __exit usb_xpad_exit(void)
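
The reworked xpad probe path above renumbers its fail labels (fail1 through fail9) so that every resource is released in the reverse order of its acquisition on any failure. A minimal sketch of that goto-unwind idiom, in plain C with illustrative names rather than the driver's own:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe_example(void)
{
	char *a, *b;
	int error;

	a = malloc(16);
	if (!a) {
		error = -ENOMEM;
		goto fail1;
	}

	b = malloc(16);
	if (!b) {
		error = -ENOMEM;
		goto fail2;	/* releases a, the only thing held so far */
	}

	/* ... more setup, each failure jumping one label further down ... */

	free(b);
	free(a);
	return 0;

 fail2:	free(a);
 fail1:	return error;
}

int main(void)
{
	return probe_example() ? 1 : 0;
}
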
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index c76bd31..7b3c0b8 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -12,18 +12,6 @@
 
 if INPUT_KEYBOARD
 
-config KEYBOARD_AAED2000
-	tristate "AAED-2000 keyboard"
-	depends on MACH_AAED2000
-	select INPUT_POLLDEV
-	default y
-	help
-	  Say Y here to enable the keyboard on the Agilent AAED-2000
-	  development board.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aaed2000_kbd.
-
 config KEYBOARD_ADP5520
 	tristate "Keypad Support for ADP5520 PMIC"
 	depends on PMIC_ADP5520
@@ -196,20 +184,22 @@
 	  module will be called gpio_keys_polled.
 
 config KEYBOARD_TCA6416
-	tristate "TCA6416 Keypad Support"
+	tristate "TCA6416/TCA6408A Keypad Support"
 	depends on I2C
 	help
 	  This driver implements basic keypad functionality
-	  for keys connected through TCA6416 IO expander
+	  for keys connected through TCA6416/TCA6408A IO expanders.
 
 	  Say Y here if your device has keys connected to
-	  TCA6416 IO expander. Your board-specific setup logic
+	  TCA6416/TCA6408A IO expander. Your board-specific setup logic
 	  must also provide pin-mask details (of which TCA6416 pins
 	  are used for the keypad).
 
-	  If enabled the complete TCA6416 device will be managed through
+	  If enabled, the entire TCA6416 device will be managed through
 	  this driver.
 
+	  To compile this driver as a module, choose M here: the
+	  module will be called tca6416_keypad.
 
 config KEYBOARD_MATRIX
 	tristate "GPIO driven matrix keypad support"
@@ -459,6 +449,15 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called omap4-keypad.
 
+config KEYBOARD_SPEAR
+	tristate "ST SPEAR keyboard support"
+	depends on PLAT_SPEAR
+	help
+	  Say Y here if you want to use the SPEAR keyboard.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called spear-keyboard.
+
 config KEYBOARD_TC3589X
 	tristate "TC3589X Keypad support"
 	depends on MFD_TC3589X
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 2aa6ce2..4e5571b 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,7 +4,6 @@
 
 # Each configuration option enables a list of files.
 
-obj-$(CONFIG_KEYBOARD_AAED2000)		+= aaed2000_kbd.o
 obj-$(CONFIG_KEYBOARD_ADP5520)		+= adp5520-keys.o
 obj-$(CONFIG_KEYBOARD_ADP5588)		+= adp5588-keys.o
 obj-$(CONFIG_KEYBOARD_AMIGA)		+= amikbd.o
@@ -38,6 +37,7 @@
 obj-$(CONFIG_KEYBOARD_QT2160)		+= qt2160.o
 obj-$(CONFIG_KEYBOARD_SAMSUNG)		+= samsung-keypad.o
 obj-$(CONFIG_KEYBOARD_SH_KEYSC)		+= sh_keysc.o
+obj-$(CONFIG_KEYBOARD_SPEAR)		+= spear-keyboard.o
 obj-$(CONFIG_KEYBOARD_STMPE)		+= stmpe-keypad.o
 obj-$(CONFIG_KEYBOARD_STOWAWAY)		+= stowaway.o
 obj-$(CONFIG_KEYBOARD_SUNKBD)		+= sunkbd.o
diff --git a/drivers/input/keyboard/aaed2000_kbd.c b/drivers/input/keyboard/aaed2000_kbd.c
deleted file mode 100644
index 18222a6..0000000
--- a/drivers/input/keyboard/aaed2000_kbd.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *  Keyboard driver for the AAED-2000 dev board
- *
- *  Copyright (c) 2006 Nicolas Bellido Y Ortega
- *
- *  Based on corgikbd.c
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input-polldev.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h>
-#include <mach/aaed2000.h>
-
-#define KB_ROWS			12
-#define KB_COLS			8
-#define KB_ROWMASK(r)		(1 << (r))
-#define SCANCODE(r,c)		(((c) * KB_ROWS) + (r))
-#define NR_SCANCODES		(KB_COLS * KB_ROWS)
-
-#define SCAN_INTERVAL		(50) /* ms */
-#define KB_ACTIVATE_DELAY	(20) /* us */
-
-static unsigned char aaedkbd_keycode[NR_SCANCODES] = {
-	KEY_9, KEY_0, KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, 0, KEY_SPACE, KEY_KP6, 0, KEY_KPDOT, 0, 0,
-	KEY_K, KEY_M, KEY_O, KEY_DOT, KEY_SLASH, 0, KEY_F, 0, 0, 0, KEY_LEFTSHIFT, 0,
-	KEY_I, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH, 0, 0, 0, 0, 0, KEY_RIGHTSHIFT, 0,
-	KEY_8, KEY_L, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_ENTER, 0, 0, 0, 0, 0, 0, 0,
-	KEY_J, KEY_H, KEY_B, KEY_KP8, KEY_KP4, 0, KEY_C, KEY_D, KEY_S, KEY_A, 0, KEY_CAPSLOCK,
-	KEY_Y, KEY_U, KEY_N, KEY_T, 0, 0, KEY_R, KEY_E, KEY_W, KEY_Q, 0, KEY_TAB,
-	KEY_7, KEY_6, KEY_G, 0, KEY_5, 0, KEY_4, KEY_3, KEY_2, KEY_1, 0, KEY_GRAVE,
-	0, 0, KEY_COMMA, 0, KEY_KP2, 0, KEY_V, KEY_LEFTALT, KEY_X, KEY_Z, 0, KEY_LEFTCTRL
-};
-
-struct aaedkbd {
-	unsigned char keycode[ARRAY_SIZE(aaedkbd_keycode)];
-	struct input_polled_dev *poll_dev;
-	int kbdscan_state[KB_COLS];
-	int kbdscan_count[KB_COLS];
-};
-
-#define KBDSCAN_STABLE_COUNT 2
-
-static void aaedkbd_report_col(struct aaedkbd *aaedkbd,
-				unsigned int col, unsigned int rowd)
-{
-	unsigned int scancode, pressed;
-	unsigned int row;
-
-	for (row = 0; row < KB_ROWS; row++) {
-		scancode = SCANCODE(row, col);
-		pressed = rowd & KB_ROWMASK(row);
-
-		input_report_key(aaedkbd->poll_dev->input,
-				 aaedkbd->keycode[scancode], pressed);
-	}
-}
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void aaedkbd_poll(struct input_polled_dev *dev)
-{
-	struct aaedkbd *aaedkbd = dev->private;
-	unsigned int col, rowd;
-
-	col = 0;
-	do {
-		AAEC_GPIO_KSCAN = col + 8;
-		udelay(KB_ACTIVATE_DELAY);
-		rowd = AAED_EXT_GPIO & AAED_EGPIO_KBD_SCAN;
-
-		if (rowd != aaedkbd->kbdscan_state[col]) {
-			aaedkbd->kbdscan_count[col] = 0;
-			aaedkbd->kbdscan_state[col] = rowd;
-		} else if (++aaedkbd->kbdscan_count[col] >= KBDSCAN_STABLE_COUNT) {
-			aaedkbd_report_col(aaedkbd, col, rowd);
-			col++;
-		}
-	} while (col < KB_COLS);
-
-	AAEC_GPIO_KSCAN = 0x07;
-	input_sync(dev->input);
-}
-
-static int __devinit aaedkbd_probe(struct platform_device *pdev)
-{
-	struct aaedkbd *aaedkbd;
-	struct input_polled_dev *poll_dev;
-	struct input_dev *input_dev;
-	int i;
-	int error;
-
-	aaedkbd = kzalloc(sizeof(struct aaedkbd), GFP_KERNEL);
-	poll_dev = input_allocate_polled_device();
-	if (!aaedkbd || !poll_dev) {
-		error = -ENOMEM;
-		goto fail;
-	}
-
-	platform_set_drvdata(pdev, aaedkbd);
-
-	aaedkbd->poll_dev = poll_dev;
-	memcpy(aaedkbd->keycode, aaedkbd_keycode, sizeof(aaedkbd->keycode));
-
-	poll_dev->private = aaedkbd;
-	poll_dev->poll = aaedkbd_poll;
-	poll_dev->poll_interval = SCAN_INTERVAL;
-
-	input_dev = poll_dev->input;
-	input_dev->name = "AAED-2000 Keyboard";
-	input_dev->phys = "aaedkbd/input0";
-	input_dev->id.bustype = BUS_HOST;
-	input_dev->id.vendor = 0x0001;
-	input_dev->id.product = 0x0001;
-	input_dev->id.version = 0x0100;
-	input_dev->dev.parent = &pdev->dev;
-
-	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
-	input_dev->keycode = aaedkbd->keycode;
-	input_dev->keycodesize = sizeof(unsigned char);
-	input_dev->keycodemax = ARRAY_SIZE(aaedkbd_keycode);
-
-	for (i = 0; i < ARRAY_SIZE(aaedkbd_keycode); i++)
-		set_bit(aaedkbd->keycode[i], input_dev->keybit);
-	clear_bit(0, input_dev->keybit);
-
-	error = input_register_polled_device(aaedkbd->poll_dev);
-	if (error)
-		goto fail;
-
-	return 0;
-
- fail:	kfree(aaedkbd);
-	input_free_polled_device(poll_dev);
-	return error;
-}
-
-static int __devexit aaedkbd_remove(struct platform_device *pdev)
-{
-	struct aaedkbd *aaedkbd = platform_get_drvdata(pdev);
-
-	input_unregister_polled_device(aaedkbd->poll_dev);
-	input_free_polled_device(aaedkbd->poll_dev);
-	kfree(aaedkbd);
-
-	return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:aaed2000-keyboard");
-
-static struct platform_driver aaedkbd_driver = {
-	.probe		= aaedkbd_probe,
-	.remove		= __devexit_p(aaedkbd_remove),
-	.driver		= {
-		.name	= "aaed2000-keyboard",
-		.owner	= THIS_MODULE,
-	},
-};
-
-static int __init aaedkbd_init(void)
-{
-	return platform_driver_register(&aaedkbd_driver);
-}
-
-static void __exit aaedkbd_exit(void)
-{
-	platform_driver_unregister(&aaedkbd_driver);
-}
-
-module_init(aaedkbd_init);
-module_exit(aaedkbd_exit);
-
-MODULE_AUTHOR("Nicolas Bellido Y Ortega");
-MODULE_DESCRIPTION("AAED-2000 Keyboard Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index a72e61d..0e2a19c 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -65,7 +65,6 @@
 
 static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);
 
-static int *keymap;
 static unsigned int *row_gpios;
 static unsigned int *col_gpios;
 
@@ -162,20 +161,11 @@
 	}
 }
 
-static inline int omap_kp_find_key(int col, int row)
-{
-	int i, key;
-
-	key = KEY(col, row, 0);
-	for (i = 0; keymap[i] != 0; i++)
-		if ((keymap[i] & 0xff000000) == key)
-			return keymap[i] & 0x00ffffff;
-	return -1;
-}
-
 static void omap_kp_tasklet(unsigned long data)
 {
 	struct omap_kp *omap_kp_data = (struct omap_kp *) data;
+	unsigned short *keycodes = omap_kp_data->input->keycode;
+	unsigned int row_shift = get_count_order(omap_kp_data->cols);
 	unsigned char new_state[8], changed, key_down = 0;
 	int col, row;
 	int spurious = 0;
@@ -199,7 +189,7 @@
 			       row, (new_state[col] & (1 << row)) ?
 			       "pressed" : "released");
 #else
-			key = omap_kp_find_key(col, row);
+			key = keycodes[MATRIX_SCAN_CODE(row, col, row_shift)];
 			if (key < 0) {
 				printk(KERN_WARNING
 				      "omap-keypad: Spurious key event %d-%d\n",
@@ -298,13 +288,18 @@
 	struct input_dev *input_dev;
 	struct omap_kp_platform_data *pdata =  pdev->dev.platform_data;
 	int i, col_idx, row_idx, irq_idx, ret;
+	unsigned int row_shift, keycodemax;
 
-	if (!pdata->rows || !pdata->cols || !pdata->keymap) {
-		printk(KERN_ERR "No rows, cols or keymap from pdata\n");
+	if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
+		printk(KERN_ERR "No rows, cols or keymap_data from pdata\n");
 		return -EINVAL;
 	}
 
-	omap_kp = kzalloc(sizeof(struct omap_kp), GFP_KERNEL);
+	row_shift = get_count_order(pdata->cols);
+	keycodemax = pdata->rows << row_shift;
+
+	omap_kp = kzalloc(sizeof(struct omap_kp) +
+			keycodemax * sizeof(unsigned short), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!omap_kp || !input_dev) {
 		kfree(omap_kp);
@@ -320,7 +315,9 @@
 	if (!cpu_is_omap24xx())
 		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
 
-	keymap = pdata->keymap;
+	input_dev->keycode      = &omap_kp[1];
+	input_dev->keycodesize  = sizeof(unsigned short);
+	input_dev->keycodemax   = keycodemax;
 
 	if (pdata->rep)
 		__set_bit(EV_REP, input_dev->evbit);
@@ -374,8 +371,8 @@
 
 	/* setup input device */
 	__set_bit(EV_KEY, input_dev->evbit);
-	for (i = 0; keymap[i] != 0; i++)
-		__set_bit(keymap[i] & KEY_MAX, input_dev->keybit);
+	matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
+			input_dev->keycode, input_dev->keybit);
 	input_dev->name = "omap-keypad";
 	input_dev->phys = "omap-keypad/input0";
 	input_dev->dev.parent = &pdev->dev;
@@ -416,7 +413,7 @@
 	return 0;
 err5:
 	for (i = irq_idx - 1; i >=0; i--)
-		free_irq(row_gpios[i], 0);
+		free_irq(row_gpios[i], NULL);
 err4:
 	input_unregister_device(omap_kp->input);
 	input_dev = NULL;
@@ -447,11 +444,11 @@
 			gpio_free(col_gpios[i]);
 		for (i = 0; i < omap_kp->rows; i++) {
 			gpio_free(row_gpios[i]);
-			free_irq(gpio_to_irq(row_gpios[i]), 0);
+			free_irq(gpio_to_irq(row_gpios[i]), NULL);
 		}
 	} else {
 		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
-		free_irq(omap_kp->irq, 0);
+		free_irq(omap_kp->irq, NULL);
 	}
 
 	del_timer_sync(&omap_kp->timer);
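
The omap-keypad change above drops the open-coded keymap search in favour of a flat keycode table indexed with MATRIX_SCAN_CODE(row, col, row_shift). A small standalone sketch of the arithmetic involved, assuming the kernel macro expands to (row << row_shift) + col and that get_count_order() returns the number of bits needed to hold a column index:

#include <stdio.h>

#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))

/* stand-in for get_count_order(): smallest order with (1 << order) >= n */
static unsigned int count_order(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int rows = 8, cols = 6;
	unsigned int row_shift = count_order(cols);	/* 3 for 6 columns */
	unsigned short keycodes[8 << 3] = { 0 };	/* rows << row_shift entries */

	keycodes[MATRIX_SCAN_CODE(2, 5, row_shift)] = 30;	/* e.g. KEY_A */

	printf("rows=%u cols=%u shift=%u code@(2,5)=%u\n",
	       rows, cols, row_shift,
	       (unsigned int)keycodes[MATRIX_SCAN_CODE(2, 5, row_shift)]);
	return 0;
}
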
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
new file mode 100644
index 0000000..bee03d6
--- /dev/null
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -0,0 +1,344 @@
+/*
+ * SPEAr Keyboard Driver
+ * Based on omap-keypad driver
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Rajeev Kumar <rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <plat/keyboard.h>
+
+/* Keyboard Registers */
+#define MODE_REG	0x00	/* 16 bit reg */
+#define STATUS_REG	0x0C	/* 2 bit reg */
+#define DATA_REG	0x10	/* 8 bit reg */
+#define INTR_MASK	0x54
+
+/* Register Values */
+/*
+ * pclk freq mask = (APB FREQ - 1) = 82 MHz. Program bits 15-9 of the
+ * mode control register as 1010010 (82 MHz).
+ */
+#define PCLK_FREQ_MSK	0xA400	/* 82 MHz */
+#define START_SCAN	0x0100
+#define SCAN_RATE_10	0x0000
+#define SCAN_RATE_20	0x0004
+#define SCAN_RATE_40	0x0008
+#define SCAN_RATE_80	0x000C
+#define MODE_KEYBOARD	0x0002
+#define DATA_AVAIL	0x2
+
+#define KEY_MASK	0xFF000000
+#define KEY_VALUE	0x00FFFFFF
+#define ROW_MASK	0xF0
+#define COLUMN_MASK	0x0F
+#define ROW_SHIFT	4
+
+struct spear_kbd {
+	struct input_dev *input;
+	struct resource *res;
+	void __iomem *io_base;
+	struct clk *clk;
+	unsigned int irq;
+	unsigned short last_key;
+	unsigned short keycodes[256];
+};
+
+static irqreturn_t spear_kbd_interrupt(int irq, void *dev_id)
+{
+	struct spear_kbd *kbd = dev_id;
+	struct input_dev *input = kbd->input;
+	unsigned int key;
+	u8 sts, val;
+
+	sts = readb(kbd->io_base + STATUS_REG);
+	if (sts & DATA_AVAIL)
+		return IRQ_NONE;
+
+	if (kbd->last_key != KEY_RESERVED) {
+		input_report_key(input, kbd->last_key, 0);
+		kbd->last_key = KEY_RESERVED;
+	}
+
+	/* the following read returns the active (row, col) pair */
+	val = readb(kbd->io_base + DATA_REG);
+	key = kbd->keycodes[val];
+
+	input_event(input, EV_MSC, MSC_SCAN, val);
+	input_report_key(input, key, 1);
+	input_sync(input);
+
+	kbd->last_key = key;
+
+	/* clear interrupt */
+	writeb(0, kbd->io_base + STATUS_REG);
+
+	return IRQ_HANDLED;
+}
+
+static int spear_kbd_open(struct input_dev *dev)
+{
+	struct spear_kbd *kbd = input_get_drvdata(dev);
+	int error;
+	u16 val;
+
+	kbd->last_key = KEY_RESERVED;
+
+	error = clk_enable(kbd->clk);
+	if (error)
+		return error;
+
+	/* program keyboard */
+	val = SCAN_RATE_80 | MODE_KEYBOARD | PCLK_FREQ_MSK;
+	writew(val, kbd->io_base + MODE_REG);
+	writeb(1, kbd->io_base + STATUS_REG);
+
+	/* start key scan */
+	val = readw(kbd->io_base + MODE_REG);
+	val |= START_SCAN;
+	writew(val, kbd->io_base + MODE_REG);
+
+	return 0;
+}
+
+static void spear_kbd_close(struct input_dev *dev)
+{
+	struct spear_kbd *kbd = input_get_drvdata(dev);
+	u16 val;
+
+	/* stop key scan */
+	val = readw(kbd->io_base + MODE_REG);
+	val &= ~START_SCAN;
+	writew(val, kbd->io_base + MODE_REG);
+
+	clk_disable(kbd->clk);
+
+	kbd->last_key = KEY_RESERVED;
+}
+
+static int __devinit spear_kbd_probe(struct platform_device *pdev)
+{
+	const struct kbd_platform_data *pdata = pdev->dev.platform_data;
+	const struct matrix_keymap_data *keymap;
+	struct spear_kbd *kbd;
+	struct input_dev *input_dev;
+	struct resource *res;
+	int irq;
+	int error;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "Invalid platform data\n");
+		return -EINVAL;
+	}
+
+	keymap = pdata->keymap;
+	if (!keymap) {
+		dev_err(&pdev->dev, "no keymap defined\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no keyboard resource defined\n");
+		return -EBUSY;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "not able to get irq for the device\n");
+		return irq;
+	}
+
+	kbd = kzalloc(sizeof(*kbd), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!kbd || !input_dev) {
+		dev_err(&pdev->dev, "out of memory\n");
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	kbd->input = input_dev;
+	kbd->irq = irq;
+	kbd->res = request_mem_region(res->start, resource_size(res),
+				      pdev->name);
+	if (!kbd->res) {
+		dev_err(&pdev->dev, "keyboard region already claimed\n");
+		error = -EBUSY;
+		goto err_free_mem;
+	}
+
+	kbd->io_base = ioremap(res->start, resource_size(res));
+	if (!kbd->io_base) {
+		dev_err(&pdev->dev, "ioremap failed for kbd_region\n");
+		error = -ENOMEM;
+		goto err_release_mem_region;
+	}
+
+	kbd->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(kbd->clk)) {
+		error = PTR_ERR(kbd->clk);
+		goto err_iounmap;
+	}
+
+	input_dev->name = "Spear Keyboard";
+	input_dev->phys = "keyboard/input0";
+	input_dev->dev.parent = &pdev->dev;
+	input_dev->id.bustype = BUS_HOST;
+	input_dev->id.vendor = 0x0001;
+	input_dev->id.product = 0x0001;
+	input_dev->id.version = 0x0100;
+	input_dev->open = spear_kbd_open;
+	input_dev->close = spear_kbd_close;
+
+	__set_bit(EV_KEY, input_dev->evbit);
+	if (pdata->rep)
+		__set_bit(EV_REP, input_dev->evbit);
+	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+
+	input_dev->keycode = kbd->keycodes;
+	input_dev->keycodesize = sizeof(kbd->keycodes[0]);
+	input_dev->keycodemax = ARRAY_SIZE(kbd->keycodes);
+
+	matrix_keypad_build_keymap(keymap, ROW_SHIFT,
+			input_dev->keycode, input_dev->keybit);
+
+	input_set_drvdata(input_dev, kbd);
+
+	error = request_irq(irq, spear_kbd_interrupt, 0, "keyboard", kbd);
+	if (error) {
+		dev_err(&pdev->dev, "request_irq fail\n");
+		goto err_put_clk;
+	}
+
+	error = input_register_device(input_dev);
+	if (error) {
+		dev_err(&pdev->dev, "Unable to register keyboard device\n");
+		goto err_free_irq;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+	platform_set_drvdata(pdev, kbd);
+
+	return 0;
+
+err_free_irq:
+	free_irq(kbd->irq, kbd);
+err_put_clk:
+	clk_put(kbd->clk);
+err_iounmap:
+	iounmap(kbd->io_base);
+err_release_mem_region:
+	release_mem_region(res->start, resource_size(res));
+err_free_mem:
+	input_free_device(input_dev);
+	kfree(kbd);
+
+	return error;
+}
+
+static int __devexit spear_kbd_remove(struct platform_device *pdev)
+{
+	struct spear_kbd *kbd = platform_get_drvdata(pdev);
+
+	free_irq(kbd->irq, kbd);
+	input_unregister_device(kbd->input);
+	clk_put(kbd->clk);
+	iounmap(kbd->io_base);
+	release_mem_region(kbd->res->start, resource_size(kbd->res));
+	kfree(kbd);
+
+	device_init_wakeup(&pdev->dev, 1);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int spear_kbd_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct spear_kbd *kbd = platform_get_drvdata(pdev);
+	struct input_dev *input_dev = kbd->input;
+
+	mutex_lock(&input_dev->mutex);
+
+	if (input_dev->users)
+		clk_enable(kbd->clk);
+
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(kbd->irq);
+
+	mutex_unlock(&input_dev->mutex);
+
+	return 0;
+}
+
+static int spear_kbd_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct spear_kbd *kbd = platform_get_drvdata(pdev);
+	struct input_dev *input_dev = kbd->input;
+
+	mutex_lock(&input_dev->mutex);
+
+	if (device_may_wakeup(&pdev->dev))
+		disable_irq_wake(kbd->irq);
+
+	if (input_dev->users)
+		clk_enable(kbd->clk);
+
+	mutex_unlock(&input_dev->mutex);
+
+	return 0;
+}
+
+static const struct dev_pm_ops spear_kbd_pm_ops = {
+	.suspend	= spear_kbd_suspend,
+	.resume		= spear_kbd_resume,
+};
+#endif
+
+static struct platform_driver spear_kbd_driver = {
+	.probe		= spear_kbd_probe,
+	.remove		= __devexit_p(spear_kbd_remove),
+	.driver		= {
+		.name	= "keyboard",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &spear_kbd_pm_ops,
+#endif
+	},
+};
+
+static int __init spear_kbd_init(void)
+{
+	return platform_driver_register(&spear_kbd_driver);
+}
+module_init(spear_kbd_init);
+
+static void __exit spear_kbd_exit(void)
+{
+	platform_driver_unregister(&spear_kbd_driver);
+}
+module_exit(spear_kbd_exit);
+
+MODULE_AUTHOR("Rajeev Kumar");
+MODULE_DESCRIPTION("SPEAr Keyboard Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 69dc0cb..dbbe761 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -469,4 +469,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
 MODULE_DESCRIPTION("TC35893 Keypad Driver");
-MODULE_ALIAS("platform:tc3589x-keypad")
+MODULE_ALIAS("platform:tc3589x-keypad");
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 00137be..800fbcc 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -29,6 +29,7 @@
 
 static const struct i2c_device_id tca6416_id[] = {
 	{ "tca6416-keys", 16, },
+	{ "tca6408-keys", 8, },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, tca6416_id);
@@ -46,8 +47,9 @@
 	struct i2c_client *client;
 	struct input_dev *input;
 	struct delayed_work dwork;
-	u16 pinmask;
+	int io_size;
 	int irqnum;
+	u16 pinmask;
 	bool use_polling;
 	struct tca6416_button buttons[0];
 };
@@ -56,7 +58,9 @@
 {
 	int error;
 
-	error = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+	error = chip->io_size > 8 ?
+		i2c_smbus_write_word_data(chip->client, reg << 1, val) :
+		i2c_smbus_write_byte_data(chip->client, reg, val);
 	if (error < 0) {
 		dev_err(&chip->client->dev,
 			"%s failed, reg: %d, val: %d, error: %d\n",
@@ -71,7 +75,9 @@
 {
 	int retval;
 
-	retval = i2c_smbus_read_word_data(chip->client, reg << 1);
+	retval = chip->io_size > 8 ?
+		 i2c_smbus_read_word_data(chip->client, reg << 1) :
+		 i2c_smbus_read_byte_data(chip->client, reg);
 	if (retval < 0) {
 		dev_err(&chip->client->dev, "%s failed, reg: %d, error: %d\n",
 			__func__, reg, retval);
@@ -224,6 +230,7 @@
 
 	chip->client = client;
 	chip->input = input;
+	chip->io_size = id->driver_data;
 	chip->pinmask = pdata->pinmask;
 	chip->use_polling = pdata->use_polling;
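
The tca6416 change above lets one driver serve both the 16-bit TCA6416 and the 8-bit TCA6408A by switching between word and byte SMBus transfers, doubling the register index for the 16-bit part whose registers are paired. A standalone sketch of the same dispatch, with stand-in transfer functions instead of the real i2c_smbus_* helpers:

#include <stdio.h>

/* stand-ins for i2c_smbus_read_{word,byte}_data -- not real I2C calls */
static int smbus_read_word(int reg) { printf("word read, reg %d\n", reg); return 0; }
static int smbus_read_byte(int reg) { printf("byte read, reg %d\n", reg); return 0; }

static int expander_read(int io_size, int reg)
{
	return io_size > 8 ? smbus_read_word(reg << 1)
			   : smbus_read_byte(reg);
}

int main(void)
{
	expander_read(16, 1);	/* TCA6416-style: word access at doubled reg */
	expander_read(8, 1);	/* TCA6408A-style: plain byte access */
	return 0;
}
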
 
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c1a81bc..b0c6772 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -430,4 +430,28 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called adxl34x-spi.
 
+config INPUT_CMA3000
+	tristate "VTI CMA3000 Tri-axis accelerometer"
+	help
+	  Say Y here if you want to use the VTI CMA3000_D0x Accelerometer
+	  driver.
+
+	  This driver currently only supports I2C interface to the
+	  controller. Also select the I2C method.
+
+	  If unsure, say N
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cma3000_d0x.
+
+config INPUT_CMA3000_I2C
+	tristate "Support I2C bus connection"
+	depends on INPUT_CMA3000 && I2C
+	help
+	  Say Y here if you want to use VTI CMA3000_D0x Accelerometer
+	  through I2C interface.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cma3000_d0x_i2c.
+
 endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 06b2b51..9b47971 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -18,6 +18,8 @@
 obj-$(CONFIG_INPUT_ATLAS_BTNS)		+= atlas_btns.o
 obj-$(CONFIG_INPUT_BFIN_ROTARY)		+= bfin_rotary.o
 obj-$(CONFIG_INPUT_CM109)		+= cm109.o
+obj-$(CONFIG_INPUT_CMA3000)		+= cma3000_d0x.o
+obj-$(CONFIG_INPUT_CMA3000_I2C)		+= cma3000_d0x_i2c.o
 obj-$(CONFIG_INPUT_COBALT_BTNS)		+= cobalt_btns.o
 obj-$(CONFIG_INPUT_DM355EVM)		+= dm355evm_keys.o
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
new file mode 100644
index 0000000..1633b63
--- /dev/null
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -0,0 +1,398 @@
+/*
+ * VTI CMA3000_D0x Accelerometer driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input/cma3000.h>
+
+#include "cma3000_d0x.h"
+
+#define CMA3000_WHOAMI      0x00
+#define CMA3000_REVID       0x01
+#define CMA3000_CTRL        0x02
+#define CMA3000_STATUS      0x03
+#define CMA3000_RSTR        0x04
+#define CMA3000_INTSTATUS   0x05
+#define CMA3000_DOUTX       0x06
+#define CMA3000_DOUTY       0x07
+#define CMA3000_DOUTZ       0x08
+#define CMA3000_MDTHR       0x09
+#define CMA3000_MDFFTMR     0x0A
+#define CMA3000_FFTHR       0x0B
+
+#define CMA3000_RANGE2G    (1 << 7)
+#define CMA3000_RANGE8G    (0 << 7)
+#define CMA3000_BUSI2C     (0 << 4)
+#define CMA3000_MODEMASK   (7 << 1)
+#define CMA3000_GRANGEMASK (1 << 7)
+
+#define CMA3000_STATUS_PERR    1
+#define CMA3000_INTSTATUS_FFDET (1 << 2)
+
+/* Settling time delay in ms */
+#define CMA3000_SETDELAY    30
+
+/* Delay for clearing interrupt in us */
+#define CMA3000_INTDELAY    44
+
+
+/*
+ * Bit weights in mg for bit 0, other bits need
+ * a multiply factor of 2^n. The eighth bit is the sign bit.
+ */
+#define BIT_TO_2G  18
+#define BIT_TO_8G  71
+
+struct cma3000_accl_data {
+	const struct cma3000_bus_ops *bus_ops;
+	const struct cma3000_platform_data *pdata;
+
+	struct device *dev;
+	struct input_dev *input_dev;
+
+	int bit_to_mg;
+	int irq;
+
+	int g_range;
+	u8 mode;
+
+	struct mutex mutex;
+	bool opened;
+	bool suspended;
+};
+
+#define CMA3000_READ(data, reg, msg) \
+	(data->bus_ops->read(data->dev, reg, msg))
+#define CMA3000_SET(data, reg, val, msg) \
+	((data)->bus_ops->write(data->dev, reg, val, msg))
+
+/*
+ * Conversion for each of the eight modes to g, depending
+ * on the G range, i.e. 2G or 8G. Some modes always operate in
+ * 8G.
+ */
+
+static int mode_to_mg[8][2] = {
+	{ 0, 0 },
+	{ BIT_TO_8G, BIT_TO_2G },
+	{ BIT_TO_8G, BIT_TO_2G },
+	{ BIT_TO_8G, BIT_TO_8G },
+	{ BIT_TO_8G, BIT_TO_8G },
+	{ BIT_TO_8G, BIT_TO_2G },
+	{ BIT_TO_8G, BIT_TO_2G },
+	{ 0, 0},
+};
+
+static void decode_mg(struct cma3000_accl_data *data, int *datax,
+				int *datay, int *dataz)
+{
+	/* Data in 2's complement, convert to mg */
+	*datax = ((s8)*datax) * data->bit_to_mg;
+	*datay = ((s8)*datay) * data->bit_to_mg;
+	*dataz = ((s8)*dataz) * data->bit_to_mg;
+}
+
+static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
+{
+	struct cma3000_accl_data *data = dev_id;
+	int datax, datay, dataz;
+	u8 ctrl, mode, range, intr_status;
+
+	intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
+	if (intr_status < 0)
+		return IRQ_NONE;
+
+	/* Check if free fall is detected, report immediately */
+	if (intr_status & CMA3000_INTSTATUS_FFDET) {
+		input_report_abs(data->input_dev, ABS_MISC, 1);
+		input_sync(data->input_dev);
+	} else {
+		input_report_abs(data->input_dev, ABS_MISC, 0);
+	}
+
+	datax = CMA3000_READ(data, CMA3000_DOUTX, "X");
+	datay = CMA3000_READ(data, CMA3000_DOUTY, "Y");
+	dataz = CMA3000_READ(data, CMA3000_DOUTZ, "Z");
+
+	ctrl = CMA3000_READ(data, CMA3000_CTRL, "ctrl");
+	mode = (ctrl & CMA3000_MODEMASK) >> 1;
+	range = (ctrl & CMA3000_GRANGEMASK) >> 7;
+
+	data->bit_to_mg = mode_to_mg[mode][range];
+
+	/* Interrupt not for this device */
+	if (data->bit_to_mg == 0)
+		return IRQ_NONE;
+
+	/* Decode register values to milli g */
+	decode_mg(data, &datax, &datay, &dataz);
+
+	input_report_abs(data->input_dev, ABS_X, datax);
+	input_report_abs(data->input_dev, ABS_Y, datay);
+	input_report_abs(data->input_dev, ABS_Z, dataz);
+	input_sync(data->input_dev);
+
+	return IRQ_HANDLED;
+}
+
+static int cma3000_reset(struct cma3000_accl_data *data)
+{
+	int val;
+
+	/* Reset sequence */
+	CMA3000_SET(data, CMA3000_RSTR, 0x02, "Reset");
+	CMA3000_SET(data, CMA3000_RSTR, 0x0A, "Reset");
+	CMA3000_SET(data, CMA3000_RSTR, 0x04, "Reset");
+
+	/* Settling time delay */
+	mdelay(10);
+
+	val = CMA3000_READ(data, CMA3000_STATUS, "Status");
+	if (val < 0) {
+		dev_err(data->dev, "Reset failed\n");
+		return val;
+	}
+
+	if (val & CMA3000_STATUS_PERR) {
+		dev_err(data->dev, "Parity Error\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int cma3000_poweron(struct cma3000_accl_data *data)
+{
+	const struct cma3000_platform_data *pdata = data->pdata;
+	u8 ctrl = 0;
+	int ret;
+
+	if (data->g_range == CMARANGE_2G) {
+		ctrl = (data->mode << 1) | CMA3000_RANGE2G;
+	} else if (data->g_range == CMARANGE_8G) {
+		ctrl = (data->mode << 1) | CMA3000_RANGE8G;
+	} else {
+		dev_info(data->dev,
+			 "Invalid G range specified, assuming 8G\n");
+		ctrl = (data->mode << 1) | CMA3000_RANGE8G;
+	}
+
+	ctrl |= data->bus_ops->ctrl_mod;
+
+	CMA3000_SET(data, CMA3000_MDTHR, pdata->mdthr,
+		    "Motion Detect Threshold");
+	CMA3000_SET(data, CMA3000_MDFFTMR, pdata->mdfftmr,
+		    "Time register");
+	CMA3000_SET(data, CMA3000_FFTHR, pdata->ffthr,
+		    "Free fall threshold");
+	ret = CMA3000_SET(data, CMA3000_CTRL, ctrl, "Mode setting");
+	if (ret < 0)
+		return -EIO;
+
+	msleep(CMA3000_SETDELAY);
+
+	return 0;
+}
+
+static int cma3000_poweroff(struct cma3000_accl_data *data)
+{
+	int ret;
+
+	ret = CMA3000_SET(data, CMA3000_CTRL, CMAMODE_POFF, "Mode setting");
+	msleep(CMA3000_SETDELAY);
+
+	return ret;
+}
+
+static int cma3000_open(struct input_dev *input_dev)
+{
+	struct cma3000_accl_data *data = input_get_drvdata(input_dev);
+
+	mutex_lock(&data->mutex);
+
+	if (!data->suspended)
+		cma3000_poweron(data);
+
+	data->opened = true;
+
+	mutex_unlock(&data->mutex);
+
+	return 0;
+}
+
+static void cma3000_close(struct input_dev *input_dev)
+{
+	struct cma3000_accl_data *data = input_get_drvdata(input_dev);
+
+	mutex_lock(&data->mutex);
+
+	if (!data->suspended)
+		cma3000_poweroff(data);
+
+	data->opened = false;
+
+	mutex_unlock(&data->mutex);
+}
+
+void cma3000_suspend(struct cma3000_accl_data *data)
+{
+	mutex_lock(&data->mutex);
+
+	if (!data->suspended && data->opened)
+		cma3000_poweroff(data);
+
+	data->suspended = true;
+
+	mutex_unlock(&data->mutex);
+}
+EXPORT_SYMBOL(cma3000_suspend);
+
+
+void cma3000_resume(struct cma3000_accl_data *data)
+{
+	mutex_lock(&data->mutex);
+
+	if (data->suspended && data->opened)
+		cma3000_poweron(data);
+
+	data->suspended = false;
+
+	mutex_unlock(&data->mutex);
+}
+EXPORT_SYMBOL(cma3000_resume);
+
+struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
+				       const struct cma3000_bus_ops *bops)
+{
+	const struct cma3000_platform_data *pdata = dev->platform_data;
+	struct cma3000_accl_data *data;
+	struct input_dev *input_dev;
+	int rev;
+	int error;
+
+	if (!pdata) {
+		dev_err(dev, "platform data not found\n");
+		error = -EINVAL;
+		goto err_out;
+	}
+
+
+	/* if no IRQ return error */
+	if (irq == 0) {
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	data = kzalloc(sizeof(struct cma3000_accl_data), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!data || !input_dev) {
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	data->dev = dev;
+	data->input_dev = input_dev;
+	data->bus_ops = bops;
+	data->pdata = pdata;
+	data->irq = irq;
+	mutex_init(&data->mutex);
+
+	data->mode = pdata->mode;
+	if (data->mode < CMAMODE_DEFAULT || data->mode > CMAMODE_POFF) {
+		data->mode = CMAMODE_MOTDET;
+		dev_warn(dev,
+			 "Invalid mode specified, assuming Motion Detect\n");
+	}
+
+	data->g_range = pdata->g_range;
+	if (data->g_range != CMARANGE_2G && data->g_range != CMARANGE_8G) {
+		dev_info(dev,
+			 "Invalid G range specified, assuming 8G\n");
+		data->g_range = CMARANGE_8G;
+	}
+
+	input_dev->name = "cma3000-accelerometer";
+	input_dev->id.bustype = bops->bustype;
+	input_dev->open = cma3000_open;
+	input_dev->close = cma3000_close;
+
+	 __set_bit(EV_ABS, input_dev->evbit);
+
+	input_set_abs_params(input_dev, ABS_X,
+			-data->g_range, data->g_range, pdata->fuzz_x, 0);
+	input_set_abs_params(input_dev, ABS_Y,
+			-data->g_range, data->g_range, pdata->fuzz_y, 0);
+	input_set_abs_params(input_dev, ABS_Z,
+			-data->g_range, data->g_range, pdata->fuzz_z, 0);
+	input_set_abs_params(input_dev, ABS_MISC, 0, 1, 0, 0);
+
+	input_set_drvdata(input_dev, data);
+
+	error = cma3000_reset(data);
+	if (error)
+		goto err_free_mem;
+
+	rev = CMA3000_READ(data, CMA3000_REVID, "Revid");
+	if (rev < 0) {
+		error = rev;
+		goto err_free_mem;
+	}
+
+	pr_info("CMA3000 Accelerometer: Revision %x\n", rev);
+
+	error = request_threaded_irq(irq, NULL, cma3000_thread_irq,
+				     pdata->irqflags | IRQF_ONESHOT,
+				     "cma3000_d0x", data);
+	if (error) {
+		dev_err(dev, "request_threaded_irq failed\n");
+		goto err_free_mem;
+	}
+
+	error = input_register_device(data->input_dev);
+	if (error) {
+		dev_err(dev, "Unable to register input device\n");
+		goto err_free_irq;
+	}
+
+	return data;
+
+err_free_irq:
+	free_irq(irq, data);
+err_free_mem:
+	input_free_device(input_dev);
+	kfree(data);
+err_out:
+	return ERR_PTR(error);
+}
+EXPORT_SYMBOL(cma3000_init);
+
+void cma3000_exit(struct cma3000_accl_data *data)
+{
+	free_irq(data->irq, data);
+	input_unregister_device(data->input_dev);
+	kfree(data);
+}
+EXPORT_SYMBOL(cma3000_exit);
+
+MODULE_DESCRIPTION("CMA3000-D0x Accelerometer Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
diff --git a/drivers/input/misc/cma3000_d0x.h b/drivers/input/misc/cma3000_d0x.h
new file mode 100644
index 0000000..2304ce3
--- /dev/null
+++ b/drivers/input/misc/cma3000_d0x.h
@@ -0,0 +1,42 @@
+/*
+ * VTI CMA3000_D0x Accelerometer driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _INPUT_CMA3000_H
+#define _INPUT_CMA3000_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+struct device;
+struct cma3000_accl_data;
+
+struct cma3000_bus_ops {
+	u16 bustype;
+	u8 ctrl_mod;
+	int (*read)(struct device *, u8, char *);
+	int (*write)(struct device *, u8, u8, char *);
+};
+
+struct cma3000_accl_data *cma3000_init(struct device *dev, int irq,
+					const struct cma3000_bus_ops *bops);
+void cma3000_exit(struct cma3000_accl_data *);
+void cma3000_suspend(struct cma3000_accl_data *);
+void cma3000_resume(struct cma3000_accl_data *);
+
+#endif
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
new file mode 100644
index 0000000..d100cc5
--- /dev/null
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -0,0 +1,143 @@
+/*
+ * Implements the I2C interface for the VTI CMA3000_D0x Accelerometer driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/input/cma3000.h>
+#include "cma3000_d0x.h"
+
+static int cma3000_i2c_set(struct device *dev,
+			   u8 reg, u8 val, char *msg)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, val);
+	if (ret < 0)
+		dev_err(&client->dev,
+			"%s failed (%s, %d)\n", __func__, msg, ret);
+	return ret;
+}
+
+static int cma3000_i2c_read(struct device *dev, u8 reg, char *msg)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		dev_err(&client->dev,
+			"%s failed (%s, %d)\n", __func__, msg, ret);
+	return ret;
+}
+
+static const struct cma3000_bus_ops cma3000_i2c_bops = {
+	.bustype	= BUS_I2C,
+#define CMA3000_BUSI2C     (0 << 4)
+	.ctrl_mod	= CMA3000_BUSI2C,
+	.read		= cma3000_i2c_read,
+	.write		= cma3000_i2c_set,
+};
+
+static int __devinit cma3000_i2c_probe(struct i2c_client *client,
+					const struct i2c_device_id *id)
+{
+	struct cma3000_accl_data *data;
+
+	data = cma3000_init(&client->dev, client->irq, &cma3000_i2c_bops);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	i2c_set_clientdata(client, data);
+
+	return 0;
+}
+
+static int __devexit cma3000_i2c_remove(struct i2c_client *client)
+{
+	struct cma3000_accl_data *data = i2c_get_clientdata(client);
+
+	cma3000_exit(data);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int cma3000_i2c_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct cma3000_accl_data *data = i2c_get_clientdata(client);
+
+	cma3000_suspend(data);
+
+	return 0;
+}
+
+static int cma3000_i2c_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct cma3000_accl_data *data = i2c_get_clientdata(client);
+
+	cma3000_resume(data);
+
+	return 0;
+}
+
+static const struct dev_pm_ops cma3000_i2c_pm_ops = {
+	.suspend	= cma3000_i2c_suspend,
+	.resume		= cma3000_i2c_resume,
+};
+#endif
+
+static const struct i2c_device_id cma3000_i2c_id[] = {
+	{ "cma3000_d01", 0 },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(i2c, cma3000_i2c_id);
+
+static struct i2c_driver cma3000_i2c_driver = {
+	.probe		= cma3000_i2c_probe,
+	.remove		= __devexit_p(cma3000_i2c_remove),
+	.id_table	= cma3000_i2c_id,
+	.driver = {
+		.name	= "cma3000_i2c_accl",
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &cma3000_i2c_pm_ops,
+#endif
+	},
+};
+
+static int __init cma3000_i2c_init(void)
+{
+	return i2c_add_driver(&cma3000_i2c_driver);
+}
+
+static void __exit cma3000_i2c_exit(void)
+{
+	i2c_del_driver(&cma3000_i2c_driver);
+}
+
+module_init(cma3000_i2c_init);
+module_exit(cma3000_i2c_exit);
+
+MODULE_DESCRIPTION("CMA3000-D0x Accelerometer I2C Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index d1583ae..08be1a3 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -169,19 +169,29 @@
 }
 
 #ifdef CONFIG_PM
-static int pcf8574_kp_resume(struct i2c_client *client)
+static int pcf8574_kp_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
+
 	enable_irq(client->irq);
 
 	return 0;
 }
 
-static int pcf8574_kp_suspend(struct i2c_client *client, pm_message_t mesg)
+static int pcf8574_kp_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
+
 	disable_irq(client->irq);
 
 	return 0;
 }
+
+static const struct dev_pm_ops pcf8574_kp_pm_ops = {
+	.suspend	= pcf8574_kp_suspend,
+	.resume		= pcf8574_kp_resume,
+};
+
 #else
 # define pcf8574_kp_resume  NULL
 # define pcf8574_kp_suspend NULL
@@ -197,11 +207,12 @@
 	.driver = {
 		.name  = DRV_NAME,
 		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &pcf8574_kp_pm_ops,
+#endif
 	},
 	.probe    = pcf8574_kp_probe,
 	.remove   = __devexit_p(pcf8574_kp_remove),
-	.suspend  = pcf8574_kp_suspend,
-	.resume   = pcf8574_kp_resume,
 	.id_table = pcf8574_kp_id,
 };
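
The pcf8574 conversion above moves from the legacy i2c suspend/resume hooks, which received the i2c_client directly, to dev_pm_ops callbacks that only get a struct device and recover the client with to_i2c_client(). A small sketch of that container_of()-style recovery, using simplified stand-in types rather than the real i2c structures:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };

struct i2c_client {
	int irq;
	struct device dev;	/* embedded, as in the kernel */
};

static int kp_suspend(struct device *dev)
{
	struct i2c_client *client = container_of(dev, struct i2c_client, dev);

	printf("suspend: would disable IRQ %d on %s\n", client->irq, dev->name);
	return 0;
}

int main(void)
{
	struct i2c_client client = { .irq = 42, .dev = { .name = "pcf8574_kp" } };

	return kp_suspend(&client.dev);
}
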
 
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index b941078..82542a1 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -37,6 +37,7 @@
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
 #include <linux/uinput.h>
+#include <linux/input/mt.h>
 #include "../input-compat.h"
 
 static int uinput_dev_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
@@ -406,8 +407,7 @@
 			goto exit;
 		if (test_bit(ABS_MT_SLOT, dev->absbit)) {
 			int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
-			input_mt_create_slots(dev, nslot);
-			input_set_events_per_packet(dev, 6 * nslot);
+			input_mt_init_slots(dev, nslot);
 		} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
 			input_set_events_per_packet(dev, 60);
 		}
@@ -680,6 +680,10 @@
 			retval = uinput_set_bit(arg, swbit, SW_MAX);
 			break;
 
+		case UI_SET_PROPBIT:
+			retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
+			break;
+
 		case UI_SET_PHYS:
 			if (udev->state == UIST_CREATED) {
 				retval = -EINVAL;
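
The uinput addition above exposes input device properties to userspace through a new UI_SET_PROPBIT ioctl. A hedged sketch of how a uinput client might use it on a kernel carrying this patch; the setup follows the uinput write-then-create protocol of this era, with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>
#include <linux/uinput.h>

int main(void)
{
	struct uinput_user_dev uidev;
	int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open /dev/uinput");
		return 1;
	}

	ioctl(fd, UI_SET_EVBIT, EV_KEY);
	ioctl(fd, UI_SET_KEYBIT, BTN_TOUCH);
	ioctl(fd, UI_SET_PROPBIT, INPUT_PROP_DIRECT);	/* the new ioctl */

	memset(&uidev, 0, sizeof(uidev));
	snprintf(uidev.name, UINPUT_MAX_NAME_SIZE, "propbit-demo");
	uidev.id.bustype = BUS_VIRTUAL;

	if (write(fd, &uidev, sizeof(uidev)) != (ssize_t)sizeof(uidev)) {
		perror("write uinput_user_dev");
		close(fd);
		return 1;
	}

	ioctl(fd, UI_DEV_CREATE);
	/* ... inject events here ... */
	ioctl(fd, UI_DEV_DESTROY);
	close(fd);
	return 0;
}
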
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b952317..ee82851 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -55,6 +55,14 @@
 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI	0x0236
 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO	0x0237
 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS	0x0238
+/* MacbookAir3,2 (unibody), aka wellspring5 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI	0x023f
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO	0x0240
+#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS	0x0241
+/* MacbookAir3,1 (unibody), aka wellspring4 */
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI	0x0242
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO	0x0243
+#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS	0x0244
 
 #define BCM5974_DEVICE(prod) {					\
 	.match_flags = (USB_DEVICE_ID_MATCH_DEVICE |		\
@@ -80,6 +88,14 @@
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO),
 	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
+	/* MacbookAir3,2 */
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS),
+	/* MacbookAir3,1 */
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO),
+	BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS),
 	/* Terminating entry */
 	{}
 };
@@ -234,6 +250,30 @@
 		{ DIM_X, DIM_X / SN_COORD, -4460, 5166 },
 		{ DIM_Y, DIM_Y / SN_COORD, -75, 6700 }
 	},
+	{
+		USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI,
+		USB_DEVICE_ID_APPLE_WELLSPRING4_ISO,
+		USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
+		HAS_INTEGRATED_BUTTON,
+		0x84, sizeof(struct bt_data),
+		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+		{ DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+		{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+	},
+	{
+		USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI,
+		USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO,
+		USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
+		HAS_INTEGRATED_BUTTON,
+		0x84, sizeof(struct bt_data),
+		0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+		{ DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+		{ DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+		{ DIM_X, DIM_X / SN_COORD, -4616, 5112 },
+		{ DIM_Y, DIM_Y / SN_COORD, -142, 5234 }
+	},
 	{}
 };
 
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 1d2205b..95577c1 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -40,6 +40,8 @@
 #include "psmouse.h"
 #include "hgpk.h"
 
+#define ILLEGAL_XY 999999
+
 static bool tpdebug;
 module_param(tpdebug, bool, 0644);
 MODULE_PARM_DESC(tpdebug, "enable debugging, dumping packets to KERN_DEBUG.");
@@ -47,48 +49,150 @@
 static int recalib_delta = 100;
 module_param(recalib_delta, int, 0644);
 MODULE_PARM_DESC(recalib_delta,
-	"packets containing a delta this large will cause a recalibration.");
+	"packets containing a delta this large will be discarded, and a "
+	"recalibration may be scheduled.");
 
-static int jumpy_delay = 1000;
+static int jumpy_delay = 20;
 module_param(jumpy_delay, int, 0644);
 MODULE_PARM_DESC(jumpy_delay,
 	"delay (ms) before recal after jumpiness detected");
 
-static int spew_delay = 1000;
+static int spew_delay = 1;
 module_param(spew_delay, int, 0644);
 MODULE_PARM_DESC(spew_delay,
 	"delay (ms) before recal after packet spew detected");
 
-static int recal_guard_time = 2000;
+static int recal_guard_time;
 module_param(recal_guard_time, int, 0644);
 MODULE_PARM_DESC(recal_guard_time,
 	"interval (ms) during which recal will be restarted if packet received");
 
-static int post_interrupt_delay = 1000;
+static int post_interrupt_delay = 40;
 module_param(post_interrupt_delay, int, 0644);
 MODULE_PARM_DESC(post_interrupt_delay,
 	"delay (ms) before recal after recal interrupt detected");
 
+static bool autorecal = true;
+module_param(autorecal, bool, 0644);
+MODULE_PARM_DESC(autorecal, "enable recalibration in the driver");
+
+static char hgpk_mode_name[16];
+module_param_string(hgpk_mode, hgpk_mode_name, sizeof(hgpk_mode_name), 0644);
+MODULE_PARM_DESC(hgpk_mode,
+	"default hgpk mode: mouse, glidesensor or pentablet");
+
+static int hgpk_default_mode = HGPK_MODE_MOUSE;
+
+static const char * const hgpk_mode_names[] = {
+	[HGPK_MODE_MOUSE] = "Mouse",
+	[HGPK_MODE_GLIDESENSOR] = "GlideSensor",
+	[HGPK_MODE_PENTABLET] = "PenTablet",
+};
+
+static int hgpk_mode_from_name(const char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hgpk_mode_names); i++) {
+		const char *name = hgpk_mode_names[i];
+		if (strlen(name) == len && !strncasecmp(name, buf, len))
+			return i;
+	}
+
+	return HGPK_MODE_INVALID;
+}
+
 /*
- * When the touchpad gets ultra-sensitive, one can keep their finger 1/2"
- * above the pad and still have it send packets.  This causes a jump cursor
- * when one places their finger on the pad.  We can probably detect the
- * jump as we see a large deltas (>= 100px).  In mouse mode, I've been
- * unable to even come close to 100px deltas during normal usage, so I think
- * this threshold is safe.  If a large delta occurs, trigger a recalibration.
+ * see if the new value is within 20% of half of the old value
  */
-static void hgpk_jumpy_hack(struct psmouse *psmouse, int x, int y)
+static int approx_half(int curr, int prev)
+{
+	int belowhalf, abovehalf;
+
+	if (curr < 5 || prev < 5)
+		return 0;
+
+	belowhalf = (prev * 8) / 20;
+	abovehalf = (prev * 12) / 20;
+
+	return belowhalf < curr && curr <= abovehalf;
+}
+
+/*
+ * Throw out oddly large delta packets, and any that immediately follow whose
+ * values are each approximately half of the previous.  It seems that the ALPS
+ * firmware emits errant packets, and they get averaged out slowly.
+ */
+static int hgpk_discard_decay_hack(struct psmouse *psmouse, int x, int y)
 {
 	struct hgpk_data *priv = psmouse->private;
+	int avx, avy;
+	bool do_recal = false;
 
-	if (abs(x) > recalib_delta || abs(y) > recalib_delta) {
-		hgpk_err(psmouse, ">%dpx jump detected (%d,%d)\n",
-				recalib_delta, x, y);
-		/* My car gets forty rods to the hogshead and that's the
-		 * way I likes it! */
+	avx = abs(x);
+	avy = abs(y);
+
+	/* discard if too big, or half that but > 4 times the prev delta */
+	if (avx > recalib_delta ||
+		(avx > recalib_delta / 2 && ((avx / 4) > priv->xlast))) {
+		hgpk_err(psmouse, "detected %dpx jump in x\n", x);
+		priv->xbigj = avx;
+	} else if (approx_half(avx, priv->xbigj)) {
+		hgpk_err(psmouse, "detected secondary %dpx jump in x\n", x);
+		priv->xbigj = avx;
+		priv->xsaw_secondary++;
+	} else {
+		if (priv->xbigj && priv->xsaw_secondary > 1)
+			do_recal = true;
+		priv->xbigj = 0;
+		priv->xsaw_secondary = 0;
+	}
+
+	if (avy > recalib_delta ||
+		(avy > recalib_delta / 2 && ((avy / 4) > priv->ylast))) {
+		hgpk_err(psmouse, "detected %dpx jump in y\n", y);
+		priv->ybigj = avy;
+	} else if (approx_half(avy, priv->ybigj)) {
+		hgpk_err(psmouse, "detected secondary %dpx jump in y\n", y);
+		priv->ybigj = avy;
+		priv->ysaw_secondary++;
+	} else {
+		if (priv->ybigj && priv->ysaw_secondary > 1)
+			do_recal = true;
+		priv->ybigj = 0;
+		priv->ysaw_secondary = 0;
+	}
+
+	priv->xlast = avx;
+	priv->ylast = avy;
+
+	if (do_recal && jumpy_delay) {
+		hgpk_err(psmouse, "scheduling recalibration\n");
 		psmouse_queue_work(psmouse, &priv->recalib_wq,
 				msecs_to_jiffies(jumpy_delay));
 	}
+
+	return priv->xbigj || priv->ybigj;
+}
+
+static void hgpk_reset_spew_detection(struct hgpk_data *priv)
+{
+	priv->spew_count = 0;
+	priv->dupe_count = 0;
+	priv->x_tally = 0;
+	priv->y_tally = 0;
+	priv->spew_flag = NO_SPEW;
+}
+
+static void hgpk_reset_hack_state(struct psmouse *psmouse)
+{
+	struct hgpk_data *priv = psmouse->private;
+
+	priv->abs_x = priv->abs_y = -1;
+	priv->xlast = priv->ylast = ILLEGAL_XY;
+	priv->xbigj = priv->ybigj = 0;
+	priv->xsaw_secondary = priv->ysaw_secondary = 0;
+	hgpk_reset_spew_detection(priv);
 }
 
 /*
@@ -116,20 +220,57 @@
 	if (l || r)
 		return;
 
+	/* don't track spew if the workaround feature has been turned off */
+	if (!spew_delay)
+		return;
+
+	if (abs(x) > 3 || abs(y) > 3) {
+		/* no spew, or spew ended */
+		hgpk_reset_spew_detection(priv);
+		return;
+	}
+
+	/* Keep a tally of the overall delta to the cursor position caused by
+	 * the spew */
 	priv->x_tally += x;
 	priv->y_tally += y;
 
-	if (++priv->count > 100) {
+	switch (priv->spew_flag) {
+	case NO_SPEW:
+		/* we're not spewing, but this packet might be the start */
+		priv->spew_flag = MAYBE_SPEWING;
+
+		/* fall-through */
+
+	case MAYBE_SPEWING:
+		priv->spew_count++;
+
+		if (priv->spew_count < SPEW_WATCH_COUNT)
+			break;
+
+		/* excessive spew detected, request recalibration */
+		priv->spew_flag = SPEW_DETECTED;
+
+		/* fall-through */
+
+	case SPEW_DETECTED:
+		/* only recalibrate when the overall delta to the cursor
+		 * is really small. if the spew is causing significant cursor
+		 * movement, it is probably a case of the user moving the
+		 * cursor very slowly across the screen. */
 		if (abs(priv->x_tally) < 3 && abs(priv->y_tally) < 3) {
-			hgpk_dbg(psmouse, "packet spew detected (%d,%d)\n",
+			hgpk_err(psmouse, "packet spew detected (%d,%d)\n",
 				 priv->x_tally, priv->y_tally);
+			priv->spew_flag = RECALIBRATING;
 			psmouse_queue_work(psmouse, &priv->recalib_wq,
 					   msecs_to_jiffies(spew_delay));
 		}
-		/* reset every 100 packets */
-		priv->count = 0;
-		priv->x_tally = 0;
-		priv->y_tally = 0;
+
+		break;
+	case RECALIBRATING:
+		/* we already detected a spew and requested a recalibration,
+		 * just wait for the queue to kick into action. */
+		break;
 	}
 }
 
@@ -143,25 +284,168 @@
  * swr/swl are the left/right buttons.
  * x-neg/y-neg are the x and y delta negative bits
  * x-over/y-over are the x and y overflow bits
+ *
+ * ---
+ *
+ * HGPK Advanced Mode - single-mode format
+ *
+ * byte 0(PT):  1    1    0    0    1    1     1     1
+ * byte 0(GS):  1    1    1    1    1    1     1     1
+ * byte 1:      0   x6   x5   x4   x3   x2    x1    x0
+ * byte 2(PT):  0    0   x9   x8   x7    ? pt-dsw    0
+ * byte 2(GS):  0  x10   x9   x8   x7    ? gs-dsw pt-dsw
+ * byte 3:      0   y9   y8   y7    1    0   swr   swl
+ * byte 4:      0   y6   y5   y4   y3   y2    y1    y0
+ * byte 5:      0   z6   z5   z4   z3   z2    z1    z0
+ *
+ * ?'s are not defined in the protocol spec, may vary between models.
+ *
+ * swr/swl are the left/right buttons.
+ *
+ * pt-dsw/gs-dsw indicate that the pt/gs sensor is detecting a
+ * pen/finger
  */
-static int hgpk_validate_byte(unsigned char *packet)
+static bool hgpk_is_byte_valid(struct psmouse *psmouse, unsigned char *packet)
 {
-	return (packet[0] & 0x0C) != 0x08;
+	struct hgpk_data *priv = psmouse->private;
+	int pktcnt = psmouse->pktcnt;
+	bool valid;
+
+	switch (priv->mode) {
+	case HGPK_MODE_MOUSE:
+		valid = (packet[0] & 0x0C) == 0x08;
+		break;
+
+	case HGPK_MODE_GLIDESENSOR:
+		valid = pktcnt == 1 ?
+			packet[0] == HGPK_GS : !(packet[pktcnt - 1] & 0x80);
+		break;
+
+	case HGPK_MODE_PENTABLET:
+		valid = pktcnt == 1 ?
+			packet[0] == HGPK_PT : !(packet[pktcnt - 1] & 0x80);
+		break;
+
+	default:
+		valid = false;
+		break;
+	}
+
+	if (!valid)
+		hgpk_dbg(psmouse,
+			 "bad data, mode %d (%d) %02x %02x %02x %02x %02x %02x\n",
+			 priv->mode, pktcnt,
+			 psmouse->packet[0], psmouse->packet[1],
+			 psmouse->packet[2], psmouse->packet[3],
+			 psmouse->packet[4], psmouse->packet[5]);
+
+	return valid;
 }
 
-static void hgpk_process_packet(struct psmouse *psmouse)
+static void hgpk_process_advanced_packet(struct psmouse *psmouse)
+{
+	struct hgpk_data *priv = psmouse->private;
+	struct input_dev *idev = psmouse->dev;
+	unsigned char *packet = psmouse->packet;
+	int down = !!(packet[2] & 2);
+	int left = !!(packet[3] & 1);
+	int right = !!(packet[3] & 2);
+	int x = packet[1] | ((packet[2] & 0x78) << 4);
+	int y = packet[4] | ((packet[3] & 0x70) << 3);
+
+	if (priv->mode == HGPK_MODE_GLIDESENSOR) {
+		int pt_down = !!(packet[2] & 1);
+		int finger_down = !!(packet[2] & 2);
+		int z = packet[5];
+
+		input_report_abs(idev, ABS_PRESSURE, z);
+		if (tpdebug)
+			hgpk_dbg(psmouse, "pd=%d fd=%d z=%d",
+				 pt_down, finger_down, z);
+	} else {
+		/*
+		 * PenTablet mode does not report pressure, so we don't
+		 * report it here
+		 */
+		if (tpdebug)
+			hgpk_dbg(psmouse, "pd=%d ", down);
+	}
+
+	if (tpdebug)
+		hgpk_dbg(psmouse, "l=%d r=%d x=%d y=%d\n", left, right, x, y);
+
+	input_report_key(idev, BTN_TOUCH, down);
+	input_report_key(idev, BTN_LEFT, left);
+	input_report_key(idev, BTN_RIGHT, right);
+
+	/*
+	 * If this packet says that the finger was removed, reset our position
+	 * tracking so that we don't erroneously detect a jump on next press.
+	 */
+	if (!down) {
+		hgpk_reset_hack_state(psmouse);
+		goto done;
+	}
+
+	/*
+	 * Weed out duplicate packets (we get quite a few, and they mess up
+	 * our jump detection)
+	 */
+	if (x == priv->abs_x && y == priv->abs_y) {
+		if (++priv->dupe_count > SPEW_WATCH_COUNT) {
+			if (tpdebug)
+				hgpk_dbg(psmouse, "hard spew detected\n");
+			priv->spew_flag = RECALIBRATING;
+			psmouse_queue_work(psmouse, &priv->recalib_wq,
+					   msecs_to_jiffies(spew_delay));
+		}
+		goto done;
+	}
+
+	/* not a duplicate, continue with position reporting */
+	priv->dupe_count = 0;
+
+	/* Don't apply hacks in PT mode, it seems reliable */
+	if (priv->mode != HGPK_MODE_PENTABLET && priv->abs_x != -1) {
+		int x_diff = priv->abs_x - x;
+		int y_diff = priv->abs_y - y;
+		if (hgpk_discard_decay_hack(psmouse, x_diff, y_diff)) {
+			if (tpdebug)
+				hgpk_dbg(psmouse, "discarding\n");
+			goto done;
+		}
+		hgpk_spewing_hack(psmouse, left, right, x_diff, y_diff);
+	}
+
+	input_report_abs(idev, ABS_X, x);
+	input_report_abs(idev, ABS_Y, y);
+	priv->abs_x = x;
+	priv->abs_y = y;
+
+done:
+	input_sync(idev);
+}
+
+static void hgpk_process_simple_packet(struct psmouse *psmouse)
 {
 	struct input_dev *dev = psmouse->dev;
 	unsigned char *packet = psmouse->packet;
-	int x, y, left, right;
+	int left = packet[0] & 1;
+	int right = (packet[0] >> 1) & 1;
+	int x = packet[1] - ((packet[0] << 4) & 0x100);
+	int y = ((packet[0] << 3) & 0x100) - packet[2];
 
-	left = packet[0] & 1;
-	right = (packet[0] >> 1) & 1;
+	if (packet[0] & 0xc0)
+		hgpk_dbg(psmouse,
+			 "overflow -- 0x%02x 0x%02x 0x%02x\n",
+			 packet[0], packet[1], packet[2]);
 
-	x = packet[1] - ((packet[0] << 4) & 0x100);
-	y = ((packet[0] << 3) & 0x100) - packet[2];
+	if (hgpk_discard_decay_hack(psmouse, x, y)) {
+		if (tpdebug)
+			hgpk_dbg(psmouse, "discarding\n");
+		return;
+	}
 
-	hgpk_jumpy_hack(psmouse, x, y);
 	hgpk_spewing_hack(psmouse, left, right, x, y);
 
 	if (tpdebug)
@@ -180,15 +464,14 @@
 {
 	struct hgpk_data *priv = psmouse->private;
 
-	if (hgpk_validate_byte(psmouse->packet)) {
-		hgpk_dbg(psmouse, "%s: (%d) %02x %02x %02x\n",
-				__func__, psmouse->pktcnt, psmouse->packet[0],
-				psmouse->packet[1], psmouse->packet[2]);
+	if (!hgpk_is_byte_valid(psmouse, psmouse->packet))
 		return PSMOUSE_BAD_DATA;
-	}
 
 	if (psmouse->pktcnt >= psmouse->pktsize) {
-		hgpk_process_packet(psmouse);
+		if (priv->mode == HGPK_MODE_MOUSE)
+			hgpk_process_simple_packet(psmouse);
+		else
+			hgpk_process_advanced_packet(psmouse);
 		return PSMOUSE_FULL_PACKET;
 	}
 
@@ -210,33 +493,176 @@
 	return PSMOUSE_GOOD_DATA;
 }
 
+static int hgpk_select_mode(struct psmouse *psmouse)
+{
+	struct ps2dev *ps2dev = &psmouse->ps2dev;
+	struct hgpk_data *priv = psmouse->private;
+	int i;
+	int cmd;
+
+	/*
+	 * 4 disables to enable advanced mode
+	 * then 3 0xf2 bytes as the preamble for GS/PT selection
+	 */
+	const int advanced_init[] = {
+		PSMOUSE_CMD_DISABLE, PSMOUSE_CMD_DISABLE,
+		PSMOUSE_CMD_DISABLE, PSMOUSE_CMD_DISABLE,
+		0xf2, 0xf2, 0xf2,
+	};
+
+	switch (priv->mode) {
+	case HGPK_MODE_MOUSE:
+		psmouse->pktsize = 3;
+		break;
+
+	case HGPK_MODE_GLIDESENSOR:
+	case HGPK_MODE_PENTABLET:
+		psmouse->pktsize = 6;
+
+		/* Switch to 'Advanced mode': four disables in a row. */
+		for (i = 0; i < ARRAY_SIZE(advanced_init); i++)
+			if (ps2_command(ps2dev, NULL, advanced_init[i]))
+				return -EIO;
+
+		/* select between GlideSensor (mouse) or PenTablet */
+		cmd = priv->mode == HGPK_MODE_GLIDESENSOR ?
+			PSMOUSE_CMD_SETSCALE11 : PSMOUSE_CMD_SETSCALE21;
+
+		if (ps2_command(ps2dev, NULL, cmd))
+			return -EIO;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void hgpk_setup_input_device(struct input_dev *input,
+				    struct input_dev *old_input,
+				    enum hgpk_mode mode)
+{
+	if (old_input) {
+		input->name = old_input->name;
+		input->phys = old_input->phys;
+		input->id = old_input->id;
+		input->dev.parent = old_input->dev.parent;
+	}
+
+	memset(input->evbit, 0, sizeof(input->evbit));
+	memset(input->relbit, 0, sizeof(input->relbit));
+	memset(input->keybit, 0, sizeof(input->keybit));
+
+	/* All modes report left and right buttons */
+	__set_bit(EV_KEY, input->evbit);
+	__set_bit(BTN_LEFT, input->keybit);
+	__set_bit(BTN_RIGHT, input->keybit);
+
+	switch (mode) {
+	case HGPK_MODE_MOUSE:
+		__set_bit(EV_REL, input->evbit);
+		__set_bit(REL_X, input->relbit);
+		__set_bit(REL_Y, input->relbit);
+		break;
+
+	case HGPK_MODE_GLIDESENSOR:
+		__set_bit(BTN_TOUCH, input->keybit);
+		__set_bit(BTN_TOOL_FINGER, input->keybit);
+
+		__set_bit(EV_ABS, input->evbit);
+
+		/* GlideSensor has pressure sensor, PenTablet does not */
+		input_set_abs_params(input, ABS_PRESSURE, 0, 15, 0, 0);
+
+		/* From device specs */
+		input_set_abs_params(input, ABS_X, 0, 399, 0, 0);
+		input_set_abs_params(input, ABS_Y, 0, 290, 0, 0);
+
+		/* Calculated by hand based on usable size (52mm x 38mm) */
+		input_abs_set_res(input, ABS_X, 8);
+		input_abs_set_res(input, ABS_Y, 8);
+		break;
+
+	case HGPK_MODE_PENTABLET:
+		__set_bit(BTN_TOUCH, input->keybit);
+		__set_bit(BTN_TOOL_FINGER, input->keybit);
+
+		__set_bit(EV_ABS, input->evbit);
+
+		/* From device specs */
+		input_set_abs_params(input, ABS_X, 0, 999, 0, 0);
+		input_set_abs_params(input, ABS_Y, 5, 239, 0, 0);
+
+		/* Calculated by hand based on usable size (156mm x 38mm) */
+		input_abs_set_res(input, ABS_X, 6);
+		input_abs_set_res(input, ABS_Y, 8);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+static int hgpk_reset_device(struct psmouse *psmouse, bool recalibrate)
+{
+	int err;
+
+	psmouse_reset(psmouse);
+
+	if (recalibrate) {
+		struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+		/* send the recalibrate request */
+		if (ps2_command(ps2dev, NULL, 0xf5) ||
+		    ps2_command(ps2dev, NULL, 0xf5) ||
+		    ps2_command(ps2dev, NULL, 0xe6) ||
+		    ps2_command(ps2dev, NULL, 0xf5)) {
+			return -1;
+		}
+
+		/* according to ALPS, 150mS is required for recalibration */
+		msleep(150);
+	}
+
+	err = hgpk_select_mode(psmouse);
+	if (err) {
+		hgpk_err(psmouse, "failed to select mode\n");
+		return err;
+	}
+
+	hgpk_reset_hack_state(psmouse);
+
+	return 0;
+}
+
 static int hgpk_force_recalibrate(struct psmouse *psmouse)
 {
 	struct ps2dev *ps2dev = &psmouse->ps2dev;
 	struct hgpk_data *priv = psmouse->private;
+	int err;
 
 	/* C-series touchpads added the recalibrate command */
 	if (psmouse->model < HGPK_MODEL_C)
 		return 0;
 
+	if (!autorecal) {
+		hgpk_dbg(psmouse, "recalibrations disabled, ignoring\n");
+		return 0;
+	}
+
+	hgpk_dbg(psmouse, "recalibrating touchpad..\n");
+
 	/* we don't want to race with the irq handler, nor with resyncs */
 	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
 
 	/* start by resetting the device */
-	psmouse_reset(psmouse);
+	err = hgpk_reset_device(psmouse, true);
+	if (err)
+		return err;
 
-	/* send the recalibrate request */
-	if (ps2_command(ps2dev, NULL, 0xf5) ||
-	    ps2_command(ps2dev, NULL, 0xf5) ||
-	    ps2_command(ps2dev, NULL, 0xe6) ||
-	    ps2_command(ps2dev, NULL, 0xf5)) {
-		return -1;
-	}
-
-	/* according to ALPS, 150mS is required for recalibration */
-	msleep(150);
-
-	/* XXX: If a finger is down during this delay, recalibration will
+	/*
+	 * XXX: If a finger is down during this delay, recalibration will
 	 * detect capacitance incorrectly.  This is a hardware bug, and
 	 * we don't have a good way to deal with it.  The 2s window stuff
 	 * (below) is our best option for now.
@@ -247,25 +673,35 @@
 
 	psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
 
-	/* After we recalibrate, we shouldn't get any packets for 2s.  If
-	 * we do, it's likely that someone's finger was on the touchpad.
-	 * If someone's finger *was* on the touchpad, it's probably
-	 * miscalibrated.  So, we should schedule another recalibration
+	if (tpdebug)
+		hgpk_dbg(psmouse, "touchpad reactivated\n");
+
+	/*
+	 * If we get packets right away after recalibrating, it's likely
+	 * that a finger was on the touchpad.  If so, it's probably
+	 * miscalibrated, so we optionally schedule another.
 	 */
-	priv->recalib_window = jiffies +  msecs_to_jiffies(recal_guard_time);
+	if (recal_guard_time)
+		priv->recalib_window = jiffies +
+			msecs_to_jiffies(recal_guard_time);
 
 	return 0;
 }
 
 /*
- * This kills power to the touchpad; according to ALPS, current consumption
- * goes down to 50uA after running this.  To turn power back on, we drive
- * MS-DAT low.
+ * This puts the touchpad in a power saving mode; according to ALPS, current
+ * consumption goes down to 50uA after running this.  To turn power back on,
+ * we drive MS-DAT low.  Measuring with a 1mA resolution ammeter says that
+ * the current on the SUS_3.3V rail drops from 3mA or 4mA to 0 when we do this.
+ *
+ * We have no formal spec that details this operation -- the low-power
+ * sequence came from a long-lost email trail.
  */
-static int hgpk_toggle_power(struct psmouse *psmouse, int enable)
+static int hgpk_toggle_powersave(struct psmouse *psmouse, int enable)
 {
 	struct ps2dev *ps2dev = &psmouse->ps2dev;
 	int timeo;
+	int err;
 
 	/* Added on D-series touchpads */
 	if (psmouse->model < HGPK_MODEL_D)
@@ -279,24 +715,27 @@
 		 * the controller.  Once we get an ACK back from it, it
 		 * means we can continue with the touchpad re-init.  ALPS
 		 * tells us that 1s should be long enough, so set that as
-		 * the upper bound.
+		 * the upper bound. (in practice, it takes about 3 loops.)
 		 */
 		for (timeo = 20; timeo > 0; timeo--) {
 			if (!ps2_sendbyte(&psmouse->ps2dev,
 					PSMOUSE_CMD_DISABLE, 20))
 				break;
-			msleep(50);
+			msleep(25);
 		}
 
-		psmouse_reset(psmouse);
+		err = hgpk_reset_device(psmouse, false);
+		if (err) {
+			hgpk_err(psmouse, "Failed to reset device!\n");
+			return err;
+		}
 
 		/* should be all set, enable the touchpad */
 		ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
 		psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
-
+		hgpk_dbg(psmouse, "Touchpad powered up.\n");
 	} else {
 		hgpk_dbg(psmouse, "Powering off touchpad.\n");
-		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
 
 		if (ps2_command(ps2dev, NULL, 0xec) ||
 		    ps2_command(ps2dev, NULL, 0xec) ||
@@ -304,6 +743,8 @@
 			return -1;
 		}
 
+		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+
 		/* probably won't see an ACK, the touchpad will be off */
 		ps2_sendbyte(&psmouse->ps2dev, 0xec, 20);
 	}
@@ -319,17 +760,20 @@
 
 static int hgpk_reconnect(struct psmouse *psmouse)
 {
-	/* During suspend/resume the ps2 rails remain powered.  We don't want
+	struct hgpk_data *priv = psmouse->private;
+
+	/*
+	 * During suspend/resume the ps2 rails remain powered.  We don't want
 	 * to do a reset because it would flush data out of buffers; however,
-	 * earlier prototypes (B1) had some brokenness that required a reset. */
+	 * earlier prototypes (B1) had some brokenness that required a reset.
+	 */
 	if (olpc_board_at_least(olpc_board(0xb2)))
 		if (psmouse->ps2dev.serio->dev.power.power_state.event !=
 				PM_EVENT_ON)
 			return 0;
 
-	psmouse_reset(psmouse);
-
-	return 0;
+	priv->powered = 1;
+	return hgpk_reset_device(psmouse, false);
 }
 
 static ssize_t hgpk_show_powered(struct psmouse *psmouse, void *data, char *buf)
@@ -355,7 +799,7 @@
 		 * hgpk_toggle_power will deal w/ state so
 		 * we're not racing w/ irq
 		 */
-		err = hgpk_toggle_power(psmouse, value);
+		err = hgpk_toggle_powersave(psmouse, value);
 		if (!err)
 			priv->powered = value;
 	}
@@ -366,6 +810,65 @@
 __PSMOUSE_DEFINE_ATTR(powered, S_IWUSR | S_IRUGO, NULL,
 		      hgpk_show_powered, hgpk_set_powered, false);
 
+static ssize_t attr_show_mode(struct psmouse *psmouse, void *data, char *buf)
+{
+	struct hgpk_data *priv = psmouse->private;
+
+	return sprintf(buf, "%s\n", hgpk_mode_names[priv->mode]);
+}
+
+static ssize_t attr_set_mode(struct psmouse *psmouse, void *data,
+			     const char *buf, size_t len)
+{
+	struct hgpk_data *priv = psmouse->private;
+	enum hgpk_mode old_mode = priv->mode;
+	enum hgpk_mode new_mode = hgpk_mode_from_name(buf, len);
+	struct input_dev *old_dev = psmouse->dev;
+	struct input_dev *new_dev;
+	int err;
+
+	if (new_mode == HGPK_MODE_INVALID)
+		return -EINVAL;
+
+	if (old_mode == new_mode)
+		return len;
+
+	new_dev = input_allocate_device();
+	if (!new_dev)
+		return -ENOMEM;
+
+	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
+
+	/* Switch device into the new mode */
+	priv->mode = new_mode;
+	err = hgpk_reset_device(psmouse, false);
+	if (err)
+		goto err_try_restore;
+
+	hgpk_setup_input_device(new_dev, old_dev, new_mode);
+
+	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
+
+	err = input_register_device(new_dev);
+	if (err)
+		goto err_try_restore;
+
+	psmouse->dev = new_dev;
+	input_unregister_device(old_dev);
+
+	return len;
+
+err_try_restore:
+	input_free_device(new_dev);
+	priv->mode = old_mode;
+	hgpk_reset_device(psmouse, false);
+
+	return err;
+}
+
+PSMOUSE_DEFINE_ATTR(hgpk_mode, S_IWUSR | S_IRUGO, NULL,
+		    attr_show_mode, attr_set_mode);
+
 static ssize_t hgpk_trigger_recal_show(struct psmouse *psmouse,
 		void *data, char *buf)
 {
@@ -401,6 +904,8 @@
 
 	device_remove_file(&psmouse->ps2dev.serio->dev,
 			   &psmouse_attr_powered.dattr);
+	device_remove_file(&psmouse->ps2dev.serio->dev,
+			   &psmouse_attr_hgpk_mode.dattr);
 
 	if (psmouse->model >= HGPK_MODEL_C)
 		device_remove_file(&psmouse->ps2dev.serio->dev,
@@ -416,14 +921,13 @@
 	struct hgpk_data *priv = container_of(w, struct hgpk_data, recalib_wq);
 	struct psmouse *psmouse = priv->psmouse;
 
-	hgpk_dbg(psmouse, "recalibrating touchpad..\n");
-
 	if (hgpk_force_recalibrate(psmouse))
 		hgpk_err(psmouse, "recalibration failed!\n");
 }
 
 static int hgpk_register(struct psmouse *psmouse)
 {
+	struct hgpk_data *priv = psmouse->private;
 	int err;
 
 	/* register handlers */
@@ -431,13 +935,14 @@
 	psmouse->poll = hgpk_poll;
 	psmouse->disconnect = hgpk_disconnect;
 	psmouse->reconnect = hgpk_reconnect;
-	psmouse->pktsize = 3;
 
 	/* Disable the idle resync. */
 	psmouse->resync_time = 0;
 	/* Reset after a lot of bad bytes. */
 	psmouse->resetafter = 1024;
 
+	hgpk_setup_input_device(psmouse->dev, NULL, priv->mode);
+
 	err = device_create_file(&psmouse->ps2dev.serio->dev,
 				 &psmouse_attr_powered.dattr);
 	if (err) {
@@ -445,6 +950,13 @@
 		return err;
 	}
 
+	err = device_create_file(&psmouse->ps2dev.serio->dev,
+				 &psmouse_attr_hgpk_mode.dattr);
+	if (err) {
+		hgpk_err(psmouse, "Failed creating 'hgpk_mode' sysfs node\n");
+		goto err_remove_powered;
+	}
+
 	/* C-series touchpads added the recalibrate command */
 	if (psmouse->model >= HGPK_MODEL_C) {
 		err = device_create_file(&psmouse->ps2dev.serio->dev,
@@ -452,30 +964,40 @@
 		if (err) {
 			hgpk_err(psmouse,
 				"Failed creating 'recalibrate' sysfs node\n");
-			device_remove_file(&psmouse->ps2dev.serio->dev,
-					&psmouse_attr_powered.dattr);
-			return err;
+			goto err_remove_mode;
 		}
 	}
 
 	return 0;
+
+err_remove_mode:
+	device_remove_file(&psmouse->ps2dev.serio->dev,
+			   &psmouse_attr_hgpk_mode.dattr);
+err_remove_powered:
+	device_remove_file(&psmouse->ps2dev.serio->dev,
+			   &psmouse_attr_powered.dattr);
+	return err;
 }
 
 int hgpk_init(struct psmouse *psmouse)
 {
 	struct hgpk_data *priv;
-	int err = -ENOMEM;
+	int err;
 
 	priv = kzalloc(sizeof(struct hgpk_data), GFP_KERNEL);
-	if (!priv)
+	if (!priv) {
+		err = -ENOMEM;
 		goto alloc_fail;
+	}
 
 	psmouse->private = priv;
+
 	priv->psmouse = psmouse;
 	priv->powered = true;
+	priv->mode = hgpk_default_mode;
 	INIT_DELAYED_WORK(&priv->recalib_wq, hgpk_recalib_work);
 
-	err = psmouse_reset(psmouse);
+	err = hgpk_reset_device(psmouse, false);
 	if (err)
 		goto init_fail;
 
@@ -531,3 +1053,14 @@
 
 	return 0;
 }
+
+void hgpk_module_init(void)
+{
+	hgpk_default_mode = hgpk_mode_from_name(hgpk_mode_name,
+						strlen(hgpk_mode_name));
+	if (hgpk_default_mode == HGPK_MODE_INVALID) {
+		hgpk_default_mode = HGPK_MODE_MOUSE;
+		strlcpy(hgpk_mode_name, hgpk_mode_names[HGPK_MODE_MOUSE],
+			sizeof(hgpk_mode_name));
+	}
+}
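
The byte layout documented above and the decode in hgpk_process_advanced_packet() can be exercised outside the driver; a minimal sketch with a made-up GlideSensor packet (the sample bytes are hypothetical):

#include <stdio.h>

int main(void)
{
	/* hypothetical 6-byte GlideSensor packet (header HGPK_GS = 0xff) */
	unsigned char packet[6] = { 0xff, 0x2c, 0x12, 0x19, 0x48, 0x08 };

	int left  = !!(packet[3] & 1);			/* swl             */
	int right = !!(packet[3] & 2);			/* swr             */
	int down  = !!(packet[2] & 2);			/* gs-dsw          */
	int x = packet[1] | ((packet[2] & 0x78) << 4);	/* x0..x6 | x7..x10 */
	int y = packet[4] | ((packet[3] & 0x70) << 3);	/* y0..y6 | y7..y9  */
	int z = packet[5];				/* pressure        */

	/* prints: x=300 y=200 z=8 left=1 right=0 down=1 */
	printf("x=%d y=%d z=%d left=%d right=%d down=%d\n",
	       x, y, z, left, right, down);
	return 0;
}
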
diff --git a/drivers/input/mouse/hgpk.h b/drivers/input/mouse/hgpk.h
index d61cfd3..311c0e8 100644
--- a/drivers/input/mouse/hgpk.h
+++ b/drivers/input/mouse/hgpk.h
@@ -5,6 +5,9 @@
 #ifndef _HGPK_H
 #define _HGPK_H
 
+#define HGPK_GS		0xff       /* The GlideSensor */
+#define HGPK_PT		0xcf       /* The PenTablet */
+
 enum hgpk_model_t {
 	HGPK_MODEL_PREA = 0x0a,	/* pre-B1s */
 	HGPK_MODEL_A = 0x14,	/* found on B1s, PT disabled in hardware */
@@ -13,12 +16,34 @@
 	HGPK_MODEL_D = 0x50,	/* C1, mass production */
 };
 
+enum hgpk_spew_flag {
+	NO_SPEW,
+	MAYBE_SPEWING,
+	SPEW_DETECTED,
+	RECALIBRATING,
+};
+
+#define SPEW_WATCH_COUNT 42  /* at 12ms/packet, this is 1/2 second */
+
+enum hgpk_mode {
+	HGPK_MODE_MOUSE,
+	HGPK_MODE_GLIDESENSOR,
+	HGPK_MODE_PENTABLET,
+	HGPK_MODE_INVALID
+};
+
 struct hgpk_data {
 	struct psmouse *psmouse;
+	enum hgpk_mode mode;
 	bool powered;
-	int count, x_tally, y_tally;	/* hardware workaround stuff */
+	enum hgpk_spew_flag spew_flag;
+	int spew_count, x_tally, y_tally;	/* spew detection */
 	unsigned long recalib_window;
 	struct delayed_work recalib_wq;
+	int abs_x, abs_y;
+	int dupe_count;
+	int xbigj, ybigj, xlast, ylast; /* jumpiness detection */
+	int xsaw_secondary, ysaw_secondary; /* jumpiness detection */
 };
 
 #define hgpk_dbg(psmouse, format, arg...)		\
@@ -33,9 +58,13 @@
 	dev_notice(&(psmouse)->ps2dev.serio->dev, format, ## arg)
 
 #ifdef CONFIG_MOUSE_PS2_OLPC
+void hgpk_module_init(void);
 int hgpk_detect(struct psmouse *psmouse, bool set_properties);
 int hgpk_init(struct psmouse *psmouse);
 #else
+static inline void hgpk_module_init(void)
+{
+}
 static inline int hgpk_detect(struct psmouse *psmouse, bool set_properties)
 {
 	return -ENODEV;
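
hgpk_mode_from_name() and hgpk_mode_names[] are referenced by hgpk_module_init() and the hgpk_mode sysfs handler but fall outside the hunks shown here; the following standalone sketch is only an assumption about what such a lookup could look like, not the driver's actual implementation:

#include <stdio.h>
#include <string.h>
#include <strings.h>

enum hgpk_mode {
	HGPK_MODE_MOUSE,
	HGPK_MODE_GLIDESENSOR,
	HGPK_MODE_PENTABLET,
	HGPK_MODE_INVALID
};

static const char *const hgpk_mode_names[] = {
	[HGPK_MODE_MOUSE]	= "Mouse",
	[HGPK_MODE_GLIDESENSOR]	= "GlideSensor",
	[HGPK_MODE_PENTABLET]	= "PenTablet",
};

static enum hgpk_mode hgpk_mode_from_name(const char *buf, int len)
{
	int i;

	for (i = 0; i < HGPK_MODE_INVALID; i++) {
		const char *name = hgpk_mode_names[i];

		/* tolerate a trailing newline from sysfs writes */
		if (len >= (int)strlen(name) &&
		    !strncasecmp(name, buf, strlen(name)))
			return i;
	}

	return HGPK_MODE_INVALID;
}

int main(void)
{
	printf("%d\n", hgpk_mode_from_name("GlideSensor\n", 12));	/* 1 */
	printf("%d\n", hgpk_mode_from_name("bogus", 5));		/* 3 */
	return 0;
}
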
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index cd9d0c9..3f74bae 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1711,6 +1711,7 @@
 
 	lifebook_module_init();
 	synaptics_module_init();
+	hgpk_module_init();
 
 	kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
 	if (!kpsmoused_wq) {
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 2e300a4..da392c2 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -25,7 +25,7 @@
 
 #include <linux/module.h>
 #include <linux/dmi.h>
-#include <linux/input.h>
+#include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
 #include <linux/slab.h>
@@ -279,6 +279,25 @@
 	synaptics_mode_cmd(psmouse, priv->mode);
 }
 
+static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
+{
+	static unsigned char param = 0xc8;
+	struct synaptics_data *priv = psmouse->private;
+
+	if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+		return 0;
+
+	if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
+		return -1;
+	if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
+		return -1;
+
+	/* Advanced gesture mode also sends multi finger data */
+	priv->capabilities |= BIT(1);
+
+	return 0;
+}
+
 /*****************************************************************************
  *	Synaptics pass-through PS/2 port support
  ****************************************************************************/
@@ -380,7 +399,9 @@
  *	Functions to interpret the absolute mode packets
  ****************************************************************************/
 
-static void synaptics_parse_hw_state(unsigned char buf[], struct synaptics_data *priv, struct synaptics_hw_state *hw)
+static int synaptics_parse_hw_state(const unsigned char buf[],
+				    struct synaptics_data *priv,
+				    struct synaptics_hw_state *hw)
 {
 	memset(hw, 0, sizeof(struct synaptics_hw_state));
 
@@ -397,6 +418,14 @@
 			 ((buf[0] & 0x04) >> 1) |
 			 ((buf[3] & 0x04) >> 2));
 
+		if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) && hw->w == 2) {
+			/* Gesture packet: (x, y, z) at half resolution */
+			priv->mt.x = (((buf[4] & 0x0f) << 8) | buf[1]) << 1;
+			priv->mt.y = (((buf[4] & 0xf0) << 4) | buf[2]) << 1;
+			priv->mt.z = ((buf[3] & 0x30) | (buf[5] & 0x0f)) << 1;
+			return 1;
+		}
+
 		hw->left  = (buf[0] & 0x01) ? 1 : 0;
 		hw->right = (buf[0] & 0x02) ? 1 : 0;
 
@@ -452,6 +481,36 @@
 		hw->left  = (buf[0] & 0x01) ? 1 : 0;
 		hw->right = (buf[0] & 0x02) ? 1 : 0;
 	}
+
+	return 0;
+}
+
+static void set_slot(struct input_dev *dev, int slot, bool active, int x, int y)
+{
+	input_mt_slot(dev, slot);
+	input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+	if (active) {
+		input_report_abs(dev, ABS_MT_POSITION_X, x);
+		input_report_abs(dev, ABS_MT_POSITION_Y,
+				 YMAX_NOMINAL + YMIN_NOMINAL - y);
+	}
+}
+
+static void synaptics_report_semi_mt_data(struct input_dev *dev,
+					  const struct synaptics_hw_state *a,
+					  const struct synaptics_hw_state *b,
+					  int num_fingers)
+{
+	if (num_fingers >= 2) {
+		set_slot(dev, 0, true, min(a->x, b->x), min(a->y, b->y));
+		set_slot(dev, 1, true, max(a->x, b->x), max(a->y, b->y));
+	} else if (num_fingers == 1) {
+		set_slot(dev, 0, true, a->x, a->y);
+		set_slot(dev, 1, false, 0, 0);
+	} else {
+		set_slot(dev, 0, false, 0, 0);
+		set_slot(dev, 1, false, 0, 0);
+	}
 }
 
 /*
@@ -466,7 +525,8 @@
 	int finger_width;
 	int i;
 
-	synaptics_parse_hw_state(psmouse->packet, priv, &hw);
+	if (synaptics_parse_hw_state(psmouse->packet, priv, &hw))
+		return;
 
 	if (hw.scroll) {
 		priv->scroll += hw.scroll;
@@ -488,7 +548,7 @@
 		return;
 	}
 
-	if (hw.z > 0) {
+	if (hw.z > 0 && hw.x > 1) {
 		num_fingers = 1;
 		finger_width = 5;
 		if (SYN_CAP_EXTENDED(priv->capabilities)) {
@@ -512,6 +572,9 @@
 		finger_width = 0;
 	}
 
+	if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+		synaptics_report_semi_mt_data(dev, &hw, &priv->mt, num_fingers);
+
 	/* Post events
 	 * BTN_TOUCH has to be first as mousedev relies on it when doing
 	 * absolute -> relative conversion
@@ -519,7 +582,7 @@
 	if (hw.z > 30) input_report_key(dev, BTN_TOUCH, 1);
 	if (hw.z < 25) input_report_key(dev, BTN_TOUCH, 0);
 
-	if (hw.z > 0) {
+	if (num_fingers > 0) {
 		input_report_abs(dev, ABS_X, hw.x);
 		input_report_abs(dev, ABS_Y, YMAX_NOMINAL + YMIN_NOMINAL - hw.y);
 	}
@@ -622,6 +685,8 @@
 {
 	int i;
 
+	__set_bit(INPUT_PROP_POINTER, dev->propbit);
+
 	__set_bit(EV_ABS, dev->evbit);
 	input_set_abs_params(dev, ABS_X,
 			     XMIN_NOMINAL, priv->x_max ?: XMAX_NOMINAL, 0, 0);
@@ -629,6 +694,15 @@
 			     YMIN_NOMINAL, priv->y_max ?: YMAX_NOMINAL, 0, 0);
 	input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
 
+	if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
+		__set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
+		input_mt_init_slots(dev, 2);
+		input_set_abs_params(dev, ABS_MT_POSITION_X, XMIN_NOMINAL,
+				     priv->x_max ?: XMAX_NOMINAL, 0, 0);
+		input_set_abs_params(dev, ABS_MT_POSITION_Y, YMIN_NOMINAL,
+				     priv->y_max ?: YMAX_NOMINAL, 0, 0);
+	}
+
 	if (SYN_CAP_PALMDETECT(priv->capabilities))
 		input_set_abs_params(dev, ABS_TOOL_WIDTH, 0, 15, 0, 0);
 
@@ -663,6 +737,7 @@
 	input_abs_set_res(dev, ABS_Y, priv->y_res);
 
 	if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
+		__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
 		/* Clickpads report only left button */
 		__clear_bit(BTN_RIGHT, dev->keybit);
 		__clear_bit(BTN_MIDDLE, dev->keybit);
@@ -702,6 +777,11 @@
 		return -1;
 	}
 
+	if (synaptics_set_advanced_gesture_mode(psmouse)) {
+		printk(KERN_ERR "Advanced gesture mode reconnect failed.\n");
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -744,15 +824,45 @@
 #endif
 };
 
+static bool broken_olpc_ec;
+
+static const struct dmi_system_id __initconst olpc_dmi_table[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_OLPC)
+	{
+		/* OLPC XO-1 or XO-1.5 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "OLPC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XO"),
+		},
+	},
+	{ }
+#endif
+};
+
 void __init synaptics_module_init(void)
 {
 	impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
+	broken_olpc_ec = dmi_check_system(olpc_dmi_table);
 }
 
 int synaptics_init(struct psmouse *psmouse)
 {
 	struct synaptics_data *priv;
 
+	/*
+	 * The OLPC XO has issues with Synaptics' absolute mode; similarly to
+	 * the HGPK, it quickly degrades and the hardware becomes jumpy and
+	 * overly sensitive.  Not only that, but the constant packet spew
+	 * (even at a lowered 40pps rate) overloads the EC such that key
+	 * presses on the keyboard are missed.  Given all of that, don't
+	 * even attempt to use Synaptics mode.  Relative mode seems to work
+	 * just fine.
+	 */
+	if (broken_olpc_ec) {
+		printk(KERN_INFO "synaptics: OLPC XO detected, not enabling Synaptics protocol.\n");
+		return -ENODEV;
+	}
+
 	psmouse->private = priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -769,6 +879,11 @@
 		goto init_fail;
 	}
 
+	if (synaptics_set_advanced_gesture_mode(psmouse)) {
+		printk(KERN_ERR "Advanced gesture mode init failed.\n");
+		goto init_fail;
+	}
+
 	priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
 
 	printk(KERN_INFO "Synaptics Touchpad, model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx\n",
@@ -802,8 +917,8 @@
 
 	/*
 	 * Toshiba's KBC seems to have trouble handling data from
-	 * Synaptics as full rate, switch to lower rate which is roughly
-	 * thye same as rate of standard PS/2 mouse.
+	 * Synaptics at full rate.  Switch to a lower rate (roughly
+	 * the same rate as a standard PS/2 mouse).
 	 */
 	if (psmouse->rate >= 80 && impaired_toshiba_kbc) {
 		printk(KERN_INFO "synaptics: Toshiba %s detected, limiting rate to 40pps.\n",
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 0aefaa8..25e5d04 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -54,6 +54,7 @@
 #define SYN_CAP_CLICKPAD(ex0c)		((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c)	((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c)	((ex0c) & 0x020000)
+#define SYN_CAP_ADV_GESTURE(ex0c)	((ex0c) & 0x080000)
 
 /* synaptics modes query bits */
 #define SYN_MODE_ABSOLUTE(m)		((m) & (1 << 7))
@@ -113,6 +114,8 @@
 	int scroll;
 
 	struct serio *pt_port;			/* Pass-through serio port */
+
+	struct synaptics_hw_state mt;		/* current gesture packet */
 };
 
 void synaptics_module_init(void);
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 2a00ddf..7630273 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -9,6 +9,8 @@
  * the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define MOUSEDEV_MINOR_BASE	32
 #define MOUSEDEV_MINORS		32
 #define MOUSEDEV_MIX		31
@@ -977,7 +979,7 @@
 			break;
 
 	if (minor == MOUSEDEV_MINORS) {
-		printk(KERN_ERR "mousedev: no more free mousedev devices\n");
+		pr_err("no more free mousedev devices\n");
 		return -ENFILE;
 	}
 
@@ -1087,13 +1089,13 @@
 #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
 	error = misc_register(&psaux_mouse);
 	if (error)
-		printk(KERN_WARNING "mice: could not register psaux device, "
-			"error: %d\n", error);
+		pr_warning("could not register psaux device, error: %d\n",
+			   error);
 	else
 		psaux_registered = 1;
 #endif
 
-	printk(KERN_INFO "mice: PS/2 mouse device common for all mice\n");
+	pr_info("PS/2 mouse device common for all mice\n");
 
 	return 0;
 }
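
The reason the literal "mousedev: " prefixes can be dropped from the messages above is the new pr_fmt() definition: the kernel's pr_*() helpers expand their format string through pr_fmt(), so every message picks up the module-name prefix automatically. A userspace mock of the same mechanism (names and output stream are illustrative):

#include <stdio.h>

#define KBUILD_MODNAME	"mousedev"
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: mousedev: no more free mousedev devices */
	pr_err("no more free mousedev devices\n");
	return 0;
}
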
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 6256233..307eef7 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -214,7 +214,6 @@
 	tristate "Amstrad Delta (E3) mailboard support"
 	depends on MACH_AMS_DELTA
 	default y
-	select AMS_DELTA_FIQ
 	---help---
 	  Say Y here if you have an E3 and want to use its mailboard,
 	  or any standard AT keyboard connected to the mailboard port.
@@ -230,7 +229,7 @@
 	tristate "TQC PS/2 multiplexer"
 	help
 	  Say Y here if you have the PS/2 line multiplexer like the one
-	  present on TQC boads.
+	  present on TQC boards.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ps2mult.
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 8f1770e..ebe9553 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -172,6 +172,5 @@
 	free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
 	gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_CLK);
 	gpio_free(AMS_DELTA_GPIO_PIN_KEYBRD_DATA);
-	kfree(ams_delta_serio);
 }
 module_exit(ams_delta_serio_exit);
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 4a30846..448c772 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -191,6 +191,9 @@
 
 	serio_register_port(ct82c710_port);
 
+	printk(KERN_INFO "serio: C&T 82c710 mouse port at %#llx irq %d\n",
+		(unsigned long long)CT82C710_DATA, CT82C710_IRQ);
+
 	return 0;
 }
 
@@ -237,11 +240,6 @@
 	if (error)
 		goto err_free_device;
 
-	serio_register_port(ct82c710_port);
-
-	printk(KERN_INFO "serio: C&T 82c710 mouse port at %#llx irq %d\n",
-		(unsigned long long)CT82C710_DATA, CT82C710_IRQ);
-
 	return 0;
 
  err_free_device:
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index e5624d8..bfd3865 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -932,6 +932,11 @@
 		hil_mlc_copy_di_scratch(mlc, i);
 		mlc_serio = kzalloc(sizeof(*mlc_serio), GFP_KERNEL);
 		mlc->serio[i] = mlc_serio;
+		if (!mlc->serio[i]) {
+			for (; i >= 0; i--)
+				kfree(mlc->serio[i]);
+			return -ENOMEM;
+		}
 		snprintf(mlc_serio->name, sizeof(mlc_serio->name)-1, "HIL_SERIO%d", i);
 		snprintf(mlc_serio->phys, sizeof(mlc_serio->phys)-1, "HIL%d", i);
 		mlc_serio->id			= hil_mlc_serio_id;
diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c
index 7d2b820..d50f067 100644
--- a/drivers/input/serio/hp_sdc_mlc.c
+++ b/drivers/input/serio/hp_sdc_mlc.c
@@ -305,6 +305,7 @@
 static int __init hp_sdc_mlc_init(void)
 {
 	hil_mlc *mlc = &hp_sdc_mlc;
+	int err;
 
 #ifdef __mc68000__
 	if (!MACH_IS_HP300)
@@ -323,22 +324,21 @@
 	mlc->out = &hp_sdc_mlc_out;
 	mlc->priv = &hp_sdc_mlc_priv;
 
-	if (hil_mlc_register(mlc)) {
+	err = hil_mlc_register(mlc);
+	if (err) {
 		printk(KERN_WARNING PREFIX "Failed to register MLC structure with hil_mlc\n");
-		goto err0;
+		return err;
 	}
 
 	if (hp_sdc_request_hil_irq(&hp_sdc_mlc_isr)) {
 		printk(KERN_WARNING PREFIX "Request for raw HIL ISR hook denied\n");
-		goto err1;
+		if (hil_mlc_unregister(mlc))
+			printk(KERN_ERR PREFIX "Failed to unregister MLC structure with hil_mlc.\n"
+				"This is bad.  Could cause an oops.\n");
+		return -EBUSY;
 	}
+
 	return 0;
- err1:
-	if (hil_mlc_unregister(mlc))
-		printk(KERN_ERR PREFIX "Failed to unregister MLC structure with hil_mlc.\n"
-			"This is bad.  Could cause an oops.\n");
- err0:
-	return -EBUSY;
 }
 
 static void __exit hp_sdc_mlc_exit(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a5475b5..bb9f5d3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -424,6 +424,13 @@
 			DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
 		},
 	},
+	{
+		/* Dell Vostro V13 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+		},
+	},
 	{ }
 };
 
@@ -545,6 +552,17 @@
 };
 #endif
 
+static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+	{
+		/* Dell Vostro V13 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+		},
+	},
+	{ }
+};
+
 /*
  * Some Wistron based laptops need us to explicitly enable the 'Dritek
  * keyboard extension' to make their extra keys start generating scancodes.
@@ -553,6 +571,13 @@
  */
 static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
 	{
+		/* Acer Aspire 5100 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+		},
+	},
+	{
 		/* Acer Aspire 5610 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -752,7 +777,7 @@
 #endif
 
 	if (i8042_nopnp) {
-		printk(KERN_INFO "i8042: PNP detection disabled\n");
+		pr_info("PNP detection disabled\n");
 		return 0;
 	}
 
@@ -769,7 +794,7 @@
 #if defined(__ia64__)
 		return -ENODEV;
 #else
-		printk(KERN_INFO "PNP: No PS/2 controller found. Probing ports directly.\n");
+		pr_info("PNP: No PS/2 controller found. Probing ports directly.\n");
 		return 0;
 #endif
 	}
@@ -781,7 +806,7 @@
 		snprintf(aux_irq_str, sizeof(aux_irq_str),
 			"%d", i8042_pnp_aux_irq);
 
-	printk(KERN_INFO "PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n",
+	pr_info("PNP: PS/2 Controller [%s%s%s] at %#x,%#x irq %s%s%s\n",
 		i8042_pnp_kbd_name, (i8042_pnp_kbd_devices && i8042_pnp_aux_devices) ? "," : "",
 		i8042_pnp_aux_name,
 		i8042_pnp_data_reg, i8042_pnp_command_reg,
@@ -798,9 +823,7 @@
 	if (((i8042_pnp_data_reg & ~0xf) == (i8042_data_reg & ~0xf) &&
 	      i8042_pnp_data_reg != i8042_data_reg) ||
 	    !i8042_pnp_data_reg) {
-		printk(KERN_WARNING
-			"PNP: PS/2 controller has invalid data port %#x; "
-			"using default %#x\n",
+		pr_warn("PNP: PS/2 controller has invalid data port %#x; using default %#x\n",
 			i8042_pnp_data_reg, i8042_data_reg);
 		i8042_pnp_data_reg = i8042_data_reg;
 		pnp_data_busted = true;
@@ -809,33 +832,27 @@
 	if (((i8042_pnp_command_reg & ~0xf) == (i8042_command_reg & ~0xf) &&
 	      i8042_pnp_command_reg != i8042_command_reg) ||
 	    !i8042_pnp_command_reg) {
-		printk(KERN_WARNING
-			"PNP: PS/2 controller has invalid command port %#x; "
-			"using default %#x\n",
+		pr_warn("PNP: PS/2 controller has invalid command port %#x; using default %#x\n",
 			i8042_pnp_command_reg, i8042_command_reg);
 		i8042_pnp_command_reg = i8042_command_reg;
 		pnp_data_busted = true;
 	}
 
 	if (!i8042_nokbd && !i8042_pnp_kbd_irq) {
-		printk(KERN_WARNING
-			"PNP: PS/2 controller doesn't have KBD irq; "
-			"using default %d\n", i8042_kbd_irq);
+		pr_warn("PNP: PS/2 controller doesn't have KBD irq; using default %d\n",
+			i8042_kbd_irq);
 		i8042_pnp_kbd_irq = i8042_kbd_irq;
 		pnp_data_busted = true;
 	}
 
 	if (!i8042_noaux && !i8042_pnp_aux_irq) {
 		if (!pnp_data_busted && i8042_pnp_kbd_irq) {
-			printk(KERN_WARNING
-				"PNP: PS/2 appears to have AUX port disabled, "
-				"if this is incorrect please boot with "
-				"i8042.nopnp\n");
+			pr_warn("PNP: PS/2 appears to have AUX port disabled, "
+				"if this is incorrect please boot with i8042.nopnp\n");
 			i8042_noaux = true;
 		} else {
-			printk(KERN_WARNING
-				"PNP: PS/2 controller doesn't have AUX irq; "
-				"using default %d\n", i8042_aux_irq);
+			pr_warn("PNP: PS/2 controller doesn't have AUX irq; using default %d\n",
+				i8042_aux_irq);
 			i8042_pnp_aux_irq = i8042_aux_irq;
 		}
 	}
@@ -897,6 +914,9 @@
 	if (dmi_check_system(i8042_dmi_nomux_table))
 		i8042_nomux = true;
 
+	if (dmi_check_system(i8042_dmi_notimeout_table))
+		i8042_notimeout = true;
+
 	if (dmi_check_system(i8042_dmi_dritek_table))
 		i8042_dritek = true;
 #endif /* CONFIG_X86 */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 18db5a8..ac4c936 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -10,6 +10,8 @@
  * the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -61,6 +63,10 @@
 module_param_named(noloop, i8042_noloop, bool, 0);
 MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
 
+static bool i8042_notimeout;
+module_param_named(notimeout, i8042_notimeout, bool, 0);
+MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+
 #ifdef CONFIG_X86
 static bool i8042_dritek;
 module_param_named(dritek, i8042_dritek, bool, 0);
@@ -225,8 +231,8 @@
 		udelay(50);
 		data = i8042_read_data();
 		i++;
-		dbg("%02x <- i8042 (flush, %s)", data,
-			str & I8042_STR_AUXDATA ? "aux" : "kbd");
+		dbg("%02x <- i8042 (flush, %s)\n",
+		    data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
 	}
 
 	spin_unlock_irqrestore(&i8042_lock, flags);
@@ -253,32 +259,32 @@
 	if (error)
 		return error;
 
-	dbg("%02x -> i8042 (command)", command & 0xff);
+	dbg("%02x -> i8042 (command)\n", command & 0xff);
 	i8042_write_command(command & 0xff);
 
 	for (i = 0; i < ((command >> 12) & 0xf); i++) {
 		error = i8042_wait_write();
 		if (error)
 			return error;
-		dbg("%02x -> i8042 (parameter)", param[i]);
+		dbg("%02x -> i8042 (parameter)\n", param[i]);
 		i8042_write_data(param[i]);
 	}
 
 	for (i = 0; i < ((command >> 8) & 0xf); i++) {
 		error = i8042_wait_read();
 		if (error) {
-			dbg("     -- i8042 (timeout)");
+			dbg("     -- i8042 (timeout)\n");
 			return error;
 		}
 
 		if (command == I8042_CMD_AUX_LOOP &&
 		    !(i8042_read_status() & I8042_STR_AUXDATA)) {
-			dbg("     -- i8042 (auxerr)");
+			dbg("     -- i8042 (auxerr)\n");
 			return -1;
 		}
 
 		param[i] = i8042_read_data();
-		dbg("%02x <- i8042 (return)", param[i]);
+		dbg("%02x <- i8042 (return)\n", param[i]);
 	}
 
 	return 0;
@@ -309,7 +315,7 @@
 	spin_lock_irqsave(&i8042_lock, flags);
 
 	if (!(retval = i8042_wait_write())) {
-		dbg("%02x -> i8042 (kbd-data)", c);
+		dbg("%02x -> i8042 (kbd-data)\n", c);
 		i8042_write_data(c);
 	}
 
@@ -355,17 +361,14 @@
 
 	i8042_ctr &= ~irq_bit;
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR))
-		printk(KERN_WARNING
-			"i8042.c: Can't write CTR while closing %s port.\n",
-			port_name);
+		pr_warn("Can't write CTR while closing %s port\n", port_name);
 
 	udelay(50);
 
 	i8042_ctr &= ~disable_bit;
 	i8042_ctr |= irq_bit;
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR))
-		printk(KERN_ERR "i8042.c: Can't reactivate %s port.\n",
-			port_name);
+		pr_err("Can't reactivate %s port\n", port_name);
 
 	/*
 	 * See if there is any data appeared while we were messing with
@@ -456,7 +459,8 @@
 	str = i8042_read_status();
 	if (unlikely(~str & I8042_STR_OBF)) {
 		spin_unlock_irqrestore(&i8042_lock, flags);
-		if (irq) dbg("Interrupt %d, without any data", irq);
+		if (irq)
+			dbg("Interrupt %d, without any data\n", irq);
 		ret = 0;
 		goto out;
 	}
@@ -469,7 +473,8 @@
 
 		dfl = 0;
 		if (str & I8042_STR_MUXERR) {
-			dbg("MUX error, status is %02x, data is %02x", str, data);
+			dbg("MUX error, status is %02x, data is %02x\n",
+			    str, data);
 /*
  * When MUXERR condition is signalled the data register can only contain
  * 0xfd, 0xfe or 0xff if implementation follows the spec. Unfortunately
@@ -503,7 +508,7 @@
 	} else {
 
 		dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
-		      ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
+		      ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
 
 		port_no = (str & I8042_STR_AUXDATA) ?
 				I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
@@ -512,7 +517,7 @@
 	port = &i8042_ports[port_no];
 	serio = port->exists ? port->serio : NULL;
 
-	dbg("%02x <- i8042 (interrupt, %d, %d%s%s)",
+	dbg("%02x <- i8042 (interrupt, %d, %d%s%s)\n",
 	    data, port_no, irq,
 	    dfl & SERIO_PARITY ? ", bad parity" : "",
 	    dfl & SERIO_TIMEOUT ? ", timeout" : "");
@@ -540,7 +545,7 @@
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
 		i8042_ctr &= ~I8042_CTR_KBDINT;
 		i8042_ctr |= I8042_CTR_KBDDIS;
-		printk(KERN_ERR "i8042.c: Failed to enable KBD port.\n");
+		pr_err("Failed to enable KBD port\n");
 		return -EIO;
 	}
 
@@ -559,7 +564,7 @@
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
 		i8042_ctr &= ~I8042_CTR_AUXINT;
 		i8042_ctr |= I8042_CTR_AUXDIS;
-		printk(KERN_ERR "i8042.c: Failed to enable AUX port.\n");
+		pr_err("Failed to enable AUX port\n");
 		return -EIO;
 	}
 
@@ -641,7 +646,7 @@
 	if (i8042_set_mux_mode(true, &mux_version))
 		return -1;
 
-	printk(KERN_INFO "i8042.c: Detected active multiplexing controller, rev %d.%d.\n",
+	pr_info("Detected active multiplexing controller, rev %d.%d\n",
 		(mux_version >> 4) & 0xf, mux_version & 0xf);
 
 /*
@@ -651,7 +656,7 @@
 	i8042_ctr &= ~I8042_CTR_AUXINT;
 
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
-		printk(KERN_ERR "i8042.c: Failed to disable AUX port, can't use MUX.\n");
+		pr_err("Failed to disable AUX port, can't use MUX\n");
 		return -EIO;
 	}
 
@@ -676,8 +681,8 @@
 	str = i8042_read_status();
 	if (str & I8042_STR_OBF) {
 		data = i8042_read_data();
-		dbg("%02x <- i8042 (aux_test_irq, %s)",
-			data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+		dbg("%02x <- i8042 (aux_test_irq, %s)\n",
+		    data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
 		if (i8042_irq_being_tested &&
 		    data == 0xa5 && (str & I8042_STR_AUXDATA))
 			complete(&i8042_aux_irq_delivered);
@@ -770,8 +775,8 @@
  */
 
 	if (i8042_toggle_aux(false)) {
-		printk(KERN_WARNING "Failed to disable AUX port, but continuing anyway... Is this a SiS?\n");
-		printk(KERN_WARNING "If AUX port is really absent please use the 'i8042.noaux' option.\n");
+		pr_warn("Failed to disable AUX port, but continuing anyway... Is this a SiS?\n");
+		pr_warn("If AUX port is really absent please use the 'i8042.noaux' option\n");
 	}
 
 	if (i8042_toggle_aux(true))
@@ -819,7 +824,7 @@
  * AUX IRQ was never delivered so we need to flush the controller to
  * get rid of the byte we put there; otherwise keyboard may not work.
  */
-		dbg("     -- i8042 (aux irq test timeout)");
+		dbg("     -- i8042 (aux irq test timeout)\n");
 		i8042_flush();
 		retval = -1;
 	}
@@ -845,7 +850,7 @@
 static int i8042_controller_check(void)
 {
 	if (i8042_flush() == I8042_BUFFER_SIZE) {
-		printk(KERN_ERR "i8042.c: No controller found.\n");
+		pr_err("No controller found\n");
 		return -ENODEV;
 	}
 
@@ -864,15 +869,15 @@
 	do {
 
 		if (i8042_command(&param, I8042_CMD_CTL_TEST)) {
-			printk(KERN_ERR "i8042.c: i8042 controller self test timeout.\n");
+			pr_err("i8042 controller self test timeout\n");
 			return -ENODEV;
 		}
 
 		if (param == I8042_RET_CTL_TEST)
 			return 0;
 
-		printk(KERN_ERR "i8042.c: i8042 controller selftest failed. (%#x != %#x)\n",
-			param, I8042_RET_CTL_TEST);
+		pr_err("i8042 controller selftest failed. (%#x != %#x)\n",
+		       param, I8042_RET_CTL_TEST);
 		msleep(50);
 	} while (i++ < 5);
 
@@ -883,8 +888,7 @@
 	 * and user will still get a working keyboard. This is especially
 	 * important on netbooks. On other arches we trust hardware more.
 	 */
-	printk(KERN_INFO
-		"i8042: giving up on controller selftest, continuing anyway...\n");
+	pr_info("giving up on controller selftest, continuing anyway...\n");
 	return 0;
 #else
 	return -EIO;
@@ -909,8 +913,7 @@
 
 	do {
 		if (n >= 10) {
-			printk(KERN_ERR
-				"i8042.c: Unable to get stable CTR read.\n");
+			pr_err("Unable to get stable CTR read\n");
 			return -EIO;
 		}
 
@@ -918,8 +921,7 @@
 			udelay(50);
 
 		if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
-			printk(KERN_ERR
-				"i8042.c: Can't read CTR while initializing i8042.\n");
+			pr_err("Can't read CTR while initializing i8042\n");
 			return -EIO;
 		}
 
@@ -943,7 +945,7 @@
 		if (i8042_unlock)
 			i8042_ctr |= I8042_CTR_IGNKEYLOCK;
 		else
-			printk(KERN_WARNING "i8042.c: Warning: Keylock active.\n");
+			pr_warn("Warning: Keylock active\n");
 	}
 	spin_unlock_irqrestore(&i8042_lock, flags);
 
@@ -970,7 +972,7 @@
  */
 
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
-		printk(KERN_ERR "i8042.c: Can't write CTR while initializing i8042.\n");
+		pr_err("Can't write CTR while initializing i8042\n");
 		return -EIO;
 	}
 
@@ -1000,7 +1002,7 @@
 	i8042_ctr &= ~(I8042_CTR_KBDINT | I8042_CTR_AUXINT);
 
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR))
-		printk(KERN_WARNING "i8042.c: Can't write CTR while resetting.\n");
+		pr_warn("Can't write CTR while resetting\n");
 
 /*
  * Disable MUX mode if present.
@@ -1021,7 +1023,7 @@
  */
 
 	if (i8042_command(&i8042_initial_ctr, I8042_CMD_CTL_WCTR))
-		printk(KERN_WARNING "i8042.c: Can't restore CTR.\n");
+		pr_warn("Can't restore CTR\n");
 }
 
 
@@ -1045,14 +1047,14 @@
 	led = (state) ? 0x01 | 0x04 : 0;
 	while (i8042_read_status() & I8042_STR_IBF)
 		DELAY;
-	dbg("%02x -> i8042 (panic blink)", 0xed);
+	dbg("%02x -> i8042 (panic blink)\n", 0xed);
 	i8042_suppress_kbd_ack = 2;
 	i8042_write_data(0xed); /* set leds */
 	DELAY;
 	while (i8042_read_status() & I8042_STR_IBF)
 		DELAY;
 	DELAY;
-	dbg("%02x -> i8042 (panic blink)", led);
+	dbg("%02x -> i8042 (panic blink)\n", led);
 	i8042_write_data(led);
 	DELAY;
 	return delay;
@@ -1068,9 +1070,7 @@
 
 	error = i8042_command(&param, 0x1059);
 	if (error)
-		printk(KERN_WARNING
-			"Failed to enable DRITEK extension: %d\n",
-			error);
+		pr_warn("Failed to enable DRITEK extension: %d\n", error);
 }
 #endif
 
@@ -1105,10 +1105,10 @@
 	i8042_ctr |= I8042_CTR_AUXDIS | I8042_CTR_KBDDIS;
 	i8042_ctr &= ~(I8042_CTR_AUXINT | I8042_CTR_KBDINT);
 	if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
-		printk(KERN_WARNING "i8042: Can't write CTR to resume, retrying...\n");
+		pr_warn("Can't write CTR to resume, retrying...\n");
 		msleep(50);
 		if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
-			printk(KERN_ERR "i8042: CTR write retry failed\n");
+			pr_err("CTR write retry failed\n");
 			return -EIO;
 		}
 	}
@@ -1121,9 +1121,7 @@
 
 	if (i8042_mux_present) {
 		if (i8042_set_mux_mode(true, NULL) || i8042_enable_mux_ports())
-			printk(KERN_WARNING
-				"i8042: failed to resume active multiplexor, "
-				"mouse won't work.\n");
+			pr_warn("failed to resume active multiplexor, mouse won't work\n");
 	} else if (i8042_ports[I8042_AUX_PORT_NO].serio)
 		i8042_enable_aux_port();
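
For reference, the new i8042.notimeout option only changes the flag computation in the interrupt handler above: on hardware such as the Dell Vostro V13 (added to the DMI table earlier) the controller raises the timeout status bit on valid data, so the option keeps SERIO_TIMEOUT from being passed up to the serio layer. A standalone sketch; the constant values are reproduced here from i8042.h and serio.h so it builds on its own:

#include <stdbool.h>
#include <stdio.h>

#define I8042_STR_PARITY	0x80
#define I8042_STR_TIMEOUT	0x40
#define SERIO_TIMEOUT		0x01
#define SERIO_PARITY		0x02

static unsigned int dfl_flags(unsigned char str, bool notimeout)
{
	return ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
	       (((str & I8042_STR_TIMEOUT) && !notimeout) ? SERIO_TIMEOUT : 0);
}

int main(void)
{
	unsigned char str = I8042_STR_TIMEOUT;	/* spurious timeout, good data */

	printf("default:   dfl=%#x\n", dfl_flags(str, false));	/* 0x1 */
	printf("notimeout: dfl=%#x\n", dfl_flags(str, true));	/* 0 */
	return 0;
}
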
 
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index cbc1beb..ac1d759 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -89,15 +89,19 @@
 #ifdef DEBUG
 static unsigned long i8042_start_time;
 #define dbg_init() do { i8042_start_time = jiffies; } while (0)
-#define dbg(format, arg...) 							\
-	do { 									\
+#define dbg(format, arg...)							\
+	do {									\
 		if (i8042_debug)						\
-			printk(KERN_DEBUG __FILE__ ": " format " [%d]\n" ,	\
-	 			## arg, (int) (jiffies - i8042_start_time));	\
+			printk(KERN_DEBUG KBUILD_MODNAME ": [%d] " format,	\
+			       (int) (jiffies - i8042_start_time), ##arg);	\
 	} while (0)
 #else
 #define dbg_init() do { } while (0)
-#define dbg(format, arg...) do {} while (0)
+#define dbg(format, arg...)							\
+	do {									\
+		if (0)								\
+			printk(KERN_DEBUG pr_fmt(format), ##arg);		\
+	} while (0)
 #endif
 
 #endif /* _I8042_H */
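
The non-DEBUG dbg() above now expands to an "if (0) printk(...)" rather than an empty statement; a common reason for this pattern is that the format string and its arguments stay visible to the compiler, so they are still type-checked and variables used only in dbg() calls do not trigger unused warnings, while the call itself is optimized away. A userspace analogue of the idiom:

#include <stdio.h>

#define DBG_ENABLED 0	/* stand-in for the driver's DEBUG switch */

#define dbg(fmt, ...)						\
	do {							\
		if (DBG_ENABLED)				\
			printf("i8042: " fmt, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	int data = 0xfa;

	/* still compiled and format-checked, never executed when disabled */
	dbg("%02x <- i8042 (return)\n", data);
	return 0;
}
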
diff --git a/drivers/input/serio/ps2mult.c b/drivers/input/serio/ps2mult.c
index 6bce22e..15aa81c 100644
--- a/drivers/input/serio/ps2mult.c
+++ b/drivers/input/serio/ps2mult.c
@@ -207,7 +207,7 @@
 err_out:
 	while (--i >= 0)
 		kfree(psm->ports[i].serio);
-	kfree(serio);
+	kfree(psm);
 	return error;
 }
 
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 405bf21..db5b0bc 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -32,10 +32,9 @@
 #include <linux/module.h>
 #include <linux/serio.h>
 #include <linux/errno.h>
-#include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/mutex.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
@@ -44,7 +43,7 @@
 
 /*
  * serio_mutex protects entire serio subsystem and is taken every time
- * serio port or driver registrered or unregistered.
+ * serio port or driver registered or unregistered.
  */
 static DEFINE_MUTEX(serio_mutex);
 
@@ -165,8 +164,95 @@
 
 static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
 static LIST_HEAD(serio_event_list);
-static DECLARE_WAIT_QUEUE_HEAD(serio_wait);
-static struct task_struct *serio_task;
+
+static struct serio_event *serio_get_event(void)
+{
+	struct serio_event *event = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&serio_event_lock, flags);
+
+	if (!list_empty(&serio_event_list)) {
+		event = list_first_entry(&serio_event_list,
+					 struct serio_event, node);
+		list_del_init(&event->node);
+	}
+
+	spin_unlock_irqrestore(&serio_event_lock, flags);
+	return event;
+}
+
+static void serio_free_event(struct serio_event *event)
+{
+	module_put(event->owner);
+	kfree(event);
+}
+
+static void serio_remove_duplicate_events(struct serio_event *event)
+{
+	struct serio_event *e, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&serio_event_lock, flags);
+
+	list_for_each_entry_safe(e, next, &serio_event_list, node) {
+		if (event->object == e->object) {
+			/*
+			 * If this event is of different type we should not
+			 * look further - we only suppress duplicate events
+			 * that were sent back-to-back.
+			 */
+			if (event->type != e->type)
+				break;
+
+			list_del_init(&e->node);
+			serio_free_event(e);
+		}
+	}
+
+	spin_unlock_irqrestore(&serio_event_lock, flags);
+}
+
+static void serio_handle_event(struct work_struct *work)
+{
+	struct serio_event *event;
+
+	mutex_lock(&serio_mutex);
+
+	while ((event = serio_get_event())) {
+
+		switch (event->type) {
+
+		case SERIO_REGISTER_PORT:
+			serio_add_port(event->object);
+			break;
+
+		case SERIO_RECONNECT_PORT:
+			serio_reconnect_port(event->object);
+			break;
+
+		case SERIO_RESCAN_PORT:
+			serio_disconnect_port(event->object);
+			serio_find_driver(event->object);
+			break;
+
+		case SERIO_RECONNECT_SUBTREE:
+			serio_reconnect_subtree(event->object);
+			break;
+
+		case SERIO_ATTACH_DRIVER:
+			serio_attach_driver(event->object);
+			break;
+		}
+
+		serio_remove_duplicate_events(event);
+		serio_free_event(event);
+	}
+
+	mutex_unlock(&serio_mutex);
+}
+
+static DECLARE_WORK(serio_event_work, serio_handle_event);
 
 static int serio_queue_event(void *object, struct module *owner,
 			     enum serio_event_type event_type)
@@ -212,101 +298,13 @@
 	event->owner = owner;
 
 	list_add_tail(&event->node, &serio_event_list);
-	wake_up(&serio_wait);
+	schedule_work(&serio_event_work);
 
 out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);
 	return retval;
 }
 
-static void serio_free_event(struct serio_event *event)
-{
-	module_put(event->owner);
-	kfree(event);
-}
-
-static void serio_remove_duplicate_events(struct serio_event *event)
-{
-	struct serio_event *e, *next;
-	unsigned long flags;
-
-	spin_lock_irqsave(&serio_event_lock, flags);
-
-	list_for_each_entry_safe(e, next, &serio_event_list, node) {
-		if (event->object == e->object) {
-			/*
-			 * If this event is of different type we should not
-			 * look further - we only suppress duplicate events
-			 * that were sent back-to-back.
-			 */
-			if (event->type != e->type)
-				break;
-
-			list_del_init(&e->node);
-			serio_free_event(e);
-		}
-	}
-
-	spin_unlock_irqrestore(&serio_event_lock, flags);
-}
-
-
-static struct serio_event *serio_get_event(void)
-{
-	struct serio_event *event = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&serio_event_lock, flags);
-
-	if (!list_empty(&serio_event_list)) {
-		event = list_first_entry(&serio_event_list,
-					 struct serio_event, node);
-		list_del_init(&event->node);
-	}
-
-	spin_unlock_irqrestore(&serio_event_lock, flags);
-	return event;
-}
-
-static void serio_handle_event(void)
-{
-	struct serio_event *event;
-
-	mutex_lock(&serio_mutex);
-
-	while ((event = serio_get_event())) {
-
-		switch (event->type) {
-
-		case SERIO_REGISTER_PORT:
-			serio_add_port(event->object);
-			break;
-
-		case SERIO_RECONNECT_PORT:
-			serio_reconnect_port(event->object);
-			break;
-
-		case SERIO_RESCAN_PORT:
-			serio_disconnect_port(event->object);
-			serio_find_driver(event->object);
-			break;
-
-		case SERIO_RECONNECT_SUBTREE:
-			serio_reconnect_subtree(event->object);
-			break;
-
-		case SERIO_ATTACH_DRIVER:
-			serio_attach_driver(event->object);
-			break;
-		}
-
-		serio_remove_duplicate_events(event);
-		serio_free_event(event);
-	}
-
-	mutex_unlock(&serio_mutex);
-}
-
 /*
  * Remove all events that have been submitted for a given
  * object, be it serio port or driver.
@@ -356,18 +354,6 @@
 	return child;
 }
 
-static int serio_thread(void *nothing)
-{
-	do {
-		serio_handle_event();
-		wait_event_interruptible(serio_wait,
-			kthread_should_stop() || !list_empty(&serio_event_list));
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
-
 /*
  * Serio port operations
  */
@@ -1040,21 +1026,18 @@
 		return error;
 	}
 
-	serio_task = kthread_run(serio_thread, NULL, "kseriod");
-	if (IS_ERR(serio_task)) {
-		bus_unregister(&serio_bus);
-		error = PTR_ERR(serio_task);
-		pr_err("Failed to start kseriod, error: %d\n", error);
-		return error;
-	}
-
 	return 0;
 }
 
 static void __exit serio_exit(void)
 {
 	bus_unregister(&serio_bus);
-	kthread_stop(serio_task);
+
+	/*
+	 * There should not be any outstanding events but work may
+	 * still be scheduled so simply cancel it.
+	 */
+	cancel_work_sync(&serio_event_work);
 }
 
 subsys_initcall(serio_init);
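
The kseriod removal above follows a standard conversion: instead of a dedicated kernel thread sleeping on a wait queue, the event list is drained from the shared kernel workqueue. A toy module sketching just that pattern (the example_* names are made up and not serio code):

#include <linux/module.h>
#include <linux/workqueue.h>

static void example_handle_events(struct work_struct *work)
{
	pr_info("draining pending events\n");
}

static DECLARE_WORK(example_event_work, example_handle_events);

static int __init example_init(void)
{
	/* where the old code did wake_up(&serio_wait) */
	schedule_work(&example_event_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* where the old code did kthread_stop(serio_task) */
	cancel_work_sync(&example_event_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
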
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index de5adb1..23317bd 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -103,6 +103,7 @@
 MODULE_LICENSE(DRIVER_LICENSE);
 
 #define USB_VENDOR_ID_WACOM	0x056a
+#define USB_VENDOR_ID_LENOVO	0x17ef
 
 struct wacom {
 	dma_addr_t data_dma;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 435b0af..5187829 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -14,6 +14,7 @@
 
 #include "wacom_wac.h"
 #include "wacom.h"
+#include <linux/input/mt.h>
 
 static int wacom_penpartner_irq(struct wacom_wac *wacom)
 {
@@ -862,19 +863,21 @@
 	struct wacom_features *features = &wacom->features;
 	struct input_dev *input = wacom->input;
 	unsigned char *data = wacom->data;
-	int sp = 0, sx = 0, sy = 0, count = 0;
 	int i;
 
 	for (i = 0; i < 2; i++) {
 		int p = data[9 * i + 2];
+		bool touch = p && !wacom->shared->stylus_in_proximity;
+
 		input_mt_slot(input, i);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
 		/*
 		 * Touch events need to be disabled while stylus is
 		 * in proximity because user's hand is resting on touchpad
 		 * and sending unwanted events.  User expects tablet buttons
 		 * to continue working though.
 		 */
-		if (p && !wacom->shared->stylus_in_proximity) {
+		if (touch) {
 			int x = get_unaligned_be16(&data[9 * i + 3]) & 0x7ff;
 			int y = get_unaligned_be16(&data[9 * i + 5]) & 0x7ff;
 			if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
@@ -884,23 +887,10 @@
 			input_report_abs(input, ABS_MT_PRESSURE, p);
 			input_report_abs(input, ABS_MT_POSITION_X, x);
 			input_report_abs(input, ABS_MT_POSITION_Y, y);
-			if (wacom->id[i] < 0)
-				wacom->id[i] = wacom->trk_id++ & MAX_TRACKING_ID;
-			if (!count++)
-				sp = p, sx = x, sy = y;
-		} else {
-			wacom->id[i] = -1;
 		}
-		input_report_abs(input, ABS_MT_TRACKING_ID, wacom->id[i]);
 	}
 
-	input_report_key(input, BTN_TOUCH, count > 0);
-	input_report_key(input, BTN_TOOL_FINGER, count == 1);
-	input_report_key(input, BTN_TOOL_DOUBLETAP, count == 2);
-
-	input_report_abs(input, ABS_PRESSURE, sp);
-	input_report_abs(input, ABS_X, sx);
-	input_report_abs(input, ABS_Y, sy);
+	input_mt_report_pointer_emulation(input, true);
 
 	input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
 	input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
@@ -1272,7 +1262,7 @@
 			__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
 			__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
 
-			input_mt_create_slots(input_dev, 2);
+			input_mt_init_slots(input_dev, 2);
 			input_set_abs_params(input_dev, ABS_MT_POSITION_X,
 					     0, features->x_max,
 					     features->x_fuzz, 0);
@@ -1282,8 +1272,6 @@
 			input_set_abs_params(input_dev, ABS_MT_PRESSURE,
 					     0, features->pressure_max,
 					     features->pressure_fuzz, 0);
-			input_set_abs_params(input_dev, ABS_MT_TRACKING_ID, 0,
-					     MAX_TRACKING_ID, 0, 0);
 		} else if (features->device_type == BTN_TOOL_PEN) {
 			__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
 			__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1444,11 +1432,17 @@
 	{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN,  14720,  9200, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xDB =
 	{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN,  21648, 13530, 1023, 63, BAMBOO_PT };
+static const struct wacom_features wacom_features_0x6004 =
+	{ "ISD-V4",               WACOM_PKGLEN_GRAPHIRE,  12800, 8000, 255, 0, TABLETPC };
 
 #define USB_DEVICE_WACOM(prod)					\
 	USB_DEVICE(USB_VENDOR_ID_WACOM, prod),			\
 	.driver_info = (kernel_ulong_t)&wacom_features_##prod
 
+#define USB_DEVICE_LENOVO(prod)					\
+	USB_DEVICE(USB_VENDOR_ID_LENOVO, prod),			\
+	.driver_info = (kernel_ulong_t)&wacom_features_##prod
+
 const struct usb_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0x00) },
 	{ USB_DEVICE_WACOM(0x10) },
@@ -1525,6 +1519,7 @@
 	{ USB_DEVICE_WACOM(0xE2) },
 	{ USB_DEVICE_WACOM(0xE3) },
 	{ USB_DEVICE_WACOM(0x47) },
+	{ USB_DEVICE_LENOVO(0x6004) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, wacom_ids);
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 00ca015..b1310ec 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -42,9 +42,6 @@
 #define WACOM_QUIRK_MULTI_INPUT		0x0001
 #define WACOM_QUIRK_BBTOUCH_LOWRES	0x0002
 
-/* largest reported tracking id */
-#define MAX_TRACKING_ID			0xfff
-
 enum {
 	PENPARTNER = 0,
 	GRAPHIRE,
@@ -100,7 +97,6 @@
 	int id[3];
 	__u32 serial[2];
 	int last_finger;
-	int trk_id;
 	struct wacom_features features;
 	struct wacom_shared *shared;
 	struct input_dev *input;
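
The wacom_wac hunks above drop the driver's private tracking-id bookkeeping (MAX_TRACKING_ID, trk_id) in favour of the slotted multitouch helpers from linux/input/mt.h: each contact is addressed with input_mt_slot(), its presence is reported with input_mt_report_slot_state(), and input_mt_report_pointer_emulation() synthesizes the legacy BTN_TOUCH/BTN_TOOL_* and ABS_X/ABS_Y events that the removed hand-rolled code used to emit. A minimal, stand-alone sketch of that reporting pattern follows; example_report_frame() and the contact array are illustrative placeholders, not part of this patch:

#include <linux/input.h>
#include <linux/input/mt.h>

#define NUM_SLOTS	2	/* as passed to input_mt_init_slots() at setup */

struct example_contact {
	bool active;
	int x, y;
};

static void example_report_frame(struct input_dev *input,
				 const struct example_contact *c)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++) {
		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, c[i].active);
		if (c[i].active) {
			input_report_abs(input, ABS_MT_POSITION_X, c[i].x);
			input_report_abs(input, ABS_MT_POSITION_Y, c[i].y);
		}
	}

	/* derive single-touch BTN_TOUCH/BTN_TOOL_* and ABS_X/Y from the slots */
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);
}

Device setup only needs input_mt_init_slots(input, NUM_SLOTS) plus the usual input_set_abs_params() calls for ABS_MT_POSITION_X/Y, which is what the input_mt_init_slots(input_dev, 2) hunk above now does.
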
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 06ea8da..0c9f4b1 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -610,7 +610,7 @@
 
 config TOUCHSCREEN_USB_ETT_TC45USB
 	default y
-	bool "ET&T USB series TC4UM/TC5UH touchscreen controler support" if EMBEDDED
+	bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_NEXIO
@@ -659,6 +659,28 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called pcap_ts.
 
+config TOUCHSCREEN_ST1232
+	tristate "Sitronix ST1232 touchscreen controllers"
+	depends on I2C
+	help
+	  Say Y here if you want to support Sitronix ST1232
+	  touchscreen controller.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called st1232_ts.
+
+config TOUCHSCREEN_STMPE
+	tristate "STMicroelectronics STMPE touchscreens"
+	depends on MFD_STMPE
+	help
+	  Say Y here if you want support for STMicroelectronics
+	  STMPE touchscreen controllers.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called stmpe-ts.
+
 config TOUCHSCREEN_TPS6507X
 	tristate "TPS6507x based touchscreens"
 	depends on I2C
@@ -671,14 +693,4 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tps6507x_ts.
 
-config TOUCHSCREEN_STMPE
-	tristate "STMicroelectronics STMPE touchscreens"
-	depends on MFD_STMPE
-	help
-	  Say Y here if you want support for STMicroelectronics
-	  STMPE touchscreen controllers.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called stmpe-ts.
-
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7cc1b4f..718bcc8 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_TOUCHSCREEN_PENMOUNT)	+= penmount.o
 obj-$(CONFIG_TOUCHSCREEN_QT602240)	+= qt602240_ts.o
 obj-$(CONFIG_TOUCHSCREEN_S3C2410)	+= s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ST1232)	+= st1232.o
 obj-$(CONFIG_TOUCHSCREEN_STMPE)		+= stmpe-ts.o
 obj-$(CONFIG_TOUCHSCREEN_TNETV107X)	+= tnetv107x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213)	+= touchit213.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index d82a38e..4e4e58c 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -10,14 +10,16 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/pm.h>
 
 #include "ad7879.h"
 
 #define AD7879_DEVID		0x79	/* AD7879-1/AD7889-1 */
 
 #ifdef CONFIG_PM
-static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int ad7879_i2c_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ad7879 *ts = i2c_get_clientdata(client);
 
 	ad7879_suspend(ts);
@@ -25,17 +27,17 @@
 	return 0;
 }
 
-static int ad7879_i2c_resume(struct i2c_client *client)
+static int ad7879_i2c_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ad7879 *ts = i2c_get_clientdata(client);
 
 	ad7879_resume(ts);
 
 	return 0;
 }
-#else
-# define ad7879_i2c_suspend NULL
-# define ad7879_i2c_resume  NULL
+
+static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
 #endif
 
 /* All registers are word-sized.
@@ -117,11 +119,12 @@
 	.driver = {
 		.name	= "ad7879",
 		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &ad7879_i2c_pm,
+#endif
 	},
 	.probe		= ad7879_i2c_probe,
 	.remove		= __devexit_p(ad7879_i2c_remove),
-	.suspend	= ad7879_i2c_suspend,
-	.resume		= ad7879_i2c_resume,
 	.id_table	= ad7879_id,
 };
 
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 2ca9e5d..f7fa9ef 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -365,7 +365,7 @@
 	}
 
 	retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG,
-				BU21013_TH_OFF_4 || BU21013_TH_OFF_3);
+				BU21013_TH_OFF_4 | BU21013_TH_OFF_3);
 	if (retval < 0) {
 		dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n");
 		return retval;
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index d0c3a72..a93c5c2 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -280,8 +280,9 @@
 }
 
 #ifdef CONFIG_PM
-static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
+static int cy8ctmg110_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct cy8ctmg110 *ts = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(&client->dev))
@@ -293,8 +294,9 @@
 	return 0;
 }
 
-static int cy8ctmg110_resume(struct i2c_client *client)
+static int cy8ctmg110_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct cy8ctmg110 *ts = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(&client->dev))
@@ -305,6 +307,8 @@
 	}
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
 #endif
 
 static int __devexit cy8ctmg110_remove(struct i2c_client *client)
@@ -335,14 +339,13 @@
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= CY8CTMG110_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm	= &cy8ctmg110_pm,
+#endif
 	},
 	.id_table	= cy8ctmg110_idtable,
 	.probe		= cy8ctmg110_probe,
 	.remove		= __devexit_p(cy8ctmg110_remove),
-#ifdef CONFIG_PM
-	.suspend	= cy8ctmg110_suspend,
-	.resume		= cy8ctmg110_resume,
-#endif
 };
 
 static int __init cy8ctmg110_init(void)
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 7a3a916..7f8f538 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -261,8 +261,9 @@
 }
 
 #ifdef CONFIG_PM
-static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int eeti_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
 	struct input_dev *input_dev = priv->input;
 
@@ -279,8 +280,9 @@
 	return 0;
 }
 
-static int eeti_ts_resume(struct i2c_client *client)
+static int eeti_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
 	struct input_dev *input_dev = priv->input;
 
@@ -296,9 +298,8 @@
 
 	return 0;
 }
-#else
-#define eeti_ts_suspend NULL
-#define eeti_ts_resume NULL
+
+static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
 #endif
 
 static const struct i2c_device_id eeti_ts_id[] = {
@@ -310,11 +311,12 @@
 static struct i2c_driver eeti_ts_driver = {
 	.driver = {
 		.name = "eeti_ts",
+#ifdef CONFIG_PM
+		.pm = &eeti_ts_pm,
+#endif
 	},
 	.probe = eeti_ts_probe,
 	.remove = __devexit_p(eeti_ts_remove),
-	.suspend = eeti_ts_suspend,
-	.resume = eeti_ts_resume,
 	.id_table = eeti_ts_id,
 };
 
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 6ee9940..2d84c80 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -261,25 +261,27 @@
 }
 
 #ifdef CONFIG_PM
-static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int mcs5000_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
+
 	/* Touch sleep mode */
 	i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
 
 	return 0;
 }
 
-static int mcs5000_ts_resume(struct i2c_client *client)
+static int mcs5000_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct mcs5000_ts_data *data = i2c_get_clientdata(client);
 
 	mcs5000_ts_phys_init(data);
 
 	return 0;
 }
-#else
-#define mcs5000_ts_suspend	NULL
-#define mcs5000_ts_resume	NULL
+
+static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume);
 #endif
 
 static const struct i2c_device_id mcs5000_ts_id[] = {
@@ -291,10 +293,11 @@
 static struct i2c_driver mcs5000_ts_driver = {
 	.probe		= mcs5000_ts_probe,
 	.remove		= __devexit_p(mcs5000_ts_remove),
-	.suspend	= mcs5000_ts_suspend,
-	.resume		= mcs5000_ts_resume,
 	.driver = {
 		.name = "mcs5000_ts",
+#ifdef CONFIG_PM
+		.pm   = &mcs5000_ts_pm,
+#endif
 	},
 	.id_table	= mcs5000_ts_id,
 };
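
The ad7879-i2c, cy8ctmg110, eeti_ts and mcs5000 hunks above (and the migor_ts and qt602240 ones below) all perform the same conversion: the legacy i2c_driver .suspend/.resume callbacks, which received an i2c_client, are replaced by dev_pm_ops methods that receive a struct device and are wired up through driver.pm. A stand-alone sketch of the pattern; example_chip and its sleep/wake helpers are placeholders, not code from any of these drivers:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm.h>

/* hypothetical per-client state and helpers, for illustration only */
struct example_chip { int unused; };
static void example_chip_sleep(struct example_chip *chip) { }
static void example_chip_wake(struct example_chip *chip) { }

#ifdef CONFIG_PM
static int example_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	example_chip_sleep(i2c_get_clientdata(client));
	return 0;
}

static int example_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	example_chip_wake(i2c_get_clientdata(client));
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#endif

static struct i2c_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &example_pm_ops,
#endif
	},
	/* .probe, .remove and .id_table as before; the legacy
	 * .suspend/.resume members are simply dropped. */
};

SIMPLE_DEV_PM_OPS also reuses the two callbacks for the hibernation transitions (freeze/thaw/poweroff/restore), so the converted drivers gain those for free.
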
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index defe5dd..5803bd0 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <linux/slab.h>
 #include <asm/io.h>
 #include <linux/i2c.h>
@@ -226,8 +227,9 @@
 	return 0;
 }
 
-static int migor_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int migor_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
 
 	if (device_may_wakeup(&client->dev))
@@ -236,8 +238,9 @@
 	return 0;
 }
 
-static int migor_ts_resume(struct i2c_client *client)
+static int migor_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
 
 	if (device_may_wakeup(&client->dev))
@@ -246,6 +249,8 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(migor_ts_pm, migor_ts_suspend, migor_ts_resume);
+
 static const struct i2c_device_id migor_ts_id[] = {
 	{ "migor_ts", 0 },
 	{ }
@@ -255,11 +260,10 @@
 static struct i2c_driver migor_ts_driver = {
 	.driver = {
 		.name = "migor_ts",
+		.pm = &migor_ts_pm,
 	},
 	.probe = migor_ts_probe,
 	.remove = migor_ts_remove,
-	.suspend = migor_ts_suspend,
-	.resume = migor_ts_resume,
 	.id_table = migor_ts_id,
 };
 
diff --git a/drivers/input/touchscreen/qt602240_ts.c b/drivers/input/touchscreen/qt602240_ts.c
index 66b26ad..4dcb0e8 100644
--- a/drivers/input/touchscreen/qt602240_ts.c
+++ b/drivers/input/touchscreen/qt602240_ts.c
@@ -969,7 +969,7 @@
 		return error;
 
 	data->object_table = kcalloc(info->object_num,
-				     sizeof(struct qt602240_data),
+				     sizeof(struct qt602240_object),
 				     GFP_KERNEL);
 	if (!data->object_table) {
 		dev_err(&client->dev, "Failed to allocate memory\n");
@@ -1324,8 +1324,9 @@
 }
 
 #ifdef CONFIG_PM
-static int qt602240_suspend(struct i2c_client *client, pm_message_t mesg)
+static int qt602240_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct qt602240_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
@@ -1339,8 +1340,9 @@
 	return 0;
 }
 
-static int qt602240_resume(struct i2c_client *client)
+static int qt602240_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct qt602240_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
@@ -1359,9 +1361,11 @@
 
 	return 0;
 }
-#else
-#define qt602240_suspend	NULL
-#define qt602240_resume		NULL
+
+static const struct dev_pm_ops qt602240_pm_ops = {
+	.suspend	= qt602240_suspend,
+	.resume		= qt602240_resume,
+};
 #endif
 
 static const struct i2c_device_id qt602240_id[] = {
@@ -1374,11 +1378,12 @@
 	.driver = {
 		.name	= "qt602240_ts",
 		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &qt602240_pm_ops,
+#endif
 	},
 	.probe		= qt602240_probe,
 	.remove		= __devexit_p(qt602240_remove),
-	.suspend	= qt602240_suspend,
-	.resume		= qt602240_resume,
 	.id_table	= qt602240_id,
 };
 
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
new file mode 100644
index 0000000..4ab3713
--- /dev/null
+++ b/drivers/input/touchscreen/st1232.c
@@ -0,0 +1,274 @@
+/*
+ * ST1232 Touchscreen Controller Driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ *	Tony SIM <chinyeow.sim.xt@renesas.com>
+ *
+ * Using code from:
+ *  - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
+ *	Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define ST1232_TS_NAME	"st1232-ts"
+
+#define MIN_X		0x00
+#define MIN_Y		0x00
+#define MAX_X		0x31f	/* (800 - 1) */
+#define MAX_Y		0x1df	/* (480 - 1) */
+#define MAX_AREA	0xff
+#define MAX_FINGERS	2
+
+struct st1232_ts_finger {
+	u16 x;
+	u16 y;
+	u8 t;
+	bool is_valid;
+};
+
+struct st1232_ts_data {
+	struct i2c_client *client;
+	struct input_dev *input_dev;
+	struct st1232_ts_finger finger[MAX_FINGERS];
+};
+
+static int st1232_ts_read_data(struct st1232_ts_data *ts)
+{
+	struct st1232_ts_finger *finger = ts->finger;
+	struct i2c_client *client = ts->client;
+	struct i2c_msg msg[2];
+	int error;
+	u8 start_reg;
+	u8 buf[10];
+
+	/* read touchscreen data from ST1232 */
+	msg[0].addr = client->addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &start_reg;
+	start_reg = 0x10;
+
+	msg[1].addr = ts->client->addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = sizeof(buf);
+	msg[1].buf = buf;
+
+	error = i2c_transfer(client->adapter, msg, 2);
+	if (error < 0)
+		return error;
+
+	/* get "valid" bits */
+	finger[0].is_valid = buf[2] >> 7;
+	finger[1].is_valid = buf[5] >> 7;
+
+	/* get xy coordinate */
+	if (finger[0].is_valid) {
+		finger[0].x = ((buf[2] & 0x0070) << 4) | buf[3];
+		finger[0].y = ((buf[2] & 0x0007) << 8) | buf[4];
+		finger[0].t = buf[8];
+	}
+
+	if (finger[1].is_valid) {
+		finger[1].x = ((buf[5] & 0x0070) << 4) | buf[6];
+		finger[1].y = ((buf[5] & 0x0007) << 8) | buf[7];
+		finger[1].t = buf[9];
+	}
+
+	return 0;
+}
+
+static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
+{
+	struct st1232_ts_data *ts = dev_id;
+	struct st1232_ts_finger *finger = ts->finger;
+	struct input_dev *input_dev = ts->input_dev;
+	int count = 0;
+	int i, ret;
+
+	ret = st1232_ts_read_data(ts);
+	if (ret < 0)
+		goto end;
+
+	/* multi touch protocol */
+	for (i = 0; i < MAX_FINGERS; i++) {
+		if (!finger[i].is_valid)
+			continue;
+
+		input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, finger[i].t);
+		input_report_abs(input_dev, ABS_MT_POSITION_X, finger[i].x);
+		input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[i].y);
+		input_mt_sync(input_dev);
+		count++;
+	}
+
+	/* SYN_MT_REPORT only if no contact */
+	if (!count)
+		input_mt_sync(input_dev);
+
+	/* SYN_REPORT */
+	input_sync(input_dev);
+
+end:
+	return IRQ_HANDLED;
+}
+
+static int __devinit st1232_ts_probe(struct i2c_client *client,
+					const struct i2c_device_id *id)
+{
+	struct st1232_ts_data *ts;
+	struct input_dev *input_dev;
+	int error;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "need I2C_FUNC_I2C\n");
+		return -EIO;
+	}
+
+	if (!client->irq) {
+		dev_err(&client->dev, "no IRQ?\n");
+		return -EINVAL;
+	}
+
+
+	ts = kzalloc(sizeof(struct st1232_ts_data), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!ts || !input_dev) {
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	ts->client = client;
+	ts->input_dev = input_dev;
+
+	input_dev->name = "st1232-touchscreen";
+	input_dev->id.bustype = BUS_I2C;
+	input_dev->dev.parent = &client->dev;
+
+	__set_bit(EV_SYN, input_dev->evbit);
+	__set_bit(EV_KEY, input_dev->evbit);
+	__set_bit(EV_ABS, input_dev->evbit);
+
+	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MAX_AREA, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_POSITION_X, MIN_X, MAX_X, 0, 0);
+	input_set_abs_params(input_dev, ABS_MT_POSITION_Y, MIN_Y, MAX_Y, 0, 0);
+
+	error = request_threaded_irq(client->irq, NULL, st1232_ts_irq_handler,
+				     IRQF_ONESHOT, client->name, ts);
+	if (error) {
+		dev_err(&client->dev, "Failed to register interrupt\n");
+		goto err_free_mem;
+	}
+
+	error = input_register_device(ts->input_dev);
+	if (error) {
+		dev_err(&client->dev, "Unable to register %s input device\n",
+			input_dev->name);
+		goto err_free_irq;
+	}
+
+	i2c_set_clientdata(client, ts);
+	device_init_wakeup(&client->dev, 1);
+
+	return 0;
+
+err_free_irq:
+	free_irq(client->irq, ts);
+err_free_mem:
+	input_free_device(input_dev);
+	kfree(ts);
+	return error;
+}
+
+static int __devexit st1232_ts_remove(struct i2c_client *client)
+{
+	struct st1232_ts_data *ts = i2c_get_clientdata(client);
+
+	device_init_wakeup(&client->dev, 0);
+	free_irq(client->irq, ts);
+	input_unregister_device(ts->input_dev);
+	kfree(ts);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int st1232_ts_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (device_may_wakeup(&client->dev))
+		enable_irq_wake(client->irq);
+	else
+		disable_irq(client->irq);
+
+	return 0;
+}
+
+static int st1232_ts_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (device_may_wakeup(&client->dev))
+		disable_irq_wake(client->irq);
+	else
+		enable_irq(client->irq);
+
+	return 0;
+}
+
+static const struct dev_pm_ops st1232_ts_pm_ops = {
+	.suspend	= st1232_ts_suspend,
+	.resume		= st1232_ts_resume,
+};
+#endif
+
+static const struct i2c_device_id st1232_ts_id[] = {
+	{ ST1232_TS_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
+
+static struct i2c_driver st1232_ts_driver = {
+	.probe		= st1232_ts_probe,
+	.remove		= __devexit_p(st1232_ts_remove),
+	.id_table	= st1232_ts_id,
+	.driver = {
+		.name	= ST1232_TS_NAME,
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &st1232_ts_pm_ops,
+#endif
+	},
+};
+
+static int __init st1232_ts_init(void)
+{
+	return i2c_add_driver(&st1232_ts_driver);
+}
+module_init(st1232_ts_init);
+
+static void __exit st1232_ts_exit(void)
+{
+	i2c_del_driver(&st1232_ts_driver);
+}
+module_exit(st1232_ts_exit);
+
+MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
+MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 9ae4c7b..5cb8449 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -3,6 +3,7 @@
  *
  * Copyright (c) 2008 Jaya Kumar
  * Copyright (c) 2010 Red Hat, Inc.
+ * Copyright (c) 2010 - 2011 Ping Cheng, Wacom. <pingc@wacom.com>
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file COPYING in the main directory of this archive for
@@ -15,10 +16,11 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/input.h>
+#include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/init.h>
 #include <linux/ctype.h>
+#include <linux/delay.h>
 
 #define DRIVER_DESC	"Wacom W8001 serial touchscreen driver"
 
@@ -37,6 +39,7 @@
 
 #define W8001_QUERY_PACKET	0x20
 
+#define W8001_CMD_STOP		'0'
 #define W8001_CMD_START		'1'
 #define W8001_CMD_QUERY		'*'
 #define W8001_CMD_TOUCHQUERY	'%'
@@ -48,8 +51,6 @@
 #define W8001_PKTLEN_TPCCTL	11	/* control packet */
 #define W8001_PKTLEN_TOUCH2FG	13
 
-#define MAX_TRACKING_ID		0xFF	/* arbitrarily chosen */
-
 struct w8001_coord {
 	u8 rdy;
 	u8 tsw;
@@ -64,11 +65,11 @@
 
 /* touch query reply packet */
 struct w8001_touch_query {
+	u16 x;
+	u16 y;
 	u8 panel_res;
 	u8 capacity_res;
 	u8 sensor_id;
-	u16 x;
-	u16 y;
 };
 
 /*
@@ -87,10 +88,14 @@
 	char phys[32];
 	int type;
 	unsigned int pktlen;
-	int trkid[2];
+	u16 max_touch_x;
+	u16 max_touch_y;
+	u16 max_pen_x;
+	u16 max_pen_y;
+	char name[64];
 };
 
-static void parse_data(u8 *data, struct w8001_coord *coord)
+static void parse_pen_data(u8 *data, struct w8001_coord *coord)
 {
 	memset(coord, 0, sizeof(*coord));
 
@@ -114,30 +119,58 @@
 	coord->tilt_y = data[8] & 0x7F;
 }
 
-static void parse_touch(struct w8001 *w8001)
+static void parse_single_touch(u8 *data, struct w8001_coord *coord)
 {
-	static int trkid;
+	coord->x = (data[1] << 7) | data[2];
+	coord->y = (data[3] << 7) | data[4];
+	coord->tsw = data[0] & 0x01;
+}
+
+static void scale_touch_coordinates(struct w8001 *w8001,
+				    unsigned int *x, unsigned int *y)
+{
+	if (w8001->max_pen_x && w8001->max_touch_x)
+		*x = *x * w8001->max_pen_x / w8001->max_touch_x;
+
+	if (w8001->max_pen_y && w8001->max_touch_y)
+		*y = *y * w8001->max_pen_y / w8001->max_touch_y;
+}
+
+static void parse_multi_touch(struct w8001 *w8001)
+{
 	struct input_dev *dev = w8001->dev;
 	unsigned char *data = w8001->data;
+	unsigned int x, y;
 	int i;
+	int count = 0;
 
 	for (i = 0; i < 2; i++) {
-		input_mt_slot(dev, i);
+		bool touch = data[0] & (1 << i);
 
-		if (data[0] & (1 << i)) {
-			int x = (data[6 * i + 1] << 7) | (data[6 * i + 2]);
-			int y = (data[6 * i + 3] << 7) | (data[6 * i + 4]);
+		input_mt_slot(dev, i);
+		input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
+		if (touch) {
+			x = (data[6 * i + 1] << 7) | data[6 * i + 2];
+			y = (data[6 * i + 3] << 7) | data[6 * i + 4];
 			/* data[5,6] and [11,12] is finger capacity */
 
+			/* scale to pen maximum */
+			scale_touch_coordinates(w8001, &x, &y);
+
 			input_report_abs(dev, ABS_MT_POSITION_X, x);
 			input_report_abs(dev, ABS_MT_POSITION_Y, y);
-			input_report_abs(dev, ABS_MT_TOOL_TYPE, MT_TOOL_FINGER);
-			if (w8001->trkid[i] < 0)
-				w8001->trkid[i] = trkid++ & MAX_TRACKING_ID;
-		} else {
-			w8001->trkid[i] = -1;
+			count++;
 		}
-		input_report_abs(dev, ABS_MT_TRACKING_ID, w8001->trkid[i]);
+	}
+
+	/* emulate single touch events when stylus is out of proximity.
+	 * This is to make single touch backward support consistent
+	 * across all Wacom single touch devices.
+	 */
+	if (w8001->type != BTN_TOOL_PEN &&
+			    w8001->type != BTN_TOOL_RUBBER) {
+		w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED;
+		input_mt_report_pointer_emulation(dev, true);
 	}
 
 	input_sync(dev);
@@ -158,6 +191,15 @@
 	query->y = data[5] << 9;
 	query->y |= data[6] << 2;
 	query->y |= (data[2] >> 3) & 0x3;
+
+	/* Early days' single-finger touch models need the following defaults */
+	if (!query->x && !query->y) {
+		query->x = 1024;
+		query->y = 1024;
+		if (query->panel_res)
+			query->x = query->y = (1 << query->panel_res);
+		query->panel_res = 10;
+	}
 }
 
 static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
@@ -167,16 +209,15 @@
 	/*
 	 * We have 1 bit for proximity (rdy) and 3 bits for tip, side,
 	 * side2/eraser. If rdy && f2 are set, this can be either pen + side2,
-	 * or eraser. assume
+	 * or eraser. Assume:
 	 * - if dev is already in proximity and f2 is toggled → pen + side2
 	 * - if dev comes into proximity with f2 set → eraser
 	 * If f2 disappears after assuming eraser, fake proximity out for
 	 * eraser and in for pen.
 	 */
 
-	if (!w8001->type) {
-		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
-	} else if (w8001->type == BTN_TOOL_RUBBER) {
+	switch (w8001->type) {
+	case BTN_TOOL_RUBBER:
 		if (!coord->f2) {
 			input_report_abs(dev, ABS_PRESSURE, 0);
 			input_report_key(dev, BTN_TOUCH, 0);
@@ -186,8 +227,21 @@
 			input_sync(dev);
 			w8001->type = BTN_TOOL_PEN;
 		}
-	} else {
+		break;
+
+	case BTN_TOOL_FINGER:
+		input_report_key(dev, BTN_TOUCH, 0);
+		input_report_key(dev, BTN_TOOL_FINGER, 0);
+		input_sync(dev);
+		/* fall through */
+
+	case KEY_RESERVED:
+		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+		break;
+
+	default:
 		input_report_key(dev, BTN_STYLUS2, coord->f2);
+		break;
 	}
 
 	input_report_abs(dev, ABS_X, coord->x);
@@ -199,7 +253,26 @@
 	input_sync(dev);
 
 	if (!coord->rdy)
-		w8001->type = 0;
+		w8001->type = KEY_RESERVED;
+}
+
+static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord)
+{
+	struct input_dev *dev = w8001->dev;
+	unsigned int x = coord->x;
+	unsigned int y = coord->y;
+
+	/* scale to pen maximum */
+	scale_touch_coordinates(w8001, &x, &y);
+
+	input_report_abs(dev, ABS_X, x);
+	input_report_abs(dev, ABS_Y, y);
+	input_report_key(dev, BTN_TOUCH, coord->tsw);
+	input_report_key(dev, BTN_TOOL_FINGER, coord->tsw);
+
+	input_sync(dev);
+
+	w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED;
 }
 
 static irqreturn_t w8001_interrupt(struct serio *serio,
@@ -220,9 +293,18 @@
 
 	case W8001_PKTLEN_TOUCH93 - 1:
 	case W8001_PKTLEN_TOUCH9A - 1:
-		/* ignore one-finger touch packet. */
-		if (w8001->pktlen == w8001->idx)
+		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
+		if (tmp != W8001_TOUCH_BYTE)
+			break;
+
+		if (w8001->pktlen == w8001->idx) {
 			w8001->idx = 0;
+			if (w8001->type != BTN_TOOL_PEN &&
+			    w8001->type != BTN_TOOL_RUBBER) {
+				parse_single_touch(w8001->data, &coord);
+				report_single_touch(w8001, &coord);
+			}
+		}
 		break;
 
 	/* Pen coordinates packet */
@@ -231,18 +313,18 @@
 		if (unlikely(tmp == W8001_TAB_BYTE))
 			break;
 
-		tmp = (w8001->data[0] & W8001_TOUCH_BYTE);
+		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
 		if (tmp == W8001_TOUCH_BYTE)
 			break;
 
 		w8001->idx = 0;
-		parse_data(w8001->data, &coord);
+		parse_pen_data(w8001->data, &coord);
 		report_pen_events(w8001, &coord);
 		break;
 
 	/* control packet */
 	case W8001_PKTLEN_TPCCTL - 1:
-		tmp = (w8001->data[0] & W8001_TOUCH_MASK);
+		tmp = w8001->data[0] & W8001_TOUCH_MASK;
 		if (tmp == W8001_TOUCH_BYTE)
 			break;
 
@@ -255,7 +337,7 @@
 	/* 2 finger touch packet */
 	case W8001_PKTLEN_TOUCH2FG - 1:
 		w8001->idx = 0;
-		parse_touch(w8001);
+		parse_multi_touch(w8001);
 		break;
 	}
 
@@ -285,52 +367,104 @@
 {
 	struct input_dev *dev = w8001->dev;
 	struct w8001_coord coord;
+	struct w8001_touch_query touch;
 	int error;
 
-	error = w8001_command(w8001, W8001_CMD_QUERY, true);
+	error = w8001_command(w8001, W8001_CMD_STOP, false);
 	if (error)
 		return error;
 
-	parse_data(w8001->response, &coord);
+	msleep(250);	/* wait 250ms before querying the device */
 
-	input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
-	input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
-	input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
-	input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
-	input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
+	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
 
-	error = w8001_command(w8001, W8001_CMD_TOUCHQUERY, true);
+	/* penabled? */
+	error = w8001_command(w8001, W8001_CMD_QUERY, true);
 	if (!error) {
-		struct w8001_touch_query touch;
+		__set_bit(BTN_TOUCH, dev->keybit);
+		__set_bit(BTN_TOOL_PEN, dev->keybit);
+		__set_bit(BTN_TOOL_RUBBER, dev->keybit);
+		__set_bit(BTN_STYLUS, dev->keybit);
+		__set_bit(BTN_STYLUS2, dev->keybit);
+
+		parse_pen_data(w8001->response, &coord);
+		w8001->max_pen_x = coord.x;
+		w8001->max_pen_y = coord.y;
+
+		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
+		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+		input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
+		if (coord.tilt_x && coord.tilt_y) {
+			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
+			input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
+		}
+		w8001->id = 0x90;
+		strlcat(w8001->name, " Penabled", sizeof(w8001->name));
+	}
+
+	/* Touch enabled? */
+	error = w8001_command(w8001, W8001_CMD_TOUCHQUERY, true);
+
+	/*
+	 * Some non-touch devices may reply to the touch query. But their
+	 * second byte is empty, which indicates touch is not supported.
+	 */
+	if (!error && w8001->response[1]) {
+		__set_bit(BTN_TOUCH, dev->keybit);
+		__set_bit(BTN_TOOL_FINGER, dev->keybit);
 
 		parse_touchquery(w8001->response, &touch);
+		w8001->max_touch_x = touch.x;
+		w8001->max_touch_y = touch.y;
+
+		/* scale to pen maximum */
+		if (w8001->max_pen_x && w8001->max_pen_y) {
+			touch.x = w8001->max_pen_x;
+			touch.y = w8001->max_pen_y;
+		}
+
+		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
+		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
 
 		switch (touch.sensor_id) {
 		case 0:
 		case 2:
 			w8001->pktlen = W8001_PKTLEN_TOUCH93;
+			w8001->id = 0x93;
+			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
 			break;
+
 		case 1:
 		case 3:
 		case 4:
 			w8001->pktlen = W8001_PKTLEN_TOUCH9A;
+			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
+			w8001->id = 0x9a;
 			break;
+
 		case 5:
 			w8001->pktlen = W8001_PKTLEN_TOUCH2FG;
 
-			input_mt_create_slots(dev, 2);
-			input_set_abs_params(dev, ABS_MT_TRACKING_ID,
-						0, MAX_TRACKING_ID, 0, 0);
+			input_mt_init_slots(dev, 2);
 			input_set_abs_params(dev, ABS_MT_POSITION_X,
 						0, touch.x, 0, 0);
 			input_set_abs_params(dev, ABS_MT_POSITION_Y,
 						0, touch.y, 0, 0);
 			input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
-						0, 0, 0, 0);
+						0, MT_TOOL_MAX, 0, 0);
+
+			strlcat(w8001->name, " 2FG", sizeof(w8001->name));
+			if (w8001->max_pen_x && w8001->max_pen_y)
+				w8001->id = 0xE3;
+			else
+				w8001->id = 0xE2;
 			break;
 		}
 	}
 
+	strlcat(w8001->name, " Touchscreen", sizeof(w8001->name));
+
 	return w8001_command(w8001, W8001_CMD_START, false);
 }
 
@@ -370,27 +504,10 @@
 	}
 
 	w8001->serio = serio;
-	w8001->id = serio->id.id;
 	w8001->dev = input_dev;
-	w8001->trkid[0] = w8001->trkid[1] = -1;
 	init_completion(&w8001->cmd_done);
 	snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys);
 
-	input_dev->name = "Wacom W8001 Penabled Serial TouchScreen";
-	input_dev->phys = w8001->phys;
-	input_dev->id.bustype = BUS_RS232;
-	input_dev->id.vendor = SERIO_W8001;
-	input_dev->id.product = w8001->id;
-	input_dev->id.version = 0x0100;
-	input_dev->dev.parent = &serio->dev;
-
-	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-	input_dev->keybit[BIT_WORD(BTN_TOOL_PEN)] |= BIT_MASK(BTN_TOOL_PEN);
-	input_dev->keybit[BIT_WORD(BTN_TOOL_RUBBER)] |= BIT_MASK(BTN_TOOL_RUBBER);
-	input_dev->keybit[BIT_WORD(BTN_STYLUS)] |= BIT_MASK(BTN_STYLUS);
-	input_dev->keybit[BIT_WORD(BTN_STYLUS2)] |= BIT_MASK(BTN_STYLUS2);
-
 	serio_set_drvdata(serio, w8001);
 	err = serio_open(serio, drv);
 	if (err)
@@ -400,6 +517,14 @@
 	if (err)
 		goto fail3;
 
+	input_dev->name = w8001->name;
+	input_dev->phys = w8001->phys;
+	input_dev->id.product = w8001->id;
+	input_dev->id.bustype = BUS_RS232;
+	input_dev->id.vendor = 0x056a;
+	input_dev->id.version = 0x0100;
+	input_dev->dev.parent = &serio->dev;
+
 	err = input_register_device(w8001->dev);
 	if (err)
 		goto fail3;
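
A note on the touch scaling introduced above: scale_touch_coordinates() maps raw touch units into the pen coordinate space so finger and stylus report in the same range, x = x * max_pen_x / max_touch_x (and likewise for y). With hypothetical maxima of max_touch_x = 4096 and max_pen_x = 26202, a raw touch x of 2048 would be reported as 2048 * 26202 / 4096 = 13101, i.e. halfway across both spaces. The maxima themselves come from the pen query (parse_pen_data()) and the touch query (parse_touchquery()) in the setup hunk above.
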
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index e0c024d..7f85a86 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -17,6 +17,8 @@
  * Switch to grant tables together with xen-fbfront.c.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/module.h>
@@ -84,9 +86,8 @@
 				input_report_key(dev, event->key.keycode,
 						 event->key.pressed);
 			else
-				printk(KERN_WARNING
-				       "xenkbd: unhandled keycode 0x%x\n",
-				       event->key.keycode);
+				pr_warning("unhandled keycode 0x%x\n",
+					   event->key.keycode);
 			break;
 		case XENKBD_TYPE_POS:
 			input_report_abs(dev, ABS_X, event->pos.abs_x);
@@ -292,8 +293,7 @@
 			ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
 					    "request-abs-pointer", "1");
 			if (ret)
-				printk(KERN_WARNING
-				       "xenkbd: can't request abs-pointer");
+				pr_warning("can't request abs-pointer\n");
 		}
 		xenbus_switch_state(dev, XenbusStateConnected);
 		break;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index e54e79d..92607ed 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2297,6 +2297,7 @@
 
 	errcode = capi20_get_profile(0, &profile);
 	if (errcode != CAPI_NOERROR) {
+		unregister_capictr_notifier(&capictr_nb);
 		capi20_release(&global.ap);
 		return -EIO;
 	}
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 3acf94c..2b33b26 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -38,6 +38,7 @@
 #include <linux/rcupdate.h>
 
 static int showcapimsgs = 0;
+static struct workqueue_struct *kcapi_wq;
 
 MODULE_DESCRIPTION("CAPI4Linux: kernel CAPI layer");
 MODULE_AUTHOR("Carsten Paeth");
@@ -291,7 +292,7 @@
 	event->type = event_type;
 	event->controller = controller;
 
-	schedule_work(&event->work);
+	queue_work(kcapi_wq, &event->work);
 	return 0;
 }
 
@@ -408,7 +409,7 @@
 		goto error;
 	}
 	skb_queue_tail(&ap->recv_queue, skb);
-	schedule_work(&ap->recv_work);
+	queue_work(kcapi_wq, &ap->recv_work);
 	rcu_read_unlock();
 
 	return;
@@ -743,7 +744,7 @@
 
 	mutex_unlock(&capi_controller_lock);
 
-	flush_scheduled_work();
+	flush_workqueue(kcapi_wq);
 	skb_queue_purge(&ap->recv_queue);
 
 	if (showcapimsgs & 1) {
@@ -1285,21 +1286,30 @@
 {
 	int err;
 
+	kcapi_wq = alloc_workqueue("kcapi", 0, 0);
+	if (!kcapi_wq)
+		return -ENOMEM;
+
 	register_capictr_notifier(&capictr_nb);
 
 	err = cdebug_init();
-	if (!err)
-		kcapi_proc_init();
-	return err;
+	if (err) {
+		unregister_capictr_notifier(&capictr_nb);
+		destroy_workqueue(kcapi_wq);
+		return err;
+	}
+
+	kcapi_proc_init();
+	return 0;
 }
 
 static void __exit kcapi_exit(void)
 {
         kcapi_proc_exit();
 
-	/* make sure all notifiers are finished */
-	flush_scheduled_work();
+	unregister_capictr_notifier(&capictr_nb);
 	cdebug_exit();
+	destroy_workqueue(kcapi_wq);
 }
 
 module_init(kcapi_init);
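
kcapi.c above moves its deferred work from the shared system workqueue (schedule_work()/flush_scheduled_work()) onto a private queue, so teardown only has to drain CAPI's own work items instead of flushing everyone's. A minimal sketch of the same pattern, using hypothetical example_* names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* deferred processing goes here */
}
static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);	/* instead of schedule_work() */
	return 0;
}

static void __exit example_exit(void)
{
	/* drains and frees only this module's queue, replacing the
	 * global flush_scheduled_work() */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
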
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 178942a..8a3c5cf 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2318,7 +2318,7 @@
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
 		 le16_to_cpu(udev->descriptor.idProduct));
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
 			    GIGASET_MODULENAME);
 	if (!cs)
@@ -2576,7 +2576,7 @@
 {
 	int result;
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &gigops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index d151dcb..0ef09d0 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -513,7 +513,7 @@
 		return -ENODEV;
 	}
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
 	if (!cs)
 		goto error;
@@ -771,7 +771,7 @@
 		return rc;
 	}
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 					  GIGASET_MODULENAME, GIGASET_DEVNAME,
 					  &ops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4a66338..5e3300d 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -695,7 +695,7 @@
 
 	dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
 	if (!cs)
 		return -ENODEV;
@@ -894,7 +894,7 @@
 {
 	int result;
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &ops, THIS_MODULE);
diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
index 74a6ccf..8121e04 100644
--- a/drivers/isdn/hardware/mISDN/ipac.h
+++ b/drivers/isdn/hardware/mISDN/ipac.h
@@ -29,7 +29,7 @@
 	u32			type;
 	u32			off;		/* offset to isac regs */
 	char			*name;
-	spinlock_t		*hwlock;	/* lock HW acccess */
+	spinlock_t		*hwlock;	/* lock HW access */
 	read_reg_func		*read_reg;
 	write_reg_func		*write_reg;
 	fifo_func		*read_fifo;
@@ -70,7 +70,7 @@
 	struct hscx_hw		hscx[2];
 	char			*name;
 	void			*hw;
-	spinlock_t		*hwlock;	/* lock HW acccess */
+	spinlock_t		*hwlock;	/* lock HW access */
 	struct module		*owner;
 	u32			type;
 	read_reg_func		*read_reg;
diff --git a/drivers/isdn/hardware/mISDN/isar.h b/drivers/isdn/hardware/mISDN/isar.h
index 4a134ac..9962bdf 100644
--- a/drivers/isdn/hardware/mISDN/isar.h
+++ b/drivers/isdn/hardware/mISDN/isar.h
@@ -44,7 +44,7 @@
 struct isar_hw {
 	struct	isar_ch	ch[2];
 	void		*hw;
-	spinlock_t	*hwlock;	/* lock HW acccess */
+	spinlock_t	*hwlock;	/* lock HW access */
 	char		*name;
 	struct module	*owner;
 	read_reg_func	*read_reg;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 76d9e67..309bacf 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -112,7 +112,7 @@
  * Disable rx-data:
  * If cmx is realized in hardware, rx data will be disabled if requested by
  * the upper layer. If dtmf decoding is done by software and enabled, rx data
- * will not be diabled but blocked to the upper layer.
+ * will not be disabled but blocked to the upper layer.
  *
  * HFC conference engine:
  * If it is possible to realize all features using hardware, hardware will be
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 307bd6e..199f374 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -110,7 +110,7 @@
 	}
 	skb_queue_purge(&ch->squeue);
 	skb_queue_purge(&ch->rqueue);
-	flush_scheduled_work();
+	flush_work_sync(&ch->workq);
 	return 0;
 }
 EXPORT_SYMBOL(mISDN_freedchannel);
@@ -143,7 +143,7 @@
 	mISDN_clear_bchannel(ch);
 	skb_queue_purge(&ch->rqueue);
 	ch->rcount = 0;
-	flush_scheduled_work();
+	flush_work_sync(&ch->workq);
 	return 0;
 }
 EXPORT_SYMBOL(mISDN_freebchannel);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 5b59796..bd526f6 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1269,6 +1269,8 @@
 	if (timer_pending(&hc->timeout_tl))
 		del_timer(&hc->timeout_tl);
 
+	cancel_work_sync(&hc->workq);
+
 	if (hc->socket_thread)
 		l1oip_socket_close(hc);
 
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 33facd0..80a3ae3 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -98,7 +98,6 @@
 #define LP5521_EXT_CLK_USED		0x08
 
 struct lp5521_engine {
-	const struct attribute_group *attributes;
 	int		id;
 	u8		mode;
 	u8		prog_page;
@@ -225,25 +224,22 @@
 		    curr);
 }
 
-static void lp5521_init_engine(struct lp5521_chip *chip,
-			const struct attribute_group *attr_group)
+static void lp5521_init_engine(struct lp5521_chip *chip)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
 		chip->engines[i].id = i + 1;
 		chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
 		chip->engines[i].prog_page = i;
-		chip->engines[i].attributes = &attr_group[i];
 	}
 }
 
-static int lp5521_configure(struct i2c_client *client,
-			const struct attribute_group *attr_group)
+static int lp5521_configure(struct i2c_client *client)
 {
 	struct lp5521_chip *chip = i2c_get_clientdata(client);
 	int ret;
 
-	lp5521_init_engine(chip, attr_group);
+	lp5521_init_engine(chip);
 
 	/* Set all PWMs to direct control mode */
 	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
@@ -329,9 +325,6 @@
 /* Set engine mode and create appropriate sysfs attributes, if required. */
 static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
 {
-	struct lp5521_chip *chip = engine_to_lp5521(engine);
-	struct i2c_client *client = chip->client;
-	struct device *dev = &client->dev;
 	int ret = 0;
 
 	/* if in that mode already do nothing, except for run */
@@ -343,18 +336,10 @@
 	} else if (mode == LP5521_CMD_LOAD) {
 		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
 		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
-
-		ret = sysfs_create_group(&dev->kobj, engine->attributes);
-		if (ret)
-			return ret;
 	} else if (mode == LP5521_CMD_DISABLED) {
 		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
 	}
 
-	/* remove load attribute from sysfs if not in load mode */
-	if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
-		sysfs_remove_group(&dev->kobj, engine->attributes);
-
 	engine->mode = mode;
 
 	return ret;
@@ -373,6 +358,8 @@
 	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
 		/* separate sscanfs because length is working only for %s */
 		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		if (ret != 2)
+			goto fail;
 		ret = sscanf(c, "%2x", &cmd);
 		if (ret != 1)
 			goto fail;
@@ -387,7 +374,10 @@
 		goto fail;
 
 	mutex_lock(&chip->lock);
-	ret = lp5521_load_program(engine, pattern);
+	if (engine->mode == LP5521_CMD_LOAD)
+		ret = lp5521_load_program(engine, pattern);
+	else
+		ret = -EINVAL;
 	mutex_unlock(&chip->lock);
 
 	if (ret) {
@@ -574,20 +564,8 @@
 	&dev_attr_engine2_mode.attr,
 	&dev_attr_engine3_mode.attr,
 	&dev_attr_selftest.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine1_attributes[] = {
 	&dev_attr_engine1_load.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine2_attributes[] = {
 	&dev_attr_engine2_load.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine3_attributes[] = {
 	&dev_attr_engine3_load.attr,
 	NULL
 };
@@ -596,12 +574,6 @@
 	.attrs = lp5521_attributes,
 };
 
-static const struct attribute_group lp5521_engine_group[] = {
-	{.attrs = lp5521_engine1_attributes },
-	{.attrs = lp5521_engine2_attributes },
-	{.attrs = lp5521_engine3_attributes },
-};
-
 static int lp5521_register_sysfs(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -616,12 +588,6 @@
 
 	sysfs_remove_group(&dev->kobj, &lp5521_group);
 
-	for (i = 0; i <  ARRAY_SIZE(chip->engines); i++) {
-		if (chip->engines[i].mode == LP5521_CMD_LOAD)
-			sysfs_remove_group(&dev->kobj,
-					chip->engines[i].attributes);
-	}
-
 	for (i = 0; i < chip->num_leds; i++)
 		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
 				&lp5521_led_attribute_group);
@@ -651,7 +617,8 @@
 		return -EINVAL;
 	}
 
-	snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+	snprintf(name, sizeof(name), "%s:channel%d",
+			pdata->label ?: client->name, chan);
 	led->cdev.brightness_set = lp5521_set_brightness;
 	led->cdev.name = name;
 	res = led_classdev_register(dev, &led->cdev);
@@ -723,7 +690,7 @@
 
 	dev_info(&client->dev, "%s programmable led chip found\n", id->name);
 
-	ret = lp5521_configure(client, lp5521_engine_group);
+	ret = lp5521_configure(client);
 	if (ret < 0) {
 		dev_err(&client->dev, "error configuring chip\n");
 		goto fail2;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 0cc4ead..d0c4068 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -105,7 +105,6 @@
 #define SHIFT_MASK(id)			(((id) - 1) * 2)
 
 struct lp5523_engine {
-	const struct attribute_group *attributes;
 	int		id;
 	u8		mode;
 	u8		prog_page;
@@ -403,14 +402,23 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lp5523_chip *chip = i2c_get_clientdata(client);
 	u16 mux = 0;
+	ssize_t ret;
 
 	if (lp5523_mux_parse(buf, &mux, len))
 		return -EINVAL;
 
-	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
-		return -EINVAL;
+	mutex_lock(&chip->lock);
+	ret = -EINVAL;
+	if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD)
+		goto leave;
 
-	return len;
+	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+		goto leave;
+
+	ret = len;
+leave:
+	mutex_unlock(&chip->lock);
+	return ret;
 }
 
 #define store_leds(nr)						\
@@ -556,7 +564,11 @@
 
 	mutex_lock(&chip->lock);
 
-	ret = lp5523_load_program(engine, pattern);
+	if (engine->mode == LP5523_CMD_LOAD)
+		ret = lp5523_load_program(engine, pattern);
+	else
+		ret = -EINVAL;
+
 	mutex_unlock(&chip->lock);
 
 	if (ret) {
@@ -737,37 +749,18 @@
 	&dev_attr_engine2_mode.attr,
 	&dev_attr_engine3_mode.attr,
 	&dev_attr_selftest.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine1_attributes[] = {
 	&dev_attr_engine1_load.attr,
 	&dev_attr_engine1_leds.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine2_attributes[] = {
 	&dev_attr_engine2_load.attr,
 	&dev_attr_engine2_leds.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine3_attributes[] = {
 	&dev_attr_engine3_load.attr,
 	&dev_attr_engine3_leds.attr,
-	NULL
 };
 
 static const struct attribute_group lp5523_group = {
 	.attrs = lp5523_attributes,
 };
 
-static const struct attribute_group lp5523_engine_group[] = {
-	{.attrs = lp5523_engine1_attributes },
-	{.attrs = lp5523_engine2_attributes },
-	{.attrs = lp5523_engine3_attributes },
-};
-
 static int lp5523_register_sysfs(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -788,10 +781,6 @@
 
 	sysfs_remove_group(&dev->kobj, &lp5523_group);
 
-	for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
-		if (chip->engines[i].mode == LP5523_CMD_LOAD)
-			sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
-
 	for (i = 0; i < chip->num_leds; i++)
 		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
 				&lp5523_led_attribute_group);
@@ -802,10 +791,6 @@
 /*--------------------------------------------------------------*/
 static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
 {
-	/*  engine to chip */
-	struct lp5523_chip *chip = engine_to_lp5523(engine);
-	struct i2c_client *client = chip->client;
-	struct device *dev = &client->dev;
 	int ret = 0;
 
 	/* if in that mode already do nothing, except for run */
@@ -817,18 +802,10 @@
 	} else if (mode == LP5523_CMD_LOAD) {
 		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
 		lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
-
-		ret = sysfs_create_group(&dev->kobj, engine->attributes);
-		if (ret)
-			return ret;
 	} else if (mode == LP5523_CMD_DISABLED) {
 		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
 	}
 
-	/* remove load attribute from sysfs if not in load mode */
-	if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
-		sysfs_remove_group(&dev->kobj, engine->attributes);
-
 	engine->mode = mode;
 
 	return ret;
@@ -845,7 +822,6 @@
 	engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
 	engine->prog_page = id - 1;
 	engine->mux_page = id + 2;
-	engine->attributes = &lp5523_engine_group[id - 1];
 
 	return 0;
 }
@@ -870,7 +846,8 @@
 			return -EINVAL;
 		}
 
-		snprintf(name, 32, "lp5523:channel%d", chan);
+		snprintf(name, sizeof(name), "%s:channel%d",
+			pdata->label ?: "lp5523", chan);
 
 		led->cdev.name = name;
 		led->cdev.brightness_set = lp5523_set_brightness;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 43d0875..afac338 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -200,6 +200,32 @@
 	pca9532_setled(led);
 }
 
+static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
+{
+	int i = n_devs;
+
+	if (!data)
+		return;
+
+	while (--i >= 0) {
+		switch (data->leds[i].type) {
+		case PCA9532_TYPE_NONE:
+			break;
+		case PCA9532_TYPE_LED:
+			led_classdev_unregister(&data->leds[i].ldev);
+			cancel_work_sync(&data->leds[i].work);
+			break;
+		case PCA9532_TYPE_N2100_BEEP:
+			if (data->idev != NULL) {
+				input_unregister_device(data->idev);
+				cancel_work_sync(&data->work);
+				data->idev = NULL;
+			}
+			break;
+		}
+	}
+}
+
 static int pca9532_configure(struct i2c_client *client,
 	struct pca9532_data *data, struct pca9532_platform_data *pdata)
 {
@@ -274,25 +300,7 @@
 	return 0;
 
 exit:
-	if (i > 0)
-		for (i = i - 1; i >= 0; i--)
-			switch (data->leds[i].type) {
-			case PCA9532_TYPE_NONE:
-				break;
-			case PCA9532_TYPE_LED:
-				led_classdev_unregister(&data->leds[i].ldev);
-				cancel_work_sync(&data->leds[i].work);
-				break;
-			case PCA9532_TYPE_N2100_BEEP:
-				if (data->idev != NULL) {
-					input_unregister_device(data->idev);
-					input_free_device(data->idev);
-					cancel_work_sync(&data->work);
-					data->idev = NULL;
-				}
-				break;
-			}
-
+	pca9532_destroy_devices(data, i);
 	return err;
 }
 
@@ -329,25 +337,7 @@
 static int pca9532_remove(struct i2c_client *client)
 {
 	struct pca9532_data *data = i2c_get_clientdata(client);
-	int i;
-	for (i = 0; i < 16; i++)
-		switch (data->leds[i].type) {
-		case PCA9532_TYPE_NONE:
-			break;
-		case PCA9532_TYPE_LED:
-			led_classdev_unregister(&data->leds[i].ldev);
-			cancel_work_sync(&data->leds[i].work);
-			break;
-		case PCA9532_TYPE_N2100_BEEP:
-			if (data->idev != NULL) {
-				input_unregister_device(data->idev);
-				input_free_device(data->idev);
-				cancel_work_sync(&data->work);
-				data->idev = NULL;
-			}
-			break;
-		}
-
+	pca9532_destroy_devices(data, 16);
 	kfree(data);
 	return 0;
 }
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 5aab32c..a045232 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -276,7 +276,7 @@
 	struct wm8350_led *led = platform_get_drvdata(pdev);
 
 	led_classdev_unregister(&led->cdev);
-	flush_scheduled_work();
+	flush_work_sync(&led->work);
 	wm8350_led_disable(led);
 	regulator_put(led->dcdc);
 	regulator_put(led->isink);
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index f948e57..2b513a2 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -26,6 +26,7 @@
 	int brightness;
 	int old_status;
 	struct notifier_block notifier;
+	unsigned invert;
 };
 
 static int fb_notifier_callback(struct notifier_block *p,
@@ -36,23 +37,64 @@
 	struct led_classdev *led = n->led;
 	struct fb_event *fb_event = data;
 	int *blank = fb_event->data;
+	int new_status = *blank ? BLANK : UNBLANK;
 
 	switch (event) {
 	case FB_EVENT_BLANK :
-		if (*blank && n->old_status == UNBLANK) {
+		if (new_status == n->old_status)
+			break;
+
+		if ((n->old_status == UNBLANK) ^ n->invert) {
 			n->brightness = led->brightness;
 			led_set_brightness(led, LED_OFF);
-			n->old_status = BLANK;
-		} else if (!*blank && n->old_status == BLANK) {
+		} else {
 			led_set_brightness(led, n->brightness);
-			n->old_status = UNBLANK;
 		}
+
+		n->old_status = new_status;
+
 		break;
 	}
 
 	return 0;
 }
 
+static ssize_t bl_trig_invert_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led = dev_get_drvdata(dev);
+	struct bl_trig_notifier *n = led->trigger_data;
+
+	return sprintf(buf, "%u\n", n->invert);
+}
+
+static ssize_t bl_trig_invert_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t num)
+{
+	struct led_classdev *led = dev_get_drvdata(dev);
+	struct bl_trig_notifier *n = led->trigger_data;
+	unsigned long invert;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &invert);
+	if (ret < 0)
+		return ret;
+
+	if (invert > 1)
+		return -EINVAL;
+
+	n->invert = invert;
+
+	/* After inverting, we need to update the LED. */
+	if ((n->old_status == BLANK) ^ n->invert)
+		led_set_brightness(led, LED_OFF);
+	else
+		led_set_brightness(led, n->brightness);
+
+	return num;
+}
+static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
+
 static void bl_trig_activate(struct led_classdev *led)
 {
 	int ret;
@@ -66,6 +108,10 @@
 		return;
 	}
 
+	ret = device_create_file(led->dev, &dev_attr_inverted);
+	if (ret)
+		goto err_invert;
+
 	n->led = led;
 	n->brightness = led->brightness;
 	n->old_status = UNBLANK;
@@ -74,6 +120,12 @@
 	ret = fb_register_client(&n->notifier);
 	if (ret)
 		dev_err(led->dev, "unable to register backlight trigger\n");
+
+	return;
+
+err_invert:
+	led->trigger_data = NULL;
+	kfree(n);
 }
 
 static void bl_trig_deactivate(struct led_classdev *led)
@@ -82,6 +134,7 @@
 		(struct bl_trig_notifier *) led->trigger_data;
 
 	if (n) {
+		device_remove_file(led->dev, &dev_attr_inverted);
 		fb_unregister_client(&n->notifier);
 		kfree(n);
 	}
diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 2ad62c3..399beb16 100644
--- a/drivers/macintosh/ams/ams-core.c
+++ b/drivers/macintosh/ams/ams-core.c
@@ -226,7 +226,7 @@
 	 * We do this after ams_info.exit(), because an interrupt might
 	 * have arrived before disabling them.
 	 */
-	flush_scheduled_work();
+	flush_work_sync(&ams_info.worker);
 
 	/* Remove device */
 	of_device_unregister(ams_info.of_dev);
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 067f996..6a82388 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -23,6 +23,8 @@
 
 static struct input_dev *mac_hid_emumouse_dev;
 
+static DEFINE_MUTEX(mac_hid_emumouse_mutex);
+
 static int mac_hid_create_emumouse(void)
 {
 	static struct lock_class_key mac_hid_emumouse_dev_event_class;
@@ -187,6 +189,10 @@
 	int old_val = *valp;
 	int rc;
 
+	rc = mutex_lock_killable(&mac_hid_emumouse_mutex);
+	if (rc)
+		return rc;
+
 	rc = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (rc == 0 && write && *valp != old_val) {
@@ -202,6 +208,8 @@
 	if (rc)
 		*valp = old_val;
 
+	mutex_unlock(&mac_hid_emumouse_mutex);
+
 	return rc;
 }
 
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc..4daf9e5 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@
 	/* Set the DMA ops to the ones from the PCI device, this could be
 	 * fishy if we didn't know that on PowerMac it's always direct ops
 	 * or iommu ops that will work fine
+	 *
+	 * To get all the fields, copy all archdata
 	 */
-	dev->ofdev.dev.archdata.dma_ops =
-		chip->lbus.pdev->dev.archdata.dma_ops;
-	dev->ofdev.dev.archdata.dma_data =
-		chip->lbus.pdev->dev.archdata.dma_data;
+	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
 #endif /* CONFIG_PCI */
 
 #ifdef DEBUG
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 53cce3a..39f660b 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -285,8 +285,8 @@
 
 static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
 {
-	cancel_rearming_delayed_work(&rm->cpu[0].sniffer);
-	cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
+	cancel_delayed_work_sync(&rm->cpu[0].sniffer);
+	cancel_delayed_work_sync(&rm->cpu[1].sniffer);
 }
 
 static int __devinit rackmeter_setup(struct rackmeter *rm)
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 4454927..2e041fd 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2213,6 +2213,9 @@
 static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
 {
 	state = state_detached;
+	of_dev = dev;
+
+	dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
 
 	/* Lookup the fans in the device tree */
 	fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, fcu_match);
 
 static struct of_platform_driver fcu_of_platform_driver = 
 {
@@ -2252,8 +2256,6 @@
  */
 static int __init therm_pm72_init(void)
 {
-	struct device_node *np;
-
 	rackmac = of_machine_is_compatible("RackMac3,1");
 
 	if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@
 	    !rackmac)
 	    	return -ENODEV;
 
-	printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);
-
-	np = of_find_node_by_type(NULL, "fcu");
-	if (np == NULL) {
-		/* Some machines have strangely broken device-tree */
-		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
-		if (np == NULL) {
-			    printk(KERN_ERR "Can't find FCU in device-tree !\n");
-			    return -ENODEV;
-		}
-	}
-	of_dev = of_platform_device_create(np, "temperature", NULL);
-	if (of_dev == NULL) {
-		printk(KERN_ERR "Can't register FCU platform device !\n");
-		return -ENODEV;
-	}
-
-	of_register_platform_driver(&fcu_of_platform_driver);
-	
-	return 0;
+	return of_register_platform_driver(&fcu_of_platform_driver);
 }
 
 static void __exit therm_pm72_exit(void)
 {
 	of_unregister_platform_driver(&fcu_of_platform_driver);
-
-	if (of_dev)
-		of_device_unregister(of_dev);
 }
 
 module_init(therm_pm72_init);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 1cec02f..ade1e65 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -15,7 +15,7 @@
 
 #define MAX_PMU_LEVEL 0xFF
 
-static struct backlight_ops pmu_backlight_data;
+static const struct backlight_ops pmu_backlight_data;
 static DEFINE_SPINLOCK(pmu_backlight_lock);
 static int sleeping, uses_pmu_bl;
 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
@@ -115,7 +115,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops pmu_backlight_data = {
+static const struct backlight_ops pmu_backlight_data = {
 	.get_brightness	= pmu_backlight_get_brightness,
 	.update_status	= pmu_backlight_update_status,
 
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index cd29c82..8b021eb 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2257,7 +2257,7 @@
 		&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
 }
 
-static struct platform_suspend_ops pmu_pm_ops = {
+static const struct platform_suspend_ops pmu_pm_ops = {
 	.enter = powerbook_sleep,
 	.valid = pmu_sleep_valid,
 };
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index bf1a95e..98d9ec8 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,6 +240,30 @@
          Allow volume managers to mirror logical volumes, also
          needed for live data migration tools such as 'pvmove'.
 
+config DM_RAID
+	tristate "RAID 4/5/6 target (EXPERIMENTAL)"
+	depends on BLK_DEV_DM && EXPERIMENTAL
+	select MD_RAID456
+	select BLK_DEV_MD
+	---help---
+	 A dm target that supports RAID4, RAID5 and RAID6 mappings
+
+	 A RAID-5 set of N drives with a capacity of C MB per drive provides
+	 the capacity of C * (N - 1) MB, and protects against a failure
+	 of a single drive. For a given sector (row) number, (N - 1) drives
+	 contain data sectors, and one drive contains the parity protection.
+	 For a RAID-4 set, the parity blocks are present on a single drive,
+	 while a RAID-5 set distributes the parity across the drives in one
+	 of the available parity distribution methods.
+
+	 A RAID-6 set of N drives with a capacity of C MB per drive
+	 provides the capacity of C * (N - 2) MB, and protects
+	 against a failure of any two drives. For a given sector
+	 (row) number, (N - 2) drives contain data sectors, and two
+	 drives contain two independent redundancy syndromes.  Like
+	 RAID-5, RAID-6 distributes the syndromes across the drives
+	 in one of the available parity distribution methods.
+
 config DM_LOG_USERSPACE
 	tristate "Mirror userspace logging (EXPERIMENTAL)"
 	depends on DM_MIRROR && EXPERIMENTAL && NET
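The DM_RAID help text above gives the usable-capacity formulas; as a quick worked example, the helper below is purely illustrative and not part of the patch:

	/* Illustrative only: usable capacity per the RAID4/5/6 help text above. */
	/* parity_devs is 1 for RAID4/RAID5 and 2 for RAID6. */
	static unsigned long usable_mb(unsigned n_drives, unsigned mb_per_drive,
				       unsigned parity_devs)
	{
		return (unsigned long)(n_drives - parity_devs) * mb_per_drive;
	}

	/* e.g. 6 drives of 500 MB each: RAID5 -> 2500 MB usable, RAID6 -> 2000 MB usable */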
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 5e3aac4..d013860 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o dm-log.o dm-region-hash.o
 obj-$(CONFIG_DM_LOG_USERSPACE)	+= dm-log-userspace.o
 obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
+obj-$(CONFIG_DM_RAID)		+= dm-raid.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs			+= dm-uevent.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a1ffe3..9a35320 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -210,11 +210,11 @@
 		    || test_bit(Faulty, &rdev->flags))
 			continue;
 
-		target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
+		target = offset + index * (PAGE_SIZE/512);
 
 		if (sync_page_io(rdev, target,
 				 roundup(size, bdev_logical_block_size(rdev->bdev)),
-				 page, READ)) {
+				 page, READ, true)) {
 			page->index = index;
 			attach_page_buffers(page, NULL); /* so that free_buffer will
 							  * quietly no-op */
@@ -264,14 +264,18 @@
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
 	mdk_rdev_t *rdev = NULL;
+	struct block_device *bdev;
 	mddev_t *mddev = bitmap->mddev;
 
 	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
 		loff_t offset = mddev->bitmap_info.offset;
+
+		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
+
 		if (page->index == bitmap->file_pages-1)
 			size = roundup(bitmap->last_page_size,
-				       bdev_logical_block_size(rdev->bdev));
+				       bdev_logical_block_size(bdev));
 		/* Just make sure we aren't corrupting data or
 		 * metadata
 		 */
@@ -1542,7 +1546,7 @@
 	wait_event(bitmap->mddev->recovery_wait,
 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
-	bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
+	bitmap->mddev->curr_resync_completed = sector;
 	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
 	sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
 	s = 0;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d5b0e4c..4e054bd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -18,10 +18,14 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/algapi.h>
 
 #include <linux/device-mapper.h>
 
@@ -63,6 +67,7 @@
 	struct convert_context *ctx;
 	struct scatterlist sg_in;
 	struct scatterlist sg_out;
+	sector_t iv_sector;
 };
 
 struct crypt_config;
@@ -73,11 +78,13 @@
 	void (*dtr)(struct crypt_config *cc);
 	int (*init)(struct crypt_config *cc);
 	int (*wipe)(struct crypt_config *cc);
-	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
+	int (*generator)(struct crypt_config *cc, u8 *iv,
+			 struct dm_crypt_request *dmreq);
+	int (*post)(struct crypt_config *cc, u8 *iv,
+		    struct dm_crypt_request *dmreq);
 };
 
 struct iv_essiv_private {
-	struct crypto_cipher *tfm;
 	struct crypto_hash *hash_tfm;
 	u8 *salt;
 };
@@ -86,11 +93,32 @@
 	int shift;
 };
 
+#define LMK_SEED_SIZE 64 /* hash + 0 */
+struct iv_lmk_private {
+	struct crypto_shash *hash_tfm;
+	u8 *seed;
+};
+
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+
+/*
+ * Duplicated per-CPU state for cipher.
+ */
+struct crypt_cpu {
+	struct ablkcipher_request *req;
+	/* ESSIV: struct crypto_cipher *essiv_tfm */
+	void *iv_private;
+	struct crypto_ablkcipher *tfms[0];
+};
+
+/*
+ * The fields in here must be read only after initialization;
+ * any state that changes at run time belongs in crypt_cpu.
+ */
 struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
@@ -108,17 +136,25 @@
 	struct workqueue_struct *crypt_queue;
 
 	char *cipher;
-	char *cipher_mode;
+	char *cipher_string;
 
 	struct crypt_iv_operations *iv_gen_ops;
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
+		struct iv_lmk_private lmk;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
 	/*
+	 * Duplicated per cpu state. Access through
+	 * per_cpu_ptr() only.
+	 */
+	struct crypt_cpu __percpu *cpu;
+	unsigned tfms_count;
+
+	/*
 	 * Layout of each crypto request:
 	 *
 	 *   struct ablkcipher_request
@@ -132,11 +168,10 @@
 	 * correctly aligned.
 	 */
 	unsigned int dmreq_start;
-	struct ablkcipher_request *req;
 
-	struct crypto_ablkcipher *tfm;
 	unsigned long flags;
 	unsigned int key_size;
+	unsigned int key_parts;
 	u8 key[0];
 };
 
@@ -148,6 +183,20 @@
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
+
+static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
+{
+	return this_cpu_ptr(cc->cpu);
+}
+
+/*
+ * Use this to access cipher attributes that are the same for each CPU.
+ */
+static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+{
+	return __this_cpu_ptr(cc->cpu)->tfms[0];
+}
 
 /*
  * Different IV generation algorithms:
@@ -168,23 +217,38 @@
  * null: the initial vector is always zero.  Provides compatibility with
  *       obsolete loop_fish2 devices.  Do not use for new devices.
  *
+ * lmk:  Compatible implementation of the block chaining mode used
+ *       by the Loop-AES block device encryption system
+ *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
+ *       It operates on full 512 byte sectors and uses CBC
+ *       with an IV derived from the sector number, the data and
+ *       optionally an extra IV seed.
+ *       This means that after decryption the first block
+ *       of the sector must be tweaked according to the decrypted data.
+ *       Loop-AES can use three encryption schemes:
+ *         version 1: plain aes-cbc mode
+ *         version 2: uses a 64-key multikey scheme with the lmk IV generator
+ *         version 3: the same as version 2 with an additional IV seed
+ *                   (it uses 65 keys; the last key is used as the IV seed)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
 
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
+	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
 
 	return 0;
 }
 
 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
-				sector_t sector)
+				struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
 
 	return 0;
 }
@@ -195,7 +259,8 @@
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	struct hash_desc desc;
 	struct scatterlist sg;
-	int err;
+	struct crypto_cipher *essiv_tfm;
+	int err, cpu;
 
 	sg_init_one(&sg, cc->key, cc->key_size);
 	desc.tfm = essiv->hash_tfm;
@@ -205,8 +270,16 @@
 	if (err)
 		return err;
 
-	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+
+		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
 				    crypto_hash_digestsize(essiv->hash_tfm));
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 /* Wipe salt and reset key derived from volume key */
@@ -214,24 +287,76 @@
 {
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+	struct crypto_cipher *essiv_tfm;
+	int cpu, r, err = 0;
 
 	memset(essiv->salt, 0, salt_size);
 
-	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
+		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+		if (r)
+			err = r;
+	}
+
+	return err;
+}
+
+/* Set up per cpu cipher state */
+static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
+					     struct dm_target *ti,
+					     u8 *salt, unsigned saltsize)
+{
+	struct crypto_cipher *essiv_tfm;
+	int err;
+
+	/* Setup the essiv_tfm with the given salt */
+	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(essiv_tfm)) {
+		ti->error = "Error allocating crypto tfm for ESSIV";
+		return essiv_tfm;
+	}
+
+	if (crypto_cipher_blocksize(essiv_tfm) !=
+	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
+		ti->error = "Block size of ESSIV cipher does "
+			    "not match IV size of block cipher";
+		crypto_free_cipher(essiv_tfm);
+		return ERR_PTR(-EINVAL);
+	}
+
+	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
+	if (err) {
+		ti->error = "Failed to set key for ESSIV cipher";
+		crypto_free_cipher(essiv_tfm);
+		return ERR_PTR(err);
+	}
+
+	return essiv_tfm;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
+	int cpu;
+	struct crypt_cpu *cpu_cc;
+	struct crypto_cipher *essiv_tfm;
 	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-	crypto_free_cipher(essiv->tfm);
-	essiv->tfm = NULL;
-
 	crypto_free_hash(essiv->hash_tfm);
 	essiv->hash_tfm = NULL;
 
 	kzfree(essiv->salt);
 	essiv->salt = NULL;
+
+	for_each_possible_cpu(cpu) {
+		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+		essiv_tfm = cpu_cc->iv_private;
+
+		if (essiv_tfm)
+			crypto_free_cipher(essiv_tfm);
+
+		cpu_cc->iv_private = NULL;
+	}
 }
 
 static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -240,7 +365,7 @@
 	struct crypto_cipher *essiv_tfm = NULL;
 	struct crypto_hash *hash_tfm = NULL;
 	u8 *salt = NULL;
-	int err;
+	int err, cpu;
 
 	if (!opts) {
 		ti->error = "Digest algorithm missing for ESSIV mode";
@@ -262,48 +387,44 @@
 		goto bad;
 	}
 
-	/* Allocate essiv_tfm */
-	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(essiv_tfm)) {
-		ti->error = "Error allocating crypto tfm for ESSIV";
-		err = PTR_ERR(essiv_tfm);
-		goto bad;
-	}
-	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_ablkcipher_ivsize(cc->tfm)) {
-		ti->error = "Block size of ESSIV cipher does "
-			    "not match IV size of block cipher";
-		err = -EINVAL;
-		goto bad;
-	}
-
 	cc->iv_gen_private.essiv.salt = salt;
-	cc->iv_gen_private.essiv.tfm = essiv_tfm;
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
+	for_each_possible_cpu(cpu) {
+		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+					crypto_hash_digestsize(hash_tfm));
+		if (IS_ERR(essiv_tfm)) {
+			crypt_iv_essiv_dtr(cc);
+			return PTR_ERR(essiv_tfm);
+		}
+		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+	}
+
 	return 0;
 
 bad:
-	if (essiv_tfm && !IS_ERR(essiv_tfm))
-		crypto_free_cipher(essiv_tfm);
 	if (hash_tfm && !IS_ERR(hash_tfm))
 		crypto_free_hash(hash_tfm);
 	kfree(salt);
 	return err;
 }
 
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
+	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+
 	memset(iv, 0, cc->iv_size);
-	*(u64 *)iv = cpu_to_le64(sector);
-	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
+	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
+
 	return 0;
 }
 
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
+	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
 	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
@@ -328,25 +449,177 @@
 {
 }
 
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
+			      struct dm_crypt_request *dmreq)
 {
 	__be64 val;
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
+	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
 }
 
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
 {
 	memset(iv, 0, cc->iv_size);
 
 	return 0;
 }
 
+static void crypt_iv_lmk_dtr(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
+		crypto_free_shash(lmk->hash_tfm);
+	lmk->hash_tfm = NULL;
+
+	kzfree(lmk->seed);
+	lmk->seed = NULL;
+}
+
+static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
+			    const char *opts)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(lmk->hash_tfm)) {
+		ti->error = "Error initializing LMK hash";
+		return PTR_ERR(lmk->hash_tfm);
+	}
+
+	/* No seed in LMK version 2 */
+	if (cc->key_parts == cc->tfms_count) {
+		lmk->seed = NULL;
+		return 0;
+	}
+
+	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
+	if (!lmk->seed) {
+		crypt_iv_lmk_dtr(cc);
+		ti->error = "Error kmallocing seed storage in LMK";
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int crypt_iv_lmk_init(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	int subkey_size = cc->key_size / cc->key_parts;
+
+	/* LMK seed is stored at the position of key LMK_KEYS + 1 */
+	if (lmk->seed)
+		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
+		       crypto_shash_digestsize(lmk->hash_tfm));
+
+	return 0;
+}
+
+static int crypt_iv_lmk_wipe(struct crypt_config *cc)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+
+	if (lmk->seed)
+		memset(lmk->seed, 0, LMK_SEED_SIZE);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq,
+			    u8 *data)
+{
+	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	struct {
+		struct shash_desc desc;
+		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
+	} sdesc;
+	struct md5_state md5state;
+	u32 buf[4];
+	int i, r;
+
+	sdesc.desc.tfm = lmk->hash_tfm;
+	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	r = crypto_shash_init(&sdesc.desc);
+	if (r)
+		return r;
+
+	if (lmk->seed) {
+		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
+		if (r)
+			return r;
+	}
+
+	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
+	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
+	if (r)
+		return r;
+
+	/* Sector is cropped to 56 bits here */
+	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
+	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
+	buf[2] = cpu_to_le32(4024);
+	buf[3] = 0;
+	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
+	if (r)
+		return r;
+
+	/* No MD5 padding here */
+	r = crypto_shash_export(&sdesc.desc, &md5state);
+	if (r)
+		return r;
+
+	for (i = 0; i < MD5_HASH_WORDS; i++)
+		__cpu_to_le32s(&md5state.hash[i]);
+	memcpy(iv, &md5state.hash, cc->iv_size);
+
+	return 0;
+}
+
+static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
+			    struct dm_crypt_request *dmreq)
+{
+	u8 *src;
+	int r = 0;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
+		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
+		kunmap_atomic(src, KM_USER0);
+	} else
+		memset(iv, 0, cc->iv_size);
+
+	return r;
+}
+
+static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
+			     struct dm_crypt_request *dmreq)
+{
+	u8 *dst;
+	int r;
+
+	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
+		return 0;
+
+	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
+
+	/* Tweak the first block of plaintext sector */
+	if (!r)
+		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
+
+	kunmap_atomic(dst, KM_USER0);
+	return r;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
@@ -373,6 +646,15 @@
 	.generator = crypt_iv_null_gen
 };
 
+static struct crypt_iv_operations crypt_iv_lmk_ops = {
+	.ctr	   = crypt_iv_lmk_ctr,
+	.dtr	   = crypt_iv_lmk_dtr,
+	.init	   = crypt_iv_lmk_init,
+	.wipe	   = crypt_iv_lmk_wipe,
+	.generator = crypt_iv_lmk_gen,
+	.post	   = crypt_iv_lmk_post
+};
+
 static void crypt_convert_init(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct bio *bio_out, struct bio *bio_in,
@@ -400,6 +682,13 @@
 	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
+static u8 *iv_of_dmreq(struct crypt_config *cc,
+		       struct dm_crypt_request *dmreq)
+{
+	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
+		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+}
+
 static int crypt_convert_block(struct crypt_config *cc,
 			       struct convert_context *ctx,
 			       struct ablkcipher_request *req)
@@ -411,9 +700,9 @@
 	int r = 0;
 
 	dmreq = dmreq_of_req(cc, req);
-	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
-			 crypto_ablkcipher_alignmask(cc->tfm) + 1);
+	iv = iv_of_dmreq(cc, dmreq);
 
+	dmreq->iv_sector = ctx->sector;
 	dmreq->ctx = ctx;
 	sg_init_table(&dmreq->sg_in, 1);
 	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -436,7 +725,7 @@
 	}
 
 	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
+		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
 		if (r < 0)
 			return r;
 	}
@@ -449,21 +738,28 @@
 	else
 		r = crypto_ablkcipher_decrypt(req);
 
+	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		r = cc->iv_gen_ops->post(cc, iv, dmreq);
+
 	return r;
 }
 
 static void kcryptd_async_done(struct crypto_async_request *async_req,
 			       int error);
+
 static void crypt_alloc_req(struct crypt_config *cc,
 			    struct convert_context *ctx)
 {
-	if (!cc->req)
-		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-	ablkcipher_request_set_tfm(cc->req, cc->tfm);
-	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-					CRYPTO_TFM_REQ_MAY_SLEEP,
-					kcryptd_async_done,
-					dmreq_of_req(cc, cc->req));
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
+	unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+
+	if (!this_cc->req)
+		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+
+	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+	ablkcipher_request_set_callback(this_cc->req,
+	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
 }
 
 /*
@@ -472,6 +768,7 @@
 static int crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx)
 {
+	struct crypt_cpu *this_cc = this_crypt_config(cc);
 	int r;
 
 	atomic_set(&ctx->pending, 1);
@@ -483,7 +780,7 @@
 
 		atomic_inc(&ctx->pending);
 
-		r = crypt_convert_block(cc, ctx, cc->req);
+		r = crypt_convert_block(cc, ctx, this_cc->req);
 
 		switch (r) {
 		/* async */
@@ -492,7 +789,7 @@
 			INIT_COMPLETION(ctx->restart);
 			/* fall through*/
 		case -EINPROGRESS:
-			cc->req = NULL;
+			this_cc->req = NULL;
 			ctx->sector++;
 			continue;
 
@@ -651,6 +948,9 @@
  * They must be separated as otherwise the final stages could be
  * starved by new requests which can block in the first stages due
  * to memory allocation.
+ *
+ * The work is done per-CPU and is shared by all dm-crypt instances.
+ * The instances should not depend on each other and do not block.
  */
 static void crypt_endio(struct bio *clone, int error)
 {
@@ -691,26 +991,30 @@
 	clone->bi_destructor = dm_crypt_bio_destructor;
 }
 
-static void kcryptd_io_read(struct dm_crypt_io *io)
+static void kcryptd_unplug(struct crypt_config *cc)
+{
+	blk_unplug(bdev_get_queue(cc->dev->bdev));
+}
+
+static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
 
-	crypt_inc_pending(io);
-
 	/*
 	 * The block layer might modify the bvec array, so always
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
-	if (unlikely(!clone)) {
-		io->error = -ENOMEM;
-		crypt_dec_pending(io);
-		return;
+	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+	if (!clone) {
+		kcryptd_unplug(cc);
+		return 1;
 	}
 
+	crypt_inc_pending(io);
+
 	clone_init(io, clone);
 	clone->bi_idx = 0;
 	clone->bi_vcnt = bio_segments(base_bio);
@@ -720,6 +1024,7 @@
 	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
 	generic_make_request(clone);
+	return 0;
 }
 
 static void kcryptd_io_write(struct dm_crypt_io *io)
@@ -732,9 +1037,12 @@
 {
 	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_io_read(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		crypt_inc_pending(io);
+		if (kcryptd_io_read(io, GFP_NOIO))
+			io->error = -ENOMEM;
+		crypt_dec_pending(io);
+	} else
 		kcryptd_io_write(io);
 }
 
@@ -901,6 +1209,9 @@
 		return;
 	}
 
+	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
@@ -971,34 +1282,84 @@
 	}
 }
 
+static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+
+	for (i = 0; i < cc->tfms_count; i++)
+		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
+			crypto_free_ablkcipher(cpu_cc->tfms[i]);
+			cpu_cc->tfms[i] = NULL;
+		}
+}
+
+static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+{
+	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+	unsigned i;
+	int err;
+
+	for (i = 0; i < cc->tfms_count; i++) {
+		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+		if (IS_ERR(cpu_cc->tfms[i])) {
+			err = PTR_ERR(cpu_cc->tfms[i]);
+			crypt_free_tfms(cc, cpu);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static int crypt_setkey_allcpus(struct crypt_config *cc)
+{
+	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
+	int cpu, err = 0, i, r;
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < cc->tfms_count; i++) {
+			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
+						     cc->key + (i * subkey_size), subkey_size);
+			if (r)
+				err = r;
+		}
+	}
+
+	return err;
+}
+
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
-	unsigned key_size = strlen(key) >> 1;
-
-	if (cc->key_size && cc->key_size != key_size)
+	/* The key size may not be changed. */
+	if (cc->key_size != (strlen(key) >> 1))
 		return -EINVAL;
 
-	cc->key_size = key_size; /* initial settings */
+	/* Hyphen (which gives a key_size of zero) means there is no key. */
+	if (!cc->key_size && strcmp(key, "-"))
+		return -EINVAL;
 
-	if ((!key_size && strcmp(key, "-")) ||
-	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
 		return -EINVAL;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+	return crypt_setkey_allcpus(cc);
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
-	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
+
+	return crypt_setkey_allcpus(cc);
 }
 
 static void crypt_dtr(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
+	struct crypt_cpu *cpu_cc;
+	int cpu;
 
 	ti->private = NULL;
 
@@ -1010,6 +1371,14 @@
 	if (cc->crypt_queue)
 		destroy_workqueue(cc->crypt_queue);
 
+	if (cc->cpu)
+		for_each_possible_cpu(cpu) {
+			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+			if (cpu_cc->req)
+				mempool_free(cpu_cc->req, cc->req_pool);
+			crypt_free_tfms(cc, cpu);
+		}
+
 	if (cc->bs)
 		bioset_free(cc->bs);
 
@@ -1023,14 +1392,14 @@
 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
 		cc->iv_gen_ops->dtr(cc);
 
-	if (cc->tfm && !IS_ERR(cc->tfm))
-		crypto_free_ablkcipher(cc->tfm);
-
 	if (cc->dev)
 		dm_put_device(ti, cc->dev);
 
+	if (cc->cpu)
+		free_percpu(cc->cpu);
+
 	kzfree(cc->cipher);
-	kzfree(cc->cipher_mode);
+	kzfree(cc->cipher_string);
 
 	/* Must zero key material before freeing */
 	kzfree(cc);
@@ -1040,9 +1409,9 @@
 			    char *cipher_in, char *key)
 {
 	struct crypt_config *cc = ti->private;
-	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
+	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
 	char *cipher_api = NULL;
-	int ret = -EINVAL;
+	int cpu, ret = -EINVAL;
 
 	/* Convert to crypto api definition? */
 	if (strchr(cipher_in, '(')) {
@@ -1050,23 +1419,31 @@
 		return -EINVAL;
 	}
 
+	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
+	if (!cc->cipher_string)
+		goto bad_mem;
+
 	/*
 	 * Legacy dm-crypt cipher specification
-	 * cipher-mode-iv:ivopts
+	 * cipher[:keycount]-mode-iv:ivopts
 	 */
 	tmp = cipher_in;
-	cipher = strsep(&tmp, "-");
+	keycount = strsep(&tmp, "-");
+	cipher = strsep(&keycount, ":");
+
+	if (!keycount)
+		cc->tfms_count = 1;
+	else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
+		 !is_power_of_2(cc->tfms_count)) {
+		ti->error = "Bad cipher key count specification";
+		return -EINVAL;
+	}
+	cc->key_parts = cc->tfms_count;
 
 	cc->cipher = kstrdup(cipher, GFP_KERNEL);
 	if (!cc->cipher)
 		goto bad_mem;
 
-	if (tmp) {
-		cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
-		if (!cc->cipher_mode)
-			goto bad_mem;
-	}
-
 	chainmode = strsep(&tmp, "-");
 	ivopts = strsep(&tmp, "-");
 	ivmode = strsep(&ivopts, ":");
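For reference, the cipher specification parsed above keeps the legacy form and adds an optional key count, so a table can name, for example, aes-cbc-essiv:sha256 (legacy single-key form) or aes:64-cbc-lmk (a 64-key, Loop-AES v2 compatible mapping); the values here are illustrative. With a key count, the hex key supplied in the table is split into that many equal sub-keys and sector N is encrypted with sub-key N mod keycount, which is why the count must be a power of two. Loop-AES v3 uses the same cipher string; its extra IV seed is appended as a 65th key and recognised from the total key length.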
@@ -1074,10 +1451,19 @@
 	if (tmp)
 		DMWARN("Ignoring unexpected additional cipher options");
 
-	/* Compatibility mode for old dm-crypt mappings */
+	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
+				 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+				 __alignof__(struct crypt_cpu));
+	if (!cc->cpu) {
+		ti->error = "Cannot allocate per cpu state";
+		goto bad_mem;
+	}
+
+	/*
+	 * For compatibility with the original dm-crypt mapping format, if
+	 * only the cipher name is supplied, use cbc-plain.
+	 */
 	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
-		kfree(cc->cipher_mode);
-		cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
 		chainmode = "cbc";
 		ivmode = "plain";
 	}
@@ -1099,11 +1485,12 @@
 	}
 
 	/* Allocate cipher */
-	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
-	if (IS_ERR(cc->tfm)) {
-		ret = PTR_ERR(cc->tfm);
-		ti->error = "Error allocating crypto tfm";
-		goto bad;
+	for_each_possible_cpu(cpu) {
+		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
+		if (ret < 0) {
+			ti->error = "Error allocating crypto tfm";
+			goto bad;
+		}
 	}
 
 	/* Initialize and set key */
@@ -1114,7 +1501,7 @@
 	}
 
 	/* Initialize IV */
-	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
+	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
 	if (cc->iv_size)
 		/* at least a 64 bit sector number should fit in our buffer */
 		cc->iv_size = max(cc->iv_size,
@@ -1137,7 +1524,15 @@
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
-	else {
+	else if (strcmp(ivmode, "lmk") == 0) {
+		cc->iv_gen_ops = &crypt_iv_lmk_ops;
+		/* Versions 2 and 3 are recognised according
+		 * to the length of the provided multi-key string.
+		 * If present (version 3), the last key is used as IV seed.
+		 */
+		if (cc->key_size % cc->key_parts)
+			cc->key_parts++;
+	} else {
 		ret = -EINVAL;
 		ti->error = "Invalid IV mode";
 		goto bad;
@@ -1194,6 +1589,7 @@
 		ti->error = "Cannot allocate encryption context";
 		return -ENOMEM;
 	}
+	cc->key_size = key_size;
 
 	ti->private = cc;
 	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
@@ -1208,9 +1604,9 @@
 	}
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
+	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
 	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
+	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
 			   ~(crypto_tfm_ctx_alignment() - 1);
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
@@ -1219,7 +1615,6 @@
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
-	cc->req = NULL;
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
@@ -1252,13 +1647,20 @@
 	cc->start = tmpll;
 
 	ret = -ENOMEM;
-	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+	cc->io_queue = alloc_workqueue("kcryptd_io",
+				       WQ_NON_REENTRANT|
+				       WQ_MEM_RECLAIM,
+				       1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
-	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+	cc->crypt_queue = alloc_workqueue("kcryptd",
+					  WQ_NON_REENTRANT|
+					  WQ_CPU_INTENSIVE|
+					  WQ_MEM_RECLAIM,
+					  1);
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
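A brief aside on the workqueue conversion above (this is my gloss, not part of the patch): WQ_MEM_RECLAIM provides a rescuer thread so writeback routed through dm-crypt can make forward progress under memory pressure, WQ_CPU_INTENSIVE exempts the CPU-heavy encryption work from the workqueue's concurrency management, WQ_NON_REENTRANT prevents the same work item from running on two CPUs at once, and a max_active of 1 limits each queue to one in-flight work item per CPU.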
@@ -1286,9 +1688,10 @@
 
 	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
 
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_queue_io(io);
-	else
+	if (bio_data_dir(io->base_bio) == READ) {
+		if (kcryptd_io_read(io, GFP_NOWAIT))
+			kcryptd_queue_io(io);
+	} else
 		kcryptd_queue_crypt(io);
 
 	return DM_MAPIO_SUBMITTED;
@@ -1306,10 +1709,7 @@
 		break;
 
 	case STATUSTYPE_TABLE:
-		if (cc->cipher_mode)
-			DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
-		else
-			DMEMIT("%s ", cc->cipher);
+		DMEMIT("%s ", cc->cipher_string);
 
 		if (cc->key_size > 0) {
 			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
@@ -1421,7 +1821,7 @@
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 7, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index baa1191..f18375d 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -352,7 +352,7 @@
 {
 	int r = -ENOMEM;
 
-	kdelayd_wq = create_workqueue("kdelayd");
+	kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
 	if (!kdelayd_wq) {
 		DMERR("Couldn't start kdelayd");
 		goto bad_queue;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4b54618..6d12775 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -295,19 +295,55 @@
 		DMWARN("remove_all left %d open device(s)", dev_skipped);
 }
 
+/*
+ * Set the uuid of a hash_cell whose uuid is not already set.
+ */
+static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
+{
+	mutex_lock(&dm_hash_cells_mutex);
+	hc->uuid = new_uuid;
+	mutex_unlock(&dm_hash_cells_mutex);
+
+	list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
+}
+
+/*
+ * Changes the name of a hash_cell and returns the old name for
+ * the caller to free.
+ */
+static char *__change_cell_name(struct hash_cell *hc, char *new_name)
+{
+	char *old_name;
+
+	/*
+	 * Rename and move the name cell.
+	 */
+	list_del(&hc->name_list);
+	old_name = hc->name;
+
+	mutex_lock(&dm_hash_cells_mutex);
+	hc->name = new_name;
+	mutex_unlock(&dm_hash_cells_mutex);
+
+	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+
+	return old_name;
+}
+
 static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
 					    const char *new)
 {
-	char *new_name, *old_name;
+	char *new_data, *old_name = NULL;
 	struct hash_cell *hc;
 	struct dm_table *table;
 	struct mapped_device *md;
+	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
 
 	/*
 	 * duplicate new.
 	 */
-	new_name = kstrdup(new, GFP_KERNEL);
-	if (!new_name)
+	new_data = kstrdup(new, GFP_KERNEL);
+	if (!new_data)
 		return ERR_PTR(-ENOMEM);
 
 	down_write(&_hash_lock);
@@ -315,13 +351,19 @@
 	/*
 	 * Is new free ?
 	 */
-	hc = __get_name_cell(new);
+	if (change_uuid)
+		hc = __get_uuid_cell(new);
+	else
+		hc = __get_name_cell(new);
+
 	if (hc) {
-		DMWARN("asked to rename to an already-existing name %s -> %s",
+		DMWARN("Unable to change %s on mapped device %s to one that "
+		       "already exists: %s",
+		       change_uuid ? "uuid" : "name",
 		       param->name, new);
 		dm_put(hc->md);
 		up_write(&_hash_lock);
-		kfree(new_name);
+		kfree(new_data);
 		return ERR_PTR(-EBUSY);
 	}
 
@@ -330,22 +372,30 @@
 	 */
 	hc = __get_name_cell(param->name);
 	if (!hc) {
-		DMWARN("asked to rename a non-existent device %s -> %s",
-		       param->name, new);
+		DMWARN("Unable to rename non-existent device, %s to %s%s",
+		       param->name, change_uuid ? "uuid " : "", new);
 		up_write(&_hash_lock);
-		kfree(new_name);
+		kfree(new_data);
 		return ERR_PTR(-ENXIO);
 	}
 
 	/*
-	 * rename and move the name cell.
+	 * Does this device already have a uuid?
 	 */
-	list_del(&hc->name_list);
-	old_name = hc->name;
-	mutex_lock(&dm_hash_cells_mutex);
-	hc->name = new_name;
-	mutex_unlock(&dm_hash_cells_mutex);
-	list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+	if (change_uuid && hc->uuid) {
+		DMWARN("Unable to change uuid of mapped device %s to %s "
+		       "because uuid is already set to %s",
+		       param->name, new, hc->uuid);
+		dm_put(hc->md);
+		up_write(&_hash_lock);
+		kfree(new_data);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (change_uuid)
+		__set_cell_uuid(hc, new_data);
+	else
+		old_name = __change_cell_name(hc, new_data);
 
 	/*
 	 * Wake up any dm event waiters.
@@ -729,7 +779,7 @@
 	hc = __find_device_hash_cell(param);
 
 	if (!hc) {
-		DMWARN("device doesn't appear to be in the dev hash table.");
+		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
 		return -ENXIO;
 	}
@@ -741,7 +791,7 @@
 	 */
 	r = dm_lock_for_deletion(md);
 	if (r) {
-		DMWARN("unable to remove open device %s", hc->name);
+		DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
 		up_write(&_hash_lock);
 		dm_put(md);
 		return r;
@@ -774,21 +824,24 @@
 static int dev_rename(struct dm_ioctl *param, size_t param_size)
 {
 	int r;
-	char *new_name = (char *) param + param->data_start;
+	char *new_data = (char *) param + param->data_start;
 	struct mapped_device *md;
+	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
 
-	if (new_name < param->data ||
-	    invalid_str(new_name, (void *) param + param_size) ||
-	    strlen(new_name) > DM_NAME_LEN - 1) {
-		DMWARN("Invalid new logical volume name supplied.");
+	if (new_data < param->data ||
+	    invalid_str(new_data, (void *) param + param_size) ||
+	    strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
+		DMWARN("Invalid new mapped device name or uuid string supplied.");
 		return -EINVAL;
 	}
 
-	r = check_name(new_name);
-	if (r)
-		return r;
+	if (!change_uuid) {
+		r = check_name(new_data);
+		if (r)
+			return r;
+	}
 
-	md = dm_hash_rename(param, new_name);
+	md = dm_hash_rename(param, new_data);
 	if (IS_ERR(md))
 		return PTR_ERR(md);
 
@@ -885,7 +938,7 @@
 
 	hc = __find_device_hash_cell(param);
 	if (!hc) {
-		DMWARN("device doesn't appear to be in the dev hash table.");
+		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
 		return -ENXIO;
 	}
@@ -1212,7 +1265,7 @@
 
 	hc = __find_device_hash_cell(param);
 	if (!hc) {
-		DMWARN("device doesn't appear to be in the dev hash table.");
+		DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
 		up_write(&_hash_lock);
 		return -ENXIO;
 	}
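In short, the dm-ioctl changes above reuse the existing rename ioctl for a second purpose: when userspace sets DM_UUID_FLAG, the new string is treated as a uuid rather than a name, and it is only accepted for a device whose uuid has not been set yet (a uuid, once set, is immutable). Without the flag the behaviour is the old rename path, now factored into __change_cell_name().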
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d8587ba..924f5f0 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,6 +37,13 @@
 	unsigned int nr_pages;
 	unsigned int nr_free_pages;
 
+	/*
+	 * Block devices to unplug.
+	 * Non-NULL pointer means that a block device has some pending requests
+	 * and needs to be unplugged.
+	 */
+	struct block_device *unplug[2];
+
 	struct dm_io_client *io_client;
 
 	wait_queue_head_t destroyq;
@@ -308,6 +315,31 @@
 	return 0;
 }
 
+/*
+ * Unplug the block device at the specified index.
+ */
+static void unplug(struct dm_kcopyd_client *kc, int rw)
+{
+	if (kc->unplug[rw] != NULL) {
+		blk_unplug(bdev_get_queue(kc->unplug[rw]));
+		kc->unplug[rw] = NULL;
+	}
+}
+
+/*
+ * Prepare block device unplug. If there's another device
+ * to be unplugged at the same array index, we unplug that
+ * device first.
+ */
+static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
+			   struct block_device *bdev)
+{
+	if (likely(kc->unplug[rw] == bdev))
+		return;
+	unplug(kc, rw);
+	kc->unplug[rw] = bdev;
+}
+
 static void complete_io(unsigned long error, void *context)
 {
 	struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -345,7 +377,7 @@
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
+		.bi_rw = job->rw,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
@@ -354,10 +386,16 @@
 		.client = job->kc->io_client,
 	};
 
-	if (job->rw == READ)
+	if (job->rw == READ) {
 		r = dm_io(&io_req, 1, &job->source, NULL);
-	else
+		prepare_unplug(job->kc, READ, job->source.bdev);
+	} else {
+		if (job->num_dests > 1)
+			io_req.bi_rw |= REQ_UNPLUG;
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+		if (!(io_req.bi_rw & REQ_UNPLUG))
+			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
+	}
 
 	return r;
 }
@@ -435,10 +473,18 @@
 	 * Pages jobs when successful will jump onto the io jobs
 	 * list.  io jobs call wake when they complete and it all
 	 * starts again.
+	 *
+	 * Note that io_jobs add block devices to the unplug array;
+	 * this array is cleared with "unplug" calls. It is thus
+	 * forbidden to run complete_jobs after io_jobs and before
+	 * unplug because the block device could be destroyed in
+	 * the job completion callback.
 	 */
 	process_jobs(&kc->complete_jobs, kc, run_complete_job);
 	process_jobs(&kc->pages_jobs, kc, run_pages_job);
 	process_jobs(&kc->io_jobs, kc, run_io_job);
+	unplug(kc, READ);
+	unplug(kc, WRITE);
 }
 
 /*
@@ -619,12 +665,15 @@
 	INIT_LIST_HEAD(&kc->io_jobs);
 	INIT_LIST_HEAD(&kc->pages_jobs);
 
+	memset(kc->unplug, 0, sizeof(kc->unplug));
+
 	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 	if (!kc->job_pool)
 		goto bad_slab;
 
 	INIT_WORK(&kc->kcopyd_work, do_work);
-	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
+	kc->kcopyd_wq = alloc_workqueue("kcopyd",
+					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
 	if (!kc->kcopyd_wq)
 		goto bad_workqueue;
 
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 1ed0094..aa2e0c3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -12,12 +12,22 @@
 
 #include "dm-log-userspace-transfer.h"
 
+#define DM_LOG_USERSPACE_VSN "1.1.0"
+
 struct flush_entry {
 	int type;
 	region_t region;
 	struct list_head list;
 };
 
+/*
+ * This limit on the number of mark and clear requests is, to a degree,
+ * arbitrary.  However, there is some basis for the choice in the limits
+ * imposed on the size of data payload by dm-log-userspace-transfer.c:
+ * dm_consult_userspace().
+ */
+#define MAX_FLUSH_GROUP_COUNT 32
+
 struct log_c {
 	struct dm_target *ti;
 	uint32_t region_size;
@@ -37,8 +47,15 @@
 	 */
 	uint64_t in_sync_hint;
 
+	/*
+	 * Mark and clear requests are held until a flush is issued
+	 * so that we can group, and thereby limit, the amount of
+	 * network traffic between kernel and userspace.  The 'flush_lock'
+	 * is used to protect these lists.
+	 */
 	spinlock_t flush_lock;
-	struct list_head flush_list;  /* only for clear and mark requests */
+	struct list_head mark_list;
+	struct list_head clear_list;
 };
 
 static mempool_t *flush_entry_pool;
@@ -169,7 +186,8 @@
 
 	strncpy(lc->uuid, argv[0], DM_UUID_LEN);
 	spin_lock_init(&lc->flush_lock);
-	INIT_LIST_HEAD(&lc->flush_list);
+	INIT_LIST_HEAD(&lc->mark_list);
+	INIT_LIST_HEAD(&lc->clear_list);
 
 	str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
 	if (str_size < 0) {
@@ -181,8 +199,11 @@
 	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 				 ctr_str, str_size, NULL, NULL);
 
-	if (r == -ESRCH) {
-		DMERR("Userspace log server not found");
+	if (r < 0) {
+		if (r == -ESRCH)
+			DMERR("Userspace log server not found");
+		else
+			DMERR("Userspace log server failed to create log");
 		goto out;
 	}
 
@@ -214,10 +235,9 @@
 
 static void userspace_dtr(struct dm_dirty_log *log)
 {
-	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
+	(void) dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -338,6 +358,71 @@
 	return (r) ? 0 : (int)in_sync;
 }
 
+static int flush_one_by_one(struct log_c *lc, struct list_head *flush_list)
+{
+	int r = 0;
+	struct flush_entry *fe;
+
+	list_for_each_entry(fe, flush_list, list) {
+		r = userspace_do_request(lc, lc->uuid, fe->type,
+					 (char *)&fe->region,
+					 sizeof(fe->region),
+					 NULL, NULL);
+		if (r)
+			break;
+	}
+
+	return r;
+}
+
+static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
+{
+	int r = 0;
+	int count;
+	uint32_t type = 0;
+	struct flush_entry *fe, *tmp_fe;
+	LIST_HEAD(tmp_list);
+	uint64_t group[MAX_FLUSH_GROUP_COUNT];
+
+	/*
+	 * Process the requests in groups
+	 */
+	while (!list_empty(flush_list)) {
+		count = 0;
+
+		list_for_each_entry_safe(fe, tmp_fe, flush_list, list) {
+			group[count] = fe->region;
+			count++;
+
+			list_del(&fe->list);
+			list_add(&fe->list, &tmp_list);
+
+			type = fe->type;
+			if (count >= MAX_FLUSH_GROUP_COUNT)
+				break;
+		}
+
+		r = userspace_do_request(lc, lc->uuid, type,
+					 (char *)(group),
+					 count * sizeof(uint64_t),
+					 NULL, NULL);
+		if (r) {
+			/* Group send failed.  Attempt one-by-one. */
+			list_splice_init(&tmp_list, flush_list);
+			r = flush_one_by_one(lc, flush_list);
+			break;
+		}
+	}
+
+	/*
+	 * Must collect flush_entrys that were successfully processed
+	 * as a group so that they will be free'd by the caller.
+	 */
+	list_splice_init(&tmp_list, flush_list);
+
+	return r;
+}
+
 /*
  * userspace_flush
  *
@@ -360,31 +445,25 @@
 	int r = 0;
 	unsigned long flags;
 	struct log_c *lc = log->context;
-	LIST_HEAD(flush_list);
+	LIST_HEAD(mark_list);
+	LIST_HEAD(clear_list);
 	struct flush_entry *fe, *tmp_fe;
 
 	spin_lock_irqsave(&lc->flush_lock, flags);
-	list_splice_init(&lc->flush_list, &flush_list);
+	list_splice_init(&lc->mark_list, &mark_list);
+	list_splice_init(&lc->clear_list, &clear_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
-	if (list_empty(&flush_list))
+	if (list_empty(&mark_list) && list_empty(&clear_list))
 		return 0;
 
-	/*
-	 * FIXME: Count up requests, group request types,
-	 * allocate memory to stick all requests in and
-	 * send to server in one go.  Failing the allocation,
-	 * do it one by one.
-	 */
+	r = flush_by_group(lc, &mark_list);
+	if (r)
+		goto fail;
 
-	list_for_each_entry(fe, &flush_list, list) {
-		r = userspace_do_request(lc, lc->uuid, fe->type,
-					 (char *)&fe->region,
-					 sizeof(fe->region),
-					 NULL, NULL);
-		if (r)
-			goto fail;
-	}
+	r = flush_by_group(lc, &clear_list);
+	if (r)
+		goto fail;
 
 	r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
 				 NULL, 0, NULL, NULL);
@@ -395,7 +474,11 @@
 	 * Calling code will receive an error and will know that
 	 * the log facility has failed.
 	 */
-	list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+	list_for_each_entry_safe(fe, tmp_fe, &mark_list, list) {
+		list_del(&fe->list);
+		mempool_free(fe, flush_entry_pool);
+	}
+	list_for_each_entry_safe(fe, tmp_fe, &clear_list, list) {
 		list_del(&fe->list);
 		mempool_free(fe, flush_entry_pool);
 	}
@@ -425,7 +508,7 @@
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_ULOG_MARK_REGION;
 	fe->region = region;
-	list_add(&fe->list, &lc->flush_list);
+	list_add(&fe->list, &lc->mark_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
 	return;
@@ -462,7 +545,7 @@
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_ULOG_CLEAR_REGION;
 	fe->region = region;
-	list_add(&fe->list, &lc->flush_list);
+	list_add(&fe->list, &lc->clear_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
 	return;
@@ -684,7 +767,7 @@
 		return r;
 	}
 
-	DMINFO("version 1.0.0 loaded");
+	DMINFO("version " DM_LOG_USERSPACE_VSN " loaded");
 	return 0;
 }
 
@@ -694,7 +777,7 @@
 	dm_ulog_tfr_exit();
 	mempool_destroy(flush_entry_pool);
 
-	DMINFO("version 1.0.0 unloaded");
+	DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
 	return;
 }
 
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 075cbcf..049eaf1 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -198,6 +198,7 @@
 
 	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->version = DM_ULOG_REQUEST_VERSION;
 	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
 
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 33420e6..6951536 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -455,7 +455,7 @@
 			r = PTR_ERR(lc->io_req.client);
 			DMWARN("couldn't allocate disk io client");
 			kfree(lc);
-			return -ENOMEM;
+			return r;
 		}
 
 		lc->disk_header = vmalloc(buf_size);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 487ecda..b82d288 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,8 @@
 
 #define DM_MSG_PREFIX "multipath"
 #define MESG_STR(x) x, sizeof(x)
+#define DM_PG_INIT_DELAY_MSECS 2000
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
 
 /* Path properties */
 struct pgpath {
@@ -33,8 +35,7 @@
 	unsigned fail_count;		/* Cumulative failure count */
 
 	struct dm_path path;
-	struct work_struct deactivate_path;
-	struct work_struct activate_path;
+	struct delayed_work activate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -65,11 +66,15 @@
 
 	const char *hw_handler_name;
 	char *hw_handler_params;
+
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
+
+	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+
 	unsigned pg_init_required;	/* pg_init needs calling? */
 	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
-	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */
 
 	unsigned nr_valid_paths;	/* Total number of usable paths */
 	struct pgpath *current_pgpath;
@@ -82,6 +87,7 @@
 	unsigned saved_queue_if_no_path;/* Saved state during suspension */
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_count;		/* Number of times pg_init called */
+	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
 
 	struct work_struct process_queued_ios;
 	struct list_head queued_ios;
@@ -116,7 +122,6 @@
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -129,8 +134,7 @@
 
 	if (pgpath) {
 		pgpath->is_active = 1;
-		INIT_WORK(&pgpath->deactivate_path, deactivate_path);
-		INIT_WORK(&pgpath->activate_path, activate_path);
+		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
 	}
 
 	return pgpath;
@@ -141,14 +145,6 @@
 	kfree(pgpath);
 }
 
-static void deactivate_path(struct work_struct *work)
-{
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, deactivate_path);
-
-	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
 static struct priority_group *alloc_priority_group(void)
 {
 	struct priority_group *pg;
@@ -199,6 +195,7 @@
 		INIT_LIST_HEAD(&m->queued_ios);
 		spin_lock_init(&m->lock);
 		m->queue_io = 1;
+		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 		INIT_WORK(&m->process_queued_ios, process_queued_ios);
 		INIT_WORK(&m->trigger_event, trigger_event);
 		init_waitqueue_head(&m->pg_init_wait);
@@ -238,14 +235,19 @@
 static void __pg_init_all_paths(struct multipath *m)
 {
 	struct pgpath *pgpath;
+	unsigned long pg_init_delay = 0;
 
 	m->pg_init_count++;
 	m->pg_init_required = 0;
+	if (m->pg_init_delay_retry)
+		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
+						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 		/* Skip failed paths */
 		if (!pgpath->is_active)
 			continue;
-		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
+				       pg_init_delay))
 			m->pg_init_in_progress++;
 	}
 }
@@ -793,8 +795,9 @@
 	const char *param_name;
 
 	static struct param _params[] = {
-		{0, 3, "invalid number of feature args"},
+		{0, 5, "invalid number of feature args"},
 		{1, 50, "pg_init_retries must be between 1 and 50"},
+		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
 	};
 
 	r = read_param(_params, shift(as), &argc, &ti->error);
@@ -821,6 +824,14 @@
 			continue;
 		}
 
+		if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+		    (argc >= 1)) {
+			r = read_param(_params + 2, shift(as),
+				       &m->pg_init_delay_msecs, &ti->error);
+			argc--;
+			continue;
+		}
+
 		ti->error = "Unrecognised multipath feature request";
 		r = -EINVAL;
 	} while (argc && !r);
@@ -931,7 +942,7 @@
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
-	flush_scheduled_work();
+	flush_work_sync(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -995,7 +1006,6 @@
 		      pgpath->path.dev->name, m->nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
-	queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -1034,7 +1044,7 @@
 		m->current_pgpath = NULL;
 		queue_work(kmultipathd, &m->process_queued_ios);
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
-		if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
 	}
 
@@ -1169,6 +1179,7 @@
 	struct priority_group *pg = pgpath->pg;
 	struct multipath *m = pg->m;
 	unsigned long flags;
+	unsigned delay_retry = 0;
 
 	/* device or driver problems */
 	switch (errors) {
@@ -1193,8 +1204,9 @@
 		 */
 		bypass_pg(m, pg, 1);
 		break;
-	/* TODO: For SCSI_DH_RETRY we should wait a couple seconds */
 	case SCSI_DH_RETRY:
+		/* Wait before retrying. */
+		delay_retry = 1;
 	case SCSI_DH_IMM_RETRY:
 	case SCSI_DH_RES_TEMP_UNAVAIL:
 		if (pg_init_limit_reached(m, pgpath))
@@ -1227,6 +1239,7 @@
 	if (!m->pg_init_required)
 		m->queue_io = 0;
 
+	m->pg_init_delay_retry = delay_retry;
 	queue_work(kmultipathd, &m->process_queued_ios);
 
 	/*
@@ -1241,7 +1254,7 @@
 static void activate_path(struct work_struct *work)
 {
 	struct pgpath *pgpath =
-		container_of(work, struct pgpath, activate_path);
+		container_of(work, struct pgpath, activate_path.work);
 
 	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
 				pg_init_done, pgpath);
@@ -1382,11 +1395,14 @@
 		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
 	else {
 		DMEMIT("%u ", m->queue_if_no_path +
-			      (m->pg_init_retries > 0) * 2);
+			      (m->pg_init_retries > 0) * 2 +
+			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
 		if (m->queue_if_no_path)
 			DMEMIT("queue_if_no_path ");
 		if (m->pg_init_retries)
 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
+		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
+			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
 	}
 
 	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1655,7 +1671,7 @@
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 1, 1},
+	.version = {1, 2, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
@@ -1687,7 +1703,7 @@
 		return -EINVAL;
 	}
 
-	kmultipathd = create_workqueue("kmpathd");
+	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
 	if (!kmultipathd) {
 		DMERR("failed to create workqueue kmpathd");
 		dm_unregister_target(&multipath_target);
@@ -1701,7 +1717,8 @@
 	 * old workqueue would also create a bottleneck in the
 	 * path of the storage hardware device activation.
 	 */
-	kmpath_handlerd = create_singlethread_workqueue("kmpath_handlerd");
+	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
+						  WQ_MEM_RECLAIM);
 	if (!kmpath_handlerd) {
 		DMERR("failed to create workqueue kmpath_handlerd");
 		destroy_workqueue(kmultipathd);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
new file mode 100644
index 0000000..b9e1e15
--- /dev/null
+++ b/drivers/md/dm-raid.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright (C) 2010-2011 Neil Brown
+ * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include <linux/slab.h>
+
+#include "md.h"
+#include "raid5.h"
+#include "dm.h"
+#include "bitmap.h"
+
+#define DM_MSG_PREFIX "raid"
+
+/*
+ * If MD doesn't support MD_SYNC_STATE_FORCED yet, define it as 0
+ * so that setting the flag has no effect.
+ */
+#ifndef MD_SYNC_STATE_FORCED
+#define MD_SYNC_STATE_FORCED 0
+#endif
+
+struct raid_dev {
+	/*
+	 * Two DM devices, one to hold metadata and one to hold the
+	 * actual data/parity.  The reason for this is to not confuse
+	 * ti->len and give more flexibility in altering size and
+	 * characteristics.
+	 *
+	 * While it is possible for this device to be associated
+	 * with a different physical device than the data_dev, it
+	 * is intended for it to be the same.
+	 *    |--------- Physical Device ---------|
+	 *    |- meta_dev -|------ data_dev ------|
+	 */
+	struct dm_dev *meta_dev;
+	struct dm_dev *data_dev;
+	struct mdk_rdev_s rdev;
+};
+
+/*
+ * Flags for rs->print_flags field.
+ */
+#define DMPF_DAEMON_SLEEP      0x1
+#define DMPF_MAX_WRITE_BEHIND  0x2
+#define DMPF_SYNC              0x4
+#define DMPF_NOSYNC            0x8
+#define DMPF_STRIPE_CACHE      0x10
+#define DMPF_MIN_RECOVERY_RATE 0x20
+#define DMPF_MAX_RECOVERY_RATE 0x40
+
+struct raid_set {
+	struct dm_target *ti;
+
+	uint64_t print_flags;
+
+	struct mddev_s md;
+	struct raid_type *raid_type;
+	struct dm_target_callbacks callbacks;
+
+	struct raid_dev dev[0];
+};
+
+/* Supported raid types and properties. */
+static struct raid_type {
+	const char *name;		/* RAID algorithm. */
+	const char *descr;		/* Descriptor text for logging. */
+	const unsigned parity_devs;	/* # of parity devices. */
+	const unsigned minimal_devs;	/* minimal # of devices in set. */
+	const unsigned level;		/* RAID level. */
+	const unsigned algorithm;	/* RAID algorithm. */
+} raid_types[] = {
+	{"raid4",    "RAID4 (dedicated parity disk)",	1, 2, 5, ALGORITHM_PARITY_0},
+	{"raid5_la", "RAID5 (left asymmetric)",		1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
+	{"raid5_ra", "RAID5 (right asymmetric)",	1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
+	{"raid5_ls", "RAID5 (left symmetric)",		1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
+	{"raid5_rs", "RAID5 (right symmetric)",		1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
+	{"raid6_zr", "RAID6 (zero restart)",		2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
+	{"raid6_nr", "RAID6 (N restart)",		2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
+	{"raid6_nc", "RAID6 (N continue)",		2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
+};
+
+static struct raid_type *get_raid_type(char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
+		if (!strcmp(raid_types[i].name, name))
+			return &raid_types[i];
+
+	return NULL;
+}
+
+static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
+{
+	unsigned i;
+	struct raid_set *rs;
+	sector_t sectors_per_dev;
+
+	if (raid_devs <= raid_type->parity_devs) {
+		ti->error = "Insufficient number of devices";
+		return ERR_PTR(-EINVAL);
+	}
+
+	sectors_per_dev = ti->len;
+	if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+		ti->error = "Target length not divisible by number of data devices";
+		return ERR_PTR(-EINVAL);
+	}
+
+	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
+	if (!rs) {
+		ti->error = "Cannot allocate raid context";
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mddev_init(&rs->md);
+
+	rs->ti = ti;
+	rs->raid_type = raid_type;
+	rs->md.raid_disks = raid_devs;
+	rs->md.level = raid_type->level;
+	rs->md.new_level = rs->md.level;
+	rs->md.dev_sectors = sectors_per_dev;
+	rs->md.layout = raid_type->algorithm;
+	rs->md.new_layout = rs->md.layout;
+	rs->md.delta_disks = 0;
+	rs->md.recovery_cp = 0;
+
+	for (i = 0; i < raid_devs; i++)
+		md_rdev_init(&rs->dev[i].rdev);
+
+	/*
+	 * Remaining items to be initialized by further RAID params:
+	 *  rs->md.persistent
+	 *  rs->md.external
+	 *  rs->md.chunk_sectors
+	 *  rs->md.new_chunk_sectors
+	 */
+
+	return rs;
+}
+
+static void context_free(struct raid_set *rs)
+{
+	int i;
+
+	for (i = 0; i < rs->md.raid_disks; i++)
+		if (rs->dev[i].data_dev)
+			dm_put_device(rs->ti, rs->dev[i].data_dev);
+
+	kfree(rs);
+}
+
+/*
+ * For every device we have two words
+ *  <meta_dev>: meta device name or '-' if missing
+ *  <data_dev>: data device name or '-' if missing
+ *
+ * This code parses those words.
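+ *
+ * For example (the device numbers are purely illustrative, and metadata
+ * devices are not supported yet), a 3-device set would be described by:
+ *
+ *   - 8:17 - 8:33 - 8:49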
+ */
+static int dev_parms(struct raid_set *rs, char **argv)
+{
+	int i;
+	int rebuild = 0;
+	int metadata_available = 0;
+	int ret = 0;
+
+	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
+		rs->dev[i].rdev.raid_disk = i;
+
+		rs->dev[i].meta_dev = NULL;
+		rs->dev[i].data_dev = NULL;
+
+		/*
+		 * There are no offsets, since there is a separate device
+		 * for data and metadata.
+		 */
+		rs->dev[i].rdev.data_offset = 0;
+		rs->dev[i].rdev.mddev = &rs->md;
+
+		if (strcmp(argv[0], "-")) {
+			rs->ti->error = "Metadata devices not supported";
+			return -EINVAL;
+		}
+
+		if (!strcmp(argv[1], "-")) {
+			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
+			    (!rs->dev[i].rdev.recovery_offset)) {
+				rs->ti->error = "Drive designated for rebuild not specified";
+				return -EINVAL;
+			}
+
+			continue;
+		}
+
+		ret = dm_get_device(rs->ti, argv[1],
+				    dm_table_get_mode(rs->ti->table),
+				    &rs->dev[i].data_dev);
+		if (ret) {
+			rs->ti->error = "RAID device lookup failure";
+			return ret;
+		}
+
+		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
+		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
+		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+			rebuild++;
+	}
+
+	if (metadata_available) {
+		rs->md.external = 0;
+		rs->md.persistent = 1;
+		rs->md.major_version = 2;
+	} else if (rebuild && !rs->md.recovery_cp) {
+		/*
+		 * Without metadata, we will not be able to tell if the array
+		 * is in-sync or not - we must assume it is not.  Therefore,
+		 * it is impossible to rebuild a drive.
+		 *
+		 * Even if there is metadata, the on-disk information may
+		 * indicate that the array is not in-sync and it will then
+		 * fail at that time.
+		 *
+		 * User could specify 'nosync' option if desperate.
+		 */
+		DMERR("Unable to rebuild drive while array is not in-sync");
+		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Possible arguments are...
+ * RAID456:
+ *	<chunk_size> [optional_args]
+ *
+ * Optional args:
+ *    [[no]sync]			Force or prevent recovery of the entire array
+ *    [rebuild <idx>]			Rebuild the drive indicated by the index
+ *    [daemon_sleep <ms>]		Time between bitmap daemon work to clear bits
+ *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
+ *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
+ *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
+ *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
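+ *
+ * As an illustration only, "128 nosync max_recovery_rate 8192" is a valid
+ * <chunk_size> [optional_args] sequence: a 64KiB chunk size, no initial
+ * resync, and recovery throttled to 8192 kB/sec/disk (four raid
+ * parameters in total).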
+ */
+static int parse_raid_params(struct raid_set *rs, char **argv,
+			     unsigned num_raid_params)
+{
+	unsigned i, rebuild_cnt = 0;
+	unsigned long value;
+	char *key;
+
+	/*
+	 * First, parse the in-order required arguments
+	 */
+	if ((strict_strtoul(argv[0], 10, &value) < 0) ||
+	    !is_power_of_2(value) || (value < 8)) {
+		rs->ti->error = "Bad chunk size";
+		return -EINVAL;
+	}
+
+	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
+	argv++;
+	num_raid_params--;
+
+	/*
+	 * Second, parse the unordered optional arguments
+	 */
+	for (i = 0; i < rs->md.raid_disks; i++)
+		set_bit(In_sync, &rs->dev[i].rdev.flags);
+
+	for (i = 0; i < num_raid_params; i++) {
+		if (!strcmp(argv[i], "nosync")) {
+			rs->md.recovery_cp = MaxSector;
+			rs->print_flags |= DMPF_NOSYNC;
+			rs->md.flags |= MD_SYNC_STATE_FORCED;
+			continue;
+		}
+		if (!strcmp(argv[i], "sync")) {
+			rs->md.recovery_cp = 0;
+			rs->print_flags |= DMPF_SYNC;
+			rs->md.flags |= MD_SYNC_STATE_FORCED;
+			continue;
+		}
+
+		/* The rest of the optional arguments come in key/value pairs */
+		if ((i + 1) >= num_raid_params) {
+			rs->ti->error = "Wrong number of raid parameters given";
+			return -EINVAL;
+		}
+
+		key = argv[i++];
+		if (strict_strtoul(argv[i], 10, &value) < 0) {
+			rs->ti->error = "Bad numerical argument given in raid params";
+			return -EINVAL;
+		}
+
+		if (!strcmp(key, "rebuild")) {
+			if (++rebuild_cnt > rs->raid_type->parity_devs) {
+				rs->ti->error = "Too many rebuild drives given";
+				return -EINVAL;
+			}
+			if (value >= rs->md.raid_disks) {
+				rs->ti->error = "Invalid rebuild index given";
+				return -EINVAL;
+			}
+			clear_bit(In_sync, &rs->dev[value].rdev.flags);
+			rs->dev[value].rdev.recovery_offset = 0;
+		} else if (!strcmp(key, "max_write_behind")) {
+			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
+
+			/*
+			 * In device-mapper, we specify things in sectors, but
+			 * MD records this value in kB
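+			 * (e.g. 256 sectors become 128 kB, hence the
+			 * division by two below)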
+			 */
+			value /= 2;
+			if (value > COUNTER_MAX) {
+				rs->ti->error = "Max write-behind limit out of range";
+				return -EINVAL;
+			}
+			rs->md.bitmap_info.max_write_behind = value;
+		} else if (!strcmp(key, "daemon_sleep")) {
+			rs->print_flags |= DMPF_DAEMON_SLEEP;
+			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
+				rs->ti->error = "daemon sleep period out of range";
+				return -EINVAL;
+			}
+			rs->md.bitmap_info.daemon_sleep = value;
+		} else if (!strcmp(key, "stripe_cache")) {
+			rs->print_flags |= DMPF_STRIPE_CACHE;
+
+			/*
+			 * In device-mapper, we specify things in sectors, but
+			 * MD records this value in kB
+			 */
+			value /= 2;
+
+			if (rs->raid_type->level < 5) {
+				rs->ti->error = "Inappropriate argument: stripe_cache";
+				return -EINVAL;
+			}
+			if (raid5_set_cache_size(&rs->md, (int)value)) {
+				rs->ti->error = "Bad stripe_cache size";
+				return -EINVAL;
+			}
+		} else if (!strcmp(key, "min_recovery_rate")) {
+			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
+			if (value > INT_MAX) {
+				rs->ti->error = "min_recovery_rate out of range";
+				return -EINVAL;
+			}
+			rs->md.sync_speed_min = (int)value;
+		} else if (!strcmp(key, "max_recovery_rate")) {
+			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
+			if (value > INT_MAX) {
+				rs->ti->error = "max_recovery_rate out of range";
+				return -EINVAL;
+			}
+			rs->md.sync_speed_max = (int)value;
+		} else {
+			DMERR("Unable to parse RAID parameter: %s", key);
+			rs->ti->error = "Unable to parse RAID parameters";
+			return -EINVAL;
+		}
+	}
+
+	/* Assume there are no metadata devices until the drives are parsed */
+	rs->md.persistent = 0;
+	rs->md.external = 1;
+
+	return 0;
+}
+
+static void do_table_event(struct work_struct *ws)
+{
+	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
+
+	dm_table_event(rs->ti->table);
+}
+
+static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
+{
+	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+
+	return md_raid5_congested(&rs->md, bits);
+}
+
+static void raid_unplug(struct dm_target_callbacks *cb)
+{
+	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+
+	md_raid5_unplug_device(rs->md.private);
+}
+
+/*
+ * Construct a RAID4/5/6 mapping:
+ * Args:
+ *	<raid_type> <#raid_params> <raid_params>		\
+ *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
+ *
+ * ** metadata devices are not supported yet, use '-' instead **
+ *
+ * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
+ * details on possible <raid_params>.
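+ *
+ * As a purely hypothetical example, a table line for a 5-device raid5_ls
+ * set with a 64KiB chunk size might read:
+ *
+ *   0 1960893648 raid raid5_ls 1 128 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
+ *
+ * where the 1960893648-sector length divides evenly across the 4 data
+ * devices and every metadata slot is given as '-'.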
+ */
+static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+	int ret;
+	struct raid_type *rt;
+	unsigned long num_raid_params, num_raid_devs;
+	struct raid_set *rs = NULL;
+
+	/* Must have at least <raid_type> <#raid_params> */
+	if (argc < 2) {
+		ti->error = "Too few arguments";
+		return -EINVAL;
+	}
+
+	/* raid type */
+	rt = get_raid_type(argv[0]);
+	if (!rt) {
+		ti->error = "Unrecognised raid_type";
+		return -EINVAL;
+	}
+	argc--;
+	argv++;
+
+	/* number of RAID parameters */
+	if (strict_strtoul(argv[0], 10, &num_raid_params) < 0) {
+		ti->error = "Cannot understand number of RAID parameters";
+		return -EINVAL;
+	}
+	argc--;
+	argv++;
+
+	/* Skip over RAID params for now and find out # of devices */
+	if (num_raid_params + 1 > argc) {
+		ti->error = "Arguments do not agree with counts given";
+		return -EINVAL;
+	}
+
+	if ((strict_strtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
+	    (num_raid_devs >= INT_MAX)) {
+		ti->error = "Cannot understand number of raid devices";
+		return -EINVAL;
+	}
+
+	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
+	if (IS_ERR(rs))
+		return PTR_ERR(rs);
+
+	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
+	if (ret)
+		goto bad;
+
+	ret = -EINVAL;
+
+	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
+	argv += num_raid_params + 1;
+
+	if (argc != (num_raid_devs * 2)) {
+		ti->error = "Number of supplied RAID devices does not match the count given";
+		goto bad;
+	}
+
+	ret = dev_parms(rs, argv);
+	if (ret)
+		goto bad;
+
+	INIT_WORK(&rs->md.event_work, do_table_event);
+	ti->split_io = rs->md.chunk_sectors;
+	ti->private = rs;
+
+	mutex_lock(&rs->md.reconfig_mutex);
+	ret = md_run(&rs->md);
+	rs->md.in_sync = 0; /* Assume already marked dirty */
+	mutex_unlock(&rs->md.reconfig_mutex);
+
+	if (ret) {
+		ti->error = "Failed to run raid array";
+		goto bad;
+	}
+
+	rs->callbacks.congested_fn = raid_is_congested;
+	rs->callbacks.unplug_fn = raid_unplug;
+	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
+
+	return 0;
+
+bad:
+	context_free(rs);
+
+	return ret;
+}
+
+static void raid_dtr(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	list_del_init(&rs->callbacks.list);
+	md_stop(&rs->md);
+	context_free(rs);
+}
+
+static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
+{
+	struct raid_set *rs = ti->private;
+	mddev_t *mddev = &rs->md;
+
+	mddev->pers->make_request(mddev, bio);
+
+	return DM_MAPIO_SUBMITTED;
+}
+
+static int raid_status(struct dm_target *ti, status_type_t type,
+		       char *result, unsigned maxlen)
+{
+	struct raid_set *rs = ti->private;
+	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
+	unsigned sz = 0;
+	int i;
+	sector_t sync;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
+
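+		/*
+		 * One status character per device:
+		 * 'D' = dead/faulty, 'A' = alive and in-sync,
+		 * 'a' = alive but not yet in-sync.
+		 */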
+		for (i = 0; i < rs->md.raid_disks; i++) {
+			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
+				DMEMIT("D");
+			else if (test_bit(In_sync, &rs->dev[i].rdev.flags))
+				DMEMIT("A");
+			else
+				DMEMIT("a");
+		}
+
+		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
+			sync = rs->md.curr_resync_completed;
+		else
+			sync = rs->md.recovery_cp;
+
+		if (sync > rs->md.resync_max_sectors)
+			sync = rs->md.resync_max_sectors;
+
+		DMEMIT(" %llu/%llu",
+		       (unsigned long long) sync,
+		       (unsigned long long) rs->md.resync_max_sectors);
+
+		break;
+	case STATUSTYPE_TABLE:
+		/* The string you would use to construct this array */
+		for (i = 0; i < rs->md.raid_disks; i++)
+			if (rs->dev[i].data_dev &&
+			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
+				raid_param_cnt++; /* for rebuilds */
+
+		raid_param_cnt += (hweight64(rs->print_flags) * 2);
+		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
+			raid_param_cnt--;
+
+		DMEMIT("%s %u %u", rs->raid_type->name,
+		       raid_param_cnt, rs->md.chunk_sectors);
+
+		if ((rs->print_flags & DMPF_SYNC) &&
+		    (rs->md.recovery_cp == MaxSector))
+			DMEMIT(" sync");
+		if (rs->print_flags & DMPF_NOSYNC)
+			DMEMIT(" nosync");
+
+		for (i = 0; i < rs->md.raid_disks; i++)
+			if (rs->dev[i].data_dev &&
+			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
+				DMEMIT(" rebuild %u", i);
+
+		if (rs->print_flags & DMPF_DAEMON_SLEEP)
+			DMEMIT(" daemon_sleep %lu",
+			       rs->md.bitmap_info.daemon_sleep);
+
+		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
+			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
+
+		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
+			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
+
+		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
+			DMEMIT(" max_write_behind %lu",
+			       rs->md.bitmap_info.max_write_behind);
+
+		if (rs->print_flags & DMPF_STRIPE_CACHE) {
+			raid5_conf_t *conf = rs->md.private;
+
+			/* convert from kiB to sectors */
+			DMEMIT(" stripe_cache %d",
+			       conf ? conf->max_nr_stripes * 2 : 0);
+		}
+
+		DMEMIT(" %d", rs->md.raid_disks);
+		for (i = 0; i < rs->md.raid_disks; i++) {
+			DMEMIT(" -"); /* metadata device */
+
+			if (rs->dev[i].data_dev)
+				DMEMIT(" %s", rs->dev[i].data_dev->name);
+			else
+				DMEMIT(" -");
+		}
+	}
+
+	return 0;
+}
+
+static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+{
+	struct raid_set *rs = ti->private;
+	unsigned i;
+	int ret = 0;
+
+	for (i = 0; !ret && i < rs->md.raid_disks; i++)
+		if (rs->dev[i].data_dev)
+			ret = fn(ti,
+				 rs->dev[i].data_dev,
+				 0, /* No offset on data devs */
+				 rs->md.dev_sectors,
+				 data);
+
+	return ret;
+}
+
+static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	struct raid_set *rs = ti->private;
+	unsigned chunk_size = rs->md.chunk_sectors << 9;
+	raid5_conf_t *conf = rs->md.private;
+
+	blk_limits_io_min(limits, chunk_size);
+	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
+}
+
+static void raid_presuspend(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	md_stop_writes(&rs->md);
+}
+
+static void raid_postsuspend(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	mddev_suspend(&rs->md);
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+	struct raid_set *rs = ti->private;
+
+	mddev_resume(&rs->md);
+}
+
+static struct target_type raid_target = {
+	.name = "raid",
+	.version = {1, 0, 0},
+	.module = THIS_MODULE,
+	.ctr = raid_ctr,
+	.dtr = raid_dtr,
+	.map = raid_map,
+	.status = raid_status,
+	.iterate_devices = raid_iterate_devices,
+	.io_hints = raid_io_hints,
+	.presuspend = raid_presuspend,
+	.postsuspend = raid_postsuspend,
+	.resume = raid_resume,
+};
+
+static int __init dm_raid_init(void)
+{
+	return dm_register_target(&raid_target);
+}
+
+static void __exit dm_raid_exit(void)
+{
+	dm_unregister_target(&raid_target);
+}
+
+module_init(dm_raid_init);
+module_exit(dm_raid_exit);
+
+MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_ALIAS("dm-raid4");
+MODULE_ALIAS("dm-raid5");
+MODULE_ALIAS("dm-raid6");
+MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 19a59b0..dee3267 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -261,7 +261,7 @@
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE_FLUSH,
 		.mem.type = DM_IO_KMEM,
-		.mem.ptr.bvec = NULL,
+		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
 	};
 
@@ -637,6 +637,12 @@
 		.client = ms->io_client,
 	};
 
+	if (bio->bi_rw & REQ_DISCARD) {
+		io_req.bi_rw |= REQ_DISCARD;
+		io_req.mem.type = DM_IO_KMEM;
+		io_req.mem.ptr.addr = NULL;
+	}
+
 	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
 		map_region(dest++, m, bio);
 
@@ -670,7 +676,8 @@
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if (bio->bi_rw & REQ_FLUSH) {
+		if ((bio->bi_rw & REQ_FLUSH) ||
+		    (bio->bi_rw & REQ_DISCARD)) {
 			bio_list_add(&sync, bio);
 			continue;
 		}
@@ -1076,8 +1083,10 @@
 	ti->private = ms;
 	ti->split_io = dm_rh_get_region_size(ms->rh);
 	ti->num_flush_requests = 1;
+	ti->num_discard_requests = 1;
 
-	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
+	ms->kmirrord_wq = alloc_workqueue("kmirrord",
+					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
 	if (!ms->kmirrord_wq) {
 		DMERR("couldn't start kmirrord");
 		r = -ENOMEM;
@@ -1130,7 +1139,7 @@
 
 	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	flush_scheduled_work();
+	flush_work_sync(&ms->trigger_event);
 	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
@@ -1406,7 +1415,7 @@
 
 static struct target_type mirror_target = {
 	.name	 = "mirror",
-	.version = {1, 12, 0},
+	.version = {1, 12, 1},
 	.module	 = THIS_MODULE,
 	.ctr	 = mirror_ctr,
 	.dtr	 = mirror_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2129cdb..95891df 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -256,7 +256,7 @@
 	 */
 	INIT_WORK_ONSTACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
-	flush_workqueue(ps->metadata_wq);
+	flush_work(&req.work);
 
 	return req.result;
 }
@@ -818,7 +818,7 @@
 	atomic_set(&ps->pending_count, 0);
 	ps->callbacks = NULL;
 
-	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
+	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
 	if (!ps->metadata_wq) {
 		kfree(ps);
 		DMERR("couldn't start header metadata update thread");
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 53cf79d..fdde53c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,7 +19,6 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/workqueue.h>
 
 #include "dm-exception-store.h"
 
@@ -80,9 +79,6 @@
 	/* Origin writes don't trigger exceptions until this is set */
 	int active;
 
-	/* Whether or not owning mapped_device is suspended */
-	int suspended;
-
 	atomic_t pending_exceptions_count;
 
 	mempool_t *pending_pool;
@@ -106,10 +102,6 @@
 
 	struct dm_kcopyd_client *kcopyd_client;
 
-	/* Queue of snapshot writes for ksnapd to flush */
-	struct bio_list queued_bios;
-	struct work_struct queued_bios_work;
-
 	/* Wait for events based on state_bits */
 	unsigned long state_bits;
 
@@ -160,9 +152,6 @@
 }
 EXPORT_SYMBOL(dm_snap_cow);
 
-static struct workqueue_struct *ksnapd;
-static void flush_queued_bios(struct work_struct *work);
-
 static sector_t chunk_to_sector(struct dm_exception_store *store,
 				chunk_t chunk)
 {
@@ -1110,7 +1099,6 @@
 	s->ti = ti;
 	s->valid = 1;
 	s->active = 0;
-	s->suspended = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	INIT_LIST_HEAD(&s->list);
@@ -1153,9 +1141,6 @@
 
 	spin_lock_init(&s->tracked_chunk_lock);
 
-	bio_list_init(&s->queued_bios);
-	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
 	ti->private = s;
 	ti->num_flush_requests = num_flush_requests;
 
@@ -1279,8 +1264,6 @@
 	struct dm_snapshot *s = ti->private;
 	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
 
-	flush_workqueue(ksnapd);
-
 	down_read(&_origins_lock);
 	/* Check whether exception handover must be cancelled */
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
@@ -1342,20 +1325,6 @@
 	}
 }
 
-static void flush_queued_bios(struct work_struct *work)
-{
-	struct dm_snapshot *s =
-		container_of(work, struct dm_snapshot, queued_bios_work);
-	struct bio *queued_bios;
-	unsigned long flags;
-
-	spin_lock_irqsave(&s->pe_lock, flags);
-	queued_bios = bio_list_get(&s->queued_bios);
-	spin_unlock_irqrestore(&s->pe_lock, flags);
-
-	flush_bios(queued_bios);
-}
-
 static int do_origin(struct dm_dev *origin, struct bio *bio);
 
 /*
@@ -1760,15 +1729,6 @@
 	stop_merge(s);
 }
 
-static void snapshot_postsuspend(struct dm_target *ti)
-{
-	struct dm_snapshot *s = ti->private;
-
-	down_write(&s->lock);
-	s->suspended = 1;
-	up_write(&s->lock);
-}
-
 static int snapshot_preresume(struct dm_target *ti)
 {
 	int r = 0;
@@ -1783,7 +1743,7 @@
 			DMERR("Unable to resume snapshot source until "
 			      "handover completes.");
 			r = -EINVAL;
-		} else if (!snap_src->suspended) {
+		} else if (!dm_suspended(snap_src->ti)) {
 			DMERR("Unable to perform snapshot handover until "
 			      "source is suspended.");
 			r = -EINVAL;
@@ -1816,7 +1776,6 @@
 
 	down_write(&s->lock);
 	s->active = 1;
-	s->suspended = 0;
 	up_write(&s->lock);
 }
 
@@ -2194,7 +2153,7 @@
 
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 7, 0},
+	.version = {1, 7, 1},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
@@ -2207,13 +2166,12 @@
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
 	.map     = snapshot_map,
 	.end_io  = snapshot_end_io,
-	.postsuspend = snapshot_postsuspend,
 	.preresume  = snapshot_preresume,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
@@ -2222,14 +2180,13 @@
 
 static struct target_type merge_target = {
 	.name    = dm_snapshot_merge_target_name,
-	.version = {1, 0, 0},
+	.version = {1, 1, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
 	.map     = snapshot_merge_map,
 	.end_io  = snapshot_end_io,
 	.presuspend = snapshot_merge_presuspend,
-	.postsuspend = snapshot_postsuspend,
 	.preresume  = snapshot_preresume,
 	.resume  = snapshot_merge_resume,
 	.status  = snapshot_status,
@@ -2291,17 +2248,8 @@
 		goto bad_tracked_chunk_cache;
 	}
 
-	ksnapd = create_singlethread_workqueue("ksnapd");
-	if (!ksnapd) {
-		DMERR("Failed to create ksnapd workqueue.");
-		r = -ENOMEM;
-		goto bad_pending_pool;
-	}
-
 	return 0;
 
-bad_pending_pool:
-	kmem_cache_destroy(tracked_chunk_cache);
 bad_tracked_chunk_cache:
 	kmem_cache_destroy(pending_cache);
 bad_pending_cache:
@@ -2322,8 +2270,6 @@
 
 static void __exit dm_snapshot_exit(void)
 {
-	destroy_workqueue(ksnapd);
-
 	dm_unregister_target(&snapshot_target);
 	dm_unregister_target(&origin_target);
 	dm_unregister_target(&merge_target);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index f0371b4..dddfa14 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -39,23 +39,20 @@
 	struct dm_target *ti;
 
 	/* Work struct used for triggering events*/
-	struct work_struct kstriped_ws;
+	struct work_struct trigger_event;
 
 	struct stripe stripe[0];
 };
 
-static struct workqueue_struct *kstriped;
-
 /*
  * An event is triggered whenever a drive
  * drops out of a stripe volume.
  */
 static void trigger_event(struct work_struct *work)
 {
-	struct stripe_c *sc = container_of(work, struct stripe_c, kstriped_ws);
-
+	struct stripe_c *sc = container_of(work, struct stripe_c,
+					   trigger_event);
 	dm_table_event(sc->ti->table);
-
 }
 
 static inline struct stripe_c *alloc_context(unsigned int stripes)
@@ -160,7 +157,7 @@
 		return -ENOMEM;
 	}
 
-	INIT_WORK(&sc->kstriped_ws, trigger_event);
+	INIT_WORK(&sc->trigger_event, trigger_event);
 
 	/* Set pointer to dm target; used in trigger_event */
 	sc->ti = ti;
@@ -211,7 +208,7 @@
 	for (i = 0; i < sc->stripes; i++)
 		dm_put_device(ti, sc->stripe[i].dev);
 
-	flush_workqueue(kstriped);
+	flush_work_sync(&sc->trigger_event);
 	kfree(sc);
 }
 
@@ -367,7 +364,7 @@
 			atomic_inc(&(sc->stripe[i].error_count));
 			if (atomic_read(&(sc->stripe[i].error_count)) <
 			    DM_IO_ERROR_THRESHOLD)
-				queue_work(kstriped, &sc->kstriped_ws);
+				schedule_work(&sc->trigger_event);
 		}
 
 	return error;
@@ -401,7 +398,7 @@
 
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version = {1, 3, 0},
+	.version = {1, 3, 1},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
@@ -422,20 +419,10 @@
 		return r;
 	}
 
-	kstriped = create_singlethread_workqueue("kstriped");
-	if (!kstriped) {
-		DMERR("failed to create workqueue kstriped");
-		dm_unregister_target(&stripe_target);
-		return -ENOMEM;
-	}
-
 	return r;
 }
 
 void dm_stripe_exit(void)
 {
 	dm_unregister_target(&stripe_target);
-	destroy_workqueue(kstriped);
-
-	return;
 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4d705ce..38e4eb1 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -71,6 +71,8 @@
 	void *event_context;
 
 	struct dm_md_mempools *mempools;
+
+	struct list_head target_callbacks;
 };
 
 /*
@@ -204,6 +206,7 @@
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&t->devices);
+	INIT_LIST_HEAD(&t->target_callbacks);
 	atomic_set(&t->holders, 0);
 	t->discards_supported = 1;
 
@@ -325,15 +328,18 @@
 
 	BUG_ON(d->dm_dev.bdev);
 
-	bdev = open_by_devnum(dev, d->dm_dev.mode);
+	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
-	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
-	if (r)
-		blkdev_put(bdev, d->dm_dev.mode);
-	else
-		d->dm_dev.bdev = bdev;
-	return r;
+
+	r = bd_link_disk_holder(bdev, dm_disk(md));
+	if (r) {
+		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
+		return r;
+	}
+
+	d->dm_dev.bdev = bdev;
+	return 0;
 }
 
 /*
@@ -344,8 +350,8 @@
 	if (!d->dm_dev.bdev)
 		return;
 
-	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
-	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
+	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
 	d->dm_dev.bdev = NULL;
 }
 
@@ -1223,10 +1229,17 @@
 	return 0;
 }
 
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
+{
+	list_add(&cb->list, &t->target_callbacks);
+}
+EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
+
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_target_callbacks *cb;
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
@@ -1241,6 +1254,10 @@
 				     bdevname(dd->dm_dev.bdev, b));
 	}
 
+	list_for_each_entry(cb, &t->target_callbacks, list)
+		if (cb->congested_fn)
+			r |= cb->congested_fn(cb, bdi_bits);
+
 	return r;
 }
 
@@ -1262,6 +1279,7 @@
 {
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
+	struct dm_target_callbacks *cb;
 
 	list_for_each_entry(dd, devices, list) {
 		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
@@ -1274,6 +1292,10 @@
 				     dm_device_name(t->md),
 				     bdevname(dd->dm_dev.bdev, b));
 	}
+
+	list_for_each_entry(cb, &t->target_callbacks, list)
+		if (cb->unplug_fn)
+			cb->unplug_fn(cb);
 }
 
 struct mapped_device *dm_table_get_md(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7cb1352..eaa3af0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -32,7 +32,6 @@
 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
 #define DM_COOKIE_LENGTH 24
 
-static DEFINE_MUTEX(dm_mutex);
 static const char *_name = DM_NAME;
 
 static unsigned int major = 0;
@@ -328,7 +327,6 @@
 {
 	struct mapped_device *md;
 
-	mutex_lock(&dm_mutex);
 	spin_lock(&_minor_lock);
 
 	md = bdev->bd_disk->private_data;
@@ -346,7 +344,6 @@
 
 out:
 	spin_unlock(&_minor_lock);
-	mutex_unlock(&dm_mutex);
 
 	return md ? 0 : -ENXIO;
 }
@@ -355,10 +352,12 @@
 {
 	struct mapped_device *md = disk->private_data;
 
-	mutex_lock(&dm_mutex);
+	spin_lock(&_minor_lock);
+
 	atomic_dec(&md->open_count);
 	dm_put(md);
-	mutex_unlock(&dm_mutex);
+
+	spin_unlock(&_minor_lock);
 
 	return 0;
 }
@@ -630,7 +629,7 @@
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
-			trace_block_bio_complete(md->queue, bio);
+			trace_block_bio_complete(md->queue, bio, io_error);
 			bio_endio(bio, io_error);
 		}
 	}
@@ -990,8 +989,8 @@
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
-		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
-				    tio->io->bio->bi_bdev->bd_dev, sector);
+		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
+				      tio->io->bio->bi_bdev->bd_dev, sector);
 
 		generic_make_request(clone);
 	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
@@ -1638,13 +1637,15 @@
 		if (map_request(ti, clone, md))
 			goto requeued;
 
-		spin_lock_irq(q->queue_lock);
+		BUG_ON(!irqs_disabled());
+		spin_lock(q->queue_lock);
 	}
 
 	goto out;
 
 requeued:
-	spin_lock_irq(q->queue_lock);
+	BUG_ON(!irqs_disabled());
+	spin_lock(q->queue_lock);
 
 plug_and_out:
 	if (!elv_queue_empty(q))
@@ -1884,7 +1885,8 @@
 	add_disk(md->disk);
 	format_dev_t(md->name, MKDEV(_major, minor));
 
-	md->wq = create_singlethread_workqueue("kdmflush");
+	md->wq = alloc_workqueue("kdmflush",
+				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
 	if (!md->wq)
 		goto bad_thread;
 
@@ -1992,13 +1994,14 @@
 	wake_up(&md->eventq);
 }
 
+/*
+ * Protected by md->suspend_lock obtained by dm_swap_table().
+ */
 static void __set_size(struct mapped_device *md, sector_t size)
 {
 	set_capacity(md->disk, size);
 
-	mutex_lock(&md->bdev->bd_inode->i_mutex);
 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-	mutex_unlock(&md->bdev->bd_inode->i_mutex);
 }
 
 /*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 175c424..b76cfc8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -288,10 +288,12 @@
 	int rv;
 	int cpu;
 
-	if (mddev == NULL || mddev->pers == NULL) {
+	if (mddev == NULL || mddev->pers == NULL
+	    || !mddev->ready) {
 		bio_io_error(bio);
 		return 0;
 	}
+	smp_rmb(); /* Ensure implications of 'ready' are visible */
 	rcu_read_lock();
 	if (mddev->suspended) {
 		DEFINE_WAIT(__wait);
@@ -703,9 +705,9 @@
 }
 
 /* return the offset of the super block in 512byte sectors */
-static inline sector_t calc_dev_sboffset(struct block_device *bdev)
+static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
 {
-	sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
+	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
 	return MD_NEW_SIZE_SECTORS(num_sectors);
 }
 
@@ -763,7 +765,7 @@
 	 */
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
-	bio->bi_bdev = rdev->bdev;
+	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
 	bio->bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
@@ -793,7 +795,7 @@
 }
 
 int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
-		 struct page *page, int rw)
+		 struct page *page, int rw, bool metadata_op)
 {
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
 	struct completion event;
@@ -801,8 +803,12 @@
 
 	rw |= REQ_SYNC | REQ_UNPLUG;
 
-	bio->bi_bdev = rdev->bdev;
-	bio->bi_sector = sector;
+	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
+		rdev->meta_bdev : rdev->bdev;
+	if (metadata_op)
+		bio->bi_sector = sector + rdev->sb_start;
+	else
+		bio->bi_sector = sector + rdev->data_offset;
 	bio_add_page(bio, page, size, 0);
 	init_completion(&event);
 	bio->bi_private = &event;
@@ -827,7 +833,7 @@
 		return 0;
 
 
-	if (!sync_page_io(rdev, rdev->sb_start, size, rdev->sb_page, READ))
+	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
 		goto fail;
 	rdev->sb_loaded = 1;
 	return 0;
@@ -989,7 +995,7 @@
 	 *
 	 * It also happens to be a multiple of 4Kb.
 	 */
-	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+	rdev->sb_start = calc_dev_sboffset(rdev);
 
 	ret = read_disk_sb(rdev, MD_SB_BYTES);
 	if (ret) return ret;
@@ -1330,7 +1336,7 @@
 		return 0; /* component must fit device */
 	if (rdev->mddev->bitmap_info.offset)
 		return 0; /* can't move bitmap */
-	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+	rdev->sb_start = calc_dev_sboffset(rdev);
 	if (!num_sectors || num_sectors > rdev->sb_start)
 		num_sectors = rdev->sb_start;
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1879,7 +1885,7 @@
 	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
 
 	list_add_rcu(&rdev->same_set, &mddev->disks);
-	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
+	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
 
 	/* May as well allow recovery to be retried once */
 	mddev->recovery_disabled = 0;
@@ -1906,7 +1912,7 @@
 		MD_BUG();
 		return;
 	}
-	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
+	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
 	list_del_rcu(&rdev->same_set);
 	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
 	rdev->mddev = NULL;
@@ -1934,19 +1940,13 @@
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				 shared ? (mdk_rdev_t *)lock_rdev : rdev);
 	if (IS_ERR(bdev)) {
 		printk(KERN_ERR "md: could not open %s.\n",
 			__bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
-	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
-	if (err) {
-		printk(KERN_ERR "md: could not bd_claim %s.\n",
-			bdevname(bdev, b));
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
-		return err;
-	}
 	if (!shared)
 		set_bit(AllReserved, &rdev->flags);
 	rdev->bdev = bdev;
@@ -1959,8 +1959,7 @@
 	rdev->bdev = NULL;
 	if (!bdev)
 		MD_BUG();
-	bd_release(bdev);
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 void md_autodetect_dev(dev_t dev);
@@ -2473,6 +2472,10 @@
 			if (rdev2->raid_disk == slot)
 				return -EEXIST;
 
+		if (slot >= rdev->mddev->raid_disks &&
+		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
+			return -ENOSPC;
+
 		rdev->raid_disk = slot;
 		if (test_bit(In_sync, &rdev->flags))
 			rdev->saved_raid_disk = slot;
@@ -2490,7 +2493,8 @@
 			/* failure here is OK */;
 		/* don't wakeup anyone, leave that to userspace. */
 	} else {
-		if (slot >= rdev->mddev->raid_disks)
+		if (slot >= rdev->mddev->raid_disks &&
+		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
 			return -ENOSPC;
 		rdev->raid_disk = slot;
 		/* assume it is working */
@@ -3115,7 +3119,7 @@
 		char nm[20];
 		if (rdev->raid_disk < 0)
 			continue;
-		if (rdev->new_raid_disk > mddev->raid_disks)
+		if (rdev->new_raid_disk >= mddev->raid_disks)
 			rdev->new_raid_disk = -1;
 		if (rdev->new_raid_disk == rdev->raid_disk)
 			continue;
@@ -3744,6 +3748,8 @@
 	return sprintf(page, "%s\n", type);
 }
 
+static void reap_sync_thread(mddev_t *mddev);
+
 static ssize_t
 action_store(mddev_t *mddev, const char *page, size_t len)
 {
@@ -3758,9 +3764,7 @@
 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			md_unregister_thread(mddev->sync_thread);
-			mddev->sync_thread = NULL;
-			mddev->recovery = 0;
+			reap_sync_thread(mddev);
 		}
 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -3912,7 +3916,7 @@
 static ssize_t
 sync_completed_show(mddev_t *mddev, char *page)
 {
-	unsigned long max_sectors, resync;
+	unsigned long long max_sectors, resync;
 
 	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return sprintf(page, "none\n");
@@ -3923,7 +3927,7 @@
 		max_sectors = mddev->dev_sectors;
 
 	resync = mddev->curr_resync_completed;
-	return sprintf(page, "%lu / %lu\n", resync, max_sectors);
+	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
 }
 
 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
@@ -4010,19 +4014,24 @@
 {
 	char *e;
 	unsigned long long new = simple_strtoull(buf, &e, 10);
+	unsigned long long old = mddev->suspend_lo;
 
 	if (mddev->pers == NULL || 
 	    mddev->pers->quiesce == NULL)
 		return -EINVAL;
 	if (buf == e || (*e && *e != '\n'))
 		return -EINVAL;
-	if (new >= mddev->suspend_hi ||
-	    (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
-		mddev->suspend_lo = new;
+
+	mddev->suspend_lo = new;
+	if (new >= old)
+		/* Shrinking suspended region */
 		mddev->pers->quiesce(mddev, 2);
-		return len;
-	} else
-		return -EINVAL;
+	else {
+		/* Expanding suspended region - need to wait */
+		mddev->pers->quiesce(mddev, 1);
+		mddev->pers->quiesce(mddev, 0);
+	}
+	return len;
 }
 static struct md_sysfs_entry md_suspend_lo =
 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
@@ -4039,20 +4048,24 @@
 {
 	char *e;
 	unsigned long long new = simple_strtoull(buf, &e, 10);
+	unsigned long long old = mddev->suspend_hi;
 
 	if (mddev->pers == NULL ||
 	    mddev->pers->quiesce == NULL)
 		return -EINVAL;
 	if (buf == e || (*e && *e != '\n'))
 		return -EINVAL;
-	if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
-	    (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
-		mddev->suspend_hi = new;
+
+	mddev->suspend_hi = new;
+	if (new <= old)
+		/* Shrinking suspended region */
+		mddev->pers->quiesce(mddev, 2);
+	else {
+		/* Expanding suspended region - need to wait */
 		mddev->pers->quiesce(mddev, 1);
 		mddev->pers->quiesce(mddev, 0);
-		return len;
-	} else
-		return -EINVAL;
+	}
+	return len;
 }
 static struct md_sysfs_entry md_suspend_hi =
 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
@@ -4430,7 +4443,9 @@
 		 * We don't want the data to overlap the metadata,
 		 * Internal Bitmap issues have been handled elsewhere.
 		 */
-		if (rdev->data_offset < rdev->sb_start) {
+		if (rdev->meta_bdev) {
+			/* Nothing to check */;
+		} else if (rdev->data_offset < rdev->sb_start) {
 			if (mddev->dev_sectors &&
 			    rdev->data_offset + mddev->dev_sectors
 			    > rdev->sb_start) {
@@ -4564,7 +4579,8 @@
 	mddev->safemode_timer.data = (unsigned long) mddev;
 	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
 	mddev->in_sync = 1;
-
+	smp_wmb();
+	mddev->ready = 1;
 	list_for_each_entry(rdev, &mddev->disks, same_set)
 		if (rdev->raid_disk >= 0) {
 			char nm[20];
@@ -4701,13 +4717,12 @@
 	mddev->plug = NULL;
 }
 
-void md_stop_writes(mddev_t *mddev)
+static void __md_stop_writes(mddev_t *mddev)
 {
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		md_unregister_thread(mddev->sync_thread);
-		mddev->sync_thread = NULL;
+		reap_sync_thread(mddev);
 	}
 
 	del_timer_sync(&mddev->safemode_timer);
@@ -4721,10 +4736,18 @@
 		md_update_sb(mddev, 1);
 	}
 }
+
+void md_stop_writes(mddev_t *mddev)
+{
+	mddev_lock(mddev);
+	__md_stop_writes(mddev);
+	mddev_unlock(mddev);
+}
 EXPORT_SYMBOL_GPL(md_stop_writes);
 
 void md_stop(mddev_t *mddev)
 {
+	mddev->ready = 0;
 	mddev->pers->stop(mddev);
 	if (mddev->pers->sync_request && mddev->to_remove == NULL)
 		mddev->to_remove = &md_redundancy_group;
@@ -4744,7 +4767,7 @@
 		goto out;
 	}
 	if (mddev->pers) {
-		md_stop_writes(mddev);
+		__md_stop_writes(mddev);
 
 		err  = -ENXIO;
 		if (mddev->ro==1)
@@ -4781,7 +4804,7 @@
 		if (mddev->ro)
 			set_disk_ro(disk, 0);
 
-		md_stop_writes(mddev);
+		__md_stop_writes(mddev);
 		md_stop(mddev);
 		mddev->queue->merge_bvec_fn = NULL;
 		mddev->queue->unplug_fn = NULL;
@@ -5159,9 +5182,10 @@
 		/* set saved_raid_disk if appropriate */
 		if (!mddev->persistent) {
 			if (info->state & (1<<MD_DISK_SYNC)  &&
-			    info->raid_disk < mddev->raid_disks)
+			    info->raid_disk < mddev->raid_disks) {
 				rdev->raid_disk = info->raid_disk;
-			else
+				set_bit(In_sync, &rdev->flags);
+			} else
 				rdev->raid_disk = -1;
 		} else
 			super_types[mddev->major_version].
@@ -5238,7 +5262,7 @@
 			printk(KERN_INFO "md: nonpersistent superblock ...\n");
 			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 		} else
-			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+			rdev->sb_start = calc_dev_sboffset(rdev);
 		rdev->sectors = rdev->sb_start;
 
 		err = bind_rdev_to_array(rdev, mddev);
@@ -5305,7 +5329,7 @@
 	}
 
 	if (mddev->persistent)
-		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
+		rdev->sb_start = calc_dev_sboffset(rdev);
 	else
 		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 
@@ -5518,7 +5542,6 @@
 	 * sb_start or, if that is <data_offset, it must fit before the size
 	 * of each device.  If num_sectors is zero, we find the largest size
 	 * that fits.
-
 	 */
 	if (mddev->sync_thread)
 		return -EBUSY;
@@ -6041,7 +6064,8 @@
 			 || kthread_should_stop(),
 			 thread->timeout);
 
-		if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags))
+		clear_bit(THREAD_WAKEUP, &thread->flags);
+		if (!kthread_should_stop())
 			thread->run(thread->mddev);
 	}
 
@@ -6807,7 +6831,7 @@
 		       desc, mdname(mddev));
 		mddev->curr_resync = j;
 	}
-	mddev->curr_resync_completed = mddev->curr_resync;
+	mddev->curr_resync_completed = j;
 
 	while (j < max_sectors) {
 		sector_t sectors;
@@ -6825,8 +6849,7 @@
 			md_unplug(mddev);
 			wait_event(mddev->recovery_wait,
 				   atomic_read(&mddev->recovery_active) == 0);
-			mddev->curr_resync_completed =
-				mddev->curr_resync;
+			mddev->curr_resync_completed = j;
 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
@@ -7031,6 +7054,45 @@
 	}
 	return spares;
 }
+
+static void reap_sync_thread(mddev_t *mddev)
+{
+	mdk_rdev_t *rdev;
+
+	/* resync has finished, collect result */
+	md_unregister_thread(mddev->sync_thread);
+	mddev->sync_thread = NULL;
+	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+		/* success...*/
+		/* activate any spares */
+		if (mddev->pers->spare_active(mddev))
+			sysfs_notify(&mddev->kobj, NULL,
+				     "degraded");
+	}
+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    mddev->pers->finish_reshape)
+		mddev->pers->finish_reshape(mddev);
+	md_update_sb(mddev, 1);
+
+	/* if array is no longer degraded, then any saved_raid_disk
+	 * information must be scrapped
+	 */
+	if (!mddev->degraded)
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			rdev->saved_raid_disk = -1;
+
+	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	/* flag recovery needed just to double check */
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	sysfs_notify_dirent_safe(mddev->sysfs_action);
+	md_new_event(mddev);
+}
+
 /*
  * This routine is regularly called by all per-raid-array threads to
  * deal with generic issues like resync and super-block update.
@@ -7055,9 +7117,6 @@
  */
 void md_check_recovery(mddev_t *mddev)
 {
-	mdk_rdev_t *rdev;
-
-
 	if (mddev->bitmap)
 		bitmap_daemon_work(mddev);
 
@@ -7125,34 +7184,7 @@
 			goto unlock;
 		}
 		if (mddev->sync_thread) {
-			/* resync has finished, collect result */
-			md_unregister_thread(mddev->sync_thread);
-			mddev->sync_thread = NULL;
-			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
-				/* success...*/
-				/* activate any spares */
-				if (mddev->pers->spare_active(mddev))
-					sysfs_notify(&mddev->kobj, NULL,
-						     "degraded");
-			}
-			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-			    mddev->pers->finish_reshape)
-				mddev->pers->finish_reshape(mddev);
-			md_update_sb(mddev, 1);
-
-			/* if array is no-longer degraded, then any saved_raid_disk
-			 * information must be scrapped
-			 */
-			if (!mddev->degraded)
-				list_for_each_entry(rdev, &mddev->disks, same_set)
-					rdev->saved_raid_disk = -1;
-
-			mddev->recovery = 0;
-			/* flag recovery needed just to double check */
-			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			sysfs_notify_dirent_safe(mddev->sysfs_action);
-			md_new_event(mddev);
+			reap_sync_thread(mddev);
 			goto unlock;
 		}
 		/* Set RUNNING before clearing NEEDED to avoid
@@ -7210,7 +7242,11 @@
 					" thread...\n", 
 					mdname(mddev));
 				/* leave the spares where they are, it shouldn't hurt */
-				mddev->recovery = 0;
+				clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+				clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+				clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+				clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+				clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 			} else
 				md_wakeup_thread(mddev->sync_thread);
 			sysfs_notify_dirent_safe(mddev->sysfs_action);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d05bab5..eec517c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -60,6 +60,12 @@
 	mddev_t *mddev;			/* RAID array if running */
 	int last_events;		/* IO event timestamp */
 
+	/*
+	 * If meta_bdev is non-NULL, it means that a separate device is
+	 * being used to store the metadata (superblock/bitmap) which
+	 * would otherwise be contained on the same device as the data (bdev).
+	 */
+	struct block_device *meta_bdev;
 	struct block_device *bdev;	/* block device handle */
 
 	struct page	*sb_page;
@@ -148,7 +154,8 @@
 						       * are happening, so run/
 						       * takeover/stop are not safe
 						       */
-
+	int				ready; /* Set when it is safe to pass
+						* IO requests down */
 	struct gendisk			*gendisk;
 
 	struct kobject			kobj;
@@ -497,8 +504,8 @@
 extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 			   sector_t sector, int size, struct page *page);
 extern void md_super_wait(mddev_t *mddev);
-extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
-			struct page *page, int rw);
+extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, 
+			struct page *page, int rw, bool metadata_op);
 extern void md_do_sync(mddev_t *mddev);
 extern void md_new_event(mddev_t *mddev);
 extern int md_allow_write(mddev_t *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 845cf95..a23ffa3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,8 +1027,9 @@
 	} else
 		set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n"
-	       KERN_ALERT "md/raid1:%s: Operation continuing on %d devices.\n",
+	printk(KERN_ALERT
+	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
+	       "md/raid1:%s: Operation continuing on %d devices.\n",
 	       mdname(mddev), bdevname(rdev->bdev, b),
 	       mdname(mddev), conf->raid_disks - mddev->degraded);
 }
@@ -1364,10 +1365,10 @@
 					 */
 					rdev = conf->mirrors[d].rdev;
 					if (sync_page_io(rdev,
-							 sect + rdev->data_offset,
+							 sect,
 							 s<<9,
 							 bio->bi_io_vec[idx].bv_page,
-							 READ)) {
+							 READ, false)) {
 						success = 1;
 						break;
 					}
@@ -1390,10 +1391,10 @@
 					rdev = conf->mirrors[d].rdev;
 					atomic_add(s, &rdev->corrected_errors);
 					if (sync_page_io(rdev,
-							 sect + rdev->data_offset,
+							 sect,
 							 s<<9,
 							 bio->bi_io_vec[idx].bv_page,
-							 WRITE) == 0)
+							 WRITE, false) == 0)
 						md_error(mddev, rdev);
 				}
 				d = start;
@@ -1405,10 +1406,10 @@
 						continue;
 					rdev = conf->mirrors[d].rdev;
 					if (sync_page_io(rdev,
-							 sect + rdev->data_offset,
+							 sect,
 							 s<<9,
 							 bio->bi_io_vec[idx].bv_page,
-							 READ) == 0)
+							 READ, false) == 0)
 						md_error(mddev, rdev);
 				}
 			} else {
@@ -1488,10 +1489,8 @@
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags) &&
-			    sync_page_io(rdev,
-					 sect + rdev->data_offset,
-					 s<<9,
-					 conf->tmppage, READ))
+			    sync_page_io(rdev, sect, s<<9,
+					 conf->tmppage, READ, false))
 				success = 1;
 			else {
 				d++;
@@ -1514,9 +1513,8 @@
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags)) {
-				if (sync_page_io(rdev,
-						 sect + rdev->data_offset,
-						 s<<9, conf->tmppage, WRITE)
+				if (sync_page_io(rdev, sect, s<<9,
+						 conf->tmppage, WRITE, false)
 				    == 0)
 					/* Well, this device is dead */
 					md_error(mddev, rdev);
@@ -1531,9 +1529,8 @@
 			rdev = conf->mirrors[d].rdev;
 			if (rdev &&
 			    test_bit(In_sync, &rdev->flags)) {
-				if (sync_page_io(rdev,
-						 sect + rdev->data_offset,
-						 s<<9, conf->tmppage, READ)
+				if (sync_page_io(rdev, sect, s<<9,
+						 conf->tmppage, READ, false)
 				    == 0)
 					/* Well, this device is dead */
 					md_error(mddev, rdev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0641674..69b6595 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1051,8 +1051,9 @@
 	}
 	set_bit(Faulty, &rdev->flags);
 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	printk(KERN_ALERT "md/raid10:%s: Disk failure on %s, disabling device.\n"
-	       KERN_ALERT "md/raid10:%s: Operation continuing on %d devices.\n",
+	printk(KERN_ALERT
+	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
+	       "md/raid10:%s: Operation continuing on %d devices.\n",
 	       mdname(mddev), bdevname(rdev->bdev, b),
 	       mdname(mddev), conf->raid_disks - mddev->degraded);
 }
@@ -1559,9 +1560,9 @@
 				rcu_read_unlock();
 				success = sync_page_io(rdev,
 						       r10_bio->devs[sl].addr +
-						       sect + rdev->data_offset,
+						       sect,
 						       s<<9,
-						       conf->tmppage, READ);
+						       conf->tmppage, READ, false);
 				rdev_dec_pending(rdev, mddev);
 				rcu_read_lock();
 				if (success)
@@ -1598,8 +1599,8 @@
 				atomic_add(s, &rdev->corrected_errors);
 				if (sync_page_io(rdev,
 						 r10_bio->devs[sl].addr +
-						 sect + rdev->data_offset,
-						 s<<9, conf->tmppage, WRITE)
+						 sect,
+						 s<<9, conf->tmppage, WRITE, false)
 				    == 0) {
 					/* Well, this device is dead */
 					printk(KERN_NOTICE
@@ -1635,9 +1636,9 @@
 				rcu_read_unlock();
 				if (sync_page_io(rdev,
 						 r10_bio->devs[sl].addr +
-						 sect + rdev->data_offset,
+						 sect,
 						 s<<9, conf->tmppage,
-						 READ) == 0) {
+						 READ, false) == 0) {
 					/* Well, this device is dead */
 					printk(KERN_NOTICE
 					       "md/raid10:%s: unable to read back "
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dc574f3..5044bab 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1721,7 +1721,6 @@
 		set_bit(Faulty, &rdev->flags);
 		printk(KERN_ALERT
 		       "md/raid:%s: Disk failure on %s, disabling device.\n"
-		       KERN_ALERT
 		       "md/raid:%s: Operation continuing on %d devices.\n",
 		       mdname(mddev),
 		       bdevname(rdev->bdev, b),
@@ -4237,7 +4236,7 @@
 		wait_event(conf->wait_for_overlap,
 			   atomic_read(&conf->reshape_stripes)==0);
 		mddev->reshape_position = conf->reshape_progress;
-		mddev->curr_resync_completed = mddev->curr_resync;
+		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
@@ -4338,7 +4337,7 @@
 		wait_event(conf->wait_for_overlap,
 			   atomic_read(&conf->reshape_stripes) == 0);
 		mddev->reshape_position = conf->reshape_progress;
-		mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
+		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		md_wakeup_thread(mddev->thread);
@@ -5339,7 +5338,7 @@
 		    && !test_bit(Faulty, &tmp->rdev->flags)
 		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
 			count++;
-			sysfs_notify_dirent(tmp->rdev->sysfs_state);
+			sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
 		}
 	}
 	spin_lock_irqsave(&conf->device_lock, flags);
@@ -5528,8 +5527,8 @@
 		return -ENOSPC;
 
 	list_for_each_entry(rdev, &mddev->disks, same_set)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags))
+		if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks)
+		     && !test_bit(Faulty, &rdev->flags))
 			spares++;
 
 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5589,6 +5588,11 @@
 					/* Failure here is OK */;
 			} else
 				break;
+		} else if (rdev->raid_disk >= conf->previous_raid_disks
+			   && !test_bit(Faulty, &rdev->flags)) {
+			/* This is a spare that was manually added */
+			set_bit(In_sync, &rdev->flags);
+			added_devices++;
 		}
 
 	/* When a reshape changes the number of devices, ->degraded
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 4df42aa..51752a9 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1329,7 +1329,8 @@
 		return -EBUSY;
 
 	dvb_net_stop(net);
-	flush_scheduled_work();
+	flush_work_sync(&priv->set_multicast_list_wq);
+	flush_work_sync(&priv->restart_net_feed_wq);
 	printk("dvb_net: removed network interface %s\n", net->name);
 	unregister_netdev(net);
 	dvbnet->state[num]=0;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index c6498f5..23005b3 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -313,8 +313,7 @@
 int dvb_usb_remote_exit(struct dvb_usb_device *d)
 {
 	if (d->state & DVB_USB_STATE_REMOTE) {
-		cancel_rearming_delayed_work(&d->rc_query_work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&d->rc_query_work);
 		if (d->props.rc.mode == DVB_RC_LEGACY)
 			input_unregister_device(d->input_dev);
 		else
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
index a7b369a..9f73c2c 100644
--- a/drivers/media/dvb/mantis/mantis_evm.c
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -111,7 +111,7 @@
 	struct mantis_pci *mantis = ca->ca_priv;
 
 	dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
-	flush_scheduled_work();
+	flush_work_sync(&ca->hif_evm_work);
 	mantis_hif_exit(ca);
 	mantis_pcmcia_exit(ca);
 }
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
index 7d2f239..97b889e 100644
--- a/drivers/media/dvb/mantis/mantis_uart.c
+++ b/drivers/media/dvb/mantis/mantis_uart.c
@@ -182,5 +182,6 @@
 {
 	/* disable interrupt */
 	mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
+	flush_work_sync(&mantis->uart_work);
 }
 EXPORT_SYMBOL_GPL(mantis_uart_exit);
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 849cd17..91399c9 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -189,8 +189,14 @@
 	INIT_WORK(&dev->request_module_wk, request_module_async);
 	schedule_work(&dev->request_module_wk);
 }
+
+static void flush_request_modules(struct bttv *dev)
+{
+	flush_work_sync(&dev->request_module_wk);
+}
 #else
 #define request_modules(dev)
+#define flush_request_modules(dev)
 #endif /* CONFIG_MODULES */
 
 
@@ -4429,6 +4435,9 @@
 	if (bttv_verbose)
 		printk("bttv%d: unloading\n",btv->c.nr);
 
+	if (bttv_tvcards[btv->c.type].has_dvb)
+		flush_request_modules(btv);
+
 	/* shutdown everything (DMA+IRQs) */
 	btand(~15, BT848_GPIO_DMA_CTL);
 	btwrite(0, BT848_INT_MASK);
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 97793b9..e8b64bc 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -319,16 +319,13 @@
 
 static void bttv_ir_stop(struct bttv *btv)
 {
-	if (btv->remote->polling) {
+	if (btv->remote->polling)
 		del_timer_sync(&btv->remote->timer);
-		flush_scheduled_work();
-	}
 
 	if (btv->remote->rc5_gpio) {
 		u32 gpio;
 
 		del_timer_sync(&btv->remote->timer);
-		flush_scheduled_work();
 
 		gpio = bttv_gpio_read(&btv->c);
 		bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 789087c..49f1b8f 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2184,9 +2184,7 @@
 	struct cafe_camera *cam = to_cam(v4l2_dev);
 	int ret = 0;
 
-	ret = pci_restore_state(pdev);
-	if (ret)
-		return ret;
+	pci_restore_state(pdev);
 	ret = pci_enable_device(pdev);
 
 	if (ret) {
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 676e5be..133ec2b 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -267,8 +267,14 @@
 	INIT_WORK(&dev->request_module_wk, request_module_async);
 	schedule_work(&dev->request_module_wk);
 }
+
+static void flush_request_modules(struct cx18 *dev)
+{
+	flush_work_sync(&dev->request_module_wk);
+}
 #else
 #define request_modules(dev)
+#define flush_request_modules(dev)
 #endif /* CONFIG_MODULES */
 
 /* Generic utility functions */
@@ -1233,6 +1239,8 @@
 
 	CX18_DEBUG_INFO("Removing Card\n");
 
+	flush_request_modules(cx);
+
 	/* Stop all captures */
 	CX18_DEBUG_INFO("Stopping all streams\n");
 	if (atomic_read(&cx->tot_capturing) > 0)
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
index 2c00980..7e40035 100644
--- a/drivers/media/video/cx18/cx23418.h
+++ b/drivers/media/video/cx18/cx23418.h
@@ -177,7 +177,7 @@
    IN[0] - Task handle.
    IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only,
 		      3 = 2D H/V separable, 4 = 2D symmetric non-separable
-   IN[2] - chroma type: 0 - diable, 1 = 1D horizontal
+   IN[2] - chroma type: 0 - disable, 1 = 1D horizontal
    ReturnCode - One of the ERR_CAPTURE_... */
 #define CX18_CPU_SET_SPATIAL_FILTER_TYPE     	(CPU_CMD_MASK_CAPTURE | 0x000C)
 
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 6905607..588f3e8 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -813,8 +813,14 @@
 	INIT_WORK(&dev->request_module_wk, request_module_async);
 	schedule_work(&dev->request_module_wk);
 }
+
+static void flush_request_modules(struct cx231xx *dev)
+{
+	flush_work_sync(&dev->request_module_wk);
+}
 #else
 #define request_modules(dev)
+#define flush_request_modules(dev)
 #endif /* CONFIG_MODULES */
 
 /*
@@ -1147,6 +1153,8 @@
 	if (!dev->udev)
 		return;
 
+	flush_request_modules(dev);
+
 	/* delete v4l2 device */
 	v4l2_device_unregister(&dev->v4l2_dev);
 
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 0b0d066..199b996 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -229,8 +229,6 @@
 		v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
 		v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
 	}
-
-	flush_scheduled_work();
 }
 
 static void cx23885_input_ir_close(struct rc_dev *rc)
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 627926f..7eb79af 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -261,7 +261,7 @@
 	u32 rem;
 
 	/*
-	 * The 2 lsb's of the pulse width timer count are not accessable, hence
+	 * The 2 lsb's of the pulse width timer count are not accessible, hence
 	 * the (1 << 2)
 	 */
 	n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index f7d71ac..addf954 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -66,8 +66,14 @@
 	INIT_WORK(&dev->request_module_wk, request_module_async);
 	schedule_work(&dev->request_module_wk);
 }
+
+static void flush_request_modules(struct cx8802_dev *dev)
+{
+	flush_work_sync(&dev->request_module_wk);
+}
 #else
 #define request_modules(dev)
+#define flush_request_modules(dev)
 #endif /* CONFIG_MODULES */
 
 
@@ -819,6 +825,8 @@
 
 	dprintk( 1, "%s\n", __func__);
 
+	flush_request_modules(dev);
+
 	if (!list_empty(&dev->drvlist)) {
 		struct cx8802_driver *drv, *tmp;
 		int err;
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 188841b..ebd5c43 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -33,7 +33,7 @@
 #define regr(reg)               readl((reg) + vpif_base)
 #define regw(value, reg)        writel(value, (reg + vpif_base))
 
-/* Register Addresss Offsets */
+/* Register Address Offsets */
 #define VPIF_PID			(0x0000)
 #define VPIF_CH0_CTRL			(0x0004)
 #define VPIF_CH1_CTRL			(0x0008)
diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c
index 7918680..3e5cf27 100644
--- a/drivers/media/video/davinci/vpss.c
+++ b/drivers/media/video/davinci/vpss.c
@@ -85,7 +85,7 @@
 /*
  * vpss operations. Depends on platform. Not all functions are available
  * on all platforms. The api, first check if a functio is available before
- * invoking it. In the probe, the function ptrs are intialized based on
+ * invoking it. In the probe, the function ptrs are initialized based on
  * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc.
  */
 struct vpss_hw_ops {
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 8af302b..099d5df 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2690,8 +2690,14 @@
 	INIT_WORK(&dev->request_module_wk, request_module_async);
 	schedule_work(&dev->request_module_wk);
 }
+
+static void flush_request_modules(struct em28xx *dev)
+{
+	flush_work_sync(&dev->request_module_wk);
+}
 #else
 #define request_modules(dev)
+#define flush_request_modules(dev)
 #endif /* CONFIG_MODULES */
 
 /*
@@ -3118,6 +3124,8 @@
 
 	em28xx_info("disconnecting %s\n", dev->vdev->name);
 
+	flush_request_modules(dev);
+
 	/* wait until all current v4l2 io is finished then deallocate
 	   resources */
 	mutex_lock(&dev->lock);
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 29cc744..ba1ba86 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -551,7 +551,7 @@
 {
 	if (dev->sbutton_input_dev != NULL) {
 		em28xx_info("Deregistering snapshot button\n");
-		cancel_rearming_delayed_work(&dev->sbutton_query_work);
+		cancel_delayed_work_sync(&dev->sbutton_query_work);
 		input_unregister_device(dev->sbutton_input_dev);
 		dev->sbutton_input_dev = NULL;
 	}
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 83de97a..029a4ba 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1286,7 +1286,7 @@
 	videobuf_mmap_free(q);
 
 	/* Even if apply changes fails we should continue
-	   freeing allocated memeory */
+	   freeing allocated memory */
 	if (vout->streaming) {
 		u32 mask = 0;
 
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 378b094..0175527 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1198,7 +1198,7 @@
 
 	atomic_inc(&cam->reset_disable);
 
-	flush_scheduled_work();
+	flush_work_sync(&cam->sensor_reset_work);
 
 	rval = videobuf_streamoff(q);
 	if (!rval) {
@@ -1512,7 +1512,7 @@
 
 	atomic_inc(&cam->reset_disable);
 
-	flush_scheduled_work();
+	flush_work_sync(&cam->sensor_reset_work);
 
 	/* stop streaming capture */
 	videobuf_streamoff(&fh->vbq);
@@ -1536,7 +1536,7 @@
 	 * not be scheduled anymore since streaming is already
 	 * disabled.)
 	 */
-	flush_scheduled_work();
+	flush_work_sync(&cam->sensor_reset_work);
 
 	mutex_lock(&cam->mutex);
 	if (atomic_dec_return(&cam->users) == 0) {
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 756a278..6abeecf 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -166,8 +166,14 @@
 	schedule_work(&dev->request_module_wk);
 }
 
+static void flush_request_submodules(struct saa7134_dev *dev)
+{
+	flush_work_sync(&dev->request_module_wk);
+}
+
 #else
 #define request_submodules(dev)
+#define flush_request_submodules(dev)
 #endif /* CONFIG_MODULES */
 
 /* ------------------------------------------------------------------ */
@@ -1010,8 +1016,6 @@
 		}
 	}
 
-	request_submodules(dev);
-
 	v4l2_prio_init(&dev->prio);
 
 	mutex_lock(&saa7134_devlist_lock);
@@ -1066,6 +1070,7 @@
 	if (saa7134_dmasound_init && !dev->dmasound.priv_data)
 		saa7134_dmasound_init(dev);
 
+	request_submodules(dev);
 	return 0;
 
  fail4:
@@ -1091,6 +1096,8 @@
 	struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev);
 	struct saa7134_mpeg_ops *mops;
 
+	flush_request_submodules(dev);
+
 	/* Release DMA sound modules if present */
 	if (saa7134_dmasound_exit && dev->dmasound.priv_data) {
 		saa7134_dmasound_exit(dev);
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index b890aaf..6b8459c7 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -553,7 +553,7 @@
 
 	if (NULL == dev->empress_dev)
 		return 0;
-	flush_scheduled_work();
+	flush_work_sync(&dev->empress_workqueue);
 	video_unregister_device(dev->empress_dev);
 	dev->empress_dev = NULL;
 	return 0;
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index d6bf3f8..58af67f 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -655,8 +655,8 @@
 		goto out;
 	}
 
-	/* Check that the hardware is accessable. If the status bytes are
-	 * 0xFF then the device is not accessable, the the IRQ belongs
+	/* Check that the hardware is accessible. If the status bytes are
+	 * 0xFF then the device is not accessible, and the IRQ belongs
 	 * to another driver.
 	 * 4 x u32 interrupt registers.
 	 */
diff --git a/drivers/media/video/sn9c102/sn9c102_sensor.h b/drivers/media/video/sn9c102/sn9c102_sensor.h
index 494957b..7f38549 100644
--- a/drivers/media/video/sn9c102/sn9c102_sensor.h
+++ b/drivers/media/video/sn9c102/sn9c102_sensor.h
@@ -147,7 +147,7 @@
 
 struct sn9c102_sensor {
 	char name[32], /* sensor name */
-	     maintainer[64]; /* name of the mantainer <email> */
+	     maintainer[64]; /* name of the maintainer <email> */
 
 	enum sn9c102_bridge supported_bridge; /* supported SN9C1xx bridges */
 
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index c91424c..99c81a9 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -452,7 +452,8 @@
 
 	device_init_wakeup(&udev->dev, 1);
 #ifdef CONFIG_PM
-	pd->udev->autosuspend_delay = HZ * PM_SUSPEND_DELAY;
+	pm_runtime_set_autosuspend_delay(&pd->udev->dev,
+			1000 * PM_SUSPEND_DELAY);
 	usb_enable_autosuspend(pd->udev);
 
 	if (in_hibernation(pd)) {
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index e63b40f..c799e4e 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -789,7 +789,7 @@
  * Get the value of a TVP7002 decoder device register.
  * Returns zero when successful, -EINVAL if register read fails or
  * access to I2C client fails, -EPERM if the call is not allowed
- * by diabled CAP_SYS_ADMIN.
+ * by disabled CAP_SYS_ADMIN.
  */
 static int tvp7002_g_register(struct v4l2_subdev *sd,
 						struct v4l2_dbg_register *reg)
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index e25aca5..2f973cd 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -13,14 +13,12 @@
 #include <linux/pci.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-chip-ident.h>
 #include <media/videobuf-dma-sg.h>
-#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_qos_params.h>
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index c00fe82..e9a3eab 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -465,6 +465,7 @@
 		if (!host->card) {
 			host->card = card;
 			if (device_register(&card->dev)) {
+				put_device(&card->dev);
 				kfree(host->card);
 				host->card = NULL;
 			}
@@ -510,14 +511,18 @@
 {
 	int rc;
 
-	if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
-		return -ENOMEM;
+	while (1) {
+		if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
+			return -ENOMEM;
 
-	spin_lock(&memstick_host_lock);
-	rc = idr_get_new(&memstick_host_idr, host, &host->id);
-	spin_unlock(&memstick_host_lock);
-	if (rc)
-		return rc;
+		spin_lock(&memstick_host_lock);
+		rc = idr_get_new(&memstick_host_idr, host, &host->id);
+		spin_unlock(&memstick_host_lock);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			return rc;
+	}
 
 	dev_set_name(&host->dev, "memstick%u", host->id);
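
The host registration hunk above adopts the usual idr_pre_get()/idr_get_new()
retry idiom: the preallocated node can be consumed by a concurrent caller, in
which case idr_get_new() returns -EAGAIN and the allocation must be retried. A
generic sketch of the idiom, assuming placeholder names (my_idr, my_lock and
my_assign_id are not from this patch):

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(my_idr);
	static DEFINE_SPINLOCK(my_lock);

	static int my_assign_id(void *ptr, int *id)
	{
		int rc;

		do {
			if (!idr_pre_get(&my_idr, GFP_KERNEL))
				return -ENOMEM;	/* preallocation failed */

			spin_lock(&my_lock);
			rc = idr_get_new(&my_idr, ptr, id);
			spin_unlock(&my_lock);
			/* -EAGAIN: another caller used the preallocated
			 * node; preallocate again and retry. */
		} while (rc == -EAGAIN);

		return rc;
	}
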
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 02362ec..57b42bf 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -23,7 +23,6 @@
 
 #define DRIVER_NAME "mspro_block"
 
-static DEFINE_MUTEX(mspro_block_mutex);
 static int major;
 module_param(major, int, 0644);
 
@@ -160,6 +159,13 @@
 	int                   (*mrq_handler)(struct memstick_dev *card,
 					     struct memstick_request **mrq);
 
+
+	/* Default request setup function for the data access method
+	 * preferred by this host instance.
+	 */
+	void                  (*setup_transfer)(struct memstick_dev *card,
+						u64 offset, size_t length);
+
 	struct attribute_group attr_group;
 
 	struct scatterlist    req_sg[MSPRO_BLOCK_MAX_SEGS];
@@ -181,7 +187,6 @@
 	struct mspro_block_data *msb = disk->private_data;
 	int rc = -ENXIO;
 
-	mutex_lock(&mspro_block_mutex);
 	mutex_lock(&mspro_block_disk_lock);
 
 	if (msb && msb->card) {
@@ -193,7 +198,6 @@
 	}
 
 	mutex_unlock(&mspro_block_disk_lock);
-	mutex_unlock(&mspro_block_mutex);
 
 	return rc;
 }
@@ -225,11 +229,7 @@
 
 static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
 {
-	int ret;
-	mutex_lock(&mspro_block_mutex);
-	ret = mspro_block_disk_release(disk);
-	mutex_unlock(&mspro_block_mutex);
-	return ret;
+	return mspro_block_disk_release(disk);
 }
 
 static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -663,14 +663,43 @@
 	}
 }
 
+/*** Transfer setup functions for different access methods. ***/
+
+/** Set up a data transfer request for the SET_CMD TPC with arguments in
+ *  card registers.
+ *
+ *  @card    Current media instance
+ *  @offset  Target data offset in bytes
+ *  @length  Required transfer length in bytes
+ */
+static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
+				    size_t length)
+{
+	struct mspro_block_data *msb = memstick_get_drvdata(card);
+	struct mspro_param_register param = {
+		.system = msb->system,
+		.data_count = cpu_to_be16((uint16_t)(length / msb->page_size)),
+		/* ISO C90 warning precludes direct initialization for now. */
+		.data_address = 0,
+		.tpc_param = 0
+	};
+
+	do_div(offset, msb->page_size);
+	param.data_address = cpu_to_be32((uint32_t)offset);
+
+	card->next_request = h_mspro_block_req_init;
+	msb->mrq_handler = h_mspro_block_transfer_data;
+	memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+			  &param, sizeof(param));
+}
+
 /*** Data transfer ***/
 
 static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
 {
 	struct mspro_block_data *msb = memstick_get_drvdata(card);
-	sector_t t_sec;
+	u64 t_off;
 	unsigned int count;
-	struct mspro_param_register param;
 
 try_again:
 	while (chunk) {
@@ -685,30 +714,17 @@
 			continue;
 		}
 
-		t_sec = blk_rq_pos(msb->block_req) << 9;
-		sector_div(t_sec, msb->page_size);
-
+		t_off = blk_rq_pos(msb->block_req);
+		t_off <<= 9;
 		count = blk_rq_bytes(msb->block_req);
-		count /= msb->page_size;
 
-		param.system = msb->system;
-		param.data_count = cpu_to_be16(count);
-		param.data_address = cpu_to_be32((uint32_t)t_sec);
-		param.tpc_param = 0;
+		msb->setup_transfer(card, t_off, count);
 
 		msb->data_dir = rq_data_dir(msb->block_req);
 		msb->transfer_cmd = msb->data_dir == READ
 				    ? MSPRO_CMD_READ_DATA
 				    : MSPRO_CMD_WRITE_DATA;
 
-		dev_dbg(&card->dev, "data transfer: cmd %x, "
-			"lba %x, count %x\n", msb->transfer_cmd,
-			be32_to_cpu(param.data_address), count);
-
-		card->next_request = h_mspro_block_req_init;
-		msb->mrq_handler = h_mspro_block_transfer_data;
-		memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
-				  &param, sizeof(param));
 		memstick_new_req(card->host);
 		return 0;
 	}
@@ -963,18 +979,16 @@
 static int mspro_block_read_attributes(struct memstick_dev *card)
 {
 	struct mspro_block_data *msb = memstick_get_drvdata(card);
-	struct mspro_param_register param = {
-		.system = msb->system,
-		.data_count = cpu_to_be16(1),
-		.data_address = 0,
-		.tpc_param = 0
-	};
 	struct mspro_attribute *attr = NULL;
 	struct mspro_sys_attr *s_attr = NULL;
 	unsigned char *buffer = NULL;
 	int cnt, rc, attr_count;
-	unsigned int addr;
-	unsigned short page_count;
+	/* Physical device offsets, represented here by attr_offset and
+	 * attr_len, would normally need large numeric types, but the
+	 * attributes are known to lie close enough to the beginning of
+	 * the device that unsigned int is sufficient.
+	 */
+	unsigned int addr, attr_offset = 0, attr_len = msb->page_size;
 
 	attr = kmalloc(msb->page_size, GFP_KERNEL);
 	if (!attr)
@@ -987,10 +1001,8 @@
 	msb->data_dir = READ;
 	msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
 
-	card->next_request = h_mspro_block_req_init;
-	msb->mrq_handler = h_mspro_block_transfer_data;
-	memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
-			  sizeof(param));
+	msb->setup_transfer(card, attr_offset, attr_len);
+
 	memstick_new_req(card->host);
 	wait_for_completion(&card->mrq_complete);
 	if (card->current_mrq.error) {
@@ -1021,13 +1033,12 @@
 	}
 	msb->attr_group.name = "media_attributes";
 
-	buffer = kmalloc(msb->page_size, GFP_KERNEL);
+	buffer = kmalloc(attr_len, GFP_KERNEL);
 	if (!buffer) {
 		rc = -ENOMEM;
 		goto out_free_attr;
 	}
-	memcpy(buffer, (char *)attr, msb->page_size);
-	page_count = 1;
+	memcpy(buffer, (char *)attr, attr_len);
 
 	for (cnt = 0; cnt < attr_count; ++cnt) {
 		s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
@@ -1038,9 +1049,10 @@
 
 		msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr;
 		addr = be32_to_cpu(attr->entries[cnt].address);
-		rc = be32_to_cpu(attr->entries[cnt].size);
+		s_attr->size = be32_to_cpu(attr->entries[cnt].size);
 		dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, "
-			"size %x\n", cnt, attr->entries[cnt].id, addr, rc);
+			"size %zx\n", cnt, attr->entries[cnt].id, addr,
+			s_attr->size);
 		s_attr->id = attr->entries[cnt].id;
 		if (mspro_block_attr_name(s_attr->id))
 			snprintf(s_attr->name, sizeof(s_attr->name), "%s",
@@ -1054,57 +1066,47 @@
 		s_attr->dev_attr.attr.mode = S_IRUGO;
 		s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
 
-		if (!rc)
+		if (!s_attr->size)
 			continue;
 
-		s_attr->size = rc;
-		s_attr->data = kmalloc(rc, GFP_KERNEL);
+		s_attr->data = kmalloc(s_attr->size, GFP_KERNEL);
 		if (!s_attr->data) {
 			rc = -ENOMEM;
 			goto out_free_buffer;
 		}
 
-		if (((addr / msb->page_size)
-		     == be32_to_cpu(param.data_address))
-		    && (((addr + rc - 1) / msb->page_size)
-			== be32_to_cpu(param.data_address))) {
+		if (((addr / msb->page_size) == (attr_offset / msb->page_size))
+		    && (((addr + s_attr->size - 1) / msb->page_size)
+			== (attr_offset / msb->page_size))) {
 			memcpy(s_attr->data, buffer + addr % msb->page_size,
-			       rc);
+			       s_attr->size);
 			continue;
 		}
 
-		if (page_count <= (rc / msb->page_size)) {
+		attr_offset = (addr / msb->page_size) * msb->page_size;
+
+		if ((attr_offset + attr_len) < (addr + s_attr->size)) {
 			kfree(buffer);
-			page_count = (rc / msb->page_size) + 1;
-			buffer = kmalloc(page_count * msb->page_size,
-					 GFP_KERNEL);
+			attr_len = (((addr + s_attr->size) / msb->page_size)
+				    + 1) * msb->page_size - attr_offset;
+			buffer = kmalloc(attr_len, GFP_KERNEL);
 			if (!buffer) {
 				rc = -ENOMEM;
 				goto out_free_attr;
 			}
 		}
 
-		param.system = msb->system;
-		param.data_count = cpu_to_be16((rc / msb->page_size) + 1);
-		param.data_address = cpu_to_be32(addr / msb->page_size);
-		param.tpc_param = 0;
-
-		sg_init_one(&msb->req_sg[0], buffer,
-			    be16_to_cpu(param.data_count) * msb->page_size);
+		sg_init_one(&msb->req_sg[0], buffer, attr_len);
 		msb->seg_count = 1;
 		msb->current_seg = 0;
 		msb->current_page = 0;
 		msb->data_dir = READ;
 		msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
 
-		dev_dbg(&card->dev, "reading attribute pages %x, %x\n",
-			be32_to_cpu(param.data_address),
-			be16_to_cpu(param.data_count));
+		dev_dbg(&card->dev, "reading attribute range %x, %x\n",
+			attr_offset, attr_len);
 
-		card->next_request = h_mspro_block_req_init;
-		msb->mrq_handler = h_mspro_block_transfer_data;
-		memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
-				  (char *)&param, sizeof(param));
+		msb->setup_transfer(card, attr_offset, attr_len);
 		memstick_new_req(card->host);
 		wait_for_completion(&card->mrq_complete);
 		if (card->current_mrq.error) {
@@ -1112,7 +1114,8 @@
 			goto out_free_buffer;
 		}
 
-		memcpy(s_attr->data, buffer + addr % msb->page_size, rc);
+		memcpy(s_attr->data, buffer + addr % msb->page_size,
+		       s_attr->size);
 	}
 
 	rc = 0;
@@ -1130,6 +1133,8 @@
 	int rc = 0;
 
 	msb->system = MEMSTICK_SYS_SERIAL;
+	msb->setup_transfer = h_mspro_block_setup_cmd;
+
 	card->reg_addr.r_offset = offsetof(struct mspro_register, status);
 	card->reg_addr.r_length = sizeof(struct ms_status_register);
 	card->reg_addr.w_offset = offsetof(struct mspro_register, param);
@@ -1206,10 +1211,12 @@
 
 	msb->page_size = be16_to_cpu(sys_info->unit_size);
 
-	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL))
-		return -ENOMEM;
-
 	mutex_lock(&mspro_block_disk_lock);
+	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
+		mutex_unlock(&mspro_block_disk_lock);
+		return -ENOMEM;
+	}
+
 	rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
 	mutex_unlock(&mspro_block_disk_lock);
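
Condensed from the mspro_block hunks above (illustrative only, with foo_*
placeholder types standing in for struct memstick_dev and struct
mspro_block_data): the transfer setup method is chosen once per card and then
drives every data request, so an alternative access method can later be
supported by swapping the function pointer.

	#include <linux/types.h>

	struct foo_card;		/* stands in for struct memstick_dev */

	struct foo_block_data {		/* stands in for struct mspro_block_data */
		unsigned int page_size;
		/* transfer setup method, chosen once at probe time */
		void (*setup_transfer)(struct foo_card *card,
				       u64 offset, size_t length);
	};

	/* Request path: callers hand over plain byte offsets and lengths and
	 * let the per-card method build the TPC parameters. */
	static void foo_issue_request(struct foo_card *card,
				      struct foo_block_data *msb,
				      u64 byte_off, size_t byte_len)
	{
		msb->setup_transfer(card, byte_off, byte_len);
	}
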
 
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f2b894c..d89d925 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -61,6 +61,7 @@
 	struct memstick_request *req;
 	unsigned char           cmd_flags;
 	unsigned char           io_pos;
+	unsigned char           ifmode;
 	unsigned int            io_word[2];
 };
 
@@ -136,15 +137,14 @@
 #define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
 #define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
 
+#define CLOCK_CONTROL_BY_MMIO 0x00000008
 #define CLOCK_CONTROL_40MHZ   0x00000001
-#define CLOCK_CONTROL_50MHZ   0x0000000a
-#define CLOCK_CONTROL_60MHZ   0x00000008
-#define CLOCK_CONTROL_62_5MHZ 0x0000000c
+#define CLOCK_CONTROL_50MHZ   0x00000002
+#define CLOCK_CONTROL_60MHZ   0x00000010
+#define CLOCK_CONTROL_62_5MHZ 0x00000004
 #define CLOCK_CONTROL_OFF     0x00000000
 
 #define PCI_CTL_CLOCK_DLY_ADDR   0x000000b0
-#define PCI_CTL_CLOCK_DLY_MASK_A 0x00000f00
-#define PCI_CTL_CLOCK_DLY_MASK_B 0x0000f000
 
 enum {
 	CMD_READY    = 0x01,
@@ -390,8 +390,13 @@
 
 	if (host->req->data_dir == READ)
 		cmd |= TPC_DIR;
-	if (host->req->need_card_int)
-		cmd |= TPC_WAIT_INT;
+
+	if (host->req->need_card_int) {
+		if (host->ifmode == MEMSTICK_SERIAL)
+			cmd |= TPC_GET_INT;
+		else
+			cmd |= TPC_WAIT_INT;
+	}
 
 	data = host->req->data;
 
@@ -529,7 +534,10 @@
 		if (irq_status & INT_STATUS_ANY_ERR) {
 			if (irq_status & INT_STATUS_CRC_ERR)
 				host->req->error = -EILSEQ;
-			else
+			else if (irq_status & INT_STATUS_TPC_ERR) {
+				dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n");
+				jmb38x_ms_complete_cmd(msh, 0);
+			} else
 				host->req->error = -ETIME;
 		} else {
 			if (host->cmd_flags & DMA_DATA) {
@@ -644,7 +652,6 @@
 		ndelay(20);
 	}
 	dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
-	/* return -EIO; */
 
 reset_next:
 	writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
@@ -675,7 +682,7 @@
 {
 	struct jmb38x_ms_host *host = memstick_priv(msh);
 	unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
-	unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0;
+	unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0;
 	int rc = 0;
 
 	switch (param) {
@@ -687,9 +694,7 @@
 
 			host_ctl = 7;
 			host_ctl |= HOST_CONTROL_POWER_EN
-				    | HOST_CONTROL_CLOCK_EN
-				    | HOST_CONTROL_HW_OC_P
-				    | HOST_CONTROL_TDELAY_EN;
+				 | HOST_CONTROL_CLOCK_EN;
 			writel(host_ctl, host->addr + HOST_CONTROL);
 
 			writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
@@ -712,46 +717,88 @@
 			return -EINVAL;
 		break;
 	case MEMSTICK_INTERFACE:
+		dev_dbg(&host->chip->pdev->dev,
+			"Set Host Interface Mode to %d\n", value);
+		host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI |
+			      HOST_CONTROL_REO);
+		host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P;
 		host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
-		pci_read_config_dword(host->chip->pdev,
-				      PCI_CTL_CLOCK_DLY_ADDR,
-				      &clock_delay);
-		clock_delay &= host->id ? ~PCI_CTL_CLOCK_DLY_MASK_B
-					: ~PCI_CTL_CLOCK_DLY_MASK_A;
 
 		if (value == MEMSTICK_SERIAL) {
-			host_ctl &= ~HOST_CONTROL_FAST_CLK;
-			host_ctl &= ~HOST_CONTROL_REO;
 			host_ctl |= HOST_CONTROL_IF_SERIAL
 				    << HOST_CONTROL_IF_SHIFT;
 			host_ctl |= HOST_CONTROL_REI;
-			clock_ctl = CLOCK_CONTROL_40MHZ;
+			clock_ctl |= CLOCK_CONTROL_40MHZ;
+			clock_delay = 0;
 		} else if (value == MEMSTICK_PAR4) {
-			host_ctl |= HOST_CONTROL_FAST_CLK | HOST_CONTROL_REO;
+			host_ctl |= HOST_CONTROL_FAST_CLK;
 			host_ctl |= HOST_CONTROL_IF_PAR4
 				    << HOST_CONTROL_IF_SHIFT;
-			host_ctl &= ~HOST_CONTROL_REI;
-			clock_ctl = CLOCK_CONTROL_40MHZ;
-			clock_delay |= host->id ? (4 << 12) : (4 << 8);
+			host_ctl |= HOST_CONTROL_REO;
+			clock_ctl |= CLOCK_CONTROL_40MHZ;
+			clock_delay = 4;
 		} else if (value == MEMSTICK_PAR8) {
 			host_ctl |= HOST_CONTROL_FAST_CLK;
 			host_ctl |= HOST_CONTROL_IF_PAR8
 				    << HOST_CONTROL_IF_SHIFT;
-			host_ctl &= ~(HOST_CONTROL_REI | HOST_CONTROL_REO);
-			clock_ctl = CLOCK_CONTROL_50MHZ;
+			clock_ctl |= CLOCK_CONTROL_50MHZ;
+			clock_delay = 0;
 		} else
 			return -EINVAL;
 
 		writel(host_ctl, host->addr + HOST_CONTROL);
+		writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL);
 		writel(clock_ctl, host->addr + CLOCK_CONTROL);
-		pci_write_config_dword(host->chip->pdev,
-				       PCI_CTL_CLOCK_DLY_ADDR,
-				       clock_delay);
+		pci_write_config_byte(host->chip->pdev,
+				      PCI_CTL_CLOCK_DLY_ADDR + 1,
+				      clock_delay);
+		host->ifmode = value;
 		break;
 	};
 	return 0;
 }
 
+#define PCI_PMOS0_CONTROL		0xae
+#define  PMOS0_ENABLE			0x01
+#define  PMOS0_OVERCURRENT_LEVEL_2_4V	0x06
+#define  PMOS0_EN_OVERCURRENT_DEBOUNCE	0x40
+#define  PMOS0_SW_LED_POLARITY_ENABLE	0x80
+#define  PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \
+			    PMOS0_OVERCURRENT_LEVEL_2_4V)
+#define PCI_PMOS1_CONTROL		0xbd
+#define  PMOS1_ACTIVE_BITS		0x4a
+#define PCI_CLOCK_CTL			0xb9
+
+static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag)
+{
+	unsigned char val;
+
+	pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val);
+	if (flag)
+		val |= PMOS0_ACTIVE_BITS;
+	else
+		val &= ~PMOS0_ACTIVE_BITS;
+	pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val);
+	dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val);
+
+	if (pci_resource_flags(pdev, 1)) {
+		pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val);
+		if (flag)
+			val |= PMOS1_ACTIVE_BITS;
+		else
+			val &= ~PMOS1_ACTIVE_BITS;
+		pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val);
+		dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val);
+	}
+
+	pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val);
+	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f);
+	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01);
+	dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n");
+
+	return 0;
+}
+
 #ifdef CONFIG_PM
 
 static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state)
@@ -784,8 +831,7 @@
 		return rc;
 	pci_set_master(dev);
 
-	pci_read_config_dword(dev, 0xac, &rc);
-	pci_write_config_dword(dev, 0xac, rc | 0x00470000);
+	jmb38x_ms_pmos(dev, 1);
 
 	for (rc = 0; rc < jm->host_cnt; ++rc) {
 		if (!jm->hosts[rc])
@@ -894,8 +940,7 @@
 		goto err_out;
 	}
 
-	pci_read_config_dword(pdev, 0xac, &rc);
-	pci_write_config_dword(pdev, 0xac, rc | 0x00470000);
+	jmb38x_ms_pmos(pdev, 1);
 
 	cnt = jmb38x_ms_count_slots(pdev);
 	if (!cnt) {
@@ -976,6 +1021,8 @@
 		jmb38x_ms_free_host(jm->hosts[cnt]);
 	}
 
+	jmb38x_ms_pmos(dev, 0);
+
 	pci_set_drvdata(dev, NULL);
 	pci_release_regions(dev);
 	pci_disable_device(dev);
@@ -983,8 +1030,9 @@
 }
 
 static struct pci_device_id jmb38x_ms_id_tbl [] = {
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID,
-	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) },
 	{ }
 };
 
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index 691620d..8b04810 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -268,7 +268,7 @@
 
 /* Compatibility Error : IR Disabled */
 #define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED                  (0x00010030)
-/* Compatibility Error : Inquiry Comand failed */
+/* Compatibility Error : Inquiry Command failed */
 #define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED                 (0x00010031)
 /* Compatibility Error : Device not direct access device */
 #define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS              (0x00010032)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3e57b61..3358c0a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7977,7 +7977,7 @@
 		NULL,						/* 2Eh */
 		NULL,						/* 2Fh */
 		"Compatibility Error: IR Disabled",		/* 30h */
-		"Compatibility Error: Inquiry Comand Failed",	/* 31h */
+		"Compatibility Error: Inquiry Command Failed",	/* 31h */
 		"Compatibility Error: Device not Direct Access "
 		    "Device ",					/* 32h */
 		"Compatibility Error: Removable Device Found",	/* 33h */
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index d48c2c6..8aefb18 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1146,7 +1146,7 @@
  *
  * This function will delete scheduled target reset from the list and
  * try to send next target reset. This will be called from completion
- * context of any Task managment command.
+ * context of any Task management command.
  */
 
 void
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f87a9d4..ae7cad1 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -309,7 +309,7 @@
  *	@ireq: I2O block request
  *	@mptr: message body pointer
  *
- *	Builds the SG list and map it to be accessable by the controller.
+ *	Builds the SG list and map it to be accessible by the controller.
  *
  *	Returns 0 on failure or 1 on success.
  */
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 20895e7..793300c 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -361,12 +361,6 @@
 	},
 };
 
-static inline struct pm860x_irq_data *irq_to_pm860x(struct pm860x_chip *chip,
-						    int irq)
-{
-	return &pm860x_irqs[irq - chip->irq_base];
-}
-
 static irqreturn_t pm860x_irq(int irq, void *data)
 {
 	struct pm860x_chip *chip = data;
@@ -388,16 +382,16 @@
 	return IRQ_HANDLED;
 }
 
-static void pm860x_irq_lock(unsigned int irq)
+static void pm860x_irq_lock(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void pm860x_irq_sync_unlock(unsigned int irq)
+static void pm860x_irq_sync_unlock(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
 	struct pm860x_irq_data *irq_data;
 	struct i2c_client *i2c;
 	static unsigned char cached[3] = {0x0, 0x0, 0x0};
@@ -439,25 +433,25 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static void pm860x_irq_enable(unsigned int irq)
+static void pm860x_irq_enable(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
-	pm860x_irqs[irq - chip->irq_base].enable
-		= pm860x_irqs[irq - chip->irq_base].offs;
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
+	pm860x_irqs[data->irq - chip->irq_base].enable
+		= pm860x_irqs[data->irq - chip->irq_base].offs;
 }
 
-static void pm860x_irq_disable(unsigned int irq)
+static void pm860x_irq_disable(struct irq_data *data)
 {
-	struct pm860x_chip *chip = get_irq_chip_data(irq);
-	pm860x_irqs[irq - chip->irq_base].enable = 0;
+	struct pm860x_chip *chip = irq_data_get_irq_chip_data(data);
+	pm860x_irqs[data->irq - chip->irq_base].enable = 0;
 }
 
 static struct irq_chip pm860x_irq_chip = {
 	.name		= "88pm860x",
-	.bus_lock	= pm860x_irq_lock,
-	.bus_sync_unlock = pm860x_irq_sync_unlock,
-	.enable		= pm860x_irq_enable,
-	.disable	= pm860x_irq_disable,
+	.irq_bus_lock	= pm860x_irq_lock,
+	.irq_bus_sync_unlock = pm860x_irq_sync_unlock,
+	.irq_enable	= pm860x_irq_enable,
+	.irq_disable	= pm860x_irq_disable,
 };
 
 static int __devinit device_gpadc_init(struct pm860x_chip *chip,
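
The 88pm860x conversion above, and the ab3550/ab8500 conversions below, follow
the same genirq change: irq_chip callbacks now take a struct irq_data pointer
instead of a bare irq number, and are registered via the irq_-prefixed fields.
A minimal sketch of the new shape (foo_* names are placeholders; only the
genirq calls are real):

	#include <linux/irq.h>

	struct foo_chip {
		int		irq_base;
		unsigned char	mask[4];
	};

	static void foo_irq_mask(struct irq_data *data)
	{
		/* Driver context now comes from the irq_data... */
		struct foo_chip *chip = irq_data_get_irq_chip_data(data);
		/* ...and the raw irq number, where still needed, is data->irq. */
		int offset = data->irq - chip->irq_base;

		chip->mask[offset / 8] |= 1 << (offset % 8);
	}

	static struct irq_chip foo_irq_chip = {
		.name		= "foo",
		.irq_mask	= foo_irq_mask,	/* was .mask, taking unsigned int irq */
	};
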
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index da9d297..fd01836 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -496,13 +496,13 @@
 
 config AB8500_CORE
 	bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
-	depends on GENERIC_HARDIRQS && ABX500_CORE && SPI_MASTER && ARCH_U8500
+	depends on GENERIC_HARDIRQS && ABX500_CORE
 	select MFD_CORE
 	help
 	  Select this option to enable access to AB8500 power management
-	  chip. This connects to U8500 either on the SSP/SPI bus
-	  or the I2C bus via PRCMU. It also adds the irq_chip
-	  parts for handling the Mixed Signal chip events.
+	  chip. This connects to U8500 either on the SSP/SPI bus (deprecated
+	  since hardware version v1.0) or the I2C bus via PRCMU. It also adds
+	  the irq_chip parts for handling the Mixed Signal chip events.
 	  This chip embeds various other multimedia funtionalities as well.
 
 config AB8500_I2C_CORE
@@ -537,6 +537,14 @@
 	  LEDs, vibrator, system power and temperature, power management
 	  and ALSA sound.
 
+config MFD_CS5535
+	tristate "Support for CS5535 and CS5536 southbridge core functions"
+	select MFD_CORE
+	depends on PCI
+	---help---
+	  This is the core driver for CS5535/CS5536 MFD functions.  This is
+	  necessary for using the board's GPIO and MFGPT functionality.
+
 config MFD_TIMBERDALE
 	tristate "Support for the Timberdale FPGA"
 	select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 848e7ea..a54e2c7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -70,7 +70,7 @@
 obj-$(CONFIG_AB3100_CORE)	+= ab3100-core.o
 obj-$(CONFIG_AB3100_OTP)	+= ab3100-otp.o
 obj-$(CONFIG_AB3550_CORE)	+= ab3550-core.o
-obj-$(CONFIG_AB8500_CORE)	+= ab8500-core.o ab8500-spi.o
+obj-$(CONFIG_AB8500_CORE)	+= ab8500-core.o
 obj-$(CONFIG_AB8500_I2C_CORE)	+= ab8500-i2c.o
 obj-$(CONFIG_AB8500_DEBUG)	+= ab8500-debugfs.o
 obj-$(CONFIG_MFD_TIMBERDALE)    += timberdale.o
@@ -82,3 +82,4 @@
 obj-$(CONFIG_MFD_TPS6586X)	+= tps6586x.o
 obj-$(CONFIG_MFD_VX855)		+= vx855.o
 obj-$(CONFIG_MFD_WL1273_CORE)	+= wl1273-core.o
+obj-$(CONFIG_MFD_CS5535)	+= cs5535-mfd.o
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 8a98739..5fbca34 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1159,15 +1159,16 @@
 	}
 }
 
-static void ab3550_mask(unsigned int irq)
+static void ab3550_mask(struct irq_data *data)
 {
 	unsigned long flags;
 	struct ab3550 *ab;
 	struct ab3550_platform_data *plf_data;
+	int irq;
 
-	ab = get_irq_chip_data(irq);
+	ab = irq_data_get_irq_chip_data(data);
 	plf_data = ab->i2c_client[0]->dev.platform_data;
-	irq -= plf_data->irq.base;
+	irq = data->irq - plf_data->irq.base;
 
 	spin_lock_irqsave(&ab->event_lock, flags);
 	ab->event_mask[irq / 8] |= BIT(irq % 8);
@@ -1176,15 +1177,16 @@
 	schedule_work(&ab->mask_work);
 }
 
-static void ab3550_unmask(unsigned int irq)
+static void ab3550_unmask(struct irq_data *data)
 {
 	unsigned long flags;
 	struct ab3550 *ab;
 	struct ab3550_platform_data *plf_data;
+	int irq;
 
-	ab = get_irq_chip_data(irq);
+	ab = irq_data_get_irq_chip_data(data);
 	plf_data = ab->i2c_client[0]->dev.platform_data;
-	irq -= plf_data->irq.base;
+	irq = data->irq - plf_data->irq.base;
 
 	spin_lock_irqsave(&ab->event_lock, flags);
 	ab->event_mask[irq / 8] &= ~BIT(irq % 8);
@@ -1193,20 +1195,16 @@
 	schedule_work(&ab->mask_work);
 }
 
-static void noop(unsigned int irq)
+static void noop(struct irq_data *data)
 {
 }
 
 static struct irq_chip ab3550_irq_chip = {
 	.name		= "ab3550-core", /* Keep the same name as the request */
-	.startup	= NULL, /* defaults to enable */
-	.shutdown	= NULL, /* defaults to disable */
-	.enable		= NULL, /* defaults to unmask */
-	.disable	= ab3550_mask, /* No default to mask in chip.c */
-	.ack		= noop,
-	.mask		= ab3550_mask,
-	.unmask		= ab3550_unmask,
-	.end		= NULL,
+	.irq_disable	= ab3550_mask, /* No default to mask in chip.c */
+	.irq_ack	= noop,
+	.irq_mask	= ab3550_mask,
+	.irq_unmask	= ab3550_unmask,
 };
 
 struct ab_family_id {
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d9640a6..b688701 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -52,6 +52,7 @@
 #define AB8500_IT_LATCH8_REG		0x27
 #define AB8500_IT_LATCH9_REG		0x28
 #define AB8500_IT_LATCH10_REG		0x29
+#define AB8500_IT_LATCH12_REG		0x2B
 #define AB8500_IT_LATCH19_REG		0x32
 #define AB8500_IT_LATCH20_REG		0x33
 #define AB8500_IT_LATCH21_REG		0x34
@@ -98,13 +99,17 @@
  * offset 0.
  */
 static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
-	0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
+	0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21,
 };
 
 static int ab8500_get_chip_id(struct device *dev)
 {
-	struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
-	return (int)ab8500->chip_id;
+	struct ab8500 *ab8500;
+
+	if (!dev)
+		return -EINVAL;
+	ab8500 = dev_get_drvdata(dev->parent);
+	return ab8500 ? (int)ab8500->chip_id : -EINVAL;
 }
 
 static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -228,16 +233,16 @@
 	.startup_irq_enabled = NULL,
 };
 
-static void ab8500_irq_lock(unsigned int irq)
+static void ab8500_irq_lock(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&ab8500->irq_lock);
 }
 
-static void ab8500_irq_sync_unlock(unsigned int irq)
+static void ab8500_irq_sync_unlock(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
@@ -248,6 +253,10 @@
 		if (new == old)
 			continue;
 
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+		if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+			continue;
+
 		ab8500->oldmask[i] = new;
 
 		reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
@@ -257,20 +266,20 @@
 	mutex_unlock(&ab8500->irq_lock);
 }
 
-static void ab8500_irq_mask(unsigned int irq)
+static void ab8500_irq_mask(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
-	int offset = irq - ab8500->irq_base;
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - ab8500->irq_base;
 	int index = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	ab8500->mask[index] |= mask;
 }
 
-static void ab8500_irq_unmask(unsigned int irq)
+static void ab8500_irq_unmask(struct irq_data *data)
 {
-	struct ab8500 *ab8500 = get_irq_chip_data(irq);
-	int offset = irq - ab8500->irq_base;
+	struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - ab8500->irq_base;
 	int index = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -279,10 +288,10 @@
 
 static struct irq_chip ab8500_irq_chip = {
 	.name			= "ab8500",
-	.bus_lock		= ab8500_irq_lock,
-	.bus_sync_unlock	= ab8500_irq_sync_unlock,
-	.mask			= ab8500_irq_mask,
-	.unmask			= ab8500_irq_unmask,
+	.irq_bus_lock		= ab8500_irq_lock,
+	.irq_bus_sync_unlock	= ab8500_irq_sync_unlock,
+	.irq_mask		= ab8500_irq_mask,
+	.irq_unmask		= ab8500_irq_unmask,
 };
 
 static irqreturn_t ab8500_irq(int irq, void *dev)
@@ -297,6 +306,10 @@
 		int status;
 		u8 value;
 
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+		if (regoffset == 11 && ab8500->chip_id < 0x20)
+			continue;
+
 		status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
 			AB8500_IT_LATCH1_REG + regoffset, &value);
 		if (status < 0 || value == 0)
@@ -393,13 +406,195 @@
 	},
 };
 
+static struct resource ab8500_bm_resources[] = {
+	{
+		.name = "MAIN_EXT_CH_NOT_OK",
+		.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BATT_OVV",
+		.start = AB8500_INT_BATT_OVV,
+		.end = AB8500_INT_BATT_OVV,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "MAIN_CH_UNPLUG_DET",
+		.start = AB8500_INT_MAIN_CH_UNPLUG_DET,
+		.end = AB8500_INT_MAIN_CH_UNPLUG_DET,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "MAIN_CHARGE_PLUG_DET",
+		.start = AB8500_INT_MAIN_CH_PLUG_DET,
+		.end = AB8500_INT_MAIN_CH_PLUG_DET,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_F",
+		.start = AB8500_INT_VBUS_DET_F,
+		.end = AB8500_INT_VBUS_DET_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_R",
+		.start = AB8500_INT_VBUS_DET_R,
+		.end = AB8500_INT_VBUS_DET_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BAT_CTRL_INDB",
+		.start = AB8500_INT_BAT_CTRL_INDB,
+		.end = AB8500_INT_BAT_CTRL_INDB,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "CH_WD_EXP",
+		.start = AB8500_INT_CH_WD_EXP,
+		.end = AB8500_INT_CH_WD_EXP,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_OVV",
+		.start = AB8500_INT_VBUS_OVV,
+		.end = AB8500_INT_VBUS_OVV,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "NCONV_ACCU",
+		.start = AB8500_INT_CCN_CONV_ACC,
+		.end = AB8500_INT_CCN_CONV_ACC,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "LOW_BAT_F",
+		.start = AB8500_INT_LOW_BAT_F,
+		.end = AB8500_INT_LOW_BAT_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "LOW_BAT_R",
+		.start = AB8500_INT_LOW_BAT_R,
+		.end = AB8500_INT_LOW_BAT_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BTEMP_LOW",
+		.start = AB8500_INT_BTEMP_LOW,
+		.end = AB8500_INT_BTEMP_LOW,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "BTEMP_HIGH",
+		.start = AB8500_INT_BTEMP_HIGH,
+		.end = AB8500_INT_BTEMP_HIGH,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CHARGER_NOT_OKR",
+		.start = AB8500_INT_USB_CHARGER_NOT_OK,
+		.end = AB8500_INT_USB_CHARGER_NOT_OK,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CHARGE_DET_DONE",
+		.start = AB8500_INT_USB_CHG_DET_DONE,
+		.end = AB8500_INT_USB_CHG_DET_DONE,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CH_TH_PROT_R",
+		.start = AB8500_INT_USB_CH_TH_PROT_R,
+		.end = AB8500_INT_USB_CH_TH_PROT_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "MAIN_CH_TH_PROT_R",
+		.start = AB8500_INT_MAIN_CH_TH_PROT_R,
+		.end = AB8500_INT_MAIN_CH_TH_PROT_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_CHARGER_NOT_OKF",
+		.start = AB8500_INT_USB_CHARGER_NOT_OKF,
+		.end = AB8500_INT_USB_CHARGER_NOT_OKF,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource ab8500_debug_resources[] = {
+	{
+		.name	= "IRQ_FIRST",
+		.start	= AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.end	= AB8500_INT_MAIN_EXT_CH_NOT_OK,
+		.flags	= IORESOURCE_IRQ,
+	},
+	{
+		.name	= "IRQ_LAST",
+		.start	= AB8500_INT_USB_CHARGER_NOT_OKF,
+		.end	= AB8500_INT_USB_CHARGER_NOT_OKF,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct resource ab8500_usb_resources[] = {
+	{
+		.name = "ID_WAKEUP_R",
+		.start = AB8500_INT_ID_WAKEUP_R,
+		.end = AB8500_INT_ID_WAKEUP_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "ID_WAKEUP_F",
+		.start = AB8500_INT_ID_WAKEUP_F,
+		.end = AB8500_INT_ID_WAKEUP_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_F",
+		.start = AB8500_INT_VBUS_DET_F,
+		.end = AB8500_INT_VBUS_DET_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_R",
+		.start = AB8500_INT_VBUS_DET_R,
+		.end = AB8500_INT_VBUS_DET_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "USB_LINK_STATUS",
+		.start = AB8500_INT_USB_LINK_STATUS,
+		.end = AB8500_INT_USB_LINK_STATUS,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct resource ab8500_temp_resources[] = {
+	{
+		.name  = "AB8500_TEMP_WARM",
+		.start = AB8500_INT_TEMP_WARM,
+		.end   = AB8500_INT_TEMP_WARM,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
 static struct mfd_cell ab8500_devs[] = {
 #ifdef CONFIG_DEBUG_FS
 	{
 		.name = "ab8500-debug",
+		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
+		.resources = ab8500_debug_resources,
 	},
 #endif
 	{
+		.name = "ab8500-sysctrl",
+	},
+	{
+		.name = "ab8500-regulator",
+	},
+	{
 		.name = "ab8500-gpadc",
 		.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
 		.resources = ab8500_gpadc_resources,
@@ -410,6 +605,22 @@
 		.resources = ab8500_rtc_resources,
 	},
 	{
+		.name = "ab8500-bm",
+		.num_resources = ARRAY_SIZE(ab8500_bm_resources),
+		.resources = ab8500_bm_resources,
+	},
+	{ .name = "ab8500-codec", },
+	{
+		.name = "ab8500-usb",
+		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
+		.resources = ab8500_usb_resources,
+	},
+	{
+		.name = "ab8500-poweron-key",
+		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+		.resources = ab8500_poweronkey_db_resources,
+	},
+	{
 		.name = "ab8500-pwm",
 		.id = 1,
 	},
@@ -421,15 +632,35 @@
 		.name = "ab8500-pwm",
 		.id = 3,
 	},
-	{ .name = "ab8500-charger", },
-	{ .name = "ab8500-audio", },
-	{ .name = "ab8500-usb", },
-	{ .name = "ab8500-regulator", },
+	{ .name = "ab8500-leds", },
 	{
-		.name = "ab8500-poweron-key",
-		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
-		.resources = ab8500_poweronkey_db_resources,
+		.name = "ab8500-denc",
 	},
+	{
+		.name = "ab8500-temp",
+		.num_resources = ARRAY_SIZE(ab8500_temp_resources),
+		.resources = ab8500_temp_resources,
+	},
+};
+
+static ssize_t show_chip_id(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct ab8500 *ab8500;
+
+	ab8500 = dev_get_drvdata(dev);
+	return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
+}
+
+static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
+
+static struct attribute *ab8500_sysfs_entries[] = {
+	&dev_attr_chip_id.attr,
+	NULL,
+};
+
+static struct attribute_group ab8500_attr_group = {
+	.attrs	= ab8500_sysfs_entries,
 };
 
 int __devinit ab8500_init(struct ab8500 *ab8500)
@@ -454,8 +685,9 @@
 	 * 0x0 - Early Drop
 	 * 0x10 - Cut 1.0
 	 * 0x11 - Cut 1.1
+	 * 0x20 - Cut 2.0
 	 */
-	if (value == 0x0 || value == 0x10 || value == 0x11) {
+	if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20) {
 		ab8500->revision = value;
 		dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
 	} else {
@@ -468,18 +700,16 @@
 		plat->init(ab8500);
 
 	/* Clear and mask all interrupts */
-	for (i = 0; i < 10; i++) {
-		get_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_LATCH1_REG + i, &value);
-		set_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_MASK1_REG + i, 0xff);
-	}
+	for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
+		/* Interrupt register 12 doesn't exist prior to version 0x20 */
+		if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+			continue;
 
-	for (i = 18; i < 24; i++) {
 		get_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_LATCH1_REG + i, &value);
+			AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[i],
+			&value);
 		set_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_MASK1_REG + i, 0xff);
+			AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i], 0xff);
 	}
 
 	ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
@@ -495,7 +725,8 @@
 			return ret;
 
 		ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
-					   IRQF_ONESHOT, "ab8500", ab8500);
+					   IRQF_ONESHOT | IRQF_NO_SUSPEND,
+					   "ab8500", ab8500);
 		if (ret)
 			goto out_removeirq;
 	}
@@ -506,6 +737,10 @@
 	if (ret)
 		goto out_freeirq;
 
+	ret = sysfs_create_group(&ab8500->dev->kobj, &ab8500_attr_group);
+	if (ret)
+		dev_err(ab8500->dev, "error creating sysfs entries\n");
+
 	return ret;
 
 out_freeirq:
@@ -519,6 +754,7 @@
 
 int __devexit ab8500_exit(struct ab8500 *ab8500)
 {
+	sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
 	mfd_remove_devices(ab8500->dev);
 	if (ab8500->irq_base) {
 		free_irq(ab8500->irq, ab8500);
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 8d1e05a..3c1541a 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -24,9 +24,9 @@
  * @perm: access permissions for the range
  */
 struct ab8500_reg_range {
-       u8 first;
-       u8 last;
-       u8 perm;
+	u8 first;
+	u8 last;
+	u8 perm;
 };
 
 /**
@@ -36,9 +36,9 @@
  * @range: the list of register ranges
  */
 struct ab8500_i2c_ranges {
-       u8 num_ranges;
-       u8 bankid;
-       const struct ab8500_reg_range *range;
+	u8 num_ranges;
+	u8 bankid;
+	const struct ab8500_reg_range *range;
 };
 
 #define AB8500_NAME_STRING "ab8500"
@@ -47,521 +47,521 @@
 #define AB8500_REV_REG 0x80
 
 static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
-       [0x0] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_SYS_CTRL1_BLOCK] = {
-               .num_ranges = 3,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x02,
-                       },
-                       {
-                               .first = 0x42,
-                               .last = 0x42,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x81,
-                       },
-               },
-       },
-       [AB8500_SYS_CTRL2_BLOCK] = {
-               .num_ranges = 4,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x0D,
-                       },
-                       {
-                               .first = 0x0F,
-                               .last = 0x17,
-                       },
-                       {
-                               .first = 0x30,
-                               .last = 0x30,
-                       },
-                       {
-                               .first = 0x32,
-                               .last = 0x33,
-                       },
-               },
-       },
-       [AB8500_REGU_CTRL1] = {
-               .num_ranges = 3,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x00,
-                       },
-                       {
-                               .first = 0x03,
-                               .last = 0x10,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x84,
-                       },
-               },
-       },
-       [AB8500_REGU_CTRL2] = {
-               .num_ranges = 5,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x15,
-                       },
-                       {
-                               .first = 0x17,
-                               .last = 0x19,
-                       },
-                       {
-                               .first = 0x1B,
-                               .last = 0x1D,
-                       },
-                       {
-                               .first = 0x1F,
-                               .last = 0x22,
-                       },
-                       {
-                               .first = 0x40,
-                               .last = 0x44,
-                       },
-                       /* 0x80-0x8B is SIM registers and should
-                        * not be accessed from here */
-               },
-       },
-       [AB8500_USB] = {
-               .num_ranges = 2,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x80,
-                               .last = 0x83,
-                       },
-                       {
-                               .first = 0x87,
-                               .last = 0x8A,
-                       },
-               },
-       },
-       [AB8500_TVOUT] = {
-               .num_ranges = 9,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x12,
-                       },
-                       {
-                               .first = 0x15,
-                               .last = 0x17,
-                       },
-                       {
-                               .first = 0x19,
-                               .last = 0x21,
-                       },
-                       {
-                               .first = 0x27,
-                               .last = 0x2C,
-                       },
-                       {
-                               .first = 0x41,
-                               .last = 0x41,
-                       },
-                       {
-                               .first = 0x45,
-                               .last = 0x5B,
-                       },
-                       {
-                               .first = 0x5D,
-                               .last = 0x5D,
-                       },
-                       {
-                               .first = 0x69,
-                               .last = 0x69,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x81,
-                       },
-               },
-       },
-       [AB8500_DBI] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_ECI_AV_ACC] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x80,
-                               .last = 0x82,
-                       },
-               },
-       },
-       [0x9] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_GPADC] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x08,
-                       },
-               },
-       },
-       [AB8500_CHARGER] = {
-               .num_ranges = 8,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x03,
-                       },
-                       {
-                               .first = 0x05,
-                               .last = 0x05,
-                       },
-                       {
-                               .first = 0x40,
-                               .last = 0x40,
-                       },
-                       {
-                               .first = 0x42,
-                               .last = 0x42,
-                       },
-                       {
-                               .first = 0x44,
-                               .last = 0x44,
-                       },
-                       {
-                               .first = 0x50,
-                               .last = 0x55,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x82,
-                       },
-                       {
-                               .first = 0xC0,
-                               .last = 0xC2,
-                       },
-               },
-       },
-       [AB8500_GAS_GAUGE] = {
-               .num_ranges = 3,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x00,
-                       },
-                       {
-                               .first = 0x07,
-                               .last = 0x0A,
-                       },
-                       {
-                               .first = 0x10,
-                               .last = 0x14,
-                       },
-               },
-       },
-       [AB8500_AUDIO] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x6F,
-                       },
-               },
-       },
-       [AB8500_INTERRUPT] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_RTC] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x0F,
-                       },
-               },
-       },
-       [AB8500_MISC] = {
-               .num_ranges = 8,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x00,
-                               .last = 0x05,
-                       },
-                       {
-                               .first = 0x10,
-                               .last = 0x15,
-                       },
-                       {
-                               .first = 0x20,
-                               .last = 0x25,
-                       },
-                       {
-                               .first = 0x30,
-                               .last = 0x35,
-                       },
-                       {
-                               .first = 0x40,
-                               .last = 0x45,
-                       },
-                       {
-                               .first = 0x50,
-                               .last = 0x50,
-                       },
-                       {
-                               .first = 0x60,
-                               .last = 0x67,
-                       },
-                       {
-                               .first = 0x80,
-                               .last = 0x80,
-                       },
-               },
-       },
-       [0x11] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [0x12] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [0x13] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [0x14] = {
-               .num_ranges = 0,
-               .range = 0,
-       },
-       [AB8500_OTP_EMUL] = {
-               .num_ranges = 1,
-               .range = (struct ab8500_reg_range[]) {
-                       {
-                               .first = 0x01,
-                               .last = 0x0F,
-                       },
-               },
-       },
+	[0x0] = {
+		.num_ranges = 0,
+		.range = 0,
+	},
+	[AB8500_SYS_CTRL1_BLOCK] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x02,
+			},
+			{
+				.first = 0x42,
+				.last = 0x42,
+			},
+			{
+				.first = 0x80,
+				.last = 0x81,
+			},
+		},
+	},
+	[AB8500_SYS_CTRL2_BLOCK] = {
+		.num_ranges = 4,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0D,
+			},
+			{
+				.first = 0x0F,
+				.last = 0x17,
+			},
+			{
+				.first = 0x30,
+				.last = 0x30,
+			},
+			{
+				.first = 0x32,
+				.last = 0x33,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL1] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x03,
+				.last = 0x10,
+			},
+			{
+				.first = 0x80,
+				.last = 0x84,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL2] = {
+		.num_ranges = 5,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x15,
+			},
+			{
+				.first = 0x17,
+				.last = 0x19,
+			},
+			{
+				.first = 0x1B,
+				.last = 0x1D,
+			},
+			{
+				.first = 0x1F,
+				.last = 0x22,
+			},
+			{
+				.first = 0x40,
+				.last = 0x44,
+			},
+			/* 0x80-0x8B are SIM registers and should
+			 * not be accessed from here */
+		},
+	},
+	[AB8500_USB] = {
+		.num_ranges = 2,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x83,
+			},
+			{
+				.first = 0x87,
+				.last = 0x8A,
+			},
+		},
+	},
+	[AB8500_TVOUT] = {
+		.num_ranges = 9,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x12,
+			},
+			{
+				.first = 0x15,
+				.last = 0x17,
+			},
+			{
+				.first = 0x19,
+				.last = 0x21,
+			},
+			{
+				.first = 0x27,
+				.last = 0x2C,
+			},
+			{
+				.first = 0x41,
+				.last = 0x41,
+			},
+			{
+				.first = 0x45,
+				.last = 0x5B,
+			},
+			{
+				.first = 0x5D,
+				.last = 0x5D,
+			},
+			{
+				.first = 0x69,
+				.last = 0x69,
+			},
+			{
+				.first = 0x80,
+				.last = 0x81,
+			},
+		},
+	},
+	[AB8500_DBI] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_ECI_AV_ACC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+		},
+	},
+	[0x9] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_GPADC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x08,
+			},
+		},
+	},
+	[AB8500_CHARGER] = {
+		.num_ranges = 8,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x03,
+			},
+			{
+				.first = 0x05,
+				.last = 0x05,
+			},
+			{
+				.first = 0x40,
+				.last = 0x40,
+			},
+			{
+				.first = 0x42,
+				.last = 0x42,
+			},
+			{
+				.first = 0x44,
+				.last = 0x44,
+			},
+			{
+				.first = 0x50,
+				.last = 0x55,
+			},
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+			{
+				.first = 0xC0,
+				.last = 0xC2,
+			},
+		},
+	},
+	[AB8500_GAS_GAUGE] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x07,
+				.last = 0x0A,
+			},
+			{
+				.first = 0x10,
+				.last = 0x14,
+			},
+		},
+	},
+	[AB8500_AUDIO] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x6F,
+			},
+		},
+	},
+	[AB8500_INTERRUPT] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_RTC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0F,
+			},
+		},
+	},
+	[AB8500_MISC] = {
+		.num_ranges = 8,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x05,
+			},
+			{
+				.first = 0x10,
+				.last = 0x15,
+			},
+			{
+				.first = 0x20,
+				.last = 0x25,
+			},
+			{
+				.first = 0x30,
+				.last = 0x35,
+			},
+			{
+				.first = 0x40,
+				.last = 0x45,
+			},
+			{
+				.first = 0x50,
+				.last = 0x50,
+			},
+			{
+				.first = 0x60,
+				.last = 0x67,
+			},
+			{
+				.first = 0x80,
+				.last = 0x80,
+			},
+		},
+	},
+	[0x11] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[0x12] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[0x13] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[0x14] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_OTP_EMUL] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x01,
+				.last = 0x0F,
+			},
+		},
+	},
 };
 
 static int ab8500_registers_print(struct seq_file *s, void *p)
 {
-       struct device *dev = s->private;
-       unsigned int i;
-       u32 bank = debug_bank;
+	struct device *dev = s->private;
+	unsigned int i;
+	u32 bank = debug_bank;
 
-       seq_printf(s, AB8500_NAME_STRING " register values:\n");
+	seq_printf(s, AB8500_NAME_STRING " register values:\n");
 
-       seq_printf(s, " bank %u:\n", bank);
-       for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
-               u32 reg;
+	seq_printf(s, " bank %u:\n", bank);
+	for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
+		u32 reg;
 
-               for (reg = debug_ranges[bank].range[i].first;
-                       reg <= debug_ranges[bank].range[i].last;
-                       reg++) {
-                       u8 value;
-                       int err;
+		for (reg = debug_ranges[bank].range[i].first;
+			reg <= debug_ranges[bank].range[i].last;
+			reg++) {
+			u8 value;
+			int err;
 
-                       err = abx500_get_register_interruptible(dev,
-                               (u8)bank, (u8)reg, &value);
-                       if (err < 0) {
-                               dev_err(dev, "ab->read fail %d\n", err);
-                               return err;
-                       }
+			err = abx500_get_register_interruptible(dev,
+				(u8)bank, (u8)reg, &value);
+			if (err < 0) {
+				dev_err(dev, "ab->read fail %d\n", err);
+				return err;
+			}
 
-                       err = seq_printf(s, "  [%u/0x%02X]: 0x%02X\n", bank,
-                               reg, value);
-                       if (err < 0) {
-                               dev_err(dev, "seq_printf overflow\n");
-                               /* Error is not returned here since
-                                * the output is wanted in any case */
-                               return 0;
-                       }
-               }
-       }
-       return 0;
+			err = seq_printf(s, "  [%u/0x%02X]: 0x%02X\n", bank,
+				reg, value);
+			if (err < 0) {
+				dev_err(dev, "seq_printf overflow\n");
+				/* Error is not returned here since
+				 * the output is wanted in any case */
+				return 0;
+			}
+		}
+	}
+	return 0;
 }
 
 static int ab8500_registers_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_registers_print, inode->i_private);
+	return single_open(file, ab8500_registers_print, inode->i_private);
 }
 
 static const struct file_operations ab8500_registers_fops = {
-       .open = ab8500_registers_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_registers_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static int ab8500_bank_print(struct seq_file *s, void *p)
 {
-       return seq_printf(s, "%d\n", debug_bank);
+	return seq_printf(s, "%d\n", debug_bank);
 }
 
 static int ab8500_bank_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_bank_print, inode->i_private);
+	return single_open(file, ab8500_bank_print, inode->i_private);
 }
 
 static ssize_t ab8500_bank_write(struct file *file,
-       const char __user *user_buf,
-       size_t count, loff_t *ppos)
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
 {
-       struct device *dev = ((struct seq_file *)(file->private_data))->private;
-       char buf[32];
-       int buf_size;
-       unsigned long user_bank;
-       int err;
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	char buf[32];
+	int buf_size;
+	unsigned long user_bank;
+	int err;
 
-       /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf) - 1));
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       buf[buf_size] = 0;
+	/* Get userspace string and assure termination */
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
 
-       err = strict_strtoul(buf, 0, &user_bank);
-       if (err)
-               return -EINVAL;
+	err = strict_strtoul(buf, 0, &user_bank);
+	if (err)
+		return -EINVAL;
 
-       if (user_bank >= AB8500_NUM_BANKS) {
-               dev_err(dev, "debugfs error input > number of banks\n");
-               return -EINVAL;
-       }
+	if (user_bank >= AB8500_NUM_BANKS) {
+		dev_err(dev, "debugfs error input > number of banks\n");
+		return -EINVAL;
+	}
 
-       debug_bank = user_bank;
+	debug_bank = user_bank;
 
-       return buf_size;
+	return buf_size;
 }
 
 static int ab8500_address_print(struct seq_file *s, void *p)
 {
-       return seq_printf(s, "0x%02X\n", debug_address);
+	return seq_printf(s, "0x%02X\n", debug_address);
 }
 
 static int ab8500_address_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_address_print, inode->i_private);
+	return single_open(file, ab8500_address_print, inode->i_private);
 }
 
 static ssize_t ab8500_address_write(struct file *file,
-       const char __user *user_buf,
-       size_t count, loff_t *ppos)
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
 {
-       struct device *dev = ((struct seq_file *)(file->private_data))->private;
-       char buf[32];
-       int buf_size;
-       unsigned long user_address;
-       int err;
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	char buf[32];
+	int buf_size;
+	unsigned long user_address;
+	int err;
 
-       /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf) - 1));
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       buf[buf_size] = 0;
+	/* Get userspace string and assure termination */
+	buf_size = min(count, (sizeof(buf) - 1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
 
-       err = strict_strtoul(buf, 0, &user_address);
-       if (err)
-               return -EINVAL;
-       if (user_address > 0xff) {
-               dev_err(dev, "debugfs error input > 0xff\n");
-               return -EINVAL;
-       }
-       debug_address = user_address;
-       return buf_size;
+	err = strict_strtoul(buf, 0, &user_address);
+	if (err)
+		return -EINVAL;
+	if (user_address > 0xff) {
+		dev_err(dev, "debugfs error input > 0xff\n");
+		return -EINVAL;
+	}
+	debug_address = user_address;
+	return buf_size;
 }
 
 static int ab8500_val_print(struct seq_file *s, void *p)
 {
-       struct device *dev = s->private;
-       int ret;
-       u8 regvalue;
+	struct device *dev = s->private;
+	int ret;
+	u8 regvalue;
 
-       ret = abx500_get_register_interruptible(dev,
-               (u8)debug_bank, (u8)debug_address, &regvalue);
-       if (ret < 0) {
-               dev_err(dev, "abx500_get_reg fail %d, %d\n",
-                       ret, __LINE__);
-               return -EINVAL;
-       }
-       seq_printf(s, "0x%02X\n", regvalue);
+	ret = abx500_get_register_interruptible(dev,
+		(u8)debug_bank, (u8)debug_address, &regvalue);
+	if (ret < 0) {
+		dev_err(dev, "abx500_get_reg fail %d, %d\n",
+			ret, __LINE__);
+		return -EINVAL;
+	}
+	seq_printf(s, "0x%02X\n", regvalue);
 
-       return 0;
+	return 0;
 }
 
 static int ab8500_val_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, ab8500_val_print, inode->i_private);
+	return single_open(file, ab8500_val_print, inode->i_private);
 }
 
 static ssize_t ab8500_val_write(struct file *file,
-       const char __user *user_buf,
-       size_t count, loff_t *ppos)
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
 {
-       struct device *dev = ((struct seq_file *)(file->private_data))->private;
-       char buf[32];
-       int buf_size;
-       unsigned long user_val;
-       int err;
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	char buf[32];
+	int buf_size;
+	unsigned long user_val;
+	int err;
 
-       /* Get userspace string and assure termination */
-       buf_size = min(count, (sizeof(buf)-1));
-       if (copy_from_user(buf, user_buf, buf_size))
-               return -EFAULT;
-       buf[buf_size] = 0;
+	/* Get userspace string and assure termination */
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	buf[buf_size] = 0;
 
-       err = strict_strtoul(buf, 0, &user_val);
-       if (err)
-               return -EINVAL;
-       if (user_val > 0xff) {
-               dev_err(dev, "debugfs error input > 0xff\n");
-               return -EINVAL;
-       }
-       err = abx500_set_register_interruptible(dev,
-               (u8)debug_bank, debug_address, (u8)user_val);
-       if (err < 0) {
-               printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
-               return -EINVAL;
-       }
+	err = strict_strtoul(buf, 0, &user_val);
+	if (err)
+		return -EINVAL;
+	if (user_val > 0xff) {
+		dev_err(dev, "debugfs error input > 0xff\n");
+		return -EINVAL;
+	}
+	err = abx500_set_register_interruptible(dev,
+		(u8)debug_bank, debug_address, (u8)user_val);
+	if (err < 0) {
+		printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
+		return -EINVAL;
+	}
 
-       return buf_size;
+	return buf_size;
 }
 
 static const struct file_operations ab8500_bank_fops = {
-       .open = ab8500_bank_open,
-       .write = ab8500_bank_write,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_bank_open,
+	.write = ab8500_bank_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static const struct file_operations ab8500_address_fops = {
-       .open = ab8500_address_open,
-       .write = ab8500_address_write,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_address_open,
+	.write = ab8500_address_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static const struct file_operations ab8500_val_fops = {
-       .open = ab8500_val_open,
-       .write = ab8500_val_write,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
+	.open = ab8500_val_open,
+	.write = ab8500_val_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
 };
 
 static struct dentry *ab8500_dir;
@@ -572,77 +572,77 @@
 
 static int __devinit ab8500_debug_probe(struct platform_device *plf)
 {
-       debug_bank = AB8500_MISC;
-       debug_address = AB8500_REV_REG & 0x00FF;
+	debug_bank = AB8500_MISC;
+	debug_address = AB8500_REV_REG & 0x00FF;
 
-       ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
-       if (!ab8500_dir)
-               goto exit_no_debugfs;
+	ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
+	if (!ab8500_dir)
+		goto exit_no_debugfs;
 
-       ab8500_reg_file = debugfs_create_file("all-bank-registers",
-               S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
-       if (!ab8500_reg_file)
-               goto exit_destroy_dir;
+	ab8500_reg_file = debugfs_create_file("all-bank-registers",
+		S_IRUGO, ab8500_dir, &plf->dev, &ab8500_registers_fops);
+	if (!ab8500_reg_file)
+		goto exit_destroy_dir;
 
-       ab8500_bank_file = debugfs_create_file("register-bank",
-               (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
-       if (!ab8500_bank_file)
-               goto exit_destroy_reg;
+	ab8500_bank_file = debugfs_create_file("register-bank",
+		(S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_bank_fops);
+	if (!ab8500_bank_file)
+		goto exit_destroy_reg;
 
-       ab8500_address_file = debugfs_create_file("register-address",
-               (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
-               &ab8500_address_fops);
-       if (!ab8500_address_file)
-               goto exit_destroy_bank;
+	ab8500_address_file = debugfs_create_file("register-address",
+		(S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev,
+		&ab8500_address_fops);
+	if (!ab8500_address_file)
+		goto exit_destroy_bank;
 
-       ab8500_val_file = debugfs_create_file("register-value",
-               (S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
-       if (!ab8500_val_file)
-               goto exit_destroy_address;
+	ab8500_val_file = debugfs_create_file("register-value",
+		(S_IRUGO | S_IWUGO), ab8500_dir, &plf->dev, &ab8500_val_fops);
+	if (!ab8500_val_file)
+		goto exit_destroy_address;
 
-       return 0;
+	return 0;
 
 exit_destroy_address:
-       debugfs_remove(ab8500_address_file);
+	debugfs_remove(ab8500_address_file);
 exit_destroy_bank:
-       debugfs_remove(ab8500_bank_file);
+	debugfs_remove(ab8500_bank_file);
 exit_destroy_reg:
-       debugfs_remove(ab8500_reg_file);
+	debugfs_remove(ab8500_reg_file);
 exit_destroy_dir:
-       debugfs_remove(ab8500_dir);
+	debugfs_remove(ab8500_dir);
 exit_no_debugfs:
-       dev_err(&plf->dev, "failed to create debugfs entries.\n");
-       return -ENOMEM;
+	dev_err(&plf->dev, "failed to create debugfs entries.\n");
+	return -ENOMEM;
 }
 
 static int __devexit ab8500_debug_remove(struct platform_device *plf)
 {
-       debugfs_remove(ab8500_val_file);
-       debugfs_remove(ab8500_address_file);
-       debugfs_remove(ab8500_bank_file);
-       debugfs_remove(ab8500_reg_file);
-       debugfs_remove(ab8500_dir);
+	debugfs_remove(ab8500_val_file);
+	debugfs_remove(ab8500_address_file);
+	debugfs_remove(ab8500_bank_file);
+	debugfs_remove(ab8500_reg_file);
+	debugfs_remove(ab8500_dir);
 
-       return 0;
+	return 0;
 }
 
 static struct platform_driver ab8500_debug_driver = {
-       .driver = {
-               .name = "ab8500-debug",
-               .owner = THIS_MODULE,
-       },
-       .probe  = ab8500_debug_probe,
-       .remove = __devexit_p(ab8500_debug_remove)
+	.driver = {
+		.name = "ab8500-debug",
+		.owner = THIS_MODULE,
+	},
+	.probe  = ab8500_debug_probe,
+	.remove = __devexit_p(ab8500_debug_remove)
 };
 
 static int __init ab8500_debug_init(void)
 {
-       return platform_driver_register(&ab8500_debug_driver);
+	return platform_driver_register(&ab8500_debug_driver);
 }
 
 static void __exit ab8500_debug_exit(void)
 {
-       platform_driver_unregister(&ab8500_debug_driver);
+	platform_driver_unregister(&ab8500_debug_driver);
 }
 subsys_initcall(ab8500_debug_init);
 module_exit(ab8500_debug_exit);
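For reference, a minimal userspace sketch of how the debugfs files created above might be exercised. It assumes debugfs is mounted at /sys/kernel/debug and that AB8500_NAME_STRING expands to "ab8500" (the macro's value is not visible in this hunk), so the path and the bank/register numbers below are purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative path; the directory name comes from AB8500_NAME_STRING. */
#define AB8500_DBG "/sys/kernel/debug/ab8500/"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	char buf[16];
	int fd, n;

	/* Placeholder bank/register numbers; strict_strtoul(buf, 0, ...)
	 * in the driver accepts decimal or 0x-prefixed input. */
	if (write_str(AB8500_DBG "register-bank", "0x10") ||
	    write_str(AB8500_DBG "register-address", "0x80"))
		return 1;

	fd = open(AB8500_DBG "register-value", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return 1;
	buf[n] = '\0';
	printf("register value: %s", buf);
	return 0;
}

Writing to register-value instead of reading it goes through ab8500_val_write() and updates the selected register, and reading all-bank-registers dumps every readable range of the currently selected bank.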
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
deleted file mode 100644
index b165342..0000000
--- a/drivers/mfd/ab8500-spi.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * License Terms: GNU General Public License v2
- * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/spi/spi.h>
-#include <linux/mfd/ab8500.h>
-
-/*
- * This funtion writes to any AB8500 registers using
- * SPI protocol &  before it writes it packs the data
- * in the below 24 bit frame format
- *
- *	 *|------------------------------------|
- *	 *| 23|22...18|17.......10|9|8|7......0|
- *	 *| r/w  bank       adr          data  |
- *	 * ------------------------------------
- *
- * This function shouldn't be called from interrupt
- * context
- */
-static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
-	struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
-					      dev);
-	unsigned long spi_data = addr << 10 | data;
-	struct spi_transfer xfer;
-	struct spi_message msg;
-
-	ab8500->tx_buf[0] = spi_data;
-	ab8500->rx_buf[0] = 0;
-
-	xfer.tx_buf	= ab8500->tx_buf;
-	xfer.rx_buf	= NULL;
-	xfer.len	= sizeof(unsigned long);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	return spi_sync(spi, &msg);
-}
-
-static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
-{
-	struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
-					      dev);
-	unsigned long spi_data = 1 << 23 | addr << 10;
-	struct spi_transfer xfer;
-	struct spi_message msg;
-	int ret;
-
-	ab8500->tx_buf[0] = spi_data;
-	ab8500->rx_buf[0] = 0;
-
-	xfer.tx_buf	= ab8500->tx_buf;
-	xfer.rx_buf	= ab8500->rx_buf;
-	xfer.len	= sizeof(unsigned long);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	ret = spi_sync(spi, &msg);
-	if (!ret)
-		/*
-		 * Only the 8 lowermost bytes are
-		 * defined with value, the rest may
-		 * vary depending on chip/board noise.
-		 */
-		ret = ab8500->rx_buf[0] & 0xFFU;
-
-	return ret;
-}
-
-static int __devinit ab8500_spi_probe(struct spi_device *spi)
-{
-	struct ab8500 *ab8500;
-	int ret;
-
-	spi->bits_per_word = 24;
-	ret = spi_setup(spi);
-	if (ret < 0)
-		return ret;
-
-	ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
-	if (!ab8500)
-		return -ENOMEM;
-
-	ab8500->dev = &spi->dev;
-	ab8500->irq = spi->irq;
-
-	ab8500->read = ab8500_spi_read;
-	ab8500->write = ab8500_spi_write;
-
-	spi_set_drvdata(spi, ab8500);
-
-	ret = ab8500_init(ab8500);
-	if (ret)
-		kfree(ab8500);
-
-	return ret;
-}
-
-static int __devexit ab8500_spi_remove(struct spi_device *spi)
-{
-	struct ab8500 *ab8500 = spi_get_drvdata(spi);
-
-	ab8500_exit(ab8500);
-	kfree(ab8500);
-
-	return 0;
-}
-
-static struct spi_driver ab8500_spi_driver = {
-	.driver = {
-		.name = "ab8500-spi",
-		.owner = THIS_MODULE,
-	},
-	.probe	= ab8500_spi_probe,
-	.remove	= __devexit_p(ab8500_spi_remove)
-};
-
-static int __init ab8500_spi_init(void)
-{
-	return spi_register_driver(&ab8500_spi_driver);
-}
-subsys_initcall(ab8500_spi_init);
-
-static void __exit ab8500_spi_exit(void)
-{
-	spi_unregister_driver(&ab8500_spi_driver);
-}
-module_exit(ab8500_spi_exit);
-
-MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
-MODULE_DESCRIPTION("AB8500 SPI");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 7de708d..6a1f940 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -57,7 +57,7 @@
 		.rate = _rate,			\
 	}
 
-struct asic3_clk asic3_clk_init[] __initdata = {
+static struct asic3_clk asic3_clk_init[] __initdata = {
 	INIT_CDEX(SPI, 0),
 	INIT_CDEX(OWM, 5000000),
 	INIT_CDEX(PWM0, 0),
@@ -102,7 +102,7 @@
 			(reg >> asic->bus_shift));
 }
 
-void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
+static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
 {
 	unsigned long flags;
 	u32 val;
@@ -226,14 +226,14 @@
 	return (irq - asic->irq_base) & 0xf;
 }
 
-static void asic3_mask_gpio_irq(unsigned int irq)
+static void asic3_mask_gpio_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	u32 val, bank, index;
 	unsigned long flags;
 
-	bank = asic3_irq_to_bank(asic, irq);
-	index = asic3_irq_to_index(asic, irq);
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
 
 	spin_lock_irqsave(&asic->lock, flags);
 	val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -242,9 +242,9 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_mask_irq(unsigned int irq)
+static void asic3_mask_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	int regval;
 	unsigned long flags;
 
@@ -254,7 +254,7 @@
 				     ASIC3_INTR_INT_MASK);
 
 	regval &= ~(ASIC3_INTMASK_MASK0 <<
-		    (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+		    (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
 
 	asic3_write_register(asic,
 			     ASIC3_INTR_BASE +
@@ -263,14 +263,14 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_unmask_gpio_irq(unsigned int irq)
+static void asic3_unmask_gpio_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	u32 val, bank, index;
 	unsigned long flags;
 
-	bank = asic3_irq_to_bank(asic, irq);
-	index = asic3_irq_to_index(asic, irq);
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
 
 	spin_lock_irqsave(&asic->lock, flags);
 	val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
@@ -279,9 +279,9 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static void asic3_unmask_irq(unsigned int irq)
+static void asic3_unmask_irq(struct irq_data *data)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	int regval;
 	unsigned long flags;
 
@@ -291,7 +291,7 @@
 				     ASIC3_INTR_INT_MASK);
 
 	regval |= (ASIC3_INTMASK_MASK0 <<
-		   (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+		   (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
 
 	asic3_write_register(asic,
 			     ASIC3_INTR_BASE +
@@ -300,15 +300,15 @@
 	spin_unlock_irqrestore(&asic->lock, flags);
 }
 
-static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
+static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
 {
-	struct asic3 *asic = get_irq_chip_data(irq);
+	struct asic3 *asic = irq_data_get_irq_chip_data(data);
 	u32 bank, index;
 	u16 trigger, level, edge, bit;
 	unsigned long flags;
 
-	bank = asic3_irq_to_bank(asic, irq);
-	index = asic3_irq_to_index(asic, irq);
+	bank = asic3_irq_to_bank(asic, data->irq);
+	index = asic3_irq_to_index(asic, data->irq);
 	bit = 1<<index;
 
 	spin_lock_irqsave(&asic->lock, flags);
@@ -318,7 +318,7 @@
 				   bank + ASIC3_GPIO_EDGE_TRIGGER);
 	trigger = asic3_read_register(asic,
 				      bank + ASIC3_GPIO_TRIGGER_TYPE);
-	asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
+	asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit;
 
 	if (type == IRQ_TYPE_EDGE_RISING) {
 		trigger |= bit;
@@ -328,11 +328,11 @@
 		edge &= ~bit;
 	} else if (type == IRQ_TYPE_EDGE_BOTH) {
 		trigger |= bit;
-		if (asic3_gpio_get(&asic->gpio, irq - asic->irq_base))
+		if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base))
 			edge &= ~bit;
 		else
 			edge |= bit;
-		asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
+		asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit;
 	} else if (type == IRQ_TYPE_LEVEL_LOW) {
 		trigger &= ~bit;
 		level &= ~bit;
@@ -359,17 +359,17 @@
 
 static struct irq_chip asic3_gpio_irq_chip = {
 	.name		= "ASIC3-GPIO",
-	.ack		= asic3_mask_gpio_irq,
-	.mask		= asic3_mask_gpio_irq,
-	.unmask		= asic3_unmask_gpio_irq,
-	.set_type	= asic3_gpio_irq_type,
+	.irq_ack	= asic3_mask_gpio_irq,
+	.irq_mask	= asic3_mask_gpio_irq,
+	.irq_unmask	= asic3_unmask_gpio_irq,
+	.irq_set_type	= asic3_gpio_irq_type,
 };
 
 static struct irq_chip asic3_irq_chip = {
 	.name		= "ASIC3",
-	.ack		= asic3_mask_irq,
-	.mask		= asic3_mask_irq,
-	.unmask		= asic3_unmask_irq,
+	.irq_ack	= asic3_mask_irq,
+	.irq_mask	= asic3_mask_irq,
+	.irq_unmask	= asic3_unmask_irq,
 };
 
 static int __init asic3_irq_probe(struct platform_device *pdev)
@@ -635,7 +635,7 @@
 	},
 	{
 		.start = ASIC3_IRQ_OWM,
-		.start = ASIC3_IRQ_OWM,
+		.end   = ASIC3_IRQ_OWM,
 		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 	},
 };
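The asic3 hunks above follow the same genirq conversion applied to ezx-pcap, htc-egpio, htc-i2cpld, jz4740-adc, max8925, max8998 and stmpe later in this patch: irq_chip callbacks now take a struct irq_data pointer instead of a bare irq number, the chip context comes from irq_data_get_irq_chip_data(), and the struct irq_chip fields gain an irq_ prefix. A condensed, driver-agnostic sketch of the before/after shape (foo_chip and its mask cache are made up for illustration):

#include <linux/irq.h>
#include <linux/bitops.h>

/* Stand-in for the per-driver context (asic3, pcap, stmpe, ...). */
struct foo_chip {
	unsigned int irq_base;
	u32 irq_mask_cache;
};

/* Old style: the callback receives only the Linux irq number. */
static void foo_irq_mask_old(unsigned int irq)
{
	struct foo_chip *chip = get_irq_chip_data(irq);

	chip->irq_mask_cache |= BIT(irq - chip->irq_base);
}

/* New style: struct irq_data carries both the irq number and the
 * chip data, matching the conversions in this patch. */
static void foo_irq_mask(struct irq_data *data)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(data);

	chip->irq_mask_cache |= BIT(data->irq - chip->irq_base);
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_chip *chip = irq_data_get_irq_chip_data(data);

	chip->irq_mask_cache &= ~BIT(data->irq - chip->irq_base);
}

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};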
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
new file mode 100644
index 0000000..59ca6f1
--- /dev/null
+++ b/drivers/mfd/cs5535-mfd.c
@@ -0,0 +1,151 @@
+/*
+ * cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges
+ *
+ * The CS5535 and CS5536 have an ISA bridge on the PCI bus that is
+ * used for accessing GPIOs, MFGPTs, ACPI, etc.  Each subdevice has
+ * an IO range that's specified in a single BAR.  The BAR order is
+ * hardcoded in the CS553x specifications.
+ *
+ * Copyright (c) 2010  Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define DRV_NAME "cs5535-mfd"
+
+enum cs5535_mfd_bars {
+	SMB_BAR = 0,
+	GPIO_BAR = 1,
+	MFGPT_BAR = 2,
+	PMS_BAR = 4,
+	ACPI_BAR = 5,
+	NR_BARS,
+};
+
+static __devinitdata struct resource cs5535_mfd_resources[NR_BARS];
+
+static __devinitdata struct mfd_cell cs5535_mfd_cells[] = {
+	{
+		.id = SMB_BAR,
+		.name = "cs5535-smb",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[SMB_BAR],
+	},
+	{
+		.id = GPIO_BAR,
+		.name = "cs5535-gpio",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[GPIO_BAR],
+	},
+	{
+		.id = MFGPT_BAR,
+		.name = "cs5535-mfgpt",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[MFGPT_BAR],
+	},
+	{
+		.id = PMS_BAR,
+		.name = "cs5535-pms",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[PMS_BAR],
+	},
+	{
+		.id = ACPI_BAR,
+		.name = "cs5535-acpi",
+		.num_resources = 1,
+		.resources = &cs5535_mfd_resources[ACPI_BAR],
+	},
+};
+
+static int __devinit cs5535_mfd_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	int err, i;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	/* fill in IO range for each cell; subdrivers handle the region */
+	for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
+		int bar = cs5535_mfd_cells[i].id;
+		struct resource *r = &cs5535_mfd_resources[bar];
+
+		r->flags = IORESOURCE_IO;
+		r->start = pci_resource_start(pdev, bar);
+		r->end = pci_resource_end(pdev, bar);
+
+		/* id is used for temporarily storing BAR; unset it now */
+		cs5535_mfd_cells[i].id = 0;
+	}
+
+	err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
+			ARRAY_SIZE(cs5535_mfd_cells), NULL, 0);
+	if (err) {
+		dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
+		goto err_disable;
+	}
+
+	dev_info(&pdev->dev, "%zu devices registered.\n",
+			ARRAY_SIZE(cs5535_mfd_cells));
+
+	return 0;
+
+err_disable:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
+{
+	mfd_remove_devices(&pdev->dev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_device_id cs5535_mfd_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
+
+static struct pci_driver cs5535_mfd_drv = {
+	.name = DRV_NAME,
+	.id_table = cs5535_mfd_pci_tbl,
+	.probe = cs5535_mfd_probe,
+	.remove = __devexit_p(cs5535_mfd_remove),
+};
+
+static int __init cs5535_mfd_init(void)
+{
+	return pci_register_driver(&cs5535_mfd_drv);
+}
+
+static void __exit cs5535_mfd_exit(void)
+{
+	pci_unregister_driver(&cs5535_mfd_drv);
+}
+
+module_init(cs5535_mfd_init);
+module_exit(cs5535_mfd_exit);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
+MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
+MODULE_LICENSE("GPL");
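Each mfd_cell above becomes a platform device whose single IO resource carries the BAR range filled in by cs5535_mfd_probe(); as the comment in the probe notes, the subdrivers themselves handle the region. A hypothetical subdriver for the "cs5535-gpio" cell would pick that range up roughly as follows (a sketch only; the real GPIO driver lives elsewhere in the tree and the registration boilerplate is omitted):

#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
{
	struct resource *res;

	/* The MFD core hands us the IO range taken from GPIO_BAR. */
	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)
		return -ENODEV;

	if (!request_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	/* ... program the GPIO block via inl()/outl() at res->start ... */
	return 0;
}

static struct platform_driver cs5535_gpio_driver = {
	.driver = {
		.name = "cs5535-gpio",	/* must match the mfd_cell name */
		.owner = THIS_MODULE,
	},
	.probe = cs5535_gpio_probe,
};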
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index c2b698d..9e2d8dd 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -144,26 +144,26 @@
 }
 EXPORT_SYMBOL_GPL(pcap_to_irq);
 
-static void pcap_mask_irq(unsigned int irq)
+static void pcap_mask_irq(struct irq_data *d)
 {
-	struct pcap_chip *pcap = get_irq_chip_data(irq);
+	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
 
-	pcap->msr |= 1 << irq_to_pcap(pcap, irq);
+	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
 	queue_work(pcap->workqueue, &pcap->msr_work);
 }
 
-static void pcap_unmask_irq(unsigned int irq)
+static void pcap_unmask_irq(struct irq_data *d)
 {
-	struct pcap_chip *pcap = get_irq_chip_data(irq);
+	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);
 
-	pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
+	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
 	queue_work(pcap->workqueue, &pcap->msr_work);
 }
 
 static struct irq_chip pcap_irq_chip = {
-	.name	= "pcap",
-	.mask	= pcap_mask_irq,
-	.unmask	= pcap_unmask_irq,
+	.name		= "pcap",
+	.irq_mask	= pcap_mask_irq,
+	.irq_unmask	= pcap_unmask_irq,
 };
 
 static void pcap_msr_work(struct work_struct *work)
@@ -199,8 +199,7 @@
 			if (service & 1) {
 				struct irq_desc *desc = irq_to_desc(irq);
 
-				if (WARN(!desc, KERN_WARNING
-						"Invalid PCAP IRQ %d\n", irq))
+				if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq))
 					break;
 
 				if (desc->status & IRQ_DISABLED)
@@ -218,7 +217,7 @@
 {
 	struct pcap_chip *pcap = get_irq_data(irq);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	queue_work(pcap->workqueue, &pcap->isr_work);
 	return;
 }
@@ -282,7 +281,7 @@
 	mutex_lock(&pcap->adc_mutex);
 	req = pcap->adc_queue[pcap->adc_head];
 
-	if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
+	if (WARN(!req, "adc irq without pending request\n")) {
 		mutex_unlock(&pcap->adc_mutex);
 		return IRQ_HANDLED;
 	}
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index d3e74f8..d00b6d1 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -70,31 +70,32 @@
 			ei->ack_write, ei->ack_register << ei->bus_shift);
 }
 
-static void egpio_ack(unsigned int irq)
+static void egpio_ack(struct irq_data *data)
 {
 }
 
 /* There does not appear to be a way to proactively mask interrupts
  * on the egpio chip itself.  So, we simply ignore interrupts that
  * aren't desired. */
-static void egpio_mask(unsigned int irq)
+static void egpio_mask(struct irq_data *data)
 {
-	struct egpio_info *ei = get_irq_chip_data(irq);
-	ei->irqs_enabled &= ~(1 << (irq - ei->irq_start));
-	pr_debug("EGPIO mask %d %04x\n", irq, ei->irqs_enabled);
+	struct egpio_info *ei = irq_data_get_irq_chip_data(data);
+	ei->irqs_enabled &= ~(1 << (data->irq - ei->irq_start));
+	pr_debug("EGPIO mask %d %04x\n", data->irq, ei->irqs_enabled);
 }
-static void egpio_unmask(unsigned int irq)
+
+static void egpio_unmask(struct irq_data *data)
 {
-	struct egpio_info *ei = get_irq_chip_data(irq);
-	ei->irqs_enabled |= 1 << (irq - ei->irq_start);
-	pr_debug("EGPIO unmask %d %04x\n", irq, ei->irqs_enabled);
+	struct egpio_info *ei = irq_data_get_irq_chip_data(data);
+	ei->irqs_enabled |= 1 << (data->irq - ei->irq_start);
+	pr_debug("EGPIO unmask %d %04x\n", data->irq, ei->irqs_enabled);
 }
 
 static struct irq_chip egpio_muxed_chip = {
-	.name   = "htc-egpio",
-	.ack	= egpio_ack,
-	.mask   = egpio_mask,
-	.unmask = egpio_unmask,
+	.name		= "htc-egpio",
+	.irq_ack	= egpio_ack,
+	.irq_mask	= egpio_mask,
+	.irq_unmask	= egpio_unmask,
 };
 
 static void egpio_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 594c9a8..296ad15 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -82,25 +82,25 @@
 /* There does not appear to be a way to proactively mask interrupts
  * on the htcpld chip itself.  So, we simply ignore interrupts that
  * aren't desired. */
-static void htcpld_mask(unsigned int irq)
+static void htcpld_mask(struct irq_data *data)
 {
-	struct htcpld_chip *chip = get_irq_chip_data(irq);
-	chip->irqs_enabled &= ~(1 << (irq - chip->irq_start));
-	pr_debug("HTCPLD mask %d %04x\n", irq, chip->irqs_enabled);
+	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
+	chip->irqs_enabled &= ~(1 << (data->irq - chip->irq_start));
+	pr_debug("HTCPLD mask %d %04x\n", data->irq, chip->irqs_enabled);
 }
-static void htcpld_unmask(unsigned int irq)
+static void htcpld_unmask(struct irq_data *data)
 {
-	struct htcpld_chip *chip = get_irq_chip_data(irq);
-	chip->irqs_enabled |= 1 << (irq - chip->irq_start);
-	pr_debug("HTCPLD unmask %d %04x\n", irq, chip->irqs_enabled);
+	struct htcpld_chip *chip = irq_data_get_irq_chip_data(data);
+	chip->irqs_enabled |= 1 << (data->irq - chip->irq_start);
+	pr_debug("HTCPLD unmask %d %04x\n", data->irq, chip->irqs_enabled);
 }
 
-static int htcpld_set_type(unsigned int irq, unsigned int flags)
+static int htcpld_set_type(struct irq_data *data, unsigned int flags)
 {
-	struct irq_desc *d = irq_to_desc(irq);
+	struct irq_desc *d = irq_to_desc(data->irq);
 
 	if (!d) {
-		pr_err("HTCPLD invalid IRQ: %d\n", irq);
+		pr_err("HTCPLD invalid IRQ: %d\n", data->irq);
 		return -EINVAL;
 	}
 
@@ -118,10 +118,10 @@
 }
 
 static struct irq_chip htcpld_muxed_chip = {
-	.name     = "htcpld",
-	.mask     = htcpld_mask,
-	.unmask   = htcpld_unmask,
-	.set_type = htcpld_set_type,
+	.name         = "htcpld",
+	.irq_mask     = htcpld_mask,
+	.irq_unmask   = htcpld_unmask,
+	.irq_set_type = htcpld_set_type,
 };
 
 /* To properly dispatch IRQ events, we need to read from the
@@ -235,7 +235,7 @@
  * and that work is scheduled in the set routine.  The kernel can then run
  * the I2C functions, which will sleep, in process context.
  */
-void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
+static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
 {
 	struct i2c_client *client;
 	struct htcpld_chip *chip_data;
@@ -259,7 +259,7 @@
 	schedule_work(&(chip_data->set_val_work));
 }
 
-void htcpld_chip_set_ni(struct work_struct *work)
+static void htcpld_chip_set_ni(struct work_struct *work)
 {
 	struct htcpld_chip *chip_data;
 	struct i2c_client *client;
@@ -269,7 +269,7 @@
 	i2c_smbus_read_byte_data(client, chip_data->cache_out);
 }
 
-int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
+static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
 {
 	struct htcpld_chip *chip_data;
 	int val = 0;
@@ -316,7 +316,7 @@
 	return (offset < chip->ngpio) ? 0 : -EINVAL;
 }
 
-int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
+static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
 {
 	struct htcpld_chip *chip_data;
 
@@ -328,7 +328,7 @@
 		return -EINVAL;
 }
 
-void htcpld_chip_reset(struct i2c_client *client)
+static void htcpld_chip_reset(struct i2c_client *client)
 {
 	struct htcpld_chip *chip_data = i2c_get_clientdata(client);
 	if (!chip_data)
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 9dd1b33..0cc5979 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -84,31 +84,30 @@
 	spin_unlock_irqrestore(&adc->lock, flags);
 }
 
-static void jz4740_adc_irq_mask(unsigned int irq)
+static void jz4740_adc_irq_mask(struct irq_data *data)
 {
-	struct jz4740_adc *adc = get_irq_chip_data(irq);
-	jz4740_adc_irq_set_masked(adc, irq, true);
+	struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+	jz4740_adc_irq_set_masked(adc, data->irq, true);
 }
 
-static void jz4740_adc_irq_unmask(unsigned int irq)
+static void jz4740_adc_irq_unmask(struct irq_data *data)
 {
-	struct jz4740_adc *adc = get_irq_chip_data(irq);
-	jz4740_adc_irq_set_masked(adc, irq, false);
+	struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+	jz4740_adc_irq_set_masked(adc, data->irq, false);
 }
 
-static void jz4740_adc_irq_ack(unsigned int irq)
+static void jz4740_adc_irq_ack(struct irq_data *data)
 {
-	struct jz4740_adc *adc = get_irq_chip_data(irq);
-
-	irq -= adc->irq_base;
+	struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
+	unsigned int irq = data->irq - adc->irq_base;
 	writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
 }
 
 static struct irq_chip jz4740_adc_irq_chip = {
 	.name = "jz4740-adc",
-	.mask = jz4740_adc_irq_mask,
-	.unmask = jz4740_adc_irq_unmask,
-	.ack = jz4740_adc_irq_ack,
+	.irq_mask = jz4740_adc_irq_mask,
+	.irq_unmask = jz4740_adc_irq_unmask,
+	.irq_ack = jz4740_adc_irq_ack,
 };
 
 static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 44695f5..0e998dc 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -407,16 +407,16 @@
 	return IRQ_HANDLED;
 }
 
-static void max8925_irq_lock(unsigned int irq)
+static void max8925_irq_lock(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void max8925_irq_sync_unlock(unsigned int irq)
+static void max8925_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
 	struct max8925_irq_data *irq_data;
 	static unsigned char cache_chg[2] = {0xff, 0xff};
 	static unsigned char cache_on[2] = {0xff, 0xff};
@@ -492,25 +492,25 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static void max8925_irq_enable(unsigned int irq)
+static void max8925_irq_enable(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
-	max8925_irqs[irq - chip->irq_base].enable
-		= max8925_irqs[irq - chip->irq_base].offs;
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+	max8925_irqs[data->irq - chip->irq_base].enable
+		= max8925_irqs[data->irq - chip->irq_base].offs;
 }
 
-static void max8925_irq_disable(unsigned int irq)
+static void max8925_irq_disable(struct irq_data *data)
 {
-	struct max8925_chip *chip = get_irq_chip_data(irq);
-	max8925_irqs[irq - chip->irq_base].enable = 0;
+	struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+	max8925_irqs[data->irq - chip->irq_base].enable = 0;
 }
 
 static struct irq_chip max8925_irq_chip = {
 	.name		= "max8925",
-	.bus_lock	= max8925_irq_lock,
-	.bus_sync_unlock = max8925_irq_sync_unlock,
-	.enable		= max8925_irq_enable,
-	.disable	= max8925_irq_disable,
+	.irq_bus_lock	= max8925_irq_lock,
+	.irq_bus_sync_unlock = max8925_irq_sync_unlock,
+	.irq_enable	= max8925_irq_enable,
+	.irq_disable	= max8925_irq_disable,
 };
 
 static int max8925_irq_init(struct max8925_chip *chip, int irq,
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 45bfe77..3903e1f 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -102,16 +102,16 @@
 	return &max8998_irqs[irq - max8998->irq_base];
 }
 
-static void max8998_irq_lock(unsigned int irq)
+static void max8998_irq_lock(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&max8998->irqlock);
 }
 
-static void max8998_irq_sync_unlock(unsigned int irq)
+static void max8998_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(max8998->irq_masks_cur); i++) {
@@ -129,28 +129,30 @@
 	mutex_unlock(&max8998->irqlock);
 }
 
-static void max8998_irq_unmask(unsigned int irq)
+static void max8998_irq_unmask(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
+							       data->irq);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
 
-static void max8998_irq_mask(unsigned int irq)
+static void max8998_irq_mask(struct irq_data *data)
 {
-	struct max8998_dev *max8998 = get_irq_chip_data(irq);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, irq);
+	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
+							       data->irq);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
 
 static struct irq_chip max8998_irq_chip = {
 	.name = "max8998",
-	.bus_lock = max8998_irq_lock,
-	.bus_sync_unlock = max8998_irq_sync_unlock,
-	.mask = max8998_irq_mask,
-	.unmask = max8998_irq_unmask,
+	.irq_bus_lock = max8998_irq_lock,
+	.irq_bus_sync_unlock = max8998_irq_sync_unlock,
+	.irq_mask = max8998_irq_mask,
+	.irq_unmask = max8998_irq_unmask,
 };
 
 static irqreturn_t max8998_irq_thread(int irq, void *data)
@@ -181,6 +183,13 @@
 	return IRQ_HANDLED;
 }
 
+int max8998_irq_resume(struct max8998_dev *max8998)
+{
+	if (max8998->irq && max8998->irq_base)
+		max8998_irq_thread(max8998->irq_base, max8998);
+	return 0;
+}
+
 int max8998_irq_init(struct max8998_dev *max8998)
 {
 	int i;
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index bb9977b..bbfe867 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -25,6 +25,8 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <linux/mutex.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/max8998.h>
@@ -40,6 +42,14 @@
 	},
 };
 
+static struct mfd_cell lp3974_devs[] = {
+	{
+		.name = "lp3974-pmic",
+	}, {
+		.name = "lp3974-rtc",
+	},
+};
+
 int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
 {
 	struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
@@ -135,6 +145,7 @@
 	if (pdata) {
 		max8998->ono = pdata->ono;
 		max8998->irq_base = pdata->irq_base;
+		max8998->wakeup = pdata->wakeup;
 	}
 	mutex_init(&max8998->iolock);
 
@@ -143,9 +154,23 @@
 
 	max8998_irq_init(max8998);
 
-	ret = mfd_add_devices(max8998->dev, -1,
-			      max8998_devs, ARRAY_SIZE(max8998_devs),
-			      NULL, 0);
+	pm_runtime_set_active(max8998->dev);
+
+	switch (id->driver_data) {
+	case TYPE_LP3974:
+		ret = mfd_add_devices(max8998->dev, -1,
+				lp3974_devs, ARRAY_SIZE(lp3974_devs),
+				NULL, 0);
+		break;
+	case TYPE_MAX8998:
+		ret = mfd_add_devices(max8998->dev, -1,
+				max8998_devs, ARRAY_SIZE(max8998_devs),
+				NULL, 0);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
 	if (ret < 0)
 		goto err;
 
@@ -178,10 +203,113 @@
 };
 MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
 
+static int max8998_suspend(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+	if (max8998->wakeup)
+		set_irq_wake(max8998->irq, 1);
+	return 0;
+}
+
+static int max8998_resume(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
+
+	if (max8998->wakeup)
+		set_irq_wake(max8998->irq, 0);
+	/*
+	 * In LP3974, if IRQ registers are not "read & clear"
+	 * when it's set during sleep, the interrupt becomes
+	 * disabled.
+	 */
+	return max8998_irq_resume(i2c_get_clientdata(i2c));
+}
+
+struct max8998_reg_dump {
+	u8	addr;
+	u8	val;
+};
+#define SAVE_ITEM(x)	{ .addr = (x), .val = 0x0, }
+struct max8998_reg_dump max8998_dump[] = {
+	SAVE_ITEM(MAX8998_REG_IRQM1),
+	SAVE_ITEM(MAX8998_REG_IRQM2),
+	SAVE_ITEM(MAX8998_REG_IRQM3),
+	SAVE_ITEM(MAX8998_REG_IRQM4),
+	SAVE_ITEM(MAX8998_REG_STATUSM1),
+	SAVE_ITEM(MAX8998_REG_STATUSM2),
+	SAVE_ITEM(MAX8998_REG_CHGR1),
+	SAVE_ITEM(MAX8998_REG_CHGR2),
+	SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
+	SAVE_ITEM(MAX8998_REG_LDO_ACTIVE_DISCHARGE1),
+	SAVE_ITEM(MAX8998_REG_BUCK_ACTIVE_DISCHARGE3),
+	SAVE_ITEM(MAX8998_REG_ONOFF1),
+	SAVE_ITEM(MAX8998_REG_ONOFF2),
+	SAVE_ITEM(MAX8998_REG_ONOFF3),
+	SAVE_ITEM(MAX8998_REG_ONOFF4),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE1),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE2),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE3),
+	SAVE_ITEM(MAX8998_REG_BUCK1_VOLTAGE4),
+	SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE1),
+	SAVE_ITEM(MAX8998_REG_BUCK2_VOLTAGE2),
+	SAVE_ITEM(MAX8998_REG_LDO2_LDO3),
+	SAVE_ITEM(MAX8998_REG_LDO4),
+	SAVE_ITEM(MAX8998_REG_LDO5),
+	SAVE_ITEM(MAX8998_REG_LDO6),
+	SAVE_ITEM(MAX8998_REG_LDO7),
+	SAVE_ITEM(MAX8998_REG_LDO8_LDO9),
+	SAVE_ITEM(MAX8998_REG_LDO10_LDO11),
+	SAVE_ITEM(MAX8998_REG_LDO12),
+	SAVE_ITEM(MAX8998_REG_LDO13),
+	SAVE_ITEM(MAX8998_REG_LDO14),
+	SAVE_ITEM(MAX8998_REG_LDO15),
+	SAVE_ITEM(MAX8998_REG_LDO16),
+	SAVE_ITEM(MAX8998_REG_LDO17),
+	SAVE_ITEM(MAX8998_REG_BKCHR),
+	SAVE_ITEM(MAX8998_REG_LBCNFG1),
+	SAVE_ITEM(MAX8998_REG_LBCNFG2),
+};
+/* Save registers before hibernation */
+static int max8998_freeze(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
+		max8998_read_reg(i2c, max8998_dump[i].addr,
+				&max8998_dump[i].val);
+
+	return 0;
+}
+
+/* Restore registers after hibernation */
+static int max8998_restore(struct device *dev)
+{
+	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(max8998_dump); i++)
+		max8998_write_reg(i2c, max8998_dump[i].addr,
+				max8998_dump[i].val);
+
+	return 0;
+}
+
+const struct dev_pm_ops max8998_pm = {
+	.suspend = max8998_suspend,
+	.resume = max8998_resume,
+	.freeze = max8998_freeze,
+	.restore = max8998_restore,
+};
+
 static struct i2c_driver max8998_i2c_driver = {
 	.driver = {
 		   .name = "max8998",
 		   .owner = THIS_MODULE,
+		   .pm = &max8998_pm,
 	},
 	.probe = max8998_i2c_probe,
 	.remove = max8998_i2c_remove,
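The probe above now dispatches on id->driver_data to register either the max8998 or the lp3974 cell set. The i2c_device_id table itself sits outside the context shown here (only its MODULE_DEVICE_TABLE() line is visible), so the following is merely an assumption about how those TYPE_* values would be supplied:

/* Hypothetical shape of the table referenced by
 * MODULE_DEVICE_TABLE(i2c, max8998_i2c_id); the real table may differ.
 * driver_data is what max8998_i2c_probe() switches on. */
static const struct i2c_device_id max8998_i2c_id[] = {
	{ "max8998", TYPE_MAX8998 },
	{ "lp3974", TYPE_LP3974 },
	{ }
};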
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index a2ac2ed..b9fcaf0 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -749,7 +749,7 @@
 	if (ret) {
 err_mask:
 err_revision:
-		mutex_unlock(&mc13xxx->lock);
+		mc13xxx_unlock(mc13xxx);
 		dev_set_drvdata(&spi->dev, NULL);
 		kfree(mc13xxx);
 		return ret;
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 4ba85bb..9cee8e7 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1259,7 +1259,7 @@
 	return 0;
 fail2:
 	free_irq(client->irq, menelaus);
-	flush_scheduled_work();
+	flush_work_sync(&menelaus->work);
 fail1:
 	kfree(menelaus);
 	return err;
@@ -1270,6 +1270,7 @@
 	struct menelaus_chip	*menelaus = i2c_get_clientdata(client);
 
 	free_irq(client->irq, menelaus);
+	flush_work_sync(&menelaus->work);
 	kfree(menelaus);
 	the_menelaus = NULL;
 	return 0;
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index ec99f68..d83ad0f 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
 #include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
 static int mfd_add_device(struct device *parent, int id,
@@ -82,6 +83,9 @@
 	if (ret)
 		goto fail_res;
 
+	if (cell->pm_runtime_no_callbacks)
+		pm_runtime_no_callbacks(&pdev->dev);
+
 	kfree(res);
 
 	return 0;
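
The pm_runtime_no_callbacks hook added to mfd-core above is consumed by MFD drivers simply by setting the flag in their mfd_cell, as the wm8994 cells further down in this diff do. A short sketch under that assumption; the cell name and the registration comment are hypothetical.

/* Illustrative only: an mfd_cell that leaves all runtime PM to its parent. */
#include <linux/mfd/core.h>

static struct mfd_cell foo_cells[] = {
	{
		.name = "foo-gpio",			/* hypothetical child device */
		.pm_runtime_no_callbacks = true,	/* parent owns runtime PM */
	},
};

/*
 * Registered from the parent driver's probe(), roughly:
 *	mfd_add_devices(parent_dev, -1, foo_cells, ARRAY_SIZE(foo_cells), NULL, 0);
 */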
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f9..0a7df44 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@
 	 */
 	mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
 
+	/*
+	 * All SDHI blocks support SDIO IRQ signalling.
+	 */
+	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
 	if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
 		priv->param_tx.slave_id = p->dma_slave_tx;
 		priv->param_rx.slave_id = p->dma_slave_rx;
 		priv->dma_priv.chan_priv_tx = &priv->param_tx;
 		priv->dma_priv.chan_priv_rx = &priv->param_rx;
+		priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
 		mmc_data->dma = &priv->dma_priv;
 	}
 
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bc9275c..5de3a76 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -26,7 +26,7 @@
 #include <linux/sm501-regs.h>
 #include <linux/serial_8250.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 struct sm501_device {
 	struct list_head		list;
@@ -745,11 +745,8 @@
 	int ret;
 
 	for (ptr = 0; ptr < pdev->num_resources; ptr++) {
-		printk(KERN_DEBUG "%s[%d] flags %08lx: %08llx..%08llx\n",
-		       pdev->name, ptr,
-		       pdev->resource[ptr].flags,
-		       (unsigned long long)pdev->resource[ptr].start,
-		       (unsigned long long)pdev->resource[ptr].end);
+		printk(KERN_DEBUG "%s[%d] %pR\n",
+		       pdev->name, ptr, &pdev->resource[ptr]);
 	}
 
 	ret = platform_device_register(pdev);
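
The sm501 change swaps an open-coded start/end/flags dump for the kernel's %pR printk extension, which formats a struct resource in one place. A minimal illustration of the idiom (the helper name is made up):

/* Sketch: letting %pR format a struct resource instead of hand-printing it. */
#include <linux/ioport.h>
#include <linux/kernel.h>

static void foo_show_resource(const struct resource *res)
{
	/* prints the resource range and flags in the kernel's standard form */
	printk(KERN_DEBUG "foo resource: %pR\n", res);
}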
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index b11487f..3e5732b 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -699,16 +699,16 @@
 	return IRQ_HANDLED;
 }
 
-static void stmpe_irq_lock(unsigned int irq)
+static void stmpe_irq_lock(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&stmpe->irq_lock);
 }
 
-static void stmpe_irq_sync_unlock(unsigned int irq)
+static void stmpe_irq_sync_unlock(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
 	struct stmpe_variant_info *variant = stmpe->variant;
 	int num = DIV_ROUND_UP(variant->num_irqs, 8);
 	int i;
@@ -727,20 +727,20 @@
 	mutex_unlock(&stmpe->irq_lock);
 }
 
-static void stmpe_irq_mask(unsigned int irq)
+static void stmpe_irq_mask(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
-	int offset = irq - stmpe->irq_base;
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - stmpe->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	stmpe->ier[regoffset] &= ~mask;
 }
 
-static void stmpe_irq_unmask(unsigned int irq)
+static void stmpe_irq_unmask(struct irq_data *data)
 {
-	struct stmpe *stmpe = get_irq_chip_data(irq);
-	int offset = irq - stmpe->irq_base;
+	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
+	int offset = data->irq - stmpe->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -749,10 +749,10 @@
 
 static struct irq_chip stmpe_irq_chip = {
 	.name			= "stmpe",
-	.bus_lock		= stmpe_irq_lock,
-	.bus_sync_unlock	= stmpe_irq_sync_unlock,
-	.mask			= stmpe_irq_mask,
-	.unmask			= stmpe_irq_unmask,
+	.irq_bus_lock		= stmpe_irq_lock,
+	.irq_bus_sync_unlock	= stmpe_irq_sync_unlock,
+	.irq_mask		= stmpe_irq_mask,
+	.irq_unmask		= stmpe_irq_unmask,
 };
 
 static int __devinit stmpe_irq_init(struct stmpe *stmpe)
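
The stmpe conversion above is the first of several in this series (t7l66xb, tc6393xb, tps6586x, twl4030, wm831x, wm8350 and wm8994 follow) that move irq_chip callbacks from the old unsigned int irq prototypes to the struct irq_data based ones: chip-private data now comes from irq_data_get_irq_chip_data() and the IRQ number from data->irq. A stand-alone sketch of the new callback shape, with a hypothetical "foo" chip:

/* Sketch of an irq_data-based mask callback mirroring the conversions above. */
#include <linux/bitops.h>
#include <linux/irq.h>

struct foo_chip {
	unsigned int irq_base;
	u32 mask_cache;
};

static void foo_irq_mask(struct irq_data *data)
{
	struct foo_chip *foo = irq_data_get_irq_chip_data(data);
	int offset = data->irq - foo->irq_base;	/* IRQ number now comes from irq_data */

	foo->mask_cache |= BIT(offset);
}

static struct irq_chip foo_irq_chip = {
	.name     = "foo",
	.irq_mask = foo_irq_mask,	/* was .mask taking an unsigned int irq */
};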
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 006c121..9caeb4a 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -199,37 +199,37 @@
 				generic_handle_irq(irq_base + i);
 }
 
-static void t7l66xb_irq_mask(unsigned int irq)
+static void t7l66xb_irq_mask(struct irq_data *data)
 {
-	struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+	struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
 	unsigned long			flags;
 	u8 imr;
 
 	spin_lock_irqsave(&t7l66xb->lock, flags);
 	imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
-	imr |= 1 << (irq - t7l66xb->irq_base);
+	imr |= 1 << (data->irq - t7l66xb->irq_base);
 	tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&t7l66xb->lock, flags);
 }
 
-static void t7l66xb_irq_unmask(unsigned int irq)
+static void t7l66xb_irq_unmask(struct irq_data *data)
 {
-	struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+	struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 	u8 imr;
 
 	spin_lock_irqsave(&t7l66xb->lock, flags);
 	imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
-	imr &= ~(1 << (irq - t7l66xb->irq_base));
+	imr &= ~(1 << (data->irq - t7l66xb->irq_base));
 	tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&t7l66xb->lock, flags);
 }
 
 static struct irq_chip t7l66xb_chip = {
-	.name	= "t7l66xb",
-	.ack	= t7l66xb_irq_mask,
-	.mask	= t7l66xb_irq_mask,
-	.unmask	= t7l66xb_irq_unmask,
+	.name		= "t7l66xb",
+	.irq_ack	= t7l66xb_irq_mask,
+	.irq_mask	= t7l66xb_irq_mask,
+	.irq_unmask	= t7l66xb_irq_unmask,
 };
 
 /*--------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1ea80d8a..9a23863 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -527,41 +527,41 @@
 		}
 }
 
-static void tc6393xb_irq_ack(unsigned int irq)
+static void tc6393xb_irq_ack(struct irq_data *data)
 {
 }
 
-static void tc6393xb_irq_mask(unsigned int irq)
+static void tc6393xb_irq_mask(struct irq_data *data)
 {
-	struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+	struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 	u8 imr;
 
 	spin_lock_irqsave(&tc6393xb->lock, flags);
 	imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
-	imr |= 1 << (irq - tc6393xb->irq_base);
+	imr |= 1 << (data->irq - tc6393xb->irq_base);
 	tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&tc6393xb->lock, flags);
 }
 
-static void tc6393xb_irq_unmask(unsigned int irq)
+static void tc6393xb_irq_unmask(struct irq_data *data)
 {
-	struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+	struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 	u8 imr;
 
 	spin_lock_irqsave(&tc6393xb->lock, flags);
 	imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
-	imr &= ~(1 << (irq - tc6393xb->irq_base));
+	imr &= ~(1 << (data->irq - tc6393xb->irq_base));
 	tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
 	spin_unlock_irqrestore(&tc6393xb->lock, flags);
 }
 
 static struct irq_chip tc6393xb_chip = {
-	.name	= "tc6393xb",
-	.ack	= tc6393xb_irq_ack,
-	.mask	= tc6393xb_irq_mask,
-	.unmask	= tc6393xb_irq_unmask,
+	.name		= "tc6393xb",
+	.irq_ack	= tc6393xb_irq_ack,
+	.irq_mask	= tc6393xb_irq_mask,
+	.irq_unmask	= tc6393xb_irq_unmask,
 };
 
 static void tc6393xb_attach_irq(struct platform_device *dev)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index d0016b6..93d5fdf 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -34,7 +34,7 @@
 
 #include <linux/i2c/tps65010.h>
 
-#include <asm/gpio.h>
+#include <linux/gpio.h>
 
 
 /*-------------------------------------------------------------------------*/
@@ -242,7 +242,7 @@
 	seq_printf(s, "mask2     %s\n", buf);
 	/* ignore ackint2 */
 
-	(void) schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
+	schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
 
 
 	/* VMAIN voltage, enable lowpower, etc */
@@ -400,7 +400,7 @@
 			&& (tps->chgstatus & (TPS_CHG_USB|TPS_CHG_AC)))
 		poll = 1;
 	if (poll)
-		(void) schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
+		schedule_delayed_work(&tps->work, POWER_POLL_DELAY);
 
 	/* also potentially gpio-in rise or fall */
 }
@@ -410,7 +410,7 @@
 {
 	struct tps65010		*tps;
 
-	tps = container_of(work, struct tps65010, work.work);
+	tps = container_of(to_delayed_work(work), struct tps65010, work);
 	mutex_lock(&tps->lock);
 
 	tps65010_interrupt(tps);
@@ -448,7 +448,7 @@
 
 	disable_irq_nosync(irq);
 	set_bit(FLAG_IRQ_ENABLE, &tps->flags);
-	(void) schedule_work(&tps->work.work);
+	schedule_delayed_work(&tps->work, 0);
 	return IRQ_HANDLED;
 }
 
@@ -527,8 +527,7 @@
 	}
 	if (client->irq > 0)
 		free_irq(client->irq, tps);
-	cancel_delayed_work(&tps->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&tps->work);
 	debugfs_remove(tps->file);
 	kfree(tps);
 	the_tps = NULL;
@@ -720,7 +719,7 @@
 			&& test_and_set_bit(
 				FLAG_VBUS_CHANGED, &the_tps->flags)) {
 		/* gadget drivers call this in_irq() */
-		(void) schedule_work(&the_tps->work.work);
+		schedule_delayed_work(&the_tps->work, 0);
 	}
 	local_irq_restore(flags);
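
The tps65010 hunks above convert the driver to the plain delayed-work API: the handler recovers its context via to_delayed_work(), immediate runs are queued with schedule_delayed_work(..., 0), and teardown uses cancel_delayed_work_sync() rather than cancel plus flush_scheduled_work(). A hedged sketch of those idioms with a hypothetical "foo" driver:

/* Illustrative delayed-work usage; names are made up. */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo {
	struct delayed_work work;
	int polls;
};

static void foo_work_fn(struct work_struct *work)
{
	struct foo *foo = container_of(to_delayed_work(work), struct foo, work);

	foo->polls++;	/* ...poll the hardware, re-arm if needed... */
}

/*
 * Immediate kick (e.g. from an IRQ handler):	schedule_delayed_work(&foo->work, 0);
 * Safe teardown in remove():			cancel_delayed_work_sync(&foo->work);
 */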
 
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b4931ab..627cf57 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -46,8 +46,6 @@
 
 /* device id */
 #define TPS6586X_VERSIONCRC	0xcd
-#define TPS658621A_VERSIONCRC	0x15
-#define TPS658621C_VERSIONCRC	0x2c
 
 struct tps6586x_irq_data {
 	u8	mask_reg;
@@ -325,37 +323,37 @@
 	return device_for_each_child(tps6586x->dev, NULL, __remove_subdev);
 }
 
-static void tps6586x_irq_lock(unsigned int irq)
+static void tps6586x_irq_lock(struct irq_data *data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&tps6586x->irq_lock);
 }
 
-static void tps6586x_irq_enable(unsigned int irq)
+static void tps6586x_irq_enable(struct irq_data *irq_data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
-	unsigned int __irq = irq - tps6586x->irq_base;
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
+	unsigned int __irq = irq_data->irq - tps6586x->irq_base;
 	const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
 
 	tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
 	tps6586x->irq_en |= (1 << __irq);
 }
 
-static void tps6586x_irq_disable(unsigned int irq)
+static void tps6586x_irq_disable(struct irq_data *irq_data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
 
-	unsigned int __irq = irq - tps6586x->irq_base;
+	unsigned int __irq = irq_data->irq - tps6586x->irq_base;
 	const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
 
 	tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
 	tps6586x->irq_en &= ~(1 << __irq);
 }
 
-static void tps6586x_irq_sync_unlock(unsigned int irq)
+static void tps6586x_irq_sync_unlock(struct irq_data *data)
 {
-	struct tps6586x *tps6586x = get_irq_chip_data(irq);
+	struct tps6586x *tps6586x = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
@@ -421,10 +419,10 @@
 	tps6586x->irq_base = irq_base;
 
 	tps6586x->irq_chip.name = "tps6586x";
-	tps6586x->irq_chip.enable = tps6586x_irq_enable;
-	tps6586x->irq_chip.disable = tps6586x_irq_disable;
-	tps6586x->irq_chip.bus_lock = tps6586x_irq_lock;
-	tps6586x->irq_chip.bus_sync_unlock = tps6586x_irq_sync_unlock;
+	tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
+	tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
+	tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
+	tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
 
 	for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
 		int __irq = i + tps6586x->irq_base;
@@ -498,11 +496,7 @@
 		return -EIO;
 	}
 
-	if ((ret != TPS658621A_VERSIONCRC) &&
-	    (ret != TPS658621C_VERSIONCRC)) {
-		dev_err(&client->dev, "Unsupported chip ID: %x\n", ret);
-		return -ENODEV;
-	}
+	dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
 
 	tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
 	if (tps6586x == NULL)
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 35275ba..a35fa7d 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -95,7 +95,8 @@
 #define twl_has_rtc()	false
 #endif
 
-#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE)
+#if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) ||\
+	defined(CONFIG_TWL6030_USB) || defined(CONFIG_TWL6030_USB_MODULE)
 #define twl_has_usb()	true
 #else
 #define twl_has_usb()	false
@@ -682,6 +683,43 @@
 			usb3v1.dev = child;
 		}
 	}
+	if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
+
+		static struct regulator_consumer_supply usb3v3 = {
+			.supply =	"vusb",
+		};
+
+		if (twl_has_regulator()) {
+			/* this is a template that gets copied */
+			struct regulator_init_data usb_fixed = {
+				.constraints.valid_modes_mask =
+					REGULATOR_MODE_NORMAL
+					| REGULATOR_MODE_STANDBY,
+				.constraints.valid_ops_mask =
+					REGULATOR_CHANGE_MODE
+					| REGULATOR_CHANGE_STATUS,
+			};
+
+			child = add_regulator_linked(TWL6030_REG_VUSB,
+						      &usb_fixed, &usb3v3, 1);
+			if (IS_ERR(child))
+				return PTR_ERR(child);
+		}
+
+		child = add_child(0, "twl6030_usb",
+			pdata->usb, sizeof(*pdata->usb),
+			true,
+			/* irq1 = VBUS_PRES, irq0 = USB ID */
+			pdata->irq_base + USBOTG_INTR_OFFSET,
+			pdata->irq_base + USB_PRES_INTR_OFFSET);
+
+		if (IS_ERR(child))
+			return PTR_ERR(child);
+		/* we need to connect regulators to this transceiver */
+		if (twl_has_regulator() && child)
+			usb3v3.dev = child;
+
+	}
 
 	if (twl_has_watchdog()) {
 		child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
@@ -815,10 +853,6 @@
 		if (IS_ERR(child))
 			return PTR_ERR(child);
 
-		child = add_regulator(TWL6030_REG_VUSB, pdata->vusb);
-		if (IS_ERR(child))
-			return PTR_ERR(child);
-
 		child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1);
 		if (IS_ERR(child))
 			return PTR_ERR(child);
@@ -969,7 +1003,7 @@
 }
 
 /* NOTE:  this driver only handles a single twl4030/tps659x0 chip */
-static int __init
+static int __devinit
 twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
 	int				status;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d3a147..63a30e8 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -599,38 +599,38 @@
  * completion, potentially including some re-ordering, of these requests.
  */
 
-static void twl4030_sih_mask(unsigned irq)
+static void twl4030_sih_mask(struct irq_data *data)
 {
-	struct sih_agent *sih = get_irq_chip_data(irq);
+	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 
 	spin_lock_irqsave(&sih_agent_lock, flags);
-	sih->imr |= BIT(irq - sih->irq_base);
+	sih->imr |= BIT(data->irq - sih->irq_base);
 	sih->imr_change_pending = true;
 	queue_work(wq, &sih->mask_work);
 	spin_unlock_irqrestore(&sih_agent_lock, flags);
 }
 
-static void twl4030_sih_unmask(unsigned irq)
+static void twl4030_sih_unmask(struct irq_data *data)
 {
-	struct sih_agent *sih = get_irq_chip_data(irq);
+	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
 	unsigned long flags;
 
 	spin_lock_irqsave(&sih_agent_lock, flags);
-	sih->imr &= ~BIT(irq - sih->irq_base);
+	sih->imr &= ~BIT(data->irq - sih->irq_base);
 	sih->imr_change_pending = true;
 	queue_work(wq, &sih->mask_work);
 	spin_unlock_irqrestore(&sih_agent_lock, flags);
 }
 
-static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+static int twl4030_sih_set_type(struct irq_data *data, unsigned trigger)
 {
-	struct sih_agent *sih = get_irq_chip_data(irq);
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct sih_agent *sih = irq_data_get_irq_chip_data(data);
+	struct irq_desc *desc = irq_to_desc(data->irq);
 	unsigned long flags;
 
 	if (!desc) {
-		pr_err("twl4030: Invalid IRQ: %d\n", irq);
+		pr_err("twl4030: Invalid IRQ: %d\n", data->irq);
 		return -EINVAL;
 	}
 
@@ -641,7 +641,7 @@
 	if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
 		desc->status &= ~IRQ_TYPE_SENSE_MASK;
 		desc->status |= trigger;
-		sih->edge_change |= BIT(irq - sih->irq_base);
+		sih->edge_change |= BIT(data->irq - sih->irq_base);
 		queue_work(wq, &sih->edge_work);
 	}
 	spin_unlock_irqrestore(&sih_agent_lock, flags);
@@ -650,9 +650,9 @@
 
 static struct irq_chip twl4030_sih_irq_chip = {
 	.name		= "twl4030",
-	.mask		= twl4030_sih_mask,
-	.unmask		= twl4030_sih_unmask,
-	.set_type	= twl4030_sih_set_type,
+	.irq_mask	= twl4030_sih_mask,
+	.irq_unmask	= twl4030_sih_unmask,
+	.irq_set_type	= twl4030_sih_set_type,
 };
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index aaedb11..4082ed7 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -74,7 +74,7 @@
 	USBOTG_INTR_OFFSET,	/* Bit 16	ID_WKUP			*/
 	USBOTG_INTR_OFFSET,	/* Bit 17	VBUS_WKUP		*/
 	USBOTG_INTR_OFFSET,	/* Bit 18	ID			*/
-	USBOTG_INTR_OFFSET,	/* Bit 19	VBUS			*/
+	USB_PRES_INTR_OFFSET,	/* Bit 19	VBUS			*/
 	CHARGER_INTR_OFFSET,	/* Bit 20	CHRG_CTRL		*/
 	CHARGER_INTR_OFFSET,	/* Bit 21	EXT_CHRG		*/
 	CHARGER_INTR_OFFSET,	/* Bit 22	INT_CHRG		*/
@@ -128,6 +128,13 @@
 
 		sts.bytes[3] = 0; /* Only 24 bits are valid*/
 
+		/*
+		 * Since VBUS status bit is not reliable for VBUS disconnect
+		 * use CHARGER VBUS detection status bit instead.
+		 */
+		if (sts.bytes[2] & 0x10)
+			sts.bytes[2] |= 0x08;
+
 		for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
 			local_irq_disable();
 			if (sts.int_sts & 0x1) {
@@ -325,7 +332,7 @@
 	 */
 	twl6030_irq_chip = dummy_irq_chip;
 	twl6030_irq_chip.name = "twl6030";
-	twl6030_irq_chip.set_type = NULL;
+	twl6030_irq_chip.irq_set_type = NULL;
 
 	for (i = irq_base; i < irq_end; i++) {
 		set_irq_chip_and_handler(i, &twl6030_irq_chip,
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index ebb0597..348052a 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -112,7 +112,7 @@
 	return ret;
 }
 
-static void vx855_remove(struct pci_dev *pdev)
+static void __devexit vx855_remove(struct pci_dev *pdev)
 {
 	mfd_remove_devices(&pdev->dev);
 	pci_disable_device(pdev);
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 76cadcf..3fe9a58 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1541,6 +1541,12 @@
 		dev_info(wm831x->dev, "WM8325 revision %c\n", 'A' + rev);
 		break;
 
+	case WM8326:
+		parent = WM8326;
+		wm831x->num_gpio = 12;
+		dev_info(wm831x->dev, "WM8326 revision %c\n", 'A' + rev);
+		break;
+
 	default:
 		dev_err(wm831x->dev, "Unknown WM831x device %04x\n", ret);
 		ret = -EINVAL;
@@ -1610,18 +1616,9 @@
 		break;
 
 	case WM8320:
-		ret = mfd_add_devices(wm831x->dev, -1,
-				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-				      NULL, 0);
-		break;
-
 	case WM8321:
-		ret = mfd_add_devices(wm831x->dev, -1,
-				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
-				      NULL, 0);
-		break;
-
 	case WM8325:
+	case WM8326:
 		ret = mfd_add_devices(wm831x->dev, -1,
 				      wm8320_devs, ARRAY_SIZE(wm8320_devs),
 				      NULL, wm831x->irq_base);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 156b198..3853fa8 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -94,9 +94,9 @@
 	return 0;
 }
 
-static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
+static int wm831x_i2c_suspend(struct device *dev)
 {
-	struct wm831x *wm831x = i2c_get_clientdata(i2c);
+	struct wm831x *wm831x = dev_get_drvdata(dev);
 
 	return wm831x_device_suspend(wm831x);
 }
@@ -108,19 +108,23 @@
 	{ "wm8320", WM8320 },
 	{ "wm8321", WM8321 },
 	{ "wm8325", WM8325 },
+	{ "wm8326", WM8326 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
 
+static const struct dev_pm_ops wm831x_pm_ops = {
+	.suspend = wm831x_i2c_suspend,
+};
 
 static struct i2c_driver wm831x_i2c_driver = {
 	.driver = {
-		   .name = "wm831x",
-		   .owner = THIS_MODULE,
+		.name = "wm831x",
+		.owner = THIS_MODULE,
+		.pm = &wm831x_pm_ops,
 	},
 	.probe = wm831x_i2c_probe,
 	.remove = wm831x_i2c_remove,
-	.suspend = wm831x_i2c_suspend,
 	.id_table = wm831x_i2c_id,
 };
 
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 294183b..f7192d4 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -345,16 +345,16 @@
 	return &wm831x_irqs[irq - wm831x->irq_base];
 }
 
-static void wm831x_irq_lock(unsigned int irq)
+static void wm831x_irq_lock(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&wm831x->irq_lock);
 }
 
-static void wm831x_irq_sync_unlock(unsigned int irq)
+static void wm831x_irq_sync_unlock(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
@@ -371,28 +371,30 @@
 	mutex_unlock(&wm831x->irq_lock);
 }
 
-static void wm831x_irq_unmask(unsigned int irq)
+static void wm831x_irq_unmask(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
-	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
+							     data->irq);
 
 	wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
 
-static void wm831x_irq_mask(unsigned int irq)
+static void wm831x_irq_mask(struct irq_data *data)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
-	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+	struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
+							     data->irq);
 
 	wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
 
-static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
+static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
 {
-	struct wm831x *wm831x = get_irq_chip_data(irq);
-	int val;
+	struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
+	int val, irq;
 
-	irq = irq - wm831x->irq_base;
+	irq = data->irq - wm831x->irq_base;
 
 	if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
 		/* Ignore internal-only IRQs */
@@ -421,12 +423,12 @@
 }
 
 static struct irq_chip wm831x_irq_chip = {
-	.name = "wm831x",
-	.bus_lock = wm831x_irq_lock,
-	.bus_sync_unlock = wm831x_irq_sync_unlock,
-	.mask = wm831x_irq_mask,
-	.unmask = wm831x_irq_unmask,
-	.set_type = wm831x_irq_set_type,
+	.name			= "wm831x",
+	.irq_bus_lock		= wm831x_irq_lock,
+	.irq_bus_sync_unlock	= wm831x_irq_sync_unlock,
+	.irq_mask		= wm831x_irq_mask,
+	.irq_unmask		= wm831x_irq_unmask,
+	.irq_set_type		= wm831x_irq_set_type,
 };
 
 /* The processing of the primary interrupt occurs in a thread so that
@@ -515,6 +517,17 @@
 		return 0;
 	}
 
+	/* Try to flag /IRQ as a wake source; there are a number of
+	 * unconditional wake sources in the PMIC so this isn't
+	 * conditional but we don't actually care *too* much if it
+	 * fails.
+	 */
+	ret = enable_irq_wake(irq);
+	if (ret != 0) {
+		dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
+			 ret);
+	}
+
 	wm831x->irq = irq;
 	wm831x->irq_base = pdata->irq_base;
 
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 2789b15..0a8f772 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -81,6 +81,8 @@
 		type = WM8321;
 	else if (strcmp(spi->modalias, "wm8325") == 0)
 		type = WM8325;
+	else if (strcmp(spi->modalias, "wm8326") == 0)
+		type = WM8326;
 	else {
 		dev_err(&spi->dev, "Unknown device type\n");
 		return -EINVAL;
@@ -184,6 +186,17 @@
 	.suspend	= wm831x_spi_suspend,
 };
 
+static struct spi_driver wm8326_spi_driver = {
+	.driver = {
+		.name	= "wm8326",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= wm831x_spi_probe,
+	.remove		= __devexit_p(wm831x_spi_remove),
+	.suspend	= wm831x_spi_suspend,
+};
+
 static int __init wm831x_spi_init(void)
 {
 	int ret;
@@ -212,12 +225,17 @@
 	if (ret != 0)
 		pr_err("Failed to register WM8325 SPI driver: %d\n", ret);
 
+	ret = spi_register_driver(&wm8326_spi_driver);
+	if (ret != 0)
+		pr_err("Failed to register WM8326 SPI driver: %d\n", ret);
+
 	return 0;
 }
 subsys_initcall(wm831x_spi_init);
 
 static void __exit wm831x_spi_exit(void)
 {
+	spi_unregister_driver(&wm8326_spi_driver);
 	spi_unregister_driver(&wm8325_spi_driver);
 	spi_unregister_driver(&wm8321_spi_driver);
 	spi_unregister_driver(&wm8320_spi_driver);
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index f56c9ad..5839966 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -417,16 +417,16 @@
 	return IRQ_HANDLED;
 }
 
-static void wm8350_irq_lock(unsigned int irq)
+static void wm8350_irq_lock(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&wm8350->irq_lock);
 }
 
-static void wm8350_irq_sync_unlock(unsigned int irq)
+static void wm8350_irq_sync_unlock(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
@@ -442,28 +442,30 @@
 	mutex_unlock(&wm8350->irq_lock);
 }
 
-static void wm8350_irq_enable(unsigned int irq)
+static void wm8350_irq_enable(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
-	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
+	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
+							     data->irq);
 
 	wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask;
 }
 
-static void wm8350_irq_disable(unsigned int irq)
+static void wm8350_irq_disable(struct irq_data *data)
 {
-	struct wm8350 *wm8350 = get_irq_chip_data(irq);
-	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+	struct wm8350 *wm8350 = irq_data_get_irq_chip_data(data);
+	struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350,
+							     data->irq);
 
 	wm8350->irq_masks[irq_data->reg] |= irq_data->mask;
 }
 
 static struct irq_chip wm8350_irq_chip = {
-	.name = "wm8350",
-	.bus_lock = wm8350_irq_lock,
-	.bus_sync_unlock = wm8350_irq_sync_unlock,
-	.disable = wm8350_irq_disable,
-	.enable = wm8350_irq_enable,
+	.name			= "wm8350",
+	.irq_bus_lock		= wm8350_irq_lock,
+	.irq_bus_sync_unlock	= wm8350_irq_sync_unlock,
+	.irq_disable		= wm8350_irq_disable,
+	.irq_enable		= wm8350_irq_enable,
 };
 
 int wm8350_irq_init(struct wm8350 *wm8350, int irq,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index b3b2aaf..41233c7 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -18,6 +18,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/machine.h>
 
@@ -169,8 +170,16 @@
 EXPORT_SYMBOL_GPL(wm8994_set_bits);
 
 static struct mfd_cell wm8994_regulator_devs[] = {
-	{ .name = "wm8994-ldo", .id = 1 },
-	{ .name = "wm8994-ldo", .id = 2 },
+	{
+		.name = "wm8994-ldo",
+		.id = 1,
+		.pm_runtime_no_callbacks = true,
+	},
+	{
+		.name = "wm8994-ldo",
+		.id = 2,
+		.pm_runtime_no_callbacks = true,
+	},
 };
 
 static struct resource wm8994_codec_resources[] = {
@@ -200,6 +209,7 @@
 		.name = "wm8994-gpio",
 		.num_resources = ARRAY_SIZE(wm8994_gpio_resources),
 		.resources = wm8994_gpio_resources,
+		.pm_runtime_no_callbacks = true,
 	},
 };
 
@@ -218,8 +228,20 @@
 	"SPKVDD2",
 };
 
+static const char *wm8958_main_supplies[] = {
+	"DBVDD1",
+	"DBVDD2",
+	"DBVDD3",
+	"DCVDD",
+	"AVDD1",
+	"AVDD2",
+	"CPVDD",
+	"SPKVDD1",
+	"SPKVDD2",
+};
+
 #ifdef CONFIG_PM
-static int wm8994_device_suspend(struct device *dev)
+static int wm8994_suspend(struct device *dev)
 {
 	struct wm8994 *wm8994 = dev_get_drvdata(dev);
 	int ret;
@@ -239,7 +261,7 @@
 	if (ret < 0)
 		dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 
-	ret = regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_disable(wm8994->num_supplies,
 				     wm8994->supplies);
 	if (ret != 0) {
 		dev_err(dev, "Failed to disable supplies: %d\n", ret);
@@ -249,12 +271,12 @@
 	return 0;
 }
 
-static int wm8994_device_resume(struct device *dev)
+static int wm8994_resume(struct device *dev)
 {
 	struct wm8994 *wm8994 = dev_get_drvdata(dev);
 	int ret;
 
-	ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_enable(wm8994->num_supplies,
 				    wm8994->supplies);
 	if (ret != 0) {
 		dev_err(dev, "Failed to enable supplies: %d\n", ret);
@@ -305,9 +327,10 @@
 /*
  * Instantiate the generic non-control parts of the device.
  */
-static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
+static int wm8994_device_init(struct wm8994 *wm8994, int irq)
 {
 	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+	const char *devname;
 	int ret, i;
 
 	mutex_init(&wm8994->io_lock);
@@ -323,25 +346,48 @@
 		goto err;
 	}
 
+	switch (wm8994->type) {
+	case WM8994:
+		wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
+		break;
+	case WM8958:
+		wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
 	wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
-				   ARRAY_SIZE(wm8994_main_supplies),
+				   wm8994->num_supplies,
 				   GFP_KERNEL);
 	if (!wm8994->supplies) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
-		wm8994->supplies[i].supply = wm8994_main_supplies[i];
-
-	ret = regulator_bulk_get(wm8994->dev, ARRAY_SIZE(wm8994_main_supplies),
+	switch (wm8994->type) {
+	case WM8994:
+		for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
+			wm8994->supplies[i].supply = wm8994_main_supplies[i];
+		break;
+	case WM8958:
+		for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
+			wm8994->supplies[i].supply = wm8958_main_supplies[i];
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+		
+	ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
 				 wm8994->supplies);
 	if (ret != 0) {
 		dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
 		goto err_supplies;
 	}
 
-	ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_enable(wm8994->num_supplies,
 				    wm8994->supplies);
 	if (ret != 0) {
 		dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
@@ -353,7 +399,22 @@
 		dev_err(wm8994->dev, "Failed to read ID register\n");
 		goto err_enable;
 	}
-	if (ret != 0x8994) {
+	switch (ret) {
+	case 0x8994:
+		devname = "WM8994";
+		if (wm8994->type != WM8994)
+			dev_warn(wm8994->dev, "Device registered as type %d\n",
+				 wm8994->type);
+		wm8994->type = WM8994;
+		break;
+	case 0x8958:
+		devname = "WM8958";
+		if (wm8994->type != WM8958)
+			dev_warn(wm8994->dev, "Device registered as type %d\n",
+				 wm8994->type);
+		wm8994->type = WM8958;
+		break;
+	default:
 		dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
 			ret);
 		ret = -EINVAL;
@@ -370,14 +431,16 @@
 	switch (ret) {
 	case 0:
 	case 1:
-		dev_warn(wm8994->dev, "revision %c not fully supported\n",
-			'A' + ret);
+		if (wm8994->type == WM8994)
+			dev_warn(wm8994->dev,
+				 "revision %c not fully supported\n",
+				 'A' + ret);
 		break;
 	default:
-		dev_info(wm8994->dev, "revision %c\n", 'A' + ret);
 		break;
 	}
 
+	dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);
 
 	if (pdata) {
 		wm8994->irq_base = pdata->irq_base;
@@ -418,15 +481,18 @@
 		goto err_irq;
 	}
 
+	pm_runtime_enable(wm8994->dev);
+	pm_runtime_resume(wm8994->dev);
+
 	return 0;
 
 err_irq:
 	wm8994_irq_exit(wm8994);
 err_enable:
-	regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	regulator_bulk_disable(wm8994->num_supplies,
 			       wm8994->supplies);
 err_get:
-	regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
 err_supplies:
 	kfree(wm8994->supplies);
 err:
@@ -437,11 +503,12 @@
 
 static void wm8994_device_exit(struct wm8994 *wm8994)
 {
+	pm_runtime_disable(wm8994->dev);
 	mfd_remove_devices(wm8994->dev);
 	wm8994_irq_exit(wm8994);
-	regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	regulator_bulk_disable(wm8994->num_supplies,
 			       wm8994->supplies);
-	regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
 	kfree(wm8994->supplies);
 	kfree(wm8994);
 }
@@ -506,8 +573,9 @@
 	wm8994->read_dev = wm8994_i2c_read_device;
 	wm8994->write_dev = wm8994_i2c_write_device;
 	wm8994->irq = i2c->irq;
+	wm8994->type = id->driver_data;
 
-	return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
+	return wm8994_device_init(wm8994, i2c->irq);
 }
 
 static int wm8994_i2c_remove(struct i2c_client *i2c)
@@ -519,36 +587,23 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int wm8994_i2c_suspend(struct i2c_client *i2c, pm_message_t state)
-{
-	return wm8994_device_suspend(&i2c->dev);
-}
-
-static int wm8994_i2c_resume(struct i2c_client *i2c)
-{
-	return wm8994_device_resume(&i2c->dev);
-}
-#else
-#define wm8994_i2c_suspend NULL
-#define wm8994_i2c_resume NULL
-#endif
-
 static const struct i2c_device_id wm8994_i2c_id[] = {
-	{ "wm8994", 0 },
+	{ "wm8994", WM8994 },
+	{ "wm8958", WM8958 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
 
+UNIVERSAL_DEV_PM_OPS(wm8994_pm_ops, wm8994_suspend, wm8994_resume, NULL);
+
 static struct i2c_driver wm8994_i2c_driver = {
 	.driver = {
-		   .name = "wm8994",
-		   .owner = THIS_MODULE,
+		.name = "wm8994",
+		.owner = THIS_MODULE,
+		.pm = &wm8994_pm_ops,
 	},
 	.probe = wm8994_i2c_probe,
 	.remove = wm8994_i2c_remove,
-	.suspend = wm8994_i2c_suspend,
-	.resume = wm8994_i2c_resume,
 	.id_table = wm8994_i2c_id,
 };
 
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 8400eb1..29e8faf 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -156,16 +156,16 @@
 	return &wm8994_irqs[irq - wm8994->irq_base];
 }
 
-static void wm8994_irq_lock(unsigned int irq)
+static void wm8994_irq_lock(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&wm8994->irq_lock);
 }
 
-static void wm8994_irq_sync_unlock(unsigned int irq)
+static void wm8994_irq_sync_unlock(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
@@ -182,28 +182,30 @@
 	mutex_unlock(&wm8994->irq_lock);
 }
 
-static void wm8994_irq_unmask(unsigned int irq)
+static void wm8994_irq_unmask(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
-	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
+	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
+							     data->irq);
 
 	wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
 
-static void wm8994_irq_mask(unsigned int irq)
+static void wm8994_irq_mask(struct irq_data *data)
 {
-	struct wm8994 *wm8994 = get_irq_chip_data(irq);
-	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994, irq);
+	struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
+	struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
+							     data->irq);
 
 	wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
 
 static struct irq_chip wm8994_irq_chip = {
-	.name = "wm8994",
-	.bus_lock = wm8994_irq_lock,
-	.bus_sync_unlock = wm8994_irq_sync_unlock,
-	.mask = wm8994_irq_mask,
-	.unmask = wm8994_irq_unmask,
+	.name			= "wm8994",
+	.irq_bus_lock		= wm8994_irq_lock,
+	.irq_bus_sync_unlock	= wm8994_irq_sync_unlock,
+	.irq_mask		= wm8994_irq_mask,
+	.irq_unmask		= wm8994_irq_unmask,
 };
 
 /* The processing of the primary interrupt occurs in a thread so that
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4d073f1..cc8e49d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -64,7 +64,7 @@
 
 config AB8500_PWM
 	bool "AB8500 PWM support"
-	depends on AB8500_CORE
+	depends on AB8500_CORE && ARCH_U8500
 	select HAVE_PWM
 	help
 	  This driver exports functions to enable/disable/config/free Pulse
@@ -402,7 +402,7 @@
 	  DAC7512 16-bit digital-to-analog converter.
 
 	  This driver can also be built as a module. If so, the module
-	  will be calles ti_dac7512.
+	  will be called ti_dac7512.
 
 config VMWARE_BALLOON
 	tristate "VMware Balloon Driver"
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index 9e3879e..fe8616a 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -313,7 +313,7 @@
 	INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
 	schedule_delayed_work(&lcd->init_work, 0);
 
-	dev_info(&pdev->dev, "initalized ARM character LCD at %08x\n",
+	dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
 		lcd->phybase);
 
 	return 0;
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index 6f62180..d02d302 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -16,12 +16,11 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/cs5535.h>
 #include <linux/slab.h>
 
 #define DRV_NAME "cs5535-mfgpt"
-#define MFGPT_BAR 2
 
 static int mfgpt_reset_timers;
 module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
@@ -37,7 +36,7 @@
 	DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
 	resource_size_t base;
 
-	struct pci_dev *pdev;
+	struct platform_device *pdev;
 	spinlock_t lock;
 	int initialized;
 } cs5535_mfgpt_chip;
@@ -290,10 +289,10 @@
 	return timers;
 }
 
-static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
-		const struct pci_device_id *pci_id)
+static int __devinit cs5535_mfgpt_probe(struct platform_device *pdev)
 {
-	int err, t;
+	struct resource *res;
+	int err = -EIO, t;
 
 	/* There are two ways to get the MFGPT base address; one is by
 	 * fetching it from MSR_LBAR_MFGPT, the other is by reading the
@@ -302,29 +301,27 @@
 	 * it turns out to be unreliable in the face of crappy BIOSes, we
 	 * can always go back to using MSRs.. */
 
-	err = pci_enable_device_io(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "can't enable device IO\n");
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "can't fetch device resource info\n");
 		goto done;
 	}
 
-	err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
+	if (!request_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "can't request region\n");
 		goto done;
 	}
 
 	/* set up the driver-specific struct */
-	cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
+	cs5535_mfgpt_chip.base = res->start;
 	cs5535_mfgpt_chip.pdev = pdev;
 	spin_lock_init(&cs5535_mfgpt_chip.lock);
 
-	dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
-			(unsigned long long) cs5535_mfgpt_chip.base);
+	dev_info(&pdev->dev, "reserved resource region %pR\n", res);
 
 	/* detect the available timers */
 	t = scan_timers(&cs5535_mfgpt_chip);
-	dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
+	dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
 	cs5535_mfgpt_chip.initialized = 1;
 	return 0;
 
@@ -332,47 +329,18 @@
 	return err;
 }
 
-static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
-	{ 0, },
+static struct platform_driver cs5535_mfgpt_drv = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = cs5535_mfgpt_probe,
 };
-MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
 
-/*
- * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
- * registration stuff.  It only allows only one driver to bind to each PCI
- * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
- * device.  Instead, we manually scan for the PCI device, request a single
- * region, and keep track of the devices that we're using.
- */
-
-static int __init cs5535_mfgpt_scan_pci(void)
-{
-	struct pci_dev *pdev;
-	int err = -ENODEV;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
-		pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
-				cs5535_mfgpt_pci_tbl[i].device, NULL);
-		if (pdev) {
-			err = cs5535_mfgpt_probe(pdev,
-					&cs5535_mfgpt_pci_tbl[i]);
-			if (err)
-				pci_dev_put(pdev);
-
-			/* we only support a single CS5535/6 southbridge */
-			break;
-		}
-	}
-
-	return err;
-}
 
 static int __init cs5535_mfgpt_init(void)
 {
-	return cs5535_mfgpt_scan_pci();
+	return platform_driver_register(&cs5535_mfgpt_drv);
 }
 
 module_init(cs5535_mfgpt_init);
@@ -380,3 +348,4 @@
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 559b0b3..ab1ad41 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -20,6 +20,7 @@
 #include <linux/log2.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
+#include <linux/of.h>
 #include <linux/i2c.h>
 #include <linux/i2c/at24.h>
 
@@ -457,6 +458,27 @@
 
 /*-------------------------------------------------------------------------*/
 
+#ifdef CONFIG_OF
+static void at24_get_ofdata(struct i2c_client *client,
+		struct at24_platform_data *chip)
+{
+	const __be32 *val;
+	struct device_node *node = client->dev.of_node;
+
+	if (node) {
+		if (of_get_property(node, "read-only", NULL))
+			chip->flags |= AT24_FLAG_READONLY;
+		val = of_get_property(node, "pagesize", NULL);
+		if (val)
+			chip->page_size = be32_to_cpup(val);
+	}
+}
+#else
+static void at24_get_ofdata(struct i2c_client *client,
+		struct at24_platform_data *chip)
+{ }
+#endif /* CONFIG_OF */
+
 static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
 	struct at24_platform_data chip;
@@ -485,6 +507,9 @@
 		 */
 		chip.page_size = 1;
 
+		/* update chipdata if OF is present */
+		at24_get_ofdata(client, &chip);
+
 		chip.setup = NULL;
 		chip.context = NULL;
 	}
@@ -492,6 +517,11 @@
 	if (!is_power_of_2(chip.byte_len))
 		dev_warn(&client->dev,
 			"byte_len looks suspicious (no power of 2)!\n");
+	if (!chip.page_size) {
+		dev_err(&client->dev, "page_size must not be 0!\n");
+		err = -EINVAL;
+		goto err_out;
+	}
 	if (!is_power_of_2(chip.page_size))
 		dev_warn(&client->dev,
 			"page_size looks suspicious (no power of 2)!\n");
@@ -597,19 +627,15 @@
 
 	i2c_set_clientdata(client, at24);
 
-	dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
+	dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
 		at24->bin.size, client->name,
-		writable ? "(writable)" : "(read-only)");
+		writable ? "writable" : "read-only", at24->write_max);
 	if (use_smbus == I2C_SMBUS_WORD_DATA ||
 	    use_smbus == I2C_SMBUS_BYTE_DATA) {
 		dev_notice(&client->dev, "Falling back to %s reads, "
 			   "performance will suffer\n", use_smbus ==
 			   I2C_SMBUS_WORD_DATA ? "word" : "byte");
 	}
-	dev_dbg(&client->dev,
-		"page_size %d, num_addresses %d, write_max %d, use_smbus %d\n",
-		chip.page_size, num_addresses,
-		at24->write_max, use_smbus);
 
 	/* export data to kernel code */
 	if (chip.setup)
@@ -660,6 +686,11 @@
 
 static int __init at24_init(void)
 {
+	if (!io_limit) {
+		pr_err("at24: io_limit must not be 0!\n");
+		return -EINVAL;
+	}
+
 	io_limit = rounddown_pow_of_two(io_limit);
 	return i2c_add_driver(&at24_driver);
 }
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 1932066..668d41e 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -273,13 +273,11 @@
 static void __devinit
 ioc4_load_modules(struct work_struct *work)
 {
-	/* arg just has to be freed */
-
 	request_module("sgiioc4");
-
-	kfree(work);
 }
 
+static DECLARE_WORK(ioc4_load_modules_work, ioc4_load_modules);
+
 /* Adds a new instance of an IOC4 card */
 static int __devinit
 ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
@@ -396,21 +394,12 @@
 	 * PCI device.
 	 */
 	if (idd->idd_variant != IOC4_VARIANT_PCI_RT) {
-		struct work_struct *work;
-		work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
-		if (!work) {
-			printk(KERN_WARNING
-			       "%s: IOC4 unable to allocate memory for "
-			       "load of sub-modules.\n", __func__);
-		} else {
-			/* Request the module from a work procedure as the
-			 * modprobe goes out to a userland helper and that
-			 * will hang if done directly from ioc4_probe().
-			 */
-			printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
-			INIT_WORK(work, ioc4_load_modules);
-			schedule_work(work);
-		}
+		/* Request the module from a work procedure as the modprobe
+		 * goes out to a userland helper and that will hang if done
+		 * directly from ioc4_probe().
+		 */
+		printk(KERN_INFO "IOC4 loading sgiioc4 submodule\n");
+		schedule_work(&ioc4_load_modules_work);
 	}
 
 	return 0;
@@ -498,7 +487,7 @@
 ioc4_exit(void)
 {
 	/* Ensure ioc4_load_modules() has completed before exiting */
-	flush_scheduled_work();
+	flush_work_sync(&ioc4_load_modules_work);
 	pci_unregister_driver(&ioc4_driver);
 }
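
The ioc4 change replaces a kmalloc'd work_struct, previously freed by its own handler, with a statically declared one, which also lets the exit path flush exactly that item instead of the whole shared workqueue. A minimal sketch of the same pattern with hypothetical names:

/* Sketch of the static-work pattern adopted above; "foo"/"bar" are made up. */
#include <linux/kmod.h>
#include <linux/workqueue.h>

static void foo_deferred(struct work_struct *work)
{
	/* runs in process context, so blocking calls like this are safe */
	request_module("bar");
}

static DECLARE_WORK(foo_work, foo_deferred);

/*
 * From probe():	schedule_work(&foo_work);
 * From exit():		flush_work_sync(&foo_work);	(instead of flush_scheduled_work())
 */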
 
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804..4d2ea8e 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.1-k");
+MODULE_VERSION("1.2.1.2-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -315,7 +315,8 @@
  * fear that guest will need it. Host may reject some pages, we need to
  * check the return value and maybe submit a different page.
  */
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+				     unsigned int *hv_status)
 {
 	unsigned long status, dummy;
 	u32 pfn32;
@@ -326,7 +327,7 @@
 
 	STATS_INC(b->stats.lock);
 
-	status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
 	if (vmballoon_check_status(b, status))
 		return true;
 
@@ -410,6 +411,7 @@
 {
 	struct page *page;
 	gfp_t flags;
+	unsigned int hv_status;
 	bool locked = false;
 
 	do {
@@ -429,11 +431,12 @@
 		}
 
 		/* inform monitor */
-		locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
 		if (!locked) {
 			STATS_INC(b->stats.refused_alloc);
 
-			if (b->reset_required) {
+			if (hv_status == VMW_BALLOON_ERROR_RESET ||
+			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
 				__free_page(page);
 				return -EIO;
 			}
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416..2a876c4 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@
 
 config MMC_BLOCK_MINORS
 	int "Number of minors per block device"
+	depends on MMC_BLOCK
 	range 4 256
 	default 8
 	help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 217f820..bfc8a8a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -257,7 +257,7 @@
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err)
-		printk(KERN_ERR "%s: error %d sending status comand",
+		printk(KERN_ERR "%s: error %d sending status command",
 		       req->rq_disk->disk_name, err);
 	return cmd.resp[0];
 }
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd..ef10387 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@
 
 	  This option sets a default which can be overridden by the
 	  module parameter "removable=0" or "removable=1".
+
+config MMC_CLKGATE
+	bool "MMC host clock gating (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This is done to save power due to gating off the logic and bus
+	  noise when the MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
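
As the help text says, gating only saves power if the host driver copes with the core handing it a 0 Hz clock: the core issues set_ios() with ios->clock == 0 to gate and later restores the cached rate (see mmc_gate_clock()/mmc_ungate_clock() in the core.c hunk below). A hedged sketch of what a host's set_ios() might do with that, using a made-up "foo" host:

/* Illustrative host-side handling of a gated (0 Hz) clock request. */
#include <linux/clk.h>
#include <linux/mmc/host.h>

struct foo_host {
	struct clk *clk;	/* block clock of this hypothetical controller */
	bool clk_enabled;
};

static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct foo_host *host = mmc_priv(mmc);

	if (ios->clock == 0 && host->clk_enabled) {
		clk_disable(host->clk);		/* the core asked us to gate */
		host->clk_enabled = false;
	} else if (ios->clock != 0 && !host->clk_enabled) {
		clk_enable(host->clk);		/* the core restored the cached rate */
		host->clk_enabled = true;
	}
	/* ...then program the divider for ios->clock, bus width, etc. */
}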
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a..63667a8 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@
 			type, card->rca);
 	}
 
-	ret = device_add(&card->dev);
-	if (ret)
-		return ret;
-
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_card_debugfs(card);
 #endif
 
+	ret = device_add(&card->dev);
+	if (ret)
+		return ret;
+
 	mmc_card_set_present(card);
 
 	return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 57dcf8f..6625c05 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -130,6 +131,8 @@
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_gate(host);
 	}
 }
 
@@ -190,6 +193,7 @@
 			mrq->stop->mrq = mrq;
 		}
 	}
+	mmc_host_clk_ungate(host);
 	host->ops->request(host, mrq);
 }
 
@@ -295,8 +299,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		timeout_us += data->timeout_clks * 1000 /
-			(card->host->ios.clock / 1000);
+		if (mmc_host_clk_rate(card->host))
+			timeout_us += data->timeout_clks * 1000 /
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -614,6 +619,8 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
 }
 
@@ -641,6 +648,61 @@
 	mmc_set_ios(host);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		BUG_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 /*
  * Change the bus mode (open drain/push-pull) of a host.
  */
@@ -1424,35 +1486,57 @@
 }
 EXPORT_SYMBOL(mmc_set_blocklen);
 
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+	host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+	pr_info("%s: %s: trying to init card at %u Hz\n",
+		mmc_hostname(host), __func__, host->f_init);
+#endif
+	mmc_power_up(host);
+	sdio_reset(host);
+	mmc_go_idle(host);
+
+	mmc_send_if_cond(host, host->ocr_avail);
+
+	/* Order's important: probe SDIO, then SD, then MMC */
+	if (!mmc_attach_sdio(host))
+		return 0;
+	if (!mmc_attach_sd(host))
+		return 0;
+	if (!mmc_attach_mmc(host))
+		return 0;
+
+	mmc_power_off(host);
+	return -EIO;
+}
+
 void mmc_rescan(struct work_struct *work)
 {
+	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
-	u32 ocr;
-	int err;
-	unsigned long flags;
 	int i;
-	const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (host->rescan_disable) {
-		spin_unlock_irqrestore(&host->lock, flags);
+	if (host->rescan_disable)
 		return;
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
 
 	mmc_bus_get(host);
 
-	/* if there is a card registered, check whether it is still present */
-	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+	/*
+	 * if there is a _removable_ card registered, check whether it is
+	 * still present
+	 */
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+	    && mmc_card_is_removable(host))
 		host->bus_ops->detect(host);
 
+	/*
+	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
+	 * the card is no longer present.
+	 */
 	mmc_bus_put(host);
-
-
 	mmc_bus_get(host);
 
 	/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@
 		goto out;
 	}
 
-	/* detect a newly inserted card */
-
 	/*
 	 * Only we can add a new handler, so it's safe to
 	 * release the lock here.
@@ -1472,72 +1554,16 @@
 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
 		goto out;
 
+	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		mmc_claim_host(host);
-
-		if (freqs[i] >= host->f_min)
-			host->f_init = freqs[i];
-		else if (!i || freqs[i-1] > host->f_min)
-			host->f_init = host->f_min;
-		else {
-			mmc_release_host(host);
-			goto out;
-		}
-#ifdef CONFIG_MMC_DEBUG
-		pr_info("%s: %s: trying to init card at %u Hz\n",
-			mmc_hostname(host), __func__, host->f_init);
-#endif
-		mmc_power_up(host);
-		sdio_reset(host);
-		mmc_go_idle(host);
-
-		mmc_send_if_cond(host, host->ocr_avail);
-
-		/*
-		 * First we search for SDIO...
-		 */
-		err = mmc_send_io_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sdio(host, ocr)) {
-				mmc_claim_host(host);
-				/*
-				 * Try SDMEM (but not MMC) even if SDIO
-				 * is broken.
-				 */
-				if (mmc_send_app_op_cond(host, 0, &ocr))
-					goto out_fail;
-
-				if (mmc_attach_sd(host, ocr))
-					mmc_power_off(host);
-			}
-			goto out;
-		}
-
-		/*
-		 * ...then normal SD...
-		 */
-		err = mmc_send_app_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sd(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-		/*
-		 * ...and finally MMC.
-		 */
-		err = mmc_send_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_mmc(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-out_fail:
-		mmc_release_host(host);
-		mmc_power_off(host);
+		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+			break;
+		if (freqs[i] < host->f_min)
+			break;
 	}
-out:
+	mmc_release_host(host);
+
+ out:
 	if (host->caps & MMC_CAP_NEEDS_POLL)
 		mmc_schedule_delayed_work(&host->detect, HZ);
 }
@@ -1721,6 +1747,18 @@
 		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
 			mmc_power_up(host);
 			mmc_select_voltage(host, host->ocr);
+			/*
+			 * Tell runtime PM core we just powered up the card,
+			 * since it still believes the card is powered off.
+			 * Note that currently runtime PM is only enabled
+			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+			 */
+			if (mmc_card_sdio(host->card) &&
+			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
+				pm_runtime_disable(&host->card->dev);
+				pm_runtime_set_active(&host->card->dev);
+				pm_runtime_enable(&host->card->dev);
+			}
 		}
 		BUG_ON(!host->bus_ops->resume);
 		err = host->bus_ops->resume(host);
@@ -1790,7 +1828,7 @@
 {
 	int ret;
 
-	workqueue = create_singlethread_workqueue("kmmcd");
+	workqueue = alloc_ordered_workqueue("kmmcd", 0);
 	if (!workqueue)
 		return -ENOMEM;
 
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd..ca1fdde 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@
 void mmc_start_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
 
 /* Module parameters */
 extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405..998797e 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 	return;
 
 err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af2..b3ac6c5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2003 Russell King, All Rights Reserved.
  *  Copyright (C) 2007-2008 Pierre Ossman
+ *  Copyright (C) 2010 Linus Walleij
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
 #include <linux/suspend.h>
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 
 #include "core.h"
 #include "host.h"
@@ -50,6 +52,205 @@
 static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * With clock gating enabled, the core calls out to the host once to
+ * ungate and once to gate the clock around any request or card
+ * operation. The driver sees this as set_ios() operations with the
+ * ios.clock field set to 0 to gate (disable) the block clock, and set
+ * back to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, "
+			 "this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling;
+	 * in that case there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_ungate - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	if (mmc_card_sdio(card))
+		return false;
+
+	return true;
+}
+
+/**
+ *	mmc_host_clk_gate - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		schedule_work(&host->clk_gate_work);
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
+	host->clk_delay = 8;
+	host->clk_gated = false;
+	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_ungate(host);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+#endif
+
 /**
  *	mmc_alloc_host - initialise the per-host structure.
  *	@extra: sizeof private data structure
@@ -82,6 +283,8 @@
 	host->class_dev.class = &mmc_host_class;
 	device_initialize(&host->class_dev);
 
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@
 }
 
 EXPORT_SYMBOL(mmc_free_host);
-
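
The clock gating code added to host.c only gates the MCI clock once the request reference count has dropped to zero, and even then it first waits clk_delay bus cycles (8 by default, the MMC spec minimum mentioned in the comment above) before disabling the block clock. A small userspace sketch of that delay arithmetic and the reference-count decision; the frequency and request count here are invented values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long freq = 26000000;	/* stand-in for host->ios.clock, in Hz */
	unsigned long clk_delay = 8;	/* default number of cycles to keep the clock */
	unsigned long tick_ns = DIV_ROUND_UP(1000000000UL, freq);
	int clk_requests = 0;		/* stand-in for host->clk_requests */

	printf("one bus cycle ~ %lu ns, wait %lu ns before gating\n",
	       tick_ns, clk_delay * tick_ns);
	printf("clk_requests=%d -> %s\n", clk_requests,
	       clk_requests ? "keep the MCI clock running" : "gate the MCI clock");
	return 0;
}
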
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e11..de199f9 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
  */
 #ifndef _MMC_CORE_HOST_H
 #define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
 
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
 void mmc_host_deeper_disable(struct work_struct *work);
 
 #endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3..16006ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@
 	 */
 	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
 	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
-		unsigned ext_csd_bit, bus_width;
+		static unsigned ext_csd_bits[][2] = {
+			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
+			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
+			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
+		};
+		static unsigned bus_widths[] = {
+			MMC_BUS_WIDTH_8,
+			MMC_BUS_WIDTH_4,
+			MMC_BUS_WIDTH_1
+		};
+		unsigned idx, bus_width = 0;
 
-		if (host->caps & MMC_CAP_8_BIT_DATA) {
-			if (ddr)
-				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
-			else
-				ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
-			bus_width = MMC_BUS_WIDTH_8;
-		} else {
-			if (ddr)
-				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
-			else
-				ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
-			bus_width = MMC_BUS_WIDTH_4;
+		if (host->caps & MMC_CAP_8_BIT_DATA)
+			idx = 0;
+		else
+			idx = 1;
+		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+			bus_width = bus_widths[idx];
+			if (bus_width == MMC_BUS_WIDTH_1)
+				ddr = 0; /* no DDR for 1-bit width */
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 EXT_CSD_BUS_WIDTH,
+					 ext_csd_bits[idx][0]);
+			if (!err) {
+				mmc_set_bus_width_ddr(card->host,
+						      bus_width, MMC_SDR_MODE);
+				/*
+				 * If controller can't handle bus width test,
+				 * use the highest bus width to maintain
+				 * compatibility with previous MMC behavior.
+				 */
+				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+					break;
+				err = mmc_bus_test(card, bus_width);
+				if (!err)
+					break;
+			}
 		}
 
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_BUS_WIDTH, ext_csd_bit);
-
-		if (err && err != -EBADMSG)
-			goto free_card;
-
+		if (!err && ddr) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_BUS_WIDTH,
+					ext_csd_bits[idx][1]);
+		}
 		if (err) {
 			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
-			       "failed\n", mmc_hostname(card->host),
-			       1 << bus_width, ddr);
-			err = 0;
-		} else {
-			if (ddr)
-				mmc_card_set_ddr_mode(card);
-			else
-				ddr = MMC_SDR_MODE;
-
+				"failed\n", mmc_hostname(card->host),
+				1 << bus_width, ddr);
+			goto free_card;
+		} else if (ddr) {
+			mmc_card_set_ddr_mode(card);
 			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
 		}
 	}
@@ -737,14 +755,21 @@
 /*
  * Starting point for MMC card init.
  */
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
+int mmc_attach_mmc(struct mmc_host *host)
 {
 	int err;
+	u32 ocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_attach_bus_ops(host);
+	if (host->ocr_avail_mmc)
+		host->ocr_avail = host->ocr_avail_mmc;
 
 	/*
 	 * We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@
 		goto err;
 
 	mmc_release_host(host);
-
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_card;
 
 	return 0;
 
 remove_card:
+	mmc_release_host(host);
 	mmc_remove_card(host->card);
-	host->card = NULL;
 	mmc_claim_host(host);
+	host->card = NULL;
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
 		mmc_hostname(host), err);
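
The rewritten bus-width selection in mmc.c walks from 8-bit to 4-bit to 1-bit, keeps the first width the card accepts, and, when the host advertises MMC_CAP_BUS_WIDTH_TEST, additionally verifies the data lines with the bus test before settling. A rough userspace sketch of that negotiation; switch_width() and bus_test_ok() are hypothetical stand-ins for mmc_switch() and mmc_bus_test():

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins: pretend the card accepts every switch but only
 * the 4-bit lanes actually pass the bus test. */
static bool switch_width(int width)
{
	(void)width;
	return true;
}

static bool bus_test_ok(int width)
{
	return width <= 4;
}

int main(void)
{
	const int widths[] = { 8, 4, 1 };
	bool host_can_bus_test = true;	/* MMC_CAP_BUS_WIDTH_TEST */
	int chosen = 1;
	unsigned int i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
		if (!switch_width(widths[i]))
			continue;
		/* Without bus-test support, keep the widest width that switched. */
		if (!host_can_bus_test || bus_test_ok(widths[i])) {
			chosen = widths[i];
			break;
		}
	}
	printf("negotiated bus width: %d-bit\n", chosen);
	return 0;
}
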
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c..60842f8 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@
 	return 0;
 }
 
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+		  u8 len)
+{
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	struct scatterlist sg;
+	u8 *data_buf;
+	u8 *test_buf;
+	int i, err;
+	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+	/* DMA to/from the stack or static data is unsafe/nonportable,
+	 * so bounce the test pattern through a kmalloc'd buffer.
+	 */
+	data_buf = kmalloc(len, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	if (len == 8)
+		test_buf = testdata_8bit;
+	else if (len == 4)
+		test_buf = testdata_4bit;
+	else {
+		printk(KERN_ERR "%s: Invalid bus_width %d\n",
+		       mmc_hostname(host), len);
+		kfree(data_buf);
+		return -EINVAL;
+	}
+
+	if (opcode == MMC_BUS_TEST_W)
+		memcpy(data_buf, test_buf, len);
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	cmd.opcode = opcode;
+	cmd.arg = 0;
+
+	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
+	 * rely on callers to never use this with "native" calls for reading
+	 * CSD or CID.  Native versions of those commands use the R2 type,
+	 * not R1 plus a data block.
+	 */
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	data.blksz = len;
+	data.blocks = 1;
+	if (opcode == MMC_BUS_TEST_R)
+		data.flags = MMC_DATA_READ;
+	else
+		data.flags = MMC_DATA_WRITE;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	sg_init_one(&sg, data_buf, len);
+	mmc_wait_for_req(host, &mrq);
+	err = 0;
+	if (opcode == MMC_BUS_TEST_R) {
+		for (i = 0; i < len / 4; i++)
+			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+				err = -EIO;
+				break;
+			}
+	}
+	kfree(data_buf);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+
+	return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+	int err, width;
+
+	if (bus_width == MMC_BUS_WIDTH_8)
+		width = 8;
+	else if (bus_width == MMC_BUS_WIDTH_4)
+		width = 4;
+	else if (bus_width == MMC_BUS_WIDTH_1)
+		return 0; /* no need for test */
+	else
+		return -EINVAL;
+
+	/*
+	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
+	 * is a problem.  This improves chances that the test will work.
+	 */
+	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+	return err;
+}
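
mmc_send_bus_test() drives the 0x55/0xaa pattern with BUS_TEST_W and expects the card to return the bit-inverted pattern on BUS_TEST_R, so each received byte XORed with the sent byte must equal 0xff. The kernel loop above only compares the first len/4 bytes; the userspace sketch below checks the whole buffer for simplicity:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pattern driven on an 8-bit bus by BUS_TEST_W, and the inverted
	 * reply a healthy card returns for BUS_TEST_R. */
	static const uint8_t sent[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	uint8_t received[8];
	int ok = 1;
	int i;

	for (i = 0; i < 8; i++)
		received[i] = (uint8_t)~sent[i];	/* simulate good wiring */

	/* Same pass criterion as mmc_send_bus_test(): sent ^ received == 0xff */
	for (i = 0; i < 8; i++)
		if ((sent[i] ^ received[i]) != 0xff)
			ok = 0;

	printf("bus test %s\n", ok ? "passed" : "failed");
	return 0;
}
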
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e..e6d44b8 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
 int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
 int mmc_card_sleepawake(struct mmc_host *host, int sleep);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 
 #endif
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4df..d18c32b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@
 /*
  * Starting point for SD card init.
  */
-int mmc_attach_sd(struct mmc_host *host, u32 ocr)
+int mmc_attach_sd(struct mmc_host *host)
 {
 	int err;
+	u32 ocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_app_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_sd_attach_bus_ops(host);
+	if (host->ocr_avail_sd)
+		host->ocr_avail = host->ocr_avail_sd;
 
 	/*
 	 * We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@
 		ocr &= ~0x7F;
 	}
 
-	if (ocr & MMC_VDD_165_195) {
+	if ((ocr & MMC_VDD_165_195) &&
+	    !(host->ocr_avail_sd & MMC_VDD_165_195)) {
 		printk(KERN_WARNING "%s: SD card claims to support the "
 		       "incompletely defined 'low voltage range'. This "
 		       "will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@
 		goto err;
 
 	mmc_release_host(host);
-
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_card;
 
 	return 0;
 
 remove_card:
+	mmc_release_host(host);
 	mmc_remove_card(host->card);
 	host->card = NULL;
 	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
 		mmc_hostname(host), err);
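
mmc_attach_sd() now fetches the OCR itself and drops the card's low-voltage claim unless the host's SD-specific OCR mask allows it. A small sketch of that voltage-window filtering; the bit positions follow the MMC_VDD_* definitions in linux/mmc/host.h, while the card and host masks here are invented:

#include <stdio.h>
#include <stdint.h>

#define MMC_VDD_165_195		0x00000080	/* VDD voltage 1.65 - 1.95 */
#define MMC_VDD_32_33		0x00100000	/* VDD voltage 3.2 - 3.3 */
#define MMC_VDD_33_34		0x00200000	/* VDD voltage 3.3 - 3.4 */

int main(void)
{
	uint32_t card_ocr = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34;
	uint32_t ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34;	/* invented host mask */

	/* Same rule as the patched mmc_attach_sd(): only honour the card's
	 * low-voltage claim if the host explicitly allows it for SD. */
	if ((card_ocr & MMC_VDD_165_195) && !(ocr_avail_sd & MMC_VDD_165_195)) {
		printf("ignoring the card's low-voltage range claim\n");
		card_ocr &= ~MMC_VDD_165_195;
	}

	printf("usable voltage window: 0x%08x\n",
	       (unsigned int)(card_ocr & ocr_avail_sd));
	return 0;
}
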
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f9..5c4a54d 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@
 
 static int mmc_sdio_resume(struct mmc_host *host)
 {
-	int i, err;
+	int i, err = 0;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	/* Basic card reinitialization. */
 	mmc_claim_host(host);
-	err = mmc_sdio_init_card(host, host->ocr, host->card,
+
+	/* No need to reinitialize powered-resumed nonremovable cards */
+	if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
+		err = mmc_sdio_init_card(host, host->ocr, host->card,
 				 (host->pm_flags & MMC_PM_KEEP_POWER));
+	else if (mmc_card_is_powered_resumed(host)) {
+		/* We may have switched to 1-bit mode during suspend */
+		err = sdio_enable_4bit_bus(host->card);
+		if (err > 0) {
+			mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+			err = 0;
+		}
+	}
+
 	if (!err && host->sdio_irqs)
 		mmc_signal_sdio_irq(host);
 	mmc_release_host(host);
@@ -690,16 +702,22 @@
 /*
  * Starting point for SDIO card init.
  */
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+int mmc_attach_sdio(struct mmc_host *host)
 {
-	int err;
-	int i, funcs;
+	int err, i, funcs;
+	u32 ocr;
 	struct mmc_card *card;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_io_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_attach_bus(host, &mmc_sdio_ops);
+	if (host->ocr_avail_sdio)
+		host->ocr_avail = host->ocr_avail_sdio;
 
 	/*
 	 * Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@
 			pm_runtime_enable(&card->sdio_func[i]->dev);
 	}
 
-	mmc_release_host(host);
-
 	/*
 	 * First add the card to the driver model...
 	 */
+	mmc_release_host(host);
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_added;
 
@@ -792,15 +810,17 @@
 
 remove_added:
 	/* Remove without lock if the device has been added. */
+	mmc_release_host(host);
 	mmc_sdio_remove(host);
 	mmc_claim_host(host);
 remove:
 	/* And with lock if it hasn't been added. */
+	mmc_release_host(host);
 	if (host->card)
 		mmc_sdio_remove(host);
+	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
 		mmc_hostname(host), err);
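
The SDIO resume change above skips the full re-initialisation when a non-removable card kept power across suspend and only restores the 4-bit bus width. A trivial sketch of that decision, with the two predicates reduced to plain booleans:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* Hypothetical host state after a system resume. */
	bool card_is_removable = false;
	bool card_kept_power = true;	/* i.e. mmc_card_is_powered_resumed() */

	if (card_is_removable || !card_kept_power)
		printf("full mmc_sdio_init_card() re-initialisation\n");
	else
		printf("power was kept: only re-enable the 4-bit bus\n");
	return 0;
}
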
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da44..d29b9c3 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@
 
 #ifdef CONFIG_PM_RUNTIME
 
-static int sdio_bus_pm_prepare(struct device *dev)
-{
-	struct sdio_func *func = dev_to_sdio_func(dev);
-
-	/*
-	 * Resume an SDIO device which was suspended at run time at this
-	 * point, in order to allow standard SDIO suspend/resume paths
-	 * to keep working as usual.
-	 *
-	 * Ultimately, the SDIO driver itself will decide (in its
-	 * suspend handler, or lack thereof) whether the card should be
-	 * removed or kept, and if kept, at what power state.
-	 *
-	 * At this point, PM core have increased our use count, so it's
-	 * safe to directly resume the device. After system is resumed
-	 * again, PM core will drop back its runtime PM use count, and if
-	 * needed device will be suspended again.
-	 *
-	 * The end result is guaranteed to be a power state that is
-	 * coherent with the device's runtime PM use count.
-	 *
-	 * The return value of pm_runtime_resume is deliberately unchecked
-	 * since there is little point in failing system suspend if a
-	 * device can't be resumed.
-	 */
-	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
-		pm_runtime_resume(dev);
-
-	return 0;
-}
-
 static const struct dev_pm_ops sdio_bus_pm_ops = {
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
 		pm_generic_runtime_idle
 	)
-	.prepare = sdio_bus_pm_prepare,
 };
 
 #define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d618e86..afe8c6f 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -83,7 +83,7 @@
 
 config MMC_SDHCI_OF
 	tristate "SDHCI support on OpenFirmware platforms"
-	depends on MMC_SDHCI && PPC_OF
+	depends on MMC_SDHCI && OF
 	help
 	  This selects the OF support for Secure Digital Host Controller
 	  Interfaces.
@@ -93,6 +93,7 @@
 config MMC_SDHCI_OF_ESDHC
 	bool "SDHCI OF support for the Freescale eSDHC controller"
 	depends on MMC_SDHCI_OF
+	depends on PPC_OF
 	select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
 	help
 	  This selects the Freescale eSDHC controller support.
@@ -102,6 +103,7 @@
 config MMC_SDHCI_OF_HLWD
 	bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
 	depends on MMC_SDHCI_OF
+	depends on PPC_OF
 	select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
@@ -140,6 +142,27 @@
 
 	  If unsure, say N.
 
+config MMC_SDHCI_DOVE
+	bool "SDHCI support on Marvell's Dove SoC"
+	depends on ARCH_DOVE
+	depends on MMC_SDHCI_PLTFM
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Secure Digital Host Controller Interface in
+	  Marvell's Dove SoC.
+
+	  If unsure, say N.
+
+config MMC_SDHCI_TEGRA
+	tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+	depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Tegra SD/MMC controller. If you have a Tegra
+	  platform with SD or MMC devices, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_SDHCI_S3C
 	tristate "SDHCI support on Samsung S3C SoC"
 	depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -458,11 +481,27 @@
 	help
 	  If you say yes here SD-Cards may work on the EZkit.
 
+config MMC_DW
+	tristate "Synopsys DesignWare Memory Card Interface"
+	depends on ARM
+	help
+	  This selects support for the Synopsys DesignWare Mobile Storage IP
+	  block, which provides host support for SD and MMC interfaces, in
+	  both PIO and external DMA modes.
+
+config MMC_DW_IDMAC
+	bool "Internal DMAC interface"
+	depends on MMC_DW
+	help
+	  This selects support for the internal DMAC block within the Synopsys
+	  DesignWare Mobile Storage IP block. This disables the external DMA
+	  interface.
+
 config MMC_SH_MMCIF
 	tristate "SuperH Internal MMCIF support"
 	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
 	help
-	  This selects the MMC Host Interface controler (MMCIF).
+	  This selects the MMC Host Interface controller (MMCIF).
 
 	  This driver supports MMCIF in sh7724/sh7757/sh7372.
 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff..e834fb2 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_MMC_CB710)	+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
 obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
 obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
 obj-$(CONFIG_MMC_USHC)		+= ushc.o
@@ -39,6 +40,8 @@
 sdhci-platform-y				:= sdhci-pltfm.o
 sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX)	+= sdhci-cns3xxx.o
 sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX)	+= sdhci-esdhc-imx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE)		+= sdhci-dove.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA)	+= sdhci-tegra.o
 
 obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o
 sdhci-of-y				:= sdhci-of-core.o
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 41e5a60..ef72e87 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -192,7 +192,7 @@
 	au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
 	au_sync();
 
-	/* Send the stop commmand */
+	/* Send the stop command */
 	au_writel(STOP_CMD, HOST_CMD(host));
 }
 
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547c..0076c74 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
 #define DAVINCI_MMCBLNC      0x60
 #define DAVINCI_SDIOCTL      0x64
 #define DAVINCI_SDIOST0      0x68
-#define DAVINCI_SDIOEN       0x6C
-#define DAVINCI_SDIOST       0x70
+#define DAVINCI_SDIOIEN      0x6C
+#define DAVINCI_SDIOIST      0x70
 #define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */
 
 /* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
 #define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
 #define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */
 
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI       BIT(0)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN       BIT(0)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT         BIT(0)
 
 /* MMCSD Init clock in Hz in opendrain mode */
 #define MMCSD_INIT_CLOCK		200000
@@ -164,7 +172,7 @@
 	unsigned int mmc_input_clk;
 	void __iomem *base;
 	struct resource *mem_res;
-	int irq;
+	int mmc_irq, sdio_irq;
 	unsigned char bus_mode;
 
 #define DAVINCI_MMC_DATADIR_NONE	0
@@ -184,6 +192,7 @@
 	u32 rxdma, txdma;
 	bool use_dma;
 	bool do_dma;
+	bool sdio_int;
 
 	/* Scatterlist DMA uses one or more parameter RAM entries:
 	 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@
 	struct scatterlist	*sg;
 	unsigned		sg_len;
 	unsigned		bytes_left = host->bytes_left;
-	const unsigned		shift = ffs(rw_threshold) - 1;;
+	const unsigned		shift = ffs(rw_threshold) - 1;
 
 	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
 		template = &host->tx_template;
@@ -866,6 +875,19 @@
 {
 	host->data = NULL;
 
+	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+		/*
+		 * SDIO Interrupt Detection work-around as suggested by
+		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+		 */
+		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
+					SDIOST0_DAT1_HI)) {
+			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+			mmc_signal_sdio_irq(host->mmc);
+		}
+	}
+
 	if (host->do_dma) {
 		davinci_abort_dma(host);
 
@@ -932,6 +954,21 @@
 	mmc_davinci_reset_ctrl(host, 0);
 }
 
+static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
+{
+	struct mmc_davinci_host *host = dev_id;
+	unsigned int status;
+
+	status = readl(host->base + DAVINCI_SDIOIST);
+	if (status & SDIOIST_IOINT) {
+		dev_dbg(mmc_dev(host->mmc),
+			"SDIO interrupt status %x\n", status);
+		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+		mmc_signal_sdio_irq(host->mmc);
+	}
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
 {
 	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@
 	return config->get_ro(pdev->id);
 }
 
+static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct mmc_davinci_host *host = mmc_priv(mmc);
+
+	if (enable) {
+		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
+			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+			mmc_signal_sdio_irq(host->mmc);
+		} else {
+			host->sdio_int = true;
+			writel(readl(host->base + DAVINCI_SDIOIEN) |
+			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
+		}
+	} else {
+		host->sdio_int = false;
+		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
+		       host->base + DAVINCI_SDIOIEN);
+	}
+}
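
Both the enable path above and the transfer-done path earlier apply the DM355 errata rule: an SDIO interrupt is only signalled to the core when SDIO interrupts are enabled and DAT1 is actually held low by the card. A userspace sketch of that check, with made-up register state:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SDIOST0_DAT1_HI		(1u << 0)

int main(void)
{
	bool sdio_int_enabled = true;	/* the core asked for SDIO IRQs */
	uint32_t sdiost0 = 0;		/* invented: DAT1 low, card is signalling */

	/* DM355 errata work-around: only forward the interrupt when it is
	 * enabled and DAT1 is actually held low by the card. */
	if (sdio_int_enabled && !(sdiost0 & SDIOST0_DAT1_HI))
		printf("signal the SDIO interrupt to the MMC core\n");
	else
		printf("nothing to report\n");
	return 0;
}
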
+
 static struct mmc_host_ops mmc_davinci_ops = {
 	.request	= mmc_davinci_request,
 	.set_ios	= mmc_davinci_set_ios,
 	.get_cd		= mmc_davinci_get_cd,
 	.get_ro		= mmc_davinci_get_ro,
+	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
 };
 
 /*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@
 		host->nr_sg = MAX_NR_SG;
 
 	host->use_dma = use_dma;
-	host->irq = irq;
+	host->mmc_irq = irq;
+	host->sdio_irq = platform_get_irq(pdev, 1);
 
 	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
 		host->use_dma = 0;
@@ -1270,6 +1329,13 @@
 	if (ret)
 		goto out;
 
+	if (host->sdio_irq >= 0) {
+		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+				  mmc_hostname(mmc), host);
+		if (!ret)
+			mmc->caps |= MMC_CAP_SDIO_IRQ;
+	}
+
 	rename_region(mem, mmc_hostname(mmc));
 
 	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@
 		mmc_davinci_cpufreq_deregister(host);
 
 		mmc_remove_host(host->mmc);
-		free_irq(host->irq, host);
+		free_irq(host->mmc_irq, host);
+		if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+			free_irq(host->sdio_irq, host);
 
 		davinci_release_dma_channels(host);
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 0000000..2fcc825
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/bitops.h>
+
+#include "dw_mmc.h"
+
+/* Common flag combinations */
+#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
+				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
+				 SDMMC_INT_EBE)
+#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+				 SDMMC_INT_RESP_ERR)
+#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
+				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
+#define DW_MCI_SEND_STATUS	1
+#define DW_MCI_RECV_STATUS	2
+#define DW_MCI_DMA_THRESHOLD	16
+
+#ifdef CONFIG_MMC_DW_IDMAC
+struct idmac_desc {
+	u32		des0;	/* Control Descriptor */
+#define IDMAC_DES0_DIC	BIT(1)
+#define IDMAC_DES0_LD	BIT(2)
+#define IDMAC_DES0_FD	BIT(3)
+#define IDMAC_DES0_CH	BIT(4)
+#define IDMAC_DES0_ER	BIT(5)
+#define IDMAC_DES0_CES	BIT(30)
+#define IDMAC_DES0_OWN	BIT(31)
+
+	u32		des1;	/* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+	((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+
+	u32		des2;	/* buffer 1 physical address */
+
+	u32		des3;	/* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ *	processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ *	&struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+	struct mmc_host		*mmc;
+	struct dw_mci		*host;
+
+	u32			ctype;
+
+	struct mmc_request	*mrq;
+	struct list_head	queue_node;
+
+	unsigned int		clock;
+	unsigned long		flags;
+#define DW_MMC_CARD_PRESENT	0
+#define DW_MMC_CARD_NEED_INIT	1
+	int			id;
+	int			last_detect_state;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int dw_mci_req_show(struct seq_file *s, void *v)
+{
+	struct dw_mci_slot *slot = s->private;
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_command *stop;
+	struct mmc_data	*data;
+
+	/* Make sure we get a consistent snapshot */
+	spin_lock_bh(&slot->host->lock);
+	mrq = slot->mrq;
+
+	if (mrq) {
+		cmd = mrq->cmd;
+		data = mrq->data;
+		stop = mrq->stop;
+
+		if (cmd)
+			seq_printf(s,
+				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+				   cmd->opcode, cmd->arg, cmd->flags,
+				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
+				   cmd->resp[3], cmd->error);
+		if (data)
+			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+				   data->bytes_xfered, data->blocks,
+				   data->blksz, data->flags, data->error);
+		if (stop)
+			seq_printf(s,
+				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+				   stop->opcode, stop->arg, stop->flags,
+				   stop->resp[0], stop->resp[1], stop->resp[2],
+				   stop->resp[3], stop->error);
+	}
+
+	spin_unlock_bh(&slot->host->lock);
+
+	return 0;
+}
+
+static int dw_mci_req_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_req_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dw_mci_req_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dw_mci_regs_show(struct seq_file *s, void *v)
+{
+	struct dw_mci *host = s->private;
+
+	/* Dump live register contents, not the register offset constants */
+	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
+	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
+
+	return 0;
+}
+
+static int dw_mci_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_regs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dw_mci_regs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
+{
+	struct mmc_host	*mmc = slot->mmc;
+	struct dw_mci *host = slot->host;
+	struct dentry *root;
+	struct dentry *node;
+
+	root = mmc->debugfs_root;
+	if (!root)
+		return;
+
+	node = debugfs_create_file("regs", S_IRUSR, root, host,
+				   &dw_mci_regs_fops);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_file("req", S_IRUSR, root, slot,
+				   &dw_mci_req_fops);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_x32("pending_events", S_IRUSR, root,
+				  (u32 *)&host->pending_events);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_x32("completed_events", S_IRUSR, root,
+				  (u32 *)&host->completed_events);
+	if (!node)
+		goto err;
+
+	return;
+
+err:
+	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+static void dw_mci_set_timeout(struct dw_mci *host)
+{
+	/* timeout (maximum) */
+	mci_writel(host, TMOUT, 0xffffffff);
+}
+
+static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+	struct mmc_data	*data;
+	u32 cmdr;
+	cmd->error = -EINPROGRESS;
+
+	cmdr = cmd->opcode;
+
+	if (cmdr == MMC_STOP_TRANSMISSION)
+		cmdr |= SDMMC_CMD_STOP;
+	else
+		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		/* We expect a response, so set this bit */
+		cmdr |= SDMMC_CMD_RESP_EXP;
+		if (cmd->flags & MMC_RSP_136)
+			cmdr |= SDMMC_CMD_RESP_LONG;
+	}
+
+	if (cmd->flags & MMC_RSP_CRC)
+		cmdr |= SDMMC_CMD_RESP_CRC;
+
+	data = cmd->data;
+	if (data) {
+		cmdr |= SDMMC_CMD_DAT_EXP;
+		if (data->flags & MMC_DATA_STREAM)
+			cmdr |= SDMMC_CMD_STRM_MODE;
+		if (data->flags & MMC_DATA_WRITE)
+			cmdr |= SDMMC_CMD_DAT_WR;
+	}
+
+	return cmdr;
+}
+
+static void dw_mci_start_command(struct dw_mci *host,
+				 struct mmc_command *cmd, u32 cmd_flags)
+{
+	host->cmd = cmd;
+	dev_vdbg(&host->pdev->dev,
+		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
+		 cmd->arg, cmd_flags);
+
+	mci_writel(host, CMDARG, cmd->arg);
+	wmb();
+
+	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+{
+	dw_mci_start_command(host, data->stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void dw_mci_stop_dma(struct dw_mci *host)
+{
+	if (host->use_dma) {
+		host->dma_ops->stop(host);
+		host->dma_ops->cleanup(host);
+	} else {
+		/* Data transfer was stopped by the interrupt handler */
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+	}
+}
+
+#ifdef CONFIG_MMC_DW_IDMAC
+static void dw_mci_dma_cleanup(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (data)
+		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+			     ((data->flags & MMC_DATA_WRITE)
+			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+static void dw_mci_idmac_stop_dma(struct dw_mci *host)
+{
+	u32 temp;
+
+	/* Disable and reset the IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp &= ~SDMMC_CTRL_USE_IDMAC;
+	temp |= SDMMC_CTRL_DMA_RESET;
+	mci_writel(host, CTRL, temp);
+
+	/* Stop the IDMAC running */
+	temp = mci_readl(host, BMOD);
+	temp &= ~SDMMC_IDMAC_ENABLE;
+	mci_writel(host, BMOD, temp);
+}
+
+static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+	host->dma_ops->cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point in trying to
+	 * send the stop command or waiting for NBUSY in this case.
+	 */
+	if (data) {
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+	}
+}
+
+static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+				    unsigned int sg_len)
+{
+	int i;
+	struct idmac_desc *desc = host->sg_cpu;
+
+	for (i = 0; i < sg_len; i++, desc++) {
+		unsigned int length = sg_dma_len(&data->sg[i]);
+		u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+		/* Set the OWN bit and disable interrupts for this descriptor */
+		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
+
+		/* Buffer length */
+		IDMAC_SET_BUFFER1_SIZE(desc, length);
+
+		/* Physical address to DMA to/from */
+		desc->des2 = mem_addr;
+	}
+
+	/* Set first descriptor */
+	desc = host->sg_cpu;
+	desc->des0 |= IDMAC_DES0_FD;
+
+	/* Set last descriptor */
+	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+	desc->des0 |= IDMAC_DES0_LD;
+
+	wmb();
+}
+
+static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+	u32 temp;
+
+	dw_mci_translate_sglist(host, host->data, sg_len);
+
+	/* Select IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_USE_IDMAC;
+	mci_writel(host, CTRL, temp);
+
+	wmb();
+
+	/* Enable the IDMAC */
+	temp = mci_readl(host, BMOD);
+	temp |= SDMMC_IDMAC_ENABLE;
+	mci_writel(host, BMOD, temp);
+
+	/* Start it running */
+	mci_writel(host, PLDMND, 1);
+}
+
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+	struct idmac_desc *p;
+	int i;
+
+	/* Number of descriptors in the ring buffer */
+	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+
+	/* Forward link the descriptor list */
+	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
+
+	/* Set the last descriptor as the end-of-ring descriptor */
+	p->des3 = host->sg_dma;
+	p->des0 = IDMAC_DES0_ER;
+
+	/* Mask out interrupts - get Tx & Rx complete only */
+	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
+		   SDMMC_IDMAC_INT_TI);
+
+	/* Set the descriptor base address */
+	mci_writel(host, DBADDR, host->sg_dma);
+	return 0;
+}
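
dw_mci_idmac_init() turns one page of descriptors into a ring by pointing each des3 at the bus address of its successor and flagging the final descriptor as end-of-ring. The sketch below models the same linking in userspace, using plain C pointers instead of 32-bit bus addresses and omitting the buffer-address field:

#include <stdio.h>
#include <stdlib.h>

#define IDMAC_DES0_ER	(1u << 5)	/* end-of-ring flag */

/* Simplified descriptor: des2 (buffer address) omitted, des3 is a plain
 * pointer rather than a 32-bit bus address. */
struct idmac_desc {
	unsigned int des0;		/* control */
	unsigned int des1;		/* buffer size */
	struct idmac_desc *des3;	/* next descriptor */
};

int main(void)
{
	enum { RING_SIZE = 8 };
	struct idmac_desc *ring = calloc(RING_SIZE, sizeof(*ring));
	int i;

	if (!ring)
		return 1;

	/* Forward-link the descriptors, then mark and close the ring. */
	for (i = 0; i < RING_SIZE - 1; i++)
		ring[i].des3 = &ring[i + 1];
	ring[RING_SIZE - 1].des3 = &ring[0];
	ring[RING_SIZE - 1].des0 = IDMAC_DES0_ER;

	printf("last descriptor links back to %p (ring base %p)\n",
	       (void *)ring[RING_SIZE - 1].des3, (void *)ring);
	free(ring);
	return 0;
}
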
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+	.init = dw_mci_idmac_init,
+	.start = dw_mci_idmac_start_dma,
+	.stop = dw_mci_idmac_stop_dma,
+	.complete = dw_mci_idmac_complete_dma,
+	.cleanup = dw_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+	struct scatterlist *sg;
+	unsigned int i, direction, sg_len;
+	u32 temp;
+
+	/* If we don't have a channel, we can't do DMA */
+	if (!host->use_dma)
+		return -ENODEV;
+
+	/*
+	 * We don't do DMA on "complex" transfers, i.e. with
+	 * non-word-aligned buffers or lengths. Also, we don't bother
+	 * with all the DMA setup overhead for short transfers.
+	 */
+	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
+		return -EINVAL;
+	if (data->blksz & 3)
+		return -EINVAL;
+
+	for_each_sg(data->sg, sg, data->sg_len, i) {
+		if (sg->offset & 3 || sg->length & 3)
+			return -EINVAL;
+	}
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+			    direction);
+
+	dev_vdbg(&host->pdev->dev,
+		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+		 sg_len);
+
+	/* Enable the DMA interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_DMA_ENABLE;
+	mci_writel(host, CTRL, temp);
+
+	/* Disable RX/TX IRQs, let DMA handle it */
+	temp = mci_readl(host, INTMASK);
+	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+	mci_writel(host, INTMASK, temp);
+
+	host->dma_ops->start(host, sg_len);
+
+	return 0;
+}
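
dw_mci_submit_data_dma() falls back to PIO for transfers shorter than DW_MCI_DMA_THRESHOLD bytes or with any non-word-aligned block size, buffer offset, or length. A compact userspace mirror of those eligibility checks, with an invented segment list:

#include <stdio.h>
#include <stdbool.h>

struct seg {
	unsigned int offset;
	unsigned int length;
};

/* Mirror of the eligibility checks in dw_mci_submit_data_dma(). */
static bool dma_ok(unsigned int blocks, unsigned int blksz,
		   const struct seg *segs, unsigned int nsegs)
{
	unsigned int i;

	if (blocks * blksz < 16)	/* DW_MCI_DMA_THRESHOLD */
		return false;
	if (blksz & 3)
		return false;
	for (i = 0; i < nsegs; i++)
		if ((segs[i].offset & 3) || (segs[i].length & 3))
			return false;
	return true;
}

int main(void)
{
	/* Invented segment list: the second buffer is not word aligned. */
	const struct seg segs[] = { { 0, 512 }, { 2, 512 } };

	printf("aligned transfer:    %s\n", dma_ok(1, 512, segs, 1) ? "DMA" : "PIO");
	printf("misaligned transfer: %s\n", dma_ok(2, 512, segs, 2) ? "DMA" : "PIO");
	return 0;
}
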
+
+static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+{
+	u32 temp;
+
+	data->error = -EINPROGRESS;
+
+	WARN_ON(host->data);
+	host->sg = NULL;
+	host->data = data;
+
+	if (dw_mci_submit_data_dma(host, data)) {
+		host->sg = data->sg;
+		host->pio_offset = 0;
+		if (data->flags & MMC_DATA_READ)
+			host->dir_status = DW_MCI_RECV_STATUS;
+		else
+			host->dir_status = DW_MCI_SEND_STATUS;
+
+		temp = mci_readl(host, INTMASK);
+		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+		mci_writel(host, INTMASK, temp);
+
+		temp = mci_readl(host, CTRL);
+		temp &= ~SDMMC_CTRL_DMA_ENABLE;
+		mci_writel(host, CTRL, temp);
+	}
+}
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
+{
+	struct dw_mci *host = slot->host;
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int cmd_status = 0;
+
+	mci_writel(host, CMDARG, arg);
+	wmb();
+	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
+	while (time_before(jiffies, timeout)) {
+		cmd_status = mci_readl(host, CMD);
+		if (!(cmd_status & SDMMC_CMD_START))
+			return;
+	}
+	dev_err(&slot->mmc->class_dev,
+		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
+		cmd, arg, cmd_status);
+}
+
+static void dw_mci_setup_bus(struct dw_mci_slot *slot)
+{
+	struct dw_mci *host = slot->host;
+	u32 div;
+
+	if (slot->clock != host->current_speed) {
+		if (host->bus_hz % slot->clock)
+			/*
+			 * move the + 1 after the divide to prevent
+			 * over-clocking the card.
+			 */
+			div = ((host->bus_hz / slot->clock) >> 1) + 1;
+		else
+			div = (host->bus_hz  / slot->clock) >> 1;
+
+		dev_info(&slot->mmc->class_dev,
+			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz"
+			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
+			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+
+		/* disable clock */
+		mci_writel(host, CLKENA, 0);
+		mci_writel(host, CLKSRC, 0);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* set clock to desired speed */
+		mci_writel(host, CLKDIV, div);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* enable clock */
+		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		host->current_speed = slot->clock;
+	}
+
+	/* Set the current slot bus width */
+	mci_writel(host, CTYPE, slot->ctype);
+}
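
dw_mci_setup_bus() derives CLKDIV so that the card clock is bus_hz / (2 * div), with a divider of zero meaning the undivided bus clock; the "+ 1 after the divide" rounds the divider up so the requested rate is never exceeded. A userspace sketch of that arithmetic with assumed clock values:

#include <stdio.h>

static unsigned int dw_clkdiv(unsigned int bus_hz, unsigned int want_hz)
{
	if (bus_hz % want_hz)
		/* Round the divider up so the request is never exceeded. */
		return ((bus_hz / want_hz) >> 1) + 1;
	return (bus_hz / want_hz) >> 1;
}

int main(void)
{
	unsigned int bus_hz = 100000000;	/* assumed controller input clock */
	unsigned int want_hz = 400000;		/* card identification clock */
	unsigned int div = dw_clkdiv(bus_hz, want_hz);
	unsigned int actual = div ? bus_hz / (2 * div) : bus_hz;

	printf("CLKDIV=%u -> card clock %u Hz (requested %u Hz)\n",
	       div, actual, want_hz);
	return 0;
}
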
+
+static void dw_mci_start_request(struct dw_mci *host,
+				 struct dw_mci_slot *slot)
+{
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_data	*data;
+	u32 cmdflags;
+
+	mrq = slot->mrq;
+	if (host->pdata->select_slot)
+		host->pdata->select_slot(slot->id);
+
+	/* Slot specific timing and width adjustment */
+	dw_mci_setup_bus(slot);
+
+	host->cur_slot = slot;
+	host->mrq = mrq;
+
+	host->pending_events = 0;
+	host->completed_events = 0;
+	host->data_status = 0;
+
+	data = mrq->data;
+	if (data) {
+		dw_mci_set_timeout(host);
+		mci_writel(host, BYTCNT, data->blksz*data->blocks);
+		mci_writel(host, BLKSIZ, data->blksz);
+	}
+
+	cmd = mrq->cmd;
+	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
+
+	/* this is the first command, send the initialization clock */
+	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
+		cmdflags |= SDMMC_CMD_INIT;
+
+	if (data) {
+		dw_mci_submit_data(host, data);
+		wmb();
+	}
+
+	dw_mci_start_command(host, cmd, cmdflags);
+
+	if (mrq->stop)
+		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+}
+
+static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
+				 struct mmc_request *mrq)
+{
+	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+		 host->state);
+
+	spin_lock_bh(&host->lock);
+	slot->mrq = mrq;
+
+	if (host->state == STATE_IDLE) {
+		host->state = STATE_SENDING_CMD;
+		dw_mci_start_request(host, slot);
+	} else {
+		list_add_tail(&slot->queue_node, &host->queue);
+	}
+
+	spin_unlock_bh(&host->lock);
+}
+
+static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci *host = slot->host;
+
+	WARN_ON(slot->mrq);
+
+	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+		mrq->cmd->error = -ENOMEDIUM;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+	/* We don't support multiple blocks of weird lengths. */
+	dw_mci_queue_request(host, slot, mrq);
+}
+
+static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+
+	/* set default 1 bit mode */
+	slot->ctype = SDMMC_CTYPE_1BIT;
+
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_1:
+		slot->ctype = SDMMC_CTYPE_1BIT;
+		break;
+	case MMC_BUS_WIDTH_4:
+		slot->ctype = SDMMC_CTYPE_4BIT;
+		break;
+	}
+
+	if (ios->clock) {
+		/*
+		 * Use mirror of ios->clock to prevent race with mmc
+		 * core ios update when finding the minimum.
+		 */
+		slot->clock = ios->clock;
+	}
+
+	switch (ios->power_mode) {
+	case MMC_POWER_UP:
+		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+		break;
+	default:
+		break;
+	}
+}
+
+static int dw_mci_get_ro(struct mmc_host *mmc)
+{
+	int read_only;
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci_board *brd = slot->host->pdata;
+
+	/* Use platform get_ro function, else try on board write protect */
+	if (brd->get_ro)
+		read_only = brd->get_ro(slot->id);
+	else
+		read_only =
+			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+
+	dev_dbg(&mmc->class_dev, "card is %s\n",
+		read_only ? "read-only" : "read-write");
+
+	return read_only;
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc)
+{
+	int present;
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci_board *brd = slot->host->pdata;
+
+	/* Use platform get_cd function, else try onboard card detect */
+	if (brd->get_cd)
+		present = !brd->get_cd(slot->id);
+	else
+		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+			== 0 ? 1 : 0;
+
+	if (present)
+		dev_dbg(&mmc->class_dev, "card is present\n");
+	else
+		dev_dbg(&mmc->class_dev, "card is not present\n");
+
+	return present;
+}
+
+static const struct mmc_host_ops dw_mci_ops = {
+	.request	= dw_mci_request,
+	.set_ios	= dw_mci_set_ios,
+	.get_ro		= dw_mci_get_ro,
+	.get_cd		= dw_mci_get_cd,
+};
+
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
+	__releases(&host->lock)
+	__acquires(&host->lock)
+{
+	struct dw_mci_slot *slot;
+	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
+
+	WARN_ON(host->cmd || host->data);
+
+	host->cur_slot->mrq = NULL;
+	host->mrq = NULL;
+	if (!list_empty(&host->queue)) {
+		slot = list_entry(host->queue.next,
+				  struct dw_mci_slot, queue_node);
+		list_del(&slot->queue_node);
+		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+			 mmc_hostname(slot->mmc));
+		host->state = STATE_SENDING_CMD;
+		dw_mci_start_request(host, slot);
+	} else {
+		dev_vdbg(&host->pdev->dev, "list empty\n");
+		host->state = STATE_IDLE;
+	}
+
+	spin_unlock(&host->lock);
+	mmc_request_done(prev_mmc, mrq);
+	spin_lock(&host->lock);
+}
+
+static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+{
+	u32 status = host->cmd_status;
+
+	host->cmd_status = 0;
+
+	/* Read the response from the card (up to 16 bytes) */
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			cmd->resp[3] = mci_readl(host, RESP0);
+			cmd->resp[2] = mci_readl(host, RESP1);
+			cmd->resp[1] = mci_readl(host, RESP2);
+			cmd->resp[0] = mci_readl(host, RESP3);
+		} else {
+			cmd->resp[0] = mci_readl(host, RESP0);
+			cmd->resp[1] = 0;
+			cmd->resp[2] = 0;
+			cmd->resp[3] = 0;
+		}
+	}
+
+	if (status & SDMMC_INT_RTO)
+		cmd->error = -ETIMEDOUT;
+	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+		cmd->error = -EILSEQ;
+	else if (status & SDMMC_INT_RESP_ERR)
+		cmd->error = -EIO;
+	else
+		cmd->error = 0;
+
+	if (cmd->error) {
+		/* newer ip versions need a delay between retries */
+		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
+			mdelay(20);
+
+		if (cmd->data) {
+			host->data = NULL;
+			dw_mci_stop_dma(host);
+		}
+	}
+}
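
For 136-bit (R2) responses, dw_mci_command_complete() above stores RESP3..RESP0 into resp[0]..resp[3] because the MMC core expects resp[0] to hold the most significant word of the CID/CSD. A tiny sketch of that reversal with dummy register contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Dummy contents of RESP0..RESP3; RESP0 holds the least significant
	 * word of a 136-bit (R2) response. */
	uint32_t resp_reg[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint32_t resp[4];
	int i;

	/* Reverse into the order the MMC core expects: resp[0] is the most
	 * significant word. */
	for (i = 0; i < 4; i++)
		resp[i] = resp_reg[3 - i];

	printf("resp[0..3] = %08x %08x %08x %08x\n",
	       (unsigned int)resp[0], (unsigned int)resp[1],
	       (unsigned int)resp[2], (unsigned int)resp[3]);
	return 0;
}
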
+
+static void dw_mci_tasklet_func(unsigned long priv)
+{
+	struct dw_mci *host = (struct dw_mci *)priv;
+	struct mmc_data	*data;
+	struct mmc_command *cmd;
+	enum dw_mci_state state;
+	enum dw_mci_state prev_state;
+	u32 status;
+
+	spin_lock(&host->lock);
+
+	state = host->state;
+	data = host->data;
+
+	do {
+		prev_state = state;
+
+		switch (state) {
+		case STATE_IDLE:
+			break;
+
+		case STATE_SENDING_CMD:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			cmd = host->cmd;
+			host->cmd = NULL;
+			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+			dw_mci_command_complete(host, host->mrq->cmd);
+			if (!host->mrq->data || cmd->error) {
+				dw_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_DATA;
+			/* fall through */
+
+		case STATE_SENDING_DATA:
+			if (test_and_clear_bit(EVENT_DATA_ERROR,
+					       &host->pending_events)) {
+				dw_mci_stop_dma(host);
+				if (data->stop)
+					send_stop_cmd(host, data);
+				state = STATE_DATA_ERROR;
+				break;
+			}
+
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+			prev_state = state = STATE_DATA_BUSY;
+			/* fall through */
+
+		case STATE_DATA_BUSY:
+			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->data = NULL;
+			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+			status = host->data_status;
+
+			if (status & DW_MCI_DATA_ERROR_FLAGS) {
+				if (status & SDMMC_INT_DTO) {
+					dev_err(&host->pdev->dev,
+						"data timeout error\n");
+					data->error = -ETIMEDOUT;
+				} else if (status & SDMMC_INT_DCRC) {
+					dev_err(&host->pdev->dev,
+						"data CRC error\n");
+					data->error = -EILSEQ;
+				} else {
+					dev_err(&host->pdev->dev,
+						"data FIFO error "
+						"(status=%08x)\n",
+						status);
+					data->error = -EIO;
+				}
+			} else {
+				data->bytes_xfered = data->blocks * data->blksz;
+				data->error = 0;
+			}
+
+			if (!data->stop) {
+				dw_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_STOP;
+			if (!data->error)
+				send_stop_cmd(host, data);
+			/* fall through */
+
+		case STATE_SENDING_STOP:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->cmd = NULL;
+			dw_mci_command_complete(host, host->mrq->stop);
+			dw_mci_request_end(host, host->mrq);
+			goto unlock;
+
+		case STATE_DATA_ERROR:
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			state = STATE_DATA_BUSY;
+			break;
+		}
+	} while (state != prev_state);
+
+	host->state = state;
+unlock:
+	spin_unlock(&host->lock);
+}
+
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = (u16 *)buf;
+
+	WARN_ON(cnt % 2 != 0);
+
+	cnt = cnt >> 1;
+	while (cnt > 0) {
+		mci_writew(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = (u16 *)buf;
+
+	WARN_ON(cnt % 2 != 0);
+
+	cnt = cnt >> 1;
+	while (cnt > 0) {
+		*pdata++ = mci_readw(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = (u32 *)buf;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	cnt = cnt >> 2;
+	while (cnt > 0) {
+		mci_writel(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = (u32 *)buf;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	cnt = cnt >> 2;
+	while (cnt > 0) {
+		*pdata++ = mci_readl(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = (u64 *)buf;
+
+	WARN_ON(cnt % 8 != 0);
+
+	cnt = cnt >> 3;
+	while (cnt > 0) {
+		mci_writeq(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = (u64 *)buf;
+
+	WARN_ON(cnt % 8 != 0);
+
+	cnt = cnt >> 3;
+	while (cnt > 0) {
+		*pdata++ = mci_readq(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_read_data_pio(struct dw_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data	*data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len, old_len, count = 0;
+
+	do {
+		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+		if (count == 0)
+			old_len = len;
+
+		if (offset + len <= sg->length) {
+			host->pull_data(host, (void *)(buf + offset), len);
+
+			offset += len;
+			nbytes += len;
+
+			if (offset == sg->length) {
+				flush_dcache_page(sg_page(sg));
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
+			unsigned int remaining = sg->length - offset;
+			host->pull_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			flush_dcache_page(sg_page(sg));
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->pull_data(host, buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+		if (status & DW_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+			smp_wmb();
+
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+		count++;
+	} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
+	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_write_data_pio(struct dw_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data	*data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len;
+
+	do {
+		len = SDMMC_FIFO_SZ -
+			(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+		if (offset + len <= sg->length) {
+			host->push_data(host, (void *)(buf + offset), len);
+
+			offset += len;
+			nbytes += len;
+			if (offset == sg->length) {
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
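+			/*
+			 * The data to send crosses a scatterlist boundary:
+			 * push the remainder of this entry, then continue
+			 * from the start of the next one.
+			 */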
+			unsigned int remaining = sg->length - offset;
+
+			host->push_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->push_data(host, (void *)buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+		if (status & DW_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+
+			smp_wmb();
+
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
+{
+	if (!host->cmd_status)
+		host->cmd_status = status;
+
+	smp_wmb();
+
+	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+	tasklet_schedule(&host->tasklet);
+}
+
+static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+{
+	struct dw_mci *host = dev_id;
+	u32 status, pending;
+	unsigned int pass_count = 0;
+
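+	/* Service all pending interrupt sources, but bound the number of passes */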
+	do {
+		status = mci_readl(host, RINTSTS);
+		pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+		/*
+		 * DTO fix - version 2.10a and below, and only if internal DMA
+		 * is configured.
+		 */
+		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
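+			/*
+			 * Data is left in the FIFO but nothing is pending:
+			 * report a fake data-over interrupt.
+			 */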
+			if (!pending &&
+			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+				pending |= SDMMC_INT_DATA_OVER;
+		}
+
+		if (!pending)
+			break;
+
+		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
+			host->cmd_status = status;
+			smp_wmb();
+			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+			/* if there is an error, report DATA_ERROR */
+			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+			host->data_status = status;
+			smp_wmb();
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_DATA_OVER) {
+			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+			if (!host->data_status)
+				host->data_status = status;
+			smp_wmb();
+			if (host->dir_status == DW_MCI_RECV_STATUS) {
+				if (host->sg != NULL)
+					dw_mci_read_data_pio(host);
+			}
+			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_RXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+			if (host->sg)
+				dw_mci_read_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_TXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+			if (host->sg)
+				dw_mci_write_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_CMD_DONE) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+			dw_mci_cmd_interrupt(host, status);
+		}
+
+		if (pending & SDMMC_INT_CD) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CD);
+			tasklet_schedule(&host->card_tasklet);
+		}
+
+	} while (pass_count++ < 5);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+	/* Handle DMA interrupts */
+	pending = mci_readl(host, IDSTS);
+	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+		host->dma_ops->complete(host);
+	}
+#endif
+
+	return IRQ_HANDLED;
+}
+
+static void dw_mci_tasklet_card(unsigned long data)
+{
+	struct dw_mci *host = (struct dw_mci *)data;
+	int i;
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		struct mmc_host *mmc = slot->mmc;
+		struct mmc_request *mrq;
+		int present;
+		u32 ctrl;
+
+		present = dw_mci_get_cd(mmc);
+		while (present != slot->last_detect_state) {
+			spin_lock(&host->lock);
+
+			dev_dbg(&slot->mmc->class_dev, "card %s\n",
+				present ? "inserted" : "removed");
+
+			/* Card change detected */
+			slot->last_detect_state = present;
+
+			/* Power up slot */
+			if (present != 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id,
+							      mmc->ocr_avail);
+
+				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+			}
+
+			/* Clean up queue if present */
+			mrq = slot->mrq;
+			if (mrq) {
+				if (mrq == host->mrq) {
+					host->data = NULL;
+					host->cmd = NULL;
+
+					switch (host->state) {
+					case STATE_IDLE:
+						break;
+					case STATE_SENDING_CMD:
+						mrq->cmd->error = -ENOMEDIUM;
+						if (!mrq->data)
+							break;
+						/* fall through */
+					case STATE_SENDING_DATA:
+						mrq->data->error = -ENOMEDIUM;
+						dw_mci_stop_dma(host);
+						break;
+					case STATE_DATA_BUSY:
+					case STATE_DATA_ERROR:
+						if (mrq->data->error == -EINPROGRESS)
+							mrq->data->error = -ENOMEDIUM;
+						if (!mrq->stop)
+							break;
+						/* fall through */
+					case STATE_SENDING_STOP:
+						mrq->stop->error = -ENOMEDIUM;
+						break;
+					}
+
+					dw_mci_request_end(host, mrq);
+				} else {
+					list_del(&slot->queue_node);
+					mrq->cmd->error = -ENOMEDIUM;
+					if (mrq->data)
+						mrq->data->error = -ENOMEDIUM;
+					if (mrq->stop)
+						mrq->stop->error = -ENOMEDIUM;
+
+					spin_unlock(&host->lock);
+					mmc_request_done(slot->mmc, mrq);
+					spin_lock(&host->lock);
+				}
+			}
+
+			/* Power down slot */
+			if (present == 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id, 0);
+				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+				/*
+				 * Clear down the FIFO - doing so generates a
+				 * block interrupt, so set the scatter-gather
+				 * pointer to NULL first.
+				 */
+				host->sg = NULL;
+
+				ctrl = mci_readl(host, CTRL);
+				ctrl |= SDMMC_CTRL_FIFO_RESET;
+				mci_writel(host, CTRL, ctrl);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+				ctrl = mci_readl(host, BMOD);
+				ctrl |= 0x01; /* Software reset of DMA */
+				mci_writel(host, BMOD, ctrl);
+#endif
+
+			}
+
+			spin_unlock(&host->lock);
+			present = dw_mci_get_cd(mmc);
+		}
+
+		mmc_detect_change(slot->mmc,
+			msecs_to_jiffies(host->pdata->detect_delay_ms));
+	}
+}
+
+static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+{
+	struct mmc_host *mmc;
+	struct dw_mci_slot *slot;
+
+	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
+	if (!mmc)
+		return -ENOMEM;
+
+	slot = mmc_priv(mmc);
+	slot->id = id;
+	slot->mmc = mmc;
+	slot->host = host;
+
+	mmc->ops = &dw_mci_ops;
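+	/* f_min corresponds to the largest supported clock divider (510) */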
+	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
+	mmc->f_max = host->bus_hz;
+
+	if (host->pdata->get_ocr)
+		mmc->ocr_avail = host->pdata->get_ocr(id);
+	else
+		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+	/*
+	 * Start with slot power disabled; it will be enabled when a card
+	 * is detected.
+	 */
+	if (host->pdata->setpower)
+		host->pdata->setpower(id, 0);
+
+	mmc->caps = 0;
+	if (host->pdata->get_bus_wd)
+		if (host->pdata->get_bus_wd(slot->id) >= 4)
+			mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+#ifdef CONFIG_MMC_DW_IDMAC
+	mmc->max_segs = host->ring_size;
+	mmc->max_blk_size = 65536;
+	mmc->max_blk_count = host->ring_size;
+	mmc->max_seg_size = 0x1000;
+	mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
+#else
+	if (host->pdata->blk_settings) {
+		mmc->max_segs = host->pdata->blk_settings->max_segs;
+		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+	} else {
+		/* Useful defaults if platform data is unset. */
+		mmc->max_segs = 64;
+		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+		mmc->max_blk_count = 512;
+		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+		mmc->max_seg_size = mmc->max_req_size;
+	}
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+	if (dw_mci_get_cd(mmc))
+		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+	else
+		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+	host->slot[id] = slot;
+	mmc_add_host(mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+	dw_mci_init_debugfs(slot);
+#endif
+
+	/* Card initially undetected */
+	slot->last_detect_state = 0;
+
+	return 0;
+}
+
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+{
+	/* Shut down the card-detect IRQ */
+	if (slot->host->pdata->exit)
+		slot->host->pdata->exit(id);
+
+	/* Debugfs stuff is cleaned up by mmc core */
+	mmc_remove_host(slot->mmc);
+	slot->host->slot[id] = NULL;
+	mmc_free_host(slot->mmc);
+}
+
+static void dw_mci_init_dma(struct dw_mci *host)
+{
+	/* Alloc memory for sg translation */
+	host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
+					  &host->sg_dma, GFP_KERNEL);
+	if (!host->sg_cpu) {
+		dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
+			__func__);
+		goto no_dma;
+	}
+
+	/* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_DW_IDMAC
+	host->dma_ops = &dw_mci_idmac_ops;
+	dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
+#endif
+
+	if (!host->dma_ops)
+		goto no_dma;
+
+	if (host->dma_ops->init) {
+		if (host->dma_ops->init(host)) {
+			dev_err(&host->pdev->dev, "%s: Unable to initialize "
+				"DMA Controller.\n", __func__);
+			goto no_dma;
+		}
+	} else {
+		dev_err(&host->pdev->dev, "DMA initialization not found.\n");
+		goto no_dma;
+	}
+
+	host->use_dma = 1;
+	return;
+
+no_dma:
+	dev_info(&host->pdev->dev, "Using PIO mode.\n");
+	host->use_dma = 0;
+	return;
+}
+
+static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int ctrl;
+
+	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+				SDMMC_CTRL_DMA_RESET));
+
+	/* wait till resets clear */
+	do {
+		ctrl = mci_readl(host, CTRL);
+		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+			      SDMMC_CTRL_DMA_RESET)))
+			return true;
+	} while (time_before(jiffies, timeout));
+
+	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+	return false;
+}
+
+static int dw_mci_probe(struct platform_device *pdev)
+{
+	struct dw_mci *host;
+	struct resource	*regs;
+	struct dw_mci_board *pdata;
+	int irq, ret, i, width;
+	u32 fifo_size;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->pdev = pdev;
+	host->pdata = pdata = pdev->dev.platform_data;
+	if (!pdata || !pdata->init) {
+		dev_err(&pdev->dev,
+			"Platform data must supply init function\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	if (!pdata->select_slot && pdata->num_slots > 1) {
+		dev_err(&pdev->dev,
+			"Platform data must supply select_slot function\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	if (!pdata->bus_hz) {
+		dev_err(&pdev->dev,
+			"Platform data must supply bus speed\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	host->bus_hz = pdata->bus_hz;
+	host->quirks = pdata->quirks;
+
+	spin_lock_init(&host->lock);
+	INIT_LIST_HEAD(&host->queue);
+
+	ret = -ENOMEM;
+	host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!host->regs)
+		goto err_freehost;
+
+	host->dma_ops = pdata->dma_ops;
+	dw_mci_init_dma(host);
+
+	/*
+	 * Get the host data width - this assumes that HCON has been set with
+	 * the correct values.
+	 */
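+	/* HCON[9:7]: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit host data width */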
+	i = (mci_readl(host, HCON) >> 7) & 0x7;
+	if (!i) {
+		host->push_data = dw_mci_push_data16;
+		host->pull_data = dw_mci_pull_data16;
+		width = 16;
+		host->data_shift = 1;
+	} else if (i == 2) {
+		host->push_data = dw_mci_push_data64;
+		host->pull_data = dw_mci_pull_data64;
+		width = 64;
+		host->data_shift = 3;
+	} else {
+		/* Check for a reserved value, and warn if one is found */
+		WARN((i != 1),
+		     "HCON reports a reserved host data width!\n"
+		     "Defaulting to 32-bit access.\n");
+		host->push_data = dw_mci_push_data32;
+		host->pull_data = dw_mci_pull_data32;
+		width = 32;
+		host->data_shift = 2;
+	}
+
+	/* Reset all blocks */
+	if (!mci_wait_reset(&pdev->dev, host)) {
+		ret = -ENODEV;
+		goto err_dmaunmap;
+	}
+
+	/* Clear the interrupts for the host controller */
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+	/* Put in max timeout */
+	mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+	/*
+	 * FIFO threshold settings:
+	 *	RX mark  = fifo_size / 2 - 1
+	 *	TX mark  = fifo_size / 2
+	 *	DMA size = 8
+	 */
+	fifo_size = mci_readl(host, FIFOTH);
+	fifo_size = (fifo_size >> 16) & 0x7ff;
+	mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+				  ((fifo_size/2) << 0)));
+
+	/* disable clock to CIU */
+	mci_writel(host, CLKENA, 0);
+	mci_writel(host, CLKSRC, 0);
+
+	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+	tasklet_init(&host->card_tasklet,
+		     dw_mci_tasklet_card, (unsigned long)host);
+
+	ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
+	if (ret)
+		goto err_dmaunmap;
+
+	platform_set_drvdata(pdev, host);
+
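+	/* HCON[5:1] holds the number of card slots minus one */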
+	if (host->pdata->num_slots)
+		host->num_slots = host->pdata->num_slots;
+	else
+		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+
+	/* We need at least one slot to succeed */
+	for (i = 0; i < host->num_slots; i++) {
+		ret = dw_mci_init_slot(host, i);
+		if (ret) {
+			ret = -ENODEV;
+			goto err_init_slot;
+		}
+	}
+
+	/*
+	 * Enable interrupts for command done, data over, TX/RX data requests,
+	 * card detect, and error conditions such as response/data timeouts
+	 * and CRC errors.
+	 */
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+	dev_info(&pdev->dev, "DW MMC controller at irq %d, "
+		 "%d bit host data width\n", irq, width);
+	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
+		dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
+
+	return 0;
+
+err_init_slot:
+	/* De-init any initialized slots */
+	while (i > 0) {
+		if (host->slot[i])
+			dw_mci_cleanup_slot(host->slot[i], i);
+		i--;
+	}
+	free_irq(irq, host);
+
+err_dmaunmap:
+	if (host->use_dma && host->dma_ops->exit)
+		host->dma_ops->exit(host);
+	dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+			  host->sg_cpu, host->sg_dma);
+	iounmap(host->regs);
+
+err_freehost:
+	kfree(host);
+	return ret;
+}
+
+static int __exit dw_mci_remove(struct platform_device *pdev)
+{
+	struct dw_mci *host = platform_get_drvdata(pdev);
+	int i;
+
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+	platform_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < host->num_slots; i++) {
+		dev_dbg(&pdev->dev, "remove slot %d\n", i);
+		if (host->slot[i])
+			dw_mci_cleanup_slot(host->slot[i], i);
+	}
+
+	/* disable clock to CIU */
+	mci_writel(host, CLKENA, 0);
+	mci_writel(host, CLKSRC, 0);
+
+	free_irq(platform_get_irq(pdev, 0), host);
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+	if (host->use_dma && host->dma_ops->exit)
+		host->dma_ops->exit(host);
+
+	iounmap(host->regs);
+
+	kfree(host);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	int i, ret;
+	struct dw_mci *host = platform_get_drvdata(pdev);
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		if (!slot)
+			continue;
+		ret = mmc_suspend_host(slot->mmc);
+		if (ret < 0) {
+			while (--i >= 0) {
+				slot = host->slot[i];
+				if (slot)
+					mmc_resume_host(host->slot[i]->mmc);
+			}
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dw_mci_resume(struct platform_device *pdev)
+{
+	int i, ret;
+	struct dw_mci *host = platform_get_drvdata(pdev);
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		if (!slot)
+			continue;
+		ret = mmc_resume_host(host->slot[i]->mmc);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+#else
+#define dw_mci_suspend	NULL
+#define dw_mci_resume	NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver dw_mci_driver = {
+	.remove		= __exit_p(dw_mci_remove),
+	.suspend	= dw_mci_suspend,
+	.resume		= dw_mci_resume,
+	.driver		= {
+		.name		= "dw_mmc",
+	},
+};
+
+static int __init dw_mci_init(void)
+{
+	return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
+}
+
+static void __exit dw_mci_exit(void)
+{
+	platform_driver_unregister(&dw_mci_driver);
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 0000000..5dd55a7
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define SDMMC_CTRL		0x000
+#define SDMMC_PWREN		0x004
+#define SDMMC_CLKDIV		0x008
+#define SDMMC_CLKSRC		0x00c
+#define SDMMC_CLKENA		0x010
+#define SDMMC_TMOUT		0x014
+#define SDMMC_CTYPE		0x018
+#define SDMMC_BLKSIZ		0x01c
+#define SDMMC_BYTCNT		0x020
+#define SDMMC_INTMASK		0x024
+#define SDMMC_CMDARG		0x028
+#define SDMMC_CMD		0x02c
+#define SDMMC_RESP0		0x030
+#define SDMMC_RESP1		0x034
+#define SDMMC_RESP2		0x038
+#define SDMMC_RESP3		0x03c
+#define SDMMC_MINTSTS		0x040
+#define SDMMC_RINTSTS		0x044
+#define SDMMC_STATUS		0x048
+#define SDMMC_FIFOTH		0x04c
+#define SDMMC_CDETECT		0x050
+#define SDMMC_WRTPRT		0x054
+#define SDMMC_GPIO		0x058
+#define SDMMC_TCBCNT		0x05c
+#define SDMMC_TBBCNT		0x060
+#define SDMMC_DEBNCE		0x064
+#define SDMMC_USRID		0x068
+#define SDMMC_VERID		0x06c
+#define SDMMC_HCON		0x070
+#define SDMMC_BMOD		0x080
+#define SDMMC_PLDMND		0x084
+#define SDMMC_DBADDR		0x088
+#define SDMMC_IDSTS		0x08c
+#define SDMMC_IDINTEN		0x090
+#define SDMMC_DSCADDR		0x094
+#define SDMMC_BUFADDR		0x098
+#define SDMMC_DATA		0x100
+#define SDMMC_DATA_ADR		0x100
+
+/* shift bit field */
+#define _SBF(f, v)		((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC		BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD		BIT(10)
+#define SDMMC_CTRL_SEND_CCSD		BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7)
+#define SDMMC_CTRL_READ_WAIT		BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE		BIT(5)
+#define SDMMC_CTRL_INT_ENABLE		BIT(4)
+#define SDMMC_CTRL_DMA_RESET		BIT(2)
+#define SDMMC_CTRL_FIFO_RESET		BIT(1)
+#define SDMMC_CTRL_RESET		BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR		BIT(16)
+#define SDMMC_CLKEN_ENABLE		BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n)		_SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK		0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n)		((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK		0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT		BIT(16)
+#define SDMMC_CTYPE_4BIT		BIT(0)
+#define SDMMC_CTYPE_1BIT		0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO			BIT(16)
+#define SDMMC_INT_EBE			BIT(15)
+#define SDMMC_INT_ACD			BIT(14)
+#define SDMMC_INT_SBE			BIT(13)
+#define SDMMC_INT_HLE			BIT(12)
+#define SDMMC_INT_FRUN			BIT(11)
+#define SDMMC_INT_HTO			BIT(10)
+#define SDMMC_INT_DTO			BIT(9)
+#define SDMMC_INT_RTO			BIT(8)
+#define SDMMC_INT_DCRC			BIT(7)
+#define SDMMC_INT_RCRC			BIT(6)
+#define SDMMC_INT_RXDR			BIT(5)
+#define SDMMC_INT_TXDR			BIT(4)
+#define SDMMC_INT_DATA_OVER		BIT(3)
+#define SDMMC_INT_CMD_DONE		BIT(2)
+#define SDMMC_INT_RESP_ERR		BIT(1)
+#define SDMMC_INT_CD			BIT(0)
+#define SDMMC_INT_ERROR			0xbfc2
+/* Command register defines */
+#define SDMMC_CMD_START			BIT(31)
+#define SDMMC_CMD_CCS_EXP		BIT(23)
+#define SDMMC_CMD_CEATA_RD		BIT(22)
+#define SDMMC_CMD_UPD_CLK		BIT(21)
+#define SDMMC_CMD_INIT			BIT(15)
+#define SDMMC_CMD_STOP			BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT		BIT(13)
+#define SDMMC_CMD_SEND_STOP		BIT(12)
+#define SDMMC_CMD_STRM_MODE		BIT(11)
+#define SDMMC_CMD_DAT_WR		BIT(10)
+#define SDMMC_CMD_DAT_EXP		BIT(9)
+#define SDMMC_CMD_RESP_CRC		BIT(8)
+#define SDMMC_CMD_RESP_LONG		BIT(7)
+#define SDMMC_CMD_RESP_EXP		BIT(6)
+#define SDMMC_CMD_INDX(n)		((n) & 0x1F)
+/* Status register defines */
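+/* SDMMC_GET_FCNT() extracts the FIFO fill count from STATUS[25:17] */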
+#define SDMMC_GET_FCNT(x)		(((x)>>17) & 0x1FF)
+#define SDMMC_FIFO_SZ			32
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI		BIT(9)
+#define SDMMC_IDMAC_INT_NI		BIT(8)
+#define SDMMC_IDMAC_INT_CES		BIT(5)
+#define SDMMC_IDMAC_INT_DU		BIT(4)
+#define SDMMC_IDMAC_INT_FBE		BIT(2)
+#define SDMMC_IDMAC_INT_RI		BIT(1)
+#define SDMMC_IDMAC_INT_TI		BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE		BIT(7)
+#define SDMMC_IDMAC_FB			BIT(1)
+#define SDMMC_IDMAC_SWRESET		BIT(0)
+
+/* Register access macros */
+#define mci_readl(dev, reg)			\
+	__raw_readl(dev->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value)			\
+	__raw_writel((value), dev->regs + SDMMC_##reg)
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg)			\
+	__raw_readw(dev->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value)			\
+	__raw_writew((value), dev->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg)			\
+	__raw_readq(dev->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value)			\
+	__raw_writeq((value), dev->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * We assume that none of these architectures configures the IP block
+ * with a 64-bit FIFO width, so this code should never be executed on
+ * those machines. Defining these macros here keeps the rest of the
+ * code free of ifdefs.
+ */
+#define mci_readq(dev, reg)			\
+	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value)			\
+	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+#endif
+
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb..4428594 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/dma.h>
 #include <asm/irq.h>
@@ -141,10 +142,49 @@
 
 	struct work_struct	datawork;
 	spinlock_t		lock;
+
+	struct regulator	*vcc;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
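+	/* Prefer a "vmmc" regulator for the OCR mask; fall back to platform data */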
+	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+	if (IS_ERR(host->vcc)) {
+		host->vcc = NULL;
+	} else {
+		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+		if (host->pdata && host->pdata->ocr_avail)
+			dev_warn(mmc_dev(host->mmc),
+				"pdata->ocr_avail will not be used\n");
+	}
+
+	if (host->vcc == NULL) {
+		/* fall-back to platform data */
+		if (host->pdata && host->pdata->ocr_avail)
+			host->mmc->ocr_avail = host->pdata->ocr_avail;
+		else
+			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	}
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+				    unsigned char power_mode,
+				    unsigned int vdd)
+{
+	if (host->vcc) {
+		if (power_mode == MMC_POWER_UP)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+		else if (power_mode == MMC_POWER_OFF)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+	}
+
+	if (host->pdata && host->pdata->setpower)
+		host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
 static inline int mxcmci_use_dma(struct mxcmci_host *host)
 {
 	return host->do_dma;
@@ -680,9 +720,9 @@
 		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 
 	if (host->power_mode != ios->power_mode) {
-		if (host->pdata && host->pdata->setpower)
-			host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 		host->power_mode = ios->power_mode;
+
 		if (ios->power_mode == MMC_POWER_ON)
 			host->cmdat |= CMD_DAT_CONT_INIT;
 	}
@@ -807,10 +847,7 @@
 	host->pdata = pdev->dev.platform_data;
 	spin_lock_init(&host->lock);
 
-	if (host->pdata && host->pdata->ocr_avail)
-		mmc->ocr_avail = host->pdata->ocr_avail;
-	else
-		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	mxcmci_init_ocr(host);
 
 	if (host->pdata && host->pdata->dat3_card_detect)
 		host->default_irq_mask =
@@ -915,6 +952,9 @@
 
 	mmc_remove_host(mmc);
 
+	if (host->vcc)
+		regulator_put(host->vcc);
+
 	if (host->pdata && host->pdata->exit)
 		host->pdata->exit(&pdev->dev, mmc);
 
@@ -927,7 +967,6 @@
 	clk_put(host->clk);
 
 	release_mem_region(host->res->start, resource_size(host->res));
-	release_resource(host->res);
 
 	mmc_free_host(mmc);
 
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 0c7e37f..379d2ff 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -173,6 +173,8 @@
 	struct omap_mmc_platform_data *pdata;
 };
 
+static struct workqueue_struct *mmc_omap_wq;
+
 static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
 {
 	unsigned long tick_ns;
@@ -289,7 +291,7 @@
 		host->next_slot = new_slot;
 		host->mmc = new_slot->mmc;
 		spin_unlock_irqrestore(&host->slot_lock, flags);
-		schedule_work(&host->slot_release_work);
+		queue_work(mmc_omap_wq, &host->slot_release_work);
 		return;
 	}
 
@@ -457,7 +459,7 @@
 	}
 
 	host->stop_data = data;
-	schedule_work(&host->send_stop_work);
+	queue_work(mmc_omap_wq, &host->send_stop_work);
 }
 
 static void
@@ -637,7 +639,7 @@
 		OMAP_MMC_WRITE(host, IE, 0);
 		disable_irq(host->irq);
 		host->abort = 1;
-		schedule_work(&host->cmd_abort_work);
+		queue_work(mmc_omap_wq, &host->cmd_abort_work);
 	}
 	spin_unlock_irqrestore(&host->slot_lock, flags);
 }
@@ -826,7 +828,7 @@
 		host->abort = 1;
 		OMAP_MMC_WRITE(host, IE, 0);
 		disable_irq_nosync(host->irq);
-		schedule_work(&host->cmd_abort_work);
+		queue_work(mmc_omap_wq, &host->cmd_abort_work);
 		return IRQ_HANDLED;
 	}
 
@@ -1387,7 +1389,7 @@
 
 	tasklet_kill(&slot->cover_tasklet);
 	del_timer_sync(&slot->cover_timer);
-	flush_scheduled_work();
+	flush_workqueue(mmc_omap_wq);
 
 	mmc_remove_host(mmc);
 	mmc_free_host(mmc);
@@ -1608,12 +1610,22 @@
 
 static int __init mmc_omap_init(void)
 {
-	return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
+	int ret;
+
+	mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+	if (!mmc_omap_wq)
+		return -ENOMEM;
+
+	ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
+	if (ret)
+		destroy_workqueue(mmc_omap_wq);
+	return ret;
 }
 
 static void __exit mmc_omap_exit(void)
 {
 	platform_driver_unregister(&mmc_omap_driver);
+	destroy_workqueue(mmc_omap_wq);
 }
 
 module_init(mmc_omap_init);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5d46021..078fdf1 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2290,7 +2290,7 @@
 		free_irq(host->irq, host);
 		if (mmc_slot(host).card_detect_irq)
 			free_irq(mmc_slot(host).card_detect_irq, host);
-		flush_scheduled_work();
+		flush_work_sync(&host->mmc_carddetect_work);
 
 		mmc_host_disable(host->mmc);
 		clk_disable(host->iclk);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 0000000..2aeef4f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ *	   Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+	u16 ret;
+
+	switch (reg) {
+	case SDHCI_HOST_VERSION:
+	case SDHCI_SLOT_INT_STATUS:
+		/* those registers don't exist */
+		return 0;
+	default:
+		ret = readw(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+	u32 ret;
+
+	switch (reg) {
+	case SDHCI_CAPABILITIES:
+		ret = readl(host->ioaddr + reg);
+		/* Mask the support for 3.0V */
+		ret &= ~SDHCI_CAN_VDD_300;
+		break;
+	default:
+		ret = readl(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static struct sdhci_ops sdhci_dove_ops = {
+	.read_w	= sdhci_dove_readw,
+	.read_l	= sdhci_dove_readl,
+};
+
+struct sdhci_pltfm_data sdhci_dove_pdata = {
+	.ops	= &sdhci_dove_ops,
+	.quirks	= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+		  SDHCI_QUIRK_NO_BUSY_IRQ |
+		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_FORCE_DMA,
+};
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index c51b711..dd84124f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -13,6 +13,7 @@
  * your option) any later version.
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,8 +21,12 @@
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/mmc/host.h>
+#ifdef CONFIG_PPC
 #include <asm/machdep.h>
+#endif
 #include "sdhci-of.h"
 #include "sdhci.h"
 
@@ -112,7 +117,11 @@
 		return true;
 
 	/* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
 	return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+	return false;
+#endif
 }
 
 static int __devinit sdhci_of_probe(struct platform_device *ofdev,
@@ -122,7 +131,7 @@
 	struct sdhci_of_data *sdhci_of_data = match->data;
 	struct sdhci_host *host;
 	struct sdhci_of_host *of_host;
-	const u32 *clk;
+	const __be32 *clk;
 	int size;
 	int ret;
 
@@ -166,7 +175,7 @@
 
 	clk = of_get_property(np, "clock-frequency", &size);
 	if (clk && size == sizeof(*clk) && *clk)
-		of_host->clock = *clk;
+		of_host->clock = be32_to_cpup(clk);
 
 	ret = sdhci_add_host(host);
 	if (ret)
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c246..0dc905b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 };
 
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP		0xD3
+#define O2_SD_MULTI_VCC3V	0xEE
+#define O2_SD_CLKREQ		0xEC
+#define O2_SD_CAPS		0xE0
+#define O2_SD_ADMA1		0xE2
+#define O2_SD_ADMA2		0xE7
+#define O2_SD_INF_MOD		0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+	int ret;
+	u8 scratch;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_O2_8220:
+	case PCI_DEVICE_ID_O2_8221:
+	case PCI_DEVICE_ID_O2_8320:
+	case PCI_DEVICE_ID_O2_8321:
+		/* This extra setup is required due to broken ADMA. */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set Multi 3 to VCC3V# */
+		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+		/* Disable CLK_REQ# support after media DET */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x20;
+		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+		/* Choose capabilities, enable SDMA.  We have to write 0x01
+		 * to the capabilities register first to unlock it.
+		 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+	}
+
+	return 0;
+}
+
 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 {
 	u8 scratch;
@@ -204,6 +272,7 @@
 static int jmicron_probe(struct sdhci_pci_chip *chip)
 {
 	int ret;
+	u16 mmcdev = 0;
 
 	if (chip->pdev->revision == 0) {
 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@
 	 * 2. The MMC interface has a lower subfunction number
 	 *    than the SD interface.
 	 */
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+	if (mmcdev) {
 		struct pci_dev *sd_dev;
 
 		sd_dev = NULL;
 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
-			PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+						mmcdev, sd_dev)) != NULL) {
 			if ((PCI_SLOT(chip->pdev->devfn) ==
 				PCI_SLOT(sd_dev->devfn)) &&
 				(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@
 			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 	}
 
+	/* JM388 MMC doesn't support 1.8V while SD supports it */
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31 |
+			MMC_VDD_165_195; /* allow 1.8V */
+		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+	}
+
 	/*
 	 * The secondary interface requires a bit set to get the
 	 * interrupts.
 	 */
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 1);
 
+	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
 	return 0;
 }
 
@@ -305,7 +391,8 @@
 	if (dead)
 		return;
 
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 0);
 }
 
@@ -313,7 +400,8 @@
 {
 	int i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 0);
 	}
@@ -325,7 +413,8 @@
 {
 	int ret, i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 1);
 	}
@@ -339,6 +428,10 @@
 	return 0;
 }
 
+static const struct sdhci_pci_fixes sdhci_o2 = {
+	.probe		= o2_probe,
+};
+
 static const struct sdhci_pci_fixes sdhci_jmicron = {
 	.probe		= jmicron_probe,
 
@@ -510,6 +603,22 @@
 	},
 
 	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
 		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
 		.device		= 0x8000,
 		.subvendor	= PCI_ANY_ID,
@@ -589,6 +698,46 @@
 		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
 	},
 
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8120,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8220,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8221,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8320,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8321,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
 	{	/* Generic SD host controller */
 		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
 	},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89..dbab040 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@
 #ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
 	{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
 #endif
+#ifdef CONFIG_MMC_SDHCI_DOVE
+	{ "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
+#endif
+#ifdef CONFIG_MMC_SDHCI_TEGRA
+	{ "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
+#endif
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48..ea2e44d 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@
 
 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
 extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
+extern struct sdhci_pltfm_data sdhci_dove_pdata;
+extern struct sdhci_pltfm_data sdhci_tegra_pdata;
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862..1720358 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@
 	if (!clksrc)
 		return UINT_MAX;
 
+	/*
+	 * When 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL, the external clock
+	 * divider can step in increments of 1, unlike the host controller's
+	 * internal divider, so ask the clock framework directly.
+	 */
+	if (ourhost->pdata->clk_type) {
+		rate = clk_round_rate(clksrc, wanted);
+		return wanted - rate;
+	}
+
 	rate = clk_get_rate(clksrc);
 
 	for (div = 1; div < 256; div *= 2) {
@@ -232,6 +241,42 @@
 	return min;
 }
 
+/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/*
+	 * The card initialization clock must lie in the 100 kHz - 400 kHz
+	 * range, so report the upper bound (400 kHz) as the minimum clock.
+	 */
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+}
+
+/* sdhci_cmu_set_clock - callback on clock change. */
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/* don't bother if the clock is going off */
+	if (clock == 0)
+		return;
+
+	sdhci_s3c_set_clock(host, clock);
+
+	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+	host->clock = clock;
+}
+
 static struct sdhci_ops sdhci_s3c_ops = {
 	.get_max_clock		= sdhci_s3c_get_max_clk,
 	.set_clock		= sdhci_s3c_set_clock,
@@ -361,6 +406,13 @@
 
 		clks++;
 		sc->clk_bus[ptr] = clk;
+
+		/*
+		 * save current clock index to know which clock bus
+		 * is used later in overriding functions.
+		 */
+		sc->cur_clk = ptr;
+
 		clk_enable(clk);
 
 		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -427,6 +479,20 @@
 	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
 	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
 
+	/*
+	 * If the controller does not have an internal clock divider,
+	 * override the default clock callbacks.
+	 */
+	if (pdata->clk_type) {
+		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+	}
+
+	/* Apply any additional host capabilities supplied by the platform */
+	if (pdata->host_caps)
+		host->mmc->caps |= pdata->host_caps;
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 0000000..4823ee9
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <mach/gpio.h>
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+{
+	u32 val;
+
+	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+		/* Use wp_gpio here instead? */
+		val = readl(host->ioaddr + reg);
+		return val | SDHCI_WRITE_PROTECT;
+	}
+
+	return readl(host->ioaddr + reg);
+}
+
+static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+{
+	if (unlikely(reg == SDHCI_HOST_VERSION)) {
+		/* Erratum: Version register is invalid in HW. */
+		return SDHCI_SPEC_200;
+	}
+
+	return readw(host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+	/* The controller seems to generate spurious timeout and CRC errors,
+	 * so disable signalling of them. Real errors will eventually be
+	 * caught by the software timers.
+	 */
+	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
+		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
+
+	writel(val, host->ioaddr + reg);
+
+	if (unlikely(reg == SDHCI_INT_ENABLE)) {
+		/* Erratum: Must enable block gap interrupt detection */
+		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+		if (val & SDHCI_INT_CARD_INT)
+			gap_ctrl |= 0x8;
+		else
+			gap_ctrl &= ~0x8;
+		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+	}
+}
+
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+	struct tegra_sdhci_platform_data *plat;
+
+	plat = pdev->dev.platform_data;
+
+	if (!gpio_is_valid(plat->wp_gpio))
+		return -1;
+
+	return gpio_get_value(plat->wp_gpio);
+}
+
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+	struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+	tasklet_schedule(&sdhost->card_tasklet);
+	return IRQ_HANDLED;
+};
+
+static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+	u32 ctrl;
+
+	plat = pdev->dev.platform_data;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+		ctrl |= SDHCI_CTRL_8BITBUS;
+	} else {
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+		if (bus_width == MMC_BUS_WIDTH_4)
+			ctrl |= SDHCI_CTRL_4BITBUS;
+		else
+			ctrl &= ~SDHCI_CTRL_4BITBUS;
+	}
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	return 0;
+}
+
+
+static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
+				  struct sdhci_pltfm_data *pdata)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+	struct clk *clk;
+	int rc;
+
+	plat = pdev->dev.platform_data;
+	if (plat == NULL) {
+		dev_err(mmc_dev(host->mmc), "missing platform data\n");
+		return -ENXIO;
+	}
+
+	if (gpio_is_valid(plat->power_gpio)) {
+		rc = gpio_request(plat->power_gpio, "sdhci_power");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate power gpio\n");
+			goto out;
+		}
+		tegra_gpio_enable(plat->power_gpio);
+		gpio_direction_output(plat->power_gpio, 1);
+	}
+
+	if (gpio_is_valid(plat->cd_gpio)) {
+		rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate cd gpio\n");
+			goto out_power;
+		}
+		tegra_gpio_enable(plat->cd_gpio);
+		gpio_direction_input(plat->cd_gpio);
+
+		rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+				 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				 mmc_hostname(host->mmc), host);
+
+		if (rc)	{
+			dev_err(mmc_dev(host->mmc), "request irq error\n");
+			goto out_cd;
+		}
+
+	}
+
+	if (gpio_is_valid(plat->wp_gpio)) {
+		rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate wp gpio\n");
+			goto out_cd;
+		}
+		tegra_gpio_enable(plat->wp_gpio);
+		gpio_direction_input(plat->wp_gpio);
+	}
+
+	clk = clk_get(mmc_dev(host->mmc), NULL);
+	if (IS_ERR(clk)) {
+		dev_err(mmc_dev(host->mmc), "clk err\n");
+		rc = PTR_ERR(clk);
+		goto out_wp;
+	}
+	clk_enable(clk);
+	pltfm_host->clk = clk;
+
+	if (plat->is_8bit)
+		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+	return 0;
+
+out_wp:
+	if (gpio_is_valid(plat->wp_gpio)) {
+		tegra_gpio_disable(plat->wp_gpio);
+		gpio_free(plat->wp_gpio);
+	}
+
+out_cd:
+	if (gpio_is_valid(plat->cd_gpio)) {
+		tegra_gpio_disable(plat->cd_gpio);
+		gpio_free(plat->cd_gpio);
+	}
+
+out_power:
+	if (gpio_is_valid(plat->power_gpio)) {
+		tegra_gpio_disable(plat->power_gpio);
+		gpio_free(plat->power_gpio);
+	}
+
+out:
+	return rc;
+}
+
+static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+
+	plat = pdev->dev.platform_data;
+
+	if (gpio_is_valid(plat->wp_gpio)) {
+		tegra_gpio_disable(plat->wp_gpio);
+		gpio_free(plat->wp_gpio);
+	}
+
+	if (gpio_is_valid(plat->cd_gpio)) {
+		tegra_gpio_disable(plat->cd_gpio);
+		gpio_free(plat->cd_gpio);
+	}
+
+	if (gpio_is_valid(plat->power_gpio)) {
+		tegra_gpio_disable(plat->power_gpio);
+		gpio_free(plat->power_gpio);
+	}
+
+	clk_disable(pltfm_host->clk);
+	clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops tegra_sdhci_ops = {
+	.get_ro     = tegra_sdhci_get_ro,
+	.read_l     = tegra_sdhci_readl,
+	.read_w     = tegra_sdhci_readw,
+	.write_l    = tegra_sdhci_writel,
+	.platform_8bit_width = tegra_sdhci_8bit,
+};
+
+struct sdhci_pltfm_data sdhci_tegra_pdata = {
+	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
+		  SDHCI_QUIRK_NO_HISPD_BIT |
+		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+	.ops  = &tegra_sdhci_ops,
+	.init = tegra_sdhci_pltfm_init,
+	.exit = tegra_sdhci_pltfm_exit,
+};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db42..9e15f41 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
 
 #include <linux/leds.h>
 
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/host.h>
 
 #include "sdhci.h"
@@ -77,8 +78,11 @@
 	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 		sdhci_readw(host, SDHCI_ACMD12_ERR),
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
+	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
+		sdhci_readl(host, SDHCI_CAPABILITIES_1));
+	printk(KERN_DEBUG DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
 
 	if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@
 
 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
 		host->data->error = -ETIMEDOUT;
-	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+	else if (intmask & SDHCI_INT_DATA_END_BIT)
+		host->data->error = -EILSEQ;
+	else if ((intmask & SDHCI_INT_DATA_CRC) &&
+		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+			!= MMC_BUS_TEST_R)
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1744,7 @@
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
-	unsigned int caps;
+	unsigned int caps, ocr_avail;
 	int ret;
 
 	WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@
 	    mmc_card_is_removable(mmc))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	mmc->ocr_avail = 0;
+	ocr_avail = 0;
 	if (caps & SDHCI_CAN_VDD_330)
-		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
+		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
 	if (caps & SDHCI_CAN_VDD_300)
-		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
+		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
 	if (caps & SDHCI_CAN_VDD_180)
-		mmc->ocr_avail |= MMC_VDD_165_195;
+		ocr_avail |= MMC_VDD_165_195;
+
+	mmc->ocr_avail = ocr_avail;
+	mmc->ocr_avail_sdio = ocr_avail;
+	if (host->ocr_avail_sdio)
+		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
+	mmc->ocr_avail_sd = ocr_avail;
+	if (host->ocr_avail_sd)
+		mmc->ocr_avail_sd &= host->ocr_avail_sd;
+	else /* normal SD controllers don't support 1.8V */
+		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
+	mmc->ocr_avail_mmc = ocr_avail;
+	if (host->ocr_avail_mmc)
+		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
 
 	if (mmc->ocr_avail == 0) {
 		printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1949,14 @@
 	 * of bytes. When doing hardware scatter/gather, each entry cannot
 	 * be larger than 64 KiB though.
 	 */
-	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_seg_size = 65536;
-	else
+	if (host->flags & SDHCI_USE_ADMA) {
+		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+			mmc->max_seg_size = 65535;
+		else
+			mmc->max_seg_size = 65536;
+	} else {
 		mmc->max_seg_size = mmc->max_req_size;
+	}
 
 	/*
 	 * Maximum block size. This varies from controller to controller and
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f0..6e0969e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
 #define  SDHCI_CMD_RESP_SHORT_BUSY 0x03
 
 #define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
+#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
 
 #define SDHCI_RESPONSE		0x10
 
@@ -165,7 +166,7 @@
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
 
-/* 44-47 reserved for more caps */
+#define SDHCI_CAPABILITIES_1	0x44
 
 #define SDHCI_MAX_CURRENT	0x48
 
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f472c27..bbc298f 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -446,7 +446,7 @@
 	mmc->max_seg_size = 1024 * 512;
 	mmc->max_blk_size = 512;
 
-	/* reset the controler */
+	/* reset the controller */
 	if (sdricoh_reset(host)) {
 		dev_dbg(dev, "could not reset\n");
 		result = -EIO;
@@ -478,7 +478,7 @@
 	dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
 		" %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
 
-	/* search pci cardbus bridge that contains the mmc controler */
+	/* search pci cardbus bridge that contains the mmc controller */
 	/* the io region is already claimed by yenta_socket... */
 	while ((pci_dev =
 		pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index ddd0984..12884c2 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,16 +16,19 @@
  *
  */
 
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/mmc/host.h>
+#include <linux/dmaengine.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sdio.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
 #include <linux/mmc/sh_mmcif.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
 
 #define DRIVER_NAME	"sh_mmcif"
 #define DRIVER_VERSION	"2010-04-28"
@@ -62,25 +65,6 @@
 /* CE_BLOCK_SET */
 #define BLOCK_SIZE_MASK		0x0000ffff
 
-/* CE_CLK_CTRL */
-#define CLK_ENABLE		(1 << 24) /* 1: output mmc clock */
-#define CLK_CLEAR		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
-#define CLK_SUP_PCLK		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
-#define SRSPTO_256		((1 << 13) | (0 << 12)) /* resp timeout */
-#define SRBSYTO_29		((1 << 11) | (1 << 10) |	\
-				 (1 << 9) | (1 << 8)) /* resp busy timeout */
-#define SRWDTO_29		((1 << 7) | (1 << 6) |		\
-				 (1 << 5) | (1 << 4)) /* read/write timeout */
-#define SCCSTO_29		((1 << 3) | (1 << 2) |		\
-				 (1 << 1) | (1 << 0)) /* ccs timeout */
-
-/* CE_BUF_ACC */
-#define BUF_ACC_DMAWEN		(1 << 25)
-#define BUF_ACC_DMAREN		(1 << 24)
-#define BUF_ACC_BUSW_32		(0 << 17)
-#define BUF_ACC_BUSW_16		(1 << 17)
-#define BUF_ACC_ATYP		(1 << 16)
-
 /* CE_INT */
 #define INT_CCSDE		(1 << 29)
 #define INT_CMD12DRE		(1 << 26)
@@ -165,10 +149,6 @@
 				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
 				 STS2_AC12RSPTO | STS2_RSPTO)
 
-/* CE_VERSION */
-#define SOFT_RST_ON		(1 << 31)
-#define SOFT_RST_OFF		(0 << 31)
-
 #define CLKDEV_EMMC_DATA	52000000 /* 52MHz */
 #define CLKDEV_MMC_DATA		20000000 /* 20MHz */
 #define CLKDEV_INIT		400000   /* 400 KHz */
@@ -176,18 +156,21 @@
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
-	struct mmc_command *cmd;
 	struct platform_device *pd;
 	struct clk *hclk;
 	unsigned int clk;
 	int bus_width;
-	u16 wait_int;
-	u16 sd_error;
+	bool sd_error;
 	long timeout;
 	void __iomem *addr;
-	wait_queue_head_t intr_wait;
-};
+	struct completion intr_wait;
 
+	/* DMA support */
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct completion	dma_complete;
+	unsigned int            dma_sglen;
+};
 
 static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
 					unsigned int reg, u32 val)
@@ -201,6 +184,188 @@
 	writel(~val & readl(host->addr + reg), host->addr + reg);
 }
 
+static void mmcif_dma_complete(void *arg)
+{
+	struct sh_mmcif_host *host = arg;
+	dev_dbg(&host->pd->dev, "Command completed\n");
+
+	if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+		 dev_name(&host->pd->dev)))
+		return;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(&host->pd->dev, host->data->sg, host->dma_sglen,
+			     DMA_TO_DEVICE);
+
+	complete(&host->dma_complete);
+}
+
+static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
+{
+	struct scatterlist *sg = host->data->sg;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_rx;
+	dma_cookie_t cookie = -EINVAL;
+	int ret;
+
+	ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_FROM_DEVICE);
+	if (ret > 0) {
+		host->dma_sglen = ret;
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (desc) {
+		desc->callback = mmcif_dma_complete;
+		desc->callback_param = host;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
+		} else {
+			sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
+			chan->device->device_issue_pending(chan);
+		}
+	}
+	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+		__func__, host->data->sg_len, ret, cookie);
+
+	if (!desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_rx = NULL;
+		host->dma_sglen = 0;
+		dma_release_channel(chan);
+		/* Free the Tx channel too */
+		chan = host->chan_tx;
+		if (chan) {
+			host->chan_tx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pd->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	}
+
+	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+		desc, cookie, host->data->sg_len);
+}
+
+static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
+{
+	struct scatterlist *sg = host->data->sg;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_tx;
+	dma_cookie_t cookie = -EINVAL;
+	int ret;
+
+	ret = dma_map_sg(&host->pd->dev, sg, host->data->sg_len, DMA_TO_DEVICE);
+	if (ret > 0) {
+		host->dma_sglen = ret;
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (desc) {
+		desc->callback = mmcif_dma_complete;
+		desc->callback_param = host;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
+		} else {
+			sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
+			chan->device->device_issue_pending(chan);
+		}
+	}
+	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
+		__func__, host->data->sg_len, ret, cookie);
+
+	if (!desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_tx = NULL;
+		host->dma_sglen = 0;
+		dma_release_channel(chan);
+		/* Free the Rx channel too */
+		chan = host->chan_rx;
+		if (chan) {
+			host->chan_rx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pd->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	}
+
+	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
+		desc, cookie);
+}
+
+static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
+{
+	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+	chan->private = arg;
+	return true;
+}
+
+static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
+				 struct sh_mmcif_plat_data *pdata)
+{
+	host->dma_sglen = 0;
+
+	/* Either use DMA for both Tx and Rx, or don't use it at all */
+	if (pdata->dma) {
+		dma_cap_mask_t mask;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
+						    &pdata->dma->chan_priv_tx);
+		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
+			host->chan_tx);
+
+		if (!host->chan_tx)
+			return;
+
+		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
+						    &pdata->dma->chan_priv_rx);
+		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
+			host->chan_rx);
+
+		if (!host->chan_rx) {
+			dma_release_channel(host->chan_tx);
+			host->chan_tx = NULL;
+			return;
+		}
+
+		init_completion(&host->dma_complete);
+	}
+}
+
+static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
+{
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	/* Descriptors are freed automatically */
+	if (host->chan_tx) {
+		struct dma_chan *chan = host->chan_tx;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->chan_rx) {
+		struct dma_chan *chan = host->chan_rx;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+	}
+
+	host->dma_sglen = 0;
+}
 
 static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
 {
@@ -239,13 +404,12 @@
 	u32 state1, state2;
 	int ret, timeout = 10000000;
 
-	host->sd_error = 0;
-	host->wait_int = 0;
+	host->sd_error = false;
 
 	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
 	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
-	pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1);
-	pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2);
+	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
+	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);
 
 	if (state1 & STS1_CMDSEQ) {
 		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
@@ -253,8 +417,8 @@
 		while (1) {
 			timeout--;
 			if (timeout < 0) {
-				pr_err(DRIVER_NAME": Forceed end of " \
-					"command sequence timeout err\n");
+				dev_err(&host->pd->dev,
+					"Forced end of command sequence timeout error\n");
 				return -EIO;
 			}
 			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
@@ -263,18 +427,18 @@
 			mdelay(1);
 		}
 		sh_mmcif_sync_reset(host);
-		pr_debug(DRIVER_NAME": Forced end of command sequence\n");
+		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
 		return -EIO;
 	}
 
 	if (state2 & STS2_CRC_ERR) {
-		pr_debug(DRIVER_NAME": Happened CRC error\n");
+		dev_dbg(&host->pd->dev, "CRC error happened\n");
 		ret = -EIO;
 	} else if (state2 & STS2_TIMEOUT_ERR) {
-		pr_debug(DRIVER_NAME": Happened Timeout error\n");
+		dev_dbg(&host->pd->dev, "Timeout error happened\n");
 		ret = -ETIMEDOUT;
 	} else {
-		pr_debug(DRIVER_NAME": Happened End/Index error\n");
+		dev_dbg(&host->pd->dev, "End/Index error happened\n");
 		ret = -EIO;
 	}
 	return ret;
@@ -287,17 +451,13 @@
 	long time;
 	u32 blocksize, i, *p = sg_virt(data->sg);
 
-	host->wait_int = 0;
-
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_event_interruptible_timeout(host->intr_wait,
-			host->wait_int == 1 ||
-			host->sd_error == 1, host->timeout);
-	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+			host->timeout);
+	if (time <= 0 || host->sd_error)
 		return sh_mmcif_error_manage(host);
 
-	host->wait_int = 0;
 	blocksize = (BLOCK_SIZE_MASK &
 			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
 	for (i = 0; i < blocksize / 4; i++)
@@ -305,13 +465,11 @@
 
 	/* buffer read end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_event_interruptible_timeout(host->intr_wait,
-			host->wait_int == 1 ||
-			host->sd_error == 1, host->timeout);
-	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+			host->timeout);
+	if (time <= 0 || host->sd_error)
 		return sh_mmcif_error_manage(host);
 
-	host->wait_int = 0;
 	return 0;
 }
 
@@ -326,19 +484,15 @@
 						     MMCIF_CE_BLOCK_SET);
 	for (j = 0; j < data->sg_len; j++) {
 		p = sg_virt(data->sg);
-		host->wait_int = 0;
 		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
 			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 			/* buf read enable */
-			time = wait_event_interruptible_timeout(host->intr_wait,
-				host->wait_int == 1 ||
-				host->sd_error == 1, host->timeout);
+			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+				host->timeout);
 
-			if (host->wait_int != 1 &&
-			    (time == 0 || host->sd_error != 0))
+			if (time <= 0 || host->sd_error)
 				return sh_mmcif_error_manage(host);
 
-			host->wait_int = 0;
 			for (i = 0; i < blocksize / 4; i++)
 				*p++ = sh_mmcif_readl(host->addr,
 						      MMCIF_CE_DATA);
@@ -356,17 +510,14 @@
 	long time;
 	u32 blocksize, i, *p = sg_virt(data->sg);
 
-	host->wait_int = 0;
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
 
 	/* buf write enable */
-	time = wait_event_interruptible_timeout(host->intr_wait,
-			host->wait_int == 1 ||
-			host->sd_error == 1, host->timeout);
-	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+			host->timeout);
+	if (time <= 0 || host->sd_error)
 		return sh_mmcif_error_manage(host);
 
-	host->wait_int = 0;
 	blocksize = (BLOCK_SIZE_MASK &
 			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
 	for (i = 0; i < blocksize / 4; i++)
@@ -375,13 +526,11 @@
 	/* buffer write end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
 
-	time = wait_event_interruptible_timeout(host->intr_wait,
-			host->wait_int == 1 ||
-			host->sd_error == 1, host->timeout);
-	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
+	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+			host->timeout);
+	if (time <= 0 || host->sd_error)
 		return sh_mmcif_error_manage(host);
 
-	host->wait_int = 0;
 	return 0;
 }
 
@@ -397,19 +546,15 @@
 
 	for (j = 0; j < data->sg_len; j++) {
 		p = sg_virt(data->sg);
-		host->wait_int = 0;
 		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
 			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
 			/* buf write enable*/
-			time = wait_event_interruptible_timeout(host->intr_wait,
-				host->wait_int == 1 ||
-				host->sd_error == 1, host->timeout);
+			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+				host->timeout);
 
-			if (host->wait_int != 1 &&
-			    (time == 0 || host->sd_error != 0))
+			if (time <= 0 || host->sd_error)
 				return sh_mmcif_error_manage(host);
 
-			host->wait_int = 0;
 			for (i = 0; i < blocksize / 4; i++)
 				sh_mmcif_writel(host->addr,
 						MMCIF_CE_DATA, *p++);
@@ -457,7 +602,7 @@
 		tmp |= CMD_SET_RTYP_17B;
 		break;
 	default:
-		pr_err(DRIVER_NAME": Not support type response.\n");
+		dev_err(&host->pd->dev, "Unsupported response type.\n");
 		break;
 	}
 	switch (opc) {
@@ -485,7 +630,7 @@
 			tmp |= CMD_SET_DATW_8;
 			break;
 		default:
-			pr_err(DRIVER_NAME": Not support bus width.\n");
+			dev_err(&host->pd->dev, "Unsupported bus width.\n");
 			break;
 		}
 	}
@@ -513,10 +658,10 @@
 	return opc = ((opc << 24) | tmp);
 }
 
-static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
+static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
 				struct mmc_request *mrq, u32 opc)
 {
-	u32 ret;
+	int ret;
 
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
@@ -533,7 +678,7 @@
 		ret = sh_mmcif_single_read(host, mrq);
 		break;
 	default:
-		pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc);
+		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
 		ret = -EINVAL;
 		break;
 	}
@@ -547,8 +692,6 @@
 	int ret = 0, mask = 0;
 	u32 opc = cmd->opcode;
 
-	host->cmd = cmd;
-
 	switch (opc) {
 	/* response busy check */
 	case MMC_SWITCH:
@@ -579,13 +722,12 @@
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
 	/* set arg */
 	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
-	host->wait_int = 0;
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_event_interruptible_timeout(host->intr_wait,
-		host->wait_int == 1 || host->sd_error == 1, host->timeout);
-	if (host->wait_int != 1 && time == 0) {
+	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+		host->timeout);
+	if (time <= 0) {
 		cmd->error = sh_mmcif_error_manage(host);
 		return;
 	}
@@ -597,26 +739,34 @@
 			cmd->error = -ETIMEDOUT;
 			break;
 		default:
-			pr_debug("%s: Cmd(d'%d) err\n",
-					DRIVER_NAME, cmd->opcode);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
+					cmd->opcode);
 			cmd->error = sh_mmcif_error_manage(host);
 			break;
 		}
-		host->sd_error = 0;
-		host->wait_int = 0;
+		host->sd_error = false;
 		return;
 	}
 	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = ret;
-		host->wait_int = 0;
+		cmd->error = 0;
 		return;
 	}
-	if (host->wait_int == 1) {
-		sh_mmcif_get_response(host, cmd);
-		host->wait_int = 0;
-	}
+	sh_mmcif_get_response(host, cmd);
 	if (host->data) {
-		ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+		if (!host->dma_sglen) {
+			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
+		} else {
+			long time =
+				wait_for_completion_interruptible_timeout(&host->dma_complete,
+									  host->timeout);
+			if (!time)
+				ret = -ETIMEDOUT;
+			else if (time < 0)
+				ret = time;
+			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+			host->dma_sglen = 0;
+		}
 		if (ret < 0)
 			mrq->data->bytes_xfered = 0;
 		else
@@ -636,20 +786,18 @@
 	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
 	else {
-		pr_err(DRIVER_NAME": not support stop cmd\n");
+		dev_err(&host->pd->dev, "unsupported stop cmd\n");
 		cmd->error = sh_mmcif_error_manage(host);
 		return;
 	}
 
-	time = wait_event_interruptible_timeout(host->intr_wait,
-			host->wait_int == 1 ||
-			host->sd_error == 1, host->timeout);
-	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
+	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
+			host->timeout);
+	if (time <= 0 || host->sd_error) {
 		cmd->error = sh_mmcif_error_manage(host);
 		return;
 	}
 	sh_mmcif_get_cmd12response(host, cmd);
-	host->wait_int = 0;
 	cmd->error = 0;
 }
 
@@ -676,6 +824,15 @@
 		break;
 	}
 	host->data = mrq->data;
+	if (mrq->data) {
+		if (mrq->data->flags & MMC_DATA_READ) {
+			if (host->chan_rx)
+				sh_mmcif_start_dma_rx(host);
+		} else {
+			if (host->chan_tx)
+				sh_mmcif_start_dma_tx(host);
+		}
+	}
 	sh_mmcif_start_cmd(host, mrq, mrq->cmd);
 	host->data = NULL;
 
@@ -735,7 +892,7 @@
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
-	u32 state = 0;
+	u32 state;
 	int err = 0;
 
 	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
@@ -774,17 +931,19 @@
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
 		err = 1;
 	} else {
-		pr_debug("%s: Not support int\n", DRIVER_NAME);
+		dev_dbg(&host->pd->dev, "Unsupported interrupt\n");
 		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
 		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
 		err = 1;
 	}
 	if (err) {
-		host->sd_error = 1;
-		pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
+		host->sd_error = true;
+		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	host->wait_int = 1;
-	wake_up(&host->intr_wait);
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
+		complete(&host->intr_wait);
+	else
+		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
 
 	return IRQ_HANDLED;
 }
@@ -793,8 +952,8 @@
 {
 	int ret = 0, irq[2];
 	struct mmc_host *mmc;
-	struct sh_mmcif_host *host = NULL;
-	struct sh_mmcif_plat_data *pd = NULL;
+	struct sh_mmcif_host *host;
+	struct sh_mmcif_plat_data *pd;
 	struct resource *res;
 	void __iomem *reg;
 	char clk_name[8];
@@ -802,7 +961,7 @@
 	irq[0] = platform_get_irq(pdev, 0);
 	irq[1] = platform_get_irq(pdev, 1);
 	if (irq[0] < 0 || irq[1] < 0) {
-		pr_err(DRIVER_NAME": Get irq error\n");
+		dev_err(&pdev->dev, "Get irq error\n");
 		return -ENXIO;
 	}
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -815,7 +974,7 @@
 		dev_err(&pdev->dev, "ioremap error.\n");
 		return -ENOMEM;
 	}
-	pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
+	pd = pdev->dev.platform_data;
 	if (!pd) {
 		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
 		ret = -ENXIO;
@@ -842,7 +1001,7 @@
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_waitqueue_head(&host->intr_wait);
+	init_completion(&host->intr_wait);
 
 	mmc->ops = &sh_mmcif_ops;
 	mmc->f_max = host->clk;
@@ -858,33 +1017,37 @@
 	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
 	if (pd->caps)
 		mmc->caps |= pd->caps;
-	mmc->max_segs = 128;
+	mmc->max_segs = 32;
 	mmc->max_blk_size = 512;
-	mmc->max_blk_count = 65535;
-	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
 	mmc->max_seg_size = mmc->max_req_size;
 
 	sh_mmcif_sync_reset(host);
 	platform_set_drvdata(pdev, host);
+
+	/* See if we also get DMA */
+	sh_mmcif_request_dma(host, pd);
+
 	mmc_add_host(mmc);
 
 	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
 	if (ret) {
-		pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
+		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up2;
 	}
 	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
-		pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
+		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up2;
 	}
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 	sh_mmcif_detect(host->mmc);
 
-	pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
-	pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
+	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
+	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
 		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
 	return ret;
 
@@ -903,20 +1066,22 @@
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	mmc_remove_host(host->mmc);
+	sh_mmcif_release_dma(host);
+
+	if (host->addr)
+		iounmap(host->addr);
+
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
 	irq[0] = platform_get_irq(pdev, 0);
 	irq[1] = platform_get_irq(pdev, 1);
 
-	if (host->addr)
-		iounmap(host->addr);
-
-	platform_set_drvdata(pdev, NULL);
-	mmc_remove_host(host->mmc);
-
 	free_irq(irq[0], host);
 	free_irq(irq[1], host);
 
+	platform_set_drvdata(pdev, NULL);
+
 	clk_disable(host->hclk);
 	mmc_free_host(host->mmc);
 
@@ -947,5 +1112,5 @@
 
 MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
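The sh_mmcif hunks above repeatedly swap the wait-queue plus wait_int/sd_error flag pair for a struct completion and a single bool. A minimal sketch of the resulting wait pattern follows, under the assumption of a hypothetical demo_dev structure and demo_* helpers rather than the driver's own types:

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

struct demo_dev {
	struct completion intr_wait;	/* signalled by the ISR */
	bool sd_error;			/* set by the ISR on error status */
	long timeout;			/* wait limit, in jiffies */
};

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_dev *dev = data;
	bool error = false;		/* would be derived from the status register */

	/* ... read and acknowledge the hardware status here ... */
	if (error)
		dev->sd_error = true;
	complete(&dev->intr_wait);	/* wake the single waiter */
	return IRQ_HANDLED;
}

static int demo_wait_for_irq(struct demo_dev *dev)
{
	long time = wait_for_completion_interruptible_timeout(&dev->intr_wait,
							      dev->timeout);
	/*
	 * The return value is 0 on timeout, negative if a signal arrived,
	 * and the remaining jiffies otherwise, so "time <= 0" folds the old
	 * "wait_int != 1 && (time == 0 || ...)" test into one check.
	 */
	if (time <= 0 || dev->sd_error)
		return -EIO;
	return 0;
}

The completion is initialised once (init_completion() in probe, as the patch does), and each complete() satisfies exactly one wait, which is why the per-call wait_int resets disappear.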
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a8..e3c6ef2 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
  *   double buffer support
  *
  */
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/device.h>
+
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/dmaengine.h>
-#include <linux/mmc/host.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
-#include "tmio_mmc.h"
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_RESET_SD 0xe0
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND    0x00000001
+#define TMIO_STAT_DATAEND       0x00000004
+#define TMIO_STAT_CARD_REMOVE   0x00000008
+#define TMIO_STAT_CARD_INSERT   0x00000010
+#define TMIO_STAT_SIGSTATE      0x00000020
+#define TMIO_STAT_WRPROTECT     0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A    0x00000400
+#define TMIO_STAT_CMD_IDX_ERR   0x00010000
+#define TMIO_STAT_CRCFAIL       0x00020000
+#define TMIO_STAT_STOPBIT_ERR   0x00040000
+#define TMIO_STAT_DATATIMEOUT   0x00080000
+#define TMIO_STAT_RXOVERFLOW    0x00100000
+#define TMIO_STAT_TXUNDERRUN    0x00200000
+#define TMIO_STAT_CMDTIMEOUT    0x00400000
+#define TMIO_STAT_RXRDY         0x01000000
+#define TMIO_STAT_TXRQ          0x02000000
+#define TMIO_STAT_ILL_FUNC      0x20000000
+#define TMIO_STAT_CMD_BUSY      0x40000000
+#define TMIO_STAT_ILL_ACCESS    0x80000000
+
+/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ	0x0001
+#define TMIO_SDIO_STAT_EXPUB52	0x4000
+#define TMIO_SDIO_STAT_EXWT	0x8000
+#define TMIO_SDIO_MASK_ALL	0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL           0x837f031d
+#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+#define enable_mmc_irqs(host, i) \
+	do { \
+		u32 mask;\
+		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+		mask &= ~((i) & TMIO_MASK_IRQ); \
+		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+	} while (0)
+
+#define disable_mmc_irqs(host, i) \
+	do { \
+		u32 mask;\
+		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+		mask |= ((i) & TMIO_MASK_IRQ); \
+		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+	} while (0)
+
+#define ack_mmc_irqs(host, i) \
+	do { \
+		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+	} while (0)
+
+/* This is arbitrary, just no one has needed any higher alignment yet */
+#define MAX_ALIGN 4
+
+struct tmio_mmc_host {
+	void __iomem *ctl;
+	unsigned long bus_shift;
+	struct mmc_command      *cmd;
+	struct mmc_request      *mrq;
+	struct mmc_data         *data;
+	struct mmc_host         *mmc;
+	int                     irq;
+	unsigned int		sdio_irq_enabled;
+
+	/* Callbacks for clock / power control */
+	void (*set_pwr)(struct platform_device *host, int state);
+	void (*set_clk_div)(struct platform_device *host, int state);
+
+	/* pio related stuff */
+	struct scatterlist      *sg_ptr;
+	struct scatterlist      *sg_orig;
+	unsigned int            sg_len;
+	unsigned int            sg_off;
+
+	struct platform_device *pdev;
+
+	/* DMA support */
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct tasklet_struct	dma_complete;
+	struct tasklet_struct	dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+	unsigned int            dma_sglen;
+	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
+	struct scatterlist	bounce_sg;
+#endif
+
+	/* Track lost interrupts */
+	struct delayed_work	delayed_reset_work;
+	spinlock_t		lock;
+	unsigned long		last_req_ts;
+};
+
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
+
+static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+	return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+		u16 *buf, int count)
+{
+	readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+	return readw(host->ctl + (addr << host->bus_shift)) |
+	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+	writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+		u16 *buf, int count)
+{
+	writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	writew(val, host->ctl + (addr << host->bus_shift));
+	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+	host->sg_len = data->sg_len;
+	host->sg_ptr = data->sg;
+	host->sg_orig = data->sg;
+	host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+	host->sg_ptr = sg_next(host->sg_ptr);
+	host->sg_off = 0;
+	return --host->sg_len;
+}
+
+static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+	local_irq_save(*flags);
+	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+{
+	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+	local_irq_restore(*flags);
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a) \
+	do { \
+		if (status & TMIO_STAT_##a) \
+			printk(#a); \
+	} while (0)
+
+void pr_debug_status(u32 status)
+{
+	printk(KERN_DEBUG "status: %08x = ", status);
+	STATUS_TO_TEXT(CARD_REMOVE);
+	STATUS_TO_TEXT(CARD_INSERT);
+	STATUS_TO_TEXT(SIGSTATE);
+	STATUS_TO_TEXT(WRPROTECT);
+	STATUS_TO_TEXT(CARD_REMOVE_A);
+	STATUS_TO_TEXT(CARD_INSERT_A);
+	STATUS_TO_TEXT(SIGSTATE_A);
+	STATUS_TO_TEXT(CMD_IDX_ERR);
+	STATUS_TO_TEXT(STOPBIT_ERR);
+	STATUS_TO_TEXT(ILL_FUNC);
+	STATUS_TO_TEXT(CMD_BUSY);
+	STATUS_TO_TEXT(CMDRESPEND);
+	STATUS_TO_TEXT(DATAEND);
+	STATUS_TO_TEXT(CRCFAIL);
+	STATUS_TO_TEXT(DATATIMEOUT);
+	STATUS_TO_TEXT(CMDTIMEOUT);
+	STATUS_TO_TEXT(RXOVERFLOW);
+	STATUS_TO_TEXT(TXUNDERRUN);
+	STATUS_TO_TEXT(RXRDY);
+	STATUS_TO_TEXT(TXRQ);
+	STATUS_TO_TEXT(ILL_ACCESS);
+	printk("\n");
+}
+
+#else
+#define pr_debug_status(s)  do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+
+	if (enable) {
+		host->sdio_irq_enabled = 1;
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
+			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+	} else {
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+		host->sdio_irq_enabled = 0;
+	}
+}
 
 static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
 {
@@ -55,8 +300,23 @@
 
 static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
 {
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+
+	/*
+	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
+	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
+	 * device IRQ here and restore the SDIO IRQ mask before
+	 * re-enabling the device IRQ.
+	 */
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		disable_irq(host->irq);
 	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
 	msleep(10);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+		enable_irq(host->irq);
+	}
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
 	msleep(10);
@@ -64,11 +324,21 @@
 
 static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
 {
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
 	msleep(10);
+	/* see comment in tmio_mmc_clk_stop above */
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		disable_irq(host->irq);
 	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
 	msleep(10);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+		enable_irq(host->irq);
+	}
 }
 
 static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@
 	msleep(10);
 }
 
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  delayed_reset_work.work);
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	mrq = host->mrq;
+
+	/* request already finished or not timed out yet */
+	if (!mrq
+	    || time_is_after_jiffies(host->last_req_ts +
+		msecs_to_jiffies(2000))) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	dev_warn(&host->pdev->dev,
+		"timeout waiting for hardware interrupt (CMD%u)\n",
+		mrq->cmd->opcode);
+
+	if (host->data)
+		host->data->error = -ETIMEDOUT;
+	else if (host->cmd)
+		host->cmd->error = -ETIMEDOUT;
+	else
+		mrq->cmd->error = -ETIMEDOUT;
+
+	host->cmd = NULL;
+	host->data = NULL;
+	host->mrq = NULL;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	reset(host);
+
+	mmc_request_done(host->mmc, mrq);
+}
+
 static void
 tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
 
+	if (!mrq)
+		return;
+
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 
+	cancel_delayed_work(&host->delayed_reset_work);
+
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -200,6 +515,7 @@
 	return;
 }
 
+/* needs to be called with host->lock held */
 static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@
 	if (data->flags & MMC_DATA_READ) {
 		if (!host->chan_rx)
 			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		else
+			tmio_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
 	} else {
@@ -254,10 +572,12 @@
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 {
-	struct mmc_data *data = host->data;
+	struct mmc_data *data;
+	spin_lock(&host->lock);
+	data = host->data;
 
 	if (!data)
-		return;
+		goto out;
 
 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
 		/*
@@ -278,6 +598,8 @@
 	} else {
 		tmio_mmc_do_data_irq(host);
 	}
+out:
+	spin_unlock(&host->lock);
 }
 
 static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@
 	struct mmc_command *cmd = host->cmd;
 	int i, addr;
 
+	spin_lock(&host->lock);
+
 	if (!host->cmd) {
 		pr_debug("Spurious CMD irq\n");
-		return;
+		goto out;
 	}
 
 	host->cmd = NULL;
@@ -324,8 +648,7 @@
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
 		} else {
-			struct dma_chan *chan = host->chan_tx;
-			if (!chan)
+			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 			else
 				tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@
 		tmio_mmc_finish_request(host);
 	}
 
+out:
+	spin_unlock(&host->lock);
+
 	return;
 }
 
 static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
 	unsigned int ireg, irq_mask, status;
+	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
 
 	pr_debug("MMC IRQ begin\n");
 
@@ -348,6 +677,29 @@
 	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
 	ireg = status & TMIO_MASK_IRQ & ~irq_mask;
 
+	sdio_ireg = 0;
+	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
+		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+
+		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
+
+		if (sdio_ireg && !host->sdio_irq_enabled) {
+			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
+				   sdio_status, sdio_irq_mask, sdio_ireg);
+			tmio_mmc_enable_sdio_irq(host->mmc, 0);
+			goto out;
+		}
+
+		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
+			mmc_signal_sdio_irq(host->mmc);
+
+		if (sdio_ireg)
+			goto out;
+	}
+
 	pr_debug_status(status);
 	pr_debug_status(ireg);
 
@@ -375,8 +727,10 @@
  */
 
 		/* Command completion */
-		if (ireg & TMIO_MASK_CMD) {
-			ack_mmc_irqs(host, TMIO_MASK_CMD);
+		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+			ack_mmc_irqs(host,
+				     TMIO_STAT_CMDRESPEND |
+				     TMIO_STAT_CMDTIMEOUT);
 			tmio_mmc_cmd_irq(host, status);
 		}
 
@@ -407,6 +761,16 @@
 }
 
 #ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+	if (host->sg_ptr == &host->bounce_sg) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+	}
+}
+
 static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 {
 #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@
 		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 }
 
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
-	int ret;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	/* The single sg element may be unaligned, so use our bounce buffer */
+	if (!aligned) {
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0) {
@@ -442,21 +833,21 @@
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		} else {
 			chan->device->device_issue_pending(chan);
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+pio:
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -471,24 +862,49 @@
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, host->cookie, host->sg_len);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie, host->sg_len);
 }
 
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
-	int ret;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	/* The single sg element may be unaligned, so use our bounce buffer */
+	if (!aligned) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0) {
@@ -498,19 +914,19 @@
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+pio:
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -525,30 +941,22 @@
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
-		desc, host->cookie);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie);
 }
 
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
 	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
-			return tmio_mmc_start_dma_rx(host);
+			tmio_mmc_start_dma_rx(host);
 	} else {
 		if (host->chan_tx)
-			return tmio_mmc_start_dma_tx(host);
+			tmio_mmc_start_dma_tx(host);
 	}
-
-	return 0;
 }
 
 static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out;
 
 	if (host->data->flags & MMC_DATA_READ)
 		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@
 			     DMA_TO_DEVICE);
 
 	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 /* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
 				 struct tmio_mmc_data *pdata)
 {
-	host->cookie = -EINVAL;
-	host->desc = NULL;
-
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 	if (pdata->dma) {
 		dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@
 		host->chan_rx = NULL;
 		dma_release_channel(chan);
 	}
-
-	host->cookie = -EINVAL;
-	host->desc = NULL;
 }
 #else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+}
+
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
-	return 0;
 }
 
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return tmio_mmc_start_dma(host, data);
+	tmio_mmc_start_dma(host, data);
+
+	return 0;
 }
 
 /* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@
 	if (host->mrq)
 		pr_debug("request not null\n");
 
+	host->last_req_ts = jiffies;
+	wmb();
 	host->mrq = mrq;
 
 	if (mrq->data) {
@@ -703,10 +1120,14 @@
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret)
+	if (!ret) {
+		schedule_delayed_work(&host->delayed_reset_work,
+				      msecs_to_jiffies(2000));
 		return;
+	}
 
 fail:
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -780,6 +1201,7 @@
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro         = tmio_mmc_get_ro,
 	.get_cd		= tmio_mmc_get_cd,
+	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
 };
 
 #ifdef CONFIG_PM
@@ -864,10 +1286,15 @@
 		goto host_free;
 
 	mmc->ops = &tmio_mmc_ops;
-	mmc->caps = MMC_CAP_4_BIT_DATA;
-	mmc->caps |= pdata->capabilities;
+	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
 	mmc->f_max = pdata->hclk;
 	mmc->f_min = mmc->f_max / 512;
+	mmc->max_segs = 32;
+	mmc->max_blk_size = 512;
+	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+		mmc->max_segs;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
 	if (pdata->ocr_mask)
 		mmc->ocr_avail = pdata->ocr_mask;
 	else
@@ -890,12 +1317,19 @@
 		goto cell_disable;
 
 	disable_mmc_irqs(host, TMIO_MASK_ALL);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		tmio_mmc_enable_sdio_irq(mmc, 0);
 
 	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
 		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
 	if (ret)
 		goto cell_disable;
 
+	spin_lock_init(&host->lock);
+
+	/* Init delayed work for request timeouts */
+	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(host, pdata);
 
@@ -934,6 +1368,7 @@
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		cancel_delayed_work_sync(&host->delayed_reset_work);
 		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)
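The new tmio_mmc DMA paths only hand a request to the dmaengine when every scatterlist element satisfies the controller's alignment, fall back to the single PAGE_CACHE_SIZE bounce buffer when the lone element merely starts misaligned, and drop to PIO otherwise. A condensed sketch of that classification, assuming a mask of the form (1 << alignment_shift) - 1 as in the platform data, with demo_* names standing in for the driver's:

#include <linux/scatterlist.h>
#include <linux/pagemap.h>

#define DEMO_MAX_ALIGN 4	/* bounce buffer alignment, mirrors MAX_ALIGN */

enum demo_xfer { DEMO_DMA_DIRECT, DEMO_DMA_BOUNCE, DEMO_PIO };

static enum demo_xfer demo_classify_sg(struct scatterlist *sgl,
				       unsigned int sg_len,
				       unsigned int align_mask)
{
	struct scatterlist *sg;
	bool aligned = true;
	int i;

	for_each_sg(sgl, sg, sg_len, i) {
		if (sg->offset & align_mask)
			aligned = false;
		if (sg->length & align_mask)
			return DEMO_PIO;	/* partial words: PIO only */
	}

	if (aligned)
		return DEMO_DMA_DIRECT;

	/* one short, misaligned element can be staged in the bounce buffer */
	if (sg_len == 1 && sgl->length <= PAGE_CACHE_SIZE &&
	    align_mask < DEMO_MAX_ALIGN)
		return DEMO_DMA_BOUNCE;

	return DEMO_PIO;
}

For writes the bounce path copies the payload into bounce_buf before mapping (tmio_mmc_start_dma_tx above); for reads tmio_check_bounce_buffer() copies it back out once the data IRQ fires.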
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78..0000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Definitons for use with the tmio_mmc.c
- *
- * (c) 2004 Ian Molton <spyro@f2s.com>
- * (c) 2007 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND    0x00000001
-#define TMIO_STAT_DATAEND       0x00000004
-#define TMIO_STAT_CARD_REMOVE   0x00000008
-#define TMIO_STAT_CARD_INSERT   0x00000010
-#define TMIO_STAT_SIGSTATE      0x00000020
-#define TMIO_STAT_WRPROTECT     0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A    0x00000400
-#define TMIO_STAT_CMD_IDX_ERR   0x00010000
-#define TMIO_STAT_CRCFAIL       0x00020000
-#define TMIO_STAT_STOPBIT_ERR   0x00040000
-#define TMIO_STAT_DATATIMEOUT   0x00080000
-#define TMIO_STAT_RXOVERFLOW    0x00100000
-#define TMIO_STAT_TXUNDERRUN    0x00200000
-#define TMIO_STAT_CMDTIMEOUT    0x00400000
-#define TMIO_STAT_RXRDY         0x01000000
-#define TMIO_STAT_TXRQ          0x02000000
-#define TMIO_STAT_ILL_FUNC      0x20000000
-#define TMIO_STAT_CMD_BUSY      0x40000000
-#define TMIO_STAT_ILL_ACCESS    0x80000000
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL           0x837f031d
-#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
-		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-
-#define enable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask &= ~((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define disable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask |= ((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define ack_mmc_irqs(host, i) \
-	do { \
-		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
-	} while (0)
-
-
-struct tmio_mmc_host {
-	void __iomem *ctl;
-	unsigned long bus_shift;
-	struct mmc_command      *cmd;
-	struct mmc_request      *mrq;
-	struct mmc_data         *data;
-	struct mmc_host         *mmc;
-	int                     irq;
-
-	/* Callbacks for clock / power control */
-	void (*set_pwr)(struct platform_device *host, int state);
-	void (*set_clk_div)(struct platform_device *host, int state);
-
-	/* pio related stuff */
-	struct scatterlist      *sg_ptr;
-	unsigned int            sg_len;
-	unsigned int            sg_off;
-
-	struct platform_device *pdev;
-
-	/* DMA support */
-	struct dma_chan		*chan_rx;
-	struct dma_chan		*chan_tx;
-	struct tasklet_struct	dma_complete;
-	struct tasklet_struct	dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
-	struct dma_async_tx_descriptor *desc;
-	unsigned int            dma_sglen;
-	dma_cookie_t		cookie;
-#endif
-};
-
-#include <linux/io.h>
-
-static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift)) |
-	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
-		u16 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
-		u32 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-#include <linux/scatterlist.h>
-#include <linux/blkdev.h>
-
-static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
-	struct mmc_data *data)
-{
-	host->sg_len = data->sg_len;
-	host->sg_ptr = data->sg;
-	host->sg_off = 0;
-}
-
-static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
-	host->sg_ptr = sg_next(host->sg_ptr);
-	host->sg_off = 0;
-	return --host->sg_len;
-}
-
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
-	unsigned long *flags)
-{
-	local_irq_save(*flags);
-	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static inline void tmio_mmc_kunmap_atomic(void *virt,
-	unsigned long *flags)
-{
-	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
-	local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a) \
-	do { \
-		if (status & TMIO_STAT_##a) \
-			printk(#a); \
-	} while (0)
-
-void pr_debug_status(u32 status)
-{
-	printk(KERN_DEBUG "status: %08x = ", status);
-	STATUS_TO_TEXT(CARD_REMOVE);
-	STATUS_TO_TEXT(CARD_INSERT);
-	STATUS_TO_TEXT(SIGSTATE);
-	STATUS_TO_TEXT(WRPROTECT);
-	STATUS_TO_TEXT(CARD_REMOVE_A);
-	STATUS_TO_TEXT(CARD_INSERT_A);
-	STATUS_TO_TEXT(SIGSTATE_A);
-	STATUS_TO_TEXT(CMD_IDX_ERR);
-	STATUS_TO_TEXT(STOPBIT_ERR);
-	STATUS_TO_TEXT(ILL_FUNC);
-	STATUS_TO_TEXT(CMD_BUSY);
-	STATUS_TO_TEXT(CMDRESPEND);
-	STATUS_TO_TEXT(DATAEND);
-	STATUS_TO_TEXT(CRCFAIL);
-	STATUS_TO_TEXT(DATATIMEOUT);
-	STATUS_TO_TEXT(CMDTIMEOUT);
-	STATUS_TO_TEXT(RXOVERFLOW);
-	STATUS_TO_TEXT(TXUNDERRUN);
-	STATUS_TO_TEXT(RXRDY);
-	STATUS_TO_TEXT(TXRQ);
-	STATUS_TO_TEXT(ILL_ACCESS);
-	printk("\n");
-}
-
-#else
-#define pr_debug_status(s)  do { } while (0)
-#endif
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 1e2cbf5..b1f7689 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -159,7 +159,7 @@
 
 config MTD_OF_PARTS
 	tristate "Flash partition map based on OF description"
-	depends on (MICROBLAZE || PPC_OF) && MTD_PARTITIONS
+	depends on OF && MTD_PARTITIONS
 	help
 	  This provides a partition parsing function which derives
 	  the partition map from the children of the flash node,
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 2cf0cc6..f29a6f9 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -224,7 +224,7 @@
 	if (dev->blkdev) {
 		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
 					0, -1);
-		close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
+		blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 	}
 
 	kfree(dev);
@@ -234,6 +234,7 @@
 /* FIXME: ensure that mtd->size % erase_size == 0 */
 static struct block2mtd_dev *add_device(char *devname, int erase_size)
 {
+	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
 	struct block_device *bdev;
 	struct block2mtd_dev *dev;
 	char *name;
@@ -246,7 +247,7 @@
 		return NULL;
 
 	/* Get a handle on the device */
-	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
+	bdev = blkdev_get_by_path(devname, mode, dev);
 #ifndef MODULE
 	if (IS_ERR(bdev)) {
 
@@ -254,9 +255,8 @@
 		   to resolve the device name by other means. */
 
 		dev_t devt = name_to_dev_t(devname);
-		if (devt) {
-			bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
-		}
+		if (devt)
+			bdev = blkdev_get_by_dev(devt, mode, dev);
 	}
 #endif
 
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a0dd7bb..5d37d31 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -72,7 +72,7 @@
 
 config MTD_PHYSMAP_OF
 	tristate "Flash device in physical memory map based on OF description"
-	depends on (MICROBLAZE || PPC_OF) && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
+	depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
 	help
 	  This provides a 'mapping' driver which allows the NOR Flash and
 	  ROM driver code to communicate with chips which are mapped
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 4759d82..ee4bb33 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1134,7 +1134,7 @@
 static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
+	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
 }
 
 static struct file_system_type mtd_inodefs_type = {
@@ -1201,7 +1201,7 @@
 static void __exit cleanup_mtdchar(void)
 {
 	unregister_mtd_user(&mtdchar_notifier);
-	mntput(mtd_inode_mnt);
+	mntput_long(mtd_inode_mnt);
 	unregister_filesystem(&mtd_inodefs_type);
 	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
 }
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1ee72f3..c948150 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -307,6 +307,11 @@
 	unsigned long l1_cpy, l2_cpy;
 	char *dst;
 
+	if (reason != KMSG_DUMP_OOPS &&
+	    reason != KMSG_DUMP_PANIC &&
+	    reason != KMSG_DUMP_KEXEC)
+		return;
+
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
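The mtdoops hunk makes the dump callback ignore every reason other than an oops, panic, or kexec crash, so other dump events (for example a clean restart) are not written to flash. A small sketch of such a filter, assuming the two-buffer kmsg_dump callback signature of this kernel generation and purely illustrative demo_* names:

#include <linux/kmsg_dump.h>

static void demo_do_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* persist only crash-style dumps; skip restart/halt and the like */
	if (reason != KMSG_DUMP_OOPS &&
	    reason != KMSG_DUMP_PANIC &&
	    reason != KMSG_DUMP_KEXEC)
		return;

	/* ... copy the tails of s1/s2 into the persistent record here ... */
}

static struct kmsg_dumper demo_dumper = {
	.dump = demo_do_dump,
};

/* demo_dumper would be registered once via kmsg_dump_register(&demo_dumper). */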
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1f75a1b..31bf376 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -821,7 +821,7 @@
  *
  * Wait for command done. This is a helper function for nand_wait used when
  * we are in interrupt context. May happen when in panic and trying to write
- * an oops trough mtdoops.
+ * an oops through mtdoops.
  */
 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
 			    unsigned long timeo)
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 9f322f1..d0894ca 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -721,6 +721,9 @@
 	case 3:
 		c->freq = 83;
 		break;
+	case 4:
+		c->freq = 104;
+		break;
 	}
 
 #ifdef CONFIG_MTD_PARTITIONS
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a..4c8bfc9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
 	  controller on some Motorola ColdFire and Freescale i.MX processors.
 
-config FEC2
-	bool "Second FEC ethernet controller (on some ColdFire CPUs)"
-	depends on FEC
-	help
-	  Say Y here if you want to use the second built-in 10/100 Fast
-	  ethernet controller on some Motorola ColdFire processors.
-
 config FEC_MPC52xx
 	tristate "MPC52xx FEC driver"
 	depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -2970,6 +2963,7 @@
 config XEN_NETDEV_FRONTEND
 	tristate "Xen network device frontend driver"
 	depends on XEN
+	select XEN_XENBUS_FRONTEND
 	default y
 	help
 	  The network device frontend driver allows the kernel to
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 54c6d84..62d6f88 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -854,12 +854,12 @@
 }
 
 /**
- *	ks8695_get_settings - Get device-specific settings.
+ *	ks8695_wan_get_settings - Get device-specific settings.
  *	@ndev: The network device to read settings from
  *	@cmd: The ethtool structure to read into
  */
 static int
-ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
@@ -870,69 +870,50 @@
 			  SUPPORTED_TP | SUPPORTED_MII);
 	cmd->transceiver = XCVR_INTERNAL;
 
-	/* Port specific extras */
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		cmd->phy_address = 0;
-		/* not supported for HPNA */
+	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+	cmd->port = PORT_MII;
+	cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+	cmd->phy_address = 0;
+
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+	if ((ctrl & WMC_WAND) == 0) {
+		/* auto-negotiation is enabled */
+		cmd->advertising |= ADVERTISED_Autoneg;
+		if (ctrl & WMC_WANA100F)
+			cmd->advertising |= ADVERTISED_100baseT_Full;
+		if (ctrl & WMC_WANA100H)
+			cmd->advertising |= ADVERTISED_100baseT_Half;
+		if (ctrl & WMC_WANA10F)
+			cmd->advertising |= ADVERTISED_10baseT_Full;
+		if (ctrl & WMC_WANA10H)
+			cmd->advertising |= ADVERTISED_10baseT_Half;
+		if (ctrl & WMC_WANAP)
+			cmd->advertising |= ADVERTISED_Pause;
+		cmd->autoneg = AUTONEG_ENABLE;
+
+		cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+		cmd->duplex = (ctrl & WMC_WDS) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+	} else {
+		/* auto-negotiation is disabled */
 		cmd->autoneg = AUTONEG_DISABLE;
 
-		/* BUG: Erm, dtype hpna implies no phy regs */
-		/*
-		ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
-		cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
-		cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
-		*/
-		return -EOPNOTSUPP;
-	case KS8695_DTYPE_WAN:
-		cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
-		cmd->port = PORT_MII;
-		cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
-		cmd->phy_address = 0;
-
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-		if ((ctrl & WMC_WAND) == 0) {
-			/* auto-negotiation is enabled */
-			cmd->advertising |= ADVERTISED_Autoneg;
-			if (ctrl & WMC_WANA100F)
-				cmd->advertising |= ADVERTISED_100baseT_Full;
-			if (ctrl & WMC_WANA100H)
-				cmd->advertising |= ADVERTISED_100baseT_Half;
-			if (ctrl & WMC_WANA10F)
-				cmd->advertising |= ADVERTISED_10baseT_Full;
-			if (ctrl & WMC_WANA10H)
-				cmd->advertising |= ADVERTISED_10baseT_Half;
-			if (ctrl & WMC_WANAP)
-				cmd->advertising |= ADVERTISED_Pause;
-			cmd->autoneg = AUTONEG_ENABLE;
-
-			cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
-			cmd->duplex = (ctrl & WMC_WDS) ?
-				DUPLEX_FULL : DUPLEX_HALF;
-		} else {
-			/* auto-negotiation is disabled */
-			cmd->autoneg = AUTONEG_DISABLE;
-
-			cmd->speed = (ctrl & WMC_WANF100) ?
-				SPEED_100 : SPEED_10;
-			cmd->duplex = (ctrl & WMC_WANFF) ?
-				DUPLEX_FULL : DUPLEX_HALF;
-		}
-		break;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
+		cmd->speed = (ctrl & WMC_WANF100) ?
+			SPEED_100 : SPEED_10;
+		cmd->duplex = (ctrl & WMC_WANFF) ?
+			DUPLEX_FULL : DUPLEX_HALF;
 	}
 
 	return 0;
 }
 
 /**
- *	ks8695_set_settings - Set device-specific settings.
+ *	ks8695_wan_set_settings - Set device-specific settings.
  *	@ndev: The network device to configure
  *	@cmd: The settings to configure
  */
 static int
-ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
@@ -956,171 +937,85 @@
 				ADVERTISED_100baseT_Full)) == 0)
 			return -EINVAL;
 
-		switch (ksp->dtype) {
-		case KS8695_DTYPE_HPNA:
-			/* HPNA does not support auto-negotiation. */
-			return -EINVAL;
-		case KS8695_DTYPE_WAN:
-			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-			ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
-				  WMC_WANA10F | WMC_WANA10H);
-			if (cmd->advertising & ADVERTISED_100baseT_Full)
-				ctrl |= WMC_WANA100F;
-			if (cmd->advertising & ADVERTISED_100baseT_Half)
-				ctrl |= WMC_WANA100H;
-			if (cmd->advertising & ADVERTISED_10baseT_Full)
-				ctrl |= WMC_WANA10F;
-			if (cmd->advertising & ADVERTISED_10baseT_Half)
-				ctrl |= WMC_WANA10H;
+		ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
+			  WMC_WANA10F | WMC_WANA10H);
+		if (cmd->advertising & ADVERTISED_100baseT_Full)
+			ctrl |= WMC_WANA100F;
+		if (cmd->advertising & ADVERTISED_100baseT_Half)
+			ctrl |= WMC_WANA100H;
+		if (cmd->advertising & ADVERTISED_10baseT_Full)
+			ctrl |= WMC_WANA10F;
+		if (cmd->advertising & ADVERTISED_10baseT_Half)
+			ctrl |= WMC_WANA10H;
 
-			/* force a re-negotiation */
-			ctrl |= WMC_WANR;
-			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-			break;
-		case KS8695_DTYPE_LAN:
-			return -EOPNOTSUPP;
-		}
-
+		/* force a re-negotiation */
+		ctrl |= WMC_WANR;
+		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
 	} else {
-		switch (ksp->dtype) {
-		case KS8695_DTYPE_HPNA:
-			/* BUG: dtype_hpna implies no phy registers */
-			/*
-			ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
+		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-			ctrl &= ~(HMC_HSS | HMC_HDS);
-			if (cmd->speed == SPEED_100)
-				ctrl |= HMC_HSS;
-			if (cmd->duplex == DUPLEX_FULL)
-				ctrl |= HMC_HDS;
+		/* disable auto-negotiation */
+		ctrl |= WMC_WAND;
+		ctrl &= ~(WMC_WANF100 | WMC_WANFF);
 
-			__raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
-			*/
-			return -EOPNOTSUPP;
-		case KS8695_DTYPE_WAN:
-			ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+		if (cmd->speed == SPEED_100)
+			ctrl |= WMC_WANF100;
+		if (cmd->duplex == DUPLEX_FULL)
+			ctrl |= WMC_WANFF;
 
-			/* disable auto-negotiation */
-			ctrl |= WMC_WAND;
-			ctrl &= ~(WMC_WANF100 | WMC_WANFF);
-
-			if (cmd->speed == SPEED_100)
-				ctrl |= WMC_WANF100;
-			if (cmd->duplex == DUPLEX_FULL)
-				ctrl |= WMC_WANFF;
-
-			writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
-			break;
-		case KS8695_DTYPE_LAN:
-			return -EOPNOTSUPP;
-		}
+		writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
 	}
 
 	return 0;
 }
 
 /**
- *	ks8695_nwayreset - Restart the autonegotiation on the port.
+ *	ks8695_wan_nwayreset - Restart the autonegotiation on the port.
 *	@ndev: The network device to restart autonegotiation on
  */
 static int
-ks8695_nwayreset(struct net_device *ndev)
+ks8695_wan_nwayreset(struct net_device *ndev)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
 
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* No phy means no autonegotiation on hpna */
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+
+	if ((ctrl & WMC_WAND) == 0)
+		writel(ctrl | WMC_WANR,
+		       ksp->phyiface_regs + KS8695_WMC);
+	else
+		/* auto-negotiation not enabled */
 		return -EINVAL;
-	case KS8695_DTYPE_WAN:
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-
-		if ((ctrl & WMC_WAND) == 0)
-			writel(ctrl | WMC_WANR,
-			       ksp->phyiface_regs + KS8695_WMC);
-		else
-			/* auto-negotiation not enabled */
-			return -EINVAL;
-		break;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
-	}
 
 	return 0;
 }
 
 /**
- *	ks8695_get_link - Retrieve link status of network interface
- *	@ndev: The network interface to retrive the link status of.
- */
-static u32
-ks8695_get_link(struct net_device *ndev)
-{
-	struct ks8695_priv *ksp = netdev_priv(ndev);
-	u32 ctrl;
-
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* HPNA always has link */
-		return 1;
-	case KS8695_DTYPE_WAN:
-		/* WAN we can read the PHY for */
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
-		return ctrl & WMC_WLS;
-	case KS8695_DTYPE_LAN:
-		return -EOPNOTSUPP;
-	}
-	return 0;
-}
-
-/**
- *	ks8695_get_pause - Retrieve network pause/flow-control advertising
+ *	ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
  *	@ndev: The device to retrieve settings from
  *	@param: The structure to fill out with the information
  */
 static void
-ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
+ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
 {
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 	u32 ctrl;
 
-	switch (ksp->dtype) {
-	case KS8695_DTYPE_HPNA:
-		/* No phy link on hpna to configure */
-		return;
-	case KS8695_DTYPE_WAN:
-		ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
+	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
 
-		/* advertise Pause */
-		param->autoneg = (ctrl & WMC_WANAP);
+	/* advertise Pause */
+	param->autoneg = (ctrl & WMC_WANAP);
 
-		/* current Rx Flow-control */
-		ctrl = ks8695_readreg(ksp, KS8695_DRXC);
-		param->rx_pause = (ctrl & DRXC_RFCE);
+	/* current Rx Flow-control */
+	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
+	param->rx_pause = (ctrl & DRXC_RFCE);
 
-		/* current Tx Flow-control */
-		ctrl = ks8695_readreg(ksp, KS8695_DTXC);
-		param->tx_pause = (ctrl & DTXC_TFCE);
-		break;
-	case KS8695_DTYPE_LAN:
-		/* The LAN's "phy" is a direct-attached switch */
-		return;
-	}
-}
-
-/**
- *	ks8695_set_pause - Configure pause/flow-control
- *	@ndev: The device to configure
- *	@param: The pause parameters to set
- *
- *	TODO: Implement this
- */
-static int
-ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
-{
-	return -EOPNOTSUPP;
+	/* current Tx Flow-control */
+	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
+	param->tx_pause = (ctrl & DTXC_TFCE);
 }
 
 /**
@@ -1140,12 +1035,17 @@
 static const struct ethtool_ops ks8695_ethtool_ops = {
 	.get_msglevel	= ks8695_get_msglevel,
 	.set_msglevel	= ks8695_set_msglevel,
-	.get_settings	= ks8695_get_settings,
-	.set_settings	= ks8695_set_settings,
-	.nway_reset	= ks8695_nwayreset,
-	.get_link	= ks8695_get_link,
-	.get_pauseparam = ks8695_get_pause,
-	.set_pauseparam = ks8695_set_pause,
+	.get_drvinfo	= ks8695_get_drvinfo,
+};
+
+static const struct ethtool_ops ks8695_wan_ethtool_ops = {
+	.get_msglevel	= ks8695_get_msglevel,
+	.set_msglevel	= ks8695_set_msglevel,
+	.get_settings	= ks8695_wan_get_settings,
+	.set_settings	= ks8695_wan_set_settings,
+	.nway_reset	= ks8695_wan_nwayreset,
+	.get_link	= ethtool_op_get_link,
+	.get_pauseparam = ks8695_wan_get_pause,
 	.get_drvinfo	= ks8695_get_drvinfo,
 };
 
@@ -1541,7 +1441,6 @@
 
 	/* driver system setup */
 	ndev->netdev_ops = &ks8695_netdev_ops;
-	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);
 
 	netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
@@ -1608,12 +1507,15 @@
 	if (ksp->phyiface_regs && ksp->link_irq == -1) {
 		ks8695_init_switch(ksp);
 		ksp->dtype = KS8695_DTYPE_LAN;
+		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
 		ks8695_init_wan_phy(ksp);
 		ksp->dtype = KS8695_DTYPE_WAN;
+		SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
 	} else {
 		/* No initialisation since HPNA does not have a PHY */
 		ksp->dtype = KS8695_DTYPE_HPNA;
+		SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	}
 
 	/* And bring up the net_device with the net core */
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9..22abfb3 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
  * Licensed under the GPL-2 or later.
  */
 
+#define DRV_VERSION	"1.1"
+#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -41,12 +46,7 @@
 
 #include "bfin_mac.h"
 
-#define DRV_NAME	"bfin_mac"
-#define DRV_VERSION	"1.1"
-#define DRV_AUTHOR	"Bryan Wu, Luke Yang"
-#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@
 		/* allocate a new skb for next time receive */
 		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 		if (!new_skb) {
-			printk(KERN_NOTICE DRV_NAME
-			       ": init: low on mem - packet dropped\n");
+			pr_notice("init: low on mem - packet dropped\n");
 			goto init_error;
 		}
 		skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@
 
 init_error:
 	desc_list_free();
-	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+	pr_err("kmalloc failed\n");
 	return -ENOMEM;
 }
 
@@ -259,8 +258,7 @@
 	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
 		udelay(1);
 		if (timeout_cnt-- < 0) {
-			printk(KERN_ERR DRV_NAME
-			": wait MDC/MDIO transaction to complete timeout\n");
+			pr_err("timed out waiting for MDC/MDIO transaction to complete\n");
 			return -ETIMEDOUT;
 		}
 	}
@@ -350,9 +348,9 @@
 					opmode &= ~RMII_10;
 					break;
 				default:
-					printk(KERN_WARNING
-						"%s: Ack!  Speed (%d) is not 10/100!\n",
-						DRV_NAME, phydev->speed);
+					netdev_warn(dev,
+						"Ack! Speed (%d) is not 10/100!\n",
+						phydev->speed);
 					break;
 				}
 				bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@
 
 	/* now we are supposed to have a proper phydev, to attach to... */
 	if (!phydev) {
-		printk(KERN_INFO "%s: Don't found any phy device at all\n",
-			dev->name);
+		netdev_err(dev, "no phy device found\n");
 		return -ENODEV;
 	}
 
 	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
 		phy_mode != PHY_INTERFACE_MODE_MII) {
-		printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+		netdev_err(dev, "invalid phy interface mode\n");
 		return -EINVAL;
 	}
 
@@ -432,7 +429,7 @@
 			0, phy_mode);
 
 	if (IS_ERR(phydev)) {
-		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		netdev_err(dev, "could not attach PHY\n");
 		return PTR_ERR(phydev);
 	}
 
@@ -453,11 +450,10 @@
 	lp->old_duplex = -1;
 	lp->phydev = phydev;
 
-	printk(KERN_INFO "%s: attached PHY driver [%s] "
-	       "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
-	       "@sclk=%dMHz)\n",
-	       DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
-	       MDC_CLK, mdc_div, sclk/1000000);
+	pr_info("attached PHY driver [%s] "
+	        "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+	        phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+	        MDC_CLK, mdc_div, sclk/1000000);
 
 	return 0;
 }
@@ -502,7 +498,7 @@
 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
+	strcpy(info->driver, KBUILD_MODNAME);
 	strcpy(info->version, DRV_VERSION);
 	strcpy(info->fw_version, "N/A");
 	strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@
 };
 
 /**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	int i;
@@ -592,6 +588,10 @@
 
 	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
 
+	/* Set vlan regs to let 1522-byte-long packets pass through */
+	bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+	bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
 	/* Initialize the TX DMA channel registers */
 	bfin_write_DMA2_X_COUNT(0);
 	bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@
 		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
 			udelay(1);
 		if (timeout_cnt == 0)
-			printk(KERN_ERR DRV_NAME
-					": fails to timestamp the TX packet\n");
+			netdev_err(netdev, "timestamping the TX packet failed\n");
 		else {
 			struct skb_shared_hwtstamps shhwtstamps;
 			u64 ns;
@@ -1083,8 +1082,7 @@
 	 * we which case we simply drop the packet
 	 */
 	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
-		printk(KERN_NOTICE DRV_NAME
-		       ": rx: receive error - packet dropped\n");
+		netdev_notice(dev, "rx: receive error - packet dropped\n");
 		dev->stats.rx_dropped++;
 		goto out;
 	}
@@ -1094,8 +1092,7 @@
 
 	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 	if (!new_skb) {
-		printk(KERN_NOTICE DRV_NAME
-		       ": rx: low on mem - packet dropped\n");
+		netdev_notice(dev, "rx: low on mem - packet dropped\n");
 		dev->stats.rx_dropped++;
 		goto out;
 	}
@@ -1213,7 +1210,7 @@
 	int ret;
 	u32 opmode;
 
-	pr_debug("%s: %s\n", DRV_NAME, __func__);
+	pr_debug("%s\n", __func__);
 
 	/* Set RX DMA */
 	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1287,19 +1284,12 @@
 {
 	u32 emac_hashhi, emac_hashlo;
 	struct netdev_hw_addr *ha;
-	char *addrs;
 	u32 crc;
 
 	emac_hashhi = emac_hashlo = 0;
 
 	netdev_for_each_mc_addr(ha, dev) {
-		addrs = ha->addr;
-
-		/* skip non-multicast addresses */
-		if (!(*addrs & 1))
-			continue;
-
-		crc = ether_crc(ETH_ALEN, addrs);
+		crc = ether_crc(ETH_ALEN, ha->addr);
 		crc >>= 26;
 
 		if (crc & 0x20)
@@ -1323,7 +1313,7 @@
 	u32 sysctl;
 
 	if (dev->flags & IFF_PROMISC) {
-		printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+		netdev_info(dev, "set promisc mode\n");
 		sysctl = bfin_read_EMAC_OPMODE();
 		sysctl |= PR;
 		bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1383,7 @@
 	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
 	 */
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+		netdev_warn(dev, "no valid ethernet hw addr\n");
 		return -EINVAL;
 	}
 
@@ -1527,6 +1517,9 @@
 		goto out_err_mii_probe;
 	}
 
+	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
 	/* Fill in the fields of the device structure with ethernet values. */
 	ether_setup(ndev);
 
@@ -1558,7 +1551,7 @@
 	bfin_mac_hwtstamp_init(ndev);
 
 	/* now, print out the card info, in a short format.. */
-	dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
 	return 0;
 
@@ -1650,7 +1643,7 @@
 	 * so set the GPIO pins to Ethernet mode
 	 */
 	pin_req = mii_bus_pd->mac_peripherals;
-	rc = peripheral_request_list(pin_req, DRV_NAME);
+	rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
 	if (rc) {
 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
 		return rc;
@@ -1739,7 +1732,7 @@
 	.resume = bfin_mac_resume,
 	.suspend = bfin_mac_suspend,
 	.driver = {
-		.name = DRV_NAME,
+		.name = KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
 	},
 };
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68be..f8559ac 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
 
+/*
+ * Disable hardware checksum for bug #5600 if writeback cache is
+ * enabled. Otherwise, a corrupted RX packet will be sent up the stack
+ * without an error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
 #define BFIN_MAC_CSUM_OFFLOAD
+#endif
 
 #define TX_RECLAIM_JIFFIES (HZ / 5)
 
@@ -68,7 +75,6 @@
 	 */
 	struct net_device_stats stats;
 
-	unsigned char Mac[6];	/* MAC address of the board */
 	spinlock_t lock;
 
 	int wol;		/* Wake On Lan */
@@ -76,6 +82,9 @@
 	struct timer_list tx_reclaim_timer;
 	struct net_device *ndev;
 
+	/* Data for EMAC_VLAN1 regs */
+	u16 vlan1_mask, vlan2_mask;
+
 	/* MII and PHY stuffs */
 	int old_link;          /* used by bf537_adjust_link */
 	int old_speed;
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 99be5ae..142d604 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -275,7 +275,6 @@
 
 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
 	if (ioc_attr) {
-		memset(ioc_attr, 0, sizeof(*ioc_attr));
 		spin_lock_irqsave(&bnad->bna_lock, flags);
 		bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d..a6cd335 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -636,6 +636,7 @@
 
 #define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
 #define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp)	(CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 	int			flash_size;
 #define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
@@ -1414,12 +1415,12 @@
 		else
 
 /* skip rx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
 #define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
 /* skip tx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
 #define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25..fb3ff7c 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
 /* bnx2x_dump.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2009 Broadcom Corporation
+ * Copyright (c) 2011 Broadcom Corporation
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
  */
 
 
@@ -17,53 +23,53 @@
 #define BNX2X_DUMP_H
 
 
+
+/* definitions */
+#define XSTORM_WAITP_ADDR    0x2b8a80
+#define TSTORM_WAITP_ADDR    0x1b8a80
+#define USTORM_WAITP_ADDR    0x338a80
+#define CSTORM_WAITP_ADDR    0x238a80
+#define TSTORM_CAM_MODE         0x1B1440
+
+#define MAX_TIMER_PENDING      200
+#define TIMER_SCAN_DONT_CARE   0xFF
+#define RI_E1			0x1
+#define RI_E1H			0x2
+#define RI_E2			0x4
+#define RI_ONLINE		0x100
+#define RI_PATH0_DUMP		0x200
+#define RI_PATH1_DUMP		0x400
+#define RI_E1_OFFLINE		(RI_E1)
+#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE		(RI_E1H)
+#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE		(RI_E2)
+#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE	(RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE		(RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE		(RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE		(RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE         (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE          (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+
 struct dump_sign {
 	u32 time_stamp;
 	u32 diag_ver;
 	u32 grc_dump_ver;
 };
 
-#define TSTORM_WAITP_ADDR		0x1b8a80
-#define CSTORM_WAITP_ADDR		0x238a80
-#define XSTORM_WAITP_ADDR		0x2b8a80
-#define USTORM_WAITP_ADDR		0x338a80
-#define TSTORM_CAM_MODE			0x1b1440
-
-#define RI_E1				0x1
-#define RI_E1H				0x2
-#define RI_E2			0x4
-#define RI_ONLINE			0x100
-#define RI_PATH0_DUMP		0x200
-#define RI_PATH1_DUMP		0x400
-#define RI_E1_OFFLINE			(RI_E1)
-#define RI_E1_ONLINE			(RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE			(RI_E1H)
-#define RI_E1H_ONLINE			(RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE			(RI_E2)
-#define RI_E2_ONLINE			(RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE		(RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE			(RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE		(RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE			(RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE			(RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE			(RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE			(RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE			(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-
-#define MAX_TIMER_PENDING		200
-#define TIMER_SCAN_DONT_CARE		0xFF
-
-
 struct dump_hdr {
-	u32		 hdr_size;	/* in dwords, excluding this field */
-	struct dump_sign dump_sign;
-	u32		 xstorm_waitp;
-	u32		 tstorm_waitp;
-	u32		 ustorm_waitp;
-	u32		 cstorm_waitp;
-	u16		 info;
-	u8		 idle_chk;
-	u8		 reserved;
+	u32  hdr_size;	/* in dwords, excluding this field */
+	struct dump_sign	dump_sign;
+	u32  xstorm_waitp;
+	u32  tstorm_waitp;
+	u32  ustorm_waitp;
+	u32  cstorm_waitp;
+	u16  info;
+	u8   idle_chk;
+	u8   reserved;
 };
 
 struct reg_addr {
@@ -80,202 +86,185 @@
 	u16 info;
 };
 
-
-#define REGS_COUNT			558
+#define REGS_COUNT			834
 static const struct reg_addr reg_addrs[REGS_COUNT] = {
 	{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
 	{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
-	{ 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
-	{ 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
-	{ 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
-	{ 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
-	{ 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
-	{ 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
-	{ 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
-	{ 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
-	{ 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
-	{ 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
-	{ 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
-	{ 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
-	{ 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
-	{ 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
-	{ 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
-	{ 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
-	{ 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
-	{ 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
-	{ 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
-	{ 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
-	{ 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
-	{ 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
-	{ 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
-	{ 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
-	{ 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
-	{ 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
-	{ 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
-	{ 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
-	{ 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
-	{ 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
-	{ 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
-	{ 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
-	{ 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
-	{ 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
-	{ 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
-	{ 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
-	{ 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
-	{ 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
-	{ 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
-	{ 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
-	{ 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
-	{ 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
-	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
-	{ 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
-	{ 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+	{ 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
+	{ 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
+	{ 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
+	{ 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
+	{ 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
+	{ 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
+	{ 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
+	{ 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+	{ 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
+	{ 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
+	{ 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
+	{ 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
+	{ 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
+	{ 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
+	{ 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
+	{ 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
+	{ 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
+	{ 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
+	{ 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
+	{ 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
+	{ 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
+	{ 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
+	{ 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
+	{ 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
+	{ 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+	{ 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
+	{ 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
+	{ 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
+	{ 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
+	{ 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
+	{ 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+	{ 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
+	{ 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
+	{ 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
+	{ 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
+	{ 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
+	{ 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
+	{ 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
+	{ 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
+	{ 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
+	{ 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
+	{ 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
+	{ 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
+	{ 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
+	{ 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
+	{ 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
+	{ 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
+	{ 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
+	{ 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
+	{ 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
+	{ 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
+	{ 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
+	{ 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
+	{ 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
+	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
+	{ 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
+	{ 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
 	{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
 	{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
-	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
-	{ 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
-	{ 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
-	{ 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
-	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
-	{ 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
-	{ 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
-	{ 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
-	{ 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
-	{ 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
+	{ 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
+	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
+	{ 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+	{ 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
+	{ 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
+	{ 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
+	{ 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+	{ 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
+	{ 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+	{ 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
+	{ 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
+	{ 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
 	{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
 	{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
-	{ 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
-	{ 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
-	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
+	{ 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
+	{ 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
+	{ 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+	{ 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
+	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
+	{ 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
 	{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
-	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
-	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
-	{ 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
-	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
-	{ 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
-	{ 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
-	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
-	{ 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
-	{ 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
-	{ 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
+	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
+	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
+	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+	{ 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
+	{ 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
 	{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
 	{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
-	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
-	{ 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
-	{ 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
-	{ 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
+	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
+	{ 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+	{ 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
+	{ 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
 	{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
 	{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
-	{ 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
-	{ 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
-	{ 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
-	{ 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
-	{ 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
-	{ 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
-	{ 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
-	{ 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
-	{ 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
-	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
+	{ 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+	{ 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+	{ 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
+	{ 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
+	{ 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
+	{ 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
+	{ 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
+	{ 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
+	{ 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
+	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
 	{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
 	{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
 	{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
 	{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
-	{ 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
-	{ 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
-	{ 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
-	{ 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
+	{ 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
+	{ 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
+	{ 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
 	{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
-	{ 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
-	{ 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
-	{ 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
+	{ 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
+	{ 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
+	{ 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
+	{ 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
 	{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
-	{ 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
-	{ 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
-	{ 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
-	{ 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
-	{ 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
-	{ 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
-	{ 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
-	{ 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
-	{ 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
-	{ 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
-	{ 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
-	{ 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
-	{ 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
-	{ 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
-	{ 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
-	{ 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
-	{ 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
-	{ 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
-	{ 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
-	{ 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
-	{ 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
-	{ 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
-	{ 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
-	{ 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
-	{ 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
-	{ 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
-	{ 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
-	{ 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
-	{ 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
-	{ 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
-	{ 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
-	{ 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
-	{ 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
-	{ 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
-	{ 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
-	{ 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
-	{ 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
-	{ 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
-	{ 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
-	{ 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
-	{ 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
-	{ 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
-	{ 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
-	{ 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
-	{ 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
-	{ 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
-	{ 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
-	{ 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
-	{ 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
-	{ 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
-	{ 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
-	{ 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
-	{ 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
-	{ 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
-	{ 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
-	{ 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
-	{ 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
-	{ 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
-	{ 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
-	{ 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
-	{ 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
-	{ 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
-	{ 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
-	{ 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
-	{ 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
-	{ 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
-	{ 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
-	{ 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
-	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
-	{ 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
-	{ 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
-	{ 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
-	{ 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
-	{ 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
-	{ 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
-	{ 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
-	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
-	{ 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
-	{ 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
-	{ 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
-	{ 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
-	{ 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
-	{ 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
-	{ 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
-	{ 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
-	{ 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
-	{ 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+	{ 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
+	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
+	{ 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
+	{ 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
+	{ 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
+	{ 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
+	{ 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
+	{ 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
+	{ 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
+	{ 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
+	{ 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
+	{ 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
+	{ 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
+	{ 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
+	{ 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
+	{ 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
+	{ 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
+	{ 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
+	{ 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
+	{ 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
+	{ 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
+	{ 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
+	{ 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
+	{ 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
+	{ 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
+	{ 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
+	{ 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
+	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
+	{ 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
+	{ 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
+	{ 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
+	{ 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
+	{ 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
+	{ 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
+	{ 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
+	{ 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
+	{ 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
+	{ 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
+	{ 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
+	{ 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
+	{ 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
+	{ 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
+	{ 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
+	{ 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
+	{ 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
+	{ 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+	{ 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+	{ 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
+	{ 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
 	{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
 	{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
 	{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@
 	{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
 	{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
 	{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
-	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
-	{ 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
+	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
 	{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
-	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
-	{ 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
-	{ 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
-	{ 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
-	{ 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
-	{ 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
-	{ 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
-	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
-	{ 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
-	{ 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
-	{ 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
-	{ 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
-	{ 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
-	{ 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
-	{ 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
-	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
+	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+	{ 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
+	{ 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
+	{ 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
+	{ 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
+	{ 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
+	{ 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
+	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
+	{ 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
+	{ 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
+	{ 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+	{ 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+	{ 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+	{ 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
+	{ 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
+	{ 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
+	{ 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
+	{ 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
+	{ 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
+	{ 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
+	{ 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
+	{ 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
+	{ 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
+	{ 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
 	{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
-	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
-	{ 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
-	{ 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
-	{ 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
-	{ 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
-	{ 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
-	{ 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
+	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
+	{ 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
+	{ 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
+	{ 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
+	{ 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
+	{ 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
+	{ 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
+	{ 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
+	{ 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
+	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
+	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
+	{ 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
+	{ 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
 	{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
-	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
-	{ 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
-	{ 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
-	{ 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
-	{ 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
-	{ 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
-	{ 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
-	{ 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
-	{ 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
-	{ 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
-	{ 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
-	{ 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
-	{ 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
-	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
-	{ 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
-	{ 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
-	{ 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
-	{ 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
-	{ 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
-	{ 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
-	{ 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
-	{ 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
-	{ 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
-	{ 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
-	{ 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
-	{ 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
-	{ 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
-	{ 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
-	{ 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
-	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
-	{ 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
-	{ 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
-	{ 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
-	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
-	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
-	{ 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
-	{ 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
+	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
+	{ 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
+	{ 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
+	{ 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
+	{ 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
+	{ 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
+	{ 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
+	{ 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
+	{ 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
+	{ 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
+	{ 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
+	{ 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
+	{ 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
+	{ 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
+	{ 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
+	{ 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
+	{ 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
+	{ 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
+	{ 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
+	{ 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
+	{ 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
+	{ 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
+	{ 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
+	{ 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
+	{ 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
+	{ 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
+	{ 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
+	{ 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
+	{ 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
+	{ 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
+	{ 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
+	{ 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
+	{ 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
+	{ 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+	{ 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
+	{ 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
+	{ 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+	{ 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
+	{ 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+	{ 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
+	{ 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+	{ 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
+	{ 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
+	{ 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
+	{ 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
+	{ 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
+	{ 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
+	{ 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
+	{ 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
+	{ 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
+	{ 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
+	{ 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
+	{ 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+	{ 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
+	{ 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
+	{ 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
+	{ 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
+	{ 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
+	{ 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
+	{ 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
+	{ 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
+	{ 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
+	{ 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
+	{ 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+	{ 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
+	{ 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
+	{ 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
+	{ 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
+	{ 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
+	{ 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
+	{ 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
+	{ 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+	{ 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
+	{ 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+	{ 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
+	{ 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
+	{ 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
+	{ 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+	{ 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
+	{ 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
+	{ 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
+	{ 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
+	{ 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
+	{ 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
+	{ 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
+	{ 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
+	{ 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
+	{ 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
+	{ 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
+	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
+	{ 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
+	{ 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
+	{ 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
+	{ 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
+	{ 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
+	{ 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
+	{ 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
+	{ 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
+	{ 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
+	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
+	{ 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
+	{ 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
+	{ 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
+	{ 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
+	{ 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
+	{ 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
+	{ 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
+	{ 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
+	{ 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
+	{ 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
+	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
 	{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
-	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
+	{ 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
+	{ 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
+	{ 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
+	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
 	{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
-	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
-	{ 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
-	{ 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
-	{ 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
-	{ 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
-	{ 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
-	{ 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
+	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
+	{ 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+	{ 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+	{ 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+	{ 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
+	{ 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
+	{ 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+	{ 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
+	{ 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
+	{ 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+	{ 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
+	{ 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
+	{ 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
+	{ 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
+	{ 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
+	{ 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
 	{ 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
-	{ 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
-	{ 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
+	{ 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
+	{ 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
+	{ 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
+	{ 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
+	{ 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
+	{ 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
+	{ 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+	{ 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
+	{ 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
+	{ 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
+	{ 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
+	{ 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
+	{ 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
+	{ 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
+	{ 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
+	{ 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
+	{ 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+	{ 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
+	{ 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
+	{ 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
+	{ 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
+	{ 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
+	{ 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
+	{ 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
+	{ 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
+	{ 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
 };
 
-
-#define IDLE_REGS_COUNT			277
+#define IDLE_REGS_COUNT			237
 static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
-	{ 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
-	{ 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
-	{ 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+	{ 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
+	{ 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
+	{ 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
+	{ 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
+	{ 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
+	{ 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
+	{ 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
+	{ 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
 	{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
-	{ 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
-	{ 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
-	{ 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
-	{ 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
-	{ 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
-	{ 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
-	{ 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
-	{ 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
-	{ 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
-	{ 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
-	{ 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
-	{ 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
-	{ 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
-	{ 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
-	{ 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
-	{ 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
-	{ 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
-	{ 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
-	{ 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
-	{ 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
-	{ 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
-	{ 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
-	{ 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
-	{ 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
-	{ 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
-	{ 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
-	{ 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
-	{ 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
-	{ 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
-	{ 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
-	{ 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
-	{ 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
-	{ 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
+	{ 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
+	{ 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
+	{ 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
+	{ 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
+	{ 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
+	{ 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+	{ 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
+	{ 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
+	{ 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
+	{ 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
+	{ 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
+	{ 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
+	{ 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
+	{ 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
+	{ 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
+	{ 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
+	{ 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
+	{ 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
+	{ 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
+	{ 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
+	{ 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
+	{ 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
+	{ 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
+	{ 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
+	{ 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
+	{ 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
+	{ 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
+	{ 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
+	{ 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
+	{ 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
+	{ 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
+	{ 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
+	{ 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
+	{ 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
+	{ 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
+	{ 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
+	{ 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
 	{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
 	{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
 	{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
 	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
-	{ 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
-	{ 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
-	{ 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
-	{ 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
-	{ 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
-	{ 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
-	{ 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
-	{ 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
-	{ 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
-	{ 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
-	{ 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
-	{ 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
-	{ 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
-	{ 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
-	{ 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
-	{ 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
-	{ 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
-	{ 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
-	{ 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
-	{ 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
-	{ 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
-	{ 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
-	{ 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
-	{ 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
-	{ 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
-	{ 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
-	{ 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
-	{ 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
-	{ 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
-	{ 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
-	{ 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
-	{ 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
-	{ 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
-	{ 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
-	{ 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
-	{ 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
-	{ 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
-	{ 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
-	{ 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
+	{ 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
+	{ 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
+	{ 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
+	{ 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
+	{ 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
+	{ 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
+	{ 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
 	{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
 	{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
 	{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@
 	{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
 	{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
 	{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
-	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
-	{ 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
-	{ 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
-	{ 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
-	{ 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
-	{ 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
-	{ 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
-	{ 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
-	{ 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
-	{ 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
-	{ 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
-	{ 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
-	{ 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
-	{ 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
+	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
+	{ 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
+	{ 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
+	{ 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
+	{ 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+	{ 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
+	{ 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
+	{ 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
+	{ 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
+	{ 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
+	{ 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
+	{ 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
+	{ 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
+	{ 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
+	{ 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
+	{ 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
+	{ 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
+	{ 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
 	{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
 	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
-	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
-	{ 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
-	{ 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
-	{ 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
-	{ 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
-	{ 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
-	{ 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
-	{ 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
-	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
-	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
-	{ 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
-	{ 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
-	{ 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
-	{ 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
-	{ 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
-	{ 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
-	{ 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
-	{ 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
-	{ 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
-	{ 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
-	{ 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
-	{ 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
+	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+	{ 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+	{ 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
+	{ 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
+	{ 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
+	{ 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
+	{ 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
+	{ 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+	{ 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+	{ 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
+	{ 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
+	{ 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
+	{ 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
+	{ 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
 	{ 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
 	{ 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
-	{ 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
-	{ 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
 	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
 	{ 0x3380c0, 1, RI_ALL_ONLINE }
 };
@@ -515,7 +635,6 @@
 	{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
 };
 
-
 #define WREGS_COUNT_E1H			1
 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
 
@@ -530,22 +649,53 @@
 	{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
 };
 
-static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
-
+static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
 
 #define TIMER_REGS_COUNT_E1		2
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
-	{ 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
-	{ 0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
+	0x1640d0, 0x1640d4 };
 
 #define TIMER_REGS_COUNT_E1H		2
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
-	{ 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
-	{ 0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+	0x1640d0, 0x1640d4 };
+
+#define TIMER_REGS_COUNT_E2		2
+
+static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
+	0x1640d0, 0x1640d4 };
+
+#define PAGE_MODE_VALUES_E1 0
+
+#define PAGE_READ_REGS_E1 0
+
+#define PAGE_WRITE_REGS_E1 0
+
+static const u32 page_vals_e1[] = { 0 };
+
+static const u32 page_write_regs_e1[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+
+#define PAGE_MODE_VALUES_E1H 0
+
+#define PAGE_READ_REGS_E1H 0
+
+#define PAGE_WRITE_REGS_E1H 0
+
+static const u32 page_vals_e1h[] = { 0 };
+
+static const u32 page_write_regs_e1h[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1h[] = {
+	{ 0x0, 0, RI_E1H_ONLINE } };
 
 #define PAGE_MODE_VALUES_E2 2
 
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d..5b44a8b 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
+#include "bnx2x_init.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -472,7 +473,7 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	int regdump_len = 0;
-	int i;
+	int i, j, k;
 
 	if (CHIP_IS_E1(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +503,15 @@
 			if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
 				regdump_len += wreg_addrs_e2[i].size *
 					(1 + wreg_addrs_e2[i].read_regs_count);
+
+		for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+			for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+				for (k = 0; k < PAGE_READ_REGS_E2; k++)
+					if (IS_E2_ONLINE(page_read_regs_e2[k].
+							 info))
+						regdump_len +=
+						page_read_regs_e2[k].size;
+			}
 	}
 	regdump_len *= 4;
 	regdump_len += sizeof(struct dump_hdr);
@@ -539,6 +549,12 @@
 	if (!netif_running(bp->dev))
 		return;
 
+	/* Disable parity attentions, since the following dump may cause
+	 * false alarms by reading registers that were never written. We
+	 * re-enable parity attentions right after the dump.
+	 */
+	bnx2x_disable_blocks_parity(bp);
+
 	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
 	dump_hdr.dump_sign = dump_sign_all;
 	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +596,10 @@
 
 		bnx2x_read_pages_regs_e2(bp, p);
 	}
+	/* Re-enable parity attentions */
+	bnx2x_clear_blocks_parity(bp);
+	if (CHIP_PARITY_ENABLED(bp))
+		bnx2x_enable_blocks_parity(bp);
 }
 
 #define PHY_FW_VER_LEN			20
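
Taken together, the bnx2x_ethtool.c hunks above bracket the register dump with the parity helpers introduced in bnx2x_init.h below. A condensed sketch of the resulting flow, for orientation only (hypothetical function name, not the exact driver code; the header setup and the actual register copying are elided):

	/* Sketch of the parity-safe dump sequence used by bnx2x_get_regs().
	 * The helpers are the static inlines added to bnx2x_init.h in this
	 * patch; "dump registers" stands for the existing read loops.
	 */
	static void bnx2x_get_regs_sketch(struct bnx2x *bp)
	{
		if (!netif_running(bp->dev))
			return;

		/* Mask parity attentions: the dump reads registers that may
		 * never have been written, which would otherwise latch
		 * spurious parity errors.
		 */
		bnx2x_disable_blocks_parity(bp);

		/* ... dump registers into the ethtool buffer ... */

		/* Clear whatever parity status the dump itself latched,
		 * then re-arm attentions only where parity is supported.
		 */
		bnx2x_clear_blocks_parity(bp);
		if (CHIP_PARITY_ENABLED(bp))
			bnx2x_enable_blocks_parity(bp);
	}
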
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d5487..5a268e9 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@
 	u64 next;
 };
 
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK, \
+	block##_REG_##block##_PRTY_STS_CLR, \
+	en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK_0, \
+	block##_REG_##block##_PRTY_STS_CLR_0, \
+	en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK_1, \
+	block##_REG_##block##_PRTY_STS_CLR_1, \
+	en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
+static const struct {
+	u32 mask_addr;
+	u32 sts_clr_addr;
+	u32 en_mask;		/* Mask to enable parity attentions */
+	struct {
+		u32 e1;		/* 57710 */
+		u32 e1h;	/* 57711 */
+		u32 e2;		/* 57712 */
+	} reg_mask;		/* Register mask (all valid bits) */
+	char name[7];		/* Block's longest name is 6 characters long
+				 * (name + suffix)
+				 */
+} bnx2x_blocks_parity_data[] = {
+	/* bit 19 masked */
+	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+	/* bit 5,18,20-31 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+	/* bit 5 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
+	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+	/* Block IGU, MISC, PXP and PXP2 parity errors, since we don't want
+	 * to handle the "system kill" flow at the moment.
+	 */
+	BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(PXP2,	0x7ff, 0x7f, 0x7f, 0x7ff),
+	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf}, "UPB"},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf}, "XPB"},
+	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* The registers below control the MCP parity attention output. When the
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are enabled; when
+ * they are cleared, attentions are disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+	MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_0,
+	MISC_REG_AEU_ENABLE4_PXP_0,
+	MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_1,
+	MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+	int i;
+	u32 reg_val;
+
+	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+		if (enable)
+			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+		else
+			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+		REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+	}
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+	if (CHIP_IS_E1(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+	else if (CHIP_IS_E1H(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+	else
+		return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (dis_mask) {
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+			       dis_mask);
+			DP(NETIF_MSG_HW, "Setting parity mask "
+						 "for %s to\t\t0x%x\n",
+				    bnx2x_blocks_parity_data[i].name, dis_mask);
+		}
+	}
+
+	/* Disable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+	u32 reg_val, mcp_aeu_bits =
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+	/* Clear SEM_FAST parities */
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask) {
+			reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+					 sts_clr_addr);
+			if (reg_val & reg_mask)
+				DP(NETIF_MSG_HW,
+					    "Parity errors in %s: 0x%x\n",
+					    bnx2x_blocks_parity_data[i].name,
+					    reg_val & reg_mask);
+		}
+	}
+
+	/* Check if there were parity attentions in MCP */
+	reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+	if (reg_val & mcp_aeu_bits)
+		DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+		   reg_val & mcp_aeu_bits);
+
+	/* Clear parity attentions in MCP:
+	 * [7]  clears Latched rom_parity
+	 * [8]  clears Latched ump_rx_parity
+	 * [9]  clears Latched ump_tx_parity
+	 * [10] clears Latched scpad_parity (both ports)
+	 */
+	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask)
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+				bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+	}
+
+	/* Enable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, true);
+}
+
+
 #endif /* BNX2X_INIT_H */
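
For reference, the BLOCK_PRTY_INFO() helpers added above are plain initializer generators for the bnx2x_blocks_parity_data[] table. As an illustration (this is roughly what the preprocessor produces for the BRB1 entry, not code that exists as literal source in the tree), BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf) expands to:

	/* Illustrative expansion of BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf);
	 * the register constants come from bnx2x_reg.h.
	 */
	{
		BRB1_REG_BRB1_PRTY_MASK,	/* mask_addr */
		BRB1_REG_BRB1_PRTY_STS_CLR,	/* sts_clr_addr */
		0,				/* en_mask */
		{ 0xf, 0xf, 0xf },		/* reg_mask: .e1, .e1h, .e2 */
		"BRB1"				/* name */
	},

bnx2x_disable_blocks_parity() then writes the chip-specific reg_mask into mask_addr to mask every valid bit, and bnx2x_clear_blocks_parity() reads the read-clear sts_clr_addr register to report and drop any bits latched in the meantime.
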
 
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a551..8cdcf5b 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -3152,7 +3152,6 @@
 #define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
 #define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
 #define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 /*
  * should be run under rtnl lock
@@ -3527,7 +3526,7 @@
 	   try to handle this event */
 	bnx2x_acquire_alr(bp);
 
-	if (bnx2x_chk_parity_attn(bp)) {
+	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
 		bp->recovery_state = BNX2X_RECOVERY_INIT;
 		bnx2x_set_reset_in_progress(bp);
 		schedule_delayed_work(&bp->reset_task, 0);
@@ -4754,7 +4753,7 @@
 	return 0; /* OK */
 }
 
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 {
 	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 	if (CHIP_IS_E2(bp))
@@ -4808,53 +4807,9 @@
 	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
 	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 /*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
-	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
 }
 
-static const struct {
-	u32 addr;
-	u32 mask;
-} bnx2x_parity_mask[] = {
-	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
-	{HC_REG_HC_PRTY_MASK,		0x7},
-	{MISC_REG_MISC_PRTY_MASK,	0x1},
-	{QM_REG_QM_PRTY_MASK,		0x0},
-	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
-	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{SRC_REG_SRC_PRTY_MASK,		0x4}, /* bit 2 */
-	{CDU_REG_CDU_PRTY_MASK,		0x0},
-	{CFC_REG_CFC_PRTY_MASK,		0x0},
-	{DBG_REG_DBG_PRTY_MASK,		0x0},
-	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
-	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
-	{PRS_REG_PRS_PRTY_MASK,		(1<<6)},/* bit 6 */
-	{TSDM_REG_TSDM_PRTY_MASK,	0x18},	/* bit 3,4 */
-	{CSDM_REG_CSDM_PRTY_MASK,	0x8},	/* bit 3 */
-	{USDM_REG_USDM_PRTY_MASK,	0x38},  /* bit 3,4,5 */
-	{XSDM_REG_XSDM_PRTY_MASK,	0x8},	/* bit 3 */
-	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
-	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
-	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
-	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
-		REG_WR(bp, bnx2x_parity_mask[i].addr,
-			bnx2x_parity_mask[i].mask);
-}
-
-
 static void bnx2x_reset_common(struct bnx2x *bp)
 {
 	/* reset_common */
@@ -5082,7 +5037,7 @@
 		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 		memset(&ilt, 0, sizeof(struct bnx2x_ilt));
 
-		/* initalize dummy TM client */
+		/* initialize dummy TM client */
 		ilt_cli.start = 0;
 		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 		ilt_cli.client_num = ILT_CLIENT_TM;
@@ -5350,9 +5305,9 @@
 	/* clear PXP2 attentions */
 	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
-	enable_blocks_attention(bp);
-	if (CHIP_PARITY_SUPPORTED(bp))
-		enable_blocks_parity(bp);
+	bnx2x_enable_blocks_attention(bp);
+	if (CHIP_PARITY_ENABLED(bp))
+		bnx2x_enable_blocks_parity(bp);
 
 	if (!BP_NOMCP(bp)) {
 		/* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -8751,13 +8706,6 @@
 		dev_err(&bp->pdev->dev, "MCP disabled, "
 					"must load devices in order!\n");
 
-	/* Set multi queue mode */
-	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
-	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
-		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
-					"requested is not MSI-X\n");
-		multi_mode = ETH_RSS_MODE_DISABLED;
-	}
 	bp->multi_mode = multi_mode;
 	bp->int_mode = int_mode;
 
@@ -9560,9 +9508,15 @@
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
 
+	/* Power on: we can't let PCI layer write to us while we are in D3 */
+	bnx2x_set_power_state(bp, PCI_D0);
+
 	/* Disable MSI/MSI-X */
 	bnx2x_disable_msi(bp);
 
+	/* Power off */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
 	/* Make sure RESET task is not scheduled before continuing */
 	cancel_delayed_work_sync(&bp->reset_task);
 
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b..c939683 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
  * WR - Write Clear (write 1 to clear the bit)
  *
  */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
 
 #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR			 (0x1<<0)
 #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS		 (0x1<<2)
@@ -39,6 +41,8 @@
 #define BRB1_REG_BRB1_PRTY_MASK 				 0x60138
 /* [R 4] Parity register #0 read */
 #define BRB1_REG_BRB1_PRTY_STS					 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR				 0x60130
 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
  * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
  * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
 #define CCM_REG_CCM_INT_MASK					 0xd01e4
 /* [R 11] Interrupt register #0 read */
 #define CCM_REG_CCM_INT_STS					 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK					 0xd01f4
 /* [R 27] Parity register #0 read */
 #define CCM_REG_CCM_PRTY_STS					 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR				 0xd01ec
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
 #define CDU_REG_CDU_PRTY_MASK					 0x10104c
 /* [R 5] Parity register #0 read */
 #define CDU_REG_CDU_PRTY_STS					 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR				 0x101044
 /* [RC 32] logging of error data in case of a CDU load error:
    {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
    ype_error; ctual_active; ctual_compressed_context}; */
@@ -381,6 +391,8 @@
 #define CFC_REG_CFC_PRTY_MASK					 0x104118
 /* [R 4] Parity register #0 read */
 #define CFC_REG_CFC_PRTY_STS					 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR				 0x104110
 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */
 #define CFC_REG_CID_CAM 					 0x104800
 #define CFC_REG_CONTROL0					 0x104028
@@ -466,6 +478,8 @@
 #define CSDM_REG_CSDM_PRTY_MASK 				 0xc22bc
 /* [R 11] Parity register #0 read */
 #define CSDM_REG_CSDM_PRTY_STS					 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR				 0xc22b4
 #define CSDM_REG_ENABLE_IN1					 0xc2238
 #define CSDM_REG_ENABLE_IN2					 0xc223c
 #define CSDM_REG_ENABLE_OUT1					 0xc2240
@@ -556,6 +570,9 @@
 /* [R 32] Parity register #0 read */
 #define CSEM_REG_CSEM_PRTY_STS_0				 0x200124
 #define CSEM_REG_CSEM_PRTY_STS_1				 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0				 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1				 0x200138
 #define CSEM_REG_ENABLE_IN					 0x2000a4
 #define CSEM_REG_ENABLE_OUT					 0x2000a8
 /* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
 #define DBG_REG_DBG_PRTY_MASK					 0xc0a8
 /* [R 1] Parity register #0 read */
 #define DBG_REG_DBG_PRTY_STS					 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR				 0xc0a0
 /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
  * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
  * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
 #define DMAE_REG_DMAE_PRTY_MASK 				 0x102064
 /* [R 4] Parity register #0 read */
 #define DMAE_REG_DMAE_PRTY_STS					 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR				 0x10205c
 /* [RW 1] Command 0 go. */
 #define DMAE_REG_GO_C0						 0x102080
 /* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
 #define DORQ_REG_DORQ_PRTY_MASK 				 0x170190
 /* [R 2] Parity register #0 read */
 #define DORQ_REG_DORQ_PRTY_STS					 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR				 0x170188
 /* [RW 8] The address to write the DPM CID to STORM. */
 #define DORQ_REG_DPM_CID_ADDR					 0x170044
 /* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
 /* [R 1] data availble for error memory. If this bit is clear do not red
  * from error_handling_memory. */
 #define IGU_REG_ERROR_HANDLING_DATA_VALID			 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK					 0x1300a8
 /* [R 11] Parity register #0 read */
 #define IGU_REG_IGU_PRTY_STS					 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR				 0x1300a0
 /* [R 4] Debug: int_handle_fsm */
 #define IGU_REG_INT_HANDLE_FSM					 0x130050
 #define IGU_REG_LEADING_EDGE_LATCH				 0x130134
@@ -1501,6 +1528,8 @@
 #define MISC_REG_MISC_PRTY_MASK 				 0xa398
 /* [R 1] Parity register #0 read */
 #define MISC_REG_MISC_PRTY_STS					 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR				 0xa390
 #define MISC_REG_NIG_WOL_P0					 0xa270
 #define MISC_REG_NIG_WOL_P1					 0xa274
 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1604,7 +1633,7 @@
    (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
 #define MISC_REG_SW_TIMER_RELOAD_VAL_4				 0xa2fc
 /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
-   in this register. addres 0 - timer 1; address 1 - timer 2, ...  address 7 -
+   in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
    timer 8 */
 #define MISC_REG_SW_TIMER_VAL					 0xa5c0
 /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2082,6 +2111,10 @@
 #define PBF_REG_PBF_INT_MASK					 0x1401d4
 /* [R 5] Interrupt register #0 read */
 #define PBF_REG_PBF_INT_STS					 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK					 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
 #define PB_REG_CONTROL						 0
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
@@ -2091,6 +2124,8 @@
 #define PB_REG_PB_PRTY_MASK					 0x38
 /* [R 4] Parity register #0 read */
 #define PB_REG_PB_PRTY_STS					 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR					 0x30
 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR		 (0x1<<0)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW	 (0x1<<8)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR	 (0x1<<1)
@@ -2446,6 +2481,8 @@
 #define PRS_REG_PRS_PRTY_MASK					 0x401a4
 /* [R 8] Parity register #0 read */
 #define PRS_REG_PRS_PRTY_STS					 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR				 0x4019c
 /* [RW 8] Context region for pure acknowledge packets. Used in CFC load
    request message */
 #define PRS_REG_PURE_REGIONS					 0x40024
@@ -2599,6 +2636,9 @@
 /* [R 32] Parity register #0 read */
 #define PXP2_REG_PXP2_PRTY_STS_0				 0x12057c
 #define PXP2_REG_PXP2_PRTY_STS_1				 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0				 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1				 0x120590
 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives
    indication about backpressure) */
 #define PXP2_REG_RD_ALMOST_FULL_0				 0x120424
@@ -3001,6 +3041,8 @@
 #define PXP_REG_PXP_PRTY_MASK					 0x103094
 /* [R 26] Parity register #0 read */
 #define PXP_REG_PXP_PRTY_STS					 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR				 0x10308c
 /* [RW 4] The activity counter initial increment value sent in the load
    request */
 #define QM_REG_ACTCTRINITVAL_0					 0x168040
@@ -3157,6 +3199,8 @@
 #define QM_REG_QM_PRTY_MASK					 0x168454
 /* [R 12] Parity register #0 read */
 #define QM_REG_QM_PRTY_STS					 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR					 0x16844c
 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
 #define QM_REG_QSTATUS_HIGH					 0x16802c
 /* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
 #define QM_REG_WRRWEIGHTS_9					 0x168848
 /* [R 6] Keep the fill level of the fifo from write client 1 */
 #define QM_REG_XQM_WRC_FIFOLVL					 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST					 0x18840
 #define SRC_REG_COUNTFREE0					 0x40500
 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
    ports. If set the searcher support 8 functions. */
@@ -3470,6 +3516,8 @@
 #define SRC_REG_SRC_PRTY_MASK					 0x404c8
 /* [R 3] Parity register #0 read */
 #define SRC_REG_SRC_PRTY_STS					 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR				 0x404c0
 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
 #define TCM_REG_CAM_OCCUP					 0x5017c
 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
 #define TCM_REG_TCM_INT_MASK					 0x501dc
 /* [R 11] Interrupt register #0 read */
 #define TCM_REG_TCM_INT_STS					 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK					 0x501ec
 /* [R 27] Parity register #0 read */
 #define TCM_REG_TCM_PRTY_STS					 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR				 0x501e4
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
 #define TM_REG_TM_INT_MASK					 0x1640fc
 /* [R 1] Interrupt register #0 read */
 #define TM_REG_TM_INT_STS					 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK					 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR					 0x164104
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define TSDM_REG_AGG_INT_EVENT_0				 0x42038
 #define TSDM_REG_AGG_INT_EVENT_1				 0x4203c
@@ -3835,6 +3891,8 @@
 #define TSDM_REG_TSDM_PRTY_MASK 				 0x422bc
 /* [R 11] Parity register #0 read */
 #define TSDM_REG_TSDM_PRTY_STS					 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR				 0x422b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define TSEM_REG_ARB_CYCLE_SIZE 				 0x180034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
 #define TSEM_REG_SLOW_EXT_STORE_EMPTY				 0x1802a0
 /* [RW 8] List of free threads . There is a bit per thread. */
 #define TSEM_REG_THREADS_LIST					 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0				 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1				 0x180128
 /* [RW 3] The arbitration scheme of time_slot 0 */
 #define TSEM_REG_TS_0_AS					 0x180038
 /* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
 #define UCM_REG_UCM_INT_STS					 0xe01c8
 /* [R 27] Parity register #0 read */
 #define UCM_REG_UCM_PRTY_STS					 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR				 0xe01dc
 /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
 #define USDM_REG_USDM_PRTY_MASK 				 0xc42c0
 /* [R 11] Parity register #0 read */
 #define USDM_REG_USDM_PRTY_STS					 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR				 0xc42b8
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define USEM_REG_ARB_CYCLE_SIZE 				 0x300034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
 /* [R 32] Parity register #0 read */
 #define USEM_REG_USEM_PRTY_STS_0				 0x300124
 #define USEM_REG_USEM_PRTY_STS_1				 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0				 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1				 0x300138
 /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
  * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
 #define USEM_REG_VFPF_ERR_NUM					 0x300380
@@ -4797,6 +4865,8 @@
 #define XSDM_REG_XSDM_PRTY_MASK 				 0x1662bc
 /* [R 11] Parity register #0 read */
 #define XSDM_REG_XSDM_PRTY_STS					 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR				 0x1662b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define XSEM_REG_ARB_CYCLE_SIZE 				 0x280034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
 /* [R 32] Parity register #0 read */
 #define XSEM_REG_XSEM_PRTY_STS_0				 0x280124
 #define XSEM_REG_XSEM_PRTY_STS_1				 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0				 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1				 0x280138
 #define MCPR_NVM_ACCESS_ENABLE_EN				 (1L<<0)
 #define MCPR_NVM_ACCESS_ENABLE_WR_EN				 (1L<<1)
 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE				 (0xffffffL<<0)
@@ -6316,3 +6389,4 @@
 }
 
 
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b1..bda60d5 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@
 
 		spin_lock_bh(&bp->stats_lock);
 
+		if (bp->stats_pending) {
+			spin_unlock_bh(&bp->stats_lock);
+			return;
+		}
+
 		ramrod_data.drv_counter = bp->stats_counter++;
 		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
 		for_each_eth_queue(bp, i)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 48cf24f..171782e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -840,7 +840,7 @@
 	lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
 
 	memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-	/* Note: source addres is set to be the member's PERMANENT address,
+	/* Note: source address is set to be the member's PERMANENT address,
 	   because we use it to identify loopback lacpdus in receive. */
 	memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -881,7 +881,7 @@
 	marker_header = (struct bond_marker_header *)skb_put(skb, length);
 
 	memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-	/* Note: source addres is set to be the member's PERMANENT address,
+	/* Note: source address is set to be the member's PERMANENT address,
 	   because we use it to identify loopback MARKERs in receive. */
 	memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -1916,7 +1916,7 @@
 		return -1;
 	}
 
-	//check that the slave has not been intialized yet.
+	//check that the slave has not been initialized yet.
 	if (SLAVE_AD_INFO(slave).port.slave != slave) {
 
 		// port initialization
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4da384c..31fe980 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -18,7 +18,6 @@
 #include <linux/timer.h>
 #include <linux/proc_fs.h>
 #include <linux/if_bonding.h>
-#include <linux/kobject.h>
 #include <linux/cpumask.h>
 #include <linux/in6.h>
 #include "bond_3ad.h"
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 7206ab2..3437613 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3203,7 +3203,7 @@
 	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
 	int mac_off  = 0;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	const unsigned char *addr;
 #endif
 
@@ -3354,7 +3354,7 @@
 	if (found & VPD_FOUND_MAC)
 		goto done;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
 	if (addr != NULL) {
 		memcpy(dev_addr, addr, 6);
@@ -5031,7 +5031,7 @@
 	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
 	  cassini_debug;
 
-#if defined(CONFIG_OF)
+#if defined(CONFIG_SPARC)
 	cp->of_node = pci_device_to_OF_node(pdev);
 #endif
 
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 4c60285..a683fd3 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -22,7 +22,7 @@
 
 static int my3126_interrupt_disable(struct cphy *cphy)
 {
-	cancel_rearming_delayed_work(&cphy->phy_update);
+	cancel_delayed_work_sync(&cphy->phy_update);
 	return 0;
 }
 
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 63ebf76..8a43c7e 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -556,7 +556,7 @@
 #define EEPROM_MAX_POLL   4
 
 /*
- * Read SEEPROM. A zero is written to the flag register when the addres is
+ * Read SEEPROM. A zero is written to the flag register when the address is
  * written to the Control register. The hardware device will set the flag to a
  * one when 4B have been transferred to the Data register.
  */
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index a8766fb..e13b7fe 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -318,7 +318,7 @@
 
 /*
  * Initialization that requires the OS and protocol layers to already
- * be intialized goes here.
+ * be initialized goes here.
  */
 int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
 		unsigned int nroutes)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ec8579a..d55db6b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -607,7 +607,7 @@
  *
  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
  *	VPD ROM capability.  A zero is written to the flag bit when the
- *	addres is written to the control register.  The hardware device will
+ *	address is written to the control register.  The hardware device will
  *	set the flag to 1 when 4 bytes have been read into the data register.
  */
 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f8..56166ae 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@
 	netif_set_real_num_tx_queues(dev, pi->nqsets);
 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
 	if (err)
-		return err;
-	set_bit(pi->port_id, &adapter->open_device_map);
+		goto err_unwind;
 	err = link_start(dev);
 	if (err)
-		return err;
+		goto err_unwind;
+
 	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
 	return 0;
+
+err_unwind:
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
 }
 
 /*
@@ -764,13 +770,12 @@
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
 	pi->link_cfg.link_ok = 0;
 
 	clear_bit(pi->port_id, &adapter->open_device_map);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78..0f51c80 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@
 	/*
 	 * Write the command array into the Mailbox Data register array and
 	 * transfer ownership of the mailbox to the firmware.
+	 *
+	 * For the VFs, the Mailbox Data "registers" are actually backed by
+	 * T4's "MA" interface rather than PL Registers (as is the case for
+	 * the PFs).  Because these are in different coherency domains, the
+	 * write to the VF's PL-register-backed Mailbox Control can race in
+	 * front of the writes to the MA-backed VF Mailbox Data "registers".
+	 * So we need to do a read-back on at least one byte of the VF Mailbox
+	 * Data registers before doing the write to the VF Mailbox Control
+	 * register.
 	 */
 	for (i = 0, p = cmd; i < size; i += 8)
 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_read_reg(adapter, mbox_data);         /* flush write */
+
 	t4_write_reg(adapter, mbox_ctl,
 		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
 	t4_read_reg(adapter, mbox_ctl);          /* flush write */
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e6..aed223b 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -130,10 +130,15 @@
 		if (hw->mac_type == e1000_82541 ||
 		    hw->mac_type == e1000_82541_rev_2 ||
 		    hw->mac_type == e1000_82547 ||
-		    hw->mac_type == e1000_82547_rev_2) {
+		    hw->mac_type == e1000_82547_rev_2)
 			hw->phy_type = e1000_phy_igp;
-			break;
-		}
+		break;
+	case RTL8211B_PHY_ID:
+		hw->phy_type = e1000_phy_8211;
+		break;
+	case RTL8201N_PHY_ID:
+		hw->phy_type = e1000_phy_8201;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
+	case E1000_DEV_ID_INTEL_CE4100_GBE:
+		hw->mac_type = e1000_ce4100;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@
 		case e1000_82542_rev2_1:
 			hw->media_type = e1000_media_type_fiber;
 			break;
+		case e1000_ce4100:
+			hw->media_type = e1000_media_type_copper;
+			break;
 		default:
 			status = er32(STATUS);
 			if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@
 		/* Reset is performed on a shadow of the control register */
 		ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
 		break;
+	case e1000_ce4100:
 	default:
 		ew32(CTRL, (ctrl | E1000_CTRL_RST));
 		break;
@@ -952,6 +964,67 @@
 }
 
 /**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* SW reset the PHY so all changes take effect */
+	ret_val = e1000_phy_reset(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ctrl_aux;
+
+	switch (hw->phy_type) {
+	case e1000_phy_8211:
+		ret_val = e1000_copper_link_rtl_setup(hw);
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	case e1000_phy_8201:
+		/* Set RMII mode */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= E1000_CTL_AUX_RMII;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+
+		/* Disable the J/K bits required for receive */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= 0x4;
+		ctrl_aux &= ~0x2;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+		ret_val = e1000_copper_link_rtl_setup(hw);
+
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	default:
+		e_dbg("Error Resetting the PHY\n");
+		return E1000_ERR_PHY_TYPE;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
  * e1000_copper_link_preconfig - early configuration for copper
  * @hw: Struct containing variables accessed by shared code
  *
@@ -1286,6 +1359,10 @@
 	if (hw->autoneg_advertised == 0)
 		hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
+	/* IFE/RTL8201N PHY only supports 10/100 */
+	if (hw->phy_type == e1000_phy_8201)
+		hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
 	ret_val = e1000_phy_setup_autoneg(hw);
 	if (ret_val) {
@@ -1341,7 +1418,7 @@
 	s32 ret_val;
 	e_dbg("e1000_copper_link_postconfig");
 
-	if (hw->mac_type >= e1000_82544) {
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
 	} else {
 		ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@
 		ret_val = e1000_copper_link_mgp_setup(hw);
 		if (ret_val)
 			return ret_val;
+	} else {
+		ret_val = gbe_dhg_phy_setup(hw);
+		if (ret_val) {
+			e_dbg("gbe_dhg_phy_setup failed!\n");
+			return ret_val;
+		}
 	}
 
 	if (hw->autoneg) {
@@ -1461,10 +1544,11 @@
 		return ret_val;
 
 	/* Read the MII 1000Base-T Control Register (Address 9). */
-	ret_val =
-	    e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+	ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
 	if (ret_val)
 		return ret_val;
+	else if (hw->phy_type == e1000_phy_8201)
+		mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
 
 	/* Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers.  First we will parse for
@@ -1577,9 +1661,14 @@
 
 	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-	ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
-	if (ret_val)
-		return ret_val;
+	if (hw->phy_type == e1000_phy_8201) {
+		mii_1000t_ctrl_reg = 0;
+	} else {
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+		                              mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
 
 	return E1000_SUCCESS;
 }
@@ -1860,7 +1949,7 @@
 
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.*/
-	if (hw->mac_type >= e1000_82544)
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
 		return E1000_SUCCESS;
 
 	/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@
 	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
 
-	/* Set up duplex in the Device Control and Transmit Control
-	 * registers depending on negotiated values.
-	 */
-	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-	if (ret_val)
-		return ret_val;
+	switch (hw->phy_type) {
+	case e1000_phy_8201:
+		ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
 
-	if (phy_data & M88E1000_PSSR_DPLX)
-		ctrl |= E1000_CTRL_FD;
-	else
-		ctrl &= ~E1000_CTRL_FD;
+		if (phy_data & RTL_PHY_CTRL_FD)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
 
-	e1000_config_collision_dist(hw);
+		if (phy_data & RTL_PHY_CTRL_SPD_100)
+			ctrl |= E1000_CTRL_SPD_100;
+		else
+			ctrl |= E1000_CTRL_SPD_10;
 
-	/* Set up speed in the Device Control register depending on
-	 * negotiated values.
-	 */
-	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-		ctrl |= E1000_CTRL_SPD_1000;
-	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-		ctrl |= E1000_CTRL_SPD_100;
+		e1000_config_collision_dist(hw);
+		break;
+	default:
+		/* Set up duplex in the Device Control and Transmit Control
+		 * registers depending on negotiated values.
+		 */
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+		                             &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (phy_data & M88E1000_PSSR_DPLX)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
+
+		e1000_config_collision_dist(hw);
+
+		/* Set up speed in the Device Control register depending on
+		 * negotiated values.
+		 */
+		if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+			ctrl |= E1000_CTRL_SPD_1000;
+		else if ((phy_data & M88E1000_PSSR_SPEED) ==
+		         M88E1000_PSSR_100MBS)
+			ctrl |= E1000_CTRL_SPD_100;
+	}
 
 	/* Write the configured values back to the Device Control Reg. */
 	ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@
 		 * speed/duplex on the MAC to the current PHY speed/duplex
 		 * settings.
 		 */
-		if (hw->mac_type >= e1000_82544)
+		if ((hw->mac_type >= e1000_82544) &&
+		    (hw->mac_type != e1000_ce4100))
 			e1000_config_collision_dist(hw);
 		else {
 			ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_read_phy_reg_ex");
 
@@ -2752,28 +2864,61 @@
 		 * Control register.  The MAC will take care of interfacing with the
 		 * PHY to retrieve the desired data.
 		 */
-		mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_READ));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_READ) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 64; i++) {
-			udelay(50);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+
+			mdic = readl(E1000_MDIO_STS);
+			if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+				e_dbg("MDI Read Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
+		} else {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_READ));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+			if (mdic & E1000_MDIC_ERROR) {
+				e_dbg("MDI Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
 		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Read did not complete\n");
-			return -E1000_ERR_PHY;
-		}
-		if (mdic & E1000_MDIC_ERROR) {
-			e_dbg("MDI Error\n");
-			return -E1000_ERR_PHY;
-		}
-		*phy_data = (u16) mdic;
 	} else {
 		/* We must first send a preamble through the MDIO pin to signal the
 		 * beginning of an MII instruction.  This is done by sending 32
@@ -2840,7 +2985,7 @@
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_write_phy_reg_ex");
 
@@ -2850,27 +2995,54 @@
 	}
 
 	if (hw->mac_type > e1000_82543) {
-		/* Set up Op-code, Phy Address, register address, and data intended
-		 * for the PHY register in the MDI Control register.  The MAC will take
-		 * care of interfacing with the PHY to send the desired data.
+		/* Set up Op-code, Phy Address, register address, and data
+		 * intended for the PHY register in the MDI Control register.
+		 * The MAC will take care of interfacing with the PHY to send
+		 * the desired data.
 		 */
-		mdic = (((u32) phy_data) |
-			(reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_WRITE));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_WRITE) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 641; i++) {
-			udelay(5);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
-		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Write did not complete\n");
-			return -E1000_ERR_PHY;
+			/* Poll the ready bit to see if the MDI write
+			 * completed
+			 */
+			for (i = 0; i < 640; i++) {
+				udelay(5);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+		} else {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_WRITE));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI write
+			 * completed
+			 */
+			for (i = 0; i < 641; i++) {
+				udelay(5);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
 		}
 	} else {
 		/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@
 		if (hw->phy_id == M88E1011_I_PHY_ID)
 			match = true;
 		break;
+	case e1000_ce4100:
+		if ((hw->phy_id == RTL8211B_PHY_ID) ||
+		    (hw->phy_id == RTL8201N_PHY_ID))
+			match = true;
+		break;
 	case e1000_82541:
 	case e1000_82541_rev_2:
 	case e1000_82547:
@@ -3291,6 +3468,9 @@
 
 	if (hw->phy_type == e1000_phy_igp)
 		return e1000_phy_igp_get_info(hw, phy_info);
+	else if ((hw->phy_type == e1000_phy_8211) ||
+	         (hw->phy_type == e1000_phy_8201))
+		return E1000_SUCCESS;
 	else
 		return e1000_phy_m88_get_info(hw, phy_info);
 }
@@ -3742,6 +3922,12 @@
 
 	e_dbg("e1000_read_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+		                      data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@
 
 	e_dbg("e1000_write_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+		                       data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c..196eeda 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -41,7 +41,7 @@
 struct e1000_hw_stats;
 
 /* Enumerated types specific to the e1000 hardware */
-/* Media Access Controlers */
+/* Media Access Controllers */
 typedef enum {
 	e1000_undefined = 0,
 	e1000_82542_rev2_0,
@@ -52,6 +52,7 @@
 	e1000_82545,
 	e1000_82545_rev_3,
 	e1000_82546,
+	e1000_ce4100,
 	e1000_82546_rev_3,
 	e1000_82541,
 	e1000_82541_rev_2,
@@ -209,9 +210,11 @@
 } e1000_1000t_rx_status;
 
 typedef enum {
-    e1000_phy_m88 = 0,
-    e1000_phy_igp,
-    e1000_phy_undefined = 0xFF
+	e1000_phy_m88 = 0,
+	e1000_phy_igp,
+	e1000_phy_8211,
+	e1000_phy_8201,
+	e1000_phy_undefined = 0xFF
 } e1000_phy_type;
 
 typedef enum {
@@ -442,6 +445,7 @@
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82547EI_MOBILE      0x101A
 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@
 #define E1000_CTRL_EXT 0x00018	/* Extended Device Control - RW */
 #define E1000_FLA      0x0001C	/* Flash Access - RW */
 #define E1000_MDIC     0x00020	/* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD   (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
 #define E1000_SCTL     0x00024	/* SerDes Control - RW */
 #define E1000_FEXTNVM  0x00028	/* Future Extended NVM register */
 #define E1000_FCAL     0x00028	/* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@
 #define E1000_IMS      0x000D0	/* Interrupt Mask Set - RW */
 #define E1000_IMC      0x000D8	/* Interrupt Mask Clear - WO */
 #define E1000_IAM      0x000E0	/* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific;
+ * the RMII/RGMII function is switched by this register - RW.
+ * The following are the bit definitions of the Auxiliary Control Register.
+ */
+#define E1000_CTL_AUX  0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT     10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT   8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT  0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT   (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES       (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT       (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL       (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII     (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII      (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE  (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE  (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE  (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE  (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
 #define E1000_RCTL     0x00100	/* RX Control - RW */
 #define E1000_RDTR1    0x02820	/* RX Delay Timer (1) - RW */
 #define E1000_RDBAL1   0x02900	/* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@
  * in more current versions of the 8254x. Despite the difference in location,
  * the registers function in the same manner.
  */
+#define E1000_82542_CTL_AUX  E1000_CTL_AUX
 #define E1000_82542_CTRL     E1000_CTRL
 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP
 #define E1000_82542_STATUS   E1000_STATUS
@@ -1571,6 +1614,11 @@
 #define E1000_MDIC_INT_EN    0x20000000
 #define E1000_MDIC_ERROR     0x40000000
 
+#define INTEL_CE_GBE_MDIC_OP_WRITE      0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ       0x00000000
+#define INTEL_CE_GBE_MDIC_GO            0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR    0x80000000
+
 #define E1000_KUMCTRLSTA_MASK           0x0000FFFF
 #define E1000_KUMCTRLSTA_OFFSET         0x001F0000
 #define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
@@ -2871,6 +2919,11 @@
 #define M88E1111_I_PHY_ID  0x01410CC0
 #define L1LXT971A_PHY_ID   0x001378E0
 
+#define RTL8211B_PHY_ID    0x001CC910
+#define RTL8201N_PHY_ID    0x8200
+#define RTL_PHY_CTRL_FD    0x0100 /* Full duplex: 0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100    0x200000 /* Force 100Mb */
+
 /* Bits...
  * 15-5: page
  * 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d..bfab140 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
 
 #include "e1000.h"
 #include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
 	/* required last entry */
 	{0,}
 };
@@ -459,6 +466,7 @@
 		case e1000_82545:
 		case e1000_82545_rev_3:
 		case e1000_82546:
+		case e1000_ce4100:
 		case e1000_82546_rev_3:
 		case e1000_82541:
 		case e1000_82541_rev_2:
@@ -573,6 +581,7 @@
 	case e1000_82545:
 	case e1000_82545_rev_3:
 	case e1000_82546:
+	case e1000_ce4100:
 	case e1000_82546_rev_3:
 		pba = E1000_PBA_48K;
 		break;
@@ -894,6 +903,7 @@
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
 	u16 eeprom_data = 0;
+	u16 tmp = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 	int bars, need_ioport;
 
@@ -996,6 +1006,14 @@
 		goto err_sw_init;
 
 	err = -EIO;
+	if (hw->mac_type == e1000_ce4100) {
+		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+		                                pci_resource_len(pdev, BAR_1));
+
+		if (!ce4100_gbe_mdio_base_virt)
+			goto err_mdio_ioremap;
+	}
 
 	if (hw->mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* Auto detect PHY address */
+	if (hw->mac_type == e1000_ce4100) {
+		for (i = 0; i < 32; i++) {
+			hw->phy_addr = i;
+			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+			if (tmp == 0 || tmp == 0xFF) {
+				if (i == 31)
+					goto err_eeprom;
+				continue;
+			} else
+				break;
+		}
+	}
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
@@ -1171,6 +1203,8 @@
 	kfree(adapter->rx_ring);
 err_dma:
 err_sw_init:
+err_mdio_ioremap:
+	iounmap(ce4100_gbe_mdio_base_virt);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -1409,6 +1443,7 @@
 	/* First rev 82545 and 82546 need to not allow any memory
 	 * write location to cross 64k boundary due to errata 23 */
 	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_ce4100 ||
 	    hw->mac_type == e1000_82546) {
 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
 	}
@@ -2198,7 +2233,7 @@
 	 * addresses take precedence to avoid disabling unicast filtering
 	 * when possible.
 	 *
-	 * RAR 0 is used for the station MAC adddress
+	 * RAR 0 is used for the station MAC address
 	 * if there are not 14 addresses, go ahead and clear the filters
 	 */
 	i = 1;
@@ -3443,9 +3478,17 @@
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
+	if (unlikely((!icr)))
 		return IRQ_NONE;  /* Not our interrupt */
 
+	/*
+	 * We might have caused the interrupt, but the above
+	 * read cleared it.  If the driver happens to be down,
+	 * there is nothing to do, so return handled.
+	 */
+	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
+		return IRQ_HANDLED;
+
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		/* guard against interrupt when we're going down */
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75..55c1711f 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,21 @@
 #ifndef _E1000_OSDEP_H_
 #define _E1000_OSDEP_H_
 
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
 #include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE         0x60000
+#define GBE_CONFIG_OFFSET       0x0
+
+#define GBE_CONFIG_RAM_BASE \
+	((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT    phys_to_virt(GBE_CONFIG_RAM_BASE)
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+	(iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+	(ioread16_rep(base + (offset << 1), data, count))
 
 #define er32(reg)							\
 	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543)		\
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e409..89a6903 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -78,6 +78,8 @@
 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@
 		phy->type		 = e1000_phy_bm;
 		phy->ops.acquire = e1000_get_hw_semaphore_82574;
 		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@
 
 	/* This can only be done after all function pointers are setup. */
 	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
 
 	/* Verify phy id */
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (phy->id != IGP01E1000_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82573:
 		if (phy->id != M88E1111_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82574:
 	case e1000_82583:
 		if (phy->id != BME1000_E_PHY_ID_R2)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	default:
-		return -E1000_ERR_PHY;
+		ret_val = -E1000_ERR_PHY;
 		break;
 	}
 
-	return 0;
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
 }
 
 /**
@@ -317,7 +328,7 @@
 
 	/*
 	 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
-	 * first NVM or PHY acess. This should be done for single-port
+	 * first NVM or PHY access. This should be done for single-port
 	 * devices, and for one port only on dual-port devices so that
 	 * for those devices we can still use the SMBI lock to synchronize
 	 * inter-port accesses to the PHY & NVM.
@@ -649,6 +660,58 @@
 }
 
 /**
+ *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.
+ *  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  when active is true, else clear lplu for D3. LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
  *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
  *  @hw: pointer to the HW structure
  *
@@ -956,7 +1019,7 @@
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, icr;
+	u32 ctrl, ctrl_ext;
 	s32 ret_val;
 
 	/*
@@ -1040,7 +1103,7 @@
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	if (hw->mac.type == e1000_82571) {
 		/* Install any alternate MAC address into RAR0 */
@@ -1247,7 +1310,7 @@
 		 * apply workaround for hardware errata documented in errata
 		 * docs Fixes issue where some error prone or unreliable PCIe
 		 * completions are occurring, particularly with ASPM enabled.
-		 * Without fix, issue can cause tx timeouts.
+		 * Without fix, issue can cause Tx timeouts.
 		 */
 		reg = er32(GCR2);
 		reg |= 1;
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 360c913..28519ac 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2008 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 7245dc2..1314998 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8..e610e13 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/crc32.h>
 
 #include "hw.h"
 
@@ -496,6 +497,8 @@
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644..2fefa82 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -784,7 +784,7 @@
  **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/*
@@ -818,7 +818,7 @@
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	ret_val = e1000_check_alt_mac_addr_generic(hw);
 
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf..fa08b63 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -624,20 +624,24 @@
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver,  e1000e_driver_name, 32);
-	strncpy(drvinfo->version, e1000e_driver_version, 32);
+	strncpy(drvinfo->driver,  e1000e_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version) - 1);
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 		(adapter->eeprom_vers & 0xF000) >> 12,
 		(adapter->eeprom_vers & 0x0FF0) >> 4,
 		(adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
@@ -1704,6 +1708,19 @@
 	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->state);
+
+	if (!if_running) {
+		/* Get control of and reset hardware */
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_get_hw_control(adapter);
+
+		e1000e_power_up_phy(adapter);
+
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		e1000e_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
+	}
+
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -1717,8 +1734,6 @@
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
-		else
-			e1000e_reset(adapter);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000e_reset(adapter);
-		/* make sure the phy is powered up */
-		e1000e_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1755,28 +1768,29 @@
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
-			clear_bit(__E1000_TESTING, &adapter->state);
-			dev_open(netdev);
-			set_bit(__E1000_TESTING, &adapter->state);
-		}
+		/* Online tests */
 
 		e_info("online testing starting\n");
-		/* Online tests */
-		if (e1000_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Online tests aren't run; pass by default */
+		/* register, eeprom, intr and loopback tests not run online */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
 		data[3] = 0;
 
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT))
-			dev_close(netdev);
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 	}
+
+	if (!if_running) {
+		e1000e_reset(adapter);
+
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_release_hw_control(adapter);
+	}
+
 	msleep_interruptible(4 * 1000);
 }
 
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5..bc0860a 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -83,6 +83,7 @@
 	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
 	E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
 	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS      = 0x01008, /* Packet Buffer Size */
 	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
@@ -101,7 +102,7 @@
 	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
 	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
 #define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + (_n << 8))
-	E1000_RADV     = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
+	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
 
 /* Convenience macros
  *
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc08..fb46974 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -321,7 +321,7 @@
 	}
 
 	/*
-	 * Reset the PHY before any acccess to it.  Doing so, ensures that
+	 * Reset the PHY before any access to it.  Doing so ensures that
 	 * the PHY is in a known good state before we read/write PHY registers.
 	 * The generic reset is sufficient here, because we haven't determined
 	 * the PHY type yet.
@@ -1395,22 +1395,6 @@
 	}
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
-	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
-	u32 i, j, mask, crc;
-
-	crc = 0xffffffff;
-	for (i = 0; i < 6; i++) {
-		crc = crc ^ mac[i];
-		for (j = 8; j > 0; j--) {
-			mask = (crc & 1) * (-1);
-			crc = (crc >> 1) ^ (poly & mask);
-		}
-	}
-	return ~crc;
-}
-
 /**
  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  *  with 82579 PHY
@@ -1453,8 +1437,7 @@
 			mac_addr[4] = (addr_high & 0xFF);
 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
-			ew32(PCH_RAICC(i),
-					e1000_calc_rx_da_crc(mac_addr));
+			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
 		}
 
 		/* Write Rx addresses to the PHY */
@@ -2977,7 +2960,7 @@
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u16 reg;
-	u32 ctrl, icr, kab;
+	u32 ctrl, kab;
 	s32 ret_val;
 
 	/*
@@ -3067,7 +3050,7 @@
 		ew32(CRC_OFFSET, 0x65656565);
 
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	kab = er32(KABGTXD);
 	kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_reg(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3276,9 +3259,8 @@
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
-		ret_val = hw->phy.ops.write_reg(hw,
-		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
-		                             hw->fc.pause_time);
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
 		if (ret_val)
 			return ret_val;
 	}
@@ -3342,8 +3324,7 @@
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                               &reg_data);
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
 		if (ret_val)
 			return ret_val;
 
@@ -3361,8 +3342,7 @@
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                                reg_data);
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
 		if (ret_val)
 			return ret_val;
 		break;
@@ -3646,7 +3626,8 @@
 {
 	if (hw->phy.type == e1000_phy_ife)
 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
 
 	ew32(LEDCTL, hw->mac.ledctl_mode1);
 	return 0;
@@ -3660,8 +3641,7 @@
  **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_mode1);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
 }
 
 /**
@@ -3672,8 +3652,7 @@
  **/
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_default);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
 }
 
 /**
@@ -3704,7 +3683,7 @@
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3735,7 +3714,7 @@
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3844,20 +3823,20 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
 	}
 }
 
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170..68aa174 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -533,7 +533,7 @@
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -556,7 +556,7 @@
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -598,7 +598,7 @@
 			mac->autoneg_failed = 1;
 			return 0;
 		}
-		e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
 
 		/* Disable auto-negotiation in the TXCW register */
 		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -621,7 +621,7 @@
 		 * and disable forced link in the Device Control register
 		 * in an attempt to auto-negotiate with our link partner.
 		 */
-		e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
 		ew32(TXCW, mac->txcw);
 		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -800,9 +800,9 @@
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
 	 *      1:  Rx flow control is enabled (we can receive pause frames,
-	 *	  but not send pause frames).
+	 *          but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames but we
-	 *	  do not support receiving pause frames).
+	 *          do not support receiving pause frames).
 	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
 	 */
 	switch (hw->fc.current_mode) {
@@ -1031,9 +1031,9 @@
 	 * The possible values of the "fc" parameter are:
 	 *      0:  Flow control is completely disabled
 	 *      1:  Rx flow control is enabled (we can receive pause
-	 *	  frames but not send pause frames).
+	 *          frames but not send pause frames).
 	 *      2:  Tx flow control is enabled (we can send pause frames
-	 *	  frames but we do not receive pause frames).
+	 *          frames but we do not receive pause frames).
 	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
 	 *  other:  No other values should be possible at this point.
 	 */
@@ -1135,7 +1135,8 @@
 		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
 		if (ret_val)
 			return ret_val;
-		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
 		if (ret_val)
 			return ret_val;
 
@@ -1188,7 +1189,7 @@
 			} else {
 				hw->fc.current_mode = e1000_fc_rx_pause;
 				e_dbg("Flow Control = "
-					 "RX PAUSE frames only.\r\n");
+				      "Rx PAUSE frames only.\r\n");
 			}
 		}
 		/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242..1c18f26 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -77,17 +77,17 @@
 	char *name;
 };
 
-#define E1000_RDFH	0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT	0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS	0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS	0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC	0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
 
-#define E1000_TDFH	0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT	0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS	0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS	0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC	0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
 
 static const struct e1000_reg_info e1000_reg_info_tbl[] = {
 
@@ -99,7 +99,7 @@
 	/* Interrupt Registers */
 	{E1000_ICR, "ICR"},
 
-	/* RX Registers */
+	/* Rx Registers */
 	{E1000_RCTL, "RCTL"},
 	{E1000_RDLEN, "RDLEN"},
 	{E1000_RDH, "RDH"},
@@ -115,7 +115,7 @@
 	{E1000_RDFTS, "RDFTS"},
 	{E1000_RDFPC, "RDFPC"},
 
-	/* TX Registers */
+	/* Tx Registers */
 	{E1000_TCTL, "TCTL"},
 	{E1000_TDBAL, "TDBAL"},
 	{E1000_TDBAH, "TDBAH"},
@@ -160,7 +160,7 @@
 		break;
 	default:
 		printk(KERN_INFO "%-15s %08x\n",
-			reginfo->name, __er32(hw, reginfo->ofs));
+		       reginfo->name, __er32(hw, reginfo->ofs));
 		return;
 	}
 
@@ -171,9 +171,8 @@
 	printk(KERN_CONT "\n");
 }
 
-
 /*
- * e1000e_dump - Print registers, tx-ring and rx-ring
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
  */
 static void e1000e_dump(struct e1000_adapter *adapter)
 {
@@ -182,12 +181,20 @@
 	struct e1000_reg_info *reginfo;
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	struct e1000_tx_desc *tx_desc;
-	struct my_u0 { u64 a; u64 b; } *u0;
+	struct my_u0 {
+		u64 a;
+		u64 b;
+	} *u0;
 	struct e1000_buffer *buffer_info;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
 	union e1000_rx_desc_packet_split *rx_desc_ps;
 	struct e1000_rx_desc *rx_desc;
-	struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+	struct my_u1 {
+		u64 a;
+		u64 b;
+		u64 c;
+		u64 d;
+	} *u1;
 	u32 staterr;
 	int i = 0;
 
@@ -198,12 +205,10 @@
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
 		printk(KERN_INFO "Device Name     state            "
-			"trans_start      last_rx\n");
+		       "trans_start      last_rx\n");
 		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
-			netdev->name,
-			netdev->state,
-			netdev->trans_start,
-			netdev->last_rx);
+		       netdev->name, netdev->state, netdev->trans_start,
+		       netdev->last_rx);
 	}
 
 	/* Print Registers */
@@ -214,26 +219,26 @@
 		e1000_regdump(hw, reginfo);
 	}
 
-	/* Print TX Ring Summary */
+	/* Print Tx Ring Summary */
 	if (!netdev || !netif_running(netdev))
 		goto exit;
 
-	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
 	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
-		" leng ntw timestamp\n");
+	       " leng ntw timestamp\n");
 	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
 	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
-		0, tx_ring->next_to_use, tx_ring->next_to_clean,
-		(unsigned long long)buffer_info->dma,
-		buffer_info->length,
-		buffer_info->next_to_watch,
-		(unsigned long long)buffer_info->time_stamp);
+	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
+	       (unsigned long long)buffer_info->dma,
+	       buffer_info->length,
+	       buffer_info->next_to_watch,
+	       (unsigned long long)buffer_info->time_stamp);
 
-	/* Print TX Rings */
+	/* Print Tx Ring */
 	if (!netif_msg_tx_done(adapter))
 		goto rx_ring_summary;
 
-	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
 
 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
 	 *
@@ -263,22 +268,22 @@
 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
 	 */
 	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Legacy format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Legacy format\n");
 	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Ext Context format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Context format\n");
 	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
-		" [bi->dma       ] leng  ntw timestamp        bi->skb "
-		"<-- Ext Data format\n");
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Data format\n");
 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
 		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		buffer_info = &tx_ring->buffer_info[i];
 		u0 = (struct my_u0 *)tx_desc;
 		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
-			"%04X  %3X %016llX %p",
-		       (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
-			((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+		       "%04X  %3X %016llX %p",
+		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
 		       (unsigned long long)le64_to_cpu(u0->a),
 		       (unsigned long long)le64_to_cpu(u0->b),
 		       (unsigned long long)buffer_info->dma,
@@ -296,22 +301,22 @@
 
 		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
 			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
-					buffer_info->length, true);
+				       16, 1, phys_to_virt(buffer_info->dma),
+				       buffer_info->length, true);
 	}
 
-	/* Print RX Rings Summary */
+	/* Print Rx Ring Summary */
 rx_ring_summary:
-	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
 	printk(KERN_INFO "Queue [NTU] [NTC]\n");
 	printk(KERN_INFO " %5d %5X %5X\n", 0,
-		rx_ring->next_to_use, rx_ring->next_to_clean);
+	       rx_ring->next_to_use, rx_ring->next_to_clean);
 
-	/* Print RX Rings */
+	/* Print Rx Ring */
 	if (!netif_msg_rx_status(adapter))
 		goto exit;
 
-	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
 	switch (adapter->rx_ps_pages) {
 	case 1:
 	case 2:
@@ -329,7 +334,7 @@
 		 *    +-----------------------------------------------------+
 		 */
 		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
-			"[buffer 1 63:0 ] "
+		       "[buffer 1 63:0 ] "
 		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
 		       "[bi->skb] <-- Ext Pkt Split format\n");
 		/* [Extended] Receive Descriptor (Write-Back) Format
@@ -344,7 +349,7 @@
 		 *   63       48 47    32 31            20 19               0
 		 */
 		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
-			"[vl   l0 ee  es] "
+		       "[vl   l0 ee  es] "
 		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
 		       "[bi->skb] <-- Ext Rx Write-Back format\n");
 		for (i = 0; i < rx_ring->count; i++) {
@@ -352,26 +357,26 @@
 			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
 			u1 = (struct my_u1 *)rx_desc_ps;
 			staterr =
-				le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
 			if (staterr & E1000_RXD_STAT_DD) {
 				/* Descriptor Done */
 				printk(KERN_INFO "RWB[0x%03X]     %016llX "
-					"%016llX %016llX %016llX "
-					"---------------- %p", i,
-					(unsigned long long)le64_to_cpu(u1->a),
-					(unsigned long long)le64_to_cpu(u1->b),
-					(unsigned long long)le64_to_cpu(u1->c),
-					(unsigned long long)le64_to_cpu(u1->d),
-					buffer_info->skb);
+				       "%016llX %016llX %016llX "
+				       "---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       buffer_info->skb);
 			} else {
 				printk(KERN_INFO "R  [0x%03X]     %016llX "
-					"%016llX %016llX %016llX %016llX %p", i,
-					(unsigned long long)le64_to_cpu(u1->a),
-					(unsigned long long)le64_to_cpu(u1->b),
-					(unsigned long long)le64_to_cpu(u1->c),
-					(unsigned long long)le64_to_cpu(u1->d),
-					(unsigned long long)buffer_info->dma,
-					buffer_info->skb);
+				       "%016llX %016llX %016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
 
 				if (netif_msg_pktdata(adapter))
 					print_hex_dump(KERN_INFO, "",
@@ -400,18 +405,18 @@
 		 * 63       48 47    40 39      32 31         16 15      0
 		 */
 		printk(KERN_INFO "Rl[desc]     [address 63:0  ] "
-			"[vl er S cks ln] [bi->dma       ] [bi->skb] "
-			"<-- Legacy format\n");
+		       "[vl er S cks ln] [bi->dma       ] [bi->skb] "
+		       "<-- Legacy format\n");
 		for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
 			rx_desc = E1000_RX_DESC(*rx_ring, i);
 			buffer_info = &rx_ring->buffer_info[i];
 			u0 = (struct my_u0 *)rx_desc;
 			printk(KERN_INFO "Rl[0x%03X]    %016llX %016llX "
-				"%016llX %p", i,
-				(unsigned long long)le64_to_cpu(u0->a),
-				(unsigned long long)le64_to_cpu(u0->b),
-				(unsigned long long)buffer_info->dma,
-				buffer_info->skb);
+			       "%016llX %p", i,
+			       (unsigned long long)le64_to_cpu(u0->a),
+			       (unsigned long long)le64_to_cpu(u0->b),
+			       (unsigned long long)buffer_info->dma,
+			       buffer_info->skb);
 			if (i == rx_ring->next_to_use)
 				printk(KERN_CONT " NTU\n");
 			else if (i == rx_ring->next_to_clean)
@@ -421,9 +426,10 @@
 
 			if (netif_msg_pktdata(adapter))
 				print_hex_dump(KERN_INFO, "",
-					DUMP_PREFIX_ADDRESS,
-					16, 1, phys_to_virt(buffer_info->dma),
-					adapter->rx_buffer_len, true);
+					       DUMP_PREFIX_ADDRESS,
+					       16, 1,
+					       phys_to_virt(buffer_info->dma),
+					       adapter->rx_buffer_len, true);
 		}
 	}
 
@@ -450,8 +456,7 @@
  * @skb: pointer to sk_buff to be indicated to stack
  **/
 static void e1000_receive_skb(struct e1000_adapter *adapter,
-			      struct net_device *netdev,
-			      struct sk_buff *skb,
+			      struct net_device *netdev, struct sk_buff *skb,
 			      u8 status, __le16 vlan)
 {
 	skb->protocol = eth_type_trans(skb, netdev);
@@ -464,7 +469,7 @@
 }
 
 /**
- * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * e1000_rx_checksum - Receive Checksum Offload
  * @adapter:     board private structure
  * @status_err:  receive descriptor status and error fields
  * @csum:	receive descriptor csum field
@@ -548,7 +553,7 @@
 						  adapter->rx_buffer_len,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			dev_err(&pdev->dev, "RX DMA map failed\n");
+			dev_err(&pdev->dev, "Rx DMA map failed\n");
 			adapter->rx_dma_failed++;
 			break;
 		}
@@ -601,7 +606,8 @@
 			ps_page = &buffer_info->ps_pages[j];
 			if (j >= adapter->rx_ps_pages) {
 				/* all unused desc entries get hw null ptr */
-				rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
+				rx_desc->read.buffer_addr[j + 1] =
+				    ~cpu_to_le64(0);
 				continue;
 			}
 			if (!ps_page->page) {
@@ -617,7 +623,7 @@
 				if (dma_mapping_error(&pdev->dev,
 						      ps_page->dma)) {
 					dev_err(&adapter->pdev->dev,
-					  "RX DMA page map failed\n");
+						"Rx DMA page map failed\n");
 					adapter->rx_dma_failed++;
 					goto no_buffers;
 				}
@@ -627,8 +633,8 @@
 			 * didn't change because each write-back
 			 * erases this info.
 			 */
-			rx_desc->read.buffer_addr[j+1] =
-			     cpu_to_le64(ps_page->dma);
+			rx_desc->read.buffer_addr[j + 1] =
+			    cpu_to_le64(ps_page->dma);
 		}
 
 		skb = netdev_alloc_skb_ip_align(netdev,
@@ -644,7 +650,7 @@
 						  adapter->rx_ps_bsize0,
 						  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-			dev_err(&pdev->dev, "RX DMA map failed\n");
+			dev_err(&pdev->dev, "Rx DMA map failed\n");
 			adapter->rx_dma_failed++;
 			/* cleanup skb */
 			dev_kfree_skb_any(skb);
@@ -662,7 +668,7 @@
 			 * such as IA-64).
 			 */
 			wmb();
-			writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+			writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
 		}
 
 		i++;
@@ -1106,11 +1112,10 @@
 		cleaned = 1;
 		cleaned_count++;
 		dma_unmap_single(&pdev->dev, buffer_info->dma,
-				 adapter->rx_ps_bsize0,
-				 DMA_FROM_DEVICE);
+				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
 		buffer_info->dma = 0;
 
-		/* see !EOP comment in other rx routine */
+		/* see !EOP comment in other Rx routine */
 		if (!(staterr & E1000_RXD_STAT_EOP))
 			adapter->flags2 |= FLAG2_IS_DISCARDING;
 
@@ -1980,15 +1985,15 @@
 }
 
 /**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
  *
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is open.
  **/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2005,16 +2010,16 @@
 }
 
 /**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
  * @adapter: address of board private structure
  *
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
  * driver is no longer loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is closed.
  *
  **/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2445,7 +2450,7 @@
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 		return;
 	}
 
@@ -2610,7 +2615,7 @@
 }
 
 /**
- * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * e1000_configure_tx - Configure Transmit Unit after Reset
  * @adapter: board private structure
  *
  * Configure the Tx unit of the MAC after a reset.
@@ -2663,7 +2668,7 @@
 		 * hthresh = 1 ==> prefetch when one or more available
 		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
 		 * BEWARE: this seems to work but should be considered first if
-		 * there are tx hangs or other tx related bugs
+		 * there are Tx hangs or other Tx related bugs
 		 */
 		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
 		ew32(TXDCTL(0), txdctl);
@@ -2734,6 +2739,9 @@
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
 		else
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
 	}
 
 	/* Program MC offset vector base */
@@ -2874,7 +2882,7 @@
 	if (adapter->rx_ps_pages) {
 		/* this is a 32 byte descriptor */
 		rdlen = rx_ring->count *
-			sizeof(union e1000_rx_desc_packet_split);
+		    sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
@@ -2897,7 +2905,7 @@
 		/*
 		 * set the writeback threshold (only takes effect if the RDTR
 		 * is set). set GRAN=1 and write back up to 0x4 worth, and
-		 * enable prefetching of 0x20 rx descriptors
+		 * enable prefetching of 0x20 Rx descriptors
 		 * granularity = 01
 		 * wthresh = 04,
 		 * hthresh = 04,
@@ -2978,12 +2986,10 @@
 			 * excessive C-state transition latencies result in
 			 * dropped transactions.
 			 */
-			pm_qos_update_request(
-				&adapter->netdev->pm_qos_req, 55);
+			pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
 		} else {
-			pm_qos_update_request(
-				&adapter->netdev->pm_qos_req,
-				PM_QOS_DEFAULT_VALUE);
+			pm_qos_update_request(&adapter->netdev->pm_qos_req,
+					      PM_QOS_DEFAULT_VALUE);
 		}
 	}
 
@@ -3149,7 +3155,7 @@
 		/* lower 16 bits has Rx packet buffer allocation size in KB */
 		pba &= 0xffff;
 		/*
-		 * the Tx fifo also stores 16 bytes of information about the tx
+		 * the Tx fifo also stores 16 bytes of information about the Tx
 		 * but don't include ethernet FCS because hardware appends it
 		 */
 		min_tx_space = (adapter->max_frame_size +
@@ -3172,7 +3178,7 @@
 			pba -= min_tx_space - tx_space;
 
 			/*
-			 * if short on Rx space, Rx wins and must trump tx
+			 * if short on Rx space, Rx wins and must trump Tx
 			 * adjustment or use Early Receive if available
 			 */
 			if ((pba < min_rx_space) &&
@@ -3184,7 +3190,6 @@
 		ew32(PBA, pba);
 	}
 
-
 	/*
 	 * flow control settings
 	 *
@@ -3272,7 +3277,7 @@
 	 * that the network interface is in control
 	 */
 	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	ew32(WUC, 0);
 
@@ -3285,6 +3290,13 @@
 	ew32(VET, ETH_P_8021Q);
 
 	e1000e_reset_adaptive(hw);
+
+	if (!netif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
 	e1000_get_phy_info(hw);
 
 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3570,7 +3582,7 @@
 	 * interface is now open and reset the part to a known state.
 	 */
 	if (adapter->flags & FLAG_HAS_AMT) {
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 		e1000e_reset(adapter);
 	}
 
@@ -3634,7 +3646,7 @@
 	return 0;
 
 err_req_irq:
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter);
 err_setup_rx:
@@ -3689,8 +3701,9 @@
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_release_hw_control(adapter);
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
 
 	if ((adapter->flags & FLAG_HAS_ERT) ||
 	    (adapter->hw.mac.type == e1000_pch2lan))
@@ -4029,11 +4042,11 @@
 	       adapter->netdev->name,
 	       adapter->link_speed,
 	       (adapter->link_duplex == FULL_DUPLEX) ?
-	                        "Full Duplex" : "Half Duplex",
+	       "Full Duplex" : "Half Duplex",
 	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
-	                        "RX/TX" :
-	       ((ctrl & E1000_CTRL_RFCE) ? "RX" :
-	       ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
+	       "Rx/Tx" :
+	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
 }
 
 static bool e1000e_has_link(struct e1000_adapter *adapter)
@@ -4328,7 +4341,7 @@
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* flush partial descriptors to memory before detecting tx hang */
+	/* flush partial descriptors to memory before detecting Tx hang */
 	if (adapter->flags2 & FLAG2_DMA_BURST) {
 		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
 		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
@@ -4519,7 +4532,7 @@
 		buffer_info->next_to_watch = i;
 		buffer_info->dma = dma_map_single(&pdev->dev,
 						  skb->data + offset,
-						  size,	DMA_TO_DEVICE);
+						  size, DMA_TO_DEVICE);
 		buffer_info->mapped_as_page = false;
 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
 			goto dma_error;
@@ -4566,7 +4579,7 @@
 		}
 	}
 
-	segs = skb_shinfo(skb)->gso_segs ?: 1;
+	segs = skb_shinfo(skb)->gso_segs ? : 1;
 	/* multiply data chunks by size of headers */
 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
 
@@ -4578,13 +4591,13 @@
 	return count;
 
 dma_error:
-	dev_err(&pdev->dev, "TX DMA map failed\n");
+	dev_err(&pdev->dev, "Tx DMA map failed\n");
 	buffer_info->dma = 0;
 	if (count)
 		count--;
 
 	while (count--) {
-		if (i==0)
+		if (i == 0)
 			i += tx_ring->count;
 		i--;
 		buffer_info = &tx_ring->buffer_info[i];
@@ -5209,7 +5222,7 @@
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
@@ -5366,7 +5379,7 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	return 0;
 }
@@ -5613,7 +5626,7 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 }
 
@@ -5636,7 +5649,7 @@
 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
 						E1000_PBANUM_LENGTH);
 	if (ret_val)
-		strcpy(pba_str, "Unknown");
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
 	       hw->mac.type, hw->phy.type, pba_str);
 }
@@ -5963,9 +5976,9 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
-	strcpy(netdev->name, "eth%d");
+	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
@@ -5982,12 +5995,11 @@
 
 err_register:
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 err_eeprom:
 	if (!e1000_check_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
-
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 err_sw_init:
@@ -6053,7 +6065,7 @@
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);
@@ -6184,7 +6196,7 @@
 	int ret;
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright (c) 1999 - 2010 Intel Corporation.\n");
+	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index a9612b0..4dd9b63 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -62,10 +62,9 @@
 	module_param_array_named(X, X, int, &num_##X, 0);	\
 	MODULE_PARM_DESC(X, desc);
 
-
 /*
  * Transmit Interrupt Delay in units of 1.024 microseconds
- * Tx interrupt delay needs to typically be set to something non zero
+ * Tx interrupt delay needs to typically be set to something non-zero
  *
  * Valid Range: 0-65535
  */
@@ -112,6 +111,7 @@
 #define DEFAULT_ITR 3
 #define MAX_ITR 100000
 #define MIN_ITR 100
+
 /* IntMode (Interrupt Mode)
  *
  * Valid Range: 0 - 2
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efe..6bea051 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -637,12 +637,11 @@
  **/
 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 phy_data;
 
-	/* Enable CRS on TX. This must be set for half-duplex operation. */
-	ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -651,7 +650,7 @@
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
 
 out:
 	return ret_val;
@@ -774,16 +773,14 @@
 	}
 
 	if (phy->type == e1000_phy_82578) {
-		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                            &phy_data);
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 		if (ret_val)
 			return ret_val;
 
 		/* 82578 PHY - set the downshift count to 1x. */
 		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
 		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
-		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                             phy_data);
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
 		if (ret_val)
 			return ret_val;
 	}
@@ -1319,9 +1316,8 @@
 				 * We didn't get link.
 				 * Reset the DSP and cross our fingers.
 				 */
-				ret_val = e1e_wphy(hw,
-						M88E1000_PHY_PAGE_SELECT,
-						0x001d);
+				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+						   0x001d);
 				if (ret_val)
 					return ret_val;
 				ret_val = e1000e_phy_reset_dsp(hw);
@@ -2990,7 +2986,7 @@
 }
 
 /**
- *  e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+ *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
  *  @page: page to be accessed
  **/
 static u32 e1000_get_phy_addr_for_hv_page(u32 page)
@@ -3071,12 +3067,12 @@
 		goto out;
 
 	/* Do not apply workaround if in PHY loopback bit 14 set */
-	hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+	e1e_rphy(hw, PHY_CONTROL, &data);
 	if (data & PHY_CONTROL_LB)
 		goto out;
 
 	/* check if link is up and at 1Gbps */
-	ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -3092,14 +3088,12 @@
 	mdelay(200);
 
 	/* flush the packets in the fifo buffer */
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-	                                HV_MUX_DATA_CTRL_GEN_TO_MAC |
-	                                HV_MUX_DATA_CTRL_FORCE_SPEED);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
 	if (ret_val)
 		goto out;
 
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-	                                HV_MUX_DATA_CTRL_GEN_TO_MAC);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
 
 out:
 	return ret_val;
@@ -3119,7 +3113,7 @@
 	s32 ret_val;
 	u16 data;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 
 	if (!ret_val)
 		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@
 	u16 phy_data;
 	bool link;
 
-	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
 		goto out;
 
 	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
@@ -3212,7 +3206,7 @@
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 	if (ret_val)
 		goto out;
 
@@ -3224,7 +3218,7 @@
 		if (ret_val)
 			goto out;
 
-		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
 		if (ret_val)
 			goto out;
 
@@ -3258,7 +3252,7 @@
 	s32 ret_val;
 	u16 phy_data, length;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 	if (ret_val)
 		goto out;
 
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 4fa8d2a..eb35951 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1761,7 +1761,7 @@
 module_param_array(irq, int, NULL, 0);
 module_param_array(mem, int, NULL, 0);
 module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
 MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
 MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
 MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d..6c7257b 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0106"
+#define DRV_VERSION	"EHEA_0107"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5b..f75d314 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@
 		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, i);
+	ehea_update_rq1a(pr->qp, i - 1);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@
 	int ret;
 	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
 
-	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
-			       - init_attr->act_nr_rwqes_rq2
-			       - init_attr->act_nr_rwqes_rq3 - 1);
+	ehea_init_fill_rq1(pr, pr->rq1_skba.len);
 
 	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
 
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d4..2a71373 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
  *
  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
  * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
  */
 
 #include <linux/module.h>
@@ -45,29 +47,41 @@
 
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #endif
 
 #include "fec.h"
 
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define FEC_ALIGNMENT	0xf
 #else
 #define FEC_ALIGNMENT	0x3
 #endif
 
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME	"fec"
 
-static unsigned char	fec_mac_default[] = {
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC		(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	}
 };
 
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
 /*
  * Some hardware gets its MAC address out of local flash memory.
  * If this is non-zero then assume it is the address to get the MAC from.
@@ -133,7 +147,8 @@
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define	OPT_FRAME_SIZE	0
@@ -186,7 +201,6 @@
 	int     mii_timeout;
 	uint    phy_speed;
 	phy_interface_t	phy_interface;
-	int	index;
 	int	link;
 	int	full_duplex;
 	struct	completion mdio_done;
@@ -213,10 +227,23 @@
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static void *swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < (len + 3) / 4; i++, buf++)
+		*buf = cpu_to_be32(*buf);
+
+	return bufaddr;
+}
+
 static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short	status;
@@ -261,6 +288,14 @@
 		bufaddr = fep->tx_bounce[index];
 	}
 
+	/*
+	 * Some designs made an incorrect assumption about the endian mode
+	 * of the system they run on. As a result, the driver has to swap
+	 * every frame going to and coming from the controller.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+		swap_buffer(bufaddr, skb->len);
+
 	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
@@ -429,6 +464,8 @@
 fec_enet_rx(struct net_device *dev)
 {
 	struct	fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -492,6 +529,9 @@
 	        dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
         			DMA_FROM_DEVICE);
 
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(data, pkt_len);
+
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
@@ -538,37 +578,50 @@
 }
 
 /* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
 static void __inline__ fec_get_mac(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
-	if (FEC_FLASHMAC) {
-		/*
-		 * Get MAC address from FLASH.
-		 * If it is all 1's or 0's, use the default.
-		 */
-		iap = (unsigned char *)FEC_FLASHMAC;
-		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
-		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
-			iap = fec_mac_default;
-		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
-		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
-			iap = fec_mac_default;
-	} else {
-		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
-		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+	/*
+	 * try to get mac address in following order:
+	 *
+	 * 1) module parameter via kernel command line in form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+	/*
+	 * 2) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+	}
+
+	/*
+	 * 3) FEC MAC registers set by the bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((unsigned long *) &tmpaddr[0]) =
+			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+		*((unsigned short *) &tmpaddr[4]) =
+			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
 		iap = &tmpaddr[0];
 	}
 
 	memcpy(dev->dev_addr, iap, ETH_ALEN);
 
-	/* Adjust MAC if using default MAC address */
-	if (iap == fec_mac_default)
-		 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr)
+		 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
-#endif
 
 /* ------------------------------------------------------------------------- */
 
@@ -651,8 +704,8 @@
 	fep->mii_timeout = 0;
 	init_completion(&fep->mdio_done);
 
-	/* start a read op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
 		fep->hwp + FEC_MII_DATA);
@@ -681,6 +734,7 @@
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
+	int dev_id = fep->pdev->id;
 
 	fep->phy_dev = NULL;
 
@@ -692,6 +746,8 @@
 			continue;
 		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
 			continue;
+		if (dev_id--)
+			continue;
 		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
 		break;
 	}
@@ -729,10 +785,35 @@
 
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
+	static struct mii_bus *fec0_mii_bus;
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
 
+	/*
+	 * With the ENET-MAC, the two FEC interfaces are not equivalent.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is the slave
+	 *  - external PHYs can only be configured by fec0
+	 *
+	 * That is to say, fec1 cannot work independently; it only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for Switch mode.
+	 *
+	 * Because of the last point above, both PHYs are attached to the
+	 * fec0 MDIO interface in the board design, and need to be
+	 * configured through the fec0 mii_bus.
+	 */
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+		/* fec1 uses fec0 mii_bus */
+		fep->mii_bus = fec0_mii_bus;
+		return 0;
+	}
+
 	fep->mii_timeout = 0;
 
 	/*
@@ -769,6 +850,10 @@
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
+	/* save fec0 mii_bus */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fec0_mii_bus = fep->mii_bus;
+
 	return 0;
 
 err_out_free_mdio_irq:
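
Per the comment above, only fec0 can drive the MDIO lines when the ENET-MAC quirk is set, so fec_enet_mii_init() registers a single bus, saves it in fec0_mii_bus and hands the same pointer to fec1, while fec_enet_mii_probe() skips dev_id PHYs on that shared bus so each interface binds to its own PHY. The fragment below is only a rough sketch of the sharing rule, with invented type and function names.

/* Rough sketch: instance 0 creates and publishes the MDIO bus, later
 * instances with the ENET-MAC quirk simply borrow it.
 */
struct demo_mii_bus {
	int id;
};

static struct demo_mii_bus *demo_fec0_bus;	/* plays the role of fec0_mii_bus */

static struct demo_mii_bus *demo_mii_init(struct demo_mii_bus *own_bus,
					  int dev_id, int enet_mac_quirk)
{
	if (enet_mac_quirk && dev_id) {
		/* fec1: no bus of its own, reuse the one fec0 registered */
		return demo_fec0_bus;
	}

	if (enet_mac_quirk)
		demo_fec0_bus = own_bus;	/* publish for fec1 */

	return own_bus;
}
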
@@ -1067,9 +1152,8 @@
  /*
   * XXX:  We need to clean up on failure exits here.
   *
-  * index is only used in legacy code
   */
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->index = index;
 	fep->hwp = (void __iomem *)dev->base_addr;
 	fep->netdev = dev;
 
-	/* Set the Ethernet address */
-#ifdef CONFIG_M5272
+	/* Get the Ethernet address */
 	fec_get_mac(dev);
-#else
-	{
-		unsigned long l;
-		l = readl(fep->hwp + FEC_ADDR_LOW);
-		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
-		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
-		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
-		l = readl(fep->hwp + FEC_ADDR_HIGH);
-		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
-	}
-#endif
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@
 fec_restart(struct net_device *dev, int duplex)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int i;
+	u32 val, temp_mac[2];
 
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
 
+	/*
+	 * An enet-mac reset also resets the MAC address registers,
+	 * so they need to be reprogrammed here.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
 	/* Clear any outstanding interrupt. */
 	writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
@@ -1208,20 +1290,45 @@
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+	/*
+	 * The PHY interface and speed need to be configured
+	 * differently on the enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		val = readl(fep->hwp + FEC_R_CNTRL);
+
+		/* MII or RMII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			val |= (1 << 8);
+		else
+			val &= ~(1 << 8);
+
+		/* 10M or 100M */
+		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+			val &= ~(1 << 9);
+		else
+			val |= (1 << 9);
+
+		writel(val, fep->hwp + FEC_R_CNTRL);
+	} else {
 #ifdef FEC_MIIGSK_ENR
-	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-		/* disable the gasket and wait */
-		writel(0, fep->hwp + FEC_MIIGSK_ENR);
-		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-			udelay(1);
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
 
-		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
-		writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 */
+			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
 
-		/* re-enable the gasket */
-		writel(2, fep->hwp + FEC_MIIGSK_ENR);
-	}
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
 #endif
+	}
 
 	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
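
On the ENET-MAC, fec_restart() above also has to program the interface mode and link speed directly into the receive control register: bit 8 selects RMII over MII, and bit 9 is set for 10 Mbit/s and cleared for 100 Mbit/s. A small self-contained sketch of that bit manipulation is shown below; the macro names are illustrative, not the driver's.

#include <stdint.h>
#include <stdbool.h>

#define DEMO_RCR_RMII_MODE	(1u << 8)	/* RMII when set, MII when clear */
#define DEMO_RCR_10T_MODE	(1u << 9)	/* 10 Mbit/s when set */

static uint32_t demo_fec_rcr(uint32_t rcr, bool rmii, bool link_is_100)
{
	if (rmii)
		rcr |= DEMO_RCR_RMII_MODE;
	else
		rcr &= ~DEMO_RCR_RMII_MODE;

	if (link_is_100)
		rcr &= ~DEMO_RCR_10T_MODE;
	else
		rcr |= DEMO_RCR_10T_MODE;

	return rcr;
}
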
@@ -1316,7 +1423,7 @@
 	}
 	clk_enable(fep->clk);
 
-	ret = fec_enet_init(ndev, 0);
+	ret = fec_enet_init(ndev);
 	if (ret)
 		goto failed_init;
 
@@ -1380,8 +1487,10 @@
 
 	if (ndev) {
 		fep = netdev_priv(ndev);
-		if (netif_running(ndev))
-			fec_enet_close(ndev);
+		if (netif_running(ndev)) {
+			fec_stop(ndev);
+			netif_device_detach(ndev);
+		}
 		clk_disable(fep->clk);
 	}
 	return 0;
@@ -1396,8 +1505,10 @@
 	if (ndev) {
 		fep = netdev_priv(ndev);
 		clk_enable(fep->clk);
-		if (netif_running(ndev))
-			fec_enet_open(ndev);
+		if (netif_running(ndev)) {
+			fec_restart(ndev, fep->full_duplex);
+			netif_device_attach(ndev);
+		}
 	}
 	return 0;
 }
@@ -1414,12 +1525,13 @@
 
 static struct platform_driver fec_driver = {
 	.driver	= {
-		.name	= "fec",
+		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &fec_pm_ops,
 #endif
 	},
+	.id_table = fec_devtype,
 	.probe	= fec_probe,
 	.remove	= __devexit_p(fec_drv_remove),
 };
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25..ace318d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
 /****************************************************************************/
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 /*
  *	Just figures, Motorola would have to change the offsets for
  *	registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
 /*
  *	Define the buffer descriptor structure.
  */
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 struct bufdesc {
 	unsigned short cbd_datlen;	/* Data length */
 	unsigned short cbd_sc;	/* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d..af09296 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@
 		writel(flags, base + NvRegWakeUpFlags);
 		spin_unlock_irq(&np->lock);
 	}
+	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
 	return 0;
 }
 
@@ -5488,14 +5489,10 @@
 	/* set mac address */
 	nv_copy_mac_to_hw(dev);
 
-	/* Workaround current PCI init glitch:  wakeup bits aren't
-	 * being set from PCI PM capability.
-	 */
-	device_init_wakeup(&pci_dev->dev, 1);
-
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
+	device_set_wakeup_enable(&pci_dev->dev, false);
 
 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
 
@@ -5746,8 +5743,9 @@
 }
 
 #ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5761,17 @@
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
-	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i, rc = 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, PCI_D0, 0);
-
 	/* restore non-pci configuration space */
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5790,9 @@
 	return rc;
 }
 
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
 static void nv_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5815,13 @@
 	 * only put the device into D3 if we really go for poweroff.
 	 */
 	if (system_state == SYSTEM_POWER_OFF) {
-		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
-			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+		pci_wake_from_d3(pdev, np->wolenabled);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 }
 #else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
 #define nv_shutdown NULL
-#define nv_resume NULL
 #endif /* CONFIG_PM */
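
The forcedeth hunks above convert the driver from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops: the explicit pci_save_state()/pci_set_power_state() calls disappear because the PCI core now handles them, and the callbacks are wired up through SIMPLE_DEV_PM_OPS and driver.pm. The following is a hedged sketch of the same conversion pattern for a hypothetical "foo" PCI network driver; only the PM plumbing is shown.

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/netdevice.h>

static int foo_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	/* Quiesce the device only; the PCI core saves config space and
	 * changes the power state on our behalf.
	 */
	netif_device_detach(dev);
	return 0;
}

static int foo_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* .id_table, .probe and .remove omitted in this sketch */
	.driver.pm	= &foo_pm_ops,
};
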
 
 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5993,8 @@
 	.id_table	= pci_tbl,
 	.probe		= nv_probe,
 	.remove		= __devexit_p(nv_remove),
-	.suspend	= nv_suspend,
-	.resume		= nv_resume,
 	.shutdown	= nv_shutdown,
+	.driver.pm	= NV_PM_OPS,
 };
 
 static int __init init_nic(void)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index d684f18..7a1f3d0 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -40,6 +40,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
+#include <linux/of_net.h>
 
 #include <linux/vmalloc.h>
 #include <asm/pgtable.h>
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 45c4b7b..119aa20 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -95,6 +95,7 @@
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "gianfar.h"
 #include "fsl_pq_mdio.h"
@@ -433,7 +434,6 @@
 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct netdev_queue *txq;
 	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
 	unsigned long tx_packets = 0, tx_bytes = 0;
 	int i = 0;
@@ -449,9 +449,8 @@
 	dev->stats.rx_dropped = rx_dropped;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		tx_bytes += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
+		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+		tx_packets += priv->tx_queue[i]->stats.tx_packets;
 	}
 
 	dev->stats.tx_bytes = tx_bytes;
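
gfar_get_stats() above stops reading the generic netdev_queue counters and instead sums the driver's own per-TX-queue counters, which the xmit path now updates directly. A minimal sketch of that aggregation, using simplified names patterned after struct tx_q_stats:

struct demo_txq_stats {
	unsigned long tx_packets;
	unsigned long tx_bytes;
};

static void demo_sum_tx_stats(const struct demo_txq_stats *queues,
			      int num_queues, unsigned long *packets,
			      unsigned long *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;
	for (i = 0; i < num_queues; i++) {
		*packets += queues[i].tx_packets;
		*bytes += queues[i].tx_bytes;
	}
}
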
@@ -2108,8 +2107,8 @@
 	}
 
 	/* Update transmit stats */
-	txq->tx_bytes += skb->len;
-	txq->tx_packets ++;
+	tx_queue->stats.tx_bytes += skb->len;
+	tx_queue->stats.tx_packets++;
 
 	txbdp = txbdp_start = tx_queue->cur_tx;
 	lstatus = txbdp->lstatus;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 68984eb..54de413 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -907,12 +907,21 @@
 	MQ_MG_MODE
 };
 
+/*
+ * Per TX queue stats
+ */
+struct tx_q_stats {
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+};
+
 /**
  *	struct gfar_priv_tx_q - per tx queue structure
  *	@txlock: per queue tx spin lock
  *	@tx_skbuff:skb pointers
  *	@skb_curtx: to be used skb pointer
  *	@skb_dirtytx:the last used skb pointer
+ *	@stats: bytes/packets stats
  *	@qindex: index of this queue
  *	@dev: back pointer to the dev structure
  *	@grp: back pointer to the group to which this queue belongs
@@ -934,6 +943,7 @@
 	struct	txbd8 *tx_bd_base;
 	struct	txbd8 *cur_tx;
 	struct	txbd8 *dirty_tx;
+	struct tx_q_stats stats;
 	struct	net_device *dev;
 	struct gfar_priv_grp *grp;
 	u16	skb_curtx;
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 27d6960..fdb0333 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -356,6 +356,8 @@
 		dev_dbg(&dev->dev, " starting queue\n");
 	netif_start_queue(dev);
 
+	GRETH_REGSAVE(greth->regs->status, 0xFF);
+
 	napi_enable(&greth->napi);
 
 	greth_enable_irqs(greth);
@@ -371,7 +373,9 @@
 
 	napi_disable(&greth->napi);
 
+	greth_disable_irqs(greth);
 	greth_disable_tx(greth);
+	greth_disable_rx(greth);
 
 	netif_stop_queue(dev);
 
@@ -388,12 +392,20 @@
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
 	int err = NETDEV_TX_OK;
-	u32 status, dma_addr;
+	u32 status, dma_addr, ctrl;
+	unsigned long flags;
 
-	bdp = greth->tx_bd_base + greth->tx_next;
+	/* Clean TX Ring */
+	greth_clean_tx(greth->netdev);
 
 	if (unlikely(greth->tx_free <= 0)) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -406,13 +418,14 @@
 		goto out;
 	}
 
+	bdp = greth->tx_bd_base + greth->tx_next;
 	dma_addr = greth_read_bd(&bdp->addr);
 
 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
 	/* Wrap around descriptor ring */
 	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -422,22 +435,11 @@
 	greth->tx_next = NEXT_TX(greth->tx_next);
 	greth->tx_free--;
 
-	/* No more descriptors */
-	if (unlikely(greth->tx_free == 0)) {
-
-		/* Free transmitted descriptors */
-		greth_clean_tx(dev);
-
-		/* If nothing was cleaned, stop queue & wait for irq */
-		if (unlikely(greth->tx_free == 0)) {
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
-	}
-
 	/* Write descriptor control word and enable transmission */
 	greth_write_bd(&bdp->stat, status);
+	spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
 	dev_kfree_skb(skb);
@@ -450,13 +452,23 @@
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr;
+	u32 status = 0, dma_addr, ctrl;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+	unsigned long flags;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* Clean TX Ring */
+	greth_clean_tx_gbit(dev);
+
 	if (greth->tx_free < nr_frags + 1) {
+		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if (ctrl & GRETH_RXI)
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -499,7 +511,7 @@
 		greth->tx_skbuff[curr_tx] = NULL;
 		bdp = greth->tx_bd_base + curr_tx;
 
-		status = GRETH_TXBD_CSALL;
+		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
 		status |= frag->size & GRETH_BD_LEN;
 
 		/* Wrap around descriptor ring */
@@ -509,14 +521,8 @@
 		/* More fragments left */
 		if (i < nr_frags - 1)
 			status |= GRETH_TXBD_MORE;
-
-		/* ... last fragment, check if out of descriptors  */
-		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-			/* Enable interrupts and stop queue */
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
+		else
+			status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
 		greth_write_bd(&bdp->stat, status);
 
@@ -536,26 +542,29 @@
 
 	wmb();
 
-	/* Enable the descriptors that we configured ...  */
-	for (i = 0; i < nr_frags + 1; i++) {
-		bdp = greth->tx_bd_base + greth->tx_next;
-		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-		greth->tx_next = NEXT_TX(greth->tx_next);
-		greth->tx_free--;
-	}
+	/* Enable the descriptor chain by enabling the first descriptor */
+	bdp = greth->tx_bd_base + greth->tx_next;
+	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+	greth->tx_next = curr_tx;
+	greth->tx_free -= nr_frags + 1;
 
+	wmb();
+
+	spin_lock_irqsave(&greth->devlock, flags); /* save from poll/irq */
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
 
 frag_map_error:
-	/* Unmap SKB mappings that succeeded */
+	/* Unmap SKB mappings that succeeded and disable descriptor */
 	for (i = 0; greth->tx_next + i != curr_tx; i++) {
 		bdp = greth->tx_bd_base + greth->tx_next + i;
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
 				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 				 DMA_TO_DEVICE);
+		greth_write_bd(&bdp->stat, 0);
 	}
 map_error:
 	if (net_ratelimit())
@@ -565,12 +574,11 @@
 	return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct greth_private *greth;
-	u32 status;
+	u32 status, ctrl;
 	irqreturn_t retval = IRQ_NONE;
 
 	greth = netdev_priv(dev);
@@ -580,13 +588,15 @@
 	/* Get the interrupt events that caused us to be here. */
 	status = GRETH_REGLOAD(greth->regs->status);
 
+	/* Must also check whether the interrupts are enabled: the INT_TX/INT_RX
+	 * status flags may be set regardless of whether the IRQ is enabled.
+	 * This is especially important when the IRQ line is shared.
+	 */
+	ctrl = GRETH_REGLOAD(greth->regs->control);
+
 	/* Handle rx and tx interrupts through poll */
-	if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
-
-		/* Clear interrupt status */
-		GRETH_REGORIN(greth->regs->status,
-			      status & (GRETH_INT_RX | GRETH_INT_TX));
-
+	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 		retval = IRQ_HANDLED;
 
 		/* Disable interrupts and schedule poll() */
@@ -610,6 +620,8 @@
 
 	while (1) {
 		bdp = greth->tx_bd_base + greth->tx_last;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
 		stat = greth_read_bd(&bdp->stat);
 
 		if (unlikely(stat & GRETH_BD_EN))
@@ -670,7 +682,10 @@
 
 		/* We only clean fully completed SKBs */
 		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-		stat = bdp_last_frag->stat;
+
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
+		stat = greth_read_bd(&bdp_last_frag->stat);
 
 		if (stat & GRETH_BD_EN)
 			break;
@@ -702,21 +717,9 @@
 		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
-	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
-		netif_wake_queue(dev);
-	}
-}
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-	struct greth_bd *bdp;
-	u32 status;
-	bdp = greth->rx_bd_base + greth->rx_cur;
-	status = greth_read_bd(&bdp->stat);
-	if (status & GRETH_BD_EN)
-		return 0;
-	else
-		return 1;
+	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
+		netif_wake_queue(dev);
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -727,20 +730,24 @@
 	int pkt_len;
 	int bad, count;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
 	for (count = 0; count < limit; ++count) {
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
-		dma_addr = greth_read_bd(&bdp->addr);
-		bad = 0;
 
 		if (unlikely(status & GRETH_BD_EN)) {
 			break;
 		}
 
+		dma_addr = greth_read_bd(&bdp->addr);
+		bad = 0;
+
 		/* Check status for errors. */
 		if (unlikely(status & GRETH_RXBD_STATUS)) {
 			if (status & GRETH_RXBD_ERR_FT) {
@@ -802,7 +809,9 @@
 
 		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -836,6 +845,7 @@
 	int pkt_len;
 	int bad, count = 0;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
@@ -843,6 +853,8 @@
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
 		skb = greth->rx_skbuff[greth->rx_cur];
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
 		bad = 0;
 
@@ -865,10 +877,9 @@
 			}
 		}
 
-		/* Allocate new skb to replace current */
-		newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
-
-		if (!bad && newskb) {
+		/* Allocate a new skb to replace the current one; not needed
+		 * if the current skb can be reused */
+		if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
 			skb_reserve(newskb, NET_IP_ALIGN);
 
 			dma_addr = dma_map_single(greth->dev,
@@ -905,11 +916,22 @@
 				if (net_ratelimit())
 					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
 				dev_kfree_skb(newskb);
+				/* reusing current skb, so it is a drop */
 				dev->stats.rx_dropped++;
 			}
+		} else if (bad) {
+			/* Bad Frame transfer, the skb is reused */
+			dev->stats.rx_dropped++;
 		} else {
+			/* Failed to allocate a new skb. This is rather stupid,
+			 * but the current "filled" skb is reused, as if the
+			 * transfer had failed. One could argue that RX descriptor
+			 * table handling should be split into cleaning and
+			 * filling stages, as in the TX part of the driver.
+			 */
 			if (net_ratelimit())
 				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+			/* reusing current skb, so it is a drop */
 			dev->stats.rx_dropped++;
 		}
 
@@ -920,7 +942,9 @@
 
 		wmb();
 		greth_write_bd(&bdp->stat, status);
+		spin_lock_irqsave(&greth->devlock, flags);
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
 
@@ -932,15 +956,18 @@
 {
 	struct greth_private *greth;
 	int work_done = 0;
+	unsigned long flags;
+	u32 mask, ctrl;
 	greth = container_of(napi, struct greth_private, napi);
 
-	if (greth->gbit_mac) {
-		greth_clean_tx_gbit(greth->netdev);
-	} else {
-		greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+	if (netif_queue_stopped(greth->netdev)) {
+		if (greth->gbit_mac)
+			greth_clean_tx_gbit(greth->netdev);
+		else
+			greth_clean_tx(greth->netdev);
 	}
 
-restart_poll:
 	if (greth->gbit_mac) {
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
@@ -949,15 +976,29 @@
 
 	if (work_done < budget) {
 
-		napi_complete(napi);
+		spin_lock_irqsave(&greth->devlock, flags);
 
-		if (greth_pending_packets(greth)) {
-			napi_reschedule(napi);
-			goto restart_poll;
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		if (netif_queue_stopped(greth->netdev)) {
+			GRETH_REGSAVE(greth->regs->control,
+					ctrl | GRETH_TXI | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE |
+			       GRETH_INT_TX | GRETH_INT_TE;
+		} else {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE;
+		}
+
+		if (GRETH_REGLOAD(greth->regs->status) & mask) {
+			GRETH_REGSAVE(greth->regs->control, ctrl);
+			spin_unlock_irqrestore(&greth->devlock, flags);
+			goto restart_txrx_poll;
+		} else {
+			__napi_complete(napi);
+			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
 
-	greth_enable_irqs(greth);
 	return work_done;
 }
 
@@ -1152,11 +1193,11 @@
 };
 
 static struct net_device_ops greth_netdev_ops = {
-	.ndo_open = greth_open,
-	.ndo_stop = greth_close,
-	.ndo_start_xmit = greth_start_xmit,
-	.ndo_set_mac_address = greth_set_mac_add,
-	.ndo_validate_addr 	= eth_validate_addr,
+	.ndo_open		= greth_open,
+	.ndo_stop		= greth_close,
+	.ndo_start_xmit		= greth_start_xmit,
+	.ndo_set_mac_address	= greth_set_mac_add,
+	.ndo_validate_addr	= eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)
@@ -1217,29 +1258,26 @@
 	struct greth_private *greth = netdev_priv(dev);
 	struct phy_device *phydev = greth->phy;
 	unsigned long flags;
-
 	int status_change = 0;
+	u32 ctrl;
 
 	spin_lock_irqsave(&greth->devlock, flags);
 
 	if (phydev->link) {
 
 		if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
-
-			GRETH_REGANDIN(greth->regs->control,
-				       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+			ctrl = GRETH_REGLOAD(greth->regs->control) &
+			       ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 
 			if (phydev->duplex)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
+				ctrl |= GRETH_CTRL_FD;
 
-			if (phydev->speed == SPEED_100) {
-
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
-			}
-
+			if (phydev->speed == SPEED_100)
+				ctrl |= GRETH_CTRL_SP;
 			else if (phydev->speed == SPEED_1000)
-				GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+				ctrl |= GRETH_CTRL_GB;
 
+			GRETH_REGSAVE(greth->regs->control, ctrl);
 			greth->speed = phydev->speed;
 			greth->duplex = phydev->duplex;
 			status_change = 1;
@@ -1600,6 +1638,9 @@
 	{
 	 .name = "GAISLER_ETHMAC",
 	 },
+	{
+	 .name = "01_01d",
+	 },
 	{},
 };
 
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index 03ad903..be0f206 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -23,6 +23,7 @@
 #define GRETH_BD_LEN 0x7FF
 
 #define GRETH_TXEN 0x1
+#define GRETH_INT_TE 0x2
 #define GRETH_INT_TX 0x8
 #define GRETH_TXI 0x4
 #define GRETH_TXBD_STATUS 0x0001C000
@@ -35,6 +36,7 @@
 #define GRETH_TXBD_ERR_UE 0x4000
 #define GRETH_TXBD_ERR_AL 0x8000
 
+#define GRETH_INT_RE         0x1
 #define GRETH_INT_RX         0x4
 #define GRETH_RXEN           0x2
 #define GRETH_RXI            0x8
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0..7d9ced0 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@
 	while (p) {
 		if (p->bitrate == bitrate) {
 			memcpy(p->bits, bits, YAM_FPGA_SIZE);
-			return p->bits;
+			goto out;
 		}
 		p = p->next;
 	}
@@ -411,7 +411,7 @@
 	p->bitrate = bitrate;
 	p->next = yam_data;
 	yam_data = p;
-
+ out:
 	release_firmware(fw);
 	return p->bits;
 }
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 8f11d29..6d9275c 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1279,7 +1279,7 @@
 	netif_carrier_off(dev->ndev);
 	smp_rmb();
 	if (dev->link_polling) {
-		cancel_rearming_delayed_work(&dev->link_work);
+		cancel_delayed_work_sync(&dev->link_work);
 		if (dev->link_polling)
 			schedule_delayed_work(&dev->link_work,  PHY_POLL_LINK_OFF);
 	}
@@ -1294,7 +1294,7 @@
 
 	if (dev->phy.address >= 0) {
 		dev->link_polling = 0;
-		cancel_rearming_delayed_work(&dev->link_work);
+		cancel_delayed_work_sync(&dev->link_work);
 	}
 	mutex_lock(&dev->link_lock);
 	emac_netif_stop(dev);
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f0..e3b285a 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
 #include <asm/cacheflush.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
+#include <mach/bfin_serial_5xx.h>
+#undef DRIVER_NAME
 
 #ifdef CONFIG_SIR_BFIN_DMA
 struct dma_rx_buf {
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 4dc39e5..77fcf44 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -30,7 +30,7 @@
  *     or the type-DO IR port.
  *
  * IrDA chip set list from Toshiba Computer Engineering Corp.
- * model			method	maker	controler		Version 
+ * model			method	maker	controller		Version 
  * Portege 320CT	FIR,SIR Toshiba Oboe(Triangle) 
  * Portege 3010CT	FIR,SIR Toshiba Oboe(Sydney) 
  * Portege 3015CT	FIR,SIR Toshiba Oboe(Sydney) 
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8..3b8c924 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                 struct ixgbe_atr_input *input,
+						 union ixgbe_atr_hash_dword input,
+						 union ixgbe_atr_hash_dword common,
                                                  u8 queue);
 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_atr_input *input,
+                                      union ixgbe_atr_input *input,
                                       struct ixgbe_atr_input_masks *input_masks,
                                       u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
-                                       u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 l4type);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c22..a21f581 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@
 		udelay(10);
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw ,"Flow Director previous command isn't complete, "
+		hw_dbg(hw, "Flow Director previous command isn't complete, "
 		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
@@ -1079,7 +1079,7 @@
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -1113,13 +1113,10 @@
 	/* Move the flexible bytes to use the ethertype - shift 6 words */
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
-	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1170,7 +1167,7 @@
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -1209,10 +1206,8 @@
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1251,8 +1246,8 @@
  *  @stream: input bitstream to compute the hash on
  *  @key: 32-bit hash key
  **/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
-                                        u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+					u32 key)
 {
 	/*
 	 * The algorithm is as follows:
@@ -1272,410 +1267,250 @@
 	 *    To simplify for programming, the algorithm is implemented
 	 *    in software this way:
 	 *
-	 *    Key[31:0], Stream[335:0]
+	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
 	 *
-	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
-	 *    int_key[350:0] = tmp_key[351:1]
-	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+	 *    for (i = 0; i < 352; i+=32)
+	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
 	 *
-	 *    hash[15:0] = 0;
-	 *    for (i = 0; i < 351; i++) {
-	 *        if (int_key[i])
-	 *            hash ^= int_stream[(i + 15):i];
+	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
+	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+	 *
+	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
+	 *
+	 *    if(key[0])
+	 *        hash[15:0] ^= Stream[15:0];
+	 *
+	 *    for (i = 0; i < 16; i++) {
+	 *        if (key[i])
+	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
+	 *        if (key[i + 16])
+	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
 	 *    }
+	 *
 	 */
+	__be32 common_hash_dword = 0;
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 hash_result = 0;
+	u8 i;
 
-	union {
-		u64    fill[6];
-		u32    key[11];
-		u8     key_stream[44];
-	} tmp_key;
+	/* record the flow_vm_vlan bits as they are a key part of the hash */
+	flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
 
-	u8   *stream = (u8 *)atr_input;
-	u8   int_key[44];      /* upper-most bit unused */
-	u8   hash_str[46];     /* upper-most 2 bits unused */
-	u16  hash_result = 0;
-	int  i, j, k, h;
+	/* generate common hash dword */
+	for (i = 10; i; i -= 2)
+		common_hash_dword ^= atr_input->dword_stream[i] ^
+				     atr_input->dword_stream[i - 1];
+
+	hi_hash_dword = ntohl(common_hash_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	if (key & 0x0001) hash_result ^= lo_hash_dword;
+	if (key & 0x00010000) hash_result ^= hi_hash_dword;
 
 	/*
-	 * Initialize the fill member to prevent warnings
-	 * on some compilers
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we delay this
+	 * because bit 0 of the stream should not be processed, so we do not
+	 * add the vlan until after bit 0 has been processed
 	 */
-	 tmp_key.fill[0] = 0;
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	/* First load the temporary key stream */
-	for (i = 0; i < 6; i++) {
-		u64 fillkey = ((u64)key << 32) | key;
-		tmp_key.fill[i] = fillkey;
+
+	/* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i-- ) {
+		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
 	}
 
-	/*
-	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
-	 * shift and compensate when building the key.
-	 */
-
-	int_key[0] = tmp_key.key_stream[0] >> 1;
-	for (i = 1, j = 0; i < 44; i++) {
-		unsigned int this_key = tmp_key.key_stream[j] << 7;
-		j++;
-		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
-	}
-
-	/*
-	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
-	 * unused, so shift and compensate when building the string.
-	 */
-	hash_str[0] = (stream[40] & 0x7f) >> 1;
-	for (i = 1, j = 40; i < 46; i++) {
-		unsigned int this_str = stream[j] << 7;
-		j++;
-		if (j > 41)
-			j = 0;
-		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
-	}
-
-	/*
-	 * Now compute the hash.  i is the index into hash_str, j is into our
-	 * key stream, k is counting the number of bits, and h interates within
-	 * each byte.
-	 */
-	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
-		for (h = 0; h < 8 && k < 351; h++, k++) {
-			if (int_key[j] & (1 << h)) {
-				/*
-				 * Key bit is set, XOR in the current 16-bit
-				 * string.  Example of processing:
-				 *    h = 0,
-				 *      tmp = (hash_str[i - 2] & 0 << 16) |
-				 *            (hash_str[i - 1] & 0xff << 8) |
-				 *            (hash_str[i] & 0xff >> 0)
-				 *      So tmp = hash_str[15 + k:k], since the
-				 *      i + 2 clause rolls off the 16-bit value
-				 *    h = 7,
-				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
-				 *            (hash_str[i - 1] & 0xff << 1) |
-				 *            (hash_str[i] & 0x80 >> 7)
-				 */
-				int tmp = (hash_str[i] >> h);
-				tmp |= (hash_str[i - 1] << (8 - h));
-				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
-				             << (16 - h);
-				hash_result ^= (u16)tmp;
-			}
-		}
-	}
-
-	return hash_result;
+	return hash_result & IXGBE_ATR_HASH_MASK;
 }
 
-/**
- *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- *  @input: input stream to modify
- *  @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
-	return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
 
 /**
- *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- *  @input: input stream to modify
- *  @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
-	                                               (src_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
-	                                                (src_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- *  @input: input stream to modify
- *  @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
-	                                               (dst_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
-	                                                (dst_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_src_port_82599 - Sets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
-	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- *  @input: input stream to search
- *  @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
-	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
-	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- *  @input: input stream to search
- *  @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr)
-{
-	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- *  @input: input stream to search
- *  @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *dst_addr)
-{
-	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- *  @input: input stream to search
- *  @src_addr_1: the first 4 bytes of the IP address to load
- *  @src_addr_2: the second 4 bytes of the IP address to load
- *  @src_addr_3: the third 4 bytes of the IP address to load
- *  @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr_1, u32 *src_addr_2,
-                                        u32 *src_addr_3, u32 *src_addr_4)
-{
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_port_82599 - Gets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
+ *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ *  @input: unique input dword
+ *  @common: compressed common input dword
  *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
+ *  This function is almost identical to the function above but contains
+ *  several optimizations such as unrolling all of the loops, letting the
+ *  compiler work out all of the conditional ifs since the keys are static
+ *  defines, and computing two keys at once since the hashed dword stream
+ *  will be the same for both keys.
  **/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common)
 {
-	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
-	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
 
-	return 0;
-}
+	/* record the flow_vm_vlan bits as they are a key part of the hash */
+	flow_vm_vlan = ntohl(input.dword);
 
-/**
- *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *dst_port)
-{
-	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
-	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(common.dword);
 
-	return 0;
-}
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
 
-/**
- *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 *flex_byte)
-{
-	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
-	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
 
-	return 0;
-}
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
 
-/**
- *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 *l4type)
-{
-	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we delay this
+	 * because bit 0 of the stream should not be processed, so we do not
+	 * add the vlan until after bit 0 has been processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	return 0;
+	/* Process the remaining 30 bits of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
 }
 
 /**
  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
  *  @hw: pointer to hardware structure
- *  @stream: input bitstream
+ *  @input: unique input dword
+ *  @common: compressed common input dword
  *  @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_atr_input *input,
+                                          union ixgbe_atr_hash_dword input,
+                                          union ixgbe_atr_hash_dword common,
                                           u8 queue)
 {
 	u64  fdirhashcmd;
-	u64  fdircmd;
-	u32  fdirhash;
-	u16  bucket_hash, sig_hash;
-	u8   l4type;
+	u32  fdircmd;
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
+	/*
+	 * Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+	 */
+	switch (input.formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		hw_dbg(hw, " Error on flow type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
 
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	sig_hash = ixgbe_atr_compute_hash_82599(input,
-	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/* Get the l4type in order to program FDIRCMD properly */
-	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
 	/*
 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 	 */
-	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
-		break;
-	default:
-		hw_dbg(hw, "Error on l4type input\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-
-	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
-	fdirhashcmd = ((fdircmd << 32) | fdirhash);
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
 
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
+	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
 	return 0;
 }
 
 /**
+ *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
+ *  @input_masks: port masks to be bit swapped
+ *
+ *  The source and destination port masks for flow director are bit swapped
+ *  in that bit 15 affects bit 0, 14 affects bit 1, 13 affects bit 2, etc.
+ *  In order to generate a correctly swapped value we need to bit swap the
+ *  mask, and that is what this function accomplishes.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+	u32 mask = ntohs(input_masks->dst_port_mask);
+	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+	mask |= ntohs(input_masks->src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result, on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+/**
  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
  *  @hw: pointer to hardware structure
  *  @input: input bitstream
@@ -1687,135 +1522,139 @@
  *  hardware writes must be protected from one another.
  **/
 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_atr_input *input,
+                                      union ixgbe_atr_input *input,
                                       struct ixgbe_atr_input_masks *input_masks,
                                       u16 soft_id, u8 queue)
 {
-	u32 fdircmd = 0;
 	u32 fdirhash;
-	u32 src_ipv4 = 0, dst_ipv4 = 0;
-	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
-	u16 src_port, dst_port, vlan_id, flex_bytes;
-	u16 bucket_hash;
-	u8  l4type;
-	u8  fdirm = 0;
-
-	/* Get our input values */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	u32 fdircmd;
+	u32 fdirport, fdirtcpm;
+	u32 fdirvlan;
+	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 
 	/*
-	 * Check l4type formatting, and bail out before we touch the hardware
+	 * Check flow_type formatting, and bail out before we touch the hardware
 	 * if there's a configuration issue
 	 */
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+	switch (input->formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_IPV4:
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= IXGBE_FDIRM_L4P;
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+			hw_dbg(hw, " Error on src/dst port mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 		break;
 	default:
-		hw_dbg(hw, "Error on l4type input\n");
+		hw_dbg(hw, " Error on flow type input\n");
 		return IXGBE_ERR_CONFIG;
 	}
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
-
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
-	ixgbe_atr_get_src_port_82599(input, &src_port);
-	ixgbe_atr_get_dst_port_82599(input, &dst_port);
-	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
-	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	/* Now figure out if we're IPv4 or IPv6 */
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-		/* IPv6 */
-		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
-	                                     &src_ipv6_3, &src_ipv6_4);
-
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
-		/* The last 4 bytes is the same register as IPv4 */
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
-	} else {
-		/* IPv4 */
-		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-	}
-
-	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
-	                            (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
-	              (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
 	/*
-	 * Program the relevant mask registers.  L4type cannot be
-	 * masked out in this implementation.
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
 	 *
 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
 	 * point in time.
 	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
 
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
-				 (input_masks->dst_port_mask << 16)));
+	/* Program FDIRM */
+	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+	case 0xEFFF:
+		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+	case 0xE000:
+		/* Unmask VLAN prio - bit 1 */
+		fdirm &= ~IXGBE_FDIRM_VLANP;
 		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
-				 (input_masks->src_port_mask << 16)));
+	case 0x0FFF:
+		/* Unmask VLAN ID - bit 0 */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+		break;
+	case 0x0000:
+		/* do nothing, vlans already masked */
 		break;
 	default:
-		/* this already would have failed above */
-		break;
+		hw_dbg(hw, " Error on VLAN mask\n");
+		return IXGBE_ERR_CONFIG;
 	}
 
-	/* Program the last mask register, FDIRM */
-	if (input_masks->vlan_id_mask)
-		/* Mask both VLAN and VLANP - bits 0 and 1 */
-		fdirm |= 0x3;
-
-	if (input_masks->data_mask)
-		/* Flex bytes need masking, so mask the whole thing - bit 4 */
-		fdirm |= 0x10;
+	if (input_masks->flex_mask & 0xFFFF) {
+		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+			hw_dbg(hw, " Error on flexible byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* Unmask Flex Bytes - bit 4 */
+		fdirm &= ~IXGBE_FDIRM_FLEX;
+	}
 
 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	fdirm |= 0x24;
-
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
-	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
-	fdircmd |= IXGBE_FDIRCMD_LAST;
-	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+			     ~input_masks->src_ip_mask[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+			     ~input_masks->dst_ip_mask[0]);
+
+	/* Apply masks to input data */
+	input->formatted.vlan_id &= input_masks->vlan_id_mask;
+	input->formatted.flex_bytes &= input_masks->flex_mask;
+	input->formatted.src_port &= input_masks->src_port_mask;
+	input->formatted.dst_port &= input_masks->dst_port_mask;
+	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes (big-endian) */
+	fdirvlan =
+		IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= ntohs(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian) */
+	fdirport = ntohs(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= ntohs(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record the first 32 bits of the destination address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record the source address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	/* we only want the bucket hash so drop the upper 16 bits */
+	fdirhash = ixgbe_atr_compute_hash_82599(input,
+						IXGBE_ATR_BUCKET_HASH_KEY);
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
 
 	return 0;
 }
+
 /**
  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e..2002ea8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
-	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+	ixgbe_disable_rx_queue(adapter, rx_ring);
 
 	/* now Tx */
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@
                                struct ethtool_rx_ntuple *cmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
-	struct ixgbe_atr_input input_struct;
+	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+	union ixgbe_atr_input input_struct;
 	struct ixgbe_atr_input_masks input_masks;
 	int target_queue;
+	int err;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@
 	 * Don't allow programming if the action is a queue greater than
 	 * the number of online Tx queues.
 	 */
-	if ((fs.action >= adapter->num_tx_queues) ||
-	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+	if ((fs->action >= adapter->num_tx_queues) ||
+	    (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
 		return -EINVAL;
 
-	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+	memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
 	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
 
-	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
-	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
-	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
-	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
-	input_masks.vlan_id_mask = fs.vlan_tag_mask;
-	/* only use the lowest 2 bytes for flex bytes */
-	input_masks.data_mask = (fs.data_mask & 0xffff);
-
-	switch (fs.flow_type) {
+	/* record flow type */
+	switch (fs->flow_type) {
+	case IPV4_FLOW:
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+		break;
 	case TCP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		break;
 	case UDP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
 		break;
 	case SCTP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
 		break;
 	default:
 		return -1;
 	}
 
-	/* Mask bits from the inputs based on user-supplied mask */
-	ixgbe_atr_set_src_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
-	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
-	/* 82599 expects these to be byte-swapped for perfect filtering */
-	ixgbe_atr_set_src_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
-	ixgbe_atr_set_dst_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+	/* copy vlan tag minus the CFI bit */
+	if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+		input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+		if (!fs->vlan_tag_mask) {
+			input_masks.vlan_id_mask = htons(0xEFFF);
+		} else {
+			switch (~fs->vlan_tag_mask & 0xEFFF) {
+			/* all of these are valid vlan-mask values */
+			case 0xEFFF:
+			case 0xE000:
+			case 0x0FFF:
+			case 0x0000:
+				input_masks.vlan_id_mask =
+					htons(~fs->vlan_tag_mask);
+				break;
+			/* exit with error if vlan-mask is invalid */
+			default:
+				e_err(drv, "Partial VLAN ID or "
+				      "priority mask in vlan-mask is not "
+				      "supported by hardware\n");
+				return -1;
+			}
+		}
+	}
 
-	/* VLAN and Flex bytes are either completely masked or not */
-	if (!fs.vlan_tag_mask)
-		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+	/* make sure we only use the first 2 bytes of user data */
+	if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+		input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+		if (!(fs->data_mask & 0xFFFF)) {
+			input_masks.flex_mask = 0xFFFF;
+		} else if (~fs->data_mask & 0xFFFF) {
+			e_err(drv, "Partial user-def-mask is not "
+			      "supported by hardware\n");
+			return -1;
+		}
+	}
 
-	if (!input_masks.data_mask)
-		/* make sure we only use the first 2 bytes of user data */
-		ixgbe_atr_set_flex_byte_82599(&input_struct,
-		                              (fs.data & 0xffff));
+	/*
+	 * Copy input into formatted structures
+	 *
+	 * These assignments are based on the following logic
+	 * If neither input or mask are set assume value is masked out.
+	 * If input is set, but mask is not mask should default to accept all.
+	 * If input is not set, but mask is set then mask likely results in 0.
+	 * If input is set and mask is set then assign both.
+	 */
+	if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+		input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+		if (!fs->m_u.tcp_ip4_spec.ip4src)
+			input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.src_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4src;
+	}
+	if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+		input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+		if (!fs->m_u.tcp_ip4_spec.ip4dst)
+			input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.dst_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4dst;
+	}
+	if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+		input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+		if (!fs->m_u.tcp_ip4_spec.psrc)
+			input_masks.src_port_mask = 0xFFFF;
+		else
+			input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+	}
+	if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+		input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+		if (!fs->m_u.tcp_ip4_spec.pdst)
+			input_masks.dst_port_mask = 0xFFFF;
+		else
+			input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+	}
 
 	/* determine if we need to drop or route the packet */
-	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
 		target_queue = MAX_RX_QUEUES - 1;
 	else
-		target_queue = fs.action;
+		target_queue = fs->action;
 
 	spin_lock(&adapter->fdir_perfect_lock);
-	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
-	                                    &input_masks, 0, target_queue);
+	err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+						  &input_struct,
+						  &input_masks, 0,
+						  target_queue);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
-	return 0;
+	return err ? -1 : 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3..602078b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@
 	}
 }
 
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+		      "the polling period\n", reg_idx);
+	}
+}
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
@@ -3034,9 +3064,7 @@
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-			rxdctl & ~IXGBE_RXDCTL_ENABLE);
-	IXGBE_WRITE_FLUSH(hw);
+	ixgbe_disable_rx_queue(adapter, ring);
 
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	IXGBE_WRITE_FLUSH(hw);
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
 	msleep(10);
 
 	netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4821,12 @@
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+			      IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		e_err(probe,
+		      "Flow Director is not supported while multiple "
+		      "queues are disabled.  Disabling Flow Director\n");
+	}
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
@@ -5094,16 +5132,11 @@
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-		if (dev->features & NETIF_F_NTUPLE) {
-			/* Flow Director perfect filter enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-			adapter->atr_sample_rate = 0;
-			spin_lock_init(&adapter->fdir_perfect_lock);
-		} else {
-			/* Flow Director hash filters enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-			adapter->atr_sample_rate = 20;
-		}
+		/* n-tuple support exists, always init our spinlock */
+		spin_lock_init(&adapter->fdir_perfect_lock);
+		/* Flow Director hash filters enabled */
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->atr_sample_rate = 20;
 		adapter->ring_feature[RING_F_FDIR].indices =
 							 IXGBE_MAX_FDIR_INDICES;
 		adapter->fdir_pballoc = 0;
@@ -6474,38 +6507,92 @@
 	writel(i, tx_ring->tail);
 }
 
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      u8 queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+		      u32 tx_flags, __be16 protocol)
 {
-	struct ixgbe_atr_input atr_input;
-	struct iphdr *iph = ip_hdr(skb);
-	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct ixgbe_q_vector *q_vector = ring->q_vector;
+	union ixgbe_atr_hash_dword input = { .dword = 0 };
+	union ixgbe_atr_hash_dword common = { .dword = 0 };
+	union {
+		unsigned char *network;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
 	struct tcphdr *th;
-	u16 vlan_id;
+	__be16 vlan_id;
 
-	/* Right now, we support IPv4 w/ TCP only */
-	if (protocol != htons(ETH_P_IP) ||
-	    iph->protocol != IPPROTO_TCP)
+	/* if ring doesn't have an interrupt vector, cannot perform ATR */
+	if (!q_vector)
 		return;
 
-	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+	/* do nothing if sampling is disabled */
+	if (!ring->atr_sample_rate)
+		return;
 
-	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
-		   IXGBE_TX_FLAGS_VLAN_SHIFT;
+	ring->atr_count++;
+
+	/* snag network header to get L4 type and address */
+	hdr.network = skb_network_header(skb);
+
+	/* Currently only IPv4/IPv6 with TCP is supported */
+	if ((protocol != __constant_htons(ETH_P_IPV6) ||
+	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+	    (protocol != __constant_htons(ETH_P_IP) ||
+	     hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
 
 	th = tcp_hdr(skb);
 
-	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
-	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
-	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
-	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
-	/* src and dst are inverted, think how the receiver sees them */
-	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
-	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
+	/* skip this packet since the socket is closing */
+	if (th->fin)
+		return;
+
+	/* sample on all syn packets or once every atr sample count */
+	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+		return;
+
+	/* reset sample count */
+	ring->atr_count = 0;
+
+	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+	/*
+	 * src and dst are inverted, think how the receiver sees them
+	 *
+	 * The input is broken into two sections: a non-compressed section
+	 * containing vm_pool, vlan_id, and flow_type, and a compressed dword
+	 * holding the rest of the data XORed together.
+	 */
+	input.formatted.vlan_id = vlan_id;
+
+	/*
+	 * since src port and flex bytes occupy the same word, XOR them together
+	 * and write the value to the source port part of the compressed dword
+	 */
+	if (vlan_id)
+		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+	else
+		common.port.src ^= th->dest ^ protocol;
+	common.port.dst ^= th->source;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+	} else {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+			     hdr.ipv6->saddr.s6_addr32[1] ^
+			     hdr.ipv6->saddr.s6_addr32[2] ^
+			     hdr.ipv6->saddr.s6_addr32[3] ^
+			     hdr.ipv6->daddr.s6_addr32[0] ^
+			     hdr.ipv6->daddr.s6_addr32[1] ^
+			     hdr.ipv6->daddr.s6_addr32[2] ^
+			     hdr.ipv6->daddr.s6_addr32[3];
+	}
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
-	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+					      input, common, ring->queue_index);
 }
 
 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6580,8 +6667,6 @@
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
-	struct net_device *netdev = tx_ring->netdev;
-	struct netdev_queue *txq;
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
@@ -6676,19 +6761,8 @@
 	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
 	if (count) {
 		/* add the ATR filter if ATR is on */
-		if (tx_ring->atr_sample_rate) {
-			++tx_ring->atr_count;
-			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-			     test_bit(__IXGBE_TX_FDIR_INIT_DONE,
-				      &tx_ring->state)) {
-				ixgbe_atr(adapter, skb, tx_ring->queue_index,
-					  tx_flags, protocol);
-				tx_ring->atr_count = 0;
-			}
-		}
-		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
-		txq->tx_bytes += skb->len;
-		txq->tx_packets++;
+		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
 		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
 		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
@@ -6846,8 +6920,6 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	/* accurate rx/tx bytes/packets stats */
-	dev_txq_stats_fold(netdev, stats);
 	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
@@ -6864,6 +6936,22 @@
 			stats->rx_bytes   += bytes;
 		}
 	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes   += bytes;
+		}
+	}
 	rcu_read_unlock();
 	/* following stats updated by ixgbe_watchdog_task() */
 	stats->multicast	= netdev->stats.multicast;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467..fd3358f 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@
 #define IXGBE_FDIRM_VLANID                      0x00000001
 #define IXGBE_FDIRM_VLANP                       0x00000002
 #define IXGBE_FDIRM_POOL                        0x00000004
-#define IXGBE_FDIRM_L3P                         0x00000008
-#define IXGBE_FDIRM_L4P                         0x00000010
-#define IXGBE_FDIRM_FLEX                        0x00000020
-#define IXGBE_FDIRM_DIPv6                       0x00000040
+#define IXGBE_FDIRM_L4P                         0x00000008
+#define IXGBE_FDIRM_FLEX                        0x00000010
+#define IXGBE_FDIRM_DIPv6                       0x00000020
 
 #define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
 #define IXGBE_FDIRFREE_FREE_SHIFT               0
@@ -1990,6 +1989,7 @@
 #define IXGBE_FDIRCMD_LAST                      0x00000800
 #define IXGBE_FDIRCMD_COLLISION                 0x00001000
 #define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
 #define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
 #define IXGBE_FDIR_INIT_DONE_POLL               10
@@ -2147,51 +2147,80 @@
 #define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
 
 /* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
+#define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
 
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET       0
-#define IXGBE_ATR_SRC_IPV6_OFFSET   2
-#define IXGBE_ATR_SRC_IPV4_OFFSET  14
-#define IXGBE_ATR_DST_IPV6_OFFSET  18
-#define IXGBE_ATR_DST_IPV4_OFFSET  30
-#define IXGBE_ATR_SRC_PORT_OFFSET  34
-#define IXGBE_ATR_DST_PORT_OFFSET  36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET   40
-#define IXGBE_ATR_L4TYPE_OFFSET    41
-
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK     0x7fff
 #define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
 #define IXGBE_ATR_L4TYPE_UDP       0x1
 #define IXGBE_ATR_L4TYPE_TCP       0x2
 #define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_HASH_MASK     0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+	IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
+	IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
+	IXGBE_ATR_FLOW_TYPE_TCPV4  = 0x2,
+	IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	IXGBE_ATR_FLOW_TYPE_IPV6   = 0x4,
+	IXGBE_ATR_FLOW_TYPE_UDPV6  = 0x5,
+	IXGBE_ATR_FLOW_TYPE_TCPV6  = 0x6,
+	IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
 
 /* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
-	/* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
 	 *
+	 * vm_pool    - 1 byte
+	 * flow_type  - 1 byte
 	 * vlan_id    - 2 bytes
 	 * src_ip     - 16 bytes
 	 * dst_ip     - 16 bytes
 	 * src_port   - 2 bytes
 	 * dst_port   - 2 bytes
 	 * flex_bytes - 2 bytes
-	 * vm_pool    - 1 byte
-	 * l4type     - 1 byte
+	 * rsvd0      - 2 bytes - reserved space, must be 0.
 	 */
-	u8 byte_stream[42];
+	struct {
+		u8     vm_pool;
+		u8     flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 rsvd0;
+	} formatted;
+	__be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
 };
 
 struct ixgbe_atr_input_masks {
-	u32 src_ip_mask;
-	u32 dst_ip_mask;
-	u16 src_port_mask;
-	u16 dst_port_mask;
-	u16 vlan_id_mask;
-	u16 data_mask;
+	__be16 rsvd0;
+	__be16 vlan_id_mask;
+	__be32 dst_ip_mask[4];
+	__be32 src_ip_mask[4];
+	__be16 src_port_mask;
+	__be16 dst_port_mask;
+	__be16 flex_mask;
 };
 
 enum ixgbe_eeprom_type {
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 183765c..f35554d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -238,7 +238,7 @@
 		goto out;
 	}
 	/* allocate the tx and rx ring buffer descriptors. */
-	/* returns a virtual addres and a physical address. */
+	/* returns a virtual address and a physical address. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 					 &lp->tx_bd_p, GFP_KERNEL);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 21845af..5933621 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -585,7 +585,7 @@
 	rcu_read_lock_bh();
 	vlan = rcu_dereference(q->vlan);
 	if (vlan)
-		netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+		vlan->dev->stats.tx_dropped++;
 	rcu_read_unlock_bh();
 
 	return err;
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f..3a4277f 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@
 	} else {
 		int i;
 
+		buf->direct.buf  = NULL;
 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages      = buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
@@ -229,7 +230,7 @@
 		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
 				  buf->direct.map);
 	else {
-		if (BITS_PER_LONG == 64)
+		if (BITS_PER_LONG == 64 && buf->direct.buf)
 			vunmap(buf->direct.buf);
 
 		for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b..897f576 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@
 	int i;
 	int err;
 
-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+	    prof->tx_ring_num, prof->rx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18b..5de1db8 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 		dev_cap->bf_reg_size = 1 << (field & 0x1f);
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
-		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
-			mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 			field = 3;
-		}
 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
 			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a37fcf1..ea5cfe2 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -3403,9 +3403,7 @@
 		return -EIO;
 	}
 
-	status = pci_restore_state(pdev);
-	if (status)
-		return status;
+	pci_restore_state(pdev);
 
 	status = pci_enable_device(pdev);
 	if (status) {
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c15891..e953793 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@
 	PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
 	PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
 	PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
 	PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6..a1b82c9 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <asm/uaccess.h>
 #include <asm/string.h>
 
@@ -542,7 +543,7 @@
 	data = ap->tpkt->data;
 	count = ap->tpkt->len;
 	fcs = ap->tfcs;
-	proto = (data[0] << 8) + data[1];
+	proto = get_unaligned_be16(data);
 
 	/*
 	 * LCP packets with code values between 1 (configure-reqest)
@@ -963,7 +964,7 @@
 	code = data[0];
 	if (code != CONFACK && code != CONFREQ)
 		return;
-	dlen = (data[2] << 8) + data[3];
+	dlen = get_unaligned_be16(data + 2);
 	if (len < dlen)
 		return;		/* packet got truncated or length is bogus */
 
@@ -997,15 +998,14 @@
 	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
 		switch (data[0]) {
 		case LCP_MRU:
-			val = (data[2] << 8) + data[3];
+			val = get_unaligned_be16(data + 2);
 			if (inbound)
 				ap->mru = val;
 			else
 				ap->chan.mtu = val;
 			break;
 		case LCP_ASYNCMAP:
-			val = (data[2] << 24) + (data[3] << 16)
-				+ (data[4] << 8) + data[5];
+			val = get_unaligned_be32(data + 2);
 			if (inbound)
 				ap->raccm = val;
 			else
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83..4358330 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
 #include <linux/ppp-comp.h>
 
 #include <linux/zlib.h>
+#include <asm/unaligned.h>
 
 /*
  * State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@
 	 */
 	wptr[0] = PPP_ADDRESS(rptr);
 	wptr[1] = PPP_CONTROL(rptr);
-	wptr[2] = PPP_COMP >> 8;
-	wptr[3] = PPP_COMP;
+	put_unaligned_be16(PPP_COMP, wptr + 2);
 	wptr += PPP_HDRLEN;
-	wptr[0] = state->seqno >> 8;
-	wptr[1] = state->seqno;
+	put_unaligned_be16(state->seqno, wptr);
 	wptr += DEFLATE_OVHD;
 	olen = PPP_HDRLEN + DEFLATE_OVHD;
 	state->strm.next_out = wptr;
@@ -451,7 +450,7 @@
 	}
 
 	/* Check the sequence number. */
-	seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+	seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
 	if (seq != (state->seqno & 0xffff)) {
 		if (state->debug)
 			printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484..c7a6c44 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <net/slhc_vj.h>
 #include <asm/atomic.h>
 
@@ -210,7 +211,7 @@
 };
 
 /* Get the PPP protocol number from a skb */
-#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])
+#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
 
 /* We limit the length of ppp->file.rq to this (arbitrary) value */
 #define PPP_MAX_RQLEN	32
@@ -964,8 +965,7 @@
 
 	pp = skb_push(skb, 2);
 	proto = npindex_to_proto[npi];
-	pp[0] = proto >> 8;
-	pp[1] = proto;
+	put_unaligned_be16(proto, pp);
 
 	netif_stop_queue(dev);
 	skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@
 		q = skb_put(frag, flen + hdrlen);
 
 		/* make the MP header */
-		q[0] = PPP_MP >> 8;
-		q[1] = PPP_MP;
+		put_unaligned_be16(PPP_MP, q);
 		if (ppp->flags & SC_MP_XSHORTSEQ) {
 			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
 			q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b8..9a1849a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
 #include <linux/scatterlist.h>
+#include <asm/unaligned.h>
 
 #include "ppp_mppe.h"
 
@@ -395,16 +396,14 @@
 	 */
 	obuf[0] = PPP_ADDRESS(ibuf);
 	obuf[1] = PPP_CONTROL(ibuf);
-	obuf[2] = PPP_COMP >> 8;	/* isize + MPPE_OVHD + 1 */
-	obuf[3] = PPP_COMP;	/* isize + MPPE_OVHD + 2 */
+	put_unaligned_be16(PPP_COMP, obuf + 2);
 	obuf += PPP_HDRLEN;
 
 	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
 	if (state->debug >= 7)
 		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
 		       state->ccount);
-	obuf[0] = state->ccount >> 8;
-	obuf[1] = state->ccount & 0xff;
+	put_unaligned_be16(state->ccount, obuf);
 
 	if (!state->stateful ||	/* stateless mode     */
 	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3..4e6b72f 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <asm/uaccess.h>
 
 #define PPP_VERSION	"2.4.2"
@@ -563,7 +564,7 @@
 	int islcp;
 
 	data  = skb->data;
-	proto = (data[0] << 8) + data[1];
+	proto = get_unaligned_be16(data);
 
 	/* LCP packets with codes between 1 (configure-request)
 	 * and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d..44e316f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 14
-#define QLCNIC_LINUX_VERSIONID  "5.0.14"
+#define _QLCNIC_LINUX_SUBVERSION 15
+#define QLCNIC_LINUX_VERSIONID  "5.0.15"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@
 	u32	reserved[5];
 };
 
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION	0x3F1000
+#define QLCNIC_FW_IMAGE_REGION	0x74
+struct qlcnic_flt_header {
+	u16 version;
+	u16 len;
+	u16 checksum;
+	u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+	u8 region;
+	u8 reserved0;
+	u8 attrib;
+	u8 reserved1;
+	u32 size;
+	u32 start_addr;
+	u32 end_addr;
+};
+
 /* Magic number to let user know flash is programmed */
 #define	QLCNIC_BDINFO_MAGIC 0x12345678
 
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af70..4c14510 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@
 	if (data[1])
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 
-	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+	if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
 		data[2] = qlcnic_irq_test(dev);
 		if (data[2])
 			eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c3..a7f1d5b 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@
 	return 0;
 }
 
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+				struct qlcnic_flt_entry *region_entry)
+{
+	struct qlcnic_flt_header flt_hdr;
+	struct qlcnic_flt_entry *flt_entry;
+	int i = 0, ret;
+	u32 entry_size;
+
+	memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+	ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+					 (u8 *)&flt_hdr,
+					 sizeof(struct qlcnic_flt_header));
+	if (ret) {
+		dev_warn(&adapter->pdev->dev,
+			 "error reading flash layout header\n");
+		return -EIO;
+	}
+
+	entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+	flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
+	if (flt_entry == NULL) {
+		dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+		return -EIO;
+	}
+
+	ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+					 sizeof(struct qlcnic_flt_header),
+					 (u8 *)flt_entry, entry_size);
+	if (ret) {
+		dev_warn(&adapter->pdev->dev,
+			 "error reading flash layout entries\n");
+		goto err_out;
+	}
+
+	while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+		if (flt_entry[i].region == region)
+			break;
+		i++;
+	}
+	if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+		dev_warn(&adapter->pdev->dev,
+			 "region=%x not found in %d regions\n", region, i);
+		ret = -EIO;
+		goto err_out;
+	}
+	memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+	vfree(flt_entry);
+	return ret;
+}
+
 int
 qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_flt_entry fw_entry;
 	u32 ver = -1, min_ver;
+	int ret;
 
-	qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+	ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+	if (!ret)
+		/* 0-4: signature, 4-8: fw version */
+		qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+				     (int *)&ver);
+	else
+		qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+				     (int *)&ver);
 
 	ver = QLCNIC_DECODE_VERSION(ver);
 	min_ver = QLCNIC_MIN_FW_VERSION;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46..37c04b4 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@
 
 static struct workqueue_struct *qlcnic_wq;
 static int qlcnic_mac_learn;
-module_param(qlcnic_mac_learn, int, 0644);
+module_param(qlcnic_mac_learn, int, 0444);
 MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
 
 static int use_msi = 1;
-module_param(use_msi, int, 0644);
+module_param(use_msi, int, 0444);
 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
 
 static int use_msi_x = 1;
-module_param(use_msi_x, int, 0644);
+module_param(use_msi_x, int, 0444);
 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
 
 static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@
 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
 
 static int load_fw_file;
-module_param(load_fw_file, int, 0644);
+module_param(load_fw_file, int, 0444);
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 
 static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0644);
+module_param(qlcnic_config_npars, int, 0444);
 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
 
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20..bde7d61 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -554,6 +554,8 @@
 	struct mii_if_info mii;
 	struct rtl8169_counters counters;
 	u32 saved_wolopts;
+
+	const struct firmware *fw;
 };
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1632,42 +1634,163 @@
 {
 	__le32 *phytable = (__le32 *)fw->data;
 	struct net_device *dev = tp->dev;
-	size_t i;
+	size_t index, fw_size = fw->size / sizeof(*phytable);
+	u32 predata, count;
 
 	if (fw->size % sizeof(*phytable)) {
 		netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
 		return;
 	}
 
-	for (i = 0; i < fw->size / sizeof(*phytable); i++) {
-		u32 action = le32_to_cpu(phytable[i]);
+	for (index = 0; index < fw_size; index++) {
+		u32 action = le32_to_cpu(phytable[index]);
+		u32 regno = (action & 0x0fff0000) >> 16;
 
-		if (!action)
+		switch(action & 0xf0000000) {
+		case PHY_READ:
+		case PHY_DATA_OR:
+		case PHY_DATA_AND:
+		case PHY_READ_EFUSE:
+		case PHY_CLEAR_READCOUNT:
+		case PHY_WRITE:
+		case PHY_WRITE_PREVIOUS:
+		case PHY_DELAY_MS:
 			break;
 
-		if ((action & 0xf0000000) != PHY_WRITE) {
-			netif_err(tp, probe, dev,
-				  "unknown action 0x%08x\n", action);
+		case PHY_BJMPN:
+			if (regno > index) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+		case PHY_READCOUNT_EQ_SKIP:
+			if (index + 2 >= fw_size) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+		case PHY_COMP_EQ_SKIPN:
+		case PHY_COMP_NEQ_SKIPN:
+		case PHY_SKIPN:
+			if (index + 1 + regno >= fw_size) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+
+		case PHY_READ_MAC_BYTE:
+		case PHY_WRITE_MAC_BYTE:
+		case PHY_WRITE_ERI_WORD:
+		default:
+			netif_err(tp, probe, tp->dev,
+				  "Invalid action 0x%08x\n", action);
 			return;
 		}
 	}
 
-	while (i-- != 0) {
-		u32 action = le32_to_cpu(*phytable);
+	predata = 0;
+	count = 0;
+
+	for (index = 0; index < fw_size; ) {
+		u32 action = le32_to_cpu(phytable[index]);
 		u32 data = action & 0x0000ffff;
-		u32 reg = (action & 0x0fff0000) >> 16;
+		u32 regno = (action & 0x0fff0000) >> 16;
+
+		if (!action)
+			break;
 
 		switch(action & 0xf0000000) {
-		case PHY_WRITE:
-			rtl_writephy(tp, reg, data);
-			phytable++;
+		case PHY_READ:
+			predata = rtl_readphy(tp, regno);
+			count++;
+			index++;
 			break;
+		case PHY_DATA_OR:
+			predata |= data;
+			index++;
+			break;
+		case PHY_DATA_AND:
+			predata &= data;
+			index++;
+			break;
+		case PHY_BJMPN:
+			index -= regno;
+			break;
+		case PHY_READ_EFUSE:
+			predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+			index++;
+			break;
+		case PHY_CLEAR_READCOUNT:
+			count = 0;
+			index++;
+			break;
+		case PHY_WRITE:
+			rtl_writephy(tp, regno, data);
+			index++;
+			break;
+		case PHY_READCOUNT_EQ_SKIP:
+			if (count == data)
+				index += 2;
+			else
+				index += 1;
+			break;
+		case PHY_COMP_EQ_SKIPN:
+			if (predata == data)
+				index += regno;
+			index++;
+			break;
+		case PHY_COMP_NEQ_SKIPN:
+			if (predata != data)
+				index += regno;
+			index++;
+			break;
+		case PHY_WRITE_PREVIOUS:
+			rtl_writephy(tp, regno, predata);
+			index++;
+			break;
+		case PHY_SKIPN:
+			index += regno + 1;
+			break;
+		case PHY_DELAY_MS:
+			mdelay(data);
+			index++;
+			break;
+
+		case PHY_READ_MAC_BYTE:
+		case PHY_WRITE_MAC_BYTE:
+		case PHY_WRITE_ERI_WORD:
 		default:
 			BUG();
 		}
 	}
 }
 
+static void rtl_release_firmware(struct rtl8169_private *tp)
+{
+	release_firmware(tp->fw);
+	tp->fw = NULL;
+}
+
+static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+{
+	const struct firmware **fw = &tp->fw;
+	int rc = !*fw;
+
+	if (rc) {
+		rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
+		if (rc < 0)
+			goto out;
+	}
+
+	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
+	rtl_phy_write_fw(tp, *fw);
+out:
+	return rc;
+}
+
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 {
 	static const struct phy_reg phy_reg_init[] = {
@@ -2041,7 +2164,6 @@
 		{ 0x0d, 0xf880 }
 	};
 	void __iomem *ioaddr = tp->mmio_addr;
-	const struct firmware *fw;
 
 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
@@ -2105,11 +2227,8 @@
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if (rtl_readphy(tp, 0x06) == 0xbf00 &&
-	    request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
-		rtl_phy_write_fw(tp, fw);
-		release_firmware(fw);
-	} else {
+	if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
+	    (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
 		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
 	}
 
@@ -2159,7 +2278,6 @@
 		{ 0x0d, 0xf880 }
 	};
 	void __iomem *ioaddr = tp->mmio_addr;
-	const struct firmware *fw;
 
 	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
 
@@ -2214,11 +2332,8 @@
 
 	rtl_writephy(tp, 0x1f, 0x0005);
 	rtl_writephy(tp, 0x05, 0x001b);
-	if (rtl_readphy(tp, 0x06) == 0xb300 &&
-	    request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
-		rtl_phy_write_fw(tp, fw);
-		release_firmware(fw);
-	} else {
+	if ((rtl_readphy(tp, 0x06) != 0xb300) ||
+	    (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
 		netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
 	}
 
@@ -3069,15 +3184,6 @@
 		rtl8168_driver_start(tp);
 	}
 
-	rtl8169_init_phy(dev, tp);
-
-	/*
-	 * Pretend we are using VLANs; This bypasses a nasty bug where
-	 * Interrupts stop flowing on high load on 8110SCd controllers.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
-
 	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
 
 	if (pci_dev_run_wake(pdev))
@@ -3111,6 +3217,8 @@
 
 	cancel_delayed_work_sync(&tp->task);
 
+	rtl_release_firmware(tp);
+
 	unregister_netdev(dev);
 
 	if (pci_dev_run_wake(pdev))
@@ -3127,6 +3235,7 @@
 static int rtl8169_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 	int retval = -ENOMEM;
 
@@ -3162,6 +3271,15 @@
 
 	napi_enable(&tp->napi);
 
+	rtl8169_init_phy(dev, tp);
+
+	/*
+	 * Pretend we are using VLANs; This bypasses a nasty bug where
+	 * Interrupts stop flowing on high load on 8110SCd controllers.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
 	rtl_pll_power_up(tp);
 
 	rtl_hw_start(dev);
@@ -3171,7 +3289,7 @@
 	tp->saved_wolopts = 0;
 	pm_runtime_put_noidle(&pdev->dev);
 
-	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+	rtl8169_check_link_status(dev, tp, ioaddr);
 out:
 	return retval;
 
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 711449c..002bac7 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1153,6 +1153,9 @@
 	int count;
 	int cpu;
 
+	if (rss_cpus)
+		return rss_cpus;
+
 	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
@@ -1266,27 +1269,18 @@
 	efx->legacy_irq = 0;
 }
 
-struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
-	unsigned tx_channel_offset =
-		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
-			    type >= EFX_TXQ_TYPES);
-	return &efx->channel[tx_channel_offset + index]->tx_queue[type];
-}
-
 static void efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
-	unsigned tx_channel_offset =
+
+	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
 
 	/* Channel pointers were set in efx_init_struct() but we now
 	 * need to clear them for TX queues in any RX-only channels. */
 	efx_for_each_channel(channel, efx) {
-		if (channel->channel - tx_channel_offset >=
+		if (channel->channel - efx->tx_channel_offset >=
 		    efx->n_tx_channels) {
 			efx_for_each_channel_tx_queue(tx_queue, channel)
 				tx_queue->channel = NULL;
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 70e4f7d..61ddd2c 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1107,22 +1107,9 @@
 
 	/* Restore PCI configuration if needed */
 	if (method == RESET_TYPE_WORLD) {
-		if (efx_nic_is_dual_func(efx)) {
-			rc = pci_restore_state(nic_data->pci_dev2);
-			if (rc) {
-				netif_err(efx, drv, efx->net_dev,
-					  "failed to restore PCI config for "
-					  "the secondary function\n");
-				goto fail3;
-			}
-		}
-		rc = pci_restore_state(efx->pci_dev);
-		if (rc) {
-			netif_err(efx, drv, efx->net_dev,
-				  "failed to restore PCI config for the "
-				  "primary function\n");
-			goto fail4;
-		}
+		if (efx_nic_is_dual_func(efx))
+			pci_restore_state(nic_data->pci_dev2);
+		pci_restore_state(efx->pci_dev);
 		netif_dbg(efx, drv, efx->net_dev,
 			  "successfully restored PCI config\n");
 	}
@@ -1133,7 +1120,7 @@
 		rc = -ETIMEDOUT;
 		netif_err(efx, hw, efx->net_dev,
 			  "timed out waiting for hardware reset\n");
-		goto fail5;
+		goto fail3;
 	}
 	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
 
@@ -1141,11 +1128,9 @@
 
 	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
 fail2:
-fail3:
 	pci_restore_state(efx->pci_dev);
 fail1:
-fail4:
-fail5:
+fail3:
 	return rc;
 }
 
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index bdce66d..28df866 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -735,6 +735,7 @@
 	unsigned next_buffer_table;
 	unsigned n_channels;
 	unsigned n_rx_channels;
+	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
 	unsigned int rx_buffer_len;
 	unsigned int rx_buffer_order;
@@ -929,8 +930,13 @@
 	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
 		     (_efx)->channel[_channel->channel + 1] : NULL)
 
-extern struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+			    type >= EFX_TXQ_TYPES);
+	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
+}
 
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5818368..5976d1d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -36,7 +36,7 @@
    Rev 1.07.06 Nov.  7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
    Rev 1.07.05 Nov.  6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
    Rev 1.07.04 Sep.  6 2000 Lei-Chun Chang added ICS1893 PHY support
-   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule
+   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
    Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
    Rev 1.07    Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
    Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf..7d85a38 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
 
 #include <asm/irq.h>
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define SKY2_VLAN_TAG_USED 1
-#endif
-
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
@@ -1326,39 +1322,34 @@
 	return err;
 }
 
-#ifdef SKY2_VLAN_TAG_USED
-static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
-{
-	if (onoff) {
-		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-			     RX_VLAN_STRIP_ON);
-		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_VLAN_TAG_ON);
-	} else {
-		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-			     RX_VLAN_STRIP_OFF);
-		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_VLAN_TAG_OFF);
-	}
-}
+#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
 
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void sky2_vlan_mode(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 	u16 port = sky2->port;
 
-	netif_tx_lock_bh(dev);
-	napi_disable(&hw->napi);
+	if (dev->features & NETIF_F_HW_VLAN_RX)
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_ON);
+	else
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_OFF);
 
-	sky2->vlgrp = grp;
-	sky2_set_vlan_mode(hw, port, grp != NULL);
+	dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_ON);
+	else {
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_OFF);
 
-	sky2_read32(hw, B0_Y2_SP_LISR);
-	napi_enable(&hw->napi);
-	netif_tx_unlock_bh(dev);
+		/* Can't do transmit offload of vlan without hw vlan */
+		dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
+					| NETIF_F_ALL_CSUM);
+	}
 }
-#endif
 
 /* Amount of required worst case padding in rx buffer */
 static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
@@ -1635,9 +1626,7 @@
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
 			   sky2->tx_ring_size - 1);
 
-#ifdef SKY2_VLAN_TAG_USED
-	sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
-#endif
+	sky2_vlan_mode(sky2->netdev);
 
 	sky2_rx_start(sky2);
 }
@@ -1780,7 +1769,7 @@
 	}
 
 	ctrl = 0;
-#ifdef SKY2_VLAN_TAG_USED
+
 	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
 	if (vlan_tx_tag_present(skb)) {
 		if (!le) {
@@ -1792,7 +1781,6 @@
 		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
 		ctrl |= INS_VLAN;
 	}
-#endif
 
 	/* Handle TCP checksum offload */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@
 	struct sk_buff *skb = NULL;
 	u16 count = (status & GMR_FS_LEN) >> 16;
 
-#ifdef SKY2_VLAN_TAG_USED
-	/* Account for vlan tag */
-	if (sky2->vlgrp && (status & GMR_FS_VLAN))
-		count -= VLAN_HLEN;
-#endif
+	if (status & GMR_FS_VLAN)
+		count -= VLAN_HLEN;	/* Account for vlan tag */
 
 	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
 		     "rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@
 static inline void sky2_skb_rx(const struct sky2_port *sky2,
 			       u32 status, struct sk_buff *skb)
 {
-#ifdef SKY2_VLAN_TAG_USED
-	u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
-	if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
-		if (skb->ip_summed == CHECKSUM_NONE)
-			vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
-		else
-			vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
-					 vlan_tag, skb);
-		return;
-	}
-#endif
+	if (status & GMR_FS_VLAN)
+		__vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
 	if (skb->ip_summed == CHECKSUM_NONE)
 		netif_receive_skb(skb);
 	else
@@ -2631,7 +2608,6 @@
 				goto exit_loop;
 			break;
 
-#ifdef SKY2_VLAN_TAG_USED
 		case OP_RXVLAN:
 			sky2->rx_tag = length;
 			break;
@@ -2639,7 +2615,6 @@
 		case OP_RXCHKSVLAN:
 			sky2->rx_tag = length;
 			/* fall through */
-#endif
 		case OP_RXCHKS:
 			if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
 				sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@
 			| SKY2_HW_NEW_LE
 			| SKY2_HW_AUTO_TX_SUM
 			| SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for FE+ status conflicts with VLAN tag detection. */
+		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			hw->flags |= SKY2_HW_VLAN_BROKEN;
 		break;
 
 	case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@
 		u32 modes = SUPPORTED_10baseT_Half
 			| SUPPORTED_10baseT_Full
 			| SUPPORTED_100baseT_Half
-			| SUPPORTED_100baseT_Full
-			| SUPPORTED_Autoneg | SUPPORTED_TP;
+			| SUPPORTED_100baseT_Full;
 
 		if (hw->flags & SKY2_HW_GIGABIT)
 			modes |= SUPPORTED_1000baseT_Half
 				| SUPPORTED_1000baseT_Full;
 		return modes;
 	} else
-		return  SUPPORTED_1000baseT_Half
-			| SUPPORTED_1000baseT_Full
-			| SUPPORTED_Autoneg
-			| SUPPORTED_FIBRE;
+		return SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full;
 }
 
 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@
 	if (sky2_is_copper(hw)) {
 		ecmd->port = PORT_TP;
 		ecmd->speed = sky2->speed;
+		ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
 	} else {
 		ecmd->speed = SPEED_1000;
 		ecmd->port = PORT_FIBRE;
+		ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
 	}
 
 	ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@
 	u32 supported = sky2_supported_modes(hw);
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		if (ecmd->advertising & ~supported)
+			return -EINVAL;
+
+		if (sky2_is_copper(hw))
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_TP |
+					    ADVERTISED_Autoneg;
+		else
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_FIBRE |
+					    ADVERTISED_Autoneg;
+
 		sky2->flags |= SKY2_FLAG_AUTO_SPEED;
-		ecmd->advertising = supported;
 		sky2->duplex = -1;
 		sky2->speed = -1;
 	} else {
@@ -3500,8 +3489,6 @@
 		sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
 	}
 
-	sky2->advertising = ecmd->advertising;
-
 	if (netif_running(dev)) {
 		sky2_phy_reinit(sky2);
 		sky2_set_multicast(dev);
@@ -4229,15 +4216,28 @@
 static int sky2_set_flags(struct net_device *dev, u32 data)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
-	u32 supported =
-		(sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+	unsigned long old_feat = dev->features;
+	u32 supported = 0;
 	int rc;
 
+	if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
+		supported |= ETH_FLAG_RXHASH;
+
+	if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
+		supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+
+	printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
+	       supported, data);
+
 	rc = ethtool_op_set_flags(dev, data, supported);
 	if (rc)
 		return rc;
 
-	rx_set_rss(dev);
+	if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
+		rx_set_rss(dev);
+
+	if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
+		sky2_vlan_mode(dev);
 
 	return 0;
 }
@@ -4273,6 +4273,7 @@
 	.get_sset_count = sky2_get_sset_count,
 	.get_ethtool_stats = sky2_get_ethtool_stats,
 	.set_flags	= sky2_set_flags,
+	.get_flags	= ethtool_op_get_flags,
 };
 
 #ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@
 	.ndo_change_mtu		= sky2_change_mtu,
 	.ndo_tx_timeout		= sky2_tx_timeout,
 	.ndo_get_stats64	= sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= sky2_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= sky2_netpoll,
 #endif
@@ -4572,9 +4570,6 @@
 	.ndo_change_mtu		= sky2_change_mtu,
 	.ndo_tx_timeout		= sky2_tx_timeout,
 	.ndo_get_stats64	= sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= sky2_vlan_rx_register,
-#endif
   },
 };
 
@@ -4625,7 +4620,8 @@
 	sky2->port = port;
 
 	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
-		| NETIF_F_TSO  | NETIF_F_GRO;
+		| NETIF_F_TSO | NETIF_F_GRO;
+
 	if (highmem)
 		dev->features |= NETIF_F_HIGHDMA;
 
@@ -4633,13 +4629,8 @@
 	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
 		dev->features |= NETIF_F_RXHASH;
 
-#ifdef SKY2_VLAN_TAG_USED
-	/* The workaround for FE+ status conflicts with VLAN tag detection. */
-	if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
-	      sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+	if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
 		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	}
-#endif
 
 	/* read the mac address */
 	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc40..6861b0e 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@
 	u16		     rx_pending;
 	u16		     rx_data_size;
 	u16		     rx_nfrags;
-
-#ifdef SKY2_VLAN_TAG_USED
 	u16		     rx_tag;
-	struct vlan_group    *vlgrp;
-#endif
+
 	struct {
 		unsigned long last;
 		u32	mac_rp;
@@ -2284,6 +2281,7 @@
 #define SKY2_HW_AUTO_TX_SUM	0x00000040	/* new IP decode for Tx */
 #define SKY2_HW_ADV_POWER_CTL	0x00000080	/* additional PHY power regs */
 #define SKY2_HW_RSS_BROKEN	0x00000100
+#define SKY2_HW_VLAN_BROKEN	0x00000200
 
 	u8	     	     chip_id;
 	u8		     chip_rev;
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 296000b..3397618 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -12,7 +12,7 @@
 /*
  * RX HW/SW interaction overview
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of RX communication channels betwean driver and NIC.
+ * There are 2 types of RX communication channels between driver and NIC.
  * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
  * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds
  * info about buffer's location, size and ID. An ID field is used to identify a
@@ -821,7 +821,7 @@
 		}
 
 		/* use PMF to accept first MAC_MCST_NUM (15) addresses */
-		/* TBD: sort addreses and write them in ascending order
+		/* TBD: sort addresses and write them in ascending order
 		 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
 		 * multicast frames throu IMF */
 		/* accept the rest of addresses throu IMF */
@@ -1346,7 +1346,7 @@
 /*
  * TX HW/SW interaction overview
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of TX communication channels betwean driver and NIC.
+ * There are 2 types of TX communication channels between driver and NIC.
  * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
  * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
  *
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0e6bac5..7cb301d 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -142,14 +142,6 @@
 MODULE_AUTHOR("Tilera");
 MODULE_LICENSE("GPL");
 
-
-#define IS_MULTICAST(mac_addr) \
-	(((u8 *)(mac_addr))[0] & 0x01)
-
-#define IS_BROADCAST(mac_addr) \
-	(((u16 *)(mac_addr))[0] == 0xffff)
-
-
 /*
  * Queue of incoming packets for a specific cpu and device.
  *
@@ -795,7 +787,7 @@
 		/*
 		 * FIXME: Implement HW multicast filter.
 		 */
-		if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
+		if (is_unicast_ether_addr(buf)) {
 			/* Filter packets not for our address. */
 			const u8 *mine = dev->dev_addr;
 			filter = compare_ether_addr(mine, buf);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7599c45..b100bd5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1309,7 +1309,7 @@
 		break;
 
 	case SIOCGIFHWADDR:
-		/* Get hw addres */
+		/* Get hw address */
 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
 		if (copy_to_user(argp, &ifr, ifreq_len))
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index acbdab3..715e7b4 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -28,6 +28,7 @@
 #include <linux/phy.h>
 #include <linux/workqueue.h>
 #include <linux/of_mdio.h>
+#include <linux/of_net.h>
 #include <linux/of_platform.h>
 
 #include <asm/uaccess.h>
@@ -2031,7 +2032,7 @@
 			netdev_for_each_mc_addr(ha, dev) {
 				/* Only support group multicast for now.
 				 */
-				if (!(ha->addr[0] & 1))
+				if (!is_multicast_ether_addr(ha->addr))
 					continue;
 
 				/* Ask CPM to run CRC and set bit in
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 593c104..d776c4a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1021,13 +1021,15 @@
 		    (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 			pr_debug("invalid frame detected (ignored)"
 				"offset[%u]=%u, length=%u, skb=%p\n",
-							x, offset, temp, skb);
+							x, offset, temp, skb_in);
 			if (!x)
 				goto error;
 			break;
 
 		} else {
 			skb = skb_clone(skb_in, GFP_ATOMIC);
+			if (!skb)
+				goto error;
 			skb->len = temp;
 			skb->data = ((u8 *)skb_in->data) + offset;
 			skb_set_tail_pointer(skb, temp);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cab96ad..09cac70 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -898,7 +898,7 @@
 	set_mii_flow_control(vptr);
 
 	/*
-	   Check if new status is consisent with current status
+	   Check if new status is consistent with current status
 	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
 	       (mii_status==curr_status)) {
 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 1ac9b56..c81a651 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4120,6 +4120,7 @@
 	       "hotplug event.\n");
 
 out:
+	release_firmware(fw);
 	return ret;
 }
 
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 8c3103f..d48486d 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1695,7 +1695,7 @@
  * struct vxge_hw_device_stats - Contains HW per-device statistics,
  * including hw.
  * @devh: HW device handle.
- * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats.
+ * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
  * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
  *                space.
  * @hw_info_dma_acch: One more DMA handle used subsequently to free the
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 34cff6c..4578e5b 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -125,7 +125,7 @@
 /* Module parameters */
 
 MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
 MODULE_LICENSE("GPL");
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug,"Enable/disable extra messages");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f060332..65bc334 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -232,7 +232,7 @@
 			result);
 		goto error;
 	}
-	/* Extract MAC addresss */
+	/* Extract MAC address */
 	ddi = (void *) skb->data;
 	BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
 	d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 17ecaa4..030cbfd 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -186,7 +186,7 @@
  * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
  *
  * This structure will be used to create a device specific poke table
- * to put the device in a consistant state at boot time.
+ * to put the device in a consistent state at boot time.
  *
  * @address: The device address to poke
  *
@@ -703,7 +703,7 @@
  * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
  *     rom after reading the MAC address. This is quite a dirty hack,
  *     if you ask me -- the device requires the bootrom to be
- *     intialized after reading the MAC address.
+ *     initialized after reading the MAC address.
  */
 enum i2400m_bri {
 	I2400M_BRI_SOFT       = 1 << 1,
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 10e3ab3..298f2b0 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -514,7 +514,7 @@
 #ifdef CONFIG_PM
 	iface->needs_remote_wakeup = 1;		/* autosuspend (15s delay) */
 	device_init_wakeup(dev, 1);
-	usb_dev->autosuspend_delay = 15 * HZ;
+	pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
 	usb_enable_autosuspend(usb_dev);
 #endif
 
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 7ad05d4..fd14b91 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1064,7 +1064,7 @@
 /*
  * EEPROM command register
  */
-#define AR5K_EEPROM_CMD		0x6008			/* Register Addres */
+#define AR5K_EEPROM_CMD		0x6008			/* Register Address */
 #define AR5K_EEPROM_CMD_READ	0x00000001	/* EEPROM read */
 #define AR5K_EEPROM_CMD_WRITE	0x00000002	/* EEPROM write */
 #define AR5K_EEPROM_CMD_RESET	0x00000004	/* EEPROM reset */
@@ -1084,7 +1084,7 @@
 /*
  * EEPROM config register
  */
-#define AR5K_EEPROM_CFG			0x6010			/* Register Addres */
+#define AR5K_EEPROM_CFG			0x6010			/* Register Address */
 #define AR5K_EEPROM_CFG_SIZE		0x00000003		/* Size determination override */
 #define AR5K_EEPROM_CFG_SIZE_AUTO	0
 #define AR5K_EEPROM_CFG_SIZE_4KBIT	1
@@ -1126,7 +1126,7 @@
  * Second station id register (Upper 16 bits of MAC address + PCU settings)
  */
 #define AR5K_STA_ID1			0x8004			/* Register Address */
-#define	AR5K_STA_ID1_ADDR_U16		0x0000ffff	/* Upper 16 bits of MAC addres */
+#define	AR5K_STA_ID1_ADDR_U16		0x0000ffff	/* Upper 16 bits of MAC address */
 #define AR5K_STA_ID1_AP			0x00010000	/* Set AP mode */
 #define AR5K_STA_ID1_ADHOC		0x00020000	/* Set Ad-Hoc mode */
 #define AR5K_STA_ID1_PWR_SV		0x00040000	/* Power save reporting */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 01880aa..ea2e7d7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -954,6 +954,9 @@
 				&adc_dc_cal_multi_sample;
 		}
 		ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
+
+		if (AR_SREV_9287(ah))
+			ah->supp_cals &= ~ADC_GAIN_CAL;
 	}
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 088f141..749a936 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -226,6 +226,10 @@
 	    eep->baseEepHeader.pwdclkind == 0)
 		ah->need_an_top2_fixup = 1;
 
+	if ((common->bus_ops->ath_bus_type == ATH_USB) &&
+	    (AR_SREV_9280(ah)))
+		eep->modalHeader[0].xpaBiasLvl = 0;
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index a099b3e..1ce506f 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -433,6 +433,7 @@
 void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
 			enum htc_endpoint_id ep_id, bool txok);
 
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv);
 void ath9k_htc_station_work(struct work_struct *work);
 void ath9k_htc_aggr_work(struct work_struct *work);
 void ath9k_ani_work(struct work_struct *work);;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 845b4c9..f4d576b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -301,6 +301,16 @@
 
 	priv->nstations++;
 
+	/*
+	 * Set chainmask etc. on the target.
+	 */
+	ret = ath9k_htc_update_cap_target(priv);
+	if (ret)
+		ath_dbg(common, ATH_DBG_CONFIG,
+			"Failed to update capability in target\n");
+
+	priv->ah->is_monitoring = true;
+
 	return 0;
 
 err_vif:
@@ -328,6 +338,7 @@
 	}
 
 	priv->nstations--;
+	priv->ah->is_monitoring = false;
 
 	return 0;
 }
@@ -419,7 +430,7 @@
 	return 0;
 }
 
-static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
+int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
 {
 	struct ath9k_htc_cap_target tcap;
 	int ret;
@@ -1186,6 +1197,20 @@
 		}
 	}
 
+	/*
+	 * Monitor interface should be added before
+	 * IEEE80211_CONF_CHANGE_CHANNEL is handled.
+	 */
+	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+		if (conf->flags & IEEE80211_CONF_MONITOR) {
+			if (ath9k_htc_add_monitor_interface(priv))
+				ath_err(common, "Failed to set monitor mode\n");
+			else
+				ath_dbg(common, ATH_DBG_CONFIG,
+					"HW opmode set to Monitor mode\n");
+		}
+	}
+
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 		struct ieee80211_channel *curchan = hw->conf.channel;
 		int pos = curchan->hw_value;
@@ -1221,16 +1246,6 @@
 		ath_update_txpow(priv);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
-		if (conf->flags & IEEE80211_CONF_MONITOR) {
-			if (ath9k_htc_add_monitor_interface(priv))
-				ath_err(common, "Failed to set monitor mode\n");
-			else
-				ath_dbg(common, ATH_DBG_CONFIG,
-					"HW opmode set to Monitor mode\n");
-		}
-	}
-
 	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
 		mutex_lock(&priv->htc_pm_lock);
 		if (!priv->ps_idle) {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fde9786..1afb8bb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -436,9 +436,10 @@
 
 static int ath9k_hw_post_init(struct ath_hw *ah)
 {
+	struct ath_common *common = ath9k_hw_common(ah);
 	int ecode;
 
-	if (!AR_SREV_9271(ah)) {
+	if (common->bus_ops->ath_bus_type != ATH_USB) {
 		if (!ath9k_hw_chip_test(ah))
 			return -ENODEV;
 	}
@@ -1213,7 +1214,7 @@
 	ah->txchainmask = common->tx_chainmask;
 	ah->rxchainmask = common->rx_chainmask;
 
-	if (!ah->chip_fullsleep) {
+	if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
 		ath9k_hw_abortpcurecv(ah);
 		if (!ath9k_hw_stopdmarecv(ah)) {
 			ath_dbg(common, ATH_DBG_XMIT,
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 0dc33b6..be48281 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1919,7 +1919,7 @@
 	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
 }
 
-/* Intialize B/G PHY power control */
+/* Initialize B/G PHY power control */
 static void b43_phy_init_pctl(struct b43_wldev *dev)
 {
 	struct ssb_bus *bus = dev->dev->bus;
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 35033dd..28e477d 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -153,7 +153,7 @@
 	phy->calibrated = 1;
 }
 
-/* intialize B PHY power control
+/* initialize B PHY power control
  * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
  */
 static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index bd8a413..2176ede 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -518,22 +518,21 @@
 	hw_priv->link = link;
 
 	/*
-	 * Make sure the IRQ handler cannot proceed until at least
-	 * dev->base_addr is initialized.
+	 * We enable the IRQ here, but the IRQ handler will not proceed
+	 * until dev->base_addr is set below. This protects us from
+	 * receive interrupts while the driver is not yet initialized.
 	 */
-	spin_lock_irqsave(&local->irq_init_lock, flags);
-
 	ret = pcmcia_request_irq(link, prism2_interrupt);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
 	ret = pcmcia_enable_device(link);
 	if (ret)
-		goto failed_unlock;
+		goto failed;
 
+	spin_lock_irqsave(&local->irq_init_lock, flags);
 	dev->irq = link->irq;
 	dev->base_addr = link->resource[0]->start;
-
 	spin_unlock_irqrestore(&local->irq_init_lock, flags);
 
 	local->shutdown = 0;
@@ -546,8 +545,6 @@
 
 	return ret;
 
- failed_unlock:
-	spin_unlock_irqrestore(&local->irq_init_lock, flags);
  failed:
 	kfree(hw_priv);
 	prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d6ed5f..ae438ed 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1973,6 +1973,13 @@
 
 	inta = ipw_read32(priv, IPW_INTA_RW);
 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+		/* Only handle the cached INTA values */
+		inta = 0;
+	}
 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
 
 	/* Add any cached INTA values that need to be handled */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index a5dbfea..b5cb3be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -197,7 +197,7 @@
 
  none:
 	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if diabled by irq  and no schedules tasklet. */
+	/* only Re-enable if disabled by irq and no tasklet is scheduled. */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
 		iwl_enable_interrupts(priv);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f13a83a..36335b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1154,7 +1154,7 @@
 	}
 
 	/* Re-enable all interrupts */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 
@@ -1368,7 +1368,7 @@
 	}
 
 	/* Re-enable all interrupts */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
index a08b4e5..bb1a742 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -619,7 +619,7 @@
 
 none:
 	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4776323..49493d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -107,7 +107,7 @@
 	/*
 	 * XXX: The MAC address in the command buffer is often changed from
 	 * the original sent to the device. That is, the MAC address
-	 * written to the command buffer often is not the same MAC adress
+	 * written to the command buffer often is not the same MAC address
 	 * read from the command buffer when the command returns. This
 	 * issue has not yet been resolved and this debugging is left to
 	 * observe the problem.
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 76b2318a..f618b96 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -618,7 +618,7 @@
 	else
 		*burst_possible = false;
 
-	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+	if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
 		*flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
 
 	if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2c8cc95..ec2c75d 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -630,7 +630,7 @@
 	printk(KERN_DEBUG "islpci_alloc_memory\n");
 #endif
 
-	/* remap the PCI device base address to accessable */
+	/* remap the PCI device base address to accessible */
 	if (!(priv->device_base =
 	      ioremap(pci_resource_start(priv->pdev, 0),
 		      ISL38XX_PCI_MEM_SIZE))) {
@@ -709,7 +709,7 @@
 				   PCI_DMA_FROMDEVICE);
 		if (!priv->pci_map_rx_address[counter]) {
 			/* error mapping the buffer to device
-			   accessable memory address */
+			   accessible memory address */
 			printk(KERN_ERR "failed to map skb DMA'able\n");
 			goto out_free;
 		}
@@ -773,7 +773,7 @@
 		priv->data_low_rx[counter] = NULL;
 	}
 
-	/* Free the acces control list and the WPA list */
+	/* Free the access control list and the WPA list */
 	prism54_acl_clean(&priv->acl);
 	prism54_wpa_bss_ie_clean(priv);
 	mgt_clean(priv);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 2fc52bc..d44f8e2 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -450,7 +450,7 @@
 				   MAX_FRAGMENT_SIZE_RX + 2,
 				   PCI_DMA_FROMDEVICE);
 		if (unlikely(!priv->pci_map_rx_address[index])) {
-			/* error mapping the buffer to device accessable memory address */
+			/* error mapping the buffer to device accessible memory address */
 			DEBUG(SHOW_ERROR_MESSAGES,
 			      "Error mapping DMA address\n");
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 658542d..f3da051 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -273,7 +273,7 @@
 	intf->beacon = entry;
 
 	/*
-	 * The MAC adddress must be configured after the device
+	 * The MAC address must be configured after the device
 	 * has been initialized. Otherwise the device can reset
 	 * the MAC registers.
 	 * The BSSID address must only be configured in AP mode,
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 73631c6..ace0b66 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -363,12 +363,12 @@
 	struct rt2x00_dev *rt2x00dev = hw->priv;
 
 	if (pci_set_power_state(pci_dev, PCI_D0) ||
-	    pci_enable_device(pci_dev) ||
-	    pci_restore_state(pci_dev)) {
+	    pci_enable_device(pci_dev)) {
 		ERROR(rt2x00dev, "Failed to resume device.\n");
 		return -EIO;
 	}
 
+	pci_restore_state(pci_dev);
 	return rt2x00lib_resume(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_resume);
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index e54b21a..efcc3aa 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -1272,10 +1272,10 @@
 /* OBSOLETE */
 #define WL1251_ACX_INTR_WAKE_ON_HOST	BIT(6)
 
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
 #define WL1251_ACX_INTR_TRACE_A		BIT(7)
 
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
 #define WL1251_ACX_INTR_TRACE_B		BIT(8)
 
 /* Command processing completion */
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index 13fbeec..c0ce2c8 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -419,7 +419,7 @@
 #define WL1251_FW_NAME "wl1251-fw.bin"
 #define WL1251_NVS_NAME "wl1251-nvs.bin"
 
-#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
 
 #define WL1251_PART_DOWN_MEM_START	0x0
 #define WL1251_PART_DOWN_MEM_SIZE	0x16800
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 9cbc3f4..7bd8e4d 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -47,9 +47,9 @@
 #define WL1271_ACX_INTR_HW_AVAILABLE       BIT(5)
 /* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */
 #define WL1271_ACX_INTR_DATA               BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
 #define WL1271_ACX_INTR_TRACE_A            BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
 #define WL1271_ACX_INTR_TRACE_B            BIT(8)
 
 #define WL1271_ACX_INTR_ALL		   0xFFFFFFFF
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index ce3d31f..9050dd9 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -416,8 +416,8 @@
 
 /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
    on in case it has been shut down shortly before */
-#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
-#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
+#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
 
 /* Macros to handle wl1271.sta_rate_set */
 #define HW_BG_RATES_MASK	0xffff
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ee82df6..3e5befe 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -192,7 +192,7 @@
 }
 
 /*
- * Get Ethernet MAC addresss.
+ * Get Ethernet MAC address.
  *
 * WARNING: We switch to FPAGE0 and switch back again.
 *          Making sure there is no other WL function being called by the ISR.
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 43307bd..6107304 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1207,7 +1207,6 @@
 static void housekeeping_disable(struct zd_mac *mac)
 {
 	dev_dbg_f(zd_mac_dev(mac), "\n");
-	cancel_rearming_delayed_workqueue(zd_workqueue,
-		&mac->housekeeping.link_led_work);
+	cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
 	zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
 }
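Note: cancel_rearming_delayed_workqueue() was a deprecated wrapper, and cancel_delayed_work_sync() waits on the work item itself rather than on a particular workqueue, which is why the zd_workqueue argument simply drops out. A minimal sketch of the same teardown pattern, with hypothetical foo_* names:

    #include <linux/workqueue.h>

    struct foo_priv {
            struct delayed_work poll_work;
    };

    static void foo_stop(struct foo_priv *priv)
    {
            /* Blocks until any queued or running instance has finished
             * and cannot re-arm itself afterwards. */
            cancel_delayed_work_sync(&priv->poll_work);
    }
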
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9..546de57 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -488,7 +488,7 @@
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
-		     netif_needs_gso(dev, skb))) {
+		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 		spin_unlock_irq(&np->tx_lock);
 		goto drop;
 	}
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index de6c308..cad66ce 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/of_mdio.h>
+#include <linux/of_net.h>
 #include <linux/phy.h>
 
 #define DRIVER_NAME "xilinx_emaclite"
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
new file mode 100644
index 0000000..ffedfd4
--- /dev/null
+++ b/drivers/nfc/Kconfig
@@ -0,0 +1,30 @@
+#
+# Near Field Communication (NFC) devices
+#
+
+menuconfig NFC_DEVICES
+	bool "NFC devices"
+	default n
+	---help---
+	  You'll have to say Y if your computer contains an NFC device that
+	  you want to use under Linux.
+
+	  You can say N here if you don't have any Near Field Communication
+	  devices connected to your computer.
+
+if NFC_DEVICES
+
+config PN544_NFC
+	tristate "PN544 NFC driver"
+	depends on I2C
+	select CRC_CCITT
+	default n
+	---help---
+	  Say yes if you want PN544 Near Field Communication driver.
+	  This is for i2c connected version. If unsure, say N here.
+
+	  To compile this driver as a module, choose m here. The module will
+	  be called pn544.
+
+
+endif # NFC_DEVICES
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
new file mode 100644
index 0000000..a4efb16
--- /dev/null
+++ b/drivers/nfc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for nfc devices
+#
+
+obj-$(CONFIG_PN544_NFC)		+= pn544.o
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
new file mode 100644
index 0000000..401c44b
--- /dev/null
+++ b/drivers/nfc/pn544.c
@@ -0,0 +1,891 @@
+/*
+ * Driver for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/completion.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nfc/pn544.h>
+#include <linux/poll.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serial_core.h> /* for TCGETS */
+#include <linux/slab.h>
+
+#define DRIVER_CARD	"PN544 NFC"
+#define DRIVER_DESC	"NFC driver for PN544"
+
+static struct i2c_device_id pn544_id_table[] = {
+	{ PN544_DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, pn544_id_table);
+
+#define HCI_MODE	0
+#define FW_MODE		1
+
+enum pn544_state {
+	PN544_ST_COLD,
+	PN544_ST_FW_READY,
+	PN544_ST_READY,
+};
+
+enum pn544_irq {
+	PN544_NONE,
+	PN544_INT,
+};
+
+struct pn544_info {
+	struct miscdevice miscdev;
+	struct i2c_client *i2c_dev;
+	struct regulator_bulk_data regs[2];
+
+	enum pn544_state state;
+	wait_queue_head_t read_wait;
+	loff_t read_offset;
+	enum pn544_irq read_irq;
+	struct mutex read_mutex; /* Serialize read_irq access */
+	struct mutex mutex; /* Serialize info struct access */
+	u8 *buf;
+	unsigned int buflen;
+};
+
+static const char reg_vdd_io[]	= "Vdd_IO";
+static const char reg_vbat[]	= "VBat";
+
+/* sysfs interface */
+static ssize_t pn544_test(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct pn544_info *info = dev_get_drvdata(dev);
+	struct i2c_client *client = info->i2c_dev;
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
+}
+
+static int pn544_enable(struct pn544_info *info, int mode)
+{
+	struct pn544_nfc_platform_data *pdata;
+	struct i2c_client *client = info->i2c_dev;
+
+	int r;
+
+	r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
+	if (r < 0)
+		return r;
+
+	pdata = client->dev.platform_data;
+	info->read_irq = PN544_NONE;
+	if (pdata->enable)
+		pdata->enable(mode);
+
+	if (mode) {
+		info->state = PN544_ST_FW_READY;
+		dev_dbg(&client->dev, "now in FW-mode\n");
+	} else {
+		info->state = PN544_ST_READY;
+		dev_dbg(&client->dev, "now in HCI-mode\n");
+	}
+
+	usleep_range(10000, 15000);
+
+	return 0;
+}
+
+static void pn544_disable(struct pn544_info *info)
+{
+	struct pn544_nfc_platform_data *pdata;
+	struct i2c_client *client = info->i2c_dev;
+
+	pdata = client->dev.platform_data;
+	if (pdata->disable)
+		pdata->disable();
+
+	info->state = PN544_ST_COLD;
+
+	dev_dbg(&client->dev, "Now in OFF-mode\n");
+
+	msleep(PN544_RESETVEN_TIME);
+
+	info->read_irq = PN544_NONE;
+	regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+	u8 len;
+	u16 crc;
+
+	len = buf[0] + 1;
+	if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
+		pr_err(PN544_DRIVER_NAME
+		       ": CRC; corrupt packet len %u (%d)\n", len, buflen);
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	crc = crc_ccitt(0xffff, buf, len - 2);
+	crc = ~crc;
+
+	if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
+		pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
+		       crc, buf[len-1], buf[len-2]);
+
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+	int r;
+
+	if (len < 4 || len != (buf[0] + 1)) {
+		dev_err(&client->dev, "%s: Illegal message length: %d\n",
+			__func__, len);
+		return -EINVAL;
+	}
+
+	if (check_crc(buf, len))
+		return -EINVAL;
+
+	usleep_range(3000, 6000);
+
+	r = i2c_master_send(client, buf, len);
+	dev_dbg(&client->dev, "send: %d\n", r);
+
+	if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, buf, len);
+		dev_dbg(&client->dev, "send2: %d\n", r);
+	}
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	return r;
+}
+
+static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+	int r;
+	u8 len;
+
+	/*
+	 * You could read a packet in one go, but then you'd need to read
+	 * max size and rest would be 0xff fill, so we do split reads.
+	 */
+	r = i2c_master_recv(client, &len, 1);
+	dev_dbg(&client->dev, "recv1: %d\n", r);
+
+	if (r != 1)
+		return -EREMOTEIO;
+
+	if (len < PN544_LLC_HCI_OVERHEAD)
+		len = PN544_LLC_HCI_OVERHEAD;
+	else if (len > (PN544_MSG_MAX_SIZE - 1))
+		len = PN544_MSG_MAX_SIZE - 1;
+
+	if (1 + len > buflen) /* len+(data+crc16) */
+		return -EMSGSIZE;
+
+	buf[0] = len;
+
+	r = i2c_master_recv(client, buf + 1, len);
+	dev_dbg(&client->dev, "recv2: %d\n", r);
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	usleep_range(3000, 6000);
+
+	return r + 1;
+}
+
+static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
+{
+	int r;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	if (len < PN544_FW_HEADER_SIZE ||
+	    (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
+		return -EINVAL;
+
+	r = i2c_master_send(client, buf, len);
+	dev_dbg(&client->dev, "fw send: %d\n", r);
+
+	if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, buf, len);
+		dev_dbg(&client->dev, "fw send2: %d\n", r);
+	}
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	return r;
+}
+
+static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+	int r, len;
+
+	if (buflen < PN544_FW_HEADER_SIZE)
+		return -EINVAL;
+
+	r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
+	dev_dbg(&client->dev, "FW recv1: %d\n", r);
+
+	if (r < 0)
+		return r;
+
+	if (r < PN544_FW_HEADER_SIZE)
+		return -EINVAL;
+
+	len = (buf[1] << 8) + buf[2];
+	if (len == 0) /* just header, no additional data */
+		return r;
+
+	if (len > buflen - PN544_FW_HEADER_SIZE)
+		return -EMSGSIZE;
+
+	r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
+	dev_dbg(&client->dev, "fw recv2: %d\n", r);
+
+	if (r != len)
+		return -EINVAL;
+
+	return r + PN544_FW_HEADER_SIZE;
+}
+
+static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
+{
+	struct pn544_info *info = dev_id;
+	struct i2c_client *client = info->i2c_dev;
+
+	BUG_ON(!info);
+	BUG_ON(irq != info->i2c_dev->irq);
+
+	dev_dbg(&client->dev, "IRQ\n");
+
+	mutex_lock(&info->read_mutex);
+	info->read_irq = PN544_INT;
+	mutex_unlock(&info->read_mutex);
+
+	wake_up_interruptible(&info->read_wait);
+
+	return IRQ_HANDLED;
+}
+
+static enum pn544_irq pn544_irq_state(struct pn544_info *info)
+{
+	enum pn544_irq irq;
+
+	mutex_lock(&info->read_mutex);
+	irq = info->read_irq;
+	mutex_unlock(&info->read_mutex);
+	/*
+	 * XXX: should we check GPIO-line status directly?
+	 * return pdata->irq_status() ? PN544_INT : PN544_NONE;
+	 */
+
+	return irq;
+}
+
+static ssize_t pn544_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *offset)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	enum pn544_irq irq;
+	size_t len;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
+		info, count);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	irq = pn544_irq_state(info);
+	if (irq == PN544_NONE) {
+		if (file->f_flags & O_NONBLOCK) {
+			r = -EAGAIN;
+			goto out;
+		}
+
+		if (wait_event_interruptible(info->read_wait,
+					     (info->read_irq == PN544_INT))) {
+			r = -ERESTARTSYS;
+			goto out;
+		}
+	}
+
+	if (info->state == PN544_ST_FW_READY) {
+		len = min(count, info->buflen);
+
+		mutex_lock(&info->read_mutex);
+		r = pn544_fw_read(info->i2c_dev, info->buf, len);
+		info->read_irq = PN544_NONE;
+		mutex_unlock(&info->read_mutex);
+
+		if (r < 0) {
+			dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, r, false);
+
+		*offset += r;
+		if (copy_to_user(buf, info->buf, r)) {
+			r = -EFAULT;
+			goto out;
+		}
+	} else {
+		len = min(count, info->buflen);
+
+		mutex_lock(&info->read_mutex);
+		r = pn544_i2c_read(info->i2c_dev, info->buf, len);
+		info->read_irq = PN544_NONE;
+		mutex_unlock(&info->read_mutex);
+
+		if (r < 0) {
+			dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
+			goto out;
+		}
+		print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, r, false);
+
+		*offset += r;
+		if (copy_to_user(buf, info->buf, r)) {
+			r = -EFAULT;
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static unsigned int pn544_poll(struct file *file, poll_table *wait)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	poll_wait(file, &info->read_wait, wait);
+
+	if (pn544_irq_state(info) == PN544_INT) {
+		r = POLLIN | POLLRDNORM;
+		goto out;
+	}
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static ssize_t pn544_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	ssize_t	len;
+	int r;
+
+	dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
+		info, count);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * XXX: should we detect reset-writes and clean possible
+	 * read_irq state?
+	 */
+	if (info->state == PN544_ST_FW_READY) {
+		size_t fw_len;
+
+		if (count < PN544_FW_HEADER_SIZE) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		len = min(count, info->buflen);
+		if (copy_from_user(info->buf, buf, len)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, len, false);
+
+		fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
+			info->buf[2];
+
+		if (len > fw_len) /* 1 msg at a time */
+			len = fw_len;
+
+		r = pn544_fw_write(info->i2c_dev, info->buf, len);
+	} else {
+		if (count < PN544_LLC_MIN_SIZE) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		len = min(count, info->buflen);
+		if (copy_from_user(info->buf, buf, len)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, len, false);
+
+		if (len > (info->buf[0] + 1)) /* 1 msg at a time */
+			len  = info->buf[0] + 1;
+
+		r = pn544_i2c_write(info->i2c_dev, info->buf, len);
+	}
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+
+}
+
+static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	struct pn544_nfc_platform_data *pdata;
+	unsigned int val;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	pdata = info->i2c_dev->dev.platform_data;
+	switch (cmd) {
+	case PN544_GET_FW_MODE:
+		dev_dbg(&client->dev, "%s:  PN544_GET_FW_MODE\n", __func__);
+
+		val = (info->state == PN544_ST_FW_READY);
+		if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		break;
+
+	case PN544_SET_FW_MODE:
+		dev_dbg(&client->dev, "%s:  PN544_SET_FW_MODE\n", __func__);
+
+		if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		if (val) {
+			if (info->state == PN544_ST_FW_READY)
+				break;
+
+			pn544_disable(info);
+			r = pn544_enable(info, FW_MODE);
+			if (r < 0)
+				goto out;
+		} else {
+			if (info->state == PN544_ST_READY)
+				break;
+			pn544_disable(info);
+			r = pn544_enable(info, HCI_MODE);
+			if (r < 0)
+				goto out;
+		}
+		file->f_pos = info->read_offset;
+		break;
+
+	case TCGETS:
+		dev_dbg(&client->dev, "%s:  TCGETS\n", __func__);
+
+		r = -ENOIOCTLCMD;
+		break;
+
+	default:
+		dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
+		r = -ENOIOCTLCMD;
+		break;
+	}
+
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static int pn544_open(struct inode *inode, struct file *file)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+		info, info->i2c_dev);
+
+	mutex_lock(&info->mutex);
+
+	/*
+	 * Only 1 at a time.
+	 * XXX: maybe user (counter) would work better
+	 */
+	if (info->state != PN544_ST_COLD) {
+		r = -EBUSY;
+		goto out;
+	}
+
+	file->f_pos = info->read_offset;
+	r = pn544_enable(info, HCI_MODE);
+
+out:
+	mutex_unlock(&info->mutex);
+	return r;
+}
+
+static int pn544_close(struct inode *inode, struct file *file)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n",
+		__func__, info, info->i2c_dev);
+
+	mutex_lock(&info->mutex);
+	pn544_disable(info);
+	mutex_unlock(&info->mutex);
+
+	return 0;
+}
+
+static const struct file_operations pn544_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.read		= pn544_read,
+	.write		= pn544_write,
+	.poll		= pn544_poll,
+	.open		= pn544_open,
+	.release	= pn544_close,
+	.unlocked_ioctl	= pn544_ioctl,
+};
+
+#ifdef CONFIG_PM
+static int pn544_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pn544_info *info;
+	int r = 0;
+
+	dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
+
+	info = i2c_get_clientdata(client);
+	dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
+		 info, client);
+
+	mutex_lock(&info->mutex);
+
+	switch (info->state) {
+	case PN544_ST_FW_READY:
+		/* Do not suspend while upgrading FW, please! */
+		r = -EPERM;
+		break;
+
+	case PN544_ST_READY:
+		/*
+		 * CHECK: Device should be in standby-mode. No way to check?
+		 * Allowing low power mode for the regulator is potentially
+		 * dangerous if pn544 does not go to suspension.
+		 */
+		break;
+
+	case PN544_ST_COLD:
+		break;
+	};
+
+	mutex_unlock(&info->mutex);
+	return r;
+}
+
+static int pn544_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pn544_info *info = i2c_get_clientdata(client);
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+		info, client);
+
+	mutex_lock(&info->mutex);
+
+	switch (info->state) {
+	case PN544_ST_READY:
+		/*
+		 * CHECK: If regulator low power mode is allowed in
+		 * pn544_suspend, we should go back to normal mode
+		 * here.
+		 */
+		break;
+
+	case PN544_ST_COLD:
+		break;
+
+	case PN544_ST_FW_READY:
+		break;
+	};
+
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
+#endif
+
+static struct device_attribute pn544_attr =
+	__ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
+
+static int __devinit pn544_probe(struct i2c_client *client,
+				 const struct i2c_device_id *id)
+{
+	struct pn544_info *info;
+	struct pn544_nfc_platform_data *pdata;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+	/* private data allocation */
+	info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544_info.\n");
+		r = -ENOMEM;
+		goto err_info_alloc;
+	}
+
+	info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
+	info->buf = kzalloc(info->buflen, GFP_KERNEL);
+	if (!info->buf) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544_info->buf.\n");
+		r = -ENOMEM;
+		goto err_buf_alloc;
+	}
+
+	info->regs[0].supply = reg_vdd_io;
+	info->regs[1].supply = reg_vbat;
+	r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
+				 info->regs);
+	if (r < 0)
+		goto err_kmalloc;
+
+	info->i2c_dev = client;
+	info->state = PN544_ST_COLD;
+	info->read_irq = PN544_NONE;
+	mutex_init(&info->read_mutex);
+	mutex_init(&info->mutex);
+	init_waitqueue_head(&info->read_wait);
+	i2c_set_clientdata(client, info);
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		dev_err(&client->dev, "No platform data\n");
+		r = -EINVAL;
+		goto err_reg;
+	}
+
+	if (!pdata->request_resources) {
+		dev_err(&client->dev, "request_resources() missing\n");
+		r = -EINVAL;
+		goto err_reg;
+	}
+
+	r = pdata->request_resources(client);
+	if (r) {
+		dev_err(&client->dev, "Cannot get platform resources\n");
+		goto err_reg;
+	}
+
+	r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
+				 IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
+				 info);
+	if (r < 0) {
+		dev_err(&client->dev, "Unable to register IRQ handler\n");
+		goto err_res;
+	}
+
+	/* If we don't have the test we don't need the sysfs file */
+	if (pdata->test) {
+		r = device_create_file(&client->dev, &pn544_attr);
+		if (r) {
+			dev_err(&client->dev,
+				"sysfs registration failed, error %d\n", r);
+			goto err_irq;
+		}
+	}
+
+	info->miscdev.minor = MISC_DYNAMIC_MINOR;
+	info->miscdev.name = PN544_DRIVER_NAME;
+	info->miscdev.fops = &pn544_fops;
+	info->miscdev.parent = &client->dev;
+	r = misc_register(&info->miscdev);
+	if (r < 0) {
+		dev_err(&client->dev, "Device registration failed\n");
+		goto err_sysfs;
+	}
+
+	dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
+		__func__, info, pdata, client);
+
+	return 0;
+
+err_sysfs:
+	if (pdata->test)
+		device_remove_file(&client->dev, &pn544_attr);
+err_irq:
+	free_irq(client->irq, info);
+err_res:
+	if (pdata->free_resources)
+		pdata->free_resources();
+err_reg:
+	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+err_kmalloc:
+	kfree(info->buf);
+err_buf_alloc:
+	kfree(info);
+err_info_alloc:
+	return r;
+}
+
+static __devexit int pn544_remove(struct i2c_client *client)
+{
+	struct pn544_info *info = i2c_get_clientdata(client);
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	misc_deregister(&info->miscdev);
+	if (pdata->test)
+		device_remove_file(&client->dev, &pn544_attr);
+
+	if (info->state != PN544_ST_COLD) {
+		if (pdata->disable)
+			pdata->disable();
+
+		info->read_irq = PN544_NONE;
+	}
+
+	free_irq(client->irq, info);
+	if (pdata->free_resources)
+		pdata->free_resources();
+
+	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+	kfree(info->buf);
+	kfree(info);
+
+	return 0;
+}
+
+static struct i2c_driver pn544_driver = {
+	.driver = {
+		.name = PN544_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm = &pn544_pm_ops,
+#endif
+	},
+	.probe = pn544_probe,
+	.id_table = pn544_id_table,
+	.remove = __devexit_p(pn544_remove),
+};
+
+static int __init pn544_init(void)
+{
+	int r;
+
+	pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+	r = i2c_add_driver(&pn544_driver);
+	if (r) {
+		pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static void __exit pn544_exit(void)
+{
+	i2c_del_driver(&pn544_driver);
+	pr_info(DRIVER_DESC ", Exiting.\n");
+}
+
+module_init(pn544_init);
+module_exit(pn544_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
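Note: check_crc() in the new driver fixes the HCI/LLC frame layout that both the read and write paths assume: buf[0] counts every byte that follows it, and the last two bytes hold the complemented CRC-CCITT (seed 0xffff, low byte first) computed over everything before them. A hedged sender-side sketch built on that reading; pn544_build_llc() is a hypothetical helper, not part of the patch:

    #include <linux/crc-ccitt.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    /* Wrap payload[0..plen) into buf as a PN544 LLC frame:
     * buf[0] = plen + 2, then the payload, then CRC low/high bytes. */
    static int pn544_build_llc(u8 *buf, size_t buflen,
                               const u8 *payload, size_t plen)
    {
            size_t total = plen + 3;        /* len byte + payload + crc16 */
            u16 crc;

            if (total < 4 || total > buflen || total > 0xff)
                    return -EINVAL;

            buf[0] = plen + 2;
            memcpy(buf + 1, payload, plen);

            crc = ~crc_ccitt(0xffff, buf, total - 2);
            buf[total - 2] = crc & 0xff;
            buf[total - 1] = crc >> 8;

            return total;
    }
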
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index aa675eb..3c6e100 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -19,6 +19,10 @@
 	bool
 	select DTC
 
+config OF_EARLY_FLATTREE
+	bool
+	select OF_FLATTREE
+
 config OF_PROMTREE
 	bool
 
@@ -49,6 +53,10 @@
 	help
 	  OpenFirmware I2C accessors
 
+config OF_NET
+	depends on NETDEVICES
+	def_bool y
+
 config OF_SPI
 	def_tristate SPI
 	depends on SPI && !SPARC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 7888155..3ab21a0 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -6,5 +6,6 @@
 obj-$(CONFIG_OF_DEVICE) += device.o platform.o
 obj-$(CONFIG_OF_GPIO)   += gpio.o
 obj-$(CONFIG_OF_I2C)	+= of_i2c.o
+obj-$(CONFIG_OF_NET)	+= of_net.o
 obj-$(CONFIG_OF_SPI)	+= of_spi.o
 obj-$(CONFIG_OF_MDIO)	+= of_mdio.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 3a1c7e7..b4559c5 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -12,13 +12,13 @@
 			(ns) > 0)
 
 static struct of_bus *of_match_bus(struct device_node *np);
-static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
-				    u64 size, unsigned int flags,
+static int __of_address_to_resource(struct device_node *dev,
+		const __be32 *addrp, u64 size, unsigned int flags,
 				    struct resource *r);
 
 /* Debug utility */
 #ifdef DEBUG
-static void of_dump_addr(const char *s, const u32 *addr, int na)
+static void of_dump_addr(const char *s, const __be32 *addr, int na)
 {
 	printk(KERN_DEBUG "%s", s);
 	while (na--)
@@ -26,7 +26,7 @@
 	printk("\n");
 }
 #else
-static void of_dump_addr(const char *s, const u32 *addr, int na) { }
+static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
 #endif
 
 /* Callbacks for bus specific translators */
@@ -36,10 +36,10 @@
 	int		(*match)(struct device_node *parent);
 	void		(*count_cells)(struct device_node *child,
 				       int *addrc, int *sizec);
-	u64		(*map)(u32 *addr, const u32 *range,
+	u64		(*map)(u32 *addr, const __be32 *range,
 				int na, int ns, int pna);
 	int		(*translate)(u32 *addr, u64 offset, int na);
-	unsigned int	(*get_flags)(const u32 *addr);
+	unsigned int	(*get_flags)(const __be32 *addr);
 };
 
 /*
@@ -55,7 +55,7 @@
 		*sizec = of_n_size_cells(dev);
 }
 
-static u64 of_bus_default_map(u32 *addr, const u32 *range,
+static u64 of_bus_default_map(u32 *addr, const __be32 *range,
 		int na, int ns, int pna)
 {
 	u64 cp, s, da;
@@ -85,7 +85,7 @@
 	return 0;
 }
 
-static unsigned int of_bus_default_get_flags(const u32 *addr)
+static unsigned int of_bus_default_get_flags(const __be32 *addr)
 {
 	return IORESOURCE_MEM;
 }
@@ -110,10 +110,10 @@
 		*sizec = 2;
 }
 
-static unsigned int of_bus_pci_get_flags(const u32 *addr)
+static unsigned int of_bus_pci_get_flags(const __be32 *addr)
 {
 	unsigned int flags = 0;
-	u32 w = addr[0];
+	u32 w = be32_to_cpup(addr);
 
 	switch((w >> 24) & 0x03) {
 	case 0x01:
@@ -129,7 +129,8 @@
 	return flags;
 }
 
-static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+static u64 of_bus_pci_map(u32 *addr, const __be32 *range, int na, int ns,
+		int pna)
 {
 	u64 cp, s, da;
 	unsigned int af, rf;
@@ -160,7 +161,7 @@
 	return of_bus_default_translate(addr + 1, offset, na - 1);
 }
 
-const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
 			unsigned int *flags)
 {
 	const __be32 *prop;
@@ -207,7 +208,7 @@
 int of_pci_address_to_resource(struct device_node *dev, int bar,
 			       struct resource *r)
 {
-	const u32	*addrp;
+	const __be32	*addrp;
 	u64		size;
 	unsigned int	flags;
 
@@ -237,12 +238,13 @@
 		*sizec = 1;
 }
 
-static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+static u64 of_bus_isa_map(u32 *addr, const __be32 *range, int na, int ns,
+		int pna)
 {
 	u64 cp, s, da;
 
 	/* Check address type match */
-	if ((addr[0] ^ range[0]) & 0x00000001)
+	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
 		return OF_BAD_ADDR;
 
 	/* Read address values, skipping high cell */
@@ -264,10 +266,10 @@
 	return of_bus_default_translate(addr + 1, offset, na - 1);
 }
 
-static unsigned int of_bus_isa_get_flags(const u32 *addr)
+static unsigned int of_bus_isa_get_flags(const __be32 *addr)
 {
 	unsigned int flags = 0;
-	u32 w = addr[0];
+	u32 w = be32_to_cpup(addr);
 
 	if (w & 1)
 		flags |= IORESOURCE_IO;
@@ -330,7 +332,7 @@
 			    struct of_bus *pbus, u32 *addr,
 			    int na, int ns, int pna, const char *rprop)
 {
-	const u32 *ranges;
+	const __be32 *ranges;
 	unsigned int rlen;
 	int rone;
 	u64 offset = OF_BAD_ADDR;
@@ -398,7 +400,7 @@
  * that can be mapped to a cpu physical address). This is not really specified
  * that way, but this is traditionally the way IBM at least do things
  */
-u64 __of_translate_address(struct device_node *dev, const u32 *in_addr,
+u64 __of_translate_address(struct device_node *dev, const __be32 *in_addr,
 			   const char *rprop)
 {
 	struct device_node *parent = NULL;
@@ -475,22 +477,22 @@
 	return result;
 }
 
-u64 of_translate_address(struct device_node *dev, const u32 *in_addr)
+u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
 {
 	return __of_translate_address(dev, in_addr, "ranges");
 }
 EXPORT_SYMBOL(of_translate_address);
 
-u64 of_translate_dma_address(struct device_node *dev, const u32 *in_addr)
+u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
 {
 	return __of_translate_address(dev, in_addr, "dma-ranges");
 }
 EXPORT_SYMBOL(of_translate_dma_address);
 
-const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
+const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
 		    unsigned int *flags)
 {
-	const u32 *prop;
+	const __be32 *prop;
 	unsigned int psize;
 	struct device_node *parent;
 	struct of_bus *bus;
@@ -525,8 +527,8 @@
 }
 EXPORT_SYMBOL(of_get_address);
 
-static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
-				    u64 size, unsigned int flags,
+static int __of_address_to_resource(struct device_node *dev,
+		const __be32 *addrp, u64 size, unsigned int flags,
 				    struct resource *r)
 {
 	u64 taddr;
@@ -564,7 +566,7 @@
 int of_address_to_resource(struct device_node *dev, int index,
 			   struct resource *r)
 {
-	const u32	*addrp;
+	const __be32	*addrp;
 	u64		size;
 	unsigned int	flags;
 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c1360e0..c787c3d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -11,10 +11,12 @@
 
 #include <linux/kernel.h>
 #include <linux/initrd.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/slab.h>
 
 #ifdef CONFIG_PPC
 #include <asm/machdep.h>
@@ -22,104 +24,19 @@
 
 #include <asm/page.h>
 
-int __initdata dt_root_addr_cells;
-int __initdata dt_root_size_cells;
-
-struct boot_param_header *initial_boot_params;
-
-char *find_flat_dt_string(u32 offset)
+char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
 {
-	return ((char *)initial_boot_params) +
-		be32_to_cpu(initial_boot_params->off_dt_strings) + offset;
+	return ((char *)blob) +
+		be32_to_cpu(blob->off_dt_strings) + offset;
 }
 
 /**
- * of_scan_flat_dt - scan flattened tree blob and call callback on each.
- * @it: callback function
- * @data: context data pointer
- *
- * This function is used to scan the flattened device-tree, it is
- * used to extract the memory information at boot before we can
- * unflatten the tree
+ * of_fdt_get_property - Given a node in the given flat blob, return
+ * the property ptr
  */
-int __init of_scan_flat_dt(int (*it)(unsigned long node,
-				     const char *uname, int depth,
-				     void *data),
-			   void *data)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		be32_to_cpu(initial_boot_params->off_dt_struct);
-	int rc = 0;
-	int depth = -1;
-
-	do {
-		u32 tag = be32_to_cpup((__be32 *)p);
-		char *pathp;
-
-		p += 4;
-		if (tag == OF_DT_END_NODE) {
-			depth--;
-			continue;
-		}
-		if (tag == OF_DT_NOP)
-			continue;
-		if (tag == OF_DT_END)
-			break;
-		if (tag == OF_DT_PROP) {
-			u32 sz = be32_to_cpup((__be32 *)p);
-			p += 8;
-			if (be32_to_cpu(initial_boot_params->version) < 0x10)
-				p = ALIGN(p, sz >= 8 ? 8 : 4);
-			p += sz;
-			p = ALIGN(p, 4);
-			continue;
-		}
-		if (tag != OF_DT_BEGIN_NODE) {
-			pr_err("Invalid tag %x in flat device tree!\n", tag);
-			return -EINVAL;
-		}
-		depth++;
-		pathp = (char *)p;
-		p = ALIGN(p + strlen(pathp) + 1, 4);
-		if ((*pathp) == '/') {
-			char *lp, *np;
-			for (lp = NULL, np = pathp; *np; np++)
-				if ((*np) == '/')
-					lp = np+1;
-			if (lp != NULL)
-				pathp = lp;
-		}
-		rc = it(p, pathp, depth, data);
-		if (rc != 0)
-			break;
-	} while (1);
-
-	return rc;
-}
-
-/**
- * of_get_flat_dt_root - find the root node in the flat blob
- */
-unsigned long __init of_get_flat_dt_root(void)
-{
-	unsigned long p = ((unsigned long)initial_boot_params) +
-		be32_to_cpu(initial_boot_params->off_dt_struct);
-
-	while (be32_to_cpup((__be32 *)p) == OF_DT_NOP)
-		p += 4;
-	BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE);
-	p += 4;
-	return ALIGN(p + strlen((char *)p) + 1, 4);
-}
-
-/**
- * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
- *
- * This function can be used within scan_flattened_dt callback to get
- * access to properties
- */
-void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
-				 unsigned long *size)
+void *of_fdt_get_property(struct boot_param_header *blob,
+		       unsigned long node, const char *name,
+		       unsigned long *size)
 {
 	unsigned long p = node;
 
@@ -137,10 +54,10 @@
 		sz = be32_to_cpup((__be32 *)p);
 		noff = be32_to_cpup((__be32 *)(p + 4));
 		p += 8;
-		if (be32_to_cpu(initial_boot_params->version) < 0x10)
+		if (be32_to_cpu(blob->version) < 0x10)
 			p = ALIGN(p, sz >= 8 ? 8 : 4);
 
-		nstr = find_flat_dt_string(noff);
+		nstr = of_fdt_get_string(blob, noff);
 		if (nstr == NULL) {
 			pr_warning("Can't find property index name !\n");
 			return NULL;
@@ -156,21 +73,28 @@
 }
 
 /**
- * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
+ * of_fdt_is_compatible - Return true if given node from the given blob has
+ * compat in its compatible list
+ * @blob: A device tree blob
  * @node: node to test
  * @compat: compatible string to compare with compatible list.
+ *
+ * On match, returns a non-zero value with smaller values returned for more
+ * specific compatible values.
  */
-int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
+int of_fdt_is_compatible(struct boot_param_header *blob,
+		      unsigned long node, const char *compat)
 {
 	const char *cp;
-	unsigned long cplen, l;
+	unsigned long cplen, l, score = 0;
 
-	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
+	cp = of_fdt_get_property(blob, node, "compatible", &cplen);
 	if (cp == NULL)
 		return 0;
 	while (cplen > 0) {
+		score++;
 		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
-			return 1;
+			return score;
 		l = strlen(cp) + 1;
 		cp += l;
 		cplen -= l;
@@ -179,7 +103,28 @@
 	return 0;
 }
 
-static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
+/**
+ * of_fdt_match - Return true if node matches a list of compatible values
+ */
+int of_fdt_match(struct boot_param_header *blob, unsigned long node,
+                 const char **compat)
+{
+	unsigned int tmp, score = 0;
+
+	if (!compat)
+		return 0;
+
+	while (*compat) {
+		tmp = of_fdt_is_compatible(blob, node, *compat);
+		if (tmp && (score == 0 || (tmp < score)))
+			score = tmp;
+		compat++;
+	}
+
+	return score;
+}
+
+static void *unflatten_dt_alloc(unsigned long *mem, unsigned long size,
 				       unsigned long align)
 {
 	void *res;
@@ -193,16 +138,18 @@
 
 /**
  * unflatten_dt_node - Alloc and populate a device_node from the flat tree
+ * @blob: The parent device tree blob
  * @p: pointer to node in flat tree
  * @dad: Parent struct device_node
  * @allnextpp: pointer to ->allnext from last allocated device_node
  * @fpsize: Size of the node path up at the current depth.
  */
-unsigned long __init unflatten_dt_node(unsigned long mem,
-					unsigned long *p,
-					struct device_node *dad,
-					struct device_node ***allnextpp,
-					unsigned long fpsize)
+unsigned long unflatten_dt_node(struct boot_param_header *blob,
+				unsigned long mem,
+				unsigned long *p,
+				struct device_node *dad,
+				struct device_node ***allnextpp,
+				unsigned long fpsize)
 {
 	struct device_node *np;
 	struct property *pp, **prev_pp = NULL;
@@ -298,10 +245,10 @@
 		sz = be32_to_cpup((__be32 *)(*p));
 		noff = be32_to_cpup((__be32 *)((*p) + 4));
 		*p += 8;
-		if (be32_to_cpu(initial_boot_params->version) < 0x10)
+		if (be32_to_cpu(blob->version) < 0x10)
 			*p = ALIGN(*p, sz >= 8 ? 8 : 4);
 
-		pname = find_flat_dt_string(noff);
+		pname = of_fdt_get_string(blob, noff);
 		if (pname == NULL) {
 			pr_info("Can't find property name in list !\n");
 			break;
@@ -380,7 +327,8 @@
 		if (tag == OF_DT_NOP)
 			*p += 4;
 		else
-			mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+			mem = unflatten_dt_node(blob, mem, p, np, allnextpp,
+						fpsize);
 		tag = be32_to_cpup((__be32 *)(*p));
 	}
 	if (tag != OF_DT_END_NODE) {
@@ -391,6 +339,211 @@
 	return mem;
 }
 
+/**
+ * __unflatten_device_tree - create tree of device_nodes from flat blob
+ *
+ * unflattens a device-tree, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used.
+ * @blob: The blob to expand
+ * @mynodes: The device_node tree created by the call
+ * @dt_alloc: An allocator that provides a virtual address to memory
+ * for the resulting tree
+ */
+void __unflatten_device_tree(struct boot_param_header *blob,
+			     struct device_node **mynodes,
+			     void * (*dt_alloc)(u64 size, u64 align))
+{
+	unsigned long start, mem, size;
+	struct device_node **allnextp = mynodes;
+
+	pr_debug(" -> unflatten_device_tree()\n");
+
+	if (!blob) {
+		pr_debug("No device tree pointer\n");
+		return;
+	}
+
+	pr_debug("Unflattening device tree:\n");
+	pr_debug("magic: %08x\n", be32_to_cpu(blob->magic));
+	pr_debug("size: %08x\n", be32_to_cpu(blob->totalsize));
+	pr_debug("version: %08x\n", be32_to_cpu(blob->version));
+
+	if (be32_to_cpu(blob->magic) != OF_DT_HEADER) {
+		pr_err("Invalid device tree blob header\n");
+		return;
+	}
+
+	/* First pass, scan for size */
+	start = ((unsigned long)blob) +
+		be32_to_cpu(blob->off_dt_struct);
+	size = unflatten_dt_node(blob, 0, &start, NULL, NULL, 0);
+	size = (size | 3) + 1;
+
+	pr_debug("  size is %lx, allocating...\n", size);
+
+	/* Allocate memory for the expanded device tree */
+	mem = (unsigned long)
+		dt_alloc(size + 4, __alignof__(struct device_node));
+
+	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+	pr_debug("  unflattening %lx...\n", mem);
+
+	/* Second pass, do actual unflattening */
+	start = ((unsigned long)blob) +
+		be32_to_cpu(blob->off_dt_struct);
+	unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0);
+	if (be32_to_cpup((__be32 *)start) != OF_DT_END)
+		pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
+	if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
+		pr_warning("End of tree marker overwritten: %08x\n",
+			   be32_to_cpu(((__be32 *)mem)[size / 4]));
+	*allnextp = NULL;
+
+	pr_debug(" <- unflatten_device_tree()\n");
+}
+
+static void *kernel_tree_alloc(u64 size, u64 align)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
+
+/**
+ * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
+ *
+ * unflattens the device-tree passed by the firmware, creating the
+ * tree of struct device_node. It also fills the "name" and "type"
+ * pointers of the nodes so the normal device-tree walking functions
+ * can be used.
+ */
+void of_fdt_unflatten_tree(unsigned long *blob,
+			struct device_node **mynodes)
+{
+	struct boot_param_header *device_tree =
+		(struct boot_param_header *)blob;
+	__unflatten_device_tree(device_tree, mynodes, &kernel_tree_alloc);
+}
+EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
+
+/* Everything below here references initial_boot_params directly. */
+int __initdata dt_root_addr_cells;
+int __initdata dt_root_size_cells;
+
+struct boot_param_header *initial_boot_params;
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+/**
+ * of_scan_flat_dt - scan flattened tree blob and call callback on each.
+ * @it: callback function
+ * @data: context data pointer
+ *
+ * This function is used to scan the flattened device-tree, it is
+ * used to extract the memory information at boot before we can
+ * unflatten the tree
+ */
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+				     const char *uname, int depth,
+				     void *data),
+			   void *data)
+{
+	unsigned long p = ((unsigned long)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_struct);
+	int rc = 0;
+	int depth = -1;
+
+	do {
+		u32 tag = be32_to_cpup((__be32 *)p);
+		char *pathp;
+
+		p += 4;
+		if (tag == OF_DT_END_NODE) {
+			depth--;
+			continue;
+		}
+		if (tag == OF_DT_NOP)
+			continue;
+		if (tag == OF_DT_END)
+			break;
+		if (tag == OF_DT_PROP) {
+			u32 sz = be32_to_cpup((__be32 *)p);
+			p += 8;
+			if (be32_to_cpu(initial_boot_params->version) < 0x10)
+				p = ALIGN(p, sz >= 8 ? 8 : 4);
+			p += sz;
+			p = ALIGN(p, 4);
+			continue;
+		}
+		if (tag != OF_DT_BEGIN_NODE) {
+			pr_err("Invalid tag %x in flat device tree!\n", tag);
+			return -EINVAL;
+		}
+		depth++;
+		pathp = (char *)p;
+		p = ALIGN(p + strlen(pathp) + 1, 4);
+		if ((*pathp) == '/') {
+			char *lp, *np;
+			for (lp = NULL, np = pathp; *np; np++)
+				if ((*np) == '/')
+					lp = np+1;
+			if (lp != NULL)
+				pathp = lp;
+		}
+		rc = it(p, pathp, depth, data);
+		if (rc != 0)
+			break;
+	} while (1);
+
+	return rc;
+}
+
+/**
+ * of_get_flat_dt_root - find the root node in the flat blob
+ */
+unsigned long __init of_get_flat_dt_root(void)
+{
+	unsigned long p = ((unsigned long)initial_boot_params) +
+		be32_to_cpu(initial_boot_params->off_dt_struct);
+
+	while (be32_to_cpup((__be32 *)p) == OF_DT_NOP)
+		p += 4;
+	BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE);
+	p += 4;
+	return ALIGN(p + strlen((char *)p) + 1, 4);
+}
+
+/**
+ * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
+ *
+ * This function can be used within scan_flattened_dt callback to get
+ * access to properties
+ */
+void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
+				 unsigned long *size)
+{
+	return of_fdt_get_property(initial_boot_params, node, name, size);
+}
+
+/**
+ * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
+ * @node: node to test
+ * @compat: compatible string to compare with compatible list.
+ */
+int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
+{
+	return of_fdt_is_compatible(initial_boot_params, node, compat);
+}
+
+/**
+ * of_flat_dt_match - Return true if node matches a list of compatible values
+ */
+int __init of_flat_dt_match(unsigned long node, const char **compat)
+{
+	return of_fdt_match(initial_boot_params, node, compat);
+}
+
 #ifdef CONFIG_BLK_DEV_INITRD
 /**
  * early_init_dt_check_for_initrd - Decode initrd location from flat tree
@@ -539,6 +692,12 @@
 	return 1;
 }
 
+static void *__init early_device_tree_alloc(u64 size, u64 align)
+{
+	unsigned long mem = early_init_dt_alloc_memory_arch(size, align);
+	return __va(mem);
+}
+
 /**
  * unflatten_device_tree - create tree of device_nodes from flat blob
  *
@@ -549,58 +708,13 @@
  */
 void __init unflatten_device_tree(void)
 {
-	unsigned long start, mem, size;
-	struct device_node **allnextp = &allnodes;
-
-	pr_debug(" -> unflatten_device_tree()\n");
-
-	if (!initial_boot_params) {
-		pr_debug("No device tree pointer\n");
-		return;
-	}
-
-	pr_debug("Unflattening device tree:\n");
-	pr_debug("magic: %08x\n", be32_to_cpu(initial_boot_params->magic));
-	pr_debug("size: %08x\n", be32_to_cpu(initial_boot_params->totalsize));
-	pr_debug("version: %08x\n", be32_to_cpu(initial_boot_params->version));
-
-	if (be32_to_cpu(initial_boot_params->magic) != OF_DT_HEADER) {
-		pr_err("Invalid device tree blob header\n");
-		return;
-	}
-
-	/* First pass, scan for size */
-	start = ((unsigned long)initial_boot_params) +
-		be32_to_cpu(initial_boot_params->off_dt_struct);
-	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
-	size = (size | 3) + 1;
-
-	pr_debug("  size is %lx, allocating...\n", size);
-
-	/* Allocate memory for the expanded device tree */
-	mem = early_init_dt_alloc_memory_arch(size + 4,
-			__alignof__(struct device_node));
-	mem = (unsigned long) __va(mem);
-
-	((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
-
-	pr_debug("  unflattening %lx...\n", mem);
-
-	/* Second pass, do actual unflattening */
-	start = ((unsigned long)initial_boot_params) +
-		be32_to_cpu(initial_boot_params->off_dt_struct);
-	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
-	if (be32_to_cpup((__be32 *)start) != OF_DT_END)
-		pr_warning("Weird tag at end of tree: %08x\n", *((u32 *)start));
-	if (be32_to_cpu(((__be32 *)mem)[size / 4]) != 0xdeadbeef)
-		pr_warning("End of tree marker overwritten: %08x\n",
-			   be32_to_cpu(((__be32 *)mem)[size / 4]));
-	*allnextp = NULL;
+	__unflatten_device_tree(initial_boot_params, &allnodes,
+				early_device_tree_alloc);
 
 	/* Get pointer to OF "/chosen" node for use everywhere */
 	of_chosen = of_find_node_by_path("/chosen");
 	if (of_chosen == NULL)
 		of_chosen = of_find_node_by_path("/chosen@0");
-
-	pr_debug(" <- unflatten_device_tree()\n");
 }
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
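Note: the refactoring above splits the blob-relative helpers (of_fdt_get_property(), of_fdt_is_compatible(), unflatten_dt_node(), ...) from the initial_boot_params wrappers, so a flattened tree can now be expanded outside early boot through the newly exported of_fdt_unflatten_tree(). A hedged usage sketch; where the blob comes from (firmware file, test fixture) is left open and the foo_* names are hypothetical:

    #include <linux/of.h>
    #include <linux/of_fdt.h>

    static struct device_node *foo_root;

    static void foo_expand_blob(void *dtb)
    {
            /* dtb must be a complete flattened tree (OF_DT_HEADER magic);
             * on success foo_root heads a kzalloc()-backed node tree. */
            of_fdt_unflatten_tree((unsigned long *)dtb, &foo_root);
    }
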
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1fce00e..dcd7857 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -52,27 +52,35 @@
 
 	/* Loop over the child nodes and register a phy_device for each one */
 	for_each_child_of_node(np, child) {
-		const __be32 *addr;
+		const __be32 *paddr;
+		u32 addr;
 		int len;
 
 		/* A PHY must have a reg property in the range [0-31] */
-		addr = of_get_property(child, "reg", &len);
-		if (!addr || len < sizeof(*addr) || *addr >= 32 || *addr < 0) {
+		paddr = of_get_property(child, "reg", &len);
+		if (!paddr || len < sizeof(*paddr)) {
 			dev_err(&mdio->dev, "%s has invalid PHY address\n",
 				child->full_name);
 			continue;
 		}
 
-		if (mdio->irq) {
-			mdio->irq[*addr] = irq_of_parse_and_map(child, 0);
-			if (!mdio->irq[*addr])
-				mdio->irq[*addr] = PHY_POLL;
+		addr = be32_to_cpup(paddr);
+		if (addr >= 32) {
+			dev_err(&mdio->dev, "%s PHY address %i is too large\n",
+				child->full_name, addr);
+			continue;
 		}
 
-		phy = get_phy_device(mdio, be32_to_cpup(addr));
+		if (mdio->irq) {
+			mdio->irq[addr] = irq_of_parse_and_map(child, 0);
+			if (!mdio->irq[addr])
+				mdio->irq[addr] = PHY_POLL;
+		}
+
+		phy = get_phy_device(mdio, addr);
 		if (!phy || IS_ERR(phy)) {
 			dev_err(&mdio->dev, "error probing PHY at address %i\n",
-				*addr);
+				addr);
 			continue;
 		}
 		phy_scan_fixups(phy);
@@ -91,7 +99,7 @@
 		}
 
 		dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
-			child->name, *addr);
+			child->name, addr);
 	}
 
 	return 0;
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
new file mode 100644
index 0000000..86f334a
--- /dev/null
+++ b/drivers/of/of_net.c
@@ -0,0 +1,48 @@
+/*
+ * OF helpers for network devices.
+ *
+ * This file is released under the GPLv2
+ *
+ * Initially copied out of arch/powerpc/kernel/prom_parse.c
+ */
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/of_net.h>
+
+/**
+ * Search the device tree for the best MAC address to use.  'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address.  If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot.  For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses.  Some older U-Boots only initialized 'local-mac-address'.  In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+*/
+const void *of_get_mac_address(struct device_node *np)
+{
+	struct property *pp;
+
+	pp = of_find_property(np, "mac-address", NULL);
+	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+		return pp->value;
+
+	pp = of_find_property(np, "local-mac-address", NULL);
+	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+		return pp->value;
+
+	pp = of_find_property(np, "address", NULL);
+	if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value))
+		return pp->value;
+
+	return NULL;
+}
+EXPORT_SYMBOL(of_get_mac_address);
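Note: of_get_mac_address() centralises the mac-address / local-mac-address / address fallback chain that ethernet drivers (xilinx_emaclite, which gains the of_net.h include above, among them) otherwise open-code. A minimal sketch of the expected call site in a probe path; foo_set_mac() and the random fallback are illustrative, not mandated by the helper:

    #include <linux/etherdevice.h>
    #include <linux/of_net.h>
    #include <linux/string.h>

    static void foo_set_mac(struct net_device *ndev, struct device_node *np)
    {
            const void *mac = of_get_mac_address(np);

            if (mac)
                    memcpy(ndev->dev_addr, mac, ETH_ALEN);
            else
                    /* No valid address in the tree: use a random,
                     * locally administered one instead. */
                    random_ether_addr(ndev->dev_addr);
    }
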
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b4a07f..c01cd1a 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -633,6 +633,9 @@
  * @np: pointer to node to create device for
  * @bus_id: name to assign device
  * @parent: Linux device model parent device.
+ *
+ * Returns pointer to created platform device, or NULL if a device was not
+ * registered.  Unavailable devices will not get registered.
  */
 struct platform_device *of_platform_device_create(struct device_node *np,
 					    const char *bus_id,
@@ -640,6 +643,9 @@
 {
 	struct platform_device *dev;
 
+	if (!of_device_is_available(np))
+		return NULL;
+
 	dev = of_device_alloc(np, bus_id, parent);
 	if (!dev)
 		return NULL;
@@ -683,8 +689,9 @@
 		pr_debug("   create child: %s\n", child->full_name);
 		dev = of_platform_device_create(child, NULL, parent);
 		if (dev == NULL)
-			rc = -ENOMEM;
-		else if (!of_match_node(matches, child))
+			continue;
+
+		if (!of_match_node(matches, child))
 			continue;
 		if (rc == 0) {
 			pr_debug("   and sub busses\n");
@@ -733,10 +740,9 @@
 	if (of_match_node(matches, root)) {
 		pr_debug(" root match, create all sub devices\n");
 		dev = of_platform_device_create(root, NULL, parent);
-		if (dev == NULL) {
-			rc = -ENOMEM;
+		if (dev == NULL)
 			goto bail;
-		}
+
 		pr_debug(" create all sub busses\n");
 		rc = of_platform_bus_create(root, matches, &dev->dev);
 		goto bail;
@@ -748,9 +754,9 @@
 		pr_debug("  match: %s\n", child->full_name);
 		dev = of_platform_device_create(child, NULL, parent);
 		if (dev == NULL)
-			rc = -ENOMEM;
-		else
-			rc = of_platform_bus_create(child, matches, &dev->dev);
+			continue;
+
+		rc = of_platform_bus_create(child, matches, &dev->dev);
 		if (rc) {
 			of_node_put(child);
 			break;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5b1630e..a9523fd 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -45,6 +45,7 @@
         depends on PCI && X86 && XEN
         select HOTPLUG
         select PCI_XEN
+	select XEN_XENBUS_FRONTEND
         default y
         help
           The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index bab5204..7722108 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -36,7 +36,6 @@
 #define _ACPIPHP_H
 
 #include <linux/acpi.h>
-#include <linux/kobject.h>
 #include <linux/mutex.h>
 #include <linux/pci_hotplug.h>
 
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 2ea9cf1..b283bbe 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -24,7 +24,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kobject.h>
 #include <linux/sysfs.h>
 #include <linux/pci.h>
 #include <linux/string.h>
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7c24dce..44b0aee 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -168,8 +168,9 @@
 	u32 mask_bits = desc->masked;
 	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
-	mask_bits &= ~1;
-	mask_bits |= flag;
+	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	if (flag)
+		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
 	writel(mask_bits, desc->mask_base + offset);
 
 	return mask_bits;
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index feff3be..65c42f8 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -6,12 +6,6 @@
 #ifndef MSI_H
 #define MSI_H
 
-#define PCI_MSIX_ENTRY_SIZE		16
-#define  PCI_MSIX_ENTRY_LOWER_ADDR	0
-#define  PCI_MSIX_ENTRY_UPPER_ADDR	4
-#define  PCI_MSIX_ENTRY_DATA		8
-#define  PCI_MSIX_ENTRY_VECTOR_CTRL	12
-
 #define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
 #define msi_lower_address_reg(base)	(base + PCI_MSI_ADDRESS_LO)
 #define msi_upper_address_reg(base)	(base + PCI_MSI_ADDRESS_HI)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 24e19c5..6fe0772 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -46,9 +46,9 @@
 	struct pci_dev *pci_dev = context;
 
 	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+		pci_wakeup_event(pci_dev);
 		pci_check_pme_status(pci_dev);
 		pm_runtime_resume(&pci_dev->dev);
-		pci_wakeup_event(pci_dev);
 		if (pci_dev->subordinate)
 			pci_pme_wakeup_bus(pci_dev->subordinate);
 	}
@@ -399,6 +399,7 @@
 
 	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
 		printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
+		pcie_clear_aspm();
 		pcie_no_aspm();
 	}
 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 8a6f797..88246dd 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -338,7 +338,7 @@
 }
 
 /**
- * __pci_device_probe()
+ * __pci_device_probe - check if a driver wants to claim a specific PCI device
  * @drv: driver to call to check if it wants the PCI device
  * @pci_dev: PCI device being probed
  * 
@@ -449,7 +449,8 @@
 			return error;
 	}
 
-	return pci_restore_state(pci_dev);
+	pci_restore_state(pci_dev);
+	return 0;
 }
 
 static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index f7b68ca..775e933 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -47,6 +47,10 @@
 	if (rc)
 		return rc;
 
+	/* no ids passed actually */
+	if (ids[0] == '\0')
+		return 0;
+
 	/* add ids specified in the module parameter */
 	p = ids;
 	while ((id = strsep(&p, ","))) {
@@ -54,6 +58,9 @@
 			subdevice = PCI_ANY_ID, class=0, class_mask=0;
 		int fields;
 
+		if (!strlen(id))
+			continue;
+
 		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
 				&vendor, &device, &subvendor, &subdevice,
 				&class, &class_mask);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 63d5042..8ecaac9 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1149,7 +1149,7 @@
 		sysfs_bin_attr_init(attr);
 		attr->size = rom_size;
 		attr->attr.name = "rom";
-		attr->attr.mode = S_IRUSR;
+		attr->attr.mode = S_IRUSR | S_IWUSR;
 		attr->read = pci_read_rom;
 		attr->write = pci_write_rom;
 		retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 710c8a2..b714d78 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -937,14 +937,13 @@
  * pci_restore_state - Restore the saved state of a PCI device
  * @dev: - PCI device that we're dealing with
  */
-int 
-pci_restore_state(struct pci_dev *dev)
+void pci_restore_state(struct pci_dev *dev)
 {
 	int i;
 	u32 val;
 
 	if (!dev->state_saved)
-		return 0;
+		return;
 
 	/* PCI Express register must be restored first */
 	pci_restore_pcie_state(dev);
@@ -968,8 +967,6 @@
 	pci_restore_iov_state(dev);
 
 	dev->state_saved = false;
-
-	return 0;
 }
 
 static int do_pci_enable_device(struct pci_dev *dev, int bars)
@@ -1300,22 +1297,6 @@
 	return ret;
 }
 
-/*
- * Time to wait before the system can be put into a sleep state after reporting
- * a wakeup event signaled by a PCI device.
- */
-#define PCI_WAKEUP_COOLDOWN	100
-
-/**
- * pci_wakeup_event - Report a wakeup event related to a given PCI device.
- * @dev: Device to report the wakeup event for.
- */
-void pci_wakeup_event(struct pci_dev *dev)
-{
-	if (device_may_wakeup(&dev->dev))
-		pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
-}
-
 /**
  * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
  * @dev: Device to handle.
@@ -1327,8 +1308,8 @@
 static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
 {
 	if (pci_check_pme_status(dev)) {
-		pm_request_resume(&dev->dev);
 		pci_wakeup_event(dev);
+		pm_request_resume(&dev->dev);
 	}
 	return 0;
 }
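The pci_pme_wakeup() hunk above, like the pci-acpi.c and pcie/pme.c hunks elsewhere in this patch, makes the same ordering change: the wakeup event is reported to the PM core before the runtime resume is requested, presumably so the event is accounted for (and can keep an in-progress system suspend from completing) before the device starts resuming. A minimal sketch of the resulting pattern, reusing only helpers visible in this patch (the function name is hypothetical):

	/* Sketch only: mirrors the reordered PME handling in this patch. */
	static int pme_report_then_resume(struct pci_dev *dev)
	{
		if (!pci_check_pme_status(dev))
			return 0;

		pci_wakeup_event(dev);		/* account the wakeup event first */
		pm_request_resume(&dev->dev);	/* then schedule the runtime resume */
		return 1;
	}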
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7d33f66..f69d6e0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -74,6 +74,12 @@
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 
+static inline void pci_wakeup_event(struct pci_dev *dev)
+{
+	/* Wait 100 ms before the system can be put into a sleep state. */
+	pm_wakeup_event(&dev->dev, 100);
+}
+
 static inline bool pci_is_bridge(struct pci_dev *pci_dev)
 {
 	return !!(pci_dev->subordinate);
@@ -140,14 +146,6 @@
 static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
 #endif
 
-#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
-bool pci_aer_available(void);
-#else
-static inline void pci_no_aer(void) { }
-static inline bool pci_aer_available(void) { return false; }
-#endif
-
 static inline int pci_no_d1d2(struct pci_dev *dev)
 {
 	unsigned int parent_dstates = 0;
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 2b2b650..58ad791 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
 
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pci-acpi.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 9656e30..80c11d1 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -132,7 +132,6 @@
 
 #ifdef CONFIG_ACPI_APEI
 extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
-extern bool aer_acpi_firmware_first(void);
 #else
 static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
 {
@@ -140,8 +139,6 @@
 		return pci_dev->__aer_firmware_first;
 	return 0;
 }
-
-static inline bool aer_acpi_firmware_first(void) { return false; }
 #endif
 
 static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7122281..3188cd9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@
 	struct aspm_latency acceptable[8];
 };
 
-static int aspm_disabled, aspm_force;
+static int aspm_disabled, aspm_force, aspm_clear_state;
 static DEFINE_MUTEX(aspm_lock);
 static LIST_HEAD(link_list);
 
@@ -139,7 +139,7 @@
 {
 	/* Don't enable Clock PM if the link is not Clock PM capable */
 	if (!link->clkpm_capable && enable)
-		return;
+		enable = 0;
 	/* Need nothing if the specified equals to current state */
 	if (link->clkpm_enabled == enable)
 		return;
@@ -498,6 +498,10 @@
 	struct pci_dev *child;
 	int pos;
 	u32 reg32;
+
+	if (aspm_clear_state)
+		return -EINVAL;
+
 	/*
 	 * Some functions in a slot might not all be PCIe functions,
 	 * very strange. Disable ASPM for the whole slot
@@ -563,12 +567,15 @@
 	struct pcie_link_state *link;
 	int blacklist = !!pcie_aspm_sanity_check(pdev);
 
-	if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
+	if (!pci_is_pcie(pdev) || pdev->link_state)
 		return;
 	if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
 	    pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
 		return;
 
+	if (aspm_disabled && !aspm_clear_state)
+		return;
+
 	/* VIA has a strange chipset, root port is under a bridge */
 	if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
 	    pdev->bus->self)
@@ -641,7 +648,7 @@
 	struct pci_dev *parent = pdev->bus->self;
 	struct pcie_link_state *link, *root, *parent_link;
 
-	if (aspm_disabled || !pci_is_pcie(pdev) ||
+	if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
 	    !parent || !parent->link_state)
 		return;
 	if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
@@ -899,6 +906,12 @@
 
 __setup("pcie_aspm=", pcie_aspm_disable);
 
+void pcie_clear_aspm(void)
+{
+	if (!aspm_force)
+		aspm_clear_state = 1;
+}
+
 void pcie_no_aspm(void)
 {
 	if (!aspm_force)
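Taken together with the pci-acpi.c hunk near the top of this patch, the intent of the new aspm_clear_state flag appears to be: pcie_no_aspm() only keeps ASPM from being configured from now on, while pcie_clear_aspm() additionally lets the port code keep walking links the BIOS already set up, with the sanity check failing so that ASPM ends up disabled on them in hardware. A minimal sketch of a platform quirk combining the two, modelled on that pci-acpi.c hunk (the wrapper name is illustrative only):

	/* Sketch of the FADT "no ASPM" quirk path shown earlier in this patch. */
	static void platform_forbid_aspm(void)
	{
		pcie_clear_aspm();	/* clear ASPM state on links already configured */
		pcie_no_aspm();		/* and keep ASPM from being enabled again later */
	}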
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 2f3c904..0057344 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -26,9 +26,6 @@
 #include "../pci.h"
 #include "portdrv.h"
 
-#define PCI_EXP_RTSTA_PME	0x10000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING	0x20000 /* PME pending */
-
 /*
  * If this switch is set, MSI will not be used for PCIe PME signaling.  This
  * causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -74,22 +71,6 @@
 }
 
 /**
- * pcie_pme_clear_status - Clear root port PME interrupt status.
- * @dev: PCIe root port or event collector.
- */
-static void pcie_pme_clear_status(struct pci_dev *dev)
-{
-	int rtsta_pos;
-	u32 rtsta;
-
-	rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
-
-	pci_read_config_dword(dev, rtsta_pos, &rtsta);
-	rtsta |= PCI_EXP_RTSTA_PME;
-	pci_write_config_dword(dev, rtsta_pos, rtsta);
-}
-
-/**
  * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
  * @bus: PCI bus to scan.
  *
@@ -103,8 +84,8 @@
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		/* Skip PCIe devices in case we started from a root port. */
 		if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
-			pm_request_resume(&dev->dev);
 			pci_wakeup_event(dev);
+			pm_request_resume(&dev->dev);
 			ret = true;
 		}
 
@@ -206,8 +187,8 @@
 		/* The device is there, but we have to check its PME status. */
 		found = pci_check_pme_status(dev);
 		if (found) {
-			pm_request_resume(&dev->dev);
 			pci_wakeup_event(dev);
+			pm_request_resume(&dev->dev);
 		}
 		pci_dev_put(dev);
 	} else if (devfn) {
@@ -253,7 +234,7 @@
 			 * Clear PME status of the port.  If there are other
 			 * pending PMEs, the status will be set again.
 			 */
-			pcie_pme_clear_status(port);
+			pcie_clear_root_pme_status(port);
 
 			spin_unlock_irq(&data->lock);
 			pcie_pme_handle_request(port, rtsta & 0xffff);
@@ -378,7 +359,7 @@
 
 	port = srv->port;
 	pcie_pme_interrupt_enable(port, false);
-	pcie_pme_clear_status(port);
+	pcie_clear_root_pme_status(port);
 
 	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
 	if (ret) {
@@ -402,7 +383,7 @@
 
 	spin_lock_irq(&data->lock);
 	pcie_pme_interrupt_enable(port, false);
-	pcie_pme_clear_status(port);
+	pcie_clear_root_pme_status(port);
 	data->noirq = true;
 	spin_unlock_irq(&data->lock);
 
@@ -422,7 +403,7 @@
 
 	spin_lock_irq(&data->lock);
 	data->noirq = false;
-	pcie_pme_clear_status(port);
+	pcie_clear_root_pme_status(port);
 	pcie_pme_interrupt_enable(port, true);
 	spin_unlock_irq(&data->lock);
 
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 7b5aba0..bd00a01 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,9 +20,6 @@
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
-extern bool pcie_ports_disabled;
-extern bool pcie_ports_auto;
-
 extern struct bus_type pcie_port_bus_type;
 extern int pcie_port_device_register(struct pci_dev *dev);
 #ifdef CONFIG_PM
@@ -35,6 +32,8 @@
 
 struct pci_dev;
 
+extern void pcie_clear_root_pme_status(struct pci_dev *dev);
+
 #ifdef CONFIG_PCIE_PME
 extern bool pcie_pme_msi_disabled;
 
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index 5982b6a..a86b56e 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -33,7 +33,7 @@
  */
 int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
 {
-	acpi_status status;
+	struct acpi_pci_root *root;
 	acpi_handle handle;
 	u32 flags;
 
@@ -44,26 +44,11 @@
 	if (!handle)
 		return -EINVAL;
 
-	flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
-		| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
-		| OSC_PCI_EXPRESS_PME_CONTROL;
-
-	if (pci_aer_available()) {
-		if (aer_acpi_firmware_first())
-			dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
-		else
-			flags |= OSC_PCI_EXPRESS_AER_CONTROL;
-	}
-
-	status = acpi_pci_osc_control_set(handle, &flags,
-					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	if (ACPI_FAILURE(status)) {
-		dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
-			status);
+	root = acpi_pci_find_root(handle);
+	if (!root)
 		return -ENODEV;
-	}
 
-	dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+	flags = root->osc_control_set;
 
 	*srv_mask = PCIE_PORT_SERVICE_VC;
 	if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index a9c222d..5130d0d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -241,17 +241,17 @@
 	int cap_mask;
 	int err;
 
+	if (pcie_ports_disabled)
+		return 0;
+
 	err = pcie_port_platform_notify(dev, &cap_mask);
-	if (pcie_ports_auto) {
-		if (err) {
-			pcie_no_aspm();
-			return 0;
-		}
-	} else {
+	if (!pcie_ports_auto) {
 		cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
 				| PCIE_PORT_SERVICE_VC;
 		if (pci_aer_available())
 			cap_mask |= PCIE_PORT_SERVICE_AER;
+	} else if (err) {
+		return 0;
 	}
 
 	pos = pci_pcie_cap(dev);
@@ -349,15 +349,18 @@
 	int status, capabilities, i, nr_service;
 	int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
 
-	/* Get and check PCI Express port services */
-	capabilities = get_port_device_capability(dev);
-	if (!capabilities)
-		return -ENODEV;
-
 	/* Enable PCI Express port device */
 	status = pci_enable_device(dev);
 	if (status)
 		return status;
+
+	/* Get and check PCI Express port services */
+	capabilities = get_port_device_capability(dev);
+	if (!capabilities) {
+		pcie_no_aspm();
+		return 0;
+	}
+
 	pci_set_master(dev);
 	/*
 	 * Initialize service irqs. Don't use service devices that
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f9033e1..e0610bd 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -57,6 +57,22 @@
 
 /* global data */
 
+/**
+ * pcie_clear_root_pme_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+void pcie_clear_root_pme_status(struct pci_dev *dev)
+{
+	int rtsta_pos;
+	u32 rtsta;
+
+	rtsta_pos = pci_pcie_cap(dev) + PCI_EXP_RTSTA;
+
+	pci_read_config_dword(dev, rtsta_pos, &rtsta);
+	rtsta |= PCI_EXP_RTSTA_PME;
+	pci_write_config_dword(dev, rtsta_pos, rtsta);
+}
+
 static int pcie_portdrv_restore_config(struct pci_dev *dev)
 {
 	int retval;
@@ -69,6 +85,20 @@
 }
 
 #ifdef CONFIG_PM
+static int pcie_port_resume_noirq(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	/*
+	 * Some BIOSes forget to clear Root PME Status bits after system wakeup
+	 * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
+	 * bits now just in case (shouldn't hurt).
+	 */
+	if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+		pcie_clear_root_pme_status(pdev);
+	return 0;
+}
+
 static const struct dev_pm_ops pcie_portdrv_pm_ops = {
 	.suspend	= pcie_port_device_suspend,
 	.resume		= pcie_port_device_resume,
@@ -76,6 +106,7 @@
 	.thaw		= pcie_port_device_resume,
 	.poweroff	= pcie_port_device_suspend,
 	.restore	= pcie_port_device_resume,
+	.resume_noirq	= pcie_port_resume_noirq,
 };
 
 #define PCIE_PORTDRV_PM_OPS	(&pcie_portdrv_pm_ops)
@@ -327,10 +358,8 @@
 {
 	int retval;
 
-	if (pcie_ports_disabled) {
-		pcie_no_aspm();
-		return -EACCES;
-	}
+	if (pcie_ports_disabled)
+		return pci_register_driver(&pcie_portdriver);
 
 	dmi_check_system(pcie_portdrv_dmi_table);
 
diff --git a/drivers/pcmcia/m32r_cfc.h b/drivers/pcmcia/m32r_cfc.h
index 8146e3b..f558e1a 100644
--- a/drivers/pcmcia/m32r_cfc.h
+++ b/drivers/pcmcia/m32r_cfc.h
@@ -9,7 +9,7 @@
 #endif
 
 /*
- * M32R PC Card Controler
+ * M32R PC Card Controller
  */
 #define M32R_PCC0_BASE        0x00ef7000
 #define M32R_PCC1_BASE        0x00ef7020
diff --git a/drivers/pcmcia/m32r_pcc.h b/drivers/pcmcia/m32r_pcc.h
index e4fffe4..f95c585 100644
--- a/drivers/pcmcia/m32r_pcc.h
+++ b/drivers/pcmcia/m32r_pcc.h
@@ -5,7 +5,7 @@
 #define M32R_MAX_PCC	2
 
 /*
- * M32R PC Card Controler
+ * M32R PC Card Controller
  */
 #define M32R_PCC0_BASE        0x00ef7000
 #define M32R_PCC1_BASE        0x00ef7020
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 99d4f23..0db4827 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1198,7 +1198,7 @@
 	out_be32(M8XX_PGCRX(1),
 		 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
 
-	/* intialize the fixed memory windows */
+	/* initialize the fixed memory windows */
 
 	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
 		for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index faec777..d163bc2 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -18,12 +18,14 @@
 config ACER_WMI
 	tristate "Acer WMI Laptop Extras"
 	depends on ACPI
-	depends on LEDS_CLASS
-	depends on NEW_LEDS
+	select LEDS_CLASS
+	select NEW_LEDS
 	depends on BACKLIGHT_CLASS_DEVICE
 	depends on SERIO_I8042
+	depends on INPUT
 	depends on RFKILL || RFKILL = n
-	select ACPI_WMI
+	depends on ACPI_WMI
+	select INPUT_SPARSEKMAP
 	---help---
 	  This is a driver for newer Acer (and Wistron) laptops. It adds
 	  wireless radio and bluetooth control, and on some laptops,
@@ -131,7 +133,7 @@
 	depends on !X86_64
 	depends on EXPERIMENTAL
 	depends on ACPI
-	select ACPI_WMI
+	depends on ACPI_WMI
 	---help---
 	  This is a driver for the WMI extensions (wireless and bluetooth power
 	  control) of the HP Compaq TC1100 tablet.
@@ -226,6 +228,7 @@
 	tristate "Lenovo IdeaPad Laptop Extras"
 	depends on ACPI
 	depends on RFKILL
+	select INPUT_SPARSEKMAP
 	help
 	  This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
 
@@ -425,7 +428,10 @@
 	depends on INPUT
 	depends on EXPERIMENTAL
 	depends on BACKLIGHT_CLASS_DEVICE
+	depends on RFKILL || RFKILL = n
 	select INPUT_SPARSEKMAP
+	select LEDS_CLASS
+	select NEW_LEDS
 	---help---
 	  Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
 
@@ -510,8 +516,8 @@
 config ACPI_TOSHIBA
 	tristate "Toshiba Laptop Extras"
 	depends on ACPI
-	depends on LEDS_CLASS
-	depends on NEW_LEDS
+	select LEDS_CLASS
+	select NEW_LEDS
 	depends on BACKLIGHT_CLASS_DEVICE
 	depends on INPUT
 	depends on RFKILL || RFKILL = n
@@ -576,6 +582,15 @@
 	  some embedded Intel x86 platforms. This is not needed for PC-type
 	  machines.
 
+config INTEL_SCU_IPC_UTIL
+	tristate "Intel SCU IPC utility driver"
+	depends on INTEL_SCU_IPC
+	default y
+	---help---
+	  The IPC Util driver provides an interface with the SCU enabling
+	  low level access for debug work and updating the firmware. Say
+	  N unless you will be doing this on an Intel MID platform.
+
 config GPIO_INTEL_PMIC
 	bool "Intel PMIC GPIO support"
 	depends on INTEL_SCU_IPC && GPIOLIB
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9950ccc..4ec4ff8 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
 obj-$(CONFIG_TOSHIBA_BT_RFKILL)	+= toshiba_bluetooth.o
 obj-$(CONFIG_INTEL_SCU_IPC)	+= intel_scu_ipc.o
+obj-$(CONFIG_INTEL_SCU_IPC_UTIL)+= intel_scu_ipcutil.o
 obj-$(CONFIG_RAR_REGISTER)	+= intel_rar_register.o
 obj-$(CONFIG_INTEL_IPS)		+= intel_ips.o
 obj-$(CONFIG_GPIO_INTEL_PMIC)	+= intel_pmic_gpio.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c8c6537..c5c4b8c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -37,6 +37,9 @@
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/dmi.h>
 
 #include <acpi/acpi_drivers.h>
 
@@ -48,6 +51,7 @@
 #define ACER_ERR KERN_ERR ACER_LOGPREFIX
 #define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX
 #define ACER_INFO KERN_INFO ACER_LOGPREFIX
+#define ACER_WARNING KERN_WARNING ACER_LOGPREFIX
 
 /*
  * Magic Number
@@ -82,9 +86,82 @@
 #define AMW0_GUID2		"431F16ED-0C2B-444C-B267-27DEB140CF9C"
 #define WMID_GUID1		"6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
 #define WMID_GUID2		"95764E09-FB56-4e83-B31A-37761F60994A"
+#define WMID_GUID3		"61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
+
+/*
+ * Acer ACPI event GUIDs
+ */
+#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
 
 MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
 MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
+
+enum acer_wmi_event_ids {
+	WMID_HOTKEY_EVENT = 0x1,
+};
+
+static const struct key_entry acer_wmi_keymap[] = {
+	{KE_KEY, 0x01, {KEY_WLAN} },     /* WiFi */
+	{KE_KEY, 0x12, {KEY_BLUETOOTH} },	/* BT */
+	{KE_KEY, 0x21, {KEY_PROG1} },    /* Backup */
+	{KE_KEY, 0x22, {KEY_PROG2} },    /* Arcade */
+	{KE_KEY, 0x23, {KEY_PROG3} },    /* P_Key */
+	{KE_KEY, 0x24, {KEY_PROG4} },    /* Social networking_Key */
+	{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} },	/* Display Switch */
+	{KE_KEY, 0x82, {KEY_F22} },      /* Touch Pad On/Off */
+	{KE_END, 0}
+};
+
+static struct input_dev *acer_wmi_input_dev;
+
+struct event_return_value {
+	u8 function;
+	u8 key_num;
+	u16 device_state;
+	u32 reserved;
+} __attribute__((packed));
+
+/*
+ * GUID3 Get Device Status device flags
+ */
+#define ACER_WMID3_GDS_WIRELESS		(1<<0)	/* WiFi */
+#define ACER_WMID3_GDS_THREEG		(1<<6)	/* 3G */
+#define ACER_WMID3_GDS_BLUETOOTH	(1<<11)	/* BT */
+
+struct lm_input_params {
+	u8 function_num;        /* Function Number */
+	u16 commun_devices;     /* Communication type devices default status */
+	u16 devices;            /* Other type devices default status */
+	u8 lm_status;           /* Launch Manager Status */
+	u16 reserved;
+} __attribute__((packed));
+
+struct lm_return_value {
+	u8 error_code;          /* Error Code */
+	u8 ec_return_value;     /* EC Return Value */
+	u16 reserved;
+} __attribute__((packed));
+
+struct wmid3_gds_input_param {	/* Get Device Status input parameter */
+	u8 function_num;	/* Function Number */
+	u8 hotkey_number;	/* Hotkey Number */
+	u16 devices;		/* Get Device */
+} __attribute__((packed));
+
+struct wmid3_gds_return_value {	/* Get Device Status return value*/
+	u8 error_code;		/* Error Code */
+	u8 ec_return_value;	/* EC Return Value */
+	u16 devices;		/* Current Device Status */
+	u32 reserved;
+} __attribute__((packed));
+
+struct hotkey_function_type_aa {
+	u8 type;
+	u8 length;
+	u16 handle;
+	u16 commun_func_bitmap;
+} __attribute__((packed));
 
 /*
  * Interface capability flags
@@ -116,15 +193,19 @@
 static int brightness = -1;
 static int threeg = -1;
 static int force_series;
+static bool ec_raw_mode;
+static bool has_type_aa;
 
 module_param(mailled, int, 0444);
 module_param(brightness, int, 0444);
 module_param(threeg, int, 0444);
 module_param(force_series, int, 0444);
+module_param(ec_raw_mode, bool, 0444);
 MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
 MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
 MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
 MODULE_PARM_DESC(force_series, "Force a different laptop series");
+MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
 
 struct acer_data {
 	int mailled;
@@ -140,6 +221,7 @@
 
 static struct rfkill *wireless_rfkill;
 static struct rfkill *bluetooth_rfkill;
+static struct rfkill *threeg_rfkill;
 
 /* Each low-level interface must define at least some of the following */
 struct wmi_interface {
@@ -753,6 +835,28 @@
 	return WMI_execute_u32(method_id, (u32)value, NULL);
 }
 
+static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy)
+{
+	struct hotkey_function_type_aa *type_aa;
+
+	/* We are looking for OEM-specific Type AAh */
+	if (header->type != 0xAA)
+		return;
+
+	has_type_aa = true;
+	type_aa = (struct hotkey_function_type_aa *) header;
+
+	printk(ACER_INFO "Function bitmap for Communication Button: 0x%x\n",
+		type_aa->commun_func_bitmap);
+
+	if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS)
+		interface->capability |= ACER_CAP_WIRELESS;
+	if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG)
+		interface->capability |= ACER_CAP_THREEG;
+	if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
+		interface->capability |= ACER_CAP_BLUETOOTH;
+}
+
 static acpi_status WMID_set_capabilities(void)
 {
 	struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -773,16 +877,17 @@
 		return AE_ERROR;
 	}
 
-	/* Not sure on the meaning of the relevant bits yet to detect these */
-	interface->capability |= ACER_CAP_WIRELESS;
-	interface->capability |= ACER_CAP_THREEG;
+	dmi_walk(type_aa_dmi_decode, NULL);
+	if (!has_type_aa) {
+		interface->capability |= ACER_CAP_WIRELESS;
+		interface->capability |= ACER_CAP_THREEG;
+		if (devices & 0x10)
+			interface->capability |= ACER_CAP_BLUETOOTH;
+	}
 
 	/* WMID always provides brightness methods */
 	interface->capability |= ACER_CAP_BRIGHTNESS;
 
-	if (devices & 0x10)
-		interface->capability |= ACER_CAP_BLUETOOTH;
-
 	if (!(devices & 0x20))
 		max_brightness = 0x9;
 
@@ -861,7 +966,8 @@
 	 * capability isn't available on the given interface
 	 */
 	set_u32(mailled, ACER_CAP_MAILLED);
-	set_u32(threeg, ACER_CAP_THREEG);
+	if (!has_type_aa)
+		set_u32(threeg, ACER_CAP_THREEG);
 	set_u32(brightness, ACER_CAP_BRIGHTNESS);
 }
 
@@ -915,7 +1021,7 @@
 	return 0;
 }
 
-static struct backlight_ops acer_bl_ops = {
+static const struct backlight_ops acer_bl_ops = {
 	.get_brightness = read_brightness,
 	.update_status = update_bl_status,
 };
@@ -948,6 +1054,79 @@
 	backlight_device_unregister(acer_backlight_device);
 }
 
+static acpi_status wmid3_get_device_status(u32 *value, u16 device)
+{
+	struct wmid3_gds_return_value return_value;
+	acpi_status status;
+	union acpi_object *obj;
+	struct wmid3_gds_input_param params = {
+		.function_num = 0x1,
+		.hotkey_number = 0x01,
+		.devices = device,
+	};
+	struct acpi_buffer input = {
+		sizeof(struct wmid3_gds_input_param),
+		&params
+	};
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	obj = output.pointer;
+
+	if (!obj)
+		return AE_ERROR;
+	else if (obj->type != ACPI_TYPE_BUFFER) {
+		kfree(obj);
+		return AE_ERROR;
+	}
+	if (obj->buffer.length != 8) {
+		printk(ACER_WARNING "Unknown buffer length %d\n",
+			obj->buffer.length);
+		kfree(obj);
+		return AE_ERROR;
+	}
+
+	return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
+	kfree(obj);
+
+	if (return_value.error_code || return_value.ec_return_value)
+		printk(ACER_WARNING "Get Device Status failed: "
+			"0x%x - 0x%x\n", return_value.error_code,
+			return_value.ec_return_value);
+	else
+		*value = !!(return_value.devices & device);
+
+	return status;
+}
+
+static acpi_status get_device_status(u32 *value, u32 cap)
+{
+	if (wmi_has_guid(WMID_GUID3)) {
+		u16 device;
+
+		switch (cap) {
+		case ACER_CAP_WIRELESS:
+			device = ACER_WMID3_GDS_WIRELESS;
+			break;
+		case ACER_CAP_BLUETOOTH:
+			device = ACER_WMID3_GDS_BLUETOOTH;
+			break;
+		case ACER_CAP_THREEG:
+			device = ACER_WMID3_GDS_THREEG;
+			break;
+		default:
+			return AE_ERROR;
+		}
+		return wmid3_get_device_status(value, device);
+
+	} else {
+		return get_u32(value, cap);
+	}
+}
+
 /*
  * Rfkill devices
  */
@@ -968,6 +1147,13 @@
 			rfkill_set_sw_state(bluetooth_rfkill, !state);
 	}
 
+	if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) {
+		status = wmid3_get_device_status(&state,
+				ACER_WMID3_GDS_THREEG);
+		if (ACPI_SUCCESS(status))
+			rfkill_set_sw_state(threeg_rfkill, !state);
+	}
+
 	schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
 }
 
@@ -991,6 +1177,8 @@
 {
 	int err;
 	struct rfkill *rfkill_dev;
+	u32 state;
+	acpi_status status;
 
 	rfkill_dev = rfkill_alloc(name, dev, type,
 				  &acer_rfkill_ops,
@@ -998,6 +1186,10 @@
 	if (!rfkill_dev)
 		return ERR_PTR(-ENOMEM);
 
+	status = get_device_status(&state, cap);
+	if (ACPI_SUCCESS(status))
+		rfkill_init_sw_state(rfkill_dev, !state);
+
 	err = rfkill_register(rfkill_dev);
 	if (err) {
 		rfkill_destroy(rfkill_dev);
@@ -1024,6 +1216,19 @@
 		}
 	}
 
+	if (has_cap(ACER_CAP_THREEG)) {
+		threeg_rfkill = acer_rfkill_register(dev,
+			RFKILL_TYPE_WWAN, "acer-threeg",
+			ACER_CAP_THREEG);
+		if (IS_ERR(threeg_rfkill)) {
+			rfkill_unregister(wireless_rfkill);
+			rfkill_destroy(wireless_rfkill);
+			rfkill_unregister(bluetooth_rfkill);
+			rfkill_destroy(bluetooth_rfkill);
+			return PTR_ERR(threeg_rfkill);
+		}
+	}
+
 	schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
 
 	return 0;
@@ -1040,6 +1245,11 @@
 		rfkill_unregister(bluetooth_rfkill);
 		rfkill_destroy(bluetooth_rfkill);
 	}
+
+	if (has_cap(ACER_CAP_THREEG)) {
+		rfkill_unregister(threeg_rfkill);
+		rfkill_destroy(threeg_rfkill);
+	}
 	return;
 }
 
@@ -1050,7 +1260,12 @@
 	struct device_attribute *attr, char *buf)
 {
 	u32 result; \
-	acpi_status status = get_u32(&result, ACER_CAP_THREEG);
+	acpi_status status;
+	if (wmi_has_guid(WMID_GUID3))
+		status = wmid3_get_device_status(&result,
+				ACER_WMID3_GDS_THREEG);
+	else
+		status = get_u32(&result, ACER_CAP_THREEG);
 	if (ACPI_SUCCESS(status))
 		return sprintf(buf, "%u\n", result);
 	return sprintf(buf, "Read error\n");
@@ -1085,6 +1300,178 @@
 
 static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL);
 
+static void acer_wmi_notify(u32 value, void *context)
+{
+	struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *obj;
+	struct event_return_value return_value;
+	acpi_status status;
+
+	status = wmi_get_event_data(value, &response);
+	if (status != AE_OK) {
+		printk(ACER_WARNING "bad event status 0x%x\n", status);
+		return;
+	}
+
+	obj = (union acpi_object *)response.pointer;
+
+	if (!obj)
+		return;
+	if (obj->type != ACPI_TYPE_BUFFER) {
+		printk(ACER_WARNING "Unknown response received %d\n",
+			obj->type);
+		kfree(obj);
+		return;
+	}
+	if (obj->buffer.length != 8) {
+		printk(ACER_WARNING "Unknown buffer length %d\n",
+			obj->buffer.length);
+		kfree(obj);
+		return;
+	}
+
+	return_value = *((struct event_return_value *)obj->buffer.pointer);
+	kfree(obj);
+
+	switch (return_value.function) {
+	case WMID_HOTKEY_EVENT:
+		if (!sparse_keymap_report_event(acer_wmi_input_dev,
+				return_value.key_num, 1, true))
+			printk(ACER_WARNING "Unknown key number - 0x%x\n",
+				return_value.key_num);
+		break;
+	default:
+		printk(ACER_WARNING "Unknown function number - %d - %d\n",
+			return_value.function, return_value.key_num);
+		break;
+	}
+}
+
+static acpi_status
+wmid3_set_lm_mode(struct lm_input_params *params,
+		  struct lm_return_value *return_value)
+{
+	acpi_status status;
+	union acpi_object *obj;
+
+	struct acpi_buffer input = { sizeof(struct lm_input_params), params };
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+	status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	obj = output.pointer;
+
+	if (!obj)
+		return AE_ERROR;
+	else if (obj->type != ACPI_TYPE_BUFFER) {
+		kfree(obj);
+		return AE_ERROR;
+	}
+	if (obj->buffer.length != 4) {
+		printk(ACER_WARNING "Unknown buffer length %d\n",
+		       obj->buffer.length);
+		kfree(obj);
+		return AE_ERROR;
+	}
+
+	*return_value = *((struct lm_return_value *)obj->buffer.pointer);
+	kfree(obj);
+
+	return status;
+}
+
+static int acer_wmi_enable_ec_raw(void)
+{
+	struct lm_return_value return_value;
+	acpi_status status;
+	struct lm_input_params params = {
+		.function_num = 0x1,
+		.commun_devices = 0xFFFF,
+		.devices = 0xFFFF,
+		.lm_status = 0x00,            /* Launch Manager inactive */
+	};
+
+	status = wmid3_set_lm_mode(&params, &return_value);
+
+	if (return_value.error_code || return_value.ec_return_value)
+		printk(ACER_WARNING "Enabling EC raw mode failed: "
+		       "0x%x - 0x%x\n", return_value.error_code,
+		       return_value.ec_return_value);
+	else
+		printk(ACER_INFO "Enabled EC raw mode\n");
+
+	return status;
+}
+
+static int acer_wmi_enable_lm(void)
+{
+	struct lm_return_value return_value;
+	acpi_status status;
+	struct lm_input_params params = {
+		.function_num = 0x1,
+		.commun_devices = 0xFFFF,
+		.devices = 0xFFFF,
+		.lm_status = 0x01,            /* Launch Manager Active */
+	};
+
+	status = wmid3_set_lm_mode(&params, &return_value);
+
+	if (return_value.error_code || return_value.ec_return_value)
+		printk(ACER_WARNING "Enabling Launch Manager failed: "
+		       "0x%x - 0x%x\n", return_value.error_code,
+		       return_value.ec_return_value);
+
+	return status;
+}
+
+static int __init acer_wmi_input_setup(void)
+{
+	acpi_status status;
+	int err;
+
+	acer_wmi_input_dev = input_allocate_device();
+	if (!acer_wmi_input_dev)
+		return -ENOMEM;
+
+	acer_wmi_input_dev->name = "Acer WMI hotkeys";
+	acer_wmi_input_dev->phys = "wmi/input0";
+	acer_wmi_input_dev->id.bustype = BUS_HOST;
+
+	err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL);
+	if (err)
+		goto err_free_dev;
+
+	status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
+						acer_wmi_notify, NULL);
+	if (ACPI_FAILURE(status)) {
+		err = -EIO;
+		goto err_free_keymap;
+	}
+
+	err = input_register_device(acer_wmi_input_dev);
+	if (err)
+		goto err_uninstall_notifier;
+
+	return 0;
+
+err_uninstall_notifier:
+	wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
+err_free_keymap:
+	sparse_keymap_free(acer_wmi_input_dev);
+err_free_dev:
+	input_free_device(acer_wmi_input_dev);
+	return err;
+}
+
+static void acer_wmi_input_destroy(void)
+{
+	wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
+	sparse_keymap_free(acer_wmi_input_dev);
+	input_unregister_device(acer_wmi_input_dev);
+}
+
 /*
  * debugfs functions
  */
@@ -1327,6 +1714,26 @@
 		       "generic video driver\n");
 	}
 
+	if (wmi_has_guid(WMID_GUID3)) {
+		if (ec_raw_mode) {
+			if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) {
+				printk(ACER_ERR "Cannot enable EC raw mode\n");
+				return -ENODEV;
+			}
+		} else if (ACPI_FAILURE(acer_wmi_enable_lm())) {
+			printk(ACER_ERR "Cannot enable Launch Manager mode\n");
+			return -ENODEV;
+		}
+	} else if (ec_raw_mode) {
+		printk(ACER_INFO "No WMID EC raw mode enable method\n");
+	}
+
+	if (wmi_has_guid(ACERWMID_EVENT_GUID)) {
+		err = acer_wmi_input_setup();
+		if (err)
+			return err;
+	}
+
 	err = platform_driver_register(&acer_platform_driver);
 	if (err) {
 		printk(ACER_ERR "Unable to register platform driver.\n");
@@ -1368,11 +1775,17 @@
 error_device_alloc:
 	platform_driver_unregister(&acer_platform_driver);
 error_platform_register:
+	if (wmi_has_guid(ACERWMID_EVENT_GUID))
+		acer_wmi_input_destroy();
+
 	return err;
 }
 
 static void __exit acer_wmi_exit(void)
 {
+	if (wmi_has_guid(ACERWMID_EVENT_GUID))
+		acer_wmi_input_destroy();
+
 	remove_sysfs(acer_platform_device);
 	remove_debugfs();
 	platform_device_unregister(acer_platform_device);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d235f44..f3aa6a7 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -640,7 +640,7 @@
 	return asus_lcd_set(asus, value);
 }
 
-static struct backlight_ops asusbl_ops = {
+static const struct backlight_ops asusbl_ops = {
 	.get_brightness = asus_read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ca05aef..4633fd8 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1467,7 +1467,7 @@
 	return 0;
 }
 
-static struct backlight_ops asus_backlight_data = {
+static const struct backlight_ops asus_backlight_data = {
 	.get_brightness = read_brightness,
 	.update_status  = set_brightness_status,
 };
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 341cbfe..9111354 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -522,18 +522,20 @@
 	acpi_status status;
 	acpi_handle handle;
 	unsigned long long state;
+	bool is_blocked;
 
 	handle = data;
 	status = cmpc_get_rfkill_wlan(handle, &state);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
-	if (blocked)
-		state &= ~1;
-	else
-		state |= 1;
-	status = cmpc_set_rfkill_wlan(handle, state);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
+	/* Check if we really need to call cmpc_set_rfkill_wlan */
+	is_blocked = state & 1 ? false : true;
+	if (is_blocked != blocked) {
+		state = blocked ? 0 : 1;
+		status = cmpc_set_rfkill_wlan(handle, state);
+		if (ACPI_FAILURE(status))
+			return -ENODEV;
+	}
 	return 0;
 }
 
@@ -653,8 +655,9 @@
 
 	if ((event & 0x0F) < ARRAY_SIZE(cmpc_keys_codes))
 		code = cmpc_keys_codes[event & 0x0F];
-	inputdev = dev_get_drvdata(&dev->dev);;
+	inputdev = dev_get_drvdata(&dev->dev);
 	input_report_key(inputdev, code, !(event & 0x10));
+	input_sync(inputdev);
 }
 
 static void cmpc_keys_idev_init(struct input_dev *inputdev)
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 097083c..034572b 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -872,6 +872,14 @@
 		},
 		.callback = dmi_check_cb_extra
 	},
+	{
+		.ident = "KHLB2",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "KHLB2"),
+			DMI_MATCH(DMI_BOARD_VERSION, "REFERENCE"),
+		},
+		.callback = dmi_check_cb_extra
+	},
 	{ }
 };
 
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index cf8a89a..34657f9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -546,7 +546,7 @@
 	return buffer->output[1];
 }
 
-static struct backlight_ops dell_ops = {
+static const struct backlight_ops dell_ops = {
 	.get_brightness = dell_get_intensity,
 	.update_status  = dell_send_intensity,
 };
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index b2edfdc..49d9ad7 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -529,6 +529,15 @@
 	queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
 }
 
+static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
+{
+	struct eeepc_laptop *eeepc;
+
+	eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
+
+	return get_acpi(eeepc, CM_ASL_TPD);
+}
+
 static int eeepc_led_init(struct eeepc_laptop *eeepc)
 {
 	int rv;
@@ -543,6 +552,8 @@
 
 	eeepc->tpd_led.name = "eeepc::touchpad";
 	eeepc->tpd_led.brightness_set = tpd_led_set;
+	if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
+		eeepc->tpd_led.brightness_get = tpd_led_get;
 	eeepc->tpd_led.max_brightness = 1;
 
 	rv = led_classdev_register(&eeepc->platform_device->dev,
@@ -1115,7 +1126,7 @@
 	return set_brightness(bd, bd->props.brightness);
 }
 
-static struct backlight_ops eeepcbl_ops = {
+static const struct backlight_ops eeepcbl_ops = {
 	.get_brightness = read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 0d50fbb..4d38f98 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -2,6 +2,7 @@
  * Eee PC WMI hotkey driver
  *
  * Copyright(C) 2010 Intel Corporation.
+ * Copyright(C) 2010 Corentin Chary <corentin.chary@gmail.com>
  *
  * Portions based on wistron_btns.c:
  * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
@@ -34,6 +35,10 @@
 #include <linux/input/sparse-keymap.h>
 #include <linux/fb.h>
 #include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/rfkill.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include <linux/platform_device.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
@@ -44,6 +49,8 @@
 MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
 MODULE_LICENSE("GPL");
 
+#define EEEPC_ACPI_HID		"ASUS010" /* old _HID used in eeepc-laptop */
+
 #define EEEPC_WMI_EVENT_GUID	"ABBC0F72-8EA1-11D1-00A0-C90629100000"
 #define EEEPC_WMI_MGMT_GUID	"97845ED0-4E6D-11DE-8A39-0800200C9A66"
 
@@ -60,6 +67,10 @@
 #define EEEPC_WMI_METHODID_CFVS	0x53564643
 
 #define EEEPC_WMI_DEVID_BACKLIGHT	0x00050012
+#define EEEPC_WMI_DEVID_TPDLED		0x00100011
+#define EEEPC_WMI_DEVID_WLAN		0x00010011
+#define EEEPC_WMI_DEVID_BLUETOOTH	0x00010013
+#define EEEPC_WMI_DEVID_WWAN3G		0x00010019
 
 static const struct key_entry eeepc_wmi_keymap[] = {
 	/* Sleep already handled via generic ACPI code */
@@ -83,11 +94,37 @@
 	u32	ctrl_param;
 };
 
+/*
+ * eeepc-wmi/    - debugfs root directory
+ *   dev_id      - current dev_id
+ *   ctrl_param  - current ctrl_param
+ *   devs        - call DEVS(dev_id, ctrl_param) and print result
+ *   dsts        - call DSTS(dev_id)  and print result
+ */
+struct eeepc_wmi_debug {
+	struct dentry *root;
+	u32 dev_id;
+	u32 ctrl_param;
+};
+
 struct eeepc_wmi {
 	struct input_dev *inputdev;
 	struct backlight_device *backlight_device;
+	struct platform_device *platform_device;
+
+	struct led_classdev tpd_led;
+	int tpd_led_wk;
+	struct workqueue_struct *led_workqueue;
+	struct work_struct tpd_led_work;
+
+	struct rfkill *wlan_rfkill;
+	struct rfkill *bluetooth_rfkill;
+	struct rfkill *wwan3g_rfkill;
+
+	struct eeepc_wmi_debug debug;
 };
 
+/* Only used in eeepc_wmi_init() and eeepc_wmi_exit() */
 static struct platform_device *platform_device;
 
 static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
@@ -101,7 +138,7 @@
 	eeepc->inputdev->name = "Eee PC WMI hotkeys";
 	eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
 	eeepc->inputdev->id.bustype = BUS_HOST;
-	eeepc->inputdev->dev.parent = &platform_device->dev;
+	eeepc->inputdev->dev.parent = &eeepc->platform_device->dev;
 
 	err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
 	if (err)
@@ -130,7 +167,7 @@
 	eeepc->inputdev = NULL;
 }
 
-static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
+static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *retval)
 {
 	struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
 	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -150,8 +187,8 @@
 	else
 		tmp = 0;
 
-	if (ctrl_param)
-		*ctrl_param = tmp;
+	if (retval)
+		*retval = tmp;
 
 	kfree(obj);
 
@@ -159,7 +196,8 @@
 
 }
 
-static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
+static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
+					  u32 *retval)
 {
 	struct bios_args args = {
 		.dev_id = dev_id,
@@ -168,34 +206,281 @@
 	struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
 	acpi_status status;
 
-	status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
-			1, EEEPC_WMI_METHODID_DEVS, &input, NULL);
+	if (!retval) {
+		status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1,
+					     EEEPC_WMI_METHODID_DEVS,
+					     &input, NULL);
+	} else {
+		struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+		union acpi_object *obj;
+		u32 tmp;
+
+		status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID, 1,
+					     EEEPC_WMI_METHODID_DEVS,
+					     &input, &output);
+
+		if (ACPI_FAILURE(status))
+			return status;
+
+		obj = (union acpi_object *)output.pointer;
+		if (obj && obj->type == ACPI_TYPE_INTEGER)
+			tmp = (u32)obj->integer.value;
+		else
+			tmp = 0;
+
+		*retval = tmp;
+
+		kfree(obj);
+	}
 
 	return status;
 }
 
-static int read_brightness(struct backlight_device *bd)
+/*
+ * LEDs
+ */
+/*
+ * These functions actually update the LEDs, and are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Eeepc ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+static void tpd_led_update(struct work_struct *work)
 {
-	static u32 ctrl_param;
+	int ctrl_param;
+	struct eeepc_wmi *eeepc;
+
+	eeepc = container_of(work, struct eeepc_wmi, tpd_led_work);
+
+	ctrl_param = eeepc->tpd_led_wk;
+	eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_TPDLED, ctrl_param, NULL);
+}
+
+static void tpd_led_set(struct led_classdev *led_cdev,
+			enum led_brightness value)
+{
+	struct eeepc_wmi *eeepc;
+
+	eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led);
+
+	eeepc->tpd_led_wk = !!value;
+	queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
+}
+
+static int read_tpd_state(struct eeepc_wmi *eeepc)
+{
+	u32 retval;
 	acpi_status status;
 
-	status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param);
+	status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_TPDLED, &retval);
+
+	if (ACPI_FAILURE(status))
+		return -1;
+	else if (!retval || retval == 0x00060000)
+		/*
+		 * If the touchpad LED is present, DSTS will set some bits,
+		 * usually 0x00020000.
+		 * 0x00060000 means that the device is not supported.
+		 */
+		return -ENODEV;
+	else
+		/* Status is stored in the first bit */
+		return retval & 0x1;
+}
+
+static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
+{
+	struct eeepc_wmi *eeepc;
+
+	eeepc = container_of(led_cdev, struct eeepc_wmi, tpd_led);
+
+	return read_tpd_state(eeepc);
+}
+
+static int eeepc_wmi_led_init(struct eeepc_wmi *eeepc)
+{
+	int rv;
+
+	if (read_tpd_state(eeepc) < 0)
+		return 0;
+
+	eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
+	if (!eeepc->led_workqueue)
+		return -ENOMEM;
+	INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
+
+	eeepc->tpd_led.name = "eeepc::touchpad";
+	eeepc->tpd_led.brightness_set = tpd_led_set;
+	eeepc->tpd_led.brightness_get = tpd_led_get;
+	eeepc->tpd_led.max_brightness = 1;
+
+	rv = led_classdev_register(&eeepc->platform_device->dev,
+				   &eeepc->tpd_led);
+	if (rv) {
+		destroy_workqueue(eeepc->led_workqueue);
+		return rv;
+	}
+
+	return 0;
+}
+
+static void eeepc_wmi_led_exit(struct eeepc_wmi *eeepc)
+{
+	if (eeepc->tpd_led.dev)
+		led_classdev_unregister(&eeepc->tpd_led);
+	if (eeepc->led_workqueue)
+		destroy_workqueue(eeepc->led_workqueue);
+}
+
+/*
+ * Rfkill devices
+ */
+static int eeepc_rfkill_set(void *data, bool blocked)
+{
+	int dev_id = (unsigned long)data;
+	u32 ctrl_param = !blocked;
+
+	return eeepc_wmi_set_devstate(dev_id, ctrl_param, NULL);
+}
+
+static void eeepc_rfkill_query(struct rfkill *rfkill, void *data)
+{
+	int dev_id = (unsigned long)data;
+	u32 retval;
+	acpi_status status;
+
+	status = eeepc_wmi_get_devstate(dev_id, &retval);
+
+	if (ACPI_FAILURE(status))
+		return;
+
+	rfkill_set_sw_state(rfkill, !(retval & 0x1));
+}
+
+static const struct rfkill_ops eeepc_rfkill_ops = {
+	.set_block = eeepc_rfkill_set,
+	.query = eeepc_rfkill_query,
+};
+
+static int eeepc_new_rfkill(struct eeepc_wmi *eeepc,
+			    struct rfkill **rfkill,
+			    const char *name,
+			    enum rfkill_type type, int dev_id)
+{
+	int result;
+	u32 retval;
+	acpi_status status;
+
+	status = eeepc_wmi_get_devstate(dev_id, &retval);
+
+	if (ACPI_FAILURE(status))
+		return -1;
+
+	/* If the device is present, DSTS will always set some bits
+	 * 0x00070000 - 1110000000000000000 - device supported
+	 * 0x00060000 - 1100000000000000000 - not supported
+	 * 0x00020000 - 0100000000000000000 - device supported
+	 * 0x00010000 - 0010000000000000000 - not supported / special mode ?
+	 */
+	if (!retval || retval == 0x00060000)
+		return -ENODEV;
+
+	*rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
+			       &eeepc_rfkill_ops, (void *)(long)dev_id);
+
+	if (!*rfkill)
+		return -EINVAL;
+
+	rfkill_init_sw_state(*rfkill, !(retval & 0x1));
+	result = rfkill_register(*rfkill);
+	if (result) {
+		rfkill_destroy(*rfkill);
+		*rfkill = NULL;
+		return result;
+	}
+	return 0;
+}
+
+static void eeepc_wmi_rfkill_exit(struct eeepc_wmi *eeepc)
+{
+	if (eeepc->wlan_rfkill) {
+		rfkill_unregister(eeepc->wlan_rfkill);
+		rfkill_destroy(eeepc->wlan_rfkill);
+		eeepc->wlan_rfkill = NULL;
+	}
+	if (eeepc->bluetooth_rfkill) {
+		rfkill_unregister(eeepc->bluetooth_rfkill);
+		rfkill_destroy(eeepc->bluetooth_rfkill);
+		eeepc->bluetooth_rfkill = NULL;
+	}
+	if (eeepc->wwan3g_rfkill) {
+		rfkill_unregister(eeepc->wwan3g_rfkill);
+		rfkill_destroy(eeepc->wwan3g_rfkill);
+		eeepc->wwan3g_rfkill = NULL;
+	}
+}
+
+static int eeepc_wmi_rfkill_init(struct eeepc_wmi *eeepc)
+{
+	int result = 0;
+
+	result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
+				  "eeepc-wlan", RFKILL_TYPE_WLAN,
+				  EEEPC_WMI_DEVID_WLAN);
+
+	if (result && result != -ENODEV)
+		goto exit;
+
+	result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
+				  "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
+				  EEEPC_WMI_DEVID_BLUETOOTH);
+
+	if (result && result != -ENODEV)
+		goto exit;
+
+	result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
+				  "eeepc-wwan3g", RFKILL_TYPE_WWAN,
+				  EEEPC_WMI_DEVID_WWAN3G);
+
+	if (result && result != -ENODEV)
+		goto exit;
+
+exit:
+	if (result && result != -ENODEV)
+		eeepc_wmi_rfkill_exit(eeepc);
+
+	if (result == -ENODEV)
+		result = 0;
+
+	return result;
+}
+
+/*
+ * Backlight
+ */
+static int read_brightness(struct backlight_device *bd)
+{
+	u32 retval;
+	acpi_status status;
+
+	status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &retval);
 
 	if (ACPI_FAILURE(status))
 		return -1;
 	else
-		return ctrl_param & 0xFF;
+		return retval & 0xFF;
 }
 
 static int update_bl_status(struct backlight_device *bd)
 {
 
-	static u32 ctrl_param;
+	u32 ctrl_param;
 	acpi_status status;
 
 	ctrl_param = bd->props.brightness;
 
-	status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param);
+	status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT,
+					ctrl_param, NULL);
 
 	if (ACPI_FAILURE(status))
 		return -1;
@@ -234,7 +519,7 @@
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.max_brightness = 15;
 	bd = backlight_device_register(EEEPC_WMI_FILE,
-				       &platform_device->dev, eeepc,
+				       &eeepc->platform_device->dev, eeepc,
 				       &eeepc_wmi_bl_ops, &props);
 	if (IS_ERR(bd)) {
 		pr_err("Could not register backlight device\n");
@@ -321,65 +606,240 @@
 
 static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
 
+static struct attribute *platform_attributes[] = {
+	&dev_attr_cpufv.attr,
+	NULL
+};
+
+static struct attribute_group platform_attribute_group = {
+	.attrs = platform_attributes
+};
+
 static void eeepc_wmi_sysfs_exit(struct platform_device *device)
 {
-	device_remove_file(&device->dev, &dev_attr_cpufv);
+	sysfs_remove_group(&device->dev.kobj, &platform_attribute_group);
 }
 
 static int eeepc_wmi_sysfs_init(struct platform_device *device)
 {
-	int retval = -ENOMEM;
-
-	retval = device_create_file(&device->dev, &dev_attr_cpufv);
-	if (retval)
-		goto error_sysfs;
-
-	return 0;
-
-error_sysfs:
-	eeepc_wmi_sysfs_exit(platform_device);
-	return retval;
+	return sysfs_create_group(&device->dev.kobj, &platform_attribute_group);
 }
 
-static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
+/*
+ * Platform device
+ */
+static int __init eeepc_wmi_platform_init(struct eeepc_wmi *eeepc)
 {
-	struct eeepc_wmi *eeepc;
 	int err;
-	acpi_status status;
 
-	eeepc = platform_get_drvdata(device);
+	eeepc->platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
+	if (!eeepc->platform_device)
+		return -ENOMEM;
+	platform_set_drvdata(eeepc->platform_device, eeepc);
 
-	err = eeepc_wmi_input_init(eeepc);
+	err = platform_device_add(eeepc->platform_device);
 	if (err)
-		goto error_input;
+		goto fail_platform_device;
 
-	if (!acpi_video_backlight_support()) {
-		err = eeepc_wmi_backlight_init(eeepc);
-		if (err)
-			goto error_backlight;
-	} else
-		pr_info("Backlight controlled by ACPI video driver\n");
+	err = eeepc_wmi_sysfs_init(eeepc->platform_device);
+	if (err)
+		goto fail_sysfs;
+	return 0;
 
-	status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
-					eeepc_wmi_notify, eeepc);
-	if (ACPI_FAILURE(status)) {
-		pr_err("Unable to register notify handler - %d\n",
-			status);
-		err = -ENODEV;
-		goto error_wmi;
+fail_sysfs:
+	platform_device_del(eeepc->platform_device);
+fail_platform_device:
+	platform_device_put(eeepc->platform_device);
+	return err;
+}
+
+static void eeepc_wmi_platform_exit(struct eeepc_wmi *eeepc)
+{
+	eeepc_wmi_sysfs_exit(eeepc->platform_device);
+	platform_device_unregister(eeepc->platform_device);
+}
+
+/*
+ * debugfs
+ */
+struct eeepc_wmi_debugfs_node {
+	struct eeepc_wmi *eeepc;
+	char *name;
+	int (*show)(struct seq_file *m, void *data);
+};
+
+static int show_dsts(struct seq_file *m, void *data)
+{
+	struct eeepc_wmi *eeepc = m->private;
+	acpi_status status;
+	u32 retval = -1;
+
+	status = eeepc_wmi_get_devstate(eeepc->debug.dev_id, &retval);
+
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	seq_printf(m, "DSTS(%x) = %x\n", eeepc->debug.dev_id, retval);
+
+	return 0;
+}
+
+static int show_devs(struct seq_file *m, void *data)
+{
+	struct eeepc_wmi *eeepc = m->private;
+	acpi_status status;
+	u32 retval = -1;
+
+	status = eeepc_wmi_set_devstate(eeepc->debug.dev_id,
+					eeepc->debug.ctrl_param, &retval);
+	if (ACPI_FAILURE(status))
+		return -EIO;
+
+	seq_printf(m, "DEVS(%x, %x) = %x\n", eeepc->debug.dev_id,
+		   eeepc->debug.ctrl_param, retval);
+
+	return 0;
+}
+
+static struct eeepc_wmi_debugfs_node eeepc_wmi_debug_files[] = {
+	{ NULL, "devs", show_devs },
+	{ NULL, "dsts", show_dsts },
+};
+
+static int eeepc_wmi_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct eeepc_wmi_debugfs_node *node = inode->i_private;
+
+	return single_open(file, node->show, node->eeepc);
+}
+
+static const struct file_operations eeepc_wmi_debugfs_io_ops = {
+	.owner = THIS_MODULE,
+	.open  = eeepc_wmi_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void eeepc_wmi_debugfs_exit(struct eeepc_wmi *eeepc)
+{
+	debugfs_remove_recursive(eeepc->debug.root);
+}
+
+static int eeepc_wmi_debugfs_init(struct eeepc_wmi *eeepc)
+{
+	struct dentry *dent;
+	int i;
+
+	eeepc->debug.root = debugfs_create_dir(EEEPC_WMI_FILE, NULL);
+	if (!eeepc->debug.root) {
+		pr_err("failed to create debugfs directory\n");
+		goto error_debugfs;
+	}
+
+	dent = debugfs_create_x32("dev_id", S_IRUGO|S_IWUSR,
+				  eeepc->debug.root, &eeepc->debug.dev_id);
+	if (!dent)
+		goto error_debugfs;
+
+	dent = debugfs_create_x32("ctrl_param", S_IRUGO|S_IWUSR,
+				  eeepc->debug.root, &eeepc->debug.ctrl_param);
+	if (!dent)
+		goto error_debugfs;
+
+	for (i = 0; i < ARRAY_SIZE(eeepc_wmi_debug_files); i++) {
+		struct eeepc_wmi_debugfs_node *node = &eeepc_wmi_debug_files[i];
+
+		node->eeepc = eeepc;
+		dent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
+					   eeepc->debug.root, node,
+					   &eeepc_wmi_debugfs_io_ops);
+		if (!dent) {
+			pr_err("failed to create debug file: %s\n", node->name);
+			goto error_debugfs;
+		}
 	}
 
 	return 0;
 
-error_wmi:
-	eeepc_wmi_backlight_exit(eeepc);
-error_backlight:
-	eeepc_wmi_input_exit(eeepc);
-error_input:
-	return err;
+error_debugfs:
+	eeepc_wmi_debugfs_exit(eeepc);
+	return -ENOMEM;
 }
 
-static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
+/*
+ * WMI Driver
+ */
+static struct platform_device * __init eeepc_wmi_add(void)
+{
+	struct eeepc_wmi *eeepc;
+	acpi_status status;
+	int err;
+
+	eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
+	if (!eeepc)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Register the platform device first.  It is used as a parent for the
+	 * sub-devices below.
+	 */
+	err = eeepc_wmi_platform_init(eeepc);
+	if (err)
+		goto fail_platform;
+
+	err = eeepc_wmi_input_init(eeepc);
+	if (err)
+		goto fail_input;
+
+	err = eeepc_wmi_led_init(eeepc);
+	if (err)
+		goto fail_leds;
+
+	err = eeepc_wmi_rfkill_init(eeepc);
+	if (err)
+		goto fail_rfkill;
+
+	if (!acpi_video_backlight_support()) {
+		err = eeepc_wmi_backlight_init(eeepc);
+		if (err)
+			goto fail_backlight;
+	} else
+		pr_info("Backlight controlled by ACPI video driver\n");
+
+	status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
+					    eeepc_wmi_notify, eeepc);
+	if (ACPI_FAILURE(status)) {
+		pr_err("Unable to register notify handler - %d\n",
+			status);
+		err = -ENODEV;
+		goto fail_wmi_handler;
+	}
+
+	err = eeepc_wmi_debugfs_init(eeepc);
+	if (err)
+		goto fail_debugfs;
+
+	return eeepc->platform_device;
+
+fail_debugfs:
+	wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
+fail_wmi_handler:
+	eeepc_wmi_backlight_exit(eeepc);
+fail_backlight:
+	eeepc_wmi_rfkill_exit(eeepc);
+fail_rfkill:
+	eeepc_wmi_led_exit(eeepc);
+fail_leds:
+	eeepc_wmi_input_exit(eeepc);
+fail_input:
+	eeepc_wmi_platform_exit(eeepc);
+fail_platform:
+	kfree(eeepc);
+	return ERR_PTR(err);
+}
+
+static int eeepc_wmi_remove(struct platform_device *device)
 {
 	struct eeepc_wmi *eeepc;
 
@@ -387,7 +847,12 @@
 	wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
 	eeepc_wmi_backlight_exit(eeepc);
 	eeepc_wmi_input_exit(eeepc);
+	eeepc_wmi_led_exit(eeepc);
+	eeepc_wmi_rfkill_exit(eeepc);
+	eeepc_wmi_debugfs_exit(eeepc);
+	eeepc_wmi_platform_exit(eeepc);
 
+	kfree(eeepc);
 	return 0;
 }
 
@@ -396,13 +861,31 @@
 		.name = EEEPC_WMI_FILE,
 		.owner = THIS_MODULE,
 	},
-	.probe = eeepc_wmi_platform_probe,
-	.remove = __devexit_p(eeepc_wmi_platform_remove),
 };
 
+static acpi_status __init eeepc_wmi_parse_device(acpi_handle handle, u32 level,
+						 void *context, void **retval)
+{
+	pr_warning("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
+	*(bool *)context = true;
+	return AE_CTRL_TERMINATE;
+}
+
+static int __init eeepc_wmi_check_atkd(void)
+{
+	acpi_status status;
+	bool found = false;
+
+	status = acpi_get_devices(EEEPC_ACPI_HID, eeepc_wmi_parse_device,
+				  &found, NULL);
+
+	if (ACPI_FAILURE(status) || !found)
+		return 0;
+	return -1;
+}
+
 static int __init eeepc_wmi_init(void)
 {
-	struct eeepc_wmi *eeepc;
 	int err;
 
 	if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
@@ -411,58 +894,40 @@
 		return -ENODEV;
 	}
 
-	eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
-	if (!eeepc)
-		return -ENOMEM;
-
-	platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
-	if (!platform_device) {
-		pr_warning("Unable to allocate platform device\n");
-		err = -ENOMEM;
-		goto fail_platform;
+	if (eeepc_wmi_check_atkd()) {
+		pr_warning("WMI device present, but legacy ATKD device is also "
+			   "present and enabled.\n");
+		pr_warning("You probably booted with acpi_osi=\"Linux\" or "
+			   "acpi_osi=\"!Windows 2009\"\n");
+		pr_warning("Can't load eeepc-wmi, use default acpi_osi "
+			   "(preferred) or eeepc-laptop\n");
+		return -ENODEV;
 	}
 
-	err = platform_device_add(platform_device);
-	if (err) {
-		pr_warning("Unable to add platform device\n");
-		goto put_dev;
+	platform_device = eeepc_wmi_add();
+	if (IS_ERR(platform_device)) {
+		err = PTR_ERR(platform_device);
+		goto fail_eeepc_wmi;
 	}
 
-	platform_set_drvdata(platform_device, eeepc);
-
 	err = platform_driver_register(&platform_driver);
 	if (err) {
 		pr_warning("Unable to register platform driver\n");
-		goto del_dev;
+		goto fail_platform_driver;
 	}
 
-	err = eeepc_wmi_sysfs_init(platform_device);
-	if (err)
-		goto del_sysfs;
-
 	return 0;
 
-del_sysfs:
-	eeepc_wmi_sysfs_exit(platform_device);
-del_dev:
-	platform_device_del(platform_device);
-put_dev:
-	platform_device_put(platform_device);
-fail_platform:
-	kfree(eeepc);
-
+fail_platform_driver:
+	eeepc_wmi_remove(platform_device);
+fail_eeepc_wmi:
 	return err;
 }
 
 static void __exit eeepc_wmi_exit(void)
 {
-	struct eeepc_wmi *eeepc;
-
-	eeepc_wmi_sysfs_exit(platform_device);
-	eeepc = platform_get_drvdata(platform_device);
+	eeepc_wmi_remove(platform_device);
 	platform_driver_unregister(&platform_driver);
-	platform_device_unregister(platform_device);
-	kfree(eeepc);
 }
 
 module_init(eeepc_wmi_init);
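The debugfs files added to eeepc-wmi above (dev_id, ctrl_param, devs, dsts) can be exercised from user space by writing a device ID and then reading the DSTS or DEVS result. A hypothetical sketch, assuming debugfs is mounted at /sys/kernel/debug and that EEEPC_WMI_FILE expands to "eeepc-wmi" (both assumptions, not taken from this patch):

	/* Hypothetical user-space probe of the eeepc-wmi debugfs interface. */
	#include <stdio.h>

	int main(void)
	{
		char line[64];
		FILE *f = fopen("/sys/kernel/debug/eeepc-wmi/dev_id", "w");

		if (!f)
			return 1;
		fprintf(f, "0x00050012\n");	/* EEEPC_WMI_DEVID_BACKLIGHT */
		fclose(f);

		f = fopen("/sys/kernel/debug/eeepc-wmi/dsts", "r");
		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "DSTS(50012) = ..." */
		fclose(f);
		return 0;
	}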
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index f44cd26..95e3b09 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -437,7 +437,7 @@
 	return ret;
 }
 
-static struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsubl_ops = {
 	.get_brightness = bl_get_brightness,
 	.update_status = bl_update_status,
 };
@@ -689,7 +689,7 @@
 	if (error)
 		goto err_free_input_dev;
 
-	result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
+	result = acpi_bus_update_power(fujitsu->acpi_handle, &state);
 	if (result) {
 		printk(KERN_ERR "Error reading power state\n");
 		goto err_unregister_input_dev;
@@ -857,7 +857,7 @@
 	if (error)
 		goto err_free_input_dev;
 
-	result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state);
+	result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
 	if (result) {
 		printk(KERN_ERR "Error reading power state\n");
 		goto err_unregister_input_dev;
@@ -1240,7 +1240,7 @@
 MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
 MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
 
-static struct pnp_device_id pnp_ids[] = {
+static struct pnp_device_id pnp_ids[] __used = {
 	{.id = "FUJ02bf"},
 	{.id = "FUJ02B1"},
 	{.id = "FUJ02E3"},
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 5ff1220..114d952 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1,5 +1,5 @@
 /*
- *  ideapad_acpi.c - Lenovo IdeaPad ACPI Extras
+ *  ideapad-laptop.c - Lenovo IdeaPad ACPI Extras
  *
  *  Copyright © 2010 Intel Corporation
  *  Copyright © 2010 David Woodhouse <dwmw2@infradead.org>
@@ -27,31 +27,19 @@
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
 
-#define IDEAPAD_DEV_CAMERA	0
-#define IDEAPAD_DEV_WLAN	1
-#define IDEAPAD_DEV_BLUETOOTH	2
-#define IDEAPAD_DEV_3G		3
-#define IDEAPAD_DEV_KILLSW	4
+#define IDEAPAD_RFKILL_DEV_NUM	(3)
 
 struct ideapad_private {
-	acpi_handle handle;
-	struct rfkill *rfk[5];
-} *ideapad_priv;
-
-static struct {
-	char *name;
-	int cfgbit;
-	int opcode;
-	int type;
-} ideapad_rfk_data[] = {
-	{ "ideapad_camera",	19, 0x1E, NUM_RFKILL_TYPES },
-	{ "ideapad_wlan",	18, 0x15, RFKILL_TYPE_WLAN },
-	{ "ideapad_bluetooth",	16, 0x17, RFKILL_TYPE_BLUETOOTH },
-	{ "ideapad_3g",		17, 0x20, RFKILL_TYPE_WWAN },
-	{ "ideapad_killsw",	0,  0,    RFKILL_TYPE_WLAN }
+	struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+	struct platform_device *platform_device;
+	struct input_dev *inputdev;
 };
 
+static acpi_handle ideapad_handle;
 static bool no_bt_rfkill;
 module_param(no_bt_rfkill, bool, 0444);
 MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -163,17 +151,17 @@
 	pr_err("timeout in write_ec_cmd\n");
 	return -1;
 }
-/* the above is ACPI helpers */
 
+/*
+ * camera power
+ */
 static ssize_t show_ideapad_cam(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
-	struct ideapad_private *priv = dev_get_drvdata(dev);
-	acpi_handle handle = priv->handle;
 	unsigned long result;
 
-	if (read_ec_data(handle, 0x1D, &result))
+	if (read_ec_data(ideapad_handle, 0x1D, &result))
 		return sprintf(buf, "-1\n");
 	return sprintf(buf, "%lu\n", result);
 }
@@ -182,15 +170,13 @@
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)
 {
-	struct ideapad_private *priv = dev_get_drvdata(dev);
-	acpi_handle handle = priv->handle;
 	int ret, state;
 
 	if (!count)
 		return 0;
 	if (sscanf(buf, "%i", &state) != 1)
 		return -EINVAL;
-	ret = write_ec_cmd(handle, 0x1E, state);
+	ret = write_ec_cmd(ideapad_handle, 0x1E, state);
 	if (ret < 0)
 		return ret;
 	return count;
@@ -198,16 +184,27 @@
 
 static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
 
+/*
+ * Rfkill
+ */
+struct ideapad_rfk_data {
+	char *name;
+	int cfgbit;
+	int opcode;
+	int type;
+};
+
+static const struct ideapad_rfk_data ideapad_rfk_data[] = {
+	{ "ideapad_wlan",	18, 0x15, RFKILL_TYPE_WLAN },
+	{ "ideapad_bluetooth",	16, 0x17, RFKILL_TYPE_BLUETOOTH },
+	{ "ideapad_3g",		17, 0x20, RFKILL_TYPE_WWAN },
+};
+
 static int ideapad_rfk_set(void *data, bool blocked)
 {
-	int device = (unsigned long)data;
+	unsigned long opcode = (unsigned long)data;
 
-	if (device == IDEAPAD_DEV_KILLSW)
-		return -EINVAL;
-
-	return write_ec_cmd(ideapad_priv->handle,
-			    ideapad_rfk_data[device].opcode,
-			    !blocked);
+	return write_ec_cmd(ideapad_handle, opcode, !blocked);
 }
 
 static struct rfkill_ops ideapad_rfk_ops = {
@@ -217,20 +214,20 @@
 static void ideapad_sync_rfk_state(struct acpi_device *adevice)
 {
 	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-	acpi_handle handle = priv->handle;
 	unsigned long hw_blocked;
 	int i;
 
-	if (read_ec_data(handle, 0x23, &hw_blocked))
+	if (read_ec_data(ideapad_handle, 0x23, &hw_blocked))
 		return;
 	hw_blocked = !hw_blocked;
 
-	for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++)
+	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
 		if (priv->rfk[i])
 			rfkill_set_hw_state(priv->rfk[i], hw_blocked);
 }
 
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
+					     int dev)
 {
 	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
 	int ret;
@@ -239,7 +236,7 @@
 	if (no_bt_rfkill &&
 	    (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
 		/* Force to enable bluetooth when no_bt_rfkill=1 */
-		write_ec_cmd(ideapad_priv->handle,
+		write_ec_cmd(ideapad_handle,
 			     ideapad_rfk_data[dev].opcode, 1);
 		return 0;
 	}
@@ -250,7 +247,7 @@
 	if (!priv->rfk[dev])
 		return -ENOMEM;
 
-	if (read_ec_data(ideapad_priv->handle, ideapad_rfk_data[dev].opcode-1,
+	if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
 			 &sw_blocked)) {
 		rfkill_init_sw_state(priv->rfk[dev], 0);
 	} else {
@@ -266,7 +263,8 @@
 	return 0;
 }
 
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
+						int dev)
 {
 	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
 
@@ -277,73 +275,177 @@
 	rfkill_destroy(priv->rfk[dev]);
 }
 
+/*
+ * Platform device
+ */
+static struct attribute *ideapad_attributes[] = {
+	&dev_attr_camera_power.attr,
+	NULL
+};
+
+static struct attribute_group ideapad_attribute_group = {
+	.attrs = ideapad_attributes
+};
+
+static int __devinit ideapad_platform_init(struct ideapad_private *priv)
+{
+	int result;
+
+	priv->platform_device = platform_device_alloc("ideapad", -1);
+	if (!priv->platform_device)
+		return -ENOMEM;
+	platform_set_drvdata(priv->platform_device, priv);
+
+	result = platform_device_add(priv->platform_device);
+	if (result)
+		goto fail_platform_device;
+
+	result = sysfs_create_group(&priv->platform_device->dev.kobj,
+				    &ideapad_attribute_group);
+	if (result)
+		goto fail_sysfs;
+	return 0;
+
+fail_sysfs:
+	platform_device_del(priv->platform_device);
+fail_platform_device:
+	platform_device_put(priv->platform_device);
+	return result;
+}
+
+static void ideapad_platform_exit(struct ideapad_private *priv)
+{
+	sysfs_remove_group(&priv->platform_device->dev.kobj,
+			   &ideapad_attribute_group);
+	platform_device_unregister(priv->platform_device);
+}
+
+/*
+ * input device
+ */
+static const struct key_entry ideapad_keymap[] = {
+	{ KE_KEY, 0x06, { KEY_SWITCHVIDEOMODE } },
+	{ KE_KEY, 0x0D, { KEY_WLAN } },
+	{ KE_END, 0 },
+};
+
+static int __devinit ideapad_input_init(struct ideapad_private *priv)
+{
+	struct input_dev *inputdev;
+	int error;
+
+	inputdev = input_allocate_device();
+	if (!inputdev) {
+		pr_info("Unable to allocate input device\n");
+		return -ENOMEM;
+	}
+
+	inputdev->name = "Ideapad extra buttons";
+	inputdev->phys = "ideapad/input0";
+	inputdev->id.bustype = BUS_HOST;
+	inputdev->dev.parent = &priv->platform_device->dev;
+
+	error = sparse_keymap_setup(inputdev, ideapad_keymap, NULL);
+	if (error) {
+		pr_err("Unable to setup input device keymap\n");
+		goto err_free_dev;
+	}
+
+	error = input_register_device(inputdev);
+	if (error) {
+		pr_err("Unable to register input device\n");
+		goto err_free_keymap;
+	}
+
+	priv->inputdev = inputdev;
+	return 0;
+
+err_free_keymap:
+	sparse_keymap_free(inputdev);
+err_free_dev:
+	input_free_device(inputdev);
+	return error;
+}
+
+static void __devexit ideapad_input_exit(struct ideapad_private *priv)
+{
+	sparse_keymap_free(priv->inputdev);
+	input_unregister_device(priv->inputdev);
+	priv->inputdev = NULL;
+}
+
+static void ideapad_input_report(struct ideapad_private *priv,
+				 unsigned long scancode)
+{
+	sparse_keymap_report_event(priv->inputdev, scancode, 1, true);
+}
+
+/*
+ * module init/exit
+ */
 static const struct acpi_device_id ideapad_device_ids[] = {
 	{ "VPC2004", 0},
 	{ "", 0},
 };
 MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
 
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
 {
-	int i, cfg;
-	int devs_present[5];
+	int ret, i, cfg;
 	struct ideapad_private *priv;
 
 	if (read_method_int(adevice->handle, "_CFG", &cfg))
 		return -ENODEV;
 
-	for (i = IDEAPAD_DEV_CAMERA; i < IDEAPAD_DEV_KILLSW; i++) {
-		if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
-			devs_present[i] = 1;
-		else
-			devs_present[i] = 0;
-	}
-
-	/* The hardware switch is always present */
-	devs_present[IDEAPAD_DEV_KILLSW] = 1;
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
-
-	if (devs_present[IDEAPAD_DEV_CAMERA]) {
-		int ret = device_create_file(&adevice->dev, &dev_attr_camera_power);
-		if (ret) {
-			kfree(priv);
-			return ret;
-		}
-	}
-
-	priv->handle = adevice->handle;
 	dev_set_drvdata(&adevice->dev, priv);
-	ideapad_priv = priv;
-	for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++) {
-		if (!devs_present[i])
-			continue;
+	ideapad_handle = adevice->handle;
 
-		ideapad_register_rfkill(adevice, i);
+	ret = ideapad_platform_init(priv);
+	if (ret)
+		goto platform_failed;
+
+	ret = ideapad_input_init(priv);
+	if (ret)
+		goto input_failed;
+
+	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
+		if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
+			ideapad_register_rfkill(adevice, i);
+		else
+			priv->rfk[i] = NULL;
 	}
 	ideapad_sync_rfk_state(adevice);
+
 	return 0;
+
+input_failed:
+	ideapad_platform_exit(priv);
+platform_failed:
+	kfree(priv);
+	return ret;
 }
 
-static int ideapad_acpi_remove(struct acpi_device *adevice, int type)
+static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
 {
 	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
 	int i;
 
-	device_remove_file(&adevice->dev, &dev_attr_camera_power);
-
-	for (i = IDEAPAD_DEV_WLAN; i <= IDEAPAD_DEV_KILLSW; i++)
+	for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
 		ideapad_unregister_rfkill(adevice, i);
-
+	ideapad_input_exit(priv);
+	ideapad_platform_exit(priv);
 	dev_set_drvdata(&adevice->dev, NULL);
 	kfree(priv);
+
 	return 0;
 }
 
 static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
 {
+	struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
 	acpi_handle handle = adevice->handle;
 	unsigned long vpc1, vpc2, vpc_bit;
 
@@ -357,6 +459,8 @@
 		if (test_bit(vpc_bit, &vpc1)) {
 			if (vpc_bit == 9)
 				ideapad_sync_rfk_state(adevice);
+			else
+				ideapad_input_report(priv, vpc_bit);
 		}
 	}
 }
@@ -371,19 +475,14 @@
 	.owner = THIS_MODULE,
 };
 
-
 static int __init ideapad_acpi_module_init(void)
 {
-	acpi_bus_register_driver(&ideapad_acpi_driver);
-
-	return 0;
+	return acpi_bus_register_driver(&ideapad_acpi_driver);
 }
 
-
 static void __exit ideapad_acpi_module_exit(void)
 {
 	acpi_bus_unregister_driver(&ideapad_acpi_driver);
-
 }
 
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index f0b3ad1..1294a39 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1474,7 +1474,7 @@
 }
 
 void
-ips_link_to_i915_driver()
+ips_link_to_i915_driver(void)
 {
 	/* We can't cleanly get at the various ips_driver structs from
 	 * this caller (the i915 driver), so just set a flag saying
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index e61db9d..930e627 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -244,7 +244,11 @@
 			generic_handle_irq(pg->irq_base + gpio);
 		}
 	}
-	desc->chip->eoi(irq);
+
+	if (desc->chip->irq_eoi)
+		desc->chip->irq_eoi(irq_get_irq_data(irq));
+	else
+		dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
 }
 
 static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index ca35b0c..1752ef0 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -497,7 +497,7 @@
 			"intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd);
 
 		mutex_unlock(&ipclock);
-		return -1;
+		return -EIO;
 	}
 	mutex_unlock(&ipclock);
 	return 0;
@@ -642,7 +642,7 @@
 
 	if (status == IPC_FW_UPDATE_SUCCESS)
 		return 0;
-	return -1;
+	return -EIO;
 }
 EXPORT_SYMBOL(intel_scu_ipc_fw_update);
 
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
new file mode 100644
index 0000000..ba3231d
--- /dev/null
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -0,0 +1,133 @@
+/*
+ * intel_scu_ipcutil.c: Driver for the Intel SCU IPC mechanism
+ *
+ * (C) Copyright 2008-2010 Intel Corporation
+ * Author: Sreedhara DS (sreedhara.ds@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * This driver provides ioctl interfaces to call the Intel SCU IPC driver API.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/intel_scu_ipc.h>
+
+static u32 major;
+
+#define MAX_FW_SIZE 264192
+
+/* ioctl commands */
+#define	INTE_SCU_IPC_REGISTER_READ	0
+#define INTE_SCU_IPC_REGISTER_WRITE	1
+#define INTE_SCU_IPC_REGISTER_UPDATE	2
+#define INTE_SCU_IPC_FW_UPDATE		0xA2
+
+struct scu_ipc_data {
+	u32     count;  /* No. of registers */
+	u16     addr[5]; /* Register addresses */
+	u8      data[5]; /* Register data */
+	u8      mask; /* Valid for read-modify-write */
+};
+
+/**
+ *	scu_reg_access		-	implement register access ioctls
+ *	@cmd: command we are doing (read/write/update)
+ *	@data: kernel copy of ioctl data
+ *
+ *	Allow the user to perform register accesses on the SCU via the
+ *	kernel interface
+ */
+
+static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
+{
+	int count = data->count;
+
+	if (count == 0 || count == 3 || count > 4)
+		return -EINVAL;
+
+	switch (cmd) {
+	case INTE_SCU_IPC_REGISTER_READ:
+		return intel_scu_ipc_readv(data->addr, data->data, count);
+	case INTE_SCU_IPC_REGISTER_WRITE:
+		return intel_scu_ipc_writev(data->addr, data->data, count);
+	case INTE_SCU_IPC_REGISTER_UPDATE:
+		return intel_scu_ipc_update_register(data->addr[0],
+						    data->data[0], data->mask);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/**
+ *	scu_ipc_ioctl		-	control ioctls for the SCU
+ *	@fp: file handle of the SCU device
+ *	@cmd: ioctl code
+ *	@arg: pointer to user passed structure
+ *
+ *	Support the I/O and firmware flashing interfaces of the SCU
+ */
+static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
+							unsigned long arg)
+{
+	int ret;
+	struct scu_ipc_data  data;
+	void __user *argp = (void __user *)arg;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	if (cmd == INTE_SCU_IPC_FW_UPDATE) {
+			u8 *fwbuf = kmalloc(MAX_FW_SIZE, GFP_KERNEL);
+			if (fwbuf == NULL)
+				return -ENOMEM;
+			if (copy_from_user(fwbuf, (u8 *)arg, MAX_FW_SIZE)) {
+				kfree(fwbuf);
+				return -EFAULT;
+			}
+			ret = intel_scu_ipc_fw_update(fwbuf, MAX_FW_SIZE);
+			kfree(fwbuf);
+			return ret;
+	} else {
+		if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
+			return -EFAULT;
+		ret = scu_reg_access(cmd, &data);
+		if (ret < 0)
+			return ret;
+		if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
+			return -EFAULT;
+		return 0;
+	}
+}
+
+static const struct file_operations scu_ipc_fops = {
+	.unlocked_ioctl = scu_ipc_ioctl,
+};
+
+static int __init ipc_module_init(void)
+{
+	return register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
+}
+
+static void __exit ipc_module_exit(void)
+{
+	unregister_chrdev(major, "intel_mid_scu");
+}
+
+module_init(ipc_module_init);
+module_exit(ipc_module_exit);
+
+MODULE_LICENSE("GPL V2");
+MODULE_DESCRIPTION("Utility driver for intel scu ipc");
+MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index f200677..5e83370 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -235,6 +235,7 @@
 	57,	/* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */
 	-1,	/* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */
 	58,	/* 72 SONYPI_EVENT_MEDIA_PRESSED */
+	59,	/* 73 SONYPI_EVENT_VENDOR_PRESSED */
 };
 
 static int sony_laptop_input_keycode_map[] = {
@@ -297,6 +298,7 @@
 	KEY_VOLUMEUP,	/* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */
 	KEY_VOLUMEDOWN,	/* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */
 	KEY_MEDIA,	/* 58 SONYPI_EVENT_MEDIA_PRESSED */
+	KEY_VENDOR,	/* 59 SONYPI_EVENT_VENDOR_PRESSED */
 };
 
 /* release buttons after a short delay if pressed */
@@ -856,7 +858,7 @@
 }
 
 static struct backlight_device *sony_backlight_device;
-static struct backlight_ops sony_backlight_ops = {
+static const struct backlight_ops sony_backlight_ops = {
 	.update_status = sony_backlight_update_status,
 	.get_brightness = sony_backlight_get_brightness,
 };
@@ -894,10 +896,18 @@
 	{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
 	{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
 	{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
+	{ 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
+	{ 0x1d, SONYPI_EVENT_ANYBUTTON_RELEASED },
 	{ 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED },
 	{ 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED },
 	{ 0xa1, SONYPI_EVENT_MEDIA_PRESSED },
 	{ 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED },
+	{ 0xa4, SONYPI_EVENT_CD_EJECT_PRESSED },
+	{ 0x24, SONYPI_EVENT_ANYBUTTON_RELEASED },
+	{ 0xa5, SONYPI_EVENT_VENDOR_PRESSED },
+	{ 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
+	{ 0xa6, SONYPI_EVENT_HELP_PRESSED },
+	{ 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
 	{ 0, 0 },
 };
 
@@ -1131,7 +1141,7 @@
 	return err;
 }
 
-static void sony_nc_rfkill_update()
+static void sony_nc_rfkill_update(void)
 {
 	enum sony_nc_rfkill i;
 	int result;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e8c2199..dd59958 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -589,6 +589,7 @@
 		default:
 			printk(TPACPI_ERR "acpi_evalf() called "
 			       "with invalid format character '%c'\n", c);
+			va_end(ap);
 			return 0;
 		}
 	}
@@ -6109,7 +6110,7 @@
 			       BACKLIGHT_UPDATE_HOTKEY);
 }
 
-static struct backlight_ops ibm_backlight_data = {
+static const struct backlight_ops ibm_backlight_data = {
 	.get_brightness = brightness_get,
 	.update_status  = brightness_update_status,
 };
@@ -6345,7 +6346,7 @@
 			"as change notification\n");
 	tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
 				| TP_ACPI_HKEY_BRGHTUP_MASK
-				| TP_ACPI_HKEY_BRGHTDWN_MASK);;
+				| TP_ACPI_HKEY_BRGHTDWN_MASK);
 	return 0;
 }
 
@@ -7193,7 +7194,7 @@
  * 		TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
  *
  *	FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
- *	boot. Apparently the EC does not intialize it, so unless ACPI DSDT
+ *	boot. Apparently the EC does not initialize it, so unless ACPI DSDT
  *	does so, its initial value is meaningless (0x07).
  *
  *	For firmware bugs, refer to:
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 4276da7..209cced 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -841,7 +841,7 @@
 	remove_proc_entry("version", toshiba_proc_dir);
 }
 
-static struct backlight_ops toshiba_backlight_data = {
+static const struct backlight_ops toshiba_backlight_data = {
         .get_brightness = get_lcd,
         .update_status  = set_lcd_status,
 };
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aecd9a9..05cc796 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -549,21 +549,34 @@
 wmi_notify_handler handler, void *data)
 {
 	struct wmi_block *block;
-	acpi_status status;
+	acpi_status status = AE_NOT_EXIST;
+	char tmp[16], guid_input[16];
+	struct list_head *p;
 
 	if (!guid || !handler)
 		return AE_BAD_PARAMETER;
 
-	if (!find_guid(guid, &block))
-		return AE_NOT_EXIST;
+	wmi_parse_guid(guid, tmp);
+	wmi_swap_bytes(tmp, guid_input);
 
-	if (block->handler && block->handler != wmi_notify_debug)
-		return AE_ALREADY_ACQUIRED;
+	list_for_each(p, &wmi_block_list) {
+		acpi_status wmi_status;
+		block = list_entry(p, struct wmi_block, list);
 
-	block->handler = handler;
-	block->handler_data = data;
+		if (memcmp(block->gblock.guid, guid_input, 16) == 0) {
+			if (block->handler &&
+			    block->handler != wmi_notify_debug)
+				return AE_ALREADY_ACQUIRED;
 
-	status = wmi_method_enable(block, 1);
+			block->handler = handler;
+			block->handler_data = data;
+
+			wmi_status = wmi_method_enable(block, 1);
+			if ((wmi_status != AE_OK) ||
+			    ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
+				status = wmi_status;
+		}
+	}
 
 	return status;
 }
@@ -577,24 +590,40 @@
 acpi_status wmi_remove_notify_handler(const char *guid)
 {
 	struct wmi_block *block;
-	acpi_status status = AE_OK;
+	acpi_status status = AE_NOT_EXIST;
+	char tmp[16], guid_input[16];
+	struct list_head *p;
 
 	if (!guid)
 		return AE_BAD_PARAMETER;
 
-	if (!find_guid(guid, &block))
-		return AE_NOT_EXIST;
+	wmi_parse_guid(guid, tmp);
+	wmi_swap_bytes(tmp, guid_input);
 
-	if (!block->handler || block->handler == wmi_notify_debug)
-		return AE_NULL_ENTRY;
+	list_for_each(p, &wmi_block_list) {
+		acpi_status wmi_status;
+		block = list_entry(p, struct wmi_block, list);
 
-	if (debug_event) {
-		block->handler = wmi_notify_debug;
-	} else {
-		status = wmi_method_enable(block, 0);
-		block->handler = NULL;
-		block->handler_data = NULL;
+		if (memcmp(block->gblock.guid, guid_input, 16) == 0) {
+			if (!block->handler ||
+			    block->handler == wmi_notify_debug)
+				return AE_NULL_ENTRY;
+
+			if (debug_event) {
+				block->handler = wmi_notify_debug;
+				status = AE_OK;
+			} else {
+				wmi_status = wmi_method_enable(block, 0);
+				block->handler = NULL;
+				block->handler_data = NULL;
+				if ((wmi_status != AE_OK) ||
+				    ((wmi_status == AE_OK) &&
+				     (status == AE_NOT_EXIST)))
+					status = wmi_status;
+			}
+		}
 	}
+
 	return status;
 }
 EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
@@ -705,22 +734,11 @@
 	.dev_attrs = wmi_dev_attrs,
 };
 
-static struct wmi_block *wmi_create_device(const struct guid_block *gblock,
-					   acpi_handle handle)
+static int wmi_create_device(const struct guid_block *gblock,
+			     struct wmi_block *wblock, acpi_handle handle)
 {
-	struct wmi_block *wblock;
-	int error;
 	char guid_string[37];
 
-	wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
-	if (!wblock) {
-		error = -ENOMEM;
-		goto err_out;
-	}
-
-	wblock->handle = handle;
-	wblock->gblock = *gblock;
-
 	wblock->dev.class = &wmi_class;
 
 	wmi_gtoa(gblock->guid, guid_string);
@@ -728,17 +746,7 @@
 
 	dev_set_drvdata(&wblock->dev, wblock);
 
-	error = device_register(&wblock->dev);
-	if (error)
-		goto err_free;
-
-	list_add_tail(&wblock->list, &wmi_block_list);
-	return wblock;
-
-err_free:
-	kfree(wblock);
-err_out:
-	return ERR_PTR(error);
+	return device_register(&wblock->dev);
 }
 
 static void wmi_free_devices(void)
@@ -747,7 +755,8 @@
 
 	/* Delete devices for all the GUIDs */
 	list_for_each_entry_safe(wblock, next, &wmi_block_list, list)
-		device_unregister(&wblock->dev);
+		if (wblock->dev.class)
+			device_unregister(&wblock->dev);
 }
 
 static bool guid_already_parsed(const char *guid_string)
@@ -770,7 +779,6 @@
 	union acpi_object *obj;
 	const struct guid_block *gblock;
 	struct wmi_block *wblock;
-	char guid_string[37];
 	acpi_status status;
 	int retval;
 	u32 i, total;
@@ -792,29 +800,32 @@
 	total = obj->buffer.length / sizeof(struct guid_block);
 
 	for (i = 0; i < total; i++) {
-		/*
-		  Some WMI devices, like those for nVidia hooks, have a
-		  duplicate GUID. It's not clear what we should do in this
-		  case yet, so for now, we'll just ignore the duplicate.
-		  Anyone who wants to add support for that device can come
-		  up with a better workaround for the mess then.
-		*/
-		if (guid_already_parsed(gblock[i].guid) == true) {
-			wmi_gtoa(gblock[i].guid, guid_string);
-			pr_info("Skipping duplicate GUID %s\n", guid_string);
-			continue;
-		}
-
 		if (debug_dump_wdg)
 			wmi_dump_wdg(&gblock[i]);
 
-		wblock = wmi_create_device(&gblock[i], handle);
-		if (IS_ERR(wblock)) {
-			retval = PTR_ERR(wblock);
-			wmi_free_devices();
-			break;
+		wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+		if (!wblock)
+			return AE_NO_MEMORY;
+
+		wblock->handle = handle;
+		wblock->gblock = gblock[i];
+
+		/*
+		  Some WMI devices, like those for nVidia hooks, have a
+		  duplicate GUID. It's not clear what we should do in this
+		  case yet, so for now, we'll just ignore the duplicate
+		  for device creation.
+		*/
+		if (!guid_already_parsed(gblock[i].guid)) {
+			retval = wmi_create_device(&gblock[i], wblock, handle);
+			if (retval) {
+				wmi_free_devices();
+				goto out_free_pointer;
+			}
 		}
 
+		list_add_tail(&wblock->list, &wmi_block_list);
+
 		if (debug_event) {
 			wblock->handler = wmi_notify_debug;
 			wmi_method_enable(wblock, 1);
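With the list walk above, wmi_install_notify_handler() and wmi_remove_notify_handler() now attach to every block that shares a GUID, so duplicate GUIDs (the nVidia-hook case) no longer lose events. For reference, consumers of this API are the x86 platform drivers elsewhere in this series and all follow the same pattern; a minimal sketch (the GUID string below is a placeholder, not a real firmware GUID):

/* Minimal sketch of a WMI event consumer using the reworked handlers.
 * EXAMPLE_WMI_EVENT_GUID is a placeholder, not a real device GUID. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>

#define EXAMPLE_WMI_EVENT_GUID "01234567-89AB-CDEF-0123-456789ABCDEF"

static void example_wmi_notify(u32 value, void *context)
{
	/* 'value' is the notification code delivered with the WMI event */
	pr_info("example-wmi: event 0x%x\n", value);
}

static int __init example_wmi_init(void)
{
	acpi_status status;

	if (!wmi_has_guid(EXAMPLE_WMI_EVENT_GUID))
		return -ENODEV;

	status = wmi_install_notify_handler(EXAMPLE_WMI_EVENT_GUID,
					    example_wmi_notify, NULL);
	return ACPI_SUCCESS(status) ? 0 : -EIO;
}

static void __exit example_wmi_exit(void)
{
	wmi_remove_notify_handler(EXAMPLE_WMI_EVENT_GUID);
}

module_init(example_wmi_init);
module_exit(example_wmi_exit);
MODULE_LICENSE("GPL");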
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 8de3775..bfba893 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,11 +2,13 @@
 # Makefile for the Linux Plug-and-Play Support.
 #
 
-obj-y		:= core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
+obj-y		:= pnp.o
+
+pnp-y		:= core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
 
 obj-$(CONFIG_PNPACPI)		+= pnpacpi/
 obj-$(CONFIG_PNPBIOS)		+= pnpbios/
 obj-$(CONFIG_ISAPNP)		+= isapnp/
 
 # pnp_system_init goes after pnpacpi/pnpbios init
-obj-y				+= system.o
+pnp-y				+= system.o
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 0f34d96..cb6ce42 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -220,10 +220,5 @@
 int pnp_debug;
 
 #if defined(CONFIG_PNP_DEBUG_MESSAGES)
-static int __init pnp_debug_setup(char *__unused)
-{
-	pnp_debug = 1;
-	return 1;
-}
-__setup("pnp.debug", pnp_debug_setup);
+module_param_named(debug, pnp_debug, int, 0644);
 #endif
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index d1dbb9d..00e9403 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -189,8 +189,11 @@
 	if (!pnp_drv)
 		return 0;
 
-	if (pnp_dev->protocol->resume)
-		pnp_dev->protocol->resume(pnp_dev);
+	if (pnp_dev->protocol->resume) {
+		error = pnp_dev->protocol->resume(pnp_dev);
+		if (error)
+			return error;
+	}
 
 	if (pnp_can_write(pnp_dev)) {
 		error = pnp_start_dev(pnp_dev);
diff --git a/drivers/pnp/isapnp/Makefile b/drivers/pnp/isapnp/Makefile
index cac18bb..6e607aa 100644
--- a/drivers/pnp/isapnp/Makefile
+++ b/drivers/pnp/isapnp/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the kernel ISAPNP driver.
 #
+obj-y			+= pnp.o
+pnp-y			:= core.o compat.o
 
-isapnp-proc-$(CONFIG_PROC_FS) = proc.o
-
-obj-y := core.o compat.o $(isapnp-proc-y)
+pnp-$(CONFIG_PROC_FS)	+= proc.o
diff --git a/drivers/pnp/pnpacpi/Makefile b/drivers/pnp/pnpacpi/Makefile
index 905326f..40c93da 100644
--- a/drivers/pnp/pnpacpi/Makefile
+++ b/drivers/pnp/pnpacpi/Makefile
@@ -1,5 +1,6 @@
 #
 # Makefile for the kernel PNPACPI driver.
 #
+obj-y += pnp.o
 
-obj-y := core.o rsparser.o
+pnp-y := core.o rsparser.o
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 57313f4..ca84d50 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -81,12 +81,19 @@
 
 static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
 	struct acpi_buffer buffer;
 	int ret;
 
 	pnp_dbg(&dev->dev, "set resources\n");
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return -ENODEV;
+	}
+
 	ret = pnpacpi_build_resource_template(dev, &buffer);
 	if (ret)
 		return ret;
@@ -105,12 +112,18 @@
 
 static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
 	int ret;
 
 	dev_dbg(&dev->dev, "disable resources\n");
 
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return 0;
+	}
+
 	/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
 	ret = 0;
 	if (acpi_bus_power_manageable(handle))
@@ -124,46 +137,74 @@
 #ifdef CONFIG_ACPI_SLEEP
 static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return false;
+	}
 
 	return acpi_bus_can_wakeup(handle);
 }
 
 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
-	int power_state;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle;
+	int error = 0;
+
+	handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return 0;
+	}
 
 	if (device_can_wakeup(&dev->dev)) {
-		int rc = acpi_pm_device_sleep_wake(&dev->dev,
+		error = acpi_pm_device_sleep_wake(&dev->dev,
 				device_may_wakeup(&dev->dev));
-
-		if (rc)
-			return rc;
+		if (error)
+			return error;
 	}
-	power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
-	if (power_state < 0)
-		power_state = (state.event == PM_EVENT_ON) ?
-				ACPI_STATE_D0 : ACPI_STATE_D3;
 
-	/* acpi_bus_set_power() often fails (keyboard port can't be
-	 * powered-down?), and in any case, our return value is ignored
-	 * by pnp_bus_suspend().  Hence we don't revert the wakeup
-	 * setting if the set_power fails.
-	 */
-	return acpi_bus_set_power(handle, power_state);
+	if (acpi_bus_power_manageable(handle)) {
+		int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
+
+		if (power_state < 0)
+			power_state = (state.event == PM_EVENT_ON) ?
+					ACPI_STATE_D0 : ACPI_STATE_D3;
+
+		/*
+		 * acpi_bus_set_power() often fails (keyboard port can't be
+		 * powered-down?), and in any case, our return value is ignored
+		 * by pnp_bus_suspend().  Hence we don't revert the wakeup
+		 * setting if the set_power fails.
+		 */
+		error = acpi_bus_set_power(handle, power_state);
+	}
+
+	return error;
 }
 
 static int pnpacpi_resume(struct pnp_dev *dev)
 {
-	struct acpi_device *acpi_dev = dev->data;
-	acpi_handle handle = acpi_dev->handle;
+	struct acpi_device *acpi_dev;
+	acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
+	int error = 0;
+
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) {
+		dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
+		return -ENODEV;
+	}
 
 	if (device_may_wakeup(&dev->dev))
 		acpi_pm_device_sleep_wake(&dev->dev, false);
-	return acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+	if (acpi_bus_power_manageable(handle))
+		error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+
+	return error;
 }
 #endif
 
diff --git a/drivers/pnp/pnpbios/Makefile b/drivers/pnp/pnpbios/Makefile
index 3cd3ed7..240b0ff 100644
--- a/drivers/pnp/pnpbios/Makefile
+++ b/drivers/pnp/pnpbios/Makefile
@@ -1,7 +1,8 @@
 #
 # Makefile for the kernel PNPBIOS driver.
 #
+obj-y := pnp.o
 
-pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
+pnp-y := core.o bioscalls.o rsparser.o
 
-obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
+pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 60d83d9..61bf5d7 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -136,6 +136,16 @@
 	  in handheld and portable equipment. The MAX17040 is configured
 	  to operate with a single lithium cell
 
+config BATTERY_MAX17042
+	tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+	depends on I2C
+	help
+	  MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries
+	  in handheld and portable equipment. The MAX17042 is configured
+	  to operate with a single lithium cell. MAX8997 and MAX8966 are
+	  multi-function devices that include fuel gauges compatible
+	  with the MAX17042.
+
 config BATTERY_Z2
 	tristate "Z2 battery driver"
 	depends on I2C && MACH_ZIPIT2
@@ -185,4 +195,14 @@
 	help
 	  Say Y here to enable support for TWL4030 Battery Charge Interface.
 
+config CHARGER_GPIO
+	tristate "GPIO charger"
+	depends on GPIOLIB
+	help
+	  Say Y to include support for chargers which report their online status
+	  through a GPIO pin.
+
+	  This driver can be built as a module. If so, the module will be
+	  called gpio-charger.
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c75772e..8385bfa 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_BATTERY_BQ27x00)	+= bq27x00_battery.o
 obj-$(CONFIG_BATTERY_DA9030)	+= da9030_battery.o
 obj-$(CONFIG_BATTERY_MAX17040)	+= max17040_battery.o
+obj-$(CONFIG_BATTERY_MAX17042)	+= max17042_battery.o
 obj-$(CONFIG_BATTERY_Z2)	+= z2_battery.o
 obj-$(CONFIG_BATTERY_S3C_ADC)	+= s3c_adc_battery.o
 obj-$(CONFIG_CHARGER_PCF50633)	+= pcf50633-charger.o
@@ -32,3 +33,4 @@
 obj-$(CONFIG_BATTERY_INTEL_MID)	+= intel_mid_battery.o
 obj-$(CONFIG_CHARGER_ISP1704)	+= isp1704_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)	+= twl4030_charger.o
+obj-$(CONFIG_CHARGER_GPIO)	+= gpio-charger.o
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 039f41a..548d263 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -295,7 +295,7 @@
 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_scheduled_work();
+	flush_work_sync(&bat_work);
 	return 0;
 }
 
@@ -362,7 +362,7 @@
 err_psy_reg_main:
 
 	/* see comment in collie_bat_remove */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	i--;
 err_gpio:
@@ -382,12 +382,11 @@
 	power_supply_unregister(&collie_bat_main.psy);
 
 	/*
-	 * now flush all pending work.
-	 * we won't get any more schedules, since all
-	 * sources (isr and external_power_changed)
-	 * are unregistered now.
+	 * Now cancel the bat_work.  We won't get any more schedules,
+	 * since all sources (isr and external_power_changed) are
+	 * unregistered now.
 	 */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
 		gpio_free(gpios[i].gpio);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index b3c01c1..e534290 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -212,7 +212,7 @@
 	if (di->rem_capacity > 100)
 		di->rem_capacity = 100;
 
-	if (di->current_uA >= 100L)
+	if (di->current_uA < -100L)
 		di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L)
 					/ (di->current_uA / 100L);
 	else
@@ -580,10 +580,8 @@
 {
 	struct ds2760_device_info *di = platform_get_drvdata(pdev);
 
-	cancel_rearming_delayed_workqueue(di->monitor_wqueue,
-					  &di->monitor_work);
-	cancel_rearming_delayed_workqueue(di->monitor_wqueue,
-					  &di->set_charged_work);
+	cancel_delayed_work_sync(&di->monitor_work);
+	cancel_delayed_work_sync(&di->set_charged_work);
 	destroy_workqueue(di->monitor_wqueue);
 	power_supply_unregister(&di->bat);
 	kfree(di);
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
new file mode 100644
index 0000000..25b88ac
--- /dev/null
+++ b/drivers/power/gpio-charger.c
@@ -0,0 +1,188 @@
+/*
+ *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *  Driver for chargers which report their online status through a GPIO pin
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+
+#include <linux/power/gpio-charger.h>
+
+struct gpio_charger {
+	const struct gpio_charger_platform_data *pdata;
+	unsigned int irq;
+
+	struct power_supply charger;
+};
+
+static irqreturn_t gpio_charger_irq(int irq, void *devid)
+{
+	struct power_supply *charger = devid;
+
+	power_supply_changed(charger);
+
+	return IRQ_HANDLED;
+}
+
+static inline struct gpio_charger *psy_to_gpio_charger(struct power_supply *psy)
+{
+	return container_of(psy, struct gpio_charger, charger);
+}
+
+static int gpio_charger_get_property(struct power_supply *psy,
+		enum power_supply_property psp, union power_supply_propval *val)
+{
+	struct gpio_charger *gpio_charger = psy_to_gpio_charger(psy);
+	const struct gpio_charger_platform_data *pdata = gpio_charger->pdata;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = gpio_get_value(pdata->gpio);
+		val->intval ^= pdata->gpio_active_low;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static enum power_supply_property gpio_charger_properties[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int __devinit gpio_charger_probe(struct platform_device *pdev)
+{
+	const struct gpio_charger_platform_data *pdata = pdev->dev.platform_data;
+	struct gpio_charger *gpio_charger;
+	struct power_supply *charger;
+	int ret;
+	int irq;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	if (!gpio_is_valid(pdata->gpio)) {
+		dev_err(&pdev->dev, "Invalid gpio pin\n");
+		return -EINVAL;
+	}
+
+	gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL);
+	if (!gpio_charger) {
+		dev_err(&pdev->dev, "Failed to alloc driver structure\n");
+		return -ENOMEM;
+	}
+
+	charger = &gpio_charger->charger;
+
+	charger->name = pdata->name ? pdata->name : "gpio-charger";
+	charger->type = pdata->type;
+	charger->properties = gpio_charger_properties;
+	charger->num_properties = ARRAY_SIZE(gpio_charger_properties);
+	charger->get_property = gpio_charger_get_property;
+	charger->supplied_to = pdata->supplied_to;
+	charger->num_supplicants = pdata->num_supplicants;
+
+	ret = gpio_request(pdata->gpio, dev_name(&pdev->dev));
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to request gpio pin: %d\n", ret);
+		goto err_free;
+	}
+	ret = gpio_direction_input(pdata->gpio);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to set gpio to input: %d\n", ret);
+		goto err_gpio_free;
+	}
+
+	gpio_charger->pdata = pdata;
+
+	ret = power_supply_register(&pdev->dev, charger);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register power supply: %d\n",
+			ret);
+		goto err_gpio_free;
+	}
+
+	irq = gpio_to_irq(pdata->gpio);
+	if (irq > 0) {
+		ret = request_any_context_irq(irq, gpio_charger_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				dev_name(&pdev->dev), charger);
+		if (ret)
+			dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
+		else
+			gpio_charger->irq = irq;
+	}
+
+	platform_set_drvdata(pdev, gpio_charger);
+
+	return 0;
+
+err_gpio_free:
+	gpio_free(pdata->gpio);
+err_free:
+	kfree(gpio_charger);
+	return ret;
+}
+
+static int __devexit gpio_charger_remove(struct platform_device *pdev)
+{
+	struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+	if (gpio_charger->irq)
+		free_irq(gpio_charger->irq, &gpio_charger->charger);
+
+	power_supply_unregister(&gpio_charger->charger);
+
+	gpio_free(gpio_charger->pdata->gpio);
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(gpio_charger);
+
+	return 0;
+}
+
+static struct platform_driver gpio_charger_driver = {
+	.probe = gpio_charger_probe,
+	.remove = __devexit_p(gpio_charger_remove),
+	.driver = {
+		.name = "gpio-charger",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init gpio_charger_init(void)
+{
+	return platform_driver_register(&gpio_charger_driver);
+}
+module_init(gpio_charger_init);
+
+static void __exit gpio_charger_exit(void)
+{
+	platform_driver_unregister(&gpio_charger_driver);
+}
+module_exit(gpio_charger_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-charger");
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index 2a10cd3..bce3a01 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -730,8 +730,7 @@
 power_reg_failed_1:
 	power_supply_unregister(&pbi->batt);
 power_reg_failed:
-	cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
-						&pbi->monitor_battery);
+	cancel_delayed_work_sync(&pbi->monitor_battery);
 requestirq_failed:
 	destroy_workqueue(pbi->monitor_wqueue);
 wqueue_failed:
@@ -760,14 +759,13 @@
 	struct pmic_power_module_info *pbi = dev_get_drvdata(&pdev->dev);
 
 	free_irq(pbi->irq, pbi);
-	cancel_rearming_delayed_workqueue(pbi->monitor_wqueue,
-					&pbi->monitor_battery);
+	cancel_delayed_work_sync(&pbi->monitor_battery);
 	destroy_workqueue(pbi->monitor_wqueue);
 
 	power_supply_unregister(&pbi->usb);
 	power_supply_unregister(&pbi->batt);
 
-	flush_scheduled_work();
+	cancel_work_sync(&pbi->handler);
 	kfree(pbi);
 	return 0;
 }
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 7251218..2ad9b14 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -59,11 +59,61 @@
 	struct notifier_block	nb;
 	struct work_struct	work;
 
-	char			model[7];
+	/* properties */
+	char			model[8];
 	unsigned		present:1;
+	unsigned		online:1;
+	unsigned		current_max;
+
+	/* temp storage variables */
+	unsigned long		event;
+	unsigned		max_power;
 };
 
 /*
+ * Determine whether the charging port is DCP (dedicated charger) or CDP
+ * (Host/HUB charger).
+ *
+ * REVISIT: The method is defined in the Battery Charging Specification and is
+ * applicable to any ULPI transceiver. Nothing isp170x specific here.
+ */
+static inline int isp1704_charger_type(struct isp1704_charger *isp)
+{
+	u8 reg;
+	u8 func_ctrl;
+	u8 otg_ctrl;
+	int type = POWER_SUPPLY_TYPE_USB_DCP;
+
+	func_ctrl = otg_io_read(isp->otg, ULPI_FUNC_CTRL);
+	otg_ctrl = otg_io_read(isp->otg, ULPI_OTG_CTRL);
+
+	/* disable pulldowns */
+	reg = ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN;
+	otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), reg);
+
+	/* full speed */
+	otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL),
+			ULPI_FUNC_CTRL_XCVRSEL_MASK);
+	otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL),
+			ULPI_FUNC_CTRL_FULL_SPEED);
+
+	/* Enable strong pull-up on DP (1.5K) and reset */
+	reg = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET;
+	otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), reg);
+	usleep_range(1000, 2000);
+
+	reg = otg_io_read(isp->otg, ULPI_DEBUG);
+	if ((reg & 3) != 3)
+		type = POWER_SUPPLY_TYPE_USB_CDP;
+
+	/* recover original state */
+	otg_io_write(isp->otg, ULPI_FUNC_CTRL, func_ctrl);
+	otg_io_write(isp->otg, ULPI_OTG_CTRL, otg_ctrl);
+
+	return type;
+}
+
+/*
  * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger
  * is actually a dedicated charger, the following steps need to be taken.
  */
@@ -127,16 +177,19 @@
 static inline int isp1704_charger_detect(struct isp1704_charger *isp)
 {
 	unsigned long	timeout;
-	u8		r;
+	u8		pwr_ctrl;
 	int		ret = 0;
 
+	pwr_ctrl = otg_io_read(isp->otg, ISP1704_PWR_CTRL);
+
 	/* set SW control bit in PWR_CTRL register */
 	otg_io_write(isp->otg, ISP1704_PWR_CTRL,
 			ISP1704_PWR_CTRL_SWCTRL);
 
 	/* enable manual charger detection */
-	r = (ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN);
-	otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), r);
+	otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL),
+			ISP1704_PWR_CTRL_SWCTRL
+			| ISP1704_PWR_CTRL_DPVSRC_EN);
 	usleep_range(1000, 2000);
 
 	timeout = jiffies + msecs_to_jiffies(300);
@@ -147,7 +200,10 @@
 			ret = isp1704_charger_verify(isp);
 			break;
 		}
-	} while (!time_after(jiffies, timeout));
+	} while (!time_after(jiffies, timeout) && isp->online);
+
+	/* recover original state */
+	otg_io_write(isp->otg, ISP1704_PWR_CTRL, pwr_ctrl);
 
 	return ret;
 }
@@ -155,52 +211,92 @@
 static void isp1704_charger_work(struct work_struct *data)
 {
 	int			detect;
+	unsigned long		event;
+	unsigned		power;
 	struct isp1704_charger	*isp =
 		container_of(data, struct isp1704_charger, work);
+	static DEFINE_MUTEX(lock);
 
-	/*
-	 * FIXME Only supporting dedicated chargers even though isp1704 can
-	 * detect HUB and HOST chargers. If the device has already been
-	 * enumerated, the detection will break the connection.
-	 */
-	if (isp->otg->state != OTG_STATE_B_IDLE)
-		return;
+	event = isp->event;
+	power = isp->max_power;
 
-	/* disable data pullups */
-	if (isp->otg->gadget)
-		usb_gadget_disconnect(isp->otg->gadget);
+	mutex_lock(&lock);
 
-	/* detect charger */
-	detect = isp1704_charger_detect(isp);
-	if (detect) {
-		isp->present = detect;
-		power_supply_changed(&isp->psy);
+	switch (event) {
+	case USB_EVENT_VBUS:
+		isp->online = true;
+
+		/* detect charger */
+		detect = isp1704_charger_detect(isp);
+
+		if (detect) {
+			isp->present = detect;
+			isp->psy.type = isp1704_charger_type(isp);
+		}
+
+		switch (isp->psy.type) {
+		case POWER_SUPPLY_TYPE_USB_DCP:
+			isp->current_max = 1800;
+			break;
+		case POWER_SUPPLY_TYPE_USB_CDP:
+			/*
+			 * Only 500mA here or high speed chirp
+			 * handshaking may break
+			 */
+			isp->current_max = 500;
+			/* FALLTHROUGH */
+		case POWER_SUPPLY_TYPE_USB:
+		default:
+			/* enable data pullups */
+			if (isp->otg->gadget)
+				usb_gadget_connect(isp->otg->gadget);
+		}
+		break;
+	case USB_EVENT_NONE:
+		isp->online = false;
+		isp->current_max = 0;
+		isp->present = 0;
+		isp->psy.type = POWER_SUPPLY_TYPE_USB;
+
+		/*
+		 * Disable data pullups. We need to prevent the controller from
+		 * enumerating.
+		 *
+		 * FIXME: This is here to allow charger detection with Host/HUB
+		 * chargers. The pullups may be enabled elsewhere, so this can
+		 * not be the final solution.
+		 */
+		if (isp->otg->gadget)
+			usb_gadget_disconnect(isp->otg->gadget);
+		break;
+	case USB_EVENT_ENUMERATED:
+		if (isp->present)
+			isp->current_max = 1800;
+		else
+			isp->current_max = power;
+		break;
+	default:
+		goto out;
 	}
 
-	/* enable data pullups */
-	if (isp->otg->gadget)
-		usb_gadget_connect(isp->otg->gadget);
+	power_supply_changed(&isp->psy);
+out:
+	mutex_unlock(&lock);
 }
 
 static int isp1704_notifier_call(struct notifier_block *nb,
-		unsigned long event, void *unused)
+		unsigned long event, void *power)
 {
 	struct isp1704_charger *isp =
 		container_of(nb, struct isp1704_charger, nb);
 
-	switch (event) {
-	case USB_EVENT_VBUS:
-		schedule_work(&isp->work);
-		break;
-	case USB_EVENT_NONE:
-		if (isp->present) {
-			isp->present = 0;
-			power_supply_changed(&isp->psy);
-		}
-		break;
-	default:
-		return NOTIFY_DONE;
-	}
+	isp->event = event;
+
+	if (power)
+		isp->max_power = *((unsigned *)power);
+
+	schedule_work(&isp->work);
 
 	return NOTIFY_OK;
 }
@@ -216,6 +312,12 @@
 	case POWER_SUPPLY_PROP_PRESENT:
 		val->intval = isp->present;
 		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = isp->online;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = isp->current_max;
+		break;
 	case POWER_SUPPLY_PROP_MODEL_NAME:
 		val->strval = isp->model;
 		break;
@@ -230,6 +332,8 @@
 
 static enum power_supply_property power_props[] = {
 	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
 };
@@ -287,13 +391,13 @@
 	if (!isp->otg)
 		goto fail0;
 
+	isp->dev = &pdev->dev;
+	platform_set_drvdata(pdev, isp);
+
 	ret = isp1704_test_ulpi(isp);
 	if (ret < 0)
 		goto fail1;
 
-	isp->dev = &pdev->dev;
-	platform_set_drvdata(pdev, isp);
-
 	isp->psy.name		= "isp1704";
 	isp->psy.type		= POWER_SUPPLY_TYPE_USB;
 	isp->psy.properties	= power_props;
@@ -318,6 +422,23 @@
 
 	dev_info(isp->dev, "registered with product id %s\n", isp->model);
 
+	/*
+	 * Taking over the D+ pullup.
+	 *
+	 * FIXME: The device will be disconnected if it was already
+	 * enumerated. The charger driver should always be loaded before any
+	 * gadget is loaded.
+	 */
+	if (isp->otg->gadget)
+		usb_gadget_disconnect(isp->otg->gadget);
+
+	/* Detect charger if VBUS is valid (the cable was already plugged). */
+	ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
+	if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
+		isp->event = USB_EVENT_VBUS;
+		schedule_work(&isp->work);
+	}
+
 	return 0;
 fail2:
 	power_supply_unregister(&isp->psy);
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index a8108a7..02414db 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 #include <linux/gpio.h>
@@ -47,6 +48,8 @@
 
 	struct power_supply battery;
 	struct delayed_work work;
+
+	struct mutex lock;
 };
 
 static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy)
@@ -68,6 +71,8 @@
 	unsigned long val;
 	long voltage;
 
+	mutex_lock(&battery->lock);
+
 	INIT_COMPLETION(battery->read_completion);
 
 	enable_irq(battery->irq);
@@ -91,6 +96,8 @@
 	battery->cell->disable(battery->pdev);
 	disable_irq(battery->irq);
 
+	mutex_unlock(&battery->lock);
+
 	return voltage;
 }
 
@@ -240,6 +247,11 @@
 	struct jz_battery *jz_battery;
 	struct power_supply *battery;
 
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform_data supplied\n");
+		return -ENXIO;
+	}
+
 	jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL);
 	if (!jz_battery) {
 		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
@@ -291,6 +303,7 @@
 	jz_battery->pdev = pdev;
 
 	init_completion(&jz_battery->read_completion);
+	mutex_init(&jz_battery->lock);
 
 	INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work);
 
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
new file mode 100644
index 0000000..c5c8805
--- /dev/null
+++ b/drivers/power/max17042_battery.c
@@ -0,0 +1,239 @@
+/*
+ * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ *  Note that the Maxim 8966 and 8997 are MFDs and this is one of their subdevices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * This driver is based on max17040_battery.c
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/power_supply.h>
+#include <linux/power/max17042_battery.h>
+
+enum max17042_register {
+	MAX17042_STATUS		= 0x00,
+	MAX17042_VALRT_Th	= 0x01,
+	MAX17042_TALRT_Th	= 0x02,
+	MAX17042_SALRT_Th	= 0x03,
+	MAX17042_AtRate		= 0x04,
+	MAX17042_RepCap		= 0x05,
+	MAX17042_RepSOC		= 0x06,
+	MAX17042_Age		= 0x07,
+	MAX17042_TEMP		= 0x08,
+	MAX17042_VCELL		= 0x09,
+	MAX17042_Current	= 0x0A,
+	MAX17042_AvgCurrent	= 0x0B,
+	MAX17042_Qresidual	= 0x0C,
+	MAX17042_SOC		= 0x0D,
+	MAX17042_AvSOC		= 0x0E,
+	MAX17042_RemCap		= 0x0F,
+	MAX17402_FullCAP	= 0x10,
+	MAX17042_TTE		= 0x11,
+	MAX17042_V_empty	= 0x12,
+
+	MAX17042_RSLOW		= 0x14,
+
+	MAX17042_AvgTA		= 0x16,
+	MAX17042_Cycles		= 0x17,
+	MAX17042_DesignCap	= 0x18,
+	MAX17042_AvgVCELL	= 0x19,
+	MAX17042_MinMaxTemp	= 0x1A,
+	MAX17042_MinMaxVolt	= 0x1B,
+	MAX17042_MinMaxCurr	= 0x1C,
+	MAX17042_CONFIG		= 0x1D,
+	MAX17042_ICHGTerm	= 0x1E,
+	MAX17042_AvCap		= 0x1F,
+	MAX17042_ManName	= 0x20,
+	MAX17042_DevName	= 0x21,
+	MAX17042_DevChem	= 0x22,
+
+	MAX17042_TempNom	= 0x24,
+	MAX17042_TempCold	= 0x25,
+	MAX17042_TempHot	= 0x26,
+	MAX17042_AIN		= 0x27,
+	MAX17042_LearnCFG	= 0x28,
+	MAX17042_SHFTCFG	= 0x29,
+	MAX17042_RelaxCFG	= 0x2A,
+	MAX17042_MiscCFG	= 0x2B,
+	MAX17042_TGAIN		= 0x2C,
+	MAx17042_TOFF		= 0x2D,
+	MAX17042_CGAIN		= 0x2E,
+	MAX17042_COFF		= 0x2F,
+
+	MAX17042_Q_empty	= 0x33,
+	MAX17042_T_empty	= 0x34,
+
+	MAX17042_RCOMP0		= 0x38,
+	MAX17042_TempCo		= 0x39,
+	MAX17042_Rx		= 0x3A,
+	MAX17042_T_empty0	= 0x3B,
+	MAX17042_TaskPeriod	= 0x3C,
+	MAX17042_FSTAT		= 0x3D,
+
+	MAX17042_SHDNTIMER	= 0x3F,
+
+	MAX17042_VFRemCap	= 0x4A,
+
+	MAX17042_QH		= 0x4D,
+	MAX17042_QL		= 0x4E,
+};
+
+struct max17042_chip {
+	struct i2c_client *client;
+	struct power_supply battery;
+	struct max17042_platform_data *pdata;
+};
+
+static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
+{
+	int ret = i2c_smbus_write_word_data(client, reg, value);
+
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int max17042_read_reg(struct i2c_client *client, u8 reg)
+{
+	int ret = i2c_smbus_read_word_data(client, reg);
+
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static enum power_supply_property max17042_battery_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int max17042_get_property(struct power_supply *psy,
+			    enum power_supply_property psp,
+			    union power_supply_propval *val)
+{
+	struct max17042_chip *chip = container_of(psy,
+				struct max17042_chip, battery);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		val->intval = max17042_read_reg(chip->client,
+				MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+		val->intval = max17042_read_reg(chip->client,
+				MAX17042_AvgVCELL) * 83;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = max17042_read_reg(chip->client,
+				MAX17042_SOC) / 256;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __devinit max17042_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+	struct max17042_chip *chip;
+	int ret;
+
+	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+		return -EIO;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->pdata = client->dev.platform_data;
+
+	i2c_set_clientdata(client, chip);
+
+	chip->battery.name		= "max17042_battery";
+	chip->battery.type		= POWER_SUPPLY_TYPE_BATTERY;
+	chip->battery.get_property	= max17042_get_property;
+	chip->battery.properties	= max17042_battery_props;
+	chip->battery.num_properties	= ARRAY_SIZE(max17042_battery_props);
+
+	ret = power_supply_register(&client->dev, &chip->battery);
+	if (ret) {
+		dev_err(&client->dev, "failed: power supply register\n");
+		i2c_set_clientdata(client, NULL);
+		kfree(chip);
+		return ret;
+	}
+
+	if (!chip->pdata->enable_current_sense) {
+		max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
+		max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
+		max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+	}
+
+	return 0;
+}
+
+static int __devexit max17042_remove(struct i2c_client *client)
+{
+	struct max17042_chip *chip = i2c_get_clientdata(client);
+
+	power_supply_unregister(&chip->battery);
+	i2c_set_clientdata(client, NULL);
+	kfree(chip);
+	return 0;
+}
+
+static const struct i2c_device_id max17042_id[] = {
+	{ "max17042", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, max17042_id);
+
+static struct i2c_driver max17042_i2c_driver = {
+	.driver	= {
+		.name	= "max17042",
+	},
+	.probe		= max17042_probe,
+	.remove		= __devexit_p(max17042_remove),
+	.id_table	= max17042_id,
+};
+
+static int __init max17042_init(void)
+{
+	return i2c_add_driver(&max17042_i2c_driver);
+}
+module_init(max17042_init);
+
+static void __exit max17042_exit(void)
+{
+	i2c_del_driver(&max17042_i2c_driver);
+}
+module_exit(max17042_exit);
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
+MODULE_LICENSE("GPL");
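
As a rough illustration of the scaling used in max17042_get_property() above, the raw 16-bit register words map to the reported units as follows; the helper names are hypothetical, and the factors are simply the ones used in the driver.

/* Illustrative sketch only: how the raw MAX17042 words read above end up
 * as power-supply property values. Helper names are hypothetical; the
 * conversion factors are the ones used in max17042_get_property().
 */
static inline int max17042_raw_to_uv(int raw)
{
	return raw * 83;	/* VCELL/AvgVCELL: same "* 83" (~1000/12) scaling */
}

static inline int max17042_raw_to_percent(int raw)
{
	return raw / 256;	/* SOC: the upper byte carries the percentage */
}
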
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 5bc1dcf..0b0ff3a 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -201,6 +201,72 @@
 	return ret;
 }
 
+static int olpc_bat_get_charge_full_design(union power_supply_propval *val)
+{
+	uint8_t ec_byte;
+	union power_supply_propval tech;
+	int ret, mfr;
+
+	ret = olpc_bat_get_tech(&tech);
+	if (ret)
+		return ret;
+
+	ec_byte = BAT_ADDR_MFR_TYPE;
+	ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
+	if (ret)
+		return ret;
+
+	mfr = ec_byte >> 4;
+
+	switch (tech.intval) {
+	case POWER_SUPPLY_TECHNOLOGY_NiMH:
+		switch (mfr) {
+		case 1: /* Gold Peak */
+			val->intval = 3000000*.8;
+			break;
+		default:
+			return -EIO;
+		}
+		break;
+
+	case POWER_SUPPLY_TECHNOLOGY_LiFe:
+		switch (mfr) {
+		case 1: /* Gold Peak */
+			val->intval = 2800000;
+			break;
+		case 2: /* BYD */
+			val->intval = 3100000;
+			break;
+		default:
+			return -EIO;
+		}
+		break;
+
+	default:
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static int olpc_bat_get_charge_now(union power_supply_propval *val)
+{
+	uint8_t soc;
+	union power_supply_propval full;
+	int ret;
+
+	ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &soc, 1);
+	if (ret)
+		return ret;
+
+	ret = olpc_bat_get_charge_full_design(&full);
+	if (ret)
+		return ret;
+
+	val->intval = soc * (full.intval / 100);
+	return 0;
+}
+
 /*********************************************************************
  *		Battery properties
  *********************************************************************/
@@ -267,6 +333,7 @@
 			return ret;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		ret = olpc_ec_cmd(EC_BAT_VOLTAGE, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
 			return ret;
@@ -274,6 +341,7 @@
 		val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
 		ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
 			return ret;
@@ -294,6 +362,16 @@
 		else
 			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		ret = olpc_bat_get_charge_full_design(val);
+		if (ret)
+			return ret;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		ret = olpc_bat_get_charge_now(val);
+		if (ret)
+			return ret;
+		break;
 	case POWER_SUPPLY_PROP_TEMP:
 		ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2);
 		if (ret)
@@ -331,16 +409,20 @@
 	return ret;
 }
 
-static enum power_supply_property olpc_bat_props[] = {
+static enum power_supply_property olpc_xo1_bat_props[] = {
 	POWER_SUPPLY_PROP_STATUS,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_HEALTH,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
 	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
 	POWER_SUPPLY_PROP_CAPACITY,
 	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_AMBIENT,
 	POWER_SUPPLY_PROP_MANUFACTURER,
@@ -348,6 +430,27 @@
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 };
 
+/* XO-1.5 does not have ambient temperature property */
+static enum power_supply_property olpc_xo15_bat_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_VOLTAGE_AVG,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_AVG,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+};
+
 /* EEPROM reading goes completely around the power_supply API, sadly */
 
 #define EEPROM_START	0x20
@@ -419,8 +522,6 @@
 static struct platform_device *bat_pdev;
 
 static struct power_supply olpc_bat = {
-	.properties = olpc_bat_props,
-	.num_properties = ARRAY_SIZE(olpc_bat_props),
 	.get_property = olpc_bat_get_property,
 	.use_for_apm = 1,
 };
@@ -466,6 +567,13 @@
 		goto ac_failed;
 
 	olpc_bat.name = bat_pdev->name;
+	if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
+		olpc_bat.properties = olpc_xo15_bat_props;
+		olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
+	} else { /* XO-1 */
+		olpc_bat.properties = olpc_xo1_bat_props;
+		olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
+	}
 
 	ret = power_supply_register(&bat_pdev->dev, &olpc_bat);
 	if (ret)
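
The CHARGE_NOW value in the OLPC changes above is derived rather than read directly: the EC reports a state of charge in percent, and the design capacity is looked up from the manufacturer/chemistry table. A small worked illustration with assumed values:

/* Worked illustration of olpc_bat_get_charge_now(): with an assumed EC
 * state-of-charge of 50% and a BYD LiFe pack (3100000 uAh design capacity),
 * the reported charge is 50 * (3100000 / 100) = 1550000 uAh.
 */
static int example_charge_now_uah(int soc_percent, int charge_full_uah)
{
	return soc_percent * (charge_full_uah / 100);
}
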
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 91606bb..970f733 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -190,10 +190,10 @@
 	goto success;
 
 create_triggers_failed:
-	device_unregister(psy->dev);
+	device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
-	kfree(dev);
+	put_device(dev);
 success:
 	return rc;
 }
@@ -201,7 +201,7 @@
 
 void power_supply_unregister(struct power_supply *psy)
 {
-	flush_scheduled_work();
+	cancel_work_sync(&psy->changed_work);
 	power_supply_remove_triggers(psy);
 	device_unregister(psy->dev);
 }
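
The error-path change above swaps device_unregister()/kfree() for device_del()/put_device(): once device_add() has succeeded, the struct device is reference counted and must be released through its ->release() callback rather than freed directly. A minimal sketch of that unwind pattern, with hypothetical helper names:

#include <linux/device.h>

static int example_create_triggers(struct device *dev);	/* hypothetical helper */

static int example_register(struct device *dev)
{
	int rc;

	rc = device_add(dev);
	if (rc)
		goto out_put;		/* never added: just drop our reference */

	rc = example_create_triggers(dev);
	if (rc)
		goto out_del;		/* added: tear down, then drop reference */

	return 0;

out_del:
	device_del(dev);		/* undo device_add() */
out_put:
	put_device(dev);		/* final put frees dev via ->release() */
	return rc;
}
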
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index fe16b48..4255f23 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -1,5 +1,5 @@
 /*
- *	iPAQ h1930/h1940/rx1950 battery controler driver
+ *	iPAQ h1930/h1940/rx1950 battery controller driver
  *	Copyright (c) Vasily Khoruzhick
  *	Based on h1940_battery.c by Arnaud Patard
  *
@@ -112,6 +112,13 @@
 	return volt_val + cur_val * impedance / 1000;
 }
 
+static int charge_finished(struct s3c_adc_bat *bat)
+{
+	return bat->pdata->gpio_inverted ?
+		!gpio_get_value(bat->pdata->gpio_charge_finished) :
+		gpio_get_value(bat->pdata->gpio_charge_finished);
+}
+
 static int s3c_adc_bat_get_property(struct power_supply *psy,
 				    enum power_supply_property psp,
 				    union power_supply_propval *val)
@@ -140,7 +147,7 @@
 
 	if (bat->cable_plugged &&
 		((bat->pdata->gpio_charge_finished < 0) ||
-		!gpio_get_value(bat->pdata->gpio_charge_finished))) {
+		!charge_finished(bat))) {
 		lut = bat->pdata->lut_acin;
 		lut_size = bat->pdata->lut_acin_cnt;
 	}
@@ -236,8 +243,7 @@
 		}
 	} else {
 		if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
-			is_charged = gpio_get_value(
-				main_bat.pdata->gpio_charge_finished);
+			is_charged = charge_finished(&main_bat);
 			if (is_charged) {
 				if (bat->pdata->disable_charger)
 					bat->pdata->disable_charger();
@@ -427,5 +433,5 @@
 module_exit(s3c_adc_bat_exit);
 
 MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
-MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controler driver");
+MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index ee04936..53f0d35 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -332,7 +332,7 @@
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_scheduled_work();
+	flush_work_sync(&bat_work);
 	return 0;
 }
 
@@ -422,7 +422,7 @@
 err_psy_reg_main:
 
 	/* see comment in tosa_bat_remove */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	i--;
 err_gpio:
@@ -445,12 +445,11 @@
 	power_supply_unregister(&tosa_bat_main.psy);
 
 	/*
-	 * now flush all pending work.
-	 * we won't get any more schedules, since all
-	 * sources (isr and external_power_changed)
-	 * are unregistered now.
+	 * Now cancel the bat_work.  We won't get any more schedules,
+	 * since all sources (isr and external_power_changed) are
+	 * unregistered now.
 	 */
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 
 	for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
 		gpio_free(gpios[i].gpio);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 5071d85..156559e 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -147,7 +147,7 @@
 #ifdef CONFIG_PM
 static int wm97xx_bat_suspend(struct device *dev)
 {
-	flush_scheduled_work();
+	flush_work_sync(&bat_work);
 	return 0;
 }
 
@@ -273,7 +273,7 @@
 		free_irq(gpio_to_irq(pdata->charge_gpio), dev);
 		gpio_free(pdata->charge_gpio);
 	}
-	flush_scheduled_work();
+	cancel_work_sync(&bat_work);
 	power_supply_unregister(&bat_ps);
 	kfree(prop);
 	return 0;
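
The flush_scheduled_work() conversions in these battery drivers all follow the same pattern: wait on, or cancel, the one work item the driver owns instead of draining the whole global workqueue. A minimal sketch with hypothetical names:

#include <linux/workqueue.h>

static struct work_struct example_bat_work;	/* hypothetical status-update work */

static int example_suspend(struct device *dev)
{
	/* wait for a pending status update without blocking on unrelated work */
	flush_work_sync(&example_bat_work);
	return 0;
}

static int example_remove(struct device *dev)
{
	/* the event sources are gone, so any queued update can be cancelled */
	cancel_work_sync(&example_bat_work);
	return 0;
}
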
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 85064a9..e5ed52d 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -254,7 +254,7 @@
 	struct z2_charger *charger = i2c_get_clientdata(client);
 	struct z2_battery_info *info = charger->info;
 
-	flush_scheduled_work();
+	cancel_work_sync(&charger->bat_work);
 	power_supply_unregister(&charger->batt_ps);
 
 	kfree(charger->batt_ps.properties);
@@ -271,7 +271,9 @@
 #ifdef CONFIG_PM
 static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
 {
-	flush_scheduled_work();
+	struct z2_charger *charger = i2c_get_clientdata(client);
+
+	flush_work_sync(&charger->bat_work);
 	return 0;
 }
 
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 1afe4e0..f0d3376 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -30,6 +30,17 @@
 	  messages to the system log.  Select this if you are having a
 	  problem with PPS support and want to see more of what is going on.
 
+config NTP_PPS
+	bool "PPS kernel consumer support"
+	depends on PPS && !NO_HZ
+	help
+	  This option adds support for direct in-kernel time
+	  synchronization using an external PPS signal.
+
+	  It doesn't work on tickless systems at the moment.
+
 source drivers/pps/clients/Kconfig
 
+source drivers/pps/generators/Kconfig
+
 endmenu
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
index 98960dd..4483eaa 100644
--- a/drivers/pps/Makefile
+++ b/drivers/pps/Makefile
@@ -3,7 +3,8 @@
 #
 
 pps_core-y			:= pps.o kapi.o sysfs.o
+pps_core-$(CONFIG_NTP_PPS)	+= kc.o
 obj-$(CONFIG_PPS)		:= pps_core.o
-obj-y				+= clients/
+obj-y				+= clients/ generators/
 
 ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 4e801bd..8520a7f 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -22,4 +22,11 @@
 	  If you say yes here you get support for a PPS source connected
 	  with the CD (Carrier Detect) pin of your serial port.
 
+config PPS_CLIENT_PARPORT
+	tristate "Parallel port PPS client"
+	depends on PPS && PARPORT
+	help
+	  If you say yes here you get support for a PPS source connected
+	  with the interrupt pin of your parallel port.
+
 endif
diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile
index 812c9b1..42517da 100644
--- a/drivers/pps/clients/Makefile
+++ b/drivers/pps/clients/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_PPS_CLIENT_KTIMER)	+= pps-ktimer.o
 obj-$(CONFIG_PPS_CLIENT_LDISC)	+= pps-ldisc.o
+obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o
 
 ifeq ($(CONFIG_PPS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index e7ef5b8..2728469 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -19,6 +19,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -31,7 +32,7 @@
  * Global variables
  */
 
-static int source;
+static struct pps_device *pps;
 static struct timer_list ktimer;
 
 /*
@@ -40,19 +41,14 @@
 
 static void pps_ktimer_event(unsigned long ptr)
 {
-	struct timespec __ts;
-	struct pps_ktime ts;
+	struct pps_event_time ts;
 
 	/* First of all we get the time stamp... */
-	getnstimeofday(&__ts);
+	pps_get_ts(&ts);
 
-	pr_info("PPS event at %lu\n", jiffies);
+	dev_info(pps->dev, "PPS event at %lu\n", jiffies);
 
-	/* ... and translate it to PPS time data struct */
-	ts.sec = __ts.tv_sec;
-	ts.nsec = __ts.tv_nsec;
-
-	pps_event(source, &ts, PPS_CAPTUREASSERT, NULL);
+	pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
 
 	mod_timer(&ktimer, jiffies + HZ);
 }
@@ -61,12 +57,11 @@
  * The echo function
  */
 
-static void pps_ktimer_echo(int source, int event, void *data)
+static void pps_ktimer_echo(struct pps_device *pps, int event, void *data)
 {
-	pr_info("echo %s %s for source %d\n",
+	dev_info(pps->dev, "echo %s %s\n",
 		event & PPS_CAPTUREASSERT ? "assert" : "",
-		event & PPS_CAPTURECLEAR ? "clear" : "",
-		source);
+		event & PPS_CAPTURECLEAR ? "clear" : "");
 }
 
 /*
@@ -89,30 +84,27 @@
 
 static void __exit pps_ktimer_exit(void)
 {
-	del_timer_sync(&ktimer);
-	pps_unregister_source(source);
+	dev_info(pps->dev, "ktimer PPS source unregistered\n");
 
-	pr_info("ktimer PPS source unregistered\n");
+	del_timer_sync(&ktimer);
+	pps_unregister_source(pps);
 }
 
 static int __init pps_ktimer_init(void)
 {
-	int ret;
-
-	ret = pps_register_source(&pps_ktimer_info,
+	pps = pps_register_source(&pps_ktimer_info,
 				PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
-	if (ret < 0) {
-		printk(KERN_ERR "cannot register ktimer source\n");
-		return ret;
+	if (pps == NULL) {
+		pr_err("cannot register PPS source\n");
+		return -ENOMEM;
 	}
-	source = ret;
 
 	setup_timer(&ktimer, pps_ktimer_event, 0);
 	mod_timer(&ktimer, jiffies + HZ);
 
-	pr_info("ktimer PPS source registered at %d\n", source);
+	dev_info(pps->dev, "ktimer PPS source registered\n");
 
-	return  0;
+	return 0;
 }
 
 module_init(pps_ktimer_init);
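
The ktimer conversion above is the template for the other clients in this series: pps_register_source() now hands back a struct pps_device * (NULL on failure) instead of an integer ID, and events are reported against that handle. A condensed sketch of a client under the reworked API, using only calls that appear in this patch set:

#include <linux/module.h>
#include <linux/pps_kernel.h>

static struct pps_device *example_pps;

static struct pps_source_info example_info = {
	.name	= "example",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static int __init example_init(void)
{
	example_pps = pps_register_source(&example_info,
				PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	return example_pps ? 0 : -ENOMEM;	/* NULL now signals failure */
}

static void example_report_pulse(void)		/* called from the IRQ path */
{
	struct pps_event_time ts;

	pps_get_ts(&ts);			/* timestamp first, then report */
	pps_event(example_pps, &ts, PPS_CAPTUREASSERT, NULL);
}

static void __exit example_exit(void)
{
	pps_unregister_source(example_pps);
}

module_init(example_init);
module_exit(example_exit);
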
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 8e1932d..79451f2 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -19,6 +19,8 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/serial_core.h>
 #include <linux/tty.h>
@@ -27,30 +29,18 @@
 #define PPS_TTY_MAGIC		0x0001
 
 static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
-				struct timespec *ts)
+				struct pps_event_time *ts)
 {
-	int id = (long)tty->disc_data;
-	struct timespec __ts;
-	struct pps_ktime pps_ts;
+	struct pps_device *pps = (struct pps_device *)tty->disc_data;
 
-	/* First of all we get the time stamp... */
-	getnstimeofday(&__ts);
-
-	/* Does caller give us a timestamp? */
-	if (ts) {	/* Yes. Let's use it! */
-		pps_ts.sec = ts->tv_sec;
-		pps_ts.nsec = ts->tv_nsec;
-	} else {	/* No. Do it ourself! */
-		pps_ts.sec = __ts.tv_sec;
-		pps_ts.nsec = __ts.tv_nsec;
-	}
+	BUG_ON(pps == NULL);
 
 	/* Now do the PPS event report */
-	pps_event(id, &pps_ts, status ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR,
-			NULL);
+	pps_event(pps, ts, status ? PPS_CAPTUREASSERT :
+			PPS_CAPTURECLEAR, NULL);
 
-	pr_debug("PPS %s at %lu on source #%d\n",
-			status ? "assert" : "clear", jiffies, id);
+	dev_dbg(pps->dev, "PPS %s at %lu\n",
+			status ? "assert" : "clear", jiffies);
 }
 
 static int (*alias_n_tty_open)(struct tty_struct *tty);
@@ -60,6 +50,7 @@
 	struct pps_source_info info;
 	struct tty_driver *drv = tty->driver;
 	int index = tty->index + drv->name_base;
+	struct pps_device *pps;
 	int ret;
 
 	info.owner = THIS_MODULE;
@@ -70,34 +61,42 @@
 			PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
 			PPS_CANWAIT | PPS_TSFMT_TSPEC;
 
-	ret = pps_register_source(&info, PPS_CAPTUREBOTH | \
+	pps = pps_register_source(&info, PPS_CAPTUREBOTH | \
 				PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
-	if (ret < 0) {
+	if (pps == NULL) {
 		pr_err("cannot register PPS source \"%s\"\n", info.path);
-		return ret;
+		return -ENOMEM;
 	}
-	tty->disc_data = (void *)(long)ret;
+	tty->disc_data = pps;
 
 	/* Should open N_TTY ldisc too */
 	ret = alias_n_tty_open(tty);
-	if (ret < 0)
-		pps_unregister_source((long)tty->disc_data);
+	if (ret < 0) {
+		pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+		goto err_unregister;
+	}
 
-	pr_info("PPS source #%d \"%s\" added\n", ret, info.path);
+	dev_info(pps->dev, "source \"%s\" added\n", info.path);
 
 	return 0;
+
+err_unregister:
+	tty->disc_data = NULL;
+	pps_unregister_source(pps);
+	return ret;
 }
 
 static void (*alias_n_tty_close)(struct tty_struct *tty);
 
 static void pps_tty_close(struct tty_struct *tty)
 {
-	int id = (long)tty->disc_data;
+	struct pps_device *pps = (struct pps_device *)tty->disc_data;
 
-	pps_unregister_source(id);
 	alias_n_tty_close(tty);
 
-	pr_info("PPS source #%d removed\n", id);
+	tty->disc_data = NULL;
+	dev_info(pps->dev, "removed\n");
+	pps_unregister_source(pps);
 }
 
 static struct tty_ldisc_ops pps_ldisc_ops;
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
new file mode 100644
index 0000000..32221ef
--- /dev/null
+++ b/drivers/pps/clients/pps_parport.c
@@ -0,0 +1,258 @@
+/*
+ * pps_parport.c -- kernel parallel port PPS client
+ *
+ *
+ * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * implement echo over SEL pin
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irqnr.h>
+#include <linux/time.h>
+#include <linux/parport.h>
+#include <linux/pps_kernel.h>
+
+#define DRVDESC "parallel port PPS client"
+
+/* module parameters */
+
+#define CLEAR_WAIT_MAX		100
+#define CLEAR_WAIT_MAX_ERRORS	5
+
+static unsigned int clear_wait = 100;
+MODULE_PARM_DESC(clear_wait,
+	"Maximum number of port reads when polling for signal clear,"
+	" zero turns clear edge capture off entirely");
+module_param(clear_wait, uint, 0);
+
+
+/* internal per port structure */
+struct pps_client_pp {
+	struct pardevice *pardev;	/* parport device */
+	struct pps_device *pps;		/* PPS device */
+	unsigned int cw;		/* port clear timeout */
+	unsigned int cw_err;		/* number of timeouts */
+};
+
+static inline int signal_is_set(struct parport *port)
+{
+	return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0;
+}
+
+/* parport interrupt handler */
+static void parport_irq(void *handle)
+{
+	struct pps_event_time ts_assert, ts_clear;
+	struct pps_client_pp *dev = handle;
+	struct parport *port = dev->pardev->port;
+	unsigned int i;
+	unsigned long flags;
+
+	/* first of all we get the time stamp... */
+	pps_get_ts(&ts_assert);
+
+	if (dev->cw == 0)
+		/* clear edge capture disabled */
+		goto out_assert;
+
+	/* try to capture the clear edge */
+
+	/* We have to disable interrupts here. The idea is to prevent
+	 * other interrupts on the same processor from introducing random
+	 * lags while polling the port. Reading from an I/O port is known
+	 * to take approximately 1us, while other interrupt handlers can
+	 * potentially take much longer.
+	 *
+	 * Interrupts won't be disabled for a long time because the
+	 * number of polls is limited by the clear_wait parameter, which is
+	 * kept rather low. So it should never be an issue.
+	 */
+	local_irq_save(flags);
+	/* check the signal (no signal means the pulse is lost this time) */
+	if (!signal_is_set(port)) {
+		local_irq_restore(flags);
+		dev_err(dev->pps->dev, "lost the signal\n");
+		goto out_assert;
+	}
+
+	/* poll the port until the signal is unset */
+	for (i = dev->cw; i; i--)
+		if (!signal_is_set(port)) {
+			pps_get_ts(&ts_clear);
+			local_irq_restore(flags);
+			dev->cw_err = 0;
+			goto out_both;
+		}
+	local_irq_restore(flags);
+
+	/* timeout */
+	dev->cw_err++;
+	if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
+		dev_err(dev->pps->dev, "disabled clear edge capture after %d"
+				" timeouts\n", dev->cw_err);
+		dev->cw = 0;
+		dev->cw_err = 0;
+	}
+
+out_assert:
+	/* fire assert event */
+	pps_event(dev->pps, &ts_assert,
+			PPS_CAPTUREASSERT, NULL);
+	return;
+
+out_both:
+	/* fire assert event */
+	pps_event(dev->pps, &ts_assert,
+			PPS_CAPTUREASSERT, NULL);
+	/* fire clear event */
+	pps_event(dev->pps, &ts_clear,
+			PPS_CAPTURECLEAR, NULL);
+	return;
+}
+
+/* the PPS echo function */
+static void pps_echo(struct pps_device *pps, int event, void *data)
+{
+	dev_info(pps->dev, "echo %s %s\n",
+		event & PPS_CAPTUREASSERT ? "assert" : "",
+		event & PPS_CAPTURECLEAR ? "clear" : "");
+}
+
+static void parport_attach(struct parport *port)
+{
+	struct pps_client_pp *device;
+	struct pps_source_info info = {
+		.name		= KBUILD_MODNAME,
+		.path		= "",
+		.mode		= PPS_CAPTUREBOTH | \
+				  PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
+				  PPS_ECHOASSERT | PPS_ECHOCLEAR | \
+				  PPS_CANWAIT | PPS_TSFMT_TSPEC,
+		.echo		= pps_echo,
+		.owner		= THIS_MODULE,
+		.dev		= NULL
+	};
+
+	device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
+	if (!device) {
+		pr_err("memory allocation failed, not attaching\n");
+		return;
+	}
+
+	device->pardev = parport_register_device(port, KBUILD_MODNAME,
+			NULL, NULL, parport_irq, 0, device);
+	if (!device->pardev) {
+		pr_err("couldn't register with %s\n", port->name);
+		goto err_free;
+	}
+
+	if (parport_claim_or_block(device->pardev) < 0) {
+		pr_err("couldn't claim %s\n", port->name);
+		goto err_unregister_dev;
+	}
+
+	device->pps = pps_register_source(&info,
+			PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
+	if (device->pps == NULL) {
+		pr_err("couldn't register PPS source\n");
+		goto err_release_dev;
+	}
+
+	device->cw = clear_wait;
+
+	port->ops->enable_irq(port);
+
+	pr_info("attached to %s\n", port->name);
+
+	return;
+
+err_release_dev:
+	parport_release(device->pardev);
+err_unregister_dev:
+	parport_unregister_device(device->pardev);
+err_free:
+	kfree(device);
+}
+
+static void parport_detach(struct parport *port)
+{
+	struct pardevice *pardev = port->cad;
+	struct pps_client_pp *device;
+
+	/* FIXME: oooh, this is ugly! */
+	if (strcmp(pardev->name, KBUILD_MODNAME))
+		/* not our port */
+		return;
+
+	device = pardev->private;
+
+	port->ops->disable_irq(port);
+	pps_unregister_source(device->pps);
+	parport_release(pardev);
+	parport_unregister_device(pardev);
+	kfree(device);
+}
+
+static struct parport_driver pps_parport_driver = {
+	.name = KBUILD_MODNAME,
+	.attach = parport_attach,
+	.detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_parport_init(void)
+{
+	int ret;
+
+	pr_info(DRVDESC "\n");
+
+	if (clear_wait > CLEAR_WAIT_MAX) {
+		pr_err("clear_wait value should not be greater"
+				" than %d\n", CLEAR_WAIT_MAX);
+		return -EINVAL;
+	}
+
+	ret = parport_register_driver(&pps_parport_driver);
+	if (ret) {
+		pr_err("unable to register with parport\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit pps_parport_exit(void)
+{
+	parport_unregister_driver(&pps_parport_driver);
+}
+
+module_init(pps_parport_init);
+module_exit(pps_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
new file mode 100644
index 0000000..f3a73dd
--- /dev/null
+++ b/drivers/pps/generators/Kconfig
@@ -0,0 +1,13 @@
+#
+# PPS generators configuration
+#
+
+comment "PPS generators support"
+
+config PPS_GENERATOR_PARPORT
+	tristate "Parallel port PPS signal generator"
+	depends on PARPORT
+	help
+	  If you say yes here you get support for a PPS signal generator which
+	  utilizes the STROBE pin of a parallel port to send PPS signals. It uses
+	  the parport abstraction layer and hrtimers to precisely control the signal.
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
new file mode 100644
index 0000000..303304a
--- /dev/null
+++ b/drivers/pps/generators/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PPS generators.
+#
+
+obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
+
+ifeq ($(CONFIG_PPS_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
new file mode 100644
index 0000000..5c32f8d
--- /dev/null
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -0,0 +1,282 @@
+/*
+ * pps_gen_parport.c -- kernel parallel port PPS signal generator
+ *
+ *
+ * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * fix issues when realtime clock is adjusted in a leap
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/parport.h>
+
+#define DRVDESC "parallel port PPS signal generator"
+
+#define SIGNAL		0
+#define NO_SIGNAL	PARPORT_CONTROL_STROBE
+
+/* module parameters */
+
+#define SEND_DELAY_MAX		100000
+
+static unsigned int send_delay = 30000;
+MODULE_PARM_DESC(delay,
+	"Delay between setting and dropping the signal (ns)");
+module_param_named(delay, send_delay, uint, 0);
+
+
+#define SAFETY_INTERVAL	3000	/* set the hrtimer earlier for safety (ns) */
+
+/* internal per port structure */
+struct pps_generator_pp {
+	struct pardevice *pardev;	/* parport device */
+	struct hrtimer timer;
+	long port_write_time;		/* calibrated port write time (ns) */
+};
+
+static struct pps_generator_pp device = {
+	.pardev = NULL,
+};
+
+static int attached;
+
+/* calibrated time between a hrtimer event and the reaction */
+static long hrtimer_error = SAFETY_INTERVAL;
+
+/* the kernel hrtimer event */
+static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
+{
+	struct timespec expire_time, ts1, ts2, ts3, dts;
+	struct pps_generator_pp *dev;
+	struct parport *port;
+	long lim, delta;
+	unsigned long flags;
+
+	/* We have to disable interrupts here. The idea is to prevent
+	 * other interrupts on the same processor from introducing random
+	 * lags while polling the clock. getnstimeofday() takes <1us on
+	 * most machines, while other interrupt handlers can potentially
+	 * take much longer.
+	 *
+	 * NB: approx time with blocked interrupts =
+	 * send_delay + 3 * SAFETY_INTERVAL
+	 */
+	local_irq_save(flags);
+
+	/* first of all we get the time stamp... */
+	getnstimeofday(&ts1);
+	expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+	dev = container_of(timer, struct pps_generator_pp, timer);
+	lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
+
+	/* check if we are late */
+	if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
+		local_irq_restore(flags);
+		pr_err("we are late this time %ld.%09ld\n",
+				ts1.tv_sec, ts1.tv_nsec);
+		goto done;
+	}
+
+	/* busy loop until the time is right for an assert edge */
+	do {
+		getnstimeofday(&ts2);
+	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+	/* set the signal */
+	port = dev->pardev->port;
+	port->ops->write_control(port, SIGNAL);
+
+	/* busy loop until the time is right for a clear edge */
+	lim = NSEC_PER_SEC - dev->port_write_time;
+	do {
+		getnstimeofday(&ts2);
+	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+	/* unset the signal */
+	port->ops->write_control(port, NO_SIGNAL);
+
+	getnstimeofday(&ts3);
+
+	local_irq_restore(flags);
+
+	/* update calibrated port write time */
+	dts = timespec_sub(ts3, ts2);
+	dev->port_write_time =
+		(dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+
+done:
+	/* update calibrated hrtimer error */
+	dts = timespec_sub(ts1, expire_time);
+	delta = timespec_to_ns(&dts);
+	/* If the new error value is bigger than the old, use the new
+	 * value; if not, slowly move towards the new value. This
+	 * way it should be safe in bad conditions and efficient in
+	 * good conditions.
+	 */
+	if (delta >= hrtimer_error)
+		hrtimer_error = delta;
+	else
+		hrtimer_error = (3 * hrtimer_error + delta) >> 2;
+
+	/* update the hrtimer expire time */
+	hrtimer_set_expires(timer,
+			ktime_set(expire_time.tv_sec + 1,
+				NSEC_PER_SEC - (send_delay +
+				dev->port_write_time + SAFETY_INTERVAL +
+				2 * hrtimer_error)));
+
+	return HRTIMER_RESTART;
+}
+
+/* calibrate port write time */
+#define PORT_NTESTS_SHIFT	5
+static void calibrate_port(struct pps_generator_pp *dev)
+{
+	struct parport *port = dev->pardev->port;
+	int i;
+	long acc = 0;
+
+	for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
+		struct timespec a, b;
+		unsigned long irq_flags;
+
+		local_irq_save(irq_flags);
+		getnstimeofday(&a);
+		port->ops->write_control(port, NO_SIGNAL);
+		getnstimeofday(&b);
+		local_irq_restore(irq_flags);
+
+		b = timespec_sub(b, a);
+		acc += timespec_to_ns(&b);
+	}
+
+	dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
+	pr_info("port write takes %ldns\n", dev->port_write_time);
+}
+
+static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
+{
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+
+	return ktime_set(ts.tv_sec +
+			((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
+			NSEC_PER_SEC - (send_delay +
+			dev->port_write_time + 3 * SAFETY_INTERVAL));
+}
+
+static void parport_attach(struct parport *port)
+{
+	if (attached) {
+		/* we already have a port */
+		return;
+	}
+
+	device.pardev = parport_register_device(port, KBUILD_MODNAME,
+			NULL, NULL, NULL, 0, &device);
+	if (!device.pardev) {
+		pr_err("couldn't register with %s\n", port->name);
+		return;
+	}
+
+	if (parport_claim_or_block(device.pardev) < 0) {
+		pr_err("couldn't claim %s\n", port->name);
+		goto err_unregister_dev;
+	}
+
+	pr_info("attached to %s\n", port->name);
+	attached = 1;
+
+	calibrate_port(&device);
+
+	hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	device.timer.function = hrtimer_event;
+#ifdef CONFIG_PREEMPT_RT
+	/* hrtimer interrupt will run in the interrupt context with this */
+	device.timer.irqsafe = 1;
+#endif
+
+	hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
+
+	return;
+
+err_unregister_dev:
+	parport_unregister_device(device.pardev);
+}
+
+static void parport_detach(struct parport *port)
+{
+	if (port->cad != device.pardev)
+		return;	/* not our port */
+
+	hrtimer_cancel(&device.timer);
+	parport_release(device.pardev);
+	parport_unregister_device(device.pardev);
+}
+
+static struct parport_driver pps_gen_parport_driver = {
+	.name = KBUILD_MODNAME,
+	.attach = parport_attach,
+	.detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_gen_parport_init(void)
+{
+	int ret;
+
+	pr_info(DRVDESC "\n");
+
+	if (send_delay > SEND_DELAY_MAX) {
+		pr_err("delay value should not be greater"
+				" than %d\n", SEND_DELAY_MAX);
+		return -EINVAL;
+	}
+
+	ret = parport_register_driver(&pps_gen_parport_driver);
+	if (ret) {
+		pr_err("unable to register with parport\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit pps_gen_parport_exit(void)
+{
+	parport_unregister_driver(&pps_gen_parport_driver);
+	pr_info("hrtimer avg error is %ldns\n", hrtimer_error);
+}
+
+module_init(pps_gen_parport_init);
+module_exit(pps_gen_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
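
The expiry arithmetic in hrtimer_event() is easier to see with concrete numbers. A worked example using the default send_delay (30000 ns) and SAFETY_INTERVAL (3000 ns), together with assumed calibration results of a 1000 ns port write and a 3000 ns hrtimer error:

/* Worked example of the next-expiry computation above; the calibration
 * numbers (port_write_time, hrtimer_error) are assumed, not measured.
 */
static long example_next_expire_ns(void)
{
	long send_delay = 30000;	/* module default */
	long safety = 3000;		/* SAFETY_INTERVAL */
	long port_write_time = 1000;	/* assumed calibration result */
	long hrtimer_error = 3000;	/* assumed calibration result */

	/* 1000000000 - (30000 + 1000 + 3000 + 6000) = 999960000 ns,
	 * i.e. the timer fires 40 us before the second boundary, leaving
	 * room for the busy-wait loops and the two port writes. */
	return 1000000000L -
		(send_delay + port_write_time + safety + 2 * hrtimer_error);
}
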
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 1aa02db..cba1b43 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -19,24 +19,20 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/time.h>
+#include <linux/timex.h>
 #include <linux/spinlock.h>
-#include <linux/idr.h>
 #include <linux/fs.h>
 #include <linux/pps_kernel.h>
 #include <linux/slab.h>
 
-/*
- * Global variables
- */
-
-DEFINE_SPINLOCK(pps_idr_lock);
-DEFINE_IDR(pps_idr);
+#include "kc.h"
 
 /*
  * Local functions
@@ -60,60 +56,6 @@
  * Exported functions
  */
 
-/* pps_get_source - find a PPS source
- * @source: the PPS source ID.
- *
- * This function is used to find an already registered PPS source into the
- * system.
- *
- * The function returns NULL if found nothing, otherwise it returns a pointer
- * to the PPS source data struct (the refcounter is incremented by 1).
- */
-
-struct pps_device *pps_get_source(int source)
-{
-	struct pps_device *pps;
-	unsigned long flags;
-
-	spin_lock_irqsave(&pps_idr_lock, flags);
-
-	pps = idr_find(&pps_idr, source);
-	if (pps != NULL)
-		atomic_inc(&pps->usage);
-
-	spin_unlock_irqrestore(&pps_idr_lock, flags);
-
-	return pps;
-}
-
-/* pps_put_source - free the PPS source data
- * @pps: a pointer to the PPS source.
- *
- * This function is used to free a PPS data struct if its refcount is 0.
- */
-
-void pps_put_source(struct pps_device *pps)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&pps_idr_lock, flags);
-	BUG_ON(atomic_read(&pps->usage) == 0);
-
-	if (!atomic_dec_and_test(&pps->usage)) {
-		pps = NULL;
-		goto exit;
-	}
-
-	/* No more reference to the PPS source. We can safely remove the
-	 * PPS data struct.
-	 */
-	idr_remove(&pps_idr, pps->id);
-
-exit:
-	spin_unlock_irqrestore(&pps_idr_lock, flags);
-	kfree(pps);
-}
-
 /* pps_register_source - add a PPS source in the system
  * @info: the PPS info struct
  * @default_params: the default PPS parameters of the new source
@@ -122,31 +64,31 @@
  * source is described by info's fields and it will have, as default PPS
  * parameters, the ones specified into default_params.
  *
- * The function returns, in case of success, the PPS source ID.
+ * The function returns, in case of success, the PPS device. Otherwise NULL.
  */
 
-int pps_register_source(struct pps_source_info *info, int default_params)
+struct pps_device *pps_register_source(struct pps_source_info *info,
+		int default_params)
 {
 	struct pps_device *pps;
-	int id;
 	int err;
 
 	/* Sanity checks */
 	if ((info->mode & default_params) != default_params) {
-		printk(KERN_ERR "pps: %s: unsupported default parameters\n",
+		pr_err("%s: unsupported default parameters\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
 	}
 	if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
 			info->echo == NULL) {
-		printk(KERN_ERR "pps: %s: echo function is not defined\n",
+		pr_err("%s: echo function is not defined\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
 	}
 	if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
-		printk(KERN_ERR "pps: %s: unspecified time format\n",
+		pr_err("%s: unspecified time format\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
@@ -168,94 +110,48 @@
 
 	init_waitqueue_head(&pps->queue);
 	spin_lock_init(&pps->lock);
-	atomic_set(&pps->usage, 1);
-
-	/* Get new ID for the new PPS source */
-	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
-		err = -ENOMEM;
-		goto kfree_pps;
-	}
-
-	spin_lock_irq(&pps_idr_lock);
-
-	/* Now really allocate the PPS source.
-	 * After idr_get_new() calling the new source will be freely available
-	 * into the kernel.
-	 */
-	err = idr_get_new(&pps_idr, pps, &id);
-	if (err < 0) {
-		spin_unlock_irq(&pps_idr_lock);
-		goto kfree_pps;
-	}
-
-	id = id & MAX_ID_MASK;
-	if (id >= PPS_MAX_SOURCES) {
-		spin_unlock_irq(&pps_idr_lock);
-
-		printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
-					info->name);
-		err = -EBUSY;
-		goto free_idr;
-	}
-	pps->id = id;
-
-	spin_unlock_irq(&pps_idr_lock);
 
 	/* Create the char device */
 	err = pps_register_cdev(pps);
 	if (err < 0) {
-		printk(KERN_ERR "pps: %s: unable to create char device\n",
+		pr_err("%s: unable to create char device\n",
 					info->name);
-		goto free_idr;
+		goto kfree_pps;
 	}
 
-	pr_info("new PPS source %s at ID %d\n", info->name, id);
+	dev_info(pps->dev, "new PPS source %s\n", info->name);
 
-	return id;
-
-free_idr:
-	spin_lock_irq(&pps_idr_lock);
-	idr_remove(&pps_idr, id);
-	spin_unlock_irq(&pps_idr_lock);
+	return pps;
 
 kfree_pps:
 	kfree(pps);
 
 pps_register_source_exit:
-	printk(KERN_ERR "pps: %s: unable to register source\n", info->name);
+	pr_err("%s: unable to register source\n", info->name);
 
-	return err;
+	return NULL;
 }
 EXPORT_SYMBOL(pps_register_source);
 
 /* pps_unregister_source - remove a PPS source from the system
- * @source: the PPS source ID
+ * @pps: the PPS source
  *
  * This function is used to remove a previously registered PPS source from
  * the system.
  */
 
-void pps_unregister_source(int source)
+void pps_unregister_source(struct pps_device *pps)
 {
-	struct pps_device *pps;
-
-	spin_lock_irq(&pps_idr_lock);
-	pps = idr_find(&pps_idr, source);
-
-	if (!pps) {
-		BUG();
-		spin_unlock_irq(&pps_idr_lock);
-		return;
-	}
-	spin_unlock_irq(&pps_idr_lock);
-
+	pps_kc_remove(pps);
 	pps_unregister_cdev(pps);
-	pps_put_source(pps);
+
+	/* don't have to kfree(pps) here because it will be done on
+	 * device destruction */
 }
 EXPORT_SYMBOL(pps_unregister_source);
 
 /* pps_event - register a PPS event into the system
- * @source: the PPS source ID
+ * @pps: the PPS device
  * @ts: the event timestamp
  * @event: the event type
  * @data: userdef pointer
@@ -263,78 +159,72 @@
  * This function is used by each PPS client in order to register a new
  * PPS event into the system (it's usually called inside an IRQ handler).
  *
- * If an echo function is associated with the PPS source it will be called
+ * If an echo function is associated with the PPS device it will be called
  * as:
- *	pps->info.echo(source, event, data);
+ *	pps->info.echo(pps, event, data);
  */
-
-void pps_event(int source, struct pps_ktime *ts, int event, void *data)
+void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+		void *data)
 {
-	struct pps_device *pps;
 	unsigned long flags;
 	int captured = 0;
+	struct pps_ktime ts_real;
 
-	if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
-		printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
-			event, source);
-		return;
-	}
+	/* check event type */
+	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
 
-	pps = pps_get_source(source);
-	if (!pps)
-		return;
+	dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
+			ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
 
-	pr_debug("PPS event on source %d at %llu.%06u\n",
-			pps->id, (unsigned long long) ts->sec, ts->nsec);
+	timespec_to_pps_ktime(&ts_real, ts->ts_real);
 
 	spin_lock_irqsave(&pps->lock, flags);
 
 	/* Must call the echo function? */
 	if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
-		pps->info.echo(source, event, data);
+		pps->info.echo(pps, event, data);
 
 	/* Check the event */
 	pps->current_mode = pps->params.mode;
-	if ((event & PPS_CAPTUREASSERT) &
-			(pps->params.mode & PPS_CAPTUREASSERT)) {
+	if (event & pps->params.mode & PPS_CAPTUREASSERT) {
 		/* We have to add an offset? */
 		if (pps->params.mode & PPS_OFFSETASSERT)
-			pps_add_offset(ts, &pps->params.assert_off_tu);
+			pps_add_offset(&ts_real,
+					&pps->params.assert_off_tu);
 
 		/* Save the time stamp */
-		pps->assert_tu = *ts;
+		pps->assert_tu = ts_real;
 		pps->assert_sequence++;
-		pr_debug("capture assert seq #%u for source %d\n",
-			pps->assert_sequence, source);
+		dev_dbg(pps->dev, "capture assert seq #%u\n",
+			pps->assert_sequence);
 
 		captured = ~0;
 	}
-	if ((event & PPS_CAPTURECLEAR) &
-			(pps->params.mode & PPS_CAPTURECLEAR)) {
+	if (event & pps->params.mode & PPS_CAPTURECLEAR) {
 		/* We have to add an offset? */
 		if (pps->params.mode & PPS_OFFSETCLEAR)
-			pps_add_offset(ts, &pps->params.clear_off_tu);
+			pps_add_offset(&ts_real,
+					&pps->params.clear_off_tu);
 
 		/* Save the time stamp */
-		pps->clear_tu = *ts;
+		pps->clear_tu = ts_real;
 		pps->clear_sequence++;
-		pr_debug("capture clear seq #%u for source %d\n",
-			pps->clear_sequence, source);
+		dev_dbg(pps->dev, "capture clear seq #%u\n",
+			pps->clear_sequence);
 
 		captured = ~0;
 	}
 
-	/* Wake up iif captured somthing */
+	pps_kc_event(pps, ts, event);
+
+	/* Wake up if captured something */
 	if (captured) {
-		pps->go = ~0;
-		wake_up_interruptible(&pps->queue);
+		pps->last_ev++;
+		wake_up_interruptible_all(&pps->queue);
 
 		kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
 	}
 
 	spin_unlock_irqrestore(&pps->lock, flags);
-
-	/* Now we can release the PPS source for (possible) deregistration */
-	pps_put_source(pps);
 }
 EXPORT_SYMBOL(pps_event);
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
new file mode 100644
index 0000000..079e930
--- /dev/null
+++ b/drivers/pps/kc.c
@@ -0,0 +1,122 @@
+/*
+ * PPS kernel consumer API
+ *
+ * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pps_kernel.h>
+
+#include "kc.h"
+
+/*
+ * Global variables
+ */
+
+/* state variables to bind kernel consumer */
+DEFINE_SPINLOCK(pps_kc_hardpps_lock);
+/* PPS API (RFC 2783): current source and mode for kernel consumer */
+struct pps_device *pps_kc_hardpps_dev;	/* unique pointer to device */
+int pps_kc_hardpps_mode;		/* mode bits for kernel consumer */
+
+/* pps_kc_bind - control PPS kernel consumer binding
+ * @pps: the PPS source
+ * @bind_args: kernel consumer bind parameters
+ *
+ * This function is used to bind or unbind PPS kernel consumer according to
+ * supplied parameters. Should not be called in interrupt context.
+ */
+int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+{
+	/* Check if another consumer is already bound */
+	spin_lock_irq(&pps_kc_hardpps_lock);
+
+	if (bind_args->edge == 0)
+		if (pps_kc_hardpps_dev == pps) {
+			pps_kc_hardpps_mode = 0;
+			pps_kc_hardpps_dev = NULL;
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_info(pps->dev, "unbound kernel"
+					" consumer\n");
+		} else {
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_err(pps->dev, "selected kernel consumer"
+					" is not bound\n");
+			return -EINVAL;
+		}
+	else
+		if (pps_kc_hardpps_dev == NULL ||
+				pps_kc_hardpps_dev == pps) {
+			pps_kc_hardpps_mode = bind_args->edge;
+			pps_kc_hardpps_dev = pps;
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_info(pps->dev, "bound kernel consumer: "
+				"edge=0x%x\n", bind_args->edge);
+		} else {
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_err(pps->dev, "another kernel consumer"
+					" is already bound\n");
+			return -EINVAL;
+		}
+
+	return 0;
+}
+
+/* pps_kc_remove - unbind kernel consumer on PPS source removal
+ * @pps: the PPS source
+ *
+ * This function is used to disable kernel consumer on PPS source removal
+ * if this source was bound to PPS kernel consumer. Can be called on any
+ * source safely. Should not be called in interrupt context.
+ */
+void pps_kc_remove(struct pps_device *pps)
+{
+	spin_lock_irq(&pps_kc_hardpps_lock);
+	if (pps == pps_kc_hardpps_dev) {
+		pps_kc_hardpps_mode = 0;
+		pps_kc_hardpps_dev = NULL;
+		spin_unlock_irq(&pps_kc_hardpps_lock);
+		dev_info(pps->dev, "unbound kernel consumer"
+				" on device removal\n");
+	} else
+		spin_unlock_irq(&pps_kc_hardpps_lock);
+}
+
+/* pps_kc_event - call hardpps() on PPS event
+ * @pps: the PPS source
+ * @ts: PPS event timestamp
+ * @event: PPS event edge
+ *
+ * This function calls hardpps() when an event from bound PPS source occurs.
+ */
+void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts,
+		int event)
+{
+	unsigned long flags;
+
+	/* Pass some events to kernel consumer if activated */
+	spin_lock_irqsave(&pps_kc_hardpps_lock, flags);
+	if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode)
+		hardpps(&ts->ts_real, &ts->ts_raw);
+	spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags);
+}
diff --git a/drivers/pps/kc.h b/drivers/pps/kc.h
new file mode 100644
index 0000000..d296fcd
--- /dev/null
+++ b/drivers/pps/kc.h
@@ -0,0 +1,46 @@
+/*
+ * PPS kernel consumer API header
+ *
+ * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_PPS_KC_H
+#define LINUX_PPS_KC_H
+
+#include <linux/errno.h>
+#include <linux/pps_kernel.h>
+
+#ifdef CONFIG_NTP_PPS
+
+extern int pps_kc_bind(struct pps_device *pps,
+		struct pps_bind_args *bind_args);
+extern void pps_kc_remove(struct pps_device *pps);
+extern void pps_kc_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event);
+
+
+#else /* CONFIG_NTP_PPS */
+
+static inline int pps_kc_bind(struct pps_device *pps,
+		struct pps_bind_args *bind_args) { return -EOPNOTSUPP; }
+static inline void pps_kc_remove(struct pps_device *pps) {}
+static inline void pps_kc_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event) {}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KC_H */
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ca5183b..2baadd2 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -19,6 +19,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,9 +27,13 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/pps_kernel.h>
+#include <linux/slab.h>
+
+#include "kc.h"
 
 /*
  * Local variables
@@ -37,6 +42,9 @@
 static dev_t pps_devt;
 static struct class *pps_class;
 
+static DEFINE_MUTEX(pps_idr_lock);
+static DEFINE_IDR(pps_idr);
+
 /*
  * Char device methods
  */
@@ -61,15 +69,13 @@
 {
 	struct pps_device *pps = file->private_data;
 	struct pps_kparams params;
-	struct pps_fdata fdata;
-	unsigned long ticks;
 	void __user *uarg = (void __user *) arg;
 	int __user *iuarg = (int __user *) arg;
 	int err;
 
 	switch (cmd) {
 	case PPS_GETPARAMS:
-		pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_GETPARAMS\n");
 
 		spin_lock_irq(&pps->lock);
 
@@ -85,7 +91,7 @@
 		break;
 
 	case PPS_SETPARAMS:
-		pr_debug("PPS_SETPARAMS: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_SETPARAMS\n");
 
 		/* Check the capabilities */
 		if (!capable(CAP_SYS_TIME))
@@ -95,14 +101,14 @@
 		if (err)
 			return -EFAULT;
 		if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
-			pr_debug("capture mode unspecified (%x)\n",
+			dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
 								params.mode);
 			return -EINVAL;
 		}
 
 		/* Check for supported capabilities */
 		if ((params.mode & ~pps->info.mode) != 0) {
-			pr_debug("unsupported capabilities (%x)\n",
+			dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
 								params.mode);
 			return -EINVAL;
 		}
@@ -115,7 +121,7 @@
 		/* Restore the read only parameters */
 		if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
 			/* section 3.3 of RFC 2783 interpreted */
-			pr_debug("time format unspecified (%x)\n",
+			dev_dbg(pps->dev, "time format unspecified (%x)\n",
 								params.mode);
 			pps->params.mode |= PPS_TSFMT_TSPEC;
 		}
@@ -128,7 +134,7 @@
 		break;
 
 	case PPS_GETCAP:
-		pr_debug("PPS_GETCAP: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_GETCAP\n");
 
 		err = put_user(pps->info.mode, iuarg);
 		if (err)
@@ -136,20 +142,26 @@
 
 		break;
 
-	case PPS_FETCH:
-		pr_debug("PPS_FETCH: source %d\n", pps->id);
+	case PPS_FETCH: {
+		struct pps_fdata fdata;
+		unsigned int ev;
+
+		dev_dbg(pps->dev, "PPS_FETCH\n");
 
 		err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
 		if (err)
 			return -EFAULT;
 
-		pps->go = 0;
+		ev = pps->last_ev;
 
 		/* Manage the timeout */
 		if (fdata.timeout.flags & PPS_TIME_INVALID)
-			err = wait_event_interruptible(pps->queue, pps->go);
+			err = wait_event_interruptible(pps->queue,
+					ev != pps->last_ev);
 		else {
-			pr_debug("timeout %lld.%09d\n",
+			unsigned long ticks;
+
+			dev_dbg(pps->dev, "timeout %lld.%09d\n",
 					(long long) fdata.timeout.sec,
 					fdata.timeout.nsec);
 			ticks = fdata.timeout.sec * HZ;
@@ -157,7 +169,9 @@
 
 			if (ticks != 0) {
 				err = wait_event_interruptible_timeout(
-						pps->queue, pps->go, ticks);
+						pps->queue,
+						ev != pps->last_ev,
+						ticks);
 				if (err == 0)
 					return -ETIMEDOUT;
 			}
@@ -165,7 +179,7 @@
 
 		/* Check for pending signals */
 		if (err == -ERESTARTSYS) {
-			pr_debug("pending signal caught\n");
+			dev_dbg(pps->dev, "pending signal caught\n");
 			return -EINTR;
 		}
 
@@ -185,10 +199,44 @@
 			return -EFAULT;
 
 		break;
+	}
+	case PPS_KC_BIND: {
+		struct pps_bind_args bind_args;
 
+		dev_dbg(pps->dev, "PPS_KC_BIND\n");
+
+		/* Check the capabilities */
+		if (!capable(CAP_SYS_TIME))
+			return -EPERM;
+
+		if (copy_from_user(&bind_args, uarg,
+					sizeof(struct pps_bind_args)))
+			return -EFAULT;
+
+		/* Check for supported capabilities */
+		if ((bind_args.edge & ~pps->info.mode) != 0) {
+			dev_err(pps->dev, "unsupported capabilities (%x)\n",
+					bind_args.edge);
+			return -EINVAL;
+		}
+
+		/* Validate parameters roughly */
+		if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
+				(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
+				bind_args.consumer != PPS_KC_HARDPPS) {
+			dev_err(pps->dev, "invalid kernel consumer bind"
+					" parameters (%x)\n", bind_args.edge);
+			return -EINVAL;
+		}
+
+		err = pps_kc_bind(pps, &bind_args);
+		if (err < 0)
+			return err;
+
+		break;
+	}
 	default:
 		return -ENOTTY;
-		break;
 	}
 
 	return 0;
@@ -198,12 +246,6 @@
 {
 	struct pps_device *pps = container_of(inode->i_cdev,
 						struct pps_device, cdev);
-	int found;
-
-	found = pps_get_source(pps->id) != 0;
-	if (!found)
-		return -ENODEV;
-
 	file->private_data = pps;
 
 	return 0;
@@ -211,11 +253,6 @@
 
 static int pps_cdev_release(struct inode *inode, struct file *file)
 {
-	struct pps_device *pps = file->private_data;
-
-	/* Free the PPS source and wake up (possible) deregistration */
-	pps_put_source(pps);
-
 	return 0;
 }
 
@@ -233,25 +270,67 @@
 	.release	= pps_cdev_release,
 };
 
+static void pps_device_destruct(struct device *dev)
+{
+	struct pps_device *pps = dev_get_drvdata(dev);
+
+	/* release the id only now so that it cannot be reused while the
+	 * device is still in use */
+	mutex_lock(&pps_idr_lock);
+	idr_remove(&pps_idr, pps->id);
+	mutex_unlock(&pps_idr_lock);
+
+	kfree(dev);
+	kfree(pps);
+}
+
 int pps_register_cdev(struct pps_device *pps)
 {
 	int err;
+	dev_t devt;
 
-	pps->devno = MKDEV(MAJOR(pps_devt), pps->id);
+	mutex_lock(&pps_idr_lock);
+	/* Get new ID for the new PPS source */
+	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
+		mutex_unlock(&pps_idr_lock);
+		return -ENOMEM;
+	}
+
+	/* Now really allocate the PPS source.
+	 * After idr_get_new() calling the new source will be freely available
+	 * into the kernel.
+	 */
+	err = idr_get_new(&pps_idr, pps, &pps->id);
+	mutex_unlock(&pps_idr_lock);
+
+	if (err < 0)
+		return err;
+
+	pps->id &= MAX_ID_MASK;
+	if (pps->id >= PPS_MAX_SOURCES) {
+		pr_err("%s: too many PPS sources in the system\n",
+					pps->info.name);
+		err = -EBUSY;
+		goto free_idr;
+	}
+
+	devt = MKDEV(MAJOR(pps_devt), pps->id);
+
 	cdev_init(&pps->cdev, &pps_cdev_fops);
 	pps->cdev.owner = pps->info.owner;
 
-	err = cdev_add(&pps->cdev, pps->devno, 1);
+	err = cdev_add(&pps->cdev, devt, 1);
 	if (err) {
-		printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n",
+		pr_err("%s: failed to add char device %d:%d\n",
 				pps->info.name, MAJOR(pps_devt), pps->id);
-		return err;
+		goto free_idr;
 	}
-	pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
+	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
 							"pps%d", pps->id);
 	if (IS_ERR(pps->dev))
 		goto del_cdev;
-	dev_set_drvdata(pps->dev, pps);
+
+	pps->dev->release = pps_device_destruct;
 
 	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
 			MAJOR(pps_devt), pps->id);
@@ -261,12 +340,17 @@
 del_cdev:
 	cdev_del(&pps->cdev);
 
+free_idr:
+	mutex_lock(&pps_idr_lock);
+	idr_remove(&pps_idr, pps->id);
+	mutex_unlock(&pps_idr_lock);
+
 	return err;
 }
 
 void pps_unregister_cdev(struct pps_device *pps)
 {
-	device_destroy(pps_class, pps->devno);
+	device_destroy(pps_class, pps->dev->devt);
 	cdev_del(&pps->cdev);
 }
 
@@ -286,14 +370,14 @@
 
 	pps_class = class_create(THIS_MODULE, "pps");
 	if (!pps_class) {
-		printk(KERN_ERR "pps: failed to allocate class\n");
+		pr_err("failed to allocate class\n");
 		return -ENOMEM;
 	}
 	pps_class->dev_attrs = pps_attrs;
 
 	err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
 	if (err < 0) {
-		printk(KERN_ERR "pps: failed to allocate char device region\n");
+		pr_err("failed to allocate char device region\n");
 		goto remove_class;
 	}
 
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c..50cb1e1 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
 obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
-ps3av_mod-objs		+= ps3av.o ps3av_cmd.o
+ps3av_mod-y		:= ps3av.o ps3av_cmd.o
 obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
 obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
 obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 1eb82c4..467e82b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -46,7 +46,6 @@
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
-static int next_switchid = 0;
 static int next_net = 0;
 static int next_comptag = 1;
 
@@ -378,12 +377,30 @@
 	struct rio_dev *rdev;
 	struct rio_switch *rswitch = NULL;
 	int result, rdid;
+	size_t size;
+	u32 swpinfo = 0;
 
-	rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
+	size = sizeof(struct rio_dev);
+	if (rio_mport_read_config_32(port, destid, hopcount,
+				     RIO_PEF_CAR, &result))
+		return NULL;
+
+	if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_SWP_INFO_CAR, &swpinfo);
+		if (result & RIO_PEF_SWITCH) {
+			size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+				sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+		}
+	}
+
+	rdev = kzalloc(size, GFP_KERNEL);
 	if (!rdev)
 		return NULL;
 
 	rdev->net = net;
+	rdev->pef = result;
+	rdev->swpinfo = swpinfo;
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
 				 &result);
 	rdev->did = result >> 16;
@@ -397,8 +414,6 @@
 	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,
 				 &result);
 	rdev->asm_rev = result >> 16;
-	rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
-				 &rdev->pef);
 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
 		rdev->efptr = result & 0xffff;
 		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
@@ -408,11 +423,6 @@
 						hopcount, RIO_EFB_ERR_MGMNT);
 	}
 
-	if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
-		rio_mport_read_config_32(port, destid, hopcount,
-					 RIO_SWP_INFO_CAR, &rdev->swpinfo);
-	}
-
 	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
 				 &rdev->src_ops);
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
@@ -427,6 +437,10 @@
 		rio_mport_write_config_32(port, destid, hopcount,
 					  RIO_COMPONENT_TAG_CSR, next_comptag);
 		rdev->comp_tag = next_comptag++;
+	} else {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_COMPONENT_TAG_CSR,
+					 &rdev->comp_tag);
 	}
 
 	if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
@@ -437,21 +451,20 @@
 				next_destid++;
 		} else
 			rdev->destid = rio_get_device_id(port, destid, hopcount);
-	} else
-		/* Switch device has an associated destID */
-		rdev->destid = RIO_INVALID_DESTID;
+
+		rdev->hopcount = 0xff;
+	} else {
+		/* Switch device has an associated destID which
+		 * will be adjusted later
+		 */
+		rdev->destid = destid;
+		rdev->hopcount = hopcount;
+	}
 
 	/* If a PE has both switch and other functions, show it as a switch */
 	if (rio_is_switch(rdev)) {
-		rswitch = kzalloc(sizeof(*rswitch) +
-				  RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
-				  sizeof(rswitch->nextdev[0]),
-				  GFP_KERNEL);
-		if (!rswitch)
-			goto cleanup;
-		rswitch->switchid = next_switchid;
-		rswitch->hopcount = hopcount;
-		rswitch->destid = destid;
+		rswitch = rdev->rswitch;
+		rswitch->switchid = rdev->comp_tag & RIO_CTAG_UDEVID;
 		rswitch->port_ok = 0;
 		rswitch->route_table = kzalloc(sizeof(u8)*
 					RIO_MAX_ROUTE_ENTRIES(port->sys_size),
@@ -462,15 +475,13 @@
 		for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
 				rdid++)
 			rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
-		rdev->rswitch = rswitch;
-		rswitch->rdev = rdev;
 		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
-			     rdev->rswitch->switchid);
+			     rswitch->switchid);
 		rio_switch_init(rdev, do_enum);
 
-		if (do_enum && rdev->rswitch->clr_table)
-			rdev->rswitch->clr_table(port, destid, hopcount,
-						 RIO_GLOBAL_TABLE);
+		if (do_enum && rswitch->clr_table)
+			rswitch->clr_table(port, destid, hopcount,
+					   RIO_GLOBAL_TABLE);
 
 		list_add_tail(&rswitch->node, &rio_switches);
 
@@ -506,10 +517,9 @@
 	return rdev;
 
 cleanup:
-	if (rswitch) {
+	if (rswitch->route_table)
 		kfree(rswitch->route_table);
-		kfree(rswitch);
-	}
+
 	kfree(rdev);
 	return NULL;
 }
@@ -632,8 +642,7 @@
 
 /**
  * rio_route_add_entry- Add a route entry to a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Port number to be routed
@@ -647,31 +656,31 @@
  * on failure.
  */
 static int
-rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
+rio_route_add_entry(struct rio_dev *rdev,
 		    u16 table, u16 route_destid, u8 route_port, int lock)
 {
 	int rc;
 
 	if (lock) {
-		rc = rio_lock_device(mport, rswitch->destid,
-				     rswitch->hopcount, 1000);
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
 		if (rc)
 			return rc;
 	}
 
-	rc = rswitch->add_entry(mport, rswitch->destid,
-					rswitch->hopcount, table,
-					route_destid, route_port);
+	rc = rdev->rswitch->add_entry(rdev->net->hport, rdev->destid,
+				      rdev->hopcount, table,
+				      route_destid, route_port);
 	if (lock)
-		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
 
 	return rc;
 }
 
 /**
  * rio_route_get_entry- Read a route entry in a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Pointer to read port number into
@@ -685,23 +694,24 @@
  * on failure.
  */
 static int
-rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
+rio_route_get_entry(struct rio_dev *rdev, u16 table,
 		    u16 route_destid, u8 *route_port, int lock)
 {
 	int rc;
 
 	if (lock) {
-		rc = rio_lock_device(mport, rswitch->destid,
-				     rswitch->hopcount, 1000);
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
 		if (rc)
 			return rc;
 	}
 
-	rc = rswitch->get_entry(mport, rswitch->destid,
-					rswitch->hopcount, table,
-					route_destid, route_port);
+	rc = rdev->rswitch->get_entry(rdev->net->hport, rdev->destid,
+				      rdev->hopcount, table,
+				      route_destid, route_port);
 	if (lock)
-		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
 
 	return rc;
 }
@@ -809,16 +819,15 @@
 		return -1;
 
 	if (rio_is_switch(rdev)) {
-		next_switchid++;
 		sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
-		rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+		rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 				    port->host_deviceid, sw_inport, 0);
 		rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
 
 		for (destid = 0; destid < next_destid; destid++) {
 			if (destid == port->host_deviceid)
 				continue;
-			rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+			rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 					    destid, sw_inport, 0);
 			rdev->rswitch->route_table[destid] = sw_inport;
 		}
@@ -850,8 +859,7 @@
 				    "RIO: scanning device on port %d\n",
 				    port_num);
 				rdev->rswitch->port_ok |= (1 << port_num);
-				rio_route_add_entry(port, rdev->rswitch,
-						RIO_GLOBAL_TABLE,
+				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 						RIO_ANY_DESTID(port->sys_size),
 						port_num, 0);
 
@@ -865,7 +873,7 @@
 					     destid < next_destid; destid++) {
 						if (destid == port->host_deviceid)
 							continue;
-						rio_route_add_entry(port, rdev->rswitch,
+						rio_route_add_entry(rdev,
 								    RIO_GLOBAL_TABLE,
 								    destid,
 								    port_num,
@@ -904,7 +912,7 @@
 				next_destid++;
 		}
 
-		rdev->rswitch->destid = sw_destid;
+		rdev->destid = sw_destid;
 	} else
 		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
 		    rio_name(rdev), rdev->vid, rdev->did);
@@ -941,7 +949,7 @@
  */
 static int __devinit
 rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
-	      u8 hopcount)
+	      u8 hopcount, struct rio_dev *prev, int prev_port)
 {
 	u8 port_num, route_port;
 	struct rio_dev *rdev;
@@ -951,14 +959,15 @@
 	if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {
 		/* Add device to the global and bus/net specific list. */
 		list_add_tail(&rdev->net_list, &net->devices);
+		rdev->prev = prev;
+		if (prev && rio_is_switch(prev))
+			prev->rswitch->nextdev[prev_port] = rdev;
 	} else
 		return -1;
 
 	if (rio_is_switch(rdev)) {
-		next_switchid++;
-
 		/* Associated destid is how we accessed this switch */
-		rdev->rswitch->destid = destid;
+		rdev->destid = destid;
 
 		pr_debug(
 		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
@@ -981,7 +990,7 @@
 				for (ndestid = 0;
 				     ndestid < RIO_ANY_DESTID(port->sys_size);
 				     ndestid++) {
-					rio_route_get_entry(port, rdev->rswitch,
+					rio_route_get_entry(rdev,
 							    RIO_GLOBAL_TABLE,
 							    ndestid,
 							    &route_port, 0);
@@ -992,8 +1001,8 @@
 				if (ndestid == RIO_ANY_DESTID(port->sys_size))
 					continue;
 				rio_unlock_device(port, destid, hopcount);
-				if (rio_disc_peer
-				    (net, port, ndestid, hopcount + 1) < 0)
+				if (rio_disc_peer(net, port, ndestid,
+					hopcount + 1, rdev, port_num) < 0)
 					return -1;
 			}
 		}
@@ -1069,14 +1078,14 @@
  */
 static void rio_update_route_tables(struct rio_mport *port)
 {
-	struct rio_dev *rdev;
+	struct rio_dev *rdev, *swrdev;
 	struct rio_switch *rswitch;
 	u8 sport;
 	u16 destid;
 
 	list_for_each_entry(rdev, &rio_devices, global_list) {
 
-		destid = (rio_is_switch(rdev))?rdev->rswitch->destid:rdev->destid;
+		destid = rdev->destid;
 
 		list_for_each_entry(rswitch, &rio_switches, node) {
 
@@ -1084,14 +1093,16 @@
 				continue;
 
 			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+				swrdev = sw_to_rio_dev(rswitch);
+
 				/* Skip if destid ends in empty switch*/
-				if (rswitch->destid == destid)
+				if (swrdev->destid == destid)
 					continue;
 
-				sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
+				sport = RIO_GET_PORT_NUM(swrdev->swpinfo);
 
 				if (rswitch->add_entry)	{
-					rio_route_add_entry(port, rswitch,
+					rio_route_add_entry(swrdev,
 						RIO_GLOBAL_TABLE, destid,
 						sport, 0);
 					rswitch->route_table[destid] = sport;
@@ -1203,21 +1214,20 @@
 
 	list_for_each_entry(rdev, &rio_devices, global_list)
 		if (rio_is_switch(rdev)) {
-			rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
-					rdev->rswitch->hopcount, 1000);
+			rio_lock_device(rdev->net->hport, rdev->destid,
+					rdev->hopcount, 1000);
 			for (i = 0;
 			     i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
 			     i++) {
-				if (rio_route_get_entry
-				    (rdev->net->hport, rdev->rswitch,
-				     RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
+				if (rio_route_get_entry(rdev,
+					RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
 					continue;
 				rdev->rswitch->route_table[i] = sport;
 			}
 
 			rio_unlock_device(rdev->net->hport,
-					  rdev->rswitch->destid,
-					  rdev->rswitch->hopcount);
+					  rdev->destid,
+					  rdev->hopcount);
 		}
 }
 
@@ -1284,7 +1294,7 @@
 						   mport->host_deviceid);
 
 		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
-					0) < 0) {
+					0, NULL, 0) < 0) {
 			printk(KERN_INFO
 			       "RIO: master port %d device has failed discovery\n",
 			       mport->id);
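Much of the rio-scan rework above comes from embedding the switch-specific data at the tail of the device structure: rio_setup_device() now sizes a single kzalloc() covering the device, the switch header and its per-port nextdev[] array, and sw_to_rio_dev() can later recover the device from the embedded switch. A generic sketch of that allocation idiom, using invented ex_* types rather than the real struct rio_dev layout:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>

struct ex_switch {
	u16	switchid;
	void	*nextdev[0];		/* one slot per switch port */
};

struct ex_dev {
	u16		destid;
	u8		hopcount;
	struct ex_switch rswitch[0];	/* present only for switches */
};

/* Recover the enclosing device from its embedded switch structure. */
#define ex_sw_to_dev(sw) container_of((sw), struct ex_dev, rswitch[0])

static struct ex_dev *ex_alloc_dev(bool is_switch, unsigned int nports)
{
	size_t size = sizeof(struct ex_dev);

	/* Switches get the embedded header plus one pointer per port. */
	if (is_switch)
		size += sizeof(struct ex_switch) + nports * sizeof(void *);

	return kzalloc(size, GFP_KERNEL);
}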
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 137ed93..76b4185 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -217,7 +217,7 @@
 
 	err = device_create_bin_file(&rdev->dev, &rio_config_attr);
 
-	if (!err && rdev->rswitch) {
+	if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
 		err = device_create_file(&rdev->dev, &dev_attr_routes);
 		if (!err && rdev->rswitch->sw_sysfs)
 			err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
@@ -239,7 +239,7 @@
 void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
 {
 	device_remove_bin_file(&rdev->dev, &rio_config_attr);
-	if (rdev->rswitch) {
+	if (rdev->pef & RIO_PEF_SWITCH) {
 		device_remove_file(&rdev->dev, &dev_attr_routes);
 		if (rdev->rswitch->sw_sysfs)
 			rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 7b5080c..cc2a3b7 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -471,16 +471,9 @@
  */
 int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
 	u32 regval;
 
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+	rio_read_config_32(rdev,
 				 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				 &regval);
 	if (lock)
@@ -488,7 +481,7 @@
 	else
 		regval &= ~RIO_PORT_N_CTL_LOCKOUT;
 
-	rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+	rio_write_config_32(rdev,
 				  rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				  regval);
 	return 0;
@@ -507,7 +500,7 @@
 rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 {
 	u32 result;
-	int p_port, dstid, rc = -EIO;
+	int p_port, rc = -EIO;
 	struct rio_dev *prev = NULL;
 
 	/* Find switch with failed RIO link */
@@ -522,9 +515,7 @@
 	if (prev == NULL)
 		goto err_out;
 
-	dstid = (rdev->pef & RIO_PEF_SWITCH) ?
-			rdev->rswitch->destid : rdev->destid;
-	p_port = prev->rswitch->route_table[dstid];
+	p_port = prev->rswitch->route_table[rdev->destid];
 
 	if (p_port != RIO_INVALID_ROUTE) {
 		pr_debug("RIO: link failed on [%s]-P%d\n",
@@ -567,15 +558,8 @@
  */
 static int rio_chk_dev_access(struct rio_dev *rdev)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+	return rio_mport_chk_dev_access(rdev->net->hport,
+					rdev->destid, rdev->hopcount);
 }
 
 /**
@@ -588,23 +572,20 @@
 static int
 rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int checkcount;
 
 	if (lnkresp) {
 		/* Read from link maintenance response register
 		 * to clear valid bit */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		udelay(50);
 	}
 
 	/* Issue Input-status command */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
 		RIO_MNT_REQ_CMD_IS);
 
@@ -615,7 +596,7 @@
 	checkcount = 3;
 	while (checkcount--) {
 		udelay(50);
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
@@ -635,15 +616,12 @@
  */
 static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
 	u32 regval;
 	u32 far_ackid, far_linkstat, near_ackid;
 
 	if (err_status == 0)
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 
@@ -661,7 +639,7 @@
 			 pnum, regval);
 		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
 		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 			&regval);
 		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
@@ -679,9 +657,8 @@
 			/* Align near outstanding/outbound ackIDs with
 			 * far inbound.
 			 */
-			rio_mport_write_config_32(mport, destid,
-				hopcount, rdev->phys_efptr +
-					RIO_PORT_N_ACK_STS_CSR(pnum),
+			rio_write_config_32(rdev,
+				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 				(near_ackid << 24) |
 					(far_ackid << 8) | far_ackid);
 			/* Align far outstanding/outbound ackIDs with
@@ -698,7 +675,7 @@
 				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
 		}
 rd_err:
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -710,7 +687,7 @@
 				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
 		udelay(50);
 
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -730,13 +707,10 @@
 int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
 {
 	struct rio_dev *rdev;
-	struct rio_mport *mport;
-	u8 hopcount;
-	u16 destid;
 	u32 err_status, em_perrdet, em_ltlerrdet;
 	int rc, portnum;
 
-	rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
+	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
 	if (rdev == NULL) {
 		/* Device removed or enumeration error */
 		pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
@@ -800,17 +774,13 @@
 		return 0;
 	}
 
-	mport = rdev->net->hport;
-	destid = rdev->rswitch->destid;
-	hopcount = rdev->rswitch->hopcount;
-
 	/*
 	 * Process the port-write notification from switch
 	 */
 	if (rdev->rswitch->em_handle)
 		rdev->rswitch->em_handle(rdev, portnum);
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			&err_status);
 	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
@@ -840,7 +810,7 @@
 			rdev->rswitch->port_ok &= ~(1 << portnum);
 			rio_set_port_lockout(rdev, portnum, 1);
 
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ACK_STS_CSR(portnum),
 				RIO_PORT_N_ACK_CLEAR);
@@ -851,28 +821,28 @@
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
 			 portnum, em_perrdet);
 		/* Clear EM Port N Error Detect CSR */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
 			 em_ltlerrdet);
 		/* Clear EM L/T Layer Error Detect CSR */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
 	}
 
 	/* Clear remaining error bits and Port-Write Pending bit */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			err_status);
 
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 0bb871c..095016a 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -209,9 +209,6 @@
 static int
 idtg2_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int i, tmp;
 
@@ -220,29 +217,25 @@
 	 * All standard EM configuration should be performed at upper level.
 	 */
 
-	pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Set Port-Write info CSR: PRIO=3 and CRF=1 */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PW_INFO_CSR, 0x0000e000);
+	rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
 
 	/*
 	 * Configure LT LAYER error reporting.
 	 */
 
 	/* Enable standard (RIO.p8) error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LT_ERR_REPORT_EN,
+	rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
 			REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
 			REM_LTL_ERR_UNSUPTR);
 
 	/* Use Port-Writes for LT layer error reporting.
 	 * Enable per-port reset
 	 */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1,
+	rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
+	rio_write_config_32(rdev, IDT_DEV_CTRL_1,
 			regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
 
 	/*
@@ -250,45 +243,40 @@
 	 */
 
 	/* Report all RIO.p8 errors supported by device */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+	rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
 
 	/* Configure reporting of implementation specific errors/events */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+	rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
+			    IDT_PORT_INIT_TX_ACQUIRED);
 
 	/* Use Port-Writes for port error reporting and enable error logging */
 	tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_PORT_OPS(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
+		rio_write_config_32(rdev,
 				IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
 				IDT_PORT_OPS_PL_ELOG |
 				IDT_PORT_OPS_LL_ELOG |
 				IDT_PORT_OPS_LT_ELOG);
 	}
 	/* Overwrite error log if full */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+	rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
 
 	/*
 	 * Configure LANE error reporting.
 	 */
 
 	/* Disable line error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LANE_ERR_REPORT_EN_BC, 0);
+	rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
 
 	/* Use Port-Writes for lane error reporting (when enabled)
 	 * (do per-lane update because lanes may have different configuration)
 	 */
 	tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+		rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
+		rio_write_config_32(rdev, IDT_LANE_CTRL(i),
+				    regval | IDT_LANE_CTRL_GENPW);
 	}
 
 	/*
@@ -296,41 +284,32 @@
 	 */
 
 	/* Disable JTAG and I2C Error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_PORT_ERR_CAP_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
 
 	/* Disable JTAG and I2C Error reporting/logging */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_ERR_REPORT_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
 
 	/* Disable Port-Write notification from JTAG */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_JTAG_CTRL, 0);
+	rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
 
 	/* Disable Port-Write notification from I2C */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL,
-			regval & ~IDT_I2C_MCTRL_GENPW);
+	rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
+	rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
 
 	/*
 	 * Configure CFG_BLK error reporting.
 	 */
 
 	/* Disable Configuration Block error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0);
 
 	/* Disable Port-Writes for Configuration Block error reporting */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT,
-			regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+	rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT,
+			    regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 
 	return 0;
@@ -339,18 +318,15 @@
 static int
 idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval, em_perrdet, em_ltlerrdet;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		/* Service Logical/Transport Layer Error(s) */
 		if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
 			/* Implementation specific error reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
 					IDT_ISLTL_ADDRESS_CAP, &regval);
 
 			pr_debug("RIO: %s Implementation Specific LTL errors" \
@@ -358,13 +334,12 @@
 				 rio_name(rdev), em_ltlerrdet, regval);
 
 			/* Clear implementation specific address capture CSR */
-			rio_mport_write_config_32(mport, destid, hopcount,
-					IDT_ISLTL_ADDRESS_CAP, 0);
+			rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0);
 
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		/* Service Port-Level Error(s) */
@@ -372,14 +347,14 @@
 			/* Implementation Specific port error reported */
 
 			/* Get IS errors reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
 					IDT_PORT_ISERR_DET(portnum), &regval);
 
 			pr_debug("RIO: %s Implementation Specific Port" \
 				 " errors 0x%x\n", rio_name(rdev), regval);
 
 			/* Clear all implementation specific events */
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 					IDT_PORT_ISERR_DET(portnum), 0);
 		}
 	}
@@ -391,14 +366,10 @@
 idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct rio_dev *rdev = to_rio_dev(dev);
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	ssize_t len = 0;
 	u32 regval;
 
-	while (!rio_mport_read_config_32(mport, destid, hopcount,
-					 IDT_ERR_RD, &regval)) {
+	while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) {
 		if (!regval)    /* 0 = end of log */
 			break;
 		len += snprintf(buf + len, PAGE_SIZE - len,
@@ -445,3 +416,5 @@
 
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index fc9f637..3a97107 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -117,10 +117,6 @@
 
 static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
-
 	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
 	rdev->rswitch->add_entry = idtcps_route_add_entry;
 	rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -132,7 +128,7 @@
 
 	if (do_enum) {
 		/* set TVAL = ~50us */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 	}
 
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index b9a389b..3994c00 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -113,22 +113,17 @@
 static int
 tsi568_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int portnum;
 
-	pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Make sure that Port-Writes are disabled (for all ports) */
 	for (portnum = 0;
 	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				TSI568_SP_MODE(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				TSI568_SP_MODE(portnum),
-				regval | TSI568_SP_MODE_PW_DIS);
+		rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
+		rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
+				    regval | TSI568_SP_MODE_PW_DIS);
 	}
 
 	return 0;
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 2003fb6..1a62934 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -158,48 +158,45 @@
 static int
 tsi57x_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int portnum;
 
-	pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	for (portnum = 0;
 	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
 		/* Make sure that Port-Writes are enabled (for all ports) */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_MODE(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_MODE(portnum),
 				regval & ~TSI578_SP_MODE_PW_DIS);
 
 		/* Clear all pending interrupts */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ERR_STS_CSR(portnum),
 				&regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ERR_STS_CSR(portnum),
 				regval & 0x07120214);
 
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_INT_STATUS(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_INT_STATUS(portnum),
 				regval & 0x000700bd);
 
 		/* Enable all interrupts to allow ports to send a port-write */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_CTL_INDEP(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_CTL_INDEP(portnum),
 				regval | 0x000b0000);
 
 		/* Skip next (odd) port if the current port is in x4 mode */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				&regval);
 		if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
@@ -207,7 +204,7 @@
 	}
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
 
 	return 0;
@@ -217,14 +214,12 @@
 tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
 {
 	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 intstat, err_status;
 	int sendcount, checkcount;
 	u8 route_port;
 	u32 regval;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			&err_status);
 
@@ -232,15 +227,15 @@
 	    (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
 			  RIO_PORT_N_ERR_STS_PW_INP_ES))) {
 		/* Remove any queued packets by locking/unlocking port */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 			&regval);
 		if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				regval | RIO_PORT_N_CTL_LOCKOUT);
 			udelay(50);
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				regval);
 		}
@@ -248,7 +243,7 @@
 		/* Read from link maintenance response register to clear
 		 * valid bit
 		 */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
 			&regval);
 
@@ -257,13 +252,12 @@
 		 */
 		sendcount = 3;
 		while (sendcount) {
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 					  TSI578_SP_CS_TX(portnum), 0x40fc8000);
 			checkcount = 3;
 			while (checkcount--) {
 				udelay(50);
-				rio_mport_read_config_32(
-					mport, destid, hopcount,
+				rio_read_config_32(rdev,
 					rdev->phys_efptr +
 						RIO_PORT_N_MNT_RSP_CSR(portnum),
 					&regval);
@@ -277,25 +271,23 @@
 
 exit_es:
 	/* Clear implementation specific error status bits */
-	rio_mport_read_config_32(mport, destid, hopcount,
-				 TSI578_SP_INT_STATUS(portnum), &intstat);
+	rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
 	pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
-		 destid, hopcount, portnum, intstat);
+		 rdev->destid, rdev->hopcount, portnum, intstat);
 
 	if (intstat & 0x10000) {
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_LUT_PEINF(portnum), &regval);
 		regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
 		route_port = rdev->rswitch->route_table[regval];
 		pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
 			rio_name(rdev), portnum, regval);
-		tsi57x_route_add_entry(mport, destid, hopcount,
+		tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
 				RIO_GLOBAL_TABLE, regval, route_port);
 	}
 
-	rio_mport_write_config_32(mport, destid, hopcount,
-				  TSI578_SP_INT_STATUS(portnum),
-				  intstat & 0x000700bd);
+	rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
+			    intstat & 0x000700bd);
 
 	return 0;
 }
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 2ce2eb7..dd63084 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -249,7 +249,7 @@
 }
 
 static int pm8607_set_voltage(struct regulator_dev *rdev,
-			      int min_uV, int max_uV)
+			      int min_uV, int max_uV, unsigned *selector)
 {
 	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
 	uint8_t val, mask;
@@ -263,6 +263,7 @@
 	ret = choose_voltage(rdev, min_uV, max_uV);
 	if (ret < 0)
 		return -EINVAL;
+	*selector = ret;
 	val = (uint8_t)(ret << info->vol_shift);
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
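The regulator changes in this merge all track the same core API change: the set_voltage() operation gained an `unsigned *selector` output parameter so a driver reports which entry of its voltage table it programmed. A minimal driver-side sketch of the new callback shape (everything prefixed ex_ is invented for illustration):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/regulator/driver.h>

/* Hypothetical voltage table for an imaginary LDO. */
static const int ex_voltages_uV[] = { 1200000, 1800000, 2500000, 3300000 };

static int ex_set_voltage(struct regulator_dev *rdev,
			  int min_uV, int max_uV, unsigned *selector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ex_voltages_uV); i++) {
		if (ex_voltages_uV[i] >= min_uV &&
		    ex_voltages_uV[i] <= max_uV) {
			/* Report the chosen table index back to the core. */
			*selector = i;
			/* A real driver would program the hardware here. */
			return 0;
		}
	}

	return -EINVAL;
}

static struct regulator_ops ex_regulator_ops = {
	.set_voltage	= ex_set_voltage,
};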
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index dd30e88..e1d9436 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -186,13 +186,25 @@
 	 This driver provides support for the voltage regulators of the
 	 PCAP2 PMIC.
 
+config REGULATOR_MC13XXX_CORE
+	tristate
+
 config REGULATOR_MC13783
 	tristate "Support regulators on Freescale MC13783 PMIC"
 	depends on MFD_MC13783
+	select REGULATOR_MC13XXX_CORE
 	help
 	  Say y here to support the regulators found on the Freescale MC13783
 	  PMIC.
 
+config REGULATOR_MC13892
+	tristate "Support regulators on Freescale MC13892 PMIC"
+	depends on MFD_MC13XXX
+	select REGULATOR_MC13XXX_CORE
+	help
+	  Say y here to support the regulators found on the Freescale MC13892
+	  PMIC.
+
 config REGULATOR_AB3100
 	tristate "ST-Ericsson AB3100 Regulator functions"
 	depends on AB3100_CORE
@@ -250,5 +262,15 @@
 	help
 	  This driver supports TPS6586X voltage regulator chips.
 
+config REGULATOR_TPS6524X
+	tristate "TI TPS6524X Power regulators"
+	depends on SPI
+	help
+	  This driver supports TPS6524X voltage regulator chips. TPS6524X
+	  provides three step-down converters and two general-purpose LDO
+	  voltage regulators.  This device is interfaced using a customized
+	  serial interface currently supported on the sequencer serial
+	  port controller.
+
 endif
 
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bff8157..0b5e88c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -30,10 +30,13 @@
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
+obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
+obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
 
 obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_AB8500)	+= ab8500.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349266..ed6feaf 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -362,7 +362,8 @@
 }
 
 static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
-					int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct ab3100_regulator *abreg = reg->reg_data;
 	u8 regval;
@@ -373,6 +374,8 @@
 	if (bestindex < 0)
 		return bestindex;
 
+	*selector = bestindex;
+
 	err = abx500_get_register_interruptible(abreg->dev, 0,
 						abreg->regreg, &regval);
 	if (err) {
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index db6b70f..d9a052c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -3,18 +3,13 @@
  *
  * License Terms: GNU General Public License v2
  *
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *          Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
  *
  * AB8500 peripheral regulators
  *
- * AB8500 supports the following regulators,
- * LDOs - VAUDIO, VANAMIC2/2, VDIGMIC, VINTCORE12, VTVOUT,
- *        VAUX1/2/3, VANA
- *
- * for DB8500 cut 1.0 and previous versions of the silicon, all accesses
- * to registers are through the DB8500 SPI. In cut 1.1 onwards, these
- * accesses are through the DB8500 PRCMU I2C
- *
+ * AB8500 supports the following regulators:
+ *   VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -28,38 +23,37 @@
 
 /**
  * struct ab8500_regulator_info - ab8500 regulator information
+ * @dev: device pointer
  * @desc: regulator description
- * @ab8500: ab8500 parent
  * @regulator_dev: regulator device
  * @max_uV: maximum voltage (for variable voltage supplies)
  * @min_uV: minimum voltage (for variable voltage supplies)
  * @fixed_uV: typical voltage (for fixed voltage supplies)
  * @update_bank: bank to control on/off
  * @update_reg: register to control on/off
- * @mask: mask to enable/disable regulator
- * @enable: bits to enable the regulator in normal(high power) mode
+ * @update_mask: mask to enable/disable regulator
+ * @update_val_enable: bits to enable the regulator in normal (high power) mode
  * @voltage_bank: bank to control regulator voltage
  * @voltage_reg: register to control regulator voltage
  * @voltage_mask: mask to control regulator voltage
- * @supported_voltages: supported voltage table
+ * @voltages: supported voltage table
  * @voltages_len: number of supported voltages for the regulator
  */
 struct ab8500_regulator_info {
 	struct device		*dev;
 	struct regulator_desc	desc;
-	struct ab8500		*ab8500;
 	struct regulator_dev	*regulator;
 	int max_uV;
 	int min_uV;
 	int fixed_uV;
 	u8 update_bank;
 	u8 update_reg;
-	u8 mask;
-	u8 enable;
+	u8 update_mask;
+	u8 update_val_enable;
 	u8 voltage_bank;
 	u8 voltage_reg;
 	u8 voltage_mask;
-	int const *supported_voltages;
+	int const *voltages;
 	int voltages_len;
 };
 
@@ -83,6 +77,17 @@
 	3300000,
 };
 
+static const int ldo_vaux3_voltages[] = {
+	1200000,
+	1500000,
+	1800000,
+	2100000,
+	2500000,
+	2750000,
+	2790000,
+	2910000,
+};
+
 static const int ldo_vintcore_voltages[] = {
 	1200000,
 	1225000,
@@ -95,57 +100,80 @@
 
 static int ab8500_regulator_enable(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, info->mask, info->enable);
+		info->update_bank, info->update_reg,
+		info->update_mask, info->update_val_enable);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 			"couldn't set enable bits for regulator\n");
+
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, info->update_val_enable);
+
 	return ret;
 }
 
 static int ab8500_regulator_disable(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, info->mask, 0x0);
+		info->update_bank, info->update_reg,
+		info->update_mask, 0x0);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 			"couldn't set disable bits for regulator\n");
+
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, 0x0);
+
 	return ret;
 }
 
 static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	u8 value;
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_get_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, &value);
+		info->update_bank, info->update_reg, &regval);
 	if (ret < 0) {
 		dev_err(rdev_get_dev(rdev),
 			"couldn't read 0x%x register\n", info->update_reg);
 		return ret;
 	}
 
-	if (value & info->mask)
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-is_enabled (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, regval);
+
+	if (regval & info->update_mask)
 		return true;
 	else
 		return false;
@@ -153,12 +181,12 @@
 
 static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
 {
-	int regulator_id;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	/* return the uV for the fixed regulators */
 	if (info->fixed_uV)
@@ -167,33 +195,40 @@
 	if (selector >= info->voltages_len)
 		return -EINVAL;
 
-	return info->supported_voltages[selector];
+	return info->voltages[selector];
 }
 
 static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret, val;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	u8 value;
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
-	ret = abx500_get_register_interruptible(info->dev, info->voltage_bank,
-		info->voltage_reg, &value);
+	ret = abx500_get_register_interruptible(info->dev,
+			info->voltage_bank, info->voltage_reg, &regval);
 	if (ret < 0) {
 		dev_err(rdev_get_dev(rdev),
 			"couldn't read voltage reg for regulator\n");
 		return ret;
 	}
 
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->voltage_bank, info->voltage_reg,
+		info->voltage_mask, regval);
+
 	/* vintcore has a different layout */
-	value &= info->voltage_mask;
-	if (regulator_id == AB8500_LDO_INTCORE)
-		ret = info->supported_voltages[value >> 0x3];
+	val = regval & info->voltage_mask;
+	if (info->desc.id == AB8500_LDO_INTCORE)
+		ret = info->voltages[val >> 0x3];
 	else
-		ret = info->supported_voltages[value];
+		ret = info->voltages[val];
 
 	return ret;
 }
@@ -206,8 +241,8 @@
 
 	/* check the supported voltage */
 	for (i = 0; i < info->voltages_len; i++) {
-		if ((info->supported_voltages[i] >= min_uV) &&
-		    (info->supported_voltages[i] <= max_uV))
+		if ((info->voltages[i] >= min_uV) &&
+		    (info->voltages[i] <= max_uV))
 			return i;
 	}
 
@@ -215,14 +250,17 @@
 }
 
 static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	/* get the appropriate voltages within the range */
 	ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV);
@@ -232,14 +270,23 @@
 		return ret;
 	}
 
+	*selector = ret;
+
 	/* set the registers for the request */
+	regval = (u8)ret;
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->voltage_bank, info->voltage_reg,
-		info->voltage_mask, (u8)ret);
+			info->voltage_bank, info->voltage_reg,
+			info->voltage_mask, regval);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 		"couldn't set voltage reg for regulator\n");
 
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->voltage_bank, info->voltage_reg,
+		info->voltage_mask, regval);
+
 	return ret;
 }
 
@@ -254,17 +301,17 @@
 
 static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
 {
-	int regulator_id;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	return info->fixed_uV;
 }
 
-static struct regulator_ops ab8500_ldo_fixed_ops = {
+static struct regulator_ops ab8500_regulator_fixed_ops = {
 	.enable		= ab8500_regulator_enable,
 	.disable	= ab8500_regulator_disable,
 	.is_enabled	= ab8500_regulator_is_enabled,
@@ -272,89 +319,198 @@
 	.list_voltage	= ab8500_list_voltage,
 };
 
-#define AB8500_LDO(_id, min, max, bank, reg, reg_mask,		\
-		reg_enable, volt_bank, volt_reg, volt_mask,	\
-		voltages, len_volts)				\
-{								\
-	.desc	= {						\
-		.name	= "LDO-" #_id,				\
-		.ops	= &ab8500_regulator_ops,		\
-		.type	= REGULATOR_VOLTAGE,			\
-		.id	= AB8500_LDO_##_id,			\
-		.owner	= THIS_MODULE,				\
-	},							\
-	.min_uV		= (min) * 1000,				\
-	.max_uV		= (max) * 1000,				\
-	.update_bank	= bank,					\
-	.update_reg	= reg,					\
-	.mask		= reg_mask,				\
-	.enable		= reg_enable,				\
-	.voltage_bank	= volt_bank,				\
-	.voltage_reg	= volt_reg,				\
-	.voltage_mask	= volt_mask,				\
-	.supported_voltages = voltages,				\
-	.voltages_len	= len_volts,				\
-	.fixed_uV	= 0,					\
-}
-
-#define AB8500_FIXED_LDO(_id, fixed, bank, reg,		\
-			reg_mask, reg_enable)		\
-{							\
-	.desc	= {					\
-		.name	= "LDO-" #_id,			\
-		.ops	= &ab8500_ldo_fixed_ops,	\
-		.type	= REGULATOR_VOLTAGE,		\
-		.id	= AB8500_LDO_##_id,		\
-		.owner	= THIS_MODULE,			\
-	},						\
-	.fixed_uV	= fixed * 1000,			\
-	.update_bank	= bank,				\
-	.update_reg	= reg,				\
-	.mask		= reg_mask,			\
-	.enable		= reg_enable,			\
-}
-
-static struct ab8500_regulator_info ab8500_regulator_info[] = {
+static struct ab8500_regulator_info
+		ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
 	/*
-	 * Variable Voltage LDOs
-	 * name, min uV, max uV, ctrl bank, ctrl reg, reg mask, enable mask,
-	 *      volt ctrl bank, volt ctrl reg, volt ctrl mask, volt table,
-	 *      num supported volts
+	 * Variable Voltage Regulators
+	 *   name, min mV, max mV,
+	 *   update bank, reg, mask, enable val
+	 *   volt bank, reg, mask, table, table length
 	 */
-	AB8500_LDO(AUX1, 1100, 3300, 0x04, 0x09, 0x3, 0x1, 0x04, 0x1f, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(AUX2, 1100, 3300, 0x04, 0x09, 0xc, 0x4, 0x04, 0x20, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(AUX3, 1100, 3300, 0x04, 0x0a, 0x3, 0x1, 0x04, 0x21, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(INTCORE, 1100, 3300, 0x03, 0x80, 0x4, 0x4, 0x03, 0x80, 0x38,
-		ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)),
+	[AB8500_LDO_AUX1] = {
+		.desc = {
+			.name		= "LDO-AUX1",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX1,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vauxn_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x09,
+		.update_mask		= 0x03,
+		.update_val_enable	= 0x01,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x1f,
+		.voltage_mask		= 0x0f,
+		.voltages		= ldo_vauxn_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vauxn_voltages),
+	},
+	[AB8500_LDO_AUX2] = {
+		.desc = {
+			.name		= "LDO-AUX2",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX2,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vauxn_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x09,
+		.update_mask		= 0x0c,
+		.update_val_enable	= 0x04,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x20,
+		.voltage_mask		= 0x0f,
+		.voltages		= ldo_vauxn_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vauxn_voltages),
+	},
+	[AB8500_LDO_AUX3] = {
+		.desc = {
+			.name		= "LDO-AUX3",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX3,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vaux3_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x0a,
+		.update_mask		= 0x03,
+		.update_val_enable	= 0x01,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x21,
+		.voltage_mask		= 0x07,
+		.voltages		= ldo_vaux3_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vaux3_voltages),
+	},
+	[AB8500_LDO_INTCORE] = {
+		.desc = {
+			.name		= "LDO-INTCORE",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_INTCORE,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vintcore_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x80,
+		.update_mask		= 0x44,
+		.update_val_enable	= 0x04,
+		.voltage_bank		= 0x03,
+		.voltage_reg		= 0x80,
+		.voltage_mask		= 0x38,
+		.voltages		= ldo_vintcore_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vintcore_voltages),
+	},
 
 	/*
-	 * Fixed Voltage LDOs
-	 *		 name,	o/p uV, ctrl bank, ctrl reg, enable, disable
+	 * Fixed Voltage Regulators
+	 *   name, fixed mV,
+	 *   update bank, reg, mask, enable val
 	 */
-	AB8500_FIXED_LDO(TVOUT,	  2000, 0x03,      0x80,     0x2,    0x2),
-	AB8500_FIXED_LDO(AUDIO,   2000, 0x03,      0x83,     0x2,    0x2),
-	AB8500_FIXED_LDO(ANAMIC1, 2050, 0x03,      0x83,     0x4,    0x4),
-	AB8500_FIXED_LDO(ANAMIC2, 2050, 0x03,      0x83,     0x8,    0x8),
-	AB8500_FIXED_LDO(DMIC,    1800, 0x03,      0x83,     0x10,   0x10),
-	AB8500_FIXED_LDO(ANA,     1200, 0x03,      0x83,     0xc,    0x4),
+	[AB8500_LDO_TVOUT] = {
+		.desc = {
+			.name		= "LDO-TVOUT",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_TVOUT,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2000000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x80,
+		.update_mask		= 0x82,
+		.update_val_enable	= 0x02,
+	},
+	[AB8500_LDO_AUDIO] = {
+		.desc = {
+			.name		= "LDO-AUDIO",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUDIO,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2000000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x02,
+		.update_val_enable	= 0x02,
+	},
+	[AB8500_LDO_ANAMIC1] = {
+		.desc = {
+			.name		= "LDO-ANAMIC1",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANAMIC1,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2050000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x08,
+		.update_val_enable	= 0x08,
+	},
+	[AB8500_LDO_ANAMIC2] = {
+		.desc = {
+			.name		= "LDO-ANAMIC2",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANAMIC2,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2050000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x10,
+		.update_val_enable	= 0x10,
+	},
+	[AB8500_LDO_DMIC] = {
+		.desc = {
+			.name		= "LDO-DMIC",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_DMIC,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 1800000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x04,
+		.update_val_enable	= 0x04,
+	},
+	[AB8500_LDO_ANA] = {
+		.desc = {
+			.name		= "LDO-ANA",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANA,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 1200000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x06,
+		.update_mask		= 0x0c,
+		.update_val_enable	= 0x04,
+	},
+
 };
 
-static inline struct ab8500_regulator_info *find_regulator_info(int id)
-{
-	struct ab8500_regulator_info *info;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
-		info = &ab8500_regulator_info[i];
-		if (info->desc.id == id)
-			return info;
-	}
-	return NULL;
-}
-
 static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 {
 	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
@@ -366,6 +522,16 @@
 		return -EINVAL;
 	}
 	pdata = dev_get_platdata(ab8500->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "null pdata\n");
+		return -EINVAL;
+	}
+
+	/* make sure the platform data has the correct size */
+	if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
+		dev_err(&pdev->dev, "platform configuration error\n");
+		return -EINVAL;
+	}
 
 	/* register all regulators */
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -374,10 +540,22 @@
 		/* assign per-regulator data */
 		info = &ab8500_regulator_info[i];
 		info->dev = &pdev->dev;
-		info->ab8500 = ab8500;
 
+		/* fix for hardware before ab8500v2.0 */
+		if (abx500_get_chip_id(info->dev) < 0x20) {
+			if (info->desc.id == AB8500_LDO_AUX3) {
+				info->desc.n_voltages =
+					ARRAY_SIZE(ldo_vauxn_voltages);
+				info->voltages = ldo_vauxn_voltages;
+				info->voltages_len =
+					ARRAY_SIZE(ldo_vauxn_voltages);
+				info->voltage_mask = 0xf;
+			}
+		}
+
+		/* register regulator with framework */
 		info->regulator = regulator_register(&info->desc, &pdev->dev,
-				pdata->regulator[i], info);
+				&pdata->regulator[i], info);
 		if (IS_ERR(info->regulator)) {
 			err = PTR_ERR(info->regulator);
 			dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -389,6 +567,9 @@
 			}
 			return err;
 		}
+
+		dev_vdbg(rdev_get_dev(info->regulator),
+			"%s-probed\n", info->desc.name);
 	}
 
 	return 0;
@@ -401,6 +582,10 @@
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
 		struct ab8500_regulator_info *info = NULL;
 		info = &ab8500_regulator_info[i];
+
+		dev_vdbg(rdev_get_dev(info->regulator),
+			"%s-remove\n", info->desc.name);
+
 		regulator_unregister(info->regulator);
 	}
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ba521f0..9fa2095 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -13,8 +13,11 @@
  *
  */
 
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -25,16 +28,30 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/regulator.h>
+
 #include "dummy.h"
 
-#define REGULATOR_VERSION "0.5"
+#define rdev_err(rdev, fmt, ...)					\
+	pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_warn(rdev, fmt, ...)					\
+	pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_info(rdev, fmt, ...)					\
+	pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_dbg(rdev, fmt, ...)					\
+	pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
 
 static DEFINE_MUTEX(regulator_list_mutex);
 static LIST_HEAD(regulator_list);
 static LIST_HEAD(regulator_map_list);
-static int has_full_constraints;
+static bool has_full_constraints;
 static bool board_wants_dummy_regulator;
 
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_root;
+#endif
+
 /*
  * struct regulator_map
  *
@@ -71,6 +88,8 @@
 static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
 static void _notifier_call_chain(struct regulator_dev *rdev,
 				  unsigned long event, void *data);
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+				     int min_uV, int max_uV);
 
 static const char *rdev_get_name(struct regulator_dev *rdev)
 {
@@ -111,13 +130,11 @@
 	BUG_ON(*min_uV > *max_uV);
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 
@@ -132,6 +149,27 @@
 	return 0;
 }
 
+/* Make sure we select a voltage that suits the needs of all
+ * regulator consumers
+ */
+static int regulator_check_consumers(struct regulator_dev *rdev,
+				     int *min_uV, int *max_uV)
+{
+	struct regulator *regulator;
+
+	list_for_each_entry(regulator, &rdev->consumer_list, list) {
+		if (*max_uV > regulator->max_uV)
+			*max_uV = regulator->max_uV;
+		if (*min_uV < regulator->min_uV)
+			*min_uV = regulator->min_uV;
+	}
+
+	if (*min_uV > *max_uV)
+		return -EINVAL;
+
+	return 0;
+}
+
 /* current constraint check */
 static int regulator_check_current_limit(struct regulator_dev *rdev,
 					int *min_uA, int *max_uA)
@@ -139,13 +177,11 @@
 	BUG_ON(*min_uA > *max_uA);
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 
@@ -174,18 +210,15 @@
 	}
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	if (!(rdev->constraints->valid_modes_mask & mode)) {
-		printk(KERN_ERR "%s: invalid mode %x for %s\n",
-		       __func__, mode, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid mode %x\n", mode);
 		return -EINVAL;
 	}
 	return 0;
@@ -195,13 +228,11 @@
 static int regulator_check_drms(struct regulator_dev *rdev)
 {
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	return 0;
@@ -553,18 +584,21 @@
 
 	err = regulator_check_drms(rdev);
 	if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
-	    !rdev->desc->ops->get_voltage || !rdev->desc->ops->set_mode)
+	    (!rdev->desc->ops->get_voltage &&
+	     !rdev->desc->ops->get_voltage_sel) ||
+	    !rdev->desc->ops->set_mode)
 		return;
 
 	/* get output voltage */
-	output_uV = rdev->desc->ops->get_voltage(rdev);
+	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0)
 		return;
 
 	/* get input voltage */
-	if (rdev->supply && rdev->supply->desc->ops->get_voltage)
-		input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
-	else
+	input_uV = 0;
+	if (rdev->supply)
+		input_uV = _regulator_get_voltage(rdev->supply);
+	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0)
 		return;
@@ -598,20 +632,17 @@
 	 */
 	if (!rstate->enabled && !rstate->disabled) {
 		if (can_set_state)
-			printk(KERN_WARNING "%s: No configuration for %s\n",
-			       __func__, rdev_get_name(rdev));
+			rdev_warn(rdev, "No configuration\n");
 		return 0;
 	}
 
 	if (rstate->enabled && rstate->disabled) {
-		printk(KERN_ERR "%s: invalid configuration for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid configuration\n");
 		return -EINVAL;
 	}
 
 	if (!can_set_state) {
-		printk(KERN_ERR "%s: no way to set suspend state\n",
-			__func__);
+		rdev_err(rdev, "no way to set suspend state\n");
 		return -EINVAL;
 	}
 
@@ -620,15 +651,14 @@
 	else
 		ret = rdev->desc->ops->set_suspend_disable(rdev);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to enabled/disable\n", __func__);
+		rdev_err(rdev, "failed to enable/disable\n");
 		return ret;
 	}
 
 	if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
 		ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set voltage\n",
-				__func__);
+			rdev_err(rdev, "failed to set voltage\n");
 			return ret;
 		}
 	}
@@ -636,7 +666,7 @@
 	if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
 		ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set mode\n", __func__);
+			rdev_err(rdev, "failed to set mode\n");
 			return ret;
 		}
 	}
@@ -714,29 +744,27 @@
 	if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
 		count += sprintf(buf + count, "standby");
 
-	printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
+	rdev_info(rdev, "%s\n", buf);
 }
 
 static int machine_constraints_voltage(struct regulator_dev *rdev,
 	struct regulation_constraints *constraints)
 {
 	struct regulator_ops *ops = rdev->desc->ops;
-	const char *name = rdev_get_name(rdev);
 	int ret;
 
 	/* do we need to apply the constraint voltage */
 	if (rdev->constraints->apply_uV &&
-		rdev->constraints->min_uV == rdev->constraints->max_uV &&
-		ops->set_voltage) {
-		ret = ops->set_voltage(rdev,
-			rdev->constraints->min_uV, rdev->constraints->max_uV);
-			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
-				       __func__,
-				       rdev->constraints->min_uV, name);
-				rdev->constraints = NULL;
-				return ret;
-			}
+	    rdev->constraints->min_uV == rdev->constraints->max_uV) {
+		ret = _regulator_do_set_voltage(rdev,
+						rdev->constraints->min_uV,
+						rdev->constraints->max_uV);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to apply %duV constraint\n",
+				 rdev->constraints->min_uV);
+			rdev->constraints = NULL;
+			return ret;
+		}
 	}
 
 	/* constrain machine-level voltage specs to fit
@@ -765,8 +793,7 @@
 
 		/* else require explicit machine-level constraints */
 		if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
-			pr_err("%s: %s '%s' voltage constraints\n",
-				       __func__, "invalid", name);
+			rdev_err(rdev, "invalid voltage constraints\n");
 			return -EINVAL;
 		}
 
@@ -787,22 +814,19 @@
 
 		/* final: [min_uV..max_uV] valid iff constraints valid */
 		if (max_uV < min_uV) {
-			pr_err("%s: %s '%s' voltage constraints\n",
-				       __func__, "unsupportable", name);
+			rdev_err(rdev, "unsupportable voltage constraints\n");
 			return -EINVAL;
 		}
 
 		/* use regulator's subset of machine constraints */
 		if (constraints->min_uV < min_uV) {
-			pr_debug("%s: override '%s' %s, %d -> %d\n",
-				       __func__, name, "min_uV",
-					constraints->min_uV, min_uV);
+			rdev_dbg(rdev, "override min_uV, %d -> %d\n",
+				 constraints->min_uV, min_uV);
 			constraints->min_uV = min_uV;
 		}
 		if (constraints->max_uV > max_uV) {
-			pr_debug("%s: override '%s' %s, %d -> %d\n",
-				       __func__, name, "max_uV",
-					constraints->max_uV, max_uV);
+			rdev_dbg(rdev, "override max_uV, %d -> %d\n",
+				 constraints->max_uV, max_uV);
 			constraints->max_uV = max_uV;
 		}
 	}
@@ -822,26 +846,25 @@
  * set_mode.
  */
 static int set_machine_constraints(struct regulator_dev *rdev,
-	struct regulation_constraints *constraints)
+	const struct regulation_constraints *constraints)
 {
 	int ret = 0;
-	const char *name;
 	struct regulator_ops *ops = rdev->desc->ops;
 
-	rdev->constraints = constraints;
+	rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+				    GFP_KERNEL);
+	if (!rdev->constraints)
+		return -ENOMEM;
 
-	name = rdev_get_name(rdev);
-
-	ret = machine_constraints_voltage(rdev, constraints);
+	ret = machine_constraints_voltage(rdev, rdev->constraints);
 	if (ret != 0)
 		goto out;
 
 	/* do we need to setup our suspend state */
 	if (constraints->initial_state) {
-		ret = suspend_prepare(rdev, constraints->initial_state);
+		ret = suspend_prepare(rdev, rdev->constraints->initial_state);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set suspend state for %s\n",
-			       __func__, name);
+			rdev_err(rdev, "failed to set suspend state\n");
 			rdev->constraints = NULL;
 			goto out;
 		}
@@ -849,17 +872,14 @@
 
 	if (constraints->initial_mode) {
 		if (!ops->set_mode) {
-			printk(KERN_ERR "%s: no set_mode operation for %s\n",
-			       __func__, name);
+			rdev_err(rdev, "no set_mode operation\n");
 			ret = -EINVAL;
 			goto out;
 		}
 
-		ret = ops->set_mode(rdev, constraints->initial_mode);
+		ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
 		if (ret < 0) {
-			printk(KERN_ERR
-			       "%s: failed to set initial mode for %s: %d\n",
-			       __func__, name, ret);
+			rdev_err(rdev, "failed to set initial mode: %d\n", ret);
 			goto out;
 		}
 	}
@@ -867,11 +887,11 @@
 	/* If the constraints say the regulator should be on at this point
 	 * and we have control then make sure it is enabled.
 	 */
-	if ((constraints->always_on || constraints->boot_on) && ops->enable) {
+	if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
+	    ops->enable) {
 		ret = ops->enable(rdev);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to enable %s\n",
-			       __func__, name);
+			rdev_err(rdev, "failed to enable\n");
 			rdev->constraints = NULL;
 			goto out;
 		}
@@ -899,9 +919,8 @@
 	err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
 				"supply");
 	if (err) {
-		printk(KERN_ERR
-		       "%s: could not add device link %s err %d\n",
-		       __func__, supply_rdev->dev.kobj.name, err);
+		rdev_err(rdev, "could not add device link %s err %d\n",
+			 supply_rdev->dev.kobj.name, err);
 		       goto out;
 	}
 	rdev->supply = supply_rdev;
@@ -957,10 +976,10 @@
 			continue;
 
 		dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n",
-				dev_name(&node->regulator->dev),
-				node->regulator->desc->name,
-				supply,
-				dev_name(&rdev->dev), rdev_get_name(rdev));
+			dev_name(&node->regulator->dev),
+			node->regulator->desc->name,
+			supply,
+			dev_name(&rdev->dev), rdev_get_name(rdev));
 		return -EBUSY;
 	}
 
@@ -1031,8 +1050,7 @@
 		regulator->dev_attr.show = device_requested_uA_show;
 		err = device_create_file(dev, &regulator->dev_attr);
 		if (err < 0) {
-			printk(KERN_WARNING "%s: could not add regulator_dev"
-				" load sysfs\n", __func__);
+			rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n");
 			goto attr_name_err;
 		}
 
@@ -1049,9 +1067,8 @@
 		err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
 					buf);
 		if (err) {
-			printk(KERN_WARNING
-			       "%s: could not add device link %s err %d\n",
-			       __func__, dev->kobj.name, err);
+			rdev_warn(rdev, "could not add device link %s err %d\n",
+				  dev->kobj.name, err);
 			goto link_name_err;
 		}
 	}
@@ -1088,7 +1105,7 @@
 	int ret;
 
 	if (id == NULL) {
-		printk(KERN_ERR "regulator: get() with no identifier\n");
+		pr_err("get() with no identifier\n");
 		return regulator;
 	}
 
@@ -1122,8 +1139,8 @@
 	 * substitute in a dummy regulator so consumers can continue.
 	 */
 	if (!has_full_constraints) {
-		pr_warning("%s supply %s not found, using dummy regulator\n",
-			   devname, id);
+		pr_warn("%s supply %s not found, using dummy regulator\n",
+			devname, id);
 		rdev = dummy_regulator_rdev;
 		goto found;
 	}
@@ -1274,8 +1291,7 @@
 			ret = _regulator_enable(rdev->supply);
 			mutex_unlock(&rdev->supply->mutex);
 			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to enable %s: %d\n",
-				       __func__, rdev_get_name(rdev), ret);
+				rdev_err(rdev, "failed to enable: %d\n", ret);
 				return ret;
 			}
 		}
@@ -1302,13 +1318,13 @@
 			if (ret >= 0) {
 				delay = ret;
 			} else {
-				printk(KERN_WARNING
-					"%s: enable_time() failed for %s: %d\n",
-					__func__, rdev_get_name(rdev),
-					ret);
+				rdev_warn(rdev, "enable_time() failed: %d\n",
+					   ret);
 				delay = 0;
 			}
 
+			trace_regulator_enable(rdev_get_name(rdev));
+
 			/* Allow the regulator to ramp; it would be useful
 			 * to extend this for bulk operations so that the
 			 * regulators can ramp together.  */
@@ -1316,6 +1332,8 @@
 			if (ret < 0)
 				return ret;
 
+			trace_regulator_enable_delay(rdev_get_name(rdev));
+
 			if (delay >= 1000) {
 				mdelay(delay / 1000);
 				udelay(delay % 1000);
@@ -1323,9 +1341,10 @@
 				udelay(delay);
 			}
 
+			trace_regulator_enable_complete(rdev_get_name(rdev));
+
 		} else if (ret < 0) {
-			printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
-			       __func__, rdev_get_name(rdev), ret);
+			rdev_err(rdev, "is_enabled() failed: %d\n", ret);
 			return ret;
 		}
 		/* Fallthrough on positive return values - already enabled */
@@ -1367,8 +1386,7 @@
 	*supply_rdev_ptr = NULL;
 
 	if (WARN(rdev->use_count <= 0,
-			"unbalanced disables for %s\n",
-			rdev_get_name(rdev)))
+		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
 		return -EIO;
 
 	/* are we the last user and permitted to disable ? */
@@ -1378,13 +1396,16 @@
 		/* we are last user */
 		if (_regulator_can_change_status(rdev) &&
 		    rdev->desc->ops->disable) {
+			trace_regulator_disable(rdev_get_name(rdev));
+
 			ret = rdev->desc->ops->disable(rdev);
 			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to disable %s\n",
-				       __func__, rdev_get_name(rdev));
+				rdev_err(rdev, "failed to disable\n");
 				return ret;
 			}
 
+			trace_regulator_disable_complete(rdev_get_name(rdev));
+
 			_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
 					     NULL);
 		}
@@ -1451,8 +1472,7 @@
 		/* ah well, who wants to live forever... */
 		ret = rdev->desc->ops->disable(rdev);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to force disable %s\n",
-			       __func__, rdev_get_name(rdev));
+			rdev_err(rdev, "failed to force disable\n");
 			return ret;
 		}
 		/* notify other consumers that power has been forced off */
@@ -1605,6 +1625,62 @@
 	return 0;
 }
 
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+				     int min_uV, int max_uV)
+{
+	int ret;
+	unsigned int selector;
+
+	trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
+
+	if (rdev->desc->ops->set_voltage) {
+		ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
+						   &selector);
+
+		if (rdev->desc->ops->list_voltage)
+			selector = rdev->desc->ops->list_voltage(rdev,
+								 selector);
+		else
+			selector = -1;
+	} else if (rdev->desc->ops->set_voltage_sel) {
+		int best_val = INT_MAX;
+		int i;
+
+		selector = 0;
+
+		/* Find the smallest voltage that falls within the specified
+		 * range.
+		 */
+		for (i = 0; i < rdev->desc->n_voltages; i++) {
+			ret = rdev->desc->ops->list_voltage(rdev, i);
+			if (ret < 0)
+				continue;
+
+			if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+				best_val = ret;
+				selector = i;
+			}
+		}
+
+		if (best_val != INT_MAX) {
+			ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+			selector = best_val;
+		} else {
+			ret = -EINVAL;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
+				     NULL);
+
+	trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector);
+
+	return ret;
+}
+
 /**
  * regulator_set_voltage - set regulator output voltage
  * @regulator: regulator source
@@ -1626,12 +1702,20 @@
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
 {
 	struct regulator_dev *rdev = regulator->rdev;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&rdev->mutex);
 
+	/* If we're setting the same range as last time the change
+	 * should be a noop (some cpufreq implementations use the same
+	 * voltage for multiple frequencies, for example).
+	 */
+	if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+		goto out;
+
 	/* sanity check */
-	if (!rdev->desc->ops->set_voltage) {
+	if (!rdev->desc->ops->set_voltage &&
+	    !rdev->desc->ops->set_voltage_sel) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1642,18 +1726,76 @@
 		goto out;
 	regulator->min_uV = min_uV;
 	regulator->max_uV = max_uV;
-	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV);
+
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
 
 out:
-	_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL);
 	mutex_unlock(&rdev->mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_voltage);
 
+/**
+ * regulator_sync_voltage - re-apply last regulator output voltage
+ * @regulator: regulator source
+ *
+ * Re-apply the last configured voltage.  This is intended for use where
+ * an external control source that the consumer cooperates with has
+ * caused the configured voltage to change.
+ */
+int regulator_sync_voltage(struct regulator *regulator)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret, min_uV, max_uV;
+
+	mutex_lock(&rdev->mutex);
+
+	if (!rdev->desc->ops->set_voltage &&
+	    !rdev->desc->ops->set_voltage_sel) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* This is only going to work if we've had a voltage configured. */
+	if (!regulator->min_uV && !regulator->max_uV) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	min_uV = regulator->min_uV;
+	max_uV = regulator->max_uV;
+
+	/* This should be a paranoia check... */
+	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+
+out:
+	mutex_unlock(&rdev->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_sync_voltage);
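
A hedged consumer-side sketch of how regulator_sync_voltage() is meant to be used; the "vcc_core" supply name and the 1.2 V window are invented, only the consumer API calls are real.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	/* Hypothetical consumer; "vcc_core" and the 1.2 V window are made up. */
	static int example_resync(struct device *dev)
	{
		struct regulator *reg;
		int ret;

		reg = regulator_get(dev, "vcc_core");
		if (IS_ERR(reg))
			return PTR_ERR(reg);

		/* The core records this request for the consumer. */
		ret = regulator_set_voltage(reg, 1200000, 1250000);

		/*
		 * After an external control source may have moved the rail,
		 * re-apply the recorded window instead of recomputing it.
		 */
		if (!ret)
			ret = regulator_sync_voltage(reg);

		regulator_put(reg);
		return ret;
	}
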
+
 static int _regulator_get_voltage(struct regulator_dev *rdev)
 {
-	/* sanity check */
+	int sel;
+
+	if (rdev->desc->ops->get_voltage_sel) {
+		sel = rdev->desc->ops->get_voltage_sel(rdev);
+		if (sel < 0)
+			return sel;
+		return rdev->desc->ops->list_voltage(rdev, sel);
+	}
 	if (rdev->desc->ops->get_voltage)
 		return rdev->desc->ops->get_voltage(rdev);
 	else
@@ -1880,21 +2022,20 @@
 		goto out;
 
 	/* get output voltage */
-	output_uV = rdev->desc->ops->get_voltage(rdev);
+	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0) {
-		printk(KERN_ERR "%s: invalid output voltage found for %s\n",
-			__func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid output voltage found\n");
 		goto out;
 	}
 
 	/* get input voltage */
-	if (rdev->supply && rdev->supply->desc->ops->get_voltage)
-		input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
-	else
+	input_uV = 0;
+	if (rdev->supply)
+		input_uV = _regulator_get_voltage(rdev->supply);
+	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0) {
-		printk(KERN_ERR "%s: invalid input voltage found for %s\n",
-			__func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid input voltage found\n");
 		goto out;
 	}
 
@@ -1907,16 +2048,14 @@
 						 total_uA_load);
 	ret = regulator_check_mode(rdev, mode);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to get optimum mode for %s @"
-			" %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
-			total_uA_load, input_uV, output_uV);
+		rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+			 total_uA_load, input_uV, output_uV);
 		goto out;
 	}
 
 	ret = rdev->desc->ops->set_mode(rdev, mode);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
-			__func__, mode, rdev_get_name(rdev));
+		rdev_err(rdev, "failed to set optimum mode %x\n", mode);
 		goto out;
 	}
 	ret = mode;
@@ -2047,7 +2186,7 @@
 	return 0;
 
 err:
-	printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
+	pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
 	for (--i; i >= 0; --i)
 		regulator_disable(consumers[i].consumer);
 
@@ -2082,8 +2221,7 @@
 	return 0;
 
 err:
-	printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
-	       ret);
+	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
 	for (--i; i >= 0; --i)
 		regulator_enable(consumers[i].consumer);
 
@@ -2166,7 +2304,7 @@
 	int			status = 0;
 
 	/* some attributes need specific methods to be displayed */
-	if (ops->get_voltage) {
+	if (ops->get_voltage || ops->get_voltage_sel) {
 		status = device_create_file(dev, &dev_attr_microvolts);
 		if (status < 0)
 			return status;
@@ -2207,7 +2345,7 @@
 		return status;
 
 	/* constraints need specific supporting methods */
-	if (ops->set_voltage) {
+	if (ops->set_voltage || ops->set_voltage_sel) {
 		status = device_create_file(dev, &dev_attr_min_microvolts);
 		if (status < 0)
 			return status;
@@ -2271,6 +2409,23 @@
 	return status;
 }
 
+static void rdev_init_debugfs(struct regulator_dev *rdev)
+{
+#ifdef CONFIG_DEBUG_FS
+	rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
+	if (IS_ERR(rdev->debugfs) || !rdev->debugfs) {
+		rdev_warn(rdev, "Failed to create debugfs directory\n");
+		rdev->debugfs = NULL;
+		return;
+	}
+
+	debugfs_create_u32("use_count", 0444, rdev->debugfs,
+			   &rdev->use_count);
+	debugfs_create_u32("open_count", 0444, rdev->debugfs,
+			   &rdev->open_count);
+#endif
+}
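
With debugfs enabled, each registered regulator now gets a directory under /sys/kernel/debug/regulator/<name>/ (assuming debugfs is mounted in the usual place) containing read-only use_count and open_count files that mirror the counters kept in struct regulator_dev.
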
+
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
@@ -2282,7 +2437,7 @@
  * Returns 0 on success.
  */
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
+	struct device *dev, const struct regulator_init_data *init_data,
 	void *driver_data)
 {
 	static atomic_t regulator_no = ATOMIC_INIT(0);
@@ -2302,6 +2457,22 @@
 	if (!init_data)
 		return ERR_PTR(-EINVAL);
 
+	/* Only one of each should be implemented */
+	WARN_ON(regulator_desc->ops->get_voltage &&
+		regulator_desc->ops->get_voltage_sel);
+	WARN_ON(regulator_desc->ops->set_voltage &&
+		regulator_desc->ops->set_voltage_sel);
+
+	/* If we're using selectors we must implement list_voltage. */
+	if (regulator_desc->ops->get_voltage_sel &&
+	    !regulator_desc->ops->list_voltage) {
+		return ERR_PTR(-EINVAL);
+	}
+	if (regulator_desc->ops->set_voltage_sel &&
+	    !regulator_desc->ops->list_voltage) {
+		return ERR_PTR(-EINVAL);
+	}
+
 	rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
 	if (rdev == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -2399,6 +2570,8 @@
 	}
 
 	list_add(&rdev->list, &regulator_list);
+
+	rdev_init_debugfs(rdev);
 out:
 	mutex_unlock(&regulator_list_mutex);
 	return rdev;
@@ -2431,12 +2604,16 @@
 		return;
 
 	mutex_lock(&regulator_list_mutex);
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(rdev->debugfs);
+#endif
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
 	if (rdev->supply)
 		sysfs_remove_link(&rdev->dev.kobj, "supply");
 	device_unregister(&rdev->dev);
+	kfree(rdev->constraints);
 	mutex_unlock(&regulator_list_mutex);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -2465,8 +2642,7 @@
 		mutex_unlock(&rdev->mutex);
 
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to prepare %s\n",
-				__func__, rdev_get_name(rdev));
+			rdev_err(rdev, "failed to prepare\n");
 			goto out;
 		}
 	}
@@ -2572,10 +2748,16 @@
 {
 	int ret;
 
-	printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
-
 	ret = class_register(&regulator_class);
 
+#ifdef CONFIG_DEBUG_FS
+	debugfs_root = debugfs_create_dir("regulator", NULL);
+	if (IS_ERR(debugfs_root) || !debugfs_root) {
+		pr_warn("regulator: Failed to create debugfs directory\n");
+		debugfs_root = NULL;
+	}
+#endif
+
 	regulator_dummy_init();
 
 	return ret;
@@ -2590,7 +2772,6 @@
 	struct regulator_ops *ops;
 	struct regulation_constraints *c;
 	int enabled, ret;
-	const char *name;
 
 	mutex_lock(&regulator_list_mutex);
 
@@ -2602,8 +2783,6 @@
 		ops = rdev->desc->ops;
 		c = rdev->constraints;
 
-		name = rdev_get_name(rdev);
-
 		if (!ops->disable || (c && c->always_on))
 			continue;
 
@@ -2624,13 +2803,10 @@
 		if (has_full_constraints) {
 			/* We log since this may kill the system if it
 			 * goes wrong. */
-			printk(KERN_INFO "%s: disabling %s\n",
-			       __func__, name);
+			rdev_info(rdev, "disabling\n");
 			ret = ops->disable(rdev);
 			if (ret != 0) {
-				printk(KERN_ERR
-				       "%s: couldn't disable %s: %d\n",
-				       __func__, name, ret);
+				rdev_err(rdev, "couldn't disable: %d\n", ret);
 			}
 		} else {
 			/* The intention is that in future we will
@@ -2638,9 +2814,7 @@
 			 * so warn even if we aren't going to do
 			 * anything here.
 			 */
-			printk(KERN_WARNING
-			       "%s: incomplete constraints, leaving %s on\n",
-			       __func__, name);
+			rdev_warn(rdev, "incomplete constraints, leaving on\n");
 		}
 
 unlock:
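
With the core changes above, a driver can either keep set_voltage() and fill in the new unsigned *selector argument (as the conversions in the hunks below do), or provide set_voltage_sel() together with the now-mandatory list_voltage() and let _regulator_do_set_voltage() pick the index. A minimal sketch of the selector style, with every "example_*" name invented and the hardware access reduced to a cached value:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/regulator/driver.h>

	/* Everything named "example_*" below is hypothetical. */
	struct example_pmic {
		unsigned int vsel_cache;	/* last selector written */
	};

	static const int example_voltages[] = {
		1100000, 1200000, 1300000, 1800000,
	};

	static int example_list_voltage(struct regulator_dev *rdev,
					unsigned selector)
	{
		if (selector >= ARRAY_SIZE(example_voltages))
			return -EINVAL;
		return example_voltages[selector];
	}

	static int example_set_voltage_sel(struct regulator_dev *rdev,
					   unsigned selector)
	{
		struct example_pmic *pmic = rdev_get_drvdata(rdev);

		/* A real driver would write the selector to its VSEL register. */
		pmic->vsel_cache = selector;
		return 0;
	}

	static int example_get_voltage_sel(struct regulator_dev *rdev)
	{
		struct example_pmic *pmic = rdev_get_drvdata(rdev);

		return pmic->vsel_cache;
	}

	static struct regulator_ops example_ops = {
		.list_voltage	 = example_list_voltage,
		.set_voltage_sel = example_set_voltage_sel,
		.get_voltage_sel = example_get_voltage_sel,
	};
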
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index f8c4661..362e082 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -107,7 +107,7 @@
 
 /* DA9030/DA9034 common operations */
 static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -119,6 +119,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -187,7 +188,8 @@
 
 /* DA9030 specific operations */
 static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
-				       int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
@@ -200,6 +202,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 	val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */
@@ -214,7 +217,8 @@
 }
 
 static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				    int min_uV, int max_uV,
+				    unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
@@ -234,6 +238,7 @@
 		val = (min_uV - thresh + info->step_uV - 1) / info->step_uV;
 	}
 
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -263,7 +268,7 @@
 
 /* DA9034 specific operations */
 static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -276,6 +281,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -289,7 +295,7 @@
 }
 
 static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -302,6 +308,7 @@
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
 	val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val);
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index b8cc638..e4b3592 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -58,7 +58,9 @@
 	return data;
 }
 
-static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV)
+static int isl6271a_set_voltage(struct regulator_dev *dev,
+				int minuV, int maxuV,
+				unsigned *selector)
 {
 	struct isl_pmic *pmic = rdev_get_drvdata(dev);
 	int vsel, err, data;
@@ -78,6 +80,8 @@
 	/* Convert the microvolts to data for the chip */
 	data = (vsel - ISL6271A_VOLTAGE_MIN) / ISL6271A_VOLTAGE_STEP;
 
+	*selector = data;
+
 	mutex_lock(&pmic->mtx);
 
 	err = i2c_smbus_write_byte(pmic->client, data);
@@ -169,7 +173,7 @@
 						init_data, pmic);
 		if (IS_ERR(pmic->rdev[i])) {
 			dev_err(&i2c->dev, "failed to register %s\n", id->name);
-			err = PTR_ERR(pmic->rdev);
+			err = PTR_ERR(pmic->rdev[i]);
 			goto error;
 		}
 	}
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 3bb82b6..0f22ef1 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -168,7 +168,8 @@
 }
 
 static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV,
+				  unsigned int *selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -187,6 +188,8 @@
 	if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
 			LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo),
 			val << LDO_VOL_CONTR_SHIFT(ldo));
@@ -256,7 +259,8 @@
 }
 
 static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV,
+				   unsigned int *selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3971_DCDC1;
@@ -277,6 +281,8 @@
 	if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
 	       BUCK_TARGET_VOL_MASK, val);
 	if (ret)
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index e07062f..6aa1b50 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -292,7 +292,8 @@
 }
 
 static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV,
+				  unsigned int *selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -313,6 +314,8 @@
 	if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
 	ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
 		LP3972_LDO_VOL_MASK(ldo) << shift, val << shift);
@@ -416,7 +419,8 @@
 }
 
 static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV,
+				   unsigned int *selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3972_DCDC1;
@@ -438,6 +442,8 @@
 	    vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
 				LP3972_BUCK_VOL_MASK, val);
 	if (ret)
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 559cfa2..3f49512 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -63,12 +63,12 @@
 	return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
 }
 
-static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			  unsigned *selector)
 {
 	struct max1586_data *max1586 = rdev_get_drvdata(rdev);
 	struct i2c_client *client = max1586->client;
 	unsigned range_uV = max1586->max_uV - max1586->min_uV;
-	unsigned selector;
 	u8 v3_prog;
 
 	if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
@@ -76,15 +76,15 @@
 	if (min_uV < max1586->min_uV)
 		min_uV = max1586->min_uV;
 
-	selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
+	*selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
 			range_uV - 1) / range_uV;
-	if (max1586_v3_calc_voltage(max1586, selector) > max_uV)
+	if (max1586_v3_calc_voltage(max1586, *selector) > max_uV)
 		return -EINVAL;
 
 	dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
-		max1586_v3_calc_voltage(max1586, selector) / 1000);
+		max1586_v3_calc_voltage(max1586, *selector) / 1000);
 
-	v3_prog = I2C_V3_SELECT | (u8) selector;
+	v3_prog = I2C_V3_SELECT | (u8) *selector;
 	return i2c_smbus_write_byte(client, v3_prog);
 }
 
@@ -110,10 +110,10 @@
 	return voltages_uv[selector];
 }
 
-static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			  unsigned int *selector)
 {
 	struct i2c_client *client = rdev_get_drvdata(rdev);
-	unsigned selector;
 	u8 v6_prog;
 
 	if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
@@ -122,21 +122,21 @@
 		return -EINVAL;
 
 	if (min_uV < 1800000)
-		selector = 0;
+		*selector = 0;
 	else if (min_uV < 2500000)
-		selector = 1;
+		*selector = 1;
 	else if (min_uV < 3000000)
-		selector = 2;
+		*selector = 2;
 	else if (min_uV >= 3000000)
-		selector = 3;
+		*selector = 3;
 
-	if (max1586_v6_calc_voltage(selector) > max_uV)
+	if (max1586_v6_calc_voltage(*selector) > max_uV)
 		return -EINVAL;
 
 	dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
-		max1586_v6_calc_voltage(selector) / 1000);
+		max1586_v6_calc_voltage(*selector) / 1000);
 
-	v6_prog = I2C_V6_SELECT | (u8) selector;
+	v6_prog = I2C_V6_SELECT | (u8) *selector;
 	return i2c_smbus_write_byte(client, v6_prog);
 }
 
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 6b60a9c..30eb9e5 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -155,7 +155,7 @@
 }
 
 static int max8649_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -168,6 +168,7 @@
 	data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
 		/ MAX8649_DCDC_STEP;
 	mask = MAX8649_VOL_MASK;
+	*selector = data & mask;
 
 	return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
 }
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index c570e6e..33f5d9a 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -141,7 +141,8 @@
 	return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
 }
 
-static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			    unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 reg, selector, bits;
@@ -154,6 +155,7 @@
 
 	selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
 			/ MAX8660_DCDC_STEP;
+	*s = selector;
 
 	ret = max8660_dcdc_list(rdev, selector);
 	if (ret < 0 || ret > max_uV)
@@ -196,7 +198,8 @@
 	return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
 }
 
-static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			    unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 selector;
@@ -213,6 +216,8 @@
 	if (ret < 0 || ret > max_uV)
 		return -EINVAL;
 
+	*s = selector;
+
 	ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
 	if (ret)
 		return ret;
@@ -270,7 +275,8 @@
 	return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
 }
 
-static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV,
+			     int max_uV, unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 selector;
@@ -288,6 +294,8 @@
 	if (ret < 0 || ret > max_uV)
 		return -EINVAL;
 
+	*s = selector;
+
 	if (rdev_get_id(rdev) == MAX8660_V6)
 		return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
 	else
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 552cad8..8ae1475 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -55,7 +55,7 @@
 }
 
 static int max8925_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned int *selector)
 {
 	struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -66,6 +66,7 @@
 		return -EINVAL;
 	}
 	data = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = data;
 	data <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
 
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 0d5dda4..a8f4ecf 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -133,7 +133,7 @@
 }
 
 static int max8952_set_voltage(struct regulator_dev *rdev,
-				int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8952_data *max8952 = rdev_get_drvdata(rdev);
 	s8 vid = -1, i;
@@ -156,6 +156,7 @@
 	if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) {
 		max8952->vid0 = (vid % 2 == 1);
 		max8952->vid1 = (((vid >> 1) % 2) == 1);
+		*selector = vid;
 		gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
 		gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
 	} else
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 5c20756..0ec49ca 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -304,7 +304,7 @@
 }
 
 static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
-				int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct i2c_client *i2c = max8998->iodev->i2c;
@@ -331,6 +331,8 @@
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
 
+	*selector = i;
+
 	ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
@@ -352,7 +354,7 @@
 }
 
 static int max8998_set_voltage_buck(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct max8998_platform_data *pdata =
@@ -384,6 +386,8 @@
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
 
+	*selector = i;
+
 	ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
@@ -420,6 +424,9 @@
 				}
 			}
 
+			if (pdata->buck_voltage_lock)
+				return -EINVAL;
+
 			/* no predefine regulator found */
 			max8998->buck1_idx = (buck1_last_val % 2) + 2;
 			dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
@@ -447,18 +454,26 @@
 			"BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
 			, i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
 		if (gpio_is_valid(pdata->buck2_set3)) {
-			if (max8998->buck2_vol[0] == i) {
-				max8998->buck1_idx = 0;
-				buck2_gpio_set(pdata->buck2_set3, 0);
-			} else {
-				max8998->buck1_idx = 1;
-				ret = max8998_get_voltage_register(rdev, &reg,
-								   &shift,
-								   &mask);
-				ret = max8998_write_reg(i2c, reg, i);
-				max8998->buck2_vol[1] = i;
-				buck2_gpio_set(pdata->buck2_set3, 1);
+			/*
+			 * Check if the requested voltage value is
+			 * already defined.
+			 */
+			for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
+				if (max8998->buck2_vol[j] == i) {
+					max8998->buck2_idx = j;
+					buck2_gpio_set(pdata->buck2_set3, j);
+					goto buck2_exit;
+				}
 			}
+
+			if (pdata->buck_voltage_lock)
+				return -EINVAL;
+
+			max8998_get_voltage_register(rdev,
+					&reg, &shift, &mask);
+			ret = max8998_write_reg(i2c, reg, i);
+			max8998->buck2_vol[max8998->buck2_idx] = i;
+			buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
+buck2_exit:
 			dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
 				gpio_get_value(pdata->buck2_set3));
 		} else {
@@ -703,6 +718,9 @@
 	platform_set_drvdata(pdev, max8998);
 	i2c = max8998->iodev->i2c;
 
+	max8998->buck1_idx = pdata->buck1_default_idx;
+	max8998->buck2_idx = pdata->buck2_default_idx;
+
 	/* NOTE: */
 	/* For unused GPIO NOT marked as -1 (thereof equal to 0)  WARN_ON */
 	/* will be displayed */
@@ -735,23 +753,46 @@
 		i = 0;
 		while (buck12_voltage_map_desc.min +
 		       buck12_voltage_map_desc.step*i
-		       != (pdata->buck1_max_voltage1 / 1000))
+		       < (pdata->buck1_voltage1 / 1000))
 			i++;
-		printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
 		max8998->buck1_vol[0] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
+		if (ret)
+			return ret;
 
 		/* Set predefined value for BUCK1 register 2 */
 		i = 0;
 		while (buck12_voltage_map_desc.min +
 		       buck12_voltage_map_desc.step*i
-		       != (pdata->buck1_max_voltage2 / 1000))
+		       < (pdata->buck1_voltage2 / 1000))
 			i++;
 
 		max8998->buck1_vol[1] = i;
-		printk(KERN_ERR "i:%d, buck1_idx:%d\n", i, max8998->buck1_idx);
-		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i)
-			+ ret;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
+		if (ret)
+			return ret;
+
+		/* Set predefined value for BUCK1 register 3 */
+		i = 0;
+		while (buck12_voltage_map_desc.min +
+		       buck12_voltage_map_desc.step*i
+		       < (pdata->buck1_voltage3 / 1000))
+			i++;
+
+		max8998->buck1_vol[2] = i;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
+		if (ret)
+			return ret;
+
+		/* Set predefined value for BUCK1 register 4 */
+		i = 0;
+		while (buck12_voltage_map_desc.min +
+		       buck12_voltage_map_desc.step*i
+		       < (pdata->buck1_voltage4 / 1000))
+			i++;
+
+		max8998->buck1_vol[3] = i;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
 		if (ret)
 			return ret;
 
@@ -768,18 +809,28 @@
 		gpio_direction_output(pdata->buck2_set3,
 				      max8998->buck2_idx & 0x1);
 
-		/* BUCK2 - set preset default voltage value to buck2_vol[0] */
+		/* BUCK2 register 1 */
 		i = 0;
 		while (buck12_voltage_map_desc.min +
 		       buck12_voltage_map_desc.step*i
-		       != (pdata->buck2_max_voltage / 1000))
+		       < (pdata->buck2_voltage1 / 1000))
 			i++;
-		printk(KERN_ERR "i:%d, buck2_idx:%d\n", i, max8998->buck2_idx);
 		max8998->buck2_vol[0] = i;
 		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
 		if (ret)
 			return ret;
 
+		/* BUCK2 register 2 */
+		i = 0;
+		while (buck12_voltage_map_desc.min +
+		       buck12_voltage_map_desc.step*i
+		       < (pdata->buck2_voltage2 / 1000))
+			i++;
+		max8998->buck2_vol[1] = i;
+		ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
+		if (ret)
+			return ret;
 	}
 
 	for (i = 0; i < pdata->num_regulators; i++) {
@@ -831,6 +882,12 @@
 	return 0;
 }
 
+static const struct platform_device_id max8998_pmic_id[] = {
+	{ "max8998-pmic", TYPE_MAX8998 },
+	{ "lp3974-pmic", TYPE_LP3974 },
+	{ }
+};
+
 static struct platform_driver max8998_pmic_driver = {
 	.driver = {
 		.name = "max8998-pmic",
@@ -838,6 +895,7 @@
 	},
 	.probe = max8998_pmic_probe,
 	.remove = __devexit_p(max8998_pmic_remove),
+	.id_table = max8998_pmic_id,
 };
 
 static int __init max8998_pmic_init(void)
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ecd99f5..3e5d0c3 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -1,6 +1,7 @@
 /*
  * Regulator Driver for Freescale MC13783 PMIC
  *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
  * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
  *
@@ -17,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include "mc13xxx.h"
 
 #define MC13783_REG_SWITCHERS5			29
 #define MC13783_REG_SWITCHERS5_SW3EN			(1 << 20)
@@ -89,154 +91,106 @@
 #define MC13783_REG_POWERMISC_PWGTSPI_M			(3 << 15)
 
 
-struct mc13783_regulator {
-	struct regulator_desc desc;
-	int reg;
-	int enable_bit;
-	int vsel_reg;
-	int vsel_shift;
-	int vsel_mask;
-	int const *voltages;
-};
-
 /* Voltage Values */
-static const int const mc13783_sw3_val[] = {
+static const int mc13783_sw3_val[] = {
 	5000000, 5000000, 5000000, 5500000,
 };
 
-static const int const mc13783_vaudio_val[] = {
+static const int mc13783_vaudio_val[] = {
 	2775000,
 };
 
-static const int const mc13783_viohi_val[] = {
+static const int mc13783_viohi_val[] = {
 	2775000,
 };
 
-static const int const mc13783_violo_val[] = {
+static const int mc13783_violo_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 };
 
-static const int const mc13783_vdig_val[] = {
+static const int mc13783_vdig_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 };
 
-static const int const mc13783_vgen_val[] = {
+static const int mc13783_vgen_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 	1100000, 2000000, 2775000, 2400000,
 };
 
-static const int const mc13783_vrfdig_val[] = {
+static const int mc13783_vrfdig_val[] = {
 	1200000, 1500000, 1800000, 1875000,
 };
 
-static const int const mc13783_vrfref_val[] = {
+static const int mc13783_vrfref_val[] = {
 	2475000, 2600000, 2700000, 2775000,
 };
 
-static const int const mc13783_vrfcp_val[] = {
+static const int mc13783_vrfcp_val[] = {
 	2700000, 2775000,
 };
 
-static const int const mc13783_vsim_val[] = {
+static const int mc13783_vsim_val[] = {
 	1800000, 2900000, 3000000,
 };
 
-static const int const mc13783_vesim_val[] = {
+static const int mc13783_vesim_val[] = {
 	1800000, 2900000,
 };
 
-static const int const mc13783_vcam_val[] = {
+static const int mc13783_vcam_val[] = {
 	1500000, 1800000, 2500000, 2550000,
 	2600000, 2750000, 2800000, 3000000,
 };
 
-static const int const mc13783_vrfbg_val[] = {
+static const int mc13783_vrfbg_val[] = {
 	1250000,
 };
 
-static const int const mc13783_vvib_val[] = {
+static const int mc13783_vvib_val[] = {
 	1300000, 1800000, 2000000, 3000000,
 };
 
-static const int const mc13783_vmmc_val[] = {
+static const int mc13783_vmmc_val[] = {
 	1600000, 1800000, 2000000, 2600000,
 	2700000, 2800000, 2900000, 3000000,
 };
 
-static const int const mc13783_vrf_val[] = {
+static const int mc13783_vrf_val[] = {
 	1500000, 1875000, 2700000, 2775000,
 };
 
-static const int const mc13783_gpo_val[] = {
+static const int mc13783_gpo_val[] = {
 	3100000,
 };
 
-static const int const mc13783_pwgtdrv_val[] = {
+static const int mc13783_pwgtdrv_val[] = {
 	5500000,
 };
 
-static struct regulator_ops mc13783_regulator_ops;
-static struct regulator_ops mc13783_fixed_regulator_ops;
 static struct regulator_ops mc13783_gpo_regulator_ops;
 
-#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages)	\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_regulator_ops,			\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.vsel_reg = MC13783_REG_ ## _vsel_reg,			\
-		.vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
-		.vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \
+			mc13xxx_regulator_ops)
 
-#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages)		\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_fixed_regulator_ops,		\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages)		\
+	MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \
+			mc13xxx_fixed_regulator_ops)
 
-#define MC13783_GPO_DEFINE(prefix, _name, _reg,  _voltages)		\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_gpo_regulator_ops,		\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_GPO_DEFINE(prefix, name, reg, voltages)		\
+	MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \
+			mc13783_gpo_regulator_ops)
 
 #define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages)		\
-	MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+	MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
 #define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages)		\
-	MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
+	MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
 
-static struct mc13783_regulator mc13783_regulators[] = {
+static struct mc13xxx_regulator mc13783_regulators[] = {
 	MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
 
-	MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
-	MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+	MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+	MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
 	MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,	\
 			    mc13783_violo_val),
 	MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,	\
@@ -255,7 +209,7 @@
 			    mc13783_vesim_val),
 	MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,	\
 			    mc13783_vcam_val),
-	MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+	MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
 	MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,	\
 			    mc13783_vvib_val),
 	MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,	\
@@ -266,215 +220,24 @@
 			    mc13783_vmmc_val),
 	MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,	\
 			    mc13783_vmmc_val),
-	MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
-	MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
+	MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
+	MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
 };
 
-struct mc13783_regulator_priv {
-	struct mc13783 *mc13783;
-	u32 powermisc_pwgt_state;
-	struct regulator_dev *regulators[];
-};
-
-static int mc13783_regulator_enable(struct regulator_dev *rdev)
+static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+		u32 val)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
-			mc13783_regulators[id].enable_bit,
-			mc13783_regulators[id].enable_bit);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_disable(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
-			mc13783_regulators[id].enable_bit, 0);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int ret, id = rdev_get_id(rdev);
-	unsigned int val;
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
-	mc13783_unlock(priv->mc13783);
-
-	if (ret)
-		return ret;
-
-	return (val & mc13783_regulators[id].enable_bit) != 0;
-}
-
-static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
-						unsigned selector)
-{
-	int id = rdev_get_id(rdev);
-
-	if (selector >= mc13783_regulators[id].desc.n_voltages)
-		return -EINVAL;
-
-	return mc13783_regulators[id].voltages[selector];
-}
-
-static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	int reg_id = rdev_get_id(rdev);
-	int i;
-	int bestmatch;
-	int bestindex;
-
-	/*
-	 * Locate the minimum voltage fitting the criteria on
-	 * this regulator. The switchable voltages are not
-	 * in strict falling order so we need to check them
-	 * all for the best match.
-	 */
-	bestmatch = INT_MAX;
-	bestindex = -1;
-	for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
-		if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
-		    mc13783_regulators[reg_id].voltages[i] < bestmatch) {
-			bestmatch = mc13783_regulators[reg_id].voltages[i];
-			bestindex = i;
-		}
-	}
-
-	if (bestindex < 0 || bestmatch > max_uV) {
-		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
-				min_uV, max_uV);
-		return -EINVAL;
-	}
-	return bestindex;
-}
-
-static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int value, id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	/* Find the best index */
-	value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
-	dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
-	if (value < 0)
-		return value;
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
-			mc13783_regulators[id].vsel_mask,
-			value << mc13783_regulators[id].vsel_shift);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int ret, id = rdev_get_id(rdev);
-	unsigned int val;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783,
-				mc13783_regulators[id].vsel_reg, &val);
-	mc13783_unlock(priv->mc13783);
-
-	if (ret)
-		return ret;
-
-	val = (val & mc13783_regulators[id].vsel_mask)
-		>> mc13783_regulators[id].vsel_shift;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
-
-	BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages);
-
-	return mc13783_regulators[id].voltages[val];
-}
-
-static struct regulator_ops mc13783_regulator_ops = {
-	.enable = mc13783_regulator_enable,
-	.disable = mc13783_regulator_disable,
-	.is_enabled = mc13783_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_regulator_set_voltage,
-	.get_voltage = mc13783_regulator_get_voltage,
-};
-
-static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	int id = rdev_get_id(rdev);
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	if (min_uV >= mc13783_regulators[id].voltages[0] &&
-	    max_uV <= mc13783_regulators[id].voltages[0])
-		return 0;
-	else
-		return -EINVAL;
-}
-
-static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	int id = rdev_get_id(rdev);
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	return mc13783_regulators[id].voltages[0];
-}
-
-static struct regulator_ops mc13783_fixed_regulator_ops = {
-	.enable = mc13783_regulator_enable,
-	.disable = mc13783_regulator_disable,
-	.is_enabled = mc13783_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_fixed_regulator_set_voltage,
-	.get_voltage = mc13783_fixed_regulator_get_voltage,
-};
-
-static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
-				 u32 val)
-{
-	struct mc13783 *mc13783 = priv->mc13783;
+	struct mc13xxx *mc13783 = priv->mc13xxx;
 	int ret;
 	u32 valread;
 
 	BUG_ON(val & ~mask);
 
-	ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
+	ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
 	if (ret)
 		return ret;
 
@@ -489,34 +252,36 @@
 	valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
 						priv->powermisc_pwgt_state;
 
-	return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
+	return mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
 }
 
 static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int id = rdev_get_id(rdev);
 	int ret;
-	u32 en_val = mc13783_regulators[id].enable_bit;
+	u32 en_val = mc13xxx_regulators[id].enable_bit;
 
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
 	/* Power Gate enable value is 0 */
-	if (id == MC13783_REGU_PWGT1SPI ||
-	    id == MC13783_REGU_PWGT2SPI)
+	if (id == MC13783_REG_PWGT1SPI ||
+	    id == MC13783_REG_PWGT2SPI)
 		en_val = 0;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
 					en_val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
 }
 
 static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int id = rdev_get_id(rdev);
 	int ret;
 	u32 dis_val = 0;
@@ -524,27 +289,28 @@
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
 	/* Power Gate disable value is 1 */
-	if (id == MC13783_REGU_PWGT1SPI ||
-	    id == MC13783_REGU_PWGT2SPI)
-		dis_val = mc13783_regulators[id].enable_bit;
+	if (id == MC13783_REG_PWGT1SPI ||
+	    id == MC13783_REG_PWGT2SPI)
+		dis_val = mc13xxx_regulators[id].enable_bit;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
 					dis_val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
 }
 
 static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int ret, id = rdev_get_id(rdev);
 	unsigned int val;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	if (ret)
 		return ret;
@@ -554,22 +320,22 @@
 	val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
 	      (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
 
-	return (val & mc13783_regulators[id].enable_bit) != 0;
+	return (val & mc13xxx_regulators[id].enable_bit) != 0;
 }
 
 static struct regulator_ops mc13783_gpo_regulator_ops = {
 	.enable = mc13783_gpo_regulator_enable,
 	.disable = mc13783_gpo_regulator_disable,
 	.is_enabled = mc13783_gpo_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_fixed_regulator_set_voltage,
-	.get_voltage = mc13783_fixed_regulator_get_voltage,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
 };
 
 static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
 {
-	struct mc13783_regulator_priv *priv;
-	struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
+	struct mc13xxx_regulator_priv *priv;
+	struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
 	struct mc13783_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	struct mc13783_regulator_init_data *init_data;
@@ -583,7 +349,8 @@
 	if (!priv)
 		return -ENOMEM;
 
-	priv->mc13783 = mc13783;
+	priv->mc13xxx_regulators = mc13783_regulators;
+	priv->mc13xxx = mc13783;
 
 	for (i = 0; i < pdata->num_regulators; i++) {
 		init_data = &pdata->regulators[i];
@@ -613,7 +380,7 @@
 
 static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
 {
-	struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
+	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
 	struct mc13783_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	int i;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
new file mode 100644
index 0000000..1b8f739
--- /dev/null
+++ b/drivers/regulator/mc13892-regulator.c
@@ -0,0 +1,635 @@
+/*
+ * Regulator Driver for Freescale MC13892 PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+#define MC13892_REVISION			7
+
+#define MC13892_POWERCTL0			13
+#define MC13892_POWERCTL0_USEROFFSPI		3
+#define MC13892_POWERCTL0_VCOINCELLVSEL		20
+#define MC13892_POWERCTL0_VCOINCELLVSEL_M	(7<<20)
+#define MC13892_POWERCTL0_VCOINCELLEN		(1<<23)
+
+#define MC13892_SWITCHERS0_SWxHI		(1<<23)
+
+#define MC13892_SWITCHERS0			24
+#define MC13892_SWITCHERS0_SW1VSEL		0
+#define MC13892_SWITCHERS0_SW1VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS0_SW1HI		(1<<23)
+#define MC13892_SWITCHERS0_SW1EN		0
+
+#define MC13892_SWITCHERS1			25
+#define MC13892_SWITCHERS1_SW2VSEL		0
+#define MC13892_SWITCHERS1_SW2VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS1_SW2HI		(1<<23)
+#define MC13892_SWITCHERS1_SW2EN		0
+
+#define MC13892_SWITCHERS2			26
+#define MC13892_SWITCHERS2_SW3VSEL		0
+#define MC13892_SWITCHERS2_SW3VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS2_SW3HI		(1<<23)
+#define MC13892_SWITCHERS2_SW3EN		0
+
+#define MC13892_SWITCHERS3			27
+#define MC13892_SWITCHERS3_SW4VSEL		0
+#define MC13892_SWITCHERS3_SW4VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS3_SW4HI		(1<<23)
+#define MC13892_SWITCHERS3_SW4EN		0
+
+#define MC13892_SWITCHERS4			28
+#define MC13892_SWITCHERS4_SW1MODE		0
+#define MC13892_SWITCHERS4_SW1MODE_AUTO		(8<<0)
+#define MC13892_SWITCHERS4_SW1MODE_M		(0xf<<0)
+#define MC13892_SWITCHERS4_SW2MODE		10
+#define MC13892_SWITCHERS4_SW2MODE_AUTO		(8<<10)
+#define MC13892_SWITCHERS4_SW2MODE_M		(0xf<<10)
+
+#define MC13892_SWITCHERS5			29
+#define MC13892_SWITCHERS5_SW3MODE		0
+#define MC13892_SWITCHERS5_SW3MODE_AUTO		(8<<0)
+#define MC13892_SWITCHERS5_SW3MODE_M		(0xf<<0)
+#define MC13892_SWITCHERS5_SW4MODE		8
+#define MC13892_SWITCHERS5_SW4MODE_AUTO		(8<<8)
+#define MC13892_SWITCHERS5_SW4MODE_M		(0xf<<8)
+#define MC13892_SWITCHERS5_SWBSTEN		(1<<20)
+
+#define MC13892_REGULATORSETTING0		30
+#define MC13892_REGULATORSETTING0_VGEN1VSEL	0
+#define MC13892_REGULATORSETTING0_VDIGVSEL	4
+#define MC13892_REGULATORSETTING0_VGEN2VSEL	6
+#define MC13892_REGULATORSETTING0_VPLLVSEL	9
+#define MC13892_REGULATORSETTING0_VUSB2VSEL	11
+#define MC13892_REGULATORSETTING0_VGEN3VSEL	14
+#define MC13892_REGULATORSETTING0_VCAMVSEL	16
+
+#define MC13892_REGULATORSETTING0_VGEN1VSEL_M	(3<<0)
+#define MC13892_REGULATORSETTING0_VDIGVSEL_M	(3<<4)
+#define MC13892_REGULATORSETTING0_VGEN2VSEL_M	(7<<6)
+#define MC13892_REGULATORSETTING0_VPLLVSEL_M	(3<<9)
+#define MC13892_REGULATORSETTING0_VUSB2VSEL_M	(3<<11)
+#define MC13892_REGULATORSETTING0_VGEN3VSEL_M	(1<<14)
+#define MC13892_REGULATORSETTING0_VCAMVSEL_M	(3<<16)
+
+#define MC13892_REGULATORSETTING1		31
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL	2
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL	4
+#define MC13892_REGULATORSETTING1_VSDVSEL	6
+
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL_M	(3<<2)
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL_M	(3<<4)
+#define MC13892_REGULATORSETTING1_VSDVSEL_M	(7<<6)
+
+#define MC13892_REGULATORMODE0			32
+#define MC13892_REGULATORMODE0_VGEN1EN		(1<<0)
+#define MC13892_REGULATORMODE0_VGEN1STDBY	(1<<1)
+#define MC13892_REGULATORMODE0_VGEN1MODE	(1<<2)
+#define MC13892_REGULATORMODE0_VIOHIEN		(1<<3)
+#define MC13892_REGULATORMODE0_VIOHISTDBY	(1<<4)
+#define MC13892_REGULATORMODE0_VIOHIMODE	(1<<5)
+#define MC13892_REGULATORMODE0_VDIGEN		(1<<9)
+#define MC13892_REGULATORMODE0_VDIGSTDBY	(1<<10)
+#define MC13892_REGULATORMODE0_VDIGMODE		(1<<11)
+#define MC13892_REGULATORMODE0_VGEN2EN		(1<<12)
+#define MC13892_REGULATORMODE0_VGEN2STDBY	(1<<13)
+#define MC13892_REGULATORMODE0_VGEN2MODE	(1<<14)
+#define MC13892_REGULATORMODE0_VPLLEN		(1<<15)
+#define MC13892_REGULATORMODE0_VPLLSTDBY	(1<<16)
+#define MC13892_REGULATORMODE0_VPLLMODE		(1<<17)
+#define MC13892_REGULATORMODE0_VUSB2EN		(1<<18)
+#define MC13892_REGULATORMODE0_VUSB2STDBY	(1<<19)
+#define MC13892_REGULATORMODE0_VUSB2MODE	(1<<20)
+
+#define MC13892_REGULATORMODE1			33
+#define MC13892_REGULATORMODE1_VGEN3EN		(1<<0)
+#define MC13892_REGULATORMODE1_VGEN3STDBY	(1<<1)
+#define MC13892_REGULATORMODE1_VGEN3MODE	(1<<2)
+#define MC13892_REGULATORMODE1_VCAMEN		(1<<6)
+#define MC13892_REGULATORMODE1_VCAMSTDBY	(1<<7)
+#define MC13892_REGULATORMODE1_VCAMMODE		(1<<8)
+#define MC13892_REGULATORMODE1_VCAMCONFIGEN	(1<<9)
+#define MC13892_REGULATORMODE1_VVIDEOEN		(1<<12)
+#define MC13892_REGULATORMODE1_VVIDEOSTDBY	(1<<13)
+#define MC13892_REGULATORMODE1_VVIDEOMODE	(1<<14)
+#define MC13892_REGULATORMODE1_VAUDIOEN		(1<<15)
+#define MC13892_REGULATORMODE1_VAUDIOSTDBY	(1<<16)
+#define MC13892_REGULATORMODE1_VAUDIOMODE	(1<<17)
+#define MC13892_REGULATORMODE1_VSDEN		(1<<18)
+#define MC13892_REGULATORMODE1_VSDSTDBY		(1<<19)
+#define MC13892_REGULATORMODE1_VSDMODE		(1<<20)
+
+#define MC13892_POWERMISC			34
+#define MC13892_POWERMISC_GPO1EN		(1<<6)
+#define MC13892_POWERMISC_GPO2EN		(1<<8)
+#define MC13892_POWERMISC_GPO3EN		(1<<10)
+#define MC13892_POWERMISC_GPO4EN		(1<<12)
+#define MC13892_POWERMISC_PWGT1SPIEN		(1<<15)
+#define MC13892_POWERMISC_PWGT2SPIEN		(1<<16)
+#define MC13892_POWERMISC_GPO4ADINEN		(1<<21)
+
+#define MC13892_POWERMISC_PWGTSPI_M		(3 << 15)
+
+#define MC13892_USB1				50
+#define MC13892_USB1_VUSBEN			(1<<3)
+
+static const int mc13892_vcoincell[] = {
+	2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
+	3200000, 3300000,
+};
+
+static const int mc13892_sw1[] = {
+	600000,   625000,  650000,  675000,  700000,  725000,
+	750000,   775000,  800000,  825000,  850000,  875000,
+	900000,   925000,  950000,  975000, 1000000, 1025000,
+	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+	1350000, 1375000
+};
+
+static const int mc13892_sw[] = {
+	600000,   625000,  650000,  675000,  700000,  725000,
+	750000,   775000,  800000,  825000,  850000,  875000,
+	900000,   925000,  950000,  975000, 1000000, 1025000,
+	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+	1350000, 1375000, 1400000, 1425000, 1450000, 1475000,
+	1500000, 1525000, 1550000, 1575000, 1600000, 1625000,
+	1650000, 1675000, 1700000, 1725000, 1750000, 1775000,
+	1800000, 1825000, 1850000, 1875000
+};
+
+static const int mc13892_swbst[] = {
+	5000000,
+};
+
+static const int mc13892_viohi[] = {
+	2775000,
+};
+
+static const int mc13892_vpll[] = {
+	1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vdig[] = {
+	1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vsd[] = {
+	1800000, 2000000, 2600000, 2700000,
+	2800000, 2900000, 3000000, 3150000,
+};
+
+static const int mc13892_vusb2[] = {
+	2400000, 2600000, 2700000, 2775000,
+};
+
+static const int mc13892_vvideo[] = {
+	2700000, 2775000, 2500000, 2600000,
+};
+
+static const int mc13892_vaudio[] = {
+	2300000, 2500000, 2775000, 3000000,
+};
+
+static const int mc13892_vcam[] = {
+	2500000, 2600000, 2750000, 3000000,
+};
+
+static const int mc13892_vgen1[] = {
+	1200000, 1500000, 2775000, 3150000,
+};
+
+static const int mc13892_vgen2[] = {
+	1200000, 1500000, 1600000, 1800000,
+	2700000, 2800000, 3000000, 3150000,
+};
+
+static const int mc13892_vgen3[] = {
+	1800000, 2900000,
+};
+
+static const int mc13892_vusb[] = {
+	3300000,
+};
+
+static const int mc13892_gpo[] = {
+	2750000,
+};
+
+static const int mc13892_pwgtdrv[] = {
+	5000000,
+};
+
+static struct regulator_ops mc13892_gpo_regulator_ops;
+/* sw regulators need special care due to the "hi bit" */
+static struct regulator_ops mc13892_sw_regulator_ops;
+
+
+#define MC13892_FIXED_DEFINE(name, reg, voltages)		\
+	MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages,	\
+			mc13xxx_fixed_regulator_ops)
+
+#define MC13892_GPO_DEFINE(name, reg, voltages)			\
+	MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages,	\
+			mc13892_gpo_regulator_ops)
+
+#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+			mc13892_sw_regulator_ops)
+
+#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+			mc13xxx_regulator_ops)
+
+static struct mc13xxx_regulator mc13892_regulators[] = {
+	MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell),
+	MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
+	MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw),
+	MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw),
+	MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
+	MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
+	MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
+	MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vpll),
+	MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vdig),
+	MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vsd),
+	MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vusb2),
+	MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vvideo),
+	MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vaudio),
+	MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,	\
+		mc13892_vcam),
+	MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vgen1),
+	MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vgen2),
+	MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,	\
+		mc13892_vgen3),
+	MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
+	MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv),
+	MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv),
+};
+
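+/*
+ * POWERMISC is shared by the GPO and power-gate regulators.  The desired
+ * PWGTxSPIEN state is cached in priv->powermisc_pwgt_state and re-applied
+ * on every write, so a read-modify-write for one regulator never clobbers
+ * the requested power-gate setting.
+ */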
+static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+				 u32 val)
+{
+	struct mc13xxx *mc13892 = priv->mc13xxx;
+	int ret;
+	u32 valread;
+
+	BUG_ON(val & ~mask);
+
+	ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread);
+	if (ret)
+		return ret;
+
+	/* Update the stored state for Power Gates. */
+	priv->powermisc_pwgt_state =
+		(priv->powermisc_pwgt_state & ~mask) | val;
+	priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M;
+
+	/* Construct the new register value */
+	valread = (valread & ~mask) | val;
+	/* Overwrite the PWGTxEN with the stored version */
+	valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) |
+		priv->powermisc_pwgt_state;
+
+	return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread);
+}
+
+static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret;
+	u32 en_val = mc13892_regulators[id].enable_bit;
+	u32 mask = mc13892_regulators[id].enable_bit;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	/* Power Gate enable value is 0 */
+	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+		en_val = 0;
+
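+	/* for GPO4, also clear the GPO4ADINEN (ADC input enable) bit */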
+	if (id == MC13892_GPO4)
+		mask |= MC13892_POWERMISC_GPO4ADINEN;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13892_powermisc_rmw(priv, mask, en_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret;
+	u32 dis_val = 0;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	/* Power Gate disable value is 1 */
+	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+		dis_val = mc13892_regulators[id].enable_bit;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit,
+		dis_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	/*
+	 * Power Gates state is stored in powermisc_pwgt_state
+	 * where the meaning of bits is negated.
+	 */
+	val = (val & ~MC13892_POWERMISC_PWGTSPI_M) |
+		(priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M);
+
+	return (val & mc13892_regulators[id].enable_bit) != 0;
+}
+
+
+static struct regulator_ops mc13892_gpo_regulator_ops = {
+	.enable = mc13892_gpo_regulator_enable,
+	.disable = mc13892_gpo_regulator_disable,
+	.is_enabled = mc13892_gpo_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+
+static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val, hi;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+		mc13892_regulators[id].vsel_reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+	if (ret)
+		return ret;
+
+	hi  = val & MC13892_SWITCHERS0_SWxHI;
+	val = (val & mc13892_regulators[id].vsel_mask)
+		>> mc13892_regulators[id].vsel_shift;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
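+	/*
+	 * SWxHI selects the upper output range: the 25 mV steps start at
+	 * 1100 mV when it is set and at 600 mV when it is clear.
+	 */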
+	if (hi)
+		val = (25000 * val) + 1100000;
+	else
+		val = (25000 * val) + 600000;
+
+	return val;
+}
+
+static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int hi, value, val, mask, id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	/* Find the best index */
+	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+	if (value < 0)
+		return value;
+
+	*selector = value;
+	value = mc13892_regulators[id].voltages[value];
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+		mc13892_regulators[id].vsel_reg, &val);
+	if (ret)
+		goto err;
+
+	hi  = val & MC13892_SWITCHERS0_SWxHI;
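+	/*
+	 * Pick the range for the requested voltage: keep the current SWxHI
+	 * setting unless the target voltage only fits in the other range.
+	 */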
+	if (value > 1375000)
+		hi = 1;
+	if (value < 1100000)
+		hi = 0;
+
+	if (hi) {
+		value = (value - 1100000) / 25000;
+		value |= MC13892_SWITCHERS0_SWxHI;
+	} else
+		value = (value - 600000) / 25000;
+
+	mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+			mask, value << mc13892_regulators[id].vsel_shift);
+err:
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static struct regulator_ops mc13892_sw_regulator_ops = {
+	.is_enabled = mc13xxx_sw_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13892_sw_regulator_set_voltage,
+	.get_voltage = mc13892_sw_regulator_get_voltage,
+};
+
+static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	unsigned int en_val = 0;
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+
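+	/* REGULATOR_MODE_FAST is mapped onto VCAMCONFIGEN; other modes clear it */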
+	if (mode == REGULATOR_MODE_FAST)
+		en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg,
+		MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN)
+		return REGULATOR_MODE_FAST;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+
+static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+{
+	struct mc13xxx_regulator_priv *priv;
+	struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
+	struct mc13xxx_regulator_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
+	struct mc13xxx_regulator_init_data *init_data;
+	int i, ret;
+	u32 val;
+
+	priv = kzalloc(sizeof(*priv) +
+		pdata->num_regulators * sizeof(priv->regulators[0]),
+		GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->mc13xxx_regulators = mc13892_regulators;
+	priv->mc13xxx = mc13892;
+
+	mc13xxx_lock(mc13892);
+	ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
+	if (ret)
+		goto err_free;
+
+	/* enable switch auto mode */
+	if ((val & 0x0000FFFF) == 0x45d0) {
+		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
+			MC13892_SWITCHERS4_SW1MODE_M |
+			MC13892_SWITCHERS4_SW2MODE_M,
+			MC13892_SWITCHERS4_SW1MODE_AUTO |
+			MC13892_SWITCHERS4_SW2MODE_AUTO);
+		if (ret)
+			goto err_free;
+
+		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
+			MC13892_SWITCHERS5_SW3MODE_M |
+			MC13892_SWITCHERS5_SW4MODE_M,
+			MC13892_SWITCHERS5_SW3MODE_AUTO |
+			MC13892_SWITCHERS5_SW4MODE_AUTO);
+		if (ret)
+			goto err_free;
+	}
+	mc13xxx_unlock(mc13892);
+
+	mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+		= mc13892_vcam_set_mode;
+	mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+		= mc13892_vcam_get_mode;
+	for (i = 0; i < pdata->num_regulators; i++) {
+		init_data = &pdata->regulators[i];
+		priv->regulators[i] = regulator_register(
+			&mc13892_regulators[init_data->id].desc,
+			&pdev->dev, init_data->init_data, priv);
+
+		if (IS_ERR(priv->regulators[i])) {
+			dev_err(&pdev->dev, "failed to register regulator %s\n",
+				mc13892_regulators[init_data->id].desc.name);
+			ret = PTR_ERR(priv->regulators[i]);
+			goto err;
+		}
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+err:
+	while (--i >= 0)
+		regulator_unregister(priv->regulators[i]);
+
+err_free:
+	mc13xxx_unlock(mc13892);
+	kfree(priv);
+
+	return ret;
+}
+
+static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
+{
+	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+	struct mc13xxx_regulator_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
+	int i;
+
+	platform_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < pdata->num_regulators; i++)
+		regulator_unregister(priv->regulators[i]);
+
+	kfree(priv);
+	return 0;
+}
+
+static struct platform_driver mc13892_regulator_driver = {
+	.driver	= {
+		.name	= "mc13892-regulator",
+		.owner	= THIS_MODULE,
+	},
+	.remove	= __devexit_p(mc13892_regulator_remove),
+	.probe	= mc13892_regulator_probe,
+};
+
+static int __init mc13892_regulator_init(void)
+{
+	return platform_driver_register(&mc13892_regulator_driver);
+}
+subsys_initcall(mc13892_regulator_init);
+
+static void __exit mc13892_regulator_exit(void)
+{
+	platform_driver_unregister(&mc13892_regulator_driver);
+}
+module_exit(mc13892_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC");
+MODULE_ALIAS("platform:mc13892-regulator");
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
new file mode 100644
index 0000000..f53d31b
--- /dev/null
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator Driver for Freescale MC13xxx PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on the mc13783 regulator driver:
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register information taken from the Freescale mc13xxx drivers and the
+ * mc13xxx.pdf datasheet from Freescale
+ */
+
+#include <linux/mfd/mc13xxx.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+			mc13xxx_regulators[id].enable_bit,
+			mc13xxx_regulators[id].enable_bit);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_disable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+			mc13xxx_regulators[id].enable_bit, 0);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	return (val & mc13xxx_regulators[id].enable_bit) != 0;
+}
+
+int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned selector)
+{
+	int id = rdev_get_id(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+
+	if (selector >= mc13xxx_regulators[id].desc.n_voltages)
+		return -EINVAL;
+
+	return mc13xxx_regulators[id].voltages[selector];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
+
+int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+						int min_uV, int max_uV)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int reg_id = rdev_get_id(rdev);
+	int i;
+	int bestmatch;
+	int bestindex;
+
+	/*
+	 * Locate the minimum voltage fitting the criteria on
+	 * this regulator. The switchable voltages are not
+	 * in strict falling order so we need to check them
+	 * all for the best match.
+	 */
+	bestmatch = INT_MAX;
+	bestindex = -1;
+	for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) {
+		if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV &&
+		    mc13xxx_regulators[reg_id].voltages[i] < bestmatch) {
+			bestmatch = mc13xxx_regulators[reg_id].voltages[i];
+			bestindex = i;
+		}
+	}
+
+	if (bestindex < 0 || bestmatch > max_uV) {
+		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+				min_uV, max_uV);
+		return -EINVAL;
+	}
+	return bestindex;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index);
+
+static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+		int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int value, id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	/* Find the best index */
+	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+	if (value < 0)
+		return value;
+
+	*selector = value;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
+			mc13xxx_regulators[id].vsel_mask,
+			value << mc13xxx_regulators[id].vsel_shift);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+				mc13xxx_regulators[id].vsel_reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	val = (val & mc13xxx_regulators[id].vsel_mask)
+		>> mc13xxx_regulators[id].vsel_shift;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+	BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
+
+	return mc13xxx_regulators[id].voltages[val];
+}
+
+struct regulator_ops mc13xxx_regulator_ops = {
+	.enable = mc13xxx_regulator_enable,
+	.disable = mc13xxx_regulator_disable,
+	.is_enabled = mc13xxx_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_regulator_set_voltage,
+	.get_voltage = mc13xxx_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
+
+int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+	       int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	if (min_uV >= mc13xxx_regulators[id].voltages[0] &&
+	    max_uV <= mc13xxx_regulators[id].voltages[0])
+		return 0;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
+
+int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	return mc13xxx_regulators[id].voltages[0];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
+
+struct regulator_ops mc13xxx_fixed_regulator_ops = {
+	.enable = mc13xxx_regulator_enable,
+	.disable = mc13xxx_regulator_disable,
+	.is_enabled = mc13xxx_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
+
+int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
+MODULE_ALIAS("mc13xxx-regulator-core");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
new file mode 100644
index 0000000..2775826
--- /dev/null
+++ b/drivers/regulator/mc13xxx.h
@@ -0,0 +1,101 @@
+/*
+ * mc13xxx.h - regulators for the Freescale mc13xxx PMIC
+ *
+ *  Copyright (C) 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_REGULATOR_MC13XXX_H
+#define __LINUX_REGULATOR_MC13XXX_H
+
+#include <linux/regulator/driver.h>
+
+struct mc13xxx_regulator {
+	struct regulator_desc desc;
+	int reg;
+	int enable_bit;
+	int vsel_reg;
+	int vsel_shift;
+	int vsel_mask;
+	int hi_bit;
+	int const *voltages;
+};
+
+struct mc13xxx_regulator_priv {
+	struct mc13xxx *mc13xxx;
+	u32 powermisc_pwgt_state;
+	struct mc13xxx_regulator *mc13xxx_regulators;
+	struct regulator_dev *regulators[];
+};
+
+extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
+extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
+extern int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+						int min_uV, int max_uV);
+extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned selector);
+extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector);
+extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+
+extern struct regulator_ops mc13xxx_regulator_ops;
+extern struct regulator_ops mc13xxx_fixed_regulator_ops;
+
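+/*
+ * Helpers to build mc13xxx_regulator table entries.  The enable bit and
+ * voltage-select fields are derived from the regulator name by token
+ * pasting, e.g. MC13892_SWITCHERS0_SW1EN for (MC13892_, SW1, SWITCHERS0).
+ */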
+#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,			\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.vsel_reg = prefix ## _vsel_reg,			\
+		.vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\
+		.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,		\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_GPO_DEFINE(prefix, _name, _reg,  _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,		\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops)	\
+	MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops)	\
+	MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops)
+
+#endif
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 29d0566..31f6e11 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -151,7 +151,8 @@
 };
 
 static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
 	void *pcap = rdev_get_drvdata(rdev);
@@ -170,10 +171,12 @@
 			i = 0;
 
 		uV = vreg->voltage_table[i] * 1000;
-		if (min_uV <= uV && uV <= max_uV)
+		if (min_uV <= uV && uV <= max_uV) {
+			*selector = i;
 			return ezx_pcap_set_bits(pcap, vreg->reg,
 					(vreg->n_voltages - 1) << vreg->index,
 					i << vreg->index);
+		}
 
 		if (i == 0 && rdev_get_id(rdev) == V1)
 			i = vreg->n_voltages - 1;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index c8f41dc..69a11d9 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -108,7 +108,8 @@
 }
 
 static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+					  int min_uV, int max_uV,
+					  unsigned *selector)
 {
 	struct pcf50633 *pcf;
 	int regulator_id, millivolts;
@@ -147,6 +148,8 @@
 		return -EINVAL;
 	}
 
+	*selector = volt_bits;
+
 	return pcf50633_reg_write(pcf, regnr, volt_bits);
 }
 
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index cd6d4fc..60a7ca5 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -321,7 +321,8 @@
 }
 
 static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+				     int min_uV, int max_uV,
+				     unsigned *selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int dcdc = rdev_get_id(dev);
@@ -346,6 +347,8 @@
 			break;
 	}
 
+	*selector = vsel;
+
 	/* write to the register in case we found a match */
 	if (vsel == tps->info[dcdc]->table_len)
 		return -EINVAL;
@@ -371,7 +374,7 @@
 }
 
 static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, ldo = rdev_get_id(dev);
@@ -396,6 +399,8 @@
 	if (vsel == tps->info[ldo]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
 	if (data < 0)
 		return data;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 020f587..0647552 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -369,7 +369,8 @@
 }
 
 static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+					  int min_uV, int max_uV,
+					  unsigned *selector)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, dcdc = rdev_get_id(dev);
@@ -415,6 +416,8 @@
 	if (vsel == tps->info[dcdc]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps6507x_pmic_reg_read(tps, reg);
 	if (data < 0)
 		return data;
@@ -450,7 +453,8 @@
 }
 
 static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+					 int min_uV, int max_uV,
+					 unsigned *selector)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, ldo = rdev_get_id(dev);
@@ -483,6 +487,8 @@
 	if (vsel == tps->info[ldo]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps6507x_pmic_reg_read(tps, reg);
 	if (data < 0)
 		return data;
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
new file mode 100644
index 0000000..176a6be
--- /dev/null
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -0,0 +1,693 @@
+/*
+ * Regulator driver for TPS6524x PMIC
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#define REG_LDO_SET		0x0
+#define LDO_ILIM_MASK		1	/* 0 = 400-800, 1 = 900-1500 */
+#define LDO_VSEL_MASK		0x0f
+#define LDO2_ILIM_SHIFT		12
+#define LDO2_VSEL_SHIFT		4
+#define LDO1_ILIM_SHIFT		8
+#define LDO1_VSEL_SHIFT		0
+
+#define REG_BLOCK_EN		0x1
+#define BLOCK_MASK		1
+#define BLOCK_LDO1_SHIFT	0
+#define BLOCK_LDO2_SHIFT	1
+#define BLOCK_LCD_SHIFT		2
+#define BLOCK_USB_SHIFT		3
+
+#define REG_DCDC_SET		0x2
+#define DCDC_VDCDC_MASK		0x1f
+#define DCDC_VDCDC1_SHIFT	0
+#define DCDC_VDCDC2_SHIFT	5
+#define DCDC_VDCDC3_SHIFT	10
+
+#define REG_DCDC_EN		0x3
+#define DCDCDCDC_EN_MASK	0x1
+#define DCDCDCDC1_EN_SHIFT	0
+#define DCDCDCDC1_PG_MSK	BIT(1)
+#define DCDCDCDC2_EN_SHIFT	2
+#define DCDCDCDC2_PG_MSK	BIT(3)
+#define DCDCDCDC3_EN_SHIFT	4
+#define DCDCDCDC3_PG_MSK	BIT(5)
+
+#define REG_USB			0x4
+#define USB_ILIM_SHIFT		0
+#define USB_ILIM_MASK		0x3
+#define USB_TSD_SHIFT		2
+#define USB_TSD_MASK		0x3
+#define USB_TWARN_SHIFT		4
+#define USB_TWARN_MASK		0x3
+#define USB_IWARN_SD		BIT(6)
+#define USB_FAST_LOOP		BIT(7)
+
+#define REG_ALARM		0x5
+#define ALARM_LDO1		BIT(0)
+#define ALARM_DCDC1		BIT(1)
+#define ALARM_DCDC2		BIT(2)
+#define ALARM_DCDC3		BIT(3)
+#define ALARM_LDO2		BIT(4)
+#define ALARM_USB_WARN		BIT(5)
+#define ALARM_USB_ALARM		BIT(6)
+#define ALARM_LCD		BIT(9)
+#define ALARM_TEMP_WARM		BIT(10)
+#define ALARM_TEMP_HOT		BIT(11)
+#define ALARM_NRST		BIT(14)
+#define ALARM_POWERUP		BIT(15)
+
+#define REG_INT_ENABLE		0x6
+#define INT_LDO1		BIT(0)
+#define INT_DCDC1		BIT(1)
+#define INT_DCDC2		BIT(2)
+#define INT_DCDC3		BIT(3)
+#define INT_LDO2		BIT(4)
+#define INT_USB_WARN		BIT(5)
+#define INT_USB_ALARM		BIT(6)
+#define INT_LCD			BIT(9)
+#define INT_TEMP_WARM		BIT(10)
+#define INT_TEMP_HOT		BIT(11)
+#define INT_GLOBAL_EN		BIT(15)
+
+#define REG_INT_STATUS		0x7
+#define STATUS_LDO1		BIT(0)
+#define STATUS_DCDC1		BIT(1)
+#define STATUS_DCDC2		BIT(2)
+#define STATUS_DCDC3		BIT(3)
+#define STATUS_LDO2		BIT(4)
+#define STATUS_USB_WARN		BIT(5)
+#define STATUS_USB_ALARM	BIT(6)
+#define STATUS_LCD		BIT(9)
+#define STATUS_TEMP_WARM	BIT(10)
+#define STATUS_TEMP_HOT		BIT(11)
+
+#define REG_SOFTWARE_RESET	0xb
+#define REG_WRITE_ENABLE	0xd
+#define REG_REV_ID		0xf
+
+#define N_DCDC			3
+#define N_LDO			2
+#define N_SWITCH		2
+#define N_REGULATORS		(3 /* DCDC */ + \
+				 2 /* LDO */  + \
+				 2 /* switch */)
+
+#define FIXED_ILIMSEL		BIT(0)
+#define FIXED_VOLTAGE		BIT(1)
+
+#define CMD_READ(reg)		((reg) << 6)
+#define CMD_WRITE(reg)		(BIT(5) | (reg) << 6)
+#define STAT_CLK		BIT(3)
+#define STAT_WRITE		BIT(2)
+#define STAT_INVALID		BIT(1)
+#define STAT_WP			BIT(0)
+
+struct field {
+	int		reg;
+	int		shift;
+	int		mask;
+};
+
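+/*
+ * Per-supply description.  FIXED_VOLTAGE and FIXED_ILIMSEL mark supplies
+ * whose output voltage or current limit is not programmable; for those the
+ * fixed_* value is reported instead of a register field.
+ */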
+struct supply_info {
+	const char	*name;
+	int		n_voltages;
+	const int	*voltages;
+	int		fixed_voltage;
+	int		n_ilimsels;
+	const int	*ilimsels;
+	int		fixed_ilimsel;
+	int		flags;
+	struct field	enable, voltage, ilimsel;
+};
+
+struct tps6524x {
+	struct device		*dev;
+	struct spi_device	*spi;
+	struct mutex		lock;
+	struct regulator_desc	desc[N_REGULATORS];
+	struct regulator_dev	*rdev[N_REGULATORS];
+};
+
+static int __read_reg(struct tps6524x *hw, int reg)
+{
+	int error = 0;
+	u16 cmd = CMD_READ(reg), in;
+	u8 status;
+	struct spi_message m;
+	struct spi_transfer t[3];
+
+	spi_message_init(&m);
+	memset(t, 0, sizeof(t));
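+	/*
+	 * A read is three transfers: a 12-bit read command, a 16-bit data
+	 * word clocked back from the chip, and a 4-bit status nibble used
+	 * to detect protocol errors.
+	 */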
+
+	t[0].tx_buf = &cmd;
+	t[0].len = 2;
+	t[0].bits_per_word = 12;
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].rx_buf = &in;
+	t[1].len = 2;
+	t[1].bits_per_word = 16;
+	spi_message_add_tail(&t[1], &m);
+
+	t[2].rx_buf = &status;
+	t[2].len = 1;
+	t[2].bits_per_word = 4;
+	spi_message_add_tail(&t[2], &m);
+
+	error = spi_sync(hw->spi, &m);
+	if (error < 0)
+		return error;
+
+	dev_dbg(hw->dev, "read reg %d, data %x, status %x\n",
+		reg, in, status);
+
+	if (!(status & STAT_CLK) || (status & STAT_WRITE))
+		return -EIO;
+
+	if (status & STAT_INVALID)
+		return -EINVAL;
+
+	return in;
+}
+
+static int read_reg(struct tps6524x *hw, int reg)
+{
+	int ret;
+
+	mutex_lock(&hw->lock);
+	ret = __read_reg(hw, reg);
+	mutex_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int __write_reg(struct tps6524x *hw, int reg, int val)
+{
+	int error = 0;
+	u16 cmd = CMD_WRITE(reg), out = val;
+	u8 status;
+	struct spi_message m;
+	struct spi_transfer t[3];
+
+	spi_message_init(&m);
+	memset(t, 0, sizeof(t));
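+	/* same three-transfer layout as __read_reg: command, data, status */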
+
+	t[0].tx_buf = &cmd;
+	t[0].len = 2;
+	t[0].bits_per_word = 12;
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].tx_buf = &out;
+	t[1].len = 2;
+	t[1].bits_per_word = 16;
+	spi_message_add_tail(&t[1], &m);
+
+	t[2].rx_buf = &status;
+	t[2].len = 1;
+	t[2].bits_per_word = 4;
+	spi_message_add_tail(&t[2], &m);
+
+	error = spi_sync(hw->spi, &m);
+	if (error < 0)
+		return error;
+
+	dev_dbg(hw->dev, "wrote reg %d, data %x, status %x\n",
+		reg, out, status);
+
+	if (!(status & STAT_CLK) || !(status & STAT_WRITE))
+		return -EIO;
+
+	if (status & (STAT_INVALID | STAT_WP))
+		return -EINVAL;
+
+	return error;
+}
+
+static int __rmw_reg(struct tps6524x *hw, int reg, int mask, int val)
+{
+	int ret;
+
+	ret = __read_reg(hw, reg);
+	if (ret < 0)
+		return ret;
+
+	ret &= ~mask;
+	ret |= val;
+
+	ret = __write_reg(hw, reg, ret);
+
+	return (ret < 0) ? ret : 0;
+}
+
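+/*
+ * Writes are gated by REG_WRITE_ENABLE: set it, do the read-modify-write,
+ * then clear it again, keeping the whole sequence under the lock.
+ */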
+static int rmw_protect(struct tps6524x *hw, int reg, int mask, int val)
+{
+	int ret;
+
+	mutex_lock(&hw->lock);
+
+	ret = __write_reg(hw, REG_WRITE_ENABLE, 1);
+	if (ret) {
+		dev_err(hw->dev, "failed to set write enable\n");
+		goto error;
+	}
+
+	ret = __rmw_reg(hw, reg, mask, val);
+	if (ret)
+		dev_err(hw->dev, "failed to rmw register %d\n", reg);
+
+	ret = __write_reg(hw, REG_WRITE_ENABLE, 0);
+	if (ret) {
+		dev_err(hw->dev, "failed to clear write enable\n");
+		goto error;
+	}
+
+error:
+	mutex_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int read_field(struct tps6524x *hw, const struct field *field)
+{
+	int tmp;
+
+	tmp = read_reg(hw, field->reg);
+	if (tmp < 0)
+		return tmp;
+
+	return (tmp >> field->shift) & field->mask;
+}
+
+static int write_field(struct tps6524x *hw, const struct field *field,
+		       int val)
+{
+	if (val & ~field->mask)
+		return -EOVERFLOW;
+
+	return rmw_protect(hw, field->reg,
+				    field->mask << field->shift,
+				    val << field->shift);
+}
+
+static const int dcdc1_voltages[] = {
+	 800000,  825000,  850000,  875000,
+	 900000,  925000,  950000,  975000,
+	1000000, 1025000, 1050000, 1075000,
+	1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000,
+	1300000, 1325000, 1350000, 1375000,
+	1400000, 1425000, 1450000, 1475000,
+	1500000, 1525000, 1550000, 1575000,
+};
+
+static const int dcdc2_voltages[] = {
+	1400000, 1450000, 1500000, 1550000,
+	1600000, 1650000, 1700000, 1750000,
+	1800000, 1850000, 1900000, 1950000,
+	2000000, 2050000, 2100000, 2150000,
+	2200000, 2250000, 2300000, 2350000,
+	2400000, 2450000, 2500000, 2550000,
+	2600000, 2650000, 2700000, 2750000,
+	2800000, 2850000, 2900000, 2950000,
+};
+
+static const int dcdc3_voltages[] = {
+	2400000, 2450000, 2500000, 2550000, 2600000,
+	2650000, 2700000, 2750000, 2800000, 2850000,
+	2900000, 2950000, 3000000, 3050000, 3100000,
+	3150000, 3200000, 3250000, 3300000, 3350000,
+	3400000, 3450000, 3500000, 3550000, 3600000,
+};
+
+static const int ldo1_voltages[] = {
+	4300000, 4350000, 4400000, 4450000,
+	4500000, 4550000, 4600000, 4650000,
+	4700000, 4750000, 4800000, 4850000,
+	4900000, 4950000, 5000000, 5050000,
+};
+
+static const int ldo2_voltages[] = {
+	1100000, 1150000, 1200000, 1250000,
+	1300000, 1700000, 1750000, 1800000,
+	1850000, 1900000, 3150000, 3200000,
+	3250000, 3300000, 3350000, 3400000,
+};
+
+static const int ldo_ilimsel[] = {
+	400000, 1500000
+};
+
+static const int usb_ilimsel[] = {
+	200000, 400000, 800000, 1000000
+};
+
+#define __MK_FIELD(_reg, _mask, _shift) \
+	{ .reg = (_reg), .mask = (_mask), .shift = (_shift), }
+
+static const struct supply_info supply_info[N_REGULATORS] = {
+	{
+		.name		= "DCDC1",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc1_voltages),
+		.voltages	= dcdc1_voltages,
+		.fixed_ilimsel	= 2400000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					     DCDCDCDC1_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC1_SHIFT),
+	},
+	{
+		.name		= "DCDC2",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc2_voltages),
+		.voltages	= dcdc2_voltages,
+		.fixed_ilimsel	= 1200000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					     DCDCDCDC2_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC2_SHIFT),
+	},
+	{
+		.name		= "DCDC3",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc3_voltages),
+		.voltages	= dcdc3_voltages,
+		.fixed_ilimsel	= 1200000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					DCDCDCDC3_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC3_SHIFT),
+	},
+	{
+		.name		= "LDO1",
+		.n_voltages	= ARRAY_SIZE(ldo1_voltages),
+		.voltages	= ldo1_voltages,
+		.n_ilimsels	= ARRAY_SIZE(ldo_ilimsel),
+		.ilimsels	= ldo_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LDO1_SHIFT),
+		.voltage	= __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+					     LDO1_VSEL_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+					     LDO1_ILIM_SHIFT),
+	},
+	{
+		.name		= "LDO2",
+		.n_voltages	= ARRAY_SIZE(ldo2_voltages),
+		.voltages	= ldo2_voltages,
+		.n_ilimsels	= ARRAY_SIZE(ldo_ilimsel),
+		.ilimsels	= ldo_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LDO2_SHIFT),
+		.voltage	= __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+					     LDO2_VSEL_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+					     LDO2_ILIM_SHIFT),
+	},
+	{
+		.name		= "USB",
+		.flags		= FIXED_VOLTAGE,
+		.fixed_voltage	= 5000000,
+		.n_ilimsels	= ARRAY_SIZE(usb_ilimsel),
+		.ilimsels	= usb_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_USB_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_USB, USB_ILIM_MASK,
+					     USB_ILIM_SHIFT),
+	},
+	{
+		.name		= "LCD",
+		.flags		= FIXED_VOLTAGE | FIXED_ILIMSEL,
+		.fixed_voltage	= 5000000,
+		.fixed_ilimsel	=  400000,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LCD_SHIFT),
+	},
+};
+
+static int list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return selector ? -EINVAL : info->fixed_voltage;
+
+	return ((selector < info->n_voltages) ?
+		info->voltages[selector] : -EINVAL);
+}
+
+static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	unsigned i;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return -EINVAL;
+
+	for (i = 0; i < info->n_voltages; i++)
+		if (min_uV <= info->voltages[i] &&
+		    max_uV >= info->voltages[i])
+			break;
+
+	if (i >= info->n_voltages)
+		i = info->n_voltages - 1;
+
+	*selector = i;
+
+	return write_field(hw, &info->voltage, i);
+}
+
+static int get_voltage(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int ret;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return info->fixed_voltage;
+
+	ret = read_field(hw, &info->voltage);
+	if (ret < 0)
+		return ret;
+	if (WARN_ON(ret >= info->n_voltages))
+		return -EIO;
+
+	return info->voltages[ret];
+}
+
+static int set_current_limit(struct regulator_dev *rdev, int min_uA,
+			     int max_uA)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int i;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_ILIMSEL)
+		return -EINVAL;
+
+	for (i = 0; i < info->n_ilimsels; i++)
+		if (min_uA <= info->ilimsels[i] &&
+		    max_uA >= info->ilimsels[i])
+			break;
+
+	if (i >= info->n_ilimsels)
+		return -EINVAL;
+
+	return write_field(hw, &info->ilimsel, i);
+}
+
+static int get_current_limit(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int ret;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_ILIMSEL)
+		return info->fixed_ilimsel;
+
+	ret = read_field(hw, &info->ilimsel);
+	if (ret < 0)
+		return ret;
+	if (WARN_ON(ret >= info->n_ilimsels))
+		return -EIO;
+
+	return info->ilimsels[ret];
+}
+
+static int enable_supply(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return write_field(hw, &info->enable, 1);
+}
+
+static int disable_supply(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return write_field(hw, &info->enable, 0);
+}
+
+static int is_supply_enabled(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return read_field(hw, &info->enable);
+}
+
+static struct regulator_ops regulator_ops = {
+	.is_enabled		= is_supply_enabled,
+	.enable			= enable_supply,
+	.disable		= disable_supply,
+	.get_voltage		= get_voltage,
+	.set_voltage		= set_voltage,
+	.list_voltage		= list_voltage,
+	.set_current_limit	= set_current_limit,
+	.get_current_limit	= get_current_limit,
+};
+
+static int __devexit pmic_remove(struct spi_device *spi)
+{
+	struct tps6524x *hw = spi_get_drvdata(spi);
+	int i;
+
+	if (!hw)
+		return 0;
+	for (i = 0; i < N_REGULATORS; i++) {
+		if (hw->rdev[i])
+			regulator_unregister(hw->rdev[i]);
+		hw->rdev[i] = NULL;
+	}
+	spi_set_drvdata(spi, NULL);
+	kfree(hw);
+	return 0;
+}
+
+static int __devinit pmic_probe(struct spi_device *spi)
+{
+	struct tps6524x *hw;
+	struct device *dev = &spi->dev;
+	const struct supply_info *info = supply_info;
+	struct regulator_init_data *init_data;
+	int ret = 0, i;
+
+	init_data = dev->platform_data;
+	if (!init_data) {
+		dev_err(dev, "could not find regulator platform data\n");
+		return -EINVAL;
+	}
+
+	hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL);
+	if (!hw) {
+		dev_err(dev, "cannot allocate regulator private data\n");
+		return -ENOMEM;
+	}
+	spi_set_drvdata(spi, hw);
+
+	memset(hw, 0, sizeof(struct tps6524x));
+	hw->dev = dev;
+	hw->spi = spi_dev_get(spi);
+	mutex_init(&hw->lock);
+
+	for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
+		hw->desc[i].name	= info->name;
+		hw->desc[i].id		= i;
+		hw->desc[i].n_voltages	= info->n_voltages;
+		hw->desc[i].ops		= &regulator_ops;
+		hw->desc[i].type	= REGULATOR_VOLTAGE;
+		hw->desc[i].owner	= THIS_MODULE;
+
+		if (info->flags & FIXED_VOLTAGE)
+			hw->desc[i].n_voltages = 1;
+
+		hw->rdev[i] = regulator_register(&hw->desc[i], dev,
+						 init_data, hw);
+		if (IS_ERR(hw->rdev[i])) {
+			ret = PTR_ERR(hw->rdev[i]);
+			hw->rdev[i] = NULL;
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	pmic_remove(spi);
+	return ret;
+}
+
+static struct spi_driver pmic_driver = {
+	.probe		= pmic_probe,
+	.remove		= __devexit_p(pmic_remove),
+	.driver		= {
+		.name	= "tps6524x",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pmic_driver_init(void)
+{
+	return spi_register_driver(&pmic_driver);
+}
+module_init(pmic_driver_init);
+
+static void __exit pmic_driver_exit(void)
+{
+	spi_unregister_driver(&pmic_driver);
+}
+module_exit(pmic_driver_exit);
+
+MODULE_DESCRIPTION("TPS6524X PMIC Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tps6524x");
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 6d20b04..bb04a75 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -85,7 +85,8 @@
 
 static int __tps6586x_ldo_set_voltage(struct device *parent,
 				      struct tps6586x_regulator *ri,
-				      int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	int val, uV;
 	uint8_t mask;
@@ -100,6 +101,8 @@
 		/* use the first in-range value */
 		if (min_uV <= uV && uV <= max_uV) {
 
+			*selector = val;
+
 			val <<= ri->volt_shift;
 			mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
 
@@ -111,12 +114,13 @@
 }
 
 static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
 
-	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+					  selector);
 }
 
 static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
@@ -140,13 +144,14 @@
 }
 
 static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
 	int ret;
 
-	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+					 selector);
 	if (ret)
 		return ret;
 
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index a57262a..bd332cf 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -329,7 +329,8 @@
 }
 
 static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			vsel;
@@ -345,9 +346,11 @@
 		/* REVISIT for VAUX2, first match may not be best/lowest */
 
 		/* use the first in-range value */
-		if (min_uV <= uV && uV <= max_uV)
+		if (min_uV <= uV && uV <= max_uV) {
+			*selector = vsel;
 			return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
 							VREG_VOLTAGE, vsel);
+		}
 	}
 
 	return -EDOM;
@@ -389,7 +392,8 @@
 }
 
 static int
-twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			vsel;
@@ -402,6 +406,7 @@
 	 * mV = 1000mv + 100mv * (vsel - 1)
 	 */
 	vsel = (min_uV/1000 - 1000)/100 + 1;
+	*selector = vsel;
 	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
 
 }
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index dbfaf59..8b0d2c4 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -302,7 +302,7 @@
 }
 
 static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -314,6 +314,8 @@
 	if (vsel < 0)
 		return vsel;
 
+	*selector = vsel;
+
 	/* If this value is already set then do a GPIO update if we can */
 	if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
 		return wm831x_buckv_set_dvs(rdev, 0);
@@ -375,14 +377,14 @@
 	return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
 }
 
-static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 
 	if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
-		return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
+		return dcdc->dvs_vsel;
 	else
-		return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
+		return dcdc->on_vsel;
 }
 
 /* Current limit options */
@@ -424,7 +426,7 @@
 
 static struct regulator_ops wm831x_buckv_ops = {
 	.set_voltage = wm831x_buckv_set_voltage,
-	.get_voltage = wm831x_buckv_get_voltage,
+	.get_voltage_sel = wm831x_buckv_get_voltage_sel,
 	.list_voltage = wm831x_buckv_list_voltage,
 	.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
 	.set_current_limit = wm831x_buckv_set_current_limit,
@@ -636,7 +638,7 @@
 }
 
 static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
-					int min_uV, int max_uV)
+					int min_uV, int max_uV, int *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -650,16 +652,20 @@
 	if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_buckp_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV,
+				    unsigned *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
 
-	return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV,
+					    selector);
 }
 
 static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
@@ -667,11 +673,12 @@
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
+	unsigned selector;
 
-	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_buckp_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -682,12 +689,12 @@
 	if (val < 0)
 		return val;
 
-	return wm831x_buckp_list_voltage(rdev, val & WM831X_DC3_ON_VSEL_MASK);
+	return val & WM831X_DC3_ON_VSEL_MASK;
 }
 
 static struct regulator_ops wm831x_buckp_ops = {
 	.set_voltage = wm831x_buckp_set_voltage,
-	.get_voltage = wm831x_buckp_get_voltage,
+	.get_voltage_sel = wm831x_buckp_get_voltage_sel,
 	.list_voltage = wm831x_buckp_list_voltage,
 	.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
 
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 9edf8f6..c94fc5b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -113,7 +113,8 @@
 }
 
 static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
-					 int min_uV, int max_uV)
+					 int min_uV, int max_uV,
+					 unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -133,16 +134,20 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+				     int min_uV, int max_uV,
+				     unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
 
-	return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+					     selector);
 }
 
 static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -150,11 +155,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+	unsigned int selector;
 
-	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_gp_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -167,7 +173,7 @@
 
 	ret &= WM831X_LDO1_ON_VSEL_MASK;
 
-	return wm831x_gp_ldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
@@ -287,7 +293,7 @@
 
 static struct regulator_ops wm831x_gp_ldo_ops = {
 	.list_voltage = wm831x_gp_ldo_list_voltage,
-	.get_voltage = wm831x_gp_ldo_get_voltage,
+	.get_voltage_sel = wm831x_gp_ldo_get_voltage_sel,
 	.set_voltage = wm831x_gp_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
 	.get_mode = wm831x_gp_ldo_get_mode,
@@ -413,7 +419,8 @@
 }
 
 static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
-					 int min_uV, int max_uV)
+				       int min_uV, int max_uV,
+				       unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -433,16 +440,19 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_aldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
 
-	return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+					   selector);
 }
 
 static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -450,11 +460,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+	unsigned int selector;
 
-	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_aldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_aldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -467,7 +478,7 @@
 
 	ret &= WM831X_LDO7_ON_VSEL_MASK;
 
-	return wm831x_aldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
@@ -548,7 +559,7 @@
 
 static struct regulator_ops wm831x_aldo_ops = {
 	.list_voltage = wm831x_aldo_list_voltage,
-	.get_voltage = wm831x_aldo_get_voltage,
+	.get_voltage_sel = wm831x_aldo_get_voltage_sel,
 	.set_voltage = wm831x_aldo_set_voltage,
 	.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
 	.get_mode = wm831x_aldo_get_mode,
@@ -666,7 +677,8 @@
 
 static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
 					    int reg,
-					    int min_uV, int max_uV)
+					    int min_uV, int max_uV,
+					    unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -680,16 +692,20 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
 
-	return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+						selector);
 }
 
 static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -697,11 +713,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
+	unsigned selector;
 
-	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_alive_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -714,7 +731,7 @@
 
 	ret &= WM831X_LDO11_ON_VSEL_MASK;
 
-	return wm831x_alive_ldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
@@ -736,7 +753,7 @@
 
 static struct regulator_ops wm831x_alive_ldo_ops = {
 	.list_voltage = wm831x_alive_ldo_list_voltage,
-	.get_voltage = wm831x_alive_ldo_get_voltage,
+	.get_voltage_sel = wm831x_alive_ldo_get_voltage_sel,
 	.set_voltage = wm831x_alive_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
 	.get_status = wm831x_alive_ldo_get_status,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index fe4b8a8..1bcb22c 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -360,7 +360,7 @@
 EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
 
 static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
-	int max_uV)
+				   int max_uV, unsigned *selector)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, dcdc = rdev_get_id(rdev), mV,
@@ -397,17 +397,18 @@
 		return -EINVAL;
 	}
 
+	*selector = mV;
+
 	/* all DCDCs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
 	wm8350_reg_write(wm8350, volt_reg, val | mV);
 	return 0;
 }
 
-static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev)
+static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, dcdc = rdev_get_id(rdev);
-	u16 val;
 
 	switch (dcdc) {
 	case WM8350_DCDC_1:
@@ -429,8 +430,7 @@
 	}
 
 	/* all DCDCs have same mV bits */
-	val = wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
-	return wm8350_dcdc_val_to_mvolts(val) * 1000;
+	return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
 }
 
 static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev,
@@ -754,7 +754,7 @@
 }
 
 static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
-	int max_uV)
+				  int max_uV, unsigned *selector)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000,
@@ -797,17 +797,18 @@
 		return -EINVAL;
 	}
 
+	*selector = mV;
+
 	/* all LDOs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
 	wm8350_reg_write(wm8350, volt_reg, val | mV);
 	return 0;
 }
 
-static int wm8350_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, ldo = rdev_get_id(rdev);
-	u16 val;
 
 	switch (ldo) {
 	case WM8350_LDO_1:
@@ -827,8 +828,7 @@
 	}
 
 	/* all LDOs have same mV bits */
-	val = wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
-	return wm8350_ldo_val_to_mvolts(val) * 1000;
+	return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
 }
 
 static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
@@ -1225,7 +1225,7 @@
 
 static struct regulator_ops wm8350_dcdc_ops = {
 	.set_voltage = wm8350_dcdc_set_voltage,
-	.get_voltage = wm8350_dcdc_get_voltage,
+	.get_voltage_sel = wm8350_dcdc_get_voltage_sel,
 	.list_voltage = wm8350_dcdc_list_voltage,
 	.enable = wm8350_dcdc_enable,
 	.disable = wm8350_dcdc_disable,
@@ -1249,7 +1249,7 @@
 
 static struct regulator_ops wm8350_ldo_ops = {
 	.set_voltage = wm8350_ldo_set_voltage,
-	.get_voltage = wm8350_ldo_get_voltage,
+	.get_voltage_sel = wm8350_ldo_get_voltage_sel,
 	.list_voltage = wm8350_ldo_list_voltage,
 	.enable = wm8350_ldo_enable,
 	.disable = wm8350_ldo_disable,
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 924c7eb..b42d01c 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -67,7 +67,7 @@
 }
 
 static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -93,6 +93,8 @@
 		val += 0xf;
 	}
 
+	*selector = val;
+
 	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
 			       WM8400_LDO1_VSEL_MASK, val);
 }
@@ -156,7 +158,7 @@
 }
 
 static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -171,6 +173,8 @@
 		return -EINVAL;
 	BUG_ON(850000 + (25000 * val) < min_uV);
 
+	*selector = val;
+
 	return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
 			       WM8400_DC1_VSEL_MASK, val);
 }
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 03713bc..35b2958 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -86,7 +86,7 @@
 	return (selector * 100000) + 2400000;
 }
 
-static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo1_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int val;
@@ -95,13 +95,11 @@
 	if (val < 0)
 		return val;
 
-	val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
-
-	return wm8994_ldo1_list_voltage(rdev, val);
+	return (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
 }
 
 static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *s)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int selector, v;
@@ -111,6 +109,7 @@
 	if (v < 0 || v > max_uV)
 		return -EINVAL;
 
+	*s = selector;
 	selector <<= WM8994_LDO1_VSEL_SHIFT;
 
 	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
@@ -124,20 +123,29 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo1_list_voltage,
-	.get_voltage = wm8994_ldo1_get_voltage,
+	.get_voltage_sel = wm8994_ldo1_get_voltage_sel,
 	.set_voltage = wm8994_ldo1_set_voltage,
 };
 
 static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
 				    unsigned int selector)
 {
+	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
 	if (selector > WM8994_LDO2_MAX_SELECTOR)
 		return -EINVAL;
 
-	return (selector * 100000) + 900000;
+	switch (ldo->wm8994->type) {
+	case WM8994:
+		return (selector * 100000) + 900000;
+	case WM8958:
+		return (selector * 100000) + 1000000;
+	default:
+		return -EINVAL;
+	}
 }
 
-static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo2_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int val;
@@ -146,22 +154,31 @@
 	if (val < 0)
 		return val;
 
-	val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
-
-	return wm8994_ldo2_list_voltage(rdev, val);
+	return (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
 }
 
 static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *s)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int selector, v;
 
-	selector = (min_uV - 900000) / 100000;
+	switch (ldo->wm8994->type) {
+	case WM8994:
+		selector = (min_uV - 900000) / 100000;
+		break;
+	case WM8958:
+		selector = (min_uV - 1000000) / 100000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	v = wm8994_ldo2_list_voltage(rdev, selector);
 	if (v < 0 || v > max_uV)
 		return -EINVAL;
 
+	*s = selector;
 	selector <<= WM8994_LDO2_VSEL_SHIFT;
 
 	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
@@ -175,7 +192,7 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo2_list_voltage,
-	.get_voltage = wm8994_ldo2_get_voltage,
+	.get_voltage_sel = wm8994_ldo2_get_voltage_sel,
 	.set_voltage = wm8994_ldo2_set_voltage,
 };
 
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cb..9583cbc 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
 #include <linux/kdev_t.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "rtc-core.h"
 
@@ -152,6 +153,18 @@
 	spin_lock_init(&rtc->irq_task_lock);
 	init_waitqueue_head(&rtc->irq_queue);
 
+	/* Init timerqueue */
+	timerqueue_init_head(&rtc->timerqueue);
+	INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+	/* Init aie timer */
+	rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
+	/* Init uie timer */
+	rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
+	/* Init pie timer */
+	hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtc->pie_timer.function = rtc_pie_update_irq;
+	rtc->pie_enabled = 0;
+
 	strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
 	dev_set_name(&rtc->dev, "rtc%d", id);
 
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c8162..90384b9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,6 +14,21 @@
 #include <linux/rtc.h>
 #include <linux/sched.h>
 #include <linux/log2.h>
+#include <linux/workqueue.h>
+
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+	int err;
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->read_time)
+		err = -EINVAL;
+	else {
+		memset(tm, 0, sizeof(struct rtc_time));
+		err = rtc->ops->read_time(rtc->dev.parent, tm);
+	}
+	return err;
+}
 
 int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
@@ -23,15 +38,7 @@
 	if (err)
 		return err;
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->read_time)
-		err = -EINVAL;
-	else {
-		memset(tm, 0, sizeof(struct rtc_time));
-		err = rtc->ops->read_time(rtc->dev.parent, tm);
-	}
-
+	err = __rtc_read_time(rtc, tm);
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
@@ -106,189 +113,55 @@
 }
 EXPORT_SYMBOL_GPL(rtc_set_mmss);
 
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
 
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
-	if (rtc->ops == NULL)
-		err = -ENODEV;
-	else if (!rtc->ops->read_alarm)
-		err = -EINVAL;
-	else {
-		memset(alarm, 0, sizeof(struct rtc_wkalrm));
-		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
-	}
-
+	alarm->enabled = rtc->aie_timer.enabled;
+	if (alarm->enabled)
+		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
 	mutex_unlock(&rtc->ops_lock);
-	return err;
-}
 
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
-	int err;
-	struct rtc_time before, now;
-	int first_time = 1;
-	unsigned long t_now, t_alm;
-	enum { none, day, month, year } missing = none;
-	unsigned days;
-
-	/* The lower level RTC driver may return -1 in some fields,
-	 * creating invalid alarm->time values, for reasons like:
-	 *
-	 *   - The hardware may not be capable of filling them in;
-	 *     many alarms match only on time-of-day fields, not
-	 *     day/month/year calendar data.
-	 *
-	 *   - Some hardware uses illegal values as "wildcard" match
-	 *     values, which non-Linux firmware (like a BIOS) may try
-	 *     to set up as e.g. "alarm 15 minutes after each hour".
-	 *     Linux uses only oneshot alarms.
-	 *
-	 * When we see that here, we deal with it by using values from
-	 * a current RTC timestamp for any missing (-1) values.  The
-	 * RTC driver prevents "periodic alarm" modes.
-	 *
-	 * But this can be racey, because some fields of the RTC timestamp
-	 * may have wrapped in the interval since we read the RTC alarm,
-	 * which would lead to us inserting inconsistent values in place
-	 * of the -1 fields.
-	 *
-	 * Reading the alarm and timestamp in the reverse sequence
-	 * would have the same race condition, and not solve the issue.
-	 *
-	 * So, we must first read the RTC timestamp,
-	 * then read the RTC alarm value,
-	 * and then read a second RTC timestamp.
-	 *
-	 * If any fields of the second timestamp have changed
-	 * when compared with the first timestamp, then we know
-	 * our timestamp may be inconsistent with that used by
-	 * the low-level rtc_read_alarm_internal() function.
-	 *
-	 * So, when the two timestamps disagree, we just loop and do
-	 * the process again to get a fully consistent set of values.
-	 *
-	 * This could all instead be done in the lower level driver,
-	 * but since more than one lower level RTC implementation needs it,
-	 * then it's probably best best to do it here instead of there..
-	 */
-
-	/* Get the "before" timestamp */
-	err = rtc_read_time(rtc, &before);
-	if (err < 0)
-		return err;
-	do {
-		if (!first_time)
-			memcpy(&before, &now, sizeof(struct rtc_time));
-		first_time = 0;
-
-		/* get the RTC alarm values, which may be incomplete */
-		err = rtc_read_alarm_internal(rtc, alarm);
-		if (err)
-			return err;
-		if (!alarm->enabled)
-			return 0;
-
-		/* full-function RTCs won't have such missing fields */
-		if (rtc_valid_tm(&alarm->time) == 0)
-			return 0;
-
-		/* get the "after" timestamp, to detect wrapped fields */
-		err = rtc_read_time(rtc, &now);
-		if (err < 0)
-			return err;
-
-		/* note that tm_sec is a "don't care" value here: */
-	} while (   before.tm_min   != now.tm_min
-		 || before.tm_hour  != now.tm_hour
-		 || before.tm_mon   != now.tm_mon
-		 || before.tm_year  != now.tm_year);
-
-	/* Fill in the missing alarm fields using the timestamp; we
-	 * know there's at least one since alarm->time is invalid.
-	 */
-	if (alarm->time.tm_sec == -1)
-		alarm->time.tm_sec = now.tm_sec;
-	if (alarm->time.tm_min == -1)
-		alarm->time.tm_min = now.tm_min;
-	if (alarm->time.tm_hour == -1)
-		alarm->time.tm_hour = now.tm_hour;
-
-	/* For simplicity, only support date rollover for now */
-	if (alarm->time.tm_mday == -1) {
-		alarm->time.tm_mday = now.tm_mday;
-		missing = day;
-	}
-	if (alarm->time.tm_mon == -1) {
-		alarm->time.tm_mon = now.tm_mon;
-		if (missing == none)
-			missing = month;
-	}
-	if (alarm->time.tm_year == -1) {
-		alarm->time.tm_year = now.tm_year;
-		if (missing == none)
-			missing = year;
-	}
-
-	/* with luck, no rollover is needed */
-	rtc_tm_to_time(&now, &t_now);
-	rtc_tm_to_time(&alarm->time, &t_alm);
-	if (t_now < t_alm)
-		goto done;
-
-	switch (missing) {
-
-	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
-	 * that will trigger at 5am will do so at 5am Tuesday, which
-	 * could also be in the next month or year.  This is a common
-	 * case, especially for PCs.
-	 */
-	case day:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
-		t_alm += 24 * 60 * 60;
-		rtc_time_to_tm(t_alm, &alarm->time);
-		break;
-
-	/* Month rollover ... if it's the 31th, an alarm on the 3rd will
-	 * be next month.  An alarm matching on the 30th, 29th, or 28th
-	 * may end up in the month after that!  Many newer PCs support
-	 * this type of alarm.
-	 */
-	case month:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
-		do {
-			if (alarm->time.tm_mon < 11)
-				alarm->time.tm_mon++;
-			else {
-				alarm->time.tm_mon = 0;
-				alarm->time.tm_year++;
-			}
-			days = rtc_month_days(alarm->time.tm_mon,
-					alarm->time.tm_year);
-		} while (days < alarm->time.tm_mday);
-		break;
-
-	/* Year rollover ... easy except for leap years! */
-	case year:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
-		do {
-			alarm->time.tm_year++;
-		} while (rtc_valid_tm(&alarm->time) != 0);
-		break;
-
-	default:
-		dev_warn(&rtc->dev, "alarm rollover not handled\n");
-	}
-
-done:
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+	struct rtc_time tm;
+	long now, scheduled;
+	int err;
+
+	err = rtc_valid_tm(&alarm->time);
+	if (err)
+		return err;
+	rtc_tm_to_time(&alarm->time, &scheduled);
+
+	/* Make sure we're not setting alarms in the past */
+	err = __rtc_read_time(rtc, &tm);
+	rtc_tm_to_time(&tm, &now);
+	if (scheduled <= now)
+		return -ETIME;
+	/*
+	 * XXX - We just checked to make sure the alarm time is not
+	 * in the past, but there is still a race window where, if the
+	 * alarm is set for the next second, the second may tick over
+	 * right here, before we set the alarm.
+	 */
+
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->set_alarm)
+		err = -EINVAL;
+	else
+		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+	return err;
+}
+
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
@@ -300,16 +173,18 @@
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->set_alarm)
-		err = -EINVAL;
-	else
-		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+	if (rtc->aie_timer.enabled) {
+		rtc_timer_remove(rtc, &rtc->aie_timer);
+		rtc->aie_timer.enabled = 0;
+	}
+	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+	rtc->aie_timer.period = ktime_set(0, 0);
+	if (alarm->enabled) {
+		rtc->aie_timer.enabled = 1;
+		rtc_timer_enqueue(rtc, &rtc->aie_timer);
+	}
 	mutex_unlock(&rtc->ops_lock);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
@@ -319,6 +194,16 @@
 	if (err)
 		return err;
 
+	if (rtc->aie_timer.enabled != enabled) {
+		if (enabled) {
+			rtc->aie_timer.enabled = 1;
+			rtc_timer_enqueue(rtc, &rtc->aie_timer);
+		} else {
+			rtc_timer_remove(rtc, &rtc->aie_timer);
+			rtc->aie_timer.enabled = 0;
+		}
+	}
+
 	if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->alarm_irq_enable)
@@ -337,38 +222,114 @@
 	if (err)
 		return err;
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	if (enabled == 0 && rtc->uie_irq_active) {
-		mutex_unlock(&rtc->ops_lock);
-		return rtc_dev_update_irq_enable_emul(rtc, enabled);
+	/* make sure we're changing state */
+	if (rtc->uie_rtctimer.enabled == enabled)
+		goto out;
+
+	if (enabled) {
+		struct rtc_time tm;
+		ktime_t now, onesec;
+
+		__rtc_read_time(rtc, &tm);
+		onesec = ktime_set(1, 0);
+		now = rtc_tm_to_ktime(tm);
+		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+		rtc->uie_rtctimer.period = ktime_set(1, 0);
+		rtc->uie_rtctimer.enabled = 1;
+		rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+	} else {
+		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+		rtc->uie_rtctimer.enabled = 0;
 	}
-#endif
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->update_irq_enable)
-		err = -EINVAL;
-	else
-		err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
-
+out:
 	mutex_unlock(&rtc->ops_lock);
-
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	/*
-	 * Enable emulation if the driver did not provide
-	 * the update_irq_enable function pointer or if returned
-	 * -EINVAL to signal that it has been configured without
-	 * interrupts or that are not available at the moment.
-	 */
-	if (err == -EINVAL)
-		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
-#endif
 	return err;
+
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
 
+
 /**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occurred (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
+ */
+static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+{
+	unsigned long flags;
+
+	/* mark one irq of the appropriate mode */
+	spin_lock_irqsave(&rtc->irq_lock, flags);
+	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
+	spin_unlock_irqrestore(&rtc->irq_lock, flags);
+
+	/* call the task func */
+	spin_lock_irqsave(&rtc->irq_task_lock, flags);
+	if (rtc->irq_task)
+		rtc->irq_task->func(rtc->irq_task->private_data);
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+
+	wake_up_interruptible(&rtc->irq_queue);
+	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
+}
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the uie_timer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+	struct rtc_device *rtc;
+	ktime_t period;
+	int count;
+	rtc = container_of(timer, struct rtc_device, pie_timer);
+
+	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+	count = hrtimer_forward_now(timer, period);
+
+	rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+	return HRTIMER_RESTART;
+}
+
+/**
+ * rtc_update_irq - Triggered when an RTC interrupt occurs.
  * @rtc: the rtc device
  * @num: how many irqs are being reported (usually one)
  * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
@@ -377,19 +338,7 @@
 void rtc_update_irq(struct rtc_device *rtc,
 		unsigned long num, unsigned long events)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
-	spin_unlock_irqrestore(&rtc->irq_lock, flags);
-
-	spin_lock_irqsave(&rtc->irq_task_lock, flags);
-	if (rtc->irq_task)
-		rtc->irq_task->func(rtc->irq_task->private_data);
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
-	wake_up_interruptible(&rtc->irq_queue);
-	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
+	schedule_work(&rtc->irqwork);
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq);
 
@@ -477,18 +426,20 @@
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_state == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
-	if (err == 0)
-		err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+	if (enabled) {
+		ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+	} else {
+		hrtimer_cancel(&rtc->pie_timer);
+	}
+	rtc->pie_enabled = enabled;
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
 	return err;
 }
@@ -509,21 +460,194 @@
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_freq == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
 	if (err == 0) {
-		err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
-		if (err == 0)
-			rtc->irq_freq = freq;
+		rtc->irq_freq = freq;
+		if (rtc->pie_enabled) {
+			ktime_t period;
+			hrtimer_cancel(&rtc->pie_timer);
+			period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+			hrtimer_start(&rtc->pie_timer, period,
+					HRTIMER_MODE_REL);
+		}
 	}
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being added.
+ *
+ * Enqueues a timer onto the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	timerqueue_add(&rtc->timerqueue, &timer->node);
+	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(timer->node.expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being removed.
+ *
+ * Removes a timer from the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+	timerqueue_del(&rtc->timerqueue, &timer->node);
+
+	if (next == &timer->node) {
+		struct rtc_wkalrm alarm;
+		int err;
+		next = timerqueue_getnext(&rtc->timerqueue);
+		if (!next)
+			return;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @work: the rtc device's irqwork item
+ *
+ * Expires rtc timers. Reprograms next alarm event if needed.
+ * Called via the workqueue.
+ *
+ * Serializes access to timerqueue via ops_lock mutex
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+	struct rtc_timer *timer;
+	struct timerqueue_node *next;
+	ktime_t now;
+	struct rtc_time tm;
+
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, irqwork);
+
+	mutex_lock(&rtc->ops_lock);
+again:
+	__rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+		if (next->expires.tv64 > now.tv64)
+			break;
+
+		/* expire timer */
+		timer = container_of(next, struct rtc_timer, node);
+		timerqueue_del(&rtc->timerqueue, &timer->node);
+		timer->enabled = 0;
+		if (timer->task.func)
+			timer->task.func(timer->task.private_data);
+
+		/* Re-add/fwd periodic timers */
+		if (ktime_to_ns(timer->period)) {
+			timer->node.expires = ktime_add(timer->node.expires,
+							timer->period);
+			timer->enabled = 1;
+			timerqueue_add(&rtc->timerqueue, &timer->node);
+		}
+	}
+
+	/* Set next alarm */
+	if (next) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			goto again;
+	}
+
+	mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be initialized
+ * @f: function pointer to be called when timer fires
+ * @data: private data passed to function pointer
+ *
+ * Kernel interface to initialize an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
+{
+	timerqueue_init(&timer->node);
+	timer->enabled = 0;
+	timer->task.func = f;
+	timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @rtc: rtc device to be used
+ * @timer: timer being set
+ * @expires: time at which to expire the timer
+ * @period: period at which the timer recurs
+ *
+ * Kernel interface to set an rtc_timer
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+			ktime_t expires, ktime_t period)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+
+	timer->node.expires = expires;
+	timer->period = period;
+
+	timer->enabled = 1;
+	rtc_timer_enqueue(rtc, timer);
+
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @rtc: rtc device to be used
+ * @timer: timer being cancelled
+ *
+ * Kernel interface to cancel an rtc_timer
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+	timer->enabled = 0;
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167..c7ff8df 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,6 +36,7 @@
 #include <linux/platform_device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/log2.h>
+#include <linux/pm.h>
 
 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
 #include <asm-generic/rtc.h>
@@ -687,7 +688,8 @@
 #if	defined(CONFIG_ATARI)
 	address_space = 64;
 #elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
-			|| defined(__sparc__) || defined(__mips__)
+			|| defined(__sparc__) || defined(__mips__) \
+			|| defined(__powerpc__)
 	address_space = 128;
 #else
 #warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -850,7 +852,7 @@
 
 #ifdef	CONFIG_PM
 
-static int cmos_suspend(struct device *dev, pm_message_t mesg)
+static int cmos_suspend(struct device *dev)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 	unsigned char	tmp;
@@ -898,7 +900,7 @@
  */
 static inline int cmos_poweroff(struct device *dev)
 {
-	return cmos_suspend(dev, PMSG_HIBERNATE);
+	return cmos_suspend(dev);
 }
 
 static int cmos_resume(struct device *dev)
@@ -945,9 +947,9 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+
 #else
-#define	cmos_suspend	NULL
-#define	cmos_resume	NULL
 
 static inline int cmos_poweroff(struct device *dev)
 {
@@ -1077,7 +1079,7 @@
 
 static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
 {
-	return cmos_suspend(&pnp->dev, mesg);
+	return cmos_suspend(&pnp->dev);
 }
 
 static int cmos_pnp_resume(struct pnp_dev *pnp)
@@ -1157,8 +1159,9 @@
 	.shutdown	= cmos_platform_shutdown,
 	.driver = {
 		.name		= (char *) driver_name,
-		.suspend	= cmos_suspend,
-		.resume		= cmos_resume,
+#ifdef CONFIG_PM
+		.pm		= &cmos_pm_ops,
+#endif
 	}
 };
 
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 62227cd..212b16e 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,105 +46,6 @@
 	return err;
 }
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-/*
- * Routine to poll RTC seconds field for change as often as possible,
- * after first RTC_UIE use timer to reduce polling
- */
-static void rtc_uie_task(struct work_struct *work)
-{
-	struct rtc_device *rtc =
-		container_of(work, struct rtc_device, uie_task);
-	struct rtc_time tm;
-	int num = 0;
-	int err;
-
-	err = rtc_read_time(rtc, &tm);
-
-	spin_lock_irq(&rtc->irq_lock);
-	if (rtc->stop_uie_polling || err) {
-		rtc->uie_task_active = 0;
-	} else if (rtc->oldsecs != tm.tm_sec) {
-		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
-		rtc->oldsecs = tm.tm_sec;
-		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
-		rtc->uie_timer_active = 1;
-		rtc->uie_task_active = 0;
-		add_timer(&rtc->uie_timer);
-	} else if (schedule_work(&rtc->uie_task) == 0) {
-		rtc->uie_task_active = 0;
-	}
-	spin_unlock_irq(&rtc->irq_lock);
-	if (num)
-		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
-}
-static void rtc_uie_timer(unsigned long data)
-{
-	struct rtc_device *rtc = (struct rtc_device *)data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->uie_timer_active = 0;
-	rtc->uie_task_active = 1;
-	if ((schedule_work(&rtc->uie_task) == 0))
-		rtc->uie_task_active = 0;
-	spin_unlock_irqrestore(&rtc->irq_lock, flags);
-}
-
-static int clear_uie(struct rtc_device *rtc)
-{
-	spin_lock_irq(&rtc->irq_lock);
-	if (rtc->uie_irq_active) {
-		rtc->stop_uie_polling = 1;
-		if (rtc->uie_timer_active) {
-			spin_unlock_irq(&rtc->irq_lock);
-			del_timer_sync(&rtc->uie_timer);
-			spin_lock_irq(&rtc->irq_lock);
-			rtc->uie_timer_active = 0;
-		}
-		if (rtc->uie_task_active) {
-			spin_unlock_irq(&rtc->irq_lock);
-			flush_scheduled_work();
-			spin_lock_irq(&rtc->irq_lock);
-		}
-		rtc->uie_irq_active = 0;
-	}
-	spin_unlock_irq(&rtc->irq_lock);
-	return 0;
-}
-
-static int set_uie(struct rtc_device *rtc)
-{
-	struct rtc_time tm;
-	int err;
-
-	err = rtc_read_time(rtc, &tm);
-	if (err)
-		return err;
-	spin_lock_irq(&rtc->irq_lock);
-	if (!rtc->uie_irq_active) {
-		rtc->uie_irq_active = 1;
-		rtc->stop_uie_polling = 0;
-		rtc->oldsecs = tm.tm_sec;
-		rtc->uie_task_active = 1;
-		if (schedule_work(&rtc->uie_task) == 0)
-			rtc->uie_task_active = 0;
-	}
-	rtc->irq_data = 0;
-	spin_unlock_irq(&rtc->irq_lock);
-	return 0;
-}
-
-int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
-{
-	if (enabled)
-		return set_uie(rtc);
-	else
-		return clear_uie(rtc);
-}
-EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
-
-#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -493,11 +394,6 @@
 
 	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	INIT_WORK(&rtc->uie_task, rtc_uie_task);
-	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
-#endif
-
 	cdev_init(&rtc->char_dev, &rtc_dev_fops);
 	rtc->char_dev.owner = rtc->owner;
 }
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 48da85e..077af1d 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -813,7 +813,7 @@
 	if (spi->irq) {
 		set_bit(FLAG_EXITING, &ds1305->flags);
 		free_irq(spi->irq, ds1305);
-		flush_scheduled_work();
+		cancel_work_sync(&ds1305->work);
 	}
 
 	rtc_device_unregister(ds1305->rtc);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index d827ce5..0d559b6 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -106,9 +106,9 @@
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
 	struct work_struct	work;
-	s32 (*read_block_data)(struct i2c_client *client, u8 command,
+	s32 (*read_block_data)(const struct i2c_client *client, u8 command,
 			       u8 length, u8 *values);
-	s32 (*write_block_data)(struct i2c_client *client, u8 command,
+	s32 (*write_block_data)(const struct i2c_client *client, u8 command,
 				u8 length, const u8 *values);
 };
 
@@ -158,8 +158,8 @@
 
 #define BLOCK_DATA_MAX_TRIES 10
 
-static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
-				  u8 length, u8 *values)
+static s32 ds1307_read_block_data_once(const struct i2c_client *client,
+				       u8 command, u8 length, u8 *values)
 {
 	s32 i, data;
 
@@ -172,7 +172,7 @@
 	return i;
 }
 
-static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
 				  u8 length, u8 *values)
 {
 	u8 oldvalues[I2C_SMBUS_BLOCK_MAX];
@@ -198,7 +198,7 @@
 	return length;
 }
 
-static s32 ds1307_write_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
 				   u8 length, const u8 *values)
 {
 	u8 currvalues[I2C_SMBUS_BLOCK_MAX];
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 1f0007f..47fb635 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -417,7 +417,7 @@
 		mutex_unlock(&ds1374->mutex);
 
 		free_irq(client->irq, client);
-		flush_scheduled_work();
+		cancel_work_sync(&ds1374->work);
 	}
 
 	rtc_device_unregister(ds1374->rtc);
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 5706355..23a9ee1 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -463,7 +463,7 @@
 		mutex_unlock(&ds3232->mutex);
 
 		free_irq(client->irq, client);
-		flush_scheduled_work();
+		cancel_work_sync(&ds3232->work);
 	}
 
 	rtc_device_unregister(ds3232->rtc);
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f..075f170 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@
 }
 EXPORT_SYMBOL(rtc_tm_to_time);
 
+/*
+ * Convert rtc_time to ktime
+ */
+ktime_t rtc_tm_to_ktime(struct rtc_time tm)
+{
+	time_t time;
+	rtc_tm_to_time(&tm, &time);
+	return ktime_set(time, 0);
+}
+EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
+
+/*
+ * Convert ktime to rtc_time
+ */
+struct rtc_time rtc_ktime_to_tm(ktime_t kt)
+{
+	struct timespec ts;
+	struct rtc_time ret;
+
+	ts = ktime_to_timespec(kt);
+	/* Round up any ns */
+	if (ts.tv_nsec)
+		ts.tv_sec++;
+	rtc_time_to_tm(ts.tv_sec, &ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
+
 MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 657403e..0ec3f58 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -139,12 +139,13 @@
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
 
+	dev_set_drvdata(&spi->dev, rtc);
 	return 0;
 }
 
 static int __devexit max6902_remove(struct spi_device *spi)
 {
-	struct rtc_device *rtc = platform_get_drvdata(spi);
+	struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
 
 	rtc_device_unregister(rtc);
 	return 0;
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index f22dee3..3f7bc6b 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -20,6 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/mfd/max8998.h>
 #include <linux/mfd/max8998-private.h>
+#include <linux/delay.h>
 
 #define MAX8998_RTC_SEC			0x00
 #define MAX8998_RTC_MIN			0x01
@@ -73,6 +74,7 @@
 	struct i2c_client	*rtc;
 	struct rtc_device	*rtc_dev;
 	int irq;
+	bool lp3974_bug_workaround;
 };
 
 static void max8998_data_to_tm(u8 *data, struct rtc_time *tm)
@@ -124,10 +126,16 @@
 {
 	struct max8998_rtc_info *info = dev_get_drvdata(dev);
 	u8 data[8];
+	int ret;
 
 	max8998_tm_to_data(tm, data);
 
-	return max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+	ret = max8998_bulk_write(info->rtc, MAX8998_RTC_SEC, 8, data);
+
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
+
+	return ret;
 }
 
 static int max8998_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -163,12 +171,29 @@
 
 static int max8998_rtc_stop_alarm(struct max8998_rtc_info *info)
 {
-	return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+	int ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0);
+
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
+
+	return ret;
 }
 
 static int max8998_rtc_start_alarm(struct max8998_rtc_info *info)
 {
-	return max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, 0x77);
+	int ret;
+	u8 alarm0_conf = 0x77;
+
+	/* LP3974 chips with the delay bug have RTC alarm bugs in the "MONTH" field */
+	if (info->lp3974_bug_workaround)
+		alarm0_conf = 0x57;
+
+	ret = max8998_write_reg(info->rtc, MAX8998_ALARM0_CONF, alarm0_conf);
+
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
+
+	return ret;
 }
 
 static int max8998_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -187,10 +212,13 @@
 	if (ret < 0)
 		return ret;
 
-	if (alrm->enabled)
-		return max8998_rtc_start_alarm(info);
+	if (info->lp3974_bug_workaround)
+		msleep(2000);
 
-	return 0;
+	if (alrm->enabled)
+		ret = max8998_rtc_start_alarm(info);
+
+	return ret;
 }
 
 static int max8998_rtc_alarm_irq_enable(struct device *dev,
@@ -224,6 +252,7 @@
 static int __devinit max8998_rtc_probe(struct platform_device *pdev)
 {
 	struct max8998_dev *max8998 = dev_get_drvdata(pdev->dev.parent);
+	struct max8998_platform_data *pdata = dev_get_platdata(max8998->dev);
 	struct max8998_rtc_info *info;
 	int ret;
 
@@ -249,10 +278,18 @@
 
 	ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
 			"rtc-alarm0", info);
+
 	if (ret < 0)
 		dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
 			info->irq, ret);
 
+	dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
+	if (pdata->rtc_delay) {
+		info->lp3974_bug_workaround = true;
+		dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
+				" RTC updates will be extremely slow.\n");
+	}
+
 	return 0;
 
 out_rtc:
@@ -273,6 +310,12 @@
 	return 0;
 }
 
+static const struct platform_device_id max8998_rtc_id[] = {
+	{ "max8998-rtc", TYPE_MAX8998 },
+	{ "lp3974-rtc", TYPE_LP3974 },
+	{ }
+};
+
 static struct platform_driver max8998_rtc_driver = {
 	.driver		= {
 		.name	= "max8998-rtc",
@@ -280,6 +323,7 @@
 	},
 	.probe		= max8998_rtc_probe,
 	.remove		= __devexit_p(max8998_rtc_remove),
+	.id_table	= max8998_rtc_id,
 };
 
 static int __init max8998_rtc_init(void)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 73377b0..e72b523 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -429,13 +429,14 @@
 fail0:
 	iounmap(rtc_base);
 fail:
-	release_resource(mem);
+	release_mem_region(mem->start, resource_size(mem));
 	return -EIO;
 }
 
 static int __exit omap_rtc_remove(struct platform_device *pdev)
 {
 	struct rtc_device	*rtc = platform_get_drvdata(pdev);
+	struct resource		*mem = dev_get_drvdata(&rtc->dev);
 
 	device_init_wakeup(&pdev->dev, 0);
 
@@ -447,8 +448,9 @@
 	if (omap_rtc_timer != omap_rtc_alarm)
 		free_irq(omap_rtc_alarm, rtc);
 
-	release_resource(dev_get_drvdata(&rtc->dev));
 	rtc_device_unregister(rtc);
+	iounmap(rtc_base);
+	release_mem_region(mem->start, resource_size(mem));
 	return 0;
 }
 
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 1146e35..af32a62 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -650,7 +650,7 @@
 		mutex_unlock(lock);
 
 		free_irq(client->irq, client);
-		flush_scheduled_work();
+		cancel_work_sync(&rx8025->work);
 	}
 
 	rx8025_sysfs_unregister(&client->dev);
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 0788319..8e477bb 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -2,7 +2,8 @@
 	depends on S390 && BLOCK
 
 config BLK_DEV_XPRAM
-	tristate "XPRAM disk support"
+	def_tristate m
+	prompt "XPRAM disk support"
 	depends on S390 && BLOCK
 	help
 	  Select this option if you want to use your expanded storage on S/390
@@ -12,13 +13,15 @@
 	  xpram.  If unsure, say "N".
 
 config DCSSBLK
-	tristate "DCSSBLK support"
+	def_tristate m
+	prompt "DCSSBLK support"
 	depends on S390 && BLOCK
 	help
 	  Support for dcss block device
 
 config DASD
-	tristate "Support for DASD devices"
+	def_tristate y
+	prompt "Support for DASD devices"
 	depends on CCW && BLOCK
 	select IOSCHED_DEADLINE
 	help
@@ -27,28 +30,32 @@
 	  natively on a single image or an LPAR.
 
 config DASD_PROFILE
-	bool "Profiling support for dasd devices"
+	def_bool y
+	prompt "Profiling support for dasd devices"
 	depends on DASD
 	help
 	  Enable this option if you want to see profiling information
           in /proc/dasd/statistics.
 
 config DASD_ECKD
-	tristate "Support for ECKD Disks"
+	def_tristate y
+	prompt "Support for ECKD Disks"
 	depends on DASD
 	help
 	  ECKD devices are the most commonly used devices. You should enable
 	  this option unless you are very sure to have no ECKD device.
 
 config DASD_FBA
-	tristate "Support for FBA  Disks"
+	def_tristate y
+	prompt "Support for FBA  Disks"
 	depends on DASD
 	help
 	  Select this option to be able to access FBA devices. It is safe to
 	  say "Y".
 
 config DASD_DIAG
-	tristate "Support for DIAG access to Disks"
+	def_tristate y
+	prompt "Support for DIAG access to Disks"
 	depends on DASD
 	help
 	  Select this option if you want to use Diagnose250 command to access
@@ -56,7 +63,8 @@
 	  say "N".
 
 config DASD_EER
-	bool "Extended error reporting (EER)"
+	def_bool y
+	prompt "Extended error reporting (EER)"
 	depends on DASD
 	help
 	  This driver provides a character device interface to the
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fb613d7..794bfd9 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -11,6 +11,7 @@
 #define KMSG_COMPONENT "dasd"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/kmod.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -368,6 +369,11 @@
 	device->state = DASD_STATE_ONLINE;
 	if (device->block) {
 		dasd_schedule_block_bh(device->block);
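+		/* with raw-track access only the whole disk gets a change uevent */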
+		if ((device->features & DASD_FEATURE_USERAW)) {
+			disk = device->block->gdp;
+			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+			return 0;
+		}
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -393,7 +399,7 @@
 			return rc;
 	}
 	device->state = DASD_STATE_READY;
-	if (device->block) {
+	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
@@ -744,10 +750,6 @@
 	char *data;
 	int size;
 
-	/* Sanity checks */
-	BUG_ON(datasize > PAGE_SIZE ||
-	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
 	if (cplength > 0)
 		size += cplength * sizeof(struct ccw1);
@@ -853,7 +855,6 @@
 		rc = ccw_device_clear(device->cdev, (long) cqr);
 		switch (rc) {
 		case 0:	/* termination successful */
-			cqr->retries--;
 			cqr->status = DASD_CQR_CLEAR_PENDING;
 			cqr->stopclk = get_clock();
 			cqr->starttime = 0;
@@ -905,6 +906,16 @@
 		return rc;
 	}
 	device = (struct dasd_device *) cqr->startdev;
+	if (((cqr->block &&
+	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
+	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
+			      "because of stolen lock", cqr);
+		cqr->status = DASD_CQR_ERROR;
+		cqr->intrc = -EPERM;
+		return -EPERM;
+	}
 	if (cqr->retries < 0) {
 		/* internal error 14 - start_IO run out of retries */
 		sprintf(errorstring, "14 %p", cqr);
@@ -916,6 +927,11 @@
 	cqr->startclk = get_clock();
 	cqr->starttime = jiffies;
 	cqr->retries--;
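+	/* for anything but path verification, limit the request to the
+	 * operational paths; fall back to the full opm if none remain */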
+	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+		cqr->lpm &= device->path_data.opm;
+		if (!cqr->lpm)
+			cqr->lpm = device->path_data.opm;
+	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
 					 (long) cqr, cqr->lpm);
@@ -928,35 +944,53 @@
 		cqr->status = DASD_CQR_IN_IO;
 		break;
 	case -EBUSY:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: device busy, retry later");
 		break;
 	case -ETIMEDOUT:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: request timeout, retry later");
 		break;
 	case -EACCES:
-		/* -EACCES indicates that the request used only a
-		 * subset of the available pathes and all these
-		 * pathes are gone.
-		 * Do a retry with all available pathes.
+		/* -EACCES indicates that the request used only a subset of the
+		 * available paths and all these paths are gone. If the lpm of
+		 * this request was only a subset of the opm (e.g. the ppm) then
+		 * we just do a retry with all available paths.
+		 * If we already use the full opm, something is amiss, and we
+		 * need a full path verification.
 		 */
-		cqr->lpm = LPM_ANYPATH;
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
-			      "start_IO: selected pathes gone,"
-			      " retry on all pathes");
+		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+			DBF_DEV_EVENT(DBF_WARNING, device,
+				      "start_IO: selected paths gone (%x)",
+				      cqr->lpm);
+		} else if (cqr->lpm != device->path_data.opm) {
+			cqr->lpm = device->path_data.opm;
+			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+				      "start_IO: selected paths gone,"
+				      " retry on all paths");
+		} else {
+			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+				      "start_IO: all paths in opm gone,"
+				      " do path verification");
+			dasd_generic_last_path_gone(device);
+			device->path_data.opm = 0;
+			device->path_data.ppm = 0;
+			device->path_data.npm = 0;
+			device->path_data.tbvpm =
+				ccw_device_get_path_mask(device->cdev);
+		}
 		break;
 	case -ENODEV:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -ENODEV device gone, retry");
 		break;
 	case -EIO:
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EIO device gone, retry");
 		break;
 	case -EINVAL:
 		/* most likely caused in power management context */
-		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "start_IO: -EINVAL device currently "
 			      "not accessible");
 		break;
@@ -1076,6 +1110,7 @@
 	unsigned long long now;
 	int expires;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
 		case -EIO:
@@ -1094,16 +1129,11 @@
 	}
 
 	now = get_clock();
-
-	/* check for unsolicited interrupts */
 	cqr = (struct dasd_ccw_req *) intparm;
-	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
-		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
-		     ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
-		      (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
-						  SCSW_STCTL_ALERT_STATUS))))) {
-		if (cqr && cqr->status == DASD_CQR_IN_IO)
-			cqr->status = DASD_CQR_QUEUED;
+	/* check for conditions that should be handled immediately */
+	if (!cqr ||
+	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	      scsw_cstat(&irb->scsw) == 0)) {
 		if (cqr)
 			memcpy(&cqr->irb, irb, sizeof(*irb));
 		device = dasd_device_from_cdev_locked(cdev);
@@ -1114,17 +1144,14 @@
 			dasd_put_device(device);
 			return;
 		}
-		device->discipline->dump_sense_dbf(device, irb,
-						   "unsolicited");
-		if ((device->features & DASD_FEATURE_ERPLOG))
-			device->discipline->dump_sense(device, cqr,
-						       irb);
-		dasd_device_clear_timer(device);
-		device->discipline->handle_unsolicited_interrupt(device,
-								 irb);
+		device->discipline->dump_sense_dbf(device, irb, "int");
+		if (device->features & DASD_FEATURE_ERPLOG)
+			device->discipline->dump_sense(device, cqr, irb);
+		device->discipline->check_for_device_change(device, cqr, irb);
 		dasd_put_device(device);
-		return;
 	}
+	if (!cqr)
+		return;
 
 	device = (struct dasd_device *) cqr->startdev;
 	if (!device ||
@@ -1164,25 +1191,19 @@
 					  struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
-		memcpy(&cqr->irb, irb, sizeof(struct irb));
-		/* log sense for every failed I/O to s390 debugfeature */
-		dasd_log_sense_dbf(cqr, irb);
-		if (device->features & DASD_FEATURE_ERPLOG) {
-			dasd_log_sense(cqr, irb);
-		}
-
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == LPM_ANYPATH)
+			if (cqr->lpm == device->path_data.opm)
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
-			cqr->lpm    = LPM_ANYPATH;
+			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+				cqr->lpm = device->path_data.opm;
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
@@ -1210,13 +1231,13 @@
 		goto out;
 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
 	   device->state != device->target ||
-	   !device->discipline->handle_unsolicited_interrupt){
+	   !device->discipline->check_for_device_change){
 		dasd_put_device(device);
 		goto out;
 	}
-
-	dasd_device_clear_timer(device);
-	device->discipline->handle_unsolicited_interrupt(device, irb);
+	if (device->discipline->dump_sense_dbf)
+		device->discipline->dump_sense_dbf(device, irb, "uc");
+	device->discipline->check_for_device_change(device, NULL, irb);
 	dasd_put_device(device);
 out:
 	return UC_TODO_RETRY;
@@ -1366,8 +1387,14 @@
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* when device is stopped, return request to previous layer */
-	if (device->stopped) {
+	/* when device is stopped, return request to previous layer
+	 * exception: only the disconnect or unresumed bits are set and the
+	 * cqr is a path verification request
+	 */
+	if (device->stopped &&
+	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+		cqr->intrc = -EAGAIN;
 		cqr->status = DASD_CQR_CLEARED;
 		dasd_schedule_device_bh(device);
 		return;
@@ -1383,6 +1410,23 @@
 		dasd_device_set_timer(device, 50);
 }
 
+static void __dasd_device_check_path_events(struct dasd_device *device)
+{
+	int rc;
+
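+	/* tbvpm holds the paths that still have to be verified; if
+	 * verification fails, the device timer triggers a retry later */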
+	if (device->path_data.tbvpm) {
+		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
+					DASD_UNRESUMED_PM))
+			return;
+		rc = device->discipline->verify_path(
+			device, device->path_data.tbvpm);
+		if (rc)
+			dasd_device_set_timer(device, 50);
+		else
+			device->path_data.tbvpm = 0;
+	}
+}
+
 /*
  * Go through all request on the dasd_device request queue,
  * terminate them on the cdev if necessary, and return them to the
@@ -1457,6 +1501,7 @@
 	__dasd_device_check_expire(device);
 	/* find final requests on ccw queue */
 	__dasd_device_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_path_events(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	/* Now call the callback function of requests with final status */
 	__dasd_device_process_final_queue(device, &final_queue);
@@ -1613,7 +1658,12 @@
 			continue;
 		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
 			continue;
-
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			continue;
+		}
 		/* Non-temporary stop condition will trigger fail fast */
 		if (device->stopped & ~DASD_STOPPED_PENDING &&
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -1621,7 +1671,6 @@
 			cqr->status = DASD_CQR_FAILED;
 			continue;
 		}
-
 		/* Don't try to start requests if device is stopped */
 		if (interruptible) {
 			rc = wait_event_interruptible(
@@ -1706,13 +1755,18 @@
 	int rc;
 
 	device = cqr->startdev;
+	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
+	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+		cqr->status = DASD_CQR_FAILED;
+		cqr->intrc = -EPERM;
+		return -EIO;
+	}
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = _dasd_term_running_cqr(device);
 	if (rc) {
 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 		return rc;
 	}
-
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	cqr->status = DASD_CQR_QUEUED;
@@ -2016,6 +2070,13 @@
 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
 		if (cqr->status != DASD_CQR_FILLED)
 			continue;
+		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
+		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
+			cqr->status = DASD_CQR_FAILED;
+			cqr->intrc = -EPERM;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
 		/* Non-temporary stop condition will trigger fail fast */
 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
@@ -2201,8 +2262,20 @@
 {
 	int max;
 
-	blk_queue_logical_block_size(block->request_queue, block->bp_block);
-	max = block->base->discipline->max_blocks << block->s2b_shift;
+	if (block->base->features & DASD_FEATURE_USERAW) {
+		/*
+		 * the max_blocks value for raw_track access is 256
+		 * it is higher than the native ECKD value because we
+		 * only need one ccw per track
+		 * so the max_hw_sectors are
+		 * 2048 x 512B = 1024kB = 16 tracks
+		 */
+		max = 2048;
+	} else {
+		max = block->base->discipline->max_blocks << block->s2b_shift;
+	}
+	blk_queue_logical_block_size(block->request_queue,
+				     block->bp_block);
 	blk_queue_max_hw_sectors(block->request_queue, max);
 	blk_queue_max_segments(block->request_queue, -1L);
 	/* with page sized segments we can translate each segement into
@@ -2588,10 +2661,53 @@
 	return 0;
 }
 
+int dasd_generic_last_path_gone(struct dasd_device *device)
+{
+	struct dasd_ccw_req *cqr;
+
+	dev_warn(&device->cdev->dev, "No operational channel path is left "
+		 "for the device\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
+	/* First of all call extended error reporting. */
+	dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+
+	if (device->state < DASD_STATE_BASIC)
+		return 0;
+	/* Device is active. We want to keep it. */
+	list_for_each_entry(cqr, &device->ccw_queue, devlist)
+		if ((cqr->status == DASD_CQR_IN_IO) ||
+		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
+			cqr->status = DASD_CQR_QUEUED;
+			cqr->retries++;
+		}
+	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
+
+int dasd_generic_path_operational(struct dasd_device *device)
+{
+	dev_info(&device->cdev->dev, "A channel path to the device has become "
+		 "operational\n");
+	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
+	if (device->stopped & DASD_UNRESUMED_PM) {
+		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
+		dasd_restore_device(device);
+		return 1;
+	}
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
+
 int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
 	struct dasd_device *device;
-	struct dasd_ccw_req *cqr;
 	int ret;
 
 	device = dasd_device_from_cdev_locked(cdev);
@@ -2602,41 +2718,64 @@
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		/* First of all call extended error reporting. */
-		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-
-		if (device->state < DASD_STATE_BASIC)
-			break;
-		/* Device is active. We want to keep it. */
-		list_for_each_entry(cqr, &device->ccw_queue, devlist)
-			if (cqr->status == DASD_CQR_IN_IO) {
-				cqr->status = DASD_CQR_QUEUED;
-				cqr->retries++;
-			}
-		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		dasd_device_clear_timer(device);
-		dasd_schedule_device_bh(device);
-		ret = 1;
+		device->path_data.opm = 0;
+		device->path_data.ppm = 0;
+		device->path_data.npm = 0;
+		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
-		/* FIXME: add a sanity check. */
-		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
-		if (device->stopped & DASD_UNRESUMED_PM) {
-			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
-			dasd_restore_device(device);
-			ret = 1;
-			break;
-		}
-		dasd_schedule_device_bh(device);
-		if (device->block)
-			dasd_schedule_block_bh(device->block);
 		ret = 1;
+		if (device->path_data.opm)
+			ret = dasd_generic_path_operational(device);
 		break;
 	}
 	dasd_put_device(device);
 	return ret;
 }
 
+void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
+{
+	int chp;
+	__u8 oldopm, eventlpm;
+	struct dasd_device *device;
+
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device))
+		return;
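+	/* evaluate the reported event for each of the eight channel paths */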
+	for (chp = 0; chp < 8; chp++) {
+		eventlpm = 0x80 >> chp;
+		if (path_event[chp] & PE_PATH_GONE) {
+			oldopm = device->path_data.opm;
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			if (oldopm && !device->path_data.opm)
+				dasd_generic_last_path_gone(device);
+		}
+		if (path_event[chp] & PE_PATH_AVAILABLE) {
+			device->path_data.opm &= ~eventlpm;
+			device->path_data.ppm &= ~eventlpm;
+			device->path_data.npm &= ~eventlpm;
+			device->path_data.tbvpm |= eventlpm;
+			dasd_schedule_device_bh(device);
+		}
+	}
+	dasd_put_device(device);
+}
+EXPORT_SYMBOL_GPL(dasd_generic_path_event);
+
+int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	if (!device->path_data.opm && lpm) {
+		device->path_data.opm = lpm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= lpm;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
+
+
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
 	struct dasd_ccw_req *cqr, *n;
@@ -2646,6 +2785,10 @@
 
 	if (IS_ERR(device))
 		return PTR_ERR(device);
+
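+	/* the discipline freeze callback may need to issue I/O itself,
+	 * so call it before new I/O is blocked below */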
+	if (device->discipline->freeze)
+		rc = device->discipline->freeze(device);
+
 	/* disallow new I/O  */
 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
 	/* clear active requests */
@@ -2682,9 +2825,6 @@
 	list_splice_tail(&freeze_queue, &device->ccw_queue);
 	spin_unlock_irq(get_ccwdev_lock(cdev));
 
-	if (device->discipline->freeze)
-		rc = device->discipline->freeze(device);
-
 	dasd_put_device(device);
 	return rc;
 }
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 968c76c..1654a24 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,9 +152,9 @@
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	//FIXME: start with get_opm ?
 	if (erp->lpm == 0)
-		erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
+		erp->lpm = device->path_data.opm &
+			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
 
@@ -270,10 +270,11 @@
 {
 	erp->function = dasd_3990_erp_action_1;
 	dasd_3990_erp_alternate_path(erp);
-	if (erp->status == DASD_CQR_FAILED) {
+	if (erp->status == DASD_CQR_FAILED &&
+	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = LPM_ANYPATH;
+		erp->lpm = erp->startdev->path_data.opm;
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -1907,15 +1908,14 @@
 static void
 dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 {
-
 	if (sense[25] & DASD_SENSE_BIT_3) {
 		dasd_3990_erp_alternate_path(erp);
 
-		if (erp->status == DASD_CQR_FAILED) {
+		if (erp->status == DASD_CQR_FAILED &&
+		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 			/* reset the lpm and the status to be able to
 			 * try further actions. */
-
-			erp->lpm = 0;
+			erp->lpm = erp->startdev->path_data.opm;
 			erp->status = DASD_CQR_NEED_ERP;
 		}
 	}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8d41f3e..cb6a67b 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -208,6 +208,8 @@
 			features |= DASD_FEATURE_READONLY;
 		else if (len == 4 && !strncmp(str, "diag", 4))
 			features |= DASD_FEATURE_USEDIAG;
+		else if (len == 3 && !strncmp(str, "raw", 3))
+			features |= DASD_FEATURE_USERAW;
 		else if (len == 6 && !strncmp(str, "erplog", 6))
 			features |= DASD_FEATURE_ERPLOG;
 		else if (len == 8 && !strncmp(str, "failfast", 8))
@@ -639,6 +641,7 @@
 {
 	wake_up(&dasd_delete_wq);
 }
+EXPORT_SYMBOL_GPL(dasd_put_device_wake);
 
 /*
  * Return dasd_device structure associated with cdev.
@@ -856,7 +859,7 @@
 	spin_lock(&dasd_devmap_lock);
 	/* Changing diag discipline flag is only allowed in offline state. */
 	rc = count;
-	if (!devmap->device) {
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
 		if (val)
 			devmap->features |= DASD_FEATURE_USEDIAG;
 		else
@@ -869,6 +872,56 @@
 
 static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
 
+/*
+ * use_raw controls whether the driver should give access to raw eckd data or
+ * operate in standard mode
+ */
+static ssize_t
+dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int use_raw;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (!IS_ERR(devmap))
+		use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
+	else
+		use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
+	return sprintf(buf, use_raw ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	ssize_t rc;
+	unsigned long val;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	/* Changing the raw track access flag is only allowed in offline state. */
+	rc = count;
+	if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
+		if (val)
+			devmap->features |= DASD_FEATURE_USERAW;
+		else
+			devmap->features &= ~DASD_FEATURE_USERAW;
+	} else
+		rc = -EPERM;
+	spin_unlock(&dasd_devmap_lock);
+	return rc;
+}
+
+static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
+		   dasd_use_raw_store);
+
 static ssize_t
 dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
@@ -1126,6 +1179,103 @@
 
 static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
 
+static ssize_t dasd_reservation_policy_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct dasd_devmap *devmap;
+	int rc = 0;
+
+	devmap = dasd_find_busid(dev_name(dev));
+	if (IS_ERR(devmap)) {
+		rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+	} else {
+		spin_lock(&dasd_devmap_lock);
+		if (devmap->features & DASD_FEATURE_FAILONSLCK)
+			rc = snprintf(buf, PAGE_SIZE, "fail\n");
+		else
+			rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+		spin_unlock(&dasd_devmap_lock);
+	}
+	return rc;
+}
+
+static ssize_t dasd_reservation_policy_store(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int rc;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+	rc = 0;
+	spin_lock(&dasd_devmap_lock);
+	if (sysfs_streq("ignore", buf))
+		devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+	else if (sysfs_streq("fail", buf))
+		devmap->features |= DASD_FEATURE_FAILONSLCK;
+	else
+		rc = -EINVAL;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(reservation_policy, 0644,
+		   dasd_reservation_policy_show, dasd_reservation_policy_store);
+
+static ssize_t dasd_reservation_state_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return snprintf(buf, PAGE_SIZE, "none\n");
+
+	if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+	else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
+		rc = snprintf(buf, PAGE_SIZE, "lost\n");
+	else
+		rc = snprintf(buf, PAGE_SIZE, "none\n");
+	dasd_put_device(device);
+	return rc;
+}
+
+static ssize_t dasd_reservation_state_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	int rc = 0;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (sysfs_streq("reset", buf))
+		clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+	else
+		rc = -EINVAL;
+	dasd_put_device(device);
+
+	if (rc)
+		return rc;
+	else
+		return count;
+}
+
+static DEVICE_ATTR(last_known_reservation_state, 0644,
+		   dasd_reservation_state_show, dasd_reservation_state_store);
+
 static struct attribute * dasd_attrs[] = {
 	&dev_attr_readonly.attr,
 	&dev_attr_discipline.attr,
@@ -1134,10 +1284,13 @@
 	&dev_attr_vendor.attr,
 	&dev_attr_uid.attr,
 	&dev_attr_use_diag.attr,
+	&dev_attr_raw_track_access.attr,
 	&dev_attr_eer_enabled.attr,
 	&dev_attr_erplog.attr,
 	&dev_attr_failfast.attr,
 	&dev_attr_expires.attr,
+	&dev_attr_reservation_policy.attr,
+	&dev_attr_last_known_reservation_state.attr,
 	NULL,
 };
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 266b34b..29143ed 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -10,6 +10,7 @@
 
 #define KMSG_COMPONENT "dasd"
 
+#include <linux/kernel_stat.h>
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -238,6 +239,7 @@
 	addr_t ip;
 	int rc;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
 	switch (ext_int_code >> 24) {
 	case DASD_DIAG_CODE_31BIT:
 		ip = (addr_t) param32;
@@ -617,6 +619,7 @@
 	.ebcname = "DIAG",
 	.max_blocks = DIAG_MAX_BLOCKS,
 	.check_device = dasd_diag_check_device,
+	.verify_path = dasd_generic_verify_path,
 	.fill_geometry = dasd_diag_fill_geometry,
 	.start_IO = dasd_start_diag,
 	.term_IO = dasd_diag_term_IO,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bf61274..318672d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -54,6 +54,15 @@
 #define ECKD_F7(i) (i->factor7)
 #define ECKD_F8(i) (i->factor8)
 
+/*
+ * raw track access always maps to 64k in memory
+ * so it maps to 16 blocks of 4k per track
+ */
+#define DASD_RAW_BLOCK_PER_TRACK 16
+#define DASD_RAW_BLOCKSIZE 4096
+/* 64k are 128 x 512 byte sectors */
+#define DASD_RAW_SECTORS_PER_TRACK 128
+
 MODULE_LICENSE("GPL");
 
 static struct dasd_discipline dasd_eckd_discipline;
@@ -90,6 +99,18 @@
 } *dasd_reserve_req;
 static DEFINE_MUTEX(dasd_reserve_mutex);
 
+/* definitions for the path verification worker */
+struct path_verification_work_data {
+	struct work_struct worker;
+	struct dasd_device *device;
+	struct dasd_ccw_req cqr;
+	struct ccw1 ccw;
+	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
+	int isglobal;
+	__u8 tbvpm;
+};
+static struct path_verification_work_data *path_verification_worker;
+static DEFINE_MUTEX(dasd_path_verification_mutex);
 
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
@@ -373,6 +394,23 @@
 		data->length = reclen;
 		data->operation.operation = 0x03;
 		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		data->operation.orientation = 0x0;
+		data->operation.operation = 0x3F;
+		data->extended_operation = 0x11;
+		data->length = 0;
+		data->extended_parameter_length = 0x02;
+		if (data->count > 8) {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[1] = 0xFF;
+			data->extended_parameter[1] <<= (16 - count);
+		} else {
+			data->extended_parameter[0] = 0xFF;
+			data->extended_parameter[0] <<= (8 - count);
+			data->extended_parameter[1] = 0x00;
+		}
+		data->sector = 0xFF;
+		break;
 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
 		data->auxiliary.length_valid = 0x1;
 		data->length = reclen;	/* not tlf, as one might think */
@@ -396,6 +434,12 @@
 	case DASD_ECKD_CCW_READ_COUNT:
 		data->operation.operation = 0x06;
 		break;
+	case DASD_ECKD_CCW_READ_TRACK:
+		data->operation.orientation = 0x1;
+		data->operation.operation = 0x0C;
+		data->extended_parameter_length = 0;
+		data->sector = 0xFF;
+		break;
 	case DASD_ECKD_CCW_READ_TRACK_DATA:
 		data->auxiliary.length_valid = 0x1;
 		data->length = tlf;
@@ -439,10 +483,16 @@
 
 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
 	ccw->flags = 0;
-	ccw->count = sizeof(*pfxdata);
-	ccw->cda = (__u32) __pa(pfxdata);
+	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
+		ccw->count = sizeof(*pfxdata) + 2;
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
+	} else {
+		ccw->count = sizeof(*pfxdata);
+		ccw->cda = (__u32) __pa(pfxdata);
+		memset(pfxdata, 0, sizeof(*pfxdata));
+	}
 
-	memset(pfxdata, 0, sizeof(*pfxdata));
 	/* prefix data */
 	if (format > 1) {
 		DBF_DEV_EVENT(DBF_ERR, basedev,
@@ -476,6 +526,7 @@
 		dedata->mask.perm = 0x1;
 		dedata->attributes.operation = basepriv->attrib.operation;
 		break;
+	case DASD_ECKD_CCW_READ_TRACK:
 	case DASD_ECKD_CCW_READ_TRACK_DATA:
 		dedata->mask.perm = 0x1;
 		dedata->attributes.operation = basepriv->attrib.operation;
@@ -502,6 +553,11 @@
 		dedata->attributes.operation = DASD_BYPASS_CACHE;
 		rc = check_XRC_on_prefix(pfxdata, basedev);
 		break;
+	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+		dedata->mask.perm = 0x03;
+		dedata->attributes.operation = basepriv->attrib.operation;
+		dedata->blk_size = 0;
+		break;
 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
 		dedata->mask.perm = 0x02;
 		dedata->attributes.operation = basepriv->attrib.operation;
@@ -755,26 +811,27 @@
 	return -EINVAL;
 }
 
-static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
-						    void *rcd_buffer,
-						    struct ciw *ciw, __u8 lpm)
+static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
+				   struct dasd_ccw_req *cqr,
+				   __u8 *rcd_buffer,
+				   __u8 lpm)
 {
-	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
-
-	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
-				   device);
-
-	if (IS_ERR(cqr)) {
-		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-			      "Could not allocate RCD request");
-		return cqr;
-	}
+	/*
+	 * buffer has to start with EBCDIC "V1.0" to show
+	 * support for virtual device SNEQ
+	 */
+	rcd_buffer[0] = 0xE5;
+	rcd_buffer[1] = 0xF1;
+	rcd_buffer[2] = 0x4B;
+	rcd_buffer[3] = 0xF0;
 
 	ccw = cqr->cpaddr;
-	ccw->cmd_code = ciw->cmd;
+	ccw->cmd_code = DASD_ECKD_CCW_RCD;
+	ccw->flags = 0;
 	ccw->cda = (__u32)(addr_t)rcd_buffer;
-	ccw->count = ciw->count;
+	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
+	cqr->magic = DASD_ECKD_MAGIC;
 
 	cqr->startdev = device;
 	cqr->memdev = device;
@@ -784,7 +841,30 @@
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
-	return cqr;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
+}
+
+static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
+					   struct dasd_ccw_req *cqr,
+					   __u8 *rcd_buffer,
+					   __u8 lpm)
+{
+	struct ciw *ciw;
+	int rc;
+	/*
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
+		return -EOPNOTSUPP;
+
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
+	cqr->retries = 5;
+	rc = dasd_sleep_on_immediatly(cqr);
+	return rc;
 }
 
 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
@@ -797,32 +877,29 @@
 	struct dasd_ccw_req *cqr;
 
 	/*
-	 * scan for RCD command in extended SenseID data
+	 * sanity check: scan for RCD command in extended SenseID data
+	 * some devices do not support RCD
 	 */
 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
-	if (!ciw || ciw->cmd == 0) {
+	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
 		ret = -EOPNOTSUPP;
 		goto out_error;
 	}
-	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
 	if (!rcd_buf) {
 		ret = -ENOMEM;
 		goto out_error;
 	}
-
-	/*
-	 * buffer has to start with EBCDIC "V1.0" to show
-	 * support for virtual device SNEQ
-	 */
-	rcd_buf[0] = 0xE5;
-	rcd_buf[1] = 0xF1;
-	rcd_buf[2] = 0x4B;
-	rcd_buf[3] = 0xF0;
-	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
+				   0, /* use rcd_buf as data area */
+				   device);
 	if (IS_ERR(cqr)) {
-		ret =  PTR_ERR(cqr);
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Could not allocate RCD request");
+		ret = -ENOMEM;
 		goto out_error;
 	}
+	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
 	ret = dasd_sleep_on(cqr);
 	/*
 	 * on success we update the user input parms
@@ -831,7 +908,7 @@
 	if (ret)
 		goto out_error;
 
-	*rcd_buffer_size = ciw->count;
+	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
 	*rcd_buffer = rcd_buf;
 	return 0;
 out_error:
@@ -901,18 +978,18 @@
 	void *conf_data;
 	int conf_len, conf_data_saved;
 	int rc;
-	__u8 lpm;
+	__u8 lpm, opm;
 	struct dasd_eckd_private *private;
-	struct dasd_eckd_path *path_data;
+	struct dasd_path *path_data;
 
 	private = (struct dasd_eckd_private *) device->private;
-	path_data = (struct dasd_eckd_path *) &private->path_data;
-	path_data->opm = ccw_device_get_path_mask(device->cdev);
+	path_data = &device->path_data;
+	opm = ccw_device_get_path_mask(device->cdev);
 	lpm = 0x80;
 	conf_data_saved = 0;
 	/* get configuration data per operational path */
 	for (lpm = 0x80; lpm; lpm>>= 1) {
-		if (lpm & path_data->opm){
+		if (lpm & opm) {
 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
 						     &conf_len, lpm);
 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
@@ -925,6 +1002,8 @@
 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
 						"No configuration data "
 						"retrieved");
+				/* no further analysis possible */
+				path_data->opm |= lpm;
 				continue;	/* no error */
 			}
 			/* save first valid configuration data */
@@ -948,6 +1027,7 @@
 				path_data->ppm |= lpm;
 				break;
 			}
+			path_data->opm |= lpm;
 			if (conf_data != private->conf_data)
 				kfree(conf_data);
 		}
@@ -955,6 +1035,140 @@
 	return 0;
 }
 
+static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
+{
+	struct dasd_eckd_private *private;
+	int mdc;
+	u32 fcx_max_data;
+
+	private = (struct dasd_eckd_private *) device->private;
+	if (private->fcx_max_data) {
+		mdc = ccw_device_get_mdc(device->cdev, lpm);
+		if (mdc < 0) {
+			dev_warn(&device->cdev->dev,
+				 "Detecting the maximum data size for zHPF "
+				 "requests failed (rc=%d) for a new path %x\n",
+				 mdc, lpm);
+			return mdc;
+		}
+		fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+		if (fcx_max_data < private->fcx_max_data) {
+			dev_warn(&device->cdev->dev,
+				 "The maximum data size for zHPF requests %u "
+				 "on a new path %x is below the active maximum "
+				 "%u\n", fcx_max_data, lpm,
+				 private->fcx_max_data);
+			return -EACCES;
+		}
+	}
+	return 0;
+}
+
+static void do_path_verification_work(struct work_struct *work)
+{
+	struct path_verification_work_data *data;
+	struct dasd_device *device;
+	__u8 lpm, opm, npm, ppm, epm;
+	unsigned long flags;
+	int rc;
+
+	data = container_of(work, struct path_verification_work_data, worker);
+	device = data->device;
+
+	opm = 0;
+	npm = 0;
+	ppm = 0;
+	epm = 0;
+	for (lpm = 0x80; lpm; lpm >>= 1) {
+		if (lpm & data->tbvpm) {
+			memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+			memset(&data->cqr, 0, sizeof(data->cqr));
+			data->cqr.cpaddr = &data->ccw;
+			rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+							     data->rcd_buffer,
+							     lpm);
+			if (!rc) {
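+				/* record the path in the matching path
+				 * mask according to the RCD data */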
+				switch (dasd_eckd_path_access(data->rcd_buffer,
+						     DASD_ECKD_RCD_DATA_SIZE)) {
+				case 0x02:
+					npm |= lpm;
+					break;
+				case 0x03:
+					ppm |= lpm;
+					break;
+				}
+				opm |= lpm;
+			} else if (rc == -EOPNOTSUPP) {
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+				       "path verification: No configuration "
+				       "data retrieved");
+				opm |= lpm;
+			} else if (rc == -EAGAIN) {
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"path verification: device is stopped,"
+					" try again later");
+				epm |= lpm;
+			} else {
+				dev_warn(&device->cdev->dev,
+					 "Reading device feature codes failed "
+					 "(rc=%d) for new path %x\n", rc, lpm);
+				continue;
+			}
+			if (verify_fcx_max_data(device, lpm)) {
+				opm &= ~lpm;
+				npm &= ~lpm;
+				ppm &= ~lpm;
+			}
+		}
+	}
+	/*
+	 * There is a small chance that a path is lost again between
+	 * above path verification and the following modification of
+	 * the device opm mask. We could avoid that race here by using
+	 * yet another path mask, but we rather deal with this unlikely
+	 * situation in dasd_start_IO.
+	 */
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+	if (!device->path_data.opm && opm) {
+		device->path_data.opm = opm;
+		dasd_generic_path_operational(device);
+	} else
+		device->path_data.opm |= opm;
+	device->path_data.npm |= npm;
+	device->path_data.ppm |= ppm;
+	device->path_data.tbvpm |= epm;
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
+	dasd_put_device(device);
+	if (data->isglobal)
+		mutex_unlock(&dasd_path_verification_mutex);
+	else
+		kfree(data);
+}
+
+static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
+{
+	struct path_verification_work_data *data;
+
+	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
+	if (!data) {
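+		/* atomic allocation failed: fall back to the single
+		 * static worker, serialized by the mutex */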
+		if (mutex_trylock(&dasd_path_verification_mutex)) {
+			data = path_verification_worker;
+			data->isglobal = 1;
+		} else
+			return -ENOMEM;
+	} else {
+		memset(data, 0, sizeof(*data));
+		data->isglobal = 0;
+	}
+	INIT_WORK(&data->worker, do_path_verification_work);
+	dasd_get_device(device);
+	data->device = device;
+	data->tbvpm = lpm;
+	schedule_work(&data->worker);
+	return 0;
+}
+
 static int dasd_eckd_read_features(struct dasd_device *device)
 {
 	struct dasd_psf_prssd_data *prssdp;
@@ -1105,6 +1319,37 @@
 			"returned rc=%d", private->uid.ssid, rc);
 }
 
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+#if defined(CONFIG_64BIT)
+	int tpm, mdc;
+	int fcx_in_css, fcx_in_gneq, fcx_in_features;
+	struct dasd_eckd_private *private;
+
+	if (dasd_nofcx)
+		return 0;
+	/* is transport mode supported? */
+	private = (struct dasd_eckd_private *) device->private;
+	fcx_in_css = css_general_characteristics.fcx;
+	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+	fcx_in_features = private->features.feature[40] & 0x80;
+	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+	if (!tpm)
+		return 0;
+
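+	/* the mdc value is given in 64k units, see FCX_MAX_DATA_FACTOR */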
+	mdc = ccw_device_get_mdc(device->cdev, 0);
+	if (mdc < 0) {
+		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
+			 " data size for zHPF requests failed\n");
+		return 0;
+	} else
+		return mdc * FCX_MAX_DATA_FACTOR;
+#else
+	return 0;
+#endif
+}
+
 /*
  * Check device characteristics.
  * If the device is accessible using ECKD discipline, the device is enabled.
@@ -1223,6 +1468,8 @@
 	else
 		private->real_cyl = private->rdc_data.no_cyl;
 
+	private->fcx_max_data = get_fcx_max_data(device);
+
 	readonly = dasd_device_is_ro(device);
 	if (readonly)
 		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
@@ -1404,6 +1651,13 @@
 		dasd_sfree_request(init_cqr, device);
 	}
 
+	if (device->features & DASD_FEATURE_USERAW) {
+		block->bp_block = DASD_RAW_BLOCKSIZE;
+		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
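+		/* 4096 byte blocks, i.e. 512 << 3 */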
+		block->s2b_shift = 3;
+		goto raw;
+	}
+
 	if (status == INIT_CQR_UNFORMATTED) {
 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
 		return -EMEDIUMTYPE;
@@ -1441,6 +1695,7 @@
 			dev_warn(&device->cdev->dev,
 				 "Track 0 has no records following the VTOC\n");
 	}
+
 	if (count_area != NULL && count_area->kl == 0) {
 		/* we found nothing violating our disk layout */
 		if (dasd_check_blocksize(count_area->dl) == 0)
@@ -1456,6 +1711,8 @@
 		block->s2b_shift++;
 
 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
+
+raw:
 	block->blocks = (private->real_cyl *
 			  private->rdc_data.trk_per_cyl *
 			  blk_per_trk);
@@ -1716,6 +1973,7 @@
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);
 		cqr->startdev = cqr->block->base;
+		cqr->lpm = cqr->block->base->path_data.opm;
 	}
 };
 
@@ -1744,9 +2002,9 @@
 	return dasd_default_erp_postaction;
 }
 
-
-static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
-						   struct irb *irb)
+static void dasd_eckd_check_for_device_change(struct dasd_device *device,
+					      struct dasd_ccw_req *cqr,
+					      struct irb *irb)
 {
 	char mask;
 	char *sense = NULL;
@@ -1770,40 +2028,41 @@
 			/* schedule worker to reload device */
 			dasd_reload_device(device);
 		}
-
 		dasd_generic_handle_state_change(device);
 		return;
 	}
 
-	/* summary unit check */
 	sense = dasd_get_sense(irb);
-	if (sense && (sense[7] == 0x0D) &&
+	if (!sense)
+		return;
+
+	/* summary unit check */
+	if ((sense[7] == 0x0D) &&
 	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
 		dasd_alias_handle_summary_unit_check(device, irb);
 		return;
 	}
 
 	/* service information message SIM */
-	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
+	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
 		dasd_3990_erp_handle_sim(device, sense);
-		dasd_schedule_device_bh(device);
 		return;
 	}
 
-	if ((scsw_cc(&irb->scsw) == 1) && !sense &&
-	    (scsw_fctl(&irb->scsw) == SCSW_FCTL_START_FUNC) &&
-	    (scsw_actl(&irb->scsw) == SCSW_ACTL_START_PEND) &&
-	    (scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND)) {
-		/* fake irb do nothing, they are handled elsewhere */
-		dasd_schedule_device_bh(device);
-		return;
+	/* loss of device reservation is handled via base devices only
+	 * as alias devices may be used with several bases
+	 */
+	if (device->block && (sense[7] == 0x3F) &&
+	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
+	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
+		if (device->features & DASD_FEATURE_FAILONSLCK)
+			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
+		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
+		dev_err(&device->cdev->dev,
+			"The device reservation was lost\n");
 	}
-
-	dasd_schedule_device_bh(device);
-	return;
-};
-
+}
 
 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 					       struct dasd_device *startdev,
@@ -1984,7 +2243,7 @@
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = private->path_data.ppm;
+	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -2161,7 +2420,7 @@
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = private->path_data.ppm;
+	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -2326,6 +2585,12 @@
 	struct tidaw *last_tidaw = NULL;
 	int itcw_op;
 	size_t itcw_size;
+	u8 tidaw_flags;
+	unsigned int seg_len, part_len, len_to_track_end;
+	unsigned char new_track;
+	sector_t recid, trkid;
+	unsigned int offs;
+	unsigned int count, count_to_trk_end;
 
 	basedev = block->base;
 	private = (struct dasd_eckd_private *) basedev->private;
@@ -2341,12 +2606,16 @@
 	/* trackbased I/O needs address all memory via TIDAWs,
 	 * not just for 64 bit addresses. This allows us to map
 	 * each segment directly to one tidaw.
+	 * In the case of write requests, additional tidaws may
+	 * be needed when a segment crosses a track boundary.
 	 */
 	trkcount = last_trk - first_trk + 1;
 	ctidaw = 0;
 	rq_for_each_segment(bv, req, iter) {
 		++ctidaw;
 	}
+	if (rq_data_dir(req) == WRITE)
+		ctidaw += (last_trk - first_trk);
 
 	/* Allocate the ccw request. */
 	itcw_size = itcw_calc_size(0, ctidaw, 0);
@@ -2354,15 +2623,6 @@
 	if (IS_ERR(cqr))
 		return cqr;
 
-	cqr->cpmode = 1;
-	cqr->startdev = startdev;
-	cqr->memdev = startdev;
-	cqr->block = block;
-	cqr->expires = 100*HZ;
-	cqr->buildclk = get_clock();
-	cqr->status = DASD_CQR_FILLED;
-	cqr->retries = 10;
-
 	/* transfer length factor: how many bytes to read from the last track */
 	if (first_trk == last_trk)
 		tlf = last_offs - first_offs + 1;
@@ -2371,8 +2631,11 @@
 	tlf *= blksize;
 
 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
+	if (IS_ERR(itcw)) {
+		dasd_sfree_request(cqr, startdev);
+		return ERR_PTR(-EINVAL);
+	}
 	cqr->cpaddr = itcw_get_tcw(itcw);
-
 	if (prepare_itcw(itcw, first_trk, last_trk,
 			 cmd, basedev, startdev,
 			 first_offs + 1,
@@ -2385,31 +2648,69 @@
 		dasd_sfree_request(cqr, startdev);
 		return ERR_PTR(-EAGAIN);
 	}
-
 	/*
 	 * A tidaw can address 4k of memory, but must not cross page boundaries
 	 * We can let the block layer handle this by setting
 	 * blk_queue_segment_boundary to page boundaries and
 	 * blk_max_segment_size to page size when setting up the request queue.
+	 * For write requests, a TIDAW must not cross track boundaries, because
+	 * we have to set the CBC flag on the last tidaw for each track.
 	 */
-	rq_for_each_segment(bv, req, iter) {
-		dst = page_address(bv->bv_page) + bv->bv_offset;
-		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
-		if (IS_ERR(last_tidaw))
-			return (struct dasd_ccw_req *)last_tidaw;
+	if (rq_data_dir(req) == WRITE) {
+		new_track = 1;
+		recid = first_rec;
+		rq_for_each_segment(bv, req, iter) {
+			dst = page_address(bv->bv_page) + bv->bv_offset;
+			seg_len = bv->bv_len;
+			while (seg_len) {
+				if (new_track) {
+					trkid = recid;
+					offs = sector_div(trkid, blk_per_trk);
+					count_to_trk_end = blk_per_trk - offs;
+					count = min((last_rec - recid + 1),
+						    (sector_t)count_to_trk_end);
+					len_to_track_end = count * blksize;
+					recid += count;
+					new_track = 0;
+				}
+				part_len = min(seg_len, len_to_track_end);
+				seg_len -= part_len;
+				len_to_track_end -= part_len;
+				/* We need to end the tidaw at track end */
+				if (!len_to_track_end) {
+					new_track = 1;
+					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
+				} else
+					tidaw_flags = 0;
+				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
+							    dst, part_len);
+				if (IS_ERR(last_tidaw))
+					return ERR_PTR(-EINVAL);
+				dst += part_len;
+			}
+		}
+	} else {
+		rq_for_each_segment(bv, req, iter) {
+			dst = page_address(bv->bv_page) + bv->bv_offset;
+			last_tidaw = itcw_add_tidaw(itcw, 0x00,
+						    dst, bv->bv_len);
+			if (IS_ERR(last_tidaw))
+				return ERR_PTR(-EINVAL);
+		}
 	}
-
-	last_tidaw->flags |= 0x80;
+	last_tidaw->flags |= TIDAW_FLAGS_LAST;
+	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
 	itcw_finalize(itcw);
 
 	if (blk_noretry_request(req) ||
 	    block->base->features & DASD_FEATURE_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->cpmode = 1;
 	cqr->startdev = startdev;
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = private->path_data.ppm;
+	cqr->lpm = startdev->path_data.ppm;
 	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -2420,11 +2721,9 @@
 					       struct dasd_block *block,
 					       struct request *req)
 {
-	int tpm, cmdrtd, cmdwtd;
+	int cmdrtd, cmdwtd;
 	int use_prefix;
-#if defined(CONFIG_64BIT)
-	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-#endif
+	int fcx_multitrack;
 	struct dasd_eckd_private *private;
 	struct dasd_device *basedev;
 	sector_t first_rec, last_rec;
@@ -2432,6 +2731,7 @@
 	unsigned int first_offs, last_offs;
 	unsigned int blk_per_trk, blksize;
 	int cdlspecial;
+	unsigned int data_size;
 	struct dasd_ccw_req *cqr;
 
 	basedev = block->base;
@@ -2450,15 +2750,11 @@
 	last_offs = sector_div(last_trk, blk_per_trk);
 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
 
-	/* is transport mode supported? */
-#if defined(CONFIG_64BIT)
-	fcx_in_css = css_general_characteristics.fcx;
-	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
-	fcx_in_features = private->features.feature[40] & 0x80;
-	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
-#else
-	tpm = 0;
-#endif
+	fcx_multitrack = private->features.feature[40] & 0x20;
+	data_size = blk_rq_bytes(req);
+	/* tpm write requests add 4 bytes of CBC data at each track boundary */
+	if (rq_data_dir(req) == WRITE)
+		data_size += (last_trk - first_trk) * 4;
 
 	/* is read track data and write track data in command mode supported? */
 	cmdrtd = private->features.feature[9] & 0x20;
@@ -2468,13 +2764,15 @@
 	cqr = NULL;
 	if (cdlspecial || dasd_page_cache) {
 		/* do nothing, just fall through to the cmd mode single case */
-	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
+	} else if ((data_size <= private->fcx_max_data)
+		   && (fcx_multitrack || (first_trk == last_trk))) {
 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
 						    first_rec, last_rec,
 						    first_trk, last_trk,
 						    first_offs, last_offs,
 						    blk_per_trk, blksize);
-		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
+		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+		    (PTR_ERR(cqr) != -ENOMEM))
 			cqr = NULL;
 	} else if (use_prefix &&
 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
@@ -2484,7 +2782,8 @@
 						   first_trk, last_trk,
 						   first_offs, last_offs,
 						   blk_per_trk, blksize);
-		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
+		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
+		    (PTR_ERR(cqr) != -ENOMEM))
 			cqr = NULL;
 	}
 	if (!cqr)
@@ -2496,6 +2795,135 @@
 	return cqr;
 }
 
+static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
+					       struct dasd_block *block,
+					       struct request *req)
+{
+	struct dasd_eckd_private *private;
+	unsigned long *idaws;
+	struct dasd_device *basedev;
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+	struct req_iterator iter;
+	struct bio_vec *bv;
+	char *dst;
+	unsigned char cmd;
+	unsigned int trkcount;
+	unsigned int seg_len, len_to_track_end;
+	unsigned int first_offs;
+	unsigned int cidaw, cplength, datasize;
+	sector_t first_trk, last_trk;
+	unsigned int pfx_datasize;
+
+	/*
+	 * raw track access needs to be a multiple of 64k and on a 64k boundary
+	 */
+	if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
+		cqr = ERR_PTR(-EINVAL);
+		goto out;
+	}
+	if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
+	     DASD_RAW_SECTORS_PER_TRACK) != 0) {
+		cqr = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
+	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
+		DASD_RAW_SECTORS_PER_TRACK;
+	trkcount = last_trk - first_trk + 1;
+	first_offs = 0;
+	basedev = block->base;
+	private = (struct dasd_eckd_private *) basedev->private;
+
+	if (rq_data_dir(req) == READ)
+		cmd = DASD_ECKD_CCW_READ_TRACK;
+	else if (rq_data_dir(req) == WRITE)
+		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
+	else {
+		cqr = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	/*
+	 * Raw track based I/O needs IDAWs for each page,
+	 * and not just for 64 bit addresses.
+	 */
+	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
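+	/* i.e. sixteen 4k pages for each 64k track image */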
+
+	/* 1x prefix + one read/write ccw per track */
+	cplength = 1 + trkcount;
+
+	/*
+	 * struct PFX_eckd_data has up to 2 bytes of extended parameter data;
+	 * this is needed for write full track and has to be accounted for
+	 * separately
+	 * add 8 instead of 2 to keep the 8 byte boundary
+	 */
+	pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
+
+	datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
+
+	/* Allocate the ccw request. */
+	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
+				   datasize, startdev);
+	if (IS_ERR(cqr))
+		goto out;
+	ccw = cqr->cpaddr;
+
+	if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
+		       basedev, startdev, 1 /* format */, first_offs + 1,
+		       trkcount, 0, 0) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled.
+		 * Try again later.
+		 */
+		dasd_sfree_request(cqr, startdev);
+		cqr = ERR_PTR(-EAGAIN);
+		goto out;
+	}
+
+	idaws = (unsigned long *)(cqr->data + pfx_datasize);
+
+	len_to_track_end = 0;
+
+	rq_for_each_segment(bv, req, iter) {
+		dst = page_address(bv->bv_page) + bv->bv_offset;
+		seg_len = bv->bv_len;
+		if (!len_to_track_end) {
+			ccw[-1].flags |= CCW_FLAG_CC;
+			ccw->cmd_code = cmd;
+			/* maximum 3390 track size */
+			ccw->count = 57326;
+			/* 64k maps to one track */
+			len_to_track_end = 65536;
+			ccw->cda = (__u32)(addr_t)idaws;
+			ccw->flags |= CCW_FLAG_IDA;
+			ccw->flags |= CCW_FLAG_SLI;
+			ccw++;
+		}
+		len_to_track_end -= seg_len;
+		idaws = idal_create_words(idaws, dst, seg_len);
+	}
+
+	if (blk_noretry_request(req) ||
+	    block->base->features & DASD_FEATURE_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	cqr->startdev = startdev;
+	cqr->memdev = startdev;
+	cqr->block = block;
+	cqr->expires = startdev->default_expires * HZ;
+	cqr->lpm = startdev->path_data.ppm;
+	cqr->retries = 256;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+
+	if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
+		cqr = NULL;
+out:
+	return cqr;
+}
+
+
 static int
 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 {
@@ -2600,7 +3028,10 @@
 
 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
 	private->count++;
-	cqr = dasd_eckd_build_cp(startdev, block, req);
+	if ((base->features & DASD_FEATURE_USERAW))
+		cqr = dasd_raw_build_cp(startdev, block, req);
+	else
+		cqr = dasd_eckd_build_cp(startdev, block, req);
 	if (IS_ERR(cqr))
 		private->count--;
 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
@@ -2688,6 +3119,8 @@
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
 
 	if (useglobal)
 		mutex_unlock(&dasd_reserve_mutex);
@@ -2741,6 +3174,8 @@
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
 
 	if (useglobal)
 		mutex_unlock(&dasd_reserve_mutex);
@@ -2793,6 +3228,8 @@
 	cqr->status = DASD_CQR_FILLED;
 
 	rc = dasd_sleep_on_immediatly(cqr);
+	if (!rc)
+		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
 
 	if (useglobal)
 		mutex_unlock(&dasd_reserve_mutex);
@@ -2845,6 +3282,7 @@
 	cqr->memdev = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 	cqr->retries = 5;
 	cqr->expires = 10 * HZ;
 	cqr->buildclk = get_clock();
@@ -3279,10 +3717,8 @@
 {
 	char *page;
 	int len, sl, sct, residual;
-
 	struct tsb *tsb;
-	u8 *sense;
-
+	u8 *sense, *rcq;
 
 	page = (char *) get_zeroed_page(GFP_ATOMIC);
 	if (page == NULL) {
@@ -3348,12 +3784,15 @@
 		case 2: /* ts_ddpc */
 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
-			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
-			       " tsb->tsa.ddpc.rcq:  ");
-			for (sl = 0; sl < 16; sl++) {
+			for (sl = 0; sl < 2; sl++) {
+				len += sprintf(page + len,
+					       KERN_ERR PRINTK_HEADER
+					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
+					       (8 * sl), ((8 * sl) + 7));
+				rcq = tsb->tsa.ddpc.rcq;
 				for (sct = 0; sct < 8; sct++) {
 					len += sprintf(page + len, " %02x",
-						       tsb->tsa.ddpc.rcq[sl]);
+						       rcq[8 * sl + sct]);
 				}
 				len += sprintf(page + len, "\n");
 			}
@@ -3550,6 +3989,7 @@
 	.set_offline = dasd_generic_set_offline,
 	.set_online  = dasd_eckd_set_online,
 	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
 	.freeze      = dasd_generic_pm_freeze,
 	.thaw	     = dasd_generic_restore_device,
 	.restore     = dasd_generic_restore_device,
@@ -3573,10 +4013,11 @@
 	.owner = THIS_MODULE,
 	.name = "ECKD",
 	.ebcname = "ECKD",
-	.max_blocks = 240,
+	.max_blocks = 190,
 	.check_device = dasd_eckd_check_characteristics,
 	.uncheck_device = dasd_eckd_uncheck_device,
 	.do_analysis = dasd_eckd_do_analysis,
+	.verify_path = dasd_eckd_verify_path,
 	.ready_to_online = dasd_eckd_ready_to_online,
 	.online_to_ready = dasd_eckd_online_to_ready,
 	.fill_geometry = dasd_eckd_fill_geometry,
@@ -3586,7 +4027,7 @@
 	.format_device = dasd_eckd_format_device,
 	.erp_action = dasd_eckd_erp_action,
 	.erp_postaction = dasd_eckd_erp_postaction,
-	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
+	.check_for_device_change = dasd_eckd_check_for_device_change,
 	.build_cp = dasd_eckd_build_alias_cp,
 	.free_cp = dasd_eckd_free_alias_cp,
 	.dump_sense = dasd_eckd_dump_sense,
@@ -3609,11 +4050,19 @@
 				   GFP_KERNEL | GFP_DMA);
 	if (!dasd_reserve_req)
 		return -ENOMEM;
+	path_verification_worker = kmalloc(sizeof(*path_verification_worker),
+				   GFP_KERNEL | GFP_DMA);
+	if (!path_verification_worker) {
+		kfree(dasd_reserve_req);
+		return -ENOMEM;
+	}
 	ret = ccw_driver_register(&dasd_eckd_driver);
 	if (!ret)
 		wait_for_device_probe();
-	else
+	else {
+		kfree(path_verification_worker);
 		kfree(dasd_reserve_req);
+	}
 	return ret;
 }
 
@@ -3621,6 +4070,7 @@
 dasd_eckd_cleanup(void)
 {
 	ccw_driver_unregister(&dasd_eckd_driver);
+	kfree(path_verification_worker);
 	kfree(dasd_reserve_req);
 }
 
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 12097c2..4a688a8 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -37,14 +37,17 @@
 #define DASD_ECKD_CCW_WRITE_KD_MT	 0x8d
 #define DASD_ECKD_CCW_READ_KD_MT	 0x8e
 #define DASD_ECKD_CCW_RELEASE		 0x94
+#define DASD_ECKD_CCW_WRITE_FULL_TRACK	 0x95
 #define DASD_ECKD_CCW_READ_CKD_MT	 0x9e
 #define DASD_ECKD_CCW_WRITE_CKD_MT	 0x9d
 #define DASD_ECKD_CCW_WRITE_TRACK_DATA	 0xA5
 #define DASD_ECKD_CCW_READ_TRACK_DATA	 0xA6
 #define DASD_ECKD_CCW_RESERVE		 0xB4
+#define DASD_ECKD_CCW_READ_TRACK	 0xDE
 #define DASD_ECKD_CCW_PFX		 0xE7
 #define DASD_ECKD_CCW_PFX_READ		 0xEA
 #define DASD_ECKD_CCW_RSCK		 0xF9
+#define DASD_ECKD_CCW_RCD		 0xFA
 
 /*
  * Perform Subsystem Function / Sub-Orders
@@ -57,6 +60,11 @@
  */
 #define LV_COMPAT_CYL 0xFFFE
 
+
+#define FCX_MAX_DATA_FACTOR 65536
+#define DASD_ECKD_RCD_DATA_SIZE 256
+
+
 /*****************************************************************************
  * SECTION: Type Definitions
  ****************************************************************************/
@@ -331,12 +339,6 @@
 	__u8 reserved2[22];
 } __attribute__ ((packed));
 
-struct dasd_eckd_path {
-	__u8 opm;
-	__u8 ppm;
-	__u8 npm;
-};
-
 struct dasd_rssd_features {
 	char feature[256];
 } __attribute__((packed));
@@ -442,7 +444,6 @@
 	struct vd_sneq *vdsneq;
 	struct dasd_gneq *gneq;
 
-	struct dasd_eckd_path path_data;
 	struct eckd_count count_area[5];
 	int init_cqr_status;
 	int uses_cdl;
@@ -455,6 +456,8 @@
 	struct alias_pav_group *pavgroup;
 	struct alias_lcu *lcu;
 	int count;
+
+	u32 fcx_max_data;
 };
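/*
 * Editorial sketch (not part of the patch), illustrating how the new
 * fcx_max_data field and FCX_MAX_DATA_FACTOR above plausibly fit together:
 * ccw_device_get_mdc() (added further down in this series) reports how many
 * 64K blocks every path supports, so a per-device transport-mode data limit
 * of mdc * FCX_MAX_DATA_FACTOR bytes is assumed here; the exact driver code
 * that fills fcx_max_data is not shown in this hunk.
 */
#include <stdio.h>

#define FCX_MAX_DATA_FACTOR 65536

int main(void)
{
	int mdc = 4;	/* hypothetical result of ccw_device_get_mdc() */

	printf("assumed fcx_max_data = %d bytes\n", mdc * FCX_MAX_DATA_FACTOR);
	return 0;
}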
 
 
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 83b4615..77f778b 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -473,6 +473,7 @@
 	cqr->retries = 255;
 	cqr->expires = 10 * HZ;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 7656384..0eafe2e 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,8 @@
 		DBF_DEV_EVENT(DBF_DEBUG, device,
                              "default ERP called (%i retries left)",
                              cqr->retries);
-		cqr->lpm    = LPM_ANYPATH;
+		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
+			cqr->lpm = device->path_data.opm;
 		cqr->status = DASD_CQR_FILLED;
         } else {
 		pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bec5486e..be89b3a 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -73,6 +73,7 @@
 	.set_offline = dasd_generic_set_offline,
 	.set_online  = dasd_fba_set_online,
 	.notify      = dasd_generic_notify,
+	.path_event  = dasd_generic_path_event,
 	.freeze      = dasd_generic_pm_freeze,
 	.thaw	     = dasd_generic_restore_device,
 	.restore     = dasd_generic_restore_device,
@@ -164,6 +165,7 @@
 	}
 
 	device->default_expires = DASD_EXPIRES;
+	device->path_data.opm = LPM_ANYPATH;
 
 	readonly = dasd_device_is_ro(device);
 	if (readonly)
@@ -231,24 +233,16 @@
 	return NULL;
 }
 
-static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
-						   struct irb *irb)
+static void dasd_fba_check_for_device_change(struct dasd_device *device,
+					     struct dasd_ccw_req *cqr,
+					     struct irb *irb)
 {
 	char mask;
 
 	/* first of all check for state change pending interrupt */
 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.cmd.dstat & mask) == mask) {
+	if ((irb->scsw.cmd.dstat & mask) == mask)
 		dasd_generic_handle_state_change(device);
-		return;
-	}
-
-	/* check for unsolicited interrupts */
-	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-		    "unsolicited interrupt received");
-	device->discipline->dump_sense_dbf(device, irb, "unsolicited");
-	dasd_schedule_device_bh(device);
-	return;
 };
 
 static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
@@ -596,13 +590,14 @@
 	.max_blocks = 96,
 	.check_device = dasd_fba_check_characteristics,
 	.do_analysis = dasd_fba_do_analysis,
+	.verify_path = dasd_generic_verify_path,
 	.fill_geometry = dasd_fba_fill_geometry,
 	.start_IO = dasd_start_IO,
 	.term_IO = dasd_term_IO,
 	.handle_terminated_request = dasd_fba_handle_terminated_request,
 	.erp_action = dasd_fba_erp_action,
 	.erp_postaction = dasd_fba_erp_postaction,
-	.handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
+	.check_for_device_change = dasd_fba_check_for_device_change,
 	.build_cp = dasd_fba_build_cp,
 	.free_cp = dasd_fba_free_cp,
 	.dump_sense = dasd_fba_dump_sense,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 30a1ca3..5505bc0 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -103,7 +103,7 @@
 	struct block_device *bdev;
 
 	bdev = bdget_disk(block->gdp, 0);
-	if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
+	if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
 		return -ENODEV;
 	/*
 	 * See fs/partition/check.c:register_disk,rescan_partitions
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 500678d..df9f699 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -231,6 +231,11 @@
 /* per dasd_ccw_req flags */
 #define DASD_CQR_FLAGS_USE_ERP   0	/* use ERP for this request */
 #define DASD_CQR_FLAGS_FAILFAST  1	/* FAILFAST */
+#define DASD_CQR_VERIFY_PATH	 2	/* path verification request */
+#define DASD_CQR_ALLOW_SLOCK	 3	/* Try this request even when lock was
+					 * stolen. Should not be combined with
+					 * DASD_CQR_FLAGS_USE_ERP
+					 */
 
 /* Signature for error recovery functions. */
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -287,6 +292,14 @@
 	int (*do_analysis) (struct dasd_block *);
 
 	/*
+	 * This function is called when new paths become available.
+	 * Disciplines may use this callback to do necessary setup work,
+	 * e.g. verify that a new path is compatible with the current
+	 * configuration.
+	 */
+	int (*verify_path)(struct dasd_device *, __u8);
+
+	/*
 	 * Last things to do when a device is set online, and first things
 	 * when it is set offline.
 	 */
@@ -325,9 +338,9 @@
 	void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
 			    struct irb *);
 	void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *);
-
-	void (*handle_unsolicited_interrupt) (struct dasd_device *,
-					      struct irb *);
+	void (*check_for_device_change) (struct dasd_device *,
+					 struct dasd_ccw_req *,
+					 struct irb *);
 
         /* i/o control functions. */
 	int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
@@ -362,6 +375,13 @@
 #define DASD_EER_STATECHANGE 3
 #define DASD_EER_PPRCSUSPEND 4
 
+struct dasd_path {
+	__u8 opm;
+	__u8 tbvpm;
+	__u8 ppm;
+	__u8 npm;
+};
+
 struct dasd_device {
 	/* Block device stuff. */
 	struct dasd_block *block;
@@ -377,6 +397,7 @@
 	struct dasd_discipline *discipline;
 	struct dasd_discipline *base_discipline;
 	char *private;
+	struct dasd_path path_data;
 
 	/* Device state and target state. */
 	int state, target;
@@ -456,6 +477,9 @@
 					 * confuse this with the user specified
 					 * read-only feature.
 					 */
+#define DASD_FLAG_IS_RESERVED	7	/* The device is reserved */
+#define DASD_FLAG_LOCK_STOLEN	8	/* The device lock was stolen */
+
 
 void dasd_put_device_wake(struct dasd_device *);
 
@@ -620,10 +644,15 @@
 int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
 int dasd_generic_set_offline (struct ccw_device *cdev);
 int dasd_generic_notify(struct ccw_device *, int);
+int dasd_generic_last_path_gone(struct dasd_device *);
+int dasd_generic_path_operational(struct dasd_device *);
+
 void dasd_generic_handle_state_change(struct dasd_device *);
 int dasd_generic_pm_freeze(struct ccw_device *);
 int dasd_generic_restore_device(struct ccw_device *);
 enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
+void dasd_generic_path_event(struct ccw_device *, int *);
+int dasd_generic_verify_path(struct dasd_device *, __u8);
 
 int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
 char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 40834f1..dcee3c5 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -2,76 +2,85 @@
 	depends on S390
 
 config TN3270
-	tristate "Support for locally attached 3270 terminals"
+	def_tristate y
+	prompt "Support for locally attached 3270 terminals"
 	depends on CCW
 	help
 	  Include support for IBM 3270 terminals.
 
 config TN3270_TTY
-	tristate "Support for tty input/output on 3270 terminals"
+	def_tristate y
+	prompt "Support for tty input/output on 3270 terminals"
 	depends on TN3270
 	help
 	  Include support for using an IBM 3270 terminal as a Linux tty.
 
 config TN3270_FS
-	tristate "Support for fullscreen applications on 3270 terminals"
+	def_tristate m
+	prompt "Support for fullscreen applications on 3270 terminals"
 	depends on TN3270
 	help
 	  Include support for fullscreen applications on an IBM 3270 terminal.
 
 config TN3270_CONSOLE
-	bool "Support for console on 3270 terminal"
+	def_bool y
+	prompt "Support for console on 3270 terminal"
 	depends on TN3270=y && TN3270_TTY=y
 	help
 	  Include support for using an IBM 3270 terminal as a Linux system
 	  console.  Available only if 3270 support is compiled in statically.
 
 config TN3215
-	bool "Support for 3215 line mode terminal"
+	def_bool y
+	prompt "Support for 3215 line mode terminal"
 	depends on CCW
 	help
 	  Include support for IBM 3215 line-mode terminals.
 
 config TN3215_CONSOLE
-	bool "Support for console on 3215 line mode terminal"
+	def_bool y
+	prompt "Support for console on 3215 line mode terminal"
 	depends on TN3215
 	help
 	  Include support for using an IBM 3215 line-mode terminal as a
 	  Linux system console.
 
 config CCW_CONSOLE
-	bool
-	depends on TN3215_CONSOLE || TN3270_CONSOLE
-	default y
+	def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
 
 config SCLP_TTY
-	bool "Support for SCLP line mode terminal"
+	def_bool y
+	prompt "Support for SCLP line mode terminal"
 	depends on S390
 	help
 	  Include support for IBM SCLP line-mode terminals.
 
 config SCLP_CONSOLE
-	bool "Support for console on SCLP line mode terminal"
+	def_bool y
+	prompt "Support for console on SCLP line mode terminal"
 	depends on SCLP_TTY
 	help
 	  Include support for using an IBM HWC line-mode terminal as the Linux
 	  system console.
 
 config SCLP_VT220_TTY
-	bool "Support for SCLP VT220-compatible terminal"
+	def_bool y
+	prompt "Support for SCLP VT220-compatible terminal"
 	depends on S390
 	help
 	  Include support for an IBM SCLP VT220-compatible terminal.
 
 config SCLP_VT220_CONSOLE
-	bool "Support for console on SCLP VT220-compatible terminal"
+	def_bool y
+	prompt "Support for console on SCLP VT220-compatible terminal"
 	depends on SCLP_VT220_TTY
 	help
 	  Include support for using an IBM SCLP VT220-compatible terminal as a
 	  Linux system console.
 
 config SCLP_CPI
-	tristate "Control-Program Identification"
+	def_tristate m
+	prompt "Control-Program Identification"
 	depends on S390
 	help
 	  This option enables the hardware console interface for system
@@ -83,7 +92,8 @@
 	  need this feature and intend to run your kernel in LPAR.
 
 config SCLP_ASYNC
-	tristate "Support for Call Home via Asynchronous SCLP Records"
+	def_tristate m
+	prompt "Support for Call Home via Asynchronous SCLP Records"
 	depends on S390
 	help
 	  This option enables the call home function, which is able to inform
@@ -93,7 +103,8 @@
 	  need this feature and intend to run your kernel in LPAR.
 
 config S390_TAPE
-	tristate "S/390 tape device support"
+	def_tristate m
+	prompt "S/390 tape device support"
 	depends on CCW
 	help
 	  Select this option if you want to access channel-attached tape
@@ -109,7 +120,8 @@
 	depends on S390_TAPE
 
 config S390_TAPE_BLOCK
-	bool "Support for tape block devices"
+	def_bool y
+	prompt "Support for tape block devices"
 	depends on S390_TAPE && BLOCK
 	help
 	  Select this option if you want to access your channel-attached tape
@@ -123,7 +135,8 @@
 	depends on S390_TAPE
 
 config S390_TAPE_34XX
-	tristate "Support for 3480/3490 tape hardware"
+	def_tristate m
+	prompt "Support for 3480/3490 tape hardware"
 	depends on S390_TAPE
 	help
 	  Select this option if you want to access IBM 3480/3490 magnetic
@@ -131,7 +144,8 @@
 	  It is safe to say "Y" here.
 
 config S390_TAPE_3590
-	tristate "Support for 3590 tape hardware"
+	def_tristate m
+	prompt "Support for 3590 tape hardware"
 	depends on S390_TAPE
 	help
 	  Select this option if you want to access IBM 3590 magnetic
@@ -139,7 +153,8 @@
 	  It is safe to say "Y" here.
 
 config VMLOGRDR
-	tristate "Support for the z/VM recording system services (VM only)"
+	def_tristate m
+	prompt "Support for the z/VM recording system services (VM only)"
 	depends on IUCV
 	help
 	  Select this option if you want to be able to receive records collected
@@ -148,29 +163,31 @@
 	  This driver depends on the IUCV support driver.
 
 config VMCP
-	bool "Support for the z/VM CP interface"
+	def_bool y
+	prompt "Support for the z/VM CP interface"
 	depends on S390
 	help
 	  Select this option if you want to be able to interact with the control
 	  program on z/VM
 
 config MONREADER
-	tristate "API for reading z/VM monitor service records"
+	def_tristate m
+	prompt "API for reading z/VM monitor service records"
 	depends on IUCV
 	help
 	  Character device driver for reading z/VM monitor service records
 
 config MONWRITER
-	tristate "API for writing z/VM monitor service records"
+	def_tristate m
+	prompt "API for writing z/VM monitor service records"
 	depends on S390
-	default "m"
 	help
 	  Character device driver for writing z/VM monitor service records
 
 config S390_VMUR
-	tristate "z/VM unit record device driver"
+	def_tristate m
+	prompt "z/VM unit record device driver"
 	depends on S390
-	default "m"
 	help
 	  Character device driver for z/VM reader, puncher and printer.
 
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 59ec073..3fb4335 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,6 +9,7 @@
  *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kdev_t.h>
@@ -361,6 +362,7 @@
 	int cstat, dstat;
 	int count;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
 	raw = dev_get_drvdata(&cdev->dev);
 	req = (struct raw3215_req *) intparm;
 	cstat = irb->scsw.cmd.cstat;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 2a4c566..96ba2fd 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,6 +7,7 @@
  *     Copyright IBM Corp. 2003, 2009
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -329,6 +330,7 @@
 	struct raw3270_request *rq;
 	int rc;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
 	rp = dev_get_drvdata(&cdev->dev);
 	if (!rp)
 		return;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 35cc468..b76c61f 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -7,6 +7,7 @@
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
@@ -18,16 +19,14 @@
 #include <linux/suspend.h>
 #include <linux/completion.h>
 #include <linux/platform_device.h>
-#include <asm/types.h>
 #include <asm/s390_ext.h>
+#include <asm/types.h>
+#include <asm/irq.h>
 
 #include "sclp.h"
 
 #define SCLP_HEADER		"sclp: "
 
-/* Structure for register_early_external_interrupt. */
-static ext_int_info_t ext_int_info_hwc;
-
 /* Lock to protect internal data consistency. */
 static DEFINE_SPINLOCK(sclp_lock);
 
@@ -402,6 +401,7 @@
 	u32 finished_sccb;
 	u32 evbuf_pending;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
 	spin_lock(&sclp_lock);
 	finished_sccb = param32 & 0xfffffff8;
 	evbuf_pending = param32 & 0x3;
@@ -824,6 +824,7 @@
 {
 	u32 finished_sccb;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
 	finished_sccb = param32 & 0xfffffff8;
 	/* Is this the interrupt we are waiting for? */
 	if (finished_sccb == 0)
@@ -866,8 +867,7 @@
 
 	spin_lock_irqsave(&sclp_lock, flags);
 	/* Prepare init mask command */
-	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
-					       &ext_int_info_hwc);
+	rc = register_external_interrupt(0x2401, sclp_check_handler);
 	if (rc) {
 		spin_unlock_irqrestore(&sclp_lock, flags);
 		return rc;
@@ -900,8 +900,7 @@
 		} else
 			rc = -EBUSY;
 	}
-	unregister_early_external_interrupt(0x2401, sclp_check_handler,
-					    &ext_int_info_hwc);
+	unregister_external_interrupt(0x2401, sclp_check_handler);
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	return rc;
 }
@@ -1064,8 +1063,7 @@
 	if (rc)
 		goto fail_init_state_uninitialized;
 	/* Register interrupt handler */
-	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
-					       &ext_int_info_hwc);
+	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
 	if (rc)
 		goto fail_unregister_reboot_notifier;
 	sclp_init_state = sclp_init_state_initialized;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index b497afe..16e232a 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -33,6 +33,7 @@
 	int cpu;
 	struct sys_device *sysdev;
 
+	s390_adjust_jiffies();
 	pr_warning("cpu capability changed.\n");
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index deff2c3..fbe361f 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -24,6 +24,8 @@
 #include "tape_std.h"
 #include "tape_3590.h"
 
+static struct workqueue_struct *tape_3590_wq;
+
 /*
  * Pointer to debug area.
  */
@@ -613,7 +615,7 @@
 	p->device = tape_get_device(device);
 	p->op = op;
 
-	schedule_work(&p->work);
+	queue_work(tape_3590_wq, &p->work);
 	return 0;
 }
 
@@ -1629,7 +1631,7 @@
 static void
 tape_3590_cleanup_device(struct tape_device *device)
 {
-	flush_scheduled_work();
+	flush_workqueue(tape_3590_wq);
 	tape_std_unassign(device);
 
 	kfree(device->discdata);
@@ -1733,11 +1735,17 @@
 #endif
 
 	DBF_EVENT(3, "3590 init\n");
+
+	tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+	if (!tape_3590_wq)
+		return -ENOMEM;
+
 	/* Register driver for 3590 tapes. */
 	rc = ccw_driver_register(&tape_3590_driver);
-	if (rc)
+	if (rc) {
+		destroy_workqueue(tape_3590_wq);
 		DBF_EVENT(3, "3590 init failed\n");
-	else
+	} else
 		DBF_EVENT(3, "3590 registered\n");
 	return rc;
 }
@@ -1746,7 +1754,7 @@
 tape_3590_exit(void)
 {
 	ccw_driver_unregister(&tape_3590_driver);
-
+	destroy_workqueue(tape_3590_wq);
 	debug_unregister(TAPE_DBF_AREA);
 }
 
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f0fa9ca..55d2d0f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -264,7 +264,7 @@
 void
 tapeblock_cleanup_device(struct tape_device *device)
 {
-	flush_scheduled_work();
+	flush_work_sync(&device->blk_data.requeue_task);
 	tape_put_device(device);
 
 	if (!device->blk_data.disk) {
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 707b7f4..9e32780 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/major.h>
-#include <linux/kobject.h>
 #include <linux/kobj_map.h>
 #include <linux/cdev.h>
 
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index b3a3e8e..7978a0a 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -14,6 +14,7 @@
 #define KMSG_COMPONENT "tape"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>	     // for kernel parameters
 #include <linux/kmod.h>	     // for requesting modules
@@ -1114,6 +1115,7 @@
 	struct tape_request *request;
 	int rc;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
 	device = dev_get_drvdata(&cdev->dev);
 	if (device == NULL) {
 		return;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index f7e4ae6..caef175 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,6 +11,7 @@
 #define KMSG_COMPONENT "vmur"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/cdev.h>
 #include <linux/slab.h>
 
@@ -302,6 +303,7 @@
 {
 	struct urdev *urd;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
 	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
 	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
 	      irb->scsw.cmd.count);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 97b25d6..2864581 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -67,6 +67,27 @@
 }
 
 /*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+	struct ccw_device *cdev;
+	int i;
+
+	for (i = 0; i < gdev->count; i++) {
+		cdev = gdev->cdev[i];
+		if (!cdev)
+			continue;
+		spin_lock_irq(cdev->ccwlock);
+		dev_set_drvdata(&cdev->dev, NULL);
+		spin_unlock_irq(cdev->ccwlock);
+		gdev->cdev[i] = NULL;
+		put_device(&cdev->dev);
+	}
+}
+
+/*
  * Provide an 'ungroup' attribute so the user can remove group devices no
  * longer needed or accidentially created. Saves memory :)
  */
@@ -78,6 +99,7 @@
 	if (device_is_registered(&gdev->dev)) {
 		__ccwgroup_remove_symlinks(gdev);
 		device_unregister(dev);
+		__ccwgroup_remove_cdev_refs(gdev);
 	}
 	mutex_unlock(&gdev->reg_mutex);
 }
@@ -116,21 +138,7 @@
 static void
 ccwgroup_release (struct device *dev)
 {
-	struct ccwgroup_device *gdev;
-	int i;
-
-	gdev = to_ccwgroupdev(dev);
-
-	for (i = 0; i < gdev->count; i++) {
-		if (gdev->cdev[i]) {
-			spin_lock_irq(gdev->cdev[i]->ccwlock);
-			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
-				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
-			spin_unlock_irq(gdev->cdev[i]->ccwlock);
-			put_device(&gdev->cdev[i]->dev);
-		}
-	}
-	kfree(gdev);
+	kfree(to_ccwgroupdev(dev));
 }
 
 static int
@@ -639,6 +647,7 @@
 		mutex_lock(&gdev->reg_mutex);
 		__ccwgroup_remove_symlinks(gdev);
 		device_unregister(dev);
+		__ccwgroup_remove_cdev_refs(gdev);
 		mutex_unlock(&gdev->reg_mutex);
 		put_device(dev);
 	}
@@ -660,25 +669,6 @@
 	return 0;
 }
 
-static struct ccwgroup_device *
-__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
-{
-	struct ccwgroup_device *gdev;
-
-	gdev = dev_get_drvdata(&cdev->dev);
-	if (gdev) {
-		if (get_device(&gdev->dev)) {
-			mutex_lock(&gdev->reg_mutex);
-			if (device_is_registered(&gdev->dev))
-				return gdev;
-			mutex_unlock(&gdev->reg_mutex);
-			put_device(&gdev->dev);
-		}
-		return NULL;
-	}
-	return NULL;
-}
-
 /**
  * ccwgroup_remove_ccwdev() - remove function for slave devices
  * @cdev: ccw device to be removed
@@ -694,13 +684,25 @@
 	/* Ignore offlining errors, device is gone anyway. */
 	ccw_device_set_offline(cdev);
 	/* If one of its devices is gone, the whole group is done for. */
-	gdev = __ccwgroup_get_gdev_by_cdev(cdev);
-	if (gdev) {
+	spin_lock_irq(cdev->ccwlock);
+	gdev = dev_get_drvdata(&cdev->dev);
+	if (!gdev) {
+		spin_unlock_irq(cdev->ccwlock);
+		return;
+	}
+	/* Get ccwgroup device reference for local processing. */
+	get_device(&gdev->dev);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Unregister group device. */
+	mutex_lock(&gdev->reg_mutex);
+	if (device_is_registered(&gdev->dev)) {
 		__ccwgroup_remove_symlinks(gdev);
 		device_unregister(&gdev->dev);
-		mutex_unlock(&gdev->reg_mutex);
-		put_device(&gdev->dev);
+		__ccwgroup_remove_cdev_refs(gdev);
 	}
+	mutex_unlock(&gdev->reg_mutex);
+	/* Release ccwgroup device reference for local processing. */
+	put_device(&gdev->dev);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1aaddea..0689fcf 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -695,6 +695,25 @@
 	return ret;
 }
 
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	struct chsc_scpd *scpd_area;
+	int ret;
+
+	spin_lock_irq(&chsc_page_lock);
+	scpd_area = chsc_page;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
+	if (ret)
+		goto out;
+	chsc_resp = (void *)&scpd_area->response;
+	memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+	spin_unlock_irq(&chsc_page_lock);
+	return ret;
+}
+
 static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
 			  struct cmg_chars *chars)
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 6693f5e..3f15b2a 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -35,6 +35,22 @@
 	u8 chpp;
 } __attribute__ ((packed));
 
+struct channel_path_desc_fmt1 {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u32:24;
+	u8 chpp;
+	u32 unused[3];
+	u16 mdc;
+	u16:13;
+	u8 r:1;
+	u8 s:1;
+	u8 f:1;
+	u32 zeros[2];
+} __attribute__ ((packed));
+
 struct channel_path;
 
 struct css_chsc_char {
@@ -92,6 +108,8 @@
 				     int c, int m, void *page);
 int chsc_determine_base_channel_path_desc(struct chp_id chpid,
 					  struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f4e6cf3..430f875 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -619,7 +619,7 @@
 	s390_idle_check(regs, S390_lowcore.int_clock,
 			S390_lowcore.async_enter_timer);
 	irq_enter();
-	__get_cpu_var(s390_idle).nohz_delay = 1;
+	__this_cpu_write(s390_idle.nohz_delay, 1);
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 825951b..24d8e97 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -618,6 +618,7 @@
 static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
 	struct subchannel_id mchk_schid;
+	struct subchannel *sch;
 
 	if (overflow) {
 		css_schedule_eval_all();
@@ -637,6 +638,13 @@
 	if (crw1)
 		mchk_schid.ssid = (crw1->rsid >> 4) & 3;
 
+	if (crw0->erc == CRW_ERC_PMOD) {
+		sch = get_subchannel_by_schid(mchk_schid);
+		if (sch) {
+			css_update_ssd_info(sch);
+			put_device(&sch->dev);
+		}
+	}
 	/*
 	 * Since we are always presented with IPI in the CRW, we have to
 	 * use stsch() to find out if the subchannel in question has come
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e8391b8..b7eaff9 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1835,6 +1835,7 @@
 	 * available again. Kick re-detection.
 	 */
 	cdev->private->flags.resuming = 1;
+	cdev->private->path_new_mask = LPM_ANYPATH;
 	css_schedule_eval(sch->schid);
 	spin_unlock_irq(sch->lock);
 	css_complete_work();
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6da8454..651976b 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -687,6 +687,46 @@
 EXPORT_SYMBOL(ccw_device_tm_start_timeout);
 
 /**
+ * ccw_device_get_mdc - accumulate max data count
+ * @cdev: ccw device for which the max data count is accumulated
+ * @mask: mask of paths to use
+ *
+ * Return the number of 64K-byte blocks all paths at least support
+ * for a transport command. Return values <= 0 indicate failures.
+ */
+int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_path_desc_fmt1 desc;
+	struct chp_id chpid;
+	int mdc = 0, ret, i;
+
+	/* Adjust requested path mask to excluded varied off paths. */
+	if (mask)
+		mask &= sch->lpm;
+	else
+		mask = sch->lpm;
+
+	chp_id_init(&chpid);
+	for (i = 0; i < 8; i++) {
+		if (!(mask & (0x80 >> i)))
+			continue;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
+		if (ret)
+			return ret;
+		if (!desc.f)
+			return 0;
+		if (!desc.r)
+			mdc = 1;
+		mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
+	}
+
+	return mdc;
+}
+EXPORT_SYMBOL(ccw_device_get_mdc);
+
+/**
  * ccw_device_tm_intrg - perform interrogate function
  * @cdev: ccw device on which to perform the interrogate function
  *
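/*
 * Editorial sketch (not part of the patch): the accumulation rule used by
 * ccw_device_get_mdc() above, rewritten as a standalone program. The struct
 * below is a stand-in for channel_path_desc_fmt1 and only models the f, r
 * and mdc fields referenced in the hunk.
 */
#include <stdio.h>

struct fmt1_desc {
	int f;			/* fcx supported on this path */
	int r;			/* mirrors desc.r in the hunk */
	unsigned short mdc;	/* max data count, in 64K blocks */
};

static int accumulate_mdc(const struct fmt1_desc *desc, unsigned char mask)
{
	int mdc = 0, i;

	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;	/* path not selected */
		if (!desc[i].f)
			return 0;	/* one path without fcx -> no support */
		if (!desc[i].r)
			mdc = 1;	/* same special case as the hunk */
		mdc = mdc ? (mdc < desc[i].mdc ? mdc : (int)desc[i].mdc)
			  : desc[i].mdc;
	}
	return mdc;	/* 64K blocks that all selected paths support */
}

int main(void)
{
	struct fmt1_desc d[8] = { [0] = { 1, 1, 4 }, [1] = { 1, 1, 2 } };

	/* paths 0 and 1 selected: the weaker path wins, prints 2 */
	printf("mdc = %d\n", accumulate_mdc(d, 0xc0));
	return 0;
}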
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index a0ae295..358ee16 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -93,6 +93,7 @@
 size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
 {
 	size_t len;
+	int cross_count;
 
 	/* Main data. */
 	len = sizeof(struct itcw);
@@ -105,12 +106,27 @@
 		       /* TSB */ sizeof(struct tsb) +
 		       /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
 	}
+
 	/* Maximum required alignment padding. */
 	len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
-	/* Maximum padding for structures that may not cross 4k boundary. */
-	if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
-		len += max(max_tidaws, intrg_max_tidaws) *
-		       sizeof(struct tidaw) - 1;
+
+	/* TIDAW lists may not cross a 4k boundary. To cross a
+	 * boundary we need to add a TTIC TIDAW. We need to reserve
+	 * one additional TIDAW for a TTIC that we may need to add due
+	 * to the placement of the data chunk in memory, and a further
+	 * TIDAW for each page boundary that the TIDAW list may cross
+	 * due to its own size.
+	 */
+	if (max_tidaws) {
+		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+		len += cross_count * sizeof(struct tidaw);
+	}
+	if (intrg_max_tidaws) {
+		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+		len += cross_count * sizeof(struct tidaw);
+	}
 	return len;
 }
 EXPORT_SYMBOL(itcw_calc_size);
@@ -165,6 +181,7 @@
 	void *chunk;
 	addr_t start;
 	addr_t end;
+	int cross_count;
 
 	/* Check for 2G limit. */
 	start = (addr_t) buffer;
@@ -177,8 +194,17 @@
 	if (IS_ERR(chunk))
 		return chunk;
 	itcw = chunk;
-	itcw->max_tidaws = max_tidaws;
-	itcw->intrg_max_tidaws = intrg_max_tidaws;
+	/* allow for TTIC tidaws that may be needed to cross a page boundary */
+	cross_count = 0;
+	if (max_tidaws)
+		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+	itcw->max_tidaws = max_tidaws + cross_count;
+	cross_count = 0;
+	if (intrg_max_tidaws)
+		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
+				   >> PAGE_SHIFT);
+	itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
 	/* Main TCW. */
 	chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
 	if (IS_ERR(chunk))
@@ -198,7 +224,7 @@
 	/* Data TIDAL. */
 	if (max_tidaws > 0) {
 		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
-				  max_tidaws, 16, 1);
+				  itcw->max_tidaws, 16, 0);
 		if (IS_ERR(chunk))
 			return chunk;
 		tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@
 	/* Interrogate data TIDAL. */
 	if (intrg && (intrg_max_tidaws > 0)) {
 		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
-				  intrg_max_tidaws, 16, 1);
+				  itcw->intrg_max_tidaws, 16, 0);
 		if (IS_ERR(chunk))
 			return chunk;
 		tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@
  * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
  * available space.
  *
- * Note: the tidaw-list is assumed to be contiguous with no ttics. The
- * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
+ * Note: TTIC tidaws are automatically added when needed, so explicitly calling
+ * this interface with the TTIC flag is not supported. The last-tidaw flag
+ * for the last tidaw in the list will be set by itcw_finalize.
  */
 struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
 {
+	struct tidaw *following;
+
 	if (itcw->num_tidaws >= itcw->max_tidaws)
 		return ERR_PTR(-ENOSPC);
+	/*
+	 * Is the tidaw, which follows the one we are about to fill, on the next
+	 * page? Then we have to insert a TTIC tidaw first, that points to the
+	 * page? Then we have to insert a TTIC tidaw first, which points to the
+	 */
+	following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+		+ itcw->num_tidaws + 1;
+	if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
+		tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
+			      TIDAW_FLAGS_TTIC, following, 0);
+		if (itcw->num_tidaws >= itcw->max_tidaws)
+			return ERR_PTR(-ENOSPC);
+	}
 	return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
 }
 EXPORT_SYMBOL(itcw_add_tidaw);
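/*
 * Editorial sketch (not part of the patch): the TTIC reservation arithmetic
 * from itcw_calc_size()/itcw_set_data() above. One extra TIDAW is reserved
 * for a TTIC forced by where the list happens to start in memory, plus one
 * for each page boundary the list can cross due to its own size. The 4K page
 * size and the 16-byte TIDAW size below are assumptions for illustration.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12	/* 4K pages */
#define SKETCH_TIDAW_SIZE	16	/* stand-in for sizeof(struct tidaw) */

static int ttic_reserve(int max_tidaws)
{
	if (!max_tidaws)
		return 0;
	return 1 + ((max_tidaws * SKETCH_TIDAW_SIZE - 1) >> SKETCH_PAGE_SHIFT);
}

int main(void)
{
	/* a list that fits inside one page only needs the placement TTIC */
	printf("%d\n", ttic_reserve(190));	/* prints 1 */
	/* a list that spills onto a second page needs one more */
	printf("%d\n", ttic_reserve(257));	/* prints 2 */
	return 0;
}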
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 0f4ef87..7bc643f 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -91,6 +91,12 @@
 #define AC1_SC_QEBSM_AVAILABLE		0x02	/* available for subchannel */
 #define AC1_SC_QEBSM_ENABLED		0x01	/* enabled for subchannel */
 
+/* SIGA flags */
+#define QDIO_SIGA_WRITE		0x00
+#define QDIO_SIGA_READ		0x01
+#define QDIO_SIGA_SYNC		0x02
+#define QDIO_SIGA_QEBSM_FLAG	0x80
+
 #ifdef CONFIG_64BIT
 static inline int do_sqbs(u64 token, unsigned char state, int queue,
 			  int *start, int *count)
@@ -142,10 +148,9 @@
 	u8 input:1;
 	u8 output:1;
 	u8 sync:1;
-	u8 no_sync_ti:1;
-	u8 no_sync_out_ti:1;
-	u8 no_sync_out_pci:1;
-	u8:2;
+	u8 sync_after_ai:1;
+	u8 sync_out_after_pci:1;
+	u8:3;
 } __attribute__ ((packed));
 
 struct chsc_ssqd_area {
@@ -202,6 +207,7 @@
 	unsigned int inbound_queue_full;
 	unsigned int outbound_call;
 	unsigned int outbound_handler;
+	unsigned int outbound_queue_full;
 	unsigned int fast_requeue;
 	unsigned int target_full;
 	unsigned int eqbs;
@@ -245,10 +251,10 @@
 struct qdio_output_q {
 	/* PCIs are enabled for the queue */
 	int pci_out_enabled;
-	/* IQDIO: output multiple buffers (enhanced SIGA) */
-	int use_enh_siga;
 	/* timer to check for more outbound work */
 	struct timer_list timer;
+	/* used SBALs before tasklet schedule */
+	int scan_threshold;
 };
 
 /*
@@ -383,12 +389,13 @@
 	(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
 #define is_qebsm(q)			(q->irq_ptr->sch_token != 0)
 
-#define need_siga_sync_thinint(q)	(!q->irq_ptr->siga_flag.no_sync_ti)
-#define need_siga_sync_out_thinint(q)	(!q->irq_ptr->siga_flag.no_sync_out_ti)
 #define need_siga_in(q)			(q->irq_ptr->siga_flag.input)
 #define need_siga_out(q)		(q->irq_ptr->siga_flag.output)
-#define need_siga_sync(q)		(q->irq_ptr->siga_flag.sync)
-#define siga_syncs_out_pci(q)		(q->irq_ptr->siga_flag.no_sync_out_pci)
+#define need_siga_sync(q)		(unlikely(q->irq_ptr->siga_flag.sync))
+#define need_siga_sync_after_ai(q)	\
+	(unlikely(q->irq_ptr->siga_flag.sync_after_ai))
+#define need_siga_sync_out_after_pci(q)	\
+	(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
 
 #define for_each_input_queue(irq_ptr, q, i)	\
 	for (i = 0, q = irq_ptr->input_qs[0];	\
@@ -423,9 +430,9 @@
 
 extern struct indicator_t *q_indicators;
 
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int shared_ind(u32 *dsci)
 {
-	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+	return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
 /* prototypes for thin interrupt */
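/*
 * Editorial sketch (not part of the patch): how the new QDIO_SIGA_* function
 * codes defined above are combined. Under QEBSM the subchannel token is used
 * instead of the subchannel id and QDIO_SIGA_QEBSM_FLAG is or'ed into the
 * function code, which is what the reworked qdio_siga_*() helpers further
 * down do before issuing the instruction.
 */
#include <stdio.h>

#define QDIO_SIGA_WRITE		0x00
#define QDIO_SIGA_READ		0x01
#define QDIO_SIGA_SYNC		0x02
#define QDIO_SIGA_QEBSM_FLAG	0x80

static unsigned int siga_fc(unsigned int base_fc, int is_qebsm)
{
	return is_qebsm ? (base_fc | QDIO_SIGA_QEBSM_FLAG) : base_fc;
}

int main(void)
{
	printf("SIGA-R:        0x%02x\n", siga_fc(QDIO_SIGA_READ, 0));	/* 0x01 */
	printf("SIGA-R, QEBSM: 0x%02x\n", siga_fc(QDIO_SIGA_READ, 1));	/* 0x81 */
	printf("SIGA-S, QEBSM: 0x%02x\n", siga_fc(QDIO_SIGA_SYNC, 1));	/* 0x82 */
	return 0;
}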
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 28868e7..f8b03a6 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -151,6 +151,7 @@
 	"Inbound queue full",
 	"Outbound calls",
 	"Outbound handler",
+	"Outbound queue full",
 	"Outbound fast_requeue",
 	"Outbound target_full",
 	"QEBSM eqbs",
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 5fcfa7f..e9fff2b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
+#include <linux/kernel_stat.h>
 #include <asm/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>
@@ -29,11 +30,12 @@
 MODULE_DESCRIPTION("QDIO base support");
 MODULE_LICENSE("GPL");
 
-static inline int do_siga_sync(struct subchannel_id schid,
-			       unsigned int out_mask, unsigned int in_mask)
+static inline int do_siga_sync(unsigned long schid,
+			       unsigned int out_mask, unsigned int in_mask,
+			       unsigned int fc)
 {
-	register unsigned long __fc asm ("0") = 2;
-	register struct subchannel_id __schid asm ("1") = schid;
+	register unsigned long __fc asm ("0") = fc;
+	register unsigned long __schid asm ("1") = schid;
 	register unsigned long out asm ("2") = out_mask;
 	register unsigned long in asm ("3") = in_mask;
 	int cc;
@@ -47,10 +49,11 @@
 	return cc;
 }
 
-static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
+static inline int do_siga_input(unsigned long schid, unsigned int mask,
+				unsigned int fc)
 {
-	register unsigned long __fc asm ("0") = 1;
-	register struct subchannel_id __schid asm ("1") = schid;
+	register unsigned long __fc asm ("0") = fc;
+	register unsigned long __schid asm ("1") = schid;
 	register unsigned long __mask asm ("2") = mask;
 	int cc;
 
@@ -279,16 +282,20 @@
 static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 			  unsigned int input)
 {
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_SYNC;
 	int cc;
 
-	if (!need_siga_sync(q))
-		return 0;
-
 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
 	qperf_inc(q, siga_sync);
 
-	cc = do_siga_sync(q->irq_ptr->schid, output, input);
-	if (cc)
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+
+	cc = do_siga_sync(schid, output, input, fc);
+	if (unlikely(cc))
 		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
 	return cc;
 }
@@ -301,38 +308,22 @@
 		return qdio_siga_sync(q, q->mask, 0);
 }
 
-static inline int qdio_siga_sync_out(struct qdio_q *q)
-{
-	return qdio_siga_sync(q, ~0U, 0);
-}
-
-static inline int qdio_siga_sync_all(struct qdio_q *q)
-{
-	return qdio_siga_sync(q, ~0U, ~0U);
-}
-
 static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 {
-	unsigned long schid;
-	unsigned int fc = 0;
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int cc;
 
-	if (q->u.out.use_enh_siga)
-		fc = 3;
-
 	if (is_qebsm(q)) {
 		schid = q->irq_ptr->sch_token;
-		fc |= 0x80;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
-	else
-		schid = *((u32 *)&q->irq_ptr->schid);
-
 again:
 	cc = do_siga_output(schid, q->mask, busy_bit, fc);
 
 	/* hipersocket busy condition */
-	if (*busy_bit) {
+	if (unlikely(*busy_bit)) {
 		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
 
 		if (!start_time) {
@@ -347,32 +338,41 @@
 
 static inline int qdio_siga_input(struct qdio_q *q)
 {
+	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+	unsigned int fc = QDIO_SIGA_READ;
 	int cc;
 
 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
 	qperf_inc(q, siga_read);
 
-	cc = do_siga_input(q->irq_ptr->schid, q->mask);
-	if (cc)
+	if (is_qebsm(q)) {
+		schid = q->irq_ptr->sch_token;
+		fc |= QDIO_SIGA_QEBSM_FLAG;
+	}
+
+	cc = do_siga_input(schid, q->mask, fc);
+	if (unlikely(cc))
 		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
 	return cc;
 }
 
-static inline void qdio_sync_after_thinint(struct qdio_q *q)
+#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
+#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
+
+static inline void qdio_sync_queues(struct qdio_q *q)
 {
-	if (pci_out_supported(q)) {
-		if (need_siga_sync_thinint(q))
-			qdio_siga_sync_all(q);
-		else if (need_siga_sync_out_thinint(q))
-			qdio_siga_sync_out(q);
-	} else
+	/* PCI capable outbound queues will also be scanned so sync them too */
+	if (pci_out_supported(q))
+		qdio_siga_sync_all(q);
+	else
 		qdio_siga_sync_q(q);
 }
 
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 			unsigned char *state)
 {
-	qdio_siga_sync_q(q);
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
 	return get_buf_states(q, bufnr, state, 1, 0);
 }
 
@@ -549,7 +549,8 @@
 	if (!atomic_read(&q->nr_buf_used))
 		return 1;
 
-	qdio_siga_sync_q(q);
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
 	get_buf_state(q, q->first_to_check, &state, 0);
 
 	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -644,9 +645,12 @@
 	int count, stop;
 	unsigned char state;
 
-	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
-	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
-		qdio_siga_sync_q(q);
+	if (need_siga_sync(q))
+		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
+		    !pci_out_supported(q)) ||
+		    (queue_type(q) == QDIO_IQDIO_QFMT &&
+		    multicast_outbound(q)))
+			qdio_siga_sync_q(q);
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -818,7 +822,8 @@
 static void __tiqdio_inbound_processing(struct qdio_q *q)
 {
 	qperf_inc(q, tasklet_inbound);
-	qdio_sync_after_thinint(q);
+	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
+		qdio_sync_queues(q);
 
 	/*
 	 * The interrupt could be caused by a PCI request. Check the
@@ -898,16 +903,14 @@
 			tasklet_schedule(&q->tasklet);
 	}
 
-	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+	if (!pci_out_supported(q))
 		return;
 
 	for_each_output_queue(irq_ptr, q, i) {
 		if (qdio_outbound_q_done(q))
 			continue;
-
-		if (!siga_syncs_out_pci(q))
+		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
 			qdio_siga_sync_q(q);
-
 		tasklet_schedule(&q->tasklet);
 	}
 }
@@ -970,6 +973,7 @@
 		return;
 	}
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
 	if (irq_ptr->perf_stat_enabled)
 		irq_ptr->perf_stat.qdio_int++;
 
@@ -1273,7 +1277,6 @@
 	}
 
 	qdio_setup_ssqd_info(irq_ptr);
-	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
 	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
 
 	/* qebsm is now setup if available, initialize buffer states */
@@ -1445,52 +1448,38 @@
 	used = atomic_add_return(count, &q->nr_buf_used);
 	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
 
+	if (used == QDIO_MAX_BUFFERS_PER_Q)
+		qperf_inc(q, outbound_queue_full);
+
 	if (callflags & QDIO_FLAG_PCI_OUT) {
 		q->u.out.pci_out_enabled = 1;
 		qperf_inc(q, pci_request_int);
-	}
-	else
+	} else
 		q->u.out.pci_out_enabled = 0;
 
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
-		if (multicast_outbound(q))
+		/* One SIGA-W per buffer required for unicast HiperSockets. */
+		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+
+		rc = qdio_kick_outbound_q(q);
+	} else if (need_siga_sync(q)) {
+		rc = qdio_siga_sync_q(q);
+	} else {
+		/* try to fast requeue buffers */
+		get_buf_state(q, prev_buf(bufnr), &state, 0);
+		if (state != SLSB_CU_OUTPUT_PRIMED)
 			rc = qdio_kick_outbound_q(q);
 		else
-			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
-			    (count > 1) &&
-			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
-				/* exploit enhanced SIGA */
-				q->u.out.use_enh_siga = 1;
-				rc = qdio_kick_outbound_q(q);
-			} else {
-				/*
-				* One siga-w per buffer required for unicast
-				* HiperSockets.
-				*/
-				q->u.out.use_enh_siga = 0;
-				while (count--) {
-					rc = qdio_kick_outbound_q(q);
-					if (rc)
-						goto out;
-				}
-			}
-		goto out;
+			qperf_inc(q, fast_requeue);
 	}
 
-	if (need_siga_sync(q)) {
-		qdio_siga_sync_q(q);
-		goto out;
-	}
-
-	/* try to fast requeue buffers */
-	get_buf_state(q, prev_buf(bufnr), &state, 0);
-	if (state != SLSB_CU_OUTPUT_PRIMED)
-		rc = qdio_kick_outbound_q(q);
+	/* in case of SIGA errors we must process the error immediately */
+	if (used >= q->u.out.scan_threshold || rc)
+		tasklet_schedule(&q->tasklet);
 	else
-		qperf_inc(q, fast_requeue);
-
-out:
-	tasklet_schedule(&q->tasklet);
+		/* free the SBALs in case of no further traffic */
+		if (!timer_pending(&q->u.out.timer))
+			mod_timer(&q->u.out.timer, jiffies + HZ);
 	return rc;
 }
 
@@ -1550,7 +1539,7 @@
 
 	WARN_ON(queue_irqs_enabled(q));
 
-	if (!shared_ind(q->irq_ptr))
+	if (!shared_ind(q->irq_ptr->dsci))
 		xchg(q->irq_ptr->dsci, 0);
 
 	qdio_stop_polling(q);
@@ -1560,7 +1549,7 @@
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
 		goto rescan;
 	if (!qdio_inbound_q_done(q))
 		goto rescan;
@@ -1600,12 +1589,14 @@
 	q = irq_ptr->input_qs[nr];
 	WARN_ON(queue_irqs_enabled(q));
 
-	qdio_sync_after_thinint(q);
-
 	/*
-	 * The interrupt could be caused by a PCI request. Check the
-	 * PCI capable outbound queues.
+	 * Cannot rely on automatic sync after interrupt since queues may
+	 * also be examined without interrupt.
 	 */
+	if (need_siga_sync(q))
+		qdio_sync_queues(q);
+
+	/* check the PCI capable outbound queues. */
 	qdio_check_outbound_after_thinint(q);
 
 	if (!qdio_inbound_q_moved(q))
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index a13cf7e..89107d0 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -178,6 +178,7 @@
 		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
 
 		q->is_input_q = 0;
+		q->u.out.scan_threshold = qdio_init->scan_threshold;
 		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
 		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 
@@ -196,14 +197,10 @@
 		irq_ptr->siga_flag.output = 1;
 	if (qdioac & AC1_SIGA_SYNC_NEEDED)
 		irq_ptr->siga_flag.sync = 1;
-	if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
-		irq_ptr->siga_flag.no_sync_ti = 1;
-	if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
-		irq_ptr->siga_flag.no_sync_out_pci = 1;
-
-	if (irq_ptr->siga_flag.no_sync_out_pci &&
-	    irq_ptr->siga_flag.no_sync_ti)
-		irq_ptr->siga_flag.no_sync_out_ti = 1;
+	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
+		irq_ptr->siga_flag.sync_after_ai = 1;
+	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
+		irq_ptr->siga_flag.sync_out_after_pci = 1;
 }
 
 static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
@@ -451,7 +448,7 @@
 	char s[80];
 
 	snprintf(s, 80, "qdio: %s %s on SC %x using "
-		 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n",
+		 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
 		 dev_name(&cdev->dev),
 		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
 			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -463,9 +460,8 @@
 		 (irq_ptr->siga_flag.input) ? "R" : " ",
 		 (irq_ptr->siga_flag.output) ? "W" : " ",
 		 (irq_ptr->siga_flag.sync) ? "S" : " ",
-		 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ",
-		 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ",
-		 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
+		 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
+		 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
 	printk(KERN_INFO "%s", s);
 }
 
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 5d9c666..5c4e741 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -8,6 +8,7 @@
  */
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/kernel_stat.h>
 #include <asm/atomic.h>
 #include <asm/debug.h>
 #include <asm/qdio.h>
@@ -35,22 +36,8 @@
 
 struct indicator_t *q_indicators;
 
-static int css_qdio_omit_svs;
-
 static u64 last_ai_time;
 
-static inline unsigned long do_clear_global_summary(void)
-{
-	register unsigned long __fn asm("1") = 3;
-	register unsigned long __tmp asm("2");
-	register unsigned long __time asm("3");
-
-	asm volatile(
-		"	.insn	rre,0xb2650000,2,0"
-		: "+d" (__fn), "=d" (__tmp), "=d" (__time));
-	return __time;
-}
-
 /* returns addr for the device state change indicator */
 static u32 *get_indicator(void)
 {
@@ -83,10 +70,6 @@
 	struct qdio_q *q;
 	int i;
 
-	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
-	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
-		css_qdio_omit_svs = 1;
-
 	mutex_lock(&tiq_list_lock);
 	for_each_input_queue(irq_ptr, q, i)
 		list_add_rcu(&q->entry, &tiq_list);
@@ -112,9 +95,9 @@
 	}
 }
 
-static inline int shared_ind_used(void)
+static inline u32 shared_ind_set(void)
 {
-	return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
+	return q_indicators[TIQDIO_SHARED_IND].ind;
 }
 
 /**
@@ -124,20 +107,11 @@
  */
 static void tiqdio_thinint_handler(void *alsi, void *data)
 {
+	u32 si_used = shared_ind_set();
 	struct qdio_q *q;
 
 	last_ai_time = S390_lowcore.int_clock;
-
-	/*
-	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
-	 * avoidance (SVS clears adapter interrupt suppression overwrite).
-	 */
-	if (!css_qdio_omit_svs)
-		do_clear_global_summary();
-
-	/* reset local summary indicator */
-	if (shared_ind_used())
-		xchg(tiqdio_alsi, 0);
+	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
 
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
@@ -146,7 +120,10 @@
 	list_for_each_entry_rcu(q, &tiq_list, entry) {
 
 		/* only process queues from changed sets */
-		if (!*q->irq_ptr->dsci)
+		if (unlikely(shared_ind(q->irq_ptr->dsci))) {
+			if (!si_used)
+				continue;
+		} else if (!*q->irq_ptr->dsci)
 			continue;
 
 		if (q->u.in.queue_start_poll) {
@@ -162,7 +139,7 @@
 						 q->irq_ptr->int_parm);
 		} else {
 			/* only clear it if the indicator is non-shared */
-			if (!shared_ind(q->irq_ptr))
+			if (!shared_ind(q->irq_ptr->dsci))
 				xchg(q->irq_ptr->dsci, 0);
 			/*
 			 * Call inbound processing but not directly
@@ -178,13 +155,8 @@
 	 * If the shared indicator was used clear it now after all queues
 	 * were processed.
 	 */
-	if (shared_ind_used()) {
+	if (si_used && shared_ind_set())
 		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
-
-		/* prevent racing */
-		if (*tiqdio_alsi)
-			xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
-	}
 }
 
 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -269,12 +241,6 @@
 {
 	if (!is_thinint_irq(irq_ptr))
 		return 0;
-
-	/* Check for aif time delay disablement. If installed,
-	 * omit SVS even under LPAR
-	 */
-	if (css_general_characteristics.aif_tdd)
-		css_qdio_omit_svs = 1;
 	return set_subchannel_ind(irq_ptr, 0);
 }
 
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 8fd8c62..67302b9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -27,6 +27,7 @@
 #define KMSG_COMPONENT "ap"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
@@ -154,7 +155,7 @@
  */
 static int ap_interrupts_available(void)
 {
-	return test_facility(1) && test_facility(2);
+	return test_facility(2) && test_facility(65);
 }
 
 /**
@@ -221,6 +222,69 @@
 }
 #endif
 
+static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
+								  int *support)
+{
+	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(
+		".long 0xb2af0000\n"
+		"0: la    %1,0\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (reg0), "=d" (reg1), "=d" (reg2)
+		:
+		: "cc");
+
+	if (reg2 & 0x6000000000000000ULL)
+		*support = 1;
+	else
+		*support = 0;
+
+	return reg1;
+}
+
+/**
+ * ap_4096_commands_available(): Check for availability of 4096 bit RSA
+ * support.
+ * @qid: The AP queue number
+ *
+ * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
+ */
+int ap_4096_commands_available(ap_qid_t qid)
+{
+	struct ap_queue_status status;
+	int i, support = 0;
+	status = __ap_4096_commands_available(qid, &support);
+
+	for (i = 0; i < AP_MAX_RESET; i++) {
+		switch (status.response_code) {
+		case AP_RESPONSE_NORMAL:
+			return support;
+		case AP_RESPONSE_RESET_IN_PROGRESS:
+		case AP_RESPONSE_BUSY:
+			break;
+		case AP_RESPONSE_Q_NOT_AVAIL:
+		case AP_RESPONSE_DECONFIGURED:
+		case AP_RESPONSE_CHECKSTOPPED:
+		case AP_RESPONSE_INVALID_ADDRESS:
+			return 0;
+		case AP_RESPONSE_OTHERWISE_CHANGED:
+			break;
+		default:
+			break;
+		}
+		if (i < AP_MAX_RESET - 1) {
+			udelay(5);
+			status = __ap_4096_commands_available(qid, &support);
+		}
+	}
+	return support;
+}
+EXPORT_SYMBOL(ap_4096_commands_available);
+
 /**
  * ap_queue_enable_interruption(): Enable interruption on an AP.
  * @qid: The AP queue number
@@ -1042,6 +1106,7 @@
 
 static void ap_interrupt_handler(void *unused1, void *unused2)
 {
+	kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
 	tasklet_schedule(&ap_tasklet);
 }
 
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 4785d07..08b9738 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -196,4 +196,6 @@
 int ap_module_init(void);
 void ap_module_exit(void);
 
+int ap_4096_commands_available(ap_qid_t qid);
+
 #endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 7fca9c1..8e65447f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -396,8 +396,15 @@
 			if (copied == 0) {
 				unsigned int len;
 				spin_unlock_bh(&zcrypt_device_lock);
-				/* len is max 256 / 2 - 120 = 8 */
-				len = crt->inputdatalength / 2 - 120;
+				/* len is max 256 / 2 - 120 = 8
+				 * For bigger devices just assume the len of
+				 * leading 0s is 8, as stated in the requirements
+				 * for the ica_rsa_modexpo_crt struct in zcrypt.h.
+				 */
+				if (crt->inputdatalength <= 256)
+					len = crt->inputdatalength / 2 - 120;
+				else
+					len = 8;
 				if (len > sizeof(z1))
 					return -EFAULT;
 				z1 = z2 = z3 = 0;
@@ -405,6 +412,7 @@
 				    copy_from_user(&z2, crt->bp_key, len) ||
 				    copy_from_user(&z3, crt->u_mult_inv, len))
 					return -EFAULT;
+				z1 = z2 = z3 = 0;
 				copied = 1;
 				/*
 				 * We have to restart device lookup -
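/*
 * Editorial sketch (not part of the patch): the leading-zero length checked
 * by zcrypt_rsa_crt() on np_prime/bp_key/u_mult_inv. For CRT requests up to
 * 256 bytes it is inputdatalength / 2 - 120 (at most 8); for larger 4096 bit
 * requests the new comment above simply assumes 8. Inputs short enough to
 * underflow are caught separately by the len > sizeof(z1) check in the hunk.
 */
#include <stdio.h>

static unsigned int leading_zero_len(unsigned int inputdatalength)
{
	if (inputdatalength <= 256)
		return inputdatalength / 2 - 120;
	return 8;
}

int main(void)
{
	printf("%u\n", leading_zero_len(256));	/* 8, the 2048 bit maximum */
	printf("%u\n", leading_zero_len(512));	/* 8, assumed for 4096 bit keys */
	return 0;
}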
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 8e7ffbf..88ebd11 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -109,6 +109,7 @@
 	int request_count;		/* # current requests. */
 
 	struct ap_message reply;	/* Per-device reply structure. */
+	int max_exp_bit_length;
 };
 
 struct zcrypt_device *zcrypt_device_alloc(size_t);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 9c409ef..2176d00 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -41,7 +41,7 @@
 #define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
 #define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
-#define CEX3A_MAX_MOD_SIZE	CEX2A_MAX_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE	512	/* 4096 bits	*/
 
 #define CEX2A_SPEED_RATING	970
 #define CEX3A_SPEED_RATING	900 /* Fixme: Needs finetuning */
@@ -49,8 +49,10 @@
 #define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
 #define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
 
-#define CEX3A_MAX_MESSAGE_SIZE	CEX2A_MAX_MESSAGE_SIZE
-#define CEX3A_MAX_RESPONSE_SIZE	CEX2A_MAX_RESPONSE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE	0x210	/* 512 byte modulus
+					 * (max outputdatalength) +
+					 * type80_hdr */
+#define CEX3A_MAX_MESSAGE_SIZE	sizeof(struct type50_crb3_msg)
 
 #define CEX2A_CLEANUP_TIME	(15*HZ)
 #define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
@@ -110,7 +112,7 @@
 		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
 		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
 		inp = meb1->message + sizeof(meb1->message) - mod_len;
-	} else {
+	} else if (mod_len <= 256) {
 		struct type50_meb2_msg *meb2 = ap_msg->message;
 		memset(meb2, 0, sizeof(*meb2));
 		ap_msg->length = sizeof(*meb2);
@@ -120,6 +122,17 @@
 		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
 		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
 		inp = meb2->message + sizeof(meb2->message) - mod_len;
+	} else {
+		/* mod_len > 256 = 4096 bit RSA Key */
+		struct type50_meb3_msg *meb3 = ap_msg->message;
+		memset(meb3, 0, sizeof(*meb3));
+		ap_msg->length = sizeof(*meb3);
+		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb3->header.msg_len = sizeof(*meb3);
+		meb3->keyblock_type = TYPE50_MEB3_FMT;
+		mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+		exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+		inp = meb3->message + sizeof(meb3->message) - mod_len;
 	}
 
 	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
@@ -142,7 +155,7 @@
 				       struct ap_message *ap_msg,
 				       struct ica_rsa_modexpo_crt *crt)
 {
-	int mod_len, short_len, long_len, long_offset;
+	int mod_len, short_len, long_len, long_offset, limit;
 	unsigned char *p, *q, *dp, *dq, *u, *inp;
 
 	mod_len = crt->inputdatalength;
@@ -152,14 +165,20 @@
 	/*
 	 * CEX2A cannot handle p, dp, or U > 128 bytes.
 	 * If we have one of these, we need to do extra checking.
+	 * For CEX3A the limit is 256 bytes.
 	 */
-	if (long_len > 128) {
+	if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
+		limit = 256;
+	else
+		limit = 128;
+
+	if (long_len > limit) {
 		/*
 		 * zcrypt_rsa_crt already checked for the leading
 		 * zeroes of np_prime, bp_key and u_mult_inc.
 		 */
-		long_offset = long_len - 128;
-		long_len = 128;
+		long_offset = long_len - limit;
+		long_len = limit;
 	} else
 		long_offset = 0;
 
@@ -180,7 +199,7 @@
 		dq = crb1->dq + sizeof(crb1->dq) - short_len;
 		u = crb1->u + sizeof(crb1->u) - long_len;
 		inp = crb1->message + sizeof(crb1->message) - mod_len;
-	} else {
+	} else if (long_len <= 128) {
 		struct type50_crb2_msg *crb2 = ap_msg->message;
 		memset(crb2, 0, sizeof(*crb2));
 		ap_msg->length = sizeof(*crb2);
@@ -193,6 +212,20 @@
 		dq = crb2->dq + sizeof(crb2->dq) - short_len;
 		u = crb2->u + sizeof(crb2->u) - long_len;
 		inp = crb2->message + sizeof(crb2->message) - mod_len;
+	} else {
+		/* long_len > 128 bytes */
+		struct type50_crb3_msg *crb3 = ap_msg->message;
+		memset(crb3, 0, sizeof(*crb3));
+		ap_msg->length = sizeof(*crb3);
+		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		crb3->header.msg_len = sizeof(*crb3);
+		crb3->keyblock_type = TYPE50_CRB3_FMT;
+		p = crb3->p + sizeof(crb3->p) - long_len;
+		q = crb3->q + sizeof(crb3->q) - short_len;
+		dp = crb3->dp + sizeof(crb3->dp) - long_len;
+		dq = crb3->dq + sizeof(crb3->dq) - short_len;
+		u = crb3->u + sizeof(crb3->u) - long_len;
+		inp = crb3->message + sizeof(crb3->message) - mod_len;
 	}
 
 	if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
@@ -203,7 +236,6 @@
 	    copy_from_user(inp, crt->inputdata, mod_len))
 		return -EFAULT;
 
-
 	return 0;
 }
 
@@ -230,7 +262,10 @@
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
-	BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+	if (zdev->user_space_type == ZCRYPT_CEX2A)
+		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
+	else
+		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
 	data = reply->message + t80h->len - outputdatalength;
 	if (copy_to_user(outputdata, data, outputdatalength))
 		return -EFAULT;
@@ -282,7 +317,10 @@
 	}
 	t80h = reply->message;
 	if (t80h->type == TYPE80_RSP_CODE) {
-		length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
+		if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+			length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
+		else
+			length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
 		memcpy(msg->message, reply->message, length);
 	} else
 		memcpy(msg->message, reply->message, sizeof error_reply);
@@ -307,7 +345,10 @@
 	int rc;
 
 	ap_init_message(&ap_msg);
-	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	if (zdev->user_space_type == ZCRYPT_CEX2A)
+		ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	else
+		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -345,7 +386,10 @@
 	int rc;
 
 	ap_init_message(&ap_msg);
-	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	if (zdev->user_space_type == ZCRYPT_CEX2A)
+		ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
+	else
+		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
@@ -397,6 +441,7 @@
 		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
 		zdev->short_crt = 1;
 		zdev->speed_rating = CEX2A_SPEED_RATING;
+		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
 		break;
 	case AP_DEVICE_TYPE_CEX3A:
 		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
@@ -404,8 +449,13 @@
 			return -ENOMEM;
 		zdev->user_space_type = ZCRYPT_CEX3A;
 		zdev->type_string = "CEX3A";
-		zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
-		zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		if (ap_4096_commands_available(ap_dev->qid)) {
+			zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+			zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+		}
 		zdev->short_crt = 1;
 		zdev->speed_rating = CEX3A_SPEED_RATING;
 		break;
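
In zcrypt_cex2a_modexpo() the hunks above turn the old two-way key block choice into a three-way one: moduli up to 256 bytes keep the TYPE50_MEB2_FMT layout, larger (4096 bit) moduli are built as the new TYPE50_MEB3_FMT block, and the small MEB1 case is unchanged. A standalone restatement of just that selection, using the format codes from the zcrypt_cex2a.h hunk below (the helper is illustrative only and leaves out the MEB1 branch):

#include <assert.h>

#define TYPE50_MEB2_FMT	0x0002
#define TYPE50_MEB3_FMT	0x0003

/* Pick the type50 mod-expo key block format for a modulus of mod_len
 * bytes; anything above 256 bytes needs the new 512-byte MEB3 fields. */
static unsigned short meb_keyblock_type(unsigned int mod_len)
{
	return (mod_len <= 256) ? TYPE50_MEB2_FMT : TYPE50_MEB3_FMT;
}

int main(void)
{
	assert(meb_keyblock_type(256) == TYPE50_MEB2_FMT);
	assert(meb_keyblock_type(512) == TYPE50_MEB3_FMT);
	return 0;
}
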
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 8f69d1d..0350665 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -51,8 +51,10 @@
 
 #define TYPE50_MEB1_FMT		0x0001
 #define TYPE50_MEB2_FMT		0x0002
+#define TYPE50_MEB3_FMT		0x0003
 #define TYPE50_CRB1_FMT		0x0011
 #define TYPE50_CRB2_FMT		0x0012
+#define TYPE50_CRB3_FMT		0x0013
 
 /* Mod-Exp, with a small modulus */
 struct type50_meb1_msg {
@@ -74,6 +76,16 @@
 	unsigned char	message[256];
 } __attribute__((packed));
 
+/* Mod-Exp, with a larger modulus */
+struct type50_meb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0003 */
+	unsigned char	reserved[6];
+	unsigned char	exponent[512];
+	unsigned char	modulus[512];
+	unsigned char	message[512];
+} __attribute__((packed));
+
 /* CRT, with a small modulus */
 struct type50_crb1_msg {
 	struct type50_hdr header;
@@ -100,6 +112,19 @@
 	unsigned char	message[256];
 } __attribute__((packed));
 
+/* CRT, with a larger modulus */
+struct type50_crb3_msg {
+	struct type50_hdr header;
+	unsigned short	keyblock_type;	/* 0x0013 */
+	unsigned char	reserved[6];
+	unsigned char	p[256];
+	unsigned char	q[256];
+	unsigned char	dp[256];
+	unsigned char	dq[256];
+	unsigned char	u[256];
+	unsigned char	message[512];
+} __attribute__((packed));
+
 /**
  * The type 80 response family is associated with a CEX2A card.
  *
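
All of the type50 fields above are fixed-size buffers that the zcrypt_cex2a.c hunks fill right-aligned: an operand of mod_len bytes is copied to the end of its field, e.g. mod = meb3->modulus + sizeof(meb3->modulus) - mod_len, with the leading bytes staying zero from the earlier memset(). A small standalone illustration of that copy rule (function and variable names here are mine, not part of the patch):

#include <assert.h>
#include <string.h>

/* Right-align src into a zero-filled fixed-size field, the way the driver
 * places moduli, exponents and messages into the type50 key blocks. */
static void copy_right_aligned(unsigned char *field, size_t field_len,
			       const unsigned char *src, size_t src_len)
{
	assert(src_len <= field_len);
	memset(field, 0, field_len);
	memcpy(field + field_len - src_len, src, src_len);
}

int main(void)
{
	unsigned char modulus[512];			/* as in type50_meb3_msg */
	const unsigned char key[3] = { 0xaa, 0xbb, 0xcc };

	copy_right_aligned(modulus, sizeof(modulus), key, sizeof(key));
	assert(modulus[0] == 0x00 && modulus[511] == 0xcc);
	return 0;
}
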
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 09e934b..1afb69c 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -373,6 +373,7 @@
 	zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
 	zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
 	zdev->speed_rating = PCICA_SPEED_RATING;
+	zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE;
 	ap_dev->reply = &zdev->reply;
 	ap_dev->private = zdev;
 	rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 9dec5c7..aa4c050 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -579,6 +579,7 @@
 	zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
 	zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
 	zdev->speed_rating = PCICC_SPEED_RATING;
+	zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE;
 	ap_dev->reply = &zdev->reply;
 	ap_dev->private = zdev;
 	rc = zcrypt_device_register(zdev);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 510fab4..4f85eb7 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -45,12 +45,12 @@
 #define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
 #define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
 #define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
-#define CEX3C_MAX_MOD_SIZE	PCIXCC_MAX_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	512	/* 4096 bits	*/
 
 #define PCIXCC_MCL2_SPEED_RATING	7870
 #define PCIXCC_MCL3_SPEED_RATING	7870
 #define CEX2C_SPEED_RATING		7000
-#define CEX3C_SPEED_RATING		6500	/* FIXME: needs finetuning */
+#define CEX3C_SPEED_RATING		6500
 
 #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
@@ -567,6 +567,15 @@
 	case TYPE88_RSP_CODE:
 		return convert_error(zdev, reply);
 	case TYPE86_RSP_CODE:
+		if (msg->cprbx.ccp_rtcode &&
+		   (msg->cprbx.ccp_rscode == 0x14f) &&
+		   (outputdatalength > 256)) {
+			if (zdev->max_exp_bit_length <= 17) {
+				zdev->max_exp_bit_length = 17;
+				return -EAGAIN;
+			} else
+				return -EINVAL;
+		}
 		if (msg->hdr.reply_code)
 			return convert_error(zdev, reply);
 		if (msg->cprbx.cprb_ver_id == 0x02)
@@ -1052,11 +1061,13 @@
 			zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
 			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
 			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
 		} else {
 			zdev->type_string = "PCIXCC_MCL3";
 			zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
 			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
 		}
 		break;
 	case AP_DEVICE_TYPE_CEX2C:
@@ -1065,6 +1076,7 @@
 		zdev->speed_rating = CEX2C_SPEED_RATING;
 		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
 		break;
 	case AP_DEVICE_TYPE_CEX3C:
 		zdev->user_space_type = ZCRYPT_CEX3C;
@@ -1072,6 +1084,7 @@
 		zdev->speed_rating = CEX3C_SPEED_RATING;
 		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
 		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
 		break;
 	default:
 		goto out_free;
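
The new TYPE86 branch above handles reply code 0x14f for results larger than 256 bytes: the device's max_exp_bit_length is clamped to 17 and the request is bounced with -EAGAIN (repeat on another device) while that field is still at or below 17, otherwise it fails with -EINVAL. Restated in isolation (the wrapper and the plain pointer argument are mine; the error values and the 17-bit marker come from the hunk):

#include <errno.h>

/* Mirrors the 0x14f handling added to the TYPE86 response path: clamp
 * max_exp_bit_length to 17 and ask the caller to retry, or reject the
 * request once the field is above 17. */
static int reject_oversized_reply(int *max_exp_bit_length)
{
	if (*max_exp_bit_length <= 17) {
		*max_exp_bit_length = 17;
		return -EAGAIN;
	}
	return -EINVAL;
}

int main(void)
{
	int max_exp_bit_length = 17;

	return (reject_oversized_reply(&max_exp_bit_length) == -EAGAIN &&
		max_exp_bit_length == 17) ? 0 : 1;
}
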
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 375aeea..414427d 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -10,6 +10,7 @@
  *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  */
 
+#include <linux/kernel_stat.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/err.h>
@@ -25,6 +26,7 @@
 #include <asm/kvm_virtio.h>
 #include <asm/setup.h>
 #include <asm/s390_ext.h>
+#include <asm/irq.h>
 
 #define VIRTIO_SUBCODE_64 0x0D00
 
@@ -379,6 +381,7 @@
 	u16 subcode;
 	u32 param;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
 	subcode = ext_int_code >> 16;
 	if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
 		return;
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 456b187..fa80ba1 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -2,7 +2,8 @@
 	depends on NETDEVICES && S390
 
 config LCS
-	tristate "Lan Channel Station Interface"
+	def_tristate m
+	prompt "Lan Channel Station Interface"
 	depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
 	help
 	   Select this option if you want to use LCS networking on IBM System z.
@@ -12,7 +13,8 @@
 	   If you do not know what it is, it's safe to choose Y.
 
 config CTCM
-	tristate "CTC and MPC SNA device support"
+	def_tristate m
+	prompt "CTC and MPC SNA device support"
 	depends on CCW && NETDEVICES
 	help
 	  Select this option if you want to use channel-to-channel
@@ -26,7 +28,8 @@
 	  If you do not need any channel-to-channel connection, choose N.
 
 config NETIUCV
-	tristate "IUCV network device support (VM only)"
+	def_tristate m
+	prompt "IUCV network device support (VM only)"
 	depends on IUCV && NETDEVICES
 	help
 	  Select this option if you want to use inter-user communication
@@ -37,14 +40,16 @@
 	  The module name is netiucv. If unsure, choose Y.
 
 config SMSGIUCV
-	tristate "IUCV special message support (VM only)"
+	def_tristate m
+	prompt "IUCV special message support (VM only)"
 	depends on IUCV
 	help
 	  Select this option if you want to be able to receive SMSG messages
 	  from other VM guest systems.
 
 config SMSGIUCV_EVENT
-	tristate "Deliver IUCV special messages as uevents (VM only)"
+	def_tristate m
+	prompt "Deliver IUCV special messages as uevents (VM only)"
 	depends on SMSGIUCV
 	help
 	  Select this option to deliver CP special messages (SMSGs) as
@@ -54,7 +59,8 @@
 	  To compile as a module, choose M. The module name is "smsgiucv_app".
 
 config CLAW
-	tristate "CLAW device support"
+	def_tristate m
+	prompt "CLAW device support"
 	depends on CCW && NETDEVICES
 	help
 	  This driver supports channel attached CLAW devices.
@@ -64,7 +70,8 @@
 	  To compile into the kernel, choose Y.
 
 config QETH
-	tristate "Gigabit Ethernet device support"
+	def_tristate y
+	prompt "Gigabit Ethernet device support"
 	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
 	help
 	  This driver supports the IBM System z OSA Express adapters
@@ -78,25 +85,25 @@
 	  The module name is qeth.
 
 config QETH_L2
-        tristate "qeth layer 2 device support"
-        depends on QETH
-        help
-          Select this option to be able to run qeth devices in layer 2 mode.
-          To compile as a module, choose M. The module name is qeth_l2.
-          If unsure, choose y.
+	def_tristate y
+	prompt "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.
+	  If unsure, choose y.
 
 config QETH_L3
-        tristate "qeth layer 3 device support"
-        depends on QETH
-        help
-          Select this option to be able to run qeth devices in layer 3 mode.
-          To compile as a module choose M. The module name is qeth_l3.
-          If unsure, choose Y.
+	def_tristate y
+	prompt "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.
+	  If unsure, choose Y.
 
 config QETH_IPV6
-        bool
-        depends on (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-        default y
+	def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
 
 config CCWGROUP
 	tristate
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 8e4153d..ce3a5c1 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -63,6 +63,7 @@
 
 #define KMSG_COMPONENT "claw"
 
+#include <linux/kernel_stat.h>
 #include <asm/ccwdev.h>
 #include <asm/ccwgroup.h>
 #include <asm/debug.h>
@@ -640,6 +641,7 @@
         struct claw_env  *p_env;
         struct chbk *p_ch_r=NULL;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
 	CLAW_DBF_TEXT(4, trace, "clawirq");
         /* Bypass all 'unsolicited interrupts' */
 	privptr = dev_get_drvdata(&cdev->dev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 2c7d2d9..4c28459 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -24,6 +24,7 @@
 #define KMSG_COMPONENT "ctcm"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -1204,6 +1205,7 @@
 	int cstat;
 	int dstat;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
 		"Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
 
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c9f13b9..30b2a82 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,6 +26,7 @@
 #define KMSG_COMPONENT		"lcs"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>
@@ -840,7 +841,7 @@
 }
 
 /**
- * Emit buffer of a lan comand.
+ * Emit buffer of a lan command.
  */
 static void
 lcs_lancmd_timeout(unsigned long data)
@@ -1398,6 +1399,7 @@
 	int rc, index;
 	int cstat, dstat;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
 	if (lcs_check_irb_error(cdev, irb))
 		return;
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b7d9dc0..29f848b 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3831,6 +3831,8 @@
 	init_data.int_parm               = (unsigned long) card;
 	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
 	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
+	init_data.scan_threshold =
+		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
 		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 044fb22..51c666f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -45,8 +45,8 @@
 module_param_named(device, init_device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
-static struct kmem_cache *zfcp_cache_hw_align(const char *name,
-					      unsigned long size)
+static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
+						      unsigned long size)
 {
 	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
 }
@@ -311,8 +311,7 @@
 		if (zfcp_fsf_status_read(adapter->qdio)) {
 			if (atomic_read(&adapter->stat_miss) >=
 			    adapter->stat_read_buf_num) {
-				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
-							NULL);
+				zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
 				return 1;
 			}
 			break;
@@ -459,7 +458,7 @@
 	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
 
 	zfcp_erp_thread_kill(adapter);
-	zfcp_dbf_adapter_unregister(adapter->dbf);
+	zfcp_dbf_adapter_unregister(adapter);
 	zfcp_qdio_destroy(adapter->qdio);
 
 	zfcp_ccw_adapter_put(adapter); /* final put to release */
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0833c2b..4f7852d 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -48,7 +48,7 @@
 
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"ccresu2", NULL);
+				"ccresu2");
 	zfcp_erp_wait(adapter);
 	flush_work(&adapter->scan_work);
 
@@ -182,7 +182,7 @@
 	if (!adapter)
 		return 0;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
 	zfcp_erp_wait(adapter);
 
 	zfcp_ccw_adapter_put(adapter);
@@ -207,24 +207,24 @@
 	switch (event) {
 	case CIO_GONE:
 		dev_warn(&cdev->dev, "The FCP device has been detached\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
 		break;
 	case CIO_NO_PATH:
 		dev_warn(&cdev->dev,
 			 "The CHPID for the FCP device is offline\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
 		break;
 	case CIO_OPER:
 		dev_info(&cdev->dev, "The FCP device is operational again\n");
 		zfcp_erp_set_adapter_status(adapter,
 					    ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-					"ccnoti4", NULL);
+					"ccnoti4");
 		break;
 	case CIO_BOXED:
 		dev_warn(&cdev->dev, "The FCP device did not respond within "
 				     "the specified time\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
+		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5");
 		break;
 	}
 
@@ -243,7 +243,7 @@
 	if (!adapter)
 		return;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
+	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1");
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
 
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index d692e22..303dde0 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -288,7 +288,7 @@
 		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "cfaac_1", NULL);
+					     "cfaac_1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
@@ -299,7 +299,7 @@
 		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
 			zfcp_erp_lun_reopen(sdev,
 					    ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "cfaac_2", NULL);
+					    "cfaac_2");
 	}
 }
 
@@ -317,7 +317,7 @@
 
 /**
  * zfcp_cfdc_port_denied - Process "access denied" for port
- * @port: The port where the acces has been denied
+ * @port: The port where the access has been denied
  * @qual: The FSF status qualifier for the access denied FSF status
  */
 void zfcp_cfdc_port_denied(struct zfcp_port *port,
@@ -426,7 +426,7 @@
 			zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
 		return -EACCES;
 	}
 
@@ -437,7 +437,7 @@
 			zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
-		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
 		return -EACCES;
 	}
 
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 2cdd6b2..96d1462 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -22,980 +22,392 @@
 MODULE_PARM_DESC(dbfsize,
 		 "number of pages for each debug feature area (default 4)");
 
-static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len,
-			     int level, char *from, int from_len)
+static inline unsigned int zfcp_dbf_plen(unsigned int offset)
 {
-	int offset;
-	struct zfcp_dbf_dump *dump = to;
-	int room = to_len - sizeof(*dump);
+	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
+}
 
-	for (offset = 0; offset < from_len; offset += dump->size) {
-		memset(to, 0, to_len);
-		strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
-		dump->total_size = from_len;
-		dump->offset = offset;
-		dump->size = min(from_len - offset, room);
-		memcpy(dump->data, from + offset, dump->size);
-		debug_event(dbf, level, dump, dump->size + sizeof(*dump));
+static inline
+void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
+		       u64 req_id)
+{
+	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
+	u16 offset = 0, rec_length;
+
+	spin_lock(&dbf->pay_lock);
+	memset(pl, 0, sizeof(*pl));
+	pl->fsf_req_id = req_id;
+	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);
+
+	while (offset < length) {
+		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
+				 (u16) (length - offset));
+		memcpy(pl->data, data + offset, rec_length);
+		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));
+
+		offset += rec_length;
+		pl->counter++;
 	}
+
+	spin_unlock(&dbf->pay_lock);
 }
 
-static void zfcp_dbf_tag(char **p, const char *label, const char *tag)
+/**
+ * zfcp_dbf_hba_fsf_res - trace event for fsf responses
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request for which a response was received
+ */
+void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
 {
-	int i;
-
-	*p += sprintf(*p, "%-24s", label);
-	for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
-		*p += sprintf(*p, "%c", tag[i]);
-	*p += sprintf(*p, "\n");
-}
-
-static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2)
-{
-	*buf += sprintf(*buf, "%-24s%s\n", s1, s2);
-}
-
-static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...)
-{
-	va_list arg;
-
-	*buf += sprintf(*buf, "%-24s", s);
-	va_start(arg, format);
-	*buf += vsprintf(*buf, format, arg);
-	va_end(arg);
-	*buf += sprintf(*buf, "\n");
-}
-
-static void zfcp_dbf_outd(char **p, const char *label, char *buffer,
-			  int buflen, int offset, int total_size)
-{
-	if (!offset)
-		*p += sprintf(*p, "%-24s  ", label);
-	while (buflen--) {
-		if (offset > 0) {
-			if ((offset % 32) == 0)
-				*p += sprintf(*p, "\n%-24c  ", ' ');
-			else if ((offset % 4) == 0)
-				*p += sprintf(*p, " ");
-		}
-		*p += sprintf(*p, "%02x", *buffer++);
-		if (++offset == total_size) {
-			*p += sprintf(*p, "\n");
-			break;
-		}
-	}
-	if (!total_size)
-		*p += sprintf(*p, "\n");
-}
-
-static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view,
-				int area, debug_entry_t *entry, char *out_buf)
-{
-	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
-	struct timespec t;
-	char *p = out_buf;
-
-	if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
-		stck_to_timespec(entry->id.stck, &t);
-		zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
-			     t.tv_sec, t.tv_nsec);
-		zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
-	} else	{
-		zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset,
-			      dump->total_size);
-		if ((dump->offset + dump->size) == dump->total_size)
-			p += sprintf(p, "\n");
-	}
-	return p - out_buf;
-}
-
-void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
-				struct zfcp_fsf_req *fsf_req,
-				struct zfcp_dbf *dbf)
-{
-	struct fsf_qtcb *qtcb = fsf_req->qtcb;
-	union fsf_prot_status_qual *prot_status_qual =
-					&qtcb->prefix.prot_status_qual;
-	union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
-	struct scsi_cmnd *scsi_cmnd;
-	struct zfcp_port *port;
-	struct zfcp_unit *unit;
-	struct zfcp_send_els *send_els;
-	struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
-	struct zfcp_dbf_hba_record_response *response = &rec->u.response;
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
+	struct fsf_qtcb_header *q_head = &req->qtcb->header;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
-	strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
-	strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
 
-	response->fsf_command = fsf_req->fsf_command;
-	response->fsf_reqid = fsf_req->req_id;
-	response->fsf_seqno = fsf_req->seq_no;
-	response->fsf_issued = fsf_req->issued;
-	response->fsf_prot_status = qtcb->prefix.prot_status;
-	response->fsf_status = qtcb->header.fsf_status;
-	memcpy(response->fsf_prot_status_qual,
-	       prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
-	memcpy(response->fsf_status_qual,
-	       fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
-	response->fsf_req_status = fsf_req->status;
-	response->sbal_first = fsf_req->qdio_req.sbal_first;
-	response->sbal_last = fsf_req->qdio_req.sbal_last;
-	response->sbal_response = fsf_req->qdio_req.sbal_response;
-	response->pool = fsf_req->pool != NULL;
-	response->erp_action = (unsigned long)fsf_req->erp_action;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_RES;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+	rec->fsf_seq_no = req->seq_no;
+	rec->u.res.req_issued = req->issued;
+	rec->u.res.prot_status = q_pref->prot_status;
+	rec->u.res.fsf_status = q_head->fsf_status;
 
-	switch (fsf_req->fsf_command) {
-	case FSF_QTCB_FCP_CMND:
-		if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
-			break;
-		scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
-		if (scsi_cmnd) {
-			response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
-			response->u.fcp.data_dir =
-				qtcb->bottom.io.data_direction;
-		}
-		break;
+	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
+	       FSF_PROT_STATUS_QUAL_SIZE);
+	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
+	       FSF_STATUS_QUALIFIER_SIZE);
 
-	case FSF_QTCB_OPEN_PORT_WITH_DID:
-	case FSF_QTCB_CLOSE_PORT:
-	case FSF_QTCB_CLOSE_PHYSICAL_PORT:
-		port = (struct zfcp_port *)fsf_req->data;
-		response->u.port.wwpn = port->wwpn;
-		response->u.port.d_id = port->d_id;
-		response->u.port.port_handle = qtcb->header.port_handle;
-		break;
-
-	case FSF_QTCB_OPEN_LUN:
-	case FSF_QTCB_CLOSE_LUN:
-		unit = (struct zfcp_unit *)fsf_req->data;
-		port = unit->port;
-		response->u.unit.wwpn = port->wwpn;
-		response->u.unit.fcp_lun = unit->fcp_lun;
-		response->u.unit.port_handle = qtcb->header.port_handle;
-		response->u.unit.lun_handle = qtcb->header.lun_handle;
-		break;
-
-	case FSF_QTCB_SEND_ELS:
-		send_els = (struct zfcp_send_els *)fsf_req->data;
-		response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
-		break;
-
-	case FSF_QTCB_ABORT_FCP_CMND:
-	case FSF_QTCB_SEND_GENERIC:
-	case FSF_QTCB_EXCHANGE_CONFIG_DATA:
-	case FSF_QTCB_EXCHANGE_PORT_DATA:
-	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-	case FSF_QTCB_UPLOAD_CONTROL_FILE:
-		break;
+	if (req->fsf_command != FSF_QTCB_FCP_CMND) {
+		rec->pl_len = q_head->log_length;
+		zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+				  rec->pl_len, "fsf_res", req->req_id);
 	}
 
-	debug_event(dbf->hba, level, rec, sizeof(*rec));
-
-	/* have fcp channel microcode fixed to use as little as possible */
-	if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) {
-		/* adjust length skipping trailing zeros */
-		char *buf = (char *)qtcb + qtcb->header.log_start;
-		int len = qtcb->header.log_length;
-		for (; len && !buf[len - 1]; len--);
-		zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf,
-				 len);
-	}
-
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
-void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf,
-			     struct fsf_status_read_buffer *status_buffer)
+/**
+ * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request providing the unsolicited status
+ */
+void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 {
-	struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_status_read_buffer *srb = req->data;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
-	strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
-	strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
 
-	rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss);
-	if (status_buffer != NULL) {
-		rec->u.status.status_type = status_buffer->status_type;
-		rec->u.status.status_subtype = status_buffer->status_subtype;
-		memcpy(&rec->u.status.queue_designator,
-		       &status_buffer->queue_designator,
-		       sizeof(struct fsf_queue_designator));
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_USS;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
 
-		switch (status_buffer->status_type) {
-		case FSF_STATUS_READ_SENSE_DATA_AVAIL:
-			rec->u.status.payload_size =
-			    ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
-			break;
+	if (!srb)
+		goto log;
 
-		case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-			rec->u.status.payload_size =
-			    ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
-			break;
+	rec->u.uss.status_type = srb->status_type;
+	rec->u.uss.status_subtype = srb->status_subtype;
+	rec->u.uss.d_id = ntoh24(srb->d_id);
+	rec->u.uss.lun = srb->fcp_lun;
+	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
+	       sizeof(rec->u.uss.queue_designator));
 
-		case FSF_STATUS_READ_LINK_DOWN:
-			switch (status_buffer->status_subtype) {
-			case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
-			case FSF_STATUS_READ_SUB_FDISC_FAILED:
-				rec->u.status.payload_size =
-					sizeof(struct fsf_link_down_info);
-			}
-			break;
+	/* status read buffer payload length */
+	rec->pl_len = (!srb->length) ? 0 : srb->length -
+			offsetof(struct fsf_status_read_buffer, payload);
 
-		case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
-			rec->u.status.payload_size =
-			    ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
-			break;
-		}
-		memcpy(&rec->u.status.payload,
-		       &status_buffer->payload, rec->u.status.payload_size);
-	}
-
-	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	if (rec->pl_len)
+		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
+				  "fsf_uss", req->req_id);
+log:
+	debug_event(dbf->hba, 2, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
 /**
- * zfcp_dbf_hba_qdio - trace event for QDIO related failure
- * @qdio: qdio structure affected by this QDIO related event
- * @qdio_error: as passed by qdio module
- * @sbal_index: first buffer with error condition, as passed by qdio module
- * @sbal_count: number of buffers affected, as passed by qdio module
+ * zfcp_dbf_hba_bit_err - trace event for bit error conditions
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @req: request which caused the bit_error condition
  */
-void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error,
-		       int sbal_index, int sbal_count)
+void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
 {
-	struct zfcp_dbf_hba_record *r = &dbf->hba_buf;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dbf->hba_lock, flags);
-	memset(r, 0, sizeof(*r));
-	strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
-	r->u.qdio.qdio_error = qdio_error;
-	r->u.qdio.sbal_index = sbal_index;
-	r->u.qdio.sbal_count = sbal_count;
-	debug_event(dbf->hba, 0, r, sizeof(*r));
-	spin_unlock_irqrestore(&dbf->hba_lock, flags);
-}
-
-/**
- * zfcp_dbf_hba_berr - trace event for bit error threshold
- * @dbf: dbf structure affected by this QDIO related event
- * @req: fsf request
- */
-void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req)
-{
-	struct zfcp_dbf_hba_record *r = &dbf->hba_buf;
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
 	struct fsf_status_read_buffer *sr_buf = req->data;
-	struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dbf->hba_lock, flags);
-	memset(r, 0, sizeof(*r));
-	strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
-	memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
-	debug_event(dbf->hba, 0, r, sizeof(*r));
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BIT;
+	rec->fsf_req_id = req->req_id;
+	rec->fsf_req_status = req->status;
+	rec->fsf_cmd = req->fsf_command;
+	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
+	       sizeof(struct fsf_bit_error_payload));
+
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
-static void zfcp_dbf_hba_view_response(char **p,
-				       struct zfcp_dbf_hba_record_response *r)
+
+static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+				struct zfcp_adapter *adapter,
+				struct zfcp_port *port,
+				struct scsi_device *sdev)
 {
-	struct timespec t;
-
-	zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
-	zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
-	zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	stck_to_timespec(r->fsf_issued, &t);
-	zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
-	zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
-	zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
-	zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual,
-		      FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE);
-	zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual,
-		      FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE);
-	zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status);
-	zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first);
-	zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last);
-	zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response);
-	zfcp_dbf_out(p, "pool", "0x%02x", r->pool);
-
-	switch (r->fsf_command) {
-	case FSF_QTCB_FCP_CMND:
-		if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
-			break;
-		zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
-		zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
-		*p += sprintf(*p, "\n");
-		break;
-
-	case FSF_QTCB_OPEN_PORT_WITH_DID:
-	case FSF_QTCB_CLOSE_PORT:
-	case FSF_QTCB_CLOSE_PHYSICAL_PORT:
-		zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn);
-		zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id);
-		zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle);
-		break;
-
-	case FSF_QTCB_OPEN_LUN:
-	case FSF_QTCB_CLOSE_LUN:
-		zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn);
-		zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun);
-		zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle);
-		zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle);
-		break;
-
-	case FSF_QTCB_SEND_ELS:
-		zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
-		break;
-
-	case FSF_QTCB_ABORT_FCP_CMND:
-	case FSF_QTCB_SEND_GENERIC:
-	case FSF_QTCB_EXCHANGE_CONFIG_DATA:
-	case FSF_QTCB_EXCHANGE_PORT_DATA:
-	case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
-	case FSF_QTCB_UPLOAD_CONTROL_FILE:
-		break;
+	rec->adapter_status = atomic_read(&adapter->status);
+	if (port) {
+		rec->port_status = atomic_read(&port->status);
+		rec->wwpn = port->wwpn;
+		rec->d_id = port->d_id;
+	}
+	if (sdev) {
+		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		rec->lun = zfcp_scsi_dev_lun(sdev);
 	}
 }
 
-static void zfcp_dbf_hba_view_status(char **p,
-				     struct zfcp_dbf_hba_record_status *r)
-{
-	zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
-	zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
-	zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype);
-	zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator,
-		      sizeof(struct fsf_queue_designator), 0,
-		      sizeof(struct fsf_queue_designator));
-	zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0,
-		      r->payload_size);
-}
-
-static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r)
-{
-	zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
-	zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
-	zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
-}
-
-static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r)
-{
-	zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
-	zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
-	zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
-	zfcp_dbf_out(p, "prim_seq_err", "%d",
-		     r->primitive_sequence_error_count);
-	zfcp_dbf_out(p, "inval_trans_word_err", "%d",
-		     r->invalid_transmission_word_error_count);
-	zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
-	zfcp_dbf_out(p, "prim_seq_event_to", "%d",
-		     r->primitive_sequence_event_timeout_count);
-	zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
-		     r->elastic_buffer_overrun_error_count);
-	zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
-		     r->advertised_receive_b2b_credit);
-	zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
-		     r->current_receive_b2b_credit);
-	zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
-		     r->advertised_transmit_b2b_credit);
-	zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
-		     r->current_transmit_b2b_credit);
-}
-
-static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view,
-				    char *out_buf, const char *in_buf)
-{
-	struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf;
-	char *p = out_buf;
-
-	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
-		return 0;
-
-	zfcp_dbf_tag(&p, "tag", r->tag);
-	if (isalpha(r->tag2[0]))
-		zfcp_dbf_tag(&p, "tag2", r->tag2);
-
-	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_dbf_hba_view_response(&p, &r->u.response);
-	else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_dbf_hba_view_status(&p, &r->u.status);
-	else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_dbf_hba_view_qdio(&p, &r->u.qdio);
-	else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_dbf_hba_view_berr(&p, &r->u.berr);
-
-	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
-		p += sprintf(p, "\n");
-	return p - out_buf;
-}
-
-static struct debug_view zfcp_dbf_hba_view = {
-	.name = "structured",
-	.header_proc = zfcp_dbf_view_header,
-	.format_proc = zfcp_dbf_hba_view_format,
-};
-
-static const char *zfcp_dbf_rec_tags[] = {
-	[ZFCP_REC_DBF_ID_THREAD] = "thread",
-	[ZFCP_REC_DBF_ID_TARGET] = "target",
-	[ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
-	[ZFCP_REC_DBF_ID_ACTION] = "action",
-};
-
-static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
-				    char *buf, const char *_rec)
-{
-	struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec;
-	char *p = buf;
-	char hint[ZFCP_DBF_ID_SIZE + 1];
-
-	memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE);
-	hint[ZFCP_DBF_ID_SIZE] = 0;
-	zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]);
-	zfcp_dbf_outs(&p, "hint", hint);
-	switch (r->id) {
-	case ZFCP_REC_DBF_ID_THREAD:
-		zfcp_dbf_out(&p, "total", "%d", r->u.thread.total);
-		zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready);
-		zfcp_dbf_out(&p, "running", "%d", r->u.thread.running);
-		break;
-	case ZFCP_REC_DBF_ID_TARGET:
-		zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref);
-		zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status);
-		zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count);
-		zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id);
-		zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn);
-		zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun);
-		break;
-	case ZFCP_REC_DBF_ID_TRIGGER:
-		zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref);
-		zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action);
-		zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want);
-		zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need);
-		zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn);
-		zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
-		zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
-		zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
-		zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls);
-		break;
-	case ZFCP_REC_DBF_ID_ACTION:
-		zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
-		zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req);
-		zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status);
-		zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step);
-		break;
-	}
-	p += sprintf(p, "\n");
-	return p - buf;
-}
-
-static struct debug_view zfcp_dbf_rec_view = {
-	.name = "structured",
-	.header_proc = zfcp_dbf_view_header,
-	.format_proc = zfcp_dbf_rec_view_format,
-};
-
 /**
- * zfcp_dbf_rec_thread - trace event related to recovery thread operation
- * @id2: identifier for event
- * @dbf: reference to dbf structure
- * This function assumes that the caller is holding erp_lock.
+ * zfcp_dbf_rec_trig - trace event related to triggered recovery
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock has to be held.
  */
-void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf)
+void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+		       struct zfcp_port *port, struct scsi_device *sdev,
+		       u8 want, u8 need)
 {
-	struct zfcp_adapter *adapter = dbf->adapter;
-	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
-	unsigned long flags = 0;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
 	struct list_head *entry;
-	unsigned ready = 0, running = 0, total;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_TRIG;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	zfcp_dbf_set_common(rec, adapter, port, sdev);
 
 	list_for_each(entry, &adapter->erp_ready_head)
-		ready++;
+		rec->u.trig.ready++;
+
 	list_for_each(entry, &adapter->erp_running_head)
-		running++;
-	total = adapter->erp_total_count;
+		rec->u.trig.running++;
 
-	spin_lock_irqsave(&dbf->rec_lock, flags);
-	memset(r, 0, sizeof(*r));
-	r->id = ZFCP_REC_DBF_ID_THREAD;
-	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
-	r->u.thread.total = total;
-	r->u.thread.ready = ready;
-	r->u.thread.running = running;
-	debug_event(dbf->rec, 6, r, sizeof(*r));
+	rec->u.trig.want = want;
+	rec->u.trig.need = need;
+
+	debug_event(dbf->rec, 1, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+
 /**
- * zfcp_dbf_rec_thread - trace event related to recovery thread operation
- * @id2: identifier for event
- * @adapter: adapter
- * This function assumes that the caller does not hold erp_lock.
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
  */
-void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf)
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
 {
-	struct zfcp_adapter *adapter = dbf->adapter;
-	unsigned long flags;
-
-	read_lock_irqsave(&adapter->erp_lock, flags);
-	zfcp_dbf_rec_thread(id2, dbf);
-	read_unlock_irqrestore(&adapter->erp_lock, flags);
-}
-
-static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf,
-				atomic_t *status, atomic_t *erp_count, u64 wwpn,
-				u32 d_id, u64 fcp_lun)
-{
-	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
+	struct zfcp_dbf *dbf = erp->adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dbf->rec_lock, flags);
-	memset(r, 0, sizeof(*r));
-	r->id = ZFCP_REC_DBF_ID_TARGET;
-	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
-	r->u.target.ref = (unsigned long)ref;
-	r->u.target.status = atomic_read(status);
-	r->u.target.wwpn = wwpn;
-	r->u.target.d_id = d_id;
-	r->u.target.fcp_lun = fcp_lun;
-	r->u.target.erp_count = atomic_read(erp_count);
-	debug_event(dbf->rec, 3, r, sizeof(*r));
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_RUN;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);
+
+	rec->u.run.fsf_req_id = erp->fsf_req_id;
+	rec->u.run.rec_status = erp->status;
+	rec->u.run.rec_step = erp->step;
+	rec->u.run.rec_action = erp->action;
+
+	if (erp->sdev)
+		rec->u.run.rec_count =
+			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
+	else if (erp->port)
+		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
+	else
+		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
+
+	debug_event(dbf->rec, 1, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
-/**
- * zfcp_dbf_rec_adapter - trace event for adapter state change
- * @id: identifier for trigger of state change
- * @ref: additional reference (e.g. request)
- * @dbf: reference to dbf structure
- */
-void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
+static inline
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
+		  u64 req_id, u32 d_id)
 {
-	struct zfcp_adapter *adapter = dbf->adapter;
-
-	zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
-			    &adapter->erp_counter, 0, 0,
-			    ZFCP_DBF_INVALID_LUN);
-}
-
-/**
- * zfcp_dbf_rec_port - trace event for port state change
- * @id: identifier for trigger of state change
- * @ref: additional reference (e.g. request)
- * @port: port
- */
-void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
-{
-	struct zfcp_dbf *dbf = port->adapter->dbf;
-
-	zfcp_dbf_rec_target(id, ref, dbf, &port->status,
-			    &port->erp_counter, port->wwpn, port->d_id,
-			    ZFCP_DBF_INVALID_LUN);
-}
-
-/**
- * zfcp_dbf_rec_lun - trace event for LUN state change
- * @id: identifier for trigger of state change
- * @ref: additional reference (e.g. request)
- * @sdev: SCSI device
- */
-void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev)
-{
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
-	struct zfcp_port *port = zfcp_sdev->port;
-	struct zfcp_dbf *dbf = port->adapter->dbf;
-
-	zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status,
-			    &zfcp_sdev->erp_counter, port->wwpn, port->d_id,
-			    zfcp_scsi_dev_lun(sdev));
-}
-
-/**
- * zfcp_dbf_rec_trigger - trace event for triggered error recovery
- * @id2: identifier for error recovery trigger
- * @ref: additional reference (e.g. request)
- * @want: originally requested error recovery action
- * @need: error recovery action actually initiated
- * @action: address of error recovery action struct
- * @adapter: adapter
- * @port: port
- * @sdev: SCSI device
- */
-void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
-			  struct zfcp_adapter *adapter, struct zfcp_port *port,
-			  struct scsi_device *sdev)
-{
-	struct zfcp_dbf *dbf = adapter->dbf;
-	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dbf->rec_lock, flags);
-	memset(r, 0, sizeof(*r));
-	r->id = ZFCP_REC_DBF_ID_TRIGGER;
-	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
-	r->u.trigger.ref = (unsigned long)ref;
-	r->u.trigger.want = want;
-	r->u.trigger.need = need;
-	r->u.trigger.action = (unsigned long)action;
-	r->u.trigger.as = atomic_read(&adapter->status);
-	if (port) {
-		r->u.trigger.ps = atomic_read(&port->status);
-		r->u.trigger.wwpn = port->wwpn;
-	}
-	if (sdev)
-		r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status);
-	r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) :
-				      ZFCP_DBF_INVALID_LUN;
-	debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
-	spin_unlock_irqrestore(&dbf->rec_lock, flags);
-}
-
-/**
- * zfcp_dbf_rec_action - trace event showing progress of recovery action
- * @id2: identifier
- * @erp_action: error recovery action struct pointer
- */
-void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
-{
-	struct zfcp_dbf *dbf = erp_action->adapter->dbf;
-	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dbf->rec_lock, flags);
-	memset(r, 0, sizeof(*r));
-	r->id = ZFCP_REC_DBF_ID_ACTION;
-	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
-	r->u.action.action = (unsigned long)erp_action;
-	r->u.action.status = erp_action->status;
-	r->u.action.step = erp_action->step;
-	r->u.action.fsf_req = erp_action->fsf_req_id;
-	debug_event(dbf->rec, 5, r, sizeof(*r));
-	spin_unlock_irqrestore(&dbf->rec_lock, flags);
-}
-
-/**
- * zfcp_dbf_san_ct_request - trace event for issued CT request
- * @fsf_req: request containing issued CT data
- * @d_id: destination id where ct request is sent to
- */
-void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
-{
-	struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct zfcp_dbf *dbf = adapter->dbf;
-	struct fc_ct_hdr *hdr = sg_virt(ct->req);
-	struct zfcp_dbf_san_record *r = &dbf->san_buf;
-	struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
-	int level = 3;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dbf->san_lock, flags);
-	memset(r, 0, sizeof(*r));
-	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
-	r->fsf_reqid = fsf_req->req_id;
-	r->fsf_seqno = fsf_req->seq_no;
-	oct->d_id = d_id;
-	oct->cmd_req_code = hdr->ct_cmd;
-	oct->revision = hdr->ct_rev;
-	oct->gs_type = hdr->ct_fs_type;
-	oct->gs_subtype = hdr->ct_fs_subtype;
-	oct->options = hdr->ct_options;
-	oct->max_res_size = hdr->ct_mr_size;
-	oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
-		       ZFCP_DBF_SAN_MAX_PAYLOAD);
-	debug_event(dbf->san, level, r, sizeof(*r));
-	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
-			 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
-	spin_unlock_irqrestore(&dbf->san_lock, flags);
-}
-
-/**
- * zfcp_dbf_san_ct_response - trace event for completion of CT request
- * @fsf_req: request containing CT response
- */
-void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
-{
-	struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct fc_ct_hdr *hdr = sg_virt(ct->resp);
-	struct zfcp_dbf *dbf = adapter->dbf;
-	struct zfcp_dbf_san_record *r = &dbf->san_buf;
-	struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
-	int level = 3;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dbf->san_lock, flags);
-	memset(r, 0, sizeof(*r));
-	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
-	r->fsf_reqid = fsf_req->req_id;
-	r->fsf_seqno = fsf_req->seq_no;
-	rct->cmd_rsp_code = hdr->ct_cmd;
-	rct->revision = hdr->ct_rev;
-	rct->reason_code = hdr->ct_reason;
-	rct->expl = hdr->ct_explan;
-	rct->vendor_unique = hdr->ct_vendor;
-	rct->max_res_size = hdr->ct_mr_size;
-	rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
-		       ZFCP_DBF_SAN_MAX_PAYLOAD);
-	debug_event(dbf->san, level, r, sizeof(*r));
-	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
-			 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
-	spin_unlock_irqrestore(&dbf->san_lock, flags);
-}
-
-static void zfcp_dbf_san_els(const char *tag, int level,
-			     struct zfcp_fsf_req *fsf_req, u32 d_id,
-			     void *buffer, int buflen)
-{
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct zfcp_dbf *dbf = adapter->dbf;
-	struct zfcp_dbf_san_record *rec = &dbf->san_buf;
+	struct zfcp_dbf_san *rec = &dbf->san_buf;
+	u16 rec_len;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(rec, 0, sizeof(*rec));
-	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-	rec->fsf_reqid = fsf_req->req_id;
-	rec->fsf_seqno = fsf_req->seq_no;
-	rec->u.els.d_id = d_id;
-	debug_event(dbf->san, level, rec, sizeof(*rec));
-	zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
-			 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
+
+	rec->id = id;
+	rec->fsf_req_id = req_id;
+	rec->d_id = d_id;
+	rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
+	memcpy(rec->payload, data, rec_len);
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+
+	debug_event(dbf->san, 1, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
 /**
- * zfcp_dbf_san_els_request - trace event for issued ELS
- * @fsf_req: request containing issued ELS
+ * zfcp_dbf_san_req - trace event for issued SAN request
+ * @tag: identifier for event
+ * @fsf_req: request containing issued CT data
+ * @d_id: destination ID
  */
-void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
 {
-	struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
-	u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	u16 length;
 
-	zfcp_dbf_san_els("oels", 2, fsf_req, d_id,
-			 sg_virt(els->req), els->req->length);
+	length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
+	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
+		     fsf->req_id, d_id);
 }
 
 /**
- * zfcp_dbf_san_els_response - trace event for completed ELS
- * @fsf_req: request containing ELS response
+ * zfcp_dbf_san_res - trace event for received SAN request
+ * @tag: identifier for event
+ * @fsf_req: request containing issued CT data
  */
-void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 {
-	struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
-	u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	u16 length;
 
-	zfcp_dbf_san_els("rels", 2, fsf_req, d_id,
-			       sg_virt(els->resp), els->resp->length);
+	length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
+	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
+		     fsf->req_id, 0);
 }
 
 /**
- * zfcp_dbf_san_incoming_els - trace event for incomig ELS
- * @fsf_req: request containing unsolicited status buffer with incoming ELS
+ * zfcp_dbf_san_in_els - trace event for incoming ELS
+ * @tag: identifier for event
+ * @fsf_req: request containing issued CT data
  */
-void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 {
-	struct fsf_status_read_buffer *buf =
-			(struct fsf_status_read_buffer *)fsf_req->data;
-	int length = (int)buf->length -
-		     (int)((void *)&buf->payload - (void *)buf);
+	struct zfcp_dbf *dbf = fsf->adapter->dbf;
+	struct fsf_status_read_buffer *srb =
+		(struct fsf_status_read_buffer *) fsf->data;
+	u16 length;
 
-	zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id),
-			       (void *)buf->payload.data, length);
+	length = (u16)(srb->length -
+			offsetof(struct fsf_status_read_buffer, payload));
+	zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
+		     fsf->req_id, ntoh24(srb->d_id));
 }
 
-static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
-				    char *out_buf, const char *in_buf)
+/**
+ * zfcp_dbf_scsi - trace event for scsi commands
+ * @tag: identifier for event
+ * @sc: pointer to struct scsi_cmnd
+ * @fsf: pointer to struct zfcp_fsf_req
+ */
+void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
 {
-	struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf;
-	char *p = out_buf;
-
-	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
-		return 0;
-
-	zfcp_dbf_tag(&p, "tag", r->tag);
-	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
-	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-
-	if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
-		struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
-		zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
-		zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
-		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
-		zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
-		zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype);
-		zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
-		zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
-	} else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
-		struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp;
-		zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
-		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
-		zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
-		zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
-		zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
-		zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
-	} else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
-		   strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
-		   strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
-		struct zfcp_dbf_san_record_els *els = &r->u.els;
-		zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
-	}
-	return p - out_buf;
-}
-
-static struct debug_view zfcp_dbf_san_view = {
-	.name = "structured",
-	.header_proc = zfcp_dbf_view_header,
-	.format_proc = zfcp_dbf_san_view_format,
-};
-
-void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
-		    struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd,
-		    struct zfcp_fsf_req *fsf_req, unsigned long old_req_id)
-{
-	struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
-	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
-	unsigned long flags;
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sc->device->host->hostdata[0];
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
 	struct fcp_resp_with_ext *fcp_rsp;
-	struct fcp_resp_rsp_info *fcp_rsp_info = NULL;
-	char *fcp_sns_info = NULL;
-	int offset = 0, buflen = 0;
+	struct fcp_resp_rsp_info *fcp_rsp_info;
+	unsigned long flags;
 
 	spin_lock_irqsave(&dbf->scsi_lock, flags);
-	do {
-		memset(rec, 0, sizeof(*rec));
-		if (offset == 0) {
-			strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-			strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
-			if (scsi_cmnd != NULL) {
-				if (scsi_cmnd->device) {
-					rec->scsi_id = scsi_cmnd->device->id;
-					rec->scsi_lun = scsi_cmnd->device->lun;
-				}
-				rec->scsi_result = scsi_cmnd->result;
-				rec->scsi_cmnd = (unsigned long)scsi_cmnd;
-				memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
-					min((int)scsi_cmnd->cmd_len,
-						ZFCP_DBF_SCSI_OPCODE));
-				rec->scsi_retries = scsi_cmnd->retries;
-				rec->scsi_allowed = scsi_cmnd->allowed;
-			}
-			if (fsf_req != NULL) {
-				fcp_rsp = (struct fcp_resp_with_ext *)
-					&(fsf_req->qtcb->bottom.io.fcp_rsp);
-				fcp_rsp_info = (struct fcp_resp_rsp_info *)
-					&fcp_rsp[1];
-				fcp_sns_info = (char *) &fcp_rsp[1];
-				if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
-					fcp_sns_info += fcp_rsp->ext.fr_sns_len;
+	memset(rec, 0, sizeof(*rec));
 
-				rec->rsp_validity = fcp_rsp->resp.fr_flags;
-				rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
-				rec->rsp_resid = fcp_rsp->ext.fr_resid;
-				if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
-					rec->rsp_code = fcp_rsp_info->rsp_code;
-				if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
-					buflen = min(fcp_rsp->ext.fr_sns_len,
-					   (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
-					rec->sns_info_len = buflen;
-					memcpy(rec->sns_info, fcp_sns_info,
-					       min(buflen,
-						   ZFCP_DBF_SCSI_FCP_SNS_INFO));
-					offset += min(buflen,
-						      ZFCP_DBF_SCSI_FCP_SNS_INFO);
-				}
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_SCSI_CMND;
+	rec->scsi_result = sc->result;
+	rec->scsi_retries = sc->retries;
+	rec->scsi_allowed = sc->allowed;
+	rec->scsi_id = sc->device->id;
+	rec->scsi_lun = sc->device->lun;
+	rec->host_scribble = (unsigned long)sc->host_scribble;
 
-				rec->fsf_reqid = fsf_req->req_id;
-				rec->fsf_seqno = fsf_req->seq_no;
-				rec->fsf_issued = fsf_req->issued;
-			}
-			rec->old_fsf_reqid = old_req_id;
-		} else {
-			strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
-			dump->total_size = buflen;
-			dump->offset = offset;
-			dump->size = min(buflen - offset,
-					 (int)sizeof(struct
-						     zfcp_dbf_scsi_record) -
-					 (int)sizeof(struct zfcp_dbf_dump));
-			memcpy(dump->data, fcp_sns_info + offset, dump->size);
-			offset += dump->size;
+	memcpy(rec->scsi_opcode, sc->cmnd,
+	       min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
+
+	if (fsf) {
+		rec->fsf_req_id = fsf->req_id;
+		fcp_rsp = (struct fcp_resp_with_ext *)
+				&(fsf->qtcb->bottom.io.fcp_rsp);
+		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
 		}
-		debug_event(dbf->scsi, level, rec, sizeof(*rec));
-	} while (offset < buflen);
+		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+					  (u16)ZFCP_DBF_PAY_MAX_REC);
+			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+					  "fcp_sns", fsf->req_id);
+		}
+	}
+
+	debug_event(dbf->scsi, 1, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
 }
 
-static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
-				     char *out_buf, const char *in_buf)
-{
-	struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf;
-	struct timespec t;
-	char *p = out_buf;
-
-	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
-		return 0;
-
-	zfcp_dbf_tag(&p, "tag", r->tag);
-	zfcp_dbf_tag(&p, "tag2", r->tag2);
-	zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id);
-	zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
-	zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
-	zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
-	zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
-		      0, ZFCP_DBF_SCSI_OPCODE);
-	zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);
-	zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed);
-	if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid);
-	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
-	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	stck_to_timespec(r->fsf_issued, &t);
-	zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
-
-	if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
-		zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity);
-		zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x",
-			     r->rsp_scsi_status);
-		zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid);
-		zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code);
-		zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len);
-		zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info,
-			      min((int)r->sns_info_len,
-			      ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
-			      r->sns_info_len);
-	}
-	p += sprintf(p, "\n");
-	return p - out_buf;
-}
-
-static struct debug_view zfcp_dbf_scsi_view = {
-	.name = "structured",
-	.header_proc = zfcp_dbf_view_header,
-	.format_proc = zfcp_dbf_scsi_view_format,
-};
-
-static debug_info_t *zfcp_dbf_reg(const char *name, int level,
-				  struct debug_view *view, int size)
+static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
 {
 	struct debug_info *d;
 
-	d = debug_register(name, dbfsize, level, size);
+	d = debug_register(name, size, 1, rec_size);
 	if (!d)
 		return NULL;
 
 	debug_register_view(d, &debug_hex_ascii_view);
-	debug_register_view(d, view);
-	debug_set_level(d, level);
+	debug_set_level(d, 3);
 
 	return d;
 }
 
+static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
+{
+	if (!dbf)
+		return;
+
+	debug_unregister(dbf->scsi);
+	debug_unregister(dbf->san);
+	debug_unregister(dbf->hba);
+	debug_unregister(dbf->pay);
+	debug_unregister(dbf->rec);
+	kfree(dbf);
+}
+
 /**
  * zfcp_adapter_debug_register - registers debug feature for an adapter
  * @adapter: pointer to adapter for which debug features should be registered
@@ -1003,69 +415,66 @@
  */
 int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
 {
-	char dbf_name[DEBUG_MAX_NAME_LEN];
+	char name[DEBUG_MAX_NAME_LEN];
 	struct zfcp_dbf *dbf;
 
 	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
 	if (!dbf)
 		return -ENOMEM;
 
-	dbf->adapter = adapter;
-
+	spin_lock_init(&dbf->pay_lock);
 	spin_lock_init(&dbf->hba_lock);
 	spin_lock_init(&dbf->san_lock);
 	spin_lock_init(&dbf->scsi_lock);
 	spin_lock_init(&dbf->rec_lock);
 
 	/* debug feature area which records recovery activity */
-	sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
-	dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view,
-				sizeof(struct zfcp_dbf_rec_record));
+	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
+	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
 	if (!dbf->rec)
 		goto err_out;
 
 	/* debug feature area which records HBA (FSF and QDIO) conditions */
-	sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
-	dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view,
-				sizeof(struct zfcp_dbf_hba_record));
+	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
+	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
 	if (!dbf->hba)
 		goto err_out;
 
+	/* debug feature area which records payload info */
+	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
+	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
+	if (!dbf->pay)
+		goto err_out;
+
 	/* debug feature area which records SAN command failures and recovery */
-	sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
-	dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view,
-				sizeof(struct zfcp_dbf_san_record));
+	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
+	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
 	if (!dbf->san)
 		goto err_out;
 
 	/* debug feature area which records SCSI command failures and recovery */
-	sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
-	dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view,
-				 sizeof(struct zfcp_dbf_scsi_record));
+	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
+	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
 	if (!dbf->scsi)
 		goto err_out;
 
 	adapter->dbf = dbf;
-	return 0;
 
+	return 0;
 err_out:
-	zfcp_dbf_adapter_unregister(dbf);
+	zfcp_dbf_unregister(dbf);
 	return -ENOMEM;
 }
 
 /**
  * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
- * @dbf: pointer to dbf for which debug features should be unregistered
+ * @adapter: pointer to adapter for which debug features should be unregistered
  */
-void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
+void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
 {
-	if (!dbf)
-		return;
-	debug_unregister(dbf->scsi);
-	debug_unregister(dbf->san);
-	debug_unregister(dbf->hba);
-	debug_unregister(dbf->rec);
-	dbf->adapter->dbf = NULL;
-	kfree(dbf);
+	struct zfcp_dbf *dbf = adapter->dbf;
+
+	adapter->dbf = NULL;
+	zfcp_dbf_unregister(dbf);
 }
 
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 04081b1..714f087 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -1,22 +1,8 @@
 /*
- * This file is part of the zfcp device driver for
- * FCP adapters for IBM System z9 and zSeries.
+ * zfcp device driver
+ * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright IBM Corp. 2008, 2010
  */
 
 #ifndef ZFCP_DBF_H
@@ -27,322 +13,350 @@
 #include "zfcp_fsf.h"
 #include "zfcp_def.h"
 
-#define ZFCP_DBF_TAG_SIZE      4
-#define ZFCP_DBF_ID_SIZE       7
+#define ZFCP_DBF_TAG_LEN       7
 
 #define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull
 
-struct zfcp_dbf_dump {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u32 total_size;		/* size of total dump data */
-	u32 offset;		/* how much data has being already dumped */
-	u32 size;		/* how much data comes with this record */
-	u8 data[];		/* dump data */
-} __attribute__ ((packed));
-
-struct zfcp_dbf_rec_record_thread {
-	u32 total;
+/**
+ * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
+ * @ready: number of ready recovery actions
+ * @running: number of running recovery actions
+ * @want: wanted recovery action
+ * @need: needed recovery action
+ */
+struct zfcp_dbf_rec_trigger {
 	u32 ready;
 	u32 running;
-};
-
-struct zfcp_dbf_rec_record_target {
-	u64 ref;
-	u32 status;
-	u32 d_id;
-	u64 wwpn;
-	u64 fcp_lun;
-	u32 erp_count;
-};
-
-struct zfcp_dbf_rec_record_trigger {
 	u8 want;
 	u8 need;
-	u32 as;
-	u32 ps;
-	u32 ls;
-	u64 ref;
-	u64 action;
-	u64 wwpn;
-	u64 fcp_lun;
+} __packed;
+
+/**
+ * struct zfcp_dbf_rec_running - trace record for running recovery
+ * @fsf_req_id: request id for fsf requests
+ * @rec_status: status of the fsf request
+ * @rec_step: current step of the recovery action
+ * @rec_count: recovery counter
+ */
+struct zfcp_dbf_rec_running {
+	u64 fsf_req_id;
+	u32 rec_status;
+	u16 rec_step;
+	u8 rec_action;
+	u8 rec_count;
+} __packed;
+
+/**
+ * enum zfcp_dbf_rec_id - recovery trace record id
+ * @ZFCP_DBF_REC_TRIG: triggered recovery identifier
+ * @ZFCP_DBF_REC_RUN: running recovery identifier
+ */
+enum zfcp_dbf_rec_id {
+	ZFCP_DBF_REC_TRIG	= 1,
+	ZFCP_DBF_REC_RUN	= 2,
 };
 
-struct zfcp_dbf_rec_record_action {
-	u32 status;
-	u32 step;
-	u64 action;
-	u64 fsf_req;
-};
-
-struct zfcp_dbf_rec_record {
+/**
+ * struct zfcp_dbf_rec - trace record for error recovery actions
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @lun: logical unit number
+ * @wwpn: world wide port number
+ * @d_id: destination ID
+ * @adapter_status: current status of the adapter
+ * @port_status: current status of the port
+ * @lun_status: current status of the lun
+ * @u.trig: structure zfcp_dbf_rec_trigger
+ * @u.run: structure zfcp_dbf_rec_running
+ */
+struct zfcp_dbf_rec {
 	u8 id;
-	char id2[7];
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 lun;
+	u64 wwpn;
+	u32 d_id;
+	u32 adapter_status;
+	u32 port_status;
+	u32 lun_status;
 	union {
-		struct zfcp_dbf_rec_record_action action;
-		struct zfcp_dbf_rec_record_thread thread;
-		struct zfcp_dbf_rec_record_target target;
-		struct zfcp_dbf_rec_record_trigger trigger;
+		struct zfcp_dbf_rec_trigger trig;
+		struct zfcp_dbf_rec_running run;
 	} u;
+} __packed;
+
+/**
+ * enum zfcp_dbf_san_id - SAN trace record identifier
+ * @ZFCP_DBF_SAN_REQ: request trace record id
+ * @ZFCP_DBF_SAN_RES: response trace record id
+ * @ZFCP_DBF_SAN_ELS: extended link service record id
+ */
+enum zfcp_dbf_san_id {
+	ZFCP_DBF_SAN_REQ	= 1,
+	ZFCP_DBF_SAN_RES	= 2,
+	ZFCP_DBF_SAN_ELS	= 3,
 };
 
-enum {
-	ZFCP_REC_DBF_ID_ACTION,
-	ZFCP_REC_DBF_ID_THREAD,
-	ZFCP_REC_DBF_ID_TARGET,
-	ZFCP_REC_DBF_ID_TRIGGER,
-};
+/** struct zfcp_dbf_san - trace record for SAN requests and responses
+ * @id: unique number of SAN record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @payload: unformatted information related to request/response
+ * @d_id: destination id
+ */
+struct zfcp_dbf_san {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 d_id;
+#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
+	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+} __packed;
 
-struct zfcp_dbf_hba_record_response {
-	u32 fsf_command;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u32 fsf_prot_status;
+/**
+ * struct zfcp_dbf_hba_res - trace record for hba responses
+ * @req_issued: timestamp when request was issued
+ * @prot_status: protocol status
+ * @prot_status_qual: protocol status qualifier
+ * @fsf_status: fsf status
+ * @fsf_status_qual: fsf status qualifier
+ */
+struct zfcp_dbf_hba_res {
+	u64 req_issued;
+	u32 prot_status;
+	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
 	u32 fsf_status;
-	u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
-	u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
-	u32 fsf_req_status;
-	u8 sbal_first;
-	u8 sbal_last;
-	u8 sbal_response;
-	u8 pool;
-	u64 erp_action;
-	union {
-		struct {
-			u64 cmnd;
-			u32 data_dir;
-		} fcp;
-		struct {
-			u64 wwpn;
-			u32 d_id;
-			u32 port_handle;
-		} port;
-		struct {
-			u64 wwpn;
-			u64 fcp_lun;
-			u32 port_handle;
-			u32 lun_handle;
-		} unit;
-		struct {
-			u32 d_id;
-		} els;
-	} u;
-} __attribute__ ((packed));
+	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+} __packed;
 
-struct zfcp_dbf_hba_record_status {
-	u8 failed;
+/**
+ * struct zfcp_dbf_hba_uss - trace record for unsolicited status
+ * @status_type: type of unsolicited status
+ * @status_subtype: subtype of unsolicited status
+ * @d_id: destination ID
+ * @lun: logical unit number
+ * @queue_designator: queue designator
+ */
+struct zfcp_dbf_hba_uss {
 	u32 status_type;
 	u32 status_subtype;
-	struct fsf_queue_designator
-	 queue_designator;
-	u32 payload_size;
-#define ZFCP_DBF_UNSOL_PAYLOAD				80
-#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL		32
-#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD	56
-#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT	2 * sizeof(u32)
-	u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
-} __attribute__ ((packed));
-
-struct zfcp_dbf_hba_record_qdio {
-	u32 qdio_error;
-	u8 sbal_index;
-	u8 sbal_count;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_hba_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
-	union {
-		struct zfcp_dbf_hba_record_response response;
-		struct zfcp_dbf_hba_record_status status;
-		struct zfcp_dbf_hba_record_qdio qdio;
-		struct fsf_bit_error_payload berr;
-	} u;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record_ct_request {
-	u16 cmd_req_code;
-	u8 revision;
-	u8 gs_type;
-	u8 gs_subtype;
-	u8 options;
-	u16 max_res_size;
-	u32 len;
 	u32 d_id;
-} __attribute__ ((packed));
+	u64 lun;
+	u64 queue_designator;
+} __packed;
 
-struct zfcp_dbf_san_record_ct_response {
-	u16 cmd_rsp_code;
-	u8 revision;
-	u8 reason_code;
-	u8 expl;
-	u8 vendor_unique;
-	u16 max_res_size;
-	u32 len;
-} __attribute__ ((packed));
+/**
+ * enum zfcp_dbf_hba_id - HBA trace record identifier
+ * @ZFCP_DBF_HBA_RES: response trace record
+ * @ZFCP_DBF_HBA_USS: unsolicited status trace record
+ * @ZFCP_DBF_HBA_BIT: bit error trace record
+ */
+enum zfcp_dbf_hba_id {
+	ZFCP_DBF_HBA_RES	= 1,
+	ZFCP_DBF_HBA_USS	= 2,
+	ZFCP_DBF_HBA_BIT	= 3,
+};
 
-struct zfcp_dbf_san_record_els {
-	u32 d_id;
-} __attribute__ ((packed));
-
-struct zfcp_dbf_san_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u64 fsf_reqid;
-	u32 fsf_seqno;
+/**
+ * struct zfcp_dbf_hba - common trace record for HBA records
+ * @id: unique number of HBA record type
+ * @tag: identifier string specifying the location of initiation
+ * @fsf_req_id: request id for fsf requests
+ * @fsf_req_status: status of fsf request
+ * @fsf_cmd: fsf command
+ * @fsf_seq_no: fsf sequence number
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @u: record type specific data
+ */
+struct zfcp_dbf_hba {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+	u32 fsf_req_status;
+	u32 fsf_cmd;
+	u32 fsf_seq_no;
+	u16 pl_len;
 	union {
-		struct zfcp_dbf_san_record_ct_request ct_req;
-		struct zfcp_dbf_san_record_ct_response ct_resp;
-		struct zfcp_dbf_san_record_els els;
+		struct zfcp_dbf_hba_res res;
+		struct zfcp_dbf_hba_uss uss;
+		struct fsf_bit_error_payload be;
 	} u;
-} __attribute__ ((packed));
+} __packed;
 
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+/**
+ * enum zfcp_dbf_scsi_id - scsi trace record identifier
+ * @ZFCP_DBF_SCSI_CMND: scsi command trace record
+ */
+enum zfcp_dbf_scsi_id {
+	ZFCP_DBF_SCSI_CMND	= 1,
+};
 
-struct zfcp_dbf_scsi_record {
-	u8 tag[ZFCP_DBF_TAG_SIZE];
-	u8 tag2[ZFCP_DBF_TAG_SIZE];
+/**
+ * struct zfcp_dbf_scsi - common trace record for SCSI records
+ * @id: unique number of SCSI record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+ * @scsi_lun: scsi device logical unit number
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+ * @fcp_rsp_info: FCP response info
+ * @scsi_opcode: scsi opcode
+ * @fsf_req_id: request id of fsf request
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fcp_rsp: response for FCP request
+ */
+struct zfcp_dbf_scsi {
+	u8 id;
+	char tag[ZFCP_DBF_TAG_LEN];
 	u32 scsi_id;
 	u32 scsi_lun;
 	u32 scsi_result;
-	u64 scsi_cmnd;
-#define ZFCP_DBF_SCSI_OPCODE	16
-	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
 	u8 scsi_retries;
 	u8 scsi_allowed;
-	u64 fsf_reqid;
-	u32 fsf_seqno;
-	u64 fsf_issued;
-	u64 old_fsf_reqid;
-	u8 rsp_validity;
-	u8 rsp_scsi_status;
-	u32 rsp_resid;
-	u8 rsp_code;
-#define ZFCP_DBF_SCSI_FCP_SNS_INFO	16
-#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO	256
-	u32 sns_info_len;
-	u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
-} __attribute__ ((packed));
+	u8 fcp_rsp_info;
+#define ZFCP_DBF_SCSI_OPCODE	16
+	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
+	u64 fsf_req_id;
+	u64 host_scribble;
+	u16 pl_len;
+	struct fcp_resp_with_ext fcp_rsp;
+} __packed;
 
+/**
+ * struct zfcp_dbf_pay - trace record for unformatted payload information
+ * @area: area this record originated from
+ * @counter: ascending record number
+ * @fsf_req_id: request id of fsf request
+ * @data: unformatted data
+ */
+struct zfcp_dbf_pay {
+	u8 counter;
+	char area[ZFCP_DBF_TAG_LEN];
+	u64 fsf_req_id;
+#define ZFCP_DBF_PAY_MAX_REC 0x100
+	char data[ZFCP_DBF_PAY_MAX_REC];
+} __packed;
+
+/**
+ * struct zfcp_dbf - main dbf trace structure
+ * @pay: reference to payload trace area
+ * @rec: reference to recovery trace area
+ * @hba: reference to hba trace area
+ * @san: reference to san trace area
+ * @scsi: reference to scsi trace area
+ * @pay_lock: lock protecting payload trace buffer
+ * @rec_lock: lock protecting recovery trace buffer
+ * @hba_lock: lock protecting hba trace buffer
+ * @san_lock: lock protecting san trace buffer
+ * @scsi_lock: lock protecting scsi trace buffer
+ * @pay_buf: pre-allocated buffer for payload
+ * @rec_buf: pre-allocated buffer for recovery
+ * @hba_buf: pre-allocated buffer for hba
+ * @san_buf: pre-allocated buffer for san
+ * @scsi_buf: pre-allocated buffer for scsi
+ */
 struct zfcp_dbf {
+	debug_info_t			*pay;
 	debug_info_t			*rec;
 	debug_info_t			*hba;
 	debug_info_t			*san;
 	debug_info_t			*scsi;
+	spinlock_t			pay_lock;
 	spinlock_t			rec_lock;
 	spinlock_t			hba_lock;
 	spinlock_t			san_lock;
 	spinlock_t			scsi_lock;
-	struct zfcp_dbf_rec_record	rec_buf;
-	struct zfcp_dbf_hba_record	hba_buf;
-	struct zfcp_dbf_san_record	san_buf;
-	struct zfcp_dbf_scsi_record	scsi_buf;
-	struct zfcp_adapter		*adapter;
+	struct zfcp_dbf_pay		pay_buf;
+	struct zfcp_dbf_rec		rec_buf;
+	struct zfcp_dbf_hba		hba_buf;
+	struct zfcp_dbf_san		san_buf;
+	struct zfcp_dbf_scsi		scsi_buf;
 };
 
 static inline
-void zfcp_dbf_hba_fsf_resp(const char *tag2, int level,
-			   struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
+void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
-	if (level <= dbf->hba->level)
-		_zfcp_dbf_hba_fsf_response(tag2, level, req, dbf);
+	if (level <= req->adapter->dbf->hba->level)
+		zfcp_dbf_hba_fsf_res(tag, req);
 }
 
 /**
  * zfcp_dbf_hba_fsf_response - trace event for request completion
- * @fsf_req: request that has been completed
+ * @req: request that has been completed
  */
-static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+static inline
+void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 {
-	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct fsf_qtcb *qtcb = req->qtcb;
 
 	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
 	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
-		zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
 	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
 
 	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
 		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
-		zfcp_dbf_hba_fsf_resp("open", 4, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_open", 4, req);
 
 	} else if (qtcb->header.log_length) {
-		zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req);
 
 	} else {
-		zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);
+		zfcp_dbf_hba_fsf_resp("fs_norm", 6, req);
 	}
- }
-
-/**
- * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
- * @tag: tag indicating which kind of unsolicited status has been received
- * @dbf: reference to dbf structure
- * @status_buffer: buffer containing payload of unsolicited status
- */
-static inline
-void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
-			    struct fsf_status_read_buffer *buf)
-{
-	int level = 2;
-
-	if (level <= dbf->hba->level)
-		_zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
 }
 
 static inline
-void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
-		   struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
-		   struct zfcp_fsf_req *req, unsigned long old_id)
+void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
+		   struct zfcp_fsf_req *req)
 {
-	if (level <= dbf->scsi->level)
-		_zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id);
+	struct zfcp_adapter *adapter = (struct zfcp_adapter *)
+					scmd->device->host->hostdata[0];
+
+	if (level <= adapter->dbf->scsi->level)
+		zfcp_dbf_scsi(tag, scmd, req);
 }
 
 /**
  * zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
  * @req: FSF request used to issue SCSI command
  */
 static inline
-void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
-			  struct zfcp_fsf_req *req)
+void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
 {
 	if (scmd->result != 0)
-		zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_err", 3, scmd, req);
 	else if (scmd->retries > 0)
-		zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_ret", 4, scmd, req);
 	else
-		zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+		_zfcp_dbf_scsi("rsl_nor", 6, scmd, req);
 }
 
 /**
  * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
- * @dbf: adapter dbf trace
  * @scmd: SCSI command pointer
  */
 static inline
-void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
+void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd)
 {
-	zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
+	_zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL);
 }
 
 /**
  * zfcp_dbf_scsi_abort - trace event for SCSI command abort
  * @tag: tag indicating success or failure of abort operation
- * @adapter: adapter thas has been used to issue SCSI command to be aborted
  * @scmd: SCSI command to be aborted
- * @new_req: request containing abort (might be NULL)
- * @old_id: identifier of request containg SCSI command to be aborted
+ * @fsf_req: request containing abort (might be NULL)
  */
 static inline
-void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
-			 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req,
-			 unsigned long old_id)
+void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+			 struct zfcp_fsf_req *fsf_req)
 {
-	zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id);
+	_zfcp_dbf_scsi(tag, 1, scmd, fsf_req);
 }
 
 /**
@@ -352,12 +366,17 @@
  * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
  */
 static inline
-void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
 {
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+	char tmp_tag[ZFCP_DBF_TAG_LEN];
 
-	zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
-		      zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
+	if (flag == FCP_TMF_TGT_RESET)
+		memcpy(tmp_tag, "tr_", 3);
+	else
+		memcpy(tmp_tag, "lr_", 3);
+
+	memcpy(&tmp_tag[3], tag, 4);
+	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
 #endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 0bcd580..e003e30 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -76,9 +76,9 @@
 	struct zfcp_adapter *adapter = act->adapter;
 
 	list_move(&act->list, &act->adapter->erp_ready_head);
-	zfcp_dbf_rec_action("erardy1", act);
+	zfcp_dbf_rec_run("erardy1", act);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("erardy2", adapter->dbf);
+	zfcp_dbf_rec_run("erardy2", act);
 }
 
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -236,10 +236,10 @@
 static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 				   struct zfcp_port *port,
 				   struct scsi_device *sdev,
-				   char *id, void *ref, u32 act_status)
+				   char *id, u32 act_status)
 {
 	int retval = 1, need;
-	struct zfcp_erp_action *act = NULL;
+	struct zfcp_erp_action *act;
 
 	if (!adapter->erp_thread)
 		return -EIO;
@@ -255,15 +255,14 @@
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
-	zfcp_dbf_rec_thread("eracte1", adapter->dbf);
 	retval = 0;
  out:
-	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev);
+	zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
 	return retval;
 }
 
 static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
-				    int clear_mask, char *id, void *ref)
+				    int clear_mask, char *id)
 {
 	zfcp_erp_adapter_block(adapter, clear_mask);
 	zfcp_scsi_schedule_rports_block(adapter);
@@ -275,7 +274,7 @@
 		return -EIO;
 	}
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				       adapter, NULL, NULL, id, ref, 0);
+				       adapter, NULL, NULL, id, 0);
 }
 
 /**
@@ -283,10 +282,8 @@
  * @adapter: Adapter to reopen.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
-			     char *id, void *ref)
+void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
 {
 	unsigned long flags;
 
@@ -299,7 +296,7 @@
 					    ZFCP_STATUS_COMMON_ERP_FAILED);
 	else
 		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-					NULL, NULL, id, ref, 0);
+					NULL, NULL, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -308,13 +305,12 @@
  * @adapter: Adapter to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
 void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear,
-			       char *id, void *ref)
+			       char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref);
+	zfcp_erp_adapter_reopen(adapter, clear | flags, id);
 }
 
 /**
@@ -322,13 +318,11 @@
  * @port: Port to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
-			    void *ref)
+void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_port_reopen(port, clear | flags, id, ref);
+	zfcp_erp_port_reopen(port, clear | flags, id);
 }
 
 static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
@@ -337,8 +331,8 @@
 				    ZFCP_STATUS_COMMON_UNBLOCKED | clear);
 }
 
-static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
-					 int clear, char *id, void *ref)
+static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+					 char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
@@ -347,28 +341,26 @@
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-				port->adapter, port, NULL, id, ref, 0);
+				port->adapter, port, NULL, id, 0);
 }
 
 /**
  * zfcp_erp_port_forced_reopen - Forced close of port and open again
  * @port: Port to force close and to reopen.
+ * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
-				 void *ref)
+void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_port_forced_reopen(port, clear, id, ref);
+	_zfcp_erp_port_forced_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
-				 void *ref)
+static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	zfcp_erp_port_block(port, clear);
 	zfcp_scsi_schedule_rport_block(port);
@@ -380,24 +372,25 @@
 	}
 
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				       port->adapter, port, NULL, id, ref, 0);
+				       port->adapter, port, NULL, id, 0);
 }
 
 /**
  * zfcp_erp_port_reopen - trigger remote port recovery
  * @port: port to recover
  * @clear_mask: flags in port status to be cleared
+ * @id: Id for debug trace event.
  *
  * Returns 0 if recovery has been triggered, < 0 if not.
  */
-int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
+int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
 {
 	int retval;
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	retval = _zfcp_erp_port_reopen(port, clear, id, ref);
+	retval = _zfcp_erp_port_reopen(port, clear, id);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	return retval;
@@ -410,7 +403,7 @@
 }
 
 static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-				 void *ref, u32 act_status)
+				 u32 act_status)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -421,17 +414,18 @@
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
-				zfcp_sdev->port, sdev, id, ref, act_status);
+				zfcp_sdev->port, sdev, id, act_status);
 }
 
 /**
  * zfcp_erp_lun_reopen - initiate reopen of a LUN
  * @sdev: SCSI device / LUN to be reopened
  * @clear_mask: specifies flags in LUN status to be cleared
+ * @id: Id for debug trace event.
+ *
  * Return: 0 on success, < 0 on error
  */
-void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
-			 void *ref)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
 {
 	unsigned long flags;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -439,7 +433,7 @@
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
+	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -448,13 +442,11 @@
  * @sdev: SCSI device / LUN to shut down.
  * @clear: Status flags to clear.
  * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
  */
-void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id,
-			   void *ref)
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id)
 {
 	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_lun_reopen(sdev, clear | flags, id, ref);
+	zfcp_erp_lun_reopen(sdev, clear | flags, id);
 }
 
 /**
@@ -476,7 +468,7 @@
 	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF);
+	_zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	zfcp_erp_wait(adapter);
@@ -490,14 +482,14 @@
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
-		zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf);
+		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
-		zfcp_dbf_rec_port("erpubl1", NULL, port);
+		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
@@ -506,14 +498,14 @@
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
-		zfcp_dbf_rec_lun("erlubl1", NULL, sdev);
+		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 {
 	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
-	zfcp_dbf_rec_action("erator1", erp_action);
+	zfcp_dbf_rec_run("erator1", erp_action);
 }
 
 static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -530,11 +522,11 @@
 		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
 				   ZFCP_STATUS_ERP_TIMEDOUT)) {
 			req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
-			zfcp_dbf_rec_action("erscf_1", act);
+			zfcp_dbf_rec_run("erscf_1", act);
 			req->erp_action = NULL;
 		}
 		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
-			zfcp_dbf_rec_action("erscf_2", act);
+			zfcp_dbf_rec_run("erscf_2", act);
 		if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
 			act->fsf_req_id = 0;
 	} else
@@ -585,40 +577,40 @@
 }
 
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
-				      int clear, char *id, void *ref)
+				      int clear, char *id)
 {
 	struct zfcp_port *port;
 
 	read_lock(&adapter->port_list_lock);
 	list_for_each_entry(port, &adapter->port_list, list)
-		_zfcp_erp_port_reopen(port, clear, id, ref);
+		_zfcp_erp_port_reopen(port, clear, id);
 	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
-				     char *id, void *ref)
+				     char *id)
 {
 	struct scsi_device *sdev;
 
 	shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
+			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 {
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL);
+		_zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL);
+		_zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
+		_zfcp_erp_port_reopen(act->port, 0, "ersff_3");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
-		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0);
+		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0);
 		break;
 	}
 }
@@ -627,13 +619,13 @@
 {
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL);
+		_zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
+		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2");
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL);
+		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3");
 		break;
 	}
 }
@@ -652,17 +644,6 @@
 	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
-{
-	struct zfcp_qdio *qdio = act->adapter->qdio;
-
-	if (zfcp_qdio_open(qdio))
-		return ZFCP_ERP_FAILED;
-	init_waitqueue_head(&qdio->req_q_wq);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
-	return ZFCP_ERP_SUCCEEDED;
-}
-
 static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
 {
 	struct zfcp_port *port;
@@ -670,7 +651,7 @@
 				 adapter->peer_d_id);
 	if (IS_ERR(port)) /* error or port already attached */
 		return;
-	_zfcp_erp_port_reopen(port, 0, "ereptp1", NULL);
+	_zfcp_erp_port_reopen(port, 0, "ereptp1");
 }
 
 static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -693,10 +674,8 @@
 			return ZFCP_ERP_FAILED;
 		}
 
-		zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
 		wait_event(adapter->erp_ready_wq,
 			   !list_empty(&adapter->erp_ready_head));
-		zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
 		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
 			break;
 
@@ -735,10 +714,10 @@
 	if (ret)
 		return ZFCP_ERP_FAILED;
 
-	zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf);
+	zfcp_dbf_rec_run("erasox1", act);
 	wait_event(adapter->erp_ready_wq,
 		   !list_empty(&adapter->erp_ready_head));
-	zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf);
+	zfcp_dbf_rec_run("erasox2", act);
 	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
 		return ZFCP_ERP_FAILED;
 
@@ -788,7 +767,7 @@
 {
 	struct zfcp_adapter *adapter = act->adapter;
 
-	if (zfcp_erp_adapter_strategy_open_qdio(act)) {
+	if (zfcp_qdio_open(adapter->qdio)) {
 		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 				  &adapter->status);
@@ -1166,7 +1145,7 @@
 		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
 			_zfcp_erp_adapter_reopen(adapter,
 						 ZFCP_STATUS_COMMON_ERP_FAILED,
-						 "ersscg1", NULL);
+						 "ersscg1");
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1176,7 +1155,7 @@
 		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
 			_zfcp_erp_port_reopen(port,
 					      ZFCP_STATUS_COMMON_ERP_FAILED,
-					      "ersscg2", NULL);
+					      "ersscg2");
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1186,7 +1165,7 @@
 		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
 			_zfcp_erp_lun_reopen(sdev,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "ersscg3", NULL, 0);
+					     "ersscg3", 0);
 			return ZFCP_ERP_EXIT;
 		}
 		break;
@@ -1206,7 +1185,7 @@
 	}
 
 	list_del(&erp_action->list);
-	zfcp_dbf_rec_action("eractd1", erp_action);
+	zfcp_dbf_rec_run("eractd1", erp_action);
 
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
@@ -1313,7 +1292,7 @@
 			erp_action->status |= ZFCP_STATUS_ERP_LOWMEM;
 		}
 		if (adapter->erp_total_count == adapter->erp_low_mem_count)
-			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL);
+			_zfcp_erp_adapter_reopen(adapter, 0, "erstgy1");
 		else {
 			zfcp_erp_strategy_memwait(erp_action);
 			retval = ZFCP_ERP_CONTINUES;
@@ -1357,11 +1336,9 @@
 	unsigned long flags;
 
 	for (;;) {
-		zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
 		wait_event_interruptible(adapter->erp_ready_wq,
 			   !list_empty(&adapter->erp_ready_head) ||
 			   kthread_should_stop());
-		zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
 
 		if (kthread_should_stop())
 			break;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bf8f3e5..6e325284 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -45,47 +45,33 @@
 
 /* zfcp_dbf.c */
 extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
-extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *);
-extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
-extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
-extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
-extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
-				 struct zfcp_adapter *, struct zfcp_port *,
-				 struct scsi_device *);
-extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
-extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
-				       struct zfcp_dbf *);
-extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
-					  struct fsf_status_read_buffer *);
-extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
-extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
-extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
-extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
-			   struct scsi_cmnd *, struct zfcp_fsf_req *,
-			   unsigned long);
+extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
 
 /* zfcp_erp.c */
 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
-extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
-extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
-				      void *);
+extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
 extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
 extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
-extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
-extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
-					void *);
+extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
-extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
-extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *);
 extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
 extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
@@ -149,6 +135,8 @@
 extern int zfcp_qdio_open(struct zfcp_qdio *);
 extern void zfcp_qdio_close(struct zfcp_qdio *);
 extern void zfcp_qdio_siosl(struct zfcp_adapter *);
+extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
+					     struct qdio_buffer *);
 
 /* zfcp_scsi.c */
 extern struct zfcp_data zfcp_data;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 86fd905..30cf91a 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -174,7 +174,7 @@
 		if (!port->d_id)
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
-					     "fcrscn1", NULL);
+					     "fcrscn1");
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -215,7 +215,7 @@
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->wwpn == wwpn) {
-			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
+			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
 			break;
 		}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -251,7 +251,7 @@
 		(struct fsf_status_read_buffer *) fsf_req->data;
 	unsigned int els_type = status_buffer->payload.data[0];
 
-	zfcp_dbf_san_incoming_els(fsf_req);
+	zfcp_dbf_san_in_els("fciels1", fsf_req);
 	if (els_type == ELS_PLOGI)
 		zfcp_fc_incoming_plogi(fsf_req);
 	else if (els_type == ELS_LOGO)
@@ -360,7 +360,7 @@
 	ret = zfcp_fc_ns_gid_pn(port);
 	if (ret) {
 		/* could not issue gid_pn for some reason */
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
 		goto out;
 	}
 
@@ -369,7 +369,7 @@
 		goto out;
 	}
 
-	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
+	zfcp_erp_port_reopen(port, 0, "fcgpn_3");
 out:
 	put_device(&port->dev);
 }
@@ -426,7 +426,7 @@
 	if (adisc->els.status) {
 		/* request rejected or timed out */
 		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-					    "fcadh_1", NULL);
+					    "fcadh_1");
 		goto out;
 	}
 
@@ -436,7 +436,7 @@
 	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
 	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-				     "fcadh_2", NULL);
+				     "fcadh_2");
 		goto out;
 	}
 
@@ -507,7 +507,7 @@
 
 	/* send of ADISC was not possible */
 	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
-	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
+	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
 	put_device(&port->dev);
@@ -659,7 +659,7 @@
 		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
 					 ZFCP_STATUS_COMMON_NOESC, d_id);
 		if (!IS_ERR(port))
-			zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
+			zfcp_erp_port_reopen(port, 0, "fcegpf1");
 		else if (PTR_ERR(port) != -EEXIST)
 			ret = PTR_ERR(port);
 	}
@@ -671,7 +671,7 @@
 	write_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
-		zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
+		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
 		zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
 	}
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 2eb7dd5..60ff9d1 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -23,7 +23,7 @@
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
 	zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"fsrth_1", NULL);
+				"fsrth_1");
 }
 
 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
@@ -65,7 +65,7 @@
 {
 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
 		"operational because of an unsupported FC class\n");
-	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
+	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
 
@@ -98,7 +98,7 @@
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
 		if (port->d_id == d_id) {
-			zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
+			zfcp_erp_port_reopen(port, 0, "fssrpc1");
 			break;
 		}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
@@ -211,13 +211,13 @@
 	struct fsf_status_read_buffer *sr_buf = req->data;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
+		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
 		mempool_free(sr_buf, adapter->pool.status_read_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
 
-	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
+	zfcp_dbf_hba_fsf_uss("fssrh_2", req);
 
 	switch (sr_buf->status_type) {
 	case FSF_STATUS_READ_PORT_CLOSED:
@@ -232,7 +232,7 @@
 		dev_warn(&adapter->ccw_device->dev,
 			 "The error threshold for checksum statistics "
 			 "has been exceeded\n");
-		zfcp_dbf_hba_berr(adapter->dbf, req);
+		zfcp_dbf_hba_bit_err("fssrh_3", req);
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
@@ -247,7 +247,7 @@
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
-					"fssrh_2", req);
+					"fssrh_2");
 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
 
 		break;
@@ -287,7 +287,7 @@
 			"The FCP adapter reported a problem "
 			"that cannot be recovered\n");
 		zfcp_qdio_siosl(req->adapter);
-		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
 		break;
 	}
 	/* all non-return stats set FSFREQ_ERROR*/
@@ -304,7 +304,7 @@
 		dev_err(&req->adapter->ccw_device->dev,
 			"The FCP adapter does not recognize the command 0x%x\n",
 			req->qtcb->header.fsf_command);
-		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
+		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -335,17 +335,17 @@
 			"QTCB version 0x%x not supported by FCP adapter "
 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
 			psq->word[0], psq->word[1]);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
 		break;
 	case FSF_PROT_ERROR_STATE:
 	case FSF_PROT_SEQ_NUMB_ERROR:
-		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PROT_UNSUPP_QTCB_TYPE:
 		dev_err(&adapter->ccw_device->dev,
 			"The QTCB type is not supported by the FCP adapter\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
@@ -355,12 +355,12 @@
 		dev_err(&adapter->ccw_device->dev,
 			"0x%Lx is an ambiguous request identifier\n",
 			(unsigned long long)qtcb->bottom.support.req_handle);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
 		break;
 	case FSF_PROT_LINK_DOWN:
 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
 		/* go through reopen to flush pending requests */
-		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
 		break;
 	case FSF_PROT_REEST_QUEUE:
 		/* All ports should be marked as ready to run again */
@@ -369,14 +369,14 @@
 		zfcp_erp_adapter_reopen(adapter,
 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 					ZFCP_STATUS_COMMON_ERP_FAILED,
-					"fspse_8", req);
+					"fspse_8");
 		break;
 	default:
 		dev_err(&adapter->ccw_device->dev,
 			"0x%x is not a valid transfer protocol status\n",
 			qtcb->prefix.prot_status);
 		zfcp_qdio_siosl(adapter);
-		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
 	}
 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 }
@@ -482,7 +482,7 @@
 		dev_err(&adapter->ccw_device->dev,
 			"Unknown or unsupported arbitrated loop "
 			"fibre channel topology detected\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
 		return -EIO;
 	}
 
@@ -518,7 +518,7 @@
 				"FCP adapter maximum QTCB size (%d bytes) "
 				"is too small\n",
 				bottom->max_qtcb_size);
-			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
+			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
@@ -536,7 +536,7 @@
 			&qtcb->header.fsf_status_qual.link_down_info);
 		break;
 	default:
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
 		return;
 	}
 
@@ -552,14 +552,14 @@
 		dev_err(&adapter->ccw_device->dev,
 			"The FCP adapter only supports newer "
 			"control block versions\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
 		return;
 	}
 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
 		dev_err(&adapter->ccw_device->dev,
 			"The FCP adapter only supports older "
 			"control block versions\n");
-		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
+		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
 	}
 }
 
@@ -700,7 +700,7 @@
 		del_timer(&req->timer);
 		/* lookup request again, list might have changed */
 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
-		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
 		return -EIO;
 	}
 
@@ -754,10 +754,11 @@
 	goto out;
 
 failed_req_send:
+	req->data = NULL;
 	mempool_free(sr_buf, adapter->pool.status_read_data);
 failed_buf:
+	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);
-	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
 out:
 	spin_unlock_irq(&qdio->req_q_lock);
 	return retval;
@@ -776,14 +777,13 @@
 	case FSF_PORT_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
-						"fsafch1", req);
+						"fsafch1");
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
 	case FSF_LUN_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
-			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2",
-					     req);
+			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		}
 		break;
@@ -794,14 +794,13 @@
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "fsafch4", req);
+				    "fsafch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                 break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -882,7 +881,7 @@
 
 	switch (header->fsf_status) {
         case FSF_GOOD:
-		zfcp_dbf_san_ct_response(req);
+		zfcp_dbf_san_res("fsscth1", req);
 		ct->status = 0;
 		break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -902,7 +901,7 @@
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
 		/* fall through */
 	case FSF_GENERIC_COMMAND_REJECTED:
 	case FSF_PAYLOAD_SIZE_MISMATCH:
@@ -1025,7 +1024,7 @@
 	req->qtcb->header.port_handle = wka_port->handle;
 	req->data = ct;
 
-	zfcp_dbf_san_ct_request(req, wka_port->d_id);
+	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1053,7 +1052,7 @@
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_dbf_san_els_response(req);
+		zfcp_dbf_san_res("fsselh1", req);
 		send_els->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1127,7 +1126,7 @@
 	req->handler = zfcp_fsf_send_els_handler;
 	req->data = els;
 
-	zfcp_dbf_san_els_request(req);
+	zfcp_dbf_san_req("fssels1", req, d_id);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1448,7 +1447,7 @@
 
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1580,7 +1579,7 @@
 
 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
-		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
+		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
 	}
 
 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
@@ -1638,7 +1637,7 @@
 
 	switch (header->fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ACCESS_DENIED:
@@ -1654,7 +1653,7 @@
 						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
-				     "fscpph2", req);
+				     "fscpph2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -1743,7 +1742,7 @@
 	switch (header->fsf_status) {
 
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req);
+		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
 		/* fall through */
 	case FSF_LUN_ALREADY_OPEN:
 		break;
@@ -1755,8 +1754,7 @@
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_SHARING_VIOLATION:
@@ -1852,20 +1850,18 @@
 
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1",
-					req);
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_HANDLE_NOT_VALID:
-		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req);
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2002,13 +1998,12 @@
 	switch (header->fsf_status) {
 	case FSF_HANDLE_MISMATCH:
 	case FSF_PORT_HANDLE_NOT_VALID:
-		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1",
-					req);
+		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_FCPLUN_NOT_VALID:
 	case FSF_LUN_HANDLE_NOT_VALID:
-		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req);
+		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -2026,7 +2021,7 @@
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch3", req);
+					  "fssfch3");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_CMND_LENGTH_NOT_VALID:
@@ -2037,21 +2032,20 @@
 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
 			(unsigned long long)zfcp_sdev->port->wwpn);
 		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
-					  "fssfch4", req);
+					  "fssfch4");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_PORT_BOXED:
 		zfcp_erp_set_port_status(zfcp_sdev->port,
 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(zfcp_sdev->port,
-				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5",
-				     req);
+				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_LUN_BOXED:
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "fssfch6", req);
+				    "fssfch6");
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	case FSF_ADAPTER_STATUS_AVAILABLE:
@@ -2104,7 +2098,7 @@
 
 skip_fsfstatus:
 	zfcp_fsf_req_trace(req, scpnt);
-	zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
+	zfcp_dbf_scsi_result(scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2420,3 +2414,12 @@
 			break;
 	}
 }
+
+struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
+				      struct qdio_buffer *sbal)
+{
+	struct qdio_buffer_element *sbale = &sbal->element[0];
+	u64 req_id = (unsigned long) sbale->addr;
+
+	return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
+}
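
The zfcp_fsf.c hunks above follow two patterns: the error-recovery triggers (zfcp_erp_adapter_reopen/shutdown, zfcp_erp_port_reopen, zfcp_erp_lun_reopen) lose their trailing struct zfcp_fsf_req * argument and keep only a short trace tag, and the old dbf calls are replaced by tag-based ones (zfcp_dbf_hba_fsf_uss, zfcp_dbf_san_req/res, zfcp_dbf_scsi_*) that take the request itself. The new zfcp_fsf_get_req() helper recovers an outstanding request from the request ID stored in an SBAL's first element. A minimal sketch of a caller is shown below; the name example_complete_sbal() and its surrounding context are illustrative, not part of the patch.

static void example_complete_sbal(struct zfcp_qdio *qdio,
				  struct qdio_buffer *sbal)
{
	/* recover the originating request from the id that was stored
	 * in the first SBAL element when the request was sent */
	struct zfcp_fsf_req *req = zfcp_fsf_get_req(qdio, sbal);

	if (!req)
		return;	/* unknown id or request already completed */

	/* ... hand req to its handler or to the trace code ... */
}
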
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index a0554be..8da5ed6 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -41,7 +41,7 @@
 		zfcp_qdio_siosl(adapter);
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
-				ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL);
+				ZFCP_STATUS_COMMON_ERP_FAILED, id);
 }
 
 static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
@@ -74,7 +74,6 @@
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
 		return;
 	}
@@ -97,7 +96,6 @@
 	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count);
 		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
 		return;
 	}
@@ -116,7 +114,7 @@
 	 * put SBALs back to response queue
 	 */
 	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL);
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
 }
 
 static struct qdio_buffer_element *
@@ -236,7 +234,7 @@
 	if (!ret) {
 		atomic_inc(&qdio->req_q_full);
 		/* assume hanging outbound queue, try queue recovery */
-		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
+		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
 	}
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -292,6 +290,8 @@
 	id->int_parm = (unsigned long) qdio;
 	id->input_sbal_addr_array = (void **) (qdio->res_q);
 	id->output_sbal_addr_array = (void **) (qdio->req_q);
+	id->scan_threshold =
+		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
 }
 
 /**
@@ -309,6 +309,7 @@
 		return -ENOMEM;
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
+	init_waitqueue_head(&qdio->req_q_wq);
 
 	return qdio_allocate(&init_data);
 }
@@ -393,6 +394,7 @@
 	/* set index of first available SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	return 0;
 
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 63529ed..ddb5800 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,6 +30,10 @@
 MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
 #endif
 
+static bool allow_lun_scan = 1;
+module_param(allow_lun_scan, bool, 0600);
+MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs");
+
 static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
 					int reason)
 {
@@ -68,11 +72,8 @@
 
 static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
 {
-	struct zfcp_adapter *adapter =
-		(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
-
 	set_host_byte(scpnt, result);
-	zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
+	zfcp_dbf_scsi_fail_send(scpnt);
 	scpnt->scsi_done(scpnt);
 }
 
@@ -80,7 +81,6 @@
 int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
-	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
 	int    status, scsi_result, ret;
 
@@ -91,7 +91,7 @@
 	scsi_result = fc_remote_port_chkready(rport);
 	if (unlikely(scsi_result)) {
 		scpnt->result = scsi_result;
-		zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
+		zfcp_dbf_scsi_fail_send(scpnt);
 		scpnt->scsi_done(scpnt);
 		return 0;
 	}
@@ -134,6 +134,7 @@
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
+	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (!port)
@@ -143,7 +144,7 @@
 	if (unit)
 		put_device(&unit->dev);
 
-	if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+	if (!unit && !(allow_lun_scan && npiv)) {
 		put_device(&port->dev);
 		return -ENXIO;
 	}
@@ -158,7 +159,7 @@
 	spin_lock_init(&zfcp_sdev->latencies.lock);
 
 	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
 	zfcp_erp_wait(port->adapter);
 
 	return 0;
@@ -182,8 +183,7 @@
 	old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
-		zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
-				    old_reqid);
+		zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL);
 		return FAILED; /* completion could be in progress */
 	}
 	old_req->data = NULL;
@@ -198,29 +198,32 @@
 
 		zfcp_erp_wait(adapter);
 		ret = fc_block_scsi_eh(scpnt);
-		if (ret)
+		if (ret) {
+			zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL);
 			return ret;
+		}
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
-					    old_reqid);
+			zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL);
 			return SUCCESS;
 		}
 	}
-	if (!abrt_req)
+	if (!abrt_req) {
+		zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL);
 		return FAILED;
+	}
 
 	wait_for_completion(&abrt_req->completion);
 
 	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
-		dbf_tag = "okay";
+		dbf_tag = "abrt_ok";
 	else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED)
-		dbf_tag = "lte2";
+		dbf_tag = "abrt_nn";
 	else {
-		dbf_tag = "fail";
+		dbf_tag = "abrt_fa";
 		retval = FAILED;
 	}
-	zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
+	zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req);
 	zfcp_fsf_req_free(abrt_req);
 	return retval;
 }
@@ -280,7 +283,7 @@
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret;
 
-	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
+	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
 	zfcp_erp_wait(adapter);
 	ret = fc_block_scsi_eh(scpnt);
 	if (ret)
@@ -518,7 +521,7 @@
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 
 	if (port) {
-		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL);
+		zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
 		put_device(&port->dev);
 	}
 }
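
Two behavioural changes sit in the zfcp_scsi.c hunks: the abort handler's debug tags are renamed to the self-describing abrt_* set (with new trace points on the fc_block_scsi_eh() and missing-request paths), and slave_alloc now admits LUNs that have no configured zfcp_unit whenever the adapter runs NPIV and the new allow_lun_scan module parameter (default on) is set. A compact restatement of that admission rule follows; it is an illustration only, the function name is made up and the kernel keeps the logic inline.

#include <errno.h>
#include <stdbool.h>

/* illustrative only: mirrors the !(allow_lun_scan && npiv) test above */
static int example_accept_lun(bool have_unit, bool npiv, bool allow_lun_scan)
{
	if (have_unit)
		return 0;		/* explicitly configured unit */
	return (allow_lun_scan && npiv) ? 0 : -ENXIO;
}
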
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 2f2c54f..cdc4ff7 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -105,8 +105,7 @@
 		return -EINVAL;
 
 	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
-			     NULL);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2");
 	zfcp_erp_wait(port->adapter);
 
 	return count;
@@ -148,7 +147,7 @@
 	if (sdev) {
 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
-				    "syufai2", NULL);
+				    "syufai2");
 		zfcp_erp_wait(unit->port->adapter);
 	} else
 		zfcp_unit_scsi_scan(unit);
@@ -198,7 +197,7 @@
 
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"syafai2", NULL);
+				"syafai2");
 	zfcp_erp_wait(adapter);
 out:
 	zfcp_ccw_adapter_put(adapter);
@@ -256,7 +255,7 @@
 
 	put_device(&port->dev);
 
-	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
+	zfcp_erp_port_shutdown(port, 0, "syprs_1");
 	zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
  out:
 	zfcp_ccw_adapter_put(adapter);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index a624f5a..e856622 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -467,7 +467,7 @@
 
 	node = prom_getchild(prom_root_node);
 	node = prom_searchsiblings(node, "flash-memory");
-	if (node != 0 && node != -1) {
+	if (node != 0 && (s32)node != -1) {
 		if (prom_getproperty(node, "reg",
 		    (char *)&reg0, sizeof(reg0)) == -1) {
 			printk("jsflash: no \"reg\" property\n");
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index dc5ac6e..a391090 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -416,7 +416,7 @@
 	/* Go back and check they match */
 
 	outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL);	/* Reset program count 0 */
-	bios_addr -= 0x1000;	/* Reset the BIOS adddress      */
+	bios_addr -= 0x1000;	/* Reset the BIOS address */
 	for (i = 0, data32_ptr = (u8 *) & data32;	/* Check the code       */
 	     i < 0x1000;	/* Firmware code size = 4K      */
 	     i++, bios_addr++) {
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index afc9aeb..060ac4b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -91,7 +91,7 @@
  *	aac_fib_setup	-	setup the fibs
  *	@dev: Adapter to set up
  *
- *	Allocate the PCI space for the fibs, map it and then intialise the
+ *	Allocate the PCI space for the fibs, map it and then initialise the
  *	fib area, the unmapped fib data and also the free list
  */
 
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq
index 5997e7c..1565be9 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx_old/aic7xxx.seq
@@ -1178,7 +1178,7 @@
 /*
  * Retrieve an SCB by SCBID first searching the disconnected list falling
  * back to DMA'ing the SCB down from the host.  This routine assumes that
- * ARG_1 is the SCBID of interrest and that SINDEX is the position in the
+ * ARG_1 is the SCBID of interest and that SINDEX is the position in the
  * disconnected list to start the search from.  If SINDEX is SCB_LIST_NULL,
  * we go directly to the host for the SCB.
  */
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index 28aaf34..40273a7 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -1689,7 +1689,7 @@
 #define		PHY_START_CAL		0x01
 
 /*
- * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC)
+ * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
  */
 #define PCIX_REG_BASE_ADR		0xB8040000
 
@@ -1802,7 +1802,7 @@
 #define PCIC_TP_CTRL	0xFC
 
 /*
- * EXSI Registers, Addresss Range: (0x00-0xFC)
+ * EXSI Registers, Address Range: (0x00-0xFC)
  */
 #define EXSI_REG_BASE_ADR		REG_BASE_ADDR_EXSI
 
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index c43698b..2959327 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -867,7 +867,7 @@
  * resources they have with this SCB, and then call this one at the
  * end of their timeout function.  To do this, one should initialize
  * the ascb->timer.{function, data, expires} prior to calling the post
- * funcion.  The timer is started by the post function.
+ * function. The timer is started by the post function.
  */
 void asd_ascb_timedout(unsigned long data)
 {
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 7437461..390168f 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -797,7 +797,7 @@
 		int j;
 		/* Start from Page 1 of Mode 0 and 1. */
 		moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
-		/* All the fields of page 1 can be intialized to 0. */
+		/* All the fields of page 1 can be initialized to 0. */
 		for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
 			asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
 	}
@@ -938,7 +938,7 @@
 	asd_write_reg_dword(asd_ha, SCBPRO, 0);
 	asd_write_reg_dword(asd_ha, CSEQCON, 0);
 
-	/* Intialize CSEQ Mode 11 Interrupt Vectors.
+	/* Initialize CSEQ Mode 11 Interrupt Vectors.
 	 * The addresses are 16 bit wide and in dword units.
 	 * The values of their macros are in byte units.
 	 * Thus we have to divide by 4. */
@@ -961,7 +961,7 @@
 	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
 
 	for (i = 0; i < 8; i++) {
-		/* Intialize Mode n Link m Interrupt Enable. */
+		/* Initialize Mode n Link m Interrupt Enable. */
 		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
 		/* Initialize Mode n Request Mailbox. */
 		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 17e3df4..1cadcd6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1171,9 +1171,8 @@
 	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
 	if ( arccdbsize > 256)
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
-	if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0]|WRITE_10 || pcmd->cmnd[0]|WRITE_12 ){
+	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
-	}
 	ccb->arc_cdb_size = arccdbsize;
 	return SUCCESS;
 }
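
The arcmsr change replaces a test that could never be false: pcmd->cmnd[0] | WRITE_6 is a bitwise OR with a non-zero constant, so it is non-zero for every opcode and ARCMSR_CDB_FLAG_WRITE was set on every command. Checking the DMA direction asks the intended "is this a write?" question directly. The standalone snippet below only demonstrates the arithmetic; WRITE_6 is 0x0a in <scsi/scsi.h> and is repeated here so the demo builds outside the kernel.

#include <stdio.h>

#define WRITE_6 0x0a	/* copied for the demo; see <scsi/scsi.h> */

int main(void)
{
	unsigned int opcode, always_true = 1;

	for (opcode = 0; opcode <= 0xff; opcode++)
		if ((opcode | WRITE_6) == 0)	/* never happens */
			always_true = 0;

	printf("opcode | WRITE_6 was non-zero for all opcodes: %s\n",
	       always_true ? "yes" : "no");
	return 0;
}
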
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 75a85aa..79cefbe 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3785,7 +3785,7 @@
 	dma_addr_t paddr;
 
 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
-					  GFP_KERNEL, &paddr);
+					  GFP_ATOMIC, &paddr);
 	if (!io_task->cmd_bhs)
 		return -ENOMEM;
 	io_task->bhs_pa.u.a64.address = paddr;
@@ -3914,7 +3914,8 @@
 			io_task->psgl_handle = NULL;
 		}
 	} else {
-		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
+		if (task->hdr &&
+		   ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
 			return;
 		if (io_task->psgl_handle) {
 			spin_lock(&phba->mgmt_sgl_lock);
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index d2eefd3..4ce6f49 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -3,6 +3,4 @@
 bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
 bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
 bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
-bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
-
-ccflags-y := -DBFA_PERF_BUILD
+bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index ff2bd07..7be6b5a 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -17,7 +17,7 @@
 #ifndef __BFA_H__
 #define __BFA_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_cs.h"
 #include "bfa_plog.h"
 #include "bfa_defs_svc.h"
@@ -33,7 +33,6 @@
  * Interrupt message handlers
  */
 void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
-void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 
 /*
  * Request and response queue related defines
@@ -121,8 +120,8 @@
 									\
 		struct list_head *waitq = bfa_reqq(__bfa, __reqq);      \
 									\
-		bfa_assert(((__reqq) < BFI_IOC_MAX_CQS));      \
-		bfa_assert((__wqe)->qresume && (__wqe)->cbarg);      \
+		WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS));			\
+		WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg));		\
 									\
 		list_add_tail(&(__wqe)->qe, waitq);      \
 	} while (0)
@@ -297,7 +296,6 @@
 		      struct bfa_iocfc_cfg_s *cfg,
 		      struct bfa_meminfo_s *meminfo,
 		      struct bfa_pcidev_s *pcidev);
-void bfa_iocfc_detach(struct bfa_s *bfa);
 void bfa_iocfc_init(struct bfa_s *bfa);
 void bfa_iocfc_start(struct bfa_s *bfa);
 void bfa_iocfc_stop(struct bfa_s *bfa);
@@ -333,12 +331,9 @@
 			   u32 *maxvec);
 void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
 				 u32 *end);
-void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
 void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
 wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
 wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
-void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
-				struct bfa_boot_pbc_s *pbcfg);
 int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
 				struct bfi_pbc_vport_s *pbc_vport);
 
@@ -386,19 +381,11 @@
 void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		struct bfa_meminfo_s *meminfo,
 		struct bfa_pcidev_s *pcidev);
-void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
-void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
 void bfa_detach(struct bfa_s *bfa);
-void bfa_init(struct bfa_s *bfa);
-void bfa_start(struct bfa_s *bfa);
-void bfa_stop(struct bfa_s *bfa);
-void bfa_attach_fcs(struct bfa_s *bfa);
 void bfa_cb_init(void *bfad, bfa_status_t status);
 void bfa_cb_updateq(void *bfad, bfa_status_t status);
 
 bfa_boolean_t bfa_intx(struct bfa_s *bfa);
-void bfa_intx_disable(struct bfa_s *bfa);
-void bfa_intx_enable(struct bfa_s *bfa);
 void bfa_isr_enable(struct bfa_s *bfa);
 void bfa_isr_disable(struct bfa_s *bfa);
 
@@ -408,31 +395,14 @@
 
 typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
 void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
-void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
 
-void bfa_adapter_get_attr(struct bfa_s *bfa,
-			  struct bfa_adapter_attr_s *ad_attr);
-u64 bfa_adapter_get_id(struct bfa_s *bfa);
 
 bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
 				   struct bfa_iocfc_intr_attr_s *attr);
 
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
-void bfa_chip_reset(struct bfa_s *bfa);
-void bfa_timer_tick(struct bfa_s *bfa);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
 	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
 
-/*
- * BFA debug API functions
- */
-bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
-bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
-bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
-			      u32 *offset, int *buflen);
-void bfa_debug_fwsave_clear(struct bfa_s *bfa);
-bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
-bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
-
 #endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h
deleted file mode 100644
index 6f02101..0000000
--- a/drivers/scsi/bfa/bfa_cb_ioim.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_HCB_IOIM_H__
-#define __BFA_HCB_IOIM_H__
-
-#include "bfa_os_inc.h"
-/*
- * task attribute values in FCP-2 FCP_CMND IU
- */
-#define SIMPLE_Q    0
-#define HEAD_OF_Q   1
-#define ORDERED_Q   2
-#define ACA_Q	    4
-#define UNTAGGED    5
-
-static inline lun_t
-bfad_int_to_lun(u32 luno)
-{
-	union {
-		u16	scsi_lun[4];
-		lun_t		bfa_lun;
-	} lun;
-
-	lun.bfa_lun     = 0;
-	lun.scsi_lun[0] = cpu_to_be16(luno);
-
-	return lun.bfa_lun;
-}
-
-/*
- * Get LUN for the I/O request
- */
-#define bfa_cb_ioim_get_lun(__dio)	\
-	bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
-
-/*
- * Get CDB for the I/O request
- */
-static inline u8 *
-bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return (u8 *) cmnd->cmnd;
-}
-
-/*
- * Get I/O direction (read/write) for the I/O request
- */
-static inline enum fcp_iodir
-bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	enum dma_data_direction dmadir;
-
-	dmadir = cmnd->sc_data_direction;
-	if (dmadir == DMA_TO_DEVICE)
-		return FCP_IODIR_WRITE;
-	else if (dmadir == DMA_FROM_DEVICE)
-		return FCP_IODIR_READ;
-	else
-		return FCP_IODIR_NONE;
-}
-
-/*
- * Get IO size in bytes for the I/O request
- */
-static inline u32
-bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return scsi_bufflen(cmnd);
-}
-
-/*
- * Get timeout for the I/O request
- */
-static inline u8
-bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	/*
-	 * TBD: need a timeout for scsi passthru
-	 */
-	if (cmnd->device->host == NULL)
-		return 4;
-
-	return 0;
-}
-
-/*
- * Get Command Reference Number for the I/O request. 0 if none.
- */
-static inline u8
-bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
-{
-	return 0;
-}
-
-/*
- * Get SAM-3 priority for the I/O request. 0 is default.
- */
-static inline u8
-bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
-{
-	return 0;
-}
-
-/*
- * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
- */
-static inline u8
-bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	u8	task_attr = UNTAGGED;
-
-	if (cmnd->device->tagged_supported) {
-		switch (cmnd->tag) {
-		case HEAD_OF_QUEUE_TAG:
-			task_attr = HEAD_OF_Q;
-			break;
-		case ORDERED_QUEUE_TAG:
-			task_attr = ORDERED_Q;
-			break;
-		default:
-			task_attr = SIMPLE_Q;
-			break;
-		}
-	}
-
-	return task_attr;
-}
-
-/*
- * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
- */
-static inline u8
-bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
-{
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-
-	return cmnd->cmd_len;
-}
-
-/*
- * Assign queue to be used for the I/O request. This value depends on whether
- * the driver wants to use the queues via any specific algorithm. Currently,
- * this is not supported.
- */
-#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
-
-#endif /* __BFA_HCB_IOIM_H__ */
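
bfa_cb_ioim.h is removed outright; its inline accessors were thin wrappers around fields of struct scsi_cmnd, so the information presumably gets read directly where it is needed. As a reminder of what one of the wrappers did, here is the I/O-direction lookup restated as a plain helper; the name example_iodir() is hypothetical and the body is taken from the deleted bfa_cb_ioim_get_iodir() above.

static enum fcp_iodir example_iodir(struct scsi_cmnd *cmnd)
{
	if (cmnd->sc_data_direction == DMA_TO_DEVICE)
		return FCP_IODIR_WRITE;
	if (cmnd->sc_data_direction == DMA_FROM_DEVICE)
		return FCP_IODIR_READ;
	return FCP_IODIR_NONE;
}
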
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 2345f48..1cd5c8b 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -15,13 +15,100 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_ctreg.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(HAL, CORE);
 
 /*
+ * BFA module list terminated by NULL
+ */
+static struct bfa_module_s *hal_mods[] = {
+	&hal_mod_sgpg,
+	&hal_mod_fcport,
+	&hal_mod_fcxp,
+	&hal_mod_lps,
+	&hal_mod_uf,
+	&hal_mod_rport,
+	&hal_mod_fcpim,
+	NULL
+};
+
+/*
+ * Message handlers for various modules.
+ */
+static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
+	bfa_isr_unhandled,	/* NONE */
+	bfa_isr_unhandled,	/* BFI_MC_IOC */
+	bfa_isr_unhandled,	/* BFI_MC_DIAG */
+	bfa_isr_unhandled,	/* BFI_MC_FLASH */
+	bfa_isr_unhandled,	/* BFI_MC_CEE */
+	bfa_fcport_isr,		/* BFI_MC_FCPORT */
+	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
+	bfa_isr_unhandled,	/* BFI_MC_LL */
+	bfa_uf_isr,		/* BFI_MC_UF */
+	bfa_fcxp_isr,		/* BFI_MC_FCXP */
+	bfa_lps_isr,		/* BFI_MC_LPS */
+	bfa_rport_isr,		/* BFI_MC_RPORT */
+	bfa_itnim_isr,		/* BFI_MC_ITNIM */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
+	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
+	bfa_ioim_isr,		/* BFI_MC_IOIM */
+	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
+	bfa_tskim_isr,		/* BFI_MC_TSKIM */
+	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
+	bfa_isr_unhandled,	/* BFI_MC_IPFC */
+	bfa_isr_unhandled,	/* BFI_MC_PORT */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+	bfa_isr_unhandled,	/* --------- */
+};
+/*
+ * Message handlers for mailbox command classes
+ */
+static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
+	NULL,
+	NULL,		/* BFI_MC_IOC   */
+	NULL,		/* BFI_MC_DIAG  */
+	NULL,		/* BFI_MC_FLASH */
+	NULL,		/* BFI_MC_CEE   */
+	NULL,		/* BFI_MC_PORT  */
+	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
+	NULL,
+};
+
+
+
+static void
+bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+{
+	struct bfa_port_s	*port = &bfa->modules.port;
+	u32			dm_len;
+	u8			*dm_kva;
+	u64			dm_pa;
+
+	dm_len = bfa_port_meminfo();
+	dm_kva = bfa_meminfo_dma_virt(mi);
+	dm_pa  = bfa_meminfo_dma_phys(mi);
+
+	memset(port, 0, sizeof(struct bfa_port_s));
+	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
+	bfa_port_mem_claim(port, dm_kva, dm_pa);
+
+	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
+	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+}
+
+/*
  * BFA IOC FC related definitions
  */
 
@@ -67,18 +154,6 @@
  * BFA Interrupt handling functions
  */
 static void
-bfa_msix_errint(struct bfa_s *bfa, u32 intr)
-{
-	bfa_ioc_error_isr(&bfa->ioc);
-}
-
-static void
-bfa_msix_lpu(struct bfa_s *bfa)
-{
-	bfa_ioc_mbox_isr(&bfa->ioc);
-}
-
-static void
 bfa_reqq_resume(struct bfa_s *bfa, int qid)
 {
 	struct list_head *waitq, *qe, *qen;
@@ -104,9 +179,6 @@
 	bfa_intx(bfa);
 }
 
-/*
- *  hal_intr_api
- */
 bfa_boolean_t
 bfa_intx(struct bfa_s *bfa)
 {
@@ -151,18 +223,6 @@
 }
 
 void
-bfa_intx_enable(struct bfa_s *bfa)
-{
-	writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
-}
-
-void
-bfa_intx_disable(struct bfa_s *bfa)
-{
-	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
-}
-
-void
 bfa_isr_enable(struct bfa_s *bfa)
 {
 	u32 intr_unmask;
@@ -225,7 +285,7 @@
 	bfa_trc(bfa, m->mhdr.msg_class);
 	bfa_trc(bfa, m->mhdr.msg_id);
 	bfa_trc(bfa, m->mhdr.mtag.i2htok);
-	bfa_assert(0);
+	WARN_ON(1);
 	bfa_trc_stop(bfa->trcmod);
 }
 
@@ -236,8 +296,6 @@
 	u32 pi, ci;
 	struct list_head *waitq;
 
-	bfa_trc_fp(bfa, qid);
-
 	qid &= (BFI_IOC_MAX_CQS - 1);
 
 	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
@@ -245,16 +303,10 @@
 	ci = bfa_rspq_ci(bfa, qid);
 	pi = bfa_rspq_pi(bfa, qid);
 
-	bfa_trc_fp(bfa, ci);
-	bfa_trc_fp(bfa, pi);
-
 	if (bfa->rme_process) {
 		while (ci != pi) {
 			m = bfa_rspq_elem(bfa, qid, ci);
-			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
-
 			bfa_isrs[m->mhdr.msg_class] (bfa, m);
-
 			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
 		}
 	}
@@ -282,7 +334,7 @@
 	intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
 	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
-		bfa_msix_lpu(bfa);
+		bfa_ioc_mbox_isr(&bfa->ioc);
 
 	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
 		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
@@ -313,22 +365,16 @@
 		}
 
 		writel(intr, bfa->iocfc.bfa_regs.intr_status);
-		bfa_msix_errint(bfa, intr);
+		bfa_ioc_error_isr(&bfa->ioc);
 	}
 }
 
-void
-bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
-{
-	bfa_isrs[mc] = isr_func;
-}
-
 /*
  * BFA IOC FC related functions
  */
 
 /*
- *  hal_ioc_pvt BFA IOC private functions
+ *  BFA IOC private functions
  */
 
 static void
@@ -379,7 +425,7 @@
 	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
 	int		i;
 
-	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
+	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
 	bfa_trc(bfa, cfg->fwcfg.num_cqs);
 
 	bfa_iocfc_reset_queues(bfa);
@@ -488,8 +534,8 @@
 	 * First allocate dma memory for IOC.
 	 */
 	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
-	dm_kva += bfa_ioc_meminfo();
-	dm_pa  += bfa_ioc_meminfo();
+	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
 
 	/*
 	 * Claim DMA-able memory for the request/response queues and for shadow
@@ -552,7 +598,7 @@
 	bfa_meminfo_dma_virt(meminfo) = dm_kva;
 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
 
-	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
+	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 	if (dbgsz > 0) {
 		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
 		bfa_meminfo_kva(meminfo) += dbgsz;
@@ -699,7 +745,7 @@
 		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
 			     bfa);
 	else {
-		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
+		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
 		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
 			     bfa);
 	}
@@ -735,9 +781,6 @@
 	bfa_isr_enable(bfa);
 }
 
-/*
- *  hal_ioc_public
- */
 
 /*
  * Query IOC memory requirement information.
@@ -747,11 +790,11 @@
 		  u32 *dm_len)
 {
 	/* dma memory for IOC */
-	*dm_len += bfa_ioc_meminfo();
+	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
 
 	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
 	bfa_iocfc_cqs_sz(cfg, dm_len);
-	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
+	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
 /*
@@ -783,7 +826,7 @@
 
 	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
 	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
-	bfa_timer_init(&bfa->timer_mod);
+	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
 
 	INIT_LIST_HEAD(&bfa->comp_q);
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
@@ -794,15 +837,6 @@
  * Query IOC memory requirement information.
  */
 void
-bfa_iocfc_detach(struct bfa_s *bfa)
-{
-	bfa_ioc_detach(&bfa->ioc);
-}
-
-/*
- * Query IOC memory requirement information.
- */
-void
 bfa_iocfc_init(struct bfa_s *bfa)
 {
 	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
@@ -852,23 +886,11 @@
 		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
 void
-bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
-{
-	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
-}
-
-u64
-bfa_adapter_get_id(struct bfa_s *bfa)
-{
-	return bfa_ioc_get_adid(&bfa->ioc);
-}
-
-void
 bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
 {
 	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
@@ -976,18 +998,6 @@
 	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
 }
 
-void
-bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
-	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
-	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
-	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
-}
-
 int
 bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
 {
@@ -998,9 +1008,6 @@
 	return cfgrsp->pbc_cfg.nvports;
 }
 
-/*
- *  hal_api
- */
 
 /*
  * Use this function query the memory requirement of the BFA library.
@@ -1036,7 +1043,7 @@
 	int		i;
 	u32	km_len = 0, dm_len = 0;
 
-	bfa_assert((cfg != NULL) && (meminfo != NULL));
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
 	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
 	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
@@ -1090,7 +1097,7 @@
 
 	bfa->fcs = BFA_FALSE;
 
-	bfa_assert((cfg != NULL) && (meminfo != NULL));
+	WARN_ON((cfg == NULL) || (meminfo == NULL));
 
 	/*
 	 * initialize all memory pointers for iterative allocation
@@ -1129,79 +1136,7 @@
 
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->detach(bfa);
-
-	bfa_iocfc_detach(bfa);
-}
-
-
-void
-bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
-{
-	bfa->trcmod = trcmod;
-}
-
-void
-bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
-{
-	bfa->plog = plog;
-}
-
-/*
- * Initialize IOC.
- *
- * This function will return immediately, when the IOC initialization is
- * completed, the bfa_cb_init() will be called.
- *
- * @param[in]	bfa	instance
- *
- * @return void
- *
- * Special Considerations:
- *
- * @note
- * When this function returns, the driver should register the interrupt service
- * routine(s) and enable the device interrupts. If this is not done,
- * bfa_cb_init() will never get called
- */
-void
-bfa_init(struct bfa_s *bfa)
-{
-	bfa_iocfc_init(bfa);
-}
-
-/*
- * Use this function initiate the IOC configuration setup. This function
- * will return immediately.
- *
- * @param[in]	bfa	instance
- *
- * @return None
- */
-void
-bfa_start(struct bfa_s *bfa)
-{
-	bfa_iocfc_start(bfa);
-}
-
-/*
- * Use this function quiese the IOC. This function will return immediately,
- * when the IOC is actually stopped, the bfad->comp will be set.
- *
- * @param[in]bfa - pointer to bfa_t.
- *
- * @return None
- *
- * Special Considerations:
- * bfad->comp can be set before or after bfa_stop() returns.
- *
- * @note
- * In case of any failure, we could handle it automatically by doing a
- * reset and then succeed the bfa_stop() call.
- */
-void
-bfa_stop(struct bfa_s *bfa)
-{
-	bfa_iocfc_stop(bfa);
+	bfa_ioc_detach(&bfa->ioc);
 }
 
 void
@@ -1237,20 +1172,6 @@
 	}
 }
 
-void
-bfa_attach_fcs(struct bfa_s *bfa)
-{
-	bfa->fcs = BFA_TRUE;
-}
-
-/*
- * Periodic timer heart beat from driver
- */
-void
-bfa_timer_tick(struct bfa_s *bfa)
-{
-	bfa_timer_beat(&bfa->timer_mod);
-}
 
 /*
  * Return the list of PCI vendor/device id lists supported by this
@@ -1321,89 +1242,3 @@
 	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
 	cfg->drvcfg.min_cfg	   = BFA_TRUE;
 }
-
-void
-bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
-{
-	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
-}
-
-/*
- * Retrieve firmware trace information on IOC failure.
- */
-bfa_status_t
-bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
-{
-	return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
-}
-
-/*
- * Clear the saved firmware trace information of an IOC.
- */
-void
-bfa_debug_fwsave_clear(struct bfa_s *bfa)
-{
-	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
-}
-
-/*
- * Fetch firmware trace data.
- *
- * @param[in]		bfa			BFA instance
- * @param[out]		trcdata		Firmware trace buffer
- * @param[in,out]	trclen		Firmware trace buffer len
- *
- * @retval BFA_STATUS_OK			Firmware trace is fetched.
- * @retval BFA_STATUS_INPROGRESS	Firmware trace fetch is in progress.
- */
-bfa_status_t
-bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
-{
-	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
-}
-
-/*
- * Dump firmware memory.
- *
- * @param[in]		bfa		BFA instance
- * @param[out]		buf		buffer for dump
- * @param[in,out]	offset		smem offset to start read
- * @param[in,out]	buflen		length of buffer
- *
- * @retval BFA_STATUS_OK		Firmware memory is dumped.
- * @retval BFA_STATUS_INPROGRESS	Firmware memory dump is in progress.
- */
-bfa_status_t
-bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
-{
-	return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
-}
-/*
- * Reset hw semaphore & usage cnt regs and initialize.
- */
-void
-bfa_chip_reset(struct bfa_s *bfa)
-{
-	bfa_ioc_ownership_reset(&bfa->ioc);
-	bfa_ioc_pll_init(&bfa->ioc);
-}
-
-/*
- * Fetch firmware statistics data.
- *
- * @param[in]		bfa		BFA instance
- * @param[out]		data		Firmware stats buffer
- *
- * @retval BFA_STATUS_OK		Firmware trace is fetched.
- */
-bfa_status_t
-bfa_fw_stats_get(struct bfa_s *bfa, void *data)
-{
-	return bfa_ioc_fw_stats_get(&bfa->ioc, data);
-}
-
-bfa_status_t
-bfa_fw_stats_clear(struct bfa_s *bfa)
-{
-	return bfa_ioc_fw_stats_clear(&bfa->ioc);
-}
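
Besides folding the dispatch tables and the port-attach helper from the deleted bfa_drv.c into bfa_core.c as static data, this file converts every bfa_assert()/bfa_assert_fp() to the kernel's WARN_ON(). The two macros have opposite polarity (assert the good case versus warn on the bad case), so each converted call inverts its condition, applying De Morgan where the expression is compound. The fragment below is a userspace illustration of that inversion with stand-in macros; it is not kernel code.

#include <stdio.h>

/* stand-ins for the demo only; the real WARN_ON() lives in the kernel
 * and bfa_assert() is removed by this patch */
#define bfa_assert(cond) \
	do { if (!(cond)) printf("assert(%s) failed\n", #cond); } while (0)
#define WARN_ON(cond) \
	do { if (cond) printf("WARNING: %s\n", #cond); } while (0)

int main(void)
{
	int num_cqs = 4, max_cqs = 8;
	void *cfg = &num_cqs, *meminfo = &max_cqs;

	bfa_assert(num_cqs <= max_cqs);			/* old style */
	WARN_ON(num_cqs > max_cqs);			/* new style */

	bfa_assert((cfg != NULL) && (meminfo != NULL));
	WARN_ON((cfg == NULL) || (meminfo == NULL));	/* De Morgan */
	return 0;
}
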
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 99f242b..12bfeed 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -22,7 +22,7 @@
 #ifndef __BFA_CS_H__
 #define __BFA_CS_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 /*
  * BFA TRC
@@ -32,12 +32,20 @@
 #define BFA_TRC_MAX	(4 * 1024)
 #endif
 
+#define BFA_TRC_TS(_trcm)                               \
+	({                                              \
+		struct timeval tv;                      \
+							\
+		do_gettimeofday(&tv);                   \
+		(tv.tv_sec*1000000+tv.tv_usec);         \
+	})
+
 #ifndef BFA_TRC_TS
 #define BFA_TRC_TS(_trcm)	((_trcm)->ticks++)
 #endif
 
 struct bfa_trc_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u16	fileno;
 	u16	line;
 #else
@@ -99,13 +107,6 @@
 	trcm->stopped = 1;
 }
 
-#ifdef FWTRC
-extern void dc_flush(void *data);
-#else
-#define dc_flush(data)
-#endif
-
-
 static inline void
 __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
 {
@@ -119,12 +120,10 @@
 	trc->line = (u16) line;
 	trc->data.u64 = data;
 	trc->timestamp = BFA_TRC_TS(trcm);
-	dc_flush(trc);
 
 	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
 	if (trcm->tail == trcm->head)
 		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-	dc_flush(trcm);
 }
 
 
@@ -141,42 +140,18 @@
 	trc->line = (u16) line;
 	trc->data.u32.u32 = data;
 	trc->timestamp = BFA_TRC_TS(trcm);
-	dc_flush(trc);
 
 	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
 	if (trcm->tail == trcm->head)
 		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-	dc_flush(trcm);
 }
 
-#ifndef BFA_PERF_BUILD
-#define bfa_trc_fp(_trcp, _data)	bfa_trc(_trcp, _data)
-#else
-#define bfa_trc_fp(_trcp, _data)
-#endif
-
-/*
- * @ BFA LOG interfaces
- */
-#define bfa_assert(__cond)	do {					\
-	if (!(__cond)) {						\
-		printk(KERN_ERR "assert(%s) failed at %s:%d\\n",         \
-		#__cond, __FILE__, __LINE__);				\
-	}								\
-} while (0)
-
 #define bfa_sm_fault(__mod, __event)	do {				\
 	bfa_trc(__mod, (((u32)0xDEAD << 16) | __event));		\
 	printk(KERN_ERR	"Assertion failure: %s:%d: %d",			\
 		__FILE__, __LINE__, (__event));				\
 } while (0)
 
-#ifndef BFA_PERF_BUILD
-#define bfa_assert_fp(__cond)	bfa_assert(__cond)
-#else
-#define bfa_assert_fp(__cond)
-#endif
-
 /* BFA queue definitions */
 #define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
 #define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
@@ -199,7 +174,6 @@
 		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
 				(struct list_head *) (_q);		\
 		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
-		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
 	} else {							\
 		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
 	}								\
@@ -214,7 +188,6 @@
 		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
 			(struct list_head *) (_q);			\
 		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
-		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
 	} else {							\
 		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
 	}								\
@@ -236,16 +209,6 @@
 	return 0;
 }
 
-/*
- * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
- * consistent across modules)
- */
-#ifndef BFA_PERF_BUILD
-#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
-#else
-#define BFA_Q_DBG_INIT(_qe)
-#endif
-
 #define bfa_q_is_on_q(_q, _qe)      \
 	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
 
@@ -361,4 +324,43 @@
 	bfa_wc_down(wc);
 }
 
+static inline void
+wwn2str(char *wwn_str, u64 wwn)
+{
+	union {
+		u64 wwn;
+		u8 byte[8];
+	} w;
+
+	w.wwn = wwn;
+	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
+		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
+		w.byte[6], w.byte[7]);
+}
+
+static inline void
+fcid2str(char *fcid_str, u32 fcid)
+{
+	union {
+		u32 fcid;
+		u8 byte[4];
+	} f;
+
+	f.fcid = fcid;
+	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
+}
+
+#define bfa_swap_3b(_x)				\
+	((((_x) & 0xff) << 16) |		\
+	((_x) & 0x00ff00) |			\
+	(((_x) & 0xff0000) >> 16))
+
+#ifndef __BIG_ENDIAN
+#define bfa_hton3b(_x)  bfa_swap_3b(_x)
+#else
+#define bfa_hton3b(_x)  (_x)
+#endif
+
+#define bfa_ntoh3b(_x)  bfa_hton3b(_x)
+
 #endif /* __BFA_CS_H__ */
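
bfa_cs.h picks up formatting and byte-order helpers: wwn2str() and fcid2str() print the stored bytes in memory order rather than interpreting the integer value (so they expect WWNs and FC IDs kept in wire byte order), and bfa_hton3b()/bfa_ntoh3b() swap the three low-order bytes of a 24-bit FC address on little-endian hosts. A small usage sketch follows, with made-up values and the buffer sizes implied by the sprintf() formats in the hunk above.

static void example_format(u64 pwwn, u32 fcid)
{
	char wwn_buf[24];	/* 8 * "xx" + 7 * ':' + '\0' */
	char fcid_buf[9];	/* "xx:xx:xx" + '\0' */
	u32 wire;

	wwn2str(wwn_buf, pwwn);		/* eight stored bytes, colon separated */
	fcid2str(fcid_buf, fcid);	/* bytes 1..3 of the stored word */

	wire = bfa_hton3b(0x0a1b2c);	/* 0x2c1b0a on a little-endian host */
	(void)wire;
}
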
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 4b5b9e3..d85f93a 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -19,7 +19,7 @@
 #define __BFA_DEFS_H__
 
 #include "bfa_fc.h"
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 #define BFA_MFG_SERIALNUM_SIZE                  11
 #define STRSZ(_n)                               (((_n) + 4) & ~3)
@@ -446,8 +446,8 @@
  * Boot lun information.
  */
 struct bfa_boot_bootlun_s {
-	wwn_t   pwwn;   /*  port wwn of target */
-	lun_t   lun;    /*  64-bit lun */
+	wwn_t   pwwn;		/*  port wwn of target */
+	struct scsi_lun   lun;  /*  64-bit lun */
 };
 #pragma pack()
 
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e24e9f7..648c841 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -34,8 +34,8 @@
 struct bfa_iocfc_intr_attr_s {
 	u8		coalesce;	/*  enable/disable coalescing */
 	u8		rsvd[3];
-	u16	latency;	/*  latency in microseconds   */
-	u16	delay;		/*  delay in microseconds     */
+	__be16	latency;	/*  latency in microseconds   */
+	__be16	delay;		/*  delay in microseconds     */
 };
 
 /*
@@ -743,7 +743,7 @@
 	u8	 qos_enabled;	/*  qos enabled or not		*/
 	u8	 cfg_hardalpa;	/*  is hard alpa configured	*/
 	u8	 hardalpa;	/*  configured hard alpa	*/
-	u16 maxfrsize;	/*  maximum frame size		*/
+	__be16	 maxfrsize;	/*  maximum frame size		*/
 	u8	 rx_bbcredit;	/*  receive buffer credits	*/
 	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
 	u8	 ratelimit;	/*  ratelimit enabled or not	*/
@@ -843,7 +843,7 @@
 	u8	 fka_disabled;   /*  FKA is disabled	  */
 	u8	 maxsz_verified; /*  FCoE max size verified   */
 	u8	 fc_map[3];      /*  FC map		   */
-	u16	vlan;	   /*  FCoE vlan tag/priority   */
+	__be16	 vlan;	   /*  FCoE vlan tag/priority   */
 	u32	fka_adv_per;    /*  FIP  ka advert. period   */
 	mac_t	   mac;	    /*  FCF mac		  */
 };
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c
deleted file mode 100644
index 0222d7c..0000000
--- a/drivers/scsi/bfa/bfa_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include "bfa_modules.h"
-
-/*
- * BFA module list terminated by NULL
- */
-struct bfa_module_s *hal_mods[] = {
-	&hal_mod_sgpg,
-	&hal_mod_fcport,
-	&hal_mod_fcxp,
-	&hal_mod_lps,
-	&hal_mod_uf,
-	&hal_mod_rport,
-	&hal_mod_fcpim,
-	NULL
-};
-
-/*
- * Message handlers for various modules.
- */
-bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
-	bfa_isr_unhandled,	/* NONE */
-	bfa_isr_unhandled,	/* BFI_MC_IOC */
-	bfa_isr_unhandled,	/* BFI_MC_DIAG */
-	bfa_isr_unhandled,	/* BFI_MC_FLASH */
-	bfa_isr_unhandled,	/* BFI_MC_CEE */
-	bfa_fcport_isr,		/* BFI_MC_FCPORT */
-	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
-	bfa_isr_unhandled,	/* BFI_MC_LL */
-	bfa_uf_isr,		/* BFI_MC_UF */
-	bfa_fcxp_isr,		/* BFI_MC_FCXP */
-	bfa_lps_isr,		/* BFI_MC_LPS */
-	bfa_rport_isr,		/* BFI_MC_RPORT */
-	bfa_itnim_isr,		/* BFI_MC_ITNIM */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
-	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
-	bfa_ioim_isr,		/* BFI_MC_IOIM */
-	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
-	bfa_tskim_isr,		/* BFI_MC_TSKIM */
-	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
-	bfa_isr_unhandled,	/* BFI_MC_IPFC */
-	bfa_isr_unhandled,	/* BFI_MC_PORT */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-	bfa_isr_unhandled,	/* --------- */
-};
-
-
-/*
- * Message handlers for mailbox command classes
- */
-bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
-	NULL,
-	NULL,			/* BFI_MC_IOC   */
-	NULL,			/* BFI_MC_DIAG  */
-	NULL,		/* BFI_MC_FLASH */
-	NULL,			/* BFI_MC_CEE   */
-	NULL,			/* BFI_MC_PORT  */
-	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
-	NULL,
-};
-
-
-
-void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
-{
-	struct bfa_port_s	*port = &bfa->modules.port;
-	u32		dm_len;
-	u8			*dm_kva;
-	u64		dm_pa;
-
-	dm_len = bfa_port_meminfo();
-	dm_kva = bfa_meminfo_dma_virt(mi);
-	dm_pa  = bfa_meminfo_dma_phys(mi);
-
-	memset(port, 0, sizeof(struct bfa_port_s));
-	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
-	bfa_port_mem_claim(port, dm_kva, dm_pa);
-
-	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
-	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
-}
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index e929d25..8e764fa 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -18,14 +18,12 @@
 #ifndef __BFA_FC_H__
 #define __BFA_FC_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 
 typedef u64 wwn_t;
-typedef u64 lun_t;
 
 #define WWN_NULL	(0)
 #define FC_SYMNAME_MAX	256	/*  max name server symbolic name size */
-#define FC_ALPA_MAX	128
 
 #pragma pack(1)
 
@@ -40,7 +38,6 @@
 struct scsi_cdb_s {
 	u8         scsi_cdb[SCSI_MAX_CDBLEN];
 };
-#define scsi_cdb_t struct scsi_cdb_s
 
 /* ------------------------------------------------------------
  * SCSI status byte values
@@ -63,7 +60,7 @@
  * Fibre Channel Header Structure (FCHS) definition
  */
 struct fchs_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32        routing:4;	/* routing bits */
 	u32        cat_info:4;	/* category info */
 #else
@@ -75,34 +72,19 @@
 	u32        cs_ctl:8;	/* class specific control */
 	u32        s_id:24;	/* source identifier */
 
-	u32        type:8;		/* data structure type */
+	u32        type:8;	/* data structure type */
 	u32        f_ctl:24;	/* initial frame control */
 
-	u8         seq_id;		/* sequence identifier */
-	u8         df_ctl;		/* data field control */
+	u8         seq_id;	/* sequence identifier */
+	u8         df_ctl;	/* data field control */
 	u16        seq_cnt;	/* sequence count */
 
-	u16        ox_id;		/* originator exchange ID */
-	u16        rx_id;		/* responder exchange ID */
+	__be16     ox_id;	/* originator exchange ID */
+	u16        rx_id;	/* responder exchange ID */
 
 	u32        ro;		/* relative offset */
 };
 
-#define FC_SOF_LEN		4
-#define FC_EOF_LEN		4
-#define FC_CRC_LEN		4
-
-/*
- * Fibre Channel BB_E Header Structure
- */
-struct fcbbehs_s {
-	u16	ver_rsvd;
-	u32	rsvd[2];
-	u32	rsvd__sof;
-};
-
-#define FC_SEQ_ID_MAX		256
-
 /*
  * routing bit definitions
  */
@@ -149,22 +131,6 @@
 };
 
 /*
- * information category for Link Control
- */
-enum {
-	FC_CAT_ACK_1		= 0x00,
-	FC_CAT_ACK_0_N		= 0x01,
-	FC_CAT_P_RJT		= 0x02,
-	FC_CAT_F_RJT		= 0x03,
-	FC_CAT_P_BSY		= 0x04,
-	FC_CAT_F_BSY_DATA	= 0x05,
-	FC_CAT_F_BSY_LINK_CTL	= 0x06,
-	FC_CAT_F_LCR		= 0x07,
-	FC_CAT_NTY		= 0x08,
-	FC_CAT_END		= 0x09,
-};
-
-/*
  * Type Field Definitions. FC-PH Section 18.5 pg. 165
  */
 enum {
@@ -182,10 +148,6 @@
 	FC_TYPE_MAX		= 256,	/* 256 FC-4 types */
 };
 
-struct fc_fc4types_s {
-	u8         bits[FC_TYPE_MAX / 8];
-};
-
 /*
  * Frame Control Definitions. FC-PH Table-45. pg. 168
  */
@@ -288,7 +250,6 @@
 	FC_ELS_AUTH = 0x90,	/* Authentication. Ref FC-SP */
 	FC_ELS_RFCN = 0x97,	/* Request Fabric Change Notification. Ref
 				 *FC-SP */
-
 };
 
 /*
@@ -314,12 +275,12 @@
  * FC-PH-x. Figure-76. pg. 308.
  */
 struct fc_plogi_csp_s {
-	u8         verhi;	/* FC-PH high version */
-	u8         verlo;	/* FC-PH low version */
-	u16        bbcred;	/* BB_Credit */
+	u8		verhi;		/* FC-PH high version */
+	u8		verlo;		/* FC-PH low version */
+	__be16		bbcred;		/* BB_Credit */
 
-#ifdef __BIGENDIAN
-	u8         ciro:1,		/* continuously increasing RO */
+#ifdef __BIG_ENDIAN
+	u8		ciro:1,		/* continuously increasing RO */
 			rro:1,		/* random relative offset */
 			npiv_supp:1,	/* NPIV supported */
 			port_type:1,	/* N_Port/F_port */
@@ -328,7 +289,7 @@
 			vvl_info:1,	/* VVL Info included */
 			reserved1:1;
 
-	u8         hg_supp:1,
+	u8		hg_supp:1,
 			query_dbc:1,
 			security:1,
 			sync_cap:1,
@@ -337,7 +298,7 @@
 			cisc:1,		/* continuously increasing seq count */
 			payload:1;
 #else
-	u8         reserved2:2,
+	u8		reserved2:2,
 			resolution:1,	/* ms/ns ED_TOV resolution */
 			altbbcred:1,	/* alternate BB_Credit */
 			port_type:1,	/* N_Port/F_port */
@@ -345,7 +306,7 @@
 			rro:1,		/* random relative offset */
 			ciro:1;		/* continuously increasing RO */
 
-	u8         payload:1,
+	u8		payload:1,
 			cisc:1,		/* continuously increasing seq count */
 			dh_dup_supp:1,
 			r_t_tov:1,
@@ -354,13 +315,10 @@
 			query_dbc:1,
 			hg_supp:1;
 #endif
-
-	u16        rxsz;		/* recieve data_field size */
-
-	u16        conseq;
-	u16        ro_bitmap;
-
-	u32        e_d_tov;
+	__be16		rxsz;		/* receive data_field size */
+	__be16		conseq;
+	__be16		ro_bitmap;
+	__be32		e_d_tov;
 };
 
 /*
@@ -368,12 +326,11 @@
  * FC-PH-x. Figure 78. pg. 318.
  */
 struct fc_plogi_clp_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32        class_valid:1;
 	u32        intermix:1;	/* class intermix supported if set =1.
-					 * valid only for class1. Reserved for
-					 * class2 & class3
-					 */
+				 * valid only for class1. Reserved for
+				 * class2 & class3 */
 	u32        reserved1:2;
 	u32        sequential:1;
 	u32        reserved2:3;
@@ -382,12 +339,10 @@
 	u32        sequential:1;
 	u32        reserved1:2;
 	u32        intermix:1;	/* class intermix supported if set =1.
-					 * valid only for class1. Reserved for
-					 * class2 & class3
-					 */
+				 * valid only for class1. Reserved for
+				 * class2 & class3 */
 	u32        class_valid:1;
 #endif
-
 	u32        reserved3:24;
 
 	u32        reserved4:16;
@@ -395,7 +350,7 @@
 
 	u32        reserved5:8;
 	u32        conseq:8;
-	u32        e2e_credit:16;	/* end to end credit */
+	u32        e2e_credit:16; /* end to end credit */
 
 	u32        reserved7:8;
 	u32        ospx:8;
@@ -409,24 +364,24 @@
  * PLOGI els command and reply payload
  */
 struct fc_logi_s {
-	struct fc_els_cmd_s els_cmd;	/* ELS command code */
-	struct fc_plogi_csp_s csp;		/* common service params */
-	wwn_t           port_name;
-	wwn_t           node_name;
-	struct fc_plogi_clp_s class1;		/* class 1 service parameters */
-	struct fc_plogi_clp_s class2;		/* class 2 service parameters */
-	struct fc_plogi_clp_s class3;		/* class 3 service parameters */
-	struct fc_plogi_clp_s class4;		/* class 4 service parameters */
-	u8         vvl[16];	/* vendor version level */
+	struct fc_els_cmd_s	els_cmd;	/* ELS command code */
+	struct fc_plogi_csp_s	csp;		/* common service params */
+	wwn_t			port_name;
+	wwn_t			node_name;
+	struct fc_plogi_clp_s	class1;		/* class 1 service parameters */
+	struct fc_plogi_clp_s	class2;		/* class 2 service parameters */
+	struct fc_plogi_clp_s	class3;		/* class 3 service parameters */
+	struct fc_plogi_clp_s	class4;		/* class 4 service parameters */
+	u8			vvl[16];	/* vendor version level */
 };
 
 /*
  * LOGO els command payload
  */
 struct fc_logo_s {
-	struct fc_els_cmd_s els_cmd;	/* ELS command code */
-	u32        res1:8;
-	u32        nport_id:24;	/* N_Port identifier of source */
+	struct fc_els_cmd_s	els_cmd;	/* ELS command code */
+	u32			res1:8;
+	u32		nport_id:24;	/* N_Port identifier of source */
 	wwn_t           orig_port_name;	/* Port name of the LOGO originator */
 };
 
@@ -435,12 +390,12 @@
  */
 struct fc_adisc_s {
 	struct fc_els_cmd_s els_cmd;	/* ELS command code */
-	u32        res1:8;
-	u32        orig_HA:24;	/* originator hard address */
-	wwn_t           orig_port_name;	/* originator port name */
-	wwn_t           orig_node_name;	/* originator node name */
-	u32        res2:8;
-	u32        nport_id:24;	/* originator NPortID */
+	u32		res1:8;
+	u32		orig_HA:24;	/* originator hard address */
+	wwn_t		orig_port_name;	/* originator port name */
+	wwn_t		orig_node_name;	/* originator node name */
+	u32		res2:8;
+	u32		nport_id:24;	/* originator NPortID */
 };
 
 /*
@@ -466,7 +421,7 @@
 struct fc_res_s {
 	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
-	u32        nport_id:24;	/* N_Port identifier of source */
+	u32        nport_id:24;		/* N_Port identifier of source */
 	u32        oxid:16;
 	u32        rxid:16;
 	u8         assoc_hdr[32];
@@ -512,8 +467,8 @@
 	u32        orig_id:24;	/* N_Port id of exchange originator */
 	u32        res2:8;
 	u32        resp_id:24;	/* N_Port id of exchange responder */
-	u32        count;		/* data transfer count */
-	u32        e_stat;		/* exchange status */
+	u32        count;	/* data transfer count */
+	u32        e_stat;	/* exchange status */
 };
 
 /*
@@ -533,7 +488,7 @@
  */
 struct fc_prli_params_s {
 	u32        reserved:16;
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32        reserved1:5;
 	u32        rec_support:1;
 	u32        task_retry_id:1;
@@ -575,7 +530,7 @@
 struct fc_prli_params_page_s {
 	u32        type:8;
 	u32        codext:8;
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32        origprocasv:1;
 	u32        rsppav:1;
 	u32        imagepair:1;
@@ -611,18 +566,14 @@
 struct fc_prlo_params_page_s {
 	u32        type:8;
 	u32        type_ext:8;
-#ifdef __BIGENDIAN
-	u32        opa_valid:1;	/* originator process associator
-					 * valid
-					 */
+#ifdef __BIG_ENDIAN
+	u32        opa_valid:1;	/* originator process associator valid */
 	u32        rpa_valid:1;	/* responder process associator valid */
 	u32        res1:14;
 #else
 	u32        res1:14;
 	u32        rpa_valid:1;	/* responder process associator valid */
-	u32        opa_valid:1;	/* originator process associator
-					 * valid
-					 */
+	u32        opa_valid:1;	/* originator process associator valid */
 #endif
 	u32        orig_process_assc;
 	u32        resp_process_assc;
@@ -647,18 +598,14 @@
 	u32        type:8;
 	u32        type_ext:8;
 
-#ifdef __BIGENDIAN
-	u32        opa_valid:1;	/* originator process associator
-					 * valid
-					 */
+#ifdef __BIG_ENDIAN
+	u32        opa_valid:1;	/* originator process associator valid */
 	u32        rpa_valid:1;	/* responder process associator valid */
 	u32        res1:14;
 #else
 	u32        res1:14;
 	u32        rpa_valid:1;	/* responder process associator valid */
-	u32        opa_valid:1;	/* originator process associator
-					 * valid
-					 */
+	u32        opa_valid:1;	/* originator process associator valid */
 #endif
 	u32        orig_process_assc;
 	u32        resp_process_assc;
@@ -715,9 +662,9 @@
  * LS_RJT els reply payload
  */
 struct fc_ls_rjt_s {
-	struct fc_els_cmd_s els_cmd;		/* ELS command code */
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
-	u32        reason_code:8;		/* Reason code for reject */
+	u32        reason_code:8;	/* Reason code for reject */
 	u32        reason_code_expl:8;	/* Reason code explanation */
 	u32        vendor_unique:8;	/* Vendor specific */
 };
@@ -779,12 +726,12 @@
  */
 struct fc_ba_acc_s {
 	u32        seq_id_valid:8;	/* set to 0x00 for Abort Exchange */
-	u32        seq_id:8;	/* invalid for Abort Exchange */
+	u32        seq_id:8;		/* invalid for Abort Exchange */
 	u32        res2:16;
-	u32        ox_id:16;	/* OX_ID from ABTS frame */
-	u32        rx_id:16;	/* RX_ID from ABTS frame */
+	u32        ox_id:16;		/* OX_ID from ABTS frame */
+	u32        rx_id:16;		/* RX_ID from ABTS frame */
 	u32        low_seq_cnt:16;	/* set to 0x0000 for Abort Exchange */
-	u32        high_seq_cnt:16;/* set to 0xFFFF for Abort Exchange */
+	u32        high_seq_cnt:16;	/* set to 0xFFFF for Abort Exchange */
 };
 
 /*
@@ -794,17 +741,17 @@
 	u32        res1:8;		/* Reserved */
 	u32        reason_code:8;	/* reason code for reject */
 	u32        reason_expl:8;	/* reason code explanation */
-	u32        vendor_unique:8;/* vendor unique reason code,set to 0 */
+	u32        vendor_unique:8; /* vendor unique reason code, set to 0 */
 };
 
 /*
  * TPRLO logout parameter page
  */
 struct fc_tprlo_params_page_s {
-u32        type:8;
-u32        type_ext:8;
+	u32        type:8;
+	u32        type_ext:8;
 
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32        opa_valid:1;
 	u32        rpa_valid:1;
 	u32        tpo_nport_valid:1;
@@ -864,16 +811,16 @@
 };
 
 struct fc_rscn_event_s {
-	u32        format:2;
-	u32        qualifier:4;
-	u32        resvd:2;
-	u32        portid:24;
+	u32	format:2;
+	u32	qualifier:4;
+	u32	resvd:2;
+	u32	portid:24;
 };
 
 struct fc_rscn_pl_s {
-	u8         command;
-	u8         pagelen;
-	u16        payldlen;
+	u8	command;
+	u8	pagelen;
+	__be16	payldlen;
 	struct fc_rscn_event_s event[1];
 };
 
@@ -887,7 +834,6 @@
 /*
  * RNID els command
  */
-
 #define RNID_NODEID_DATA_FORMAT_COMMON			0x00
 #define RNID_NODEID_DATA_FORMAT_FCP3			0x08
 #define RNID_NODEID_DATA_FORMAT_DISCOVERY		0xDF
@@ -920,15 +866,15 @@
  */
 
 struct fc_rnid_common_id_data_s {
-	wwn_t           port_name;
+	wwn_t		port_name;
 	wwn_t           node_name;
 };
 
 struct fc_rnid_general_topology_data_s {
 	u32        vendor_unique[4];
-	u32        asso_type;
+	__be32     asso_type;
 	u32        phy_port_num;
-	u32        num_attached_nodes;
+	__be32     num_attached_nodes;
 	u32        node_mgmt:8;
 	u32        ip_version:8;
 	u32        udp_tcp_port_num:16;
@@ -980,59 +926,17 @@
 	RPSC_OP_SPEED_8G = 0x0800,
 	RPSC_OP_SPEED_16G = 0x0400,
 
-	RPSC_OP_SPEED_NOT_EST = 0x0001,	/*! speed not established */
+	RPSC_OP_SPEED_NOT_EST = 0x0001,	/* speed not established */
 };
 
 struct fc_rpsc_speed_info_s {
-	u16        port_speed_cap;	/*! see enum fc_rpsc_speed_cap */
-	u16        port_op_speed;	/*! see enum fc_rpsc_op_speed */
-};
-
-enum link_e2e_beacon_subcmd {
-	LINK_E2E_BEACON_ON = 1,
-	LINK_E2E_BEACON_OFF = 2
-};
-
-enum beacon_type {
-	BEACON_TYPE_NORMAL	= 1,	/*! Normal Beaconing. Green */
-	BEACON_TYPE_WARN	= 2,	/*! Warning Beaconing. Yellow/Amber */
-	BEACON_TYPE_CRITICAL	= 3	/*! Critical Beaconing. Red */
-};
-
-struct link_e2e_beacon_param_s {
-	u8         beacon_type;	/* Beacon Type. See enum beacon_type */
-	u8         beacon_frequency;
-					/* Beacon frequency. Number of blinks
-					 * per 10 seconds
-					 */
-	u16        beacon_duration;/* Beacon duration (in Seconds). The
-					 * command operation should be
-					 * terminated at the end of this
-					 * timeout value.
-					 *
-					 * Ignored if diag_sub_cmd is
-					 * LINK_E2E_BEACON_OFF.
-					 *
-					 * If 0, beaconing will continue till a
-					 * BEACON OFF request is received
-					 */
-};
-
-/*
- * Link E2E beacon request/good response format.
- * For LS_RJTs use struct fc_ls_rjt_s
- */
-struct link_e2e_beacon_req_s {
-	u32        ls_code;	/*! FC_ELS_E2E_LBEACON in requests *
-					 *or FC_ELS_ACC in good replies */
-	u32        ls_sub_cmd;	/*! See enum link_e2e_beacon_subcmd */
-	struct link_e2e_beacon_param_s beacon_parm;
+	__be16        port_speed_cap;	/* see enum fc_rpsc_speed_cap */
+	__be16        port_op_speed;	/* see enum fc_rpsc_op_speed */
 };
 
 /*
  * If RPSC request is sent to the Domain Controller, the request is for
- * all the ports within that domain (TODO - I don't think FOS implements
- * this...).
+ * all the ports within that domain.
  */
 struct fc_rpsc_cmd_s {
 	struct fc_els_cmd_s els_cmd;
@@ -1056,9 +960,9 @@
 
 struct fc_rpsc2_cmd_s {
 	struct fc_els_cmd_s els_cmd;
-	u32	token;
+	__be32	token;
 	u16	resvd;
-	u16	num_pids;	/* Number of pids in the request */
+	__be16	num_pids;		/* Number of pids in the request */
 	struct  {
 		u32	rsvd1:8;
 		u32	pid:24;		/* port identifier */
@@ -1072,16 +976,17 @@
 	RPSC2_PORT_TYPE_NPIV_PORT  = 0x5f,
 	RPSC2_PORT_TYPE_NPORT_TRUNK  = 0x6f,
 };
+
 /*
  * RPSC2 portInfo entry structure
  */
 struct fc_rpsc2_port_info_s {
-    u32    pid;        /* PID */
-    u16    resvd1;
-    u16    index;      /* port number / index */
-    u8     resvd2;
-    u8	   type;	/* port type N/NL/... */
-    u16    speed;      /* port Operating Speed */
+	__be32	pid;		/* PID */
+	u16	resvd1;
+	__be16	index;		/* port number / index */
+	u8	resvd2;
+	u8	type;		/* port type N/NL/... */
+	__be16	speed;		/* port Operating Speed */
 };
 
 /*
@@ -1090,8 +995,8 @@
 struct fc_rpsc2_acc_s {
 	u8        els_cmd;
 	u8        resvd;
-    u16       num_pids;  /* Number of pids in the request */
-    struct fc_rpsc2_port_info_s port_info[1];    /* port information */
+	__be16    num_pids; /* Number of pids in the request */
+	struct fc_rpsc2_port_info_s port_info[1]; /* port information */
 };
 
 /*
@@ -1110,18 +1015,14 @@
 	u8         symname[FC_SYMNAME_MAX];
 };
 
-struct fc_alpabm_s {
-	u8         alpa_bm[FC_ALPA_MAX / 8];
-};
-
 /*
  * protocol default timeout values
  */
-#define FC_ED_TOV		2
-#define FC_REC_TOV		(FC_ED_TOV + 1)
-#define FC_RA_TOV		10
-#define FC_ELS_TOV		(2 * FC_RA_TOV)
-#define FC_FCCT_TOV		(3 * FC_RA_TOV)
+#define FC_ED_TOV	2
+#define FC_REC_TOV	(FC_ED_TOV + 1)
+#define FC_RA_TOV	10
+#define FC_ELS_TOV	(2 * FC_RA_TOV)
+#define FC_FCCT_TOV	(3 * FC_RA_TOV)
 
 /*
  * virtual fabric related defines
@@ -1157,50 +1058,34 @@
 };
 
 /*
- * SRR FC-4 LS payload
- */
-struct fc_srr_s {
-	u32	ls_cmd;
-	u32        ox_id:16;	/* ox-id */
-	u32        rx_id:16;	/* rx-id */
-	u32        ro;		/* relative offset */
-	u32        r_ctl:8;		/* R_CTL for I.U. */
-	u32        res:24;
-};
-
-
-/*
  * FCP_CMND definitions
  */
 #define FCP_CMND_CDB_LEN    16
 #define FCP_CMND_LUN_LEN    8
 
 struct fcp_cmnd_s {
-	lun_t           lun;		/* 64-bit LU number */
-	u8         crn;		/* command reference number */
-#ifdef __BIGENDIAN
-	u8         resvd:1,
+	struct scsi_lun	lun;		/* 64-bit LU number */
+	u8		crn;		/* command reference number */
+#ifdef __BIG_ENDIAN
+	u8		resvd:1,
 			priority:4,	/* FCP-3: SAM-3 priority */
 			taskattr:3;	/* scsi task attribute */
 #else
-	u8         taskattr:3,	/* scsi task attribute */
+	u8		taskattr:3,	/* scsi task attribute */
 			priority:4,	/* FCP-3: SAM-3 priority */
 			resvd:1;
 #endif
-	u8         tm_flags;	/* task management flags */
-#ifdef __BIGENDIAN
-	u8         addl_cdb_len:6,	/* additional CDB length words */
+	u8		tm_flags;	/* task management flags */
+#ifdef __BIG_ENDIAN
+	u8		addl_cdb_len:6,	/* additional CDB length words */
 			iodir:2;	/* read/write FCP_DATA IUs */
 #else
-	u8         iodir:2,	/* read/write FCP_DATA IUs */
+	u8		iodir:2,	/* read/write FCP_DATA IUs */
 			addl_cdb_len:6;	/* additional CDB length */
 #endif
-	scsi_cdb_t      cdb;
+	struct scsi_cdb_s      cdb;
 
-	/*
-	 * !!! additional cdb bytes follows here!!!
-	 */
-	u32        fcp_dl;	/* bytes to be transferred */
+	__be32        fcp_dl;	/* bytes to be transferred */
 };
 
 #define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
@@ -1210,21 +1095,10 @@
  * struct fcp_cmnd_s .iodir field values
  */
 enum fcp_iodir {
-	FCP_IODIR_NONE	= 0,
+	FCP_IODIR_NONE  = 0,
 	FCP_IODIR_WRITE = 1,
-	FCP_IODIR_READ	= 2,
-	FCP_IODIR_RW	= 3,
-};
-
-/*
- * Task attribute field
- */
-enum {
-	FCP_TASK_ATTR_SIMPLE	= 0,
-	FCP_TASK_ATTR_HOQ	= 1,
-	FCP_TASK_ATTR_ORDERED	= 2,
-	FCP_TASK_ATTR_ACA	= 4,
-	FCP_TASK_ATTR_UNTAGGED	= 5,	/* obsolete in FCP-3 */
+	FCP_IODIR_READ  = 2,
+	FCP_IODIR_RW    = 3,
 };
 
 /*
@@ -1239,58 +1113,40 @@
 };
 
 /*
- * FCP_XFER_RDY IU defines
- */
-struct fcp_xfer_rdy_s {
-	u32        data_ro;
-	u32        burst_len;
-	u32        reserved;
-};
-
-/*
  * FCP_RSP residue flags
  */
 enum fcp_residue {
-	FCP_NO_RESIDUE = 0,	/* no residue */
-	FCP_RESID_OVER = 1,	/* more data left that was not sent */
-	FCP_RESID_UNDER = 2,	/* less data than requested */
-};
-
-enum {
-	FCP_RSPINFO_GOOD = 0,
-	FCP_RSPINFO_DATALEN_MISMATCH = 1,
-	FCP_RSPINFO_CMND_INVALID = 2,
-	FCP_RSPINFO_ROLEN_MISMATCH = 3,
-	FCP_RSPINFO_TM_NOT_SUPP = 4,
-	FCP_RSPINFO_TM_FAILED = 5,
+	FCP_NO_RESIDUE = 0,     /* no residue */
+	FCP_RESID_OVER = 1,     /* more data left that was not sent */
+	FCP_RESID_UNDER = 2,    /* less data than requested */
 };
 
 struct fcp_rspinfo_s {
 	u32        res0:24;
-	u32        rsp_code:8;	/* response code (as above) */
+	u32        rsp_code:8;		/* response code (as above) */
 	u32        res1;
 };
 
 struct fcp_resp_s {
-	u32        reserved[2];	/* 2 words reserved */
+	u32        reserved[2];		/* 2 words reserved */
 	u16        reserved2;
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u8         reserved3:3;
 	u8         fcp_conf_req:1;	/* FCP_CONF is requested */
 	u8         resid_flags:2;	/* underflow/overflow */
-	u8         sns_len_valid:1;/* sense len is valid */
-	u8         rsp_len_valid:1;/* response len is valid */
+	u8         sns_len_valid:1;	/* sense len is valid */
+	u8         rsp_len_valid:1;	/* response len is valid */
 #else
-	u8         rsp_len_valid:1;/* response len is valid */
-	u8         sns_len_valid:1;/* sense len is valid */
+	u8         rsp_len_valid:1;	/* response len is valid */
+	u8         sns_len_valid:1;	/* sense len is valid */
 	u8         resid_flags:2;	/* underflow/overflow */
 	u8         fcp_conf_req:1;	/* FCP_CONF is requested */
 	u8         reserved3:3;
 #endif
-	u8         scsi_status;	/* one byte SCSI status */
-	u32        residue;	/* residual data bytes */
-	u32        sns_len;	/* length od sense info */
-	u32        rsp_len;	/* length of response info */
+	u8         scsi_status;		/* one byte SCSI status */
+	u32        residue;		/* residual data bytes */
+	u32        sns_len;		/* length of sense info */
+	u32        rsp_len;		/* length of response info */
 };
 
 #define fcp_snslen(__fcprsp)	((__fcprsp)->sns_len_valid ?		\
@@ -1300,12 +1156,6 @@
 #define fcp_rspinfo(__fcprsp)	((struct fcp_rspinfo_s *)((__fcprsp) + 1))
 #define fcp_snsinfo(__fcprsp)	(((u8 *)fcp_rspinfo(__fcprsp)) +	\
 						fcp_rsplen(__fcprsp))
-
-struct fcp_cmnd_fr_s {
-	struct fchs_s fchs;
-	struct fcp_cmnd_s fcp;
-};
-
 /*
  * CT
  */
@@ -1379,7 +1229,7 @@
 	CT_RSN_LOGICAL_BUSY	= 0x05,
 	CT_RSN_PROTO_ERR	= 0x07,
 	CT_RSN_UNABLE_TO_PERF	= 0x09,
-	CT_RSN_NOT_SUPP			= 0x0B,
+	CT_RSN_NOT_SUPP		= 0x0B,
 	CT_RSN_SERVER_NOT_AVBL  = 0x0D,
 	CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
 	CT_RSN_VENDOR_SPECIFIC  = 0xFF,
@@ -1419,10 +1269,10 @@
  * defintions for the explanation code for all servers
  */
 enum {
-	CT_EXP_AUTH_EXCEPTION			= 0xF1,
-	CT_EXP_DB_FULL					= 0xF2,
-	CT_EXP_DB_EMPTY					= 0xF3,
-	CT_EXP_PROCESSING_REQ			= 0xF4,
+	CT_EXP_AUTH_EXCEPTION		= 0xF1,
+	CT_EXP_DB_FULL			= 0xF2,
+	CT_EXP_DB_EMPTY			= 0xF3,
+	CT_EXP_PROCESSING_REQ		= 0xF4,
 	CT_EXP_UNABLE_TO_VERIFY_CONN	= 0xF5,
 	CT_EXP_DEVICES_NOT_IN_CMN_ZONE  = 0xF6
 };
@@ -1446,7 +1296,7 @@
 	GS_RFF_ID	= 0x021F,	/* Register FC4 Feature		*/
 };
 
-struct fcgs_id_req_s{
+struct fcgs_id_req_s {
 	u32 rsvd:8;
 	u32 dap:24; /* port identifier */
 };
@@ -1460,7 +1310,7 @@
 
 struct fcgs_gidpn_resp_s {
 	u32	rsvd:8;
-	u32	dap:24;	/* port identifier */
+	u32	dap:24;		/* port identifier */
 };
 
 /*
@@ -1469,22 +1319,21 @@
 struct fcgs_rftid_req_s {
 	u32	rsvd:8;
 	u32	dap:24;		/* port identifier */
-	u32	fc4_type[8];	/* fc4 types */
+	__be32	fc4_type[8];	/* fc4 types */
 };
 
 /*
  * RFF_ID : Register FC4 features.
  */
-
 #define FC_GS_FCP_FC4_FEATURE_INITIATOR  0x02
 #define FC_GS_FCP_FC4_FEATURE_TARGET	 0x01
 
 struct fcgs_rffid_req_s {
-    u32    rsvd:8;
-    u32    dap:24;		/* port identifier	*/
-    u32    rsvd1:16;
-    u32    fc4ftr_bits:8;		/* fc4 feature bits	*/
-    u32    fc4_type:8;		/* corresponding FC4 Type */
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+	u32	rsvd1:16;
+	u32	fc4ftr_bits:8;	/* fc4 feature bits */
+	u32	fc4_type:8;		/* corresponding FC4 Type */
 };
 
 /*
@@ -1495,16 +1344,16 @@
 	u8	domain_id;	/* domain, 0 - all fabric */
 	u8	area_id;	/* area, 0 - whole domain */
 	u8	fc4_type;	/* FC_TYPE_FCP for SCSI devices */
-};		/* GID_FT Request */
+};
 
 /*
  * GID_FT Response
  */
 struct fcgs_gidft_resp_s {
-	u8		last:1;	/* last port identifier flag */
-	u8		reserved:7;
-	u32	pid:24;	/* port identifier */
-};		/* GID_FT Response */
+	u8	last:1;		/* last port identifier flag */
+	u8	reserved:7;
+	u32	pid:24;		/* port identifier */
+};
 
 /*
  * RSPN_ID
@@ -1512,8 +1361,8 @@
 struct fcgs_rspnid_req_s {
 	u32	rsvd:8;
 	u32	dap:24;		/* port identifier */
-	u8		spn_len;	/* symbolic port name length */
-	u8		spn[256];	/* symbolic port name */
+	u8	spn_len;	/* symbolic port name length */
+	u8	spn[256];	/* symbolic port name */
 };
 
 /*
@@ -1522,7 +1371,7 @@
 struct fcgs_rpnid_req_s {
 	u32	rsvd:8;
 	u32	port_id:24;
-	wwn_t		port_name;
+	wwn_t	port_name;
 };
 
 /*
@@ -1531,7 +1380,7 @@
 struct fcgs_rnnid_req_s {
 	u32	rsvd:8;
 	u32	port_id:24;
-	wwn_t		node_name;
+	wwn_t	node_name;
 };
 
 /*
@@ -1565,8 +1414,8 @@
  * GA_NXT Response
  */
 struct fcgs_ganxt_rsp_s {
-	u32	port_type:8;	/* Port Type */
-	u32	port_id:24;	/* Port Identifier */
+	u32		port_type:8;	/* Port Type */
+	u32		port_id:24;	/* Port Identifier */
 	wwn_t		port_name;	/* Port Name */
 	u8		spn_len;	/* Length of Symbolic Port Name */
 	char		spn[255];	/* Symbolic Port Name */
@@ -1575,19 +1424,14 @@
 	char		snn[255];	/* Symbolic Node Name */
 	u8		ipa[8];		/* Initial Process Associator */
 	u8		ip[16];		/* IP Address */
-	u32	cos;		/* Class of Service */
-	u32	fc4types[8];	/* FC-4 TYPEs */
-	wwn_t		fabric_port_name;
-					/* Fabric Port Name */
-	u32	rsvd:8;		/* Reserved */
-	u32	hard_addr:24;	/* Hard Address */
+	u32		cos;		/* Class of Service */
+	u32		fc4types[8];	/* FC-4 TYPEs */
+	wwn_t		fabric_port_name; /* Fabric Port Name */
+	u32		rsvd:8;		/* Reserved */
+	u32		hard_addr:24;	/* Hard Address */
 };
 
 /*
- * Fabric Config Server
- */
-
-/*
  * Command codes for Fabric Configuration Server
  */
 enum {
@@ -1598,159 +1442,9 @@
 };
 
 /*
- * Source or Destination Port Tags.
- */
-enum {
-	GS_FTRACE_TAG_NPORT_ID		= 1,
-	GS_FTRACE_TAG_NPORT_NAME	= 2,
-};
-
-/*
-* Port Value : Could be a Port id or wwn
- */
-union fcgs_port_val_u {
-	u32	nport_id;
-	wwn_t		nport_wwn;
-};
-
-#define GS_FTRACE_MAX_HOP_COUNT	20
-#define GS_FTRACE_REVISION	1
-
-/*
- * Ftrace Related Structures.
- */
-
-/*
- * STR (Switch Trace) Reject Reason Codes. From FC-SW.
- */
-enum {
-	GS_FTRACE_STR_CMD_COMPLETED_SUCC	= 0,
-	GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
-	GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
-	GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
-	GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
-	GS_FTRACE_STR_DST_PORT_NOT_FOUND,
-	GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
-	GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
-	GS_FTRACE_STR_NO_ADDL_EXPLN,
-	GS_FTRACE_STR_FABRIC_BUSY,
-	GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
-	GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
-	GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
-};
-
-/*
- * Ftrace Request
- */
-struct fcgs_ftrace_req_s {
-	u32	revision;
-	u16	src_port_tag;	/* Source Port tag */
-	u16	src_port_len;	/* Source Port len */
-	union fcgs_port_val_u src_port_val;	/* Source Port value */
-	u16	dst_port_tag;	/* Destination Port tag */
-	u16	dst_port_len;	/* Destination Port len */
-	union fcgs_port_val_u dst_port_val;	/* Destination Port value */
-	u32	token;
-	u8		vendor_id[8];	/* T10 Vendor Identifier */
-	u8		vendor_info[8];	/* Vendor specific Info */
-	u32	max_hop_cnt;	/* Max Hop Count */
-};
-
-/*
- * Path info structure
- */
-struct fcgs_ftrace_path_info_s {
-	wwn_t		switch_name;		/* Switch WWN */
-	u32	domain_id;
-	wwn_t		ingress_port_name;	/* Ingress ports wwn */
-	u32	ingress_phys_port_num;	/* Ingress ports physical port
-						 * number
-						 */
-	wwn_t		egress_port_name;	/* Ingress ports wwn */
-	u32	egress_phys_port_num;	/* Ingress ports physical port
-						 * number
-						 */
-};
-
-/*
- * Ftrace Acc Response
- */
-struct fcgs_ftrace_resp_s {
-	u32	revision;
-	u32	token;
-	u8		vendor_id[8];		/* T10 Vendor Identifier */
-	u8		vendor_info[8];		/* Vendor specific Info */
-	u32	str_rej_reason_code;	/* STR Reject Reason Code */
-	u32	num_path_info_entries;	/* No. of path info entries */
-	/*
-	 * path info entry/entries.
-	 */
-	struct fcgs_ftrace_path_info_s path_info[1];
-
-};
-
-/*
-* Fabric Config Server : FCPing
- */
-
-/*
- * FC Ping Request
- */
-struct fcgs_fcping_req_s {
-	u32	revision;
-	u16	port_tag;
-	u16	port_len;	/* Port len */
-	union fcgs_port_val_u port_val;	/* Port value */
-	u32	token;
-};
-
-/*
- * FC Ping Response
- */
-struct fcgs_fcping_resp_s {
-	u32	token;
-};
-
-/*
- * Command codes for zone server query.
- */
-enum {
-	ZS_GZME = 0x0124,	/* Get zone member extended */
-};
-
-/*
- * ZS GZME request
- */
-#define ZS_GZME_ZNAMELEN	32
-struct zs_gzme_req_s {
-	u8	znamelen;
-	u8	rsvd[3];
-	u8	zname[ZS_GZME_ZNAMELEN];
-};
-
-enum zs_mbr_type {
-	ZS_MBR_TYPE_PWWN	= 1,
-	ZS_MBR_TYPE_DOMPORT	= 2,
-	ZS_MBR_TYPE_PORTID	= 3,
-	ZS_MBR_TYPE_NWWN	= 4,
-};
-
-struct zs_mbr_wwn_s {
-	u8	mbr_type;
-	u8	rsvd[3];
-	wwn_t	wwn;
-};
-
-struct zs_query_resp_s {
-	u32	nmbrs;	/*  number of zone members */
-	struct zs_mbr_wwn_s	mbr[1];
-};
-
-/*
  * GMAL Command ( Get ( interconnect Element) Management Address List)
  * To retrieve the IP Address of a Switch.
  */
-
 #define CT_GMAL_RESP_PREFIX_TELNET	 "telnet://"
 #define CT_GMAL_RESP_PREFIX_HTTP	 "http://"
 
@@ -1764,7 +1458,7 @@
 
 /* Accept Response to GMAL */
 struct fcgs_gmal_resp_s {
-	u32	ms_len;   /* Num of entries */
+	__be32	ms_len;   /* Num of entries */
 	u8	ms_ma[256];
 };
 
@@ -1775,9 +1469,6 @@
 };
 
 /*
- * FDMI
- */
-/*
  * FDMI Command Codes
  */
 #define	FDMI_GRHL		0x0100
@@ -1856,8 +1547,8 @@
  * FDMI attribute
  */
 struct fdmi_attr_s {
-	u16        type;
-	u16        len;
+	__be16        type;
+	__be16        len;
 	u8         value[1];
 };
 
@@ -1865,7 +1556,7 @@
  * HBA Attribute Block
  */
 struct fdmi_hba_attr_s {
-	u32        attr_count;	/* # of attributes */
+	__be32        attr_count;	/* # of attributes */
 	struct fdmi_attr_s hba_attr;	/* n attributes */
 };
 
@@ -1873,15 +1564,15 @@
  * Registered Port List
  */
 struct fdmi_port_list_s {
-	u32        num_ports;	/* number Of Port Entries */
-	wwn_t           port_entry;	/* one or more */
+	__be32		num_ports;	/* number Of Port Entries */
+	wwn_t		port_entry;	/* one or more */
 };
 
 /*
  * Port Attribute Block
  */
 struct fdmi_port_attr_s {
-	u32        attr_count;	/* # of attributes */
+	__be32        attr_count;	/* # of attributes */
 	struct fdmi_attr_s port_attr;	/* n attributes */
 };
 
@@ -1889,7 +1580,7 @@
  * FDMI Register HBA Attributes
  */
 struct fdmi_rhba_s {
-	wwn_t           hba_id;		/* HBA Identifier */
+	wwn_t			hba_id;		/* HBA Identifier */
 	struct fdmi_port_list_s port_list;	/* Registered Port List */
 	struct fdmi_hba_attr_s hba_attr_blk;	/* HBA attribute block */
 };
@@ -1898,8 +1589,8 @@
  * FDMI Register Port
  */
 struct fdmi_rprt_s {
-	wwn_t           hba_id;		/* HBA Identifier */
-	wwn_t           port_name;	/* Port wwn */
+	wwn_t			hba_id;		/* HBA Identifier */
+	wwn_t			port_name;	/* Port wwn */
 	struct fdmi_port_attr_s port_attr_blk;	/* Port Attr Block */
 };
 
@@ -1907,7 +1598,7 @@
  * FDMI Register Port Attributes
  */
 struct fdmi_rpa_s {
-	wwn_t           port_name;	/* port wwn */
+	wwn_t			port_name;	/* port wwn */
 	struct fdmi_port_attr_s port_attr_blk;	/* Port Attr Block */
 };
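
Most of the bfa_fc.h changes above retype on-wire fields from u16/u32 to __be16/__be32 so that sparse can flag any access that skips a byte-order conversion. A minimal sketch of how such a field is read on the CPU side; the helper name is illustrative and not part of the patch:

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include "bfa_fc.h"	/* struct fchs_s, with ox_id declared __be16 after this patch */

	/* hypothetical helper: pull the originator exchange ID out of a received FC header */
	static inline u16 fchs_get_ox_id(const struct fchs_s *fchs)
	{
		return be16_to_cpu(fchs->ox_id);	/* big-endian on the wire, host order on the CPU */
	}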
 
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 9c72531..b7e2534 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -18,16 +18,16 @@
  * fcbuild.c - FC link service frame building and parsing routines
  */
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_fcbuild.h"
 
 /*
  * static build functions
  */
 static void     fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-				 u16 ox_id);
+				 __be16 ox_id);
 static void     fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-				 u16 ox_id);
+				 __be16 ox_id);
 static struct fchs_s fc_els_req_tmpl;
 static struct fchs_s fc_els_rsp_tmpl;
 static struct fchs_s fc_bls_req_tmpl;
@@ -48,7 +48,7 @@
 	fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST;
 	fc_els_req_tmpl.type = FC_TYPE_ELS;
 	fc_els_req_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
 			      FCTL_SI_XFER);
 	fc_els_req_tmpl.rx_id = FC_RXID_ANY;
 
@@ -59,7 +59,7 @@
 	fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY;
 	fc_els_rsp_tmpl.type = FC_TYPE_ELS;
 	fc_els_rsp_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
 			      FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_els_rsp_tmpl.rx_id = FC_RXID_ANY;
 
@@ -68,7 +68,7 @@
 	 */
 	fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK;
 	fc_bls_req_tmpl.type = FC_TYPE_BLS;
-	fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
+	fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_bls_req_tmpl.rx_id = FC_RXID_ANY;
 
 	/*
@@ -78,7 +78,7 @@
 	fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC;
 	fc_bls_rsp_tmpl.type = FC_TYPE_BLS;
 	fc_bls_rsp_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+		bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
 			      FCTL_END_SEQ | FCTL_SI_XFER);
 	fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY;
 
@@ -129,7 +129,7 @@
 	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
 	fcp_fchs_tmpl.type = FC_TYPE_FCP;
 	fcp_fchs_tmpl.f_ctl =
-		bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
+		bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);
 	fcp_fchs_tmpl.seq_id = 1;
 	fcp_fchs_tmpl.rx_id = FC_RXID_ANY;
 }
@@ -143,7 +143,7 @@
 	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
 	fchs->type = FC_TYPE_SERVICES;
 	fchs->f_ctl =
-		bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
+		bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ |
 			      FCTL_SI_XFER);
 	fchs->rx_id = FC_RXID_ANY;
 	fchs->d_id = (d_id);
@@ -157,7 +157,7 @@
 }
 
 void
-fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = (d_id);
@@ -166,7 +166,7 @@
 }
 
 static void
-fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
@@ -196,7 +196,7 @@
 }
 
 static void
-fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
 	memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
@@ -206,7 +206,7 @@
 
 static          u16
 fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		 u16 ox_id, wwn_t port_name, wwn_t node_name,
+		 __be16 ox_id, wwn_t port_name, wwn_t node_name,
 		 u16 pdu_size, u8 els_code)
 {
 	struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
@@ -232,8 +232,8 @@
 		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
 	       u8 set_npiv, u8 set_auth, u16 local_bb_credits)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
-	u32	*vvl_info;
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
+	__be32	*vvl_info;
 
 	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
@@ -267,7 +267,7 @@
 
 u16
 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
-		   u16 ox_id, wwn_t port_name, wwn_t node_name,
+		   __be16 ox_id, wwn_t port_name, wwn_t node_name,
 		   u16 pdu_size, u16 local_bb_credits)
 {
 	u32        d_id = 0;
@@ -289,7 +289,7 @@
 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
+	u32        d_id = bfa_hton3b(FC_FABRIC_PORT);
 
 	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
@@ -392,7 +392,7 @@
 
 u16
 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id, enum bfa_lport_role role)
+		  __be16 ox_id, enum bfa_lport_role role)
 {
 	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
@@ -456,9 +456,9 @@
 	return sizeof(struct fc_logo_s);
 }
 
-static          u16
+static u16
 fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		 u32 s_id, u16 ox_id, wwn_t port_name,
+		 u32 s_id, __be16 ox_id, wwn_t port_name,
 		 wwn_t node_name, u8 els_code)
 {
 	memset(adisc, '\0', sizeof(struct fc_adisc_s));
@@ -480,7 +480,7 @@
 
 u16
 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
+		u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name)
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
 				node_name, FC_ELS_ADISC);
@@ -488,7 +488,7 @@
 
 u16
 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		   u32 s_id, u16 ox_id, wwn_t port_name,
+		   u32 s_id, __be16 ox_id, wwn_t port_name,
 		   wwn_t node_name)
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
@@ -592,7 +592,7 @@
 
 u16
 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id)
+		  __be16 ox_id)
 {
 	struct fc_els_cmd_s *acc = pld;
 
@@ -606,7 +606,7 @@
 
 u16
 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
-		u32 s_id, u16 ox_id, u8 reason_code,
+		u32 s_id, __be16 ox_id, u8 reason_code,
 		u8 reason_code_expl)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
@@ -622,7 +622,7 @@
 
 u16
 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
-		u32 s_id, u16 ox_id, u16 rx_id)
+		u32 s_id, __be16 ox_id, u16 rx_id)
 {
 	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 
@@ -638,7 +638,7 @@
 
 u16
 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
-		u32 s_id, u16 ox_id)
+		u32 s_id, __be16 ox_id)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
@@ -666,7 +666,7 @@
 
 u16
 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages)
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
 {
 	int             page;
 
@@ -690,7 +690,7 @@
 
 u16
 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
-		  u32 s_id, u16 ox_id, int num_pages)
+		  u32 s_id, __be16 ox_id, int num_pages)
 {
 	int             page;
 
@@ -728,7 +728,7 @@
 
 u16
 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
-		  u32 s_id, u16 ox_id, u32 data_format,
+		  u32 s_id, __be16 ox_id, u32 data_format,
 		  struct fc_rnid_common_id_data_s *common_id_data,
 		  struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
@@ -770,10 +770,10 @@
 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
 		u32 s_id, u32 *pid_list, u16 npids)
 {
-	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
+	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id));
 	int i = 0;
 
-	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
+	fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0);
 
 	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
 
@@ -788,7 +788,7 @@
 
 u16
 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
-		u32 d_id, u32 s_id, u16 ox_id,
+		u32 d_id, u32 s_id, __be16 ox_id,
 		  struct fc_rpsc_speed_info_s *oper_speed)
 {
 	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
@@ -807,11 +807,6 @@
 	return sizeof(struct fc_rpsc_acc_s);
 }
 
-/*
- * TBD -
- * . get rid of unnecessary memsets
- */
-
 u16
 fc_logo_rsp_parse(struct fchs_s *fchs, int len)
 {
@@ -995,7 +990,7 @@
 }
 
 u16
-fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
 		u32 reason_code, u32 reason_expl)
 {
 	struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
@@ -1045,7 +1040,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
@@ -1061,7 +1056,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
@@ -1077,7 +1072,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
@@ -1104,7 +1099,7 @@
 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
 		u8 set_br_reg, u32 s_id, u16 ox_id)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
@@ -1121,7 +1116,7 @@
 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
 		u32 s_id, u16 ox_id)
 {
-	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
+	u32        d_id = bfa_hton3b(FC_FABRIC_CONTROLLER);
 	u16        payldlen;
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
@@ -1143,7 +1138,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
-	u32        type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        type_value, d_id = bfa_hton3b(FC_NAME_SERVER);
 	u8         index;
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1167,7 +1162,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
@@ -1187,7 +1182,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
-	u32         d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32         d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
@@ -1209,7 +1204,7 @@
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rspnid_req_s *rspnid =
 			(struct fcgs_rspnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
@@ -1229,7 +1224,7 @@
 
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 
@@ -1249,7 +1244,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
@@ -1267,7 +1262,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
@@ -1286,7 +1281,7 @@
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rcsid_req_s *rcsid =
 			(struct fcgs_rcsid_req_s *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
@@ -1304,7 +1299,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
@@ -1321,7 +1316,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
+	u32        d_id = bfa_hton3b(FC_NAME_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
@@ -1341,7 +1336,7 @@
 {
 
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code);
@@ -1356,7 +1351,7 @@
 fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
 {
 	u8         index;
-	u32       *ptr = (u32 *) bit_mask;
+	__be32       *ptr = (__be32 *) bit_mask;
 	u32        type_value;
 
 	/*
@@ -1377,7 +1372,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
@@ -1397,7 +1392,7 @@
 {
 	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
-	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
+	u32        d_id = bfa_hton3b(FC_MGMT_SERVER);
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
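
Throughout bfa_fcbuild.c the bfa_os_hton3b() helper is renamed to bfa_hton3b(); it puts a 24-bit FC address (domain/area/AL_PA) into wire byte order. An endian-neutral sketch of the equivalent operation, for orientation only, since the driver's actual macro lives in its headers and is not shown in this hunk:

	#include <linux/types.h>

	/* illustrative only: place a 24-bit FC port ID into big-endian wire order */
	static inline void fc_id_to_wire(u32 fc_id, u8 buf[3])
	{
		buf[0] = (fc_id >> 16) & 0xff;	/* domain */
		buf[1] = (fc_id >> 8) & 0xff;	/* area   */
		buf[2] = fc_id & 0xff;		/* AL_PA  */
	}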
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index 73abd02..ece51ec 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -21,7 +21,7 @@
 #ifndef __FCBUILD_H__
 #define __FCBUILD_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_fc.h"
 #include "bfa_defs_fcs.h"
 
@@ -138,7 +138,7 @@
 			       u16 pdu_size);
 
 u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
-				   u32 s_id, u16 ox_id,
+				   u32 s_id, __be16 ox_id,
 				   wwn_t port_name, wwn_t node_name,
 				   u16 pdu_size,
 				   u16 local_bb_credits);
@@ -186,7 +186,7 @@
 				   u16 pdu_size);
 
 u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
-			u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
+			u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
 			       wwn_t node_name);
 
 enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
@@ -196,20 +196,20 @@
 				 wwn_t port_name, wwn_t node_name);
 
 u16        fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
-				   u32 d_id, u32 s_id, u16 ox_id,
+				   u32 d_id, u32 s_id, __be16 ox_id,
 				   wwn_t port_name, wwn_t node_name);
 u16        fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
-				u32 d_id, u32 s_id, u16 ox_id,
+				u32 d_id, u32 s_id, __be16 ox_id,
 				u8 reason_code, u8 reason_code_expl);
 u16        fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
-				u32 d_id, u32 s_id, u16 ox_id);
+				u32 d_id, u32 s_id, __be16 ox_id);
 u16        fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
 			      u32 s_id, u16 ox_id);
 
 enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
 
 u16        fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
-				  u32 s_id, u16 ox_id,
+				  u32 s_id, __be16 ox_id,
 				  enum bfa_lport_role role);
 
 u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
@@ -218,7 +218,7 @@
 
 u16        fc_rnid_acc_build(struct fchs_s *fchs,
 			struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
-			u16 ox_id, u32 data_format,
+			__be16 ox_id, u32 data_format,
 			struct fc_rnid_common_id_data_s *common_id_data,
 			struct fc_rnid_general_topology_data_s *gen_topo_data);
 
@@ -228,7 +228,7 @@
 			      u32 d_id, u32 s_id, u16 ox_id);
 u16        fc_rpsc_acc_build(struct fchs_s *fchs,
 			struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
-			u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
+			__be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
 u16        fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
 				u8 fc4_type);
 
@@ -251,7 +251,7 @@
 			      u32 s_id, u16 ox_id, wwn_t port_name);
 
 u16        fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
-				  u32 s_id, u16 ox_id);
+				  u32 s_id, __be16 ox_id);
 
 u16        fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 				     u16 cmd_code);
@@ -261,7 +261,7 @@
 void		fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
 
 void		fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-					 u16 ox_id);
+					 __be16 ox_id);
 
 enum fc_parse_status	fc_els_rsp_parse(struct fchs_s *fchs, int len);
 
@@ -274,15 +274,15 @@
 					wwn_t port_name);
 
 u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
-		u32 s_id, u16 ox_id, u16 rx_id);
+		u32 s_id, __be16 ox_id, u16 rx_id);
 
 int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
 
 u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
 u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
-		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
 u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
 
@@ -304,7 +304,7 @@
 u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
 
 u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-		u16 ox_id, u32 reason_code, u32 reason_expl);
+		__be16 ox_id, u32 reason_code, u32 reason_expl);
 
 u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 		u32 port_id);
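
In bfa_fcpim.c below, the driver-private bfa_assert() checks are converted to the kernel's WARN_ON(). Note the inverted condition: bfa_assert() fired when its argument was false, whereas WARN_ON() warns when its argument is true. Simplified from the bfa_fcpim_delitn() hunk that follows:

	/* before: driver-private assert that the IO queue is empty */
	bfa_assert(list_empty(&itnim->io_q));

	/* after: standard kernel warning (with backtrace) if it is not empty */
	WARN_ON(!list_empty(&itnim->io_q));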
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 135c442..c0353cd 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -15,17 +15,12 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
-#include "bfa_cb_ioim.h"
 
 BFA_TRC_FILE(HAL, FCPIM);
 BFA_MODULE(fcpim);
 
-
-#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
-	(__l->__stats += __r->__stats)
-
-
 /*
  *  BFA ITNIM Related definitions
  */
@@ -37,12 +32,12 @@
 #define bfa_fcpim_additn(__itnim)					\
 	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
 #define bfa_fcpim_delitn(__itnim)	do {				\
-	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));      \
+	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
 	bfa_itnim_update_del_itn_stats(__itnim);      \
 	list_del(&(__itnim)->qe);      \
-	bfa_assert(list_empty(&(__itnim)->io_q));      \
-	bfa_assert(list_empty(&(__itnim)->io_cleanup_q));      \
-	bfa_assert(list_empty(&(__itnim)->pending_q));      \
+	WARN_ON(!list_empty(&(__itnim)->io_q));				\
+	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
+	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
 } while (0)
 
 #define bfa_itnim_online_cb(__itnim) do {				\
@@ -73,10 +68,8 @@
 } while (0)
 
 /*
- *  bfa_itnim_sm BFA itnim state machine
+ *  itnim state machine event
  */
-
-
 enum bfa_itnim_event {
 	BFA_ITNIM_SM_CREATE = 1,	/*  itnim is created */
 	BFA_ITNIM_SM_ONLINE = 2,	/*  itnim is online */
@@ -107,9 +100,6 @@
 	if ((__fcpim)->profile_start)					\
 		(__fcpim)->profile_start(__ioim);			\
 } while (0)
-/*
- *  hal_ioim_sm
- */
 
 /*
  * IO state machine events
@@ -221,8 +211,7 @@
  * forward declaration for BFA IOIM functions
  */
 static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
-static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
-static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
+static bfa_boolean_t	bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
 static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
 static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
 static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
@@ -232,7 +221,6 @@
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
 
-
 /*
  * forward declaration of BFA IO state machine
  */
@@ -260,14 +248,13 @@
 					enum bfa_ioim_event event);
 static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
 					enum bfa_ioim_event event);
-
 /*
  * forward declaration for BFA TSKIM functions
  */
 static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
 static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
-					lun_t lun);
+					struct scsi_lun lun);
 static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
 static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
 static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
@@ -275,7 +262,6 @@
 static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
 static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
 
-
 /*
  * forward declaration of BFA TSKIM state machine
  */
@@ -293,13 +279,12 @@
 					enum bfa_tskim_event event);
 static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
 					enum bfa_tskim_event event);
-
 /*
- *  hal_fcpim_mod BFA FCP Initiator Mode module
+ *  BFA FCP Initiator Mode module
  */
 
 /*
- *	Compute and return memory needed by FCP(im) module.
+ * Compute and return memory needed by FCP(im) module.
  */
 static void
 bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
@@ -357,10 +342,6 @@
 static void
 bfa_fcpim_detach(struct bfa_s *bfa)
 {
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-
-	bfa_ioim_detach(fcpim);
-	bfa_tskim_detach(fcpim);
 }
 
 static void
@@ -387,56 +368,6 @@
 }
 
 void
-bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
-		struct bfa_itnim_iostats_s *rstats)
-{
-	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
-	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
-	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
-	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
-	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
-	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
-	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
-	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
-	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
-	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
-	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
-	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
-	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
-	bfa_fcpim_add_iostats(lstats, rstats, onlines);
-	bfa_fcpim_add_iostats(lstats, rstats, offlines);
-	bfa_fcpim_add_iostats(lstats, rstats, creates);
-	bfa_fcpim_add_iostats(lstats, rstats, deletes);
-	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
-	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
-	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
-	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
-	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
-	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
-	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
-	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
-	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
-	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
-	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
-	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
-	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
-}
-
-void
 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -454,128 +385,6 @@
 	return fcpim->path_tov / 1000;
 }
 
-bfa_status_t
-bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
-	u8 lp_tag)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct list_head *qe, *qen;
-	struct bfa_itnim_s *itnim;
-
-	/* accumulate IO stats from itnim */
-	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
-	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
-		itnim = (struct bfa_itnim_s *) qe;
-		if (itnim->rport->rport_info.lp_tag != lp_tag)
-			continue;
-		bfa_fcpim_add_stats(stats, &(itnim->stats));
-	}
-	return BFA_STATUS_OK;
-}
-bfa_status_t
-bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct list_head *qe, *qen;
-	struct bfa_itnim_s *itnim;
-
-	/* accumulate IO stats from itnim */
-	memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
-	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
-		itnim = (struct bfa_itnim_s *) qe;
-		bfa_fcpim_add_stats(modstats, &(itnim->stats));
-	}
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
-	 struct bfa_fcpim_del_itn_stats_s *modstats)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-
-	*modstats = fcpim->del_itn_stats;
-
-	return BFA_STATUS_OK;
-}
-
-
-bfa_status_t
-bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
-{
-	struct bfa_itnim_s *itnim;
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct list_head *qe, *qen;
-
-	/* accumulate IO stats from itnim */
-	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
-		itnim = (struct bfa_itnim_s *) qe;
-		bfa_itnim_clear_stats(itnim);
-	}
-	fcpim->io_profile = BFA_TRUE;
-	fcpim->io_profile_start_time = time;
-	fcpim->profile_comp = bfa_ioim_profile_comp;
-	fcpim->profile_start = bfa_ioim_profile_start;
-
-	return BFA_STATUS_OK;
-}
-bfa_status_t
-bfa_fcpim_profile_off(struct bfa_s *bfa)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	fcpim->io_profile = BFA_FALSE;
-	fcpim->io_profile_start_time = 0;
-	fcpim->profile_comp = NULL;
-	fcpim->profile_start = NULL;
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct list_head *qe, *qen;
-	struct bfa_itnim_s *itnim;
-
-	/* clear IO stats from all active itnims */
-	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
-		itnim = (struct bfa_itnim_s *) qe;
-		if (itnim->rport->rport_info.lp_tag != lp_tag)
-			continue;
-		bfa_itnim_clear_stats(itnim);
-	}
-	return BFA_STATUS_OK;
-
-}
-
-bfa_status_t
-bfa_fcpim_clr_modstats(struct bfa_s *bfa)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct list_head *qe, *qen;
-	struct bfa_itnim_s *itnim;
-
-	/* clear IO stats from all active itnims */
-	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
-		itnim = (struct bfa_itnim_s *) qe;
-		bfa_itnim_clear_stats(itnim);
-	}
-	memset(&fcpim->del_itn_stats, 0,
-		sizeof(struct bfa_fcpim_del_itn_stats_s));
-
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-
-	bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
-
-	fcpim->q_depth = q_depth;
-}
-
 u16
 bfa_fcpim_qdepth_get(struct bfa_s *bfa)
 {
@@ -584,32 +393,12 @@
 	return fcpim->q_depth;
 }
 
-void
-bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
-{
-	bfa_boolean_t ioredirect;
-
-	/*
-	 * IO redirection is turned off when QoS is enabled and vice versa
-	 */
-	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
-}
-
-void
-bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	fcpim->ioredirect = state;
-}
-
-
-
 /*
  *  BFA ITNIM module state machine functions
  */
 
 /*
- *	Beginning/unallocated state - no events expected.
+ * Beginning/unallocated state - no events expected.
  */
 static void
 bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -630,7 +419,7 @@
 }
 
 /*
- *	Beginning state, only online event expected.
+ * Beginning state, only online event expected.
  */
 static void
 bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -733,7 +522,7 @@
 }
 
 /*
- *	Waiting for itnim create response from firmware, a delete is pending.
+ * Waiting for itnim create response from firmware, a delete is pending.
  */
 static void
 bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
@@ -761,7 +550,7 @@
 }
 
 /*
- *	Online state - normal parking state.
+ * Online state - normal parking state.
  */
 static void
 bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -803,7 +592,7 @@
 }
 
 /*
- *	Second level error recovery need.
+ * Second level error recovery need.
  */
 static void
 bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -834,7 +623,7 @@
 }
 
 /*
- *	Going offline. Waiting for active IO cleanup.
+ * Going offline. Waiting for active IO cleanup.
  */
 static void
 bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
@@ -871,7 +660,7 @@
 }
 
 /*
- *	Deleting itnim. Waiting for active IO cleanup.
+ * Deleting itnim. Waiting for active IO cleanup.
  */
 static void
 bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
@@ -956,7 +745,7 @@
 }
 
 /*
- *	Offline state.
+ * Offline state.
  */
 static void
 bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -987,9 +776,6 @@
 	}
 }
 
-/*
- *	IOC h/w failed state.
- */
 static void
 bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
 				enum bfa_itnim_event event)
@@ -1024,7 +810,7 @@
 }
 
 /*
- *	Itnim is deleted, waiting for firmware response to delete.
+ * Itnim is deleted, waiting for firmware response to delete.
  */
 static void
 bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@ -1069,7 +855,7 @@
 }
 
 /*
- *	Initiate cleanup of all IOs on an IOC failure.
+ * Initiate cleanup of all IOs on an IOC failure.
  */
 static void
 bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
@@ -1103,7 +889,7 @@
 }
 
 /*
- *	IO cleanup completion
+ * IO cleanup completion
  */
 static void
 bfa_itnim_cleanp_comp(void *itnim_cbarg)
@@ -1115,7 +901,7 @@
 }
 
 /*
- *	Initiate cleanup of all IOs.
+ * Initiate cleanup of all IOs.
  */
 static void
 bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
@@ -1187,9 +973,6 @@
 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
 }
 
-
-
-
 /*
  *  bfa_itnim_public
  */
@@ -1401,7 +1184,7 @@
 	if (itnim->fcpim->path_tov > 0) {
 
 		itnim->iotov_active = BFA_TRUE;
-		bfa_assert(bfa_itnim_hold_io(itnim));
+		WARN_ON(!bfa_itnim_hold_io(itnim));
 		bfa_timer_start(itnim->bfa, &itnim->timer,
 			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
 	}
@@ -1457,14 +1240,12 @@
 	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
 }
 
-
-
 /*
- *  bfa_itnim_public
+ * bfa_itnim_public
  */
 
 /*
- *	Itnim interrupt processing.
+ * Itnim interrupt processing.
  */
 void
 bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
@@ -1481,7 +1262,7 @@
 	case BFI_ITNIM_I2H_CREATE_RSP:
 		itnim = BFA_ITNIM_FROM_TAG(fcpim,
 						msg.create_rsp->bfa_handle);
-		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
+		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
 		bfa_stats(itnim, create_comps);
 		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
 		break;
@@ -1489,7 +1270,7 @@
 	case BFI_ITNIM_I2H_DELETE_RSP:
 		itnim = BFA_ITNIM_FROM_TAG(fcpim,
 						msg.delete_rsp->bfa_handle);
-		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
+		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
 		bfa_stats(itnim, delete_comps);
 		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
 		break;
@@ -1503,14 +1284,12 @@
 
 	default:
 		bfa_trc(bfa, m->mhdr.msg_id);
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
-
-
 /*
- *  bfa_itnim_api
+ * bfa_itnim_api
  */
 
 struct bfa_itnim_s *
@@ -1520,7 +1299,7 @@
 	struct bfa_itnim_s *itnim;
 
 	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
-	bfa_assert(itnim->rport == rport);
+	WARN_ON(itnim->rport != rport);
 
 	itnim->ditn = ditn;
 
@@ -1568,31 +1347,6 @@
 		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
 }
 
-bfa_status_t
-bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
-		struct bfa_itnim_ioprofile_s *ioprofile)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
-	if (!fcpim->io_profile)
-		return BFA_STATUS_IOPROFILE_OFF;
-
-	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
-	itnim->ioprofile.io_profile_start_time =
-		bfa_io_profile_start_time(itnim->bfa);
-	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
-	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
-	*ioprofile = itnim->ioprofile;
-
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
-	struct bfa_itnim_iostats_s *stats)
-{
-	*stats = itnim->stats;
-}
-
 void
 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
 {
@@ -1608,14 +1362,11 @@
  */
 
 /*
- *	IO is not started (unallocated).
+ * IO is not started (unallocated).
  */
 static void
 bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 {
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
 	switch (event) {
 	case BFA_IOIM_SM_START:
 		if (!bfa_itnim_is_online(ioim->itnim)) {
@@ -1635,7 +1386,7 @@
 		}
 
 		if (ioim->nsges > BFI_SGE_INLINE) {
-			if (!bfa_ioim_sge_setup(ioim)) {
+			if (!bfa_ioim_sgpg_alloc(ioim)) {
 				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
 				return;
 			}
@@ -1662,7 +1413,7 @@
 		 * requests immediately.
 		 */
 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
+		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
 		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
 				__bfa_cb_ioim_abort, ioim);
 		break;
@@ -1673,7 +1424,7 @@
 }
 
 /*
- *	IO is waiting for SG pages.
+ * IO is waiting for SG pages.
  */
 static void
 bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -1720,14 +1471,11 @@
 }
 
 /*
- *	IO is active.
+ * IO is active.
  */
 static void
 bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 {
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
 	switch (event) {
 	case BFA_IOIM_SM_COMP_GOOD:
 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
@@ -1786,8 +1534,8 @@
 		break;
 
 	case BFA_IOIM_SM_SQRETRY:
-		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
-			/* max retry completed free IO */
+		if (bfa_ioim_maxretry_reached(ioim)) {
+			/* max retry reached, free IO */
 			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
 			bfa_ioim_move_to_comp_q(ioim);
 			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
@@ -1804,17 +1552,15 @@
 }
 
 /*
-*	IO is retried with new tag.
-*/
+ * IO is retried with new tag.
+ */
 static void
 bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 {
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
 	switch (event) {
 	case BFA_IOIM_SM_FREE:
 		/* abts and rrq done. Now retry the IO with new tag */
+		bfa_ioim_update_iotag(ioim);
 		if (!bfa_ioim_send_ioreq(ioim)) {
 			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
 			break;
@@ -1858,7 +1604,7 @@
 }
 
 /*
- *	IO is being aborted, waiting for completion from firmware.
+ * IO is being aborted, waiting for completion from firmware.
  */
 static void
 bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -1894,7 +1640,7 @@
 		break;
 
 	case BFA_IOIM_SM_CLEANUP:
-		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
+		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
 		ioim->iosp->abort_explicit = BFA_FALSE;
 
 		if (bfa_ioim_send_abort(ioim))
@@ -1981,7 +1727,7 @@
 }
 
 /*
- *	IO is waiting for room in request CQ
+ * IO is waiting for room in request CQ
  */
 static void
 bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -2025,7 +1771,7 @@
 }
 
 /*
- *	Active IO is being aborted, waiting for room in request CQ.
+ * Active IO is being aborted, waiting for room in request CQ.
  */
 static void
 bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -2040,7 +1786,7 @@
 		break;
 
 	case BFA_IOIM_SM_CLEANUP:
-		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
+		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
 		ioim->iosp->abort_explicit = BFA_FALSE;
 		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
 		break;
@@ -2076,7 +1822,7 @@
 }
 
 /*
- *	Active IO is being cleaned up, waiting for room in request CQ.
+ * Active IO is being cleaned up, waiting for room in request CQ.
  */
 static void
 bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
@@ -2092,7 +1838,7 @@
 
 	case BFA_IOIM_SM_ABORT:
 		/*
-		 * IO is alraedy being cleaned up implicitly
+		 * IO is already being cleaned up implicitly
 		 */
 		ioim->io_cbfn = __bfa_cb_ioim_abort;
 		break;
@@ -2131,9 +1877,6 @@
 static void
 bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 {
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
 	switch (event) {
 	case BFA_IOIM_SM_HCB:
 		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
@@ -2213,11 +1956,6 @@
 }
 
 
-
-/*
- *  hal_ioim_private
- */
-
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
 {
@@ -2323,7 +2061,7 @@
 
 	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
 	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
-	bfa_ioim_sgpg_setup(ioim);
+	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
 }
 
@@ -2335,13 +2073,16 @@
 {
 	struct bfa_itnim_s *itnim = ioim->itnim;
 	struct bfi_ioim_req_s *m;
-	static struct fcp_cmnd_s cmnd_z0 = { 0 };
-	struct bfi_sge_s      *sge;
+	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
+	struct bfi_sge_s *sge, *sgpge;
 	u32	pgdlen = 0;
 	u32	fcp_dl;
 	u64 addr;
 	struct scatterlist *sg;
+	struct bfa_sgpg_s *sgpg;
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
+	u32 i, sge_id, pgcumsz;
+	enum dma_data_direction dmadir;
 
 	/*
 	 * check for room in queue to send request now
@@ -2359,22 +2100,61 @@
 	 */
 	m->io_tag = cpu_to_be16(ioim->iotag);
 	m->rport_hdl = ioim->itnim->rport->fw_handle;
-	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
+	m->io_timeout = 0;
 
-	/*
-	 * build inline IO SG element here
-	 */
 	sge = &m->sges[0];
-	if (ioim->nsges) {
-		sg = (struct scatterlist *)scsi_sglist(cmnd);
-		addr = bfa_os_sgaddr(sg_dma_address(sg));
-		sge->sga = *(union bfi_addr_u *) &addr;
-		pgdlen = sg_dma_len(sg);
-		sge->sg_len = pgdlen;
-		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
+	sgpg = ioim->sgpg;
+	sge_id = 0;
+	sgpge = NULL;
+	pgcumsz = 0;
+	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
+		if (i == 0) {
+			/* build inline IO SG element */
+			addr = bfa_sgaddr_le(sg_dma_address(sg));
+			sge->sga = *(union bfi_addr_u *) &addr;
+			pgdlen = sg_dma_len(sg);
+			sge->sg_len = pgdlen;
+			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
 					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
-		bfa_sge_to_be(sge);
-		sge++;
+			bfa_sge_to_be(sge);
+			sge++;
+		} else {
+			if (sge_id == 0)
+				sgpge = sgpg->sgpg->sges;
+
+			addr = bfa_sgaddr_le(sg_dma_address(sg));
+			sgpge->sga = *(union bfi_addr_u *) &addr;
+			sgpge->sg_len = sg_dma_len(sg);
+			pgcumsz += sgpge->sg_len;
+
+			/* set flags */
+			if (i < (ioim->nsges - 1) &&
+					sge_id < (BFI_SGPG_DATA_SGES - 1))
+				sgpge->flags = BFI_SGE_DATA;
+			else if (i < (ioim->nsges - 1))
+				sgpge->flags = BFI_SGE_DATA_CPL;
+			else
+				sgpge->flags = BFI_SGE_DATA_LAST;
+
+			bfa_sge_to_le(sgpge);
+
+			sgpge++;
+			if (i == (ioim->nsges - 1)) {
+				sgpge->flags = BFI_SGE_PGDLEN;
+				sgpge->sga.a32.addr_lo = 0;
+				sgpge->sga.a32.addr_hi = 0;
+				sgpge->sg_len = pgcumsz;
+				bfa_sge_to_le(sgpge);
+			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
+				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
+				sgpge->flags = BFI_SGE_LINK;
+				sgpge->sga = sgpg->sgpg_pa;
+				sgpge->sg_len = pgcumsz;
+				bfa_sge_to_le(sgpge);
+				sge_id = 0;
+				pgcumsz = 0;
+			}
+		}
 	}
 
 	if (ioim->nsges > BFI_SGE_INLINE) {
@@ -2391,10 +2171,17 @@
 	 * set up I/O command parameters
 	 */
 	m->cmnd = cmnd_z0;
-	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
-	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
-	m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
-	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
+	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
+	dmadir = cmnd->sc_data_direction;
+	if (dmadir == DMA_TO_DEVICE)
+		m->cmnd.iodir = FCP_IODIR_WRITE;
+	else if (dmadir == DMA_FROM_DEVICE)
+		m->cmnd.iodir = FCP_IODIR_READ;
+	else
+		m->cmnd.iodir = FCP_IODIR_NONE;
+
+	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
+	fcp_dl = scsi_bufflen(cmnd);
 	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
 
 	/*
@@ -2418,28 +2205,9 @@
 		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
 	}
 	if (itnim->seq_rec ||
-	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
+	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
 		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
 
-#ifdef IOIM_ADVANCED
-	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
-	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
-	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
-
-	/*
-	 * Handle large CDB (>16 bytes).
-	 */
-	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
-					FCP_CMND_CDB_LEN) / sizeof(u32);
-	if (m->cmnd.addl_cdb_len) {
-		memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
-				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
-				m->cmnd.addl_cdb_len * sizeof(u32));
-		fcp_cmnd_fcpdl(&m->cmnd) =
-				cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
-	}
-#endif
-
 	/*
 	 * queue I/O message to firmware
 	 */
@@ -2452,11 +2220,11 @@
  * at queuing time.
  */
 static bfa_boolean_t
-bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
+bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
 {
 	u16	nsgpgs;
 
-	bfa_assert(ioim->nsges > BFI_SGE_INLINE);
+	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
 
 	/*
 	 * allocate SG pages needed
@@ -2472,73 +2240,11 @@
 	}
 
 	ioim->nsgpgs = nsgpgs;
-	bfa_ioim_sgpg_setup(ioim);
+	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
 
 	return BFA_TRUE;
 }
 
-static void
-bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
-{
-	int		sgeid, nsges, i;
-	struct bfi_sge_s      *sge;
-	struct bfa_sgpg_s *sgpg;
-	u32	pgcumsz;
-	u64        addr;
-	struct scatterlist *sg;
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
-
-	sgeid = BFI_SGE_INLINE;
-	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
-
-	sg = scsi_sglist(cmnd);
-	sg = sg_next(sg);
-
-	do {
-		sge = sgpg->sgpg->sges;
-		nsges = ioim->nsges - sgeid;
-		if (nsges > BFI_SGPG_DATA_SGES)
-			nsges = BFI_SGPG_DATA_SGES;
-
-		pgcumsz = 0;
-		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
-			addr = bfa_os_sgaddr(sg_dma_address(sg));
-			sge->sga = *(union bfi_addr_u *) &addr;
-			sge->sg_len = sg_dma_len(sg);
-			pgcumsz += sge->sg_len;
-
-			/*
-			 * set flags
-			 */
-			if (i < (nsges - 1))
-				sge->flags = BFI_SGE_DATA;
-			else if (sgeid < (ioim->nsges - 1))
-				sge->flags = BFI_SGE_DATA_CPL;
-			else
-				sge->flags = BFI_SGE_DATA_LAST;
-
-			bfa_sge_to_le(sge);
-		}
-
-		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
-
-		/*
-		 * set the link element of each page
-		 */
-		if (sgeid == ioim->nsges) {
-			sge->flags = BFI_SGE_PGDLEN;
-			sge->sga.a32.addr_lo = 0;
-			sge->sga.a32.addr_hi = 0;
-		} else {
-			sge->flags = BFI_SGE_LINK;
-			sge->sga = sgpg->sgpg_pa;
-		}
-		sge->sg_len = pgcumsz;
-
-		bfa_sge_to_le(sge);
-	} while (sgeid < ioim->nsges);
-}
-
 /*
  * Send I/O abort request to firmware.
  */
@@ -2605,7 +2311,7 @@
 		}
 		bfa_itnim_iodone(ioim->itnim);
 	} else
-		bfa_tskim_iodone(ioim->iosp->tskim);
+		bfa_wc_down(&ioim->iosp->tskim->wc);
 }
 
 static bfa_boolean_t
@@ -2623,9 +2329,6 @@
 	return BFA_TRUE;
 }
 
-/*
- *	or after the link comes back.
- */
 void
 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
 {
@@ -2653,11 +2356,6 @@
 }
 
 
-
-/*
- *  hal_ioim_friend
- */
-
 /*
  * Memory allocation and initialization.
  */
@@ -2722,14 +2420,6 @@
 	}
 }
 
-/*
- * Driver detach time call.
- */
-void
-bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
-{
-}
-
 void
 bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
@@ -2742,7 +2432,7 @@
 	iotag = be16_to_cpu(rsp->io_tag);
 
 	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
-	bfa_assert(ioim->iotag == iotag);
+	WARN_ON(ioim->iotag != iotag);
 
 	bfa_trc(ioim->bfa, ioim->iotag);
 	bfa_trc(ioim->bfa, rsp->io_status);
@@ -2773,13 +2463,13 @@
 
 	case BFI_IOIM_STS_PROTO_ERR:
 		bfa_stats(ioim->itnim, iocom_proto_err);
-		bfa_assert(rsp->reuse_io_tag);
+		WARN_ON(!rsp->reuse_io_tag);
 		evt = BFA_IOIM_SM_COMP;
 		break;
 
 	case BFI_IOIM_STS_SQER_NEEDED:
 		bfa_stats(ioim->itnim, iocom_sqer_needed);
-		bfa_assert(rsp->reuse_io_tag == 0);
+		WARN_ON(rsp->reuse_io_tag != 0);
 		evt = BFA_IOIM_SM_SQRETRY;
 		break;
 
@@ -2808,7 +2498,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 
 	bfa_sm_send_event(ioim, evt);
@@ -2825,39 +2515,12 @@
 	iotag = be16_to_cpu(rsp->io_tag);
 
 	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
-	bfa_assert(ioim->iotag == iotag);
+	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
 
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
 	bfa_ioim_cb_profile_comp(fcpim, ioim);
-
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
 }
 
-void
-bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
-{
-	ioim->start_time = jiffies;
-}
-
-void
-bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
-{
-	u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
-	u32 index = bfa_ioim_get_index(fcp_dl);
-	u64 end_time = jiffies;
-	struct bfa_itnim_latency_s *io_lat =
-			&(ioim->itnim->ioprofile.io_latency);
-	u32 val = (u32)(end_time - ioim->start_time);
-
-	bfa_itnim_ioprofile_update(ioim->itnim, index);
-
-	io_lat->count[index]++;
-	io_lat->min[index] = (io_lat->min[index] < val) ?
-		io_lat->min[index] : val;
-	io_lat->max[index] = (io_lat->max[index] > val) ?
-		io_lat->max[index] : val;
-	io_lat->avg[index] += val;
-}
 /*
  * Called by itnim to clean up IO while going offline.
  */
@@ -2903,11 +2566,6 @@
 }
 
 
-
-/*
- *  hal_ioim_api
- */
-
 /*
  * Allocate IOIM resource for initiator mode I/O request.
  */
@@ -2936,7 +2594,6 @@
 	fcpim->ios_active++;
 
 	list_add_tail(&ioim->qe, &itnim->io_q);
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
 
 	return ioim;
 }
@@ -2946,18 +2603,13 @@
 {
 	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
 
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
-
-	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
-			(ioim->nsges > BFI_SGE_INLINE));
-
 	if (ioim->nsgpgs > 0)
 		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
 
 	bfa_stats(ioim->itnim, io_comps);
 	fcpim->ios_active--;
 
+	ioim->iotag &= BFA_IOIM_IOTAG_MASK;
 	list_del(&ioim->qe);
 	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
 }
@@ -2965,16 +2617,13 @@
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-
 	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
 	/*
 	 * Obtain the queue over which this request has to be issued
 	 */
 	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
-			bfa_cb_ioim_get_reqq(ioim->dio) :
-			bfa_itnim_get_reqq(ioim);
+			BFA_FALSE : bfa_itnim_get_reqq(ioim);
 
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
 }
@@ -2997,13 +2646,12 @@
 	return BFA_STATUS_OK;
 }
 
-
 /*
  *  BFA TSKIM state machine functions
  */
 
 /*
- *	Task management command beginning state.
+ * Task management command beginning state.
  */
 static void
 bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3040,9 +2688,8 @@
 }
 
 /*
- * brief
- *	TM command is active, awaiting completion from firmware to
- *	cleanup IO requests in TM scope.
+ * TM command is active, awaiting completion from firmware to
+ * cleanup IO requests in TM scope.
  */
 static void
 bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3077,8 +2724,8 @@
 }
 
 /*
- *	An active TM is being cleaned up since ITN is offline. Awaiting cleanup
- *	completion event from firmware.
+ * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
+ * completion event from firmware.
  */
 static void
 bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3138,7 +2785,7 @@
 }
 
 /*
- *	Task management command is waiting for room in request CQ
+ * Task management command is waiting for room in request CQ
  */
 static void
 bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3173,8 +2820,8 @@
 }
 
 /*
- *	Task management command is active, awaiting for room in request CQ
- *	to send clean up request.
+ * Task management command is active, awaiting for room in request CQ
+ * to send clean up request.
  */
 static void
 bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
@@ -3186,10 +2833,8 @@
 	case BFA_TSKIM_SM_DONE:
 		bfa_reqq_wcancel(&tskim->reqq_wait);
 		/*
-		 *
 		 * Fall through !!!
 		 */
-
 	case BFA_TSKIM_SM_QRESUME:
 		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
 		bfa_tskim_send_abort(tskim);
@@ -3208,7 +2853,7 @@
 }
 
 /*
- *	BFA callback is pending
+ * BFA callback is pending
  */
 static void
 bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@ -3233,12 +2878,6 @@
 	}
 }
 
-
-
-/*
- *  hal_tskim_private
- */
-
 static void
 __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
 {
@@ -3268,8 +2907,8 @@
 				BFI_TSKIM_STS_FAILED);
 }
 
-static	bfa_boolean_t
-bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
+static bfa_boolean_t
+bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
 {
 	switch (tskim->tm_cmnd) {
 	case FCP_TM_TARGET_RESET:
@@ -3279,24 +2918,26 @@
 	case FCP_TM_CLEAR_TASK_SET:
 	case FCP_TM_LUN_RESET:
 	case FCP_TM_CLEAR_ACA:
-		return (tskim->lun == lun);
+		return !memcmp(&tskim->lun, &lun, sizeof(lun));
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 
 	return BFA_FALSE;
 }
 
 /*
- *	Gather affected IO requests and task management commands.
+ * Gather affected IO requests and task management commands.
  */
 static void
 bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
 {
 	struct bfa_itnim_s *itnim = tskim->itnim;
 	struct bfa_ioim_s *ioim;
-	struct list_head	*qe, *qen;
+	struct list_head *qe, *qen;
+	struct scsi_cmnd *cmnd;
+	struct scsi_lun scsilun;
 
 	INIT_LIST_HEAD(&tskim->io_q);
 
@@ -3305,8 +2946,9 @@
 	 */
 	list_for_each_safe(qe, qen, &itnim->io_q) {
 		ioim = (struct bfa_ioim_s *) qe;
-		if (bfa_tskim_match_scope
-			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
+		cmnd = (struct scsi_cmnd *) ioim->dio;
+		int_to_scsilun(cmnd->device->lun, &scsilun);
+		if (bfa_tskim_match_scope(tskim, scsilun)) {
 			list_del(&ioim->qe);
 			list_add_tail(&ioim->qe, &tskim->io_q);
 		}
@@ -3317,8 +2959,9 @@
 	 */
 	list_for_each_safe(qe, qen, &itnim->pending_q) {
 		ioim = (struct bfa_ioim_s *) qe;
-		if (bfa_tskim_match_scope
-			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
+		cmnd = (struct scsi_cmnd *) ioim->dio;
+		int_to_scsilun(cmnd->device->lun, &scsilun);
+		if (bfa_tskim_match_scope(tskim, scsilun)) {
 			list_del(&ioim->qe);
 			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
 			bfa_ioim_tov(ioim);
@@ -3327,7 +2970,7 @@
 }
 
 /*
- *	IO cleanup completion
+ * IO cleanup completion
  */
 static void
 bfa_tskim_cleanp_comp(void *tskim_cbarg)
@@ -3339,7 +2982,7 @@
 }
 
 /*
- *	Gather affected IO requests and task management commands.
+ * Gather affected IO requests and task management commands.
  */
 static void
 bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
@@ -3359,7 +3002,7 @@
 }
 
 /*
- *	Send task management request to firmware.
+ * Send task management request to firmware.
  */
 static bfa_boolean_t
 bfa_tskim_send(struct bfa_tskim_s *tskim)
@@ -3394,7 +3037,7 @@
 }
 
 /*
- *	Send abort request to cleanup an active TM to firmware.
+ * Send abort request to cleanup an active TM to firmware.
  */
 static bfa_boolean_t
 bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
@@ -3425,7 +3068,7 @@
 }
 
 /*
- *	Call to resume task management cmnd waiting for room in request queue.
+ * Call to resume task management cmnd waiting for room in request queue.
  */
 static void
 bfa_tskim_qresume(void *cbarg)
@@ -3451,12 +3094,6 @@
 	}
 }
 
-
-
-/*
- *  hal_tskim_friend
- */
-
 /*
  * Notification on completions from related ioim.
  */
@@ -3489,7 +3126,7 @@
 }
 
 /*
- *	Memory allocation and initialization.
+ * Memory allocation and initialization.
  */
 void
 bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
@@ -3522,14 +3159,6 @@
 }
 
 void
-bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
-{
-	/*
-	* @todo
-	*/
-}
-
-void
 bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@ -3538,7 +3167,7 @@
 	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);
 
 	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
-	bfa_assert(tskim->tsk_tag == tsk_tag);
+	WARN_ON(tskim->tsk_tag != tsk_tag);
 
 	tskim->tsk_status = rsp->tsk_status;
 
@@ -3556,12 +3185,6 @@
 }
 
 
-
-/*
- *  hal_tskim_api
- */
-
-
 struct bfa_tskim_s *
 bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
 {
@@ -3579,13 +3202,13 @@
 void
 bfa_tskim_free(struct bfa_tskim_s *tskim)
 {
-	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
+	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
 	list_del(&tskim->qe);
 	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
 }
 
 /*
- *	Start a task management command.
+ * Start a task management command.
  *
  * @param[in]	tskim	BFA task management command instance
  * @param[in]	itnim	i-t nexus for the task management command
@@ -3596,7 +3219,8 @@
  * @return None.
  */
 void
-bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
+bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
+			struct scsi_lun lun,
 			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
 {
 	tskim->itnim	= itnim;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index db53717..1e38dad 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -41,7 +41,7 @@
 	(__itnim->ioprofile.iocomps[__index]++)
 
 #define BFA_IOIM_RETRY_TAG_OFFSET 11
-#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
+#define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */
 #define BFA_IOIM_RETRY_MAX 7
 
 /* Buckets are are 512 bytes to 2MB */
@@ -94,12 +94,12 @@
 	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
 	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
 	struct list_head	tskim_free_q;
-	u32		ios_active;	/*  current active IOs	*/
-	u32		delay_comp;
+	u32			ios_active;	/*  current active IOs	*/
+	u32			delay_comp;
 	struct bfa_fcpim_del_itn_stats_s del_itn_stats;
 	bfa_boolean_t		ioredirect;
 	bfa_boolean_t		io_profile;
-	u32		io_profile_start_time;
+	u32			io_profile_start_time;
 	bfa_fcpim_profile_t     profile_comp;
 	bfa_fcpim_profile_t     profile_start;
 };
@@ -114,25 +114,24 @@
 	struct bfa_fcpim_mod_s	*fcpim;		/*  parent fcpim module */
 	struct bfa_itnim_s	*itnim;		/*  i-t-n nexus for this IO  */
 	struct bfad_ioim_s	*dio;		/*  driver IO handle	*/
-	u16		iotag;		/*  FWI IO tag	*/
-	u16		abort_tag;	/*  unqiue abort request tag */
-	u16		nsges;		/*  number of SG elements */
-	u16		nsgpgs;		/*  number of SG pages	*/
+	u16			iotag;		/*  FWI IO tag	*/
+	u16			abort_tag;	/*  unique abort request tag */
+	u16			nsges;		/*  number of SG elements */
+	u16			nsgpgs;		/*  number of SG pages	*/
 	struct bfa_sgpg_s	*sgpg;		/*  first SG page	*/
 	struct list_head	sgpg_q;		/*  allocated SG pages	*/
 	struct bfa_cb_qe_s	hcb_qe;		/*  bfa callback qelem	*/
 	bfa_cb_cbfn_t		io_cbfn;	/*  IO completion handler */
-	struct bfa_ioim_sp_s *iosp;		/*  slow-path IO handling */
-	u8		reqq;		/*  Request queue for I/O */
-	u64 start_time;			/*  IO's Profile start val */
+	struct bfa_ioim_sp_s	*iosp;		/*  slow-path IO handling */
+	u8			reqq;		/*  Request queue for I/O */
+	u64			start_time;	/*  IO's Profile start val */
 };
 
-
 struct bfa_ioim_sp_s {
 	struct bfi_msg_s	comp_rspmsg;	/*  IO comp f/w response */
 	u8			*snsinfo;	/*  sense info for this IO   */
-	struct bfa_sgpg_wqe_s sgpg_wqe;	/*  waitq elem for sgpg	*/
-	struct bfa_reqq_wait_s reqq_wait;	/*  to wait for room in reqq */
+	struct bfa_sgpg_wqe_s	sgpg_wqe;	/*  waitq elem for sgpg	*/
+	struct bfa_reqq_wait_s	reqq_wait;	/*  to wait for room in reqq */
 	bfa_boolean_t		abort_explicit;	/*  aborted by OS	*/
 	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd	*/
 };
@@ -143,35 +142,34 @@
 struct bfa_tskim_s {
 	struct list_head	qe;
 	bfa_sm_t		sm;
-	struct bfa_s	*bfa;	/*  BFA module  */
+	struct bfa_s		*bfa;	/*  BFA module  */
 	struct bfa_fcpim_mod_s  *fcpim;	/*  parent fcpim module	*/
 	struct bfa_itnim_s	*itnim;	/*  i-t-n nexus for this IO  */
-	struct bfad_tskim_s	*dtsk;   /*  driver task mgmt cmnd	*/
-	bfa_boolean_t	notify;	/*  notify itnim on TM comp  */
-	lun_t	lun;	/*  lun if applicable	*/
-	enum fcp_tm_cmnd	tm_cmnd;	/*  task management command  */
-	u16	tsk_tag;	/*  FWI IO tag	*/
-	u8	tsecs;	/*  timeout in seconds	*/
+	struct bfad_tskim_s	*dtsk;  /*  driver task mgmt cmnd	*/
+	bfa_boolean_t		notify;	/*  notify itnim on TM comp  */
+	struct scsi_lun		lun;	/*  lun if applicable	*/
+	enum fcp_tm_cmnd	tm_cmnd; /*  task management command  */
+	u16			tsk_tag; /*  FWI IO tag	*/
+	u8			tsecs;	/*  timeout in seconds	*/
 	struct bfa_reqq_wait_s  reqq_wait;   /*  to wait for room in reqq */
 	struct list_head	io_q;	/*  queue of affected IOs	*/
-	struct bfa_wc_s	wc;	/*  waiting counter	*/
+	struct bfa_wc_s		wc;	/*  waiting counter	*/
 	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
 	enum bfi_tskim_status   tsk_status;  /*  TM status	*/
 };
 
-
 /*
  * BFA i-t-n (initiator mode)
  */
 struct bfa_itnim_s {
-	struct list_head	qe;		/*  queue element	*/
-	bfa_sm_t	  sm;		/*  i-t-n im BFA state machine  */
-	struct bfa_s	*bfa;		/*  bfa instance	*/
-	struct bfa_rport_s *rport;	/*  bfa rport	*/
-	void	*ditn;		/*  driver i-t-n structure	*/
+	struct list_head	qe;	/*  queue element	*/
+	bfa_sm_t		sm;	/*  i-t-n im BFA state machine  */
+	struct bfa_s		*bfa;	/*  bfa instance	*/
+	struct bfa_rport_s	*rport;	/*  bfa rport	*/
+	void			*ditn;	/*  driver i-t-n structure	*/
 	struct bfi_mhdr_s	mhdr;	/*  pre-built mhdr	*/
-	u8	msg_no;		/*  itnim/rport firmware handle */
-	u8	reqq;		/*  CQ for requests	*/
+	u8			msg_no;	/*  itnim/rport firmware handle */
+	u8			reqq;	/*  CQ for requests	*/
 	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
 	struct list_head pending_q;	/*  queue of pending IO requests */
 	struct list_head io_q;		/*  queue of active IO requests */
@@ -181,19 +179,19 @@
 	bfa_boolean_t   seq_rec;	/*  SQER supported	*/
 	bfa_boolean_t   is_online;	/*  itnim is ONLINE for IO	*/
 	bfa_boolean_t   iotov_active;	/*  IO TOV timer is active	 */
-	struct bfa_wc_s	wc;	/*  waiting counter	*/
-	struct bfa_timer_s timer;	/*  pending IO TOV		 */
+	struct bfa_wc_s	wc;		/*  waiting counter	*/
+	struct bfa_timer_s timer;	/*  pending IO TOV	 */
 	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
 	struct bfa_fcpim_mod_s *fcpim;	/*  fcpim module	*/
 	struct bfa_itnim_iostats_s	stats;
 	struct bfa_itnim_ioprofile_s  ioprofile;
 };
 
-
 #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
 #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_IOIM_TAG_2_ID(_iotag)	((_iotag) & BFA_IOIM_IOTAG_MASK)
 #define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
-	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)])
+	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
 #define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)	\
 	(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
 
@@ -201,26 +199,26 @@
 	(_bfa->modules.fcpim_mod.io_profile_start_time)
 #define bfa_fcpim_get_io_profile(_bfa)	\
 	(_bfa->modules.fcpim_mod.io_profile)
+#define bfa_ioim_update_iotag(__ioim) do {				\
+	uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;	\
+	k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK;			\
+	(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;		\
+} while (0)
 
 static inline bfa_boolean_t
-bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
+bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
-	u16 k = ioim->iotag;
-
-	k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++;
-
-	if (k > BFA_IOIM_RETRY_MAX)
+	uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET;
+	if (k < BFA_IOIM_RETRY_MAX)
 		return BFA_FALSE;
-	ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
-	ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
 	return BFA_TRUE;
 }
+
 /*
  * function prototypes
  */
 void	bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
 					struct bfa_meminfo_s *minfo);
-void	bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
 void	bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_ioim_good_comp_isr(struct bfa_s *bfa,
 					struct bfi_msg_s *msg);
@@ -232,7 +230,6 @@
 
 void	bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
 					struct bfa_meminfo_s *minfo);
-void	bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
 void	bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 void	bfa_tskim_iodone(struct bfa_tskim_s *tskim);
 void	bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
@@ -248,32 +245,14 @@
 void	bfa_itnim_iodone(struct bfa_itnim_s *itnim);
 void	bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
 bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
-void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
-void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
-
 
 /*
  * bfa fcpim module API functions
  */
-void		bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
+void	bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
 u16	bfa_fcpim_path_tov_get(struct bfa_s *bfa);
-void		bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
 u16	bfa_fcpim_qdepth_get(struct bfa_s *bfa);
-bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
-	 struct bfa_itnim_iostats_s *modstats);
-bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
-		struct bfa_itnim_iostats_s *stats, u8 lp_tag);
-bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
-	 struct bfa_fcpim_del_itn_stats_s *modstats);
-bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
-void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
-		struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
-void		bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
-				bfa_boolean_t state);
-void		bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
-bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
+
 #define bfa_fcpim_ioredirect_enabled(__bfa)				\
 	(((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
 
@@ -291,48 +270,33 @@
  * bfa itnim API functions
  */
 struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
-					struct bfa_rport_s *rport, void *itnim);
-void		bfa_itnim_delete(struct bfa_itnim_s *itnim);
-void		bfa_itnim_online(struct bfa_itnim_s *itnim,
-				 bfa_boolean_t seq_rec);
-void		bfa_itnim_offline(struct bfa_itnim_s *itnim);
-void		bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
-			struct bfa_itnim_iostats_s *stats);
-void		bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
-bfa_status_t	bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
-		struct bfa_itnim_ioprofile_s *ioprofile);
+		struct bfa_rport_s *rport, void *itnim);
+void bfa_itnim_delete(struct bfa_itnim_s *itnim);
+void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec);
+void bfa_itnim_offline(struct bfa_itnim_s *itnim);
+void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
+bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_ioprofile_s *ioprofile);
+
 #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
 
 /*
- *	BFA completion callback for bfa_itnim_online().
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
+ * BFA completion callback for bfa_itnim_online().
  */
 void	bfa_cb_itnim_online(void *itnim);
 
 /*
- *	BFA completion callback for bfa_itnim_offline().
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
+ * BFA completion callback for bfa_itnim_offline().
  */
 void	bfa_cb_itnim_offline(void *itnim);
 void	bfa_cb_itnim_tov_begin(void *itnim);
 void	bfa_cb_itnim_tov(void *itnim);
 
 /*
- *	BFA notification to FCS/driver for second level error recovery.
- *
+ * BFA notification to FCS/driver for second level error recovery.
  * Atleast one I/O request has timedout and target is unresponsive to
  * repeated abort requests. Second level error recovery should be initiated
  * by starting implicit logout and recovery procedures.
- *
- * @param[in]		itnim		FCS or driver itnim instance
- *
- * return None
  */
 void	bfa_cb_itnim_sler(void *itnim);
 
@@ -349,10 +313,8 @@
 bfa_status_t	bfa_ioim_abort(struct bfa_ioim_s *ioim);
 void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
 				      bfa_boolean_t iotov);
-
-
 /*
- *	I/O completion notification.
+ * I/O completion notification.
  *
  * @param[in]		dio			driver IO structure
  * @param[in]		io_status		IO completion status
@@ -363,39 +325,31 @@
  *
  * @return None
  */
-void	bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
-				  enum bfi_ioim_status io_status,
-				  u8 scsi_status, int sns_len,
-				  u8 *sns_info, s32 residue);
+void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
+			enum bfi_ioim_status io_status,
+			u8 scsi_status, int sns_len,
+			u8 *sns_info, s32 residue);
 
 /*
- *	I/O good completion notification.
- *
- * @param[in]		dio			driver IO structure
- *
- * @return None
+ * I/O good completion notification.
  */
-void	bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
+void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
 
 /*
- *	I/O abort completion notification
- *
- * @param[in]		dio			driver IO that was aborted
- *
- * @return None
+ * I/O abort completion notification
  */
-void	bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
+void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
 
 /*
  * bfa tskim API functions
  */
-struct bfa_tskim_s	*bfa_tskim_alloc(struct bfa_s *bfa,
-					struct bfad_tskim_s *dtsk);
-void		bfa_tskim_free(struct bfa_tskim_s *tskim);
-void		bfa_tskim_start(struct bfa_tskim_s *tskim,
-				struct bfa_itnim_s *itnim, lun_t lun,
-				enum fcp_tm_cmnd tm, u8 t_secs);
-void		bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
-				  enum bfi_tskim_status tsk_status);
+struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa,
+			struct bfad_tskim_s *dtsk);
+void bfa_tskim_free(struct bfa_tskim_s *tskim);
+void bfa_tskim_start(struct bfa_tskim_s *tskim,
+			struct bfa_itnim_s *itnim, struct scsi_lun lun,
+			enum fcp_tm_cmnd tm, u8 t_secs);
+void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+			enum bfi_tskim_status tsk_status);
 
 #endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 045d7e8..f674f93 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -19,9 +19,9 @@
  *  bfa_fcs.c BFA FCS main
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, FCS);
 
@@ -76,7 +76,7 @@
 	fcs->bfad = bfad;
 	fcs->min_cfg = min_cfg;
 
-	bfa_attach_fcs(bfa);
+	bfa->fcs = BFA_TRUE;
 	fcbuild_init();
 
 	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
@@ -110,14 +110,6 @@
 	}
 }
 
-/*
- * Start FCS operations.
- */
-void
-bfa_fcs_start(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_fabric_modstart(fcs);
-}
 
 /*
  *	brief
@@ -140,22 +132,6 @@
 
 /*
  *	brief
- *		FCS FDMI Driver Parameter Initialization
- *
- *	param[in]		fcs		FCS instance
- *	param[in]		fdmi_enable	TRUE/FALSE
- *
- *	return None
- */
-void
-bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
-{
-
-	fcs->fdmi_enabled = fdmi_enable;
-
-}
-/*
- *	brief
  *		FCS instance cleanup and exit.
  *
  *	param[in]		fcs			FCS instance
@@ -184,18 +160,6 @@
 }
 
 
-void
-bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
-{
-	fcs->trcmod = trcmod;
-}
-
-void
-bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
-{
-	bfa_wc_down(&fcs->wc);
-}
-
 /*
  * Fabric module implementation.
  */
@@ -232,31 +196,6 @@
 					 u32 rsp_len,
 					 u32 resid_len,
 					 struct fchs_s *rspfchs);
-/*
- *  fcs_fabric_sm fabric state machine functions
- */
-
-/*
- * Fabric state machine events
- */
-enum bfa_fcs_fabric_event {
-	BFA_FCS_FABRIC_SM_CREATE	= 1,	/*  create from driver	      */
-	BFA_FCS_FABRIC_SM_DELETE	= 2,	/*  delete from driver	      */
-	BFA_FCS_FABRIC_SM_LINK_DOWN	= 3,	/*  link down from port      */
-	BFA_FCS_FABRIC_SM_LINK_UP	= 4,	/*  link up from port	      */
-	BFA_FCS_FABRIC_SM_CONT_OP	= 5,	/*  flogi/auth continue op   */
-	BFA_FCS_FABRIC_SM_RETRY_OP	= 6,	/*  flogi/auth retry op      */
-	BFA_FCS_FABRIC_SM_NO_FABRIC	= 7,	/*  from flogi/auth	      */
-	BFA_FCS_FABRIC_SM_PERF_EVFP	= 8,	/*  from flogi/auth	      */
-	BFA_FCS_FABRIC_SM_ISOLATE	= 9,	/*  from EVFP processing     */
-	BFA_FCS_FABRIC_SM_NO_TAGGING	= 10,	/*  no VFT tagging from EVFP */
-	BFA_FCS_FABRIC_SM_DELAYED	= 11,	/*  timeout delay event      */
-	BFA_FCS_FABRIC_SM_AUTH_FAILED	= 12,	/*  auth failed	      */
-	BFA_FCS_FABRIC_SM_AUTH_SUCCESS	= 13,	/*  auth successful	      */
-	BFA_FCS_FABRIC_SM_DELCOMP	= 14,	/*  all vports deleted event */
-	BFA_FCS_FABRIC_SM_LOOPBACK	= 15,	/*  Received our own FLOGI   */
-	BFA_FCS_FABRIC_SM_START		= 16,	/*  from driver	      */
-};
 
 static void	bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
 					 enum bfa_fcs_fabric_event event);
@@ -270,14 +209,8 @@
 					      enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
 				       enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
-					      enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
-					   enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
 					   enum bfa_fcs_fabric_event event);
-static void	bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
-					 enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
 				       enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
@@ -337,7 +270,7 @@
 
 	case BFA_FCS_FABRIC_SM_DELETE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-		bfa_fcs_modexit_comp(fabric->fcs);
+		bfa_wc_down(&fabric->fcs->wc);
 		break;
 
 	default:
@@ -410,7 +343,7 @@
 
 	case BFA_FCS_FABRIC_SM_LOOPBACK:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_set_opertype(fabric);
 		break;
 
@@ -424,12 +357,12 @@
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_delete(fabric);
 		break;
 
@@ -481,7 +414,7 @@
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -495,7 +428,7 @@
 
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_DELETE:
@@ -511,7 +444,7 @@
 /*
  *   Authentication failed
  */
-static void
+void
 bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
 			      enum bfa_fcs_fabric_event event)
 {
@@ -537,7 +470,7 @@
 /*
  *   Port is in loopback mode.
  */
-static void
+void
 bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
 			   enum bfa_fcs_fabric_event event)
 {
@@ -573,7 +506,7 @@
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_notify_offline(fabric);
 		break;
 
@@ -596,7 +529,7 @@
 /*
  *   Fabric is online - normal operating state.
  */
-static void
+void
 bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
 			 enum bfa_fcs_fabric_event event)
 {
@@ -606,7 +539,7 @@
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_LINK_DOWN:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_fabric_notify_offline(fabric);
 		break;
 
@@ -617,7 +550,7 @@
 
 	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-		bfa_lps_discard(fabric->lps);
+		bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
@@ -697,7 +630,7 @@
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_DELCOMP:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-		bfa_fcs_modexit_comp(fabric->fcs);
+		bfa_wc_down(&fabric->fcs->wc);
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -724,8 +657,8 @@
 	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
 
 	port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
-	port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
-	port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
+	port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn;
+	port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn;
 }
 
 /*
@@ -813,7 +746,7 @@
 		return;
 
 	case BFA_STATUS_EPROTOCOL:
-		switch (bfa_lps_get_extstatus(fabric->lps)) {
+		switch (fabric->lps->ext_status) {
 		case BFA_EPROTO_BAD_ACCEPT:
 			fabric->stats.flogi_acc_err++;
 			break;
@@ -840,26 +773,26 @@
 		return;
 	}
 
-	fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
+	fabric->bb_credit = fabric->lps->pr_bbcred;
 	bfa_trc(fabric->fcs, fabric->bb_credit);
 
-	if (!bfa_lps_is_brcd_fabric(fabric->lps))
-		fabric->fabric_name =  bfa_lps_get_peer_nwwn(fabric->lps);
+	if (!(fabric->lps->brcd_switch))
+		fabric->fabric_name =  fabric->lps->pr_nwwn;
 
 	/*
 	 * Check port type. It should be 1 = F-port.
 	 */
-	if (bfa_lps_is_fport(fabric->lps)) {
-		fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
-		fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
-		fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
+	if (fabric->lps->fport) {
+		fabric->bport.pid = fabric->lps->lp_pid;
+		fabric->is_npiv = fabric->lps->npiv_en;
+		fabric->is_auth = fabric->lps->auth_req;
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
 	} else {
 		/*
 		 * Nport-2-Nport direct attached
 		 */
 		fabric->bport.port_topo.pn2n.rem_port_wwn =
-			bfa_lps_get_peer_pwwn(fabric->lps);
+			fabric->lps->pr_pwwn;
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
 	}
 
@@ -987,7 +920,7 @@
 	INIT_LIST_HEAD(&fabric->vport_q);
 	INIT_LIST_HEAD(&fabric->vf_q);
 	fabric->lps = bfa_lps_alloc(fcs->bfa);
-	bfa_assert(fabric->lps);
+	WARN_ON(!fabric->lps);
 
 	/*
 	 * Initialize fabric delete completion handler. Fabric deletion is
@@ -1038,31 +971,6 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
 }
 
-/*
- *   Suspend fabric activity as part of driver suspend.
- */
-void
-bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
-{
-}
-
-bfa_boolean_t
-bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
-}
-
-bfa_boolean_t
-bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
-}
-
-enum bfa_port_type
-bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
-{
-	return fabric->oper_type;
-}
 
 /*
  *   Link up notification from BFA physical port module.
@@ -1123,40 +1031,6 @@
 	bfa_wc_down(&fabric->wc);
 }
 
-/*
- *   Base port is deleted.
- */
-void
-bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
-{
-	bfa_wc_down(&fabric->wc);
-}
-
-
-/*
- *    Check if fabric is online.
- *
- *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
- *
- *   @return  TRUE/FALSE
- */
-int
-bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
-{
-	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
-}
-
-/*
- *	brief
- *
- */
-bfa_status_t
-bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
-		     struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
-{
-	bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
-	return BFA_STATUS_OK;
-}
 
 /*
  * Lookup for a vport withing a fabric given its pwwn
@@ -1176,18 +1050,6 @@
 	return NULL;
 }
 
-/*
- *    In a given fabric, return the number of lports.
- *
- *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
- *
- *   @return : 1 or more.
- */
-u16
-bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
-{
-	return fabric->num_vports;
-}
 
 /*
  *  Get OUI of the attached switch.
@@ -1207,7 +1069,7 @@
 	u8 *tmp;
 	u16 oui;
 
-	fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
+	fab_nwwn = fabric->lps->pr_nwwn;
 
 	tmp = (u8 *)&fab_nwwn;
 	oui = (tmp[3] << 8) | tmp[4];
@@ -1235,7 +1097,7 @@
 	 * external loopback cable is in place. Our own FLOGI frames are
 	 * sometimes looped back when switch port gets temporarily bypassed.
 	 */
-	if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) &&
+	if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) &&
 	    (els_cmd->els_code == FC_ELS_FLOGI) &&
 	    (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
 		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
@@ -1245,7 +1107,7 @@
 	/*
 	 * FLOGI/EVFP exchanges should be consumed by base fabric.
 	 */
-	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
+	if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) {
 		bfa_trc(fabric->fcs, pid);
 		bfa_fcs_fabric_process_uf(fabric, fchs, len);
 		return;
@@ -1358,13 +1220,13 @@
 		return;
 
 	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-				    bfa_os_hton3b(FC_FABRIC_PORT),
+				    bfa_hton3b(FC_FABRIC_PORT),
 				    n2n_port->reply_oxid, pcfg->pwwn,
 				    pcfg->nwwn,
 				    bfa_fcport_get_maxfrsize(bfa),
 				    bfa_fcport_get_rx_bbcredit(bfa));
 
-	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
+	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
 		      BFA_FALSE, FC_CLASS_3,
 		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
 		      FC_MAX_PDUSZ, 0);
@@ -1455,7 +1317,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -1502,7 +1364,7 @@
 		 * drop frame if vfid is unknown
 		 */
 		if (!fabric) {
-			bfa_assert(0);
+			WARN_ON(1);
 			bfa_stats(fcs, uf.vfid_unknown);
 			bfa_uf_free(uf);
 			return;
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 9cb6a55..0fd6316 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -27,6 +27,22 @@
 #define BFA_FCS_OS_STR_LEN		64
 
 /*
+ *  lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+	BFA_LPS_SM_LOGIN	= 1,	/* login request from user      */
+	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user     */
+	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout */
+	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue  */
+	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user         */
+	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline              */
+	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link        */
+	BFA_LPS_SM_SET_N2N_PID  = 8,	/* Set assigned PID for n2n */
+};
+
+
+/*
  * !!! Only append to the enums defined here to avoid any versioning
  * !!! needed between trace utility and driver version
  */
@@ -41,13 +57,12 @@
 struct bfa_fcs_s;
 
 #define __fcs_min_cfg(__fcs)       ((__fcs)->min_cfg)
-void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
 
 #define BFA_FCS_BRCD_SWITCH_OUI  0x051e
 #define N2N_LOCAL_PID	    0x010000
 #define N2N_REMOTE_PID		0x020000
 #define	BFA_FCS_RETRY_TIMEOUT 2000
-#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_os_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
+#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
 
 
 
@@ -109,7 +124,7 @@
 
 struct bfa_fcs_lport_n2n_s {
 	u32        rsvd;
-	u16        reply_oxid;	/*  ox_id from the req flogi to be
+	__be16     reply_oxid;	/*  ox_id from the req flogi to be
 					 *used in flogi acc */
 	wwn_t           rem_port_wwn;	/*  Attached port's wwn */
 };
@@ -316,8 +331,6 @@
 				       struct bfa_fcs_rport_s *rport);
 void            bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
 				       struct bfa_fcs_rport_s *rport);
-void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
 void            bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
 void            bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
@@ -359,9 +372,6 @@
 bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
 			    struct bfa_vport_attr_s *vport_attr);
-void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
-			     struct bfa_vport_stats_s *vport_stats);
-void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
 struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
 					     u16 vf_id, wwn_t vpwwn);
 void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
@@ -406,7 +416,7 @@
 	struct bfad_rport_s	*rp_drv;	/*  driver peer instance */
 	u32	pid;	/*  port ID of rport */
 	u16	maxfrsize;	/*  maximum frame size */
-	u16	reply_oxid;	/*  OX_ID of inbound requests */
+	__be16	reply_oxid;	/*  OX_ID of inbound requests */
 	enum fc_cos	fc_cos;	/*  FC classes of service supp */
 	bfa_boolean_t	cisc;	/*  CISC capable device */
 	bfa_boolean_t	prlo;	/*  processing prlo or LOGO */
@@ -437,32 +447,18 @@
 /*
  * bfa fcs rport API functions
  */
-bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
-			       struct bfa_fcs_rport_s *rport,
-			       struct bfad_rport_s *rport_drv);
-bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
-			    struct bfa_rport_attr_s *attr);
-void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
-			     struct bfa_rport_stats_s *stats);
-void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
 					     wwn_t rpwwn);
 struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
 	struct bfa_fcs_lport_s *port, wwn_t rnwwn);
 void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
 
-void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
-			     enum bfa_port_speed speed);
 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
 	 struct fchs_s *fchs, u16 len);
 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
 
 struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
 	 u32 pid);
-void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
 			 struct fc_logi_s *plogi_rsp);
 void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
@@ -470,10 +466,8 @@
 				struct fc_logi_s *plogi);
 void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
 			 struct fc_logi_s *plogi);
-void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
-void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
+void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id);
 
-void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
 void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
 int  bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
@@ -618,7 +612,7 @@
 	u8         option_rom_ver[BFA_VERSION_LEN];
 	u8         fw_version[8];
 	u8         os_name[256];
-	u32        max_ct_pyld;
+	__be32        max_ct_pyld;
 };
 
 /*
@@ -626,9 +620,9 @@
  */
 struct bfa_fcs_fdmi_port_attr_s {
 	u8         supp_fc4_types[32];	/* supported FC4 types */
-	u32        supp_speed;	/* supported speed */
-	u32        curr_speed;	/* current Speed */
-	u32        max_frm_size;	/* max frame size */
+	__be32        supp_speed;	/* supported speed */
+	__be32        curr_speed;	/* current Speed */
+	__be32        max_frm_size;	/* max frame size */
 	u8         os_device_name[256];	/* OS device Name */
 	u8         host_name[256];	/* host name */
 };
@@ -664,6 +658,57 @@
 };
 
 /*
+ *  fcs_fabric_sm fabric state machine functions
+ */
+
+/*
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+	BFA_FCS_FABRIC_SM_CREATE        = 1,    /*  create from driver        */
+	BFA_FCS_FABRIC_SM_DELETE        = 2,    /*  delete from driver        */
+	BFA_FCS_FABRIC_SM_LINK_DOWN     = 3,    /*  link down from port      */
+	BFA_FCS_FABRIC_SM_LINK_UP       = 4,    /*  link up from port         */
+	BFA_FCS_FABRIC_SM_CONT_OP       = 5,    /*  flogi/auth continue op   */
+	BFA_FCS_FABRIC_SM_RETRY_OP      = 6,    /*  flogi/auth retry op      */
+	BFA_FCS_FABRIC_SM_NO_FABRIC     = 7,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_PERF_EVFP     = 8,    /*  from flogi/auth           */
+	BFA_FCS_FABRIC_SM_ISOLATE       = 9,    /*  from EVFP processing     */
+	BFA_FCS_FABRIC_SM_NO_TAGGING    = 10,   /*  no VFT tagging from EVFP */
+	BFA_FCS_FABRIC_SM_DELAYED       = 11,   /*  timeout delay event      */
+	BFA_FCS_FABRIC_SM_AUTH_FAILED   = 12,   /*  auth failed       */
+	BFA_FCS_FABRIC_SM_AUTH_SUCCESS  = 13,   /*  auth successful           */
+	BFA_FCS_FABRIC_SM_DELCOMP       = 14,   /*  all vports deleted event */
+	BFA_FCS_FABRIC_SM_LOOPBACK      = 15,   /*  Received our own FLOGI   */
+	BFA_FCS_FABRIC_SM_START         = 16,   /*  from driver       */
+};
+
+/*
+ *  fcs_rport_sm FCS rport state machine events
+ */
+
+enum rport_event {
+	RPSM_EVENT_PLOGI_SEND   = 1,    /*  new rport; start with PLOGI */
+	RPSM_EVENT_PLOGI_RCVD   = 2,    /*  Inbound PLOGI from remote port */
+	RPSM_EVENT_PLOGI_COMP   = 3,    /*  PLOGI completed to rport    */
+	RPSM_EVENT_LOGO_RCVD    = 4,    /*  LOGO from remote device     */
+	RPSM_EVENT_LOGO_IMP     = 5,    /*  implicit logo for SLER      */
+	RPSM_EVENT_FCXP_SENT    = 6,    /*  Frame has been sent         */
+	RPSM_EVENT_DELETE       = 7,    /*  RPORT delete request        */
+	RPSM_EVENT_SCN          = 8,    /*  state change notification   */
+	RPSM_EVENT_ACCEPTED     = 9,    /*  Good response from remote device */
+	RPSM_EVENT_FAILED       = 10,   /*  Request to rport failed.    */
+	RPSM_EVENT_TIMEOUT      = 11,   /*  Rport SM timeout event      */
+	RPSM_EVENT_HCB_ONLINE  = 12,    /*  BFA rport online callback   */
+	RPSM_EVENT_HCB_OFFLINE = 13,    /*  BFA rport offline callback  */
+	RPSM_EVENT_FC4_OFFLINE = 14,    /*  FC-4 offline complete       */
+	RPSM_EVENT_ADDRESS_CHANGE = 15, /*  Rport's PID has changed     */
+	RPSM_EVENT_ADDRESS_DISC = 16,   /*  Need to Discover rport's PID */
+	RPSM_EVENT_PRLO_RCVD   = 17,    /*  PRLO from remote device     */
+	RPSM_EVENT_PLOGI_RETRY = 18,    /*  Retry PLOGI continuously */
+};
+
+/*
  * bfa fcs API functions
  */
 void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
@@ -672,16 +717,12 @@
 void bfa_fcs_init(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 			      struct bfa_fcs_driver_info_s *driver_info);
-void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
-void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
-void		bfa_fcs_start(struct bfa_fcs_s *fcs);
 
 /*
  * bfa fcs vf public functions
  */
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
-u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 
 /*
  * fabric protected interface functions
@@ -689,32 +730,29 @@
 void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
 	struct bfa_fcs_vport_s *vport);
 void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
 	struct bfa_fcs_vport_s *vport);
-int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
 struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
 		struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
 void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
 		struct fchs_s *fchs, u16 len);
-bfa_boolean_t	bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
-bfa_boolean_t	bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
-enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
 void	bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
-void	bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
-bfa_status_t	bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
-			struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
-			struct bfad_vf_s *vf_drv);
 void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
 	       wwn_t fabric_name);
 u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
+void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event);
 
 /*
  * BFA FCS callback interfaces
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 413b58e..e7b49f4 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -19,9 +19,9 @@
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 #include "bfad_im.h"
 
 BFA_TRC_FILE(FCS, FCPIM);
@@ -103,7 +103,7 @@
 		break;
 
 	case BFA_FCS_ITNIM_SM_OFFLINE:
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -140,7 +140,7 @@
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -181,7 +181,7 @@
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_fcxp_discard(itnim->fcxp);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -217,7 +217,7 @@
 		} else {
 			/* invoke target offline */
 			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-			bfa_fcs_rport_logo_imp(itnim->rport);
+			bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
 		}
 		break;
 
@@ -225,7 +225,7 @@
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_timer_stop(&itnim->timer);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_INITIATOR:
@@ -269,7 +269,7 @@
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_itnim_offline(itnim->bfa_itnim);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -330,7 +330,7 @@
 	switch (event) {
 	case BFA_FCS_ITNIM_SM_HCB_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -358,7 +358,7 @@
 	switch (event) {
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
-		bfa_fcs_rport_itnim_ack(itnim->rport);
+		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
 		break;
 
 	case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -536,7 +536,7 @@
 	if (bfa_itnim == NULL) {
 		bfa_trc(port->fcs, rport->pwwn);
 		bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
-		bfa_assert(0);
+		WARN_ON(1);
 		return NULL;
 	}
 
@@ -688,7 +688,7 @@
 
 	itnim->stats.sler++;
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
-	bfa_fcs_rport_logo_imp(itnim->rport);
+	bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
 }
 
 struct bfa_fcs_itnim_s *
@@ -700,7 +700,7 @@
 	if (!rport)
 		return NULL;
 
-	bfa_assert(rport->itnim != NULL);
+	WARN_ON(rport->itnim == NULL);
 	return rport->itnim;
 }
 
@@ -729,7 +729,7 @@
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
-	bfa_assert(port != NULL);
+	WARN_ON(port == NULL);
 
 	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
 
@@ -746,7 +746,7 @@
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
-	bfa_assert(port != NULL);
+	WARN_ON(port == NULL);
 
 	itnim = bfa_fcs_itnim_lookup(port, rpwwn);
 
@@ -778,6 +778,6 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 8d65130..43fa986b 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -15,10 +15,10 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, PORT);
 
@@ -159,7 +159,7 @@
 			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
 			list_for_each_safe(qe, qen, &port->rport_q) {
 				rport = (struct bfa_fcs_rport_s *) qe;
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		break;
@@ -197,7 +197,7 @@
 			bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
 			list_for_each_safe(qe, qen, &port->rport_q) {
 				rport = (struct bfa_fcs_rport_s *) qe;
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		break;
@@ -309,6 +309,7 @@
 			return;
 		}
 		port->pid  = rx_fchs->d_id;
+		bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
 	}
 
 	/*
@@ -323,6 +324,7 @@
 			(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
 			(void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
 			port->pid  = rx_fchs->d_id;
+			bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
 			rport->pid = rx_fchs->s_id;
 		}
 		bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
@@ -349,8 +351,8 @@
 		 * This is a different device with the same pid. Old device
 		 * disappeared. Send implicit LOGO to old device.
 		 */
-		bfa_assert(rport->pwwn != plogi->port_name);
-		bfa_fcs_rport_logo_imp(rport);
+		WARN_ON(rport->pwwn == plogi->port_name);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 
 		/*
 		 * Inbound PLOGI from a new device (with old PID).
@@ -362,7 +364,7 @@
 	/*
 	 * PLOGI crossing each other.
 	 */
-	bfa_assert(rport->pwwn == WWN_NULL);
+	WARN_ON(rport->pwwn != WWN_NULL);
 	bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
 }
 
@@ -511,7 +513,8 @@
 	__port_action[port->fabric->fab_type].offline(port);
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
-	if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
+	if (bfa_sm_cmp_state(port->fabric,
+			bfa_fcs_fabric_sm_online) == BFA_TRUE)
 		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
 		"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
 		lpwwn_buf, "Initiator");
@@ -522,26 +525,26 @@
 
 	list_for_each_safe(qe, qen, &port->rport_q) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		bfa_fcs_rport_offline(rport);
+		bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 	}
 }
 
 static void
 bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
 bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
 bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
 {
-	bfa_assert(0);
+	WARN_ON(1);
 }
 
 static void
@@ -584,33 +587,11 @@
 				port->vport ? port->vport->vport_drv : NULL);
 		bfa_fcs_vport_delete_comp(port->vport);
 	} else {
-		 bfa_fcs_fabric_port_delete_comp(port->fabric);
+		bfa_wc_down(&port->fabric->wc);
 	}
 }
 
 
-
-/*
- *  fcs_lport_api BFA FCS port API
- */
-/*
- *   Module initialization
- */
-void
-bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
-{
-
-}
-
-/*
- *   Module cleanup
- */
-void
-bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
 /*
  * Unsolicited frame receive handling.
  */
@@ -623,6 +604,7 @@
 	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
 
 	bfa_stats(lport, uf_recvs);
+	bfa_trc(lport->fcs, fchs->type);
 
 	if (!bfa_fcs_lport_is_online(lport)) {
 		bfa_stats(lport, uf_recv_drops);
@@ -682,8 +664,11 @@
 	 * Only handles ELS frames for now.
 	 */
 	if (fchs->type != FC_TYPE_ELS) {
-		bfa_trc(lport->fcs, fchs->type);
-		bfa_assert(0);
+		bfa_trc(lport->fcs, fchs->s_id);
+		bfa_trc(lport->fcs, fchs->d_id);
+		/* ignore type FC_TYPE_FC_FSS */
+		if (fchs->type != FC_TYPE_FC_FSS)
+			bfa_sm_fault(lport->fcs, fchs->type);
 		return;
 	}
 
@@ -792,7 +777,7 @@
 	struct bfa_fcs_lport_s *port,
 	struct bfa_fcs_rport_s *rport)
 {
-	bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
+	WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
 	list_del(&rport->qe);
 	port->num_rports--;
 
@@ -850,8 +835,8 @@
 	lport->fcs = fcs;
 	lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
 	lport->vport = vport;
-	lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
-				  bfa_lps_get_tag(lport->fabric->lps);
+	lport->lp_tag = (vport) ? vport->lps->lp_tag :
+				  lport->fabric->lps->lp_tag;
 
 	INIT_LIST_HEAD(&lport->rport_q);
 	lport->num_rports = 0;
@@ -903,10 +888,12 @@
 	port_attr->port_cfg = port->port_cfg;
 
 	if (port->fabric) {
-		port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
-		port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
+		port_attr->port_type = port->fabric->oper_type;
+		port_attr->loopback = bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_loopback);
 		port_attr->authfail =
-			bfa_fcs_fabric_is_auth_failed(port->fabric);
+			bfa_sm_cmp_state(port->fabric,
+				bfa_fcs_fabric_sm_auth_failed);
 		port_attr->fabric_name  = bfa_fcs_lport_get_fabric_name(port);
 		memcpy(port_attr->fabric_ip_addr,
 			bfa_fcs_lport_get_fabric_ipaddr(port),
@@ -915,10 +902,10 @@
 		if (port->vport != NULL) {
 			port_attr->port_type = BFA_PORT_TYPE_VPORT;
 			port_attr->fpma_mac =
-				bfa_lps_get_lp_mac(port->vport->lps);
+				port->vport->lps->lp_mac;
 		} else {
 			port_attr->fpma_mac =
-				bfa_lps_get_lp_mac(port->fabric->lps);
+				port->fabric->lps->lp_mac;
 		}
 	} else {
 		port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
@@ -998,6 +985,7 @@
 	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
 	     sizeof(wwn_t)) > 0) {
 		port->pid = N2N_LOCAL_PID;
+		bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
 		/*
 		 * First, check if we know the device by pwwn.
 		 */
@@ -1007,7 +995,7 @@
 			bfa_trc(port->fcs, rport->pid);
 			bfa_trc(port->fcs, rport->pwwn);
 			rport->pid = N2N_REMOTE_PID;
-			bfa_fcs_rport_online(rport);
+			bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
 			return;
 		}
 
@@ -1017,10 +1005,10 @@
 		 */
 		if (port->num_rports > 0) {
 			rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
-			bfa_assert(rport != NULL);
+			WARN_ON(rport == NULL);
 			if (rport) {
 				bfa_trc(port->fcs, rport->pwwn);
-				bfa_fcs_rport_delete(rport);
+				bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 			}
 		}
 		bfa_fcs_rport_create(port, N2N_REMOTE_PID);
@@ -1569,6 +1557,7 @@
 	struct fdmi_attr_s *attr;
 	u8        *curr_ptr;
 	u16        len, count;
+	u16	templen;
 
 	/*
 	 * get hba attributes
@@ -1594,69 +1583,69 @@
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
-	attr->len = sizeof(wwn_t);
-	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(wwn_t);
+	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Manufacturer
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
-	attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
-	memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->manufacturer);
+	memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Serial Number
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
-	attr->len = (u16) strlen(fcs_hba_attr->serial_num);
-	memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->serial_num);
+	memcpy(attr->value, fcs_hba_attr->serial_num, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Model
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
-	attr->len = (u16) strlen(fcs_hba_attr->model);
-	memcpy(attr->value, fcs_hba_attr->model, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->model);
+	memcpy(attr->value, fcs_hba_attr->model, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Model Desc
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
-	attr->len = (u16) strlen(fcs_hba_attr->model_desc);
-	memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->model_desc);
+	memcpy(attr->value, fcs_hba_attr->model_desc, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * H/W Version
@@ -1664,14 +1653,14 @@
 	if (fcs_hba_attr->hw_version[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
-		attr->len = (u16) strlen(fcs_hba_attr->hw_version);
-		memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->hw_version);
+		memcpy(attr->value, fcs_hba_attr->hw_version, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					 sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
 	}
 
 	/*
@@ -1679,14 +1668,14 @@
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
-	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;;
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Option Rom Version
@@ -1694,14 +1683,14 @@
 	if (fcs_hba_attr->option_rom_ver[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
-		attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
-		memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
+		memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					 sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					 sizeof(templen));
 	}
 
 	/*
@@ -1709,14 +1698,14 @@
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
-	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
-	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = (u16) strlen(fcs_hba_attr->driver_version);
+	memcpy(attr->value, fcs_hba_attr->driver_version, templen);
+	templen = fc_roundup(templen, sizeof(u32));
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * OS Name
@@ -1724,14 +1713,14 @@
 	if (fcs_hba_attr->os_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
-		attr->len = (u16) strlen(fcs_hba_attr->os_name);
-		memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_hba_attr->os_name);
+		memcpy(attr->value, fcs_hba_attr->os_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		count++;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
 	}
 
 	/*
@@ -1739,12 +1728,12 @@
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
-	attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
-	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
-	len += attr->len;
+	templen = sizeof(fcs_hba_attr->max_ct_pyld);
+	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
+	len += templen;
 	count++;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Update size of payload
@@ -1845,6 +1834,7 @@
 	u8        *curr_ptr;
 	u16        len;
 	u8	count = 0;
+	u16	templen;
 
 	/*
 	 * get port attributes
@@ -1863,54 +1853,54 @@
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
-	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
-	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.supp_fc4_types);
+	memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
 	attr->len =
-		cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * Supported Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
-	attr->len = sizeof(fcs_port_attr.supp_speed);
-	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.supp_speed);
+	memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
 	attr->len =
-		cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+		cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * current Port Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
-	attr->len = sizeof(fcs_port_attr.curr_speed);
-	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.curr_speed);
+	memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * max frame size
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
 	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
-	attr->len = sizeof(fcs_port_attr.max_frm_size);
-	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
-	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-	len += attr->len;
+	templen = sizeof(fcs_port_attr.max_frm_size);
+	memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
+	curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+	len += templen;
 	++count;
-	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-			     sizeof(attr->len));
+	attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+			     sizeof(templen));
 
 	/*
 	 * OS Device Name
@@ -1918,14 +1908,14 @@
 	if (fcs_port_attr.os_device_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
-		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
-		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_port_attr.os_device_name);
+		memcpy(attr->value, fcs_port_attr.os_device_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		++count;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-					sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+					sizeof(templen));
 	}
 	/*
 	 * Host Name
@@ -1933,14 +1923,14 @@
 	if (fcs_port_attr.host_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
 		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
-		attr->len = (u16) strlen(fcs_port_attr.host_name);
-		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));
-		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
-		len += attr->len;
+		templen = (u16) strlen(fcs_port_attr.host_name);
+		memcpy(attr->value, fcs_port_attr.host_name, templen);
+		templen = fc_roundup(templen, sizeof(u32));
+		curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
+		len += templen;
 		++count;
-		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
-				sizeof(attr->len));
+		attr->len = cpu_to_be16(templen + sizeof(attr->type) +
+				sizeof(templen));
 	}
 
 	/*
@@ -2103,7 +2093,7 @@
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 			 struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
 {
@@ -2147,7 +2137,7 @@
 	hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
 }
 
-void
+static void
 bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 			  struct bfa_fcs_fdmi_port_attr_s *port_attr)
 {
@@ -2560,7 +2550,7 @@
 
 	len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_lport_get_fcid(port),
-				 bfa_lps_get_peer_nwwn(port->fabric->lps));
+				 port->fabric->lps->pr_nwwn);
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
@@ -2760,7 +2750,7 @@
 
 	len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 			     bfa_fcs_lport_get_fcid(port),
-				 bfa_lps_get_peer_nwwn(port->fabric->lps));
+				 port->fabric->lps->pr_nwwn);
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
@@ -2836,7 +2826,7 @@
 	ms->fcxp = fcxp;
 
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-			     bfa_os_hton3b(FC_MGMT_SERVER),
+			     bfa_hton3b(FC_MGMT_SERVER),
 			     bfa_fcs_lport_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
 				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -3593,7 +3583,7 @@
 	ns->fcxp = fcxp;
 
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-			     bfa_os_hton3b(FC_NAME_SERVER),
+			     bfa_hton3b(FC_NAME_SERVER),
 			     bfa_fcs_lport_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
 				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@ -4150,7 +4140,7 @@
 	bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
 }
 
-void
+static void
 bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
 {
 
@@ -4163,7 +4153,7 @@
 
 	for (ii = 0 ; ii < nwwns; ++ii) {
 		rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
-		bfa_assert(rport);
+		WARN_ON(!rport);
 	}
 }
 
@@ -4352,8 +4342,8 @@
 	/* Handle VU registrations for Base port only */
 	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
 		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
-				bfa_lps_is_brcd_fabric(port->fabric->lps),
-							port->pid, 0);
+				port->fabric->lps->brcd_switch,
+				port->pid, 0);
 	} else {
 	    len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 				    BFA_FALSE,
@@ -4626,7 +4616,7 @@
 
 
 		default:
-			bfa_assert(0);
+			WARN_ON(1);
 			nsquery = BFA_TRUE;
 		}
 	}
@@ -4672,7 +4662,7 @@
 
 	while ((qe != qh) && (i < nrports)) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
 			qe = bfa_q_next(qe);
 			bfa_trc(fcs, (u32) rport->pwwn);
 			bfa_trc(fcs, rport->pid);
@@ -4720,7 +4710,7 @@
 
 	while ((qe != qh) && (i < *nrports)) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
+		if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
 			qe = bfa_q_next(qe);
 			bfa_trc(fcs, (u32) rport->pwwn);
 			bfa_trc(fcs, rport->pid);
@@ -4771,7 +4761,7 @@
 
 	while (qe != qh) {
 		rport = (struct bfa_fcs_rport_s *) qe;
-		if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
+		if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
 			(bfa_fcs_rport_get_state(rport) ==
 			  BFA_RPORT_OFFLINE)) {
 			qe = bfa_q_next(qe);
@@ -4807,7 +4797,7 @@
 	struct bfa_fcs_vport_s *vport;
 	bfa_fcs_vf_t   *vf;
 
-	bfa_assert(fcs != NULL);
+	WARN_ON(fcs == NULL);
 
 	vf = bfa_fcs_vf_lookup(fcs, vf_id);
 	if (vf == NULL) {
@@ -4853,7 +4843,7 @@
 		port_info->max_vports_supp =
 			bfa_lps_get_max_vport(port->fcs->bfa);
 		port_info->num_vports_inuse =
-			bfa_fcs_fabric_vport_count(port->fabric);
+			port->fabric->num_vports;
 		port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
 		port_info->num_rports_inuse = port->num_rports;
 	} else {
@@ -4997,7 +4987,8 @@
 
 	switch (event) {
 	case BFA_FCS_VPORT_SM_START:
-		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
+		if (bfa_sm_cmp_state(__vport_fabric(vport),
+					bfa_fcs_fabric_sm_online)
 		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
 			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
 			bfa_fcs_vport_do_fdisc(vport);
@@ -5080,13 +5071,13 @@
 	switch (event) {
 	case BFA_FCS_VPORT_SM_DELETE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_lport_delete(&vport->lport);
 		break;
 
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		break;
 
 	case BFA_FCS_VPORT_SM_RSP_OK:
@@ -5166,7 +5157,7 @@
 
 	case BFA_FCS_VPORT_SM_OFFLINE:
 		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		bfa_fcs_lport_offline(&vport->lport);
 		break;
 
@@ -5266,7 +5257,7 @@
 
 	switch (event) {
 	case BFA_FCS_VPORT_SM_OFFLINE:
-		bfa_lps_discard(vport->lps);
+		bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
 		/*
 		 * !!! fall through !!!
 		 */
@@ -5305,14 +5296,14 @@
 static void
 bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
 {
-	u8		lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
-	u8		lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
+	u8		lsrjt_rsn = vport->lps->lsrjt_rsn;
+	u8		lsrjt_expl = vport->lps->lsrjt_expl;
 
 	bfa_trc(__vport_fcs(vport), lsrjt_rsn);
 	bfa_trc(__vport_fcs(vport), lsrjt_expl);
 
 	/* For certain reason codes, we don't want to retry. */
-	switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
+	switch (vport->lps->lsrjt_expl) {
 	case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
 	case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
 		if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
@@ -5476,7 +5467,7 @@
 	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
 		return BFA_STATUS_VPORT_EXISTS;
 
-	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
+	if (fcs->fabric.num_vports ==
 			bfa_lps_get_max_vport(fcs->bfa))
 		return BFA_STATUS_VPORT_MAX;
 
@@ -5618,33 +5609,6 @@
 	attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
 }
 
-/*
- *	Use this function to get vport's statistics.
- *
- *	param[in]	vport	pointer to bfa_fcs_vport_t.
- *	param[out]	stats	pointer to return vport statistics in
- *
- *	return None
- */
-void
-bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
-			struct bfa_vport_stats_s *stats)
-{
-	*stats = vport->vport_stats;
-}
-
-/*
- *	Use this function to clear vport's statistics.
- *
- *	param[in]	vport	pointer to bfa_fcs_vport_t.
- *
- *	return None
- */
-void
-bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
-{
-	memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
-}
 
 /*
  *	Lookup a virtual port. Excludes base port from lookup.
@@ -5682,9 +5646,9 @@
 	switch (status) {
 	case BFA_STATUS_OK:
 		/*
-		 * Initialiaze the V-Port fields
+		 * Initialize the V-Port fields
 		 */
-		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
+		__vport_fcid(vport) = vport->lps->lp_pid;
 		vport->vport_stats.fdisc_accepts++;
 		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
 		break;
@@ -5697,7 +5661,7 @@
 		break;
 
 	case BFA_STATUS_EPROTOCOL:
-		switch (bfa_lps_get_extstatus(vport->lps)) {
+		switch (vport->lps->ext_status) {
 		case BFA_EPROTO_BAD_ACCEPT:
 			vport->vport_stats.fdisc_acc_bad++;
 			break;
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index cf4a6e7..caaee6f 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -19,9 +19,9 @@
  *  rport.c Remote port implementation.
  */
 
+#include "bfad_drv.h"
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(FCS, RPORT);
 
@@ -75,30 +75,6 @@
 static void	bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
 				struct fchs_s *rx_fchs, u16 len);
 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/*
- *  fcs_rport_sm FCS rport state machine events
- */
-
-enum rport_event {
-	RPSM_EVENT_PLOGI_SEND	= 1,	/*  new rport; start with PLOGI */
-	RPSM_EVENT_PLOGI_RCVD	= 2,	/*  Inbound PLOGI from remote port */
-	RPSM_EVENT_PLOGI_COMP	= 3,	/*  PLOGI completed to rport	*/
-	RPSM_EVENT_LOGO_RCVD	= 4,	/*  LOGO from remote device	*/
-	RPSM_EVENT_LOGO_IMP	= 5,	/*  implicit logo for SLER	*/
-	RPSM_EVENT_FCXP_SENT	= 6,	/*  Frame from has been sent	*/
-	RPSM_EVENT_DELETE	= 7,	/*  RPORT delete request	*/
-	RPSM_EVENT_SCN		= 8,	/*  state change notification	*/
-	RPSM_EVENT_ACCEPTED	= 9,	/*  Good response from remote device */
-	RPSM_EVENT_FAILED	= 10,	/*  Request to rport failed.	*/
-	RPSM_EVENT_TIMEOUT	= 11,	/*  Rport SM timeout event	*/
-	RPSM_EVENT_HCB_ONLINE  = 12,	/*  BFA rport online callback	*/
-	RPSM_EVENT_HCB_OFFLINE = 13,	/*  BFA rport offline callback	*/
-	RPSM_EVENT_FC4_OFFLINE = 14,	/*  FC-4 offline complete	*/
-	RPSM_EVENT_ADDRESS_CHANGE = 15,	/*  Rport's PID has changed	*/
-	RPSM_EVENT_ADDRESS_DISC = 16,	/*  Need to Discover rport's PID */
-	RPSM_EVENT_PRLO_RCVD   = 17,	/*  PRLO from remote device	*/
-	RPSM_EVENT_PLOGI_RETRY = 18,	/*  Retry PLOGI continously */
-};
 
 static void	bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
 					enum rport_event event);
@@ -498,24 +474,24 @@
 
 	case RPSM_EVENT_LOGO_RCVD:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_LOGO_IMP:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_PLOGI_RCVD:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		bfa_fcs_rport_send_plogiacc(rport, NULL);
 		break;
 
 	case RPSM_EVENT_DELETE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_SCN:
@@ -824,7 +800,7 @@
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_DELETE:
@@ -856,7 +832,7 @@
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	default:
@@ -878,7 +854,7 @@
 	switch (event) {
 	case RPSM_EVENT_FC4_OFFLINE:
 		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
-		bfa_rport_offline(rport->bfa_rport);
+		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
 		break;
 
 	case RPSM_EVENT_SCN:
@@ -1459,7 +1435,7 @@
 			twin->stats.plogi_rcvd	  += rport->stats.plogi_rcvd;
 			twin->stats.plogi_accs++;
 
-			bfa_fcs_rport_delete(rport);
+			bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 
 			bfa_fcs_rport_update(twin, plogi_rsp);
 			twin->pid = rsp_fchs->s_id;
@@ -1992,13 +1968,14 @@
 	/*
 	 * allocate FC-4s
 	 */
-	bfa_assert(bfa_fcs_lport_is_initiator(port));
+	WARN_ON(!bfa_fcs_lport_is_initiator(port));
 
 	if (bfa_fcs_lport_is_initiator(port)) {
 		rport->itnim = bfa_fcs_itnim_create(rport);
 		if (!rport->itnim) {
 			bfa_trc(fcs, rpid);
-			bfa_rport_delete(rport->bfa_rport);
+			bfa_sm_send_event(rport->bfa_rport,
+						BFA_RPORT_SM_DELETE);
 			kfree(rport_drv);
 			return NULL;
 		}
@@ -2032,7 +2009,7 @@
 			bfa_fcs_rpf_rport_offline(rport);
 	}
 
-	bfa_rport_delete(rport->bfa_rport);
+	bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
 	bfa_fcs_lport_del_rport(port, rport);
 	kfree(rport->rp_drv);
 }
@@ -2307,40 +2284,8 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-/*
- * Called by bport/vport to delete a remote port instance.
- *
- * Rport delete is called under the following conditions:
- *		- vport is deleted
- *		- vf is deleted
- *		- explicit request from OS to delete rport
- */
-void
-bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
-}
 
 /*
- * Called by bport/vport to  when a target goes offline.
- *
- */
-void
-bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
-}
-
-/*
- * Called by bport in n2n when a target (attached port) becomes online.
- *
- */
-void
-bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
-}
-/*
  *	Called by bport/vport to notify SCN for the remote port
  */
 void
@@ -2350,23 +2295,6 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-/*
- *	Called by	fcpim to notify that the ITN cleanup is done.
- */
-void
-bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
-}
-
-/*
- *	Called by fcptm to notify that the ITN cleanup is done.
- */
-void
-bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
-}
 
 /*
  *	brief
@@ -2465,15 +2393,6 @@
  *		Called to process any unsolicted frames from this remote port
  */
 void
-bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
-{
-	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
-}
-
-/*
- *		Called to process any unsolicted frames from this remote port
- */
-void
 bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
 			struct fchs_s *fchs, u16 len)
 {
@@ -2586,6 +2505,7 @@
 	return bfa_sm_to_state(rport_sm_table, rport->sm);
 }
 
+
 /*
  *	brief
  *		 Called by the Driver to set rport delete/ageout timeout
@@ -2602,7 +2522,7 @@
 		bfa_fcs_rport_del_timeout = rport_tmo * 1000;
 }
 void
-bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
+bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
 {
 	bfa_trc(rport->fcs, rport->pid);
 
@@ -2621,106 +2541,6 @@
  *  fcs_rport_api FCS rport API.
  */
 
-/*
- *	Direct API to add a target by port wwn. This interface is used, for
- *	example, by bios when target pwwn is known from boot lun configuration.
- */
-bfa_status_t
-bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
-		struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv)
-{
-	bfa_trc(port->fcs, *pwwn);
-
-	return BFA_STATUS_OK;
-}
-
-/*
- *	Direct API to remove a target and its associated resources. This
- *	interface is used, for example, by driver to remove target
- *	ports from the target list for a VM.
- */
-bfa_status_t
-bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
-{
-
-	struct bfa_fcs_rport_s *rport;
-
-	bfa_trc(rport_in->fcs, rport_in->pwwn);
-
-	rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn);
-	if (rport == NULL) {
-		/*
-		 * TBD Error handling
-		 */
-		bfa_trc(rport_in->fcs, rport_in->pid);
-		return BFA_STATUS_UNKNOWN_RWWN;
-	}
-
-	/*
-	 * TBD if this remote port is online, send a logo
-	 */
-	return BFA_STATUS_OK;
-
-}
-
-/*
- *	Remote device status for display/debug.
- */
-void
-bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
-			struct bfa_rport_attr_s *rport_attr)
-{
-	struct bfa_rport_qos_attr_s qos_attr;
-	bfa_fcs_lport_t *port = rport->port;
-	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
-
-	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
-
-	rport_attr->pid = rport->pid;
-	rport_attr->pwwn = rport->pwwn;
-	rport_attr->nwwn = rport->nwwn;
-	rport_attr->cos_supported = rport->fc_cos;
-	rport_attr->df_sz = rport->maxfrsize;
-	rport_attr->state = bfa_fcs_rport_get_state(rport);
-	rport_attr->fc_cos = rport->fc_cos;
-	rport_attr->cisc = rport->cisc;
-	rport_attr->scsi_function = rport->scsi_function;
-	rport_attr->curr_speed  = rport->rpf.rpsc_speed;
-	rport_attr->assigned_speed  = rport->rpf.assigned_speed;
-
-	bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr);
-	rport_attr->qos_attr = qos_attr;
-
-	rport_attr->trl_enforced = BFA_FALSE;
-	if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
-		if (rport_speed == BFA_PORT_SPEED_UNKNOWN) {
-			/* Use default ratelim speed setting */
-			rport_speed =
-				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
-		}
-
-		if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
-			rport_attr->trl_enforced = BFA_TRUE;
-	}
-}
-
-/*
- *	Per remote device statistics.
- */
-void
-bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
-			struct bfa_rport_stats_s *stats)
-{
-	*stats = rport->stats;
-}
-
-void
-bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
-{
-	memset((char *)&rport->stats, 0,
-			sizeof(struct bfa_rport_stats_s));
-}
-
 struct bfa_fcs_rport_s *
 bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
@@ -2752,22 +2572,6 @@
 }
 
 /*
- * This API is to set the Rport's speed. Should be used when RPSC is not
- * supported by the rport.
- */
-void
-bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
-{
-	rport->rpf.assigned_speed  = speed;
-
-	/* Set this speed in f/w only if the RPSC speed is not available */
-	if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
-		bfa_rport_speed(rport->bfa_rport, speed);
-}
-
-
-
-/*
  * Remote port features (RPF) implementation.
  */
 
@@ -2827,7 +2631,7 @@
 	case RPFSM_EVENT_RPORT_ONLINE:
 		/* Send RPSC2 to a Brocade fabric only. */
 		if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
-			((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) ||
+			((rport->port->fabric->lps->brcd_switch) ||
 			(bfa_fcs_fabric_get_switch_oui(fabric) ==
 						BFA_FCS_BRCD_SWITCH_OUI))) {
 			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
@@ -3093,7 +2897,7 @@
 		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
 		bfa_trc(rport->fcs, num_ents);
 		if (num_ents > 0) {
-			bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
+			WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid);
 			bfa_trc(rport->fcs,
 				be16_to_cpu(rpsc2_acc->port_info[0].pid));
 			bfa_trc(rport->fcs,
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index d8464ae..977e681 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_cbreg.h"
 
@@ -110,7 +111,7 @@
 {
 	int i;
 
-	bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS));
+	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
 	bfa->msix.nvecs = nvecs;
 	if (nvecs == 1) {
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index b0efbc7..21018d9 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_modules.h"
 #include "bfi_ctreg.h"
 
@@ -116,7 +117,7 @@
 void
 bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 {
-	bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX));
+	WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
 	bfa_trc(bfa, nvecs);
 
 	bfa->msix.nvecs = nvecs;
@@ -143,7 +144,7 @@
 	for (; i <= BFA_MSIX_RME_Q3; i++)
 		bfa->msix.handler[i] = bfa_msix_rspq;
 
-	bfa_assert(i == BFA_MSIX_LPU_ERR);
+	WARN_ON(i != BFA_MSIX_LPU_ERR);
 	bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
 }
 
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 9f4aa39..c1f72c4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -15,11 +15,11 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_ctreg.h"
 #include "bfa_defs.h"
 #include "bfa_defs_svc.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(CNA, IOC);
 
@@ -29,7 +29,7 @@
 #define BFA_IOC_TOV		3000	/* msecs */
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
-#define BFA_IOC_HWINIT_MAX	2
+#define BFA_IOC_HWINIT_MAX	5
 #define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
 
 #define bfa_ioc_timer_start(__ioc)					\
@@ -42,11 +42,6 @@
 			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
 #define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
 
-#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
-#define BFA_DBG_FWTRC_LEN					\
-	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
-	 (sizeof(struct bfa_trc_mod_s) -			\
-	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
 
 /*
@@ -59,17 +54,16 @@
 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
-#define bfa_ioc_notify_hbfail(__ioc)			\
-			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
-
-#ifdef BFA_IOC_IS_UEFI
-#define bfa_ioc_is_bios_optrom(__ioc) (0)
-#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
-#else
-#define bfa_ioc_is_bios_optrom(__ioc)	\
-	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
-#define bfa_ioc_is_uefi(__ioc) (0)
-#endif
+#define bfa_ioc_notify_fail(__ioc)              \
+			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_join(__ioc)                \
+			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
+#define bfa_ioc_sync_leave(__ioc)               \
+			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
+#define bfa_ioc_sync_ack(__ioc)                 \
+			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
+#define bfa_ioc_sync_complete(__ioc)            \
+			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -81,29 +75,22 @@
  * forward declarations
  */
 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void bfa_ioc_timeout(void *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
-static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
 static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
 static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
-static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
-static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
+static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
+static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
-/*
- *  hal_ioc_sm
- */
 
 /*
  * IOC state machine definitions/declarations
@@ -116,10 +103,11 @@
 	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
 	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
 	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
-	IOC_E_FAILED		= 8,	/*  failure notice by iocpf sm	*/
-	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
-	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
-	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
+	IOC_E_INITFAILED	= 8,	/*  failure notice by iocpf sm	*/
+	IOC_E_PFFAILED		= 9,	/*  failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 10,	/*  heartbeat failure		*/
+	IOC_E_HWERROR		= 11,	/*  hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 12,	/*  timeout			*/
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +115,7 @@
 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
-bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
@@ -138,7 +126,7 @@
 	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
-	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
+	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
@@ -165,12 +153,6 @@
 /*
  * Forward declareations for iocpf state machine
  */
-static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
-static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
-static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
-static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
-static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
-static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
 
@@ -213,9 +195,14 @@
 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
+						enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
+bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
+						enum iocpf_event);
 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
 
 static struct bfa_sm_table_s iocpf_sm_table[] = {
@@ -226,9 +213,12 @@
 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
+	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
+	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
+	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
 };
 
@@ -301,7 +291,7 @@
 static void
 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
 {
-	bfa_iocpf_enable(ioc);
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
 }
 
 /*
@@ -318,13 +308,13 @@
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 		break;
 
-	case IOC_E_FAILED:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
-		break;
-
+	case IOC_E_PFFAILED:
+		/* !!! fall through !!! */
 	case IOC_E_HWERROR:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
-		bfa_iocpf_initfail(ioc);
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 		break;
 
 	case IOC_E_DISABLE:
@@ -333,7 +323,7 @@
 
 	case IOC_E_DETACH:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
-		bfa_iocpf_stop(ioc);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 		break;
 
 	case IOC_E_ENABLE:
@@ -367,18 +357,16 @@
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
 		break;
 
-	case IOC_E_FAILED:
-		bfa_ioc_timer_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
 		break;
-
+	case IOC_E_PFFAILED:
 	case IOC_E_HWERROR:
 		bfa_ioc_timer_stop(ioc);
-		/* fall through */
-
+		/* !!! fall through !!! */
 	case IOC_E_TIMEOUT:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
-		bfa_iocpf_getattrfail(ioc);
+		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
 		break;
 
 	case IOC_E_DISABLE:
@@ -415,22 +403,24 @@
 		break;
 
 	case IOC_E_DISABLE:
-		bfa_ioc_hb_stop(ioc);
+		bfa_hb_timer_stop(ioc);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
-	case IOC_E_FAILED:
-		bfa_ioc_hb_stop(ioc);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
-		break;
-
+	case IOC_E_PFFAILED:
 	case IOC_E_HWERROR:
-		bfa_ioc_hb_stop(ioc);
+		bfa_hb_timer_stop(ioc);
 		/* !!! fall through !!! */
-
 	case IOC_E_HBFAIL:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
-		bfa_iocpf_fail(ioc);
+		bfa_ioc_fail_notify(ioc);
+
+		if (ioc->iocpf.auto_recover)
+			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		else
+			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 		break;
 
 	default:
@@ -443,7 +433,7 @@
 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
 {
 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
-	bfa_iocpf_disable(ioc);
+	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
 }
 
@@ -466,7 +456,7 @@
 		 * after iocpf sm completes failure processing and
 		 * moves to disabled state.
 		 */
-		bfa_iocpf_fail(ioc);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 		break;
 
 	default:
@@ -499,7 +489,7 @@
 
 	case IOC_E_DETACH:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
-		bfa_iocpf_stop(ioc);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 		break;
 
 	default:
@@ -509,16 +499,16 @@
 
 
 static void
-bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
+bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
 {
-	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+	bfa_trc(ioc, 0);
 }
 
 /*
- * Hardware initialization failed.
+ * Hardware initialization retry.
  */
 static void
-bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
 {
 	bfa_trc(ioc, event);
 
@@ -527,11 +517,21 @@
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
 		break;
 
-	case IOC_E_FAILED:
+	case IOC_E_PFFAILED:
+	case IOC_E_HWERROR:
 		/*
-		 * Initialization failure during iocpf init retry.
+		 * Initialization retry failed.
 		 */
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		if (event != IOC_E_PFFAILED)
+			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
+		break;
+
+	case IOC_E_INITFAILED:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+		break;
+
+	case IOC_E_ENABLE:
 		break;
 
 	case IOC_E_DISABLE:
@@ -540,7 +540,7 @@
 
 	case IOC_E_DETACH:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
-		bfa_iocpf_stop(ioc);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
 		break;
 
 	default:
@@ -552,21 +552,7 @@
 static void
 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
 {
-	struct list_head			*qe;
-	struct bfa_ioc_hbfail_notify_s	*notify;
-	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
-
-	/*
-	 * Notify driver and common modules registered for notification.
-	 */
-	ioc->cbfn->hbfail_cbfn(ioc->bfa);
-	list_for_each(qe, &ioc->hb_notify_q) {
-		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
-		notify->cbfn(notify->cbarg);
-	}
-
-	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
-		"Heart Beat of IOC has failed\n");
+	bfa_trc(ioc, 0);
 }
 
 /*
@@ -579,23 +565,19 @@
 
 	switch (event) {
 
-	case IOC_E_FAILED:
-		/*
-		 * Initialization failure during iocpf recovery.
-		 * !!! Fall through !!!
-		 */
 	case IOC_E_ENABLE:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 		break;
 
-	case IOC_E_ENABLED:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
-		break;
-
 	case IOC_E_DISABLE:
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
 		break;
 
+	case IOC_E_DETACH:
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
+		break;
+
 	case IOC_E_HWERROR:
 		/*
 		 * HB failure notification, ignore.
@@ -606,13 +588,10 @@
 	}
 }
 
-
-
 /*
  * IOCPF State Machine
  */
 
-
 /*
  * Reset entry actions -- initialize state machine
  */
@@ -668,22 +647,29 @@
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
-			iocpf->retry_count = 0;
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			if (bfa_ioc_sync_complete(ioc)) {
+				iocpf->retry_count = 0;
+				bfa_ioc_sync_join(ioc);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			} else {
+				bfa_ioc_firmware_unlock(ioc);
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_sem_timer_start(ioc);
+			}
 		} else {
-			bfa_ioc_hw_sem_release(ioc);
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
 		}
 		break;
 
 	case IOCPF_E_DISABLE:
-		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_sem_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
-		bfa_ioc_pf_disabled(ioc);
+		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
 		break;
 
 	case IOCPF_E_STOP:
-		bfa_ioc_hw_sem_get_cancel(ioc);
+		bfa_sem_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 		break;
 
@@ -726,7 +712,7 @@
 	case IOCPF_E_DISABLE:
 		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
-		bfa_ioc_pf_disabled(ioc);
+		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
 		break;
 
 	case IOCPF_E_STOP:
@@ -760,13 +746,18 @@
 
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
-		iocpf->retry_count = 0;
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		if (bfa_ioc_sync_complete(ioc)) {
+			bfa_ioc_sync_join(ioc);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+		} else {
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_sem_timer_start(ioc);
+		}
 		break;
 
 	case IOCPF_E_DISABLE:
-		bfa_ioc_hw_sem_get_cancel(ioc);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
 	default:
@@ -774,12 +765,11 @@
 	}
 }
 
-
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 {
 	bfa_iocpf_timer_start(iocpf->ioc);
-	bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
+	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 }
 
 /*
@@ -806,23 +796,16 @@
 		 */
 
 	case IOCPF_E_TIMEOUT:
-		iocpf->retry_count++;
-		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
-			bfa_iocpf_timer_start(ioc);
-			bfa_ioc_reset(ioc, BFA_TRUE);
-			break;
-		}
-
-		bfa_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		if (event == IOCPF_E_TIMEOUT)
-			bfa_ioc_pf_failed(ioc);
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
 	case IOCPF_E_DISABLE:
-		bfa_ioc_hw_sem_release(ioc);
 		bfa_iocpf_timer_stop(ioc);
+		bfa_ioc_sync_leave(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
@@ -831,7 +814,6 @@
 	}
 }
 
-
 static void
 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 {
@@ -853,7 +835,7 @@
 	switch (event) {
 	case IOCPF_E_FWRSP_ENABLE:
 		bfa_iocpf_timer_stop(ioc);
-		bfa_ioc_hw_sem_release(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
 		break;
 
@@ -864,23 +846,15 @@
 		 */
 
 	case IOCPF_E_TIMEOUT:
-		iocpf->retry_count++;
-		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
-			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-			break;
-		}
-
-		bfa_ioc_hw_sem_release(ioc);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		if (event == IOCPF_E_TIMEOUT)
-			bfa_ioc_pf_failed(ioc);
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
 	case IOCPF_E_DISABLE:
 		bfa_iocpf_timer_stop(ioc);
-		bfa_ioc_hw_sem_release(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
 		break;
 
@@ -893,12 +867,10 @@
 	}
 }
 
-
-
 static void
 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_ioc_pf_enabled(iocpf->ioc);
+	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
 }
 
 static void
@@ -914,20 +886,21 @@
 		break;
 
 	case IOCPF_E_GETATTRFAIL:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
 	case IOCPF_E_FAIL:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
 		break;
 
 	case IOCPF_E_FWREADY:
-		if (bfa_ioc_is_operational(ioc))
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
-		else
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-
-		bfa_ioc_pf_failed(ioc);
+		if (bfa_ioc_is_operational(ioc)) {
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
+		} else {
+			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
+		}
 		break;
 
 	default:
@@ -935,7 +908,6 @@
 	}
 }
 
-
 static void
 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
 {
@@ -957,7 +929,7 @@
 	case IOCPF_E_FWRSP_DISABLE:
 	case IOCPF_E_FWREADY:
 		bfa_iocpf_timer_stop(ioc);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
 	case IOCPF_E_FAIL:
@@ -968,7 +940,7 @@
 
 	case IOCPF_E_TIMEOUT:
 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
 
 	case IOCPF_E_FWRSP_ENABLE:
@@ -979,13 +951,44 @@
 	}
 }
 
+static void
+bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * IOC hb ack request is being removed.
+ */
+static void
+bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_sync_leave(ioc);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
 /*
  * IOC disable completion entry.
  */
 static void
 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_ioc_pf_disabled(iocpf->ioc);
+	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
 }
 
 static void
@@ -997,6 +1000,7 @@
 
 	switch (event) {
 	case IOCPF_E_ENABLE:
+		iocpf->retry_count = 0;
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
 		break;
 
@@ -1010,11 +1014,64 @@
 	}
 }
 
+static void
+bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
+{
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+/*
+ * Hardware initialization failed.
+ */
+static void
+bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		bfa_ioc_notify_fail(ioc);
+		bfa_ioc_sync_ack(ioc);
+		iocpf->retry_count++;
+		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
+			bfa_ioc_sync_leave(ioc);
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_STOP:
+		bfa_sem_timer_stop(ioc);
+		bfa_ioc_firmware_unlock(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
 
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_iocpf_timer_start(iocpf->ioc);
+	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
 }
 
 /*
@@ -1029,47 +1086,77 @@
 
 	switch (event) {
 	case IOCPF_E_DISABLE:
-		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
 	case IOCPF_E_STOP:
-		bfa_iocpf_timer_stop(ioc);
 		bfa_ioc_firmware_unlock(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
 		break;
 
-	case IOCPF_E_TIMEOUT:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
 }
 
-
 static void
-bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
 {
 	/*
 	 * Mark IOC as failed in hardware and stop firmware.
 	 */
 	bfa_ioc_lpu_stop(iocpf->ioc);
-	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
-
-	/*
-	 * Notify other functions on HB failure.
-	 */
-	bfa_ioc_notify_hbfail(iocpf->ioc);
 
 	/*
 	 * Flush any queued up mailbox requests.
 	 */
 	bfa_ioc_mbox_hbfail(iocpf->ioc);
 
-	if (iocpf->auto_recover)
-		bfa_iocpf_recovery_timer_start(iocpf->ioc);
+	bfa_ioc_hw_sem_get(iocpf->ioc);
+}
+
+static void
+bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
+{
+	struct bfa_ioc_s *ioc = iocpf->ioc;
+
+	bfa_trc(ioc, event);
+
+	switch (event) {
+	case IOCPF_E_SEMLOCKED:
+		iocpf->retry_count = 0;
+		bfa_ioc_sync_ack(ioc);
+		bfa_ioc_notify_fail(ioc);
+		if (!iocpf->auto_recover) {
+			bfa_ioc_sync_leave(ioc);
+			writel(1, ioc->ioc_regs.ioc_sem_reg);
+			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+		} else {
+			if (bfa_ioc_sync_complete(ioc))
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
+			else {
+				writel(1, ioc->ioc_regs.ioc_sem_reg);
+				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
+			}
+		}
+		break;
+
+	case IOCPF_E_DISABLE:
+		bfa_sem_timer_stop(ioc);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
+		break;
+
+	case IOCPF_E_FAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioc, event);
+	}
+}
+
+static void
+bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
+{
 }
 
 /*
@@ -1084,24 +1171,16 @@
 
 	switch (event) {
 	case IOCPF_E_DISABLE:
-		if (iocpf->auto_recover)
-			bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
-	case IOCPF_E_TIMEOUT:
-		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
 }
 
-
-
 /*
- *  hal_ioc_pvt BFA IOC private functions
+ *  BFA IOC private functions
  */
 
 static void
@@ -1139,16 +1218,10 @@
 	if (r32 == 0)
 		return BFA_TRUE;
 
-	bfa_assert(cnt < BFA_SEM_SPINCNT);
+	WARN_ON(cnt >= BFA_SEM_SPINCNT);
 	return BFA_FALSE;
 }
 
-void
-bfa_ioc_sem_release(void __iomem *sem_reg)
-{
-	writel(1, sem_reg);
-}
-
 static void
 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
 {
@@ -1167,18 +1240,6 @@
 	bfa_sem_timer_start(ioc);
 }
 
-void
-bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
-{
-	writel(1, ioc->ioc_regs.ioc_sem_reg);
-}
-
-static void
-bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
-{
-	bfa_sem_timer_stop(ioc);
-}
-
 /*
  * Initialize LPU local memory (aka secondary memory / SRAM)
  */
@@ -1212,7 +1273,7 @@
 	 * If memory initialization is not successful, IOC timeout will catch
 	 * such failures.
 	 */
-	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
+	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
 	bfa_trc(ioc, pss_ctl);
 
 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
@@ -1258,8 +1319,8 @@
 	int		i;
 	u32	*fwsig = (u32 *) fwhdr;
 
-	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
-	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
@@ -1304,12 +1365,6 @@
 {
 	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
 
-	/*
-	 * If bios/efi boot (flash based) -- return true
-	 */
-	if (bfa_ioc_is_bios_optrom(ioc))
-		return BFA_TRUE;
-
 	bfa_ioc_fwver_get(ioc, &fwhdr);
 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
@@ -1342,7 +1397,6 @@
 		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
 }
 
-
 static void
 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 {
@@ -1362,22 +1416,6 @@
 	boot_env = BFI_BOOT_LOADER_OS;
 
 	/*
-	 * Flash based firmware boot BIOS env.
-	 */
-	if (bfa_ioc_is_bios_optrom(ioc)) {
-		boot_type = BFI_BOOT_TYPE_FLASH;
-		boot_env = BFI_BOOT_LOADER_BIOS;
-	}
-
-	/*
-	 * Flash based firmware boot UEFI env.
-	 */
-	if (bfa_ioc_is_uefi(ioc)) {
-		boot_type = BFI_BOOT_TYPE_FLASH;
-		boot_env = BFI_BOOT_LOADER_UEFI;
-	}
-
-	/*
 	 * check if firmware is valid
 	 */
 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1405,8 +1443,7 @@
 	 * convergence, IOC will be in operational state when 2nd driver
 	 * is loaded.
 	 */
-	if (ioc_fwstate == BFI_IOC_DISABLED ||
-	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
+	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
 
 		/*
 		 * When using MSI-X any pending firmware ready event should
@@ -1442,7 +1479,7 @@
 	bfa_trc(ioc, msgp[0]);
 	bfa_trc(ioc, len);
 
-	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);
+	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
 
 	/*
 	 * first write msg to mailbox registers
@@ -1465,12 +1502,12 @@
 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
 {
 	struct bfi_ioc_ctrl_req_s enable_req;
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 		    bfa_ioc_portid(ioc));
 	enable_req.ioc_class = ioc->ioc_mc;
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
 }
@@ -1504,7 +1541,6 @@
 
 	hb_count = readl(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
-		printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
 		bfa_ioc_recover(ioc);
 		return;
 	} else {
@@ -1522,13 +1558,6 @@
 	bfa_hb_timer_start(ioc);
 }
 
-static void
-bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
-{
-	bfa_hb_timer_stop(ioc);
-}
-
-
 /*
  *	Initiate a full firmware download.
  */
@@ -1550,8 +1579,8 @@
 	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
 	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
 
-	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
-	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
 
 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
@@ -1581,7 +1610,8 @@
 		}
 	}
 
-	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+			ioc->ioc_regs.host_page_num_fn);
 
 	/*
 	 * Set boot type and boot param at the end.
@@ -1592,11 +1622,6 @@
 			swab32(boot_env));
 }
 
-static void
-bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
-{
-	bfa_ioc_hwinit(ioc, force);
-}
 
 /*
  * Update BFA configuration from firmware configuration.
@@ -1683,12 +1708,13 @@
 static bfa_status_t
 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
 {
-	u32 pgnum, loff, r32;
+	u32 pgnum, loff;
+	__be32 r32;
 	int i, len;
 	u32 *buf = tbuf;
 
-	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
-	loff = bfa_ioc_smem_pgoff(ioc, soff);
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+	loff = PSS_SMEM_PGOFF(soff);
 	bfa_trc(ioc, pgnum);
 	bfa_trc(ioc, loff);
 	bfa_trc(ioc, sz);
@@ -1719,11 +1745,12 @@
 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 		}
 	}
-	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+			ioc->ioc_regs.host_page_num_fn);
 	/*
 	 *  release semaphore.
 	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 
 	bfa_trc(ioc, pgnum);
 	return BFA_STATUS_OK;
@@ -1742,8 +1769,8 @@
 	int i, len;
 	u32 pgnum, loff;
 
-	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
-	loff = bfa_ioc_smem_pgoff(ioc, soff);
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+	loff = PSS_SMEM_PGOFF(soff);
 	bfa_trc(ioc, pgnum);
 	bfa_trc(ioc, loff);
 	bfa_trc(ioc, sz);
@@ -1773,35 +1800,38 @@
 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 		}
 	}
-	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
+	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+			ioc->ioc_regs.host_page_num_fn);
 
 	/*
 	 *  release semaphore.
 	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 	bfa_trc(ioc, pgnum);
 	return BFA_STATUS_OK;
 }
 
-/*
- * hal iocpf to ioc interface
- */
 static void
-bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
+bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
 {
-	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
-}
+	struct list_head		*qe;
+	struct bfa_ioc_hbfail_notify_s	*notify;
+	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
-static void
-bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
-}
+	/*
+	 * Notify driver and common modules registered for notification.
+	 */
+	ioc->cbfn->hbfail_cbfn(ioc->bfa);
+	list_for_each(qe, &ioc->hb_notify_q) {
+		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
+		notify->cbfn(notify->cbarg);
+	}
 
-static void
-bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(ioc, IOC_E_FAILED);
+	bfa_ioc_debug_save_ftrc(ioc);
+
+	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
+		"Heart Beat of IOC has failed\n");
+
 }
 
 static void
@@ -1817,12 +1847,6 @@
 		"with the driver version\n");
 }
 
-
-
-/*
- *  hal_ioc_public
- */
-
 bfa_status_t
 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
 {
@@ -1838,7 +1862,7 @@
 	/*
 	 *  release semaphore.
 	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
 
 	return BFA_STATUS_OK;
 }
@@ -1909,7 +1933,7 @@
 void
 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
 {
-	u32	*msgp = mbmsg;
+	__be32	*msgp = mbmsg;
 	u32	r32;
 	int		i;
 
@@ -1962,7 +1986,7 @@
 
 	default:
 		bfa_trc(ioc, msg->mh.msg_id);
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -2043,15 +2067,6 @@
 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
 }
 
-/*
- * Return size of dma memory required.
- */
-u32
-bfa_ioc_meminfo(void)
-{
-	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
-}
-
 void
 bfa_ioc_enable(struct bfa_ioc_s *ioc)
 {
@@ -2068,18 +2083,6 @@
 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
-/*
- * Returns memory required for saving firmware trace in case of crash.
- * Driver must call this interface to allocate memory required for
- * automatic saving of firmware trace. Driver should call
- * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
- * trace memory.
- */
-int
-bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
-{
-	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
-}
 
 /*
  * Initialize memory for saving firmware trace. Driver must initialize
@@ -2089,19 +2092,7 @@
 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
 {
 	ioc->dbg_fwsave	    = dbg_fwsave;
-	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
-}
-
-u32
-bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
-{
-	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
-}
-
-u32
-bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
-{
-	return PSS_SMEM_PGOFF(fmaddr);
+	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
 /*
@@ -2265,14 +2256,13 @@
 }
 
 /*
- * Add to IOC heartbeat failure notification queue. To be used by common
- * modules such as cee, port, diag.
+ * Reset IOC fwstate registers.
  */
 void
-bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
-			struct bfa_ioc_hbfail_notify_s *notify)
+bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
 {
-	list_add_tail(&notify->qe, &ioc->hb_notify_q);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
 }
 
 #define BFA_MFG_NAME "Brocade"
@@ -2306,7 +2296,7 @@
 	else
 		ad_attr->prototype = 0;
 
-	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
+	ad_attr->pwwn = ioc->attr->pwwn;
 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
 
 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
@@ -2317,7 +2307,8 @@
 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
 
 	ad_attr->cna_capable = ioc->cna;
-	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
+	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
+				!ad_attr->is_mezz;
 }
 
 enum bfa_ioc_type_e
@@ -2330,7 +2321,7 @@
 	else if (ioc->ioc_mc == BFI_MC_LL)
 		return BFA_IOC_TYPE_LL;
 	else {
-		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
+		WARN_ON(ioc->ioc_mc != BFI_MC_LL);
 		return BFA_IOC_TYPE_LL;
 	}
 }
@@ -2354,7 +2345,7 @@
 void
 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
 {
-	bfa_assert(chip_rev);
+	WARN_ON(!chip_rev);
 
 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
 
@@ -2386,7 +2377,7 @@
 {
 	struct bfi_ioc_attr_s	*ioc_attr;
 
-	bfa_assert(model);
+	WARN_ON(!model);
 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
 
 	ioc_attr = ioc->attr;
@@ -2455,27 +2446,6 @@
 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
 }
 
-/*
- *  hal_wwn_public
- */
-wwn_t
-bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
-{
-	return ioc->attr->pwwn;
-}
-
-wwn_t
-bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
-{
-	return ioc->attr->nwwn;
-}
-
-u64
-bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
-{
-	return ioc->attr->mfg_pwwn;
-}
-
 mac_t
 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
 {
@@ -2488,18 +2458,6 @@
 		return ioc->attr->mac;
 }
 
-wwn_t
-bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
-{
-	return ioc->attr->mfg_pwwn;
-}
-
-wwn_t
-bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
-{
-	return ioc->attr->mfg_nwwn;
-}
-
 mac_t
 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
 {
@@ -2541,14 +2499,6 @@
 	return BFA_STATUS_OK;
 }
 
-/*
- * Clear saved firmware trace
- */
-void
-bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
-{
-	ioc->dbg_fwsave_once = BFA_TRUE;
-}
 
 /*
  * Retrieve saved firmware trace from a prior IOC failure.
@@ -2701,13 +2651,16 @@
  * Save firmware trace if configured.
  */
 static void
-bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
+bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
 {
 	int		tlen;
 
-	if (ioc->dbg_fwsave_len) {
-		tlen = ioc->dbg_fwsave_len;
-		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+	if (ioc->dbg_fwsave_once) {
+		ioc->dbg_fwsave_once = BFA_FALSE;
+		if (ioc->dbg_fwsave_len) {
+			tlen = ioc->dbg_fwsave_len;
+			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+		}
 	}
 }
 
@@ -2717,11 +2670,6 @@
 static void
 bfa_ioc_recover(struct bfa_ioc_s *ioc)
 {
-	if (ioc->dbg_fwsave_once) {
-		ioc->dbg_fwsave_once = BFA_FALSE;
-		bfa_ioc_debug_save(ioc);
-	}
-
 	bfa_ioc_stats(ioc, ioc_hbfails);
 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
 }
@@ -2734,45 +2682,8 @@
 }
 
 /*
- *  hal_iocpf_pvt BFA IOC PF private functions
+ *  BFA IOC PF private functions
  */
-
-static void
-bfa_iocpf_enable(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
-}
-
-static void
-bfa_iocpf_disable(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
-}
-
-static void
-bfa_iocpf_fail(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
-}
-
-static void
-bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
-}
-
-static void
-bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
-}
-
-static void
-bfa_iocpf_stop(struct bfa_ioc_s *ioc)
-{
-	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
-}
-
 static void
 bfa_iocpf_timeout(void *ioc_arg)
 {
@@ -2794,12 +2705,6 @@
  *  bfa timer function
  */
 void
-bfa_timer_init(struct bfa_timer_mod_s *mod)
-{
-	INIT_LIST_HEAD(&mod->timer_q);
-}
-
-void
 bfa_timer_beat(struct bfa_timer_mod_s *mod)
 {
 	struct list_head *qh = &mod->timer_q;
@@ -2843,8 +2748,8 @@
 		    void (*timercb) (void *), void *arg, unsigned int timeout)
 {
 
-	bfa_assert(timercb != NULL);
-	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
+	WARN_ON(timercb == NULL);
+	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
 
 	timer->timeout = timeout;
 	timer->timercb = timercb;
@@ -2859,7 +2764,7 @@
 void
 bfa_timer_stop(struct bfa_timer_s *timer)
 {
-	bfa_assert(!list_empty(&timer->qe));
+	WARN_ON(list_empty(&timer->qe));
 
 	list_del(&timer->qe);
 }
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 9c407a8..ec9cf08 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -18,10 +18,15 @@
 #ifndef __BFA_IOC_H__
 #define __BFA_IOC_H__
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_cs.h"
 #include "bfi.h"
 
+#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
+#define BFA_DBG_FWTRC_LEN					\
+	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
+	(sizeof(struct bfa_trc_mod_s) -				\
+	BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 /*
  * BFA timer declarations
  */
@@ -47,7 +52,6 @@
 #define BFA_TIMER_FREQ 200 /* specified in millisecs */
 
 void bfa_timer_beat(struct bfa_timer_mod_s *mod);
-void bfa_timer_init(struct bfa_timer_mod_s *mod);
 void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
 			bfa_timer_cbfn_t timercb, void *arg,
 			unsigned int timeout);
@@ -70,7 +74,7 @@
 #define bfa_swap_words(_x)  (	\
 	((_x) << 32) | ((_x) >> 32))
 
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 #define bfa_sge_to_be(_x)
 #define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
 #define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
@@ -115,8 +119,8 @@
 static inline void
 __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) pa;
-	dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = (__be32) pa;
+	dma_addr->a32.addr_hi = (__be32) (pa >> 32);
 }
 
 
@@ -125,8 +129,8 @@
 static inline void
 __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
-	dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = cpu_to_be32(pa);
+	dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
 }
 
 struct bfa_ioc_regs_s {
@@ -145,8 +149,11 @@
 	void __iomem *host_page_num_fn;
 	void __iomem *heartbeat;
 	void __iomem *ioc_fwstate;
+	void __iomem *alt_ioc_fwstate;
 	void __iomem *ll_halt;
+	void __iomem *alt_ll_halt;
 	void __iomem *err_set;
+	void __iomem *ioc_fail_sync;
 	void __iomem *shirq_isr_next;
 	void __iomem *shirq_msk_next;
 	void __iomem *smem_page_start;
@@ -254,8 +261,12 @@
 	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
 					bfa_boolean_t msix);
-	void		(*ioc_notify_hbfail)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_notify_fail)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_join)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_leave)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_sync_ack)		(struct bfa_ioc_s *ioc);
+	bfa_boolean_t	(*ioc_sync_complete)	(struct bfa_ioc_s *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -325,7 +336,6 @@
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		enum bfi_mclass mc);
-u32 bfa_ioc_meminfo(void);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa);
 void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 void bfa_ioc_disable(struct bfa_ioc_s *ioc);
@@ -340,6 +350,7 @@
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
+void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
 enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
 void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
@@ -353,24 +364,16 @@
 void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
 void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 		struct bfa_adapter_attr_s *ad_attr);
-int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover);
 void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
 bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
 		int *trclen);
-void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
 bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
 				 int *trclen);
 bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
 	u32 *offset, int *buflen);
-u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
-u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
 void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
-void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
-	struct bfa_ioc_hbfail_notify_s *notify);
 bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
-void bfa_ioc_sem_release(void __iomem *sem_reg);
-void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
 bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
@@ -381,13 +384,8 @@
 /*
  * bfa mfg wwn API functions
  */
-wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc);
-wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
-u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
 
 /*
  * F/W Image Size & Chunk
@@ -421,7 +419,7 @@
 		return bfi_image_ct_cna_get_chunk(off);	break;
 	case BFI_IMAGE_CB_FC:
 		return bfi_image_cb_fc_get_chunk(off);	break;
-	default: return 0;
+	default: return NULL;
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 9099450..e4a0713 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_cbreg.h"
 #include "bfa_defs.h"
@@ -29,10 +30,14 @@
 static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
 
-struct bfa_ioc_hwif_s hwif_cb;
+static struct bfa_ioc_hwif_s hwif_cb;
 
 /*
  * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +51,12 @@
 	hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
 	hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
 	hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
-	hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail;
+	hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
 	hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
+	hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
+	hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
+	hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
+	hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
 
 	ioc->ioc_hwif = &hwif_cb;
 }
@@ -58,6 +67,21 @@
 static bfa_boolean_t
 bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
 {
+	struct bfi_ioc_image_hdr_s fwhdr;
+	uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	if (fwstate == BFI_IOC_UNINIT)
+		return BFA_TRUE;
+
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+
+	if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
+		return BFA_TRUE;
+
+	bfa_trc(ioc, fwstate);
+	bfa_trc(ioc, fwhdr.exec);
+	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
 	return BFA_TRUE;
 }
 
@@ -70,7 +94,7 @@
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
 {
 	writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 	readl(ioc->ioc_regs.err_set);
@@ -108,9 +132,11 @@
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
 	}
 
 	/*
@@ -181,10 +207,71 @@
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
+{
+}
 
+static void
+bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
+{
+}
+
+static void
+bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
+{
+	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+}
+
+static bfa_boolean_t
+bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t fwstate, alt_fwstate;
+	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	/*
+	 * At this point, this IOC is holding the hw sem in the
+	 * start path (fwcheck) OR in the disable/enable path
+	 * OR to check if the other IOC has acknowledged failure.
+	 *
+	 * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL
+	 * cannot be in OP state when this function is called.
+	 * can not be in OP state when this function is called.
+	 *
+	 * However, this IOC could still be in OP state when
+	 * the OS driver is starting up, if the OptROM code has
+	 * left it in that state.
+	 *
+	 * If we had marked this IOC's fwstate as BFI_IOC_FAIL
+	 * in the failure case and now, if the fwstate is not
+	 * BFI_IOC_FAIL, it implies that the other PCI fn has
+	 * reinitialized the ASIC or this IOC got disabled, so
+	 * return TRUE.
+	 */
+	if (fwstate == BFI_IOC_UNINIT ||
+		fwstate == BFI_IOC_INITING ||
+		fwstate == BFI_IOC_DISABLED ||
+		fwstate == BFI_IOC_MEMTEST ||
+		fwstate == BFI_IOC_OP)
+		return BFA_TRUE;
+	else {
+		alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate);
+		if (alt_fwstate == BFI_IOC_FAIL ||
+			alt_fwstate == BFI_IOC_DISABLED ||
+			alt_fwstate == BFI_IOC_UNINIT ||
+			alt_fwstate == BFI_IOC_INITING ||
+			alt_fwstate == BFI_IOC_MEMTEST)
+			return BFA_TRUE;
+		else
+			return BFA_FALSE;
+	}
+}
 
 bfa_status_t
 bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 115730c..008d129 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -15,12 +15,22 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_ioc.h"
 #include "bfi_ctreg.h"
 #include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CT);
 
+#define bfa_ioc_ct_sync_pos(__ioc)      \
+		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
+#define BFA_IOC_SYNC_REQD_SH    16
+#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
+#define bfa_ioc_ct_clear_sync_ackd(__val)       (__val & 0xffff0000)
+#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
+#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
+			(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
+
 /*
  * forward declarations
  */
@@ -29,10 +39,14 @@
 static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
-static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
 
-struct bfa_ioc_hwif_s hwif_ct;
+static struct bfa_ioc_hwif_s hwif_ct;
 
 /*
  * Called from bfa_ioc_attach() to map asic specific calls.
@@ -46,8 +60,12 @@
 	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
 	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
 	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
-	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
+	hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
 	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+	hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
+	hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
+	hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
+	hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
 
 	ioc->ioc_hwif = &hwif_ct;
 }
@@ -83,7 +101,8 @@
 	 */
 	if (usecnt == 0) {
 		writel(1, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(0, ioc->ioc_regs.ioc_fail_sync);
 		bfa_trc(ioc, usecnt);
 		return BFA_TRUE;
 	}
@@ -94,14 +113,14 @@
 	/*
 	 * Use count cannot be non-zero and chip in uninitialized state.
 	 */
-	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
+	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
 
 	/*
 	 * Check if another driver with a different firmware is active
 	 */
 	bfa_ioc_fwver_get(ioc, &fwhdr);
 	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 		bfa_trc(ioc, usecnt);
 		return BFA_FALSE;
 	}
@@ -111,7 +130,7 @@
 	 */
 	usecnt++;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 	bfa_trc(ioc, usecnt);
 	return BFA_TRUE;
 }
@@ -139,25 +158,27 @@
 	 */
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
-	bfa_assert(usecnt > 0);
+	WARN_ON(usecnt <= 0);
 
 	usecnt--;
 	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
 	bfa_trc(ioc, usecnt);
 
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
 /*
  * Notify other functions on HB failure.
  */
 static void
-bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
 {
 	if (ioc->cna) {
 		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
 		/* Wait for halt to take effect */
 		readl(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.alt_ll_halt);
 	} else {
 		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
 		readl(ioc->ioc_regs.err_set);
@@ -209,15 +230,19 @@
 	if (ioc->port_id == 0) {
 		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
 		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
 	} else {
 		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
 		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
 		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
 		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
 	}
 
 	/*
@@ -235,6 +260,7 @@
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
 
 	/*
 	 * sram memory access
@@ -313,7 +339,7 @@
 	if (ioc->cna) {
 		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
 		writel(0, ioc->ioc_regs.ioc_usage_reg);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 	}
 
 	/*
@@ -322,10 +348,80 @@
 	 * will lock it instead of clearing it.
 	 */
 	readl(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
+/*
+ * Synchronized IOC failure processing routines
+ */
+static void
+bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
 
+	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
+					bfa_ioc_ct_sync_pos(ioc);
+
+	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
+}
+
+static void
+bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+
+	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
+		ioc->ioc_regs.ioc_fail_sync);
+}
+
+static bfa_boolean_t
+bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
+{
+	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
+	uint32_t tmp_ackd;
+
+	if (sync_ackd == 0)
+		return BFA_TRUE;
+
+	/*
+	 * The check below is to see whether any other PCI fn
+	 * has reinitialized the ASIC (reset sync_ackd bits)
+	 * and failed again while this IOC was waiting for hw
+	 * semaphore (in bfa_iocpf_sm_semwait()).
+	 */
+	tmp_ackd = sync_ackd;
+	if ((sync_reqd &  bfa_ioc_ct_sync_pos(ioc)) &&
+		!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
+		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
+
+	if (sync_reqd == sync_ackd) {
+		writel(bfa_ioc_ct_clear_sync_ackd(r32),
+			ioc->ioc_regs.ioc_fail_sync);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
+		return BFA_TRUE;
+	}
+
+	/*
+	 * If another PCI fn reinitialized and failed again while
+	 * this IOC was waiting for hw sem, the sync_ackd bit for
+	 * this IOC needs to be set again to allow reinitialization.
+	 */
+	if (tmp_ackd != sync_ackd)
+		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
+
+	return BFA_FALSE;
+}
 
 /*
  * Check the firmware state to know if pll_init has been completed already
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 15407ab..ab79ff6 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -99,7 +99,6 @@
 	void (*iocdisable) (struct bfa_s *bfa);
 };
 
-extern struct bfa_module_s *hal_mods[];
 
 struct bfa_s {
 	void			*bfad;		/*  BFA driver instance    */
@@ -116,8 +115,6 @@
 	struct bfa_msix_s	msix;
 };
 
-extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
-extern bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[];
 extern bfa_boolean_t bfa_auto_recover;
 extern struct bfa_module_s hal_mod_sgpg;
 extern struct bfa_module_s hal_mod_fcport;
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
deleted file mode 100644
index 65df62e..0000000
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_OS_INC_H__
-#define __BFA_OS_INC_H__
-
-#include <linux/types.h>
-#include <linux/version.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-#include <linux/interrupt.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_transport_fc.h>
-#include <scsi/scsi_transport.h>
-
-#ifdef __BIG_ENDIAN
-#define __BIGENDIAN
-#endif
-
-static inline u64 bfa_os_get_log_time(void)
-{
-	u64 system_time = 0;
-	struct timeval tv;
-	do_gettimeofday(&tv);
-
-	/* We are interested in seconds only. */
-	system_time = tv.tv_sec;
-	return system_time;
-}
-
-#define bfa_io_lat_clock_res_div HZ
-#define bfa_io_lat_clock_res_mul 1000
-
-#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
-do {									\
-	if (((mask) == 4) || (level[1] <= '4'))				\
-		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
-} while (0)
-
-#define bfa_swap_3b(_x)				\
-	((((_x) & 0xff) << 16) |		\
-	((_x) & 0x00ff00) |			\
-	(((_x) & 0xff0000) >> 16))
-
-#define bfa_os_swap_sgaddr(_x)  ((u64)(                                 \
-	(((u64)(_x) & (u64)0x00000000000000ffull) << 32)        |       \
-	(((u64)(_x) & (u64)0x000000000000ff00ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x00000000ff000000ull) << 32)        |       \
-	(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32)        |       \
-	(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
-
-#ifndef __BIGENDIAN
-#define bfa_os_hton3b(_x)  bfa_swap_3b(_x)
-#define bfa_os_sgaddr(_x)  (_x)
-#else
-#define bfa_os_hton3b(_x)  (_x)
-#define bfa_os_sgaddr(_x)  bfa_os_swap_sgaddr(_x)
-#endif
-
-#define bfa_os_ntoh3b(_x)  bfa_os_hton3b(_x)
-#define bfa_os_u32(__pa64) ((__pa64) >> 32)
-
-#define BFA_TRC_TS(_trcm)				\
-	({						\
-		struct timeval tv;			\
-							\
-		do_gettimeofday(&tv);			\
-		(tv.tv_sec*1000000+tv.tv_usec);		\
-	 })
-
-#define boolean_t int
-
-/*
- * For current time stamp, OS API will fill-in
- */
-struct bfa_timeval_s {
-	u32	tv_sec;		/*  seconds        */
-	u32	tv_usec;	/*  microseconds   */
-};
-
-static inline void
-bfa_os_gettimeofday(struct bfa_timeval_s *tv)
-{
-	struct timeval  tmp_tv;
-
-	do_gettimeofday(&tmp_tv);
-	tv->tv_sec = (u32) tmp_tv.tv_sec;
-	tv->tv_usec = (u32) tmp_tv.tv_usec;
-}
-
-static inline void
-wwn2str(char *wwn_str, u64 wwn)
-{
-	union {
-		u64 wwn;
-		u8 byte[8];
-	} w;
-
-	w.wwn = wwn;
-	sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0],
-		w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5],
-		w.byte[6], w.byte[7]);
-}
-
-static inline void
-fcid2str(char *fcid_str, u32 fcid)
-{
-	union {
-		u32 fcid;
-		u8 byte[4];
-	} f;
-
-	f.fcid = fcid;
-	sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]);
-}
-
-#endif /* __BFA_OS_INC_H__ */
diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h
index 501f0ed..1c9baa6 100644
--- a/drivers/scsi/bfa/bfa_plog.h
+++ b/drivers/scsi/bfa/bfa_plog.h
@@ -151,9 +151,5 @@
 void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 			enum bfa_plog_eid event, u16 misc,
 			struct fchs_s *fchdr, u32 pld_w0);
-void bfa_plog_clear(struct bfa_plog_s *plog);
-void bfa_plog_enable(struct bfa_plog_s *plog);
-void bfa_plog_disable(struct bfa_plog_s *plog);
-bfa_boolean_t	bfa_plog_get_setting(struct bfa_plog_s *plog);
 
 #endif /* __BFA_PORTLOG_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index fff9622..3f8e9d6 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -15,6 +15,7 @@
  * General Public License for more details.
  */
 
+#include "bfad_drv.h"
 #include "bfa_defs_svc.h"
 #include "bfa_port.h"
 #include "bfi.h"
@@ -29,14 +30,14 @@
 bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
 {
 	u32    *dip = (u32 *) stats;
-	u32    t0, t1;
+	__be32    t0, t1;
 	int	    i;
 
 	for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
 		i += 2) {
 		t0 = dip[i];
 		t1 = dip[i + 1];
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		dip[i] = be32_to_cpu(t0);
 		dip[i + 1] = be32_to_cpu(t1);
 #else
@@ -96,13 +97,13 @@
 	port->stats_busy = BFA_FALSE;
 
 	if (status == BFA_STATUS_OK) {
-		struct bfa_timeval_s tv;
+		struct timeval tv;
 
 		memcpy(port->stats, port->stats_dma.kva,
 		       sizeof(union bfa_port_stats_u));
 		bfa_port_stats_swap(port, port->stats);
 
-		bfa_os_gettimeofday(&tv);
+		do_gettimeofday(&tv);
 		port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
 	}
 
@@ -124,7 +125,7 @@
 static void
 bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
 {
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
 	port->stats_status = status;
 	port->stats_busy   = BFA_FALSE;
@@ -132,7 +133,7 @@
 	/*
 	* re-initialize time stamp for stats reset
 	*/
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;
 
 	if (port->stats_cbfn) {
@@ -185,7 +186,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -432,9 +433,9 @@
 bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 		 void *dev, struct bfa_trc_mod_s *trcmod)
 {
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
-	bfa_assert(port);
+	WARN_ON(!port);
 
 	port->dev    = dev;
 	port->ioc    = ioc;
@@ -447,27 +448,13 @@
 
 	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
 	bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
-	bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
+	list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
 
 	/*
 	 * initialize time stamp for stats reset
 	 */
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;
 
 	bfa_trc(port, 0);
 }
-
-/*
- * bfa_port_detach()
- *
- *
- * @param[in] port - Pointer to the Port module data structure
- *
- * @return void
- */
-void
-bfa_port_detach(struct bfa_port_s *port)
-{
-	bfa_trc(port, 0);
-}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index dbce9df..c4ee9db 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -48,7 +48,6 @@
 
 void	     bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
 				void *dev, struct bfa_trc_mod_s *trcmod);
-void	     bfa_port_detach(struct bfa_port_s *port);
 void	     bfa_port_hbfail(void *arg);
 
 bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 37e16ac..1d34921 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -15,11 +15,10 @@
  * General Public License for more details.
  */
 
-#include "bfa_os_inc.h"
+#include "bfad_drv.h"
 #include "bfa_plog.h"
 #include "bfa_cs.h"
 #include "bfa_modules.h"
-#include "bfad_drv.h"
 
 BFA_TRC_FILE(HAL, FCXP);
 BFA_MODULE(fcxp);
@@ -41,19 +40,6 @@
 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
 
-/*
- *  lps_pvt BFA LPS private functions
- */
-
-enum bfa_lps_event {
-	BFA_LPS_SM_LOGIN	= 1,	/* login request from user	*/
-	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user	*/
-	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout	*/
-	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue	*/
-	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user		*/
-	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline		*/
-	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link	*/
-};
 
 /*
  * FC PORT related definitions
@@ -66,7 +52,6 @@
 	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
 
-
 /*
  * BFA port state machine events
  */
@@ -113,19 +98,6 @@
 		}							\
 } while (0)
 
-
-enum bfa_rport_event {
-	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event		*/
-	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport	*/
-	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online		*/
-	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline		*/
-	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response		*/
-	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure		*/
-	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware	*/
-	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed		*/
-	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue	*/
-};
-
 /*
  * forward declarations FCXP related functions
  */
@@ -159,6 +131,7 @@
 static void bfa_lps_free(struct bfa_lps_s *lps);
 static void bfa_lps_send_login(struct bfa_lps_s *lps);
 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
+static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
@@ -171,6 +144,8 @@
 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
 					event);
 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
+					enum bfa_lps_event event);
 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
 					event);
@@ -312,6 +287,18 @@
 	return 0;
 }
 
+static u64
+bfa_get_log_time(void)
+{
+	u64 system_time = 0;
+	struct timeval tv;
+	do_gettimeofday(&tv);
+
+	/* We are interested in seconds only. */
+	system_time = tv.tv_sec;
+	return system_time;
+}
+
 static void
 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
 {
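
The new bfa_get_log_time() helper replaces the old bfa_os_get_log_time() wrapper and simply reports wall-clock seconds. The same capture-at-reset / subtract-later pattern backs the secs_reset fields touched elsewhere in this patch; a stand-alone sketch (illustrative names, not driver code):

	#include <linux/types.h>
	#include <linux/time.h>

	static time_t reset_time;	/* captured when the stats are cleared */

	static void stats_mark_reset(void)
	{
		struct timeval tv;

		do_gettimeofday(&tv);
		reset_time = tv.tv_sec;
	}

	static u32 stats_secs_since_reset(void)
	{
		struct timeval tv;

		do_gettimeofday(&tv);
		return tv.tv_sec - reset_time;
	}
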
@@ -322,7 +309,7 @@
 		return;
 
 	if (plkd_validate_logrec(pl_rec)) {
-		bfa_assert(0);
+		WARN_ON(1);
 		return;
 	}
 
@@ -332,7 +319,7 @@
 
 	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
 
-	pl_recp->tv = bfa_os_get_log_time();
+	pl_recp->tv = bfa_get_log_time();
 	BFA_PL_LOG_REC_INCR(plog->tail);
 
 	if (plog->head == plog->tail)
@@ -437,29 +424,6 @@
 	}
 }
 
-void
-bfa_plog_clear(struct bfa_plog_s *plog)
-{
-	plog->head = plog->tail = 0;
-}
-
-void
-bfa_plog_enable(struct bfa_plog_s *plog)
-{
-	plog->plog_enabled = 1;
-}
-
-void
-bfa_plog_disable(struct bfa_plog_s *plog)
-{
-	plog->plog_enabled = 0;
-}
-
-bfa_boolean_t
-bfa_plog_get_setting(struct bfa_plog_s *plog)
-{
-	return (bfa_boolean_t)plog->plog_enabled;
-}
 
 /*
  *  fcxp_pvt BFA FCXP private functions
@@ -637,15 +601,15 @@
 	       bfa_fcxp_get_sglen_t sglen_cbfn)
 {
 
-	bfa_assert(bfa != NULL);
+	WARN_ON(bfa == NULL);
 
 	bfa_trc(bfa, fcxp->fcxp_tag);
 
 	if (n_sgles == 0) {
 		*use_ibuf = 1;
 	} else {
-		bfa_assert(*sga_cbfn != NULL);
-		bfa_assert(*sglen_cbfn != NULL);
+		WARN_ON(*sga_cbfn == NULL);
+		WARN_ON(*sglen_cbfn == NULL);
 
 		*use_ibuf = 0;
 		*r_sga_cbfn = sga_cbfn;
@@ -657,7 +621,7 @@
 		 * alloc required sgpgs
 		 */
 		if (n_sgles > BFI_SGE_INLINE)
-			bfa_assert(0);
+			WARN_ON(1);
 	}
 
 }
@@ -671,7 +635,7 @@
 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
 {
 
-	bfa_assert(bfa != NULL);
+	WARN_ON(bfa == NULL);
 
 	bfa_trc(bfa, fcxp->fcxp_tag);
 
@@ -708,7 +672,7 @@
 		return;
 	}
 
-	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
+	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
 	list_del(&fcxp->qe);
 	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
 }
@@ -757,7 +721,7 @@
 
 	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
 
-	bfa_assert(fcxp->send_cbfn != NULL);
+	WARN_ON(fcxp->send_cbfn == NULL);
 
 	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
 
@@ -913,13 +877,13 @@
 					BFA_FCXP_REQ_PLD_PA(fcxp));
 	} else {
 		if (fcxp->nreq_sgles > 0) {
-			bfa_assert(fcxp->nreq_sgles == 1);
+			WARN_ON(fcxp->nreq_sgles != 1);
 			hal_fcxp_set_local_sges(send_req->req_sge,
 						reqi->req_tot_len,
 						fcxp->req_sga_cbfn(fcxp->caller,
 								   0));
 		} else {
-			bfa_assert(reqi->req_tot_len == 0);
+			WARN_ON(reqi->req_tot_len != 0);
 			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
 		}
 	}
@@ -928,20 +892,20 @@
 	 * setup rsp sgles
 	 */
 	if (fcxp->use_irspbuf == 1) {
-		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
+		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
 
 		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
 					BFA_FCXP_RSP_PLD_PA(fcxp));
 
 	} else {
 		if (fcxp->nrsp_sgles > 0) {
-			bfa_assert(fcxp->nrsp_sgles == 1);
+			WARN_ON(fcxp->nrsp_sgles != 1);
 			hal_fcxp_set_local_sges(send_req->rsp_sge,
 						rspi->rsp_maxlen,
 						fcxp->rsp_sga_cbfn(fcxp->caller,
 								   0));
 		} else {
-			bfa_assert(rspi->rsp_maxlen == 0);
+			WARN_ON(rspi->rsp_maxlen != 0);
 			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
 		}
 	}
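
One detail that is easy to misread in these conversions: bfa_assert(expr) fired when expr was false, while WARN_ON(cond) fires when cond is true, so every call site negates its condition when switching over. Taken directly from the hunk above:

	/* before: complain unless the length is zero */
	bfa_assert(rspi->rsp_maxlen == 0);

	/* after: same check, condition inverted for WARN_ON() */
	WARN_ON(rspi->rsp_maxlen != 0);
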
@@ -955,10 +919,6 @@
 }
 
 /*
- *  hal_fcxp_api BFA FCXP API
- */
-
-/*
  * Allocate an FCXP instance to send a response or to send a request
  * that has a response. Request/response buffers are allocated by caller.
  *
@@ -990,7 +950,7 @@
 {
 	struct bfa_fcxp_s *fcxp = NULL;
 
-	bfa_assert(bfa != NULL);
+	WARN_ON(bfa == NULL);
 
 	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
 	if (fcxp == NULL)
@@ -1017,7 +977,7 @@
 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 	void	*reqbuf;
 
-	bfa_assert(fcxp->use_ireqbuf == 1);
+	WARN_ON(fcxp->use_ireqbuf != 1);
 	reqbuf = ((u8 *)mod->req_pld_list_kva) +
 		fcxp->fcxp_tag * mod->req_pld_sz;
 	return reqbuf;
@@ -1044,7 +1004,7 @@
 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 	void	*rspbuf;
 
-	bfa_assert(fcxp->use_irspbuf == 1);
+	WARN_ON(fcxp->use_irspbuf != 1);
 
 	rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
 		fcxp->fcxp_tag * mod->rsp_pld_sz;
@@ -1052,7 +1012,7 @@
 }
 
 /*
- *		Free the BFA FCXP
+ * Free the BFA FCXP
  *
  * @param[in]	fcxp			BFA fcxp pointer
  *
@@ -1063,7 +1023,7 @@
 {
 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 
-	bfa_assert(fcxp != NULL);
+	WARN_ON(fcxp == NULL);
 	bfa_trc(mod->bfa, fcxp->fcxp_tag);
 	bfa_fcxp_put(fcxp);
 }
@@ -1142,7 +1102,7 @@
 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
 {
 	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
-	bfa_assert(0);
+	WARN_ON(1);
 	return BFA_STATUS_OK;
 }
 
@@ -1157,7 +1117,7 @@
 {
 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-	bfa_assert(list_empty(&mod->fcxp_free_q));
+	WARN_ON(!list_empty(&mod->fcxp_free_q));
 
 	wqe->alloc_cbfn = alloc_cbfn;
 	wqe->alloc_cbarg = alloc_cbarg;
@@ -1178,7 +1138,7 @@
 {
 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
+	WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
 	list_del(&wqe->qe);
 }
 
@@ -1199,12 +1159,6 @@
 	fcxp->send_cbfn = bfa_fcxp_null_comp;
 }
 
-
-
-/*
- *  hal_fcxp_public BFA FCXP public functions
- */
-
 void
 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 {
@@ -1215,7 +1169,7 @@
 
 	default:
 		bfa_trc(bfa, msg->mhdr.msg_id);
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -1303,6 +1257,12 @@
 			else
 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
 					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
+			/* If N2N, send the assigned PID to FW */
+			bfa_trc(lps->bfa, lps->fport);
+			bfa_trc(lps->bfa, lps->lp_pid);
+
+			if (!lps->fport && lps->lp_pid)
+				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
 		} else {
 			bfa_sm_set_state(lps, bfa_lps_sm_init);
 			if (lps->fdisc)
@@ -1321,6 +1281,11 @@
 		bfa_sm_set_state(lps, bfa_lps_sm_init);
 		break;
 
+	case BFA_LPS_SM_SET_N2N_PID:
+		bfa_trc(lps->bfa, lps->fport);
+		bfa_trc(lps->bfa, lps->lp_pid);
+		break;
+
 	default:
 		bfa_sm_fault(lps->bfa, event);
 	}
@@ -1389,6 +1354,14 @@
 			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
 		break;
 
+	case BFA_LPS_SM_SET_N2N_PID:
+		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
+			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+		} else
+			bfa_lps_send_set_n2n_pid(lps);
+		break;
+
 	case BFA_LPS_SM_OFFLINE:
 	case BFA_LPS_SM_DELETE:
 		bfa_sm_set_state(lps, bfa_lps_sm_init);
@@ -1400,6 +1373,48 @@
 }
 
 /*
+ * login complete, awaiting space in the request queue to send the N2N PID
+ */
+static void
+bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_RESUME:
+		bfa_sm_set_state(lps, bfa_lps_sm_online);
+		bfa_lps_send_set_n2n_pid(lps);
+		break;
+
+	case BFA_LPS_SM_LOGOUT:
+		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGO, 0, "Logout");
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+
+		/* Let the vport module know about this event */
+		bfa_lps_cvl_event(lps);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/*
  * logout in progress - awaiting firmware response
  */
 static void
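
The new wait state exists for the driver's usual request-queue backpressure handling: if there is no room to post the BFI_LPS_H2I_N2N_PID_REQ message, the LPS parks a wait-queue element and retries when BFA_LPS_SM_RESUME arrives. Stripped of the state-machine wrapper, the pattern is (a sketch reusing only the helpers already visible in these hunks):

	if (bfa_reqq_full(lps->bfa, lps->reqq)) {
		/* no room now; the reqq resume callback raises BFA_LPS_SM_RESUME */
		bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
		bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
	} else {
		/* room available: build and post the request immediately */
		bfa_lps_send_set_n2n_pid(lps);
	}
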
@@ -1540,15 +1555,16 @@
 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
 	struct bfa_lps_s	*lps;
 
-	bfa_assert(rsp->lp_tag < mod->num_lps);
+	WARN_ON(rsp->lp_tag >= mod->num_lps);
 	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
 
 	lps->status = rsp->status;
 	switch (rsp->status) {
 	case BFA_STATUS_OK:
 		lps->fport	= rsp->f_port;
+		if (lps->fport)
+			lps->lp_pid = rsp->lp_pid;
 		lps->npiv_en	= rsp->npiv_en;
-		lps->lp_pid	= rsp->lp_pid;
 		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
 		lps->pr_pwwn	= rsp->port_name;
 		lps->pr_nwwn	= rsp->node_name;
@@ -1587,7 +1603,7 @@
 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
 	struct bfa_lps_s	*lps;
 
-	bfa_assert(rsp->lp_tag < mod->num_lps);
+	WARN_ON(rsp->lp_tag >= mod->num_lps);
 	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
 
 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
@@ -1640,7 +1656,7 @@
 	struct bfi_lps_login_req_s	*m;
 
 	m = bfa_reqq_next(lps->bfa, lps->reqq);
-	bfa_assert(m);
+	WARN_ON(!m);
 
 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
 		bfa_lpuid(lps->bfa));
@@ -1665,7 +1681,7 @@
 	struct bfi_lps_logout_req_s *m;
 
 	m = bfa_reqq_next(lps->bfa, lps->reqq);
-	bfa_assert(m);
+	WARN_ON(!m);
 
 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
 		bfa_lpuid(lps->bfa));
@@ -1676,6 +1692,25 @@
 }
 
 /*
+ * send n2n pid set request to firmware
+ */
+static void
+bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
+{
+	struct bfi_lps_n2n_pid_req_s *m;
+
+	m = bfa_reqq_next(lps->bfa, lps->reqq);
+	WARN_ON(!m);
+
+	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
+		bfa_lpuid(lps->bfa));
+
+	m->lp_tag = lps->lp_tag;
+	m->lp_pid = lps->lp_pid;
+	bfa_reqq_produce(lps->bfa, lps->reqq);
+}
+
+/*
  * Indirect login completion handler for non-fcs
  */
 static void
@@ -1853,14 +1888,6 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
-/*
- * Initiate a lport logout (flogi).
- */
-void
-bfa_lps_flogo(struct bfa_lps_s *lps)
-{
-	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
-}
 
 /*
  * Initiate a lport FDISC logout.
@@ -1871,24 +1898,6 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
-/*
- * Discard a pending login request -- should be called only for
- * link down handling.
- */
-void
-bfa_lps_discard(struct bfa_lps_s *lps)
-{
-	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
-}
-
-/*
- * Return lport services tag
- */
-u8
-bfa_lps_get_tag(struct bfa_lps_s *lps)
-{
-	return lps->lp_tag;
-}
 
 /*
  * Return lport services tag given the pid
@@ -1909,55 +1918,6 @@
 	return 0;
 }
 
-/*
- * return if fabric login indicates support for NPIV
- */
-bfa_boolean_t
-bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
-{
-	return lps->npiv_en;
-}
-
-/*
- * Return TRUE if attached to F-Port, else return FALSE
- */
-bfa_boolean_t
-bfa_lps_is_fport(struct bfa_lps_s *lps)
-{
-	return lps->fport;
-}
-
-/*
- * Return TRUE if attached to a Brocade Fabric
- */
-bfa_boolean_t
-bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
-{
-	return lps->brcd_switch;
-}
-/*
- * return TRUE if authentication is required
- */
-bfa_boolean_t
-bfa_lps_is_authreq(struct bfa_lps_s *lps)
-{
-	return lps->auth_req;
-}
-
-bfa_eproto_status_t
-bfa_lps_get_extstatus(struct bfa_lps_s *lps)
-{
-	return lps->ext_status;
-}
-
-/*
- * return port id assigned to the lport
- */
-u32
-bfa_lps_get_pid(struct bfa_lps_s *lps)
-{
-	return lps->lp_pid;
-}
 
 /*
  * return port id assigned to the base lport
@@ -1971,57 +1931,16 @@
 }
 
 /*
- * Return bb_credit assigned in FLOGI response
+ * Set the n2n PID, which gets assigned during PLOGI in point-to-point mode
  */
-u16
-bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
+void
+bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
 {
-	return lps->pr_bbcred;
-}
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, n2n_pid);
 
-/*
- * Return peer port name
- */
-wwn_t
-bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
-{
-	return lps->pr_pwwn;
-}
-
-/*
- * Return peer node name
- */
-wwn_t
-bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
-{
-	return lps->pr_nwwn;
-}
-
-/*
- * return reason code if login request is rejected
- */
-u8
-bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
-{
-	return lps->lsrjt_rsn;
-}
-
-/*
- * return explanation code if login request is rejected
- */
-u8
-bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
-{
-	return lps->lsrjt_expl;
-}
-
-/*
- * Return fpma/spma MAC for lport
- */
-mac_t
-bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
-{
-	return lps->lp_mac;
+	lps->lp_pid = n2n_pid;
+	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
 }
 
 /*
@@ -2050,7 +1969,7 @@
 
 	default:
 		bfa_trc(bfa, m->mhdr.msg_id);
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -2068,6 +1987,8 @@
 		/*
 		 * Start event after IOC is configured and BFA is started.
 		 */
+		fcport->use_flash_cfg = BFA_TRUE;
+
 		if (bfa_fcport_send_enable(fcport)) {
 			bfa_trc(fcport->bfa, BFA_TRUE);
 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
@@ -2178,7 +2099,7 @@
 		bfa_fcport_update_linkinfo(fcport);
 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
 
-		bfa_assert(fcport->event_cbfn);
+		WARN_ON(!fcport->event_cbfn);
 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
 		break;
 
@@ -2229,7 +2150,7 @@
 	case BFA_FCPORT_SM_LINKUP:
 		bfa_fcport_update_linkinfo(fcport);
 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
-		bfa_assert(fcport->event_cbfn);
+		WARN_ON(!fcport->event_cbfn);
 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
 		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
@@ -2803,12 +2724,6 @@
 	}
 }
 
-
-
-/*
- *  hal_port_private
- */
-
 static void
 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
 {
@@ -2839,7 +2754,7 @@
 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -2906,7 +2821,7 @@
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
 	struct bfa_fcport_ln_s *ln = &fcport->ln;
-	struct bfa_timeval_s tv;
+	struct timeval tv;
 
 	memset(fcport, 0, sizeof(struct bfa_fcport_s));
 	fcport->bfa = bfa;
@@ -2920,7 +2835,7 @@
 	/*
 	 * initialize time stamp for stats reset
 	 */
-	bfa_os_gettimeofday(&tv);
+	do_gettimeofday(&tv);
 	fcport->stats_reset_time = tv.tv_sec;
 
 	/*
@@ -3039,6 +2954,7 @@
 	m->port_cfg = fcport->cfg;
 	m->msgtag = fcport->msgtag;
 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
+	m->use_flash_cfg = fcport->use_flash_cfg;
 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
@@ -3089,8 +3005,8 @@
 static void
 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
 {
-	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
-	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
+	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
+	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
 
 	bfa_trc(fcport->bfa, fcport->pwwn);
 	bfa_trc(fcport->bfa, fcport->nwwn);
@@ -3127,7 +3043,7 @@
 	struct bfa_qos_stats_s *s)
 {
 	u32	*dip = (u32 *) d;
-	u32	*sip = (u32 *) s;
+	__be32	*sip = (__be32 *) s;
 	int		i;
 
 	/* Now swap the 32 bit fields */
@@ -3140,12 +3056,12 @@
 	struct bfa_fcoe_stats_s *s)
 {
 	u32	*dip = (u32 *) d;
-	u32	*sip = (u32 *) s;
+	__be32	*sip = (__be32 *) s;
 	int		i;
 
 	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
 	     i = i + 2) {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 		dip[i] = be32_to_cpu(sip[i]);
 		dip[i + 1] = be32_to_cpu(sip[i + 1]);
 #else
@@ -3162,7 +3078,7 @@
 
 	if (complete) {
 		if (fcport->stats_status == BFA_STATUS_OK) {
-			struct bfa_timeval_s tv;
+			struct timeval tv;
 
 			/* Swap FC QoS or FCoE stats */
 			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
@@ -3174,7 +3090,7 @@
 					&fcport->stats_ret->fcoe,
 					&fcport->stats->fcoe);
 
-				bfa_os_gettimeofday(&tv);
+				do_gettimeofday(&tv);
 				fcport->stats_ret->fcoe.secs_reset =
 					tv.tv_sec - fcport->stats_reset_time;
 			}
@@ -3233,12 +3149,12 @@
 	struct bfa_fcport_s *fcport = cbarg;
 
 	if (complete) {
-		struct bfa_timeval_s tv;
+		struct timeval tv;
 
 		/*
 		 * re-initialize time stamp for stats reset
 		 */
-		bfa_os_gettimeofday(&tv);
+		do_gettimeofday(&tv);
 		fcport->stats_reset_time = tv.tv_sec;
 
 		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
@@ -3303,8 +3219,8 @@
 	int link_bm = 0;
 
 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
-	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
-		   scn->trunk_state == BFA_TRUNK_OFFLINE);
+	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
+		   scn->trunk_state != BFA_TRUNK_OFFLINE);
 
 	bfa_trc(fcport->bfa, trunk->attr.state);
 	bfa_trc(fcport->bfa, scn->trunk_state);
@@ -3396,12 +3312,6 @@
 	}
 }
 
-
-
-/*
- *  hal_port_public
- */
-
 /*
  * Called to initialize port attributes
  */
@@ -3419,9 +3329,9 @@
 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
 
-	bfa_assert(fcport->cfg.maxfrsize);
-	bfa_assert(fcport->cfg.rx_bbcredit);
-	bfa_assert(fcport->speed_sup);
+	WARN_ON(!fcport->cfg.maxfrsize);
+	WARN_ON(!fcport->cfg.rx_bbcredit);
+	WARN_ON(!fcport->speed_sup);
 }
 
 /*
@@ -3441,8 +3351,28 @@
 
 	switch (msg->mhdr.msg_id) {
 	case BFI_FCPORT_I2H_ENABLE_RSP:
-		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
+
+			if (fcport->use_flash_cfg) {
+				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
+				fcport->cfg.maxfrsize =
+					cpu_to_be16(fcport->cfg.maxfrsize);
+				fcport->cfg.path_tov =
+					cpu_to_be16(fcport->cfg.path_tov);
+				fcport->cfg.q_depth =
+					cpu_to_be16(fcport->cfg.q_depth);
+
+				if (fcport->cfg.trunked)
+					fcport->trunk.attr.state =
+						BFA_TRUNK_OFFLINE;
+				else
+					fcport->trunk.attr.state =
+						BFA_TRUNK_DISABLED;
+				fcport->use_flash_cfg = BFA_FALSE;
+			}
+
 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+		}
 		break;
 
 	case BFI_FCPORT_I2H_DISABLE_RSP:
@@ -3498,17 +3428,11 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 	break;
 	}
 }
 
-
-
-/*
- *  hal_port_api
- */
-
 /*
  * Registered callback for port events.
  */
@@ -3732,8 +3656,8 @@
 	attr->nwwn = fcport->nwwn;
 	attr->pwwn = fcport->pwwn;
 
-	attr->factorypwwn =  bfa_ioc_get_mfg_pwwn(&bfa->ioc);
-	attr->factorynwwn =  bfa_ioc_get_mfg_nwwn(&bfa->ioc);
+	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
+	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;
 
 	memcpy(&attr->pport_cfg, &fcport->cfg,
 		sizeof(struct bfa_port_cfg_s));
@@ -3751,7 +3675,7 @@
 	/* beacon attributes */
 	attr->beacon = fcport->beacon;
 	attr->link_e2e_beacon = fcport->link_e2e_beacon;
-	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
+	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
 	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
 
 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
@@ -3818,89 +3742,6 @@
 	return BFA_STATUS_OK;
 }
 
-/*
- * Fetch FCQoS port statistics
- */
-bfa_status_t
-bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-	bfa_cb_port_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FC mode */
-	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
-}
-
-/*
- * Reset FCoE port statistics
- */
-bfa_status_t
-bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FC mode */
-	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
-}
-
-/*
- * Fetch FCQoS port statistics
- */
-bfa_status_t
-bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-	bfa_cb_port_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FCoE mode */
-	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
-}
-
-/*
- * Reset FCoE port statistics
- */
-bfa_status_t
-bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FCoE mode */
-	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
-}
-
-void
-bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	qos_attr->state = fcport->qos_attr.state;
-	qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
-}
-
-void
-bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
-	struct bfa_qos_vc_attr_s *qos_vc_attr)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
-	u32 i = 0;
-
-	qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
-	qos_vc_attr->shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
-	qos_vc_attr->elp_opmode_flags  =
-			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
-
-	/* Individual VC info */
-	while (i < qos_vc_attr->total_vc_count) {
-		qos_vc_attr->vc_info[i].vc_credit	=
-				bfa_vc_attr->vc_info[i].vc_credit;
-		qos_vc_attr->vc_info[i].borrow_credit	=
-				bfa_vc_attr->vc_info[i].borrow_credit;
-		qos_vc_attr->vc_info[i].priority	=
-				bfa_vc_attr->vc_info[i].priority;
-		++i;
-	}
-}
 
 /*
  * Fetch port attributes.
@@ -3924,60 +3765,6 @@
 
 }
 
-void
-bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
-
-	bfa_trc(bfa, on_off);
-	bfa_trc(bfa, fcport->cfg.qos_enabled);
-
-	bfa_trc(bfa, ioc_type);
-
-	if (ioc_type == BFA_IOC_TYPE_FC) {
-		fcport->cfg.qos_enabled = on_off;
-		/*
-		 * Notify fcpim of the change in QoS state
-		 */
-		bfa_fcpim_update_ioredirect(bfa);
-	}
-}
-
-void
-bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, on_off);
-	bfa_trc(bfa, fcport->cfg.ratelimit);
-
-	fcport->cfg.ratelimit = on_off;
-	if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
-		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
-}
-
-/*
- * Configure default minimum ratelim speed
- */
-bfa_status_t
-bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, speed);
-
-	/* Auto and speeds greater than the supported speed, are invalid */
-	if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
-		bfa_trc(bfa, fcport->speed_sup);
-		return BFA_STATUS_UNSUPP_SPEED;
-	}
-
-	fcport->cfg.trl_def_speed = speed;
-
-	return BFA_STATUS_OK;
-}
-
 /*
  * Get default minimum ratelim speed
  */
@@ -3990,32 +3777,6 @@
 	return fcport->cfg.trl_def_speed;
 
 }
-void
-bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, status);
-	bfa_trc(bfa, fcport->diag_busy);
-
-	fcport->diag_busy = status;
-}
-
-void
-bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
-	bfa_boolean_t link_e2e_beacon)
-{
-	struct bfa_s *bfa = dev;
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, beacon);
-	bfa_trc(bfa, link_e2e_beacon);
-	bfa_trc(bfa, fcport->beacon);
-	bfa_trc(bfa, fcport->link_e2e_beacon);
-
-	fcport->beacon = beacon;
-	fcport->link_e2e_beacon = link_e2e_beacon;
-}
 
 bfa_boolean_t
 bfa_fcport_is_linkup(struct bfa_s *bfa)
@@ -4036,63 +3797,6 @@
 	return fcport->cfg.qos_enabled;
 }
 
-bfa_status_t
-bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
-
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
-
-	bfa_trc(bfa, fcport->cfg.trunked);
-	bfa_trc(bfa, trunk->attr.state);
-	*attr = trunk->attr;
-	attr->port_id = bfa_lps_get_base_pid(bfa);
-
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_trunk_enable_cfg(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
-
-	bfa_trc(bfa, 1);
-	trunk->attr.state = BFA_TRUNK_OFFLINE;
-	fcport->cfg.trunked = BFA_TRUE;
-}
-
-bfa_status_t
-bfa_trunk_enable(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
-
-	bfa_trc(bfa, 1);
-
-	trunk->attr.state   = BFA_TRUNK_OFFLINE;
-	bfa_fcport_disable(bfa);
-	fcport->cfg.trunked = BFA_TRUE;
-	bfa_fcport_enable(bfa);
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_trunk_disable(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
-
-	bfa_trc(bfa, 0);
-	trunk->attr.state   = BFA_TRUNK_DISABLED;
-	bfa_fcport_disable(bfa);
-	fcport->cfg.trunked = BFA_FALSE;
-	bfa_fcport_enable(bfa);
-	return BFA_STATUS_OK;
-}
-
-
 /*
  * Rport State machine functions
  */
@@ -4606,8 +4310,8 @@
 	mod->rps_list = rp;
 	mod->num_rports = cfg->fwcfg.num_rports;
 
-	bfa_assert(mod->num_rports &&
-		   !(mod->num_rports & (mod->num_rports - 1)));
+	WARN_ON(!mod->num_rports ||
+		   (mod->num_rports & (mod->num_rports - 1)));
 
 	for (i = 0; i < mod->num_rports; i++, rp++) {
 		memset(rp, 0, sizeof(struct bfa_rport_s));
@@ -4675,7 +4379,7 @@
 {
 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
 
-	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
+	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
 	list_del(&rport->qe);
 	list_add_tail(&rport->qe, &mod->rp_free_q);
 }
@@ -4788,13 +4492,13 @@
 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
 		rp->fw_handle = msg.create_rsp->fw_handle;
 		rp->qos_attr = msg.create_rsp->qos_attr;
-		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
+		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
 		break;
 
 	case BFI_RPORT_I2H_DELETE_RSP:
 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
-		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
+		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
 		break;
 
@@ -4806,7 +4510,7 @@
 
 	default:
 		bfa_trc(bfa, m->mhdr.msg_id);
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
@@ -4828,24 +4532,18 @@
 
 	rp->bfa = bfa;
 	rp->rport_drv = rport_drv;
-	bfa_rport_clear_stats(rp);
+	memset(&rp->stats, 0, sizeof(rp->stats));
 
-	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
+	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
 
 	return rp;
 }
 
 void
-bfa_rport_delete(struct bfa_rport_s *rport)
-{
-	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
-}
-
-void
 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
 {
-	bfa_assert(rport_info->max_frmsz != 0);
+	WARN_ON(rport_info->max_frmsz == 0);
 
 	/*
 	 * Some JBODs are seen not to set the PDU size correctly in PLOGI
@@ -4861,43 +4559,15 @@
 }
 
 void
-bfa_rport_offline(struct bfa_rport_s *rport)
-{
-	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
-}
-
-void
 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
 {
-	bfa_assert(speed != 0);
-	bfa_assert(speed != BFA_PORT_SPEED_AUTO);
+	WARN_ON(speed == 0);
+	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
 
 	rport->rport_info.speed = speed;
 	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
 }
 
-void
-bfa_rport_get_stats(struct bfa_rport_s *rport,
-	struct bfa_rport_hal_stats_s *stats)
-{
-	*stats = rport->stats;
-}
-
-void
-bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
-					struct bfa_rport_qos_attr_s *qos_attr)
-{
-	qos_attr->qos_priority  = rport->qos_attr.qos_priority;
-	qos_attr->qos_flow_id  = be32_to_cpu(rport->qos_attr.qos_flow_id);
-
-}
-
-void
-bfa_rport_clear_stats(struct bfa_rport_s *rport)
-{
-	memset(&rport->stats, 0, sizeof(rport->stats));
-}
-
 
 /*
  * SGPG related functions
@@ -4952,7 +4622,7 @@
 	sgpg_pa.pa = mod->sgpg_arr_pa;
 	mod->free_sgpgs = mod->num_sgpgs;
 
-	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
+	WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));
 
 	for (i = 0; i < mod->num_sgpgs; i++) {
 		memset(hsgpg, 0, sizeof(*hsgpg));
@@ -4993,12 +4663,6 @@
 {
 }
 
-
-
-/*
- *  hal_sgpg_public BFA SGPG public functions
- */
-
 bfa_status_t
 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
 {
@@ -5006,14 +4670,12 @@
 	struct bfa_sgpg_s *hsgpg;
 	int i;
 
-	bfa_trc_fp(bfa, nsgpgs);
-
 	if (mod->free_sgpgs < nsgpgs)
 		return BFA_STATUS_ENOMEM;
 
 	for (i = 0; i < nsgpgs; i++) {
 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
-		bfa_assert(hsgpg);
+		WARN_ON(!hsgpg);
 		list_add_tail(&hsgpg->qe, sgpg_q);
 	}
 
@@ -5027,10 +4689,8 @@
 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
 	struct bfa_sgpg_wqe_s *wqe;
 
-	bfa_trc_fp(bfa, nsgpg);
-
 	mod->free_sgpgs += nsgpg;
-	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
+	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
 
 	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
 
@@ -5060,8 +4720,8 @@
 {
 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
 
-	bfa_assert(nsgpg > 0);
-	bfa_assert(nsgpg > mod->free_sgpgs);
+	WARN_ON(nsgpg <= 0);
+	WARN_ON(nsgpg <= mod->free_sgpgs);
 
 	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
 
@@ -5072,7 +4732,7 @@
 		/*
 		 * no one else is waiting for SGPG
 		 */
-		bfa_assert(list_empty(&mod->sgpg_wait_q));
+		WARN_ON(!list_empty(&mod->sgpg_wait_q));
 		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
 		wqe->nsgpg -= mod->free_sgpgs;
 		mod->free_sgpgs = 0;
@@ -5086,7 +4746,7 @@
 {
 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
 
-	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
+	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
 	list_del(&wqe->qe);
 
 	if (wqe->nsgpg_total != wqe->nsgpg)
@@ -5318,7 +4978,7 @@
 	uf->data_ptr = buf;
 	uf->data_len = m->xfr_len;
 
-	bfa_assert(uf->data_len >= sizeof(struct fchs_s));
+	WARN_ON(uf->data_len < sizeof(struct fchs_s));
 
 	if (uf->data_len == sizeof(struct fchs_s)) {
 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
@@ -5361,12 +5021,6 @@
 	bfa_uf_post_all(BFA_UF_MOD(bfa));
 }
 
-
-
-/*
- *  hal_uf_api
- */
-
 /*
  * Register handler for all unsolicited receive frames.
  *
@@ -5414,7 +5068,7 @@
 
 	default:
 		bfa_trc(bfa, msg->mhdr.msg_id);
-		bfa_assert(0);
+		WARN_ON(1);
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index e2349d5..331ad99 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -220,6 +220,18 @@
 /*
  * RPORT related defines
  */
+enum bfa_rport_event {
+	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event          */
+	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport  */
+	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online             */
+	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline            */
+	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response           */
+	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure             */
+	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware       */
+	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed             */
+	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue      */
+};
+
 #define BFA_RPORT_MIN	4
 
 struct bfa_rport_mod_s {
@@ -432,6 +444,7 @@
 	u8			myalpa;	/*  my ALPA in LOOP topology */
 	u8			rsvd[3];
 	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
+	bfa_boolean_t		use_flash_cfg; /* get port cfg from flash */
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
 	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
 	struct bfa_reqq_wait_s	reqq_wait;
@@ -500,30 +513,9 @@
 			void (*event_cbfn) (void *cbarg,
 			enum bfa_port_linkstate event), void *event_cbarg);
 bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
-void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
-void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
-bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
-					  enum bfa_port_speed speed);
 enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
 void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
-void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
-void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
-		       bfa_boolean_t link_e2e_beacon);
-void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
-			     struct bfa_qos_attr_s *qos_attr);
-void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
-				struct bfa_qos_vc_attr_s *qos_vc_attr);
-bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
-				      union bfa_fcport_stats_u *stats,
-				      bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-					void *cbarg);
-bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
-				       union bfa_fcport_stats_u *stats,
-				       bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
-					 void *cbarg);
 bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
 bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
 bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
@@ -537,14 +529,9 @@
  * bfa rport API functions
  */
 struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
-void bfa_rport_delete(struct bfa_rport_s *rport);
 void bfa_rport_online(struct bfa_rport_s *rport,
 		      struct bfa_rport_info_s *rport_info);
-void bfa_rport_offline(struct bfa_rport_s *rport);
 void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
-void bfa_rport_get_stats(struct bfa_rport_s *rport,
-			 struct bfa_rport_hal_stats_s *stats);
-void bfa_rport_clear_stats(struct bfa_rport_s *rport);
 void bfa_cb_rport_online(void *rport);
 void bfa_cb_rport_offline(void *rport);
 void bfa_cb_rport_qos_scn_flowid(void *rport,
@@ -553,8 +540,6 @@
 void bfa_cb_rport_qos_scn_prio(void *rport,
 			       struct bfa_rport_qos_attr_s old_qos_attr,
 			       struct bfa_rport_qos_attr_s new_qos_attr);
-void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
-			    struct bfa_rport_qos_attr_s *qos_attr);
 
 /*
  * bfa fcxp API functions
@@ -619,38 +604,18 @@
 u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
 struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
-void bfa_lps_discard(struct bfa_lps_s *lps);
 void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
 		   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
 		   bfa_boolean_t auth_en);
 void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
 		   wwn_t pwwn, wwn_t nwwn);
-void bfa_lps_flogo(struct bfa_lps_s *lps);
 void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
-u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
-bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
-bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
-u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
+void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
-u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
-wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
-wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
-u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
-u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
-mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
-void bfa_trunk_enable_cfg(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
-bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
-		struct bfa_trunk_attr_s *attr);
-
 #endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6797720..44524cf 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -32,7 +32,6 @@
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfa_fcs.h"
-#include "bfa_os_inc.h"
 #include "bfa_defs.h"
 #include "bfa.h"
 
@@ -61,12 +60,12 @@
 u32	bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
 u32     *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
 
-const char *msix_name_ct[] = {
+static const char *msix_name_ct[] = {
 	"cpe0", "cpe1", "cpe2", "cpe3",
 	"rme0", "rme1", "rme2", "rme3",
 	"ctrl" };
 
-const char *msix_name_cb[] = {
+static const char *msix_name_cb[] = {
 	"cpe0", "cpe1", "cpe2", "cpe3",
 	"rme0", "rme1", "rme2", "rme3",
 	"eemc", "elpu0", "elpu1", "epss", "mlpu" };
@@ -206,7 +205,7 @@
 		}
 
 		spin_lock_irqsave(&bfad->bfad_lock, flags);
-		bfa_init(&bfad->bfa);
+		bfa_iocfc_init(&bfad->bfa);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 		/* Set up interrupt handler for each vectors */
@@ -533,7 +532,7 @@
 					(dma_addr_t) meminfo_elem->dma);
 				break;
 			default:
-				bfa_assert(0);
+				WARN_ON(1);
 				break;
 			}
 		}
@@ -725,7 +724,7 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	bfa_timer_tick(&bfad->bfa);
+	bfa_timer_beat(&bfad->bfa.timer_mod);
 
 	bfa_comp_deq(&bfad->bfa, &doneq);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -882,8 +881,8 @@
 		goto out_hal_mem_alloc_failure;
 	}
 
-	bfa_init_trc(&bfad->bfa, bfad->trcmod);
-	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
+	bfad->bfa.trcmod = bfad->trcmod;
+	bfad->bfa.plog = &bfad->plog_buf;
 	bfa_plog_init(&bfad->plog_buf);
 	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
 		     0, "Driver Attach");
@@ -893,9 +892,9 @@
 
 	/* FCS INIT */
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
+	bfad->bfa_fcs.trcmod = bfad->trcmod;
 	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
-	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
+	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -913,7 +912,7 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	init_completion(&bfad->comp);
-	bfa_stop(&bfad->bfa);
+	bfa_iocfc_stop(&bfad->bfa);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	wait_for_completion(&bfad->comp);
 
@@ -932,8 +931,8 @@
 	unsigned long	flags;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_start(&bfad->bfa);
-	bfa_fcs_start(&bfad->bfa_fcs);
+	bfa_iocfc_start(&bfad->bfa);
+	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
 	bfad->bfad_flags |= BFAD_HAL_START_DONE;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -963,7 +962,7 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	init_completion(&bfad->comp);
-	bfa_stop(&bfad->bfa);
+	bfa_iocfc_stop(&bfad->bfa);
 	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	wait_for_completion(&bfad->comp);
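
Both stop paths above share one shape: take bfad_lock, arm the completion, kick off the asynchronous IOC stop, drop the lock, then block until the stop callback signals completion. In outline (a sketch of the pattern using the fields visible in this file, not additional driver code):

	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);		/* completes asynchronously */
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->comp);	/* signalled from the stop callback */
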
@@ -1102,15 +1101,15 @@
 
 	/*
 	 * If bfa_linkup_delay is set to -1 (default), try to retrieve the
-	 * value using the bfad_os_get_linkup_delay(); else use the
+	 * value using the bfad_get_linkup_delay(); else use the
 	 * passed in module param value as the bfa_linkup_delay.
 	 */
 	if (bfa_linkup_delay < 0) {
-		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
-		bfad_os_rport_online_wait(bfad);
+		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
+		bfad_rport_online_wait(bfad);
 		bfa_linkup_delay = -1;
 	} else
-		bfad_os_rport_online_wait(bfad);
+		bfad_rport_online_wait(bfad);
 
 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
 
@@ -1167,7 +1166,6 @@
 		spin_lock_irqsave(&bfad->bfad_lock, flags);
 		bfa_comp_free(&bfad->bfa, &doneq);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		bfa_trc_fp(bfad, irq);
 	}
 
 	return IRQ_HANDLED;
@@ -1524,7 +1522,7 @@
 	if (strcmp(FCPI_NAME, " fcpim") == 0)
 		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
 
-	bfa_ioc_auto_recover(ioc_auto_recover);
+	bfa_auto_recover = ioc_auto_recover;
 	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
 
 	error = pci_register_driver(&bfad_pci_driver);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index ed9fff4..a94ea42 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -25,7 +25,7 @@
 /*
  * FC transport template entry, get SCSI target port ID.
  */
-void
+static void
 bfad_im_get_starget_port_id(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -40,7 +40,7 @@
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
 
@@ -51,7 +51,7 @@
 /*
  * FC transport template entry, get SCSI target nwwn.
  */
-void
+static void
 bfad_im_get_starget_node_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -66,7 +66,7 @@
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
 
@@ -77,7 +77,7 @@
 /*
  * FC transport template entry, get SCSI target pwwn.
  */
-void
+static void
 bfad_im_get_starget_port_name(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost;
@@ -92,7 +92,7 @@
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-	itnim = bfad_os_get_itnim(im_port, starget->id);
+	itnim = bfad_get_itnim(im_port, starget->id);
 	if (itnim)
 		port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
 
@@ -103,7 +103,7 @@
 /*
  * FC transport template entry, get SCSI host port ID.
  */
-void
+static void
 bfad_im_get_host_port_id(struct Scsi_Host *shost)
 {
 	struct bfad_im_port_s *im_port =
@@ -111,7 +111,7 @@
 	struct bfad_port_s    *port = im_port->port;
 
 	fc_host_port_id(shost) =
-			bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
+			bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
 }
 
 /*
@@ -487,7 +487,7 @@
 	wait_for_completion(vport->comp_del);
 
 free_scsi_host:
-	bfad_os_scsi_host_free(bfad, im_port);
+	bfad_scsi_host_free(bfad, im_port);
 
 	kfree(vport);
 
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 1fedeeb..c66e32e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -90,7 +90,7 @@
 	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_debug_fwtrc(&bfad->bfa,
+	rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
 			fw_debug->debug_buffer,
 			&fw_debug->buffer_len);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -134,7 +134,7 @@
 	memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_debug_fwsave(&bfad->bfa,
+	rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
 			fw_debug->debug_buffer,
 			&fw_debug->buffer_len);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -208,7 +208,7 @@
 	if (!debug || !debug->debug_buffer)
 		return 0;
 
-	return memory_read_from_buffer(buf, nbytes, pos,
+	return simple_read_from_buffer(buf, nbytes, pos,
 				debug->debug_buffer, debug->buffer_len);
 }
 
@@ -254,7 +254,7 @@
 	if (!bfad->regdata)
 		return 0;
 
-	rc = memory_read_from_buffer(buf, nbytes, pos,
+	rc = simple_read_from_buffer(buf, nbytes, pos,
 			bfad->regdata, bfad->reglen);
 
 	if ((*pos + nbytes) >= bfad->reglen) {
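
The switch from memory_read_from_buffer() to simple_read_from_buffer() matters because a debugfs read handler is handed a user-space pointer: simple_read_from_buffer() copies out with copy_to_user(), while memory_read_from_buffer() does a plain kernel memcpy. A minimal read handler built on it (a sketch with illustrative names; kbuf/klen stand in for the driver's saved snapshot):

	#include <linux/fs.h>

	static char kbuf[256];			/* kernel-side data to expose */
	static size_t klen = sizeof(kbuf);

	static ssize_t foo_debugfs_read(struct file *file, char __user *buf,
					size_t nbytes, loff_t *pos)
	{
		return simple_read_from_buffer(buf, nbytes, pos, kbuf, klen);
	}
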
@@ -279,15 +279,31 @@
 	u32 *regbuf;
 	void __iomem *rb, *reg_addr;
 	unsigned long flags;
+	void *kern_buf;
 
-	rc = sscanf(buf, "%x:%x", &addr, &len);
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+
+	if (!kern_buf) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
 	if (rc < 2) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
+		kfree(kern_buf);
 		return -EINVAL;
 	}
 
+	kfree(kern_buf);
 	kfree(bfad->regdata);
 	bfad->regdata = NULL;
 	bfad->reglen = 0;
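
The write handlers now copy the user buffer into a kernel allocation before parsing, since buf is a __user pointer that sscanf() must not dereference directly. The general shape is sketched below (illustrative names; note the sketch also NUL-terminates the copy and returns -EFAULT for a failed user copy):

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static ssize_t foo_debugfs_write(struct file *file, const char __user *buf,
					 size_t nbytes, loff_t *pos)
	{
		char *kern_buf;
		unsigned int addr, len;

		kern_buf = kzalloc(nbytes + 1, GFP_KERNEL);	/* +1 keeps a NUL */
		if (!kern_buf)
			return -ENOMEM;

		if (copy_from_user(kern_buf, buf, nbytes)) {
			kfree(kern_buf);
			return -EFAULT;
		}

		if (sscanf(kern_buf, "%x:%x", &addr, &len) < 2) {
			kfree(kern_buf);
			return -EINVAL;
		}
		kfree(kern_buf);

		/* ... act on addr/len ... */
		return nbytes;
	}
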
@@ -339,14 +355,30 @@
 	int addr, val, rc;
 	void __iomem *reg_addr;
 	unsigned long flags;
+	void *kern_buf;
 
-	rc = sscanf(buf, "%x:%x", &addr, &val);
+	kern_buf = kzalloc(nbytes, GFP_KERNEL);
+
+	if (!kern_buf) {
+		printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n",
+				bfad->inst_no);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(kern_buf, (void  __user *)buf, nbytes)) {
+		kfree(kern_buf);
+		return -ENOMEM;
+	}
+
+	rc = sscanf(kern_buf, "%x:%x", &addr, &val);
 	if (rc < 2) {
 		printk(KERN_INFO
 			"bfad[%d]: %s failed to read user buf\n",
 			bfad->inst_no, __func__);
+		kfree(kern_buf);
 		return -EINVAL;
 	}
+	kfree(kern_buf);
 
 	addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
 
@@ -359,7 +391,7 @@
 		return -EINVAL;
 	}
 
-	reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
+	reg_addr = (bfa_ioc_bar0(ioc)) + addr;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	writel(val, reg_addr);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index d5ce234..7f9ea90 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -26,7 +26,23 @@
 #ifndef __BFAD_DRV_H__
 #define __BFAD_DRV_H__
 
-#include "bfa_os_inc.h"
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -39,7 +55,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.3.2.0"
+#define BFAD_DRIVER_VERSION    "2.3.2.3"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -263,28 +279,21 @@
  */
 #define nextLowerInt(x)                         \
 do {                                            \
-	int i;                                  \
+	int __i;                                  \
 	(*x)--;					\
-	for (i = 1; i < (sizeof(int)*8); i <<= 1) \
-		(*x) = (*x) | (*x) >> i;	\
+	for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
+		(*x) = (*x) | (*x) >> __i;	\
 	(*x)++;					\
 	(*x) = (*x) >> 1;			\
 } while (0)
 
 
-#define list_remove_head(list, entry, type, member)		\
-do {								\
-	entry = NULL;                                           \
-	if (!list_empty(list)) {                                \
-		entry = list_entry((list)->next, type, member);	\
-		list_del_init(&entry->member);			\
-	}							\
+#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
+do {									\
+	if (((mask) == 4) || (level[1] <= '4'))				\
+		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
 } while (0)
 
-#define list_get_first(list, type, member)				\
-((list_empty(list)) ? NULL :						\
-	list_entry((list)->next, type, member))
-
 bfa_status_t	bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 				  struct bfa_lport_cfg_s *port_cfg,
 				  struct device *dev);
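
The reworked BFA_LOG() macro now routes through dev_printk(), so messages carry the PCI device prefix, gated on the message severity and the caller-supplied mask. Call sites elsewhere in this patch look like:

	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");

	/* which, when the gate passes, expands to roughly: */
	dev_printk(KERN_INFO, &bfad->pcidev->dev, "bfa device claimed\n");
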
@@ -316,8 +325,8 @@
 
 void bfad_pci_remove(struct pci_dev *pdev);
 int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
-void bfad_os_rport_online_wait(struct bfad_s *bfad);
-int bfad_os_get_linkup_delay(struct bfad_s *bfad);
+void bfad_rport_online_wait(struct bfad_s *bfad);
+int bfad_get_linkup_delay(struct bfad_s *bfad);
 int bfad_install_msix_handler(struct bfad_s *bfad);
 
 extern struct idr bfad_im_port_index;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index fbad5e9..c2b3617 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -21,7 +21,6 @@
 
 #include "bfad_drv.h"
 #include "bfad_im.h"
-#include "bfa_cb_ioim.h"
 #include "bfa_fcs.h"
 
 BFA_TRC_FILE(LDRV, IM);
@@ -93,10 +92,10 @@
 		if (!cmnd->result && itnim &&
 			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
 			/* Queue depth adjustment for good status completion */
-			bfad_os_ramp_up_qdepth(itnim, cmnd->device);
+			bfad_ramp_up_qdepth(itnim, cmnd->device);
 		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
 			/* qfull handling */
-			bfad_os_handle_qfull(itnim, cmnd->device);
+			bfad_handle_qfull(itnim, cmnd->device);
 		}
 	}
 
@@ -124,7 +123,7 @@
 		if (itnim_data) {
 			itnim = itnim_data->itnim;
 			if (itnim)
-				bfad_os_ramp_up_qdepth(itnim, cmnd->device);
+				bfad_ramp_up_qdepth(itnim, cmnd->device);
 		}
 	}
 
@@ -183,7 +182,7 @@
 	bfa_get_adapter_model(bfa, model);
 
 	memset(bfa_buf, 0, sizeof(bfa_buf));
-	if (ioc->ctdev)
+	if (ioc->ctdev && !ioc->fcmode)
 		snprintf(bfa_buf, sizeof(bfa_buf),
 		"Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
 		 model, bfad->pci_name, BFAD_DRIVER_VERSION);
@@ -258,6 +257,7 @@
 	struct bfa_tskim_s *tskim;
 	struct bfa_itnim_s *bfa_itnim;
 	bfa_status_t    rc = BFA_STATUS_OK;
+	struct scsi_lun scsilun;
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
@@ -274,7 +274,8 @@
 	cmnd->host_scribble = NULL;
 	cmnd->SCp.Status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
-	bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
+	memset(&scsilun, 0, sizeof(scsilun));
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
 			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
 out:
 	return rc;
@@ -301,6 +302,7 @@
 	int             rc = SUCCESS;
 	unsigned long   flags;
 	enum bfi_tskim_status task_status;
+	struct scsi_lun scsilun;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	itnim = itnim_data->itnim;
@@ -327,8 +329,8 @@
 	cmnd->SCp.ptr = (char *)&wq;
 	cmnd->SCp.Status = 0;
 	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
-	bfa_tskim_start(tskim, bfa_itnim,
-			    bfad_int_to_lun(cmnd->device->lun),
+	int_to_scsilun(cmnd->device->lun, &scsilun);
+	bfa_tskim_start(tskim, bfa_itnim, scsilun,
 			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
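
bfa_tskim_start() now takes a struct scsi_lun, the 8-byte FCP wire representation, instead of the driver-private lun_t. Callers either zero it for a target-level task management function (as in the target reset path above) or encode the numeric LUN with int_to_scsilun(), as in the LUN reset path:

	struct scsi_lun scsilun;

	/* encode the host-order LUN number into SAM wire format */
	int_to_scsilun(cmnd->device->lun, &scsilun);
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
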
 
@@ -364,7 +366,7 @@
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	for (i = 0; i < MAX_FCP_TARGET; i++) {
-		itnim = bfad_os_get_itnim(im_port, i);
+		itnim = bfad_get_itnim(im_port, i);
 		if (itnim) {
 			cmnd->SCp.ptr = (char *)&wq;
 			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
@@ -447,7 +449,7 @@
 	struct bfad_im_s	*im = itnim_drv->im;
 
 	/* online to free state transition should not happen */
-	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
+	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);
 
 	itnim_drv->queue_work = 1;
 	/* offline request is not yet done, use the same request to free */
@@ -545,7 +547,7 @@
 
 	mutex_unlock(&bfad_mutex);
 
-	im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
+	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
 	if (!im_port->shost) {
 		error = 1;
 		goto out_free_idr;
@@ -571,7 +573,7 @@
 	}
 
 	/* setup host fixed attribute if the lk supports */
-	bfad_os_fc_host_init(im_port);
+	bfad_fc_host_init(im_port);
 
 	return 0;
 
@@ -662,7 +664,7 @@
 	}
 
 	/* the itnim_mapped_list must be empty at this time */
-	bfa_assert(list_empty(&im_port->itnim_mapped_list));
+	WARN_ON(!list_empty(&im_port->itnim_mapped_list));
 
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
@@ -682,7 +684,7 @@
 	bfad->im = im;
 	im->bfad = bfad;
 
-	if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
+	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
 		kfree(im);
 		rc = BFA_STATUS_FAILED;
 	}
@@ -695,14 +697,14 @@
 bfad_im_probe_undo(struct bfad_s *bfad)
 {
 	if (bfad->im) {
-		bfad_os_destroy_workq(bfad->im);
+		bfad_destroy_workq(bfad->im);
 		kfree(bfad->im);
 		bfad->im = NULL;
 	}
 }
 
 struct Scsi_Host *
-bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
+bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 {
 	struct scsi_host_template *sht;
 
@@ -717,7 +719,7 @@
 }
 
 void
-bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
+bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 {
 	if (!(im_port->flags & BFAD_PORT_DELETE))
 		flush_workqueue(bfad->im->drv_workq);
@@ -727,7 +729,7 @@
 }
 
 void
-bfad_os_destroy_workq(struct bfad_im_s *im)
+bfad_destroy_workq(struct bfad_im_s *im)
 {
 	if (im && im->drv_workq) {
 		flush_workqueue(im->drv_workq);
@@ -737,7 +739,7 @@
 }
 
 bfa_status_t
-bfad_os_thread_workq(struct bfad_s *bfad)
+bfad_thread_workq(struct bfad_s *bfad)
 {
 	struct bfad_im_s      *im = bfad->im;
 
@@ -841,7 +843,7 @@
 }
 
 void
-bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
 	struct scsi_device *tmp_sdev;
 
@@ -869,7 +871,7 @@
 }
 
 void
-bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
+bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
 	struct scsi_device *tmp_sdev;
 
@@ -883,7 +885,7 @@
 }
 
 struct bfad_itnim_s *
-bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
+bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
 {
 	struct bfad_itnim_s   *itnim = NULL;
 
@@ -922,7 +924,7 @@
 	if (!ioc_attr)
 		return 0;
 
-	bfa_get_attr(bfa, ioc_attr);
+	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
 	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
 		if (ioc_attr->adapter_attr.is_mezz) {
 			supported_speed |= FC_PORTSPEED_8GBIT |
@@ -944,7 +946,7 @@
 }
 
 void
-bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
+bfad_fc_host_init(struct bfad_im_port_s *im_port)
 {
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s         *bfad = im_port->bfad;
@@ -988,7 +990,7 @@
 	rport_ids.port_name =
 		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
 	rport_ids.port_id =
-		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
+		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
 
 	itnim->fc_rport = fc_rport =
@@ -1109,7 +1111,7 @@
 		kfree(itnim);
 		break;
 	default:
-		bfa_assert(0);
+		WARN_ON(1);
 		break;
 	}
 
@@ -1172,7 +1174,6 @@
 	}
 
 	cmnd->host_scribble = (char *)hal_io;
-	bfa_trc_fp(bfad, hal_io->iotag);
 	bfa_ioim_start(hal_io);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -1190,7 +1191,7 @@
 static DEF_SCSI_QCMD(bfad_im_queuecommand)
 
 void
-bfad_os_rport_online_wait(struct bfad_s *bfad)
+bfad_rport_online_wait(struct bfad_s *bfad)
 {
 	int i;
 	int rport_delay = 10;
@@ -1218,7 +1219,7 @@
 }
 
 int
-bfad_os_get_linkup_delay(struct bfad_s *bfad)
+bfad_get_linkup_delay(struct bfad_s *bfad)
 {
 	u8		nwwns = 0;
 	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index b038c0e..bfee63b 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -117,17 +117,17 @@
 	char            drv_workq_name[KOBJ_NAME_LEN];
 };
 
-struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
+struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
 				struct bfad_s *);
-bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
-void bfad_os_destroy_workq(struct bfad_im_s *im);
-void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
-void bfad_os_scsi_host_free(struct bfad_s *bfad,
+bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
+void bfad_destroy_workq(struct bfad_im_s *im);
+void bfad_fc_host_init(struct bfad_im_port_s *im_port);
+void bfad_scsi_host_free(struct bfad_s *bfad,
 				 struct bfad_im_port_s *im_port);
-void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
+void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim,
 				 struct scsi_device *sdev);
-void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
-struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
+void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
+struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id);
 
 extern struct scsi_host_template bfad_im_scsi_host_template;
 extern struct scsi_host_template bfad_im_vport_template;
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 58796d1..72b69a0 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -95,8 +95,8 @@
  */
 union bfi_addr_u {
 	struct {
-		u32	addr_lo;
-		u32	addr_hi;
+		__be32	addr_lo;
+		__be32	addr_hi;
 	} a32;
 };
 
@@ -104,7 +104,7 @@
  * Scatter Gather Element
  */
 struct bfi_sge_s {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
 	u32	flags:2,
 			rsvd:2,
 			sg_len:28;
@@ -399,7 +399,7 @@
  */
 struct bfi_pbc_blun_s {
 	wwn_t		tgt_pwwn;
-	lun_t		tgt_lun;
+	struct scsi_lun	tgt_lun;
 };
 
 /*
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
index 6f03ed3..39ad42b 100644
--- a/drivers/scsi/bfa/bfi_cbreg.h
+++ b/drivers/scsi/bfa/bfi_cbreg.h
@@ -208,6 +208,7 @@
 #define BFA_IOC1_HBEAT_REG               HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG               HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT                 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		 HOST_SEM5_INFO_REG
 
 #define CPE_Q_DEPTH(__n) \
 	(CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
index 62b86a4..fc4ce4a 100644
--- a/drivers/scsi/bfa/bfi_ctreg.h
+++ b/drivers/scsi/bfa/bfi_ctreg.h
@@ -522,6 +522,7 @@
 #define BFA_IOC1_HBEAT_REG		HOST_SEM2_INFO_REG
 #define BFA_IOC1_STATE_REG		HOST_SEM3_INFO_REG
 #define BFA_FW_USE_COUNT		 HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC		HOST_SEM5_INFO_REG
 
 #define CPE_DEPTH_Q(__n) \
 	(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -539,22 +540,30 @@
 	(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
 #define RME_CI_PTR_Q(__n) \
 	(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
-	* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
-	* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
+#define HQM_QSET_RXQ_DRBL_P0(__n) \
+	(HQM_QSET0_RXQ_DRBL_P0 + (__n) *	\
+	(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
+#define HQM_QSET_TXQ_DRBL_P0(__n) \
+	(HQM_QSET0_TXQ_DRBL_P0 + (__n) *	\
+	(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
+#define HQM_QSET_IB_DRBL_1_P0(__n) \
+	(HQM_QSET0_IB_DRBL_1_P0 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
+#define HQM_QSET_IB_DRBL_2_P0(__n) \
+	(HQM_QSET0_IB_DRBL_2_P0 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
+#define HQM_QSET_RXQ_DRBL_P1(__n) \
+	(HQM_QSET0_RXQ_DRBL_P1 + (__n) *	\
+	(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
+#define HQM_QSET_TXQ_DRBL_P1(__n) \
+	(HQM_QSET0_TXQ_DRBL_P1 + (__n) *	\
+	(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
+#define HQM_QSET_IB_DRBL_1_P1(__n) \
+	(HQM_QSET0_IB_DRBL_1_P1 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
+#define HQM_QSET_IB_DRBL_2_P1(__n) \
+	(HQM_QSET0_IB_DRBL_2_P1 + (__n) *	\
+	(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
 
 #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
 #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index fa9f6fb..19e888a 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -47,10 +47,10 @@
 	 */
 	union bfi_addr_u  req_cq_ba[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  req_shadow_ci[BFI_IOC_MAX_CQS];
-	u16    req_cq_elems[BFI_IOC_MAX_CQS];
+	__be16    req_cq_elems[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  rsp_cq_ba[BFI_IOC_MAX_CQS];
 	union bfi_addr_u  rsp_shadow_pi[BFI_IOC_MAX_CQS];
-	u16    rsp_cq_elems[BFI_IOC_MAX_CQS];
+	__be16    rsp_cq_elems[BFI_IOC_MAX_CQS];
 
 	union bfi_addr_u  stats_addr;	/*  DMA-able address for stats	  */
 	union bfi_addr_u  cfgrsp_addr;	/*  config response dma address  */
@@ -102,8 +102,8 @@
 	struct bfi_mhdr_s mh;		/*  common msg header		*/
 	u8		coalesce;	/*  enable intr coalescing	*/
 	u8		rsvd[3];
-	u16	delay;		/*  delay timer 0..1125us	*/
-	u16	latency;	/*  latency timer 0..225us	*/
+	__be16	delay;		/*  delay timer 0..1125us	*/
+	__be16	latency;	/*  latency timer 0..225us	*/
 };
 
 
@@ -188,7 +188,8 @@
 	struct bfi_mhdr_s  mh;		/*  common msg header		    */
 	u8		   status;	/*  port enable status		    */
 	u8		   rsvd[3];
-	u32	   msgtag;	/*  msgtag for reply		    */
+	struct bfa_port_cfg_s port_cfg; /* port configuration	*/
+	u32	msgtag;			/* msgtag for reply	*/
 };
 
 /*
@@ -202,7 +203,8 @@
 	struct bfa_port_cfg_s port_cfg; /*  port configuration	    */
 	union bfi_addr_u   stats_dma_addr; /*  DMA address for stats	    */
 	u32	   msgtag;	/*  msgtag for reply		    */
-	u32	   rsvd2;
+	u8	use_flash_cfg;	/* get port cfg from flash */
+	u8	rsvd2[3];
 };
 
 /*
@@ -210,7 +212,7 @@
  */
 struct bfi_fcport_set_svc_params_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header */
-	u16	   tx_bbcredit;	/*  Tx credits */
+	__be16	   tx_bbcredit;	/*  Tx credits */
 	u16	   rsvd;
 };
 
@@ -231,7 +233,7 @@
 	u8			state;		/* bfa_trunk_link_state_t */
 	u8			speed;		/* bfa_port_speed_t */
 	u8			rsvd;
-	u32		deskew;
+	__be32		deskew;
 };
 
 #define BFI_FCPORT_MAX_LINKS	2
@@ -284,17 +286,17 @@
  */
 struct bfi_fcxp_send_req_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
-	u16	fcxp_tag;	/*  driver request tag		    */
-	u16	max_frmsz;	/*  max send frame size	    */
-	u16	vf_id;		/*  vsan tag if applicable	    */
+	__be16	fcxp_tag;	/*  driver request tag		    */
+	__be16	max_frmsz;	/*  max send frame size	    */
+	__be16	vf_id;		/*  vsan tag if applicable	    */
 	u16	rport_fw_hndl;	/*  FW Handle for the remote port  */
 	u8	 class;		/*  FC class used for req/rsp	    */
 	u8	 rsp_timeout;	/*  timeout in secs, 0-no response */
 	u8	 cts;		/*  continue sequence		    */
 	u8	 lp_tag;	/*  lport tag			    */
 	struct fchs_s	fchs;	/*  request FC header structure    */
-	u32	req_len;	/*  request payload length	    */
-	u32	rsp_maxlen;	/*  max response length expected   */
+	__be32	req_len;	/*  request payload length	    */
+	__be32	rsp_maxlen;	/*  max response length expected   */
 	struct bfi_sge_s   req_sge[BFA_FCXP_MAX_SGES];	/*  request buf    */
 	struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];	/*  response buf   */
 };
@@ -304,11 +306,11 @@
  */
 struct bfi_fcxp_send_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		    */
-	u16	fcxp_tag;	/*  send request tag		    */
+	__be16	fcxp_tag;	/*  send request tag		    */
 	u8	 req_status;	/*  request status		    */
 	u8	 rsvd;
-	u32	rsp_len;	/*  actual response length	    */
-	u32	residue_len;	/*  residual response length	    */
+	__be32	rsp_len;	/*  actual response length	    */
+	__be32	residue_len;	/*  residual response length	    */
 	struct fchs_s	fchs;	/*  response FC header structure   */
 };
 
@@ -325,7 +327,7 @@
 struct bfi_uf_buf_post_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		*/
 	u16	buf_tag;	/*  buffer tag			*/
-	u16	buf_len;	/*  total buffer length	*/
+	__be16	buf_len;	/*  total buffer length	*/
 	struct bfi_sge_s   sge[BFA_UF_MAX_SGES]; /*  buffer DMA SGEs	*/
 };
 
@@ -340,6 +342,7 @@
 enum bfi_lps_h2i_msgs {
 	BFI_LPS_H2I_LOGIN_REQ	= 1,
 	BFI_LPS_H2I_LOGOUT_REQ	= 2,
+	BFI_LPS_H2I_N2N_PID_REQ = 3,
 };
 
 enum bfi_lps_i2h_msgs {
@@ -352,7 +355,7 @@
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u8		lp_tag;
 	u8		alpa;
-	u16	pdu_size;
+	__be16		pdu_size;
 	wwn_t		pwwn;
 	wwn_t		nwwn;
 	u8		fdisc;
@@ -368,7 +371,7 @@
 	u8		lsrjt_expl;
 	wwn_t		port_name;
 	wwn_t		node_name;
-	u16	bb_credit;
+	__be16		bb_credit;
 	u8		f_port;
 	u8		npiv_en;
 	u32	lp_pid:24;
@@ -399,10 +402,17 @@
 	u8		rsvd[3];
 };
 
+struct bfi_lps_n2n_pid_req_s {
+	struct bfi_mhdr_s	mh;	/*  common msg header		*/
+	u8	lp_tag;
+	u32	lp_pid:24;
+};
+
 union bfi_lps_h2i_msg_u {
 	struct bfi_mhdr_s		*msg;
 	struct bfi_lps_login_req_s	*login_req;
 	struct bfi_lps_logout_req_s	*logout_req;
+	struct bfi_lps_n2n_pid_req_s	*n2n_pid_req;
 };
 
 union bfi_lps_i2h_msg_u {
@@ -427,7 +437,7 @@
 struct bfi_rport_create_req_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		*/
 	u16	bfa_handle;	/*  host rport handle		*/
-	u16	max_frmsz;	/*  max rcv pdu size		*/
+	__be16	max_frmsz;	/*  max rcv pdu size		*/
 	u32	pid:24,	/*  remote port ID		*/
 		lp_tag:8;	/*  local port tag		*/
 	u32	local_pid:24,	/*  local port ID		*/
@@ -583,7 +593,7 @@
  */
 struct bfi_ioim_req_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
-	u16	io_tag;		/*  I/O tag			 */
+	__be16	io_tag;		/*  I/O tag			 */
 	u16	rport_hdl;	/*  itnim/rport firmware handle */
 	struct fcp_cmnd_s	cmnd;	/*  IO request info	*/
 
@@ -689,7 +699,7 @@
  */
 struct bfi_ioim_rsp_s {
 	struct bfi_mhdr_s	mh;	/*  common msg header		*/
-	u16	io_tag;		/*  completed IO tag		 */
+	__be16	io_tag;		/*  completed IO tag		 */
 	u16	bfa_rport_hndl;	/*  related rport handle	 */
 	u8	io_status;	/*  IO completion status	 */
 	u8	reuse_io_tag;	/*  IO tag can be reused	*/
@@ -698,13 +708,13 @@
 	u8		sns_len;	/*  scsi sense length		 */
 	u8		resid_flags;	/*  IO residue flags		 */
 	u8		rsvd_a;
-	u32	residue;	/*  IO residual length in bytes */
+	__be32	residue;	/*  IO residual length in bytes */
 	u32	rsvd_b[3];
 };
 
 struct bfi_ioim_abort_req_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header  */
-	u16	io_tag;	/*  I/O tag	*/
+	__be16	io_tag;	/*  I/O tag	*/
 	u16	abort_tag;	/*  unique request tag */
 };
 
@@ -723,9 +733,9 @@
 
 struct bfi_tskim_req_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
-	u16	tsk_tag;	/*  task management tag	*/
+	__be16	tsk_tag;	/*  task management tag	*/
 	u16	itn_fhdl;	/*  itn firmware handle	*/
-	lun_t	lun;	/*  LU number	*/
+	struct scsi_lun	lun;	/*  LU number	*/
 	u8	tm_flags;	/*  see enum fcp_tm_cmnd	*/
 	u8	t_secs;	/*  Timeout value in seconds	*/
 	u8	rsvd[2];
@@ -733,7 +743,7 @@
 
 struct bfi_tskim_abortreq_s {
 	struct bfi_mhdr_s  mh;	/*  Common msg header	*/
-	u16	tsk_tag;	/*  task management tag	*/
+	__be16	tsk_tag;	/*  task management tag	*/
 	u16	rsvd;
 };
 
@@ -755,7 +765,7 @@
 
 struct bfi_tskim_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  Common msg header		 */
-	u16	tsk_tag;	/*  task mgmt cmnd tag		 */
+	__be16	tsk_tag;	/*  task mgmt cmnd tag		 */
 	u8	tsk_status;	/*  @ref bfi_tskim_status */
 	u8	rsvd;
 };
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 1b6f86b..30e6bdb 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,12 +1,13 @@
 /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 #ifndef __57XX_ISCSI_CONSTANTS_H_
 #define __57XX_ISCSI_CONSTANTS_H_
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 36af1af..dad6c8a 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,12 +1,13 @@
 /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 #ifndef __57XX_ISCSI_HSI_LINUX_LE__
 #define __57XX_ISCSI_HSI_LINUX_LE__
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index a44b1b3..e1ca5fe 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
 /* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #ifndef _BNX2I_H_
@@ -649,6 +650,7 @@
 	EP_STATE_OFLD_FAILED            = 0x8000000,
 	EP_STATE_CONNECT_FAILED         = 0x10000000,
 	EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
+	EP_STATE_OFLD_FAILED_CID_BUSY   = 0x80000000,
 };
 
 /**
@@ -717,14 +719,11 @@
  * Function Prototypes
  */
 extern void bnx2i_identify_device(struct bnx2i_hba *hba);
-extern void bnx2i_register_device(struct bnx2i_hba *hba);
 
 extern void bnx2i_ulp_init(struct cnic_dev *dev);
 extern void bnx2i_ulp_exit(struct cnic_dev *dev);
 extern void bnx2i_start(void *handle);
 extern void bnx2i_stop(void *handle);
-extern void bnx2i_reg_dev_all(void);
-extern void bnx2i_unreg_dev_all(void);
 extern struct bnx2i_hba *get_adapter_list_head(void);
 
 struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
@@ -761,11 +760,11 @@
 				   struct iscsi_task *mtask);
 extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
 				       struct bnx2i_cmd *cmd);
-extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
-				     struct bnx2i_endpoint *ep);
-extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
-extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
 				    struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern int bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+				   struct bnx2i_endpoint *ep);
 
 extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
 			       struct bnx2i_endpoint *ep);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 2f9622e..96505e3 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
 /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include <linux/gfp.h>
@@ -385,6 +386,7 @@
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct bnx2i_tmf_request *tmfabort_wqe;
 	u32 dword;
+	u32 scsi_lun[2];
 
 	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
 	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -426,7 +428,10 @@
 	default:
 		tmfabort_wqe->ref_itt = RESERVED_ITT;
 	}
-	memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
+	memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
+	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
 	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
 
 	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
@@ -697,10 +702,11 @@
  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate
  * 	iscsi connection context clean-up process
  */
-void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 {
 	struct kwqe *kwqe_arr[2];
 	struct iscsi_kwqe_conn_destroy conn_cleanup;
+	int rc = -EINVAL;
 
 	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
 
@@ -717,7 +723,9 @@
 
 	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
 	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+
+	return rc;
 }
 
 
@@ -728,8 +736,8 @@
  *
  * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
-static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
-					  struct bnx2i_endpoint *ep)
+static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					 struct bnx2i_endpoint *ep)
 {
 	struct kwqe *kwqe_arr[2];
 	struct iscsi_kwqe_conn_offload1 ofld_req1;
@@ -737,6 +745,7 @@
 	dma_addr_t dma_addr;
 	int num_kwqes = 2;
 	u32 *ptbl;
+	int rc = -EINVAL;
 
 	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
 	ofld_req1.hdr.flags =
@@ -774,7 +783,9 @@
 	ofld_req2.num_additional_wqes = 0;
 
 	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
 }
 
 
@@ -785,8 +796,8 @@
  *
  * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
-static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
-					   struct bnx2i_endpoint *ep)
+static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+					  struct bnx2i_endpoint *ep)
 {
 	struct kwqe *kwqe_arr[5];
 	struct iscsi_kwqe_conn_offload1 ofld_req1;
@@ -795,6 +806,7 @@
 	dma_addr_t dma_addr;
 	int num_kwqes = 2;
 	u32 *ptbl;
+	int rc = -EINVAL;
 
 	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
 	ofld_req1.hdr.flags =
@@ -840,7 +852,9 @@
 	num_kwqes += 1;
 
 	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+	return rc;
 }
 
 /**
@@ -851,12 +865,16 @@
  *
  * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
  */
-void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 {
+	int rc;
+
 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
-		bnx2i_5771x_send_conn_ofld_req(hba, ep);
+		rc = bnx2i_5771x_send_conn_ofld_req(hba, ep);
 	else
-		bnx2i_570x_send_conn_ofld_req(hba, ep);
+		rc = bnx2i_570x_send_conn_ofld_req(hba, ep);
+
+	return rc;
 }
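
(Editor's note on the conversion above: these send helpers used to return void; they now report the KWQE submit status so the connect path can fail fast. A minimal, illustrative sketch of that pattern in standalone C follows; the names are hypothetical and this is not the bnx2i code.)

/* Illustrative only: propagate the submit status instead of dropping it. */
#include <errno.h>

struct xmit_ops {
	int (*submit)(void **wqes, int nwqes);	/* may be NULL if not wired up */
};

static int send_offload_req(struct xmit_ops *ops, void **wqes, int nwqes)
{
	int rc = -EINVAL;	/* default when no submit hook is available */

	if (ops && ops->submit)
		rc = ops->submit(wqes, nwqes);

	return rc;		/* caller maps a failure to -EBUSY/-ENOSPC */
}
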
 
 
@@ -1513,7 +1531,7 @@
 	task = iscsi_itt_to_task(conn,
 				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
 	if (task)
-		iscsi_put_task(task);
+		__iscsi_put_task(task);
 	spin_unlock(&session->lock);
 }
 
@@ -1549,11 +1567,9 @@
 	struct iscsi_task *task;
 	struct bnx2i_nop_in_msg *nop_in;
 	struct iscsi_nopin *hdr;
-	u32 itt;
 	int tgt_async_nop = 0;
 
 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
-	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
 
 	spin_lock(&session->lock);
 	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
@@ -1563,7 +1579,7 @@
 	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
 	hdr->ttt = cpu_to_be32(nop_in->ttt);
 
-	if (itt == (u16) RESERVED_ITT) {
+	if (nop_in->itt == (u16) RESERVED_ITT) {
 		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
 		hdr->itt = RESERVED_ITT;
 		tgt_async_nop = 1;
@@ -1571,7 +1587,8 @@
 	}
 
 	/* this is a response to one of our nop-outs */
-	task = iscsi_itt_to_task(conn, itt);
+	task = iscsi_itt_to_task(conn,
+			 (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX));
 	if (task) {
 		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 		hdr->itt = task->hdr->itt;
@@ -1721,9 +1738,18 @@
 		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
 			break;
 
-		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+			if (nopin->op_code == ISCSI_OP_NOOP_IN &&
+			    nopin->itt == (u16) RESERVED_ITT) {
+				printk(KERN_ALERT "bnx2i: Unsolicited "
+					"NOP-In detected for suspended "
+					"connection dev=%s!\n",
+					bnx2i_conn->hba->netdev->name);
+				bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+				goto cqe_out;
+			}
 			break;
-
+		}
 		tgt_async_msg = 0;
 
 		switch (nopin->op_code) {
@@ -1770,10 +1796,9 @@
 			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
 					  nopin->op_code);
 		}
-
 		if (!tgt_async_msg)
 			bnx2i_conn->ep->num_active_cmds--;
-
+cqe_out:
 		/* clear out in production version only, till beta keep opcode
 		 * field intact, will be helpful in debugging (context dump)
 		 * nopin->op_code = 0;
@@ -2154,11 +2179,24 @@
 	}
 
 	if (ofld_kcqe->completion_status) {
+		ep->state = EP_STATE_OFLD_FAILED;
 		if (ofld_kcqe->completion_status ==
 		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
-			printk(KERN_ALERT "bnx2i: unable to allocate"
-					  " iSCSI context resources\n");
-		ep->state = EP_STATE_OFLD_FAILED;
+			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable "
+				"to allocate iSCSI context resources\n",
+				hba->netdev->name);
+		else if (ofld_kcqe->completion_status ==
+			 ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE)
+			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
+				"opcode\n", hba->netdev->name);
+		else if (ofld_kcqe->completion_status ==
+			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
+			/* error status code valid only for 5771x chipset */
+			ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
+		else
+			printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
+				"error code %d\n", hba->netdev->name,
+				ofld_kcqe->completion_status);
 	} else {
 		ep->state = EP_STATE_OFLD_COMPL;
 		cid_addr = ofld_kcqe->iscsi_conn_context_id;
@@ -2339,10 +2377,14 @@
 static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
 {
 	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+	u32 old_state = ep->state;
 
 	ep->state = EP_STATE_TCP_RST_RCVD;
-	if (ep->conn)
-		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+	if (old_state == EP_STATE_DISCONN_START)
+		wake_up_interruptible(&ep->ofld_wait);
+	else
+		if (ep->conn)
+			bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
 }
 
 
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 50c2aa3..72a7b2d 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
 /* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include "bnx2i.h"
@@ -17,8 +18,8 @@
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.1.3"
-#define DRV_MODULE_RELDATE	"Aug 10, 2010"
+#define DRV_MODULE_VERSION	"2.6.2.2"
+#define DRV_MODULE_RELDATE	"Nov 23, 2010"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -65,8 +66,6 @@
 
 u64 iscsi_error_mask = 0x00;
 
-static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ;
-
 
 /**
  * bnx2i_identify_device - identifies NetXtreme II device type
@@ -211,13 +210,24 @@
 {
 	struct bnx2i_hba *hba = handle;
 	int conns_active;
+	int wait_delay = 1 * HZ;
 
 	/* check if cleanup happened in GOING_DOWN context */
-	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
-				&hba->adapter_state))
+	if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN,
+			      &hba->adapter_state)) {
 		iscsi_host_for_each_session(hba->shost,
 					    bnx2i_drop_session);
-
+		wait_delay = hba->hba_shutdown_tmo;
+	}
+	/* Wait for inflight offload connection tasks to complete before
+	 * proceeding. Forcefully terminate all connection recovery in
+	 * progress at the earliest, either in bind(), send_pdu(LOGIN),
+	 * or conn_start()
+	 */
+	wait_event_interruptible_timeout(hba->eh_wait,
+					 (list_empty(&hba->ep_ofld_list) &&
+					 list_empty(&hba->ep_destroy_list)),
+					 10 * HZ);
 	/* Wait for all endpoints to be torn down; the chip will be reset once
 	 * control returns to the network driver, so it is required to clean up
 	 * and release all connection resources before returning from this routine.
@@ -226,7 +236,7 @@
 		conns_active = hba->ofld_conns_active;
 		wait_event_interruptible_timeout(hba->eh_wait,
 				(hba->ofld_conns_active != conns_active),
-				hba->hba_shutdown_tmo);
+				wait_delay);
 		if (hba->ofld_conns_active == conns_active)
 			break;
 	}
@@ -235,88 +245,10 @@
 	/* This flag should be cleared last so that ep_disconnect() gracefully
 	 * cleans up connection context
 	 */
+	clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
 	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
 }
 
-/**
- * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
- * @hba:	Adapter instance to register
- *
- * registers bnx2i adapter instance with the cnic driver while holding the
- *	adapter structure lock
- */
-void bnx2i_register_device(struct bnx2i_hba *hba)
-{
-	int rc;
-
-	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
-	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
-		return;
-	}
-
-	rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
-
-	if (!rc)
-		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-}
-
-
-/**
- * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
- *
- * registers all bnx2i adapter instances with the cnic driver while holding
- *	the global resource lock
- */
-void bnx2i_reg_dev_all(void)
-{
-	struct bnx2i_hba *hba, *temp;
-
-	mutex_lock(&bnx2i_dev_lock);
-	list_for_each_entry_safe(hba, temp, &adapter_list, link)
-		bnx2i_register_device(hba);
-	mutex_unlock(&bnx2i_dev_lock);
-}
-
-
-/**
- * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
- * @hba:	Adapter instance to unregister
- *
- * registers bnx2i adapter instance with the cnic driver while holding
- *	the adapter structure lock
- */
-static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
-{
-	if (hba->ofld_conns_active ||
-	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
-	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
-		return;
-
-	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
-
-	/* ep_disconnect could come before NETDEV_DOWN, driver won't
-	 * see NETDEV_DOWN as it already unregistered itself.
-	 */
-	hba->adapter_state = 0;
-	clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-}
-
-/**
- * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
- *
- * unregisters all bnx2i adapter instances with the cnic driver while holding
- *	the global resource lock
- */
-void bnx2i_unreg_dev_all(void)
-{
-	struct bnx2i_hba *hba, *temp;
-
-	mutex_lock(&bnx2i_dev_lock);
-	list_for_each_entry_safe(hba, temp, &adapter_list, link)
-		bnx2i_unreg_one_device(hba);
-	mutex_unlock(&bnx2i_dev_lock);
-}
-
 
 /**
  * bnx2i_init_one - initialize an adapter instance and allocate memory resources
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fb50efb..f0dce26 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
 /*
  * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2006 - 2010 Broadcom Corporation
  * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
  * Copyright (c) 2007, 2008 Mike Christie
  *
@@ -10,6 +10,7 @@
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include <linux/slab.h>
@@ -411,7 +412,9 @@
 	bnx2i_ep->state = EP_STATE_IDLE;
 	bnx2i_ep->hba->ofld_conns_active--;
 
-	bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+	if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
+		bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+
 	if (bnx2i_ep->conn) {
 		bnx2i_ep->conn->ep = NULL;
 		bnx2i_ep->conn = NULL;
@@ -1383,6 +1386,12 @@
 	ep = iscsi_lookup_endpoint(transport_fd);
 	if (!ep)
 		return -EINVAL;
+	/*
+	 * Forcefully terminate all in progress connection recovery at the
+	 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
+	 */
+	if (bnx2i_adapter_ready(hba))
+		return -EIO;
 
 	bnx2i_ep = ep->dd_data;
 	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
@@ -1404,7 +1413,6 @@
 				  hba->netdev->name);
 		return -EEXIST;
 	}
-
 	bnx2i_ep->conn = bnx2i_conn;
 	bnx2i_conn->ep = bnx2i_ep;
 	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
@@ -1461,21 +1469,28 @@
 	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
 	int len = 0;
 
+	if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
+		goto out;
+
 	switch (param) {
 	case ISCSI_PARAM_CONN_PORT:
-		if (bnx2i_conn->ep)
+		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
+		if (bnx2i_conn->ep->cm_sk)
 			len = sprintf(buf, "%hu\n",
 				      bnx2i_conn->ep->cm_sk->dst_port);
+		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
-		if (bnx2i_conn->ep)
+		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
+		if (bnx2i_conn->ep->cm_sk)
 			len = sprintf(buf, "%pI4\n",
 				      &bnx2i_conn->ep->cm_sk->dst_ip);
+		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
 		break;
 	default:
 		return iscsi_conn_get_param(cls_conn, param, buf);
 	}
-
+out:
 	return len;
 }
 
@@ -1599,8 +1614,6 @@
 	struct bnx2i_hba *hba;
 	struct cnic_dev *cnic = NULL;
 
-	bnx2i_reg_dev_all();
-
 	hba = get_adapter_list_head();
 	if (hba && hba->cnic)
 		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
@@ -1640,18 +1653,26 @@
 static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
 				 struct bnx2i_endpoint *ep)
 {
-	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
 		hba->cnic->cm_destroy(ep->cm_sk);
 
-	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
-		ep->state = EP_STATE_DISCONN_COMPL;
-
 	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
 	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
-		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
-				  " NW/PCIe trace, driver msgs to developers"
-				  " for analysis\n");
-		return 1;
+		if (ep->conn && ep->conn->cls_conn &&
+		    ep->conn->cls_conn->dd_data) {
+			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
+
+			/* Must suspend all rx queue activity for this ep */
+			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+		}
+		/* A CONN_DISCONNECT timeout may or may not be an issue,
+		 * depending on what transpired in the TCP layer; different
+		 * targets behave differently
+		 */
+		printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
+				  "please submit GRC Dump, NW/PCIe trace, "
+				  "driver msgs to developers for analysis\n",
+				  hba->netdev->name);
 	}
 
 	ep->state = EP_STATE_CLEANUP_START;
@@ -1664,7 +1685,9 @@
 	bnx2i_ep_destroy_list_add(hba, ep);
 
 	/* destroy iSCSI context, wait for it to complete */
-	bnx2i_send_conn_destroy(hba, ep);
+	if (bnx2i_send_conn_destroy(hba, ep))
+		ep->state = EP_STATE_CLEANUP_CMPL;
+
 	wait_event_interruptible(ep->ofld_wait,
 				 (ep->state != EP_STATE_CLEANUP_START));
 
@@ -1711,8 +1734,6 @@
 	if (shost) {
 		/* driver is given scsi host to work with */
 		hba = iscsi_host_priv(shost);
-		/* Register the device with cnic if not already done so */
-		bnx2i_register_device(hba);
 	} else
 		/*
 		 * check if the given destination can be reached through
@@ -1720,13 +1741,17 @@
 		 */
 		hba = bnx2i_check_route(dst_addr);
 
-	if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+	if (!hba) {
 		rc = -EINVAL;
 		goto nohba;
 	}
-
-	cnic = hba->cnic;
 	mutex_lock(&hba->net_dev_lock);
+
+	if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
+		rc = -EPERM;
+		goto check_busy;
+	}
+	cnic = hba->cnic;
 	ep = bnx2i_alloc_ep(hba);
 	if (!ep) {
 		rc = -ENOMEM;
@@ -1734,23 +1759,21 @@
 	}
 	bnx2i_ep = ep->dd_data;
 
-	if (bnx2i_adapter_ready(hba)) {
-		rc = -EPERM;
-		goto net_if_down;
-	}
-
 	bnx2i_ep->num_active_cmds = 0;
 	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
 	if (iscsi_cid == -1) {
-		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+		printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
+			"iscsi cid\n", hba->netdev->name);
 		rc = -ENOMEM;
-		goto iscsi_cid_err;
+		bnx2i_free_ep(ep);
+		goto check_busy;
 	}
 	bnx2i_ep->hba_age = hba->age;
 
 	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
 	if (rc != 0) {
-		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+		printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
+			"\n", hba->netdev->name);
 		rc = -ENOMEM;
 		goto qp_resc_err;
 	}
@@ -1765,7 +1788,18 @@
 	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
 	add_timer(&bnx2i_ep->ofld_timer);
 
-	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+	if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
+		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
+			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
+				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
+			rc = -EBUSY;
+		} else
+			rc = -ENOSPC;
+		printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
+			"\n", hba->netdev->name);
+		bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+		goto conn_failed;
+	}
 
 	/* Wait for CNIC hardware to setup conn context and return 'cid' */
 	wait_event_interruptible(bnx2i_ep->ofld_wait,
@@ -1778,7 +1812,12 @@
 	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
 
 	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
-		rc = -ENOSPC;
+		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
+			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
+				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
+			rc = -EBUSY;
+		} else
+			rc = -ENOSPC;
 		goto conn_failed;
 	}
 
@@ -1786,7 +1825,8 @@
 			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
 	if (rc) {
 		rc = -EINVAL;
-		goto conn_failed;
+		/* Need to terminate and cleanup the connection */
+		goto release_ep;
 	}
 
 	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
@@ -1830,15 +1870,12 @@
 		return ERR_PTR(rc);
 	}
 conn_failed:
-net_if_down:
-iscsi_cid_err:
 	bnx2i_free_qp_resc(hba, bnx2i_ep);
 qp_resc_err:
 	bnx2i_free_ep(ep);
 check_busy:
 	mutex_unlock(&hba->net_dev_lock);
 nohba:
-	bnx2i_unreg_dev_all();
 	return ERR_PTR(rc);
 }
 
@@ -1898,12 +1935,13 @@
 		cnic_dev_10g = 1;
 
 	switch (bnx2i_ep->state) {
-	case EP_STATE_CONNECT_START:
+	case EP_STATE_CONNECT_FAILED:
 	case EP_STATE_CLEANUP_FAILED:
 	case EP_STATE_OFLD_FAILED:
 	case EP_STATE_DISCONN_TIMEDOUT:
 		ret = 0;
 		break;
+	case EP_STATE_CONNECT_START:
 	case EP_STATE_CONNECT_COMPL:
 	case EP_STATE_ULP_UPDATE_START:
 	case EP_STATE_ULP_UPDATE_COMPL:
@@ -1914,13 +1952,10 @@
 		ret = 1;
 		break;
 	case EP_STATE_TCP_RST_RCVD:
-		ret = 0;
-		break;
-	case EP_STATE_CONNECT_FAILED:
 		if (cnic_dev_10g)
-			ret = 1;
-		else
 			ret = 0;
+		else
+			ret = 1;
 		break;
 	default:
 		ret = 0;
@@ -1953,7 +1988,8 @@
 	if (!cnic)
 		return 0;
 
-	if (bnx2i_ep->state == EP_STATE_IDLE)
+	if (bnx2i_ep->state == EP_STATE_IDLE ||
+	    bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
 		return 0;
 
 	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
@@ -1979,9 +2015,10 @@
 			if (session->state == ISCSI_STATE_LOGGING_OUT) {
 				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
 					/* Logout sent, but no resp */
-					printk(KERN_ALERT "bnx2i - WARNING "
-						"logout response was not "
-						"received!\n");
+					printk(KERN_ALERT "bnx2i (%s): WARNING"
+						" logout response was not "
+						"received!\n",
+						bnx2i_ep->hba->netdev->name);
 				} else if (bnx2i_ep->state ==
 					   EP_STATE_LOGOUT_RESP_RCVD)
 					close = 1;
@@ -1999,9 +2036,8 @@
 	else
 		close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
 
-	/* No longer allow CFC delete if cm_close/abort fails the request */
 	if (close_ret)
-		printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n",
+		printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
 			bnx2i_ep->hba->netdev->name, close, close_ret);
 	else
 		/* wait for option-2 conn teardown */
@@ -2015,7 +2051,7 @@
 destroy_conn:
 	bnx2i_ep_active_list_del(hba, bnx2i_ep);
 	if (bnx2i_tear_down_conn(hba, bnx2i_ep))
-		ret = -EINVAL;
+		return -EINVAL;
 out:
 	bnx2i_ep->state = EP_STATE_IDLE;
 	return ret;
@@ -2054,14 +2090,17 @@
 
 	mutex_lock(&hba->net_dev_lock);
 
+	if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
+		goto out;
+
 	if (bnx2i_ep->state == EP_STATE_IDLE)
-		goto return_bnx2i_ep;
-
-	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
 		goto free_resc;
 
-	if (bnx2i_ep->hba_age != hba->age)
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+	    (bnx2i_ep->hba_age != hba->age)) {
+		bnx2i_ep_active_list_del(hba, bnx2i_ep);
 		goto free_resc;
+	}
 
 	/* Do all chip cleanup here */
 	if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
@@ -2070,14 +2109,13 @@
 	}
 free_resc:
 	bnx2i_free_qp_resc(hba, bnx2i_ep);
-return_bnx2i_ep:
+
 	if (bnx2i_conn)
 		bnx2i_conn->ep = NULL;
 
 	bnx2i_free_ep(ep);
+out:
 	mutex_unlock(&hba->net_dev_lock);
-	if (!hba->ofld_conns_active)
-		bnx2i_unreg_dev_all();
 
 	wake_up_interruptible(&hba->eh_wait);
 }
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 96426b7..9174196 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,12 +1,13 @@
 /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
  *
- * Copyright (c) 2004 - 2009 Broadcom Corporation
+ * Copyright (c) 2004 - 2010 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
  */
 
 #include "bnx2i.h"
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8f1b5c8..b0f8523 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3796,7 +3796,7 @@
  * adapter_add_device - Adds the device instance to the adaptor instance.
  *
  * @acb: The adapter device to be updated
- * @dcb: A newly created and intialised device instance to add.
+ * @dcb: A newly created and initialised device instance to add.
  **/
 static void adapter_add_device(struct AdapterCtlBlk *acb,
 		struct DeviceCtlBlk *dcb)
@@ -4498,7 +4498,7 @@
  * init_adapter - Grab the resource for the card, setup the adapter
  * information, set the card into a known state, create the various
  * tables etc etc. This basically gets all adapter information all up
- * to date, intialised and gets the chip in sync with it.
+ * to date, initialised and gets the chip in sync with it.
  *
  * @host:	This hosts adapter structure
  * @io_port:	The base I/O port
@@ -4789,7 +4789,7 @@
 * that it finds in the system. The pci_dev structure indicates which
  * instance we are being called from.
  * 
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
  * @id: Looks like a pointer to the entry in our pci device table
  * that was actually matched by the PCI subsystem.
  *
@@ -4860,7 +4860,7 @@
  * dc395x_remove_one - Called to remove a single instance of the
  * adapter.
  *
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
  **/
 static void __devexit dc395x_remove_one(struct pci_dev *dev)
 {
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 6fae3d2..b837c5b 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -442,12 +442,19 @@
 	sdev = q->queuedata;
 	if (sdev && sdev->scsi_dh_data)
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-	if (!scsi_dh || !get_device(&sdev->sdev_gendev))
+	if (!scsi_dh || !get_device(&sdev->sdev_gendev) ||
+	    sdev->sdev_state == SDEV_CANCEL ||
+	    sdev->sdev_state == SDEV_DEL)
 		err = SCSI_DH_NOSYS;
+	if (sdev->sdev_state == SDEV_OFFLINE)
+		err = SCSI_DH_DEV_OFFLINED;
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	if (err)
+	if (err) {
+		if (fn)
+			fn(data, err);
 		return err;
+	}
 
 	if (scsi_dh->activate)
 		err = scsi_dh->activate(sdev, fn, data);
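
(Editor's note: the scsi_dh hunk above also completes the request on the early-error path. A hedged sketch of that convention follows; the names try_activate/activate_done_fn and the error values are made up for illustration and are not the scsi_dh API.)

/* Illustrative only: if activation cannot even start, still run the
 * caller's completion callback with the error so no waiter is stranded.
 */
typedef void (*activate_done_fn)(void *data, int err);

static int try_activate(int handler_present, int dev_offline,
			activate_done_fn done, void *data)
{
	int err = 0;

	if (!handler_present)
		err = -1;		/* stand-in for SCSI_DH_NOSYS */
	else if (dev_offline)
		err = -2;		/* stand-in for SCSI_DH_DEV_OFFLINED */

	if (err) {
		if (done)
			done(data, err);	/* complete immediately */
		return err;
	}

	/* ...otherwise hand off to the real handler, which calls done() later */
	return 0;
}
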
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index d23a538..9f9600b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -854,7 +854,6 @@
 
 	/* Cleanup the fc_lport */
 	fc_lport_destroy(lport);
-	fc_fcp_destroy(lport);
 
 	/* Stop the transmit retry timer */
 	del_timer_sync(&port->timer);
@@ -876,6 +875,9 @@
 	fc_remove_host(lport->host);
 	scsi_remove_host(lport->host);
 
+	/* Destroy lport scsi_priv */
+	fc_fcp_destroy(lport);
+
 	/* There are no more rports or I/O, free the EM */
 	fc_exch_mgr_free(lport);
 
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index bc17c71..625c6be 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -54,6 +54,7 @@
 static void fcoe_ctlr_timeout(unsigned long);
 static void fcoe_ctlr_timer_work(struct work_struct *);
 static void fcoe_ctlr_recv_work(struct work_struct *);
+static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
 
 static void fcoe_ctlr_vn_start(struct fcoe_ctlr *);
 static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *);
@@ -176,6 +177,7 @@
 	fip->mode = mode;
 	INIT_LIST_HEAD(&fip->fcfs);
 	mutex_init(&fip->ctlr_mutex);
+	spin_lock_init(&fip->ctlr_lock);
 	fip->flogi_oxid = FC_XID_UNKNOWN;
 	setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
 	INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
@@ -231,6 +233,49 @@
 EXPORT_SYMBOL(fcoe_ctlr_destroy);
 
 /**
+ * fcoe_ctlr_announce() - announce new FCF selection
+ * @fip: The FCoE controller
+ *
+ * Also sets the destination MAC for FCoE and control packets
+ *
+ * Called with neither ctlr_mutex nor ctlr_lock held.
+ */
+static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
+{
+	struct fcoe_fcf *sel;
+	struct fcoe_fcf *fcf;
+
+	mutex_lock(&fip->ctlr_mutex);
+	spin_lock_bh(&fip->ctlr_lock);
+
+	kfree_skb(fip->flogi_req);
+	fip->flogi_req = NULL;
+	list_for_each_entry(fcf, &fip->fcfs, list)
+		fcf->flogi_sent = 0;
+
+	spin_unlock_bh(&fip->ctlr_lock);
+	sel = fip->sel_fcf;
+
+	if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr))
+		goto unlock;
+	if (!is_zero_ether_addr(fip->dest_addr)) {
+		printk(KERN_NOTICE "libfcoe: host%d: "
+		       "FIP Fibre-Channel Forwarder MAC %pM deselected\n",
+		       fip->lp->host->host_no, fip->dest_addr);
+		memset(fip->dest_addr, 0, ETH_ALEN);
+	}
+	if (sel) {
+		printk(KERN_INFO "libfcoe: host%d: FIP selected "
+		       "Fibre-Channel Forwarder MAC %pM\n",
+		       fip->lp->host->host_no, sel->fcf_mac);
+		memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
+		fip->map_dest = 0;
+	}
+unlock:
+	mutex_unlock(&fip->ctlr_mutex);
+}
+
+/**
  * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port
  * @fip: The FCoE controller to get the maximum FCoE size from
  *
@@ -564,6 +609,9 @@
  * The caller must check that the length is a multiple of 4.
  * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes).
 * The skb must also be an fc_frame.
+ *
+ * This is called from the lower-level driver with spinlocks held,
+ * so we must not take a mutex here.
  */
 int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 		       struct sk_buff *skb)
@@ -601,7 +649,15 @@
 	switch (op) {
 	case ELS_FLOGI:
 		op = FIP_DT_FLOGI;
-		break;
+		if (fip->mode == FIP_MODE_VN2VN)
+			break;
+		spin_lock_bh(&fip->ctlr_lock);
+		kfree_skb(fip->flogi_req);
+		fip->flogi_req = skb;
+		fip->flogi_req_send = 1;
+		spin_unlock_bh(&fip->ctlr_lock);
+		schedule_work(&fip->timer_work);
+		return -EINPROGRESS;
 	case ELS_FDISC:
 		if (ntoh24(fh->fh_s_id))
 			return 0;
@@ -922,11 +978,9 @@
 	}
 	mtu_valid = fcoe_ctlr_mtu_valid(fcf);
 	fcf->time = jiffies;
-	if (!found) {
-		LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx "
-				"map %x val %d\n",
-				fcf->fabric_name, fcf->fc_map, mtu_valid);
-	}
+	if (!found)
+		LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+				fcf->fabric_name, fcf->fcf_mac);
 
 	/*
 	 * If this advertisement is not solicited and our max receive size
@@ -945,6 +999,17 @@
 		fcoe_ctlr_solicit(fip, NULL);
 
 	/*
+	 * Put this FCF at the head of the list for priority among equals.
+	 * This helps in the case of an NPV switch which insists we use
+	 * the FCF that answers multicast solicitations, not the others that
+	 * are sending periodic multicast advertisements.
+	 */
+	if (mtu_valid) {
+		list_del(&fcf->list);
+		list_add(&fcf->list, &fip->fcfs);
+	}
+
+	/*
 	 * If this is the first validated FCF, note the time and
 	 * set a timer to trigger selection.
 	 */
@@ -1061,18 +1126,24 @@
 	els_op = *(u8 *)(fh + 1);
 
 	if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) &&
-	    sub == FIP_SC_REP && els_op == ELS_LS_ACC &&
-	    fip->mode != FIP_MODE_VN2VN) {
-		if (!is_valid_ether_addr(granted_mac)) {
-			LIBFCOE_FIP_DBG(fip,
-				"Invalid MAC address %pM in FIP ELS\n",
-				granted_mac);
-			goto drop;
-		}
-		memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
+	    sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) {
+		if (els_op == ELS_LS_ACC) {
+			if (!is_valid_ether_addr(granted_mac)) {
+				LIBFCOE_FIP_DBG(fip,
+					"Invalid MAC address %pM in FIP ELS\n",
+					granted_mac);
+				goto drop;
+			}
+			memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
 
-		if (fip->flogi_oxid == ntohs(fh->fh_ox_id))
-			fip->flogi_oxid = FC_XID_UNKNOWN;
+			if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
+				fip->flogi_oxid = FC_XID_UNKNOWN;
+				if (els_dtype == FIP_DT_FLOGI)
+					fcoe_ctlr_announce(fip);
+			}
+		} else if (els_dtype == FIP_DT_FLOGI &&
+			   !fcoe_ctlr_flogi_retry(fip))
+			goto drop;	/* retrying FLOGI so drop reject */
 	}
 
 	if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) &&
@@ -1326,20 +1397,39 @@
  * fcoe_ctlr_select() - Select the best FCF (if possible)
  * @fip: The FCoE controller
  *
+ * Returns the selected FCF, or NULL if none are usable.
+ *
  * If there are conflicting advertisements, no FCF can be chosen.
  *
+ * If there is already a selected FCF, this will choose a better one or
+ * an equivalent one that hasn't already been sent a FLOGI.
+ *
  * Called with lock held.
  */
-static void fcoe_ctlr_select(struct fcoe_ctlr *fip)
+static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
 {
 	struct fcoe_fcf *fcf;
-	struct fcoe_fcf *best = NULL;
+	struct fcoe_fcf *best = fip->sel_fcf;
+	struct fcoe_fcf *first;
+
+	first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
 
 	list_for_each_entry(fcf, &fip->fcfs, list) {
-		LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx "
-				"VFID %d map %x val %d\n",
-				fcf->fabric_name, fcf->vfid,
-				fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
+		LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
+				"VFID %d mac %pM map %x val %d "
+				"sent %u pri %u\n",
+				fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
+				fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
+				fcf->flogi_sent, fcf->pri);
+		if (fcf->fabric_name != first->fabric_name ||
+		    fcf->vfid != first->vfid ||
+		    fcf->fc_map != first->fc_map) {
+			LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+					"or FC-MAP\n");
+			return NULL;
+		}
+		if (fcf->flogi_sent)
+			continue;
 		if (!fcoe_ctlr_fcf_usable(fcf)) {
 			LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
 					"map %x %svalid %savailable\n",
@@ -1349,21 +1439,131 @@
 					"" : "un");
 			continue;
 		}
-		if (!best) {
-			best = fcf;
-			continue;
-		}
-		if (fcf->fabric_name != best->fabric_name ||
-		    fcf->vfid != best->vfid ||
-		    fcf->fc_map != best->fc_map) {
-			LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
-					"or FC-MAP\n");
-			return;
-		}
-		if (fcf->pri < best->pri)
+		if (!best || fcf->pri < best->pri || best->flogi_sent)
 			best = fcf;
 	}
 	fip->sel_fcf = best;
+	if (best) {
+		LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac);
+		fip->port_ka_time = jiffies +
+			msecs_to_jiffies(FIP_VN_KA_PERIOD);
+		fip->ctlr_ka_time = jiffies + best->fka_period;
+		if (time_before(fip->ctlr_ka_time, fip->timer.expires))
+			mod_timer(&fip->timer, fip->ctlr_ka_time);
+	}
+	return best;
+}
+
+/**
+ * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF
+ * @fip: The FCoE controller
+ *
+ * Returns non-zero error if it could not be sent.
+ *
+ * Called with ctlr_mutex and ctlr_lock held.
+ * Caller must verify that fip->sel_fcf is not NULL.
+ */
+static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
+{
+	struct sk_buff *skb;
+	struct sk_buff *skb_orig;
+	struct fc_frame_header *fh;
+	int error;
+
+	skb_orig = fip->flogi_req;
+	if (!skb_orig)
+		return -EINVAL;
+
+	/*
+	 * Clone and send the FLOGI request.  If clone fails, use original.
+	 */
+	skb = skb_clone(skb_orig, GFP_ATOMIC);
+	if (!skb) {
+		skb = skb_orig;
+		fip->flogi_req = NULL;
+	}
+	fh = (struct fc_frame_header *)skb->data;
+	error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb,
+				 ntoh24(fh->fh_d_id));
+	if (error) {
+		kfree_skb(skb);
+		return error;
+	}
+	fip->send(fip, skb);
+	fip->sel_fcf->flogi_sent = 1;
+	return 0;
+}
+
+/**
+ * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible
+ * @fip: The FCoE controller
+ *
+ * Returns non-zero error code if there's no FLOGI request to retry or
+ * no alternate FCF available.
+ */
+static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
+{
+	struct fcoe_fcf *fcf;
+	int error;
+
+	mutex_lock(&fip->ctlr_mutex);
+	spin_lock_bh(&fip->ctlr_lock);
+	LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
+	fcf = fcoe_ctlr_select(fip);
+	if (!fcf || fcf->flogi_sent) {
+		kfree_skb(fip->flogi_req);
+		fip->flogi_req = NULL;
+		error = -ENOENT;
+	} else {
+		fcoe_ctlr_solicit(fip, NULL);
+		error = fcoe_ctlr_flogi_send_locked(fip);
+	}
+	spin_unlock_bh(&fip->ctlr_lock);
+	mutex_unlock(&fip->ctlr_mutex);
+	return error;
+}
+
+
+/**
+ * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI.
+ * @fip: The FCoE controller that timed out
+ *
+ * Done here because fcoe_ctlr_els_send() can't get mutex.
+ *
+ * Called with ctlr_mutex held.  The caller must not hold ctlr_lock.
+ */
+static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
+{
+	struct fcoe_fcf *fcf;
+
+	spin_lock_bh(&fip->ctlr_lock);
+	fcf = fip->sel_fcf;
+	if (!fcf || !fip->flogi_req_send)
+		goto unlock;
+
+	LIBFCOE_FIP_DBG(fip, "sending FLOGI\n");
+
+	/*
+	 * If this FLOGI is being sent due to a timeout retry
+	 * to the same FCF as before, select a different FCF if possible.
+	 */
+	if (fcf->flogi_sent) {
+		LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n");
+		fcf = fcoe_ctlr_select(fip);
+		if (!fcf || fcf->flogi_sent) {
+			LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n");
+			list_for_each_entry(fcf, &fip->fcfs, list)
+				fcf->flogi_sent = 0;
+			fcf = fcoe_ctlr_select(fip);
+		}
+	}
+	if (fcf) {
+		fcoe_ctlr_flogi_send_locked(fip);
+		fip->flogi_req_send = 0;
+	} else /* XXX */
+		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
+unlock:
+	spin_unlock_bh(&fip->ctlr_lock);
 }
 
 /**
@@ -1411,34 +1611,16 @@
 	sel = fip->sel_fcf;
 	if (!sel && fip->sel_time) {
 		if (time_after_eq(jiffies, fip->sel_time)) {
-			fcoe_ctlr_select(fip);
-			sel = fip->sel_fcf;
+			sel = fcoe_ctlr_select(fip);
 			fip->sel_time = 0;
 		} else if (time_after(next_timer, fip->sel_time))
 			next_timer = fip->sel_time;
 	}
 
-	if (sel != fcf) {
-		fcf = sel;		/* the old FCF may have been freed */
-		if (sel) {
-			printk(KERN_INFO "libfcoe: host%d: FIP selected "
-			       "Fibre-Channel Forwarder MAC %pM\n",
-			       fip->lp->host->host_no, sel->fcf_mac);
-			memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
-			fip->map_dest = 0;
-			fip->port_ka_time = jiffies +
-				msecs_to_jiffies(FIP_VN_KA_PERIOD);
-			fip->ctlr_ka_time = jiffies + sel->fka_period;
-			if (time_after(next_timer, fip->ctlr_ka_time))
-				next_timer = fip->ctlr_ka_time;
-		} else {
-			printk(KERN_NOTICE "libfcoe: host%d: "
-			       "FIP Fibre-Channel Forwarder timed out.	"
-			       "Starting FCF discovery.\n",
-			       fip->lp->host->host_no);
-			reset = 1;
-		}
-	}
+	if (sel && fip->flogi_req_send)
+		fcoe_ctlr_flogi_send(fip);
+	else if (!sel && fcf)
+		reset = 1;
 
 	if (sel && !sel->fd_flags) {
 		if (time_after_eq(jiffies, fip->ctlr_ka_time)) {
@@ -2475,7 +2657,7 @@
 	case FIP_ST_LINK_WAIT:
 		goto unlock;
 	default:
-		WARN(1, "unexpected state %d", fip->state);
+		WARN(1, "unexpected state %d\n", fip->state);
 		goto unlock;
 	}
 	mod_timer(&fip->timer, next_time);
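
(Editor's note on the reselection logic added in fcoe_ctlr_select() above: a hedged, self-contained sketch of the rule it implements — reject conflicting fabrics, skip FCFs already sent a FLOGI, prefer the lowest priority value. The struct and function names here are made up for illustration and are not the libfcoe structures.)

#include <stddef.h>

struct fcf_entry {
	unsigned long long fabric_name;
	int vfid;
	int fc_map;
	unsigned char pri;	/* lower value = more preferred */
	int flogi_sent;		/* already tried this one */
	int usable;		/* advertisement valid and available */
};

/* Returns the preferred entry, or NULL on conflict / nothing usable. */
static struct fcf_entry *pick_fcf(struct fcf_entry *list, size_t n)
{
	struct fcf_entry *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		struct fcf_entry *f = &list[i];

		/* All candidates must agree on fabric, VFID and FC-MAP. */
		if (f->fabric_name != list[0].fabric_name ||
		    f->vfid != list[0].vfid || f->fc_map != list[0].fc_map)
			return NULL;
		if (f->flogi_sent || !f->usable)
			continue;
		if (!best || f->pri < best->pri)
			best = f;
	}
	return best;
}
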
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 7636570..3242bca 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4273,8 +4273,10 @@
     }
 
     rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
-    if (rval < 0)
+    if (rval < 0) {
+	gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
         return rval;
+    }
     gen.status = rval;
 
     if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, 
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 0572b9b..6527543 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -365,8 +365,10 @@
                     len = 0;
                     begin = pos;
                 }
-                if (pos > offset + length)
+		if (pos > offset + length) {
+		    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                     goto stop_output;
+		}
             }
         }
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
@@ -450,8 +452,10 @@
                     len = 0;
                     begin = pos;
                 }
-                if (pos > offset + length)
+		if (pos > offset + length) {
+		    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                     goto stop_output;
+		}
             } while (drv_no != -1);
              
             if (is_mirr) {
@@ -472,8 +476,10 @@
                 len = 0;
                 begin = pos;
             }
-            if (pos > offset + length)
+	    if (pos > offset + length) {
+		gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                 goto stop_output;
+	    }
         }       
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
         
@@ -542,8 +548,10 @@
                     len = 0;
                     begin = pos;
                 }
-                if (pos > offset + length)
+		if (pos > offset + length) {
+		    gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
                     goto stop_output;
+		}
             }
         }
         gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a6dea08..12deffc 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -641,11 +641,6 @@
 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 	struct hpsa_scsi_dev_t *dev2)
 {
-	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
-		(dev1->lun != -1 && dev2->lun != -1)) &&
-		dev1->devtype != 0x0C)
-		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
-
 	/* we compare everything except lun and target as these
 	 * are not yet assigned.  Compare parts likely
 	 * to differ first
@@ -660,12 +655,8 @@
 		return 0;
 	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
 		return 0;
-	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
-		return 0;
 	if (dev1->devtype != dev2->devtype)
 		return 0;
-	if (dev1->raid_level != dev2->raid_level)
-		return 0;
 	if (dev1->bus != dev2->bus)
 		return 0;
 	return 1;
@@ -1477,8 +1468,6 @@
 		sizeof(this_device->vendor));
 	memcpy(this_device->model, &inq_buff[16],
 		sizeof(this_device->model));
-	memcpy(this_device->revision, &inq_buff[32],
-		sizeof(this_device->revision));
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a203ef6..19586e1 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -45,7 +45,6 @@
 	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
 	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
-	unsigned char revision[4];      /* bytes 32-35 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 };
 
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 57cad7e..b765061 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2493,23 +2493,23 @@
 }
 
 static const struct ibmvfc_async_desc ae_desc [] = {
-	{ IBMVFC_AE_ELS_PLOGI,		"PLOGI",		IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_ELS_LOGO,		"LOGO",		IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_ELS_PRLO,		"PRLO",		IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_SCN_NPORT,		"N-Port SCN",	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_SCN_GROUP,		"Group SCN",	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
-	{ IBMVFC_AE_SCN_DOMAIN,		"Domain SCN",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_SCN_FABRIC,		"Fabric SCN",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_LINK_UP,		"Link Up",		IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_LINK_DOWN,		"Link Down",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_LINK_DEAD,		"Link Dead",	IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_HALT,			"Halt",		IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_RESUME,		"Resume",		IBMVFC_DEFAULT_LOG_LEVEL },
-	{ IBMVFC_AE_ADAPTER_FAILED,	"Adapter Failed",	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
+	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
+	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
 };
 
 static const struct ibmvfc_async_desc unknown_ae = {
-	0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL
+	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
 };
 
 /**
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ef663e7..834c37f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -542,8 +542,8 @@
 };
 
 struct ibmvfc_async_desc {
-	enum ibmvfc_async_event ae;
 	const char *desc;
+	enum ibmvfc_async_event ae;
 	int log_level;
 };
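
The ibmvfc change swaps the first two members of struct ibmvfc_async_desc and reorders every entry of ae_desc[] to match, keeping the positional initializers in sync with the new layout. As a side note (not something the driver does here), C99 designated initializers make such tables immune to future member reordering; a self-contained sketch with invented event names:

#include <stddef.h>
#include <stdio.h>

enum demo_async_event { DEMO_AE_LINK_UP = 1, DEMO_AE_LINK_DOWN = 2 };

struct demo_async_desc {
	const char *desc;
	enum demo_async_event ae;
	int log_level;
};

/* Field names are spelled out, so swapping members in the struct
 * definition cannot silently mispair strings and event codes. */
static const struct demo_async_desc demo_ae_desc[] = {
	{ .desc = "Link Up",   .ae = DEMO_AE_LINK_UP,   .log_level = 2 },
	{ .desc = "Link Down", .ae = DEMO_AE_LINK_DOWN, .log_level = 2 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(demo_ae_desc) / sizeof(demo_ae_desc[0]); i++)
		printf("%s -> %d\n", demo_ae_desc[i].desc, demo_ae_desc[i].ae);
	return 0;
}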
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5bbaee5..9c5c8be 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -146,7 +146,7 @@
 		}
 	},
 	{ /* CRoC */
-		.mailbox = 0x00040,
+		.mailbox = 0x00044,
 		.cache_line_size = 0x20,
 		{
 			.set_interrupt_mask_reg = 0x00010,
@@ -1048,6 +1048,8 @@
 			sizeof(res->res_path));
 
 		res->bus = 0;
+		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+			sizeof(res->dev_lun.scsi_lun));
 		res->lun = scsilun_to_int(&res->dev_lun);
 
 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
@@ -1063,9 +1065,6 @@
 								  ioa_cfg->max_devs_supported);
 				set_bit(res->target, ioa_cfg->target_ids);
 			}
-
-			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
-				sizeof(res->dev_lun.scsi_lun));
 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
 			res->target = 0;
@@ -1116,7 +1115,7 @@
 	if (res->ioa_cfg->sis64) {
 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
-			!memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
+			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
 					sizeof(cfgtew->u.cfgte64->lun))) {
 			return 1;
 		}
@@ -2901,6 +2900,12 @@
 		return;
 	}
 
+	if (ioa_cfg->sis64) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		ssleep(IPR_DUMP_DELAY_SECONDS);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	}
+
 	start_addr = readl(ioa_cfg->ioa_mailbox);
 
 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
@@ -5743,7 +5748,7 @@
 	}
 
 	if (ipr_is_gata(res) && res->sata_port)
-		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
+		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
 
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 	ioarcb = &ipr_cmd->ioarcb;
@@ -7473,6 +7478,29 @@
 }
 
 /**
+ * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
+ * @ipr_cmd:	ipr command struct
+ *
+ * Description: This function will call to get the unit check buffer.
+ *
+ * Return value:
+ *	IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	ENTER;
+	ioa_cfg->ioa_unit_checked = 0;
+	ipr_get_unit_check_buffer(ioa_cfg);
+	ipr_cmd->job_step = ipr_reset_alert;
+	ipr_reset_start_timer(ipr_cmd, 0);
+
+	LEAVE;
+	return IPR_RC_JOB_RETURN;
+}
+
+/**
  * ipr_reset_restore_cfg_space - Restore PCI config space.
  * @ipr_cmd:	ipr command struct
  *
@@ -7487,16 +7515,10 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	volatile u32 int_reg;
-	int rc;
 
 	ENTER;
 	ioa_cfg->pdev->state_saved = true;
-	rc = pci_restore_state(ioa_cfg->pdev);
-
-	if (rc != PCIBIOS_SUCCESSFUL) {
-		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
-		return IPR_RC_JOB_CONTINUE;
-	}
+	pci_restore_state(ioa_cfg->pdev);
 
 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
@@ -7512,11 +7534,17 @@
 	}
 
 	if (ioa_cfg->ioa_unit_checked) {
-		ioa_cfg->ioa_unit_checked = 0;
-		ipr_get_unit_check_buffer(ioa_cfg);
-		ipr_cmd->job_step = ipr_reset_alert;
-		ipr_reset_start_timer(ipr_cmd, 0);
-		return IPR_RC_JOB_RETURN;
+		if (ioa_cfg->sis64) {
+			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
+			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
+			return IPR_RC_JOB_RETURN;
+		} else {
+			ioa_cfg->ioa_unit_checked = 0;
+			ipr_get_unit_check_buffer(ioa_cfg);
+			ipr_cmd->job_step = ipr_reset_alert;
+			ipr_reset_start_timer(ipr_cmd, 0);
+			return IPR_RC_JOB_RETURN;
+		}
 	}
 
 	if (ioa_cfg->in_ioa_bringdown) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b28a00f..13f425f 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -218,6 +218,8 @@
 #define IPR_WAIT_FOR_BIST_TIMEOUT		(2 * HZ)
 #define IPR_PCI_RESET_TIMEOUT			(HZ / 2)
 #define IPR_DUMP_TIMEOUT			(15 * HZ)
+#define IPR_DUMP_DELAY_SECONDS			4
+#define IPR_DUMP_DELAY_TIMEOUT			(IPR_DUMP_DELAY_SECONDS * HZ)
 
 /*
  * SCSI Literals
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index ec2a1ae..d21367d 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -67,6 +67,11 @@
 struct fc_exch_pool {
 	u16		 next_index;
 	u16		 total_exches;
+
+	/* two-entry cache of free slots in the exch array */
+	u16		 left;
+	u16		 right;
+
 	spinlock_t	 lock;
 	struct list_head ex_list;
 };
@@ -108,7 +113,6 @@
 		atomic_t non_bls_resp;
 	} stats;
 };
-#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
 
 /**
  * struct fc_exch_mgr_anchor - primary structure for list of EMs
@@ -397,13 +401,23 @@
 static void fc_exch_delete(struct fc_exch *ep)
 {
 	struct fc_exch_pool *pool;
+	u16 index;
 
 	pool = ep->pool;
 	spin_lock_bh(&pool->lock);
 	WARN_ON(pool->total_exches <= 0);
 	pool->total_exches--;
-	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
-			NULL);
+
+	/* update the cache of free slots */
+	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
+	if (pool->left == FC_XID_UNKNOWN)
+		pool->left = index;
+	else if (pool->right == FC_XID_UNKNOWN)
+		pool->right = index;
+	else
+		pool->next_index = index;
+
+	fc_exch_ptr_set(pool, index, NULL);
 	list_del(&ep->ex_list);
 	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
@@ -636,10 +650,13 @@
 		if (e_stat & ESB_ST_ABNORMAL)
 			rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
-		if (!rc)
-			fc_exch_delete(ep);
 		if (resp)
 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+		if (!rc) {
+			/* delete the exchange if it's already being aborted */
+			fc_exch_delete(ep);
+			return;
+		}
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
 		goto done;
 	}
@@ -679,6 +696,19 @@
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
 	put_cpu();
+
+	/* peek at the cache of free slots */
+	if (pool->left != FC_XID_UNKNOWN) {
+		index = pool->left;
+		pool->left = FC_XID_UNKNOWN;
+		goto hit;
+	}
+	if (pool->right != FC_XID_UNKNOWN) {
+		index = pool->right;
+		pool->right = FC_XID_UNKNOWN;
+		goto hit;
+	}
+
 	index = pool->next_index;
 	/* allocate new exch from pool */
 	while (fc_exch_ptr_get(pool, index)) {
@@ -687,7 +717,7 @@
 			goto err;
 	}
 	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
-
+hit:
 	fc_exch_hold(ep);	/* hold for exch in mp */
 	spin_lock_init(&ep->ex_lock);
 	/*
@@ -1247,7 +1277,7 @@
 
 	list_for_each_entry(ema, &lport->ema_list, ema_list)
 		if ((!ema->match || ema->match(fp)) &&
-		    fc_seq_lookup_recip(lport, ema->mp, fp) != FC_RJT_NONE)
+		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
 			break;
 	return fr_seq(fp);
 }
@@ -1343,7 +1373,7 @@
 	}
 	if (ep->esb_stat & ESB_ST_COMPLETE) {
 		atomic_inc(&mp->stats.xid_not_found);
-		goto out;
+		goto rel;
 	}
 	if (ep->rxid == FC_XID_UNKNOWN)
 		ep->rxid = ntohs(fh->fh_rx_id);
@@ -2181,6 +2211,8 @@
 		goto free_mempool;
 	for_each_possible_cpu(cpu) {
 		pool = per_cpu_ptr(mp->pool, cpu);
+		pool->left = FC_XID_UNKNOWN;
+		pool->right = FC_XID_UNKNOWN;
 		spin_lock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->ex_list);
 	}
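
The fc_exch_pool hunks add a two-entry cache (left/right) of recently freed exchange indices: fc_exch_delete() parks the freed index there, and the allocation path in fc_exch_em_alloc() reuses it before falling back to the linear scan from next_index. A minimal userspace sketch of the same idea, with an invented demo_pool type and INDEX_UNUSED standing in for FC_XID_UNKNOWN:

#include <stdint.h>
#include <string.h>

#define POOL_SIZE	64
#define INDEX_UNUSED	UINT16_MAX	/* plays the role of FC_XID_UNKNOWN */

struct demo_pool {
	uint16_t next_index;		/* cursor for the linear scan */
	uint16_t left, right;		/* cache of up to two freed slots */
	void *slot[POOL_SIZE];
};

static void demo_pool_init(struct demo_pool *p)
{
	memset(p, 0, sizeof(*p));
	p->left = p->right = INDEX_UNUSED;
}

/* Prefer a cached free slot; fall back to scanning from next_index. */
static int demo_alloc_index(struct demo_pool *p, void *obj)
{
	uint16_t index;

	if (p->left != INDEX_UNUSED) {
		index = p->left;
		p->left = INDEX_UNUSED;
	} else if (p->right != INDEX_UNUSED) {
		index = p->right;
		p->right = INDEX_UNUSED;
	} else {
		index = p->next_index;
		while (p->slot[index]) {
			index = (index == POOL_SIZE - 1) ? 0 : index + 1;
			if (index == p->next_index)
				return -1;	/* pool exhausted */
		}
		p->next_index = (index == POOL_SIZE - 1) ? 0 : index + 1;
	}
	p->slot[index] = obj;
	return index;
}

/* On free, remember the index so the next allocation can reuse it. */
static void demo_free_index(struct demo_pool *p, uint16_t index)
{
	if (p->left == INDEX_UNUSED)
		p->left = index;
	else if (p->right == INDEX_UNUSED)
		p->right = index;
	else
		p->next_index = index;
	p->slot[index] = NULL;
}

int main(void)
{
	struct demo_pool pool;
	int obj = 42;

	demo_pool_init(&pool);
	int a = demo_alloc_index(&pool, &obj);	/* linear scan path */
	demo_free_index(&pool, (uint16_t)a);	/* cached in 'left' */
	int b = demo_alloc_index(&pool, &obj);	/* reuses the cached slot */
	return a == b ? 0 : 1;
}
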
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2924363..5962d1a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -57,6 +57,9 @@
 #define FC_SRB_READ		(1 << 1)
 #define FC_SRB_WRITE		(1 << 0)
 
+/* constant added to e_d_tov timeout to get rec_tov value */
+#define REC_TOV_CONST		1
+
 /*
  * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
  */
@@ -96,7 +99,7 @@
 static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
 static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
 static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
-static void fc_fcp_recovery(struct fc_fcp_pkt *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
 static void fc_fcp_timeout(unsigned long);
 static void fc_fcp_rec(struct fc_fcp_pkt *);
 static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@ -120,14 +123,13 @@
 #define FC_DATA_UNDRUN		7
 #define FC_ERROR		8
 #define FC_HRD_ERROR		9
-#define FC_CMD_RECOVERY		10
+#define FC_CRC_ERROR		10
+#define FC_TIMED_OUT		11
 
 /*
  * Error recovery timeout values.
  */
-#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
 #define FC_SCSI_TM_TOV		(10 * HZ)
-#define FC_SCSI_REC_TOV		(2 * HZ)
 #define FC_HOST_RESET_TIMEOUT	(30 * HZ)
 #define FC_CAN_QUEUE_PERIOD	(60 * HZ)
 
@@ -438,6 +440,7 @@
 	void *buf;
 	struct scatterlist *sg;
 	u32 nents;
+	u8 host_bcode = FC_COMPLETE;
 
 	fh = fc_frame_header_get(fp);
 	offset = ntohl(fh->fh_parm_offset);
@@ -446,13 +449,16 @@
 	buf = fc_frame_payload_get(fp, 0);
 
 	/*
-	 * if this I/O is ddped then clear it
-	 * and initiate recovery since data
-	 * frames are expected to be placed
-	 * directly in that case.
+	 * if this I/O is ddped then clear it and initiate recovery since data
+	 * frames are expected to be placed directly in that case.
+	 *
+	 * Indicate error to scsi-ml because something went wrong with the
+	 * ddp handling to get us here.
 	 */
 	if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
 		fc_fcp_ddp_done(fsp);
+		FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
+		host_bcode = FC_ERROR;
 		goto err;
 	}
 	if (offset + len > fsp->data_len) {
@@ -462,6 +468,9 @@
 			goto crc_err;
 		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
 			   "data_len %x\n", len, offset, fsp->data_len);
+
+		/* Data is corrupted; indicate scsi-ml should retry */
+		host_bcode = FC_DATA_OVRRUN;
 		goto err;
 	}
 	if (offset != fsp->xfer_len)
@@ -498,8 +507,10 @@
 			 * If so, we need to retry the entire operation.
 			 * Otherwise, ignore it.
 			 */
-			if (fsp->state & FC_SRB_DISCONTIG)
+			if (fsp->state & FC_SRB_DISCONTIG) {
+				host_bcode = FC_CRC_ERROR;
 				goto err;
+			}
 			return;
 		}
 	}
@@ -517,7 +528,7 @@
 		fc_fcp_complete_locked(fsp);
 	return;
 err:
-	fc_fcp_recovery(fsp);
+	fc_fcp_recovery(fsp, host_bcode);
 }
 
 /**
@@ -962,7 +973,13 @@
 		}
 		lport->tt.exch_done(seq);
 	}
-	fc_io_compl(fsp);
+	/*
+	 * Some resets driven by SCSI are not I/Os and do not have
+	 * SCSI commands associated with the requests. We should not
+	 * call I/O completion if we do not have a SCSI command.
+	 */
+	if (fsp->cmd)
+		fc_io_compl(fsp);
 }
 
 /**
@@ -1073,6 +1090,21 @@
 }
 
 /**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rpriv;
+
+	rport = fsp->rport;
+	rpriv = rport->dd_data;
+
+	return rpriv->e_d_tov + REC_TOV_CONST;
+}
+
+/**
  * fc_fcp_cmd_send() - Send a FCP command
  * @lport: The local port to send the command on
  * @fsp:   The FCP packet the command is on
@@ -1089,6 +1121,7 @@
 	struct fc_rport_libfc_priv *rpriv;
 	const size_t len = sizeof(fsp->cdb_cmd);
 	int rc = 0;
+	unsigned int rec_tov;
 
 	if (fc_fcp_lock_pkt(fsp))
 		return 0;
@@ -1119,10 +1152,13 @@
 	fsp->seq_ptr = seq;
 	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */
 
+	rec_tov = get_fsp_rec_tov(fsp);
+
 	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
-	fc_fcp_timer_set(fsp,
-			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
-			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+
+	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+		fc_fcp_timer_set(fsp, rec_tov);
+
 unlock:
 	fc_fcp_unlock_pkt(fsp);
 	return rc;
@@ -1197,13 +1233,16 @@
 {
 	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
 	struct fc_lport *lport = fsp->lp;
+	unsigned int rec_tov;
+
 	if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
 		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
 			return;
 		if (fc_fcp_lock_pkt(fsp))
 			return;
+		rec_tov = get_fsp_rec_tov(fsp);
 		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
-		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		fc_fcp_timer_set(fsp, rec_tov);
 		fc_fcp_unlock_pkt(fsp);
 	}
 }
@@ -1211,7 +1250,7 @@
 /**
  * fc_lun_reset() - Send a LUN RESET command to a device
  *		    and wait for the reply
- * @lport: The local port to sent the comand on
+ * @lport: The local port to send the command on
  * @fsp:   The FCP packet that identifies the LUN to be reset
  * @id:	   The SCSI command ID
  * @lun:   The LUN ID to be reset
@@ -1282,27 +1321,27 @@
 		 *
 		 * scsi-eh will escalate for when either happens.
 		 */
-		return;
+		goto out;
 	}
 
 	if (fc_fcp_lock_pkt(fsp))
-		return;
+		goto out;
 
 	/*
 	 * raced with eh timeout handler.
 	 */
-	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
-		spin_unlock_bh(&fsp->scsi_pkt_lock);
-		return;
-	}
+	if (!fsp->seq_ptr || !fsp->wait_for_comp)
+		goto out_unlock;
 
 	fh = fc_frame_header_get(fp);
 	if (fh->fh_type != FC_TYPE_BLS)
 		fc_fcp_resp(fsp, fp);
 	fsp->seq_ptr = NULL;
 	fsp->lp->tt.exch_done(seq);
-	fc_frame_free(fp);
+out_unlock:
 	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_frame_free(fp);
 }
 
 /**
@@ -1341,13 +1380,10 @@
 
 	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
 		fc_fcp_rec(fsp);
-	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
-			       jiffies))
-		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
 	else if (fsp->state & FC_SRB_RCV_STATUS)
 		fc_fcp_complete_locked(fsp);
 	else
-		fc_fcp_recovery(fsp);
+		fc_fcp_recovery(fsp, FC_TIMED_OUT);
 	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
 unlock:
 	fc_fcp_unlock_pkt(fsp);
@@ -1373,6 +1409,7 @@
 		fc_fcp_complete_locked(fsp);
 		return;
 	}
+
 	fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
 	if (!fp)
 		goto retry;
@@ -1383,15 +1420,15 @@
 		       FC_FCTL_REQ, 0);
 	if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
 				 fc_fcp_rec_resp, fsp,
-				 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
+				 2 * lport->r_a_tov)) {
 		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
 		return;
 	}
 retry:
 	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
-		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 	else
-		fc_fcp_recovery(fsp);
+		fc_fcp_recovery(fsp, FC_TIMED_OUT);
 }
 
 /**
@@ -1445,7 +1482,6 @@
 			 * making progress.
 			 */
 			rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
-			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
 			break;
 		case ELS_RJT_LOGIC:
 		case ELS_RJT_UNAB:
@@ -1460,7 +1496,7 @@
 				fc_fcp_retry_cmd(fsp);
 				break;
 			}
-			fc_fcp_recovery(fsp);
+			fc_fcp_recovery(fsp, FC_ERROR);
 			break;
 		}
 	} else if (opcode == ELS_LS_ACC) {
@@ -1498,12 +1534,12 @@
 			}
 			fc_fcp_srr(fsp, r_ctl, offset);
 		} else if (e_stat & ESB_ST_SEQ_INIT) {
-
+			unsigned int rec_tov = get_fsp_rec_tov(fsp);
 			/*
 			 * The remote port has the initiative, so just
 			 * keep waiting for it to complete.
 			 */
-			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+			fc_fcp_timer_set(fsp, rec_tov);
 		} else {
 
 			/*
@@ -1575,7 +1611,7 @@
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
-			fc_fcp_recovery(fsp);
+			fc_fcp_recovery(fsp, FC_ERROR);
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
@@ -1587,9 +1623,9 @@
  * fc_fcp_recovery() - Handler for fcp_pkt recovery
  * @fsp: The FCP pkt that needs to be aborted
  */
-static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
 {
-	fsp->status_code = FC_CMD_RECOVERY;
+	fsp->status_code = code;
 	fsp->cdb_status = 0;
 	fsp->io_status = 0;
 	/*
@@ -1616,6 +1652,7 @@
 	struct fcp_srr *srr;
 	struct fc_frame *fp;
 	u8 cdb_op;
+	unsigned int rec_tov;
 
 	rport = fsp->rport;
 	rpriv = rport->dd_data;
@@ -1640,8 +1677,9 @@
 		       rpriv->local_port->port_id, FC_TYPE_FCP,
 		       FC_FCTL_REQ, 0);
 
+	rec_tov = get_fsp_rec_tov(fsp);
 	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
-				      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
+				      fsp, jiffies_to_msecs(rec_tov));
 	if (!seq)
 		goto retry;
 
@@ -1665,6 +1703,7 @@
 {
 	struct fc_fcp_pkt *fsp = arg;
 	struct fc_frame_header *fh;
+	unsigned int rec_tov;
 
 	if (IS_ERR(fp)) {
 		fc_fcp_srr_error(fsp, fp);
@@ -1691,11 +1730,12 @@
 	switch (fc_frame_payload_op(fp)) {
 	case ELS_LS_ACC:
 		fsp->recov_retry = 0;
-		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		rec_tov = get_fsp_rec_tov(fsp);
+		fc_fcp_timer_set(fsp, rec_tov);
 		break;
 	case ELS_LS_RJT:
 	default:
-		fc_fcp_recovery(fsp);
+		fc_fcp_recovery(fsp, FC_ERROR);
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
@@ -1721,7 +1761,7 @@
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
-			fc_fcp_recovery(fsp);
+			fc_fcp_recovery(fsp, FC_TIMED_OUT);
 		break;
 	case -FC_EX_CLOSED:			/* e.g., link failure */
 		/* fall through */
@@ -1820,19 +1860,17 @@
 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		fsp->req_flags = FC_SRB_READ;
 		stats->InputRequests++;
-		stats->InputMegabytes = fsp->data_len;
+		stats->InputBytes += fsp->data_len;
 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		fsp->req_flags = FC_SRB_WRITE;
 		stats->OutputRequests++;
-		stats->OutputMegabytes = fsp->data_len;
+		stats->OutputBytes += fsp->data_len;
 	} else {
 		fsp->req_flags = 0;
 		stats->ControlRequests++;
 	}
 	put_cpu();
 
-	fsp->tgt_flags = rpriv->flags;
-
 	init_timer(&fsp->timer);
 	fsp->timer.data = (unsigned long)fsp;
 
@@ -1946,18 +1984,29 @@
 		break;
 	case FC_CMD_ABORTED:
 		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
-			   "due to FC_CMD_ABORTED\n");
+			  "due to FC_CMD_ABORTED\n");
 		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
 		break;
-	case FC_CMD_RECOVERY:
-		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
-		break;
 	case FC_CMD_RESET:
+		FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
+			   "due to FC_CMD_RESET\n");
 		sc_cmd->result = (DID_RESET << 16);
 		break;
 	case FC_HRD_ERROR:
+		FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
+			   "due to FC_HRD_ERROR\n");
 		sc_cmd->result = (DID_NO_CONNECT << 16);
 		break;
+	case FC_CRC_ERROR:
+		FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
+			   "due to FC_CRC_ERROR\n");
+		sc_cmd->result = (DID_PARITY << 16);
+		break;
+	case FC_TIMED_OUT:
+		FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
+			   "due to FC_TIMED_OUT\n");
+		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+		break;
 	default:
 		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
 			   "due to unknown error\n");
@@ -2004,7 +2053,7 @@
 	fsp = CMD_SP(sc_cmd);
 	if (!fsp) {
 		/* command completed while scsi eh was setting up */
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 		return SUCCESS;
 	}
 	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
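
The fc_fcp.c changes retire the catch-all FC_CMD_RECOVERY status in favour of more specific codes (FC_CRC_ERROR, FC_TIMED_OUT, and an explicit host_bcode passed to fc_fcp_recovery()), so fc_io_compl() can hand scsi-ml a distinct result per failure class (DID_PARITY vs. DID_BUS_BUSY, for example). The mapping itself is a plain switch over an internal status; a generic, self-contained version with made-up names might look like:

#include <stdio.h>

enum demo_status { ST_OK, ST_CRC_ERROR, ST_TIMED_OUT, ST_HARD_ERROR };

/* Invented result codes standing in for the DID_* values in scsi.h. */
enum demo_result { RES_OK, RES_PARITY, RES_BUSY, RES_NO_CONNECT, RES_ERROR };

static enum demo_result map_status(enum demo_status st)
{
	switch (st) {
	case ST_OK:
		return RES_OK;
	case ST_CRC_ERROR:	/* corrupt data: ask the mid-layer to retry */
		return RES_PARITY;
	case ST_TIMED_OUT:	/* transport timeout: report busy */
		return RES_BUSY;
	case ST_HARD_ERROR:
		return RES_NO_CONNECT;
	default:
		return RES_ERROR;
	}
}

int main(void)
{
	printf("%d %d\n", map_status(ST_CRC_ERROR), map_status(ST_TIMED_OUT));
	return 0;
}
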
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index 16d2162..eea0c35 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -66,9 +66,21 @@
 
 #define FC_FCP_DBG(pkt, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_FCP_LOGGING,				\
-			 printk(KERN_INFO "host%u: fcp: %6.6x: " fmt,	\
+	{								\
+		if ((pkt)->seq_ptr) {					\
+			struct fc_exch *_ep = NULL;			\
+			_ep = fc_seq_exch((pkt)->seq_ptr);		\
+			printk(KERN_INFO "host%u: fcp: %6.6x: "		\
+				"xid %04x-%04x: " fmt,			\
 				(pkt)->lp->host->host_no,		\
-				pkt->rport->port_id, ##args))
+				(pkt)->rport->port_id,			\
+				(_ep)->oxid, (_ep)->rxid, ##args);	\
+		} else {						\
+			printk(KERN_INFO "host%u: fcp: %6.6x: " fmt,	\
+				(pkt)->lp->host->host_no,		\
+				(pkt)->rport->port_id, ##args);		\
+		}							\
+	})
 
 #define FC_EXCH_DBG(exch, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_EXCH_LOGGING,				\
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 9be63ed..c5a10f9 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -288,6 +288,8 @@
 	struct fc_lport *lport = shost_priv(shost);
 	struct timespec v0, v1;
 	unsigned int cpu;
+	u64 fcp_in_bytes = 0;
+	u64 fcp_out_bytes = 0;
 
 	fcoe_stats = &lport->host_stats;
 	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
@@ -310,10 +312,12 @@
 		fcoe_stats->fcp_input_requests += stats->InputRequests;
 		fcoe_stats->fcp_output_requests += stats->OutputRequests;
 		fcoe_stats->fcp_control_requests += stats->ControlRequests;
-		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
-		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
+		fcp_in_bytes += stats->InputBytes;
+		fcp_out_bytes += stats->OutputBytes;
 		fcoe_stats->link_failure_count += stats->LinkFailureCount;
 	}
+	fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
+	fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
 	fcoe_stats->lip_count = -1;
 	fcoe_stats->nos_count = -1;
 	fcoe_stats->loss_of_sync_count = -1;
@@ -1703,8 +1707,10 @@
 	info->sg = job->reply_payload.sg_list;
 
 	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-				     NULL, info, tov))
+				     NULL, info, tov)) {
+		kfree(info);
 		return -ECOMM;
+	}
 	return 0;
 }
 
@@ -1762,8 +1768,10 @@
 	info->sg = job->reply_payload.sg_list;
 
 	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-				     NULL, info, tov))
+				     NULL, info, tov)) {
+		kfree(info);
 		return -ECOMM;
+	}
 	return 0;
 }
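
fc_lport.c replaces the misleading per-request InputMegabytes/OutputMegabytes fields with byte counters (InputBytes/OutputBytes) and converts the summed total to megabytes once, with div_u64(), at reporting time. The sketch below, using plain C arithmetic and invented counter data, shows why dividing early loses the sub-megabyte remainders:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU byte counters, as hypothetical stand-ins for InputBytes. */
static uint64_t input_bytes[NR_CPUS] = {
	512 * 1024, 700 * 1024, 300 * 1024, 900 * 1024
};

int main(void)
{
	uint64_t total_bytes = 0;
	uint64_t total_mb_wrong = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		total_bytes += input_bytes[cpu];
		/* Converting per-CPU (or per-I/O) truncates sub-MB amounts to 0. */
		total_mb_wrong += input_bytes[cpu] / 1000000;
	}

	/* Convert once at report time, as the patch does with div_u64(). */
	printf("bytes=%llu mb=%llu (early-divide gives %llu)\n",
	       (unsigned long long)total_bytes,
	       (unsigned long long)(total_bytes / 1000000),
	       (unsigned long long)total_mb_wrong);
	return 0;
}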
 
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a84ef13..a7175ad 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -652,7 +652,7 @@
 	FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
 
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
-		return;
+		goto put;
 
 	mutex_lock(&rdata->rp_mutex);
 
@@ -689,6 +689,7 @@
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
+put:
 	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 	return;
 bad:
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c15fde8..da8b615 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -505,6 +505,7 @@
 	struct iscsi_conn *conn = task->conn;
 	struct iscsi_session *session = conn->session;
 	struct scsi_cmnd *sc = task->sc;
+	int oldstate = task->state;
 
 	ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
 			  task->itt, task->state, task->sc);
@@ -525,10 +526,10 @@
 		/* SCSI eh reuses commands to verify us */
 		sc->SCp.ptr = NULL;
 		/*
-		 * queue command may call this to free the task, but
-		 * not have setup the sc callback
+		 * queue command may call this to free the task, so
+		 * it will decide how to return sc to scsi-ml.
 		 */
-		if (sc->scsi_done)
+		if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ)
 			sc->scsi_done(sc);
 	}
 }
@@ -539,11 +540,12 @@
 }
 EXPORT_SYMBOL_GPL(__iscsi_get_task);
 
-static void __iscsi_put_task(struct iscsi_task *task)
+void __iscsi_put_task(struct iscsi_task *task)
 {
 	if (atomic_dec_and_test(&task->refcount))
 		iscsi_free_task(task);
 }
+EXPORT_SYMBOL_GPL(__iscsi_put_task);
 
 void iscsi_put_task(struct iscsi_task *task)
 {
@@ -571,7 +573,8 @@
 			  task->itt, task->state, task->sc);
 	if (task->state == ISCSI_TASK_COMPLETED ||
 	    task->state == ISCSI_TASK_ABRT_TMF ||
-	    task->state == ISCSI_TASK_ABRT_SESS_RECOV)
+	    task->state == ISCSI_TASK_ABRT_SESS_RECOV ||
+	    task->state == ISCSI_TASK_REQUEUE_SCSIQ)
 		return;
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
@@ -1335,17 +1338,16 @@
 {
 	struct iscsi_conn *conn;
 	struct device *dev;
-	unsigned long flags;
 
-	spin_lock_irqsave(&session->lock, flags);
+	spin_lock_bh(&session->lock);
 	conn = session->leadconn;
 	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
-		spin_unlock_irqrestore(&session->lock, flags);
+		spin_unlock_bh(&session->lock);
 		return;
 	}
 
 	dev = get_device(&conn->cls_conn->dev);
-	spin_unlock_irqrestore(&session->lock, flags);
+	spin_unlock_bh(&session->lock);
 	if (!dev)
 	        return;
 	/*
@@ -1364,17 +1366,16 @@
 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
 {
 	struct iscsi_session *session = conn->session;
-	unsigned long flags;
 
-	spin_lock_irqsave(&session->lock, flags);
+	spin_lock_bh(&session->lock);
 	if (session->state == ISCSI_STATE_FAILED) {
-		spin_unlock_irqrestore(&session->lock, flags);
+		spin_unlock_bh(&session->lock);
 		return;
 	}
 
 	if (conn->stop_stage == 0)
 		session->state = ISCSI_STATE_FAILED;
-	spin_unlock_irqrestore(&session->lock, flags);
+	spin_unlock_bh(&session->lock);
 
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
@@ -1599,27 +1600,23 @@
 	FAILURE_SESSION_NOT_READY,
 };
 
-static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 {
 	struct iscsi_cls_session *cls_session;
-	struct Scsi_Host *host;
 	struct iscsi_host *ihost;
 	int reason = 0;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
 	struct iscsi_task *task = NULL;
 
-	sc->scsi_done = done;
 	sc->result = 0;
 	sc->SCp.ptr = NULL;
 
-	host = sc->device->host;
 	ihost = shost_priv(host);
-	spin_unlock(host->host_lock);
 
 	cls_session = starget_to_session(scsi_target(sc->device));
 	session = cls_session->dd_data;
-	spin_lock(&session->lock);
+	spin_lock_bh(&session->lock);
 
 	reason = iscsi_session_chkready(cls_session);
 	if (reason) {
@@ -1705,25 +1702,21 @@
 	}
 
 	session->queued_cmdsn++;
-	spin_unlock(&session->lock);
-	spin_lock(host->host_lock);
+	spin_unlock_bh(&session->lock);
 	return 0;
 
 prepd_reject:
-	sc->scsi_done = NULL;
-	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 reject:
-	spin_unlock(&session->lock);
+	spin_unlock_bh(&session->lock);
 	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
 			  sc->cmnd[0], reason);
-	spin_lock(host->host_lock);
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
 prepd_fault:
-	sc->scsi_done = NULL;
-	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
 fault:
-	spin_unlock(&session->lock);
+	spin_unlock_bh(&session->lock);
 	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
 			  sc->cmnd[0], reason);
 	if (!scsi_bidi_cmnd(sc))
@@ -1732,12 +1725,9 @@
 		scsi_out(sc)->resid = scsi_out(sc)->length;
 		scsi_in(sc)->resid = scsi_in(sc)->length;
 	}
-	done(sc);
-	spin_lock(host->host_lock);
+	sc->scsi_done(sc);
 	return 0;
 }
-
-DEF_SCSI_QCMD(iscsi_queuecommand)
 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
 
 int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
@@ -1795,9 +1785,9 @@
 				      NULL, 0);
 	if (!task) {
 		spin_unlock_bh(&session->lock);
+		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
 		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
 		spin_lock_bh(&session->lock);
-		ISCSI_DBG_EH(session, "tmf exec failure\n");
 		return -EPERM;
 	}
 	conn->tmfcmd_pdus_cnt++;
@@ -2202,7 +2192,7 @@
 		goto success_unlocked;
 	case TMF_TIMEDOUT:
 		spin_unlock_bh(&session->lock);
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto failed_unlocked;
 	case TMF_NOT_FOUND:
 		if (!sc->SCp.ptr) {
@@ -2289,7 +2279,7 @@
 		break;
 	case TMF_TIMEDOUT:
 		spin_unlock_bh(&session->lock);
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
 		conn->tmf_state = TMF_INITIAL;
@@ -2370,7 +2360,7 @@
 	 * we drop the lock here but the leadconn cannot be destroyed while
 	 * we are in the scsi eh
 	 */
-	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+	iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 
 	ISCSI_DBG_EH(session, "wait for relogin\n");
 	wait_event_interruptible(conn->ehwait,
@@ -2452,7 +2442,7 @@
 		break;
 	case TMF_TIMEDOUT:
 		spin_unlock_bh(&session->lock);
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
 		goto done;
 	default:
 		conn->tmf_state = TMF_INITIAL;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index fe8b74c..5257fdf 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -28,6 +28,17 @@
 #include <scsi/scsi_transport_sas.h>
 #include "../scsi_sas_internal.h"
 
+static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
+{
+	struct sas_ha_struct *sas_ha = phy->ha;
+
+	if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
+		   SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
+	     memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
+		return false;
+	return true;
+}
+
 /**
  * sas_form_port -- add this phy to a port
  * @phy: the phy of interest
@@ -45,8 +56,7 @@
 	unsigned long flags;
 
 	if (port) {
-		if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
-			   SAS_ADDR_SIZE) != 0)
+		if (!phy_is_wideport_member(port, phy))
 			sas_deform_port(phy);
 		else {
 			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
@@ -62,9 +72,7 @@
 		port = sas_ha->sas_port[i];
 		spin_lock(&port->phy_list_lock);
 		if (*(u64 *) port->sas_addr &&
-		    memcmp(port->attached_sas_addr,
-			   phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
-		    port->num_phys > 0) {
+		    phy_is_wideport_member(port, phy) && port->num_phys > 0) {
 			/* wide port */
 			SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
 				    port->id);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 29251fa..5815cbe 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -211,8 +211,7 @@
 			unsigned long flags;
 
 			spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
-			res = ata_sas_queuecmd(cmd, scsi_done,
-					       dev->sata_dev.ap);
+			res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
 			spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
 			goto out;
 		}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 196de40..746dd3d 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -464,12 +464,29 @@
 #define UNSOL_VALID	0x00000001
 };
 
+#define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
+#define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
+#define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
+#define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
+#define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
+#define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
+#define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_16G
+#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
+				     (1 << LPFC_USER_LINK_SPEED_10G) | \
+				     (1 << LPFC_USER_LINK_SPEED_8G) | \
+				     (1 << LPFC_USER_LINK_SPEED_4G) | \
+				     (1 << LPFC_USER_LINK_SPEED_2G) | \
+				     (1 << LPFC_USER_LINK_SPEED_1G) | \
+				     (1 << LPFC_USER_LINK_SPEED_AUTO))
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	int (*lpfc_new_scsi_buf)
 		(struct lpfc_vport *, int);
 	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
-		(struct lpfc_hba *);
+		(struct lpfc_hba *, struct lpfc_nodelist *);
 	int (*lpfc_scsi_prep_dma_buf)
 		(struct lpfc_hba *, struct lpfc_scsi_buf *);
 	void (*lpfc_scsi_unprep_dma_buf)
@@ -545,7 +562,7 @@
 	uint32_t hba_flag;	/* hba generic flags */
 #define HBA_ERATT_HANDLED	0x1 /* This flag is set when eratt handled */
 #define DEFER_ERATT		0x2 /* Deferred error attention in progress */
-#define HBA_FCOE_SUPPORT	0x4 /* HBA function supports FCOE */
+#define HBA_FCOE_MODE		0x4 /* HBA function in FCoE Mode */
 #define HBA_SP_QUEUE_EVT	0x8 /* Slow-path qevt posted to worker thread*/
 #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
 #define FCP_XRI_ABORT_EVENT	0x20
@@ -557,6 +574,7 @@
 #define HBA_FIP_SUPPORT		0x800 /* FIP support in HBA */
 #define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
+#define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
@@ -606,6 +624,7 @@
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
 	uint32_t cfg_enable_npiv;
+	uint32_t cfg_enable_rrq;
 	uint32_t cfg_topology;
 	uint32_t cfg_link_speed;
 	uint32_t cfg_cr_delay;
@@ -716,6 +735,7 @@
 	uint32_t total_scsi_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
+	struct list_head active_rrq_list;
 	spinlock_t hbalock;
 
 	/* pci_mem_pools */
@@ -728,6 +748,7 @@
 
 	mempool_t *mbox_mem_pool;
 	mempool_t *nlp_mem_pool;
+	mempool_t *rrq_pool;
 
 	struct fc_host_statistics link_stats;
 	enum intr_type_t intr_type;
@@ -784,6 +805,7 @@
 	unsigned long skipped_hb;
 	struct timer_list hb_tmofunc;
 	uint8_t hb_outstanding;
+	struct timer_list rrq_tmr;
 	enum hba_temp_state over_temp_state;
 	/* ndlp reference management */
 	spinlock_t ndlp_lock;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c1cbec0..3512abb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -52,10 +52,6 @@
 #define LPFC_MIN_DEVLOSS_TMO 1
 #define LPFC_MAX_DEVLOSS_TMO 255
 
-#define LPFC_MAX_LINK_SPEED 8
-#define LPFC_LINK_SPEED_BITMAP 0x00000117
-#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
-
 /**
  * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
  * @incr: integer to convert.
@@ -463,7 +459,7 @@
 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
 			len += snprintf(buf + len, PAGE_SIZE-len,
 					"   Menlo Maint Mode\n");
-		else if (phba->fc_topology == TOPOLOGY_LOOP) {
+		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
 				len += snprintf(buf + len, PAGE_SIZE-len,
 						"   Public Loop\n");
@@ -1339,7 +1335,7 @@
 }
 
 /**
- * lpfc_param_init - Intializes a cfg attribute
+ * lpfc_param_init - Initializes a cfg attribute
  *
  * Description:
  * Macro that given an attr e.g. hba_queue_depth expands
@@ -1981,6 +1977,13 @@
 lpfc_param_init(enable_npiv, 1, 0, 1);
 static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
 
+int lpfc_enable_rrq;
+module_param(lpfc_enable_rrq, int, 0);
+MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
+lpfc_param_show(enable_rrq);
+lpfc_param_init(enable_rrq, 0, 0, 1);
+static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+
 /*
 # lpfc_suppress_link_up:  Bring link up at initialization
 #            0x0  = bring link up (issue MBX_INIT_LINK)
@@ -2837,14 +2840,8 @@
 /*
 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
 # connection.
-#       0  = auto select (default)
-#       1  = 1 Gigabaud
-#       2  = 2 Gigabaud
-#       4  = 4 Gigabaud
-#       8  = 8 Gigabaud
-# Value range is [0,8]. Default value is 0.
+# Value range is [0,16]. Default value is 0.
 */
-
 /**
  * lpfc_link_speed_set - Set the adapters link speed
  * @phba: lpfc_hba pointer.
@@ -2869,7 +2866,7 @@
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
-	int val = 0;
+	int val = LPFC_USER_LINK_SPEED_AUTO;
 	int nolip = 0;
 	const char *val_buf = buf;
 	int err;
@@ -2885,15 +2882,20 @@
 	if (sscanf(val_buf, "%i", &val) != 1)
 		return -EINVAL;
 
-	if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
-		((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
-		((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
-		((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
-		((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)))
+	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2879 lpfc_link_speed attribute cannot be set "
+				"to %d. Speed is not supported by this port.\n",
+				val);
 		return -EINVAL;
-
-	if ((val >= 0 && val <= 8)
-		&& (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
+	}
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		prev_val = phba->cfg_link_speed;
 		phba->cfg_link_speed = val;
 		if (nolip)
@@ -2906,11 +2908,9 @@
 		} else
 			return strlen(buf);
 	}
-
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-		"%d:0469 lpfc_link_speed attribute cannot be set to %d, "
-		"allowed range is [0, 8]\n",
-		phba->brd_no, val);
+		"0469 lpfc_link_speed attribute cannot be set to %d, "
+		"allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
 	return -EINVAL;
 }
 
@@ -2938,8 +2938,8 @@
 static int
 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
 {
-	if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
-		&& (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
+	if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
+	    (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
 		phba->cfg_link_speed = val;
 		return 0;
 	}
@@ -2947,12 +2947,12 @@
 			"0405 lpfc_link_speed attribute cannot "
 			"be set to %d, allowed values are "
 			"["LPFC_LINK_SPEED_STRING"]\n", val);
-	phba->cfg_link_speed = 0;
+	phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 	return -EINVAL;
 }
 
 static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
-		lpfc_link_speed_show, lpfc_link_speed_store);
+		   lpfc_link_speed_show, lpfc_link_speed_store);
 
 /*
 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
@@ -3305,12 +3305,12 @@
 LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
 
 /*
-# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer..
+# lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer..
 #       0  = HBA Heartbeat disabled
 #       1  = HBA Heartbeat enabled (default)
 # Value range is [0,1]. Default value is 1.
 */
-LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
+LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 
 /*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
@@ -3401,6 +3401,7 @@
 	&dev_attr_lpfc_fdmi_on,
 	&dev_attr_lpfc_max_luns,
 	&dev_attr_lpfc_enable_npiv,
+	&dev_attr_lpfc_enable_rrq,
 	&dev_attr_nport_evt_cnt,
 	&dev_attr_board_mode,
 	&dev_attr_max_vpi,
@@ -3798,8 +3799,7 @@
 			}
 			break;
 		case MBX_READ_SPARM64:
-		case MBX_READ_LA:
-		case MBX_READ_LA64:
+		case MBX_READ_TOPOLOGY:
 		case MBX_REG_LOGIN:
 		case MBX_REG_LOGIN64:
 		case MBX_CONFIG_PORT:
@@ -3989,7 +3989,7 @@
 	if (vport->port_type == LPFC_NPIV_PORT) {
 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
 	} else if (lpfc_is_link_up(phba)) {
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			if (vport->fc_flag & FC_PUBLIC_LOOP)
 				fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
 			else
@@ -4058,23 +4058,26 @@
 
 	if (lpfc_is_link_up(phba)) {
 		switch(phba->fc_linkspeed) {
-			case LA_1GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+		case LPFC_LINK_SPEED_1GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
 			break;
-			case LA_2GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
+		case LPFC_LINK_SPEED_2GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
 			break;
-			case LA_4GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
+		case LPFC_LINK_SPEED_4GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
 			break;
-			case LA_8GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
+		case LPFC_LINK_SPEED_8GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 			break;
-			case LA_10GHZ_LINK:
-				fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		case LPFC_LINK_SPEED_10GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
 			break;
-			default:
-				fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+		case LPFC_LINK_SPEED_16GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+			break;
+		default:
+			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
 		}
 	} else
@@ -4097,7 +4100,7 @@
 	spin_lock_irq(shost->host_lock);
 
 	if ((vport->fc_flag & FC_FABRIC) ||
-	    ((phba->fc_topology == TOPOLOGY_LOOP) &&
+	    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
 	     (vport->fc_flag & FC_PUBLIC_LOOP)))
 		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
 	else
@@ -4208,11 +4211,11 @@
 	hs->invalid_crc_count -= lso->invalid_crc_count;
 	hs->error_frames -= lso->error_frames;
 
-	if (phba->hba_flag & HBA_FCOE_SUPPORT) {
+	if (phba->hba_flag & HBA_FCOE_MODE) {
 		hs->lip_count = -1;
 		hs->nos_count = (phba->link_events >> 1);
 		hs->nos_count -= lso->link_events;
-	} else if (phba->fc_topology == TOPOLOGY_LOOP) {
+	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		hs->lip_count = (phba->fc_eventTag >> 1);
 		hs->lip_count -= lso->link_events;
 		hs->nos_count = -1;
@@ -4303,7 +4306,7 @@
 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
-	if (phba->hba_flag & HBA_FCOE_SUPPORT)
+	if (phba->hba_flag & HBA_FCOE_MODE)
 		lso->link_events = (phba->link_events >> 1);
 	else
 		lso->link_events = (phba->fc_eventTag >> 1);
@@ -4615,6 +4618,7 @@
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
 	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
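
lpfc_attr.c replaces the old 0x00000117 speed bitmap with LPFC_USER_LINK_SPEED_BITMAP, where each permitted user value (0, 1, 2, 4, 8, 10, 16) sets bit (1 << value), and lpfc_link_speed_store/lpfc_link_speed_init accept a request only if it is in range and its bit is set. A minimal illustration of that style of check, with a stand-in bitmap rather than the driver's macro:

#include <stdbool.h>
#include <stdio.h>

/* Allowed values 0, 1, 2, 4, 8, 10, 16 encoded as set bits,
 * mirroring the idea behind LPFC_USER_LINK_SPEED_BITMAP. */
#define SPEED_MAX	16
#define SPEED_BITMAP	((1u << 16) | (1u << 10) | (1u << 8) | (1u << 4) | \
			 (1u << 2) | (1u << 1) | (1u << 0))

static bool speed_is_valid(int val)
{
	return val >= 0 && val <= SPEED_MAX && (SPEED_BITMAP & (1u << val));
}

int main(void)
{
	for (int v = 0; v <= 17; v++)
		printf("%2d -> %s\n", v, speed_is_valid(v) ? "ok" : "rejected");
	return 0;
}
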
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7260c3a..0dd43bb 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -162,7 +162,6 @@
 			struct lpfc_iocbq *cmdiocbq,
 			struct lpfc_iocbq *rspiocbq)
 {
-	unsigned long iflags;
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
@@ -173,9 +172,10 @@
 	int rc = 0;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context1;
+	dd_data = cmdiocbq->context2;
 	if (!dd_data) {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		lpfc_sli_release_iocbq(phba, cmdiocbq);
 		return;
 	}
 
@@ -183,17 +183,9 @@
 	job = iocb->set_job;
 	job->dd_data = NULL; /* so timeout handler does not reply */
 
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-	if (cmdiocbq->context2 && rspiocbq)
-		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-		       &rspiocbq->iocb, sizeof(IOCB_t));
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-
 	bmp = iocb->bmp;
-	rspiocbq = iocb->rspiocbq;
 	rsp = &rspiocbq->iocb;
-	ndlp = iocb->ndlp;
+	ndlp = cmdiocbq->context1;
 
 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -220,7 +212,6 @@
 			rsp->un.genreq64.bdl.bdeSize;
 
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-	lpfc_sli_release_iocbq(phba, rspiocbq);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
 	kfree(bmp);
@@ -247,9 +238,7 @@
 	struct ulp_bde64 *bpl = NULL;
 	uint32_t timeout;
 	struct lpfc_iocbq *cmdiocbq = NULL;
-	struct lpfc_iocbq *rspiocbq = NULL;
 	IOCB_t *cmd;
-	IOCB_t *rsp;
 	struct lpfc_dmabuf *bmp = NULL;
 	int request_nseg;
 	int reply_nseg;
@@ -296,17 +285,10 @@
 	}
 
 	cmd = &cmdiocbq->iocb;
-	rspiocbq = lpfc_sli_get_iocbq(phba);
-	if (!rspiocbq) {
-		rc = -ENOMEM;
-		goto free_cmdiocbq;
-	}
-
-	rsp = &rspiocbq->iocb;
 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
 	if (!bmp->virt) {
 		rc = -ENOMEM;
-		goto free_rspiocbq;
+		goto free_cmdiocbq;
 	}
 
 	INIT_LIST_HEAD(&bmp->list);
@@ -358,14 +340,12 @@
 	cmd->ulpTimeout = timeout;
 
 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
-	cmdiocbq->context1 = dd_data;
-	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->context1 = ndlp;
+	cmdiocbq->context2 = dd_data;
 	dd_data->type = TYPE_IOCB;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-	dd_data->context_un.iocb.rspiocbq = rspiocbq;
 	dd_data->context_un.iocb.set_job = job;
 	dd_data->context_un.iocb.bmp = bmp;
-	dd_data->context_un.iocb.ndlp = ndlp;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		creg_val = readl(phba->HCregaddr);
@@ -391,8 +371,6 @@
 
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 
-free_rspiocbq:
-	lpfc_sli_release_iocbq(phba, rspiocbq);
 free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 free_bmp:
@@ -1220,7 +1198,7 @@
 	int rc = 0;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context1;
+	dd_data = cmdiocbq->context2;
 	/* normal completion and timeout crossed paths, already done */
 	if (!dd_data) {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -1369,8 +1347,8 @@
 	ctiocb->context3 = bmp;
 
 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
-	ctiocb->context1 = dd_data;
-	ctiocb->context2 = NULL;
+	ctiocb->context2 = dd_data;
+	ctiocb->context1 = ndlp;
 	dd_data->type = TYPE_IOCB;
 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
 	dd_data->context_un.iocb.rspiocbq = NULL;
@@ -1641,7 +1619,7 @@
  * This function obtains a remote port login id so the diag loopback test
  * can send and receive its own unsolicited CT command.
  **/
-static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
 {
 	LPFC_MBOXQ_t *mbox;
 	struct lpfc_dmabuf *dmabuff;
@@ -1651,10 +1629,14 @@
 	if (!mbox)
 		return -ENOMEM;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		*rpi = lpfc_sli4_alloc_rpi(phba);
 	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
-				(uint8_t *)&phba->pport->fc_sparam, mbox, 0);
+			      (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
 	if (status) {
 		mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
 		return -ENOMEM;
 	}
 
@@ -1668,6 +1650,8 @@
 		kfree(dmabuff);
 		if (status != MBX_TIMEOUT)
 			mempool_free(mbox, phba->mbox_mem_pool);
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_free_rpi(phba, *rpi);
 		return -ENODEV;
 	}
 
@@ -1704,8 +1688,9 @@
 			mempool_free(mbox, phba->mbox_mem_pool);
 		return -EIO;
 	}
-
 	mempool_free(mbox, phba->mbox_mem_pool);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, rpi);
 	return 0;
 }
 
@@ -2102,7 +2087,7 @@
 	uint32_t size;
 	uint32_t full_size;
 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
-	uint16_t rpi;
+	uint16_t rpi = 0;
 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
 	IOCB_t *cmd, *rsp;
 	struct lpfc_sli_ct_request *ctreq;
@@ -2162,7 +2147,7 @@
 		goto loopback_test_exit;
 	}
 
-	if (size >= BUF_SZ_4K) {
+	if (full_size >= BUF_SZ_4K) {
 		/*
 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
 		 * then we allocate 64k and re-use that buffer over and over to
@@ -2171,7 +2156,7 @@
 		 * problem with GET_FCPTARGETMAPPING...
 		 */
 		if (size <= (64 * 1024))
-			total_mem = size;
+			total_mem = full_size;
 		else
 			total_mem = 64 * 1024;
 	} else
@@ -2189,7 +2174,6 @@
 	sg_copy_to_buffer(job->request_payload.sg_list,
 				job->request_payload.sg_cnt,
 				ptr, size);
-
 	rc = lpfcdiag_loop_self_reg(phba, &rpi);
 	if (rc)
 		goto loopback_test_exit;
@@ -2601,12 +2585,11 @@
 			phba->wait_4_mlo_maint_flg = 1;
 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
 			phba->link_flag &= ~LS_LOOPBACK_MODE;
-			phba->fc_topology = TOPOLOGY_PT_PT;
+			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
 		}
 		break;
 	case MBX_READ_SPARM64:
-	case MBX_READ_LA:
-	case MBX_READ_LA64:
+	case MBX_READ_TOPOLOGY:
 	case MBX_REG_LOGIN:
 	case MBX_REG_LOGIN64:
 	case MBX_CONFIG_PORT:
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a5f5a09..17fde52 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -31,7 +31,7 @@
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
 void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
+int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
 void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *);
 void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -40,7 +40,7 @@
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
-		 LPFC_MBOXQ_t *, uint32_t);
+		 LPFC_MBOXQ_t *, uint16_t);
 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
@@ -64,7 +64,7 @@
 int lpfc_linkdown(struct lpfc_hba *);
 void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
-void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
@@ -121,6 +121,7 @@
 int lpfc_els_chk_latt(struct lpfc_vport *);
 int lpfc_els_abort_flogi(struct lpfc_hba *);
 int lpfc_initial_flogi(struct lpfc_vport *);
+void lpfc_issue_init_vfi(struct lpfc_vport *);
 int lpfc_initial_fdisc(struct lpfc_vport *);
 int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
 int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
@@ -415,5 +416,13 @@
 int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 	struct lpfc_iocbq *, uint32_t);
 uint32_t lpfc_drain_txq(struct lpfc_hba *);
-
-
+void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *);
+int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t);
+void lpfc_handle_rrq_active(struct lpfc_hba *);
+int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
+int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
+	uint16_t, uint16_t, uint16_t);
+void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
+	uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 463b749..c004fa9 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -48,14 +48,14 @@
 #include "lpfc_vport.h"
 #include "lpfc_debugfs.h"
 
-#define HBA_PORTSPEED_UNKNOWN               0	/* Unknown - transceiver
-						 * incapable of reporting */
-#define HBA_PORTSPEED_1GBIT                 1	/* 1 GBit/sec */
-#define HBA_PORTSPEED_2GBIT                 2	/* 2 GBit/sec */
-#define HBA_PORTSPEED_4GBIT                 8   /* 4 GBit/sec */
-#define HBA_PORTSPEED_8GBIT                16   /* 8 GBit/sec */
-#define HBA_PORTSPEED_10GBIT                4	/* 10 GBit/sec */
-#define HBA_PORTSPEED_NOT_NEGOTIATED        5	/* Speed not established */
+/* FDMI Port Speed definitions */
+#define HBA_PORTSPEED_1GBIT		0x0001	/* 1 GBit/sec */
+#define HBA_PORTSPEED_2GBIT		0x0002	/* 2 GBit/sec */
+#define HBA_PORTSPEED_4GBIT		0x0008	/* 4 GBit/sec */
+#define HBA_PORTSPEED_10GBIT		0x0004	/* 10 GBit/sec */
+#define HBA_PORTSPEED_8GBIT		0x0010	/* 8 GBit/sec */
+#define HBA_PORTSPEED_16GBIT		0x0020	/* 16 GBit/sec */
+#define HBA_PORTSPEED_UNKNOWN		0x0800	/* Unknown */
 
 #define FOURBYTES	4
 
@@ -1593,8 +1593,10 @@
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
 
 			ae->un.SupportSpeed = 0;
+			if (phba->lmt & LMT_16Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
 			if (phba->lmt & LMT_10Gb)
-				ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT;
+				ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT;
 			if (phba->lmt & LMT_8Gb)
 				ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT;
 			if (phba->lmt & LMT_4Gb)
@@ -1612,24 +1614,26 @@
 			ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED);
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
 			switch(phba->fc_linkspeed) {
-				case LA_1GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
+			case LPFC_LINK_SPEED_1GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_1GBIT;
 				break;
-				case LA_2GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
+			case LPFC_LINK_SPEED_2GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_2GBIT;
 				break;
-				case LA_4GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
+			case LPFC_LINK_SPEED_4GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_4GBIT;
 				break;
-				case LA_8GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
+			case LPFC_LINK_SPEED_8GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
 				break;
-				case LA_10GHZ_LINK:
-					ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+			case LPFC_LINK_SPEED_10GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
 				break;
-				default:
-					ae->un.PortSpeed =
-						HBA_PORTSPEED_UNKNOWN;
+			case LPFC_LINK_SPEED_16GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
+				break;
+			default:
+				ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
 				break;
 			}
 			pab->ab.EntryCnt++;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 7cae69d..1d84b63 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -68,6 +68,12 @@
 	} un;
 };
 
+#define LPFC_SLI4_MAX_XRI	1024	/* Used to make the ndlp's xri_bitmap */
+#define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG)
+struct lpfc_node_rrqs {
+	unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
+};
+
 struct lpfc_nodelist {
 	struct list_head nlp_listp;
 	struct lpfc_name nlp_portname;
@@ -110,8 +116,19 @@
 	atomic_t cmd_pending;
 	uint32_t cmd_qdepth;
 	unsigned long last_change_time;
+	struct lpfc_node_rrqs active_rrqs;
 	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */
 };
+struct lpfc_node_rrq {
+	struct list_head list;
+	uint16_t xritag;
+	uint16_t send_rrq;
+	uint16_t rxid;
+	uint32_t         nlp_DID;		/* FC D_ID of entry */
+	struct lpfc_vport *vport;
+	struct lpfc_nodelist *ndlp;
+	unsigned long rrq_stop_time;
+};
 
 /* Defines for nlp_flag (uint32) */
 #define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
@@ -136,7 +153,7 @@
 #define NLP_NODEV_REMOVE   0x08000000	/* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
 #define NLP_SC_REQ         0x20000000	/* Target requires authentication */
-#define NLP_RPI_VALID      0x80000000	/* nlp_rpi is valid */
+#define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
 
 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \
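
The lpfc_disc.h hunk above adds a per-node XRI bitmap (struct lpfc_node_rrqs, embedded in
struct lpfc_nodelist as active_rrqs) that the RRQ helpers prototyped at the top of this
excerpt operate on. Below is a minimal sketch, under stated assumptions, of how such a
bitmap is typically consulted with the generic kernel bitops API; the driver's real
lpfc_test_rrq_active()/lpfc_set_rrq_active()/lpfc_clr_rrq_active() implementations are not
part of this excerpt, and the example_-prefixed helper is hypothetical.

/* Assumes <linux/bitops.h> plus the lpfc headers touched above. */
static inline int example_xri_has_rrq(struct lpfc_nodelist *ndlp, uint16_t xri)
{
	/* XRIs beyond the bitmap cannot be tracked as active RRQs. */
	if (xri >= LPFC_SLI4_MAX_XRI)
		return 0;
	/* test_bit() only reads the bit; set_bit()/clear_bit() would mark
	 * or release the exchange as an active RRQ.
	 */
	return test_bit(xri, ndlp->active_rrqs.xri_bitmap);
}
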
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 884f4d3..c62d567 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -375,7 +375,8 @@
 		err = 4;
 		goto fail;
 	}
-	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
+	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
+			  ndlp->nlp_rpi);
 	if (rc) {
 		err = 5;
 		goto fail_free_mbox;
@@ -523,7 +524,7 @@
 	phba->fc_edtovResol = sp->cmn.edtovResolution;
 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_PUBLIC_LOOP;
 		spin_unlock_irq(shost->host_lock);
@@ -832,6 +833,12 @@
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
 
+		/* FLOGI failure */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpTimeout);
+
 		/* FLOGI failed, so there is no fabric */
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -843,13 +850,16 @@
 		 */
 		if (phba->alpa_map[0] == 0) {
 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
+			if ((phba->sli_rev == LPFC_SLI_REV4) &&
+			    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+			     (vport->fc_prevDID != vport->fc_myDID))) {
+				if (vport->fc_flag & FC_VFI_REGISTERED)
+					lpfc_sli4_unreg_all_rpis(vport);
+				lpfc_issue_reg_vfi(vport);
+				lpfc_nlp_put(ndlp);
+				goto out;
+			}
 		}
-
-		/* FLOGI failure */
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
-				 irsp->ulpStatus, irsp->un.ulpWord[4],
-				 irsp->ulpTimeout);
 		goto flogifail;
 	}
 	spin_lock_irq(shost->host_lock);
@@ -879,7 +889,7 @@
 		 */
 		if (sp->cmn.fPort)
 			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
-		else if (!(phba->hba_flag & HBA_FCOE_SUPPORT))
+		else if (!(phba->hba_flag & HBA_FCOE_MODE))
 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 		else {
 			lpfc_printf_vlog(vport, KERN_ERR,
@@ -1014,7 +1024,9 @@
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;
 
-	if  (phba->sli_rev == LPFC_SLI_REV4) {
+	if  ((phba->sli_rev == LPFC_SLI_REV4) &&
+	     (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	      LPFC_SLI_INTF_IF_TYPE_0)) {
 		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
 		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
 		/* FLOGI needs to be 3 for WQE FCFI */
@@ -1027,7 +1039,7 @@
 		icmd->ulpCt_l = 0;
 	}
 
-	if (phba->fc_topology != TOPOLOGY_LOOP) {
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
 		icmd->un.elsreq64.myID = 0;
 		icmd->un.elsreq64.fl = 1;
 	}
@@ -1281,6 +1293,7 @@
 	uint32_t rc, keepDID = 0;
 	int  put_node;
 	int  put_rport;
+	struct lpfc_node_rrqs rrq;
 
 	/* Fabric nodes can have the same WWPN so we don't bother searching
 	 * by WWPN.  Just return the ndlp that was given to us.
@@ -1298,6 +1311,7 @@
 
 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
 		return ndlp;
+	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
 
 	if (!new_ndlp) {
 		rc = memcmp(&ndlp->nlp_portname, name,
@@ -1318,12 +1332,25 @@
 		if (!new_ndlp)
 			return ndlp;
 		keepDID = new_ndlp->nlp_DID;
-	} else
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&rrq.xri_bitmap,
+				&new_ndlp->active_rrqs.xri_bitmap,
+				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	} else {
 		keepDID = new_ndlp->nlp_DID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&rrq.xri_bitmap,
+				&new_ndlp->active_rrqs.xri_bitmap,
+				sizeof(new_ndlp->active_rrqs.xri_bitmap));
+	}
 
 	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		memcpy(new_ndlp->active_rrqs.xri_bitmap,
+			&ndlp->active_rrqs.xri_bitmap,
+			sizeof(ndlp->active_rrqs.xri_bitmap));
 
 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
 		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -1362,12 +1389,20 @@
 
 		/* Two ndlps cannot have the same did on the nodelist */
 		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&ndlp->active_rrqs.xri_bitmap,
+				&rrq.xri_bitmap,
+				sizeof(ndlp->active_rrqs.xri_bitmap));
 		lpfc_drop_node(vport, ndlp);
 	}
 	else {
 		lpfc_unreg_rpi(vport, ndlp);
 		/* Two ndlps cannot have the same did */
 		ndlp->nlp_DID = keepDID;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			memcpy(&ndlp->active_rrqs.xri_bitmap,
+				&rrq.xri_bitmap,
+				sizeof(ndlp->active_rrqs.xri_bitmap));
 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 		/* Since we are swapping the ndlp passed in with the new one
 		 * and the did has already been swapped, copy over the
@@ -1428,6 +1463,73 @@
 }
 
 /**
+ * lpfc_cmpl_els_rrq - Completion handler for ELS RRQs.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ * This routine calls the clear rrq function to free the rrq and
+ * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does
+ * not exist, the clear rrq function is still called because the
+ * rrq needs to be freed.
+ **/
+
+static void
+lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	IOCB_t *irsp;
+	struct lpfc_nodelist *ndlp;
+	struct lpfc_node_rrq *rrq;
+
+	/* we pass cmdiocb to state machine which needs rspiocb as well */
+	rrq = cmdiocb->context_un.rrq;
+	cmdiocb->context_un.rsp_iocb = rspiocb;
+
+	irsp = &rspiocb->iocb;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"RRQ cmpl:      status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2882 RRQ completes to NPort x%x "
+				 "with no ndlp. Data: x%x x%x x%x\n",
+				 irsp->un.elsreq64.remoteID,
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 irsp->ulpIoTag);
+		goto out;
+	}
+
+	/* rrq completes to NPort <nlp_DID> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2880 RRQ completes to NPort x%x "
+			 "Data: x%x x%x x%x x%x x%x\n",
+			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+
+	if (irsp->ulpStatus) {
+		/* Check for retry */
+		/* RRQ failed Don't print the vport to vport rjts */
+		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
+			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
+			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
+			(phba)->pport->cfg_log_verbose & LOG_ELS)
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
+	}
+out:
+	if (rrq)
+		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	lpfc_els_free_iocb(phba, cmdiocb);
+	return;
+}
+/**
  * lpfc_cmpl_els_plogi - Completion callback function for plogi
  * @phba: pointer to lpfc hba data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -2722,7 +2824,7 @@
 			if (cmd == ELS_CMD_FLOGI) {
 				if (PCI_DEVICE_ID_HORNET ==
 					phba->pcidev->device) {
-					phba->fc_topology = TOPOLOGY_LOOP;
+					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
 					phba->pport->fc_myDID = 0;
 					phba->alpa_map[0] = 0;
 					phba->alpa_map[1] = 0;
@@ -2877,7 +2979,7 @@
 		retry = 1;
 
 	if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
-	    (phba->fc_topology != TOPOLOGY_LOOP) &&
+	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
 	    !lpfc_error_lost_link(irsp)) {
 		/* FLOGI retry policy */
 		retry = 1;
@@ -3219,14 +3321,6 @@
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
-	/*
-	 * This routine is used to register and unregister in previous SLI
-	 * modes.
-	 */
-	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-	    (phba->sli_rev == LPFC_SLI_REV4))
-		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
-
 	pmb->context1 = NULL;
 	pmb->context2 = NULL;
 
@@ -3904,6 +3998,47 @@
 }
 
 /**
+ * lpfc_els_clear_rrq - Clear the rrq for the exchange that this els describes.
+ * @vport: pointer to a virtual N_Port data structure.
+ * @iocb: pointer to the lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return: none.
+ **/
+static void
+lpfc_els_clear_rrq(struct lpfc_vport *vport,
+      struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	uint8_t *pcmd;
+	struct RRQ *rrq;
+	uint16_t rxid;
+	struct lpfc_node_rrq *prrq;
+
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+	pcmd += sizeof(uint32_t);
+	rrq = (struct RRQ *)pcmd;
+	rxid = bf_get(rrq_oxid, rrq);
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
+			" x%x x%x\n",
+			bf_get(rrq_did, rrq),
+			bf_get(rrq_oxid, rrq),
+			rxid,
+			iocb->iotag, iocb->iocb.ulpContext);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
+		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
+	prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+	if (prrq)
+		lpfc_clr_rrq_active(phba, rxid, prrq);
+	return;
+}
+
+/**
  * lpfc_els_rsp_echo_acc - Issue echo acc response
  * @vport: pointer to a virtual N_Port data structure.
  * @data: pointer to echo data to return in the accept.
@@ -4597,7 +4732,7 @@
 
 	lpfc_set_disctmo(vport);
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		/* We should never receive a FLOGI in loop mode, ignore it */
 		did = icmd->un.elsreq64.remoteID;
 
@@ -4792,6 +4927,8 @@
 		 struct lpfc_nodelist *ndlp)
 {
 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
 }
 
 /**
@@ -4940,7 +5077,7 @@
 	pcmd += sizeof(uint32_t); /* Skip past command */
 	rps_rsp = (RPS_RSP *)pcmd;
 
-	if (phba->fc_topology != TOPOLOGY_LOOP)
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
 		status = 0x10;
 	else
 		status = 0x8;
@@ -5194,6 +5331,97 @@
 	return 0;
 }
 
+/* lpfc_issue_els_rrq - Issue an ELS RRQ command to a remote port
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @ndlp: pointer to a node-list data structure.
+ * @did: DID of the target.
+ * @rrq: Pointer to the rrq struct.
+ *
+ * Build an ELS RRQ command and send it to the target. If the issue_iocb is
+ * successful, the completion handler will clear the RRQ.
+ *
+ * Return codes
+ *   0 - Successfully sent rrq els iocb.
+ *   1 - Failed to send rrq els iocb.
+ **/
+static int
+lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			uint32_t did, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct RRQ *els_rrq;
+	IOCB_t *icmd;
+	struct lpfc_iocbq *elsiocb;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int ret;
+
+
+	if (ndlp != rrq->ndlp)
+		ndlp = rrq->ndlp;
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return 1;
+
+	/* If ndlp is not NULL, we will bump the reference count on it */
+	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
+				     ELS_CMD_RRQ);
+	if (!elsiocb)
+		return 1;
+
+	icmd = &elsiocb->iocb;
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+
+	/* For RRQ request, remainder of payload is Exchange IDs */
+	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
+	pcmd += sizeof(uint32_t);
+	els_rrq = (struct RRQ *) pcmd;
+
+	bf_set(rrq_oxid, els_rrq, rrq->xritag);
+	bf_set(rrq_rxid, els_rrq, rrq->rxid);
+	bf_set(rrq_did, els_rrq, vport->fc_myDID);
+	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
+	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
+
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue RRQ:     did:x%x",
+		did, rrq->xritag, rrq->rxid);
+	elsiocb->context_un.rrq = rrq;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_send_rrq - Sends ELS RRQ if needed.
+ * @phba: pointer to lpfc hba data structure.
+ * @rrq: pointer to the active rrq.
+ *
+ * This routine will call lpfc_issue_els_rrq if the rrq is still
+ * active for the xri. If this function returns a failure, the
+ * caller needs to clean up the RRQ by calling lpfc_clr_rrq_active.
+ *
+ * Returns 0 Success.
+ *         1 Failure.
+ **/
+int
+lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
+{
+	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
+							rrq->nlp_DID);
+	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
+		return lpfc_issue_els_rrq(rrq->vport, ndlp,
+					 rrq->nlp_DID, rrq);
+	else
+		return 1;
+}
+
 /**
  * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
  * @vport: pointer to a host virtual N_Port data structure.
@@ -5482,7 +5710,7 @@
 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
 			    sizeof(struct lpfc_name)))) {
 			/* This port has switched fabrics. FLOGI is required */
-			lpfc_initial_flogi(vport);
+			lpfc_issue_init_vfi(vport);
 		} else {
 			/* FAN verified - skip FLOGI */
 			vport->fc_myDID = vport->fc_prevDID;
@@ -6201,7 +6429,7 @@
 			cmd, did, vport->port_state);
 
 		/* Unsupported ELS command, reject */
-		rjt_err = LSRJT_INVALID_CMD;
+		rjt_err = LSRJT_CMD_UNSUPPORTED;
 
 		/* Unknown ELS command <elsCmd> received from NPORT <did> */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6373,7 +6601,7 @@
 	if (!ndlp) {
 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 		if (!ndlp) {
-			if (phba->fc_topology == TOPOLOGY_LOOP) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 				lpfc_disc_start(vport);
 				return;
 			}
@@ -6386,7 +6614,7 @@
 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
 		if (!ndlp) {
-			if (phba->fc_topology == TOPOLOGY_LOOP) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 				lpfc_disc_start(vport);
 				return;
 			}
@@ -6408,18 +6636,31 @@
 	}
 
 	if (vport->cfg_fdmi_on) {
-		ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
-					  GFP_KERNEL);
+		/* If this is the first time, allocate an ndlp and initialize
+		 * it. Otherwise, make sure the node is enabled and then do the
+		 * login.
+		 */
+		ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID);
+		if (!ndlp_fdmi) {
+			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+						  GFP_KERNEL);
+			if (ndlp_fdmi) {
+				lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+				ndlp_fdmi->nlp_type |= NLP_FABRIC;
+			} else
+				return;
+		}
+		if (!NLP_CHK_NODE_ACT(ndlp_fdmi))
+			ndlp_fdmi = lpfc_enable_node(vport,
+						     ndlp_fdmi,
+						     NLP_STE_NPR_NODE);
+
 		if (ndlp_fdmi) {
-			lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
-			ndlp_fdmi->nlp_type |= NLP_FABRIC;
 			lpfc_nlp_set_state(vport, ndlp_fdmi,
-				NLP_STE_PLOGI_ISSUE);
-			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
-					     0);
+					   NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0);
 		}
 	}
-	return;
 }
 
 /**
@@ -6497,7 +6738,7 @@
 			spin_unlock_irq(shost->host_lock);
 			if (vport->port_type == LPFC_PHYSICAL_PORT
 				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-				lpfc_initial_flogi(vport);
+				lpfc_issue_init_vfi(vport);
 			else
 				lpfc_initial_fdisc(vport);
 			break;
@@ -6734,7 +6975,7 @@
 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
 	vport->fc_flag |= FC_FABRIC;
-	if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
 		vport->fc_flag |=  FC_PUBLIC_LOOP;
 	spin_unlock_irq(shost->host_lock);
 
@@ -6844,7 +7085,9 @@
 	icmd->un.elsreq64.myID = 0;
 	icmd->un.elsreq64.fl = 1;
 
-	if  (phba->sli_rev == LPFC_SLI_REV4) {
+	if  ((phba->sli_rev == LPFC_SLI_REV4) &&
+	     (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	      LPFC_SLI_INTF_IF_TYPE_0)) {
 		/* FDISC needs to be 1 for WQE VPI */
 		elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
 		elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
@@ -7351,8 +7594,11 @@
 			  struct sli4_wcqe_xri_aborted *axri)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
+	struct lpfc_nodelist *ndlp;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
@@ -7361,11 +7607,14 @@
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
 			list_del(&sglq_entry->list);
+			ndlp = sglq_entry->ndlp;
+			sglq_entry->ndlp = NULL;
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
 			sglq_entry->state = SGL_FREED;
 			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 
 			/* Check if TXQ queue needs to be serviced */
 			if (pring->txq_cnt)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a5d1695..bb01596 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -607,6 +607,8 @@
 
 	/* Process SLI4 events */
 	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+		if (phba->hba_flag & HBA_RRQ_ACTIVE)
+			lpfc_handle_rrq_active(phba);
 		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 			lpfc_sli4_fcp_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
@@ -966,6 +968,7 @@
 	struct lpfc_vport **vports;
 	int i;
 
+	lpfc_cleanup_wt_rrqs(phba);
 	phba->link_state = LPFC_LINK_UP;
 
 	/* Unblock fabric iocbs if they are blocked */
@@ -1064,7 +1067,7 @@
 
 	mempool_free(pmb, phba->mbox_mem_pool);
 
-	if (phba->fc_topology == TOPOLOGY_LOOP &&
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
 	    vport->fc_flag & FC_PUBLIC_LOOP &&
 	    !(vport->fc_flag & FC_LBIT)) {
 			/* Need to wait for FAN - use discovery timer
@@ -1078,9 +1081,8 @@
 	/* Start discovery by sending a FLOGI. port_state is identically
 	 * LPFC_FLOGI while waiting for FLOGI cmpl
 	 */
-	if (vport->port_state != LPFC_FLOGI) {
+	if (vport->port_state != LPFC_FLOGI)
 		lpfc_initial_flogi(vport);
-	}
 	return;
 
 out:
@@ -1131,7 +1133,7 @@
 	if (vport->port_state != LPFC_FLOGI) {
 		phba->hba_flag |= FCF_RR_INPROG;
 		spin_unlock_irq(&phba->hbalock);
-		lpfc_initial_flogi(vport);
+		lpfc_issue_init_vfi(vport);
 		goto out;
 	}
 	spin_unlock_irq(&phba->hbalock);
@@ -1353,7 +1355,7 @@
 		if (phba->pport->port_state != LPFC_FLOGI) {
 			phba->hba_flag |= FCF_RR_INPROG;
 			spin_unlock_irq(&phba->hbalock);
-			lpfc_initial_flogi(phba->pport);
+			lpfc_issue_init_vfi(phba->pport);
 			return;
 		}
 		spin_unlock_irq(&phba->hbalock);
@@ -2331,7 +2333,7 @@
 				phba->fcf.current_rec.fcf_indx, fcf_index);
 		/* Wait 500 ms before retrying FLOGI to current FCF */
 		msleep(500);
-		lpfc_initial_flogi(phba->pport);
+		lpfc_issue_init_vfi(phba->pport);
 		goto out;
 	}
 
@@ -2422,6 +2424,63 @@
 }
 
 /**
+ * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of the init_vfi mailbox command.
+ */
+void
+lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+
+	if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2891 Init VFI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+	lpfc_initial_flogi(vport);
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	return;
+}
+
+/**
+ * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vfi mailbox command to initialize the VFI and
+ * VPI for the physical port.
+ */
+void
+lpfc_issue_init_vfi(struct lpfc_vport *vport)
+{
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
+	struct lpfc_hba *phba = vport->phba;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2892 Failed to allocate "
+			"init_vfi mailbox\n");
+		return;
+	}
+	lpfc_init_vfi(mboxq, vport);
+	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
+	}
+}
+
+/**
  * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox data structure.
@@ -2528,7 +2587,7 @@
 						     FC_VPORT_FAILED);
 				continue;
 			}
-			if (phba->fc_topology == TOPOLOGY_LOOP) {
+			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 				lpfc_vport_set_state(vports[i],
 						     FC_VPORT_LINKDOWN);
 				continue;
@@ -2564,7 +2623,7 @@
 			 "2018 REG_VFI mbxStatus error x%x "
 			 "HBA state x%x\n",
 			 mboxq->u.mb.mbxStatus, vport->port_state);
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			/* FLOGI failed, use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
 			/* Start discovery */
@@ -2582,8 +2641,18 @@
 	spin_unlock_irq(shost->host_lock);
 
 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
-		lpfc_start_fdiscs(phba);
-		lpfc_do_scr_ns_plogi(phba, vport);
+		/* For private loop just start discovery and we are done. */
+		if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
+		    (phba->alpa_map[0] == 0) &&
+		    !(vport->fc_flag & FC_PUBLIC_LOOP)) {
+			/* Use loop map to make discovery list */
+			lpfc_disc_list_loopmap(vport);
+			/* Start discovery */
+			lpfc_disc_start(vport);
+		} else {
+			lpfc_start_fdiscs(phba);
+			lpfc_do_scr_ns_plogi(phba, vport);
+		}
 	}
 
 fail_free_mem:
@@ -2644,7 +2713,7 @@
 }
 
 static void
-lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
+lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 {
 	struct lpfc_vport *vport = phba->pport;
 	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
@@ -2654,31 +2723,24 @@
 	struct fcf_record *fcf_record;
 
 	spin_lock_irq(&phba->hbalock);
-	switch (la->UlnkSpeed) {
-	case LA_1GHZ_LINK:
-		phba->fc_linkspeed = LA_1GHZ_LINK;
-		break;
-	case LA_2GHZ_LINK:
-		phba->fc_linkspeed = LA_2GHZ_LINK;
-		break;
-	case LA_4GHZ_LINK:
-		phba->fc_linkspeed = LA_4GHZ_LINK;
-		break;
-	case LA_8GHZ_LINK:
-		phba->fc_linkspeed = LA_8GHZ_LINK;
-		break;
-	case LA_10GHZ_LINK:
-		phba->fc_linkspeed = LA_10GHZ_LINK;
+	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
+	case LPFC_LINK_SPEED_1GHZ:
+	case LPFC_LINK_SPEED_2GHZ:
+	case LPFC_LINK_SPEED_4GHZ:
+	case LPFC_LINK_SPEED_8GHZ:
+	case LPFC_LINK_SPEED_10GHZ:
+	case LPFC_LINK_SPEED_16GHZ:
+		phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
 		break;
 	default:
-		phba->fc_linkspeed = LA_UNKNW_LINK;
+		phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
 		break;
 	}
 
-	phba->fc_topology = la->topology;
+	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
-	if (phba->fc_topology == TOPOLOGY_LOOP) {
+	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
 		/* if npiv is enabled and this adapter supports npiv log
@@ -2689,11 +2751,11 @@
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
 				/* Get Loop Map information */
-		if (la->il)
+		if (bf_get(lpfc_mbx_read_top_il, la))
 			vport->fc_flag |= FC_LBIT;
 
-		vport->fc_myDID = la->granted_AL_PA;
-		i = la->un.lilpBde64.tus.f.bdeSize;
+		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
+		i = la->lilpBde64.tus.f.bdeSize;
 
 		if (i == 0) {
 			phba->alpa_map[0] = 0;
@@ -2764,7 +2826,7 @@
 		goto out;
 	}
 
-	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
 		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (!cfglink_mbox)
 			goto out;
@@ -2790,7 +2852,7 @@
 			if (unlikely(!fcf_record)) {
 				lpfc_printf_log(phba, KERN_ERR,
 					LOG_MBOX | LOG_SLI,
-					"2554 Could not allocate memmory for "
+					"2554 Could not allocate memory for "
 					"fcf record\n");
 				rc = -ENODEV;
 				goto out;
@@ -2874,17 +2936,17 @@
 
 
 /*
- * This routine handles processing a READ_LA mailbox
+ * This routine handles processing a READ_TOPOLOGY mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
  * as the completion routine when the command is
  * handed off to the SLI layer.
  */
 void
-lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport *vport = pmb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-	READ_LA_VAR *la;
+	struct lpfc_mbx_read_top *la;
 	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
@@ -2897,15 +2959,15 @@
 				mb->mbxStatus, vport->port_state);
 		lpfc_mbx_issue_link_down(phba);
 		phba->link_state = LPFC_HBA_ERROR;
-		goto lpfc_mbx_cmpl_read_la_free_mbuf;
+		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
 	}
 
-	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
 
 	memcpy(&phba->alpa_map[0], mp->virt, 128);
 
 	spin_lock_irq(shost->host_lock);
-	if (la->pb)
+	if (bf_get(lpfc_mbx_read_top_pb, la))
 		vport->fc_flag |= FC_BYPASSED_MODE;
 	else
 		vport->fc_flag &= ~FC_BYPASSED_MODE;
@@ -2914,41 +2976,48 @@
 	if ((phba->fc_eventTag  < la->eventTag) ||
 	    (phba->fc_eventTag == la->eventTag)) {
 		phba->fc_stat.LinkMultiEvent++;
-		if (la->attType == AT_LINK_UP)
+		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
 			if (phba->fc_eventTag != 0)
 				lpfc_linkdown(phba);
 	}
 
 	phba->fc_eventTag = la->eventTag;
 	spin_lock_irq(&phba->hbalock);
-	if (la->mm)
+	if (bf_get(lpfc_mbx_read_top_mm, la))
 		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
 	else
 		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
 	spin_unlock_irq(&phba->hbalock);
 
 	phba->link_events++;
-	if (la->attType == AT_LINK_UP && (!la->mm)) {
+	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
+	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1306 Link Up Event in loop back mode "
 					"x%x received Data: x%x x%x x%x x%x\n",
 					la->eventTag, phba->fc_eventTag,
-					la->granted_AL_PA, la->UlnkSpeed,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
 					phba->alpa_map[0]);
 		} else {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 					"1303 Link Up Event x%x received "
 					"Data: x%x x%x x%x x%x x%x x%x %d\n",
 					la->eventTag, phba->fc_eventTag,
-					la->granted_AL_PA, la->UlnkSpeed,
+					bf_get(lpfc_mbx_read_top_alpa_granted,
+					       la),
+					bf_get(lpfc_mbx_read_top_link_spd, la),
 					phba->alpa_map[0],
-					la->mm, la->fa,
+					bf_get(lpfc_mbx_read_top_mm, la),
+					bf_get(lpfc_mbx_read_top_fa, la),
 					phba->wait_4_mlo_maint_flg);
 		}
 		lpfc_mbx_process_link_up(phba, la);
-	} else if (la->attType == AT_LINK_DOWN) {
+	} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
+		   LPFC_ATT_LINK_DOWN) {
 		phba->fc_stat.LinkDown++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -2964,11 +3033,13 @@
 				"Data: x%x x%x x%x x%x x%x\n",
 				la->eventTag, phba->fc_eventTag,
 				phba->pport->port_state, vport->fc_flag,
-				la->mm, la->fa);
+				bf_get(lpfc_mbx_read_top_mm, la),
+				bf_get(lpfc_mbx_read_top_fa, la));
 		}
 		lpfc_mbx_issue_link_down(phba);
 	}
-	if (la->mm && la->attType == AT_LINK_UP) {
+	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
+	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
 		if (phba->link_state != LPFC_LINK_DOWN) {
 			phba->fc_stat.LinkDown++;
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -2996,14 +3067,15 @@
 		}
 	}
 
-	if (la->fa) {
-		if (la->mm)
+	if (bf_get(lpfc_mbx_read_top_fa, la)) {
+		if (bf_get(lpfc_mbx_read_top_mm, la))
 			lpfc_issue_clear_la(phba, vport);
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
-				"1311 fa %d\n", la->fa);
+				"1311 fa %d\n",
+				bf_get(lpfc_mbx_read_top_fa, la));
 	}
 
-lpfc_mbx_cmpl_read_la_free_mbuf:
+lpfc_mbx_cmpl_read_topology_free_mbuf:
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
@@ -3030,8 +3102,8 @@
 	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
 		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
 
-	if (ndlp->nlp_flag &  NLP_IGNR_REG_CMPL ||
-		ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
+	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
 		/* We rcvd a rscn after issuing this
 		 * mbox reg login, we may have cycled
 		 * back through the state and be
@@ -3043,10 +3115,6 @@
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
 		spin_unlock_irq(shost->host_lock);
-		if (phba->sli_rev == LPFC_SLI_REV4)
-			lpfc_sli4_free_rpi(phba,
-				pmb->u.mb.un.varRegLogin.rpi);
-
 	} else
 		/* Good status, call state machine */
 		lpfc_disc_state_machine(vport, ndlp, pmb,
@@ -3092,6 +3160,7 @@
 	spin_unlock_irq(shost->host_lock);
 	vport->unreg_vpi_cmpl = VPORT_OK;
 	mempool_free(pmb, phba->mbox_mem_pool);
+	lpfc_cleanup_vports_rrqs(vport);
 	/*
 	 * This shost reference might have been taken at the beginning of
 	 * lpfc_vport_delete()
@@ -3333,7 +3402,7 @@
 		kfree(mp);
 		mempool_free(pmb, phba->mbox_mem_pool);
 
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			/* FLOGI failed, use loop map to make discovery list */
 			lpfc_disc_list_loopmap(vport);
 
@@ -3355,7 +3424,7 @@
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -3413,7 +3482,7 @@
 		/* If no other thread is using the ndlp, free it */
 		lpfc_nlp_not_used(ndlp);
 
-		if (phba->fc_topology == TOPOLOGY_LOOP) {
+		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
 			/*
 			 * RegLogin failed, use loop map to make discovery
 			 * list
@@ -3429,7 +3498,7 @@
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -3762,6 +3831,8 @@
 	NLP_INT_NODE_ACT(ndlp);
 	atomic_set(&ndlp->cmd_pending, 0);
 	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
+	if (vport->phba->sli_rev == LPFC_SLI_REV4)
+		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
 }
 
 struct lpfc_nodelist *
@@ -3975,7 +4046,7 @@
 	 * by firmware with a no rpi error.
 	 */
 	psli = &phba->sli;
-	if (ndlp->nlp_flag & NLP_RPI_VALID) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 		/* Now process each ring */
 		for (i = 0; i < psli->num_rings; i++) {
 			pring = &psli->ring[i];
@@ -4023,7 +4094,7 @@
 	LPFC_MBOXQ_t    *mbox;
 	int rc;
 
-	if (ndlp->nlp_flag & NLP_RPI_VALID) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -4035,8 +4106,9 @@
 		}
 		lpfc_no_rpi(phba, ndlp);
 
-		ndlp->nlp_rpi = 0;
-		ndlp->nlp_flag &= ~NLP_RPI_VALID;
+		if (phba->sli_rev != LPFC_SLI_REV4)
+			ndlp->nlp_rpi = 0;
+		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 		return 1;
 	}
@@ -4059,11 +4131,16 @@
 	int i;
 
 	vports = lpfc_create_vport_work_array(phba);
+	if (!vports) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+			"2884 Vport array allocation failed \n");
+		return;
+	}
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
-			if (ndlp->nlp_flag & NLP_RPI_VALID) {
+			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
 				/* The mempool_alloc might sleep */
 				spin_unlock_irq(shost->host_lock);
 				lpfc_unreg_rpi(vports[i], ndlp);
@@ -4192,9 +4269,6 @@
 				kfree(mp);
 			}
 			list_del(&mb->list);
-			if (phba->sli_rev == LPFC_SLI_REV4)
-				lpfc_sli4_free_rpi(phba,
-					 mb->u.mb.un.varRegLogin.rpi);
 			mempool_free(mb, phba->mbox_mem_pool);
 			/* We shall not invoke the lpfc_nlp_put to decrement
 			 * the ndlp reference count as we are in the process
@@ -4236,15 +4310,15 @@
 
 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
 	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
-		!(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
-	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
+	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
 		/* For this case we need to cleanup the default rpi
 		 * allocated by the firmware.
 		 */
 		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
 			!= NULL) {
 			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
-			    (uint8_t *) &vport->fc_sparam, mbox, 0);
+			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
 			if (rc) {
 				mempool_free(mbox, phba->mbox_mem_pool);
 			}
@@ -4436,7 +4510,7 @@
 	if (!lpfc_is_link_up(phba))
 		return;
 
-	if (phba->fc_topology != TOPOLOGY_LOOP)
+	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
 		return;
 
 	/* Check for loop map present or not */
@@ -4788,7 +4862,10 @@
 			}
 		}
 		if (vport->port_state != LPFC_FLOGI) {
-			lpfc_initial_flogi(vport);
+			if (phba->sli_rev <= LPFC_SLI_REV3)
+				lpfc_initial_flogi(vport);
+			else
+				lpfc_issue_init_vfi(vport);
 			return;
 		}
 		break;
@@ -4979,7 +5056,7 @@
 	pmb->context2 = NULL;
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	ndlp->nlp_type |= NLP_FABRIC;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -5103,6 +5180,8 @@
 	spin_lock_irqsave(&phba->ndlp_lock, flags);
 	NLP_CLR_NODE_ACT(ndlp);
 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
 
 	/* free ndlp memory for final ndlp release */
 	if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -5254,6 +5333,10 @@
 
 	vports = lpfc_create_vport_work_array(phba);
 
+	/* If driver cannot allocate memory, indicate fcf is in use */
+	if (!vports)
+		return 1;
+
 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
@@ -5269,7 +5352,7 @@
 					"logged in\n",
 					ndlp->nlp_rpi, ndlp->nlp_DID,
 					ndlp->nlp_flag);
-				if (ndlp->nlp_flag & NLP_RPI_VALID)
+				if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
 					ret = 1;
 			}
 		}
@@ -5550,7 +5633,7 @@
 	 * registered, do nothing.
 	 */
 	spin_lock_irq(&phba->hbalock);
-	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
+	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
 	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
 	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
 	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 9b83334..96ed3ba 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -880,6 +880,24 @@
 	uint32_t crcCnt;
 };
 
+struct RRQ {			/* Structure is in Big Endian format */
+	uint32_t rrq;
+#define rrq_rsvd_SHIFT		24
+#define rrq_rsvd_MASK		0x000000ff
+#define rrq_rsvd_WORD		rrq
+#define rrq_did_SHIFT		0
+#define rrq_did_MASK		0x00ffffff
+#define rrq_did_WORD		rrq
+	uint32_t rrq_exchg;
+#define rrq_oxid_SHIFT		16
+#define rrq_oxid_MASK		0xffff
+#define rrq_oxid_WORD		rrq_exchg
+#define rrq_rxid_SHIFT		0
+#define rrq_rxid_MASK		0xffff
+#define rrq_rxid_WORD		rrq_exchg
+};
+
+
 struct RTV_RSP {		/* Structure is in Big Endian format */
 	uint32_t ratov;
 	uint32_t edtov;
@@ -1172,7 +1190,10 @@
 #define PCI_VENDOR_ID_EMULEX        0x10df
 #define PCI_DEVICE_ID_FIREFLY       0x1ae5
 #define PCI_DEVICE_ID_PROTEUS_VF    0xe100
+#define PCI_DEVICE_ID_BALIUS        0xe131
 #define PCI_DEVICE_ID_PROTEUS_PF    0xe180
+#define PCI_DEVICE_ID_LANCER_FC     0xe200
+#define PCI_DEVICE_ID_LANCER_FCOE   0xe260
 #define PCI_DEVICE_ID_SAT_SMB       0xf011
 #define PCI_DEVICE_ID_SAT_MID       0xf015
 #define PCI_DEVICE_ID_RFLY          0xf095
@@ -1189,6 +1210,7 @@
 #define PCI_DEVICE_ID_SAT           0xf100
 #define PCI_DEVICE_ID_SAT_SCSP      0xf111
 #define PCI_DEVICE_ID_SAT_DCSP      0xf112
+#define PCI_DEVICE_ID_FALCON        0xf180
 #define PCI_DEVICE_ID_SUPERFLY      0xf700
 #define PCI_DEVICE_ID_DRAGONFLY     0xf800
 #define PCI_DEVICE_ID_CENTAUR       0xf900
@@ -1210,8 +1232,6 @@
 #define PCI_VENDOR_ID_SERVERENGINE  0x19a2
 #define PCI_DEVICE_ID_TIGERSHARK    0x0704
 #define PCI_DEVICE_ID_TOMCAT        0x0714
-#define PCI_DEVICE_ID_FALCON        0xf180
-#define PCI_DEVICE_ID_BALIUS        0xe131
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1368,7 +1388,6 @@
 #define MBX_READ_LNK_STAT   0x12
 #define MBX_REG_LOGIN       0x13
 #define MBX_UNREG_LOGIN     0x14
-#define MBX_READ_LA         0x15
 #define MBX_CLEAR_LA        0x16
 #define MBX_DUMP_MEMORY     0x17
 #define MBX_DUMP_CONTEXT    0x18
@@ -1402,7 +1421,7 @@
 #define MBX_READ_SPARM64    0x8D
 #define MBX_READ_RPI64      0x8F
 #define MBX_REG_LOGIN64     0x93
-#define MBX_READ_LA64       0x95
+#define MBX_READ_TOPOLOGY   0x95
 #define MBX_REG_VPI	    0x96
 #define MBX_UNREG_VPI	    0x97
 
@@ -1823,12 +1842,13 @@
 #define FLAGS_IMED_ABORT             0x04000	/* Bit 14 */
 
 	uint32_t link_speed;
-#define LINK_SPEED_AUTO 0       /* Auto selection */
-#define LINK_SPEED_1G   1       /* 1 Gigabaud */
-#define LINK_SPEED_2G   2       /* 2 Gigabaud */
-#define LINK_SPEED_4G   4       /* 4 Gigabaud */
-#define LINK_SPEED_8G   8       /* 8 Gigabaud */
-#define LINK_SPEED_10G   16      /* 10 Gigabaud */
+#define LINK_SPEED_AUTO 0x0     /* Auto selection */
+#define LINK_SPEED_1G   0x1     /* 1 Gigabaud */
+#define LINK_SPEED_2G   0x2     /* 2 Gigabaud */
+#define LINK_SPEED_4G   0x4     /* 4 Gigabaud */
+#define LINK_SPEED_8G   0x8     /* 8 Gigabaud */
+#define LINK_SPEED_10G  0x10    /* 10 Gigabaud */
+#define LINK_SPEED_16G  0x11    /* 16 Gigabaud */
 
 } INIT_LINK_VAR;
 
@@ -1999,6 +2019,7 @@
 #define LMT_4Gb       0x040
 #define LMT_8Gb       0x080
 #define LMT_10Gb      0x100
+#define LMT_16Gb      0x200
 	uint32_t rsvd2;
 	uint32_t rsvd3;
 	uint32_t max_xri;
@@ -2394,100 +2415,93 @@
 #endif
 } UNREG_D_ID_VAR;
 
-/* Structure for MB Command READ_LA (21) */
-/* Structure for MB Command READ_LA64 (0x95) */
-
-typedef struct {
+/* Structure for MB Command READ_TOPOLOGY (0x95) */
+struct lpfc_mbx_read_top {
 	uint32_t eventTag;	/* Event tag */
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t rsvd1:19;
-	uint32_t fa:1;
-	uint32_t mm:1;		/* Menlo Maintenance mode enabled */
-	uint32_t rx:1;
-	uint32_t pb:1;
-	uint32_t il:1;
-	uint32_t attType:8;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint32_t attType:8;
-	uint32_t il:1;
-	uint32_t pb:1;
-	uint32_t rx:1;
-	uint32_t mm:1;
-	uint32_t fa:1;
-	uint32_t rsvd1:19;
-#endif
-
-#define AT_RESERVED    0x00	/* Reserved - attType */
-#define AT_LINK_UP     0x01	/* Link is up */
-#define AT_LINK_DOWN   0x02	/* Link is down */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint8_t granted_AL_PA;
-	uint8_t lipAlPs;
-	uint8_t lipType;
-	uint8_t topology;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint8_t topology;
-	uint8_t lipType;
-	uint8_t lipAlPs;
-	uint8_t granted_AL_PA;
-#endif
-
-#define TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
-#define TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
-#define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */
-
-	union {
-		struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer
-					   to */
-		/* store the LILP AL_PA position map into */
-		struct ulp_bde64 lilpBde64;
-	} un;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t Dlu:1;
-	uint32_t Dtf:1;
-	uint32_t Drsvd2:14;
-	uint32_t DlnkSpeed:8;
-	uint32_t DnlPort:4;
-	uint32_t Dtx:2;
-	uint32_t Drx:2;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint32_t Drx:2;
-	uint32_t Dtx:2;
-	uint32_t DnlPort:4;
-	uint32_t DlnkSpeed:8;
-	uint32_t Drsvd2:14;
-	uint32_t Dtf:1;
-	uint32_t Dlu:1;
-#endif
-
-#ifdef __BIG_ENDIAN_BITFIELD
-	uint32_t Ulu:1;
-	uint32_t Utf:1;
-	uint32_t Ursvd2:14;
-	uint32_t UlnkSpeed:8;
-	uint32_t UnlPort:4;
-	uint32_t Utx:2;
-	uint32_t Urx:2;
-#else	/*  __LITTLE_ENDIAN_BITFIELD */
-	uint32_t Urx:2;
-	uint32_t Utx:2;
-	uint32_t UnlPort:4;
-	uint32_t UlnkSpeed:8;
-	uint32_t Ursvd2:14;
-	uint32_t Utf:1;
-	uint32_t Ulu:1;
-#endif
-
-#define LA_UNKNW_LINK  0x0    /* lnkSpeed */
-#define LA_1GHZ_LINK   0x04   /* lnkSpeed */
-#define LA_2GHZ_LINK   0x08   /* lnkSpeed */
-#define LA_4GHZ_LINK   0x10   /* lnkSpeed */
-#define LA_8GHZ_LINK   0x20   /* lnkSpeed */
-#define LA_10GHZ_LINK  0x40   /* lnkSpeed */
-
-} READ_LA_VAR;
+	uint32_t word2;
+#define lpfc_mbx_read_top_fa_SHIFT		12
+#define lpfc_mbx_read_top_fa_MASK		0x00000001
+#define lpfc_mbx_read_top_fa_WORD		word2
+#define lpfc_mbx_read_top_mm_SHIFT		11
+#define lpfc_mbx_read_top_mm_MASK		0x00000001
+#define lpfc_mbx_read_top_mm_WORD		word2
+#define lpfc_mbx_read_top_pb_SHIFT		9
+#define lpfc_mbx_read_top_pb_MASK		0x00000001
+#define lpfc_mbx_read_top_pb_WORD		word2
+#define lpfc_mbx_read_top_il_SHIFT		8
+#define lpfc_mbx_read_top_il_MASK		0x00000001
+#define lpfc_mbx_read_top_il_WORD		word2
+#define lpfc_mbx_read_top_att_type_SHIFT	0
+#define lpfc_mbx_read_top_att_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_att_type_WORD		word2
+#define LPFC_ATT_RESERVED    0x00	/* Reserved - attType */
+#define LPFC_ATT_LINK_UP     0x01	/* Link is up */
+#define LPFC_ATT_LINK_DOWN   0x02	/* Link is down */
+	uint32_t word3;
+#define lpfc_mbx_read_top_alpa_granted_SHIFT	24
+#define lpfc_mbx_read_top_alpa_granted_MASK	0x000000FF
+#define lpfc_mbx_read_top_alpa_granted_WORD	word3
+#define lpfc_mbx_read_top_lip_alps_SHIFT	16
+#define lpfc_mbx_read_top_lip_alps_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_alps_WORD		word3
+#define lpfc_mbx_read_top_lip_type_SHIFT	8
+#define lpfc_mbx_read_top_lip_type_MASK		0x000000FF
+#define lpfc_mbx_read_top_lip_type_WORD		word3
+#define lpfc_mbx_read_top_topology_SHIFT	0
+#define lpfc_mbx_read_top_topology_MASK		0x000000FF
+#define lpfc_mbx_read_top_topology_WORD		word3
+#define LPFC_TOPOLOGY_PT_PT 0x01	/* Topology is pt-pt / pt-fabric */
+#define LPFC_TOPOLOGY_LOOP  0x02	/* Topology is FC-AL */
+#define LPFC_TOPOLOGY_MM    0x05	/* maint mode zephyr to menlo */
+	/* BDE for the 128 byte LILP AL_PA position map buffer */
+	struct ulp_bde64 lilpBde64;
+#define LPFC_ALPA_MAP_SIZE	128
+	uint32_t word7;
+#define lpfc_mbx_read_top_ld_lu_SHIFT		31
+#define lpfc_mbx_read_top_ld_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_lu_WORD		word7
+#define lpfc_mbx_read_top_ld_tf_SHIFT		30
+#define lpfc_mbx_read_top_ld_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_ld_tf_WORD		word7
+#define lpfc_mbx_read_top_ld_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_ld_link_spd_MASK	0x000000FF
+#define lpfc_mbx_read_top_ld_link_spd_WORD	word7
+#define lpfc_mbx_read_top_ld_nl_port_SHIFT	4
+#define lpfc_mbx_read_top_ld_nl_port_MASK	0x0000000F
+#define lpfc_mbx_read_top_ld_nl_port_WORD	word7
+#define lpfc_mbx_read_top_ld_tx_SHIFT		2
+#define lpfc_mbx_read_top_ld_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_tx_WORD		word7
+#define lpfc_mbx_read_top_ld_rx_SHIFT		0
+#define lpfc_mbx_read_top_ld_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_ld_rx_WORD		word7
+	uint32_t word8;
+#define lpfc_mbx_read_top_lu_SHIFT		31
+#define lpfc_mbx_read_top_lu_MASK		0x00000001
+#define lpfc_mbx_read_top_lu_WORD		word8
+#define lpfc_mbx_read_top_tf_SHIFT		30
+#define lpfc_mbx_read_top_tf_MASK		0x00000001
+#define lpfc_mbx_read_top_tf_WORD		word8
+#define lpfc_mbx_read_top_link_spd_SHIFT	8
+#define lpfc_mbx_read_top_link_spd_MASK		0x000000FF
+#define lpfc_mbx_read_top_link_spd_WORD		word8
+#define lpfc_mbx_read_top_nl_port_SHIFT		4
+#define lpfc_mbx_read_top_nl_port_MASK		0x0000000F
+#define lpfc_mbx_read_top_nl_port_WORD		word8
+#define lpfc_mbx_read_top_tx_SHIFT		2
+#define lpfc_mbx_read_top_tx_MASK		0x00000003
+#define lpfc_mbx_read_top_tx_WORD		word8
+#define lpfc_mbx_read_top_rx_SHIFT		0
+#define lpfc_mbx_read_top_rx_MASK		0x00000003
+#define lpfc_mbx_read_top_rx_WORD		word8
+#define LPFC_LINK_SPEED_UNKNOWN	0x0
+#define LPFC_LINK_SPEED_1GHZ	0x04
+#define LPFC_LINK_SPEED_2GHZ	0x08
+#define LPFC_LINK_SPEED_4GHZ	0x10
+#define LPFC_LINK_SPEED_8GHZ	0x20
+#define LPFC_LINK_SPEED_10GHZ	0x40
+#define LPFC_LINK_SPEED_16GHZ	0x80
+};
 
 /* Structure for MB Command CLEAR_LA (22) */
 
@@ -3016,7 +3030,6 @@
 	READ_LNK_VAR varRdLnk;		/* cmd = 18 (READ_LNK_STAT)  */
 	REG_LOGIN_VAR varRegLogin;	/* cmd = 19 (REG_LOGIN(64))  */
 	UNREG_LOGIN_VAR varUnregLogin;	/* cmd = 20 (UNREG_LOGIN)    */
-	READ_LA_VAR varReadLA;		/* cmd = 21 (READ_LA(64))    */
 	CLEAR_LA_VAR varClearLA;	/* cmd = 22 (CLEAR_LA)       */
 	DUMP_VAR varDmp;		/* Warm Start DUMP mbx cmd   */
 	UNREG_D_ID_VAR varUnregDID;	/* cmd = 0x23 (UNREG_D_ID)   */
@@ -3026,6 +3039,7 @@
 	struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ)  */
 	struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/
 	CONFIG_PORT_VAR varCfgPort;	/* cmd = 0x88 (CONFIG_PORT)  */
+	struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */
 	REG_VPI_VAR varRegVpi;		/* cmd = 0x96 (REG_VPI) */
 	UNREG_VPI_VAR varUnregVpi;	/* cmd = 0x97 (UNREG_VPI) */
 	ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
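
The READ_LA bitfield structure removed above is replaced by struct lpfc_mbx_read_top, which
encodes every field as a plain 32-bit word plus *_SHIFT/*_MASK/*_WORD macro triplets instead
of endian-dependent C bitfields. A minimal sketch of how such triplets are consumed follows;
the driver's actual accessors are the bf_get()/bf_set() macros used throughout this patch
(their definitions live in lpfc_hw4.h and are not reproduced in this excerpt), and the
example_ macro below only illustrates the convention.

/* Extract a field: shift the named word right, then apply the mask. */
#define example_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/*
 * For instance, the attention type of a completed READ_TOPOLOGY mailbox
 * would be read as:
 *
 *	if (example_bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
 *		...handle the link-up attention...
 */
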
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e4bc34..94c1aa1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -64,29 +64,39 @@
 #define lpfc_sli_intf_valid_MASK		0x00000007
 #define lpfc_sli_intf_valid_WORD		word0
 #define LPFC_SLI_INTF_VALID		6
-#define lpfc_sli_intf_featurelevel2_SHIFT	24
-#define lpfc_sli_intf_featurelevel2_MASK	0x0000001F
-#define lpfc_sli_intf_featurelevel2_WORD	word0
-#define lpfc_sli_intf_featurelevel1_SHIFT	16
-#define lpfc_sli_intf_featurelevel1_MASK	0x000000FF
-#define lpfc_sli_intf_featurelevel1_WORD	word0
-#define LPFC_SLI_INTF_FEATURELEVEL1_1	1
-#define LPFC_SLI_INTF_FEATURELEVEL1_2	2
+#define lpfc_sli_intf_sli_hint2_SHIFT		24
+#define lpfc_sli_intf_sli_hint2_MASK		0x0000001F
+#define lpfc_sli_intf_sli_hint2_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT2_NONE	0
+#define lpfc_sli_intf_sli_hint1_SHIFT		16
+#define lpfc_sli_intf_sli_hint1_MASK		0x000000FF
+#define lpfc_sli_intf_sli_hint1_WORD		word0
+#define LPFC_SLI_INTF_SLI_HINT1_NONE	0
+#define LPFC_SLI_INTF_SLI_HINT1_1	1
+#define LPFC_SLI_INTF_SLI_HINT1_2	2
+#define lpfc_sli_intf_if_type_SHIFT		12
+#define lpfc_sli_intf_if_type_MASK		0x0000000F
+#define lpfc_sli_intf_if_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_0		0
+#define LPFC_SLI_INTF_IF_TYPE_1		1
+#define LPFC_SLI_INTF_IF_TYPE_2		2
 #define lpfc_sli_intf_sli_family_SHIFT		8
-#define lpfc_sli_intf_sli_family_MASK		0x000000FF
+#define lpfc_sli_intf_sli_family_MASK		0x0000000F
 #define lpfc_sli_intf_sli_family_WORD		word0
-#define LPFC_SLI_INTF_FAMILY_BE2	0
-#define LPFC_SLI_INTF_FAMILY_BE3	1
+#define LPFC_SLI_INTF_FAMILY_BE2	0x0
+#define LPFC_SLI_INTF_FAMILY_BE3	0x1
+#define LPFC_SLI_INTF_FAMILY_LNCR_A0	0xa
+#define LPFC_SLI_INTF_FAMILY_LNCR_B0	0xb
 #define lpfc_sli_intf_slirev_SHIFT		4
 #define lpfc_sli_intf_slirev_MASK		0x0000000F
 #define lpfc_sli_intf_slirev_WORD		word0
 #define LPFC_SLI_INTF_REV_SLI3		3
 #define LPFC_SLI_INTF_REV_SLI4		4
-#define lpfc_sli_intf_if_type_SHIFT		0
-#define lpfc_sli_intf_if_type_MASK		0x00000007
-#define lpfc_sli_intf_if_type_WORD		word0
-#define LPFC_SLI_INTF_IF_TYPE_0		0
-#define LPFC_SLI_INTF_IF_TYPE_1		1
+#define lpfc_sli_intf_func_type_SHIFT		0
+#define lpfc_sli_intf_func_type_MASK		0x00000001
+#define lpfc_sli_intf_func_type_WORD		word0
+#define LPFC_SLI_INTF_IF_TYPE_PHYS	0
+#define LPFC_SLI_INTF_IF_TYPE_VIRT	1
 };
 
 #define LPFC_SLI4_MBX_EMBED	true
@@ -450,35 +460,40 @@
 	uint32_t word0;
 };
 
+/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
 #define LPFC_UERR_STATUS_HI		0x00A4
 #define LPFC_UERR_STATUS_LO		0x00A0
 #define LPFC_UE_MASK_HI			0x00AC
 #define LPFC_UE_MASK_LO			0x00A8
+
+/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
 #define LPFC_SLI_INTF			0x0058
 
-/* BAR0 Registers */
-#define LPFC_HST_STATE			0x00AC
-#define lpfc_hst_state_perr_SHIFT	31
-#define lpfc_hst_state_perr_MASK	0x1
-#define lpfc_hst_state_perr_WORD	word0
-#define lpfc_hst_state_sfi_SHIFT	30
-#define lpfc_hst_state_sfi_MASK		0x1
-#define lpfc_hst_state_sfi_WORD		word0
-#define lpfc_hst_state_nip_SHIFT	29
-#define lpfc_hst_state_nip_MASK		0x1
-#define lpfc_hst_state_nip_WORD		word0
-#define lpfc_hst_state_ipc_SHIFT	28
-#define lpfc_hst_state_ipc_MASK		0x1
-#define lpfc_hst_state_ipc_WORD		word0
-#define lpfc_hst_state_xrom_SHIFT	27
-#define lpfc_hst_state_xrom_MASK	0x1
-#define lpfc_hst_state_xrom_WORD	word0
-#define lpfc_hst_state_dl_SHIFT		26
-#define lpfc_hst_state_dl_MASK		0x1
-#define lpfc_hst_state_dl_WORD		word0
-#define lpfc_hst_state_port_status_SHIFT	0
-#define lpfc_hst_state_port_status_MASK		0xFFFF
-#define lpfc_hst_state_port_status_WORD		word0
+#define LPFC_SLIPORT_IF2_SMPHR		0x0400
+#define lpfc_port_smphr_perr_SHIFT	31
+#define lpfc_port_smphr_perr_MASK	0x1
+#define lpfc_port_smphr_perr_WORD	word0
+#define lpfc_port_smphr_sfi_SHIFT	30
+#define lpfc_port_smphr_sfi_MASK	0x1
+#define lpfc_port_smphr_sfi_WORD	word0
+#define lpfc_port_smphr_nip_SHIFT	29
+#define lpfc_port_smphr_nip_MASK	0x1
+#define lpfc_port_smphr_nip_WORD	word0
+#define lpfc_port_smphr_ipc_SHIFT	28
+#define lpfc_port_smphr_ipc_MASK	0x1
+#define lpfc_port_smphr_ipc_WORD	word0
+#define lpfc_port_smphr_scr1_SHIFT	27
+#define lpfc_port_smphr_scr1_MASK	0x1
+#define lpfc_port_smphr_scr1_WORD	word0
+#define lpfc_port_smphr_scr2_SHIFT	26
+#define lpfc_port_smphr_scr2_MASK	0x1
+#define lpfc_port_smphr_scr2_WORD	word0
+#define lpfc_port_smphr_host_scratch_SHIFT	16
+#define lpfc_port_smphr_host_scratch_MASK	0xFF
+#define lpfc_port_smphr_host_scratch_WORD	word0
+#define lpfc_port_smphr_port_status_SHIFT	0
+#define lpfc_port_smphr_port_status_MASK	0xFFFF
+#define lpfc_port_smphr_port_status_WORD	word0
 
 #define LPFC_POST_STAGE_POWER_ON_RESET			0x0000
 #define LPFC_POST_STAGE_AWAITING_HOST_RDY		0x0001
@@ -511,10 +526,46 @@
 #define LPFC_POST_STAGE_RC_DONE				0x0B07
 #define LPFC_POST_STAGE_REBOOT_SYSTEM			0x0B08
 #define LPFC_POST_STAGE_MAC_ADDRESS			0x0C00
-#define LPFC_POST_STAGE_ARMFW_READY			0xC000
-#define LPFC_POST_STAGE_ARMFW_UE 			0xF000
+#define LPFC_POST_STAGE_PORT_READY			0xC000
+#define LPFC_POST_STAGE_PORT_UE 			0xF000
 
-/* BAR1 Registers */
+#define LPFC_SLIPORT_STATUS		0x0404
+#define lpfc_sliport_status_err_SHIFT	31
+#define lpfc_sliport_status_err_MASK	0x1
+#define lpfc_sliport_status_err_WORD	word0
+#define lpfc_sliport_status_end_SHIFT	30
+#define lpfc_sliport_status_end_MASK	0x1
+#define lpfc_sliport_status_end_WORD	word0
+#define lpfc_sliport_status_oti_SHIFT	29
+#define lpfc_sliport_status_oti_MASK	0x1
+#define lpfc_sliport_status_oti_WORD	word0
+#define lpfc_sliport_status_rn_SHIFT	24
+#define lpfc_sliport_status_rn_MASK	0x1
+#define lpfc_sliport_status_rn_WORD	word0
+#define lpfc_sliport_status_rdy_SHIFT	23
+#define lpfc_sliport_status_rdy_MASK	0x1
+#define lpfc_sliport_status_rdy_WORD	word0
+#define MAX_IF_TYPE_2_RESETS	1000
+
+#define LPFC_SLIPORT_CNTRL		0x0408
+#define lpfc_sliport_ctrl_end_SHIFT	30
+#define lpfc_sliport_ctrl_end_MASK	0x1
+#define lpfc_sliport_ctrl_end_WORD	word0
+#define LPFC_SLIPORT_LITTLE_ENDIAN 0
+#define LPFC_SLIPORT_BIG_ENDIAN	   1
+#define lpfc_sliport_ctrl_ip_SHIFT	27
+#define lpfc_sliport_ctrl_ip_MASK	0x1
+#define lpfc_sliport_ctrl_ip_WORD	word0
+#define LPFC_SLIPORT_INIT_PORT	1
+
+#define LPFC_SLIPORT_ERR_1		0x040C
+#define LPFC_SLIPORT_ERR_2		0x0410
+
+/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
+ * reside in BAR 2.
+ */
+#define LPFC_SLIPORT_IF0_SMPHR	0x00AC
+
 #define LPFC_IMR_MASK_ALL	0xFFFFFFFF
 #define LPFC_ISCR_CLEAR_ALL	0xFFFFFFFF
 
@@ -569,14 +620,21 @@
 #define LPFC_SLI4_INTR30		BIT30
 #define LPFC_SLI4_INTR31		BIT31
 
-/* BAR2 Registers */
+/*
+ * The Doorbell registers defined here exist in different BAR
+ * register sets depending on the UCNA Port's reported if_type
+ * value.  For UCNA ports running SLI4 and if_type 0, they reside in
+ * BAR4.  For UCNA ports running SLI4 and if_type 2, they reside in
+ * BAR0.  The offsets are the same so the driver must account for
+ * any base address difference.
+ */
 #define LPFC_RQ_DOORBELL		0x00A0
 #define lpfc_rq_doorbell_num_posted_SHIFT	16
 #define lpfc_rq_doorbell_num_posted_MASK	0x3FFF
 #define lpfc_rq_doorbell_num_posted_WORD	word0
 #define LPFC_RQ_POST_BATCH		8	/* RQEs to post at one time */
 #define lpfc_rq_doorbell_id_SHIFT		0
-#define lpfc_rq_doorbell_id_MASK		0x03FF
+#define lpfc_rq_doorbell_id_MASK		0xFFFF
 #define lpfc_rq_doorbell_id_WORD		word0
 
 #define LPFC_WQ_DOORBELL		0x0040
@@ -591,6 +649,11 @@
 #define lpfc_wq_doorbell_id_WORD		word0
 
 #define LPFC_EQCQ_DOORBELL		0x0120
+#define lpfc_eqcq_doorbell_se_SHIFT		31
+#define lpfc_eqcq_doorbell_se_MASK		0x0001
+#define lpfc_eqcq_doorbell_se_WORD		word0
+#define LPFC_EQCQ_SOLICIT_ENABLE_OFF	0
+#define LPFC_EQCQ_SOLICIT_ENABLE_ON	1
 #define lpfc_eqcq_doorbell_arm_SHIFT		29
 #define lpfc_eqcq_doorbell_arm_MASK		0x0001
 #define lpfc_eqcq_doorbell_arm_WORD		word0
@@ -628,7 +691,7 @@
 #define lpfc_mq_doorbell_num_posted_MASK	0x3FFF
 #define lpfc_mq_doorbell_num_posted_WORD	word0
 #define lpfc_mq_doorbell_id_SHIFT		0
-#define lpfc_mq_doorbell_id_MASK		0x03FF
+#define lpfc_mq_doorbell_id_MASK		0xFFFF
 #define lpfc_mq_doorbell_id_WORD		word0
 
 struct lpfc_sli4_cfg_mhdr {
@@ -1048,12 +1111,18 @@
 #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT	LPFC_TRAILER_CODE_LINK
 #define lpfc_mbx_mq_create_ext_async_evt_link_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_link_WORD	async_evt_bmap
-#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT	LPFC_TRAILER_CODE_FCOE
-#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK	0x00000001
-#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT	LPFC_TRAILER_CODE_FCOE
+#define lpfc_mbx_mq_create_ext_async_evt_fip_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fip_WORD	async_evt_bmap
 #define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT	LPFC_TRAILER_CODE_GRP5
 #define lpfc_mbx_mq_create_ext_async_evt_group5_MASK	0x00000001
 #define lpfc_mbx_mq_create_ext_async_evt_group5_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT	LPFC_TRAILER_CODE_FC
+#define lpfc_mbx_mq_create_ext_async_evt_fc_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_fc_WORD	async_evt_bmap
+#define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT	LPFC_TRAILER_CODE_SLI
+#define lpfc_mbx_mq_create_ext_async_evt_sli_MASK	0x00000001
+#define lpfc_mbx_mq_create_ext_async_evt_sli_WORD	async_evt_bmap
 			struct mq_context context;
 			struct dma_address page[LPFC_MAX_MQ_PAGE];
 		} request;
@@ -1307,7 +1376,7 @@
 #define lpfc_function_mode_dal_WORD		function_mode
 #define lpfc_function_mode_lro_SHIFT		9
 #define lpfc_function_mode_lro_MASK		0x00000001
-#define lpfc_function_mode_lro_WORD		function_mode9
+#define lpfc_function_mode_lro_WORD		function_mode
 #define lpfc_function_mode_flex10_SHIFT		10
 #define lpfc_function_mode_flex10_MASK		0x00000001
 #define lpfc_function_mode_flex10_WORD		function_mode
@@ -1358,10 +1427,16 @@
 #define lpfc_init_vfi_vf_SHIFT		29
 #define lpfc_init_vfi_vf_MASK		0x00000001
 #define lpfc_init_vfi_vf_WORD		word1
+#define lpfc_init_vfi_vp_SHIFT		28
+#define lpfc_init_vfi_vp_MASK		0x00000001
+#define lpfc_init_vfi_vp_WORD		word1
 #define lpfc_init_vfi_vfi_SHIFT		0
 #define lpfc_init_vfi_vfi_MASK		0x0000FFFF
 #define lpfc_init_vfi_vfi_WORD		word1
 	uint32_t word2;
+#define lpfc_init_vfi_vpi_SHIFT		16
+#define lpfc_init_vfi_vpi_MASK		0x0000FFFF
+#define lpfc_init_vfi_vpi_WORD		word2
 #define lpfc_init_vfi_fcfi_SHIFT	0
 #define lpfc_init_vfi_fcfi_MASK		0x0000FFFF
 #define lpfc_init_vfi_fcfi_WORD		word2
@@ -2069,6 +2144,8 @@
 #define LPFC_TRAILER_CODE_FCOE	0x2
 #define LPFC_TRAILER_CODE_DCBX	0x3
 #define LPFC_TRAILER_CODE_GRP5	0x5
+#define LPFC_TRAILER_CODE_FC	0x10
+#define LPFC_TRAILER_CODE_SLI	0x11
 };
 
 struct lpfc_acqe_link {
@@ -2094,11 +2171,12 @@
 #define LPFC_ASYNC_LINK_STATUS_UP		0x1
 #define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN	0x2
 #define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP	0x3
-#define lpfc_acqe_link_physical_SHIFT		0
-#define lpfc_acqe_link_physical_MASK		0x000000FF
-#define lpfc_acqe_link_physical_WORD		word0
-#define LPFC_ASYNC_LINK_PORT_A			0x0
-#define LPFC_ASYNC_LINK_PORT_B			0x1
+#define lpfc_acqe_link_type_SHIFT		6
+#define lpfc_acqe_link_type_MASK		0x00000003
+#define lpfc_acqe_link_type_WORD		word0
+#define lpfc_acqe_link_number_SHIFT		0
+#define lpfc_acqe_link_number_MASK		0x0000003F
+#define lpfc_acqe_link_number_WORD		word0
 	uint32_t word1;
 #define lpfc_acqe_link_fault_SHIFT	0
 #define lpfc_acqe_link_fault_MASK	0x000000FF
@@ -2106,29 +2184,31 @@
 #define LPFC_ASYNC_LINK_FAULT_NONE	0x0
 #define LPFC_ASYNC_LINK_FAULT_LOCAL	0x1
 #define LPFC_ASYNC_LINK_FAULT_REMOTE	0x2
-#define lpfc_acqe_qos_link_speed_SHIFT	16
-#define lpfc_acqe_qos_link_speed_MASK	0x0000FFFF
-#define lpfc_acqe_qos_link_speed_WORD	word1
+#define lpfc_acqe_logical_link_speed_SHIFT	16
+#define lpfc_acqe_logical_link_speed_MASK	0x0000FFFF
+#define lpfc_acqe_logical_link_speed_WORD	word1
 	uint32_t event_tag;
 	uint32_t trailer;
+#define LPFC_LINK_EVENT_TYPE_PHYSICAL	0x0
+#define LPFC_LINK_EVENT_TYPE_VIRTUAL	0x1
 };
 
-struct lpfc_acqe_fcoe {
+struct lpfc_acqe_fip {
 	uint32_t index;
 	uint32_t word1;
-#define lpfc_acqe_fcoe_fcf_count_SHIFT		0
-#define lpfc_acqe_fcoe_fcf_count_MASK		0x0000FFFF
-#define lpfc_acqe_fcoe_fcf_count_WORD		word1
-#define lpfc_acqe_fcoe_event_type_SHIFT		16
-#define lpfc_acqe_fcoe_event_type_MASK		0x0000FFFF
-#define lpfc_acqe_fcoe_event_type_WORD		word1
-#define LPFC_FCOE_EVENT_TYPE_NEW_FCF		0x1
-#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL	0x2
-#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD		0x3
-#define LPFC_FCOE_EVENT_TYPE_CVL		0x4
-#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD	0x5
+#define lpfc_acqe_fip_fcf_count_SHIFT		0
+#define lpfc_acqe_fip_fcf_count_MASK		0x0000FFFF
+#define lpfc_acqe_fip_fcf_count_WORD		word1
+#define lpfc_acqe_fip_event_type_SHIFT		16
+#define lpfc_acqe_fip_event_type_MASK		0x0000FFFF
+#define lpfc_acqe_fip_event_type_WORD		word1
 	uint32_t event_tag;
 	uint32_t trailer;
+#define LPFC_FIP_EVENT_TYPE_NEW_FCF		0x1
+#define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL	0x2
+#define LPFC_FIP_EVENT_TYPE_FCF_DEAD		0x3
+#define LPFC_FIP_EVENT_TYPE_CVL			0x4
+#define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD	0x5
 };
 
 struct lpfc_acqe_dcbx {
@@ -2140,9 +2220,12 @@
 
 struct lpfc_acqe_grp5 {
 	uint32_t word0;
-#define lpfc_acqe_grp5_pport_SHIFT	0
-#define lpfc_acqe_grp5_pport_MASK	0x000000FF
-#define lpfc_acqe_grp5_pport_WORD	word0
+#define lpfc_acqe_grp5_type_SHIFT		6
+#define lpfc_acqe_grp5_type_MASK		0x00000003
+#define lpfc_acqe_grp5_type_WORD		word0
+#define lpfc_acqe_grp5_number_SHIFT		0
+#define lpfc_acqe_grp5_number_MASK		0x0000003F
+#define lpfc_acqe_grp5_number_WORD		word0
 	uint32_t word1;
 #define lpfc_acqe_grp5_llink_spd_SHIFT	16
 #define lpfc_acqe_grp5_llink_spd_MASK	0x0000FFFF
@@ -2151,6 +2234,68 @@
 	uint32_t trailer;
 };
 
+struct lpfc_acqe_fc_la {
+	uint32_t word0;
+#define lpfc_acqe_fc_la_speed_SHIFT		24
+#define lpfc_acqe_fc_la_speed_MASK		0x000000FF
+#define lpfc_acqe_fc_la_speed_WORD		word0
+#define LPFC_FC_LA_SPEED_UNKOWN		0x0
+#define LPFC_FC_LA_SPEED_1G		0x1
+#define LPFC_FC_LA_SPEED_2G		0x2
+#define LPFC_FC_LA_SPEED_4G		0x4
+#define LPFC_FC_LA_SPEED_8G		0x8
+#define LPFC_FC_LA_SPEED_10G		0xA
+#define LPFC_FC_LA_SPEED_16G		0x10
+#define lpfc_acqe_fc_la_topology_SHIFT		16
+#define lpfc_acqe_fc_la_topology_MASK		0x000000FF
+#define lpfc_acqe_fc_la_topology_WORD		word0
+#define LPFC_FC_LA_TOP_UNKOWN		0x0
+#define LPFC_FC_LA_TOP_P2P		0x1
+#define LPFC_FC_LA_TOP_FCAL		0x2
+#define LPFC_FC_LA_TOP_INTERNAL_LOOP	0x3
+#define LPFC_FC_LA_TOP_SERDES_LOOP	0x4
+#define lpfc_acqe_fc_la_att_type_SHIFT		8
+#define lpfc_acqe_fc_la_att_type_MASK		0x000000FF
+#define lpfc_acqe_fc_la_att_type_WORD		word0
+#define LPFC_FC_LA_TYPE_LINK_UP		0x1
+#define LPFC_FC_LA_TYPE_LINK_DOWN	0x2
+#define LPFC_FC_LA_TYPE_NO_HARD_ALPA	0x3
+#define lpfc_acqe_fc_la_port_type_SHIFT		6
+#define lpfc_acqe_fc_la_port_type_MASK		0x00000003
+#define lpfc_acqe_fc_la_port_type_WORD		word0
+#define LPFC_LINK_TYPE_ETHERNET		0x0
+#define LPFC_LINK_TYPE_FC		0x1
+#define lpfc_acqe_fc_la_port_number_SHIFT	0
+#define lpfc_acqe_fc_la_port_number_MASK	0x0000003F
+#define lpfc_acqe_fc_la_port_number_WORD	word0
+	uint32_t word1;
+#define lpfc_acqe_fc_la_llink_spd_SHIFT		16
+#define lpfc_acqe_fc_la_llink_spd_MASK		0x0000FFFF
+#define lpfc_acqe_fc_la_llink_spd_WORD		word1
+#define lpfc_acqe_fc_la_fault_SHIFT		0
+#define lpfc_acqe_fc_la_fault_MASK		0x000000FF
+#define lpfc_acqe_fc_la_fault_WORD		word1
+#define LPFC_FC_LA_FAULT_NONE		0x0
+#define LPFC_FC_LA_FAULT_LOCAL		0x1
+#define LPFC_FC_LA_FAULT_REMOTE		0x2
+	uint32_t event_tag;
+	uint32_t trailer;
+#define LPFC_FC_LA_EVENT_TYPE_FC_LINK		0x1
+#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK	0x2
+};
+
+struct lpfc_acqe_sli {
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t reserved;
+	uint32_t trailer;
+#define LPFC_SLI_EVENT_TYPE_PORT_ERROR		0x1
+#define LPFC_SLI_EVENT_TYPE_OVER_TEMP		0x2
+#define LPFC_SLI_EVENT_TYPE_NORM_TEMP		0x3
+#define LPFC_SLI_EVENT_TYPE_NVLOG_POST		0x4
+#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP		0x5
+};
+
 /*
  * Define the bootstrap mailbox (bmbx) region used to communicate
  * mailbox command between the host and port. The mailbox consists
@@ -2210,7 +2355,7 @@
 #define wqe_rcvoxid_WORD      word9
 	uint32_t word10;
 #define wqe_ebde_cnt_SHIFT    0
-#define wqe_ebde_cnt_MASK     0x00000007
+#define wqe_ebde_cnt_MASK     0x0000000f
 #define wqe_ebde_cnt_WORD     word10
 #define wqe_lenloc_SHIFT      7
 #define wqe_lenloc_MASK       0x00000003
@@ -2402,7 +2547,6 @@
 	uint32_t relative_offset;
 	struct wqe_rctl_dfctl wge_ctl;
 	struct wqe_common wqe_com; /* words 6-11 */
-	/* Note: word10 different REVISIT */
 	uint32_t xmit_len;
 	uint32_t rsvd_12_15[3];
 };
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b306579..6d0b36a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -446,23 +446,25 @@
 	/* Get the default values for Model Name and Description */
 	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
 
-	if ((phba->cfg_link_speed > LINK_SPEED_10G)
-	    || ((phba->cfg_link_speed == LINK_SPEED_1G)
+	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
+	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
 		&& !(phba->lmt & LMT_1Gb))
-	    || ((phba->cfg_link_speed == LINK_SPEED_2G)
+	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
 		&& !(phba->lmt & LMT_2Gb))
-	    || ((phba->cfg_link_speed == LINK_SPEED_4G)
+	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
 		&& !(phba->lmt & LMT_4Gb))
-	    || ((phba->cfg_link_speed == LINK_SPEED_8G)
+	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
 		&& !(phba->lmt & LMT_8Gb))
-	    || ((phba->cfg_link_speed == LINK_SPEED_10G)
-		&& !(phba->lmt & LMT_10Gb))) {
+	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
+		&& !(phba->lmt & LMT_10Gb))
+	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
+		&& !(phba->lmt & LMT_16Gb))) {
 		/* Reset link speed to auto */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
 			"1302 Invalid speed for this board: "
 			"Reset link speed to auto: x%x\n",
 			phba->cfg_link_speed);
-			phba->cfg_link_speed = LINK_SPEED_AUTO;
+			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 	}
 
 	phba->link_state = LPFC_LINK_DOWN;
@@ -648,22 +650,23 @@
 	mb = &pmb->u.mb;
 	pmb->vport = vport;
 
-	lpfc_init_link(phba, pmb, phba->cfg_topology,
-		phba->cfg_link_speed);
+	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 	lpfc_set_loopback_flag(phba);
 	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
-	if (rc != MBX_SUCCESS) {
+	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0498 Adapter failed to init, mbxCmd x%x "
 			"INIT_LINK, mbxStatus x%x\n",
 			mb->mbxCommand, mb->mbxStatus);
-		/* Clear all interrupt enable conditions */
-		writel(0, phba->HCregaddr);
-		readl(phba->HCregaddr); /* flush */
-		/* Clear all pending interrupts */
-		writel(0xffffffff, phba->HAregaddr);
-		readl(phba->HAregaddr); /* flush */
+		if (phba->sli_rev <= LPFC_SLI_REV3) {
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+			/* Clear all pending interrupts */
+			writel(0xffffffff, phba->HAregaddr);
+			readl(phba->HAregaddr); /* flush */
+		}
 		phba->link_state = LPFC_HBA_ERROR;
 		if (rc != MBX_BUSY || flag == MBX_POLL)
 			mempool_free(pmb, phba->mbox_mem_pool);
@@ -927,6 +930,35 @@
 }
 
 /**
+ * lpfc_rrq_timeout - The RRQ-timer timeout handler
+ * @ptr: unsigned long holds the pointer to lpfc hba data structure.
+ *
+ * This is the RRQ-timer timeout handler registered to the lpfc driver. When
+ * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
+ * work-port-events bitmap and the worker thread is notified. This timeout
+ * event will be used by the worker thread to invoke the actual timeout
+ * handler routine, lpfc_rrq_handler. Any periodic operations will
+ * be performed in the timeout handler and the RRQ timeout event bit shall
+ * be cleared by the worker thread after it has taken the event bitmap out.
+ **/
+static void
+lpfc_rrq_timeout(unsigned long ptr)
+{
+	struct lpfc_hba *phba;
+	uint32_t tmo_posted;
+	unsigned long iflag;
+
+	phba = (struct lpfc_hba *)ptr;
+	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
+	tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
+	if (!tmo_posted)
+		phba->hba_flag |= HBA_RRQ_ACTIVE;
+	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
+	if (!tmo_posted)
+		lpfc_worker_wake_up(phba);
+}
+
+/**
  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
  * @phba: pointer to lpfc hba data structure.
  * @pmboxq: pointer to the driver internal queue element for mailbox command.
@@ -1374,6 +1406,8 @@
 	struct lpfc_vport *vport = phba->pport;
 	uint32_t event_data;
 	struct Scsi_Host *shost;
+	uint32_t if_type;
+	struct lpfc_register portstat_reg;
 
 	/* If the pci channel is offline, ignore possible errors, since
 	 * we cannot communicate with the pci card anyway.
@@ -1390,17 +1424,49 @@
 	/* For now, the actual action for SLI4 device handling is not
 	 * specified yet, just treated it as adaptor hardware failure
 	 */
-	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
-			phba->work_status[0], phba->work_status[1]);
-
 	event_data = FC_REG_DUMP_EVENT;
 	shost = lpfc_shost_from_vport(vport);
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 				  sizeof(event_data), (char *) &event_data,
 				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
 
-	lpfc_sli4_offline_eratt(phba);
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		lpfc_sli4_offline_eratt(phba);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		portstat_reg.word0 =
+			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
+
+		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
+			/* TODO: Register for Overtemp async events. */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2889 Port Overtemperature event, "
+				"taking port offline\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->over_temp_state = HBA_OVER_TEMP;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_sli4_offline_eratt(phba);
+			return;
+		}
+		if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
+			/*
+			 * TODO: Attempt port recovery via a port reset.
+			 * When fully implemented, the driver should
+			 * attempt to recover the port here and return.
+			 * For now, log an error and take the port offline.
+			 */
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2887 Port Error: Attempting "
+					"Port Recovery\n");
+		}
+		lpfc_sli4_offline_eratt(phba);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
+	}
 }
 
 /**
@@ -1459,8 +1525,8 @@
 	lpfc_els_flush_all_cmd(phba);
 
 	psli->slistat.link_event++;
-	lpfc_read_la(phba, pmb, mp);
-	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
 	pmb->vport = vport;
 	/* Block ELS IOCBs until we have processed this mbox command */
 	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
@@ -1853,6 +1919,14 @@
 		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
 				"Fibre Channel Adapter"};
 		break;
+	case PCI_DEVICE_ID_LANCER_FC:
+		oneConnect = 1;
+		m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
+		break;
+	case PCI_DEVICE_ID_LANCER_FCOE:
+		oneConnect = 1;
+		m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
+		break;
 	default:
 		m = (typeof(m)){"Unknown", "", ""};
 		break;
@@ -2943,63 +3017,6 @@
 }
 
 /**
- * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
- * @phba: pointer to lpfc hba data structure.
- *
- * This function uses the QUERY_FW_CFG mailbox command to determine if the
- * firmware loaded supports FCoE. A return of zero indicates that the mailbox
- * was successful and the firmware supports FCoE. Any other return indicates
- * a error. It is assumed that this function will be called before interrupts
- * are enabled.
- **/
-static int
-lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
-{
-	int rc = 0;
-	LPFC_MBOXQ_t *mboxq;
-	struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
-	uint32_t length;
-	uint32_t shdr_status, shdr_add_status;
-
-	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mboxq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2621 Failed to allocate mbox for "
-				"query firmware config cmd\n");
-		return -ENOMEM;
-	}
-	query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
-	length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
-		  sizeof(struct lpfc_sli4_cfg_mhdr));
-	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
-			 length, LPFC_SLI4_MBX_EMBED);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	/* The IOCTL status is embedded in the mailbox subheader. */
-	shdr_status = bf_get(lpfc_mbox_hdr_status,
-			     &query_fw_cfg->header.cfg_shdr.response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-				 &query_fw_cfg->header.cfg_shdr.response);
-	if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2622 Query Firmware Config failed "
-				"mbx status x%x, status x%x add_status x%x\n",
-				rc, shdr_status, shdr_add_status);
-		return -EINVAL;
-	}
-	if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2623 FCoE Function not supported by firmware. "
-				"Function mode = %08x\n",
-				query_fw_cfg->function_mode);
-		return -EINVAL;
-	}
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mboxq, phba->mbox_mem_pool);
-	return 0;
-}
-
-/**
  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
@@ -3051,20 +3068,20 @@
 	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
 	case LPFC_ASYNC_LINK_STATUS_DOWN:
 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
-		att_type = AT_LINK_DOWN;
+		att_type = LPFC_ATT_LINK_DOWN;
 		break;
 	case LPFC_ASYNC_LINK_STATUS_UP:
 		/* Ignore physical link up events - wait for logical link up */
-		att_type = AT_RESERVED;
+		att_type = LPFC_ATT_RESERVED;
 		break;
 	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
-		att_type = AT_LINK_UP;
+		att_type = LPFC_ATT_LINK_UP;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0399 Invalid link attention type: x%x\n",
 				bf_get(lpfc_acqe_link_status, acqe_link));
-		att_type = AT_RESERVED;
+		att_type = LPFC_ATT_RESERVED;
 		break;
 	}
 	return att_type;
@@ -3088,36 +3105,32 @@
 
 	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
 	case LPFC_ASYNC_LINK_SPEED_ZERO:
-		link_speed = LA_UNKNW_LINK;
-		break;
 	case LPFC_ASYNC_LINK_SPEED_10MBPS:
-		link_speed = LA_UNKNW_LINK;
-		break;
 	case LPFC_ASYNC_LINK_SPEED_100MBPS:
-		link_speed = LA_UNKNW_LINK;
+		link_speed = LPFC_LINK_SPEED_UNKNOWN;
 		break;
 	case LPFC_ASYNC_LINK_SPEED_1GBPS:
-		link_speed = LA_1GHZ_LINK;
+		link_speed = LPFC_LINK_SPEED_1GHZ;
 		break;
 	case LPFC_ASYNC_LINK_SPEED_10GBPS:
-		link_speed = LA_10GHZ_LINK;
+		link_speed = LPFC_LINK_SPEED_10GHZ;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0483 Invalid link-attention link speed: x%x\n",
 				bf_get(lpfc_acqe_link_speed, acqe_link));
-		link_speed = LA_UNKNW_LINK;
+		link_speed = LPFC_LINK_SPEED_UNKNOWN;
 		break;
 	}
 	return link_speed;
 }
 
 /**
- * lpfc_sli4_async_link_evt - Process the asynchronous link event
+ * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async link completion queue entry.
  *
- * This routine is to handle the SLI4 asynchronous link event.
+ * This routine is to handle the SLI4 asynchronous FCoE link event.
  **/
 static void
 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
@@ -3126,11 +3139,12 @@
 	struct lpfc_dmabuf *mp;
 	LPFC_MBOXQ_t *pmb;
 	MAILBOX_t *mb;
-	READ_LA_VAR *la;
+	struct lpfc_mbx_read_top *la;
 	uint8_t att_type;
+	int rc;
 
 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
-	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
+	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
 		return;
 	phba->fcoe_eventtag = acqe_link->event_tag;
 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -3161,28 +3175,11 @@
 	/* Update link event statistics */
 	phba->sli.slistat.link_event++;
 
-	/* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
-	lpfc_read_la(phba, pmb, mp);
+	/* Create lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
 	pmb->vport = phba->pport;
 
-	/* Parse and translate status field */
-	mb = &pmb->u.mb;
-	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
-
-	/* Parse and translate link attention fields */
-	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
-	la->eventTag = acqe_link->event_tag;
-	la->attType = att_type;
-	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
-
-	/* Fake the the following irrelvant fields */
-	la->topology = TOPOLOGY_PT_PT;
-	la->granted_AL_PA = 0;
-	la->il = 0;
-	la->pb = 0;
-	la->fa = 0;
-	la->mm = 0;
-
 	/* Keep the link status for extra SLI4 state machine reference */
 	phba->sli4_hba.link_state.speed =
 				bf_get(lpfc_acqe_link_speed, acqe_link);
@@ -3190,15 +3187,61 @@
 				bf_get(lpfc_acqe_link_duplex, acqe_link);
 	phba->sli4_hba.link_state.status =
 				bf_get(lpfc_acqe_link_status, acqe_link);
-	phba->sli4_hba.link_state.physical =
-				bf_get(lpfc_acqe_link_physical, acqe_link);
+	phba->sli4_hba.link_state.type =
+				bf_get(lpfc_acqe_link_type, acqe_link);
+	phba->sli4_hba.link_state.number =
+				bf_get(lpfc_acqe_link_number, acqe_link);
 	phba->sli4_hba.link_state.fault =
 				bf_get(lpfc_acqe_link_fault, acqe_link);
 	phba->sli4_hba.link_state.logical_speed =
-				bf_get(lpfc_acqe_qos_link_speed, acqe_link);
+			bf_get(lpfc_acqe_logical_link_speed, acqe_link);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
+			"LA Type:x%x Port Type:%d Port Number:%d Logical "
+			"speed:%dMbps Fault:%d\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.duplex,
+			phba->sli4_hba.link_state.status,
+			phba->sli4_hba.link_state.type,
+			phba->sli4_hba.link_state.number,
+			phba->sli4_hba.link_state.logical_speed * 10,
+			phba->sli4_hba.link_state.fault);
+	/*
+	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
+	 * topology info. Note: Optional for non-FC-AL ports.
+	 */
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			goto out_free_dmabuf;
+		return;
+	}
+	/*
+	 * For FCoE Mode: fill in all the topology information we need and call
+	 * the READ_TOPOLOGY completion routine to continue without actually
+	 * sending the READ_TOPOLOGY mailbox command to the port.
+	 */
+	/* Parse and translate status field */
+	mb = &pmb->u.mb;
+	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+	/* Parse and translate link attention fields */
+	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
+	la->eventTag = acqe_link->event_tag;
+	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
+	bf_set(lpfc_mbx_read_top_link_spd, la,
+	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
+
+	/* Fake the following irrelevant fields */
+	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
+	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
+	bf_set(lpfc_mbx_read_top_il, la, 0);
+	bf_set(lpfc_mbx_read_top_pb, la, 0);
+	bf_set(lpfc_mbx_read_top_fa, la, 0);
+	bf_set(lpfc_mbx_read_top_mm, la, 0);
 
 	/* Invoke the lpfc_handle_latt mailbox command callback function */
-	lpfc_mbx_cmpl_read_la(phba, pmb);
+	lpfc_mbx_cmpl_read_topology(phba, pmb);
 
 	return;
 
@@ -3209,6 +3252,118 @@
 }
 
 /**
+ * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fc: pointer to the async fc completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous FC event. It will simply log
+ * that the event was received and then issue a read_topology mailbox command so
+ * that the rest of the driver will treat it the same as SLI3.
+ **/
+static void
+lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
+{
+	struct lpfc_dmabuf *mp;
+	LPFC_MBOXQ_t *pmb;
+	int rc;
+
+	if (bf_get(lpfc_trailer_type, acqe_fc) !=
+	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2895 Non-FC link event detected (%d)\n",
+				bf_get(lpfc_trailer_type, acqe_fc));
+		return;
+	}
+	/* Keep the link status for extra SLI4 state machine reference */
+	phba->sli4_hba.link_state.speed =
+				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
+	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
+	phba->sli4_hba.link_state.topology =
+				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
+	phba->sli4_hba.link_state.status =
+				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
+	phba->sli4_hba.link_state.type =
+				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
+	phba->sli4_hba.link_state.number =
+				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
+	phba->sli4_hba.link_state.fault =
+				bf_get(lpfc_acqe_link_fault, acqe_fc);
+	phba->sli4_hba.link_state.logical_speed =
+				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
+			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
+			"%dMbps Fault:%d\n",
+			phba->sli4_hba.link_state.speed,
+			phba->sli4_hba.link_state.topology,
+			phba->sli4_hba.link_state.status,
+			phba->sli4_hba.link_state.type,
+			phba->sli4_hba.link_state.number,
+			phba->sli4_hba.link_state.logical_speed * 10,
+			phba->sli4_hba.link_state.fault);
+	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2897 The mboxq allocation failed\n");
+		return;
+	}
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!mp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2898 The lpfc_dmabuf allocation failed\n");
+		goto out_free_pmb;
+	}
+	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+	if (!mp->virt) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2899 The mbuf allocation failed\n");
+		goto out_free_dmabuf;
+	}
+
+	/* Cleanup any outstanding ELS commands */
+	lpfc_els_flush_all_cmd(phba);
+
+	/* Block ELS IOCBs until we have done process link event */
+	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+	/* Update link event statistics */
+	phba->sli.slistat.link_event++;
+
+	/* Create lpfc_handle_latt mailbox command from link ACQE */
+	lpfc_read_topology(phba, pmb, mp);
+	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
+	pmb->vport = phba->pport;
+
+	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		goto out_free_dmabuf;
+	return;
+
+out_free_dmabuf:
+	kfree(mp);
+out_free_pmb:
+	mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_sli: pointer to the async SLI completion queue entry.
+ *
+ * This routine is to handle the SLI4 asynchronous SLI events.
+ **/
+static void
+lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
+			"x%08x SLI Event Type:%d\n",
+			acqe_sli->event_data1, acqe_sli->event_data2,
+			bf_get(lpfc_trailer_type, acqe_sli));
+	return;
+}
+
+/**
  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
  * @vport: pointer to vport data structure.
  *
@@ -3247,10 +3402,12 @@
 		if (!ndlp)
 			return 0;
 	}
-	if (phba->pport->port_state < LPFC_FLOGI)
+	if ((phba->pport->port_state < LPFC_FLOGI) &&
+		(phba->pport->port_state != LPFC_VPORT_FAILED))
 		return NULL;
 	/* If virtual link is not yet instantiated ignore CVL */
-	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
+	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
+		&& (vport->port_state != LPFC_VPORT_FAILED))
 		return NULL;
 	shost = lpfc_shost_from_vport(vport);
 	if (!shost)
@@ -3285,17 +3442,17 @@
 }
 
 /**
- * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
+ * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
  *
  * This routine is to handle the SLI4 asynchronous fcoe event.
  **/
 static void
-lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
-			 struct lpfc_acqe_fcoe *acqe_fcoe)
+lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
+			struct lpfc_acqe_fip *acqe_fip)
 {
-	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
+	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
 	int rc;
 	struct lpfc_vport *vport;
 	struct lpfc_nodelist *ndlp;
@@ -3304,25 +3461,25 @@
 	struct lpfc_vport **vports;
 	int i;
 
-	phba->fc_eventTag = acqe_fcoe->event_tag;
-	phba->fcoe_eventtag = acqe_fcoe->event_tag;
+	phba->fc_eventTag = acqe_fip->event_tag;
+	phba->fcoe_eventtag = acqe_fip->event_tag;
 	switch (event_type) {
-	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
-	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
+	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
+	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
+		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
 					LOG_DISCOVERY,
 					"2546 New FCF event, evt_tag:x%x, "
 					"index:x%x\n",
-					acqe_fcoe->event_tag,
-					acqe_fcoe->index);
+					acqe_fip->event_tag,
+					acqe_fip->index);
 		else
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
 					LOG_DISCOVERY,
 					"2788 FCF param modified event, "
 					"evt_tag:x%x, index:x%x\n",
-					acqe_fcoe->event_tag,
-					acqe_fcoe->index);
+					acqe_fip->event_tag,
+					acqe_fip->index);
 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 			/*
 			 * During period of FCF discovery, read the FCF
@@ -3333,8 +3490,8 @@
 					LOG_DISCOVERY,
 					"2779 Read FCF (x%x) for updating "
 					"roundrobin FCF failover bmask\n",
-					acqe_fcoe->index);
-			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+					acqe_fip->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
 		}
 
 		/* If the FCF discovery is in progress, do nothing. */
@@ -3360,7 +3517,7 @@
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 				"2770 Start FCF table scan per async FCF "
 				"event, evt_tag:x%x, index:x%x\n",
-				acqe_fcoe->event_tag, acqe_fcoe->index);
+				acqe_fip->event_tag, acqe_fip->index);
 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
 						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
@@ -3369,17 +3526,17 @@
 					"command failed (x%x)\n", rc);
 		break;
 
-	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
+	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"2548 FCF Table full count 0x%x tag 0x%x\n",
-			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
-			acqe_fcoe->event_tag);
+			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
+			acqe_fip->event_tag);
 		break;
 
-	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
+	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2549 FCF (x%x) disconnected from network, "
-			"tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
+			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
 		/*
 		 * If we are in the middle of FCF failover process, clear
 		 * the corresponding FCF bit in the roundrobin bitmap.
@@ -3388,13 +3545,13 @@
 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 			spin_unlock_irq(&phba->hbalock);
 			/* Update FLOGI FCF failover eligible FCF bmask */
-			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
 			break;
 		}
 		spin_unlock_irq(&phba->hbalock);
 
 		/* If the event is not for currently used fcf do nothing */
-		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
+		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
 			break;
 
 		/*
@@ -3411,7 +3568,7 @@
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 				"2771 Start FCF fast failover process due to "
 				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
-				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
+				"\n", acqe_fip->event_tag, acqe_fip->index);
 		rc = lpfc_sli4_redisc_fcf_table(phba);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3438,12 +3595,12 @@
 			lpfc_sli4_perform_all_vport_cvl(phba);
 		}
 		break;
-	case LPFC_FCOE_EVENT_TYPE_CVL:
+	case LPFC_FIP_EVENT_TYPE_CVL:
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
-			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
+			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
 		vport = lpfc_find_vport_by_vpid(phba,
-				acqe_fcoe->index - phba->vpi_base);
+				acqe_fip->index - phba->vpi_base);
 		ndlp = lpfc_sli4_perform_vport_cvl(vport);
 		if (!ndlp)
 			break;
@@ -3494,7 +3651,7 @@
 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
 					LOG_DISCOVERY,
 					"2773 Start FCF failover per CVL, "
-					"evt_tag:x%x\n", acqe_fcoe->event_tag);
+					"evt_tag:x%x\n", acqe_fip->event_tag);
 			rc = lpfc_sli4_redisc_fcf_table(phba);
 			if (rc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3522,7 +3679,7 @@
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0288 Unknown FCoE event type 0x%x event tag "
-			"0x%x\n", event_type, acqe_fcoe->event_tag);
+			"0x%x\n", event_type, acqe_fip->event_tag);
 		break;
 	}
 }
@@ -3599,8 +3756,7 @@
 						 &cq_event->cqe.acqe_link);
 			break;
 		case LPFC_TRAILER_CODE_FCOE:
-			lpfc_sli4_async_fcoe_evt(phba,
-						 &cq_event->cqe.acqe_fcoe);
+			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
 			break;
 		case LPFC_TRAILER_CODE_DCBX:
 			lpfc_sli4_async_dcbx_evt(phba,
@@ -3610,6 +3766,12 @@
 			lpfc_sli4_async_grp5_evt(phba,
 						 &cq_event->cqe.acqe_grp5);
 			break;
+		case LPFC_TRAILER_CODE_FC:
+			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
+			break;
+		case LPFC_TRAILER_CODE_SLI:
+			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
+			break;
 		default:
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"1804 Invalid asynchrous event code: "
@@ -3948,7 +4110,7 @@
 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
-	int longs;
+	int longs, sli_family;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -3963,6 +4125,9 @@
 	init_timer(&phba->hb_tmofunc);
 	phba->hb_tmofunc.function = lpfc_hb_timeout;
 	phba->hb_tmofunc.data = (unsigned long)phba;
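+	/* RRQ timeout monitor timer, serviced by lpfc_rrq_timeout() */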
+	init_timer(&phba->rrq_tmr);
+	phba->rrq_tmr.function = lpfc_rrq_timeout;
+	phba->rrq_tmr.data = (unsigned long)phba;
 
 	psli = &phba->sli;
 	/* MBOX heartbeat timer */
@@ -4010,12 +4175,22 @@
 	 */
 	buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 		    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
-	/* Feature Level 1 hardware is limited to 2 pages */
-	if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
-	     LPFC_SLI_INTF_FEATURELEVEL1_1))
-		max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-	else
-		max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+
+	sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
+	max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+	switch (sli_family) {
+	case LPFC_SLI_INTF_FAMILY_BE2:
+	case LPFC_SLI_INTF_FAMILY_BE3:
+		/* There is a single hint for BE - 2 pages per BPL. */
+		if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
+		    LPFC_SLI_INTF_SLI_HINT1_1)
+			max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
+		break;
+	case LPFC_SLI_INTF_FAMILY_LNCR_A0:
+	case LPFC_SLI_INTF_FAMILY_LNCR_B0:
+	default:
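+		/* Lancer and unknown families keep the default max buffer size */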
+		break;
+	}
 	for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
 	     dma_buf_size < max_buf_size && buf_size > dma_buf_size;
 	     dma_buf_size = dma_buf_size << 1)
@@ -4070,6 +4245,14 @@
 	if (rc)
 		return -ENOMEM;
 
+	/* IF Type 2 ports get initialized now. */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_2) {
+		rc = lpfc_pci_function_reset(phba);
+		if (unlikely(rc))
+			return -ENODEV;
+	}
+
 	/* Create the bootstrap mailbox command */
 	rc = lpfc_create_bootstrap_mbox(phba);
 	if (unlikely(rc))
@@ -4080,19 +4263,18 @@
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
-	rc = lpfc_sli4_fw_cfg_check(phba);
-	if (unlikely(rc))
-		goto out_free_bsmbx;
-
 	/* Set up the hba's configuration parameters. */
 	rc = lpfc_sli4_read_config(phba);
 	if (unlikely(rc))
 		goto out_free_bsmbx;
 
-	/* Perform a function reset */
-	rc = lpfc_pci_function_reset(phba);
-	if (unlikely(rc))
-		goto out_free_bsmbx;
+	/* IF Type 0 ports get initialized now. */
+	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+	    LPFC_SLI_INTF_IF_TYPE_0) {
+		rc = lpfc_pci_function_reset(phba);
+		if (unlikely(rc))
+			goto out_free_bsmbx;
+	}
 
 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 						       GFP_KERNEL);
@@ -5190,97 +5372,183 @@
 int
 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
 {
-	struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
-	int i, port_error = -ENODEV;
+	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
+	struct lpfc_register reg_data;
+	int i, port_error = 0;
+	uint32_t if_type;
 
-	if (!phba->sli4_hba.STAregaddr)
+	if (!phba->sli4_hba.PSMPHRregaddr)
 		return -ENODEV;
 
 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
 	for (i = 0; i < 3000; i++) {
-		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
-		/* Encounter fatal POST error, break out */
-		if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
+		portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr);
+		if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) {
+			/* Port has a fatal POST error, break out */
 			port_error = -ENODEV;
 			break;
 		}
-		if (LPFC_POST_STAGE_ARMFW_READY ==
-		    bf_get(lpfc_hst_state_port_status, &sta_reg)) {
-			port_error = 0;
+		if (LPFC_POST_STAGE_PORT_READY ==
+		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
 			break;
-		}
 		msleep(10);
 	}
 
-	if (port_error)
+	/*
+	 * If there was a port error during POST, then don't proceed with
+	 * other register reads as the data may not be valid.  Just exit.
+	 */
+	if (port_error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"1408 Failure HBA POST Status: sta_reg=0x%x, "
-			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
-			"dl=x%x, pstatus=x%x\n", sta_reg.word0,
-			bf_get(lpfc_hst_state_perr, &sta_reg),
-			bf_get(lpfc_hst_state_sfi, &sta_reg),
-			bf_get(lpfc_hst_state_nip, &sta_reg),
-			bf_get(lpfc_hst_state_ipc, &sta_reg),
-			bf_get(lpfc_hst_state_xrom, &sta_reg),
-			bf_get(lpfc_hst_state_dl, &sta_reg),
-			bf_get(lpfc_hst_state_port_status, &sta_reg));
-
-	/* Log device information */
-	phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
-	if (bf_get(lpfc_sli_intf_valid,
-		   &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
+			"1408 Port Failed POST - portsmphr=0x%x, "
+			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
+			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
+			portsmphr_reg.word0,
+			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
+			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
+	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
-				"FeatureL1=0x%x, FeatureL2=0x%x\n",
+				"2534 Device Info: SLIFamily=0x%x, "
+				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
+				"SLIHint_2=0x%x, FT=0x%x\n",
 				bf_get(lpfc_sli_intf_sli_family,
 				       &phba->sli4_hba.sli_intf),
 				bf_get(lpfc_sli_intf_slirev,
 				       &phba->sli4_hba.sli_intf),
-				bf_get(lpfc_sli_intf_featurelevel1,
+				bf_get(lpfc_sli_intf_if_type,
 				       &phba->sli4_hba.sli_intf),
-				bf_get(lpfc_sli_intf_featurelevel2,
+				bf_get(lpfc_sli_intf_sli_hint1,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_sli_hint2,
+				       &phba->sli4_hba.sli_intf),
+				bf_get(lpfc_sli_intf_func_type,
 				       &phba->sli4_hba.sli_intf));
+		/*
+		 * Check for other Port errors during the initialization
+		 * process.  Fail the load if the port did not come up
+		 * correctly.
+		 */
+		if_type = bf_get(lpfc_sli_intf_if_type,
+				 &phba->sli4_hba.sli_intf);
+		switch (if_type) {
+		case LPFC_SLI_INTF_IF_TYPE_0:
+			phba->sli4_hba.ue_mask_lo =
+			      readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
+			phba->sli4_hba.ue_mask_hi =
+			      readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
+			uerrlo_reg.word0 =
+			      readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
+			uerrhi_reg.word0 =
+				readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
+			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"1422 Unrecoverable Error "
+						"Detected during POST "
+						"uerr_lo_reg=0x%x, "
+						"uerr_hi_reg=0x%x, "
+						"ue_mask_lo_reg=0x%x, "
+						"ue_mask_hi_reg=0x%x\n",
+						uerrlo_reg.word0,
+						uerrhi_reg.word0,
+						phba->sli4_hba.ue_mask_lo,
+						phba->sli4_hba.ue_mask_hi);
+				port_error = -ENODEV;
+			}
+			break;
+		case LPFC_SLI_INTF_IF_TYPE_2:
+			/* Final checks.  The port status should be clean. */
+			reg_data.word0 =
+				readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
+			if (bf_get(lpfc_sliport_status_err, &reg_data)) {
+				phba->work_status[0] =
+					readl(phba->sli4_hba.u.if_type2.
+					      ERR1regaddr);
+				phba->work_status[1] =
+					readl(phba->sli4_hba.u.if_type2.
+					      ERR2regaddr);
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2888 Port Error Detected "
+					"during POST: "
+					"port status reg 0x%x, "
+					"port_smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					reg_data.word0,
+					portsmphr_reg.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+				port_error = -ENODEV;
+			}
+			break;
+		case LPFC_SLI_INTF_IF_TYPE_1:
+		default:
+			break;
+		}
 	}
-	phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
-	phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
-	/* With uncoverable error, log the error message and return error */
-	uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
-	uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
-	if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
-	    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1422 HBA Unrecoverable error: "
-				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
-				uerrlo_reg.word0, uerrhi_reg.word0,
-				phba->sli4_hba.ue_mask_lo,
-				phba->sli4_hba.ue_mask_hi);
-		return -ENODEV;
-	}
-
 	return port_error;
 }
 
 /**
  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
  * @phba: pointer to lpfc hba data structure.
+ * @if_type:  The SLI4 interface type getting configured.
  *
  * This routine is invoked to set up SLI4 BAR0 PCI config space register
  * memory map.
  **/
 static void
-lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 {
-	phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_UERR_STATUS_LO;
-	phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_UERR_STATUS_HI;
-	phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_UE_MASK_LO;
-	phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_UE_MASK_HI;
-	phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
-					LPFC_SLI_INTF;
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		phba->sli4_hba.u.if_type0.UERRLOregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
+		phba->sli4_hba.u.if_type0.UERRHIregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
+		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
+		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
+		phba->sli4_hba.SLIINTFregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
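+		/* if_type 2 ports expose the error, control, status, semaphore,
+		 * doorbell and bootstrap mailbox registers through BAR0.
+		 */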
+		phba->sli4_hba.u.if_type2.ERR1regaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
+		phba->sli4_hba.u.if_type2.ERR2regaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
+		phba->sli4_hba.u.if_type2.CTRLregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
+		phba->sli4_hba.u.if_type2.STATUSregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
+		phba->sli4_hba.SLIINTFregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
+		phba->sli4_hba.PSMPHRregaddr =
+		     phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
+		phba->sli4_hba.RQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
+		phba->sli4_hba.WQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
+		phba->sli4_hba.EQCQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
+		phba->sli4_hba.MQDBregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
+		phba->sli4_hba.BMBXregaddr =
+			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		dev_printk(KERN_ERR, &phba->pcidev->dev,
+			   "FATAL - unsupported SLI4 interface type - %d\n",
+			   if_type);
+		break;
+	}
 }
 
 /**
@@ -5293,16 +5561,14 @@
 static void
 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
 {
-
-	phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-				    LPFC_HST_STATE;
+	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+		LPFC_SLIPORT_IF0_SMPHR;
 	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-				    LPFC_HST_ISR0;
+		LPFC_HST_ISR0;
 	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-				    LPFC_HST_IMR0;
+		LPFC_HST_IMR0;
 	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
-				     LPFC_HST_ISCR0;
-	return;
+		LPFC_HST_ISCR0;
 }
 
 /**
@@ -5542,11 +5808,12 @@
 }
 
 /**
- * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
+ * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to setup the host-side endian order to the
- * HBA consistent with the SLI-4 interface spec.
+ * This routine is invoked to set up the port-side endian order when
+ * the port if_type is 0.  This routine has no function for other
+ * if_types.
  *
  * Return codes
  * 	0 - successful
@@ -5557,34 +5824,44 @@
 lpfc_setup_endian_order(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *mboxq;
-	uint32_t rc = 0;
+	uint32_t if_type, rc = 0;
 	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
 				      HOST_ENDIAN_HIGH_WORD1};
 
-	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mboxq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0492 Unable to allocate memory for issuing "
-				"SLI_CONFIG_SPECIAL mailbox command\n");
-		return -ENOMEM;
-	}
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+		if (!mboxq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0492 Unable to allocate memory for "
+					"issuing SLI_CONFIG_SPECIAL mailbox "
+					"command\n");
+			return -ENOMEM;
+		}
 
-	/*
-	 * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
-	 * words to contain special data values and no other data.
-	 */
-	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
-	memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0493 SLI_CONFIG_SPECIAL mailbox failed with "
-				"status x%x\n",
-				rc);
-		rc = -EIO;
+		/*
+		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
+		 * two words to contain special data values and no other data.
+		 */
+		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0493 SLI_CONFIG_SPECIAL mailbox "
+					"failed with status x%x\n",
+					rc);
+			rc = -EIO;
+		}
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
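+		/* No endian setup is performed for these if_types */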
+		break;
 	}
-
-	mempool_free(mboxq, phba->mbox_mem_pool);
 	return rc;
 }
 
@@ -6416,36 +6693,124 @@
 lpfc_pci_function_reset(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *mboxq;
-	uint32_t rc = 0;
+	uint32_t rc = 0, if_type;
 	uint32_t shdr_status, shdr_add_status;
+	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
 	union lpfc_sli4_cfg_shdr *shdr;
+	struct lpfc_register reg_data;
 
-	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mboxq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0494 Unable to allocate memory for issuing "
-				"SLI_FUNCTION_RESET mailbox command\n");
-		return -ENOMEM;
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						       GFP_KERNEL);
+		if (!mboxq) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0494 Unable to allocate memory for "
+					"issuing SLI_FUNCTION_RESET mailbox "
+					"command\n");
+			return -ENOMEM;
+		}
+
+		/* Setup PCI function reset mailbox-ioctl command */
+		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+				 LPFC_SLI4_MBX_EMBED);
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		shdr = (union lpfc_sli4_cfg_shdr *)
+			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+					 &shdr->response);
+		if (rc != MBX_TIMEOUT)
+			mempool_free(mboxq, phba->mbox_mem_pool);
+		if (shdr_status || shdr_add_status || rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"0495 SLI_FUNCTION_RESET mailbox "
+					"failed with status x%x add_status x%x,"
+					" mbx status x%x\n",
+					shdr_status, shdr_add_status, rc);
+			rc = -ENXIO;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		for (num_resets = 0;
+		     num_resets < MAX_IF_TYPE_2_RESETS;
+		     num_resets++) {
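+			/* Write INIT_PORT and the little endian setting to the
+			 * SLIPORT control register to request a port reset.
+			 */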
+			reg_data.word0 = 0;
+			bf_set(lpfc_sliport_ctrl_end, &reg_data,
+			       LPFC_SLIPORT_LITTLE_ENDIAN);
+			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
+			       LPFC_SLIPORT_INIT_PORT);
+			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
+			       CTRLregaddr);
+
+			/*
+			 * Poll the Port Status Register and wait for RDY for
+			 * up to 10 seconds.  If the port doesn't respond, treat
+			 * it as an error.  If the port responds with RN, start
+			 * the loop again.
+			 */
+			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
+				reg_data.word0 =
+					readl(phba->sli4_hba.u.if_type2.
+					      STATUSregaddr);
+				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+					break;
+				if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+					reset_again++;
+					break;
+				}
+				msleep(10);
+			}
+
+			/*
+			 * If the port responds to the init request with
+			 * reset needed, delay for a bit and restart the loop.
+			 */
+			if (reset_again) {
+				msleep(10);
+				reset_again = 0;
+				continue;
+			}
+
+			/* Detect any port errors. */
+			reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
+					       STATUSregaddr);
+			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
+			    (rdy_chk >= 1000)) {
+				phba->work_status[0] = readl(
+					phba->sli4_hba.u.if_type2.ERR1regaddr);
+				phba->work_status[1] = readl(
+					phba->sli4_hba.u.if_type2.ERR2regaddr);
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2890 Port Error Detected "
+					"during Port Reset: "
+					"port status reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					reg_data.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+				rc = -ENODEV;
+			}
+
+			/*
+			 * Terminate the outer loop if the port reported ready
+			 * within 10 seconds.
+			 */
+			if (rdy_chk < 1000)
+				break;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
+		break;
 	}
 
-	/* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
-	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-			 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
-			 LPFC_SLI4_MBX_EMBED);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	shdr = (union lpfc_sli4_cfg_shdr *)
-		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
-	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mboxq, phba->mbox_mem_pool);
-	if (shdr_status || shdr_add_status || rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"0495 SLI_FUNCTION_RESET mailbox failed with "
-				"status x%x add_status x%x, mbx status x%x\n",
-				shdr_status, shdr_add_status, rc);
-		rc = -ENXIO;
-	}
+	/* Catch the not-ready port failure after a port reset. */
+	if (num_resets >= MAX_IF_TYPE_2_RESETS)
+		rc = -ENODEV;
+
 	return rc;
 }
 
@@ -6536,6 +6901,7 @@
 	struct pci_dev *pdev;
 	unsigned long bar0map_len, bar1map_len, bar2map_len;
 	int error = -ENODEV;
+	uint32_t if_type;
 
 	/* Obtain PCI device reference */
 	if (!phba->pcidev)
@@ -6552,61 +6918,105 @@
 		}
 	}
 
-	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
-	 * number of bytes required by each mapping. They are actually
-	 * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
+	/*
+	 * The BARs and register set definitions and offset locations are
+	 * dependent on the if_type.
+	 */
+	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
+				  &phba->sli4_hba.sli_intf.word0)) {
+		return error;
+	}
+
+	/* There is no SLI3 failback for SLI4 devices. */
+	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
+	    LPFC_SLI_INTF_VALID) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2894 SLI_INTF reg contents invalid "
+				"sli_intf reg 0x%x\n",
+				phba->sli4_hba.sli_intf.word0);
+		return error;
+	}
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	/*
+	 * Get the bus address of the SLI4 device BAR regions and the
+	 * number of bytes required by each mapping. The mapping of the
+	 * particular PCI BAR regions is dependent on the type of
+	 * SLI4 device.
 	 */
 	if (pci_resource_start(pdev, 0)) {
 		phba->pci_bar0_map = pci_resource_start(pdev, 0);
 		bar0map_len = pci_resource_len(pdev, 0);
+
+		/*
+		 * Map SLI4 PCI Config Space Register base to a kernel virtual
+		 * addr
+		 */
+		phba->sli4_hba.conf_regs_memmap_p =
+			ioremap(phba->pci_bar0_map, bar0map_len);
+		if (!phba->sli4_hba.conf_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				   "ioremap failed for SLI4 PCI config "
+				   "registers.\n");
+			goto out;
+		}
+		/* Set up BAR0 PCI config space register memory map */
+		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	} else {
 		phba->pci_bar0_map = pci_resource_start(pdev, 1);
 		bar0map_len = pci_resource_len(pdev, 1);
-	}
-	phba->pci_bar1_map = pci_resource_start(pdev, 2);
-	bar1map_len = pci_resource_len(pdev, 2);
-
-	phba->pci_bar2_map = pci_resource_start(pdev, 4);
-	bar2map_len = pci_resource_len(pdev, 4);
-
-	/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
-	phba->sli4_hba.conf_regs_memmap_p =
+		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+			dev_printk(KERN_ERR, &pdev->dev,
+			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
+			goto out;
+		}
+		phba->sli4_hba.conf_regs_memmap_p =
 				ioremap(phba->pci_bar0_map, bar0map_len);
-	if (!phba->sli4_hba.conf_regs_memmap_p) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "ioremap failed for SLI4 PCI config registers.\n");
-		goto out;
+		if (!phba->sli4_hba.conf_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
+				"ioremap failed for SLI4 PCI config "
+				"registers.\n");
+			goto out;
+		}
+		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	}
 
-	/* Map SLI4 HBA Control Register base to a kernel virtual address. */
-	phba->sli4_hba.ctrl_regs_memmap_p =
+	if (pci_resource_start(pdev, 2)) {
+		/*
+		 * Map SLI4 if type 0 HBA Control Register base to a kernel
+		 * virtual address and setup the registers.
+		 */
+		phba->pci_bar1_map = pci_resource_start(pdev, 2);
+		bar1map_len = pci_resource_len(pdev, 2);
+		phba->sli4_hba.ctrl_regs_memmap_p =
 				ioremap(phba->pci_bar1_map, bar1map_len);
-	if (!phba->sli4_hba.ctrl_regs_memmap_p) {
-		dev_printk(KERN_ERR, &pdev->dev,
+		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
 			   "ioremap failed for SLI4 HBA control registers.\n");
-		goto out_iounmap_conf;
+			goto out_iounmap_conf;
+		}
+		lpfc_sli4_bar1_register_memmap(phba);
 	}
 
-	/* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
-	phba->sli4_hba.drbl_regs_memmap_p =
+	if (pci_resource_start(pdev, 4)) {
+		/*
+		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
+		 * virtual address and setup the registers.
+		 */
+		phba->pci_bar2_map = pci_resource_start(pdev, 4);
+		bar2map_len = pci_resource_len(pdev, 4);
+		phba->sli4_hba.drbl_regs_memmap_p =
 				ioremap(phba->pci_bar2_map, bar2map_len);
-	if (!phba->sli4_hba.drbl_regs_memmap_p) {
-		dev_printk(KERN_ERR, &pdev->dev,
+		if (!phba->sli4_hba.drbl_regs_memmap_p) {
+			dev_printk(KERN_ERR, &pdev->dev,
 			   "ioremap failed for SLI4 HBA doorbell registers.\n");
-		goto out_iounmap_ctrl;
+			goto out_iounmap_ctrl;
+		}
+		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+		if (error)
+			goto out_iounmap_all;
 	}
 
-	/* Set up BAR0 PCI config space register memory map */
-	lpfc_sli4_bar0_register_memmap(phba);
-
-	/* Set up BAR1 register memory map */
-	lpfc_sli4_bar1_register_memmap(phba);
-
-	/* Set up BAR2 register memory map */
-	error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
-	if (error)
-		goto out_iounmap_all;
-
 	return 0;
 
 out_iounmap_all:
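/*
 * Minimal sketch, not part of the patch: the ioremap()/goto-unwind shape
 * used by the BAR setup above, where each successfully mapped region gets
 * its own unwind path so a later failure releases only what was actually
 * mapped.  example_map_bars() and its parameters are hypothetical.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

static int example_map_bars(struct pci_dev *pdev,
			    void __iomem **conf, void __iomem **drbl)
{
	*conf = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!*conf)
		return -ENOMEM;

	if (pci_resource_start(pdev, 4)) {
		*drbl = ioremap(pci_resource_start(pdev, 4),
				pci_resource_len(pdev, 4));
		if (!*drbl)
			goto out_unmap_conf;	/* release the earlier mapping */
	}
	return 0;

out_unmap_conf:
	iounmap(*conf);
	return -ENOMEM;
}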
@@ -7661,7 +8071,7 @@
 	 * the HBA.
 	 */
 
-	/* HBA interrupt will be diabled after this call */
+	/* HBA interrupt will be disabled after this call */
 	lpfc_sli_hba_down(phba);
 	/* Stop kthread signal shall trigger work_done one more time */
 	kthread_stop(phba->worker_thread);
@@ -8149,6 +8559,8 @@
 		goto out_unset_driver_resource_s4;
 	}
 
+	INIT_LIST_HEAD(&phba->active_rrq_list);
+
 	/* Set up common device driver resources */
 	error = lpfc_setup_driver_resource_phase2(phba);
 	if (error) {
@@ -8218,7 +8630,11 @@
 				"0451 Configure interrupt mode (%d) "
 				"failed active interrupt test.\n",
 				intr_mode);
-		/* Unset the preivous SLI-4 HBA setup */
+		/* Unset the previous SLI-4 HBA setup. */
+		/*
+		 * TODO:  Is this operation compatible with IF TYPE 2
+		 * devices?  All port state is deleted and cleared.
+		 */
 		lpfc_sli4_unset_hba(phba);
 		/* Try next level of interrupt mode */
 		cfg_mode = --intr_mode;
@@ -8990,6 +9406,10 @@
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index bb59e92..e3b790e 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -33,7 +33,7 @@
 #define LOG_FCP_ERROR	0x00001000	/* log errors, not underruns */
 #define LOG_LIBDFC	0x00002000	/* Libdfc events */
 #define LOG_VPORT	0x00004000	/* NPIV events */
-#define LOF_SECURITY	0x00008000	/* Security events */
+#define LOG_SECURITY	0x00008000	/* Security events */
 #define LOG_EVENT	0x00010000	/* CT,TEMP,DUMP, logging */
 #define LOG_FIP		0x00020000	/* FIP events */
 #define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 62d0957..23403c6 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -263,18 +263,19 @@
 }
 
 /**
- * lpfc_read_la - Prepare a mailbox command for reading HBA link attention
+ * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
  * @mp: DMA buffer memory for reading the link attention information into.
  *
- * The read link attention mailbox command is issued to read the Link Event
- * Attention information indicated by the HBA port when the Link Event bit
- * of the Host Attention (HSTATT) register is set to 1. A Link Event
+ * The read topology mailbox command is issued to read the link topology
+ * information indicated by the HBA port when the Link Event bit of the Host
+ * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
+ * Attention ACQE is received from the port (For SLI-4). A Link Event
  * Attention occurs based on an exception detected at the Fibre Channel link
  * interface.
  *
- * This routine prepares the mailbox command for reading HBA link attention
+ * This routine prepares the mailbox command for reading HBA link topology
  * information. A DMA memory has been set aside and address passed to the
  * HBA through @mp for the HBA to DMA link attention information into the
  * memory as part of the execution of the mailbox command.
@@ -283,7 +284,8 @@
  *    0 - Success (currently always return 0)
  **/
 int
-lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
+lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+		   struct lpfc_dmabuf *mp)
 {
 	MAILBOX_t *mb;
 	struct lpfc_sli *psli;
@@ -293,15 +295,15 @@
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
 	INIT_LIST_HEAD(&mp->list);
-	mb->mbxCommand = MBX_READ_LA64;
-	mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
-	mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
-	mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys);
+	mb->mbxCommand = MBX_READ_TOPOLOGY;
+	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
+	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
+	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
 
 	/* Save address for later completion and set the owner to host so that
 	 * the FW knows this mailbox is available for processing.
 	 */
-	pmb->context1 = (uint8_t *) mp;
+	pmb->context1 = (uint8_t *)mp;
 	mb->mbxOwner = OWN_HOST;
 	return (0);
 }
@@ -516,18 +518,33 @@
 	vpd = &phba->vpd;
 	if (vpd->rev.feaLevelHigh >= 0x02){
 		switch(linkspeed){
-			case LINK_SPEED_1G:
-			case LINK_SPEED_2G:
-			case LINK_SPEED_4G:
-			case LINK_SPEED_8G:
-				mb->un.varInitLnk.link_flags |=
-							FLAGS_LINK_SPEED;
-				mb->un.varInitLnk.link_speed = linkspeed;
+		case LPFC_USER_LINK_SPEED_1G:
+			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
 			break;
-			case LINK_SPEED_AUTO:
-			default:
-				mb->un.varInitLnk.link_speed =
-							LINK_SPEED_AUTO;
+		case LPFC_USER_LINK_SPEED_2G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
+			break;
+		case LPFC_USER_LINK_SPEED_4G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
+			break;
+		case LPFC_USER_LINK_SPEED_8G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
+			break;
+		case LPFC_USER_LINK_SPEED_10G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
+			break;
+		case LPFC_USER_LINK_SPEED_16G:
+			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
+			break;
+		case LPFC_USER_LINK_SPEED_AUTO:
+		default:
+			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
 			break;
 		}
 
@@ -693,7 +710,7 @@
  * @did: remote port identifier.
  * @param: pointer to memory holding the server parameters.
  * @pmb: pointer to the driver internal queue element for mailbox command.
- * @flag: action flag to be passed back for the complete function.
+ * @rpi: the rpi to use in the registration (usually only used for SLI4).
  *
  * The registration login mailbox command is used to register an N_Port or
  * F_Port login. This registration allows the HBA to cache the remote N_Port
@@ -712,7 +729,7 @@
  **/
 int
 lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
-	       uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
+	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
 {
 	MAILBOX_t *mb = &pmb->u.mb;
 	uint8_t *sparam;
@@ -722,17 +739,13 @@
 
 	mb->un.varRegLogin.rpi = 0;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
+		mb->un.varRegLogin.rpi = rpi;
 		if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
 			return 1;
 	}
-
 	mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
 	mb->un.varRegLogin.did = did;
-	mb->un.varWords[30] = flag;	/* Set flag to issue action on cmpl */
-
 	mb->mbxOwner = OWN_HOST;
-
 	/* Get a buffer to hold NPorts Service Parameters */
 	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
 	if (mp)
@@ -743,7 +756,7 @@
 		/* REG_LOGIN: no buffers */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
-				"flag x%x\n", vpi, did, flag);
+				"rpi x%x\n", vpi, did, rpi);
 		return (1);
 	}
 	INIT_LIST_HEAD(&mp->list);
@@ -1918,11 +1931,14 @@
 	struct lpfc_mbx_init_vfi *init_vfi;
 
 	memset(mbox, 0, sizeof(*mbox));
+	mbox->vport = vport;
 	init_vfi = &mbox->u.mqe.un.init_vfi;
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
 	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
 	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
 	bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
+	bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
 	bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 8f879e4..cbb48ee 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -113,11 +113,16 @@
 		goto fail_free_mbox_pool;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
+		phba->rrq_pool =
+			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
+						sizeof(struct lpfc_node_rrq));
+		if (!phba->rrq_pool)
+			goto fail_free_nlp_mem_pool;
 		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
 					      phba->pcidev,
 					      LPFC_HDR_BUF_SIZE, align, 0);
 		if (!phba->lpfc_hrb_pool)
-			goto fail_free_nlp_mem_pool;
+			goto fail_free_rrq_mem_pool;
 
 		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
 					      phba->pcidev,
@@ -147,6 +152,9 @@
  fail_free_hrb_pool:
 	pci_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
+ fail_free_rrq_mem_pool:
+	mempool_destroy(phba->rrq_pool);
+	phba->rrq_pool = NULL;
  fail_free_nlp_mem_pool:
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
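/*
 * Sketch only, not part of the patch: creating a kmalloc-backed mempool and
 * unwinding it when a later setup step fails, mirroring the rrq_pool
 * handling above.  The pool size, element type and example_later_step()
 * are hypothetical.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/types.h>

struct example_rrq {
	struct list_head list;
	u16 xritag;
};

static mempool_t *example_rrq_pool;

static int example_later_step(void)	/* placeholder for a later setup step */
{
	return 0;
}

static int example_mem_setup(void)
{
	example_rrq_pool = mempool_create_kmalloc_pool(64,
						sizeof(struct example_rrq));
	if (!example_rrq_pool)
		return -ENOMEM;

	if (example_later_step()) {
		mempool_destroy(example_rrq_pool);	/* unwind on failure */
		example_rrq_pool = NULL;
		return -ENOMEM;
	}
	return 0;
}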
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bccc9c6..d85a742 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -386,7 +386,7 @@
 		goto out;
 
 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
-			    (uint8_t *) sp, mbox, 0);
+			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
 	if (rc) {
 		mempool_free(mbox, phba->mbox_mem_pool);
 		goto out;
@@ -632,7 +632,7 @@
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-	if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
+	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
 		return 0;
 	}
@@ -968,7 +968,7 @@
 	lpfc_unreg_rpi(vport, ndlp);
 
 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
-			   (uint8_t *) sp, mbox, 0) == 0) {
+			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
 		switch (ndlp->nlp_DID) {
 		case NameServer_DID:
 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -1338,12 +1338,6 @@
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
-			if (phba->sli_rev == LPFC_SLI_REV4) {
-				spin_unlock_irq(&phba->hbalock);
-				lpfc_sli4_free_rpi(phba,
-					mb->u.mb.un.varRegLogin.rpi);
-				spin_lock_irq(&phba->hbalock);
-			}
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1426,7 +1420,7 @@
 	}
 
 	ndlp->nlp_rpi = mb->un.varWords[0];
-	ndlp->nlp_flag |= NLP_RPI_VALID;
+	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 
 	/* Only if we are not a fabric nport do we issue PRLI */
 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -2027,7 +2021,7 @@
 
 	if (!mb->mbxStatus) {
 		ndlp->nlp_rpi = mb->un.varWords[0];
-		ndlp->nlp_flag |= NLP_RPI_VALID;
+		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
 	} else {
 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
 			lpfc_drop_node(vport, ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 581837b..c97751c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -621,10 +621,13 @@
 			  struct sli4_wcqe_xri_aborted *axri)
 {
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
 	struct lpfc_scsi_buf *psb, *next_psb;
 	unsigned long iflag = 0;
 	struct lpfc_iocbq *iocbq;
 	int i;
+	struct lpfc_nodelist *ndlp;
+	int rrq_empty = 0;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	spin_lock_irqsave(&phba->hbalock, iflag);
@@ -637,8 +640,14 @@
 			psb->status = IOSTAT_SUCCESS;
 			spin_unlock(
 				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			ndlp = psb->rdata->pnode;
+			rrq_empty = list_empty(&phba->active_rrq_list);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
+			if (ndlp)
+				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
 			lpfc_release_scsi_buf_s4(phba, psb);
+			if (rrq_empty)
+				lpfc_worker_wake_up(phba);
 			return;
 		}
 	}
@@ -914,7 +923,7 @@
 }
 
 /**
- * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
  *
  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
@@ -925,7 +934,7 @@
  *   Pointer to lpfc_scsi_buf - Success
  **/
 static struct lpfc_scsi_buf*
-lpfc_get_scsi_buf(struct lpfc_hba * phba)
+lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
@@ -941,6 +950,67 @@
 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 	return  lpfc_cmd;
 }
+/**
+ * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	struct  lpfc_scsi_buf *lpfc_cmd = NULL;
+	struct  lpfc_scsi_buf *start_lpfc_cmd = NULL;
+	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	unsigned long iflag = 0;
+	int found = 0;
+
+	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	while (!found && lpfc_cmd) {
+		if (lpfc_test_rrq_active(phba, ndlp,
+					 lpfc_cmd->cur_iocbq.sli4_xritag)) {
+			lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
+			spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+			list_remove_head(scsi_buf_list, lpfc_cmd,
+					 struct lpfc_scsi_buf, list);
+			spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
+						 iflag);
+			if (lpfc_cmd == start_lpfc_cmd) {
+				lpfc_cmd = NULL;
+				break;
+			} else
+				continue;
+		}
+		found = 1;
+		lpfc_cmd->seg_cnt = 0;
+		lpfc_cmd->nonsg_phys = 0;
+		lpfc_cmd->prot_seg_cnt = 0;
+	}
+	return  lpfc_cmd;
+}
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
+ * and returns to caller.
+ *
+ * Return codes:
+ *   NULL - Error
+ *   Pointer to lpfc_scsi_buf - Success
+ **/
+static struct lpfc_scsi_buf*
+lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+	return  phba->lpfc_get_scsi_buf(phba, ndlp);
+}
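/*
 * Sketch, not part of the patch: the rotate-and-skip scan performed by
 * lpfc_get_scsi_buf_s4() above, which pops the head of the free list,
 * skips entries whose XRI still has an active RRQ, and gives up once the
 * first skipped entry comes around again.  struct example_buf is a
 * hypothetical stand-in for the driver's buffer type.
 */
#include <linux/list.h>
#include <linux/types.h>

struct example_buf {
	struct list_head list;
	bool rrq_active;	/* stand-in for the RRQ test on the XRI */
};

static struct example_buf *example_get_buf(struct list_head *pool)
{
	struct example_buf *buf, *first = NULL;

	while (!list_empty(pool)) {
		buf = list_first_entry(pool, struct example_buf, list);
		list_del(&buf->list);
		if (!buf->rrq_active)
			return buf;		/* usable buffer found */
		list_add_tail(&buf->list, pool);/* rotate it to the back */
		if (!first)
			first = buf;		/* remember where we started */
		else if (buf == first)
			return NULL;		/* scanned the whole pool */
	}
	return NULL;
}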
 
 /**
  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
@@ -2744,18 +2814,19 @@
 
 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
-	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
 
 	switch (dev_grp) {
 	case LPFC_PCI_DEV_LP:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
 		break;
 	case LPFC_PCI_DEV_OC:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
 		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2764,7 +2835,6 @@
 		return -ENODEV;
 		break;
 	}
-	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
 	return 0;
@@ -2940,7 +3010,7 @@
 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
 		goto out_host_busy;
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
 	if (lpfc_cmd == NULL) {
 		lpfc_rampdown_queue_depth(phba);
 
@@ -3239,7 +3309,7 @@
 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return FAILED;
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
 	if (lpfc_cmd == NULL)
 		return FAILED;
 	lpfc_cmd->timeout = 60;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 554efa6..a359d2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -513,8 +513,344 @@
 }
 
 /**
+ * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function is called with hbalock held.
+ * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
+ * rrq struct and adds it to the active_rrq_list.
+ *
+ * returns  0 when the rrq is set active for this xri
+ *         < 0 if unable to get rrq memory or an invalid parameter was passed.
+ **/
+static int
+__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+		uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+	uint16_t adj_xri;
+	struct lpfc_node_rrq *rrq;
+	int empty;
+
+	/*
+	 * set the active bit even if there is no mem available.
+	 */
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (!ndlp)
+		return -EINVAL;
+	if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+		return -EINVAL;
+	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+	if (rrq) {
+		rrq->send_rrq = send_rrq;
+		rrq->xritag = xritag;
+		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+		rrq->ndlp = ndlp;
+		rrq->nlp_DID = ndlp->nlp_DID;
+		rrq->vport = ndlp->vport;
+		rrq->rxid = rxid;
+		empty = list_empty(&phba->active_rrq_list);
+		if (phba->cfg_enable_rrq && send_rrq)
+			/*
+			 * We need the xri before we can add this to the
+			 * phba active rrq list.
+			 */
+			rrq->send_rrq = send_rrq;
+		else
+			rrq->send_rrq = 0;
+		list_add_tail(&rrq->list, &phba->active_rrq_list);
+		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
+			phba->hba_flag |= HBA_RRQ_ACTIVE;
+			if (empty)
+				lpfc_worker_wake_up(phba);
+		}
+		return 0;
+	}
+	return -ENOMEM;
+}
+
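/*
 * Sketch, not part of the patch: the atomic bitmap gating that
 * __lpfc_set_rrq_active() relies on.  test_and_set_bit() makes a second
 * claim of the same exchange fail, and test_and_clear_bit() releases it.
 * EXAMPLE_MAX_XRI and the static bitmap are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_MAX_XRI	512

static unsigned long example_xri_bitmap[BITS_TO_LONGS(EXAMPLE_MAX_XRI)];

static int example_claim_xri(unsigned int xri)
{
	if (xri >= EXAMPLE_MAX_XRI)
		return -EINVAL;
	if (test_and_set_bit(xri, example_xri_bitmap))
		return -EBUSY;		/* already active for this exchange */
	return 0;
}

static void example_release_xri(unsigned int xri)
{
	if (xri < EXAMPLE_MAX_XRI)
		test_and_clear_bit(xri, example_xri_bitmap);
}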
+/**
+ * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ * This function is called with hbalock held. It clears the RRQ active bit
+ * in the ndlp's xri_bitmap and frees the rrq resource.
+ **/
+static void
+__lpfc_clr_rrq_active(struct lpfc_hba *phba,
+			uint16_t xritag,
+			struct lpfc_node_rrq *rrq)
+{
+	uint16_t adj_xri;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+
+	/* The target DID could have been swapped (cable swap)
+	 * we should use the ndlp from the findnode if it is
+	 * available.
+	 */
+	if (!ndlp)
+		ndlp = rrq->ndlp;
+
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
+		rrq->send_rrq = 0;
+		rrq->xritag = 0;
+		rrq->rrq_stop_time = 0;
+	}
+	mempool_free(rrq, phba->rrq_pool);
+}
+
+/**
+ * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function takes the hbalock. It checks whether stop_time (RATOV from
+ * when the rrq was set active) has been reached; if it has and the send_rrq
+ * flag is set, it calls lpfc_send_rrq(). If the send_rrq flag is not set,
+ * it just calls the routine to clear the rrq and free the rrq resource.
+ * Before leaving the routine, the timer is set to the next rrq that is
+ * going to expire.
+ *
+ **/
+void
+lpfc_handle_rrq_active(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + HZ * (phba->fc_ratov + 1);
+	list_for_each_entry_safe(rrq, nextrrq,
+			&phba->active_rrq_list, list) {
+		if (time_after(jiffies, rrq->rrq_stop_time)) {
+			list_del(&rrq->list);
+			if (!rrq->send_rrq)
+				/* this call will free the rrq */
+				__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+			else {
+			/* if we send the rrq then the completion handler
+			 *  will clear the bit in the xribitmap.
+			 */
+				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				if (lpfc_send_rrq(phba, rrq)) {
+					lpfc_clr_rrq_active(phba, rrq->xritag,
+								 rrq);
+				}
+				spin_lock_irqsave(&phba->hbalock, iflags);
+			}
+		} else if  (time_before(rrq->rrq_stop_time, next_time))
+			next_time = rrq->rrq_stop_time;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (!list_empty(&phba->active_rrq_list))
+		mod_timer(&phba->rrq_tmr, next_time);
+}
+
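/*
 * Sketch, not part of the patch: the expiry scan and timer re-arm pattern
 * used by lpfc_handle_rrq_active() above.  Expired entries are retired and
 * the timer is re-armed for the earliest remaining stop time.  The entry
 * type and the ratov_jiffies parameter are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/timer.h>

struct example_entry {
	struct list_head list;
	unsigned long stop_time;	/* jiffies value at which it expires */
};

static void example_scan(struct list_head *pending, struct timer_list *tmr,
			 unsigned long ratov_jiffies)
{
	struct example_entry *e, *next;
	unsigned long next_time = jiffies + ratov_jiffies;

	list_for_each_entry_safe(e, next, pending, list) {
		if (time_after(jiffies, e->stop_time)) {
			list_del(&e->list);	/* expired: retire the entry */
			kfree(e);
		} else if (time_before(e->stop_time, next_time)) {
			next_time = e->stop_time;
		}
	}
	if (!list_empty(pending))
		mod_timer(tmr, next_time);	/* wake up at the next expiry */
}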
+/**
+ * lpfc_get_active_rrq - Get the active RRQ for this exchange.
+ * @vport: Pointer to vport context object.
+ * @xri: The xri used in the exchange.
+ * @did: The targets DID for this exchange.
+ *
+ * returns NULL = rrq not found in the phba->active_rrq_list.
+ *         rrq = rrq for this xri and target.
+ **/
+struct lpfc_node_rrq *
+lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return NULL;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		if (rrq->vport == vport && rrq->xritag == xri &&
+				rrq->nlp_DID == did){
+			list_del(&rrq->list);
+			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			return rrq;
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return NULL;
+}
+
+/**
+ * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
+ * @vport: Pointer to vport context object.
+ *
+ * Remove all active RRQs for this vport from the phba->active_rrq_list and
+ * clear the rrq.
+ **/
+void
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		if (rrq->vport == vport) {
+			list_del(&rrq->list);
+			__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+		}
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
+/**
+ * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
+ * @phba: Pointer to HBA context object.
+ *
+ * Remove all rrqs from the phba->active_rrq_list and free them by
+ * calling __lpfc_clr_rrq_active().
+ *
+ **/
+void
+lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
+{
+	struct lpfc_node_rrq *rrq;
+	struct lpfc_node_rrq *nextrrq;
+	unsigned long next_time;
+	unsigned long iflags;
+
+	if (phba->sli_rev != LPFC_SLI_REV4)
+		return;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+	next_time = jiffies + HZ * (phba->fc_ratov * 2);
+	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+		list_del(&rrq->list);
+		__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	if (!list_empty(&phba->active_rrq_list))
+		mod_timer(&phba->rrq_tmr, next_time);
+}
+
+
+/**
+ * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Target's nodelist pointer for this exchange.
+ * @xritag: the xri in the bitmap to test.
+ *
+ * This function is called with hbalock held. This function
+ * returns 0 = rrq not active for this xri
+ *         1 = rrq is valid for this xri.
+ **/
+static int
+__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t  xritag)
+{
+	uint16_t adj_xri;
+
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (!ndlp)
+		return 0;
+	if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: nodelist pointer for this target.
+ * @xritag: xri used in this exchange.
+ * @rxid: Remote Exchange ID.
+ * @send_rrq: Flag used to determine if we should send rrq els cmd.
+ *
+ * This function takes the hbalock.
+ * The active bit is always set in the active rrq xri_bitmap even
+ * if there is no slot available for the other rrq information.
+ *
+ * returns 0 if the rrq is set active for this xri
+ *         < 0 No memory or invalid ndlp.
+ **/
+int
+lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
+{
+	int ret;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
+
+/**
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @xritag: xri used in this exchange.
+ * @rrq: The RRQ to be cleared.
+ *
+ * This function takes the hbalock.
+ **/
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+			uint16_t xritag,
+			struct lpfc_node_rrq *rrq)
+{
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	__lpfc_clr_rrq_active(phba, xritag, rrq);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return;
+}
+
+
+
+/**
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * @phba: Pointer to HBA context object.
+ * @ndlp: Target's nodelist pointer for this exchange.
+ * @xritag: the xri in the bitmap to test.
+ *
+ * This function takes the hbalock.
+ * returns 0 = rrq not active for this xri
+ *         1 = rrq is valid for this xri.
+ **/
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+			uint16_t  xritag)
+{
+	int ret;
+	unsigned long iflags;
+
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return ret;
+}
+
+/**
  * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
  * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to the iocbq.
  *
  * This function is called with hbalock held. This function
  * Gets a new driver sglq object from the sglq list. If the
@@ -522,17 +858,51 @@
  * allocated sglq object else it returns NULL.
  **/
 static struct lpfc_sglq *
-__lpfc_sli_get_sglq(struct lpfc_hba *phba)
+__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 {
 	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
 	struct lpfc_sglq *sglq = NULL;
+	struct lpfc_sglq *start_sglq = NULL;
 	uint16_t adj_xri;
+	struct lpfc_scsi_buf *lpfc_cmd;
+	struct lpfc_nodelist *ndlp;
+	int found = 0;
+
+	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
+		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
+		ndlp = lpfc_cmd->rdata->pnode;
+	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
+			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
+		ndlp = piocbq->context_un.ndlp;
+	else
+		ndlp = piocbq->context1;
+
 	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
-	if (!sglq)
-		return NULL;
-	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
-	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
-	sglq->state = SGL_ALLOCATED;
+	start_sglq = sglq;
+	while (!found) {
+		if (!sglq)
+			return NULL;
+		adj_xri = sglq->sli4_xritag -
+				phba->sli4_hba.max_cfg_param.xri_base;
+		if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+			/* This xri has an rrq outstanding for this DID.
+			 * put it back in the list and get another xri.
+			 */
+			list_add_tail(&sglq->list, lpfc_sgl_list);
+			sglq = NULL;
+			list_remove_head(lpfc_sgl_list, sglq,
+						struct lpfc_sglq, list);
+			if (sglq == start_sglq) {
+				sglq = NULL;
+				break;
+			} else
+				continue;
+		}
+		sglq->ndlp = ndlp;
+		found = 1;
+		phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+		sglq->state = SGL_ALLOCATED;
+	}
 	return sglq;
 }
 
@@ -598,6 +968,7 @@
 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
 		} else {
 			sglq->state = SGL_FREED;
+			sglq->ndlp = NULL;
 			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
 
 			/* Check if TXQ queue needs to be serviced */
@@ -1634,7 +2005,6 @@
 	case MBX_READ_LNK_STAT:
 	case MBX_REG_LOGIN:
 	case MBX_UNREG_LOGIN:
-	case MBX_READ_LA:
 	case MBX_CLEAR_LA:
 	case MBX_DUMP_MEMORY:
 	case MBX_DUMP_CONTEXT:
@@ -1656,7 +2026,7 @@
 	case MBX_READ_SPARM64:
 	case MBX_READ_RPI64:
 	case MBX_REG_LOGIN64:
-	case MBX_READ_LA64:
+	case MBX_READ_TOPOLOGY:
 	case MBX_WRITE_WWN:
 	case MBX_SET_DEBUG:
 	case MBX_LOAD_EXP_ROM:
@@ -1746,11 +2116,6 @@
 		kfree(mp);
 	}
 
-	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-	    (phba->sli_rev == LPFC_SLI_REV4) &&
-	    (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
-		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
-
 	/*
 	 * If a REG_LOGIN succeeded  after node is destroyed or node
 	 * is in re-discovery driver need to cleanup the RPI.
@@ -3483,12 +3848,6 @@
 	phba->pport->fc_myDID = 0;
 	phba->pport->fc_prevDID = 0;
 
-	/* Turn off parity checking and serr during the physical reset */
-	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
-	pci_write_config_word(phba->pcidev, PCI_COMMAND,
-			      (cfg_value &
-			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
-
 	spin_lock_irq(&phba->hbalock);
 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
 	phba->fcf.fcf_flag = 0;
@@ -3508,9 +3867,18 @@
 	/* Now physically reset the device */
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"0389 Performing PCI function reset!\n");
+
+	/* Turn off parity checking and serr during the physical reset */
+	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
+			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
 	/* Perform FCoE PCI function reset */
 	lpfc_pci_function_reset(phba);
 
+	/* Restore PCI cmd register */
+	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
+
 	return 0;
 }
 
@@ -4317,6 +4685,10 @@
 	struct lpfc_vport *vport = phba->pport;
 	struct lpfc_dmabuf *mp;
 
+	/*
+	 * TODO:  Why does this routine execute these task in a different
+	 * TODO:  Why does this routine execute these tasks in a different
+	 */
 	/* Perform a PCI function reset to start from clean */
 	rc = lpfc_pci_function_reset(phba);
 	if (unlikely(rc))
@@ -4357,13 +4729,16 @@
 	}
 
 	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
-	if (unlikely(rc))
-		goto out_free_vpd;
-
+	if (unlikely(rc)) {
+		kfree(vpd);
+		goto out_free_mbox;
+	}
 	mqe = &mboxq->u.mqe;
 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
-		phba->hba_flag |= HBA_FCOE_SUPPORT;
+		phba->hba_flag |= HBA_FCOE_MODE;
+	else
+		phba->hba_flag &= ~HBA_FCOE_MODE;
 
 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
 		LPFC_DCBX_CEE_MODE)
@@ -4372,13 +4747,14 @@
 		phba->hba_flag &= ~HBA_FIP_SUPPORT;
 
 	if (phba->sli_rev != LPFC_SLI_REV4 ||
-	    !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+	    !(phba->hba_flag & HBA_FCOE_MODE)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 			"0376 READ_REV Error. SLI Level %d "
 			"FCoE enabled %d\n",
-			phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
+			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
 		rc = -EIO;
-		goto out_free_vpd;
+		kfree(vpd);
+		goto out_free_mbox;
 	}
 	/*
 	 * Evaluate the read rev and vpd data. Populate the driver
@@ -4392,6 +4768,7 @@
 				"Using defaults.\n", rc);
 		rc = 0;
 	}
+	kfree(vpd);
 
 	/* Save information as VPD data */
 	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
@@ -4428,7 +4805,7 @@
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	if (unlikely(rc)) {
 		rc = -EIO;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/*
@@ -4476,7 +4853,7 @@
 	if (rc) {
 		phba->link_state = LPFC_HBA_ERROR;
 		rc = -ENOMEM;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	mboxq->vport = vport;
@@ -4501,7 +4878,7 @@
 				rc, bf_get(lpfc_mqe_status, mqe));
 		phba->link_state = LPFC_HBA_ERROR;
 		rc = -EIO;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	if (phba->cfg_soft_wwnn)
@@ -4526,7 +4903,7 @@
 				"0582 Error %d during sgl post operation\n",
 					rc);
 		rc = -ENODEV;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/* Register SCSI SGL pool to the device */
@@ -4538,7 +4915,7 @@
 		/* Some Scsi buffers were moved to the abort scsi list */
 		/* A pci function reset will repost them */
 		rc = -ENODEV;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/* Post the rpi header region to the device. */
@@ -4548,7 +4925,7 @@
 				"0393 Error %d during rpi post operation\n",
 				rc);
 		rc = -ENODEV;
-		goto out_free_vpd;
+		goto out_free_mbox;
 	}
 
 	/* Set up all the queues to the device */
@@ -4608,33 +4985,33 @@
 		}
 	}
 
+	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+		/*
+		 * The FC Port needs to register FCFI (index 0)
+		 */
+		lpfc_reg_fcfi(phba, mboxq);
+		mboxq->vport = phba->pport;
+		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+		if (rc == MBX_SUCCESS)
+			rc = 0;
+		else
+			goto out_unset_queue;
+	}
 	/*
 	 * The port is ready, set the host's link state to LINK_DOWN
 	 * in preparation for link interrupts.
 	 */
-	lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
-	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	lpfc_set_loopback_flag(phba);
-	/* Change driver state to LPFC_LINK_DOWN right before init link */
 	spin_lock_irq(&phba->hbalock);
 	phba->link_state = LPFC_LINK_DOWN;
 	spin_unlock_irq(&phba->hbalock);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
-	if (unlikely(rc != MBX_NOT_FINISHED)) {
-		kfree(vpd);
-		return 0;
-	} else
-		rc = -EIO;
-
+	rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+out_unset_queue:
 	/* Unset all the queues set up in this routine when error out */
 	if (rc)
 		lpfc_sli4_queue_unset(phba);
-
 out_stop_timers:
 	if (rc)
 		lpfc_stop_hba_timers(phba);
-out_free_vpd:
-	kfree(vpd);
 out_free_mbox:
 	mempool_free(mboxq, phba->mbox_mem_pool);
 	return rc;
@@ -5863,6 +6240,8 @@
 	IOCB_t *icmd;
 	int numBdes = 0;
 	int i = 0;
+	uint32_t offset = 0; /* accumulated offset in the sg request list */
+	int inbound = 0; /* number of sg reply entries inbound from firmware */
 
 	if (!piocbq || !sglq)
 		return xritag;
@@ -5897,6 +6276,20 @@
 			 */
 			bde.tus.w = le32_to_cpu(bpl->tus.w);
 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+			/* The offsets in the sgl need to be accumulated
+			 * separately for the request and reply lists.
+			 * The request is always first, the reply follows.
+			 */
+			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
+				/* add up the reply sg entries */
+				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+					inbound++;
+				/* first inbound? reset the offset */
+				if (inbound == 1)
+					offset = 0;
+				bf_set(lpfc_sli4_sge_offset, sgl, offset);
+				offset += bde.tus.f.bdeSize;
+			}
 			bpl++;
 			sgl++;
 		}
@@ -6028,11 +6421,6 @@
 		bf_set(els_req64_vf, &wqe->els_req, 0);
 		/* And a VFID for word 12 */
 		bf_set(els_req64_vfid, &wqe->els_req, 0);
-		/*
-		 * Set ct field to 3, indicates that the context_tag field
-		 * contains the FCFI and remote N_Port_ID is
-		 * in word 5.
-		 */
 		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
 		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
 		       iocbq->iocb.ulpContext);
@@ -6140,6 +6528,18 @@
 		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
 	break;
 	case CMD_GEN_REQUEST64_CR:
+		/* For this command calculate the xmit length of the
+		 * request bde.
+		 */
+		xmit_len = 0;
+		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
+			sizeof(struct ulp_bde64);
+		for (i = 0; i < numBdes; i++) {
+			if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+				break;
+			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
+			xmit_len += bde.tus.f.bdeSize;
+		}
 		/* word3 iocb=IO_TAG wqe=request_payload_len */
 		wqe->gen_req.request_payload_len = xmit_len;
 		/* word4 iocb=parameter wqe=relative_offset memcpy */
@@ -6320,7 +6720,7 @@
 					return IOCB_BUSY;
 				}
 			} else {
-			sglq = __lpfc_sli_get_sglq(phba);
+			sglq = __lpfc_sli_get_sglq(phba, piocb);
 				if (!sglq) {
 					if (!(flag & SLI_IOCB_RET_IOCB)) {
 						__lpfc_sli_ringtx_put(phba,
@@ -8033,29 +8433,66 @@
 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 {
 	uint32_t uerr_sta_hi, uerr_sta_lo;
+	uint32_t if_type, portsmphr;
+	struct lpfc_register portstat_reg;
 
-	/* For now, use the SLI4 device internal unrecoverable error
+	/*
+	 * For now, use the SLI4 device internal unrecoverable error
 	 * registers for error attention. This can be changed later.
 	 */
-	uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
-	uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
-	if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
-	    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	switch (if_type) {
+	case LPFC_SLI_INTF_IF_TYPE_0:
+		uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
+		uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
+		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
+		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1423 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"ue_mask_lo_reg=0x%x, "
+					"ue_mask_hi_reg=0x%x\n",
+					uerr_sta_lo, uerr_sta_hi,
+					phba->sli4_hba.ue_mask_lo,
+					phba->sli4_hba.ue_mask_hi);
+			phba->work_status[0] = uerr_sta_lo;
+			phba->work_status[1] = uerr_sta_hi;
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_2:
+		portstat_reg.word0 =
+			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
+		portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
+		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
+			phba->work_status[0] =
+				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] =
+				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2885 Port Error Detected: "
+					"port status reg 0x%x, "
+					"port smphr reg 0x%x, "
+					"error 1=0x%x, error 2=0x%x\n",
+					portstat_reg.word0,
+					portsmphr,
+					phba->work_status[0],
+					phba->work_status[1]);
+			phba->work_ha |= HA_ERATT;
+			phba->hba_flag |= HBA_ERATT_HANDLED;
+			return 1;
+		}
+		break;
+	case LPFC_SLI_INTF_IF_TYPE_1:
+	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1423 HBA Unrecoverable error: "
-				"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-				"ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
-				uerr_sta_lo, uerr_sta_hi,
-				phba->sli4_hba.ue_mask_lo,
-				phba->sli4_hba.ue_mask_hi);
-		phba->work_status[0] = uerr_sta_lo;
-		phba->work_status[1] = uerr_sta_hi;
-		/* Set the driver HA work bitmap */
-		phba->work_ha |= HA_ERATT;
-		/* Indicate polling handles this ERATT */
-		phba->hba_flag |= HBA_ERATT_HANDLED;
+				"2886 HBA Error Attention on unsupported "
+				"if type %d.\n", if_type);
 		return 1;
 	}
+
 	return 0;
 }
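/*
 * Sketch, not part of the patch: the if_type dispatch shape used by
 * lpfc_sli4_eratt_read() above, where the register that reports a fatal
 * port error depends on the SLI-4 interface type.  The structure, the
 * register fields and the EXAMPLE_PORT_ERR bit are hypothetical, and the
 * if_type 0 case is simplified (no mask handling).
 */
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_PORT_ERR	0x80000000

struct example_hba {
	int if_type;
	void __iomem *uerr_lo;		/* if_type 0 error status, low word */
	void __iomem *uerr_hi;		/* if_type 0 error status, high word */
	void __iomem *port_status;	/* if_type 2 port status register */
};

static int example_error_pending(struct example_hba *hba)
{
	switch (hba->if_type) {
	case 0:
		return readl(hba->uerr_lo) || readl(hba->uerr_hi);
	case 2:
		return !!(readl(hba->port_status) & EXAMPLE_PORT_ERR);
	default:
		return 1;	/* unknown interface type: treat as an error */
	}
}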
 
@@ -8110,7 +8547,7 @@
 		ha_copy = lpfc_sli_eratt_read(phba);
 		break;
 	case LPFC_SLI_REV4:
-		/* Read devcie Uncoverable Error (UERR) registers */
+		/* Read device Unrecoverable Error (UERR) registers */
 		ha_copy = lpfc_sli4_eratt_read(phba);
 		break;
 	default:
@@ -9735,7 +10172,7 @@
  * lpfc_sli4_queue_free - free a queue structure and associated memory
  * @queue: The queue structure to free.
  *
- * This function frees a queue structure and the DMAable memeory used for
+ * This function frees a queue structure and the DMAable memory used for
  * the host resident queue. This function must be called after destroying the
  * queue on the HBA.
  **/
@@ -10155,16 +10592,20 @@
 			 length, LPFC_SLI4_MBX_EMBED);
 
 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
-	bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
-		    mq->page_count);
-	bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
-	       1);
-	bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
+	bf_set(lpfc_mbx_mq_create_ext_num_pages,
+	       &mq_create_ext->u.request, mq->page_count);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
 	       &mq_create_ext->u.request, 1);
 	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
 	       &mq_create_ext->u.request, 1);
-	bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
-	       cq->queue_id);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
+	       &mq_create_ext->u.request, 1);
+	bf_set(lpfc_mq_context_cq_id,
+	       &mq_create_ext->u.request.context, cq->queue_id);
 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
 	switch (mq->entry_count) {
 	default:
@@ -11137,7 +11578,8 @@
 static int
 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
 {
-	char *rctl_names[] = FC_RCTL_NAMES_INIT;
+	/*  make rctl_names static to save stack space */
+	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
 	char *type_names[] = FC_TYPE_NAMES_INIT;
 	struct fc_vft_header *fc_vft_hdr;
 
@@ -11538,6 +11980,10 @@
 				"SID:x%x\n", oxid, sid);
 		return;
 	}
+	if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
+		&& rxid <= (phba->sli4_hba.max_cfg_param.max_xri
+		+ phba->sli4_hba.max_cfg_param.xri_base))
+		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
 
 	/* Allocate buffer for acc iocb */
 	ctiocb = lpfc_sli_get_iocbq(phba);
@@ -11560,6 +12006,7 @@
 	icmd->ulpLe = 1;
 	icmd->ulpClass = CLASS3;
 	icmd->ulpContext = ndlp->nlp_rpi;
+	ctiocb->context1 = ndlp;
 
 	ctiocb->iocb_cmpl = NULL;
 	ctiocb->vport = phba->pport;
@@ -12129,42 +12576,37 @@
 
 /**
  * lpfc_sli4_init_vpi - Initialize a vpi with the port
- * @phba: pointer to lpfc hba data structure.
- * @vpi: vpi value to activate with the port.
+ * @vport: Pointer to the vport for which the vpi is being initialized
  *
- * This routine is invoked to activate a vpi with the
- * port when the host intends to use vports with a
- * nonzero vpi.
+ * This routine is invoked to activate a vpi with the port.
  *
  * Returns:
  *    0 success
  *    -Evalue otherwise
  **/
 int
-lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
+lpfc_sli4_init_vpi(struct lpfc_vport *vport)
 {
 	LPFC_MBOXQ_t *mboxq;
 	int rc = 0;
 	int retval = MBX_SUCCESS;
 	uint32_t mbox_tmo;
-
-	if (vpi == 0)
-		return -EINVAL;
+	struct lpfc_hba *phba = vport->phba;
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq)
 		return -ENOMEM;
-	lpfc_init_vpi(phba, mboxq, vpi);
+	lpfc_init_vpi(phba, mboxq, vport->vpi);
 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
 				"2022 INIT VPI Mailbox failed "
 				"status %d, mbxStatus x%x\n", rc,
 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
 		retval = -EIO;
 	}
 	if (rc != MBX_TIMEOUT)
-		mempool_free(mboxq, phba->mbox_mem_pool);
+		mempool_free(mboxq, vport->phba->mbox_mem_pool);
 
 	return retval;
 }
@@ -12854,6 +13296,7 @@
 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	LIST_HEAD(mbox_cmd_list);
+	uint8_t restart_loop;
 
 	/* Clean up internally queued mailbox commands with the vport */
 	spin_lock_irq(&phba->hbalock);
@@ -12882,15 +13325,44 @@
 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
 		}
 	}
+	/* Cleanup any mailbox completions which are not yet processed */
+	do {
+		restart_loop = 0;
+		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+			/*
+			 * If this mailbox is already processed or it is
+			 * for another vport ignore it.
+			 */
+			if ((mb->vport != vport) ||
+				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
+				continue;
+
+			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+				(mb->u.mb.mbxCommand != MBX_REG_VPI))
+				continue;
+
+			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+				ndlp = (struct lpfc_nodelist *)mb->context2;
+				/* Unregister the RPI when mailbox complete */
+				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+				restart_loop = 1;
+				spin_unlock_irq(&phba->hbalock);
+				spin_lock(shost->host_lock);
+				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+				spin_unlock(shost->host_lock);
+				spin_lock_irq(&phba->hbalock);
+				break;
+			}
+		}
+	} while (restart_loop);
+
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Release the cleaned-up mailbox commands */
 	while (!list_empty(&mbox_cmd_list)) {
 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
-			if (phba->sli_rev == LPFC_SLI_REV4)
-				__lpfc_sli4_free_rpi(phba,
-						mb->u.mb.un.varRegLogin.rpi);
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
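/*
 * Sketch, not part of the patch: the drop-lock-and-restart list walk used
 * by the mailbox-completion cleanup above.  Because the lock has to be
 * released around the per-entry work, the traversal is restarted from the
 * head instead of trusting a now-stale iterator.  The item type and
 * example_do_work() are hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_item {
	struct list_head list;
	bool handled;
};

static void example_do_work(struct example_item *it)	/* placeholder work */
{
}

static void example_cleanup(struct list_head *head, spinlock_t *lock)
{
	struct example_item *it;
	bool restart;

	spin_lock_irq(lock);
	do {
		restart = false;
		list_for_each_entry(it, head, list) {
			if (it->handled)
				continue;	/* already processed: skip */
			it->handled = true;
			spin_unlock_irq(lock);	/* work needs the lock dropped */
			example_do_work(it);
			spin_lock_irq(lock);
			restart = true;		/* list may have changed */
			break;
		}
	} while (restart);
	spin_unlock_irq(lock);
}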
@@ -12948,12 +13420,13 @@
 	while (pring->txq_cnt) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
 
-		sglq = __lpfc_sli_get_sglq(phba);
+		piocbq = lpfc_sli_ringtx_get(phba, pring);
+		sglq = __lpfc_sli_get_sglq(phba, piocbq);
 		if (!sglq) {
+			__lpfc_sli_ringtx_put(phba, pring, piocbq);
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			break;
 		} else {
-			piocbq = lpfc_sli_ringtx_get(phba, pring);
 			if (!piocbq) {
 				/* The txq_cnt out of sync. This should
 				 * never happen
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index cd56d6c..453577c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -34,9 +34,11 @@
 	union {
 		struct lpfc_mcqe		mcqe_cmpl;
 		struct lpfc_acqe_link		acqe_link;
-		struct lpfc_acqe_fcoe		acqe_fcoe;
+		struct lpfc_acqe_fip		acqe_fip;
 		struct lpfc_acqe_dcbx		acqe_dcbx;
 		struct lpfc_acqe_grp5		acqe_grp5;
+		struct lpfc_acqe_fc_la		acqe_fc;
+		struct lpfc_acqe_sli		acqe_sli;
 		struct lpfc_rcqe		rcqe_cmpl;
 		struct sli4_wcqe_xri_aborted	wcqe_axri;
 		struct lpfc_wcqe_complete	wcqe_cmpl;
@@ -82,6 +84,7 @@
 		struct lpfc_iocbq    *rsp_iocb;
 		struct lpfcMboxq     *mbox;
 		struct lpfc_nodelist *ndlp;
+		struct lpfc_node_rrq *rrq;
 	} context_un;
 
 	void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c4483fe..c7217d5 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -137,9 +137,11 @@
 	uint8_t speed;
 	uint8_t duplex;
 	uint8_t status;
-	uint8_t physical;
+	uint8_t type;
+	uint8_t number;
 	uint8_t fault;
 	uint16_t logical_speed;
+	uint16_t topology;
 };
 
 struct lpfc_fcf_rec {
@@ -367,23 +369,39 @@
 					     PCI BAR1, control registers */
 	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
 					     PCI BAR2, doorbell registers */
-	/* BAR0 PCI config space register memory map */
-	void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
-	void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
-	void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
-	void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
-	void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
-	/* BAR1 FCoE function CSR register memory map */
-	void __iomem *STAregaddr;    /* Address to HST_STATE register */
-	void __iomem *ISRregaddr;    /* Address to HST_ISR register */
-	void __iomem *IMRregaddr;    /* Address to HST_IMR register */
-	void __iomem *ISCRregaddr;   /* Address to HST_ISCR register */
-	/* BAR2 VF-0 doorbell register memory map */
-	void __iomem *RQDBregaddr;   /* Address to RQ_DOORBELL register */
-	void __iomem *WQDBregaddr;   /* Address to WQ_DOORBELL register */
-	void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
-	void __iomem *MQDBregaddr;   /* Address to MQ_DOORBELL register */
-	void __iomem *BMBXregaddr;   /* Address to BootStrap MBX register */
+	union {
+		struct {
+			/* IF Type 0, BAR 0 PCI cfg space reg mem map */
+			void __iomem *UERRLOregaddr;
+			void __iomem *UERRHIregaddr;
+			void __iomem *UEMASKLOregaddr;
+			void __iomem *UEMASKHIregaddr;
+		} if_type0;
+		struct {
+			/* IF Type 2, BAR 0 PCI cfg space reg mem map. */
+			void __iomem *STATUSregaddr;
+			void __iomem *CTRLregaddr;
+			void __iomem *ERR1regaddr;
+			void __iomem *ERR2regaddr;
+		} if_type2;
+	} u;
+
+	/* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
+	void __iomem *PSMPHRregaddr;
+
+	/* Well-known SLI INTF register memory map. */
+	void __iomem *SLIINTFregaddr;
+
+	/* IF type 0, BAR 1 function CSR register memory map */
+	void __iomem *ISRregaddr;	/* HST_ISR register */
+	void __iomem *IMRregaddr;	/* HST_IMR register */
+	void __iomem *ISCRregaddr;	/* HST_ISCR register */
+	/* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
+	void __iomem *RQDBregaddr;	/* RQ_DOORBELL register */
+	void __iomem *WQDBregaddr;	/* WQ_DOORBELL register */
+	void __iomem *EQCQDBregaddr;	/* EQCQ_DOORBELL register */
+	void __iomem *MQDBregaddr;	/* MQ_DOORBELL register */
+	void __iomem *BMBXregaddr;	/* BootStrap MBX register */
 
 	uint32_t ue_mask_lo;
 	uint32_t ue_mask_hi;
@@ -466,6 +484,7 @@
 	struct list_head clist;
 	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
 	enum lpfc_sgl_state state;
+	struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
 	uint16_t iotag;         /* pre-assigned IO tag */
 	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
 	struct sli4_sge *sgl;	/* pre-assigned SGL */
@@ -532,7 +551,6 @@
 struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
 void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
 int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
-void __lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_remove_rpis(struct lpfc_hba *);
 void lpfc_sli4_async_event_proc(struct lpfc_hba *);
@@ -548,7 +566,7 @@
 int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
 void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
 int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
-int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_init_vpi(struct lpfc_vport *);
 uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
 uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 7a1b5b1..386cf92 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.18"
+#define LPFC_DRIVER_VERSION "8.3.20"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a5281ce..6b8d295 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -395,8 +395,8 @@
 	 * by the port.
 	 */
 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
-		(pport->fc_flag & FC_VFI_REGISTERED)) {
-		rc = lpfc_sli4_init_vpi(phba, vpi);
+	    (pport->fc_flag & FC_VFI_REGISTERED)) {
+		rc = lpfc_sli4_init_vpi(vport);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
 					"1838 Failed to INIT_VPI on vpi %d "
@@ -418,7 +418,7 @@
 
 	if ((phba->link_state < LPFC_LINK_UP) ||
 	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
-	    (phba->fc_topology == TOPOLOGY_LOOP)) {
+	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
 		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
 		rc = VPORT_OK;
 		goto out;
@@ -514,7 +514,7 @@
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
 	if ((phba->link_state < LPFC_LINK_UP) ||
-	    (phba->fc_topology == TOPOLOGY_LOOP)) {
+	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
 		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
 		return VPORT_OK;
 	}
@@ -665,7 +665,7 @@
 	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
 	    phba->link_state >= LPFC_LINK_UP &&
-	    phba->fc_topology != TOPOLOGY_LOOP) {
+	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
 		if (vport->cfg_enable_da_id) {
 			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
 			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index f564474..8534119 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -13,7 +13,7 @@
  */
 
 /*
- * Comand coalescing - This feature allows the driver to be able to combine
+ * Command coalescing - This feature allows the driver to be able to combine
  * two or more commands and issue as one command in order to boost I/O
  * performance. Useful if the nature of the I/O is sequential. It is not very
  * useful for random natured I/Os.
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index f469915..5826ed5 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_MEGARAID_MM)	+= megaraid_mm.o
 obj-$(CONFIG_MEGARAID_MAILBOX)	+= megaraid_mbox.o
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid_sas.o
+megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
+	megaraid_sas_fp.o
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index a7008c0..25506c7 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -224,7 +224,7 @@
 {
 	int err;
 
-	/* inconsistant: mraid_mm_compat_ioctl doesn't take the BKL */
+	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
 	mutex_lock(&mraid_mm_mutex);
 	err = mraid_mm_ioctl(filep, cmd, arg);
 	mutex_unlock(&mraid_mm_mutex);
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
deleted file mode 100644
index 7451bc0..0000000
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ /dev/null
@@ -1,5193 +0,0 @@
-/*
- *
- *		Linux MegaRAID driver for SAS based RAID controllers
- *
- * Copyright (c) 2003-2005  LSI Corporation.
- *
- *	   This program is free software; you can redistribute it and/or
- *	   modify it under the terms of the GNU General Public License
- *	   as published by the Free Software Foundation; either version
- *	   2 of the License, or (at your option) any later version.
- *
- * FILE		: megaraid_sas.c
- * Version     : v00.00.04.31-rc1
- *
- * Authors:
- *	(email-id : megaraidlinux@lsi.com)
- * 	Sreenivas Bagalkote
- * 	Sumant Patro
- *	Bo Yang
- *
- * List of supported controllers
- *
- * OEM	Product Name			VID	DID	SSVID	SSID
- * ---	------------			---	---	----	----
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/list.h>
-#include <linux/moduleparam.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/uio.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-#include <linux/poll.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include "megaraid_sas.h"
-
-/*
- * poll_mode_io: if 1, schedule command completion from the queuecommand (IO) path
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
-	"Complete cmds from IO path, (default=0)");
-
-/*
- * Number of sectors per IO command
- * Will be set in megasas_init_mfi if user does not provide
- */
-static unsigned int max_sectors;
-module_param_named(max_sectors, max_sectors, int, 0);
-MODULE_PARM_DESC(max_sectors,
-	"Maximum number of sectors per IO command");
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION(MEGASAS_VERSION);
-MODULE_AUTHOR("megaraidlinux@lsi.com");
-MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
-
-static int megasas_transition_to_ready(struct megasas_instance *instance);
-static int megasas_get_pd_list(struct megasas_instance *instance);
-static int megasas_issue_init_mfi(struct megasas_instance *instance);
-static int megasas_register_aen(struct megasas_instance *instance,
-				u32 seq_num, u32 class_locale_word);
-/*
- * PCI ID table for all supported controllers
- */
-static struct pci_device_id megasas_pci_table[] = {
-
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
-	/* xscale IOP */
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
-	/* ppc IOP */
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
-	/* ppc IOP */
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
-	/* gen2*/
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
-	/* gen2*/
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
-	/* skinny*/
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
-	/* skinny*/
-	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
-	/* xscale IOP, vega */
-	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
-	/* xscale IOP */
-	{}
-};
-
-MODULE_DEVICE_TABLE(pci, megasas_pci_table);
-
-static int megasas_mgmt_majorno;
-static struct megasas_mgmt_info megasas_mgmt_info;
-static struct fasync_struct *megasas_async_queue;
-static DEFINE_MUTEX(megasas_async_queue_mutex);
-
-static int megasas_poll_wait_aen;
-static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
-static u32 support_poll_for_event;
-static u32 megasas_dbg_lvl;
-static u32 support_device_change;
-
-/* define lock for aen poll */
-spinlock_t poll_aen_lock;
-
-static void
-megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
-		     u8 alt_status);
-
-/**
- * megasas_get_cmd -	Get a command from the free pool
- * @instance:		Adapter soft state
- *
- * Returns a free command from the pool
- */
-static struct megasas_cmd *megasas_get_cmd(struct megasas_instance
-						  *instance)
-{
-	unsigned long flags;
-	struct megasas_cmd *cmd = NULL;
-
-	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
-
-	if (!list_empty(&instance->cmd_pool)) {
-		cmd = list_entry((&instance->cmd_pool)->next,
-				 struct megasas_cmd, list);
-		list_del_init(&cmd->list);
-	} else {
-		printk(KERN_ERR "megasas: Command pool empty!\n");
-	}
-
-	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
-	return cmd;
-}
-
-/**
- * megasas_return_cmd -	Return a cmd to free command pool
- * @instance:		Adapter soft state
- * @cmd:		Command packet to be returned to free command pool
- */
-static inline void
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
-
-	cmd->scmd = NULL;
-	list_add_tail(&cmd->list, &instance->cmd_pool);
-
-	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
-}
-
-
-/**
-*	The following functions are defined for xscale 
-*	(deviceid : 1064R, PERC5) controllers
-*/
-
-/**
- * megasas_enable_intr_xscale -	Enables interrupts
- * @regs:			MFI register set
- */
-static inline void
-megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
-{
-	writel(0, &(regs)->outbound_intr_mask);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_disable_intr_xscale -Disables interrupt
- * @regs:			MFI register set
- */
-static inline void
-megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs)
-{
-	u32 mask = 0x1f;
-	writel(mask, &regs->outbound_intr_mask);
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_read_fw_status_reg_xscale - returns the current FW status value
- * @regs:			MFI register set
- */
-static u32
-megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
-{
-	return readl(&(regs)->outbound_msg_0);
-}
-/**
- * megasas_clear_interrupt_xscale -	Check & clear interrupt
- * @regs:				MFI register set
- */
-static int 
-megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
-{
-	u32 status;
-	u32 mfiStatus = 0;
-	/*
-	 * Check if it is our interrupt
-	 */
-	status = readl(&regs->outbound_intr_status);
-
-	if (status & MFI_OB_INTR_STATUS_MASK)
-		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
-	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
-		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
-
-	/*
-	 * Clear the interrupt by writing back the same value
-	 */
-	if (mfiStatus)
-		writel(status, &regs->outbound_intr_status);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_status);
-
-	return mfiStatus;
-}
-
-/**
- * megasas_fire_cmd_xscale -	Sends command to the FW
- * @frame_phys_addr :		Physical address of cmd
- * @frame_count :		Number of frames for the command
- * @regs :			MFI register set
- */
-static inline void 
-megasas_fire_cmd_xscale(struct megasas_instance *instance,
-		dma_addr_t frame_phys_addr,
-		u32 frame_count,
-		struct megasas_register_set __iomem *regs)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel((frame_phys_addr >> 3)|(frame_count),
-	       &(regs)->inbound_queue_port);
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-}
-
-/**
- * megasas_adp_reset_xscale -  For controller reset
- * @regs:                              MFI register set
- */
-static int
-megasas_adp_reset_xscale(struct megasas_instance *instance,
-	struct megasas_register_set __iomem *regs)
-{
-	u32 i;
-	u32 pcidata;
-	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
-
-	for (i = 0; i < 3; i++)
-		msleep(1000); /* sleep for 3 secs */
-	pcidata  = 0;
-	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
-	printk(KERN_NOTICE "pcidata = %x\n", pcidata);
-	if (pcidata & 0x2) {
-		printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
-		pcidata &= ~0x2;
-		pci_write_config_dword(instance->pdev,
-				MFI_1068_PCSR_OFFSET, pcidata);
-
-		for (i = 0; i < 2; i++)
-			msleep(1000); /* need to wait 2 secs again */
-
-		pcidata  = 0;
-		pci_read_config_dword(instance->pdev,
-				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
-		printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
-		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
-			printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
-			pcidata = 0;
-			pci_write_config_dword(instance->pdev,
-				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
-		}
-	}
-	return 0;
-}
-
-/**
- * megasas_check_reset_xscale -	For controller reset check
- * @regs:				MFI register set
- */
-static int
-megasas_check_reset_xscale(struct megasas_instance *instance,
-		struct megasas_register_set __iomem *regs)
-{
-	u32 consumer;
-	consumer = *instance->consumer;
-
-	if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
-		(*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
-		return 1;
-	}
-	return 0;
-}
-
-static struct megasas_instance_template megasas_instance_template_xscale = {
-
-	.fire_cmd = megasas_fire_cmd_xscale,
-	.enable_intr = megasas_enable_intr_xscale,
-	.disable_intr = megasas_disable_intr_xscale,
-	.clear_intr = megasas_clear_intr_xscale,
-	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
-	.adp_reset = megasas_adp_reset_xscale,
-	.check_reset = megasas_check_reset_xscale,
-};
-
-/**
-*	This is the end of the set of functions & definitions specific
-*	to xscale (deviceid : 1064R, PERC5) controllers
-*/
-
-/**
-*	The following functions are defined for ppc (deviceid : 0x60) 
-* 	controllers
-*/
-
-/**
- * megasas_enable_intr_ppc -	Enables interrupts
- * @regs:			MFI register set
- */
-static inline void
-megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
-{
-	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
-    
-	writel(~0x80000000, &(regs)->outbound_intr_mask);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_disable_intr_ppc -	Disable interrupt
- * @regs:			MFI register set
- */
-static inline void
-megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs)
-{
-	u32 mask = 0xFFFFFFFF;
-	writel(mask, &regs->outbound_intr_mask);
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_read_fw_status_reg_ppc - returns the current FW status value
- * @regs:			MFI register set
- */
-static u32
-megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
-{
-	return readl(&(regs)->outbound_scratch_pad);
-}
-
-/**
- * megasas_clear_interrupt_ppc -	Check & clear interrupt
- * @regs:				MFI register set
- */
-static int 
-megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
-{
-	u32 status;
-	/*
-	 * Check if it is our interrupt
-	 */
-	status = readl(&regs->outbound_intr_status);
-
-	if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
-		return 0;
-	}
-
-	/*
-	 * Clear the interrupt by writing back the same value
-	 */
-	writel(status, &regs->outbound_doorbell_clear);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_doorbell_clear);
-
-	return 1;
-}
-/**
- * megasas_fire_cmd_ppc -	Sends command to the FW
- * @frame_phys_addr :		Physical address of cmd
- * @frame_count :		Number of frames for the command
- * @regs :			MFI register set
- */
-static inline void 
-megasas_fire_cmd_ppc(struct megasas_instance *instance,
-		dma_addr_t frame_phys_addr,
-		u32 frame_count,
-		struct megasas_register_set __iomem *regs)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel((frame_phys_addr | (frame_count<<1))|1, 
-			&(regs)->inbound_queue_port);
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-}
-
-/**
- * megasas_adp_reset_ppc -	For controller reset
- * @regs:				MFI register set
- */
-static int
-megasas_adp_reset_ppc(struct megasas_instance *instance,
-			struct megasas_register_set __iomem *regs)
-{
-	return 0;
-}
-
-/**
- * megasas_check_reset_ppc -	For controller reset check
- * @regs:				MFI register set
- */
-static int
-megasas_check_reset_ppc(struct megasas_instance *instance,
-			struct megasas_register_set __iomem *regs)
-{
-	return 0;
-}
-static struct megasas_instance_template megasas_instance_template_ppc = {
-	
-	.fire_cmd = megasas_fire_cmd_ppc,
-	.enable_intr = megasas_enable_intr_ppc,
-	.disable_intr = megasas_disable_intr_ppc,
-	.clear_intr = megasas_clear_intr_ppc,
-	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
-	.adp_reset = megasas_adp_reset_ppc,
-	.check_reset = megasas_check_reset_ppc,
-};
-
-/**
- * megasas_enable_intr_skinny -	Enables interrupts
- * @regs:			MFI register set
- */
-static inline void
-megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
-{
-	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
-
-	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_disable_intr_skinny -	Disables interrupt
- * @regs:			MFI register set
- */
-static inline void
-megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
-{
-	u32 mask = 0xFFFFFFFF;
-	writel(mask, &regs->outbound_intr_mask);
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_read_fw_status_reg_skinny - returns the current FW status value
- * @regs:			MFI register set
- */
-static u32
-megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
-{
-	return readl(&(regs)->outbound_scratch_pad);
-}
-
-/**
- * megasas_clear_interrupt_skinny -	Check & clear interrupt
- * @regs:				MFI register set
- */
-static int
-megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
-{
-	u32 status;
-	/*
-	 * Check if it is our interrupt
-	 */
-	status = readl(&regs->outbound_intr_status);
-
-	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
-		return 0;
-	}
-
-	/*
-	 * Clear the interrupt by writing back the same value
-	 */
-	writel(status, &regs->outbound_intr_status);
-
-	/*
-	* dummy read to flush PCI
-	*/
-	readl(&regs->outbound_intr_status);
-
-	return 1;
-}
-
-/**
- * megasas_fire_cmd_skinny -	Sends command to the FW
- * @frame_phys_addr :		Physical address of cmd
- * @frame_count :		Number of frames for the command
- * @regs :			MFI register set
- */
-static inline void
-megasas_fire_cmd_skinny(struct megasas_instance *instance,
-			dma_addr_t frame_phys_addr,
-			u32 frame_count,
-			struct megasas_register_set __iomem *regs)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel(0, &(regs)->inbound_high_queue_port);
-	writel((frame_phys_addr | (frame_count<<1))|1,
-		&(regs)->inbound_low_queue_port);
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-}
-
-/**
- * megasas_adp_reset_skinny -	For controller reset
- * @regs:				MFI register set
- */
-static int
-megasas_adp_reset_skinny(struct megasas_instance *instance,
-			struct megasas_register_set __iomem *regs)
-{
-	return 0;
-}
-
-/**
- * megasas_check_reset_skinny -	For controller reset check
- * @regs:				MFI register set
- */
-static int
-megasas_check_reset_skinny(struct megasas_instance *instance,
-				struct megasas_register_set __iomem *regs)
-{
-	return 0;
-}
-
-static struct megasas_instance_template megasas_instance_template_skinny = {
-
-	.fire_cmd = megasas_fire_cmd_skinny,
-	.enable_intr = megasas_enable_intr_skinny,
-	.disable_intr = megasas_disable_intr_skinny,
-	.clear_intr = megasas_clear_intr_skinny,
-	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
-	.adp_reset = megasas_adp_reset_skinny,
-	.check_reset = megasas_check_reset_skinny,
-};
-
-
-/**
-*	The following functions are defined for gen2 (deviceid : 0x78 0x79)
-*	controllers
-*/
-
-/**
- * megasas_enable_intr_gen2 -  Enables interrupts
- * @regs:                      MFI register set
- */
-static inline void
-megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
-{
-	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
-
-	/* write ~0x00000005 (4 | 1) to the intr mask */
-	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_disable_intr_gen2 - Disables interrupt
- * @regs:                      MFI register set
- */
-static inline void
-megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
-{
-	u32 mask = 0xFFFFFFFF;
-	writel(mask, &regs->outbound_intr_mask);
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_read_fw_status_reg_gen2 - returns the current FW status value
- * @regs:                      MFI register set
- */
-static u32
-megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
-{
-	return readl(&(regs)->outbound_scratch_pad);
-}
-
-/**
- * megasas_clear_interrupt_gen2 -      Check & clear interrupt
- * @regs:                              MFI register set
- */
-static int
-megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
-{
-	u32 status;
-	u32 mfiStatus = 0;
-	/*
-	 * Check if it is our interrupt
-	 */
-	status = readl(&regs->outbound_intr_status);
-
-	if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) {
-		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
-	}
-	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
-		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
-	}
-
-	/*
-	 * Clear the interrupt by writing back the same value
-	 */
-	if (mfiStatus)
-		writel(status, &regs->outbound_doorbell_clear);
-
-	/* Dummy readl to force pci flush */
-	readl(&regs->outbound_intr_status);
-
-	return mfiStatus;
-}
-/**
- * megasas_fire_cmd_gen2 -     Sends command to the FW
- * @frame_phys_addr :          Physical address of cmd
- * @frame_count :              Number of frames for the command
- * @regs :                     MFI register set
- */
-static inline void
-megasas_fire_cmd_gen2(struct megasas_instance *instance,
-			dma_addr_t frame_phys_addr,
-			u32 frame_count,
-			struct megasas_register_set __iomem *regs)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel((frame_phys_addr | (frame_count<<1))|1,
-			&(regs)->inbound_queue_port);
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-}
-
-/**
- * megasas_adp_reset_gen2 -	For controller reset
- * @regs:				MFI register set
- */
-static int
-megasas_adp_reset_gen2(struct megasas_instance *instance,
-			struct megasas_register_set __iomem *reg_set)
-{
-	u32			retry = 0 ;
-	u32			HostDiag;
-
-	writel(0, &reg_set->seq_offset);
-	writel(4, &reg_set->seq_offset);
-	writel(0xb, &reg_set->seq_offset);
-	writel(2, &reg_set->seq_offset);
-	writel(7, &reg_set->seq_offset);
-	writel(0xd, &reg_set->seq_offset);
-	msleep(1000);
-
-	HostDiag = (u32)readl(&reg_set->host_diag);
-
-	while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
-		msleep(100);
-		HostDiag = (u32)readl(&reg_set->host_diag);
-		printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
-					retry, HostDiag);
-
-		if (retry++ >= 100)
-			return 1;
-
-	}
-
-	printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
-
-	writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
-
-	ssleep(10);
-
-	HostDiag = (u32)readl(&reg_set->host_diag);
-	while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
-		msleep(100);
-		HostDiag = (u32)readl(&reg_set->host_diag);
-		printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
-				retry, HostDiag);
-
-		if (retry++ >= 1000)
-			return 1;
-
-	}
-	return 0;
-}
-
-/**
- * megasas_check_reset_gen2 -	For controller reset check
- * @regs:				MFI register set
- */
-static int
-megasas_check_reset_gen2(struct megasas_instance *instance,
-		struct megasas_register_set __iomem *regs)
-{
-	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
-		return 1;
-	}
-
-	return 0;
-}
-
-static struct megasas_instance_template megasas_instance_template_gen2 = {
-
-	.fire_cmd = megasas_fire_cmd_gen2,
-	.enable_intr = megasas_enable_intr_gen2,
-	.disable_intr = megasas_disable_intr_gen2,
-	.clear_intr = megasas_clear_intr_gen2,
-	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
-	.adp_reset = megasas_adp_reset_gen2,
-	.check_reset = megasas_check_reset_gen2,
-};
-
-/**
-*	This is the end of the set of functions & definitions
-*       specific to gen2 (deviceid : 0x78, 0x79) controllers
-*/
-
-/**
- * megasas_issue_polled -	Issues a polling command
- * @instance:			Adapter soft state
- * @cmd:			Command packet to be issued 
- *
- * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
- */
-static int
-megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
-	int i;
-	u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
-
-	struct megasas_header *frame_hdr = &cmd->frame->hdr;
-
-	frame_hdr->cmd_status = 0xFF;
-	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
-
-	/*
-	 * Issue the frame using inbound queue port
-	 */
-	instance->instancet->fire_cmd(instance,
-			cmd->frame_phys_addr, 0, instance->reg_set);
-
-	/*
-	 * Wait for cmd_status to change
-	 */
-	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
-		rmb();
-		msleep(1);
-	}
-
-	if (frame_hdr->cmd_status == 0xff)
-		return -ETIME;
-
-	return 0;
-}
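The polled path above pre-sets cmd_status to the 0xFF sentinel, fires the frame, and then polls once per millisecond until the firmware overwrites the status or the timeout expires. A minimal userspace sketch of that wait pattern (fake_firmware() and the 10-second timeout are invented for the example, not part of the driver):

#include <stdio.h>
#include <unistd.h>

static volatile unsigned char cmd_status = 0xFF;	/* sentinel: not completed yet */

static void fake_firmware(int tick)
{
	if (tick == 5)					/* pretend completion after ~5 ms */
		cmd_status = 0x00;
}

int main(void)
{
	int msecs = 10 * 1000;				/* hypothetical 10 s timeout, in ms */
	int i;

	for (i = 0; i < msecs && cmd_status == 0xFF; i++) {
		fake_firmware(i);
		usleep(1000);				/* stands in for msleep(1) */
	}
	printf("%s\n", cmd_status == 0xFF ? "timed out" : "completed");
	return 0;
}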
-
-/**
- * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
- * @instance:			Adapter soft state
- * @cmd:			Command to be issued
- *
- * This function waits on an event for the command to be returned from ISR.
- * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
- * Used to issue ioctl commands.
- */
-static int
-megasas_issue_blocked_cmd(struct megasas_instance *instance,
-			  struct megasas_cmd *cmd)
-{
-	cmd->cmd_status = ENODATA;
-
-	instance->instancet->fire_cmd(instance,
-			cmd->frame_phys_addr, 0, instance->reg_set);
-
-	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
-
-	return 0;
-}
-
-/**
- * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
- * @instance:				Adapter soft state
- * @cmd_to_abort:			Previously issued cmd to be aborted
- *
- * MFI firmware can abort a previously issued AEN command (automatic event
- * notification). The megasas_issue_blocked_abort_cmd() issues such abort
- * cmd and waits for return status.
- * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
- */
-static int
-megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
-				struct megasas_cmd *cmd_to_abort)
-{
-	struct megasas_cmd *cmd;
-	struct megasas_abort_frame *abort_fr;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd)
-		return -1;
-
-	abort_fr = &cmd->frame->abort;
-
-	/*
-	 * Prepare and issue the abort frame
-	 */
-	abort_fr->cmd = MFI_CMD_ABORT;
-	abort_fr->cmd_status = 0xFF;
-	abort_fr->flags = 0;
-	abort_fr->abort_context = cmd_to_abort->index;
-	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
-	abort_fr->abort_mfi_phys_addr_hi = 0;
-
-	cmd->sync_cmd = 1;
-	cmd->cmd_status = 0xFF;
-
-	instance->instancet->fire_cmd(instance,
-			cmd->frame_phys_addr, 0, instance->reg_set);
-
-	/*
-	 * Wait for this cmd to complete
-	 */
-	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
-	cmd->sync_cmd = 0;
-
-	megasas_return_cmd(instance, cmd);
-	return 0;
-}
-
-/**
- * megasas_make_sgl32 -	Prepares 32-bit SGL
- * @instance:		Adapter soft state
- * @scp:		SCSI command from the mid-layer
- * @mfi_sgl:		SGL to be filled in
- *
- * If successful, this function returns the number of SG elements. Otherwise,
- * it returns -1.
- */
-static int
-megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
-		   union megasas_sgl *mfi_sgl)
-{
-	int i;
-	int sge_count;
-	struct scatterlist *os_sgl;
-
-	sge_count = scsi_dma_map(scp);
-	BUG_ON(sge_count < 0);
-
-	if (sge_count) {
-		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-			mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
-			mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
-		}
-	}
-	return sge_count;
-}
-
-/**
- * megasas_make_sgl64 -	Prepares 64-bit SGL
- * @instance:		Adapter soft state
- * @scp:		SCSI command from the mid-layer
- * @mfi_sgl:		SGL to be filled in
- *
- * If successful, this function returns the number of SG elements. Otherwise,
- * it returns -1.
- */
-static int
-megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
-		   union megasas_sgl *mfi_sgl)
-{
-	int i;
-	int sge_count;
-	struct scatterlist *os_sgl;
-
-	sge_count = scsi_dma_map(scp);
-	BUG_ON(sge_count < 0);
-
-	if (sge_count) {
-		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-			mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
-			mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
-		}
-	}
-	return sge_count;
-}
-
-/**
- * megasas_make_sgl_skinny - Prepares IEEE SGL
- * @instance:           Adapter soft state
- * @scp:                SCSI command from the mid-layer
- * @mfi_sgl:            SGL to be filled in
- *
- * If successful, this function returns the number of SG elements. Otherwise,
- * it returns -1.
- */
-static int
-megasas_make_sgl_skinny(struct megasas_instance *instance,
-		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
-{
-	int i;
-	int sge_count;
-	struct scatterlist *os_sgl;
-
-	sge_count = scsi_dma_map(scp);
-
-	if (sge_count) {
-		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
-			mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
-			mfi_sgl->sge_skinny[i].phys_addr =
-						sg_dma_address(os_sgl);
-			mfi_sgl->sge_skinny[i].flag = 0;
-		}
-	}
-	return sge_count;
-}
-
- /**
- * megasas_get_frame_count - Computes the number of frames
- * @frame_type		: type of frame- io or pthru frame
- * @sge_count		: number of sg elements
- *
- * Returns the number of frames required for the given number of SG elements (sge_count)
- */
-
-static u32 megasas_get_frame_count(struct megasas_instance *instance,
-			u8 sge_count, u8 frame_type)
-{
-	int num_cnt;
-	int sge_bytes;
-	u32 sge_sz;
-	u32 frame_count=0;
-
-	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
-	    sizeof(struct megasas_sge32);
-
-	if (instance->flag_ieee) {
-		sge_sz = sizeof(struct megasas_sge_skinny);
-	}
-
-	/*
-	 * Main frame can contain 2 SGEs for 64-bit SGLs and
-	 * 3 SGEs for 32-bit SGLs for ldio &
-	 * 1 SGE for 64-bit SGLs and
-	 * 2 SGEs for 32-bit SGLs for pthru frame
-	 */
-	if (unlikely(frame_type == PTHRU_FRAME)) {
-		if (instance->flag_ieee == 1) {
-			num_cnt = sge_count - 1;
-		} else if (IS_DMA64)
-			num_cnt = sge_count - 1;
-		else
-			num_cnt = sge_count - 2;
-	} else {
-		if (instance->flag_ieee == 1) {
-			num_cnt = sge_count - 1;
-		} else if (IS_DMA64)
-			num_cnt = sge_count - 2;
-		else
-			num_cnt = sge_count - 3;
-	}
-
-	if(num_cnt>0){
-		sge_bytes = sge_sz * num_cnt;
-
-		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
-		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
-	}
-	/* Main frame */
-	frame_count +=1;
-
-	if (frame_count > 7)
-		frame_count = 8;
-	return frame_count;
-}
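The arithmetic above reserves the main frame first and packs any spill-over SGEs into extra frames, capping the total at 8. A standalone sketch of the same calculation (the 64-byte frame size, 16-byte SGE size and 2-SGE main-frame capacity are illustrative assumptions, not values taken from this hunk):

#include <stdio.h>

/* Sketch of the frame-count math in megasas_get_frame_count() above. */
static unsigned int frame_count(int sge_count, int sge_sz,
				int sges_in_main_frame, int frame_sz)
{
	unsigned int count = 1;				/* the main frame itself */
	int spill = sge_count - sges_in_main_frame;	/* SGEs that do not fit */

	if (spill > 0)
		count += (spill * sge_sz + frame_sz - 1) / frame_sz;

	if (count > 7)					/* cap at 8 frames, as in the driver */
		count = 8;
	return count;
}

int main(void)
{
	/* e.g. a 10-SGE 64-bit LDIO: 8 spilled SGEs of 16 bytes -> 2 extra frames */
	printf("%u\n", frame_count(10, 16, 2, 64));	/* prints 3 */
	return 0;
}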
-
-/**
- * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
- * @instance:		Adapter soft state
- * @scp:		SCSI command
- * @cmd:		Command to be prepared in
- *
- * This function prepares CDB commands. These are typically pass-through
- * commands to the devices.
- */
-static int
-megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
-		   struct megasas_cmd *cmd)
-{
-	u32 is_logical;
-	u32 device_id;
-	u16 flags = 0;
-	struct megasas_pthru_frame *pthru;
-
-	is_logical = MEGASAS_IS_LOGICAL(scp);
-	device_id = MEGASAS_DEV_INDEX(instance, scp);
-	pthru = (struct megasas_pthru_frame *)cmd->frame;
-
-	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
-		flags = MFI_FRAME_DIR_WRITE;
-	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
-		flags = MFI_FRAME_DIR_READ;
-	else if (scp->sc_data_direction == PCI_DMA_NONE)
-		flags = MFI_FRAME_DIR_NONE;
-
-	if (instance->flag_ieee == 1) {
-		flags |= MFI_FRAME_IEEE;
-	}
-
-	/*
-	 * Prepare the DCDB frame
-	 */
-	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
-	pthru->cmd_status = 0x0;
-	pthru->scsi_status = 0x0;
-	pthru->target_id = device_id;
-	pthru->lun = scp->device->lun;
-	pthru->cdb_len = scp->cmd_len;
-	pthru->timeout = 0;
-	pthru->pad_0 = 0;
-	pthru->flags = flags;
-	pthru->data_xfer_len = scsi_bufflen(scp);
-
-	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
-
-	/*
-	* If the command is for the tape device, set the
-	* pthru timeout to the os layer timeout value.
-	*/
-	if (scp->device->type == TYPE_TAPE) {
-		if ((scp->request->timeout / HZ) > 0xFFFF)
-			pthru->timeout = 0xFFFF;
-		else
-			pthru->timeout = scp->request->timeout / HZ;
-	}
-
-	/*
-	 * Construct SGL
-	 */
-	if (instance->flag_ieee == 1) {
-		pthru->flags |= MFI_FRAME_SGL64;
-		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
-						      &pthru->sgl);
-	} else if (IS_DMA64) {
-		pthru->flags |= MFI_FRAME_SGL64;
-		pthru->sge_count = megasas_make_sgl64(instance, scp,
-						      &pthru->sgl);
-	} else
-		pthru->sge_count = megasas_make_sgl32(instance, scp,
-						      &pthru->sgl);
-
-	if (pthru->sge_count > instance->max_num_sge) {
-		printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n",
-			pthru->sge_count);
-		return 0;
-	}
-
-	/*
-	 * Sense info specific
-	 */
-	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
-	pthru->sense_buf_phys_addr_hi = 0;
-	pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
-
-	/*
-	 * Compute the total number of frames this command consumes. FW uses
-	 * this number to pull sufficient number of frames from host memory.
-	 */
-	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
-							PTHRU_FRAME);
-
-	return cmd->frame_count;
-}
-
-/**
- * megasas_build_ldio -	Prepares IOs to logical devices
- * @instance:		Adapter soft state
- * @scp:		SCSI command
- * @cmd:		Command to be prepared
- *
- * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
- */
-static int
-megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
-		   struct megasas_cmd *cmd)
-{
-	u32 device_id;
-	u8 sc = scp->cmnd[0];
-	u16 flags = 0;
-	struct megasas_io_frame *ldio;
-
-	device_id = MEGASAS_DEV_INDEX(instance, scp);
-	ldio = (struct megasas_io_frame *)cmd->frame;
-
-	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
-		flags = MFI_FRAME_DIR_WRITE;
-	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
-		flags = MFI_FRAME_DIR_READ;
-
-	if (instance->flag_ieee == 1) {
-		flags |= MFI_FRAME_IEEE;
-	}
-
-	/*
-	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
-	 */
-	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
-	ldio->cmd_status = 0x0;
-	ldio->scsi_status = 0x0;
-	ldio->target_id = device_id;
-	ldio->timeout = 0;
-	ldio->reserved_0 = 0;
-	ldio->pad_0 = 0;
-	ldio->flags = flags;
-	ldio->start_lba_hi = 0;
-	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
-
-	/*
-	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
-	 */
-	if (scp->cmd_len == 6) {
-		ldio->lba_count = (u32) scp->cmnd[4];
-		ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
-		    ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
-
-		ldio->start_lba_lo &= 0x1FFFFF;
-	}
-
-	/*
-	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
-	 */
-	else if (scp->cmd_len == 10) {
-		ldio->lba_count = (u32) scp->cmnd[8] |
-		    ((u32) scp->cmnd[7] << 8);
-		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
-		    ((u32) scp->cmnd[3] << 16) |
-		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
-	}
-
-	/*
-	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
-	 */
-	else if (scp->cmd_len == 12) {
-		ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
-		    ((u32) scp->cmnd[7] << 16) |
-		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
-
-		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
-		    ((u32) scp->cmnd[3] << 16) |
-		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
-	}
-
-	/*
-	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
-	 */
-	else if (scp->cmd_len == 16) {
-		ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
-		    ((u32) scp->cmnd[11] << 16) |
-		    ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
-
-		ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
-		    ((u32) scp->cmnd[7] << 16) |
-		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
-
-		ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
-		    ((u32) scp->cmnd[3] << 16) |
-		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
-
-	}
-
-	/*
-	 * Construct SGL
-	 */
-	if (instance->flag_ieee) {
-		ldio->flags |= MFI_FRAME_SGL64;
-		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
-					      &ldio->sgl);
-	} else if (IS_DMA64) {
-		ldio->flags |= MFI_FRAME_SGL64;
-		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
-	} else
-		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
-
-	if (ldio->sge_count > instance->max_num_sge) {
-		printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
-			ldio->sge_count);
-		return 0;
-	}
-
-	/*
-	 * Sense info specific
-	 */
-	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
-	ldio->sense_buf_phys_addr_hi = 0;
-	ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
-
-	/*
-	 * Compute the total number of frames this command consumes. FW uses
-	 * this number to pull sufficient number of frames from host memory.
-	 */
-	cmd->frame_count = megasas_get_frame_count(instance,
-			ldio->sge_count, IO_FRAME);
-
-	return cmd->frame_count;
-}
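For the READ/WRITE CDB decoding above, a small standalone example of the 10-byte case (the CDB bytes below are made up purely for illustration):

#include <stdio.h>

int main(void)
{
	/* READ(10): opcode 0x28, LBA in bytes 2-5, transfer length in bytes 7-8 */
	unsigned char cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x80, 0 };
	unsigned int lba = ((unsigned int)cdb[2] << 24) | (cdb[3] << 16) |
			   (cdb[4] << 8) | cdb[5];
	unsigned int len = (cdb[7] << 8) | cdb[8];

	printf("start_lba_lo=0x%x lba_count=%u\n", lba, len);	/* 0x123456, 128 */
	return 0;
}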
-
-/**
- * megasas_is_ldio -		Checks if the cmd is for logical drive
- * @scmd:			SCSI command
- *	
- * Called by megasas_queue_command to find out if the command to be queued
- * is a logical drive command	
- */
-static inline int megasas_is_ldio(struct scsi_cmnd *cmd)
-{
-	if (!MEGASAS_IS_LOGICAL(cmd))
-		return 0;
-	switch (cmd->cmnd[0]) {
-	case READ_10:
-	case WRITE_10:
-	case READ_12:
-	case WRITE_12:
-	case READ_6:
-	case WRITE_6:
-	case READ_16:
-	case WRITE_16:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
- /**
- * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
- *                              	in FW
- * @instance:				Adapter soft state
- */
-static inline void
-megasas_dump_pending_frames(struct megasas_instance *instance)
-{
-	struct megasas_cmd *cmd;
-	int i,n;
-	union megasas_sgl *mfi_sgl;
-	struct megasas_io_frame *ldio;
-	struct megasas_pthru_frame *pthru;
-	u32 sgcount;
-	u32 max_cmd = instance->max_fw_cmds;
-
-	printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
-	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
-	if (IS_DMA64)
-		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
-	else
-		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
-
-	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
-	for (i = 0; i < max_cmd; i++) {
-		cmd = instance->cmd_list[i];
-		if(!cmd->scmd)
-			continue;
-		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
-		if (megasas_is_ldio(cmd->scmd)){
-			ldio = (struct megasas_io_frame *)cmd->frame;
-			mfi_sgl = &ldio->sgl;
-			sgcount = ldio->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
-		}
-		else {
-			pthru = (struct megasas_pthru_frame *) cmd->frame;
-			mfi_sgl = &pthru->sgl;
-			sgcount = pthru->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
-		}
-	if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
-		for (n = 0; n < sgcount; n++){
-			if (IS_DMA64)
-				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
-			else
-				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
-			}
-		}
-		printk(KERN_ERR "\n");
-	} /*for max_cmd*/
-	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
-	for (i = 0; i < max_cmd; i++) {
-
-		cmd = instance->cmd_list[i];
-
-		if(cmd->sync_cmd == 1){
-			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
-		}
-	}
-	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
-}
-
-/**
- * megasas_queue_command -	Queue entry point
- * @scmd:			SCSI command to be queued
- * @done:			Callback entry point
- */
-static int
-megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
-{
-	u32 frame_count;
-	struct megasas_cmd *cmd;
-	struct megasas_instance *instance;
-	unsigned long flags;
-
-	instance = (struct megasas_instance *)
-	    scmd->device->host->hostdata;
-
-	if (instance->issuepend_done == 0)
-		return SCSI_MLQUEUE_HOST_BUSY;
-
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		return SCSI_MLQUEUE_HOST_BUSY;
-	}
-
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-	scmd->scsi_done = done;
-	scmd->result = 0;
-
-	if (MEGASAS_IS_LOGICAL(scmd) &&
-	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
-		scmd->result = DID_BAD_TARGET << 16;
-		goto out_done;
-	}
-
-	switch (scmd->cmnd[0]) {
-	case SYNCHRONIZE_CACHE:
-		/*
-		 * FW takes care of flush cache on its own
-		 * No need to send it down
-		 */
-		scmd->result = DID_OK << 16;
-		goto out_done;
-	default:
-		break;
-	}
-
-	cmd = megasas_get_cmd(instance);
-	if (!cmd)
-		return SCSI_MLQUEUE_HOST_BUSY;
-
-	/*
-	 * Logical drive command
-	 */
-	if (megasas_is_ldio(scmd))
-		frame_count = megasas_build_ldio(instance, scmd, cmd);
-	else
-		frame_count = megasas_build_dcdb(instance, scmd, cmd);
-
-	if (!frame_count)
-		goto out_return_cmd;
-
-	cmd->scmd = scmd;
-	scmd->SCp.ptr = (char *)cmd;
-
-	/*
-	 * Issue the command to the FW
-	 */
-	atomic_inc(&instance->fw_outstanding);
-
-	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
-				cmd->frame_count-1, instance->reg_set);
-	/*
-	 * Check if we have pend cmds to be completed
-	 */
-	if (poll_mode_io && atomic_read(&instance->fw_outstanding))
-		tasklet_schedule(&instance->isr_tasklet);
-
-
-	return 0;
-
- out_return_cmd:
-	megasas_return_cmd(instance, cmd);
- out_done:
-	done(scmd);
-	return 0;
-}
-
-static DEF_SCSI_QCMD(megasas_queue_command)
-
-static struct megasas_instance *megasas_lookup_instance(u16 host_no)
-{
-	int i;
-
-	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-
-		if ((megasas_mgmt_info.instance[i]) &&
-		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
-			return megasas_mgmt_info.instance[i];
-	}
-
-	return NULL;
-}
-
-static int megasas_slave_configure(struct scsi_device *sdev)
-{
-	u16             pd_index = 0;
-	struct  megasas_instance *instance ;
-
-	instance = megasas_lookup_instance(sdev->host->host_no);
-
-	/*
-	* Don't export physical disk devices to the disk driver.
-	*
-	* FIXME: Currently we don't export them to the midlayer at all.
-	*        That will be fixed once LSI engineers have audited the
-	*        firmware for possible issues.
-	*/
-	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
-				sdev->type == TYPE_DISK) {
-		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
-								sdev->id;
-		if (instance->pd_list[pd_index].driveState ==
-						MR_PD_STATE_SYSTEM) {
-			blk_queue_rq_timeout(sdev->request_queue,
-				MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
-			return 0;
-		}
-		return -ENXIO;
-	}
-
-	/*
-	* The RAID firmware may require extended timeouts.
-	*/
-	blk_queue_rq_timeout(sdev->request_queue,
-		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
-	return 0;
-}
-
-static int megasas_slave_alloc(struct scsi_device *sdev)
-{
-	u16             pd_index = 0;
-	struct megasas_instance *instance ;
-	instance = megasas_lookup_instance(sdev->host->host_no);
-	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
-				(sdev->type == TYPE_DISK)) {
-		/*
-		 * Open the OS scan to the SYSTEM PD
-		 */
-		pd_index =
-			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
-			sdev->id;
-		if ((instance->pd_list[pd_index].driveState ==
-					MR_PD_STATE_SYSTEM) &&
-			(instance->pd_list[pd_index].driveType ==
-						TYPE_DISK)) {
-			return 0;
-		}
-		return -ENXIO;
-	}
-	return 0;
-}
-
-static void megaraid_sas_kill_hba(struct megasas_instance *instance)
-{
-	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-		writel(MFI_STOP_ADP,
-			&instance->reg_set->reserved_0[0]);
-	} else {
-		writel(MFI_STOP_ADP,
-			&instance->reg_set->inbound_doorbell);
-	}
-}
-
-/**
- * megasas_complete_cmd_dpc -	Completes commands returned by the FW
- * @instance_addr:			Address of adapter soft state
- *
- * Tasklet to complete cmds
- */
-static void megasas_complete_cmd_dpc(unsigned long instance_addr)
-{
-	u32 producer;
-	u32 consumer;
-	u32 context;
-	struct megasas_cmd *cmd;
-	struct megasas_instance *instance =
-				(struct megasas_instance *)instance_addr;
-	unsigned long flags;
-
-	/* If we have already declared the adapter dead, do not complete cmds */
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
-		return;
-
-	spin_lock_irqsave(&instance->completion_lock, flags);
-
-	producer = *instance->producer;
-	consumer = *instance->consumer;
-
-	while (consumer != producer) {
-		context = instance->reply_queue[consumer];
-		if (context >= instance->max_fw_cmds) {
-			printk(KERN_ERR "Unexpected context value %x\n",
-				context);
-			BUG();
-		}
-
-		cmd = instance->cmd_list[context];
-
-		megasas_complete_cmd(instance, cmd, DID_OK);
-
-		consumer++;
-		if (consumer == (instance->max_fw_cmds + 1)) {
-			consumer = 0;
-		}
-	}
-
-	*instance->consumer = producer;
-
-	spin_unlock_irqrestore(&instance->completion_lock, flags);
-
-	/*
-	 * Check if we can restore can_queue
-	 */
-	if (instance->flag & MEGASAS_FW_BUSY
-		&& time_after(jiffies, instance->last_time + 5 * HZ)
-		&& atomic_read(&instance->fw_outstanding) < 17) {
-
-		spin_lock_irqsave(instance->host->host_lock, flags);
-		instance->flag &= ~MEGASAS_FW_BUSY;
-		if ((instance->pdev->device ==
-			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-			(instance->pdev->device ==
-			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-			instance->host->can_queue =
-				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
-		} else
-			instance->host->can_queue =
-				instance->max_fw_cmds - MEGASAS_INT_CMDS;
-
-		spin_unlock_irqrestore(instance->host->host_lock, flags);
-	}
-}
-
-static void
-megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
-
-static void
-process_fw_state_change_wq(struct work_struct *work);
-
-void megasas_do_ocr(struct megasas_instance *instance)
-{
-	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
-	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
-	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
-		*instance->consumer     = MEGASAS_ADPRESET_INPROG_SIGN;
-	}
-	instance->instancet->disable_intr(instance->reg_set);
-	instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
-	instance->issuepend_done = 0;
-
-	atomic_set(&instance->fw_outstanding, 0);
-	megasas_internal_reset_defer_cmds(instance);
-	process_fw_state_change_wq(&instance->work_init);
-}
-
-/**
- * megasas_wait_for_outstanding -	Wait for all outstanding cmds
- * @instance:				Adapter soft state
- *
- * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
- * complete all its outstanding commands. Returns error if one or more IOs
- * are pending after this time period. It also marks the controller dead.
- */
-static int megasas_wait_for_outstanding(struct megasas_instance *instance)
-{
-	int i;
-	u32 reset_index;
-	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
-	u8 adprecovery;
-	unsigned long flags;
-	struct list_head clist_local;
-	struct megasas_cmd *reset_cmd;
-	u32 fw_state;
-	u8 kill_adapter_flag;
-
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	adprecovery = instance->adprecovery;
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-	if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
-
-		INIT_LIST_HEAD(&clist_local);
-		spin_lock_irqsave(&instance->hba_lock, flags);
-		list_splice_init(&instance->internal_reset_pending_q,
-				&clist_local);
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-		printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
-		for (i = 0; i < wait_time; i++) {
-			msleep(1000);
-			spin_lock_irqsave(&instance->hba_lock, flags);
-			adprecovery = instance->adprecovery;
-			spin_unlock_irqrestore(&instance->hba_lock, flags);
-			if (adprecovery == MEGASAS_HBA_OPERATIONAL)
-				break;
-		}
-
-		if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
-			printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
-			spin_lock_irqsave(&instance->hba_lock, flags);
-			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
-			spin_unlock_irqrestore(&instance->hba_lock, flags);
-			return FAILED;
-		}
-
-		reset_index	= 0;
-		while (!list_empty(&clist_local)) {
-			reset_cmd	= list_entry((&clist_local)->next,
-						struct megasas_cmd, list);
-			list_del_init(&reset_cmd->list);
-			if (reset_cmd->scmd) {
-				reset_cmd->scmd->result = DID_RESET << 16;
-				printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n",
-					reset_index, reset_cmd,
-					reset_cmd->scmd->cmnd[0],
-					reset_cmd->scmd->serial_number);
-
-				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
-				megasas_return_cmd(instance, reset_cmd);
-			} else if (reset_cmd->sync_cmd) {
-				printk(KERN_NOTICE "megasas:%p synch cmds"
-						"reset queue\n",
-						reset_cmd);
-
-				reset_cmd->cmd_status = ENODATA;
-				instance->instancet->fire_cmd(instance,
-						reset_cmd->frame_phys_addr,
-						0, instance->reg_set);
-			} else {
-				printk(KERN_NOTICE "megasas: %p unexpected"
-					"cmds lst\n",
-					reset_cmd);
-			}
-			reset_index++;
-		}
-
-		return SUCCESS;
-	}
-
-	for (i = 0; i < wait_time; i++) {
-
-		int outstanding = atomic_read(&instance->fw_outstanding);
-
-		if (!outstanding)
-			break;
-
-		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
-			       "commands to complete\n",i,outstanding);
-			/*
-			 * Call the cmd completion routine. Cmds are to be
-			 * completed directly without depending on the ISR.
-			 */
-			megasas_complete_cmd_dpc((unsigned long)instance);
-		}
-
-		msleep(1000);
-	}
-
-	i = 0;
-	kill_adapter_flag = 0;
-	do {
-		fw_state = instance->instancet->read_fw_status_reg(
-					instance->reg_set) & MFI_STATE_MASK;
-		if ((fw_state == MFI_STATE_FAULT) &&
-			(instance->disableOnlineCtrlReset == 0)) {
-			if (i == 3) {
-				kill_adapter_flag = 2;
-				break;
-			}
-			megasas_do_ocr(instance);
-			kill_adapter_flag = 1;
-
-			/* wait for 1 secs to let FW finish the pending cmds */
-			msleep(1000);
-		}
-		i++;
-	} while (i <= 3);
-
-	if (atomic_read(&instance->fw_outstanding) &&
-					!kill_adapter_flag) {
-		if (instance->disableOnlineCtrlReset == 0) {
-
-			megasas_do_ocr(instance);
-
-			/* wait for 5 secs to let FW finish the pending cmds */
-			for (i = 0; i < wait_time; i++) {
-				int outstanding =
-					atomic_read(&instance->fw_outstanding);
-				if (!outstanding)
-					return SUCCESS;
-				msleep(1000);
-			}
-		}
-	}
-
-	if (atomic_read(&instance->fw_outstanding) ||
-					(kill_adapter_flag == 2)) {
-		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
-		/*
-		* Send signal to FW to stop processing any pending cmds.
-		* The controller will be taken offline by the OS now.
-		*/
-		if ((instance->pdev->device ==
-			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-			(instance->pdev->device ==
-			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-			writel(MFI_STOP_ADP,
-				&instance->reg_set->reserved_0[0]);
-		} else {
-			writel(MFI_STOP_ADP,
-				&instance->reg_set->inbound_doorbell);
-		}
-		megasas_dump_pending_frames(instance);
-		spin_lock_irqsave(&instance->hba_lock, flags);
-		instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		return FAILED;
-	}
-
-	printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
-
-	return SUCCESS;
-}
-
-/**
- * megasas_generic_reset -	Generic reset routine
- * @scmd:			Mid-layer SCSI command
- *
- * This routine implements a generic reset handler for device, bus and host
- * reset requests. Device, bus and host specific reset handlers can use this
- * function after they do their specific tasks.
- */
-static int megasas_generic_reset(struct scsi_cmnd *scmd)
-{
-	int ret_val;
-	struct megasas_instance *instance;
-
-	instance = (struct megasas_instance *)scmd->device->host->hostdata;
-
-	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
-		 scmd->serial_number, scmd->cmnd[0], scmd->retries);
-
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		printk(KERN_ERR "megasas: cannot recover from previous reset "
-		       "failures\n");
-		return FAILED;
-	}
-
-	ret_val = megasas_wait_for_outstanding(instance);
-	if (ret_val == SUCCESS)
-		printk(KERN_NOTICE "megasas: reset successful \n");
-	else
-		printk(KERN_ERR "megasas: failed to do reset\n");
-
-	return ret_val;
-}
-
-/**
- * megasas_reset_timer - quiesce the adapter if required
- * @scmd:		scsi cmnd
- *
- * Sets the FW busy flag and reduces the host->can_queue if the
- * cmd has not been completed within the timeout period.
- */
-static enum
-blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
-{
-	struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
-	struct megasas_instance *instance;
-	unsigned long flags;
-
-	if (time_after(jiffies, scmd->jiffies_at_alloc +
-				(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
-		return BLK_EH_NOT_HANDLED;
-	}
-
-	instance = cmd->instance;
-	if (!(instance->flag & MEGASAS_FW_BUSY)) {
-		/* FW is busy, throttle IO */
-		spin_lock_irqsave(instance->host->host_lock, flags);
-
-		instance->host->can_queue = 16;
-		instance->last_time = jiffies;
-		instance->flag |= MEGASAS_FW_BUSY;
-
-		spin_unlock_irqrestore(instance->host->host_lock, flags);
-	}
-	return BLK_EH_RESET_TIMER;
-}
-
-/**
- * megasas_reset_device -	Device reset handler entry point
- */
-static int megasas_reset_device(struct scsi_cmnd *scmd)
-{
-	int ret;
-
-	/*
-	 * First wait for all commands to complete
-	 */
-	ret = megasas_generic_reset(scmd);
-
-	return ret;
-}
-
-/**
- * megasas_reset_bus_host -	Bus & host reset handler entry point
- */
-static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
-{
-	int ret;
-
-	/*
-	 * First wait for all commands to complete
-	 */
-	ret = megasas_generic_reset(scmd);
-
-	return ret;
-}
-
-/**
- * megasas_bios_param - Returns disk geometry for a disk
- * @sdev: 		device handle
- * @bdev:		block device
- * @capacity:		drive capacity
- * @geom:		geometry parameters
- */
-static int
-megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
-		 sector_t capacity, int geom[])
-{
-	int heads;
-	int sectors;
-	sector_t cylinders;
-	unsigned long tmp;
-	/* Default heads (64) & sectors (32) */
-	heads = 64;
-	sectors = 32;
-
-	tmp = heads * sectors;
-	cylinders = capacity;
-
-	sector_div(cylinders, tmp);
-
-	/*
-	 * Handle extended translation size for logical drives > 1Gb
-	 */
-
-	if (capacity >= 0x200000) {
-		heads = 255;
-		sectors = 63;
-		tmp = heads*sectors;
-		cylinders = capacity;
-		sector_div(cylinders, tmp);
-	}
-
-	geom[0] = heads;
-	geom[1] = sectors;
-	geom[2] = cylinders;
-
-	return 0;
-}
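A quick standalone check of the geometry translation above: drives of at least 0x200000 sectors (1 GB at 512-byte sectors) switch from 64 heads / 32 sectors to 255 / 63. The example capacity is arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 0x400000ULL;	/* 4M sectors, ~2 GB */
	int heads = 64, sectors = 32;

	if (capacity >= 0x200000) {			/* extended translation, >= 1 GB */
		heads = 255;
		sectors = 63;
	}
	printf("C/H/S = %llu/%d/%d\n",
	       capacity / (heads * sectors), heads, sectors);	/* 261/255/63 */
	return 0;
}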
-
-static void megasas_aen_polling(struct work_struct *work);
-
-/**
- * megasas_service_aen -	Processes an event notification
- * @instance:			Adapter soft state
- * @cmd:			AEN command completed by the ISR
- *
- * For AEN, driver sends a command down to FW that is held by the FW till an
- * event occurs. When an event of interest occurs, FW completes the command
- * that it was previously holding.
- *
- * This routine sends a SIGIO signal to processes that have registered with the
- * driver for AEN.
- */
-static void
-megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
-	unsigned long flags;
-	/*
-	 * Don't signal app if it is just an aborted previously registered aen
-	 */
-	if ((!cmd->abort_aen) && (instance->unload == 0)) {
-		spin_lock_irqsave(&poll_aen_lock, flags);
-		megasas_poll_wait_aen = 1;
-		spin_unlock_irqrestore(&poll_aen_lock, flags);
-		wake_up(&megasas_poll_wait);
-		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
-	}
-	else
-		cmd->abort_aen = 0;
-
-	instance->aen_cmd = NULL;
-	megasas_return_cmd(instance, cmd);
-
-	if ((instance->unload == 0) &&
-		((instance->issuepend_done == 1))) {
-		struct megasas_aen_event *ev;
-		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-		if (!ev) {
-			printk(KERN_ERR "megasas_service_aen: out of memory\n");
-		} else {
-			ev->instance = instance;
-			instance->ev = ev;
-			INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
-			schedule_delayed_work(
-				(struct delayed_work *)&ev->hotplug_work, 0);
-		}
-	}
-}
-
-/*
- * Scsi host template for megaraid_sas driver
- */
-static struct scsi_host_template megasas_template = {
-
-	.module = THIS_MODULE,
-	.name = "LSI SAS based MegaRAID driver",
-	.proc_name = "megaraid_sas",
-	.slave_configure = megasas_slave_configure,
-	.slave_alloc = megasas_slave_alloc,
-	.queuecommand = megasas_queue_command,
-	.eh_device_reset_handler = megasas_reset_device,
-	.eh_bus_reset_handler = megasas_reset_bus_host,
-	.eh_host_reset_handler = megasas_reset_bus_host,
-	.eh_timed_out = megasas_reset_timer,
-	.bios_param = megasas_bios_param,
-	.use_clustering = ENABLE_CLUSTERING,
-};
-
-/**
- * megasas_complete_int_cmd -	Completes an internal command
- * @instance:			Adapter soft state
- * @cmd:			Command to be completed
- *
- * The megasas_issue_blocked_cmd() function waits for a command to complete
- * after it issues a command. This function wakes up that waiting routine by
- * calling wake_up() on the wait queue.
- */
-static void
-megasas_complete_int_cmd(struct megasas_instance *instance,
-			 struct megasas_cmd *cmd)
-{
-	cmd->cmd_status = cmd->frame->io.cmd_status;
-
-	if (cmd->cmd_status == ENODATA) {
-		cmd->cmd_status = 0;
-	}
-	wake_up(&instance->int_cmd_wait_q);
-}
-
-/**
- * megasas_complete_abort -	Completes aborting a command
- * @instance:			Adapter soft state
- * @cmd:			Cmd that was issued to abort another cmd
- *
- * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q 
- * after it issues an abort on a previously issued command. This function 
- * wakes up all functions waiting on the same wait queue.
- */
-static void
-megasas_complete_abort(struct megasas_instance *instance,
-		       struct megasas_cmd *cmd)
-{
-	if (cmd->sync_cmd) {
-		cmd->sync_cmd = 0;
-		cmd->cmd_status = 0;
-		wake_up(&instance->abort_cmd_wait_q);
-	}
-
-	return;
-}
-
-/**
- * megasas_complete_cmd -	Completes a command
- * @instance:			Adapter soft state
- * @cmd:			Command to be completed
- * @alt_status:			If non-zero, use this value as status to 
- * 				SCSI mid-layer instead of the value returned
- * 				by the FW. This should be used if caller wants
- * 				an alternate status (as in the case of aborted
- * 				commands)
- */
-static void
-megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
-		     u8 alt_status)
-{
-	int exception = 0;
-	struct megasas_header *hdr = &cmd->frame->hdr;
-	unsigned long flags;
-
-	/* flag for the retry reset */
-	cmd->retry_for_fw_reset = 0;
-
-	if (cmd->scmd)
-		cmd->scmd->SCp.ptr = NULL;
-
-	switch (hdr->cmd) {
-
-	case MFI_CMD_PD_SCSI_IO:
-	case MFI_CMD_LD_SCSI_IO:
-
-		/*
-		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
-		 * issued either through an IO path or an IOCTL path. If it
-		 * was via IOCTL, we will send it to internal completion.
-		 */
-		if (cmd->sync_cmd) {
-			cmd->sync_cmd = 0;
-			megasas_complete_int_cmd(instance, cmd);
-			break;
-		}
-
-	case MFI_CMD_LD_READ:
-	case MFI_CMD_LD_WRITE:
-
-		if (alt_status) {
-			cmd->scmd->result = alt_status << 16;
-			exception = 1;
-		}
-
-		if (exception) {
-
-			atomic_dec(&instance->fw_outstanding);
-
-			scsi_dma_unmap(cmd->scmd);
-			cmd->scmd->scsi_done(cmd->scmd);
-			megasas_return_cmd(instance, cmd);
-
-			break;
-		}
-
-		switch (hdr->cmd_status) {
-
-		case MFI_STAT_OK:
-			cmd->scmd->result = DID_OK << 16;
-			break;
-
-		case MFI_STAT_SCSI_IO_FAILED:
-		case MFI_STAT_LD_INIT_IN_PROGRESS:
-			cmd->scmd->result =
-			    (DID_ERROR << 16) | hdr->scsi_status;
-			break;
-
-		case MFI_STAT_SCSI_DONE_WITH_ERROR:
-
-			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
-
-			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
-				memset(cmd->scmd->sense_buffer, 0,
-				       SCSI_SENSE_BUFFERSIZE);
-				memcpy(cmd->scmd->sense_buffer, cmd->sense,
-				       hdr->sense_len);
-
-				cmd->scmd->result |= DRIVER_SENSE << 24;
-			}
-
-			break;
-
-		case MFI_STAT_LD_OFFLINE:
-		case MFI_STAT_DEVICE_NOT_FOUND:
-			cmd->scmd->result = DID_BAD_TARGET << 16;
-			break;
-
-		default:
-			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
-			       hdr->cmd_status);
-			cmd->scmd->result = DID_ERROR << 16;
-			break;
-		}
-
-		atomic_dec(&instance->fw_outstanding);
-
-		scsi_dma_unmap(cmd->scmd);
-		cmd->scmd->scsi_done(cmd->scmd);
-		megasas_return_cmd(instance, cmd);
-
-		break;
-
-	case MFI_CMD_SMP:
-	case MFI_CMD_STP:
-	case MFI_CMD_DCMD:
-		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
-			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
-			spin_lock_irqsave(&poll_aen_lock, flags);
-			megasas_poll_wait_aen = 0;
-			spin_unlock_irqrestore(&poll_aen_lock, flags);
-		}
-
-		/*
-		 * See if we got an event notification
-		 */
-		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
-			megasas_service_aen(instance, cmd);
-		else
-			megasas_complete_int_cmd(instance, cmd);
-
-		break;
-
-	case MFI_CMD_ABORT:
-		/*
-		 * Cmd issued to abort another cmd returned
-		 */
-		megasas_complete_abort(instance, cmd);
-		break;
-
-	default:
-		printk("megasas: Unknown command completed! [0x%X]\n",
-		       hdr->cmd);
-		break;
-	}
-}
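
The switch above packs the completion status handed to the SCSI mid-layer into a single 32-bit result: the host byte (DID_*) lands in bits 16-23, the target's SCSI status in bits 0-7, and DRIVER_SENSE in the driver byte whenever sense data is copied back. A minimal userspace sketch of that packing, with the relevant constants redefined locally for illustration:

#include <stdint.h>
#include <stdio.h>

#define DID_OK                   0x00	/* host byte: no error */
#define DRIVER_SENSE             0x08	/* driver byte: sense data is valid */
#define SAM_STAT_CHECK_CONDITION 0x02	/* SCSI status byte */

static uint32_t make_result(uint8_t host_byte, uint8_t driver_byte,
			    uint8_t scsi_status)
{
	return ((uint32_t)driver_byte << 24) |
	       ((uint32_t)host_byte << 16) |
	       scsi_status;
}

int main(void)
{
	/* completion with valid sense data, as in the CHECK_CONDITION branch */
	uint32_t result = make_result(DID_OK, DRIVER_SENSE,
				      SAM_STAT_CHECK_CONDITION);

	printf("result=0x%08x host=0x%02x status=0x%02x\n",
	       (unsigned int)result,
	       (unsigned int)((result >> 16) & 0xff),
	       (unsigned int)(result & 0xff));
	return 0;
}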
-
-/**
- * megasas_issue_pending_cmds_again -	issue all pending cmds
- *                              	in FW again because of the fw reset
- * @instance:				Adapter soft state
- */
-static inline void
-megasas_issue_pending_cmds_again(struct megasas_instance *instance)
-{
-	struct megasas_cmd *cmd;
-	struct list_head clist_local;
-	union megasas_evt_class_locale class_locale;
-	unsigned long flags;
-	u32 seq_num;
-
-	INIT_LIST_HEAD(&clist_local);
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-	while (!list_empty(&clist_local)) {
-		cmd	= list_entry((&clist_local)->next,
-					struct megasas_cmd, list);
-		list_del_init(&cmd->list);
-
-		if (cmd->sync_cmd || cmd->scmd) {
-			printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d "
-				"detected to be pending during HBA reset.\n",
-					cmd, cmd->scmd, cmd->sync_cmd);
-
-			cmd->retry_for_fw_reset++;
-
-			if (cmd->retry_for_fw_reset == 3) {
-				printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d "
-					"was tried multiple times during reset. "
-					"Shutting down the HBA\n",
-					cmd, cmd->scmd, cmd->sync_cmd);
-				megaraid_sas_kill_hba(instance);
-
-				instance->adprecovery =
-						MEGASAS_HW_CRITICAL_ERROR;
-				return;
-			}
-		}
-
-		if (cmd->sync_cmd == 1) {
-			if (cmd->scmd) {
-				printk(KERN_NOTICE "megaraid_sas: unexpected "
-					"cmd attached to internal command!\n");
-			}
-			printk(KERN_NOTICE "megasas: %p synchronous cmd "
-						"on the internal reset queue, "
-						"issue it again.\n", cmd);
-			cmd->cmd_status = ENODATA;
-			instance->instancet->fire_cmd(instance,
-							cmd->frame_phys_addr ,
-							0, instance->reg_set);
-		} else if (cmd->scmd) {
-			printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx "
-			"detected on the internal queue, issue again.\n",
-			cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number);
-
-			atomic_inc(&instance->fw_outstanding);
-			instance->instancet->fire_cmd(instance,
-					cmd->frame_phys_addr,
-					cmd->frame_count-1, instance->reg_set);
-		} else {
-			printk(KERN_NOTICE "megasas: %p unexpected cmd on the "
-				"internal reset defer list during re-issue!!\n",
-				cmd);
-		}
-	}
-
-	if (instance->aen_cmd) {
-		printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
-		megasas_return_cmd(instance, instance->aen_cmd);
-
-		instance->aen_cmd	= NULL;
-	}
-
-	/*
-	 * Initiate AEN (Asynchronous Event Notification)
-	 */
-	seq_num = instance->last_seq_num;
-	class_locale.members.reserved = 0;
-	class_locale.members.locale = MR_EVT_LOCALE_ALL;
-	class_locale.members.class = MR_EVT_CLASS_DEBUG;
-
-	megasas_register_aen(instance, seq_num, class_locale.word);
-}
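
The reissue path above relies on a common pattern: splice the shared pending queue onto a private list while holding hba_lock, then walk the private list with the lock dropped so the reissue work is not done under a spinlock. A hedged kernel-style sketch of that pattern (the struct and function names here are illustrative, not the driver's own):

#include <linux/list.h>
#include <linux/spinlock.h>

struct pending_item {
	struct list_head list;
	/* command payload omitted */
};

static void drain_pending(spinlock_t *lock, struct list_head *pending_q)
{
	LIST_HEAD(local_q);
	struct pending_item *item, *tmp;
	unsigned long flags;

	/* take a private snapshot of the shared queue under the lock */
	spin_lock_irqsave(lock, flags);
	list_splice_init(pending_q, &local_q);
	spin_unlock_irqrestore(lock, flags);

	/* walk the snapshot with the lock dropped */
	list_for_each_entry_safe(item, tmp, &local_q, list) {
		list_del_init(&item->list);
		/* reissue or complete 'item' here */
	}
}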
-
-/**
- * megasas_internal_reset_defer_cmds -	Move internal-reset pending commands to a deferred queue
- * @instance:				Adapter soft state
- *
- * We move the commands pending at internal reset time to a
- * pending queue. This queue would be flushed after successful
- * completion of the internal reset sequence. If the internal reset
- * did not complete in time, the kernel reset handler would flush
- * these commands.
- */
-static void
-megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
-{
-	struct megasas_cmd *cmd;
-	int i;
-	u32 max_cmd = instance->max_fw_cmds;
-	u32 defer_index;
-	unsigned long flags;
-
-	defer_index     = 0;
-	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
-	for (i = 0; i < max_cmd; i++) {
-		cmd = instance->cmd_list[i];
-		if (cmd->sync_cmd == 1 || cmd->scmd) {
-			printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p "
-					"on the defer queue as internal\n",
-				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
-
-			if (!list_empty(&cmd->list)) {
-				printk(KERN_NOTICE "megaraid_sas: ERROR while"
-					" moving this cmd:%p, %d %p, it was "
-					"discovered on some list?\n",
-					cmd, cmd->sync_cmd, cmd->scmd);
-
-				list_del_init(&cmd->list);
-			}
-			defer_index++;
-			list_add_tail(&cmd->list,
-				&instance->internal_reset_pending_q);
-		}
-	}
-	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
-}
-
-
-static void
-process_fw_state_change_wq(struct work_struct *work)
-{
-	struct megasas_instance *instance =
-		container_of(work, struct megasas_instance, work_init);
-	u32 wait;
-	unsigned long flags;
-
-	if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
-		printk(KERN_NOTICE "megaraid_sas: error, unexpected recovery state %x\n",
-				instance->adprecovery);
-		return ;
-	}
-
-	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
-		printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault "
-					"state, restarting it...\n");
-
-		instance->instancet->disable_intr(instance->reg_set);
-		atomic_set(&instance->fw_outstanding, 0);
-
-		atomic_set(&instance->fw_reset_no_pci_access, 1);
-		instance->instancet->adp_reset(instance, instance->reg_set);
-		atomic_set(&instance->fw_reset_no_pci_access, 0 );
-
-		printk(KERN_NOTICE "megaraid_sas: FW restarted successfully, "
-					"initiating next stage...\n");
-
-		printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine, "
-					"state 2 starting...\n");
-
-		/* wait for about 30 seconds before starting the second init */
-		for (wait = 0; wait < 30; wait++) {
-			msleep(1000);
-		}
-
-		if (megasas_transition_to_ready(instance)) {
-			printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
-
-			megaraid_sas_kill_hba(instance);
-			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
-			return ;
-		}
-
-		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
-			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
-			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
-			) {
-			*instance->consumer = *instance->producer;
-		} else {
-			*instance->consumer = 0;
-			*instance->producer = 0;
-		}
-
-		megasas_issue_init_mfi(instance);
-
-		spin_lock_irqsave(&instance->hba_lock, flags);
-		instance->adprecovery	= MEGASAS_HBA_OPERATIONAL;
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		instance->instancet->enable_intr(instance->reg_set);
-
-		megasas_issue_pending_cmds_again(instance);
-		instance->issuepend_done = 1;
-	}
-	return ;
-}
-
-/**
- * megasas_deplete_reply_queue -	Processes all completed commands
- * @instance:				Adapter soft state
- * @alt_status:				Alternate status to be returned to
- * 					SCSI mid-layer instead of the status
- * 					returned by the FW
- * Note: this must be called with hba lock held
- */
-static int
-megasas_deplete_reply_queue(struct megasas_instance *instance,
-					u8 alt_status)
-{
-	u32 mfiStatus;
-	u32 fw_state;
-
-	if ((mfiStatus = instance->instancet->check_reset(instance,
-					instance->reg_set)) == 1) {
-		return IRQ_HANDLED;
-	}
-
-	if ((mfiStatus = instance->instancet->clear_intr(
-						instance->reg_set)
-						) == 0) {
-		return IRQ_NONE;
-	}
-
-	instance->mfiStatus = mfiStatus;
-
-	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
-		fw_state = instance->instancet->read_fw_status_reg(
-				instance->reg_set) & MFI_STATE_MASK;
-
-		if (fw_state != MFI_STATE_FAULT) {
-			printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
-						fw_state);
-		}
-
-		if ((fw_state == MFI_STATE_FAULT) &&
-				(instance->disableOnlineCtrlReset == 0)) {
-			printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
-
-			if ((instance->pdev->device ==
-					PCI_DEVICE_ID_LSI_SAS1064R) ||
-				(instance->pdev->device ==
-					PCI_DEVICE_ID_DELL_PERC5) ||
-				(instance->pdev->device ==
-					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
-
-				*instance->consumer =
-					MEGASAS_ADPRESET_INPROG_SIGN;
-			}
-
-
-			instance->instancet->disable_intr(instance->reg_set);
-			instance->adprecovery	= MEGASAS_ADPRESET_SM_INFAULT;
-			instance->issuepend_done = 0;
-
-			atomic_set(&instance->fw_outstanding, 0);
-			megasas_internal_reset_defer_cmds(instance);
-
-			printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
-					fw_state, instance->adprecovery);
-
-			schedule_work(&instance->work_init);
-			return IRQ_HANDLED;
-
-		} else {
-			printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
-				fw_state, instance->disableOnlineCtrlReset);
-		}
-	}
-
-	tasklet_schedule(&instance->isr_tasklet);
-	return IRQ_HANDLED;
-}
-/**
- * megasas_isr - isr entry point
- */
-static irqreturn_t megasas_isr(int irq, void *devp)
-{
-	struct megasas_instance *instance;
-	unsigned long flags;
-	irqreturn_t	rc;
-
-	if (atomic_read(
-		&(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
-		return IRQ_HANDLED;
-
-	instance = (struct megasas_instance *)devp;
-
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	rc =  megasas_deplete_reply_queue(instance, DID_OK);
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-	return rc;
-}
-
-/**
- * megasas_transition_to_ready -	Move the FW to READY state
- * @instance:				Adapter soft state
- *
- * During initialization, the FW can be in any one of several possible
- * states. If the FW is in the operational or waiting-for-handshake state,
- * the driver must take steps to bring it to the ready state. Otherwise, it
- * has to wait for the ready state.
- */
-static int
-megasas_transition_to_ready(struct megasas_instance* instance)
-{
-	int i;
-	u8 max_wait;
-	u32 fw_state;
-	u32 cur_state;
-	u32 abs_state, curr_abs_state;
-
-	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
-
-	if (fw_state != MFI_STATE_READY)
- 		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
- 		       " state\n");
-
-	while (fw_state != MFI_STATE_READY) {
-
-		abs_state =
-			instance->instancet->read_fw_status_reg(instance->reg_set);
-
-		switch (fw_state) {
-
-		case MFI_STATE_FAULT:
-
-			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
-			return -ENODEV;
-
-		case MFI_STATE_WAIT_HANDSHAKE:
-			/*
-			 * Set the CLR bit in inbound doorbell
-			 */
-			if ((instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-				(instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-
-				writel(
-				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
-				  &instance->reg_set->reserved_0[0]);
-			} else {
-				writel(
-				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
-					&instance->reg_set->inbound_doorbell);
-			}
-
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_WAIT_HANDSHAKE;
-			break;
-
-		case MFI_STATE_BOOT_MESSAGE_PENDING:
-			if ((instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-			(instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-				writel(MFI_INIT_HOTPLUG,
-				&instance->reg_set->reserved_0[0]);
-			} else
-				writel(MFI_INIT_HOTPLUG,
-					&instance->reg_set->inbound_doorbell);
-
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
-			break;
-
-		case MFI_STATE_OPERATIONAL:
-			/*
-			 * Bring it to READY state; assuming max wait 10 secs
-			 */
-			instance->instancet->disable_intr(instance->reg_set);
-			if ((instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-				(instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-				writel(MFI_RESET_FLAGS,
-					&instance->reg_set->reserved_0[0]);
-			} else
-				writel(MFI_RESET_FLAGS,
-					&instance->reg_set->inbound_doorbell);
-
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_OPERATIONAL;
-			break;
-
-		case MFI_STATE_UNDEFINED:
-			/*
-			 * This state should not last for more than 2 seconds
-			 */
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_UNDEFINED;
-			break;
-
-		case MFI_STATE_BB_INIT:
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_BB_INIT;
-			break;
-
-		case MFI_STATE_FW_INIT:
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_FW_INIT;
-			break;
-
-		case MFI_STATE_FW_INIT_2:
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_FW_INIT_2;
-			break;
-
-		case MFI_STATE_DEVICE_SCAN:
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_DEVICE_SCAN;
-			break;
-
-		case MFI_STATE_FLUSH_CACHE:
-			max_wait = MEGASAS_RESET_WAIT_TIME;
-			cur_state = MFI_STATE_FLUSH_CACHE;
-			break;
-
-		default:
-			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
-			       fw_state);
-			return -ENODEV;
-		}
-
-		/*
-		 * The cur_state should not last for more than max_wait secs
-		 */
-		for (i = 0; i < (max_wait * 1000); i++) {
-			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
-					MFI_STATE_MASK;
-			curr_abs_state =
-				instance->instancet->read_fw_status_reg(instance->reg_set);
-
-			if (abs_state == curr_abs_state) {
-				msleep(1);
-			} else
-				break;
-		}
-
-		/*
-		 * Return error if fw_state hasn't changed after max_wait
-		 */
-		if (curr_abs_state == abs_state) {
-			printk(KERN_DEBUG "FW state [%d] hasn't changed "
-			       "in %d secs\n", fw_state, max_wait);
-			return -ENODEV;
-		}
-	}
- 	printk(KERN_INFO "megasas: FW now in Ready state\n");
-
-	return 0;
-}
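
Each arm of the state machine above ends in the same wait: poll the raw status register about once per millisecond for up to max_wait seconds, and give up only if the absolute value never changes. A simplified sketch of that poll-with-timeout idiom, where read_state() stands in for instancet->read_fw_status_reg():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Poll read_state() until it returns something other than 'old_state' or
 * 'max_wait' seconds elapse.  Returns 0 on a state change, -ETIMEDOUT else.
 */
static int wait_for_state_change(u32 (*read_state)(void *ctx), void *ctx,
				 u32 old_state, unsigned int max_wait)
{
	unsigned int i;

	for (i = 0; i < max_wait * 1000; i++) {
		if (read_state(ctx) != old_state)
			return 0;
		msleep(1);
	}
	return -ETIMEDOUT;
}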
-
-/**
- * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
- * @instance:				Adapter soft state
- */
-static void megasas_teardown_frame_pool(struct megasas_instance *instance)
-{
-	int i;
-	u32 max_cmd = instance->max_fw_cmds;
-	struct megasas_cmd *cmd;
-
-	if (!instance->frame_dma_pool)
-		return;
-
-	/*
-	 * Return all frames to pool
-	 */
-	for (i = 0; i < max_cmd; i++) {
-
-		cmd = instance->cmd_list[i];
-
-		if (cmd->frame)
-			pci_pool_free(instance->frame_dma_pool, cmd->frame,
-				      cmd->frame_phys_addr);
-
-		if (cmd->sense)
-			pci_pool_free(instance->sense_dma_pool, cmd->sense,
-				      cmd->sense_phys_addr);
-	}
-
-	/*
-	 * Now destroy the pool itself
-	 */
-	pci_pool_destroy(instance->frame_dma_pool);
-	pci_pool_destroy(instance->sense_dma_pool);
-
-	instance->frame_dma_pool = NULL;
-	instance->sense_dma_pool = NULL;
-}
-
-/**
- * megasas_create_frame_pool -	Creates DMA pool for cmd frames
- * @instance:			Adapter soft state
- *
- * Each command packet has an embedded DMA memory buffer that is used for
- * filling MFI frame and the SG list that immediately follows the frame. This
- * function creates those DMA memory buffers for each command packet by using
- * PCI pool facility.
- */
-static int megasas_create_frame_pool(struct megasas_instance *instance)
-{
-	int i;
-	u32 max_cmd;
-	u32 sge_sz;
-	u32 sgl_sz;
-	u32 total_sz;
-	u32 frame_count;
-	struct megasas_cmd *cmd;
-
-	max_cmd = instance->max_fw_cmds;
-
-	/*
-	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
-	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
-	 */
-	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
-	    sizeof(struct megasas_sge32);
-
-	if (instance->flag_ieee) {
-		sge_sz = sizeof(struct megasas_sge_skinny);
-	}
-
-	/*
-	 * Calculate the number of 64-byte frames required for the SGL
-	 */
-	sgl_sz = sge_sz * instance->max_num_sge;
-	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
-	frame_count = 15;
-
-	/*
-	 * We need one extra frame for the MFI command
-	 */
-	frame_count++;
-
-	total_sz = MEGAMFI_FRAME_SIZE * frame_count;
-	/*
-	 * Use DMA pool facility provided by PCI layer
-	 */
-	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
-						   instance->pdev, total_sz, 64,
-						   0);
-
-	if (!instance->frame_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
-		return -ENOMEM;
-	}
-
-	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
-						   instance->pdev, 128, 4, 0);
-
-	if (!instance->sense_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
-
-		pci_pool_destroy(instance->frame_dma_pool);
-		instance->frame_dma_pool = NULL;
-
-		return -ENOMEM;
-	}
-
-	/*
-	 * Allocate and attach a frame to each of the commands in cmd_list.
-	 * By making cmd->index as the context instead of the &cmd, we can
-	 * always use 32bit context regardless of the architecture
-	 */
-	for (i = 0; i < max_cmd; i++) {
-
-		cmd = instance->cmd_list[i];
-
-		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
-					    GFP_KERNEL, &cmd->frame_phys_addr);
-
-		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
-					    GFP_KERNEL, &cmd->sense_phys_addr);
-
-		/*
-		 * megasas_teardown_frame_pool() takes care of freeing
-		 * whatever has been allocated
-		 */
-		if (!cmd->frame || !cmd->sense) {
-			printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
-			megasas_teardown_frame_pool(instance);
-			return -ENOMEM;
-		}
-
-		memset(cmd->frame, 0, total_sz);
-		cmd->frame->io.context = cmd->index;
-		cmd->frame->io.pad_0 = 0;
-	}
-
-	return 0;
-}
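
The sizing logic above reserves one 64-byte frame for the MFI header plus enough additional 64-byte frames to hold max_num_sge scatter-gather entries (note that the code then pins frame_count to 15 regardless of the computed value). A standalone sketch of the arithmetic, with illustrative SGE sizes:

#include <stdio.h>

#define MEGAMFI_FRAME_SIZE 64

/* bytes of DMA memory needed per command: one MFI frame plus the SGL frames */
static unsigned int frame_pool_entry_size(unsigned int sge_sz,
					  unsigned int max_num_sge)
{
	unsigned int sgl_sz = sge_sz * max_num_sge;
	unsigned int frame_count =
		(sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;

	return MEGAMFI_FRAME_SIZE * (frame_count + 1);
}

int main(void)
{
	/* e.g. 16-byte 64-bit SGEs, 80 SGEs per command */
	printf("%u bytes per command frame\n", frame_pool_entry_size(16, 80));
	return 0;
}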
-
-/**
- * megasas_free_cmds -	Free all the cmds in the free cmd pool
- * @instance:		Adapter soft state
- */
-static void megasas_free_cmds(struct megasas_instance *instance)
-{
-	int i;
-	/* First free the MFI frame pool */
-	megasas_teardown_frame_pool(instance);
-
-	/* Free all the commands in the cmd_list */
-	for (i = 0; i < instance->max_fw_cmds; i++)
-		kfree(instance->cmd_list[i]);
-
-	/* Free the cmd_list buffer itself */
-	kfree(instance->cmd_list);
-	instance->cmd_list = NULL;
-
-	INIT_LIST_HEAD(&instance->cmd_pool);
-}
-
-/**
- * megasas_alloc_cmds -	Allocates the command packets
- * @instance:		Adapter soft state
- *
- * Each command that is issued to the FW, whether IO commands from the OS or
- * internal commands like IOCTLs, are wrapped in local data structure called
- * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
- * the FW.
- *
- * Each frame has a 32-bit field called context (tag). This context is used
- * to get back the megasas_cmd from the frame when a frame gets completed in
- * the ISR. Typically the address of the megasas_cmd itself would be used as
- * the context. But we wanted to keep the differences between 32 and 64 bit
- * systems to the minimum. We always use 32 bit integers for the context. In
- * this driver, the 32 bit values are the indices into an array cmd_list.
- * This array is used only to look up the megasas_cmd given the context. The
- * free commands themselves are maintained in a linked list called cmd_pool.
- */
-static int megasas_alloc_cmds(struct megasas_instance *instance)
-{
-	int i;
-	int j;
-	u32 max_cmd;
-	struct megasas_cmd *cmd;
-
-	max_cmd = instance->max_fw_cmds;
-
-	/*
-	 * instance->cmd_list is an array of struct megasas_cmd pointers.
-	 * Allocate the dynamic array first and then allocate individual
-	 * commands.
-	 */
-	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
-
-	if (!instance->cmd_list) {
-		printk(KERN_DEBUG "megasas: out of memory\n");
-		return -ENOMEM;
-	}
-
-
-	for (i = 0; i < max_cmd; i++) {
-		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
-						GFP_KERNEL);
-
-		if (!instance->cmd_list[i]) {
-
-			for (j = 0; j < i; j++)
-				kfree(instance->cmd_list[j]);
-
-			kfree(instance->cmd_list);
-			instance->cmd_list = NULL;
-
-			return -ENOMEM;
-		}
-	}
-
-	/*
-	 * Add all the commands to command pool (instance->cmd_pool)
-	 */
-	for (i = 0; i < max_cmd; i++) {
-		cmd = instance->cmd_list[i];
-		memset(cmd, 0, sizeof(struct megasas_cmd));
-		cmd->index = i;
-		cmd->scmd = NULL;
-		cmd->instance = instance;
-
-		list_add_tail(&cmd->list, &instance->cmd_pool);
-	}
-
-	/*
-	 * Create a frame pool and assign one frame to each cmd
-	 */
-	if (megasas_create_frame_pool(instance)) {
-		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
-		megasas_free_cmds(instance);
-	}
-
-	return 0;
-}
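
As the comment above explains, the firmware is handed a 32-bit context that is simply an index into cmd_list, so the completion path looks the command up by index instead of casting a pointer, and the scheme works identically on 32- and 64-bit hosts. A minimal sketch of that lookup (names are illustrative):

#include <stdint.h>
#include <stddef.h>

struct cmd {
	uint32_t index;		/* doubles as the 32-bit firmware context */
	/* frame, scmd, list, ... omitted */
};

struct cmd_table {
	struct cmd **slots;	/* the equivalent of cmd_list */
	uint32_t nr_slots;
};

/* completion path: translate the context reported by FW back into a cmd */
static struct cmd *cmd_from_context(const struct cmd_table *t, uint32_t context)
{
	if (context >= t->nr_slots)
		return NULL;	/* bogus context, ignore it */
	return t->slots[context];
}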
-
-/*
- * megasas_get_pd_list -	Returns FW's pd_list structure
- * @instance:			Adapter soft state
- *
- * Issues an internal command (DCMD) to get the FW's controller PD
- * list structure.  This information is mainly used to find out the
- * system PDs exposed by the FW.
- */
-static int
-megasas_get_pd_list(struct megasas_instance *instance)
-{
-	int ret = 0, pd_index = 0;
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-	struct MR_PD_LIST *ci;
-	struct MR_PD_ADDRESS *pd_addr;
-	dma_addr_t ci_h = 0;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd) {
-		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
-		return -ENOMEM;
-	}
-
-	dcmd = &cmd->frame->dcmd;
-
-	ci = pci_alloc_consistent(instance->pdev,
-		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
-
-	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
-		megasas_return_cmd(instance, cmd);
-		return -ENOMEM;
-	}
-
-	memset(ci, 0, sizeof(*ci));
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
-	dcmd->mbox.b[1] = 0;
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0xFF;
-	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
-	dcmd->timeout = 0;
-	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
-	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
-
-	if (!megasas_issue_polled(instance, cmd)) {
-		ret = 0;
-	} else {
-		ret = -1;
-	}
-
-	/*
-	 * The following loop fills in the instance PD list.
-	 */
-
-	pd_addr = ci->addr;
-
-	if ( ret == 0 &&
-		(ci->count <
-		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
-
-		memset(instance->pd_list, 0,
-			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
-
-		for (pd_index = 0; pd_index < ci->count; pd_index++) {
-
-			instance->pd_list[pd_addr->deviceId].tid	=
-							pd_addr->deviceId;
-			instance->pd_list[pd_addr->deviceId].driveType	=
-							pd_addr->scsiDevType;
-			instance->pd_list[pd_addr->deviceId].driveState	=
-							MR_PD_STATE_SYSTEM;
-			pd_addr++;
-		}
-	}
-
-	pci_free_consistent(instance->pdev,
-				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
-				ci, ci_h);
-	megasas_return_cmd(instance, cmd);
-
-	return ret;
-}
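
megasas_get_pd_list() above, and megasas_get_ld_list() and megasas_get_ctrl_info() below, all build the same shape of DCMD: status preset to 0xFF, a single 32-bit SGE pointing at a coherent buffer, direction READ, and a per-query opcode and transfer length. A hedged sketch of a helper that captures the shared setup, assuming the frame definitions from the driver's header; the helper itself is illustrative, not part of the driver:

/* illustrative helper: fill the fields common to the read-style DCMDs above */
static void init_read_dcmd(struct megasas_dcmd_frame *dcmd, u32 opcode,
			   dma_addr_t buf_h, u32 buf_len)
{
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;		/* "not completed yet" */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->opcode = opcode;
	dcmd->data_xfer_len = buf_len;
	dcmd->sgl.sge32[0].phys_addr = buf_h;
	dcmd->sgl.sge32[0].length = buf_len;
}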
-
-/*
- * megasas_get_ld_list -	Returns FW's ld_list structure
- * @instance:			Adapter soft state
- *
- * Issues an internal command (DCMD) to get the FW's controller LD
- * list structure.  This information is mainly used to find out the
- * logical drives exposed by the FW.
- */
-static int
-megasas_get_ld_list(struct megasas_instance *instance)
-{
-	int ret = 0, ld_index = 0, ids = 0;
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-	struct MR_LD_LIST *ci;
-	dma_addr_t ci_h = 0;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd) {
-		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
-		return -ENOMEM;
-	}
-
-	dcmd = &cmd->frame->dcmd;
-
-	ci = pci_alloc_consistent(instance->pdev,
-				sizeof(struct MR_LD_LIST),
-				&ci_h);
-
-	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
-		megasas_return_cmd(instance, cmd);
-		return -ENOMEM;
-	}
-
-	memset(ci, 0, sizeof(*ci));
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0xFF;
-	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
-	dcmd->timeout = 0;
-	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
-	dcmd->opcode = MR_DCMD_LD_GET_LIST;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
-	dcmd->pad_0  = 0;
-
-	if (!megasas_issue_polled(instance, cmd)) {
-		ret = 0;
-	} else {
-		ret = -1;
-	}
-
-	/* the following loop fills in the instance LD ID list */
-
-	if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
-		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
-		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
-			if (ci->ldList[ld_index].state != 0) {
-				ids = ci->ldList[ld_index].ref.targetId;
-				instance->ld_ids[ids] =
-					ci->ldList[ld_index].ref.targetId;
-			}
-		}
-	}
-
-	pci_free_consistent(instance->pdev,
-				sizeof(struct MR_LD_LIST),
-				ci,
-				ci_h);
-
-	megasas_return_cmd(instance, cmd);
-	return ret;
-}
-
-/**
- * megasas_get_controller_info -	Returns FW's controller structure
- * @instance:				Adapter soft state
- * @ctrl_info:				Controller information structure
- *
- * Issues an internal command (DCMD) to get the FW's controller structure.
- * This information is mainly used to find out the maximum IO transfer per
- * command supported by the FW.
- */
-static int
-megasas_get_ctrl_info(struct megasas_instance *instance,
-		      struct megasas_ctrl_info *ctrl_info)
-{
-	int ret = 0;
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-	struct megasas_ctrl_info *ci;
-	dma_addr_t ci_h = 0;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
-		return -ENOMEM;
-	}
-
-	dcmd = &cmd->frame->dcmd;
-
-	ci = pci_alloc_consistent(instance->pdev,
-				  sizeof(struct megasas_ctrl_info), &ci_h);
-
-	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
-		megasas_return_cmd(instance, cmd);
-		return -ENOMEM;
-	}
-
-	memset(ci, 0, sizeof(*ci));
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0xFF;
-	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
-	dcmd->timeout = 0;
-	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
-	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = ci_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
-
-	if (!megasas_issue_polled(instance, cmd)) {
-		ret = 0;
-		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
-	} else {
-		ret = -1;
-	}
-
-	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
-			    ci, ci_h);
-
-	megasas_return_cmd(instance, cmd);
-	return ret;
-}
-
-/**
- * megasas_issue_init_mfi -	Initializes the FW
- * @instance:		Adapter soft state
- *
- * Issues the INIT MFI cmd
- */
-static int
-megasas_issue_init_mfi(struct megasas_instance *instance)
-{
-	u32 context;
-
-	struct megasas_cmd *cmd;
-
-	struct megasas_init_frame *init_frame;
-	struct megasas_init_queue_info *initq_info;
-	dma_addr_t init_frame_h;
-	dma_addr_t initq_info_h;
-
-	/*
-	 * Prepare an init frame. Note the init frame points to the queue info
-	 * structure. Each frame has SGL allocated after first 64 bytes. For
-	 * this frame - since we don't need any SGL - we use SGL's space as
-	 * queue info structure
-	 *
-	 * We will not get a NULL command below. We just created the pool.
-	 */
-	cmd = megasas_get_cmd(instance);
-
-	init_frame = (struct megasas_init_frame *)cmd->frame;
-	initq_info = (struct megasas_init_queue_info *)
-		((unsigned long)init_frame + 64);
-
-	init_frame_h = cmd->frame_phys_addr;
-	initq_info_h = init_frame_h + 64;
-
-	context = init_frame->context;
-	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
-	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
-	init_frame->context = context;
-
-	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
-	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
-
-	initq_info->producer_index_phys_addr_lo = instance->producer_h;
-	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
-
-	init_frame->cmd = MFI_CMD_INIT;
-	init_frame->cmd_status = 0xFF;
-	init_frame->queue_info_new_phys_addr_lo = initq_info_h;
-
-	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
-
-	/*
-	 * disable the intr before firing the init frame to FW
-	 */
-	instance->instancet->disable_intr(instance->reg_set);
-
-	/*
-	 * Issue the init frame in polled mode
-	 */
-
-	if (megasas_issue_polled(instance, cmd)) {
-		printk(KERN_ERR "megasas: Failed to init firmware\n");
-		megasas_return_cmd(instance, cmd);
-		goto fail_fw_init;
-	}
-
-	megasas_return_cmd(instance, cmd);
-
-	return 0;
-
-fail_fw_init:
-	return -EINVAL;
-}
-
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance:		Adapter soft state
- * @timer:		timer object to be initialized
- * @fn:			timer function
- * @interval:		time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
-			struct timer_list *timer,
-			void *fn, unsigned long interval)
-{
-	init_timer(timer);
-	timer->expires = jiffies + interval;
-	timer->data = (unsigned long)instance;
-	timer->function = fn;
-	add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr:	Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
-	struct megasas_instance *instance =
-			(struct megasas_instance *)instance_addr;
-
-	if (atomic_read(&instance->fw_outstanding))
-		tasklet_schedule(&instance->isr_tasklet);
-
-	/* Restart timer */
-	if (poll_mode_io)
-		mod_timer(&instance->io_completion_timer,
-			jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
-/**
- * megasas_init_mfi -	Initializes the FW
- * @instance:		Adapter soft state
- *
- * This is the main function for initializing MFI firmware.
- */
-static int megasas_init_mfi(struct megasas_instance *instance)
-{
-	u32 context_sz;
-	u32 reply_q_sz;
-	u32 max_sectors_1;
-	u32 max_sectors_2;
-	u32 tmp_sectors;
-	struct megasas_register_set __iomem *reg_set;
-	struct megasas_ctrl_info *ctrl_info;
-	/*
-	 * Map the message registers
-	 */
-	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) {
-		instance->base_addr = pci_resource_start(instance->pdev, 1);
-	} else {
-		instance->base_addr = pci_resource_start(instance->pdev, 0);
-	}
-
-	if (pci_request_selected_regions(instance->pdev,
-		pci_select_bars(instance->pdev, IORESOURCE_MEM),
-		"megasas: LSI")) {
-		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
-		return -EBUSY;
-	}
-
-	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
-
-	if (!instance->reg_set) {
-		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
-		goto fail_ioremap;
-	}
-
-	reg_set = instance->reg_set;
-
-	switch(instance->pdev->device)
-	{
-		case PCI_DEVICE_ID_LSI_SAS1078R:
-		case PCI_DEVICE_ID_LSI_SAS1078DE:
-			instance->instancet = &megasas_instance_template_ppc;
-			break;
-		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
-		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
-			instance->instancet = &megasas_instance_template_gen2;
-			break;
-		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
-		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
-			instance->instancet = &megasas_instance_template_skinny;
-			break;
-		case PCI_DEVICE_ID_LSI_SAS1064R:
-		case PCI_DEVICE_ID_DELL_PERC5:
-		default:
-			instance->instancet = &megasas_instance_template_xscale;
-			break;
-	}
-
-	/*
-	 * We expect the FW state to be READY
-	 */
-	if (megasas_transition_to_ready(instance))
-		goto fail_ready_state;
-
-	/*
-	 * Get various operational parameters from status register
-	 */
-	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
-	/*
-	 * Reduce the max supported cmds by 1. This is to ensure that the
-	 * reply_q_sz (1 more than the max cmd that driver may send)
-	 * does not exceed max cmds that the FW can support
-	 */
-	instance->max_fw_cmds = instance->max_fw_cmds-1;
-	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> 
-					0x10;
-	/*
-	 * Create a pool of commands
-	 */
-	if (megasas_alloc_cmds(instance))
-		goto fail_alloc_cmds;
-
-	/*
-	 * Allocate memory for reply queue. Length of reply queue should
-	 * be _one_ more than the maximum commands handled by the firmware.
-	 *
-	 * Note: When FW completes commands, it places the corresponding context
-	 * values in this circular reply queue. This circular queue is a fairly
-	 * typical producer-consumer queue. FW is the producer (of completed
-	 * commands) and the driver is the consumer.
-	 */
-	context_sz = sizeof(u32);
-	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
-
-	instance->reply_queue = pci_alloc_consistent(instance->pdev,
-						     reply_q_sz,
-						     &instance->reply_queue_h);
-
-	if (!instance->reply_queue) {
-		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
-		goto fail_reply_queue;
-	}
-
-	if (megasas_issue_init_mfi(instance))
-		goto fail_fw_init;
-
-	instance->fw_support_ieee = 0;
-	instance->fw_support_ieee =
-		(instance->instancet->read_fw_status_reg(reg_set) &
-		0x04000000);
-
-	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d\n",
-			instance->fw_support_ieee);
-
-	if (instance->fw_support_ieee)
-		instance->flag_ieee = 1;
-
-	/*
-	 * For passthrough support, the following call retrieves the PD list.
-	 */
-
-	memset(instance->pd_list, 0 ,
-		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
-	megasas_get_pd_list(instance);
-
-	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-	megasas_get_ld_list(instance);
-
-	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
-
-	/*
-	 * Compute the max allowed sectors per IO: The controller info has two
-	 * limits on max sectors. Driver should use the minimum of these two.
-	 *
-	 * 1 << stripe_sz_ops.min = max sectors per strip
-	 *
-	 * Note that older firmwares ( < FW ver 30) didn't report information
-	 * to calculate max_sectors_1. So the number ended up as zero always.
-	 */
-	tmp_sectors = 0;
-	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
-
-		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
-		    ctrl_info->max_strips_per_io;
-		max_sectors_2 = ctrl_info->max_request_size;
-
-		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
-		instance->disableOnlineCtrlReset =
-		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
-	}
-
-	instance->max_sectors_per_req = instance->max_num_sge *
-						PAGE_SIZE / 512;
-	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
-		instance->max_sectors_per_req = tmp_sectors;
-
-	kfree(ctrl_info);
-
-	/*
-	 * Setup tasklet for cmd completion
-	 */
-
-	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
-		(unsigned long)instance);
-
-	/* Initialize the cmd completion timer */
-	if (poll_mode_io)
-		megasas_start_timer(instance, &instance->io_completion_timer,
-				megasas_io_completion_timer,
-				MEGASAS_COMPLETION_TIMER_INTERVAL);
-	return 0;
-
-      fail_fw_init:
-
-	pci_free_consistent(instance->pdev, reply_q_sz,
-			    instance->reply_queue, instance->reply_queue_h);
-      fail_reply_queue:
-	megasas_free_cmds(instance);
-
-      fail_alloc_cmds:
-      fail_ready_state:
-	iounmap(instance->reg_set);
-
-      fail_ioremap:
-	pci_release_selected_regions(instance->pdev,
-		pci_select_bars(instance->pdev, IORESOURCE_MEM));
-
-	return -EINVAL;
-}
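
As the reply-queue comment in megasas_init_mfi() notes, completion contexts flow through a circular buffer of max_fw_cmds + 1 entries in which the firmware is the producer and the driver the consumer. A simplified userspace sketch of draining such a ring (indices and memory-ordering concerns are reduced to the bare minimum here):

#include <stdint.h>

struct reply_ring {
	volatile uint32_t *producer;	/* advanced by the firmware */
	volatile uint32_t *consumer;	/* advanced by the driver */
	uint32_t *contexts;		/* circular buffer of completed contexts */
	uint32_t nr_entries;		/* max_fw_cmds + 1 */
};

/* drain everything the producer has published; returns the number consumed */
static unsigned int drain_reply_ring(struct reply_ring *r,
				     void (*complete)(uint32_t context))
{
	uint32_t prod = *r->producer;
	uint32_t cons = *r->consumer;
	unsigned int n = 0;

	while (cons != prod) {
		complete(r->contexts[cons]);
		cons = (cons + 1) % r->nr_entries;
		n++;
	}
	*r->consumer = cons;	/* publish progress back to the producer */
	return n;
}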
-
-/**
- * megasas_release_mfi -	Reverses the FW initialization
- * @instance:			Adapter soft state
- */
-static void megasas_release_mfi(struct megasas_instance *instance)
-{
-	u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);
-
-	pci_free_consistent(instance->pdev, reply_q_sz,
-			    instance->reply_queue, instance->reply_queue_h);
-
-	megasas_free_cmds(instance);
-
-	iounmap(instance->reg_set);
-
-	pci_release_selected_regions(instance->pdev,
-		pci_select_bars(instance->pdev, IORESOURCE_MEM));
-}
-
-/**
- * megasas_get_seq_num -	Gets latest event sequence numbers
- * @instance:			Adapter soft state
- * @eli:			FW event log sequence numbers information
- *
- * FW maintains a log of all events in a non-volatile area. Upper layers would
- * usually find out the latest sequence number of the events, the seq number at
- * boot, etc. They would "read" all the events below the latest seq number
- * by issuing a direct FW cmd (DCMD). For future events (beyond the latest seq
- * number), they would subscribe to AEN (asynchronous event notification) and
- * wait for the events to happen.
- */
-static int
-megasas_get_seq_num(struct megasas_instance *instance,
-		    struct megasas_evt_log_info *eli)
-{
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-	struct megasas_evt_log_info *el_info;
-	dma_addr_t el_info_h = 0;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd) {
-		return -ENOMEM;
-	}
-
-	dcmd = &cmd->frame->dcmd;
-	el_info = pci_alloc_consistent(instance->pdev,
-				       sizeof(struct megasas_evt_log_info),
-				       &el_info_h);
-
-	if (!el_info) {
-		megasas_return_cmd(instance, cmd);
-		return -ENOMEM;
-	}
-
-	memset(el_info, 0, sizeof(*el_info));
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0x0;
-	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
-	dcmd->timeout = 0;
-	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
-	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
-	dcmd->sgl.sge32[0].phys_addr = el_info_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
-
-	megasas_issue_blocked_cmd(instance, cmd);
-
-	/*
-	 * Copy the data back into callers buffer
-	 */
-	memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
-
-	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
-			    el_info, el_info_h);
-
-	megasas_return_cmd(instance, cmd);
-
-	return 0;
-}
-
-/**
- * megasas_register_aen -	Registers for asynchronous event notification
- * @instance:			Adapter soft state
- * @seq_num:			The starting sequence number
- * @class_locale:		Class of the event
- *
- * This function subscribes for AEN for events beyond the @seq_num. It requests
- * to be notified if and only if the event is of type @class_locale
- */
-static int
-megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
-		     u32 class_locale_word)
-{
-	int ret_val;
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-	union megasas_evt_class_locale curr_aen;
-	union megasas_evt_class_locale prev_aen;
-
-	/*
-	 * If there an AEN pending already (aen_cmd), check if the
-	 * class_locale of that pending AEN is inclusive of the new
-	 * AEN request we currently have. If it is, then we don't have
-	 * to do anything. In other words, whichever events the current
-	 * AEN request is subscribing to have already been subscribed
-	 * to.
-	 *
-	 * If the old_cmd is _not_ inclusive, then we have to abort
-	 * that command, form a class_locale that is superset of both
-	 * old and current and re-issue to the FW
-	 */
-
-	curr_aen.word = class_locale_word;
-
-	if (instance->aen_cmd) {
-
-		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
-
-		/*
-		 * A class whose enum value is smaller is inclusive of all
-		 * higher values. If a PROGRESS (= -1) was previously
-		 * registered, then new registration requests for higher
-		 * classes need not be sent to FW. They are automatically
-		 * included.
-		 *
-		 * Locale numbers don't have such hierarchy. They are bitmap
-		 * values
-		 */
-		if ((prev_aen.members.class <= curr_aen.members.class) &&
-		    !((prev_aen.members.locale & curr_aen.members.locale) ^
-		      curr_aen.members.locale)) {
-			/*
-			 * Previously issued event registration includes
-			 * current request. Nothing to do.
-			 */
-			return 0;
-		} else {
-			curr_aen.members.locale |= prev_aen.members.locale;
-
-			if (prev_aen.members.class < curr_aen.members.class)
-				curr_aen.members.class = prev_aen.members.class;
-
-			instance->aen_cmd->abort_aen = 1;
-			ret_val = megasas_issue_blocked_abort_cmd(instance,
-								  instance->
-								  aen_cmd);
-
-			if (ret_val) {
-				printk(KERN_DEBUG "megasas: Failed to abort "
-				       "previous AEN command\n");
-				return ret_val;
-			}
-		}
-	}
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd)
-		return -ENOMEM;
-
-	dcmd = &cmd->frame->dcmd;
-
-	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
-
-	/*
-	 * Prepare DCMD for aen registration
-	 */
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0x0;
-	dcmd->sge_count = 1;
-	dcmd->flags = MFI_FRAME_DIR_READ;
-	dcmd->timeout = 0;
-	dcmd->pad_0 = 0;
-	instance->last_seq_num = seq_num;
-	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
-	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
-	dcmd->mbox.w[0] = seq_num;
-	dcmd->mbox.w[1] = curr_aen.word;
-	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
-	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
-
-	if (instance->aen_cmd != NULL) {
-		megasas_return_cmd(instance, cmd);
-		return 0;
-	}
-
-	/*
-	 * Store reference to the cmd used to register for AEN. When an
-	 * application wants us to register for AEN, we have to abort this
-	 * cmd and re-register with a new EVENT LOCALE supplied by that app
-	 */
-	instance->aen_cmd = cmd;
-
-	/*
-	 * Issue the aen registration frame
-	 */
-	instance->instancet->fire_cmd(instance,
-			cmd->frame_phys_addr, 0, instance->reg_set);
-
-	return 0;
-}
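
The inclusion test in megasas_register_aen() treats the event class as a threshold, where a numerically lower class already covers all higher ones, and the locale as a plain bitmap. A small standalone sketch of that check and of the superset built when the previous registration has to be aborted and replaced (the field widths here are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct class_locale {
	int8_t   class;		/* threshold: a lower value covers all higher ones */
	uint16_t locale;	/* bitmap of event locales */
};

/* does the previous registration already cover everything 'curr' asks for? */
static bool aen_covered(struct class_locale prev, struct class_locale curr)
{
	return prev.class <= curr.class &&
	       (prev.locale & curr.locale) == curr.locale;
}

/* otherwise, build the superset used to replace both registrations */
static struct class_locale aen_merge(struct class_locale prev,
				     struct class_locale curr)
{
	struct class_locale merged = {
		.class  = prev.class < curr.class ? prev.class : curr.class,
		.locale = (uint16_t)(prev.locale | curr.locale),
	};

	return merged;
}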
-
-/**
- * megasas_start_aen -	Subscribes to AEN during driver load time
- * @instance:		Adapter soft state
- */
-static int megasas_start_aen(struct megasas_instance *instance)
-{
-	struct megasas_evt_log_info eli;
-	union megasas_evt_class_locale class_locale;
-
-	/*
-	 * Get the latest sequence number from FW
-	 */
-	memset(&eli, 0, sizeof(eli));
-
-	if (megasas_get_seq_num(instance, &eli))
-		return -1;
-
-	/*
-	 * Register AEN with FW for latest sequence number plus 1
-	 */
-	class_locale.members.reserved = 0;
-	class_locale.members.locale = MR_EVT_LOCALE_ALL;
-	class_locale.members.class = MR_EVT_CLASS_DEBUG;
-
-	return megasas_register_aen(instance, eli.newest_seq_num + 1,
-				    class_locale.word);
-}
-
-/**
- * megasas_io_attach -	Attaches this driver to SCSI mid-layer
- * @instance:		Adapter soft state
- */
-static int megasas_io_attach(struct megasas_instance *instance)
-{
-	struct Scsi_Host *host = instance->host;
-
-	/*
-	 * Export parameters required by SCSI mid-layer
-	 */
-	host->irq = instance->pdev->irq;
-	host->unique_id = instance->unique_id;
-	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-		host->can_queue =
-			instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
-	} else
-		host->can_queue =
-			instance->max_fw_cmds - MEGASAS_INT_CMDS;
-	host->this_id = instance->init_id;
-	host->sg_tablesize = instance->max_num_sge;
-	/*
-	 * Check if the module parameter value for max_sectors can be used
-	 */
-	if (max_sectors && max_sectors < instance->max_sectors_per_req)
-		instance->max_sectors_per_req = max_sectors;
-	else {
-		if (max_sectors) {
-			if (((instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
-				(instance->pdev->device ==
-				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
-				(max_sectors <= MEGASAS_MAX_SECTORS)) {
-				instance->max_sectors_per_req = max_sectors;
-			} else {
-			printk(KERN_INFO "megasas: max_sectors should be > 0 "
-				"and <= %d (or < 1MB for GEN2 controller)\n",
-				instance->max_sectors_per_req);
-			}
-		}
-	}
-
-	host->max_sectors = instance->max_sectors_per_req;
-	host->cmd_per_lun = 128;
-	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
-	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
-	host->max_lun = MEGASAS_MAX_LUN;
-	host->max_cmd_len = 16;
-
-	/*
-	 * Notify the mid-layer about the new controller
-	 */
-	if (scsi_add_host(host, &instance->pdev->dev)) {
-		printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
-		return -ENODEV;
-	}
-
-	/*
-	 * Trigger SCSI to scan our drives
-	 */
-	scsi_scan_host(host);
-	return 0;
-}
-
-static int
-megasas_set_dma_mask(struct pci_dev *pdev)
-{
-	/*
-	 * All our controllers are capable of performing 64-bit DMA
-	 */
-	if (IS_DMA64) {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-
-			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
-				goto fail_set_dma_mask;
-		}
-	} else {
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
-			goto fail_set_dma_mask;
-	}
-	return 0;
-
-fail_set_dma_mask:
-	return 1;
-}
-
-/**
- * megasas_probe_one -	PCI hotplug entry point
- * @pdev:		PCI device structure
- * @id:			PCI IDs of the supported hotplugged adapter
- */
-static int __devinit
-megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-	int rval;
-	struct Scsi_Host *host;
-	struct megasas_instance *instance;
-
-	/*
-	 * Announce PCI information
-	 */
-	printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
-	       pdev->vendor, pdev->device, pdev->subsystem_vendor,
-	       pdev->subsystem_device);
-
-	printk("bus %d:slot %d:func %d\n",
-	       pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
-	/*
-	 * PCI prepping: enable device, set bus mastering and DMA mask
-	 */
-	rval = pci_enable_device_mem(pdev);
-
-	if (rval) {
-		return rval;
-	}
-
-	pci_set_master(pdev);
-
-	if (megasas_set_dma_mask(pdev))
-		goto fail_set_dma_mask;
-
-	host = scsi_host_alloc(&megasas_template,
-			       sizeof(struct megasas_instance));
-
-	if (!host) {
-		printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
-		goto fail_alloc_instance;
-	}
-
-	instance = (struct megasas_instance *)host->hostdata;
-	memset(instance, 0, sizeof(*instance));
-	atomic_set( &instance->fw_reset_no_pci_access, 0 );
-
-	instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
-						  &instance->producer_h);
-	instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
-						  &instance->consumer_h);
-
-	if (!instance->producer || !instance->consumer) {
-		printk(KERN_DEBUG "megasas: Failed to allocate memory for "
-		       "producer, consumer\n");
-		goto fail_alloc_dma_buf;
-	}
-
-	*instance->producer = 0;
-	*instance->consumer = 0;
-	megasas_poll_wait_aen = 0;
-	instance->flag_ieee = 0;
-	instance->ev = NULL;
-	instance->issuepend_done = 1;
-	instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
-	megasas_poll_wait_aen = 0;
-
-	instance->evt_detail = pci_alloc_consistent(pdev,
-						    sizeof(struct
-							   megasas_evt_detail),
-						    &instance->evt_detail_h);
-
-	if (!instance->evt_detail) {
-		printk(KERN_DEBUG "megasas: Failed to allocate memory for "
-		       "event detail structure\n");
-		goto fail_alloc_dma_buf;
-	}
-
-	/*
-	 * Initialize locks and queues
-	 */
-	INIT_LIST_HEAD(&instance->cmd_pool);
-	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
-
-	atomic_set(&instance->fw_outstanding,0);
-
-	init_waitqueue_head(&instance->int_cmd_wait_q);
-	init_waitqueue_head(&instance->abort_cmd_wait_q);
-
-	spin_lock_init(&instance->cmd_pool_lock);
-	spin_lock_init(&instance->hba_lock);
-	spin_lock_init(&instance->completion_lock);
-	spin_lock_init(&poll_aen_lock);
-
-	mutex_init(&instance->aen_mutex);
-
-	/*
-	 * Initialize PCI related and misc parameters
-	 */
-	instance->pdev = pdev;
-	instance->host = host;
-	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
-	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
-
-	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
-		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
-		instance->flag_ieee = 1;
-		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
-	} else
-		sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
-
-	megasas_dbg_lvl = 0;
-	instance->flag = 0;
-	instance->unload = 1;
-	instance->last_time = 0;
-	instance->disableOnlineCtrlReset = 1;
-
-	INIT_WORK(&instance->work_init, process_fw_state_change_wq);
-
-	/*
-	 * Initialize MFI Firmware
-	 */
-	if (megasas_init_mfi(instance))
-		goto fail_init_mfi;
-
-	/*
-	 * Register IRQ
-	 */
-	if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) {
-		printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
-		goto fail_irq;
-	}
-
-	instance->instancet->enable_intr(instance->reg_set);
-
-	/*
-	 * Store instance in PCI softstate
-	 */
-	pci_set_drvdata(pdev, instance);
-
-	/*
-	 * Add this controller to megasas_mgmt_info structure so that it
-	 * can be exported to management applications
-	 */
-	megasas_mgmt_info.count++;
-	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
-	megasas_mgmt_info.max_index++;
-
-	/*
-	 * Initiate AEN (Asynchronous Event Notification)
-	 */
-	if (megasas_start_aen(instance)) {
-		printk(KERN_DEBUG "megasas: start aen failed\n");
-		goto fail_start_aen;
-	}
-
-	/*
-	 * Register with SCSI mid-layer
-	 */
-	if (megasas_io_attach(instance))
-		goto fail_io_attach;
-
-	instance->unload = 0;
-	return 0;
-
-      fail_start_aen:
-      fail_io_attach:
-	megasas_mgmt_info.count--;
-	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
-	megasas_mgmt_info.max_index--;
-
-	pci_set_drvdata(pdev, NULL);
-	instance->instancet->disable_intr(instance->reg_set);
-	free_irq(instance->pdev->irq, instance);
-
-	megasas_release_mfi(instance);
-
-      fail_irq:
-      fail_init_mfi:
-      fail_alloc_dma_buf:
-	if (instance->evt_detail)
-		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
-				    instance->evt_detail,
-				    instance->evt_detail_h);
-
-	if (instance->producer)
-		pci_free_consistent(pdev, sizeof(u32), instance->producer,
-				    instance->producer_h);
-	if (instance->consumer)
-		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
-				    instance->consumer_h);
-	scsi_host_put(host);
-
-      fail_alloc_instance:
-      fail_set_dma_mask:
-	pci_disable_device(pdev);
-
-	return -ENODEV;
-}
-
-/**
- * megasas_flush_cache -	Requests FW to flush all its caches
- * @instance:			Adapter soft state
- */
-static void megasas_flush_cache(struct megasas_instance *instance)
-{
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
-		return;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd)
-		return;
-
-	dcmd = &cmd->frame->dcmd;
-
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0x0;
-	dcmd->sge_count = 0;
-	dcmd->flags = MFI_FRAME_DIR_NONE;
-	dcmd->timeout = 0;
-	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = 0;
-	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
-	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
-
-	megasas_issue_blocked_cmd(instance, cmd);
-
-	megasas_return_cmd(instance, cmd);
-
-	return;
-}
-
-/**
- * megasas_shutdown_controller -	Instructs FW to shutdown the controller
- * @instance:				Adapter soft state
- * @opcode:				Shutdown/Hibernate
- */
-static void megasas_shutdown_controller(struct megasas_instance *instance,
-					u32 opcode)
-{
-	struct megasas_cmd *cmd;
-	struct megasas_dcmd_frame *dcmd;
-
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
-		return;
-
-	cmd = megasas_get_cmd(instance);
-
-	if (!cmd)
-		return;
-
-	if (instance->aen_cmd)
-		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
-
-	dcmd = &cmd->frame->dcmd;
-
-	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
-	dcmd->cmd = MFI_CMD_DCMD;
-	dcmd->cmd_status = 0x0;
-	dcmd->sge_count = 0;
-	dcmd->flags = MFI_FRAME_DIR_NONE;
-	dcmd->timeout = 0;
-	dcmd->pad_0 = 0;
-	dcmd->data_xfer_len = 0;
-	dcmd->opcode = opcode;
-
-	megasas_issue_blocked_cmd(instance, cmd);
-
-	megasas_return_cmd(instance, cmd);
-
-	return;
-}
-
-#ifdef CONFIG_PM
-/**
- * megasas_suspend -	driver suspend entry point
- * @pdev:		PCI device structure
- * @state:		PCI power state to suspend routine
- */
-static int
-megasas_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-	struct Scsi_Host *host;
-	struct megasas_instance *instance;
-
-	instance = pci_get_drvdata(pdev);
-	host = instance->host;
-	instance->unload = 1;
-
-	if (poll_mode_io)
-		del_timer_sync(&instance->io_completion_timer);
-
-	megasas_flush_cache(instance);
-	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
-
-	/* cancel the delayed work if it is still queued */
-	if (instance->ev != NULL) {
-		struct megasas_aen_event *ev = instance->ev;
-		cancel_delayed_work(
-			(struct delayed_work *)&ev->hotplug_work);
-		flush_scheduled_work();
-		instance->ev = NULL;
-	}
-
-	tasklet_kill(&instance->isr_tasklet);
-
-	pci_set_drvdata(instance->pdev, instance);
-	instance->instancet->disable_intr(instance->reg_set);
-	free_irq(instance->pdev->irq, instance);
-
-	pci_save_state(pdev);
-	pci_disable_device(pdev);
-
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
-	return 0;
-}
-
-/**
- * megasas_resume-      driver resume entry point
- * @pdev:               PCI device structure
- */
-static int
-megasas_resume(struct pci_dev *pdev)
-{
-	int rval;
-	struct Scsi_Host *host;
-	struct megasas_instance *instance;
-
-	instance = pci_get_drvdata(pdev);
-	host = instance->host;
-	pci_set_power_state(pdev, PCI_D0);
-	pci_enable_wake(pdev, PCI_D0, 0);
-	pci_restore_state(pdev);
-
-	/*
-	 * PCI prepping: enable device, set bus mastering and DMA mask
-	 */
-	rval = pci_enable_device_mem(pdev);
-
-	if (rval) {
-		printk(KERN_ERR "megasas: Enable device failed\n");
-		return rval;
-	}
-
-	pci_set_master(pdev);
-
-	if (megasas_set_dma_mask(pdev))
-		goto fail_set_dma_mask;
-
-	/*
-	 * Initialize MFI Firmware
-	 */
-
-	*instance->producer = 0;
-	*instance->consumer = 0;
-
-	atomic_set(&instance->fw_outstanding, 0);
-
-	/*
-	 * We expect the FW state to be READY
-	 */
-	if (megasas_transition_to_ready(instance))
-		goto fail_ready_state;
-
-	if (megasas_issue_init_mfi(instance))
-		goto fail_init_mfi;
-
-	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
-			(unsigned long)instance);
-
-	/*
-	 * Register IRQ
-	 */
-	if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED,
-		"megasas", instance)) {
-		printk(KERN_ERR "megasas: Failed to register IRQ\n");
-		goto fail_irq;
-	}
-
-	instance->instancet->enable_intr(instance->reg_set);
-
-	/*
-	 * Initiate AEN (Asynchronous Event Notification)
-	 */
-	if (megasas_start_aen(instance))
-		printk(KERN_ERR "megasas: Start AEN failed\n");
-
-	/* Initialize the cmd completion timer */
-	if (poll_mode_io)
-		megasas_start_timer(instance, &instance->io_completion_timer,
-				megasas_io_completion_timer,
-				MEGASAS_COMPLETION_TIMER_INTERVAL);
-	instance->unload = 0;
-
-	return 0;
-
-fail_irq:
-fail_init_mfi:
-	if (instance->evt_detail)
-		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
-				instance->evt_detail,
-				instance->evt_detail_h);
-
-	if (instance->producer)
-		pci_free_consistent(pdev, sizeof(u32), instance->producer,
-				instance->producer_h);
-	if (instance->consumer)
-		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
-				instance->consumer_h);
-	scsi_host_put(host);
-
-fail_set_dma_mask:
-fail_ready_state:
-
-	pci_disable_device(pdev);
-
-	return -ENODEV;
-}
-#else
-#define megasas_suspend	NULL
-#define megasas_resume	NULL
-#endif
-
-/**
- * megasas_detach_one -	PCI hot"un"plug entry point
- * @pdev:		PCI device structure
- */
-static void __devexit megasas_detach_one(struct pci_dev *pdev)
-{
-	int i;
-	struct Scsi_Host *host;
-	struct megasas_instance *instance;
-
-	instance = pci_get_drvdata(pdev);
-	instance->unload = 1;
-	host = instance->host;
-
-	if (poll_mode_io)
-		del_timer_sync(&instance->io_completion_timer);
-
-	scsi_remove_host(instance->host);
-	megasas_flush_cache(instance);
-	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
-
-	/* cancel the delayed work if it is still queued */
-	if (instance->ev != NULL) {
-		struct megasas_aen_event *ev = instance->ev;
-		cancel_delayed_work(
-			(struct delayed_work *)&ev->hotplug_work);
-		flush_scheduled_work();
-		instance->ev = NULL;
-	}
-
-	tasklet_kill(&instance->isr_tasklet);
-
-	/*
-	 * Take the instance off the instance array. Note that we will not
-	 * decrement the max_index. We let this array be a sparse array
-	 */
-	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-		if (megasas_mgmt_info.instance[i] == instance) {
-			megasas_mgmt_info.count--;
-			megasas_mgmt_info.instance[i] = NULL;
-
-			break;
-		}
-	}
-
-	pci_set_drvdata(instance->pdev, NULL);
-
-	instance->instancet->disable_intr(instance->reg_set);
-
-	free_irq(instance->pdev->irq, instance);
-
-	megasas_release_mfi(instance);
-
-	pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
-			    instance->evt_detail, instance->evt_detail_h);
-
-	pci_free_consistent(pdev, sizeof(u32), instance->producer,
-			    instance->producer_h);
-
-	pci_free_consistent(pdev, sizeof(u32), instance->consumer,
-			    instance->consumer_h);
-
-	scsi_host_put(host);
-
-	pci_set_drvdata(pdev, NULL);
-
-	pci_disable_device(pdev);
-
-	return;
-}
-
-/**
- * megasas_shutdown -	Shutdown entry point
- * @pdev:		PCI device structure
- */
-static void megasas_shutdown(struct pci_dev *pdev)
-{
-	struct megasas_instance *instance = pci_get_drvdata(pdev);
-	instance->unload = 1;
-	megasas_flush_cache(instance);
-	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
-}
-
-/**
- * megasas_mgmt_open -	char node "open" entry point
- */
-static int megasas_mgmt_open(struct inode *inode, struct file *filep)
-{
-	/*
-	 * Allow only those users with admin rights
-	 */
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-
-	return 0;
-}
-
-/**
- * megasas_mgmt_fasync -	Async notifier registration from applications
- *
- * This function adds the calling process to a driver global queue. When an
- * event occurs, SIGIO will be sent to all processes in this queue.
- */
-static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
-{
-	int rc;
-
-	mutex_lock(&megasas_async_queue_mutex);
-
-	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
-
-	mutex_unlock(&megasas_async_queue_mutex);
-
-	if (rc >= 0) {
-		/* For sanity check when we get ioctl */
-		filep->private_data = filep;
-		return 0;
-	}
-
-	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
-
-	return rc;
-}
-
-/**
- * megasas_mgmt_poll -  char node "poll" entry point
- * */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
-{
-	unsigned int mask;
-	unsigned long flags;
-	poll_wait(file, &megasas_poll_wait, wait);
-	spin_lock_irqsave(&poll_aen_lock, flags);
-	if (megasas_poll_wait_aen)
-		mask =   (POLLIN | POLLRDNORM);
-	else
-		mask = 0;
-	spin_unlock_irqrestore(&poll_aen_lock, flags);
-	return mask;
-}
-
-/**
- * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
- * @instance:			Adapter soft state
- * @argp:			User's ioctl packet
- */
-static int
-megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
-		      struct megasas_iocpacket __user * user_ioc,
-		      struct megasas_iocpacket *ioc)
-{
-	struct megasas_sge32 *kern_sge32;
-	struct megasas_cmd *cmd;
-	void *kbuff_arr[MAX_IOCTL_SGE];
-	dma_addr_t buf_handle = 0;
-	int error = 0, i;
-	void *sense = NULL;
-	dma_addr_t sense_handle;
-	unsigned long *sense_ptr;
-
-	memset(kbuff_arr, 0, sizeof(kbuff_arr));
-
-	if (ioc->sge_count > MAX_IOCTL_SGE) {
-		printk(KERN_DEBUG "megasas: SGE count [%d] >  max limit [%d]\n",
-		       ioc->sge_count, MAX_IOCTL_SGE);
-		return -EINVAL;
-	}
-
-	cmd = megasas_get_cmd(instance);
-	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
-		return -ENOMEM;
-	}
-
-	/*
-	 * User's IOCTL packet has 2 frames (maximum). Copy those two
-	 * frames into our cmd's frames. cmd->frame's context will get
-	 * overwritten when we copy from user's frames. So set that value
-	 * alone separately
-	 */
-	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
-	cmd->frame->hdr.context = cmd->index;
-	cmd->frame->hdr.pad_0 = 0;
-
-	/*
-	 * The management interface between applications and the fw uses
-	 * MFI frames. E.g., RAID configuration changes, LD property changes,
-	 * etc. are accomplished through different kinds of MFI frames. The
-	 * driver needs to care only about substituting user buffers with
-	 * kernel buffers in SGLs. The location of SGL is embedded in the
-	 * struct iocpacket itself.
-	 */
-	kern_sge32 = (struct megasas_sge32 *)
-	    ((unsigned long)cmd->frame + ioc->sgl_off);
-
-	/*
-	 * For each user buffer, create a mirror buffer and copy in
-	 */
-	for (i = 0; i < ioc->sge_count; i++) {
-		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
-						    ioc->sgl[i].iov_len,
-						    &buf_handle, GFP_KERNEL);
-		if (!kbuff_arr[i]) {
-			printk(KERN_DEBUG "megasas: Failed to alloc "
-			       "kernel SGL buffer for IOCTL \n");
-			error = -ENOMEM;
-			goto out;
-		}
-
-		/*
-		 * We don't change the dma_coherent_mask, so
-		 * pci_alloc_consistent only returns 32bit addresses
-		 */
-		kern_sge32[i].phys_addr = (u32) buf_handle;
-		kern_sge32[i].length = ioc->sgl[i].iov_len;
-
-		/*
-		 * We created a kernel buffer corresponding to the
-		 * user buffer. Now copy in from the user buffer
-		 */
-		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
-				   (u32) (ioc->sgl[i].iov_len))) {
-			error = -EFAULT;
-			goto out;
-		}
-	}
-
-	if (ioc->sense_len) {
-		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
-					     &sense_handle, GFP_KERNEL);
-		if (!sense) {
-			error = -ENOMEM;
-			goto out;
-		}
-
-		sense_ptr =
-		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
-		*sense_ptr = sense_handle;
-	}
-
-	/*
-	 * Set the sync_cmd flag so that the ISR knows not to complete this
-	 * cmd to the SCSI mid-layer
-	 */
-	cmd->sync_cmd = 1;
-	megasas_issue_blocked_cmd(instance, cmd);
-	cmd->sync_cmd = 0;
-
-	/*
-	 * copy out the kernel buffers to user buffers
-	 */
-	for (i = 0; i < ioc->sge_count; i++) {
-		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
-				 ioc->sgl[i].iov_len)) {
-			error = -EFAULT;
-			goto out;
-		}
-	}
-
-	/*
-	 * copy out the sense
-	 */
-	if (ioc->sense_len) {
-		/*
-		 * sense_ptr points to the location that has the user
-		 * sense buffer address
-		 */
-		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
-				ioc->sense_off);
-
-		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
-				 sense, ioc->sense_len)) {
-			printk(KERN_ERR "megasas: Failed to copy out to user "
-					"sense data\n");
-			error = -EFAULT;
-			goto out;
-		}
-	}
-
-	/*
-	 * copy the status codes returned by the fw
-	 */
-	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
-			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
-		printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
-		error = -EFAULT;
-	}
-
-      out:
-	if (sense) {
-		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
-				    sense, sense_handle);
-	}
-
-	for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
-		dma_free_coherent(&instance->pdev->dev,
-				    kern_sge32[i].length,
-				    kbuff_arr[i], kern_sge32[i].phys_addr);
-	}
-
-	megasas_return_cmd(instance, cmd);
-	return error;
-}
-
-static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
-{
-	struct megasas_iocpacket __user *user_ioc =
-	    (struct megasas_iocpacket __user *)arg;
-	struct megasas_iocpacket *ioc;
-	struct megasas_instance *instance;
-	int error;
-	int i;
-	unsigned long flags;
-	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
-
-	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-	if (!ioc)
-		return -ENOMEM;
-
-	if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
-		error = -EFAULT;
-		goto out_kfree_ioc;
-	}
-
-	instance = megasas_lookup_instance(ioc->host_no);
-	if (!instance) {
-		error = -ENODEV;
-		goto out_kfree_ioc;
-	}
-
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		printk(KERN_ERR "Controller in crit error\n");
-		error = -ENODEV;
-		goto out_kfree_ioc;
-	}
-
-	if (instance->unload == 1) {
-		error = -ENODEV;
-		goto out_kfree_ioc;
-	}
-
-	/*
-	 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
-	 */
-	if (down_interruptible(&instance->ioctl_sem)) {
-		error = -ERESTARTSYS;
-		goto out_kfree_ioc;
-	}
-
-	for (i = 0; i < wait_time; i++) {
-
-		spin_lock_irqsave(&instance->hba_lock, flags);
-		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
-			spin_unlock_irqrestore(&instance->hba_lock, flags);
-			break;
-		}
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: waiting "
-				"for controller reset to finish\n");
-		}
-
-		msleep(1000);
-	}
-
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-		printk(KERN_ERR "megaraid_sas: timed out while "
-			"waiting for HBA to recover\n");
-		error = -ENODEV;
-		goto out_kfree_ioc;
-	}
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
-	up(&instance->ioctl_sem);
-
-      out_kfree_ioc:
-	kfree(ioc);
-	return error;
-}
-
-static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
-{
-	struct megasas_instance *instance;
-	struct megasas_aen aen;
-	int error;
-	int i;
-	unsigned long flags;
-	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
-
-	if (file->private_data != file) {
-		printk(KERN_DEBUG "megasas: fasync_helper was not "
-		       "called first\n");
-		return -EINVAL;
-	}
-
-	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
-		return -EFAULT;
-
-	instance = megasas_lookup_instance(aen.host_no);
-
-	if (!instance)
-		return -ENODEV;
-
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		return -ENODEV;
-	}
-
-	if (instance->unload == 1) {
-		return -ENODEV;
-	}
-
-	for (i = 0; i < wait_time; i++) {
-
-		spin_lock_irqsave(&instance->hba_lock, flags);
-		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
-			spin_unlock_irqrestore(&instance->hba_lock,
-						flags);
-			break;
-		}
-
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: waiting for "
-				"controller reset to finish\n");
-		}
-
-		msleep(1000);
-	}
-
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		printk(KERN_ERR "megaraid_sas: timed out while waiting "
-				"for HBA to recover.\n");
-		return -ENODEV;
-	}
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
-
-	mutex_lock(&instance->aen_mutex);
-	error = megasas_register_aen(instance, aen.seq_num,
-				     aen.class_locale_word);
-	mutex_unlock(&instance->aen_mutex);
-	return error;
-}
-
-/**
- * megasas_mgmt_ioctl -	char node ioctl entry point
- */
-static long
-megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	switch (cmd) {
-	case MEGASAS_IOC_FIRMWARE:
-		return megasas_mgmt_ioctl_fw(file, arg);
-
-	case MEGASAS_IOC_GET_AEN:
-		return megasas_mgmt_ioctl_aen(file, arg);
-	}
-
-	return -ENOTTY;
-}
-
-#ifdef CONFIG_COMPAT
-static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
-{
-	struct compat_megasas_iocpacket __user *cioc =
-	    (struct compat_megasas_iocpacket __user *)arg;
-	struct megasas_iocpacket __user *ioc =
-	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
-	int i;
-	int error = 0;
-	compat_uptr_t ptr;
-
-	if (clear_user(ioc, sizeof(*ioc)))
-		return -EFAULT;
-
-	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
-	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
-	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
-	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
-	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
-	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
-		return -EFAULT;
-
-	/*
-	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
-	 * sense_len is not null, so prepare the 64bit value under
-	 * the same condition.
-	 */
-	if (ioc->sense_len) {
-		void __user **sense_ioc_ptr =
-			(void __user **)(ioc->frame.raw + ioc->sense_off);
-		compat_uptr_t *sense_cioc_ptr =
-			(compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
-		if (get_user(ptr, sense_cioc_ptr) ||
-		    put_user(compat_ptr(ptr), sense_ioc_ptr))
-			return -EFAULT;
-	}
-
-	for (i = 0; i < MAX_IOCTL_SGE; i++) {
-		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
-		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
-		    copy_in_user(&ioc->sgl[i].iov_len,
-				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
-			return -EFAULT;
-	}
-
-	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
-
-	if (copy_in_user(&cioc->frame.hdr.cmd_status,
-			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
-		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
-		return -EFAULT;
-	}
-	return error;
-}
-
-static long
-megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
-			  unsigned long arg)
-{
-	switch (cmd) {
-	case MEGASAS_IOC_FIRMWARE32:
-		return megasas_mgmt_compat_ioctl_fw(file, arg);
-	case MEGASAS_IOC_GET_AEN:
-		return megasas_mgmt_ioctl_aen(file, arg);
-	}
-
-	return -ENOTTY;
-}
-#endif
-
-/*
- * File operations structure for management interface
- */
-static const struct file_operations megasas_mgmt_fops = {
-	.owner = THIS_MODULE,
-	.open = megasas_mgmt_open,
-	.fasync = megasas_mgmt_fasync,
-	.unlocked_ioctl = megasas_mgmt_ioctl,
-	.poll = megasas_mgmt_poll,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = megasas_mgmt_compat_ioctl,
-#endif
-	.llseek = noop_llseek,
-};
-
-/*
- * PCI hotplug support registration structure
- */
-static struct pci_driver megasas_pci_driver = {
-
-	.name = "megaraid_sas",
-	.id_table = megasas_pci_table,
-	.probe = megasas_probe_one,
-	.remove = __devexit_p(megasas_detach_one),
-	.suspend = megasas_suspend,
-	.resume = megasas_resume,
-	.shutdown = megasas_shutdown,
-};
-
-/*
- * Sysfs driver attributes
- */
-static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
-{
-	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
-			MEGASAS_VERSION);
-}
-
-static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
-
-static ssize_t
-megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
-{
-	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
-			MEGASAS_RELDATE);
-}
-
-static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
-		   NULL);
-
-static ssize_t
-megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
-{
-	return sprintf(buf, "%u\n", support_poll_for_event);
-}
-
-static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
-			megasas_sysfs_show_support_poll_for_event, NULL);
-
- static ssize_t
-megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
-{
-	return sprintf(buf, "%u\n", support_device_change);
-}
-
-static DRIVER_ATTR(support_device_change, S_IRUGO,
-			megasas_sysfs_show_support_device_change, NULL);
-
-static ssize_t
-megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
-{
-	return sprintf(buf, "%u\n", megasas_dbg_lvl);
-}
-
-static ssize_t
-megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
-{
-	int retval = count;
-	if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
-		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
-		retval = -EINVAL;
-	}
-	return retval;
-}
-
-static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
-		megasas_sysfs_set_dbg_lvl);
-
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
-	return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
-				const char *buf, size_t count)
-{
-	int retval = count;
-	int tmp = poll_mode_io;
-	int i;
-	struct megasas_instance *instance;
-
-	if (sscanf(buf, "%u", &poll_mode_io) < 1) {
-		printk(KERN_ERR "megasas: could not set poll_mode_io\n");
-		retval = -EINVAL;
-	}
-
-	/*
-	 * Check if poll_mode_io is already set or is same as previous value
-	 */
-	if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
-		goto out;
-
-	if (poll_mode_io) {
-		/*
-		 * Start timers for all adapters
-		 */
-		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-			instance = megasas_mgmt_info.instance[i];
-			if (instance) {
-				megasas_start_timer(instance,
-					&instance->io_completion_timer,
-					megasas_io_completion_timer,
-					MEGASAS_COMPLETION_TIMER_INTERVAL);
-			}
-		}
-	} else {
-		/*
-		 * Delete timers for all adapters
-		 */
-		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-			instance = megasas_mgmt_info.instance[i];
-			if (instance)
-				del_timer_sync(&instance->io_completion_timer);
-		}
-	}
-
-out:
-	return retval;
-}
-
-static void
-megasas_aen_polling(struct work_struct *work)
-{
-	struct megasas_aen_event *ev =
-		container_of(work, struct megasas_aen_event, hotplug_work);
-	struct megasas_instance *instance = ev->instance;
-	union megasas_evt_class_locale class_locale;
-	struct  Scsi_Host *host;
-	struct  scsi_device *sdev1;
-	u16     pd_index = 0;
-	u16	ld_index = 0;
-	int     i, j, doscan = 0;
-	u32 seq_num;
-	int error;
-
-	if (!instance) {
-		printk(KERN_ERR "invalid instance!\n");
-		kfree(ev);
-		return;
-	}
-	instance->ev = NULL;
-	host = instance->host;
-	if (instance->evt_detail) {
-
-		switch (instance->evt_detail->code) {
-		case MR_EVT_PD_INSERTED:
-			if (megasas_get_pd_list(instance) == 0) {
-			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-				for (j = 0;
-				j < MEGASAS_MAX_DEV_PER_CHANNEL;
-				j++) {
-
-				pd_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-				sdev1 =
-				scsi_device_lookup(host, i, j, 0);
-
-				if (instance->pd_list[pd_index].driveState
-						== MR_PD_STATE_SYSTEM) {
-						if (!sdev1) {
-						scsi_add_device(host, i, j, 0);
-						}
-
-					if (sdev1)
-						scsi_device_put(sdev1);
-					}
-				}
-			}
-			}
-			doscan = 0;
-			break;
-
-		case MR_EVT_PD_REMOVED:
-			if (megasas_get_pd_list(instance) == 0) {
-			megasas_get_pd_list(instance);
-			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-				for (j = 0;
-				j < MEGASAS_MAX_DEV_PER_CHANNEL;
-				j++) {
-
-				pd_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-				sdev1 =
-				scsi_device_lookup(host, i, j, 0);
-
-				if (instance->pd_list[pd_index].driveState
-					== MR_PD_STATE_SYSTEM) {
-					if (sdev1) {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
-					}
-				}
-				}
-			}
-			}
-			doscan = 0;
-			break;
-
-		case MR_EVT_LD_OFFLINE:
-		case MR_EVT_LD_DELETED:
-			megasas_get_ld_list(instance);
-			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-				for (j = 0;
-				j < MEGASAS_MAX_DEV_PER_CHANNEL;
-				j++) {
-
-				ld_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-				sdev1 = scsi_device_lookup(host,
-					i + MEGASAS_MAX_LD_CHANNELS,
-					j,
-					0);
-
-				if (instance->ld_ids[ld_index] != 0xff) {
-					if (sdev1) {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
-					}
-				}
-				}
-			}
-			doscan = 0;
-			break;
-		case MR_EVT_LD_CREATED:
-			megasas_get_ld_list(instance);
-			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-				for (j = 0;
-					j < MEGASAS_MAX_DEV_PER_CHANNEL;
-					j++) {
-					ld_index =
-					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-					sdev1 = scsi_device_lookup(host,
-						i+MEGASAS_MAX_LD_CHANNELS,
-						j, 0);
-
-					if (instance->ld_ids[ld_index] !=
-								0xff) {
-						if (!sdev1) {
-							scsi_add_device(host,
-								i + 2,
-								j, 0);
-						}
-					}
-					if (sdev1) {
-						scsi_device_put(sdev1);
-					}
-				}
-			}
-			doscan = 0;
-			break;
-		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
-		case MR_EVT_FOREIGN_CFG_IMPORTED:
-			doscan = 1;
-			break;
-		default:
-			doscan = 0;
-			break;
-		}
-	} else {
-		printk(KERN_ERR "invalid evt_detail!\n");
-		kfree(ev);
-		return;
-	}
-
-	if (doscan) {
-		printk(KERN_INFO "scanning ...\n");
-		megasas_get_pd_list(instance);
-		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
-			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
-				sdev1 = scsi_device_lookup(host, i, j, 0);
-				if (instance->pd_list[pd_index].driveState ==
-							MR_PD_STATE_SYSTEM) {
-					if (!sdev1) {
-						scsi_add_device(host, i, j, 0);
-					}
-					if (sdev1)
-						scsi_device_put(sdev1);
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
-					}
-				}
-			}
-		}
-
-		megasas_get_ld_list(instance);
-		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
-			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
-				ld_index =
-				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
-				sdev1 = scsi_device_lookup(host,
-					i+MEGASAS_MAX_LD_CHANNELS, j, 0);
-				if (instance->ld_ids[ld_index] != 0xff) {
-					if (!sdev1) {
-						scsi_add_device(host,
-								i+2,
-								j, 0);
-					} else {
-						scsi_device_put(sdev1);
-					}
-				} else {
-					if (sdev1) {
-						scsi_remove_device(sdev1);
-						scsi_device_put(sdev1);
-					}
-				}
-			}
-		}
-	}
-
-	if ( instance->aen_cmd != NULL ) {
-		kfree(ev);
-		return ;
-	}
-
-	seq_num = instance->evt_detail->seq_num + 1;
-
-	/* Register AEN with FW for latest sequence number plus 1 */
-	class_locale.members.reserved = 0;
-	class_locale.members.locale = MR_EVT_LOCALE_ALL;
-	class_locale.members.class = MR_EVT_CLASS_DEBUG;
-	mutex_lock(&instance->aen_mutex);
-	error = megasas_register_aen(instance, seq_num,
-					class_locale.word);
-	mutex_unlock(&instance->aen_mutex);
-
-	if (error)
-		printk(KERN_ERR "register aen failed error %x\n", error);
-
-	kfree(ev);
-}
-
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
-		megasas_sysfs_show_poll_mode_io,
-		megasas_sysfs_set_poll_mode_io);
-
-/**
- * megasas_init - Driver load entry point
- */
-static int __init megasas_init(void)
-{
-	int rval;
-
-	/*
-	 * Announce driver version and other information
-	 */
-	printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
-	       MEGASAS_EXT_VERSION);
-
-	support_poll_for_event = 2;
-	support_device_change = 1;
-
-	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
-
-	/*
-	 * Register character device node
-	 */
-	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
-
-	if (rval < 0) {
-		printk(KERN_DEBUG "megasas: failed to open device node\n");
-		return rval;
-	}
-
-	megasas_mgmt_majorno = rval;
-
-	/*
-	 * Register ourselves as PCI hotplug module
-	 */
-	rval = pci_register_driver(&megasas_pci_driver);
-
-	if (rval) {
-		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
-		goto err_pcidrv;
-	}
-
-	rval = driver_create_file(&megasas_pci_driver.driver,
-				  &driver_attr_version);
-	if (rval)
-		goto err_dcf_attr_ver;
-	rval = driver_create_file(&megasas_pci_driver.driver,
-				  &driver_attr_release_date);
-	if (rval)
-		goto err_dcf_rel_date;
-
-	rval = driver_create_file(&megasas_pci_driver.driver,
-				&driver_attr_support_poll_for_event);
-	if (rval)
-		goto err_dcf_support_poll_for_event;
-
-	rval = driver_create_file(&megasas_pci_driver.driver,
-				  &driver_attr_dbg_lvl);
-	if (rval)
-		goto err_dcf_dbg_lvl;
-	rval = driver_create_file(&megasas_pci_driver.driver,
-				  &driver_attr_poll_mode_io);
-	if (rval)
-		goto err_dcf_poll_mode_io;
-
-	rval = driver_create_file(&megasas_pci_driver.driver,
-				&driver_attr_support_device_change);
-	if (rval)
-		goto err_dcf_support_device_change;
-
-	return rval;
-
-err_dcf_support_device_change:
-	driver_remove_file(&megasas_pci_driver.driver,
-		  &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
-	driver_remove_file(&megasas_pci_driver.driver,
-			   &driver_attr_dbg_lvl);
-err_dcf_dbg_lvl:
-	driver_remove_file(&megasas_pci_driver.driver,
-			&driver_attr_support_poll_for_event);
-
-err_dcf_support_poll_for_event:
-	driver_remove_file(&megasas_pci_driver.driver,
-			   &driver_attr_release_date);
-
-err_dcf_rel_date:
-	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
-err_dcf_attr_ver:
-	pci_unregister_driver(&megasas_pci_driver);
-err_pcidrv:
-	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
-  	return rval;
-}
-
-/**
- * megasas_exit - Driver unload entry point
- */
-static void __exit megasas_exit(void)
-{
-	driver_remove_file(&megasas_pci_driver.driver,
-			   &driver_attr_poll_mode_io);
-	driver_remove_file(&megasas_pci_driver.driver,
-			   &driver_attr_dbg_lvl);
-	driver_remove_file(&megasas_pci_driver.driver,
-			&driver_attr_support_poll_for_event);
-	driver_remove_file(&megasas_pci_driver.driver,
-			&driver_attr_support_device_change);
-	driver_remove_file(&megasas_pci_driver.driver,
-			   &driver_attr_release_date);
-	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
-
-	pci_unregister_driver(&megasas_pci_driver);
-	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
-}
-
-module_init(megasas_init);
-module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index ad16f5e..1b5e375 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,15 +1,30 @@
 /*
+ *  Linux MegaRAID driver for SAS based RAID controllers
  *
- *		Linux MegaRAID driver for SAS based RAID controllers
+ *  Copyright (c) 2009-2011  LSI Corporation.
  *
- * Copyright (c) 2003-2005  LSI Corporation.
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
  *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
  *
- * FILE		: megaraid_sas.h
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *  FILE: megaraid_sas.h
+ *
+ *  Authors: LSI Corporation
+ *
+ *  Send feedback to: <megaraidlinux@lsi.com>
+ *
+ *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ *     ATTN: Linuxraid
  */
 
 #ifndef LSI_MEGARAID_SAS_H
@@ -18,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION			"00.00.04.31-rc1"
-#define MEGASAS_RELDATE			"May 3, 2010"
-#define MEGASAS_EXT_VERSION		"Mon. May 3, 11:41:51 PST 2010"
+#define MEGASAS_VERSION				"00.00.05.29-rc1"
+#define MEGASAS_RELDATE				"Dec. 7, 2010"
+#define MEGASAS_EXT_VERSION			"Tue. Dec. 7 17:00:00 PDT 2010"
 
 /*
  * Device IDs
@@ -32,6 +47,7 @@
 #define	PCI_DEVICE_ID_LSI_SAS0079GEN2		0x0079
 #define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073
 #define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071
+#define	PCI_DEVICE_ID_LSI_FUSION		0x005b
 
 /*
  * =====================================
@@ -421,7 +437,6 @@
 	* Add properties that can be controlled by
 	* a bit in the following structure.
 	*/
-
 	struct {
 		u32     copyBackDisabled            : 1;
 		u32     SMARTerEnabled              : 1;
@@ -701,6 +716,7 @@
 #define MEGASAS_DEFAULT_INIT_ID			-1
 #define MEGASAS_MAX_LUN				8
 #define MEGASAS_MAX_LD				64
+#define MEGASAS_DEFAULT_CMD_PER_LUN		128
 #define MEGASAS_MAX_PD                          (MEGASAS_MAX_PD_CHANNELS * \
 						MEGASAS_MAX_DEV_PER_CHANNEL)
 #define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
@@ -769,7 +785,10 @@
 */
  
 struct megasas_register_set {
-	u32 	reserved_0[4];			/*0000h*/
+	u32	doorbell;                       /*0000h*/
+	u32	fusion_seq_offset;		/*0004h*/
+	u32	fusion_host_diag;		/*0008h*/
+	u32	reserved_01;			/*000Ch*/
 
 	u32 	inbound_msg_0;			/*0010h*/
 	u32 	inbound_msg_1;			/*0014h*/
@@ -789,15 +808,18 @@
 	u32 	inbound_queue_port;		/*0040h*/
 	u32 	outbound_queue_port;		/*0044h*/
 
-	u32 	reserved_2[22];			/*0048h*/
+	u32	reserved_2[9];			/*0048h*/
+	u32	reply_post_host_index;		/*006Ch*/
+	u32	reserved_2_2[12];		/*0070h*/
 
 	u32 	outbound_doorbell_clear;	/*00A0h*/
 
 	u32 	reserved_3[3];			/*00A4h*/
 
 	u32 	outbound_scratch_pad ;		/*00B0h*/
+	u32	outbound_scratch_pad_2;         /*00B4h*/
 
-	u32 	reserved_4[3];			/*00B4h*/
+	u32	reserved_4[2];			/*00B8h*/
 
 	u32 	inbound_low_queue_port ;	/*00C0h*/
 
@@ -1272,6 +1294,9 @@
 
 	u16 max_num_sge;
 	u16 max_fw_cmds;
+	/* For Fusion this is the number of IOCTL commands; for other
+	   MFI-based controllers it is the same as max_fw_cmds */
+	u16 max_mfi_cmds;
 	u32 max_sectors_per_req;
 	struct megasas_aen_event *ev;
 
@@ -1320,6 +1345,16 @@
 
 	struct timer_list io_completion_timer;
 	struct list_head internal_reset_pending_q;
+
+	/* Ptr to HBA specific information */
+	void *ctrl_context;
+	u8	msi_flag;
+	struct msix_entry msixentry;
+	u64 map_id;
+	struct megasas_cmd *map_update_cmd;
+	unsigned long bar;
+	long reset_flags;
+	struct mutex reset_mutex;
 };
 
 enum {
@@ -1345,6 +1380,13 @@
 		struct megasas_register_set __iomem *);
 	int (*check_reset)(struct megasas_instance *, \
 		struct megasas_register_set __iomem *);
+	irqreturn_t (*service_isr)(int irq, void *devp);
+	void (*tasklet)(unsigned long);
+	u32 (*init_adapter)(struct megasas_instance *);
+	u32 (*build_and_issue_cmd) (struct megasas_instance *,
+				    struct scsi_cmnd *);
+	void (*issue_dcmd) (struct megasas_instance *instance,
+			    struct megasas_cmd *cmd);
 };
 
 #define MEGASAS_IS_LOGICAL(scp)						\
@@ -1371,7 +1413,13 @@
 	struct list_head list;
 	struct scsi_cmnd *scmd;
 	struct megasas_instance *instance;
-	u32 frame_count;
+	union {
+		struct {
+			u16 smid;
+			u16 resvd;
+		} context;
+		u32 frame_count;
+	};
 };
 
 #define MAX_MGMT_ADAPTERS		1024
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
new file mode 100644
index 0000000..5d6d07b
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -0,0 +1,5444 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2011  LSI Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *  FILE: megaraid_sas_base.c
+ *  Version : v00.00.05.29-rc1
+ *
+ *  Authors: LSI Corporation
+ *           Sreenivas Bagalkote
+ *           Sumant Patro
+ *           Bo Yang
+ *
+ *  Send feedback to: <megaraidlinux@lsi.com>
+ *
+ *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ *     ATTN: Linuxraid
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+/*
+ * poll_mode_io: if set to 1, complete commands from the IO (queuecommand) path
+ */
+static unsigned int poll_mode_io;
+module_param_named(poll_mode_io, poll_mode_io, int, 0);
+MODULE_PARM_DESC(poll_mode_io,
+	"Complete cmds from IO path, (default=0)");
+
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if the user does not provide one
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+	"Maximum number of sectors per IO command");
+
+static int msix_disable;
+module_param(msix_disable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
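+
+/*
+ * Illustrative usage of the module parameters above (not part of the driver
+ * itself): when built as a module they can be set at load time, e.g.
+ *
+ *	modprobe megaraid_sas poll_mode_io=1 max_sectors=128 msix_disable=1
+ *
+ * or, when built in, on the kernel command line as
+ * megaraid_sas.poll_mode_io=1 and so on.
+ */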
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGASAS_VERSION);
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
+
+int megasas_transition_to_ready(struct megasas_instance *instance);
+static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_issue_init_mfi(struct megasas_instance *instance);
+static int megasas_register_aen(struct megasas_instance *instance,
+				u32 seq_num, u32 class_locale_word);
+/*
+ * PCI ID table for all supported controllers
+ */
+static struct pci_device_id megasas_pci_table[] = {
+
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
+	/* xscale IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
+	/* ppc IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
+	/* ppc IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
+	/* gen2*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
+	/* gen2*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
+	/* skinny*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
+	/* skinny*/
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
+	/* xscale IOP, vega */
+	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
+	/* xscale IOP */
+	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
+	/* Fusion */
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, megasas_pci_table);
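+
+/*
+ * Note: MODULE_DEVICE_TABLE exports the PCI ID table above so that userspace
+ * (udev/modprobe) can autoload this driver when a matching controller is
+ * discovered.
+ */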
+
+static int megasas_mgmt_majorno;
+static struct megasas_mgmt_info megasas_mgmt_info;
+static struct fasync_struct *megasas_async_queue;
+static DEFINE_MUTEX(megasas_async_queue_mutex);
+
+static int megasas_poll_wait_aen;
+static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
+static u32 support_poll_for_event;
+u32 megasas_dbg_lvl;
+static u32 support_device_change;
+
+/* define lock for aen poll */
+spinlock_t poll_aen_lock;
+
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+		     u8 alt_status);
+
+static irqreturn_t megasas_isr(int irq, void *devp);
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance);
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+			    struct scsi_cmnd *scmd);
+static void megasas_complete_cmd_dpc(unsigned long instance_addr);
+void
+megasas_release_fusion(struct megasas_instance *instance);
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance);
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance);
+u8
+megasas_get_map_info(struct megasas_instance *instance);
+int
+megasas_sync_map_info(struct megasas_instance *instance);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
+		      struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_reset_fusion(struct Scsi_Host *shost);
+void megasas_fusion_ocr_wq(struct work_struct *work);
+
+void
+megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	instance->instancet->fire_cmd(instance,
+		cmd->frame_phys_addr, 0, instance->reg_set);
+}
+
+/**
+ * megasas_get_cmd -	Get a command from the free pool
+ * @instance:		Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+						  *instance)
+{
+	unsigned long flags;
+	struct megasas_cmd *cmd = NULL;
+
+	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+
+	if (!list_empty(&instance->cmd_pool)) {
+		cmd = list_entry((&instance->cmd_pool)->next,
+				 struct megasas_cmd, list);
+		list_del_init(&cmd->list);
+	} else {
+		printk(KERN_ERR "megasas: Command pool empty!\n");
+	}
+
+	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+	return cmd;
+}
+
+/**
+ * megasas_return_cmd -	Return a cmd to free command pool
+ * @instance:		Adapter soft state
+ * @cmd:		Command packet to be returned to free command pool
+ */
+inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+
+	cmd->scmd = NULL;
+	cmd->frame_count = 0;
+	list_add_tail(&cmd->list, &instance->cmd_pool);
+
+	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+}
+
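+/*
+ * Illustrative use of the command pool (a sketch, not additional driver
+ * code): callers obtain a command, build an MFI frame in it, hand it to the
+ * firmware through the per-controller ops table and return it when done:
+ *
+ *	struct megasas_cmd *cmd = megasas_get_cmd(instance);
+ *	if (cmd) {
+ *		... build the MFI frame in cmd->frame ...
+ *		instance->instancet->issue_dcmd(instance, cmd);
+ *		... wait for completion, then ...
+ *		megasas_return_cmd(instance, cmd);
+ *	}
+ */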
+
+/**
+*	The following functions are defined for xscale
+*	(deviceid : 1064R, PERC5) controllers
+*/
+
+/**
+ * megasas_enable_intr_xscale -	Enables interrupts
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+	writel(0, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_xscale -Disables interrupt
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+	u32 mask = 0x1f;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_xscale - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+{
+	return readl(&(regs)->outbound_msg_0);
+}
+/**
+ * megasas_clear_interrupt_xscale -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+	u32 status;
+	u32 mfiStatus = 0;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & MFI_OB_INTR_STATUS_MASK)
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
+		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	if (mfiStatus)
+		writel(status, &regs->outbound_intr_status);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_status);
+
+	return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_xscale -	Sends command to the FW
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_xscale(struct megasas_instance *instance,
+		dma_addr_t frame_phys_addr,
+		u32 frame_count,
+		struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel((frame_phys_addr >> 3)|(frame_count),
+	       &(regs)->inbound_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_xscale -  For controller reset
+ * @regs:                              MFI register set
+ */
+static int
+megasas_adp_reset_xscale(struct megasas_instance *instance,
+	struct megasas_register_set __iomem *regs)
+{
+	u32 i;
+	u32 pcidata;
+	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
+
+	for (i = 0; i < 3; i++)
+		msleep(1000); /* sleep for 3 secs */
+	pcidata  = 0;
+	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
+	printk(KERN_NOTICE "pcidata = %x\n", pcidata);
+	if (pcidata & 0x2) {
+		printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
+		pcidata &= ~0x2;
+		pci_write_config_dword(instance->pdev,
+				MFI_1068_PCSR_OFFSET, pcidata);
+
+		for (i = 0; i < 2; i++)
+			msleep(1000); /* need to wait 2 secs again */
+
+		pcidata  = 0;
+		pci_read_config_dword(instance->pdev,
+				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
+		printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
+		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
+			printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
+			pcidata = 0;
+			pci_write_config_dword(instance->pdev,
+				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
+		}
+	}
+	return 0;
+}
+
+/**
+ * megasas_check_reset_xscale -	For controller reset check
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_xscale(struct megasas_instance *instance,
+		struct megasas_register_set __iomem *regs)
+{
+	u32 consumer;
+	consumer = *instance->consumer;
+
+	if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+		(*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
+		return 1;
+	}
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_xscale = {
+
+	.fire_cmd = megasas_fire_cmd_xscale,
+	.enable_intr = megasas_enable_intr_xscale,
+	.disable_intr = megasas_disable_intr_xscale,
+	.clear_intr = megasas_clear_intr_xscale,
+	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
+	.adp_reset = megasas_adp_reset_xscale,
+	.check_reset = megasas_check_reset_xscale,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
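+
+/*
+ * Illustrative note (not new driver code): each controller family provides
+ * one of these templates and the rest of the driver dispatches through it,
+ * for example (frame_phys_addr/frame_count being whatever the caller built):
+ *
+ *	instance->instancet = &megasas_instance_template_xscale;
+ *	instance->instancet->enable_intr(instance->reg_set);
+ *	instance->instancet->fire_cmd(instance, frame_phys_addr,
+ *				      frame_count, instance->reg_set);
+ */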
+
+/**
+*	This is the end of set of functions & definitions specific
+*	This is the end of the set of functions & definitions specific
+*/
+
+/**
+*	The following functions are defined for ppc (deviceid : 0x60)
+* 	controllers
+*/
+
+/**
+ * megasas_enable_intr_ppc -	Enables interrupts
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
+{
+	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+	writel(~0x80000000, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_ppc -	Disable interrupt
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs)
+{
+	u32 mask = 0xFFFFFFFF;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_ppc - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_ppc -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+{
+	u32 status;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
+		return 0;
+	}
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	writel(status, &regs->outbound_doorbell_clear);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_doorbell_clear);
+
+	return 1;
+}
+/**
+ * megasas_fire_cmd_ppc -	Sends command to the FW
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_ppc(struct megasas_instance *instance,
+		dma_addr_t frame_phys_addr,
+		u32 frame_count,
+		struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel((frame_phys_addr | (frame_count<<1))|1,
+			&(regs)->inbound_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_ppc -	For controller reset
+ * @regs:				MFI register set
+ */
+static int
+megasas_adp_reset_ppc(struct megasas_instance *instance,
+			struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+
+/**
+ * megasas_check_reset_ppc -	For controller reset check
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_ppc(struct megasas_instance *instance,
+			struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+static struct megasas_instance_template megasas_instance_template_ppc = {
+
+	.fire_cmd = megasas_fire_cmd_ppc,
+	.enable_intr = megasas_enable_intr_ppc,
+	.disable_intr = megasas_disable_intr_ppc,
+	.clear_intr = megasas_clear_intr_ppc,
+	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
+	.adp_reset = megasas_adp_reset_ppc,
+	.check_reset = megasas_check_reset_ppc,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+ * megasas_enable_intr_skinny -	Enables interrupts
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
+
+	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_skinny -	Disables interrupt
+ * @regs:			MFI register set
+ */
+static inline void
+megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	u32 mask = 0xFFFFFFFF;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_skinny - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_skinny -	Check & clear interrupt
+ * @regs:				MFI register set
+ */
+static int
+megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
+		return 0;
+	}
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	writel(status, &regs->outbound_intr_status);
+
+	/*
+	* dummy read to flush PCI
+	*/
+	readl(&regs->outbound_intr_status);
+
+	return 1;
+}
+
+/**
+ * megasas_fire_cmd_skinny -	Sends command to the FW
+ * @frame_phys_addr :		Physical address of cmd
+ * @frame_count :		Number of frames for the command
+ * @regs :			MFI register set
+ */
+static inline void
+megasas_fire_cmd_skinny(struct megasas_instance *instance,
+			dma_addr_t frame_phys_addr,
+			u32 frame_count,
+			struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel(0, &(regs)->inbound_high_queue_port);
+	writel((frame_phys_addr | (frame_count<<1))|1,
+		&(regs)->inbound_low_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_skinny -	For controller reset
+ * @regs:				MFI register set
+ */
+static int
+megasas_adp_reset_skinny(struct megasas_instance *instance,
+			struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+
+/**
+ * megasas_check_reset_skinny -	For controller reset check
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_skinny(struct megasas_instance *instance,
+				struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_skinny = {
+
+	.fire_cmd = megasas_fire_cmd_skinny,
+	.enable_intr = megasas_enable_intr_skinny,
+	.disable_intr = megasas_disable_intr_skinny,
+	.clear_intr = megasas_clear_intr_skinny,
+	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
+	.adp_reset = megasas_adp_reset_skinny,
+	.check_reset = megasas_check_reset_skinny,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+
+/**
+*	The following functions are defined for gen2 (deviceid : 0x78 0x79)
+*	controllers
+*/
+
+/**
+ * megasas_enable_intr_gen2 -  Enables interrupts
+ * @regs:                      MFI register set
+ */
+static inline void
+megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+	/* write ~0x00000005 (4 & 1) to the intr mask */
+	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_gen2 - Disables interrupt
+ * @regs:                      MFI register set
+ */
+static inline void
+megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+	u32 mask = 0xFFFFFFFF;
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
+ * @regs:                      MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_interrupt_gen2 -      Check & clear interrupt
+ * @regs:                              MFI register set
+ */
+static int
+megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	u32 mfiStatus = 0;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) {
+		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+	}
+	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
+		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+	}
+
+	/*
+	 * Clear the interrupt by writing back the same value
+	 */
+	if (mfiStatus)
+		writel(status, &regs->outbound_doorbell_clear);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_status);
+
+	return mfiStatus;
+}
+/**
+ * megasas_fire_cmd_gen2 -     Sends command to the FW
+ * @frame_phys_addr :          Physical address of cmd
+ * @frame_count :              Number of frames for the command
+ * @regs :                     MFI register set
+ */
+static inline void
+megasas_fire_cmd_gen2(struct megasas_instance *instance,
+			dma_addr_t frame_phys_addr,
+			u32 frame_count,
+			struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel((frame_phys_addr | (frame_count<<1))|1,
+			&(regs)->inbound_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_gen2 -	For controller reset
+ * @regs:				MFI register set
+ */
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+			struct megasas_register_set __iomem *reg_set)
+{
+	u32			retry = 0;
+	u32			HostDiag;
+
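+	/* Write the key sequence that unlocks host diag register writes */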
+	writel(0, &reg_set->seq_offset);
+	writel(4, &reg_set->seq_offset);
+	writel(0xb, &reg_set->seq_offset);
+	writel(2, &reg_set->seq_offset);
+	writel(7, &reg_set->seq_offset);
+	writel(0xd, &reg_set->seq_offset);
+	msleep(1000);
+
+	HostDiag = (u32)readl(&reg_set->host_diag);
+
+	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
+		msleep(100);
+		HostDiag = (u32)readl(&reg_set->host_diag);
+		printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
+					retry, HostDiag);
+
+		if (retry++ >= 100)
+			return 1;
+
+	}
+
+	printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+
+	writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
+
+	ssleep(10);
+
+	HostDiag = (u32)readl(&reg_set->host_diag);
+	while (HostDiag & DIAG_RESET_ADAPTER) {
+		msleep(100);
+		HostDiag = (u32)readl(&reg_set->host_diag);
+		printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
+				retry, HostDiag);
+
+		if (retry++ >= 1000)
+			return 1;
+
+	}
+	return 0;
+}
+
+/**
+ * megasas_check_reset_gen2 -	For controller reset check
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_gen2(struct megasas_instance *instance,
+		struct megasas_register_set __iomem *regs)
+{
+	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_gen2 = {
+
+	.fire_cmd = megasas_fire_cmd_gen2,
+	.enable_intr = megasas_enable_intr_gen2,
+	.disable_intr = megasas_disable_intr_gen2,
+	.clear_intr = megasas_clear_intr_gen2,
+	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
+	.adp_reset = megasas_adp_reset_gen2,
+	.check_reset = megasas_check_reset_gen2,
+	.service_isr = megasas_isr,
+	.tasklet = megasas_complete_cmd_dpc,
+	.init_adapter = megasas_init_adapter_mfi,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd,
+	.issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+*	This is the end of the set of functions & definitions
+*	specific to gen2 (deviceid : 0x78, 0x79) controllers
+*/
+
+/*
+ * Template added for TB (Fusion)
+ */
+extern struct megasas_instance_template megasas_instance_template_fusion;
+
+/**
+ * megasas_issue_polled -	Issues a polling command
+ * @instance:			Adapter soft state
+ * @cmd:			Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+
+	struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+	frame_hdr->cmd_status = 0xFF;
+	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+	/*
+	 * Issue the frame using inbound queue port
+	 */
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	/*
+	 * Wait for cmd_status to change
+	 */
+	return wait_and_poll(instance, cmd);
+}
+
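+/*
+ * Illustrative polled-DCMD flow (a sketch of how the helper above is meant
+ * to be used, not a definitive sequence):
+ *
+ *	cmd = megasas_get_cmd(instance);
+ *	... build the DCMD frame in cmd->frame ...
+ *	megasas_issue_polled(instance, cmd);
+ *	... wait_and_poll() returns once the FW has updated cmd_status ...
+ *	megasas_return_cmd(instance, cmd);
+ */
+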
+/**
+ * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
+ * @instance:			Adapter soft state
+ * @cmd:			Command to be issued
+ *
+ * This function waits on an event for the command to be returned from ISR.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
+ * Used to issue ioctl commands.
+ */
+static int
+megasas_issue_blocked_cmd(struct megasas_instance *instance,
+			  struct megasas_cmd *cmd)
+{
+	cmd->cmd_status = ENODATA;
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+
+	return 0;
+}
+
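+/*
+ * Sketch of the other half of this handshake (inferred from the declarations
+ * above, not new code): the completion path, megasas_complete_cmd(), is
+ * expected to update cmd->cmd_status and wake int_cmd_wait_q, which releases
+ * the wait_event() above.
+ */
+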
+/**
+ * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
+ * @instance:				Adapter soft state
+ * @cmd_to_abort:			Previously issued cmd to be aborted
+ *
+ * MFI firmware can abort a previously issued AEN command (automatic event
+ * notification). The megasas_issue_blocked_abort_cmd() issues such abort
+ * cmd and waits for return status.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ */
+static int
+megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+				struct megasas_cmd *cmd_to_abort)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_abort_frame *abort_fr;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return -1;
+
+	abort_fr = &cmd->frame->abort;
+
+	/*
+	 * Prepare and issue the abort frame
+	 */
+	abort_fr->cmd = MFI_CMD_ABORT;
+	abort_fr->cmd_status = 0xFF;
+	abort_fr->flags = 0;
+	abort_fr->abort_context = cmd_to_abort->index;
+	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
+	abort_fr->abort_mfi_phys_addr_hi = 0;
+
+	cmd->sync_cmd = 1;
+	cmd->cmd_status = 0xFF;
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	/*
+	 * Wait for this cmd to complete
+	 */
+	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
+	cmd->sync_cmd = 0;
+
+	megasas_return_cmd(instance, cmd);
+	return 0;
+}
+
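+/*
+ * Illustrative use (a sketch): the driver aborts the outstanding AEN command
+ * before registering one with a different class/locale, roughly
+ *
+ *	megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+ */
+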
+/**
+ * megasas_make_sgl32 -	Prepares 32-bit SGL
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command from the mid-layer
+ * @mfi_sgl:		SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+	BUG_ON(sge_count < 0);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
+			mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+		}
+	}
+	return sge_count;
+}
+
+/**
+ * megasas_make_sgl64 -	Prepares 64-bit SGL
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command from the mid-layer
+ * @mfi_sgl:		SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+	BUG_ON(sge_count < 0);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
+			mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+		}
+	}
+	return sge_count;
+}
+
+/**
+ * megasas_make_sgl_skinny - Prepares IEEE SGL
+ * @instance:           Adapter soft state
+ * @scp:                SCSI command from the mid-layer
+ * @mfi_sgl:            SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl_skinny(struct megasas_instance *instance,
+		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
+{
+	int i;
+	int sge_count;
+	struct scatterlist *os_sgl;
+
+	sge_count = scsi_dma_map(scp);
+
+	if (sge_count) {
+		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+			mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+			mfi_sgl->sge_skinny[i].phys_addr =
+						sg_dma_address(os_sgl);
+			mfi_sgl->sge_skinny[i].flag = 0;
+		}
+	}
+	return sge_count;
+}
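+
+/*
+ * Worked example (illustrative only, with made-up addresses): if
+ * scsi_dma_map() maps a request into two DMA segments, say 0x1000 bytes at
+ * bus address 0xd0000000 and 0x200 bytes at 0xd0002000, megasas_make_sgl64()
+ * returns 2 and fills:
+ *
+ *	mfi_sgl->sge64[0].length    = 0x1000;
+ *	mfi_sgl->sge64[0].phys_addr = 0xd0000000;
+ *	mfi_sgl->sge64[1].length    = 0x200;
+ *	mfi_sgl->sge64[1].phys_addr = 0xd0002000;
+ *
+ * The 32-bit and skinny (IEEE) variants differ only in the SGE element type,
+ * with the skinny variant additionally clearing a per-SGE flag field.
+ */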
+
+/**
+ * megasas_get_frame_count - Computes the number of frames
+ * @instance:		Adapter soft state
+ * @sge_count:		number of sg elements
+ * @frame_type:		type of frame - io or pthru frame
+ *
+ * Returns the number of frames required for the given number of sge's
+ * (sge_count).
+ */
+static u32 megasas_get_frame_count(struct megasas_instance *instance,
+			u8 sge_count, u8 frame_type)
+{
+	int num_cnt;
+	int sge_bytes;
+	u32 sge_sz;
+	u32 frame_count = 0;
+
+	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+	    sizeof(struct megasas_sge32);
+
+	if (instance->flag_ieee) {
+		sge_sz = sizeof(struct megasas_sge_skinny);
+	}
+
+	/*
+	 * The main frame can contain 2 SGEs for 64-bit SGLs and
+	 * 3 SGEs for 32-bit SGLs for an ldio frame, and
+	 * 1 SGE for 64-bit SGLs and
+	 * 2 SGEs for 32-bit SGLs for a pthru frame.
+	 */
+	if (unlikely(frame_type == PTHRU_FRAME)) {
+		if (instance->flag_ieee == 1) {
+			num_cnt = sge_count - 1;
+		} else if (IS_DMA64)
+			num_cnt = sge_count - 1;
+		else
+			num_cnt = sge_count - 2;
+	} else {
+		if (instance->flag_ieee == 1) {
+			num_cnt = sge_count - 1;
+		} else if (IS_DMA64)
+			num_cnt = sge_count - 2;
+		else
+			num_cnt = sge_count - 3;
+	}
+
+	if (num_cnt > 0) {
+		sge_bytes = sge_sz * num_cnt;
+
+		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
+	}
+	/* Main frame */
+	frame_count += 1;
+
+	if (frame_count > 7)
+		frame_count = 8;
+	return frame_count;
+}
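+
+/*
+ * Worked example (illustrative only; the 64-byte MFI frame size is stated in
+ * the frame pool code below, while the 16-byte SGE size used here is just a
+ * placeholder for the arithmetic): an ldio frame with sge_count == 10 on the
+ * non-IEEE 64-bit SGL path gives
+ *
+ *	num_cnt     = 10 - 2 = 8	(two SGEs fit in the main frame)
+ *	sge_bytes   = 16 * 8 = 128
+ *	frame_count = 128 / 64 = 2	(extra frames needed for the SGL)
+ *	frame_count = 2 + 1 = 3		(plus the main frame)
+ *
+ * and the result is capped at 8 frames.
+ */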
+
+/**
+ * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static int
+megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   struct megasas_cmd *cmd)
+{
+	u32 is_logical;
+	u32 device_id;
+	u16 flags = 0;
+	struct megasas_pthru_frame *pthru;
+
+	is_logical = MEGASAS_IS_LOGICAL(scp);
+	device_id = MEGASAS_DEV_INDEX(instance, scp);
+	pthru = (struct megasas_pthru_frame *)cmd->frame;
+
+	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+		flags = MFI_FRAME_DIR_WRITE;
+	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		flags = MFI_FRAME_DIR_READ;
+	else if (scp->sc_data_direction == PCI_DMA_NONE)
+		flags = MFI_FRAME_DIR_NONE;
+
+	if (instance->flag_ieee == 1) {
+		flags |= MFI_FRAME_IEEE;
+	}
+
+	/*
+	 * Prepare the DCDB frame
+	 */
+	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
+	pthru->cmd_status = 0x0;
+	pthru->scsi_status = 0x0;
+	pthru->target_id = device_id;
+	pthru->lun = scp->device->lun;
+	pthru->cdb_len = scp->cmd_len;
+	pthru->timeout = 0;
+	pthru->pad_0 = 0;
+	pthru->flags = flags;
+	pthru->data_xfer_len = scsi_bufflen(scp);
+
+	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+	/*
+	* If the command is for the tape device, set the
+	* pthru timeout to the os layer timeout value.
+	*/
+	if (scp->device->type == TYPE_TAPE) {
+		if ((scp->request->timeout / HZ) > 0xFFFF)
+			pthru->timeout = 0xFFFF;
+		else
+			pthru->timeout = scp->request->timeout / HZ;
+	}
+
+	/*
+	 * Construct SGL
+	 */
+	if (instance->flag_ieee == 1) {
+		pthru->flags |= MFI_FRAME_SGL64;
+		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
+						      &pthru->sgl);
+	} else if (IS_DMA64) {
+		pthru->flags |= MFI_FRAME_SGL64;
+		pthru->sge_count = megasas_make_sgl64(instance, scp,
+						      &pthru->sgl);
+	} else
+		pthru->sge_count = megasas_make_sgl32(instance, scp,
+						      &pthru->sgl);
+
+	if (pthru->sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: DCDB too many SGE NUM=%x\n",
+			pthru->sge_count);
+		return 0;
+	}
+
+	/*
+	 * Sense info specific
+	 */
+	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
+	pthru->sense_buf_phys_addr_hi = 0;
+	pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+
+	/*
+	 * Compute the total number of frames this command consumes. FW uses
+	 * this number to pull sufficient number of frames from host memory.
+	 */
+	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
+							PTHRU_FRAME);
+
+	return cmd->frame_count;
+}
+
+/**
+ * megasas_build_ldio -	Prepares IOs to logical devices
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * This function prepares the frames (and accompanying SGLs) for regular
+ * read/write IOs to logical drives.
+ */
+static int
+megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
+		   struct megasas_cmd *cmd)
+{
+	u32 device_id;
+	u8 sc = scp->cmnd[0];
+	u16 flags = 0;
+	struct megasas_io_frame *ldio;
+
+	device_id = MEGASAS_DEV_INDEX(instance, scp);
+	ldio = (struct megasas_io_frame *)cmd->frame;
+
+	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+		flags = MFI_FRAME_DIR_WRITE;
+	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		flags = MFI_FRAME_DIR_READ;
+
+	if (instance->flag_ieee == 1) {
+		flags |= MFI_FRAME_IEEE;
+	}
+
+	/*
+	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+	 */
+	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
+	ldio->cmd_status = 0x0;
+	ldio->scsi_status = 0x0;
+	ldio->target_id = device_id;
+	ldio->timeout = 0;
+	ldio->reserved_0 = 0;
+	ldio->pad_0 = 0;
+	ldio->flags = flags;
+	ldio->start_lba_hi = 0;
+	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
+
+	/*
+	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
+	 */
+	if (scp->cmd_len == 6) {
+		ldio->lba_count = (u32) scp->cmnd[4];
+		ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+		    ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
+		ldio->start_lba_lo &= 0x1FFFFF;
+	}
+
+	/*
+	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
+	 */
+	else if (scp->cmd_len == 10) {
+		ldio->lba_count = (u32) scp->cmnd[8] |
+		    ((u32) scp->cmnd[7] << 8);
+		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+		    ((u32) scp->cmnd[3] << 16) |
+		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	/*
+	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+	 */
+	else if (scp->cmd_len == 12) {
+		ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
+		    ((u32) scp->cmnd[7] << 16) |
+		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+		    ((u32) scp->cmnd[3] << 16) |
+		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	/*
+	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
+	 */
+	else if (scp->cmd_len == 16) {
+		ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
+		    ((u32) scp->cmnd[11] << 16) |
+		    ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+
+		ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+		    ((u32) scp->cmnd[7] << 16) |
+		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+		ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+		    ((u32) scp->cmnd[3] << 16) |
+		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+
+	}
+
+	/*
+	 * Construct SGL
+	 */
+	if (instance->flag_ieee) {
+		ldio->flags |= MFI_FRAME_SGL64;
+		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
+					      &ldio->sgl);
+	} else if (IS_DMA64) {
+		ldio->flags |= MFI_FRAME_SGL64;
+		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+	} else
+		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+
+	if (ldio->sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+			ldio->sge_count);
+		return 0;
+	}
+
+	/*
+	 * Sense info specific
+	 */
+	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
+	ldio->sense_buf_phys_addr_hi = 0;
+	ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
+
+	/*
+	 * Compute the total number of frames this command consumes. FW uses
+	 * this number to pull sufficient number of frames from host memory.
+	 */
+	cmd->frame_count = megasas_get_frame_count(instance,
+			ldio->sge_count, IO_FRAME);
+
+	return cmd->frame_count;
+}
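+
+/*
+ * Worked example (illustrative only): a 10-byte READ with
+ * scp->cmnd[] = { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 }
+ * is decoded by megasas_build_ldio() as
+ *
+ *	ldio->cmd          = MFI_CMD_LD_READ	(bit 1 of opcode 0x28 is clear)
+ *	ldio->start_lba_lo = 0x00123456		(cmnd[2..5], big-endian)
+ *	ldio->start_lba_hi = 0
+ *	ldio->lba_count    = 0x8		(cmnd[7..8], big-endian)
+ */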
+
+/**
+ * megasas_is_ldio -		Checks if the cmd is for logical drive
+ * @scmd:			SCSI command
+ *
+ * Called by megasas_queue_command to find out if the command to be queued
+ * is a logical drive command
+ */
+inline int megasas_is_ldio(struct scsi_cmnd *cmd)
+{
+	if (!MEGASAS_IS_LOGICAL(cmd))
+		return 0;
+	switch (cmd->cmnd[0]) {
+	case READ_10:
+	case WRITE_10:
+	case READ_12:
+	case WRITE_12:
+	case READ_6:
+	case WRITE_6:
+	case READ_16:
+	case WRITE_16:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
+ *                              	in FW
+ * @instance:				Adapter soft state
+ */
+static inline void
+megasas_dump_pending_frames(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	int i, n;
+	union megasas_sgl *mfi_sgl;
+	struct megasas_io_frame *ldio;
+	struct megasas_pthru_frame *pthru;
+	u32 sgcount;
+	u32 max_cmd = instance->max_fw_cmds;
+
+	printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",
+		instance->host->host_no);
+	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",
+		instance->host->host_no,
+		atomic_read(&instance->fw_outstanding));
+	if (IS_DMA64)
+		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",
+			instance->host->host_no);
+	else
+		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",
+			instance->host->host_no);
+
+	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW :\n",
+		instance->host->host_no);
+	for (i = 0; i < max_cmd; i++) {
+		cmd = instance->cmd_list[i];
+		if (!cmd->scmd)
+			continue;
+		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",
+			instance->host->host_no,
+			(unsigned long)cmd->frame_phys_addr);
+		if (megasas_is_ldio(cmd->scmd)) {
+			ldio = (struct megasas_io_frame *)cmd->frame;
+			mfi_sgl = &ldio->sgl;
+			sgcount = ldio->sge_count;
+			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x, sge count : 0x%x\n",
+				instance->host->host_no, cmd->frame_count,
+				ldio->cmd, ldio->target_id, ldio->start_lba_lo,
+				ldio->start_lba_hi,
+				ldio->sense_buf_phys_addr_lo, sgcount);
+		} else {
+			pthru = (struct megasas_pthru_frame *)cmd->frame;
+			mfi_sgl = &pthru->sgl;
+			sgcount = pthru->sge_count;
+			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x, sge count : 0x%x\n",
+				instance->host->host_no, cmd->frame_count,
+				pthru->cmd, pthru->target_id, pthru->lun,
+				pthru->cdb_len, pthru->data_xfer_len,
+				pthru->sense_buf_phys_addr_lo, sgcount);
+		}
+		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
+			for (n = 0; n < sgcount; n++) {
+				if (IS_DMA64)
+					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",
+						mfi_sgl->sge64[n].length,
+						(unsigned long)mfi_sgl->sge64[n].phys_addr);
+				else
+					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+						mfi_sgl->sge32[n].length,
+						mfi_sgl->sge32[n].phys_addr);
+			}
+		}
+		printk(KERN_ERR "\n");
+	} /* for max_cmd */
+	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW :\n",
+		instance->host->host_no);
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = instance->cmd_list[i];
+
+		if (cmd->sync_cmd == 1) {
+			printk(KERN_ERR "0x%08lx : ",
+				(unsigned long)cmd->frame_phys_addr);
+		}
+	}
+	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",
+		instance->host->host_no);
+}
+
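+/**
+ * megasas_build_and_issue_cmd -	Builds a cmd for a SCSI command and fires it
+ * @instance:				Adapter soft state
+ * @scmd:				SCSI command from the mid-layer
+ *
+ * Returns 0 on success, SCSI_MLQUEUE_HOST_BUSY if no free megasas_cmd is
+ * available, and 1 if the frame could not be built.
+ */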
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+			    struct scsi_cmnd *scmd)
+{
+	struct megasas_cmd *cmd;
+	u32 frame_count;
+
+	cmd = megasas_get_cmd(instance);
+	if (!cmd)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 * Logical drive command
+	 */
+	if (megasas_is_ldio(scmd))
+		frame_count = megasas_build_ldio(instance, scmd, cmd);
+	else
+		frame_count = megasas_build_dcdb(instance, scmd, cmd);
+
+	if (!frame_count)
+		goto out_return_cmd;
+
+	cmd->scmd = scmd;
+	scmd->SCp.ptr = (char *)cmd;
+
+	/*
+	 * Issue the command to the FW
+	 */
+	atomic_inc(&instance->fw_outstanding);
+
+	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
+				cmd->frame_count-1, instance->reg_set);
+	/*
+	 * Check if we have pending cmds to be completed
+	 */
+	if (poll_mode_io && atomic_read(&instance->fw_outstanding))
+		tasklet_schedule(&instance->isr_tasklet);
+
+	return 0;
+out_return_cmd:
+	megasas_return_cmd(instance, cmd);
+	return 1;
+}
+
+
+/**
+ * megasas_queue_command -	Queue entry point
+ * @scmd:			SCSI command to be queued
+ * @done:			Callback entry point
+ */
+static int
+megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+{
+	struct megasas_instance *instance;
+	unsigned long flags;
+
+	instance = (struct megasas_instance *)
+	    scmd->device->host->hostdata;
+
+	if (instance->issuepend_done == 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	scmd->scsi_done = done;
+	scmd->result = 0;
+
+	if (MEGASAS_IS_LOGICAL(scmd) &&
+	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+		scmd->result = DID_BAD_TARGET << 16;
+		goto out_done;
+	}
+
+	switch (scmd->cmnd[0]) {
+	case SYNCHRONIZE_CACHE:
+		/*
+		 * FW takes care of flush cache on its own
+		 * No need to send it down
+		 */
+		scmd->result = DID_OK << 16;
+		goto out_done;
+	default:
+		break;
+	}
+
+	if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+		printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	return 0;
+
+ out_done:
+	done(scmd);
+	return 0;
+}
+
+static DEF_SCSI_QCMD(megasas_queue_command)
+
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+	int i;
+
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+		if ((megasas_mgmt_info.instance[i]) &&
+		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+			return megasas_mgmt_info.instance[i];
+	}
+
+	return NULL;
+}
+
+static int megasas_slave_configure(struct scsi_device *sdev)
+{
+	u16 pd_index = 0;
+	struct megasas_instance *instance;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+
+	/*
+	* Don't export physical disk devices to the disk driver.
+	*
+	* FIXME: Currently we don't export them to the midlayer at all.
+	*        That will be fixed once LSI engineers have audited the
+	*        firmware for possible issues.
+	*/
+	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
+				sdev->type == TYPE_DISK) {
+		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+								sdev->id;
+		if (instance->pd_list[pd_index].driveState ==
+						MR_PD_STATE_SYSTEM) {
+			blk_queue_rq_timeout(sdev->request_queue,
+				MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+			return 0;
+		}
+		return -ENXIO;
+	}
+
+	/*
+	* The RAID firmware may require extended timeouts.
+	*/
+	blk_queue_rq_timeout(sdev->request_queue,
+		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+	return 0;
+}
+
+static int megasas_slave_alloc(struct scsi_device *sdev)
+{
+	u16 pd_index = 0;
+	struct megasas_instance *instance;
+
+	instance = megasas_lookup_instance(sdev->host->host_no);
+	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
+				(sdev->type == TYPE_DISK)) {
+		/*
+		 * Open the OS scan to the SYSTEM PD
+		 */
+		pd_index =
+			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+			sdev->id;
+		if ((instance->pd_list[pd_index].driveState ==
+					MR_PD_STATE_SYSTEM) &&
+			(instance->pd_list[pd_index].driveType ==
+						TYPE_DISK)) {
+			return 0;
+		}
+		return -ENXIO;
+	}
+	return 0;
+}
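+
+/*
+ * Illustrative note: both slave callbacks above index pd_list the same way,
+ *
+ *	pd_index = sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL + sdev->id;
+ *
+ * so, for example, a disk at channel 1, id 5 lands at
+ * pd_index = MEGASAS_MAX_DEV_PER_CHANNEL + 5 (the constant's value comes from
+ * the driver header and is not assumed here).
+ */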
+
+void megaraid_sas_kill_hba(struct megasas_instance *instance)
+{
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) {
+		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+	} else {
+		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
+	}
+}
+
+/**
+ * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
+ *					restored to max value
+ * @instance:			Adapter soft state
+ */
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
+{
+	unsigned long flags;
+	if (instance->flag & MEGASAS_FW_BUSY
+		&& time_after(jiffies, instance->last_time + 5 * HZ)
+		&& atomic_read(&instance->fw_outstanding) < 17) {
+
+		spin_lock_irqsave(instance->host->host_lock, flags);
+		instance->flag &= ~MEGASAS_FW_BUSY;
+		if ((instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+			(instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+			instance->host->can_queue =
+				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
+		} else
+			instance->host->can_queue =
+				instance->max_fw_cmds - MEGASAS_INT_CMDS;
+
+		spin_unlock_irqrestore(instance->host->host_lock, flags);
+	}
+}
+
+/**
+ * megasas_complete_cmd_dpc	 -	Completes outstanding cmds from the reply queue
+ * @instance_addr:			Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+{
+	u32 producer;
+	u32 consumer;
+	u32 context;
+	struct megasas_cmd *cmd;
+	struct megasas_instance *instance =
+				(struct megasas_instance *)instance_addr;
+	unsigned long flags;
+
+	/* If we have already declared adapter dead, do not complete cmds */
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+		return;
+
+	spin_lock_irqsave(&instance->completion_lock, flags);
+
+	producer = *instance->producer;
+	consumer = *instance->consumer;
+
+	while (consumer != producer) {
+		context = instance->reply_queue[consumer];
+		if (context >= instance->max_fw_cmds) {
+			printk(KERN_ERR "Unexpected context value %x\n",
+				context);
+			BUG();
+		}
+
+		cmd = instance->cmd_list[context];
+
+		megasas_complete_cmd(instance, cmd, DID_OK);
+
+		consumer++;
+		if (consumer == (instance->max_fw_cmds + 1)) {
+			consumer = 0;
+		}
+	}
+
+	*instance->consumer = producer;
+
+	spin_unlock_irqrestore(&instance->completion_lock, flags);
+
+	/*
+	 * Check if we can restore can_queue
+	 */
+	megasas_check_and_restore_queue_depth(instance);
+}
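+
+/*
+ * Worked example (illustrative only): with max_fw_cmds == 4 the reply ring
+ * has 5 slots (indices 0..4). If *producer == 1 and *consumer == 4 when the
+ * tasklet above runs, it completes the contexts in reply_queue[4] and
+ * reply_queue[0] (the consumer index wraps when it reaches max_fw_cmds + 1)
+ * and then stores *consumer = 1.
+ */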
+
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+		*instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN;
+	}
+	instance->instancet->disable_intr(instance->reg_set);
+	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+	instance->issuepend_done = 0;
+
+	atomic_set(&instance->fw_outstanding, 0);
+	megasas_internal_reset_defer_cmds(instance);
+	process_fw_state_change_wq(&instance->work_init);
+}
+
+/**
+ * megasas_wait_for_outstanding -	Wait for all outstanding cmds
+ * @instance:				Adapter soft state
+ *
+ * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
+ * complete all its outstanding commands. Returns error if one or more IOs
+ * are pending after this time period. It also marks the controller dead.
+ */
+static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+{
+	int i;
+	u32 reset_index;
+	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+	u8 adprecovery;
+	unsigned long flags;
+	struct list_head clist_local;
+	struct megasas_cmd *reset_cmd;
+	u32 fw_state;
+	u8 kill_adapter_flag;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	adprecovery = instance->adprecovery;
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+
+		INIT_LIST_HEAD(&clist_local);
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		list_splice_init(&instance->internal_reset_pending_q,
+				&clist_local);
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+		for (i = 0; i < wait_time; i++) {
+			msleep(1000);
+			spin_lock_irqsave(&instance->hba_lock, flags);
+			adprecovery = instance->adprecovery;
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+				break;
+		}
+
+		if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+			printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+			spin_lock_irqsave(&instance->hba_lock, flags);
+			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			return FAILED;
+		}
+
+		reset_index	= 0;
+		while (!list_empty(&clist_local)) {
+			reset_cmd	= list_entry((&clist_local)->next,
+						struct megasas_cmd, list);
+			list_del_init(&reset_cmd->list);
+			if (reset_cmd->scmd) {
+				reset_cmd->scmd->result = DID_RESET << 16;
+				printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n",
+					reset_index, reset_cmd,
+					reset_cmd->scmd->cmnd[0],
+					reset_cmd->scmd->serial_number);
+
+				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+				megasas_return_cmd(instance, reset_cmd);
+			} else if (reset_cmd->sync_cmd) {
+				printk(KERN_NOTICE "megasas: %p synch cmd on "
+						"the reset queue\n",
+						reset_cmd);
+
+				reset_cmd->cmd_status = ENODATA;
+				instance->instancet->fire_cmd(instance,
+						reset_cmd->frame_phys_addr,
+						0, instance->reg_set);
+			} else {
+				printk(KERN_NOTICE "megasas: %p unexpected "
+					"cmd on the reset list\n",
+					reset_cmd);
+			}
+			reset_index++;
+		}
+
+		return SUCCESS;
+	}
+
+	for (i = 0; i < wait_time; i++) {
+
+		int outstanding = atomic_read(&instance->fw_outstanding);
+
+		if (!outstanding)
+			break;
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+			       "commands to complete\n", i, outstanding);
+			/*
+			 * Call the cmd completion routine. Cmds are
+			 * completed directly without depending on the isr.
+			 */
+			megasas_complete_cmd_dpc((unsigned long)instance);
+		}
+
+		msleep(1000);
+	}
+
+	i = 0;
+	kill_adapter_flag = 0;
+	do {
+		fw_state = instance->instancet->read_fw_status_reg(
+					instance->reg_set) & MFI_STATE_MASK;
+		if ((fw_state == MFI_STATE_FAULT) &&
+			(instance->disableOnlineCtrlReset == 0)) {
+			if (i == 3) {
+				kill_adapter_flag = 2;
+				break;
+			}
+			megasas_do_ocr(instance);
+			kill_adapter_flag = 1;
+
+			/* wait for 1 sec to let FW finish the pending cmds */
+			msleep(1000);
+		}
+		i++;
+	} while (i <= 3);
+
+	if (atomic_read(&instance->fw_outstanding) &&
+					!kill_adapter_flag) {
+		if (instance->disableOnlineCtrlReset == 0) {
+
+			megasas_do_ocr(instance);
+
+			/* wait up to wait_time secs to let FW finish the pending cmds */
+			for (i = 0; i < wait_time; i++) {
+				int outstanding =
+					atomic_read(&instance->fw_outstanding);
+				if (!outstanding)
+					return SUCCESS;
+				msleep(1000);
+			}
+		}
+	}
+
+	if (atomic_read(&instance->fw_outstanding) ||
+					(kill_adapter_flag == 2)) {
+		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+		/*
+		* Send signal to FW to stop processing any pending cmds.
+		* The controller will be taken offline by the OS now.
+		*/
+		if ((instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+			(instance->pdev->device ==
+			PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+			writel(MFI_STOP_ADP,
+				&instance->reg_set->doorbell);
+		} else {
+			writel(MFI_STOP_ADP,
+				&instance->reg_set->inbound_doorbell);
+		}
+		megasas_dump_pending_frames(instance);
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		return FAILED;
+	}
+
+	printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+
+	return SUCCESS;
+}
+
+/**
+ * megasas_generic_reset -	Generic reset routine
+ * @scmd:			Mid-layer SCSI command
+ *
+ * This routine implements a generic reset handler for device, bus and host
+ * reset requests. Device, bus and host specific reset handlers can use this
+ * function after they do their specific tasks.
+ */
+static int megasas_generic_reset(struct scsi_cmnd *scmd)
+{
+	int ret_val;
+	struct megasas_instance *instance;
+
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
+		 scmd->serial_number, scmd->cmnd[0], scmd->retries);
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		printk(KERN_ERR "megasas: cannot recover from previous reset "
+		       "failures\n");
+		return FAILED;
+	}
+
+	ret_val = megasas_wait_for_outstanding(instance);
+	if (ret_val == SUCCESS)
+		printk(KERN_NOTICE "megasas: reset successful\n");
+	else
+		printk(KERN_ERR "megasas: failed to do reset\n");
+
+	return ret_val;
+}
+
+/**
+ * megasas_reset_timer - quiesce the adapter if required
+ * @scmd:		scsi cmnd
+ *
+ * Sets the FW busy flag and reduces the host->can_queue if the
+ * cmd has not been completed within the timeout period.
+ */
+static enum
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+	struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
+	struct megasas_instance *instance;
+	unsigned long flags;
+
+	if (time_after(jiffies, scmd->jiffies_at_alloc +
+				(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+		return BLK_EH_NOT_HANDLED;
+	}
+
+	instance = cmd->instance;
+	if (!(instance->flag & MEGASAS_FW_BUSY)) {
+		/* FW appears busy; throttle IO by shrinking can_queue */
+		spin_lock_irqsave(instance->host->host_lock, flags);
+
+		instance->host->can_queue = 16;
+		instance->last_time = jiffies;
+		instance->flag |= MEGASAS_FW_BUSY;
+
+		spin_unlock_irqrestore(instance->host->host_lock, flags);
+	}
+	return BLK_EH_RESET_TIMER;
+}
+
+/**
+ * megasas_reset_device -	Device reset handler entry point
+ */
+static int megasas_reset_device(struct scsi_cmnd *scmd)
+{
+	int ret;
+
+	/*
+	 * First wait for all commands to complete
+	 */
+	ret = megasas_generic_reset(scmd);
+
+	return ret;
+}
+
+/**
+ * megasas_reset_bus_host -	Bus & host reset handler entry point
+ */
+static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
+{
+	int ret;
+	struct megasas_instance *instance;
+	instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+	/*
+	 * First wait for all commands to complete
+	 */
+	if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+		ret = megasas_reset_fusion(scmd->device->host);
+	else
+		ret = megasas_generic_reset(scmd);
+
+	return ret;
+}
+
+/**
+ * megasas_bios_param - Returns disk geometry for a disk
+ * @sdev: 		device handle
+ * @bdev:		block device
+ * @capacity:		drive capacity
+ * @geom:		geometry parameters
+ */
+static int
+megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+		 sector_t capacity, int geom[])
+{
+	int heads;
+	int sectors;
+	sector_t cylinders;
+	unsigned long tmp;
+	/* Default heads (64) & sectors (32) */
+	heads = 64;
+	sectors = 32;
+
+	tmp = heads * sectors;
+	cylinders = capacity;
+
+	sector_div(cylinders, tmp);
+
+	/*
+	 * Handle extended translation size for logical drives > 1 GB
+	 */
+
+	if (capacity >= 0x200000) {
+		heads = 255;
+		sectors = 63;
+		tmp = heads*sectors;
+		cylinders = capacity;
+		sector_div(cylinders, tmp);
+	}
+
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = cylinders;
+
+	return 0;
+}
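+
+/*
+ * Worked example (illustrative only): a 2 GiB logical drive with 512-byte
+ * sectors has capacity == 0x400000 sectors, which is >= 0x200000, so the
+ * extended translation applies:
+ *
+ *	heads     = 255
+ *	sectors   = 63
+ *	cylinders = 0x400000 / (255 * 63) = 4194304 / 16065 = 261 (integer division)
+ */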
+
+static void megasas_aen_polling(struct work_struct *work);
+
+/**
+ * megasas_service_aen -	Processes an event notification
+ * @instance:			Adapter soft state
+ * @cmd:			AEN command completed by the ISR
+ *
+ * For AEN, driver sends a command down to FW that is held by the FW till an
+ * event occurs. When an event of interest occurs, FW completes the command
+ * that it was previously holding.
+ *
+ * This routine sends a SIGIO signal to processes that have registered with
+ * the driver for AEN.
+ */
+static void
+megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	unsigned long flags;
+	/*
+	 * Don't signal the app if this is just the completion of an aborted,
+	 * previously registered AEN
+	 */
+	if ((!cmd->abort_aen) && (instance->unload == 0)) {
+		spin_lock_irqsave(&poll_aen_lock, flags);
+		megasas_poll_wait_aen = 1;
+		spin_unlock_irqrestore(&poll_aen_lock, flags);
+		wake_up(&megasas_poll_wait);
+		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+	} else
+		cmd->abort_aen = 0;
+
+	instance->aen_cmd = NULL;
+	megasas_return_cmd(instance, cmd);
+
+	if ((instance->unload == 0) &&
+	    (instance->issuepend_done == 1)) {
+		struct megasas_aen_event *ev;
+		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+		if (!ev) {
+			printk(KERN_ERR "megasas_service_aen: out of memory\n");
+		} else {
+			ev->instance = instance;
+			instance->ev = ev;
+			INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
+			schedule_delayed_work(
+				(struct delayed_work *)&ev->hotplug_work, 0);
+		}
+	}
+}
+
+/*
+ * Scsi host template for megaraid_sas driver
+ */
+static struct scsi_host_template megasas_template = {
+
+	.module = THIS_MODULE,
+	.name = "LSI SAS based MegaRAID driver",
+	.proc_name = "megaraid_sas",
+	.slave_configure = megasas_slave_configure,
+	.slave_alloc = megasas_slave_alloc,
+	.queuecommand = megasas_queue_command,
+	.eh_device_reset_handler = megasas_reset_device,
+	.eh_bus_reset_handler = megasas_reset_bus_host,
+	.eh_host_reset_handler = megasas_reset_bus_host,
+	.eh_timed_out = megasas_reset_timer,
+	.bios_param = megasas_bios_param,
+	.use_clustering = ENABLE_CLUSTERING,
+};
+
+/**
+ * megasas_complete_int_cmd -	Completes an internal command
+ * @instance:			Adapter soft state
+ * @cmd:			Command to be completed
+ *
+ * The megasas_issue_blocked_cmd() function waits for a command to complete
+ * after it issues a command. This function wakes up that waiting routine by
+ * calling wake_up() on the wait queue.
+ */
+static void
+megasas_complete_int_cmd(struct megasas_instance *instance,
+			 struct megasas_cmd *cmd)
+{
+	cmd->cmd_status = cmd->frame->io.cmd_status;
+
+	if (cmd->cmd_status == ENODATA) {
+		cmd->cmd_status = 0;
+	}
+	wake_up(&instance->int_cmd_wait_q);
+}
+
+/**
+ * megasas_complete_abort -	Completes aborting a command
+ * @instance:			Adapter soft state
+ * @cmd:			Cmd that was issued to abort another cmd
+ *
+ * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
+ * after it issues an abort on a previously issued command. This function
+ * wakes up all functions waiting on the same wait queue.
+ */
+static void
+megasas_complete_abort(struct megasas_instance *instance,
+		       struct megasas_cmd *cmd)
+{
+	if (cmd->sync_cmd) {
+		cmd->sync_cmd = 0;
+		cmd->cmd_status = 0;
+		wake_up(&instance->abort_cmd_wait_q);
+	}
+
+	return;
+}
+
+/**
+ * megasas_complete_cmd -	Completes a command
+ * @instance:			Adapter soft state
+ * @cmd:			Command to be completed
+ * @alt_status:			If non-zero, use this value as status to
+ * 				SCSI mid-layer instead of the value returned
+ * 				by the FW. This should be used if caller wants
+ * 				an alternate status (as in the case of aborted
+ * 				commands)
+ */
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+		     u8 alt_status)
+{
+	int exception = 0;
+	struct megasas_header *hdr = &cmd->frame->hdr;
+	unsigned long flags;
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	/* flag for the retry reset */
+	cmd->retry_for_fw_reset = 0;
+
+	if (cmd->scmd)
+		cmd->scmd->SCp.ptr = NULL;
+
+	switch (hdr->cmd) {
+
+	case MFI_CMD_PD_SCSI_IO:
+	case MFI_CMD_LD_SCSI_IO:
+
+		/*
+		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+		 * issued either through an IO path or an IOCTL path. If it
+		 * was via IOCTL, we will send it to internal completion.
+		 */
+		if (cmd->sync_cmd) {
+			cmd->sync_cmd = 0;
+			megasas_complete_int_cmd(instance, cmd);
+			break;
+		}
+
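+		/* fall through - IO path cmds share completion with LD read/write */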
+	case MFI_CMD_LD_READ:
+	case MFI_CMD_LD_WRITE:
+
+		if (alt_status) {
+			cmd->scmd->result = alt_status << 16;
+			exception = 1;
+		}
+
+		if (exception) {
+
+			atomic_dec(&instance->fw_outstanding);
+
+			scsi_dma_unmap(cmd->scmd);
+			cmd->scmd->scsi_done(cmd->scmd);
+			megasas_return_cmd(instance, cmd);
+
+			break;
+		}
+
+		switch (hdr->cmd_status) {
+
+		case MFI_STAT_OK:
+			cmd->scmd->result = DID_OK << 16;
+			break;
+
+		case MFI_STAT_SCSI_IO_FAILED:
+		case MFI_STAT_LD_INIT_IN_PROGRESS:
+			cmd->scmd->result =
+			    (DID_ERROR << 16) | hdr->scsi_status;
+			break;
+
+		case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
+
+			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
+				memset(cmd->scmd->sense_buffer, 0,
+				       SCSI_SENSE_BUFFERSIZE);
+				memcpy(cmd->scmd->sense_buffer, cmd->sense,
+				       hdr->sense_len);
+
+				cmd->scmd->result |= DRIVER_SENSE << 24;
+			}
+
+			break;
+
+		case MFI_STAT_LD_OFFLINE:
+		case MFI_STAT_DEVICE_NOT_FOUND:
+			cmd->scmd->result = DID_BAD_TARGET << 16;
+			break;
+
+		default:
+			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+			       hdr->cmd_status);
+			cmd->scmd->result = DID_ERROR << 16;
+			break;
+		}
+
+		atomic_dec(&instance->fw_outstanding);
+
+		scsi_dma_unmap(cmd->scmd);
+		cmd->scmd->scsi_done(cmd->scmd);
+		megasas_return_cmd(instance, cmd);
+
+		break;
+
+	case MFI_CMD_SMP:
+	case MFI_CMD_STP:
+	case MFI_CMD_DCMD:
+		/* Check for LD map update */
+		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
+		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
+			spin_lock_irqsave(instance->host->host_lock, flags);
+			if (cmd->frame->hdr.cmd_status != 0) {
+				if (cmd->frame->hdr.cmd_status !=
+				    MFI_STAT_NOT_FOUND)
+					printk(KERN_WARNING "megasas: map sync "
+					       "failed, status = 0x%x.\n",
+					       cmd->frame->hdr.cmd_status);
+				else {
+					megasas_return_cmd(instance, cmd);
+					spin_unlock_irqrestore(
+						instance->host->host_lock,
+						flags);
+					break;
+				}
+			} else
+				instance->map_id++;
+			megasas_return_cmd(instance, cmd);
+			if (MR_ValidateMapInfo(
+				    fusion->ld_map[(instance->map_id & 1)],
+				    fusion->load_balance_info))
+				fusion->fast_path_io = 1;
+			else
+				fusion->fast_path_io = 0;
+			megasas_sync_map_info(instance);
+			spin_unlock_irqrestore(instance->host->host_lock,
+					       flags);
+			break;
+		}
+		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+			cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
+			spin_lock_irqsave(&poll_aen_lock, flags);
+			megasas_poll_wait_aen = 0;
+			spin_unlock_irqrestore(&poll_aen_lock, flags);
+		}
+
+		/*
+		 * See if got an event notification
+		 */
+		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
+			megasas_service_aen(instance, cmd);
+		else
+			megasas_complete_int_cmd(instance, cmd);
+
+		break;
+
+	case MFI_CMD_ABORT:
+		/*
+		 * Cmd issued to abort another cmd returned
+		 */
+		megasas_complete_abort(instance, cmd);
+		break;
+
+	default:
+		printk(KERN_DEBUG "megasas: Unknown command completed! [0x%X]\n",
+		       hdr->cmd);
+		break;
+	}
+}
+
+/**
+ * megasas_issue_pending_cmds_again -	issue all pending cmds
+ *                              	in FW again because of the fw reset
+ * @instance:				Adapter soft state
+ */
+static inline void
+megasas_issue_pending_cmds_again(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	struct list_head clist_local;
+	union megasas_evt_class_locale class_locale;
+	unsigned long flags;
+	u32 seq_num;
+
+	INIT_LIST_HEAD(&clist_local);
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	while (!list_empty(&clist_local)) {
+		cmd	= list_entry((&clist_local)->next,
+					struct megasas_cmd, list);
+		list_del_init(&cmd->list);
+
+		if (cmd->sync_cmd || cmd->scmd) {
+			printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d "
+				"detected to be pending while HBA reset.\n",
+				cmd, cmd->scmd, cmd->sync_cmd);
+
+			cmd->retry_for_fw_reset++;
+
+			if (cmd->retry_for_fw_reset == 3) {
+				printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d "
+					"was tried multiple times during reset. "
+					"Shutting down the HBA\n",
+					cmd, cmd->scmd, cmd->sync_cmd);
+				megaraid_sas_kill_hba(instance);
+
+				instance->adprecovery =
+						MEGASAS_HW_CRITICAL_ERROR;
+				return;
+			}
+		}
+
+		if (cmd->sync_cmd == 1) {
+			if (cmd->scmd) {
+				printk(KERN_NOTICE "megaraid_sas: unexpected "
+					"cmd attached to internal command!\n");
+			}
+			printk(KERN_NOTICE "megasas: %p synchronous cmd "
+						"on the internal reset queue, "
+						"issue it again.\n", cmd);
+			cmd->cmd_status = ENODATA;
+			instance->instancet->fire_cmd(instance,
+							cmd->frame_phys_addr,
+							0, instance->reg_set);
+		} else if (cmd->scmd) {
+			printk(KERN_NOTICE "megasas: %p scsi cmd [%02x], %#lx "
+				"detected on the internal queue, issue again.\n",
+				cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number);
+
+			atomic_inc(&instance->fw_outstanding);
+			instance->instancet->fire_cmd(instance,
+					cmd->frame_phys_addr,
+					cmd->frame_count-1, instance->reg_set);
+		} else {
+			printk(KERN_NOTICE "megasas: %p unexpected cmd on the "
+				"internal reset defer list while re-issue!!\n",
+				cmd);
+		}
+	}
+
+	if (instance->aen_cmd) {
+		printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+		megasas_return_cmd(instance, instance->aen_cmd);
+
+		instance->aen_cmd	= NULL;
+	}
+
+	/*
+	* Initiate AEN (Asynchronous Event Notification)
+	*/
+	seq_num = instance->last_seq_num;
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+	megasas_register_aen(instance, seq_num, class_locale.word);
+}
+
+/**
+ * megasas_internal_reset_defer_cmds -	Move pending cmds to the deferred queue
+ * @instance:				Adapter soft state
+ *
+ * Commands that are pending at internal reset time are moved to the
+ * internal reset pending queue. This queue is flushed after the internal
+ * reset sequence completes successfully. If the internal reset does not
+ * complete in time, the kernel reset handler flushes these commands.
+ */
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	int i;
+	u32 max_cmd = instance->max_fw_cmds;
+	u32 defer_index;
+	unsigned long flags;
+
+	defer_index     = 0;
+	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+	for (i = 0; i < max_cmd; i++) {
+		cmd = instance->cmd_list[i];
+		if (cmd->sync_cmd == 1 || cmd->scmd) {
+			printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p "
+					"on the defer queue as internal\n",
+				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
+
+			if (!list_empty(&cmd->list)) {
+				printk(KERN_NOTICE "megaraid_sas: ERROR while "
+					"moving this cmd:%p, %d %p, it was "
+					"discovered on some list?\n",
+					cmd, cmd->sync_cmd, cmd->scmd);
+
+				list_del_init(&cmd->list);
+			}
+			defer_index++;
+			list_add_tail(&cmd->list,
+				&instance->internal_reset_pending_q);
+		}
+	}
+	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+}
+
+
+static void
+process_fw_state_change_wq(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance, work_init);
+	u32 wait;
+	unsigned long flags;
+
+	if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+		printk(KERN_NOTICE "megaraid_sas: error, unexpected recovery state %x\n",
+				instance->adprecovery);
+		return;
+	}
+
+	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+		printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault "
+					"state, restarting it...\n");
+
+		instance->instancet->disable_intr(instance->reg_set);
+		atomic_set(&instance->fw_outstanding, 0);
+
+		atomic_set(&instance->fw_reset_no_pci_access, 1);
+		instance->instancet->adp_reset(instance, instance->reg_set);
+		atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+		printk(KERN_NOTICE "megaraid_sas: FW restarted successfully, "
+					"initiating next stage...\n");
+
+		printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine, "
+					"state 2 starting...\n");
+
+		/* Wait for about 30 seconds before starting the second init */
+		for (wait = 0; wait < 30; wait++) {
+			msleep(1000);
+		}
+
+		if (megasas_transition_to_ready(instance)) {
+			printk(KERN_NOTICE "megaraid_sas: adapter not ready\n");
+
+			megaraid_sas_kill_hba(instance);
+			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
+			return;
+		}
+
+		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
+			) {
+			*instance->consumer = *instance->producer;
+		} else {
+			*instance->consumer = 0;
+			*instance->producer = 0;
+		}
+
+		megasas_issue_init_mfi(instance);
+
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		instance->adprecovery	= MEGASAS_HBA_OPERATIONAL;
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		instance->instancet->enable_intr(instance->reg_set);
+
+		megasas_issue_pending_cmds_again(instance);
+		instance->issuepend_done = 1;
+	}
+	return;
+}
+
+/**
+ * megasas_deplete_reply_queue -	Processes all completed commands
+ * @instance:				Adapter soft state
+ * @alt_status:				Alternate status to be returned to
+ * 					SCSI mid-layer instead of the status
+ * 					returned by the FW
+ * Note: this must be called with hba lock held
+ */
+static int
+megasas_deplete_reply_queue(struct megasas_instance *instance,
+					u8 alt_status)
+{
+	u32 mfiStatus;
+	u32 fw_state;
+
+	if ((mfiStatus = instance->instancet->check_reset(instance,
+					instance->reg_set)) == 1) {
+		return IRQ_HANDLED;
+	}
+
+	if ((mfiStatus = instance->instancet->clear_intr(
+						instance->reg_set)
+						) == 0) {
+		return IRQ_NONE;
+	}
+
+	instance->mfiStatus = mfiStatus;
+
+	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
+		fw_state = instance->instancet->read_fw_status_reg(
+				instance->reg_set) & MFI_STATE_MASK;
+
+		if (fw_state != MFI_STATE_FAULT) {
+			printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+						fw_state);
+		}
+
+		if ((fw_state == MFI_STATE_FAULT) &&
+				(instance->disableOnlineCtrlReset == 0)) {
+			printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+
+			if ((instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_SAS1064R) ||
+				(instance->pdev->device ==
+					PCI_DEVICE_ID_DELL_PERC5) ||
+				(instance->pdev->device ==
+					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+
+				*instance->consumer =
+					MEGASAS_ADPRESET_INPROG_SIGN;
+			}
+
+
+			instance->instancet->disable_intr(instance->reg_set);
+			instance->adprecovery	= MEGASAS_ADPRESET_SM_INFAULT;
+			instance->issuepend_done = 0;
+
+			atomic_set(&instance->fw_outstanding, 0);
+			megasas_internal_reset_defer_cmds(instance);
+
+			printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+					fw_state, instance->adprecovery);
+
+			schedule_work(&instance->work_init);
+			return IRQ_HANDLED;
+
+		} else {
+			printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+				fw_state, instance->disableOnlineCtrlReset);
+		}
+	}
+
+	tasklet_schedule(&instance->isr_tasklet);
+	return IRQ_HANDLED;
+}
+
+/**
+ * megasas_isr - isr entry point
+ */
+static irqreturn_t megasas_isr(int irq, void *devp)
+{
+	struct megasas_instance *instance;
+	unsigned long flags;
+	irqreturn_t	rc;
+
+	if (atomic_read(
+		&(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
+		return IRQ_HANDLED;
+
+	instance = (struct megasas_instance *)devp;
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	rc =  megasas_deplete_reply_queue(instance, DID_OK);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	return rc;
+}
+
+/**
+ * megasas_transition_to_ready -	Move the FW to READY state
+ * @instance:				Adapter soft state
+ *
+ * During initialization, the FW can potentially be in any one of several
+ * possible states. If the FW is in the operational or waiting-for-handshake
+ * states, the driver must take steps to bring it to the ready state.
+ * Otherwise, it has to wait for the FW to reach the ready state.
+ */
+int
+megasas_transition_to_ready(struct megasas_instance* instance)
+{
+	int i;
+	u8 max_wait;
+	u32 fw_state;
+	u32 cur_state;
+	u32 abs_state, curr_abs_state;
+
+	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+
+	if (fw_state != MFI_STATE_READY)
+		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+		       " state\n");
+
+	while (fw_state != MFI_STATE_READY) {
+
+		abs_state =
+			instance->instancet->read_fw_status_reg(instance->reg_set);
+
+		switch (fw_state) {
+
+		case MFI_STATE_FAULT:
+
+			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+			return -ENODEV;
+
+		case MFI_STATE_WAIT_HANDSHAKE:
+			/*
+			 * Set the CLR bit in inbound doorbell
+			 */
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+				(instance->pdev->device ==
+				 PCI_DEVICE_ID_LSI_FUSION)) {
+				writel(
+				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+				  &instance->reg_set->doorbell);
+			} else {
+				writel(
+				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+					&instance->reg_set->inbound_doorbell);
+			}
+
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_WAIT_HANDSHAKE;
+			break;
+
+		case MFI_STATE_BOOT_MESSAGE_PENDING:
+			if ((instance->pdev->device ==
+			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+			    (instance->pdev->device ==
+			     PCI_DEVICE_ID_LSI_FUSION)) {
+				writel(MFI_INIT_HOTPLUG,
+				       &instance->reg_set->doorbell);
+			} else
+				writel(MFI_INIT_HOTPLUG,
+					&instance->reg_set->inbound_doorbell);
+
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+			break;
+
+		case MFI_STATE_OPERATIONAL:
+			/*
+			 * Bring it to READY state; assuming max wait 10 secs
+			 */
+			instance->instancet->disable_intr(instance->reg_set);
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
+				(instance->pdev->device
+					== PCI_DEVICE_ID_LSI_FUSION)) {
+				writel(MFI_RESET_FLAGS,
+					&instance->reg_set->doorbell);
+				if (instance->pdev->device ==
+				    PCI_DEVICE_ID_LSI_FUSION) {
+					for (i = 0; i < (10 * 1000); i += 20) {
+						if (readl(
+							    &instance->
+							    reg_set->
+							    doorbell) & 1)
+							msleep(20);
+						else
+							break;
+					}
+				}
+			} else
+				writel(MFI_RESET_FLAGS,
+					&instance->reg_set->inbound_doorbell);
+
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_OPERATIONAL;
+			break;
+
+		case MFI_STATE_UNDEFINED:
+			/*
+			 * This state should not last for more than 2 seconds
+			 */
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_UNDEFINED;
+			break;
+
+		case MFI_STATE_BB_INIT:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_BB_INIT;
+			break;
+
+		case MFI_STATE_FW_INIT:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_FW_INIT;
+			break;
+
+		case MFI_STATE_FW_INIT_2:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_FW_INIT_2;
+			break;
+
+		case MFI_STATE_DEVICE_SCAN:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_DEVICE_SCAN;
+			break;
+
+		case MFI_STATE_FLUSH_CACHE:
+			max_wait = MEGASAS_RESET_WAIT_TIME;
+			cur_state = MFI_STATE_FLUSH_CACHE;
+			break;
+
+		default:
+			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+			       fw_state);
+			return -ENODEV;
+		}
+
+		/*
+		 * The cur_state should not last for more than max_wait secs
+		 */
+		for (i = 0; i < (max_wait * 1000); i++) {
+			fw_state = instance->instancet->read_fw_status_reg(
+					instance->reg_set) & MFI_STATE_MASK;
+			curr_abs_state =
+				instance->instancet->read_fw_status_reg(
+					instance->reg_set);
+
+			if (abs_state == curr_abs_state)
+				msleep(1);
+			else
+				break;
+		}
+
+		/*
+		 * Return error if fw_state hasn't changed after max_wait
+		 */
+		if (curr_abs_state == abs_state) {
+			printk(KERN_DEBUG "FW state [%d] hasn't changed "
+			       "in %d secs\n", fw_state, max_wait);
+			return -ENODEV;
+		}
+	}
+	printk(KERN_INFO "megasas: FW now in Ready state\n");
+
+	return 0;
+}
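+
+/*
+ * Illustrative arithmetic: for each intermediate state the polling loop above
+ * runs at most max_wait * 1000 iterations of msleep(1), i.e. it waits roughly
+ * max_wait seconds (MEGASAS_RESET_WAIT_TIME for every state handled here)
+ * before giving up on a state change.
+ */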
+
+/**
+ * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
+ * @instance:				Adapter soft state
+ */
+static void megasas_teardown_frame_pool(struct megasas_instance *instance)
+{
+	int i;
+	u32 max_cmd = instance->max_mfi_cmds;
+	struct megasas_cmd *cmd;
+
+	if (!instance->frame_dma_pool)
+		return;
+
+	/*
+	 * Return all frames to pool
+	 */
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = instance->cmd_list[i];
+
+		if (cmd->frame)
+			pci_pool_free(instance->frame_dma_pool, cmd->frame,
+				      cmd->frame_phys_addr);
+
+		if (cmd->sense)
+			pci_pool_free(instance->sense_dma_pool, cmd->sense,
+				      cmd->sense_phys_addr);
+	}
+
+	/*
+	 * Now destroy the pool itself
+	 */
+	pci_pool_destroy(instance->frame_dma_pool);
+	pci_pool_destroy(instance->sense_dma_pool);
+
+	instance->frame_dma_pool = NULL;
+	instance->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_create_frame_pool -	Creates DMA pool for cmd frames
+ * @instance:			Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling MFI frame and the SG list that immediately follows the frame. This
+ * function creates those DMA memory buffers for each command packet by using
+ * PCI pool facility.
+ */
+static int megasas_create_frame_pool(struct megasas_instance *instance)
+{
+	int i;
+	u32 max_cmd;
+	u32 sge_sz;
+	u32 sgl_sz;
+	u32 total_sz;
+	u32 frame_count;
+	struct megasas_cmd *cmd;
+
+	max_cmd = instance->max_mfi_cmds;
+
+	/*
+	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
+	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
+	 */
+	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+	    sizeof(struct megasas_sge32);
+
+	if (instance->flag_ieee) {
+		sge_sz = sizeof(struct megasas_sge_skinny);
+	}
+
+	/*
+	 * Calculate the number of 64-byte frames required for the SGL
+	 */
+	sgl_sz = sge_sz * instance->max_num_sge;
+	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
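+	/*
+	 * Note: the computed value is immediately overridden below with a
+	 * fixed count, so every command gets the same allocation.
+	 */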
+	frame_count = 15;
+
+	/*
+	 * We need one extra frame for the MFI command
+	 */
+	frame_count++;
+
+	total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+	/*
+	 * Use DMA pool facility provided by PCI layer
+	 */
+	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+						   instance->pdev, total_sz, 64,
+						   0);
+
+	if (!instance->frame_dma_pool) {
+		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+		return -ENOMEM;
+	}
+
+	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
+						   instance->pdev, 128, 4, 0);
+
+	if (!instance->sense_dma_pool) {
+		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+
+		pci_pool_destroy(instance->frame_dma_pool);
+		instance->frame_dma_pool = NULL;
+
+		return -ENOMEM;
+	}
+
+	/*
+	 * Allocate and attach a frame to each of the commands in cmd_list.
+	 * By making cmd->index as the context instead of the &cmd, we can
+	 * always use 32bit context regardless of the architecture
+	 */
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = instance->cmd_list[i];
+
+		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+					    GFP_KERNEL, &cmd->frame_phys_addr);
+
+		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+					    GFP_KERNEL, &cmd->sense_phys_addr);
+
+		/*
+		 * megasas_teardown_frame_pool() takes care of freeing
+		 * whatever has been allocated
+		 */
+		if (!cmd->frame || !cmd->sense) {
+			printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+			megasas_teardown_frame_pool(instance);
+			return -ENOMEM;
+		}
+
+		memset(cmd->frame, 0, total_sz);
+		cmd->frame->io.context = cmd->index;
+		cmd->frame->io.pad_0 = 0;
+	}
+
+	return 0;
+}
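+
+/*
+ * Worked sizing example (illustrative only): with the 64-byte MFI frame size
+ * described above, frame_count is forced to 15 + 1 == 16, so each command's
+ * frame buffer is
+ *
+ *	total_sz = 64 * 16 = 1024 bytes,
+ *
+ * allocated 64-byte aligned from the frame pool, plus a separate 128-byte
+ * sense buffer from the sense pool.
+ */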
+
+/**
+ * megasas_free_cmds -	Free all the cmds in the free cmd pool
+ * @instance:		Adapter soft state
+ */
+void megasas_free_cmds(struct megasas_instance *instance)
+{
+	int i;
+	/* First free the MFI frame pool */
+	megasas_teardown_frame_pool(instance);
+
+	/* Free all the commands in the cmd_list */
+	for (i = 0; i < instance->max_mfi_cmds; i++)
+
+		kfree(instance->cmd_list[i]);
+
+	/* Free the cmd_list buffer itself */
+	kfree(instance->cmd_list);
+	instance->cmd_list = NULL;
+
+	INIT_LIST_HEAD(&instance->cmd_pool);
+}
+
+/**
+ * megasas_alloc_cmds -	Allocates the command packets
+ * @instance:		Adapter soft state
+ *
+ * Each command that is issued to the FW, whether an IO command from the OS
+ * or an internal command like an IOCTL, is wrapped in a local data structure
+ * called megasas_cmd. The frame embedded in this megasas_cmd is what is
+ * actually issued to the FW.
+ *
+ * Each frame has a 32-bit field called context (tag). This context is used
+ * to get back the megasas_cmd from the frame when a frame gets completed in
+ * the ISR. Typically the address of the megasas_cmd itself would be used as
+ * the context. But we wanted to keep the differences between 32 and 64 bit
+ * systems to the minimum. We always use 32 bit integers for the context. In
+ * this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd given the context. The
+ * free commands themselves are maintained in a linked list called cmd_pool.
+ */
+int megasas_alloc_cmds(struct megasas_instance *instance)
+{
+	int i;
+	int j;
+	u32 max_cmd;
+	struct megasas_cmd *cmd;
+
+	max_cmd = instance->max_mfi_cmds;
+
+	/*
+	 * instance->cmd_list is an array of struct megasas_cmd pointers.
+	 * Allocate the dynamic array first and then allocate individual
+	 * commands.
+	 */
+	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
+				     GFP_KERNEL);
+
+	if (!instance->cmd_list) {
+		printk(KERN_DEBUG "megasas: out of memory\n");
+		return -ENOMEM;
+	}
+
+	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
+
+	for (i = 0; i < max_cmd; i++) {
+		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
+						GFP_KERNEL);
+
+		if (!instance->cmd_list[i]) {
+
+			for (j = 0; j < i; j++)
+				kfree(instance->cmd_list[j]);
+
+			kfree(instance->cmd_list);
+			instance->cmd_list = NULL;
+
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Add all the commands to command pool (instance->cmd_pool)
+	 */
+	for (i = 0; i < max_cmd; i++) {
+		cmd = instance->cmd_list[i];
+		memset(cmd, 0, sizeof(struct megasas_cmd));
+		cmd->index = i;
+		cmd->scmd = NULL;
+		cmd->instance = instance;
+
+		list_add_tail(&cmd->list, &instance->cmd_pool);
+	}
+
+	/*
+	 * Create a frame pool and assign one frame to each cmd
+	 */
+	if (megasas_create_frame_pool(instance)) {
+		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+		megasas_free_cmds(instance);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * megasas_get_pd_list -	Returns FW's physical drive list
+ * @instance:			Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure.  This information is mainly used to find out the
+ * SYSTEM PDs exposed by the FW.
+ */
+static int
+megasas_get_pd_list(struct megasas_instance *instance)
+{
+	int ret = 0, pd_index = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_PD_LIST *ci;
+	struct MR_PD_ADDRESS *pd_addr;
+	dma_addr_t ci_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
+
+	if (!ci) {
+		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+	dcmd->mbox.b[1] = 0;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	/*
+	 * Fill instance->pd_list from the list returned by the FW.
+	 */
+
+	pd_addr = ci->addr;
+
+	if (ret == 0 &&
+		(ci->count <
+		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+
+		memset(instance->pd_list, 0,
+			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+
+		for (pd_index = 0; pd_index < ci->count; pd_index++) {
+
+			instance->pd_list[pd_addr->deviceId].tid	=
+							pd_addr->deviceId;
+			instance->pd_list[pd_addr->deviceId].driveType	=
+							pd_addr->scsiDevType;
+			instance->pd_list[pd_addr->deviceId].driveState	=
+							MR_PD_STATE_SYSTEM;
+			pd_addr++;
+		}
+	}
+
+	pci_free_consistent(instance->pdev,
+				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+				ci, ci_h);
+	megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
+/*
+ * megasas_get_ld_list -	Returns FW's ld_list structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's logical drive (LD)
+ * list.  This information is used to find out which LDs are currently
+ * exported by the FW.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+	int ret = 0, ld_index = 0, ids = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_LIST *ci;
+	dma_addr_t ci_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				sizeof(struct MR_LD_LIST),
+				&ci_h);
+
+	if (!ci) {
+		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+	dcmd->opcode = MR_DCMD_LD_GET_LIST;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+	dcmd->pad_0  = 0;
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		ret = 0;
+	} else {
+		ret = -1;
+	}
+
+	/* Populate the driver's ld_ids from the data returned by the FW */
+
+	if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
+		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+			if (ci->ldList[ld_index].state != 0) {
+				ids = ci->ldList[ld_index].ref.targetId;
+				instance->ld_ids[ids] =
+					ci->ldList[ld_index].ref.targetId;
+			}
+		}
+	}
+
+	pci_free_consistent(instance->pdev,
+				sizeof(struct MR_LD_LIST),
+				ci,
+				ci_h);
+
+	megasas_return_cmd(instance, cmd);
+	return ret;
+}
+
+/**
+ * megasas_get_ctrl_info -	Returns FW's controller structure
+ * @instance:				Adapter soft state
+ * @ctrl_info:				Controller information structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+static int
+megasas_get_ctrl_info(struct megasas_instance *instance,
+		      struct megasas_ctrl_info *ctrl_info)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct megasas_ctrl_info *ci;
+	dma_addr_t ci_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	ci = pci_alloc_consistent(instance->pdev,
+				  sizeof(struct megasas_ctrl_info), &ci_h);
+
+	if (!ci) {
+		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
+	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
+
+	if (!megasas_issue_polled(instance, cmd)) {
+		ret = 0;
+		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+	} else {
+		ret = -1;
+	}
+
+	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
+			    ci, ci_h);
+
+	megasas_return_cmd(instance, cmd);
+	return ret;
+}
+
+/**
+ * megasas_issue_init_mfi -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * Issues the INIT MFI cmd
+ */
+static int
+megasas_issue_init_mfi(struct megasas_instance *instance)
+{
+	u32 context;
+
+	struct megasas_cmd *cmd;
+
+	struct megasas_init_frame *init_frame;
+	struct megasas_init_queue_info *initq_info;
+	dma_addr_t init_frame_h;
+	dma_addr_t initq_info_h;
+
+	/*
+	 * Prepare an init frame. Note the init frame points to queue info
+	 * structure. Each frame has SGL allocated after first 64 bytes. For
+	 * this frame - since we don't need any SGL - we use SGL's space as
+	 * queue info structure
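+	 * (initq_info therefore lives at init_frame + 64 bytes, and its bus
+	 * address is init_frame_h + 64, as set up below)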
+	 *
+	 * We will not get a NULL command below. We just created the pool.
+	 */
+	cmd = megasas_get_cmd(instance);
+
+	init_frame = (struct megasas_init_frame *)cmd->frame;
+	initq_info = (struct megasas_init_queue_info *)
+		((unsigned long)init_frame + 64);
+
+	init_frame_h = cmd->frame_phys_addr;
+	initq_info_h = init_frame_h + 64;
+
+	context = init_frame->context;
+	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+	init_frame->context = context;
+
+	initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
+	initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
+
+	initq_info->producer_index_phys_addr_lo = instance->producer_h;
+	initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
+
+	init_frame->cmd = MFI_CMD_INIT;
+	init_frame->cmd_status = 0xFF;
+	init_frame->queue_info_new_phys_addr_lo = initq_info_h;
+
+	init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
+
+	/*
+	 * disable the intr before firing the init frame to FW
+	 */
+	instance->instancet->disable_intr(instance->reg_set);
+
+	/*
+	 * Issue the init frame in polled mode
+	 */
+
+	if (megasas_issue_polled(instance, cmd)) {
+		printk(KERN_ERR "megasas: Failed to init firmware\n");
+		megasas_return_cmd(instance, cmd);
+		goto fail_fw_init;
+	}
+
+	megasas_return_cmd(instance, cmd);
+
+	return 0;
+
+fail_fw_init:
+	return -EINVAL;
+}
+
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance:		Adapter soft state
+ * @timer:		timer object to be initialized
+ * @fn:			timer function
+ * @interval:		time interval between timer function calls
+ */
+static inline void
+megasas_start_timer(struct megasas_instance *instance,
+			struct timer_list *timer,
+			void *fn, unsigned long interval)
+{
+	init_timer(timer);
+	timer->expires = jiffies + interval;
+	timer->data = (unsigned long)instance;
+	timer->function = fn;
+	add_timer(timer);
+}
+
+/**
+ * megasas_io_completion_timer - Timer fn
+ * @instance_addr:	Address of adapter soft state
+ *
+ * Schedules tasklet for cmd completion
+ * if poll_mode_io is set
+ */
+static void
+megasas_io_completion_timer(unsigned long instance_addr)
+{
+	struct megasas_instance *instance =
+			(struct megasas_instance *)instance_addr;
+
+	if (atomic_read(&instance->fw_outstanding))
+		tasklet_schedule(&instance->isr_tasklet);
+
+	/* Restart timer */
+	if (poll_mode_io)
+		mod_timer(&instance->io_completion_timer,
+			jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
+}
+
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *reg_set;
+	u32 context_sz;
+	u32 reply_q_sz;
+
+	reg_set = instance->reg_set;
+
+	/*
+	 * Get various operational parameters from status register
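+	 * (bits [15:0] of the status register hold the max FW command count,
+	 * bits [23:16] the max number of SGEs per command)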
+	 */
+	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+	/*
+	 * Reduce the max supported cmds by 1. This is to ensure that the
+	 * reply_q_sz (1 more than the max cmd that driver may send)
+	 * does not exceed max cmds that the FW can support
+	 */
+	instance->max_fw_cmds = instance->max_fw_cmds-1;
+	instance->max_mfi_cmds = instance->max_fw_cmds;
+	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) &
+				 0xFF0000) >> 0x10;
+	/*
+	 * Create a pool of commands
+	 */
+	if (megasas_alloc_cmds(instance))
+		goto fail_alloc_cmds;
+
+	/*
+	 * Allocate memory for reply queue. Length of reply queue should
+	 * be _one_ more than the maximum commands handled by the firmware.
+	 *
+	 * Note: When FW completes commands, it places corresponding context
+	 * values in this circular reply queue. This circular queue is a fairly
+	 * typical producer-consumer queue. FW is the producer (of completed
+	 * commands) and the driver is the consumer.
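+	 * Each reply queue entry is a 32 bit context, so the queue occupies
+	 * (max_fw_cmds + 1) * sizeof(u32) bytes of coherent DMA memory.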
+	 */
+	context_sz = sizeof(u32);
+	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
+
+	instance->reply_queue = pci_alloc_consistent(instance->pdev,
+						     reply_q_sz,
+						     &instance->reply_queue_h);
+
+	if (!instance->reply_queue) {
+		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+		goto fail_reply_queue;
+	}
+
+	if (megasas_issue_init_mfi(instance))
+		goto fail_fw_init;
+
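+	/*
+	 * Bit 26 (0x04000000) of the FW status register reports the FW's
+	 * IEEE SGL support.
+	 */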
+	instance->fw_support_ieee = 0;
+	instance->fw_support_ieee =
+		(instance->instancet->read_fw_status_reg(reg_set) &
+		0x04000000);
+
+	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d\n",
+			instance->fw_support_ieee);
+
+	if (instance->fw_support_ieee)
+		instance->flag_ieee = 1;
+
+	return 0;
+
+fail_fw_init:
+
+	pci_free_consistent(instance->pdev, reply_q_sz,
+			    instance->reply_queue, instance->reply_queue_h);
+fail_reply_queue:
+	megasas_free_cmds(instance);
+
+fail_alloc_cmds:
+	iounmap(instance->reg_set);
+	return 1;
+}
+
+/**
+ * megasas_init_fw -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * This is the main function for initializing firmware
+ */
+
+static int megasas_init_fw(struct megasas_instance *instance)
+{
+	u32 max_sectors_1;
+	u32 max_sectors_2;
+	u32 tmp_sectors;
+	struct megasas_register_set __iomem *reg_set;
+	struct megasas_ctrl_info *ctrl_info;
+	unsigned long bar_list;
+
+	/* Find first memory bar */
+	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+	instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
+	if (pci_request_selected_regions(instance->pdev, instance->bar,
+					 "megasas: LSI")) {
+		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+		return -EBUSY;
+	}
+
+	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
+
+	if (!instance->reg_set) {
+		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+		goto fail_ioremap;
+	}
+
+	reg_set = instance->reg_set;
+
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_FUSION:
+		instance->instancet = &megasas_instance_template_fusion;
+		break;
+	case PCI_DEVICE_ID_LSI_SAS1078R:
+	case PCI_DEVICE_ID_LSI_SAS1078DE:
+		instance->instancet = &megasas_instance_template_ppc;
+		break;
+	case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+	case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+		instance->instancet = &megasas_instance_template_gen2;
+		break;
+	case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+	case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+		instance->instancet = &megasas_instance_template_skinny;
+		break;
+	case PCI_DEVICE_ID_LSI_SAS1064R:
+	case PCI_DEVICE_ID_DELL_PERC5:
+	default:
+		instance->instancet = &megasas_instance_template_xscale;
+		break;
+	}
+
+	/*
+	 * We expect the FW state to be READY
+	 */
+	if (megasas_transition_to_ready(instance))
+		goto fail_ready_state;
+
+	/* Get operational params, sge flags, send init cmd to controller */
+	if (instance->instancet->init_adapter(instance))
+		return -ENODEV;
+
+	printk(KERN_ERR "megasas: INIT adapter done\n");
+
+	/*
+	 * Get the PD list from the FW; it is needed for passthrough support.
+	 */
+
+	memset(instance->pd_list, 0 ,
+		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+	megasas_get_pd_list(instance);
+
+	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+	megasas_get_ld_list(instance);
+
+	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
+
+	/*
+	 * Compute the max allowed sectors per IO: The controller info has two
+	 * limits on max sectors. Driver should use the minimum of these two.
+	 *
+	 * 1 << stripe_sz_ops.min = max sectors per strip
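+	 * max_sectors_1 = max sectors per strip * max_strips_per_io
+	 * max_sectors_2 = max_request_size reported by the controller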
+	 *
+	 * Note that older firmwares ( < FW ver 30) didn't report information
+	 * to calculate max_sectors_1. So the number ended up as zero always.
+	 */
+	tmp_sectors = 0;
+	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+
+		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+		    ctrl_info->max_strips_per_io;
+		max_sectors_2 = ctrl_info->max_request_size;
+
+		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+		instance->disableOnlineCtrlReset =
+		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+	}
+
+	instance->max_sectors_per_req = instance->max_num_sge *
+						PAGE_SIZE / 512;
+	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+		instance->max_sectors_per_req = tmp_sectors;
+
+	kfree(ctrl_info);
+
+	/*
+	 * Setup tasklet for cmd completion
+	 */
+
+	tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+		(unsigned long)instance);
+
+	/* Initialize the cmd completion timer */
+	if (poll_mode_io)
+		megasas_start_timer(instance, &instance->io_completion_timer,
+				megasas_io_completion_timer,
+				MEGASAS_COMPLETION_TIMER_INTERVAL);
+	return 0;
+
+fail_ready_state:
+	iounmap(instance->reg_set);
+
+fail_ioremap:
+	pci_release_selected_regions(instance->pdev, instance->bar);
+
+	return -EINVAL;
+}
+
+/**
+ * megasas_release_mfi -	Reverses the FW initialization
+ * @instance:			Adapter soft state
+ */
+static void megasas_release_mfi(struct megasas_instance *instance)
+{
+	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
+
+	if (instance->reply_queue)
+		pci_free_consistent(instance->pdev, reply_q_sz,
+			    instance->reply_queue, instance->reply_queue_h);
+
+	megasas_free_cmds(instance);
+
+	iounmap(instance->reg_set);
+
+	pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_get_seq_num -	Gets latest event sequence numbers
+ * @instance:			Adapter soft state
+ * @eli:			FW event log sequence numbers information
+ *
+ * FW maintains a log of all events in a non-volatile area. Upper layers
+ * usually find out the latest sequence number of the events, the seq number
+ * at boot, etc. They would "read" all the events below the latest seq number
+ * by issuing a direct fw cmd (DCMD). For future events (beyond the latest seq
+ * number), they would subscribe to AEN (asynchronous event notification) and
+ * wait for the events to happen.
+ */
+static int
+megasas_get_seq_num(struct megasas_instance *instance,
+		    struct megasas_evt_log_info *eli)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct megasas_evt_log_info *el_info;
+	dma_addr_t el_info_h = 0;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+	el_info = pci_alloc_consistent(instance->pdev,
+				       sizeof(struct megasas_evt_log_info),
+				       &el_info_h);
+
+	if (!el_info) {
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(el_info, 0, sizeof(*el_info));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
+	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
+	dcmd->sgl.sge32[0].phys_addr = el_info_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
+
+	megasas_issue_blocked_cmd(instance, cmd);
+
+	/*
+	 * Copy the data back into callers buffer
+	 */
+	memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
+
+	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
+			    el_info, el_info_h);
+
+	megasas_return_cmd(instance, cmd);
+
+	return 0;
+}
+
+/**
+ * megasas_register_aen -	Registers for asynchronous event notification
+ * @instance:			Adapter soft state
+ * @seq_num:			The starting sequence number
+ * @class_locale:		Class of the event
+ *
+ * This function subscribes for AEN for events beyond the @seq_num. It requests
+ * to be notified if and only if the event is of type @class_locale
+ */
+static int
+megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+		     u32 class_locale_word)
+{
+	int ret_val;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	union megasas_evt_class_locale curr_aen;
+	union megasas_evt_class_locale prev_aen;
+
+	/*
+	 * If there is an AEN pending already (aen_cmd), check if the
+	 * class_locale of that pending AEN is inclusive of the new
+	 * AEN request we currently have. If it is, then we don't have
+	 * to do anything. In other words, whichever events the current
+	 * AEN request is subscribing to have already been subscribed
+	 * to.
+	 *
+	 * If the pending cmd is _not_ inclusive, then we have to abort
+	 * that command, form a class_locale that is a superset of both
+	 * the old and the current one, and re-issue it to the FW.
+	 */
+
+	curr_aen.word = class_locale_word;
+
+	if (instance->aen_cmd) {
+
+		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+
+		/*
+		 * A class whose enum value is smaller is inclusive of all
+		 * registered, then new registration requests for higher
+		 * classes need not be sent to the FW. They are automatically
+		 * classes need not be sent to FW. They are automatically
+		 * included.
+		 *
+		 * Locale numbers don't have such a hierarchy. They are bitmap
+		 * values.
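+		 * (the check below verifies that the new locale bits are a
+		 * subset of the old ones: ((prev & curr) ^ curr) == 0)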
+		 */
+		if ((prev_aen.members.class <= curr_aen.members.class) &&
+		    !((prev_aen.members.locale & curr_aen.members.locale) ^
+		      curr_aen.members.locale)) {
+			/*
+			 * Previously issued event registration includes
+			 * current request. Nothing to do.
+			 */
+			return 0;
+		} else {
+			curr_aen.members.locale |= prev_aen.members.locale;
+
+			if (prev_aen.members.class < curr_aen.members.class)
+				curr_aen.members.class = prev_aen.members.class;
+
+			instance->aen_cmd->abort_aen = 1;
+			ret_val = megasas_issue_blocked_abort_cmd(instance,
+								  instance->
+								  aen_cmd);
+
+			if (ret_val) {
+				printk(KERN_DEBUG "megasas: Failed to abort "
+				       "previous AEN command\n");
+				return ret_val;
+			}
+		}
+	}
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return -ENOMEM;
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
+
+	/*
+	 * Prepare DCMD for aen registration
+	 */
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	instance->last_seq_num = seq_num;
+	dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
+	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
+	dcmd->mbox.w[0] = seq_num;
+	dcmd->mbox.w[1] = curr_aen.word;
+	dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
+	dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
+
+	if (instance->aen_cmd != NULL) {
+		megasas_return_cmd(instance, cmd);
+		return 0;
+	}
+
+	/*
+	 * Store reference to the cmd used to register for AEN. When an
+	 * application wants us to register for AEN, we have to abort this
+	 * cmd and re-register with a new EVENT LOCALE supplied by that app
+	 */
+	instance->aen_cmd = cmd;
+
+	/*
+	 * Issue the aen registration frame
+	 */
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	return 0;
+}
+
+/**
+ * megasas_start_aen -	Subscribes to AEN during driver load time
+ * @instance:		Adapter soft state
+ */
+static int megasas_start_aen(struct megasas_instance *instance)
+{
+	struct megasas_evt_log_info eli;
+	union megasas_evt_class_locale class_locale;
+
+	/*
+	 * Get the latest sequence number from FW
+	 */
+	memset(&eli, 0, sizeof(eli));
+
+	if (megasas_get_seq_num(instance, &eli))
+		return -1;
+
+	/*
+	 * Register AEN with FW for latest sequence number plus 1
+	 */
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+	return megasas_register_aen(instance, eli.newest_seq_num + 1,
+				    class_locale.word);
+}
+
+/**
+ * megasas_io_attach -	Attaches this driver to SCSI mid-layer
+ * @instance:		Adapter soft state
+ */
+static int megasas_io_attach(struct megasas_instance *instance)
+{
+	struct Scsi_Host *host = instance->host;
+
+	/*
+	 * Export parameters required by SCSI mid-layer
+	 */
+	host->irq = instance->pdev->irq;
+	host->unique_id = instance->unique_id;
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+		host->can_queue =
+			instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
+	} else
+		host->can_queue =
+			instance->max_fw_cmds - MEGASAS_INT_CMDS;
+	host->this_id = instance->init_id;
+	host->sg_tablesize = instance->max_num_sge;
+	/*
+	 * Check if the module parameter value for max_sectors can be used
+	 */
+	if (max_sectors && max_sectors < instance->max_sectors_per_req)
+		instance->max_sectors_per_req = max_sectors;
+	else {
+		if (max_sectors) {
+			if (((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+				(max_sectors <= MEGASAS_MAX_SECTORS)) {
+				instance->max_sectors_per_req = max_sectors;
+			} else {
+				printk(KERN_INFO "megasas: max_sectors should be > 0 "
+				       "and <= %d (or < 1MB for GEN2 controller)\n",
+				       instance->max_sectors_per_req);
+			}
+		}
+	}
+
+	host->max_sectors = instance->max_sectors_per_req;
+	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
+	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+	host->max_lun = MEGASAS_MAX_LUN;
+	host->max_cmd_len = 16;
+
+	/* Fusion only supports host reset */
+	if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
+		host->hostt->eh_device_reset_handler = NULL;
+		host->hostt->eh_bus_reset_handler = NULL;
+	}
+
+	/*
+	 * Notify the mid-layer about the new controller
+	 */
+	if (scsi_add_host(host, &instance->pdev->dev)) {
+		printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Trigger SCSI to scan our drives
+	 */
+	scsi_scan_host(host);
+	return 0;
+}
+
+static int
+megasas_set_dma_mask(struct pci_dev *pdev)
+{
+	/*
+	 * All our controllers are capable of performing 64-bit DMA
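+	 * (a 64 bit DMA mask is tried first, falling back to a 32 bit mask
+	 * if that fails)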
+	 */
+	if (IS_DMA64) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+
+			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+				goto fail_set_dma_mask;
+		}
+	} else {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+			goto fail_set_dma_mask;
+	}
+	return 0;
+
+fail_set_dma_mask:
+	return 1;
+}
+
+/**
+ * megasas_probe_one -	PCI hotplug entry point
+ * @pdev:		PCI device structure
+ * @id:			PCI ids of supported hotplugged adapter
+ */
+static int __devinit
+megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int rval;
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+
+	/*
+	 * Announce PCI information
+	 */
+	printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+	       pdev->vendor, pdev->device, pdev->subsystem_vendor,
+	       pdev->subsystem_device);
+
+	printk("bus %d:slot %d:func %d\n",
+	       pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+	/*
+	 * PCI prepping: enable device, set bus mastering and dma mask
+	 */
+	rval = pci_enable_device_mem(pdev);
+
+	if (rval) {
+		return rval;
+	}
+
+	pci_set_master(pdev);
+
+	if (megasas_set_dma_mask(pdev))
+		goto fail_set_dma_mask;
+
+	host = scsi_host_alloc(&megasas_template,
+			       sizeof(struct megasas_instance));
+
+	if (!host) {
+		printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+		goto fail_alloc_instance;
+	}
+
+	instance = (struct megasas_instance *)host->hostdata;
+	memset(instance, 0, sizeof(*instance));
+	atomic_set(&instance->fw_reset_no_pci_access, 0);
+	instance->pdev = pdev;
+
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_FUSION:
+	{
+		struct fusion_context *fusion;
+
+		instance->ctrl_context =
+			kzalloc(sizeof(struct fusion_context), GFP_KERNEL);
+		if (!instance->ctrl_context) {
+			printk(KERN_DEBUG "megasas: Failed to allocate "
+			       "memory for Fusion context info\n");
+			goto fail_alloc_dma_buf;
+		}
+		fusion = instance->ctrl_context;
+		INIT_LIST_HEAD(&fusion->cmd_pool);
+		spin_lock_init(&fusion->cmd_pool_lock);
+	}
+	break;
+	default: /* For all other supported controllers */
+
+		instance->producer =
+			pci_alloc_consistent(pdev, sizeof(u32),
+					     &instance->producer_h);
+		instance->consumer =
+			pci_alloc_consistent(pdev, sizeof(u32),
+					     &instance->consumer_h);
+
+		if (!instance->producer || !instance->consumer) {
+			printk(KERN_DEBUG "megasas: Failed to allocate "
+			       "memory for producer, consumer\n");
+			goto fail_alloc_dma_buf;
+		}
+
+		*instance->producer = 0;
+		*instance->consumer = 0;
+		break;
+	}
+
+	megasas_poll_wait_aen = 0;
+	instance->flag_ieee = 0;
+	instance->ev = NULL;
+	instance->issuepend_done = 1;
+	instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
+	instance->evt_detail = pci_alloc_consistent(pdev,
+						    sizeof(struct
+							   megasas_evt_detail),
+						    &instance->evt_detail_h);
+
+	if (!instance->evt_detail) {
+		printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+		       "event detail structure\n");
+		goto fail_alloc_dma_buf;
+	}
+
+	/*
+	 * Initialize locks and queues
+	 */
+	INIT_LIST_HEAD(&instance->cmd_pool);
+	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
+
+	atomic_set(&instance->fw_outstanding, 0);
+
+	init_waitqueue_head(&instance->int_cmd_wait_q);
+	init_waitqueue_head(&instance->abort_cmd_wait_q);
+
+	spin_lock_init(&instance->cmd_pool_lock);
+	spin_lock_init(&instance->hba_lock);
+	spin_lock_init(&instance->completion_lock);
+	spin_lock_init(&poll_aen_lock);
+
+	mutex_init(&instance->aen_mutex);
+	mutex_init(&instance->reset_mutex);
+
+	/*
+	 * Initialize PCI related and misc parameters
+	 */
+	instance->host = host;
+	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+		instance->flag_ieee = 1;
+		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+	} else
+		sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+
+	megasas_dbg_lvl = 0;
+	instance->flag = 0;
+	instance->unload = 1;
+	instance->last_time = 0;
+	instance->disableOnlineCtrlReset = 1;
+
+	if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
+	else
+		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+
+	/*
+	 * Initialize MFI Firmware
+	 */
+	if (megasas_init_fw(instance))
+		goto fail_init_mfi;
+
+	/* Try to enable MSI-X */
+	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) &&
+	    !msix_disable && !pci_enable_msix(instance->pdev,
+					      &instance->msixentry, 1))
+		instance->msi_flag = 1;
+
+	/*
+	 * Register IRQ
+	 */
+	if (request_irq(instance->msi_flag ? instance->msixentry.vector :
+			pdev->irq, instance->instancet->service_isr,
+			IRQF_SHARED, "megasas", instance)) {
+		printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+		goto fail_irq;
+	}
+
+	instance->instancet->enable_intr(instance->reg_set);
+
+	/*
+	 * Store instance in PCI softstate
+	 */
+	pci_set_drvdata(pdev, instance);
+
+	/*
+	 * Add this controller to megasas_mgmt_info structure so that it
+	 * can be exported to management applications
+	 */
+	megasas_mgmt_info.count++;
+	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
+	megasas_mgmt_info.max_index++;
+
+	/*
+	 * Initiate AEN (Asynchronous Event Notification)
+	 */
+	if (megasas_start_aen(instance)) {
+		printk(KERN_DEBUG "megasas: start aen failed\n");
+		goto fail_start_aen;
+	}
+
+	/*
+	 * Register with SCSI mid-layer
+	 */
+	if (megasas_io_attach(instance))
+		goto fail_io_attach;
+
+	instance->unload = 0;
+	return 0;
+
+      fail_start_aen:
+      fail_io_attach:
+	megasas_mgmt_info.count--;
+	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+	megasas_mgmt_info.max_index--;
+
+	pci_set_drvdata(pdev, NULL);
+	instance->instancet->disable_intr(instance->reg_set);
+	free_irq(instance->msi_flag ? instance->msixentry.vector :
+		 instance->pdev->irq, instance);
+	if (instance->msi_flag)
+		pci_disable_msix(instance->pdev);
+
+      fail_irq:
+      fail_init_mfi:
+      fail_alloc_dma_buf:
+	if (instance->evt_detail)
+		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+				    instance->evt_detail,
+				    instance->evt_detail_h);
+
+	if (instance->producer) {
+		pci_free_consistent(pdev, sizeof(u32), instance->producer,
+				    instance->producer_h);
+		megasas_release_mfi(instance);
+	} else {
+		megasas_release_fusion(instance);
+	}
+	if (instance->consumer)
+		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+				    instance->consumer_h);
+	scsi_host_put(host);
+
+      fail_alloc_instance:
+      fail_set_dma_mask:
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+}
+
+/**
+ * megasas_flush_cache -	Requests FW to flush all its caches
+ * @instance:			Adapter soft state
+ */
+static void megasas_flush_cache(struct megasas_instance *instance)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+		return;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return;
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 0;
+	dcmd->flags = MFI_FRAME_DIR_NONE;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = 0;
+	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
+	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+	megasas_issue_blocked_cmd(instance, cmd);
+
+	megasas_return_cmd(instance, cmd);
+
+	return;
+}
+
+/**
+ * megasas_shutdown_controller -	Instructs FW to shutdown the controller
+ * @instance:				Adapter soft state
+ * @opcode:				Shutdown/Hibernate
+ */
+static void megasas_shutdown_controller(struct megasas_instance *instance,
+					u32 opcode)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+		return;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd)
+		return;
+
+	if (instance->aen_cmd)
+		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
+	if (instance->map_update_cmd)
+		megasas_issue_blocked_abort_cmd(instance,
+						instance->map_update_cmd);
+	dcmd = &cmd->frame->dcmd;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0x0;
+	dcmd->sge_count = 0;
+	dcmd->flags = MFI_FRAME_DIR_NONE;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = 0;
+	dcmd->opcode = opcode;
+
+	megasas_issue_blocked_cmd(instance, cmd);
+
+	megasas_return_cmd(instance, cmd);
+
+	return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * megasas_suspend -	driver suspend entry point
+ * @pdev:		PCI device structure
+ * @state:		PCI power state to suspend routine
+ */
+static int
+megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+
+	instance = pci_get_drvdata(pdev);
+	host = instance->host;
+	instance->unload = 1;
+
+	if (poll_mode_io)
+		del_timer_sync(&instance->io_completion_timer);
+
+	megasas_flush_cache(instance);
+	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+
+	/* cancel the delayed work if this work is still in the queue */
+	if (instance->ev != NULL) {
+		struct megasas_aen_event *ev = instance->ev;
+		cancel_delayed_work(
+			(struct delayed_work *)&ev->hotplug_work);
+		flush_scheduled_work();
+		instance->ev = NULL;
+	}
+
+	tasklet_kill(&instance->isr_tasklet);
+
+	pci_set_drvdata(instance->pdev, instance);
+	instance->instancet->disable_intr(instance->reg_set);
+	free_irq(instance->msi_flag ? instance->msixentry.vector :
+		 instance->pdev->irq, instance);
+	if (instance->msi_flag)
+		pci_disable_msix(instance->pdev);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+/**
+ * megasas_resume-      driver resume entry point
+ * @pdev:               PCI device structure
+ */
+static int
+megasas_resume(struct pci_dev *pdev)
+{
+	int rval;
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+
+	instance = pci_get_drvdata(pdev);
+	host = instance->host;
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+
+	/*
+	 * PCI prepping: enable device, set bus mastering and dma mask
+	 */
+	rval = pci_enable_device_mem(pdev);
+
+	if (rval) {
+		printk(KERN_ERR "megasas: Enable device failed\n");
+		return rval;
+	}
+
+	pci_set_master(pdev);
+
+	if (megasas_set_dma_mask(pdev))
+		goto fail_set_dma_mask;
+
+	/*
+	 * Initialize MFI Firmware
+	 */
+
+	atomic_set(&instance->fw_outstanding, 0);
+
+	/*
+	 * We expect the FW state to be READY
+	 */
+	if (megasas_transition_to_ready(instance))
+		goto fail_ready_state;
+
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_FUSION:
+	{
+		megasas_reset_reply_desc(instance);
+		if (megasas_ioc_init_fusion(instance)) {
+			megasas_free_cmds(instance);
+			megasas_free_cmds_fusion(instance);
+			goto fail_init_mfi;
+		}
+		if (!megasas_get_map_info(instance))
+			megasas_sync_map_info(instance);
+	}
+	break;
+	default:
+		*instance->producer = 0;
+		*instance->consumer = 0;
+		if (megasas_issue_init_mfi(instance))
+			goto fail_init_mfi;
+		break;
+	}
+
+	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+		     (unsigned long)instance);
+
+	/* Now re-enable MSI-X */
+	if (instance->msi_flag)
+		pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
+	/*
+	 * Register IRQ
+	 */
+	if (request_irq(instance->msi_flag ? instance->msixentry.vector :
+			pdev->irq, instance->instancet->service_isr,
+			IRQF_SHARED, "megasas", instance)) {
+		printk(KERN_ERR "megasas: Failed to register IRQ\n");
+		goto fail_irq;
+	}
+
+	instance->instancet->enable_intr(instance->reg_set);
+
+	/*
+	 * Initiate AEN (Asynchronous Event Notification)
+	 */
+	if (megasas_start_aen(instance))
+		printk(KERN_ERR "megasas: Start AEN failed\n");
+
+	/* Initialize the cmd completion timer */
+	if (poll_mode_io)
+		megasas_start_timer(instance, &instance->io_completion_timer,
+				megasas_io_completion_timer,
+				MEGASAS_COMPLETION_TIMER_INTERVAL);
+	instance->unload = 0;
+
+	return 0;
+
+fail_irq:
+fail_init_mfi:
+	if (instance->evt_detail)
+		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+				instance->evt_detail,
+				instance->evt_detail_h);
+
+	if (instance->producer)
+		pci_free_consistent(pdev, sizeof(u32), instance->producer,
+				instance->producer_h);
+	if (instance->consumer)
+		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+				instance->consumer_h);
+	scsi_host_put(host);
+
+fail_set_dma_mask:
+fail_ready_state:
+
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+}
+#else
+#define megasas_suspend	NULL
+#define megasas_resume	NULL
+#endif
+
+/**
+ * megasas_detach_one -	PCI hot"un"plug entry point
+ * @pdev:		PCI device structure
+ */
+static void __devexit megasas_detach_one(struct pci_dev *pdev)
+{
+	int i;
+	struct Scsi_Host *host;
+	struct megasas_instance *instance;
+	struct fusion_context *fusion;
+
+	instance = pci_get_drvdata(pdev);
+	instance->unload = 1;
+	host = instance->host;
+	fusion = instance->ctrl_context;
+
+	if (poll_mode_io)
+		del_timer_sync(&instance->io_completion_timer);
+
+	scsi_remove_host(instance->host);
+	megasas_flush_cache(instance);
+	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+	/* cancel the delayed work if this work is still in the queue */
+	if (instance->ev != NULL) {
+		struct megasas_aen_event *ev = instance->ev;
+		cancel_delayed_work(
+			(struct delayed_work *)&ev->hotplug_work);
+		flush_scheduled_work();
+		instance->ev = NULL;
+	}
+
+	tasklet_kill(&instance->isr_tasklet);
+
+	/*
+	 * Take the instance off the instance array. Note that we will not
+	 * decrement the max_index. We let this array be a sparse array.
+	 */
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+		if (megasas_mgmt_info.instance[i] == instance) {
+			megasas_mgmt_info.count--;
+			megasas_mgmt_info.instance[i] = NULL;
+
+			break;
+		}
+	}
+
+	pci_set_drvdata(instance->pdev, NULL);
+
+	instance->instancet->disable_intr(instance->reg_set);
+
+	free_irq(instance->msi_flag ? instance->msixentry.vector :
+		 instance->pdev->irq, instance);
+	if (instance->msi_flag)
+		pci_disable_msix(instance->pdev);
+
+	switch (instance->pdev->device) {
+	case PCI_DEVICE_ID_LSI_FUSION:
+		megasas_release_fusion(instance);
+		for (i = 0; i < 2 ; i++)
+			if (fusion->ld_map[i])
+				dma_free_coherent(&instance->pdev->dev,
+						  fusion->map_sz,
+						  fusion->ld_map[i],
+						  fusion->
+						  ld_map_phys[i]);
+		kfree(instance->ctrl_context);
+		break;
+	default:
+		megasas_release_mfi(instance);
+		pci_free_consistent(pdev,
+				    sizeof(struct megasas_evt_detail),
+				    instance->evt_detail,
+				    instance->evt_detail_h);
+		pci_free_consistent(pdev, sizeof(u32),
+				    instance->producer,
+				    instance->producer_h);
+		pci_free_consistent(pdev, sizeof(u32),
+				    instance->consumer,
+				    instance->consumer_h);
+		break;
+	}
+
+	scsi_host_put(host);
+
+	pci_set_drvdata(pdev, NULL);
+
+	pci_disable_device(pdev);
+
+	return;
+}
+
+/**
+ * megasas_shutdown -	Shutdown entry point
+ * @pdev:		PCI device structure
+ */
+static void megasas_shutdown(struct pci_dev *pdev)
+{
+	struct megasas_instance *instance = pci_get_drvdata(pdev);
+	instance->unload = 1;
+	megasas_flush_cache(instance);
+	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+}
+
+/**
+ * megasas_mgmt_open -	char node "open" entry point
+ */
+static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+{
+	/*
+	 * Allow only those users with admin rights
+	 */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	return 0;
+}
+
+/**
+ * megasas_mgmt_fasync -	Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
+{
+	int rc;
+
+	mutex_lock(&megasas_async_queue_mutex);
+
+	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
+
+	mutex_unlock(&megasas_async_queue_mutex);
+
+	if (rc >= 0) {
+		/* For sanity check when we get ioctl */
+		filep->private_data = filep;
+		return 0;
+	}
+
+	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
+
+	return rc;
+}
+
+/**
+ * megasas_mgmt_poll -  char node "poll" entry point
+ */
+static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask;
+	unsigned long flags;
+	poll_wait(file, &megasas_poll_wait, wait);
+	spin_lock_irqsave(&poll_aen_lock, flags);
+	if (megasas_poll_wait_aen)
+		mask =   (POLLIN | POLLRDNORM);
+	else
+		mask = 0;
+	spin_unlock_irqrestore(&poll_aen_lock, flags);
+	return mask;
+}
+
+/**
+ * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
+ * @instance:			Adapter soft state
+ * @user_ioc:			User's ioctl packet (user space copy)
+ * @ioc:			Kernel-space copy of the ioctl packet
+ */
+static int
+megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+		      struct megasas_iocpacket __user * user_ioc,
+		      struct megasas_iocpacket *ioc)
+{
+	struct megasas_sge32 *kern_sge32;
+	struct megasas_cmd *cmd;
+	void *kbuff_arr[MAX_IOCTL_SGE];
+	dma_addr_t buf_handle = 0;
+	int error = 0, i;
+	void *sense = NULL;
+	dma_addr_t sense_handle;
+	unsigned long *sense_ptr;
+
+	memset(kbuff_arr, 0, sizeof(kbuff_arr));
+
+	if (ioc->sge_count > MAX_IOCTL_SGE) {
+		printk(KERN_DEBUG "megasas: SGE count [%d] >  max limit [%d]\n",
+		       ioc->sge_count, MAX_IOCTL_SGE);
+		return -EINVAL;
+	}
+
+	cmd = megasas_get_cmd(instance);
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * User's IOCTL packet has 2 frames (maximum). Copy those two
+	 * frames into our cmd's frames. cmd->frame's context will get
+	 * overwritten when we copy from user's frames, so set that value
+	 * separately below.
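+	 * (the restored context is cmd->index, which is what the completion
+	 * path uses to map the frame back to this cmd)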
+	 */
+	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+	cmd->frame->hdr.context = cmd->index;
+	cmd->frame->hdr.pad_0 = 0;
+
+	/*
+	 * The management interface between applications and the fw uses
+	 * MFI frames. E.g., RAID configuration changes, LD property changes,
+	 * etc. are accomplished through different kinds of MFI frames. The
+	 * driver needs to care only about substituting user buffers with
+	 * kernel buffers in SGLs. The location of SGL is embedded in the
+	 * struct iocpacket itself.
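+	 * (kern_sge32 below is simply that SGL location inside our copy of
+	 * the frame, at byte offset ioc->sgl_off)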
+	 */
+	kern_sge32 = (struct megasas_sge32 *)
+	    ((unsigned long)cmd->frame + ioc->sgl_off);
+
+	/*
+	 * For each user buffer, create a mirror buffer and copy in
+	 */
+	for (i = 0; i < ioc->sge_count; i++) {
+		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
+						    ioc->sgl[i].iov_len,
+						    &buf_handle, GFP_KERNEL);
+		if (!kbuff_arr[i]) {
+			printk(KERN_DEBUG "megasas: Failed to alloc "
+			       "kernel SGL buffer for IOCTL\n");
+			error = -ENOMEM;
+			goto out;
+		}
+
+		/*
+		 * We don't change the dma_coherent_mask, so
+		 * dma_alloc_coherent only returns 32 bit addresses
+		 */
+		kern_sge32[i].phys_addr = (u32) buf_handle;
+		kern_sge32[i].length = ioc->sgl[i].iov_len;
+
+		/*
+		 * We created a kernel buffer corresponding to the
+		 * user buffer. Now copy in from the user buffer
+		 */
+		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
+				   (u32) (ioc->sgl[i].iov_len))) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	if (ioc->sense_len) {
+		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
+					     &sense_handle, GFP_KERNEL);
+		if (!sense) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		sense_ptr =
+		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+		*sense_ptr = sense_handle;
+	}
+
+	/*
+	 * Set the sync_cmd flag so that the ISR knows not to complete this
+	 * cmd to the SCSI mid-layer
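+	 * (the blocked-issue call below then waits for that completion
+	 * before returning)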
+	 */
+	cmd->sync_cmd = 1;
+	megasas_issue_blocked_cmd(instance, cmd);
+	cmd->sync_cmd = 0;
+
+	/*
+	 * copy out the kernel buffers to user buffers
+	 */
+	for (i = 0; i < ioc->sge_count; i++) {
+		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
+				 ioc->sgl[i].iov_len)) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * copy out the sense
+	 */
+	if (ioc->sense_len) {
+		/*
+		 * sense_ptr points to the location that has the user
+		 * sense buffer address
+		 */
+		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
+				ioc->sense_off);
+
+		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+				 sense, ioc->sense_len)) {
+			printk(KERN_ERR "megasas: Failed to copy out to user "
+					"sense data\n");
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * copy the status codes returned by the fw
+	 */
+	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
+			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
+		printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+		error = -EFAULT;
+	}
+
+      out:
+	if (sense) {
+		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
+				    sense, sense_handle);
+	}
+
+	for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
+		dma_free_coherent(&instance->pdev->dev,
+				    kern_sge32[i].length,
+				    kbuff_arr[i], kern_sge32[i].phys_addr);
+	}
+
+	megasas_return_cmd(instance, cmd);
+	return error;
+}
+
+static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+{
+	struct megasas_iocpacket __user *user_ioc =
+	    (struct megasas_iocpacket __user *)arg;
+	struct megasas_iocpacket *ioc;
+	struct megasas_instance *instance;
+	int error;
+	int i;
+	unsigned long flags;
+	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+	if (!ioc)
+		return -ENOMEM;
+
+	if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
+		error = -EFAULT;
+		goto out_kfree_ioc;
+	}
+
+	instance = megasas_lookup_instance(ioc->host_no);
+	if (!instance) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		printk(KERN_ERR "Controller in crit error\n");
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	if (instance->unload == 1) {
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+
+	/*
+	 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
+	 */
+	if (down_interruptible(&instance->ioctl_sem)) {
+		error = -ERESTARTSYS;
+		goto out_kfree_ioc;
+	}
+
+	for (i = 0; i < wait_time; i++) {
+
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+			spin_unlock_irqrestore(&instance->hba_lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: waiting "
+				"for controller reset to finish\n");
+		}
+
+		msleep(1000);
+	}
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		printk(KERN_ERR "megaraid_sas: timed out while "
+			"waiting for HBA to recover\n");
+		error = -ENODEV;
+		goto out_kfree_ioc;
+	}
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+	up(&instance->ioctl_sem);
+
+      out_kfree_ioc:
+	kfree(ioc);
+	return error;
+}
+
+static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+{
+	struct megasas_instance *instance;
+	struct megasas_aen aen;
+	int error;
+	int i;
+	unsigned long flags;
+	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+	if (file->private_data != file) {
+		printk(KERN_DEBUG "megasas: fasync_helper was not "
+		       "called first\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
+		return -EFAULT;
+
+	instance = megasas_lookup_instance(aen.host_no);
+
+	if (!instance)
+		return -ENODEV;
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		return -ENODEV;
+	}
+
+	if (instance->unload == 1) {
+		return -ENODEV;
+	}
+
+	for (i = 0; i < wait_time; i++) {
+
+		spin_lock_irqsave(&instance->hba_lock, flags);
+		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+			spin_unlock_irqrestore(&instance->hba_lock,
+						flags);
+			break;
+		}
+
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: waiting for "
+				"controller reset to finish\n");
+		}
+
+		msleep(1000);
+	}
+
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		printk(KERN_ERR "megaraid_sas: timed out while waiting "
+				"for HBA to recover.\n");
+		return -ENODEV;
+	}
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	mutex_lock(&instance->aen_mutex);
+	error = megasas_register_aen(instance, aen.seq_num,
+				     aen.class_locale_word);
+	mutex_unlock(&instance->aen_mutex);
+	return error;
+}
+
+/**
+ * megasas_mgmt_ioctl -	char node ioctl entry point
+ */
+static long
+megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case MEGASAS_IOC_FIRMWARE:
+		return megasas_mgmt_ioctl_fw(file, arg);
+
+	case MEGASAS_IOC_GET_AEN:
+		return megasas_mgmt_ioctl_aen(file, arg);
+	}
+
+	return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
+static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+{
+	struct compat_megasas_iocpacket __user *cioc =
+	    (struct compat_megasas_iocpacket __user *)arg;
+	struct megasas_iocpacket __user *ioc =
+	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+	int i;
+	int error = 0;
+	compat_uptr_t ptr;
+
+	if (clear_user(ioc, sizeof(*ioc)))
+		return -EFAULT;
+
+	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
+	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
+	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
+	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
+	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
+	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+		return -EFAULT;
+
+	/*
+	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+	 * sense_len is non-zero, so prepare the 64 bit value under
+	 * the same condition.
+	 */
+	if (ioc->sense_len) {
+		void __user **sense_ioc_ptr =
+			(void __user **)(ioc->frame.raw + ioc->sense_off);
+		compat_uptr_t *sense_cioc_ptr =
+			(compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+		if (get_user(ptr, sense_cioc_ptr) ||
+		    put_user(compat_ptr(ptr), sense_ioc_ptr))
+			return -EFAULT;
+	}
+
+	for (i = 0; i < MAX_IOCTL_SGE; i++) {
+		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+		    copy_in_user(&ioc->sgl[i].iov_len,
+				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
+			return -EFAULT;
+	}
+
+	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
+
+	if (copy_in_user(&cioc->frame.hdr.cmd_status,
+			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
+		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
+		return -EFAULT;
+	}
+	return error;
+}
+
+static long
+megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	switch (cmd) {
+	case MEGASAS_IOC_FIRMWARE32:
+		return megasas_mgmt_compat_ioctl_fw(file, arg);
+	case MEGASAS_IOC_GET_AEN:
+		return megasas_mgmt_ioctl_aen(file, arg);
+	}
+
+	return -ENOTTY;
+}
+#endif
+
+/*
+ * File operations structure for management interface
+ */
+static const struct file_operations megasas_mgmt_fops = {
+	.owner = THIS_MODULE,
+	.open = megasas_mgmt_open,
+	.fasync = megasas_mgmt_fasync,
+	.unlocked_ioctl = megasas_mgmt_ioctl,
+	.poll = megasas_mgmt_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = megasas_mgmt_compat_ioctl,
+#endif
+	.llseek = noop_llseek,
+};
+
+/*
+ * PCI hotplug support registration structure
+ */
+static struct pci_driver megasas_pci_driver = {
+
+	.name = "megaraid_sas",
+	.id_table = megasas_pci_table,
+	.probe = megasas_probe_one,
+	.remove = __devexit_p(megasas_detach_one),
+	.suspend = megasas_suspend,
+	.resume = megasas_resume,
+	.shutdown = megasas_shutdown,
+};
+
+/*
+ * Sysfs driver attributes
+ */
+static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+{
+	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+			MEGASAS_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
+
+static ssize_t
+megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
+{
+	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
+			MEGASAS_RELDATE);
+}
+
+static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
+		   NULL);
+
+static ssize_t
+megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_poll_for_event);
+}
+
+static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
+			megasas_sysfs_show_support_poll_for_event, NULL);
+
+static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+			megasas_sysfs_show_support_device_change, NULL);
+
+static ssize_t
+megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", megasas_dbg_lvl);
+}
+
+static ssize_t
+megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
+{
+	int retval = count;
+	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
+		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
+		retval = -EINVAL;
+	}
+	return retval;
+}
+
+static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
+		megasas_sysfs_set_dbg_lvl);
+
+static ssize_t
+megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", poll_mode_io);
+}
+
+static ssize_t
+megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
+				const char *buf, size_t count)
+{
+	int retval = count;
+	int tmp = poll_mode_io;
+	int i;
+	struct megasas_instance *instance;
+
+	if (sscanf(buf, "%u", &poll_mode_io) < 1) {
+		printk(KERN_ERR "megasas: could not set poll_mode_io\n");
+		retval = -EINVAL;
+	}
+
+	/*
+	 * Check if poll_mode_io is already set or is same as previous value
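+	 * (i.e. both old and new values are non-zero, or the value is
+	 * unchanged; either way the timers are already in the desired state)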
+	 */
+	if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
+		goto out;
+
+	if (poll_mode_io) {
+		/*
+		 * Start timers for all adapters
+		 */
+		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+			instance = megasas_mgmt_info.instance[i];
+			if (instance) {
+				megasas_start_timer(instance,
+					&instance->io_completion_timer,
+					megasas_io_completion_timer,
+					MEGASAS_COMPLETION_TIMER_INTERVAL);
+			}
+		}
+	} else {
+		/*
+		 * Delete timers for all adapters
+		 */
+		for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+			instance = megasas_mgmt_info.instance[i];
+			if (instance)
+				del_timer_sync(&instance->io_completion_timer);
+		}
+	}
+
+out:
+	return retval;
+}
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+	struct megasas_aen_event *ev =
+		container_of(work, struct megasas_aen_event, hotplug_work);
+	struct megasas_instance *instance = ev->instance;
+	union megasas_evt_class_locale class_locale;
+	struct  Scsi_Host *host;
+	struct  scsi_device *sdev1;
+	u16     pd_index = 0;
+	u16	ld_index = 0;
+	int     i, j, doscan = 0;
+	u32 seq_num;
+	int error;
+
+	if (!instance) {
+		printk(KERN_ERR "invalid instance!\n");
+		kfree(ev);
+		return;
+	}
+	instance->ev = NULL;
+	host = instance->host;
+	if (instance->evt_detail) {
+
+		switch (instance->evt_detail->code) {
+		case MR_EVT_PD_INSERTED:
+			if (megasas_get_pd_list(instance) == 0) {
+			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+				for (j = 0;
+				j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				j++) {
+
+				pd_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 =
+				scsi_device_lookup(host, i, j, 0);
+
+				if (instance->pd_list[pd_index].driveState
+						== MR_PD_STATE_SYSTEM) {
+						if (!sdev1) {
+						scsi_add_device(host, i, j, 0);
+						}
+
+					if (sdev1)
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+			}
+			doscan = 0;
+			break;
+
+		case MR_EVT_PD_REMOVED:
+			if (megasas_get_pd_list(instance) == 0) {
+			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+				for (j = 0;
+				j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				j++) {
+
+				pd_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 =
+				scsi_device_lookup(host, i, j, 0);
+
+				if (instance->pd_list[pd_index].driveState
+					== MR_PD_STATE_SYSTEM) {
+					if (sdev1) {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+				}
+			}
+			}
+			doscan = 0;
+			break;
+
+		case MR_EVT_LD_OFFLINE:
+		case MR_EVT_LD_DELETED:
+			megasas_get_ld_list(instance);
+			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+				for (j = 0;
+				j < MEGASAS_MAX_DEV_PER_CHANNEL;
+				j++) {
+
+				ld_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 = scsi_device_lookup(host,
+					i + MEGASAS_MAX_LD_CHANNELS,
+					j,
+					0);
+
+				if (instance->ld_ids[ld_index] != 0xff) {
+					if (sdev1) {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+				}
+			}
+			doscan = 0;
+			break;
+		case MR_EVT_LD_CREATED:
+			megasas_get_ld_list(instance);
+			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+				for (j = 0;
+					j < MEGASAS_MAX_DEV_PER_CHANNEL;
+					j++) {
+					ld_index =
+					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+					sdev1 = scsi_device_lookup(host,
+						i+MEGASAS_MAX_LD_CHANNELS,
+						j, 0);
+
+					if (instance->ld_ids[ld_index] !=
+								0xff) {
+						if (!sdev1) {
+							scsi_add_device(host,
+								i + 2,
+								j, 0);
+						}
+					}
+					if (sdev1) {
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+			doscan = 0;
+			break;
+		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+		case MR_EVT_FOREIGN_CFG_IMPORTED:
+		case MR_EVT_LD_STATE_CHANGE:
+			doscan = 1;
+			break;
+		default:
+			doscan = 0;
+			break;
+		}
+	} else {
+		printk(KERN_ERR "megasas: invalid evt_detail!\n");
+		kfree(ev);
+		return;
+	}
+
+	if (doscan) {
+		printk(KERN_INFO "megasas: scanning ...\n");
+		megasas_get_pd_list(instance);
+		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+				sdev1 = scsi_device_lookup(host, i, j, 0);
+				if (instance->pd_list[pd_index].driveState ==
+							MR_PD_STATE_SYSTEM) {
+					if (!sdev1) {
+						scsi_add_device(host, i, j, 0);
+					}
+					if (sdev1)
+						scsi_device_put(sdev1);
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+		}
+
+		megasas_get_ld_list(instance);
+		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+				ld_index =
+				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+				sdev1 = scsi_device_lookup(host,
+					i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+				if (instance->ld_ids[ld_index] != 0xff) {
+					if (!sdev1) {
+						scsi_add_device(host,
+								i+2,
+								j, 0);
+					} else {
+						scsi_device_put(sdev1);
+					}
+				} else {
+					if (sdev1) {
+						scsi_remove_device(sdev1);
+						scsi_device_put(sdev1);
+					}
+				}
+			}
+		}
+	}
+
+	if (instance->aen_cmd != NULL) {
+		kfree(ev);
+		return;
+	}
+
+	seq_num = instance->evt_detail->seq_num + 1;
+
+	/* Register AEN with FW for latest sequence number plus 1 */
+	class_locale.members.reserved = 0;
+	class_locale.members.locale = MR_EVT_LOCALE_ALL;
+	class_locale.members.class = MR_EVT_CLASS_DEBUG;
+	mutex_lock(&instance->aen_mutex);
+	error = megasas_register_aen(instance, seq_num,
+					class_locale.word);
+	mutex_unlock(&instance->aen_mutex);
+
+	if (error)
+		printk(KERN_ERR "register aen failed error %x\n", error);
+
+	kfree(ev);
+}
+
+
+static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
+		megasas_sysfs_show_poll_mode_io,
+		megasas_sysfs_set_poll_mode_io);
+
+/**
+ * megasas_init - Driver load entry point
+ */
+static int __init megasas_init(void)
+{
+	int rval;
+
+	/*
+	 * Announce driver version and other information
+	 */
+	printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
+	       MEGASAS_EXT_VERSION);
+
+	support_poll_for_event = 2;
+	support_device_change = 1;
+
+	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+
+	/*
+	 * Register character device node
+	 */
+	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
+
+	if (rval < 0) {
+		printk(KERN_DEBUG "megasas: failed to open device node\n");
+		return rval;
+	}
+
+	megasas_mgmt_majorno = rval;
+
+	/*
+	 * Register ourselves as PCI hotplug module
+	 */
+	rval = pci_register_driver(&megasas_pci_driver);
+
+	if (rval) {
+		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
+		goto err_pcidrv;
+	}
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_version);
+	if (rval)
+		goto err_dcf_attr_ver;
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_release_date);
+	if (rval)
+		goto err_dcf_rel_date;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				&driver_attr_support_poll_for_event);
+	if (rval)
+		goto err_dcf_support_poll_for_event;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_dbg_lvl);
+	if (rval)
+		goto err_dcf_dbg_lvl;
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				  &driver_attr_poll_mode_io);
+	if (rval)
+		goto err_dcf_poll_mode_io;
+
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				&driver_attr_support_device_change);
+	if (rval)
+		goto err_dcf_support_device_change;
+
+	return rval;
+
+err_dcf_support_device_change:
+	driver_remove_file(&megasas_pci_driver.driver,
+		  &driver_attr_poll_mode_io);
+
+err_dcf_poll_mode_io:
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_dbg_lvl);
+err_dcf_dbg_lvl:
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_poll_for_event);
+
+err_dcf_support_poll_for_event:
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_release_date);
+
+err_dcf_rel_date:
+	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+err_dcf_attr_ver:
+	pci_unregister_driver(&megasas_pci_driver);
+err_pcidrv:
+	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+	return rval;
+}
+
+/**
+ * megasas_exit - Driver unload entry point
+ */
+static void __exit megasas_exit(void)
+{
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_poll_mode_io);
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_dbg_lvl);
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_poll_for_event);
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_device_change);
+	driver_remove_file(&megasas_pci_driver.driver,
+			   &driver_attr_release_date);
+	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+
+	pci_unregister_driver(&megasas_pci_driver);
+	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+}
+
+module_init(megasas_init);
+module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
new file mode 100644
index 0000000..53fa96a
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -0,0 +1,516 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2011  LSI Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *  FILE: megaraid_sas_fp.c
+ *
+ *  Authors: LSI Corporation
+ *           Sumant Patro
+ *           Varad Talamacki
+ *           Manoj Jose
+ *
+ *  Send feedback to: <megaraidlinux@lsi.com>
+ *
+ *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ *     ATTN: Linuxraid
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include <asm/div64.h>
+
+#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
+
+/* Prototypes */
+void
+mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
+			      struct LD_LOAD_BALANCE_INFO *lbInfo);
+
+u32 mega_mod64(u64 dividend, u32 divisor)
+{
+	u64 d;
+	u32 remainder;
+
+	if (!divisor)
+		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
+	d = dividend;
+	remainder = do_div(d, divisor);
+	return remainder;
+}
+
+/**
+ * mega_div64_32 - Divide a 64-bit dividend by a 32-bit divisor
+ * @dividend:	Dividend
+ * @divisor:	Divisor
+ *
+ * Returns the quotient.
+ */
+u64 mega_div64_32(u64 dividend, u32 divisor)
+{
+	u32 remainder;
+	u64 d;
+
+	if (!divisor)
+		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");
+
+	d = dividend;
+	remainder = do_div(d, divisor);
+
+	return d;
+}
+
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return &map->raidMap.ldSpanMap[ld].ldRaid;
+}
+
+static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
+						   struct MR_FW_RAID_MAP_ALL
+						   *map)
+{
+	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return map->raidMap.arMapInfo[ar].pd[arm];
+}
+
+static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
+}
+
+static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return map->raidMap.ldSpanMap[ld].ldRaid.targetId;
+}
+
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+{
+	return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
+					  struct MR_FW_RAID_MAP_ALL *map)
+{
+	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+/*
+ * This function will validate Map info data provided by FW
+ */
+u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
+		      struct LD_LOAD_BALANCE_INFO *lbInfo)
+{
+	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+
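+	/*
+	 * The FW-reported totalSize must match a map structure sized for
+	 * exactly ldCount MR_LD_SPAN_MAP entries.
+	 */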
+	if (pFwRaidMap->totalSize !=
+	    (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
+	     (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) {
+		printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
+		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
+				       sizeof(struct MR_LD_SPAN_MAP)) +
+				      (sizeof(struct MR_LD_SPAN_MAP) *
+				       pFwRaidMap->ldCount)));
+		printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
+		       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+		       pFwRaidMap->totalSize);
+		return 0;
+	}
+
+	mr_update_load_balance_params(map, lbInfo);
+
+	return 1;
+}
+
+u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+		    struct MR_FW_RAID_MAP_ALL *map, int *div_error)
+{
+	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+	struct MR_QUAD_ELEMENT    *quad;
+	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
+	u32                span, j;
+
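+	/*
+	 * Walk each span's quad elements looking for the one whose
+	 * logStart..logEnd range contains this row with a matching stride,
+	 * then compute the block offset of the row within that span.
+	 */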
+	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+
+		for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) {
+			quad = &pSpanBlock->block_span_info.quad[j];
+
+			if (quad->diff == 0) {
+				*div_error = 1;
+				return span;
+			}
+			if (quad->logStart <= row  &&  row <= quad->logEnd  &&
+			    (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
+				if (span_blk != NULL) {
+					u64  blk, debugBlk;
+					blk =
+						mega_div64_32(
+							(row-quad->logStart),
+							quad->diff);
+					debugBlk = blk;
+
+					blk = (blk + quad->offsetInSpan) <<
+						raid->stripeShift;
+					*span_blk = blk;
+				}
+				return span;
+			}
+		}
+	}
+	return span;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe.
+*
+* Inputs :
+*
+*    ld   - Logical drive number
+*    stripRow        - Stripe number
+*    stripRef    - Reference in stripe
+*
+* Outputs :
+*
+*    span          - Span number
+*    block         - Absolute Block number in the physical disk
+*/
+u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
+		   u16 *pDevHandle, struct RAID_CONTEXT *pRAID_Context,
+		   struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
+	u32         pd, arRef;
+	u8          physArm, span;
+	u64         row;
+	u8	    retval = TRUE;
+	int	    error_code = 0;
+
+	row =  mega_div64_32(stripRow, raid->rowDataSize);
+
+	if (raid->level == 6) {
+		/* logical arm within row */
+		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
+		u32 rowMod, armQ, arm;
+
+		if (raid->rowSize == 0)
+			return FALSE;
+		/* get logical row mod */
+		rowMod = mega_mod64(row, raid->rowSize);
+		armQ = raid->rowSize-1-rowMod; /* index of Q drive */
+		arm = armQ+1+logArm; /* data always logically follows Q */
+		if (arm >= raid->rowSize) /* handle wrap condition */
+			arm -= raid->rowSize;
+		physArm = (u8)arm;
+	} else  {
+		if (raid->modFactor == 0)
+			return FALSE;
+		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
+							  raid->modFactor),
+					  map);
+	}
+
+	if (raid->spanDepth == 1) {
+		span = 0;
+		*pdBlock = row << raid->stripeShift;
+	} else {
+		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
+		if (error_code == 1)
+			return FALSE;
+	}
+
+	/* Get the array on which this span is present */
+	arRef       = MR_LdSpanArrayGet(ld, span, map);
+	pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
+
+	if (pd != MR_PD_INVALID)
+		/* Get dev handle from Pd. */
+		*pDevHandle = MR_PdDevHandleGet(pd, map);
+	else {
+		*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+		if (raid->level >= 5)
+			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+		else if (raid->level == 1) {
+			/* Get alternate Pd. */
+			pd = MR_ArPdGet(arRef, physArm + 1, map);
+			if (pd != MR_PD_INVALID)
+				/* Get dev handle from Pd */
+				*pDevHandle = MR_PdDevHandleGet(pd, map);
+		}
+		retval = FALSE;
+	}
+
+	*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
+	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+		physArm;
+	return retval;
+}
+
+/*
+******************************************************************************
+*
+* MR_BuildRaidContext function
+*
+* This function builds the RAID context for an IO: the start/end row and
+* strip information is calculated and the region lock parameters are set up.
+* Returns TRUE on success, FALSE if the context could not be built.
+*/
+u8
+MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
+		    struct RAID_CONTEXT *pRAID_Context,
+		    struct MR_FW_RAID_MAP_ALL *map)
+{
+	struct MR_LD_RAID  *raid;
+	u32         ld, stripSize, stripe_mask;
+	u64         endLba, endStrip, endRow, start_row, start_strip;
+	u64         regStart;
+	u32         regSize;
+	u8          num_strips, numRows;
+	u16         ref_in_start_stripe, ref_in_end_stripe;
+	u64         ldStartBlock;
+	u32         numBlocks, ldTgtId;
+	u8          isRead;
+	u8	    retval = 0;
+
+	ldStartBlock = io_info->ldStartBlock;
+	numBlocks = io_info->numBlocks;
+	ldTgtId = io_info->ldTgtId;
+	isRead = io_info->isRead;
+
+	ld = MR_TargetIdToLdGet(ldTgtId, map);
+	raid = MR_LdRaidGet(ld, map);
+
+	stripSize = 1 << raid->stripeShift;
+	stripe_mask = stripSize-1;
+	/*
+	 * calculate starting row and stripe, and number of strips and rows
+	 */
+	start_strip         = ldStartBlock >> raid->stripeShift;
+	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
+	endLba              = ldStartBlock + numBlocks - 1;
+	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
+	endStrip            = endLba >> raid->stripeShift;
+	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */
+	if (raid->rowDataSize == 0)
+		return FALSE;
+	start_row           =  mega_div64_32(start_strip, raid->rowDataSize);
+	endRow              =  mega_div64_32(endStrip, raid->rowDataSize);
+	numRows             = (u8)(endRow - start_row + 1);
+
+	/*
+	 * calculate region info.
+	 */
+
+	/* assume region is at the start of the first row */
+	regStart            = start_row << raid->stripeShift;
+	/* assume this IO needs the full row - we'll adjust if not true */
+	regSize             = stripSize;
+
+	/*
+	 * If the IO spans more than 1 strip, FP is not possible.
+	 * FP is not possible for writes on non-0 RAID levels.
+	 * FP is not possible if the LD is not FP capable.
+	 */
+	if (num_strips > 1 || (!isRead && raid->level != 0) ||
+	    !raid->capability.fpCapable) {
+		io_info->fpOkForIo = FALSE;
+	} else {
+		io_info->fpOkForIo = TRUE;
+	}
+
+	if (numRows == 1) {
+		/* single-strip IOs can always lock only the data needed */
+		if (num_strips == 1) {
+			regStart += ref_in_start_stripe;
+			regSize = numBlocks;
+		}
+		/* multi-strip IOs always need the full stripe locked */
+	} else {
+		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+			/* If the start strip is the last in the start row */
+			regStart += ref_in_start_stripe;
+			regSize = stripSize - ref_in_start_stripe;
+			/* initialize count to sectors from startref to end
+			   of strip */
+		}
+
+		if (numRows > 2)
+			/* Add complete rows in the middle of the transfer */
+			regSize += (numRows-2) << raid->stripeShift;
+
+		/* if IO ends within first strip of last row */
+		if (endStrip == endRow*raid->rowDataSize)
+			regSize += ref_in_end_stripe+1;
+		else
+			regSize += stripSize;
+	}
+
+	pRAID_Context->timeoutValue     = map->raidMap.fpPdIoTimeoutSec;
+	pRAID_Context->regLockFlags     = (isRead) ? REGION_TYPE_SHARED_READ :
+		raid->regTypeReqOnWrite;
+	pRAID_Context->VirtualDiskTgtId = raid->targetId;
+	pRAID_Context->regLockRowLBA    = regStart;
+	pRAID_Context->regLockLength    = regSize;
+	pRAID_Context->configSeqNum	= raid->seqNum;
+
+	/*
+	 * Get Phy Params only if FP capable, or else leave it to MR firmware
+	 * to do the calculation.
+	 */
+	if (io_info->fpOkForIo) {
+		retval = MR_GetPhyParams(ld, start_strip, ref_in_start_stripe,
+					 &io_info->pdBlock,
+					 &io_info->devHandle, pRAID_Context,
+					 map);
+		/* If IO is on an invalid Pd, then FP is not possible */
+		if (io_info->devHandle == MR_PD_INVALID)
+			io_info->fpOkForIo = FALSE;
+		return retval;
+	} else if (isRead) {
+		uint stripIdx;
+		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+			if (!MR_GetPhyParams(ld, start_strip + stripIdx,
+					     ref_in_start_stripe,
+					     &io_info->pdBlock,
+					     &io_info->devHandle,
+					     pRAID_Context, map))
+				return TRUE;
+		}
+	}
+	return TRUE;
+}
+
+void
+mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
+			      struct LD_LOAD_BALANCE_INFO *lbInfo)
+{
+	int ldCount;
+	u16 ld;
+	struct MR_LD_RAID *raid;
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, map);
+		if (ld >= MAX_LOGICAL_DRIVES) {
+			lbInfo[ldCount].loadBalanceFlag = 0;
+			continue;
+		}
+
+		raid = MR_LdRaidGet(ld, map);
+
+		/* Two drive Optimal RAID 1 */
+		if ((raid->level == 1)  &&  (raid->rowSize == 2) &&
+		    (raid->spanDepth == 1) && raid->ldState ==
+		    MR_LD_STATE_OPTIMAL) {
+			u32 pd, arRef;
+
+			lbInfo[ldCount].loadBalanceFlag = 1;
+
+			/* Get the array on which this span is present */
+			arRef = MR_LdSpanArrayGet(ld, 0, map);
+
+			/* Get the Pd */
+			pd = MR_ArPdGet(arRef, 0, map);
+			/* Get dev handle from Pd */
+			lbInfo[ldCount].raid1DevHandle[0] =
+				MR_PdDevHandleGet(pd, map);
+			/* Get the Pd */
+			pd = MR_ArPdGet(arRef, 1, map);
+
+			/* Get the dev handle from Pd */
+			lbInfo[ldCount].raid1DevHandle[1] =
+				MR_PdDevHandleGet(pd, map);
+		} else
+			lbInfo[ldCount].loadBalanceFlag = 0;
+	}
+}
+
+u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
+			u32 count)
+{
+	u16     pend0, pend1;
+	u64     diff0, diff1;
+	u8      bestArm;
+
+	/* get the pending cmds for the data and mirror arms */
+	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
+	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+
+	/* Determine the disk whose head is nearer to the req. block */
+	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
+	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
+	bestArm = (diff0 <= diff1 ? 0 : 1);
+
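+	/*
+	 * Prefer the arm whose head is nearer, but override that choice
+	 * when the pending command counts are imbalanced by more than 16,
+	 * so one arm does not get overloaded.
+	 */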
+	if ((bestArm == arm && pend0 > pend1 + 16)  ||
+	    (bestArm != arm && pend1 > pend0 + 16))
+		bestArm ^= 1;
+
+	/* Update the last accessed block on the correct pd */
+	lbInfo->last_accessed_block[bestArm] = block + count - 1;
+
+	return bestArm;
+}
+
+u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
+			   struct IO_REQUEST_INFO *io_info)
+{
+	u8 arm, old_arm;
+	u16 devHandle;
+
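+	/* Determine which arm the IO's current device handle maps to */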
+	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
+
+	/* get best new arm */
+	arm  = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
+				    io_info->numBlocks);
+	devHandle = lbInfo->raid1DevHandle[arm];
+	atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
+
+	return devHandle;
+}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
new file mode 100644
index 0000000..c1e09d5
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -0,0 +1,2248 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2011  LSI Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *  FILE: megaraid_sas_fusion.c
+ *
+ *  Authors: LSI Corporation
+ *           Sumant Patro
+ *           Adam Radford <linuxraid@lsi.com>
+ *
+ *  Send feedback to: <megaraidlinux@lsi.com>
+ *
+ *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ *     ATTN: Linuxraid
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/smp_lock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+extern void megasas_free_cmds(struct megasas_instance *instance);
+extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+					   *instance);
+extern void
+megasas_complete_cmd(struct megasas_instance *instance,
+		     struct megasas_cmd *cmd, u8 alt_status);
+int megasas_is_ldio(struct scsi_cmnd *cmd);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd);
+
+void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
+int megasas_alloc_cmds(struct megasas_instance *instance);
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+int
+megasas_issue_polled(struct megasas_instance *instance,
+		     struct megasas_cmd *cmd);
+
+u8
+MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
+		    struct RAID_CONTEXT *pRAID_Context,
+		    struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
+		      struct LD_LOAD_BALANCE_INFO *lbInfo);
+u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
+			   struct IO_REQUEST_INFO *in_info);
+int megasas_transition_to_ready(struct megasas_instance *instance);
+void megaraid_sas_kill_hba(struct megasas_instance *instance);
+
+extern u32 megasas_dbg_lvl;
+
+/**
+ * megasas_enable_intr_fusion -	Enables interrupts
+ * @regs:			MFI register set
+ */
+void
+megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+	/* Dummy readl to force pci flush */
+	readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_fusion - Disables interrupts
+ * @regs:			 MFI register set
+ */
+void
+megasas_disable_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+	u32 mask = 0xFFFFFFFF;
+	u32 status;
+
+	writel(mask, &regs->outbound_intr_mask);
+	/* Dummy readl to force pci flush */
+	status = readl(&regs->outbound_intr_mask);
+}
+
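+/**
+ * megasas_clear_intr_fusion -	Check and clear the interrupt
+ * @regs:			 MFI register set
+ *
+ * Returns 1 if the interrupt was raised by this adapter (acknowledging it
+ * where needed), 0 otherwise.
+ */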
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+	u32 status;
+	/*
+	 * Check if it is our interrupt
+	 */
+	status = readl(&regs->outbound_intr_status);
+
+	if (status & 1) {
+		writel(status, &regs->outbound_intr_status);
+		readl(&regs->outbound_intr_status);
+		return 1;
+	}
+	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+		return 0;
+
+	/*
+	 * dummy read to flush PCI
+	 */
+	readl(&regs->outbound_intr_status);
+
+	return 1;
+}
+
+/**
+ * megasas_get_cmd_fusion -	Get a command from the free pool
+ * @instance:		Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
+						  *instance)
+{
+	unsigned long flags;
+	struct fusion_context *fusion =
+		(struct fusion_context *)instance->ctrl_context;
+	struct megasas_cmd_fusion *cmd = NULL;
+
+	spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+
+	if (!list_empty(&fusion->cmd_pool)) {
+		cmd = list_entry((&fusion->cmd_pool)->next,
+				 struct megasas_cmd_fusion, list);
+		list_del_init(&cmd->list);
+	} else {
+		printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
+	}
+
+	spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+	return cmd;
+}
+
+/**
+ * megasas_return_cmd_fusion -	Return a cmd to free command pool
+ * @instance:		Adapter soft state
+ * @cmd:		Command packet to be returned to free command pool
+ */
+static inline void
+megasas_return_cmd_fusion(struct megasas_instance *instance,
+			  struct megasas_cmd_fusion *cmd)
+{
+	unsigned long flags;
+	struct fusion_context *fusion =
+		(struct fusion_context *)instance->ctrl_context;
+
+	spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+
+	cmd->scmd = NULL;
+	cmd->sync_cmd_idx = (u32)ULONG_MAX;
+	list_add_tail(&cmd->list, &fusion->cmd_pool);
+
+	spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+}
+
+/**
+ * megasas_teardown_frame_pool_fusion -	Destroy the cmd frame DMA pool
+ * @instance:				Adapter soft state
+ */
+static void megasas_teardown_frame_pool_fusion(
+	struct megasas_instance *instance)
+{
+	int i;
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	u16 max_cmd = instance->max_fw_cmds;
+
+	struct megasas_cmd_fusion *cmd;
+
+	if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
+		printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
+		       "sense pool : %p\n", fusion->sg_dma_pool,
+		       fusion->sense_dma_pool);
+		return;
+	}
+
+	/*
+	 * Return all frames to pool
+	 */
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = fusion->cmd_list[i];
+
+		if (cmd->sg_frame)
+			pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+				      cmd->sg_frame_phys_addr);
+
+		if (cmd->sense)
+			pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+				      cmd->sense_phys_addr);
+	}
+
+	/*
+	 * Now destroy the pool itself
+	 */
+	pci_pool_destroy(fusion->sg_dma_pool);
+	pci_pool_destroy(fusion->sense_dma_pool);
+
+	fusion->sg_dma_pool = NULL;
+	fusion->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
+ * @instance:		Adapter soft state
+ */
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance)
+{
+	int i;
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+
+
+	req_sz = fusion->request_alloc_sz;
+	reply_sz = fusion->reply_alloc_sz;
+	io_frames_sz = fusion->io_frames_alloc_sz;
+
+	max_cmds = instance->max_fw_cmds;
+
+	/* Free descriptors and request Frames memory */
+	if (fusion->req_frames_desc)
+		dma_free_coherent(&instance->pdev->dev, req_sz,
+				  fusion->req_frames_desc,
+				  fusion->req_frames_desc_phys);
+
+	if (fusion->reply_frames_desc) {
+		pci_pool_free(fusion->reply_frames_desc_pool,
+			      fusion->reply_frames_desc,
+			      fusion->reply_frames_desc_phys);
+		pci_pool_destroy(fusion->reply_frames_desc_pool);
+	}
+
+	if (fusion->io_request_frames) {
+		pci_pool_free(fusion->io_request_frames_pool,
+			      fusion->io_request_frames,
+			      fusion->io_request_frames_phys);
+		pci_pool_destroy(fusion->io_request_frames_pool);
+	}
+
+	/* Free the Fusion frame pool */
+	megasas_teardown_frame_pool_fusion(instance);
+
+	/* Free all the commands in the cmd_list */
+	for (i = 0; i < max_cmds; i++)
+		kfree(fusion->cmd_list[i]);
+
+	/* Free the cmd_list buffer itself */
+	kfree(fusion->cmd_list);
+	fusion->cmd_list = NULL;
+
+	INIT_LIST_HEAD(&fusion->cmd_pool);
+}
+
+/**
+ * megasas_create_frame_pool_fusion -	Creates DMA pool for cmd frames
+ * @instance:			Adapter soft state
+ *
+ */
+static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+{
+	int i;
+	u32 max_cmd;
+	struct fusion_context *fusion;
+	struct megasas_cmd_fusion *cmd;
+	u32 total_sz_chain_frame;
+
+	fusion = instance->ctrl_context;
+	max_cmd = instance->max_fw_cmds;
+
+	total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME;
+
+	/*
+	 * Use DMA pool facility provided by PCI layer
+	 */
+
+	fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion",
+					      instance->pdev,
+					      total_sz_chain_frame, 4,
+					      0);
+	if (!fusion->sg_dma_pool) {
+		printk(KERN_DEBUG "megasas: failed to setup request pool "
+		       "fusion\n");
+		return -ENOMEM;
+	}
+	fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
+						 instance->pdev,
+						 SCSI_SENSE_BUFFERSIZE, 64, 0);
+
+	if (!fusion->sense_dma_pool) {
+		printk(KERN_DEBUG "megasas: failed to setup sense pool "
+		       "fusion\n");
+		pci_pool_destroy(fusion->sg_dma_pool);
+		fusion->sg_dma_pool = NULL;
+		return -ENOMEM;
+	}
+
+	/*
+	 * Allocate and attach a frame to each of the commands in cmd_list
+	 */
+	for (i = 0; i < max_cmd; i++) {
+
+		cmd = fusion->cmd_list[i];
+
+		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
+					       GFP_KERNEL,
+					       &cmd->sg_frame_phys_addr);
+
+		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+					    GFP_KERNEL, &cmd->sense_phys_addr);
+		/*
+		 * megasas_teardown_frame_pool_fusion() takes care of freeing
+		 * whatever has been allocated
+		 */
+		if (!cmd->sg_frame || !cmd->sense) {
+			printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+			megasas_teardown_frame_pool_fusion(instance);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
+ * megasas_alloc_cmds_fusion -	Allocates the command packets
+ * @instance:		Adapter soft state
+ *
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed.
+ * In this driver, the 32-bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * cmds are formed in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as SMID of the cmd.
+ * SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+	int i, j;
+	u32 max_cmd, io_frames_sz;
+	struct fusion_context *fusion;
+	struct megasas_cmd_fusion *cmd;
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+	u32 offset;
+	dma_addr_t io_req_base_phys;
+	u8 *io_req_base;
+
+	fusion = instance->ctrl_context;
+
+	max_cmd = instance->max_fw_cmds;
+
+	fusion->req_frames_desc =
+		dma_alloc_coherent(&instance->pdev->dev,
+				   fusion->request_alloc_sz,
+				   &fusion->req_frames_desc_phys, GFP_KERNEL);
+
+	if (!fusion->req_frames_desc) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+		       "request_frames\n");
+		goto fail_req_desc;
+	}
+
+	fusion->reply_frames_desc_pool =
+		pci_pool_create("reply_frames pool", instance->pdev,
+				fusion->reply_alloc_sz, 16, 0);
+
+	if (!fusion->reply_frames_desc_pool) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+		       "reply_frame pool\n");
+		goto fail_reply_desc;
+	}
+
+	fusion->reply_frames_desc =
+		pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
+			       &fusion->reply_frames_desc_phys);
+	if (!fusion->reply_frames_desc) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+		       "reply_frame pool\n");
+		pci_pool_destroy(fusion->reply_frames_desc_pool);
+		goto fail_reply_desc;
+	}
+
+	reply_desc = fusion->reply_frames_desc;
+	for (i = 0; i < fusion->reply_q_depth; i++, reply_desc++)
+		reply_desc->Words = ULLONG_MAX;
+
+	io_frames_sz = fusion->io_frames_alloc_sz;
+
+	fusion->io_request_frames_pool =
+		pci_pool_create("io_request_frames pool", instance->pdev,
+				fusion->io_frames_alloc_sz, 16, 0);
+
+	if (!fusion->io_request_frames_pool) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+		       "io_request_frame pool\n");
+		goto fail_io_frames;
+	}
+
+	fusion->io_request_frames =
+		pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
+			       &fusion->io_request_frames_phys);
+	if (!fusion->io_request_frames) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+		       "io_request_frames\n");
+		pci_pool_destroy(fusion->io_request_frames_pool);
+		goto fail_io_frames;
+	}
+
+	/*
+	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+	 * Allocate the dynamic array first and then allocate individual
+	 * commands.
+	 */
+	fusion->cmd_list = kmalloc(sizeof(struct megasas_cmd_fusion *)
+				   *max_cmd, GFP_KERNEL);
+
+	if (!fusion->cmd_list) {
+		printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
+		       "memory for cmd_list_fusion\n");
+		goto fail_cmd_list;
+	}
+
+	memset(fusion->cmd_list, 0, sizeof(struct megasas_cmd_fusion *)
+	       *max_cmd);
+
+	max_cmd = instance->max_fw_cmds;
+	for (i = 0; i < max_cmd; i++) {
+		fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
+					      GFP_KERNEL);
+		if (!fusion->cmd_list[i]) {
+			printk(KERN_ERR "Could not alloc cmd list fusion\n");
+
+			for (j = 0; j < i; j++)
+				kfree(fusion->cmd_list[j]);
+
+			kfree(fusion->cmd_list);
+			fusion->cmd_list = NULL;
+			goto fail_cmd_list;
+		}
+	}
+
+	/* The first 256 bytes (SMID 0) are not used. Don't add to cmd list */
+	io_req_base = fusion->io_request_frames +
+		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+	io_req_base_phys = fusion->io_request_frames_phys +
+		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+
+	/*
+	 * Add all the commands to command pool (fusion->cmd_pool)
+	 */
+
+	/* SMID 0 is reserved. Set SMID/index from 1 */
+	for (i = 0; i < max_cmd; i++) {
+		cmd = fusion->cmd_list[i];
+		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
+		cmd->index = i + 1;
+		cmd->scmd = NULL;
+		cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */
+		cmd->instance = instance;
+		cmd->io_request =
+			(struct MPI2_RAID_SCSI_IO_REQUEST *)
+		  (io_req_base + offset);
+		memset(cmd->io_request, 0,
+		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+		cmd->io_request_phys_addr = io_req_base_phys + offset;
+
+		list_add_tail(&cmd->list, &fusion->cmd_pool);
+	}
+
+	/*
+	 * Create a frame pool and assign one frame to each cmd
+	 */
+	if (megasas_create_frame_pool_fusion(instance)) {
+		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+		megasas_free_cmds_fusion(instance);
+		goto fail_req_desc;
+	}
+
+	return 0;
+
+fail_cmd_list:
+	pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
+		      fusion->io_request_frames_phys);
+	pci_pool_destroy(fusion->io_request_frames_pool);
+fail_io_frames:
+	pci_pool_free(fusion->reply_frames_desc_pool,
+		      fusion->reply_frames_desc,
+		      fusion->reply_frames_desc_phys);
+	pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+fail_reply_desc:
+	dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
+			  fusion->req_frames_desc,
+			  fusion->req_frames_desc_phys);
+fail_req_desc:
+	return -ENOMEM;
+}
+
+/**
+ * wait_and_poll -	Polls for a command's completion
+ * @instance:			Adapter soft state
+ * @cmd:			Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	int i;
+	struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+	u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
+
+	/*
+	 * Wait for cmd_status to change
+	 */
+	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
+		rmb();
+		msleep(20);
+	}
+
+	if (frame_hdr->cmd_status == 0xff)
+		return -ETIME;
+
+	return 0;
+}
+
+/**
+ * megasas_ioc_init_fusion -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * Issues the IOC Init cmd
+ */
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance)
+{
+	struct megasas_init_frame *init_frame;
+	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+	dma_addr_t	ioc_init_handle;
+	u32 context;
+	struct megasas_cmd *cmd;
+	u8 ret;
+	struct fusion_context *fusion;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	int i;
+	struct megasas_header *frame_hdr;
+
+	fusion = instance->ctrl_context;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
+		ret = 1;
+		goto fail_get_cmd;
+	}
+
+	IOCInitMessage =
+	  dma_alloc_coherent(&instance->pdev->dev,
+			     sizeof(struct MPI2_IOC_INIT_REQUEST),
+			     &ioc_init_handle, GFP_KERNEL);
+
+	if (!IOCInitMessage) {
+		printk(KERN_ERR "Could not allocate memory for "
+		       "IOCInitMessage\n");
+		ret = 1;
+		goto fail_fw_init;
+	}
+
+	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
+	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
+	IOCInitMessage->MsgVersion = MPI2_VERSION;
+	IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION;
+	IOCInitMessage->SystemRequestFrameSize =
+		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
+
+	IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth;
+	IOCInitMessage->ReplyDescriptorPostQueueAddress	=
+		fusion->reply_frames_desc_phys;
+	IOCInitMessage->SystemRequestFrameBaseAddress =
+		fusion->io_request_frames_phys;
+
+	init_frame = (struct megasas_init_frame *)cmd->frame;
+	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+
+	frame_hdr = &cmd->frame->hdr;
+	context = init_frame->context;
+	init_frame->context = context;
+
+	frame_hdr->cmd_status = 0xFF;
+	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+	init_frame->cmd	= MFI_CMD_INIT;
+	init_frame->cmd_status = 0xFF;
+
+	init_frame->queue_info_new_phys_addr_lo = ioc_init_handle;
+	init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST);
+
+	req_desc =
+	  (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc;
+
+	req_desc->Words = cmd->frame_phys_addr;
+	req_desc->MFAIo.RequestFlags =
+		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+	/*
+	 * disable the intr before firing the init frame
+	 */
+	instance->instancet->disable_intr(instance->reg_set);
+
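+	/* Wait up to ~10 seconds for the FW doorbell to clear */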
+	for (i = 0; i < (10 * 1000); i += 20) {
+		if (readl(&instance->reg_set->doorbell) & 1)
+			msleep(20);
+		else
+			break;
+	}
+
+	instance->instancet->fire_cmd(instance, req_desc->u.low,
+				      req_desc->u.high, instance->reg_set);
+
+	wait_and_poll(instance, cmd);
+
+	frame_hdr = &cmd->frame->hdr;
+	if (frame_hdr->cmd_status != 0) {
+		ret = 1;
+		goto fail_fw_init;
+	}
+	printk(KERN_INFO "megasas: IOC Init cmd success\n");
+
+	ret = 0;
+
+fail_fw_init:
+	megasas_return_cmd(instance, cmd);
+	if (IOCInitMessage)
+		dma_free_coherent(&instance->pdev->dev,
+				  sizeof(struct MPI2_IOC_INIT_REQUEST),
+				  IOCInitMessage, ioc_init_handle);
+fail_get_cmd:
+	return ret;
+}
+
+/*
+ * megasas_return_cmd_for_smid -	Returns a cmd_fusion for a SMID
+ * @instance:				Adapter soft state
+ * @smid:				SMID of the command to return
+ */
+void
+megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid)
+{
+	struct fusion_context *fusion;
+	struct megasas_cmd_fusion *cmd;
+
+	fusion = instance->ctrl_context;
+	cmd = fusion->cmd_list[smid - 1];
+	megasas_return_cmd_fusion(instance, cmd);
+}
+
+/*
+ * megasas_get_ld_map_info -	Returns FW's ld_map structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's RAID (LD) map
+ * structure.  This information is mainly used by the driver to decide
+ * whether fast path IO can be used.
+ */
+static int
+megasas_get_ld_map_info(struct megasas_instance *instance)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_FW_RAID_MAP_ALL *ci;
+	dma_addr_t ci_h = 0;
+	u32 size_map_info;
+	struct fusion_context *fusion;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
+		return -ENOMEM;
+	}
+
+	fusion = instance->ctrl_context;
+
+	if (!fusion) {
+		megasas_return_cmd(instance, cmd);
+		return 1;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	size_map_info = sizeof(struct MR_FW_RAID_MAP) +
+		(sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+
+	ci = fusion->ld_map[(instance->map_id & 1)];
+	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
+
+	if (!ci) {
+		printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	memset(ci, 0, sizeof(*ci));
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_READ;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = size_map_info;
+	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = size_map_info;
+
+	if (!megasas_issue_polled(instance, cmd))
+		ret = 0;
+	else {
+		printk(KERN_ERR "megasas: Get LD Map Info Failed\n");
+		ret = -1;
+	}
+
+	megasas_return_cmd(instance, cmd);
+
+	return ret;
+}
+
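+/**
+ * megasas_get_map_info -	Fetch and validate the FW RAID map
+ * @instance:			Adapter soft state
+ *
+ * Returns 0 and enables fast path IO when a valid map is obtained,
+ * 1 otherwise.
+ */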
+u8
+megasas_get_map_info(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	fusion->fast_path_io = 0;
+	if (!megasas_get_ld_map_info(instance)) {
+		if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)],
+				       fusion->load_balance_info)) {
+			fusion->fast_path_io = 1;
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * megasas_sync_map_info -	Syncs the LD map info with the FW
+ * @instance:				Adapter soft state
+ *
+ * Issues a pended internal command (DCMD) that sends the target ID and
+ * sequence number of each LD in the current RAID map back to the FW.
+ */
+int
+megasas_sync_map_info(struct megasas_instance *instance)
+{
+	int ret = 0, i;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	u32 size_sync_info, num_lds;
+	struct fusion_context *fusion;
+	struct MR_LD_TARGET_SYNC *ci = NULL;
+	struct MR_FW_RAID_MAP_ALL *map;
+	struct MR_LD_RAID  *raid;
+	struct MR_LD_TARGET_SYNC *ld_sync;
+	dma_addr_t ci_h = 0;
+	u32 size_map_info;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: Failed to get cmd for sync "
+		       "info.\n");
+		return -ENOMEM;
+	}
+
+	fusion = instance->ctrl_context;
+
+	if (!fusion) {
+		megasas_return_cmd(instance, cmd);
+		return 1;
+	}
+
+	map = fusion->ld_map[instance->map_id & 1];
+
+	num_lds = map->raidMap.ldCount;
+
+	dcmd = &cmd->frame->dcmd;
+
+	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	ci = (struct MR_LD_TARGET_SYNC *)
+	  fusion->ld_map[(instance->map_id - 1) & 1];
+	memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));
+
+	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
+
+	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
+
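+	/* Fill in the target ID and sequence number for each LD in the map */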
+	for (i = 0; i < num_lds; i++, ld_sync++) {
+		raid = MR_LdRaidGet(i, map);
+		ld_sync->targetId = MR_GetLDTgtId(i, map);
+		ld_sync->seqNum = raid->seqNum;
+	}
+
+	size_map_info = sizeof(struct MR_FW_RAID_MAP) +
+		(sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_WRITE;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = size_map_info;
+	dcmd->mbox.b[0] = num_lds;
+	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
+	dcmd->sgl.sge32[0].phys_addr = ci_h;
+	dcmd->sgl.sge32[0].length = size_map_info;
+
+	instance->map_update_cmd = cmd;
+
+	instance->instancet->issue_dcmd(instance, cmd);
+
+	return ret;
+}
+
+/**
+ * megasas_init_adapter_fusion -	Initializes the FW
+ * @instance:		Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+u32
+megasas_init_adapter_fusion(struct megasas_instance *instance)
+{
+	struct megasas_register_set __iomem *reg_set;
+	struct fusion_context *fusion;
+	u32 max_cmd;
+	int i = 0;
+
+	fusion = instance->ctrl_context;
+
+	reg_set = instance->reg_set;
+
+	/*
+	 * Get various operational parameters from status register
+	 */
+	instance->max_fw_cmds =
+		instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+	instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+
+	/*
+	 * Reduce the max supported cmds by 1. This is to ensure that the
+	 * reply_q_sz (1 more than the max cmd that driver may send)
+	 * does not exceed max cmds that the FW can support
+	 */
+	instance->max_fw_cmds = instance->max_fw_cmds-1;
+	/* Only internal cmds (DCMD) need to have MFI frames */
+	instance->max_mfi_cmds = MEGASAS_INT_CMDS;
+
+	max_cmd = instance->max_fw_cmds;
+
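+	/*
+	 * Reply queue depth: one more entry than the number of commands,
+	 * rounded up to a multiple of 16.
+	 */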
+	fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16;
+
+	fusion->request_alloc_sz =
+		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
+	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
+		*(fusion->reply_q_depth);
+	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
+		 (max_cmd + 1)); /* Extra 1 for SMID 0 */
+
+	fusion->max_sge_in_main_msg =
+	  (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+	   offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+	fusion->max_sge_in_chain =
+		MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
+
+	instance->max_num_sge = fusion->max_sge_in_main_msg +
+		fusion->max_sge_in_chain - 2;
+
+	/* Used for pass thru MFI frame (DCMD) */
+	fusion->chain_offset_mfi_pthru =
+		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+	fusion->chain_offset_io_request =
+		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+		 sizeof(union MPI2_SGE_IO_UNION))/16;
+
+	fusion->last_reply_idx = 0;
+
+	/*
+	 * Allocate memory for descriptors
+	 * Create a pool of commands
+	 */
+	if (megasas_alloc_cmds(instance))
+		goto fail_alloc_mfi_cmds;
+	if (megasas_alloc_cmds_fusion(instance))
+		goto fail_alloc_cmds;
+
+	if (megasas_ioc_init_fusion(instance))
+		goto fail_ioc_init;
+
+	instance->flag_ieee = 1;
+
+	fusion->map_sz =  sizeof(struct MR_FW_RAID_MAP) +
+	  (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+
+	fusion->fast_path_io = 0;
+
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+						       fusion->map_sz,
+						       &fusion->ld_map_phys[i],
+						       GFP_KERNEL);
+		if (!fusion->ld_map[i]) {
+			printk(KERN_ERR "megasas: Could not allocate memory "
+			       "for map info\n");
+			goto fail_map_info;
+		}
+	}
+
+	if (!megasas_get_map_info(instance))
+		megasas_sync_map_info(instance);
+
+	return 0;
+
+fail_alloc_cmds:
+fail_alloc_mfi_cmds:
+fail_map_info:
+	if (i == 1)
+		dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
+				  fusion->ld_map[0], fusion->ld_map_phys[0]);
+fail_ioc_init:
+	return 1;
+}
+
+/**
+ * megasas_fire_cmd_fusion -	Sends command to the FW
+ * @instance:			Adapter soft state
+ * @req_desc_lo:		Low 32 bits of the request descriptor
+ * @req_desc_hi:		High 32 bits of the request descriptor
+ * @regs:			MFI register set
+ */
+void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+			dma_addr_t req_desc_lo,
+			u32 req_desc_hi,
+			struct megasas_register_set __iomem *regs)
+{
+	unsigned long flags;
+
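+	/*
+	 * The 64-bit request descriptor is written as two 32-bit MMIO
+	 * writes; hba_lock keeps the pair atomic with respect to other
+	 * submitters.
+	 */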
+	spin_lock_irqsave(&instance->hba_lock, flags);
+
+	writel(req_desc_lo,
+	       &(regs)->inbound_low_queue_port);
+	writel(req_desc_hi, &(regs)->inbound_high_queue_port);
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * map_cmd_status -	Maps FW cmd status to OS cmd status
+ * @cmd :		Pointer to cmd
+ * @status :		status of cmd returned by FW
+ * @ext_status :	ext status of cmd returned by FW
+ */
+
+void
+map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+{
+
+	switch (status) {
+
+	case MFI_STAT_OK:
+		cmd->scmd->result = DID_OK << 16;
+		break;
+
+	case MFI_STAT_SCSI_IO_FAILED:
+	case MFI_STAT_LD_INIT_IN_PROGRESS:
+		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+		break;
+
+	case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+		cmd->scmd->result = (DID_OK << 16) | ext_status;
+		if (ext_status == SAM_STAT_CHECK_CONDITION) {
+			memset(cmd->scmd->sense_buffer, 0,
+			       SCSI_SENSE_BUFFERSIZE);
+			memcpy(cmd->scmd->sense_buffer, cmd->sense,
+			       SCSI_SENSE_BUFFERSIZE);
+			cmd->scmd->result |= DRIVER_SENSE << 24;
+		}
+		break;
+
+	case MFI_STAT_LD_OFFLINE:
+	case MFI_STAT_DEVICE_NOT_FOUND:
+		cmd->scmd->result = DID_BAD_TARGET << 16;
+		break;
+
+	default:
+		printk(KERN_DEBUG "megasas: FW status %#x\n", status);
+		cmd->scmd->result = DID_ERROR << 16;
+		break;
+	}
+}
+
+/**
+ * megasas_make_sgl_fusion -	Prepares the IEEE SGL
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command from the mid-layer
+ * @sgl_ptr:		SGL to be filled in
+ * @cmd:		cmd we are working on
+ *
+ * If successful, this function returns the number of SG elements.
+ */
+static int
+megasas_make_sgl_fusion(struct megasas_instance *instance,
+			struct scsi_cmnd *scp,
+			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+			struct megasas_cmd_fusion *cmd)
+{
+	int i, sg_processed;
+	int sge_count, sge_idx;
+	struct scatterlist *os_sgl;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	cmd->io_request->ChainOffset = 0;
+
+	sge_count = scsi_dma_map(scp);
+
+	BUG_ON(sge_count < 0);
+
+	if (sge_count > instance->max_num_sge || !sge_count)
+		return sge_count;
+
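+	/*
+	 * If the SGL does not fit in the main message frame, reserve the
+	 * last slot of the main frame for a chain element pointing to the
+	 * remaining SGEs in this command's sg_frame.
+	 */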
+	if (sge_count > fusion->max_sge_in_main_msg) {
+		/* One element to store the chain info */
+		sge_idx = fusion->max_sge_in_main_msg - 1;
+	} else
+		sge_idx = sge_count;
+
+	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+		sgl_ptr->Length = sg_dma_len(os_sgl);
+		sgl_ptr->Address = sg_dma_address(os_sgl);
+		sgl_ptr->Flags = 0;
+		sgl_ptr++;
+
+		sg_processed = i + 1;
+
+		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
+		    (sge_count > fusion->max_sge_in_main_msg)) {
+
+			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
+			cmd->io_request->ChainOffset =
+				fusion->chain_offset_io_request;
+			sg_chain = sgl_ptr;
+			/* Prepare chain element */
+			sg_chain->NextChainOffset = 0;
+			sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+					   MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+			sg_chain->Length =  (sizeof(union MPI2_SGE_IO_UNION)
+					     *(sge_count - sg_processed));
+			sg_chain->Address = cmd->sg_frame_phys_addr;
+
+			sgl_ptr =
+			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+		}
+	}
+
+	return sge_count;
+}
+
+/**
+ * megasas_set_pd_lba -	Sets PD LBA
+ * @io_request:		IO request frame whose CDB is filled in
+ * @cdb_len:		CDB length
+ * @io_info:		IO info (start block, number of blocks, direction)
+ * @scp:		SCSI command from the mid-layer
+ * @local_map_ptr:	Current RAID map
+ * @ref_tag:		Logical block reference tag (T10 PI)
+ *
+ * Used to set the PD LBA in CDB for FP IOs
+ */
+void
+megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
+		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
+		   struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+{
+	struct MR_LD_RAID *raid;
+	u32 ld;
+	u64 start_blk = io_info->pdBlock;
+	u8 *cdb = io_request->CDB.CDB32;
+	u32 num_blocks = io_info->numBlocks;
+	u8 opcode, flagvals, groupnum, control;
+
+	/* Check if T10 PI (DIF) is enabled for this LD */
+	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+	raid = MR_LdRaidGet(ld, local_map_ptr);
+	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
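+		/*
+		 * Protection info is handled by the controller: build a
+		 * 32-byte variable-length READ32/WRITE32 CDB carrying the
+		 * EEDP reference tag along with the LBA.
+		 */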
+		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
+		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
+
+		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
+		else
+			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
+		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
+
+		/* LBA */
+		cdb[12] = (u8)((start_blk >> 56) & 0xff);
+		cdb[13] = (u8)((start_blk >> 48) & 0xff);
+		cdb[14] = (u8)((start_blk >> 40) & 0xff);
+		cdb[15] = (u8)((start_blk >> 32) & 0xff);
+		cdb[16] = (u8)((start_blk >> 24) & 0xff);
+		cdb[17] = (u8)((start_blk >> 16) & 0xff);
+		cdb[18] = (u8)((start_blk >> 8) & 0xff);
+		cdb[19] = (u8)(start_blk & 0xff);
+
+		/* Logical block reference tag */
+		io_request->CDB.EEDP32.PrimaryReferenceTag =
+			cpu_to_be32(ref_tag);
+		io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
+
+		io_request->DataLength = num_blocks * 512;
+		io_request->IoFlags = 32; /* Specify 32-byte cdb */
+
+		/* Transfer length */
+		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
+		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
+		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
+		cdb[31] = (u8)(num_blocks & 0xff);
+
+		/* set SCSI IO EEDPFlags */
+		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+			io_request->EEDPFlags =
+				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+		} else {
+			io_request->EEDPFlags =
+				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+		}
+		io_request->Control |= (0x4 << 26);
+		io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+	} else {
+		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
+		if (((cdb_len == 12) || (cdb_len == 16)) &&
+		    (start_blk <= 0xffffffff)) {
+			if (cdb_len == 16) {
+				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+				flagvals = cdb[1];
+				groupnum = cdb[14];
+				control = cdb[15];
+			} else {
+				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+				flagvals = cdb[1];
+				groupnum = cdb[10];
+				control = cdb[11];
+			}
+
+			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+			cdb[0] = opcode;
+			cdb[1] = flagvals;
+			cdb[6] = groupnum;
+			cdb[9] = control;
+
+			/* Transfer length */
+			cdb[8] = (u8)(num_blocks & 0xff);
+			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+
+			cdb_len = 10;
+		}
+
+		/* Normal case, just load LBA here */
+		switch (cdb_len) {
+		case 6:
+		{
+			u8 val = cdb[1] & 0xE0;
+			cdb[3] = (u8)(start_blk & 0xff);
+			cdb[2] = (u8)((start_blk >> 8) & 0xff);
+			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
+			break;
+		}
+		case 10:
+			cdb[5] = (u8)(start_blk & 0xff);
+			cdb[4] = (u8)((start_blk >> 8) & 0xff);
+			cdb[3] = (u8)((start_blk >> 16) & 0xff);
+			cdb[2] = (u8)((start_blk >> 24) & 0xff);
+			break;
+		case 12:
+			cdb[5]    = (u8)(start_blk & 0xff);
+			cdb[4]    = (u8)((start_blk >> 8) & 0xff);
+			cdb[3]    = (u8)((start_blk >> 16) & 0xff);
+			cdb[2]    = (u8)((start_blk >> 24) & 0xff);
+			break;
+		case 16:
+			cdb[9]    = (u8)(start_blk & 0xff);
+			cdb[8]    = (u8)((start_blk >> 8) & 0xff);
+			cdb[7]    = (u8)((start_blk >> 16) & 0xff);
+			cdb[6]    = (u8)((start_blk >> 24) & 0xff);
+			cdb[5]    = (u8)((start_blk >> 32) & 0xff);
+			cdb[4]    = (u8)((start_blk >> 40) & 0xff);
+			cdb[3]    = (u8)((start_blk >> 48) & 0xff);
+			cdb[2]    = (u8)((start_blk >> 56) & 0xff);
+			break;
+		}
+	}
+}
+
+/**
+ * megasas_build_ldio_fusion -	Prepares IOs to devices
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Prepares the io_request and chain elements (sg_frame) for IO
+ * The IO can be for PD (Fast Path) or LD
+ */
+void
+megasas_build_ldio_fusion(struct megasas_instance *instance,
+			  struct scsi_cmnd *scp,
+			  struct megasas_cmd_fusion *cmd)
+{
+	u8 fp_possible;
+	u32 start_lba_lo, start_lba_hi, device_id;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	struct IO_REQUEST_INFO io_info;
+	struct fusion_context *fusion;
+	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+
+	device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+	fusion = instance->ctrl_context;
+
+	io_request = cmd->io_request;
+	io_request->RaidContext.VirtualDiskTgtId = device_id;
+	io_request->RaidContext.status = 0;
+	io_request->RaidContext.exStatus = 0;
+
+	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+
+	start_lba_lo = 0;
+	start_lba_hi = 0;
+	fp_possible = 0;
+
+	/*
+	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
+	 */
+	if (scp->cmd_len == 6) {
+		io_request->DataLength = (u32) scp->cmnd[4];
+		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
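+		/* 6-byte CDBs carry only a 21-bit LBA */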
+		start_lba_lo &= 0x1FFFFF;
+	}
+
+	/*
+	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
+	 */
+	else if (scp->cmd_len == 10) {
+		io_request->DataLength = (u32) scp->cmnd[8] |
+			((u32) scp->cmnd[7] << 8);
+		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+			((u32) scp->cmnd[3] << 16) |
+			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	/*
+	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+	 */
+	else if (scp->cmd_len == 12) {
+		io_request->DataLength = ((u32) scp->cmnd[6] << 24) |
+			((u32) scp->cmnd[7] << 16) |
+			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+			((u32) scp->cmnd[3] << 16) |
+			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	/*
+	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
+	 */
+	else if (scp->cmd_len == 16) {
+		io_request->DataLength = ((u32) scp->cmnd[10] << 24) |
+			((u32) scp->cmnd[11] << 16) |
+			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+			((u32) scp->cmnd[7] << 16) |
+			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+			((u32) scp->cmnd[3] << 16) |
+			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+	}
+
+	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
+	io_info.numBlocks = io_request->DataLength;
+	io_info.ldTgtId = device_id;
+
+	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		io_info.isRead = 1;
+
+	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+
+	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
+	     MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
+		io_request->RaidContext.regLockFlags  = 0;
+		fp_possible = 0;
+	} else {
+		if (MR_BuildRaidContext(&io_info, &io_request->RaidContext,
+					local_map_ptr))
+			fp_possible = io_info.fpOkForIo;
+	}
+
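+	/* Fast path: issue the IO directly to the device handle; otherwise
+	 * hand it to the firmware as an LD IO */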
+	if (fp_possible) {
+		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+				   local_map_ptr, start_lba_lo);
+		io_request->DataLength = scsi_bufflen(scp);
+		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
+		    (io_info.isRead)) {
+			io_info.devHandle =
+				get_updated_dev_handle(
+					&fusion->load_balance_info[device_id],
+					&io_info);
+			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+		} else
+			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+		io_request->DevHandle = io_info.devHandle;
+	} else {
+		io_request->RaidContext.timeoutValue =
+			local_map_ptr->raidMap.fpPdIoTimeoutSec;
+		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+		io_request->DevHandle = device_id;
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
+			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	} /* Not FP */
+}
+
+/**
+ * megasas_build_dcdb_fusion -	Prepares non read/write cmds
+ * @instance:		Adapter soft state
+ * @scmd:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Prepares the io_request frame for non read/write cmds
+ */
+static void
+megasas_build_dcdb_fusion(struct megasas_instance *instance,
+			  struct scsi_cmnd *scmd,
+			  struct megasas_cmd_fusion *cmd)
+{
+	u32 device_id;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+	u16 pd_index = 0;
+	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+	struct fusion_context *fusion = instance->ctrl_context;
+
+	io_request = cmd->io_request;
+	device_id = MEGASAS_DEV_INDEX(instance, scmd);
+	pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+		+scmd->device->id;
+	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+
+	/* Check if this is a system PD I/O */
+	if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
+	    (instance->pd_list[pd_index].driveType == TYPE_DISK)) {
+		io_request->Function = 0;
+		io_request->DevHandle =
+			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+		io_request->RaidContext.timeoutValue =
+			local_map_ptr->raidMap.fpPdIoTimeoutSec;
+		io_request->RaidContext.regLockFlags = 0;
+		io_request->RaidContext.regLockRowLBA = 0;
+		io_request->RaidContext.regLockLength = 0;
+		io_request->RaidContext.RAIDFlags =
+			MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
+			MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	} else {
+		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+		io_request->DevHandle = device_id;
+		cmd->request_desc->SCSIIO.RequestFlags =
+			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+	}
+	io_request->RaidContext.VirtualDiskTgtId = device_id;
+	io_request->LUN[0] = scmd->device->lun;
+	io_request->DataLength = scsi_bufflen(scmd);
+}
+
+/**
+ * megasas_build_io_fusion -	Prepares IOs to devices
+ * @instance:		Adapter soft state
+ * @scp:		SCSI command
+ * @cmd:		Command to be prepared
+ *
+ * Invokes helper functions to prepare request frames
+ * and sets flags appropriate for IO/Non-IO cmd
+ */
+int
+megasas_build_io_fusion(struct megasas_instance *instance,
+			struct scsi_cmnd *scp,
+			struct megasas_cmd_fusion *cmd)
+{
+	u32 device_id, sge_count;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+
+	device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+	/* Zero out some fields so they don't get reused */
+	io_request->LUN[0] = 0;
+	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
+	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
+	io_request->EEDPFlags = 0;
+	io_request->Control = 0;
+	io_request->EEDPBlockSize = 0;
+	io_request->IoFlags = 0;
+	io_request->RaidContext.RAIDFlags = 0;
+
+	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
+	/*
+	 * Just the CDB length; the rest of the IoFlags bits are zero.
+	 * This will be modified for FP in build_ldio_fusion.
+	 */
+	io_request->IoFlags = scp->cmd_len;
+
+	if (megasas_is_ldio(scp))
+		megasas_build_ldio_fusion(instance, scp, cmd);
+	else
+		megasas_build_dcdb_fusion(instance, scp, cmd);
+
+	/*
+	 * Construct SGL
+	 */
+
+	sge_count =
+		megasas_make_sgl_fusion(instance, scp,
+					(struct MPI25_IEEE_SGE_CHAIN64 *)
+					&io_request->SGL, cmd);
+
+	if (sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
+		       "max (0x%x) allowed\n", sge_count,
+		       instance->max_num_sge);
+		return 1;
+	}
+
+	io_request->RaidContext.numSGE = sge_count;
+
+	io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
+
+	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+		io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
+	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+		io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
+
+	io_request->SGLOffset0 =
+		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
+
+	io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
+	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+
+	cmd->scmd = scp;
+	scp->SCp.ptr = (char *)cmd;
+
+	return 0;
+}
+
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
+{
+	u8 *p;
+	struct fusion_context *fusion;
+
+	if (index >= instance->max_fw_cmds) {
+		printk(KERN_ERR "megasas: Invalid SMID (0x%x) request for "
+		       "descriptor\n", index);
+		return NULL;
+	}
+	fusion = instance->ctrl_context;
+	p = fusion->req_frames_desc
+		+sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
+
+	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
+
+/**
+ * megasas_build_and_issue_cmd_fusion - Main routine for building and
+ *                                      issuing a non-IOCTL cmd
+ * @instance:			Adapter soft state
+ * @scmd:			pointer to scsi cmd from OS
+ */
+static u32
+megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
+				   struct scsi_cmnd *scmd)
+{
+	struct megasas_cmd_fusion *cmd;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	u32 index;
+	struct fusion_context *fusion;
+
+	fusion = instance->ctrl_context;
+
+	cmd = megasas_get_cmd_fusion(instance);
+	if (!cmd)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	index = cmd->index;
+
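+	/* SMIDs are 1-based; the request descriptor array is 0-based */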
+	req_desc = megasas_get_request_descriptor(instance, index-1);
+	if (!req_desc)
+		return 1;
+
+	req_desc->Words = 0;
+	cmd->request_desc = req_desc;
+	cmd->request_desc->Words = 0;
+
+	if (megasas_build_io_fusion(instance, scmd, cmd)) {
+		megasas_return_cmd_fusion(instance, cmd);
+		printk(KERN_ERR "megasas: Error building command.\n");
+		cmd->request_desc = NULL;
+		return 1;
+	}
+
+	req_desc = cmd->request_desc;
+	req_desc->SCSIIO.SMID = index;
+
+	if (cmd->io_request->ChainOffset != 0 &&
+	    cmd->io_request->ChainOffset != 0xF)
+		printk(KERN_ERR "megasas: The chain offset value is not "
+		       "correct : %x\n", cmd->io_request->ChainOffset);
+
+	/*
+	 * Issue the command to the FW
+	 */
+	atomic_inc(&instance->fw_outstanding);
+
+	instance->instancet->fire_cmd(instance,
+				      req_desc->u.low, req_desc->u.high,
+				      instance->reg_set);
+
+	return 0;
+}
+
+/**
+ * complete_cmd_fusion -	Completes commands
+ * @instance:			Adapter soft state
+ * Completes all commands that are in the reply descriptor queue
+ */
+int
+complete_cmd_fusion(struct megasas_instance *instance)
+{
+	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
+	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
+	struct fusion_context *fusion;
+	struct megasas_cmd *cmd_mfi;
+	struct megasas_cmd_fusion *cmd_fusion;
+	u16 smid, num_completed;
+	u8 reply_descript_type, arm;
+	u32 status, extStatus, device_id;
+	union desc_value d_val;
+	struct LD_LOAD_BALANCE_INFO *lbinfo;
+
+	fusion = instance->ctrl_context;
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+		return IRQ_HANDLED;
+
+	desc = fusion->reply_frames_desc;
+	desc += fusion->last_reply_idx;
+
+	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+	d_val.word = desc->Words;
+
+	reply_descript_type = reply_desc->ReplyFlags &
+		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+		return IRQ_NONE;
+
+	d_val.word = desc->Words;
+
+	num_completed = 0;
+
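+	/* Reply descriptors are pre-filled with all ones; an all-ones entry
+	 * marks the end of the new completions */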
+	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
+		smid = reply_desc->SMID;
+
+		cmd_fusion = fusion->cmd_list[smid - 1];
+
+		scsi_io_req =
+			(struct MPI2_RAID_SCSI_IO_REQUEST *)
+		  cmd_fusion->io_request;
+
+		if (cmd_fusion->scmd)
+			cmd_fusion->scmd->SCp.ptr = NULL;
+
+		status = scsi_io_req->RaidContext.status;
+		extStatus = scsi_io_req->RaidContext.exStatus;
+
+		switch (scsi_io_req->Function) {
+		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
+			/* Update load balancing info */
+			device_id = MEGASAS_DEV_INDEX(instance,
+						      cmd_fusion->scmd);
+			lbinfo = &fusion->load_balance_info[device_id];
+			if (cmd_fusion->scmd->SCp.Status &
+			    MEGASAS_LOAD_BALANCE_FLAG) {
+				arm = lbinfo->raid1DevHandle[0] ==
+					cmd_fusion->io_request->DevHandle ? 0 :
+					1;
+				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+				cmd_fusion->scmd->SCp.Status &=
+					~MEGASAS_LOAD_BALANCE_FLAG;
+			}
+			if (reply_descript_type ==
+			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+				if (megasas_dbg_lvl == 5)
+					printk(KERN_ERR "\nmegasas: FAST Path "
+					       "IO Success\n");
+			}
+			/* Fall thru and complete IO */
+		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
+			/* Map the FW Cmd Status */
+			map_cmd_status(cmd_fusion, status, extStatus);
+			scsi_dma_unmap(cmd_fusion->scmd);
+			cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+			scsi_io_req->RaidContext.status = 0;
+			scsi_io_req->RaidContext.exStatus = 0;
+			megasas_return_cmd_fusion(instance, cmd_fusion);
+			atomic_dec(&instance->fw_outstanding);
+
+			break;
+		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+			cmd_fusion->flags = 0;
+			megasas_return_cmd_fusion(instance, cmd_fusion);
+
+			break;
+		}
+
+		fusion->last_reply_idx++;
+		if (fusion->last_reply_idx >= fusion->reply_q_depth)
+			fusion->last_reply_idx = 0;
+
+		desc->Words = ULLONG_MAX;
+		num_completed++;
+
+		/* Get the next reply descriptor */
+		if (!fusion->last_reply_idx)
+			desc = fusion->reply_frames_desc;
+		else
+			desc++;
+
+		reply_desc =
+		  (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+		d_val.word = desc->Words;
+
+		reply_descript_type = reply_desc->ReplyFlags &
+			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+			break;
+	}
+
+	if (!num_completed)
+		return IRQ_NONE;
+
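+	/* Order the descriptor updates before telling the HW our new read
+	 * index */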
+	wmb();
+	writel(fusion->last_reply_idx,
+	       &instance->reg_set->reply_post_host_index);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * megasas_complete_cmd_dpc_fusion -	Completes commands
+ * @instance_addr:			Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+void
+megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+{
+	struct megasas_instance *instance =
+		(struct megasas_instance *)instance_addr;
+	unsigned long flags;
+
+	/* If we have already declared adapter dead, do not complete cmds */
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		spin_unlock_irqrestore(&instance->hba_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+	spin_lock_irqsave(&instance->completion_lock, flags);
+	complete_cmd_fusion(instance);
+	spin_unlock_irqrestore(&instance->completion_lock, flags);
+}
+
+/**
+ * megasas_isr_fusion - ISR entry point
+ * @irq:			IRQ number
+ * @devp:			Adapter soft state (struct megasas_instance *)
+ */
+irqreturn_t megasas_isr_fusion(int irq, void *devp)
+{
+	struct megasas_instance *instance = (struct megasas_instance *)devp;
+	u32 mfiStatus, fw_state;
+
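+	/* For legacy (INTx) interrupts, check and clear the interrupt status;
+	 * a zero status means the interrupt is not ours */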
+	if (!instance->msi_flag) {
+		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+		if (!mfiStatus)
+			return IRQ_NONE;
+	}
+
+	/* If we are resetting, bail */
+	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+		return IRQ_HANDLED;
+
+	if (!complete_cmd_fusion(instance)) {
+		/* If we didn't complete any commands, check for FW fault */
+		fw_state = instance->instancet->read_fw_status_reg(
+			instance->reg_set) & MFI_STATE_MASK;
+		if (fw_state == MFI_STATE_FAULT)
+			schedule_work(&instance->work_init);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * build_mpt_mfi_pass_thru - builds a cmd for an MFI Pass thru
+ * @instance:			Adapter soft state
+ * @mfi_cmd:			megasas_cmd pointer
+ *
+ */
+u8
+build_mpt_mfi_pass_thru(struct megasas_instance *instance,
+			struct megasas_cmd *mfi_cmd)
+{
+	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
+	struct megasas_cmd_fusion *cmd;
+	struct fusion_context *fusion;
+	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+
+	cmd = megasas_get_cmd_fusion(instance);
+	if (!cmd)
+		return 1;
+
+	/* Save the SMID. To be used for returning the cmd */
+	mfi_cmd->context.smid = cmd->index;
+
+	cmd->sync_cmd_idx = mfi_cmd->index;
+
+	/*
+	 * If the caller set MFI_FRAME_DONT_POST_IN_REPLY_QUEUE, store the
+	 * flag and check it on completion; for such cmds, don't call
+	 * megasas_complete_cmd
+	 */
+
+	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
+		cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+	fusion = instance->ctrl_context;
+	io_req = cmd->io_request;
+	mpi25_ieee_chain =
+	  (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
+				       SGL) / 4;
+	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
+
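+	/* Link the legacy MFI frame in via an IEEE chain element so the
+	 * firmware can fetch it */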
+	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
+
+	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+	mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME;
+
+	return 0;
+}
+
+/**
+ * build_mpt_cmd - Calls helper function to build an MPT-MFI Pass thru cmd
+ * @instance:			Adapter soft state
+ * @cmd:			mfi cmd to build
+ *
+ */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	u16 index;
+
+	if (build_mpt_mfi_pass_thru(instance, cmd)) {
+		printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
+		return NULL;
+	}
+
+	index = cmd->context.smid;
+
+	req_desc = megasas_get_request_descriptor(instance, index - 1);
+
+	if (!req_desc)
+		return NULL;
+
+	req_desc->Words = 0;
+	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+	req_desc->SCSIIO.SMID = index;
+
+	return req_desc;
+}
+
+/**
+ * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd
+ * @instance:			Adapter soft state
+ * @cmd:			mfi cmd pointer
+ *
+ */
+void
+megasas_issue_dcmd_fusion(struct megasas_instance *instance,
+			  struct megasas_cmd *cmd)
+{
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	union desc_value d_val;
+
+	req_desc = build_mpt_cmd(instance, cmd);
+	if (!req_desc) {
+		printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
+		return;
+	}
+	d_val.word = req_desc->Words;
+
+	instance->instancet->fire_cmd(instance, req_desc->u.low,
+				      req_desc->u.high, instance->reg_set);
+}
+
+/**
+ * megasas_release_fusion -	Reverses the FW initialization
+ * @instance:			Adapter soft state
+ */
+void
+megasas_release_fusion(struct megasas_instance *instance)
+{
+	megasas_free_cmds(instance);
+	megasas_free_cmds_fusion(instance);
+
+	iounmap(instance->reg_set);
+
+	pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_read_fw_status_reg_fusion - returns the current FW status value
+ * @regs:			MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+{
+	return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_adp_reset_fusion -	For controller reset
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_adp_reset_fusion(struct megasas_instance *instance,
+			 struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+
+/**
+ * megasas_check_reset_fusion -	For controller reset check
+ * @instance:				Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_fusion(struct megasas_instance *instance,
+			   struct megasas_register_set __iomem *regs)
+{
+	return 0;
+}
+
+/* This function waits for outstanding commands on fusion to complete */
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
+{
+	int i, outstanding, retval = 0;
+	u32 fw_state, wait_time = MEGASAS_RESET_WAIT_TIME;
+
+	for (i = 0; i < wait_time; i++) {
+		/* Check if firmware is in fault state */
+		fw_state = instance->instancet->read_fw_status_reg(
+			instance->reg_set) & MFI_STATE_MASK;
+		if (fw_state == MFI_STATE_FAULT) {
+			printk(KERN_WARNING "megasas: Found FW in FAULT state,"
+			       " will reset adapter.\n");
+			retval = 1;
+			goto out;
+		}
+
+		outstanding = atomic_read(&instance->fw_outstanding);
+		if (!outstanding)
+			goto out;
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: [%2d] waiting for %d "
+			       "commands to complete\n", i, outstanding);
+			megasas_complete_cmd_dpc_fusion(
+				(unsigned long)instance);
+		}
+		msleep(1000);
+	}
+
+	if (atomic_read(&instance->fw_outstanding)) {
+		printk(KERN_WARNING "megaraid_sas: pending commands remain "
+		       "after waiting, will reset adapter.\n");
+		retval = 1;
+	}
+out:
+	return retval;
+}
+
+void  megasas_reset_reply_desc(struct megasas_instance *instance)
+{
+	int i;
+	struct fusion_context *fusion;
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+	fusion = instance->ctrl_context;
+	fusion->last_reply_idx = 0;
+	reply_desc = fusion->reply_frames_desc;
+	for (i = 0 ; i < fusion->reply_q_depth; i++, reply_desc++)
+		reply_desc->Words = ULLONG_MAX;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost)
+{
+	int retval = SUCCESS, i, j, retry = 0;
+	struct megasas_instance *instance;
+	struct megasas_cmd_fusion *cmd_fusion;
+	struct fusion_context *fusion;
+	struct megasas_cmd *cmd_mfi;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+	u32 host_diag, abs_state;
+
+	instance = (struct megasas_instance *)shost->hostdata;
+	fusion = instance->ctrl_context;
+
+	mutex_lock(&instance->reset_mutex);
+	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+	instance->instancet->disable_intr(instance->reg_set);
+	msleep(1000);
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
+		       "returning FAILED.\n");
+		retval = FAILED;
+		goto out;
+	}
+
+	/* First try waiting for commands to complete */
+	if (megasas_wait_for_outstanding_fusion(instance)) {
+		printk(KERN_WARNING "megaraid_sas: resetting fusion "
+		       "adapter.\n");
+		/* Now return commands back to the OS */
+		for (i = 0 ; i < instance->max_fw_cmds; i++) {
+			cmd_fusion = fusion->cmd_list[i];
+			if (cmd_fusion->scmd) {
+				scsi_dma_unmap(cmd_fusion->scmd);
+				cmd_fusion->scmd->result = (DID_RESET << 16);
+				cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+				megasas_return_cmd_fusion(instance, cmd_fusion);
+				atomic_dec(&instance->fw_outstanding);
+			}
+		}
+
+		if (instance->disableOnlineCtrlReset == 1) {
+			/* Reset not supported, kill adapter */
+			printk(KERN_WARNING "megaraid_sas: Reset not supported"
+			       ", killing adapter.\n");
+			megaraid_sas_kill_hba(instance);
+			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+			retval = FAILED;
+			goto out;
+		}
+
+		/* Now try to reset the chip */
+		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
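+			/* Write the magic key sequence to unlock the host
+			 * diag register */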
+			writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+			writel(MPI2_WRSEQ_1ST_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+			writel(MPI2_WRSEQ_2ND_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+			writel(MPI2_WRSEQ_3RD_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+			writel(MPI2_WRSEQ_4TH_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+			writel(MPI2_WRSEQ_5TH_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+			writel(MPI2_WRSEQ_6TH_KEY_VALUE,
+			       &instance->reg_set->fusion_seq_offset);
+
+			/* Check that the diag write enable (DRWE) bit is on */
+			host_diag = readl(&instance->reg_set->fusion_host_diag);
+			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+				msleep(100);
+				host_diag =
+				readl(&instance->reg_set->fusion_host_diag);
+				if (retry++ == 100) {
+					printk(KERN_WARNING "megaraid_sas: "
+					       "Host diag unlock failed!\n");
+					break;
+				}
+			}
+			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+				continue;
+
+			/* Send chip reset command */
+			writel(host_diag | HOST_DIAG_RESET_ADAPTER,
+			       &instance->reg_set->fusion_host_diag);
+			msleep(3000);
+
+			/* Make sure reset adapter bit is cleared */
+			host_diag = readl(&instance->reg_set->fusion_host_diag);
+			retry = 0;
+			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+				msleep(100);
+				host_diag =
+				readl(&instance->reg_set->fusion_host_diag);
+				if (retry++ == 1000) {
+					printk(KERN_WARNING "megaraid_sas: "
+					       "Diag reset adapter never "
+					       "cleared!\n");
+					break;
+				}
+			}
+			if (host_diag & HOST_DIAG_RESET_ADAPTER)
+				continue;
+
+			abs_state =
+				instance->instancet->read_fw_status_reg(
+					instance->reg_set);
+			retry = 0;
+
+			while ((abs_state <= MFI_STATE_FW_INIT) &&
+			       (retry++ < 1000)) {
+				msleep(100);
+				abs_state =
+				instance->instancet->read_fw_status_reg(
+					instance->reg_set);
+			}
+			if (abs_state <= MFI_STATE_FW_INIT) {
+				printk(KERN_WARNING "megaraid_sas: firmware "
+				       "state < MFI_STATE_FW_INIT, state = "
+				       "0x%x\n", abs_state);
+				continue;
+			}
+
+			/* Wait for FW to become ready */
+			if (megasas_transition_to_ready(instance)) {
+				printk(KERN_WARNING "megaraid_sas: Failed to "
+				       "transition controller to ready.\n");
+				continue;
+			}
+
+			megasas_reset_reply_desc(instance);
+			if (megasas_ioc_init_fusion(instance)) {
+				printk(KERN_WARNING "megaraid_sas: "
+				       "megasas_ioc_init_fusion() failed!\n");
+				continue;
+			}
+
+			instance->instancet->enable_intr(instance->reg_set);
+			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
+			/* Re-fire management commands */
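+			/* Pending RAID map requests are simply returned since
+			 * a fresh map is fetched below; everything else is
+			 * re-fired to the firmware */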
+			for (j = 0 ; j < instance->max_fw_cmds; j++) {
+				cmd_fusion = fusion->cmd_list[j];
+				if (cmd_fusion->sync_cmd_idx !=
+				    (u32)ULONG_MAX) {
+					cmd_mfi =
+					instance->
+					cmd_list[cmd_fusion->sync_cmd_idx];
+					if (cmd_mfi->frame->dcmd.opcode ==
+					    MR_DCMD_LD_MAP_GET_INFO) {
+						megasas_return_cmd(instance,
+								   cmd_mfi);
+						megasas_return_cmd_fusion(
+							instance, cmd_fusion);
+					} else  {
+						req_desc =
+						megasas_get_request_descriptor(
+							instance,
+							cmd_mfi->context.smid
+							-1);
+						if (!req_desc)
+							printk(KERN_WARNING
+							       "req_desc NULL"
+							       "\n");
+						else {
+							instance->instancet->
+							fire_cmd(instance,
+								 req_desc->
+								 u.low,
+								 req_desc->
+								 u.high,
+								 instance->
+								 reg_set);
+						}
+					}
+				}
+			}
+
+			/* Reset load balance info */
+			memset(fusion->load_balance_info, 0,
+			       sizeof(struct LD_LOAD_BALANCE_INFO)
+			       *MAX_LOGICAL_DRIVES);
+
+			if (!megasas_get_map_info(instance))
+				megasas_sync_map_info(instance);
+
+			/* Adapter reset completed successfully */
+			printk(KERN_WARNING "megaraid_sas: Reset "
+			       "successful.\n");
+			retval = SUCCESS;
+			goto out;
+		}
+		/* Reset failed, kill the adapter */
+		printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
+		       "adapter.\n");
+		megaraid_sas_kill_hba(instance);
+		retval = FAILED;
+	} else {
+		instance->instancet->enable_intr(instance->reg_set);
+		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+	}
+out:
+	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+	mutex_unlock(&instance->reset_mutex);
+	return retval;
+}
+
+/* Fusion OCR work queue */
+void megasas_fusion_ocr_wq(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance, work_init);
+
+	megasas_reset_fusion(instance->host);
+}
+
+struct megasas_instance_template megasas_instance_template_fusion = {
+	.fire_cmd = megasas_fire_cmd_fusion,
+	.enable_intr = megasas_enable_intr_fusion,
+	.disable_intr = megasas_disable_intr_fusion,
+	.clear_intr = megasas_clear_intr_fusion,
+	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
+	.adp_reset = megasas_adp_reset_fusion,
+	.check_reset = megasas_check_reset_fusion,
+	.service_isr = megasas_isr_fusion,
+	.tasklet = megasas_complete_cmd_dpc_fusion,
+	.init_adapter = megasas_init_adapter_fusion,
+	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
+	.issue_dcmd = megasas_issue_dcmd_fusion,
+};
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
new file mode 100644
index 0000000..82b577a
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -0,0 +1,695 @@
+/*
+ *  Linux MegaRAID driver for SAS based RAID controllers
+ *
+ *  Copyright (c) 2009-2011  LSI Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *  FILE: megaraid_sas_fusion.h
+ *
+ *  Authors: LSI Corporation
+ *           Manoj Jose
+ *           Sumant Patro
+ *
+ *  Send feedback to: <megaraidlinux@lsi.com>
+ *
+ *  Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ *     ATTN: Linuxraid
+ */
+
+#ifndef _MEGARAID_SAS_FUSION_H_
+#define _MEGARAID_SAS_FUSION_H_
+
+/* Fusion defines */
+#define MEGASAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST   0xF0
+#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST         0xF1
+#define MEGASAS_LOAD_BALANCE_FLAG		    0x1
+#define MEGASAS_DCMD_MBOX_PEND_FLAG		    0x1
+#define HOST_DIAG_WRITE_ENABLE			    0x80
+#define HOST_DIAG_RESET_ADAPTER			    0x4
+#define MEGASAS_FUSION_MAX_RESET_TRIES		    3
+
+/* T10 PI defines */
+#define MR_PROT_INFO_TYPE_CONTROLLER                0x8
+#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD            0x7f
+#define MEGASAS_SCSI_SERVICE_ACTION_READ32          0x9
+#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32         0xB
+#define MEGASAS_SCSI_ADDL_CDB_LEN                   0x18
+#define MEGASAS_RD_WR_PROTECT_CHECK_ALL		    0x20
+#define MEGASAS_RD_WR_PROTECT_CHECK_NONE	    0x60
+#define MEGASAS_EEDPBLOCKSIZE			    512
+
+/*
+ * Raid context flags
+ */
+
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT   0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK    0x30
+enum MR_RAID_FLAGS_IO_SUB_TYPE {
+	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+};
+
+/*
+ * Request descriptor types
+ */
+#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO           0x7
+#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA             0x1
+
+#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT      1
+
+#define MEGASAS_FP_CMD_LEN	16
+#define MEGASAS_FUSION_IN_RESET 0
+
+/*
+ * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ */
+
+struct RAID_CONTEXT {
+	u16     resvd0;
+	u16     timeoutValue;
+	u8      regLockFlags;
+	u8      resvd1;
+	u16     VirtualDiskTgtId;
+	u64     regLockRowLBA;
+	u32     regLockLength;
+	u16     nextLMId;
+	u8      exStatus;
+	u8      status;
+	u8      RAIDFlags;
+	u8      numSGE;
+	u16	configSeqNum;
+	u8      spanArm;
+	u8      resvd2[3];
+};
+
+#define RAID_CTX_SPANARM_ARM_SHIFT	(0)
+#define RAID_CTX_SPANARM_ARM_MASK	(0x1f)
+
+#define RAID_CTX_SPANARM_SPAN_SHIFT	(5)
+#define RAID_CTX_SPANARM_SPAN_MASK	(0xE0)
+
+/*
+ * define region lock types
+ */
+enum REGION_TYPE {
+	REGION_TYPE_UNUSED       = 0,
+	REGION_TYPE_SHARED_READ  = 1,
+	REGION_TYPE_SHARED_WRITE = 2,
+	REGION_TYPE_EXCLUSIVE    = 3,
+};
+
+/* MPI2 defines */
+#define MPI2_FUNCTION_IOC_INIT              (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER            (0x04)
+#define MPI2_VERSION_MAJOR                  (0x02)
+#define MPI2_VERSION_MINOR                  (0x00)
+#define MPI2_VERSION_MAJOR_MASK             (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT            (8)
+#define MPI2_VERSION_MINOR_MASK             (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT            (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+		      MPI2_VERSION_MINOR)
+#define MPI2_HEADER_VERSION_UNIT            (0x10)
+#define MPI2_HEADER_VERSION_DEV             (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
+#define MPI2_HEADER_VERSION_DEV_MASK        (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT       (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+			     MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR      (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG        (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG          (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP       (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG          (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD           (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP             (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST               (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY           (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO                 (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING        (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE               (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ                (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK       (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED          (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK       (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE              (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET              (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE                (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE                (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE                (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE                (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE                (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE                (0xD)
+
+struct MPI25_IEEE_SGE_CHAIN64 {
+	u64                     Address;
+	u32                     Length;
+	u16                     Reserved1;
+	u8                      NextChainOffset;
+	u8                      Flags;
+};
+
+struct MPI2_SGE_SIMPLE_UNION {
+	u32                     FlagsLength;
+	union {
+		u32                 Address32;
+		u64                 Address64;
+	} u;
+};
+
+struct MPI2_SCSI_IO_CDB_EEDP32 {
+	u8                      CDB[20];                    /* 0x00 */
+	u32                     PrimaryReferenceTag;        /* 0x14 */
+	u16                     PrimaryApplicationTag;      /* 0x18 */
+	u16                     PrimaryApplicationTagMask;  /* 0x1A */
+	u32                     TransferLength;             /* 0x1C */
+};
+
+struct MPI2_SGE_CHAIN_UNION {
+	u16                     Length;
+	u8                      NextChainOffset;
+	u8                      Flags;
+	union {
+		u32                 Address32;
+		u64                 Address64;
+	} u;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE32 {
+	u32                     Address;
+	u32                     FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_CHAIN32 {
+	u32                     Address;
+	u32                     FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE64 {
+	u64                     Address;
+	u32                     Length;
+	u16                     Reserved1;
+	u8                      Reserved2;
+	u8                      Flags;
+};
+
+struct MPI2_IEEE_SGE_CHAIN64 {
+	u64                     Address;
+	u32                     Length;
+	u16                     Reserved1;
+	u8                      Reserved2;
+	u8                      Flags;
+};
+
+union MPI2_IEEE_SGE_SIMPLE_UNION {
+	struct MPI2_IEEE_SGE_SIMPLE32  Simple32;
+	struct MPI2_IEEE_SGE_SIMPLE64  Simple64;
+};
+
+union MPI2_IEEE_SGE_CHAIN_UNION {
+	struct MPI2_IEEE_SGE_CHAIN32   Chain32;
+	struct MPI2_IEEE_SGE_CHAIN64   Chain64;
+};
+
+union MPI2_SGE_IO_UNION {
+	struct MPI2_SGE_SIMPLE_UNION       MpiSimple;
+	struct MPI2_SGE_CHAIN_UNION        MpiChain;
+	union MPI2_IEEE_SGE_SIMPLE_UNION  IeeeSimple;
+	union MPI2_IEEE_SGE_CHAIN_UNION   IeeeChain;
+};
+
+union MPI2_SCSI_IO_CDB_UNION {
+	u8                      CDB32[32];
+	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+	struct MPI2_SGE_SIMPLE_UNION SGE;
+};
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+struct MPI2_RAID_SCSI_IO_REQUEST {
+	u16                     DevHandle;                      /* 0x00 */
+	u8                      ChainOffset;                    /* 0x02 */
+	u8                      Function;                       /* 0x03 */
+	u16                     Reserved1;                      /* 0x04 */
+	u8                      Reserved2;                      /* 0x06 */
+	u8                      MsgFlags;                       /* 0x07 */
+	u8                      VP_ID;                          /* 0x08 */
+	u8                      VF_ID;                          /* 0x09 */
+	u16                     Reserved3;                      /* 0x0A */
+	u32                     SenseBufferLowAddress;          /* 0x0C */
+	u16                     SGLFlags;                       /* 0x10 */
+	u8                      SenseBufferLength;              /* 0x12 */
+	u8                      Reserved4;                      /* 0x13 */
+	u8                      SGLOffset0;                     /* 0x14 */
+	u8                      SGLOffset1;                     /* 0x15 */
+	u8                      SGLOffset2;                     /* 0x16 */
+	u8                      SGLOffset3;                     /* 0x17 */
+	u32                     SkipCount;                      /* 0x18 */
+	u32                     DataLength;                     /* 0x1C */
+	u32                     BidirectionalDataLength;        /* 0x20 */
+	u16                     IoFlags;                        /* 0x24 */
+	u16                     EEDPFlags;                      /* 0x26 */
+	u32                     EEDPBlockSize;                  /* 0x28 */
+	u32                     SecondaryReferenceTag;          /* 0x2C */
+	u16                     SecondaryApplicationTag;        /* 0x30 */
+	u16                     ApplicationTagTranslationMask;  /* 0x32 */
+	u8                      LUN[8];                         /* 0x34 */
+	u32                     Control;                        /* 0x3C */
+	union MPI2_SCSI_IO_CDB_UNION  CDB;			/* 0x40 */
+	struct RAID_CONTEXT	RaidContext;                    /* 0x60 */
+	union MPI2_SGE_IO_UNION       SGL;			/* 0x80 */
+};
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+	u32     RequestFlags:8;
+	u32     MessageAddress1:24; /* bits 31:8*/
+	u32     MessageAddress2;      /* bits 61:32 */
+};
+
+/* Default Request Descriptor */
+struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u16             LMID;                       /* 0x04 */
+	u16             DescriptorTypeDependent;    /* 0x06 */
+};
+
+/* High Priority Request Descriptor */
+struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u16             LMID;                       /* 0x04 */
+	u16             Reserved1;                  /* 0x06 */
+};
+
+/* SCSI IO Request Descriptor */
+struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u16             LMID;                       /* 0x04 */
+	u16             DevHandle;                  /* 0x06 */
+};
+
+/* SCSI Target Request Descriptor */
+struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u16             LMID;                       /* 0x04 */
+	u16             IoIndex;                    /* 0x06 */
+};
+
+/* RAID Accelerator Request Descriptor */
+struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+	u8              RequestFlags;               /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u16             LMID;                       /* 0x04 */
+	u16             Reserved;                   /* 0x06 */
+};
+
+/* union of Request Descriptors */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION {
+	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR             Default;
+	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR       HighPriority;
+	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR             SCSIIO;
+	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR         SCSITarget;
+	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR          RAIDAccelerator;
+	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR      MFAIo;
+	union {
+		struct {
+			u32 low;
+			u32 high;
+		} u;
+		u64 Words;
+	};
+};
+
+/* Default Reply Descriptor */
+struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             DescriptorTypeDependent1;   /* 0x02 */
+	u32             DescriptorTypeDependent2;   /* 0x04 */
+};
+
+/* Address Reply Descriptor */
+struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u32             ReplyFrameAddress;          /* 0x04 */
+};
+
+/* SCSI IO Success Reply Descriptor */
+struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u16             TaskTag;                    /* 0x04 */
+	u16             Reserved1;                  /* 0x06 */
+};
+
+/* TargetAssist Success Reply Descriptor */
+struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u8              SequenceNumber;             /* 0x04 */
+	u8              Reserved1;                  /* 0x05 */
+	u16             IoIndex;                    /* 0x06 */
+};
+
+/* Target Command Buffer Reply Descriptor */
+struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u8              VP_ID;                      /* 0x02 */
+	u8              Flags;                      /* 0x03 */
+	u16             InitiatorDevHandle;         /* 0x04 */
+	u16             IoIndex;                    /* 0x06 */
+};
+
+/* RAID Accelerator Success Reply Descriptor */
+struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+	u8              ReplyFlags;                 /* 0x00 */
+	u8              MSIxIndex;                  /* 0x01 */
+	u16             SMID;                       /* 0x02 */
+	u32             Reserved;                   /* 0x04 */
+};
+
+/* union of Reply Descriptors */
+union MPI2_REPLY_DESCRIPTORS_UNION {
+	struct MPI2_DEFAULT_REPLY_DESCRIPTOR                   Default;
+	struct MPI2_ADDRESS_REPLY_DESCRIPTOR                   AddressReply;
+	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR           SCSIIOSuccess;
+	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+	RAIDAcceleratorSuccess;
+	u64                                             Words;
+};
+
+/* IOCInit Request message */
+struct MPI2_IOC_INIT_REQUEST {
+	u8                      WhoInit;                        /* 0x00 */
+	u8                      Reserved1;                      /* 0x01 */
+	u8                      ChainOffset;                    /* 0x02 */
+	u8                      Function;                       /* 0x03 */
+	u16                     Reserved2;                      /* 0x04 */
+	u8                      Reserved3;                      /* 0x06 */
+	u8                      MsgFlags;                       /* 0x07 */
+	u8                      VP_ID;                          /* 0x08 */
+	u8                      VF_ID;                          /* 0x09 */
+	u16                     Reserved4;                      /* 0x0A */
+	u16                     MsgVersion;                     /* 0x0C */
+	u16                     HeaderVersion;                  /* 0x0E */
+	u32                     Reserved5;                      /* 0x10 */
+	u16                     Reserved6;                      /* 0x14 */
+	u8                      Reserved7;                      /* 0x16 */
+	u8                      HostMSIxVectors;                /* 0x17 */
+	u16                     Reserved8;                      /* 0x18 */
+	u16                     SystemRequestFrameSize;         /* 0x1A */
+	u16                     ReplyDescriptorPostQueueDepth;  /* 0x1C */
+	u16                     ReplyFreeQueueDepth;            /* 0x1E */
+	u32                     SenseBufferAddressHigh;         /* 0x20 */
+	u32                     SystemReplyAddressHigh;         /* 0x24 */
+	u64                     SystemRequestFrameBaseAddress;  /* 0x28 */
+	u64                     ReplyDescriptorPostQueueAddress;/* 0x30 */
+	u64                     ReplyFreeQueueAddress;          /* 0x38 */
+	u64                     TimeStamp;                      /* 0x40 */
+};
+
+/* mrpriv defines */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
+
+struct MR_DEV_HANDLE_INFO {
+	u16     curDevHdl;
+	u8      validHandles;
+	u8      reserved;
+	u16     devHandle[2];
+};
+
+struct MR_ARRAY_INFO {
+	u16      pd[MAX_RAIDMAP_ROW_SIZE];
+};
+
+struct MR_QUAD_ELEMENT {
+	u64     logStart;
+	u64     logEnd;
+	u64     offsetInSpan;
+	u32     diff;
+	u32     reserved1;
+};
+
+struct MR_SPAN_INFO {
+	u32             noElements;
+	u32             reserved1;
+	struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_LD_SPAN {
+	u64      startBlk;
+	u64      numBlks;
+	u16      arrayRef;
+	u8       reserved[6];
+};
+
+struct MR_SPAN_BLOCK_INFO {
+	u64          num_rows;
+	struct MR_LD_SPAN   span;
+	struct MR_SPAN_INFO block_span_info;
+};
+
+struct MR_LD_RAID {
+	struct {
+		u32     fpCapable:1;
+		u32     reserved5:3;
+		u32     ldPiMode:4;
+		u32     pdPiMode:4;
+		u32     encryptionType:8;
+		u32     fpWriteCapable:1;
+		u32     fpReadCapable:1;
+		u32     fpWriteAcrossStripe:1;
+		u32     fpReadAcrossStripe:1;
+		u32     reserved4:8;
+	} capability;
+	u32     reserved6;
+	u64     size;
+	u8      spanDepth;
+	u8      level;
+	u8      stripeShift;
+	u8      rowSize;
+	u8      rowDataSize;
+	u8      writeMode;
+	u8      PRL;
+	u8      SRL;
+	u16     targetId;
+	u8      ldState;
+	u8      regTypeReqOnWrite;
+	u8      modFactor;
+	u8      reserved2[1];
+	u16     seqNum;
+
+	struct {
+		u32 ldSyncRequired:1;
+		u32 reserved:31;
+	} flags;
+
+	u8      reserved3[0x5C];
+};
+
+struct MR_LD_SPAN_MAP {
+	struct MR_LD_RAID          ldRaid;
+	u8                  dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+	struct MR_SPAN_BLOCK_INFO  spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_FW_RAID_MAP {
+	u32                 totalSize;
+	union {
+		struct {
+			u32         maxLd;
+			u32         maxSpanDepth;
+			u32         maxRowSize;
+			u32         maxPdCount;
+			u32         maxArrays;
+		} validationInfo;
+		u32             version[5];
+		u32             reserved1[5];
+	};
+
+	u32                 ldCount;
+	u32                 Reserved1;
+	u8                  ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
+					MAX_RAIDMAP_VIEWS];
+	u8                  fpPdIoTimeoutSec;
+	u8                  reserved2[7];
+	struct MR_ARRAY_INFO       arMapInfo[MAX_RAIDMAP_ARRAYS];
+	struct MR_DEV_HANDLE_INFO  devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
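+	/* ldSpanMap is variable length; MR_FW_RAID_MAP_ALL sizes it for
+	 * MAX_LOGICAL_DRIVES entries */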
+	struct MR_LD_SPAN_MAP      ldSpanMap[1];
+};
+
+struct IO_REQUEST_INFO {
+	u64 ldStartBlock;
+	u32 numBlocks;
+	u16 ldTgtId;
+	u8 isRead;
+	u16 devHandle;
+	u64 pdBlock;
+	u8 fpOkForIo;
+};
+
+struct MR_LD_TARGET_SYNC {
+	u8  targetId;
+	u8  reserved;
+	u16 seqNum;
+};
+
+#define IEEE_SGE_FLAGS_ADDR_MASK            (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR          (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR          (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR          (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR       (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT        (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST          (0x40)
+
+struct megasas_register_set;
+struct megasas_instance;
+
+union desc_word {
+	u64 word;
+	struct {
+		u32 low;
+		u32 high;
+	} u;
+};
+
+struct megasas_cmd_fusion {
+	struct MPI2_RAID_SCSI_IO_REQUEST	*io_request;
+	dma_addr_t			io_request_phys_addr;
+
+	union MPI2_SGE_IO_UNION	*sg_frame;
+	dma_addr_t		sg_frame_phys_addr;
+
+	u8 *sense;
+	dma_addr_t sense_phys_addr;
+
+	struct list_head list;
+	struct scsi_cmnd *scmd;
+	struct megasas_instance *instance;
+
+	u8 retry_for_fw_reset;
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION  *request_desc;
+
+	/*
+	 * Context for an MFI frame.
+	 * Used to get the MFI cmd from the list when an MFI cmd is completed
+	 */
+	u32 sync_cmd_idx;
+	u32 index;
+	u8 flags;
+};
+
+struct LD_LOAD_BALANCE_INFO {
+	u8	loadBalanceFlag;
+	u8	reserved1;
+	u16     raid1DevHandle[2];
+	atomic_t     scsi_pending_cmds[2];
+	u64     last_accessed_block[2];
+};
+
+struct MR_FW_RAID_MAP_ALL {
+	struct MR_FW_RAID_MAP raidMap;
+	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} __attribute__ ((packed));
+
+struct fusion_context {
+	struct megasas_cmd_fusion **cmd_list;
+	struct list_head cmd_pool;
+
+	spinlock_t cmd_pool_lock;
+
+	dma_addr_t req_frames_desc_phys;
+	u8 *req_frames_desc;
+
+	struct dma_pool *io_request_frames_pool;
+	dma_addr_t io_request_frames_phys;
+	u8 *io_request_frames;
+
+	struct dma_pool *sg_dma_pool;
+	struct dma_pool *sense_dma_pool;
+
+	dma_addr_t reply_frames_desc_phys;
+	union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+	struct dma_pool *reply_frames_desc_pool;
+
+	u16 last_reply_idx;
+
+	u32 reply_q_depth;
+	u32 request_alloc_sz;
+	u32 reply_alloc_sz;
+	u32 io_frames_alloc_sz;
+
+	u16	max_sge_in_main_msg;
+	u16	max_sge_in_chain;
+
+	u8	chain_offset_io_request;
+	u8	chain_offset_mfi_pthru;
+
+	struct MR_FW_RAID_MAP_ALL *ld_map[2];
+	dma_addr_t ld_map_phys[2];
+
+	u32 map_sz;
+	u8 fast_path_io;
+	struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
+};
+
+union desc_value {
+	u64 word;
+	struct {
+		u32 low;
+		u32 high;
+	} u;
+};
+
+#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 4b1c2f0..8be75e6 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.15
+ *  mpi2.h Version:  02.00.16
  *
  *  Version History
  *  ---------------
@@ -61,6 +61,8 @@
  *                      Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
  *                      Added defines for product-specific range of message
  *                      function codes, 0xF0 to 0xFF.
+ *  05-12-10  02.00.16  Bumped MPI2_HEADER_VERSION_UNIT.
+ *                      Added alternative defines for the SGE Direction bit.
  *  --------------------------------------------------------------------------
  */
 
@@ -86,7 +88,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x0F)
+#define MPI2_HEADER_VERSION_UNIT            (0x10)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -929,6 +931,9 @@
 #define MPI2_SGE_FLAGS_IOC_TO_HOST              (0x00)
 #define MPI2_SGE_FLAGS_HOST_TO_IOC              (0x04)
 
+#define MPI2_SGE_FLAGS_DEST                     (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE                   (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
 /* Address Size */
 
 #define MPI2_SGE_FLAGS_32_BIT_ADDRESSING        (0x00)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index e3728d7..d76a658 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.14
+ *    mpi2_cnfg.h Version:  02.00.15
  *
  *  Version History
  *  ---------------
@@ -121,6 +121,10 @@
  *                      Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
  *                      Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
  *                      Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ *  05-12-10  02.00.15  Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ *                      define.
+ *                      Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ *                      Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
  *  --------------------------------------------------------------------------
  */
 
@@ -333,7 +337,7 @@
 #define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM          (0x06)
 #define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE      (0x07)
 
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
 
 /* Config Reply Message */
@@ -379,6 +383,8 @@
 #define MPI2_MFGPAGE_DEVID_SAS2116_1                (0x0064)
 #define MPI2_MFGPAGE_DEVID_SAS2116_2                (0x0065)
 
+#define MPI2_MFGPAGE_DEVID_SSS6200                  (0x007E)
+
 #define MPI2_MFGPAGE_DEVID_SAS2208_1                (0x0080)
 #define MPI2_MFGPAGE_DEVID_SAS2208_2                (0x0081)
 #define MPI2_MFGPAGE_DEVID_SAS2208_3                (0x0082)
@@ -390,6 +396,8 @@
 #define MPI2_MFGPAGE_DEVID_SAS2308_3                (0x006E)
 
 
+
+
 /* Manufacturing Page 0 */
 
 typedef struct _MPI2_CONFIG_PAGE_MAN_0
@@ -729,6 +737,7 @@
 /* IO Unit Page 1 Flags defines */
 #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY    (0x00000800)
 #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT         (9)
 #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE        (0x00000000)
 #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE       (0x00000200)
 #define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE     (0x00000400)
@@ -1347,6 +1356,7 @@
 #define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION        (0x00040000)
 #define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT           (0x00020000)
 #define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS        (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT        (0x00000080)
 #define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED               (0x00000040)
 #define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE              (0x00000020)
 #define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR          (0x00000000)
@@ -1469,11 +1479,15 @@
 #define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA             (0x03)
 #define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD   (0x04)
 #define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA    (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE          (0x06)
 #define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN             (0xFF)
 
 /* PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK                (0x0C)
 #define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE         (0x08)
 #define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE           (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK             (0x03)
 #define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL              (0x02)
 #define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL             (0x01)
 
@@ -1545,6 +1559,7 @@
 #define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE        (0x03)
 #define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR            (0x04)
 #define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS    (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY          (0x06)
 #define MPI2_SAS_NEG_LINK_RATE_1_5                      (0x08)
 #define MPI2_SAS_NEG_LINK_RATE_3_0                      (0x09)
 #define MPI2_SAS_NEG_LINK_RATE_6_0                      (0x0A)
@@ -1571,6 +1586,7 @@
 #define MPI2_SAS_PHYINFO_PHY_VACANT                     (0x80000000)
 
 #define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK       (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION      (27)
 #define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE               (0x00000000)
 #define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL              (0x08000000)
 #define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER              (0x10000000)
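
Note: the new MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT and
MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION values pair with the existing masks
to turn the raw register words into small enumerations. A standalone sketch
(not driver code) of that mask-and-shift usage, with the constant values copied
from the hunks above:

#include <stdio.h>
#include <stdint.h>

#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK   (0x18000000)
#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION  (27)
#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL          (0x08000000)

#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE      (0x00000600)
#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT     (9)
#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE   (0x00000200)

int main(void)
{
	uint32_t phy_info = MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL;
	uint32_t iounit_flags = MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE;

	/* power condition: 0 = active, 1 = partial, 2 = slumber */
	printf("power condition: %u\n",
	       (phy_info & MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK) >>
	       MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION);

	/* SATA write cache setting: 0 = enable, 1 = disable, 2 = unchanged */
	printf("sata write cache: %u\n",
	       (iounit_flags & MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE) >>
	       MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT);

	return 0;
}
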
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index bd6c92b..b1e88f2 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -291,6 +291,7 @@
  *                      can be sized by the build environment.
  *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
  *                      VolumeCreationFlags and marked the old one as obsolete.
+ *  05-12-10  02.00.05  Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
  *  --------------------------------------------------------------------------
 
 mpi2_sas.h
@@ -301,6 +302,7 @@
  *                      Request.
  *  10-28-09  02.00.03  Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
  *                      to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ *  05-12-10  02.00.04  Modified some comments.
  *  --------------------------------------------------------------------------
 
 mpi2_targ.h
@@ -324,6 +326,7 @@
  *                      and reply messages.
  *                      Added MPI2_DIAG_BUF_TYPE_EXTENDED.
  *                      Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ *  05-12-10  02.00.05  Added Diagnostic Data Upload tool.
  *  --------------------------------------------------------------------------
 
 mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index c4c99df..20e6b88 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.09
+ *    mpi2_init.h Version:  02.00.10
  *
  *  Version History
  *  ---------------
@@ -32,6 +32,7 @@
  *                      Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
  *                      Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
  *  02-10-10  02.00.09  Removed unused structure that had "#if 0" around it.
+ *  05-12-10  02.00.10  Added optional vendor-unique region to SCSI IO Request.
  *  --------------------------------------------------------------------------
  */
 
@@ -98,7 +99,13 @@
     U8                      LUN[8];                         /* 0x34 */
     U32                     Control;                        /* 0x3C */
     MPI2_SCSI_IO_CDB_UNION  CDB;                            /* 0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */
+	MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
     MPI2_SGE_IO_UNION       SGL;                            /* 0x60 */
+
 } MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST,
   Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t;
 
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 495bedc..761cbdb 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.14
+ *  mpi2_ioc.h Version:  02.00.15
  *
  *  Version History
  *  ---------------
@@ -101,6 +101,8 @@
  *  02-10-10  02.00.14  Added SAS Quiesce Event structure and defines.
  *                      Added PowerManagementControl Request structures and
  *                      defines.
+ *  05-12-10  02.00.15  Marked Task Set Full Event as obsolete.
+ *                      Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
  *  --------------------------------------------------------------------------
  */
 
@@ -456,7 +458,7 @@
 #define MPI2_EVENT_STATE_CHANGE                     (0x0002)
 #define MPI2_EVENT_HARD_RESET_RECEIVED              (0x0005)
 #define MPI2_EVENT_EVENT_CHANGE                     (0x000A)
-#define MPI2_EVENT_TASK_SET_FULL                    (0x000E)
+#define MPI2_EVENT_TASK_SET_FULL                    (0x000E) /* obsolete */
 #define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE         (0x000F)
 #define MPI2_EVENT_IR_OPERATION_STATUS              (0x0014)
 #define MPI2_EVENT_SAS_DISCOVERY                    (0x0016)
@@ -517,6 +519,7 @@
   MPI2_POINTER pMpi2EventDataHardResetReceived_t;
 
 /* Task Set Full Event data */
+/*   this event is obsolete */
 
 typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL
 {
@@ -831,6 +834,7 @@
 #define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE            (0x03)
 #define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR                (0x04)
 #define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS        (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY              (0x06)
 #define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5                     (0x08)
 #define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0                     (0x09)
 #define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0                     (0x0A)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 5160c33..bd61a7b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2008 LSI Corporation.
+ *  Copyright (c) 2000-2010 LSI Corporation.
  *
  *
  *           Name:  mpi2_raid.h
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.04
+ *    mpi2_raid.h Version:  02.00.05
  *
  *  Version History
  *  ---------------
@@ -22,6 +22,7 @@
  *                      can be sized by the build environment.
  *  07-30-09  02.00.04  Added proper define for the Use Default Settings bit of
  *                      VolumeCreationFlags and marked the old one as obsolete.
+ *  05-12-10  02.00.05  Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
  *  --------------------------------------------------------------------------
  */
 
@@ -260,6 +261,7 @@
 #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
 #define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK    (0x00000002)
 #define MPI2_RAID_VOL_FLAGS_OP_RESYNC               (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC                  (0x00000004)
 
 
 /* RAID Action Reply ActionData union */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 2d8aeed..608f6d6 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2007 LSI Corporation.
+ *  Copyright (c) 2000-2010 LSI Corporation.
  *
  *
  *           Name:  mpi2_sas.h
  *          Title:  MPI Serial Attached SCSI structures and definitions
  *  Creation Date:  February 9, 2007
  *
- *  mpi2.h Version:  02.00.03
+ *  mpi2_sas.h Version:  02.00.04
  *
  *  Version History
  *  ---------------
@@ -20,6 +20,7 @@
  *                      Request.
  *  10-28-09  02.00.03  Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
  *                      to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ *  05-12-10  02.00.04  Modified some comments.
  *  --------------------------------------------------------------------------
  */
 
@@ -110,7 +111,7 @@
 /* values for PassthroughFlags field */
 #define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE      (0x80)
 
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
 
 /* SMP Passthrough Reply Message */
@@ -174,7 +175,7 @@
 #define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE             (0x0002)
 #define MPI2_SATA_PT_REQ_PT_FLAGS_READ              (0x0001)
 
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
 
 /* SATA Passthrough Reply Message */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 686b09b..5c6e3a6 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.04
+ *    mpi2_tool.h Version:  02.00.05
  *
  *  Version History
  *  ---------------
@@ -22,6 +22,7 @@
  *                      and reply messages.
  *                      Added MPI2_DIAG_BUF_TYPE_EXTENDED.
  *                      Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ *  05-12-10  02.00.05  Added Diagnostic Data Upload tool.
  *  --------------------------------------------------------------------------
  */
 
@@ -37,6 +38,7 @@
 /* defines for the Tools */
 #define MPI2_TOOLBOX_CLEAN_TOOL                     (0x00)
 #define MPI2_TOOLBOX_MEMORY_MOVE_TOOL               (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL          (0x02)
 #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL          (0x03)
 #define MPI2_TOOLBOX_BEACON_TOOL                    (0x05)
 #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL            (0x06)
@@ -102,8 +104,7 @@
 *  Toolbox Memory Move request
 ****************************************************************************/
 
-typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST
-{
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
     U8                      Tool;                       /* 0x00 */
     U8                      Reserved1;                  /* 0x01 */
     U8                      ChainOffset;                /* 0x02 */
@@ -120,6 +121,44 @@
 
 
 /****************************************************************************
+*  Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
+	U8                      Tool;                       /* 0x00 */
+	U8                      Reserved1;                  /* 0x01 */
+	U8                      ChainOffset;                /* 0x02 */
+	U8                      Function;                   /* 0x03 */
+	U16                     Reserved2;                  /* 0x04 */
+	U8                      Reserved3;                  /* 0x06 */
+	U8                      MsgFlags;                   /* 0x07 */
+	U8                      VP_ID;                      /* 0x08 */
+	U8                      VF_ID;                      /* 0x09 */
+	U16                     Reserved4;                  /* 0x0A */
+	U8                      SGLFlags;                   /* 0x0C */
+	U8                      Reserved5;                  /* 0x0D */
+	U16                     Reserved6;                  /* 0x0E */
+	U32                     Flags;                      /* 0x10 */
+	U32                     DataLength;                 /* 0x14 */
+	MPI2_SGE_SIMPLE_UNION   SGL;                        /* 0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+Mpi2ToolboxDiagDataUploadRequest_t,
+MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t;
+
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
+	U32                     DiagDataLength;             /* 00h */
+	U8                      FormatCode;                 /* 04h */
+	U8                      Reserved1;                  /* 05h */
+	U16                     Reserved2;                  /* 06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t;
+
+
+/****************************************************************************
 *  Toolbox ISTWI Read Write Tool
 ****************************************************************************/
 
@@ -162,7 +201,7 @@
 #define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS          (0x11)
 #define MPI2_TOOL_ISTWI_ACTION_RESET                (0x12)
 
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
 
 /* Toolbox ISTWI Read Write Tool reply message */
@@ -248,7 +287,7 @@
   Mpi2ToolboxDiagnosticCliRequest_t,
   MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t;
 
-/* values for SGLFlags field are in the SGL section of mpi2.h */
+/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
 
 
 /* Toolbox Diagnostic CLI Tool reply message */
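
Note: the new Diagnostic Data Upload request and header are plain fixed-layout
structures; the byte offsets in the comments are the contract with firmware. A
standalone layout check (not driver code) that mirrors them with stand-in
integer types and asserts the commented offsets; U8/U16/U32 and the SGE union
are simplified stand-ins for the MPI2 typedefs:

#include <stddef.h>
#include <stdint.h>

typedef uint8_t  U8;
typedef uint16_t U16;
typedef uint32_t U32;

typedef struct {            /* stand-in for MPI2_SGE_SIMPLE_UNION */
	U32 FlagsLength;
	U32 Address;
} SGE_SIMPLE_STANDIN;

typedef struct {
	U8  Tool;               /* 0x00 */
	U8  Reserved1;          /* 0x01 */
	U8  ChainOffset;        /* 0x02 */
	U8  Function;           /* 0x03 */
	U16 Reserved2;          /* 0x04 */
	U8  Reserved3;          /* 0x06 */
	U8  MsgFlags;           /* 0x07 */
	U8  VP_ID;              /* 0x08 */
	U8  VF_ID;              /* 0x09 */
	U16 Reserved4;          /* 0x0A */
	U8  SGLFlags;           /* 0x0C */
	U8  Reserved5;          /* 0x0D */
	U16 Reserved6;          /* 0x0E */
	U32 Flags;              /* 0x10 */
	U32 DataLength;         /* 0x14 */
	SGE_SIMPLE_STANDIN SGL; /* 0x18 */
} DIAG_DATA_UPLOAD_REQUEST_STANDIN;

typedef struct {
	U32 DiagDataLength;     /* 00h: length of the data that follows */
	U8  FormatCode;         /* 04h */
	U8  Reserved1;          /* 05h */
	U16 Reserved2;          /* 06h */
} DIAG_DATA_UPLOAD_HEADER_STANDIN;

_Static_assert(offsetof(DIAG_DATA_UPLOAD_REQUEST_STANDIN, SGLFlags)   == 0x0C, "");
_Static_assert(offsetof(DIAG_DATA_UPLOAD_REQUEST_STANDIN, Flags)      == 0x10, "");
_Static_assert(offsetof(DIAG_DATA_UPLOAD_REQUEST_STANDIN, DataLength) == 0x14, "");
_Static_assert(offsetof(DIAG_DATA_UPLOAD_REQUEST_STANDIN, SGL)        == 0x18, "");
_Static_assert(sizeof(DIAG_DATA_UPLOAD_HEADER_STANDIN) == 8, "");

int main(void) { return 0; }
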
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 12faf64..b2a8170 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -65,7 +65,6 @@
 static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
 
 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
-#define MPT2SAS_MAX_REQUEST_QUEUE 600 /* maximum controller queue depth */
 
 static int max_queue_depth = -1;
 module_param(max_queue_depth, int, 0);
@@ -79,6 +78,10 @@
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
+
 /* diag_buffer_enable is bitwise
  * bit 0 set = TRACE
  * bit 1 set = SNAPSHOT
@@ -515,9 +518,6 @@
 	case MPI2_EVENT_EVENT_CHANGE:
 		desc = "Event Change";
 		break;
-	case MPI2_EVENT_TASK_SET_FULL:
-		desc = "Task Set Full";
-		break;
 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
 		desc = "Device Status Change";
 		break;
@@ -758,7 +758,7 @@
 		if (smid < ioc->internal_smid) {
 			i = smid - ioc->hi_priority_smid;
 			cb_idx = ioc->hpr_lookup[i].cb_idx;
-		} else {
+		} else if (smid <= ioc->hba_queue_depth)  {
 			i = smid - ioc->internal_smid;
 			cb_idx = ioc->internal_lookup[i].cb_idx;
 		}
@@ -848,6 +848,7 @@
 		return IRQ_NONE;
 
 	completed_cmds = 0;
+	cb_idx = 0xFF;
 	do {
 		rd.word = rpf->Words;
 		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
@@ -860,6 +861,9 @@
 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 			reply = le32_to_cpu
 				(rpf->AddressReply.ReplyFrameAddress);
+			if (reply > ioc->reply_dma_max_address ||
+			    reply < ioc->reply_dma_min_address)
+				reply = 0;
 		} else if (request_desript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
 			goto next;
@@ -1489,6 +1493,7 @@
 {
 	unsigned long flags;
 	int i;
+	struct chain_tracker *chain_req, *next;
 
 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	if (smid >= ioc->hi_priority_smid) {
@@ -1511,6 +1516,14 @@
 
 	/* scsiio queue */
 	i = smid - 1;
+	if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+		list_for_each_entry_safe(chain_req, next,
+		    &ioc->scsi_lookup[i].chain_list, tracker_list) {
+			list_del_init(&chain_req->tracker_list);
+			list_add_tail(&chain_req->tracker_list,
+			    &ioc->free_chain_list);
+		}
+	}
 	ioc->scsi_lookup[i].cb_idx = 0xFF;
 	ioc->scsi_lookup[i].scmd = NULL;
 	list_add_tail(&ioc->scsi_lookup[i].tracker_list,
@@ -1819,6 +1832,97 @@
 }
 
 /**
+ * _base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time until a device is reported missing
+ * @io_missing_delay: interval after which IO is returned for a missing device
+ *
+ * Return nothing.
+ *
+ * When the missing_delay module parameter is passed on the command line,
+ * this function modifies the device missing delay as well as the io missing
+ * delay. This should be called at driver load time.
+ */
+static void
+_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+	u16 device_missing_delay, u8 io_missing_delay)
+{
+	u16 dmd, dmd_new, dmd_orignal;
+	u8 io_missing_delay_original;
+	u16 sz;
+	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+	Mpi2ConfigReply_t mpi_reply;
+	u8 num_phys = 0;
+	u16 ioc_status;
+
+	mpt2sas_config_get_number_hba_phys(ioc, &num_phys);
+	if (!num_phys)
+		return;
+
+	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+	    sizeof(Mpi2SasIOUnit1PhyData_t));
+	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+	if (!sas_iounit_pg1) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+	if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+	    sas_iounit_pg1, sz))) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	/* device missing delay */
+	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+	else
+		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+	dmd_orignal = dmd;
+	if (device_missing_delay > 0x7F) {
+		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+		    device_missing_delay;
+		dmd = dmd / 16;
+		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+	} else
+		dmd = device_missing_delay;
+	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+	/* io missing delay */
+	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+	if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+	    sz)) {
+		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+			dmd_new = (dmd &
+			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+		else
+			dmd_new =
+		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+		printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), "
+		    "new(%d)\n", ioc->name, dmd_orignal, dmd_new);
+		printk(MPT2SAS_INFO_FMT "io_missing_delay: old(%d), "
+		    "new(%d)\n", ioc->name, io_missing_delay_original,
+		    io_missing_delay);
+		ioc->device_missing_delay = dmd_new;
+		ioc->io_missing_delay = io_missing_delay;
+	}
+
+out:
+	kfree(sas_iounit_pg1);
+}
+
+/**
  * _base_static_config_pages - static start of day config pages
  * @ioc: per adapter object
  *
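
Note: taken together with the missing_delay module parameter added earlier in
this file (an array of two ints, so presumably loaded as
missing_delay=<device delay>,<io delay>), the function above encodes large
device missing delays in units of 16 with a flag bit and clamps them to 0x7F0.
A standalone sketch (not driver code) of that encode/decode round trip; the
mask and flag values are assumptions chosen to match the 0x7F/0x7F0 clamps in
the driver code:

#include <stdio.h>
#include <stdint.h>

#define REPORT_MISSING_TIMEOUT_MASK 0x7F  /* assumed stand-in for MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK */
#define REPORT_MISSING_UNIT_16      0x80  /* assumed stand-in for MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 */

/* encode a requested delay into the ReportDeviceMissingDelay field */
static uint16_t encode_missing_delay(uint16_t requested)
{
	uint16_t dmd;

	if (requested > 0x7F) {
		/* large delays are clamped to 0x7F0 and stored in units of 16 */
		dmd = (requested > 0x7F0) ? 0x7F0 : requested;
		dmd /= 16;
		dmd |= REPORT_MISSING_UNIT_16;
	} else {
		dmd = requested;
	}
	return dmd;
}

/* decode the field back into the effective delay, mirroring the read-out at
 * the top of _base_update_missing_delay() */
static uint16_t decode_missing_delay(uint16_t dmd)
{
	if (dmd & REPORT_MISSING_UNIT_16)
		return (dmd & REPORT_MISSING_TIMEOUT_MASK) * 16;
	return dmd & REPORT_MISSING_TIMEOUT_MASK;
}

int main(void)
{
	uint16_t requested = 300;   /* e.g. missing_delay=300,<io delay> */
	uint16_t dmd = encode_missing_delay(requested);

	/* large values round down to a multiple of 16 (300 becomes 288) */
	printf("requested %u -> field 0x%02x -> effective %u\n",
	       requested, dmd, decode_missing_delay(dmd));
	return 0;
}
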
@@ -1855,6 +1959,7 @@
 		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
 	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
 	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+
 }
 
 /**
@@ -1868,6 +1973,8 @@
 static void
 _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
 {
+	int i;
+
 	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
 
@@ -1932,6 +2039,20 @@
 	}
 	kfree(ioc->hpr_lookup);
 	kfree(ioc->internal_lookup);
+	if (ioc->chain_lookup) {
+		for (i = 0; i < ioc->chain_depth; i++) {
+			if (ioc->chain_lookup[i].chain_buffer)
+				pci_pool_free(ioc->chain_dma_pool,
+				    ioc->chain_lookup[i].chain_buffer,
+				    ioc->chain_lookup[i].chain_buffer_dma);
+		}
+		if (ioc->chain_dma_pool)
+			pci_pool_destroy(ioc->chain_dma_pool);
+	}
+	if (ioc->chain_lookup) {
+		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+		ioc->chain_lookup = NULL;
+	}
 }
 
 
@@ -1953,6 +2074,7 @@
 	u32 sz, total_sz;
 	u32 retry_sz;
 	u16 max_request_credit;
+	int i;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -1970,14 +2092,11 @@
 	}
 
 	/* command line tunables  for max controller queue depth */
-	if (max_queue_depth != -1) {
+	if (max_queue_depth != -1)
 		max_request_credit = (max_queue_depth < facts->RequestCredit)
 		    ? max_queue_depth : facts->RequestCredit;
-	} else {
-		max_request_credit = (facts->RequestCredit >
-		    MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE :
-		    facts->RequestCredit;
-	}
+	else
+		max_request_credit = facts->RequestCredit;
 
 	ioc->hba_queue_depth = max_request_credit;
 	ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2083,7 +2202,7 @@
 	 * "frame for smid=0
 	 */
 	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
-	sz = ((ioc->scsiio_depth + 1 + ioc->chain_depth) * ioc->request_sz);
+	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
 
 	/* hi-priority queue */
 	sz += (ioc->hi_priority_depth * ioc->request_sz);
@@ -2124,19 +2243,11 @@
 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
 	    ioc->request_sz);
 
-	ioc->chain = ioc->internal + (ioc->internal_depth *
-	    ioc->request_sz);
-	ioc->chain_dma = ioc->internal_dma + (ioc->internal_depth *
-	    ioc->request_sz);
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
 	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
 	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
 	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
-	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth"
-	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain,
-	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
-	    ioc->request_sz))/1024));
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
 	    ioc->name, (unsigned long long) ioc->request_dma));
 	total_sz += sz;
@@ -2155,6 +2266,38 @@
 	    "depth(%d)\n", ioc->name, ioc->request,
 	    ioc->scsiio_depth));
 
+	/* loop till the allocation succeeds */
+	do {
+		sz = ioc->chain_depth * sizeof(struct chain_tracker);
+		ioc->chain_pages = get_order(sz);
+		ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+		    GFP_KERNEL, ioc->chain_pages);
+		if (ioc->chain_lookup == NULL)
+			ioc->chain_depth -= 100;
+	} while (ioc->chain_lookup == NULL);
+	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+	    ioc->request_sz, 16, 0);
+	if (!ioc->chain_dma_pool) {
+		printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create "
+		    "failed\n", ioc->name);
+		goto out;
+	}
+	for (i = 0; i < ioc->chain_depth; i++) {
+		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+		    ioc->chain_dma_pool , GFP_KERNEL,
+		    &ioc->chain_lookup[i].chain_buffer_dma);
+		if (!ioc->chain_lookup[i].chain_buffer) {
+			ioc->chain_depth = i;
+			goto chain_done;
+		}
+		total_sz += ioc->request_sz;
+	}
+chain_done:
+	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth"
+	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
+	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
+	    ioc->request_sz))/1024));
+
 	/* initialize hi-priority queue smid's */
 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
 	    sizeof(struct request_tracker), GFP_KERNEL);
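
Note: the chain lookup table above is sized optimistically and shrunk until the
allocation succeeds, and a later per-buffer failure simply truncates
chain_depth at the chain_done label. A standalone sketch (not driver code) of
the shrink-and-retry idea; the zero floor is an extra guard added for the
example only, the driver loop simply keeps subtracting 100:

#include <stdio.h>
#include <stdlib.h>

struct chain_tracker_standin {
	void *chain_buffer;
	unsigned long chain_buffer_dma;
};

int main(void)
{
	size_t chain_depth = 100000;   /* hypothetical starting depth */
	struct chain_tracker_standin *lookup = NULL;

	while (chain_depth > 0) {
		lookup = malloc(chain_depth * sizeof(*lookup));
		if (lookup)
			break;
		chain_depth = (chain_depth > 100) ? chain_depth - 100 : 0;
	}
	if (!lookup) {
		fprintf(stderr, "could not size the chain lookup table\n");
		return 1;
	}
	printf("allocated lookup table for %zu chains\n", chain_depth);
	free(lookup);
	return 0;
}
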
@@ -2221,6 +2364,8 @@
 		    ioc->name);
 		goto out;
 	}
+	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
 	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
 	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
@@ -2302,7 +2447,6 @@
 	return 0;
 
  out:
-	_base_release_memory_pools(ioc);
 	return -ENOMEM;
 }
 
@@ -3485,6 +3629,7 @@
 	INIT_LIST_HEAD(&ioc->free_list);
 	smid = 1;
 	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
+		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
 		ioc->scsi_lookup[i].cb_idx = 0xFF;
 		ioc->scsi_lookup[i].smid = smid;
 		ioc->scsi_lookup[i].scmd = NULL;
@@ -3511,6 +3656,13 @@
 		list_add_tail(&ioc->internal_lookup[i].tracker_list,
 		    &ioc->internal_free_list);
 	}
+
+	/* chain pool */
+	INIT_LIST_HEAD(&ioc->free_chain_list);
+	for (i = 0; i < ioc->chain_depth; i++)
+		list_add_tail(&ioc->chain_lookup[i].tracker_list,
+		    &ioc->free_chain_list);
+
 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
 	/* initialize Reply Free Queue */
@@ -3708,12 +3860,15 @@
 	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
 	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
 	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
-	_base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL);
 	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
 	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
 	if (r)
 		goto out_free_resources;
 
+	if (missing_delay[0] != -1 && missing_delay[1] != -1)
+		_base_update_missing_delay(ioc, missing_delay[0],
+		    missing_delay[1]);
+
 	mpt2sas_base_start_watchdog(ioc);
 	return 0;
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0b15a8b..283568c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"06.100.00.00"
-#define MPT2SAS_MAJOR_VERSION		06
+#define MPT2SAS_DRIVER_VERSION		"07.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		07
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
 #define MPT2SAS_RELEASE_VERSION		00
@@ -419,6 +419,18 @@
 };
 
 /**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free request (ioc->free_chain_list)
+ */
+struct chain_tracker {
+	void *chain_buffer;
+	dma_addr_t chain_buffer_dma;
+	struct list_head tracker_list;
+};
+
+/**
  * struct request_tracker - firmware request tracker
  * @smid: system message id
  * @scmd: scsi request pointer
@@ -430,6 +442,7 @@
 	u16	smid;
 	struct scsi_cmnd *scmd;
 	u8	cb_idx;
+	struct list_head chain_list;
 	struct list_head tracker_list;
 };
 
@@ -704,8 +717,10 @@
 	wait_queue_head_t reset_wq;
 
 	/* chain */
-	u8		*chain;
-	dma_addr_t	chain_dma;
+	struct chain_tracker *chain_lookup;
+	struct list_head free_chain_list;
+	struct dma_pool *chain_dma_pool;
+	ulong		chain_pages;
 	u16 		max_sges_in_main_message;
 	u16		max_sges_in_chain_message;
 	u16		chains_needed_per_io;
@@ -737,6 +752,8 @@
 	u16		reply_sz;
 	u8		*reply;
 	dma_addr_t	reply_dma;
+	u32		reply_dma_max_address;
+	u32		reply_dma_min_address;
 	struct dma_pool *reply_dma_pool;
 
 	/* reply free queue */
@@ -832,6 +849,8 @@
     ulong timeout, struct scsi_cmnd *scmd);
 void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
+void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
+void mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
 struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc,
     u16 handle);
 struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 40cb8ae..e92b77a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -81,6 +81,7 @@
 	BLOCKING,
 };
 
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 /**
  * _ctl_sas_device_find_by_handle - sas device search
  * @ioc: per adapter object
@@ -107,7 +108,6 @@
 	return r;
 }
 
-#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 /**
  * _ctl_display_some_debug - debug routine
  * @ioc: per adapter object
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 1a96a00..eda347c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -931,31 +931,32 @@
 }
 
 /**
- * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
+ * _scsih_get_chain_buffer_tracker - obtain chain tracker
  * @ioc: per adapter object
- * @smid: system request message index
+ * @smid: smid associated to an IO request
  *
- * Returns phys pointer to chain buffer.
+ * Returns chain tracker(from ioc->free_chain_list)
  */
-static dma_addr_t
-_scsih_get_chain_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+static struct chain_tracker *
+_scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 {
-	return ioc->chain_dma + ((smid - 1) * (ioc->request_sz *
-	    ioc->chains_needed_per_io));
-}
+	struct chain_tracker *chain_req;
+	unsigned long flags;
 
-/**
- * _scsih_get_chain_buffer - obtain block of chains assigned to a mf request
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns virt pointer to chain buffer.
- */
-static void *
-_scsih_get_chain_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
-{
-	return (void *)(ioc->chain + ((smid - 1) * (ioc->request_sz *
-	    ioc->chains_needed_per_io)));
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	if (list_empty(&ioc->free_chain_list)) {
+		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+		printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
+		    ioc->name);
+		return NULL;
+	}
+	chain_req = list_entry(ioc->free_chain_list.next,
+	    struct chain_tracker, tracker_list);
+	list_del_init(&chain_req->tracker_list);
+	list_add_tail(&chain_req->tracker_list,
+	    &ioc->scsi_lookup[smid - 1].chain_list);
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+	return chain_req;
 }
 
 /**
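
Note: the replacement above swaps the old contiguous per-smid chain region for
trackers handed out from ioc->free_chain_list and parked on the request's
chain_list until the smid is freed (the earlier mpt2sas_base.c hunk walks
chain_list back onto free_chain_list). A standalone model (not driver code) of
that free-list hand-off:

#include <stdio.h>

struct chain_tracker {
	int id;                      /* stand-in for chain_buffer/chain_buffer_dma */
	struct chain_tracker *next;  /* stand-in for the kernel list_head linkage */
};

static struct chain_tracker *free_list;

/* mirrors _scsih_get_chain_buffer_tracker(): NULL when the pool is empty */
static struct chain_tracker *get_chain_tracker(struct chain_tracker **io_list)
{
	struct chain_tracker *t = free_list;

	if (!t)
		return NULL;
	free_list = t->next;
	t->next = *io_list;          /* park on the per-I/O chain_list */
	*io_list = t;
	return t;
}

/* mirrors the free-smid change: give everything back on completion */
static void put_chain_trackers(struct chain_tracker **io_list)
{
	while (*io_list) {
		struct chain_tracker *t = *io_list;

		*io_list = t->next;
		t->next = free_list;
		free_list = t;
	}
}

int main(void)
{
	struct chain_tracker pool[4];
	struct chain_tracker *io_chain_list = NULL;
	int i;

	for (i = 0; i < 4; i++) {    /* build the free list, like free_chain_list */
		pool[i].id = i;
		pool[i].next = free_list;
		free_list = &pool[i];
	}

	while (get_chain_tracker(&io_chain_list))   /* drain the pool */
		;
	printf("pool empty: %s\n", free_list ? "no" : "yes");
	put_chain_trackers(&io_chain_list);          /* I/O completes */
	printf("pool refilled: %s\n", free_list ? "yes" : "no");
	return 0;
}
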
@@ -986,6 +987,7 @@
 	u32 sgl_flags;
 	u32 sgl_flags_last_element;
 	u32 sgl_flags_end_buffer;
+	struct chain_tracker *chain_req;
 
 	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
 
@@ -1033,8 +1035,11 @@
 
 	/* initializing the chain flags and pointers */
 	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
-	chain = _scsih_get_chain_buffer(ioc, smid);
-	chain_dma = _scsih_get_chain_buffer_dma(ioc, smid);
+	chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
+	if (!chain_req)
+		return -1;
+	chain = chain_req->chain_buffer;
+	chain_dma = chain_req->chain_buffer_dma;
 	do {
 		sges_in_segment = (sges_left <=
 		    ioc->max_sges_in_chain_message) ? sges_left :
@@ -1070,8 +1075,11 @@
 			sges_in_segment--;
 		}
 
-		chain_dma += ioc->request_sz;
-		chain += ioc->request_sz;
+		chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
+		if (!chain_req)
+			return -1;
+		chain = chain_req->chain_buffer;
+		chain_dma = chain_req->chain_buffer_dma;
 	} while (1);
 
 
@@ -1094,28 +1102,24 @@
 }
 
 /**
- * _scsih_change_queue_depth - setting device queue depth
+ * _scsih_adjust_queue_depth - setting device queue depth
  * @sdev: scsi device struct
  * @qdepth: requested queue depth
- * @reason: calling context
  *
- * Returns queue depth.
+ *
+ * Returns nothing.
  */
-static int
-_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+static void
+_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
 {
 	struct Scsi_Host *shost = sdev->host;
 	int max_depth;
-	int tag_type;
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 	struct _sas_device *sas_device;
 	unsigned long flags;
 
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
-
 	max_depth = shost->can_queue;
 
 	/* limit max device queue for SATA to 32 */
@@ -1141,8 +1145,27 @@
 		max_depth = 1;
 	if (qdepth > max_depth)
 		qdepth = max_depth;
-	tag_type = (qdepth == 1) ? 0 : MSG_SIMPLE_TAG;
-	scsi_adjust_queue_depth(sdev, tag_type, qdepth);
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+}
+
+/**
+ * _scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
+ * (see include/scsi/scsi_host.h for definition)
+ *
+ * Returns queue depth.
+ */
+static int
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
+		_scsih_adjust_queue_depth(sdev, qdepth);
+	else if (reason == SCSI_QDEPTH_QFULL)
+		scsi_track_queue_full(sdev, qdepth);
+	else
+		return -EOPNOTSUPP;
 
 	if (sdev->inquiry_len > 7)
 		sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), "
@@ -2251,13 +2274,13 @@
 
 	struct scsi_target *starget = scmd->device->sdev_target;
 
-	starget_printk(KERN_INFO, starget, "attempting target reset! "
+	starget_printk(KERN_INFO, starget, "attempting device reset! "
 	    "scmd(%p)\n", scmd);
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
-		starget_printk(KERN_INFO, starget, "target been deleted! "
+		starget_printk(KERN_INFO, starget, "device has been deleted! "
 		    "scmd(%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
@@ -2576,9 +2599,9 @@
 	   &sas_expander->sas_port_list, port_list) {
 
 		if (mpt2sas_port->remote_identify.device_type ==
-		    MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+		    SAS_EDGE_EXPANDER_DEVICE ||
 		    mpt2sas_port->remote_identify.device_type ==
-		    MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
+		    SAS_FANOUT_EXPANDER_DEVICE) {
 
 			spin_lock_irqsave(&ioc->sas_node_lock, flags);
 			expander_sibling =
@@ -2715,9 +2738,10 @@
 _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
     u8 msix_index, u32 reply)
 {
+#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	Mpi2SasIoUnitControlReply_t *mpi_reply =
 	    mpt2sas_base_get_reply_virt_addr(ioc, reply);
-
+#endif
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "sc_complete:handle(0x%04x), (open) "
 	    "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -3963,6 +3987,7 @@
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
 	u16 attached_handle;
+	u8 link_rate;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "updating handles for sas_host(0x%016llx)\n",
@@ -3984,15 +4009,17 @@
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
 		goto out;
 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
 		if (i == 0)
 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
 			    PhyData[0].ControllerDevHandle);
 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
 		    AttachedDevHandle);
+		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
 		mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
-		    attached_handle, i, sas_iounit_pg0->PhyData[i].
-		    NegotiatedLinkRate >> 4);
+		    attached_handle, i, link_rate);
 	}
  out:
 	kfree(sas_iounit_pg0);
@@ -4336,14 +4363,14 @@
 }
 
 /**
- * _scsih_expander_remove - removing expander object
+ * mpt2sas_expander_remove - removing expander object
  * @ioc: per adapter object
  * @sas_address: expander sas_address
  *
  * Return nothing.
  */
-static void
-_scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
+void
+mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
 {
 	struct _sas_node *sas_expander;
 	unsigned long flags;
@@ -4354,6 +4381,11 @@
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
 	    sas_address);
+	if (!sas_expander) {
+		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+		return;
+	}
+	list_del(&sas_expander->list);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 	_scsih_expander_node_remove(ioc, sas_expander);
 }
@@ -4643,6 +4675,33 @@
 	    sas_device_backup.sas_address));
 }
 
+/**
+ * mpt2sas_device_remove - removing device object
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (ioc->shost_recovery)
+		return;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+	    sas_address);
+	if (!sas_device) {
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	_scsih_remove_device(ioc, sas_device);
+}
+
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 /**
  * _scsih_sas_topology_change_event_debug - debug for topology event
@@ -4737,7 +4796,7 @@
 	int i;
 	u16 parent_handle, handle;
 	u16 reason_code;
-	u8 phy_number;
+	u8 phy_number, max_phys;
 	struct _sas_node *sas_expander;
 	struct _sas_device *sas_device;
 	u64 sas_address;
@@ -4775,11 +4834,13 @@
 	sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
 	    parent_handle);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-	if (sas_expander)
+	if (sas_expander) {
 		sas_address = sas_expander->sas_address;
-	else if (parent_handle < ioc->sas_hba.num_phys)
+		max_phys = sas_expander->num_phys;
+	} else if (parent_handle < ioc->sas_hba.num_phys) {
 		sas_address = ioc->sas_hba.sas_address;
-	else
+		max_phys = ioc->sas_hba.num_phys;
+	} else
 		return;
 
 	/* handle siblings events */
@@ -4793,6 +4854,8 @@
 		    ioc->pci_error_recovery)
 			return;
 		phy_number = event_data->StartPhyNum + i;
+		if (phy_number >= max_phys)
+			continue;
 		reason_code = event_data->PHY[i].PhyStatus &
 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
 		if ((event_data->PHY[i].PhyStatus &
@@ -4844,7 +4907,7 @@
 	/* handle expander removal */
 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
 	    sas_expander)
-		_scsih_expander_remove(ioc, sas_address);
+		mpt2sas_expander_remove(ioc, sas_address);
 
 }
 
@@ -5773,90 +5836,6 @@
 }
 
 /**
- * _scsih_task_set_full - handle task set full
- * @ioc: per adapter object
- * @fw_event: The fw_event_work object
- * Context: user.
- *
- * Throttle back qdepth.
- */
-static void
-_scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
-	*fw_event)
-{
-	unsigned long flags;
-	struct _sas_device *sas_device;
-	static struct _raid_device *raid_device;
-	struct scsi_device *sdev;
-	int depth;
-	u16 current_depth;
-	u16 handle;
-	int id, channel;
-	u64 sas_address;
-	Mpi2EventDataTaskSetFull_t *event_data = fw_event->event_data;
-
-	current_depth = le16_to_cpu(event_data->CurrentDepth);
-	handle = le16_to_cpu(event_data->DevHandle);
-	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
-	if (!sas_device) {
-		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-	id = sas_device->id;
-	channel = sas_device->channel;
-	sas_address = sas_device->sas_address;
-
-	/* if hidden raid component, then change to volume characteristics */
-	if (test_bit(handle, ioc->pd_handles) && sas_device->volume_handle) {
-		spin_lock_irqsave(&ioc->raid_device_lock, flags);
-		raid_device = _scsih_raid_device_find_by_handle(
-		    ioc, sas_device->volume_handle);
-		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
-		if (raid_device) {
-			id = raid_device->id;
-			channel = raid_device->channel;
-			handle = raid_device->handle;
-			sas_address = raid_device->wwid;
-		}
-	}
-
-	if (ioc->logging_level & MPT_DEBUG_TASK_SET_FULL)
-		starget_printk(KERN_INFO, sas_device->starget, "task set "
-		    "full: handle(0x%04x), sas_addr(0x%016llx), depth(%d)\n",
-		    handle, (unsigned long long)sas_address, current_depth);
-
-	shost_for_each_device(sdev, ioc->shost) {
-		if (sdev->id == id && sdev->channel == channel) {
-			if (current_depth > sdev->queue_depth) {
-				if (ioc->logging_level &
-				    MPT_DEBUG_TASK_SET_FULL)
-					sdev_printk(KERN_INFO, sdev, "strange "
-					    "observation, the queue depth is"
-					    " (%d) meanwhile fw queue depth "
-					    "is (%d)\n", sdev->queue_depth,
-					    current_depth);
-				continue;
-			}
-			depth = scsi_track_queue_full(sdev,
-			    current_depth - 1);
-			if (depth > 0)
-				sdev_printk(KERN_INFO, sdev, "Queue depth "
-				    "reduced to (%d)\n", depth);
-			else if (depth < 0)
-				sdev_printk(KERN_INFO, sdev, "Tagged Command "
-				    "Queueing is being disabled\n");
-			else if (depth == 0)
-				if (ioc->logging_level &
-				     MPT_DEBUG_TASK_SET_FULL)
-					sdev_printk(KERN_INFO, sdev,
-					     "Queue depth not changed yet\n");
-		}
-	}
-}
-
-/**
  * _scsih_prep_device_scan - initialize parameters prior to device scan
  * @ioc: per adapter object
  *
@@ -6219,7 +6198,7 @@
 			sas_expander->responding = 0;
 			continue;
 		}
-		_scsih_expander_remove(ioc, sas_expander->sas_address);
+		mpt2sas_expander_remove(ioc, sas_expander->sas_address);
 		goto retry_expander_search;
 	}
 }
@@ -6343,9 +6322,6 @@
 	case MPI2_EVENT_IR_OPERATION_STATUS:
 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
 		break;
-	case MPI2_EVENT_TASK_SET_FULL:
-		_scsih_task_set_full(ioc, fw_event);
-		break;
 	}
 	_scsih_fw_event_free(ioc, fw_event);
 }
@@ -6415,7 +6391,6 @@
 	case MPI2_EVENT_SAS_DISCOVERY:
 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
 	case MPI2_EVENT_IR_PHYSICAL_DISK:
-	case MPI2_EVENT_TASK_SET_FULL:
 		break;
 
 	default: /* ignore the rest */
@@ -6490,56 +6465,23 @@
 _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
     struct _sas_node *sas_expander)
 {
-	struct _sas_port *mpt2sas_port;
-	struct _sas_device *sas_device;
-	struct _sas_node *expander_sibling;
-	unsigned long flags;
-
-	if (!sas_expander)
-		return;
+	struct _sas_port *mpt2sas_port, *next;
 
 	/* remove sibling ports attached to this expander */
- retry_device_search:
-	list_for_each_entry(mpt2sas_port,
+	list_for_each_entry_safe(mpt2sas_port, next,
 	   &sas_expander->sas_port_list, port_list) {
+		if (ioc->shost_recovery)
+			return;
 		if (mpt2sas_port->remote_identify.device_type ==
-		    SAS_END_DEVICE) {
-			spin_lock_irqsave(&ioc->sas_device_lock, flags);
-			sas_device =
-			    mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-			   mpt2sas_port->remote_identify.sas_address);
-			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-			if (!sas_device)
-				continue;
-			_scsih_remove_device(ioc, sas_device);
-			if (ioc->shost_recovery)
-				return;
-			goto retry_device_search;
-		}
-	}
-
- retry_expander_search:
-	list_for_each_entry(mpt2sas_port,
-	   &sas_expander->sas_port_list, port_list) {
-
-		if (mpt2sas_port->remote_identify.device_type ==
-		    MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+		    SAS_END_DEVICE)
+			mpt2sas_device_remove(ioc,
+			    mpt2sas_port->remote_identify.sas_address);
+		else if (mpt2sas_port->remote_identify.device_type ==
+		    SAS_EDGE_EXPANDER_DEVICE ||
 		    mpt2sas_port->remote_identify.device_type ==
-		    MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
-
-			spin_lock_irqsave(&ioc->sas_node_lock, flags);
-			expander_sibling =
-			    mpt2sas_scsih_expander_find_by_sas_address(
-			    ioc, mpt2sas_port->remote_identify.sas_address);
-			spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
-			if (!expander_sibling)
-				continue;
-			_scsih_expander_remove(ioc,
-			    expander_sibling->sas_address);
-			if (ioc->shost_recovery)
-				return;
-			goto retry_expander_search;
-		}
+		    SAS_FANOUT_EXPANDER_DEVICE)
+			mpt2sas_expander_remove(ioc,
+			    mpt2sas_port->remote_identify.sas_address);
 	}
 
 	mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
@@ -6550,7 +6492,6 @@
 	    sas_expander->handle, (unsigned long long)
 	    sas_expander->sas_address);
 
-	list_del(&sas_expander->list);
 	kfree(sas_expander->phy);
 	kfree(sas_expander);
 }
@@ -6668,9 +6609,7 @@
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
-	struct _sas_port *mpt2sas_port;
-	struct _sas_device *sas_device;
-	struct _sas_node *expander_sibling;
+	struct _sas_port *mpt2sas_port, *next_port;
 	struct _raid_device *raid_device, *next;
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 	struct workqueue_struct	*wq;
@@ -6702,28 +6641,18 @@
 	}
 
 	/* free ports attached to the sas_host */
- retry_again:
-	list_for_each_entry(mpt2sas_port,
+	list_for_each_entry_safe(mpt2sas_port, next_port,
 	   &ioc->sas_hba.sas_port_list, port_list) {
 		if (mpt2sas_port->remote_identify.device_type ==
-		    SAS_END_DEVICE) {
-			sas_device =
-			    mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
-			   mpt2sas_port->remote_identify.sas_address);
-			if (sas_device) {
-				_scsih_remove_device(ioc, sas_device);
-				goto retry_again;
-			}
-		} else {
-			expander_sibling =
-			    mpt2sas_scsih_expander_find_by_sas_address(ioc,
+		    SAS_END_DEVICE)
+			mpt2sas_device_remove(ioc,
 			    mpt2sas_port->remote_identify.sas_address);
-			if (expander_sibling) {
-				_scsih_expander_remove(ioc,
-				    expander_sibling->sas_address);
-				goto retry_again;
-			}
-		}
+		else if (mpt2sas_port->remote_identify.device_type ==
+		    SAS_EDGE_EXPANDER_DEVICE ||
+		    mpt2sas_port->remote_identify.device_type ==
+		    SAS_FANOUT_EXPANDER_DEVICE)
+			mpt2sas_expander_remove(ioc,
+			    mpt2sas_port->remote_identify.sas_address);
 	}
 
 	/* free phys attached to the sas_host */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index b55c6dc..cb1cdec 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -465,62 +465,149 @@
 	return rc;
 }
 
-
 /**
- * _transport_delete_duplicate_port - (see below description)
+ * _transport_delete_port - helper function to remove a port
  * @ioc: per adapter object
- * @sas_node: sas node object (either expander or sas host)
- * @sas_address: sas address of device being added
- * @phy_num: phy number
+ * @mpt2sas_port: mpt2sas per port object
  *
- * This function is called when attempting to add a new port that is claiming
- * the same phy resources already in use by another port.  If we don't release
- * the claimed phy resources, the sas transport layer will hang from the BUG
- * in sas_port_add_phy.
- *
- * The reason we would hit this issue is becuase someone is changing the
- * sas address of a device on the fly, meanwhile controller firmware sends
- * EVENTs out of order when removing the previous instance of the device.
+ * Returns nothing.
  */
 static void
-_transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc,
-    struct _sas_node *sas_node, u64 sas_address, int phy_num)
+_transport_delete_port(struct MPT2SAS_ADAPTER *ioc,
+	struct _sas_port *mpt2sas_port)
 {
-	struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate;
-	struct _sas_phy *mpt2sas_phy;
+	u64 sas_address = mpt2sas_port->remote_identify.sas_address;
+	enum sas_device_type device_type =
+	    mpt2sas_port->remote_identify.device_type;
 
-	printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), "
-	    "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address,
-	    phy_num);
+	dev_printk(KERN_INFO, &mpt2sas_port->port->dev,
+	    "remove: sas_addr(0x%016llx)\n",
+	    (unsigned long long) sas_address);
 
-	mpt2sas_port_duplicate = NULL;
-	list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) {
-		dev_printk(KERN_ERR, &mpt2sas_port->port->dev,
-		    "existing device at sas_addr(0x%016llx), num_phys(%d)\n",
-		    (unsigned long long)
-		    mpt2sas_port->remote_identify.sas_address,
-		    mpt2sas_port->num_phys);
-		list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list,
-		    port_siblings) {
-			dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev,
-			    "phy_number(%d)\n", mpt2sas_phy->phy_id);
-			if (mpt2sas_phy->phy_id == phy_num)
-				mpt2sas_port_duplicate = mpt2sas_port;
-		}
-	}
+	ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+	if (device_type == SAS_END_DEVICE)
+		mpt2sas_device_remove(ioc, sas_address);
+	else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+	    device_type == SAS_FANOUT_EXPANDER_DEVICE)
+		mpt2sas_expander_remove(ioc, sas_address);
+	ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
 
-	if (!mpt2sas_port_duplicate)
+/**
+ * _transport_delete_phy - helper function to remove a single phy from a port
+ * @ioc: per adapter object
+ * @mpt2sas_port: mpt2sas per port object
+ * @mpt2sas_phy: mpt2sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT2SAS_ADAPTER *ioc,
+	struct _sas_port *mpt2sas_port, struct _sas_phy *mpt2sas_phy)
+{
+	u64 sas_address = mpt2sas_port->remote_identify.sas_address;
+
+	dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
+	    "remove: sas_addr(0x%016llx), phy(%d)\n",
+	    (unsigned long long) sas_address, mpt2sas_phy->phy_id);
+
+	list_del(&mpt2sas_phy->port_siblings);
+	mpt2sas_port->num_phys--;
+	sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy);
+	mpt2sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function to add a single phy to a port
+ * @ioc: per adapter object
+ * @mpt2sas_port: mpt2sas per port object
+ * @mpt2sas_phy: mpt2sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_port *mpt2sas_port,
+	struct _sas_phy *mpt2sas_phy)
+{
+	u64 sas_address = mpt2sas_port->remote_identify.sas_address;
+
+	dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev,
+	    "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+	    sas_address, mpt2sas_phy->phy_id);
+
+	list_add_tail(&mpt2sas_phy->port_siblings, &mpt2sas_port->phy_list);
+	mpt2sas_port->num_phys++;
+	sas_port_add_phy(mpt2sas_port->port, mpt2sas_phy->phy);
+	mpt2sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt2sas_phy: mpt2sas per phy object
+ * @sas_address: sas address of device/expander where phy needs to be added
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
+struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy, u64 sas_address)
+{
+	struct _sas_port *mpt2sas_port;
+	struct _sas_phy *phy_srch;
+
+	if (mpt2sas_phy->phy_belongs_to_port == 1)
 		return;
 
-	dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev,
-	    "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n",
-	    (unsigned long long)
-	    mpt2sas_port_duplicate->remote_identify.sas_address, phy_num);
-	ioc->logging_level |= MPT_DEBUG_TRANSPORT;
-	mpt2sas_transport_port_remove(ioc,
-	    mpt2sas_port_duplicate->remote_identify.sas_address,
-	    sas_node->sas_address);
-	ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+	list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list,
+	    port_list) {
+		if (mpt2sas_port->remote_identify.sas_address !=
+		    sas_address)
+			continue;
+		list_for_each_entry(phy_srch, &mpt2sas_port->phy_list,
+		    port_siblings) {
+			if (phy_srch == mpt2sas_phy)
+				return;
+		}
+		_transport_add_phy(ioc, mpt2sas_port, mpt2sas_phy);
+		return;
+	}
+
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt2sas_phy: mpt2sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_del_phy_from_an_existing_port(struct MPT2SAS_ADAPTER *ioc,
+	struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy)
+{
+	struct _sas_port *mpt2sas_port, *next;
+	struct _sas_phy *phy_srch;
+
+	if (mpt2sas_phy->phy_belongs_to_port == 0)
+		return;
+
+	list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list,
+	    port_list) {
+		list_for_each_entry(phy_srch, &mpt2sas_port->phy_list,
+		    port_siblings) {
+			if (phy_srch != mpt2sas_phy)
+				continue;
+			if (mpt2sas_port->num_phys == 1)
+				_transport_delete_port(ioc, mpt2sas_port);
+			else
+				_transport_delete_phy(ioc, mpt2sas_port,
+				    mpt2sas_phy);
+			return;
+		}
+	}
 }
 
 /**
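
Note: the helpers above replace the old duplicate-port deletion with per-phy
bookkeeping: a phy carries phy_belongs_to_port, joining an existing wide port
bumps num_phys, and removing the last member phy removes the port itself. A
standalone model (not driver code) of that bookkeeping:

#include <stdio.h>

struct phy_standin {
	int phy_belongs_to_port;
	unsigned long long remote_sas_address;
};

struct port_standin {
	int num_phys;
	unsigned long long remote_sas_address;
};

/* mirrors _transport_add_phy_to_an_existing_port(): only attach a phy that is
 * not already claimed and that reports the port's SAS address */
static void add_phy(struct port_standin *port, struct phy_standin *phy)
{
	if (phy->phy_belongs_to_port ||
	    phy->remote_sas_address != port->remote_sas_address)
		return;
	port->num_phys++;
	phy->phy_belongs_to_port = 1;
}

/* mirrors _transport_del_phy_from_an_existing_port(): dropping the last phy
 * tears down the whole port */
static void del_phy(struct port_standin *port, struct phy_standin *phy)
{
	if (!phy->phy_belongs_to_port)
		return;
	phy->phy_belongs_to_port = 0;
	if (--port->num_phys == 0)
		printf("last phy gone: removing port 0x%016llx\n",
		       port->remote_sas_address);
}

int main(void)
{
	struct port_standin port = { .num_phys = 0, .remote_sas_address = 0x5000ULL };
	struct phy_standin phy = { .phy_belongs_to_port = 0, .remote_sas_address = 0x5000ULL };

	add_phy(&port, &phy);
	add_phy(&port, &phy);          /* second call is a no-op */
	printf("num_phys after add: %d\n", port.num_phys);
	del_phy(&port, &phy);
	return 0;
}
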
@@ -537,11 +624,13 @@
 {
 	int i;
 
-	for (i = 0; i < sas_node->num_phys; i++)
-		if (sas_node->phy[i].remote_identify.sas_address == sas_address)
-			if (sas_node->phy[i].phy_belongs_to_port)
-				_transport_delete_duplicate_port(ioc, sas_node,
-					sas_address, i);
+	for (i = 0; i < sas_node->num_phys; i++) {
+		if (sas_node->phy[i].remote_identify.sas_address != sas_address)
+			continue;
+		if (sas_node->phy[i].phy_belongs_to_port == 1)
+			_transport_del_phy_from_an_existing_port(ioc, sas_node,
+			    &sas_node->phy[i]);
+	}
 }
 
 /**
@@ -905,10 +994,12 @@
 
 	mpt2sas_phy = &sas_node->phy[phy_number];
 	mpt2sas_phy->attached_handle = handle;
-	if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5))
+	if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
 		_transport_set_identify(ioc, handle,
 		    &mpt2sas_phy->remote_identify);
-	else
+		_transport_add_phy_to_an_existing_port(ioc, sas_node,
+		    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
+	} else
 		memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
 		    sas_identify));
 
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c86b2..b95285f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -603,7 +603,7 @@
 #endif
 
 intx:
-	/* intialize the INT-X interrupt */
+	/* initialize the INT-X interrupt */
 	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
 		SHOST_TO_SAS_HA(pm8001_ha->shost));
 	return rc;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 300d59f..321cf3a 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2228,12 +2228,7 @@
 		/* Once either bist or pci reset is done, restore PCI config
 		 * space. If this fails, proceed with hard reset again
 		 */
-		if (pci_restore_state(pinstance->pdev)) {
-			pmcraid_info("config-space error resetting again\n");
-			pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
-			pmcraid_reset_alert(cmd);
-			break;
-		}
+		pci_restore_state(pinstance->pdev);
 
 		/* fail all pending commands */
 		pmcraid_fail_outstanding_cmds(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index bc8194f..44578b5 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1309,6 +1309,31 @@
 }
 
 static ssize_t
+qla2x00_thermal_temp_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int rval = QLA_FUNCTION_FAILED;
+	uint16_t temp, frac;
+
+	if (!vha->hw->flags.thermal_supported)
+		return snprintf(buf, PAGE_SIZE, "\n");
+
+	temp = frac = 0;
+	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+		DEBUG2_3_11(printk(KERN_WARNING
+		    "%s(%ld): isp reset in progress.\n",
+		    __func__, vha->host_no));
+	else if (!vha->hw->flags.eeh_busy)
+		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
+	if (rval != QLA_SUCCESS)
+		temp = frac = 0;
+
+	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+}
+
+static ssize_t
 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
     char *buf)
 {
@@ -1366,6 +1391,7 @@
 		   qla2x00_vn_port_mac_address_show, NULL);
 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
+static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1394,6 +1420,7 @@
 	&dev_attr_fabric_param,
 	&dev_attr_fw_state,
 	&dev_attr_optrom_gold_fw_version,
+	&dev_attr_thermal_temp,
 	NULL,
 };
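
Note: the new read-only thermal_temp attribute reports the board temperature as
"<deg>.<frac>" and an empty line when thermal reporting is unavailable or an
ISP reset is in flight. A user-space sketch for reading it; the sysfs path is
an assumption (scsi_host class attributes normally appear under
/sys/class/scsi_host/host<N>/), so adjust <N> for the adapter in question:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/thermal_temp"; /* assumed path */
	unsigned int temp, frac;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u.%u", &temp, &frac) == 2)
		printf("board temperature: %u.%02u\n", temp, frac);
	else
		printf("thermal reading not available\n");
	fclose(f);
	return 0;
}
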
 
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 31a4121..903b058 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -103,7 +103,7 @@
 
 	bsg_job->reply->reply_payload_rcv_len = 0;
 
-	if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) {
+	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
 		ret = -EINVAL;
 		goto exit_fcp_prio_cfg;
 	}
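
Note: the change above is a De Morgan fix. Assuming a given adapter never
satisfies both type checks at once, the old "!A || !B" test is always true and
rejects every adapter, while "!(A || B)" rejects only adapters that are neither
type. A tiny standalone illustration (not driver code):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	bool is_qla24xx = true;   /* pretend we probed a 24xx-type adapter */
	bool is_qla25xx = false;  /* it cannot be both types at once */

	printf("old check rejects it: %d\n", !is_qla24xx || !is_qla25xx);  /* 1: bug */
	printf("new check rejects it: %d\n", !(is_qla24xx || is_qla25xx)); /* 0: correct */
	return 0;
}
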
@@ -753,7 +753,7 @@
 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
 			rval = qla2x00_loopback_test(vha, &elreq, response);
 
-			if (new_config[1]) {
+			if (new_config[0]) {
 				/* Revert back to original port config
 				 * Also clear internal loopback
 				 */
@@ -1512,6 +1512,7 @@
 				if (((sp_bsg->type == SRB_CT_CMD) ||
 					(sp_bsg->type == SRB_ELS_CMD_HST))
 					&& (sp_bsg->u.bsg_job == bsg_job)) {
+					spin_unlock_irqrestore(&ha->hardware_lock, flags);
 					if (ha->isp_ops->abort_command(sp)) {
 						DEBUG2(qla_printk(KERN_INFO, ha,
 						    "scsi(%ld): mbx "
@@ -1527,6 +1528,7 @@
 						bsg_job->req->errors =
 						bsg_job->reply->result = 0;
 					}
+					spin_lock_irqsave(&ha->hardware_lock, flags);
 					goto done;
 				}
 			}
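
The unlock/lock pair added around abort_command() follows a common pattern: drop a spinlock across a call that may sleep or re-take the same lock, then re-acquire it before touching the protected structures again, and stop trusting the iteration state once the lock has been released. A minimal sketch of that pattern with illustrative names (struct item and the callback are placeholders, not the driver's types):

#include <linux/spinlock.h>
#include <linux/list.h>

/* Illustrative type only -- not the qla2xxx srb structures. */
struct item { struct list_head list; int id; };

static void abort_one(spinlock_t *lock, struct list_head *head, int id,
		      void (*slow_op)(struct item *))
{
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(it, head, list) {
		if (it->id != id)
			continue;
		/* Drop the lock around an operation that may sleep or
		 * take the same lock (e.g. a mailbox command). */
		spin_unlock_irqrestore(lock, flags);
		slow_op(it);
		spin_lock_irqsave(lock, flags);
		/* The list may have changed while unlocked; stop here. */
		break;
	}
	spin_unlock_irqrestore(lock, flags);
}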
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9ce539d..ccfc8e7 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2425,6 +2425,9 @@
 		uint32_t	disable_msix_handshake	:1;
 		uint32_t	fcp_prio_enabled	:1;
 		uint32_t	fw_hung	:1;
+		uint32_t        quiesce_owner:1;
+		uint32_t	thermal_supported:1;
+		/* 26 bits */
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2863,6 +2866,7 @@
 #define ISP_UNRECOVERABLE	17
 #define FCOE_CTX_RESET_NEEDED	18	/* Initiate FCoE context reset */
 #define MPI_RESET_NEEDED	19	/* Initiate MPI FW reset */
+#define ISP_QUIESCE_NEEDED	20	/* Driver need some quiescence */
 
 	uint32_t	device_flags;
 #define SWITCH_FOUND		BIT_0
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9382a81..89e900a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -36,6 +36,7 @@
 extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *);
 extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *);
 
+extern int qla2x00_perform_loop_resync(scsi_qla_host_t *);
 extern int qla2x00_loop_resync(scsi_qla_host_t *);
 
 extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
@@ -45,12 +46,15 @@
 
 extern int qla2x00_abort_isp(scsi_qla_host_t *);
 extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
+extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *);
 
 extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
 
 extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
 
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+
 extern void qla84xx_put_chip(struct scsi_qla_host *);
 
 extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
@@ -68,6 +72,7 @@
 extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
 	struct srb_iocb *);
 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
 
 extern fc_port_t *
 qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
@@ -90,7 +95,6 @@
 extern int ql2xetsenable;
 extern int ql2xshiftctondsd;
 extern int ql2xdbwr;
-extern int ql2xdontresethba;
 extern int ql2xasynctmfenable;
 extern int ql2xgffidenable;
 extern int ql2xenabledif;
@@ -549,9 +553,11 @@
 
 /* ISP 8021 IDC */
 extern void qla82xx_clear_drv_active(struct qla_hw_data *);
+extern uint32_t  qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
 extern int qla82xx_idc_lock(struct qla_hw_data *);
 extern void qla82xx_idc_unlock(struct qla_hw_data *);
 extern int qla82xx_device_state_handler(scsi_qla_host_t *);
+extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
 
 extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
     size_t, char *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 259f511..f948e1a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -498,6 +498,7 @@
 	vha->flags.reset_active = 0;
 	ha->flags.pci_channel_io_perm_failure = 0;
 	ha->flags.eeh_busy = 0;
+	ha->flags.thermal_supported = 1;
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	vha->device_flags = DFLG_NO_CABLE;
@@ -2023,6 +2024,7 @@
 	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
 	if (rval != QLA_SUCCESS) {
 		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
+		    IS_QLA8XXX_TYPE(ha) ||
 		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
 			DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
 			    __func__, vha->host_no));
@@ -2928,6 +2930,7 @@
 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
 	qla2x00_iidma_fcport(vha, fcport);
+	qla24xx_update_fcport_fcp_prio(vha, fcport);
 	qla2x00_reg_remote_port(vha, fcport);
 	atomic_set(&fcport->state, FCS_ONLINE);
 }
@@ -3844,6 +3847,37 @@
 	return (rval);
 }
 
+/*
+* qla2x00_perform_loop_resync
+* Description: This function will set the appropriate flags and call
+*              qla2x00_loop_resync. If successful, the loop will be resynced.
+* Arguments : scsi_qla_host_t pointer
+* return    : Success or Failure
+*/
+
+int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
+{
+	int32_t rval = 0;
+
+	if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
+		/*Configure the flags so that resync happens properly*/
+		atomic_set(&ha->loop_down_timer, 0);
+		if (!(ha->device_flags & DFLG_NO_CABLE)) {
+			atomic_set(&ha->loop_state, LOOP_UP);
+			set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+			set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+			set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+
+			rval = qla2x00_loop_resync(ha);
+		} else
+			atomic_set(&ha->loop_state, LOOP_DEAD);
+
+		clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
+	}
+
+	return rval;
+}
+
 void
 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
@@ -3857,7 +3891,7 @@
 	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
 		atomic_inc(&vha->vref_count);
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
-			if (fcport && fcport->drport &&
+			if (fcport->drport &&
 			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
 				spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -3871,11 +3905,43 @@
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
 
+/*
+* qla82xx_quiescent_state_cleanup
+* Description: This function blocks new I/Os.
+*              It does not abort outstanding I/Os, as the
+*              context is not destroyed during quiescence.
+* Arguments: scsi_qla_host_t
+* return   : void
+*/
+void
+qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct scsi_qla_host *vp;
+
+	qla_printk(KERN_INFO, ha,
+			"Performing ISP quiescence cleanup - ha=%p.\n", ha);
+
+	atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		qla2x00_mark_all_devices_lost(vha, 0);
+		list_for_each_entry(vp, &ha->vp_list, list)
+			qla2x00_mark_all_devices_lost(vp, 0);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer,
+					LOOP_DOWN_TIME);
+	}
+	/* Wait for pending cmds to complete */
+	qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
+}
+
 void
 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
+	struct scsi_qla_host *vp;
 	unsigned long flags;
 
 	vha->flags.online = 0;
@@ -3896,7 +3962,7 @@
 		qla2x00_mark_all_devices_lost(vha, 0);
 
 		spin_lock_irqsave(&ha->vport_slock, flags);
-		list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
+		list_for_each_entry(vp, &ha->vp_list, list) {
 			atomic_inc(&vp->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -5410,7 +5476,7 @@
  *	the tag (priority) value is returned.
  *
  * Input:
- *	ha = adapter block po
+ *	vha = scsi host structure pointer.
  *	fcport = port structure pointer.
  *
  * Return:
@@ -5504,7 +5570,7 @@
  *	Activates fcp priority for the logged in fc port
  *
  * Input:
- *	ha = adapter block pointer.
+ *	vha = scsi host structure pointer.
  *	fcp = port structure pointer.
  *
  * Return:
@@ -5514,25 +5580,24 @@
  *	Kernel context.
  */
 int
-qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport)
+qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int ret;
 	uint8_t priority;
 	uint16_t mb[5];
 
-	if (atomic_read(&fcport->state) == FCS_UNCONFIGURED ||
-		fcport->port_type != FCT_TARGET ||
-		fcport->loop_id == FC_NO_LOOP_ID)
+	if (fcport->port_type != FCT_TARGET ||
+	    fcport->loop_id == FC_NO_LOOP_ID)
 		return QLA_FUNCTION_FAILED;
 
-	priority = qla24xx_get_fcp_prio(ha, fcport);
-	ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb);
+	priority = qla24xx_get_fcp_prio(vha, fcport);
+	ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
 	if (ret == QLA_SUCCESS)
 		fcport->fcp_prio = priority;
 	else
 		DEBUG2(printk(KERN_WARNING
 			"scsi(%ld): Unable to activate fcp priority, "
-			" ret=0x%x\n", ha->host_no, ret));
+			" ret=0x%x\n", vha->host_no, ret));
 
 	return  ret;
 }
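
qla2x00_perform_loop_resync() above uses test_and_set_bit() on LOOP_RESYNC_ACTIVE as a non-blocking guard: only one caller performs the resync, everyone else returns immediately, and the bit is cleared when the work is done. A stripped-down sketch of that idiom; the names and the do_resync() placeholder are illustrative:

#include <linux/bitops.h>

#define DEMO_RESYNC_ACTIVE	0

int do_resync(void);	/* placeholder for the multi-step operation */

static int demo_try_resync(unsigned long *flags)
{
	int rval = 0;

	/* Atomically claim the operation; bail out if someone else
	 * is already running it. */
	if (test_and_set_bit(DEMO_RESYNC_ACTIVE, flags))
		return 0;

	rval = do_resync();

	clear_bit(DEMO_RESYNC_ACTIVE, flags);
	return rval;
}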
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7f77898..d17ed9a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -321,6 +321,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 	uint32_t	rscn_entry, host_pid;
 	uint8_t		rscn_queue_index;
 	unsigned long	flags;
@@ -498,6 +499,7 @@
 
 	case MBA_LOOP_DOWN:		/* Loop Down Event */
 		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
+		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
 		    "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
 		    mbx));
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index effd8a1..e473e9f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4125,7 +4125,7 @@
 		return QLA_FUNCTION_FAILED;
 
 	DEBUG11(printk(KERN_INFO
-	    "%s(%ld): entered.\n", __func__, ha->host_no));
+	    "%s(%ld): entered.\n", __func__, vha->host_no));
 
 	mcp->mb[0] = MBC_PORT_PARAMS;
 	mcp->mb[1] = loop_id;
@@ -4160,6 +4160,71 @@
 }
 
 int
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, ha->host_no));
+
+	/* High bits. */
+	mcp->mb[0] = MBC_READ_SFP;
+	mcp->mb[1] = 0x98;
+	mcp->mb[2] = 0;
+	mcp->mb[3] = 0;
+	mcp->mb[6] = 0;
+	mcp->mb[7] = 0;
+	mcp->mb[8] = 1;
+	mcp->mb[9] = 0x01;
+	mcp->mb[10] = BIT_13|BIT_0;
+	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2_3_11(printk(KERN_WARNING
+		    "%s(%ld): failed=%x (%x).\n", __func__,
+		    vha->host_no, rval, mcp->mb[0]));
+		ha->flags.thermal_supported = 0;
+		goto fail;
+	}
+	*temp = mcp->mb[1] & 0xFF;
+
+	/* Low bits. */
+	mcp->mb[0] = MBC_READ_SFP;
+	mcp->mb[1] = 0x98;
+	mcp->mb[2] = 0;
+	mcp->mb[3] = 0;
+	mcp->mb[6] = 0;
+	mcp->mb[7] = 0;
+	mcp->mb[8] = 1;
+	mcp->mb[9] = 0x10;
+	mcp->mb[10] = BIT_13|BIT_0;
+	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qla2x00_mailbox_command(vha, mcp);
+	if (rval != QLA_SUCCESS) {
+		DEBUG2_3_11(printk(KERN_WARNING
+		    "%s(%ld): failed=%x (%x).\n", __func__,
+		    vha->host_no, rval, mcp->mb[0]));
+		ha->flags.thermal_supported = 0;
+		goto fail;
+	}
+	*frac = ((mcp->mb[1] & 0xFF) >> 6) * 25;
+
+	if (rval == QLA_SUCCESS)
+		DEBUG11(printk(KERN_INFO
+		    "%s(%ld): done.\n", __func__, ha->host_no));
+fail:
+	return rval;
+}
+
+int
 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
 {
 	int rval;
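
qla2x00_get_thermal_temp() issues two MBC_READ_SFP reads: the first returns the whole degrees and the second a byte whose top two bits encode quarter degrees, which the show routine prints as "temp.frac". A small stand-alone sketch of that decoding; the raw values in main() are made up for the example:

#include <stdio.h>
#include <stdint.h>

/* Decode the two raw SFP temperature bytes the way the sysfs show
 * routine prints them: "<temp>.<frac>" with frac in 0.25 degC steps.
 */
static void decode_temp(uint16_t high_mb1, uint16_t low_mb1)
{
	uint16_t temp = high_mb1 & 0xFF;		/* whole degrees */
	uint16_t frac = ((low_mb1 & 0xFF) >> 6) * 25;	/* 0, 25, 50, 75 */

	printf("%d.%02d\n", temp, frac);
}

int main(void)
{
	decode_temp(0x23, 0xC0);	/* example raw values -> "35.75" */
	return 0;
}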
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index ae2acac..fdb96a3 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1079,11 +1079,55 @@
 
 	/* Halt all the individual PEGs and other blocks of the ISP */
 	qla82xx_rom_lock(ha);
+
+	/* mask all niu interrupts */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+	/* disable xge rx/tx */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+	/* disable xg1 rx/tx */
+	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+
+	/* halt sre */
+	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+
+	/* halt epg */
+	qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+
+	/* halt timers */
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+
+	/* halt pegs */
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+
+	/* big hammer */
+	msleep(1000);
 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
 		/* don't reset CAM block on reset */
 		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
 	else
 		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
+
+	/* reset ms */
+	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
+	val |= (1 << 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
+	msleep(20);
+
+	/* unreset ms */
+	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
+	val &= ~(1 << 1);
+	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
+	msleep(20);
+
 	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
 
 	/* Read the signature value from the flash.
@@ -1210,25 +1254,6 @@
 }
 
 static int
-qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
-{
-	u32 val = 0;
-	val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
-	val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
-	if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
-		qla_printk(KERN_INFO, ha,
-			"Memory DIMM SPD not programmed. "
-			" Assumed valid.\n");
-		return 1;
-	} else if (val) {
-		qla_printk(KERN_INFO, ha,
-			"Memory DIMM type incorrect.Info:%08X.\n", val);
-		return 2;
-	}
-	return 0;
-}
-
-static int
 qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
 		u64 off, void *data, int size)
 {
@@ -1293,11 +1318,6 @@
 		word[startword+1] |= tmpw >> (sz[0] * 8);
 	}
 
-	/*
-	 * don't lock here - write_wx gets the lock if each time
-	 * write_lock_irqsave(&adapter->adapter_lock, flags);
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-	 */
 	for (i = 0; i < loop; i++) {
 		temp = off8 + (i << shift_amount);
 		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
@@ -1399,12 +1419,6 @@
 	off0[1] = 0;
 	sz[1] = size - sz[0];
 
-	/*
-	 * don't lock here - write_wx gets the lock if each time
-	 * write_lock_irqsave(&adapter->adapter_lock, flags);
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
-	 */
-
 	for (i = 0; i < loop; i++) {
 		temp = off8 + (i << shift_amount);
 		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
@@ -1437,11 +1451,6 @@
 		}
 	}
 
-	/*
-	 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
-	 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
-	 */
-
 	if (j >= MAX_CTL_CHECK)
 		return -1;
 
@@ -1872,7 +1881,6 @@
 	qla_printk(KERN_INFO, ha,
 	    "Cmd Peg initialization failed: 0x%x.\n", val);
 
-	qla82xx_check_for_bad_spd(ha);
 	val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
 	read_lock(&ha->hw_lock);
 	qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
@@ -2343,6 +2351,17 @@
 	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
 }
 
+void
+qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t qsnt_state;
+
+	qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
+	qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
+}
+
 static int
 qla82xx_load_fw(scsi_qla_host_t *vha)
 {
@@ -2542,7 +2561,7 @@
 			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
 			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
 			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			cur_seg++;
+			cur_seg = sg_next(cur_seg);
 			avail_dsds--;
 		}
 	}
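
The cur_seg++ to sg_next() change matters because SCSI scatterlists can be chained; plain pointer arithmetic walks past a chain entry instead of following it. A minimal sketch of the safe walk using for_each_sg(), assuming the list has already been DMA-mapped:

#include <linux/scatterlist.h>

/* Walk a possibly chained scatterlist the safe way: sg_next() (used by
 * for_each_sg()) follows chain entries, while plain "sg++" only works
 * for flat arrays.
 */
static unsigned int total_mapped_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	unsigned int len = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		len += sg_dma_len(sg);

	return len;
}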
@@ -3261,6 +3280,104 @@
 	return QLA_SUCCESS;
 }
 
+/*
+* qla82xx_need_qsnt_handler
+*    Code to start quiescence sequence
+*
+* Note:
+*      IDC lock must be held upon entry
+*
+* Return: void
+*/
+
+static void
+qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t dev_state, drv_state, drv_active;
+	unsigned long reset_timeout;
+
+	if (vha->flags.online) {
+		/* Block any further I/O and wait for pending commands to complete */
+		qla82xx_quiescent_state_cleanup(vha);
+	}
+
+	/* Set the quiescence ready bit */
+	qla82xx_set_qsnt_ready(ha);
+
+	/* Wait for 30 seconds for other functions to ack */
+	reset_timeout = jiffies + (30 * HZ);
+
+	drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+	drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+	/* Quiescence ack is written as 2, so shift drv_active by one bit */
+	drv_active = drv_active << 0x01;
+
+	while (drv_state != drv_active) {
+
+		if (time_after_eq(jiffies, reset_timeout)) {
+			/* Quiescence timeout: other functions didn't ack;
+			 * change the state back to DEV_READY
+			 */
+			qla_printk(KERN_INFO, ha,
+			    "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
+			qla_printk(KERN_INFO, ha,
+			    "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
+			    drv_state);
+			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+						QLA82XX_DEV_READY);
+			qla_printk(KERN_INFO, ha,
+			    "HW State: DEV_READY\n");
+			qla82xx_idc_unlock(ha);
+			qla2x00_perform_loop_resync(vha);
+			qla82xx_idc_lock(ha);
+
+			qla82xx_clear_qsnt_ready(vha);
+			return;
+		}
+
+		qla82xx_idc_unlock(ha);
+		msleep(1000);
+		qla82xx_idc_lock(ha);
+
+		drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
+		drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
+		drv_active = drv_active << 0x01;
+	}
+	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+	/* everyone acked so set the state to DEV_QUIESCENCE */
+	if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
+		qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
+		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
+	}
+}
+
+/*
+* qla82xx_wait_for_state_change
+*    Wait for device state to change from given current state
+*
+* Note:
+*     IDC lock must not be held upon entry
+*
+* Return:
+*    Changed device state.
+*/
+uint32_t
+qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t dev_state;
+
+	do {
+		msleep(1000);
+		qla82xx_idc_lock(ha);
+		dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+		qla82xx_idc_unlock(ha);
+	} while (dev_state == curr_state);
+
+	return dev_state;
+}
+
 static void
 qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
 {
@@ -3439,15 +3556,28 @@
 			qla82xx_idc_lock(ha);
 			break;
 		case QLA82XX_DEV_NEED_RESET:
-			if (!ql2xdontresethba)
-				qla82xx_need_reset_handler(vha);
+			qla82xx_need_reset_handler(vha);
 			break;
 		case QLA82XX_DEV_NEED_QUIESCENT:
-			qla82xx_set_qsnt_ready(ha);
+			qla82xx_need_qsnt_handler(vha);
+			/* Reset timeout value after quiescence handler */
+			dev_init_timeout = jiffies +
+					(ha->nx_dev_init_timeout * HZ);
+			break;
 		case QLA82XX_DEV_QUIESCENT:
+			/* The owner exits here; the others wait for the
+			 * state to change
+			 */
+			if (ha->flags.quiesce_owner)
+				goto exit;
+
 			qla82xx_idc_unlock(ha);
 			msleep(1000);
 			qla82xx_idc_lock(ha);
+
+			/* Reset timeout value after quiescence handler */
+			dev_init_timeout = jiffies +
+					(ha->nx_dev_init_timeout * HZ);
 			break;
 		case QLA82XX_DEV_FAILED:
 			qla82xx_dev_failed_handler(vha);
@@ -3490,6 +3620,13 @@
 					&ha->mbx_cmd_flags))
 					complete(&ha->mbx_intr_comp);
 			}
+		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
+			!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+			DEBUG(qla_printk(KERN_INFO, ha,
+				"scsi(%ld) %s - detected quiescence needed\n",
+				vha->host_no, __func__));
+			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
 		} else {
 			qla82xx_check_fw_alive(vha);
 		}
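
In qla82xx_need_qsnt_handler() above, each PCI function appears to own a 4-bit nibble in DRV_ACTIVE/DRV_STATE, with the active flag in bit 0 of the nibble and the quiescence ack written as 2, which is why drv_active is shifted left by one before the comparison. A small sketch of that bookkeeping under those assumptions (it ignores any other state bits the real registers may carry):

#include <stdio.h>
#include <stdint.h>

#define DRV_ACTIVE_BIT(fn)	(1u << ((fn) * 4))	/* function is active */
#define QSNT_ACK_BIT(fn)	(2u << ((fn) * 4))	/* function acked quiescence */

static int all_functions_acked(uint32_t drv_active, uint32_t drv_state)
{
	/* When every active function has set its ack bit, the shifted
	 * active mask equals the state mask. */
	return (drv_active << 1) == drv_state;
}

int main(void)
{
	uint32_t active = DRV_ACTIVE_BIT(0) | DRV_ACTIVE_BIT(2);
	uint32_t state  = QSNT_ACK_BIT(0);		/* fn2 not acked yet */

	printf("%d\n", all_functions_acked(active, state));	/* 0 */
	state |= QSNT_ACK_BIT(2);
	printf("%d\n", all_functions_acked(active, state));	/* 1 */
	return 0;
}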
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 51ec0c5..ed5883f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -523,8 +523,6 @@
 # define QLA82XX_CAM_RAM_BASE		(QLA82XX_CRB_CAM + 0x02000)
 # define QLA82XX_CAM_RAM(reg)		(QLA82XX_CAM_RAM_BASE + (reg))
 
-#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED	0x80000000
-#define QLA82XX_BOOT_LOADER_MN_ISSUE	0xff00ffff
 #define QLA82XX_PORT_MODE_ADDR		(QLA82XX_CAM_RAM(0x24))
 #define QLA82XX_PEG_HALT_STATUS1	(QLA82XX_CAM_RAM(0xa8))
 #define QLA82XX_PEG_HALT_STATUS2	(QLA82XX_CAM_RAM(0xac))
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2c0876c..c194c23 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -37,12 +37,12 @@
 static struct kmem_cache *ctx_cachep;
 
 int ql2xlogintimeout = 20;
-module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
+module_param(ql2xlogintimeout, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xlogintimeout,
 		"Login timeout value in seconds.");
 
 int qlport_down_retry;
-module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR);
+module_param(qlport_down_retry, int, S_IRUGO);
 MODULE_PARM_DESC(qlport_down_retry,
 		"Maximum number of command retries to a port that returns "
 		"a PORT-DOWN status.");
@@ -55,12 +55,12 @@
 		"Default is 0 - no PLOGI. 1 - perform PLOGI.");
 
 int ql2xloginretrycount = 0;
-module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
+module_param(ql2xloginretrycount, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xloginretrycount,
 		"Specify an alternate value for the NVRAM login retry count.");
 
 int ql2xallocfwdump = 1;
-module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR);
+module_param(ql2xallocfwdump, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xallocfwdump,
 		"Option to enable allocation of memory for a firmware dump "
 		"during HBA initialization.  Memory allocation requirements "
@@ -73,7 +73,7 @@
 		"Default is 0 - no logging. 1 - log errors.");
 
 int ql2xshiftctondsd = 6;
-module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR);
+module_param(ql2xshiftctondsd, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xshiftctondsd,
 		"Set to control shifting of command type processing "
 		"based on total number of SG elements.");
@@ -81,7 +81,7 @@
 static void qla2x00_free_device(scsi_qla_host_t *);
 
 int ql2xfdmienable=1;
-module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
+module_param(ql2xfdmienable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xfdmienable,
 		"Enables FDMI registrations. "
 		"0 - no FDMI. Default is 1 - perform FDMI.");
@@ -106,27 +106,27 @@
 		" Default is 0 - Error isolation disabled, 1 - Enable it");
 
 int ql2xiidmaenable=1;
-module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
+module_param(ql2xiidmaenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xiidmaenable,
 		"Enables iIDMA settings "
 		"Default is 1 - perform iIDMA. 0 - no iIDMA.");
 
 int ql2xmaxqueues = 1;
-module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
+module_param(ql2xmaxqueues, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmaxqueues,
 		"Enables MQ settings "
 		"Default is 1 for single queue. Set it to number "
 		"of queues in MQ mode.");
 
 int ql2xmultique_tag;
-module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
+module_param(ql2xmultique_tag, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmultique_tag,
 		"Enables CPU affinity settings for the driver "
 		"Default is 0 for no affinity of request and response IO. "
 		"Set it to 1 to turn on the cpu affinity.");
 
 int ql2xfwloadbin;
-module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
+module_param(ql2xfwloadbin, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xfwloadbin,
 		"Option to specify location from which to load ISP firmware:\n"
 		" 2 -- load firmware via the request_firmware() (hotplug)\n"
@@ -135,39 +135,32 @@
 		" 0 -- use default semantics.\n");
 
 int ql2xetsenable;
-module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
+module_param(ql2xetsenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xetsenable,
 		"Enables firmware ETS burst."
 		"Default is 0 - skip ETS enablement.");
 
 int ql2xdbwr = 1;
-module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR);
+module_param(ql2xdbwr, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xdbwr,
 	"Option to specify scheme for request queue posting\n"
 	" 0 -- Regular doorbell.\n"
 	" 1 -- CAMRAM doorbell (faster).\n");
 
-int ql2xdontresethba;
-module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR);
-MODULE_PARM_DESC(ql2xdontresethba,
-	"Option to specify reset behaviour\n"
-	" 0 (Default) -- Reset on failure.\n"
-	" 1 -- Do not reset on failure.\n");
-
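
The permission change on all of these parameters is cosmetic: S_IRUGO (0444) already includes S_IRUSR (0400), so ORing the latter in added nothing, and the parameters remain world-readable but not writable through sysfs. A toy module sketch of a read-only parameter; the demo names are illustrative:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* S_IRUGO alone is enough for a read-only sysfs parameter. */
static int demo_timeout = 20;
module_param(demo_timeout, int, S_IRUGO);
MODULE_PARM_DESC(demo_timeout, "Example read-only timeout in seconds.");

static int __init demo_init(void) { return 0; }
static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Once loaded, the resulting mode can be checked with ls -l /sys/module/<module>/parameters/.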
 int ql2xtargetreset = 1;
-module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR);
+module_param(ql2xtargetreset, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xtargetreset,
 		 "Enable target reset."
 		 "Default is 1 - use hw defaults.");
 
 int ql2xgffidenable;
-module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
+module_param(ql2xgffidenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xgffidenable,
 		"Enables GFF_ID checks of port type. "
 		"Default is 0 - Do not use GFF_ID information.");
 
 int ql2xasynctmfenable;
-module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
+module_param(ql2xasynctmfenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xasynctmfenable,
 		"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
 		"Default is 0 - Issue TM IOCBs via mailbox mechanism.");
@@ -2371,7 +2364,7 @@
 	list_for_each_entry(vha, &ha->vp_list, list) {
 		atomic_inc(&vha->vref_count);
 
-		if (vha && vha->fc_vport) {
+		if (vha->fc_vport) {
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 			fc_vport_terminate(vha->fc_vport);
@@ -3386,6 +3379,21 @@
 			clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		}
 
+		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
+			DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched "
+			    "qla2x00_quiesce_needed ha = %p\n",
+			    base_vha->host_no, ha));
+			qla82xx_device_state_handler(base_vha);
+			clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
+			if (!ha->flags.quiesce_owner) {
+				qla2x00_perform_loop_resync(base_vha);
+
+				qla82xx_idc_lock(ha);
+				qla82xx_clear_qsnt_ready(base_vha);
+				qla82xx_idc_unlock(ha);
+			}
+		}
+
 		if (test_and_clear_bit(RESET_MARKER_NEEDED,
 							&base_vha->dpc_flags) &&
 		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
@@ -3589,13 +3597,16 @@
 		return;
 	}
 
-	if (IS_QLA82XX(ha))
-		qla82xx_watchdog(vha);
-
 	/* Hardware read to raise pending EEH errors during mailbox waits. */
 	if (!pci_channel_offline(ha->pdev))
 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
 
+	if (IS_QLA82XX(ha)) {
+		if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
+			start_dpc++;
+		qla82xx_watchdog(vha);
+	}
+
 	/* Loop down handler. */
 	if (atomic_read(&vha->loop_down_timer) > 0 &&
 	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 76de957..2207062 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -669,6 +669,13 @@
 		def = 1;
 	else if (IS_QLA81XX(ha))
 		def = 2;
+
+	/* Assign the FCP prio region, since older adapters may not have an
+	 * FLT, or may lack an FCP prio region in their FLT.
+	 */
+	ha->flt_region_fcp_prio = ha->flags.port0 ?
+	    fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
+
 	ha->flt_region_flt = flt_addr;
 	wptr = (uint16_t *)req->ring;
 	flt = (struct qla_flt_header *)req->ring;
@@ -696,10 +703,6 @@
 		goto no_flash_data;
 	}
 
-	/* Assign FCP prio region since older FLT's may not have it */
-	ha->flt_region_fcp_prio = ha->flags.port0 ?
-	    fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
-
 	loc = locations[1];
 	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
 	for ( ; cnt; cnt--, region++) {
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index edcf048..af62c3c 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index d861c3b..abd8360 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 0f3bfc3..2fc0045 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -175,7 +175,7 @@
 struct srb {
 	struct list_head list;	/* (8)	 */
 	struct scsi_qla_host *ha;	/* HA the SP is queued on */
-	struct ddb_entry	*ddb;
+	struct ddb_entry *ddb;
 	uint16_t flags;		/* (1) Status flags. */
 
 #define SRB_DMA_VALID		BIT_3	/* DMA Buffer mapped. */
@@ -191,7 +191,6 @@
 	struct scsi_cmnd *cmd;	/* (4) SCSI command block */
 	dma_addr_t dma_handle;	/* (4) for unmap of single transfers */
 	struct kref srb_ref;	/* reference count for this srb */
-	uint32_t fw_ddb_index;
 	uint8_t err_id;		/* error id */
 #define SRB_ERR_PORT	   1	/* Request failed because "port down" */
 #define SRB_ERR_LOOP	   2	/* Request failed because "loop down" */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 5e757d7..c198579 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 6575a47..8fad99b 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index dc01fa3..1629c48 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 9471ac7..62f90bd 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 5ae49fd..75fcd82 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 7c33fd5..6ffbe97 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -554,7 +554,8 @@
 			/* mbox_sts[2] = Old ACB state
 			 * mbox_sts[3] = new ACB state */
 			if ((mbox_sts[3] == ACB_STATE_VALID) &&
-			    (mbox_sts[2] == ACB_STATE_TENTATIVE))
+			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
+			    (mbox_sts[2] == ACB_STATE_ACQUIRING)))
 				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
 			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
 			    (mbox_sts[2] == ACB_STATE_VALID))
@@ -1077,7 +1078,7 @@
 	ret = pci_enable_msi(ha->pdev);
 	if (!ret) {
 		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
-			IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
+			0, DRIVER_NAME, ha);
 		if (!ret) {
 			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
 			set_bit(AF_MSI_ENABLED, &ha->flags);
@@ -1095,7 +1096,7 @@
 try_intx:
 	/* Trying INTx */
 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
-	    IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha);
+	    IRQF_SHARED, DRIVER_NAME, ha);
 	if (!ret) {
 		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
 		set_bit(AF_INTx_ENABLED, &ha->flags);
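
Dropping IRQF_DISABLED here looks safe: the flag was being phased out and was never honored reliably for shared lines, so the handler behaves as an ordinary shared handler either way. A minimal sketch of a shared-IRQ registration; the demo_* names are placeholders, not the driver's API:

#include <linux/interrupt.h>
#include <linux/types.h>

/* Illustrative placeholders -- not the qla4xxx structures or helpers. */
struct demo_adapter;
bool demo_interrupt_is_ours(struct demo_adapter *ha);
void demo_handle_interrupt(struct demo_adapter *ha);

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct demo_adapter *ha = dev_id;

	/* A shared handler must check whether its device raised the line. */
	if (!demo_interrupt_is_ours(ha))
		return IRQ_NONE;

	demo_handle_interrupt(ha);
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct demo_adapter *ha, unsigned int irq)
{
	/* IRQF_SHARED requires a unique, non-NULL dev_id so free_irq()
	 * can tell the registrations apart. */
	return request_irq(irq, demo_isr, IRQF_SHARED, "demo", ha);
}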
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 2d2f9c8..f65626a 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -81,23 +81,7 @@
 	 */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	if (is_qla8022(ha)) {
-		intr_status = readl(&ha->qla4_8xxx_reg->host_int);
-		if (intr_status & ISRX_82XX_RISC_INT) {
-			/* Service existing interrupt */
-			DEBUG2(printk("scsi%ld: %s: "
-			    "servicing existing interrupt\n",
-			    ha->host_no, __func__));
-			intr_status = readl(&ha->qla4_8xxx_reg->host_status);
-			ha->isp_ops->interrupt_service_routine(ha, intr_status);
-			clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
-			if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
-			    test_bit(AF_INTx_ENABLED, &ha->flags))
-				qla4_8xxx_wr_32(ha,
-				    ha->nx_legacy_intr.tgt_mask_reg,
-				    0xfbff);
-		}
-	} else {
+	if (!is_qla8022(ha)) {
 		intr_status = readl(&ha->reg->ctrl_status);
 		if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
 			/* Service existing interrupt */
@@ -934,7 +918,7 @@
 		return status;
 
 	mbox_cmd[0] = MBOX_CMD_ABORT_TASK;
-	mbox_cmd[1] = srb->fw_ddb_index;
+	mbox_cmd[1] = srb->ddb->fw_ddb_index;
 	mbox_cmd[2] = index;
 	/* Immediate Command Enable */
 	mbox_cmd[5] = 0x01;
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index f0d0fbf..b4b859b 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 7a8fc66..b3831bd 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 474b10d..3d5ef2d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2009 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -942,12 +942,55 @@
 
 	/* Halt all the individual PEGs and other blocks of the ISP */
 	qla4_8xxx_rom_lock(ha);
+
+	/* mask all niu interrupts */
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
+	/* disable xge rx/tx */
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
+	/* disable xg1 rx/tx */
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
+
+	/* halt sre */
+	val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
+
+	/* halt epg */
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
+
+	/* halt timers */
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
+
+	/* halt pegs */
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
+
+	/* big hammer */
+	msleep(1000);
 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
 		/* don't reset CAM block on reset */
 		qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
 	else
 		qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
 
+	/* reset ms */
+	val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
+	val |= (1 << 1);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
+
+	msleep(20);
+	/* unreset ms */
+	val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
+	val &= ~(1 << 1);
+	qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
+	msleep(20);
+
 	qla4_8xxx_rom_unlock(ha);
 
 	/* Read the signature value from the flash.
@@ -1084,14 +1127,14 @@
 static int
 qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
 {
-	int  i;
+	int  i, rval = 0;
 	long size = 0;
 	long flashaddr, memaddr;
 	u64 data;
 	u32 high, low;
 
 	flashaddr = memaddr = ha->hw.flt_region_bootload;
-	size = (image_start - flashaddr)/8;
+	size = (image_start - flashaddr) / 8;
 
 	DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
 	    ha->host_no, __func__, flashaddr, image_start));
@@ -1100,14 +1143,18 @@
 		if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
 		    (qla4_8xxx_rom_fast_read(ha, flashaddr + 4,
 		    (int *)&high))) {
-			return -1;
+			rval = -1;
+			goto exit_load_from_flash;
 		}
 		data = ((u64)high << 32) | low ;
-		qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8);
+		rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8);
+		if (rval)
+			goto exit_load_from_flash;
+
 		flashaddr += 8;
 		memaddr   += 8;
 
-		if (i%0x1000 == 0)
+		if (i % 0x1000 == 0)
 			msleep(1);
 
 	}
@@ -1119,7 +1166,8 @@
 	qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
 	read_unlock(&ha->hw_lock);
 
-	return 0;
+exit_load_from_flash:
+	return rval;
 }
 
 static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
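
The rework of qla4_8xxx_load_from_flash() replaces the early returns with a single exit label and finally checks the result of qla4_8xxx_pci_mem_write_2M() instead of ignoring it. A generic sketch of that single-exit style; read_word()/write_word() are stand-ins, not the driver's helpers:

#include <linux/types.h>

/* Stand-in helpers, not the real flash/PCI routines. */
int read_word(unsigned long addr, u64 *data);
int write_word(unsigned long addr, u64 data);

/* Every failure path jumps to one label, so the return value is set in
 * exactly one place and no intermediate result is silently dropped.
 */
static int copy_words(unsigned long src, unsigned long dst, long nwords)
{
	int rval = 0;
	long i;
	u64 data;

	for (i = 0; i < nwords; i++) {
		rval = read_word(src + i * 8, &data);
		if (rval)
			goto exit_copy;

		rval = write_word(dst + i * 8, data);
		if (rval)
			goto exit_copy;
	}

exit_copy:
	return rval;
}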
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index ff689bf..35376a1 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,8 +1,8 @@
 /*
- * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2008 QLogic Corporation
+ * QLogic iSCSI HBA Driver
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
- * See LICENSE.qla2xxx for copyright and licensing details.
+ * See LICENSE.qla4xxx for copyright and licensing details.
  */
 #ifndef __QLA_NX_H
 #define __QLA_NX_H
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0d48fb4..3fc1d25 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
@@ -706,18 +706,22 @@
 	dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
 
 	/* don't poll if reset is going on */
-	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags)) {
+	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) {
 		if (dev_state == QLA82XX_DEV_NEED_RESET &&
 		    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
-			printk("scsi%ld: %s: HW State: NEED RESET!\n",
-			    ha->host_no, __func__);
-			set_bit(DPC_RESET_HA, &ha->dpc_flags);
-			qla4xxx_wake_dpc(ha);
-			qla4xxx_mailbox_premature_completion(ha);
+			if (!ql4xdontresethba) {
+				ql4_printk(KERN_INFO, ha, "%s: HW State: "
+				    "NEED RESET!\n", __func__);
+				set_bit(DPC_RESET_HA, &ha->dpc_flags);
+				qla4xxx_wake_dpc(ha);
+				qla4xxx_mailbox_premature_completion(ha);
+			}
 		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
-			printk("scsi%ld: %s: HW State: NEED QUIES!\n",
-			    ha->host_no, __func__);
+			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
+			    __func__);
 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
 			qla4xxx_wake_dpc(ha);
 		} else  {
@@ -1721,6 +1725,14 @@
 	if (!test_bit(AF_ONLINE, &ha->flags)) {
 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
 
+		if (is_qla8022(ha) && ql4xdontresethba) {
+			/* Put the device in failed state. */
+			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
+			qla4_8xxx_idc_lock(ha);
+			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+			    QLA82XX_DEV_FAILED);
+			qla4_8xxx_idc_unlock(ha);
+		}
 		ret = -ENODEV;
 		goto probe_failed;
 	}
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 9bfacf4..8475b30 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
 /*
  * QLogic iSCSI HBA Driver
- * Copyright (c)  2003-2006 QLogic Corporation
+ * Copyright (c)  2003-2010 QLogic Corporation
  *
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k4"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k5"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2f1f9b0..7b31093 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1805,6 +1805,7 @@
 			devip->sense_buff[5] = (ret >> 8) & 0xff;
 			devip->sense_buff[6] = ret & 0xff;
 		}
+		scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
 		return check_condition_result;
 	}
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 30ac116..45c7564 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1124,51 +1124,40 @@
 				struct list_head *work_q,
 				struct list_head *done_q)
 {
-	struct scsi_cmnd *scmd, *tgtr_scmd, *next;
-	unsigned int id = 0;
-	int rtn;
+	LIST_HEAD(tmp_list);
 
-	do {
-		tgtr_scmd = NULL;
-		list_for_each_entry(scmd, work_q, eh_entry) {
-			if (id == scmd_id(scmd)) {
-				tgtr_scmd = scmd;
-				break;
-			}
-		}
-		if (!tgtr_scmd) {
-			/* not one exactly equal; find the next highest */
-			list_for_each_entry(scmd, work_q, eh_entry) {
-				if (scmd_id(scmd) > id &&
-				    (!tgtr_scmd ||
-				     scmd_id(tgtr_scmd) > scmd_id(scmd)))
-						tgtr_scmd = scmd;
-			}
-		}
-		if (!tgtr_scmd)
-			/* no more commands, that's it */
-			break;
+	list_splice_init(work_q, &tmp_list);
+
+	while (!list_empty(&tmp_list)) {
+		struct scsi_cmnd *next, *scmd;
+		int rtn;
+		unsigned int id;
+
+		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
+		id = scmd_id(scmd);
 
 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
 						  "to target %d\n",
 						  current->comm, id));
-		rtn = scsi_try_target_reset(tgtr_scmd);
-		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
-			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
-				if (id == scmd_id(scmd))
-					if (!scsi_device_online(scmd->device) ||
-					    rtn == FAST_IO_FAIL ||
-					    !scsi_eh_tur(tgtr_scmd))
-						scsi_eh_finish_cmd(scmd,
-								   done_q);
-			}
-		} else
+		rtn = scsi_try_target_reset(scmd);
+		if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
 							  " failed target: "
 							  "%d\n",
 							  current->comm, id));
-		id++;
-	} while(id != 0);
+		list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
+			if (scmd_id(scmd) != id)
+				continue;
+
+			if ((rtn == SUCCESS || rtn == FAST_IO_FAIL)
+			    && (!scsi_device_online(scmd->device) ||
+				 rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd)))
+				scsi_eh_finish_cmd(scmd, done_q);
+			else
+				/* push back on work queue for further processing */
+				list_move(&scmd->eh_entry, work_q);
+		}
+	}
 
 	return list_empty(work_q);
 }
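
The new scsi_eh_target_reset() splices the whole work queue onto a private list, resets the target of the first command, and then moves every command for that target either to the done queue or back onto the work queue, so the private list shrinks on every pass and the loop terminates. A generic sketch of that splice-and-group pattern; work_item, the key field and finished() are illustrative, not the SCSI midlayer types:

#include <linux/list.h>
#include <linux/types.h>

struct work_item { struct list_head entry; unsigned int key; };

static void process_by_key(struct list_head *work_q, struct list_head *done_q,
			   bool (*finished)(struct work_item *))
{
	LIST_HEAD(tmp);

	list_splice_init(work_q, &tmp);

	while (!list_empty(&tmp)) {
		struct work_item *item, *next;
		unsigned int key;

		item = list_first_entry(&tmp, struct work_item, entry);
		key = item->key;

		/* One expensive per-key action would go here, e.g. a
		 * target reset for this key. */

		list_for_each_entry_safe(item, next, &tmp, entry) {
			if (item->key != key)
				continue;
			/* Every matching item leaves tmp, so progress is
			 * guaranteed. */
			if (finished(item))
				list_move(&item->entry, done_q);
			else
				list_move(&item->entry, work_q);
		}
	}
}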
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4a38422..9045c52 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1278,11 +1278,10 @@
 	}
 
 	if (scsi_target_is_busy(starget)) {
-		if (list_empty(&sdev->starved_entry)) {
+		if (list_empty(&sdev->starved_entry))
 			list_add_tail(&sdev->starved_entry,
 				      &shost->starved_list);
-			return 0;
-		}
+		return 0;
 	}
 
 	/* We're OK to process the command, so we can't be starved */
@@ -1978,8 +1977,7 @@
  *		in.
  *
  *	Returns zero if unsuccessful or an error if TUR failed.  For
- *	removable media, a return of NOT_READY or UNIT_ATTENTION is
- *	translated to success, with the ->changed flag updated.
+ *	removable media, UNIT_ATTENTION sets ->changed flag.
  **/
 int
 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
@@ -2006,16 +2004,6 @@
 	} while (scsi_sense_valid(sshdr) &&
 		 sshdr->sense_key == UNIT_ATTENTION && --retries);
 
-	if (!sshdr)
-		/* could not allocate sense buffer, so can't process it */
-		return result;
-
-	if (sdev->removable && scsi_sense_valid(sshdr) &&
-	    (sshdr->sense_key == UNIT_ATTENTION ||
-	     sshdr->sense_key == NOT_READY)) {
-		sdev->changed = 1;
-		result = 0;
-	}
 	if (!sshdr_external)
 		kfree(sshdr);
 	return result;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index d53e650..a2ed201 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -477,7 +477,7 @@
 
 
 /**
- * scsi_netlink_init - Called by SCSI subsystem to intialize
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
  * 	the SCSI transport netlink interface
  *
  **/
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 76ee2e7..490ce21 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -864,13 +864,15 @@
 
 	error = device_add(&sdev->sdev_gendev);
 	if (error) {
-		printk(KERN_INFO "error 1\n");
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add device: %d\n", error);
 		return error;
 	}
 	device_enable_async_suspend(&sdev->sdev_dev);
 	error = device_add(&sdev->sdev_dev);
 	if (error) {
-		printk(KERN_INFO "error 2\n");
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add class device: %d\n", error);
 		device_del(&sdev->sdev_gendev);
 		return error;
 	}
@@ -993,16 +995,14 @@
  */
 void scsi_remove_target(struct device *dev)
 {
-	struct device *rdev;
-
 	if (scsi_is_target_device(dev)) {
 		__scsi_remove_target(to_scsi_target(dev));
 		return;
 	}
 
-	rdev = get_device(dev);
+	get_device(dev);
 	device_for_each_child(dev, NULL, __remove_child);
-	put_device(rdev);
+	put_device(dev);
 }
 EXPORT_SYMBOL(scsi_remove_target);
 
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 332387a..f905ecb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2200,3 +2200,4 @@
 MODULE_DESCRIPTION("iSCSI Transport Interface");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI);
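
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI) adds a net-pf/proto module alias, so the transport module can be demand-loaded when something opens an iSCSI netlink socket; the alias resolves through the usual request_module() path. A small user-space sketch of the operation that triggers that autoload, with error handling kept minimal:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	/* Opening a NETLINK_ISCSI socket is what makes the kernel ask
	 * for the alias added above. */
	int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ISCSI);

	if (fd < 0) {
		perror("socket(NETLINK_ISCSI)");
		return 1;
	}
	puts("iSCSI netlink socket opened");
	close(fd);
	return 0;
}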
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9564961..e567302 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -583,7 +583,7 @@
 		 * quietly refuse to do anything to a changed disc until 
 		 * the changed bit has been reset
 		 */
-		/* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
+		/* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
 		goto out;
 	}
 
@@ -990,30 +990,51 @@
 
 static void set_media_not_present(struct scsi_disk *sdkp)
 {
-	sdkp->media_present = 0;
-	sdkp->capacity = 0;
-	sdkp->device->changed = 1;
+	if (sdkp->media_present)
+		sdkp->device->changed = 1;
+
+	if (sdkp->device->removable) {
+		sdkp->media_present = 0;
+		sdkp->capacity = 0;
+	}
+}
+
+static int media_not_present(struct scsi_disk *sdkp,
+			     struct scsi_sense_hdr *sshdr)
+{
+	if (!scsi_sense_valid(sshdr))
+		return 0;
+
+	/* not invoked for commands that could return deferred errors */
+	switch (sshdr->sense_key) {
+	case UNIT_ATTENTION:
+	case NOT_READY:
+		/* medium not present */
+		if (sshdr->asc == 0x3A) {
+			set_media_not_present(sdkp);
+			return 1;
+		}
+	}
+	return 0;
 }
 
 /**
- *	sd_media_changed - check if our medium changed
- *	@disk: kernel device descriptor 
+ *	sd_check_events - check media events
+ *	@disk: kernel device descriptor
+ *	@clearing: disk events currently being cleared
  *
- *	Returns 0 if not applicable or no change; 1 if change
+ *	Returns mask of DISK_EVENT_*.
  *
  *	Note: this function is invoked from the block subsystem.
  **/
-static int sd_media_changed(struct gendisk *disk)
+static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_sense_hdr *sshdr = NULL;
 	int retval;
 
-	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
-
-	if (!sdp->removable)
-		return 0;
+	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
 
 	/*
 	 * If the device is offline, don't send any commands - just pretend as
@@ -1023,7 +1044,6 @@
 	 */
 	if (!scsi_device_online(sdp)) {
 		set_media_not_present(sdkp);
-		retval = 1;
 		goto out;
 	}
 
@@ -1044,34 +1064,32 @@
 					      sshdr);
 	}
 
-	/*
-	 * Unable to test, unit probably not ready.   This usually
-	 * means there is no disc in the drive.  Mark as changed,
-	 * and we will figure it out later once the drive is
-	 * available again.
-	 */
-	if (retval || (scsi_sense_valid(sshdr) &&
-		       /* 0x3a is medium not present */
-		       sshdr->asc == 0x3a)) {
+	/* failed to execute TUR, assume media not present */
+	if (host_byte(retval)) {
 		set_media_not_present(sdkp);
-		retval = 1;
 		goto out;
 	}
 
+	if (media_not_present(sdkp, sshdr))
+		goto out;
+
 	/*
 	 * For removable scsi disk we have to recognise the presence
-	 * of a disk in the drive. This is kept in the struct scsi_disk
-	 * struct and tested at open !  Daniel Roche (dan@lectra.fr)
+	 * of a disk in the drive.
 	 */
+	if (!sdkp->media_present)
+		sdp->changed = 1;
 	sdkp->media_present = 1;
-
-	retval = sdp->changed;
-	sdp->changed = 0;
 out:
-	if (retval != sdkp->previous_state)
-		sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
-	sdkp->previous_state = retval;
+	/*
+	 * sdp->changed is set under the following conditions:
+	 *
+	 *	Medium present state has changed in either direction.
+	 *	Device has indicated UNIT_ATTENTION.
+	 */
 	kfree(sshdr);
+	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+	sdp->changed = 0;
 	return retval;
 }
 
@@ -1164,7 +1182,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= sd_compat_ioctl,
 #endif
-	.media_changed		= sd_media_changed,
+	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 };
@@ -1175,6 +1193,12 @@
 	u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
 	u64 bad_lba;
 	int info_valid;
+	/*
+	 * resid is optional but mostly filled in.  When it's unused,
+	 * its value is zero, so we assume the whole buffer was transferred
+	 */
+	unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
+	unsigned int good_bytes;
 
 	if (scmd->request->cmd_type != REQ_TYPE_FS)
 		return 0;
@@ -1208,7 +1232,8 @@
 	/* This computation should always be done in terms of
 	 * the resolution of the device's medium.
 	 */
-	return (bad_lba - start_lba) * scmd->device->sector_size;
+	good_bytes = (bad_lba - start_lba) * scmd->device->sector_size;
+	return min(good_bytes, transferred);
 }
 
 /**
@@ -1300,23 +1325,6 @@
 	return good_bytes;
 }
 
-static int media_not_present(struct scsi_disk *sdkp,
-			     struct scsi_sense_hdr *sshdr)
-{
-
-	if (!scsi_sense_valid(sshdr))
-		return 0;
-	/* not invoked for commands that could return deferred errors */
-	if (sshdr->sense_key != NOT_READY &&
-	    sshdr->sense_key != UNIT_ATTENTION)
-		return 0;
-	if (sshdr->asc != 0x3A) /* medium not present */
-		return 0;
-
-	set_media_not_present(sdkp);
-	return 1;
-}
-
 /*
  * spinup disk - called only in sd_revalidate_disk()
  */
@@ -1491,7 +1499,7 @@
 	 */
 	if (sdp->removable &&
 	    sense_valid && sshdr->sense_key == NOT_READY)
-		sdp->changed = 1;
+		set_media_not_present(sdkp);
 
 	/*
 	 * We used to set media_present to 0 here to indicate no media
@@ -1902,10 +1910,14 @@
 	int old_rcd = sdkp->RCD;
 	int old_dpofua = sdkp->DPOFUA;
 
-	if (sdp->skip_ms_page_8)
-		goto defaults;
-
-	if (sdp->type == TYPE_RBC) {
+	if (sdp->skip_ms_page_8) {
+		if (sdp->type == TYPE_RBC)
+			goto defaults;
+		else {
+			modepage = 0x3F;
+			dbd = 0;
+		}
+	} else if (sdp->type == TYPE_RBC) {
 		modepage = 6;
 		dbd = 8;
 	} else {
@@ -1933,13 +1945,11 @@
 	 */
 	if (len < 3)
 		goto bad_sense;
-	if (len > 20)
-		len = 20;
-
-	/* Take headers and block descriptors into account */
-	len += data.header_length + data.block_descriptor_length;
-	if (len > SD_BUF_SIZE)
-		goto bad_sense;
+	else if (len > SD_BUF_SIZE) {
+		sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
+			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
+		len = SD_BUF_SIZE;
+	}
 
 	/* Get the data */
 	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
@@ -1947,16 +1957,45 @@
 	if (scsi_status_is_good(res)) {
 		int offset = data.header_length + data.block_descriptor_length;
 
-		if (offset >= SD_BUF_SIZE - 2) {
-			sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n");
-			goto defaults;
+		while (offset < len) {
+			u8 page_code = buffer[offset] & 0x3F;
+			u8 spf       = buffer[offset] & 0x40;
+
+			if (page_code == 8 || page_code == 6) {
+				/* We're interested only in the first 3 bytes.
+				 */
+				if (len - offset <= 2) {
+					sd_printk(KERN_ERR, sdkp, "Incomplete "
+						  "mode parameter data\n");
+					goto defaults;
+				} else {
+					modepage = page_code;
+					goto Page_found;
+				}
+			} else {
+				/* Go to the next page */
+				if (spf && len - offset > 3)
+					offset += 4 + (buffer[offset+2] << 8) +
+						buffer[offset+3];
+				else if (!spf && len - offset > 1)
+					offset += 2 + buffer[offset+1];
+				else {
+					sd_printk(KERN_ERR, sdkp, "Incomplete "
+						  "mode parameter data\n");
+					goto defaults;
+				}
+			}
 		}
 
-		if ((buffer[offset] & 0x3f) != modepage) {
+		if (modepage == 0x3F) {
+			sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+				  "present\n");
+			goto defaults;
+		} else if ((buffer[offset] & 0x3f) != modepage) {
 			sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
 			goto defaults;
 		}
-
+	Page_found:
 		if (modepage == 8) {
 			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
 			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
@@ -2346,8 +2385,10 @@
 
 	gd->driverfs_dev = &sdp->sdev_gendev;
 	gd->flags = GENHD_FL_EXT_DEVT;
-	if (sdp->removable)
+	if (sdp->removable) {
 		gd->flags |= GENHD_FL_REMOVABLE;
+		gd->events |= DISK_EVENT_MEDIA_CHANGE;
+	}
 
 	add_disk(gd);
 	sd_dif_config_host(sdkp);
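
With .media_changed replaced by .check_events, a driver advertises the events it can report in gendisk->events and returns a mask of pending events from the callback, which is why sd now sets DISK_EVENT_MEDIA_CHANGE only for removable disks. A minimal sketch of that contract for a hypothetical removable block driver; the demo_* names are placeholders:

#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/types.h>

/* Placeholder device type and state query. */
struct demo_dev;
bool demo_media_changed(struct demo_dev *d);

static unsigned int demo_check_events(struct gendisk *disk,
				      unsigned int clearing)
{
	struct demo_dev *d = disk->private_data;

	/* Report only events that are newly pending since the last poll. */
	return demo_media_changed(d) ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static const struct block_device_operations demo_fops = {
	.owner		= THIS_MODULE,
	.check_events	= demo_check_events,
};

/* At registration time the driver opts in to media-change polling. */
static void demo_register(struct gendisk *gd)
{
	gd->fops = &demo_fops;
	gd->flags |= GENHD_FL_REMOVABLE;
	gd->events |= DISK_EVENT_MEDIA_CHANGE;
	add_disk(gd);
}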
@@ -2429,7 +2470,6 @@
 	sdkp->disk = gd;
 	sdkp->index = index;
 	atomic_set(&sdkp->openers, 0);
-	sdkp->previous_state = 1;
 
 	if (!sdp->request_queue->rq_timeout) {
 		if (sdp->type != TYPE_MOD)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 55488fa..c9d8f6c 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -55,7 +55,6 @@
 	u8		media_present;
 	u8		write_prot;
 	u8		protection_type;/* Data Integrity Field */
-	unsigned	previous_state : 1;
 	unsigned	ATO : 1;	/* state of disk ATO bit */
 	unsigned	WCE : 1;	/* state of disk WCE bit */
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d7b383c..aefadc6 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -104,14 +104,15 @@
 static void get_sectorsize(struct scsi_cd *);
 static void get_capabilities(struct scsi_cd *);
 
-static int sr_media_change(struct cdrom_device_info *, int);
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+				    unsigned int clearing, int slot);
 static int sr_packet(struct cdrom_device_info *, struct packet_command *);
 
 static struct cdrom_device_ops sr_dops = {
 	.open			= sr_open,
 	.release	 	= sr_release,
 	.drive_status	 	= sr_drive_status,
-	.media_changed		= sr_media_change,
+	.check_events		= sr_check_events,
 	.tray_move		= sr_tray_move,
 	.lock_door		= sr_lock_door,
 	.select_speed		= sr_select_speed,
@@ -165,90 +166,92 @@
 	mutex_unlock(&sr_ref_mutex);
 }
 
-/* identical to scsi_test_unit_ready except that it doesn't
- * eat the NOT_READY returns for removable media */
-int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
+static unsigned int sr_get_events(struct scsi_device *sdev)
 {
-	int retries = MAX_RETRIES;
-	int the_result;
-	u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+	u8 buf[8];
+	u8 cmd[] = { GET_EVENT_STATUS_NOTIFICATION,
+		     1,			/* polled */
+		     0, 0,		/* reserved */
+		     1 << 4,		/* notification class: media */
+		     0, 0,		/* reserved */
+		     0, sizeof(buf),	/* allocation length */
+		     0,			/* control */
+	};
+	struct event_header *eh = (void *)buf;
+	struct media_event_desc *med = (void *)(buf + 4);
+	struct scsi_sense_hdr sshdr;
+	int result;
 
-	/* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
-	 * conditions are gone, or a timeout happens
-	 */
-	do {
-		the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
-					      0, sshdr, SR_TIMEOUT,
-					      retries--, NULL);
-		if (scsi_sense_valid(sshdr) &&
-		    sshdr->sense_key == UNIT_ATTENTION)
-			sdev->changed = 1;
+	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf),
+				  &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL);
+	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
+		return DISK_EVENT_MEDIA_CHANGE;
 
-	} while (retries > 0 &&
-		 (!scsi_status_is_good(the_result) ||
-		  (scsi_sense_valid(sshdr) &&
-		   sshdr->sense_key == UNIT_ATTENTION)));
-	return the_result;
+	if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
+		return 0;
+
+	if (eh->nea || eh->notification_class != 0x4)
+		return 0;
+
+	if (med->media_event_code == 1)
+		return DISK_EVENT_EJECT_REQUEST;
+	else if (med->media_event_code == 2)
+		return DISK_EVENT_MEDIA_CHANGE;
+	return 0;
 }
 
 /*
- * This function checks to see if the media has been changed in the
- * CDROM drive.  It is possible that we have already sensed a change,
- * or the drive may have sensed one and not yet reported it.  We must
- * be ready for either case. This function always reports the current
- * value of the changed bit.  If flag is 0, then the changed bit is reset.
- * This function could be done as an ioctl, but we would need to have
- * an inode for that to work, and we do not always have one.
+ * This function checks to see if the media has been changed or eject
+ * button has been pressed.  It is possible that we have already
+ * sensed a change, or the drive may have sensed one and not yet
+ * reported it.  The past events are accumulated in sdev->changed and
+ * returned together with the current state.
  */
-
-static int sr_media_change(struct cdrom_device_info *cdi, int slot)
+static unsigned int sr_check_events(struct cdrom_device_info *cdi,
+				    unsigned int clearing, int slot)
 {
 	struct scsi_cd *cd = cdi->handle;
-	int retval;
-	struct scsi_sense_hdr *sshdr;
+	bool last_present;
+	struct scsi_sense_hdr sshdr;
+	unsigned int events;
+	int ret;
 
-	if (CDSL_CURRENT != slot) {
-		/* no changer support */
-		return -EINVAL;
+	/* no changer support */
+	if (CDSL_CURRENT != slot)
+		return 0;
+
+	events = sr_get_events(cd->device);
+	/*
+	 * GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE
+	 * is being cleared.  Note that there are devices which hang
+	 * if asked to execute TUR repeatedly.
+	 */
+	if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
+		goto skip_tur;
+
+	/* let's see whether the media is there with TUR */
+	last_present = cd->media_present;
+	ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+
+	/*
+	 * Media is considered to be present if TUR succeeds or fails with
+	 * sense data indicating something other than media-not-present
+	 * (ASC 0x3a).
+	 */
+	cd->media_present = scsi_status_is_good(ret) ||
+		(scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a);
+
+	if (last_present != cd->media_present)
+		events |= DISK_EVENT_MEDIA_CHANGE;
+skip_tur:
+	if (cd->device->changed) {
+		events |= DISK_EVENT_MEDIA_CHANGE;
+		cd->device->changed = 0;
 	}
 
-	sshdr =  kzalloc(sizeof(*sshdr), GFP_KERNEL);
-	retval = sr_test_unit_ready(cd->device, sshdr);
-	if (retval || (scsi_sense_valid(sshdr) &&
-		       /* 0x3a is medium not present */
-		       sshdr->asc == 0x3a)) {
-		/* Media not present or unable to test, unit probably not
-		 * ready. This usually means there is no disc in the drive.
-		 * Mark as changed, and we will figure it out later once
-		 * the drive is available again.
-		 */
-		cd->device->changed = 1;
-		/* This will force a flush, if called from check_disk_change */
-		retval = 1;
-		goto out;
-	};
-
-	retval = cd->device->changed;
-	cd->device->changed = 0;
-	/* If the disk changed, the capacity will now be different,
-	 * so we force a re-read of this information */
-	if (retval) {
-		/* check multisession offset etc */
-		sr_cd_check(cdi);
-		get_sectorsize(cd);
-	}
-
-out:
-	/* Notify userspace, that media has changed. */
-	if (retval != cd->previous_state)
-		sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
-				     GFP_KERNEL);
-	cd->previous_state = retval;
-	kfree(sshdr);
-
-	return retval;
+	return events;
 }
- 
+
 /*
  * sr_done is the interrupt routine for the device driver.
  *
@@ -533,10 +536,25 @@
 	return ret;
 }
 
-static int sr_block_media_changed(struct gendisk *disk)
+static unsigned int sr_block_check_events(struct gendisk *disk,
+					  unsigned int clearing)
 {
 	struct scsi_cd *cd = scsi_cd(disk);
-	return cdrom_media_changed(&cd->cdi);
+	return cdrom_check_events(&cd->cdi, clearing);
+}
+
+static int sr_block_revalidate_disk(struct gendisk *disk)
+{
+	struct scsi_cd *cd = scsi_cd(disk);
+	struct scsi_sense_hdr sshdr;
+
+	/* if the unit is not ready, nothing more to do */
+	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+		return 0;
+
+	sr_cd_check(&cd->cdi);
+	get_sectorsize(cd);
+	return 0;
 }
 
 static const struct block_device_operations sr_bdops =
@@ -545,7 +563,8 @@
 	.open		= sr_block_open,
 	.release	= sr_block_release,
 	.ioctl		= sr_block_ioctl,
-	.media_changed	= sr_block_media_changed,
+	.check_events	= sr_block_check_events,
+	.revalidate_disk = sr_block_revalidate_disk,
 	/* 
 	 * No compat_ioctl for now because sr_block_ioctl never
 	 * seems to pass arbitary ioctls down to host drivers.
@@ -618,6 +637,7 @@
 	sprintf(disk->disk_name, "sr%d", minor);
 	disk->fops = &sr_bdops;
 	disk->flags = GENHD_FL_CD;
+	disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
 
 	blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
 
@@ -627,7 +647,7 @@
 	cd->disk = disk;
 	cd->capacity = 0x1fffff;
 	cd->device->changed = 1;	/* force recheck CD type */
-	cd->previous_state = 1;
+	cd->media_present = 1;
 	cd->use = 1;
 	cd->readcd_known = 0;
 	cd->readcd_cdda = 0;
@@ -780,7 +800,7 @@
 	}
 
 	/* eat unit attentions */
-	sr_test_unit_ready(cd->device, &sshdr);
+	scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
 	/* ask for mode page 0x2a */
 	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
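
The new sr_get_events() above issues GET EVENT STATUS NOTIFICATION and turns the returned media event code into disk events (code 1 = eject request, code 2 = new media). A stand-alone sketch of that decoding step, assuming the usual 4-byte event header layout; decode_media_event() and the MY_EVENT_* flags are illustrative stand-ins, not kernel symbols.

#include <stdio.h>
#include <stdint.h>

#define MY_EVENT_MEDIA_CHANGE	(1u << 0)	/* stands in for DISK_EVENT_MEDIA_CHANGE */
#define MY_EVENT_EJECT_REQUEST	(1u << 1)	/* stands in for DISK_EVENT_EJECT_REQUEST */

/* buf holds the 8-byte GESN reply: a 4-byte event header followed by the
 * media event descriptor (notification class 4). */
static unsigned int decode_media_event(const uint8_t *buf)
{
	uint16_t data_len = (buf[0] << 8) | buf[1];	/* big-endian on the wire */
	uint8_t  nea      = buf[2] & 0x80;		/* "no event available" */
	uint8_t  nclass   = buf[2] & 0x07;		/* notification class */
	uint8_t  code     = buf[4] & 0x0f;		/* media event code */

	if (data_len < 4 || nea || nclass != 0x4)
		return 0;
	if (code == 1)
		return MY_EVENT_EJECT_REQUEST;
	if (code == 2)
		return MY_EVENT_MEDIA_CHANGE;
	return 0;
}

int main(void)
{
	/* Invented reply announcing a "new media" event (code 2). */
	const uint8_t reply[8] = { 0x00, 0x06, 0x04, 0x10, 0x02, 0x00, 0x00, 0x00 };

	printf("events: %#x\n", decode_media_event(reply));
	return 0;
}
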
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1e144df..e036f1d 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -40,7 +40,7 @@
 	unsigned xa_flag:1;	/* CD has XA sectors ? */
 	unsigned readcd_known:1;	/* drive supports READ_CD (0xbe) */
 	unsigned readcd_cdda:1;	/* reading audio data using READ_CD */
-	unsigned previous_state:1;	/* media has changed */
+	unsigned media_present:1;	/* media is present */
 	struct cdrom_device_info cdi;
 	/* We hold gendisk and scsi_device references on probe and use
 	 * the refs on this kref to decide when to release them */
@@ -61,7 +61,6 @@
 int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
 
 int sr_is_xa(Scsi_CD *);
-int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
 
 /* sr_vendor.c */
 void sr_vendor_init(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 3cd8ffb..8be3055 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -307,7 +307,7 @@
 		/* we have no changer support */
 		return -EINVAL;
 	}
-	if (0 == sr_test_unit_ready(cd->device, &sshdr))
+	if (!scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
 		return CDS_DISC_OK;
 
 	/* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 5b7388f..1871b8a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
    Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  */
 
-static const char *verstr = "20100829";
+static const char *verstr = "20101219";
 
 #include <linux/module.h>
 
@@ -3729,9 +3729,11 @@
 		b_size = PAGE_SIZE << order;
 	} else {
 		for (b_size = PAGE_SIZE, order = 0;
-		     order < ST_MAX_ORDER && b_size < new_size;
+		     order < ST_MAX_ORDER &&
+			     max_segs * (PAGE_SIZE << order) < new_size;
 		     order++, b_size *= 2)
 			;  /* empty */
+		STbuffer->reserved_page_order = order;
 	}
 	if (max_segs * (PAGE_SIZE << order) < new_size) {
 		if (order == ST_MAX_ORDER)
@@ -3758,7 +3760,6 @@
 		segs++;
 	}
 	STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
-	STbuffer->reserved_page_order = order;
 
 	return 1;
 }
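
The st.c hunk above fixes the reserved-buffer sizing so the page order is chosen against max_segs segments rather than a single one, and records reserved_page_order inside the loop that computes it. A condensed user-space sketch of the corrected calculation; PAGE_SIZE_EX, ST_MAX_ORDER_EX and pick_order() are example stand-ins, not the driver's symbols.

#include <stdio.h>

#define PAGE_SIZE_EX	4096L
#define ST_MAX_ORDER_EX	6

static int pick_order(long max_segs, long new_size)
{
	int order;

	for (order = 0;
	     order < ST_MAX_ORDER_EX &&
		     max_segs * (PAGE_SIZE_EX << order) < new_size;
	     order++)
		;	/* empty */
	return order;
}

int main(void)
{
	/* 256 segments, 4 MiB request: order-2 (16 KiB) allocations suffice. */
	printf("order = %d\n", pick_order(256, 4L << 20));
	return 0;
}
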
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 6b97ded..b4543f5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1866,7 +1866,7 @@
  *
  * This routine is similar to sym_set_workarounds(), except
  * that, at this point, we already know that the device was
- * successfully intialized at least once before, and so most
+ * successfully initialized at least once before, and so most
  * of the steps taken there are un-needed here.
  */
 static void sym2_reset_workarounds(struct pci_dev *pdev)
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 09a5508..b25e6e4 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -454,21 +454,40 @@
 		writeb(value, p->membase + offset);
 }
 
+/* Save the LCR value so it can be re-written when a Busy Detect IRQ occurs. */
+static inline void dwapb_save_out_value(struct uart_port *p, int offset,
+					int value)
+{
+	struct uart_8250_port *up =
+		container_of(p, struct uart_8250_port, port);
+
+	if (offset == UART_LCR)
+		up->lcr = value;
+}
+
+/* Read the IER to ensure any interrupt is cleared before returning from ISR. */
+static inline void dwapb_check_clear_ier(struct uart_port *p, int offset)
+{
+	if (offset == UART_TX || offset == UART_IER)
+		p->serial_in(p, UART_IER);
+}
+
 static void dwapb_serial_out(struct uart_port *p, int offset, int value)
 {
 	int save_offset = offset;
 	offset = map_8250_out_reg(p, offset) << p->regshift;
-	/* Save the LCR value so it can be re-written when a
-	 * Busy Detect interrupt occurs. */
-	if (save_offset == UART_LCR) {
-		struct uart_8250_port *up = (struct uart_8250_port *)p;
-		up->lcr = value;
-	}
+	dwapb_save_out_value(p, save_offset, value);
 	writeb(value, p->membase + offset);
-	/* Read the IER to ensure any interrupt is cleared before
-	 * returning from ISR. */
-	if (save_offset == UART_TX || save_offset == UART_IER)
-		value = p->serial_in(p, UART_IER);
+	dwapb_check_clear_ier(p, save_offset);
+}
+
+static void dwapb32_serial_out(struct uart_port *p, int offset, int value)
+{
+	int save_offset = offset;
+	offset = map_8250_out_reg(p, offset) << p->regshift;
+	dwapb_save_out_value(p, save_offset, value);
+	writel(value, p->membase + offset);
+	dwapb_check_clear_ier(p, save_offset);
 }
 
 static unsigned int io_serial_in(struct uart_port *p, int offset)
@@ -485,7 +504,8 @@
 
 static void set_io_from_upio(struct uart_port *p)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)p;
+	struct uart_8250_port *up =
+		container_of(p, struct uart_8250_port, port);
 	switch (p->iotype) {
 	case UPIO_HUB6:
 		p->serial_in = hub6_serial_in;
@@ -518,6 +538,11 @@
 		p->serial_out = dwapb_serial_out;
 		break;
 
+	case UPIO_DWAPB32:
+		p->serial_in = mem32_serial_in;
+		p->serial_out = dwapb32_serial_out;
+		break;
+
 	default:
 		p->serial_in = io_serial_in;
 		p->serial_out = io_serial_out;
@@ -536,6 +561,7 @@
 	case UPIO_MEM32:
 	case UPIO_AU:
 	case UPIO_DWAPB:
+	case UPIO_DWAPB32:
 		p->serial_out(p, offset, value);
 		p->serial_in(p, UART_LCR);	/* safe, no side-effects */
 		break;
@@ -653,13 +679,13 @@
 {
 	if (p->capabilities & UART_CAP_SLEEP) {
 		if (p->capabilities & UART_CAP_EFR) {
-			serial_outp(p, UART_LCR, 0xBF);
+			serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_B);
 			serial_outp(p, UART_EFR, UART_EFR_ECB);
 			serial_outp(p, UART_LCR, 0);
 		}
 		serial_outp(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
 		if (p->capabilities & UART_CAP_EFR) {
-			serial_outp(p, UART_LCR, 0xBF);
+			serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_B);
 			serial_outp(p, UART_EFR, 0);
 			serial_outp(p, UART_LCR, 0);
 		}
@@ -752,7 +778,7 @@
 	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
 		    UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
 	serial_outp(up, UART_MCR, UART_MCR_LOOP);
-	serial_outp(up, UART_LCR, UART_LCR_DLAB);
+	serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	old_dl = serial_dl_read(up);
 	serial_dl_write(up, 0x0001);
 	serial_outp(up, UART_LCR, 0x03);
@@ -764,7 +790,7 @@
 		serial_inp(up, UART_RX);
 	serial_outp(up, UART_FCR, old_fcr);
 	serial_outp(up, UART_MCR, old_mcr);
-	serial_outp(up, UART_LCR, UART_LCR_DLAB);
+	serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	serial_dl_write(up, old_dl);
 	serial_outp(up, UART_LCR, old_lcr);
 
@@ -782,7 +808,7 @@
 	unsigned int id;
 
 	old_lcr = serial_inp(p, UART_LCR);
-	serial_outp(p, UART_LCR, UART_LCR_DLAB);
+	serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_A);
 
 	old_dll = serial_inp(p, UART_DLL);
 	old_dlm = serial_inp(p, UART_DLM);
@@ -836,7 +862,7 @@
 	 * recommended for new designs).
 	 */
 	up->acr = 0;
-	serial_out(up, UART_LCR, 0xBF);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	serial_out(up, UART_EFR, UART_EFR_ECB);
 	serial_out(up, UART_LCR, 0x00);
 	id1 = serial_icr_read(up, UART_ID1);
@@ -945,7 +971,7 @@
 	 * Check for presence of the EFR when DLAB is set.
 	 * Only ST16C650V1 UARTs pass this test.
 	 */
-	serial_outp(up, UART_LCR, UART_LCR_DLAB);
+	serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	if (serial_in(up, UART_EFR) == 0) {
 		serial_outp(up, UART_EFR, 0xA8);
 		if (serial_in(up, UART_EFR) != 0) {
@@ -963,7 +989,7 @@
 	 * Maybe it requires 0xbf to be written to the LCR.
 	 * (other ST16C650V2 UARTs, TI16C752A, etc)
 	 */
-	serial_outp(up, UART_LCR, 0xBF);
+	serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
 		DEBUG_AUTOCONF("EFRv2 ");
 		autoconfig_has_efr(up);
@@ -1024,7 +1050,7 @@
 	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
 	status1 = serial_in(up, UART_IIR) >> 5;
 	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-	serial_outp(up, UART_LCR, UART_LCR_DLAB);
+	serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
 	status2 = serial_in(up, UART_IIR) >> 5;
 	serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
@@ -1183,7 +1209,7 @@
 	 * We also initialise the EFR (if any) to zero for later.  The
 	 * EFR occupies the same register location as the FCR and IIR.
 	 */
-	serial_outp(up, UART_LCR, 0xBF);
+	serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	serial_outp(up, UART_EFR, 0);
 	serial_outp(up, UART_LCR, 0);
 
@@ -1319,7 +1345,8 @@
 
 static void serial8250_stop_tx(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	__stop_tx(up);
 
@@ -1336,7 +1363,8 @@
 
 static void serial8250_start_tx(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	if (!(up->ier & UART_IER_THRI)) {
 		up->ier |= UART_IER_THRI;
@@ -1364,7 +1392,8 @@
 
 static void serial8250_stop_rx(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	up->ier &= ~UART_IER_RLSI;
 	up->port.read_status_mask &= ~UART_LSR_DR;
@@ -1373,7 +1402,8 @@
 
 static void serial8250_enable_ms(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	/* no MSR capabilities */
 	if (up->bugs & UART_BUG_NOMSR)
@@ -1581,7 +1611,8 @@
 			handled = 1;
 
 			end = NULL;
-		} else if (up->port.iotype == UPIO_DWAPB &&
+		} else if ((up->port.iotype == UPIO_DWAPB ||
+			    up->port.iotype == UPIO_DWAPB32) &&
 			  (iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
 			/* The DesignWare APB UART has a Busy Detect (0x07)
 			 * interrupt meaning an LCR write attempt occurred while the
@@ -1781,7 +1812,8 @@
 
 static unsigned int serial8250_tx_empty(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned long flags;
 	unsigned int lsr;
 
@@ -1795,7 +1827,8 @@
 
 static unsigned int serial8250_get_mctrl(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned int status;
 	unsigned int ret;
 
@@ -1815,7 +1848,8 @@
 
 static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned char mcr = 0;
 
 	if (mctrl & TIOCM_RTS)
@@ -1836,7 +1870,8 @@
 
 static void serial8250_break_ctl(struct uart_port *port, int break_state)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned long flags;
 
 	spin_lock_irqsave(&up->port.lock, flags);
@@ -1890,7 +1925,8 @@
 
 static int serial8250_get_poll_char(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned char lsr = serial_inp(up, UART_LSR);
 
 	if (!(lsr & UART_LSR_DR))
@@ -1904,7 +1940,8 @@
 			 unsigned char c)
 {
 	unsigned int ier;
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	/*
 	 *	First save the IER then disable the interrupts
@@ -1938,11 +1975,14 @@
 
 static int serial8250_startup(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned long flags;
 	unsigned char lsr, iir;
 	int retval;
 
+	up->port.fifosize = uart_config[up->port.type].fifo_size;
+	up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
 	up->capabilities = uart_config[up->port.type].flags;
 	up->mcr = 0;
 
@@ -1952,7 +1992,7 @@
 	if (up->port.type == PORT_16C950) {
 		/* Wake up and initialize UART */
 		up->acr = 0;
-		serial_outp(up, UART_LCR, 0xBF);
+		serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
 		serial_outp(up, UART_EFR, UART_EFR_ECB);
 		serial_outp(up, UART_IER, 0);
 		serial_outp(up, UART_LCR, 0);
@@ -2002,7 +2042,7 @@
 	if (up->port.type == PORT_16850) {
 		unsigned char fctr;
 
-		serial_outp(up, UART_LCR, 0xbf);
+		serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
 
 		fctr = serial_inp(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
 		serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_RX);
@@ -2166,7 +2206,8 @@
 
 static void serial8250_shutdown(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned long flags;
 
 	/*
@@ -2235,7 +2276,8 @@
 serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
 		          struct ktermios *old)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	unsigned char cval, fcr = 0;
 	unsigned long flags;
 	unsigned int baud, quot;
@@ -2363,7 +2405,7 @@
 		if (termios->c_cflag & CRTSCTS)
 			efr |= UART_EFR_CTS;
 
-		serial_outp(up, UART_LCR, 0xBF);
+		serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
 		serial_outp(up, UART_EFR, efr);
 	}
 
@@ -2435,7 +2477,8 @@
 void serial8250_do_pm(struct uart_port *port, unsigned int state,
 		      unsigned int oldstate)
 {
-	struct uart_8250_port *p = (struct uart_8250_port *)port;
+	struct uart_8250_port *p =
+		container_of(port, struct uart_8250_port, port);
 
 	serial8250_set_sleep(p, state != 0);
 }
@@ -2476,6 +2519,7 @@
 	case UPIO_MEM32:
 	case UPIO_MEM:
 	case UPIO_DWAPB:
+	case UPIO_DWAPB32:
 		if (!up->port.mapbase)
 			break;
 
@@ -2513,6 +2557,7 @@
 	case UPIO_MEM32:
 	case UPIO_MEM:
 	case UPIO_DWAPB:
+	case UPIO_DWAPB32:
 		if (!up->port.mapbase)
 			break;
 
@@ -2566,7 +2611,8 @@
 
 static void serial8250_release_port(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	serial8250_release_std_resource(up);
 	if (up->port.type == PORT_RSA)
@@ -2575,7 +2621,8 @@
 
 static int serial8250_request_port(struct uart_port *port)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	int ret = 0;
 
 	ret = serial8250_request_std_resource(up);
@@ -2590,7 +2637,8 @@
 
 static void serial8250_config_port(struct uart_port *port, int flags)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 	int probeflags = PROBE_ANY;
 	int ret;
 
@@ -2771,7 +2819,8 @@
 
 static void serial8250_console_putchar(struct uart_port *port, int ch)
 {
-	struct uart_8250_port *up = (struct uart_8250_port *)port;
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
 
 	wait_for_xmitr(up, UART_LSR_THRE);
 	serial_out(up, UART_TX, ch);
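
The 8250 hunks above consistently replace the raw cast from struct uart_port * to struct uart_8250_port * with container_of(), which stays correct regardless of whether the embedded uart_port remains the first member of the enclosing structure. A toy illustration of the idea; the types and the simplified container_of() macro below are stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified version of the kernel macro (no type checking). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_port { int line; };

struct toy_8250_port {
	unsigned short capabilities;	/* member placed before the embedded port */
	struct toy_port port;
	unsigned char lcr;
};

static void stop_tx(struct toy_port *port)
{
	/* A raw (struct toy_8250_port *)port cast would not point at the
	 * enclosing structure here, because port is not the first member. */
	struct toy_8250_port *up = container_of(port, struct toy_8250_port, port);

	up->lcr = 0;
	printf("port line %d, lcr %u\n", up->port.line, up->lcr);
}

int main(void)
{
	struct toy_8250_port up = { .capabilities = 1, .port = { .line = 3 } };

	stop_tx(&up.port);
	return 0;
}
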
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 842e3b2..8b8930f 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -957,6 +957,22 @@
 	return setup_port(priv, port, bar, offset, board->reg_shift);
 }
 
+static int
+ce4100_serial_setup(struct serial_private *priv,
+		  const struct pciserial_board *board,
+		  struct uart_port *port, int idx)
+{
+	int ret;
+
+	ret = setup_port(priv, port, 0, 0, board->reg_shift);
+	port->iotype = UPIO_MEM32;
+	port->type = PORT_XSCALE;
+	port->flags = (port->flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+	port->regshift = 2;
+
+	return ret;
+}
+
 static int skip_tx_en_setup(struct serial_private *priv,
 			const struct pciserial_board *board,
 			struct uart_port *port, int idx)
@@ -981,6 +997,7 @@
 #define PCI_SUBDEVICE_ID_POCTAL232	0x0308
 #define PCI_SUBDEVICE_ID_POCTAL422	0x0408
 #define PCI_VENDOR_ID_ADVANTECH		0x13fe
+#define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
 #define PCI_DEVICE_ID_ADVANTECH_PCI3620	0x3620
 #define PCI_DEVICE_ID_TITAN_200I	0x8028
 #define PCI_DEVICE_ID_TITAN_400I	0x8048
@@ -1072,6 +1089,13 @@
 		.subdevice	= PCI_ANY_ID,
 		.setup		= skip_tx_en_setup,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_INTEL,
+		.device		= PCI_DEVICE_ID_INTEL_CE4100_UART,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.setup		= ce4100_serial_setup,
+	},
 	/*
 	 * ITE
 	 */
@@ -1592,6 +1616,7 @@
 	pbn_ADDIDATA_PCIe_2_3906250,
 	pbn_ADDIDATA_PCIe_4_3906250,
 	pbn_ADDIDATA_PCIe_8_3906250,
+	pbn_ce4100_1_115200,
 };
 
 /*
@@ -2281,6 +2306,12 @@
 		.uart_offset	= 0x200,
 		.first_offset	= 0x1000,
 	},
+	[pbn_ce4100_1_115200] = {
+		.flags		= FL_BASE0,
+		.num_ports	= 1,
+		.base_baud	= 921600,
+		.reg_shift      = 2,
+	},
 };
 
 static const struct pci_device_id softmodem_blacklist[] = {
@@ -3765,6 +3796,11 @@
 	{	PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
 		0xA000, 0x3004,
 		0, 0, pbn_b0_bt_4_115200 },
+	/* Intel CE4100 */
+	{	PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART,
+		PCI_ANY_ID,  PCI_ANY_ID, 0, 0,
+		pbn_ce4100_1_115200 },
+
 
 	/*
 	 * These entries match devices with class COMMUNICATION_SERIAL,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index aff9dcd..c1df767 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -776,24 +776,7 @@
 	bool "Enable UART0 hardware flow control"
 	depends on SERIAL_BFIN_UART0
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART0_CTS_PIN
-	int "UART0 CTS pin"
-	depends on BFIN_UART0_CTSRTS && !BF548
-	default 23
-	help
-	  The default pin is GPIO_GP7.
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART0_RTS_PIN
-	int "UART0 RTS pin"
-	depends on BFIN_UART0_CTSRTS && !BF548
-	default 22
-	help
-	  The default pin is GPIO_GP6.
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_BFIN_UART1
 	bool "Enable UART1"
@@ -805,22 +788,7 @@
 	bool "Enable UART1 hardware flow control"
 	depends on SERIAL_BFIN_UART1
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART1_CTS_PIN
-	int "UART1 CTS pin"
-	depends on BFIN_UART1_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART1_RTS_PIN
-	int "UART1 RTS pin"
-	depends on BFIN_UART1_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_BFIN_UART2
 	bool "Enable UART2"
@@ -832,22 +800,7 @@
 	bool "Enable UART2 hardware flow control"
 	depends on SERIAL_BFIN_UART2
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART2_CTS_PIN
-	int "UART2 CTS pin"
-	depends on BFIN_UART2_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART2_RTS_PIN
-	int "UART2 RTS pin"
-	depends on BFIN_UART2_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_BFIN_UART3
 	bool "Enable UART3"
@@ -859,22 +812,7 @@
 	bool "Enable UART3 hardware flow control"
 	depends on SERIAL_BFIN_UART3
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART3_CTS_PIN
-	int "UART3 CTS pin"
-	depends on BFIN_UART3_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART3_RTS_PIN
-	int "UART3 RTS pin"
-	depends on BFIN_UART3_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_IMX
 	bool "IMX serial port support"
@@ -1381,6 +1319,16 @@
 	depends on SERIAL_MSM=y
 	select SERIAL_CORE_CONSOLE
 
+config SERIAL_VT8500
+	bool "VIA VT8500 on-chip serial port support"
+	depends on ARM && ARCH_VT8500
+	select SERIAL_CORE
+
+config SERIAL_VT8500_CONSOLE
+	bool "VIA VT8500 serial console support"
+	depends on SERIAL_VT8500=y
+	select SERIAL_CORE_CONSOLE
+
 config SERIAL_NETX
 	tristate "NetX serial port support"
 	depends on ARM && ARCH_NETX
@@ -1402,7 +1350,7 @@
 
 config SERIAL_OF_PLATFORM
 	tristate "Serial port on Open Firmware platform bus"
-	depends on PPC_OF || MICROBLAZE
+	depends on OF
 	depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL
 	help
 	  If you have a PowerPC based system that has serial ports
@@ -1632,4 +1580,19 @@
 	help
 	  Enable a Altera UART port to be the system console.
 
+config SERIAL_IFX6X60
+        tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
+	depends on GPIOLIB && SPI && EXPERIMENTAL
+	help
+	  Support for the IFX6x60 modem devices on Intel MID platforms.
+
+config SERIAL_PCH_UART
+	tristate "Intel EG20T PCH UART"
+	depends on PCI && DMADEVICES
+	select SERIAL_CORE
+	select PCH_DMA
+	help
+	  This driver is for the PCH (Platform Controller Hub) UART of the Intel
+	  EG20T, which is an IOH (Input/Output Hub) for x86 embedded processors.
+	  With PCH_DMA enabled, this PCH UART can operate in DMA mode.
 endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index c570576..8ea92e9 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -80,12 +80,15 @@
 obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
+obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
 obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
 obj-$(CONFIG_SERIAL_TIMBERDALE)	+= timbuart.o
 obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
 obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
 obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
+obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
 obj-$(CONFIG_SERIAL_MRST_MAX3110)	+= mrst_max3110.o
 obj-$(CONFIG_SERIAL_MFD_HSU)	+= mfd.o
-obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
+obj-$(CONFIG_SERIAL_IFX6X60)  	+= ifx6x60.o
+obj-$(CONFIG_SERIAL_PCH_UART)	+= pch_uart.o
diff --git a/drivers/serial/apbuart.c b/drivers/serial/apbuart.c
index 767ce9e..095a5d5 100644
--- a/drivers/serial/apbuart.c
+++ b/drivers/serial/apbuart.c
@@ -521,11 +521,12 @@
 };
 
 
-static void grlib_apbuart_configure(void);
+static int grlib_apbuart_configure(void);
 
 static int __init apbuart_console_init(void)
 {
-	grlib_apbuart_configure();
+	if (grlib_apbuart_configure())
+		return -ENODEV;
 	register_console(&grlib_apbuart_console);
 	return 0;
 }
@@ -596,57 +597,49 @@
 };
 
 
-static void grlib_apbuart_configure(void)
+static int grlib_apbuart_configure(void)
 {
-	static int enum_done;
 	struct device_node *np, *rp;
-	struct uart_port *port = NULL;
 	const u32 *prop;
-	int freq_khz;
-	int v = 0, d = 0;
-	unsigned int addr;
-	int irq, line;
-	struct amba_prom_registers *regs;
-
-	if (enum_done)
-		return;
+	int freq_khz, line = 0;
 
 	/* Get bus frequency */
 	rp = of_find_node_by_path("/");
+	if (!rp)
+		return -ENODEV;
 	rp = of_get_next_child(rp, NULL);
+	if (!rp)
+		return -ENODEV;
 	prop = of_get_property(rp, "clock-frequency", NULL);
+	if (!prop)
+		return -ENODEV;
 	freq_khz = *prop;
 
-	line = 0;
 	for_each_matching_node(np, apbuart_match) {
+		const int *irqs, *ampopts;
+		const struct amba_prom_registers *regs;
+		struct uart_port *port;
+		unsigned long addr;
 
-		int *vendor = (int *) of_get_property(np, "vendor", NULL);
-		int *device = (int *) of_get_property(np, "device", NULL);
-		int *irqs = (int *) of_get_property(np, "interrupts", NULL);
-		int *ampopts = (int *) of_get_property(np, "ampopts", NULL);
-		regs = (struct amba_prom_registers *)
-		    of_get_property(np, "reg", NULL);
-
+		ampopts = of_get_property(np, "ampopts", NULL);
 		if (ampopts && (*ampopts == 0))
 			continue; /* Ignore if used by another OS instance */
-		if (vendor)
-			v = *vendor;
-		if (device)
-			d = *device;
+
+		irqs = of_get_property(np, "interrupts", NULL);
+		regs = of_get_property(np, "reg", NULL);
 
 		if (!irqs || !regs)
-			return;
+			continue;
 
 		grlib_apbuart_nodes[line] = np;
 
 		addr = regs->phys_addr;
-		irq = *irqs;
 
 		port = &grlib_apbuart_ports[line];
 
 		port->mapbase = addr;
 		port->membase = ioremap(addr, sizeof(struct grlib_apbuart_regs_map));
-		port->irq = irq;
+		port->irq = *irqs;
 		port->iotype = UPIO_MEM;
 		port->ops = &grlib_apbuart_ops;
 		port->flags = UPF_BOOT_AUTOCONF;
@@ -658,12 +651,10 @@
 		/* We support maximum UART_NR uarts ... */
 		if (line == UART_NR)
 			break;
-
 	}
 
-	enum_done = 1;
-
 	grlib_apbuart_driver.nr = grlib_apbuart_port_nr = line;
+	return line ? 0 : -ENODEV;
 }
 
 static int __init grlib_apbuart_init(void)
@@ -671,7 +662,9 @@
 	int ret;
 
 	/* Find all APBUARTS in device the tree and initialize their ports */
-	grlib_apbuart_configure();
+	ret = grlib_apbuart_configure();
+	if (ret)
+		return ret;
 
 	printk(KERN_INFO "Serial: GRLIB APBUART driver\n");
 
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 3892666..2a1d52f 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1732,6 +1732,11 @@
 	device_init_wakeup(&pdev->dev, 1);
 	platform_set_drvdata(pdev, port);
 
+	if (port->rs485.flags & SER_RS485_ENABLED) {
+		UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
+		UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+	}
+
 	return 0;
 
 err_add_port:
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 19cac9f..e381b89 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -1,7 +1,7 @@
 /*
  * Blackfin On-Chip Serial Driver
  *
- * Copyright 2006-2008 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
@@ -12,6 +12,9 @@
 #define SUPPORT_SYSRQ
 #endif
 
+#define DRIVER_NAME "bfin-uart"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/gfp.h>
@@ -23,21 +26,20 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/kgdb.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
-	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
-#include <linux/kgdb.h>
-#include <asm/irq_regs.h>
-#endif
-
-#include <asm/gpio.h>
-#include <mach/bfin_serial_5xx.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
+#include <asm/portmux.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+
+#define port_membase(uart)     (((struct bfin_serial_port *)(uart))->port.membase)
+#define get_lsr_cache(uart)    (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+#include <asm/bfin_serial.h>
 
 #ifdef CONFIG_SERIAL_BFIN_MODULE
 # undef CONFIG_EARLY_PRINTK
@@ -48,12 +50,11 @@
 #endif
 
 /* UART name and device definitions */
-#define BFIN_SERIAL_NAME	"ttyBF"
+#define BFIN_SERIAL_DEV_NAME	"ttyBF"
 #define BFIN_SERIAL_MAJOR	204
 #define BFIN_SERIAL_MINOR	64
 
-static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS];
-static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource);
+static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
 
 #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
 	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -743,14 +744,14 @@
 		}
 	}
 	if (uart->rts_pin >= 0) {
-		gpio_request(uart->rts_pin, DRIVER_NAME);
 		gpio_direction_output(uart->rts_pin, 0);
 	}
 #endif
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (request_irq(uart->status_irq,
+	if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
 		bfin_serial_mctrl_cts_int,
 		IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+		uart->cts_pin = -1;
 		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
 	}
 
@@ -796,11 +797,9 @@
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
 	if (uart->cts_pin >= 0)
 		free_irq(gpio_to_irq(uart->cts_pin), uart);
-	if (uart->rts_pin >= 0)
-		gpio_free(uart->rts_pin);
 #endif
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (UART_GET_IER(uart) & EDSSI)
+	if (uart->cts_pin >= 0)
 		free_irq(uart->status_irq, uart);
 #endif
 }
@@ -962,33 +961,33 @@
  */
 static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
 {
-	int line = port->line;
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
 	unsigned short val;
 
 	switch (ld) {
 	case N_IRDA:
-		val = UART_GET_GCTL(&bfin_serial_ports[line]);
+		val = UART_GET_GCTL(uart);
 		val |= (IREN | RPOLC);
-		UART_PUT_GCTL(&bfin_serial_ports[line], val);
+		UART_PUT_GCTL(uart, val);
 		break;
 	default:
-		val = UART_GET_GCTL(&bfin_serial_ports[line]);
+		val = UART_GET_GCTL(uart);
 		val &= ~(IREN | RPOLC);
-		UART_PUT_GCTL(&bfin_serial_ports[line], val);
+		UART_PUT_GCTL(uart, val);
 	}
 }
 
 static void bfin_serial_reset_irda(struct uart_port *port)
 {
-	int line = port->line;
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
 	unsigned short val;
 
-	val = UART_GET_GCTL(&bfin_serial_ports[line]);
+	val = UART_GET_GCTL(uart);
 	val &= ~(IREN | RPOLC);
-	UART_PUT_GCTL(&bfin_serial_ports[line], val);
+	UART_PUT_GCTL(uart, val);
 	SSYNC();
 	val |= (IREN | RPOLC);
-	UART_PUT_GCTL(&bfin_serial_ports[line], val);
+	UART_PUT_GCTL(uart, val);
 	SSYNC();
 }
 
@@ -1070,85 +1069,6 @@
 #endif
 };
 
-static void __init bfin_serial_hw_init(void)
-{
-#ifdef CONFIG_SERIAL_BFIN_UART0
-	peripheral_request(P_UART0_TX, DRIVER_NAME);
-	peripheral_request(P_UART0_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART1
-	peripheral_request(P_UART1_TX, DRIVER_NAME);
-	peripheral_request(P_UART1_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART1_CTSRTS) && defined(CONFIG_BF54x)
-	peripheral_request(P_UART1_RTS, DRIVER_NAME);
-	peripheral_request(P_UART1_CTS, DRIVER_NAME);
-# endif
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART2
-	peripheral_request(P_UART2_TX, DRIVER_NAME);
-	peripheral_request(P_UART2_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART3
-	peripheral_request(P_UART3_TX, DRIVER_NAME);
-	peripheral_request(P_UART3_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART3_CTSRTS) && defined(CONFIG_BF54x)
-	peripheral_request(P_UART3_RTS, DRIVER_NAME);
-	peripheral_request(P_UART3_CTS, DRIVER_NAME);
-# endif
-#endif
-}
-
-static void __init bfin_serial_init_ports(void)
-{
-	static int first = 1;
-	int i;
-
-	if (!first)
-		return;
-	first = 0;
-
-	bfin_serial_hw_init();
-
-	for (i = 0; i < nr_active_ports; i++) {
-		spin_lock_init(&bfin_serial_ports[i].port.lock);
-		bfin_serial_ports[i].port.uartclk   = get_sclk();
-		bfin_serial_ports[i].port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
-		bfin_serial_ports[i].port.ops       = &bfin_serial_pops;
-		bfin_serial_ports[i].port.line      = i;
-		bfin_serial_ports[i].port.iotype    = UPIO_MEM;
-		bfin_serial_ports[i].port.membase   =
-			(void __iomem *)bfin_serial_resource[i].uart_base_addr;
-		bfin_serial_ports[i].port.mapbase   =
-			bfin_serial_resource[i].uart_base_addr;
-		bfin_serial_ports[i].port.irq       =
-			bfin_serial_resource[i].uart_irq;
-		bfin_serial_ports[i].status_irq	    =
-			bfin_serial_resource[i].uart_status_irq;
-		bfin_serial_ports[i].port.flags     = UPF_BOOT_AUTOCONF;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-		bfin_serial_ports[i].tx_done	    = 1;
-		bfin_serial_ports[i].tx_count	    = 0;
-		bfin_serial_ports[i].tx_dma_channel =
-			bfin_serial_resource[i].uart_tx_dma_channel;
-		bfin_serial_ports[i].rx_dma_channel =
-			bfin_serial_resource[i].uart_rx_dma_channel;
-		init_timer(&(bfin_serial_ports[i].rx_dma_timer));
-#endif
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
-	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
-		bfin_serial_ports[i].cts_pin	    =
-			bfin_serial_resource[i].uart_cts_pin;
-		bfin_serial_ports[i].rts_pin	    =
-			bfin_serial_resource[i].uart_rts_pin;
-#endif
-	}
-}
-
 #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
 /*
  * If the port was already initialised (eg, by a boot loader),
@@ -1196,6 +1116,34 @@
 
 static struct uart_driver bfin_serial_reg;
 
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+	while (!(UART_GET_LSR(uart) & THRE))
+		barrier();
+	UART_PUT_CHAR(uart, ch);
+}
+
+#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
+		 defined (CONFIG_EARLY_PRINTK) */
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+#define CLASS_BFIN_CONSOLE	"bfin-console"
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct bfin_serial_port *uart = bfin_serial_ports[co->index];
+	unsigned long flags;
+
+	spin_lock_irqsave(&uart->port.lock, flags);
+	uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+	spin_unlock_irqrestore(&uart->port.lock, flags);
+
+}
+
 static int __init
 bfin_serial_console_setup(struct console *co, char *options)
 {
@@ -1215,9 +1163,12 @@
 	 * if so, search for the first available port that does have
 	 * console support.
 	 */
-	if (co->index == -1 || co->index >= nr_active_ports)
-		co->index = 0;
-	uart = &bfin_serial_ports[co->index];
+	if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
+		return -ENODEV;
+
+	uart = bfin_serial_ports[co->index];
+	if (!uart)
+		return -ENODEV;
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1226,36 +1177,9 @@
 
 	return uart_set_options(&uart->port, co, baud, parity, bits, flow);
 }
-#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
-				 defined (CONFIG_EARLY_PRINTK) */
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
-static void bfin_serial_console_putchar(struct uart_port *port, int ch)
-{
-	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-	while (!(UART_GET_LSR(uart) & THRE))
-		barrier();
-	UART_PUT_CHAR(uart, ch);
-	SSYNC();
-}
-
-/*
- * Interrupts are disabled on entering
- */
-static void
-bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
-{
-	struct bfin_serial_port *uart = &bfin_serial_ports[co->index];
-	unsigned long flags;
-
-	spin_lock_irqsave(&uart->port.lock, flags);
-	uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
-	spin_unlock_irqrestore(&uart->port.lock, flags);
-
-}
 
 static struct console bfin_serial_console = {
-	.name		= BFIN_SERIAL_NAME,
+	.name		= BFIN_SERIAL_DEV_NAME,
 	.write		= bfin_serial_console_write,
 	.device		= uart_console_device,
 	.setup		= bfin_serial_console_setup,
@@ -1263,44 +1187,30 @@
 	.index		= -1,
 	.data		= &bfin_serial_reg,
 };
-
-static int __init bfin_serial_rs_console_init(void)
-{
-	bfin_serial_init_ports();
-	register_console(&bfin_serial_console);
-
-	return 0;
-}
-console_initcall(bfin_serial_rs_console_init);
-
 #define BFIN_SERIAL_CONSOLE	&bfin_serial_console
 #else
 #define BFIN_SERIAL_CONSOLE	NULL
 #endif /* CONFIG_SERIAL_BFIN_CONSOLE */
 
+#ifdef	CONFIG_EARLY_PRINTK
+static struct bfin_serial_port bfin_earlyprintk_port;
+#define CLASS_BFIN_EARLYPRINTK	"bfin-earlyprintk"
 
-#ifdef CONFIG_EARLY_PRINTK
-static __init void early_serial_putc(struct uart_port *port, int ch)
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
 {
-	unsigned timeout = 0xffff;
-	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+	unsigned long flags;
 
-	while ((!(UART_GET_LSR(uart) & THRE)) && --timeout)
-		cpu_relax();
-	UART_PUT_CHAR(uart, ch);
-}
+	if (bfin_earlyprintk_port.port.line != co->index)
+		return;
 
-static __init void early_serial_write(struct console *con, const char *s,
-					unsigned int n)
-{
-	struct bfin_serial_port *uart = &bfin_serial_ports[con->index];
-	unsigned int i;
-
-	for (i = 0; i < n; i++, s++) {
-		if (*s == '\n')
-			early_serial_putc(&uart->port, '\r');
-		early_serial_putc(&uart->port, *s);
-	}
+	spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
+	uart_console_write(&bfin_earlyprintk_port.port, s, count,
+		bfin_serial_console_putchar);
+	spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
 }
 
 /*
@@ -1311,18 +1221,326 @@
  */
 static struct __initdata console bfin_early_serial_console = {
 	.name = "early_BFuart",
-	.write = early_serial_write,
+	.write = bfin_earlyprintk_console_write,
 	.device = uart_console_device,
 	.flags = CON_PRINTBUFFER,
 	.index = -1,
 	.data  = &bfin_serial_reg,
 };
+#endif
+
+static struct uart_driver bfin_serial_reg = {
+	.owner			= THIS_MODULE,
+	.driver_name		= DRIVER_NAME,
+	.dev_name		= BFIN_SERIAL_DEV_NAME,
+	.major			= BFIN_SERIAL_MAJOR,
+	.minor			= BFIN_SERIAL_MINOR,
+	.nr			= BFIN_UART_NR_PORTS,
+	.cons			= BFIN_SERIAL_CONSOLE,
+};
+
+static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+	return uart_suspend_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_resume(struct platform_device *pdev)
+{
+	struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+	return uart_resume_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct bfin_serial_port *uart = NULL;
+	int ret = 0;
+
+	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+		dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
+		return -ENOENT;
+	}
+
+	if (bfin_serial_ports[pdev->id] == NULL) {
+
+		uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+		if (!uart) {
+			dev_err(&pdev->dev,
+				"fail to malloc bfin_serial_port\n");
+			return -ENOMEM;
+		}
+		bfin_serial_ports[pdev->id] = uart;
+
+#ifdef CONFIG_EARLY_PRINTK
+		if (!(bfin_earlyprintk_port.port.membase
+			&& bfin_earlyprintk_port.port.line == pdev->id)) {
+			/*
+			 * If the peripheral pins of the current port were already
+			 * allocated in the earlyprintk probe stage, don't do it again.
+			 */
+#endif
+		ret = peripheral_request_list(
+			(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"fail to request bfin serial peripherals\n");
+			goto out_error_free_mem;
+		}
+#ifdef CONFIG_EARLY_PRINTK
+		}
+#endif
+
+		spin_lock_init(&uart->port.lock);
+		uart->port.uartclk   = get_sclk();
+		uart->port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
+		uart->port.ops       = &bfin_serial_pops;
+		uart->port.line      = pdev->id;
+		uart->port.iotype    = UPIO_MEM;
+		uart->port.flags     = UPF_BOOT_AUTOCONF;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+			ret = -ENOENT;
+			goto out_error_free_peripherals;
+		}
+
+		uart->port.membase = ioremap(res->start,
+			res->end - res->start);
+		if (!uart->port.membase) {
+			dev_err(&pdev->dev, "Cannot map uart IO\n");
+			ret = -ENXIO;
+			goto out_error_free_peripherals;
+		}
+		uart->port.mapbase = res->start;
+
+		uart->port.irq = platform_get_irq(pdev, 0);
+		if (uart->port.irq < 0) {
+			dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+
+		uart->status_irq = platform_get_irq(pdev, 1);
+		if (uart->status_irq < 0) {
+			dev_err(&pdev->dev, "No uart status IRQ specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+		uart->tx_done	    = 1;
+		uart->tx_count	    = 0;
+
+		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+		uart->tx_dma_channel = res->start;
+
+		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+		uart->rx_dma_channel = res->start;
+
+		init_timer(&(uart->rx_dma_timer));
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+		if (res == NULL)
+			uart->cts_pin = -1;
+		else
+			uart->cts_pin = res->start;
+
+		res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+		if (res == NULL)
+			uart->rts_pin = -1;
+		else
+			uart->rts_pin = res->start;
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
+		if (uart->rts_pin >= 0)
+			gpio_request(uart->rts_pin, DRIVER_NAME);
+# endif
+#endif
+	}
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	if (!is_early_platform_device(pdev)) {
+#endif
+		uart = bfin_serial_ports[pdev->id];
+		uart->port.dev = &pdev->dev;
+		dev_set_drvdata(&pdev->dev, uart);
+		ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	}
+#endif
+
+	if (!ret)
+		return 0;
+
+	if (uart) {
+out_error_unmap:
+		iounmap(uart->port.membase);
+out_error_free_peripherals:
+		peripheral_free_list(
+			(unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+		kfree(uart);
+		bfin_serial_ports[pdev->id] = NULL;
+	}
+
+	return ret;
+}
+
+static int __devexit bfin_serial_remove(struct platform_device *pdev)
+{
+	struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (uart) {
+		uart_remove_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+		if (uart->rts_pin >= 0)
+			gpio_free(uart->rts_pin);
+#endif
+		iounmap(uart->port.membase);
+		peripheral_free_list(
+			(unsigned short *)pdev->dev.platform_data);
+		kfree(uart);
+		bfin_serial_ports[pdev->id] = NULL;
+	}
+
+	return 0;
+}
+
+static struct platform_driver bfin_serial_driver = {
+	.probe		= bfin_serial_probe,
+	.remove		= __devexit_p(bfin_serial_remove),
+	.suspend	= bfin_serial_suspend,
+	.resume		= bfin_serial_resume,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
+static __initdata struct early_platform_driver early_bfin_serial_driver = {
+	.class_str = CLASS_BFIN_CONSOLE,
+	.pdrv = &bfin_serial_driver,
+	.requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init bfin_serial_rs_console_init(void)
+{
+	early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
+
+	early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
+
+	register_console(&bfin_serial_console);
+
+	return 0;
+}
+console_initcall(bfin_serial_rs_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Memory can't be allocated dynamically during the earlyprintk init stage.
+ * So, do an individual probe for earlyprintk with a static uart port variable.
+ */
+static int bfin_earlyprintk_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+		dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
+		return -ENOENT;
+	}
+
+	ret = peripheral_request_list(
+		(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+	if (ret) {
+		dev_err(&pdev->dev,
+				"fail to request bfin serial peripherals\n");
+			return ret;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+		ret = -ENOENT;
+		goto out_error_free_peripherals;
+	}
+
+	bfin_earlyprintk_port.port.membase = ioremap(res->start,
+			res->end - res->start);
+	if (!bfin_earlyprintk_port.port.membase) {
+		dev_err(&pdev->dev, "Cannot map uart IO\n");
+		ret = -ENXIO;
+		goto out_error_free_peripherals;
+	}
+	bfin_earlyprintk_port.port.mapbase = res->start;
+	bfin_earlyprintk_port.port.line = pdev->id;
+	bfin_earlyprintk_port.port.uartclk = get_sclk();
+	bfin_earlyprintk_port.port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
+	spin_lock_init(&bfin_earlyprintk_port.port.lock);
+
+	return 0;
+
+out_error_free_peripherals:
+	peripheral_free_list(
+		(unsigned short *)pdev->dev.platform_data);
+
+	return ret;
+}
+
+static struct platform_driver bfin_earlyprintk_driver = {
+	.probe		= bfin_earlyprintk_probe,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+	.class_str = CLASS_BFIN_EARLYPRINTK,
+	.pdrv = &bfin_earlyprintk_driver,
+	.requested_id = EARLY_PLATFORM_ID_UNSET,
+};
 
 struct console __init *bfin_earlyserial_init(unsigned int port,
 						unsigned int cflag)
 {
-	struct bfin_serial_port *uart;
 	struct ktermios t;
+	char port_name[20];
+
+	if (port < 0 || port >= BFIN_UART_NR_PORTS)
+		return NULL;
+
+	/*
+	 * Only probe the resources of the port given in the earlyprintk boot arg.
+	 * The expected port id is indicated in the port name string.
+	 */
+	snprintf(port_name, 20, DRIVER_NAME ".%d", port);
+	early_platform_driver_register(&early_bfin_earlyprintk_driver,
+		port_name);
+	early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
+
+	if (!bfin_earlyprintk_port.port.membase)
+		return NULL;
 
 #ifdef CONFIG_SERIAL_BFIN_CONSOLE
 	/*
@@ -1332,124 +1550,36 @@
 	bfin_serial_console.flags &= ~CON_PRINTBUFFER;
 #endif
 
-	if (port == -1 || port >= nr_active_ports)
-		port = 0;
-	bfin_serial_init_ports();
 	bfin_early_serial_console.index = port;
-	uart = &bfin_serial_ports[port];
 	t.c_cflag = cflag;
 	t.c_iflag = 0;
 	t.c_oflag = 0;
 	t.c_lflag = ICANON;
 	t.c_line = port;
-	bfin_serial_set_termios(&uart->port, &t, &t);
+	bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
+
 	return &bfin_early_serial_console;
 }
-
 #endif /* CONFIG_EARLY_PRINTK */
 
-static struct uart_driver bfin_serial_reg = {
-	.owner			= THIS_MODULE,
-	.driver_name		= "bfin-uart",
-	.dev_name		= BFIN_SERIAL_NAME,
-	.major			= BFIN_SERIAL_MAJOR,
-	.minor			= BFIN_SERIAL_MINOR,
-	.nr			= BFIN_UART_NR_PORTS,
-	.cons			= BFIN_SERIAL_CONSOLE,
-};
-
-static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state)
-{
-	int i;
-
-	for (i = 0; i < nr_active_ports; i++) {
-		if (bfin_serial_ports[i].port.dev != &dev->dev)
-			continue;
-		uart_suspend_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-	}
-
-	return 0;
-}
-
-static int bfin_serial_resume(struct platform_device *dev)
-{
-	int i;
-
-	for (i = 0; i < nr_active_ports; i++) {
-		if (bfin_serial_ports[i].port.dev != &dev->dev)
-			continue;
-		uart_resume_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-	}
-
-	return 0;
-}
-
-static int bfin_serial_probe(struct platform_device *dev)
-{
-	struct resource *res = dev->resource;
-	int i;
-
-	for (i = 0; i < dev->num_resources; i++, res++)
-		if (res->flags & IORESOURCE_MEM)
-			break;
-
-	if (i < dev->num_resources) {
-		for (i = 0; i < nr_active_ports; i++, res++) {
-			if (bfin_serial_ports[i].port.mapbase != res->start)
-				continue;
-			bfin_serial_ports[i].port.dev = &dev->dev;
-			uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-		}
-	}
-
-	return 0;
-}
-
-static int bfin_serial_remove(struct platform_device *dev)
-{
-	int i;
-
-	for (i = 0; i < nr_active_ports; i++) {
-		if (bfin_serial_ports[i].port.dev != &dev->dev)
-			continue;
-		uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-		bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
-		gpio_free(bfin_serial_ports[i].cts_pin);
-		gpio_free(bfin_serial_ports[i].rts_pin);
-#endif
-	}
-
-	return 0;
-}
-
-static struct platform_driver bfin_serial_driver = {
-	.probe		= bfin_serial_probe,
-	.remove		= bfin_serial_remove,
-	.suspend	= bfin_serial_suspend,
-	.resume		= bfin_serial_resume,
-	.driver		= {
-		.name	= "bfin-uart",
-		.owner	= THIS_MODULE,
-	},
-};
-
 static int __init bfin_serial_init(void)
 {
 	int ret;
 
-	pr_info("Serial: Blackfin serial driver\n");
-
-	bfin_serial_init_ports();
+	pr_info("Blackfin serial driver\n");
 
 	ret = uart_register_driver(&bfin_serial_reg);
-	if (ret == 0) {
-		ret = platform_driver_register(&bfin_serial_driver);
-		if (ret) {
-			pr_debug("uart register failed\n");
-			uart_unregister_driver(&bfin_serial_reg);
-		}
+	if (ret) {
+		pr_err("failed to register %s:%d\n",
+			bfin_serial_reg.driver_name, ret);
 	}
+
+	ret = platform_driver_register(&bfin_serial_driver);
+	if (ret) {
+		pr_err("fail to register bfin uart\n");
+		uart_unregister_driver(&bfin_serial_reg);
+	}
+
 	return ret;
 }
 
@@ -1463,7 +1593,7 @@
 module_init(bfin_serial_init);
 module_exit(bfin_serial_exit);
 
-MODULE_AUTHOR("Aubrey.Li <aubrey.li@analog.com>");
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
 MODULE_DESCRIPTION("Blackfin generic serial port driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
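
The bfin_5xx rework above moves port setup from static board tables into a platform-device probe that acquires each resource in turn and unwinds through goto labels on failure. A generic user-space sketch of that unwind pattern; toy_probe() and its placeholder resources are illustrative only, not the driver's code.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct toy_uart { void *regs; void *dma; };

static int toy_probe(int id, struct toy_uart **out)
{
	struct toy_uart *uart;
	int ret;

	if (id < 0)
		return -ENOENT;

	uart = calloc(1, sizeof(*uart));
	if (!uart)
		return -ENOMEM;

	uart->regs = malloc(64);		/* stands in for ioremap() */
	if (!uart->regs) {
		ret = -ENXIO;
		goto out_free_uart;
	}

	uart->dma = malloc(64);			/* stands in for DMA/IRQ setup */
	if (!uart->dma) {
		ret = -ENOENT;
		goto out_unmap;
	}

	*out = uart;
	return 0;

out_unmap:
	free(uart->regs);
out_free_uart:
	free(uart);
	return ret;
}

int main(void)
{
	struct toy_uart *u = NULL;
	int ret = toy_probe(0, &u);

	printf("probe -> %d\n", ret);
	if (!ret) {
		free(u->dma);
		free(u->regs);
		free(u);
	}
	return 0;
}
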
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/serial/cpm_uart/cpm_uart.h
index 7274b52..b754dcf 100644
--- a/drivers/serial/cpm_uart/cpm_uart.h
+++ b/drivers/serial/cpm_uart/cpm_uart.h
@@ -76,18 +76,12 @@
 	unsigned char		*tx_buf;
 	unsigned char		*rx_buf;
 	u32			flags;
-	void			(*set_lineif)(struct uart_cpm_port *);
 	struct clk		*clk;
 	u8			brg;
 	uint			 dp_addr;
 	void			*mem_addr;
 	dma_addr_t		 dma_addr;
 	u32			mem_size;
-	/* helpers */
-	int			 baud;
-	int			 bits;
-	/* Keep track of 'odd' SMC2 wirings */
-	int			is_portb;
 	/* wait on close if needed */
 	int			wait_closing;
 	/* value to combine with opcode to form cpm command */
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index f2b8adc..8692ff9 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -72,6 +72,8 @@
 
 /**************************************************************/
 
+#define HW_BUF_SPD_THRESHOLD    9600
+
 /*
  * Check, if transmit buffers are processed
 */
@@ -503,6 +505,11 @@
 	pr_debug("CPM uart[%d]:set_termios\n", port->line);
 
 	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
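+	/*
+	 * At low speeds, or when the tty requests low latency, use a
+	 * single-byte receive buffer so characters are pushed to the
+	 * tty layer immediately instead of waiting for a full buffer.
+	 */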
+	if (baud <= HW_BUF_SPD_THRESHOLD ||
+	    (pinfo->port.state && pinfo->port.state->port.tty->low_latency))
+		pinfo->rx_fifosize = 1;
+	else
+		pinfo->rx_fifosize = RX_BUF_SIZE;
 
 	/* Character length programmed into the mode register is the
 	 * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
@@ -594,6 +601,17 @@
 	 */
 	bits++;
 	if (IS_SMC(pinfo)) {
+		/*
+		 * MRBLR can be changed while an SMC/SCC is operating only
+		 * if it is done in a single bus cycle with one 16-bit move
+		 * (not two 8-bit bus cycles back-to-back). This occurs when
+		 * the cp shifts control to the next RxBD, so the change does
+		 * not take effect immediately. To guarantee the exact RxBD
+		 * on which the change occurs, change MRBLR only while the
+		 * SMC/SCC receiver is disabled.
+		 */
+		out_be16(&pinfo->smcup->smc_mrblr, pinfo->rx_fifosize);
+
 		/* Set the mode register.  We want to keep a copy of the
 		 * enables, because we want to put them back if they were
 		 * present.
@@ -604,6 +622,7 @@
 		out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval |
 		    SMCMR_SM_UART | prev_mode);
 	} else {
+		out_be16(&pinfo->sccup->scc_genscc.scc_mrblr, pinfo->rx_fifosize);
 		out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
 	}
 
diff --git a/drivers/serial/ifx6x60.c b/drivers/serial/ifx6x60.c
new file mode 100644
index 0000000..ab93763
--- /dev/null
+++ b/drivers/serial/ifx6x60.c
@@ -0,0 +1,1406 @@
+/****************************************************************************
+ *
+ * Driver for the IFX 6x60 spi modem.
+ *
+ * Copyright (C) 2008 Option International
+ * Copyright (C) 2008 Filip Aben <f.aben@option.com>
+ *		      Denis Joseph Barrow <d.barow@option.com>
+ *		      Jan Dumon <j.dumon@option.com>
+ *
+ * Copyright (C) 2009, 2010 Intel Corp
+ * Russ Gorby <richardx.r.gorby@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA
+ *
+ * Driver modified by Intel from Option gtm501l_spi.c
+ *
+ * Notes
+ * o	The driver currently assumes a single device only. If you need to
+ *	change this then look for saved_ifx_dev and add a device lookup
+ * o	The driver is intended to be big-endian safe but has never been
+ *	tested that way (no suitable hardware). There are a couple of FIXME
+ *	notes by areas that may need addressing
+ * o	Some of the GPIO naming/setup assumptions may need revisiting if
+ *	you need to use this driver for another platform.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/tty.h>
+#include <linux/kfifo.h>
+#include <linux/tty_flip.h>
+#include <linux/timer.h>
+#include <linux/serial.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/rfkill.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/dmapool.h>
+#include <linux/gpio.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/ifx_modem.h>
+#include <linux/delay.h>
+
+#include "ifx6x60.h"
+
+#define IFX_SPI_MORE_MASK		0x10
+#define IFX_SPI_MORE_BIT		12	/* bit position in u16 */
+#define IFX_SPI_CTS_BIT			13	/* bit position in u16 */
+#define IFX_SPI_TTY_ID			0
+#define IFX_SPI_TIMEOUT_SEC		2
+#define IFX_SPI_HEADER_0		(-1)
+#define IFX_SPI_HEADER_F		(-2)
+
+/* forward reference */
+static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev);
+
+/* local variables */
+static int spi_b16 = 1;			/* 8 or 16 bit word length */
+static struct tty_driver *tty_drv;
+static struct ifx_spi_device *saved_ifx_dev;
+static struct lock_class_key ifx_spi_key;
+
+/* GPIO/GPE settings */
+
+/**
+ *	mrdy_set_high		-	set MRDY GPIO
+ *	@ifx: device we are controlling
+ *
+ */
+static inline void mrdy_set_high(struct ifx_spi_device *ifx)
+{
+	gpio_set_value(ifx->gpio.mrdy, 1);
+}
+
+/**
+ *	mrdy_set_low		-	clear MRDY GPIO
+ *	@ifx: device we are controlling
+ *
+ */
+static inline void mrdy_set_low(struct ifx_spi_device *ifx)
+{
+	gpio_set_value(ifx->gpio.mrdy, 0);
+}
+
+/**
+ *	ifx_spi_power_state_set
+ *	@ifx_dev: our SPI device
+ *	@val: bits to set
+ *
+ *	Set bit in power status and signal power system if status becomes non-0
+ */
+static void
+ifx_spi_power_state_set(struct ifx_spi_device *ifx_dev, unsigned char val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ifx_dev->power_lock, flags);
+
+	/*
+	 * if power status is already non-0, just update, else
+	 * tell power system
+	 */
+	if (!ifx_dev->power_status)
+		pm_runtime_get(&ifx_dev->spi_dev->dev);
+	ifx_dev->power_status |= val;
+
+	spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
+}
+
+/**
+ *	ifx_spi_power_state_clear	-	clear power bit
+ *	@ifx_dev: our SPI device
+ *	@val: bits to clear
+ *
+ *	clear bit in power status and signal power system if status becomes 0
+ */
+static void
+ifx_spi_power_state_clear(struct ifx_spi_device *ifx_dev, unsigned char val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ifx_dev->power_lock, flags);
+
+	if (ifx_dev->power_status) {
+		ifx_dev->power_status &= ~val;
+		if (!ifx_dev->power_status)
+			pm_runtime_put(&ifx_dev->spi_dev->dev);
+	}
+
+	spin_unlock_irqrestore(&ifx_dev->power_lock, flags);
+}
+
+/**
+ *	swap_buf
+ *	@buf: our buffer
+ *	@len: number of bytes (not words) in the buffer
+ *	@end: end of buffer
+ *
+ *	Swap the contents of a buffer into big endian format
+ */
+static inline void swap_buf(u16 *buf, int len, void *end)
+{
+	int n;
+
+	len = ((len + 1) >> 1);
+	if ((void *)&buf[len] > end) {
+		pr_err("swap_buf: swap exceeds boundary (%p > %p)!",
+		       &buf[len], end);
+		return;
+	}
+	for (n = 0; n < len; n++) {
+		*buf = cpu_to_be16(*buf);
+		buf++;
+	}
+}
+
+/**
+ *	mrdy_assert		-	assert MRDY line
+ *	@ifx_dev: our SPI device
+ *
+ *	Assert mrdy and set timer to wait for SRDY interrupt, if SRDY is low
+ *	now.
+ *
+ *	FIXME: Can SRDY even go high as we are running this code ?
+ */
+static void mrdy_assert(struct ifx_spi_device *ifx_dev)
+{
+	int val = gpio_get_value(ifx_dev->gpio.srdy);
+	if (!val) {
+		if (!test_and_set_bit(IFX_SPI_STATE_TIMER_PENDING,
+				      &ifx_dev->flags)) {
+			ifx_dev->spi_timer.expires =
+				jiffies + IFX_SPI_TIMEOUT_SEC*HZ;
+			add_timer(&ifx_dev->spi_timer);
+
+		}
+	}
+	ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_DATA_PENDING);
+	mrdy_set_high(ifx_dev);
+}
+
+/**
+ *	ifx_spi_ttyhangup	-	hang up an IFX device
+ *	@ifx_dev: our SPI device
+ *
+ *	Hang up the tty attached to the IFX device if one is currently
+ *	open. If not, take no action.
+ */
+static void ifx_spi_ttyhangup(struct ifx_spi_device *ifx_dev)
+{
+	struct tty_port *pport = &ifx_dev->tty_port;
+	struct tty_struct *tty = tty_port_tty_get(pport);
+	if (tty) {
+		tty_hangup(tty);
+		tty_kref_put(tty);
+	}
+}
+
+/**
+ *	ifx_spi_timeout		-	SPI timeout
+ *	@arg: our SPI device
+ *
+ *	The SPI has timed out: hang up the tty. Users will then see a hangup
+ *	and error events.
+ */
+static void ifx_spi_timeout(unsigned long arg)
+{
+	struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+
+	dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
+	ifx_spi_ttyhangup(ifx_dev);
+	mrdy_set_low(ifx_dev);
+	clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+}
+
+/* char/tty operations */
+
+/**
+ *	ifx_spi_tiocmget	-	get modem lines
+ *	@tty: our tty device
+ *	@filp: file handle issuing the request
+ *
+ *	Map the signal state into Linux modem flags and report the value
+ *	in Linux terms
+ */
+static int ifx_spi_tiocmget(struct tty_struct *tty, struct file *filp)
+{
+	unsigned int value;
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+
+	value =
+	(test_bit(IFX_SPI_RTS, &ifx_dev->signal_state) ? TIOCM_RTS : 0) |
+	(test_bit(IFX_SPI_DTR, &ifx_dev->signal_state) ? TIOCM_DTR : 0) |
+	(test_bit(IFX_SPI_CTS, &ifx_dev->signal_state) ? TIOCM_CTS : 0) |
+	(test_bit(IFX_SPI_DSR, &ifx_dev->signal_state) ? TIOCM_DSR : 0) |
+	(test_bit(IFX_SPI_DCD, &ifx_dev->signal_state) ? TIOCM_CAR : 0) |
+	(test_bit(IFX_SPI_RI, &ifx_dev->signal_state) ? TIOCM_RNG : 0);
+	return value;
+}
+
+/**
+ *	ifx_spi_tiocmset	-	set modem bits
+ *	@tty: the tty structure
+ *	@filp: file handle issuing the request
+ *	@set: bits to set
+ *	@clear: bits to clear
+ *
+ *	The IFX6x60 only supports DTR and RTS. Set them accordingly
+ *	and flag that an update to the modem is needed.
+ *
+ *	FIXME: do we need to kick the transfers when we do this?
+ */
+static int ifx_spi_tiocmset(struct tty_struct *tty, struct file *filp,
+			    unsigned int set, unsigned int clear)
+{
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+
+	if (set & TIOCM_RTS)
+		set_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
+	if (set & TIOCM_DTR)
+		set_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
+	if (clear & TIOCM_RTS)
+		clear_bit(IFX_SPI_RTS, &ifx_dev->signal_state);
+	if (clear & TIOCM_DTR)
+		clear_bit(IFX_SPI_DTR, &ifx_dev->signal_state);
+
+	set_bit(IFX_SPI_UPDATE, &ifx_dev->signal_state);
+	return 0;
+}
+
+/**
+ *	ifx_spi_open	-	called on tty open
+ *	@tty: our tty device
+ *	@filp: file handle being associated with the tty
+ *
+ *	Open the tty interface. We let the tty_port layer do all the work
+ *	for us.
+ *
+ *	FIXME: Remove single device assumption and saved_ifx_dev
+ */
+static int ifx_spi_open(struct tty_struct *tty, struct file *filp)
+{
+	return tty_port_open(&saved_ifx_dev->tty_port, tty, filp);
+}
+
+/**
+ *	ifx_spi_close	-	called when our tty closes
+ *	@tty: the tty being closed
+ *	@filp: the file handle being closed
+ *
+ *	Perform the close of the tty. We use the tty_port layer to do all
+ *	our hard work.
+ */
+static void ifx_spi_close(struct tty_struct *tty, struct file *filp)
+{
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+	tty_port_close(&ifx_dev->tty_port, tty, filp);
+	/* FIXME: should we do an ifx_spi_reset here ? */
+}
+
+/**
+ *	ifx_spi_decode_spi_header	-	decode received header
+ *	@buffer: the received data
+ *	@length: decoded length
+ *	@more: decoded more flag
+ *	@received_cts: status of cts we received
+ *
+ *	Note how received_cts is handled: if the header is all F it is left
+ *	as it was, if the header is all 0 it is set to 0, and otherwise it
+ *	is taken from the incoming header.
+ *
+ *	FIXME: endianness
+ */
+static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length,
+			unsigned char *more, unsigned char *received_cts)
+{
+	u16 h1;
+	u16 h2;
+	u16 *in_buffer = (u16 *)buffer;
+
+	h1 = *in_buffer;
+	h2 = *(in_buffer+1);
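+	/*
+	 * The 4-byte header is two 16-bit words: the first carries the
+	 * payload byte count plus the MORE flag, the second carries the
+	 * peer's receive capacity and its CTS flag. All-zero and all-0xffff
+	 * headers are special markers handled below.
+	 */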
+
+	if (h1 == 0 && h2 == 0) {
+		*received_cts = 0;
+		return IFX_SPI_HEADER_0;
+	} else if (h1 == 0xffff && h2 == 0xffff) {
+		/* spi_slave_cts remains as it was */
+		return IFX_SPI_HEADER_F;
+	}
+
+	*length = h1 & 0xfff;	/* upper bits of byte are flags */
+	*more = (buffer[1] >> IFX_SPI_MORE_BIT) & 1;
+	*received_cts = (buffer[3] >> IFX_SPI_CTS_BIT) & 1;
+	return 0;
+}
+
+/**
+ *	ifx_spi_setup_spi_header	-	set header fields
+ *	@txbuffer: pointer to start of SPI buffer
+ *	@tx_count: bytes
+ *	@more: indicate if more to follow
+ *
+ *	Format up an SPI header for a transfer
+ *
+ *	FIXME: endianness?
+ */
+static void ifx_spi_setup_spi_header(unsigned char *txbuffer, int tx_count,
+					unsigned char more)
+{
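+	/* First word: payload byte count (with the MORE flag in the top
+	 * bits), second word: the payload space we can accept in reply. */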
+	*(u16 *)(txbuffer) = tx_count;
+	*(u16 *)(txbuffer+2) = IFX_SPI_PAYLOAD_SIZE;
+	txbuffer[1] |= (more << IFX_SPI_MORE_BIT) & IFX_SPI_MORE_MASK;
+}
+
+/**
+ *	ifx_spi_wakeup_serial	-	SPI space made
+ *	@port_data: our SPI device
+ *
+ *	We have emptied the FIFO enough that we want to get more data
+ *	queued into it. Poke the line discipline via tty_wakeup so that
+ *	it will feed us more bits
+ */
+static void ifx_spi_wakeup_serial(struct ifx_spi_device *ifx_dev)
+{
+	struct tty_struct *tty;
+
+	tty = tty_port_tty_get(&ifx_dev->tty_port);
+	if (!tty)
+		return;
+	tty_wakeup(tty);
+	tty_kref_put(tty);
+}
+
+/**
+ *	ifx_spi_prepare_tx_buffer	-	prepare transmit frame
+ *	@ifx_dev: our SPI device
+ *
+ *	The transmit buffer needs a header and various other bits of
+ *	information followed by as much data as we can pull from the FIFO
+ *	and transfer. This function formats up a suitable buffer in the
+ *	ifx_dev->tx_buffer
+ *
+ *	FIXME: performance - should we wake the tty when the queue is half
+ *			     empty ?
+ */
+static int ifx_spi_prepare_tx_buffer(struct ifx_spi_device *ifx_dev)
+{
+	int temp_count;
+	int queue_length;
+	int tx_count;
+	unsigned char *tx_buffer;
+
+	tx_buffer = ifx_dev->tx_buffer;
+	memset(tx_buffer, 0, IFX_SPI_TRANSFER_SIZE);
+
+	/* make room for required SPI header */
+	tx_buffer += IFX_SPI_HEADER_OVERHEAD;
+	tx_count = IFX_SPI_HEADER_OVERHEAD;
+
+	/* clear to signal no more data if this turns out to be the
+	 * last buffer sent in a sequence */
+	ifx_dev->spi_more = 0;
+
+	/* if modem cts is set, just send empty buffer */
+	if (!ifx_dev->spi_slave_cts) {
+		/* see if there's tx data */
+		queue_length = kfifo_len(&ifx_dev->tx_fifo);
+		if (queue_length != 0) {
+			/* data to mux -- see if there's room for it */
+			temp_count = min(queue_length, IFX_SPI_PAYLOAD_SIZE);
+			temp_count = kfifo_out_locked(&ifx_dev->tx_fifo,
+					tx_buffer, temp_count,
+					&ifx_dev->fifo_lock);
+
+			/* update buffer pointer and data count in message */
+			tx_buffer += temp_count;
+			tx_count += temp_count;
+			if (temp_count == queue_length)
+				/* poke port to get more data */
+				ifx_spi_wakeup_serial(ifx_dev);
+			else /* more data in port, use next SPI message */
+				ifx_dev->spi_more = 1;
+		}
+	}
+	/* have data and info for header -- set up SPI header in buffer */
+	/* spi header needs payload size, not entire buffer size */
+	ifx_spi_setup_spi_header(ifx_dev->tx_buffer,
+					tx_count-IFX_SPI_HEADER_OVERHEAD,
+					ifx_dev->spi_more);
+	/* swap actual data in the buffer */
+	swap_buf((u16 *)(ifx_dev->tx_buffer), tx_count,
+		&ifx_dev->tx_buffer[IFX_SPI_TRANSFER_SIZE]);
+	return tx_count;
+}
+
+/**
+ *	ifx_spi_write		-	line discipline write
+ *	@tty: our tty device
+ *	@buf: pointer to buffer to write (kernel space)
+ *	@count: size of buffer
+ *
+ *	Write the characters we have been given into the FIFO. If the device
+ *	is not active then activate it, when the SRDY line is asserted back
+ *	this will commence I/O
+ */
+static int ifx_spi_write(struct tty_struct *tty, const unsigned char *buf,
+			 int count)
+{
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+	unsigned char *tmp_buf = (unsigned char *)buf;
+	int tx_count = kfifo_in_locked(&ifx_dev->tx_fifo, tmp_buf, count,
+				   &ifx_dev->fifo_lock);
+	mrdy_assert(ifx_dev);
+	return tx_count;
+}
+
+/**
+ *	ifx_spi_write_room	-	line discipline helper
+ *	@tty: our tty device
+ *
+ *	Report how much data we can accept before we drop bytes. As we use
+ *	a simple FIFO this is nice and easy.
+ */
+static int ifx_spi_write_room(struct tty_struct *tty)
+{
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+	return IFX_SPI_FIFO_SIZE - kfifo_len(&ifx_dev->tx_fifo);
+}
+
+/**
+ *	ifx_spi_chars_in_buffer	-	line discipline helper
+ *	@tty: our tty device
+ *
+ *	Report how many characters we have buffered. In our case this is the
+ *	number of bytes sitting in our transmit FIFO.
+ */
+static int ifx_spi_chars_in_buffer(struct tty_struct *tty)
+{
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+	return kfifo_len(&ifx_dev->tx_fifo);
+}
+
+/**
+ *	ifx_spi_hangup		-	hang up the port's tty
+ *	@tty: the tty being hung up
+ *
+ *	tty port hang up. Called when tty_hangup processing is invoked either
+ *	by loss of carrier, or by software (eg vhangup). Serialized against
+ *	activate/shutdown by the tty layer.
+ */
+static void ifx_spi_hangup(struct tty_struct *tty)
+{
+	struct ifx_spi_device *ifx_dev = tty->driver_data;
+	tty_port_hangup(&ifx_dev->tty_port);
+}
+
+/**
+ *	ifx_port_activate
+ *	@port: our tty port
+ *
+ *	tty port activate method - called for first open. Serialized
+ *	with hangup and shutdown by the tty layer.
+ */
+static int ifx_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+	struct ifx_spi_device *ifx_dev =
+		container_of(port, struct ifx_spi_device, tty_port);
+
+	/* clear any old data; can't do this in 'close' */
+	kfifo_reset(&ifx_dev->tx_fifo);
+
+	/* put port data into this tty */
+	tty->driver_data = ifx_dev;
+
+	/* allows flip string push from int context */
+	tty->low_latency = 1;
+
+	return 0;
+}
+
+/**
+ *	ifx_port_shutdown
+ *	@port: our tty port
+ *
+ *	tty port shutdown method - called for last port close. Serialized
+ *	with hangup and activate by the tty layer.
+ */
+static void ifx_port_shutdown(struct tty_port *port)
+{
+	struct ifx_spi_device *ifx_dev =
+		container_of(port, struct ifx_spi_device, tty_port);
+
+	mrdy_set_low(ifx_dev);
+	clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+	tasklet_kill(&ifx_dev->io_work_tasklet);
+}
+
+static const struct tty_port_operations ifx_tty_port_ops = {
+	.activate = ifx_port_activate,
+	.shutdown = ifx_port_shutdown,
+};
+
+static const struct tty_operations ifx_spi_serial_ops = {
+	.open = ifx_spi_open,
+	.close = ifx_spi_close,
+	.write = ifx_spi_write,
+	.hangup = ifx_spi_hangup,
+	.write_room = ifx_spi_write_room,
+	.chars_in_buffer = ifx_spi_chars_in_buffer,
+	.tiocmget = ifx_spi_tiocmget,
+	.tiocmset = ifx_spi_tiocmset,
+};
+
+/**
+ *	ifx_spi_insert_flip_string	-	queue received data
+ *	@ifx_dev: our SPI device
+ *	@chars: buffer we have received
+ *	@size: number of chars received
+ *
+ *	Queue bytes to the tty assuming the tty side is currently open. If
+ *	not, discard the data.
+ */
+static void ifx_spi_insert_flip_string(struct ifx_spi_device *ifx_dev,
+				    unsigned char *chars, size_t size)
+{
+	struct tty_struct *tty = tty_port_tty_get(&ifx_dev->tty_port);
+	if (!tty)
+		return;
+	tty_insert_flip_string(tty, chars, size);
+	tty_flip_buffer_push(tty);
+	tty_kref_put(tty);
+}
+
+/**
+ *	ifx_spi_complete	-	SPI transfer completed
+ *	@ctx: our SPI device
+ *
+ *	An SPI transfer has completed. Process any received data and kick off
+ *	any further transmits we can commence.
+ */
+static void ifx_spi_complete(void *ctx)
+{
+	struct ifx_spi_device *ifx_dev = ctx;
+	struct tty_struct *tty;
+	struct tty_ldisc *ldisc = NULL;
+	int length;
+	int actual_length;
+	unsigned char more;
+	unsigned char cts;
+	int local_write_pending = 0;
+	int queue_length;
+	int srdy;
+	int decode_result;
+
+	mrdy_set_low(ifx_dev);
+
+	if (!ifx_dev->spi_msg.status) {
+		/* check header validity, get comm flags */
+		swap_buf((u16 *)ifx_dev->rx_buffer, IFX_SPI_HEADER_OVERHEAD,
+			&ifx_dev->rx_buffer[IFX_SPI_HEADER_OVERHEAD]);
+		decode_result = ifx_spi_decode_spi_header(ifx_dev->rx_buffer,
+				&length, &more, &cts);
+		if (decode_result == IFX_SPI_HEADER_0) {
+			dev_dbg(&ifx_dev->spi_dev->dev,
+				"ignore input: invalid header 0");
+			ifx_dev->spi_slave_cts = 0;
+			goto complete_exit;
+		} else if (decode_result == IFX_SPI_HEADER_F) {
+			dev_dbg(&ifx_dev->spi_dev->dev,
+				"ignore input: invalid header F");
+			goto complete_exit;
+		}
+
+		ifx_dev->spi_slave_cts = cts;
+
+		actual_length = min((unsigned int)length,
+					ifx_dev->spi_msg.actual_length);
+		swap_buf((u16 *)(ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD),
+			 actual_length,
+			 &ifx_dev->rx_buffer[IFX_SPI_TRANSFER_SIZE]);
+		ifx_spi_insert_flip_string(
+			ifx_dev,
+			ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD,
+			(size_t)actual_length);
+	} else {
+		dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d",
+		       ifx_dev->spi_msg.status);
+	}
+
+complete_exit:
+	if (ifx_dev->write_pending) {
+		ifx_dev->write_pending = 0;
+		local_write_pending = 1;
+	}
+
+	clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &(ifx_dev->flags));
+
+	queue_length = kfifo_len(&ifx_dev->tx_fifo);
+	srdy = gpio_get_value(ifx_dev->gpio.srdy);
+	if (!srdy)
+		ifx_spi_power_state_clear(ifx_dev, IFX_SPI_POWER_SRDY);
+
+	/* schedule output if there is more to do */
+	if (test_and_clear_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags))
+		tasklet_schedule(&ifx_dev->io_work_tasklet);
+	else {
+		if (more || ifx_dev->spi_more || queue_length > 0 ||
+			local_write_pending) {
+			if (ifx_dev->spi_slave_cts) {
+				if (more)
+					mrdy_assert(ifx_dev);
+			} else
+				mrdy_assert(ifx_dev);
+		} else {
+			/*
+			 * poke line discipline driver if any for more data
+			 * may or may not get more data to write
+			 * for now, say not busy
+			 */
+			ifx_spi_power_state_clear(ifx_dev,
+						  IFX_SPI_POWER_DATA_PENDING);
+			tty = tty_port_tty_get(&ifx_dev->tty_port);
+			if (tty) {
+				ldisc = tty_ldisc_ref(tty);
+				if (ldisc) {
+					ldisc->ops->write_wakeup(tty);
+					tty_ldisc_deref(ldisc);
+				}
+				tty_kref_put(tty);
+			}
+		}
+	}
+}
+
+/**
+ *	ifx_spi_io		-	I/O tasklet
+ *	@data: our SPI device
+ *
+ *	Queue data for transmission if possible and then kick off the
+ *	transfer.
+ */
+static void ifx_spi_io(unsigned long data)
+{
+	int retval;
+	struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *) data;
+
+	if (!test_and_set_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags)) {
+		if (ifx_dev->gpio.unack_srdy_int_nb > 0)
+			ifx_dev->gpio.unack_srdy_int_nb--;
+
+		ifx_spi_prepare_tx_buffer(ifx_dev);
+
+		spi_message_init(&ifx_dev->spi_msg);
+		INIT_LIST_HEAD(&ifx_dev->spi_msg.queue);
+
+		ifx_dev->spi_msg.context = ifx_dev;
+		ifx_dev->spi_msg.complete = ifx_spi_complete;
+
+		/* set up our spi transfer */
+		/* note len is BYTES, not transfers */
+		ifx_dev->spi_xfer.len = IFX_SPI_TRANSFER_SIZE;
+		ifx_dev->spi_xfer.cs_change = 0;
+		ifx_dev->spi_xfer.speed_hz = 12500000;
+		/* ifx_dev->spi_xfer.speed_hz = 390625; */
+		ifx_dev->spi_xfer.bits_per_word = spi_b16 ? 16 : 8;
+
+		ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
+		ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
+
+		/*
+		 * setup dma pointers
+		 */
+		if (ifx_dev->is_6160) {
+			ifx_dev->spi_msg.is_dma_mapped = 1;
+			ifx_dev->tx_dma = ifx_dev->tx_bus;
+			ifx_dev->rx_dma = ifx_dev->rx_bus;
+			ifx_dev->spi_xfer.tx_dma = ifx_dev->tx_dma;
+			ifx_dev->spi_xfer.rx_dma = ifx_dev->rx_dma;
+		} else {
+			ifx_dev->spi_msg.is_dma_mapped = 0;
+			ifx_dev->tx_dma = (dma_addr_t)0;
+			ifx_dev->rx_dma = (dma_addr_t)0;
+			ifx_dev->spi_xfer.tx_dma = (dma_addr_t)0;
+			ifx_dev->spi_xfer.rx_dma = (dma_addr_t)0;
+		}
+
+		spi_message_add_tail(&ifx_dev->spi_xfer, &ifx_dev->spi_msg);
+
+		/* Assert MRDY. This may have already been done by the write
+		 * routine.
+		 */
+		mrdy_assert(ifx_dev);
+
+		retval = spi_async(ifx_dev->spi_dev, &ifx_dev->spi_msg);
+		if (retval) {
+			clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS,
+				  &ifx_dev->flags);
+			tasklet_schedule(&ifx_dev->io_work_tasklet);
+			return;
+		}
+	} else
+		ifx_dev->write_pending = 1;
+}
+
+/**
+ *	ifx_spi_free_port	-	free up the tty side
+ *	@ifx_dev: IFX device going away
+ *
+ *	Unregister and free up a port when the device goes away
+ */
+static void ifx_spi_free_port(struct ifx_spi_device *ifx_dev)
+{
+	if (ifx_dev->tty_dev)
+		tty_unregister_device(tty_drv, ifx_dev->minor);
+	kfifo_free(&ifx_dev->tx_fifo);
+}
+
+/**
+ *	ifx_spi_create_port	-	create a new port
+ *	@ifx_dev: our spi device
+ *
+ *	Allocate and initialise the tty port that goes with this interface
+ *	and add it to the tty layer so that it can be opened.
+ */
+static int ifx_spi_create_port(struct ifx_spi_device *ifx_dev)
+{
+	int ret = 0;
+	struct tty_port *pport = &ifx_dev->tty_port;
+
+	spin_lock_init(&ifx_dev->fifo_lock);
+	lockdep_set_class_and_subclass(&ifx_dev->fifo_lock,
+		&ifx_spi_key, 0);
+
+	if (kfifo_alloc(&ifx_dev->tx_fifo, IFX_SPI_FIFO_SIZE, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	pport->ops = &ifx_tty_port_ops;
+	tty_port_init(pport);
+	ifx_dev->minor = IFX_SPI_TTY_ID;
+	ifx_dev->tty_dev = tty_register_device(tty_drv, ifx_dev->minor,
+					       &ifx_dev->spi_dev->dev);
+	if (IS_ERR(ifx_dev->tty_dev)) {
+		dev_dbg(&ifx_dev->spi_dev->dev,
+			"%s: registering tty device failed", __func__);
+		ret = PTR_ERR(ifx_dev->tty_dev);
+		goto error_ret;
+	}
+	return 0;
+
+error_ret:
+	ifx_spi_free_port(ifx_dev);
+	return ret;
+}
+
+/**
+ *	ifx_spi_handle_srdy		-	handle SRDY
+ *	@ifx_dev: device asserting SRDY
+ *
+ *	Check our device state and see what we need to kick off when SRDY
+ *	is asserted. This usually means killing the timer and firing off the
+ *	I/O processing.
+ */
+static void ifx_spi_handle_srdy(struct ifx_spi_device *ifx_dev)
+{
+	if (test_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags)) {
+		del_timer_sync(&ifx_dev->spi_timer);
+		clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+	}
+
+	ifx_spi_power_state_set(ifx_dev, IFX_SPI_POWER_SRDY);
+
+	if (!test_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags))
+		tasklet_schedule(&ifx_dev->io_work_tasklet);
+	else
+		set_bit(IFX_SPI_STATE_IO_READY, &ifx_dev->flags);
+}
+
+/**
+ *	ifx_spi_srdy_interrupt	-	SRDY asserted
+ *	@irq: our IRQ number
+ *	@dev: our ifx device
+ *
+ *	The modem asserted SRDY. Handle the srdy event
+ */
+static irqreturn_t ifx_spi_srdy_interrupt(int irq, void *dev)
+{
+	struct ifx_spi_device *ifx_dev = dev;
+	ifx_dev->gpio.unack_srdy_int_nb++;
+	ifx_spi_handle_srdy(ifx_dev);
+	return IRQ_HANDLED;
+}
+
+/**
+ *	ifx_spi_reset_interrupt	-	Modem has changed reset state
+ *	@irq: interrupt number
+ *	@dev: our device pointer
+ *
+ *	The modem has either entered or left reset state. Check the GPIO
+ *	line to see which.
+ *
+ *	FIXME: review locking on MR_INPROGRESS versus
+ *	parallel unsolicited reset/solicited reset
+ */
+static irqreturn_t ifx_spi_reset_interrupt(int irq, void *dev)
+{
+	struct ifx_spi_device *ifx_dev = dev;
+	int val = gpio_get_value(ifx_dev->gpio.reset_out);
+	int solreset = test_bit(MR_START, &ifx_dev->mdm_reset_state);
+
+	if (val == 0) {
+		/* entered reset */
+		set_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
+		if (!solreset) {
+			/* unsolicited reset  */
+			ifx_spi_ttyhangup(ifx_dev);
+		}
+	} else {
+		/* exited reset */
+		clear_bit(MR_INPROGRESS, &ifx_dev->mdm_reset_state);
+		if (solreset) {
+			set_bit(MR_COMPLETE, &ifx_dev->mdm_reset_state);
+			wake_up(&ifx_dev->mdm_reset_wait);
+		}
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ *	ifx_spi_free_device - free device
+ *	@ifx_dev: device to free
+ *
+ *	Free the IFX device
+ */
+static void ifx_spi_free_device(struct ifx_spi_device *ifx_dev)
+{
+	ifx_spi_free_port(ifx_dev);
+	dma_free_coherent(&ifx_dev->spi_dev->dev,
+				IFX_SPI_TRANSFER_SIZE,
+				ifx_dev->tx_buffer,
+				ifx_dev->tx_bus);
+	dma_free_coherent(&ifx_dev->spi_dev->dev,
+				IFX_SPI_TRANSFER_SIZE,
+				ifx_dev->rx_buffer,
+				ifx_dev->rx_bus);
+}
+
+/**
+ *	ifx_spi_reset	-	reset modem
+ *	@ifx_dev: modem to reset
+ *
+ *	Perform a reset on the modem
+ */
+static int ifx_spi_reset(struct ifx_spi_device *ifx_dev)
+{
+	int ret;
+	/*
+	 * set up modem power, reset
+	 *
+	 * delays are required on some platforms for the modem
+	 * to reset properly
+	 */
+	set_bit(MR_START, &ifx_dev->mdm_reset_state);
+	gpio_set_value(ifx_dev->gpio.po, 0);
+	gpio_set_value(ifx_dev->gpio.reset, 0);
+	msleep(25);
+	gpio_set_value(ifx_dev->gpio.reset, 1);
+	msleep(1);
+	gpio_set_value(ifx_dev->gpio.po, 1);
+	msleep(1);
+	gpio_set_value(ifx_dev->gpio.po, 0);
+	ret = wait_event_timeout(ifx_dev->mdm_reset_wait,
+				 test_bit(MR_COMPLETE,
+					  &ifx_dev->mdm_reset_state),
+				 IFX_RESET_TIMEOUT);
+	if (!ret)
+		dev_warn(&ifx_dev->spi_dev->dev, "Modem reset timeout: (state:%lx)",
+			 ifx_dev->mdm_reset_state);
+
+	ifx_dev->mdm_reset_state = 0;
+	return ret;
+}
+
+/**
+ *	ifx_spi_spi_probe	-	probe callback
+ *	@spi: our possible matching SPI device
+ *
+ *	Probe for a 6x60 modem on SPI bus. Perform any needed device and
+ *	GPIO setup.
+ *
+ *	FIXME:
+ *	-	Support for multiple devices
+ *	-	Split out MID specific GPIO handling eventually
+ */
+
+static int ifx_spi_spi_probe(struct spi_device *spi)
+{
+	int ret;
+	int srdy;
+	struct ifx_modem_platform_data *pl_data = spi->dev.platform_data;
+	struct ifx_spi_device *ifx_dev;
+
+	if (saved_ifx_dev) {
+		dev_dbg(&spi->dev, "ignoring subsequent detection");
+		return -ENODEV;
+	}
+
+	if (!pl_data) {
+		dev_err(&spi->dev, "missing platform data!");
+		return -ENODEV;
+	}
+
+	/* initialize structure to hold our device variables */
+	ifx_dev = kzalloc(sizeof(struct ifx_spi_device), GFP_KERNEL);
+	if (!ifx_dev) {
+		dev_err(&spi->dev, "spi device allocation failed");
+		return -ENOMEM;
+	}
+	saved_ifx_dev = ifx_dev;
+	ifx_dev->spi_dev = spi;
+	clear_bit(IFX_SPI_STATE_IO_IN_PROGRESS, &ifx_dev->flags);
+	spin_lock_init(&ifx_dev->write_lock);
+	spin_lock_init(&ifx_dev->power_lock);
+	ifx_dev->power_status = 0;
+	init_timer(&ifx_dev->spi_timer);
+	ifx_dev->spi_timer.function = ifx_spi_timeout;
+	ifx_dev->spi_timer.data = (unsigned long)ifx_dev;
+	ifx_dev->is_6160 = pl_data->is_6160;
+
+	/* ensure SPI protocol flags are initialized to enable transfer */
+	ifx_dev->spi_more = 0;
+	ifx_dev->spi_slave_cts = 0;
+
+	/*initialize transfer and dma buffers */
+	ifx_dev->tx_buffer = dma_alloc_coherent(&ifx_dev->spi_dev->dev,
+				IFX_SPI_TRANSFER_SIZE,
+				&ifx_dev->tx_bus,
+				GFP_KERNEL);
+	if (!ifx_dev->tx_buffer) {
+		dev_err(&spi->dev, "DMA-TX buffer allocation failed");
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	ifx_dev->rx_buffer = dma_alloc_coherent(&ifx_dev->spi_dev->dev,
+				IFX_SPI_TRANSFER_SIZE,
+				&ifx_dev->rx_bus,
+				GFP_KERNEL);
+	if (!ifx_dev->rx_buffer) {
+		dev_err(&spi->dev, "DMA-RX buffer allocation failed");
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* initialize waitq for modem reset */
+	init_waitqueue_head(&ifx_dev->mdm_reset_wait);
+
+	spi_set_drvdata(spi, ifx_dev);
+	tasklet_init(&ifx_dev->io_work_tasklet, ifx_spi_io,
+						(unsigned long)ifx_dev);
+
+	set_bit(IFX_SPI_STATE_PRESENT, &ifx_dev->flags);
+
+	/* create our tty port */
+	ret = ifx_spi_create_port(ifx_dev);
+	if (ret != 0) {
+		dev_err(&spi->dev, "create default tty port failed");
+		goto error_ret;
+	}
+
+	ifx_dev->gpio.reset = pl_data->rst_pmu;
+	ifx_dev->gpio.po = pl_data->pwr_on;
+	ifx_dev->gpio.mrdy = pl_data->mrdy;
+	ifx_dev->gpio.srdy = pl_data->srdy;
+	ifx_dev->gpio.reset_out = pl_data->rst_out;
+
+	dev_info(&spi->dev, "gpios %d, %d, %d, %d, %d",
+		 ifx_dev->gpio.reset, ifx_dev->gpio.po, ifx_dev->gpio.mrdy,
+		 ifx_dev->gpio.srdy, ifx_dev->gpio.reset_out);
+
+	/* Configure gpios */
+	ret = gpio_request(ifx_dev->gpio.reset, "ifxModem");
+	if (ret < 0) {
+		dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET)",
+			ifx_dev->gpio.reset);
+		goto error_ret;
+	}
+	ret += gpio_direction_output(ifx_dev->gpio.reset, 0);
+	ret += gpio_export(ifx_dev->gpio.reset, 1);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to configure GPIO%d (RESET)",
+			ifx_dev->gpio.reset);
+		ret = -EBUSY;
+		goto error_ret2;
+	}
+
+	ret = gpio_request(ifx_dev->gpio.po, "ifxModem");
+	ret += gpio_direction_output(ifx_dev->gpio.po, 0);
+	ret += gpio_export(ifx_dev->gpio.po, 1);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to configure GPIO%d (ON)",
+			ifx_dev->gpio.po);
+		ret = -EBUSY;
+		goto error_ret3;
+	}
+
+	ret = gpio_request(ifx_dev->gpio.mrdy, "ifxModem");
+	if (ret < 0) {
+		dev_err(&spi->dev, "Unable to allocate GPIO%d (MRDY)",
+			ifx_dev->gpio.mrdy);
+		goto error_ret3;
+	}
+	ret += gpio_export(ifx_dev->gpio.mrdy, 1);
+	ret += gpio_direction_output(ifx_dev->gpio.mrdy, 0);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to configure GPIO%d (MRDY)",
+			ifx_dev->gpio.mrdy);
+		ret = -EBUSY;
+		goto error_ret4;
+	}
+
+	ret = gpio_request(ifx_dev->gpio.srdy, "ifxModem");
+	if (ret < 0) {
+		dev_err(&spi->dev, "Unable to allocate GPIO%d (SRDY)",
+			ifx_dev->gpio.srdy);
+		ret = -EBUSY;
+		goto error_ret4;
+	}
+	ret += gpio_export(ifx_dev->gpio.srdy, 1);
+	ret += gpio_direction_input(ifx_dev->gpio.srdy);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to configure GPIO%d (SRDY)",
+			ifx_dev->gpio.srdy);
+		ret = -EBUSY;
+		goto error_ret5;
+	}
+
+	ret = gpio_request(ifx_dev->gpio.reset_out, "ifxModem");
+	if (ret < 0) {
+		dev_err(&spi->dev, "Unable to allocate GPIO%d (RESET_OUT)",
+			ifx_dev->gpio.reset_out);
+		goto error_ret5;
+	}
+	ret += gpio_export(ifx_dev->gpio.reset_out, 1);
+	ret += gpio_direction_input(ifx_dev->gpio.reset_out);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to configure GPIO%d (RESET_OUT)",
+			ifx_dev->gpio.reset_out);
+		ret = -EBUSY;
+		goto error_ret6;
+	}
+
+	ret = request_irq(gpio_to_irq(ifx_dev->gpio.reset_out),
+			  ifx_spi_reset_interrupt,
+			  IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME,
+		(void *)ifx_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to get irq %x\n",
+			gpio_to_irq(ifx_dev->gpio.reset_out));
+		goto error_ret6;
+	}
+
+	ret = ifx_spi_reset(ifx_dev);
+
+	ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy),
+			  ifx_spi_srdy_interrupt,
+			  IRQF_TRIGGER_RISING, DRVNAME,
+			  (void *)ifx_dev);
+	if (ret) {
+		dev_err(&spi->dev, "Unable to get irq %x",
+			gpio_to_irq(ifx_dev->gpio.srdy));
+		goto error_ret7;
+	}
+
+	/* set pm runtime power state and register with power system */
+	pm_runtime_set_active(&spi->dev);
+	pm_runtime_enable(&spi->dev);
+
+	/* handle case that modem is already signaling SRDY */
+	/* no outgoing tty open at this point, this just satisfies the
+	 * modem's read and should reset communication properly
+	 */
+	srdy = gpio_get_value(ifx_dev->gpio.srdy);
+
+	if (srdy) {
+		mrdy_assert(ifx_dev);
+		ifx_spi_handle_srdy(ifx_dev);
+	} else
+		mrdy_set_low(ifx_dev);
+	return 0;
+
+error_ret7:
+	free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
+error_ret6:
+	gpio_free(ifx_dev->gpio.srdy);
+error_ret5:
+	gpio_free(ifx_dev->gpio.mrdy);
+error_ret4:
+	gpio_free(ifx_dev->gpio.reset);
+error_ret3:
+	gpio_free(ifx_dev->gpio.po);
+error_ret2:
+	gpio_free(ifx_dev->gpio.reset_out);
+error_ret:
+	ifx_spi_free_device(ifx_dev);
+	saved_ifx_dev = NULL;
+	return ret;
+}
+
+/**
+ *	ifx_spi_spi_remove	-	SPI device was removed
+ *	@spi: SPI device
+ *
+ *	FIXME: We should be shutting the device down here not in
+ *	the module unload path.
+ */
+
+static int ifx_spi_spi_remove(struct spi_device *spi)
+{
+	struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
+	/* stop activity */
+	tasklet_kill(&ifx_dev->io_work_tasklet);
+	/* free irq */
+	free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
+	free_irq(gpio_to_irq(ifx_dev->gpio.srdy), (void *)ifx_dev);
+
+	gpio_free(ifx_dev->gpio.srdy);
+	gpio_free(ifx_dev->gpio.mrdy);
+	gpio_free(ifx_dev->gpio.reset);
+	gpio_free(ifx_dev->gpio.po);
+	gpio_free(ifx_dev->gpio.reset_out);
+
+	/* free allocations */
+	ifx_spi_free_device(ifx_dev);
+
+	saved_ifx_dev = NULL;
+	return 0;
+}
+
+/**
+ *	ifx_spi_spi_shutdown	-	called on SPI shutdown
+ *	@spi: SPI device
+ *
+ *	No action needs to be taken here
+ */
+
+static void ifx_spi_spi_shutdown(struct spi_device *spi)
+{
+}
+
+/*
+ * various suspends and resumes have nothing to do
+ * no hardware to save state for
+ */
+
+/**
+ *	ifx_spi_spi_suspend	-	suspend SPI on system suspend
+ *	@spi: SPI device being suspended
+ *	@msg: power management event message
+ *
+ *	Suspend the SPI side. No action needed on Intel MID platforms, may
+ *	need extending for other systems.
+ */
+static int ifx_spi_spi_suspend(struct spi_device *spi, pm_message_t msg)
+{
+	return 0;
+}
+
+/**
+ *	ifx_spi_spi_resume	-	resume SPI side on system resume
+ *	@spi: SPI device being resumed
+ *
+ *	Resume the SPI side. No action needed on Intel MID platforms, may
+ *	need extending for other systems.
+ */
+static int ifx_spi_spi_resume(struct spi_device *spi)
+{
+	return 0;
+}
+
+/**
+ *	ifx_spi_pm_suspend	-	suspend modem on system suspend
+ *	@dev: device being suspended
+ *
+ *	Suspend the modem. No action needed on Intel MID platforms, may
+ *	need extending for other systems.
+ */
+static int ifx_spi_pm_suspend(struct device *dev)
+{
+	return 0;
+}
+
+/**
+ *	ifx_spi_pm_resume	-	resume modem on system resume
+ *	@dev: device being resumed
+ *
+ *	Allow the modem to resume. No action needed.
+ *
+ *	FIXME: do we need to reset anything here ?
+ */
+static int ifx_spi_pm_resume(struct device *dev)
+{
+	return 0;
+}
+
+/**
+ *	ifx_spi_pm_runtime_resume	-	resume modem
+ *	@dev: device being resumed
+ *
+ *	Allow the modem to resume. No action needed.
+ */
+static int ifx_spi_pm_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+/**
+ *	ifx_spi_pm_runtime_suspend	-	suspend modem
+ *	@dev: device being suspended
+ *
+ *	Allow the modem to suspend, and thus allow the suspend to continue
+ *	up the device tree.
+ */
+static int ifx_spi_pm_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+/**
+ *	ifx_spi_pm_runtime_idle		-	check if modem idle
+ *	@dev: our device
+ *
+ *	Check conditions and queue runtime suspend if idle.
+ */
+static int ifx_spi_pm_runtime_idle(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
+
+	if (!ifx_dev->power_status)
+		pm_runtime_suspend(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops ifx_spi_pm = {
+	.resume = ifx_spi_pm_resume,
+	.suspend = ifx_spi_pm_suspend,
+	.runtime_resume = ifx_spi_pm_runtime_resume,
+	.runtime_suspend = ifx_spi_pm_runtime_suspend,
+	.runtime_idle = ifx_spi_pm_runtime_idle
+};
+
+static const struct spi_device_id ifx_id_table[] = {
+	{"ifx6160", 0},
+	{"ifx6260", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, ifx_id_table);
+
+/* spi operations */
+static const struct spi_driver ifx_spi_driver_6160 = {
+	.driver = {
+		.name = "ifx6160",
+		.bus = &spi_bus_type,
+		.pm = &ifx_spi_pm,
+		.owner = THIS_MODULE},
+	.probe = ifx_spi_spi_probe,
+	.shutdown = ifx_spi_spi_shutdown,
+	.remove = __devexit_p(ifx_spi_spi_remove),
+	.suspend = ifx_spi_spi_suspend,
+	.resume = ifx_spi_spi_resume,
+	.id_table = ifx_id_table
+};
+
+/**
+ *	ifx_spi_exit	-	module exit
+ *
+ *	Unload the module.
+ */
+
+static void __exit ifx_spi_exit(void)
+{
+	/* unregister */
+	tty_unregister_driver(tty_drv);
+	spi_unregister_driver((void *)&ifx_spi_driver_6160);
+}
+
+/**
+ *	ifx_spi_init		-	module entry point
+ *
+ *	Initialise the SPI and tty interfaces for the IFX SPI driver.
+ *	We must register the upper-edge SPI driver after the tty
+ *	driver, otherwise the SPI probe would race against the tty setup.
+ */
+
+static int __init ifx_spi_init(void)
+{
+	int result;
+
+	tty_drv = alloc_tty_driver(1);
+	if (!tty_drv) {
+		pr_err("%s: alloc_tty_driver failed", DRVNAME);
+		return -ENOMEM;
+	}
+
+	tty_drv->magic = TTY_DRIVER_MAGIC;
+	tty_drv->owner = THIS_MODULE;
+	tty_drv->driver_name = DRVNAME;
+	tty_drv->name = TTYNAME;
+	tty_drv->minor_start = IFX_SPI_TTY_ID;
+	tty_drv->num = 1;
+	tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+	tty_drv->subtype = SERIAL_TYPE_NORMAL;
+	tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	tty_drv->init_termios = tty_std_termios;
+
+	tty_set_operations(tty_drv, &ifx_spi_serial_ops);
+
+	result = tty_register_driver(tty_drv);
+	if (result) {
+		pr_err("%s: tty_register_driver failed(%d)",
+			DRVNAME, result);
+		put_tty_driver(tty_drv);
+		return result;
+	}
+
+	result = spi_register_driver((void *)&ifx_spi_driver_6160);
+	if (result) {
+		pr_err("%s: spi_register_driver failed(%d)",
+			DRVNAME, result);
+		tty_unregister_driver(tty_drv);
+	}
+	return result;
+}
+
+module_init(ifx_spi_init);
+module_exit(ifx_spi_exit);
+
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("IFX6x60 spi driver");
+MODULE_LICENSE("GPL");
+MODULE_INFO(Version, "0.1-IFX6x60");
diff --git a/drivers/serial/ifx6x60.h b/drivers/serial/ifx6x60.h
new file mode 100644
index 0000000..deb7b8d
--- /dev/null
+++ b/drivers/serial/ifx6x60.h
@@ -0,0 +1,129 @@
+/****************************************************************************
+ *
+ * Driver for the IFX spi modem.
+ *
+ * Copyright (C) 2009, 2010 Intel Corp
+ * Jim Stanley <jim.stanley@intel.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA
+ *
+ *
+ *
+ *****************************************************************************/
+#ifndef _IFX6X60_H
+#define _IFX6X60_H
+
+#define DRVNAME				"ifx6x60"
+#define TTYNAME				"ttyIFX"
+
+/* #define IFX_THROTTLE_CODE */
+
+#define IFX_SPI_MAX_MINORS		1
+#define IFX_SPI_TRANSFER_SIZE		2048
+#define IFX_SPI_FIFO_SIZE		4096
+
+#define IFX_SPI_HEADER_OVERHEAD		4
+#define IFX_RESET_TIMEOUT		msecs_to_jiffies(50)
+
+/* device flags bitfield definitions */
+#define IFX_SPI_STATE_PRESENT		0
+#define IFX_SPI_STATE_IO_IN_PROGRESS	1
+#define IFX_SPI_STATE_IO_READY		2
+#define IFX_SPI_STATE_TIMER_PENDING	3
+
+/* flow control bitfields */
+#define IFX_SPI_DCD			0
+#define IFX_SPI_CTS			1
+#define IFX_SPI_DSR			2
+#define IFX_SPI_RI			3
+#define IFX_SPI_DTR			4
+#define IFX_SPI_RTS			5
+#define IFX_SPI_TX_FC			6
+#define IFX_SPI_RX_FC			7
+#define IFX_SPI_UPDATE			8
+
+#define IFX_SPI_PAYLOAD_SIZE		(IFX_SPI_TRANSFER_SIZE - \
+						IFX_SPI_HEADER_OVERHEAD)
+
+#define IFX_SPI_IRQ_TYPE		DETECT_EDGE_RISING
+#define IFX_SPI_GPIO_TARGET		0
+#define IFX_SPI_GPIO0			0x105
+
+#define IFX_SPI_STATUS_TIMEOUT		(2000*HZ)
+
+/* values for bits in power status byte */
+#define IFX_SPI_POWER_DATA_PENDING	1
+#define IFX_SPI_POWER_SRDY		2
+
+struct ifx_spi_device {
+	/* Our SPI device */
+	struct spi_device *spi_dev;
+
+	/* Port specific data */
+	struct kfifo tx_fifo;
+	spinlock_t fifo_lock;
+	unsigned long signal_state;
+
+	/* TTY Layer logic */
+	struct tty_port tty_port;
+	struct device *tty_dev;
+	int minor;
+
+	/* Low level I/O work */
+	struct tasklet_struct io_work_tasklet;
+	unsigned long flags;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+
+	int is_6160;				/* Modem type */
+
+	spinlock_t write_lock;
+	int write_pending;
+	spinlock_t power_lock;
+	unsigned char power_status;
+
+	unsigned char *rx_buffer;
+	unsigned char *tx_buffer;
+	dma_addr_t rx_bus;
+	dma_addr_t tx_bus;
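+	/* flow state mirrored to/from the SPI frame headers */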
+	unsigned char spi_more;
+	unsigned char spi_slave_cts;
+
+	struct timer_list spi_timer;
+
+	struct spi_message spi_msg;
+	struct spi_transfer spi_xfer;
+
+	struct {
+		/* gpio lines */
+		unsigned short srdy;		/* slave-ready gpio */
+		unsigned short mrdy;		/* master-ready gpio */
+		unsigned short reset;		/* modem-reset gpio */
+		unsigned short po;		/* modem-on gpio */
+		unsigned short reset_out;	/* modem-in-reset gpio */
+		/* state/stats */
+		int unack_srdy_int_nb;
+	} gpio;
+
+	/* modem reset */
+	unsigned long mdm_reset_state;
+#define MR_START	0
+#define MR_INPROGRESS	1
+#define MR_COMPLETE	2
+	wait_queue_head_t mdm_reset_wait;
+};
+
+#endif /* _IFX6X60_H */
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index c4399e2..126ec7f 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -838,7 +838,11 @@
 static const char *
 mpc52xx_uart_type(struct uart_port *port)
 {
-	return port->type == PORT_MPC52xx ? "MPC52xx PSC" : NULL;
+	/*
+	 * We keep using PORT_MPC52xx for historic reasons although it also
+	 * applies to MPC512x; print "MPC5xxx" so as not to confuse users.
+	 */
+	return port->type == PORT_MPC52xx ? "MPC5xxx PSC" : NULL;
 }
 
 static void
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 17849dc..5c7abe4 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -15,6 +15,7 @@
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/nwpserial.h>
 
diff --git a/drivers/serial/omap-serial.c b/drivers/serial/omap-serial.c
index 14365f7..7f2f010 100644
--- a/drivers/serial/omap-serial.c
+++ b/drivers/serial/omap-serial.c
@@ -570,7 +570,7 @@
 	unsigned char efr = 0;
 
 	up->lcr = serial_in(up, UART_LCR);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	up->efr = serial_in(up, UART_EFR);
 	serial_out(up, UART_EFR, up->efr & ~UART_EFR_ECB);
 
@@ -598,7 +598,7 @@
 		efr |= OMAP_UART_SW_RX;
 
 	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
-	serial_out(up, UART_LCR, UART_LCR_DLAB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 
 	up->mcr = serial_in(up, UART_MCR);
 
@@ -612,14 +612,14 @@
 		up->mcr |= UART_MCR_XONANY;
 
 	serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
 	/* Enable special char function UARTi.EFR_REG[5] and
 	 * load the new software flow control mode IXON or IXOFF
 	 * and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
 	 */
 	serial_out(up, UART_EFR, efr | UART_EFR_SCD);
-	serial_out(up, UART_LCR, UART_LCR_DLAB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 
 	serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
 	serial_out(up, UART_LCR, up->lcr);
@@ -724,22 +724,22 @@
 	 * baud clock is not running
 	 * DLL_REG and DLH_REG set to 0.
 	 */
-	serial_out(up, UART_LCR, UART_LCR_DLAB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	serial_out(up, UART_DLL, 0);
 	serial_out(up, UART_DLM, 0);
 	serial_out(up, UART_LCR, 0);
 
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 
 	up->efr = serial_in(up, UART_EFR);
 	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
 
-	serial_out(up, UART_LCR, UART_LCR_DLAB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	up->mcr = serial_in(up, UART_MCR);
 	serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
 	/* FIFO ENABLE, DMA MODE */
 	serial_out(up, UART_FCR, up->fcr);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 
 	if (up->use_dma) {
 		serial_out(up, UART_TI752_TLR, 0);
@@ -748,52 +748,52 @@
 	}
 
 	serial_out(up, UART_EFR, up->efr);
-	serial_out(up, UART_LCR, UART_LCR_DLAB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 	serial_out(up, UART_MCR, up->mcr);
 
 	/* Protocol, Baud Rate, and Interrupt Settings */
 
-	serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_DISABLE);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 
 	up->efr = serial_in(up, UART_EFR);
 	serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
 
 	serial_out(up, UART_LCR, 0);
 	serial_out(up, UART_IER, 0);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 
 	serial_out(up, UART_DLL, quot & 0xff);          /* LS of divisor */
 	serial_out(up, UART_DLM, quot >> 8);            /* MS of divisor */
 
 	serial_out(up, UART_LCR, 0);
 	serial_out(up, UART_IER, up->ier);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 
 	serial_out(up, UART_EFR, up->efr);
 	serial_out(up, UART_LCR, cval);
 
 	if (baud > 230400 && baud != 3000000)
-		serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE13X);
+		serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
 	else
-		serial_out(up, UART_OMAP_MDR1, OMAP_MDR1_MODE16X);
+		serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
 
 	/* Hardware Flow Control Configuration */
 
 	if (termios->c_cflag & CRTSCTS) {
 		efr |= (UART_EFR_CTS | UART_EFR_RTS);
-		serial_out(up, UART_LCR, UART_LCR_DLAB);
+		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 
 		up->mcr = serial_in(up, UART_MCR);
 		serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
 
-		serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 		up->efr = serial_in(up, UART_EFR);
 		serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
 
 		serial_out(up, UART_TI752_TCR, OMAP_UART_TCR_TRIG);
 		serial_out(up, UART_EFR, efr); /* Enable AUTORTS and AUTOCTS */
-		serial_out(up, UART_LCR, UART_LCR_DLAB);
+		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
 		serial_out(up, UART_MCR, up->mcr | UART_MCR_RTS);
 		serial_out(up, UART_LCR, cval);
 	}
@@ -815,13 +815,13 @@
 	unsigned char efr;
 
 	dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	efr = serial_in(up, UART_EFR);
 	serial_out(up, UART_EFR, efr | UART_EFR_ECB);
 	serial_out(up, UART_LCR, 0);
 
 	serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);
-	serial_out(up, UART_LCR, OMAP_UART_LCR_CONF_MDB);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
 	serial_out(up, UART_EFR, efr);
 	serial_out(up, UART_LCR, 0);
 	/* Enable module level wake up */
@@ -866,12 +866,6 @@
 	return up->name;
 }
 
-#ifdef CONFIG_SERIAL_OMAP_CONSOLE
-
-static struct uart_omap_port *serial_omap_console_ports[4];
-
-static struct uart_driver serial_omap_reg;
-
 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 
 static inline void wait_for_xmitr(struct uart_omap_port *up)
@@ -905,6 +899,34 @@
 	}
 }
 
+#ifdef CONFIG_CONSOLE_POLL
+
+static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+	struct uart_omap_port *up = (struct uart_omap_port *)port;
+	wait_for_xmitr(up);
+	serial_out(up, UART_TX, ch);
+}
+
+static int serial_omap_poll_get_char(struct uart_port *port)
+{
+	struct uart_omap_port *up = (struct uart_omap_port *)port;
+	unsigned int status = serial_in(up, UART_LSR);
+
+	if (!(status & UART_LSR_DR))
+		return NO_POLL_CHAR;
+
+	return serial_in(up, UART_RX);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+#ifdef CONFIG_SERIAL_OMAP_CONSOLE
+
+static struct uart_omap_port *serial_omap_console_ports[4];
+
+static struct uart_driver serial_omap_reg;
+
 static void serial_omap_console_putchar(struct uart_port *port, int ch)
 {
 	struct uart_omap_port *up = (struct uart_omap_port *)port;
@@ -1022,6 +1044,10 @@
 	.request_port	= serial_omap_request_port,
 	.config_port	= serial_omap_config_port,
 	.verify_port	= serial_omap_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_put_char  = serial_omap_poll_put_char,
+	.poll_get_char  = serial_omap_poll_get_char,
+#endif
 };
 
 static struct uart_driver serial_omap_reg = {
diff --git a/drivers/serial/pch_uart.c b/drivers/serial/pch_uart.c
new file mode 100644
index 0000000..70a6145
--- /dev/null
+++ b/drivers/serial/pch_uart.c
@@ -0,0 +1,1451 @@
+/*
+ *Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ *This program is free software; you can redistribute it and/or modify
+ *it under the terms of the GNU General Public License as published by
+ *the Free Software Foundation; version 2 of the License.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *GNU General Public License for more details.
+ *
+ *You should have received a copy of the GNU General Public License
+ *along with this program; if not, write to the Free Software
+ *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+#include <linux/serial_reg.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/serial_core.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
+
+enum {
+	PCH_UART_HANDLED_RX_INT_SHIFT,
+	PCH_UART_HANDLED_TX_INT_SHIFT,
+	PCH_UART_HANDLED_RX_ERR_INT_SHIFT,
+	PCH_UART_HANDLED_RX_TRG_INT_SHIFT,
+	PCH_UART_HANDLED_MS_INT_SHIFT,
+};
+
+enum {
+	PCH_UART_8LINE,
+	PCH_UART_2LINE,
+};
+
+#define PCH_UART_DRIVER_DEVICE "ttyPCH"
+
+#define PCH_UART_NR_GE_256FIFO		1
+#define PCH_UART_NR_GE_64FIFO		3
+#define PCH_UART_NR_GE	(PCH_UART_NR_GE_256FIFO+PCH_UART_NR_GE_64FIFO)
+#define PCH_UART_NR	PCH_UART_NR_GE
+
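+/* each handled-interrupt flag occupies its own even bit (shift doubled) */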
+#define PCH_UART_HANDLED_RX_INT	(1<<((PCH_UART_HANDLED_RX_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_TX_INT	(1<<((PCH_UART_HANDLED_TX_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_RX_ERR_INT	(1<<((\
+					PCH_UART_HANDLED_RX_ERR_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_RX_TRG_INT	(1<<((\
+					PCH_UART_HANDLED_RX_TRG_INT_SHIFT)<<1))
+#define PCH_UART_HANDLED_MS_INT	(1<<((PCH_UART_HANDLED_MS_INT_SHIFT)<<1))
+
+#define PCH_UART_RBR		0x00
+#define PCH_UART_THR		0x00
+
+#define PCH_UART_IER_MASK	(PCH_UART_IER_ERBFI|PCH_UART_IER_ETBEI|\
+				PCH_UART_IER_ELSI|PCH_UART_IER_EDSSI)
+#define PCH_UART_IER_ERBFI	0x00000001
+#define PCH_UART_IER_ETBEI	0x00000002
+#define PCH_UART_IER_ELSI	0x00000004
+#define PCH_UART_IER_EDSSI	0x00000008
+
+#define PCH_UART_IIR_IP			0x00000001
+#define PCH_UART_IIR_IID		0x00000006
+#define PCH_UART_IIR_MSI		0x00000000
+#define PCH_UART_IIR_TRI		0x00000002
+#define PCH_UART_IIR_RRI		0x00000004
+#define PCH_UART_IIR_REI		0x00000006
+#define PCH_UART_IIR_TOI		0x00000008
+#define PCH_UART_IIR_FIFO256		0x00000020
+#define PCH_UART_IIR_FIFO64		PCH_UART_IIR_FIFO256
+#define PCH_UART_IIR_FE			0x000000C0
+
+#define PCH_UART_FCR_FIFOE		0x00000001
+#define PCH_UART_FCR_RFR		0x00000002
+#define PCH_UART_FCR_TFR		0x00000004
+#define PCH_UART_FCR_DMS		0x00000008
+#define PCH_UART_FCR_FIFO256		0x00000020
+#define PCH_UART_FCR_RFTL		0x000000C0
+
+#define PCH_UART_FCR_RFTL1		0x00000000
+#define PCH_UART_FCR_RFTL64		0x00000040
+#define PCH_UART_FCR_RFTL128		0x00000080
+#define PCH_UART_FCR_RFTL224		0x000000C0
+#define PCH_UART_FCR_RFTL16		PCH_UART_FCR_RFTL64
+#define PCH_UART_FCR_RFTL32		PCH_UART_FCR_RFTL128
+#define PCH_UART_FCR_RFTL56		PCH_UART_FCR_RFTL224
+#define PCH_UART_FCR_RFTL4		PCH_UART_FCR_RFTL64
+#define PCH_UART_FCR_RFTL8		PCH_UART_FCR_RFTL128
+#define PCH_UART_FCR_RFTL14		PCH_UART_FCR_RFTL224
+#define PCH_UART_FCR_RFTL_SHIFT		6
+
+#define PCH_UART_LCR_WLS	0x00000003
+#define PCH_UART_LCR_STB	0x00000004
+#define PCH_UART_LCR_PEN	0x00000008
+#define PCH_UART_LCR_EPS	0x00000010
+#define PCH_UART_LCR_SP		0x00000020
+#define PCH_UART_LCR_SB		0x00000040
+#define PCH_UART_LCR_DLAB	0x00000080
+#define PCH_UART_LCR_NP		0x00000000
+#define PCH_UART_LCR_OP		PCH_UART_LCR_PEN
+#define PCH_UART_LCR_EP		(PCH_UART_LCR_PEN | PCH_UART_LCR_EPS)
+#define PCH_UART_LCR_1P		(PCH_UART_LCR_PEN | PCH_UART_LCR_SP)
+#define PCH_UART_LCR_0P		(PCH_UART_LCR_PEN | PCH_UART_LCR_EPS |\
+				PCH_UART_LCR_SP)
+
+#define PCH_UART_LCR_5BIT	0x00000000
+#define PCH_UART_LCR_6BIT	0x00000001
+#define PCH_UART_LCR_7BIT	0x00000002
+#define PCH_UART_LCR_8BIT	0x00000003
+
+#define PCH_UART_MCR_DTR	0x00000001
+#define PCH_UART_MCR_RTS	0x00000002
+#define PCH_UART_MCR_OUT	0x0000000C
+#define PCH_UART_MCR_LOOP	0x00000010
+#define PCH_UART_MCR_AFE	0x00000020
+
+#define PCH_UART_LSR_DR		0x00000001
+#define PCH_UART_LSR_ERR	(1<<7)
+
+#define PCH_UART_MSR_DCTS	0x00000001
+#define PCH_UART_MSR_DDSR	0x00000002
+#define PCH_UART_MSR_TERI	0x00000004
+#define PCH_UART_MSR_DDCD	0x00000008
+#define PCH_UART_MSR_CTS	0x00000010
+#define PCH_UART_MSR_DSR	0x00000020
+#define PCH_UART_MSR_RI		0x00000040
+#define PCH_UART_MSR_DCD	0x00000080
+#define PCH_UART_MSR_DELTA	(PCH_UART_MSR_DCTS | PCH_UART_MSR_DDSR |\
+				PCH_UART_MSR_TERI | PCH_UART_MSR_DDCD)
+
+#define PCH_UART_DLL		0x00
+#define PCH_UART_DLM		0x01
+
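+/* integer division rounded to the nearest whole value */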
+#define DIV_ROUND(a, b)	(((a) + ((b)/2)) / (b))
+
+#define PCH_UART_IID_RLS	(PCH_UART_IIR_REI)
+#define PCH_UART_IID_RDR	(PCH_UART_IIR_RRI)
+#define PCH_UART_IID_RDR_TO	(PCH_UART_IIR_RRI | PCH_UART_IIR_TOI)
+#define PCH_UART_IID_THRE	(PCH_UART_IIR_TRI)
+#define PCH_UART_IID_MS		(PCH_UART_IIR_MSI)
+
+#define PCH_UART_HAL_PARITY_NONE	(PCH_UART_LCR_NP)
+#define PCH_UART_HAL_PARITY_ODD		(PCH_UART_LCR_OP)
+#define PCH_UART_HAL_PARITY_EVEN	(PCH_UART_LCR_EP)
+#define PCH_UART_HAL_PARITY_FIX1	(PCH_UART_LCR_1P)
+#define PCH_UART_HAL_PARITY_FIX0	(PCH_UART_LCR_0P)
+#define PCH_UART_HAL_5BIT		(PCH_UART_LCR_5BIT)
+#define PCH_UART_HAL_6BIT		(PCH_UART_LCR_6BIT)
+#define PCH_UART_HAL_7BIT		(PCH_UART_LCR_7BIT)
+#define PCH_UART_HAL_8BIT		(PCH_UART_LCR_8BIT)
+#define PCH_UART_HAL_STB1		0
+#define PCH_UART_HAL_STB2		(PCH_UART_LCR_STB)
+
+#define PCH_UART_HAL_CLR_TX_FIFO	(PCH_UART_FCR_TFR)
+#define PCH_UART_HAL_CLR_RX_FIFO	(PCH_UART_FCR_RFR)
+#define PCH_UART_HAL_CLR_ALL_FIFO	(PCH_UART_HAL_CLR_TX_FIFO | \
+					PCH_UART_HAL_CLR_RX_FIFO)
+
+#define PCH_UART_HAL_DMA_MODE0		0
+#define PCH_UART_HAL_FIFO_DIS		0
+#define PCH_UART_HAL_FIFO16		(PCH_UART_FCR_FIFOE)
+#define PCH_UART_HAL_FIFO256		(PCH_UART_FCR_FIFOE | \
+					PCH_UART_FCR_FIFO256)
+#define PCH_UART_HAL_FIFO64		(PCH_UART_HAL_FIFO256)
+#define PCH_UART_HAL_TRIGGER1		(PCH_UART_FCR_RFTL1)
+#define PCH_UART_HAL_TRIGGER64		(PCH_UART_FCR_RFTL64)
+#define PCH_UART_HAL_TRIGGER128		(PCH_UART_FCR_RFTL128)
+#define PCH_UART_HAL_TRIGGER224		(PCH_UART_FCR_RFTL224)
+#define PCH_UART_HAL_TRIGGER16		(PCH_UART_FCR_RFTL16)
+#define PCH_UART_HAL_TRIGGER32		(PCH_UART_FCR_RFTL32)
+#define PCH_UART_HAL_TRIGGER56		(PCH_UART_FCR_RFTL56)
+#define PCH_UART_HAL_TRIGGER4		(PCH_UART_FCR_RFTL4)
+#define PCH_UART_HAL_TRIGGER8		(PCH_UART_FCR_RFTL8)
+#define PCH_UART_HAL_TRIGGER14		(PCH_UART_FCR_RFTL14)
+#define PCH_UART_HAL_TRIGGER_L		(PCH_UART_FCR_RFTL64)
+#define PCH_UART_HAL_TRIGGER_M		(PCH_UART_FCR_RFTL128)
+#define PCH_UART_HAL_TRIGGER_H		(PCH_UART_FCR_RFTL224)
+
+#define PCH_UART_HAL_RX_INT		(PCH_UART_IER_ERBFI)
+#define PCH_UART_HAL_TX_INT		(PCH_UART_IER_ETBEI)
+#define PCH_UART_HAL_RX_ERR_INT		(PCH_UART_IER_ELSI)
+#define PCH_UART_HAL_MS_INT		(PCH_UART_IER_EDSSI)
+#define PCH_UART_HAL_ALL_INT		(PCH_UART_IER_MASK)
+
+#define PCH_UART_HAL_DTR		(PCH_UART_MCR_DTR)
+#define PCH_UART_HAL_RTS		(PCH_UART_MCR_RTS)
+#define PCH_UART_HAL_OUT		(PCH_UART_MCR_OUT)
+#define PCH_UART_HAL_LOOP		(PCH_UART_MCR_LOOP)
+#define PCH_UART_HAL_AFE		(PCH_UART_MCR_AFE)
+
+struct pch_uart_buffer {
+	unsigned char *buf;
+	int size;
+};
+
+struct eg20t_port {
+	struct uart_port port;
+	int port_type;
+	void __iomem *membase;
+	resource_size_t mapbase;
+	unsigned int iobase;
+	struct pci_dev *pdev;
+	int fifo_size;
+	int base_baud;
+	int start_tx;
+	int start_rx;
+	int tx_empty;
+	int int_dis_flag;
+	int trigger;
+	int trigger_level;
+	struct pch_uart_buffer rxbuf;
+	unsigned int dmsr;
+	unsigned int fcr;
+	unsigned int use_dma;
+	unsigned int use_dma_flag;
+	struct dma_async_tx_descriptor	*desc_tx;
+	struct dma_async_tx_descriptor	*desc_rx;
+	struct pch_dma_slave		param_tx;
+	struct pch_dma_slave		param_rx;
+	struct dma_chan			*chan_tx;
+	struct dma_chan			*chan_rx;
+	struct scatterlist		sg_tx;
+	struct scatterlist		sg_rx;
+	int				tx_dma_use;
+	void				*rx_buf_virt;
+	dma_addr_t			rx_buf_dma;
+};
+
+static unsigned int default_baud = 9600;
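+/* Receive trigger levels in bytes, indexed by the 2-bit FCR RFTL field,
+ * one table per supported FIFO depth. */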
+static const int trigger_level_256[4] = { 1, 64, 128, 224 };
+static const int trigger_level_64[4] = { 1, 16, 32, 56 };
+static const int trigger_level_16[4] = { 1, 4, 8, 14 };
+static const int trigger_level_1[4] = { 1, 1, 1, 1 };
+
+static void pch_uart_hal_request(struct pci_dev *pdev, int fifosize,
+				 int base_baud)
+{
+	struct eg20t_port *priv = pci_get_drvdata(pdev);
+
+	priv->trigger_level = 1;
+	priv->fcr = 0;
+}
+
+static unsigned int get_msr(struct eg20t_port *priv, void __iomem *base)
+{
+	unsigned int msr = ioread8(base + UART_MSR);
+	priv->dmsr |= msr & PCH_UART_MSR_DELTA;
+
+	return msr;
+}
+
+static void pch_uart_hal_enable_interrupt(struct eg20t_port *priv,
+					  unsigned int flag)
+{
+	u8 ier = ioread8(priv->membase + UART_IER);
+	ier |= flag & PCH_UART_IER_MASK;
+	iowrite8(ier, priv->membase + UART_IER);
+}
+
+static void pch_uart_hal_disable_interrupt(struct eg20t_port *priv,
+					   unsigned int flag)
+{
+	u8 ier = ioread8(priv->membase + UART_IER);
+	ier &= ~(flag & PCH_UART_IER_MASK);
+	iowrite8(ier, priv->membase + UART_IER);
+}
+
+static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud,
+				 unsigned int parity, unsigned int bits,
+				 unsigned int stb)
+{
+	unsigned int dll, dlm, lcr;
+	int div;
+
+	div = DIV_ROUND(priv->base_baud / 16, baud);
+	if (div < 0 || USHRT_MAX <= div) {
+		pr_err("Invalid Baud(div=0x%x)\n", div);
+		return -EINVAL;
+	}
+
+	dll = (unsigned int)div & 0x00FFU;
+	dlm = ((unsigned int)div >> 8) & 0x00FFU;
+
+	if (parity & ~(PCH_UART_LCR_PEN | PCH_UART_LCR_EPS | PCH_UART_LCR_SP)) {
+		pr_err("Invalid parity(0x%x)\n", parity);
+		return -EINVAL;
+	}
+
+	if (bits & ~PCH_UART_LCR_WLS) {
+		pr_err("Invalid bits(0x%x)\n", bits);
+		return -EINVAL;
+	}
+
+	if (stb & ~PCH_UART_LCR_STB) {
+		pr_err("Invalid STB(0x%x)\n", stb);
+		return -EINVAL;
+	}
+
+	lcr = parity;
+	lcr |= bits;
+	lcr |= stb;
+
+	pr_debug("%s:baud = %d, div = %04x, lcr = %02x (%lu)\n",
+		 __func__, baud, div, lcr, jiffies);
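+	/* Set DLAB to access the divisor latch, program DLL/DLM, then
+	 * write the final LCR value (which also clears DLAB). */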
+	iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR);
+	iowrite8(dll, priv->membase + PCH_UART_DLL);
+	iowrite8(dlm, priv->membase + PCH_UART_DLM);
+	iowrite8(lcr, priv->membase + UART_LCR);
+
+	return 0;
+}
+
+static int pch_uart_hal_fifo_reset(struct eg20t_port *priv,
+				    unsigned int flag)
+{
+	if (flag & ~(PCH_UART_FCR_TFR | PCH_UART_FCR_RFR)) {
+		pr_err("%s:Invalid flag(0x%x)\n", __func__, flag);
+		return -EINVAL;
+	}
+
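+	/* Enable the FIFO, pulse the requested reset bit(s), then restore
+	 * the previously programmed FCR value. */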
+	iowrite8(PCH_UART_FCR_FIFOE | priv->fcr, priv->membase + UART_FCR);
+	iowrite8(PCH_UART_FCR_FIFOE | priv->fcr | flag,
+		 priv->membase + UART_FCR);
+	iowrite8(priv->fcr, priv->membase + UART_FCR);
+
+	return 0;
+}
+
+static int pch_uart_hal_set_fifo(struct eg20t_port *priv,
+				 unsigned int dmamode,
+				 unsigned int fifo_size, unsigned int trigger)
+{
+	u8 fcr;
+
+	if (dmamode & ~PCH_UART_FCR_DMS) {
+		pr_err("%s:Invalid DMA Mode(0x%x)\n", __func__, dmamode);
+		return -EINVAL;
+	}
+
+	if (fifo_size & ~(PCH_UART_FCR_FIFOE | PCH_UART_FCR_FIFO256)) {
+		pr_err("%s:Invalid FIFO SIZE(0x%x)\n", __func__, fifo_size);
+		return -EINVAL;
+	}
+
+	if (trigger & ~PCH_UART_FCR_RFTL) {
+		pr_err("%s:Invalid TRIGGER(0x%x)\n", __func__, trigger);
+		return -EINVAL;
+	}
+
+	switch (priv->fifo_size) {
+	case 256:
+		priv->trigger_level =
+		    trigger_level_256[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+		break;
+	case 64:
+		priv->trigger_level =
+		    trigger_level_64[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+		break;
+	case 16:
+		priv->trigger_level =
+		    trigger_level_16[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+		break;
+	default:
+		priv->trigger_level =
+		    trigger_level_1[trigger >> PCH_UART_FCR_RFTL_SHIFT];
+		break;
+	}
+	fcr =
+	    dmamode | fifo_size | trigger | PCH_UART_FCR_RFR | PCH_UART_FCR_TFR;
+	iowrite8(PCH_UART_FCR_FIFOE, priv->membase + UART_FCR);
+	iowrite8(PCH_UART_FCR_FIFOE | PCH_UART_FCR_RFR | PCH_UART_FCR_TFR,
+		 priv->membase + UART_FCR);
+	iowrite8(fcr, priv->membase + UART_FCR);
+	priv->fcr = fcr;
+
+	return 0;
+}
+
+static u8 pch_uart_hal_get_modem(struct eg20t_port *priv)
+{
+	priv->dmsr = 0;
+	return get_msr(priv, priv->membase);
+}
+
+static int pch_uart_hal_write(struct eg20t_port *priv,
+			      const unsigned char *buf, int tx_size)
+{
+	int i;
+	unsigned int thr;
+
+	for (i = 0; i < tx_size;) {
+		thr = buf[i++];
+		iowrite8(thr, priv->membase + PCH_UART_THR);
+	}
+	return i;
+}
+
+static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
+			     int rx_size)
+{
+	int i;
+	u8 rbr, lsr;
+
+	for (i = 0, lsr = ioread8(priv->membase + UART_LSR);
+	     i < rx_size && lsr & UART_LSR_DR;
+	     lsr = ioread8(priv->membase + UART_LSR)) {
+		rbr = ioread8(priv->membase + PCH_UART_RBR);
+		buf[i++] = rbr;
+	}
+	return i;
+}
+
+static unsigned int pch_uart_hal_get_iid(struct eg20t_port *priv)
+{
+	unsigned int iir;
+	int ret;
+
+	iir = ioread8(priv->membase + UART_IIR);
+	ret = (iir & (PCH_UART_IIR_IID | PCH_UART_IIR_TOI | PCH_UART_IIR_IP));
+	return ret;
+}
+
+static u8 pch_uart_hal_get_line_status(struct eg20t_port *priv)
+{
+	return ioread8(priv->membase + UART_LSR);
+}
+
+static void pch_uart_hal_set_break(struct eg20t_port *priv, int on)
+{
+	unsigned int lcr;
+
+	lcr = ioread8(priv->membase + UART_LCR);
+	if (on)
+		lcr |= PCH_UART_LCR_SB;
+	else
+		lcr &= ~PCH_UART_LCR_SB;
+
+	iowrite8(lcr, priv->membase + UART_LCR);
+}
+
+static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
+		   int size)
+{
+	struct uart_port *port;
+	struct tty_struct *tty;
+
+	port = &priv->port;
+	tty = tty_port_tty_get(&port->state->port);
+	if (!tty) {
+		pr_debug("%s:tty is busy now", __func__);
+		return -EBUSY;
+	}
+
+	tty_insert_flip_string(tty, buf, size);
+	tty_flip_buffer_push(tty);
+	tty_kref_put(tty);
+
+	return 0;
+}
+
+static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
+{
+	int ret;
+	struct uart_port *port = &priv->port;
+
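+	/* Fetch a pending x_char (high-priority flow-control character,
+	 * e.g. XON/XOFF) so it is sent ahead of normal data. */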
+	if (port->x_char) {
+		pr_debug("%s:X character send %02x (%lu)\n", __func__,
+			port->x_char, jiffies);
+		buf[0] = port->x_char;
+		port->x_char = 0;
+		ret = 1;
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int dma_push_rx(struct eg20t_port *priv, int size)
+{
+	struct tty_struct *tty;
+	int room;
+	struct uart_port *port = &priv->port;
+
+	tty = tty_port_tty_get(&port->state->port);
+	if (!tty) {
+		pr_debug("%s:tty is busy now", __func__);
+		return 0;
+	}
+
+	room = tty_buffer_request_room(tty, size);
+
+	if (room < size)
+		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+			 size - room);
+	if (!room)
+		return room;
+
+	tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
+
+	port->icount.rx += room;
+	tty_kref_put(tty);
+
+	return room;
+}
+
+static void pch_free_dma(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	priv = container_of(port, struct eg20t_port, port);
+
+	if (priv->chan_tx) {
+		dma_release_channel(priv->chan_tx);
+		priv->chan_tx = NULL;
+	}
+	if (priv->chan_rx) {
+		dma_release_channel(priv->chan_rx);
+		priv->chan_rx = NULL;
+	}
+	if (sg_dma_address(&priv->sg_rx))
+		dma_free_coherent(port->dev, port->fifosize,
+				  sg_virt(&priv->sg_rx),
+				  sg_dma_address(&priv->sg_rx));
+
+	return;
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+	struct pch_dma_slave *param = slave;
+
+	if ((chan->chan_id == param->chan_id) && (param->dma_dev ==
+						  chan->device->dev)) {
+		chan->private = param;
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static void pch_request_dma(struct uart_port *port)
+{
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	struct pci_dev *dma_dev;
+	struct pch_dma_slave *param;
+	struct eg20t_port *priv =
+				container_of(port, struct eg20t_port, port);
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* Get the DMA controller's device information */
+	dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0));
+	/* Set Tx DMA */
+	param = &priv->param_tx;
+	param->dma_dev = &dma_dev->dev;
+	param->chan_id = priv->port.line;
+	param->tx_reg = port->mapbase + UART_TX;
+	chan = dma_request_channel(mask, filter, param);
+	if (!chan) {
+		pr_err("%s:dma_request_channel FAILS(Tx)\n", __func__);
+		return;
+	}
+	priv->chan_tx = chan;
+
+	/* Set Rx DMA */
+	param = &priv->param_rx;
+	param->dma_dev = &dma_dev->dev;
+	param->chan_id = priv->port.line + 1; /* Rx = Tx + 1 */
+	param->rx_reg = port->mapbase + UART_RX;
+	chan = dma_request_channel(mask, filter, param);
+	if (!chan) {
+		pr_err("%s:dma_request_channel FAILS(Rx)\n", __func__);
+		dma_release_channel(priv->chan_tx);
+		return;
+	}
+
+	/* Get Consistent memory for DMA */
+	priv->rx_buf_virt = dma_alloc_coherent(port->dev, port->fifosize,
+				    &priv->rx_buf_dma, GFP_KERNEL);
+	priv->chan_rx = chan;
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+	struct eg20t_port *priv = arg;
+	struct uart_port *port = &priv->port;
+	struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+
+	if (!tty) {
+		pr_debug("%s:tty is busy now", __func__);
+		return;
+	}
+
+	if (dma_push_rx(priv, priv->trigger_level))
+		tty_flip_buffer_push(tty);
+
+	tty_kref_put(tty);
+}
+
+static void pch_dma_tx_complete(void *arg)
+{
+	struct eg20t_port *priv = arg;
+	struct uart_port *port = &priv->port;
+	struct circ_buf *xmit = &port->state->xmit;
+
+	xmit->tail += sg_dma_len(&priv->sg_tx);
+	xmit->tail &= UART_XMIT_SIZE - 1;
+	port->icount.tx += sg_dma_len(&priv->sg_tx);
+
+	async_tx_ack(priv->desc_tx);
+	priv->tx_dma_use = 0;
+}
+
+static int pop_tx(struct eg20t_port *priv, unsigned char *buf, int size)
+{
+	int count = 0;
+	struct uart_port *port = &priv->port;
+	struct circ_buf *xmit = &port->state->xmit;
+
+	if (uart_tx_stopped(port) || uart_circ_empty(xmit) || count >= size)
+		goto pop_tx_end;
+
+	do {
+		int cnt_to_end =
+		    CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+		int sz = min(size - count, cnt_to_end);
+		memcpy(&buf[count], &xmit->buf[xmit->tail], sz);
+		xmit->tail = (xmit->tail + sz) & (UART_XMIT_SIZE - 1);
+		count += sz;
+	} while (!uart_circ_empty(xmit) && count < size);
+
+pop_tx_end:
+	pr_debug("%d characters. Remained %d characters. (%lu)\n",
+		 count, size - count, jiffies);
+
+	return count;
+}
+
+static int handle_rx_to(struct eg20t_port *priv)
+{
+	struct pch_uart_buffer *buf;
+	int rx_size;
+	int ret;
+	if (!priv->start_rx) {
+		pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+		return 0;
+	}
+	buf = &priv->rxbuf;
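+	/* Drain the receive FIFO in rxbuf-sized chunks and push the data to
+	 * the tty until a short read indicates the FIFO is empty. */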
+	do {
+		rx_size = pch_uart_hal_read(priv, buf->buf, buf->size);
+		ret = push_rx(priv, buf->buf, rx_size);
+		if (ret)
+			return 0;
+	} while (rx_size == buf->size);
+
+	return PCH_UART_HANDLED_RX_INT;
+}
+
+static int handle_rx(struct eg20t_port *priv)
+{
+	return handle_rx_to(priv);
+}
+
+static int dma_handle_rx(struct eg20t_port *priv)
+{
+	struct uart_port *port = &priv->port;
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist *sg;
+
+	sg = &priv->sg_rx;
+
+	sg_init_table(&priv->sg_rx, 1); /* Initialize SG table */
+
+	sg_dma_len(sg) = priv->fifo_size;
+
+	sg_set_page(&priv->sg_rx, virt_to_page(priv->rx_buf_virt),
+		     sg_dma_len(sg), (unsigned long)priv->rx_buf_virt &
+		     ~PAGE_MASK);
+
+	sg_dma_address(sg) = priv->rx_buf_dma;
+
+	desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
+			sg, 1, DMA_FROM_DEVICE,
+			DMA_PREP_INTERRUPT);
+	if (!desc)
+		return 0;
+
+	priv->desc_rx = desc;
+	desc->callback = pch_dma_rx_complete;
+	desc->callback_param = priv;
+	desc->tx_submit(desc);
+	dma_async_issue_pending(priv->chan_rx);
+
+	return PCH_UART_HANDLED_RX_INT;
+}
+
+static unsigned int handle_tx(struct eg20t_port *priv)
+{
+	struct uart_port *port = &priv->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	int ret;
+	int fifo_size;
+	int tx_size;
+	int size;
+	int tx_empty;
+
+	if (!priv->start_tx) {
+		pr_info("%s:Tx isn't started. (%lu)\n", __func__, jiffies);
+		pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+		priv->tx_empty = 1;
+		return 0;
+	}
+
+	fifo_size = max(priv->fifo_size, 1);
+	tx_empty = 1;
+	if (pop_tx_x(priv, xmit->buf)) {
+		pch_uart_hal_write(priv, xmit->buf, 1);
+		port->icount.tx++;
+		tx_empty = 0;
+		fifo_size--;
+	}
+	size = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+		   fifo_size);
+	tx_size = pop_tx(priv, xmit->buf, size);
+	if (tx_size > 0) {
+		ret = pch_uart_hal_write(priv, xmit->buf, tx_size);
+		port->icount.tx += ret;
+		tx_empty = 0;
+	}
+
+	priv->tx_empty = tx_empty;
+
+	if (tx_empty)
+		pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+
+	return PCH_UART_HANDLED_TX_INT;
+}
+
+static unsigned int dma_handle_tx(struct eg20t_port *priv)
+{
+	struct uart_port *port = &priv->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	struct scatterlist *sg = &priv->sg_tx;
+	int nent;
+	int fifo_size;
+	int tx_empty;
+	struct dma_async_tx_descriptor *desc;
+
+	if (!priv->start_tx) {
+		pr_info("%s:Tx isn't started. (%lu)\n", __func__, jiffies);
+		pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+		priv->tx_empty = 1;
+		return 0;
+	}
+
+	fifo_size = max(priv->fifo_size, 1);
+	tx_empty = 1;
+	if (pop_tx_x(priv, xmit->buf)) {
+		pch_uart_hal_write(priv, xmit->buf, 1);
+		port->icount.tx++;
+		tx_empty = 0;
+		fifo_size--;
+	}
+
+	pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
+
+	priv->tx_dma_use = 1;
+
+	sg_init_table(&priv->sg_tx, 1); /* Initialize SG table */
+
+	sg_set_page(&priv->sg_tx, virt_to_page(xmit->buf),
+		    UART_XMIT_SIZE, (int)xmit->buf & ~PAGE_MASK);
+
+	nent = dma_map_sg(port->dev, &priv->sg_tx, 1, DMA_TO_DEVICE);
+	if (!nent) {
+		pr_err("%s:dma_map_sg Failed\n", __func__);
+		return 0;
+	}
+
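+	/* Point the mapped scatterlist at the current tail of the circular
+	 * transmit buffer and limit the length to the contiguous region. */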
+	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
+	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+			      sg->offset;
+	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail,
+			     UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head,
+			     xmit->tail, UART_XMIT_SIZE));
+
+	desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
+		sg, nent, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc) {
+		pr_err("%s:device_prep_slave_sg Failed\n", __func__);
+		return 0;
+	}
+
+	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+
+	priv->desc_tx = desc;
+	desc->callback = pch_dma_tx_complete;
+	desc->callback_param = priv;
+
+	desc->tx_submit(desc);
+
+	dma_async_issue_pending(priv->chan_tx);
+
+	return PCH_UART_HANDLED_TX_INT;
+}
+
+static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+{
+	u8 fcr = ioread8(priv->membase + UART_FCR);
+
+	/* Reset FIFO */
+	fcr |= UART_FCR_CLEAR_RCVR;
+	iowrite8(fcr, priv->membase + UART_FCR);
+
+	if (lsr & PCH_UART_LSR_ERR)
+		dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+
+	if (lsr & UART_LSR_FE)
+		dev_err(&priv->pdev->dev, "Framing Error\n");
+
+	if (lsr & UART_LSR_PE)
+		dev_err(&priv->pdev->dev, "Parity Error\n");
+
+	if (lsr & UART_LSR_OE)
+		dev_err(&priv->pdev->dev, "Overrun Error\n");
+}
+
+static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+{
+	struct eg20t_port *priv = dev_id;
+	unsigned int handled;
+	u8 lsr;
+	int ret = 0;
+	unsigned int iid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->port.lock, flags);
+	handled = 0;
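+	/* IIR bit 0 (IP) is set when no interrupt is pending, so keep
+	 * servicing sources while the returned id is greater than 1. */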
+	while ((iid = pch_uart_hal_get_iid(priv)) > 1) {
+		switch (iid) {
+		case PCH_UART_IID_RLS:	/* Receiver Line Status */
+			lsr = pch_uart_hal_get_line_status(priv);
+			if (lsr & (PCH_UART_LSR_ERR | UART_LSR_FE |
+						UART_LSR_PE | UART_LSR_OE)) {
+				pch_uart_err_ir(priv, lsr);
+				ret = PCH_UART_HANDLED_RX_ERR_INT;
+			}
+			break;
+		case PCH_UART_IID_RDR:	/* Received Data Ready */
+			if (priv->use_dma)
+				ret = dma_handle_rx(priv);
+			else
+				ret = handle_rx(priv);
+			break;
+		case PCH_UART_IID_RDR_TO:	/* Received Data Ready
+						   (FIFO Timeout) */
+			ret = handle_rx_to(priv);
+			break;
+		case PCH_UART_IID_THRE:	/* Transmitter Holding Register
+						   Empty */
+			if (priv->use_dma)
+				ret = dma_handle_tx(priv);
+			else
+				ret = handle_tx(priv);
+			break;
+		case PCH_UART_IID_MS:	/* Modem Status */
+			ret = PCH_UART_HANDLED_MS_INT;
+			break;
+		default:	/* Should never be reached */
+			pr_err("%s:iid=%d (%lu)\n", __func__, iid, jiffies);
+			ret = -1;
+			break;
+		}
+		handled |= (unsigned int)ret;
+	}
+	if (handled == 0 && iid <= 1) {
+		if (priv->int_dis_flag)
+			priv->int_dis_flag = 0;
+	}
+
+	spin_unlock_irqrestore(&priv->port.lock, flags);
+	return IRQ_RETVAL(handled);
+}
+
+/* This function tests whether the transmitter FIFO and shifter for the
+ * port described by 'port' are empty. */
+static unsigned int pch_uart_tx_empty(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	int ret;
+	priv = container_of(port, struct eg20t_port, port);
+	if (priv->tx_empty)
+		ret = TIOCSER_TEMT;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+/* Returns the current state of modem control inputs. */
+static unsigned int pch_uart_get_mctrl(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	u8 modem;
+	unsigned int ret = 0;
+
+	priv = container_of(port, struct eg20t_port, port);
+	modem = pch_uart_hal_get_modem(priv);
+
+	if (modem & UART_MSR_DCD)
+		ret |= TIOCM_CAR;
+
+	if (modem & UART_MSR_RI)
+		ret |= TIOCM_RNG;
+
+	if (modem & UART_MSR_DSR)
+		ret |= TIOCM_DSR;
+
+	if (modem & UART_MSR_CTS)
+		ret |= TIOCM_CTS;
+
+	return ret;
+}
+
+static void pch_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	u32 mcr = 0;
+	unsigned int dat;
+	struct eg20t_port *priv = container_of(port, struct eg20t_port, port);
+
+	if (mctrl & TIOCM_DTR)
+		mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_RTS)
+		mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_LOOP)
+		mcr |= UART_MCR_LOOP;
+
+	if (mctrl) {
+		dat = pch_uart_get_mctrl(port);
+		dat |= mcr;
+		iowrite8(dat, priv->membase + UART_MCR);
+	}
+}
+
+static void pch_uart_stop_tx(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	priv = container_of(port, struct eg20t_port, port);
+	priv->start_tx = 0;
+	priv->tx_dma_use = 0;
+}
+
+static void pch_uart_start_tx(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+
+	priv = container_of(port, struct eg20t_port, port);
+
+	if (priv->use_dma)
+		if (priv->tx_dma_use)
+			return;
+
+	priv->start_tx = 1;
+	pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
+}
+
+static void pch_uart_stop_rx(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	priv = container_of(port, struct eg20t_port, port);
+	priv->start_rx = 0;
+	pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+	priv->int_dis_flag = 1;
+}
+
+/* Enable the modem status interrupts. */
+static void pch_uart_enable_ms(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	priv = container_of(port, struct eg20t_port, port);
+	pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_MS_INT);
+}
+
+/* Control the transmission of a break signal. */
+static void pch_uart_break_ctl(struct uart_port *port, int ctl)
+{
+	struct eg20t_port *priv;
+	unsigned long flags;
+
+	priv = container_of(port, struct eg20t_port, port);
+	spin_lock_irqsave(&port->lock, flags);
+	pch_uart_hal_set_break(priv, ctl);
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* Grab any interrupt resources and initialise any low level driver state. */
+static int pch_uart_startup(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	int ret;
+	int fifo_size;
+	int trigger_level;
+
+	priv = container_of(port, struct eg20t_port, port);
+	priv->tx_empty = 1;
+	port->uartclk = priv->base_baud;
+	pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+	ret = pch_uart_hal_set_line(priv, default_baud,
+			      PCH_UART_HAL_PARITY_NONE, PCH_UART_HAL_8BIT,
+			      PCH_UART_HAL_STB1);
+	if (ret)
+		return ret;
+
+	switch (priv->fifo_size) {
+	case 256:
+		fifo_size = PCH_UART_HAL_FIFO256;
+		break;
+	case 64:
+		fifo_size = PCH_UART_HAL_FIFO64;
+		break;
+	case 16:
+		fifo_size = PCH_UART_HAL_FIFO16;
+		break;
+	case 1:
+	default:
+		fifo_size = PCH_UART_HAL_FIFO_DIS;
+		break;
+	}
+
+	switch (priv->trigger) {
+	case PCH_UART_HAL_TRIGGER1:
+		trigger_level = 1;
+		break;
+	case PCH_UART_HAL_TRIGGER_L:
+		trigger_level = priv->fifo_size / 4;
+		break;
+	case PCH_UART_HAL_TRIGGER_M:
+		trigger_level = priv->fifo_size / 2;
+		break;
+	case PCH_UART_HAL_TRIGGER_H:
+	default:
+		trigger_level = priv->fifo_size - (priv->fifo_size / 8);
+		break;
+	}
+
+	priv->trigger_level = trigger_level;
+	ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0,
+				    fifo_size, priv->trigger);
+	if (ret < 0)
+		return ret;
+
+	ret = request_irq(priv->port.irq, pch_uart_interrupt, IRQF_SHARED,
+			KBUILD_MODNAME, priv);
+	if (ret < 0)
+		return ret;
+
+	if (priv->use_dma)
+		pch_request_dma(port);
+
+	priv->start_rx = 1;
+	pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+	uart_update_timeout(port, CS8, default_baud);
+
+	return 0;
+}
+
+static void pch_uart_shutdown(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	int ret;
+
+	priv = container_of(port, struct eg20t_port, port);
+	pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+	pch_uart_hal_fifo_reset(priv, PCH_UART_HAL_CLR_ALL_FIFO);
+	ret = pch_uart_hal_set_fifo(priv, PCH_UART_HAL_DMA_MODE0,
+			      PCH_UART_HAL_FIFO_DIS, PCH_UART_HAL_TRIGGER1);
+	if (ret)
+		pr_err("pch_uart_hal_set_fifo Failed(ret=%d)\n", ret);
+
+	if (priv->use_dma_flag)
+		pch_free_dma(port);
+
+	free_irq(priv->port.irq, priv);
+}
+
+/* Change the port parameters, including word length, parity, and stop
+ * bits.  Update read_status_mask and ignore_status_mask to indicate
+ * the types of events we are interested in receiving. */
+static void pch_uart_set_termios(struct uart_port *port,
+				 struct ktermios *termios, struct ktermios *old)
+{
+	int baud;
+	int rtn;
+	unsigned int parity, bits, stb;
+	struct eg20t_port *priv;
+	unsigned long flags;
+
+	priv = container_of(port, struct eg20t_port, port);
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		bits = PCH_UART_HAL_5BIT;
+		break;
+	case CS6:
+		bits = PCH_UART_HAL_6BIT;
+		break;
+	case CS7:
+		bits = PCH_UART_HAL_7BIT;
+		break;
+	default:		/* CS8 */
+		bits = PCH_UART_HAL_8BIT;
+		break;
+	}
+	if (termios->c_cflag & CSTOPB)
+		stb = PCH_UART_HAL_STB2;
+	else
+		stb = PCH_UART_HAL_STB1;
+
+	if (termios->c_cflag & PARENB) {
+		if (termios->c_cflag & PARODD)
+			parity = PCH_UART_HAL_PARITY_ODD;
+		else
+			parity = PCH_UART_HAL_PARITY_EVEN;
+
+	} else {
+		parity = PCH_UART_HAL_PARITY_NONE;
+	}
+	termios->c_cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
+
+	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	uart_update_timeout(port, termios->c_cflag, baud);
+	rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
+	if (rtn)
+		goto out;
+
+	/* Don't rewrite B0 */
+	if (tty_termios_baud_rate(termios))
+		tty_termios_encode_baud_rate(termios, baud, baud);
+
+out:
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *pch_uart_type(struct uart_port *port)
+{
+	return KBUILD_MODNAME;
+}
+
+static void pch_uart_release_port(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+
+	priv = container_of(port, struct eg20t_port, port);
+	pci_iounmap(priv->pdev, priv->membase);
+	pci_release_regions(priv->pdev);
+}
+
+static int pch_uart_request_port(struct uart_port *port)
+{
+	struct eg20t_port *priv;
+	int ret;
+	void __iomem *membase;
+
+	priv = container_of(port, struct eg20t_port, port);
+	ret = pci_request_regions(priv->pdev, KBUILD_MODNAME);
+	if (ret < 0)
+		return -EBUSY;
+
+	membase = pci_iomap(priv->pdev, 1, 0);
+	if (!membase) {
+		pci_release_regions(priv->pdev);
+		return -EBUSY;
+	}
+	priv->membase = port->membase = membase;
+
+	return 0;
+}
+
+static void pch_uart_config_port(struct uart_port *port, int type)
+{
+	struct eg20t_port *priv;
+
+	priv = container_of(port, struct eg20t_port, port);
+	if (type & UART_CONFIG_TYPE) {
+		port->type = priv->port_type;
+		pch_uart_request_port(port);
+	}
+}
+
+static int pch_uart_verify_port(struct uart_port *port,
+				struct serial_struct *serinfo)
+{
+	struct eg20t_port *priv;
+
+	priv = container_of(port, struct eg20t_port, port);
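+	/* The user-supplied UPF_LOW_LATENCY flag is used as a switch to
+	 * force PIO mode; otherwise the driver tries to use DMA. */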
+	if (serinfo->flags & UPF_LOW_LATENCY) {
+		pr_info("PCH UART : Use PIO Mode (without DMA)\n");
+		priv->use_dma = 0;
+		serinfo->flags &= ~UPF_LOW_LATENCY;
+	} else {
+#ifndef CONFIG_PCH_DMA
+		pr_err("%s : PCH DMA is not Loaded.\n", __func__);
+		return -EOPNOTSUPP;
+#endif
+		priv->use_dma = 1;
+		priv->use_dma_flag = 1;
+		pr_info("PCH UART : Use DMA Mode\n");
+	}
+
+	return 0;
+}
+
+static struct uart_ops pch_uart_ops = {
+	.tx_empty = pch_uart_tx_empty,
+	.set_mctrl = pch_uart_set_mctrl,
+	.get_mctrl = pch_uart_get_mctrl,
+	.stop_tx = pch_uart_stop_tx,
+	.start_tx = pch_uart_start_tx,
+	.stop_rx = pch_uart_stop_rx,
+	.enable_ms = pch_uart_enable_ms,
+	.break_ctl = pch_uart_break_ctl,
+	.startup = pch_uart_startup,
+	.shutdown = pch_uart_shutdown,
+	.set_termios = pch_uart_set_termios,
+/*	.pm		= pch_uart_pm,		Not supported yet */
+/*	.set_wake	= pch_uart_set_wake,	Not supported yet */
+	.type = pch_uart_type,
+	.release_port = pch_uart_release_port,
+	.request_port = pch_uart_request_port,
+	.config_port = pch_uart_config_port,
+	.verify_port = pch_uart_verify_port
+};
+
+static struct uart_driver pch_uart_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = KBUILD_MODNAME,
+	.dev_name = PCH_UART_DRIVER_DEVICE,
+	.major = 0,
+	.minor = 0,
+	.nr = PCH_UART_NR,
+};
+
+static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
+						int port_type)
+{
+	struct eg20t_port *priv;
+	int ret;
+	unsigned int iobase;
+	unsigned int mapbase;
+	unsigned char *rxbuf;
+	int fifosize, base_baud;
+	static int num;
+
+	priv = kzalloc(sizeof(struct eg20t_port), GFP_KERNEL);
+	if (priv == NULL)
+		goto init_port_alloc_err;
+
+	rxbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
+	if (!rxbuf)
+		goto init_port_free_txbuf;
+
+	switch (port_type) {
+	case PORT_UNKNOWN:
+		fifosize = 256; /* UART0 */
+		base_baud = 1843200; /* 1.8432MHz */
+		break;
+	case PORT_8250:
+		fifosize = 64; /* UART1~3 */
+		base_baud = 1843200; /* 1.8432MHz */
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid Port Type(=%d)\n", port_type);
+		goto init_port_hal_free;
+	}
+
+	iobase = pci_resource_start(pdev, 0);
+	mapbase = pci_resource_start(pdev, 1);
+	priv->mapbase = mapbase;
+	priv->iobase = iobase;
+	priv->pdev = pdev;
+	priv->tx_empty = 1;
+	priv->rxbuf.buf = rxbuf;
+	priv->rxbuf.size = PAGE_SIZE;
+
+	priv->fifo_size = fifosize;
+	priv->base_baud = base_baud;
+	priv->port_type = PORT_MAX_8250 + port_type + 1;
+	priv->port.dev = &pdev->dev;
+	priv->port.iobase = iobase;
+	priv->port.membase = NULL;
+	priv->port.mapbase = mapbase;
+	priv->port.irq = pdev->irq;
+	priv->port.iotype = UPIO_PORT;
+	priv->port.ops = &pch_uart_ops;
+	priv->port.flags = UPF_BOOT_AUTOCONF;
+	priv->port.fifosize = fifosize;
+	priv->port.line = num++;
+	priv->trigger = PCH_UART_HAL_TRIGGER_M;
+
+	pci_set_drvdata(pdev, priv);
+	pch_uart_hal_request(pdev, fifosize, base_baud);
+	ret = uart_add_one_port(&pch_uart_driver, &priv->port);
+	if (ret < 0)
+		goto init_port_hal_free;
+
+	return priv;
+
+init_port_hal_free:
+	free_page((unsigned long)rxbuf);
+init_port_free_txbuf:
+	kfree(priv);
+init_port_alloc_err:
+
+	return NULL;
+}
+
+static void pch_uart_exit_port(struct eg20t_port *priv)
+{
+	uart_remove_one_port(&pch_uart_driver, &priv->port);
+	pci_set_drvdata(priv->pdev, NULL);
+	free_page((unsigned long)priv->rxbuf.buf);
+}
+
+static void pch_uart_pci_remove(struct pci_dev *pdev)
+{
+	struct eg20t_port *priv;
+
+	priv = (struct eg20t_port *)pci_get_drvdata(pdev);
+	pch_uart_exit_port(priv);
+	pci_disable_device(pdev);
+	kfree(priv);
+	return;
+}
+#ifdef CONFIG_PM
+static int pch_uart_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct eg20t_port *priv = pci_get_drvdata(pdev);
+
+	uart_suspend_port(&pch_uart_driver, &priv->port);
+
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int pch_uart_pci_resume(struct pci_dev *pdev)
+{
+	struct eg20t_port *priv = pci_get_drvdata(pdev);
+	int ret;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+		"%s-pci_enable_device failed(ret=%d) ", __func__, ret);
+		return ret;
+	}
+
+	uart_resume_port(&pch_uart_driver, &priv->port);
+
+	return 0;
+}
+#else
+#define pch_uart_pci_suspend NULL
+#define pch_uart_pci_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(pch_uart_pci_id) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8811),
+	 .driver_data = PCH_UART_8LINE},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8812),
+	 .driver_data = PCH_UART_2LINE},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8813),
+	 .driver_data = PCH_UART_2LINE},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8814),
+	 .driver_data = PCH_UART_2LINE},
+	{0,},
+};
+
+static int __devinit pch_uart_pci_probe(struct pci_dev *pdev,
+					const struct pci_device_id *id)
+{
+	int ret;
+	struct eg20t_port *priv;
+
+	ret = pci_enable_device(pdev);
+	if (ret < 0)
+		goto probe_error;
+
+	priv = pch_uart_init_port(pdev, id->driver_data);
+	if (!priv) {
+		ret = -EBUSY;
+		goto probe_disable_device;
+	}
+	pci_set_drvdata(pdev, priv);
+
+	return ret;
+
+probe_disable_device:
+	pci_disable_device(pdev);
+probe_error:
+	return ret;
+}
+
+static struct pci_driver pch_uart_pci_driver = {
+	.name = "pch_uart",
+	.id_table = pch_uart_pci_id,
+	.probe = pch_uart_pci_probe,
+	.remove = __devexit_p(pch_uart_pci_remove),
+	.suspend = pch_uart_pci_suspend,
+	.resume = pch_uart_pci_resume,
+};
+
+static int __init pch_uart_module_init(void)
+{
+	int ret;
+
+	/* register as UART driver */
+	ret = uart_register_driver(&pch_uart_driver);
+	if (ret < 0)
+		return ret;
+
+	/* register as PCI driver */
+	ret = pci_register_driver(&pch_uart_pci_driver);
+	if (ret < 0)
+		uart_unregister_driver(&pch_uart_driver);
+
+	return ret;
+}
+module_init(pch_uart_module_init);
+
+static void __exit pch_uart_module_exit(void)
+{
+	pci_unregister_driver(&pch_uart_pci_driver);
+	uart_unregister_driver(&pch_uart_driver);
+}
+module_exit(pch_uart_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel EG20T PCH UART PCI Driver");
+module_param(default_baud, uint, S_IRUGO);
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 9ffa5be..460a72d 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -1985,7 +1985,8 @@
 
 	tty_dev = device_find_child(uport->dev, &match, serial_match_port);
 	if (device_may_wakeup(tty_dev)) {
-		enable_irq_wake(uport->irq);
+		if (!enable_irq_wake(uport->irq))
+			uport->irq_wake = 1;
 		put_device(tty_dev);
 		mutex_unlock(&port->mutex);
 		return 0;
@@ -2051,7 +2052,10 @@
 
 	tty_dev = device_find_child(uport->dev, &match, serial_match_port);
 	if (!uport->suspended && device_may_wakeup(tty_dev)) {
-		disable_irq_wake(uport->irq);
+		if (uport->irq_wake) {
+			disable_irq_wake(uport->irq);
+			uport->irq_wake = 0;
+		}
 		mutex_unlock(&port->mutex);
 		return 0;
 	}
@@ -2134,6 +2138,7 @@
 	case UPIO_AU:
 	case UPIO_TSI:
 	case UPIO_DWAPB:
+	case UPIO_DWAPB32:
 		snprintf(address, sizeof(address),
 			 "MMIO 0x%llx", (unsigned long long)port->mapbase);
 		break;
@@ -2554,6 +2559,7 @@
 	case UPIO_AU:
 	case UPIO_TSI:
 	case UPIO_DWAPB:
+	case UPIO_DWAPB32:
 		return (port1->mapbase == port2->mapbase);
 	}
 	return 0;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index c291b3a..92c91c8 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -3,7 +3,7 @@
  *
  * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
  *
- *  Copyright (C) 2002 - 2008  Paul Mundt
+ *  Copyright (C) 2002 - 2011  Paul Mundt
  *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
  *
  * based off of the old drivers/char/sh-sci.c by:
@@ -81,14 +81,22 @@
 	struct timer_list	break_timer;
 	int			break_flag;
 
+	/* SCSCR initialization */
+	unsigned int		scscr;
+
+	/* SCBRR calculation algo */
+	unsigned int		scbrr_algo_id;
+
 	/* Interface clock */
 	struct clk		*iclk;
 	/* Function clock */
 	struct clk		*fclk;
 
 	struct list_head	node;
+
 	struct dma_chan			*chan_tx;
 	struct dma_chan			*chan_rx;
+
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct device			*dma_dev;
 	unsigned int			slave_tx;
@@ -415,9 +423,9 @@
 	if (!(status & SCxSR_TDxE(port))) {
 		ctrl = sci_in(port, SCSCR);
 		if (uart_circ_empty(xmit))
-			ctrl &= ~SCI_CTRL_FLAGS_TIE;
+			ctrl &= ~SCSCR_TIE;
 		else
-			ctrl |= SCI_CTRL_FLAGS_TIE;
+			ctrl |= SCSCR_TIE;
 		sci_out(port, SCSCR, ctrl);
 		return;
 	}
@@ -459,7 +467,7 @@
 			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
 		}
 
-		ctrl |= SCI_CTRL_FLAGS_TIE;
+		ctrl |= SCSCR_TIE;
 		sci_out(port, SCSCR, ctrl);
 	}
 }
@@ -708,7 +716,7 @@
 			disable_irq_nosync(irq);
 			scr |= 0x4000;
 		} else {
-			scr &= ~SCI_CTRL_FLAGS_RIE;
+			scr &= ~SCSCR_RIE;
 		}
 		sci_out(port, SCSCR, scr);
 		/* Clear current interrupt */
@@ -777,6 +785,18 @@
 	return IRQ_HANDLED;
 }
 
+static inline unsigned long port_rx_irq_mask(struct uart_port *port)
+{
+	/*
+	 * Not all ports (such as SCIFA) will support REIE. Rather than
+	 * special-casing the port type, we check the port initialization
+	 * IRQ enable mask to see whether the IRQ is desired at all. If
+	 * it's unset, it's logically inferred that there's no point in
+	 * testing for it.
+	 */
+	return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE);
+}
+
 static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
 {
 	unsigned short ssr_status, scr_status, err_enabled;
@@ -786,22 +806,25 @@
 
 	ssr_status = sci_in(port, SCxSR);
 	scr_status = sci_in(port, SCSCR);
-	err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
+	err_enabled = scr_status & port_rx_irq_mask(port);
 
 	/* Tx Interrupt */
-	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) &&
+	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
 	    !s->chan_tx)
 		ret = sci_tx_interrupt(irq, ptr);
+
 	/*
 	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
 	 * DR flags
 	 */
 	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
-	    (scr_status & SCI_CTRL_FLAGS_RIE))
+	    (scr_status & SCSCR_RIE))
 		ret = sci_rx_interrupt(irq, ptr);
+
 	/* Error Interrupt */
 	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
 		ret = sci_er_interrupt(irq, ptr);
+
 	/* Break Interrupt */
 	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
 		ret = sci_br_interrupt(irq, ptr);
@@ -951,7 +974,7 @@
 		schedule_work(&s->work_tx);
 	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		u16 ctrl = sci_in(port, SCSCR);
-		sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
 	}
 
 	spin_unlock_irqrestore(&port->lock, flags);
@@ -1214,14 +1237,16 @@
 		if (new != scr)
 			sci_out(port, SCSCR, new);
 	}
+
 	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
 	    s->cookie_tx < 0)
 		schedule_work(&s->work_tx);
 #endif
+
 	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
 		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
 		ctrl = sci_in(port, SCSCR);
-		sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
 	}
 }
 
@@ -1231,20 +1256,24 @@
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x8000;
-	ctrl &= ~SCI_CTRL_FLAGS_TIE;
+
+	ctrl &= ~SCSCR_TIE;
+
 	sci_out(port, SCSCR, ctrl);
 }
 
 static void sci_start_rx(struct uart_port *port)
 {
-	unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
+	unsigned short ctrl;
 
-	/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
-	ctrl |= sci_in(port, SCSCR);
+	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
+
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
+
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1252,11 +1281,13 @@
 {
 	unsigned short ctrl;
 
-	/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+
 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
 		ctrl &= ~0x4000;
-	ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
+
+	ctrl &= ~port_rx_irq_mask(port);
+
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1296,7 +1327,7 @@
 		scr &= ~0x4000;
 		enable_irq(s->irqs[1]);
 	}
-	sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
+	sci_out(port, SCSCR, scr | SCSCR_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
 }
@@ -1442,12 +1473,31 @@
 		s->disable(port);
 }
 
+static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
+				   unsigned long freq)
+{
+	switch (algo_id) {
+	case SCBRR_ALGO_1:
+		return ((freq + 16 * bps) / (16 * bps) - 1);
+	case SCBRR_ALGO_2:
+		return ((freq + 16 * bps) / (32 * bps) - 1);
+	case SCBRR_ALGO_3:
+		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
+	case SCBRR_ALGO_4:
+		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
+	case SCBRR_ALGO_5:
+		return (((freq * 1000 / 32) / bps) - 1);
+	}
+
+	/* Warn, but use a safe default */
+	WARN_ON(1);
+	return ((freq + 16 * bps) / (32 * bps) - 1);
+}
+
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct sci_port *s = to_sci_port(port);
-#endif
 	unsigned int status, baud, smr_val, max_baud;
 	int t = -1;
 	u16 scfcr = 0;
@@ -1464,7 +1514,7 @@
 
 	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
 	if (likely(baud && port->uartclk))
-		t = SCBRR_VALUE(baud, port->uartclk);
+		t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);
 
 	do {
 		status = sci_in(port, SCxSR);
@@ -1490,7 +1540,7 @@
 	sci_out(port, SCSMR, smr_val);
 
 	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
-		SCSCR_INIT(port));
+		s->scscr);
 
 	if (t > 0) {
 		if (t >= 256) {
@@ -1506,7 +1556,7 @@
 	sci_init_pins(port, termios->c_cflag);
 	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
 
-	sci_out(port, SCSCR, SCSCR_INIT(port));
+	sci_out(port, SCSCR, s->scscr);
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	/*
@@ -1679,9 +1729,11 @@
 	port->mapbase	= p->mapbase;
 	port->membase	= p->membase;
 
-	port->irq	= p->irqs[SCIx_TXI_IRQ];
-	port->flags	= p->flags;
-	sci_port->type	= port->type = p->type;
+	port->irq		= p->irqs[SCIx_TXI_IRQ];
+	port->flags		= p->flags;
+	sci_port->type		= port->type = p->type;
+	sci_port->scscr		= p->scscr;
+	sci_port->scbrr_algo_id	= p->scbrr_algo_id;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	sci_port->dma_dev	= p->dma_dev;
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index d2352ac..b223d6c 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -15,26 +15,17 @@
     defined(CONFIG_CPU_SUBTYPE_SH7709)
 # define SCPCR  0xA4000116 /* 16 bit SCI and SCIF */
 # define SCPDR  0xA4000136 /* 8  bit SCI and SCIF */
-# define SCSCR_INIT(port)          0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7705)
 # define SCIF0		0xA4400000
 # define SCIF2		0xA4410000
-# define SCSMR_Ir	0xA44A0000
-# define IRDA_SCIF	SCIF0
 # define SCPCR 0xA4000116
 # define SCPDR 0xA4000136
-
-/* Set the clock source,
- * SCIF2 (0xA4410000) -> External clock, SCK pin used as clock input
- * SCIF0 (0xA4400000) -> Internal clock, SCK pin as serial clock output
- */
-# define SCSCR_INIT(port) (port->mapbase == SCIF2) ? 0xF3 : 0xF0
 #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
       defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+      defined(CONFIG_ARCH_SH73A0) || \
       defined(CONFIG_ARCH_SH7367) || \
       defined(CONFIG_ARCH_SH7377) || \
       defined(CONFIG_ARCH_SH7372)
-# define SCSCR_INIT(port)  0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
 # define PORT_PTCR	   0xA405011EUL
 # define PORT_PVCR	   0xA4050122UL
 # define SCIF_ORER	   0x0200   /* overrun error bit */
@@ -42,7 +33,6 @@
 # define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
 # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
-# define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7750)  || \
       defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
       defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
@@ -52,39 +42,31 @@
 # define SCSPTR1 0xffe0001c /* 8  bit SCI */
 # define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
-# define SCSCR_INIT(port) (((port)->type == PORT_SCI) ? \
-	0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
-	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
 #elif defined(CONFIG_CPU_SUBTYPE_SH7760)
 # define SCSPTR0 0xfe600024 /* 16 bit SCIF */
 # define SCSPTR1 0xfe610024 /* 16 bit SCIF */
 # define SCSPTR2 0xfe620024 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)          0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
 # define SCSPTR0 0xA4400000	  /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
 # define PACR 0xa4050100
 # define PBCR 0xa4050102
-# define SCSCR_INIT(port)          0x3B
 #elif defined(CONFIG_CPU_SUBTYPE_SH7343)
 # define SCSPTR0 0xffe00010	/* 16 bit SCIF */
 # define SCSPTR1 0xffe10010	/* 16 bit SCIF */
 # define SCSPTR2 0xffe20010	/* 16 bit SCIF */
 # define SCSPTR3 0xffe30010	/* 16 bit SCIF */
-# define SCSCR_INIT(port) 0x32	/* TIE=0,RIE=0,TE=1,RE=1,REIE=0,CKE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7722)
 # define PADR			0xA4050120
 # define PSDR			0xA405013e
 # define PWDR			0xA4050166
 # define PSCR			0xA405011E
 # define SCIF_ORER		0x0001	/* overrun error bit */
-# define SCSCR_INIT(port)	0x0038	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7366)
 # define SCPDR0			0xA405013E      /* 16 bit SCIF0 PSDR */
 # define SCSPTR0		SCPDR0
 # define SCIF_ORER		0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x0038  /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7723)
 # define SCSPTR0                0xa4050160
 # define SCSPTR1                0xa405013e
@@ -93,62 +75,38 @@
 # define SCSPTR4                0xa4050128
 # define SCSPTR5                0xa4050128
 # define SCIF_ORER              0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)       0x0038  /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7724)
 # define SCIF_ORER              0x0001  /* overrun error bit */
-# define SCSCR_INIT(port) ((port)->type == PORT_SCIFA ? \
-	0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ : \
-	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ )
 #elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
 # define SCSPTR2 0xffe80020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001   /* overrun error bit */
-# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# define SCIF_BASE_ADDR    0x01030000
-# define SCIF_ADDR_SH5     PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
 # define SCIF_PTR2_OFFS    0x0000020
-# define SCIF_LSR2_OFFS    0x0000024
 # define SCSPTR2           ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
-# define SCLSR2            ((port->mapbase)+SCIF_LSR2_OFFS) /* 16 bit SCIF */
-# define SCSCR_INIT(port)  0x38		/* TIE=0,RIE=0, TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
-# define SCSCR_INIT(port)          0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
 # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
 #elif defined(CONFIG_H8S2678)
-# define SCSCR_INIT(port)          0x30 /* TIE=0,RIE=0,TE=1,RE=1 */
 # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
 #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
 # define SCSPTR0 0xfe4b0020
 # define SCSPTR1 0xfe4b0020
 # define SCSPTR2 0xfe4b0020
 # define SCIF_ORER 0x0001
-# define SCSCR_INIT(port)	0x38
 # define SCIF_ONLY
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 # define SCSPTR0 0xffe00024 /* 16 bit SCIF */
 # define SCSPTR1 0xffe08024 /* 16 bit SCIF */
 # define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x38	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7770)
 # define SCSPTR0 0xff923020 /* 16 bit SCIF */
 # define SCSPTR1 0xff924020 /* 16 bit SCIF */
 # define SCSPTR2 0xff925020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x3c /* TIE=0,RIE=0,TE=1,RE=1,REIE=1,cke=2 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 # define SCSPTR0	0xffe00024	/* 16 bit SCIF */
 # define SCSPTR1	0xffe10024	/* 16 bit SCIF */
 # define SCIF_ORER	0x0001		/* Overrun error bit */
-
-#if defined(CONFIG_SH_SH2007)
-/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=0 */
-# define SCSCR_INIT(port)	0x38
-#else
-/* TIE=0,RIE=0,TE=1,RE=1,REIE=1,CKE1=1 */
-# define SCSCR_INIT(port)	0x3a
-#endif
-
 #elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
       defined(CONFIG_CPU_SUBTYPE_SH7786)
 # define SCSPTR0	0xffea0024	/* 16 bit SCIF */
@@ -158,7 +116,6 @@
 # define SCSPTR4	0xffee0024	/* 16 bit SCIF */
 # define SCSPTR5	0xffef0024	/* 16 bit SCIF */
 # define SCIF_ORER	0x0001		/* Overrun error bit */
-# define SCSCR_INIT(port)	0x3a	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
       defined(CONFIG_CPU_SUBTYPE_SH7203) || \
       defined(CONFIG_CPU_SUBTYPE_SH7206) || \
@@ -173,52 +130,21 @@
 #  define SCSPTR6 0xfffeB020 /* 16 bit SCIF */
 #  define SCSPTR7 0xfffeB820 /* 16 bit SCIF */
 # endif
-# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 # define SCSPTR0 0xf8400020 /* 16 bit SCIF */
 # define SCSPTR1 0xf8410020 /* 16 bit SCIF */
 # define SCSPTR2 0xf8420020 /* 16 bit SCIF */
 # define SCIF_ORER 0x0001  /* overrun error bit */
-# define SCSCR_INIT(port)	0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #elif defined(CONFIG_CPU_SUBTYPE_SHX3)
 # define SCSPTR0 0xffc30020		/* 16 bit SCIF */
 # define SCSPTR1 0xffc40020		/* 16 bit SCIF */
 # define SCSPTR2 0xffc50020		/* 16 bit SCIF */
 # define SCSPTR3 0xffc60020		/* 16 bit SCIF */
 # define SCIF_ORER 0x0001		/* Overrun error bit */
-# define SCSCR_INIT(port)	0x38	/* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 #else
 # error CPU subtype not defined
 #endif
 
-/* SCSCR */
-#define SCI_CTRL_FLAGS_TIE  0x80 /* all */
-#define SCI_CTRL_FLAGS_RIE  0x40 /* all */
-#define SCI_CTRL_FLAGS_TE   0x20 /* all */
-#define SCI_CTRL_FLAGS_RE   0x10 /* all */
-#if defined(CONFIG_CPU_SUBTYPE_SH7750)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7091)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7722)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7751)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7763)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7780)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7785)  || \
-    defined(CONFIG_CPU_SUBTYPE_SH7786)  || \
-    defined(CONFIG_CPU_SUBTYPE_SHX3)
-#define SCI_CTRL_FLAGS_REIE 0x08 /* 7750 SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-#define SCI_CTRL_FLAGS_REIE ((port)->type == PORT_SCIFA ? 0 : 8)
-#else
-#define SCI_CTRL_FLAGS_REIE 0
-#endif
-/*      SCI_CTRL_FLAGS_MPIE 0x08  * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/*      SCI_CTRL_FLAGS_TEIE 0x04  * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/*      SCI_CTRL_FLAGS_CKE1 0x02  * all */
-/*      SCI_CTRL_FLAGS_CKE0 0x01  * 7707 SCI/SCIF, 7708 SCI, 7709 SCI/SCIF, 7750 SCI */
-
 /* SCxSR SCI */
 #define SCI_TDRE  0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
 #define SCI_RDRF  0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
@@ -244,6 +170,7 @@
 #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
     defined(CONFIG_CPU_SUBTYPE_SH7720) || \
     defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+    defined(CONFIG_ARCH_SH73A0) || \
     defined(CONFIG_ARCH_SH7367) || \
     defined(CONFIG_ARCH_SH7377) || \
     defined(CONFIG_ARCH_SH7372)
@@ -280,6 +207,7 @@
 #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
     defined(CONFIG_CPU_SUBTYPE_SH7720) || \
     defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+    defined(CONFIG_ARCH_SH73A0) || \
     defined(CONFIG_ARCH_SH7367) || \
     defined(CONFIG_ARCH_SH7377) || \
     defined(CONFIG_ARCH_SH7372)
@@ -297,23 +225,11 @@
 /* SCFCR */
 #define SCFCR_RFRST 0x0002
 #define SCFCR_TFRST 0x0004
-#define SCFCR_TCRST 0x4000
 #define SCFCR_MCE   0x0008
 
 #define SCI_MAJOR		204
 #define SCI_MINOR_START		8
 
-/* Generic serial flags */
-#define SCI_RX_THROTTLE		0x0000001
-
-#define SCI_MAGIC 0xbabeface
-
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at rs interrupt time.
- */
-#define SCI_EVENT_WRITE_WAKEUP	0
-
 #define SCI_IN(size, offset)					\
   if ((size) == 8) {						\
     return ioread8(port->membase + (offset));			\
@@ -378,6 +294,7 @@
   }
 
 #if defined(CONFIG_CPU_SH3) || \
+    defined(CONFIG_ARCH_SH73A0) || \
     defined(CONFIG_ARCH_SH7367) || \
     defined(CONFIG_ARCH_SH7377) || \
     defined(CONFIG_ARCH_SH7372)
@@ -391,6 +308,7 @@
 #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
       defined(CONFIG_CPU_SUBTYPE_SH7720) || \
       defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+      defined(CONFIG_ARCH_SH73A0) || \
       defined(CONFIG_ARCH_SH7367) || \
       defined(CONFIG_ARCH_SH7377)
 #define SCIF_FNS(name, scif_offset, scif_size) \
@@ -433,14 +351,13 @@
 #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
     defined(CONFIG_CPU_SUBTYPE_SH7720) || \
     defined(CONFIG_CPU_SUBTYPE_SH7721) || \
+    defined(CONFIG_ARCH_SH73A0) || \
     defined(CONFIG_ARCH_SH7367) || \
     defined(CONFIG_ARCH_SH7377)
 
 SCIF_FNS(SCSMR,  0x00, 16)
 SCIF_FNS(SCBRR,  0x04,  8)
 SCIF_FNS(SCSCR,  0x08, 16)
-SCIF_FNS(SCTDSR, 0x0c,  8)
-SCIF_FNS(SCFER,  0x10, 16)
 SCIF_FNS(SCxSR,  0x14, 16)
 SCIF_FNS(SCFCR,  0x18, 16)
 SCIF_FNS(SCFDR,  0x1c, 16)
@@ -470,8 +387,6 @@
 SCIx_FNS(SCxSR,  0x14, 16, 0x10, 16)
 SCIx_FNS(SCxRDR, 0x24,  8, 0x14,  8)
 SCIx_FNS(SCSPTR, 0,     0,    0,  0)
-SCIF_FNS(SCTDSR, 0x0c,  8)
-SCIF_FNS(SCFER,  0x10, 16)
 SCIF_FNS(SCFCR,  0x18, 16)
 SCIF_FNS(SCFDR,  0x1c, 16)
 SCIF_FNS(SCLSR,  0x24, 16)
@@ -497,7 +412,6 @@
 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 SCIF_FNS(SCFDR,				0,  0, 0x1C, 16)
 SCIF_FNS(SCSPTR2,			0,  0, 0x20, 16)
-SCIF_FNS(SCLSR2,			0,  0, 0x24, 16)
 SCIF_FNS(SCTFDR,		     0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCRFDR,		     0x0e, 16, 0x20, 16)
 SCIF_FNS(SCSPTR,			0,  0, 0x24, 16)
@@ -591,63 +505,3 @@
 	return 1;
 }
 #endif
-
-/*
- * Values for the BitRate Register (SCBRR)
- *
- * The values are actually divisors for a frequency which can
- * be internal to the SH3 (14.7456MHz) or derived from an external
- * clock source.  This driver assumes the internal clock is used;
- * to support using an external clock source, config options or
- * possibly command-line options would need to be added.
- *
- * Also, to support speeds below 2400 (why?) the lower 2 bits of
- * the SCSMR register would also need to be set to non-zero values.
- *
- * -- Greg Banks 27Feb2000
- *
- * Answer: The SCBRR register is only eight bits, and the value in
- * it gets larger with lower baud rates. At around 2400 (depending on
- * the peripherial module clock) you run out of bits. However the
- * lower two bits of SCSMR allow the module clock to be divided down,
- * scaling the value which is needed in SCBRR.
- *
- * -- Stuart Menefy - 23 May 2000
- *
- * I meant, why would anyone bother with bitrates below 2400.
- *
- * -- Greg Banks - 7Jul2000
- *
- * You "speedist"!  How will I use my 110bps ASR-33 teletype with paper
- * tape reader as a console!
- *
- * -- Mitch Davis - 15 Jul 2000
- */
-
-#if (defined(CONFIG_CPU_SUBTYPE_SH7780)  || \
-     defined(CONFIG_CPU_SUBTYPE_SH7785)  || \
-     defined(CONFIG_CPU_SUBTYPE_SH7786)) && \
-    !defined(CONFIG_SH_SH2007)
-#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(16*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7720) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7721) || \
-      defined(CONFIG_ARCH_SH7367) || \
-      defined(CONFIG_ARCH_SH7377) || \
-      defined(CONFIG_ARCH_SH7372)
-#define SCBRR_VALUE(bps, clk) (((clk*2)+16*bps)/(32*bps)-1)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
-      defined(CONFIG_CPU_SUBTYPE_SH7724)
-static inline int scbrr_calc(struct uart_port *port, int bps, int clk)
-{
-	if (port->type == PORT_SCIF)
-		return (clk+16*bps)/(32*bps)-1;
-	else
-		return ((clk*2)+16*bps)/(16*bps)-1;
-}
-#define SCBRR_VALUE(bps, clk) scbrr_calc(port, bps, clk)
-#elif defined(__H8300H__) || defined(__H8300S__)
-#define SCBRR_VALUE(bps, clk) (((clk*1000/32)/bps)-1)
-#else /* Generic SH */
-#define SCBRR_VALUE(bps, clk) ((clk+16*bps)/(32*bps)-1)
-#endif
diff --git a/drivers/serial/vt8500_serial.c b/drivers/serial/vt8500_serial.c
new file mode 100644
index 0000000..322bf56
--- /dev/null
+++ b/drivers/serial/vt8500_serial.c
@@ -0,0 +1,648 @@
+/*
+ * drivers/serial/vt8500_serial.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * Based on msm_serial.c, which is:
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/*
+ * UART Register offsets
+ */
+
+#define VT8500_URTDR		0x0000	/* Transmit data */
+#define VT8500_URRDR		0x0004	/* Receive data */
+#define VT8500_URDIV		0x0008	/* Clock/Baud rate divisor */
+#define VT8500_URLCR		0x000C	/* Line control */
+#define VT8500_URICR		0x0010	/* IrDA control */
+#define VT8500_URIER		0x0014	/* Interrupt enable */
+#define VT8500_URISR		0x0018	/* Interrupt status */
+#define VT8500_URUSR		0x001c	/* UART status */
+#define VT8500_URFCR		0x0020	/* FIFO control */
+#define VT8500_URFIDX		0x0024	/* FIFO index */
+#define VT8500_URBKR		0x0028	/* Break signal count */
+#define VT8500_URTOD		0x002c	/* Time out divisor */
+#define VT8500_TXFIFO		0x1000	/* Transmit FIFO (16x8) */
+#define VT8500_RXFIFO		0x1020	/* Receive FIFO (16x10) */
+
+/*
+ * Interrupt enable and status bits
+ */
+
+#define TXDE	(1 << 0)	/* Tx Data empty */
+#define RXDF	(1 << 1)	/* Rx Data full */
+#define TXFAE	(1 << 2)	/* Tx FIFO almost empty */
+#define TXFE	(1 << 3)	/* Tx FIFO empty */
+#define RXFAF	(1 << 4)	/* Rx FIFO almost full */
+#define RXFF	(1 << 5)	/* Rx FIFO full */
+#define TXUDR	(1 << 6)	/* Tx underrun */
+#define RXOVER	(1 << 7)	/* Rx overrun */
+#define PER	(1 << 8)	/* Parity error */
+#define FER	(1 << 9)	/* Frame error */
+#define TCTS	(1 << 10)	/* Toggle of CTS */
+#define RXTOUT	(1 << 11)	/* Rx timeout */
+#define BKDONE	(1 << 12)	/* Break signal done */
+#define ERR	(1 << 13)	/* AHB error response */
+
+#define RX_FIFO_INTS	(RXFAF | RXFF | RXOVER | PER | FER | RXTOUT)
+#define TX_FIFO_INTS	(TXFAE | TXFE | TXUDR)
+
+struct vt8500_port {
+	struct uart_port	uart;
+	char			name[16];
+	struct clk		*clk;
+	unsigned int		ier;
+};
+
+static inline void vt8500_write(struct uart_port *port, unsigned int val,
+			     unsigned int off)
+{
+	writel(val, port->membase + off);
+}
+
+static inline unsigned int vt8500_read(struct uart_port *port, unsigned int off)
+{
+	return readl(port->membase + off);
+}
+
+static void vt8500_stop_tx(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port = container_of(port,
+						       struct vt8500_port,
+						       uart);
+
+	vt8500_port->ier &= ~TX_FIFO_INTS;
+	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void vt8500_stop_rx(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port = container_of(port,
+						       struct vt8500_port,
+						       uart);
+
+	vt8500_port->ier &= ~RX_FIFO_INTS;
+	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void vt8500_enable_ms(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port = container_of(port,
+						       struct vt8500_port,
+						       uart);
+
+	vt8500_port->ier |= TCTS;
+	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+	struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+	if (!tty) {
+		/* Discard data: no tty available */
+		int count = (vt8500_read(port, VT8500_URFIDX) & 0x1f00) >> 8;
+		u16 ch;
+		while (count--)
+			ch = readw(port->membase + VT8500_RXFIFO);
+		return;
+	}
+
+	/*
+	 * Handle overrun
+	 */
+	if ((vt8500_read(port, VT8500_URISR) & RXOVER)) {
+		port->icount.overrun++;
+		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+	}
+
+	/* and now the main RX loop */
+	while (vt8500_read(port, VT8500_URFIDX) & 0x1f00) {
+		unsigned int c;
+		char flag = TTY_NORMAL;
+
+		c = readw(port->membase + VT8500_RXFIFO) & 0x3ff;
+
+		/* Mask conditions we're ignoring. */
+		c &= ~port->read_status_mask;
+
+		if (c & FER) {
+			port->icount.frame++;
+			flag = TTY_FRAME;
+		} else if (c & PER) {
+			port->icount.parity++;
+			flag = TTY_PARITY;
+		}
+		port->icount.rx++;
+
+		if (!uart_handle_sysrq_char(port, c))
+			tty_insert_flip_char(tty, c, flag);
+	}
+
+	tty_flip_buffer_push(tty);
+	tty_kref_put(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+	struct circ_buf *xmit = &port->state->xmit;
+
+	if (port->x_char) {
+		writeb(port->x_char, port->membase + VT8500_TXFIFO);
+		port->icount.tx++;
+		port->x_char = 0;
+	}
+	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+		vt8500_stop_tx(port);
+		return;
+	}
+
+	while ((vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16) {
+		if (uart_circ_empty(xmit))
+			break;
+
+		writeb(xmit->buf[xmit->tail], port->membase + VT8500_TXFIFO);
+
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+	}
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	if (uart_circ_empty(xmit))
+		vt8500_stop_tx(port);
+}
+
+static void vt8500_start_tx(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port = container_of(port,
+						       struct vt8500_port,
+						       uart);
+
+	vt8500_port->ier &= ~TX_FIFO_INTS;
+	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+	handle_tx(port);
+	vt8500_port->ier |= TX_FIFO_INTS;
+	vt8500_write(port, vt8500_port->ier, VT8500_URIER);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+	port->icount.cts++;
+	wake_up_interruptible(&port->state->port.delta_msr_wait);
+}
+
+static irqreturn_t vt8500_irq(int irq, void *dev_id)
+{
+	struct uart_port *port = dev_id;
+	unsigned long isr;
+
+	spin_lock(&port->lock);
+	isr = vt8500_read(port, VT8500_URISR);
+
+	/* Acknowledge active status bits */
+	vt8500_write(port, isr, VT8500_URISR);
+
+	if (isr & RX_FIFO_INTS)
+		handle_rx(port);
+	if (isr & TX_FIFO_INTS)
+		handle_tx(port);
+	if (isr & TCTS)
+		handle_delta_cts(port);
+
+	spin_unlock(&port->lock);
+
+	return IRQ_HANDLED;
+}
+
+static unsigned int vt8500_tx_empty(struct uart_port *port)
+{
+	return (vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16 ?
+						TIOCSER_TEMT : 0;
+}
+
+static unsigned int vt8500_get_mctrl(struct uart_port *port)
+{
+	unsigned int usr;
+
+	usr = vt8500_read(port, VT8500_URUSR);
+	if (usr & (1 << 4))
+		return TIOCM_CTS;
+	else
+		return 0;
+}
+
+static void vt8500_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void vt8500_break_ctl(struct uart_port *port, int break_ctl)
+{
+	if (break_ctl)
+		vt8500_write(port, vt8500_read(port, VT8500_URLCR) | (1 << 9),
+			     VT8500_URLCR);
+}
+
+static int vt8500_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+	unsigned long div;
+	unsigned int loops = 1000;
+
+	div = vt8500_read(port, VT8500_URDIV) & ~(0x3ff);
+
+	if (unlikely((baud < 900) || (baud > 921600)))
+		div |= 7;
+	else
+		div |= (921600 / baud) - 1;
+
+	while ((vt8500_read(port, VT8500_URUSR) & (1 << 5)) && --loops)
+		cpu_relax();
+	vt8500_write(port, div, VT8500_URDIV);
+
+	return baud;
+}
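+
+/*
+ * Worked example (derived from the code above, not from a datasheet): the
+ * low 10 bits of URDIV hold (921600 / baud) - 1, so a request for 115200
+ * baud programs a divisor field of 921600 / 115200 - 1 = 7; out-of-range
+ * requests fall back to that same divisor of 7.
+ */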
+
+static int vt8500_startup(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port =
+			container_of(port, struct vt8500_port, uart);
+	int ret;
+
+	snprintf(vt8500_port->name, sizeof(vt8500_port->name),
+		 "vt8500_serial%d", port->line);
+
+	ret = request_irq(port->irq, vt8500_irq, IRQF_TRIGGER_HIGH,
+			  vt8500_port->name, port);
+	if (unlikely(ret))
+		return ret;
+
+	vt8500_write(port, 0x03, VT8500_URLCR);	/* enable TX & RX */
+
+	return 0;
+}
+
+static void vt8500_shutdown(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port =
+			container_of(port, struct vt8500_port, uart);
+
+	vt8500_port->ier = 0;
+
+	/* disable interrupts and FIFOs */
+	vt8500_write(&vt8500_port->uart, 0, VT8500_URIER);
+	vt8500_write(&vt8500_port->uart, 0x880, VT8500_URFCR);
+	free_irq(port->irq, port);
+}
+
+static void vt8500_set_termios(struct uart_port *port,
+			       struct ktermios *termios,
+			       struct ktermios *old)
+{
+	struct vt8500_port *vt8500_port =
+			container_of(port, struct vt8500_port, uart);
+	unsigned long flags;
+	unsigned int baud, lcr;
+	unsigned int loops = 1000;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	/* calculate and set baud rate */
+	baud = uart_get_baud_rate(port, termios, old, 900, 921600);
+	baud = vt8500_set_baud_rate(port, baud);
+	if (tty_termios_baud_rate(termios))
+		tty_termios_encode_baud_rate(termios, baud, baud);
+
+	/* calculate parity */
+	lcr = vt8500_read(&vt8500_port->uart, VT8500_URLCR);
+	lcr &= ~((1 << 5) | (1 << 4));
+	if (termios->c_cflag & PARENB) {
+		lcr |= (1 << 4);
+		termios->c_cflag &= ~CMSPAR;
+		if (termios->c_cflag & PARODD)
+			lcr |= (1 << 5);
+	}
+
+	/* calculate bits per char */
+	lcr &= ~(1 << 2);
+	switch (termios->c_cflag & CSIZE) {
+	case CS7:
+		break;
+	case CS8:
+	default:
+		lcr |= (1 << 2);
+		termios->c_cflag &= ~CSIZE;
+		termios->c_cflag |= CS8;
+		break;
+	}
+
+	/* calculate stop bits */
+	lcr &= ~(1 << 3);
+	if (termios->c_cflag & CSTOPB)
+		lcr |= (1 << 3);
+
+	/* set parity, bits per char, and stop bit */
+	vt8500_write(&vt8500_port->uart, lcr, VT8500_URLCR);
+
+	/* Configure status bits to ignore based on termio flags. */
+	port->read_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->read_status_mask = FER | PER;
+
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	/* Reset FIFOs */
+	vt8500_write(&vt8500_port->uart, 0x88c, VT8500_URFCR);
+	while ((vt8500_read(&vt8500_port->uart, VT8500_URFCR) & 0xc)
+							&& --loops)
+		cpu_relax();
+
+	/* Every possible FIFO-related interrupt */
+	vt8500_port->ier = RX_FIFO_INTS | TX_FIFO_INTS;
+
+	/*
+	 * CTS flow control
+	 */
+	if (UART_ENABLE_MS(&vt8500_port->uart, termios->c_cflag))
+		vt8500_port->ier |= TCTS;
+
+	vt8500_write(&vt8500_port->uart, 0x881, VT8500_URFCR);
+	vt8500_write(&vt8500_port->uart, vt8500_port->ier, VT8500_URIER);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *vt8500_type(struct uart_port *port)
+{
+	struct vt8500_port *vt8500_port =
+			container_of(port, struct vt8500_port, uart);
+	return vt8500_port->name;
+}
+
+static void vt8500_release_port(struct uart_port *port)
+{
+}
+
+static int vt8500_request_port(struct uart_port *port)
+{
+	return 0;
+}
+
+static void vt8500_config_port(struct uart_port *port, int flags)
+{
+	port->type = PORT_VT8500;
+}
+
+static int vt8500_verify_port(struct uart_port *port,
+			      struct serial_struct *ser)
+{
+	if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_VT8500))
+		return -EINVAL;
+	if (unlikely(port->irq != ser->irq))
+		return -EINVAL;
+	return 0;
+}
+
+/* sized to match vt8500_uart_driver.nr, since console setup indexes up to nr - 1 */
+static struct vt8500_port *vt8500_uart_ports[6];
+static struct uart_driver vt8500_uart_driver;
+
+#ifdef CONFIG_SERIAL_VT8500_CONSOLE
+
+static inline void wait_for_xmitr(struct uart_port *port)
+{
+	unsigned int status, tmout = 10000;
+
+	/* Wait up to 10ms for the character(s) to be sent. */
+	do {
+		status = vt8500_read(port, VT8500_URFIDX);
+
+		if (--tmout == 0)
+			break;
+		udelay(1);
+	} while (status & 0x10);
+}
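+
+/*
+ * Note (inferred from vt8500_tx_empty() above, not from a datasheet): the
+ * low five bits of URFIDX track the TX FIFO fill level, so the loop above
+ * spins while bit 4 is set, i.e. while the 16-entry TX FIFO is full, and
+ * gives up after roughly 10ms.
+ */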
+
+static void vt8500_console_putchar(struct uart_port *port, int c)
+{
+	wait_for_xmitr(port);
+	writeb(c, port->membase + VT8500_TXFIFO);
+}
+
+static void vt8500_console_write(struct console *co, const char *s,
+			      unsigned int count)
+{
+	struct vt8500_port *vt8500_port = vt8500_uart_ports[co->index];
+	unsigned long ier;
+
+	BUG_ON(co->index < 0 || co->index >= vt8500_uart_driver.nr);
+
+	ier = vt8500_read(&vt8500_port->uart, VT8500_URIER);
+	vt8500_write(&vt8500_port->uart, 0, VT8500_URIER);
+
+	uart_console_write(&vt8500_port->uart, s, count,
+			   vt8500_console_putchar);
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and switch back to FIFO
+	 */
+	wait_for_xmitr(&vt8500_port->uart);
+	vt8500_write(&vt8500_port->uart, ier, VT8500_URIER);
+}
+
+static int __init vt8500_console_setup(struct console *co, char *options)
+{
+	struct vt8500_port *vt8500_port;
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	if (unlikely(co->index >= vt8500_uart_driver.nr || co->index < 0))
+		return -ENXIO;
+
+	vt8500_port = vt8500_uart_ports[co->index];
+
+	if (!vt8500_port)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+	return uart_set_options(&vt8500_port->uart,
+				 co, baud, parity, bits, flow);
+}
+
+static struct console vt8500_console = {
+	.name = "ttyWMT",
+	.write = vt8500_console_write,
+	.device = uart_console_device,
+	.setup = vt8500_console_setup,
+	.flags = CON_PRINTBUFFER,
+	.index = -1,
+	.data = &vt8500_uart_driver,
+};
+
+#define VT8500_CONSOLE	(&vt8500_console)
+
+#else
+#define VT8500_CONSOLE	NULL
+#endif
+
+static struct uart_ops vt8500_uart_pops = {
+	.tx_empty	= vt8500_tx_empty,
+	.set_mctrl	= vt8500_set_mctrl,
+	.get_mctrl	= vt8500_get_mctrl,
+	.stop_tx	= vt8500_stop_tx,
+	.start_tx	= vt8500_start_tx,
+	.stop_rx	= vt8500_stop_rx,
+	.enable_ms	= vt8500_enable_ms,
+	.break_ctl	= vt8500_break_ctl,
+	.startup	= vt8500_startup,
+	.shutdown	= vt8500_shutdown,
+	.set_termios	= vt8500_set_termios,
+	.type		= vt8500_type,
+	.release_port	= vt8500_release_port,
+	.request_port	= vt8500_request_port,
+	.config_port	= vt8500_config_port,
+	.verify_port	= vt8500_verify_port,
+};
+
+static struct uart_driver vt8500_uart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "vt8500_serial",
+	.dev_name	= "ttyWMT",
+	.nr		= 6,
+	.cons		= VT8500_CONSOLE,
+};
+
+static int __devinit vt8500_serial_probe(struct platform_device *pdev)
+{
+	struct vt8500_port *vt8500_port;
+	struct resource *mmres, *irqres;
+	int ret;
+
+	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mmres || !irqres)
+		return -ENODEV;
+
+	vt8500_port = kzalloc(sizeof(struct vt8500_port), GFP_KERNEL);
+	if (!vt8500_port)
+		return -ENOMEM;
+
+	vt8500_port->uart.type = PORT_VT8500;
+	vt8500_port->uart.iotype = UPIO_MEM;
+	vt8500_port->uart.mapbase = mmres->start;
+	vt8500_port->uart.irq = irqres->start;
+	vt8500_port->uart.fifosize = 16;
+	vt8500_port->uart.ops = &vt8500_uart_pops;
+	vt8500_port->uart.line = pdev->id;
+	vt8500_port->uart.dev = &pdev->dev;
+	vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+	vt8500_port->uart.uartclk = 24000000;
+
+	snprintf(vt8500_port->name, sizeof(vt8500_port->name),
+		 "VT8500 UART%d", pdev->id);
+
+	vt8500_port->uart.membase = ioremap(mmres->start,
+					    mmres->end - mmres->start + 1);
+	if (!vt8500_port->uart.membase) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	vt8500_uart_ports[pdev->id] = vt8500_port;
+
+	uart_add_one_port(&vt8500_uart_driver, &vt8500_port->uart);
+
+	platform_set_drvdata(pdev, vt8500_port);
+
+	return 0;
+
+err:
+	kfree(vt8500_port);
+	return ret;
+}
+
+static int __devexit vt8500_serial_remove(struct platform_device *pdev)
+{
+	struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
+	kfree(vt8500_port);
+
+	return 0;
+}
+
+static struct platform_driver vt8500_platform_driver = {
+	.probe  = vt8500_serial_probe,
+	.remove = __devexit_p(vt8500_serial_remove),
+	.driver = {
+		.name = "vt8500_serial",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init vt8500_serial_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&vt8500_uart_driver);
+	if (unlikely(ret))
+		return ret;
+
+	ret = platform_driver_register(&vt8500_platform_driver);
+
+	if (unlikely(ret))
+		uart_unregister_driver(&vt8500_uart_driver);
+
+	return ret;
+}
+
+static void __exit vt8500_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_VT8500_CONSOLE
+	unregister_console(&vt8500_console);
+#endif
+	platform_driver_unregister(&vt8500_platform_driver);
+	uart_unregister_driver(&vt8500_uart_driver);
+}
+
+module_init(vt8500_serial_init);
+module_exit(vt8500_serial_exit);
+
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
+MODULE_DESCRIPTION("Driver for vt8500 serial device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index ceba593..04113e5 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -101,7 +101,7 @@
 		return NULL;
 
 	if (sfi_use_ioremap)
-		return ioremap(phys, size);
+		return ioremap_cache(phys, size);
 	else
 		return early_ioremap(phys, size);
 }
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 3f5e387..5f63c3b 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -21,7 +21,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
-#include <linux/kobject.h>
 #include <linux/sysdev.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 78f9fd0..13bfa9d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -111,11 +111,14 @@
 	  will be called coldfire_qspi.
 
 config SPI_DAVINCI
-	tristate "SPI controller driver for DaVinci/DA8xx SoC's"
+	tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
 	depends on SPI_MASTER && ARCH_DAVINCI
 	select SPI_BITBANG
 	help
-	  SPI master controller for DaVinci and DA8xx SPI modules.
+	  SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+
+	  This driver can also be built as a module. The module will be called
+	  davinci_spi.
 
 config SPI_EP93XX
 	tristate "Cirrus Logic EP93xx SPI controller"
@@ -153,10 +156,10 @@
 	def_bool y if ARCH_MX31
 
 config SPI_IMX_VER_0_7
-	def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51
+	def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53
 
 config SPI_IMX_VER_2_3
-	def_bool y if ARCH_MX51
+	def_bool y if ARCH_MX51 || ARCH_MX53
 
 config SPI_IMX
 	tristate "Freescale i.MX SPI controllers"
@@ -267,12 +270,15 @@
 
 config SPI_PXA2XX
 	tristate "PXA2xx SSP SPI master"
-	depends on ARCH_PXA && EXPERIMENTAL
-	select PXA_SSP
+	depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL
+	select PXA_SSP if ARCH_PXA
 	help
-	  This enables using a PXA2xx SSP port as a SPI master controller.
-	  The driver can be configured to use any SSP port and additional
-	  documentation can be found a Documentation/spi/pxa2xx.
+	  This enables using a PXA2xx or Sodaville SSP port as a SPI master
+	  controller. The driver can be configured to use any SSP port and
+	  additional documentation can be found at Documentation/spi/pxa2xx.
+
+config SPI_PXA2XX_PCI
+	def_bool SPI_PXA2XX && X86_32 && PCI
 
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
@@ -304,8 +310,8 @@
 
 config SPI_S3C64XX
 	tristate "Samsung S3C64XX series type SPI"
-	depends on ARCH_S3C64XX && EXPERIMENTAL
-	select S3C64XX_DMA
+	depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+	select S3C64XX_DMA if ARCH_S3C64XX
 	help
 	  SPI driver for Samsung S3C64XX and newer SoCs.
 
@@ -353,7 +359,6 @@
 	tristate "Xilinx SPI controller common module"
 	depends on HAS_IOMEM && EXPERIMENTAL
 	select SPI_BITBANG
-	select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE)
 	help
 	  This exposes the SPI controller IP from the Xilinx EDK.
 
@@ -362,19 +367,6 @@
 
 	  Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
 
-config SPI_XILINX_OF
-	tristate "Xilinx SPI controller OF device"
-	depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE)
-	help
-	  This is the OF driver for the SPI controller IP from the Xilinx EDK.
-
-config SPI_XILINX_PLTFM
-	tristate "Xilinx SPI controller platform device"
-	depends on SPI_XILINX
-	help
-	  This is the platform driver for the SPI controller IP
-	  from the Xilinx EDK.
-
 config SPI_NUC900
 	tristate "Nuvoton NUC900 series SPI"
 	depends on ARCH_W90X900 && EXPERIMENTAL
@@ -396,6 +388,10 @@
 	tristate "PCI interface driver for DW SPI core"
 	depends on SPI_DESIGNWARE && PCI
 
+config SPI_DW_MID_DMA
+	bool "DMA support for DW SPI controller on Intel Moorestown platform"
+	depends on SPI_DW_PCI && INTEL_MID_DMAC
+
 config SPI_DW_MMIO
 	tristate "Memory-mapped io interface driver for DW SPI core"
 	depends on SPI_DESIGNWARE && HAVE_CLK
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8bc1a5a..3a42463 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,13 +17,15 @@
 obj-$(CONFIG_SPI_COLDFIRE_QSPI)		+= coldfire_qspi.o
 obj-$(CONFIG_SPI_DAVINCI)		+= davinci_spi.o
 obj-$(CONFIG_SPI_DESIGNWARE)		+= dw_spi.o
-obj-$(CONFIG_SPI_DW_PCI)		+= dw_spi_pci.o
+obj-$(CONFIG_SPI_DW_PCI)		+= dw_spi_midpci.o
+dw_spi_midpci-objs			:= dw_spi_pci.o dw_spi_mid.o
 obj-$(CONFIG_SPI_DW_MMIO)		+= dw_spi_mmio.o
 obj-$(CONFIG_SPI_EP93XX)		+= ep93xx_spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi_gpio.o
 obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
 obj-$(CONFIG_SPI_LM70_LLP)		+= spi_lm70llp.o
 obj-$(CONFIG_SPI_PXA2XX)		+= pxa2xx_spi.o
+obj-$(CONFIG_SPI_PXA2XX_PCI)		+= pxa2xx_spi_pci.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)		+= omap_uwire.o
 obj-$(CONFIG_SPI_OMAP24XX)		+= omap2_mcspi.o
 obj-$(CONFIG_SPI_OMAP_100K)		+= omap_spi_100k.o
@@ -43,8 +45,6 @@
 obj-$(CONFIG_SPI_TOPCLIFF_PCH)		+= spi_topcliff_pch.o
 obj-$(CONFIG_SPI_TXX9)			+= spi_txx9.o
 obj-$(CONFIG_SPI_XILINX)		+= xilinx_spi.o
-obj-$(CONFIG_SPI_XILINX_OF)		+= xilinx_spi_of.o
-obj-$(CONFIG_SPI_XILINX_PLTFM)		+= xilinx_spi_pltfm.o
 obj-$(CONFIG_SPI_SH_SCI)		+= spi_sh_sci.o
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi_sh_msiof.o
 obj-$(CONFIG_SPI_STMP3XXX)		+= spi_stmp.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index fb3d1b3..71a1219 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -253,11 +253,6 @@
 #define STATE_ERROR			((void *) -1)
 
 /*
- * Queue State
- */
-#define QUEUE_RUNNING			(0)
-#define QUEUE_STOPPED			(1)
-/*
  * SSP State - Whether Enabled or Disabled
  */
 #define SSP_DISABLED			(0)
@@ -344,7 +339,7 @@
  * @lock: spinlock to syncronise access to driver data
  * @workqueue: a workqueue on which any spi_message request is queued
  * @busy: workqueue is busy
- * @run: workqueue is running
+ * @running: workqueue is running
  * @pump_transfers: Tasklet used in Interrupt Transfer mode
  * @cur_msg: Pointer to current spi_message being processed
  * @cur_transfer: Pointer to current spi_transfer
@@ -369,8 +364,8 @@
 	struct work_struct		pump_messages;
 	spinlock_t			queue_lock;
 	struct list_head		queue;
-	int				busy;
-	int				run;
+	bool				busy;
+	bool				running;
 	/* Message transfer pump */
 	struct tasklet_struct		pump_transfers;
 	struct spi_message		*cur_msg;
@@ -782,9 +777,9 @@
 static void unmap_free_dma_scatter(struct pl022 *pl022)
 {
 	/* Unmap and free the SG tables */
-	dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+	dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
 		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
-	dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+	dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
 		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
 	sg_free_table(&pl022->sgt_rx);
 	sg_free_table(&pl022->sgt_tx);
@@ -917,7 +912,7 @@
 	};
 	unsigned int pages;
 	int ret;
-	int sglen;
+	int rx_sglen, tx_sglen;
 	struct dma_chan *rxchan = pl022->dma_rx_channel;
 	struct dma_chan *txchan = pl022->dma_tx_channel;
 	struct dma_async_tx_descriptor *rxdesc;
@@ -956,7 +951,7 @@
 		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 		break;
 	case WRITING_U32:
-		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;;
+		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 		break;
 	}
 
@@ -991,20 +986,20 @@
 			  pl022->cur_transfer->len, &pl022->sgt_tx);
 
 	/* Map DMA buffers */
-	sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+	rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
 			   pl022->sgt_rx.nents, DMA_FROM_DEVICE);
-	if (!sglen)
+	if (!rx_sglen)
 		goto err_rx_sgmap;
 
-	sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+	tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
 			   pl022->sgt_tx.nents, DMA_TO_DEVICE);
-	if (!sglen)
+	if (!tx_sglen)
 		goto err_tx_sgmap;
 
 	/* Send both scatterlists */
 	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
 				      pl022->sgt_rx.sgl,
-				      pl022->sgt_rx.nents,
+				      rx_sglen,
 				      DMA_FROM_DEVICE,
 				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!rxdesc)
@@ -1012,7 +1007,7 @@
 
 	txdesc = txchan->device->device_prep_slave_sg(txchan,
 				      pl022->sgt_tx.sgl,
-				      pl022->sgt_tx.nents,
+				      tx_sglen,
 				      DMA_TO_DEVICE,
 				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!txdesc)
@@ -1040,10 +1035,10 @@
 	txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
 err_rxdesc:
 	rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
-	dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+	dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
 		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
-	dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
 		     pl022->sgt_tx.nents, DMA_FROM_DEVICE);
 err_rx_sgmap:
 	sg_free_table(&pl022->sgt_tx);
@@ -1460,8 +1455,8 @@
 
 	/* Lock queue and check for queue work */
 	spin_lock_irqsave(&pl022->queue_lock, flags);
-	if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
-		pl022->busy = 0;
+	if (list_empty(&pl022->queue) || !pl022->running) {
+		pl022->busy = false;
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 		return;
 	}
@@ -1475,7 +1470,7 @@
 	    list_entry(pl022->queue.next, struct spi_message, queue);
 
 	list_del_init(&pl022->cur_msg->queue);
-	pl022->busy = 1;
+	pl022->busy = true;
 	spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
 	/* Initial message state */
@@ -1507,8 +1502,8 @@
 	INIT_LIST_HEAD(&pl022->queue);
 	spin_lock_init(&pl022->queue_lock);
 
-	pl022->run = QUEUE_STOPPED;
-	pl022->busy = 0;
+	pl022->running = false;
+	pl022->busy = false;
 
 	tasklet_init(&pl022->pump_transfers,
 			pump_transfers,	(unsigned long)pl022);
@@ -1529,12 +1524,12 @@
 
 	spin_lock_irqsave(&pl022->queue_lock, flags);
 
-	if (pl022->run == QUEUE_RUNNING || pl022->busy) {
+	if (pl022->running || pl022->busy) {
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 		return -EBUSY;
 	}
 
-	pl022->run = QUEUE_RUNNING;
+	pl022->running = true;
 	pl022->cur_msg = NULL;
 	pl022->cur_transfer = NULL;
 	pl022->cur_chip = NULL;
@@ -1566,7 +1561,8 @@
 
 	if (!list_empty(&pl022->queue) || pl022->busy)
 		status = -EBUSY;
-	else pl022->run = QUEUE_STOPPED;
+	else
+		pl022->running = false;
 
 	spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
@@ -1684,7 +1680,7 @@
 
 	spin_lock_irqsave(&pl022->queue_lock, flags);
 
-	if (pl022->run == QUEUE_STOPPED) {
+	if (!pl022->running) {
 		spin_unlock_irqrestore(&pl022->queue_lock, flags);
 		return -ESHUTDOWN;
 	}
@@ -1693,7 +1689,7 @@
 	msg->state = STATE_START;
 
 	list_add_tail(&msg->queue, &pl022->queue);
-	if (pl022->run == QUEUE_RUNNING && !pl022->busy)
+	if (pl022->running && !pl022->busy)
 		queue_work(pl022->workqueue, &pl022->pump_messages);
 
 	spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1799,7 +1795,7 @@
 {
 	struct pl022_config_chip const *chip_info;
 	struct chip_data *chip;
-	struct ssp_clock_params clk_freq;
+	struct ssp_clock_params clk_freq = {0, };
 	int status = 0;
 	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
 	unsigned int bits = spi->bits_per_word;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index a067046..1a478bf 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -341,9 +341,9 @@
 /*
  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
  *  - The buffer is either valid for CPU access, else NULL
- *  - If the buffer is valid, so is its DMA addresss
+ *  - If the buffer is valid, so is its DMA address
  *
- * This driver manages the dma addresss unless message->is_dma_mapped.
+ * This driver manages the dma address unless message->is_dma_mapped.
  */
 static int
 atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index b85090c..6beab99 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2009 Texas Instruments.
+ * Copyright (C) 2010 EF Johnson Technologies
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -38,11 +39,6 @@
 
 #define CS_DEFAULT	0xFF
 
-#define SPI_BUFSIZ	(SMP_CACHE_BYTES + 1)
-#define DAVINCI_DMA_DATA_TYPE_S8	0x01
-#define DAVINCI_DMA_DATA_TYPE_S16	0x02
-#define DAVINCI_DMA_DATA_TYPE_S32	0x04
-
 #define SPIFMT_PHASE_MASK	BIT(16)
 #define SPIFMT_POLARITY_MASK	BIT(17)
 #define SPIFMT_DISTIMER_MASK	BIT(18)
@@ -52,34 +48,43 @@
 #define SPIFMT_ODD_PARITY_MASK	BIT(23)
 #define SPIFMT_WDELAY_MASK	0x3f000000u
 #define SPIFMT_WDELAY_SHIFT	24
-#define SPIFMT_CHARLEN_MASK	0x0000001Fu
-
-/* SPIGCR1 */
-#define SPIGCR1_SPIENA_MASK	0x01000000u
+#define SPIFMT_PRESCALE_SHIFT	8
 
 /* SPIPC0 */
 #define SPIPC0_DIFUN_MASK	BIT(11)		/* MISO */
 #define SPIPC0_DOFUN_MASK	BIT(10)		/* MOSI */
 #define SPIPC0_CLKFUN_MASK	BIT(9)		/* CLK */
 #define SPIPC0_SPIENA_MASK	BIT(8)		/* nREADY */
-#define SPIPC0_EN1FUN_MASK	BIT(1)
-#define SPIPC0_EN0FUN_MASK	BIT(0)
 
 #define SPIINT_MASKALL		0x0101035F
-#define SPI_INTLVL_1		0x000001FFu
-#define SPI_INTLVL_0		0x00000000u
+#define SPIINT_MASKINT		0x0000015F
+#define SPI_INTLVL_1		0x000001FF
+#define SPI_INTLVL_0		0x00000000
 
-/* SPIDAT1 */
-#define SPIDAT1_CSHOLD_SHIFT	28
-#define SPIDAT1_CSNR_SHIFT	16
+/* SPIDAT1 (upper 16 bit defines) */
+#define SPIDAT1_CSHOLD_MASK	BIT(12)
+
+/* SPIGCR1 */
 #define SPIGCR1_CLKMOD_MASK	BIT(1)
 #define SPIGCR1_MASTER_MASK     BIT(0)
+#define SPIGCR1_POWERDOWN_MASK	BIT(8)
 #define SPIGCR1_LOOPBACK_MASK	BIT(16)
+#define SPIGCR1_SPIENA_MASK	BIT(24)
 
 /* SPIBUF */
 #define SPIBUF_TXFULL_MASK	BIT(29)
 #define SPIBUF_RXEMPTY_MASK	BIT(31)
 
+/* SPIDELAY */
+#define SPIDELAY_C2TDELAY_SHIFT 24
+#define SPIDELAY_C2TDELAY_MASK  (0xFF << SPIDELAY_C2TDELAY_SHIFT)
+#define SPIDELAY_T2CDELAY_SHIFT 16
+#define SPIDELAY_T2CDELAY_MASK  (0xFF << SPIDELAY_T2CDELAY_SHIFT)
+#define SPIDELAY_T2EDELAY_SHIFT 8
+#define SPIDELAY_T2EDELAY_MASK  (0xFF << SPIDELAY_T2EDELAY_SHIFT)
+#define SPIDELAY_C2EDELAY_SHIFT 0
+#define SPIDELAY_C2EDELAY_MASK  0xFF
+
 /* Error Masks */
 #define SPIFLG_DLEN_ERR_MASK		BIT(0)
 #define SPIFLG_TIMEOUT_MASK		BIT(1)
@@ -87,29 +92,13 @@
 #define SPIFLG_DESYNC_MASK		BIT(3)
 #define SPIFLG_BITERR_MASK		BIT(4)
 #define SPIFLG_OVRRUN_MASK		BIT(6)
-#define SPIFLG_RX_INTR_MASK		BIT(8)
-#define SPIFLG_TX_INTR_MASK		BIT(9)
 #define SPIFLG_BUF_INIT_ACTIVE_MASK	BIT(24)
-#define SPIFLG_MASK			(SPIFLG_DLEN_ERR_MASK \
+#define SPIFLG_ERROR_MASK		(SPIFLG_DLEN_ERR_MASK \
 				| SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
 				| SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
-				| SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
-				| SPIFLG_TX_INTR_MASK \
-				| SPIFLG_BUF_INIT_ACTIVE_MASK)
+				| SPIFLG_OVRRUN_MASK)
 
-#define SPIINT_DLEN_ERR_INTR	BIT(0)
-#define SPIINT_TIMEOUT_INTR	BIT(1)
-#define SPIINT_PARERR_INTR	BIT(2)
-#define SPIINT_DESYNC_INTR	BIT(3)
-#define SPIINT_BITERR_INTR	BIT(4)
-#define SPIINT_OVRRUN_INTR	BIT(6)
-#define SPIINT_RX_INTR		BIT(8)
-#define SPIINT_TX_INTR		BIT(9)
 #define SPIINT_DMA_REQ_EN	BIT(16)
-#define SPIINT_ENABLE_HIGHZ	BIT(24)
-
-#define SPI_T2CDELAY_SHIFT	16
-#define SPI_C2TDELAY_SHIFT	24
 
 /* SPI Controller registers */
 #define SPIGCR0		0x00
@@ -118,44 +107,18 @@
 #define SPILVL		0x0c
 #define SPIFLG		0x10
 #define SPIPC0		0x14
-#define SPIPC1		0x18
-#define SPIPC2		0x1c
-#define SPIPC3		0x20
-#define SPIPC4		0x24
-#define SPIPC5		0x28
-#define SPIPC6		0x2c
-#define SPIPC7		0x30
-#define SPIPC8		0x34
-#define SPIDAT0		0x38
 #define SPIDAT1		0x3c
 #define SPIBUF		0x40
-#define SPIEMU		0x44
 #define SPIDELAY	0x48
 #define SPIDEF		0x4c
 #define SPIFMT0		0x50
-#define SPIFMT1		0x54
-#define SPIFMT2		0x58
-#define SPIFMT3		0x5c
-#define TGINTVEC0	0x60
-#define TGINTVEC1	0x64
-
-struct davinci_spi_slave {
-	u32	cmd_to_write;
-	u32	clk_ctrl_to_write;
-	u32	bytes_per_word;
-	u8	active_cs;
-};
 
 /* We have 2 DMA channels per CS, one for RX and one for TX */
 struct davinci_spi_dma {
-	int			dma_tx_channel;
-	int			dma_rx_channel;
-	int			dma_tx_sync_dev;
-	int			dma_rx_sync_dev;
+	int			tx_channel;
+	int			rx_channel;
+	int			dummy_param_slot;
 	enum dma_event_q	eventq;
-
-	struct completion	dma_tx_completion;
-	struct completion	dma_rx_completion;
 };
 
 /* SPI Controller driver's private data. */
@@ -166,58 +129,63 @@
 	u8			version;
 	resource_size_t		pbase;
 	void __iomem		*base;
-	size_t			region_size;
 	u32			irq;
 	struct completion	done;
 
 	const void		*tx;
 	void			*rx;
-	u8			*tmp_buf;
-	int			count;
-	struct davinci_spi_dma	*dma_channels;
-	struct			davinci_spi_platform_data *pdata;
+#define SPI_TMP_BUFSZ	(SMP_CACHE_BYTES + 1)
+	u8			rx_tmp_buf[SPI_TMP_BUFSZ];
+	int			rcount;
+	int			wcount;
+	struct davinci_spi_dma	dma;
+	struct davinci_spi_platform_data *pdata;
 
 	void			(*get_rx)(u32 rx_data, struct davinci_spi *);
 	u32			(*get_tx)(struct davinci_spi *);
 
-	struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
+	u8			bytes_per_word[SPI_MAX_CHIPSELECT];
 };
 
-static unsigned use_dma;
+static struct davinci_spi_config davinci_spi_default_cfg;
 
-static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
+static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
 {
-	u8 *rx = davinci_spi->rx;
-
-	*rx++ = (u8)data;
-	davinci_spi->rx = rx;
+	if (dspi->rx) {
+		u8 *rx = dspi->rx;
+		*rx++ = (u8)data;
+		dspi->rx = rx;
+	}
 }
 
-static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
+static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
 {
-	u16 *rx = davinci_spi->rx;
-
-	*rx++ = (u16)data;
-	davinci_spi->rx = rx;
+	if (dspi->rx) {
+		u16 *rx = dspi->rx;
+		*rx++ = (u16)data;
+		dspi->rx = rx;
+	}
 }
 
-static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
+static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
 {
-	u32 data;
-	const u8 *tx = davinci_spi->tx;
-
-	data = *tx++;
-	davinci_spi->tx = tx;
+	u32 data = 0;
+	if (dspi->tx) {
+		const u8 *tx = dspi->tx;
+		data = *tx++;
+		dspi->tx = tx;
+	}
 	return data;
 }
 
-static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
+static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
 {
-	u32 data;
-	const u16 *tx = davinci_spi->tx;
-
-	data = *tx++;
-	davinci_spi->tx = tx;
+	u32 data = 0;
+	if (dspi->tx) {
+		const u16 *tx = dspi->tx;
+		data = *tx++;
+		dspi->tx = tx;
+	}
 	return data;
 }
 
@@ -237,55 +205,67 @@
 	iowrite32(v, addr);
 }
 
-static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
-{
-	set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
-}
-
-static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
-{
-	clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
-}
-
-static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
-{
-	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
-
-	if (enable)
-		set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
-	else
-		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
-}
-
 /*
  * Interface to control the chip select signal
  */
 static void davinci_spi_chipselect(struct spi_device *spi, int value)
 {
-	struct davinci_spi *davinci_spi;
+	struct davinci_spi *dspi;
 	struct davinci_spi_platform_data *pdata;
-	u32 data1_reg_val = 0;
+	u8 chip_sel = spi->chip_select;
+	u16 spidat1 = CS_DEFAULT;
+	bool gpio_chipsel = false;
 
-	davinci_spi = spi_master_get_devdata(spi->master);
-	pdata = davinci_spi->pdata;
+	dspi = spi_master_get_devdata(spi->master);
+	pdata = dspi->pdata;
+
+	if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
+				pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
+		gpio_chipsel = true;
 
 	/*
 	 * Board specific chip select logic decides the polarity and cs
 	 * line for the controller
 	 */
-	if (value == BITBANG_CS_INACTIVE) {
-		set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);
+	if (gpio_chipsel) {
+		if (value == BITBANG_CS_ACTIVE)
+			gpio_set_value(pdata->chip_sel[chip_sel], 0);
+		else
+			gpio_set_value(pdata->chip_sel[chip_sel], 1);
+	} else {
+		if (value == BITBANG_CS_ACTIVE) {
+			spidat1 |= SPIDAT1_CSHOLD_MASK;
+			spidat1 &= ~(0x1 << chip_sel);
+		}
 
-		data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
-		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
-
-		while ((ioread32(davinci_spi->base + SPIBUF)
-					& SPIBUF_RXEMPTY_MASK) == 0)
-			cpu_relax();
+		iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
 	}
 }
 
 /**
+ * davinci_spi_get_prescale - Calculates the correct prescale value
+ * @dspi: the controller data
+ * @max_speed_hz: the maximum rate the SPI clock can run at
+ *
+ * This function calculates the prescale value that generates a clock rate
+ * less than or equal to the specified maximum.
+ *
+ * Returns: calculated prescale - 1 for easy programming into SPI registers
+ * or a negative error number if a valid prescaler cannot be obtained.
+ */
+static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
+							u32 max_speed_hz)
+{
+	int ret;
+
+	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
+
+	if (ret < 3 || ret > 256)
+		return -EINVAL;
+
+	return ret - 1;
+}
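+
+/*
+ * Worked example with hypothetical numbers, for illustration only: with a
+ * 150 MHz functional clock and max_speed_hz = 10 MHz,
+ * DIV_ROUND_UP(150000000, 10000000) = 15, so the function returns 14;
+ * programming 14 into the SPIFMTn prescale field then yields a divide-by-15,
+ * i.e. a 10 MHz SPI clock (assuming the usual divide-by-(prescale + 1)
+ * behaviour).
+ */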
+
+/**
  * davinci_spi_setup_transfer - This functions will determine transfer method
  * @spi: spi device on which data transfer to be done
  * @t: spi transfer in which transfer info is filled
@@ -298,13 +278,15 @@
 		struct spi_transfer *t)
 {
 
-	struct davinci_spi *davinci_spi;
-	struct davinci_spi_platform_data *pdata;
+	struct davinci_spi *dspi;
+	struct davinci_spi_config *spicfg;
 	u8 bits_per_word = 0;
-	u32 hz = 0, prescale = 0, clkspeed;
+	u32 hz = 0, spifmt = 0, prescale = 0;
 
-	davinci_spi = spi_master_get_devdata(spi->master);
-	pdata = davinci_spi->pdata;
+	dspi = spi_master_get_devdata(spi->master);
+	spicfg = (struct davinci_spi_config *)spi->controller_data;
+	if (!spicfg)
+		spicfg = &davinci_spi_default_cfg;
 
 	if (t) {
 		bits_per_word = t->bits_per_word;
@@ -320,186 +302,35 @@
 	 * 8bit, 16bit or 32bit transfer
 	 */
 	if (bits_per_word <= 8 && bits_per_word >= 2) {
-		davinci_spi->get_rx = davinci_spi_rx_buf_u8;
-		davinci_spi->get_tx = davinci_spi_tx_buf_u8;
-		davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
+		dspi->get_rx = davinci_spi_rx_buf_u8;
+		dspi->get_tx = davinci_spi_tx_buf_u8;
+		dspi->bytes_per_word[spi->chip_select] = 1;
 	} else if (bits_per_word <= 16 && bits_per_word >= 2) {
-		davinci_spi->get_rx = davinci_spi_rx_buf_u16;
-		davinci_spi->get_tx = davinci_spi_tx_buf_u16;
-		davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
+		dspi->get_rx = davinci_spi_rx_buf_u16;
+		dspi->get_tx = davinci_spi_tx_buf_u16;
+		dspi->bytes_per_word[spi->chip_select] = 2;
 	} else
 		return -EINVAL;
 
 	if (!hz)
 		hz = spi->max_speed_hz;
 
-	clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
-			spi->chip_select);
-	set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
-			spi->chip_select);
+	/* Set up SPIFMTn register, unique to this chipselect. */
 
-	clkspeed = clk_get_rate(davinci_spi->clk);
-	if (hz > clkspeed / 2)
-		prescale = 1 << 8;
-	if (hz < clkspeed / 256)
-		prescale = 255 << 8;
-	if (!prescale)
-		prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;
+	prescale = davinci_spi_get_prescale(dspi, hz);
+	if (prescale < 0)
+		return prescale;
 
-	clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
-	set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);
+	spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
 
-	return 0;
-}
-
-static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
-{
-	struct spi_device *spi = (struct spi_device *)data;
-	struct davinci_spi *davinci_spi;
-	struct davinci_spi_dma *davinci_spi_dma;
-	struct davinci_spi_platform_data *pdata;
-
-	davinci_spi = spi_master_get_devdata(spi->master);
-	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
-	pdata = davinci_spi->pdata;
-
-	if (ch_status == DMA_COMPLETE)
-		edma_stop(davinci_spi_dma->dma_rx_channel);
-	else
-		edma_clean_channel(davinci_spi_dma->dma_rx_channel);
-
-	complete(&davinci_spi_dma->dma_rx_completion);
-	/* We must disable the DMA RX request */
-	davinci_spi_set_dma_req(spi, 0);
-}
-
-static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
-{
-	struct spi_device *spi = (struct spi_device *)data;
-	struct davinci_spi *davinci_spi;
-	struct davinci_spi_dma *davinci_spi_dma;
-	struct davinci_spi_platform_data *pdata;
-
-	davinci_spi = spi_master_get_devdata(spi->master);
-	davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
-	pdata = davinci_spi->pdata;
-
-	if (ch_status == DMA_COMPLETE)
-		edma_stop(davinci_spi_dma->dma_tx_channel);
-	else
-		edma_clean_channel(davinci_spi_dma->dma_tx_channel);
-
-	complete(&davinci_spi_dma->dma_tx_completion);
-	/* We must disable the DMA TX request */
-	davinci_spi_set_dma_req(spi, 0);
-}
-
-static int davinci_spi_request_dma(struct spi_device *spi)
-{
-	struct davinci_spi *davinci_spi;
-	struct davinci_spi_dma *davinci_spi_dma;
-	struct davinci_spi_platform_data *pdata;
-	struct device *sdev;
-	int r;
-
-	davinci_spi = spi_master_get_devdata(spi->master);
-	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
-	pdata = davinci_spi->pdata;
-	sdev = davinci_spi->bitbang.master->dev.parent;
-
-	r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
-				davinci_spi_dma_rx_callback, spi,
-				davinci_spi_dma->eventq);
-	if (r < 0) {
-		dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
-		return -EAGAIN;
-	}
-	davinci_spi_dma->dma_rx_channel = r;
-	r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
-				davinci_spi_dma_tx_callback, spi,
-				davinci_spi_dma->eventq);
-	if (r < 0) {
-		edma_free_channel(davinci_spi_dma->dma_rx_channel);
-		davinci_spi_dma->dma_rx_channel = -1;
-		dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
-		return -EAGAIN;
-	}
-	davinci_spi_dma->dma_tx_channel = r;
-
-	return 0;
-}
-
-/**
- * davinci_spi_setup - This functions will set default transfer method
- * @spi: spi device on which data transfer to be done
- *
- * This functions sets the default transfer method.
- */
-
-static int davinci_spi_setup(struct spi_device *spi)
-{
-	int retval;
-	struct davinci_spi *davinci_spi;
-	struct davinci_spi_dma *davinci_spi_dma;
-	struct device *sdev;
-
-	davinci_spi = spi_master_get_devdata(spi->master);
-	sdev = davinci_spi->bitbang.master->dev.parent;
-
-	/* if bits per word length is zero then set it default 8 */
-	if (!spi->bits_per_word)
-		spi->bits_per_word = 8;
-
-	davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
-
-	if (use_dma && davinci_spi->dma_channels) {
-		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
-
-		if ((davinci_spi_dma->dma_rx_channel == -1)
-				|| (davinci_spi_dma->dma_tx_channel == -1)) {
-			retval = davinci_spi_request_dma(spi);
-			if (retval < 0)
-				return retval;
-		}
-	}
-
-	/*
-	 * SPI in DaVinci and DA8xx operate between
-	 * 600 KHz and 50 MHz
-	 */
-	if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
-		dev_dbg(sdev, "Operating frequency is not in acceptable "
-				"range\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Set up SPIFMTn register, unique to this chipselect.
-	 *
-	 * NOTE: we could do all of these with one write.  Also, some
-	 * of the "version 2" features are found in chips that don't
-	 * support all of them...
-	 */
 	if (spi->mode & SPI_LSB_FIRST)
-		set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
-				spi->chip_select);
-	else
-		clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
-				spi->chip_select);
+		spifmt |= SPIFMT_SHIFTDIR_MASK;
 
 	if (spi->mode & SPI_CPOL)
-		set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
-				spi->chip_select);
-	else
-		clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
-				spi->chip_select);
+		spifmt |= SPIFMT_POLARITY_MASK;
 
 	if (!(spi->mode & SPI_CPHA))
-		set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
-				spi->chip_select);
-	else
-		clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
-				spi->chip_select);
+		spifmt |= SPIFMT_PHASE_MASK;
 
 	/*
 	 * Version 1 hardware supports two basic SPI modes:
@@ -514,111 +345,84 @@
 	 *  - 4 pin with enable is (SPI_READY | SPI_NO_CS)
 	 */
 
-	if (davinci_spi->version == SPI_VERSION_2) {
-		clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
-				spi->chip_select);
-		set_fmt_bits(davinci_spi->base,
-				(davinci_spi->pdata->wdelay
-						<< SPIFMT_WDELAY_SHIFT)
-					& SPIFMT_WDELAY_MASK,
-				spi->chip_select);
+	if (dspi->version == SPI_VERSION_2) {
 
-		if (davinci_spi->pdata->odd_parity)
-			set_fmt_bits(davinci_spi->base,
-					SPIFMT_ODD_PARITY_MASK,
-					spi->chip_select);
-		else
-			clear_fmt_bits(davinci_spi->base,
-					SPIFMT_ODD_PARITY_MASK,
-					spi->chip_select);
+		u32 delay = 0;
 
-		if (davinci_spi->pdata->parity_enable)
-			set_fmt_bits(davinci_spi->base,
-					SPIFMT_PARITYENA_MASK,
-					spi->chip_select);
-		else
-			clear_fmt_bits(davinci_spi->base,
-					SPIFMT_PARITYENA_MASK,
-					spi->chip_select);
+		spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
+							& SPIFMT_WDELAY_MASK);
 
-		if (davinci_spi->pdata->wait_enable)
-			set_fmt_bits(davinci_spi->base,
-					SPIFMT_WAITENA_MASK,
-					spi->chip_select);
-		else
-			clear_fmt_bits(davinci_spi->base,
-					SPIFMT_WAITENA_MASK,
-					spi->chip_select);
+		if (spicfg->odd_parity)
+			spifmt |= SPIFMT_ODD_PARITY_MASK;
 
-		if (davinci_spi->pdata->timer_disable)
-			set_fmt_bits(davinci_spi->base,
-					SPIFMT_DISTIMER_MASK,
-					spi->chip_select);
-		else
-			clear_fmt_bits(davinci_spi->base,
-					SPIFMT_DISTIMER_MASK,
-					spi->chip_select);
-	}
+		if (spicfg->parity_enable)
+			spifmt |= SPIFMT_PARITYENA_MASK;
 
-	retval = davinci_spi_setup_transfer(spi, NULL);
-
-	return retval;
-}
-
-static void davinci_spi_cleanup(struct spi_device *spi)
-{
-	struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
-	struct davinci_spi_dma *davinci_spi_dma;
-
-	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
-
-	if (use_dma && davinci_spi->dma_channels) {
-		davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
-
-		if ((davinci_spi_dma->dma_rx_channel != -1)
-				&& (davinci_spi_dma->dma_tx_channel != -1)) {
-			edma_free_channel(davinci_spi_dma->dma_tx_channel);
-			edma_free_channel(davinci_spi_dma->dma_rx_channel);
+		if (spicfg->timer_disable) {
+			spifmt |= SPIFMT_DISTIMER_MASK;
+		} else {
+			delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
+						& SPIDELAY_C2TDELAY_MASK;
+			delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
+						& SPIDELAY_T2CDELAY_MASK;
 		}
+
+		if (spi->mode & SPI_READY) {
+			spifmt |= SPIFMT_WAITENA_MASK;
+			delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
+						& SPIDELAY_T2EDELAY_MASK;
+			delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
+						& SPIDELAY_C2EDELAY_MASK;
+		}
+
+		iowrite32(delay, dspi->base + SPIDELAY);
 	}
-}
 
-static int davinci_spi_bufs_prep(struct spi_device *spi,
-				 struct davinci_spi *davinci_spi)
-{
-	int op_mode = 0;
-
-	/*
-	 * REVISIT  unless devices disagree about SPI_LOOP or
-	 * SPI_READY (SPI_NO_CS only allows one device!), this
-	 * should not need to be done before each message...
-	 * optimize for both flags staying cleared.
-	 */
-
-	op_mode = SPIPC0_DIFUN_MASK
-		| SPIPC0_DOFUN_MASK
-		| SPIPC0_CLKFUN_MASK;
-	if (!(spi->mode & SPI_NO_CS))
-		op_mode |= 1 << spi->chip_select;
-	if (spi->mode & SPI_READY)
-		op_mode |= SPIPC0_SPIENA_MASK;
-
-	iowrite32(op_mode, davinci_spi->base + SPIPC0);
-
-	if (spi->mode & SPI_LOOP)
-		set_io_bits(davinci_spi->base + SPIGCR1,
-				SPIGCR1_LOOPBACK_MASK);
-	else
-		clear_io_bits(davinci_spi->base + SPIGCR1,
-				SPIGCR1_LOOPBACK_MASK);
+	iowrite32(spifmt, dspi->base + SPIFMT0);
 
 	return 0;
 }
 
-static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
-				   int int_status)
+/**
+ * davinci_spi_setup - This function will set the default transfer method
+ * @spi: spi device on which data transfer is to be done
+ *
+ * This function sets the default transfer method.
+ */
+static int davinci_spi_setup(struct spi_device *spi)
 {
-	struct device *sdev = davinci_spi->bitbang.master->dev.parent;
+	int retval = 0;
+	struct davinci_spi *dspi;
+	struct davinci_spi_platform_data *pdata;
+
+	dspi = spi_master_get_devdata(spi->master);
+	pdata = dspi->pdata;
+
+	/* if bits per word length is zero then set it default 8 */
+	if (!spi->bits_per_word)
+		spi->bits_per_word = 8;
+
+	if (!(spi->mode & SPI_NO_CS)) {
+		if ((pdata->chip_sel == NULL) ||
+		    (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
+			set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+	}
+
+	if (spi->mode & SPI_READY)
+		set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
+
+	if (spi->mode & SPI_LOOP)
+		set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
+	else
+		clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
+
+	return retval;
+}
+
+static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
+{
+	struct device *sdev = dspi->bitbang.master->dev.parent;
 
 	if (int_status & SPIFLG_TIMEOUT_MASK) {
 		dev_dbg(sdev, "SPI Time-out Error\n");
@@ -633,7 +437,7 @@
 		return -EIO;
 	}
 
-	if (davinci_spi->version == SPI_VERSION_2) {
+	if (dspi->version == SPI_VERSION_2) {
 		if (int_status & SPIFLG_DLEN_ERR_MASK) {
 			dev_dbg(sdev, "SPI Data Length Error\n");
 			return -EIO;
@@ -646,10 +450,6 @@
 			dev_dbg(sdev, "SPI Data Overrun error\n");
 			return -EIO;
 		}
-		if (int_status & SPIFLG_TX_INTR_MASK) {
-			dev_dbg(sdev, "SPI TX intr bit set\n");
-			return -EIO;
-		}
 		if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
 			dev_dbg(sdev, "SPI Buffer Init Active\n");
 			return -EBUSY;
@@ -660,6 +460,61 @@
 }
 
 /**
+ * davinci_spi_process_events - check for and handle any SPI controller events
+ * @dspi: the controller data
+ *
+ * This function will check the SPIFLG register and handle any events that are
+ * detected there.
+ */
+static int davinci_spi_process_events(struct davinci_spi *dspi)
+{
+	u32 buf, status, errors = 0, spidat1;
+
+	buf = ioread32(dspi->base + SPIBUF);
+
+	if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
+		dspi->get_rx(buf & 0xFFFF, dspi);
+		dspi->rcount--;
+	}
+
+	status = ioread32(dspi->base + SPIFLG);
+
+	if (unlikely(status & SPIFLG_ERROR_MASK)) {
+		errors = status & SPIFLG_ERROR_MASK;
+		goto out;
+	}
+
+	if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
+		spidat1 = ioread32(dspi->base + SPIDAT1);
+		dspi->wcount--;
+		spidat1 &= ~0xFFFF;
+		spidat1 |= 0xFFFF & dspi->get_tx(dspi);
+		iowrite32(spidat1, dspi->base + SPIDAT1);
+	}
+
+out:
+	return errors;
+}
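+
+/*
+ * Usage sketch (illustrative; the actual callers live outside this hunk):
+ * a polled or interrupt-driven transfer loop would keep calling
+ * davinci_spi_process_events() until both dspi->wcount and dspi->rcount
+ * reach zero, or until it returns a non-zero SPIFLG error mask.
+ */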
+
+static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
+{
+	struct davinci_spi *dspi = data;
+	struct davinci_spi_dma *dma = &dspi->dma;
+
+	edma_stop(lch);
+
+	if (status == DMA_COMPLETE) {
+		if (lch == dma->rx_channel)
+			dspi->rcount = 0;
+		if (lch == dma->tx_channel)
+			dspi->wcount = 0;
+	}
+
+	if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
+		complete(&dspi->done);
+}
+
+/**
  * davinci_spi_bufs - functions which will handle transfer data
  * @spi: spi device on which data transfer to be done
  * @t: spi transfer in which transfer info is filled
@@ -668,358 +523,276 @@
  * of SPI controller and then wait until the completion will be marked
  * by the IRQ Handler.
  */
-static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
+static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
-	struct davinci_spi *davinci_spi;
-	int int_status, count, ret;
-	u8 conv, tmp;
-	u32 tx_data, data1_reg_val;
-	u32 buf_val, flg_val;
+	struct davinci_spi *dspi;
+	int data_type, ret;
+	u32 tx_data, spidat1;
+	u32 errors = 0;
+	struct davinci_spi_config *spicfg;
 	struct davinci_spi_platform_data *pdata;
-
-	davinci_spi = spi_master_get_devdata(spi->master);
-	pdata = davinci_spi->pdata;
-
-	davinci_spi->tx = t->tx_buf;
-	davinci_spi->rx = t->rx_buf;
-
-	/* convert len to words based on bits_per_word */
-	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
-	davinci_spi->count = t->len / conv;
-
-	INIT_COMPLETION(davinci_spi->done);
-
-	ret = davinci_spi_bufs_prep(spi, davinci_spi);
-	if (ret)
-		return ret;
-
-	/* Enable SPI */
-	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
-
-	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
-			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
-			davinci_spi->base + SPIDELAY);
-
-	count = davinci_spi->count;
-	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
-	tmp = ~(0x1 << spi->chip_select);
-
-	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
-
-	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
-
-	while ((ioread32(davinci_spi->base + SPIBUF)
-				& SPIBUF_RXEMPTY_MASK) == 0)
-		cpu_relax();
-
-	/* Determine the command to execute READ or WRITE */
-	if (t->tx_buf) {
-		clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
-
-		while (1) {
-			tx_data = davinci_spi->get_tx(davinci_spi);
-
-			data1_reg_val &= ~(0xFFFF);
-			data1_reg_val |= (0xFFFF & tx_data);
-
-			buf_val = ioread32(davinci_spi->base + SPIBUF);
-			if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
-				iowrite32(data1_reg_val,
-						davinci_spi->base + SPIDAT1);
-
-				count--;
-			}
-			while (ioread32(davinci_spi->base + SPIBUF)
-					& SPIBUF_RXEMPTY_MASK)
-				cpu_relax();
-
-			/* getting the returned byte */
-			if (t->rx_buf) {
-				buf_val = ioread32(davinci_spi->base + SPIBUF);
-				davinci_spi->get_rx(buf_val, davinci_spi);
-			}
-			if (count <= 0)
-				break;
-		}
-	} else {
-		if (pdata->poll_mode) {
-			while (1) {
-				/* keeps the serial clock going */
-				if ((ioread32(davinci_spi->base + SPIBUF)
-						& SPIBUF_TXFULL_MASK) == 0)
-					iowrite32(data1_reg_val,
-						davinci_spi->base + SPIDAT1);
-
-				while (ioread32(davinci_spi->base + SPIBUF) &
-						SPIBUF_RXEMPTY_MASK)
-					cpu_relax();
-
-				flg_val = ioread32(davinci_spi->base + SPIFLG);
-				buf_val = ioread32(davinci_spi->base + SPIBUF);
-
-				davinci_spi->get_rx(buf_val, davinci_spi);
-
-				count--;
-				if (count <= 0)
-					break;
-			}
-		} else {	/* Receive in Interrupt mode */
-			int i;
-
-			for (i = 0; i < davinci_spi->count; i++) {
-				set_io_bits(davinci_spi->base + SPIINT,
-						SPIINT_BITERR_INTR
-						| SPIINT_OVRRUN_INTR
-						| SPIINT_RX_INTR);
-
-				iowrite32(data1_reg_val,
-						davinci_spi->base + SPIDAT1);
-
-				while (ioread32(davinci_spi->base + SPIINT) &
-						SPIINT_RX_INTR)
-					cpu_relax();
-			}
-			iowrite32((data1_reg_val & 0x0ffcffff),
-					davinci_spi->base + SPIDAT1);
-		}
-	}
-
-	/*
-	 * Check for bit error, desync error,parity error,timeout error and
-	 * receive overflow errors
-	 */
-	int_status = ioread32(davinci_spi->base + SPIFLG);
-
-	ret = davinci_spi_check_error(davinci_spi, int_status);
-	if (ret != 0)
-		return ret;
-
-	/* SPI Framework maintains the count only in bytes so convert back */
-	davinci_spi->count *= conv;
-
-	return t->len;
-}
-
-#define DAVINCI_DMA_DATA_TYPE_S8	0x01
-#define DAVINCI_DMA_DATA_TYPE_S16	0x02
-#define DAVINCI_DMA_DATA_TYPE_S32	0x04
-
-static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
-{
-	struct davinci_spi *davinci_spi;
-	int int_status = 0;
-	int count, temp_count;
-	u8 conv = 1;
-	u8 tmp;
-	u32 data1_reg_val;
-	struct davinci_spi_dma *davinci_spi_dma;
-	int word_len, data_type, ret;
-	unsigned long tx_reg, rx_reg;
-	struct davinci_spi_platform_data *pdata;
+	unsigned uninitialized_var(rx_buf_count);
 	struct device *sdev;
 
-	davinci_spi = spi_master_get_devdata(spi->master);
-	pdata = davinci_spi->pdata;
-	sdev = davinci_spi->bitbang.master->dev.parent;
-
-	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
-
-	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
-	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
-
-	davinci_spi->tx = t->tx_buf;
-	davinci_spi->rx = t->rx_buf;
+	dspi = spi_master_get_devdata(spi->master);
+	pdata = dspi->pdata;
+	spicfg = (struct davinci_spi_config *)spi->controller_data;
+	if (!spicfg)
+		spicfg = &davinci_spi_default_cfg;
+	sdev = dspi->bitbang.master->dev.parent;
 
 	/* convert len to words based on bits_per_word */
-	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
-	davinci_spi->count = t->len / conv;
+	data_type = dspi->bytes_per_word[spi->chip_select];
 
-	INIT_COMPLETION(davinci_spi->done);
+	dspi->tx = t->tx_buf;
+	dspi->rx = t->rx_buf;
+	dspi->wcount = t->len / data_type;
+	dspi->rcount = dspi->wcount;
 
-	init_completion(&davinci_spi_dma->dma_rx_completion);
-	init_completion(&davinci_spi_dma->dma_tx_completion);
+	spidat1 = ioread32(dspi->base + SPIDAT1);
 
-	word_len = conv * 8;
+	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
 
-	if (word_len <= 8)
-		data_type = DAVINCI_DMA_DATA_TYPE_S8;
-	else if (word_len <= 16)
-		data_type = DAVINCI_DMA_DATA_TYPE_S16;
-	else if (word_len <= 32)
-		data_type = DAVINCI_DMA_DATA_TYPE_S32;
-	else
-		return -EINVAL;
+	INIT_COMPLETION(dspi->done);
 
-	ret = davinci_spi_bufs_prep(spi, davinci_spi);
-	if (ret)
-		return ret;
+	if (spicfg->io_type == SPI_IO_TYPE_INTR)
+		set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
 
-	/* Put delay val if required */
-	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
-			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
-			davinci_spi->base + SPIDELAY);
-
-	count = davinci_spi->count;	/* the number of elements */
-	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
-
-	/* CS default = 0xFF */
-	tmp = ~(0x1 << spi->chip_select);
-
-	clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
-
-	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
-
-	/* disable all interrupts for dma transfers */
-	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
-	/* Disable SPI to write configuration bits in SPIDAT */
-	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
-	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
-	/* Enable SPI */
-	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
-
-	while ((ioread32(davinci_spi->base + SPIBUF)
-				& SPIBUF_RXEMPTY_MASK) == 0)
-		cpu_relax();
-
-
-	if (t->tx_buf) {
-		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
-				DMA_TO_DEVICE);
-		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
-			dev_dbg(sdev, "Unable to DMA map a %d bytes"
-				" TX buffer\n", count);
-			return -ENOMEM;
-		}
-		temp_count = count;
+	if (spicfg->io_type != SPI_IO_TYPE_DMA) {
+		/* start the transfer */
+		dspi->wcount--;
+		tx_data = dspi->get_tx(dspi);
+		spidat1 &= 0xFFFF0000;
+		spidat1 |= tx_data & 0xFFFF;
+		iowrite32(spidat1, dspi->base + SPIDAT1);
 	} else {
-		/* We need TX clocking for RX transaction */
-		t->tx_dma = dma_map_single(&spi->dev,
-				(void *)davinci_spi->tmp_buf, count + 1,
-				DMA_TO_DEVICE);
-		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
-			dev_dbg(sdev, "Unable to DMA map a %d bytes"
-				" TX tmp buffer\n", count);
-			return -ENOMEM;
+		struct davinci_spi_dma *dma;
+		unsigned long tx_reg, rx_reg;
+		struct edmacc_param param;
+		void *rx_buf;
+
+		dma = &dspi->dma;
+
+		tx_reg = (unsigned long)dspi->pbase + SPIDAT1;
+		rx_reg = (unsigned long)dspi->pbase + SPIBUF;
+
+		/*
+		 * Transmit DMA setup
+		 *
+		 * If there is transmit data, map the transmit buffer, set it
+		 * as the source of data and set the source B index to data
+		 * size. If there is no transmit data, set the transmit register
+		 * as the source of data, and set the source B index to zero.
+		 *
+		 * The destination is always the transmit register itself, and
+		 * the destination address never increments.
+		 */
+
+		if (t->tx_buf) {
+			t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
+						dspi->wcount, DMA_TO_DEVICE);
+			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+				dev_dbg(sdev, "Unable to DMA map %d bytes"
+						"TX buffer\n", dspi->wcount);
+				return -ENOMEM;
+			}
 		}
-		temp_count = count + 1;
-	}
 
-	edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
-					data_type, temp_count, 1, 0, ASYNC);
-	edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
-	edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
-	edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
-	edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
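+		/* TX PaRAM set: ACNT = bytes per word, BCNT = number of words */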
+		param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
+		param.src = t->tx_buf ? t->tx_dma : tx_reg;
+		param.a_b_cnt = dspi->wcount << 16 | data_type;
+		param.dst = tx_reg;
+		param.src_dst_bidx = t->tx_buf ? data_type : 0;
+		param.link_bcntrld = 0xffff;
+		param.src_dst_cidx = 0;
+		param.ccnt = 1;
+		edma_write_slot(dma->tx_channel, &param);
+		edma_link(dma->tx_channel, dma->dummy_param_slot);
 
-	if (t->rx_buf) {
-		/* initiate transaction */
-		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
+		/*
+		 * Receive DMA setup
+		 *
+		 * If there is a receive buffer, use it to receive data. If none
+		 * is provided, use a temporary receive buffer. Set the
+		 * destination B index to 0 so effectively only one byte is used
+		 * in the temporary buffer (address does not increment).
+		 *
+		 * The source of receive data is the receive data register. The
+		 * source address never increments.
+		 */
 
-		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
-				DMA_FROM_DEVICE);
+		if (t->rx_buf) {
+			rx_buf = t->rx_buf;
+			rx_buf_count = dspi->rcount;
+		} else {
+			rx_buf = dspi->rx_tmp_buf;
+			rx_buf_count = sizeof(dspi->rx_tmp_buf);
+		}
+
+		t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
+							DMA_FROM_DEVICE);
 		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
 			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
-					count);
-			if (t->tx_buf != NULL)
-				dma_unmap_single(NULL, t->tx_dma,
-						 count, DMA_TO_DEVICE);
+								rx_buf_count);
+			if (t->tx_buf)
+				dma_unmap_single(NULL, t->tx_dma, dspi->wcount,
+								DMA_TO_DEVICE);
 			return -ENOMEM;
 		}
-		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
-				data_type, count, 1, 0, ASYNC);
-		edma_set_src(davinci_spi_dma->dma_rx_channel,
-				rx_reg, INCR, W8BIT);
-		edma_set_dest(davinci_spi_dma->dma_rx_channel,
-				t->rx_dma, INCR, W8BIT);
-		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
-		edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
-				data_type, 0);
+
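+		/* RX PaRAM set: read words from SPIBUF into the receive buffer */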
+		param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
+		param.src = rx_reg;
+		param.a_b_cnt = dspi->rcount << 16 | data_type;
+		param.dst = t->rx_dma;
+		param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
+		param.link_bcntrld = 0xffff;
+		param.src_dst_cidx = 0;
+		param.ccnt = 1;
+		edma_write_slot(dma->rx_channel, &param);
+
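+		/* CS hold quirk: update only the upper half of SPIDAT1, as a 16-bit write */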
+		if (pdata->cshold_bug)
+			iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
+
+		edma_start(dma->rx_channel);
+		edma_start(dma->tx_channel);
+		set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
 	}
 
-	if ((t->tx_buf) || (t->rx_buf))
-		edma_start(davinci_spi_dma->dma_tx_channel);
+	/* Wait for the transfer to complete */
+	if (spicfg->io_type != SPI_IO_TYPE_POLL) {
+		wait_for_completion_interruptible(&(dspi->done));
+	} else {
+		while (dspi->rcount > 0 || dspi->wcount > 0) {
+			errors = davinci_spi_process_events(dspi);
+			if (errors)
+				break;
+			cpu_relax();
+		}
+	}
 
-	if (t->rx_buf)
-		edma_start(davinci_spi_dma->dma_rx_channel);
+	clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
+	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
 
-	if ((t->rx_buf) || (t->tx_buf))
-		davinci_spi_set_dma_req(spi, 1);
+		if (t->tx_buf)
+			dma_unmap_single(NULL, t->tx_dma, dspi->wcount,
+								DMA_TO_DEVICE);
 
-	if (t->tx_buf)
-		wait_for_completion_interruptible(
-				&davinci_spi_dma->dma_tx_completion);
+		dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
+							DMA_FROM_DEVICE);
 
-	if (t->rx_buf)
-		wait_for_completion_interruptible(
-				&davinci_spi_dma->dma_rx_completion);
+		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+	}
 
-	dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);
-
-	if (t->rx_buf)
-		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);
+	clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
 
 	/*
 	 * Check for bit error, desync error, parity error, timeout error and
 	 * receive overflow errors
 	 */
-	int_status = ioread32(davinci_spi->base + SPIFLG);
-
-	ret = davinci_spi_check_error(davinci_spi, int_status);
-	if (ret != 0)
+	if (errors) {
+		ret = davinci_spi_check_error(dspi, errors);
+		WARN(!ret, "%s: error reported but no error found!\n",
+							dev_name(&spi->dev));
 		return ret;
+	}
 
-	/* SPI Framework maintains the count only in bytes so convert back */
-	davinci_spi->count *= conv;
+	if (dspi->rcount != 0 || dspi->wcount != 0) {
+		dev_err(sdev, "SPI data transfer error\n");
+		return -EIO;
+	}
 
 	return t->len;
 }
 
 /**
- * davinci_spi_irq - IRQ handler for DaVinci SPI
+ * davinci_spi_irq - Interrupt handler for SPI Master Controller
  * @irq: IRQ number for this SPI Master
  * @context_data: structure for SPI Master controller davinci_spi
+ *
+ * The ISR determines whether the interrupt arrived for a READ or a WRITE
+ * command and takes the appropriate action. It checks the transfer length
+ * and, if it is not zero, dispatches the transfer command again. Once the
+ * transfer length reaches zero it signals completion so that the
+ * davinci_spi_bufs function can proceed.
  */
-static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
+static irqreturn_t davinci_spi_irq(s32 irq, void *data)
 {
-	struct davinci_spi *davinci_spi = context_data;
-	u32 int_status, rx_data = 0;
-	irqreturn_t ret = IRQ_NONE;
+	struct davinci_spi *dspi = data;
+	int status;
 
-	int_status = ioread32(davinci_spi->base + SPIFLG);
+	status = davinci_spi_process_events(dspi);
+	if (unlikely(status != 0))
+		clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
 
-	while ((int_status & SPIFLG_RX_INTR_MASK)) {
-		if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
-			ret = IRQ_HANDLED;
+	if ((!dspi->rcount && !dspi->wcount) || status)
+		complete(&dspi->done);
 
-			rx_data = ioread32(davinci_spi->base + SPIBUF);
-			davinci_spi->get_rx(rx_data, davinci_spi);
+	return IRQ_HANDLED;
+}
 
-			/* Disable Receive Interrupt */
-			iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
-					davinci_spi->base + SPIINT);
-		} else
-			(void)davinci_spi_check_error(davinci_spi, int_status);
+static int davinci_spi_request_dma(struct davinci_spi *dspi)
+{
+	int r;
+	struct davinci_spi_dma *dma = &dspi->dma;
 
-		int_status = ioread32(davinci_spi->base + SPIFLG);
+	r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
+								dma->eventq);
+	if (r < 0) {
+		pr_err("Unable to request DMA channel for SPI RX\n");
+		r = -EAGAIN;
+		goto rx_dma_failed;
 	}
 
-	return ret;
+	r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
+								dma->eventq);
+	if (r < 0) {
+		pr_err("Unable to request DMA channel for SPI TX\n");
+		r = -EAGAIN;
+		goto tx_dma_failed;
+	}
+
+	r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
+	if (r < 0) {
+		pr_err("Unable to request SPI TX DMA param slot\n");
+		r = -EAGAIN;
+		goto param_failed;
+	}
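+	/* self-linked dummy slot used to terminate the TX transfer chain */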
+	dma->dummy_param_slot = r;
+	edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
+
+	return 0;
+param_failed:
+	edma_free_channel(dma->tx_channel);
+tx_dma_failed:
+	edma_free_channel(dma->rx_channel);
+rx_dma_failed:
+	return r;
 }
 
 /**
  * davinci_spi_probe - probe function for SPI Master Controller
  * @pdev: platform_device structure which contains platform specific data
+ *
+ * According to the Linux device model, this function is invoked with a
+ * platform_device struct that contains the device-specific info. It maps the
+ * SPI controller's memory, registers the IRQ, resets the SPI controller and
+ * programs its registers to default values. It then invokes spi_bitbang_start
+ * to create the work queue used to process client transfer requests.
  */
 static int davinci_spi_probe(struct platform_device *pdev)
 {
 	struct spi_master *master;
-	struct davinci_spi *davinci_spi;
+	struct davinci_spi *dspi;
 	struct davinci_spi_platform_data *pdata;
 	struct resource *r, *mem;
 	resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
 	resource_size_t	dma_tx_chan = SPI_NO_RESOURCE;
 	resource_size_t	dma_eventq = SPI_NO_RESOURCE;
 	int i = 0, ret = 0;
+	u32 spipc0;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -1035,8 +808,8 @@
 
 	dev_set_drvdata(&pdev->dev, master);
 
-	davinci_spi = spi_master_get_devdata(master);
-	if (davinci_spi == NULL) {
+	dspi = spi_master_get_devdata(master);
+	if (dspi == NULL) {
 		ret = -ENOENT;
 		goto free_master;
 	}
@@ -1047,164 +820,143 @@
 		goto free_master;
 	}
 
-	davinci_spi->pbase = r->start;
-	davinci_spi->region_size = resource_size(r);
-	davinci_spi->pdata = pdata;
+	dspi->pbase = r->start;
+	dspi->pdata = pdata;
 
-	mem = request_mem_region(r->start, davinci_spi->region_size,
-					pdev->name);
+	mem = request_mem_region(r->start, resource_size(r), pdev->name);
 	if (mem == NULL) {
 		ret = -EBUSY;
 		goto free_master;
 	}
 
-	davinci_spi->base = (struct davinci_spi_reg __iomem *)
-			ioremap(r->start, davinci_spi->region_size);
-	if (davinci_spi->base == NULL) {
+	dspi->base = ioremap(r->start, resource_size(r));
+	if (dspi->base == NULL) {
 		ret = -ENOMEM;
 		goto release_region;
 	}
 
-	davinci_spi->irq = platform_get_irq(pdev, 0);
-	if (davinci_spi->irq <= 0) {
+	dspi->irq = platform_get_irq(pdev, 0);
+	if (dspi->irq <= 0) {
 		ret = -EINVAL;
 		goto unmap_io;
 	}
 
-	ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
-			  dev_name(&pdev->dev), davinci_spi);
+	ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev),
+									dspi);
 	if (ret)
 		goto unmap_io;
 
-	/* Allocate tmp_buf for tx_buf */
-	davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
-	if (davinci_spi->tmp_buf == NULL) {
-		ret = -ENOMEM;
+	dspi->bitbang.master = spi_master_get(master);
+	if (dspi->bitbang.master == NULL) {
+		ret = -ENODEV;
 		goto irq_free;
 	}
 
-	davinci_spi->bitbang.master = spi_master_get(master);
-	if (davinci_spi->bitbang.master == NULL) {
-		ret = -ENODEV;
-		goto free_tmp_buf;
-	}
-
-	davinci_spi->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(davinci_spi->clk)) {
+	dspi->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dspi->clk)) {
 		ret = -ENODEV;
 		goto put_master;
 	}
-	clk_enable(davinci_spi->clk);
-
+	clk_enable(dspi->clk);
 
 	master->bus_num = pdev->id;
 	master->num_chipselect = pdata->num_chipselect;
 	master->setup = davinci_spi_setup;
-	master->cleanup = davinci_spi_cleanup;
 
-	davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
-	davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
+	dspi->bitbang.chipselect = davinci_spi_chipselect;
+	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
 
-	davinci_spi->version = pdata->version;
-	use_dma = pdata->use_dma;
+	dspi->version = pdata->version;
 
-	davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
-	if (davinci_spi->version == SPI_VERSION_2)
-		davinci_spi->bitbang.flags |= SPI_READY;
+	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
+	if (dspi->version == SPI_VERSION_2)
+		dspi->bitbang.flags |= SPI_READY;
 
-	if (use_dma) {
-			r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-			if (r)
-				dma_rx_chan = r->start;
-			r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-			if (r)
-				dma_tx_chan = r->start;
-			r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
-			if (r)
-				dma_eventq = r->start;
-	}
+	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (r)
+		dma_rx_chan = r->start;
+	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (r)
+		dma_tx_chan = r->start;
+	r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+	if (r)
+		dma_eventq = r->start;
 
-	if (!use_dma ||
-	    dma_rx_chan == SPI_NO_RESOURCE ||
-	    dma_tx_chan == SPI_NO_RESOURCE ||
-	    dma_eventq	== SPI_NO_RESOURCE) {
-		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
-		use_dma = 0;
-	} else {
-		davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
-		davinci_spi->dma_channels = kzalloc(master->num_chipselect
-				* sizeof(struct davinci_spi_dma), GFP_KERNEL);
-		if (davinci_spi->dma_channels == NULL) {
-			ret = -ENOMEM;
+	dspi->bitbang.txrx_bufs = davinci_spi_bufs;
+	if (dma_rx_chan != SPI_NO_RESOURCE &&
+	    dma_tx_chan != SPI_NO_RESOURCE &&
+	    dma_eventq != SPI_NO_RESOURCE) {
+		dspi->dma.rx_channel = dma_rx_chan;
+		dspi->dma.tx_channel = dma_tx_chan;
+		dspi->dma.eventq = dma_eventq;
+
+		ret = davinci_spi_request_dma(dspi);
+		if (ret)
 			goto free_clk;
-		}
 
-		for (i = 0; i < master->num_chipselect; i++) {
-			davinci_spi->dma_channels[i].dma_rx_channel = -1;
-			davinci_spi->dma_channels[i].dma_rx_sync_dev =
-				dma_rx_chan;
-			davinci_spi->dma_channels[i].dma_tx_channel = -1;
-			davinci_spi->dma_channels[i].dma_tx_sync_dev =
-				dma_tx_chan;
-			davinci_spi->dma_channels[i].eventq = dma_eventq;
-		}
-		dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
-				"Using RX channel = %d , TX channel = %d and "
-				"event queue = %d", dma_rx_chan, dma_tx_chan,
+		dev_info(&pdev->dev, "DMA: supported\n");
+		dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
+				"event queue: %d\n", dma_rx_chan, dma_tx_chan,
 				dma_eventq);
 	}
 
-	davinci_spi->get_rx = davinci_spi_rx_buf_u8;
-	davinci_spi->get_tx = davinci_spi_tx_buf_u8;
+	dspi->get_rx = davinci_spi_rx_buf_u8;
+	dspi->get_tx = davinci_spi_tx_buf_u8;
 
-	init_completion(&davinci_spi->done);
+	init_completion(&dspi->done);
 
 	/* Reset In/OUT SPI module */
-	iowrite32(0, davinci_spi->base + SPIGCR0);
+	iowrite32(0, dspi->base + SPIGCR0);
 	udelay(100);
-	iowrite32(1, davinci_spi->base + SPIGCR0);
+	iowrite32(1, dspi->base + SPIGCR0);
 
-	/* Clock internal */
-	if (davinci_spi->pdata->clk_internal)
-		set_io_bits(davinci_spi->base + SPIGCR1,
-				SPIGCR1_CLKMOD_MASK);
+	/* Set up SPIPC0.  CS and ENA init is done in davinci_spi_setup */
+	spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
+	iowrite32(spipc0, dspi->base + SPIPC0);
+
+	/* initialize chip selects */
+	if (pdata->chip_sel) {
+		for (i = 0; i < pdata->num_chipselect; i++) {
+			if (pdata->chip_sel[i] != SPI_INTERN_CS)
+				gpio_direction_output(pdata->chip_sel[i], 1);
+		}
+	}
+
+	if (pdata->intr_line)
+		iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
 	else
-		clear_io_bits(davinci_spi->base + SPIGCR1,
-				SPIGCR1_CLKMOD_MASK);
+		iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);
+
+	iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
 
 	/* master mode default */
-	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+	set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
 
-	if (davinci_spi->pdata->intr_level)
-		iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
-	else
-		iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
-
-	ret = spi_bitbang_start(&davinci_spi->bitbang);
+	ret = spi_bitbang_start(&dspi->bitbang);
 	if (ret)
-		goto free_clk;
+		goto free_dma;
 
-	dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base);
-
-	if (!pdata->poll_mode)
-		dev_info(&pdev->dev, "Operating in interrupt mode"
-			" using IRQ %d\n", davinci_spi->irq);
+	dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);
 
 	return ret;
 
+free_dma:
+	edma_free_channel(dspi->dma.tx_channel);
+	edma_free_channel(dspi->dma.rx_channel);
+	edma_free_slot(dspi->dma.dummy_param_slot);
 free_clk:
-	clk_disable(davinci_spi->clk);
-	clk_put(davinci_spi->clk);
+	clk_disable(dspi->clk);
+	clk_put(dspi->clk);
 put_master:
 	spi_master_put(master);
-free_tmp_buf:
-	kfree(davinci_spi->tmp_buf);
 irq_free:
-	free_irq(davinci_spi->irq, davinci_spi);
+	free_irq(dspi->irq, dspi);
 unmap_io:
-	iounmap(davinci_spi->base);
+	iounmap(dspi->base);
 release_region:
-	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
+	release_mem_region(dspi->pbase, resource_size(r));
 free_master:
 	kfree(master);
 err:
@@ -1222,27 +974,31 @@
  */
 static int __exit davinci_spi_remove(struct platform_device *pdev)
 {
-	struct davinci_spi *davinci_spi;
+	struct davinci_spi *dspi;
 	struct spi_master *master;
+	struct resource *r;
 
 	master = dev_get_drvdata(&pdev->dev);
-	davinci_spi = spi_master_get_devdata(master);
+	dspi = spi_master_get_devdata(master);
 
-	spi_bitbang_stop(&davinci_spi->bitbang);
+	spi_bitbang_stop(&dspi->bitbang);
 
-	clk_disable(davinci_spi->clk);
-	clk_put(davinci_spi->clk);
+	clk_disable(dspi->clk);
+	clk_put(dspi->clk);
 	spi_master_put(master);
-	kfree(davinci_spi->tmp_buf);
-	free_irq(davinci_spi->irq, davinci_spi);
-	iounmap(davinci_spi->base);
-	release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
+	free_irq(dspi->irq, dspi);
+	iounmap(dspi->base);
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(dspi->pbase, resource_size(r));
 
 	return 0;
 }
 
 static struct platform_driver davinci_spi_driver = {
-	.driver.name = "spi_davinci",
+	.driver = {
+		.name = "spi_davinci",
+		.owner = THIS_MODULE,
+	},
 	.remove = __exit_p(davinci_spi_remove),
 };
 
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index 0838c79..22af77f 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -164,20 +164,23 @@
 
 static void wait_till_not_busy(struct dw_spi *dws)
 {
-	unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
+	unsigned long end = jiffies + 1 + usecs_to_jiffies(5000);
 
 	while (time_before(jiffies, end)) {
 		if (!(dw_readw(dws, sr) & SR_BUSY))
 			return;
+		cpu_relax();
 	}
 	dev_err(&dws->master->dev,
-		"DW SPI: Status keeps busy for 1000us after a read/write!\n");
+		"DW SPI: Status keeps busy for 5000us after a read/write!\n");
 }
 
 static void flush(struct dw_spi *dws)
 {
-	while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+	while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) {
 		dw_readw(dws, dr);
+		cpu_relax();
+	}
 
 	wait_till_not_busy(dws);
 }
@@ -285,8 +288,10 @@
  */
 static int map_dma_buffers(struct dw_spi *dws)
 {
-	if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
-		|| !dws->cur_chip->enable_dma)
+	if (!dws->cur_msg->is_dma_mapped
+		|| !dws->dma_inited
+		|| !dws->cur_chip->enable_dma
+		|| !dws->dma_ops)
 		return 0;
 
 	if (dws->cur_transfer->tx_dma)
@@ -338,7 +343,7 @@
 	tasklet_schedule(&dws->pump_transfers);
 }
 
-static void transfer_complete(struct dw_spi *dws)
+void dw_spi_xfer_done(struct dw_spi *dws)
 {
 	/* Update total bytes transferred; return count is the actual bytes read */
 	dws->cur_msg->actual_length += dws->len;
@@ -353,6 +358,7 @@
 	} else
 		tasklet_schedule(&dws->pump_transfers);
 }
+EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
 
 static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 {
@@ -384,7 +390,7 @@
 		if (dws->tx_end > dws->tx)
 			spi_umask_intr(dws, SPI_INT_TXEI);
 		else
-			transfer_complete(dws);
+			dw_spi_xfer_done(dws);
 	}
 
 	return IRQ_HANDLED;
@@ -419,11 +425,7 @@
 	 */
 	dws->read(dws);
 
-	transfer_complete(dws);
-}
-
-static void dma_transfer(struct dw_spi *dws, int cs_change)
-{
+	dw_spi_xfer_done(dws);
 }
 
 static void pump_transfers(unsigned long data)
@@ -592,7 +594,7 @@
 		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
 		spi_chip_sel(dws, spi->chip_select);
 
-		/* Set the interrupt mask, for poll mode just diable all int */
+		/* Set the interrupt mask, for poll mode just disable all int */
 		spi_mask_intr(dws, 0xff);
 		if (imask)
 			spi_umask_intr(dws, imask);
@@ -605,7 +607,7 @@
 	}
 
 	if (dws->dma_mapped)
-		dma_transfer(dws, cs_change);
+		dws->dma_ops->dma_transfer(dws, cs_change);
 
 	if (chip->poll_mode)
 		poll_transfer(dws);
@@ -901,11 +903,17 @@
 	master->setup = dw_spi_setup;
 	master->transfer = dw_spi_transfer;
 
-	dws->dma_inited = 0;
-
 	/* Basic HW init */
 	spi_hw_init(dws);
 
+	if (dws->dma_ops && dws->dma_ops->dma_init) {
+		ret = dws->dma_ops->dma_init(dws);
+		if (ret) {
+			dev_warn(&master->dev, "DMA init failed\n");
+			dws->dma_inited = 0;
+		}
+	}
+
 	/* Initial and start queue */
 	ret = init_queue(dws);
 	if (ret) {
@@ -930,6 +938,8 @@
 
 err_queue_alloc:
 	destroy_queue(dws);
+	if (dws->dma_ops && dws->dma_ops->dma_exit)
+		dws->dma_ops->dma_exit(dws);
 err_diable_hw:
 	spi_enable_chip(dws, 0);
 	free_irq(dws->irq, dws);
@@ -938,7 +948,7 @@
 exit:
 	return ret;
 }
-EXPORT_SYMBOL(dw_spi_add_host);
+EXPORT_SYMBOL_GPL(dw_spi_add_host);
 
 void __devexit dw_spi_remove_host(struct dw_spi *dws)
 {
@@ -954,6 +964,8 @@
 		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
 			"complete, message memory not freed\n");
 
+	if (dws->dma_ops && dws->dma_ops->dma_exit)
+		dws->dma_ops->dma_exit(dws);
 	spi_enable_chip(dws, 0);
 	/* Disable clk */
 	spi_set_clk(dws, 0);
@@ -962,7 +974,7 @@
 	/* Disconnect from the SPI framework */
 	spi_unregister_master(dws->master);
 }
-EXPORT_SYMBOL(dw_spi_remove_host);
+EXPORT_SYMBOL_GPL(dw_spi_remove_host);
 
 int dw_spi_suspend_host(struct dw_spi *dws)
 {
@@ -975,7 +987,7 @@
 	spi_set_clk(dws, 0);
 	return ret;
 }
-EXPORT_SYMBOL(dw_spi_suspend_host);
+EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
 
 int dw_spi_resume_host(struct dw_spi *dws)
 {
@@ -987,7 +999,7 @@
 		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
 	return ret;
 }
-EXPORT_SYMBOL(dw_spi_resume_host);
+EXPORT_SYMBOL_GPL(dw_spi_resume_host);
 
 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
 MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/dw_spi_mid.c
new file mode 100644
index 0000000..c91c966
--- /dev/null
+++ b/drivers/spi/dw_spi_mid.c
@@ -0,0 +1,223 @@
+/*
+ * dw_spi_mid.c - special handling for DW core on Intel MID platform
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/dw_spi.h>
+
+#ifdef CONFIG_SPI_DW_MID_DMA
+#include <linux/intel_mid_dma.h>
+#include <linux/pci.h>
+
+struct mid_dma {
+	struct intel_mid_dma_slave	dmas_tx;
+	struct intel_mid_dma_slave	dmas_rx;
+};
+
+static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+	struct dw_spi *dws = param;
+
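+	/* accept only channels provided by the Intel MID DMA controller */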
+	return dws->dmac && (&dws->dmac->dev == chan->device->dev);
+}
+
+static int mid_spi_dma_init(struct dw_spi *dws)
+{
+	struct mid_dma *dw_dma = dws->dma_priv;
+	struct intel_mid_dma_slave *rxs, *txs;
+	dma_cap_mask_t mask;
+
+	/*
+	 * Get the PCI device for the DMA controller; currently it can only
+	 * be the DMA controller of either Moorestown or Medfield
+	 */
+	dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
+	if (!dws->dmac)
+		dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* 1. Init rx channel */
+	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
+	if (!dws->rxchan)
+		goto err_exit;
+	rxs = &dw_dma->dmas_rx;
+	rxs->hs_mode = LNW_DMA_HW_HS;
+	rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
+	dws->rxchan->private = rxs;
+
+	/* 2. Init tx channel */
+	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
+	if (!dws->txchan)
+		goto free_rxchan;
+	txs = &dw_dma->dmas_tx;
+	txs->hs_mode = LNW_DMA_HW_HS;
+	txs->cfg_mode = LNW_DMA_MEM_TO_PER;
+	dws->txchan->private = txs;
+
+	dws->dma_inited = 1;
+	return 0;
+
+free_rxchan:
+	dma_release_channel(dws->rxchan);
+err_exit:
+	return -1;
+
+}
+
+static void mid_spi_dma_exit(struct dw_spi *dws)
+{
+	dma_release_channel(dws->txchan);
+	dma_release_channel(dws->rxchan);
+}
+
+/*
+ * dws->dma_chan_done is cleared before the dma transfer starts; the rx and
+ * tx channel callbacks each increment it by 1. Reaching 2 means the whole
+ * spi transaction is done.
+ */
+static void dw_spi_dma_done(void *arg)
+{
+	struct dw_spi *dws = arg;
+
+	if (++dws->dma_chan_done != 2)
+		return;
+	dw_spi_xfer_done(dws);
+}
+
+static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
+{
+	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
+	struct dma_chan *txchan, *rxchan;
+	struct dma_slave_config txconf, rxconf;
+	u16 dma_ctrl = 0;
+
+	/* 1. setup DMA related registers */
+	if (cs_change) {
+		spi_enable_chip(dws, 0);
+		dw_writew(dws, dmardlr, 0xf);
+		dw_writew(dws, dmatdlr, 0x10);
+		if (dws->tx_dma)
+			dma_ctrl |= 0x2;
+		if (dws->rx_dma)
+			dma_ctrl |= 0x1;
+		dw_writew(dws, dmacr, dma_ctrl);
+		spi_enable_chip(dws, 1);
+	}
+
+	dws->dma_chan_done = 0;
+	txchan = dws->txchan;
+	rxchan = dws->rxchan;
+
+	/* 2. Prepare the TX dma transfer */
+	txconf.direction = DMA_TO_DEVICE;
+	txconf.dst_addr = dws->dma_addr;
+	txconf.dst_maxburst = LNW_DMA_MSIZE_16;
+	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
+				       (unsigned long) &txconf);
+
+	memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
+	dws->tx_sgl.dma_address = dws->tx_dma;
+	dws->tx_sgl.length = dws->len;
+
+	txdesc = txchan->device->device_prep_slave_sg(txchan,
+				&dws->tx_sgl,
+				1,
+				DMA_TO_DEVICE,
+				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+	txdesc->callback = dw_spi_dma_done;
+	txdesc->callback_param = dws;
+
+	/* 3. Prepare the RX dma transfer */
+	rxconf.direction = DMA_FROM_DEVICE;
+	rxconf.src_addr = dws->dma_addr;
+	rxconf.src_maxburst = LNW_DMA_MSIZE_16;
+	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+	rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
+				       (unsigned long) &rxconf);
+
+	memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
+	dws->rx_sgl.dma_address = dws->rx_dma;
+	dws->rx_sgl.length = dws->len;
+
+	rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
+				&dws->rx_sgl,
+				1,
+				DMA_FROM_DEVICE,
+				DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
+	rxdesc->callback = dw_spi_dma_done;
+	rxdesc->callback_param = dws;
+
+	/* rx must be started before tx due to the full-duplex nature of spi */
+	rxdesc->tx_submit(rxdesc);
+	txdesc->tx_submit(txdesc);
+	return 0;
+}
+
+static struct dw_spi_dma_ops mid_dma_ops = {
+	.dma_init	= mid_spi_dma_init,
+	.dma_exit	= mid_spi_dma_exit,
+	.dma_transfer	= mid_spi_dma_transfer,
+};
+#endif
+
+/* Some specific info for SPI0 controller on Moorestown */
+
+/* HW info for MRST CLk Control Unit, one 32b reg */
+#define MRST_SPI_CLK_BASE	100000000	/* 100m */
+#define MRST_CLK_SPI0_REG	0xff11d86c
+#define CLK_SPI_BDIV_OFFSET	0
+#define CLK_SPI_BDIV_MASK	0x00000007
+#define CLK_SPI_CDIV_OFFSET	9
+#define CLK_SPI_CDIV_MASK	0x00000e00
+#define CLK_SPI_DISABLE_OFFSET	8
+
+int dw_spi_mid_init(struct dw_spi *dws)
+{
+	u32 *clk_reg, clk_cdiv;
+
+	clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+	if (!clk_reg)
+		return -ENOMEM;
+
+	/* get SPI controller operating freq info */
+	clk_cdiv  = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
+	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
+	iounmap(clk_reg);
+
+	dws->num_cs = 16;
+	dws->fifo_len = 40;	/* FIFO has 40 words buffer */
+
+#ifdef CONFIG_SPI_DW_MID_DMA
+	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
+	if (!dws->dma_priv)
+		return -ENOMEM;
+	dws->dma_ops = &mid_dma_ops;
+#endif
+	return 0;
+}
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
index db35bd9..2fa012c 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/dw_spi_mmio.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -68,8 +69,8 @@
 	}
 
 	dwsmmio->clk = clk_get(&pdev->dev, NULL);
-	if (!dwsmmio->clk) {
-		ret = -ENODEV;
+	if (IS_ERR(dwsmmio->clk)) {
+		ret = PTR_ERR(dwsmmio->clk);
 		goto err_irq;
 	}
 	clk_enable(dwsmmio->clk);
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
index 1f52755..49ec3aa 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/dw_spi_pci.c
@@ -1,5 +1,5 @@
 /*
- * mrst_spi_pci.c - PCI interface driver for DW SPI Core
+ * dw_spi_pci.c - PCI interface driver for DW SPI Core
  *
  * Copyright (c) 2009, Intel Corporation.
  *
@@ -26,8 +26,8 @@
 #define DRIVER_NAME "dw_spi_pci"
 
 struct dw_spi_pci {
-	struct pci_dev		*pdev;
-	struct dw_spi		dws;
+	struct pci_dev	*pdev;
+	struct dw_spi	dws;
 };
 
 static int __devinit spi_pci_probe(struct pci_dev *pdev,
@@ -72,9 +72,17 @@
 	dws->parent_dev = &pdev->dev;
 	dws->bus_num = 0;
 	dws->num_cs = 4;
-	dws->max_freq = 25000000;	/* for Moorestwon */
 	dws->irq = pdev->irq;
-	dws->fifo_len = 40;		/* FIFO has 40 words buffer */
+
+	/*
+	 * Specific handling for Intel MID platforms, like dma setup,
+	 * clock rate, FIFO depth.
+	 */
+	if (pdev->device == 0x0800) {
+		ret = dw_spi_mid_init(dws);
+		if (ret)
+			goto err_unmap;
+	}
 
 	ret = dw_spi_add_host(dws);
 	if (ret)
@@ -140,7 +148,7 @@
 #endif
 
 static const struct pci_device_id pci_ids[] __devinitdata = {
-	/* Intel Moorestown platform SPI controller 0 */
+	/* Intel MID platform SPI controller 0 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
 	{},
 };
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 983fbbf..8a904c1 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -363,7 +363,7 @@
 }
 
 /* bus_num is used only for the case dev->platform_data == NULL */
-static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
+static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
 				u32 size, unsigned int irq, s16 bus_num)
 {
 	struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -450,22 +450,7 @@
 	return ret;
 }
 
-static int __exit mpc52xx_psc_spi_do_remove(struct device *dev)
-{
-	struct spi_master *master = dev_get_drvdata(dev);
-	struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
-
-	flush_workqueue(mps->workqueue);
-	destroy_workqueue(mps->workqueue);
-	spi_unregister_master(master);
-	free_irq(mps->irq, mps);
-	if (mps->psc)
-		iounmap(mps->psc);
-
-	return 0;
-}
-
-static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op,
+static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op,
 	const struct of_device_id *match)
 {
 	const u32 *regaddr_p;
@@ -495,9 +480,19 @@
 				irq_of_parse_and_map(op->dev.of_node, 0), id);
 }
 
-static int __exit mpc52xx_psc_spi_of_remove(struct platform_device *op)
+static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
 {
-	return mpc52xx_psc_spi_do_remove(&op->dev);
+	struct spi_master *master = dev_get_drvdata(&op->dev);
+	struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
+
+	flush_workqueue(mps->workqueue);
+	destroy_workqueue(mps->workqueue);
+	spi_unregister_master(master);
+	free_irq(mps->irq, mps);
+	if (mps->psc)
+		iounmap(mps->psc);
+
+	return 0;
 }
 
 static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
@@ -510,7 +505,7 @@
 
 static struct of_platform_driver mpc52xx_psc_spi_of_driver = {
 	.probe = mpc52xx_psc_spi_of_probe,
-	.remove = __exit_p(mpc52xx_psc_spi_of_remove),
+	.remove = __devexit_p(mpc52xx_psc_spi_of_remove),
 	.driver = {
 		.name = "mpc52xx-psc-spi",
 		.owner = THIS_MODULE,
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 951a160f..abb1ffb 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -397,7 +397,7 @@
 
 	if (tx != NULL) {
 		wait_for_completion(&mcspi_dma->dma_tx_completion);
-		dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
+		dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE);
 
 		/* for TX_ONLY mode, be sure all words have shifted out */
 		if (rx == NULL) {
@@ -412,7 +412,7 @@
 
 	if (rx != NULL) {
 		wait_for_completion(&mcspi_dma->dma_rx_completion);
-		dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
+		dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
 		omap2_mcspi_set_enable(spi, 0);
 
 		if (l & OMAP2_MCSPI_CHCONF_TURBO) {
@@ -1025,11 +1025,6 @@
 		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
 			continue;
 
-		/* Do DMA mapping "early" for better error reporting and
-		 * dcache use.  Note that if dma_unmap_single() ever starts
-		 * to do real work on ARM, we'd need to clean up mappings
-		 * for previous transfers on *ALL* exits of this loop...
-		 */
 		if (tx_buf != NULL) {
 			t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
 					len, DMA_TO_DEVICE);
@@ -1046,7 +1041,7 @@
 				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
 						'R', len);
 				if (tx_buf != NULL)
-					dma_unmap_single(NULL, t->tx_dma,
+					dma_unmap_single(&spi->dev, t->tx_dma,
 							len, DMA_TO_DEVICE);
 				return -EINVAL;
 			}
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index e76b1af..9592883 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -23,11 +23,11 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/dma-mapping.h>
 #include <linux/spi/spi.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
-#include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
 
@@ -35,9 +35,6 @@
 #include <asm/irq.h>
 #include <asm/delay.h>
 
-#include <mach/dma.h>
-#include <plat/ssp.h>
-#include <mach/pxa2xx_spi.h>
 
 MODULE_AUTHOR("Stephen Street");
 MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
@@ -46,8 +43,6 @@
 
 #define MAX_BUSES 3
 
-#define RX_THRESH_DFLT 	8
-#define TX_THRESH_DFLT 	8
 #define TIMOUT_DFLT		1000
 
 #define DMA_INT_MASK		(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -168,7 +163,10 @@
 	u8 enable_dma;
 	u8 bits_per_word;
 	u32 speed_hz;
-	int gpio_cs;
+	union {
+		int gpio_cs;
+		unsigned int frm;
+	};
 	int gpio_cs_inverted;
 	int (*write)(struct driver_data *drv_data);
 	int (*read)(struct driver_data *drv_data);
@@ -181,6 +179,11 @@
 {
 	struct chip_data *chip = drv_data->cur_chip;
 
+	if (drv_data->ssp_type == CE4100_SSP) {
+		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
+		return;
+	}
+
 	if (chip->cs_control) {
 		chip->cs_control(PXA2XX_CS_ASSERT);
 		return;
@@ -194,6 +197,9 @@
 {
 	struct chip_data *chip = drv_data->cur_chip;
 
+	if (drv_data->ssp_type == CE4100_SSP)
+		return;
+
 	if (chip->cs_control) {
 		chip->cs_control(PXA2XX_CS_DEASSERT);
 		return;
@@ -203,6 +209,25 @@
 		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
 }
 
+static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
+{
+	void __iomem *reg = drv_data->ioaddr;
+
+	if (drv_data->ssp_type == CE4100_SSP)
+		val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
+
+	write_SSSR(val, reg);
+}
+
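+/* CE4100's SSP is treated like a PXA25x SSP by the rest of this driver */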
+static int pxa25x_ssp_comp(struct driver_data *drv_data)
+{
+	if (drv_data->ssp_type == PXA25x_SSP)
+		return 1;
+	if (drv_data->ssp_type == CE4100_SSP)
+		return 1;
+	return 0;
+}
+
 static int flush(struct driver_data *drv_data)
 {
 	unsigned long limit = loops_per_jiffy << 1;
@@ -214,7 +239,7 @@
 			read_SSDR(reg);
 		}
 	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
-	write_SSSR(SSSR_ROR, reg);
+	write_SSSR_CS(drv_data, SSSR_ROR);
 
 	return limit;
 }
@@ -224,7 +249,7 @@
 	void __iomem *reg = drv_data->ioaddr;
 	u8 n_bytes = drv_data->n_bytes;
 
-	if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
 		|| (drv_data->tx == drv_data->tx_end))
 		return 0;
 
@@ -252,7 +277,7 @@
 {
 	void __iomem *reg = drv_data->ioaddr;
 
-	if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
 		|| (drv_data->tx == drv_data->tx_end))
 		return 0;
 
@@ -279,7 +304,7 @@
 {
 	void __iomem *reg = drv_data->ioaddr;
 
-	if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
 		|| (drv_data->tx == drv_data->tx_end))
 		return 0;
 
@@ -306,7 +331,7 @@
 {
 	void __iomem *reg = drv_data->ioaddr;
 
-	if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
+	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
 		|| (drv_data->tx == drv_data->tx_end))
 		return 0;
 
@@ -507,9 +532,9 @@
 	/* Stop and reset */
 	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
 	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
-	write_SSSR(drv_data->clear_sr, reg);
+	write_SSSR_CS(drv_data, drv_data->clear_sr);
 	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
-	if (drv_data->ssp_type != PXA25x_SSP)
+	if (!pxa25x_ssp_comp(drv_data))
 		write_SSTO(0, reg);
 	flush(drv_data);
 	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
@@ -529,7 +554,7 @@
 
 	/* Clear and disable interrupts on SSP and DMA channels*/
 	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
-	write_SSSR(drv_data->clear_sr, reg);
+	write_SSSR_CS(drv_data, drv_data->clear_sr);
 	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
 	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
 
@@ -622,7 +647,7 @@
 
 		/* Clear and disable timeout interrupt, do the rest in
 		 * dma_transfer_complete */
-		if (drv_data->ssp_type != PXA25x_SSP)
+		if (!pxa25x_ssp_comp(drv_data))
 			write_SSTO(0, reg);
 
 		/* finish this transfer, start the next */
@@ -635,14 +660,26 @@
 	return IRQ_NONE;
 }
 
+static void reset_sccr1(struct driver_data *drv_data)
+{
+	void __iomem *reg = drv_data->ioaddr;
+	struct chip_data *chip = drv_data->cur_chip;
+	u32 sccr1_reg;
+
+	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
+	sccr1_reg &= ~SSCR1_RFT;
+	sccr1_reg |= chip->threshold;
+	write_SSCR1(sccr1_reg, reg);
+}
+
 static void int_error_stop(struct driver_data *drv_data, const char* msg)
 {
 	void __iomem *reg = drv_data->ioaddr;
 
 	/* Stop and reset SSP */
-	write_SSSR(drv_data->clear_sr, reg);
-	write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
-	if (drv_data->ssp_type != PXA25x_SSP)
+	write_SSSR_CS(drv_data, drv_data->clear_sr);
+	reset_sccr1(drv_data);
+	if (!pxa25x_ssp_comp(drv_data))
 		write_SSTO(0, reg);
 	flush(drv_data);
 	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
@@ -658,9 +695,9 @@
 	void __iomem *reg = drv_data->ioaddr;
 
 	/* Stop SSP */
-	write_SSSR(drv_data->clear_sr, reg);
-	write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
-	if (drv_data->ssp_type != PXA25x_SSP)
+	write_SSSR_CS(drv_data, drv_data->clear_sr);
+	reset_sccr1(drv_data);
+	if (!pxa25x_ssp_comp(drv_data))
 		write_SSTO(0, reg);
 
 	/* Update total bytes transferred; return count is the actual bytes read */
@@ -714,24 +751,34 @@
 	}
 
 	if (drv_data->tx == drv_data->tx_end) {
-		write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg);
-		/* PXA25x_SSP has no timeout, read trailing bytes */
-		if (drv_data->ssp_type == PXA25x_SSP) {
-			if (!wait_ssp_rx_stall(reg))
-			{
-				int_error_stop(drv_data, "interrupt_transfer: "
-						"rx stall failed");
-				return IRQ_HANDLED;
+		u32 bytes_left;
+		u32 sccr1_reg;
+
+		sccr1_reg = read_SSCR1(reg);
+		sccr1_reg &= ~SSCR1_TIE;
+
+		/*
+		 * PXA25x_SSP has no timeout; set up the rx threshold for the
+		 * remaining RX bytes.
+		 */
+		if (pxa25x_ssp_comp(drv_data)) {
+
+			sccr1_reg &= ~SSCR1_RFT;
+
+			bytes_left = drv_data->rx_end - drv_data->rx;
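+			/* bytes -> FIFO entries; the cases fall through intentionally */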
+			switch (drv_data->n_bytes) {
+			case 4:
+				bytes_left >>= 1;
+			case 2:
+				bytes_left >>= 1;
 			}
-			if (!drv_data->read(drv_data))
-			{
-				int_error_stop(drv_data,
-						"interrupt_transfer: "
-						"trailing byte read failed");
-				return IRQ_HANDLED;
-			}
-			int_transfer_complete(drv_data);
+
+			if (bytes_left > RX_THRESH_DFLT)
+				bytes_left = RX_THRESH_DFLT;
+
+			sccr1_reg |= SSCR1_RxTresh(bytes_left);
 		}
+		write_SSCR1(sccr1_reg, reg);
 	}
 
 	/* We did something */
@@ -742,14 +789,26 @@
 {
 	struct driver_data *drv_data = dev_id;
 	void __iomem *reg = drv_data->ioaddr;
+	u32 sccr1_reg = read_SSCR1(reg);
+	u32 mask = drv_data->mask_sr;
+	u32 status;
+
+	status = read_SSSR(reg);
+
+	/* If the TX interrupt is disabled, ignore the TX FIFO service request */
+	if (!(sccr1_reg & SSCR1_TIE))
+		mask &= ~SSSR_TFS;
+
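+	/* the IRQ line may be shared; bail out if none of our status bits are set */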
+	if (!(status & mask))
+		return IRQ_NONE;
 
 	if (!drv_data->cur_msg) {
 
 		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
 		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
-		if (drv_data->ssp_type != PXA25x_SSP)
+		if (!pxa25x_ssp_comp(drv_data))
 			write_SSTO(0, reg);
-		write_SSSR(drv_data->clear_sr, reg);
+		write_SSSR_CS(drv_data, drv_data->clear_sr);
 
 		dev_err(&drv_data->pdev->dev, "bad message state "
 			"in interrupt handler\n");
@@ -862,7 +921,7 @@
 {
 	unsigned long ssp_clk = clk_get_rate(ssp->clk);
 
-	if (ssp->type == PXA25x_SSP)
+	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
 		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
 	else
 		return ((ssp_clk / rate - 1) & 0xfff) << 8;
@@ -1088,7 +1147,7 @@
 
 		/* Clear status  */
 		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
-		write_SSSR(drv_data->clear_sr, reg);
+		write_SSSR_CS(drv_data, drv_data->clear_sr);
 	}
 
 	/* see if we need to reload the config registers */
@@ -1098,7 +1157,7 @@
 
 		/* stop the SSP, and update the other bits */
 		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
-		if (drv_data->ssp_type != PXA25x_SSP)
+		if (!pxa25x_ssp_comp(drv_data))
 			write_SSTO(chip->timeout, reg);
 		/* first set CR1 without interrupt and service enables */
 		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
@@ -1106,7 +1165,7 @@
 		write_SSCR0(cr0, reg);
 
 	} else {
-		if (drv_data->ssp_type != PXA25x_SSP)
+		if (!pxa25x_ssp_comp(drv_data))
 			write_SSTO(chip->timeout, reg);
 	}
 
@@ -1233,14 +1292,13 @@
 	uint tx_thres = TX_THRESH_DFLT;
 	uint rx_thres = RX_THRESH_DFLT;
 
-	if (drv_data->ssp_type != PXA25x_SSP
+	if (!pxa25x_ssp_comp(drv_data)
 		&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
 		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
 				"b/w not 4-32 for type non-PXA25x_SSP\n",
 				drv_data->ssp_type, spi->bits_per_word);
 		return -EINVAL;
-	}
-	else if (drv_data->ssp_type == PXA25x_SSP
+	} else if (pxa25x_ssp_comp(drv_data)
 			&& (spi->bits_per_word < 4
 				|| spi->bits_per_word > 16)) {
 		dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
@@ -1259,7 +1317,17 @@
 			return -ENOMEM;
 		}
 
-		chip->gpio_cs = -1;
+		if (drv_data->ssp_type == CE4100_SSP) {
+			if (spi->chip_select > 4) {
+				dev_err(&spi->dev, "failed setup: "
+				"cs number must not be > 4.\n");
+				kfree(chip);
+				return -EINVAL;
+			}
+
+			chip->frm = spi->chip_select;
+		} else
+			chip->gpio_cs = -1;
 		chip->enable_dma = 0;
 		chip->timeout = TIMOUT_DFLT;
 		chip->dma_burst_size = drv_data->master_info->enable_dma ?
@@ -1315,7 +1383,7 @@
 			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
 
 	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
-	if (drv_data->ssp_type != PXA25x_SSP)
+	if (!pxa25x_ssp_comp(drv_data))
 		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
 			clk_get_rate(ssp->clk)
 				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
@@ -1350,23 +1418,27 @@
 
 	spi_set_ctldata(spi, chip);
 
+	if (drv_data->ssp_type == CE4100_SSP)
+		return 0;
+
 	return setup_cs(spi, chip, chip_info);
 }
 
 static void cleanup(struct spi_device *spi)
 {
 	struct chip_data *chip = spi_get_ctldata(spi);
+	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
 
 	if (!chip)
 		return;
 
-	if (gpio_is_valid(chip->gpio_cs))
+	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
 		gpio_free(chip->gpio_cs);
 
 	kfree(chip);
 }
 
-static int __init init_queue(struct driver_data *drv_data)
+static int __devinit init_queue(struct driver_data *drv_data)
 {
 	INIT_LIST_HEAD(&drv_data->queue);
 	spin_lock_init(&drv_data->lock);
@@ -1454,7 +1526,7 @@
 	return 0;
 }
 
-static int __init pxa2xx_spi_probe(struct platform_device *pdev)
+static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct pxa2xx_spi_master *platform_info;
@@ -1484,6 +1556,10 @@
 	drv_data->pdev = pdev;
 	drv_data->ssp = ssp;
 
+	master->dev.parent = &pdev->dev;
+#ifdef CONFIG_OF
+	master->dev.of_node = pdev->dev.of_node;
+#endif
 	/* the spi->mode bits understood by this driver: */
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 
@@ -1500,7 +1576,7 @@
 
 	drv_data->ioaddr = ssp->mmio_base;
 	drv_data->ssdr_physical = ssp->phys_base + SSDR;
-	if (ssp->type == PXA25x_SSP) {
+	if (pxa25x_ssp_comp(drv_data)) {
 		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
 		drv_data->dma_cr1 = 0;
 		drv_data->clear_sr = SSSR_ROR;
@@ -1512,7 +1588,8 @@
 		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
 	}
 
-	status = request_irq(ssp->irq, ssp_int, 0, dev_name(dev), drv_data);
+	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
+			drv_data);
 	if (status < 0) {
 		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
 		goto out_error_master_alloc;
@@ -1561,7 +1638,7 @@
 			| SSCR0_Motorola
 			| SSCR0_DataSize(8),
 			drv_data->ioaddr);
-	if (drv_data->ssp_type != PXA25x_SSP)
+	if (!pxa25x_ssp_comp(drv_data))
 		write_SSTO(0, drv_data->ioaddr);
 	write_SSPSP(0, drv_data->ioaddr);
 
@@ -1723,13 +1800,14 @@
 		.pm	= &pxa2xx_spi_pm_ops,
 #endif
 	},
+	.probe = pxa2xx_spi_probe,
 	.remove = pxa2xx_spi_remove,
 	.shutdown = pxa2xx_spi_shutdown,
 };
 
 static int __init pxa2xx_spi_init(void)
 {
-	return platform_driver_probe(&driver, pxa2xx_spi_probe);
+	return platform_driver_register(&driver);
 }
 subsys_initcall(pxa2xx_spi_init);
 
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
new file mode 100644
index 0000000..351d8a3
--- /dev/null
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -0,0 +1,201 @@
+/*
+ * CE4100's SPI device is more or less the same one as found on PXA
+ *
+ */
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+struct awesome_struct {
+	struct ssp_device ssp;
+	struct platform_device spi_pdev;
+	struct pxa2xx_spi_master spi_pdata;
+};
+
+static DEFINE_MUTEX(ssp_lock);
+static LIST_HEAD(ssp_list);
+
+struct ssp_device *pxa_ssp_request(int port, const char *label)
+{
+	struct ssp_device *ssp = NULL;
+
+	mutex_lock(&ssp_lock);
+
+	list_for_each_entry(ssp, &ssp_list, node) {
+		if (ssp->port_id == port && ssp->use_count == 0) {
+			ssp->use_count++;
+			ssp->label = label;
+			break;
+		}
+	}
+
+	mutex_unlock(&ssp_lock);
+
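+	/* the loop completed without finding a free port */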
+	if (&ssp->node == &ssp_list)
+		return NULL;
+
+	return ssp;
+}
+EXPORT_SYMBOL_GPL(pxa_ssp_request);
+
+void pxa_ssp_free(struct ssp_device *ssp)
+{
+	mutex_lock(&ssp_lock);
+	if (ssp->use_count) {
+		ssp->use_count--;
+		ssp->label = NULL;
+	} else
+		dev_err(&ssp->pdev->dev, "device already free\n");
+	mutex_unlock(&ssp_lock);
+}
+EXPORT_SYMBOL_GPL(pxa_ssp_free);
+
+static void plat_dev_release(struct device *dev)
+{
+	struct awesome_struct *as = container_of(dev,
+			struct awesome_struct, spi_pdev.dev);
+
+	of_device_node_put(&as->spi_pdev.dev);
+}
+
+static int __devinit ce4100_spi_probe(struct pci_dev *dev,
+		const struct pci_device_id *ent)
+{
+	int ret;
+	resource_size_t phys_beg;
+	resource_size_t phys_len;
+	struct awesome_struct *spi_info;
+	struct platform_device *pdev;
+	struct pxa2xx_spi_master *spi_pdata;
+	struct ssp_device *ssp;
+
+	ret = pci_enable_device(dev);
+	if (ret)
+		return ret;
+
+	phys_beg = pci_resource_start(dev, 0);
+	phys_len = pci_resource_len(dev, 0);
+
+	if (!request_mem_region(phys_beg, phys_len,
+				"CE4100 SPI")) {
+		dev_err(&dev->dev, "Can't request register space.\n");
+		ret = -EBUSY;
+		return ret;
+	}
+
+	spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
+	if (!spi_info) {
+		ret = -ENOMEM;
+		goto err_kz;
+	}
+	ssp = &spi_info->ssp;
+	pdev = &spi_info->spi_pdev;
+	spi_pdata =  &spi_info->spi_pdata;
+
+	pdev->name = "pxa2xx-spi";
+	pdev->id = dev->devfn;
+	pdev->dev.parent = &dev->dev;
+	pdev->dev.platform_data = &spi_info->spi_pdata;
+
+#ifdef CONFIG_OF
+	pdev->dev.of_node = dev->dev.of_node;
+#endif
+	pdev->dev.release = plat_dev_release;
+
+	spi_pdata->num_chipselect = dev->devfn;
+
+	ssp->phys_base = pci_resource_start(dev, 0);
+	ssp->mmio_base = ioremap(phys_beg, phys_len);
+	if (!ssp->mmio_base) {
+		dev_err(&pdev->dev, "failed to ioremap() registers\n");
+		ret = -EIO;
+		goto err_remap;
+	}
+	ssp->irq = dev->irq;
+	ssp->port_id = pdev->id;
+	ssp->type = PXA25x_SSP;
+
+	mutex_lock(&ssp_lock);
+	list_add(&ssp->node, &ssp_list);
+	mutex_unlock(&ssp_lock);
+
+	pci_set_drvdata(dev, spi_info);
+
+	ret = platform_device_register(pdev);
+	if (ret)
+		goto err_dev_add;
+
+	return ret;
+
+err_dev_add:
+	pci_set_drvdata(dev, NULL);
+	mutex_lock(&ssp_lock);
+	list_del(&ssp->node);
+	mutex_unlock(&ssp_lock);
+	iounmap(ssp->mmio_base);
+
+err_remap:
+	kfree(spi_info);
+
+err_kz:
+	release_mem_region(phys_beg, phys_len);
+
+	return ret;
+}
+
+static void __devexit ce4100_spi_remove(struct pci_dev *dev)
+{
+	struct awesome_struct *spi_info;
+	struct platform_device *pdev;
+	struct ssp_device *ssp;
+
+	spi_info = pci_get_drvdata(dev);
+
+	ssp = &spi_info->ssp;
+	pdev = &spi_info->spi_pdev;
+
+	platform_device_unregister(pdev);
+
+	iounmap(ssp->mmio_base);
+	release_mem_region(pci_resource_start(dev, 0),
+			pci_resource_len(dev, 0));
+
+	mutex_lock(&ssp_lock);
+	list_del(&ssp->node);
+	mutex_unlock(&ssp_lock);
+
+	pci_set_drvdata(dev, NULL);
+	pci_disable_device(dev);
+	kfree(spi_info);
+}
+
+static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
+
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
+	{ },
+};
+MODULE_DEVICE_TABLE(pci, ce4100_spi_devices);
+
+static struct pci_driver ce4100_spi_driver = {
+	.name           = "ce4100_spi",
+	.id_table       = ce4100_spi_devices,
+	.probe          = ce4100_spi_probe,
+	.remove         = __devexit_p(ce4100_spi_remove),
+};
+
+static int __init ce4100_spi_init(void)
+{
+	return pci_register_driver(&ce4100_spi_driver);
+}
+module_init(ce4100_spi_init);
+
+static void __exit ce4100_spi_exit(void)
+{
+	pci_unregister_driver(&ce4100_spi_driver);
+}
+module_exit(ce4100_spi_exit);
+
+MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b02d0cb..34bb17f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -28,6 +28,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
 #include <linux/of_spi.h>
+#include <linux/pm_runtime.h>
 
 static void spidev_release(struct device *dev)
 {
@@ -100,9 +101,8 @@
 	return 0;
 }
 
-#ifdef	CONFIG_PM
-
-static int spi_suspend(struct device *dev, pm_message_t message)
+#ifdef CONFIG_PM_SLEEP
+static int spi_legacy_suspend(struct device *dev, pm_message_t message)
 {
 	int			value = 0;
 	struct spi_driver	*drv = to_spi_driver(dev->driver);
@@ -117,7 +117,7 @@
 	return value;
 }
 
-static int spi_resume(struct device *dev)
+static int spi_legacy_resume(struct device *dev)
 {
 	int			value = 0;
 	struct spi_driver	*drv = to_spi_driver(dev->driver);
@@ -132,18 +132,94 @@
 	return value;
 }
 
+static int spi_pm_suspend(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_suspend(dev);
+	else
+		return spi_legacy_suspend(dev, PMSG_SUSPEND);
+}
+
+static int spi_pm_resume(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_resume(dev);
+	else
+		return spi_legacy_resume(dev);
+}
+
+static int spi_pm_freeze(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_freeze(dev);
+	else
+		return spi_legacy_suspend(dev, PMSG_FREEZE);
+}
+
+static int spi_pm_thaw(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_thaw(dev);
+	else
+		return spi_legacy_resume(dev);
+}
+
+static int spi_pm_poweroff(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_poweroff(dev);
+	else
+		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
+}
+
+static int spi_pm_restore(struct device *dev)
+{
+	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+	if (pm)
+		return pm_generic_restore(dev);
+	else
+		return spi_legacy_resume(dev);
+}
 #else
-#define spi_suspend	NULL
-#define spi_resume	NULL
+#define spi_pm_suspend	NULL
+#define spi_pm_resume	NULL
+#define spi_pm_freeze	NULL
+#define spi_pm_thaw	NULL
+#define spi_pm_poweroff	NULL
+#define spi_pm_restore	NULL
 #endif
 
+static const struct dev_pm_ops spi_pm = {
+	.suspend = spi_pm_suspend,
+	.resume = spi_pm_resume,
+	.freeze = spi_pm_freeze,
+	.thaw = spi_pm_thaw,
+	.poweroff = spi_pm_poweroff,
+	.restore = spi_pm_restore,
+	SET_RUNTIME_PM_OPS(
+		pm_generic_runtime_suspend,
+		pm_generic_runtime_resume,
+		pm_generic_runtime_idle
+	)
+};
+
 struct bus_type spi_bus_type = {
 	.name		= "spi",
 	.dev_attrs	= spi_dev_attrs,
 	.match		= spi_match_device,
 	.uevent		= spi_uevent,
-	.suspend	= spi_suspend,
-	.resume		= spi_resume,
+	.pm		= &spi_pm,
 };
 EXPORT_SYMBOL_GPL(spi_bus_type);
 
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 55a38e2..1cf9d5f 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -66,7 +66,6 @@
 	SPI_IMX_VER_0_5,
 	SPI_IMX_VER_0_7,
 	SPI_IMX_VER_2_3,
-	SPI_IMX_VER_AUTODETECT,
 };
 
 struct spi_imx_data;
@@ -720,9 +719,6 @@
 
 static struct platform_device_id spi_imx_devtype[] = {
 	{
-		.name = DRIVER_NAME,
-		.driver_data = SPI_IMX_VER_AUTODETECT,
-	}, {
 		.name = "imx1-cspi",
 		.driver_data = SPI_IMX_VER_IMX1,
 	}, {
@@ -747,6 +743,12 @@
 		.name = "imx51-ecspi",
 		.driver_data = SPI_IMX_VER_2_3,
 	}, {
+		.name = "imx53-cspi",
+		.driver_data = SPI_IMX_VER_0_7,
+	}, {
+		.name = "imx53-ecspi",
+		.driver_data = SPI_IMX_VER_2_3,
+	}, {
 		/* sentinel */
 	}
 };
@@ -802,30 +804,8 @@
 
 	init_completion(&spi_imx->xfer_done);
 
-	if (pdev->id_entry->driver_data == SPI_IMX_VER_AUTODETECT) {
-		if (cpu_is_mx25() || cpu_is_mx35())
-			spi_imx->devtype_data =
-				spi_imx_devtype_data[SPI_IMX_VER_0_7];
-		else if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35())
-			spi_imx->devtype_data =
-				spi_imx_devtype_data[SPI_IMX_VER_0_4];
-		else if (cpu_is_mx27() || cpu_is_mx21())
-			spi_imx->devtype_data =
-				spi_imx_devtype_data[SPI_IMX_VER_0_0];
-		else if (cpu_is_mx1())
-			spi_imx->devtype_data =
-				spi_imx_devtype_data[SPI_IMX_VER_IMX1];
-		else
-			BUG();
-	} else
-		spi_imx->devtype_data =
-			spi_imx_devtype_data[pdev->id_entry->driver_data];
-
-	if (!spi_imx->devtype_data.intctrl) {
-		dev_err(&pdev->dev, "no support for this device compiled in\n");
-		ret = -ENODEV;
-		goto out_gpio_free;
-	}
+	spi_imx->devtype_data =
+		spi_imx_devtype_data[pdev->id_entry->driver_data];
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -847,7 +827,7 @@
 	}
 
 	spi_imx->irq = platform_get_irq(pdev, 0);
-	if (spi_imx->irq <= 0) {
+	if (spi_imx->irq < 0) {
 		ret = -EINVAL;
 		goto out_iounmap;
 	}
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c
index dff63be..d5be18b 100644
--- a/drivers/spi/spi_nuc900.c
+++ b/drivers/spi/spi_nuc900.c
@@ -449,7 +449,7 @@
 	release_mem_region(hw->res->start, resource_size(hw->res));
 	kfree(hw->ioarea);
 err_pdata:
-	spi_master_put(hw->master);;
+	spi_master_put(hw->master);
 
 err_nomem:
 	return err;
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c
index bb7df02..891e590 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi_tegra.c
@@ -513,7 +513,7 @@
 	}
 
 	tspi->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR_OR_NULL(tspi->clk)) {
+	if (IS_ERR(tspi->clk)) {
 		dev_err(&pdev->dev, "can not get clock\n");
 		ret = PTR_ERR(tspi->clk);
 		goto err2;
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c
index 58e187f..79e48d4 100644
--- a/drivers/spi/spi_topcliff_pch.c
+++ b/drivers/spi/spi_topcliff_pch.c
@@ -267,7 +267,7 @@
 	if (reg_spsr_val & SPSR_FI_BIT) {
 		/* disable FI & RFI interrupts */
 		pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
-				   SPCR_FIE_BIT | SPCR_TFIE_BIT);
+				   SPCR_FIE_BIT | SPCR_RFIE_BIT);
 
 		/* transfer is completed;inform pch_spi_process_messages */
 		data->transfer_complete = true;
@@ -677,15 +677,15 @@
 {
 	/* enable interrupts */
 	if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
-		/* set receive threhold to PCH_RX_THOLD */
+		/* set receive threshold to PCH_RX_THOLD */
 		pch_spi_setclr_reg(data->master, PCH_SPCR,
-				   PCH_RX_THOLD << SPCR_TFIC_FIELD,
-				   ~MASK_TFIC_SPCR_BITS);
+				   PCH_RX_THOLD << SPCR_RFIC_FIELD,
+				   ~MASK_RFIC_SPCR_BITS);
 		/* enable FI and RFI interrupts */
 		pch_spi_setclr_reg(data->master, PCH_SPCR,
-				   SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0);
+				   SPCR_RFIE_BIT | SPCR_FIE_BIT, 0);
 	} else {
-		/* set receive threhold to maximum */
+		/* set receive threshold to maximum */
 		pch_spi_setclr_reg(data->master, PCH_SPCR,
 				   PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
 				   ~MASK_TFIC_SPCR_BITS);
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4e6245e..6034282 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -38,7 +38,7 @@
 
 
 /*
- * This supports acccess to SPI devices using normal userspace I/O calls.
+ * This supports access to SPI devices using normal userspace I/O calls.
  * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
  * and often mask message boundaries, full SPI support requires full duplex
  * transfers.  There are several kinds of internal message boundaries to
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 80f2db5..7adaef6 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -1,26 +1,27 @@
 /*
- * xilinx_spi.c
- *
  * Xilinx SPI controller driver (master mode only)
  *
  * Author: MontaVista Software, Inc.
  *	source@mvista.com
  *
- * 2002-2007 (c) MontaVista Software, Inc.  This file is licensed under the
- * terms of the GNU General Public License version 2.  This program is licensed
- * "as is" without any warranty of any kind, whether express or implied.
+ * Copyright (c) 2010 Secret Lab Technologies, Ltd.
+ * Copyright (c) 2009 Intel Corporation
+ * 2002-2007 (c) MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  */
 
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
-#include <linux/io.h>
-
-#include "xilinx_spi.h"
 #include <linux/spi/xilinx_spi.h>
+#include <linux/io.h>
 
 #define XILINX_SPI_NAME "xilinx_spi"
 
@@ -350,19 +351,22 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id xilinx_spi_of_match[] = {
+	{ .compatible = "xlnx,xps-spi-2.00.a", },
+	{ .compatible = "xlnx,xps-spi-2.00.b", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+#endif
+
 struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
-	u32 irq, s16 bus_num)
+	u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
 {
 	struct spi_master *master;
 	struct xilinx_spi *xspi;
-	struct xspi_platform_data *pdata = dev->platform_data;
 	int ret;
 
-	if (!pdata) {
-		dev_err(dev, "No platform data attached\n");
-		return NULL;
-	}
-
 	master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
 	if (!master)
 		return NULL;
@@ -389,21 +393,21 @@
 	}
 
 	master->bus_num = bus_num;
-	master->num_chipselect = pdata->num_chipselect;
+	master->num_chipselect = num_cs;
 #ifdef CONFIG_OF
 	master->dev.of_node = dev->of_node;
 #endif
 
 	xspi->mem = *mem;
 	xspi->irq = irq;
-	if (pdata->little_endian) {
+	if (little_endian) {
 		xspi->read_fn = xspi_read32;
 		xspi->write_fn = xspi_write32;
 	} else {
 		xspi->read_fn = xspi_read32_be;
 		xspi->write_fn = xspi_write32_be;
 	}
-	xspi->bits_per_word = pdata->bits_per_word;
+	xspi->bits_per_word = bits_per_word;
 	if (xspi->bits_per_word == 8) {
 		xspi->tx_fn = xspi_tx8;
 		xspi->rx_fn = xspi_rx8;
@@ -462,6 +466,97 @@
 }
 EXPORT_SYMBOL(xilinx_spi_deinit);
 
+static int __devinit xilinx_spi_probe(struct platform_device *dev)
+{
+	struct xspi_platform_data *pdata;
+	struct resource *r;
+	int irq, num_cs = 0, little_endian = 0, bits_per_word = 8;
+	struct spi_master *master;
+	u8 i;
+
+	pdata = dev->dev.platform_data;
+	if (pdata) {
+		num_cs = pdata->num_chipselect;
+		little_endian = pdata->little_endian;
+		bits_per_word = pdata->bits_per_word;
+	}
+
+#ifdef CONFIG_OF
+	if (dev->dev.of_node) {
+		const __be32 *prop;
+		int len;
+
+		/* number of slave select bits is required */
+		prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits",
+				       &len);
+		if (prop && len >= sizeof(*prop))
+			num_cs = __be32_to_cpup(prop);
+	}
+#endif
+
+	if (!num_cs) {
+		dev_err(&dev->dev, "Missing slave select configuration data\n");
+		return -EINVAL;
+	}
+
+
+	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -ENODEV;
+
+	irq = platform_get_irq(dev, 0);
+	if (irq < 0)
+		return -ENXIO;
+
+	master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs,
+				 little_endian, bits_per_word);
+	if (!master)
+		return -ENODEV;
+
+	if (pdata) {
+		for (i = 0; i < pdata->num_devices; i++)
+			spi_new_device(master, pdata->devices + i);
+	}
+
+	platform_set_drvdata(dev, master);
+	return 0;
+}
+
+static int __devexit xilinx_spi_remove(struct platform_device *dev)
+{
+	xilinx_spi_deinit(platform_get_drvdata(dev));
+	platform_set_drvdata(dev, NULL);
+
+	return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+
+static struct platform_driver xilinx_spi_driver = {
+	.probe = xilinx_spi_probe,
+	.remove = __devexit_p(xilinx_spi_remove),
+	.driver = {
+		.name = XILINX_SPI_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+		.of_match_table = xilinx_spi_of_match,
+#endif
+	},
+};
+
+static int __init xilinx_spi_pltfm_init(void)
+{
+	return platform_driver_register(&xilinx_spi_driver);
+}
+module_init(xilinx_spi_pltfm_init);
+
+static void __exit xilinx_spi_pltfm_exit(void)
+{
+	platform_driver_unregister(&xilinx_spi_driver);
+}
+module_exit(xilinx_spi_pltfm_exit);
+
 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
 MODULE_DESCRIPTION("Xilinx SPI driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h
deleted file mode 100644
index d211acc..0000000
--- a/drivers/spi/xilinx_spi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Xilinx SPI device driver API and platform data header file
- *
- * Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _XILINX_SPI_H_
-#define _XILINX_SPI_H_
-
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-
-#define XILINX_SPI_NAME "xilinx_spi"
-
-struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
-	u32 irq, s16 bus_num);
-
-void xilinx_spi_deinit(struct spi_master *master);
-#endif
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
deleted file mode 100644
index b66c2db..0000000
--- a/drivers/spi/xilinx_spi_of.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Xilinx SPI OF device driver
- *
- * Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* Supports:
- * Xilinx SPI devices as OF devices
- *
- * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/of_spi.h>
-
-#include <linux/spi/xilinx_spi.h>
-#include "xilinx_spi.h"
-
-
-static int __devinit xilinx_spi_of_probe(struct platform_device *ofdev,
-	const struct of_device_id *match)
-{
-	struct spi_master *master;
-	struct xspi_platform_data *pdata;
-	struct resource r_mem;
-	struct resource r_irq;
-	int rc = 0;
-	const u32 *prop;
-	int len;
-
-	rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
-	if (rc) {
-		dev_warn(&ofdev->dev, "invalid address\n");
-		return rc;
-	}
-
-	rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
-	if (rc == NO_IRQ) {
-		dev_warn(&ofdev->dev, "no IRQ found\n");
-		return -ENODEV;
-	}
-
-	ofdev->dev.platform_data =
-		kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL);
-	pdata = ofdev->dev.platform_data;
-	if (!pdata)
-		return -ENOMEM;
-
-	/* number of slave select bits is required */
-	prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len);
-	if (!prop || len < sizeof(*prop)) {
-		dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
-		return -EINVAL;
-	}
-	pdata->num_chipselect = *prop;
-	pdata->bits_per_word = 8;
-	master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1);
-	if (!master)
-		return -ENODEV;
-
-	dev_set_drvdata(&ofdev->dev, master);
-
-	return 0;
-}
-
-static int __devexit xilinx_spi_remove(struct platform_device *ofdev)
-{
-	xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
-	dev_set_drvdata(&ofdev->dev, 0);
-	kfree(ofdev->dev.platform_data);
-	ofdev->dev.platform_data = NULL;
-	return 0;
-}
-
-static int __exit xilinx_spi_of_remove(struct platform_device *op)
-{
-	return xilinx_spi_remove(op);
-}
-
-static const struct of_device_id xilinx_spi_of_match[] = {
-	{ .compatible = "xlnx,xps-spi-2.00.a", },
-	{ .compatible = "xlnx,xps-spi-2.00.b", },
-	{}
-};
-
-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-
-static struct of_platform_driver xilinx_spi_of_driver = {
-	.probe = xilinx_spi_of_probe,
-	.remove = __exit_p(xilinx_spi_of_remove),
-	.driver = {
-		.name = "xilinx-xps-spi",
-		.owner = THIS_MODULE,
-		.of_match_table = xilinx_spi_of_match,
-	},
-};
-
-static int __init xilinx_spi_of_init(void)
-{
-	return of_register_platform_driver(&xilinx_spi_of_driver);
-}
-module_init(xilinx_spi_of_init);
-
-static void __exit xilinx_spi_of_exit(void)
-{
-	of_unregister_platform_driver(&xilinx_spi_of_driver);
-}
-module_exit(xilinx_spi_of_exit);
-
-MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
-MODULE_DESCRIPTION("Xilinx SPI platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi_pltfm.c b/drivers/spi/xilinx_spi_pltfm.c
deleted file mode 100644
index 24debac..0000000
--- a/drivers/spi/xilinx_spi_pltfm.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Support for Xilinx SPI platform devices
- * Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/* Supports:
- * Xilinx SPI devices as platform devices
- *
- * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/spi/xilinx_spi.h>
-
-#include "xilinx_spi.h"
-
-static int __devinit xilinx_spi_probe(struct platform_device *dev)
-{
-	struct xspi_platform_data *pdata;
-	struct resource *r;
-	int irq;
-	struct spi_master *master;
-	u8 i;
-
-	pdata = dev->dev.platform_data;
-	if (!pdata)
-		return -ENODEV;
-
-	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -ENODEV;
-
-	irq = platform_get_irq(dev, 0);
-	if (irq < 0)
-		return -ENXIO;
-
-	master = xilinx_spi_init(&dev->dev, r, irq, dev->id);
-	if (!master)
-		return -ENODEV;
-
-	for (i = 0; i < pdata->num_devices; i++)
-		spi_new_device(master, pdata->devices + i);
-
-	platform_set_drvdata(dev, master);
-	return 0;
-}
-
-static int __devexit xilinx_spi_remove(struct platform_device *dev)
-{
-	xilinx_spi_deinit(platform_get_drvdata(dev));
-	platform_set_drvdata(dev, 0);
-
-	return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
-
-static struct platform_driver xilinx_spi_driver = {
-	.probe	= xilinx_spi_probe,
-	.remove	= __devexit_p(xilinx_spi_remove),
-	.driver = {
-		.name = XILINX_SPI_NAME,
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init xilinx_spi_pltfm_init(void)
-{
-	return platform_driver_register(&xilinx_spi_driver);
-}
-module_init(xilinx_spi_pltfm_init);
-
-static void __exit xilinx_spi_pltfm_exit(void)
-{
-	platform_driver_unregister(&xilinx_spi_driver);
-}
-module_exit(xilinx_spi_pltfm_exit);
-
-MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
-MODULE_DESCRIPTION("Xilinx SPI platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 5a0985d..29884c0 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -420,6 +420,16 @@
 			bus->pcicore.dev = dev;
 #endif /* CONFIG_SSB_DRIVER_PCICORE */
 			break;
+		case SSB_DEV_ETHERNET:
+			if (bus->bustype == SSB_BUSTYPE_PCI) {
+				if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM &&
+				    (bus->host_pci->device & 0xFF00) == 0x4300) {
+					/* This is a dangling ethernet core on a
+					 * wireless device. Ignore it. */
+					continue;
+				}
+			}
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index bdc632b6..5c8fcfc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -119,16 +119,18 @@
 
 source "drivers/staging/memrar/Kconfig"
 
+source "drivers/staging/sep/Kconfig"
+
 source "drivers/staging/iio/Kconfig"
 
+source "drivers/staging/cs5535_gpio/Kconfig"
+
 source "drivers/staging/zram/Kconfig"
 
 source "drivers/staging/wlags49_h2/Kconfig"
 
 source "drivers/staging/wlags49_h25/Kconfig"
 
-source "drivers/staging/batman-adv/Kconfig"
-
 source "drivers/staging/samsung-laptop/Kconfig"
 
 source "drivers/staging/sm7xx/Kconfig"
@@ -141,8 +143,6 @@
 
 source "drivers/staging/ti-st/Kconfig"
 
-source "drivers/staging/adis16255/Kconfig"
-
 source "drivers/staging/xgifb/Kconfig"
 
 source "drivers/staging/msm/Kconfig"
@@ -175,5 +175,9 @@
 
 source "drivers/staging/speakup/Kconfig"
 
+source "drivers/staging/cptm1217/Kconfig"
+
+source "drivers/staging/ste_rmi4/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 3eda5c7..d538863 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -42,18 +42,18 @@
 obj-$(CONFIG_HYPERV)		+= hv/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_MRST_RAR_HANDLER)	+= memrar/
+obj-$(CONFIG_DX_SEP)            += sep/
 obj-$(CONFIG_IIO)		+= iio/
+obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio/
 obj-$(CONFIG_ZRAM)		+= zram/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
-obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
 obj-$(CONFIG_SAMSUNG_LAPTOP)	+= samsung-laptop/
 obj-$(CONFIG_FB_SM7XX)		+= sm7xx/
 obj-$(CONFIG_VIDEO_DT3155)	+= dt3155v4l/
 obj-$(CONFIG_CRYSTALHD)		+= crystalhd/
 obj-$(CONFIG_CXT1E1)		+= cxt1e1/
 obj-$(CONFIG_TI_ST)		+= ti-st/
-obj-$(CONFIG_ADIS16255)		+= adis16255/
 obj-$(CONFIG_FB_XGI)		+= xgifb/
 obj-$(CONFIG_MSM_STAGING)	+= msm/
 obj-$(CONFIG_EASYCAP)		+= easycap/
@@ -68,3 +68,5 @@
 obj-$(CONFIG_FT1000)		+= ft1000/
 obj-$(CONFIG_SND_INTEL_SST)		+= intel_sst/
 obj-$(CONFIG_SPEAKUP)	+= speakup/
+obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217)	+= cptm1217/
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4)	+= ste_rmi4/
diff --git a/drivers/staging/adis16255/Kconfig b/drivers/staging/adis16255/Kconfig
deleted file mode 100644
index a883c1f..0000000
--- a/drivers/staging/adis16255/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config ADIS16255
-	tristate "Analog Devices ADIS16250/16255"
-	depends on SPI && SYSFS
-	---help---
-	If you say yes here you get support for the Analog Devices
-	ADIS16250/16255 Low Power Gyroscope. The driver exposes
-	orientation and gyroscope value, as well as sample rate
-	to the sysfs.
-
-	This driver can also be built as a module. If so, the module
-	will be called adis16255.
diff --git a/drivers/staging/adis16255/Makefile b/drivers/staging/adis16255/Makefile
deleted file mode 100644
index 8c39081..0000000
--- a/drivers/staging/adis16255/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ADIS16255)		+= adis16255.o
diff --git a/drivers/staging/adis16255/adis16255.c b/drivers/staging/adis16255/adis16255.c
deleted file mode 100644
index 8d4d7cb..0000000
--- a/drivers/staging/adis16255/adis16255.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Analog Devices ADIS16250/ADIS16255 Low Power Gyroscope
- *
- * Written by: Matthias Brugger <m_brugger@web.de>
- *
- * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-/*
- * The driver just has a bare interface to the sysfs (sample rate in Hz,
- * orientation (x, y, z) and gyroscope data in °/sec.
- *
- * It should be added to iio subsystem when this has left staging.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-#include <linux/interrupt.h>
-#include <linux/sysfs.h>
-#include <linux/stat.h>
-#include <linux/delay.h>
-
-#include <linux/gpio.h>
-
-#include <linux/spi/spi.h>
-#include <linux/workqueue.h>
-
-#include "adis16255.h"
-
-#define ADIS_STATUS        0x3d
-#define ADIS_SMPL_PRD_MSB  0x37
-#define ADIS_SMPL_PRD_LSB  0x36
-#define ADIS_MSC_CTRL_MSB  0x35
-#define ADIS_MSC_CTRL_LSB  0x34
-#define ADIS_GPIO_CTRL     0x33
-#define ADIS_ALM_SMPL1     0x25
-#define ADIS_ALM_MAG1      0x21
-#define ADIS_GYRO_SCALE    0x17
-#define ADIS_GYRO_OUT      0x05
-#define ADIS_SUPPLY_OUT    0x03
-#define ADIS_ENDURANCE     0x01
-
-/*
- * data structure for every sensor
- *
- * @dev:       Driver model representation of the device.
- * @spi:       Pointer to the spi device which will manage i/o to spi bus.
- * @data:      Last read data from device.
- * @irq_adis:  GPIO Number of IRQ signal
- * @irq:       irq line manage by kernel
- * @negative:  indicates if sensor is upside down (negative == 1)
- * @direction: indicates axis (x, y, z) the sensor is meassuring
- */
-struct spi_adis16255_data {
-	struct device dev;
-	struct spi_device *spi;
-	s16 data;
-	int irq;
-	u8 negative;
-	char direction;
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int spi_adis16255_read_data(struct spi_adis16255_data *spiadis,
-					u8 adr,
-					u8 *rbuf)
-{
-	struct spi_device *spi = spiadis->spi;
-	struct spi_message msg;
-	struct spi_transfer xfer1, xfer2;
-	u8 *buf, *rx;
-	int ret;
-
-	buf = kzalloc(4, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
-
-	rx = kzalloc(4, GFP_KERNEL);
-	if (rx == NULL) {
-		ret = -ENOMEM;
-		goto err_buf;
-	}
-
-	buf[0] = adr;
-
-	spi_message_init(&msg);
-	memset(&xfer1, 0, sizeof(xfer1));
-	memset(&xfer2, 0, sizeof(xfer2));
-
-	xfer1.tx_buf = buf;
-	xfer1.rx_buf = buf + 2;
-	xfer1.len = 2;
-	xfer1.delay_usecs = 9;
-
-	xfer2.tx_buf = rx + 2;
-	xfer2.rx_buf = rx;
-	xfer2.len = 2;
-
-	spi_message_add_tail(&xfer1, &msg);
-	spi_message_add_tail(&xfer2, &msg);
-
-	ret = spi_sync(spi, &msg);
-	if (ret == 0) {
-		rbuf[0] = rx[0];
-		rbuf[1] = rx[1];
-	}
-
-	kfree(rx);
-err_buf:
-	kfree(buf);
-
-	return ret;
-}
-
-static int spi_adis16255_write_data(struct spi_adis16255_data *spiadis,
-					u8 adr1,
-					u8 adr2,
-					u8 *wbuf)
-{
-	struct spi_device *spi = spiadis->spi;
-	struct spi_message   msg;
-	struct spi_transfer  xfer1, xfer2;
-	u8       *buf, *rx;
-	int         ret;
-
-	buf = kmalloc(4, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
-
-	rx = kzalloc(4, GFP_KERNEL);
-	if (rx == NULL) {
-		ret = -ENOMEM;
-		goto err_buf;
-	}
-
-	spi_message_init(&msg);
-	memset(&xfer1, 0, sizeof(xfer1));
-	memset(&xfer2, 0, sizeof(xfer2));
-
-	buf[0] = adr1 | 0x80;
-	buf[1] = *wbuf;
-
-	buf[2] = adr2 | 0x80;
-	buf[3] = *(wbuf + 1);
-
-	xfer1.tx_buf = buf;
-	xfer1.rx_buf = rx;
-	xfer1.len = 2;
-	xfer1.delay_usecs = 9;
-
-	xfer2.tx_buf = buf+2;
-	xfer2.rx_buf = rx+2;
-	xfer2.len = 2;
-
-	spi_message_add_tail(&xfer1, &msg);
-	spi_message_add_tail(&xfer2, &msg);
-
-	ret = spi_sync(spi, &msg);
-	if (ret != 0)
-		dev_warn(&spi->dev, "write data to %#x %#x failed\n",
-				buf[0], buf[2]);
-
-	kfree(rx);
-err_buf:
-	kfree(buf);
-	return ret;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static irqreturn_t adis_irq_thread(int irq, void *dev_id)
-{
-	struct spi_adis16255_data *spiadis = dev_id;
-	int status;
-	u16 value = 0;
-
-	status =  spi_adis16255_read_data(spiadis, ADIS_GYRO_OUT, (u8 *)&value);
-	if (status != 0) {
-		dev_warn(&spiadis->spi->dev, "SPI FAILED\n");
-		goto exit;
-	}
-
-	/* perform on new data only... */
-	if (value & 0x8000) {
-		/* delete error and new data bit */
-		value = value & 0x3fff;
-		/* set negative value */
-		if (value & 0x2000)
-			value = value | 0xe000;
-
-		if (likely(spiadis->negative))
-			value = -value;
-
-		spiadis->data = (s16) value;
-	}
-
-exit:
-	return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-
-ssize_t adis16255_show_data(struct device *device,
-		struct device_attribute *da,
-		char *buf)
-{
-	struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
-	return snprintf(buf, PAGE_SIZE, "%d\n", spiadis->data);
-}
-DEVICE_ATTR(data, S_IRUGO , adis16255_show_data, NULL);
-
-ssize_t adis16255_show_direction(struct device *device,
-		struct device_attribute *da,
-		char *buf)
-{
-	struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
-	return snprintf(buf, PAGE_SIZE, "%c\n", spiadis->direction);
-}
-DEVICE_ATTR(direction, S_IRUGO , adis16255_show_direction, NULL);
-
-ssize_t adis16255_show_sample_rate(struct device *device,
-		struct device_attribute *da,
-		char *buf)
-{
-	struct spi_adis16255_data *spiadis = dev_get_drvdata(device);
-	int status = 0;
-	u16 value = 0;
-	int ts = 0;
-
-	status = spi_adis16255_read_data(spiadis, ADIS_SMPL_PRD_MSB,
-				(u8 *)&value);
-	if (status != 0)
-		return -EINVAL;
-
-	if (value & 0x80) {
-		/* timebase = 60.54 ms */
-		ts = 60540 * ((0x7f & value) + 1);
-	} else {
-		/* timebase = 1.953 ms */
-		ts = 1953 * ((0x7f & value) + 1);
-	}
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", (1000*1000)/ts);
-}
-DEVICE_ATTR(sample_rate, S_IRUGO , adis16255_show_sample_rate, NULL);
-
-static struct attribute *adis16255_attributes[] = {
-	&dev_attr_data.attr,
-	&dev_attr_direction.attr,
-	&dev_attr_sample_rate.attr,
-	NULL
-};
-
-static const struct attribute_group adis16255_attr_group = {
-	.attrs = adis16255_attributes,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int spi_adis16255_shutdown(struct spi_adis16255_data *spiadis)
-{
-	u16 value = 0;
-	/* turn sensor off */
-	spi_adis16255_write_data(spiadis,
-			ADIS_SMPL_PRD_MSB, ADIS_SMPL_PRD_LSB,
-			(u8 *)&value);
-	spi_adis16255_write_data(spiadis,
-			ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
-			(u8 *)&value);
-	return 0;
-}
-
-static int spi_adis16255_bringup(struct spi_adis16255_data *spiadis)
-{
-	int status = 0;
-	u16 value = 0;
-
-	status = spi_adis16255_read_data(spiadis, ADIS_GYRO_SCALE,
-				(u8 *)&value);
-	if (status != 0)
-		goto err;
-	if (value != 0x0800) {
-		dev_warn(&spiadis->spi->dev, "Scale factor is none default "
-				"value (%.4x)\n", value);
-	}
-
-	/* timebase = 1.953 ms, Ns = 0 -> 512 Hz sample rate */
-	value =  0x0001;
-	status = spi_adis16255_write_data(spiadis,
-				ADIS_SMPL_PRD_MSB, ADIS_SMPL_PRD_LSB,
-				(u8 *)&value);
-	if (status != 0)
-		goto err;
-
-	/* start internal self-test */
-	value = 0x0400;
-	status = spi_adis16255_write_data(spiadis,
-				ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
-				(u8 *)&value);
-	if (status != 0)
-		goto err;
-
-	/* wait 35 ms to finish self-test */
-	msleep(35);
-
-	value = 0x0000;
-	status = spi_adis16255_read_data(spiadis, ADIS_STATUS,
-				(u8 *)&value);
-	if (status != 0)
-		goto err;
-
-	if (value & 0x23) {
-		if (value & 0x20) {
-			dev_warn(&spiadis->spi->dev, "self-test error\n");
-			status = -ENODEV;
-			goto err;
-		} else if (value & 0x3)	{
-			dev_warn(&spiadis->spi->dev, "Sensor voltage "
-						"out of range.\n");
-			status = -ENODEV;
-			goto err;
-		}
-	}
-
-	/* set interrupt to active high on DIO0 when data ready */
-	value = 0x0006;
-	status = spi_adis16255_write_data(spiadis,
-				ADIS_MSC_CTRL_MSB, ADIS_MSC_CTRL_LSB,
-				(u8 *)&value);
-	if (status != 0)
-		goto err;
-	return status;
-
-err:
-	spi_adis16255_shutdown(spiadis);
-	return status;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int __devinit spi_adis16255_probe(struct spi_device *spi)
-{
-
-	struct adis16255_init_data *init_data = spi->dev.platform_data;
-	struct spi_adis16255_data  *spiadis;
-	int status = 0;
-
-	spiadis = kzalloc(sizeof(*spiadis), GFP_KERNEL);
-	if (!spiadis)
-		return -ENOMEM;
-
-	spiadis->spi = spi;
-	spiadis->direction = init_data->direction;
-
-	if (init_data->negative)
-		spiadis->negative = 1;
-
-	status = gpio_request(init_data->irq, "adis16255");
-	if (status != 0)
-		goto err;
-
-	status = gpio_direction_input(init_data->irq);
-	if (status != 0)
-		goto gpio_err;
-
-	spiadis->irq = gpio_to_irq(init_data->irq);
-
-	status = request_threaded_irq(spiadis->irq,
-			NULL, adis_irq_thread,
-			IRQF_DISABLED, "adis-driver", spiadis);
-
-	if (status != 0) {
-		dev_err(&spi->dev, "IRQ request failed\n");
-		goto gpio_err;
-	}
-
-	dev_dbg(&spi->dev, "GPIO %d IRQ %d\n", init_data->irq, spiadis->irq);
-
-	dev_set_drvdata(&spi->dev, spiadis);
-	status = sysfs_create_group(&spi->dev.kobj, &adis16255_attr_group);
-	if (status != 0)
-		goto irq_err;
-
-	status = spi_adis16255_bringup(spiadis);
-	if (status != 0)
-		goto sysfs_err;
-
-	dev_info(&spi->dev, "spi_adis16255 driver added!\n");
-
-	return status;
-
-sysfs_err:
-	sysfs_remove_group(&spiadis->spi->dev.kobj, &adis16255_attr_group);
-irq_err:
-	free_irq(spiadis->irq, spiadis);
-gpio_err:
-	gpio_free(init_data->irq);
-err:
-	kfree(spiadis);
-	return status;
-}
-
-static int __devexit spi_adis16255_remove(struct spi_device *spi)
-{
-	struct spi_adis16255_data  *spiadis    = dev_get_drvdata(&spi->dev);
-
-	spi_adis16255_shutdown(spiadis);
-
-	free_irq(spiadis->irq, spiadis);
-	gpio_free(irq_to_gpio(spiadis->irq));
-
-	sysfs_remove_group(&spiadis->spi->dev.kobj, &adis16255_attr_group);
-
-	kfree(spiadis);
-
-	dev_info(&spi->dev, "spi_adis16255 driver removed!\n");
-	return 0;
-}
-
-static struct spi_driver spi_adis16255_drv = {
-	.driver = {
-		.name =  "spi_adis16255",
-		.owner = THIS_MODULE,
-	},
-	.probe = spi_adis16255_probe,
-	.remove =   __devexit_p(spi_adis16255_remove),
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int __init spi_adis16255_init(void)
-{
-	return spi_register_driver(&spi_adis16255_drv);
-}
-module_init(spi_adis16255_init);
-
-static void __exit spi_adis16255_exit(void)
-{
-	spi_unregister_driver(&spi_adis16255_drv);
-}
-module_exit(spi_adis16255_exit);
-
-MODULE_AUTHOR("Matthias Brugger");
-MODULE_DESCRIPTION("SPI device driver for ADIS16255 sensor");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/adis16255/adis16255.h b/drivers/staging/adis16255/adis16255.h
deleted file mode 100644
index 03e0700..0000000
--- a/drivers/staging/adis16255/adis16255.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef ADIS16255_H
-#define ADIS16255_H
-
-#include <linux/types.h>
-
-struct adis16255_init_data {
-	char direction;
-	u8   negative;
-	int  irq;
-};
-
-#endif
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 016c6f7..7bb7da7 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -70,7 +70,7 @@
 MODULE_PARM_DESC(start_off,
 		 "Set to 1 to switch off OLED display after it is attached");
 
-enum oled_pack_mode{
+enum oled_pack_mode {
 	PACK_MODE_G1,
 	PACK_MODE_G50,
 	PACK_MODE_LAST
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
index c307a55..e96662b 100644
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif.c
@@ -876,7 +876,7 @@
 void
 HIFUnMaskInterrupt(HIF_DEVICE *device)
 {
-    int ret;;
+    int ret;
 
     AR_DEBUG_ASSERT(device != NULL);
     AR_DEBUG_ASSERT(device->func != NULL);
@@ -1188,7 +1188,7 @@
     HIF_DEVICE *hifdevice;
     AR_DEBUG_PRINTF(ATH_DEBUG_TRACE, ("AR6000: addHifDevice\n"));
     AR_DEBUG_ASSERT(func != NULL);
-    hifdevice = (HIF_DEVICE *)kzalloc(sizeof(HIF_DEVICE), GFP_KERNEL);
+    hifdevice = kzalloc(sizeof(HIF_DEVICE), GFP_KERNEL);
     AR_DEBUG_ASSERT(hifdevice != NULL);
 #if HIF_USE_DMA_BOUNCE_BUFFER
     hifdevice->dma_buffer = kmalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index a659f70..126a36a 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -4439,7 +4439,7 @@
         for (i = assoc_req_ie_pos; i < assoc_req_ie_pos + assocReqLen - 4; i++) {
             AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("%2.2x ", assocInfo[i]));
             sprintf(pos, "%2.2x", assocInfo[i]);
-            pos += 2;;
+            pos += 2;
         }
         AR_DEBUG_PRINTF(ATH_DEBUG_WLAN_CONNECT,("\n"));
 
diff --git a/drivers/staging/autofs/dirhash.c b/drivers/staging/autofs/dirhash.c
index 8f3e2b8..d3f42c8 100644
--- a/drivers/staging/autofs/dirhash.c
+++ b/drivers/staging/autofs/dirhash.c
@@ -30,7 +30,7 @@
 			 struct autofs_dir_ent *ent)
 {
 	autofs_delete_usage(ent);   /* Unlink from current position */
-	autofs_init_usage(dh,ent);  /* Relink at queue tail */
+	autofs_init_usage(dh, ent);  /* Relink at queue tail */
 }
 
 struct autofs_dir_ent *autofs_expire(struct super_block *sb,
@@ -45,17 +45,18 @@
 		struct path path;
 		int umount_ok;
 
-		if ( list_empty(&dh->expiry_head) || sbi->catatonic )
+		if (list_empty(&dh->expiry_head) || sbi->catatonic)
 			return NULL;	/* No entries */
 		/* We keep the list sorted by last_usage and want old stuff */
-		ent = list_entry(dh->expiry_head.next, struct autofs_dir_ent, exp);
+		ent = list_entry(dh->expiry_head.next,
+						struct autofs_dir_ent, exp);
 		if (jiffies - ent->last_usage < timeout)
 			break;
 		/* Move to end of list in case expiry isn't desirable */
 		autofs_update_usage(dh, ent);
 
 		/* Check to see that entry is expirable */
-		if ( ent->ino < AUTOFS_FIRST_DIR_INO )
+		if (ent->ino < AUTOFS_FIRST_DIR_INO)
 			return ent; /* Symlinks are always expirable */
 
 		/* Get the dentry for the autofs subdirectory */
@@ -63,14 +64,15 @@
 
 		if (!path.dentry) {
 			/* Should only happen in catatonic mode */
-			printk("autofs: dentry == NULL but inode range is directory, entry %s\n", ent->name);
+			printk(KERN_DEBUG "autofs: dentry == NULL but inode "
+				"range is directory, entry %s\n", ent->name);
 			autofs_delete_usage(ent);
 			continue;
 		}
 
 		if (!path.dentry->d_inode) {
 			dput(path.dentry);
-			printk("autofs: negative dentry on expiry queue: %s\n",
+			printk(KERN_DEBUG "autofs: negative dentry on expiry queue: %s\n",
 			       ent->name);
 			autofs_delete_usage(ent);
 			continue;
@@ -80,14 +82,16 @@
 		   point to the mounted-on-top root. */
 		if (!S_ISDIR(path.dentry->d_inode->i_mode) ||
 		    !d_mountpoint(path.dentry)) {
-			DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
+			DPRINTK(("autofs: not expirable "
+				"(not a mounted directory): %s\n", ent->name));
 			continue;
 		}
 		path.mnt = mnt;
 		path_get(&path);
 		if (!follow_down(&path)) {
 			path_put(&path);
-			DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name));
+			DPRINTK(("autofs: not expirable "
+				"(not a mounted directory): %s\n", ent->name));
 			continue;
 		}
 		while (d_mountpoint(path.dentry) && follow_down(&path))
@@ -96,30 +100,37 @@
 		path_put(&path);
 
 		if (umount_ok) {
-			DPRINTK(("autofs: signaling expire on %s\n", ent->name));
+			DPRINTK(("autofs: signaling expire on %s\n",
+								ent->name));
 			return ent; /* Expirable! */
 		}
-		DPRINTK(("autofs: didn't expire due to may_umount: %s\n", ent->name));
+
+		DPRINTK(("autofs: didn't expire due to may_umount: %s\n",
+								ent->name));
 	}
 	return NULL;		/* No expirable entries */
 }
 
-void autofs_initialize_hash(struct autofs_dirhash *dh) {
+void autofs_initialize_hash(struct autofs_dirhash *dh)
+{
 	memset(&dh->h, 0, AUTOFS_HASH_SIZE*sizeof(struct autofs_dir_ent *));
 	INIT_LIST_HEAD(&dh->expiry_head);
 }
 
-struct autofs_dir_ent *autofs_hash_lookup(const struct autofs_dirhash *dh, struct qstr *name)
+struct autofs_dir_ent *autofs_hash_lookup(const struct autofs_dirhash *dh,
+						struct qstr *name)
 {
 	struct autofs_dir_ent *dhn;
 
 	DPRINTK(("autofs_hash_lookup: hash = 0x%08x, name = ", name->hash));
-	autofs_say(name->name,name->len);
+	autofs_say(name->name, name->len);
 
-	for ( dhn = dh->h[(unsigned) name->hash % AUTOFS_HASH_SIZE] ; dhn ; dhn = dhn->next ) {
-		if ( name->hash == dhn->hash &&
+	for (dhn = dh->h[(unsigned) name->hash % AUTOFS_HASH_SIZE];
+		dhn;
+		dhn = dhn->next) {
+		if (name->hash == dhn->hash &&
 		     name->len == dhn->len &&
-		     !memcmp(name->name, dhn->name, name->len) )
+		     !memcmp(name->name, dhn->name, name->len))
 			break;
 	}
 
@@ -131,9 +142,9 @@
 	struct autofs_dir_ent **dhnp;
 
 	DPRINTK(("autofs_hash_insert: hash = 0x%08x, name = ", ent->hash));
-	autofs_say(ent->name,ent->len);
+	autofs_say(ent->name, ent->len);
 
-	autofs_init_usage(dh,ent);
+	autofs_init_usage(dh, ent);
 	if (ent->dentry)
 		dget(ent->dentry);
 
@@ -141,19 +152,19 @@
 	ent->next = *dhnp;
 	ent->back = dhnp;
 	*dhnp = ent;
-	if ( ent->next )
+	if (ent->next)
 		ent->next->back = &(ent->next);
 }
 
 void autofs_hash_delete(struct autofs_dir_ent *ent)
 {
 	*(ent->back) = ent->next;
-	if ( ent->next )
+	if (ent->next)
 		ent->next->back = ent->back;
 
 	autofs_delete_usage(ent);
 
-	if ( ent->dentry )
+	if (ent->dentry)
 		dput(ent->dentry);
 	kfree(ent->name);
 	kfree(ent);
@@ -176,37 +187,37 @@
 	bucket = (*ptr >> 16) - 1;
 	ecount = *ptr & 0xffff;
 
-	if ( bucket < 0 ) {
+	if (bucket < 0)
 		bucket = ecount = 0;
-	} 
 
 	DPRINTK(("autofs_hash_enum: bucket %d, entry %d\n", bucket, ecount));
 
 	ent = last ? last->next : NULL;
 
-	if ( ent ) {
+	if (ent) {
 		ecount++;
 	} else {
-		while  ( bucket < AUTOFS_HASH_SIZE ) {
+		while  (bucket < AUTOFS_HASH_SIZE) {
 			ent = dh->h[bucket];
-			for ( i = ecount ; ent && i ; i-- )
+			for (i = ecount ; ent && i ; i--)
 				ent = ent->next;
-			
+
 			if (ent) {
 				ecount++; /* Point to *next* entry */
 				break;
 			}
-			
+
 			bucket++; ecount = 0;
 		}
 	}
 
 #ifdef DEBUG
-	if ( !ent )
-		printk("autofs_hash_enum: nothing found\n");
+	if (!ent)
+		printk(KERN_DEBUG "autofs_hash_enum: nothing found\n");
 	else {
-		printk("autofs_hash_enum: found hash %08x, name", ent->hash);
-		autofs_say(ent->name,ent->len);
+		printk(KERN_DEBUG "autofs_hash_enum: found hash %08x, name",
+								ent->hash);
+		autofs_say(ent->name, ent->len);
 	}
 #endif
 
@@ -221,9 +232,9 @@
 	int i;
 	struct autofs_dir_ent *ent;
 
-	for ( i = 0 ; i < AUTOFS_HASH_SIZE ; i++ ) {
-		for ( ent = dh->h[i] ; ent ; ent = ent->next ) {
-			if ( ent->dentry ) {
+	for (i = 0 ; i < AUTOFS_HASH_SIZE ; i++) {
+		for (ent = dh->h[i] ; ent ; ent = ent->next) {
+			if (ent->dentry) {
 				dput(ent->dentry);
 				ent->dentry = NULL;
 			}
@@ -238,10 +249,10 @@
 	int i;
 	struct autofs_dir_ent *ent, *nent;
 
-	for ( i = 0 ; i < AUTOFS_HASH_SIZE ; i++ ) {
-		for ( ent = sbi->dirhash.h[i] ; ent ; ent = nent ) {
+	for (i = 0 ; i < AUTOFS_HASH_SIZE ; i++) {
+		for (ent = sbi->dirhash.h[i] ; ent ; ent = nent) {
 			nent = ent->next;
-			if ( ent->dentry )
+			if (ent->dentry)
 				dput(ent->dentry);
 			kfree(ent->name);
 			kfree(ent);
diff --git a/drivers/staging/autofs/root.c b/drivers/staging/autofs/root.c
index 0fdec4b..bf0e975 100644
--- a/drivers/staging/autofs/root.c
+++ b/drivers/staging/autofs/root.c
@@ -154,13 +154,16 @@
  * yet completely filled in, and revalidate has to delay such
  * lookups..
  */
-static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd)
+static int autofs_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode * dir;
 	struct autofs_sb_info *sbi;
 	struct autofs_dir_ent *ent;
 	int res;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
 	lock_kernel();
 	dir = dentry->d_parent->d_inode;
 	sbi = autofs_sbi(dir->i_sb);
@@ -237,7 +240,7 @@
 	 *
 	 * We need to do this before we release the directory semaphore.
 	 */
-	dentry->d_op = &autofs_dentry_operations;
+	d_set_d_op(dentry, &autofs_dentry_operations);
 	dentry->d_flags |= DCACHE_AUTOFS_PENDING;
 	d_add(dentry, NULL);
 
diff --git a/drivers/staging/batman-adv/Kconfig b/drivers/staging/batman-adv/Kconfig
deleted file mode 100644
index 8553f35..0000000
--- a/drivers/staging/batman-adv/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# B.A.T.M.A.N meshing protocol
-#
-
-config BATMAN_ADV
-	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
-	depends on NET
-        default n
-	---help---
-
-        B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
-        a routing protocol for multi-hop ad-hoc mesh networks. The
-        networks may be wired or wireless. See
-        http://www.open-mesh.org/ for more information and user space
-        tools.
-
-config BATMAN_ADV_DEBUG
-	bool "B.A.T.M.A.N. debugging"
-	depends on BATMAN_ADV != n
-	---help---
-
-	  This is an option for use by developers; most people should
-	  say N here. This enables compilation of support for
-	  outputting debugging information to the kernel log. The
-	  output is controlled via the module parameter debug.
-
diff --git a/drivers/staging/batman-adv/Makefile b/drivers/staging/batman-adv/Makefile
deleted file mode 100644
index 7892428..0000000
--- a/drivers/staging/batman-adv/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
-#
-# Marek Lindner, Simon Wunderlich
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of version 2 of the GNU General Public
-# License as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA
-#
-
-obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o unicast.o
diff --git a/drivers/staging/batman-adv/README b/drivers/staging/batman-adv/README
deleted file mode 100644
index 7c878bb..0000000
--- a/drivers/staging/batman-adv/README
+++ /dev/null
@@ -1,240 +0,0 @@
-[state: 04-09-2010]
-
-BATMAN-ADV
-----------
-
-Batman  advanced  is  a new approach to wireless networking which
-does no longer operate on the IP basis. Unlike the batman daemon,
-which  exchanges  information  using UDP packets and sets routing
-tables, batman-advanced operates on ISO/OSI Layer 2 only and uses
-and  routes  (or  better: bridges) Ethernet Frames. It emulates a
-virtual network switch of all nodes participating.  Therefore all
-nodes  appear  to be link local, thus all higher operating proto-
-cols won't be affected by any changes within the network. You can
-run almost any protocol above batman advanced, prominent examples
-are: IPv4, IPv6, DHCP, IPX.
-
-Batman advanced was implemented as a Linux kernel driver  to  re-
-duce the overhead to a minimum. It does not depend on any (other)
-network driver, and can be used on wifi as well as ethernet  lan,
-vpn,  etc ... (anything with ethernet-style layer 2).
-
-CONFIGURATION
--------------
-
-Load the batman-adv module into your kernel:
-
-# insmod batman-adv.ko
-
-The  module  is now waiting for activation. You must add some in-
-terfaces on which batman can operate. After  loading  the  module
-batman  advanced  will scan your systems interfaces to search for
-compatible interfaces. Once found, it will create  subfolders  in
-the /sys directories of each supported interface, e.g.
-
-# ls /sys/class/net/eth0/batman_adv/
-# iface_status  mesh_iface
-
-If an interface does not have the "batman_adv" subfolder it prob-
-ably is not supported. Not supported  interfaces  are:  loopback,
-non-ethernet and batman's own interfaces.
-
-Note:  After the module was loaded it will continuously watch for
-new interfaces to verify the compatibility. There is no  need  to
-reload the module if you plug your USB wifi adapter into your ma-
-chine after batman advanced was initially loaded.
-
-To activate a  given  interface  simply  write  "bat0"  into  its
-"mesh_iface" file inside the batman_adv subfolder:
-
-# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
-
-Repeat  this step for all interfaces you wish to add.  Now batman
-starts using/broadcasting on this/these interface(s).
-
-By reading the "iface_status" file you can check its status:
-
-# cat /sys/class/net/eth0/batman_adv/iface_status
-# active
-
-To deactivate an interface you have  to  write  "none"  into  its
-"mesh_iface" file:
-
-# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
-
-
-All  mesh  wide  settings  can be found in batman's own interface
-folder:
-
-#  ls  /sys/class/net/bat0/mesh/
-#  aggregated_ogms  bonding  orig_interval  vis_mode
-
-
-There is a special folder for debugging informations:
-
-#  ls /sys/kernel/debug/batman_adv/bat0/
-#  originators  socket  transtable_global  transtable_local
-#  vis_data
-
-
-Some of the files contain all sort of status information  regard-
-ing  the  mesh  network.  For  example, you can view the table of
-originators (mesh participants) with:
-
-# cat /sys/kernel/debug/batman_adv/bat0/originators
-
-Other files allow to change batman's behaviour to better fit your
-requirements.  For instance, you can check the current originator
-interval (value in milliseconds which determines how often batman
-sends its broadcast packets):
-
-# cat /sys/class/net/bat0/mesh/orig_interval
-# 1000
-
-and also change its value:
-
-# echo 3000 > /sys/class/net/bat0/mesh/orig_interval
-
-In very mobile scenarios, you might want to adjust the originator
-interval to a lower value. This will make the mesh  more  respon-
-sive to topology changes, but will also increase the overhead.
-
-
-USAGE
------
-
-To  make use of your newly created mesh, batman advanced provides
-a new interface "bat0" which you should use from this  point  on.
-All  interfaces  added  to  batman  advanced are not relevant any
-longer because batman handles them for you. Basically, one "hands
-over" the data by using the batman interface and batman will make
-sure it reaches its destination.
-
-The "bat0" interface can be used like any  other  regular  inter-
-face.  It needs an IP address which can be either statically con-
-figured or dynamically (by using DHCP or similar services):
-
-# NodeA: ifconfig bat0 192.168.0.1
-# NodeB: ifconfig bat0 192.168.0.2
-# NodeB: ping 192.168.0.1
-
-Note:  In  order to avoid problems remove all IP addresses previ-
-ously assigned to interfaces now used by batman advanced, e.g.
-
-# ifconfig eth0 0.0.0.0
-
-
-VISUALIZATION
--------------
-
-If you want topology visualization, at least one mesh  node  must
-be configured as VIS-server:
-
-# echo "server" > /sys/class/net/bat0/mesh/vis_mode
-
-Each  node  is  either configured as "server" or as "client" (de-
-fault: "client").  Clients send their topology data to the server
-next to them, and server synchronize with other servers. If there
-is no server configured (default) within the  mesh,  no  topology
-information   will  be  transmitted.  With  these  "synchronizing
-servers", there can be 1 or more vis servers sharing the same (or
-at least very similar) data.
-
-When  configured  as  server,  you can get a topology snapshot of
-your mesh:
-
-# cat /sys/kernel/debug/batman_adv/bat0/vis_data
-
-This raw output is intended to be easily parsable and convertable
-with  other tools. Have a look at the batctl README if you want a
-vis output in dot or json format for instance and how those  out-
-puts could then be visualised in an image.
-
-The raw format consists of comma separated values per entry where
-each entry is giving information about a  certain  source  inter-
-face.  Each  entry can/has to have the following values:
--> "mac" - mac address of an originator's source interface
-           (each line begins with it)
--> "TQ mac  value"  -  src mac's link quality towards mac address
-                       of a neighbor originator's interface which
-                       is being used for routing
--> "HNA mac" - HNA announced by source mac
--> "PRIMARY" - this  is a primary interface
--> "SEC mac" - secondary mac address of source
-               (requires preceding PRIMARY)
-
-The TQ value has a range from 4 to 255 with 255 being  the  best.
-The HNA entries are showing which hosts are connected to the mesh
-via bat0 or being bridged into the mesh network.  The PRIMARY/SEC
-values are only applied on primary interfaces
-
-
-LOGGING/DEBUGGING
------------------
-
-All error messages, warnings and information messages are sent to
-the kernel log. Depending on your operating  system  distribution
-this  can  be read in one of a number of ways. Try using the com-
-mands: dmesg, logread, or looking in the files  /var/log/kern.log
-or  /var/log/syslog.  All  batman-adv  messages are prefixed with
-"batman-adv:" So to see just these messages try
-
-# dmesg | grep batman-adv
-
-When investigating problems with your mesh network  it  is  some-
-times  necessary  to see more detail debug messages. This must be
-enabled when compiling the batman-adv module. When building  bat-
-man-adv  as  part of kernel, use "make menuconfig" and enable the
-option "B.A.T.M.A.N. debugging".
-
-Those additional  debug messages can be accessed  using a special
-file in debugfs
-
-# cat /sys/kernel/debug/batman_adv/bat0/log
-
-The additional debug output is by default disabled. It can be en-
-abled  during run time. Following log_levels are defined:
-
-0 - All  debug  output  disabled
-1 - Enable messages related to routing / flooding / broadcasting
-2 - Enable route or hna added / changed / deleted
-3 - Enable all messages
-
-The debug output can be changed at runtime  using  the  file
-/sys/class/net/bat0/mesh/log_level. e.g.
-
-# echo 2 > /sys/class/net/bat0/mesh/log_level
-
-will enable debug messages for when routes or HNAs change.
-
-
-BATCTL
-------
-
-As batman advanced operates on layer 2 all hosts participating in
-the  virtual switch are completely transparent for all  protocols
-above layer 2. Therefore the common diagnosis tools do  not  work
-as  expected.  To  overcome these problems batctl was created. At
-the  moment the  batctl contains ping,  traceroute,  tcpdump  and
-interfaces to the kernel module settings.
-
-For more information, please see the manpage (man batctl).
-
-batctl is available on http://www.open-mesh.org/
-
-
-CONTACT
--------
-
-Please send us comments, experiences, questions, anything :)
-
-IRC:            #batman   on   irc.freenode.org
-Mailing-list:   b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
-                (optional   subscription   at
-                 https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
-
-You can also contact the Authors:
-
-Marek  Lindner  <lindner_marek@yahoo.de>
-Simon  Wunderlich  <siwu@hrz.tu-chemnitz.de>
-
diff --git a/drivers/staging/batman-adv/TODO b/drivers/staging/batman-adv/TODO
deleted file mode 100644
index 11c384f..0000000
--- a/drivers/staging/batman-adv/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
- * remove own list functionality from hash
- * use hlist_head, hlist_node in hash
- * don't use callbacks for compare+choose in hash
- * think about more efficient ways instead of abstraction of hash
- * Request a new review
- * Process the comments from the review
- * Move into mainline proper
-
-Please send all patches to:
-	Marek Lindner <lindner_marek@yahoo.de>
-	Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-	Andrew Lunn <andrew@lunn.ch>
-	b.a.t.m.a.n@lists.open-mesh.org
-	Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c
deleted file mode 100644
index 08624d4..0000000
--- a/drivers/staging/batman-adv/aggregation.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "aggregation.h"
-#include "send.h"
-#include "routing.h"
-
-/* calculate the size of the hna information for a given packet */
-static int hna_len(struct batman_packet *batman_packet)
-{
-	return batman_packet->num_hna * ETH_ALEN;
-}
-
-/* return true if new_packet can be aggregated with forw_packet */
-static bool can_aggregate_with(struct batman_packet *new_batman_packet,
-			       int packet_len,
-			       unsigned long send_time,
-			       bool directlink,
-			       struct batman_if *if_incoming,
-			       struct forw_packet *forw_packet)
-{
-	struct batman_packet *batman_packet =
-		(struct batman_packet *)forw_packet->skb->data;
-	int aggregated_bytes = forw_packet->packet_len + packet_len;
-
-	/**
-	 * we can aggregate the current packet to this aggregated packet
-	 * if:
-	 *
-	 * - the send time is within our MAX_AGGREGATION_MS time
-	 * - the resulting packet wont be bigger than
-	 *   MAX_AGGREGATION_BYTES
-	 */
-
-	if (time_before(send_time, forw_packet->send_time) &&
-	    time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
-					forw_packet->send_time) &&
-	    (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
-
-		/**
-		 * check aggregation compatibility
-		 * -> direct link packets are broadcasted on
-		 *    their interface only
-		 * -> aggregate packet if the current packet is
-		 *    a "global" packet as well as the base
-		 *    packet
-		 */
-
-		/* packets without direct link flag and high TTL
-		 * are flooded through the net  */
-		if ((!directlink) &&
-		    (!(batman_packet->flags & DIRECTLINK)) &&
-		    (batman_packet->ttl != 1) &&
-
-		    /* own packets originating non-primary
-		     * interfaces leave only that interface */
-		    ((!forw_packet->own) ||
-		     (forw_packet->if_incoming->if_num == 0)))
-			return true;
-
-		/* if the incoming packet is sent via this one
-		 * interface only - we still can aggregate */
-		if ((directlink) &&
-		    (new_batman_packet->ttl == 1) &&
-		    (forw_packet->if_incoming == if_incoming) &&
-
-		    /* packets from direct neighbors or
-		     * own secondary interface packets
-		     * (= secondary interface packets in general) */
-		    (batman_packet->flags & DIRECTLINK ||
-		     (forw_packet->own &&
-		      forw_packet->if_incoming->if_num != 0)))
-			return true;
-	}
-
-	return false;
-}
-
-#define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
-/* create a new aggregated packet and add this packet to it */
-static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
-				  unsigned long send_time, bool direct_link,
-				  struct batman_if *if_incoming,
-				  int own_packet)
-{
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct forw_packet *forw_packet_aggr;
-	unsigned long flags;
-	unsigned char *skb_buff;
-
-	/* own packet should always be scheduled */
-	if (!own_packet) {
-		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"batman packet queue full\n");
-			return;
-		}
-	}
-
-	forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
-	if (!forw_packet_aggr) {
-		if (!own_packet)
-			atomic_inc(&bat_priv->batman_queue_left);
-		return;
-	}
-
-	if ((atomic_read(&bat_priv->aggregation_enabled)) &&
-	    (packet_len < MAX_AGGREGATION_BYTES))
-		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
-						      sizeof(struct ethhdr));
-	else
-		forw_packet_aggr->skb = dev_alloc_skb(packet_len +
-						      sizeof(struct ethhdr));
-
-	if (!forw_packet_aggr->skb) {
-		if (!own_packet)
-			atomic_inc(&bat_priv->batman_queue_left);
-		kfree(forw_packet_aggr);
-		return;
-	}
-	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
-
-	INIT_HLIST_NODE(&forw_packet_aggr->list);
-
-	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
-	forw_packet_aggr->packet_len = packet_len;
-	memcpy(skb_buff, packet_buff, packet_len);
-
-	forw_packet_aggr->own = own_packet;
-	forw_packet_aggr->if_incoming = if_incoming;
-	forw_packet_aggr->num_packets = 0;
-	forw_packet_aggr->direct_link_flags = 0;
-	forw_packet_aggr->send_time = send_time;
-
-	/* save packet direct link flag status */
-	if (direct_link)
-		forw_packet_aggr->direct_link_flags |= 1;
-
-	/* add new packet to packet list */
-	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
-	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
-	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
-	/* start timer for this packet */
-	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
-			  send_outstanding_bat_packet);
-	queue_delayed_work(bat_event_workqueue,
-			   &forw_packet_aggr->delayed_work,
-			   send_time - jiffies);
-}
-
-/* aggregate a new packet into the existing aggregation */
-static void aggregate(struct forw_packet *forw_packet_aggr,
-		      unsigned char *packet_buff,
-		      int packet_len,
-		      bool direct_link)
-{
-	unsigned char *skb_buff;
-
-	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
-	memcpy(skb_buff, packet_buff, packet_len);
-	forw_packet_aggr->packet_len += packet_len;
-	forw_packet_aggr->num_packets++;
-
-	/* save packet direct link flag status */
-	if (direct_link)
-		forw_packet_aggr->direct_link_flags |=
-			(1 << forw_packet_aggr->num_packets);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
-			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
-			    unsigned long send_time)
-{
-	/**
-	 * _aggr -> pointer to the packet we want to aggregate with
-	 * _pos -> pointer to the position in the queue
-	 */
-	struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
-	struct hlist_node *tmp_node;
-	struct batman_packet *batman_packet =
-		(struct batman_packet *)packet_buff;
-	bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
-	unsigned long flags;
-
-	/* find position for the packet in the forward queue */
-	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
-	/* own packets are not to be aggregated */
-	if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
-		hlist_for_each_entry(forw_packet_pos, tmp_node,
-				     &bat_priv->forw_bat_list, list) {
-			if (can_aggregate_with(batman_packet,
-					       packet_len,
-					       send_time,
-					       direct_link,
-					       if_incoming,
-					       forw_packet_pos)) {
-				forw_packet_aggr = forw_packet_pos;
-				break;
-			}
-		}
-	}
-
-	/* nothing to aggregate with - either aggregation disabled or no
-	 * suitable aggregation packet found */
-	if (forw_packet_aggr == NULL) {
-		/* the following section can run without the lock */
-		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
-		/**
-		 * if we could not aggregate this packet with one of the others
-		 * we hold it back for a while, so that it might be aggregated
-		 * later on
-		 */
-		if ((!own_packet) &&
-		    (atomic_read(&bat_priv->aggregation_enabled)))
-			send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
-
-		new_aggregated_packet(packet_buff, packet_len,
-				      send_time, direct_link,
-				      if_incoming, own_packet);
-	} else {
-		aggregate(forw_packet_aggr,
-			  packet_buff, packet_len,
-			  direct_link);
-		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-	}
-}
-
-/* unpack the aggregated packets and process them one by one */
-void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming)
-{
-	struct batman_packet *batman_packet;
-	int buff_pos = 0;
-	unsigned char *hna_buff;
-
-	batman_packet = (struct batman_packet *)packet_buff;
-
-	do {
-		/* network to host order for our 32bit seqno, and the
-		   orig_interval. */
-		batman_packet->seqno = ntohl(batman_packet->seqno);
-
-		hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
-		receive_bat_packet(ethhdr, batman_packet,
-				   hna_buff, hna_len(batman_packet),
-				   if_incoming);
-
-		buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
-		batman_packet = (struct batman_packet *)
-			(packet_buff + buff_pos);
-	} while (aggregated_packet(buff_pos, packet_len,
-				   batman_packet->num_hna));
-}
diff --git a/drivers/staging/batman-adv/aggregation.h b/drivers/staging/batman-adv/aggregation.h
deleted file mode 100644
index 71a91b3..0000000
--- a/drivers/staging/batman-adv/aggregation.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
-#define _NET_BATMAN_ADV_AGGREGATION_H_
-
-#include "main.h"
-
-/* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
-{
-	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN);
-
-	return (next_buff_pos <= packet_len) &&
-		(next_buff_pos <= MAX_AGGREGATION_BYTES);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
-			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
-			    unsigned long send_time);
-void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming);
-
-#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/drivers/staging/batman-adv/bat_debugfs.c b/drivers/staging/batman-adv/bat_debugfs.c
deleted file mode 100644
index 57f84a9..0000000
--- a/drivers/staging/batman-adv/bat_debugfs.c
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-
-#include <linux/debugfs.h>
-
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "vis.h"
-#include "icmp_socket.h"
-
-static struct dentry *bat_debugfs;
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-#define LOG_BUFF_MASK (log_buff_len-1)
-#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
-
-static int log_buff_len = LOG_BUF_LEN;
-
-static void emit_log_char(struct debug_log *debug_log, char c)
-{
-	LOG_BUFF(debug_log->log_end) = c;
-	debug_log->log_end++;
-
-	if (debug_log->log_end - debug_log->log_start > log_buff_len)
-		debug_log->log_start = debug_log->log_end - log_buff_len;
-}
-
-static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
-{
-	int printed_len;
-	va_list args;
-	static char debug_log_buf[256];
-	char *p;
-	unsigned long flags;
-
-	if (!debug_log)
-		return 0;
-
-	spin_lock_irqsave(&debug_log->lock, flags);
-	va_start(args, fmt);
-	printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
-				 fmt, args);
-	va_end(args);
-
-	for (p = debug_log_buf; *p != 0; p++)
-		emit_log_char(debug_log, *p);
-
-	spin_unlock_irqrestore(&debug_log->lock, flags);
-
-	wake_up(&debug_log->queue_wait);
-
-	return 0;
-}
-
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...)
-{
-	va_list args;
-	char tmp_log_buf[256];
-
-	va_start(args, fmt);
-	vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
-	fdebug_log(bat_priv->debug_log, "[%10u] %s",
-		   (jiffies / HZ), tmp_log_buf);
-	va_end(args);
-
-	return 0;
-}
-
-static int log_open(struct inode *inode, struct file *file)
-{
-	nonseekable_open(inode, file);
-	file->private_data = inode->i_private;
-	inc_module_count();
-	return 0;
-}
-
-static int log_release(struct inode *inode, struct file *file)
-{
-	dec_module_count();
-	return 0;
-}
-
-static ssize_t log_read(struct file *file, char __user *buf,
-			size_t count, loff_t *ppos)
-{
-	struct bat_priv *bat_priv = file->private_data;
-	struct debug_log *debug_log = bat_priv->debug_log;
-	int error, i = 0;
-	char c;
-	unsigned long flags;
-
-	if ((file->f_flags & O_NONBLOCK) &&
-	    !(debug_log->log_end - debug_log->log_start))
-		return -EAGAIN;
-
-	if ((!buf) || (count < 0))
-		return -EINVAL;
-
-	if (count == 0)
-		return 0;
-
-	if (!access_ok(VERIFY_WRITE, buf, count))
-		return -EFAULT;
-
-	error = wait_event_interruptible(debug_log->queue_wait,
-				(debug_log->log_start - debug_log->log_end));
-
-	if (error)
-		return error;
-
-	spin_lock_irqsave(&debug_log->lock, flags);
-
-	while ((!error) && (i < count) &&
-	       (debug_log->log_start != debug_log->log_end)) {
-		c = LOG_BUFF(debug_log->log_start);
-
-		debug_log->log_start++;
-
-		spin_unlock_irqrestore(&debug_log->lock, flags);
-
-		error = __put_user(c, buf);
-
-		spin_lock_irqsave(&debug_log->lock, flags);
-
-		buf++;
-		i++;
-
-	}
-
-	spin_unlock_irqrestore(&debug_log->lock, flags);
-
-	if (!error)
-		return i;
-
-	return error;
-}
-
-static unsigned int log_poll(struct file *file, poll_table *wait)
-{
-	struct bat_priv *bat_priv = file->private_data;
-	struct debug_log *debug_log = bat_priv->debug_log;
-
-	poll_wait(file, &debug_log->queue_wait, wait);
-
-	if (debug_log->log_end - debug_log->log_start)
-		return POLLIN | POLLRDNORM;
-
-	return 0;
-}
-
-static const struct file_operations log_fops = {
-	.open           = log_open,
-	.release        = log_release,
-	.read           = log_read,
-	.poll           = log_poll,
-	.llseek         = no_llseek,
-};
-
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
-	struct dentry *d;
-
-	if (!bat_priv->debug_dir)
-		goto err;
-
-	bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC);
-	if (!bat_priv->debug_log)
-		goto err;
-
-	spin_lock_init(&bat_priv->debug_log->lock);
-	init_waitqueue_head(&bat_priv->debug_log->queue_wait);
-
-	d = debugfs_create_file("log", S_IFREG | S_IRUSR,
-				bat_priv->debug_dir, bat_priv, &log_fops);
-	if (d)
-		goto err;
-
-	return 0;
-
-err:
-	return 1;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
-	kfree(bat_priv->debug_log);
-	bat_priv->debug_log = NULL;
-}
-#else /* CONFIG_BATMAN_ADV_DEBUG */
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
-	bat_priv->debug_log = NULL;
-	return 0;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
-	return;
-}
-#endif
-
-static int originators_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, orig_seq_print_text, net_dev);
-}
-
-static int transtable_global_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, hna_global_seq_print_text, net_dev);
-}
-
-static int transtable_local_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, hna_local_seq_print_text, net_dev);
-}
-
-static int vis_data_open(struct inode *inode, struct file *file)
-{
-	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, vis_seq_print_text, net_dev);
-}
-
-struct bat_debuginfo {
-	struct attribute attr;
-	const struct file_operations fops;
-};
-
-#define BAT_DEBUGINFO(_name, _mode, _open)	\
-struct bat_debuginfo bat_debuginfo_##_name = {	\
-	.attr = { .name = __stringify(_name),	\
-		  .mode = _mode, },		\
-	.fops = { .owner = THIS_MODULE,		\
-		  .open = _open,		\
-		  .read	= seq_read,		\
-		  .llseek = seq_lseek,		\
-		  .release = single_release,	\
-		}				\
-};
-
-static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
-static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
-static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
-static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
-
-static struct bat_debuginfo *mesh_debuginfos[] = {
-	&bat_debuginfo_originators,
-	&bat_debuginfo_transtable_global,
-	&bat_debuginfo_transtable_local,
-	&bat_debuginfo_vis_data,
-	NULL,
-};
-
-void debugfs_init(void)
-{
-	bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
-	if (bat_debugfs == ERR_PTR(-ENODEV))
-		bat_debugfs = NULL;
-}
-
-void debugfs_destroy(void)
-{
-	if (bat_debugfs) {
-		debugfs_remove_recursive(bat_debugfs);
-		bat_debugfs = NULL;
-	}
-}
-
-int debugfs_add_meshif(struct net_device *dev)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct bat_debuginfo **bat_debug;
-	struct dentry *file;
-
-	if (!bat_debugfs)
-		goto out;
-
-	bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
-	if (!bat_priv->debug_dir)
-		goto out;
-
-	bat_socket_setup(bat_priv);
-	debug_log_setup(bat_priv);
-
-	for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
-		file = debugfs_create_file(((*bat_debug)->attr).name,
-					  S_IFREG | ((*bat_debug)->attr).mode,
-					  bat_priv->debug_dir,
-					  dev, &(*bat_debug)->fops);
-		if (!file) {
-			bat_err(dev, "Can't add debugfs file: %s/%s\n",
-				dev->name, ((*bat_debug)->attr).name);
-			goto rem_attr;
-		}
-	}
-
-	return 0;
-rem_attr:
-	debugfs_remove_recursive(bat_priv->debug_dir);
-	bat_priv->debug_dir = NULL;
-out:
-#ifdef CONFIG_DEBUG_FS
-	return -ENOMEM;
-#else
-	return 0;
-#endif /* CONFIG_DEBUG_FS */
-}
-
-void debugfs_del_meshif(struct net_device *dev)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-
-	debug_log_cleanup(bat_priv);
-
-	if (bat_debugfs) {
-		debugfs_remove_recursive(bat_priv->debug_dir);
-		bat_priv->debug_dir = NULL;
-	}
-}
diff --git a/drivers/staging/batman-adv/bat_debugfs.h b/drivers/staging/batman-adv/bat_debugfs.h
deleted file mode 100644
index 72df532..0000000
--- a/drivers/staging/batman-adv/bat_debugfs.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-
-#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
-#define _NET_BATMAN_ADV_DEBUGFS_H_
-
-#define DEBUGFS_BAT_SUBDIR "batman_adv"
-
-void debugfs_init(void);
-void debugfs_destroy(void);
-int debugfs_add_meshif(struct net_device *dev);
-void debugfs_del_meshif(struct net_device *dev);
-
-#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
deleted file mode 100644
index bc17fb8..0000000
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bat_sysfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "vis.h"
-
-#define to_dev(obj)     container_of(obj, struct device, kobj)
-
-#define BAT_ATTR(_name, _mode, _show, _store)	\
-struct bat_attribute bat_attr_##_name = {	\
-	.attr = {.name = __stringify(_name),	\
-		 .mode = _mode },		\
-	.show   = _show,			\
-	.store  = _store,			\
-};
-
-static ssize_t show_aggr_ogms(struct kobject *kobj, struct attribute *attr,
-			     char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-	int aggr_status = atomic_read(&bat_priv->aggregation_enabled);
-
-	return sprintf(buff, "%s\n",
-		       aggr_status == 0 ? "disabled" : "enabled");
-}
-
-static ssize_t store_aggr_ogms(struct kobject *kobj, struct attribute *attr,
-			      char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	int aggr_tmp = -1;
-
-	if (((count == 2) && (buff[0] == '1')) ||
-	    (strncmp(buff, "enable", 6) == 0))
-		aggr_tmp = 1;
-
-	if (((count == 2) && (buff[0] == '0')) ||
-	    (strncmp(buff, "disable", 7) == 0))
-		aggr_tmp = 0;
-
-	if (aggr_tmp < 0) {
-		if (buff[count - 1] == '\n')
-			buff[count - 1] = '\0';
-
-		bat_info(net_dev,
-			 "Invalid parameter for 'aggregate OGM' setting"
-			 "received: %s\n", buff);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->aggregation_enabled) == aggr_tmp)
-		return count;
-
-	bat_info(net_dev, "Changing aggregation from: %s to: %s\n",
-		 atomic_read(&bat_priv->aggregation_enabled) == 1 ?
-		 "enabled" : "disabled", aggr_tmp == 1 ? "enabled" :
-		 "disabled");
-
-	atomic_set(&bat_priv->aggregation_enabled, (unsigned)aggr_tmp);
-	return count;
-}
-
-static ssize_t show_bond(struct kobject *kobj, struct attribute *attr,
-			     char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-	int bond_status = atomic_read(&bat_priv->bonding_enabled);
-
-	return sprintf(buff, "%s\n",
-		       bond_status == 0 ? "disabled" : "enabled");
-}
-
-static ssize_t store_bond(struct kobject *kobj, struct attribute *attr,
-			  char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	int bonding_enabled_tmp = -1;
-
-	if (((count == 2) && (buff[0] == '1')) ||
-	    (strncmp(buff, "enable", 6) == 0))
-		bonding_enabled_tmp = 1;
-
-	if (((count == 2) && (buff[0] == '0')) ||
-	    (strncmp(buff, "disable", 7) == 0))
-		bonding_enabled_tmp = 0;
-
-	if (bonding_enabled_tmp < 0) {
-		if (buff[count - 1] == '\n')
-			buff[count - 1] = '\0';
-
-		bat_err(net_dev,
-			"Invalid parameter for 'bonding' setting received: "
-			"%s\n", buff);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->bonding_enabled) == bonding_enabled_tmp)
-		return count;
-
-	bat_info(net_dev, "Changing bonding from: %s to: %s\n",
-		 atomic_read(&bat_priv->bonding_enabled) == 1 ?
-		 "enabled" : "disabled",
-		 bonding_enabled_tmp == 1 ? "enabled" : "disabled");
-
-	atomic_set(&bat_priv->bonding_enabled, (unsigned)bonding_enabled_tmp);
-	return count;
-}
-
-static ssize_t show_frag(struct kobject *kobj, struct attribute *attr,
-			     char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-	int frag_status = atomic_read(&bat_priv->frag_enabled);
-
-	return sprintf(buff, "%s\n",
-		       frag_status == 0 ? "disabled" : "enabled");
-}
-
-static ssize_t store_frag(struct kobject *kobj, struct attribute *attr,
-			  char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	int frag_enabled_tmp = -1;
-
-	if (((count == 2) && (buff[0] == '1')) ||
-	    (strncmp(buff, "enable", 6) == 0))
-		frag_enabled_tmp = 1;
-
-	if (((count == 2) && (buff[0] == '0')) ||
-	    (strncmp(buff, "disable", 7) == 0))
-		frag_enabled_tmp = 0;
-
-	if (frag_enabled_tmp < 0) {
-		if (buff[count - 1] == '\n')
-			buff[count - 1] = '\0';
-
-		bat_err(net_dev,
-			"Invalid parameter for 'fragmentation' setting on mesh"
-			"received: %s\n", buff);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->frag_enabled) == frag_enabled_tmp)
-		return count;
-
-	bat_info(net_dev, "Changing fragmentation from: %s to: %s\n",
-		 atomic_read(&bat_priv->frag_enabled) == 1 ?
-		 "enabled" : "disabled",
-		 frag_enabled_tmp == 1 ? "enabled" : "disabled");
-
-	atomic_set(&bat_priv->frag_enabled, (unsigned)frag_enabled_tmp);
-	update_min_mtu(net_dev);
-	return count;
-}
-
-static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
-			     char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-	int vis_mode = atomic_read(&bat_priv->vis_mode);
-
-	return sprintf(buff, "%s\n",
-		       vis_mode == VIS_TYPE_CLIENT_UPDATE ?
-							"client" : "server");
-}
-
-static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
-			      char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	unsigned long val;
-	int ret, vis_mode_tmp = -1;
-
-	ret = strict_strtoul(buff, 10, &val);
-
-	if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
-	    (strncmp(buff, "client", 6) == 0) ||
-	    (strncmp(buff, "off", 3) == 0))
-		vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
-
-	if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
-	    (strncmp(buff, "server", 6) == 0))
-		vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
-
-	if (vis_mode_tmp < 0) {
-		if (buff[count - 1] == '\n')
-			buff[count - 1] = '\0';
-
-		bat_info(net_dev,
-			 "Invalid parameter for 'vis mode' setting received: "
-			 "%s\n", buff);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
-		return count;
-
-	bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
-		 atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
-		 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
-		 "client" : "server");
-
-	atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp);
-	return count;
-}
-
-static ssize_t show_orig_interval(struct kobject *kobj, struct attribute *attr,
-				 char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buff, "%i\n",
-		       atomic_read(&bat_priv->orig_interval));
-}
-
-static ssize_t store_orig_interval(struct kobject *kobj, struct attribute *attr,
-				  char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	unsigned long orig_interval_tmp;
-	int ret;
-
-	ret = strict_strtoul(buff, 10, &orig_interval_tmp);
-	if (ret) {
-		bat_info(net_dev, "Invalid parameter for 'orig_interval' "
-			 "setting received: %s\n", buff);
-		return -EINVAL;
-	}
-
-	if (orig_interval_tmp < JITTER * 2) {
-		bat_info(net_dev, "New originator interval too small: %li "
-			 "(min: %i)\n", orig_interval_tmp, JITTER * 2);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->orig_interval) == orig_interval_tmp)
-		return count;
-
-	bat_info(net_dev, "Changing originator interval from: %i to: %li\n",
-		 atomic_read(&bat_priv->orig_interval),
-		 orig_interval_tmp);
-
-	atomic_set(&bat_priv->orig_interval, orig_interval_tmp);
-	return count;
-}
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-static ssize_t show_log_level(struct kobject *kobj, struct attribute *attr,
-			     char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
-	int log_level = atomic_read(&bat_priv->log_level);
-
-	return sprintf(buff, "%d\n", log_level);
-}
-
-static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
-			      char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	unsigned long log_level_tmp;
-	int ret;
-
-	ret = strict_strtoul(buff, 10, &log_level_tmp);
-	if (ret) {
-		bat_info(net_dev, "Invalid parameter for 'log_level' "
-			 "setting received: %s\n", buff);
-		return -EINVAL;
-	}
-
-	if (log_level_tmp > 3) {
-		bat_info(net_dev, "New log level too big: %li "
-			 "(max: %i)\n", log_level_tmp, 3);
-		return -EINVAL;
-	}
-
-	if (atomic_read(&bat_priv->log_level) == log_level_tmp)
-		return count;
-
-	bat_info(net_dev, "Changing log level from: %i to: %li\n",
-		 atomic_read(&bat_priv->log_level),
-		 log_level_tmp);
-
-	atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
-	return count;
-}
-#endif
-
-static BAT_ATTR(aggregated_ogms, S_IRUGO | S_IWUSR,
-		show_aggr_ogms, store_aggr_ogms);
-static BAT_ATTR(bonding, S_IRUGO | S_IWUSR, show_bond, store_bond);
-static BAT_ATTR(fragmentation, S_IRUGO | S_IWUSR, show_frag, store_frag);
-static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
-static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
-		show_orig_interval, store_orig_interval);
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-static BAT_ATTR(log_level, S_IRUGO | S_IWUSR, show_log_level, store_log_level);
-#endif
-
-static struct bat_attribute *mesh_attrs[] = {
-	&bat_attr_aggregated_ogms,
-	&bat_attr_bonding,
-	&bat_attr_fragmentation,
-	&bat_attr_vis_mode,
-	&bat_attr_orig_interval,
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-	&bat_attr_log_level,
-#endif
-	NULL,
-};
-
-int sysfs_add_meshif(struct net_device *dev)
-{
-	struct kobject *batif_kobject = &dev->dev.kobj;
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct bat_attribute **bat_attr;
-	int err;
-
-	bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
-						    batif_kobject);
-	if (!bat_priv->mesh_obj) {
-		bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
-			SYSFS_IF_MESH_SUBDIR);
-		goto out;
-	}
-
-	for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
-		err = sysfs_create_file(bat_priv->mesh_obj,
-					&((*bat_attr)->attr));
-		if (err) {
-			bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
-				dev->name, SYSFS_IF_MESH_SUBDIR,
-				((*bat_attr)->attr).name);
-			goto rem_attr;
-		}
-	}
-
-	return 0;
-
-rem_attr:
-	for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
-		sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
-	kobject_put(bat_priv->mesh_obj);
-	bat_priv->mesh_obj = NULL;
-out:
-	return -ENOMEM;
-}
-
-void sysfs_del_meshif(struct net_device *dev)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct bat_attribute **bat_attr;
-
-	for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
-		sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
-	kobject_put(bat_priv->mesh_obj);
-	bat_priv->mesh_obj = NULL;
-}
-
-static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
-			       char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
-	ssize_t length;
-
-	if (!batman_if)
-		return 0;
-
-	length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-			 "none" : batman_if->soft_iface->name);
-
-	hardif_put(batman_if);
-
-	return length;
-}
-
-static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
-				char *buff, size_t count)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
-	int status_tmp = -1;
-	int ret;
-
-	if (!batman_if)
-		return count;
-
-	if (buff[count - 1] == '\n')
-		buff[count - 1] = '\0';
-
-	if (strlen(buff) >= IFNAMSIZ) {
-		pr_err("Invalid parameter for 'mesh_iface' setting received: "
-		       "interface name too long '%s'\n", buff);
-		hardif_put(batman_if);
-		return -EINVAL;
-	}
-
-	if (strncmp(buff, "none", 4) == 0)
-		status_tmp = IF_NOT_IN_USE;
-	else
-		status_tmp = IF_I_WANT_YOU;
-
-	if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-	    (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
-		hardif_put(batman_if);
-		return count;
-	}
-
-	if (status_tmp == IF_NOT_IN_USE) {
-		rtnl_lock();
-		hardif_disable_interface(batman_if);
-		rtnl_unlock();
-		hardif_put(batman_if);
-		return count;
-	}
-
-	/* if the interface already is in use */
-	if (batman_if->if_status != IF_NOT_IN_USE) {
-		rtnl_lock();
-		hardif_disable_interface(batman_if);
-		rtnl_unlock();
-	}
-
-	ret = hardif_enable_interface(batman_if, buff);
-	hardif_put(batman_if);
-
-	return ret;
-}
-
-static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
-				 char *buff)
-{
-	struct device *dev = to_dev(kobj->parent);
-	struct net_device *net_dev = to_net_dev(dev);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
-	ssize_t length;
-
-	if (!batman_if)
-		return 0;
-
-	switch (batman_if->if_status) {
-	case IF_TO_BE_REMOVED:
-		length = sprintf(buff, "disabling\n");
-		break;
-	case IF_INACTIVE:
-		length = sprintf(buff, "inactive\n");
-		break;
-	case IF_ACTIVE:
-		length = sprintf(buff, "active\n");
-		break;
-	case IF_TO_BE_ACTIVATED:
-		length = sprintf(buff, "enabling\n");
-		break;
-	case IF_NOT_IN_USE:
-	default:
-		length = sprintf(buff, "not in use\n");
-		break;
-	}
-
-	hardif_put(batman_if);
-
-	return length;
-}
-
-static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
-		show_mesh_iface, store_mesh_iface);
-static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
-
-static struct bat_attribute *batman_attrs[] = {
-	&bat_attr_mesh_iface,
-	&bat_attr_iface_status,
-	NULL,
-};
-
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
-{
-	struct kobject *hardif_kobject = &dev->dev.kobj;
-	struct bat_attribute **bat_attr;
-	int err;
-
-	*hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
-						    hardif_kobject);
-
-	if (!*hardif_obj) {
-		bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
-			SYSFS_IF_BAT_SUBDIR);
-		goto out;
-	}
-
-	for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
-		err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
-		if (err) {
-			bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
-				dev->name, SYSFS_IF_BAT_SUBDIR,
-				((*bat_attr)->attr).name);
-			goto rem_attr;
-		}
-	}
-
-	return 0;
-
-rem_attr:
-	for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
-		sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
-out:
-	return -ENOMEM;
-}
-
-void sysfs_del_hardif(struct kobject **hardif_obj)
-{
-	kobject_put(*hardif_obj);
-	*hardif_obj = NULL;
-}
diff --git a/drivers/staging/batman-adv/bat_sysfs.h b/drivers/staging/batman-adv/bat_sysfs.h
deleted file mode 100644
index 7f186c0..0000000
--- a/drivers/staging/batman-adv/bat_sysfs.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-
-#ifndef _NET_BATMAN_ADV_SYSFS_H_
-#define _NET_BATMAN_ADV_SYSFS_H_
-
-#define SYSFS_IF_MESH_SUBDIR "mesh"
-#define SYSFS_IF_BAT_SUBDIR "batman_adv"
-
-struct bat_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
-			char *buf);
-	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
-			 char *buf, size_t count);
-};
-
-int sysfs_add_meshif(struct net_device *dev);
-void sysfs_del_meshif(struct net_device *dev);
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
-void sysfs_del_hardif(struct kobject **hardif_obj);
-
-#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/drivers/staging/batman-adv/bitarray.c b/drivers/staging/batman-adv/bitarray.c
deleted file mode 100644
index 814274f..0000000
--- a/drivers/staging/batman-adv/bitarray.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bitarray.h"
-
-#include <linux/bitops.h>
-
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
-		       uint32_t curr_seqno)
-{
-	int32_t diff, word_offset, word_num;
-
-	diff = last_seqno - curr_seqno;
-	if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
-		return 0;
-	} else {
-		/* which word */
-		word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
-		/* which position in the selected word */
-		word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
-
-		if (seq_bits[word_num] & 1 << word_offset)
-			return 1;
-		else
-			return 0;
-	}
-}
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n)
-{
-	int32_t word_offset, word_num;
-
-	/* if too old, just drop it */
-	if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
-		return;
-
-	/* which word */
-	word_num = n / WORD_BIT_SIZE;
-	/* which position in the selected word */
-	word_offset = n % WORD_BIT_SIZE;
-
-	seq_bits[word_num] |= 1 << word_offset;	/* turn the position on */
-}
-
-/* shift the packet array by n places. */
-static void bit_shift(TYPE_OF_WORD *seq_bits, int32_t n)
-{
-	int32_t word_offset, word_num;
-	int32_t i;
-
-	if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
-		return;
-
-	word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */
-	word_num = n / WORD_BIT_SIZE;	/* shift over how much (full) words */
-
-	for (i = NUM_WORDS - 1; i > word_num; i--) {
-		/* going from old to new, so we don't overwrite the data we copy
-		 * from.
-		 *
-		 * left is high, right is low: FEDC BA98 7654 3210
-		 *					  ^^ ^^
-		 *			       vvvv
-		 * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
-		 * word_offset==WORD_BIT_SIZE/2 ????? in this example.
-		 * (=24 bits)
-		 *
-		 * our desired output would be: 9876 5432 1000 0000
-		 * */
-
-		seq_bits[i] =
-			(seq_bits[i - word_num] << word_offset) +
-			/* take the lower port from the left half, shift it left
-			 * to its final position */
-			(seq_bits[i - word_num - 1] >>
-			 (WORD_BIT_SIZE-word_offset));
-		/* and the upper part of the right half and shift it left to
-		 * it's position */
-		/* for our example that would be: word[0] = 9800 + 0076 =
-		 * 9876 */
-	}
-	/* now for our last word, i==word_num, we only have the it's "left"
-	 * half. that's the 1000 word in our example.*/
-
-	seq_bits[i] = (seq_bits[i - word_num] << word_offset);
-
-	/* pad the rest with 0, if there is anything */
-	i--;
-
-	for (; i >= 0; i--)
-		seq_bits[i] = 0;
-}
-
-static void bit_reset_window(TYPE_OF_WORD *seq_bits)
-{
-	int i;
-	for (i = 0; i < NUM_WORDS; i++)
-		seq_bits[i] = 0;
-}
-
-
-/* receive and process one packet within the sequence number window.
- *
- * returns:
- *  1 if the window was moved (either new or very old)
- *  0 if the window was not moved/shifted.
- */
-char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
-		    int32_t seq_num_diff, int8_t set_mark)
-{
-	struct bat_priv *bat_priv = (struct bat_priv *)priv;
-
-	/* sequence number is slightly older. We already got a sequence number
-	 * higher than this one, so we just mark it. */
-
-	if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
-		if (set_mark)
-			bit_mark(seq_bits, -seq_num_diff);
-		return 0;
-	}
-
-	/* sequence number is slightly newer, so we shift the window and
-	 * set the mark if required */
-
-	if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
-		bit_shift(seq_bits, seq_num_diff);
-
-		if (set_mark)
-			bit_mark(seq_bits, 0);
-		return 1;
-	}
-
-	/* sequence number is much newer, probably missed a lot of packets */
-
-	if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE)
-		|| (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"We missed a lot of packets (%i) !\n",
-			seq_num_diff - 1);
-		bit_reset_window(seq_bits);
-		if (set_mark)
-			bit_mark(seq_bits, 0);
-		return 1;
-	}
-
-	/* received a much older packet. The other host either restarted
-	 * or the old packet got delayed somewhere in the network. The
-	 * packet should be dropped without calling this function if the
-	 * seqno window is protected. */
-
-	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
-		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
-
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Other host probably restarted!\n");
-
-		bit_reset_window(seq_bits);
-		if (set_mark)
-			bit_mark(seq_bits, 0);
-
-		return 1;
-	}
-
-	/* never reached */
-	return 0;
-}
-
-/* count the hamming weight, how many good packets did we receive? just count
- * the 1's.
- */
-int bit_packet_count(TYPE_OF_WORD *seq_bits)
-{
-	int i, hamming = 0;
-
-	for (i = 0; i < NUM_WORDS; i++)
-		hamming += hweight_long(seq_bits[i]);
-
-	return hamming;
-}
diff --git a/drivers/staging/batman-adv/bitarray.h b/drivers/staging/batman-adv/bitarray.h
deleted file mode 100644
index 77b1e61..0000000
--- a/drivers/staging/batman-adv/bitarray.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_BITARRAY_H_
-#define _NET_BATMAN_ADV_BITARRAY_H_
-
-/* you should choose something big, if you don't want to waste cpu
- * and keep the type in sync with bit_packet_count */
-#define TYPE_OF_WORD unsigned long
-#define WORD_BIT_SIZE (sizeof(TYPE_OF_WORD) * 8)
-
-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
-					   uint32_t curr_seqno);
-
-/* turn corresponding bit on, so we can remember that we got the packet */
-void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n);
-
-
-/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old  */
-char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
-		    int32_t seq_num_diff, int8_t set_mark);
-
-/* count the hamming weight, how many good packets did we receive? */
-int  bit_packet_count(TYPE_OF_WORD *seq_bits);
-
-#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
deleted file mode 100644
index d85de82..0000000
--- a/drivers/staging/batman-adv/hard-interface.c
+++ /dev/null
@@ -1,647 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "hard-interface.h"
-#include "soft-interface.h"
-#include "send.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "bat_sysfs.h"
-#include "originator.h"
-#include "hash.h"
-
-#include <linux/if_arp.h>
-
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
-
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
-
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
-{
-	struct batman_if *batman_if;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->net_dev == net_dev)
-			goto out;
-	}
-
-	batman_if = NULL;
-
-out:
-	if (batman_if)
-		hardif_hold(batman_if);
-
-	rcu_read_unlock();
-	return batman_if;
-}
-
-static int is_valid_iface(struct net_device *net_dev)
-{
-	if (net_dev->flags & IFF_LOOPBACK)
-		return 0;
-
-	if (net_dev->type != ARPHRD_ETHER)
-		return 0;
-
-	if (net_dev->addr_len != ETH_ALEN)
-		return 0;
-
-	/* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
-		return 0;
-#else
-	if (net_dev->hard_start_xmit == interface_tx)
-		return 0;
-#endif
-
-	/* Device is being bridged */
-	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
-		return 0; */
-
-	return 1;
-}
-
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
-{
-	struct batman_if *batman_if;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
-			continue;
-
-		if (batman_if->if_status == IF_ACTIVE)
-			goto out;
-	}
-
-	batman_if = NULL;
-
-out:
-	if (batman_if)
-		hardif_hold(batman_if);
-
-	rcu_read_unlock();
-	return batman_if;
-}
-
-static void update_primary_addr(struct bat_priv *bat_priv)
-{
-	struct vis_packet *vis_packet;
-
-	vis_packet = (struct vis_packet *)
-				bat_priv->my_vis_info->skb_packet->data;
-	memcpy(vis_packet->vis_orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(vis_packet->sender_orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-}
-
-static void set_primary_if(struct bat_priv *bat_priv,
-			   struct batman_if *batman_if)
-{
-	struct batman_packet *batman_packet;
-	struct batman_if *old_if;
-
-	if (batman_if)
-		hardif_hold(batman_if);
-
-	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = batman_if;
-
-	if (old_if)
-		hardif_put(old_if);
-
-	if (!bat_priv->primary_if)
-		return;
-
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
-	batman_packet->flags = PRIMARIES_FIRST_HOP;
-	batman_packet->ttl = TTL;
-
-	update_primary_addr(bat_priv);
-
-	/***
-	 * hacky trick to make sure that we send the HNA information via
-	 * our new primary interface
-	 */
-	atomic_set(&bat_priv->hna_local_changed, 1);
-}
-
-static bool hardif_is_iface_up(struct batman_if *batman_if)
-{
-	if (batman_if->net_dev->flags & IFF_UP)
-		return true;
-
-	return false;
-}
-
-static void update_mac_addresses(struct batman_if *batman_if)
-{
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
-}
-
-static void check_known_mac_addr(struct net_device *net_dev)
-{
-	struct batman_if *batman_if;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
-			continue;
-
-		if (batman_if->net_dev == net_dev)
-			continue;
-
-		if (!compare_orig(batman_if->net_dev->dev_addr,
-				  net_dev->dev_addr))
-			continue;
-
-		pr_warning("The newly added mac address (%pM) already exists "
-			   "on: %s\n", net_dev->dev_addr,
-			   batman_if->net_dev->name);
-		pr_warning("It is strongly recommended to keep mac addresses "
-			   "unique to avoid problems!\n");
-	}
-	rcu_read_unlock();
-}
-
-int hardif_min_mtu(struct net_device *soft_iface)
-{
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct batman_if *batman_if;
-	/* allow big frames if all devices are capable to do so
-	 * (have MTU > 1500 + BAT_HEADER_LEN) */
-	int min_mtu = ETH_DATA_LEN;
-
-	if (atomic_read(&bat_priv->frag_enabled))
-		goto out;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
-			continue;
-
-		if (batman_if->soft_iface != soft_iface)
-			continue;
-
-		min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
-			      min_mtu);
-	}
-	rcu_read_unlock();
-out:
-	return min_mtu;
-}
-
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
-void update_min_mtu(struct net_device *soft_iface)
-{
-	int min_mtu;
-
-	min_mtu = hardif_min_mtu(soft_iface);
-	if (soft_iface->mtu != min_mtu)
-		soft_iface->mtu = min_mtu;
-}
-
-static void hardif_activate_interface(struct batman_if *batman_if)
-{
-	struct bat_priv *bat_priv;
-
-	if (batman_if->if_status != IF_INACTIVE)
-		return;
-
-	bat_priv = netdev_priv(batman_if->soft_iface);
-
-	update_mac_addresses(batman_if);
-	batman_if->if_status = IF_TO_BE_ACTIVATED;
-
-	/**
-	 * the first active interface becomes our primary interface or
-	 * the next active interface after the old primay interface was removed
-	 */
-	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, batman_if);
-
-	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-		 batman_if->net_dev->name);
-
-	update_min_mtu(batman_if->soft_iface);
-	return;
-}
-
-static void hardif_deactivate_interface(struct batman_if *batman_if)
-{
-	if ((batman_if->if_status != IF_ACTIVE) &&
-	   (batman_if->if_status != IF_TO_BE_ACTIVATED))
-		return;
-
-	batman_if->if_status = IF_INACTIVE;
-
-	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-		 batman_if->net_dev->name);
-
-	update_min_mtu(batman_if->soft_iface);
-}
-
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
-{
-	struct bat_priv *bat_priv;
-	struct batman_packet *batman_packet;
-
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		goto out;
-
-	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
-
-	if (!batman_if->soft_iface) {
-		batman_if->soft_iface = softif_create(iface_name);
-
-		if (!batman_if->soft_iface)
-			goto err;
-
-		/* dev_get_by_name() increases the reference counter for us */
-		dev_hold(batman_if->soft_iface);
-	}
-
-	bat_priv = netdev_priv(batman_if->soft_iface);
-	batman_if->packet_len = BAT_PACKET_LEN;
-	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
-
-	if (!batman_if->packet_buff) {
-		bat_err(batman_if->soft_iface, "Can't add interface packet "
-			"(%s): out of memory\n", batman_if->net_dev->name);
-		goto err;
-	}
-
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
-	batman_packet->packet_type = BAT_PACKET;
-	batman_packet->version = COMPAT_VERSION;
-	batman_packet->flags = 0;
-	batman_packet->ttl = 2;
-	batman_packet->tq = TQ_MAX_VALUE;
-	batman_packet->num_hna = 0;
-
-	batman_if->if_num = bat_priv->num_ifaces;
-	bat_priv->num_ifaces++;
-	batman_if->if_status = IF_INACTIVE;
-	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
-
-	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-	batman_if->batman_adv_ptype.func = batman_skb_recv;
-	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-	hardif_hold(batman_if);
-	dev_add_pack(&batman_if->batman_adv_ptype);
-
-	atomic_set(&batman_if->seqno, 1);
-	atomic_set(&batman_if->frag_seqno, 1);
-	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-		 batman_if->net_dev->name);
-
-	if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
-		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
-			"The MTU of interface %s is too small (%i) to handle "
-			"the transport of batman-adv packets. Packets going "
-			"over this interface will be fragmented on layer2 "
-			"which could impact the performance. Setting the MTU "
-			"to %zi would solve the problem.\n",
-			batman_if->net_dev->name, batman_if->net_dev->mtu,
-			ETH_DATA_LEN + BAT_HEADER_LEN);
-
-	if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
-		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
-			"The MTU of interface %s is too small (%i) to handle "
-			"the transport of batman-adv packets. If you experience"
-			" problems getting traffic through try increasing the "
-			"MTU to %zi.\n",
-			batman_if->net_dev->name, batman_if->net_dev->mtu,
-			ETH_DATA_LEN + BAT_HEADER_LEN);
-
-	if (hardif_is_iface_up(batman_if))
-		hardif_activate_interface(batman_if);
-	else
-		bat_err(batman_if->soft_iface, "Not using interface %s "
-			"(retrying later): interface not active\n",
-			batman_if->net_dev->name);
-
-	/* begin scheduling originator messages on that interface */
-	schedule_own_packet(batman_if);
-
-out:
-	return 0;
-
-err:
-	return -ENOMEM;
-}
-
-void hardif_disable_interface(struct batman_if *batman_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-
-	if (batman_if->if_status == IF_ACTIVE)
-		hardif_deactivate_interface(batman_if);
-
-	if (batman_if->if_status != IF_INACTIVE)
-		return;
-
-	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-		 batman_if->net_dev->name);
-	dev_remove_pack(&batman_if->batman_adv_ptype);
-	hardif_put(batman_if);
-
-	bat_priv->num_ifaces--;
-	orig_hash_del_if(batman_if, bat_priv->num_ifaces);
-
-	if (batman_if == bat_priv->primary_if) {
-		struct batman_if *new_if;
-
-		new_if = get_active_batman_if(batman_if->soft_iface);
-		set_primary_if(bat_priv, new_if);
-
-		if (new_if)
-			hardif_put(new_if);
-	}
-
-	kfree(batman_if->packet_buff);
-	batman_if->packet_buff = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
-
-	/* delete all references to this batman_if */
-	purge_orig_ref(bat_priv);
-	purge_outstanding_packets(bat_priv, batman_if);
-	dev_put(batman_if->soft_iface);
-
-	/* nobody uses this interface anymore */
-	if (!bat_priv->num_ifaces)
-		softif_destroy(batman_if->soft_iface);
-
-	batman_if->soft_iface = NULL;
-}
-
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
-{
-	struct batman_if *batman_if;
-	int ret;
-
-	ret = is_valid_iface(net_dev);
-	if (ret != 1)
-		goto out;
-
-	dev_hold(net_dev);
-
-	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-	if (!batman_if) {
-		pr_err("Can't add interface (%s): out of memory\n",
-		       net_dev->name);
-		goto release_dev;
-	}
-
-	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
-	if (ret)
-		goto free_if;
-
-	batman_if->if_num = -1;
-	batman_if->net_dev = net_dev;
-	batman_if->soft_iface = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
-	INIT_LIST_HEAD(&batman_if->list);
-	atomic_set(&batman_if->refcnt, 0);
-	hardif_hold(batman_if);
-
-	check_known_mac_addr(batman_if->net_dev);
-
-	spin_lock(&if_list_lock);
-	list_add_tail_rcu(&batman_if->list, &if_list);
-	spin_unlock(&if_list_lock);
-
-	/* extra reference for return */
-	hardif_hold(batman_if);
-	return batman_if;
-
-free_if:
-	kfree(batman_if);
-release_dev:
-	dev_put(net_dev);
-out:
-	return NULL;
-}
-
-static void hardif_remove_interface(struct batman_if *batman_if)
-{
-	/* first deactivate interface */
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		hardif_disable_interface(batman_if);
-
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		return;
-
-	batman_if->if_status = IF_TO_BE_REMOVED;
-	synchronize_rcu();
-	sysfs_del_hardif(&batman_if->hardif_obj);
-	hardif_put(batman_if);
-}
-
-void hardif_remove_interfaces(void)
-{
-	struct batman_if *batman_if, *batman_if_tmp;
-	struct list_head if_queue;
-
-	INIT_LIST_HEAD(&if_queue);
-
-	spin_lock(&if_list_lock);
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-		list_del_rcu(&batman_if->list);
-		list_add_tail(&batman_if->list, &if_queue);
-	}
-	spin_unlock(&if_list_lock);
-
-	rtnl_lock();
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-		hardif_remove_interface(batman_if);
-	}
-	rtnl_unlock();
-}
-
-static int hard_if_event(struct notifier_block *this,
-			 unsigned long event, void *ptr)
-{
-	struct net_device *net_dev = (struct net_device *)ptr;
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
-	struct bat_priv *bat_priv;
-
-	if (!batman_if && event == NETDEV_REGISTER)
-		batman_if = hardif_add_interface(net_dev);
-
-	if (!batman_if)
-		goto out;
-
-	switch (event) {
-	case NETDEV_UP:
-		hardif_activate_interface(batman_if);
-		break;
-	case NETDEV_GOING_DOWN:
-	case NETDEV_DOWN:
-		hardif_deactivate_interface(batman_if);
-		break;
-	case NETDEV_UNREGISTER:
-		spin_lock(&if_list_lock);
-		list_del_rcu(&batman_if->list);
-		spin_unlock(&if_list_lock);
-
-		hardif_remove_interface(batman_if);
-		break;
-	case NETDEV_CHANGEMTU:
-		if (batman_if->soft_iface)
-			update_min_mtu(batman_if->soft_iface);
-		break;
-	case NETDEV_CHANGEADDR:
-		if (batman_if->if_status == IF_NOT_IN_USE) {
-			hardif_put(batman_if);
-			goto out;
-		}
-
-		check_known_mac_addr(batman_if->net_dev);
-		update_mac_addresses(batman_if);
-
-		bat_priv = netdev_priv(batman_if->soft_iface);
-		if (batman_if == bat_priv->primary_if)
-			update_primary_addr(bat_priv);
-		break;
-	default:
-		break;
-	}
-	hardif_put(batman_if);
-
-out:
-	return NOTIFY_DONE;
-}
-
-/* receive a packet with the batman ethertype coming on a hard
- * interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
-	struct packet_type *ptype, struct net_device *orig_dev)
-{
-	struct bat_priv *bat_priv;
-	struct batman_packet *batman_packet;
-	struct batman_if *batman_if;
-	int ret;
-
-	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
-	skb = skb_share_check(skb, GFP_ATOMIC);
-
-	/* skb was released by skb_share_check() */
-	if (!skb)
-		goto err_out;
-
-	/* packet should hold at least type and version */
-	if (unlikely(!pskb_may_pull(skb, 2)))
-		goto err_free;
-
-	/* expect a valid ethernet header here. */
-	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
-				|| !skb_mac_header(skb)))
-		goto err_free;
-
-	if (!batman_if->soft_iface)
-		goto err_free;
-
-	bat_priv = netdev_priv(batman_if->soft_iface);
-
-	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
-		goto err_free;
-
-	/* discard frames on not active interfaces */
-	if (batman_if->if_status != IF_ACTIVE)
-		goto err_free;
-
-	batman_packet = (struct batman_packet *)skb->data;
-
-	if (batman_packet->version != COMPAT_VERSION) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: incompatible batman version (%i)\n",
-			batman_packet->version);
-		goto err_free;
-	}
-
-	/* all receive handlers return whether they received or reused
-	 * the supplied skb. if not, we have to free the skb. */
-
-	switch (batman_packet->packet_type) {
-		/* batman originator packet */
-	case BAT_PACKET:
-		ret = recv_bat_packet(skb, batman_if);
-		break;
-
-		/* batman icmp packet */
-	case BAT_ICMP:
-		ret = recv_icmp_packet(skb, batman_if);
-		break;
-
-		/* unicast packet */
-	case BAT_UNICAST:
-		ret = recv_unicast_packet(skb, batman_if);
-		break;
-
-		/* fragmented unicast packet */
-	case BAT_UNICAST_FRAG:
-		ret = recv_ucast_frag_packet(skb, batman_if);
-		break;
-
-		/* broadcast packet */
-	case BAT_BCAST:
-		ret = recv_bcast_packet(skb, batman_if);
-		break;
-
-		/* vis packet */
-	case BAT_VIS:
-		ret = recv_vis_packet(skb, batman_if);
-		break;
-	default:
-		ret = NET_RX_DROP;
-	}
-
-	if (ret == NET_RX_DROP)
-		kfree_skb(skb);
-
-	/* return NET_RX_SUCCESS in any case as we
-	 * most probably dropped the packet for
-	 * routing-logical reasons. */
-
-	return NET_RX_SUCCESS;
-
-err_free:
-	kfree_skb(skb);
-err_out:
-	return NET_RX_DROP;
-}
-
-struct notifier_block hard_if_notifier = {
-	.notifier_call = hard_if_event,
-};
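
The notifier block above is how the module tracks hard-interface lifecycle events; batman_init() in main.c registers it with register_netdevice_notifier(). Below is a minimal sketch of the same pattern, assuming standard 2.6-era netdevice notifier semantics; the names example_event/example_notifier are hypothetical.

/* Sketch only: a minimal netdevice notifier following the pattern of
 * hard_if_event() above. On 2.6-era kernels the notifier's ptr argument
 * is the net_device itself. */
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = (struct net_device *)ptr;

	switch (event) {
	case NETDEV_REGISTER:
		pr_info("%s registered\n", net_dev->name);
		break;
	case NETDEV_UNREGISTER:
		pr_info("%s unregistered\n", net_dev->name);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_event,
};

/* register_netdevice_notifier(&example_notifier) from module init and
 * unregister_netdevice_notifier(&example_notifier) from module exit,
 * exactly as batman_init()/batman_exit() do with hard_if_notifier. */

The real hard_if_event() additionally balances the reference returned by get_batman_if_by_netdev() with hardif_put() before returning.
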
diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h
deleted file mode 100644
index d550889..0000000
--- a/drivers/staging/batman-adv/hard-interface.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
-#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
-
-#define IF_NOT_IN_USE 0
-#define IF_TO_BE_REMOVED 1
-#define IF_INACTIVE 2
-#define IF_ACTIVE 3
-#define IF_TO_BE_ACTIVATED 4
-#define IF_I_WANT_YOU 5
-
-extern struct notifier_block hard_if_notifier;
-
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
-void hardif_disable_interface(struct batman_if *batman_if);
-void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
-				struct net_device *dev,
-				struct packet_type *ptype,
-				struct net_device *orig_dev);
-int hardif_min_mtu(struct net_device *soft_iface);
-void update_min_mtu(struct net_device *soft_iface);
-
-static inline void hardif_hold(struct batman_if *batman_if)
-{
-	atomic_inc(&batman_if->refcnt);
-}
-
-static inline void hardif_put(struct batman_if *batman_if)
-{
-	if (atomic_dec_and_test(&batman_if->refcnt)) {
-		dev_put(batman_if->net_dev);
-		kfree(batman_if);
-	}
-}
-
-#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
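
hardif_hold()/hardif_put() above implement manual reference counting on batman_if: the last put drops the device reference and frees the structure. A hedged sketch of the lookup/release pattern expected from callers of this header follows; example_use is hypothetical.

/* Sketch only: balancing the reference returned by get_batman_if_by_netdev()
 * with hardif_put(). */
static void example_use(struct net_device *net_dev)
{
	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);

	if (!batman_if)
		return;

	/* ... read batman_if->if_status, batman_if->net_dev, ... */

	hardif_put(batman_if);	/* last put frees batman_if, drops dev ref */
}
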
diff --git a/drivers/staging/batman-adv/hash.c b/drivers/staging/batman-adv/hash.c
deleted file mode 100644
index 8ef26eb..0000000
--- a/drivers/staging/batman-adv/hash.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "hash.h"
-
-/* clears the hash */
-static void hash_init(struct hashtable_t *hash)
-{
-	int i;
-
-	hash->elements = 0;
-
-	for (i = 0 ; i < hash->size; i++)
-		hash->table[i] = NULL;
-}
-
-/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
- * called to remove the elements inside of the hash.  if you don't remove the
- * elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg)
-{
-	struct element_t *bucket, *last_bucket;
-	int i;
-
-	for (i = 0; i < hash->size; i++) {
-		bucket = hash->table[i];
-
-		while (bucket != NULL) {
-			if (free_cb != NULL)
-				free_cb(bucket->data, arg);
-
-			last_bucket = bucket;
-			bucket = bucket->next;
-			kfree(last_bucket);
-		}
-	}
-
-	hash_destroy(hash);
-}
-
-/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash)
-{
-	kfree(hash->table);
-	kfree(hash);
-}
-
-/* iterate through the hash. First element is selected if an iterator
- * initialized with HASHIT() is supplied as iter. Use the returned
- * (or supplied) iterator to access the elements until hash_iterate returns
- * NULL. */
-
-struct hash_it_t *hash_iterate(struct hashtable_t *hash,
-			       struct hash_it_t *iter)
-{
-	if (!hash)
-		return NULL;
-	if (!iter)
-		return NULL;
-
-	/* sanity checks first (if our bucket got deleted in the last
-	 * iteration): */
-	if (iter->bucket != NULL) {
-		if (iter->first_bucket != NULL) {
-			/* we're on the first element and it got removed after
-			 * the last iteration. */
-			if ((*iter->first_bucket) != iter->bucket) {
-				/* there are still other elements in the list */
-				if ((*iter->first_bucket) != NULL) {
-					iter->prev_bucket = NULL;
-					iter->bucket = (*iter->first_bucket);
-					iter->first_bucket =
-						&hash->table[iter->index];
-					return iter;
-				} else {
-					iter->bucket = NULL;
-				}
-			}
-		} else if (iter->prev_bucket != NULL) {
-			/*
-			* we're not on the first element, and the bucket got
-			* removed after the last iteration.  the last bucket's
-			* next pointer is not pointing to our actual bucket
-			* anymore.  select the next.
-			*/
-			if (iter->prev_bucket->next != iter->bucket)
-				iter->bucket = iter->prev_bucket;
-		}
-	}
-
-	/* now as we are sane, select the next one if there is one */
-	if (iter->bucket != NULL) {
-		if (iter->bucket->next != NULL) {
-			iter->prev_bucket = iter->bucket;
-			iter->bucket = iter->bucket->next;
-			iter->first_bucket = NULL;
-			return iter;
-		}
-	}
-
-	/* if not returned yet, we've reached the last one on the index and have
-	 * to search forward */
-	iter->index++;
-	/* go through the entries of the hash table */
-	while (iter->index < hash->size) {
-		if ((hash->table[iter->index]) != NULL) {
-			iter->prev_bucket = NULL;
-			iter->bucket = hash->table[iter->index];
-			iter->first_bucket = &hash->table[iter->index];
-			return iter;
-		} else {
-			iter->index++;
-		}
-	}
-
-	/* nothing to iterate over anymore */
-	return NULL;
-}
-
-/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
-			     hashdata_choose_cb choose)
-{
-	struct hashtable_t *hash;
-
-	hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC);
-
-	if (hash == NULL)
-		return NULL;
-
-	hash->size = size;
-	hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
-
-	if (hash->table == NULL) {
-		kfree(hash);
-		return NULL;
-	}
-
-	hash_init(hash);
-
-	hash->compare = compare;
-	hash->choose = choose;
-
-	return hash;
-}
-
-/* adds data to the hashtable. returns 0 on success, -1 on error */
-int hash_add(struct hashtable_t *hash, void *data)
-{
-	int index;
-	struct element_t *bucket, *prev_bucket = NULL;
-
-	if (!hash)
-		return -1;
-
-	index = hash->choose(data, hash->size);
-	bucket = hash->table[index];
-
-	while (bucket != NULL) {
-		if (hash->compare(bucket->data, data))
-			return -1;
-
-		prev_bucket = bucket;
-		bucket = bucket->next;
-	}
-
-	/* found the tail of the list, add new element */
-	bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC);
-
-	if (bucket == NULL)
-		return -1;
-
-	bucket->data = data;
-	bucket->next = NULL;
-
-	/* and link it */
-	if (prev_bucket == NULL)
-		hash->table[index] = bucket;
-	else
-		prev_bucket->next = bucket;
-
-	hash->elements++;
-	return 0;
-}
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-void *hash_find(struct hashtable_t *hash, void *keydata)
-{
-	int index;
-	struct element_t *bucket;
-
-	if (!hash)
-		return NULL;
-
-	index = hash->choose(keydata , hash->size);
-	bucket = hash->table[index];
-
-	while (bucket != NULL) {
-		if (hash->compare(bucket->data, keydata))
-			return bucket->data;
-
-		bucket = bucket->next;
-	}
-
-	return NULL;
-}
-
-/* remove bucket (this might be used in hash_iterate() if you already found the
- * bucket you want to delete and don't need the overhead to find it again with
- * hash_remove()). But usually, you don't want to use this function, as it
- * fiddles with hash-internals. */
-void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t)
-{
-	void *data_save;
-
-	data_save = hash_it_t->bucket->data;
-
-	if (hash_it_t->prev_bucket != NULL)
-		hash_it_t->prev_bucket->next = hash_it_t->bucket->next;
-	else if (hash_it_t->first_bucket != NULL)
-		(*hash_it_t->first_bucket) = hash_it_t->bucket->next;
-
-	kfree(hash_it_t->bucket);
-	hash->elements--;
-
-	return data_save;
-}
-
-/* removes data from hash, if found. returns pointer to data on success, so you
- * can remove the used structure yourself, or NULL on error.  data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing. */
-void *hash_remove(struct hashtable_t *hash, void *data)
-{
-	struct hash_it_t hash_it_t;
-
-	hash_it_t.index = hash->choose(data, hash->size);
-	hash_it_t.bucket = hash->table[hash_it_t.index];
-	hash_it_t.prev_bucket = NULL;
-
-	while (hash_it_t.bucket != NULL) {
-		if (hash->compare(hash_it_t.bucket->data, data)) {
-			hash_it_t.first_bucket =
-				(hash_it_t.bucket ==
-				 hash->table[hash_it_t.index] ?
-				 &hash->table[hash_it_t.index] : NULL);
-			return hash_remove_bucket(hash, &hash_it_t);
-		}
-
-		hash_it_t.prev_bucket = hash_it_t.bucket;
-		hash_it_t.bucket = hash_it_t.bucket->next;
-	}
-
-	return NULL;
-}
-
-/* resize the hash, returns the pointer to the new hash or NULL on
- * error. removes the old hash on success. */
-struct hashtable_t *hash_resize(struct hashtable_t *hash, int size)
-{
-	struct hashtable_t *new_hash;
-	struct element_t *bucket;
-	int i;
-
-	/* initialize a new hash with the new size */
-	new_hash = hash_new(size, hash->compare, hash->choose);
-
-	if (new_hash == NULL)
-		return NULL;
-
-	/* copy the elements */
-	for (i = 0; i < hash->size; i++) {
-		bucket = hash->table[i];
-
-		while (bucket != NULL) {
-			hash_add(new_hash, bucket->data);
-			bucket = bucket->next;
-		}
-	}
-
-	/* remove hash and any overflow buckets but not the content
-	 * itself. */
-	hash_delete(hash, NULL, NULL);
-
-	return new_hash;
-}
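
The hash API above is generic: the caller stores opaque pointers and supplies compare/choose callbacks keyed on the start of the stored structure. A hedged usage sketch follows, with hypothetical element and callback names (batman-adv's real callbacks are compare_orig()/choose_orig() in main.c further down).

/* Sketch only, assuming the usual kernel headers; my_entry, my_compare,
 * my_choose and example() are hypothetical. The key must sit at the start
 * of the stored structure so that lookups by bare key work. */
struct my_entry {
	uint8_t key[ETH_ALEN];
	int value;
};

static int my_compare(void *data1, void *data2)
{
	return memcmp(data1, data2, ETH_ALEN) == 0;
}

static int my_choose(void *data, int size)
{
	/* any mapping of the key onto [0, size) will do */
	return ((uint8_t *)data)[ETH_ALEN - 1] % size;
}

static void example(struct my_entry *entry)
{
	struct hashtable_t *hash = hash_new(128, my_compare, my_choose);

	if (!hash)
		return;

	if (hash_add(hash, entry) < 0) {	/* -1 on duplicate or OOM */
		hash_destroy(hash);
		return;
	}

	if (hash_find(hash, entry->key))	/* lookup by key only */
		entry->value++;

	/* frees buckets and table; the stored entries stay with the caller */
	hash_delete(hash, NULL, NULL);
}
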
diff --git a/drivers/staging/batman-adv/hash.h b/drivers/staging/batman-adv/hash.h
deleted file mode 100644
index 2c8e176..0000000
--- a/drivers/staging/batman-adv/hash.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_HASH_H_
-#define _NET_BATMAN_ADV_HASH_H_
-
-#define HASHIT(name) struct hash_it_t name = { \
-		.index = -1, .bucket = NULL, \
-		.prev_bucket = NULL, \
-		.first_bucket = NULL }
-
-
-typedef int (*hashdata_compare_cb)(void *, void *);
-typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *, void *);
-
-struct element_t {
-	void *data;		/* pointer to the data */
-	struct element_t *next;	/* overflow bucket pointer */
-};
-
-struct hash_it_t {
-	int index;
-	struct element_t *bucket;
-	struct element_t *prev_bucket;
-	struct element_t **first_bucket;
-};
-
-struct hashtable_t {
-	struct element_t **table;   /* the hashtable itself, with the buckets */
-	int elements;		    /* number of elements registered */
-	int size;		    /* size of hashtable */
-	hashdata_compare_cb compare;/* callback to a compare function.  should
-				     * compare 2 element datas for their keys,
-				     * return 0 if same and not 0 if not
-				     * same */
-	hashdata_choose_cb choose;  /* the hashfunction, should return an index
-				     * based on the key in the data of the first
-				     * argument and the size the second */
-};
-
-/* allocates and clears the hash */
-struct hashtable_t *hash_new(int size, hashdata_compare_cb compare,
-			     hashdata_choose_cb choose);
-
-/* remove bucket (this might be used in hash_iterate() if you already found the
- * bucket you want to delete and don't need the overhead to find it again with
- * hash_remove()).  But usually, you don't want to use this function, as it
- * fiddles with hash-internals. */
-void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t);
-
-/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
- * called to remove the elements inside of the hash.  if you don't remove the
- * elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg);
-
-/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash);
-
-/* adds data to the hashtable. returns 0 on success, -1 on error */
-int hash_add(struct hashtable_t *hash, void *data);
-
-/* removes data from hash, if found. returns pointer to data on success, so you
- * can remove the used structure yourself, or NULL on error.  data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing. */
-void *hash_remove(struct hashtable_t *hash, void *data);
-
-/* finds data, based on the key in keydata. returns the found data on success,
- * or NULL on error */
-void *hash_find(struct hashtable_t *hash, void *keydata);
-
-/* resize the hash, returns the pointer to the new hash or NULL on
- * error. removes the old hash on success */
-struct hashtable_t *hash_resize(struct hashtable_t *hash, int size);
-
-/* iterate through the hash. first element is selected with iter_in NULL.  use
- * the returned iterator to access the elements until hash_iterate() returns
- * NULL. */
-struct hash_it_t *hash_iterate(struct hashtable_t *hash,
-			       struct hash_it_t *iter_in);
-
-#endif /* _NET_BATMAN_ADV_HASH_H_ */
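
Iterating while deleting is the idiom the rest of the code builds on (see _purge_orig() in originator.c further down). A hedged sketch of that pattern using HASHIT() and hash_remove_bucket(); should_drop() is hypothetical.

/* Sketch only: removing selected buckets while walking the table. */
static void example_purge(struct hashtable_t *hash)
{
	HASHIT(hashit);
	void *data;

	while (hash_iterate(hash, &hashit)) {
		data = hashit.bucket->data;

		if (!should_drop(data))
			continue;

		/* unlink the current bucket; the next hash_iterate() call
		 * copes with the removal via the first_bucket/prev_bucket
		 * bookkeeping it keeps in struct hash_it_t */
		hash_remove_bucket(hash, &hashit);
		kfree(data);
	}
}
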
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
deleted file mode 100644
index 48856ca..0000000
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include "icmp_socket.h"
-#include "send.h"
-#include "types.h"
-#include "hash.h"
-#include "hard-interface.h"
-
-
-static struct socket_client *socket_client_hash[256];
-
-static void bat_socket_add_packet(struct socket_client *socket_client,
-				  struct icmp_packet_rr *icmp_packet,
-				  size_t icmp_len);
-
-void bat_socket_init(void)
-{
-	memset(socket_client_hash, 0, sizeof(socket_client_hash));
-}
-
-static int bat_socket_open(struct inode *inode, struct file *file)
-{
-	unsigned int i;
-	struct socket_client *socket_client;
-
-	nonseekable_open(inode, file);
-
-	socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
-
-	if (!socket_client)
-		return -ENOMEM;
-
-	for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) {
-		if (!socket_client_hash[i]) {
-			socket_client_hash[i] = socket_client;
-			break;
-		}
-	}
-
-	if (i == ARRAY_SIZE(socket_client_hash)) {
-		pr_err("Error - can't add another packet client: "
-		       "maximum number of clients reached\n");
-		kfree(socket_client);
-		return -EXFULL;
-	}
-
-	INIT_LIST_HEAD(&socket_client->queue_list);
-	socket_client->queue_len = 0;
-	socket_client->index = i;
-	socket_client->bat_priv = inode->i_private;
-	spin_lock_init(&socket_client->lock);
-	init_waitqueue_head(&socket_client->queue_wait);
-
-	file->private_data = socket_client;
-
-	inc_module_count();
-	return 0;
-}
-
-static int bat_socket_release(struct inode *inode, struct file *file)
-{
-	struct socket_client *socket_client = file->private_data;
-	struct socket_packet *socket_packet;
-	struct list_head *list_pos, *list_pos_tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&socket_client->lock, flags);
-
-	/* for all packets in the queue ... */
-	list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
-		socket_packet = list_entry(list_pos,
-					   struct socket_packet, list);
-
-		list_del(list_pos);
-		kfree(socket_packet);
-	}
-
-	socket_client_hash[socket_client->index] = NULL;
-	spin_unlock_irqrestore(&socket_client->lock, flags);
-
-	kfree(socket_client);
-	dec_module_count();
-
-	return 0;
-}
-
-static ssize_t bat_socket_read(struct file *file, char __user *buf,
-			       size_t count, loff_t *ppos)
-{
-	struct socket_client *socket_client = file->private_data;
-	struct socket_packet *socket_packet;
-	size_t packet_len;
-	int error;
-	unsigned long flags;
-
-	if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
-		return -EAGAIN;
-
-	if ((!buf) || (count < sizeof(struct icmp_packet)))
-		return -EINVAL;
-
-	if (!access_ok(VERIFY_WRITE, buf, count))
-		return -EFAULT;
-
-	error = wait_event_interruptible(socket_client->queue_wait,
-					 socket_client->queue_len);
-
-	if (error)
-		return error;
-
-	spin_lock_irqsave(&socket_client->lock, flags);
-
-	socket_packet = list_first_entry(&socket_client->queue_list,
-					 struct socket_packet, list);
-	list_del(&socket_packet->list);
-	socket_client->queue_len--;
-
-	spin_unlock_irqrestore(&socket_client->lock, flags);
-
-	error = __copy_to_user(buf, &socket_packet->icmp_packet,
-			       socket_packet->icmp_len);
-
-	packet_len = socket_packet->icmp_len;
-	kfree(socket_packet);
-
-	if (error)
-		return -EFAULT;
-
-	return packet_len;
-}
-
-static ssize_t bat_socket_write(struct file *file, const char __user *buff,
-				size_t len, loff_t *off)
-{
-	struct socket_client *socket_client = file->private_data;
-	struct bat_priv *bat_priv = socket_client->bat_priv;
-	struct sk_buff *skb;
-	struct icmp_packet_rr *icmp_packet;
-
-	struct orig_node *orig_node;
-	struct batman_if *batman_if;
-	size_t packet_len = sizeof(struct icmp_packet);
-	uint8_t dstaddr[ETH_ALEN];
-	unsigned long flags;
-
-	if (len < sizeof(struct icmp_packet)) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Error - can't send packet from char device: "
-			"invalid packet size\n");
-		return -EINVAL;
-	}
-
-	if (!bat_priv->primary_if)
-		return -EFAULT;
-
-	if (len >= sizeof(struct icmp_packet_rr))
-		packet_len = sizeof(struct icmp_packet_rr);
-
-	skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
-	if (!skb)
-		return -ENOMEM;
-
-	skb_reserve(skb, sizeof(struct ethhdr));
-	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
-
-	if (!access_ok(VERIFY_READ, buff, packet_len)) {
-		len = -EFAULT;
-		goto free_skb;
-	}
-
-	if (__copy_from_user(icmp_packet, buff, packet_len)) {
-		len = -EFAULT;
-		goto free_skb;
-	}
-
-	if (icmp_packet->packet_type != BAT_ICMP) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Error - can't send packet from char device: "
-			"got bogus packet type (expected: BAT_ICMP)\n");
-		len = -EINVAL;
-		goto free_skb;
-	}
-
-	if (icmp_packet->msg_type != ECHO_REQUEST) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Error - can't send packet from char device: "
-			"got bogus message type (expected: ECHO_REQUEST)\n");
-		len = -EINVAL;
-		goto free_skb;
-	}
-
-	icmp_packet->uid = socket_client->index;
-
-	if (icmp_packet->version != COMPAT_VERSION) {
-		icmp_packet->msg_type = PARAMETER_PROBLEM;
-		icmp_packet->ttl = COMPAT_VERSION;
-		bat_socket_add_packet(socket_client, icmp_packet, packet_len);
-		goto free_skb;
-	}
-
-	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
-		goto dst_unreach;
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   icmp_packet->dst));
-
-	if (!orig_node)
-		goto unlock;
-
-	if (!orig_node->router)
-		goto unlock;
-
-	batman_if = orig_node->router->if_incoming;
-	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	if (!batman_if)
-		goto dst_unreach;
-
-	if (batman_if->if_status != IF_ACTIVE)
-		goto dst_unreach;
-
-	memcpy(icmp_packet->orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-
-	if (packet_len == sizeof(struct icmp_packet_rr))
-		memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
-
-
-	send_skb_packet(skb, batman_if, dstaddr);
-
-	goto out;
-
-unlock:
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-dst_unreach:
-	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
-	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
-free_skb:
-	kfree_skb(skb);
-out:
-	return len;
-}
-
-static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
-{
-	struct socket_client *socket_client = file->private_data;
-
-	poll_wait(file, &socket_client->queue_wait, wait);
-
-	if (socket_client->queue_len > 0)
-		return POLLIN | POLLRDNORM;
-
-	return 0;
-}
-
-static const struct file_operations fops = {
-	.owner = THIS_MODULE,
-	.open = bat_socket_open,
-	.release = bat_socket_release,
-	.read = bat_socket_read,
-	.write = bat_socket_write,
-	.poll = bat_socket_poll,
-	.llseek = no_llseek,
-};
-
-int bat_socket_setup(struct bat_priv *bat_priv)
-{
-	struct dentry *d;
-
-	if (!bat_priv->debug_dir)
-		goto err;
-
-	d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
-				bat_priv->debug_dir, bat_priv, &fops);
-	if (!d)
-		goto err;
-
-	return 0;
-
-err:
-	return 1;
-}
-
-static void bat_socket_add_packet(struct socket_client *socket_client,
-				  struct icmp_packet_rr *icmp_packet,
-				  size_t icmp_len)
-{
-	struct socket_packet *socket_packet;
-	unsigned long flags;
-
-	socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
-
-	if (!socket_packet)
-		return;
-
-	INIT_LIST_HEAD(&socket_packet->list);
-	memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
-	socket_packet->icmp_len = icmp_len;
-
-	spin_lock_irqsave(&socket_client->lock, flags);
-
-	/* while waiting for the lock the socket_client could have been
-	 * deleted */
-	if (!socket_client_hash[icmp_packet->uid]) {
-		spin_unlock_irqrestore(&socket_client->lock, flags);
-		kfree(socket_packet);
-		return;
-	}
-
-	list_add_tail(&socket_packet->list, &socket_client->queue_list);
-	socket_client->queue_len++;
-
-	if (socket_client->queue_len > 100) {
-		socket_packet = list_first_entry(&socket_client->queue_list,
-						 struct socket_packet, list);
-
-		list_del(&socket_packet->list);
-		kfree(socket_packet);
-		socket_client->queue_len--;
-	}
-
-	spin_unlock_irqrestore(&socket_client->lock, flags);
-
-	wake_up(&socket_client->queue_wait);
-}
-
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
-			       size_t icmp_len)
-{
-	struct socket_client *hash = socket_client_hash[icmp_packet->uid];
-
-	if (hash)
-		bat_socket_add_packet(hash, icmp_packet, icmp_len);
-}
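
bat_socket_setup() above exposes a layer-2 ping facility as a per-mesh debugfs file named "socket". Below is a hedged userspace sketch of the write/read round trip; the debugfs mount point and the /sys/kernel/debug/batman_adv/bat0 path are assumptions, and the struct mirrors struct icmp_packet from packet.h.

/* Sketch only (userspace C): one ECHO_REQUEST through the debugfs socket
 * file, then a blocking read for the reply. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct bat_icmp_packet {
	uint8_t  packet_type;	/* BAT_ICMP (0x02) */
	uint8_t  version;	/* COMPAT_VERSION (13) */
	uint8_t  msg_type;	/* ECHO_REQUEST (8) */
	uint8_t  ttl;
	uint8_t  dst[6];
	uint8_t  orig[6];
	uint16_t seqno;
	uint8_t  uid;
} __attribute__((packed));

int example_ping(const uint8_t dst[6])
{
	struct bat_icmp_packet pkt;
	int fd = open("/sys/kernel/debug/batman_adv/bat0/socket", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&pkt, 0, sizeof(pkt));
	pkt.packet_type = 0x02;			/* BAT_ICMP */
	pkt.version = 13;			/* COMPAT_VERSION */
	pkt.msg_type = 8;			/* ECHO_REQUEST */
	pkt.ttl = 50;				/* TTL from main.h */
	memcpy(pkt.dst, dst, 6);
	/* orig and uid are filled in by bat_socket_write() */

	if (write(fd, &pkt, sizeof(pkt)) != (ssize_t)sizeof(pkt))
		goto fail;

	/* blocks in bat_socket_read() until a reply or error is queued */
	if (read(fd, &pkt, sizeof(pkt)) < (ssize_t)sizeof(pkt))
		goto fail;

	close(fd);
	return pkt.msg_type;	/* ECHO_REPLY, DESTINATION_UNREACHABLE, ... */

fail:
	close(fd);
	return -1;
}
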
diff --git a/drivers/staging/batman-adv/icmp_socket.h b/drivers/staging/batman-adv/icmp_socket.h
deleted file mode 100644
index bf9b348..0000000
--- a/drivers/staging/batman-adv/icmp_socket.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-
-#include "types.h"
-
-#define ICMP_SOCKET "socket"
-
-void bat_socket_init(void);
-int bat_socket_setup(struct bat_priv *bat_priv);
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
-			       size_t icmp_len);
-
-#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
deleted file mode 100644
index 0587940..0000000
--- a/drivers/staging/batman-adv/main.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bat_sysfs.h"
-#include "bat_debugfs.h"
-#include "routing.h"
-#include "send.h"
-#include "originator.h"
-#include "soft-interface.h"
-#include "icmp_socket.h"
-#include "translation-table.h"
-#include "hard-interface.h"
-#include "types.h"
-#include "vis.h"
-#include "hash.h"
-
-struct list_head if_list;
-
-unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-struct workqueue_struct *bat_event_workqueue;
-
-static int __init batman_init(void)
-{
-	INIT_LIST_HEAD(&if_list);
-
-	/* the name should not be longer than 10 chars - see
-	 * http://lwn.net/Articles/23634/ */
-	bat_event_workqueue = create_singlethread_workqueue("bat_events");
-
-	if (!bat_event_workqueue)
-		return -ENOMEM;
-
-	bat_socket_init();
-	debugfs_init();
-
-	register_netdevice_notifier(&hard_if_notifier);
-
-	pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) "
-		"loaded\n", SOURCE_VERSION, REVISION_VERSION_STR,
-		COMPAT_VERSION);
-
-	return 0;
-}
-
-static void __exit batman_exit(void)
-{
-	debugfs_destroy();
-	unregister_netdevice_notifier(&hard_if_notifier);
-	hardif_remove_interfaces();
-
-	flush_workqueue(bat_event_workqueue);
-	destroy_workqueue(bat_event_workqueue);
-	bat_event_workqueue = NULL;
-
-	rcu_barrier();
-}
-
-int mesh_init(struct net_device *soft_iface)
-{
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-
-	spin_lock_init(&bat_priv->orig_hash_lock);
-	spin_lock_init(&bat_priv->forw_bat_list_lock);
-	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->hna_lhash_lock);
-	spin_lock_init(&bat_priv->hna_ghash_lock);
-	spin_lock_init(&bat_priv->vis_hash_lock);
-	spin_lock_init(&bat_priv->vis_list_lock);
-
-	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
-	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
-
-	if (originator_init(bat_priv) < 1)
-		goto err;
-
-	if (hna_local_init(bat_priv) < 1)
-		goto err;
-
-	if (hna_global_init(bat_priv) < 1)
-		goto err;
-
-	hna_local_add(soft_iface, soft_iface->dev_addr);
-
-	if (vis_init(bat_priv) < 1)
-		goto err;
-
-	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
-	goto end;
-
-err:
-	pr_err("Unable to allocate memory for mesh information structures: "
-	       "out of memory?\n");
-	mesh_free(soft_iface);
-	return -1;
-
-end:
-	return 0;
-}
-
-void mesh_free(struct net_device *soft_iface)
-{
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-
-	atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
-
-	purge_outstanding_packets(bat_priv, NULL);
-
-	vis_quit(bat_priv);
-
-	originator_free(bat_priv);
-
-	hna_local_free(bat_priv);
-	hna_global_free(bat_priv);
-
-	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
-}
-
-void inc_module_count(void)
-{
-	try_module_get(THIS_MODULE);
-}
-
-void dec_module_count(void)
-{
-	module_put(THIS_MODULE);
-}
-
-/* returns 1 if they are the same originator */
-
-int compare_orig(void *data1, void *data2)
-{
-	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
-/* hashfunction to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-int choose_orig(void *data, int32_t size)
-{
-	unsigned char *key = data;
-	uint32_t hash = 0;
-	size_t i;
-
-	for (i = 0; i < 6; i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
-
-	return hash % size;
-}
-
-int is_my_mac(uint8_t *addr)
-{
-	struct batman_if *batman_if;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->if_status != IF_ACTIVE)
-			continue;
-
-		if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
-			rcu_read_unlock();
-			return 1;
-		}
-	}
-	rcu_read_unlock();
-	return 0;
-
-}
-
-int is_bcast(uint8_t *addr)
-{
-	return (addr[0] == (uint8_t)0xff) && (addr[1] == (uint8_t)0xff);
-}
-
-int is_mcast(uint8_t *addr)
-{
-	return *addr & 0x01;
-}
-
-module_init(batman_init);
-module_exit(batman_exit);
-
-MODULE_LICENSE("GPL");
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
-#ifdef REVISION_VERSION
-MODULE_VERSION(SOURCE_VERSION "-" REVISION_VERSION);
-#else
-MODULE_VERSION(SOURCE_VERSION);
-#endif
diff --git a/drivers/staging/batman-adv/main.h b/drivers/staging/batman-adv/main.h
deleted file mode 100644
index 5e3f516..0000000
--- a/drivers/staging/batman-adv/main.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_MAIN_H_
-#define _NET_BATMAN_ADV_MAIN_H_
-
-/* Kernel Programming */
-#define LINUX
-
-#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
-		      "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
-#define DRIVER_DESC   "B.A.T.M.A.N. advanced"
-#define DRIVER_DEVICE "batman-adv"
-
-#define SOURCE_VERSION "next"
-
-
-/* B.A.T.M.A.N. parameters */
-
-#define TQ_MAX_VALUE 255
-#define JITTER 20
-#define TTL 50			  /* Time To Live of broadcast messages */
-
-#define PURGE_TIMEOUT 200	/* purge originators after time in seconds if no
-				   * valid packet comes in -> TODO: check
-				   * influence on TQ_LOCAL_WINDOW_SIZE */
-#define LOCAL_HNA_TIMEOUT 3600 /* in seconds */
-
-#define TQ_LOCAL_WINDOW_SIZE 64	  /* sliding packet range of received originator
-				   * messages in sequence numbers (should be a
-				   * multiple of our word size) */
-#define TQ_GLOBAL_WINDOW_SIZE 5
-#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
-#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
-#define TQ_TOTAL_BIDRECT_LIMIT 1
-
-#define TQ_HOP_PENALTY 10
-
-#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
-
-#define PACKBUFF_SIZE 2000
-#define LOG_BUF_LEN 8192	  /* has to be a power of 2 */
-
-#define VIS_INTERVAL 5000	/* 5 seconds */
-
-/* how much worse secondary interfaces may be
- * to be considered as bonding candidates */
-
-#define BONDING_TQ_THRESHOLD	50
-
-#define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or
-				   * change the size of
-				   * forw_packet->direct_link_flags */
-#define MAX_AGGREGATION_MS 100
-
-#define RESET_PROTECTION_MS 30000
-#define EXPECTED_SEQNO_RANGE	65536
-/* don't reset again within 30 seconds */
-
-#define MESH_INACTIVE 0
-#define MESH_ACTIVE 1
-#define MESH_DEACTIVATING 2
-
-#define BCAST_QUEUE_LEN		256
-#define BATMAN_QUEUE_LEN	256
-
-/*
- * Debug Messages
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Append 'batman-adv: ' before
-					     * kernel messages */
-
-#define DBG_BATMAN 1	/* all messages related to routing / flooding /
-			 * broadcasting / etc */
-#define DBG_ROUTES 2	/* route or hna added / changed / deleted */
-#define DBG_ALL 3
-
-
-/*
- *  Vis
- */
-
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
-/*
- * Kernel headers
- */
-
-#include <linux/mutex.h>	/* mutex */
-#include <linux/module.h>	/* needed by all modules */
-#include <linux/netdevice.h>	/* netdevice */
-#include <linux/if_ether.h>	/* ethernet header */
-#include <linux/poll.h>		/* poll_table */
-#include <linux/kthread.h>	/* kernel threads */
-#include <linux/pkt_sched.h>	/* schedule types */
-#include <linux/workqueue.h>	/* workqueue */
-#include <linux/slab.h>
-#include <net/sock.h>		/* struct sock */
-#include <linux/jiffies.h>
-#include <linux/seq_file.h>
-#include "types.h"
-
-#ifndef REVISION_VERSION
-#define REVISION_VERSION_STR ""
-#else
-#define REVISION_VERSION_STR " "REVISION_VERSION
-#endif
-
-extern struct list_head if_list;
-
-extern unsigned char broadcast_addr[];
-extern struct workqueue_struct *bat_event_workqueue;
-
-int mesh_init(struct net_device *soft_iface);
-void mesh_free(struct net_device *soft_iface);
-void inc_module_count(void);
-void dec_module_count(void);
-int compare_orig(void *data1, void *data2);
-int choose_orig(void *data, int32_t size);
-int is_my_mac(uint8_t *addr);
-int is_bcast(uint8_t *addr);
-int is_mcast(uint8_t *addr);
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
-
-#define bat_dbg(type, bat_priv, fmt, arg...)			\
-	do {							\
-		if (atomic_read(&bat_priv->log_level) & type)	\
-			debug_log(bat_priv, fmt, ## arg);	\
-	}							\
-	while (0)
-#else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __attribute__((unused)),
-			   struct bat_priv *bat_priv __attribute__((unused)),
-			   char *fmt __attribute__((unused)), ...)
-{
-}
-#endif
-
-#define bat_warning(net_dev, fmt, arg...)				\
-	do {								\
-		struct net_device *_netdev = (net_dev);                 \
-		struct bat_priv *_batpriv = netdev_priv(_netdev);       \
-		bat_dbg(DBG_ALL, _batpriv, fmt, ## arg);		\
-		pr_warning("%s: " fmt, _netdev->name, ## arg);		\
-	} while (0)
-#define bat_info(net_dev, fmt, arg...)					\
-	do {								\
-		struct net_device *_netdev = (net_dev);                 \
-		struct bat_priv *_batpriv = netdev_priv(_netdev);       \
-		bat_dbg(DBG_ALL, _batpriv, fmt, ## arg);		\
-		pr_info("%s: " fmt, _netdev->name, ## arg);		\
-	} while (0)
-#define bat_err(net_dev, fmt, arg...)					\
-	do {								\
-		struct net_device *_netdev = (net_dev);                 \
-		struct bat_priv *_batpriv = netdev_priv(_netdev);       \
-		bat_dbg(DBG_ALL, _batpriv, fmt, ## arg);		\
-		pr_err("%s: " fmt, _netdev->name, ## arg);		\
-	} while (0)
-
-#endif /* _NET_BATMAN_ADV_MAIN_H_ */
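
The logging macros above prefix messages with the soft-interface name; bat_dbg() is additionally compiled out unless CONFIG_BATMAN_ADV_DEBUG is enabled and is filtered at runtime against bat_priv->log_level. A short usage sketch follows; example_log is hypothetical.

/* Sketch only: typical logging calls from code that holds a soft interface. */
static void example_log(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	/* only emitted when debugging is compiled in and log_level has
	 * the DBG_ROUTES bit set */
	bat_dbg(DBG_ROUTES, bat_priv, "route changed towards %pM\n",
		broadcast_addr);

	/* always printed, prefixed with the module and interface names */
	bat_info(soft_iface, "mesh interface enabled\n");
}
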
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
deleted file mode 100644
index 5527008..0000000
--- a/drivers/staging/batman-adv/originator.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-/* increase the reference counter for this originator */
-
-#include "main.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-#include "unicast.h"
-
-static void purge_orig(struct work_struct *work);
-
-static void start_purge_timer(struct bat_priv *bat_priv)
-{
-	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
-}
-
-int originator_init(struct bat_priv *bat_priv)
-{
-	unsigned long flags;
-	if (bat_priv->orig_hash)
-		return 1;
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
-
-	if (!bat_priv->orig_hash)
-		goto err;
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	start_purge_timer(bat_priv);
-	return 1;
-
-err:
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	return 0;
-}
-
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
-		uint8_t *neigh, struct batman_if *if_incoming)
-{
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct neigh_node *neigh_node;
-
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"Creating new last-hop neighbor of originator\n");
-
-	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
-	if (!neigh_node)
-		return NULL;
-
-	INIT_LIST_HEAD(&neigh_node->list);
-
-	memcpy(neigh_node->addr, neigh, ETH_ALEN);
-	neigh_node->orig_node = orig_neigh_node;
-	neigh_node->if_incoming = if_incoming;
-
-	list_add_tail(&neigh_node->list, &orig_node->neigh_list);
-	return neigh_node;
-}
-
-static void free_orig_node(void *data, void *arg)
-{
-	struct list_head *list_pos, *list_pos_tmp;
-	struct neigh_node *neigh_node;
-	struct orig_node *orig_node = (struct orig_node *)data;
-	struct bat_priv *bat_priv = (struct bat_priv *)arg;
-
-	/* for all neighbors towards this originator ... */
-	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-		neigh_node = list_entry(list_pos, struct neigh_node, list);
-
-		list_del(list_pos);
-		kfree(neigh_node);
-	}
-
-	frag_list_free(&orig_node->frag_list);
-	hna_global_del_orig(bat_priv, orig_node, "originator timed out");
-
-	kfree(orig_node->bcast_own);
-	kfree(orig_node->bcast_own_sum);
-	kfree(orig_node);
-}
-
-void originator_free(struct bat_priv *bat_priv)
-{
-	unsigned long flags;
-
-	if (!bat_priv->orig_hash)
-		return;
-
-	cancel_delayed_work_sync(&bat_priv->orig_work);
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
-	bat_priv->orig_hash = NULL;
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-/* this function finds or creates an originator entry for the given
- * address if it does not exist */
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
-{
-	struct orig_node *orig_node;
-	struct hashtable_t *swaphash;
-	int size;
-
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
-
-	if (orig_node)
-		return orig_node;
-
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"Creating new originator: %pM\n", addr);
-
-	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
-	if (!orig_node)
-		return NULL;
-
-	INIT_LIST_HEAD(&orig_node->neigh_list);
-
-	memcpy(orig_node->orig, addr, ETH_ALEN);
-	orig_node->router = NULL;
-	orig_node->hna_buff = NULL;
-	orig_node->bcast_seqno_reset = jiffies - 1
-					- msecs_to_jiffies(RESET_PROTECTION_MS);
-	orig_node->batman_seqno_reset = jiffies - 1
-					- msecs_to_jiffies(RESET_PROTECTION_MS);
-
-	size = bat_priv->num_ifaces * sizeof(TYPE_OF_WORD) * NUM_WORDS;
-
-	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
-	if (!orig_node->bcast_own)
-		goto free_orig_node;
-
-	size = bat_priv->num_ifaces * sizeof(uint8_t);
-	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-
-	INIT_LIST_HEAD(&orig_node->frag_list);
-	orig_node->last_frag_packet = 0;
-
-	if (!orig_node->bcast_own_sum)
-		goto free_bcast_own;
-
-	if (hash_add(bat_priv->orig_hash, orig_node) < 0)
-		goto free_bcast_own_sum;
-
-	if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
-		swaphash = hash_resize(bat_priv->orig_hash,
-				       bat_priv->orig_hash->size * 2);
-
-		if (!swaphash)
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Couldn't resize orig hash table\n");
-		else
-			bat_priv->orig_hash = swaphash;
-	}
-
-	return orig_node;
-free_bcast_own_sum:
-	kfree(orig_node->bcast_own_sum);
-free_bcast_own:
-	kfree(orig_node->bcast_own);
-free_orig_node:
-	kfree(orig_node);
-	return NULL;
-}
-
-static bool purge_orig_neighbors(struct bat_priv *bat_priv,
-				 struct orig_node *orig_node,
-				 struct neigh_node **best_neigh_node)
-{
-	struct list_head *list_pos, *list_pos_tmp;
-	struct neigh_node *neigh_node;
-	bool neigh_purged = false;
-
-	*best_neigh_node = NULL;
-
-	/* for all neighbors towards this originator ... */
-	list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
-		neigh_node = list_entry(list_pos, struct neigh_node, list);
-
-		if ((time_after(jiffies,
-			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
-		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
-		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
-
-			if (neigh_node->if_incoming->if_status ==
-							IF_TO_BE_REMOVED)
-				bat_dbg(DBG_BATMAN, bat_priv,
-					"neighbor purge: originator %pM, "
-					"neighbor: %pM, iface: %s\n",
-					orig_node->orig, neigh_node->addr,
-					neigh_node->if_incoming->net_dev->name);
-			else
-				bat_dbg(DBG_BATMAN, bat_priv,
-					"neighbor timeout: originator %pM, "
-					"neighbor: %pM, last_valid: %lu\n",
-					orig_node->orig, neigh_node->addr,
-					(neigh_node->last_valid / HZ));
-
-			neigh_purged = true;
-			list_del(list_pos);
-			kfree(neigh_node);
-		} else {
-			if ((*best_neigh_node == NULL) ||
-			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
-				*best_neigh_node = neigh_node;
-		}
-	}
-	return neigh_purged;
-}
-
-static bool purge_orig_node(struct bat_priv *bat_priv,
-			    struct orig_node *orig_node)
-{
-	struct neigh_node *best_neigh_node;
-
-	if (time_after(jiffies,
-		orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {
-
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Originator timeout: originator %pM, last_valid %lu\n",
-			orig_node->orig, (orig_node->last_valid / HZ));
-		return true;
-	} else {
-		if (purge_orig_neighbors(bat_priv, orig_node,
-							&best_neigh_node)) {
-			update_routes(bat_priv, orig_node,
-				      best_neigh_node,
-				      orig_node->hna_buff,
-				      orig_node->hna_buff_len);
-			/* update bonding candidates, we could have lost
-			 * some candidates. */
-			update_bonding_candidates(bat_priv, orig_node);
-		}
-	}
-
-	return false;
-}
-
-static void _purge_orig(struct bat_priv *bat_priv)
-{
-	HASHIT(hashit);
-	struct orig_node *orig_node;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	/* for all origins... */
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-		orig_node = hashit.bucket->data;
-
-		if (purge_orig_node(bat_priv, orig_node)) {
-			hash_remove_bucket(bat_priv->orig_hash, &hashit);
-			free_orig_node(orig_node, bat_priv);
-		}
-
-		if (time_after(jiffies, (orig_node->last_frag_packet +
-					msecs_to_jiffies(FRAG_TIMEOUT))))
-			frag_list_free(&orig_node->frag_list);
-	}
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-}
-
-static void purge_orig(struct work_struct *work)
-{
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, orig_work);
-
-	_purge_orig(bat_priv);
-	start_purge_timer(bat_priv);
-}
-
-void purge_orig_ref(struct bat_priv *bat_priv)
-{
-	_purge_orig(bat_priv);
-}
-
-int orig_seq_print_text(struct seq_file *seq, void *offset)
-{
-	HASHIT(hashit);
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct orig_node *orig_node;
-	struct neigh_node *neigh_node;
-	int batman_count = 0;
-	int last_seen_secs;
-	int last_seen_msecs;
-	unsigned long flags;
-
-	if ((!bat_priv->primary_if) ||
-	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
-		if (!bat_priv->primary_if)
-			return seq_printf(seq, "BATMAN mesh %s disabled - "
-				     "please specify interfaces to enable it\n",
-				     net_dev->name);
-
-		return seq_printf(seq, "BATMAN mesh %s "
-				  "disabled - primary interface not active\n",
-				  net_dev->name);
-	}
-
-	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
-		   SOURCE_VERSION, REVISION_VERSION_STR,
-		   bat_priv->primary_if->net_dev->name,
-		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
-	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
-		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
-		   "outgoingIF", "Potential nexthops");
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-
-		orig_node = hashit.bucket->data;
-
-		if (!orig_node->router)
-			continue;
-
-		if (orig_node->router->tq_avg == 0)
-			continue;
-
-		last_seen_secs = jiffies_to_msecs(jiffies -
-						orig_node->last_valid) / 1000;
-		last_seen_msecs = jiffies_to_msecs(jiffies -
-						orig_node->last_valid) % 1000;
-
-		seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
-			   orig_node->orig, last_seen_secs, last_seen_msecs,
-			   orig_node->router->tq_avg, orig_node->router->addr,
-			   orig_node->router->if_incoming->net_dev->name);
-
-		list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
-			seq_printf(seq, " %pM (%3i)", neigh_node->addr,
-					   neigh_node->tq_avg);
-		}
-
-		seq_printf(seq, "\n");
-		batman_count++;
-	}
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	if (batman_count == 0)
-		seq_printf(seq, "No batman nodes in range ...\n");
-
-	return 0;
-}
-
-static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
-{
-	void *data_ptr;
-
-	data_ptr = kmalloc(max_if_num * sizeof(TYPE_OF_WORD) * NUM_WORDS,
-			   GFP_ATOMIC);
-	if (!data_ptr) {
-		pr_err("Can't resize orig: out of memory\n");
-		return -1;
-	}
-
-	memcpy(data_ptr, orig_node->bcast_own,
-	       (max_if_num - 1) * sizeof(TYPE_OF_WORD) * NUM_WORDS);
-	kfree(orig_node->bcast_own);
-	orig_node->bcast_own = data_ptr;
-
-	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-	if (!data_ptr) {
-		pr_err("Can't resize orig: out of memory\n");
-		return -1;
-	}
-
-	memcpy(data_ptr, orig_node->bcast_own_sum,
-	       (max_if_num - 1) * sizeof(uint8_t));
-	kfree(orig_node->bcast_own_sum);
-	orig_node->bcast_own_sum = data_ptr;
-
-	return 0;
-}
-
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-	struct orig_node *orig_node;
-	unsigned long flags;
-	HASHIT(hashit);
-
-	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
-	 * if_num */
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-		orig_node = hashit.bucket->data;
-
-		if (orig_node_add_if(orig_node, max_if_num) == -1)
-			goto err;
-	}
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	return 0;
-
-err:
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	return -ENOMEM;
-}
-
-static int orig_node_del_if(struct orig_node *orig_node,
-		     int max_if_num, int del_if_num)
-{
-	void *data_ptr = NULL;
-	int chunk_size;
-
-	/* last interface was removed */
-	if (max_if_num == 0)
-		goto free_bcast_own;
-
-	chunk_size = sizeof(TYPE_OF_WORD) * NUM_WORDS;
-	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
-	if (!data_ptr) {
-		pr_err("Can't resize orig: out of memory\n");
-		return -1;
-	}
-
-	/* copy first part */
-	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
-
-	/* copy second part */
-	memcpy(data_ptr + del_if_num * chunk_size,
-	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
-	       (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
-	kfree(orig_node->bcast_own);
-	orig_node->bcast_own = data_ptr;
-
-	if (max_if_num == 0)
-		goto free_own_sum;
-
-	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-	if (!data_ptr) {
-		pr_err("Can't resize orig: out of memory\n");
-		return -1;
-	}
-
-	memcpy(data_ptr, orig_node->bcast_own_sum,
-	       del_if_num * sizeof(uint8_t));
-
-	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
-	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
-	       (max_if_num - del_if_num) * sizeof(uint8_t));
-
-free_own_sum:
-	kfree(orig_node->bcast_own_sum);
-	orig_node->bcast_own_sum = data_ptr;
-
-	return 0;
-}
-
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-	struct batman_if *batman_if_tmp;
-	struct orig_node *orig_node;
-	unsigned long flags;
-	HASHIT(hashit);
-	int ret;
-
-	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
-	 * if_num */
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-		orig_node = hashit.bucket->data;
-
-		ret = orig_node_del_if(orig_node, max_if_num,
-				       batman_if->if_num);
-
-		if (ret == -1)
-			goto err;
-	}
-
-	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
-		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
-			continue;
-
-		if (batman_if == batman_if_tmp)
-			continue;
-
-		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
-			continue;
-
-		if (batman_if_tmp->if_num > batman_if->if_num)
-			batman_if_tmp->if_num--;
-	}
-	rcu_read_unlock();
-
-	batman_if->if_num = -1;
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	return 0;
-
-err:
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	return -ENOMEM;
-}
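
get_orig_node() and create_neighbor() above must be called with orig_hash_lock held, since they insert into (and may resize) the shared originator hash. Below is a hedged sketch of that calling pattern with trimmed error handling; example_update is hypothetical.

/* Sketch only: looking up/creating an originator and attaching a direct
 * neighbor, as the routing code does. For a direct neighbor the originator
 * is its own last hop, hence orig_node is passed twice. */
static void example_update(struct bat_priv *bat_priv, uint8_t *orig_addr,
			   struct batman_if *if_incoming)
{
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	unsigned long flags;

	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);

	orig_node = get_orig_node(bat_priv, orig_addr);
	if (!orig_node)
		goto unlock;

	neigh_node = create_neighbor(orig_node, orig_node,
				     orig_addr, if_incoming);
	if (neigh_node)
		neigh_node->last_valid = jiffies;

unlock:
	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
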
diff --git a/drivers/staging/batman-adv/originator.h b/drivers/staging/batman-adv/originator.h
deleted file mode 100644
index a97c4004..0000000
--- a/drivers/staging/batman-adv/originator.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
-#define _NET_BATMAN_ADV_ORIGINATOR_H_
-
-int originator_init(struct bat_priv *bat_priv);
-void originator_free(struct bat_priv *bat_priv);
-void purge_orig_ref(struct bat_priv *bat_priv);
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
-struct neigh_node *
-create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
-		uint8_t *neigh, struct batman_if *if_incoming);
-int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct batman_if *batman_if, int max_if_num);
-int orig_hash_del_if(struct batman_if *batman_if, int max_if_num);
-
-#endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/drivers/staging/batman-adv/packet.h b/drivers/staging/batman-adv/packet.h
deleted file mode 100644
index 2693383..0000000
--- a/drivers/staging/batman-adv/packet.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
-
-#define ETH_P_BATMAN  0x4305	/* unofficial/not registered Ethertype */
-
-#define BAT_PACKET       0x01
-#define BAT_ICMP         0x02
-#define BAT_UNICAST      0x03
-#define BAT_BCAST        0x04
-#define BAT_VIS          0x05
-#define BAT_UNICAST_FRAG 0x06
-
-/* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 13
-#define DIRECTLINK 0x40
-#define VIS_SERVER 0x20
-#define PRIMARIES_FIRST_HOP 0x10
-
-/* ICMP message types */
-#define ECHO_REPLY 0
-#define DESTINATION_UNREACHABLE 3
-#define ECHO_REQUEST 8
-#define TTL_EXCEEDED 11
-#define PARAMETER_PROBLEM 12
-
-/* vis defines */
-#define VIS_TYPE_SERVER_SYNC		0
-#define VIS_TYPE_CLIENT_UPDATE		1
-
-/* fragmentation defines */
-#define UNI_FRAG_HEAD 0x01
-
-struct batman_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
-	uint8_t  tq;
-	uint32_t seqno;
-	uint8_t  orig[6];
-	uint8_t  prev_sender[6];
-	uint8_t  ttl;
-	uint8_t  num_hna;
-} __attribute__((packed));
-
-#define BAT_PACKET_LEN sizeof(struct batman_packet)
-
-struct icmp_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  ttl;
-	uint8_t  dst[6];
-	uint8_t  orig[6];
-	uint16_t seqno;
-	uint8_t  uid;
-} __attribute__((packed));
-
-#define BAT_RR_LEN 16
-
-/* icmp_packet_rr must start with all fields from icmp_packet
- * as this is assumed by code that handles ICMP packets */
-struct icmp_packet_rr {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  ttl;
-	uint8_t  dst[6];
-	uint8_t  orig[6];
-	uint16_t seqno;
-	uint8_t  uid;
-	uint8_t  rr_cur;
-	uint8_t  rr[BAT_RR_LEN][ETH_ALEN];
-} __attribute__((packed));
-
-struct unicast_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
-	uint8_t  ttl;
-} __attribute__((packed));
-
-struct unicast_frag_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
-	uint8_t  ttl;
-	uint8_t  flags;
-	uint8_t  orig[6];
-	uint16_t seqno;
-} __attribute__((packed));
-
-struct bcast_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  orig[6];
-	uint8_t  ttl;
-	uint32_t seqno;
-} __attribute__((packed));
-
-struct vis_packet {
-	uint8_t  packet_type;
-	uint8_t  version;        /* batman version field */
-	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
-	uint8_t  entries;	 /* number of entries behind this struct */
-	uint32_t seqno;		 /* sequence number */
-	uint8_t  ttl;		 /* TTL */
-	uint8_t  vis_orig[6];	 /* originator that informs about its
-				  * neighbors */
-	uint8_t  target_orig[6]; /* who should receive this packet */
-	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
-} __attribute__((packed));
-
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
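Every header in packet.h is declared __attribute__((packed)), so its in-memory layout is exactly the on-wire layout with no compiler padding. A compile-time sanity check of that property on a copy of batman_packet (a userspace illustration, not part of the patch):

#include <stdint.h>

struct batman_packet_example {
	uint8_t  packet_type;
	uint8_t  version;
	uint8_t  flags;
	uint8_t  tq;
	uint32_t seqno;
	uint8_t  orig[6];
	uint8_t  prev_sender[6];
	uint8_t  ttl;
	uint8_t  num_hna;
} __attribute__((packed));

/* 4 single bytes + 4-byte seqno + two 6-byte MACs + ttl + num_hna = 22 */
_Static_assert(sizeof(struct batman_packet_example) == 22,
	       "packed OGM header must be 22 bytes on the wire");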
diff --git a/drivers/staging/batman-adv/ring_buffer.c b/drivers/staging/batman-adv/ring_buffer.c
deleted file mode 100644
index defd37c..0000000
--- a/drivers/staging/batman-adv/ring_buffer.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "ring_buffer.h"
-
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value)
-{
-	lq_recv[*lq_index] = value;
-	*lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
-}
-
-uint8_t ring_buffer_avg(uint8_t lq_recv[])
-{
-	uint8_t *ptr;
-	uint16_t count = 0, i = 0, sum = 0;
-
-	ptr = lq_recv;
-
-	while (i < TQ_GLOBAL_WINDOW_SIZE) {
-		if (*ptr != 0) {
-			count++;
-			sum += *ptr;
-		}
-
-		i++;
-		ptr++;
-	}
-
-	if (count == 0)
-		return 0;
-
-	return (uint8_t)(sum / count);
-}
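ring_buffer_avg() treats zero slots as "no sample yet" and leaves them out of the average, so a freshly reset window does not drag the TQ down. A compilable userspace illustration of the two helpers; the window size of 5 is chosen only for the example (the real size comes from TQ_GLOBAL_WINDOW_SIZE in main.h):

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 5

static void rb_set(uint8_t lq[], uint8_t *idx, uint8_t value)
{
	lq[*idx] = value;
	*idx = (*idx + 1) % WINDOW_SIZE;
}

static uint8_t rb_avg(const uint8_t lq[])
{
	uint16_t sum = 0, count = 0;

	for (int i = 0; i < WINDOW_SIZE; i++) {
		if (lq[i]) {
			sum += lq[i];
			count++;
		}
	}
	return count ? (uint8_t)(sum / count) : 0;
}

int main(void)
{
	uint8_t lq[WINDOW_SIZE] = {0}, idx = 0;

	rb_set(lq, &idx, 200);
	rb_set(lq, &idx, 100);
	/* three slots are still zero, so only the two samples count: 150 */
	printf("avg = %u\n", rb_avg(lq));
	return 0;
}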
diff --git a/drivers/staging/batman-adv/ring_buffer.h b/drivers/staging/batman-adv/ring_buffer.h
deleted file mode 100644
index 6b0cb9aa..0000000
--- a/drivers/staging/batman-adv/ring_buffer.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
-#define _NET_BATMAN_ADV_RING_BUFFER_H_
-
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
-uint8_t ring_buffer_avg(uint8_t lq_recv[]);
-
-#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
deleted file mode 100644
index 657b69e..0000000
--- a/drivers/staging/batman-adv/routing.c
+++ /dev/null
@@ -1,1389 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "routing.h"
-#include "send.h"
-#include "hash.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "icmp_socket.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "types.h"
-#include "ring_buffer.h"
-#include "vis.h"
-#include "aggregation.h"
-#include "unicast.h"
-
-void slide_own_bcast_window(struct batman_if *batman_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-	HASHIT(hashit);
-	struct orig_node *orig_node;
-	TYPE_OF_WORD *word;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-		orig_node = hashit.bucket->data;
-		word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]);
-
-		bit_get_packet(bat_priv, word, 1, 0);
-		orig_node->bcast_own_sum[batman_if->if_num] =
-			bit_packet_count(word);
-	}
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		       unsigned char *hna_buff, int hna_buff_len)
-{
-	if ((hna_buff_len != orig_node->hna_buff_len) ||
-	    ((hna_buff_len > 0) &&
-	     (orig_node->hna_buff_len > 0) &&
-	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
-
-		if (orig_node->hna_buff_len > 0)
-			hna_global_del_orig(bat_priv, orig_node,
-					    "originator changed hna");
-
-		if ((hna_buff_len > 0) && (hna_buff != NULL))
-			hna_global_add_orig(bat_priv, orig_node,
-					    hna_buff, hna_buff_len);
-	}
-}
-
-static void update_route(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 struct neigh_node *neigh_node,
-			 unsigned char *hna_buff, int hna_buff_len)
-{
-	/* route deleted */
-	if ((orig_node->router != NULL) && (neigh_node == NULL)) {
-
-		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
-			orig_node->orig);
-		hna_global_del_orig(bat_priv, orig_node,
-				    "originator timed out");
-
-		/* route added */
-	} else if ((orig_node->router == NULL) && (neigh_node != NULL)) {
-
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Adding route towards: %pM (via %pM)\n",
-			orig_node->orig, neigh_node->addr);
-		hna_global_add_orig(bat_priv, orig_node,
-				    hna_buff, hna_buff_len);
-
-		/* route changed */
-	} else {
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Changing route towards: %pM "
-			"(now via %pM - was via %pM)\n",
-			orig_node->orig, neigh_node->addr,
-			orig_node->router->addr);
-	}
-
-	orig_node->router = neigh_node;
-}
-
-
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *hna_buff,
-		   int hna_buff_len)
-{
-
-	if (orig_node == NULL)
-		return;
-
-	if (orig_node->router != neigh_node)
-		update_route(bat_priv, orig_node, neigh_node,
-			     hna_buff, hna_buff_len);
-	/* maybe only the HNA changed */
-	else
-		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
-}
-
-static int is_bidirectional_neigh(struct orig_node *orig_node,
-				struct orig_node *orig_neigh_node,
-				struct batman_packet *batman_packet,
-				struct batman_if *if_incoming)
-{
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
-	unsigned char total_count;
-
-	if (orig_node == orig_neigh_node) {
-		list_for_each_entry(tmp_neigh_node,
-				    &orig_node->neigh_list,
-				    list) {
-
-			if (compare_orig(tmp_neigh_node->addr,
-					 orig_neigh_node->orig) &&
-			    (tmp_neigh_node->if_incoming == if_incoming))
-				neigh_node = tmp_neigh_node;
-		}
-
-		if (!neigh_node)
-			neigh_node = create_neighbor(orig_node,
-						     orig_neigh_node,
-						     orig_neigh_node->orig,
-						     if_incoming);
-		/* create_neighbor failed, return 0 */
-		if (!neigh_node)
-			return 0;
-
-		neigh_node->last_valid = jiffies;
-	} else {
-		/* find packet count of corresponding one hop neighbor */
-		list_for_each_entry(tmp_neigh_node,
-				    &orig_neigh_node->neigh_list, list) {
-
-			if (compare_orig(tmp_neigh_node->addr,
-					 orig_neigh_node->orig) &&
-			    (tmp_neigh_node->if_incoming == if_incoming))
-				neigh_node = tmp_neigh_node;
-		}
-
-		if (!neigh_node)
-			neigh_node = create_neighbor(orig_neigh_node,
-						     orig_neigh_node,
-						     orig_neigh_node->orig,
-						     if_incoming);
-		/* create_neighbor failed, return 0 */
-		if (!neigh_node)
-			return 0;
-	}
-
-	orig_node->last_valid = jiffies;
-
-	/* make sure we do not get a value bigger than 100 % */
-	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
-		       neigh_node->real_packet_count ?
-		       neigh_node->real_packet_count :
-		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
-
-	/* if we have too few packets (too little data) we set tq_own to zero */
-	/* if we receive too few packets it is not considered bidirectional */
-	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
-	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
-		orig_neigh_node->tq_own = 0;
-	else
-		/* neigh_node->real_packet_count is never zero as we
-		 * only purge old information when getting new
-		 * information */
-		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
-			neigh_node->real_packet_count;
-
-	/*
-	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
-	 * affect the nearly-symmetric links only a little, but
-	 * punishes asymmetric links more.  This will give a value
-	 * between 0 and TQ_MAX_VALUE
-	 */
-	orig_neigh_node->tq_asym_penalty =
-		TQ_MAX_VALUE -
-		(TQ_MAX_VALUE *
-		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
-		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
-		(TQ_LOCAL_WINDOW_SIZE *
-		 TQ_LOCAL_WINDOW_SIZE *
-		 TQ_LOCAL_WINDOW_SIZE);
-
-	batman_packet->tq = ((batman_packet->tq *
-			      orig_neigh_node->tq_own *
-			      orig_neigh_node->tq_asym_penalty) /
-			     (TQ_MAX_VALUE * TQ_MAX_VALUE));
-
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"bidirectional: "
-		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
-		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
-		"total tq: %3i\n",
-		orig_node->orig, orig_neigh_node->orig, total_count,
-		neigh_node->real_packet_count, orig_neigh_node->tq_own,
-		orig_neigh_node->tq_asym_penalty, batman_packet->tq);
-
-	/* if link has the minimum required transmission quality
-	 * consider it bidirectional */
-	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
-		return 1;
-
-	return 0;
-}
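The tq_asym_penalty computation above is the integer form of 1 - (1 - x)^3 scaled to TQ_MAX_VALUE, where x is the fraction of the local sliding window that actually arrived: near-complete windows are barely penalized while lossy ones fall off steeply. A standalone sketch with assumed constants (TQ_MAX_VALUE = 255 and a window of 64; the real values live in main.h):

#include <stdint.h>

#define TQ_MAX_VALUE_EX		255
#define TQ_LOCAL_WINDOW_EX	64

static uint32_t tq_asym_penalty_ex(uint32_t real_packet_count)
{
	uint32_t missing = TQ_LOCAL_WINDOW_EX - real_packet_count;

	return TQ_MAX_VALUE_EX -
	       (TQ_MAX_VALUE_EX * missing * missing * missing) /
	       (TQ_LOCAL_WINDOW_EX * TQ_LOCAL_WINDOW_EX * TQ_LOCAL_WINDOW_EX);
}

/*
 * tq_asym_penalty_ex(60) == 255: four missing OGMs cost nothing with
 * integer math.  tq_asym_penalty_ex(16) == 148: losing three quarters
 * of the window cuts the contribution roughly in half.
 */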
-
-static void update_orig(struct bat_priv *bat_priv,
-			struct orig_node *orig_node,
-			struct ethhdr *ethhdr,
-			struct batman_packet *batman_packet,
-			struct batman_if *if_incoming,
-			unsigned char *hna_buff, int hna_buff_len,
-			char is_duplicate)
-{
-	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
-	int tmp_hna_buff_len;
-
-	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
-		"Searching and updating originator entry of received packet\n");
-
-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
-		    (tmp_neigh_node->if_incoming == if_incoming)) {
-			neigh_node = tmp_neigh_node;
-			continue;
-		}
-
-		if (is_duplicate)
-			continue;
-
-		ring_buffer_set(tmp_neigh_node->tq_recv,
-				&tmp_neigh_node->tq_index, 0);
-		tmp_neigh_node->tq_avg =
-			ring_buffer_avg(tmp_neigh_node->tq_recv);
-	}
-
-	if (!neigh_node) {
-		struct orig_node *orig_tmp;
-
-		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
-		if (!orig_tmp)
-			return;
-
-		neigh_node = create_neighbor(orig_node, orig_tmp,
-					     ethhdr->h_source, if_incoming);
-		if (!neigh_node)
-			return;
-	} else
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Updating existing last-hop neighbor of originator\n");
-
-	orig_node->flags = batman_packet->flags;
-	neigh_node->last_valid = jiffies;
-
-	ring_buffer_set(neigh_node->tq_recv,
-			&neigh_node->tq_index,
-			batman_packet->tq);
-	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
-
-	if (!is_duplicate) {
-		orig_node->last_ttl = batman_packet->ttl;
-		neigh_node->last_ttl = batman_packet->ttl;
-	}
-
-	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
-			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);
-
-	/* if this neighbor already is our next hop there is nothing
-	 * to change */
-	if (orig_node->router == neigh_node)
-		goto update_hna;
-
-	/* if this neighbor does not offer a better TQ we won't consider it */
-	if ((orig_node->router) &&
-	    (orig_node->router->tq_avg > neigh_node->tq_avg))
-		goto update_hna;
-
-	/* if the TQ is the same and the link is not more symmetric we
-	 * won't consider it either */
-	if ((orig_node->router) &&
-	     ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
-	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
-	      >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
-		goto update_hna;
-
-	update_routes(bat_priv, orig_node, neigh_node,
-		      hna_buff, tmp_hna_buff_len);
-	return;
-
-update_hna:
-	update_routes(bat_priv, orig_node, orig_node->router,
-		      hna_buff, tmp_hna_buff_len);
-}
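update_orig() only replaces the current next hop when the new neighbour is strictly better: a higher averaged TQ, or the same TQ but more of our own rebroadcasts seen on that link (a rough symmetry measure). The decision reduced to a predicate, with illustrative names that are not from the driver:

#include <stdbool.h>
#include <stdint.h>

static bool should_switch_router(uint8_t cur_tq, uint8_t new_tq,
				 uint8_t cur_own_bcast, uint8_t new_own_bcast)
{
	if (new_tq > cur_tq)
		return true;		/* clearly better link quality */
	if (new_tq == cur_tq && new_own_bcast > cur_own_bcast)
		return true;		/* same TQ, more symmetric link */
	return false;			/* keep the current router */
}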
-
-/* checks whether the host restarted and is in the protection time.
- * returns:
- *  0 if the packet is to be accepted
- *  1 if the packet is to be ignored.
- */
-static int window_protected(struct bat_priv *bat_priv,
-			    int32_t seq_num_diff,
-			    unsigned long *last_reset)
-{
-	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
-		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
-		if (time_after(jiffies, *last_reset +
-			msecs_to_jiffies(RESET_PROTECTION_MS))) {
-
-			*last_reset = jiffies;
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"old packet received, start protection\n");
-
-			return 0;
-		} else
-			return 1;
-	}
-	return 0;
-}
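window_protected() guards against a neighbour that rebooted and restarted its sequence numbers: a packet far outside the expected range is dropped until a protection timer expires, and only then is the window allowed to reset. A userspace sketch of the same rule with stand-in limits (the real window size, range and RESET_PROTECTION_MS are defined in main.h):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define WINDOW_SIZE_EX		64
#define EXPECTED_RANGE_EX	65536
#define RESET_PROTECTION_S	30

/* Returns true if the packet should be ignored. */
static bool seqno_window_protected(int32_t seq_diff, time_t *last_reset)
{
	if (seq_diff <= -WINDOW_SIZE_EX || seq_diff >= EXPECTED_RANGE_EX) {
		time_t now = time(NULL);

		if (now - *last_reset > RESET_PROTECTION_S) {
			*last_reset = now;
			return false;	/* accept and restart protection */
		}
		return true;		/* still inside the protection time */
	}
	return false;			/* normal in-window packet */
}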
-
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- *   1 the packet is a duplicate
- *   0 the packet has not yet been received
- *  -1 the packet is old and has been received while the seqno window
- *     was protected. Caller should drop it.
- */
-static char count_real_packets(struct ethhdr *ethhdr,
-			       struct batman_packet *batman_packet,
-			       struct batman_if *if_incoming)
-{
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct orig_node *orig_node;
-	struct neigh_node *tmp_neigh_node;
-	char is_duplicate = 0;
-	int32_t seq_diff;
-	int need_update = 0;
-	int set_mark;
-
-	orig_node = get_orig_node(bat_priv, batman_packet->orig);
-	if (orig_node == NULL)
-		return 0;
-
-	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
-
-	/* signal to the caller that the packet is to be dropped. */
-	if (window_protected(bat_priv, seq_diff,
-			     &orig_node->batman_seqno_reset))
-		return -1;
-
-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
-		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
-					       orig_node->last_real_seqno,
-					       batman_packet->seqno);
-
-		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
-		    (tmp_neigh_node->if_incoming == if_incoming))
-			set_mark = 1;
-		else
-			set_mark = 0;
-
-		/* if the window moved, set the update flag. */
-		need_update |= bit_get_packet(bat_priv,
-					      tmp_neigh_node->real_bits,
-					      seq_diff, set_mark);
-
-		tmp_neigh_node->real_packet_count =
-			bit_packet_count(tmp_neigh_node->real_bits);
-	}
-
-	if (need_update) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"updating last_seqno: old %d, new %d\n",
-			orig_node->last_real_seqno, batman_packet->seqno);
-		orig_node->last_real_seqno = batman_packet->seqno;
-	}
-
-	return is_duplicate;
-}
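count_real_packets() leans on the bitarray helpers (get_bit_status, bit_get_packet) to keep one received-OGM bitmap per neighbour. A much simplified duplicate detector in the same spirit, squeezed into a single 64-bit word instead of the driver's TYPE_OF_WORD array (sketch only, not the bitarray.c implementation):

#include <stdbool.h>
#include <stdint.h>

struct seq_window {
	uint64_t bits;		/* bit 0 = last_seqno, bit n = n packets ago */
	uint32_t last_seqno;
};

/* Returns true if seqno was already seen inside the 64-packet window. */
static bool window_seen(struct seq_window *w, uint32_t seqno)
{
	int32_t diff = (int32_t)(w->last_seqno - seqno);

	if (diff < 0) {			/* newer packet: slide the window */
		w->bits = (diff > -64) ? w->bits << -diff : 0;
		w->bits |= 1;
		w->last_seqno = seqno;
		return false;
	}
	if (diff >= 64)			/* too old to track here */
		return false;
	if (w->bits & (1ULL << diff))
		return true;		/* duplicate */
	w->bits |= 1ULL << diff;
	return false;
}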
-
-/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
-				 struct orig_node *orig_node,
-				 struct orig_node *orig_neigh_node,
-				 struct batman_packet *batman_packet)
-
-{
-	if (batman_packet->flags & PRIMARIES_FIRST_HOP)
-		memcpy(orig_neigh_node->primary_addr,
-		       orig_node->orig, ETH_ALEN);
-
-	return;
-}
-
-/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node)
-{
-	int candidates;
-	int interference_candidate;
-	int best_tq;
-	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
-	struct neigh_node *first_candidate, *last_candidate;
-
-	/* update the candidates for this originator */
-	if (!orig_node->router) {
-		orig_node->bond.candidates = 0;
-		return;
-	}
-
-	best_tq = orig_node->router->tq_avg;
-
-	/* update bond.candidates */
-
-	candidates = 0;
-
-	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
-	 * as "bonding partner" */
-
-	/* first, zero the list */
-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-		tmp_neigh_node->next_bond_candidate = NULL;
-	}
-
-	first_candidate = NULL;
-	last_candidate = NULL;
-	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
-
-		/* only consider if it has the same primary address ...  */
-		if (memcmp(orig_node->orig,
-				tmp_neigh_node->orig_node->primary_addr,
-				ETH_ALEN) != 0)
-			continue;
-
-		/* ... and is good enough to be considered */
-		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
-			continue;
-
-		/* check if we have another candidate with the same
-		 * mac address or interface. If we do, we won't
-		 * select this candidate because of possible interference. */
-
-		interference_candidate = 0;
-		list_for_each_entry(tmp_neigh_node2,
-				&orig_node->neigh_list, list) {
-
-			if (tmp_neigh_node2 == tmp_neigh_node)
-				continue;
-
-			/* we only care if the other candidate is even
-			 * considered as a candidate. */
-			if (tmp_neigh_node2->next_bond_candidate == NULL)
-				continue;
-
-
-			if ((tmp_neigh_node->if_incoming ==
-				tmp_neigh_node2->if_incoming)
-				|| (memcmp(tmp_neigh_node->addr,
-				tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
-
-				interference_candidate = 1;
-				break;
-			}
-		}
-		/* don't care further if it is an interference candidate */
-		if (interference_candidate)
-			continue;
-
-		if (first_candidate == NULL) {
-			first_candidate = tmp_neigh_node;
-			tmp_neigh_node->next_bond_candidate = first_candidate;
-		} else
-			tmp_neigh_node->next_bond_candidate = last_candidate;
-
-		last_candidate = tmp_neigh_node;
-
-		candidates++;
-	}
-
-	if (candidates > 0) {
-		first_candidate->next_bond_candidate = last_candidate;
-		orig_node->bond.selected = first_candidate;
-	}
-
-	orig_node->bond.candidates = candidates;
-}
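A neighbour only becomes a bonding candidate above if it leads to the same multi-homed node (matching primary address) and its TQ is within BONDING_TQ_THRESHOLD of the best-ranked link. The two admission tests pulled out as a standalone predicate; the threshold value below is an assumption for the example, not the driver's constant:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN_EX		6
#define BONDING_TQ_THRESHOLD_EX	10

static bool is_bond_candidate(const uint8_t *orig_primary,
			      const uint8_t *neigh_primary,
			      uint8_t neigh_tq, uint8_t best_tq)
{
	/* must belong to the same (multi-homed) node ... */
	if (memcmp(orig_primary, neigh_primary, ETH_ALEN_EX) != 0)
		return false;
	/* ... and be close enough to the best-ranked link */
	return neigh_tq >= best_tq - BONDING_TQ_THRESHOLD_EX;
}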
-
-void receive_bat_packet(struct ethhdr *ethhdr,
-				struct batman_packet *batman_packet,
-				unsigned char *hna_buff, int hna_buff_len,
-				struct batman_if *if_incoming)
-{
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct batman_if *batman_if;
-	struct orig_node *orig_neigh_node, *orig_node;
-	char has_directlink_flag;
-	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
-	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
-	char is_duplicate;
-	uint32_t if_incoming_seqno;
-
-	/* Silently drop when the batman packet is actually not a
-	 * correct packet.
-	 *
-	 * This might happen if a packet is padded (e.g. Ethernet has a
-	 * minimum frame length of 64 byte) and the aggregation interprets
-	 * it as an additional length.
-	 *
-	 * TODO: A more sane solution would be to have a bit in the
-	 * batman_packet to detect whether the packet is the last
-	 * packet in an aggregation.  Here we expect that the padding
-	 * is always zero (or not 0x01)
-	 */
-	if (batman_packet->packet_type != BAT_PACKET)
-		return;
-
-	/* could be changed by schedule_own_packet() */
-	if_incoming_seqno = atomic_read(&if_incoming->seqno);
-
-	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
-	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
-					    batman_packet->orig) ? 1 : 0);
-
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
-		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
-		"TTL %d, V %d, IDF %d)\n",
-		ethhdr->h_source, if_incoming->net_dev->name,
-		if_incoming->net_dev->dev_addr, batman_packet->orig,
-		batman_packet->prev_sender, batman_packet->seqno,
-		batman_packet->tq, batman_packet->ttl, batman_packet->version,
-		has_directlink_flag);
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->if_status != IF_ACTIVE)
-			continue;
-
-		if (batman_if->soft_iface != if_incoming->soft_iface)
-			continue;
-
-		if (compare_orig(ethhdr->h_source,
-				 batman_if->net_dev->dev_addr))
-			is_my_addr = 1;
-
-		if (compare_orig(batman_packet->orig,
-				 batman_if->net_dev->dev_addr))
-			is_my_orig = 1;
-
-		if (compare_orig(batman_packet->prev_sender,
-				 batman_if->net_dev->dev_addr))
-			is_my_oldorig = 1;
-
-		if (compare_orig(ethhdr->h_source, broadcast_addr))
-			is_broadcast = 1;
-	}
-	rcu_read_unlock();
-
-	if (batman_packet->version != COMPAT_VERSION) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: incompatible batman version (%i)\n",
-			batman_packet->version);
-		return;
-	}
-
-	if (is_my_addr) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: received my own broadcast (sender: %pM"
-			")\n",
-			ethhdr->h_source);
-		return;
-	}
-
-	if (is_broadcast) {
-		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
-		"ignoring all packets with broadcast source addr (sender: %pM"
-		")\n", ethhdr->h_source);
-		return;
-	}
-
-	if (is_my_orig) {
-		TYPE_OF_WORD *word;
-		int offset;
-
-		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
-
-		if (!orig_neigh_node)
-			return;
-
-		/* neighbor has to indicate direct link and it has to
-		 * come via the corresponding interface */
-		/* if received seqno equals last send seqno save new
-		 * seqno for bidirectional check */
-		if (has_directlink_flag &&
-		    compare_orig(if_incoming->net_dev->dev_addr,
-				 batman_packet->orig) &&
-		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
-			offset = if_incoming->if_num * NUM_WORDS;
-			word = &(orig_neigh_node->bcast_own[offset]);
-			bit_mark(word, 0);
-			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
-				bit_packet_count(word);
-		}
-
-		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
-			"originator packet from myself (via neighbor)\n");
-		return;
-	}
-
-	if (is_my_oldorig) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: ignoring all rebroadcast echos (sender: "
-			"%pM)\n", ethhdr->h_source);
-		return;
-	}
-
-	orig_node = get_orig_node(bat_priv, batman_packet->orig);
-	if (orig_node == NULL)
-		return;
-
-	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
-
-	if (is_duplicate == -1) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: packet within seqno protection time "
-			"(sender: %pM)\n", ethhdr->h_source);
-		return;
-	}
-
-	if (batman_packet->tq == 0) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: originator packet with tq equal 0\n");
-		return;
-	}
-
-	/* avoid temporary routing loops */
-	if ((orig_node->router) &&
-	    (orig_node->router->orig_node->router) &&
-	    (compare_orig(orig_node->router->addr,
-			  batman_packet->prev_sender)) &&
-	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
-	    (compare_orig(orig_node->router->addr,
-			  orig_node->router->orig_node->router->addr))) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: ignoring all rebroadcast packets that "
-			"may make me loop (sender: %pM)\n", ethhdr->h_source);
-		return;
-	}
-
-	/* if sender is a direct neighbor the sender mac equals
-	 * originator mac */
-	orig_neigh_node = (is_single_hop_neigh ?
-			   orig_node :
-			   get_orig_node(bat_priv, ethhdr->h_source));
-	if (orig_neigh_node == NULL)
-		return;
-
-	/* drop packet if sender is not a direct neighbor and if we
-	 * don't route towards it */
-	if (!is_single_hop_neigh &&
-	    (orig_neigh_node->router == NULL)) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: OGM via unknown neighbor!\n");
-		return;
-	}
-
-	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
-						batman_packet, if_incoming);
-
-	/* update ranking if it is not a duplicate or has the same
-	 * seqno and similar ttl as the non-duplicate */
-	if (is_bidirectional &&
-	    (!is_duplicate ||
-	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
-	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
-		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
-			    if_incoming, hna_buff, hna_buff_len, is_duplicate);
-
-	mark_bonding_address(bat_priv, orig_node,
-			     orig_neigh_node, batman_packet);
-	update_bonding_candidates(bat_priv, orig_node);
-
-	/* is single hop (direct) neighbor */
-	if (is_single_hop_neigh) {
-
-		/* mark direct link on incoming interface */
-		schedule_forward_packet(orig_node, ethhdr, batman_packet,
-					1, hna_buff_len, if_incoming);
-
-		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
-			"rebroadcast neighbor packet with direct link flag\n");
-		return;
-	}
-
-	/* multihop originator */
-	if (!is_bidirectional) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: not received via bidirectional link\n");
-		return;
-	}
-
-	if (is_duplicate) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Drop packet: duplicate packet received\n");
-		return;
-	}
-
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"Forwarding packet: rebroadcast originator packet\n");
-	schedule_forward_packet(orig_node, ethhdr, batman_packet,
-				0, hna_buff_len, if_incoming);
-}
-
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-	struct ethhdr *ethhdr;
-	unsigned long flags;
-
-	/* drop packet if it doesn't have the necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
-		return NET_RX_DROP;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* packet with broadcast indication but unicast recipient */
-	if (!is_bcast(ethhdr->h_dest))
-		return NET_RX_DROP;
-
-	/* packet with broadcast sender address */
-	if (is_bcast(ethhdr->h_source))
-		return NET_RX_DROP;
-
-	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, 0) < 0)
-		return NET_RX_DROP;
-
-	/* keep skb linear */
-	if (skb_linearize(skb) < 0)
-		return NET_RX_DROP;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	receive_aggr_bat_packet(ethhdr,
-				skb->data,
-				skb_headlen(skb),
-				batman_if);
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	kfree_skb(skb);
-	return NET_RX_SUCCESS;
-}
-
-static int recv_my_icmp_packet(struct bat_priv *bat_priv,
-			       struct sk_buff *skb, size_t icmp_len)
-{
-	struct orig_node *orig_node;
-	struct icmp_packet_rr *icmp_packet;
-	struct ethhdr *ethhdr;
-	struct batman_if *batman_if;
-	int ret;
-	unsigned long flags;
-	uint8_t dstaddr[ETH_ALEN];
-
-	icmp_packet = (struct icmp_packet_rr *)skb->data;
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* add data to device queue */
-	if (icmp_packet->msg_type != ECHO_REQUEST) {
-		bat_socket_receive_packet(icmp_packet, icmp_len);
-		return NET_RX_DROP;
-	}
-
-	if (!bat_priv->primary_if)
-		return NET_RX_DROP;
-
-	/* answer echo request (ping) */
-	/* get routing information */
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   icmp_packet->orig));
-	ret = NET_RX_DROP;
-
-	if ((orig_node != NULL) &&
-	    (orig_node->router != NULL)) {
-
-		/* don't lock while sending the packets ... we therefore
-		 * copy the required data before sending */
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-		/* create a copy of the skb, if needed, to modify it. */
-		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-			return NET_RX_DROP;
-
-		icmp_packet = (struct icmp_packet_rr *)skb->data;
-		ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-		memcpy(icmp_packet->orig,
-		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-		icmp_packet->msg_type = ECHO_REPLY;
-		icmp_packet->ttl = TTL;
-
-		send_skb_packet(skb, batman_if, dstaddr);
-		ret = NET_RX_SUCCESS;
-
-	} else
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	return ret;
-}
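Answering a batman-adv echo request above is just an address swap plus a type and TTL rewrite before the skb is sent back towards the originator. The same transformation shown on a plain struct; the field subset and the TTL start value are assumptions for the illustration:

#include <stdint.h>
#include <string.h>

#define ETH_ALEN_EX	6
#define ECHO_REPLY_EX	0
#define TTL_EX		50

struct icmp_hdr_ex {
	uint8_t msg_type;
	uint8_t ttl;
	uint8_t dst[ETH_ALEN_EX];
	uint8_t orig[ETH_ALEN_EX];
};

static void turn_into_echo_reply(struct icmp_hdr_ex *p, const uint8_t *my_mac)
{
	/* the reply goes back to whoever originated the request ... */
	memcpy(p->dst, p->orig, ETH_ALEN_EX);
	/* ... and now originates from our primary interface */
	memcpy(p->orig, my_mac, ETH_ALEN_EX);
	p->msg_type = ECHO_REPLY_EX;
	p->ttl = TTL_EX;
}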
-
-static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
-				  struct sk_buff *skb, size_t icmp_len)
-{
-	struct orig_node *orig_node;
-	struct icmp_packet *icmp_packet;
-	struct ethhdr *ethhdr;
-	struct batman_if *batman_if;
-	int ret;
-	unsigned long flags;
-	uint8_t dstaddr[ETH_ALEN];
-
-	icmp_packet = (struct icmp_packet *)skb->data;
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* send TTL exceeded if packet is an echo request (traceroute) */
-	if (icmp_packet->msg_type != ECHO_REQUEST) {
-		pr_debug("Warning - can't forward icmp packet from %pM to "
-			 "%pM: ttl exceeded\n", icmp_packet->orig,
-			 icmp_packet->dst);
-		return NET_RX_DROP;
-	}
-
-	if (!bat_priv->primary_if)
-		return NET_RX_DROP;
-
-	/* get routing information */
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, icmp_packet->orig));
-	ret = NET_RX_DROP;
-
-	if ((orig_node != NULL) &&
-	    (orig_node->router != NULL)) {
-
-		/* don't lock while sending the packets ... we therefore
-		 * copy the required data before sending */
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-		/* create a copy of the skb, if needed, to modify it. */
-		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-			return NET_RX_DROP;
-
-		icmp_packet = (struct icmp_packet *) skb->data;
-		ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-		memcpy(icmp_packet->orig,
-		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-		icmp_packet->msg_type = TTL_EXCEEDED;
-		icmp_packet->ttl = TTL;
-
-		send_skb_packet(skb, batman_if, dstaddr);
-		ret = NET_RX_SUCCESS;
-
-	} else
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	return ret;
-}
-
-
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct icmp_packet_rr *icmp_packet;
-	struct ethhdr *ethhdr;
-	struct orig_node *orig_node;
-	struct batman_if *batman_if;
-	int hdr_size = sizeof(struct icmp_packet);
-	int ret;
-	unsigned long flags;
-	uint8_t dstaddr[ETH_ALEN];
-
-	/**
-	 * we truncate all incoming icmp packets if they don't match our size
-	 */
-	if (skb->len >= sizeof(struct icmp_packet_rr))
-		hdr_size = sizeof(struct icmp_packet_rr);
-
-	/* drop packet if it doesn't have the necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return NET_RX_DROP;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* packet with unicast indication but broadcast recipient */
-	if (is_bcast(ethhdr->h_dest))
-		return NET_RX_DROP;
-
-	/* packet with broadcast sender address */
-	if (is_bcast(ethhdr->h_source))
-		return NET_RX_DROP;
-
-	/* not for me */
-	if (!is_my_mac(ethhdr->h_dest))
-		return NET_RX_DROP;
-
-	icmp_packet = (struct icmp_packet_rr *)skb->data;
-
-	/* add record route information if not full */
-	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
-	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
-		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
-			ethhdr->h_dest, ETH_ALEN);
-		icmp_packet->rr_cur++;
-	}
-
-	/* packet for me */
-	if (is_my_mac(icmp_packet->dst))
-		return recv_my_icmp_packet(bat_priv, skb, hdr_size);
-
-	/* TTL exceeded */
-	if (icmp_packet->ttl < 2)
-		return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
-
-	ret = NET_RX_DROP;
-
-	/* get routing information */
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, icmp_packet->dst));
-
-	if ((orig_node != NULL) &&
-	    (orig_node->router != NULL)) {
-
-		/* don't lock while sending the packets ... we therefore
-		 * copy the required data before sending */
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-		/* create a copy of the skb, if needed, to modify it. */
-		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-			return NET_RX_DROP;
-
-		icmp_packet = (struct icmp_packet_rr *)skb->data;
-		ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-		/* decrement ttl */
-		icmp_packet->ttl--;
-
-		/* route it */
-		send_skb_packet(skb, batman_if, dstaddr);
-		ret = NET_RX_SUCCESS;
-
-	} else
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	return ret;
-}
-
-/* find a suitable router for this originator, and use
- * bonding if possible. */
-struct neigh_node *find_router(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node,
-			       struct batman_if *recv_if)
-{
-	struct orig_node *primary_orig_node;
-	struct orig_node *router_orig;
-	struct neigh_node *router, *first_candidate, *best_router;
-	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
-	int bonding_enabled;
-
-	if (!orig_node)
-		return NULL;
-
-	if (!orig_node->router)
-		return NULL;
-
-	/* without bonding, the first node should
-	 * always choose the default router. */
-
-	bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
-
-	if ((!recv_if) && (!bonding_enabled))
-		return orig_node->router;
-
-	router_orig = orig_node->router->orig_node;
-
-	/* if we have something in the primary_addr, we can search
-	 * for a potential bonding candidate. */
-	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
-		return orig_node->router;
-
-	/* find the orig_node which has the primary interface. might
-	 * even be the same as our router_orig in many cases */
-
-	if (memcmp(router_orig->primary_addr,
-				router_orig->orig, ETH_ALEN) == 0) {
-		primary_orig_node = router_orig;
-	} else {
-		primary_orig_node = hash_find(bat_priv->orig_hash,
-						router_orig->primary_addr);
-
-		if (!primary_orig_node)
-			return orig_node->router;
-	}
-
-	/* with less than 2 candidates, we can't do any
-	 * bonding and prefer the original router. */
-
-	if (primary_orig_node->bond.candidates < 2)
-		return orig_node->router;
-
-
-	/* all nodes in between should choose a candidate which
-	 * is not on the interface where the packet came
-	 * in. */
-	first_candidate = primary_orig_node->bond.selected;
-	router = first_candidate;
-
-	if (bonding_enabled) {
-		/* in the bonding case, send the packets in a round
-		 * robin fashion over the remaining interfaces. */
-		do {
-			/* recv_if == NULL on the first node. */
-			if (router->if_incoming != recv_if)
-				break;
-
-			router = router->next_bond_candidate;
-		} while (router != first_candidate);
-
-		primary_orig_node->bond.selected = router->next_bond_candidate;
-
-	} else {
-		/* if bonding is disabled, use the best of the
-		 * remaining candidates which are not using
-		 * this interface. */
-		best_router = first_candidate;
-
-		do {
-			/* recv_if == NULL on the first node. */
-			if ((router->if_incoming != recv_if) &&
-				(router->tq_avg > best_router->tq_avg))
-					best_router = router;
-
-			router = router->next_bond_candidate;
-		} while (router != first_candidate);
-
-		router = best_router;
-	}
-
-	return router;
-}
-
-static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
-{
-	struct ethhdr *ethhdr;
-
-	/* drop packet if it doesn't have the necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return -1;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* packet with unicast indication but broadcast recipient */
-	if (is_bcast(ethhdr->h_dest))
-		return -1;
-
-	/* packet with broadcast sender address */
-	if (is_bcast(ethhdr->h_source))
-		return -1;
-
-	/* not for me */
-	if (!is_my_mac(ethhdr->h_dest))
-		return -1;
-
-	return 0;
-}
-
-static int route_unicast_packet(struct sk_buff *skb,
-				struct batman_if *recv_if, int hdr_size)
-{
-	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct orig_node *orig_node;
-	struct neigh_node *router;
-	struct batman_if *batman_if;
-	uint8_t dstaddr[ETH_ALEN];
-	unsigned long flags;
-	struct unicast_packet *unicast_packet;
-	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	unicast_packet = (struct unicast_packet *)skb->data;
-
-	/* packet for me */
-	if (is_my_mac(unicast_packet->dest)) {
-		interface_rx(recv_if->soft_iface, skb, hdr_size);
-		return NET_RX_SUCCESS;
-	}
-
-	/* TTL exceeded */
-	if (unicast_packet->ttl < 2) {
-		pr_debug("Warning - can't forward unicast packet from %pM to "
-			 "%pM: ttl exceeded\n", ethhdr->h_source,
-			 unicast_packet->dest);
-		return NET_RX_DROP;
-	}
-
-	/* get routing information */
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, unicast_packet->dest));
-
-	router = find_router(bat_priv, orig_node, recv_if);
-
-	if (!router) {
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-		return NET_RX_DROP;
-	}
-
-	/* don't lock while sending the packets ... we therefore
-	 * copy the required data before sending */
-
-	batman_if = router->if_incoming;
-	memcpy(dstaddr, router->addr, ETH_ALEN);
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	/* create a copy of the skb, if needed, to modify it. */
-	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
-		return NET_RX_DROP;
-
-	unicast_packet = (struct unicast_packet *)skb->data;
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* decrement ttl */
-	unicast_packet->ttl--;
-
-	/* route it */
-	send_skb_packet(skb, batman_if, dstaddr);
-
-	return NET_RX_SUCCESS;
-}
-
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
-	struct unicast_packet *unicast_packet;
-	int hdr_size = sizeof(struct unicast_packet);
-
-	if (check_unicast_packet(skb, hdr_size) < 0)
-		return NET_RX_DROP;
-
-	unicast_packet = (struct unicast_packet *)skb->data;
-
-	/* packet for me */
-	if (is_my_mac(unicast_packet->dest)) {
-		interface_rx(recv_if->soft_iface, skb, hdr_size);
-		return NET_RX_SUCCESS;
-	}
-
-	return route_unicast_packet(skb, recv_if, hdr_size);
-}
-
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct unicast_frag_packet *unicast_packet;
-	struct orig_node *orig_node;
-	struct frag_packet_list_entry *tmp_frag_entry;
-	int hdr_size = sizeof(struct unicast_frag_packet);
-	unsigned long flags;
-
-	if (check_unicast_packet(skb, hdr_size) < 0)
-		return NET_RX_DROP;
-
-	unicast_packet = (struct unicast_frag_packet *)skb->data;
-
-	/* packet for me */
-	if (is_my_mac(unicast_packet->dest)) {
-
-		spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-		orig_node = ((struct orig_node *)
-			hash_find(bat_priv->orig_hash, unicast_packet->orig));
-
-		if (!orig_node) {
-			pr_debug("couldn't find orig node for fragmentation\n");
-			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
-					       flags);
-			return NET_RX_DROP;
-		}
-
-		orig_node->last_frag_packet = jiffies;
-
-		if (list_empty(&orig_node->frag_list) &&
-			create_frag_buffer(&orig_node->frag_list)) {
-			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
-					       flags);
-			return NET_RX_DROP;
-		}
-
-		tmp_frag_entry =
-			search_frag_packet(&orig_node->frag_list,
-					   unicast_packet);
-
-		if (!tmp_frag_entry) {
-			create_frag_entry(&orig_node->frag_list, skb);
-			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
-					       flags);
-			return NET_RX_SUCCESS;
-		}
-
-		skb = merge_frag_packet(&orig_node->frag_list,
-					tmp_frag_entry, skb);
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-		if (!skb)
-			return NET_RX_DROP;
-
-		interface_rx(recv_if->soft_iface, skb, hdr_size);
-		return NET_RX_SUCCESS;
-	}
-
-	return route_unicast_packet(skb, recv_if, hdr_size);
-}
-
-
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	struct orig_node *orig_node;
-	struct bcast_packet *bcast_packet;
-	struct ethhdr *ethhdr;
-	int hdr_size = sizeof(struct bcast_packet);
-	int32_t seq_diff;
-	unsigned long flags;
-
-	/* drop packet if it doesn't have the necessary minimum size */
-	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return NET_RX_DROP;
-
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* packet with broadcast indication but unicast recipient */
-	if (!is_bcast(ethhdr->h_dest))
-		return NET_RX_DROP;
-
-	/* packet with broadcast sender address */
-	if (is_bcast(ethhdr->h_source))
-		return NET_RX_DROP;
-
-	/* ignore broadcasts sent by myself */
-	if (is_my_mac(ethhdr->h_source))
-		return NET_RX_DROP;
-
-	bcast_packet = (struct bcast_packet *)skb->data;
-
-	/* ignore broadcasts originated by myself */
-	if (is_my_mac(bcast_packet->orig))
-		return NET_RX_DROP;
-
-	if (bcast_packet->ttl < 2)
-		return NET_RX_DROP;
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	orig_node = ((struct orig_node *)
-		     hash_find(bat_priv->orig_hash, bcast_packet->orig));
-
-	if (orig_node == NULL) {
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-		return NET_RX_DROP;
-	}
-
-	/* check whether the packet is a duplicate */
-	if (get_bit_status(orig_node->bcast_bits,
-			   orig_node->last_bcast_seqno,
-			   ntohl(bcast_packet->seqno))) {
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-		return NET_RX_DROP;
-	}
-
-	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
-
-	/* check whether the packet is old and the host just restarted. */
-	if (window_protected(bat_priv, seq_diff,
-			     &orig_node->bcast_seqno_reset)) {
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-		return NET_RX_DROP;
-	}
-
-	/* mark broadcast in flood history, update window position
-	 * if required. */
-	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
-		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-	/* rebroadcast packet */
-	add_bcast_packet_to_list(bat_priv, skb);
-
-	/* broadcast for me */
-	interface_rx(recv_if->soft_iface, skb, hdr_size);
-
-	return NET_RX_SUCCESS;
-}
-
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
-{
-	struct vis_packet *vis_packet;
-	struct ethhdr *ethhdr;
-	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-	int hdr_size = sizeof(struct vis_packet);
-
-	/* keep skb linear */
-	if (skb_linearize(skb) < 0)
-		return NET_RX_DROP;
-
-	if (unlikely(!pskb_may_pull(skb, hdr_size)))
-		return NET_RX_DROP;
-
-	vis_packet = (struct vis_packet *)skb->data;
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
-
-	/* not for me */
-	if (!is_my_mac(ethhdr->h_dest))
-		return NET_RX_DROP;
-
-	/* ignore own packets */
-	if (is_my_mac(vis_packet->vis_orig))
-		return NET_RX_DROP;
-
-	if (is_my_mac(vis_packet->sender_orig))
-		return NET_RX_DROP;
-
-	switch (vis_packet->vis_type) {
-	case VIS_TYPE_SERVER_SYNC:
-		receive_server_sync_packet(bat_priv, vis_packet,
-					   skb_headlen(skb));
-		break;
-
-	case VIS_TYPE_CLIENT_UPDATE:
-		receive_client_update_packet(bat_priv, vis_packet,
-					     skb_headlen(skb));
-		break;
-
-	default:	/* ignore unknown packet */
-		break;
-	}
-
-	/* We take a copy of the data in the packet, so we should
-	   always free the skbuf. */
-	return NET_RX_DROP;
-}
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
deleted file mode 100644
index 92674c8..0000000
--- a/drivers/staging/batman-adv/routing.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_ROUTING_H_
-#define _NET_BATMAN_ADV_ROUTING_H_
-
-#include "types.h"
-
-void slide_own_bcast_window(struct batman_if *batman_if);
-void receive_bat_packet(struct ethhdr *ethhdr,
-				struct batman_packet *batman_packet,
-				unsigned char *hna_buff, int hna_buff_len,
-				struct batman_if *if_incoming);
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *hna_buff,
-		   int hna_buff_len);
-int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
-struct neigh_node *find_router(struct bat_priv *bat_priv,
-		struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node);
-
-#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
deleted file mode 100644
index 7adf76d..0000000
--- a/drivers/staging/batman-adv/send.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "send.h"
-#include "routing.h"
-#include "translation-table.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "types.h"
-#include "vis.h"
-#include "aggregation.h"
-
-
-static void send_outstanding_bcast_packet(struct work_struct *work);
-
-/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(const uint8_t tq)
-{
-	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
-}
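hop_penalty() shaves a fixed fraction off the TQ every time an OGM is rebroadcast, so path quality decays with hop count even over perfect links. A worked example assuming TQ_MAX_VALUE = 255 and a penalty constant of 10 (the real constant lives in main.h and may differ):

#include <stdint.h>
#include <stdio.h>

static uint8_t hop_penalty_ex(uint8_t tq)
{
	return (tq * (255 - 10)) / 255;
}

int main(void)
{
	/* a perfect link loses roughly 4% per hop: 255 -> 245 -> 235 -> ... */
	printf("%u %u\n", hop_penalty_ex(255), hop_penalty_ex(245));
	return 0;
}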
-
-/* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(struct bat_priv *bat_priv)
-{
-	return jiffies + msecs_to_jiffies(
-		   atomic_read(&bat_priv->orig_interval) -
-		   JITTER + (random32() % 2*JITTER));
-}
-
-/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
-{
-	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
-}
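own_send_time() spreads a node's own OGMs around orig_interval with random jitter, and forward_send_time() delays rebroadcasts by a smaller random amount so neighbouring rebroadcasts do not collide. Note that by C operator precedence the expression random32() % 2*JITTER parses as (random32() % 2) * JITTER; the sketch below assumes the intended +/- JITTER range, and JITTER = 20 ms is likewise an assumption for the example:

#include <stdlib.h>

#define JITTER_MS 20

/* delay for our own OGM: roughly orig_interval +/- JITTER milliseconds */
static unsigned long own_delay_ms(unsigned long orig_interval_ms)
{
	return orig_interval_ms - JITTER_MS + (rand() % (2 * JITTER_MS));
}

/* delay for a forwarded OGM: 0 .. JITTER/2 - 1 milliseconds */
static unsigned long forward_delay_ms(void)
{
	return rand() % (JITTER_MS / 2);
}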
-
-/* send out an already prepared packet to the given address via the
- * specified batman interface */
-int send_skb_packet(struct sk_buff *skb,
-				struct batman_if *batman_if,
-				uint8_t *dst_addr)
-{
-	struct ethhdr *ethhdr;
-
-	if (batman_if->if_status != IF_ACTIVE)
-		goto send_skb_err;
-
-	if (unlikely(!batman_if->net_dev))
-		goto send_skb_err;
-
-	if (!(batman_if->net_dev->flags & IFF_UP)) {
-		pr_warning("Interface %s is not up - can't send packet via "
-			   "that interface!\n", batman_if->net_dev->name);
-		goto send_skb_err;
-	}
-
-	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
-		goto send_skb_err;
-
-	skb_reset_mac_header(skb);
-
-	ethhdr = (struct ethhdr *) skb_mac_header(skb);
-	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
-	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
-
-	skb_set_network_header(skb, ETH_HLEN);
-	skb->priority = TC_PRIO_CONTROL;
-	skb->protocol = __constant_htons(ETH_P_BATMAN);
-
-	skb->dev = batman_if->net_dev;
-
-	/* dev_queue_xmit() returns a negative result on error. However, on
-	 * congestion and traffic shaping it drops the packet and returns
-	 * NET_XMIT_DROP (which is > 0), which is not treated as an error. */
-
-	return dev_queue_xmit(skb);
-send_skb_err:
-	kfree_skb(skb);
-	return NET_XMIT_DROP;
-}
-
-/* Send a packet to a given interface */
-static void send_packet_to_if(struct forw_packet *forw_packet,
-			      struct batman_if *batman_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-	char *fwd_str;
-	uint8_t packet_num;
-	int16_t buff_pos;
-	struct batman_packet *batman_packet;
-	struct sk_buff *skb;
-
-	if (batman_if->if_status != IF_ACTIVE)
-		return;
-
-	packet_num = 0;
-	buff_pos = 0;
-	batman_packet = (struct batman_packet *)forw_packet->skb->data;
-
-	/* adjust all flags and log packets */
-	while (aggregated_packet(buff_pos,
-				 forw_packet->packet_len,
-				 batman_packet->num_hna)) {
-
-		/* we might have aggregated direct link packets with an
-		 * ordinary base packet */
-		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
-		    (forw_packet->if_incoming == batman_if))
-			batman_packet->flags |= DIRECTLINK;
-		else
-			batman_packet->flags &= ~DIRECTLINK;
-
-		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
-							    "Sending own" :
-							    "Forwarding"));
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
-			" IDF %s) on interface %s [%pM]\n",
-			fwd_str, (packet_num > 0 ? "aggregated " : ""),
-			batman_packet->orig, ntohl(batman_packet->seqno),
-			batman_packet->tq, batman_packet->ttl,
-			(batman_packet->flags & DIRECTLINK ?
-			 "on" : "off"),
-			batman_if->net_dev->name, batman_if->net_dev->dev_addr);
-
-		buff_pos += sizeof(struct batman_packet) +
-			(batman_packet->num_hna * ETH_ALEN);
-		packet_num++;
-		batman_packet = (struct batman_packet *)
-			(forw_packet->skb->data + buff_pos);
-	}
-
-	/* create clone because function is called more than once */
-	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
-	if (skb)
-		send_skb_packet(skb, batman_if, broadcast_addr);
-}
-
-/* send a batman packet */
-static void send_packet(struct forw_packet *forw_packet)
-{
-	struct batman_if *batman_if;
-	struct net_device *soft_iface;
-	struct bat_priv *bat_priv;
-	struct batman_packet *batman_packet =
-		(struct batman_packet *)(forw_packet->skb->data);
-	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
-	if (!forw_packet->if_incoming) {
-		pr_err("Error - can't forward packet: incoming iface not "
-		       "specified\n");
-		return;
-	}
-
-	soft_iface = forw_packet->if_incoming->soft_iface;
-	bat_priv = netdev_priv(soft_iface);
-
-	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
-		return;
-
-	/* multihomed peer assumed */
-	/* non-primary OGMs are only broadcasted on their interface */
-	if ((directlink && (batman_packet->ttl == 1)) ||
-	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
-
-		/* FIXME: what about aggregated packets ? */
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"%s packet (originator %pM, seqno %d, TTL %d) "
-			"on interface %s [%pM]\n",
-			(forw_packet->own ? "Sending own" : "Forwarding"),
-			batman_packet->orig, ntohl(batman_packet->seqno),
-			batman_packet->ttl,
-			forw_packet->if_incoming->net_dev->name,
-			forw_packet->if_incoming->net_dev->dev_addr);
-
-		/* skb is only used once and then forw_packet is freed */
-		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
-				broadcast_addr);
-		forw_packet->skb = NULL;
-
-		return;
-	}
-
-	/* broadcast on every interface */
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
-			continue;
-
-		send_packet_to_if(forw_packet, batman_if);
-	}
-	rcu_read_unlock();
-}
-
-static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct batman_if *batman_if)
-{
-	int new_len;
-	unsigned char *new_buff;
-	struct batman_packet *batman_packet;
-
-	new_len = sizeof(struct batman_packet) +
-			(bat_priv->num_local_hna * ETH_ALEN);
-	new_buff = kmalloc(new_len, GFP_ATOMIC);
-
-	/* keep old buffer if kmalloc should fail */
-	if (new_buff) {
-		memcpy(new_buff, batman_if->packet_buff,
-		       sizeof(struct batman_packet));
-		batman_packet = (struct batman_packet *)new_buff;
-
-		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
-				new_buff + sizeof(struct batman_packet),
-				new_len - sizeof(struct batman_packet));
-
-		kfree(batman_if->packet_buff);
-		batman_if->packet_buff = new_buff;
-		batman_if->packet_len = new_len;
-	}
-}
-
-void schedule_own_packet(struct batman_if *batman_if)
-{
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
-	unsigned long send_time;
-	struct batman_packet *batman_packet;
-	int vis_server;
-
-	if ((batman_if->if_status == IF_NOT_IN_USE) ||
-	    (batman_if->if_status == IF_TO_BE_REMOVED))
-		return;
-
-	vis_server = atomic_read(&bat_priv->vis_mode);
-
-	/**
-	 * the interface gets activated here to avoid race conditions between
-	 * the moment of activating the interface in
-	 * hardif_activate_interface() where the originator mac is set and
-	 * outdated packets (especially uninitialized mac addresses) in the
-	 * packet queue
-	 */
-	if (batman_if->if_status == IF_TO_BE_ACTIVATED)
-		batman_if->if_status = IF_ACTIVE;
-
-	/* if local hna has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (batman_if == bat_priv->primary_if))
-		rebuild_batman_packet(bat_priv, batman_if);
-
-	/**
-	 * NOTE: packet_buff might just have been re-allocated in
-	 * rebuild_batman_packet()
-	 */
-	batman_packet = (struct batman_packet *)batman_if->packet_buff;
-
-	/* change sequence number to network order */
-	batman_packet->seqno =
-		htonl((uint32_t)atomic_read(&batman_if->seqno));
-
-	if (vis_server == VIS_TYPE_SERVER_SYNC)
-		batman_packet->flags |= VIS_SERVER;
-	else
-		batman_packet->flags &= ~VIS_SERVER;
-
-	atomic_inc(&batman_if->seqno);
-
-	slide_own_bcast_window(batman_if);
-	send_time = own_send_time(bat_priv);
-	add_bat_packet_to_list(bat_priv,
-			       batman_if->packet_buff,
-			       batman_if->packet_len,
-			       batman_if, 1, send_time);
-}
-
-void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
-			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int hna_buff_len,
-			     struct batman_if *if_incoming)
-{
-	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	unsigned char in_tq, in_ttl, tq_avg = 0;
-	unsigned long send_time;
-
-	if (batman_packet->ttl <= 1) {
-		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
-		return;
-	}
-
-	in_tq = batman_packet->tq;
-	in_ttl = batman_packet->ttl;
-
-	batman_packet->ttl--;
-	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
-
-	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
-	 * of our best tq value */
-	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
-
-		/* rebroadcast ogm of best ranking neighbor as is */
-		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
-			batman_packet->tq = orig_node->router->tq_avg;
-
-			if (orig_node->router->last_ttl)
-				batman_packet->ttl = orig_node->router->last_ttl
-							- 1;
-		}
-
-		tq_avg = orig_node->router->tq_avg;
-	}
-
-	/* apply hop penalty */
-	batman_packet->tq = hop_penalty(batman_packet->tq);
-
-	bat_dbg(DBG_BATMAN, bat_priv,
-		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
-		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
-		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
-		batman_packet->ttl);
-
-	batman_packet->seqno = htonl(batman_packet->seqno);
-
-	/* switch off primaries first hop flag when forwarding */
-	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
-	if (directlink)
-		batman_packet->flags |= DIRECTLINK;
-	else
-		batman_packet->flags &= ~DIRECTLINK;
-
-	send_time = forward_send_time(bat_priv);
-	add_bat_packet_to_list(bat_priv,
-			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + hna_buff_len,
-			       if_incoming, 0, send_time);
-}
-
-static void forw_packet_free(struct forw_packet *forw_packet)
-{
-	if (forw_packet->skb)
-		kfree_skb(forw_packet->skb);
-	kfree(forw_packet);
-}
-
-static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
-				      struct forw_packet *forw_packet,
-				      unsigned long send_time)
-{
-	unsigned long flags;
-	INIT_HLIST_NODE(&forw_packet->list);
-
-	/* add new packet to packet list */
-	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
-	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
-	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
-	/* start timer for this packet */
-	INIT_DELAYED_WORK(&forw_packet->delayed_work,
-			  send_outstanding_bcast_packet);
-	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
-			   send_time);
-}
-
-#define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
-/* add a broadcast packet to the queue and set up timers. broadcast packets
- * are sent multiple times to increase the probability of being received.
- *
- * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
- * errors.
- *
- * The skb is not consumed, so the caller should make sure that the
- * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
-{
-	struct forw_packet *forw_packet;
-	struct bcast_packet *bcast_packet;
-
-	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
-		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
-		goto out;
-	}
-
-	if (!bat_priv->primary_if)
-		goto out;
-
-	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
-
-	if (!forw_packet)
-		goto out_and_inc;
-
-	skb = skb_copy(skb, GFP_ATOMIC);
-	if (!skb)
-		goto packet_free;
-
-	/* as we have a copy now, it is safe to decrease the TTL */
-	bcast_packet = (struct bcast_packet *)skb->data;
-	bcast_packet->ttl--;
-
-	skb_reset_mac_header(skb);
-
-	forw_packet->skb = skb;
-	forw_packet->if_incoming = bat_priv->primary_if;
-
-	/* how often did we send the bcast packet ? */
-	forw_packet->num_packets = 0;
-
-	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
-	return NETDEV_TX_OK;
-
-packet_free:
-	kfree(forw_packet);
-out_and_inc:
-	atomic_inc(&bat_priv->bcast_queue_left);
-out:
-	return NETDEV_TX_BUSY;
-}
-
-static void send_outstanding_bcast_packet(struct work_struct *work)
-{
-	struct batman_if *batman_if;
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct forw_packet *forw_packet =
-		container_of(delayed_work, struct forw_packet, delayed_work);
-	unsigned long flags;
-	struct sk_buff *skb1;
-	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-
-	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
-	hlist_del(&forw_packet->list);
-	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
-	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
-		goto out;
-
-	/* rebroadcast packet */
-	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
-			continue;
-
-		/* send a copy of the saved skb */
-		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
-		if (skb1)
-			send_skb_packet(skb1, batman_if, broadcast_addr);
-	}
-	rcu_read_unlock();
-
-	forw_packet->num_packets++;
-
-	/* if we still have some more bcasts to send */
-	if (forw_packet->num_packets < 3) {
-		_add_bcast_packet_to_list(bat_priv, forw_packet,
-					  ((5 * HZ) / 1000));
-		return;
-	}
-
-out:
-	forw_packet_free(forw_packet);
-	atomic_inc(&bat_priv->bcast_queue_left);
-}
-
-void send_outstanding_bat_packet(struct work_struct *work)
-{
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct forw_packet *forw_packet =
-		container_of(delayed_work, struct forw_packet, delayed_work);
-	unsigned long flags;
-	struct bat_priv *bat_priv;
-
-	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
-	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
-	hlist_del(&forw_packet->list);
-	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
-	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
-		goto out;
-
-	send_packet(forw_packet);
-
-	/**
-	 * we have to have at least one packet in the queue
-	 * to determine the queue's wake-up time unless we are
-	 * shutting down
-	 */
-	if (forw_packet->own)
-		schedule_own_packet(forw_packet->if_incoming);
-
-out:
-	/* don't count own packet */
-	if (!forw_packet->own)
-		atomic_inc(&bat_priv->batman_queue_left);
-
-	forw_packet_free(forw_packet);
-}
-
-void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct batman_if *batman_if)
-{
-	struct forw_packet *forw_packet;
-	struct hlist_node *tmp_node, *safe_tmp_node;
-	unsigned long flags;
-
-	if (batman_if)
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"purge_outstanding_packets(): %s\n",
-			batman_if->net_dev->name);
-	else
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"purge_outstanding_packets()\n");
-
-	/* free bcast list */
-	spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
-				  &bat_priv->forw_bcast_list, list) {
-
-		/**
-		 * if purge_outstanding_packets() was called with an argument,
-		 * we delete only packets belonging to the given interface
-		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
-			continue;
-
-		spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
-		/**
-		 * send_outstanding_bcast_packet() will lock the list to
-		 * delete the item from the list
-		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
-	}
-	spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
-
-	/* free batman packet list */
-	spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
-	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
-				  &bat_priv->forw_bat_list, list) {
-
-		/**
-		 * if purge_outstanding_packets() was called with an argument,
-		 * we delete only packets belonging to the given interface
-		 */
-		if ((batman_if) &&
-		    (forw_packet->if_incoming != batman_if))
-			continue;
-
-		spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-
-		/**
-		 * send_outstanding_bat_packet() will lock the list to
-		 * delete the item from the list
-		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
-		spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
-	}
-	spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
-}
diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h
deleted file mode 100644
index c4cefa8..0000000
--- a/drivers/staging/batman-adv/send.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_SEND_H_
-#define _NET_BATMAN_ADV_SEND_H_
-
-#include "types.h"
-
-int send_skb_packet(struct sk_buff *skb,
-				struct batman_if *batman_if,
-				uint8_t *dst_addr);
-void schedule_own_packet(struct batman_if *batman_if);
-void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
-			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int hna_buff_len,
-			     struct batman_if *if_outgoing);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
-void send_outstanding_bat_packet(struct work_struct *work);
-void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct batman_if *batman_if);
-
-#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
deleted file mode 100644
index 0e99618..0000000
--- a/drivers/staging/batman-adv/soft-interface.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "routing.h"
-#include "send.h"
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "types.h"
-#include "hash.h"
-#include "send.h"
-#include "bat_sysfs.h"
-#include <linux/slab.h>
-#include <linux/ethtool.h>
-#include <linux/etherdevice.h>
-#include "unicast.h"
-
-
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static void bat_get_drvinfo(struct net_device *dev,
-			    struct ethtool_drvinfo *info);
-static u32 bat_get_msglevel(struct net_device *dev);
-static void bat_set_msglevel(struct net_device *dev, u32 value);
-static u32 bat_get_link(struct net_device *dev);
-static u32 bat_get_rx_csum(struct net_device *dev);
-static int bat_set_rx_csum(struct net_device *dev, u32 data);
-
-static const struct ethtool_ops bat_ethtool_ops = {
-	.get_settings = bat_get_settings,
-	.get_drvinfo = bat_get_drvinfo,
-	.get_msglevel = bat_get_msglevel,
-	.set_msglevel = bat_set_msglevel,
-	.get_link = bat_get_link,
-	.get_rx_csum = bat_get_rx_csum,
-	.set_rx_csum = bat_set_rx_csum
-};
-
-int my_skb_head_push(struct sk_buff *skb, unsigned int len)
-{
-	int result;
-
-	/**
-	 * TODO: We must check if we can release all references to non-payload
-	 * data using skb_header_release in our skbs to allow skb_cow_header to
-	 * work optimally. This means that those skbs are not allowed to read
-	 * or write any data which is before the current position of skb->data
-	 * after that call and thus allow other skbs with the same data buffer
-	 * to write freely in that area.
-	 */
-	result = skb_cow_head(skb, len);
-	if (result < 0)
-		return result;
-
-	skb_push(skb, len);
-	return 0;
-}
-
-static int interface_open(struct net_device *dev)
-{
-	netif_start_queue(dev);
-	return 0;
-}
-
-static int interface_release(struct net_device *dev)
-{
-	netif_stop_queue(dev);
-	return 0;
-}
-
-static struct net_device_stats *interface_stats(struct net_device *dev)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	return &bat_priv->stats;
-}
-
-static int interface_set_mac_addr(struct net_device *dev, void *p)
-{
-	struct bat_priv *bat_priv = netdev_priv(dev);
-	struct sockaddr *addr = p;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
-
-	/* only modify hna-table if it has been initialised before */
-	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
-		hna_local_remove(bat_priv, dev->dev_addr,
-				 "mac address changed");
-		hna_local_add(dev, addr->sa_data);
-	}
-
-	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-
-	return 0;
-}
-
-static int interface_change_mtu(struct net_device *dev, int new_mtu)
-{
-	/* check ranges */
-	if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
-		return -EINVAL;
-
-	dev->mtu = new_mtu;
-
-	return 0;
-}
-
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
-{
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct bcast_packet *bcast_packet;
-	int data_len = skb->len, ret;
-
-	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
-		goto dropped;
-
-	soft_iface->trans_start = jiffies;
-
-	/* TODO: check this for locks */
-	hna_local_add(soft_iface, ethhdr->h_source);
-
-	/* ethernet packet should be broadcasted */
-	if (is_bcast(ethhdr->h_dest) || is_mcast(ethhdr->h_dest)) {
-		if (!bat_priv->primary_if)
-			goto dropped;
-
-		if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
-			goto dropped;
-
-		bcast_packet = (struct bcast_packet *)skb->data;
-		bcast_packet->version = COMPAT_VERSION;
-		bcast_packet->ttl = TTL;
-
-		/* batman packet type: broadcast */
-		bcast_packet->packet_type = BAT_BCAST;
-
-		/* hw address of first interface is the orig mac because only
-		 * this mac is known throughout the mesh */
-		memcpy(bcast_packet->orig,
-		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-
-		/* set broadcast sequence number */
-		bcast_packet->seqno =
-			htonl(atomic_inc_return(&bat_priv->bcast_seqno));
-
-		add_bcast_packet_to_list(bat_priv, skb);
-
-		/* a copy is stored in the bcast list, therefore we can
-		 * free the original skb. */
-		kfree_skb(skb);
-
-	/* unicast packet */
-	} else {
-		ret = unicast_send_skb(skb, bat_priv);
-		if (ret != 0)
-			goto dropped_freed;
-	}
-
-	bat_priv->stats.tx_packets++;
-	bat_priv->stats.tx_bytes += data_len;
-	goto end;
-
-dropped:
-	kfree_skb(skb);
-dropped_freed:
-	bat_priv->stats.tx_dropped++;
-end:
-	return NETDEV_TX_OK;
-}
-
-void interface_rx(struct net_device *soft_iface,
-		  struct sk_buff *skb, int hdr_size)
-{
-	struct bat_priv *priv = netdev_priv(soft_iface);
-
-	/* check if enough space is available for pulling, and pull */
-	if (!pskb_may_pull(skb, hdr_size))
-		goto dropped;
-
-	skb_pull_rcsum(skb, hdr_size);
-/*	skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
-
-	/* skb->dev & skb->pkt_type are set here */
-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-		goto dropped;
-	skb->protocol = eth_type_trans(skb, soft_iface);
-
-	/* should not be necessary anymore as we use skb_pull_rcsum()
-	 * TODO: please verify this and remove this TODO
-	 * -- Dec 21st 2009, Simon Wunderlich */
-
-/*	skb->ip_summed = CHECKSUM_UNNECESSARY;*/
-
-	priv->stats.rx_packets++;
-	priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
-
-	soft_iface->last_rx = jiffies;
-
-	netif_rx(skb);
-	return;
-
-dropped:
-	kfree_skb(skb);
-	return;
-}
-
-#ifdef HAVE_NET_DEVICE_OPS
-static const struct net_device_ops bat_netdev_ops = {
-	.ndo_open = interface_open,
-	.ndo_stop = interface_release,
-	.ndo_get_stats = interface_stats,
-	.ndo_set_mac_address = interface_set_mac_addr,
-	.ndo_change_mtu = interface_change_mtu,
-	.ndo_start_xmit = interface_tx,
-	.ndo_validate_addr = eth_validate_addr
-};
-#endif
-
-static void interface_setup(struct net_device *dev)
-{
-	struct bat_priv *priv = netdev_priv(dev);
-	char dev_addr[ETH_ALEN];
-
-	ether_setup(dev);
-
-#ifdef HAVE_NET_DEVICE_OPS
-	dev->netdev_ops = &bat_netdev_ops;
-#else
-	dev->open = interface_open;
-	dev->stop = interface_release;
-	dev->get_stats = interface_stats;
-	dev->set_mac_address = interface_set_mac_addr;
-	dev->change_mtu = interface_change_mtu;
-	dev->hard_start_xmit = interface_tx;
-#endif
-	dev->destructor = free_netdev;
-
-	/**
-	 * can't call min_mtu, because the needed variables
-	 * have not been initialized yet
-	 */
-	dev->mtu = ETH_DATA_LEN;
-	dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
-						* skbuff for our header */
-
-	/* generate random address */
-	random_ether_addr(dev_addr);
-	memcpy(dev->dev_addr, dev_addr, ETH_ALEN);
-
-	SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
-
-	memset(priv, 0, sizeof(struct bat_priv));
-}
-
-struct net_device *softif_create(char *name)
-{
-	struct net_device *soft_iface;
-	struct bat_priv *bat_priv;
-	int ret;
-
-	soft_iface = alloc_netdev(sizeof(struct bat_priv) , name,
-				   interface_setup);
-
-	if (!soft_iface) {
-		pr_err("Unable to allocate the batman interface: %s\n", name);
-		goto out;
-	}
-
-	ret = register_netdev(soft_iface);
-	if (ret < 0) {
-		pr_err("Unable to register the batman interface '%s': %i\n",
-		       name, ret);
-		goto free_soft_iface;
-	}
-
-	bat_priv = netdev_priv(soft_iface);
-
-	atomic_set(&bat_priv->aggregation_enabled, 1);
-	atomic_set(&bat_priv->bonding_enabled, 0);
-	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
-	atomic_set(&bat_priv->orig_interval, 1000);
-	atomic_set(&bat_priv->log_level, 0);
-	atomic_set(&bat_priv->frag_enabled, 1);
-	atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
-	atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
-
-	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
-	atomic_set(&bat_priv->bcast_seqno, 1);
-	atomic_set(&bat_priv->hna_local_changed, 0);
-
-	bat_priv->primary_if = NULL;
-	bat_priv->num_ifaces = 0;
-
-	ret = sysfs_add_meshif(soft_iface);
-	if (ret < 0)
-		goto unreg_soft_iface;
-
-	ret = debugfs_add_meshif(soft_iface);
-	if (ret < 0)
-		goto unreg_sysfs;
-
-	ret = mesh_init(soft_iface);
-	if (ret < 0)
-		goto unreg_debugfs;
-
-	return soft_iface;
-
-unreg_debugfs:
-	debugfs_del_meshif(soft_iface);
-unreg_sysfs:
-	sysfs_del_meshif(soft_iface);
-unreg_soft_iface:
-	unregister_netdev(soft_iface);
-	return NULL;
-
-free_soft_iface:
-	free_netdev(soft_iface);
-out:
-	return NULL;
-}
-
-void softif_destroy(struct net_device *soft_iface)
-{
-	debugfs_del_meshif(soft_iface);
-	sysfs_del_meshif(soft_iface);
-	mesh_free(soft_iface);
-	unregister_netdevice(soft_iface);
-}
-
-/* ethtool */
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	cmd->supported = 0;
-	cmd->advertising = 0;
-	cmd->speed = SPEED_10;
-	cmd->duplex = DUPLEX_FULL;
-	cmd->port = PORT_TP;
-	cmd->phy_address = 0;
-	cmd->transceiver = XCVR_INTERNAL;
-	cmd->autoneg = AUTONEG_DISABLE;
-	cmd->maxtxpkt = 0;
-	cmd->maxrxpkt = 0;
-
-	return 0;
-}
-
-static void bat_get_drvinfo(struct net_device *dev,
-			    struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, "B.A.T.M.A.N. advanced");
-	strcpy(info->version, SOURCE_VERSION);
-	strcpy(info->fw_version, "N/A");
-	strcpy(info->bus_info, "batman");
-}
-
-static u32 bat_get_msglevel(struct net_device *dev)
-{
-	return -EOPNOTSUPP;
-}
-
-static void bat_set_msglevel(struct net_device *dev, u32 value)
-{
-}
-
-static u32 bat_get_link(struct net_device *dev)
-{
-	return 1;
-}
-
-static u32 bat_get_rx_csum(struct net_device *dev)
-{
-	return 0;
-}
-
-static int bat_set_rx_csum(struct net_device *dev, u32 data)
-{
-	return -EOPNOTSUPP;
-}
diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h
deleted file mode 100644
index 843a7ec..0000000
--- a/drivers/staging/batman-adv/soft-interface.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-
-int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
-void interface_rx(struct net_device *soft_iface,
-		  struct sk_buff *skb, int hdr_size);
-struct net_device *softif_create(char *name);
-void softif_destroy(struct net_device *soft_iface);
-
-#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/drivers/staging/batman-adv/sysfs-class-net-batman-adv b/drivers/staging/batman-adv/sysfs-class-net-batman-adv
deleted file mode 100644
index 38dd762..0000000
--- a/drivers/staging/batman-adv/sysfs-class-net-batman-adv
+++ /dev/null
@@ -1,14 +0,0 @@
-
-What:           /sys/class/net/<iface>/batman-adv/mesh_iface
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                The /sys/class/net/<iface>/batman-adv/mesh_iface file
-                displays the batman mesh interface this <iface>
-                is currently associated with.
-
-What:           /sys/class/net/<iface>/batman-adv/iface_status
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Indicates the status of <iface> as it is seen by batman.
diff --git a/drivers/staging/batman-adv/sysfs-class-net-mesh b/drivers/staging/batman-adv/sysfs-class-net-mesh
deleted file mode 100644
index b4cdb60..0000000
--- a/drivers/staging/batman-adv/sysfs-class-net-mesh
+++ /dev/null
@@ -1,41 +0,0 @@
-
-What:           /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Indicates whether the batman protocol messages of the
-                mesh <mesh_iface> shall be aggregated or not.
-
-What:           /sys/class/net/<mesh_iface>/mesh/bonding
-Date:           June 2010
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-Description:
-                Indicates whether the data traffic going through the
-                mesh will be sent using multiple interfaces at the
-                same time (if available).
-
-What:           /sys/class/net/<mesh_iface>/mesh/fragmentation
-Date:           October 2010
-Contact:        Andreas Langer <an.langer@gmx.de>
-Description:
-                Indicates whether the data traffic going through the
-                mesh will be fragmented or silently discarded if the
-                packet size exceeds the outgoing interface MTU.
-
-What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Defines the interval in milliseconds in which batman
-                sends its protocol messages.
-
-What:           /sys/class/net/<mesh_iface>/mesh/vis_mode
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Each batman node only maintains information about its
-                own local neighborhood; therefore, generating graphs
-                showing the topology of the entire mesh is not easily
-                feasible without a central instance to collect the
-                local topologies from all nodes. This file allows the
-                collecting (server) mode to be activated.
diff --git a/drivers/staging/batman-adv/translation-table.c b/drivers/staging/batman-adv/translation-table.c
deleted file mode 100644
index 681ccbd..0000000
--- a/drivers/staging/batman-adv/translation-table.c
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "translation-table.h"
-#include "soft-interface.h"
-#include "types.h"
-#include "hash.h"
-
-static void hna_local_purge(struct work_struct *work);
-static void _hna_global_del_orig(struct bat_priv *bat_priv,
-				 struct hna_global_entry *hna_global_entry,
-				 char *message);
-
-static void hna_local_start_timer(struct bat_priv *bat_priv)
-{
-	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
-}
-
-int hna_local_init(struct bat_priv *bat_priv)
-{
-	if (bat_priv->hna_local_hash)
-		return 1;
-
-	bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);
-
-	if (!bat_priv->hna_local_hash)
-		return 0;
-
-	atomic_set(&bat_priv->hna_local_changed, 0);
-	hna_local_start_timer(bat_priv);
-
-	return 1;
-}
-
-void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
-{
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct hna_local_entry *hna_local_entry;
-	struct hna_global_entry *hna_global_entry;
-	struct hashtable_t *swaphash;
-	unsigned long flags;
-	int required_bytes;
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-	hna_local_entry =
-		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
-						     addr));
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
-	if (hna_local_entry) {
-		hna_local_entry->last_seen = jiffies;
-		return;
-	}
-
-	/* only announce as many hosts as fit into the batman-packet and
-	   into batman_packet->num_hna. That should also put a limit on
-	   MAC-flooding. */
-	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
-	required_bytes += BAT_PACKET_LEN;
-
-	if ((required_bytes > ETH_DATA_LEN) ||
-	    (atomic_read(&bat_priv->aggregation_enabled) &&
-	     required_bytes > MAX_AGGREGATION_BYTES) ||
-	    (bat_priv->num_local_hna + 1 > 255)) {
-		bat_dbg(DBG_ROUTES, bat_priv,
-			"Can't add new local hna entry (%pM): "
-			"number of local hna entries exceeds packet size\n",
-			addr);
-		return;
-	}
-
-	bat_dbg(DBG_ROUTES, bat_priv,
-		"Creating new local hna entry: %pM\n", addr);
-
-	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
-	if (!hna_local_entry)
-		return;
-
-	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
-	hna_local_entry->last_seen = jiffies;
-
-	/* the batman interface mac address should never be purged */
-	if (compare_orig(addr, soft_iface->dev_addr))
-		hna_local_entry->never_purge = 1;
-	else
-		hna_local_entry->never_purge = 0;
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
-	hash_add(bat_priv->hna_local_hash, hna_local_entry);
-	bat_priv->num_local_hna++;
-	atomic_set(&bat_priv->hna_local_changed, 1);
-
-	if (bat_priv->hna_local_hash->elements * 4 >
-					bat_priv->hna_local_hash->size) {
-		swaphash = hash_resize(bat_priv->hna_local_hash,
-				       bat_priv->hna_local_hash->size * 2);
-
-		if (!swaphash)
-			pr_err("Couldn't resize local hna hash table\n");
-		else
-			bat_priv->hna_local_hash = swaphash;
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
-	/* remove address from global hash if present */
-	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
-	hna_global_entry = ((struct hna_global_entry *)
-				hash_find(bat_priv->hna_global_hash, addr));
-
-	if (hna_global_entry)
-		_hna_global_del_orig(bat_priv, hna_global_entry,
-				     "local hna received");
-
-	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-}
-
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len)
-{
-	struct hna_local_entry *hna_local_entry;
-	HASHIT(hashit);
-	int i = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
-	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
-
-		if (buff_len < (i + 1) * ETH_ALEN)
-			break;
-
-		hna_local_entry = hashit.bucket->data;
-		memcpy(buff + (i * ETH_ALEN), hna_local_entry->addr, ETH_ALEN);
-
-		i++;
-	}
-
-	/* if we did not get all new local hnas see you next time  ;-) */
-	if (i == bat_priv->num_local_hna)
-		atomic_set(&bat_priv->hna_local_changed, 0);
-
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-	return i;
-}
-
-int hna_local_seq_print_text(struct seq_file *seq, void *offset)
-{
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hna_local_entry *hna_local_entry;
-	HASHIT(hashit);
-	HASHIT(hashit_count);
-	unsigned long flags;
-	size_t buf_size, pos;
-	char *buff;
-
-	if (!bat_priv->primary_if) {
-		return seq_printf(seq, "BATMAN mesh %s disabled - "
-			       "please specify interfaces to enable it\n",
-			       net_dev->name);
-	}
-
-	seq_printf(seq, "Locally retrieved addresses (from %s) "
-		   "announced via HNA:\n",
-		   net_dev->name);
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
-	buf_size = 1;
-	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
-	while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
-		buf_size += 21;
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff) {
-		spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-		return -ENOMEM;
-	}
-	buff[0] = '\0';
-	pos = 0;
-
-	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
-		hna_local_entry = hashit.bucket->data;
-
-		pos += snprintf(buff + pos, 22, " * %pM\n",
-				hna_local_entry->addr);
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
-	seq_printf(seq, "%s", buff);
-	kfree(buff);
-	return 0;
-}
-
-static void _hna_local_del(void *data, void *arg)
-{
-	struct bat_priv *bat_priv = (struct bat_priv *)arg;
-
-	kfree(data);
-	bat_priv->num_local_hna--;
-	atomic_set(&bat_priv->hna_local_changed, 1);
-}
-
-static void hna_local_del(struct bat_priv *bat_priv,
-			  struct hna_local_entry *hna_local_entry,
-			  char *message)
-{
-	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
-		hna_local_entry->addr, message);
-
-	hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
-	_hna_local_del(hna_local_entry, bat_priv);
-}
-
-void hna_local_remove(struct bat_priv *bat_priv,
-		      uint8_t *addr, char *message)
-{
-	struct hna_local_entry *hna_local_entry;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
-	hna_local_entry = (struct hna_local_entry *)
-		hash_find(bat_priv->hna_local_hash, addr);
-	if (hna_local_entry)
-		hna_local_del(bat_priv, hna_local_entry, message);
-
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-}
-
-static void hna_local_purge(struct work_struct *work)
-{
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, hna_work);
-	struct hna_local_entry *hna_local_entry;
-	HASHIT(hashit);
-	unsigned long flags;
-	unsigned long timeout;
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
-	while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
-		hna_local_entry = hashit.bucket->data;
-
-		timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
-
-		if ((!hna_local_entry->never_purge) &&
-		    time_after(jiffies, timeout))
-			hna_local_del(bat_priv, hna_local_entry,
-				      "address timed out");
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-	hna_local_start_timer(bat_priv);
-}
-
-void hna_local_free(struct bat_priv *bat_priv)
-{
-	if (!bat_priv->hna_local_hash)
-		return;
-
-	cancel_delayed_work_sync(&bat_priv->hna_work);
-	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
-	bat_priv->hna_local_hash = NULL;
-}
-
-int hna_global_init(struct bat_priv *bat_priv)
-{
-	if (bat_priv->hna_global_hash)
-		return 1;
-
-	bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);
-
-	if (!bat_priv->hna_global_hash)
-		return 0;
-
-	return 1;
-}
-
-void hna_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 unsigned char *hna_buff, int hna_buff_len)
-{
-	struct hna_global_entry *hna_global_entry;
-	struct hna_local_entry *hna_local_entry;
-	struct hashtable_t *swaphash;
-	int hna_buff_count = 0;
-	unsigned long flags;
-	unsigned char *hna_ptr;
-
-	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
-		spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
-		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = (struct hna_global_entry *)
-			hash_find(bat_priv->hna_global_hash, hna_ptr);
-
-		if (!hna_global_entry) {
-			spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
-					       flags);
-
-			hna_global_entry =
-				kmalloc(sizeof(struct hna_global_entry),
-					GFP_ATOMIC);
-
-			if (!hna_global_entry)
-				break;
-
-			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
-
-			bat_dbg(DBG_ROUTES, bat_priv,
-				"Creating new global hna entry: "
-				"%pM (via %pM)\n",
-				hna_global_entry->addr, orig_node->orig);
-
-			spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-			hash_add(bat_priv->hna_global_hash, hna_global_entry);
-
-		}
-
-		hna_global_entry->orig_node = orig_node;
-		spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
-		/* remove address from local hash if present */
-		spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-
-		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_local_entry = (struct hna_local_entry *)
-			hash_find(bat_priv->hna_local_hash, hna_ptr);
-
-		if (hna_local_entry)
-			hna_local_del(bat_priv, hna_local_entry,
-				      "global hna received");
-
-		spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-
-		hna_buff_count++;
-	}
-
-	/* initialize, and overwrite if malloc succeeds */
-	orig_node->hna_buff = NULL;
-	orig_node->hna_buff_len = 0;
-
-	if (hna_buff_len > 0) {
-		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
-		if (orig_node->hna_buff) {
-			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
-			orig_node->hna_buff_len = hna_buff_len;
-		}
-	}
-
-	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
-	if (bat_priv->hna_global_hash->elements * 4 >
-					bat_priv->hna_global_hash->size) {
-		swaphash = hash_resize(bat_priv->hna_global_hash,
-				       bat_priv->hna_global_hash->size * 2);
-
-		if (!swaphash)
-			pr_err("Couldn't resize global hna hash table\n");
-		else
-			bat_priv->hna_global_hash = swaphash;
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-}
-
-int hna_global_seq_print_text(struct seq_file *seq, void *offset)
-{
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hna_global_entry *hna_global_entry;
-	HASHIT(hashit);
-	HASHIT(hashit_count);
-	unsigned long flags;
-	size_t buf_size, pos;
-	char *buff;
-
-	if (!bat_priv->primary_if) {
-		return seq_printf(seq, "BATMAN mesh %s disabled - "
-				  "please specify interfaces to enable it\n",
-				  net_dev->name);
-	}
-
-	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
-		   net_dev->name);
-
-	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
-	buf_size = 1;
-	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
-	while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
-		buf_size += 43;
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff) {
-		spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-		return -ENOMEM;
-	}
-	buff[0] = '\0';
-	pos = 0;
-
-	while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
-		hna_global_entry = hashit.bucket->data;
-
-		pos += snprintf(buff + pos, 44,
-				" * %pM via %pM\n", hna_global_entry->addr,
-				hna_global_entry->orig_node->orig);
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
-	seq_printf(seq, "%s", buff);
-	kfree(buff);
-	return 0;
-}
-
-static void _hna_global_del_orig(struct bat_priv *bat_priv,
-				 struct hna_global_entry *hna_global_entry,
-				 char *message)
-{
-	bat_dbg(DBG_ROUTES, bat_priv,
-		"Deleting global hna entry %pM (via %pM): %s\n",
-		hna_global_entry->addr, hna_global_entry->orig_node->orig,
-		message);
-
-	hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
-	kfree(hna_global_entry);
-}
-
-void hna_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, char *message)
-{
-	struct hna_global_entry *hna_global_entry;
-	int hna_buff_count = 0;
-	unsigned long flags;
-	unsigned char *hna_ptr;
-
-	if (orig_node->hna_buff_len == 0)
-		return;
-
-	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-
-	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
-		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = (struct hna_global_entry *)
-			hash_find(bat_priv->hna_global_hash, hna_ptr);
-
-		if ((hna_global_entry) &&
-		    (hna_global_entry->orig_node == orig_node))
-			_hna_global_del_orig(bat_priv, hna_global_entry,
-					     message);
-
-		hna_buff_count++;
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
-	orig_node->hna_buff_len = 0;
-	kfree(orig_node->hna_buff);
-	orig_node->hna_buff = NULL;
-}
-
-static void hna_global_del(void *data, void *arg)
-{
-	kfree(data);
-}
-
-void hna_global_free(struct bat_priv *bat_priv)
-{
-	if (!bat_priv->hna_global_hash)
-		return;
-
-	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
-	bat_priv->hna_global_hash = NULL;
-}
-
-struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
-{
-	struct hna_global_entry *hna_global_entry;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
-	hna_global_entry = (struct hna_global_entry *)
-				hash_find(bat_priv->hna_global_hash, addr);
-	spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
-
-	if (!hna_global_entry)
-		return NULL;
-
-	return hna_global_entry->orig_node;
-}
diff --git a/drivers/staging/batman-adv/translation-table.h b/drivers/staging/batman-adv/translation-table.h
deleted file mode 100644
index 10c4c5c..0000000
--- a/drivers/staging/batman-adv/translation-table.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-
-#include "types.h"
-
-int hna_local_init(struct bat_priv *bat_priv);
-void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
-void hna_local_remove(struct bat_priv *bat_priv,
-		      uint8_t *addr, char *message);
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len);
-int hna_local_seq_print_text(struct seq_file *seq, void *offset);
-void hna_local_free(struct bat_priv *bat_priv);
-int hna_global_init(struct bat_priv *bat_priv);
-void hna_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 unsigned char *hna_buff, int hna_buff_len);
-int hna_global_seq_print_text(struct seq_file *seq, void *offset);
-void hna_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, char *message);
-void hna_global_free(struct bat_priv *bat_priv);
-struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
-
-#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
deleted file mode 100644
index f3f7366..0000000
--- a/drivers/staging/batman-adv/types.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-
-
-#ifndef _NET_BATMAN_ADV_TYPES_H_
-#define _NET_BATMAN_ADV_TYPES_H_
-
-#include "packet.h"
-#include "bitarray.h"
-
-#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \
-	((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
-	 sizeof(struct unicast_packet) : \
-	 sizeof(struct bcast_packet))))
-
-
-struct batman_if {
-	struct list_head list;
-	int16_t if_num;
-	char if_status;
-	struct net_device *net_dev;
-	atomic_t seqno;
-	atomic_t frag_seqno;
-	unsigned char *packet_buff;
-	int packet_len;
-	struct kobject *hardif_obj;
-	atomic_t refcnt;
-	struct packet_type batman_adv_ptype;
-	struct net_device *soft_iface;
-};
-
-/**
- *	orig_node - structure for orig_list maintaining nodes of mesh
- *	@primary_addr: host's primary interface address
- *	@last_valid: when last packet from this node was received
- *	@bcast_seqno_reset: time when the broadcast seqno window was reset
- *	@batman_seqno_reset: time when the batman seqno window was reset
- *	@flags: for now only VIS_SERVER flag
- *	@last_real_seqno: last and best known sequence number
- *	@last_ttl: ttl of last received packet
- *	@last_bcast_seqno: last broadcast sequence number received by this host
- *
- *	@candidates: how many candidates are available
- *	@selected: next bonding candidate
- */
-struct orig_node {
-	uint8_t orig[ETH_ALEN];
-	uint8_t primary_addr[ETH_ALEN];
-	struct neigh_node *router;
-	TYPE_OF_WORD *bcast_own;
-	uint8_t *bcast_own_sum;
-	uint8_t tq_own;
-	int tq_asym_penalty;
-	unsigned long last_valid;
-	unsigned long bcast_seqno_reset;
-	unsigned long batman_seqno_reset;
-	uint8_t  flags;
-	unsigned char *hna_buff;
-	int16_t hna_buff_len;
-	uint32_t last_real_seqno;
-	uint8_t last_ttl;
-	TYPE_OF_WORD bcast_bits[NUM_WORDS];
-	uint32_t last_bcast_seqno;
-	struct list_head neigh_list;
-	struct list_head frag_list;
-	unsigned long last_frag_packet;
-	struct {
-		uint8_t candidates;
-		struct neigh_node *selected;
-	} bond;
-};
-
-/**
- *	neigh_node
- *	@last_valid: when last packet via this neighbor was received
- */
-struct neigh_node {
-	struct list_head list;
-	uint8_t addr[ETH_ALEN];
-	uint8_t real_packet_count;
-	uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
-	uint8_t tq_index;
-	uint8_t tq_avg;
-	uint8_t last_ttl;
-	struct neigh_node *next_bond_candidate;
-	unsigned long last_valid;
-	TYPE_OF_WORD real_bits[NUM_WORDS];
-	struct orig_node *orig_node;
-	struct batman_if *if_incoming;
-};
-
-
-struct bat_priv {
-	atomic_t mesh_state;
-	struct net_device_stats stats;
-	atomic_t aggregation_enabled;
-	atomic_t bonding_enabled;
-	atomic_t frag_enabled;
-	atomic_t vis_mode;
-	atomic_t orig_interval;
-	atomic_t log_level;
-	atomic_t bcast_seqno;
-	atomic_t bcast_queue_left;
-	atomic_t batman_queue_left;
-	char num_ifaces;
-	struct debug_log *debug_log;
-	struct batman_if *primary_if;
-	struct kobject *mesh_obj;
-	struct dentry *debug_dir;
-	struct hlist_head forw_bat_list;
-	struct hlist_head forw_bcast_list;
-	struct list_head vis_send_list;
-	struct hashtable_t *orig_hash;
-	struct hashtable_t *hna_local_hash;
-	struct hashtable_t *hna_global_hash;
-	struct hashtable_t *vis_hash;
-	spinlock_t orig_hash_lock; /* protects orig_hash */
-	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
-	spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
-	spinlock_t hna_lhash_lock; /* protects hna_local_hash */
-	spinlock_t hna_ghash_lock; /* protects hna_global_hash */
-	spinlock_t vis_hash_lock; /* protects vis_hash */
-	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
-	int16_t num_local_hna;
-	atomic_t hna_local_changed;
-	struct delayed_work hna_work;
-	struct delayed_work orig_work;
-	struct delayed_work vis_work;
-	struct vis_info *my_vis_info;
-};
-
-struct socket_client {
-	struct list_head queue_list;
-	unsigned int queue_len;
-	unsigned char index;
-	spinlock_t lock; /* protects queue_list, queue_len, index */
-	wait_queue_head_t queue_wait;
-	struct bat_priv *bat_priv;
-};
-
-struct socket_packet {
-	struct list_head list;
-	size_t icmp_len;
-	struct icmp_packet_rr icmp_packet;
-};
-
-struct hna_local_entry {
-	uint8_t addr[ETH_ALEN];
-	unsigned long last_seen;
-	char never_purge;
-};
-
-struct hna_global_entry {
-	uint8_t addr[ETH_ALEN];
-	struct orig_node *orig_node;
-};
-
-/**
- *	forw_packet - structure for forw_list maintaining packets to be
- *	              send/forwarded
- */
-struct forw_packet {
-	struct hlist_node list;
-	unsigned long send_time;
-	uint8_t own;
-	struct sk_buff *skb;
-	uint16_t packet_len;
-	uint32_t direct_link_flags;
-	uint8_t num_packets;
-	struct delayed_work delayed_work;
-	struct batman_if *if_incoming;
-};
-
-/* While scanning for vis-entries of a particular vis-originator
- * this list collects its interfaces to create a subgraph/cluster
- * out of them later
- */
-struct if_list_entry {
-	uint8_t addr[ETH_ALEN];
-	bool primary;
-	struct hlist_node list;
-};
-
-struct debug_log {
-	char log_buff[LOG_BUF_LEN];
-	unsigned long log_start;
-	unsigned long log_end;
-	spinlock_t lock; /* protects log_buff, log_start and log_end */
-	wait_queue_head_t queue_wait;
-};
-
-struct frag_packet_list_entry {
-	struct list_head list;
-	uint16_t seqno;
-	struct sk_buff *skb;
-};
-
-struct vis_info {
-	unsigned long       first_seen;
-	struct list_head    recv_list;
-			    /* list of server-neighbors we received a vis-packet
-			     * from.  we should not reply to them. */
-	struct list_head send_list;
-	struct kref refcount;
-	struct bat_priv *bat_priv;
-	/* this packet might be part of the vis send queue. */
-	struct sk_buff *skb_packet;
-	/* vis_info may follow here*/
-} __attribute__((packed));
-
-struct vis_info_entry {
-	uint8_t  src[ETH_ALEN];
-	uint8_t  dest[ETH_ALEN];
-	uint8_t  quality;	/* quality = 0 means HNA */
-} __attribute__((packed));
-
-struct recvlist_node {
-	struct list_head list;
-	uint8_t mac[ETH_ALEN];
-};
-
-#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
deleted file mode 100644
index 0459413..0000000
--- a/drivers/staging/batman-adv/unicast.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "unicast.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-
-struct sk_buff *merge_frag_packet(struct list_head *head,
-				  struct frag_packet_list_entry *tfp,
-				  struct sk_buff *skb)
-{
-	struct unicast_frag_packet *up =
-		(struct unicast_frag_packet *)skb->data;
-	struct sk_buff *tmp_skb;
-
-	/* set skb to the first part and tmp_skb to the second part */
-	if (up->flags & UNI_FRAG_HEAD) {
-		tmp_skb = tfp->skb;
-	} else {
-		tmp_skb = skb;
-		skb = tfp->skb;
-	}
-
-	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-		/* free buffered skb, skb will be freed later */
-		kfree_skb(tfp->skb);
-		return NULL;
-	}
-
-	/* move free entry to end */
-	tfp->skb = NULL;
-	tfp->seqno = 0;
-	list_move_tail(&tfp->list, head);
-
-	memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
-	kfree_skb(tmp_skb);
-	return skb;
-}
-
-void create_frag_entry(struct list_head *head, struct sk_buff *skb)
-{
-	struct frag_packet_list_entry *tfp;
-	struct unicast_frag_packet *up =
-		(struct unicast_frag_packet *)skb->data;
-
-	/* free entries and the oldest packets stand at the end */
-	tfp = list_entry((head)->prev, typeof(*tfp), list);
-	kfree_skb(tfp->skb);
-
-	tfp->seqno = ntohs(up->seqno);
-	tfp->skb = skb;
-	list_move(&tfp->list, head);
-	return;
-}
-
-int create_frag_buffer(struct list_head *head)
-{
-	int i;
-	struct frag_packet_list_entry *tfp;
-
-	for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
-		tfp = kmalloc(sizeof(struct frag_packet_list_entry),
-			GFP_ATOMIC);
-		if (!tfp) {
-			frag_list_free(head);
-			return -ENOMEM;
-		}
-		tfp->skb = NULL;
-		tfp->seqno = 0;
-		INIT_LIST_HEAD(&tfp->list);
-		list_add(&tfp->list, head);
-	}
-
-	return 0;
-}
-
-struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
-						 struct unicast_frag_packet *up)
-{
-	struct frag_packet_list_entry *tfp;
-	struct unicast_frag_packet *tmp_up = NULL;
-	uint16_t search_seqno;
-
-	if (up->flags & UNI_FRAG_HEAD)
-		search_seqno = ntohs(up->seqno)+1;
-	else
-		search_seqno = ntohs(up->seqno)-1;
-
-	list_for_each_entry(tfp, head, list) {
-
-		if (!tfp->skb)
-			continue;
-
-		if (tfp->seqno == ntohs(up->seqno))
-			goto mov_tail;
-
-		tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
-
-		if (tfp->seqno == search_seqno) {
-
-			if ((tmp_up->flags & UNI_FRAG_HEAD) !=
-			    (up->flags & UNI_FRAG_HEAD))
-				return tfp;
-			else
-				goto mov_tail;
-		}
-	}
-	return NULL;
-
-mov_tail:
-	list_move_tail(&tfp->list, head);
-	return NULL;
-}
-
-void frag_list_free(struct list_head *head)
-{
-	struct frag_packet_list_entry *pf, *tmp_pf;
-
-	if (!list_empty(head)) {
-
-		list_for_each_entry_safe(pf, tmp_pf, head, list) {
-			kfree_skb(pf->skb);
-			list_del(&pf->list);
-			kfree(pf);
-		}
-	}
-	return;
-}
-
-static int unicast_send_frag_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
-			  struct batman_if *batman_if, uint8_t dstaddr[],
-			  struct orig_node *orig_node)
-{
-	struct unicast_frag_packet *ucast_frag1, *ucast_frag2;
-	int hdr_len = sizeof(struct unicast_frag_packet);
-	struct sk_buff *frag_skb;
-	int data_len = skb->len;
-
-	if (!bat_priv->primary_if)
-		goto dropped;
-
-	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + hdr_len);
-	skb_split(skb, frag_skb, data_len / 2);
-
-	if (my_skb_head_push(frag_skb, hdr_len) < 0 ||
-	    my_skb_head_push(skb, hdr_len) < 0)
-		goto drop_frag;
-
-	ucast_frag1 = (struct unicast_frag_packet *)skb->data;
-	ucast_frag2 = (struct unicast_frag_packet *)frag_skb->data;
-
-	ucast_frag1->version = COMPAT_VERSION;
-	ucast_frag1->packet_type = BAT_UNICAST_FRAG;
-	ucast_frag1->ttl = TTL;
-	memcpy(ucast_frag1->orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(ucast_frag1->dest, orig_node->orig, ETH_ALEN);
-
-	memcpy(ucast_frag2, ucast_frag1, sizeof(struct unicast_frag_packet));
-
-	ucast_frag1->flags |= UNI_FRAG_HEAD;
-	ucast_frag2->flags &= ~UNI_FRAG_HEAD;
-
-	ucast_frag1->seqno = htons((uint16_t)atomic_inc_return(
-						&batman_if->frag_seqno));
-
-	ucast_frag2->seqno = htons((uint16_t)atomic_inc_return(
-						&batman_if->frag_seqno));
-
-	send_skb_packet(skb, batman_if, dstaddr);
-	send_skb_packet(frag_skb, batman_if, dstaddr);
-	return 0;
-
-drop_frag:
-	kfree_skb(frag_skb);
-dropped:
-	kfree_skb(skb);
-	return 1;
-}
-
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
-{
-	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-	struct unicast_packet *unicast_packet;
-	struct orig_node *orig_node;
-	struct batman_if *batman_if;
-	struct neigh_node *router;
-	int data_len = skb->len;
-	uint8_t dstaddr[6];
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	/* get routing information */
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   ethhdr->h_dest));
-
-	/* check for hna host */
-	if (!orig_node)
-		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
-
-	router = find_router(bat_priv, orig_node, NULL);
-
-	if (!router)
-		goto unlock;
-
-	/* don't lock while sending the packets ... we therefore
-		* copy the required data before sending */
-
-	batman_if = router->if_incoming;
-	memcpy(dstaddr, router->addr, ETH_ALEN);
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	if (batman_if->if_status != IF_ACTIVE)
-		goto dropped;
-
-	if (atomic_read(&bat_priv->frag_enabled) &&
-	    data_len + sizeof(struct unicast_packet) > batman_if->net_dev->mtu)
-		return unicast_send_frag_skb(skb, bat_priv, batman_if,
-					     dstaddr, orig_node);
-
-	if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
-		goto dropped;
-
-	unicast_packet = (struct unicast_packet *)skb->data;
-
-	unicast_packet->version = COMPAT_VERSION;
-	/* batman packet type: unicast */
-	unicast_packet->packet_type = BAT_UNICAST;
-	/* set unicast ttl */
-	unicast_packet->ttl = TTL;
-	/* copy the destination for faster routing */
-	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-
-	send_skb_packet(skb, batman_if, dstaddr);
-	return 0;
-
-unlock:
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-dropped:
-	kfree_skb(skb);
-	return 1;
-}
diff --git a/drivers/staging/batman-adv/unicast.h b/drivers/staging/batman-adv/unicast.h
deleted file mode 100644
index 7973697..0000000
--- a/drivers/staging/batman-adv/unicast.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_UNICAST_H_
-#define _NET_BATMAN_ADV_UNICAST_H_
-
-#define FRAG_TIMEOUT 10000	/* purge frag list entrys after time in ms */
-#define FRAG_BUFFER_SIZE 6	/* number of list elements in buffer */
-
-struct sk_buff *merge_frag_packet(struct list_head *head,
-	struct frag_packet_list_entry *tfp,
-	struct sk_buff *skb);
-
-void create_frag_entry(struct list_head *head, struct sk_buff *skb);
-int create_frag_buffer(struct list_head *head);
-struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
-	struct unicast_frag_packet *up);
-void frag_list_free(struct list_head *head);
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
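
The reassembly side declared here keys on that pairing: search_frag_packet() in the removed unicast.c looks for a stored fragment whose sequence number is seqno+1 (for a head) or seqno-1 (for a tail) and whose UNI_FRAG_HEAD flag differs. A compact sketch of the matching rule, again with an assumed flag value and an illustrative slot structure standing in for frag_packet_list_entry:

/* Sketch of the counterpart lookup done by search_frag_packet(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UNI_FRAG_HEAD 0x01		/* assumed value */

struct slot {
	bool     used;
	uint16_t seqno;
	uint8_t  flags;
};

/* Return the slot holding the counterpart of (seqno, flags), or NULL.
 * A head with sequence number n pairs with the tail carrying n + 1, and
 * the two fragments must differ in the UNI_FRAG_HEAD flag. */
static struct slot *find_counterpart(struct slot *buf, int n,
				     uint16_t seqno, uint8_t flags)
{
	uint16_t want = (flags & UNI_FRAG_HEAD) ? seqno + 1 : seqno - 1;
	int i;

	for (i = 0; i < n; i++) {
		if (!buf[i].used)
			continue;
		if (buf[i].seqno == want &&
		    (buf[i].flags & UNI_FRAG_HEAD) != (flags & UNI_FRAG_HEAD))
			return &buf[i];
	}
	return NULL;
}

int main(void)
{
	struct slot buf[6] = {
		{ .used = true, .seqno = 42, .flags = 0 },	/* waiting tail */
	};

	/* The matching head carries seqno 41 and the UNI_FRAG_HEAD flag. */
	printf("counterpart %s\n",
	       find_counterpart(buf, 6, 41, UNI_FRAG_HEAD) ? "found" : "missing");
	return 0;
}
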
diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
deleted file mode 100644
index 3d2c1bc..0000000
--- a/drivers/staging/batman-adv/vis.c
+++ /dev/null
@@ -1,895 +0,0 @@
-/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "send.h"
-#include "translation-table.h"
-#include "vis.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "hash.h"
-
-#define MAX_VIS_PACKET_SIZE 1000
-
-/* Returns the smallest signed integer in two's complement with the sizeof x */
-#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
-
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
- * This means that for a uint8_t with the maximum value 255, it would think:
- *  - when adding nothing - it is neither a predecessor nor a successor
- *  - before adding more than 127 to the starting value - it is a predecessor,
- *  - when adding 128 - it is neither a predecessor nor a successor,
- *  - after adding more than 127 to the starting value - it is a successor */
-#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
-			_dummy > smallest_signed_int(_dummy); })
-#define seq_after(x, y) seq_before(y, x)
-
-static void start_vis_timer(struct bat_priv *bat_priv);
-
-/* free the info */
-static void free_info(struct kref *ref)
-{
-	struct vis_info *info = container_of(ref, struct vis_info, refcount);
-	struct bat_priv *bat_priv = info->bat_priv;
-	struct recvlist_node *entry, *tmp;
-	unsigned long flags;
-
-	list_del_init(&info->send_list);
-	spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
-	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
-		list_del(&entry->list);
-		kfree(entry);
-	}
-
-	spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
-	kfree_skb(info->skb_packet);
-}
-
-/* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(void *data1, void *data2)
-{
-	struct vis_info *d1, *d2;
-	struct vis_packet *p1, *p2;
-	d1 = data1;
-	d2 = data2;
-	p1 = (struct vis_packet *)d1->skb_packet->data;
-	p2 = (struct vis_packet *)d2->skb_packet->data;
-	return compare_orig(p1->vis_orig, p2->vis_orig);
-}
-
-/* hash function to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static int vis_info_choose(void *data, int size)
-{
-	struct vis_info *vis_info = data;
-	struct vis_packet *packet;
-	unsigned char *key;
-	uint32_t hash = 0;
-	size_t i;
-
-	packet = (struct vis_packet *)vis_info->skb_packet->data;
-	key = packet->vis_orig;
-	for (i = 0; i < ETH_ALEN; i++) {
-		hash += key[i];
-		hash += (hash << 10);
-		hash ^= (hash >> 6);
-	}
-
-	hash += (hash << 3);
-	hash ^= (hash >> 11);
-	hash += (hash << 15);
-
-	return hash % size;
-}
-
-/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list */
-static void vis_data_insert_interface(const uint8_t *interface,
-				      struct hlist_head *if_list,
-				      bool primary)
-{
-	struct if_list_entry *entry;
-	struct hlist_node *pos;
-
-	hlist_for_each_entry(entry, pos, if_list, list) {
-		if (compare_orig(entry->addr, (void *)interface))
-			return;
-	}
-
-	/* its a new address, add it to the list */
-	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-	if (!entry)
-		return;
-	memcpy(entry->addr, interface, ETH_ALEN);
-	entry->primary = primary;
-	hlist_add_head(&entry->list, if_list);
-}
-
-static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
-{
-	struct if_list_entry *entry;
-	struct hlist_node *pos;
-	size_t len = 0;
-
-	hlist_for_each_entry(entry, pos, if_list, list) {
-		if (entry->primary)
-			len += sprintf(buff + len, "PRIMARY, ");
-		else {
-			len += sprintf(buff + len,  "SEC %pM, ", entry->addr);
-		}
-	}
-
-	return len;
-}
-
-static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
-{
-	struct if_list_entry *entry;
-	struct hlist_node *pos;
-	size_t count = 0;
-
-	hlist_for_each_entry(entry, pos, if_list, list) {
-		if (entry->primary)
-			count += 9;
-		else
-			count += 23;
-	}
-
-	return count;
-}
-
-/* read an entry  */
-static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
-				   uint8_t *src, bool primary)
-{
-	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
-	if (primary && entry->quality == 0)
-		return sprintf(buff, "HNA %pM, ", entry->dest);
-	else if (compare_orig(entry->src, src))
-		return sprintf(buff, "TQ %pM %d, ", entry->dest,
-			       entry->quality);
-
-	return 0;
-}
-
-int vis_seq_print_text(struct seq_file *seq, void *offset)
-{
-	HASHIT(hashit);
-	HASHIT(hashit_count);
-	struct vis_info *info;
-	struct vis_packet *packet;
-	struct vis_info_entry *entries;
-	struct net_device *net_dev = (struct net_device *)seq->private;
-	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	HLIST_HEAD(vis_if_list);
-	struct if_list_entry *entry;
-	struct hlist_node *pos, *n;
-	int i;
-	unsigned long flags;
-	int vis_server = atomic_read(&bat_priv->vis_mode);
-	size_t buff_pos, buf_size;
-	char *buff;
-
-	if ((!bat_priv->primary_if) ||
-	    (vis_server == VIS_TYPE_CLIENT_UPDATE))
-		return 0;
-
-	buf_size = 1;
-	/* Estimate length */
-	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-	while (hash_iterate(bat_priv->vis_hash, &hashit_count)) {
-		info = hashit_count.bucket->data;
-		packet = (struct vis_packet *)info->skb_packet->data;
-		entries = (struct vis_info_entry *)
-			  ((char *)packet + sizeof(struct vis_packet));
-
-		for (i = 0; i < packet->entries; i++) {
-			if (entries[i].quality == 0)
-				continue;
-			vis_data_insert_interface(entries[i].src, &vis_if_list,
-				compare_orig(entries[i].src, packet->vis_orig));
-		}
-
-		hlist_for_each_entry(entry, pos, &vis_if_list, list) {
-			buf_size += 18 + 26 * packet->entries;
-
-			/* add primary/secondary records */
-			if (compare_orig(entry->addr, packet->vis_orig))
-				buf_size +=
-					vis_data_count_prim_sec(&vis_if_list);
-
-			buf_size += 1;
-		}
-
-		hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
-			hlist_del(&entry->list);
-			kfree(entry);
-		}
-	}
-
-	buff = kmalloc(buf_size, GFP_ATOMIC);
-	if (!buff) {
-		spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-		return -ENOMEM;
-	}
-	buff[0] = '\0';
-	buff_pos = 0;
-
-	while (hash_iterate(bat_priv->vis_hash, &hashit)) {
-		info = hashit.bucket->data;
-		packet = (struct vis_packet *)info->skb_packet->data;
-		entries = (struct vis_info_entry *)
-			  ((char *)packet + sizeof(struct vis_packet));
-
-		for (i = 0; i < packet->entries; i++) {
-			if (entries[i].quality == 0)
-				continue;
-			vis_data_insert_interface(entries[i].src, &vis_if_list,
-				compare_orig(entries[i].src, packet->vis_orig));
-		}
-
-		hlist_for_each_entry(entry, pos, &vis_if_list, list) {
-			buff_pos += sprintf(buff + buff_pos, "%pM,",
-					    entry->addr);
-
-			for (i = 0; i < packet->entries; i++)
-				buff_pos += vis_data_read_entry(buff + buff_pos,
-								&entries[i],
-								entry->addr,
-								entry->primary);
-
-			/* add primary/secondary records */
-			if (compare_orig(entry->addr, packet->vis_orig))
-				buff_pos +=
-					vis_data_read_prim_sec(buff + buff_pos,
-							       &vis_if_list);
-
-			buff_pos += sprintf(buff + buff_pos, "\n");
-		}
-
-		hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
-			hlist_del(&entry->list);
-			kfree(entry);
-		}
-	}
-
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-
-	seq_printf(seq, "%s", buff);
-	kfree(buff);
-
-	return 0;
-}
-
-/* add the info packet to the send list, if it was not
- * already linked in. */
-static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
-{
-	if (list_empty(&info->send_list)) {
-		kref_get(&info->refcount);
-		list_add_tail(&info->send_list, &bat_priv->vis_send_list);
-	}
-}
-
-/* delete the info packet from the send list, if it was
- * linked in. */
-static void send_list_del(struct vis_info *info)
-{
-	if (!list_empty(&info->send_list)) {
-		list_del_init(&info->send_list);
-		kref_put(&info->refcount, free_info);
-	}
-}
-
-/* tries to add one entry to the receive list. */
-static void recv_list_add(struct bat_priv *bat_priv,
-			  struct list_head *recv_list, char *mac)
-{
-	struct recvlist_node *entry;
-	unsigned long flags;
-
-	entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
-	if (!entry)
-		return;
-
-	memcpy(entry->mac, mac, ETH_ALEN);
-	spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
-	list_add_tail(&entry->list, recv_list);
-	spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
-}
-
-/* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct bat_priv *bat_priv,
-			   struct list_head *recv_list, char *mac)
-{
-	struct recvlist_node *entry;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
-	list_for_each_entry(entry, recv_list, list) {
-		if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
-			spin_unlock_irqrestore(&bat_priv->vis_list_lock,
-					       flags);
-			return 1;
-		}
-	}
-	spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
-	return 0;
-}
-
-/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
- * broken.. ).	vis hash must be locked outside.  is_new is set when the packet
- * is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct bat_priv *bat_priv,
-				   struct vis_packet *vis_packet,
-				   int vis_info_len, int *is_new,
-				   int make_broadcast)
-{
-	struct vis_info *info, *old_info;
-	struct vis_packet *search_packet, *old_packet;
-	struct vis_info search_elem;
-	struct vis_packet *packet;
-
-	*is_new = 0;
-	/* sanity check */
-	if (!bat_priv->vis_hash)
-		return NULL;
-
-	/* see if the packet is already in vis_hash */
-	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
-	if (!search_elem.skb_packet)
-		return NULL;
-	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
-						     sizeof(struct vis_packet));
-
-	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-	old_info = hash_find(bat_priv->vis_hash, &search_elem);
-	kfree_skb(search_elem.skb_packet);
-
-	if (old_info != NULL) {
-		old_packet = (struct vis_packet *)old_info->skb_packet->data;
-		if (!seq_after(ntohl(vis_packet->seqno),
-			       ntohl(old_packet->seqno))) {
-			if (old_packet->seqno == vis_packet->seqno) {
-				recv_list_add(bat_priv, &old_info->recv_list,
-					      vis_packet->sender_orig);
-				return old_info;
-			} else {
-				/* newer packet is already in hash. */
-				return NULL;
-			}
-		}
-		/* remove old entry */
-		hash_remove(bat_priv->vis_hash, old_info);
-		send_list_del(old_info);
-		kref_put(&old_info->refcount, free_info);
-	}
-
-	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
-	if (!info)
-		return NULL;
-
-	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
-					 vis_info_len + sizeof(struct ethhdr));
-	if (!info->skb_packet) {
-		kfree(info);
-		return NULL;
-	}
-	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
-	packet = (struct vis_packet *)skb_put(info->skb_packet,
-					      sizeof(struct vis_packet) +
-					      vis_info_len);
-
-	kref_init(&info->refcount);
-	INIT_LIST_HEAD(&info->send_list);
-	INIT_LIST_HEAD(&info->recv_list);
-	info->first_seen = jiffies;
-	info->bat_priv = bat_priv;
-	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
-
-	/* initialize and add new packet. */
-	*is_new = 1;
-
-	/* Make it a broadcast packet, if required */
-	if (make_broadcast)
-		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
-
-	/* repair if entries is longer than packet. */
-	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
-		packet->entries = vis_info_len / sizeof(struct vis_info_entry);
-
-	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
-
-	/* try to add it */
-	if (hash_add(bat_priv->vis_hash, info) < 0) {
-		/* did not work (for some reason) */
-		kref_put(&old_info->refcount, free_info);
-		info = NULL;
-	}
-
-	return info;
-}
-
-/* handle the server sync packet, forward if needed. */
-void receive_server_sync_packet(struct bat_priv *bat_priv,
-				struct vis_packet *vis_packet,
-				int vis_info_len)
-{
-	struct vis_info *info;
-	int is_new, make_broadcast;
-	unsigned long flags;
-	int vis_server = atomic_read(&bat_priv->vis_mode);
-
-	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
-
-	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-	info = add_packet(bat_priv, vis_packet, vis_info_len,
-			  &is_new, make_broadcast);
-	if (!info)
-		goto end;
-
-	/* only if we are server ourselves and packet is newer than the one in
-	 * hash.*/
-	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
-		send_list_add(bat_priv, info);
-end:
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-}
-
-/* handle an incoming client update packet and schedule forward if needed. */
-void receive_client_update_packet(struct bat_priv *bat_priv,
-				  struct vis_packet *vis_packet,
-				  int vis_info_len)
-{
-	struct vis_info *info;
-	struct vis_packet *packet;
-	int is_new;
-	unsigned long flags;
-	int vis_server = atomic_read(&bat_priv->vis_mode);
-	int are_target = 0;
-
-	/* clients shall not broadcast. */
-	if (is_bcast(vis_packet->target_orig))
-		return;
-
-	/* Are we the target for this VIS packet? */
-	if (vis_server == VIS_TYPE_SERVER_SYNC	&&
-	    is_my_mac(vis_packet->target_orig))
-		are_target = 1;
-
-	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-	info = add_packet(bat_priv, vis_packet, vis_info_len,
-			  &is_new, are_target);
-
-	if (!info)
-		goto end;
-	/* note that outdated packets will be dropped at this point. */
-
-	packet = (struct vis_packet *)info->skb_packet->data;
-
-	/* send only if we're the target server or ... */
-	if (are_target && is_new) {
-		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
-		send_list_add(bat_priv, info);
-
-		/* ... we're not the recipient (and thus need to forward). */
-	} else if (!is_my_mac(packet->target_orig)) {
-		send_list_add(bat_priv, info);
-	}
-
-end:
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-}
-
-/* Walk the originators and find the VIS server with the best tq. Set the packet
- * address to its address and return the best_tq.
- *
- * Must be called with the originator hash locked */
-static int find_best_vis_server(struct bat_priv *bat_priv,
-				struct vis_info *info)
-{
-	HASHIT(hashit);
-	struct orig_node *orig_node;
-	struct vis_packet *packet;
-	int best_tq = -1;
-
-	packet = (struct vis_packet *)info->skb_packet->data;
-
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-		orig_node = hashit.bucket->data;
-		if ((orig_node) && (orig_node->router) &&
-		    (orig_node->flags & VIS_SERVER) &&
-		    (orig_node->router->tq_avg > best_tq)) {
-			best_tq = orig_node->router->tq_avg;
-			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-		}
-	}
-	return best_tq;
-}
-
-/* Return true if the vis packet is full. */
-static bool vis_packet_full(struct vis_info *info)
-{
-	struct vis_packet *packet;
-	packet = (struct vis_packet *)info->skb_packet->data;
-
-	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
-		< packet->entries + 1)
-		return true;
-	return false;
-}
-
-/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated */
-static int generate_vis_packet(struct bat_priv *bat_priv)
-{
-	HASHIT(hashit_local);
-	HASHIT(hashit_global);
-	struct orig_node *orig_node;
-	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
-	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
-	struct vis_info_entry *entry;
-	struct hna_local_entry *hna_local_entry;
-	int best_tq = -1;
-	unsigned long flags;
-
-	info->first_seen = jiffies;
-	packet->vis_type = atomic_read(&bat_priv->vis_mode);
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
-	packet->ttl = TTL;
-	packet->seqno = htonl(ntohl(packet->seqno) + 1);
-	packet->entries = 0;
-	skb_trim(info->skb_packet, sizeof(struct vis_packet));
-
-	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
-		best_tq = find_best_vis_server(bat_priv, info);
-
-		if (best_tq < 0) {
-			spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
-					       flags);
-			return -1;
-		}
-	}
-
-	while (hash_iterate(bat_priv->orig_hash, &hashit_global)) {
-		orig_node = hashit_global.bucket->data;
-
-		if (!orig_node->router)
-			continue;
-
-		if (!compare_orig(orig_node->router->addr, orig_node->orig))
-			continue;
-
-		if (orig_node->router->if_incoming->if_status != IF_ACTIVE)
-			continue;
-
-		if (orig_node->router->tq_avg < 1)
-			continue;
-
-		/* fill one entry into buffer. */
-		entry = (struct vis_info_entry *)
-				skb_put(info->skb_packet, sizeof(*entry));
-		memcpy(entry->src,
-		       orig_node->router->if_incoming->net_dev->dev_addr,
-		       ETH_ALEN);
-		memcpy(entry->dest, orig_node->orig, ETH_ALEN);
-		entry->quality = orig_node->router->tq_avg;
-		packet->entries++;
-
-		if (vis_packet_full(info)) {
-			spin_unlock_irqrestore(
-					&bat_priv->orig_hash_lock, flags);
-			return 0;
-		}
-	}
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
-	while (hash_iterate(bat_priv->hna_local_hash, &hashit_local)) {
-		hna_local_entry = hashit_local.bucket->data;
-		entry = (struct vis_info_entry *)skb_put(info->skb_packet,
-							 sizeof(*entry));
-		memset(entry->src, 0, ETH_ALEN);
-		memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
-		entry->quality = 0; /* 0 means HNA */
-		packet->entries++;
-
-		if (vis_packet_full(info)) {
-			spin_unlock_irqrestore(&bat_priv->hna_lhash_lock,
-					       flags);
-			return 0;
-		}
-	}
-
-	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
-	return 0;
-}
-
-/* free old vis packets. Must be called with this vis_hash_lock
- * held */
-static void purge_vis_packets(struct bat_priv *bat_priv)
-{
-	HASHIT(hashit);
-	struct vis_info *info;
-
-	while (hash_iterate(bat_priv->vis_hash, &hashit)) {
-		info = hashit.bucket->data;
-
-		/* never purge own data. */
-		if (info == bat_priv->my_vis_info)
-			continue;
-
-		if (time_after(jiffies,
-			       info->first_seen + VIS_TIMEOUT * HZ)) {
-			hash_remove_bucket(bat_priv->vis_hash, &hashit);
-			send_list_del(info);
-			kref_put(&info->refcount, free_info);
-		}
-	}
-}
-
-static void broadcast_vis_packet(struct bat_priv *bat_priv,
-				 struct vis_info *info)
-{
-	HASHIT(hashit);
-	struct orig_node *orig_node;
-	struct vis_packet *packet;
-	struct sk_buff *skb;
-	unsigned long flags;
-	struct batman_if *batman_if;
-	uint8_t dstaddr[ETH_ALEN];
-
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	packet = (struct vis_packet *)info->skb_packet->data;
-
-	/* send to all routers in range. */
-	while (hash_iterate(bat_priv->orig_hash, &hashit)) {
-		orig_node = hashit.bucket->data;
-
-		/* if it's a vis server and reachable, send it. */
-		if ((!orig_node) || (!orig_node->router))
-			continue;
-		if (!(orig_node->flags & VIS_SERVER))
-			continue;
-		/* don't send it if we already received the packet from
-		 * this node. */
-		if (recv_list_is_in(bat_priv, &info->recv_list,
-							orig_node->orig))
-			continue;
-
-		memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-		batman_if = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-		skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-		if (skb)
-			send_skb_packet(skb, batman_if, dstaddr);
-
-		spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-
-	}
-
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-static void unicast_vis_packet(struct bat_priv *bat_priv,
-			       struct vis_info *info)
-{
-	struct orig_node *orig_node;
-	struct sk_buff *skb;
-	struct vis_packet *packet;
-	unsigned long flags;
-	struct batman_if *batman_if;
-	uint8_t dstaddr[ETH_ALEN];
-
-	spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
-	packet = (struct vis_packet *)info->skb_packet->data;
-	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-						   packet->target_orig));
-
-	if ((!orig_node) || (!orig_node->router))
-		goto out;
-
-	/* don't lock while sending the packets ... we therefore
-	 * copy the required data before sending */
-	batman_if = orig_node->router->if_incoming;
-	memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-
-	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-	if (skb)
-		send_skb_packet(skb, batman_if, dstaddr);
-
-	return;
-
-out:
-	spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
-}
-
-/* only send one vis packet. called from send_vis_packets() */
-static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
-{
-	struct vis_packet *packet;
-
-	packet = (struct vis_packet *)info->skb_packet->data;
-	if (packet->ttl < 2) {
-		pr_debug("Error - can't send vis packet: ttl exceeded\n");
-		return;
-	}
-
-	memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
-	       ETH_ALEN);
-	packet->ttl--;
-
-	if (is_bcast(packet->target_orig))
-		broadcast_vis_packet(bat_priv, info);
-	else
-		unicast_vis_packet(bat_priv, info);
-	packet->ttl++; /* restore TTL */
-}
-
-/* called from timer; send (and maybe generate) vis packet. */
-static void send_vis_packets(struct work_struct *work)
-{
-	struct delayed_work *delayed_work =
-		container_of(work, struct delayed_work, work);
-	struct bat_priv *bat_priv =
-		container_of(delayed_work, struct bat_priv, vis_work);
-	struct vis_info *info, *temp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-	purge_vis_packets(bat_priv);
-
-	if (generate_vis_packet(bat_priv) == 0) {
-		/* schedule if generation was successful */
-		send_list_add(bat_priv, bat_priv->my_vis_info);
-	}
-
-	list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
-				 send_list) {
-
-		kref_get(&info->refcount);
-		spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-
-		if (bat_priv->primary_if)
-			send_vis_packet(bat_priv, info);
-
-		spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-		send_list_del(info);
-		kref_put(&info->refcount, free_info);
-	}
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-	start_vis_timer(bat_priv);
-}
-
-/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(struct bat_priv *bat_priv)
-{
-	struct vis_packet *packet;
-	unsigned long flags;
-
-	if (bat_priv->vis_hash)
-		return 1;
-
-	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-
-	bat_priv->vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
-	if (!bat_priv->vis_hash) {
-		pr_err("Can't initialize vis_hash\n");
-		goto err;
-	}
-
-	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-	if (!bat_priv->my_vis_info) {
-		pr_err("Can't initialize vis packet\n");
-		goto err;
-	}
-
-	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
-						sizeof(struct vis_packet) +
-						MAX_VIS_PACKET_SIZE +
-						sizeof(struct ethhdr));
-	if (!bat_priv->my_vis_info->skb_packet)
-		goto free_info;
-
-	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
-	packet = (struct vis_packet *)skb_put(
-					bat_priv->my_vis_info->skb_packet,
-					sizeof(struct vis_packet));
-
-	/* prefill the vis info */
-	bat_priv->my_vis_info->first_seen = jiffies -
-						msecs_to_jiffies(VIS_INTERVAL);
-	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
-	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
-	kref_init(&bat_priv->my_vis_info->refcount);
-	bat_priv->my_vis_info->bat_priv = bat_priv;
-	packet->version = COMPAT_VERSION;
-	packet->packet_type = BAT_VIS;
-	packet->ttl = TTL;
-	packet->seqno = 0;
-	packet->entries = 0;
-
-	INIT_LIST_HEAD(&bat_priv->vis_send_list);
-
-	if (hash_add(bat_priv->vis_hash, bat_priv->my_vis_info) < 0) {
-		pr_err("Can't add own vis packet into hash\n");
-		/* not in hash, need to remove it manually. */
-		kref_put(&bat_priv->my_vis_info->refcount, free_info);
-		goto err;
-	}
-
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-	start_vis_timer(bat_priv);
-	return 1;
-
-free_info:
-	kfree(bat_priv->my_vis_info);
-	bat_priv->my_vis_info = NULL;
-err:
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-	vis_quit(bat_priv);
-	return 0;
-}
-
-/* Decrease the reference count on a hash item info */
-static void free_info_ref(void *data, void *arg)
-{
-	struct vis_info *info = data;
-
-	send_list_del(info);
-	kref_put(&info->refcount, free_info);
-}
-
-/* shutdown vis-server */
-void vis_quit(struct bat_priv *bat_priv)
-{
-	unsigned long flags;
-	if (!bat_priv->vis_hash)
-		return;
-
-	cancel_delayed_work_sync(&bat_priv->vis_work);
-
-	spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
-	/* properly remove, kill timers ... */
-	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
-	bat_priv->vis_hash = NULL;
-	bat_priv->my_vis_info = NULL;
-	spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
-}
-
-/* schedule packets for (re)transmission */
-static void start_vis_timer(struct bat_priv *bat_priv)
-{
-	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
-	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
-			   msecs_to_jiffies(VIS_INTERVAL));
-}
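
The seq_before()/seq_after() macros near the top of the removed vis.c are the subtle part of this file: they compare wrapping sequence counters by checking whether the unsigned difference lands strictly above the type's sign-bit value. Written out for uint8_t, where the behaviour described in the comment is easy to verify by hand (the helper name is ours, not the driver's):

/* Wrap-safe sequence comparison, specialized to an 8-bit counter. */
#include <stdint.h>
#include <stdio.h>

/* For uint8_t the "smallest signed int" constant is 1u << 7 = 128. */
static int seq_before_u8(uint8_t x, uint8_t y)
{
	uint8_t diff = x - y;		/* wraps modulo 256 */

	return diff > 128u;
}

#define seq_after_u8(x, y) seq_before_u8(y, x)

int main(void)
{
	/* 250 is a predecessor of 2 even though 250 > 2 numerically,
	 * because the counter has wrapped in between. */
	printf("seq_before(250, 2) = %d\n", seq_before_u8(250, 2));	/* 1 */
	printf("seq_before(2, 250) = %d\n", seq_before_u8(2, 250));	/* 0 */
	/* A distance of exactly 128 is neither before nor after. */
	printf("seq_before(0, 128) = %d\n", seq_before_u8(0, 128));	/* 0 */
	printf("seq_after(0, 128)  = %d\n", seq_after_u8(0, 128));	/* 0 */
	return 0;
}
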
diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
deleted file mode 100644
index 2c3b330..0000000
--- a/drivers/staging/batman-adv/vis.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_VIS_H_
-#define _NET_BATMAN_ADV_VIS_H_
-
-#define VIS_TIMEOUT		200	/* timeout of vis packets in seconds */
-
-int vis_seq_print_text(struct seq_file *seq, void *offset);
-void receive_server_sync_packet(struct bat_priv *bat_priv,
-				struct vis_packet *vis_packet,
-				int vis_info_len);
-void receive_client_update_packet(struct bat_priv *bat_priv,
-				  struct vis_packet *vis_packet,
-				  int vis_info_len);
-int vis_init(struct bat_priv *bat_priv);
-void vis_quit(struct bat_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_VIS_H_ */
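
vis_info_choose() in the removed vis.c selects a bucket with the one-at-a-time hash its comment points to, applied to the 6-byte originator address. Transcribed as standalone C; the 256-entry table size matches what vis_init() passed to hash_new(), and the MAC value is only a placeholder:

/* One-at-a-time hash as used for vis_hash bucket selection. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint32_t one_at_a_time(const unsigned char *key, size_t len)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash;
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	int table_size = 256;	/* same size vis_init() passes to hash_new() */

	printf("bucket = %u\n", one_at_a_time(mac, ETH_ALEN) % table_size);
	return 0;
}
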
diff --git a/drivers/staging/bcm/Adapter.h b/drivers/staging/bcm/Adapter.h
index 748460e..32909e2 100644
--- a/drivers/staging/bcm/Adapter.h
+++ b/drivers/staging/bcm/Adapter.h
@@ -7,53 +7,6 @@
 #define MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES 256
 #include "Debug.h"
 
-typedef struct _LIST_ENTRY{
-	struct _LIST_ENTRY 	*next;
-	struct _LIST_ENTRY 	*prev;
-} LIST_ENTRY, *PLIST_ENTRY;
-
-typedef struct _BCM_LIST_ENTRY {
-
-    LIST_ENTRY  		Link;
-
-} BCM_LIST_ENTRY, *PBCM_LIST_ENTRY;
-
-typedef enum _RCB_STATUS
-{
-	DRIVER_PROCESSED=1,
-	APPLICATION_PROCESSED
-} RCB_STATUS, *PRCB_STATUS;
-
-#define fFILLED 1
-#define fEMPTY 0
-
-struct _BCM_CB
-{
-	// The network packet that this RCB is receiving
-	PVOID      			pv_packet;
-	// Describes the length of the packet .
-	UINT                ui_packet_length;
-	// Pointer to the first buffer in the packet (only one buffer for Rx)
-	PUCHAR				buffer;
-	atomic_t	        status;
-	UINT	            filled;
-} __attribute__((packed));
-typedef struct _BCM_CB BCM_CB,*PBCM_CB;
-
-typedef BCM_CB BCM_RCB, *PBCM_RCB;
-typedef BCM_CB BCM_TCB, *PBCM_TCB;
-
-/* This is to be stored in the "pvOsDepData" of ADAPTER */
-typedef struct LINUX_DEP_DATA
-{
-	struct net_device		*virtualdev;	/* Our Interface (veth0) */
-	struct net_device		*actualdev;	/* True Interface (eth0) */
-	struct net_device_stats netstats;	/* Net statistics */
-	struct fasync_struct	*async_queue;	/* For asynchronus notification */
-
-} LINUX_DEP_DATA, *PLINUX_DEP_DATA;
-
-
 struct _LEADER
 {
 	USHORT 	Vcid;
@@ -429,26 +382,28 @@
 struct _MINI_ADAPTER
 {
 	struct _MINI_ADAPTER *next;
-	PVOID			    pvOsDepData;
+	struct net_device	*dev;
+	u32			msg_enable;
+
 	CHAR                *caDsxReqResp;
-	atomic_t			ApplicationRunning;
+	atomic_t		ApplicationRunning;
 	volatile INT		CtrlQueueLen;
-	atomic_t            AppCtrlQueueLen;
-	BOOLEAN             AppCtrlQueueOverFlow;
-	atomic_t			CurrentApplicationCount;
-	atomic_t 			RegisteredApplicationCount;
-	BOOLEAN			    TimerActive;
-	ULONG				StatisticsPointer;
+	atomic_t            	AppCtrlQueueLen;
+	BOOLEAN             	AppCtrlQueueOverFlow;
+	atomic_t		CurrentApplicationCount;
+	atomic_t 		RegisteredApplicationCount;
+	BOOLEAN		  	LinkUpStatus;
+	BOOLEAN		    	TimerActive;
+	u32			StatisticsPointer;
 	struct sk_buff		*RxControlHead;
 	struct sk_buff		*RxControlTail;
-//	spinlock_t			RxControlQueuelock;
+
 	struct semaphore	RxAppControlQueuelock;
 	struct semaphore	fw_download_sema;
 
 	PPER_TARANG_DATA    pTarangs;
 	spinlock_t			control_queue_lock;
 	wait_queue_head_t	process_read_wait_queue;
-	ULONG		    	bcm_jiffies;	/* Store Jiffies value */
 
 	// the pointer to the first packet we have queued in send
 	// deserialized miniport support variables
@@ -458,24 +413,15 @@
 	// this to keep track of the Tx and Rx MailBox Registers.
 	atomic_t		    CurrNumFreeTxDesc;
 	// to keep track the no of byte recieved
-	atomic_t			RxRollOverCount;
 	USHORT				PrevNumRecvDescs;
 	USHORT				CurrNumRecvDescs;
-	atomic_t			GoodRxByteCount;
-	atomic_t			GoodRxPktCount;
-	atomic_t			BadRxByteCount;
-	atomic_t			RxPacketDroppedCount;
-	atomic_t			GoodTxByteCount;
-	atomic_t			TxTotalPacketCount;
-	atomic_t			TxDroppedPacketCount;
-	ULONG			   	LinkUpStatus;
-	BOOLEAN			    TransferMode;
 	UINT				u32TotalDSD;
 	PacketInfo		    PackInfo[NO_OF_QUEUES];
 	S_CLASSIFIER_RULE	astClassifierTable[MAX_CLASSIFIERS];
+	BOOLEAN			    TransferMode;
 
 	/*************** qos ******************/
-	UINT				bETHCSEnabled;
+	BOOLEAN			    bETHCSEnabled;
 
 	ULONG			    BEBucketSize;
 	ULONG			    rtPSBucketSize;
@@ -483,7 +429,6 @@
 	BOOLEAN			    AutoLinkUp;
 	BOOLEAN			    AutoSyncup;
 
-	struct net_device	*dev;
 	int				major;
 	int				minor;
 	wait_queue_head_t 	tx_packet_wait_queue;
@@ -491,8 +436,6 @@
 	atomic_t			process_waiting;
 	BOOLEAN 			fw_download_done;
 
-	unsigned int		ctrlpkt_present;
-	BOOLEAN 			packets_given_to_all;
 	char 				*txctlpacket[MAX_CNTRL_PKTS];
 	atomic_t			cntrlpktCnt ;
 	atomic_t			index_app_read_cntrlpkt;
@@ -502,34 +445,30 @@
 	struct semaphore 	rdmwrmsync;
 
 	STTARGETDSXBUFFER	astTargetDsxBuffer[MAX_TARGET_DSX_BUFFERS];
-	ULONG				ulFreeTargetBufferCnt;
+	ULONG			ulFreeTargetBufferCnt;
 	ULONG              	ulCurrentTargetBuffer;
 	ULONG              	ulTotalTargetBuffersAvailable;
-	unsigned int		timeout;
-	int 				irq;
+
 	unsigned long 		chip_id;
-	unsigned int		bFlashBoot;
-	unsigned int 		if_up;
-//	spinlock_t			sleeper_lock;
-	atomic_t			rdm_wrm_access;
-	atomic_t			tx_rx_access;
+
 	wait_queue_head_t 	lowpower_mode_wait_queue;
-	atomic_t			bAbortedByHost;
-	BOOLEAN				bBinDownloaded;
-	BOOLEAN				bCfgDownloaded;
-	USHORT				usBestEffortQueueIndex;
-	BOOLEAN				bSyncUpRequestSent;
-//	struct semaphore 	data_packet_queue_lock;
+
+	BOOLEAN			bFlashBoot;
+	BOOLEAN			bBinDownloaded;
+	BOOLEAN			bCfgDownloaded;
+	BOOLEAN			bSyncUpRequestSent;
+	USHORT			usBestEffortQueueIndex;
+
 	wait_queue_head_t 	ioctl_fw_dnld_wait_queue;
 	BOOLEAN				waiting_to_fw_download_done;
 	pid_t				fw_download_process_pid;
 	PSTARGETPARAMS		pstargetparams;
 	BOOLEAN				device_removed;
 	BOOLEAN				DeviceAccess;
-	INT					DDRSetting;
-	BOOLEAN				bDDRInitDone;
-	ULONG				ulPowerSaveMode;
 	BOOLEAN				bIsAutoCorrectEnabled;
+	BOOLEAN				bDDRInitDone;
+	INT				DDRSetting;
+	ULONG				ulPowerSaveMode;
 	spinlock_t			txtransmitlock;
 	B_UINT8				txtransmit_running;
 	/* Thread for control packet handling */
@@ -567,13 +506,13 @@
 	unsigned int	usIdleModePattern;
 	//BOOLEAN			bTriedToWakeUpFromShutdown;
 	BOOLEAN			bLinkDownRequested;
-	unsigned int	check_for_hang;
+
 	int 			downloadDDR;
 	PHS_DEVICE_EXTENSION stBCMPhsContext;
 	S_HDR_SUPRESSION_CONTEXTINFO	stPhsTxContextInfo;
 	uint8_t			ucaPHSPktRestoreBuf[2048];
 	uint8_t			bPHSEnabled;
-	int 			AutoFirmDld;
+	BOOLEAN			AutoFirmDld;
 	BOOLEAN         bMipsConfig;
 	BOOLEAN         bDPLLConfig;
 	UINT32			aTxPktSizeHist[MIBS_MAX_HIST_ENTRIES];
@@ -599,10 +538,9 @@
 
 
 	struct semaphore	NVMRdmWrmLock;
-	BOOLEAN			bNetworkInterfaceRegistered;
-	BOOLEAN			bNetdeviceNotifierRegistered;
+
 	struct device *pstCreatedClassDevice;
-	BOOLEAN			bUsbClassDriverRegistered;
+
 //	BOOLEAN				InterfaceUpStatus;
 	PFLASH2X_CS_INFO psFlash2xCSInfo;
 	PFLASH_CS_INFO psFlashCSInfo ;
@@ -630,17 +568,13 @@
 	struct semaphore	LowPowerModeSync;
 	ULONG	liDrainCalculated;
 	UINT gpioBitMap;
+
     S_BCM_DEBUG_STATE stDebugState;
 
 };
 typedef struct _MINI_ADAPTER MINI_ADAPTER, *PMINI_ADAPTER;
 
-
-typedef struct _DEVICE_EXTENSION
-{
-	PMINI_ADAPTER pAdapt;
-}DEVICE_EXTENSION,*PDEVICE_EXTENSION;
-
+#define GET_BCM_ADAPTER(net_dev)	netdev_priv(net_dev)
 
 struct _ETH_HEADER_STRUC {
     UCHAR       au8DestinationAddress[6];
@@ -667,8 +601,8 @@
 
 typedef struct _DDR_SETTING
 {
-	ULONG ulRegAddress;
-	ULONG ulRegValue;
+	UINT ulRegAddress;
+	UINT ulRegValue;
 }DDR_SETTING, *PDDR_SETTING;
 typedef DDR_SETTING DDR_SET_NODE, *PDDR_SET_NODE;
 INT
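
The new GET_BCM_ADAPTER() macro above replaces the pvOsDepData/DEVICE_EXTENSION indirection with the usual netdev_priv() pattern: the adapter state lives in the private area of the net_device allocation, so no separate lookup structure is needed. A kernel-style sketch of that pattern, with hypothetical example_* names and error handling trimmed:

/* Sketch: embedding driver state in the net_device private area. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_adapter {		/* stands in for struct _MINI_ADAPTER */
	struct net_device *dev;
	u32 msg_enable;
};

static struct net_device *example_create(void)
{
	struct net_device *dev;
	struct example_adapter *ad;

	dev = alloc_etherdev(sizeof(*ad));	/* priv area sized for the adapter */
	if (!dev)
		return NULL;

	ad = netdev_priv(dev);			/* what GET_BCM_ADAPTER() expands to */
	ad->dev = dev;				/* back-pointer kept in the adapter */

	return dev;
}
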
diff --git a/drivers/staging/bcm/Arp.c b/drivers/staging/bcm/Arp.c
deleted file mode 100644
index d60d859..0000000
--- a/drivers/staging/bcm/Arp.c
+++ /dev/null
@@ -1,94 +0,0 @@
-
-/*
- * File Name: Arp.c
- * Abstract: This file contains the routines for handling ARP PACKETS
- */
-#include "headers.h"
-#define	ARP_PKT_SIZE	60
-
-/* =========================================================================
- * Function    - reply_to_arp_request()
- *
- * Description - When this host tries to broadcast ARP request packet through
- *		 		 the virtual interface (veth0), reply directly to upper layer.
- *		 		 This function allocates a new skb for ARP reply packet,
- *		 		 fills in the fields of the packet and then sends it to
- *		 		 upper layer.
- *
- * Parameters  - skb:	Pointer to sk_buff structure of the ARP request pkt.
- *
- * Returns     - None
- * =========================================================================*/
-
-VOID
-reply_to_arp_request(struct sk_buff *skb)
-{
-	PMINI_ADAPTER		Adapter;
-	struct ArpHeader 	*pArpHdr = NULL;
-	struct ethhdr		*pethhdr = NULL;
-	UCHAR 				uiIPHdr[4];
-	/* Check for valid skb */
-	if(skb == NULL)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid skb: Cannot reply to ARP request\n");
-		return;
-	}
-
-
-	Adapter = GET_BCM_ADAPTER(skb->dev);
-	/* Print the ARP Request Packet */
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "ARP Packet Dump :");
-	BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, (PUCHAR)(skb->data), skb->len);
-
-	/*
-	 * Extract the Ethernet Header and Arp Payload including Header
-     */
-	pethhdr = (struct ethhdr *)skb->data;
-	pArpHdr  = (struct ArpHeader *)(skb->data+ETH_HLEN);
-
-	if(Adapter->bETHCSEnabled)
-	{
-		if(memcmp(pethhdr->h_source, Adapter->dev->dev_addr, ETH_ALEN))
-		{
-			bcm_kfree_skb(skb);
-			return;
-		}
-	}
-
-	// Set the Ethernet Header First.
-	memcpy(pethhdr->h_dest, pethhdr->h_source, ETH_ALEN);
-	if(!memcmp(pethhdr->h_source, Adapter->dev->dev_addr, ETH_ALEN))
-	{
-		pethhdr->h_source[5]++;
-	}
-
-	/* Set the reply to ARP Reply */
-	pArpHdr->arp.ar_op = ntohs(ARPOP_REPLY);
-
-	/* Set the HW Address properly */
-	memcpy(pArpHdr->ar_sha, pethhdr->h_source, ETH_ALEN);
-	memcpy(pArpHdr->ar_tha, pethhdr->h_dest, ETH_ALEN);
-
-	// Swapping the IP Adddress
-	memcpy(uiIPHdr,pArpHdr->ar_sip,4);
-	memcpy(pArpHdr->ar_sip,pArpHdr->ar_tip,4);
-	memcpy(pArpHdr->ar_tip,uiIPHdr,4);
-
-	/* Print the ARP Reply Packet */
-
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "ARP REPLY PACKET: ");
-
-	/* Send the Packet to upper layer */
-	BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, (PUCHAR)(skb->data), skb->len);
-
-	skb->protocol = eth_type_trans(skb,skb->dev);
-	skb->pkt_type = PACKET_HOST;
-
-//	skb->mac.raw=skb->data+LEADER_SIZE;
-	skb_set_mac_header (skb, LEADER_SIZE);
-	netif_rx(skb);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, ARP_RESP, DBG_LVL_ALL, "<=============\n");
-	return;
-}
-
-
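
For context on the deleted Arp.c: it synthesized an ARP reply for requests sent through the virtual interface and fed the result straight back up the stack. A generic userspace sketch of what such a reply rewrite looks like for IPv4 over Ethernet; the frame structure is written out here rather than taken from the driver's private ArpHeader type, and the source-MAC bump the driver applied to its own address is left out:

/* Generic ARP request -> reply rewrite for IPv4 over Ethernet. */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ARPOP_REPLY 2			/* opcode value per RFC 826 */

struct eth_arp_frame {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_proto;
	uint16_t ar_hrd, ar_pro;
	uint8_t  ar_hln, ar_pln;
	uint16_t ar_op;
	uint8_t  ar_sha[6];
	uint8_t  ar_sip[4];
	uint8_t  ar_tha[6];
	uint8_t  ar_tip[4];
} __attribute__((packed));

/* Build a reply claiming our_mac owns the requested IP, addressed back
 * to whoever asked. */
static void build_arp_reply(const struct eth_arp_frame *req,
			    const uint8_t our_mac[6],
			    struct eth_arp_frame *rep)
{
	*rep = *req;

	memcpy(rep->h_dest, req->h_source, 6);	/* answer goes back to the asker */
	memcpy(rep->h_source, our_mac, 6);
	rep->ar_op = htons(ARPOP_REPLY);

	memcpy(rep->ar_sha, our_mac, 6);	/* we own the requested IP */
	memcpy(rep->ar_sip, req->ar_tip, 4);
	memcpy(rep->ar_tha, req->ar_sha, 6);
	memcpy(rep->ar_tip, req->ar_sip, 4);
}
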
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index fead9c5..31674ea 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -12,7 +12,7 @@
 *
 * Returns	  - Zero(Success)
 ****************************************************************/
-static struct class *bcm_class = NULL;
+
 static int bcm_char_open(struct inode *inode, struct file * filp)
 {
 	PMINI_ADAPTER 		Adapter = NULL;
@@ -93,7 +93,7 @@
     /*Stop Queuing the control response Packets*/
     atomic_dec(&Adapter->ApplicationRunning);
 
-    bcm_kfree(pTarang);
+    kfree(pTarang);
 
 	/* remove this filp from the asynchronously notified filp's */
     filp->private_data = NULL;
@@ -102,11 +102,11 @@
 
 static ssize_t bcm_char_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
 {
-    PPER_TARANG_DATA pTarang = (PPER_TARANG_DATA)filp->private_data;
+	PPER_TARANG_DATA pTarang = filp->private_data;
 	PMINI_ADAPTER	Adapter = pTarang->Adapter;
-    struct sk_buff* Packet = NULL;
-    UINT            PktLen = 0;
-	int 			wait_ret_val=0;
+	struct sk_buff* Packet = NULL;
+	ssize_t         PktLen = 0;
+	int 		wait_ret_val=0;
 
 	wait_ret_val = wait_event_interruptible(Adapter->process_read_wait_queue,
 		(pTarang->RxAppControlHead || Adapter->device_removed));
@@ -139,14 +139,16 @@
 	if(Packet)
 	{
 		PktLen = Packet->len;
-		if(copy_to_user(buf, Packet->data, PktLen))
+		if(copy_to_user(buf, Packet->data, min_t(size_t, PktLen, size)))
 		{
-			bcm_kfree_skb(Packet);
+			dev_kfree_skb(Packet);
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\nReturning from copy to user failure \n");
 			return -EFAULT;
 		}
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read %d Bytes From Adapter packet = 0x%p by process %d!\n", PktLen, Packet, current->pid);
-		bcm_kfree_skb(Packet);
+		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
+				"Read %zd Bytes From Adapter packet = %p by process %d!\n",
+				PktLen, Packet, current->pid);
+		dev_kfree_skb(Packet);
 	}
 
     BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "<====\n");
@@ -155,15 +157,12 @@
 
 static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
 {
-    PPER_TARANG_DATA  pTarang = (PPER_TARANG_DATA)filp->private_data;
-	void __user *argp = (void __user *)argp;
+	PPER_TARANG_DATA  pTarang = filp->private_data;
+	void __user *argp = (void __user *)arg;
 	PMINI_ADAPTER 	Adapter = pTarang->Adapter;
 	INT  			Status = STATUS_FAILURE;
-	IOCTL_BUFFER 	IoBuffer={};
-#ifndef BCM_SHM_INTERFACE
-    int timeout = 0;
-#endif
-
+	int timeout = 0;
+	IOCTL_BUFFER 	IoBuffer;
 
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
 
@@ -204,50 +203,41 @@
 
 	Status = vendorextnIoctl(Adapter, cmd, arg);
 	if(Status != CONTINUE_COMMON_PATH )
-	{
 		 return Status;
-	}
 
 	switch(cmd){
 		// Rdms for Swin Idle...
 		case IOCTL_BCM_REGISTER_READ_PRIVATE:
 		{
 			RDM_BUFFER  sRdmBuffer = {0};
-			PCHAR temp_buff = NULL;
-			UINT Bufflen = 0;
-			/* Copy Ioctl Buffer structure */
-			if(copy_from_user((PCHAR)&IoBuffer, argp,
-				sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
+			PCHAR temp_buff;
+			UINT Bufflen;
 
+			/* Copy Ioctl Buffer structure */
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
+			if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+				return -EINVAL;
+
+			if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
+
+			/* FIXME: need to restrict BuffLen */
 			Bufflen = IoBuffer.OutputLength + (4 - IoBuffer.OutputLength%4)%4;
-			temp_buff = (PCHAR)kmalloc(Bufflen, GFP_KERNEL);
+			temp_buff = kmalloc(Bufflen, GFP_KERNEL);
 			if(!temp_buff)
-			{
-				return STATUS_FAILURE;
-			}
-			if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
-				IoBuffer.InputLength))
-			{
-				Status = -EFAULT;
-				break;
-			}
+				return -ENOMEM;
+
 			Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
 					(PUINT)temp_buff, Bufflen);
-			if(Status != STATUS_SUCCESS)
+			if(Status == STATUS_SUCCESS)
 			{
-				bcm_kfree(temp_buff);
-				return Status;
+				if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
+					Status = -EFAULT;
 			}
-			if(copy_to_user(IoBuffer.OutputBuffer,
-				(PCHAR)temp_buff, (UINT)IoBuffer.OutputLength))
-			{
-				Status = -EFAULT;
-			}
-			bcm_kfree(temp_buff);
+
+			kfree(temp_buff);
 			break;
 		}
 		case IOCTL_BCM_REGISTER_WRITE_PRIVATE:
@@ -256,19 +246,16 @@
 			UINT uiTempVar=0;
 			/* Copy Ioctl Buffer structure */
 
-			if(copy_from_user(&IoBuffer, argp,
-				sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
+			if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+				return -EINVAL;
+
 			/* Get WrmBuffer structure */
-			if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
-				IoBuffer.InputLength))
-			{
-				Status = -EFAULT;
-				break;
-			}
+			if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
+
 			uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
 			if(!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
 			 	((uiTempVar == EEPROM_REJECT_REG_1)||
@@ -277,8 +264,7 @@
 				(uiTempVar == EEPROM_REJECT_REG_4)))
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
-				Status = -EFAULT;
-				break;
+				return -EFAULT;
 			}
 			Status = wrmalt(Adapter, (UINT)sWrmBuffer.Register,
 						(PUINT)sWrmBuffer.Data, sizeof(ULONG));
@@ -305,56 +291,39 @@
 				(Adapter->bPreparingForLowPowerMode ==TRUE))
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Rdms\n");
-				Status = -EACCES;
-				break;
+				return -EACCES;
 			}
 			/* Copy Ioctl Buffer structure */
-			if(copy_from_user(&IoBuffer, argp,
-				sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
 
-			temp_buff = (PCHAR)kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
+			if (IoBuffer.InputLength > sizeof(sRdmBuffer))
+				return -EINVAL;
+
+			if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
+
+			/* FIXME: don't trust user supplied length */
+			temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
 			if(!temp_buff)
-			{
 				return STATUS_FAILURE;
-			}
-			if(copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer,
-				IoBuffer.InputLength))
-			{
-				Status = -EFAULT;
-				break;
-			}
 
-			if(
-#if !defined(BCM_SHM_INTERFACE)
-				(((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
-#endif
-					((ULONG)sRdmBuffer.Register & 0x3)
-			  )
+			if((((ULONG)sRdmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+			   ((ULONG)sRdmBuffer.Register & 0x3))
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "RDM Done On invalid Address : %x Access Denied.\n",
 					(int)sRdmBuffer.Register);
-				Status = -EINVAL;
-				break;
+				return -EINVAL;
 			}
 
 			uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
 			Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register,
 						(PUINT)temp_buff, IoBuffer.OutputLength);
-			if(Status != STATUS_SUCCESS)
-			{
-				bcm_kfree(temp_buff);
-				return Status;
-			}
-			if(copy_to_user(IoBuffer.OutputBuffer,
-				(PCHAR)temp_buff, (UINT)IoBuffer.OutputLength))
-			{
-				Status = -EFAULT;
-			}
-			bcm_kfree(temp_buff);
+			if(Status == STATUS_SUCCESS)
+				if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
+					Status = -EFAULT;
+
+			kfree(temp_buff);
 			break;
 		}
 		case IOCTL_BCM_REGISTER_WRITE:
@@ -367,36 +336,28 @@
 				(Adapter->bPreparingForLowPowerMode ==TRUE))
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Device in Idle Mode, Blocking Wrms\n");
-				Status = -EACCES;
-				break;
+				return -EACCES;
 			}
-			/* Copy Ioctl Buffer structure */
-			if(copy_from_user((PCHAR)&IoBuffer, argp,
-					sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
-			/* Get WrmBuffer structure */
-			if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer,
-				IoBuffer.InputLength))
-			{
-				Status = -EFAULT;
-				break;
-			}
-			if(
-#if !defined(BCM_SHM_INTERFACE)
 
-				(((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
-#endif
-					((ULONG)sWrmBuffer.Register & 0x3)
-			 )
+			/* Copy Ioctl Buffer structure */
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
+			if (IoBuffer.InputLength > sizeof(sWrmBuffer))
+				return -EINVAL;
+
+			/* Get WrmBuffer structure */
+			if(copy_from_user(&sWrmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
+
+			if( (((ULONG)sWrmBuffer.Register & 0x0F000000) != 0x0F000000) ||
+					((ULONG)sWrmBuffer.Register & 0x3) )
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WRM Done On invalid Address : %x Access Denied.\n",
 						(int)sWrmBuffer.Register);
-				Status = -EINVAL;
-				break;
+				return -EINVAL;
 			}
+
 			uiTempVar = sWrmBuffer.Register & EEPROM_REJECT_MASK;
 			if(!((Adapter->pstargetparams->m_u32Customize) & VSG_MODE) &&
 				((uiTempVar == EEPROM_REJECT_REG_1)||
@@ -406,8 +367,7 @@
 				(cmd == IOCTL_BCM_REGISTER_WRITE))
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "EEPROM Access Denied, not in VSG Mode\n");
-				Status = -EFAULT;
-				break;
+				return -EFAULT;
 			}
 
 			Status = wrmaltWithLock(Adapter, (UINT)sWrmBuffer.Register,
@@ -436,19 +396,14 @@
 				(Adapter->bPreparingForLowPowerMode ==TRUE))
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"GPIO Can't be set/clear in Low power Mode");
-				Status = -EACCES;
-				break;
+				return -EACCES;
 			}
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-		    }
+				return -EFAULT;
+			if (IoBuffer.InputLength > sizeof(gpio_info))
+				return -EINVAL;
 			if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
-			{
-				Status = -EFAULT;
-				break;
-			}
+				return -EFAULT;
 			uiBit  = gpio_info.uiGpioNumber;
 			uiOperation = gpio_info.uiGpioValue;
 
@@ -517,8 +472,7 @@
 		break;
 		case BCM_LED_THREAD_STATE_CHANGE_REQ:
 		{
-
-			USER_THREAD_REQ threadReq = {0};
+			USER_THREAD_REQ threadReq = { 0 };
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"User made LED thread InActive");
 
 			if((Adapter->IdleMode == TRUE) ||
@@ -529,21 +483,16 @@
 				Status = -EACCES;
 				break;
 			}
-			Status =copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-			if(Status)
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer from user space err:%d",Status);
-				Status = -EFAULT;
-				break;
-			}
 
-			Status= copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength);
-			if(Status)
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the InputBuffer from user space err:%d",Status);
-				Status = -EFAULT;
-				break;
-			}
+			if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
+			if (IoBuffer.InputLength > sizeof(threadReq))
+				return -EINVAL;
+
+			if (copy_from_user(&threadReq, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
+
 			//if LED thread is running(Actively or Inactively) set it state to make inactive
 			if(Adapter->LEDInfo.led_thread_running)
 			{
@@ -572,19 +521,13 @@
 			if((Adapter->IdleMode == TRUE) ||
 				(Adapter->bShutStatus ==TRUE) ||
 				(Adapter->bPreparingForLowPowerMode ==TRUE))
-			{
-				Status = -EACCES;
-				break;
-			}
-			if(copy_from_user((PCHAR)&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
-                        	Status = -EFAULT;
-                    		break;
-                	}
-                if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
-                {
-                    Status = -EFAULT;
-                    break;
-                }
+				return -EACCES;
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+			if (IoBuffer.InputLength > sizeof(gpio_info))
+				return -EINVAL;
+			if(copy_from_user(&gpio_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
                 uiBit  = gpio_info.uiGpioNumber;
 				  //Set the gpio output register
 				Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
@@ -608,25 +551,14 @@
 				if((Adapter->IdleMode == TRUE) ||
 				(Adapter->bShutStatus ==TRUE) ||
 				(Adapter->bPreparingForLowPowerMode ==TRUE))
-				{
-					Status = -EINVAL;
-					break;
-				}
-				Status = copy_from_user( (PCHAR)&IoBuffer, argp, sizeof( IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer from user space err:%d",Status);
-					Status = -EFAULT;
-					break;
-				}
+					return -EINVAL;
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+					return -EFAULT;
+				if (IoBuffer.InputLength > sizeof(gpio_multi_info))
+					return -EINVAL;
+				if (copy_from_user(&gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength))
+					return -EFAULT;
 
-				Status = copy_from_user( &gpio_multi_info, IoBuffer.InputBuffer, IoBuffer.InputLength);
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer Contents from user space err:%d",Status);
-					Status = -EFAULT;
-					break;
-				}
 				if(IsReqGpioIsLedInNVM(Adapter,pgpio_multi_info[WIMAX_IDX].uiGPIOMask)== FALSE)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Sorry, Requested GPIO<0x%X> is not correspond to NVM LED bit map<0x%X>!!!",pgpio_multi_info[WIMAX_IDX].uiGPIOMask,Adapter->gpioBitMap);
@@ -686,7 +618,6 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying Content to IOBufer for user space err:%d",Status);
-					Status = -EFAULT;
 					break;
 				}
 			}
@@ -700,25 +631,14 @@
 			if((Adapter->IdleMode == TRUE) ||
 				(Adapter->bShutStatus ==TRUE) ||
 				(Adapter->bPreparingForLowPowerMode ==TRUE))
-			{
-					Status = -EINVAL;
-					break;
-			}
-			Status = copy_from_user(&IoBuffer, argp, sizeof( IOCTL_BUFFER));
-			if(Status)
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer from user space err:%d",Status);
-				Status = -EFAULT;
-				break;
-			}
+					return -EINVAL;
 
-			Status = copy_from_user( &gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength);
-			if(Status)
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying the IOBufer Contents from user space err:%d",Status);
-				Status = -EFAULT;
-				break;
-			}
+			if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+			if (IoBuffer.InputLength > sizeof(gpio_multi_mode))
+				return -EINVAL;
+			if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				return -EFAULT;
 
 			Status = rdmaltWithLock( Adapter, ( UINT) GPIO_MODE_REGISTER, ( PUINT) ucResetValue, sizeof( UINT));
 			if( STATUS_SUCCESS != Status)
@@ -769,7 +689,6 @@
 			if(Status)
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Failed while copying Content to IOBufer for user space err:%d",Status);
-				Status = -EFAULT;
 				break;
 			}
 		}
@@ -783,24 +702,20 @@
 		case IOCTL_IDLE_REQ:
 		{
 			PVOID pvBuffer=NULL;
-			/* Copy Ioctl Buffer structure */
-			if(copy_from_user(&IoBuffer, argp,
-							sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
-			pvBuffer=kmalloc(IoBuffer.InputLength, GFP_KERNEL);
-			if(!pvBuffer)
-			{
-				return -ENOMEM;
-			}
 
-			if(copy_from_user(pvBuffer, IoBuffer.InputBuffer,
-					IoBuffer.InputLength))
+			/* Copy Ioctl Buffer structure */
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
+			/* FIXME: don't accept any length from user */
+			pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
+			if(!pvBuffer)
+				return -ENOMEM;
+
+			if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
 			{
 				Status = -EFAULT;
-				bcm_kfree(pvBuffer);
+				kfree(pvBuffer);
 				break;
 			}
 
@@ -820,10 +735,9 @@
 			Status = CopyBufferToControlPacket(Adapter, (PVOID)pvBuffer);
 		cntrlEnd:
 			up(&Adapter->LowPowerModeSync);
-			bcm_kfree(pvBuffer);
+			kfree(pvBuffer);
 			break;
 		}
-#ifndef BCM_SHM_INTERFACE
 		case IOCTL_BCM_BUFFER_DOWNLOAD_START:
 		{
 			INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock) ;
@@ -844,7 +758,7 @@
 				Status = reset_card_proc(Adapter);
 				if(Status)
 				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "reset_card_proc Failed!\n");
+					pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
 					up(&Adapter->fw_download_sema);
 					up(&Adapter->NVMRdmWrmLock);
 					break;
@@ -862,7 +776,7 @@
 		}
 		case IOCTL_BCM_BUFFER_DOWNLOAD:
 			{
-				FIRMWARE_INFO 	*psFwInfo=NULL;
+				FIRMWARE_INFO 	*psFwInfo = NULL;
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
 			do{
 				if(!down_trylock(&Adapter->fw_download_sema))
@@ -871,29 +785,23 @@
 					Status=-EINVAL;
 					break;
 				}
+
 				/* Copy Ioctl Buffer structure */
 				if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
-					Status = -EFAULT;
-					break;
-				}
+					return -EFAULT;
+
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Length for FW DLD is : %lx\n",
 										IoBuffer.InputLength);
-				psFwInfo=kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+
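+				/* Reject oversized requests so copy_from_user() below cannot overflow psFwInfo */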
+				if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
+					return -EINVAL;
+
+				psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
 				if(!psFwInfo)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Failed to allocate buffer!!!!\n");
-					Status = -ENOMEM;
-					break;
-				}
-				if(copy_from_user(psFwInfo, IoBuffer.InputBuffer,
-							IoBuffer.InputLength))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_from_user 2 failed\n");
-					Status = -EFAULT;
-					break;
-				}
+					return -ENOMEM;
+
+				if(copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
+					return -EFAULT;
 
 				if(!psFwInfo->pvMappedFirmwareAddress ||
 						(psFwInfo->u32FirmwareLength == 0))
@@ -929,7 +837,7 @@
 			  if(Status != STATUS_SUCCESS)
 					up(&Adapter->fw_download_sema);
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
-				bcm_kfree(psFwInfo);
+				kfree(psFwInfo);
 				break;
 			}
 		case IOCTL_BCM_BUFFER_DOWNLOAD_STOP:
@@ -946,7 +854,7 @@
 				Adapter->bBinDownloaded=TRUE;
 				Adapter->bCfgDownloaded=TRUE;
 				atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
-				atomic_set(&Adapter->RxRollOverCount, 0);
+
 				Adapter->CurrNumRecvDescs=0;
 				Adapter->downloadDDR = 0;
 
@@ -999,7 +907,6 @@
 			up(&Adapter->NVMRdmWrmLock);
 			break;
 		}
-#endif
 		case IOCTL_BE_BUCKET_SIZE:
 			Status = 0;
 			if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
@@ -1050,22 +957,16 @@
 			break;
 
 		case IOCTL_GET_PACK_INFO:
-			if(copy_to_user(argp, &Adapter->PackInfo,
-				sizeof(PacketInfo)*NO_OF_QUEUES))
-			{
-				Status = -EFAULT;
-				break;
-			}
+			if(copy_to_user(argp, &Adapter->PackInfo, sizeof(PacketInfo)*NO_OF_QUEUES))
+				return -EFAULT;
 			Status = STATUS_SUCCESS;
 			break;
 		case IOCTL_BCM_SWITCH_TRANSFER_MODE:
 		{
 			UINT uiData = 0;
 			if(copy_from_user(&uiData, argp, sizeof(UINT)))
-			{
-				Status = -EFAULT;
-				break;
-			}
+				return -EFAULT;
+
 			if(uiData)	/* Allow All Packets */
 			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_SWITCH_TRANSFER_MODE: ETH_PACKET_TUNNELING_MODE\n");
@@ -1084,22 +985,16 @@
 		{
 			/* Copy Ioctl Buffer structure */
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
-			if(copy_to_user(IoBuffer.OutputBuffer,
-				VER_FILEVERSION_STR, (UINT)IoBuffer.OutputLength))
-			{
-				Status = -EFAULT;
-				break;
-			}
+				return -EFAULT;
+
+			if(copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
+				return -EFAULT;
 			Status = STATUS_SUCCESS;
 			break;
 		}
 		case IOCTL_BCM_GET_CURRENT_STATUS:
 		{
-			LINK_STATE plink_state;
+			LINK_STATE link_state;
 
 			/* Copy Ioctl Buffer structure */
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
@@ -1108,19 +1003,19 @@
 				Status = -EFAULT;
 				break;
 			}
-			if (IoBuffer.OutputLength != sizeof(plink_state)) {
+			if (IoBuffer.OutputLength != sizeof(link_state)) {
 				Status = -EINVAL;
 				break;
 			}
 
-			if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
-				Status = -EFAULT;
-				break;
-			}
-			plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
-			plink_state.bShutdownMode = Adapter->bShutStatus;
-			plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
-			if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
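+			/* Zero the struct so uninitialized padding is never copied back to user space */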
+			memset(&link_state, 0, sizeof(link_state));
+			link_state.bIdleMode = Adapter->IdleMode;
+			link_state.bShutdownMode = Adapter->bShutStatus;
+			link_state.ucLinkStatus = Adapter->LinkStatus;
+
+			if (copy_to_user(IoBuffer.OutputBuffer, &link_state,
+					 min_t(size_t, sizeof(link_state), IoBuffer.OutputLength)))
+			{
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
 				Status = -EFAULT;
 				break;
@@ -1131,17 +1026,14 @@
         case IOCTL_BCM_SET_MAC_TRACING:
         {
             UINT  tracing_flag;
+
             /* copy ioctl Buffer structure */
-			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-			{
-				Status = -EFAULT;
-				break;
-			}
-			if(copy_from_user(&tracing_flag, IoBuffer.InputBuffer,sizeof(UINT)))
-            {
-				Status = -EFAULT;
-				break;
-			}
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
+			if(copy_from_user(&tracing_flag, IoBuffer.InputBuffer, sizeof(UINT)))
+				return -EFAULT;
+
             if (tracing_flag)
                 Adapter->pTarangs->MacTracingEnabled = TRUE;
             else
@@ -1151,72 +1043,53 @@
 		case IOCTL_BCM_GET_DSX_INDICATION:
 		{
 			ULONG ulSFId=0;
-			if(copy_from_user((PCHAR)&IoBuffer, argp,
-					sizeof(IOCTL_BUFFER)))
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Invalid IO buffer!!!" );
-				Status = -EFAULT;
-				break;
-			}
+			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+				return -EFAULT;
+
 			if(IoBuffer.OutputLength < sizeof(stLocalSFAddIndicationAlt))
 			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Mismatch req: %lx needed is =0x%zx!!!",
-					IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt));
+				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,
+						"Mismatch req: %lx needed is =0x%zx!!!",
+						IoBuffer.OutputLength, sizeof(stLocalSFAddIndicationAlt));
 				return -EINVAL;
 			}
-			if(copy_from_user(&ulSFId, IoBuffer.InputBuffer,
-					sizeof(ulSFId)))
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Invalid SFID!!! %lu", ulSFId );
-				Status = -EFAULT;
-				break;
-			}
+
+			if(copy_from_user(&ulSFId, IoBuffer.InputBuffer, sizeof(ulSFId)))
+				return -EFAULT;
+
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Get DSX Data SF ID is =%lx\n", ulSFId );
-			get_dsx_sf_data_to_application(Adapter, ulSFId,
-				IoBuffer.OutputBuffer);
+			get_dsx_sf_data_to_application(Adapter, ulSFId, IoBuffer.OutputBuffer);
 			Status=STATUS_SUCCESS;
 		}
 		break;
 		case IOCTL_BCM_GET_HOST_MIBS:
 		{
-			PCHAR temp_buff;
+			PVOID temp_buff;
 
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_from user for IoBuff failed\n");
-				Status = -EFAULT;
-				break;
-			}
+				return -EFAULT;
 
 			if(IoBuffer.OutputLength != sizeof(S_MIBS_HOST_STATS_MIBS))
 			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Length Check failed %lu %zd\n", IoBuffer.OutputLength,
-											sizeof(S_MIBS_HOST_STATS_MIBS));
-	          	return -EINVAL;
+				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,
+						"Length Check failed %lu %zd\n",
+						IoBuffer.OutputLength, sizeof(S_MIBS_HOST_STATS_MIBS));
+				return -EINVAL;
 			}
 
-			temp_buff = (PCHAR)kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
-
+			/* FIXME: HOST_STATS are too big for kmalloc (122048)! */
+			temp_buff = kzalloc(sizeof(S_MIBS_HOST_STATS_MIBS), GFP_KERNEL);
 			if(!temp_buff)
-			{
 				return STATUS_FAILURE;
-			}
 
-			Status = ProcessGetHostMibs(Adapter,
-					(PUCHAR)temp_buff, IoBuffer.OutputLength);
+			Status = ProcessGetHostMibs(Adapter, temp_buff);
+			GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
 
-	        Status = GetDroppedAppCntrlPktMibs((PVOID)temp_buff,
-									(PPER_TARANG_DATA)filp->private_data);
+			if (Status != STATUS_FAILURE)
+				if(copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
+					Status = -EFAULT;
 
-			if(copy_to_user(IoBuffer.OutputBuffer,(PCHAR)temp_buff,
-				sizeof(S_MIBS_HOST_STATS_MIBS)))
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy to user failed\n");
-				bcm_kfree(temp_buff);
-				return -EFAULT;
-			}
-
-			bcm_kfree(temp_buff);
+			kfree(temp_buff);
 			break;
 		}
 
@@ -1226,10 +1099,6 @@
 				Adapter->usIdleModePattern = ABORT_IDLE_MODE;
 				Adapter->bWakeUpDevice = TRUE;
 				wake_up(&Adapter->process_rx_cntrlpkt);
-				#if 0
-				Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
-				InterfaceAbortIdlemode (Adapter, Adapter->usIdleModePattern);
-				#endif
 			}
 			Status = STATUS_SUCCESS;
 			break;
@@ -1248,24 +1117,20 @@
 					Status = -EACCES;
 					break;
 				}
-				/* Copy Ioctl Buffer structure */
-				if(copy_from_user((PCHAR)&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-				{
-					Status = -EFAULT;
-					break;
-				}
 
-				pvBuffer=kmalloc(IoBuffer.InputLength, GFP_KERNEL);
+				/* Copy Ioctl Buffer structure */
+				if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+					return -EFAULT;
+
+				/* FIXME: restrict length */
+				pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
 				if(!pvBuffer)
-				{
 					return -ENOMEM;
-					break;
-				}
 
 				/* Get WrmBuffer structure */
-                if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
+				if(copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
 				{
-					bcm_kfree(pvBuffer);
+					kfree(pvBuffer);
 					Status = -EFAULT;
 					break;
 				}
@@ -1275,7 +1140,7 @@
 				if(((ULONG)pBulkBuffer->Register & 0x0F000000) != 0x0F000000 ||
 					((ULONG)pBulkBuffer->Register & 0x3))
 				{
-					bcm_kfree(pvBuffer);
+					kfree(pvBuffer);
                     BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM Done On invalid Address : %x Access Denied.\n",(int)pBulkBuffer->Register);
 					Status = -EINVAL;
 					break;
@@ -1290,7 +1155,7 @@
 					(uiTempVar == EEPROM_REJECT_REG_4)) &&
 					(cmd == IOCTL_BCM_REGISTER_WRITE))
 				{
-					bcm_kfree(pvBuffer);
+					kfree(pvBuffer);
                     BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,"EEPROM Access Denied, not in VSG Mode\n");
 					Status = -EFAULT;
 					break;
@@ -1306,30 +1171,19 @@
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WRM Failed\n");
 				}
 
-				bcm_kfree(pvBuffer);
+				kfree(pvBuffer);
 				break;
 			}
 
 		case IOCTL_BCM_GET_NVM_SIZE:
-			{
-
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-			{
-				//IOLog("failed NVM first");
-				Status = -EFAULT;
-				break;
-			}
-			if(Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH ) {
-				if(copy_to_user(IoBuffer.OutputBuffer,
-					(unsigned char *)&Adapter->uiNVMDSDSize, (UINT)sizeof(UINT)))
-				{
-						Status = -EFAULT;
-						return Status;
-				}
-			}
+				return -EFAULT;
 
-			Status = STATUS_SUCCESS ;
+			if(Adapter->eNVMType == NVM_EEPROM || Adapter->eNVMType == NVM_FLASH ) {
+				if(copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiNVMDSDSize, sizeof(UINT)))
+					return -EFAULT;
 			}
+			Status = STATUS_SUCCESS;
 			break;
 
 		case IOCTL_BCM_CAL_INIT :
@@ -1338,40 +1192,26 @@
 				UINT uiSectorSize = 0 ;
 				if(Adapter->eNVMType == NVM_FLASH)
 				{
-					Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-					if(Status)
-					{
-						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
+					if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 						return -EFAULT;
-					}
-					if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
+
+					if (copy_from_user(&uiSectorSize, IoBuffer.InputBuffer, sizeof(UINT)))
 						return -EFAULT;
 
 					if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
 					{
-
-						Status = copy_to_user(IoBuffer.OutputBuffer,
-									(unsigned char *)&Adapter->uiSectorSize ,
-									(UINT)sizeof(UINT));
-						if(Status)
-						{
-								BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Coping the sector size to use space failed. status:%d",Status);
-								return -EFAULT;
-						}
+						if (copy_to_user(IoBuffer.OutputBuffer, &Adapter->uiSectorSize,
+								 sizeof(UINT)))
+							return -EFAULT;
 					}
 					else
 					{
 						if(IsFlash2x(Adapter))
 						{
-							Status = copy_to_user(IoBuffer.OutputBuffer,
-									(unsigned char *)&Adapter->uiSectorSize ,
-									(UINT)sizeof(UINT));
-							if(Status)
-							{
-									BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Coping the sector size to use space failed. status:%d",Status);
-									return -EFAULT;
-							}
-
+							if (copy_to_user(IoBuffer.OutputBuffer,
+									 &Adapter->uiSectorSize,
+									 sizeof(UINT)))
+								return -EFAULT;
 						}
 						else
 						{
@@ -1395,25 +1235,19 @@
 			}
 			break;
         case IOCTL_BCM_SET_DEBUG :
+#ifdef DEBUG
             {
                 USER_BCM_DBG_STATE sUserDebugState;
 
 //				BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Entered the ioctl %x \n", IOCTL_BCM_SET_DEBUG );
 
 				BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In SET_DEBUG ioctl\n");
-				Status = copy_from_user((PCHAR)&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy from user failed\n");
-					Status = -EFAULT;
-					break;
-				}
-				Status = copy_from_user(&sUserDebugState,IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0,  "Copy of IoBuffer.InputBuffer failed");
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 					return -EFAULT;
-				}
+
+				if (copy_from_user(&sUserDebugState, IoBuffer.InputBuffer, sizeof(USER_BCM_DBG_STATE)))
+					return -EFAULT;
+
 
 				BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL_BCM_SET_DEBUG: OnOff=%d Type = 0x%x ",
 				sUserDebugState.OnOff, sUserDebugState.Type);
@@ -1436,15 +1270,14 @@
                 BCM_SHOW_DEBUG_BITMAP(Adapter);
 
 			}
+#endif
 			break;
 		case IOCTL_BCM_NVM_READ:
 		case IOCTL_BCM_NVM_WRITE:
 			{
-
-				NVM_READWRITE  stNVMReadWrite = {};
+				NVM_READWRITE  stNVMReadWrite;
 				PUCHAR pReadData = NULL;
-				void __user * pBuffertobeCopied = NULL;
-				ULONG ulDSDMagicNumInUsrBuff = 0 ;
+				ULONG ulDSDMagicNumInUsrBuff = 0;
 				struct timeval tv0, tv1;
 				memset(&tv0,0,sizeof(struct timeval));
 				memset(&tv1,0,sizeof(struct timeval));
@@ -1469,21 +1302,12 @@
 			/* Copy Ioctl Buffer structure */
 
 				if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"copy_from_user failed\n");
-					Status = -EFAULT;
-					break;
-				}
-				if(IOCTL_BCM_NVM_READ == cmd)
-					pBuffertobeCopied = IoBuffer.OutputBuffer;
-				else
-					pBuffertobeCopied = IoBuffer.InputBuffer;
+					return -EFAULT;
 
-				if(copy_from_user(&stNVMReadWrite, pBuffertobeCopied,sizeof(NVM_READWRITE)))
-				{
-					Status = -EFAULT;
-					break;
-				}
+				if(copy_from_user(&stNVMReadWrite,
+						  (IOCTL_BCM_NVM_READ == cmd) ? IoBuffer.OutputBuffer : IoBuffer.InputBuffer,
+						  sizeof(NVM_READWRITE)))
+					return -EFAULT;
 
 				//
 				// Deny the access if the offset crosses the cal area limit.
@@ -1496,18 +1320,15 @@
 					break;
 				}
 
-				pReadData =(PCHAR)kmalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
-
+				pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
 				if(!pReadData)
 					return -ENOMEM;
 
-				memset(pReadData,0,stNVMReadWrite.uiNumBytes);
-
 				if(copy_from_user(pReadData, stNVMReadWrite.pBuffer,
 							stNVMReadWrite.uiNumBytes))
 				{
 					Status = -EFAULT;
-					bcm_kfree(pReadData);
+					kfree(pReadData);
 					break;
 				}
 
@@ -1522,7 +1343,7 @@
 					{
 						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
 						up(&Adapter->NVMRdmWrmLock);
-						bcm_kfree(pReadData);
+						kfree(pReadData);
 						return -EACCES;
 					}
 
@@ -1533,13 +1354,12 @@
 
 					if(Status != STATUS_SUCCESS)
 						{
-							bcm_kfree(pReadData);
+							kfree(pReadData);
 							return Status;
 						}
-					if(copy_to_user(stNVMReadWrite.pBuffer,
-							pReadData, (UINT)stNVMReadWrite.uiNumBytes))
+					if(copy_to_user(stNVMReadWrite.pBuffer,pReadData, stNVMReadWrite.uiNumBytes))
 						{
-							bcm_kfree(pReadData);
+							kfree(pReadData);
 							Status = -EFAULT;
 						}
 				}
@@ -1554,7 +1374,7 @@
 					{
 						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
 						up(&Adapter->NVMRdmWrmLock);
-						bcm_kfree(pReadData);
+						kfree(pReadData);
 						return -EACCES;
 					}
 
@@ -1582,7 +1402,7 @@
 							{
 								BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DSD Sig is present neither in Flash nor User provided Input..");
 								up(&Adapter->NVMRdmWrmLock);
-								bcm_kfree(pReadData);
+								kfree(pReadData);
 								return Status;
 							}
 
@@ -1591,7 +1411,7 @@
 							{
 								BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"DSD Sig is present neither in Flash nor User provided Input..");
 								up(&Adapter->NVMRdmWrmLock);
-								bcm_kfree(pReadData);
+								kfree(pReadData);
 								return Status;
 							}
 						}
@@ -1608,7 +1428,7 @@
 
 					if(Status != STATUS_SUCCESS)
 					{
-						bcm_kfree(pReadData);
+						kfree(pReadData);
 						return Status;
 					}
 				}
@@ -1616,7 +1436,7 @@
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n",(tv1.tv_sec - tv0.tv_sec)*1000 +(tv1.tv_usec - tv0.tv_usec)/1000);
 
 
-				bcm_kfree(pReadData);
+				kfree(pReadData);
 				Status = STATUS_SUCCESS;
 			}
 			break;
@@ -1629,7 +1449,7 @@
 				UINT BuffSize = 0;
 				UINT ReadBytes = 0;
 				UINT ReadOffset = 0;
-				char __user *OutPutBuff = NULL;
+				void __user *OutPutBuff;
 
 				if(IsFlash2x(Adapter) != TRUE)
 				{
@@ -1638,20 +1458,12 @@
 				}
 
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_FLASH2X_SECTION_READ Called");
-				Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 					return -EFAULT;
-				}
 
 				//Reading FLASH 2.x READ structure
-				Status = copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,sizeof(FLASH2X_READWRITE));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of Input Buffer failed");
+				if (copy_from_user(&sFlash2xRead, IoBuffer.InputBuffer,sizeof(FLASH2X_READWRITE)))
 					return -EFAULT;
-				}
 
 
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.Section :%x" ,sFlash2xRead.Section);
@@ -1687,7 +1499,7 @@
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
 					up(&Adapter->NVMRdmWrmLock);
-					bcm_kfree(pReadBuff);
+					kfree(pReadBuff);
 					return -EACCES;
 				}
 
@@ -1715,7 +1527,6 @@
 				 	if(Status)
 				 	{
 				 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Copy to use failed with status :%d", Status);
-						Status = -EFAULT;
 						break;
 				 	}
 					NOB = NOB - ReadBytes;
@@ -1727,15 +1538,15 @@
 
 				}
 				up(&Adapter->NVMRdmWrmLock);
-				bcm_kfree(pReadBuff);
+				kfree(pReadBuff);
 
 			 }
 			 break ;
 		case IOCTL_BCM_FLASH2X_SECTION_WRITE :
 			 {
 			 	FLASH2X_READWRITE sFlash2xWrite = {0};
-				PUCHAR pWriteBuff = NULL;
-				void __user *InputAddr = NULL;
+				PUCHAR pWriteBuff;
+				void __user *InputAddr;
 				UINT NOB = 0;
 				UINT BuffSize = 0;
 				UINT WriteOffset = 0;
@@ -1752,33 +1563,17 @@
 
 
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " IOCTL_BCM_FLASH2X_SECTION_WRITE Called");
-				Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 					return -EFAULT;
-				}
 
 				//Reading FLASH 2.x READ structure
-				Status = copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Reading of output Buffer from IOCTL buffer fails");
+				if (copy_from_user(&sFlash2xWrite, IoBuffer.InputBuffer, sizeof(FLASH2X_READWRITE)))
 					return -EFAULT;
-				}
 
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.Section :%x" ,sFlash2xWrite.Section);
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.offset :%d" ,sFlash2xWrite.offset);
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.numOfBytes :%x" ,sFlash2xWrite.numOfBytes);
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\nsFlash2xRead.bVerify :%x\n" ,sFlash2xWrite.bVerify);
-				#if 0
-				if((sFlash2xWrite.Section == ISO_IMAGE1) ||(sFlash2xWrite.Section == ISO_IMAGE2) ||
-					(sFlash2xWrite.Section == DSD0) || (sFlash2xWrite.Section == DSD1) || (sFlash2xWrite.Section == DSD2))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"ISO/DSD Image write is not allowed....  ");
-					return STATUS_FAILURE ;
-				}
-				#endif
 				if((sFlash2xWrite.Section != VSA0) && (sFlash2xWrite.Section != VSA1) &&
 					(sFlash2xWrite.Section != VSA2) )
 				{
@@ -1798,12 +1593,10 @@
 				else
 					BuffSize = NOB ;
 
-				pWriteBuff = (PCHAR)kmalloc(BuffSize, GFP_KERNEL);
+				pWriteBuff = kmalloc(BuffSize, GFP_KERNEL);
 				if(pWriteBuff == NULL)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory allocation failed for Flash 2.x Read Structure");
 					return -ENOMEM;
-				}
+
 
 				//extracting the remainder of the given offset.
 				WriteBytes = Adapter->uiSectorSize ;
@@ -1820,7 +1613,7 @@
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
 					up(&Adapter->NVMRdmWrmLock);
-					bcm_kfree(pWriteBuff);
+					kfree(pWriteBuff);
 					return -EACCES;
 				}
 
@@ -1831,7 +1624,6 @@
 				 	if(Status)
 				 	{
 				 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy to user failed with status :%d", Status);
-						Status = -EFAULT;
 						break ;
 				 	}
 					BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pWriteBuff,WriteBytes);
@@ -1859,28 +1651,22 @@
 				}	while(NOB > 0);
 				BcmFlash2xWriteSig(Adapter,sFlash2xWrite.Section);
 				up(&Adapter->NVMRdmWrmLock);
-				bcm_kfree(pWriteBuff);
+				kfree(pWriteBuff);
 			 }
 			 break ;
 		case IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP :
 			 {
 
-			 	PFLASH2X_BITMAP psFlash2xBitMap = NULL ;
+				PFLASH2X_BITMAP psFlash2xBitMap;
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_GET_FLASH2X_SECTION_BITMAP Called");
 
-				Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 					return -EFAULT;
-				}
-				if(IoBuffer.OutputLength != sizeof(FLASH2X_BITMAP))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Structure size mismatch Lib :0x%lx Driver :0x%zx ",IoBuffer.OutputLength, sizeof(FLASH2X_BITMAP));
-					break;
-				}
 
-				psFlash2xBitMap = (PFLASH2X_BITMAP)kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL);
+				if(IoBuffer.OutputLength != sizeof(FLASH2X_BITMAP))
+					return -EINVAL;
+
+				psFlash2xBitMap = kzalloc(sizeof(FLASH2X_BITMAP), GFP_KERNEL);
 				if(psFlash2xBitMap == NULL)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory is not available");
@@ -1895,20 +1681,16 @@
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
 					up(&Adapter->NVMRdmWrmLock);
-					bcm_kfree(psFlash2xBitMap);
+					kfree(psFlash2xBitMap);
 					return -EACCES;
 				}
 
 				BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
 				up(&Adapter->NVMRdmWrmLock);
-				Status = copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copying Flash2x bitMap failed");
-					bcm_kfree(psFlash2xBitMap);
-					return -EFAULT;
-				}
-				bcm_kfree(psFlash2xBitMap);
+				if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
+					Status = -EFAULT;
+
+				kfree(psFlash2xBitMap);
 			 }
 			 break ;
 		case IOCTL_BCM_SET_ACTIVE_SECTION :
@@ -1926,14 +1708,14 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
-					return -EFAULT;
+					return Status;
 				}
 
 				Status = copy_from_user(&eFlash2xSectionVal,IoBuffer.InputBuffer, sizeof(INT));
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
-					return -EFAULT;
+					return Status;
 				}
 
 				down(&Adapter->NVMRdmWrmLock);
@@ -1961,29 +1743,6 @@
 				Adapter->bAllDSDWriteAllow = FALSE ;
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_BCM_IDENTIFY_ACTIVE_SECTION called");
 
-				#if 0
-				SECTION_TYPE section = 0 ;
-
-
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "IOCTL_BCM_IDENTIFY_ACTIVE_SECTION Called");
-				Status = copy_from_user((PCHAR)&IoBuffer, (PCHAR)arg, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy of IOCTL BUFFER failed");
-					return -EFAULT;
-				}
-				Status = copy_from_user((PCHAR)section,(PCHAR)&IoBuffer, sizeof(INT));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy of section type failed failed");
-					return -EFAULT;
-				}
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Read Section :%d", section);
-			 	if(section == DSD)
-					Adapter->ulFlashCalStart = Adapter->uiActiveDSDOffsetAtFwDld ;
-				else
-					Status = STATUS_FAILURE ;
-				#endif
 				Status = STATUS_SUCCESS ;
 			 }
 			 break ;
@@ -2004,14 +1763,14 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
-					return -EFAULT;
+					return Status;
 				}
 
-				Status = copy_from_user(&sCopySectStrut,IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
+				Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
-					return -EFAULT;
+					return Status;
 				}
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Destination SEction :%x", sCopySectStrut.DstSection);
@@ -2082,7 +1841,6 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
-					Status = -EFAULT;
 					break;
 				}
 				if(Adapter->eNVMType != NVM_FLASH)
@@ -2095,35 +1853,18 @@
 				{
 
 					if(IoBuffer.OutputLength < sizeof(FLASH2X_CS_INFO))
-					{
-						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0," Passed buffer size:0x%lX is insufficient for the CS structure.. \nRequired size :0x%zx ",IoBuffer.OutputLength, sizeof(FLASH2X_CS_INFO));
-						Status = -EINVAL;
-						break;
-					}
+						return -EINVAL;
 
-					Status = copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO));
-					if(Status)
-					{
-						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copying Flash2x cs info failed");
-						Status = -EFAULT;
-						break;
-					}
+					if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlash2xCSInfo, sizeof(FLASH2X_CS_INFO)))
+						return -EFAULT;
 				}
 				else
 				{
 					if(IoBuffer.OutputLength < sizeof(FLASH_CS_INFO))
-					{
-						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0," Passed buffer size:0x%lX is insufficient for the CS structure.. Required size :0x%zx ",IoBuffer.OutputLength, sizeof(FLASH_CS_INFO));
-						Status = -EINVAL;
-						break;
-					}
-					Status = copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO));
-					if(Status)
-					{
-						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "copying Flash CS info failed");
-						Status = -EFAULT;
-						break;
-					}
+						return -EINVAL;
+
+					if (copy_to_user(IoBuffer.OutputBuffer, Adapter->psFlashCSInfo, sizeof(FLASH_CS_INFO)))
+						return -EFAULT;
 
 			 	 }
 			  }
@@ -2145,13 +1886,13 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
-					return -EFAULT;
+					return Status;
 				}
-				Status = copy_from_user(&eFlash2xSectionVal,IoBuffer.InputBuffer, sizeof(INT));
+				Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
-					return -EFAULT;
+					return Status;
 				}
 
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Read Section :%d", eFlash2xSectionVal);
@@ -2181,13 +1922,13 @@
 		case IOCTL_BCM_NVM_RAW_READ :
 			 {
 
-				NVM_READWRITE  stNVMRead = {};
+				NVM_READWRITE stNVMRead;
 				INT NOB ;
 				INT BuffSize ;
 				INT ReadOffset = 0;
 				UINT ReadBytes = 0 ;
-				PUCHAR pReadBuff = NULL ;
-				char __user *OutPutBuff = NULL ;
+				PUCHAR pReadBuff;
+				void __user *OutPutBuff;
 
 				if(Adapter->eNVMType != NVM_FLASH)
 				{
@@ -2204,10 +1945,7 @@
 				}
 
 				if(copy_from_user(&stNVMRead, IoBuffer.OutputBuffer,sizeof(NVM_READWRITE)))
-				{
-					Status = -EFAULT;
-					break;
-				}
+					return -EFAULT;
 
 				NOB = stNVMRead.uiNumBytes;
 				//In Raw-Read max Buff size : 64MB
@@ -2217,11 +1955,10 @@
 				else
 					BuffSize = NOB ;
 
-				ReadOffset = stNVMRead.uiOffset ;
+				ReadOffset = stNVMRead.uiOffset;
 				OutPutBuff = stNVMRead.pBuffer;
 
-
-				pReadBuff = (PCHAR)kzalloc(BuffSize , GFP_KERNEL);
+				pReadBuff = kzalloc(BuffSize, GFP_KERNEL);
 				if(pReadBuff == NULL)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Memory allocation failed for Flash 2.x Read Structure");
@@ -2235,7 +1972,7 @@
 					(Adapter->bPreparingForLowPowerMode ==TRUE))
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"Device is in Idle/Shutdown Mode\n");
-					bcm_kfree(pReadBuff);
+					kfree(pReadBuff);
 					up(&Adapter->NVMRdmWrmLock);
 					return -EACCES;
 				}
@@ -2256,13 +1993,12 @@
 						break;
 					}
 
-					BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pReadBuff, ReadBytes);
+					BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,pReadBuff,ReadBytes);
 
 					Status = copy_to_user(OutPutBuff, pReadBuff,ReadBytes);
 				 	if(Status)
 				 	{
 				 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy to use failed with status :%d", Status);
-						Status = -EFAULT;
 						break;
 				 	}
 					NOB = NOB - ReadBytes;
@@ -2275,7 +2011,7 @@
 				}
 				Adapter->bFlashRawRead = FALSE ;
 				up(&Adapter->NVMRdmWrmLock);
-				bcm_kfree(pReadBuff);
+				kfree(pReadBuff);
 				break ;
 			 }
 
@@ -2288,7 +2024,6 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"copy of Ioctl buffer is failed from user space");
-					Status = -EFAULT;
 					break;
 				}
 
@@ -2296,7 +2031,6 @@
 				if(Status)
 				{
 					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"copy of control bit mask failed from user space");
-					Status = -EFAULT;
 					break;
 				}
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
@@ -2315,71 +2049,44 @@
 				DevInfo.u32NVMType = Adapter->eNVMType;
 				DevInfo.u32InterfaceType = BCM_USB;
 
-				Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
-					Status = -EFAULT;
-					break;
-				}
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+					return -EFAULT;
+
 				if(IoBuffer.OutputLength < sizeof(DevInfo))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"User Passed buffer length is less than actural buffer size");
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"user passed buffer size :0x%lX, expected size :0x%zx",IoBuffer.OutputLength, sizeof(DevInfo));
-					Status = -EINVAL;
-					break;
-				}
-				Status = copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"copying Dev info structure to user space buffer failed");
-					Status = -EFAULT;
-					break;
-				}
+					return -EINVAL;
+
+				if (copy_to_user(IoBuffer.OutputBuffer, &DevInfo, sizeof(DevInfo)))
+					return -EFAULT;
 			}
 			break ;
 
 			case IOCTL_BCM_TIME_SINCE_NET_ENTRY:
 			{
 				ST_TIME_ELAPSED stTimeElapsedSinceNetEntry = {0};
-				struct timeval tv = {0} ;
 
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_BCM_TIME_SINCE_NET_ENTRY called");
 
-				Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
-					Status = -EFAULT;
-					break;
-				}
+				if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
+					return -EFAULT;
+
 				if(IoBuffer.OutputLength < sizeof(ST_TIME_ELAPSED))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"User Passed buffer length:0x%lx is less than expected buff size :0x%zX",IoBuffer.OutputLength,sizeof(ST_TIME_ELAPSED));
-					Status = -EINVAL;
-					break;
-				}
+					return -EINVAL;
 
-				//stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = Adapter->liTimeSinceLastNetEntry;
-				do_gettimeofday(&tv);
-				stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = tv.tv_sec - Adapter->liTimeSinceLastNetEntry;
+				stTimeElapsedSinceNetEntry.ul64TimeElapsedSinceNetEntry = get_seconds() - Adapter->liTimeSinceLastNetEntry;
 
-				Status = copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED));
-				if(Status)
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"copying ST_TIME_ELAPSED structure to user space buffer failed");
-					Status = -EFAULT;
-					break;
-				}
+				if (copy_to_user(IoBuffer.OutputBuffer, &stTimeElapsedSinceNetEntry, sizeof(ST_TIME_ELAPSED)))
+					return -EFAULT;
 
 			}
 			break;
 
-		default:
-            BCM_DEBUG_PRINT (Adapter, DBG_TYPE_PRINTK, 0, 0, "wrong input %x",cmd);
-			BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "In default ioctl %d\n", cmd);
-			 Status = STATUS_FAILURE;
+		case IOCTL_CLOSE_NOTIFICATION:
+			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,"IOCTL_CLOSE_NOTIFICATION");
+			break;
 
+		default:
+			pr_info(DRV_NAME ": unknown ioctl cmd=%#x\n", cmd);
+			Status = STATUS_FAILURE;
 			break;
 	}
 	return Status;
@@ -2395,59 +2102,37 @@
 	.llseek = no_llseek,
 };
 
+extern struct class *bcm_class;
 
 int register_control_device_interface(PMINI_ADAPTER Adapter)
 {
+
 	if(Adapter->major>0)
-    	return Adapter->major;
-    Adapter->major = register_chrdev(0, "tarang", &bcm_fops);
-    if(Adapter->major < 0)
-    {
-    	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "register_chrdev:Failed to registering WiMax control char device!");
-        return Adapter->major;
-    }
+		return Adapter->major;
 
-	bcm_class = NULL;
-	bcm_class = class_create (THIS_MODULE, "tarang");
-	if(IS_ERR (bcm_class))
-	{
-    	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unable to create class\n");
-        unregister_chrdev(Adapter->major, "tarang");
-		Adapter->major = 0;
-		return -ENODEV;
+	Adapter->major = register_chrdev(0, DEV_NAME, &bcm_fops);
+	if(Adapter->major < 0) {
+		pr_err(DRV_NAME ": could not create character device\n");
+		return Adapter->major;
 	}
+
 	Adapter->pstCreatedClassDevice = device_create (bcm_class, NULL,
-								MKDEV(Adapter->major, 0),
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
-								NULL	,
-#endif
-								"tarang");
+							MKDEV(Adapter->major, 0), Adapter,
+							DEV_NAME);
 
-	if(IS_ERR(Adapter->pstCreatedClassDevice))
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "class device did not get created : %ld", PTR_ERR(Adapter->pstCreatedClassDevice) );
+	if(IS_ERR(Adapter->pstCreatedClassDevice)) {
+		pr_err(DRV_NAME ": class device create failed\n");
+		unregister_chrdev(Adapter->major, DEV_NAME);
+		return PTR_ERR(Adapter->pstCreatedClassDevice);
 	}
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Got Major No: %d", Adapter->major);
-    return 0;
+
+	return 0;
 }
 
 void unregister_control_device_interface(PMINI_ADAPTER Adapter)
 {
-	if(Adapter->major > 0)
-	{
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "destroying class device");
+	if(Adapter->major > 0) {
 		device_destroy (bcm_class, MKDEV(Adapter->major, 0));
+		unregister_chrdev(Adapter->major, DEV_NAME);
 	}
-    if(!IS_ERR(bcm_class))
-	{
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "destroying created class ");
-        class_destroy (bcm_class);
-		bcm_class = NULL;
-	}
-	if(Adapter->major > 0)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"unregistering character interface");
-        unregister_chrdev(Adapter->major, "tarang");
-	}
-
 }
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index bc29698..a6ce239 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -1,264 +1,238 @@
 #include "headers.h"
 
-static INT bcm_notify_event(struct notifier_block *nb, ULONG event, PVOID dev)
-{
-	struct net_device *ndev = (struct net_device*)dev;
-    PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
-	//PMINI_ADAPTER 	Adapter = (PMINI_ADAPTER)ndev->priv;
-	if(strncmp(ndev->name,gblpnetdev->name,5)==0)
-	{
-		switch(event)
-		{
-			case NETDEV_CHANGEADDR:
-			case NETDEV_GOING_DOWN:
-				/*ignore this */
-					break;
-			case NETDEV_DOWN:
-				break;
-
-			case NETDEV_UP:
-				break;
-
-			case NETDEV_REGISTER:
-				 /* Increment the Reference Count for "veth0" */
-				 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Register RefCount: %x\n",
-									netdev_refcnt_read(ndev));
-				 dev_hold(ndev);
-				 break;
-
-			case NETDEV_UNREGISTER:
-				 /* Decrement the Reference Count for "veth0" */
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unregister RefCnt: %x\n",
-									netdev_refcnt_read(ndev));
-				dev_put(ndev);
-				break;
-		};
-	}
-	return NOTIFY_DONE;
-}
-
-/* Notifier block to receive netdevice events */
-static struct notifier_block bcm_notifier_block =
-{
-	.notifier_call = bcm_notify_event,
-};
-
 struct net_device *gblpnetdev;
-/***************************************************************************************/
-/* proto-type of lower function */
-#ifdef BCM_SHM_INTERFACE
-const char *bcmVirtDeviceName="bcmeth";
-#endif
 
 static INT bcm_open(struct net_device *dev)
 {
-    PMINI_ADAPTER Adapter = NULL ; //(PMINI_ADAPTER)dev->priv;
-	Adapter = GET_BCM_ADAPTER(dev);
-    BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "======>");
-    if(Adapter->fw_download_done==FALSE)
-        return -EINVAL;
-	Adapter->if_up=1;
-	if(Adapter->LinkUpStatus == 1){
-		if(netif_queue_stopped(Adapter->dev)){
-			netif_carrier_on(Adapter->dev);
-			netif_start_queue(Adapter->dev);
-		}
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+	if (Adapter->fw_download_done == FALSE) {
+		pr_notice(PFX "%s: link up failed (download in progress)\n",
+			  dev->name);
+		return -EBUSY;
 	}
 
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "<======");
-    return 0;
-}
+	if (netif_msg_ifup(Adapter))
+		pr_info(PFX "%s: enabling interface\n", dev->name);
 
-static INT bcm_close(struct net_device *dev)
-{
-   PMINI_ADAPTER Adapter = NULL ;//gpadapter ;
-   Adapter = GET_BCM_ADAPTER(dev);
-    BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "=====>");
-	Adapter->if_up=0;
-	if(!netif_queue_stopped(dev)) {
-		netif_carrier_off(dev);
-	    netif_stop_queue(dev);
+	if (Adapter->LinkUpStatus) {
+		if (netif_msg_link(Adapter))
+			pr_info(PFX "%s: link up\n", dev->name);
+
+		netif_carrier_on(Adapter->dev);
+		netif_start_queue(Adapter->dev);
 	}
-    BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"<=====");
-    return 0;
-}
-
-static struct net_device_stats *bcm_get_stats(struct net_device *dev)
-{
-    PLINUX_DEP_DATA pLinuxData=NULL;
-	PMINI_ADAPTER Adapter = NULL ;// gpadapter ;
-	Adapter = GET_BCM_ADAPTER(dev);
-    pLinuxData = (PLINUX_DEP_DATA)(Adapter->pvOsDepData);
-
-    //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Dev = %p, pLinuxData = %p", dev, pLinuxData);
-	pLinuxData->netstats.rx_packets=atomic_read(&Adapter->RxRollOverCount)*64*1024+Adapter->PrevNumRecvDescs;
-	pLinuxData->netstats.rx_bytes=atomic_read(&Adapter->GoodRxByteCount)+atomic_read(&Adapter->BadRxByteCount);
-	pLinuxData->netstats.rx_dropped=atomic_read(&Adapter->RxPacketDroppedCount);
-	pLinuxData->netstats.rx_errors=atomic_read(&Adapter->RxPacketDroppedCount);
-	pLinuxData->netstats.rx_length_errors=0;
-	pLinuxData->netstats.rx_frame_errors=0;
-	pLinuxData->netstats.rx_crc_errors=0;
-	pLinuxData->netstats.tx_bytes=atomic_read(&Adapter->GoodTxByteCount);
-	pLinuxData->netstats.tx_packets=atomic_read(&Adapter->TxTotalPacketCount);
-	pLinuxData->netstats.tx_dropped=atomic_read(&Adapter->TxDroppedPacketCount);
-
-    return &(pLinuxData->netstats);
-}
-/**
-@ingroup init_functions
-Register other driver entry points with the kernel
-*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
-static struct net_device_ops bcmNetDevOps = {
-    .ndo_open		= bcm_open,
-    .ndo_stop 		= bcm_close,
-    .ndo_get_stats 	= bcm_get_stats,
-    .ndo_start_xmit	= bcm_transmit,
-    .ndo_change_mtu	= eth_change_mtu,
-    .ndo_set_mac_address = eth_mac_addr,
-    .ndo_validate_addr	= eth_validate_addr,
-};
-#endif
-
-int register_networkdev(PMINI_ADAPTER Adapter)
-{
-	int result=0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
-	void **temp = NULL; /* actually we're *allocating* the device in alloc_etherdev */
-#endif
-	Adapter->dev = alloc_etherdev(sizeof(PMINI_ADAPTER));
-	if(!Adapter->dev)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "ERR: No Dev");
-		return -ENOMEM;
-	}
-	gblpnetdev							= Adapter->dev;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
-	Adapter->dev->priv      			= Adapter;
-#else
-	temp = netdev_priv(Adapter->dev);
-	*temp = (void *)Adapter;
-#endif
-	//BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "init adapterptr: %x %x\n", (UINT)Adapter, temp);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
-        Adapter->dev->netdev_ops                = &bcmNetDevOps;
-#else
-	Adapter->dev->open      			= bcm_open;
-	Adapter->dev->stop               	= bcm_close;
-	Adapter->dev->get_stats          	= bcm_get_stats;
-	Adapter->dev->hard_start_xmit    	= bcm_transmit;
-	Adapter->dev->hard_header_len    	= ETH_HLEN + LEADER_SIZE;
-#endif
-
-#ifndef BCM_SHM_INTERFACE
-	Adapter->dev->mtu					= MTU_SIZE; /* 1400 Bytes */
-	/* Read the MAC Address from EEPROM */
-	ReadMacAddressFromNVM(Adapter);
-
-
-	/* Register the notifier block for getting netdevice events */
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Registering netdevice notifier\n");
-	result = register_netdevice_notifier(&bcm_notifier_block);
-	if(result)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "BCM Notifier Block did not get registered");
-		Adapter->bNetdeviceNotifierRegistered = FALSE;
-		return result;
-	}
-	else
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "BCM Notifier got Registered");
-		Adapter->bNetdeviceNotifierRegistered = TRUE;
-	}
-
-#else
-
-	Adapter->dev->mtu			= CPE_MTU_SIZE;
-
-#if 0
-	//for CPE - harcode the virtual mac address
-	Adapter->dev->dev_addr[0] =  MII_WIMAX_MACADDRESS[0];
-	Adapter->dev->dev_addr[1] =  MII_WIMAX_MACADDRESS[1];
-	Adapter->dev->dev_addr[2] =  MII_WIMAX_MACADDRESS[2];
-	Adapter->dev->dev_addr[3] =  MII_WIMAX_MACADDRESS[3];
-	Adapter->dev->dev_addr[4] =  MII_WIMAX_MACADDRESS[4];
-	Adapter->dev->dev_addr[5] =  MII_WIMAX_MACADDRESS[5];
-#else
-	ReadMacAddressFromNVM(Adapter);
-#endif
-	strcpy(Adapter->dev->name, bcmVirtDeviceName); //Copy the device name
-
-#endif
-
-	result = register_netdev(Adapter->dev);
-	if (!result)
-	{
-		Adapter->bNetworkInterfaceRegistered = TRUE ;
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Beceem Network device name is %s!", Adapter->dev->name);
-	}
-	else
-	{
-    	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Network device can not be registered!");
-		Adapter->bNetworkInterfaceRegistered = FALSE ;
-		return result;
-	}
-
-#if 0
- Adapter->stDebugState.debug_level = DBG_LVL_CURR;
- Adapter->stDebugState.type =(UINT)0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_OTHERS] = 0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_RX] = 0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_TX] = 0xffffffff;
- Adapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xffffffff;
-
- printk("-------ps_adapter->stDebugState.type=%x\n",Adapter->stDebugState.type);
- printk("-------ps_adapter->stDebugState.subtype[DBG_TYPE_OTHERS]=%x\n",Adapter->stDebugState.subtype[DBG_TYPE_OTHERS]);
- printk("-------ps_adapter->stDebugState.subtype[DBG_TYPE_RX]=%x\n",Adapter->stDebugState.subtype[DBG_TYPE_RX]);
- printk("-------ps_adapter->stDebugState.subtype[DBG_TYPE_TX]=%x\n",Adapter->stDebugState.subtype[DBG_TYPE_TX]);
-#endif
 
 	return 0;
 }
 
-void bcm_unregister_networkdev(PMINI_ADAPTER Adapter)
+static INT bcm_close(struct net_device *dev)
 {
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unregistering the Net Dev...\n");
-	if(Adapter->dev && !IS_ERR(Adapter->dev) && Adapter->bNetworkInterfaceRegistered)
-		unregister_netdev(Adapter->dev);
-		/* Unregister the notifier block */
-	if(Adapter->bNetdeviceNotifierRegistered == TRUE)
-	{
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Unregistering netdevice notifier\n");
-			unregister_netdevice_notifier(&bcm_notifier_block);
-  }
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+	if (netif_msg_ifdown(Adapter))
+		pr_info(PFX "%s: disabling interface\n", dev->name);
+
+	netif_carrier_off(dev);
+	netif_stop_queue(dev);
+
+	return 0;
 }
 
-static int bcm_init(void)
+static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
+	return ClassifyPacket(netdev_priv(dev), skb);
+}
+
+/*******************************************************************
+* Function    -	bcm_transmit()
+*
+* Description - This is the main transmit function for our virtual
+*		interface(eth0). It handles the ARP packets. It
+*		clones this packet and then Queue it to a suitable
+* 		Queue. Then calls the transmit_packet().
+*
+* Parameter   -	 skb - Pointer to the socket buffer structure
+*		 dev - Pointer to the virtual net device structure
+*
+*********************************************************************/
+
+static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev)
+{
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+	u16 qindex = skb_get_queue_mapping(skb);
+
+	if (Adapter->device_removed || !Adapter->LinkUpStatus)
+		goto drop;
+
+	if (Adapter->TransferMode != IP_PACKET_ONLY_MODE)
+		goto drop;
+
+	if (INVALID_QUEUE_INDEX == qindex)
+		goto drop;
+
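+	/* Per-queue backlog limit reached: ask the stack to requeue and retry later */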
+	if (Adapter->PackInfo[qindex].uiCurrentPacketsOnHost >=
+	    SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
+		return NETDEV_TX_BUSY;
+
+	/* Now Enqueue the packet */
+	if (netif_msg_tx_queued(Adapter))
+		pr_info(PFX "%s: enqueueing packet to queue %d\n",
+			dev->name, qindex);
+
+	spin_lock(&Adapter->PackInfo[qindex].SFQueueLock);
+	Adapter->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
+	Adapter->PackInfo[qindex].uiCurrentPacketsOnHost++;
+
+	*((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies;
+	ENQUEUEPACKET(Adapter->PackInfo[qindex].FirstTxQueue,
+		      Adapter->PackInfo[qindex].LastTxQueue, skb);
+	atomic_inc(&Adapter->TotalPacketCount);
+	spin_unlock(&Adapter->PackInfo[qindex].SFQueueLock);
+
+	/* FIXME - this is racy and incorrect, replace with work queue */
+	if (!atomic_read(&Adapter->TxPktAvail)) {
+		atomic_set(&Adapter->TxPktAvail, 1);
+		wake_up(&Adapter->tx_packet_wait_queue);
+	}
+	return NETDEV_TX_OK;
+
+ drop:
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/**
+@ingroup init_functions
+Register other driver entry points with the kernel
+*/
+static const struct net_device_ops bcmNetDevOps = {
+    .ndo_open		= bcm_open,
+    .ndo_stop 		= bcm_close,
+    .ndo_start_xmit	= bcm_transmit,
+    .ndo_change_mtu	= eth_change_mtu,
+    .ndo_set_mac_address = eth_mac_addr,
+    .ndo_validate_addr	= eth_validate_addr,
+    .ndo_select_queue	= bcm_select_queue,
+};
+
+static struct device_type wimax_type = {
+	.name	= "wimax",
+};
+
+static int bcm_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	cmd->supported		= 0;
+	cmd->advertising	= 0;
+	cmd->speed		= SPEED_10000;
+	cmd->duplex		= DUPLEX_FULL;
+	cmd->port		= PORT_TP;
+	cmd->phy_address	= 0;
+	cmd->transceiver	= XCVR_INTERNAL;
+	cmd->autoneg		= AUTONEG_DISABLE;
+	cmd->maxtxpkt		= 0;
+	cmd->maxrxpkt		= 0;
+	return 0;
+}
+
+static void bcm_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+	PS_INTERFACE_ADAPTER psIntfAdapter = Adapter->pvInterfaceAdapter;
+	struct usb_device *udev = interface_to_usbdev(psIntfAdapter->interface);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u",
+		 Adapter->uiFlashLayoutMajorVersion,
+		 Adapter->uiFlashLayoutMinorVersion);
+
+	usb_make_path(udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 bcm_get_link(struct net_device *dev)
+{
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+	return Adapter->LinkUpStatus;
+}
+
+static u32 bcm_get_msglevel (struct net_device *dev)
+{
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+	return Adapter->msg_enable;
+}
+
+static void bcm_set_msglevel (struct net_device *dev, u32 level)
+{
+	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(dev);
+
+	Adapter->msg_enable = level;
+}
+
+static const struct ethtool_ops bcm_ethtool_ops = {
+	.get_settings	= bcm_get_settings,
+	.get_drvinfo	= bcm_get_drvinfo,
+	.get_link 	= bcm_get_link,
+	.get_msglevel	= bcm_get_msglevel,
+	.set_msglevel	= bcm_set_msglevel,
+};
+
+int register_networkdev(PMINI_ADAPTER Adapter)
+{
+	struct net_device *net = Adapter->dev;
+	PS_INTERFACE_ADAPTER IntfAdapter = Adapter->pvInterfaceAdapter;
+	struct usb_interface *udev = IntfAdapter->interface;
+	struct usb_device *xdev = IntfAdapter->udev;
+
 	int result;
-   	result = InterfaceInitialize();
-	if(result)
-	{
- 		printk("Initialisation failed for usbbcm");
+
+	net->netdev_ops = &bcmNetDevOps;
+	net->ethtool_ops = &bcm_ethtool_ops;
+	net->mtu = MTU_SIZE;	/* 1400 Bytes */
+	net->tx_queue_len = TX_QLEN;
+	net->flags |= IFF_NOARP;
+
+	netif_carrier_off(net);
+
+	SET_NETDEV_DEVTYPE(net, &wimax_type);
+
+	/* Read the MAC Address from EEPROM */
+	result = ReadMacAddressFromNVM(Adapter);
+	if (result != STATUS_SUCCESS) {
+		dev_err(&udev->dev,
+			PFX "Error reading the MAC address: %d\n", result);
+		return -EIO;
 	}
-	else
-	{
-		printk("Initialised usbbcm");
-	}
-	return result;
+
+	result = register_netdev(net);
+	if (result)
+		return result;
+
+	gblpnetdev = Adapter->dev;
+
+	if (netif_msg_probe(Adapter))
+		dev_info(&udev->dev, PFX "%s: register usb-%s-%s %pM\n",
+			 net->name, xdev->bus->bus_name, xdev->devpath,
+			 net->dev_addr);
+
+	return 0;
 }
 
-
-static void bcm_exit(void)
+void unregister_networkdev(PMINI_ADAPTER Adapter)
 {
-    printk("%s %s Calling InterfaceExit\n",__FILE__, __FUNCTION__);
-	InterfaceExit();
-    printk("%s %s InterfaceExit returned\n",__FILE__, __FUNCTION__);
+	struct net_device *net = Adapter->dev;
+	PS_INTERFACE_ADAPTER IntfAdapter = Adapter->pvInterfaceAdapter;
+	struct usb_interface *udev = IntfAdapter->interface;
+	struct usb_device *xdev = IntfAdapter->udev;
+
+	if (netif_msg_probe(Adapter))
+		dev_info(&udev->dev, PFX "%s: unregister usb-%s%s\n",
+			 net->name, xdev->bus->bus_name, xdev->devpath);
+
+	unregister_netdev(Adapter->dev);
 }
-
-module_init(bcm_init);
-module_exit(bcm_exit);
-MODULE_LICENSE ("GPL");
-
-
diff --git a/drivers/staging/bcm/CmHost.c b/drivers/staging/bcm/CmHost.c
index 6f388a3..5ac4582 100644
--- a/drivers/staging/bcm/CmHost.c
+++ b/drivers/staging/bcm/CmHost.c
@@ -15,6 +15,7 @@
 	eDeleteClassifier
 }E_CLASSIFIER_ACTION;
 
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid);
 
 /************************************************************
 * Function	  -	SearchSfid
@@ -28,7 +29,7 @@
 * Returns	  - Queue index for this SFID(If matched)
 				Else Invalid Queue Index(If Not matched)
 ************************************************************/
-__inline INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
+INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid)
 {
 	INT 	iIndex=0;
 	for(iIndex=(NO_OF_QUEUES-1); iIndex>=0; iIndex--)
@@ -47,26 +48,16 @@
 * Returns	  - Queue index for the free SFID
 *				Else returns Invalid Index.
 ****************************************************************/
-__inline INT SearchFreeSfid(PMINI_ADAPTER Adapter)
+static INT SearchFreeSfid(PMINI_ADAPTER Adapter)
 {
 	UINT 	uiIndex=0;
+
 	for(uiIndex=0; uiIndex < (NO_OF_QUEUES-1); uiIndex++)
 		if(Adapter->PackInfo[uiIndex].ulSFID==0)
 			return uiIndex;
 	return NO_OF_QUEUES+1;
 }
 
-__inline int SearchVcid(PMINI_ADAPTER Adapter,unsigned short usVcid)
-{
-	 int iIndex=0;
-	for(iIndex=(NO_OF_QUEUES-1);iIndex>=0;iIndex--)
-		if(Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
-			return iIndex;
-	return NO_OF_QUEUES+1;
-
-}
-
-
 /*
 Function:				SearchClsid
 Description:			This routinue would search Classifier  having specified ClassifierID as input parameter
@@ -76,7 +67,7 @@
 Return:					int :Classifier table index of matching entry
 */
 
-__inline int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16  uiClassifierID)
+static int SearchClsid(PMINI_ADAPTER Adapter,ULONG ulSFID,B_UINT16  uiClassifierID)
 {
 	unsigned int uiClassifierIndex = 0;
 	for(uiClassifierIndex=0;uiClassifierIndex<MAX_CLASSIFIERS;uiClassifierIndex++)
@@ -94,7 +85,7 @@
 This routinue would search Free available Classifier entry in classifier table.
 @return free Classifier Entry index in classifier table for specified SF
 */
-static __inline int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
+static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/
 						)
 {
 	unsigned int uiClassifierIndex = 0;
@@ -106,7 +97,7 @@
 	return MAX_CLASSIFIERS+1;
 }
 
-VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
+static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
 {
 	//deleting all the packet held in the SF
 	flush_queue(Adapter,uiSearchRuleIndex);
@@ -985,7 +976,7 @@
 
 	if(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication)
 	{
-		bcm_kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
+		kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
 		Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = NULL;
 	}
 	Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;
@@ -1061,12 +1052,6 @@
 		pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate	: 0x%X",
 		pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u32MinimumTolerableTrafficRate	: 0x%X",
-		pstAddIndication->sfAuthorizedSet.u32MinimumTolerableTrafficRate);
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u32RequesttransmissionPolicy	: 0x%X",
-		pstAddIndication->sfAuthorizedSet.u32RequesttransmissionPolicy);
-#endif
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength	: 0x%X",
 		pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam		: 0x%X",
@@ -1114,13 +1099,6 @@
 		pstAddIndication->sfAuthorizedSet.u8PagingPreference);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u16UnsolicitedPollingInterval		: 0x%X",
 		pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "MBSZoneIdentifierassignmentLength	: 0x%X",
-		pstAddIndication->sfAuthorizedSet.MBSZoneIdentifierassignmentLength);
-	for(uiLoopIndex=0; uiLoopIndex < MAX_STRING_LEN; uiLoopIndex++)
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "MBSZoneIdentifierassignment : 0x%X",
-			pstAddIndication->sfAuthorizedSet.MBSZoneIdentifierassignment[uiLoopIndex]);
-#endif
 
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "sfAuthorizedSet.u8HARQChannelMapping %x  %x %x ",
 				*(unsigned int*)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
@@ -1158,11 +1136,6 @@
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-#if 0
-
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u8ProtocolLength				:0x%X ",
-			psfCSType->cCPacketClassificationRule.u8ProtocolLength);
-#endif
 
 		for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
 			BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8Protocol : 0x%02X ",
@@ -1278,14 +1251,6 @@
 			pstAddIndication->sfAdmittedSet.u8QosParamSet);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8TrafficPriority			: 0x%02X",
 			pstAddIndication->sfAdmittedSet.u8TrafficPriority);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u32MaxSustainedTrafficRate	: 0x%02X",
-			ntohl(pstAddIndication->sfAdmittedSet.u32MaxSustainedTrafficRate));
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u32MinimumTolerableTrafficRate	: 0x%X",
-		pstAddIndication->sfAdmittedSet.u32MinimumTolerableTrafficRate);
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u32RequesttransmissionPolicy	: 0x%X",
-		pstAddIndication->sfAdmittedSet.u32RequesttransmissionPolicy);
-#endif
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u32MaxTrafficBurst			: 0x%X",
 			pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u32MinReservedTrafficRate	: 0x%X",
@@ -1339,13 +1304,6 @@
 		pstAddIndication->sfAdmittedSet.u16TimeBase);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8PagingPreference		: 0x%X",
 		pstAddIndication->sfAdmittedSet.u8PagingPreference);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "MBSZoneIdentifierassignmentLength	: 0x%X",
-		pstAddIndication->sfAdmittedSet.MBSZoneIdentifierassignmentLength);
-	for(uiLoopIndex=0; uiLoopIndex < MAX_STRING_LEN; uiLoopIndex++)
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "MBSZoneIdentifierassignment : 0x%X",
-	pstAddIndication->sfAdmittedSet.MBSZoneIdentifierassignment[uiLoopIndex]);
-#endif
 
 
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8TrafficIndicationPreference	: 0x%02X",
@@ -1378,11 +1336,6 @@
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-#if 0
-
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8ProtocolLength			:0x%02X ",
-			psfCSType->cCPacketClassificationRule.u8ProtocolLength);
-#endif
 		for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
 			BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8Protocol: 0x%02X ",
 			psfCSType->cCPacketClassificationRule.u8Protocol);
@@ -1497,20 +1450,10 @@
 		pstAddIndication->sfActiveSet.u8QosParamSet);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8TrafficPriority			: 0x%02X",
 		pstAddIndication->sfActiveSet.u8TrafficPriority);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u32MaxSustainedTrafficRate	: 0x%02X",
-		ntohl(pstAddIndication->sfActiveSet.u32MaxSustainedTrafficRate));
-#endif
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u32MaxTrafficBurst			: 0x%X",
 		pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u32MinReservedTrafficRate	: 0x%X",
 		pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u32MinimumTolerableTrafficRate	: 0x%X",
-		pstAddIndication->sfActiveSet.u32MinimumTolerableTrafficRate);
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "u32RequesttransmissionPolicy	: 0x%X",
-		pstAddIndication->sfActiveSet.u32RequesttransmissionPolicy);
-#endif
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8VendorSpecificQoSParamLength	: 0x%02X",
 		pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  "u8VendorSpecificQoSParam		: 0x%02X",
@@ -1558,13 +1501,6 @@
 		pstAddIndication->sfActiveSet.u16TimeBase);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  " u8PagingPreference		: 0x%X",
 		pstAddIndication->sfActiveSet.u8PagingPreference);
-#if 0
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  " MBSZoneIdentifierassignmentLength	: 0x%X",
-		pstAddIndication->sfActiveSet.MBSZoneIdentifierassignmentLength);
-	for(uiLoopIndex=0; uiLoopIndex < MAX_STRING_LEN; uiLoopIndex++)
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  " MBSZoneIdentifierassignment : 0x%X",
-		pstAddIndication->sfActiveSet.MBSZoneIdentifierassignment[uiLoopIndex]);
-#endif
 
 
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  " u8TrafficIndicationPreference	: 0x%X",
@@ -1597,11 +1533,6 @@
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
 			psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
-#if 0
-
-		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  " u8ProtocolLength				:0x%X ",
-			psfCSType->cCPacketClassificationRule.u8ProtocolLength);
-#endif
 		for(uiLoopIndex=0; uiLoopIndex < 1; uiLoopIndex++)
 			BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL,  " u8Protocol	: 0x%X ",
 			psfCSType->cCPacketClassificationRule.u8Protocol);
@@ -1706,12 +1637,8 @@
 		return 0;
 	}
 	ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  " RestoreSFParam: Total Words of DSX Message To Read: 0x%zx  From Target At : 0x%lx ",
-				nBytesToRead/sizeof(ULONG),ulAddrSFParamSet);
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "sizeof(stServiceFlowParamSI) = %zx", sizeof(stServiceFlowParamSI));
 
 	//Read out the SF Param Set At the indicated Location
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "nBytesToRead = %x", nBytesToRead);
 	if(rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
 		return STATUS_FAILURE;
 
@@ -1719,23 +1646,20 @@
 }
 
 
-static __inline ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG  ulAddrSFParamSet)
+static ULONG StoreSFParam(PMINI_ADAPTER Adapter,PUCHAR pucSrcBuffer,ULONG  ulAddrSFParamSet)
 {
     UINT	nBytesToWrite = sizeof(stServiceFlowParamSI);
-	UINT 	uiRetVal =0;
+	int ret = 0;
 
 	if(ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
 	{
 		return 0;
 	}
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  " StoreSFParam: Total Words of DSX Message To Write: 0x%zX  To Target At : 0x%lX ",(nBytesToWrite/sizeof(ULONG)),ulAddrSFParamSet);
 
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "WRM  with %x bytes",nBytesToWrite);
-
-	uiRetVal = wrm(Adapter,ulAddrSFParamSet,(PUCHAR)pucSrcBuffer, nBytesToWrite);
-	if(uiRetVal < 0) {
+	ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
+	if (ret < 0) {
 		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  "%s:%d WRM failed",__FUNCTION__, __LINE__);
-		return uiRetVal;
+		return ret;
 	}
 	return 1;
 }
@@ -1778,7 +1702,7 @@
 	}
 	// For DSA_REQ, only upto "psfAuthorizedSet" parameter should be accessed by driver!
 
-	pstAddIndication=(stLocalSFAddIndication *)kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
+	pstAddIndication=kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
 	if(NULL==pstAddIndication)
 		return 0;
 
@@ -1844,7 +1768,7 @@
 
 	(*puBufferLength) = sizeof(stLocalSFAddIndication);
 	*(stLocalSFAddIndication *)pvBuffer = *pstAddIndication;
-	bcm_kfree(pstAddIndication);
+	kfree(pstAddIndication);
 	return 1;
 }
 
@@ -1931,7 +1855,7 @@
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
 	return pstAddIndicationDest;
 failed_restore_sf_param:
-	bcm_kfree(pstAddIndicationDest);
+	kfree(pstAddIndicationDest);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====" );
 	return NULL;
 }
@@ -1988,7 +1912,7 @@
 	return 1;
 }
 
-ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
+static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid)
 {
 	ULONG  ulTargetDSXBufferAddress;
 	ULONG  ulTargetDsxBufferIndexToUse,ulMaxTry;
@@ -2049,7 +1973,7 @@
 {
 	if(Adapter->caDsxReqResp)
 	{
-		bcm_kfree(Adapter->caDsxReqResp);
+		kfree(Adapter->caDsxReqResp);
 	}
 	return 0;
 
@@ -2102,7 +2026,7 @@
 
 			BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,  " VCID = %x", ntohs(pstAddIndication->u16VCID));
 			CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
-			bcm_kfree(pstAddIndication);
+			kfree(pstAddIndication);
 		}
 		break;
 		case DSA_RSP:
@@ -2118,7 +2042,7 @@
 		case DSA_ACK:
 		{
 			UINT uiSearchRuleIndex=0;
-			struct timeval tv = {0};
+
 			BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
 				ntohs(pstAddIndication->u16VCID));
             uiSearchRuleIndex=SearchFreeSfid(Adapter);
@@ -2169,7 +2093,7 @@
 					Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
                     Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
 					Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
-					bcm_kfree(pstAddIndication);
+					kfree(pstAddIndication);
 				}
 
 				else if(psfLocalSet->bValid && (pstAddIndication->u8CC == 0))
@@ -2200,14 +2124,13 @@
 							if(!Adapter->LinkUpStatus)
 							{
 								netif_carrier_on(Adapter->dev);
-    							netif_start_queue(Adapter->dev);
+								netif_start_queue(Adapter->dev);
 								Adapter->LinkUpStatus = 1;
-								do_gettimeofday(&tv);
-
+								if (netif_msg_link(Adapter))
+									pr_info(PFX "%s: link up\n", Adapter->dev->name);
 								atomic_set(&Adapter->TxPktAvail, 1);
 								wake_up(&Adapter->tx_packet_wait_queue);
-								Adapter->liTimeSinceLastNetEntry = tv.tv_sec;
-								BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============Tx Service Flow Created!");
+								Adapter->liTimeSinceLastNetEntry = get_seconds();
 							}
 						}
 					}
@@ -2218,13 +2141,13 @@
 					Adapter->PackInfo[uiSearchRuleIndex].bActive=FALSE;
                     Adapter->PackInfo[uiSearchRuleIndex].bValid=FALSE;
 					Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value=0;
-					bcm_kfree(pstAddIndication);
+					kfree(pstAddIndication);
 				}
 			}
 			else
 			{
 				BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
-				bcm_kfree(pstAddIndication);
+				kfree(pstAddIndication);
 				return FALSE;
 			}
 		}
@@ -2239,7 +2162,7 @@
 			((stLocalSFChangeIndicationAlt*)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
 
 			CopyBufferToControlPacket(Adapter,(PVOID)Adapter->caDsxReqResp);
-			bcm_kfree(pstAddIndication);
+			kfree(pstAddIndication);
 		}
 		break;
 		case DSC_RSP:
@@ -2312,13 +2235,13 @@
 				else if(pstChangeIndication->u8CC == 6)
 				{
 					deleteSFBySfid(Adapter,uiSearchRuleIndex);
-					bcm_kfree(pstAddIndication);
+					kfree(pstAddIndication);
 				}
 			}
 			else
 			{
 				BCM_DEBUG_PRINT( Adapter,DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
-				bcm_kfree(pstAddIndication);
+				kfree(pstAddIndication);
 				return FALSE;
 			}
 		}
@@ -2355,7 +2278,7 @@
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
 			break;
 	default:
-		bcm_kfree(pstAddIndication);
+		kfree(pstAddIndication);
 		return FALSE ;
 	}
 	return TRUE;
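Aside: the CmHost.c hunks replace the driver's bcm_kfree() wrapper with plain kfree(). Since kfree(NULL) is a no-op, the NULL check the wrapper carried is redundant; a minimal sketch against the caDsxReqResp buffer freed above (demo_free_dsx is a hypothetical helper name):

#include <linux/slab.h>

static void demo_free_dsx(PMINI_ADAPTER Adapter)
{
	/* kfree() tolerates NULL, so no "if (ptr)" guard is needed. */
	kfree(Adapter->caDsxReqResp);
	Adapter->caDsxReqResp = NULL;	/* avoid a dangling pointer */
}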
diff --git a/drivers/staging/bcm/CmHost.h b/drivers/staging/bcm/CmHost.h
index 847782c..8f68976 100644
--- a/drivers/staging/bcm/CmHost.h
+++ b/drivers/staging/bcm/CmHost.h
@@ -150,8 +150,6 @@
 
 ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT *puBufferLength);
 
-ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter,B_UINT16 tid);
-
 INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter);
 
 INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter);
@@ -159,7 +157,6 @@
 
 BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter,PVOID pvBuffer);
 
-VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex);
 
 #pragma pack (pop)
 
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index 8907e21..1c7db81 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -1,6 +1,5 @@
 #include "headers.h"
 
-#ifndef BCM_SHM_INTERFACE
 
 
 #define DDR_DUMP_INTERNAL_DEVICE_MEMORY 0xBFC02B00
@@ -188,17 +187,6 @@
                                         {0x0f000840,0x0FFF1B00},
                                         {0x0f000870,0x00000002}
 									  };
-#if 0
-static DDR_SET_NODE asDPLL_800MHZ[] = {
-										{0x0f000810,0x00000F95},
-										{0x0f000810,0x00000F95},
-                                        {0x0f000810,0x00000F95},
-                                        {0x0f000820,0x03F1365B},
-                                        {0x0f000840,0x0FFF0000},
-                                        {0x0f000880,0x000003DD},
-                                        {0x0f000860,0x00000000}
-									  };
-#endif
 
 #define T3B_SKIP_CLOCK_PROGRAM_DUMP_133MHZ 11  //index for 0x0F007000
 static DDR_SET_NODE asT3B_DDRSetting133MHz[] = {//      # DPLL Clock Setting
@@ -788,7 +776,7 @@
 {
 	PDDR_SETTING psDDRSetting=NULL;
 	ULONG RegCount=0;
-	ULONG value = 0;
+	UINT value = 0;
 	UINT  uiResetValue = 0;
 	UINT uiClockSetting = 0;
 	int retval = STATUS_SUCCESS;
@@ -982,7 +970,7 @@
 		{
 			value = psDDRSetting->ulRegValue;
 		}
-		retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, (PUINT)&value, sizeof(value));
+		retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
 		if(STATUS_SUCCESS != retval) {
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
 			break;
@@ -1298,5 +1286,4 @@
 	return retval;
 }
 
-#endif
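Aside: the type change from ULONG to UINT in the hunk above also fixes the write length, since the value is passed by address with sizeof(). A sketch annotating that hunk (demo_write_reg is a hypothetical wrapper; the wrmalt() call is taken verbatim from the diff):

static int demo_write_reg(PMINI_ADAPTER Adapter, PDDR_SETTING psDDRSetting)
{
	/* UINT keeps sizeof(value) at 4 on both 32- and 64-bit builds, matching
	 * the 32-bit register write; ULONG would be 8 bytes on 64-bit, and the
	 * old (PUINT) cast merely hid that mismatch.
	 */
	UINT value = psDDRSetting->ulRegValue;

	return wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
}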
 
diff --git a/drivers/staging/bcm/Debug.c b/drivers/staging/bcm/Debug.c
deleted file mode 100644
index 2703f30..0000000
--- a/drivers/staging/bcm/Debug.c
+++ /dev/null
@@ -1,41 +0,0 @@
-#include "headers.h"
-
-static UINT current_debug_level=BCM_SCREAM;
-
-int bcm_print_buffer( UINT debug_level, const char *function_name,
-				  char *file_name, int line_number, unsigned char *buffer, int bufferlen, enum _BASE_TYPE base)
-{
-	static const char * const buff_dump_base[] = {
-		"DEC", "HEX", "OCT", "BIN"
-	};
-	if(debug_level>=current_debug_level)
-	{
-		int i=0;
-		printk("\n%s:%s:%d:Buffer dump of size 0x%x in the %s:\n", file_name, function_name, line_number, bufferlen, buff_dump_base[1]);
-		for(;i<bufferlen;i++)
-		{
-			if(i && !(i%16) )
-				printk("\n");
-			switch(base)
-			{
-				case BCM_BASE_TYPE_DEC:
-					printk("%03d ", buffer[i]);
-					break;
-				case BCM_BASE_TYPE_OCT:
-					printk("%0x03o ", buffer[i]);
-					break;
-				case BCM_BASE_TYPE_BIN:
-					printk("%02x ", buffer[i]);
-					break;
-				case BCM_BASE_TYPE_HEX:
-				default:
-					printk("%02X ", buffer[i]);
-					break;
-			}
-		}
-		printk("\n");
-	}
-	return 0;
-}
-
-
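Aside: the hand-rolled bcm_print_buffer() is removed because later hunks switch callers to the kernel's print_hex_dump(), which already does the row formatting. A minimal sketch of the replacement call (demo_dump is a hypothetical wrapper):

#include <linux/printk.h>

static void demo_dump(const void *buf, size_t len)
{
	/* 16 bytes per row, 1-byte groups, offset prefix, no ASCII column. */
	print_hex_dump(KERN_DEBUG, "bcm: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}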
diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h
index 3d788b5..3138729 100644
--- a/drivers/staging/bcm/Debug.h
+++ b/drivers/staging/bcm/Debug.h
@@ -9,34 +9,6 @@
 #include <linux/string.h>
 #define NONE 0xFFFF
 
-typedef enum _BASE_TYPE
-{
-	BCM_BASE_TYPE_DEC,
-	BCM_BASE_TYPE_OCT,
-	BCM_BASE_TYPE_BIN,
-	BCM_BASE_TYPE_HEX,
-	BCM_BASE_TYPE_NONE,
-} BASE_TYPE, *PBASE_TYPE;
-
-int bcm_print_buffer( UINT debug_level, const char *function_name,
-				  char *file_name, int line_number, unsigned char *buffer, int bufferlen, BASE_TYPE base);
-
-#ifdef BCM_SHM_INTERFACE
-#define CPE_VIRTUAL_ERROR_CODE_BASE_ADDR		(0xBFC02E00 + 0x4C)
-// ERROR codes for debugging
-extern unsigned char u32ErrorCounter ;
-#define ERROR_DEVICE_REMOVED  0x1
-#define ERROR_LEADER_LENGTH_ZERO  0x2
-#define ERROR_LEADER_LENGTH_CORRUPTED  0x3
-#define ERROR_NO_SKBUFF  0x4
-
-#define ERROR_DL_MODULE 0xaa000000
-extern void  CPE_ERROR_LOG(unsigned int module,unsigned int code);
-
-#endif
-
-
-
 
 //--------------------------------------------------------------------------------
 
@@ -242,43 +214,33 @@
 
 //--- Only for direct printk's; "hidden" to API.
 #define DBG_TYPE_PRINTK		3
-#define PRINTKS_ON			1	// "hidden" from API, set to 0 to turn off all printk's
 
-#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) do { \
-	if ((DBG_TYPE_PRINTK == Type) && (PRINTKS_ON)) {	\
-		printk ("%s:" string, __FUNCTION__, ##args);	\
-		printk("\n");	\
-	} else if (!Adapter)			\
-		;							\
-	else {							\
-		if (((dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level) &&	\
-		   ((Type & Adapter->stDebugState.type) && (SubType & Adapter->stDebugState.subtype[Type]))) { \
-		   		if (dbg_level & DBG_NO_FUNC_PRINT)		\
-					printk (string, ##args);						\
-				else	\
-					{												\
-					printk ("%s:" string, __FUNCTION__, ##args);	\
-					printk("\n"); \
-					} \
-		}	\
-		}	\
-} while (0)
+#define BCM_DEBUG_PRINT(Adapter, Type, SubType, dbg_level, string, args...) \
+	do {								\
+		if (DBG_TYPE_PRINTK == Type)				\
+			pr_info("%s:" string, __func__, ##args);	\
+		else if (Adapter &&					\
+			 (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level && \
+			 (Type & Adapter->stDebugState.type) &&		\
+			 (SubType & Adapter->stDebugState.subtype[Type])) { \
+			if (dbg_level & DBG_NO_FUNC_PRINT)		\
+				printk(KERN_DEBUG string, ##args);	\
+			else						\
+				printk(KERN_DEBUG "%s:" string, __func__, ##args);	\
+		}							\
+	} while (0)
 
 #define BCM_DEBUG_PRINT_BUFFER(Adapter, Type, SubType, dbg_level,  buffer, bufferlen) do { \
-		if ((DBG_TYPE_PRINTK == Type) && (PRINTKS_ON)) {	\
-			bcm_print_buffer( dbg_level, __FUNCTION__, __FILE__, __LINE__, buffer, bufferlen, BCM_BASE_TYPE_HEX);	\
-		} else if (!Adapter)			\
-			;							\
-		else {							\
-			if (((dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level)  && \
-			   ((Type & Adapter->stDebugState.type) && (SubType & Adapter->stDebugState.subtype[Type]))) { \
-					if (dbg_level & DBG_NO_FUNC_PRINT)		\
-						bcm_print_buffer( dbg_level, NULL, NULL, __LINE__, buffer, bufferlen, BCM_BASE_TYPE_HEX);						\
-					else												\
-						bcm_print_buffer( dbg_level, __FUNCTION__, __FILE__, __LINE__, buffer, bufferlen, BCM_BASE_TYPE_HEX);	\
-			}	\
-		}	\
-	} while (0)
+	if (DBG_TYPE_PRINTK == Type ||					\
+	    (Adapter &&							\
+	     (dbg_level & DBG_LVL_BITMASK) <= Adapter->stDebugState.debug_level  && \
+	     (Type & Adapter->stDebugState.type) &&			\
+	     (SubType & Adapter->stDebugState.subtype[Type]))) {	\
+		printk(KERN_DEBUG "%s:\n", __func__);			\
+		print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,	\
+			       16, 1, buffer, bufferlen, false);	\
+	}								\
+} while (0)
 
 
 #define BCM_SHOW_DEBUG_BITMAP(Adapter)	do { \
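Aside: the rewritten BCM_DEBUG_PRINT above folds the old PRINTKS_ON special case into one conditional around printk(). A reduced sketch of the same shape, assuming a hypothetical priv->debug_level field:

#define DEMO_DBG(priv, level, fmt, args...)				\
	do {								\
		if ((priv) && (level) <= (priv)->debug_level)		\
			printk(KERN_DEBUG "%s: " fmt, __func__, ##args); \
	} while (0)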
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
index 7b2ec28..2b1e9e1 100644
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ b/drivers/staging/bcm/HandleControlPacket.c
@@ -11,8 +11,7 @@
 Enqueue the control packet for Application.
 @return None
 */
-VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, 	/**<Pointer to the Adapter structure*/
-								struct sk_buff *skb)				/**<Pointer to the socket buffer*/
+static VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, struct sk_buff *skb)
 {
 	PPER_TARANG_DATA	pTarang = NULL;
 	BOOLEAN HighPriorityMessage = FALSE;
@@ -20,8 +19,10 @@
 	CHAR cntrl_msg_mask_bit = 0;
 	BOOLEAN drop_pkt_flag = TRUE ;
 	USHORT usStatus = *(PUSHORT)(skb->data);
-	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "=====>");
-	/* Get the Leader field */
+
+	if (netif_msg_pktdata(Adapter))
+		print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
+			       16, 1, skb->data, skb->len, 0);
 
 	switch(usStatus)
 	{
@@ -134,7 +135,7 @@
     }
 	up(&Adapter->RxAppControlQueuelock);
     wake_up(&Adapter->process_read_wait_queue);
-    bcm_kfree_skb(skb);
+    dev_kfree_skb(skb);
 	BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "After wake_up_interruptible");
 }
 
@@ -185,33 +186,7 @@
 			{
 				DEQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail);
 //				Adapter->RxControlHead=ctrl_packet->next;
-				((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.rx_packets++;
-				((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.rx_bytes+=
-				((PLEADER)ctrl_packet->data)->PLength;
 			}
-			#if 0  //Idle mode debug profiling...
-			if(*(PUSHORT)ctrl_packet->data == IDLE_MODE_STATUS)
-			{
-				puiBuffer = (PUINT)(ctrl_packet->data +sizeof(USHORT));
-				if((ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD))
-				{
-					memset(&tv, 0, sizeof(tv));
-					do_gettimeofday(&tv);
-					if((ntohl(*(puiBuffer+1)) == 0))
-					{
-						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "IdleMode Wake-up Msg from f/w at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
-					}
-					else
-					{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "IdleMode req Msg from f/w at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
-					}
-				}
-				else if((ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG))
-				{
-					BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "GOT IDLE_MODE_SF_UPDATE MSG at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
-				}
-			}
-			#endif
 
 			spin_unlock_irqrestore (&Adapter->control_queue_lock, flags);
 		 	handle_rx_control_packet(Adapter, ctrl_packet);
@@ -234,7 +209,7 @@
 		{
 			PacketToDrop=pTarang->RxAppControlHead;
 			DEQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail);
-			bcm_kfree_skb(PacketToDrop);
+			dev_kfree_skb(PacketToDrop);
 		}
 		pTarang->AppCtrlQueueLen = 0;
 		//dropped contrl packet statistics also should be reset.
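Aside: bcm_kfree_skb() is replaced by the core helper dev_kfree_skb() in the hunks above. A minimal sketch (demo_drop is a hypothetical wrapper):

#include <linux/skbuff.h>

static void demo_drop(struct sk_buff *skb)
{
	/* Core skb free; suitable here since the caller is not in hard-IRQ
	 * context (dev_kfree_skb_irq() would be used there).
	 */
	dev_kfree_skb(skb);
}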
diff --git a/drivers/staging/bcm/HostMibs.h b/drivers/staging/bcm/HostMibs.h
deleted file mode 100644
index 28a5783..0000000
--- a/drivers/staging/bcm/HostMibs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _HOST_MIBS_H
-#define _HOST_MIBS_H
-
-INT ProcessGetHostMibs(PMINI_ADAPTER Adapter,
-						  PVOID ioBuffer,
-						  ULONG inputBufferLength);
-#endif
diff --git a/drivers/staging/bcm/IPv6Protocol.c b/drivers/staging/bcm/IPv6Protocol.c
index 5ec3b89..91b6fbe 100644
--- a/drivers/staging/bcm/IPv6Protocol.c
+++ b/drivers/staging/bcm/IPv6Protocol.c
@@ -1,5 +1,9 @@
 #include "headers.h"
 
+static BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
+static BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
+static VOID DumpIpv6Header(IPV6Header *pstIpv6Header);
+
 static UCHAR * GetNextIPV6ChainedHeader(UCHAR **ppucPayload,UCHAR *pucNextHeader,BOOLEAN *bParseDone,USHORT *pusPayloadLength)
 {
 	UCHAR *pucRetHeaderPtr = NULL;
@@ -257,7 +261,7 @@
 }
 
 
-BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
+static BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
 {
 	UINT uiLoopIndex=0;
 	UINT  uiIpv6AddIndex=0;
@@ -310,7 +314,7 @@
 	return FALSE;
 }
 
-BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
+static BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header)
 {
 	UINT uiLoopIndex=0;
 	UINT  uiIpv6AddIndex=0;
@@ -376,7 +380,7 @@
 
 }
 
-VOID DumpIpv6Header(IPV6Header *pstIpv6Header)
+static VOID DumpIpv6Header(IPV6Header *pstIpv6Header)
 {
 	UCHAR ucVersion;
 	UCHAR  ucPrio ;
diff --git a/drivers/staging/bcm/IPv6ProtocolHdr.h b/drivers/staging/bcm/IPv6ProtocolHdr.h
index b93f790..a0db5a1 100644
--- a/drivers/staging/bcm/IPv6ProtocolHdr.h
+++ b/drivers/staging/bcm/IPv6ProtocolHdr.h
@@ -101,15 +101,12 @@
 
 
 //Function Prototypes
-BOOLEAN MatchSrcIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
-BOOLEAN MatchDestIpv6Address(S_CLASSIFIER_RULE *pstClassifierRule,IPV6Header *pstIpv6Header);
 
 USHORT	IpVersion6(PMINI_ADAPTER Adapter, /**< Pointer to the driver control structure */
 					PVOID pcIpHeader, /**<Pointer to the IP Hdr of the packet*/
 					S_CLASSIFIER_RULE *pstClassifierRule );
 
 VOID DumpIpv6Address(ULONG *puIpv6Address);
-VOID DumpIpv6Header(IPV6Header *pstIpv6Header);
 
 extern BOOLEAN MatchSrcPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
 extern BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
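Aside: MatchSrcIpv6Address(), MatchDestIpv6Address() and DumpIpv6Header() become static, so their prototypes move out of this shared header into file-local forward declarations at the top of IPv6Protocol.c. The general shape, with a made-up helper:

static bool demo_match(const u32 *rule, const u32 *hdr);	/* forward declaration */

static bool demo_match(const u32 *rule, const u32 *hdr)
{
	return *rule == *hdr;
}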
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index 60c0f29..df64acb 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -1,20 +1,18 @@
 #include "headers.h"
 
-#ifndef BCM_SHM_INTERFACE
 
 int InterfaceFileDownload( PVOID arg,
                         struct file *flp,
                         unsigned int on_chip_loc)
 {
-    char            *buff=NULL;
    // unsigned int    reg=0;
     mm_segment_t    oldfs={0};
     int             errno=0, len=0 /*,is_config_file = 0*/;
     loff_t          pos=0;
 	PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
 	//PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
+    char            *buff=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
 
-    buff=(PCHAR)kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
     if(!buff)
     {
         return -ENOMEM;
@@ -49,7 +47,7 @@
         on_chip_loc+=MAX_TRANSFER_CTRL_BYTE_USB;
 	}/* End of for(;;)*/
 
-	bcm_kfree(buff);
+	kfree(buff);
     return errno;
 }
 
@@ -57,7 +55,7 @@
                         struct file *flp,
                         unsigned int on_chip_loc)
 {
-    char            *buff=NULL, *buff_readback=NULL;
+    char            *buff, *buff_readback;
     unsigned int    reg=0;
     mm_segment_t    oldfs={0};
     int             errno=0, len=0, is_config_file = 0;
@@ -66,12 +64,12 @@
 	INT				Status = STATUS_SUCCESS;
 	PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
 
-    buff=(PCHAR)kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
-    buff_readback=(PCHAR)kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
+    buff=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
+    buff_readback=kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
     if(!buff || !buff_readback)
     {
-        bcm_kfree(buff);
-        bcm_kfree(buff_readback);
+        kfree(buff);
+        kfree(buff_readback);
 
         return -ENOMEM;
     }
@@ -138,8 +136,8 @@
         on_chip_loc+=MAX_TRANSFER_CTRL_BYTE_USB;
     }/* End of while(1)*/
 exit:
-    bcm_kfree(buff);
-    bcm_kfree(buff_readback);
+    kfree(buff);
+    kfree(buff_readback);
 	return Status;
 }
 
@@ -165,7 +163,7 @@
 			psFwInfo->pvMappedFirmwareAddress, psFwInfo->u32FirmwareLength);
 	if(retval)
 	{
-		bcm_kfree (Adapter->pstargetparams);
+		kfree(Adapter->pstargetparams);
 		Adapter->pstargetparams = NULL;
 		return -EFAULT;
 	}
@@ -231,41 +229,6 @@
 
 	return retval;
 }
-#if 0
-static int bcm_download_buffer(PMINI_ADAPTER Adapter,
-	unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
-	unsigned long u32StartingAddress)
-{
-    char            *buff=NULL;
-    unsigned int    len = 0;
-	int 			retval = STATUS_SUCCESS;
-	buff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
-
-	len = u32FirmwareLength;
-
-	while(u32FirmwareLength)
-	{
-		len = MIN_VAL (u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
-		if(STATUS_SUCCESS != (retval = copy_from_user(buff,
-				(unsigned char *)mappedbuffer, len)))
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copy_from_user failed\n");
-			break;
-		}
-		retval = wrm (Adapter, u32StartingAddress, buff, len);
-		if(retval)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "wrm failed\n");
-			break;
-		}
-		u32StartingAddress 	+= len;
-		u32FirmwareLength  	-= len;
-		mappedbuffer	   	+=len;
-	}
-	bcm_kfree(buff);
-	return retval;
-}
-#endif
 static int bcm_compare_buff_contents(unsigned char *readbackbuff,
 	unsigned char *buff,unsigned int len)
 {
@@ -297,58 +260,6 @@
 	}
 	return retval;
 }
-#if 0
-static int bcm_buffer_readback(PMINI_ADAPTER Adapter,
-	unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
-	unsigned long u32StartingAddress)
-{
-	unsigned char *buff = NULL;
-	unsigned char *readbackbuff = NULL;
-	unsigned int  len = u32FirmwareLength;
-	int retval = STATUS_SUCCESS;
-
-    buff=(unsigned char *)kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
-	if(NULL == buff)
-		return -ENOMEM;
-	readbackbuff =  (unsigned char *)kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,
-					GFP_KERNEL);
-	if(NULL == readbackbuff)
-	{
-		bcm_kfree(buff);
-		return -ENOMEM;
-	}
-	while (u32FirmwareLength && !retval)
-	{
-		len = MIN_VAL (u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
-
-		/* read from the appl buff and then read from the target, compare */
-		if(STATUS_SUCCESS != (retval = copy_from_user(buff,
-				(unsigned char *)mappedbuffer, len)))
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "copy_from_user failed\n");
-			break;
-		}
-		retval = rdm (Adapter, u32StartingAddress, readbackbuff, len);
-		if(retval)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed\n");
-			break;
-		}
-
-		if (STATUS_SUCCESS !=
-			(retval = bcm_compare_buff_contents (readbackbuff, buff, len)))
-		{
-			break;
-		}
-		u32StartingAddress 	+= len;
-		u32FirmwareLength  	-= len;
-		mappedbuffer	   	+=len;
-	}/* end of while (u32FirmwareLength && !retval) */
-	bcm_kfree(buff);
-	bcm_kfree(readbackbuff);
-	return retval;
-}
-#endif
 int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo)
 {
 	int retval = STATUS_SUCCESS;
@@ -375,7 +286,7 @@
 	else
 	{
 
-		buff = (PUCHAR)kzalloc(psFwInfo->u32FirmwareLength,GFP_KERNEL);
+		buff = kzalloc(psFwInfo->u32FirmwareLength,GFP_KERNEL);
 		if(buff==NULL)
 		{
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Failed in allocation memory");
@@ -389,23 +300,6 @@
 			goto error ;
 		}
 
-		#if 0
-		retval = bcm_download_buffer(Adapter,
-				(unsigned char *)psFwInfo->pvMappedFirmwareAddress,
-				psFwInfo->u32FirmwareLength, psFwInfo->u32StartingAddress);
-		if(retval != STATUS_SUCCESS)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "User space buffer download fails....");
-		}
-		retval = bcm_buffer_readback (Adapter,
-				(unsigned char *)psFwInfo->pvMappedFirmwareAddress,
-				psFwInfo->u32FirmwareLength, psFwInfo->u32StartingAddress);
-
-		if(retval != STATUS_SUCCESS)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "read back verifier failed ....");
-		}
-		#endif
 		retval = buffDnldVerify(Adapter,
 					buff,
 					psFwInfo->u32FirmwareLength,
@@ -417,7 +311,7 @@
 		}
 	}
 error:
-	bcm_kfree(buff);
+	kfree(buff);
 	return retval;
 }
 
@@ -450,11 +344,10 @@
 			PUCHAR mappedbuffer, UINT u32FirmwareLength,
 			ULONG u32StartingAddress)
 {
-	PUCHAR readbackbuff = NULL;
 	UINT len = u32FirmwareLength;
 	INT retval = STATUS_SUCCESS;
+	PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,GFP_KERNEL);
 
-	readbackbuff = (PUCHAR)kzalloc(MAX_TRANSFER_CTRL_BYTE_USB,GFP_KERNEL);
 	if(NULL == readbackbuff)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
@@ -480,7 +373,7 @@
 		u32FirmwareLength  	-= len;
 		mappedbuffer	   	+=len;
 	}/* end of while (u32FirmwareLength && !retval) */
-	bcm_kfree(readbackbuff);
+	kfree(readbackbuff);
 	return retval;
 }
 
@@ -506,5 +399,4 @@
 	return status;
 }
 
-#endif
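Aside: the (PCHAR)/(PUCHAR) casts on kmalloc()/kzalloc() are dropped throughout InterfaceDld.c because the allocators return void *, which converts implicitly in C; checking the result is what matters. A minimal sketch using the driver's MAX_TRANSFER_CTRL_BYTE_USB constant (demo_alloc is a hypothetical helper):

#include <linux/slab.h>

static int demo_alloc(void)
{
	char *buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);

	if (!buff)
		return -ENOMEM;

	/* ... use buff ... */
	kfree(buff);
	return 0;
}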
 
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 0750382..bf5c0ad 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -98,14 +98,6 @@
 			Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
 
 			wake_up(&Adapter->lowpower_mode_wait_queue);
-		#if 0
-			if(Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"LED Thread is Running. Hence Setting the LED Event as IDLEMODE_EXIT");
-				Adapter->DriverState = IDLEMODE_EXIT;
-				wake_up(&Adapter->LEDInfo.notify_led_event);
-			}
-		#endif
 
 		}
 		else
@@ -154,17 +146,7 @@
 	return status;
 }
 
-
-VOID InterfaceWriteIdleModeWakePattern(PMINI_ADAPTER Adapter)
-{
-/*	BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Low, 0x1d1e);
-	BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Low, 0x1d1e);
-	BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Upp, 0xd0ea);
-	BeceemWriteMemoryUshort(Adapter, Host2CPU_Mailbox_Upp, 0xd0ea);*/
-	return;
-}
-
-int InterfaceAbortIdlemode(PMINI_ADAPTER Adapter, unsigned int Pattern)
+static int InterfaceAbortIdlemode(PMINI_ADAPTER Adapter, unsigned int Pattern)
 {
 	int 	status = STATUS_SUCCESS;
 	unsigned int value;
diff --git a/drivers/staging/bcm/InterfaceIdleMode.h b/drivers/staging/bcm/InterfaceIdleMode.h
index 1bc723d..859a2ff 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.h
+++ b/drivers/staging/bcm/InterfaceIdleMode.h
@@ -7,8 +7,6 @@
 
 VOID InterfaceWriteIdleModeWakePattern(PMINI_ADAPTER Adapter);
 
-INT InterfaceAbortIdlemode(PMINI_ADAPTER Adapter, unsigned int Pattern);
-
 INT InterfaceWakeUp(PMINI_ADAPTER Adapter);
 
 VOID InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter);
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index 824f9a4..d78d5ef 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -1,54 +1,63 @@
 #include "headers.h"
 
 static struct usb_device_id InterfaceUsbtable[] = {
-    { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
+	{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
 	{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
 	{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
-    	{ USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
+	{ USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SM250) },
+	{ USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
 	{ USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
-    {}
+	{ USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) },
+	{ }
 };
+MODULE_DEVICE_TABLE(usb, InterfaceUsbtable);
 
-VOID InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
+static int debug = -1;
+module_param(debug, int, 0600);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const u32 default_msg =
+	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+	| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER Adapter);
+
+static void InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
 {
-	INT i = 0;
-	// Wake up the wait_queue...
-	if(psIntfAdapter->psAdapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY)
-	{
+	int i = 0;
+
+	/* Wake up the wait_queue... */
+	if (psIntfAdapter->psAdapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
 		psIntfAdapter->psAdapter->DriverState = DRIVER_HALT;
 		wake_up(&psIntfAdapter->psAdapter->LEDInfo.notify_led_event);
 	}
 	reset_card_proc(psIntfAdapter->psAdapter);
 
-	//worst case time taken by the RDM/WRM will be 5 sec. will check after every 100 ms
-	//to accertain the device is not being accessed. After this No RDM/WRM should be made.
-	while(psIntfAdapter->psAdapter->DeviceAccess)
-	{
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Device is being Accessed \n");
+	/*
+	 * Worst case, an RDM/WRM can take 5 sec; check every 100 ms to
+	 * ascertain the device is not being accessed. After this no RDM/WRM should be made.
+	 */
+	while (psIntfAdapter->psAdapter->DeviceAccess) {
+		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+			"Device is being accessed.\n");
 		msleep(100);
 	}
 	/* Free interrupt URB */
-	//psIntfAdapter->psAdapter->device_removed = TRUE;
-	if(psIntfAdapter->psInterruptUrb)
-	{
-		usb_free_urb(psIntfAdapter->psInterruptUrb);
-	}
+	/* psIntfAdapter->psAdapter->device_removed = TRUE; */
+	usb_free_urb(psIntfAdapter->psInterruptUrb);
 
 	/* Free transmit URBs */
-	for(i = 0; i < MAXIMUM_USB_TCB; i++)
-	{
-		if(psIntfAdapter->asUsbTcb[i].urb  != NULL)
-		{
+	for (i = 0; i < MAXIMUM_USB_TCB; i++) {
+		if (psIntfAdapter->asUsbTcb[i].urb  != NULL) {
 			usb_free_urb(psIntfAdapter->asUsbTcb[i].urb);
 			psIntfAdapter->asUsbTcb[i].urb = NULL;
 		}
 	}
 	/* Free receive URB and buffers */
-	for(i = 0; i < MAXIMUM_USB_RCB; i++)
-	{
-		if (psIntfAdapter->asUsbRcb[i].urb != NULL)
-		{
-			bcm_kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer);
+	for (i = 0; i < MAXIMUM_USB_RCB; i++) {
+		if (psIntfAdapter->asUsbRcb[i].urb != NULL) {
+			kfree(psIntfAdapter->asUsbRcb[i].urb->transfer_buffer);
 			usb_free_urb(psIntfAdapter->asUsbRcb[i].urb);
 			psIntfAdapter->asUsbRcb[i].urb = NULL;
 		}
@@ -56,151 +65,109 @@
 	AdapterFree(psIntfAdapter->psAdapter);
 }
 
-
-
-static int usbbcm_open(struct inode *inode, struct file *file)
+static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
 {
-	return 0;
-}
+	unsigned long ulReg = 0;
+	int ret;
 
-static int usbbcm_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
-static ssize_t usbbcm_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
-	return 0;
-}
-
-static ssize_t usbbcm_write(struct file *file, const char __user *user_buffer, size_t count, loff_t *ppos)
-{
-	return 0;
-}
-
-
-VOID ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
-{
-	ULONG ulReg = 0;
-
-// Program EP2 MAX_PKT_SIZE
+	/* Program EP2 MAX_PKT_SIZE */
 	ulReg = ntohl(EP2_MPS_REG);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x128,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x128, 4, TRUE);
 	ulReg = ntohl(EP2_MPS);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x12C,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x12C, 4, TRUE);
 
 	ulReg = ntohl(EP2_CFG_REG);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x132,4,TRUE);
-	if(((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter))->bHighSpeedDevice == TRUE)
-	{
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x132, 4, TRUE);
+	if (((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter))->bHighSpeedDevice == TRUE) {
 		ulReg = ntohl(EP2_CFG_INT);
-		BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x136,4,TRUE);
-	}
-	else
-	{
-// USE BULK EP as TX in FS mode.
+		BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
+	} else {
+		/* USE BULK EP as TX in FS mode. */
 		ulReg = ntohl(EP2_CFG_BULK);
-		BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x136,4,TRUE);
+		BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x136, 4, TRUE);
 	}
 
-
-// Program EP4 MAX_PKT_SIZE.
+	/* Program EP4 MAX_PKT_SIZE. */
 	ulReg = ntohl(EP4_MPS_REG);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x13C,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x13C, 4, TRUE);
 	ulReg = ntohl(EP4_MPS);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x140,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
 
-//	Program TX EP as interrupt (Alternate Setting)
-	if( rdmalt(Adapter,0x0F0110F8, (PUINT)&ulReg,4))
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reading of Tx EP is failing");
-		return ;
+	/* Program TX EP as interrupt(Alternate Setting) */
+	ret = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+	if (ret) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+			"reading of Tx EP failed\n");
+		return;
 	}
 	ulReg |= 0x6;
 
 	ulReg = ntohl(ulReg);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1CC,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1CC, 4, TRUE);
 
 	ulReg = ntohl(EP4_CFG_REG);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1C8,4,TRUE);
-// Program ISOCHRONOUS EP size to zero.
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C8, 4, TRUE);
+	/* Program ISOCHRONOUS EP size to zero. */
 	ulReg = ntohl(ISO_MPS_REG);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1D2,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D2, 4, TRUE);
 	ulReg = ntohl(ISO_MPS);
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1D6,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1D6, 4, TRUE);
 
-// Update EEPROM Version.
-// Read 4 bytes from 508 and modify 511 and 510.
-//
-	ReadBeceemEEPROM(Adapter,0x1FC,(PUINT)&ulReg);
+	/*
+	 * Update EEPROM Version.
+	 * Read 4 bytes from 508 and modify 511 and 510.
+	 */
+	ReadBeceemEEPROM(Adapter, 0x1FC, (PUINT)&ulReg);
 	ulReg &= 0x0101FFFF;
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1FC,4,TRUE);
-//
-//Update length field if required. Also make the string NULL terminated.
-//
-	ReadBeceemEEPROM(Adapter,0xA8,(PUINT)&ulReg);
-	if((ulReg&0x00FF0000)>>16 > 0x30)
-	{
-		ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
-		BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0xA8,4,TRUE);
-	}
-	ReadBeceemEEPROM(Adapter,0x148,(PUINT)&ulReg);
-	if((ulReg&0x00FF0000)>>16 > 0x30)
-	{
-		ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
-		BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x148,4,TRUE);
-	}
-	ulReg = 0;
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x122,4,TRUE);
-	ulReg = 0;
-	BeceemEEPROMBulkWrite(Adapter,(PUCHAR)&ulReg,0x1C2,4,TRUE);
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1FC, 4, TRUE);
 
+	/* Update length field if required. Also make the string NULL terminated. */
+
+	ReadBeceemEEPROM(Adapter, 0xA8, (PUINT)&ulReg);
+	if ((ulReg&0x00FF0000)>>16 > 0x30) {
+		ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
+		BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0xA8, 4, TRUE);
+	}
+	ReadBeceemEEPROM(Adapter, 0x148, (PUINT)&ulReg);
+	if ((ulReg&0x00FF0000)>>16 > 0x30) {
+		ulReg = (ulReg&0xFF00FFFF)|(0x30<<16);
+		BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x148, 4, TRUE);
+	}
+	ulReg = 0;
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x122, 4, TRUE);
+	ulReg = 0;
+	BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x1C2, 4, TRUE);
 }
 
-static struct file_operations usbbcm_fops = {
-    .open    =  usbbcm_open,
-    .release =  usbbcm_release,
-    .read    =  usbbcm_read,
-    .write   =  usbbcm_write,
-    .owner   =  THIS_MODULE,
-	.llseek = no_llseek,
-};
-
-static struct usb_class_driver usbbcm_class = {
-    .name =     	"usbbcm",
-    .fops =     	&usbbcm_fops,
-    .minor_base =   BCM_USB_MINOR_BASE,
-};
-
 static int
 usbbcm_device_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
-	int retval =0 ;
-   	PMINI_ADAPTER psAdapter = NULL;
-	PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
-	struct usb_device      *udev = NULL;
+	struct usb_device *udev = interface_to_usbdev(intf);
+	int retval;
+	PMINI_ADAPTER psAdapter;
+	PS_INTERFACE_ADAPTER psIntfAdapter;
+	struct net_device *ndev;
 
-//	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Usbbcm probe!!");
-	if((intf == NULL) || (id == NULL))
-	{
-	//	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "intf or id is NULL");
-		return -EINVAL;
-	}
-
-	/* Allocate Adapter structure */
-	if((psAdapter = kzalloc(sizeof(MINI_ADAPTER), GFP_KERNEL)) == NULL)
-	{
-		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0, "Out of memory");
+	/* Reserve one extra queue for the bit-bucket */
+	ndev = alloc_etherdev_mq(sizeof(MINI_ADAPTER), NO_OF_QUEUES+1);
+	if (ndev == NULL) {
+		dev_err(&udev->dev, DRV_NAME ": no memory for device\n");
 		return -ENOMEM;
 	}
 
-    /* Init default driver debug state */
+	SET_NETDEV_DEV(ndev, &intf->dev);
 
-    psAdapter->stDebugState.debug_level = DBG_LVL_CURR;
+	psAdapter = netdev_priv(ndev);
+	psAdapter->dev = ndev;
+	psAdapter->msg_enable = netif_msg_init(debug, default_msg);
+
+	/* Init default driver debug state */
+
+	psAdapter->stDebugState.debug_level = DBG_LVL_CURR;
 	psAdapter->stDebugState.type = DBG_TYPE_INITEXIT;
-	memset (psAdapter->stDebugState.subtype, 0, sizeof (psAdapter->stDebugState.subtype));
 
-    /* Technically, one can start using BCM_DEBUG_PRINT after this point.
+	/*
+	 * Technically, one can start using BCM_DEBUG_PRINT after this point.
 	 * However, realize that by default the Type/Subtype bitmaps are all zero now;
 	 * so no prints will actually appear until the TestApp turns on debug paths via
 	 * the ioctl(); so practically speaking, in early init, no logging happens.
@@ -211,160 +178,128 @@
 	 * Further, we turn this OFF once init_module() completes.
 	 */
 
-    psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff;
+	psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0xff;
 	BCM_SHOW_DEBUG_BITMAP(psAdapter);
 
 	retval = InitAdapter(psAdapter);
-	if(retval)
-	{
-		BCM_DEBUG_PRINT (psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "InitAdapter Failed\n");
+	if (retval) {
+		dev_err(&udev->dev, DRV_NAME ": InitAdapter Failed\n");
 		AdapterFree(psAdapter);
 		return retval;
 	}
 
 	/* Allocate interface adapter structure */
-	if((psAdapter->pvInterfaceAdapter =
-		kmalloc(sizeof(S_INTERFACE_ADAPTER), GFP_KERNEL)) == NULL)
-	{
-		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0, "Out of memory");
-		AdapterFree (psAdapter);
+	psIntfAdapter = kzalloc(sizeof(S_INTERFACE_ADAPTER), GFP_KERNEL);
+	if (psIntfAdapter == NULL) {
+		dev_err(&udev->dev, DRV_NAME ": no memory for Interface adapter\n");
+		AdapterFree(psAdapter);
 		return -ENOMEM;
 	}
-	memset(psAdapter->pvInterfaceAdapter, 0, sizeof(S_INTERFACE_ADAPTER));
 
-	psIntfAdapter = InterfaceAdapterGet(psAdapter);
+	psAdapter->pvInterfaceAdapter = psIntfAdapter;
 	psIntfAdapter->psAdapter = psAdapter;
 
 	/* Store usb interface in Interface Adapter */
 	psIntfAdapter->interface = intf;
 	usb_set_intfdata(intf, psIntfAdapter);
 
-	BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "psIntfAdapter 0x%p",psIntfAdapter);
+	BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+		"psIntfAdapter 0x%p\n", psIntfAdapter);
 	retval = InterfaceAdapterInit(psIntfAdapter);
-	if(retval)
-	{
+	if (retval) {
 		/* If the Firmware/Cfg File is not present
- 		 * then return success, let the application
- 		 * download the files.
- 		 */
-		if(-ENOENT == retval){
-			BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "File Not Found, Use App to Download\n");
+		 * then return success, let the application
+		 * download the files.
+		 */
+		if (-ENOENT == retval) {
+			BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+				"File Not Found.  Use app to download.\n");
 			return STATUS_SUCCESS;
 		}
-		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "InterfaceAdapterInit Failed \n");
+		BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+			"InterfaceAdapterInit failed.\n");
 		usb_set_intfdata(intf, NULL);
-		udev = interface_to_usbdev (intf);
+		udev = interface_to_usbdev(intf);
 		usb_put_dev(udev);
-		if(psAdapter->bUsbClassDriverRegistered == TRUE)
-				usb_deregister_dev (intf, &usbbcm_class);
 		InterfaceAdapterFree(psIntfAdapter);
-		return retval ;
+		return retval;
 	}
-	if(psAdapter->chip_id > T3)
-	{
-		uint32_t uiNackZeroLengthInt=4;
-		if(wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, &uiNackZeroLengthInt, sizeof(uiNackZeroLengthInt)))
-		{
-			return -EIO;;
-		}
+	if (psAdapter->chip_id > T3) {
+		uint32_t uiNackZeroLengthInt = 4;
+
+		retval = wrmalt(psAdapter, DISABLE_USB_ZERO_LEN_INT, &uiNackZeroLengthInt, sizeof(uiNackZeroLengthInt));
+		if (retval)
+			return retval;
 	}
 
-	udev = interface_to_usbdev (intf);
 	/* Check whether the USB-Device Supports remote Wake-Up */
-	if(USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes)
-	{
+	if (USB_CONFIG_ATT_WAKEUP & udev->actconfig->desc.bmAttributes) {
 		/* If Suspend then only support dynamic suspend */
-		if(psAdapter->bDoSuspend)
-		{
+		if (psAdapter->bDoSuspend) {
 #ifdef CONFIG_PM
-			udev->autosuspend_delay = 0;
+			pm_runtime_set_autosuspend_delay(&udev->dev, 0);
 			intf->needs_remote_wakeup = 1;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- 			udev->autosuspend_disabled = 0;
-#else
 			usb_enable_autosuspend(udev);
-#endif
- 			device_init_wakeup(&intf->dev,1);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
- 			usb_autopm_disable(intf);
-#endif
+			device_init_wakeup(&intf->dev, 1);
 			INIT_WORK(&psIntfAdapter->usbSuspendWork, putUsbSuspend);
-			BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Enabling USB Auto-Suspend\n");
+			BCM_DEBUG_PRINT(psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+				"Enabling USB Auto-Suspend\n");
 #endif
-		}
-		else
-		{
+		} else {
 			intf->needs_remote_wakeup = 0;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- 			udev->autosuspend_disabled = 1;
-#else
 			usb_disable_autosuspend(udev);
-#endif
 		}
 	}
 
-    psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0;
-    return retval;
+	psAdapter->stDebugState.subtype[DBG_TYPE_INITEXIT] = 0x0;
+	return retval;
 }
 
-static void usbbcm_disconnect (struct usb_interface *intf)
+static void usbbcm_disconnect(struct usb_interface *intf)
 {
-	PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
-	PMINI_ADAPTER psAdapter = NULL;
-	struct usb_device       *udev = NULL;
-    PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
+	PS_INTERFACE_ADAPTER psIntfAdapter = usb_get_intfdata(intf);
+	PMINI_ADAPTER psAdapter;
+	struct usb_device  *udev = interface_to_usbdev(intf);
 
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Usb disconnected");
-	if(intf == NULL)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "intf pointer is NULL");
+	if (psIntfAdapter == NULL)
 		return;
-	}
-	psIntfAdapter = usb_get_intfdata(intf);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "psIntfAdapter 0x%p",psIntfAdapter);
-	if(psIntfAdapter == NULL)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "InterfaceAdapter pointer is NULL");
-		return;
-	}
+
 	psAdapter = psIntfAdapter->psAdapter;
-	if(psAdapter->bDoSuspend)
+	netif_device_detach(psAdapter->dev);
+
+	if (psAdapter->bDoSuspend)
 		intf->needs_remote_wakeup = 0;
 
 	psAdapter->device_removed = TRUE ;
 	usb_set_intfdata(intf, NULL);
 	InterfaceAdapterFree(psIntfAdapter);
-	udev = interface_to_usbdev (intf);
 	usb_put_dev(udev);
-	usb_deregister_dev (intf, &usbbcm_class);
 }
 
-
-static __inline int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
+static int AllocUsbCb(PS_INTERFACE_ADAPTER psIntfAdapter)
 {
 	int i = 0;
-	for(i = 0; i < MAXIMUM_USB_TCB; i++)
-	{
-		if((psIntfAdapter->asUsbTcb[i].urb =
-				usb_alloc_urb(0, GFP_KERNEL)) == NULL)
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cant allocate Tx urb for index %d", i);
+
+	for (i = 0; i < MAXIMUM_USB_TCB; i++) {
+		if ((psIntfAdapter->asUsbTcb[i].urb =
+				usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+				"Can't allocate Tx urb for index %d\n", i);
 			return -ENOMEM;
 		}
 	}
 
-	for(i = 0; i < MAXIMUM_USB_RCB; i++)
-	{
+	for (i = 0; i < MAXIMUM_USB_RCB; i++) {
 		if ((psIntfAdapter->asUsbRcb[i].urb =
-				usb_alloc_urb(0, GFP_KERNEL)) == NULL)
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cant allocate Rx urb for index %d", i);
+				usb_alloc_urb(0, GFP_KERNEL)) == NULL) {
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+				"Can't allocate Rx urb for index %d\n", i);
 			return -ENOMEM;
 		}
-		if((psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
-			kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL)) == NULL)
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cant allocate Rx buffer for index %d", i);
+		if ((psIntfAdapter->asUsbRcb[i].urb->transfer_buffer =
+			kmalloc(MAX_DATA_BUFFER_SIZE, GFP_KERNEL)) == NULL) {
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+				"Can't allocate Rx buffer for index %d\n", i);
 			return -ENOMEM;
 		}
 		psIntfAdapter->asUsbRcb[i].urb->transfer_buffer_length = MAX_DATA_BUFFER_SIZE;
@@ -372,77 +307,41 @@
 	return 0;
 }
 
-
-
 static int device_run(PS_INTERFACE_ADAPTER psIntfAdapter)
 {
-	INT value = 0;
+	int value = 0;
 	UINT status = STATUS_SUCCESS;
 
 	status = InitCardAndDownloadFirmware(psIntfAdapter->psAdapter);
-	if(status != STATUS_SUCCESS)
-	{
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "InitCardAndDownloadFirmware failed.\n");
+	if (status != STATUS_SUCCESS) {
+		pr_err(DRV_NAME ": InitCardAndDownloadFirmware failed.\n");
 		return status;
 	}
-	if(TRUE == psIntfAdapter->psAdapter->fw_download_done)
-	{
-
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Sending first interrupt URB down......");
-		if(StartInterruptUrb(psIntfAdapter))
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Cannot send interrupt in URB");
+	if (TRUE == psIntfAdapter->psAdapter->fw_download_done) {
+		if (StartInterruptUrb(psIntfAdapter)) {
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+			"Cannot send interrupt in URB\n");
 		}
-		//now register the cntrl interface.
-		//after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
 
+		/*
+		 * now register the cntrl interface.
+		 * after downloading the f/w waiting for 5 sec to get the mailbox interrupt.
+		 */
 		psIntfAdapter->psAdapter->waiting_to_fw_download_done = FALSE;
 		value = wait_event_timeout(psIntfAdapter->psAdapter->ioctl_fw_dnld_wait_queue,
 					psIntfAdapter->psAdapter->waiting_to_fw_download_done, 5*HZ);
 
-		if(value == 0)
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Mailbox Interrupt has not reached to Driver..");
-		}
-		else
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Got the mailbox interrupt ...Registering control interface...\n ");
-		}
-		if(register_control_device_interface(psIntfAdapter->psAdapter) < 0)
-		{
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Register Control Device failed...");
+		if (value == 0)
+			pr_err(DRV_NAME ": Timeout waiting for mailbox interrupt.\n");
+
+		if (register_control_device_interface(psIntfAdapter->psAdapter) < 0) {
+			pr_err(DRV_NAME ": Register Control Device failed.\n");
 			return -EIO;
 		}
 	}
 	return 0;
 }
 
-#if 0
-static void	print_usb_interface_desc(struct usb_interface_descriptor *usb_intf_desc)
-{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "**************** INTERFACE DESCRIPTOR *********************");
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bLength: %x", usb_intf_desc->bLength);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bDescriptorType: %x", usb_intf_desc->bDescriptorType);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceNumber: %x", usb_intf_desc->bInterfaceNumber);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bAlternateSetting: %x", usb_intf_desc->bAlternateSetting);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bNumEndpoints: %x", usb_intf_desc->bNumEndpoints);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceClass: %x", usb_intf_desc->bInterfaceClass);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceSubClass: %x", usb_intf_desc->bInterfaceSubClass);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterfaceProtocol: %x", usb_intf_desc->bInterfaceProtocol);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "iInterface :%x\n",usb_intf_desc->iInterface);
-}
-static void	print_usb_endpoint_descriptor(struct usb_endpoint_descriptor *usb_ep_desc)
-{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "**************** ENDPOINT DESCRIPTOR *********************");
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bLength  :%x ", usb_ep_desc->bLength);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bDescriptorType  :%x ", usb_ep_desc->bDescriptorType);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bEndpointAddress  :%x ", usb_ep_desc->bEndpointAddress);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bmAttributes  :%x ", usb_ep_desc->bmAttributes);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "wMaxPacketSize  :%x ",usb_ep_desc->wMaxPacketSize);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "bInterval  :%x ",usb_ep_desc->bInterval);
-}
-
-#endif
 
 static inline int bcm_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
 {
@@ -518,124 +417,111 @@
 	return (bcm_usb_endpoint_xfer_isoc(epd) && bcm_usb_endpoint_dir_out(epd));
 }
 
-INT InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
+static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
 {
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor *endpoint;
 	size_t buffer_size;
-	ULONG value;
-	INT retval = 0;
-	INT usedIntOutForBulkTransfer = 0 ;
+	unsigned long value;
+	int retval = 0;
+	int usedIntOutForBulkTransfer = 0;
 	BOOLEAN bBcm16 = FALSE;
 	UINT uiData = 0;
 
 	/* Store the usb dev into interface adapter */
-	psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(
-								psIntfAdapter->interface));
+	psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
 
-	if((psIntfAdapter->udev->speed == USB_SPEED_HIGH))
-	{
-		psIntfAdapter->bHighSpeedDevice = TRUE ;
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "MODEM IS CONFIGURED TO HIGH_SPEED ");
-	}
-	else
-	{
-		psIntfAdapter->bHighSpeedDevice = FALSE ;
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "MODEM IS CONFIGURED TO FULL_SPEED ");
-	}
-
+	psIntfAdapter->bHighSpeedDevice = (psIntfAdapter->udev->speed == USB_SPEED_HIGH);
 	psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
 	psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
 
-	if(rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG, (PUINT)&(psIntfAdapter->psAdapter->chip_id), sizeof(UINT)) < 0)
-	{
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
-		return STATUS_FAILURE;
-	}
-    if(0xbece3200==(psIntfAdapter->psAdapter->chip_id&~(0xF0)))
-	{
-		psIntfAdapter->psAdapter->chip_id=(psIntfAdapter->psAdapter->chip_id&~(0xF0));
+	retval = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
+			(u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
+	if (retval) {
+		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
+		return retval;
 	}
 
-	BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "First RDM Chip ID 0x%lx\n", psIntfAdapter->psAdapter->chip_id);
+	if (0xbece3200 == (psIntfAdapter->psAdapter->chip_id & ~(0xF0)))
+		psIntfAdapter->psAdapter->chip_id &= ~0xF0;
 
-    iface_desc = psIntfAdapter->interface->cur_altsetting;
-	//print_usb_interface_desc(&(iface_desc->desc));
+	dev_info(&psIntfAdapter->udev->dev, "RDM Chip ID 0x%lx\n",
+		 psIntfAdapter->psAdapter->chip_id);
 
-	if(psIntfAdapter->psAdapter->chip_id == T3B)
-	{
+	iface_desc = psIntfAdapter->interface->cur_altsetting;
 
-		//
-		//T3B device will have EEPROM,check if EEPROM is proper and BCM16 can be done or not.
-		//
-		BeceemEEPROMBulkRead(psIntfAdapter->psAdapter,&uiData,0x0,4);
-		if(uiData == BECM)
-		{
+	if (psIntfAdapter->psAdapter->chip_id == T3B) {
+		/* T3B device will have EEPROM, check if EEPROM is proper and BCM16 can be done or not. */
+		BeceemEEPROMBulkRead(psIntfAdapter->psAdapter, &uiData, 0x0, 4);
+		if (uiData == BECM)
 			bBcm16 = TRUE;
-		}
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Number of Altsetting aviailable for This Modem 0x%x\n", psIntfAdapter->interface->num_altsetting);
-		if(bBcm16 == TRUE)
-		{
-			//selecting alternate setting one as a default setting for High Speed  modem.
-			if(psIntfAdapter->bHighSpeedDevice)
-				retval= usb_set_interface(psIntfAdapter->udev,DEFAULT_SETTING_0,ALTERNATE_SETTING_1);
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "BCM16 is Applicable on this dongle");
-			if(retval || (psIntfAdapter->bHighSpeedDevice == FALSE))
-			{
+
+		dev_info(&psIntfAdapter->udev->dev, "number of alternate setting %d\n",
+			 psIntfAdapter->interface->num_altsetting);
+
+		if (bBcm16 == TRUE) {
+			/* selecting alternate setting one as a default setting for High Speed modem. */
+			if (psIntfAdapter->bHighSpeedDevice)
+				retval = usb_set_interface(psIntfAdapter->udev, DEFAULT_SETTING_0, ALTERNATE_SETTING_1);
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+				"BCM16 is applicable on this dongle\n");
+			if (retval || (psIntfAdapter->bHighSpeedDevice == FALSE)) {
 				usedIntOutForBulkTransfer = EP2 ;
 				endpoint = &iface_desc->endpoint[EP2].desc;
-				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Interface altsetting  got failed or Moemd is configured to FS.hence will work on default setting 0 \n");
+				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+					 "Interface altsetting failed or modem is configured to Full Speed, hence will work on default setting 0\n");
 				/*
-				If Modem is high speed device EP2 should be INT OUT End point
-				If Mode is FS then EP2 should be bulk end point
-				*/
-				if(((psIntfAdapter->bHighSpeedDevice ==TRUE ) && (bcm_usb_endpoint_is_int_out(endpoint)== FALSE))
-					||((psIntfAdapter->bHighSpeedDevice == FALSE)&& (bcm_usb_endpoint_is_bulk_out(endpoint)== FALSE)))
-				{
-					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Configuring the EEPROM ");
-					//change the EP2, EP4 to INT OUT end point
+				 * If Modem is high speed device EP2 should be INT OUT End point
+				 * If Mode is FS then EP2 should be bulk end point
+				 */
+				if (((psIntfAdapter->bHighSpeedDevice == TRUE) && (bcm_usb_endpoint_is_int_out(endpoint) == FALSE))
+					|| ((psIntfAdapter->bHighSpeedDevice == FALSE) && (bcm_usb_endpoint_is_bulk_out(endpoint) == FALSE))) {
+					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+						"Configuring the EEPROM\n");
+					/* change the EP2, EP4 to INT OUT end point */
 					ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
 
 					/*
-					It resets the device and if any thing gets changed in USB descriptor it will show fail and
-					re-enumerate the device
-					*/
+					 * It resets the device and if anything gets changed
+					 * in the USB descriptor it will show fail and
+					 * re-enumerate the device
+					 */
 					retval = usb_reset_device(psIntfAdapter->udev);
-					if(retval)
-					{
-						BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reset got failed. hence Re-enumerating the device \n");
+					if (retval) {
+						BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+							"reset failed.  Re-enumerating the device.\n");
 						return retval ;
 					}
 
 				}
-				if((psIntfAdapter->bHighSpeedDevice == FALSE) && bcm_usb_endpoint_is_bulk_out(endpoint))
-				{
-					// Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail.
+				if ((psIntfAdapter->bHighSpeedDevice == FALSE) && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+					/* Once BULK is selected in FS mode. Revert it back to INT. Else USB_IF will fail. */
 					UINT _uiData = ntohl(EP2_CFG_INT);
-					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,"Reverting Bulk to INT as it is FS MODE");
-					BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter,(PUCHAR)&_uiData,0x136,4,TRUE);
+					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+						"Reverting Bulk to INT as it is in Full Speed mode.\n");
+					BeceemEEPROMBulkWrite(psIntfAdapter->psAdapter, (PUCHAR)&_uiData, 0x136, 4, TRUE);
 				}
-			}
-			else
-			{
+			} else {
 				usedIntOutForBulkTransfer = EP4 ;
 				endpoint = &iface_desc->endpoint[EP4].desc;
-				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Choosing AltSetting as a default setting");
-				if( bcm_usb_endpoint_is_int_out(endpoint) == FALSE)
-				{
-					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, " Dongle does not have BCM16 Fix");
-					//change the EP2, EP4 to INT OUT end point and use EP4 in altsetting
+				BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+					"Choosing AltSetting as a default setting.\n");
+				if (bcm_usb_endpoint_is_int_out(endpoint) == FALSE) {
+					BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+						"Dongle does not have BCM16 Fix.\n");
+					/* change the EP2, EP4 to INT OUT end point and use EP4 in altsetting */
 					ConfigureEndPointTypesThroughEEPROM(psIntfAdapter->psAdapter);
 
 					/*
-					It resets the device and if any thing gets changed in USB descriptor it will show fail and
-					re-enumerate the device
-					*/
+					 * It resets the device and if anything gets changed in
+					 * the USB descriptor it will show fail and re-enumerate
+					 * the device
+					 */
 					retval = usb_reset_device(psIntfAdapter->udev);
-					if(retval)
-					{
-						BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "reset got failed. hence Re-enumerating the device \n");
-						return retval ;
+					if (retval) {
+						BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+							"reset failed.  Re-enumerating the device.\n");
+						return retval;
 					}
 
 				}
@@ -644,98 +530,66 @@
 	}
 
 	iface_desc = psIntfAdapter->interface->cur_altsetting;
-	//print_usb_interface_desc(&(iface_desc->desc));
-   	BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Current number of endpoints :%x \n", iface_desc->desc.bNumEndpoints);
-    for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value)
-	{
-        endpoint = &iface_desc->endpoint[value].desc;
-		//print_usb_endpoint_descriptor(endpoint);
 
-        if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && bcm_usb_endpoint_is_bulk_in(endpoint))
-        {
-            buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
-            psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
-            psIntfAdapter->sBulkIn.bulk_in_endpointAddr =
-								endpoint->bEndpointAddress;
-	    	psIntfAdapter->sBulkIn.bulk_in_pipe =
+	for (value = 0; value < iface_desc->desc.bNumEndpoints; ++value) {
+		endpoint = &iface_desc->endpoint[value].desc;
+
+		if (!psIntfAdapter->sBulkIn.bulk_in_endpointAddr && bcm_usb_endpoint_is_bulk_in(endpoint)) {
+			buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+			psIntfAdapter->sBulkIn.bulk_in_size = buffer_size;
+			psIntfAdapter->sBulkIn.bulk_in_endpointAddr = endpoint->bEndpointAddress;
+			psIntfAdapter->sBulkIn.bulk_in_pipe =
 					usb_rcvbulkpipe(psIntfAdapter->udev,
 								psIntfAdapter->sBulkIn.bulk_in_endpointAddr);
-        }
+		}
 
-        if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && bcm_usb_endpoint_is_bulk_out(endpoint))
-        {
-
-			psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
-										endpoint->bEndpointAddress;
-	    	psIntfAdapter->sBulkOut.bulk_out_pipe =
-			usb_sndbulkpipe(psIntfAdapter->udev,
+		if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr && bcm_usb_endpoint_is_bulk_out(endpoint)) {
+			psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
+			psIntfAdapter->sBulkOut.bulk_out_pipe =
+				usb_sndbulkpipe(psIntfAdapter->udev,
 					psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
-        }
+		}
 
-        if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && bcm_usb_endpoint_is_int_in(endpoint))
-        {
-            buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
-            psIntfAdapter->sIntrIn.int_in_size = buffer_size;
-            psIntfAdapter->sIntrIn.int_in_endpointAddr =
-								endpoint->bEndpointAddress;
-            psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
-            psIntfAdapter->sIntrIn.int_in_buffer =
+		if (!psIntfAdapter->sIntrIn.int_in_endpointAddr && bcm_usb_endpoint_is_int_in(endpoint)) {
+			buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+			psIntfAdapter->sIntrIn.int_in_size = buffer_size;
+			psIntfAdapter->sIntrIn.int_in_endpointAddr = endpoint->bEndpointAddress;
+			psIntfAdapter->sIntrIn.int_in_interval = endpoint->bInterval;
+			psIntfAdapter->sIntrIn.int_in_buffer =
 						kmalloc(buffer_size, GFP_KERNEL);
-            if (!psIntfAdapter->sIntrIn.int_in_buffer) {
-                BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Could not allocate interrupt_in_buffer");
-                return -EINVAL;
-            }
-			//psIntfAdapter->sIntrIn.int_in_pipe =
-        }
-
-        if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint))
-        {
-
-			if( !psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
-				(psIntfAdapter->psAdapter->chip_id == T3B) && (value == usedIntOutForBulkTransfer))
-			{
-				//use first intout end point as a bulk out end point
-            	buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
-            	psIntfAdapter->sBulkOut.bulk_out_size = buffer_size;
-				//printk("\nINT OUT Endpoing buffer size :%x endpoint :%x\n", buffer_size, value +1);
-				psIntfAdapter->sBulkOut.bulk_out_endpointAddr =
-										endpoint->bEndpointAddress;
-	    		psIntfAdapter->sBulkOut.bulk_out_pipe =
-				usb_sndintpipe(psIntfAdapter->udev,
-					psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
-          	  	psIntfAdapter->sBulkOut.int_out_interval = endpoint->bInterval;
-
+			if (!psIntfAdapter->sIntrIn.int_in_buffer) {
+				dev_err(&psIntfAdapter->udev->dev,
+					"could not allocate interrupt_in_buffer\n");
+				return -EINVAL;
 			}
-			else if(value == EP6)
-			{
-	            buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
-	            psIntfAdapter->sIntrOut.int_out_size = buffer_size;
-	            psIntfAdapter->sIntrOut.int_out_endpointAddr =
-										endpoint->bEndpointAddress;
-	            psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
-	            psIntfAdapter->sIntrOut.int_out_buffer= kmalloc(buffer_size,
-														GFP_KERNEL);
-	            	if (!psIntfAdapter->sIntrOut.int_out_buffer)
-					{
-	                BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Could not allocate interrupt_out_buffer");
-	                return -EINVAL;
-            }
-        }
-    }
+		}
+
+		if (!psIntfAdapter->sIntrOut.int_out_endpointAddr && bcm_usb_endpoint_is_int_out(endpoint)) {
+			if (!psIntfAdapter->sBulkOut.bulk_out_endpointAddr &&
+				(psIntfAdapter->psAdapter->chip_id == T3B) && (value == usedIntOutForBulkTransfer)) {
+				/* use first intout end point as a bulk out end point */
+				buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+				psIntfAdapter->sBulkOut.bulk_out_size = buffer_size;
+				psIntfAdapter->sBulkOut.bulk_out_endpointAddr = endpoint->bEndpointAddress;
+				psIntfAdapter->sBulkOut.bulk_out_pipe = usb_sndintpipe(psIntfAdapter->udev,
+									psIntfAdapter->sBulkOut.bulk_out_endpointAddr);
+				psIntfAdapter->sBulkOut.int_out_interval = endpoint->bInterval;
+			} else if (value == EP6) {
+				buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+				psIntfAdapter->sIntrOut.int_out_size = buffer_size;
+				psIntfAdapter->sIntrOut.int_out_endpointAddr = endpoint->bEndpointAddress;
+				psIntfAdapter->sIntrOut.int_out_interval = endpoint->bInterval;
+				psIntfAdapter->sIntrOut.int_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
+				if (!psIntfAdapter->sIntrOut.int_out_buffer) {
+					dev_err(&psIntfAdapter->udev->dev,
+						"could not allocate interrupt_out_buffer\n");
+					return -EINVAL;
+				}
+			}
+		}
 	}
-    usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter);
-    retval = usb_register_dev(psIntfAdapter->interface, &usbbcm_class);
-	if(retval)
-	{
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "usb register dev failed = %d", retval);
-		psIntfAdapter->psAdapter->bUsbClassDriverRegistered = FALSE;
-		return retval;
-	}
-	else
-	{
-		psIntfAdapter->psAdapter->bUsbClassDriverRegistered = TRUE;
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "usb dev registered");
-	}
+
+	usb_set_intfdata(psIntfAdapter->interface, psIntfAdapter);
 
 	psIntfAdapter->psAdapter->bcm_file_download = InterfaceFileDownload;
 	psIntfAdapter->psAdapter->bcm_file_readback_from_chip =
@@ -744,67 +598,51 @@
 
 	retval = CreateInterruptUrb(psIntfAdapter);
 
-	if(retval)
-	{
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cannot create interrupt urb");
+	if (retval) {
+		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0,
+			"Cannot create interrupt urb\n");
 		return retval;
 	}
 
 	retval = AllocUsbCb(psIntfAdapter);
-	if(retval)
-	{
+	if (retval)
 		return retval;
-	}
 
-
-	retval = device_run(psIntfAdapter);
-	if(retval)
-	{
-		return retval;
-	}
-
-
-	return 0;
+	return device_run(psIntfAdapter);
 }
 
-static int InterfaceSuspend (struct usb_interface *intf, pm_message_t message)
+static int InterfaceSuspend(struct usb_interface *intf, pm_message_t message)
 {
 	PS_INTERFACE_ADAPTER  psIntfAdapter = usb_get_intfdata(intf);
-	BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "=================================\n");
-	//Bcm_kill_all_URBs(psIntfAdapter);
+
 	psIntfAdapter->bSuspended = TRUE;
 
-	if(TRUE == psIntfAdapter->bPreparingForBusSuspend)
-	{
+	if (TRUE == psIntfAdapter->bPreparingForBusSuspend) {
 		psIntfAdapter->bPreparingForBusSuspend = FALSE;
 
-		if(psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE)
-		{
+		if (psIntfAdapter->psAdapter->LinkStatus == LINKUP_DONE) {
 			psIntfAdapter->psAdapter->IdleMode = TRUE ;
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Host Entered in PMU Idle Mode..");
-		}
-		else
-		{
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+				"Host Entered in PMU Idle Mode.\n");
+		} else {
 			psIntfAdapter->psAdapter->bShutStatus = TRUE;
-			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Host Entered in PMU Shutdown Mode..");
+			BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
+				"Host Entered in PMU Shutdown Mode.\n");
 		}
 	}
 	psIntfAdapter->psAdapter->bPreparingForLowPowerMode = FALSE;
 
-	//Signaling the control pkt path
+	/* Signaling the control pkt path */
 	wake_up(&psIntfAdapter->psAdapter->lowpower_mode_wait_queue);
 
 	return 0;
 }
 
-static int InterfaceResume (struct usb_interface *intf)
+static int InterfaceResume(struct usb_interface *intf)
 {
-    PS_INTERFACE_ADAPTER  psIntfAdapter = usb_get_intfdata(intf);
-	printk("=================================\n");
+	PS_INTERFACE_ADAPTER  psIntfAdapter = usb_get_intfdata(intf);
 	mdelay(100);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
- 	intf->pm_usage_cnt =1 ;
-#endif
+
 	psIntfAdapter->bSuspended = FALSE;
 
 	StartInterruptUrb(psIntfAdapter);
@@ -812,57 +650,41 @@
 	return 0;
 }
 
-static int InterfacePreReset(struct usb_interface *intf)
-{
-    printk("====================>");
-	return STATUS_SUCCESS;
-}
-
-static int InterfacePostReset(struct usb_interface *intf)
-{
-    printk("Do Post chip reset setting here if it is required");
-   	return STATUS_SUCCESS;
-}
 static struct usb_driver usbbcm_driver = {
-    .name = "usbbcm",
-    .probe = usbbcm_device_probe,
-    .disconnect = usbbcm_disconnect,
-    .suspend = InterfaceSuspend,
-    .resume = InterfaceResume,
-	.pre_reset=InterfacePreReset,
-	.post_reset=InterfacePostReset,
-    .id_table = InterfaceUsbtable,
-    .supports_autosuspend = 1,
+	.name = "usbbcm",
+	.probe = usbbcm_device_probe,
+	.disconnect = usbbcm_disconnect,
+	.suspend = InterfaceSuspend,
+	.resume = InterfaceResume,
+	.id_table = InterfaceUsbtable,
+	.supports_autosuspend = 1,
 };
 
+struct class *bcm_class;
 
-/*
-Function:				InterfaceInitialize
-
-Description:			This is the hardware specific initialization Function.
-						Registering the driver with NDIS , other device specific NDIS
-						and hardware initializations are done here.
-
-Input parameters:		IN PMINI_ADAPTER Adapter   - Miniport Adapter Context
-
-
-Return:					BCM_STATUS_SUCCESS - If Initialization of the
-						HW Interface was successful.
-						Other           - If an error occured.
-*/
-INT InterfaceInitialize(void)
+static int __init bcm_init(void)
 {
-//	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Registering Usb driver!!");
+	printk(KERN_INFO "%s: %s, %s\n", DRV_NAME, DRV_DESCRIPTION, DRV_VERSION);
+	printk(KERN_INFO "%s\n", DRV_COPYRIGHT);
+
+	bcm_class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(bcm_class)) {
+		printk(KERN_ERR DRV_NAME ": could not create class\n");
+		return PTR_ERR(bcm_class);
+	}
+
 	return usb_register(&usbbcm_driver);
 }
 
-INT InterfaceExit(void)
+static void __exit bcm_exit(void)
 {
-	//PMINI_ADAPTER psAdapter = NULL;
-	int status = 0;
-
-	//BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL, "Deregistering Usb driver!!");
 	usb_deregister(&usbbcm_driver);
-	return status;
+	class_destroy(bcm_class);
 }
-MODULE_LICENSE ("GPL");
+
+module_init(bcm_init);
+module_exit(bcm_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
index e7a96e5..058315a 100644
--- a/drivers/staging/bcm/InterfaceInit.h
+++ b/drivers/staging/bcm/InterfaceInit.h
@@ -8,9 +8,11 @@
 #define BCM_USB_PRODUCT_ID_T3 	0x0300
 #define BCM_USB_PRODUCT_ID_T3B 	0x0210
 #define BCM_USB_PRODUCT_ID_T3L 	0x0220
+#define BCM_USB_PRODUCT_ID_SM250 	0xbccd
 #define BCM_USB_PRODUCT_ID_SYM  0x15E
 #define BCM_USB_PRODUCT_ID_1901 0xe017
 #define BCM_USB_PRODUCT_ID_226  0x0132
+#define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007
 
 #define BCM_USB_MINOR_BASE 		192
 
@@ -19,33 +21,7 @@
 
 INT InterfaceExit(void);
 
-#ifndef BCM_SHM_INTERFACE
-INT InterfaceAdapterInit(PS_INTERFACE_ADAPTER Adapter);
-
 INT usbbcm_worker_thread(PS_INTERFACE_ADAPTER psIntfAdapter);
 
-VOID InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter);
-
-#else
-INT InterfaceAdapterInit(PMINI_ADAPTER Adapter);
-#endif
-
-
-#if 0
-
-ULONG InterfaceClaimAdapter(PMINI_ADAPTER Adapter);
-
-VOID InterfaceDDRControllerInit(PMINI_ADAPTER Adapter);
-
-ULONG InterfaceReset(PMINI_ADAPTER Adapter);
-
-ULONG InterfaceRegisterResources(PMINI_ADAPTER Adapter);
-
-VOID InterfaceUnRegisterResources(PMINI_ADAPTER Adapter);
-
-ULONG InterfaceFirmwareDownload(PMINI_ADAPTER Adapter);
-
-#endif
-
 #endif
 
diff --git a/drivers/staging/bcm/InterfaceIsr.c b/drivers/staging/bcm/InterfaceIsr.c
index f928fe4..220ff92 100644
--- a/drivers/staging/bcm/InterfaceIsr.c
+++ b/drivers/staging/bcm/InterfaceIsr.c
@@ -1,6 +1,5 @@
 #include "headers.h"
 
-#ifndef BCM_SHM_INTERFACE
 
 static void read_int_callback(struct urb *urb/*, struct pt_regs *regs*/)
 {
@@ -8,6 +7,10 @@
 	PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)urb->context;
 	PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter ;
 
+	if (netif_msg_intr(Adapter))
+		pr_info(PFX "%s: interrupt status %d\n",
+			Adapter->dev->name, status);
+
 	if(Adapter->device_removed == TRUE)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Device has Got Removed.");
@@ -87,7 +90,7 @@
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, INTF_INIT, DBG_LVL_ALL,"Interrupt IN endPoint  has got halted/stalled...need to clear this");
 				Adapter->bEndPointHalted = TRUE ;
 				wake_up(&Adapter->tx_packet_wait_queue);
-				urb->status = STATUS_SUCCESS ;;
+				urb->status = STATUS_SUCCESS;
 				return;
 		}
 	    /* software-driven interface shutdown */
@@ -164,40 +167,3 @@
 	return status;
 }
 
-/*
-Function:				InterfaceEnableInterrupt
-
-Description:			This is the hardware specific Function for configuring
-						and enabling the interrupts on the device.
-
-Input parameters:		IN PMINI_ADAPTER Adapter   - Miniport Adapter Context
-
-
-Return:				BCM_STATUS_SUCCESS - If configuring the interrupts was successful.
-						Other           - If an error occured.
-*/
-
-void InterfaceEnableInterrupt(PMINI_ADAPTER Adapter)
-{
-
-}
-
-/*
-Function:				InterfaceDisableInterrupt
-
-Description:			This is the hardware specific Function for disabling the interrupts on the device.
-
-Input parameters:		IN PMINI_ADAPTER Adapter   - Miniport Adapter Context
-
-
-Return:				BCM_STATUS_SUCCESS - If disabling the interrupts was successful.
-						Other           - If an error occured.
-*/
-
-void InterfaceDisableInterrupt(PMINI_ADAPTER Adapter)
-{
-
-}
-
-#endif
-
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index 8fc893b..a51185b 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -1,17 +1,5 @@
 #include "headers.h"
 
-#ifndef BCM_SHM_INTERFACE
-
-PS_INTERFACE_ADAPTER
-InterfaceAdapterGet(PMINI_ADAPTER psAdapter)
-{
-	if(psAdapter == NULL)
-	{
-		return NULL;
-	}
-	return (PS_INTERFACE_ADAPTER)(psAdapter->pvInterfaceAdapter);
-}
-
 INT
 InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
             UINT addr,
@@ -102,7 +90,7 @@
 	if((psIntfAdapter->psAdapter->StopAllXaction == TRUE) && (psIntfAdapter->psAdapter->chip_id >= T3LPB))
 	{
 		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_OTHERS, WRM, DBG_LVL_ALL,"Currently Xaction is not allowed on the bus...");
-		return EACCES;
+		return -EACCES;
 	}
 
 	if(psIntfAdapter->bSuspended ==TRUE || psIntfAdapter->bPreparingForBusSuspend == TRUE)
@@ -236,9 +224,7 @@
 	}
 
 	/* Cancel All submitted TX URB's */
-	BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cancelling All Submitted TX Urbs \n");
-
-    for(i = 0; i < MAXIMUM_USB_TCB; i++)
+	for(i = 0; i < MAXIMUM_USB_TCB; i++)
 	{
 		tempUrb = psIntfAdapter->asUsbTcb[i].urb;
 		if(tempUrb)
@@ -248,9 +234,6 @@
 		}
 	}
 
-
-    BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Cancelling All submitted Rx Urbs \n");
-
 	for(i = 0; i < MAXIMUM_USB_RCB; i++)
 	{
 		tempUrb = psIntfAdapter->asUsbRcb[i].urb;
@@ -261,16 +244,11 @@
 		}
 	}
 
-
 	atomic_set(&psIntfAdapter->uNumTcbUsed, 0);
 	atomic_set(&psIntfAdapter->uCurrTcb, 0);
 
 	atomic_set(&psIntfAdapter->uNumRcbUsed, 0);
 	atomic_set(&psIntfAdapter->uCurrRcb, 0);
-
-	BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "TCB: used- %d cur-%d\n", atomic_read(&psIntfAdapter->uNumTcbUsed), atomic_read(&psIntfAdapter->uCurrTcb));
-	BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "RCB: used- %d cur-%d\n", atomic_read(&psIntfAdapter->uNumRcbUsed), atomic_read(&psIntfAdapter->uCurrRcb));
-
 }
 
 VOID putUsbSuspend(struct work_struct *work)
@@ -282,9 +260,6 @@
 
 	if(psIntfAdapter->bSuspended == FALSE)
 		usb_autopm_put_interface(intf);
-	else
-		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Interface Resumed Completely\n");
 
 }
 
-#endif
diff --git a/drivers/staging/bcm/InterfaceMisc.h b/drivers/staging/bcm/InterfaceMisc.h
index 74c81d4..6c9e39b 100644
--- a/drivers/staging/bcm/InterfaceMisc.h
+++ b/drivers/staging/bcm/InterfaceMisc.h
@@ -1,9 +1,6 @@
 #ifndef __INTERFACE_MISC_H
 #define __INTERFACE_MISC_H
 
-PS_INTERFACE_ADAPTER
-InterfaceAdapterGet(PMINI_ADAPTER psAdapter);
-
 INT
 InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
 			UINT addr,
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
index 6fee968..533f8eb 100644
--- a/drivers/staging/bcm/InterfaceRx.c
+++ b/drivers/staging/bcm/InterfaceRx.c
@@ -1,5 +1,15 @@
 #include "headers.h"
-extern int SearchVcid(PMINI_ADAPTER , unsigned short);
+
+static int SearchVcid(PMINI_ADAPTER Adapter, unsigned short usVcid)
+{
+	int iIndex = 0;
+
+	for (iIndex = NO_OF_QUEUES - 1; iIndex >= 0; iIndex--)
+		if (Adapter->PackInfo[iIndex].usVCID_Value == usVcid)
+			return iIndex;
+
+	return NO_OF_QUEUES + 1;
+}
 
 
 static PUSB_RCB
@@ -38,13 +48,9 @@
 	PMINI_ADAPTER Adapter = psIntfAdapter->psAdapter;
 	PLEADER pLeader = urb->transfer_buffer;
 
-
-	#if 0
-	int *puiBuffer = NULL;
-	struct timeval tv;
-	memset(&tv, 0, sizeof(tv));
-	do_gettimeofday(&tv);
-	#endif
+	if (unlikely(netif_msg_rx_status(Adapter)))
+		pr_info(PFX "%s: rx urb status %d length %d\n",
+			Adapter->dev->name, urb->status, urb->actual_length);
 
 	if((Adapter->device_removed == TRUE)  ||
 		(TRUE == Adapter->bEndPointHalted) ||
@@ -89,10 +95,10 @@
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "Leader Status:0x%hX, Length:0x%hX, VCID:0x%hX", pLeader->Status,pLeader->PLength,pLeader->Vcid);
 	if(MAX_CNTL_PKT_SIZE < pLeader->PLength)
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Corrupted leader length...%d\n",
-					pLeader->PLength);
-		atomic_inc(&Adapter->RxPacketDroppedCount);
-		atomic_add(pLeader->PLength, &Adapter->BadRxByteCount);
+		if (netif_msg_rx_err(Adapter))
+			pr_info(PFX "%s: corrupted leader length...%d\n",
+				Adapter->dev->name, pLeader->PLength);
+		++Adapter->dev->stats.rx_dropped;
 		atomic_dec(&psIntfAdapter->uNumRcbUsed);
 		return;
 	}
@@ -145,10 +151,9 @@
 		skb_put (skb, pLeader->PLength + ETH_HLEN);
 		Adapter->PackInfo[QueueIndex].uiTotalRxBytes+=pLeader->PLength;
 		Adapter->PackInfo[QueueIndex].uiThisPeriodRxBytes+= pLeader->PLength;
-		atomic_add(pLeader->PLength, &Adapter->GoodRxByteCount);
         BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "Recived Data pkt of len :0x%X", pLeader->PLength);
 
-		if(Adapter->if_up)
+		if(netif_running(Adapter->dev))
 		{
 			/* Moving ahead by ETH_HLEN to the data ptr as received from FW */
 			skb_pull(skb, ETH_HLEN);
@@ -173,9 +178,12 @@
 		else
 		{
 		    BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_RX, RX_DATA, DBG_LVL_ALL, "i/f not up hance freeing SKB...");
-			bcm_kfree_skb(skb);
+			dev_kfree_skb(skb);
 		}
-		atomic_inc(&Adapter->GoodRxPktCount);
+
+		++Adapter->dev->stats.rx_packets;
+		Adapter->dev->stats.rx_bytes += pLeader->PLength;
+
 		for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
 		{
 			if((pLeader->PLength <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1))
diff --git a/drivers/staging/bcm/InterfaceTx.c b/drivers/staging/bcm/InterfaceTx.c
index 771f7b3..a842de9 100644
--- a/drivers/staging/bcm/InterfaceTx.c
+++ b/drivers/staging/bcm/InterfaceTx.c
@@ -1,50 +1,5 @@
 #include "headers.h"
 
-#ifndef BCM_SHM_INTERFACE
-
-/*
-Function:				InterfaceTxDataPacket
-
-Description:			This is the hardware specific Function for Transmitting
-						data packet to the device.
-
-Input parameters:		IN PMINI_ADAPTER Adapter   - Miniport Adapter Context
-						PVOID Packet				-  Packet Containing the data to be transmitted
-						USHORT usVcid			   - VCID on which data packet is to be sent
-
-
-Return:				BCM_STATUS_SUCCESS - If Tx was successful.
-						Other           - If an error occured.
-*/
-
-ULONG InterfaceTxDataPacket(PMINI_ADAPTER Adapter,PVOID Packet,USHORT usVcid)
-{
-	ULONG	Status = 0;
-	return Status;
-}
-
-/*
-Function:				InterfaceTxControlPacket
-
-Description:			This is the hardware specific Function for Transmitting
-						control packet to the device.
-
-Input parameters:		IN PMINI_ADAPTER Adapter   - Miniport Adapter Context
-						PVOID pvBuffer			   - Buffer containg control packet
-						UINT uiBufferLength		   - Buffer Length
-
-Return:				BCM_STATUS_SUCCESS - If control packet transmit was successful.
-						Other           - If an error occured.
-*/
-
-ULONG InterfaceTxControlPacket(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT uiBufferLength)
-{
-	ULONG	Status = 0;
-
-
-
-	return Status;
-}
 /*this is transmit call-back(BULK OUT)*/
 static void write_bulk_callback(struct urb *urb/*, struct pt_regs *regs*/)
 {
@@ -54,10 +9,10 @@
 	PMINI_ADAPTER psAdapter = psIntfAdapter->psAdapter ;
 	BOOLEAN bpowerDownMsg = FALSE ;
     PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
-#if 0
-	struct timeval tv;
-	UINT time_ms = 0;
-#endif
+
+    if (unlikely(netif_msg_tx_done(Adapter)))
+	    pr_info(PFX "%s: transmit status %d\n", Adapter->dev->name, urb->status);
+
 	if(urb->status != STATUS_SUCCESS)
 	{
 		if(urb->status == -EPIPE)
@@ -78,11 +33,6 @@
 
 	if(TRUE == psAdapter->bPreparingForLowPowerMode)
 	{
-		#if 0
-		do_gettimeofday(&tv);
-		time_ms = tv.tv_sec *1000 + tv.tv_usec/1000;
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, " %s Idle Mode ACK_Sent got from device at time :0x%x", __FUNCTION__, time_ms);
-		#endif
 
 		if(((pControlMsg->szData[0] == GO_TO_IDLE_MODE_PAYLOAD) &&
 			(pControlMsg->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)))
@@ -152,17 +102,12 @@
 	}
 
 err_exit :
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- 	usb_buffer_free(urb->dev, urb->transfer_buffer_length,
- 			urb->transfer_buffer, urb->transfer_dma);
-#else
 	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
  			urb->transfer_buffer, urb->transfer_dma);
-#endif
 }
 
 
-static __inline PUSB_TCB GetBulkOutTcb(PS_INTERFACE_ADAPTER psIntfAdapter)
+static PUSB_TCB GetBulkOutTcb(PS_INTERFACE_ADAPTER psIntfAdapter)
 {
 	PUSB_TCB pTcb = NULL;
 	UINT index = 0;
@@ -183,20 +128,14 @@
 	return pTcb;
 }
 
-static __inline int TransmitTcb(PS_INTERFACE_ADAPTER psIntfAdapter, PUSB_TCB pTcb, PVOID data, int len)
+static int TransmitTcb(PS_INTERFACE_ADAPTER psIntfAdapter, PUSB_TCB pTcb, PVOID data, int len)
 {
 
 	struct urb *urb = pTcb->urb;
 	int retval = 0;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- 	urb->transfer_buffer = usb_buffer_alloc(psIntfAdapter->udev, len,
- 						GFP_ATOMIC, &urb->transfer_dma);
-#else
 	urb->transfer_buffer = usb_alloc_coherent(psIntfAdapter->udev, len,
  						GFP_ATOMIC, &urb->transfer_dma);
-#endif
-
 	if (!urb->transfer_buffer)
 	{
 		BCM_DEBUG_PRINT(psIntfAdapter->psAdapter,DBG_TYPE_PRINTK, 0, 0, "Error allocating memory\n");
@@ -255,5 +194,4 @@
 	return TransmitTcb(psIntfAdapter, pTcb, data, len);
 }
 
-#endif
 
diff --git a/drivers/staging/bcm/InterfaceTx.h b/drivers/staging/bcm/InterfaceTx.h
index 053f631..2731475 100644
--- a/drivers/staging/bcm/InterfaceTx.h
+++ b/drivers/staging/bcm/InterfaceTx.h
@@ -3,11 +3,5 @@
 
 INT InterfaceTransmitPacket(PVOID arg, PVOID data, UINT len);
 
-
-ULONG InterfaceTxDataPacket(PMINI_ADAPTER Adapter,PVOID Packet,USHORT usVcid);
-
-ULONG InterfaceTxControlPacket(PMINI_ADAPTER Adapter,PVOID pvBuffer,UINT uiBufferLength);
-
-
 #endif
 
diff --git a/drivers/staging/bcm/Interfacemain.h b/drivers/staging/bcm/Interfacemain.h
deleted file mode 100644
index e0db563..0000000
--- a/drivers/staging/bcm/Interfacemain.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _MAIN_
-#define _MAIN_
-#if 0
-typedef struct _MINI_ADAPTER
-{
-	S_INTERFACE_ADAPTER stInterfaceAdapter;
-}MINI_ADAPTER,*PMINI_ADAPTER;
-
-#endif
-#endif
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
index cae3823..f4cf41c 100644
--- a/drivers/staging/bcm/LeakyBucket.c
+++ b/drivers/staging/bcm/LeakyBucket.c
@@ -75,14 +75,14 @@
 * Returns     - The number of bytes allowed for transmission.
 *
 ***********************************************************************/
-static __inline ULONG GetSFTokenCount(PMINI_ADAPTER Adapter, PacketInfo *psSF)
+static ULONG GetSFTokenCount(PMINI_ADAPTER Adapter, PacketInfo *psSF)
 {
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow ===>");
 	/* Validate the parameters */
 	if(NULL == Adapter || (psSF < Adapter->PackInfo &&
 		(uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority]))
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %ld\n", Adapter, (psSF-Adapter->PackInfo));
+		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", Adapter, (psSF-Adapter->PackInfo));
 		return 0;
 	}
 
@@ -94,51 +94,27 @@
 		}
 		else
 		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %ld Available %u\n",
+			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %zd Available %u\n",
 				psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount);
 			psSF->uiPendedLast = 1;
 		}
 	}
 	else
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %ld not valid\n", psSF-Adapter->PackInfo);
+		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %zd not valid\n", psSF-Adapter->PackInfo);
 	}
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow <===");
 	return 0;
 }
 
-static __inline void RemovePacketFromQueue(PacketInfo *pPackInfo , struct sk_buff *Packet)
-{
-	struct sk_buff *psQueueCurrent=NULL, *psLastQueueNode=NULL;
-	psQueueCurrent = pPackInfo->FirstTxQueue;
-	while(psQueueCurrent)
-	{
-		if(Packet == psQueueCurrent)
-		{
-			if(psQueueCurrent == pPackInfo->FirstTxQueue)
-			{
-				pPackInfo->FirstTxQueue=psQueueCurrent->next;
-				if(psQueueCurrent==pPackInfo->LastTxQueue)
-					pPackInfo->LastTxQueue=NULL;
-			}
-			else
-			{
-				psLastQueueNode->next=psQueueCurrent->next;
-			}
-			break;
-		}
-		psLastQueueNode = psQueueCurrent;
-		psQueueCurrent=psQueueCurrent->next;
-	}
-}
 /**
 @ingroup tx_functions
 This function despatches packet from the specified queue.
 @return Zero(success) or Negative value(failure)
 */
-static __inline INT SendPacketFromQueue(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
-								PacketInfo *psSF,		/**<Queue identifier*/
-								struct sk_buff*  Packet)	/**<Pointer to the packet to be sent*/
+static INT SendPacketFromQueue(PMINI_ADAPTER Adapter,/**<Logical Adapter*/
+			       PacketInfo *psSF,		/**<Queue identifier*/
+			       struct sk_buff*  Packet)	/**<Pointer to the packet to be sent*/
 {
 	INT  	Status=STATUS_FAILURE;
 	UINT uiIndex =0,PktLen = 0;
@@ -180,8 +156,7 @@
 * Returns     - None.
 *
 ****************************************************************************/
-static __inline VOID CheckAndSendPacketFromIndex
-(PMINI_ADAPTER Adapter, PacketInfo *psSF)
+static VOID CheckAndSendPacketFromIndex(PMINI_ADAPTER Adapter, PacketInfo *psSF)
 {
 	struct sk_buff	*QueuePacket=NULL;
 	char 			*pControlPacket = NULL;
@@ -189,7 +164,7 @@
 	int				iPacketLen=0;
 
 
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%ld ====>", (psSF-Adapter->PackInfo));
+	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%zd ====>", (psSF-Adapter->PackInfo));
 	if((psSF != &Adapter->PackInfo[HiPriority]) && Adapter->LinkUpStatus && atomic_read(&psSF->uiPerSFTxResourceCount))//Get data packet
   	{
 		if(!psSF->ucDirection )
@@ -197,10 +172,8 @@
 
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "UpdateTokenCount ");
 		if(Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Device is in Idle Mode..Hence blocking Data Packets..\n");
-			return;
-		}
+			return;	/* in idle mode */
+
 		// Check for Free Descriptors
 		if(atomic_read(&Adapter->CurrNumFreeTxDesc) <= MINIMUM_PENDING_DESCRIPTORS)
 		{
@@ -208,9 +181,6 @@
 			return ;
 		}
 
-#if 0
-		PruneQueue(Adapter,(psSF-Adapter->PackInfo));
-#endif
 		spin_lock_bh(&psSF->SFQueueLock);
 		QueuePacket=psSF->FirstTxQueue;
 
@@ -240,7 +210,7 @@
 			}
 			else
 			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %ld\n", psSF-Adapter->PackInfo);
+				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
 					psSF->uiCurrentTokenCount, iPacketLen);
 				//this part indicates that becuase of non-availability of the tokens
@@ -290,17 +260,6 @@
 			}
 	   	}
 	}
-
-	if(Status != STATUS_SUCCESS)	//Tx of data packet to device Failed
-	{
-		if(Adapter->bcm_jiffies == 0)
-			Adapter->bcm_jiffies = jiffies;
-	}
-	else
-	{
-		Adapter->bcm_jiffies = 0;
-	}
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<=====");
 }
 
 
@@ -387,12 +346,7 @@
 		if(exit_flag == TRUE )
 		    break ;
 	}/* end of inner while loop */
-	if(Adapter->bcm_jiffies == 0 &&
-		atomic_read(&Adapter->TotalPacketCount) != 0 &&
-	   	uiPrevTotalCount == atomic_read(&Adapter->TotalPacketCount))
-	{
-		Adapter->bcm_jiffies = jiffies;
-	}
+
 	update_per_cid_rx  (Adapter);
 	Adapter->txtransmit_running = 0;
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<======");
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
index 0241234..feb3515 100644
--- a/drivers/staging/bcm/Macros.h
+++ b/drivers/staging/bcm/Macros.h
@@ -4,10 +4,6 @@
 #ifndef	__MACROS_H__
 #define __MACROS_H__
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define kthread_run(threadfn,data,datafmt)(struct task_struct *)kernel_thread(threadfn,data,0)
-#endif
-
 #define TX_TIMER_PERIOD 10	//10 msec
 #define MAX_CLASSIFIERS 100
 //#define MAX_CLASSIFIERS_PER_SF  20
@@ -17,10 +13,9 @@
 #define MAX_DATA_PKTS 		200
 #define MAX_ETH_SIZE 		1536
 #define MAX_CNTL_PKT_SIZE 2048
-/* TIMER RELATED */
-#define JIFFIES_2_QUADPART()	(ULONG)(jiffies * 10000) // jiffies(1msec) to Quadpart(100nsec)
 
 #define MTU_SIZE 1400
+#define TX_QLEN  5
 
 #define MAC_ADDR_REGISTER 0xbf60d000
 
@@ -266,7 +261,7 @@
 
 #define FIRMWARE_BEGIN_ADDR 0xBFC00000
 
-#define INVALID_QUEUE_INDEX (USHORT)-1
+#define INVALID_QUEUE_INDEX NO_OF_QUEUES
 
 #define INVALID_PID (pid_t)-1
 #define DDR_80_MHZ  	0
@@ -300,12 +295,7 @@
 
 /* Idle Mode Related Registers */
 #define DEBUG_INTERRUPT_GENERATOR_REGISTOR 0x0F00007C
-#ifdef BCM_SHM_INTERFACE
-#define SW_ABORT_IDLEMODE_LOC 		0xbfc02f9c
-#define CPE_VIRTUAL_MAILBOX_REG     0xBFC02E58
-#else
 #define SW_ABORT_IDLEMODE_LOC 		0x0FF01FFC
-#endif
 
 #define SW_ABORT_IDLEMODE_PATTERN 	0xd0ea1d1e
 #define DEVICE_INT_OUT_EP_REG0		0x0F011870
@@ -355,12 +345,7 @@
 	HYBRID_MODE_6   = 2
 }PMU_MODE;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
-#define MAX_RDM_WRM_RETIRES 16
-#else
 #define MAX_RDM_WRM_RETIRES 1
-#endif
-
 
 enum eAbortPattern {
 	ABORT_SHUTDOWN_MODE = 1,
@@ -369,27 +354,6 @@
 	ABORT_IDLE_SYNCDOWN = 3
 };
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
-	#define GET_BCM_ADAPTER(net_dev)  ({\
-    PMINI_ADAPTER __Adapter = NULL;	\
-    if (net_dev)    {   \
-         __Adapter = (PMINI_ADAPTER)(net_dev->priv); \
-    } \
-    else    {   \
-         __Adapter = NULL;  \
-    }__Adapter;} )
-#else
-	#define GET_BCM_ADAPTER(net_dev) ({\
-    PMINI_ADAPTER __Adapter = NULL;	\
-    if (net_dev)    {   \
-         __Adapter = (PMINI_ADAPTER)(*((unsigned long *)netdev_priv(net_dev)));  \
-    } \
-    else    {   \
-         __Adapter = NULL;  \
-    }__Adapter;})
-
-
-#endif
 
 /* Offsets used by driver in skb cb variable */
 #define SKB_CB_CLASSIFICATION_OFFSET    0
diff --git a/drivers/staging/bcm/Makefile b/drivers/staging/bcm/Makefile
index c3ae25a..652b7f8 100644
--- a/drivers/staging/bcm/Makefile
+++ b/drivers/staging/bcm/Makefile
@@ -6,7 +6,7 @@
 
 bcm_wimax-y :=  InterfaceDld.o InterfaceIdleMode.o InterfaceInit.o InterfaceRx.o \
 		InterfaceIsr.o InterfaceMisc.o InterfaceTx.o \
-		Arp.o CmHost.o Debug.o IPv6Protocol.o Qos.o Transmit.o\
+		CmHost.o IPv6Protocol.o Qos.o Transmit.o\
 		Bcmnet.o DDRInit.o HandleControlPacket.o\
 		LeakyBucket.o Misc.o sort.o Bcmchar.o hostmibs.o PHSModule.o\
-	 	Osal_Misc.o led_control.o nvm.o vendorspecificextn.o
+		led_control.o nvm.o vendorspecificextn.o
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index 22550f7..f585aae 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -1,5 +1,12 @@
 #include "headers.h"
 
+static int BcmFileDownload(PMINI_ADAPTER Adapter, const char *path,
+                        unsigned int loc);
+static VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter);
+static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter, PUCHAR pucBuffer);
+static int bcm_parse_target_params(PMINI_ADAPTER Adapter);
+static void beceem_protocol_reset(PMINI_ADAPTER Adapter);
+
 static VOID default_wimax_protocol_initialize(PMINI_ADAPTER Adapter)
 {
 
@@ -60,21 +67,11 @@
     //init_waitqueue_head(&psAdapter->device_wake_queue);
     psAdapter->fw_download_done=FALSE;
 
-    psAdapter->pvOsDepData = (PLINUX_DEP_DATA) kmalloc(sizeof(LINUX_DEP_DATA),
-                 GFP_KERNEL);
-
-    if(psAdapter->pvOsDepData == NULL)
-	{
-        BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Linux Specific Data allocation failed");
-        return -ENOMEM;
-    }
-    memset(psAdapter->pvOsDepData, 0, sizeof(LINUX_DEP_DATA));
 
 	default_wimax_protocol_initialize(psAdapter);
 	for (i=0;i<MAX_CNTRL_PKTS;i++)
 	{
-		psAdapter->txctlpacket[i] = (char *)kmalloc(MAX_CNTL_PKT_SIZE,
-												GFP_KERNEL);
+		psAdapter->txctlpacket[i] = kmalloc(MAX_CNTL_PKT_SIZE, GFP_KERNEL);
 		if(!psAdapter->txctlpacket[i])
 		{
 			BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No More Cntl pkts got, max got is %d", i);
@@ -117,7 +114,7 @@
 
 VOID AdapterFree(PMINI_ADAPTER Adapter)
 {
-	INT count = 0;
+	int count;
 
 	beceem_protocol_reset(Adapter);
 
@@ -125,72 +122,66 @@
 
 	if(Adapter->control_packet_handler && !IS_ERR(Adapter->control_packet_handler))
 	  	kthread_stop (Adapter->control_packet_handler);
+
 	if(Adapter->transmit_packet_thread && !IS_ERR(Adapter->transmit_packet_thread))
-    	kthread_stop (Adapter->transmit_packet_thread);
-    wake_up(&Adapter->process_read_wait_queue);
+		kthread_stop (Adapter->transmit_packet_thread);
+
+	wake_up(&Adapter->process_read_wait_queue);
+
 	if(Adapter->LEDInfo.led_thread_running & (BCM_LED_THREAD_RUNNING_ACTIVELY | BCM_LED_THREAD_RUNNING_INACTIVELY))
 		kthread_stop (Adapter->LEDInfo.led_cntrl_threadid);
-	bcm_unregister_networkdev(Adapter);
+
+	unregister_networkdev(Adapter);
+
+	/* FIXME: use proper wait_event and refcounting */
 	while(atomic_read(&Adapter->ApplicationRunning))
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Waiting for Application to close.. %d\n",atomic_read(&Adapter->ApplicationRunning));
 		msleep(100);
 	}
 	unregister_control_device_interface(Adapter);
-	if(Adapter->dev && !IS_ERR(Adapter->dev))
-		free_netdev(Adapter->dev);
-	if(Adapter->pstargetparams != NULL)
-	{
-		bcm_kfree(Adapter->pstargetparams);
-	}
+
+	kfree(Adapter->pstargetparams);
+
 	for (count =0;count < MAX_CNTRL_PKTS;count++)
-	{
-		if(Adapter->txctlpacket[count])
-			bcm_kfree(Adapter->txctlpacket[count]);
-	}
+		kfree(Adapter->txctlpacket[count]);
+
 	FreeAdapterDsxBuffer(Adapter);
-	if(Adapter->pvOsDepData)
-		bcm_kfree (Adapter->pvOsDepData);
-	if(Adapter->pvInterfaceAdapter)
-		bcm_kfree(Adapter->pvInterfaceAdapter);
+
+	kfree(Adapter->pvInterfaceAdapter);
 
 	//Free the PHS Interface
 	PhsCleanup(&Adapter->stBCMPhsContext);
 
-#ifndef BCM_SHM_INTERFACE
 	BcmDeAllocFlashCSStructure(Adapter);
-#endif
 
-	bcm_kfree (Adapter);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "<========\n");
+	free_netdev(Adapter->dev);
 }
 
-
-int create_worker_threads(PMINI_ADAPTER psAdapter)
+static int create_worker_threads(PMINI_ADAPTER psAdapter)
 {
-	BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Init Threads...");
 	// Rx Control Packets Processing
 	psAdapter->control_packet_handler = kthread_run((int (*)(void *))
-			control_packet_handler, psAdapter, "CtrlPktHdlr");
+							control_packet_handler, psAdapter, "%s-rx", DRV_NAME);
 	if(IS_ERR(psAdapter->control_packet_handler))
 	{
-		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Kernel Thread, but still returning success\n");
+		pr_notice(DRV_NAME ": could not create control thread\n");
 		return PTR_ERR(psAdapter->control_packet_handler);
 	}
+
 	// Tx Thread
 	psAdapter->transmit_packet_thread = kthread_run((int (*)(void *))
-		tx_pkt_handler, psAdapter, "TxPktThread");
+							tx_pkt_handler, psAdapter, "%s-tx", DRV_NAME);
 	if(IS_ERR (psAdapter->transmit_packet_thread))
 	{
-		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "No Kernel Thread, but still returning success");
+		pr_notice(DRV_NAME ": could not creat transmit thread\n");
 		kthread_stop(psAdapter->control_packet_handler);
 		return PTR_ERR(psAdapter->transmit_packet_thread);
 	}
 	return 0;
 }
 
-
-static inline struct file *open_firmware_file(PMINI_ADAPTER Adapter, char *path)
+static struct file *open_firmware_file(PMINI_ADAPTER Adapter, const char *path)
 {
     struct file             *flp=NULL;
     mm_segment_t        oldfs;
@@ -200,26 +191,20 @@
     set_fs(oldfs);
     if(IS_ERR(flp))
     {
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable To Open File %s, err  %lx",
-				path, PTR_ERR(flp));
-		flp = NULL;
+	    pr_err(DRV_NAME "Unable To Open File %s, err %ld",
+		   path, PTR_ERR(flp));
+	    flp = NULL;
     }
-    else
-    {
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Got file descriptor pointer of %s!",
-			path);
-    }
-	if(Adapter->device_removed)
-	{
-		flp = NULL;
-	}
+
+    if (Adapter->device_removed)
+	    flp = NULL;
 
     return flp;
 }
 
 
-int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
-                        char *path,     /**< path to image file */
+static int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
+                        const char *path,     /**< path to image file */
                         unsigned int loc    /**< Download Address on the chip*/
                         )
 {
@@ -248,9 +233,7 @@
         goto exit_download;
     }
     oldfs=get_fs();set_fs(get_ds());
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
     vfs_llseek(flp, 0, 0);
-#endif
     set_fs(oldfs);
     if(Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter,
 										flp, loc))
@@ -265,31 +248,10 @@
 	if(flp && !(IS_ERR(flp)))
     	filp_close(flp, current->files);
     set_fs(oldfs);
-    do_gettimeofday(&tv);
-    BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "file download done at %lx", ((tv.tv_sec * 1000) +
-                            (tv.tv_usec/1000)));
+
     return errorno;
 }
 
-
-void bcm_kfree_skb(struct sk_buff *skb)
-{
-	if(skb)
-    {
-    	kfree_skb(skb);
-    }
-	skb = NULL ;
-}
-
-VOID bcm_kfree(VOID *ptr)
-{
-	if(ptr)
-	{
-		kfree(ptr);
-	}
-	ptr = NULL ;
-}
-
 /**
 @ingroup ctrl_pkt_functions
 This function copies the contents of given buffer
@@ -395,13 +357,6 @@
 			/*Setting bIdleMode_tx_from_host to TRUE to indicate LED control thread to represent
 			  the wake up from idlemode is from host*/
 			//Adapter->LEDInfo.bIdleMode_tx_from_host = TRUE;
-#if 0
-			if(STATUS_SUCCESS != InterfaceIdleModeWakeup(Adapter))
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Idle Mode Wake up Failed\n");
-				return STATUS_FAILURE;
-			}
-#endif
 			Adapter->bWakeUpDevice = TRUE;
 			wake_up(&Adapter->process_rx_cntrlpkt);
 
@@ -489,9 +444,6 @@
 		atomic_inc(&Adapter->index_wr_txcntrlpkt);
 		BCM_DEBUG_PRINT( Adapter,DBG_TYPE_TX, TX_CONTROL,DBG_LVL_ALL, "Calling transmit_packets");
 		atomic_set(&Adapter->TxPktAvail, 1);
-#ifdef BCM_SHM_INTERFACE
-		virtual_mail_box_interrupt();
-#endif
 		wake_up(&Adapter->tx_packet_wait_queue);
 	}
 	else
@@ -530,18 +482,6 @@
 #endif
 
 
-void SendLinkDown(PMINI_ADAPTER Adapter)
-{
-	LINK_REQUEST	stLinkDownRequest;
-	memset(&stLinkDownRequest, 0, sizeof(LINK_REQUEST));
-	stLinkDownRequest.Leader.Status=LINK_UP_CONTROL_REQ;
-	stLinkDownRequest.Leader.PLength=sizeof(ULONG);//minimum 4 bytes
-	stLinkDownRequest.szData[0]=LINK_DOWN_REQ_PAYLOAD;
-	Adapter->bLinkDownRequested = TRUE;
-
-	CopyBufferToControlPacket(Adapter,&stLinkDownRequest);
-}
-
 /******************************************************************
 * Function    - LinkMessage()
 *
@@ -552,7 +492,7 @@
 *
 * Returns     - None.
 *******************************************************************/
-__inline VOID LinkMessage(PMINI_ADAPTER Adapter)
+VOID LinkMessage(PMINI_ADAPTER Adapter)
 {
 	PLINK_REQUEST	pstLinkRequest=NULL;
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "=====>");
@@ -594,7 +534,7 @@
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "Calling CopyBufferToControlPacket");
 		CopyBufferToControlPacket(Adapter, pstLinkRequest);
-		bcm_kfree(pstLinkRequest);
+		kfree(pstLinkRequest);
 	}
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LINK_UP_MSG, DBG_LVL_ALL, "LinkMessage <=====");
 	return;
@@ -614,8 +554,8 @@
 VOID StatisticsResponse(PMINI_ADAPTER Adapter,PVOID pvBuffer)
 {
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s====>",__FUNCTION__);
-	Adapter->StatisticsPointer = ntohl(*(PULONG)pvBuffer);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %lx", Adapter->StatisticsPointer);
+	Adapter->StatisticsPointer = ntohl(*(__be32 *)pvBuffer);
+	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Stats at %x", (UINT)Adapter->StatisticsPointer);
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "%s <====",__FUNCTION__);
 	return;
 }
@@ -764,7 +704,7 @@
 
 			/* Wake the LED Thread with IDLEMODE_ENTER State */
 			Adapter->DriverState = LOWPOWER_MODE_ENTER;
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld",jiffies);;
+			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL,"LED Thread is Running..Hence Setting LED Event as IDLEMODE_ENTER jiffies:%ld",jiffies);
 			wake_up(&Adapter->LEDInfo.notify_led_event);
 
 			/* Wait for 1 SEC for LED to OFF */
@@ -787,12 +727,10 @@
 			down(&Adapter->rdmwrmsync);
 			Adapter->bPreparingForLowPowerMode = TRUE;
 			up(&Adapter->rdmwrmsync);
-#ifndef BCM_SHM_INTERFACE
 			//Killing all URBS.
 			if(Adapter->bDoSuspend == TRUE)
 				Bcm_kill_all_URBs((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
 
-#endif
 		}
 		else
 		{
@@ -811,9 +749,7 @@
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"fail to send the Idle mode Request \n");
 		Adapter->bPreparingForLowPowerMode = FALSE;
-#ifndef BCM_SHM_INTERFACE
 		StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
 	}
 	do_gettimeofday(&tv);
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_RX, RX_DPC, DBG_LVL_ALL, "IdleMode Msg submitter to Q :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
@@ -980,12 +916,10 @@
 
 }
 
-
-__inline int reset_card_proc(PMINI_ADAPTER ps_adapter)
+int reset_card_proc(PMINI_ADAPTER ps_adapter)
 {
 	int retval = STATUS_SUCCESS;
 
-#ifndef BCM_SHM_INTERFACE
     PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
 	PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
 	unsigned int value = 0, uiResetValue = 0;
@@ -1006,11 +940,9 @@
 		wrmalt(ps_adapter, SYS_CFG, &value, sizeof(value));
 	}
 
-#ifndef BCM_SHM_INTERFACE
 	//killing all submitted URBs.
 	psIntfAdapter->psAdapter->StopAllXaction = TRUE ;
 	Bcm_kill_all_URBs(psIntfAdapter);
-#endif
 	/* Reset the UMA-B Device */
 	if(ps_adapter->chip_id >= T3LPB)
 	{
@@ -1111,11 +1043,10 @@
 
 err_exit :
 	psIntfAdapter->psAdapter->StopAllXaction = FALSE ;
-#endif
 	return retval;
 }
 
-__inline int run_card_proc(PMINI_ADAPTER ps_adapter )
+int run_card_proc(PMINI_ADAPTER ps_adapter )
 {
 	unsigned int value=0;
 	{
@@ -1146,21 +1077,17 @@
 int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
 {
 
-	UINT status = STATUS_SUCCESS;
+	int status;
 	UINT value = 0;
-#ifdef BCM_SHM_INTERFACE
-	unsigned char *pConfigFileAddr = (unsigned char *)CPE_MACXVI_CFG_ADDR;
-#endif
 	/*
  	 * Create the threads first and then download the
  	 * Firm/DDR Settings..
  	 */
 
-	if((status = create_worker_threads(ps_adapter))<0)
-	{
-		BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Cannot create thread");
+	status = create_worker_threads(ps_adapter);
+	if (status<0)
 		return status;
-	}
+
 	/*
  	 * For Downloading the Firm, parse the cfg file first.
  	 */
@@ -1169,7 +1096,6 @@
 		return status;
 	}
 
-#ifndef BCM_SHM_INTERFACE
 	if(ps_adapter->chip_id >= T3LPB)
 	{
 		rdmalt(ps_adapter, SYS_CFG, &value, sizeof (value));
@@ -1187,7 +1113,7 @@
 	status = ddr_init(ps_adapter);
 	if(status)
 	{
-		BCM_DEBUG_PRINT (ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "ddr_init Failed\n");
+		pr_err(DRV_NAME ": ddr_init Failed\n");
 		return status;
 	}
 
@@ -1201,7 +1127,6 @@
 		BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Error downloading CFG file");
 		goto OUT;
 	}
-	BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "CFG file downloaded");
 
 	if(register_networkdev(ps_adapter))
 	{
@@ -1266,12 +1191,6 @@
 			goto OUT;
 		}
 	}
-#if 0
-	else if(psAdapter->eNVMType == NVM_EEPROM)
-	{
-		PropagateCalParamsFromEEPROMToMemory();
-	}
-#endif
 
 	/* Download Firmare */
 	if ((status = BcmFileDownload( ps_adapter, BIN_FILE, FIRMWARE_BEGIN_ADDR)))
@@ -1280,7 +1199,6 @@
 		goto OUT;
 	}
 
-	BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "BIN file downloaded");
 	status = run_card_proc(ps_adapter);
 	if(status)
 	{
@@ -1299,68 +1217,19 @@
 		wake_up(&ps_adapter->LEDInfo.notify_led_event);
 	}
 
-#else
-
-	ps_adapter->bDDRInitDone = TRUE;
-	//Initializing the NVM.
-	BcmInitNVM(ps_adapter);
-
-	//Propagating the cal param from Flash to DDR
-	value = 0;
-	wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 4, &value, sizeof(value));
-	wrmalt(ps_adapter, EEPROM_CAL_DATA_INTERNAL_LOC - 8, &value, sizeof(value));
-
-	if(ps_adapter->eNVMType == NVM_FLASH)
-	{
-		status = PropagateCalParamsFromFlashToMemory(ps_adapter);
-		if(status)
-		{
-			printk("\nPropogation of Cal param from flash to DDR failed ..\n" );
-		}
-	}
-
-	//Copy config file param to DDR.
-	memcpy(pConfigFileAddr,ps_adapter->pstargetparams, sizeof(STARGETPARAMS));
-
-	if(register_networkdev(ps_adapter))
-	{
-		BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Netdevice failed. Cleanup needs to be performed.");
-		return -EIO;
-	}
-
-
-	status = InitLedSettings (ps_adapter);
-	if(status)
-	{
-		BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_PRINTK, 0, 0,"INIT LED FAILED\n");
-		return status;
-	}
-
-
-	if(register_control_device_interface(ps_adapter) < 0)
-	{
-		BCM_DEBUG_PRINT(ps_adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Register Control Device failed. Cleanup needs to be performed.");
-		return -EIO;
-	}
-
-	ps_adapter->fw_download_done = TRUE;
-#endif
 	return status;
 }
 
 
-int bcm_parse_target_params(PMINI_ADAPTER Adapter)
+static int bcm_parse_target_params(PMINI_ADAPTER Adapter)
 {
-#ifdef BCM_SHM_INTERFACE
-	extern void read_cfg_file(PMINI_ADAPTER Adapter);
-#endif
 	struct file 		*flp=NULL;
 	mm_segment_t 	oldfs={0};
-	char *buff = NULL;
+	char *buff;
 	int len = 0;
 	loff_t	pos = 0;
 
-	buff=(PCHAR)kmalloc(BUFFER_1K, GFP_KERNEL);
+	buff=kmalloc(BUFFER_1K, GFP_KERNEL);
 	if(!buff)
 	{
 		return -ENOMEM;
@@ -1368,14 +1237,14 @@
 	if((Adapter->pstargetparams =
 		kmalloc(sizeof(STARGETPARAMS), GFP_KERNEL)) == NULL)
 	{
-		bcm_kfree(buff);
+		kfree(buff);
 		return -ENOMEM;
 	}
 	flp=open_firmware_file(Adapter, CFG_FILE);
 	if(!flp) {
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "NOT ABLE TO OPEN THE %s FILE \n", CFG_FILE);
-		bcm_kfree(buff);
-		bcm_kfree(Adapter->pstargetparams);
+		kfree(buff);
+		kfree(Adapter->pstargetparams);
 		Adapter->pstargetparams = NULL;
 		return -ENOENT;
 	}
@@ -1386,8 +1255,8 @@
 	if(len != sizeof(STARGETPARAMS))
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"Mismatch in Target Param Structure!\n");
-		bcm_kfree(buff);
-		bcm_kfree(Adapter->pstargetparams);
+		kfree(buff);
+		kfree(Adapter->pstargetparams);
 		Adapter->pstargetparams = NULL;
 		filp_close(flp, current->files);
 		return -ENOENT;
@@ -1399,37 +1268,34 @@
 	 * Values in Adapter->pstargetparams are in network byte order
 	 */
 	memcpy(Adapter->pstargetparams, buff, sizeof(STARGETPARAMS));
-	bcm_kfree (buff);
+	kfree (buff);
 	beceem_parse_target_struct(Adapter);
-#ifdef BCM_SHM_INTERFACE
-	read_cfg_file(Adapter);
-
-#endif
 	return STATUS_SUCCESS;
 }
 
 void beceem_parse_target_struct(PMINI_ADAPTER Adapter)
 {
-	UINT uiHostDrvrCfg6 =0, uiEEPROMFlag = 0;;
+	UINT uiHostDrvrCfg6 =0, uiEEPROMFlag = 0;
 
 	if(ntohl(Adapter->pstargetparams->m_u32PhyParameter2) & AUTO_SYNC_DISABLE)
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoSyncup is Disabled\n");
+		pr_info(DRV_NAME ": AutoSyncup is Disabled\n");
 		Adapter->AutoSyncup = FALSE;
 	}
 	else
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "AutoSyncup is Enabled\n");
+		pr_info(DRV_NAME ": AutoSyncup is Enabled\n");
 		Adapter->AutoSyncup	= TRUE;
 	}
+
 	if(ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_LINKUP_ENABLE)
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Enabling autolink up");
+		pr_info(DRV_NAME ": Enabling autolink up");
 		Adapter->AutoLinkUp = TRUE;
 	}
 	else
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Disabling autolink up");
+		pr_info(DRV_NAME ": Disabling autolink up");
 		Adapter->AutoLinkUp = FALSE;
 	}
 	// Setting the DDR Setting..
@@ -1438,59 +1304,54 @@
 	Adapter->ulPowerSaveMode =
 			(ntohl(Adapter->pstargetparams->HostDrvrConfig6)>>12)&0x0F;
 
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "DDR Setting: %x\n", Adapter->DDRSetting);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT,DBG_LVL_ALL, "Power Save Mode: %lx\n",
-							Adapter->ulPowerSaveMode);
+	pr_info(DRV_NAME ": DDR Setting: %x\n", Adapter->DDRSetting);
+	pr_info(DRV_NAME ": Power Save Mode: %lx\n", Adapter->ulPowerSaveMode);
 	if(ntohl(Adapter->pstargetparams->HostDrvrConfig6) & AUTO_FIRM_DOWNLOAD)
     {
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Enabling Auto Firmware Download\n");
+        pr_info(DRV_NAME ": Enabling Auto Firmware Download\n");
         Adapter->AutoFirmDld = TRUE;
     }
     else
     {
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Disabling Auto Firmware Download\n");
+        pr_info(DRV_NAME ": Disabling Auto Firmware Download\n");
         Adapter->AutoFirmDld = FALSE;
     }
 	uiHostDrvrCfg6 = ntohl(Adapter->pstargetparams->HostDrvrConfig6);
 	Adapter->bMipsConfig = (uiHostDrvrCfg6>>20)&0x01;
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"MIPSConfig   : 0x%X\n",Adapter->bMipsConfig);
+	pr_info(DRV_NAME ": MIPSConfig   : 0x%X\n",Adapter->bMipsConfig);
 	//used for backward compatibility.
 	Adapter->bDPLLConfig = (uiHostDrvrCfg6>>19)&0x01;
 
 	Adapter->PmuMode= (uiHostDrvrCfg6 >> 24 ) & 0x03;
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "PMU MODE: %x", Adapter->PmuMode);
+	pr_info(DRV_NAME ": PMU MODE: %x", Adapter->PmuMode);
 
     if((uiHostDrvrCfg6 >> HOST_BUS_SUSPEND_BIT ) & (0x01))
     {
         Adapter->bDoSuspend = TRUE;
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Making DoSuspend TRUE as per configFile");
+        pr_info(DRV_NAME ": Making DoSuspend TRUE as per configFile");
     }
 
 	uiEEPROMFlag = ntohl(Adapter->pstargetparams->m_u32EEPROMFlag);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "uiEEPROMFlag  : 0x%X\n",uiEEPROMFlag);
+	pr_info(DRV_NAME ": uiEEPROMFlag  : 0x%X\n",uiEEPROMFlag);
 	Adapter->eNVMType = (NVM_TYPE)((uiEEPROMFlag>>4)&0x3);
 
-
 	Adapter->bStatusWrite = (uiEEPROMFlag>>6)&0x1;
-	//printk(("bStatusWrite   : 0x%X\n", Adapter->bStatusWrite));
 
 	Adapter->uiSectorSizeInCFG = 1024*(0xFFFF & ntohl(Adapter->pstargetparams->HostDrvrConfig4));
-	//printk(("uiSectorSize   : 0x%X\n", Adapter->uiSectorSizeInCFG));
 
 	Adapter->bSectorSizeOverride =(bool) ((ntohl(Adapter->pstargetparams->HostDrvrConfig4))>>16)&0x1;
-	//printk(MP_INIT,("bSectorSizeOverride   : 0x%X\n",Adapter->bSectorSizeOverride));
 
 	if(ntohl(Adapter->pstargetparams->m_u32PowerSavingModeOptions) &0x01)
 		Adapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE;
-	//autocorrection part
+
 	if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
 		doPowerAutoCorrection(Adapter);
 
 }
 
-VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
+static VOID doPowerAutoCorrection(PMINI_ADAPTER psAdapter)
 {
-	UINT reporting_mode = 0;
+	UINT reporting_mode;
 
 	reporting_mode = ntohl(psAdapter->pstargetparams->m_u32PowerSavingModeOptions) &0x02 ;
 	psAdapter->bIsAutoCorrectEnabled = !((char)(psAdapter->ulPowerSaveMode >> 3) & 0x1);
@@ -1504,20 +1365,9 @@
 	if (psAdapter->bIsAutoCorrectEnabled && (psAdapter->chip_id >= T3LPB))
 	{
 		//If reporting mode is enable, switch PMU to PMC
-		#if 0
-		if(reporting_mode == FALSE)
-		{
-			psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN;
-			psAdapter->bDoSuspend = TRUE;
-			BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"PMU selected ....");
-
-		}
-		else
-		#endif
 		{
 			psAdapter->ulPowerSaveMode = DEVICE_POWERSAVE_MODE_AS_PMU_CLOCK_GATING;
 			psAdapter->bDoSuspend =FALSE;
-			BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL,"PMC selected..");
 
 		}
 
@@ -1540,12 +1390,10 @@
 #if 0
 static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
 {
-	unsigned char *pucmacaddr = NULL;
-	int status = 0, i=0;
-	unsigned int temp =0;
+	int status = 0, i = 0;
+	unsigned int temp = 0;
+	unsigned char *pucmacaddr = kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
 
-
-	pucmacaddr = (unsigned char *)kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
 	if(!pucmacaddr)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
@@ -1558,7 +1406,7 @@
 	if(status != STATUS_SUCCESS)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "wrm Failed..\n");
-		bcm_kfree(pucmacaddr);
+		kfree(pucmacaddr);
 		pucmacaddr = NULL;
 		goto OUT;
 	}
@@ -1568,7 +1416,7 @@
 		if(status != STATUS_SUCCESS)
 		{
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
-			bcm_kfree(pucmacaddr);
+			kfree(pucmacaddr);
 			pucmacaddr = NULL;
 			goto OUT;
 		}
@@ -1580,43 +1428,6 @@
 }
 #endif
 
-#if 0
-INT ReadMacAddressFromEEPROM(PMINI_ADAPTER Adapter)
-{
-	unsigned char *puMacAddr = NULL;
-	int i =0;
-
-	puMacAddr = ReadMacAddrEEPROM(Adapter,0x200);
-	if(!puMacAddr)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Couldn't retrieve the Mac Address\n");
-		return STATUS_FAILURE;
-	}
-	else
-	{
-		if((puMacAddr[0] == 0x0  && puMacAddr[1] == 0x0  &&
-			puMacAddr[2] == 0x0  && puMacAddr[3] == 0x0  &&
-			puMacAddr[4] == 0x0  && puMacAddr[5] == 0x0) ||
-		   (puMacAddr[0] == 0xFF && puMacAddr[1] == 0xFF &&
-			puMacAddr[2] == 0xFF && puMacAddr[3] == 0xFF &&
-			puMacAddr[4] == 0xFF && puMacAddr[5] == 0xFF))
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Invalid Mac Address\n");
-			bcm_kfree(puMacAddr);
-			return STATUS_FAILURE;
-		}
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "The Mac Address received is: \n");
-		memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
-        for(i=0;i<MAC_ADDRESS_SIZE;i++)
-        {
-            BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%02x ", Adapter->dev->dev_addr[i]);
-        }
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"\n");
-		bcm_kfree(puMacAddr);
-	}
-	return STATUS_SUCCESS;
-}
-#endif
 
 static void convertEndian(B_UINT8 rwFlag, PUINT puiBuffer, UINT uiByteCount)
 {
@@ -1640,81 +1451,21 @@
 {
 	INT uiRetVal =0;
 
-#ifndef BCM_SHM_INTERFACE
 	uiRetVal = Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
 			uiAddress, pucBuff, sSize);
 
 	if(uiRetVal < 0)
 		return uiRetVal;
 
-#else
-	int indx;
-	uiRetVal = STATUS_SUCCESS;
-	if(uiAddress & 0x10000000) {
-			// DDR Memory Access
-		uiAddress |= CACHE_ADDRESS_MASK;
-		memcpy(pucBuff,(unsigned char *)uiAddress ,sSize);
-	}
-	else {
-		// Register, SPRAM, Flash
-		uiAddress |= UNCACHE_ADDRESS_MASK;
-    if ((uiAddress & FLASH_ADDR_MASK) == (FLASH_CONTIGIOUS_START_ADDR_BCS350 & FLASH_ADDR_MASK))
-	{
-		#if defined(FLASH_DIRECT_ACCESS)
-        	memcpy(pucBuff,(unsigned char *)uiAddress ,sSize);
-		#else
-			printk("\nInvalid GSPI ACCESS :Addr :%#X", uiAddress);
-			uiRetVal = STATUS_FAILURE;
-		#endif
-	}
-    else if(((unsigned int )uiAddress & 0x3) ||
-			((unsigned int )pucBuff & 0x3) ||
-			((unsigned int )sSize & 0x3)) {
-		  	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"rdmalt :unalligned register access uiAddress =  %x,pucBuff = %x  size = %x\n",(unsigned int )uiAddress,(unsigned int )pucBuff,(unsigned int )sSize);
-			 uiRetVal = STATUS_FAILURE;
-		}
-		else {
-		 	for (indx=0;indx<sSize;indx+=4){
-		   		*(PUINT)(pucBuff + indx) = *(PUINT)(uiAddress + indx);
-		  	}
-		}
-	}
-#endif
 	return uiRetVal;
 }
 int wrm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
 {
 	int iRetVal;
 
-#ifndef BCM_SHM_INTERFACE
 	iRetVal = Adapter->interface_wrm(Adapter->pvInterfaceAdapter,
 			uiAddress, pucBuff, sSize);
 
-#else
-	int indx;
-	if(uiAddress & 0x10000000) {
-		// DDR Memory Access
-		uiAddress |= CACHE_ADDRESS_MASK;
-		memcpy((unsigned char *)(uiAddress),pucBuff,sSize);
-	}
-	else {
-		// Register, SPRAM, Flash
-		uiAddress |= UNCACHE_ADDRESS_MASK;
-
-		if(((unsigned int )uiAddress & 0x3) ||
-			((unsigned int )pucBuff & 0x3) ||
-			((unsigned int )sSize & 0x3)) {
-		  		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"wrmalt: unalligned register access uiAddress =  %x,pucBuff = %x  size = %x\n",(unsigned int )uiAddress,(unsigned int )pucBuff,(unsigned int )sSize);
-			 iRetVal = STATUS_FAILURE;
-		}
-		else {
-		 	for (indx=0;indx<sSize;indx+=4) {
-		  		*(PUINT)(uiAddress + indx) = *(PUINT)(pucBuff + indx);
-			}
-		}
-	}
-	iRetVal = STATUS_SUCCESS;
-#endif
 
 	return iRetVal;
 }
@@ -1735,26 +1486,7 @@
 	return uiRetVal;
 }
 
-int rdmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
-{
 
-	INT status = STATUS_SUCCESS ;
-	down(&Adapter->rdmwrmsync);
-
-	if((Adapter->IdleMode == TRUE) ||
-		(Adapter->bShutStatus ==TRUE) ||
-		(Adapter->bPreparingForLowPowerMode ==TRUE))
-	{
-		status = -EACCES;
-		goto exit;
-	}
-
-	status = rdm(Adapter, uiAddress, pucBuff, sSize);
-
-exit:
-	up(&Adapter->rdmwrmsync);
-	return status ;
-}
 int wrmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
 {
 	INT status = STATUS_SUCCESS ;
@@ -1921,10 +1653,8 @@
 			Adapter->bPreparingForLowPowerMode = TRUE;
 			up(&Adapter->rdmwrmsync);
 			//Killing all URBS.
-#ifndef BCM_SHM_INTERFACE
 			if(Adapter->bDoSuspend == TRUE)
 				Bcm_kill_all_URBs((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
 		}
 		else
 		{
@@ -1943,14 +1673,12 @@
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, MP_SHUTDOWN, DBG_LVL_ALL,"fail to send the Idle mode Request \n");
 		Adapter->bPreparingForLowPowerMode = FALSE;
 
-#ifndef BCM_SHM_INTERFACE
 		StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
-#endif
 	}
 }
 
 
-void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
+static void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer)
 {
 	B_UINT32 uiResetValue = 0;
 
@@ -2077,11 +1805,7 @@
 	if(!atomic_read (&Adapter->uiMBupdate))
 		return;
 
-#ifdef BCM_SHM_INTERFACE
-	if(rdmalt(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS)<0)
-#else
 	if(rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS)<0)
-#endif
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
 		return;
@@ -2107,9 +1831,7 @@
 void flush_queue(PMINI_ADAPTER Adapter, UINT iQIndex)
 {
 	struct sk_buff* 			PacketToDrop=NULL;
-	struct net_device_stats*		netstats=NULL;
-
-	netstats = &((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats;
+	struct net_device_stats*		netstats = &Adapter->dev->stats;
 
 	spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
 
@@ -2130,25 +1852,23 @@
 			Adapter->PackInfo[iQIndex].uiDroppedCountBytes += PacketToDrop->len;
 			Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
 
-			bcm_kfree_skb(PacketToDrop);
+			dev_kfree_skb(PacketToDrop);
 			atomic_dec(&Adapter->TotalPacketCount);
-			atomic_inc(&Adapter->TxDroppedPacketCount);
-
 		}
 	}
 	spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
 
 }
 
-void beceem_protocol_reset (PMINI_ADAPTER Adapter)
+static void beceem_protocol_reset (PMINI_ADAPTER Adapter)
 {
-	int i =0;
+	int i;
 
-	if(NULL != Adapter->dev)
-	{
-		netif_carrier_off(Adapter->dev);
-		netif_stop_queue(Adapter->dev);
-	}
+	if (netif_msg_link(Adapter))
+		pr_notice(PFX "%s: protocol reset\n", Adapter->dev->name);
+
+	netif_carrier_off(Adapter->dev);
+	netif_stop_queue(Adapter->dev);
 
 	Adapter->IdleMode = FALSE;
 	Adapter->LinkUpStatus = FALSE;
@@ -2166,78 +1886,18 @@
 		Adapter->TimerActive = FALSE;
 
 	memset(Adapter->astFragmentedPktClassifierTable, 0,
-			sizeof(S_FRAGMENTED_PACKET_INFO) *
-			MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
+	       sizeof(S_FRAGMENTED_PACKET_INFO) * MAX_FRAGMENTEDIP_CLASSIFICATION_ENTRIES);
 
 	for(i = 0;i<HiPriority;i++)
 	{
 		//resetting only the first size (S_MIBS_SERVICEFLOW_TABLE) for the SF.
 		// It is same between MIBs and SF.
-		memset((PVOID)&Adapter->PackInfo[i],0,sizeof(S_MIBS_SERVICEFLOW_TABLE));
+		memset(&Adapter->PackInfo[i].stMibsExtServiceFlowTable,
+		       0, sizeof(S_MIBS_EXTSERVICEFLOW_PARAMETERS));
 	}
 }
 
 
 
-#ifdef BCM_SHM_INTERFACE
-
-
-#define GET_GTB_DIFF(start, end)  \
-( (start) < (end) )? ( (end) - (start) ) : ( ~0x0 - ( (start) - (end)) +1 )
-
-void usdelay ( unsigned int a) {
-	unsigned int start= *(unsigned int *)0xaf8051b4;
-	unsigned int end  = start+1;
-	unsigned int diff = 0;
-
-	while(1) {
-		end = *(unsigned int *)0xaf8051b4;
-		diff = (GET_GTB_DIFF(start,end))/80;
-		if (diff >= a)
-			break;
-	}
-}
-void read_cfg_file(PMINI_ADAPTER Adapter) {
-
-
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Config File Version = 0x%x \n",Adapter->pstargetparams->m_u32CfgVersion );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Center Frequency =  0x%x \n",Adapter->pstargetparams->m_u32CenterFrequency );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Band A Scan = 0x%x \n",Adapter->pstargetparams->m_u32BandAScan );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Band B Scan = 0x%x \n",Adapter->pstargetparams->m_u32BandBScan );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Band C Scan = 0x%x \n",Adapter->pstargetparams->m_u32BandCScan );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"ERTPS Options = 0x%x \n",Adapter->pstargetparams->m_u32ErtpsOptions );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PHS Enable = 0x%x \n",Adapter->pstargetparams->m_u32PHSEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Handoff Enable = 0x%x \n",Adapter->pstargetparams->m_u32HoEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HO Reserved1 = 0x%x \n",Adapter->pstargetparams->m_u32HoReserved1 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HO Reserved2 = 0x%x \n",Adapter->pstargetparams->m_u32HoReserved2 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"MIMO Enable = 0x%x \n",Adapter->pstargetparams->m_u32MimoEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PKMv2 Enable = 0x%x \n",Adapter->pstargetparams->m_u32SecurityEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Powersaving Modes Enable = 0x%x \n",Adapter->pstargetparams->m_u32PowerSavingModesEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Power Saving Mode Options = 0x%x \n",Adapter->pstargetparams->m_u32PowerSavingModeOptions );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"ARQ Enable = 0x%x \n",Adapter->pstargetparams->m_u32ArqEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Harq Enable = 0x%x \n",Adapter->pstargetparams->m_u32HarqEnable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"EEPROM Flag = 0x%x \n",Adapter->pstargetparams->m_u32EEPROMFlag );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Customize = 0x%x \n",Adapter->pstargetparams->m_u32Customize );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Bandwidth = 0x%x \n",Adapter->pstargetparams->m_u32ConfigBW );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"ShutDown Timer Value = 0x%x \n",Adapter->pstargetparams->m_u32ShutDownInitThresholdTimer );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RadioParameter = 0x%x \n",Adapter->pstargetparams->m_u32RadioParameter );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PhyParameter1 = 0x%x \n",Adapter->pstargetparams->m_u32PhyParameter1 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PhyParameter2 = 0x%x \n",Adapter->pstargetparams->m_u32PhyParameter2 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"PhyParameter3 = 0x%x \n",Adapter->pstargetparams->m_u32PhyParameter3 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"m_u32TestOptions = 0x%x \n",Adapter->pstargetparams->m_u32TestOptions );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"MaxMACDataperDLFrame = 0x%x \n",Adapter->pstargetparams->m_u32MaxMACDataperDLFrame );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"MaxMACDataperULFrame = 0x%x \n",Adapter->pstargetparams->m_u32MaxMACDataperULFrame );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Corr2MacFlags = 0x%x \n",Adapter->pstargetparams->m_u32Corr2MacFlags );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig1 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig1 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig2 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig2 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig3 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig3 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig4 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig4 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig5 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig5 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"HostDrvrConfig6 = 0x%x \n",Adapter->pstargetparams->HostDrvrConfig6 );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Segmented PUSC Enable = 0x%x \n",Adapter->pstargetparams->m_u32SegmentedPUSCenable );
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"BamcEnable = 0x%x \n",Adapter->pstargetparams->m_u32BandAMCEnable );
-}
-
-#endif
 
 
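For reference on the wrapper removal in the file above: kfree() treats a NULL argument as a no-op, and kfree_skb()/dev_kfree_skb() perform their own NULL check, so the bcm_kfree()/bcm_kfree_skb() helpers added nothing; the trailing ptr = NULL assignment inside them only cleared the local parameter, never the caller's pointer. A minimal sketch of the direct idiom, using hypothetical names:

	#include <linux/slab.h>
	#include <linux/skbuff.h>

	struct example_ctx {			/* hypothetical driver context */
		void *cfg_buf;
		struct sk_buff *pending_skb;
	};

	static void example_teardown(struct example_ctx *ctx)
	{
		kfree(ctx->cfg_buf);		/* kfree(NULL) is a no-op */
		ctx->cfg_buf = NULL;		/* clear at the call site if it may be reused */

		if (ctx->pending_skb) {
			dev_kfree_skb(ctx->pending_skb);
			ctx->pending_skb = NULL;
		}
	}
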
diff --git a/drivers/staging/bcm/Osal_Misc.c b/drivers/staging/bcm/Osal_Misc.c
deleted file mode 100644
index feefd20..0000000
--- a/drivers/staging/bcm/Osal_Misc.c
+++ /dev/null
@@ -1,27 +0,0 @@
-	/*++
-
-	Copyright (c) Beceem Communications Inc.
-
-	Module Name:
-		WIN_Misc.c
-
-	Abstract:
-		Implements the Miscelanneous OS Construts
-			Linked Lists
-			Dispatcher Objects(Events,Semaphores,Spin Locks and the like)
-			Files
-
-	Revision History:
-		Who         When        What
-		--------    --------    ----------------------------------------------
-		Name		Date		Created/reviewed/modified
-		Rajeev		24/1/08		Created
-	Notes:
-
-	--*/
-#include "headers.h"
-
-bool OsalMemCompare(void *dest, void *src, UINT len)
-{
-	return (memcmp(src, dest, len));
-}
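The deleted Osal_Misc.c held a single helper, OsalMemCompare(), which swapped its arguments into memcmp() and returned the raw result despite its bool return type; callers can simply use memcmp() directly. A minimal sketch with a hypothetical comparison:

	#include <linux/types.h>
	#include <linux/string.h>

	/* memcmp() returns 0 when the two regions are equal */
	static bool mac_matches(const u8 *stored, const u8 *incoming)
	{
		return memcmp(stored, incoming, 6) == 0;
	}
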
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index 8a38cf4..d1ca191 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -1,10 +1,54 @@
 #include "headers.h"
 
+static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,B_UINT16  uiClsId,S_SERVICEFLOW_TABLE *psServiceFlowTable,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+
+static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,B_UINT16  uiClsId,S_SERVICEFLOW_ENTRY *pstServiceFlowEntry,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+
+static UINT CreateClassifierPHSRule(B_UINT16  uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI);
+
+static UINT UpdateClassifierPHSRule(B_UINT16  uiClsId,S_CLASSIFIER_ENTRY *pstClassifierEntry,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
+
+static BOOLEAN ValidatePHSRuleComplete(S_PHS_RULE *psPhsRule);
+
+static BOOLEAN DerefPhsRule(B_UINT16  uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule);
+
+static UINT GetClassifierEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiClsid,E_CLASSIFIER_ENTRY_CONTEXT eClsContext, S_CLASSIFIER_ENTRY **ppstClassifierEntry);
+
+static UINT GetPhsRuleEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,S_PHS_RULE **ppstPhsRule);
+
+static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable);
+
+static int phs_compress(S_PHS_RULE   *phs_members,unsigned char *in_buf,
+						unsigned char *out_buf,unsigned int *header_size,UINT *new_header_size );
+
+
+static int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
+								unsigned char *phsf,unsigned char *phsm,unsigned int phss,unsigned int phsv,UINT *new_header_size );
+
+static int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,\
+						  S_PHS_RULE   *phs_rules,UINT *header_size);
+
+
+static ULONG PhsCompress(void* pvContext,
+				  B_UINT16 uiVcid,
+				  B_UINT16 uiClsId,
+				  void *pvInputBuffer,
+				  void *pvOutputBuffer,
+				  UINT *pOldHeaderSize,
+				  UINT *pNewHeaderSize );
+
+static ULONG PhsDeCompress(void* pvContext,
+				  B_UINT16 uiVcid,
+				  void *pvInputBuffer,
+				  void *pvOutputBuffer,
+				  UINT *pInHeaderSize,
+				  UINT *pOutHeaderSize);
+
+
+
 #define IN
 #define OUT
 
-void DumpDataPacketHeader(PUCHAR pPkt);
-
 /*
 Function:				PHSTransmit
 
@@ -81,8 +125,6 @@
 	{
 
 
-		//DumpDataPacketHeader(pucPHSPktHdrInBuf);
-
 		// Step 2 Supress Header using PHS and fill into intermediate ucaPHSPktHdrOutBuf.
 	// Suppress only if IP Header and PHS Enabled For the Service Flow
 		if(((usPacketType == ETHERNET_FRAMETYPE_IPV4) ||
@@ -120,15 +162,15 @@
 						if(newPacket == NULL)
 							return STATUS_FAILURE;
 
-						bcm_kfree_skb(Packet);
+						dev_kfree_skb(Packet);
 						*pPacket = Packet = newPacket;
 						pucPHSPktHdrInBuf = Packet->data  + BytesToRemove;
 					}
 
 					numBytesCompressed = unPhsOldHdrSize - (unPHSNewPktHeaderLen+PHSI_LEN);
 
-					OsalMemMove(pucPHSPktHdrInBuf + numBytesCompressed, pucPHSPktHdrOutBuf, unPHSNewPktHeaderLen + PHSI_LEN);
-					OsalMemMove(Packet->data + numBytesCompressed, Packet->data, BytesToRemove);
+					memcpy(pucPHSPktHdrInBuf + numBytesCompressed, pucPHSPktHdrOutBuf, unPHSNewPktHeaderLen + PHSI_LEN);
+					memcpy(Packet->data + numBytesCompressed, Packet->data, BytesToRemove);
 					skb_pull(Packet, numBytesCompressed);
 
 					return STATUS_SUCCESS;
@@ -223,23 +265,12 @@
 			}
 		}
 
-		OsalMemMove(packet->data, Adapter->ucaPHSPktRestoreBuf, nStandardPktHdrLen);
+		memcpy(packet->data, Adapter->ucaPHSPktRestoreBuf, nStandardPktHdrLen);
 	}
 
 	return STATUS_SUCCESS;
 }
 
-void DumpDataPacketHeader(PUCHAR pPkt)
-{
-	struct iphdr *iphd = (struct iphdr*)pPkt;
-    PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"Phs Send/Recieve : IP Packet Hdr \n");
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"TOS : %x \n",iphd->tos);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"Src  IP : %x \n",iphd->saddr);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"Dest IP : %x \n \n",iphd->daddr);
-
-}
-
 void DumpFullPacket(UCHAR *pBuf,UINT nPktLen)
 {
 	PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -270,15 +301,9 @@
 		return -EINVAL;
 
 	pPhsdeviceExtension->pstServiceFlowPhsRulesTable =
-      (S_SERVICEFLOW_TABLE*)OsalMemAlloc(sizeof(S_SERVICEFLOW_TABLE),
-            PHS_MEM_TAG);
+		kzalloc(sizeof(S_SERVICEFLOW_TABLE), GFP_KERNEL);
 
-    if(pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
-	{
-		OsalZeroMemory(pPhsdeviceExtension->pstServiceFlowPhsRulesTable,
-              sizeof(S_SERVICEFLOW_TABLE));
-	}
-	else
+    if(!pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation ServiceFlowPhsRulesTable failed");
 		return -ENOMEM;
@@ -288,14 +313,8 @@
 	for(i=0;i<MAX_SERVICEFLOWS;i++)
 	{
 		S_SERVICEFLOW_ENTRY sServiceFlow = pstServiceFlowTable->stSFList[i];
-		sServiceFlow.pstClassifierTable = (S_CLASSIFIER_TABLE*)OsalMemAlloc(
-            sizeof(S_CLASSIFIER_TABLE), PHS_MEM_TAG);
-		if(sServiceFlow.pstClassifierTable)
-		{
-			OsalZeroMemory(sServiceFlow.pstClassifierTable,sizeof(S_CLASSIFIER_TABLE));
-			pstServiceFlowTable->stSFList[i].pstClassifierTable = sServiceFlow.pstClassifierTable;
-    	}
-		else
+		sServiceFlow.pstClassifierTable = kzalloc(sizeof(S_CLASSIFIER_TABLE), GFP_KERNEL);
+		if(!sServiceFlow.pstClassifierTable)
 		{
 			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
 			free_phs_serviceflow_rules(pPhsdeviceExtension->
@@ -305,9 +324,7 @@
 		}
 	}
 
-
-	pPhsdeviceExtension->CompressedTxBuffer =
-          OsalMemAlloc(PHS_BUFFER_SIZE,PHS_MEM_TAG);
+	pPhsdeviceExtension->CompressedTxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
 
     if(pPhsdeviceExtension->CompressedTxBuffer == NULL)
 	{
@@ -317,12 +334,11 @@
 		return -ENOMEM;
 	}
 
-	pPhsdeviceExtension->UnCompressedRxBuffer =
-      OsalMemAlloc(PHS_BUFFER_SIZE,PHS_MEM_TAG);
+    pPhsdeviceExtension->UnCompressedRxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
 	if(pPhsdeviceExtension->UnCompressedRxBuffer == NULL)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
-		OsalMemFree(pPhsdeviceExtension->CompressedTxBuffer,PHS_BUFFER_SIZE);
+		kfree(pPhsdeviceExtension->CompressedTxBuffer);
 		free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
 		pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
 		return -ENOMEM;
@@ -343,16 +359,11 @@
 		pPHSDeviceExt->pstServiceFlowPhsRulesTable = NULL;
 	}
 
-	if(pPHSDeviceExt->CompressedTxBuffer)
-	{
-		OsalMemFree(pPHSDeviceExt->CompressedTxBuffer,PHS_BUFFER_SIZE);
-		pPHSDeviceExt->CompressedTxBuffer = NULL;
-	}
-	if(pPHSDeviceExt->UnCompressedRxBuffer)
-	{
-		OsalMemFree(pPHSDeviceExt->UnCompressedRxBuffer,PHS_BUFFER_SIZE);
-		pPHSDeviceExt->UnCompressedRxBuffer = NULL;
-	}
+	kfree(pPHSDeviceExt->CompressedTxBuffer);
+	pPHSDeviceExt->CompressedTxBuffer = NULL;
+
+	kfree(pPHSDeviceExt->UnCompressedRxBuffer);
+	pPHSDeviceExt->UnCompressedRxBuffer = NULL;
 
 	return 0;
 }
@@ -478,20 +489,12 @@
 			{
 				if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].bUsed && pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule)
 				{
-					if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
-                                        .pstPhsRule->u8PHSI == u8PHSI)
-					{
-						if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule
-                                                ->u8RefCnt)
-							pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule
-						          ->u8RefCnt--;
-						if(0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
-                            .pstPhsRule->u8RefCnt)
-							OsalMemFree(pstClassifierRulesTable
-						    ->stActivePhsRulesList[nClsidIndex].pstPhsRule,
-						      sizeof(S_PHS_RULE));
-						OsalZeroMemory(&pstClassifierRulesTable
-							->stActivePhsRulesList[nClsidIndex],
+					if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8PHSI == u8PHSI) {
+						if(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
+							pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt--;
+						if(0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
+							kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
+						memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0,
 							sizeof(S_CLASSIFIER_ENTRY));
 					}
 				}
@@ -548,10 +551,10 @@
 				if(pstClassifierEntry->pstPhsRule->u8RefCnt)
 				pstClassifierEntry->pstPhsRule->u8RefCnt--;
 				if(0==pstClassifierEntry->pstPhsRule->u8RefCnt)
-				OsalMemFree(pstClassifierEntry->pstPhsRule,sizeof(S_PHS_RULE));
+					kfree(pstClassifierEntry->pstPhsRule);
 
 			}
-			OsalZeroMemory(pstClassifierEntry,sizeof(S_CLASSIFIER_ENTRY));
+			memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_ENTRY));
 		}
 
 		nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
@@ -559,10 +562,8 @@
 
 	   if((nClsidIndex != PHS_INVALID_TABLE_INDEX) && (!pstClassifierEntry->bUnclassifiedPHSRule))
 		{
-			if(pstClassifierEntry->pstPhsRule)
-			//Delete the classifier entry
-			OsalMemFree(pstClassifierEntry->pstPhsRule,sizeof(S_PHS_RULE));
-			OsalZeroMemory(pstClassifierEntry,sizeof(S_CLASSIFIER_ENTRY));
+			kfree(pstClassifierEntry->pstPhsRule);
+			memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_ENTRY));
 		}
 	}
 	return lStatus;
@@ -619,14 +620,11 @@
 						                                    .pstPhsRule->u8RefCnt--;
 					if(0==pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
                                                           .pstPhsRule->u8RefCnt)
-						OsalMemFree(pstClassifierRulesTable
-						            ->stActivePhsRulesList[nClsidIndex].pstPhsRule,
-						             sizeof(S_PHS_RULE));
+						kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
 					    pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex]
                                         .pstPhsRule = NULL;
 				}
-				OsalZeroMemory(&pstClassifierRulesTable
-                    ->stActivePhsRulesList[nClsidIndex],sizeof(S_CLASSIFIER_ENTRY));
+				memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0, sizeof(S_CLASSIFIER_ENTRY));
 				if(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule)
 				{
 					if(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
@@ -635,15 +633,12 @@
 						                  .pstPhsRule->u8RefCnt--;
 					if(0 == pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
                                         .pstPhsRule->u8RefCnt)
-						OsalMemFree(pstClassifierRulesTable
-						      ->stOldPhsRulesList[nClsidIndex].pstPhsRule,
-						       sizeof(S_PHS_RULE));
+						kfree(pstClassifierRulesTable
+						      ->stOldPhsRulesList[nClsidIndex].pstPhsRule);
 					pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex]
                               .pstPhsRule = NULL;
 				}
-				OsalZeroMemory(&pstClassifierRulesTable
-                  ->stOldPhsRulesList[nClsidIndex],
-                   sizeof(S_CLASSIFIER_ENTRY));
+				memset(&pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex], 0, sizeof(S_CLASSIFIER_ENTRY));
 			}
 		}
 		pstServiceFlowEntry->bUsed = FALSE;
@@ -849,7 +844,7 @@
 // Does not return any value.
 //-----------------------------------------------------------------------------
 
-void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
+static void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable)
 {
 	int i,j;
     PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
@@ -876,8 +871,7 @@
   							                                                ->u8RefCnt--;
 						if(0==pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule
                                                                 ->u8RefCnt)
-							OsalMemFree(pstClassifierRulesTable->stActivePhsRulesList[j].
-							                                              pstPhsRule, sizeof(S_PHS_RULE));
+							kfree(pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule);
 						pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule = NULL;
 					}
 					if(pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule)
@@ -888,24 +882,23 @@
 							                                          ->u8RefCnt--;
 						if(0==pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule
                                                                       ->u8RefCnt)
-							OsalMemFree(pstClassifierRulesTable->stOldPhsRulesList[j]
-							                        .pstPhsRule,sizeof(S_PHS_RULE));
+							kfree(pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule);
 						pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule = NULL;
 					}
 				}
-			    OsalMemFree(pstClassifierRulesTable,sizeof(S_CLASSIFIER_TABLE));
+				kfree(pstClassifierRulesTable);
 			    stServiceFlowEntry.pstClassifierTable = pstClassifierRulesTable = NULL;
 			}
 		}
 	}
 
-	OsalMemFree(psServiceFlowRulesTable,sizeof(S_SERVICEFLOW_TABLE));
-	psServiceFlowRulesTable = NULL;
+	kfree(psServiceFlowRulesTable);
+	psServiceFlowRulesTable = NULL;
 }
 
 
 
-BOOLEAN ValidatePHSRuleComplete(IN S_PHS_RULE *psPhsRule)
+static BOOLEAN ValidatePHSRuleComplete(IN S_PHS_RULE *psPhsRule)
 {
 	if(psPhsRule)
 	{
@@ -988,9 +981,9 @@
 	return PHS_INVALID_TABLE_INDEX;
 }
 
-UINT GetPhsRuleEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
-      IN B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,
-      OUT S_PHS_RULE **ppstPhsRule)
+static UINT GetPhsRuleEntry(IN S_CLASSIFIER_TABLE *pstClassifierTable,
+			    IN B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,
+			    OUT S_PHS_RULE **ppstPhsRule)
 {
 	int  i;
 	S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
@@ -1102,7 +1095,7 @@
 		if(psPhsRule->u8PHSFLength)
 		{
 			//update PHSF
-			OsalMemMove(pstClassifierEntry->pstPhsRule->u8PHSF,
+			memcpy(pstClassifierEntry->pstPhsRule->u8PHSF,
 			    psPhsRule->u8PHSF , MAX_PHS_LENGTHS);
 		}
 		if(psPhsRule->u8PHSFLength)
@@ -1114,7 +1107,7 @@
 		if(psPhsRule->u8PHSMLength)
 		{
 			//update PHSM
-			OsalMemMove(pstClassifierEntry->pstPhsRule->u8PHSM,
+			memcpy(pstClassifierEntry->pstPhsRule->u8PHSM,
 			    psPhsRule->u8PHSM, MAX_PHS_LENGTHS);
 		}
 		if(psPhsRule->u8PHSMLength)
@@ -1147,7 +1140,7 @@
 	return uiStatus;
 }
 
-UINT CreateClassifierPHSRule(IN B_UINT16  uiClsId,
+static UINT CreateClassifierPHSRule(IN B_UINT16  uiClsId,
     S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,
     E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI)
 {
@@ -1234,8 +1227,7 @@
 	{
 		if(psClassifierRules->pstPhsRule == NULL)
 		{
-			psClassifierRules->pstPhsRule = (S_PHS_RULE*)OsalMemAlloc
-                (sizeof(S_PHS_RULE),PHS_MEM_TAG);
+			psClassifierRules->pstPhsRule = kmalloc(sizeof(S_PHS_RULE),GFP_KERNEL);
 
           if(NULL == psClassifierRules->pstPhsRule)
 				return ERR_PHSRULE_MEMALLOC_FAIL;
@@ -1247,7 +1239,7 @@
 		psClassifierRules->bUnclassifiedPHSRule = psPhsRule->bUnclassifiedPHSRule;
 
         /* Update The PHS rule */
-		OsalMemMove(psClassifierRules->pstPhsRule,
+		memcpy(psClassifierRules->pstPhsRule,
 		    psPhsRule, sizeof(S_PHS_RULE));
 	}
 	else
@@ -1259,7 +1251,7 @@
 }
 
 
-UINT UpdateClassifierPHSRule(IN B_UINT16  uiClsId,
+static UINT UpdateClassifierPHSRule(IN B_UINT16  uiClsId,
       IN S_CLASSIFIER_ENTRY *pstClassifierEntry,
       S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,
       B_UINT8 u8AssociatedPHSI)
@@ -1289,13 +1281,13 @@
 		//Step 2.a PHS Rule Does Not Exist .Create New PHS Rule for uiClsId
 		if(FALSE == bPHSRuleOrphaned)
 		{
-			pstClassifierEntry->pstPhsRule = (S_PHS_RULE*)OsalMemAlloc(sizeof(S_PHS_RULE),PHS_MEM_TAG);
+			pstClassifierEntry->pstPhsRule = kmalloc(sizeof(S_PHS_RULE), GFP_KERNEL);
 			if(NULL == pstClassifierEntry->pstPhsRule)
 			{
 				return ERR_PHSRULE_MEMALLOC_FAIL;
 			}
 		}
-		OsalMemMove(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(S_PHS_RULE));
+		memcpy(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(S_PHS_RULE));
 
 	}
 	else
@@ -1304,14 +1296,8 @@
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nTying Classifier to Existing PHS Rule");
 		if(bPHSRuleOrphaned)
 		{
-		    if(pstClassifierEntry->pstPhsRule)
-		    {
-		    	//Just Free the PHS Rule as Ref Count is Zero
-		    	OsalMemFree(pstClassifierEntry->pstPhsRule,sizeof(S_PHS_RULE));
+			kfree(pstClassifierEntry->pstPhsRule);
 			pstClassifierEntry->pstPhsRule = NULL;
-
-		    }
-
 		}
 		pstClassifierEntry->pstPhsRule = pstAddPhsRule;
 
@@ -1326,7 +1312,7 @@
 
 }
 
-BOOLEAN DerefPhsRule(IN B_UINT16  uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule)
+static BOOLEAN DerefPhsRule(IN B_UINT16  uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule)
 {
 	if(pstPhsRule==NULL)
 		return FALSE;
@@ -1345,22 +1331,6 @@
 	}
 }
 
-static void DumpBuffer(PVOID BuffVAddress, int xferSize)
-{
-	int i;
-	int iPrintLength;
-	PUCHAR temp=(PUCHAR)BuffVAddress;
-    PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
-	iPrintLength=(xferSize<32?xferSize:32);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\n");
-
-	for (i=0;i < iPrintLength;i++) {
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "%x|",temp[i]);
-	}
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\n");
-}
-
-
 void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension)
 {
 	int i,j,k,l;
@@ -1520,8 +1490,8 @@
 //	size-The number of bytes copied into the output buffer i.e dynamic fields
 //	0	-If PHS rule is NULL.If PHSV field is not set.If the verification fails.
 //-----------------------------------------------------------------------------
-int phs_compress(S_PHS_RULE  *phs_rule,unsigned char *in_buf
-						,unsigned char *out_buf,UINT *header_size,UINT *new_header_size)
+static int phs_compress(S_PHS_RULE  *phs_rule,unsigned char *in_buf
+			,unsigned char *out_buf,UINT *header_size,UINT *new_header_size)
 {
 	unsigned char *old_addr = out_buf;
 	int supress = 0;
@@ -1581,9 +1551,9 @@
 //	0	-Packet has failed the verification.
 //-----------------------------------------------------------------------------
 
- int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
-								unsigned char *phsf,unsigned char *phsm,unsigned int phss,
-								unsigned int phsv,UINT* new_header_size)
+static int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
+				unsigned char *phsf,unsigned char *phsm,unsigned int phss,
+				unsigned int phsv,UINT* new_header_size)
 {
 	unsigned int size=0;
 	int bit,i=0;
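The PHSModule.c conversion above follows a common pattern: OsalMemAlloc() plus OsalZeroMemory() collapses into a single kzalloc(), OsalMemFree() becomes kfree() (which needs no size argument and accepts NULL), and the OsalMemMove() calls become plain memcpy(). A minimal sketch of the allocation side, with a hypothetical table type:

	#include <linux/slab.h>

	struct example_table {			/* hypothetical rule table */
		int entries[16];
	};

	static struct example_table *example_table_alloc(void)
	{
		/* kzalloc() = allocate + zero in one call */
		struct example_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			return NULL;		/* caller maps this to -ENOMEM */
		return t;
	}

	static void example_table_free(struct example_table *t)
	{
		kfree(t);			/* no size argument, NULL-safe */
	}
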
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
index bf2b576..0dd05a7 100644
--- a/drivers/staging/bcm/PHSModule.h
+++ b/drivers/staging/bcm/PHSModule.h
@@ -27,19 +27,6 @@
 
 int phs_init(PPHS_DEVICE_EXTENSION pPhsdeviceExtension,PMINI_ADAPTER Adapter);
 
-void free_phs_serviceflow_rules(S_SERVICEFLOW_TABLE *psServiceFlowRulesTable);
-
-int phs_compress(S_PHS_RULE   *phs_members,unsigned char *in_buf,
-						unsigned char *out_buf,unsigned int *header_size,UINT *new_header_size );
-
-
-int verify_suppress_phsf(unsigned char *in_buffer,unsigned char *out_buffer,
-								unsigned char *phsf,unsigned char *phsm,unsigned int phss,unsigned int phsv,UINT *new_header_size );
-
-int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,\
-						  S_PHS_RULE   *phs_rules,UINT *header_size);
-
-
 int PhsCleanup(PPHS_DEVICE_EXTENSION pPHSDeviceExt);
 
 //Utility Functions
@@ -52,42 +39,10 @@
 ULONG PhsDeleteSFRules(void* pvContext,B_UINT16 uiVcid) ;
 
 
-ULONG PhsCompress(void* pvContext,
-				  B_UINT16 uiVcid,
-				  B_UINT16 uiClsId,
-				  void *pvInputBuffer,
-				  void *pvOutputBuffer,
-				  UINT *pOldHeaderSize,
-				  UINT *pNewHeaderSize );
-
-ULONG PhsDeCompress(void* pvContext,
-				  B_UINT16 uiVcid,
-				  void *pvInputBuffer,
-				  void *pvOutputBuffer,
-				  UINT *pInHeaderSize,
-				  UINT *pOutHeaderSize);
-
-
 BOOLEAN ValidatePHSRule(S_PHS_RULE *psPhsRule);
 
-BOOLEAN ValidatePHSRuleComplete(S_PHS_RULE *psPhsRule);
-
 UINT GetServiceFlowEntry(S_SERVICEFLOW_TABLE *psServiceFlowTable,B_UINT16 uiVcid,S_SERVICEFLOW_ENTRY **ppstServiceFlowEntry);
 
-UINT GetClassifierEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiClsid,E_CLASSIFIER_ENTRY_CONTEXT eClsContext, S_CLASSIFIER_ENTRY **ppstClassifierEntry);
-
-UINT GetPhsRuleEntry(S_CLASSIFIER_TABLE *pstClassifierTable,B_UINT32 uiPHSI,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,S_PHS_RULE **ppstPhsRule);
-
-
-UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid,B_UINT16  uiClsId,S_SERVICEFLOW_TABLE *psServiceFlowTable,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
-
-UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid,B_UINT16  uiClsId,S_SERVICEFLOW_ENTRY *pstServiceFlowEntry,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
-
-UINT CreateClassifierPHSRule(B_UINT16  uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,E_CLASSIFIER_ENTRY_CONTEXT eClsContext,B_UINT8 u8AssociatedPHSI);
-
-UINT UpdateClassifierPHSRule(B_UINT16  uiClsId,S_CLASSIFIER_ENTRY *pstClassifierEntry,S_CLASSIFIER_TABLE *psaClassifiertable ,S_PHS_RULE *psPhsRule,B_UINT8 u8AssociatedPHSI);
-
-BOOLEAN DerefPhsRule(B_UINT16  uiClsId,S_CLASSIFIER_TABLE *psaClassifiertable,S_PHS_RULE *pstPhsRule);
 
 void DumpPhsRules(PPHS_DEVICE_EXTENSION pDeviceExtension);
 
diff --git a/drivers/staging/bcm/Protocol.h b/drivers/staging/bcm/Protocol.h
index 00f1cc1..b8a4009 100644
--- a/drivers/staging/bcm/Protocol.h
+++ b/drivers/staging/bcm/Protocol.h
@@ -85,10 +85,10 @@
 	ETH_HEADER_STRUC EThHdr;
 } __attribute__((packed)) ETH_CS_ETH2_FRAME;
 
+#define ETHERNET_FRAMETYPE_IPV4		ntohs(0x0800)
+#define ETHERNET_FRAMETYPE_IPV6 	ntohs(0x86dd)
+#define ETHERNET_FRAMETYPE_802QVLAN 	ntohs(0x8100)
 
-#define ETHERNET_FRAMETYPE_IPV4 ntohs(0x0800)
-#define ETHERNET_FRAMETYPE_IPV6 ntohs(0x86dd)
-#define ETHERNET_FRAMETYPE_802QVLAN 0x8100
 //Per SF CS Specification Encodings
 typedef enum _E_SERVICEFLOW_CS_SPEC_
 {
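In the Protocol.h hunk above, the 802.1Q constant now gets the same ntohs() conversion as the IPv4 and IPv6 EtherTypes, so all three are stored in network byte order and can be compared directly against the 16-bit type field as it arrives off the wire, with the byte swap done once on the constant rather than once per packet. A minimal sketch of such a check, with a hypothetical helper (htons() and ntohs() are the same swap, so either works on a constant):

	#include <linux/types.h>
	#include <linux/if_ether.h>

	/* eth->h_proto is big-endian on the wire; compare it against a
	 * constant converted once at build time, not per packet. */
	static bool frame_is_ipv4(const struct ethhdr *eth)
	{
		return eth->h_proto == htons(ETH_P_IP);	/* 0x0800 */
	}
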
diff --git a/drivers/staging/bcm/Prototypes.h b/drivers/staging/bcm/Prototypes.h
index 70ec8bc..b80b806 100644
--- a/drivers/staging/bcm/Prototypes.h
+++ b/drivers/staging/bcm/Prototypes.h
@@ -1,23 +1,12 @@
 #ifndef _PROTOTYPES_H_
 #define _PROTOTYPES_H_
 
-int BcmFileDownload(PMINI_ADAPTER Adapter,/**< Logical Adapter */
-                        char *path,     /**< path to image file */
-                        unsigned int loc    /**< Download Address on the chip*/
-                        );
 VOID LinkControlResponseMessage(PMINI_ADAPTER Adapter, PUCHAR pucBuffer);
 
 VOID StatisticsResponse(PMINI_ADAPTER Adapter,PVOID pvBuffer);
 
 VOID IdleModeResponse(PMINI_ADAPTER Adapter,PUINT puiBuffer);
 
-void bcm_kfree_skb(struct sk_buff *skb);
-VOID bcm_kfree(VOID *ptr);
-
-
-VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, 	/**<Pointer to the Adapter structure*/
-								struct sk_buff *skb);				/**<Pointer to the socket buffer*/
-
 int control_packet_handler	(PMINI_ADAPTER Adapter);
 
 VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter,UINT uiSearchRuleIndex);
@@ -38,25 +27,16 @@
 
 VOID flush_all_queues(PMINI_ADAPTER Adapter);
 
-USHORT	IpVersion4(PMINI_ADAPTER Adapter, /**< Pointer to the driver control structure */
-					struct iphdr *iphd, /**<Pointer to the IP Hdr of the packet*/
-					S_CLASSIFIER_RULE *pstClassifierRule );
-
-VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structure*/
-					INT iIndex/**<Queue Index*/
-					);
-
 VOID PruneQueueAllSF(PMINI_ADAPTER Adapter);
 
 INT SearchSfid(PMINI_ADAPTER Adapter,UINT uiSfid);
 
-USHORT GetPacketQueueIndex(PMINI_ADAPTER Adapter, /**<Pointer to the driver control structure */
-								struct sk_buff* Packet /**< Pointer to the Packet to be sent*/
-								);
+USHORT ClassifyPacket(PMINI_ADAPTER Adapter,struct sk_buff* skb);
 
-VOID
-reply_to_arp_request(struct sk_buff *skb  /**<sk_buff of ARP request*/
-						);
+BOOLEAN MatchSrcPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
+BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
+BOOLEAN MatchProtocol(S_CLASSIFIER_RULE *pstClassifierRule,UCHAR ucProtocol);
+
 
 INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
 					struct sk_buff *Packet, /**<data buffer*/
@@ -70,11 +50,9 @@
 							char *pControlPacket/**<Control Packet*/
 							);
 
-INT bcm_transmit(struct sk_buff *skb, 		/**< skb */
-					struct net_device *dev 	/**< net device pointer */
-					);
 
 int register_networkdev(PMINI_ADAPTER Adapter);
+void unregister_networkdev(PMINI_ADAPTER Adapter);
 
 INT AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter);
 
@@ -82,8 +60,6 @@
 
 INT FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter);
 
-int create_worker_threads(PMINI_ADAPTER psAdapter);
-
 int tx_pkt_handler(PMINI_ADAPTER Adapter);
 
 int  reset_card_proc(PMINI_ADAPTER Adapter );
@@ -92,7 +68,6 @@
 
 int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter);
 
-int bcm_parse_target_params(PMINI_ADAPTER Adapter);
 
 INT ReadMacAddressFromNVM(PMINI_ADAPTER Adapter);
 
@@ -110,26 +85,15 @@
 
 int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user * user_buffer);
 
-void SendLinkDown(PMINI_ADAPTER Adapter);
-
 void SendIdleModeResponse(PMINI_ADAPTER Adapter);
 
-void HandleShutDownModeRequest(PMINI_ADAPTER Adapter,PUCHAR pucBuffer);
 
-int  ProcessGetHostMibs(PMINI_ADAPTER Adapter, PVOID ioBuffer,
-	ULONG inputBufferLength);
-
-int GetDroppedAppCntrlPktMibs(PVOID ioBuffer, PPER_TARANG_DATA pTarang);
+int  ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *buf);
+void GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *ioBuffer, PPER_TARANG_DATA pTarang);
 void beceem_parse_target_struct(PMINI_ADAPTER Adapter);
 
-void doPowerAutoCorrection(PMINI_ADAPTER psAdapter);
-
 int bcm_ioctl_fw_download(PMINI_ADAPTER Adapter, FIRMWARE_INFO *psFwInfo);
 
-void bcm_unregister_networkdev(PMINI_ADAPTER Adapter);
-
-int SearchVcid(PMINI_ADAPTER Adapter,unsigned short usVcid);
-
 void CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
 		CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex);
 
@@ -149,7 +113,6 @@
 
 void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter,B_UINT16 TID,BOOLEAN bFreeAll);
 
-void beceem_protocol_reset (PMINI_ADAPTER Adapter);
 
 void flush_queue(PMINI_ADAPTER Adapter, UINT iQIndex);
 
@@ -164,31 +127,11 @@
 	UINT uiNumBytes);
 
 
-INT BeceemFlashBulkRead(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes);
-
-UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter);
 
 INT WriteBeceemEEPROM(PMINI_ADAPTER Adapter,UINT uiEEPROMOffset, UINT uiData);
 
-UINT BcmGetFlashSize(PMINI_ADAPTER Adapter);
-
-UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize);
-
-INT BeceemFlashBulkWrite(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes,
-	BOOLEAN bVerify);
-
 INT PropagateCalParamsFromFlashToMemory(PMINI_ADAPTER Adapter);
 
-INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter);
-
 
 INT BeceemEEPROMBulkWrite(
 	PMINI_ADAPTER Adapter,
@@ -198,11 +141,8 @@
 	BOOLEAN bVerify);
 
 
-INT ReadBeceemEEPROMBulk(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData, UINT dwNumData);
-
 INT ReadBeceemEEPROM(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData);
 
-NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter);
 
 INT BeceemNVMRead(
 	PMINI_ADAPTER Adapter,
@@ -217,24 +157,12 @@
 	UINT uiNumBytes,
 	BOOLEAN bVerify);
 
-INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize);
 
 INT BcmInitNVM(PMINI_ADAPTER Adapter);
 
-INT BcmGetNvmSize(PMINI_ADAPTER Adapter);
+INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize);
+BOOLEAN IsSectionExistInFlash(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
 
-INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
-
-VOID BcmValidateNvmType(PMINI_ADAPTER Adapter);
-
-VOID ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter);
-
-INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter);
-INT ReadDSDHeader(PMINI_ADAPTER Adapter, PDSD_HEADER psDSDHeader, FLASH2X_SECTION_VAL dsd);
-INT BcmGetActiveDSD(PMINI_ADAPTER Adapter);
-INT ReadISOHeader(PMINI_ADAPTER Adapter, PISO_HEADER psISOHeader, FLASH2X_SECTION_VAL IsoImage);
-INT BcmGetActiveISO(PMINI_ADAPTER Adapter);
-B_UINT8 IsOffsetWritable(PMINI_ADAPTER Adapter, UINT uiOffset);
 INT BcmGetFlash2xSectionalBitMap(PMINI_ADAPTER Adapter, PFLASH2X_BITMAP psFlash2xBitMap);
 
 INT BcmFlash2xBulkWrite(
@@ -251,7 +179,6 @@
 	FLASH2X_SECTION_VAL eFlashSectionVal,
 	UINT uiOffsetWithinSectionVal,
 	UINT uiNumBytes);
-INT BcmGetSectionValEndOffset(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlashSectionVal);
 
 INT BcmGetSectionValStartOffset(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlashSectionVal);
 
@@ -264,34 +191,13 @@
 INT BcmFlash2xWriteSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlashSectionVal);
 INT	validateFlash2xReadWrite(PMINI_ADAPTER Adapter, PFLASH2X_READWRITE psFlash2xReadWrite);
 INT IsFlash2x(PMINI_ADAPTER Adapter);
-INT GetFlashBaseAddr(PMINI_ADAPTER Adapter);
-INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiSectAlignAddr);
 INT	BcmCopySection(PMINI_ADAPTER Adapter,
 						FLASH2X_SECTION_VAL SrcSection,
 						FLASH2X_SECTION_VAL DstSection,
 						UINT offset,
 						UINT numOfBytes);
 
-INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset);
-INT BcmMakeFlashCSActive(PMINI_ADAPTER Adapter, UINT offset);
-INT ReadDSDSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
-INT ReadDSDPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
-FLASH2X_SECTION_VAL getHighestPriDSD(PMINI_ADAPTER Adapter);
-INT ReadISOSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
-INT ReadISOPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
-FLASH2X_SECTION_VAL getHighestPriISO(PMINI_ADAPTER Adapter);
-INT WriteToFlashWithoutSectorErase(PMINI_ADAPTER Adapter,
-										PUINT pBuff,
-										FLASH2X_SECTION_VAL eFlash2xSectionVal,
-										UINT uiOffset,
-										UINT uiNumBytes
-										);
 
-//UINT getNumOfSubSectionWithWRPermisson(PMINI_ADAPTER Adapter, SECTION_TYPE secType);
-BOOLEAN IsSectionExistInFlash(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
-INT IsSectionWritable(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL Section);
-INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
-INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
 BOOLEAN IsNonCDLessDevice(PMINI_ADAPTER Adapter);
 
 
@@ -300,7 +206,6 @@
 int wrmaltWithLock (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t sSize);
 int rdmaltWithLock (PMINI_ADAPTER Adapter, UINT uiAddress, PUINT pucBuff, size_t sSize);
 
-int rdmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
 int wrmWithLock(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t size);
 INT buffDnldVerify(PMINI_ADAPTER Adapter, unsigned char *mappedbuffer, unsigned int u32FirmwareLength,
 		unsigned long u32StartingAddress);
@@ -309,11 +214,6 @@
 VOID putUsbSuspend(struct work_struct *work);
 BOOLEAN IsReqGpioIsLedInNVM(PMINI_ADAPTER Adapter, UINT gpios);
 
-#ifdef BCM_SHM_INTERFACE
-INT beceem_virtual_device_init(void);
-VOID virtual_mail_box_interrupt(void);
-INT beceem_virtual_device_exit(void);
-#endif
 
 #endif
 
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 75b2b87..8ce4536 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -4,15 +4,14 @@
 */
 #include "headers.h"
 
-BOOLEAN MatchSrcIpAddress(S_CLASSIFIER_RULE *pstClassifierRule,ULONG ulSrcIP);
-BOOLEAN MatchTos(S_CLASSIFIER_RULE *pstClassifierRule,UCHAR ucTypeOfService);
-BOOLEAN MatchSrcPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushSrcPort);
-BOOLEAN MatchDestPort(S_CLASSIFIER_RULE *pstClassifierRule,USHORT ushDestPort);
-BOOLEAN MatchProtocol(S_CLASSIFIER_RULE *pstClassifierRule,UCHAR ucProtocol);
-BOOLEAN MatchDestIpAddress(S_CLASSIFIER_RULE *pstClassifierRule,ULONG ulDestIP);
-USHORT ClassifyPacket(PMINI_ADAPTER Adapter,struct sk_buff* skb);
-void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo);
-BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,S_CLASSIFIER_RULE *pstClassifierRule, B_UINT8 EthCSCupport);
+static void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo);
+static BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,S_CLASSIFIER_RULE *pstClassifierRule, B_UINT8 EthCSCupport);
+
+static USHORT	IpVersion4(PMINI_ADAPTER Adapter, struct iphdr *iphd,
+			   S_CLASSIFIER_RULE *pstClassifierRule );
+
+static VOID PruneQueue(PMINI_ADAPTER Adapter, INT iIndex);
+
 
 /*******************************************************************
 * Function    - MatchSrcIpAddress()
@@ -205,11 +204,10 @@
 Compares IPV4 Ip address and port number
 @return Queue Index.
 */
-USHORT	IpVersion4(PMINI_ADAPTER Adapter, /**< Pointer to the driver control structure */
-					struct iphdr *iphd, /**<Pointer to the IP Hdr of the packet*/
-					S_CLASSIFIER_RULE *pstClassifierRule )
+static USHORT	IpVersion4(PMINI_ADAPTER Adapter,
+			   struct iphdr *iphd,
+			   S_CLASSIFIER_RULE *pstClassifierRule )
 {
-	//IPHeaderFormat 		*pIpHeader=NULL;
 	xporthdr     		*xprt_hdr=NULL;
 	BOOLEAN	bClassificationSucceed=FALSE;
 
@@ -261,15 +259,6 @@
 		//if protocol is not TCP or UDP then no need of comparing source port and destination port
 		if(iphd->protocol!=TCP && iphd->protocol!=UDP)
 			break;
-#if 0
-		//check if memory is available of src and Dest port
-		if(ETH_AND_IP_HEADER_LEN + L4_SRC_PORT_LEN + L4_DEST_PORT_LEN > Packet->len)
-		{
-			//This is not an erroneous condition and pkt will be checked for next classification.
-			bClassificationSucceed = FALSE;
-			break;
-		}
-#endif
 		//******************Checking Transport Layer Header field if present *****************//
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
 			(iphd->protocol==UDP)?xprt_hdr->uhdr.source:xprt_hdr->thdr.source);
@@ -312,29 +301,6 @@
 
 	return bClassificationSucceed;
 }
-/**
-@ingroup tx_functions
-@return  Queue Index based on priority.
-*/
-USHORT GetPacketQueueIndex(PMINI_ADAPTER Adapter, /**<Pointer to the driver control structure */
-								struct sk_buff* Packet /**< Pointer to the Packet to be sent*/
-								)
-{
-	USHORT			usIndex=-1;
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "=====>");
-
-	if(NULL==Adapter || NULL==Packet)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "Got NULL Values<======");
-		return -1;
-	}
-
-	usIndex = ClassifyPacket(Adapter,Packet);
-
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "Got Queue Index %x",usIndex);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, QUEUE_INDEX, DBG_LVL_ALL, "GetPacketQueueIndex <==============");
-	return usIndex;
-}
 
 VOID PruneQueueAllSF(PMINI_ADAPTER Adapter)
 {
@@ -357,23 +323,21 @@
 drops packets from the Head till the number of bytes is
 less than or equal to max queue size for the queue.
 */
-VOID PruneQueue(PMINI_ADAPTER Adapter,/**<Pointer to the driver control structure*/
-					INT iIndex/**<Queue Index*/
-					)
+static VOID PruneQueue(PMINI_ADAPTER Adapter, INT iIndex)
 {
 	struct sk_buff* PacketToDrop=NULL;
-	struct net_device_stats*  netstats=NULL;
+	struct net_device_stats *netstats;
 
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "=====> Index %d",iIndex);
 
    	if(iIndex == HiPriority)
-       	return;
+		return;
 
 	if(!Adapter || (iIndex < 0) || (iIndex > HiPriority))
 		return;
 
 	/* To Store the netdevice statistic */
-	netstats = &((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats;
+	netstats = &Adapter->dev->stats;
 
 	spin_lock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
 
@@ -395,9 +359,13 @@
 
 		if(PacketToDrop)
 		{
-			if(netstats)
-				netstats->tx_dropped++;
-			atomic_inc(&Adapter->TxDroppedPacketCount);
+			struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, iIndex);
+			if (netif_msg_tx_err(Adapter))
+				pr_info(PFX "%s: tx queue %d overlimit\n",
+					Adapter->dev->name, iIndex);
+
+			txq->tx_dropped++;
+
 			DEQUEUEPACKET(Adapter->PackInfo[iIndex].FirstTxQueue,
 						Adapter->PackInfo[iIndex].LastTxQueue);
 			/// update current bytes and packets count
@@ -407,7 +375,7 @@
 			/// update dropped bytes and packets counts
 			Adapter->PackInfo[iIndex].uiDroppedCountBytes += PacketToDrop->len;
 			Adapter->PackInfo[iIndex].uiDroppedCountPackets++;
-			bcm_kfree_skb(PacketToDrop);
+			dev_kfree_skb(PacketToDrop);
 
 		}
 
@@ -416,7 +384,6 @@
 			Adapter->PackInfo[iIndex].uiDroppedCountPackets);
 
 		atomic_dec(&Adapter->TotalPacketCount);
-		Adapter->bcm_jiffies = jiffies;
 	}
 
 	spin_unlock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
@@ -430,16 +397,15 @@
 {
 	INT		iQIndex;
 	UINT	uiTotalPacketLength;
-	struct sk_buff*				PacketToDrop=NULL;
-	struct net_device_stats*  	netstats=NULL;
+	struct sk_buff*			PacketToDrop=NULL;
 
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "=====>");
-	/* To Store the netdevice statistic */
-	netstats = &((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats;
 
 //	down(&Adapter->data_packet_queue_lock);
 	for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++)
 	{
+		struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, iQIndex);
+
 		spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
 		while(Adapter->PackInfo[iQIndex].FirstTxQueue)
 		{
@@ -447,8 +413,7 @@
 			if(PacketToDrop)
 			{
 				uiTotalPacketLength = PacketToDrop->len;
-				netstats->tx_dropped++;
-				atomic_inc(&Adapter->TxDroppedPacketCount);
+				txq->tx_dropped++;
 			}
 			else
 				uiTotalPacketLength = 0;
@@ -457,7 +422,7 @@
 						Adapter->PackInfo[iQIndex].LastTxQueue);
 
 			/* Free the skb */
-			bcm_kfree_skb(PacketToDrop);
+			dev_kfree_skb(PacketToDrop);
 
 			/// update current bytes and packets count
 			Adapter->PackInfo[iQIndex].uiCurrentBytesOnHost -= uiTotalPacketLength;
@@ -559,12 +524,6 @@
 
 	for(uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--)
 	{
-		if (Adapter->device_removed)
-		{
-			bClassificationSucceed = FALSE;
-			break;
-		}
-
 		if(bClassificationSucceed)
 			break;
 		//Iterate through all classifiers which are already in order of priority
@@ -810,7 +769,10 @@
 }
 
 
-BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,PS_ETHCS_PKT_INFO pstEthCsPktInfo,S_CLASSIFIER_RULE *pstClassifierRule, B_UINT8 EthCSCupport)
+static BOOLEAN EThCSClassifyPkt(PMINI_ADAPTER Adapter,struct sk_buff* skb,
+				PS_ETHCS_PKT_INFO pstEthCsPktInfo,
+				S_CLASSIFIER_RULE *pstClassifierRule,
+				B_UINT8 EthCSCupport)
 {
 	BOOLEAN bClassificationSucceed = FALSE;
 	bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule,((ETH_HEADER_STRUC *)(skb->data))->au8SourceAddress);
@@ -840,9 +802,11 @@
 	return bClassificationSucceed;
 }
 
-void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,PS_ETHCS_PKT_INFO pstEthCsPktInfo)
+static void EThCSGetPktInfo(PMINI_ADAPTER Adapter,PVOID pvEthPayload,
+			    PS_ETHCS_PKT_INFO pstEthCsPktInfo)
 {
 	USHORT u16Etype = ntohs(((ETH_HEADER_STRUC*)pvEthPayload)->u16Etype);
+
 	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,  "EthCSGetPktInfo : Eth Hdr Type : %X\n",u16Etype);
 	if(u16Etype > 0x5dc)
 	{
diff --git a/drivers/staging/bcm/TODO b/drivers/staging/bcm/TODO
index 366634b..cd3e9f2 100644
--- a/drivers/staging/bcm/TODO
+++ b/drivers/staging/bcm/TODO
@@ -1,15 +1,22 @@
+This driver is barely functional in its current state.
+
+BIG:
+	- the existing API (/dev/tarang) should be replaced.
+	  Is it possible to use the same API as the Intel WiMAX stack
+	  and share the same user-level components?
+	- QoS and queue model are non-standard and inflexible.
+	  Use the existing TC QoS?
+
 TODO:
+	- support more than one board - eliminate global variables
+	- remove the developer debug BCM_DEBUG() macros and
+	  add a limited number of messages through netif_msg()
 	- fix non-standard kernel style
-	- sparse warnings
 	- checkpatch warnings
-   	- remove compatiablity code for older kernels
-	- remove #ifdef's
-	- fix bogus device nameing and reference counting (see bcm_notify_event)
-	- fix use of file I/O to load config
-	- request firmware
-	- update to current network device API
-	- merge some files together
+	- use request_firmware()
+	- replace the file I/O used to load the config with a better API
+	- merge some files together?
 	- cleanup/eliminate debug messages
 
-	- integrate with existing Wimax stack?
+
 
diff --git a/drivers/staging/bcm/Transmit.c b/drivers/staging/bcm/Transmit.c
index 12f9e13..0f70009 100644
--- a/drivers/staging/bcm/Transmit.c
+++ b/drivers/staging/bcm/Transmit.c
@@ -6,7 +6,7 @@
 digraph transmit1 {
 node[shape=box]
 edge[weight=5;color=red]
-bcm_transmit->reply_to_arp_request[label="ARP"]
+
 bcm_transmit->GetPacketQueueIndex[label="IP Packet"]
 GetPacketQueueIndex->IpVersion4[label="IPV4"]
 GetPacketQueueIndex->IpVersion6[label="IPV6"]
@@ -35,169 +35,16 @@
 
 #include "headers.h"
 
-/*******************************************************************
-* Function    -	bcm_transmit()
-*
-* Description - This is the main transmit function for our virtual
-*				interface(veth0). It handles the ARP packets. It
-*				clones this packet and then Queue it to a suitable
-* 		 		Queue. Then calls the transmit_packet().
-*
-* Parameter   -	 skb - Pointer to the socket buffer structure
-*				 dev - Pointer to the virtual net device structure
-*
-* Returns     -	 zero (success) or -ve value (failure)
-*
-*********************************************************************/
-
-INT bcm_transmit(struct sk_buff *skb, 		/**< skb */
-					struct net_device *dev 	/**< net device pointer */
-					)
-{
-	PMINI_ADAPTER      	Adapter = NULL;
-	USHORT				qindex=0;
-	struct timeval tv;
-	UINT		pkt_type = 0;
-	UINT 		calltransmit = 0;
-
-	BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "\n%s====>\n",__FUNCTION__);
-
-	memset(&tv, 0, sizeof(tv));
-	/* Check for valid parameters */
-	if(skb == NULL || dev==NULL)
-	{
-	    BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX,TX_OSAL_DBG, DBG_LVL_ALL, "Got NULL skb or dev\n");
-		return -EINVAL;
-	}
-
-	Adapter = GET_BCM_ADAPTER(dev);
-	if(!Adapter)
-	{
-		BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Got Invalid Adapter\n");
-  		return -EINVAL;
-	}
-	if(Adapter->device_removed == TRUE || !Adapter->LinkUpStatus)
-	{
-		if(!netif_queue_stopped(dev)) {
-				netif_carrier_off(dev);
-				netif_stop_queue(dev);
-		}
-		return STATUS_FAILURE;
-	}
-	BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Packet size : %d\n", skb->len);
-
-	/*Add Ethernet CS check here*/
-	if(Adapter->TransferMode == IP_PACKET_ONLY_MODE )
-	{
-        pkt_type = ntohs(*(PUSHORT)(skb->data + 12));
-		/* Get the queue index where the packet is to be queued */
-		BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Getting the Queue Index.....");
-
-		qindex = GetPacketQueueIndex(Adapter,skb);
-
-		if((SHORT)INVALID_QUEUE_INDEX==(SHORT)qindex)
-		{
-			if(pkt_type == ETH_ARP_FRAME)
-			{
-				/*
-				Reply directly to ARP request packet
-				ARP Spoofing only if NO ETH CS rule matches for it
-				*/
-				BCM_DEBUG_PRINT (Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL,"ARP OPCODE = %02x",
-
-                (*(PUCHAR)(skb->data + 21)));
-
-                reply_to_arp_request(skb);
-
-                BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX,TX_OSAL_DBG, DBG_LVL_ALL,"After reply_to_arp_request \n");
-
-			}
-			else
-			{
-                BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL,
-    			"Invalid queue index, dropping pkt\n");
-
-				bcm_kfree_skb(skb);
-			}
-			return STATUS_SUCCESS;
-        }
-
-		if(Adapter->PackInfo[qindex].uiCurrentPacketsOnHost >= SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
-		{
-			atomic_inc(&Adapter->TxDroppedPacketCount);
-			bcm_kfree_skb(skb);
-			return STATUS_SUCCESS;
-		}
-
-		/* Now Enqueue the packet */
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "bcm_transmit Enqueueing the Packet To Queue %d",qindex);
-		spin_lock(&Adapter->PackInfo[qindex].SFQueueLock);
-		Adapter->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
-		Adapter->PackInfo[qindex].uiCurrentPacketsOnHost++;
-
-		*((B_UINT32 *)skb->cb + SKB_CB_LATENCY_OFFSET ) = jiffies;
-		ENQUEUEPACKET(Adapter->PackInfo[qindex].FirstTxQueue,
-  	                  Adapter->PackInfo[qindex].LastTxQueue, skb);
-		atomic_inc(&Adapter->TotalPacketCount);
-		spin_unlock(&Adapter->PackInfo[qindex].SFQueueLock);
-		do_gettimeofday(&tv);
-
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL,"ENQ: \n");
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "Pkt Len = %d, sec: %ld, usec: %ld\n",
-		(skb->len-ETH_HLEN), tv.tv_sec, tv.tv_usec);
-
-#ifdef BCM_SHM_INTERFACE
-		spin_lock(&Adapter->txtransmitlock);
-		if(Adapter->txtransmit_running == 0)
-		{
-			Adapter->txtransmit_running = 1;
-			calltransmit = 1;
-		}
-		else
-			calltransmit = 0;
-
-		spin_unlock(&Adapter->txtransmitlock);
-#endif
-		if(calltransmit == 1)
-			transmit_packets(Adapter);
-		else
-		{
-			if(!atomic_read(&Adapter->TxPktAvail))
-			{
-				atomic_set(&Adapter->TxPktAvail, 1);
-#ifdef BCM_SHM_INTERFACE
-				virtual_mail_box_interrupt();
-#endif
-				wake_up(&Adapter->tx_packet_wait_queue);
-			}
-		}
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_OSAL_DBG, DBG_LVL_ALL, "<====");
-	}
-	else
-		bcm_kfree_skb(skb);
-
-  return STATUS_SUCCESS;
-}
-
 
 /**
 @ingroup ctrl_pkt_functions
 This function dispatches control packet to the h/w interface
 @return zero(success) or -ve value(failure)
 */
-INT SendControlPacket(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
-							char *pControlPacket/**<Control Packet*/
-							)
+INT SendControlPacket(PMINI_ADAPTER Adapter, char *pControlPacket)
 {
-	PLEADER PLeader = NULL;
-	struct timeval tv;
-	memset(&tv, 0, sizeof(tv));
+	PLEADER PLeader = (PLEADER)pControlPacket;
 
-
-
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "========>");
-
-	PLeader=(PLEADER)pControlPacket;
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Tx");
 	if(!pControlPacket || !Adapter)
 	{
@@ -208,12 +55,6 @@
 		((PLeader->PLength-1)/MAX_DEVICE_DESC_SIZE)+1))
     {
     	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "NO FREE DESCRIPTORS TO SEND CONTROL PACKET");
-       	if(Adapter->bcm_jiffies == 0)
-        {
-        	Adapter->bcm_jiffies = jiffies;
-            BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "UPDATED TIME(hex): %lu",
-				Adapter->bcm_jiffies);
-        }
         return STATUS_FAILURE;
     }
 
@@ -224,76 +65,33 @@
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Leader Length: %x",PLeader->PLength);
 	if(Adapter->device_removed)
 		return 0;
-#ifndef BCM_SHM_INTERFACE
+
+	if (netif_msg_pktdata(Adapter))
+		print_hex_dump(KERN_DEBUG, PFX "tx control: ", DUMP_PREFIX_NONE,
+			       16, 1, pControlPacket, PLeader->PLength + LEADER_SIZE, 0);
+
 	Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
 					pControlPacket, (PLeader->PLength + LEADER_SIZE));
-#else
-	tx_pkts_to_firmware(pControlPacket,(PLeader->PLength + LEADER_SIZE),1);
 
-	if(PLeader->Status==IDLE_MESSAGE)
-	{
-		if(((CONTROL_MESSAGE*)PLeader)->szData[0] == GO_TO_IDLE_MODE_PAYLOAD &&
-		((CONTROL_MESSAGE*)PLeader)->szData[1] == TARGET_CAN_GO_TO_IDLE_MODE)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Idle Mode Ack Sent to the Device\n");
-        	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Host Entering into Idle Mode\n");
-			do_gettimeofday(&tv);
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "IdleMode Msg sent to f/w at time :%ld ms", tv.tv_sec *1000 + tv.tv_usec /1000);
-			if(Adapter->bDoSuspend != TRUE)
-			{
-				Adapter->IdleMode = TRUE;
-				Adapter->bPreparingForLowPowerMode = FALSE ;
-			}
-		}
-	}
-	if((PLeader->Status == LINK_UP_CONTROL_REQ) &&
-		((PUCHAR)pControlPacket)[sizeof(LEADER)] == LINK_UP_ACK &&
-		((PUCHAR)pControlPacket)[sizeof(LEADER)+1] ==
-								LINK_SHUTDOWN_REQ_FROM_FIRMWARE  &&
-		((PUCHAR)pControlPacket)[sizeof(LEADER)+2] == SHUTDOWN_ACK_FROM_DRIVER)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "Shut Down ACK Sent and Host entering Shut State \n");
-		if(Adapter->bDoSuspend != TRUE)
-		{
-			Adapter->bShutStatus = TRUE;
-			Adapter->bPreparingForLowPowerMode = FALSE;
-			Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
-		}
-
-	}
-#endif
-
-	((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.tx_packets++;
-	((PLINUX_DEP_DATA)Adapter->pvOsDepData)->netstats.tx_bytes+=
-			PLeader->PLength;
 	atomic_dec(&Adapter->CurrNumFreeTxDesc);
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_CONTROL, DBG_LVL_ALL, "<=========");
 	return STATUS_SUCCESS;
 }
-static	LEADER Leader={0};
+
 /**
 @ingroup tx_functions
 This function despatches the IP packets with the given vcid
 to the target via the host h/w interface.
 @return  zero(success) or -ve value(failure)
 */
-INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
-					struct sk_buff *Packet, /**<data buffer*/
-					USHORT Vcid)			/**<VCID for this packet*/
+INT SetupNextSend(PMINI_ADAPTER Adapter, struct sk_buff *Packet, USHORT Vcid)
 {
 	int		status=0;
-#ifdef GDMA_INTERFACE
-	int dontfree = 0;
-#endif
 	BOOLEAN bHeaderSupressionEnabled = FALSE;
 	B_UINT16            uiClassifierRuleID;
-	int QueueIndex = NO_OF_QUEUES + 1;
+	u16	QueueIndex = skb_get_queue_mapping(Packet);
+	LEADER Leader={0};
 
-	if(!Adapter || !Packet)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got NULL Adapter or Packet");
-		return -EINVAL;
-	}
 	if(Packet->len > MAX_DEVICE_DESC_SIZE)
 	{
 		status = STATUS_FAILURE;
@@ -302,14 +100,10 @@
 
 	/* Get the Classifier Rule ID */
 	uiClassifierRuleID = *((UINT32*) (Packet->cb)+SKB_CB_CLASSIFICATION_OFFSET);
-	QueueIndex = SearchVcid( Adapter,Vcid);
-	if(QueueIndex < NO_OF_QUEUES)
-	{
-		bHeaderSupressionEnabled =
-			Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
-		bHeaderSupressionEnabled =
-			bHeaderSupressionEnabled & Adapter->bPHSEnabled;
-	}
+
+	bHeaderSupressionEnabled = Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled
+		& Adapter->bPHSEnabled;
+
 	if(Adapter->device_removed)
 		{
 		status = STATUS_FAILURE;
@@ -327,15 +121,10 @@
 
 	Leader.Vcid	= Vcid;
 
-    if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET ))
-	{
-        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending TCP ACK\n");
+	if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET ))
 		Leader.Status = LEADER_STATUS_TCP_ACK;
-	}
 	else
-	{
 		Leader.Status = LEADER_STATUS;
-	}
 
 	if(Adapter->PackInfo[QueueIndex].bEthCSSupport)
 	{
@@ -351,68 +140,53 @@
 		skb_push(Packet, LEADER_SIZE);
 		memcpy(Packet->data, &Leader, LEADER_SIZE);
 	}
-
 	else
 	{
 		Leader.PLength = Packet->len - ETH_HLEN;
 		memcpy((LEADER*)skb_pull(Packet, (ETH_HLEN - LEADER_SIZE)), &Leader, LEADER_SIZE);
 	}
 
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Packet->len = %d", Packet->len);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Vcid = %d", Vcid);
-
-#ifndef BCM_SHM_INTERFACE
 	status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
 			Packet->data, (Leader.PLength + LEADER_SIZE));
-#else
-	status = tx_pkts_to_firmware(Packet,Packet->len,0);
-#endif
 	if(status)
 	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Tx Failed..\n");
+		++Adapter->dev->stats.tx_errors;
+		if (netif_msg_tx_err(Adapter))
+			pr_info(PFX "%s: transmit error %d\n", Adapter->dev->name,
+				status);
 	}
 	else
 	{
+		struct netdev_queue *txq = netdev_get_tx_queue(Adapter->dev, QueueIndex);
 		Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
-		atomic_add(Leader.PLength, &Adapter->GoodTxByteCount);
-		atomic_inc(&Adapter->TxTotalPacketCount);
-#ifdef GDMA_INTERFACE
-    dontfree = 1;
-#endif
-	}
 
-	atomic_dec(&Adapter->CurrNumFreeTxDesc);
+		txq->tx_bytes += Leader.PLength;
+		++txq->tx_packets;
 
-errExit:
-
-	if(STATUS_SUCCESS == status)
-	{
 		Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
 		Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
 		Adapter->PackInfo[QueueIndex].uiSentPackets++;
 		Adapter->PackInfo[QueueIndex].NumOfPacketsSent++;
 
 		atomic_dec(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount);
-#ifdef BCM_SHM_INTERFACE
-		if(atomic_read(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount) < 0)
-		{
-			atomic_set(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount, 0);
-		}
-#endif
 		Adapter->PackInfo[QueueIndex].uiThisPeriodSentBytes += Leader.PLength;
 	}
 
+	atomic_dec(&Adapter->CurrNumFreeTxDesc);
 
-#ifdef GDMA_INTERFACE
-  if(!dontfree){
-  	bcm_kfree_skb(Packet);
-  }
-#else
-  	bcm_kfree_skb(Packet);
-#endif
+errExit:
+
+	dev_kfree_skb(Packet);
 	return status;
 }
 
+static int tx_pending(PMINI_ADAPTER Adapter)
+{
+	return (atomic_read(&Adapter->TxPktAvail)
+		&& MINIMUM_PENDING_DESCRIPTORS < atomic_read(&Adapter->CurrNumFreeTxDesc))
+		|| Adapter->device_removed || (1 == Adapter->downloadDDR);
+}
+
 /**
 @ingroup tx_functions
 Transmit thread
@@ -420,57 +194,26 @@
 int tx_pkt_handler(PMINI_ADAPTER Adapter  /**< pointer to adapter object*/
 				)
 {
-#ifndef BCM_SHM_INTERFACE
 	int status = 0;
-#endif
 
-	UINT calltransmit = 1;
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Entring to wait for signal from the interrupt service thread!Adapter = %p",Adapter);
-
-
-	while(1)
-	{
-		if(Adapter->LinkUpStatus){
+	while(! kthread_should_stop()) {
+		/* FIXME - the timeout looks like a workaround for racy usage of TxPktAvail */
+		if(Adapter->LinkUpStatus)
 			wait_event_timeout(Adapter->tx_packet_wait_queue,
-				((atomic_read(&Adapter->TxPktAvail) &&
-				(MINIMUM_PENDING_DESCRIPTORS <
-				atomic_read(&Adapter->CurrNumFreeTxDesc)) &&
-				(Adapter->device_removed == FALSE))) ||
-				(1 == Adapter->downloadDDR) || kthread_should_stop()
-#ifndef BCM_SHM_INTERFACE
-				|| (TRUE == Adapter->bEndPointHalted)
-#endif
-				, msecs_to_jiffies(10));
-		}
-		else{
-			wait_event(Adapter->tx_packet_wait_queue,
-				((atomic_read(&Adapter->TxPktAvail) &&
-				(MINIMUM_PENDING_DESCRIPTORS <
-				atomic_read(&Adapter->CurrNumFreeTxDesc)) &&
-				(Adapter->device_removed == FALSE))) ||
-				(1 == Adapter->downloadDDR) || kthread_should_stop()
-#ifndef BCM_SHM_INTERFACE
-				|| (TRUE == Adapter->bEndPointHalted)
-#endif
-				);
-		}
+					   tx_pending(Adapter), msecs_to_jiffies(10));
+		else
+			wait_event_interruptible(Adapter->tx_packet_wait_queue,
+						 tx_pending(Adapter));
 
-		if(kthread_should_stop() || Adapter->device_removed)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Exiting the tx thread..\n");
-			Adapter->transmit_packet_thread = NULL;
-			return 0;
-		}
-
-#ifndef BCM_SHM_INTERFACE
+		if (Adapter->device_removed)
+			break;
 
 		if(Adapter->downloadDDR == 1)
 		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Downloading DDR Settings\n");
 			Adapter->downloadDDR +=1;
 			status = download_ddr_settings(Adapter);
 			if(status)
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "DDR DOWNLOAD FAILED!\n");
+				pr_err(PFX "DDR DOWNLOAD FAILED! %d\n", status);
 			continue;
 		}
 
@@ -489,7 +232,6 @@
 				update_per_sf_desc_cnts(Adapter);
 			}
 		}
-#endif
 
 		if( atomic_read(&Adapter->CurrNumFreeTxDesc) &&
 			Adapter->LinkStatus == SYNC_UP_REQUEST &&
@@ -507,49 +249,12 @@
 				wake_up(&Adapter->process_rx_cntrlpkt);
 		}
 
-#ifdef BCM_SHM_INTERFACE
-		spin_lock_bh(&Adapter->txtransmitlock);
-		if(Adapter->txtransmit_running == 0)
-		{
-			Adapter->txtransmit_running = 1;
-			calltransmit = 1;
-		}
-		else
-			calltransmit = 0;
-		spin_unlock_bh(&Adapter->txtransmitlock);
-#endif
-
-		if(calltransmit)
-			transmit_packets(Adapter);
+		transmit_packets(Adapter);
 
 		atomic_set(&Adapter->TxPktAvail, 0);
 	}
+
+	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Exiting the tx thread..\n");
+	Adapter->transmit_packet_thread = NULL;
 	return 0;
 }
-
-#ifdef BCM_SHM_INTERFACE
-extern PMINI_ADAPTER psAdaptertest;
-void  virtual_mail_box_interrupt(void)
-{
-
-#ifndef GDMA_INTERFACE
-	PUINT ptr =  (PUINT)CPE_VIRTUAL_MAILBOX_REG;
-	UINT intval = (UINT)((*ptr & 0xFF00) >> 8);
-	if (intval != 0)
-	{
-		atomic_set(&psAdaptertest->CurrNumFreeTxDesc,	intval);
-		atomic_set (&psAdaptertest->uiMBupdate, TRUE);
-
-		//make it to 0
-		*ptr = *ptr & 0xffff00ff;
-	}
-#endif
-}
-unsigned int total_tx_pkts_pending(void)
-{
-	return atomic_read(&psAdaptertest->TotalPacketCount);
-}
-
-#endif
-
-
diff --git a/drivers/staging/bcm/cntrl_SignalingInterface.h b/drivers/staging/bcm/cntrl_SignalingInterface.h
index 4cbe300..8907784 100644
--- a/drivers/staging/bcm/cntrl_SignalingInterface.h
+++ b/drivers/staging/bcm/cntrl_SignalingInterface.h
@@ -2,19 +2,6 @@
 #define CNTRL_SIGNALING_INTERFACE_
 
 
-#ifdef BECEEM_TARGET
-
-#include <mac_common.h>
-#include <msg_Dsa.h>
-#include <msg_Dsc.h>
-#include <msg_Dsd.h>
-#include <sch_definitions.h>
-using namespace Beceem;
-#ifdef ENABLE_CORRIGENDUM2_UPDATE
-extern B_UINT32 g_u32Corr2MacFlags;
-#endif
-
-#else
 
 
 #define DSA_REQ 11
@@ -28,7 +15,6 @@
 #define DSD_ACK 19
 #define MAX_CLASSIFIERS_IN_SF  4
 
-#endif
 
 #define MAX_STRING_LEN 20
 #define MAX_PHS_LENGTHS 255
@@ -57,37 +43,7 @@
 ////////////////////////structure Definitions///////////////////////////////////
 ////////////////////////////////////////////////////////////////////////////////
 /// \brief class cCPacketClassificationRule
-#ifdef BECEEM_TARGET
-class CCPacketClassificationRuleSI{
-	public:
-		/// \brief Constructor for the class
-	CCPacketClassificationRuleSI():
-		u8ClassifierRulePriority(mClassifierRulePriority),
-		u8IPTypeOfServiceLength(mIPTypeOfService),
-		u8Protocol(mProtocol),
-		u8IPMaskedSourceAddressLength(0),
-		u8IPDestinationAddressLength(0),
-		u8ProtocolSourcePortRangeLength(0),
-		u8ProtocolDestPortRangeLength(0),
-		u8EthernetDestMacAddressLength(0),
-		u8EthernetSourceMACAddressLength(0),
-		u8EthertypeLength(0),
-		u16UserPriority(mUserPriority),
-		u16VLANID(mVLANID),
-		u8AssociatedPHSI(mAssociatedPHSI),
-		u16PacketClassificationRuleIndex(mPacketClassifierRuleIndex),
-		u8VendorSpecificClassifierParamLength(mVendorSpecificClassifierParamLength),
-		u8IPv6FlowLableLength(mIPv6FlowLableLength),
-		u8ClassifierActionRule(mClassifierActionRule)
-
-		{}
-              void Reset()
-              {
-                    CCPacketClassificationRuleSI();
-              }
-#else
 struct _stCPacketClassificationRuleSI{
-#endif
 
 	/**  16bit UserPriority Of The Service Flow*/
     B_UINT16                        u16UserPriority;
@@ -145,29 +101,10 @@
     B_UINT8							u8ClassifierActionRule;
     B_UINT16							u16ValidityBitMap;
 };
-#ifndef BECEEM_TARGET
 typedef struct _stCPacketClassificationRuleSI CCPacketClassificationRuleSI,stCPacketClassificationRuleSI, *pstCPacketClassificationRuleSI;
-#endif
 
 /// \brief class CPhsRuleSI
-#ifdef BECEEM_TARGET
-class CPhsRuleSI{
-	public:
-		/// \brief Constructor for the class
-		CPhsRuleSI():
-			u8PHSI(mPHSI),
-			u8PHSFLength(0),
-			u8PHSMLength(0),
-			u8PHSS(mPHSS),
-			u8PHSV(mPHSV),
-			u8VendorSpecificPHSParamsLength(mVendorSpecificPHSParamLength){}
-                void Reset()
-                {
-        		CPhsRuleSI();
-                }
-#else
 typedef struct _stPhsRuleSI {
-#endif
 	/**  8bit PHS Index Of The Service Flow*/
     B_UINT8                         u8PHSI;
 	/**  PHSF Length Of The Service Flow*/
@@ -188,31 +125,11 @@
     B_UINT8                         u8VendorSpecificPHSParams[VENDOR_PHS_PARAM_LENGTH];
 
 	B_UINT8                         u8Padding[2];
-#ifdef BECEEM_TARGET
-};
-#else
 }stPhsRuleSI,*pstPhsRuleSI;
 typedef stPhsRuleSI CPhsRuleSI;
-#endif
 
 /// \brief structure cConvergenceSLTypes
-#ifdef BECEEM_TARGET
-class CConvergenceSLTypes{
-	public:
-		/// \brief Constructor for the class
-		CConvergenceSLTypes():
-		u8ClassfierDSCAction(mClassifierDSCAction),
-		u8PhsDSCAction	(mPhsDSCAction)
-		{}
-              void Reset()
-              {
-                    CConvergenceSLTypes();
-                    cCPacketClassificationRule.Reset();
-                    cPhsRule.Reset();
-              }
-#else
 struct _stConvergenceSLTypes{
-#endif
 	/**  8bit Phs Classfier Action Of The Service Flow*/
     B_UINT8                         u8ClassfierDSCAction;
 	/**  8bit Phs DSC Action Of The Service Flow*/
@@ -220,111 +137,15 @@
 	/**   16bit Padding */
     B_UINT8                         u8Padding[2];
     /// \brief class cCPacketClassificationRule
-#ifdef BECEEM_TARGET
-    CCPacketClassificationRuleSI      cCPacketClassificationRule;
-#else
     stCPacketClassificationRuleSI     cCPacketClassificationRule;
-#endif
     /// \brief class CPhsRuleSI
-#ifdef BECEEM_TARGET
-    CPhsRuleSI				cPhsRule;
-#else
      struct _stPhsRuleSI		cPhsRule;
-#endif
 };
-#ifndef BECEEM_TARGET
 typedef struct _stConvergenceSLTypes stConvergenceSLTypes,CConvergenceSLTypes, *pstConvergenceSLTypes;
-#endif
 
 
 /// \brief structure CServiceFlowParamSI
-#ifdef BECEEM_TARGET
-class CServiceFlowParamSI{
-	public:
-		/// \brief Constructor for the class
-		CServiceFlowParamSI():
-			u32SFID(mSFid),
-			u16CID(mCid),
-			u8ServiceClassNameLength(mServiceClassNameLength),
-			u8MBSService(mMBSService),
-			u8QosParamSet(mQosParamSetType),
-			u8TrafficPriority(mTrafficPriority),
-			u32MaxSustainedTrafficRate(mMaximumSustainedTrafficRate),
-			u32MaxTrafficBurst(mMaximumTrafficBurst),
-			u32MinReservedTrafficRate(mMinimumReservedTrafficRate),
-			u8ServiceFlowSchedulingType(mServiceFlowSchedulingType),
-			u8RequesttransmissionPolicy(mRequestTransmissionPolicy),
-			u32ToleratedJitter(mToleratedJitter),
-			u32MaximumLatency(mMaximumLatency),
-			u8FixedLengthVSVariableLengthSDUIndicator
-			(mFixedLengthVSVariableLength),
-			u8SDUSize(mSDUSize),
-			u16TargetSAID(mTargetSAID),
-			u8ARQEnable(mARQEnable),
-			u16ARQWindowSize(mARQWindowSize),
-			u16ARQBlockLifeTime(mARQBlockLifeTime),
-			u16ARQSyncLossTimeOut(mARQSyncLossTimeOut),
-			u8ARQDeliverInOrder(mARQDeliverInOrder),
-			u16ARQRxPurgeTimeOut(mARQRXPurgeTimeOut),
-			//Add ARQ BLOCK SIZE, ARQ TX and RX delay initializations here
-			//after we move to only CORR2
-			u8RxARQAckProcessingTime(mRxARQAckProcessingTime),
-			u8CSSpecification(mCSSpecification),
-			u8TypeOfDataDeliveryService(mTypeOfDataDeliveryService),
-			u16SDUInterArrivalTime(mSDUInterArrivalTime),
-			u16TimeBase(mTimeBase),
-			u8PagingPreference(mPagingPreference),
-			u8MBSZoneIdentifierassignment(mMBSZoneIdentifierassignmentLength),
-			u8TrafficIndicationPreference(mTrafficIndicationPreference),
-			u8GlobalServicesClassNameLength(mGlobalServicesClassNameLength),
-			u8SNFeedbackEnabled(mSNFeedbackEnabled),
-			u8FSNSize(mFSNSize),
-			u8CIDAllocation4activeBSsLength(mCIDAllocation4activeBSsLength),
-			u16UnsolicitedGrantInterval(mUnsolicitedGrantInterval),
-			u16UnsolicitedPollingInterval(mUnsolicitedPollingInterval),
-			u8PDUSNExtendedSubheader4HarqReordering(mPDUSNExtendedSubheader4HarqReordering),
-			u8MBSContentsIDLength(mMBSContentsIDLength),
-			u8HARQServiceFlows(mHARQServiceFlows),
-			u8AuthTokenLength(mAuthTokenLength),
-			u8HarqChannelMappingLength(mHarqChannelMappingLength),
-			u8VendorSpecificQoSParamLength(mVendorSpecificQoSParamLength),
-            bValid(FALSE),
-	     u8TotalClassifiers()
-{
-//Remove the bolck after we move to Corr2 only code
-#ifdef ENABLE_CORRIGENDUM2_UPDATE
-	if((g_u32Corr2MacFlags & CORR_2_DSX)  ||  (g_u32Corr2MacFlags & CORR_2_ARQ))
-	{
-	/* IEEE Comment #627 / MTG Comment #426 */
-       	u16ARQBlockSize = mARQBlockSize;
-		if(g_u32Corr2MacFlags & CORR_2_ARQ) {
-			u16ARQRetryTxTimeOut = mARQRetryTimeOutTxDelay;
-			if(g_u32VENDOR_TYPE == VENDOR_ALCATEL) {
-				u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelay_ALU;
-			} else {
-				u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelay;
-			}
-		}
-		else
-		{
-			u16ARQRetryTxTimeOut = mARQRetryTimeOutTxDelayCorr1;
-			u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelayCorr1;
-		}
-	}
-	else
-#endif
-	{
-		u16ARQBlockSize = mARQBlockSizeCorr1;
-		u16ARQRetryTxTimeOut = mARQRetryTimeOutTxDelayCorr1;
-		u16ARQRetryRxTimeOut = mARQRetryTimeOutRxDelayCorr1;
-	}
-}
-
-	void ComputeMacOverhead(B_UINT8	u8SecOvrhead);
-	B_UINT16	GetMacOverhead() { return 	u16MacOverhead; }
-#else
 typedef struct _stServiceFlowParamSI{
-#endif //end of ifdef BECEEM_TARGET
 
      /**  32bitSFID Of The Service Flow*/
     B_UINT32                        u32SFID;
@@ -367,11 +188,6 @@
 
 	 /**  16bit ARQ Purge timeout */
     B_UINT16                        u16ARQRxPurgeTimeOut;
-#if 0 //def ENABLE_CORRIGENDUM2_UPDATE
-/* IEEE Comment #627 / MTG Comment #426 */
-    /// \brief Size of an ARQ block, changed from 2 bytes to 1
-    B_UINT8                        u8ARQBlockSize;
-#endif
 //TODO::Remove this once we move to a new CORR2 driver
     /// \brief Size of an ARQ block
     B_UINT16                        u16ARQBlockSize;
@@ -496,35 +312,18 @@
 	B_UINT8							bValid;	/**<  Validity flag */
 	B_UINT8				u8Padding;	 /**<  Padding byte*/
 
-#ifdef BECEEM_TARGET
-/**
-Structure for Convergence SubLayer Types with a maximum of 4 classifiers
-*/
-	CConvergenceSLTypes		cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF];
-#else
 /**
 Structure for Convergence SubLayer Types with a maximum of 4 classifiers
 */
 	stConvergenceSLTypes		cConvergenceSLTypes[MAX_CLASSIFIERS_IN_SF];
-#endif
 
-#ifdef BECEEM_TARGET
-};
-#else
 } stServiceFlowParamSI, *pstServiceFlowParamSI;
 typedef stServiceFlowParamSI CServiceFlowParamSI;
-#endif
 
 /**
 structure stLocalSFAddRequest
 */
 typedef struct _stLocalSFAddRequest{
-#ifdef BECEEM_TARGET
-	   _stLocalSFAddRequest( ) :
-	   	u8Type(0x00),  eConnectionDir(0x00),
-		u16TID(0x0000), u16CID(0x0000),  u16VCID(0x0000)
-	   		{}
-#endif
 
 	B_UINT8                         u8Type;	/**<  Type*/
 	B_UINT8      eConnectionDir;		/**<  Connection direction*/
@@ -535,19 +334,9 @@
 	/// \brief 16bitVCID
 	B_UINT16                        u16VCID;	/**<  16bit VCID*/
     /// \brief structure ParameterSet
-#ifdef BECEEM_SIGNALLING_INTERFACE_API
-	CServiceFlowParamSI sfParameterSet;
-#endif
 
-#ifdef BECEEM_TARGET
-    CServiceFlowParamSI              *psfParameterSet;
-#else
 	stServiceFlowParamSI	*psfParameterSet;	/**<  structure ParameterSet*/
-#endif
 
-#ifdef USING_VXWORKS
-    USE_DATA_MEMORY_MANAGER();
-#endif
 }stLocalSFAddRequest, *pstLocalSFAddRequest;
 
 
@@ -555,12 +344,6 @@
 structure stLocalSFAddIndication
 */
 typedef struct _stLocalSFAddIndication{
-#ifdef BECEEM_TARGET
-	   _stLocalSFAddIndication( ) :
-	   	u8Type(0x00),  eConnectionDir(0x00),
-		u16TID(0x0000), u16CID(0x0000),  u16VCID(0x0000)
-	   		{}
-#endif
 
 	B_UINT8                         u8Type;	/**<  Type*/
 	B_UINT8      eConnectionDir;	/**<  Connection Direction*/
@@ -571,37 +354,19 @@
     /// \brief 16bitVCID
     B_UINT16                        u16VCID;	 /**<  16bitVCID*/
 
-#ifdef 	BECEEM_SIGNALLING_INTERFACE_API
-	CServiceFlowParamSI              sfAuthorizedSet;
-    /// \brief structure AdmittedSet
-    CServiceFlowParamSI              sfAdmittedSet;
-    /// \brief structure ActiveSet
-    CServiceFlowParamSI              sfActiveSet;
-#endif
 
     /// \brief structure AuthorizedSet
-#ifdef BECEEM_TARGET
-    CServiceFlowParamSI              *psfAuthorizedSet;
-    /// \brief structure AdmittedSet
-    CServiceFlowParamSI              *psfAdmittedSet;
-    /// \brief structure ActiveSet
-    CServiceFlowParamSI              *psfActiveSet;
-#else
     /// \brief structure AuthorizedSet
     stServiceFlowParamSI              *psfAuthorizedSet;	/**<  AuthorizedSet of type stServiceFlowParamSI*/
     /// \brief structure AdmittedSet
     stServiceFlowParamSI              *psfAdmittedSet;	/**<  AdmittedSet of type stServiceFlowParamSI*/
     /// \brief structure ActiveSet
     stServiceFlowParamSI              *psfActiveSet;	/**<  sfActiveSet of type stServiceFlowParamSI*/
-#endif
 	B_UINT8				   u8CC;	/**<  Confirmation Code*/
 	B_UINT8				   u8Padd;		/**<  8-bit Padding */
 
     B_UINT16               u16Padd;	/**< 16 bit Padding */
 
-#ifdef USING_VXWORKS
-    USE_DATA_MEMORY_MANAGER();
-#endif
 }stLocalSFAddIndication;
 
 
@@ -619,33 +384,17 @@
 structure stLocalSFDeleteRequest
 */
 typedef struct _stLocalSFDeleteRequest{
-#ifdef BECEEM_TARGET
-	   _stLocalSFDeleteRequest( ) :
-	   	u8Type(0x00),  u8Padding(0x00),
-		u16TID(0x0000), u32SFID (0x00000000)
-	   		{}
-#endif
 	B_UINT8                         u8Type;	 /**< Type*/
 	B_UINT8                         u8Padding;	 /**<  Padding byte*/
 	B_UINT16			u16TID;		 /**<  TID*/
     /// \brief 32bitSFID
     B_UINT32                        u32SFID;	 /**<  SFID*/
-#ifdef USING_VXWORKS
-    USE_DATA_MEMORY_MANAGER();
-#endif
 }stLocalSFDeleteRequest, *pstLocalSFDeleteRequest;
 
 /**
 structure stLocalSFDeleteIndication
 */
 typedef struct stLocalSFDeleteIndication{
-#ifdef BECEEM_TARGET
-	   stLocalSFDeleteIndication( ) :
-	   	u8Type(0x00),  u8Padding(0x00),
-		u16TID(0x0000), u16CID(0x0000),
-		u16VCID(0x0000),u32SFID (0x00000000)
-	   		{}
-#endif
 	B_UINT8                         u8Type;	/**< Type */
 	B_UINT8                         u8Padding;	/**< Padding  */
 	B_UINT16			u16TID;			/**< TID */
@@ -658,9 +407,6 @@
 	/// \brief 8bit Confirmation code
 	B_UINT8                         u8ConfirmationCode;	/**< Confirmation code */
 	B_UINT8                         u8Padding1[3];		/**< 3 byte Padding  */
-#ifdef USING_VXWORKS
-    USE_DATA_MEMORY_MANAGER();
-#endif
 }stLocalSFDeleteIndication;
 
 typedef struct _stIM_SFHostNotify
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index 9d4e3ac..1148e5e 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -22,7 +22,6 @@
 #include <linux/etherdevice.h>
 #include <net/ip.h>
 #include <linux/wait.h>
-#include <linux/notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/interrupt.h>
 
@@ -36,26 +35,10 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <asm/uaccess.h>
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
 #include <linux/kthread.h>
-#endif
 #include <linux/tcp.h>
 #include <linux/udp.h>
-#ifndef BCM_SHM_INTERFACE
 #include <linux/usb.h>
-#endif
-#ifdef BECEEM_TARGET
-
-#include <mac_common.h>
-#include <msg_Dsa.h>
-#include <msg_Dsc.h>
-#include <msg_Dsd.h>
-#include <sch_definitions.h>
-using namespace Beceem;
-#ifdef ENABLE_CORRIGENDUM2_UPDATE
-extern B_UINT32 g_u32Corr2MacFlags;
-#endif
-#endif
 
 #include "Typedefs.h"
 #include "Version.h"
@@ -71,39 +54,28 @@
 #include "CmHost.h"
 #include "DDRInit.h"
 #include "Debug.h"
-#include "HostMibs.h"
 #include "IPv6ProtocolHdr.h"
-#include "osal_misc.h"
 #include "PHSModule.h"
 #include "Protocol.h"
 #include "Prototypes.h"
 #include "Queue.h"
 #include "vendorspecificextn.h"
 
-#ifndef BCM_SHM_INTERFACE
 
 #include "InterfaceMacros.h"
 #include "InterfaceAdapter.h"
 #include "InterfaceIsr.h"
-#include "Interfacemain.h"
 #include "InterfaceMisc.h"
 #include "InterfaceRx.h"
 #include "InterfaceTx.h"
-#endif
 #include "InterfaceIdleMode.h"
 #include "InterfaceInit.h"
 
-#ifdef BCM_SHM_INTERFACE
-#include <linux/cpe_config.h>
-
-#ifdef GDMA_INTERFACE
-#include "GdmaInterface.h"
-#include "symphony.h"
-#else
-#include "virtual_interface.h"
-
-#endif
-
-#endif
+#define DRV_NAME	"beceem"
+#define DEV_NAME	"tarang"
+#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver"
+#define DRV_COPYRIGHT	"Copyright 2010. Beceem Communications Inc"
+#define DRV_VERSION	VER_FILEVERSION_STR
+#define PFX		DRV_NAME " "
 
 #endif
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index e9da513..c13ea5c 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -10,12 +10,8 @@
  */
 #include "headers.h"
 
-INT  ProcessGetHostMibs(PMINI_ADAPTER Adapter,
-						  PVOID ioBuffer,
-						  ULONG inputBufferLength)
+INT  ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
 {
-
-	S_MIBS_HOST_STATS_MIBS *pstHostMibs         = NULL;
 	S_SERVICEFLOW_ENTRY    *pstServiceFlowEntry = NULL;
 	S_PHS_RULE             *pstPhsRule          = NULL;
 	S_CLASSIFIER_TABLE     *pstClassifierTable  = NULL;
@@ -30,15 +26,6 @@
 		return STATUS_FAILURE;
 	}
 
-	if(ioBuffer == NULL)
-	{
-		return -EINVAL;
-	}
-	memset(ioBuffer,0,sizeof(S_MIBS_HOST_STATS_MIBS));
-
-	pstHostMibs = (S_MIBS_HOST_STATS_MIBS *)ioBuffer;
-
-
 	//Copy the classifier Table
 	for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
 			nClassifierIndex++)
@@ -54,7 +41,7 @@
 	{
 	if(Adapter->PackInfo[nSfIndex].bValid)
 	{
-			OsalMemMove((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
+			memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
 	}
 	else
 	{
@@ -83,7 +70,7 @@
 
 			pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
 
-			OsalMemMove(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
+			memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
 						&pstPhsRule->u8PHSI,
 						sizeof(S_PHS_RULE));
 				nPhsTableIndex++;
@@ -95,12 +82,9 @@
 	}
 
 
-
 	//copy other Host Statistics parameters
-	pstHostMibs->stHostInfo.GoodTransmits =
-				atomic_read(&Adapter->TxTotalPacketCount);
-	pstHostMibs->stHostInfo.GoodReceives =
-				atomic_read(&Adapter->GoodRxPktCount);
+	pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
+	pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
 	pstHostMibs->stHostInfo.CurrNumFreeDesc =
 			atomic_read(&Adapter->CurrNumFreeTxDesc);
 	pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
@@ -115,13 +99,10 @@
 }
 
 
-INT GetDroppedAppCntrlPktMibs(PVOID ioBuffer, PPER_TARANG_DATA pTarang)
+VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
 {
-	S_MIBS_HOST_STATS_MIBS *pstHostMibs = (S_MIBS_HOST_STATS_MIBS *)ioBuffer;
-
-	memcpy((PVOID)&(pstHostMibs->stDroppedAppCntrlMsgs),(PVOID)&(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
-
-	return STATUS_SUCCESS ;
+	memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
+	       &(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
 }
 
 
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 97adaae..16e939f 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -108,52 +108,16 @@
 	ulong timeout = 0;
 
 	/*Read initial value of packets sent/received */
-	Initial_num_of_packts_tx = atomic_read(&Adapter->TxTotalPacketCount);
-	Initial_num_of_packts_rx = atomic_read(&Adapter->GoodRxPktCount);
+	Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
+	Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
+
 	/*Scale the rate of transfer to no of blinks.*/
 	num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
 	num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
 
 	while((Adapter->device_removed == FALSE))
 	{
-		#if 0
-		if(0 == num_of_time_tx && 0 == num_of_time_rx)
-		{
-			timeout = 1000;
-			Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
-				currdriverstate!= Adapter->DriverState || kthread_should_stop(),
-				msecs_to_jiffies (timeout));
-			if(kthread_should_stop())
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
-				Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
-				return EVENT_SIGNALED;
-			}
-			if(Status)
-				return EVENT_SIGNALED;
-
-		}
-		#endif
-
 		timeout = 50;
-		#if 0
-		/*Turn on LED if Tx is high bandwidth*/
-		if(num_of_time_tx > MAX_NUM_OF_BLINKS)
-		{
-			TURN_ON_LED(1<<GPIO_Num_tx, uiTxLedIndex);
-			num_of_time_tx = 0;
-			bBlinkBothLED = FALSE;
-			num_of_time = num_of_time_rx;
-		}
-			/*Turn on LED if Rx is high bandwidth*/
-		if(num_of_time_rx > MAX_NUM_OF_BLINKS)
-		{
-			TURN_ON_LED(1<<GPIO_Num_rx, uiRxLedIndex);
-			num_of_time_rx = 0;
-			bBlinkBothLED = FALSE;
-			num_of_time = num_of_time_tx;
-		}
-		#endif
 		/*Blink Tx and Rx LED when both Tx and Rx is in normal bandwidth*/
 		if(bBlinkBothLED)
 		{
@@ -249,9 +213,10 @@
  		 * Read the Tx & Rx packets transmission after 1 second and
  		 * calculate rate of transfer
  		 */
-		Final_num_of_packts_tx = atomic_read(&Adapter->TxTotalPacketCount);
+		Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
+		Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
+
 		rate_of_transfer_tx = Final_num_of_packts_tx - Initial_num_of_packts_tx;
-		Final_num_of_packts_rx = atomic_read(&Adapter->GoodRxPktCount);
 		rate_of_transfer_rx = Final_num_of_packts_rx - Initial_num_of_packts_rx;
 
 		/*Read initial value of packets sent/received */
@@ -293,7 +258,7 @@
 
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",ulParamOffset, usParamLen);
 
-	puBuffer = OsalMemAlloc(usParamLen,"!MEM");
+	puBuffer = kmalloc(usParamLen, GFP_KERNEL);
 	if(!puBuffer)
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum Allocation failed");
@@ -341,10 +306,7 @@
 	}
 
 exit:
-	if(puBuffer)
-	{
-		OsalMemFree(puBuffer, usParamLen);
-	}
+	kfree(puBuffer);
 	return Status;
 }
 
@@ -497,12 +459,10 @@
 {
 	int Status = STATUS_SUCCESS;
 	UCHAR GPIO_Array[NUM_OF_LEDS+1]; /*Array to store GPIO numbers from EEPROM*/
-#ifndef BCM_SHM_INTERFACE
 	UINT uiIndex = 0;
 	UINT uiNum_of_LED_Type = 0;
 	PUCHAR puCFGData	= NULL;
 	UCHAR bData = 0;
-#endif
 	memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
 
 	if(!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams))
@@ -524,10 +484,6 @@
 		*bEnableThread = FALSE;
 		return Status;
 	}
-#ifdef BCM_SHM_INTERFACE
-	*bEnableThread = FALSE;
-	return Status ;
-#else
   /*
      * CONFIG file read successfully. Deallocate the memory of
      * uiFileNameBufferSize
@@ -578,23 +534,7 @@
 	}
 	if(uiNum_of_LED_Type >= NUM_OF_LEDS)
 		*bEnableThread = FALSE;
-#endif
 
-#if 0
-	for(uiIndex=0; uiIndex<NUM_OF_LEDS; uiIndex++)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].LED_Type = %x\n", uiIndex,
-			Adapter->LEDInfo.LEDState[uiIndex].LED_Type);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].LED_On_State = %x\n", uiIndex,
-			Adapter->LEDInfo.LEDState[uiIndex].LED_On_State);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].LED_Blink_State = %x\n", uiIndex,
-			Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State);
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LEDState[%d].GPIO_Num = %x\n", uiIndex,
-			Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num);
-	}
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Polarity = %d\n",
-			Adapter->LEDInfo.BitPolarty);
-#endif
 	return Status;
 }
 //--------------------------------------------------------------------------
@@ -721,20 +661,6 @@
 			TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
 			return ;//STATUS_FAILURE;
 		}
-	#if 0
-		if(Adapter->device_removed)
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"Device removed hence exiting from Led Thread..");
-			return ; //-ENODEV;
-		}
-	#endif
-		#if 0
-		if((GPIO_num != DISABLE_GPIO_NUM) &&
-			((currdriverstate != FW_DOWNLOAD) &&
-			(currdriverstate != NORMAL_OPERATION) &&
-			(currdriverstate != IDLEMODE_EXIT)))
-			TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
-		#endif
 
 		if(GPIO_num != DISABLE_GPIO_NUM)
 		{
@@ -752,10 +678,6 @@
 			case DRIVER_INIT:
 			{
 				currdriverstate = DRIVER_INIT;//Adapter->DriverState;
-	#if 0
-				LedGpioInit(Adapter);
-				Adapter->LEDInfo.bLedInitDone = TRUE;
-	#endif
 				BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
 
 				if(GPIO_num  != DISABLE_GPIO_NUM)
@@ -768,13 +690,6 @@
 			{
 				//BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FW_DN_DONE called\n");
 				currdriverstate = FW_DOWNLOAD;
-			#if 0
-				if(Adapter->LEDInfo.bLedInitDone == FALSE)
-				{
-					LedGpioInit(Adapter);
-					Adapter->LEDInfo.bLedInitDone = TRUE;
-				}
-			#endif
 				BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,  &uiLedIndex, &dummyIndex, currdriverstate);
 
 				if(GPIO_num != DISABLE_GPIO_NUM)
@@ -796,12 +711,6 @@
 			break;
 
 			case SHUTDOWN_EXIT:
-			#if 0
-			if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN)
-			{
-				LedGpioInit(Adapter);
-			}
-			#endif
 			//no break, continue to NO_NETWORK_ENTRY state as well.
 
 			case NO_NETWORK_ENTRY:
@@ -875,34 +784,6 @@
 			break;
 			case IDLEMODE_EXIT:
 			{
-#if 0
-				UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
-				UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
-				UCHAR uiTxLedIndex = 0;
-				UCHAR uiRxLedIndex = 0;
-
-				currdriverstate  = IDLEMODE_EXIT;
-				if(DEVICE_POWERSAVE_MODE_AS_PMU_SHUTDOWN == Adapter->ulPowerSaveMode)
-				{
-					LedGpioInit(Adapter);
-				}
-				BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx, &GPIO_num_rx, &uiTxLedIndex,&uiRxLedIndex,currdriverstate);
-
-				Adapter->LEDInfo.bIdle_led_off =  FALSE;
-
-				if((GPIO_num_tx == DISABLE_GPIO_NUM) && (GPIO_num_rx == DISABLE_GPIO_NUM))
-				{
-					GPIO_num = DISABLE_GPIO_NUM ;
-				}
-				else
-				{
-					timeout = 50;
-					if(Adapter->LEDInfo.bIdleMode_tx_from_host)
-						LED_Blink(Adapter, 1<<GPIO_num_tx, uiTxLedIndex, timeout, -1,currdriverstate);
-					else
-						LED_Blink(Adapter, 1<<GPIO_num_rx, uiRxLedIndex, timeout, -1,currdriverstate);
-				}
-#endif
 			}
 			break;
 			case DRIVER_HALT:
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 41c9ab8..c729237 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -1,6 +1,56 @@
 #include "headers.h"
 
 #define DWORD unsigned int
+
+static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset);
+static INT BcmGetActiveDSD(PMINI_ADAPTER Adapter);
+static INT BcmGetActiveISO(PMINI_ADAPTER Adapter);
+static UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter);
+static INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter);
+static UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize);
+
+static VOID BcmValidateNvmType(PMINI_ADAPTER Adapter);
+static INT BcmGetNvmSize(PMINI_ADAPTER Adapter);
+static UINT BcmGetFlashSize(PMINI_ADAPTER Adapter);
+static NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter);
+
+static INT BcmGetSectionValEndOffset(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+
+static B_UINT8 IsOffsetWritable(PMINI_ADAPTER Adapter, UINT uiOffset);
+static INT IsSectionWritable(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL Section);
+static INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section);
+
+static INT ReadDSDPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
+static INT ReadDSDSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL dsd);
+static INT ReadISOPriority(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
+static INT ReadISOSignature(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL iso);
+
+static INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+static INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal);
+static INT SaveHeaderIfPresent(PMINI_ADAPTER Adapter, PUCHAR pBuff, UINT uiSectAlignAddr);
+static INT WriteToFlashWithoutSectorErase(PMINI_ADAPTER Adapter, PUINT pBuff,
+					  FLASH2X_SECTION_VAL eFlash2xSectionVal,
+					  UINT uiOffset, UINT uiNumBytes);
+static FLASH2X_SECTION_VAL getHighestPriDSD(PMINI_ADAPTER Adapter);
+static FLASH2X_SECTION_VAL getHighestPriISO(PMINI_ADAPTER Adapter);
+
+static INT BeceemFlashBulkRead(
+	PMINI_ADAPTER Adapter,
+	PUINT pBuffer,
+	UINT uiOffset,
+	UINT uiNumBytes);
+
+static INT BeceemFlashBulkWrite(
+	PMINI_ADAPTER Adapter,
+	PUINT pBuffer,
+	UINT uiOffset,
+	UINT uiNumBytes,
+	BOOLEAN bVerify);
+
+static INT GetFlashBaseAddr(PMINI_ADAPTER Adapter);
+
+static INT ReadBeceemEEPROMBulk(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData, UINT dwNumData);
+
 // Procedure:	ReadEEPROMStatusRegister
 //
 // Description: Reads the standard EEPROM Status Register.
@@ -228,213 +278,27 @@
 		ReadBeceemEEPROMBulk(Adapter, uiTempOffset + MAX_RW_SIZE, (PUINT)&uiData[4], 4);
 	}
 
-	OsalMemMove( (PUCHAR) pBuffer, ( ((PUCHAR)&uiData[0]) + uiByteOffset ), 4);
+	memcpy( (PUCHAR) pBuffer, ( ((PUCHAR)&uiData[0]) + uiByteOffset ), 4);
 
 	return STATUS_SUCCESS;
 } /* ReadBeceemEEPROM() */
 
 
-#if 0
-//-----------------------------------------------------------------------------
-// Procedure:	IsEEPROMWriteDone
-//
-// Description: Reads the SPI status to see the status of previous write.
-//
-// Arguments:
-//		Adapter    - ptr to Adapter object instance
-//
-// Returns:
-//		BOOLEAN - TRUE  - write went through
-//              - FALSE - Write Failed.
-//-----------------------------------------------------------------------------
-
-BOOLEAN IsEEPROMWriteDone(PMINI_ADAPTER Adapter)
-{
-	UINT uiRetries = 16;
-	//UINT uiStatus  = 0;
-	UINT value;
-
-	//sleep for 1.2ms ..worst case EEPROM write can take up to 1.2ms.
-	mdelay(2);
-
-	value = 0;
-	rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-
-	while(((value >> 14) & 1) == 1)
-	{
-		// EEPROM_SPI_Q_STATUS1_REG will be cleared only if write back to that.
-		value = (0x1 << 14);
-		wrmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
-		udelay(1000);
-		uiRetries--;
-		if(uiRetries == 0)
-		{
-			return FALSE;
-		}
-		value = 0;
-		rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
-	}
-	return TRUE;
-
-
-}
-
-
-//-----------------------------------------------------------------------------
-// Procedure:	ReadBeceemEEPROMBulk
-//
-// Description: This routine reads 16Byte data from EEPROM
-//
-// Arguments:
-//		Adapter     - ptr to Adapter object instance
-//          dwAddress - EEPROM Offset to read the data from.
-//          pdwData    - Pointer to double word where data needs to be stored in.
-//
-// Returns:
-//		OSAL_STATUS_CODE:
-//-----------------------------------------------------------------------------
-
-INT ReadBeceemEEPROMBulk(PMINI_ADAPTER Adapter,DWORD dwAddress, DWORD *pdwData)
-{
-	DWORD dwRetries = 16;
-	DWORD dwIndex = 0;
-	UINT value, tmpVal;
-
-
-	value = 0;
-	rdmalt (Adapter, 0x0f003008, &value, sizeof(value));
-
-	//read 0x0f003020 untill  bit 1 of 0x0f003008 is set.
-	while(((value >> 1) & 1) == 0)
-	{
-
-		rdmalt (Adapter, 0x0f003020, &tmpVal, sizeof(tmpVal));
-		dwRetries--;
-		if(dwRetries == 0)
-		{
-			return -1;
-		}
-		value = 0;
-		rdmalt (Adapter, 0x0f003008, &value, sizeof(value));
-	}
-
-	value = dwAddress | 0xfb000000;
-	wrmalt (Adapter, 0x0f003018, &value, sizeof(value));
-
-	udelay(1000);
-	value = 0;
-	for(dwIndex = 0;dwIndex < 4 ; dwIndex++)
-	{
-		value = 0;
-		rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
-		pdwData[dwIndex] = value;
-
-		value = 0;
-		rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
-		pdwData[dwIndex] |= (value << 8);
-
-		value = 0;
-		rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
-		pdwData[dwIndex] |= (value << 16);
-
-		value = 0;
-		rdmalt (Adapter, 0x0f003020, &value, sizeof(value));
-		pdwData[dwIndex] |= (value << 24);
-
-	}
-	return 0;
-}
-
-//-----------------------------------------------------------------------------
-// Procedure:	ReadBeceemEEPROM
-//
-// Description: This routine reads 4Byte data from EEPROM
-//
-// Arguments:
-//		Adapter     - ptr to Adapter object instance
-//          dwAddress - EEPROM Offset to read the data from.
-//          pdwData    - Pointer to double word where data needs to be stored in.
-//
-// Returns:
-//		OSAL_STATUS_CODE:
-//-----------------------------------------------------------------------------
-
-INT ReadBeceemEEPROM(PMINI_ADAPTER Adapter,DWORD dwAddress, DWORD *pdwData)
-{
-
-	DWORD dwReadValue = 0;
-	DWORD dwRetries = 16, dwCompleteWord = 0;
-	UINT	value, tmpVal;
-
-	rdmalt(Adapter, 0x0f003008, &value, sizeof(value));
-	while (((value >> 1) & 1) == 0) {
-		rdmalt(Adapter, 0x0f003020, &tmpVal, sizeof(tmpVal));
-
-		if (dwRetries == 0) {
-			return -1;
-		}
-		rdmalt(Adapter, 0x0f003008, &value, sizeof(value));
-	}
-
-
-	//wrm (0x0f003018, 0xNbXXXXXX)      // N is the number of bytes u want to read  (0 means 1, f means 16,   b is the opcode for page read)
-	//     Follow it up by N executions of  rdm(0x0f003020) to read the rxed bytes from rx queue.
-	dwAddress |= 0x3b000000;
-	wrmalt(Adapter, 0x0f003018,&dwAddress,4);
-	mdelay(10);
-	rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
-	dwCompleteWord=dwReadValue;
-	rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
-	dwCompleteWord|=(dwReadValue<<8);
-	rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
-	dwCompleteWord|=(dwReadValue<<16);
-	rdmalt(Adapter, 0x0f003020,&dwReadValue,4);
-	dwCompleteWord|=(dwReadValue<<24);
-
-	*pdwData = dwCompleteWord;
-
-	return 0;
-}
-#endif
 
 INT ReadMacAddressFromNVM(PMINI_ADAPTER Adapter)
 {
-	INT Status=0, i;
-	unsigned char puMacAddr[6] = {0};
-	INT AllZeroMac = 0;
-	INT AllFFMac = 0;
+	INT Status;
+	unsigned char puMacAddr[6];
 
 	Status = BeceemNVMRead(Adapter,
 			(PUINT)&puMacAddr[0],
 			INIT_PARAMS_1_MACADDRESS_ADDRESS,
 			MAC_ADDRESS_SIZE);
 
-	if(Status != STATUS_SUCCESS)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Error in Reading the mac Addres with status :%d", Status);
-		return Status;
-	}
-
-	memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
-	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Modem MAC Addr :");
-    BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_PRINTK, 0, DBG_LVL_ALL,&Adapter->dev->dev_addr[0],MAC_ADDRESS_SIZE);
-	for(i=0;i<MAC_ADDRESS_SIZE;i++)
-	{
-
-		if(Adapter->dev->dev_addr[i] == 0x00)
-			AllZeroMac++;
-		if(Adapter->dev->dev_addr[i] == 0xFF)
-			AllFFMac++;
-
-	}
-	//BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "\n");
-	if(AllZeroMac == MAC_ADDRESS_SIZE)
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Warning :: MAC Address has all 00's");
-	if(AllFFMac == MAC_ADDRESS_SIZE)
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Warning :: MAC Address has all FF's");
+	if(Status == STATUS_SUCCESS)
+		memcpy(Adapter->dev->dev_addr, puMacAddr, MAC_ADDRESS_SIZE);
 
 	return Status;
-
 }
 
 //-----------------------------------------------------------------------------
@@ -476,7 +340,7 @@
 		ReadBeceemEEPROMBulk(Adapter,uiTempOffset,(PUINT)&uiData[0],4);
 		if(uiBytesRemaining >= (MAX_RW_SIZE - uiExtraBytes))
 		{
-			OsalMemMove(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),MAX_RW_SIZE - uiExtraBytes);
+			memcpy(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),MAX_RW_SIZE - uiExtraBytes);
 
 			uiBytesRemaining -= (MAX_RW_SIZE - uiExtraBytes);
 			uiIndex += (MAX_RW_SIZE - uiExtraBytes);
@@ -484,7 +348,7 @@
 		}
 		else
 		{
-			OsalMemMove(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),uiBytesRemaining);
+			memcpy(pBuffer,(((PUCHAR)&uiData[0])+uiExtraBytes),uiBytesRemaining);
 			uiIndex += uiBytesRemaining;
 			uiOffset += uiBytesRemaining;
 			uiBytesRemaining = 0;
@@ -508,7 +372,7 @@
 			 * We read 4 Dwords of data */
 			if(0 == ReadBeceemEEPROMBulk(Adapter,uiOffset,&uiData[0],4))
 			{
-				OsalMemMove(pcBuff+uiIndex,&uiData[0],MAX_RW_SIZE);
+				memcpy(pcBuff+uiIndex,&uiData[0],MAX_RW_SIZE);
 				uiOffset += MAX_RW_SIZE;
 				uiBytesRemaining -= MAX_RW_SIZE;
 				uiIndex += MAX_RW_SIZE;
@@ -523,7 +387,7 @@
 		{
 			if(0 == ReadBeceemEEPROM(Adapter,uiOffset,&uiData[0]))
 			{
-				OsalMemMove(pcBuff+uiIndex,&uiData[0],4);
+				memcpy(pcBuff+uiIndex,&uiData[0],4);
 				uiOffset += 4;
 				uiBytesRemaining -= 4;
 				uiIndex +=4;
@@ -540,7 +404,7 @@
 			pCharBuff += uiIndex;
 			if(0 == ReadBeceemEEPROM(Adapter,uiOffset,&uiData[0]))
 			{
-				OsalMemMove(pCharBuff,&uiData[0],uiBytesRemaining);//copy only bytes requested.
+				memcpy(pCharBuff,&uiData[0],uiBytesRemaining);//copy only bytes requested.
 				uiBytesRemaining = 0;
 			}
 			else
@@ -571,7 +435,7 @@
 //		<FAILURE>			- if failed.
 //-----------------------------------------------------------------------------
 
-INT BeceemFlashBulkRead(
+static INT BeceemFlashBulkRead(
 	PMINI_ADAPTER Adapter,
 	PUINT pBuffer,
 	UINT uiOffset,
@@ -653,16 +517,8 @@
 //
 //-----------------------------------------------------------------------------
 
-UINT BcmGetFlashSize(PMINI_ADAPTER Adapter)
+static UINT BcmGetFlashSize(PMINI_ADAPTER Adapter)
 {
-#if 0
-	if(Adapter->bDDRInitDone)
-	{
-		return rdm(Adapter,FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT|FLASH_SIZE_ADDR);
-	}
-
-	return rdm(Adapter,FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT|FLASH_SIZE_ADDR);
-#endif
 	if(IsFlash2x(Adapter))
 		return 	(Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader + sizeof(DSD_HEADER));
 	else
@@ -684,7 +540,7 @@
 //
 //-----------------------------------------------------------------------------
 
-UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter)
+static UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter)
 {
 	UINT uiData = 0;
 	UINT uiIndex = 0;
@@ -733,60 +589,6 @@
 	return 0;
 }
 
-#if 0
-/***********************************************************************************/
-//
-//  WriteBeceemEEPROM: Writes 4 byte data to EEPROM offset.
-//
-//                     uiEEPROMOffset - Offset to be written to.
-//                     uiData         - Data to be written.
-//
-/***********************************************************************************/
-
-INT WriteBeceemEEPROM(PMINI_ADAPTER Adapter,UINT uiEEPROMOffset, UINT uiData)
-{
-	INT Status = 0;
-	ULONG ulRdBk = 0;
-	ULONG ulRetryCount = 3;
-	UINT value;
-
-	if(uiEEPROMOffset > EEPROM_END)
-	{
-
-		return -1;
-	}
-
-	uiData = htonl(uiData);
-	while(ulRetryCount--)
-	{
-		value = 0x06000000;
-		wrmalt(Adapter, 0x0F003018,&value, sizeof(value));//flush the EEPROM FIFO.
-		wrmalt(Adapter, 0x0F00301C,&uiData, sizeof(uiData));
-		value = 0x3A000000 | uiEEPROMOffset;
-		wrmalt(Adapter, 0x0F003018,&value, sizeof(value));
-		__udelay(100000);
-		//read back and verify.
-		Status = ReadBeceemEEPROM(Adapter,uiEEPROMOffset,(UINT *)&ulRdBk);
-		if(Status == 0)
-		{
-			if(ulRdBk == uiData)
-			{
-				return Status;
-			}
-			else
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WriteBeceemEEPROM: Readback does not match\n");
-			}
-		}
-		else
-		{
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "WriteBeceemEEPROM: Readback failed\n");
-		}
-	}
-
-	return 0;
-}
-#endif
 
 //-----------------------------------------------------------------------------
 // Procedure:	FlashSectorErase
@@ -973,7 +775,7 @@
 // need not write 0xFFFFFFFF because a write requires an erase, and an erase will
 // make the whole sector 0xFFFFFFFF.
 //
-	if (!OsalMemCompare(pData, uiErasePattern, MAX_RW_SIZE))
+	if (!memcmp(pData, uiErasePattern, MAX_RW_SIZE))
 	{
 		return 0;
 	}
@@ -1138,7 +940,7 @@
 // need not write 0xFFFFFFFF because a write requires an erase, and an erase will
 // make the whole sector 0xFFFFFFFF.
 //
-	if (!OsalMemCompare(pData,uiErasePattern,MAX_RW_SIZE))
+	if (!memcmp(pData,uiErasePattern,MAX_RW_SIZE))
 	{
 		return 0;
 	}
@@ -1332,7 +1134,7 @@
 //
 //-----------------------------------------------------------------------------
 
-INT BeceemFlashBulkWrite(
+static INT BeceemFlashBulkWrite(
 	PMINI_ADAPTER Adapter,
 	PUINT pBuffer,
 	UINT uiOffset,
@@ -1353,15 +1155,6 @@
 	UINT uiTemp 				= 0;
 	UINT index 					= 0;
 	UINT uiPartOffset 			= 0;
-	#if 0
-	struct timeval tv1 = {0};
-	struct timeval tv2 = {0};
-
-	struct timeval tr = {0};
-	struct timeval te = {0};
-	struct timeval tw = {0};
-	struct timeval twv = {0};
-	#endif
 
 #if defined(BCM_SHM_INTERFACE) && !defined(FLASH_DIRECT_ACCESS)
   Status = bcmflash_raw_write((uiOffset/FLASH_PART_SIZE),(uiOffset % FLASH_PART_SIZE),( unsigned char *)pBuffer,uiNumBytes);
@@ -1377,12 +1170,9 @@
 	uiCurrSectOffsetAddr	= uiOffset & (Adapter->uiSectorSize - 1);
 	uiSectBoundary	  		= uiSectAlignAddr + Adapter->uiSectorSize;
 
-	//pTempBuff = OsalMemAlloc(MAX_SECTOR_SIZE,'!MVN');
-	pTempBuff = OsalMemAlloc(Adapter->uiSectorSize ,"!MVN");
+	pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
 	if(NULL == pTempBuff)
-	{
 		goto BeceemFlashBulkWrite_EXIT;
-	}
 //
 // check if the data to be written overlaps across sectors
 //
@@ -1399,7 +1189,6 @@
 			uiNumSectTobeRead++;
 		}
 	}
-	#if 1
 	//Check whether the requested sector is writable in the case of a flash2x write. But if the write call is
 	// for DSD calibration, allow it without checking the sector permission
 
@@ -1420,7 +1209,6 @@
 			 index = index + 1 ;
 		}
 	}
-	#endif
 	Adapter->SelectedChip = RESET_CHIP_SELECT;
 	while(uiNumSectTobeRead)
 	{
@@ -1448,13 +1236,13 @@
 		if(uiNumSectTobeRead > 1)
 		{
 
-			OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
+			memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
 			pcBuffer += ((uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr)));
 			uiNumBytes -= (uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
 		}
 		else
 		{
-				OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
+				memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
 		}
 
 		if(IsFlash2x(Adapter))
@@ -1503,7 +1291,7 @@
 				}
 				else
 				{
-					if(OsalMemCompare(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
+					if(memcmp(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
 					{
 						if(STATUS_SUCCESS != (*Adapter->fpFlashWriteWithStatusCheck)(Adapter,uiPartOffset+uiIndex,&pTempBuff[uiIndex]))
 						{
@@ -1541,10 +1329,8 @@
 	{
 		BcmRestoreBlockProtectStatus(Adapter,ulStatus);
 	}
-	if(pTempBuff)
-	{
-		OsalMemFree(pTempBuff,Adapter->uiSectorSize);
-	}
+
+	kfree(pTempBuff);
 
 	Adapter->SelectedChip = RESET_CHIP_SELECT;
 	return Status;
@@ -1599,14 +1385,10 @@
 	uiCurrSectOffsetAddr	= uiOffset & (Adapter->uiSectorSize - 1);
 	uiSectBoundary			= uiSectAlignAddr + Adapter->uiSectorSize;
 
-
-
-//	pTempBuff = OsalMemAlloc(MAX_SECTOR_SIZE,'!MVN');
-	pTempBuff = OsalMemAlloc(Adapter->uiSectorSize,"!MVN");
+	pTempBuff = kmalloc(Adapter->uiSectorSize, GFP_KERNEL);
 	if(NULL == pTempBuff)
-	{
 		goto BeceemFlashBulkWriteStatus_EXIT;
-	}
+
 //
 // check if the data to be written overlaps across sectors
 //
@@ -1662,13 +1444,13 @@
 		if(uiNumSectTobeRead > 1)
 		{
 
-			OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
+			memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
 			pcBuffer += ((uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr)));
 			uiNumBytes -= (uiSectBoundary-(uiSectAlignAddr+uiCurrSectOffsetAddr));
 		}
 		else
 		{
-			OsalMemMove(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
+			memcpy(&pTempBuff[uiCurrSectOffsetAddr],pcBuffer,uiNumBytes);
 		}
 
 		if(IsFlash2x(Adapter))
@@ -1698,25 +1480,10 @@
 		{
 			for(uiIndex = 0;uiIndex < Adapter->uiSectorSize;uiIndex += MAX_RW_SIZE)
 			{
-#if 0
-				if(0 == BeceemFlashBulkRead(Adapter,uiReadBk,uiOffsetFromSectStart+uiIndex + Adapter->ulFlashCalStart ,MAX_RW_SIZE))
-				{
-					for(uiReadIndex = 0;uiReadIndex < 4; uiReadIndex++)
-					{
-						if(*((PUINT)&pTempBuff[uiIndex+uiReadIndex*4]) != uiReadBk[uiReadIndex])
-						{
-							Status = -1;
-							goto BeceemFlashBulkWriteStatus_EXIT;
-
-						}
-					}
-
-				}
-#endif
 
 				if(STATUS_SUCCESS == BeceemFlashBulkRead(Adapter,(PUINT)ucReadBk,uiOffsetFromSectStart+uiIndex,MAX_RW_SIZE))
 				{
-					if(OsalMemCompare(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
+					if(memcmp(ucReadBk,&pTempBuff[uiIndex],MAX_RW_SIZE))
 					{
 						Status = STATUS_FAILURE;
 						goto BeceemFlashBulkWriteStatus_EXIT;
@@ -1747,10 +1514,8 @@
 	{
 		BcmRestoreBlockProtectStatus(Adapter,ulStatus);
 	}
-	if(pTempBuff)
-	{
-		OsalMemFree(pTempBuff,Adapter->uiSectorSize);
-	}
+
+	kfree(pTempBuff);
 	Adapter->SelectedChip = RESET_CHIP_SELECT;
 	return Status;
 
@@ -1771,7 +1536,7 @@
 
 INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter)
 {
-	PCHAR pBuff = OsalMemAlloc(BUFFER_4K,"3MVN");
+	PCHAR pBuff = kmalloc(BUFFER_4K, GFP_KERNEL);
 	UINT uiEepromSize = 0;
 	UINT uiIndex = 0;
 	UINT uiBytesToCopy = 0;
@@ -1787,14 +1552,14 @@
 	if(0 != BeceemEEPROMBulkRead(Adapter,&uiEepromSize,EEPROM_SIZE_OFFSET,4))
 	{
 
-		OsalMemFree(pBuff,BUFFER_4K);
+		kfree(pBuff);
 		return -1;
 	}
 
 	uiEepromSize >>= 16;
 	if(uiEepromSize > 1024*1024)
 	{
-		OsalMemFree(pBuff,BUFFER_4K);
+		kfree(pBuff);
 		return -1;
 	}
 
@@ -1820,7 +1585,7 @@
 	wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC-4,&value, sizeof(value));
 	value = 0xbeadbead;
 	wrmalt(Adapter, EEPROM_CAL_DATA_INTERNAL_LOC-8,&value, sizeof(value));
-	OsalMemFree(pBuff,MAX_RW_SIZE);
+	kfree(pBuff);
 
 	return Status;
 
@@ -1873,16 +1638,13 @@
 		return -1;
 	}
 
-	pBuff = OsalMemAlloc(uiEepromSize, 0);
-
+	pBuff = kmalloc(uiEepromSize, GFP_KERNEL);
 	if ( pBuff == NULL )
-	{
 		return -1;
-	}
 
 	if(0 != BeceemNVMRead(Adapter,(PUINT)pBuff,uiCalStartAddr, uiEepromSize))
 	{
-		OsalMemFree(pBuff, 0);
+		kfree(pBuff);
 		return -1;
 	}
 
@@ -1905,7 +1667,7 @@
 		uiBytesToCopy = MIN(BUFFER_4K,uiEepromSize);
 	}
 
-	OsalMemFree(pBuff, 0);
+	kfree(pBuff);
 	return Status;
 
 }
@@ -1947,14 +1709,14 @@
 		{// for requests of MAX_RW_SIZE bytes or more, use the bulk read function to make the access faster.
 			BeceemEEPROMBulkRead(Adapter,&auiData[0],uiOffset,MAX_RW_SIZE);
 
-			if(OsalMemCompare(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
+			if(memcmp(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
 			{
 				// re-write
 				BeceemEEPROMBulkWrite(Adapter,(PUCHAR)(pBuffer+uiIndex),uiOffset,MAX_RW_SIZE,FALSE);
 				mdelay(3);
 				BeceemEEPROMBulkRead(Adapter,&auiData[0],uiOffset,MAX_RW_SIZE);
 
-				if(OsalMemCompare(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
+				if(memcmp(&pBuffer[uiIndex],&auiData[0],MAX_RW_SIZE))
 				{
 					return -1;
 				}
@@ -1986,7 +1748,7 @@
 		else
 		{ // Handle the reads less than 4 bytes...
 			uiData = 0;
-			OsalMemMove(&uiData,((PUCHAR)pBuffer)+(uiIndex*sizeof(UINT)),uiNumBytes);
+			memcpy(&uiData,((PUCHAR)pBuffer)+(uiIndex*sizeof(UINT)),uiNumBytes);
 			BeceemEEPROMBulkRead(Adapter,&uiRdbk,uiOffset,4);
 
 			if(memcmp(&uiData, &uiRdbk, uiNumBytes))
@@ -2186,7 +1948,7 @@
 
 		if(uiBytesToCopy >= (16 -uiExtraBytes))
 		{
-			OsalMemMove((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,MAX_RW_SIZE- uiExtraBytes);
+			memcpy((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,MAX_RW_SIZE- uiExtraBytes);
 
 			if ( STATUS_FAILURE == BeceemEEPROMWritePage( Adapter, uiData, uiTempOffset ) )
 					return STATUS_FAILURE;
@@ -2197,7 +1959,7 @@
 		}
 		else
 		{
-			OsalMemMove((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,uiBytesToCopy);
+			memcpy((((PUCHAR)&uiData[0])+uiExtraBytes),pBuffer,uiBytesToCopy);
 
 			if ( STATUS_FAILURE == BeceemEEPROMWritePage( Adapter, uiData, uiTempOffset ) )
 					return STATUS_FAILURE;
@@ -2233,7 +1995,7 @@
 	// To program non 16byte aligned data, read 16byte and then update.
 	//
 			BeceemEEPROMBulkRead(Adapter,&uiData[0],uiOffset,16);
-			OsalMemMove(&uiData[0],pBuffer+uiIndex,uiBytesToCopy);
+			memcpy(&uiData[0],pBuffer+uiIndex,uiBytesToCopy);
 
 
 			if ( STATUS_FAILURE == BeceemEEPROMWritePage( Adapter, uiData, uiOffset ) )
@@ -2535,7 +2297,7 @@
 //
 //-----------------------------------------------------------------------------
 
-UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize)
+static UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter, UINT FlashSectorSizeSig, UINT FlashSectorSize)
 {
 	UINT uiSectorSize = 0;
 	UINT uiSectorSig = 0;
@@ -2642,20 +2404,8 @@
 
 INT BcmInitNVM(PMINI_ADAPTER ps_adapter)
 {
-#ifdef BCM_SHM_INTERFACE
-#ifdef FLASH_DIRECT_ACCESS
-	unsigned int data,data1,data2 = 1;
-	wrm(ps_adapter, PAD_SELECT_REGISTER, &data2, 4);
-	data1 = rdm(ps_adapter,SYS_CFG,&data,4);
-	data1 = rdm(ps_adapter,SYS_CFG,&data,4);
-	data2 = (data | 0x80 | 0x8000);
-	wrm(ps_adapter,SYS_CFG, &data2,4); // over-write as Flash boot mode
-#endif
-	ps_adapter->eNVMType = NVM_FLASH;
-#else
 	BcmValidateNvmType(ps_adapter);
 	BcmInitEEPROMQueues(ps_adapter);
-#endif
 
 	if(ps_adapter->eNVMType == NVM_AUTODETECT)
 	{
@@ -2684,7 +2434,7 @@
 */
 /***************************************************************************/
 
-INT BcmGetNvmSize(PMINI_ADAPTER Adapter)
+static INT BcmGetNvmSize(PMINI_ADAPTER Adapter)
 {
 	if(Adapter->eNVMType == NVM_EEPROM)
 	{
@@ -2708,7 +2458,7 @@
 // Returns:
 //		<VOID>
 //-----------------------------------------------------------------------------
-VOID BcmValidateNvmType(PMINI_ADAPTER Adapter)
+static VOID BcmValidateNvmType(PMINI_ADAPTER Adapter)
 {
 
 	//
@@ -2775,7 +2525,7 @@
 	if(psAdapter->psFlash2xCSInfo == NULL)
 	{
 		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0,"Can't Allocate memory for Flash 2.x");
-		bcm_kfree(psAdapter->psFlashCSInfo);
+		kfree(psAdapter->psFlashCSInfo);
 		return -ENOMEM;
 	}
 
@@ -2783,8 +2533,8 @@
 	if(psAdapter->psFlash2xVendorInfo == NULL)
 	{
 		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0,"Can't Allocate Vendor Info Memory for Flash 2.x");
-		bcm_kfree(psAdapter->psFlashCSInfo);
-		bcm_kfree(psAdapter->psFlash2xCSInfo);
+		kfree(psAdapter->psFlashCSInfo);
+		kfree(psAdapter->psFlash2xCSInfo);
 		return -ENOMEM;
 	}
 
@@ -2798,9 +2548,9 @@
 		BCM_DEBUG_PRINT(psAdapter,DBG_TYPE_PRINTK, 0, 0," Adapter structure pointer is NULL");
 		return -EINVAL;
 	}
-	bcm_kfree(psAdapter->psFlashCSInfo);
-	bcm_kfree(psAdapter->psFlash2xCSInfo);
-	bcm_kfree(psAdapter->psFlash2xVendorInfo);
+	kfree(psAdapter->psFlashCSInfo);
+	kfree(psAdapter->psFlash2xCSInfo);
+	kfree(psAdapter->psFlash2xVendorInfo);
 	return STATUS_SUCCESS ;
 }
 
@@ -2954,7 +2704,7 @@
 	return STATUS_SUCCESS;
 }
 
-INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section)
+static INT IsSectionExistInVendorInfo(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section)
 {
  	return ( Adapter->uiVendorExtnFlag &&
  		(Adapter->psFlash2xVendorInfo->VendorSection[section].AccessFlags & FLASH2X_SECTION_PRESENT) &&
@@ -3052,7 +2802,7 @@
 //		<VOID>
 //-----------------------------------------------------------------------------
 
-INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
+static INT BcmGetFlashCSInfo(PMINI_ADAPTER Adapter)
 {
 	//FLASH_CS_INFO sFlashCsInfo = {0};
 
@@ -3070,7 +2820,6 @@
 	memset(Adapter->psFlashCSInfo, 0 ,sizeof(FLASH_CS_INFO));
 	memset(Adapter->psFlash2xCSInfo, 0 ,sizeof(FLASH2X_CS_INFO));
 
-#ifndef BCM_SHM_INTERFACE
 	if(!Adapter->bDDRInitDone)
 	{
 		{
@@ -3079,7 +2828,6 @@
 		}
 	}
 
-#endif
 
 	// Reading first 8 Bytes to get the Flash Layout
 	// MagicNumber(4 bytes) +FlashLayoutMinorVersion(2 Bytes) +FlashLayoutMajorVersion(2 Bytes)
@@ -3147,9 +2895,7 @@
 			return STATUS_FAILURE;
 		}
 		ConvertEndianOf2XCSStructure(Adapter->psFlash2xCSInfo);
-#ifndef BCM_SHM_INTERFACE
 		BcmDumpFlash2XCSStructure(Adapter->psFlash2xCSInfo,Adapter);
-#endif
 		if((FLASH_CONTROL_STRUCT_SIGNATURE == Adapter->psFlash2xCSInfo->MagicNumber) &&
 		   (SCSI_FIRMWARE_MINOR_VERSION <= MINOR_VERSION(Adapter->psFlash2xCSInfo->SCSIFirmwareVersion)) &&
 		   (FLASH_SECTOR_SIZE_SIG == Adapter->psFlash2xCSInfo->FlashSectorSizeSig) &&
@@ -3181,21 +2927,10 @@
 	Concerns: what if the CS sector size does not match this sector size?
 	What is the indication of AccessBitMap in CS in flash 2.x?
 	*/
-#ifndef BCM_SHM_INTERFACE
 	Adapter->ulFlashID = BcmReadFlashRDID(Adapter);
-#endif
 
 	Adapter->uiFlashLayoutMajorVersion = uiFlashLayoutMajorVersion;
 
-	#if 0
-	if(FLASH_PART_SST25VF080B == Adapter->ulFlashID)
-	{
-	//
-	// 1MB flash has been selected. we have to use 64K as sector size no matter what is kept in FLASH_CS.
-	//
-		Adapter->uiSectorSize = 0x10000;
-	}
-	#endif
 
 	return STATUS_SUCCESS ;
 }
@@ -3214,7 +2949,7 @@
 //
 //-----------------------------------------------------------------------------
 
-NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter)
+static NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter)
 {
 	UINT uiData = 0;
 
@@ -3569,39 +3304,6 @@
 }
 
 /**
-*	ReadDSDHeader : Read the DSD map for the DSD Section val provided in Argument.
-*	@Adapter : Beceem Private Data Structure
-*	@psDSDHeader :Pointer of the buffer where header has to be read
-*	@dsd :value of the Dyanmic DSD like DSD0 of DSD1 or DSD2
-*
-*	Return Value:-
-*		if suceeds return STATUS_SUCCESS or negative error code.
-**/
-INT ReadDSDHeader(PMINI_ADAPTER Adapter, PDSD_HEADER psDSDHeader, FLASH2X_SECTION_VAL dsd)
-{
-	INT Status = STATUS_SUCCESS;
-
-	Status =BcmFlash2xBulkRead(Adapter,
-						    (PUINT)psDSDHeader,
-							dsd,
-							Adapter->psFlash2xCSInfo->OffsetFromDSDStartForDSDHeader,
-							sizeof(DSD_HEADER));
-	if(Status == STATUS_SUCCESS)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImageMagicNumber :0X%x", ntohl(psDSDHeader->DSDImageMagicNumber));
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImageSize :0X%x ",ntohl(psDSDHeader->DSDImageSize));
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImageCRC :0X%x",ntohl(psDSDHeader->DSDImageCRC));
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "DSDImagePriority :0X%x",ntohl(psDSDHeader->DSDImagePriority));
-	}
-	else
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"DSD Header read is failed with status :%d", Status);
-	}
-
-	return Status;
-}
-
-/**
 *	BcmGetActiveDSD : Set the Active DSD in Adapter Structure which has to be dumped in DDR
 *	@Adapter :-Drivers private Data Structure
 *
@@ -3609,7 +3311,7 @@
 *		Return STATUS_SUCCESS on success in setting the right DSD, else a negative error code
 *
 **/
-INT BcmGetActiveDSD(PMINI_ADAPTER Adapter)
+static INT BcmGetActiveDSD(PMINI_ADAPTER Adapter)
 {
 	FLASH2X_SECTION_VAL uiHighestPriDSD = 0 ;
 
@@ -3647,39 +3349,6 @@
 	return STATUS_SUCCESS;
 }
 
-/**
-*	ReadISOUnReservedBytes : Read the ISO map for the ISO Section val provided in Argument.
-*	@Adapter : Driver Private Data Structure
-*	@psISOHeader :Pointer of the location where header has to be read
-*	@IsoImage :value of the Dyanmic ISO like ISO_IMAGE1 of ISO_IMAGE2
-*
-*	Return Value:-
-*		if suceeds return STATUS_SUCCESS or negative error code.
-**/
-
-INT ReadISOHeader(PMINI_ADAPTER Adapter, PISO_HEADER psISOHeader, FLASH2X_SECTION_VAL IsoImage)
-{
-	INT Status = STATUS_SUCCESS;
-
-	Status = BcmFlash2xBulkRead(Adapter,
-					    (PUINT)psISOHeader,
-						IsoImage,
-						0,
-						sizeof(ISO_HEADER));
-
-	if(Status == STATUS_SUCCESS)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImageMagicNumber :0X%x", ntohl(psISOHeader->ISOImageMagicNumber));
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImageSize :0X%x ",ntohl(psISOHeader->ISOImageSize));
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImageCRC :0X%x",ntohl(psISOHeader->ISOImageCRC));
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "ISOImagePriority :0X%x",ntohl(psISOHeader->ISOImagePriority));
-	}
-	else
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "ISO Header Read failed");
-	}
-	return Status;
-}
 
 /**
 *	BcmGetActiveISO :- Set the Active ISO in Adapter Data Structure
@@ -3691,7 +3360,7 @@
 *
 **/
 
-INT BcmGetActiveISO(PMINI_ADAPTER Adapter)
+static INT BcmGetActiveISO(PMINI_ADAPTER Adapter)
 {
 
 	INT HighestPriISO = 0 ;
@@ -4588,7 +4257,7 @@
 
 	}
 
-	bcm_kfree(Buff);
+	kfree(Buff);
 
 	return Status;
 }
@@ -4789,7 +4458,7 @@
 	Success :- Base Address of the Flash
 **/
 
-INT GetFlashBaseAddr(PMINI_ADAPTER Adapter)
+static INT GetFlashBaseAddr(PMINI_ADAPTER Adapter)
 {
 
 	UINT uiBaseAddr = 0;
@@ -4866,20 +4535,6 @@
 		return  -EINVAL;
 	}
 
-	#if 0
-	else
-	{
-		if((SrcSection == VSA0) || (SrcSection == VSA1) || (SrcSection == VSA2))
-		{
-			if((DstSection != VSA0) && (DstSection != VSA1) && (DstSection != VSA2))
-			{
-				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Source and Destion secton is not of same type");
-				return -EINVAL;
-			}
-		}
-
-	}
-	#endif
 	//if offset is zero, it means the complete section has to be copied
 
 	if(numOfBytes == 0)
@@ -4954,7 +4609,7 @@
 				BytesToBeCopied = numOfBytes;
 		}
 	}while(numOfBytes > 0) ;
-	bcm_kfree(pBuff);
+	kfree(pBuff);
 	Adapter->bHeaderChangeAllowed = FALSE ;
 	return Status;
 }
@@ -4979,14 +4634,6 @@
 	UINT uiSectAlignAddr = 0;
 	UINT sig = 0;
 
-	#if 0
-	//if Chenges in Header is allowed, Return back
-	if(Adapter->bHeaderChangeAllowed == TRUE)
-	{
-		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL, "Header Change is allowed");
-		return STATUS_SUCCESS ;
-	}
-	#endif
 	//making the offset sector aligned
 	uiSectAlignAddr = uiOffset & ~(Adapter->uiSectorSize - 1);
 
@@ -5024,7 +4671,7 @@
 		//Replace Buffer content with Header
 		memcpy(pBuff +offsetToProtect,pTempBuff,HeaderSizeToProtect);
 
-		bcm_kfree(pTempBuff);
+		kfree(pTempBuff);
 	}
 	if(bHasHeader && Adapter->bSigCorrupted)
 	{
@@ -5044,29 +4691,7 @@
 
 	return STATUS_SUCCESS ;
 }
-INT BcmMakeFlashCSActive(PMINI_ADAPTER Adapter, UINT offset)
-{
-	UINT GPIOConfig = 0 ;
 
-
-	if(Adapter->bFlashRawRead == FALSE)
-	{
-		//Applicable for Flash2.x
-		if(IsFlash2x(Adapter) == FALSE)
-			return STATUS_SUCCESS;
-	}
-
-	if(offset/FLASH_PART_SIZE)
-	{
-		//bit[14..12] -> will select make Active CS1, CS2 or CS3
-		// Select CS1, CS2 and CS3 (CS0 is dedicated pin)
-		rdmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
-		GPIOConfig |= (7 << 12);
-		wrmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
-	}
-
-	return STATUS_SUCCESS ;
-}
 /**
 BcmDoChipSelect : This will select the appropriate chip for writing.
 @Adapter :- Bcm Driver Private Data Structure
@@ -5074,7 +4699,7 @@
 OutPut:-
 	Select the appropriate chip and return status Success
 **/
-INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
+static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
 {
 	UINT FlashConfig = 0;
 	INT ChipNum = 0;
@@ -5365,39 +4990,6 @@
 	return Status;
 }
 
-#if 0
-UINT getNumOfSubSectionWithWRPermisson(PMINI_ADAPTER Adapter, SECTION_TYPE secType)
-{
-
-	UINT numOfWRSubSec = 0;
-	switch(secType)
-	{
-		case ISO :
-			if(IsSectionWritable(Adapter,ISO_IMAGE1))
-				numOfWRSubSec = numOfWRSubSec + 1;
-			if(IsSectionWritable(Adapter,ISO_IMAGE2))
-				numOfWRSubSec = numOfWRSubSec + 1;
-			break;
-
-		case DSD :
-			if(IsSectionWritable(Adapter,DSD2))
-				numOfWRSubSec = numOfWRSubSec + 1;
-			if(IsSectionWritable(Adapter,DSD1))
-				numOfWRSubSec = numOfWRSubSec + 1;
-			if(IsSectionWritable(Adapter,DSD0))
-				numOfWRSubSec = numOfWRSubSec + 1;
-			break ;
-
-		case VSA :
-				//for VSA Add code Here
-		 default :
-			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Invalid secton<%d> is passed", secType);\
-			numOfWRSubSec = 0;
-
-	}
-	return numOfWRSubSec;
-}
-#endif
 BOOLEAN IsSectionExistInFlash(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL section)
 {
 
@@ -5479,7 +5071,7 @@
 		return Status ;
 }
 
-INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
+static INT CorruptDSDSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
 {
 
 	PUCHAR pBuff = NULL;
@@ -5543,16 +5135,16 @@
 	else
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"BCM Signature is not present in header");
-		bcm_kfree(pBuff);
+		kfree(pBuff);
 		return STATUS_FAILURE;
 	}
 
-	bcm_kfree(pBuff);
+	kfree(pBuff);
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Corrupted the signature");
 	return STATUS_SUCCESS ;
 }
 
-INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
+static INT CorruptISOSig(PMINI_ADAPTER Adapter, FLASH2X_SECTION_VAL eFlash2xSectionVal)
 {
 
 	PUCHAR pBuff = NULL;
@@ -5593,14 +5185,14 @@
 	else
 	{
 		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"BCM Signature is not present in header");
-		bcm_kfree(pBuff);
+		kfree(pBuff);
 		return STATUS_FAILURE;
 	}
 
 	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,"Corrupted the signature");
 	BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, NVM_RW, DBG_LVL_ALL,pBuff,MAX_RW_SIZE);
 
-	bcm_kfree(pBuff);
+	kfree(pBuff);
 	return STATUS_SUCCESS ;
 }
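
As an aside, here is a minimal sketch of the memory-handling pattern the nvm.c hunks above converge on, assuming kernel context; the function and variable names are illustrative, not taken from the driver:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int demo_copy_sector(unsigned int sector_size, const void *src)
	{
		void *buf;

		/* was OsalMemAlloc(sector_size, "tag"); GFP_KERNEL may sleep */
		buf = kmalloc(sector_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, src, sector_size);	/* was OsalMemMove() */

		/* was OsalMemFree(buf, sector_size); kfree(NULL) is a no-op,
		 * which is why the if (pTempBuff) guards could be dropped.
		 */
		kfree(buf);
		return 0;
	}

The same shape repeats in BeceemFlashBulkWriteStatus and the calibration helpers above.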
 
diff --git a/drivers/staging/bcm/nvm.h b/drivers/staging/bcm/nvm.h
index 6ec6ca8..651b5a4 100644
--- a/drivers/staging/bcm/nvm.h
+++ b/drivers/staging/bcm/nvm.h
@@ -323,15 +323,6 @@
 
 
 
-#ifdef BCM_SHM_INTERFACE
-
-#define FLASH_ADDR_MASK                          0x1F000000
-extern int bcmflash_raw_read(unsigned int flash_id, unsigned int offset, unsigned char *inbuf, unsigned int len);
-extern int bcmflash_raw_write(unsigned int flash_id, unsigned int offset, unsigned char *outbuf, unsigned int len);
-extern int bcmflash_raw_writenoerase(unsigned int flash_id, unsigned int offset, unsigned char *outbuf, unsigned int len);
-
-
-#endif
 
 #define FLASH_CONTIGIOUS_START_ADDR_AFTER_INIT   0x1C000000
 #define FLASH_CONTIGIOUS_START_ADDR_BEFORE_INIT  0x1F000000
@@ -414,76 +405,5 @@
 
 #define FIELD_OFFSET_IN_HEADER(HeaderPointer,Field) ((PUCHAR)&((HeaderPointer)(NULL))->Field - (PUCHAR)(NULL))
 
-#if 0
-INT BeceemEEPROMBulkRead(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes);
-
-
-INT BeceemFlashBulkRead(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes);
-
-UINT BcmGetEEPROMSize(PMINI_ADAPTER Adapter);
-
-UINT BcmGetFlashSize(PMINI_ADAPTER Adapter);
-
-UINT BcmGetFlashSectorSize(PMINI_ADAPTER Adapter);
-
-
-
-INT BeceemFlashBulkWrite(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes,
-	BOOLEAN bVerify);
-
-INT PropagateCalParamsFromFlashToMemory(PMINI_ADAPTER Adapter);
-
-INT PropagateCalParamsFromEEPROMToMemory(PMINI_ADAPTER Adapter);
-
-
-INT BeceemEEPROMBulkWrite(
-	PMINI_ADAPTER Adapter,
-	PUCHAR pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes,
-	BOOLEAN bVerify);
-
-
-INT ReadBeceemEEPROM(PMINI_ADAPTER Adapter,UINT dwAddress, UINT *pdwData);
-
-NVM_TYPE BcmGetNvmType(PMINI_ADAPTER Adapter);
-
-INT BeceemNVMRead(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes);
-
-INT BeceemNVMWrite(
-	PMINI_ADAPTER Adapter,
-	PUINT pBuffer,
-	UINT uiOffset,
-	UINT uiNumBytes,
-	BOOLEAN bVerify);
-
-INT ReadMacAddressFromEEPROM(PMINI_ADAPTER Adapter);
-
-INT BcmUpdateSectorSize(PMINI_ADAPTER Adapter,UINT uiSectorSize);
-
-INT BcmInitNVM(PMINI_ADAPTER Adapter);
-
-VOID BcmValidateNvmType(PMINI_ADAPTER Adapter);
-
-VOID BcmGetFlashCSInfo(PMINI_ADAPTER Adapter);
-
-#endif
-
 #endif
 
diff --git a/drivers/staging/bcm/osal_misc.h b/drivers/staging/bcm/osal_misc.h
deleted file mode 100644
index ff4adde..0000000
--- a/drivers/staging/bcm/osal_misc.h
+++ /dev/null
@@ -1,49 +0,0 @@
-	/*++
-
-	Copyright (c) Beceem Communications Inc.
-
-	Module Name:
-		OSAL_Misc.h
-
-	Abstract:
-		Provides the OS Abstracted macros to access:
-			Linked Lists
-			Dispatcher Objects(Events,Semaphores,Spin Locks and the like)
-			Files
-
-
-	Revision History:
-		Who         When        What
-		--------    --------    ----------------------------------------------
-		Name		Date		Created/reviewed/modified
-		Rajeev		24/1/08		Created
-	Notes:
-
-	--*/
-#ifndef _OSAL_MISC_H_
-#define _OSAL_MISC_H_
-//OSAL Macros
-//OSAL Primitives
-typedef PUCHAR  POSAL_NW_PACKET  ;		//Nw packets
-
-
-#define OsalMemAlloc(n,t) kmalloc(n,GFP_KERNEL)
-
-#define OsalMemFree(x,n) bcm_kfree(x)
-
-#define OsalMemMove(dest, src, len)		\
-{										\
-			memcpy(dest,src, len);		\
-}
-
-#define OsalZeroMemory(pDest, Len)		\
-{										\
-			memset(pDest,0,Len);		\
-}
-
-//#define OsalMemSet(pSrc,Char,Len) memset(pSrc,Char,Len)
-
-bool OsalMemCompare(void *dest, void *src, UINT len);
-
-#endif
-
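
With osal_misc.h removed, the wrapper-to-kernel mapping applied throughout this series can be summarized; the following is a hedged reference sketch only (the DEMO_* names are hypothetical stand-ins, the right-hand calls are the standard primitives the patch substitutes):

	#include <linux/slab.h>
	#include <linux/string.h>

	#define DEMO_MEM_ALLOC(n)	kmalloc((n), GFP_KERNEL)	/* was OsalMemAlloc(n, tag) */
	#define DEMO_MEM_FREE(p)	kfree(p)			/* was OsalMemFree(p, n)    */
	#define DEMO_MEM_MOVE(d, s, n)	memcpy((d), (s), (n))		/* was OsalMemMove()        */
	#define DEMO_ZERO_MEM(p, n)	memset((p), 0, (n))		/* was OsalZeroMemory()     */
	#define DEMO_MEM_CMP(a, b, n)	memcmp((a), (b), (n))		/* was OsalMemCompare()     */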
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index a27bb0b..99e6766 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -43,14 +43,8 @@
 	http://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
 	https://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
 
-For all chips, copy brcm/bcm43xx-0-610-809-0.fw and
-brcm/bcm43xx_hdr-0-610-809-0.fw to /lib/firmware/brcm (or wherever firmware is
-normally installed on the system).  In the /lib/firmware/brcm directory, then
-create the following symlinks:
-
-	ln -s bcm43xx-0-610-809-0.fw bcm43xx-0.fw
-	ln -s bcm43xx_hdr-0-610-809-0.fw bcm43xx_hdr-0.fw
-
+For all chips, copy brcm/bcm43xx-0.fw and brcm/bcm43xx_hdr-0.fw to
+/lib/firmware/brcm (or wherever firmware is normally installed on your system).
 
 Currently supported chips
 ==============
diff --git a/drivers/staging/brcm80211/brcmfmac/README b/drivers/staging/brcm80211/brcmfmac/README
index 43601fa..be29e42 100644
--- a/drivers/staging/brcm80211/brcmfmac/README
+++ b/drivers/staging/brcm80211/brcmfmac/README
@@ -25,8 +25,9 @@
 	http://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
 	https://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
 
-For 4329 chip, copy brcm/bcm4329-fullmac-4-218-248-5.bin and
-bcm4329-fullmac-4-218-248-5.txt to /lib/firmware/brcm
+For 4329 chip, copy brcm/bcm4329-fullmac-4.bin and brcm/bcm4329-fullmac-4.txt
+to /lib/firmware/brcm (or wherever firmware is normally installed on your
+system).
 
 Contact Info:
 =============
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
index 4c613da..acf43a3 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
@@ -16,13 +16,14 @@
 /* ****************** BCMSDH Interface Functions *************************** */
 
 #include <linux/types.h>
+#include <linux/netdevice.h>
 #include <bcmdefs.h>
 #include <bcmdevs.h>
 #include <bcmendian.h>
+#include <osl.h>
 #include <bcmutils.h>
 #include <hndsoc.h>
 #include <siutils.h>
-#include <osl.h>
 
 #include <bcmsdh.h>		/* BRCM API for SDIO
 			 clients (such as wl, dhd) */
@@ -38,7 +39,7 @@
 	bool init_success;	/* underlying driver successfully attached */
 	void *sdioh;		/* handler for sdioh */
 	u32 vendevid;	/* Target Vendor and Device ID on SD bus */
-	osl_t *osh;
+	struct osl_info *osh;
 	bool regfail;		/* Save status of last
 				 reg_read/reg_write call */
 	u32 sbwad;		/* Save backplane window address */
@@ -55,7 +56,8 @@
 }
 #endif
 
-bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+bcmsdh_info_t *bcmsdh_attach(struct osl_info *osh, void *cfghdl,
+				void **regsva, uint irq)
 {
 	bcmsdh_info_t *bcmsdh;
 
@@ -84,7 +86,7 @@
 	return bcmsdh;
 }
 
-int bcmsdh_detach(osl_t *osh, void *sdh)
+int bcmsdh_detach(struct osl_info *osh, void *sdh)
 {
 	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
 
@@ -451,7 +453,7 @@
 
 int
 bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
-		u8 *buf, uint nbytes, void *pkt,
+		u8 *buf, uint nbytes, struct sk_buff *pkt,
 		bcmsdh_cmplt_fn_t complete, void *handle)
 {
 	bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
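
A small self-contained illustration of the osl_t to struct osl_info change running through these files: with a plain struct tag, a forward declaration is enough for headers that only pass the handle around, which is presumably part of why the typedef could be dropped (the helper name below is hypothetical; the real struct osl_info is defined in the driver's osl headers):

	/* No osl.h needed just to declare functions that take the handle. */
	struct osl_info;

	int demo_detach(struct osl_info *osh);	/* pointer use only needs the tag */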
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
index 9028cd0..d24b5e7 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
@@ -20,8 +20,7 @@
 
 #define __UNDEF_NO_VERSION__
 
-#include <linuxver.h>
-
+#include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/completion.h>
 
@@ -57,7 +56,7 @@
 #else
 	struct pci_dev *dev;	/* pci device handle */
 #endif				/* BCMPLATFORM_BUS */
-	osl_t *osh;
+	struct osl_info *osh;
 	void *regs;		/* SDIO Host Controller address */
 	bcmsdh_info_t *sdh;	/* SDIO Host Controller handle */
 	void *ch;
@@ -139,22 +138,11 @@
 #endif				/* BCMLXSDMMC */
 
 #ifndef BCMLXSDMMC
-static struct device_driver bcmsdh_driver = {
-	.name = "pxa2xx-mci",
-	.bus = &platform_bus_type,
-	.probe = bcmsdh_probe,
-	.remove = bcmsdh_remove,
-	.suspend = NULL,
-	.resume = NULL,
-};
-#endif				/* BCMLXSDMMC */
-
-#ifndef BCMLXSDMMC
 static
 #endif				/* BCMLXSDMMC */
 int bcmsdh_probe(struct device *dev)
 {
-	osl_t *osh = NULL;
+	struct osl_info *osh = NULL;
 	bcmsdh_hc_t *sdhc = NULL;
 	unsigned long regs = 0;
 	bcmsdh_info_t *sdh = NULL;
@@ -189,7 +177,7 @@
 	}
 #endif				/* defined(OOB_INTR_ONLY) */
 	/* allocate SDIO Host Controller state info */
-	osh = osl_attach(dev, PCI_BUS, false);
+	osh = osl_attach(dev, PCI_BUS);
 	if (!osh) {
 		SDLX_MSG(("%s: osl_attach failed\n", __func__));
 		goto err;
@@ -258,7 +246,7 @@
 int bcmsdh_remove(struct device *dev)
 {
 	bcmsdh_hc_t *sdhc, *prev;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	sdhc = sdhcinfo;
 	drvinfo.detach(sdhc->ch);
@@ -291,269 +279,23 @@
 
 	return 0;
 }
-
-#else				/* BCMPLATFORM_BUS */
-
-#if !defined(BCMLXSDMMC)
-/* forward declarations for PCI probe and remove functions. */
-static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev,
-				      const struct pci_device_id *ent);
-static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
-
-/**
- * pci id table
- */
-static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
-{
-	.vendor = PCI_ANY_ID,
-	.device = PCI_ANY_ID,
-	.subvendor = PCI_ANY_ID,
-	.subdevice = PCI_ANY_ID,
-	.class = 0,
-	.class_mask = 0,
-	.driver_data = 0,
-},
-{0,}
-};
-
-MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
-
-/**
- * SDIO Host Controller pci driver info
- */
-static struct pci_driver bcmsdh_pci_driver = {
-	.node = {},
-	.name = "bcmsdh",
-	.id_table = bcmsdh_pci_devid,
-	.probe = bcmsdh_pci_probe,
-	.remove = bcmsdh_pci_remove,
-	.suspend = NULL,
-	.resume = NULL,
-};
-
-extern uint sd_pci_slot;	/* Force detection to a particular PCI */
-				/* slot only . Allows for having multiple */
-				/* WL devices at once in a PC */
-				/* Only one instance of dhd will be */
-				/* usable at a time */
-				/* Upper word is bus number, */
-				/* lower word is slot number */
-				/* Default value of 0xFFFFffff turns this */
-				/* off */
-module_param(sd_pci_slot, uint, 0);
-
-/**
- * Detect supported SDIO Host Controller and attach if found.
- *
- * Determine if the device described by pdev is a supported SDIO Host
- * Controller.  If so, attach to it and attach to the target device.
- */
-static int __devinit
-bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	osl_t *osh = NULL;
-	bcmsdh_hc_t *sdhc = NULL;
-	unsigned long regs;
-	bcmsdh_info_t *sdh = NULL;
-	int rc;
-
-	if (sd_pci_slot != 0xFFFFffff) {
-		if (pdev->bus->number != (sd_pci_slot >> 16) ||
-		    PCI_SLOT(pdev->devfn) != (sd_pci_slot & 0xffff)) {
-			SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
-				  __func__,
-				  bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
-				  "Found compatible SDIOHC" :
-				  "Probing unknown device",
-				  pdev->bus->number, PCI_SLOT(pdev->devfn),
-				  pdev->vendor, pdev->device));
-			return -ENODEV;
-		}
-		SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X "
-			"(good PCI location)\n", __func__,
-			bcmsdh_chipmatch(pdev->vendor, pdev->device) ?
-			"Using compatible SDIOHC" : "WARNING, forced use "
-			"of unkown device",
-		pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
-		pdev->device));
-	}
-
-	if ((pdev->vendor == VENDOR_TI)
-	    && ((pdev->device == PCIXX21_FLASHMEDIA_ID)
-		|| (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
-		u32 config_reg;
-
-		SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n",
-			  __func__));
-		osh = osl_attach(pdev, PCI_BUS, false);
-		if (!osh) {
-			SDLX_MSG(("%s: osl_attach failed\n", __func__));
-			goto err;
-		}
-
-		config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
-
-		/*
-		 * Set MMC_SD_DIS bit in FlashMedia Controller.
-		 * Disbling the SD/MMC Controller in the FlashMedia Controller
-		 * allows the Standard SD Host Controller to take over control
-		 * of the SD Slot.
-		 */
-		config_reg |= 0x02;
-		OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
-		osl_detach(osh);
-	}
-	/* match this pci device with what we support */
-	/* we can't solely rely on this to believe it is
-		our SDIO Host Controller! */
-	if (!bcmsdh_chipmatch(pdev->vendor, pdev->device))
-		return -ENODEV;
-
-	/* this is a pci device we might support */
-	SDLX_MSG(("%s: Found possible SDIO Host Controller: "
-		"bus %d slot %d func %d irq %d\n", __func__,
-		pdev->bus->number, PCI_SLOT(pdev->devfn),
-		PCI_FUNC(pdev->devfn), pdev->irq));
-
-	/* use bcmsdh_query_device() to get the vendor ID of the target device
-	 * so it will eventually appear in the Broadcom string on the console
-	 */
-
-	/* allocate SDIO Host Controller state info */
-	osh = osl_attach(pdev, PCI_BUS, false);
-	if (!osh) {
-		SDLX_MSG(("%s: osl_attach failed\n", __func__));
-		goto err;
-	}
-	sdhc = kzalloc(sizeof(bcmsdh_hc_t), GFP_ATOMIC);
-	if (!sdhc) {
-		SDLX_MSG(("%s: out of memory\n", __func__));
-		goto err;
-	}
-	sdhc->osh = osh;
-
-	sdhc->dev = pdev;
-
-	/* map to address where host can access */
-	pci_set_master(pdev);
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		SDLX_MSG(("%s: Cannot enable PCI device\n", __func__));
-		goto err;
-	}
-	sdh = bcmsdh_attach(osh, (void *)(unsigned long)pci_resource_start(pdev, 0),
-			(void **)&regs, pdev->irq);
-	if (!sdh) {
-		SDLX_MSG(("%s: bcmsdh_attach failed\n", __func__));
-		goto err;
-	}
-
-	sdhc->sdh = sdh;
-
-	/* try to attach to the target device */
-	sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
-				bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
-				(void *)regs, NULL, sdh);
-	if (!sdhc->ch) {
-		SDLX_MSG(("%s: device attach failed\n", __func__));
-		goto err;
-	}
-
-	/* chain SDIO Host Controller info together */
-	sdhc->next = sdhcinfo;
-	sdhcinfo = sdhc;
-
-	return 0;
-
-	/* error handling */
-err:
-	if (sdhc->sdh)
-		bcmsdh_detach(sdhc->osh, sdhc->sdh);
-	if (sdhc)
-		kfree(sdhc);
-	if (osh)
-		osl_detach(osh);
-	return -ENODEV;
-}
-
-/**
- * Detach from target devices and SDIO Host Controller
- */
-static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev)
-{
-	bcmsdh_hc_t *sdhc, *prev;
-	osl_t *osh;
-
-	/* find the SDIO Host Controller state for this
-		 pdev and take it out from the list */
-	for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
-		if (sdhc->dev == pdev) {
-			if (prev)
-				prev->next = sdhc->next;
-			else
-				sdhcinfo = NULL;
-			break;
-		}
-		prev = sdhc;
-	}
-	if (!sdhc)
-		return;
-
-	drvinfo.detach(sdhc->ch);
-
-	bcmsdh_detach(sdhc->osh, sdhc->sdh);
-
-	/* release SDIO Host Controller info */
-	osh = sdhc->osh;
-	kfree(sdhc);
-	osl_detach(osh);
-}
-#endif				/* BCMLXSDMMC */
 #endif				/* BCMPLATFORM_BUS */
 
 extern int sdio_function_init(void);
 
 int bcmsdh_register(bcmsdh_driver_t *driver)
 {
-	int error = 0;
-
 	drvinfo = *driver;
 
-#if defined(BCMPLATFORM_BUS)
-#if defined(BCMLXSDMMC)
 	SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
-	error = sdio_function_init();
-#else
-	SDLX_MSG(("Intel PXA270 SDIO Driver\n"));
-	error = driver_register(&bcmsdh_driver);
-#endif				/* defined(BCMLXSDMMC) */
-	return error;
-#endif				/* defined(BCMPLATFORM_BUS) */
-
-#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
-	error = pci_register_driver(&bcmsdh_pci_driver);
-	if (!error)
-		return 0;
-
-	SDLX_MSG(("%s: pci_register_driver failed 0x%x\n", __func__, error));
-#endif				/* BCMPLATFORM_BUS */
-
-	return error;
+	return sdio_function_init();
 }
 
 extern void sdio_function_cleanup(void);
 
 void bcmsdh_unregister(void)
 {
-#if defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
-		driver_unregister(&bcmsdh_driver);
-#endif
-#if defined(BCMLXSDMMC)
 	sdio_function_cleanup();
-#endif				/* BCMLXSDMMC */
-#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
-	pci_unregister_driver(&bcmsdh_pci_driver);
-#endif				/* BCMPLATFORM_BUS */
 }
 
 #if defined(OOB_INTR_ONLY)
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index f6c9c45..d399b5c 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -14,11 +14,12 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 #include <linux/types.h>
+#include <linux/netdevice.h>
 #include <bcmdefs.h>
 #include <bcmdevs.h>
 #include <bcmendian.h>
-#include <bcmutils.h>
 #include <osl.h>
+#include <bcmutils.h>
 #include <sdio.h>		/* SDIO Device and Protocol Specs */
 #include <sdioh.h>		/* SDIO Host Controller Specification */
 #include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
@@ -111,7 +112,7 @@
 /*
  *	Public entry points & extern's
  */
-extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq)
+extern sdioh_info_t *sdioh_attach(struct osl_info *osh, void *bar0, uint irq)
 {
 	sdioh_info_t *sd;
 	int err_ret;
@@ -174,7 +175,7 @@
 	return sd;
 }
 
-extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd)
+extern SDIOH_API_RC sdioh_detach(struct osl_info *osh, sdioh_info_t *sd)
 {
 	sd_trace(("%s\n", __func__));
 
@@ -750,7 +751,7 @@
 	sd_trace(("%s: Func = %d\n", __func__, func));
 
 	if (!sd->func_cis_ptr[func]) {
-		bzero(cis, length);
+		memset(cis, 0, length);
 		sd_err(("%s: no func_cis_ptr[%d]\n", __func__, func));
 		return SDIOH_API_RC_FAIL;
 	}
@@ -927,13 +928,13 @@
 
 static SDIOH_API_RC
 sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
-		     uint addr, void *pkt)
+		     uint addr, struct sk_buff *pkt)
 {
 	bool fifo = (fix_inc == SDIOH_DATA_FIX);
 	u32 SGCount = 0;
 	int err_ret = 0;
 
-	void *pnext;
+	struct sk_buff *pnext;
 
 	sd_trace(("%s: Enter\n", __func__));
 
@@ -943,8 +944,8 @@
 
 	/* Claim host controller */
 	sdio_claim_host(gInstance->func[func]);
-	for (pnext = pkt; pnext; pnext = PKTNEXT(pnext)) {
-		uint pkt_len = PKTLEN(pnext);
+	for (pnext = pkt; pnext; pnext = pnext->next) {
+		uint pkt_len = pnext->len;
 		pkt_len += 3;
 		pkt_len &= 0xFFFFFFFC;
 
@@ -961,23 +962,23 @@
 		 * is supposed to give
 		 * us something we can work with.
 		 */
-		ASSERT(((u32) (PKTDATA(pkt)) & DMA_ALIGN_MASK) == 0);
+		ASSERT(((u32) (pkt->data) & DMA_ALIGN_MASK) == 0);
 
 		if ((write) && (!fifo)) {
 			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
-						   ((u8 *) PKTDATA(pnext)),
+						   ((u8 *) (pnext->data)),
 						   pkt_len);
 		} else if (write) {
 			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
-						   ((u8 *) PKTDATA(pnext)),
+						   ((u8 *) (pnext->data)),
 						   pkt_len);
 		} else if (fifo) {
 			err_ret = sdio_readsb(gInstance->func[func],
-					      ((u8 *) PKTDATA(pnext)),
+					      ((u8 *) (pnext->data)),
 					      addr, pkt_len);
 		} else {
 			err_ret = sdio_memcpy_fromio(gInstance->func[func],
-						     ((u8 *) PKTDATA(pnext)),
+						     ((u8 *) (pnext->data)),
 						     addr, pkt_len);
 		}
 
@@ -1025,10 +1026,10 @@
 extern SDIOH_API_RC
 sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write,
 		     uint func, uint addr, uint reg_width, uint buflen_u,
-		     u8 *buffer, void *pkt)
+		     u8 *buffer, struct sk_buff *pkt)
 {
 	SDIOH_API_RC Status;
-	void *mypkt = NULL;
+	struct sk_buff *mypkt = NULL;
 
 	sd_trace(("%s: Enter\n", __func__));
 
@@ -1038,52 +1039,52 @@
 	if (pkt == NULL) {
 		sd_data(("%s: Creating new %s Packet, len=%d\n",
 			 __func__, write ? "TX" : "RX", buflen_u));
-		mypkt = PKTGET(sd->osh, buflen_u, write ? true : false);
+		mypkt = pkt_buf_get_skb(sd->osh, buflen_u);
 		if (!mypkt) {
-			sd_err(("%s: PKTGET failed: len %d\n",
+			sd_err(("%s: pkt_buf_get_skb failed: len %d\n",
 				__func__, buflen_u));
 			return SDIOH_API_RC_FAIL;
 		}
 
 		/* For a write, copy the buffer data into the packet. */
 		if (write)
-			bcopy(buffer, PKTDATA(mypkt), buflen_u);
+			bcopy(buffer, mypkt->data, buflen_u);
 
 		Status =
 		    sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
 
 		/* For a read, copy the packet data back to the buffer. */
 		if (!write)
-			bcopy(PKTDATA(mypkt), buffer, buflen_u);
+			bcopy(mypkt->data, buffer, buflen_u);
 
-		PKTFREE(sd->osh, mypkt, write ? true : false);
-	} else if (((u32) (PKTDATA(pkt)) & DMA_ALIGN_MASK) != 0) {
+		pkt_buf_free_skb(sd->osh, mypkt, write ? true : false);
+	} else if (((u32) (pkt->data) & DMA_ALIGN_MASK) != 0) {
 		/* Case 2: We have a packet, but it is unaligned. */
 
 		/* In this case, we cannot have a chain. */
-		ASSERT(PKTNEXT(pkt) == NULL);
+		ASSERT(pkt->next == NULL);
 
 		sd_data(("%s: Creating aligned %s Packet, len=%d\n",
-			 __func__, write ? "TX" : "RX", PKTLEN(pkt)));
-		mypkt = PKTGET(sd->osh, PKTLEN(pkt), write ? true : false);
+			 __func__, write ? "TX" : "RX", pkt->len));
+		mypkt = pkt_buf_get_skb(sd->osh, pkt->len);
 		if (!mypkt) {
-			sd_err(("%s: PKTGET failed: len %d\n",
-				__func__, PKTLEN(pkt)));
+			sd_err(("%s: pkt_buf_get_skb failed: len %d\n",
+				__func__, pkt->len));
 			return SDIOH_API_RC_FAIL;
 		}
 
 		/* For a write, copy the buffer data into the packet. */
 		if (write)
-			bcopy(PKTDATA(pkt), PKTDATA(mypkt), PKTLEN(pkt));
+			bcopy(pkt->data, mypkt->data, pkt->len);
 
 		Status =
 		    sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
 
 		/* For a read, copy the packet data back to the buffer. */
 		if (!write)
-			bcopy(PKTDATA(mypkt), PKTDATA(pkt), PKTLEN(mypkt));
+			bcopy(mypkt->data, pkt->data, mypkt->len);
 
-		PKTFREE(sd->osh, mypkt, write ? true : false);
+		pkt_buf_free_skb(sd->osh, mypkt, write ? true : false);
 	} else {		/* case 3: We have a packet and
 				 it is aligned. */
 		sd_data(("%s: Aligned %s Packet, direct DMA\n",
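
For clarity, a compact sketch of the packet-accessor conversion used in sdioh_request_packet above: the osl PKTNEXT/PKTLEN/PKTDATA macros become direct sk_buff field accesses, and the 4-byte round-up mirrors the padding applied before each SDIO transfer (the function name is illustrative):

	#include <linux/skbuff.h>

	static unsigned int demo_chain_bytes(const struct sk_buff *pkt)
	{
		const struct sk_buff *p;
		unsigned int total = 0;

		for (p = pkt; p; p = p->next) {		/* PKTNEXT(p) -> p->next */
			unsigned int len = p->len;	/* PKTLEN(p)  -> p->len  */

			total += (len + 3) & ~3u;	/* round up to 4 bytes   */
		}
		return total;	/* the payload itself is p->data, formerly PKTDATA(p) */
	}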
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
index ae7b566..ceaa474 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
@@ -15,7 +15,9 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>	/* request_irq() */
+#include <linux/netdevice.h>
 #include <bcmdefs.h>
+#include <osl.h>
 #include <bcmutils.h>
 #include <sdio.h>		/* SDIO Specs */
 #include <bcmsdbus.h>		/* bcmsdh to/from specific controller APIs */
@@ -211,7 +213,7 @@
 	if (!gInstance)
 		return -ENOMEM;
 
-	bzero(&sdmmc_dev, sizeof(sdmmc_dev));
+	memset(&sdmmc_dev, 0, sizeof(sdmmc_dev));
 	error = sdio_register_driver(&bcmsdh_sdmmc_driver);
 
 	return error;
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd.h b/drivers/staging/brcm80211/brcmfmac/dhd.h
index 57d06b2..69c6a02 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd.h
@@ -77,7 +77,7 @@
 /* Common structure for module and instance linkage */
 typedef struct dhd_pub {
 	/* Linkage ponters */
-	osl_t *osh;		/* OSL handle */
+	struct osl_info *osh;		/* OSL handle */
 	struct dhd_bus *bus;	/* Bus module handle */
 	struct dhd_prot *prot;	/* Protocol module handle */
 	struct dhd_info *info;	/* Info module handle */
@@ -277,15 +277,16 @@
  */
 
 /* To allow osl_attach/detach calls from os-independent modules */
-osl_t *dhd_osl_attach(void *pdev, uint bustype);
-void dhd_osl_detach(osl_t *osh);
+struct osl_info *dhd_osl_attach(void *pdev, uint bustype);
+void dhd_osl_detach(struct osl_info *osh);
 
 /* Indication from bus module regarding presence/insertion of dongle.
  * Return dhd_pub_t pointer, used as handle to OS module in later calls.
  * Returned structure should have bus and prot pointers filled in.
  * bus_hdrlen specifies required headroom for bus module header.
  */
-extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen);
+extern dhd_pub_t *dhd_attach(struct osl_info *osh, struct dhd_bus *bus,
+				uint bus_hdrlen);
 extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
 
 /* Indication from bus module regarding removal/absence of dongle */
@@ -294,10 +295,12 @@
 /* Indication from bus module to change flow-control state */
 extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
 
-extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
+extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q,
+			 struct sk_buff *pkt, int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt);
+extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx,
+			 struct sk_buff *rxp, int numpkt);
 
 /* Return pointer to interface name */
 extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
@@ -306,7 +309,7 @@
 extern void dhd_sched_dpc(dhd_pub_t *dhdp);
 
 /* Notify tx completion */
-extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
+extern void dhd_txcomplete(dhd_pub_t *dhdp, struct sk_buff *txp, bool success);
 
 /* Query ioctl */
 extern int dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
@@ -377,7 +380,7 @@
 			   int len);
 
 /* Send packet to dongle via data channel */
-extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
+extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pkt);
 
 /* Send event to host */
 extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event,
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
index 3b39c99..cd0d540 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
@@ -26,8 +26,8 @@
 extern void dhd_bus_unregister(void);
 
 /* Download firmware image and nvram image */
-extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t * osh,
-				      char *fw_path, char *nv_path);
+extern bool dhd_bus_download_firmware(struct dhd_bus *bus,
+	      struct osl_info *osh, char *fw_path, char *nv_path);
 
 /* Stop bus module: clear pending frames, disable data flow */
 extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
@@ -36,7 +36,7 @@
 extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
 
 /* Send a data frame to the dongle.  Callee disposes of txp. */
-extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+extern int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *txp);
 
 /* Send/receive a control message to/from the dongle.
  * Expects caller to enforce a single outstanding transaction.
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
index bcbaac9..b7b527f 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/netdevice.h>
 #include <bcmdefs.h>
 #include <osl.h>
 
@@ -297,6 +298,15 @@
 	return ret;
 }
 
+#define PKTSUMNEEDED(skb) \
+		(((struct sk_buff *)(skb))->ip_summed == CHECKSUM_PARTIAL)
+#define PKTSETSUMGOOD(skb, x) \
+		(((struct sk_buff *)(skb))->ip_summed = \
+		((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because
+	skb->ip_summed is overloaded */
+
 int
 dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
 		  void *params, int plen, void *arg, int len, bool set)
@@ -309,7 +319,7 @@
 	bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
 }
 
-void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *pktbuf)
+void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, struct sk_buff *pktbuf)
 {
 #ifdef BDC
 	struct bdc_header *h;
@@ -320,33 +330,33 @@
 #ifdef BDC
 	/* Push BDC header used to convey priority for buses that don't */
 
-	PKTPUSH(pktbuf, BDC_HEADER_LEN);
+	skb_push(pktbuf, BDC_HEADER_LEN);
 
-	h = (struct bdc_header *)PKTDATA(pktbuf);
+	h = (struct bdc_header *)(pktbuf->data);
 
 	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
 	if (PKTSUMNEEDED(pktbuf))
 		h->flags |= BDC_FLAG_SUM_NEEDED;
 
-	h->priority = (PKTPRIO(pktbuf) & BDC_PRIORITY_MASK);
+	h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
 	h->flags2 = 0;
 	h->rssi = 0;
 #endif				/* BDC */
 	BDC_SET_IF_IDX(h, ifidx);
 }
 
-bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, u8 * fcbits)
+bool dhd_proto_fcinfo(dhd_pub_t *dhd, struct sk_buff *pktbuf, u8 * fcbits)
 {
 #ifdef BDC
 	struct bdc_header *h;
 
-	if (PKTLEN(pktbuf) < BDC_HEADER_LEN) {
+	if (pktbuf->len < BDC_HEADER_LEN) {
 		DHD_ERROR(("%s: rx data too short (%d < %d)\n",
-			   __func__, PKTLEN(pktbuf), BDC_HEADER_LEN));
+			   __func__, pktbuf->len, BDC_HEADER_LEN));
 		return BCME_ERROR;
 	}
 
-	h = (struct bdc_header *)PKTDATA(pktbuf);
+	h = (struct bdc_header *)(pktbuf->data);
 
 	*fcbits = h->priority >> BDC_PRIORITY_FC_SHIFT;
 	if ((h->flags2 & BDC_FLAG2_FC_FLAG) == BDC_FLAG2_FC_FLAG)
@@ -355,7 +365,7 @@
 	return false;
 }
 
-int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf)
+int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, struct sk_buff *pktbuf)
 {
 #ifdef BDC
 	struct bdc_header *h;
@@ -366,13 +376,13 @@
 #ifdef BDC
 	/* Pop BDC header used to convey priority for buses that don't */
 
-	if (PKTLEN(pktbuf) < BDC_HEADER_LEN) {
+	if (pktbuf->len < BDC_HEADER_LEN) {
 		DHD_ERROR(("%s: rx data too short (%d < %d)\n", __func__,
-			   PKTLEN(pktbuf), BDC_HEADER_LEN));
+			   pktbuf->len, BDC_HEADER_LEN));
 		return BCME_ERROR;
 	}
 
-	h = (struct bdc_header *)PKTDATA(pktbuf);
+	h = (struct bdc_header *)(pktbuf->data);
 
 	*ifidx = BDC_GET_IF_IDX(h);
 	if (*ifidx >= DHD_MAX_IFS) {
@@ -395,9 +405,9 @@
 		PKTSETSUMGOOD(pktbuf, true);
 	}
 
-	PKTSETPRIO(pktbuf, (h->priority & BDC_PRIORITY_MASK));
+	pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
 
-	PKTPULL(pktbuf, BDC_HEADER_LEN);
+	skb_pull(pktbuf, BDC_HEADER_LEN);
 #endif				/* BDC */
 
 	return 0;
@@ -467,7 +477,7 @@
 		dhd_os_proto_unblock(dhd);
 		return ret;
 	}
-	memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+	memcpy(dhd->mac.octet, buf, ETH_ALEN);
 
 	dhd_os_proto_unblock(dhd);
 
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_common.c b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
index 703188f..3dbf72e 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <bcmdefs.h>
+#include <linux/netdevice.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <bcmendian.h>
@@ -326,9 +327,10 @@
 	}
 }
 
-bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
+bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, struct sk_buff *pkt,
+		  int prec)
 {
-	void *p;
+	struct sk_buff *p;
 	int eprec = -1;		/* precedence to evict from */
 	bool discard_oldest;
 
@@ -366,7 +368,7 @@
 			ASSERT(p);
 		}
 
-		PKTFREE(dhdp->osh, p, true);
+		pkt_buf_free_skb(dhdp->osh, p, true);
 	}
 
 	/* Enqueue */
@@ -832,7 +834,7 @@
 	u16 flags;
 	int evlen;
 
-	if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
+	if (memcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
 		DHD_ERROR(("%s: mismatched OUI, bailing\n", __func__));
 		return BCME_ERROR;
 	}
@@ -1254,7 +1256,7 @@
 	 */
 	ret = dhd_custom_get_mac_address(ea_addr.octet);
 	if (!ret) {
-		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN,
+		bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETH_ALEN,
 			    buf, sizeof(buf));
 		ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
 		if (ret < 0) {
@@ -1262,7 +1264,7 @@
 				   __func__, ret));
 		} else
 			memcpy(dhd->mac.octet, (void *)&ea_addr,
-			       ETHER_ADDR_LEN);
+			       ETH_ALEN);
 	}
 #endif				/* GET_CUSTOM_MAC_ENABLE */
 
@@ -1532,7 +1534,7 @@
 					break;
 
 				if (!memcmp
-				    (bi->BSSID.octet, addr, ETHER_ADDR_LEN)) {
+				    (bi->BSSID.octet, addr, ETH_ALEN)) {
 					DHD_ISCAN(("%s: Del BSS[%2.2d:%2.2d] "
 					"%X:%X:%X:%X:%X:%X\n",
 					__func__, l, i, bi->BSSID.octet[0],
@@ -1670,7 +1672,7 @@
 	char buf[WLC_IOCTL_SMLEN];
 
 	memset(&params, 0, sizeof(wl_iscan_params_t));
-	memcpy(&params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	memcpy(&params.params.bssid, &ether_bcast, ETH_ALEN);
 
 	params.params.bss_type = DOT11_BSSTYPE_ANY;
 	params.params.scan_type = DOT11_SCANTYPE_ACTIVE;
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c b/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
index f647034..c3f18bb 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
@@ -14,7 +14,7 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include <linuxver.h>
+#include <linux/netdevice.h>
 #include <osl.h>
 #include <bcmutils.h>
 
@@ -24,8 +24,8 @@
 #include <wlioctl.h>
 #include <wl_iw.h>
 
-#define WL_ERROR(x) printf x
-#define WL_TRACE(x)
+#define WL_ERROR(fmt, args...) printk(fmt, ##args)
+#define WL_TRACE(fmt, args...) no_printk(fmt, ##args)
 
 #ifdef CUSTOMER_HW
 extern void bcm_wlan_power_off(int);
@@ -67,13 +67,13 @@
 #endif
 
 	if (dhd_oob_gpio_num < 0) {
-		WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined\n",
-			__func__));
+		WL_ERROR("%s: ERROR customer specific Host GPIO is NOT defined\n",
+			 __func__);
 		return dhd_oob_gpio_num;
 	}
 
-	WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
-		  __func__, dhd_oob_gpio_num));
+	WL_ERROR("%s: customer specific Host GPIO number is (%d)\n",
+		 __func__, dhd_oob_gpio_num);
 
 #if defined CUSTOMER_HW
 	host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
@@ -93,40 +93,40 @@
 {
 	switch (onoff) {
 	case WLAN_RESET_OFF:
-		WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
-			__func__));
+		WL_TRACE("%s: call customer specific GPIO to insert WLAN RESET\n",
+			 __func__);
 #ifdef CUSTOMER_HW
 		bcm_wlan_power_off(2);
 #endif				/* CUSTOMER_HW */
 #ifdef CUSTOMER_HW2
 		wifi_set_power(0, 0);
 #endif
-		WL_ERROR(("=========== WLAN placed in RESET ========\n"));
+		WL_ERROR("=========== WLAN placed in RESET ========\n");
 		break;
 
 	case WLAN_RESET_ON:
-		WL_TRACE(("%s: callc customer specific GPIO to remove WLAN RESET\n",
-			__func__));
+		WL_TRACE("%s: callc customer specific GPIO to remove WLAN RESET\n",
+			 __func__);
 #ifdef CUSTOMER_HW
 		bcm_wlan_power_on(2);
 #endif				/* CUSTOMER_HW */
 #ifdef CUSTOMER_HW2
 		wifi_set_power(1, 0);
 #endif
-		WL_ERROR(("=========== WLAN going back to live  ========\n"));
+		WL_ERROR("=========== WLAN going back to live  ========\n");
 		break;
 
 	case WLAN_POWER_OFF:
-		WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
-			__func__));
+		WL_TRACE("%s: call customer specific GPIO to turn off WL_REG_ON\n",
+			 __func__);
 #ifdef CUSTOMER_HW
 		bcm_wlan_power_off(1);
 #endif				/* CUSTOMER_HW */
 		break;
 
 	case WLAN_POWER_ON:
-		WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
-			__func__));
+		WL_TRACE("%s: call customer specific GPIO to turn on WL_REG_ON\n",
+			 __func__);
 #ifdef CUSTOMER_HW
 		bcm_wlan_power_on(1);
 #endif				/* CUSTOMER_HW */
@@ -140,7 +140,7 @@
 /* Function to get custom MAC address */
 int dhd_custom_get_mac_address(unsigned char *buf)
 {
-	WL_TRACE(("%s Enter\n", __func__));
+	WL_TRACE("%s Enter\n", __func__);
 	if (!buf)
 		return -EINVAL;
 
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index 9335f02..db45083 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -32,7 +32,6 @@
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <bcmdefs.h>
-#include <linuxver.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <bcmendian.h>
@@ -211,7 +210,7 @@
 	int idx;		/* iface idx in dongle */
 	int state;		/* interface state */
 	uint subunit;		/* subunit */
-	u8 mac_addr[ETHER_ADDR_LEN];	/* assigned MAC address */
+	u8 mac_addr[ETH_ALEN];	/* assigned MAC address */
 	bool attached;		/* Delayed attachment when unset */
 	bool txflowcontrol;	/* Per interface flow control indicator */
 	char name[IFNAMSIZ];	/* linux interface name */
@@ -709,7 +708,7 @@
 
 	/* Send down the multicast list first. */
 
-	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+	buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
 	bufp = buf = kmalloc(buflen, GFP_ATOMIC);
 	if (!bufp) {
 		DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
@@ -727,8 +726,8 @@
 	netdev_for_each_mc_addr(ha, dev) {
 		if (!cnt)
 			break;
-		memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
-		bufp += ETHER_ADDR_LEN;
+		memcpy(bufp, ha->addr, ETH_ALEN);
+		bufp += ETH_ALEN;
 		cnt--;
 	}
 
@@ -812,7 +811,7 @@
 
 	DHD_TRACE(("%s enter\n", __func__));
 	if (!bcm_mkiovar
-	    ("cur_etheraddr", (char *)addr, ETHER_ADDR_LEN, buf, 32)) {
+	    ("cur_etheraddr", (char *)addr, ETH_ALEN, buf, 32)) {
 		DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n",
 			   dhd_ifname(&dhd->pub, ifidx)));
 		return -1;
@@ -828,7 +827,7 @@
 		DHD_ERROR(("%s: set cur_etheraddr failed\n",
 			   dhd_ifname(&dhd->pub, ifidx)));
 	} else {
-		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+		memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETH_ALEN);
 	}
 
 	return ret;
@@ -998,7 +997,7 @@
 		return -1;
 
 	ASSERT(dhd->sysioc_tsk);
-	memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
+	memcpy(&dhd->macvalue, sa->sa_data, ETH_ALEN);
 	dhd->set_macaddress = true;
 	up(&dhd->sysioc_sem);
 
@@ -1019,7 +1018,7 @@
 	up(&dhd->sysioc_sem);
 }
 
-int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf)
 {
 	int ret;
 	dhd_info_t *dhd = (dhd_info_t *) (dhdp->info);
@@ -1029,13 +1028,13 @@
 		return -ENODEV;
 
 	/* Update multicast statistic */
-	if (PKTLEN(pktbuf) >= ETHER_ADDR_LEN) {
-		u8 *pktdata = (u8 *) PKTDATA(pktbuf);
+	if (pktbuf->len >= ETH_ALEN) {
+		u8 *pktdata = (u8 *) (pktbuf->data);
 		struct ether_header *eh = (struct ether_header *)pktdata;
 
-		if (ETHER_ISMULTI(eh->ether_dhost))
+		if (is_multicast_ether_addr(eh->ether_dhost))
 			dhdp->tx_multicast++;
-		if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
+		if (ntoh16(eh->ether_type) == ETH_P_PAE)
 			atomic_inc(&dhd->pend_8021x_cnt);
 	}
 
@@ -1053,6 +1052,32 @@
 	return ret;
 }
 
+static inline void *
+osl_pkt_frmnative(struct osl_info *osh, struct sk_buff *skb)
+{
+	struct sk_buff *nskb;
+
+	for (nskb = skb; nskb; nskb = nskb->next)
+		osh->pktalloced++;
+
+	return (void *)skb;
+}
+#define PKTFRMNATIVE(osh, skb)	\
+	osl_pkt_frmnative((osh), (struct sk_buff *)(skb))
+
+static inline struct sk_buff *
+osl_pkt_tonative(struct osl_info *osh, void *pkt)
+{
+	struct sk_buff *nskb;
+
+	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next)
+		osh->pktalloced--;
+
+	return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt)	\
+	osl_pkt_tonative((osh), (pkt))
+
 static int dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	int ret;
@@ -1133,13 +1158,15 @@
 		netif_wake_queue(net);
 }
 
-void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
+void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf,
+		  int numpkt)
 {
 	dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
 	struct sk_buff *skb;
 	unsigned char *eth;
 	uint len;
-	void *data, *pnext, *save_pktbuf;
+	void *data;
+	struct sk_buff *pnext, *save_pktbuf;
 	int i;
 	dhd_if_t *ifp;
 	wl_event_msg_t event;
@@ -1150,8 +1177,8 @@
 
 	for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
 
-		pnext = PKTNEXT(pktbuf);
-		PKTSETNEXT(pktbuf, NULL);
+		pnext = pktbuf->next;
+		pktbuf->next = NULL;
 
 		skb = PKTTONATIVE(dhdp->osh, pktbuf);
 
@@ -1190,7 +1217,7 @@
 		/* Process special event packets and then discard them */
 		if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM)
 			dhd_wl_host_event(dhd, &ifidx,
-					  skb->mac_header,
+					  skb_mac_header(skb),
 					  &event, &data);
 
 		ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
@@ -1223,7 +1250,7 @@
 	return;
 }
 
-void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
+void dhd_txcomplete(dhd_pub_t *dhdp, struct sk_buff *txp, bool success)
 {
 	uint ifidx;
 	dhd_info_t *dhd = (dhd_info_t *) (dhdp->info);
@@ -1232,10 +1259,10 @@
 
 	dhd_prot_hdrpull(dhdp, &ifidx, txp);
 
-	eh = (struct ether_header *)PKTDATA(txp);
+	eh = (struct ether_header *)(txp->data);
 	type = ntoh16(eh->ether_type);
 
-	if (type == ETHER_TYPE_802_1X)
+	if (type == ETH_P_PAE)
 		atomic_dec(&dhd->pend_8021x_cnt);
 
 }
@@ -1621,6 +1648,51 @@
 	return 0;
 }
 
+static s16 linuxbcmerrormap[] = { 0,	/* 0 */
+	-EINVAL,		/* BCME_ERROR */
+	-EINVAL,		/* BCME_BADARG */
+	-EINVAL,		/* BCME_BADOPTION */
+	-EINVAL,		/* BCME_NOTUP */
+	-EINVAL,		/* BCME_NOTDOWN */
+	-EINVAL,		/* BCME_NOTAP */
+	-EINVAL,		/* BCME_NOTSTA */
+	-EINVAL,		/* BCME_BADKEYIDX */
+	-EINVAL,		/* BCME_RADIOOFF */
+	-EINVAL,		/* BCME_NOTBANDLOCKED */
+	-EINVAL,		/* BCME_NOCLK */
+	-EINVAL,		/* BCME_BADRATESET */
+	-EINVAL,		/* BCME_BADBAND */
+	-E2BIG,			/* BCME_BUFTOOSHORT */
+	-E2BIG,			/* BCME_BUFTOOLONG */
+	-EBUSY,			/* BCME_BUSY */
+	-EINVAL,		/* BCME_NOTASSOCIATED */
+	-EINVAL,		/* BCME_BADSSIDLEN */
+	-EINVAL,		/* BCME_OUTOFRANGECHAN */
+	-EINVAL,		/* BCME_BADCHAN */
+	-EFAULT,		/* BCME_BADADDR */
+	-ENOMEM,		/* BCME_NORESOURCE */
+	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
+	-EMSGSIZE,		/* BCME_BADLENGTH */
+	-EINVAL,		/* BCME_NOTREADY */
+	-EPERM,			/* BCME_NOTPERMITTED */
+	-ENOMEM,		/* BCME_NOMEM */
+	-EINVAL,		/* BCME_ASSOCIATED */
+	-ERANGE,		/* BCME_RANGE */
+	-EINVAL,		/* BCME_NOTFOUND */
+	-EINVAL,		/* BCME_WME_NOT_ENABLED */
+	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
+	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
+	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
+	-EIO,			/* BCME_SDIO_ERROR */
+	-ENODEV,		/* BCME_DONGLE_DOWN */
+	-EINVAL,		/* BCME_VERSION */
+	-EIO,			/* BCME_TXFAIL */
+	-EIO,			/* BCME_RXFAIL */
+	-EINVAL,		/* BCME_NODEVICE */
+	-EINVAL,		/* BCME_NMODE_DISABLED */
+	-ENODATA,		/* BCME_NONRESIDENT */
+};
+
 static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
 {
 	dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
@@ -1742,7 +1814,12 @@
 	if (buf)
 		kfree(buf);
 
-	return OSL_ERROR(bcmerror);
+	if (bcmerror > 0)
+		bcmerror = 0;
+	else if (bcmerror < BCME_LAST)
+		bcmerror = BCME_ERROR;
+
+	return linuxbcmerrormap[-bcmerror];
 }
 
 static int dhd_stop(struct net_device *net)
@@ -1789,7 +1866,7 @@
 		}
 		atomic_set(&dhd->pend_8021x_cnt, 0);
 
-		memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+		memcpy(net->dev_addr, dhd->pub.mac.octet, ETH_ALEN);
 
 #ifdef TOE
 		/* Get current TOE mode from dongle */
@@ -1814,12 +1891,12 @@
 	return ret;
 }
 
-osl_t *dhd_osl_attach(void *pdev, uint bustype)
+struct osl_info *dhd_osl_attach(void *pdev, uint bustype)
 {
-	return osl_attach(pdev, bustype, true);
+	return osl_attach(pdev, bustype);
 }
 
-void dhd_osl_detach(osl_t *osh)
+void dhd_osl_detach(struct osl_info *osh)
 {
 	osl_detach(osh);
 }
@@ -1845,7 +1922,7 @@
 	dhd->iflist[ifidx] = ifp;
 	strlcpy(ifp->name, name, IFNAMSIZ);
 	if (mac_addr != NULL)
-		memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
+		memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
 
 	if (handle == NULL) {
 		ifp->state = WLC_E_IF_ADD;
@@ -1877,7 +1954,8 @@
 	up(&dhd->sysioc_sem);
 }
 
-dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
+dhd_pub_t *dhd_attach(struct osl_info *osh, struct dhd_bus *bus,
+			uint bus_hdrlen)
 {
 	dhd_info_t *dhd = NULL;
 	struct net_device *net;
@@ -2199,19 +2277,11 @@
 	.ndo_set_multicast_list = dhd_set_multicast_list
 };
 
-static struct net_device_ops dhd_ops_virt = {
-	.ndo_get_stats = dhd_get_stats,
-	.ndo_do_ioctl = dhd_ioctl_entry,
-	.ndo_start_xmit = dhd_start_xmit,
-	.ndo_set_mac_address = dhd_set_mac_address,
-	.ndo_set_multicast_list = dhd_set_multicast_list
-};
-
 int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
 {
 	dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
 	struct net_device *net;
-	u8 temp_addr[ETHER_ADDR_LEN] = {
+	u8 temp_addr[ETH_ALEN] = {
 		0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
 
 	DHD_TRACE(("%s: ifidx %d\n", __func__, ifidx));
@@ -2229,7 +2299,7 @@
 	 */
 	if (ifidx != 0) {
 		/* for virtual interfaces use the primary MAC  */
-		memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+		memcpy(temp_addr, dhd->pub.mac.octet, ETH_ALEN);
 
 	}
 
@@ -2257,7 +2327,7 @@
 
 	dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen;
 
-	memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
+	memcpy(net->dev_addr, temp_addr, ETH_ALEN);
 
 	if (register_netdev(net) != 0) {
 		DHD_ERROR(("%s: couldn't register the net device\n",
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
index bf8df98..c66f1c2 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linuxver.h>
 
 int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
 {
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
index cc42fa4..a5309e2 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
@@ -46,15 +46,16 @@
 /* Stop protocol: sync w/dongle state. */
 extern void dhd_prot_stop(dhd_pub_t *dhdp);
 
-extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, void *pktbuf, u8 *fcbits);
+extern bool dhd_proto_fcinfo(dhd_pub_t *dhd, struct sk_buff *pktbuf,
+			     u8 *fcbits);
 
 /* Add any protocol-specific data header.
  * Caller must reserve prot_hdrlen prepend space.
  */
-extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, struct sk_buff *txp);
 
 /* Remove any protocol-specific data header. */
-extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp);
+extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, struct sk_buff *rxp);
 
 /* Use protocol to issue ioctl to dongle */
 extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc,
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
index b2281d9..3edce44 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <bcmdefs.h>
+#include <linux/netdevice.h>
 #include <osl.h>
 #include <bcmsdh.h>
 
@@ -143,7 +144,7 @@
  * bufpool was present for gspi bus.
  */
 #define PKTFREE2()		if ((bus->bus != SPI_BUS) || bus->usebufpool) \
-					PKTFREE(bus->dhd->osh, pkt, false);
+							pkt_buf_free_skb(bus->dhd->osh, pkt, false);
 DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
 extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
 			    uint len);
@@ -202,8 +203,8 @@
 	u8 rx_seq;		/* Receive sequence number (expected) */
 	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
 
-	void *glomd;		/* Packet containing glomming descriptor */
-	void *glom;		/* Packet chain for glommed superframe */
+	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
+	struct sk_buff *glom;	/* Packet chain for glommed superframe */
 	uint glomerr;		/* Glom packet read errors */
 
 	u8 *rxbuf;		/* Buffer for receiving control packets */
@@ -356,16 +357,16 @@
 #if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
 #error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
 #endif	/* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
-#define PKTALIGN(osh, p, len, align)					\
+#define PKTALIGN(_osh, _p, _len, _align)				\
 	do {								\
 		uint datalign;						\
-		datalign = (unsigned long)PKTDATA((p));			\
-		datalign = roundup(datalign, (align)) - datalign;	\
-		ASSERT(datalign < (align));				\
-		ASSERT(PKTLEN((p)) >= ((len) + datalign));	\
+		datalign = (unsigned long)((_p)->data);			\
+		datalign = roundup(datalign, (_align)) - datalign;	\
+		ASSERT(datalign < (_align));				\
+		ASSERT((_p)->len >= ((_len) + datalign));		\
 		if (datalign)						\
-			PKTPULL((p), datalign);			\
-		PKTSETLEN((p), (len));				\
+			skb_pull((_p), datalign);			\
+		__skb_trim((_p), (_len));				\
 	} while (0)
 
 /* Limit on rounding up frames */
@@ -430,27 +431,30 @@
 #endif				/* DHD_DEBUG  */
 static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
 
-static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh);
-static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static void dhdsdio_release(dhd_bus_t *bus, struct osl_info *osh);
+static void dhdsdio_release_malloc(dhd_bus_t *bus, struct osl_info *osh);
 static void dhdsdio_disconnect(void *ptr);
 static bool dhdsdio_chipmatch(u16 chipid);
-static bool dhdsdio_probe_attach(dhd_bus_t *bus, osl_t *osh, void *sdh,
-				 void *regsva, u16 devid);
-static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh);
-static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh);
-static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t * osh);
+static bool dhdsdio_probe_attach(dhd_bus_t *bus, struct osl_info *osh,
+				 void *sdh, void *regsva, u16 devid);
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, struct osl_info *osh,
+				 void *sdh);
+static bool dhdsdio_probe_init(dhd_bus_t *bus, struct osl_info *osh, void *sdh);
+static void dhdsdio_release_dongle(dhd_bus_t *bus, struct osl_info *osh);
 
 static uint process_nvram_vars(char *varbuf, uint len);
 
 static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
 static int dhd_bcmsdh_recv_buf(dhd_bus_t *bus, u32 addr, uint fn,
-			       uint flags, u8 *buf, uint nbytes, void *pkt,
-			       bcmsdh_cmplt_fn_t complete, void *handle);
+			       uint flags, u8 *buf, uint nbytes,
+			       struct sk_buff *pkt, bcmsdh_cmplt_fn_t complete,
+			       void *handle);
 static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, u32 addr, uint fn,
-			       uint flags, u8 *buf, uint nbytes, void *pkt,
-			       bcmsdh_cmplt_fn_t complete, void *handle);
+			       uint flags, u8 *buf, uint nbytes,
+			       struct sk_buff *pkt, bcmsdh_cmplt_fn_t complete,
+			       void *handle);
 
-static bool dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh,
+static bool dhdsdio_download_firmware(struct dhd_bus *bus, struct osl_info *osh,
 				      void *sdh);
 static int _dhdsdio_download_firmware(struct dhd_bus *bus);
 
@@ -900,16 +904,17 @@
 
 /* Writes a HW/SW header into the packet and sends it. */
 /* Assumes: (a) header space already there, (b) caller holds lock */
-static int dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt)
+static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
+			 bool free_pkt)
 {
 	int ret;
-	osl_t *osh;
+	struct osl_info *osh;
 	u8 *frame;
 	u16 len, pad = 0;
 	u32 swheader;
 	uint retries = 0;
 	bcmsdh_info_t *sdh;
-	void *new;
+	struct sk_buff *new;
 	int i;
 
 	DHD_TRACE(("%s: Enter\n", __func__));
@@ -922,46 +927,46 @@
 		goto done;
 	}
 
-	frame = (u8 *) PKTDATA(pkt);
+	frame = (u8 *) (pkt->data);
 
 	/* Add alignment padding, allocate new packet if needed */
 	pad = ((unsigned long)frame % DHD_SDALIGN);
 	if (pad) {
-		if (PKTHEADROOM(pkt) < pad) {
+		if (skb_headroom(pkt) < pad) {
 			DHD_INFO(("%s: insufficient headroom %d for %d pad\n",
-				  __func__, (int)PKTHEADROOM(pkt), pad));
+				  __func__, skb_headroom(pkt), pad));
 			bus->dhd->tx_realloc++;
-			new = PKTGET(osh, (PKTLEN(pkt) + DHD_SDALIGN), true);
+			new = pkt_buf_get_skb(osh, (pkt->len + DHD_SDALIGN));
 			if (!new) {
 				DHD_ERROR(("%s: couldn't allocate new %d-byte "
 					"packet\n",
-					__func__, PKTLEN(pkt) + DHD_SDALIGN));
+					__func__, pkt->len + DHD_SDALIGN));
 				ret = BCME_NOMEM;
 				goto done;
 			}
 
-			PKTALIGN(osh, new, PKTLEN(pkt), DHD_SDALIGN);
-			bcopy(PKTDATA(pkt), PKTDATA(new), PKTLEN(pkt));
+			PKTALIGN(osh, new, pkt->len, DHD_SDALIGN);
+			bcopy(pkt->data, new->data, pkt->len);
 			if (free_pkt)
-				PKTFREE(osh, pkt, true);
+				pkt_buf_free_skb(osh, pkt, true);
 			/* free the pkt if canned one is not used */
 			free_pkt = true;
 			pkt = new;
-			frame = (u8 *) PKTDATA(pkt);
+			frame = (u8 *) (pkt->data);
 			ASSERT(((unsigned long)frame % DHD_SDALIGN) == 0);
 			pad = 0;
 		} else {
-			PKTPUSH(pkt, pad);
-			frame = (u8 *) PKTDATA(pkt);
+			skb_push(pkt, pad);
+			frame = (u8 *) (pkt->data);
 
-			ASSERT((pad + SDPCM_HDRLEN) <= (int)PKTLEN(pkt));
-			bzero(frame, pad + SDPCM_HDRLEN);
+			ASSERT((pad + SDPCM_HDRLEN) <= (int)(pkt->len));
+			memset(frame, 0, pad + SDPCM_HDRLEN);
 		}
 	}
 	ASSERT(pad < DHD_SDALIGN);
 
 	/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
-	len = (u16) PKTLEN(pkt);
+	len = (u16) (pkt->len);
 	*(u16 *) frame = htol16(len);
 	*(((u16 *) frame) + 1) = htol16(~len);
 
@@ -974,7 +979,7 @@
 	htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
 
 #ifdef DHD_DEBUG
-	tx_packets[PKTPRIO(pkt)]++;
+	tx_packets[pkt->priority]++;
 	if (DHD_BYTES_ON() &&
 	    (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
 	      (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
@@ -989,7 +994,7 @@
 		u16 pad = bus->blocksize - (len % bus->blocksize);
 		if ((pad <= bus->roundup) && (pad < bus->blocksize))
 #ifdef NOTUSED
-			if (pad <= PKTTAILROOM(pkt))
+			if (pad <= skb_tailroom(pkt))
 #endif				/* NOTUSED */
 				len += pad;
 	} else if (len % DHD_SDALIGN) {
@@ -999,7 +1004,7 @@
 	/* Some controllers have trouble with odd bytes -- round to even */
 	if (forcealign && (len & (ALIGNMENT - 1))) {
 #ifdef NOTUSED
-		if (PKTTAILROOM(pkt))
+		if (skb_tailroom(pkt))
 #endif
 			len = roundup(len, ALIGNMENT);
 #ifdef NOTUSED
@@ -1050,34 +1055,34 @@
 
 done:
 	/* restore pkt buffer pointer before calling tx complete routine */
-	PKTPULL(pkt, SDPCM_HDRLEN + pad);
+	skb_pull(pkt, SDPCM_HDRLEN + pad);
 	dhd_os_sdunlock(bus->dhd);
 	dhd_txcomplete(bus->dhd, pkt, ret != 0);
 	dhd_os_sdlock(bus->dhd);
 
 	if (free_pkt)
-		PKTFREE(osh, pkt, true);
+		pkt_buf_free_skb(osh, pkt, true);
 
 	return ret;
 }
 
-int dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *pkt)
 {
 	int ret = BCME_ERROR;
-	osl_t *osh;
+	struct osl_info *osh;
 	uint datalen, prec;
 
 	DHD_TRACE(("%s: Enter\n", __func__));
 
 	osh = bus->dhd->osh;
-	datalen = PKTLEN(pkt);
+	datalen = pkt->len;
 
 #ifdef SDTEST
 	/* Push the test header if doing loopback */
 	if (bus->ext_loop) {
 		u8 *data;
-		PKTPUSH(pkt, SDPCM_TEST_HDRLEN);
-		data = PKTDATA(pkt);
+		skb_push(pkt, SDPCM_TEST_HDRLEN);
+		data = pkt->data;
 		*data++ = SDPCM_TEST_ECHOREQ;
 		*data++ = (u8) bus->loopid++;
 		*data++ = (datalen >> 0);
@@ -1087,10 +1092,10 @@
 #endif				/* SDTEST */
 
 	/* Add space for the header */
-	PKTPUSH(pkt, SDPCM_HDRLEN);
-	ASSERT(IS_ALIGNED((unsigned long)PKTDATA(pkt), 2));
+	skb_push(pkt, SDPCM_HDRLEN);
+	ASSERT(IS_ALIGNED((unsigned long)(pkt->data), 2));
 
-	prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
+	prec = PRIO2PREC((pkt->priority & PRIOMASK));
 
 	/* Check for existing queue, current flow-control,
 			 pending event, or pending clock */
@@ -1105,9 +1110,9 @@
 		/* Priority based enq */
 		dhd_os_sdlock_txq(bus->dhd);
 		if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == false) {
-			PKTPULL(pkt, SDPCM_HDRLEN);
+			skb_pull(pkt, SDPCM_HDRLEN);
 			dhd_txcomplete(bus->dhd, pkt, false);
-			PKTFREE(osh, pkt, true);
+			pkt_buf_free_skb(osh, pkt, true);
 			DHD_ERROR(("%s: out of bus->txq !!!\n", __func__));
 			ret = BCME_NORESOURCE;
 		} else {
@@ -1162,7 +1167,7 @@
 
 static uint dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
 {
-	void *pkt;
+	struct sk_buff *pkt;
 	u32 intstatus = 0;
 	uint retries = 0;
 	int ret = 0, prec_out;
@@ -1186,7 +1191,7 @@
 			break;
 		}
 		dhd_os_sdunlock_txq(bus->dhd);
-		datalen = PKTLEN(pkt) - SDPCM_HDRLEN;
+		datalen = pkt->len - SDPCM_HDRLEN;
 
 #ifndef SDTEST
 		ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
@@ -1247,7 +1252,7 @@
 			frame -= doff;
 			len += doff;
 			msglen += doff;
-			bzero(frame, doff + SDPCM_HDRLEN);
+			memset(frame, 0, doff + SDPCM_HDRLEN);
 		}
 		ASSERT(doff < DHD_SDALIGN);
 	}
@@ -2531,7 +2536,7 @@
 		if (!vbuffer)
 			return BCME_NOMEM;
 
-		bzero(vbuffer, varsize);
+		memset(vbuffer, 0, varsize);
 		bcopy(bus->vars, vbuffer, bus->varsz);
 
 		/* Write the vars list */
@@ -2823,7 +2828,7 @@
 
 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
 {
-	osl_t *osh = bus->dhd->osh;
+	struct osl_info *osh = bus->dhd->osh;
 	u32 local_hostintmask;
 	u8 saveclk;
 	uint retries;
@@ -2877,10 +2882,10 @@
 
 	/* Clear any held glomming stuff */
 	if (bus->glomd)
-		PKTFREE(osh, bus->glomd, false);
+		pkt_buf_free_skb(osh, bus->glomd, false);
 
 	if (bus->glom)
-		PKTFREE(osh, bus->glom, false);
+		pkt_buf_free_skb(osh, bus->glom, false);
 
 	bus->glom = bus->glomd = NULL;
 
@@ -3178,8 +3183,8 @@
 	u8 *dptr, num = 0;
 
 	u16 sublen, check;
-	void *pfirst, *plast, *pnext, *save_pfirst;
-	osl_t *osh = bus->dhd->osh;
+	struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+	struct osl_info *osh = bus->dhd->osh;
 
 	int errcode;
 	u8 chan, seq, doff, sfdoff;
@@ -3199,8 +3204,8 @@
 		dhd_os_sdlock_rxq(bus->dhd);
 
 		pfirst = plast = pnext = NULL;
-		dlen = (u16) PKTLEN(bus->glomd);
-		dptr = PKTDATA(bus->glomd);
+		dlen = (u16) (bus->glomd->len);
+		dptr = bus->glomd->data;
 		if (!dlen || (dlen & 1)) {
 			DHD_ERROR(("%s: bad glomd len(%d), ignore descriptor\n",
 			__func__, dlen));
@@ -3235,19 +3240,19 @@
 			}
 
 			/* Allocate/chain packet for next subframe */
-			pnext = PKTGET(osh, sublen + DHD_SDALIGN, false);
+			pnext = pkt_buf_get_skb(osh, sublen + DHD_SDALIGN);
 			if (pnext == NULL) {
-				DHD_ERROR(("%s: PKTGET failed, num %d len %d\n",
+				DHD_ERROR(("%s: pkt_buf_get_skb failed, num %d len %d\n",
 					   __func__, num, sublen));
 				break;
 			}
-			ASSERT(!PKTLINK(pnext));
+			ASSERT(!(pnext->prev));
 			if (!pfirst) {
 				ASSERT(!plast);
 				pfirst = plast = pnext;
 			} else {
 				ASSERT(plast);
-				PKTSETNEXT(plast, pnext);
+				plast->next = pnext;
 				plast = pnext;
 			}
 
@@ -3271,13 +3276,13 @@
 			pfirst = pnext = NULL;
 		} else {
 			if (pfirst)
-				PKTFREE(osh, pfirst, false);
+				pkt_buf_free_skb(osh, pfirst, false);
 			bus->glom = NULL;
 			num = 0;
 		}
 
 		/* Done with descriptor packet */
-		PKTFREE(osh, bus->glomd, false);
+		pkt_buf_free_skb(osh, bus->glomd, false);
 		bus->glomd = NULL;
 		bus->nextlen = 0;
 
@@ -3290,10 +3295,10 @@
 		if (DHD_GLOM_ON()) {
 			DHD_GLOM(("%s: try superframe read, packet chain:\n",
 				__func__));
-			for (pnext = bus->glom; pnext; pnext = PKTNEXT(pnext)) {
+			for (pnext = bus->glom; pnext; pnext = pnext->next) {
 				DHD_GLOM(("    %p: %p len 0x%04x (%d)\n",
-					  pnext, (u8 *) PKTDATA(pnext),
-					  PKTLEN(pnext), PKTLEN(pnext)));
+					  pnext, (u8 *) (pnext->data),
+					  pnext->len, pnext->len));
 			}
 		}
 
@@ -3309,7 +3314,7 @@
 						      bcmsdh_cur_sbwad
 						      (bus->sdh), SDIO_FUNC_2,
 						      F2SYNC,
-						      (u8 *) PKTDATA(pfirst),
+						      (u8 *) pfirst->data,
 						      dlen, pfirst, NULL, NULL);
 		} else if (bus->dataptr) {
 			errcode = dhd_bcmsdh_recv_buf(bus,
@@ -3346,7 +3351,7 @@
 				bus->glomerr = 0;
 				dhdsdio_rxfail(bus, true, false);
 				dhd_os_sdlock_rxq(bus->dhd);
-				PKTFREE(osh, bus->glom, false);
+				pkt_buf_free_skb(osh, bus->glom, false);
 				dhd_os_sdunlock_rxq(bus->dhd);
 				bus->rxglomfail++;
 				bus->glom = NULL;
@@ -3355,13 +3360,13 @@
 		}
 #ifdef DHD_DEBUG
 		if (DHD_GLOM_ON()) {
-			prhex("SUPERFRAME", PKTDATA(pfirst),
-			      min_t(int, PKTLEN(pfirst), 48));
+			prhex("SUPERFRAME", pfirst->data,
+			      min_t(int, pfirst->len, 48));
 		}
 #endif
 
 		/* Validate the superframe header */
-		dptr = (u8 *) PKTDATA(pfirst);
+		dptr = (u8 *) (pfirst->data);
 		sublen = ltoh16_ua(dptr);
 		check = ltoh16_ua(dptr + sizeof(u16));
 
@@ -3399,11 +3404,11 @@
 				   __func__));
 			errcode = -1;
 		} else if ((doff < SDPCM_HDRLEN) ||
-			   (doff > (PKTLEN(pfirst) - SDPCM_HDRLEN))) {
+			   (doff > (pfirst->len - SDPCM_HDRLEN))) {
 			DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d "
 				"pkt %d min %d\n",
 				__func__, doff, sublen,
-				PKTLEN(pfirst), SDPCM_HDRLEN));
+				pfirst->len, SDPCM_HDRLEN));
 			errcode = -1;
 		}
 
@@ -3424,14 +3429,14 @@
 		bus->tx_max = txmax;
 
 		/* Remove superframe header, remember offset */
-		PKTPULL(pfirst, doff);
+		skb_pull(pfirst, doff);
 		sfdoff = doff;
 
 		/* Validate all the subframe headers */
 		for (num = 0, pnext = pfirst; pnext && !errcode;
-		     num++, pnext = PKTNEXT(pnext)) {
-			dptr = (u8 *) PKTDATA(pnext);
-			dlen = (u16) PKTLEN(pnext);
+		     num++, pnext = pnext->next) {
+			dptr = (u8 *) (pnext->data);
+			dlen = (u16) (pnext->len);
 			sublen = ltoh16_ua(dptr);
 			check = ltoh16_ua(dptr + sizeof(u16));
 			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -3469,13 +3474,13 @@
 				 a couple retries */
 			if (bus->glomerr++ < 3) {
 				/* Restore superframe header space */
-				PKTPUSH(pfirst, sfdoff);
+				skb_push(pfirst, sfdoff);
 				dhdsdio_rxfail(bus, true, true);
 			} else {
 				bus->glomerr = 0;
 				dhdsdio_rxfail(bus, true, false);
 				dhd_os_sdlock_rxq(bus->dhd);
-				PKTFREE(osh, bus->glom, false);
+				pkt_buf_free_skb(osh, bus->glom, false);
 				dhd_os_sdunlock_rxq(bus->dhd);
 				bus->rxglomfail++;
 				bus->glom = NULL;
@@ -3491,10 +3496,10 @@
 
 		dhd_os_sdlock_rxq(bus->dhd);
 		for (num = 0; pfirst; rxseq++, pfirst = pnext) {
-			pnext = PKTNEXT(pfirst);
-			PKTSETNEXT(pfirst, NULL);
+			pnext = pfirst->next;
+			pfirst->next = NULL;
 
-			dptr = (u8 *) PKTDATA(pfirst);
+			dptr = (u8 *) (pfirst->data);
 			sublen = ltoh16_ua(dptr);
 			chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
 			seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -3502,8 +3507,8 @@
 
 			DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d "
 				"chan %d seq %d\n",
-				__func__, num, pfirst, PKTDATA(pfirst),
-				PKTLEN(pfirst), sublen, chan, seq));
+				__func__, num, pfirst, pfirst->data,
+				pfirst->len, sublen, chan, seq));
 
 			ASSERT((chan == SDPCM_DATA_CHANNEL)
 			       || (chan == SDPCM_EVENT_CHANNEL));
@@ -3519,13 +3524,13 @@
 				prhex("Rx Subframe Data", dptr, dlen);
 #endif
 
-			PKTSETLEN(pfirst, sublen);
-			PKTPULL(pfirst, doff);
+			__skb_trim(pfirst, sublen);
+			skb_pull(pfirst, doff);
 
-			if (PKTLEN(pfirst) == 0) {
-				PKTFREE(bus->dhd->osh, pfirst, false);
+			if (pfirst->len == 0) {
+				pkt_buf_free_skb(bus->dhd->osh, pfirst, false);
 				if (plast) {
-					PKTSETNEXT(plast, pnext);
+					plast->next = pnext;
 				} else {
 					ASSERT(save_pfirst == pfirst);
 					save_pfirst = pnext;
@@ -3536,9 +3541,9 @@
 				DHD_ERROR(("%s: rx protocol error\n",
 					   __func__));
 				bus->dhd->rx_errors++;
-				PKTFREE(osh, pfirst, false);
+				pkt_buf_free_skb(osh, pfirst, false);
 				if (plast) {
-					PKTSETNEXT(plast, pnext);
+					plast->next = pnext;
 				} else {
 					ASSERT(save_pfirst == pfirst);
 					save_pfirst = pnext;
@@ -3548,7 +3553,7 @@
 
 			/* this packet will go up, link back into
 				 chain and count it */
-			PKTSETNEXT(pfirst, pnext);
+			pfirst->next = pnext;
 			plast = pfirst;
 			num++;
 
@@ -3556,11 +3561,11 @@
 			if (DHD_GLOM_ON()) {
 				DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) "
 				"nxt/lnk %p/%p\n",
-				__func__, num, pfirst, PKTDATA(pfirst),
-				PKTLEN(pfirst), PKTNEXT(pfirst),
-				PKTLINK(pfirst)));
-				prhex("", (u8 *) PKTDATA(pfirst),
-				      min_t(int, PKTLEN(pfirst), 32));
+				__func__, num, pfirst, pfirst->data,
+				pfirst->len, pfirst->next,
+				pfirst->prev));
+				prhex("", (u8 *) pfirst->data,
+				      min_t(int, pfirst->len, 32));
 			}
 #endif				/* DHD_DEBUG */
 		}
@@ -3580,7 +3585,7 @@
 /* Return true if there may be more frames to read */
 static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
 {
-	osl_t *osh = bus->dhd->osh;
+	struct osl_info *osh = bus->dhd->osh;
 	bcmsdh_info_t *sdh = bus->sdh;
 
 	u16 len, check;	/* Extracted hardware header fields */
@@ -3588,7 +3593,7 @@
 	u8 fcbits;		/* Extracted fcbits from software header */
 	u8 delta;
 
-	void *pkt;		/* Packet for event or data frames */
+	struct sk_buff *pkt;		/* Packet for event or data frames */
 	u16 pad;		/* Number of pad bytes to read */
 	u16 rdlen;		/* Total number of bytes to read */
 	u8 rxseq;		/* Next sequence number to expect */
@@ -3675,7 +3680,7 @@
 			 */
 			/* Allocate a packet buffer */
 			dhd_os_sdlock_rxq(bus->dhd);
-			pkt = PKTGET(osh, rdlen + DHD_SDALIGN, false);
+			pkt = pkt_buf_get_skb(osh, rdlen + DHD_SDALIGN);
 			if (!pkt) {
 				if (bus->bus == SPI_BUS) {
 					bus->usebufpool = false;
@@ -3721,7 +3726,7 @@
 				} else {
 					/* Give up on data,
 					request rtx of events */
-					DHD_ERROR(("%s (nextlen): PKTGET failed: len %d rdlen %d " "expected rxseq %d\n",
+					DHD_ERROR(("%s (nextlen): pkt_buf_get_skb failed: len %d rdlen %d " "expected rxseq %d\n",
 						__func__, len, rdlen, rxseq));
 					/* Just go try again w/normal
 					header read */
@@ -3732,9 +3737,9 @@
 				if (bus->bus == SPI_BUS)
 					bus->usebufpool = true;
 
-				ASSERT(!PKTLINK(pkt));
+				ASSERT(!(pkt->prev));
 				PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
-				rxbuf = (u8 *) PKTDATA(pkt);
+				rxbuf = (u8 *) (pkt->data);
 				/* Read the entire frame */
 				sdret =
 				    dhd_bcmsdh_recv_buf(bus,
@@ -3748,7 +3753,7 @@
 				if (sdret < 0) {
 					DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
 						__func__, rdlen, sdret));
-					PKTFREE(bus->dhd->osh, pkt, false);
+					pkt_buf_free_skb(bus->dhd->osh, pkt, false);
 					bus->dhd->rx_errors++;
 					dhd_os_sdunlock_rxq(bus->dhd);
 					/* Force retry w/normal header read.
@@ -3896,7 +3901,7 @@
 							     doff);
 					if (bus->usebufpool) {
 						dhd_os_sdlock_rxq(bus->dhd);
-						PKTFREE(bus->dhd->osh, pkt,
+						pkt_buf_free_skb(bus->dhd->osh, pkt,
 							false);
 						dhd_os_sdunlock_rxq(bus->dhd);
 					}
@@ -4086,10 +4091,10 @@
 		}
 
 		dhd_os_sdlock_rxq(bus->dhd);
-		pkt = PKTGET(osh, (rdlen + firstread + DHD_SDALIGN), false);
+		pkt = pkt_buf_get_skb(osh, (rdlen + firstread + DHD_SDALIGN));
 		if (!pkt) {
 			/* Give up on data, request rtx of events */
-			DHD_ERROR(("%s: PKTGET failed: rdlen %d chan %d\n",
+			DHD_ERROR(("%s: pkt_buf_get_skb failed: rdlen %d chan %d\n",
 				   __func__, rdlen, chan));
 			bus->dhd->rx_dropped++;
 			dhd_os_sdunlock_rxq(bus->dhd);
@@ -4098,17 +4103,17 @@
 		}
 		dhd_os_sdunlock_rxq(bus->dhd);
 
-		ASSERT(!PKTLINK(pkt));
+		ASSERT(!(pkt->prev));
 
 		/* Leave room for what we already read, and align remainder */
-		ASSERT(firstread < (PKTLEN(pkt)));
-		PKTPULL(pkt, firstread);
+		ASSERT(firstread < pkt->len);
+		skb_pull(pkt, firstread);
 		PKTALIGN(osh, pkt, rdlen, DHD_SDALIGN);
 
 		/* Read the remaining frame data */
 		sdret =
 		    dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2,
-					F2SYNC, ((u8 *) PKTDATA(pkt)), rdlen,
+					F2SYNC, ((u8 *) (pkt->data)), rdlen,
 					pkt, NULL, NULL);
 		bus->f2rxdata++;
 		ASSERT(sdret != BCME_PENDING);
@@ -4122,7 +4127,7 @@
 				       ? "data" : "test")),
 				   sdret));
 			dhd_os_sdlock_rxq(bus->dhd);
-			PKTFREE(bus->dhd->osh, pkt, false);
+			pkt_buf_free_skb(bus->dhd->osh, pkt, false);
 			dhd_os_sdunlock_rxq(bus->dhd);
 			bus->dhd->rx_errors++;
 			dhdsdio_rxfail(bus, true, RETRYCHAN(chan));
@@ -4130,12 +4135,12 @@
 		}
 
 		/* Copy the already-read portion */
-		PKTPUSH(pkt, firstread);
-		bcopy(bus->rxhdr, PKTDATA(pkt), firstread);
+		skb_push(pkt, firstread);
+		bcopy(bus->rxhdr, pkt->data, firstread);
 
 #ifdef DHD_DEBUG
 		if (DHD_BYTES_ON() && DHD_DATA_ON())
-			prhex("Rx Data", PKTDATA(pkt), len);
+			prhex("Rx Data", pkt->data, len);
 #endif
 
 deliver:
@@ -4146,12 +4151,12 @@
 					__func__, len));
 #ifdef DHD_DEBUG
 				if (DHD_GLOM_ON()) {
-					prhex("Glom Data", PKTDATA(pkt), len);
+					prhex("Glom Data", pkt->data, len);
 				}
 #endif
-				PKTSETLEN(pkt, len);
+				__skb_trim(pkt, len);
 				ASSERT(doff == SDPCM_HDRLEN);
-				PKTPULL(pkt, SDPCM_HDRLEN);
+				skb_pull(pkt, SDPCM_HDRLEN);
 				bus->glomd = pkt;
 			} else {
 				DHD_ERROR(("%s: glom superframe w/o "
@@ -4162,8 +4167,8 @@
 		}
 
 		/* Fill in packet len and prio, deliver upward */
-		PKTSETLEN(pkt, len);
-		PKTPULL(pkt, doff);
+		__skb_trim(pkt, len);
+		skb_pull(pkt, doff);
 
 #ifdef SDTEST
 		/* Test channel packets are processed separately */
@@ -4173,15 +4178,15 @@
 		}
 #endif				/* SDTEST */
 
-		if (PKTLEN(pkt) == 0) {
+		if (pkt->len == 0) {
 			dhd_os_sdlock_rxq(bus->dhd);
-			PKTFREE(bus->dhd->osh, pkt, false);
+			pkt_buf_free_skb(bus->dhd->osh, pkt, false);
 			dhd_os_sdunlock_rxq(bus->dhd);
 			continue;
 		} else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
 			DHD_ERROR(("%s: rx protocol error\n", __func__));
 			dhd_os_sdlock_rxq(bus->dhd);
-			PKTFREE(bus->dhd->osh, pkt, false);
+			pkt_buf_free_skb(bus->dhd->osh, pkt, false);
 			dhd_os_sdunlock_rxq(bus->dhd);
 			bus->dhd->rx_errors++;
 			continue;
@@ -4626,11 +4631,11 @@
 
 static void dhdsdio_pktgen(dhd_bus_t *bus)
 {
-	void *pkt;
+	struct sk_buff *pkt;
 	u8 *data;
 	uint pktcount;
 	uint fillbyte;
-	osl_t *osh = bus->dhd->osh;
+	struct osl_info *osh = bus->dhd->osh;
 	u16 len;
 
 	/* Display current count if appropriate */
@@ -4658,16 +4663,16 @@
 
 		/* Allocate an appropriate-sized packet */
 		len = bus->pktgen_len;
-		pkt = PKTGET(osh,
+		pkt = pkt_buf_get_skb(osh,
 			(len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
 			true);
 		if (!pkt) {
-			DHD_ERROR(("%s: PKTGET failed!\n", __func__));
+			DHD_ERROR(("%s: pkt_buf_get_skb failed!\n", __func__));
 			break;
 		}
 		PKTALIGN(osh, pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN),
 			 DHD_SDALIGN);
-		data = (u8 *) PKTDATA(pkt) + SDPCM_HDRLEN;
+		data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
 
 		/* Write test header cmd and extra based on mode */
 		switch (bus->pktgen_mode) {
@@ -4689,7 +4694,7 @@
 		default:
 			DHD_ERROR(("Unrecognized pktgen mode %d\n",
 				   bus->pktgen_mode));
-			PKTFREE(osh, pkt, true);
+			pkt_buf_free_skb(osh, pkt, true);
 			bus->pktgen_count = 0;
 			return;
 		}
@@ -4706,9 +4711,9 @@
 
 #ifdef DHD_DEBUG
 		if (DHD_BYTES_ON() && DHD_DATA_ON()) {
-			data = (u8 *) PKTDATA(pkt) + SDPCM_HDRLEN;
+			data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
 			prhex("dhdsdio_pktgen: Tx Data", data,
-			      PKTLEN(pkt) - SDPCM_HDRLEN);
+			      pkt->len - SDPCM_HDRLEN);
 		}
 #endif
 
@@ -4733,19 +4738,19 @@
 
 static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
 {
-	void *pkt;
+	struct sk_buff *pkt;
 	u8 *data;
-	osl_t *osh = bus->dhd->osh;
+	struct osl_info *osh = bus->dhd->osh;
 
 	/* Allocate the packet */
-	pkt = PKTGET(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN,
+	pkt = pkt_buf_get_skb(osh, SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN,
 			true);
 	if (!pkt) {
-		DHD_ERROR(("%s: PKTGET failed!\n", __func__));
+		DHD_ERROR(("%s: pkt_buf_get_skb failed!\n", __func__));
 		return;
 	}
 	PKTALIGN(osh, pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
-	data = (u8 *) PKTDATA(pkt) + SDPCM_HDRLEN;
+	data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
 
 	/* Fill in the test header */
 	*data++ = SDPCM_TEST_SEND;
@@ -4758,9 +4763,9 @@
 		bus->pktgen_fail++;
 }
 
-static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
+static void dhdsdio_testrcv(dhd_bus_t *bus, struct sk_buff *pkt, uint seq)
 {
-	osl_t *osh = bus->dhd->osh;
+	struct osl_info *osh = bus->dhd->osh;
 	u8 *data;
 	uint pktlen;
 
@@ -4770,16 +4775,16 @@
 	u16 offset;
 
 	/* Check for min length */
-	pktlen = PKTLEN(pkt);
+	pktlen = pkt->len;
 	if (pktlen < SDPCM_TEST_HDRLEN) {
 		DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n",
 			   pktlen));
-		PKTFREE(osh, pkt, false);
+		pkt_buf_free_skb(osh, pkt, false);
 		return;
 	}
 
 	/* Extract header fields */
-	data = PKTDATA(pkt);
+	data = pkt->data;
 	cmd = *data++;
 	extra = *data++;
 	len = *data++;
@@ -4792,7 +4797,7 @@
 			DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, "
 				"pktlen %d seq %d" " cmd %d extra %d len %d\n",
 				pktlen, seq, cmd, extra, len));
-			PKTFREE(osh, pkt, false);
+			pkt_buf_free_skb(osh, pkt, false);
 			return;
 		}
 	}
@@ -4802,19 +4807,19 @@
 	case SDPCM_TEST_ECHOREQ:
 		/* Rx->Tx turnaround ok (even on NDIS w/current
 			 implementation) */
-		*(u8 *) (PKTDATA(pkt)) = SDPCM_TEST_ECHORSP;
+		*(u8 *) (pkt->data) = SDPCM_TEST_ECHORSP;
 		if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true) == 0) {
 			bus->pktgen_sent++;
 		} else {
 			bus->pktgen_fail++;
-			PKTFREE(osh, pkt, false);
+			pkt_buf_free_skb(osh, pkt, false);
 		}
 		bus->pktgen_rcvd++;
 		break;
 
 	case SDPCM_TEST_ECHORSP:
 		if (bus->ext_loop) {
-			PKTFREE(osh, pkt, false);
+			pkt_buf_free_skb(osh, pkt, false);
 			bus->pktgen_rcvd++;
 			break;
 		}
@@ -4827,12 +4832,12 @@
 				break;
 			}
 		}
-		PKTFREE(osh, pkt, false);
+		pkt_buf_free_skb(osh, pkt, false);
 		bus->pktgen_rcvd++;
 		break;
 
 	case SDPCM_TEST_DISCARD:
-		PKTFREE(osh, pkt, false);
+		pkt_buf_free_skb(osh, pkt, false);
 		bus->pktgen_rcvd++;
 		break;
 
@@ -4842,7 +4847,7 @@
 		DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, "
 			"pktlen %d seq %d" " cmd %d extra %d len %d\n",
 			pktlen, seq, cmd, extra, len));
-		PKTFREE(osh, pkt, false);
+		pkt_buf_free_skb(osh, pkt, false);
 		break;
 	}
 
@@ -4960,7 +4965,7 @@
 	dhd_bus_t *bus = dhdp->bus;
 	u32 addr, val;
 	int rv;
-	void *pkt;
+	struct sk_buff *pkt;
 
 	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
 	if (bus->console_addr == 0)
@@ -5003,7 +5008,7 @@
 	/* Bump dongle by sending an empty event pkt.
 	 * sdpcm_sendup (RX) checks for virtual console input.
 	 */
-	pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, true);
+	pkt = pkt_buf_get_skb(bus->dhd->osh, 4 + SDPCM_RESERVE);
 	if ((pkt != NULL) && bus->clkstate == CLK_AVAIL)
 		dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, true);
 
@@ -5061,7 +5066,7 @@
 
 static void *dhdsdio_probe(u16 venid, u16 devid, u16 bus_no,
 			   u16 slot, u16 func, uint bustype, void *regsva,
-			   osl_t *osh, void *sdh)
+			   struct osl_info *osh, void *sdh)
 {
 	int ret;
 	dhd_bus_t *bus;
@@ -5220,8 +5225,8 @@
 }
 
 static bool
-dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
-		     u16 devid)
+dhdsdio_probe_attach(struct dhd_bus *bus, struct osl_info *osh, void *sdh,
+			void *regsva, u16 devid)
 {
 	u8 clkctl = 0;
 	int err = 0;
@@ -5280,7 +5285,7 @@
 					"failed\n", fn));
 				break;
 			}
-			bzero(cis[fn], SBSDIO_CIS_SIZE_LIMIT);
+			memset(cis[fn], 0, SBSDIO_CIS_SIZE_LIMIT);
 
 			err = bcmsdh_cis_read(sdh, fn, cis[fn],
 						SBSDIO_CIS_SIZE_LIMIT);
@@ -5378,7 +5383,8 @@
 	return false;
 }
 
-static bool dhdsdio_probe_malloc(dhd_bus_t *bus, osl_t *osh, void *sdh)
+static bool dhdsdio_probe_malloc(dhd_bus_t *bus, struct osl_info *osh,
+				 void *sdh)
 {
 	DHD_TRACE(("%s: Enter\n", __func__));
 
@@ -5419,7 +5425,7 @@
 	return false;
 }
 
-static bool dhdsdio_probe_init(dhd_bus_t *bus, osl_t *osh, void *sdh)
+static bool dhdsdio_probe_init(dhd_bus_t *bus, struct osl_info *osh, void *sdh)
 {
 	s32 fnum;
 
@@ -5496,7 +5502,7 @@
 }
 
 bool
-dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+dhd_bus_download_firmware(struct dhd_bus *bus, struct osl_info *osh,
 			  char *fw_path, char *nv_path)
 {
 	bool ret;
@@ -5509,7 +5515,7 @@
 }
 
 static bool
-dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
+dhdsdio_download_firmware(struct dhd_bus *bus, struct osl_info *osh, void *sdh)
 {
 	bool ret;
 
@@ -5524,7 +5530,7 @@
 }
 
 /* Detach and free everything */
-static void dhdsdio_release(dhd_bus_t *bus, osl_t *osh)
+static void dhdsdio_release(dhd_bus_t *bus, struct osl_info *osh)
 {
 	DHD_TRACE(("%s: Enter\n", __func__));
 
@@ -5554,7 +5560,7 @@
 	DHD_TRACE(("%s: Disconnected\n", __func__));
 }
 
-static void dhdsdio_release_malloc(dhd_bus_t *bus, osl_t *osh)
+static void dhdsdio_release_malloc(dhd_bus_t *bus, struct osl_info *osh)
 {
 	DHD_TRACE(("%s: Enter\n", __func__));
 
@@ -5573,7 +5579,7 @@
 	}
 }
 
-static void dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh)
+static void dhdsdio_release_dongle(dhd_bus_t *bus, struct osl_info *osh)
 {
 	DHD_TRACE(("%s: Enter\n", __func__));
 
@@ -5986,7 +5992,7 @@
 
 static int
 dhd_bcmsdh_recv_buf(dhd_bus_t *bus, u32 addr, uint fn, uint flags,
-		    u8 *buf, uint nbytes, void *pkt,
+		    u8 *buf, uint nbytes, struct sk_buff *pkt,
 		    bcmsdh_cmplt_fn_t complete, void *handle)
 {
 	int status;
@@ -6000,7 +6006,7 @@
 
 static int
 dhd_bcmsdh_send_buf(dhd_bus_t *bus, u32 addr, uint fn, uint flags,
-		    u8 *buf, uint nbytes, void *pkt,
+		    u8 *buf, uint nbytes, struct sk_buff *pkt,
 		    bcmsdh_cmplt_fn_t complete, void *handle)
 {
 	return bcmsdh_send_buf
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index ea08252..991463f 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -16,7 +16,6 @@
 
 #include <linux/kernel.h>
 #include <linux/if_arp.h>
-#include <linuxver.h>
 #include <osl.h>
 
 #include <bcmutils.h>
@@ -30,10 +29,6 @@
 #include <dhdioctl.h>
 #include <wlioctl.h>
 
-#include <proto/ethernet.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-
 #include <linux/kthread.h>
 #include <linux/netdevice.h>
 #include <linux/sched.h>
@@ -342,7 +337,7 @@
 	struct wl_iface *ci;						\
 	if (unlikely(!(wl_cfg80211_dev && 				\
 		(ci = wl_get_drvdata(wl_cfg80211_dev))))) {		\
-		WL_ERR(("wl_cfg80211_dev is unavailable\n"));		\
+		WL_ERR("wl_cfg80211_dev is unavailable\n");		\
 		BUG();							\
 	} 								\
 	ci_to_wl(ci);							\
@@ -352,8 +347,8 @@
 do {									\
 	struct wl_priv *wl = wiphy_to_wl(wiphy);			\
 	if (unlikely(!test_bit(WL_STATUS_READY, &wl->status))) {	\
-		WL_INFO(("device is not ready : status (%d)\n",		\
-			(int)wl->status));				\
+		WL_INFO("device is not ready : status (%d)\n",		\
+			(int)wl->status);				\
 		return -EIO;						\
 	}								\
 } while (0)
@@ -618,8 +613,8 @@
 	switch (type) {
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_WDS:
-		WL_ERR(("type (%d) : currently we do not support this type\n",
-			type));
+		WL_ERR("type (%d) : currently we do not support this type\n",
+		       type);
 		return -EOPNOTSUPP;
 	case NL80211_IFTYPE_ADHOC:
 		wl->conf->mode = WL_MODE_IBSS;
@@ -635,15 +630,15 @@
 	ap = htod32(ap);
 	wdev = ndev->ieee80211_ptr;
 	wdev->iftype = type;
-	WL_DBG(("%s : ap (%d), infra (%d)\n", ndev->name, ap, infra));
+	WL_DBG("%s : ap (%d), infra (%d)\n", ndev->name, ap, infra);
 	err = wl_dev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		WL_ERR("WLC_SET_INFRA error (%d)\n", err);
 		return err;
 	}
 	err = wl_dev_ioctl(ndev, WLC_SET_AP, &ap, sizeof(ap));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_AP error (%d)\n", err));
+		WL_ERR("WLC_SET_AP error (%d)\n", err);
 		return err;
 	}
 
@@ -653,7 +648,7 @@
 
 static void wl_iscan_prep(struct wl_scan_params *params, struct wlc_ssid *ssid)
 {
-	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	memcpy(&params->bssid, &ether_bcast, ETH_ALEN);
 	params->bss_type = DOT11_BSSTYPE_ANY;
 	params->scan_type = 0;
 	params->nprobes = -1;
@@ -705,7 +700,7 @@
 
 	if (ssid && ssid->SSID_len)
 		params_size += sizeof(struct wlc_ssid);
-	params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
+	params = kzalloc(params_size, GFP_KERNEL);
 	if (unlikely(!params))
 		return -ENOMEM;
 	memset(params, 0, params_size);
@@ -722,9 +717,9 @@
 				iscan->ioctl_buf, WLC_IOCTL_SMLEN);
 	if (unlikely(err)) {
 		if (err == -EBUSY) {
-			WL_INFO(("system busy : iscan canceled\n"));
+			WL_INFO("system busy : iscan canceled\n");
 		} else {
-			WL_ERR(("error (%d)\n", err));
+			WL_ERR("error (%d)\n", err);
 		}
 	}
 	kfree(params);
@@ -748,7 +743,7 @@
 	err = wl_dev_ioctl(wl_to_ndev(wl), WLC_SET_PASSIVE_SCAN,
 			&passive_scan, sizeof(passive_scan));
 	if (unlikely(err)) {
-		WL_DBG(("error (%d)\n", err));
+		WL_DBG("error (%d)\n", err);
 		return err;
 	}
 	wl_set_mpc(ndev, 0);
@@ -774,12 +769,12 @@
 	s32 err = 0;
 
 	if (unlikely(test_bit(WL_STATUS_SCANNING, &wl->status))) {
-		WL_ERR(("Scanning already : status (%d)\n", (int)wl->status));
+		WL_ERR("Scanning already : status (%d)\n", (int)wl->status);
 		return -EAGAIN;
 	}
 	if (unlikely(test_bit(WL_STATUS_SCAN_ABORTING, &wl->status))) {
-		WL_ERR(("Scanning being aborted : status (%d)\n",
-			(int)wl->status));
+		WL_ERR("Scanning being aborted : status (%d)\n",
+		       (int)wl->status);
 		return -EAGAIN;
 	}
 
@@ -811,26 +806,26 @@
 		else
 			goto scan_out;
 	} else {
-		WL_DBG(("ssid \"%s\", ssid_len (%d)\n",
-			ssids->ssid, ssids->ssid_len));
+		WL_DBG("ssid \"%s\", ssid_len (%d)\n",
+		       ssids->ssid, ssids->ssid_len);
 		memset(&sr->ssid, 0, sizeof(sr->ssid));
 		sr->ssid.SSID_len =
 			    min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
 		if (sr->ssid.SSID_len) {
 			memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
 			sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
-			WL_DBG(("Specific scan ssid=\"%s\" len=%d\n",
-					sr->ssid.SSID, sr->ssid.SSID_len));
+			WL_DBG("Specific scan ssid=\"%s\" len=%d\n",
+			       sr->ssid.SSID, sr->ssid.SSID_len);
 			spec_scan = true;
 		} else {
-			WL_DBG(("Broadcast scan\n"));
+			WL_DBG("Broadcast scan\n");
 		}
-		WL_DBG(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
+		WL_DBG("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len);
 		passive_scan = wl->active_scan ? 0 : 1;
 		err = wl_dev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
 				&passive_scan, sizeof(passive_scan));
 		if (unlikely(err)) {
-			WL_ERR(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
+			WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
 			goto scan_out;
 		}
 		wl_set_mpc(ndev, 0);
@@ -838,10 +833,10 @@
 				sizeof(sr->ssid));
 		if (err) {
 			if (err == -EBUSY) {
-				WL_INFO(("system busy : scan for \"%s\" "
-					"canceled\n", sr->ssid.SSID));
+				WL_INFO("system busy : scan for \"%s\" canceled\n",
+					sr->ssid.SSID);
 			} else {
-				WL_ERR(("WLC_SCAN error (%d)\n", err));
+				WL_ERR("WLC_SCAN error (%d)\n", err);
 			}
 			wl_set_mpc(ndev, 1);
 			goto scan_out;
@@ -865,7 +860,7 @@
 	CHECK_SYS_UP();
 	err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
 	if (unlikely(err)) {
-		WL_DBG(("scan error (%d)\n", err));
+		WL_DBG("scan error (%d)\n", err);
 		return err;
 	}
 
@@ -884,7 +879,7 @@
 
 	err = wl_dev_ioctl(dev, WLC_SET_VAR, buf, len);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 	}
 
 	return err;
@@ -907,7 +902,7 @@
 	BUG_ON(unlikely(!len));
 	err = wl_dev_ioctl(dev, WLC_GET_VAR, &var, len);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 	}
 	*retval = dtoh32(var.val);
 
@@ -920,7 +915,7 @@
 
 	err = wl_dev_intvar_set(dev, "rtsthresh", rts_threshold);
 	if (unlikely(err)) {
-		WL_ERR(("Error (%d)\n", err));
+		WL_ERR("Error (%d)\n", err);
 		return err;
 	}
 	return err;
@@ -932,7 +927,7 @@
 
 	err = wl_dev_intvar_set(dev, "fragthresh", frag_threshold);
 	if (unlikely(err)) {
-		WL_ERR(("Error (%d)\n", err));
+		WL_ERR("Error (%d)\n", err);
 		return err;
 	}
 	return err;
@@ -946,7 +941,7 @@
 	retry = htod32(retry);
 	err = wl_dev_ioctl(dev, cmd, &retry, sizeof(retry));
 	if (unlikely(err)) {
-		WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+		WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
 		return err;
 	}
 	return err;
@@ -1006,7 +1001,7 @@
 
 	CHECK_SYS_UP();
 	if (params->bssid) {
-		WL_ERR(("Invalid bssid\n"));
+		WL_ERR("Invalid bssid\n");
 		return -EOPNOTSUPP;
 	}
 	bss = cfg80211_get_ibss(wiphy, NULL, params->ssid, params->ssid_len);
@@ -1032,7 +1027,7 @@
 	}
 	if (bss) {
 		wl->ibss_starter = false;
-		WL_DBG(("Found IBSS\n"));
+		WL_DBG("Found IBSS\n");
 	} else {
 		wl->ibss_starter = true;
 	}
@@ -1049,14 +1044,14 @@
 	join_params.ssid.SSID_len = htod32(params->ssid_len);
 	if (params->bssid)
 		memcpy(&join_params.params.bssid, params->bssid,
-		       ETHER_ADDR_LEN);
+		       ETH_ALEN);
 	else
-		memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
+		memset(&join_params.params.bssid, 0, ETH_ALEN);
 
 	err = wl_dev_ioctl(dev, WLC_SET_SSID, &join_params,
 			sizeof(join_params));
 	if (unlikely(err)) {
-		WL_ERR(("Error (%d)\n", err));
+		WL_ERR("Error (%d)\n", err);
 		return err;
 	}
 	return err;
@@ -1087,10 +1082,10 @@
 		val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
 	else
 		val = WPA_AUTH_DISABLED;
-	WL_DBG(("setting wpa_auth to 0x%0x\n", val));
+	WL_DBG("setting wpa_auth to 0x%0x\n", val);
 	err = wl_dev_intvar_set(dev, "wpa_auth", val);
 	if (unlikely(err)) {
-		WL_ERR(("set wpa_auth failed (%d)\n", err));
+		WL_ERR("set wpa_auth failed (%d)\n", err);
 		return err;
 	}
 	sec = wl_read_prof(wl, WL_PROF_SEC);
@@ -1109,27 +1104,27 @@
 	switch (sme->auth_type) {
 	case NL80211_AUTHTYPE_OPEN_SYSTEM:
 		val = 0;
-		WL_DBG(("open system\n"));
+		WL_DBG("open system\n");
 		break;
 	case NL80211_AUTHTYPE_SHARED_KEY:
 		val = 1;
-		WL_DBG(("shared key\n"));
+		WL_DBG("shared key\n");
 		break;
 	case NL80211_AUTHTYPE_AUTOMATIC:
 		val = 2;
-		WL_DBG(("automatic\n"));
+		WL_DBG("automatic\n");
 		break;
 	case NL80211_AUTHTYPE_NETWORK_EAP:
-		WL_DBG(("network eap\n"));
+		WL_DBG("network eap\n");
 	default:
 		val = 2;
-		WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
+		WL_ERR("invalid auth type (%d)\n", sme->auth_type);
 		break;
 	}
 
 	err = wl_dev_intvar_set(dev, "auth", val);
 	if (unlikely(err)) {
-		WL_ERR(("set auth failed (%d)\n", err));
+		WL_ERR("set auth failed (%d)\n", err);
 		return err;
 	}
 	sec = wl_read_prof(wl, WL_PROF_SEC);
@@ -1162,8 +1157,8 @@
 			pval = AES_ENABLED;
 			break;
 		default:
-			WL_ERR(("invalid cipher pairwise (%d)\n",
-				sme->crypto.ciphers_pairwise[0]));
+			WL_ERR("invalid cipher pairwise (%d)\n",
+			       sme->crypto.ciphers_pairwise[0]);
 			return -EINVAL;
 		}
 	}
@@ -1183,16 +1178,16 @@
 			gval = AES_ENABLED;
 			break;
 		default:
-			WL_ERR(("invalid cipher group (%d)\n",
-				sme->crypto.cipher_group));
+			WL_ERR("invalid cipher group (%d)\n",
+			       sme->crypto.cipher_group);
 			return -EINVAL;
 		}
 	}
 
-	WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
+	WL_DBG("pval (%d) gval (%d)\n", pval, gval);
 	err = wl_dev_intvar_set(dev, "wsec", pval | gval);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 		return err;
 	}
 
@@ -1214,7 +1209,7 @@
 	if (sme->crypto.n_akm_suites) {
 		err = wl_dev_intvar_get(dev, "wpa_auth", &val);
 		if (unlikely(err)) {
-			WL_ERR(("could not get wpa_auth (%d)\n", err));
+			WL_ERR("could not get wpa_auth (%d)\n", err);
 			return err;
 		}
 		if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
@@ -1226,8 +1221,8 @@
 				val = WPA_AUTH_PSK;
 				break;
 			default:
-				WL_ERR(("invalid cipher group (%d)\n",
-					sme->crypto.cipher_group));
+				WL_ERR("invalid cipher group (%d)\n",
+				       sme->crypto.cipher_group);
 				return -EINVAL;
 			}
 		} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1239,16 +1234,16 @@
 				val = WPA2_AUTH_PSK;
 				break;
 			default:
-				WL_ERR(("invalid cipher group (%d)\n",
-					sme->crypto.cipher_group));
+				WL_ERR("invalid cipher group (%d)\n",
+				       sme->crypto.cipher_group);
 				return -EINVAL;
 			}
 		}
 
-		WL_DBG(("setting wpa_auth to %d\n", val));
+		WL_DBG("setting wpa_auth to %d\n", val);
 		err = wl_dev_intvar_set(dev, "wpa_auth", val);
 		if (unlikely(err)) {
-			WL_ERR(("could not set wpa_auth (%d)\n", err));
+			WL_ERR("could not set wpa_auth (%d)\n", err);
 			return err;
 		}
 	}
@@ -1268,11 +1263,11 @@
 	s32 val;
 	s32 err = 0;
 
-	WL_DBG(("key len (%d)\n", sme->key_len));
+	WL_DBG("key len (%d)\n", sme->key_len);
 	if (sme->key_len) {
 		sec = wl_read_prof(wl, WL_PROF_SEC);
-		WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
-			sec->wpa_versions, sec->cipher_pairwise));
+		WL_DBG("wpa_versions 0x%x cipher_pairwise 0x%x\n",
+		       sec->wpa_versions, sec->cipher_pairwise);
 		if (!
 		    (sec->wpa_versions & (NL80211_WPA_VERSION_1 |
 					  NL80211_WPA_VERSION_2))
@@ -1282,7 +1277,7 @@
 			key.len = (u32) sme->key_len;
 			key.index = (u32) sme->key_idx;
 			if (unlikely(key.len > sizeof(key.data))) {
-				WL_ERR(("Too long key length (%u)\n", key.len));
+				WL_ERR("Too long key length (%u)\n", key.len);
 				return -EINVAL;
 			}
 			memcpy(key.data, sme->key, key.len);
@@ -1295,27 +1290,27 @@
 				key.algo = CRYPTO_ALGO_WEP128;
 				break;
 			default:
-				WL_ERR(("Invalid algorithm (%d)\n",
-					sme->crypto.ciphers_pairwise[0]));
+				WL_ERR("Invalid algorithm (%d)\n",
+				       sme->crypto.ciphers_pairwise[0]);
 				return -EINVAL;
 			}
 			/* Set the new key/index */
-			WL_DBG(("key length (%d) key index (%d) algo (%d)\n",
-				key.len, key.index, key.algo));
-			WL_DBG(("key \"%s\"\n", key.data));
+			WL_DBG("key length (%d) key index (%d) algo (%d)\n",
+			       key.len, key.index, key.algo);
+			WL_DBG("key \"%s\"\n", key.data);
 			swap_key_from_BE(&key);
 			err = wl_dev_ioctl(dev, WLC_SET_KEY, &key,
 					sizeof(key));
 			if (unlikely(err)) {
-				WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+				WL_ERR("WLC_SET_KEY error (%d)\n", err);
 				return err;
 			}
 			if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
-				WL_DBG(("set auth_type to shared key\n"));
+				WL_DBG("set auth_type to shared key\n");
 				val = 1;	/* shared key */
 				err = wl_dev_intvar_set(dev, "auth", val);
 				if (unlikely(err)) {
-					WL_ERR(("set auth failed (%d)\n", err));
+					WL_ERR("set auth failed (%d)\n", err);
 					return err;
 				}
 			}
@@ -1337,15 +1332,15 @@
 
 	CHECK_SYS_UP();
 	if (unlikely(!sme->ssid)) {
-		WL_ERR(("Invalid ssid\n"));
+		WL_ERR("Invalid ssid\n");
 		return -EOPNOTSUPP;
 	}
 	if (chan) {
 		wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
-		WL_DBG(("channel (%d), center_req (%d)\n", wl->channel,
-			chan->center_freq));
+		WL_DBG("channel (%d), center_req (%d)\n",
+		       wl->channel, chan->center_freq);
 	}
-	WL_DBG(("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len));
+	WL_DBG("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
 	err = wl_set_wpa_version(dev, sme);
 	if (unlikely(err))
 		return err;
@@ -1378,18 +1373,18 @@
 	memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
 	join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
 	wl_update_prof(wl, NULL, &join_params.ssid, WL_PROF_SSID);
-	memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
 
 	wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
-	WL_DBG(("join_param_size %d\n", join_params_size));
+	WL_DBG("join_param_size %d\n", join_params_size);
 
 	if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
-		WL_DBG(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
-			join_params.ssid.SSID_len));
+		WL_DBG("ssid \"%s\", len (%d)\n",
+		       join_params.ssid.SSID, join_params.ssid.SSID_len);
 	}
 	err = wl_dev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 		return err;
 	}
 	set_bit(WL_STATUS_CONNECTING, &wl->status);
@@ -1406,17 +1401,17 @@
 	bool act = false;
 	s32 err = 0;
 
-	WL_DBG(("Reason %d\n", reason_code));
+	WL_DBG("Reason %d\n", reason_code);
 	CHECK_SYS_UP();
 	act = *(bool *) wl_read_prof(wl, WL_PROF_ACT);
 	if (likely(act)) {
 		scbval.val = reason_code;
-		memcpy(&scbval.ea, &wl->bssid, ETHER_ADDR_LEN);
+		memcpy(&scbval.ea, &wl->bssid, ETH_ALEN);
 		scbval.val = htod32(scbval.val);
 		err = wl_dev_ioctl(dev, WLC_DISASSOC, &scbval,
 				sizeof(scb_val_t));
 		if (unlikely(err)) {
-			WL_ERR(("error (%d)\n", err));
+			WL_ERR("error (%d)\n", err);
 			return err;
 		}
 	}
@@ -1441,13 +1436,13 @@
 		break;
 	case NL80211_TX_POWER_LIMITED:
 		if (dbm < 0) {
-			WL_ERR(("TX_POWER_LIMITTED - dbm is negative\n"));
+			WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
 			return -EINVAL;
 		}
 		break;
 	case NL80211_TX_POWER_FIXED:
 		if (dbm < 0) {
-			WL_ERR(("TX_POWER_FIXED - dbm is negative..\n"));
+			WL_ERR("TX_POWER_FIXED - dbm is negative\n");
 			return -EINVAL;
 		}
 		break;
@@ -1457,7 +1452,7 @@
 	disable = htod32(disable);
 	err = wl_dev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+		WL_ERR("WLC_SET_RADIO error (%d)\n", err);
 		return err;
 	}
 
@@ -1468,7 +1463,7 @@
 	err = wl_dev_intvar_set(ndev, "qtxpower",
 			(s32) (bcm_mw_to_qdbm(txpwrmw)));
 	if (unlikely(err)) {
-		WL_ERR(("qtxpower error (%d)\n", err));
+		WL_ERR("qtxpower error (%d)\n", err);
 		return err;
 	}
 	wl->conf->tx_power = dbm;
@@ -1487,7 +1482,7 @@
 	CHECK_SYS_UP();
 	err = wl_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 		return err;
 	}
 	result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
@@ -1504,12 +1499,12 @@
 	s32 wsec;
 	s32 err = 0;
 
-	WL_DBG(("key index (%d)\n", key_idx));
+	WL_DBG("key index (%d)\n", key_idx);
 	CHECK_SYS_UP();
 
 	err = wl_dev_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		WL_ERR("WLC_GET_WSEC error (%d)\n", err);
 		return err;
 	}
 	wsec = dtoh32(wsec);
@@ -1520,7 +1515,7 @@
 		err = wl_dev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
 				sizeof(index));
 		if (unlikely(err)) {
-			WL_ERR(("error (%d)\n", err));
+			WL_ERR("error (%d)\n", err);
 		}
 	}
 	return err;
@@ -1537,8 +1532,8 @@
 	key.index = (u32) key_idx;
 	/* Instead of bcast for ea address for default wep keys,
 		 driver needs it to be Null */
-	if (!ETHER_ISMULTI(mac_addr))
-		memcpy((char *)&key.ea, (void *)mac_addr, ETHER_ADDR_LEN);
+	if (!is_multicast_ether_addr(mac_addr))
+		memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
 	key.len = (u32) params->key_len;
 	/* check for key index change */
 	if (key.len == 0) {
@@ -1546,16 +1541,16 @@
 		swap_key_from_BE(&key);
 		err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
 		if (unlikely(err)) {
-			WL_ERR(("key delete error (%d)\n", err));
+			WL_ERR("key delete error (%d)\n", err);
 			return err;
 		}
 	} else {
 		if (key.len > sizeof(key.data)) {
-			WL_ERR(("Invalid key length (%d)\n", key.len));
+			WL_ERR("Invalid key length (%d)\n", key.len);
 			return -EINVAL;
 		}
 
-		WL_DBG(("Setting the key index %d\n", key.index));
+		WL_DBG("Setting the key index %d\n", key.index);
 		memcpy(key.data, params->key, key.len);
 
 		if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
@@ -1579,26 +1574,26 @@
 		switch (params->cipher) {
 		case WLAN_CIPHER_SUITE_WEP40:
 			key.algo = CRYPTO_ALGO_WEP1;
-			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_WEP40\n");
 			break;
 		case WLAN_CIPHER_SUITE_WEP104:
 			key.algo = CRYPTO_ALGO_WEP128;
-			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_WEP104\n");
 			break;
 		case WLAN_CIPHER_SUITE_TKIP:
 			key.algo = CRYPTO_ALGO_TKIP;
-			WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_TKIP\n");
 			break;
 		case WLAN_CIPHER_SUITE_AES_CMAC:
 			key.algo = CRYPTO_ALGO_AES_CCM;
-			WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_AES_CMAC\n");
 			break;
 		case WLAN_CIPHER_SUITE_CCMP:
 			key.algo = CRYPTO_ALGO_AES_CCM;
-			WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_CCMP\n");
 			break;
 		default:
-			WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+			WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
 			return -EINVAL;
 		}
 		swap_key_from_BE(&key);
@@ -1606,7 +1601,7 @@
 		dhd_wait_pend8021x(dev);
 		err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
 		if (unlikely(err)) {
-			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			WL_ERR("WLC_SET_KEY error (%d)\n", err);
 			return err;
 		}
 	}
@@ -1623,7 +1618,7 @@
 	s32 wsec;
 	s32 err = 0;
 
-	WL_DBG(("key index (%d)\n", key_idx));
+	WL_DBG("key index (%d)\n", key_idx);
 	CHECK_SYS_UP();
 
 	if (mac_addr)
@@ -1634,7 +1629,7 @@
 	key.index = (u32) key_idx;
 
 	if (unlikely(key.len > sizeof(key.data))) {
-		WL_ERR(("Too long key length (%u)\n", key.len));
+		WL_ERR("Too long key length (%u)\n", key.len);
 		return -EINVAL;
 	}
 	memcpy(key.data, params->key, key.len);
@@ -1643,26 +1638,26 @@
 	switch (params->cipher) {
 	case WLAN_CIPHER_SUITE_WEP40:
 		key.algo = CRYPTO_ALGO_WEP1;
-		WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_WEP40\n");
 		break;
 	case WLAN_CIPHER_SUITE_WEP104:
 		key.algo = CRYPTO_ALGO_WEP128;
-		WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_WEP104\n");
 		break;
 	case WLAN_CIPHER_SUITE_TKIP:
 		key.algo = CRYPTO_ALGO_TKIP;
-		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_TKIP\n");
 		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
 		key.algo = CRYPTO_ALGO_AES_CCM;
-		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_AES_CMAC\n");
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 		key.algo = CRYPTO_ALGO_AES_CCM;
-		WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_CCMP\n");
 		break;
 	default:
-		WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+		WL_ERR("Invalid cipher (0x%x)\n", params->cipher);
 		return -EINVAL;
 	}
 
@@ -1670,21 +1665,21 @@
 	swap_key_from_BE(&key);
 	err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+		WL_ERR("WLC_SET_KEY error (%d)\n", err);
 		return err;
 	}
 
 	val = WEP_ENABLED;
 	err = wl_dev_intvar_get(dev, "wsec", &wsec);
 	if (unlikely(err)) {
-		WL_ERR(("get wsec error (%d)\n", err));
+		WL_ERR("get wsec error (%d)\n", err);
 		return err;
 	}
 	wsec &= ~(WEP_ENABLED);
 	wsec |= val;
 	err = wl_dev_intvar_set(dev, "wsec", wsec);
 	if (unlikely(err)) {
-		WL_ERR(("set wsec error (%d)\n", err));
+		WL_ERR("set wsec error (%d)\n", err);
 		return err;
 	}
 
@@ -1692,7 +1687,7 @@
 	val = htod32(val);
 	err = wl_dev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_AUTH error (%d)\n", err));
+		WL_ERR("WLC_SET_AUTH error (%d)\n", err);
 		return err;
 	}
 	return err;
@@ -1714,7 +1709,7 @@
 	key.flags = WL_PRIMARY_KEY;
 	key.algo = CRYPTO_ALGO_OFF;
 
-	WL_DBG(("key index (%d)\n", key_idx));
+	WL_DBG("key index (%d)\n", key_idx);
 	/* Set the new key/index */
 	swap_key_from_BE(&key);
 	err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
@@ -1722,10 +1717,10 @@
 		if (err == -EINVAL) {
 			if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
 				/* we ignore this key index in this case */
-				WL_DBG(("invalid key index (%d)\n", key_idx));
+				WL_DBG("invalid key index (%d)\n", key_idx);
 			}
 		} else {
-			WL_ERR(("WLC_SET_KEY error (%d)\n", err));
+			WL_ERR("WLC_SET_KEY error (%d)\n", err);
 		}
 		return err;
 	}
@@ -1733,14 +1728,14 @@
 	val = 0;
 	err = wl_dev_intvar_get(dev, "wsec", &wsec);
 	if (unlikely(err)) {
-		WL_ERR(("get wsec error (%d)\n", err));
+		WL_ERR("get wsec error (%d)\n", err);
 		return err;
 	}
 	wsec &= ~(WEP_ENABLED);
 	wsec |= val;
 	err = wl_dev_intvar_set(dev, "wsec", wsec);
 	if (unlikely(err)) {
-		WL_ERR(("set wsec error (%d)\n", err));
+		WL_ERR("set wsec error (%d)\n", err);
 		return err;
 	}
 
@@ -1748,7 +1743,7 @@
 	val = htod32(val);
 	err = wl_dev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_AUTH error (%d)\n", err));
+		WL_ERR("WLC_SET_AUTH error (%d)\n", err);
 		return err;
 	}
 	return err;
@@ -1766,7 +1761,7 @@
 	s32 wsec;
 	s32 err = 0;
 
-	WL_DBG(("key index (%d)\n", key_idx));
+	WL_DBG("key index (%d)\n", key_idx);
 	CHECK_SYS_UP();
 
 	memset(&key, 0, sizeof(key));
@@ -1778,7 +1773,7 @@
 
 	err = wl_dev_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
+		WL_ERR("WLC_GET_WSEC error (%d)\n", err);
 		return err;
 	}
 	wsec = dtoh32(wsec);
@@ -1787,22 +1782,22 @@
 		sec = wl_read_prof(wl, WL_PROF_SEC);
 		if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
 			params.cipher = WLAN_CIPHER_SUITE_WEP40;
-			WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_WEP40\n");
 		} else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
 			params.cipher = WLAN_CIPHER_SUITE_WEP104;
-			WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+			WL_DBG("WLAN_CIPHER_SUITE_WEP104\n");
 		}
 		break;
 	case TKIP_ENABLED:
 		params.cipher = WLAN_CIPHER_SUITE_TKIP;
-		WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_TKIP\n");
 		break;
 	case AES_ENABLED:
 		params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
-		WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+		WL_DBG("WLAN_CIPHER_SUITE_AES_CMAC\n");
 		break;
 	default:
-		WL_ERR(("Invalid algo (0x%x)\n", wsec));
+		WL_ERR("Invalid algo (0x%x)\n", wsec);
 		return -EINVAL;
 	}
 
@@ -1814,7 +1809,7 @@
 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
 				    struct net_device *dev, u8 key_idx)
 {
-	WL_INFO(("Not supported\n"));
+	WL_INFO("Not supported\n");
 	CHECK_SYS_UP();
 	return -EOPNOTSUPP;
 }
@@ -1831,20 +1826,20 @@
 
 	CHECK_SYS_UP();
 	if (unlikely
-	    (memcmp(mac, wl_read_prof(wl, WL_PROF_BSSID), ETHER_ADDR_LEN))) {
-		WL_ERR(("Wrong Mac address\n"));
+	    (memcmp(mac, wl_read_prof(wl, WL_PROF_BSSID), ETH_ALEN))) {
+		WL_ERR("Wrong Mac address\n");
 		return -ENOENT;
 	}
 
 	/* Report the current tx rate */
 	err = wl_dev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate));
 	if (err) {
-		WL_ERR(("Could not get rate (%d)\n", err));
+		WL_ERR("Could not get rate (%d)\n", err);
 	} else {
 		rate = dtoh32(rate);
 		sinfo->filled |= STATION_INFO_TX_BITRATE;
 		sinfo->txrate.legacy = rate * 5;
-		WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+		WL_DBG("Rate %d Mbps\n", rate / 2);
 	}
 
 	if (test_bit(WL_STATUS_CONNECTED, &wl->status)) {
@@ -1852,13 +1847,13 @@
 		err = wl_dev_ioctl(dev, WLC_GET_RSSI, &scb_val,
 				sizeof(scb_val_t));
 		if (unlikely(err)) {
-			WL_ERR(("Could not get rssi (%d)\n", err));
+			WL_ERR("Could not get rssi (%d)\n", err);
 			return err;
 		}
 		rssi = dtoh32(scb_val.val);
 		sinfo->filled |= STATION_INFO_SIGNAL;
 		sinfo->signal = rssi;
-		WL_DBG(("RSSI %d dBm\n", rssi));
+		WL_DBG("RSSI %d dBm\n", rssi);
 	}
 
 	return err;
@@ -1874,13 +1869,13 @@
 	CHECK_SYS_UP();
 	pm = enabled ? PM_FAST : PM_OFF;
 	pm = htod32(pm);
-	WL_DBG(("power save %s\n", (pm ? "enabled" : "disabled")));
+	WL_DBG("power save %s\n", (pm ? "enabled" : "disabled"));
 	err = wl_dev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
 	if (unlikely(err)) {
 		if (err == -ENODEV)
-			WL_DBG(("net_device is not ready yet\n"));
+			WL_DBG("net_device is not ready yet\n");
 		else
-			WL_ERR(("error (%d)\n", err));
+			WL_ERR("error (%d)\n", err);
 		return err;
 	}
 	return err;
@@ -1932,7 +1927,7 @@
 	err = wl_dev_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
 			sizeof(rateset));
 	if (unlikely(err)) {
-		WL_ERR(("could not get current rateset (%d)\n", err));
+		WL_ERR("could not get current rateset (%d)\n", err);
 		return err;
 	}
 
@@ -1952,7 +1947,7 @@
 		rate = val / 500000;
 	}
 
-	WL_DBG(("rate %d mbps\n", (rate / 2)));
+	WL_DBG("rate %d mbps\n", rate / 2);
 
 	/*
 	 *
@@ -1962,7 +1957,7 @@
 	err_bg = wl_dev_intvar_set(dev, "bg_rate", rate);
 	err_a = wl_dev_intvar_set(dev, "a_rate", rate);
 	if (unlikely(err_bg && err_a)) {
-		WL_ERR(("could not set fixed rate (%d) (%d)\n", err_bg, err_a));
+		WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
 		return err_bg | err_a;
 	}
 
@@ -2007,12 +2002,12 @@
 {
 	int i, j;
 
-	WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+	WL_DBG("No of elements %d\n", pmk_list->pmkids.npmkid);
 	for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
-		WL_DBG(("PMKID[%d]: %pM =\n", i,
-			&pmk_list->pmkids.pmkid[i].BSSID));
+		WL_DBG("PMKID[%d]: %pM =\n", i,
+		       &pmk_list->pmkids.pmkid[i].BSSID);
 		for (j = 0; j < WPA2_PMKID_LEN; j++) {
-			WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
+			WL_DBG("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]);
 		}
 	}
 	if (likely(!err)) {
@@ -2034,11 +2029,11 @@
 	CHECK_SYS_UP();
 	for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
 		if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
-			    ETHER_ADDR_LEN))
+			    ETH_ALEN))
 			break;
 	if (i < WL_NUM_PMKIDS_MAX) {
 		memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
-		       ETHER_ADDR_LEN);
+		       ETH_ALEN);
 		memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
 		       WPA2_PMKID_LEN);
 		if (i == wl->pmk_list->pmkids.npmkid)
@@ -2046,12 +2041,12 @@
 	} else {
 		err = -EINVAL;
 	}
-	WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
-		&wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].BSSID));
+	WL_DBG("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+	       &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].BSSID);
 	for (i = 0; i < WPA2_PMKID_LEN; i++) {
-		WL_DBG(("%02x\n",
-			wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].
-			PMKID[i]));
+		WL_DBG("%02x\n",
+		       wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].
+		       PMKID[i]);
 	}
 
 	err = wl_update_pmklist(dev, wl->pmk_list, err);
@@ -2069,19 +2064,19 @@
 	int i;
 
 	CHECK_SYS_UP();
-	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+	memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
 	memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
 
-	WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
-		&pmkid.pmkid[0].BSSID));
+	WL_DBG("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+	       &pmkid.pmkid[0].BSSID);
 	for (i = 0; i < WPA2_PMKID_LEN; i++) {
-		WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
+		WL_DBG("%02x\n", pmkid.pmkid[0].PMKID[i]);
 	}
 
 	for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
 		if (!memcmp
 		    (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
-		     ETHER_ADDR_LEN))
+		     ETH_ALEN))
 			break;
 
 	if ((wl->pmk_list->pmkids.npmkid > 0)
@@ -2090,7 +2085,7 @@
 		for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
 			memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
 			       &wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
-			       ETHER_ADDR_LEN);
+			       ETH_ALEN);
 			memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
 			       &wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
 			       WPA2_PMKID_LEN);
@@ -2168,13 +2163,13 @@
 
 	wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
 	if (unlikely(!wdev)) {
-		WL_ERR(("Could not allocate wireless device\n"));
+		WL_ERR("Could not allocate wireless device\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	wdev->wiphy =
 	    wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv) + sizeof_iface);
 	if (unlikely(!wdev->wiphy)) {
-		WL_ERR(("Couldn not allocate wiphy device\n"));
+		WL_ERR("Couldn not allocate wiphy device\n");
 		err = -ENOMEM;
 		goto wiphy_new_out;
 	}
@@ -2204,7 +2199,7 @@
 #endif				/* !WL_POWERSAVE_DISABLED */
 	err = wiphy_register(wdev->wiphy);
 	if (unlikely(err < 0)) {
-		WL_ERR(("Couldn not register wiphy device (%d)\n", err));
+		WL_ERR("Couldn not register wiphy device (%d)\n", err);
 		goto wiphy_register_out;
 	}
 	return wdev;
@@ -2223,7 +2218,7 @@
 	struct wireless_dev *wdev = wl_to_wdev(wl);
 
 	if (unlikely(!wdev)) {
-		WL_ERR(("wdev is invalid\n"));
+		WL_ERR("wdev is invalid\n");
 		return;
 	}
 	wiphy_unregister(wdev->wiphy);
@@ -2241,11 +2236,11 @@
 
 	bss_list = wl->bss_list;
 	if (unlikely(bss_list->version != WL_BSS_INFO_VERSION)) {
-		WL_ERR(("Version %d != WL_BSS_INFO_VERSION\n",
-			bss_list->version));
+		WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
+		       bss_list->version);
 		return -EOPNOTSUPP;
 	}
-	WL_DBG(("scanned AP count (%d)\n", bss_list->count));
+	WL_DBG("scanned AP count (%d)\n", bss_list->count);
 	bi = next_bss(bss_list, bi);
 	for_each_bss(bss_list, bi, i) {
 		err = wl_inform_single_bss(wl, bi);
@@ -2270,14 +2265,14 @@
 	s32 err = 0;
 
 	if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
-		WL_DBG(("Beacon is larger than buffer. Discarding\n"));
+		WL_DBG("Beacon is larger than buffer. Discarding\n");
 		return err;
 	}
 	notif_bss_info =
 	    kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt) - sizeof(u8) +
 		    WL_BSS_INFO_MAX, GFP_KERNEL);
 	if (unlikely(!notif_bss_info)) {
-		WL_ERR(("notif_bss_info alloc failed\n"));
+		WL_ERR("notif_bss_info alloc failed\n");
 		return -ENOMEM;
 	}
 	mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
@@ -2289,7 +2284,7 @@
 	else
 		band = wiphy->bands[IEEE80211_BAND_5GHZ];
 	notif_bss_info->rssi = bi->RSSI;
-	memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
+	memcpy(mgmt->bssid, &bi->BSSID, ETH_ALEN);
 	mgmt_type = wl->active_scan ?
 		IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
 	if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
@@ -2321,17 +2316,17 @@
 	freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
 	channel = ieee80211_get_channel(wiphy, freq);
 
-	WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
-		bi->SSID,
-		notif_bss_info->rssi, notif_bss_info->channel,
-		mgmt->u.beacon.capab_info, &bi->BSSID));
+	WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n",
+	       bi->SSID,
+	       notif_bss_info->rssi, notif_bss_info->channel,
+	       mgmt->u.beacon.capab_info, &bi->BSSID);
 
 	signal = notif_bss_info->rssi * 100;
 	if (unlikely(!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
 						le16_to_cpu
 						(notif_bss_info->frame_len),
 						signal, GFP_KERNEL))) {
-		WL_ERR(("cfg80211_inform_bss_frame error\n"));
+		WL_ERR("cfg80211_inform_bss_frame error\n");
 		kfree(notif_bss_info);
 		return -EINVAL;
 	}
@@ -2399,12 +2394,12 @@
 		if (wl_is_ibssmode(wl)) {
 			cfg80211_ibss_joined(ndev, (s8 *)&e->addr,
 					     GFP_KERNEL);
-			WL_DBG(("joined in IBSS network\n"));
+			WL_DBG("joined in IBSS network\n");
 		} else {
 			wl_bss_connect_done(wl, ndev, e, data, true);
-			WL_DBG(("joined in BSS network \"%s\"\n",
-				((struct wlc_ssid *)
-				 wl_read_prof(wl, WL_PROF_SSID))->SSID));
+			WL_DBG("joined in BSS network \"%s\"\n",
+			       ((struct wlc_ssid *)
+				wl_read_prof(wl, WL_PROF_SSID))->SSID);
 		}
 		act = true;
 		wl_update_prof(wl, e, &act, WL_PROF_ACT);
@@ -2459,7 +2454,7 @@
 	err = wl_dev_ioctl(dev, WLC_GET_VAR, (void *)wl->ioctl_buf,
 			WL_IOCTL_LEN_MAX);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 		return err;
 	}
 	memcpy(buf, wl->ioctl_buf, buf_len);
@@ -2479,7 +2474,7 @@
 	err = wl_dev_bufvar_get(ndev, "assoc_info", wl->extra_buf,
 				WL_ASSOC_INFO_MAX);
 	if (unlikely(err)) {
-		WL_ERR(("could not get assoc info (%d)\n", err));
+		WL_ERR("could not get assoc info (%d)\n", err);
 		return err;
 	}
 	assoc_info = (struct wl_assoc_ielen *)wl->extra_buf;
@@ -2489,7 +2484,7 @@
 		err = wl_dev_bufvar_get(ndev, "assoc_req_ies", wl->extra_buf,
 					WL_ASSOC_INFO_MAX);
 		if (unlikely(err)) {
-			WL_ERR(("could not get assoc req (%d)\n", err));
+			WL_ERR("could not get assoc req (%d)\n", err);
 			return err;
 		}
 		conn_info->req_ie_len = req_len;
@@ -2503,7 +2498,7 @@
 		err = wl_dev_bufvar_get(ndev, "assoc_resp_ies", wl->extra_buf,
 					WL_ASSOC_INFO_MAX);
 		if (unlikely(err)) {
-			WL_ERR(("could not get assoc resp (%d)\n", err));
+			WL_ERR("could not get assoc resp (%d)\n", err);
 			return err;
 		}
 		conn_info->resp_ie_len = resp_len;
@@ -2513,8 +2508,8 @@
 		conn_info->resp_ie_len = 0;
 		conn_info->resp_ie = NULL;
 	}
-	WL_DBG(("req len (%d) resp len (%d)\n", conn_info->req_ie_len,
-		conn_info->resp_ie_len));
+	WL_DBG("req len (%d) resp len (%d)\n",
+	       conn_info->req_ie_len, conn_info->resp_ie_len);
 
 	return err;
 }
@@ -2547,8 +2542,8 @@
 		join_params->params.chanspec_num =
 			htod32(join_params->params.chanspec_num);
 
-		WL_DBG(("join_params->params.chanspec_list[0]= %#X, channel %d, chanspec %#X\n",
-			join_params->params.chanspec_list[0], ch, chanspec));
+		WL_DBG("join_params->params.chanspec_list[0]= %#X, channel %d, chanspec %#X\n",
+		       join_params->params.chanspec_list[0], ch, chanspec);
 	}
 }
 
@@ -2575,16 +2570,16 @@
 
 	rtnl_lock();
 	if (unlikely(!bss)) {
-		WL_DBG(("Could not find the AP\n"));
+		WL_DBG("Could not find the AP\n");
 		*(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
 		err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_BSS_INFO,
 				wl->extra_buf, WL_EXTRA_BUF_MAX);
 		if (unlikely(err)) {
-			WL_ERR(("Could not get bss info %d\n", err));
+			WL_ERR("Could not get bss info %d\n", err);
 			goto update_bss_info_out;
 		}
 		bi = (struct wl_bss_info *)(wl->extra_buf + 4);
-		if (unlikely(memcmp(&bi->BSSID, &wl->bssid, ETHER_ADDR_LEN))) {
+		if (unlikely(memcmp(&bi->BSSID, &wl->bssid, ETH_ALEN))) {
 			err = -EIO;
 			goto update_bss_info_out;
 		}
@@ -2596,7 +2591,7 @@
 		ie_len = bi->ie_length;
 		beacon_interval = cpu_to_le16(bi->beacon_period);
 	} else {
-		WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
+		WL_DBG("Found the AP in the list - BSSID %pM\n", bss->bssid);
 		ie = bss->information_elements;
 		ie_len = bss->len_information_elements;
 		beacon_interval = bss->beacon_interval;
@@ -2615,7 +2610,7 @@
 		err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_DTIMPRD,
 			&dtim_period, sizeof(dtim_period));
 		if (unlikely(err)) {
-			WL_ERR(("WLC_GET_DTIMPRD error (%d)\n", err));
+			WL_ERR("WLC_GET_DTIMPRD error (%d)\n", err);
 			goto update_bss_info_out;
 		}
 	}
@@ -2636,13 +2631,13 @@
 	s32 err = 0;
 
 	wl_get_assoc_ies(wl);
-	memcpy(&wl->bssid, &e->addr, ETHER_ADDR_LEN);
+	memcpy(&wl->bssid, &e->addr, ETH_ALEN);
 	wl_update_bss_info(wl);
 	cfg80211_roamed(ndev,
 			(u8 *)&wl->bssid,
 			conn_info->req_ie, conn_info->req_ie_len,
 			conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
-	WL_DBG(("Report roaming result\n"));
+	WL_DBG("Report roaming result\n");
 
 	set_bit(WL_STATUS_CONNECTED, &wl->status);
 
@@ -2657,7 +2652,7 @@
 	s32 err = 0;
 
 	wl_get_assoc_ies(wl);
-	memcpy(&wl->bssid, &e->addr, ETHER_ADDR_LEN);
+	memcpy(&wl->bssid, &e->addr, ETH_ALEN);
 	wl_update_bss_info(wl);
 	if (test_and_clear_bit(WL_STATUS_CONNECTING, &wl->status)) {
 		cfg80211_connect_result(ndev,
@@ -2668,15 +2663,15 @@
 					conn_info->resp_ie_len,
 					completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
 					GFP_KERNEL);
-		WL_DBG(("Report connect result - connection %s\n",
-			completed ? "succeeded" : "failed"));
+		WL_DBG("Report connect result - connection %s\n",
+		       completed ? "succeeded" : "failed");
 	} else {
 		cfg80211_roamed(ndev,
 				(u8 *)&wl->bssid,
 				conn_info->req_ie, conn_info->req_ie_len,
 				conn_info->resp_ie, conn_info->resp_ie_len,
 				GFP_KERNEL);
-		WL_DBG(("Report roaming result\n"));
+		WL_DBG("Report roaming result\n");
 	}
 	set_bit(WL_STATUS_CONNECTED, &wl->status);
 
@@ -2716,7 +2711,7 @@
 		return wl_wakeup_iscan(wl_to_iscan(wl));
 
 	if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
-		WL_ERR(("Scan complete while device not scanning\n"));
+		WL_ERR("Scan complete while device not scanning\n");
 		return -EINVAL;
 	}
 	if (unlikely(!wl->scan_request)) {
@@ -2725,14 +2720,14 @@
 	err = wl_dev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
 			sizeof(channel_inform));
 	if (unlikely(err)) {
-		WL_ERR(("scan busy (%d)\n", err));
+		WL_ERR("scan busy (%d)\n", err);
 		goto scan_done_out;
 	}
 	channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
 	if (unlikely(channel_inform.scan_channel)) {
 
-		WL_DBG(("channel_inform.scan_channel (%d)\n",
-			channel_inform.scan_channel));
+		WL_DBG("channel_inform.scan_channel (%d)\n",
+		       channel_inform.scan_channel);
 	}
 	wl->bss_list = wl->scan_results;
 	bss_list = wl->bss_list;
@@ -2740,7 +2735,7 @@
 	bss_list->buflen = htod32(len);
 	err = wl_dev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len);
 	if (unlikely(err)) {
-		WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+		WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
 		err = -EINVAL;
 		goto scan_done_out;
 	}
@@ -2794,55 +2789,54 @@
 
 static s32 wl_init_priv_mem(struct wl_priv *wl)
 {
-	wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+	wl->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
 	if (unlikely(!wl->scan_results)) {
-		WL_ERR(("Scan results alloc failed\n"));
+		WL_ERR("Scan results alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
+	wl->conf = kzalloc(sizeof(*wl->conf), GFP_KERNEL);
 	if (unlikely(!wl->conf)) {
-		WL_ERR(("wl_conf alloc failed\n"));
+		WL_ERR("wl_conf alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->profile = (void *)kzalloc(sizeof(*wl->profile), GFP_KERNEL);
+	wl->profile = kzalloc(sizeof(*wl->profile), GFP_KERNEL);
 	if (unlikely(!wl->profile)) {
-		WL_ERR(("wl_profile alloc failed\n"));
+		WL_ERR("wl_profile alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->bss_info = (void *)kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+	wl->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
 	if (unlikely(!wl->bss_info)) {
-		WL_ERR(("Bss information alloc failed\n"));
+		WL_ERR("Bss information alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->scan_req_int =
-	    (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
+	wl->scan_req_int = kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
 	if (unlikely(!wl->scan_req_int)) {
-		WL_ERR(("Scan req alloc failed\n"));
+		WL_ERR("Scan req alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->ioctl_buf = (void *)kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
+	wl->ioctl_buf = kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
 	if (unlikely(!wl->ioctl_buf)) {
-		WL_ERR(("Ioctl buf alloc failed\n"));
+		WL_ERR("Ioctl buf alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+	wl->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
 	if (unlikely(!wl->extra_buf)) {
-		WL_ERR(("Extra buf alloc failed\n"));
+		WL_ERR("Extra buf alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
+	wl->iscan = kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
 	if (unlikely(!wl->iscan)) {
-		WL_ERR(("Iscan buf alloc failed\n"));
+		WL_ERR("Iscan buf alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->fw = (void *)kzalloc(sizeof(*wl->fw), GFP_KERNEL);
+	wl->fw = kzalloc(sizeof(*wl->fw), GFP_KERNEL);
 	if (unlikely(!wl->fw)) {
-		WL_ERR(("fw object alloc failed\n"));
+		WL_ERR("fw object alloc failed\n");
 		goto init_priv_mem_out;
 	}
-	wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
+	wl->pmk_list = kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
 	if (unlikely(!wl->pmk_list)) {
-		WL_ERR(("pmk list alloc failed\n"));
+		WL_ERR("pmk list alloc failed\n");
 		goto init_priv_mem_out;
 	}
 
@@ -2884,7 +2878,7 @@
 	wl->event_tsk = kthread_run(wl_event_handler, wl, "wl_event_handler");
 	if (IS_ERR(wl->event_tsk)) {
 		wl->event_tsk = NULL;
-		WL_ERR(("failed to create event thread\n"));
+		WL_ERR("failed to create event thread\n");
 		return -ENOMEM;
 	}
 	return 0;
@@ -2917,7 +2911,7 @@
 	struct net_device *ndev = wl_to_ndev(wl);
 
 	if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
-		WL_ERR(("Scan complete while device not scanning\n"));
+		WL_ERR("Scan complete while device not scanning\n");
 		return;
 	}
 	if (likely(wl->scan_request)) {
@@ -2931,7 +2925,7 @@
 static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
 {
 	if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
-		WL_DBG(("wake up iscan\n"));
+		WL_DBG("wake up iscan\n");
 		up(&iscan->sync);
 		return 0;
 	}
@@ -2961,14 +2955,14 @@
 				WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
 				WL_ISCAN_BUF_MAX);
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 		return err;
 	}
 	results->buflen = dtoh32(results->buflen);
 	results->version = dtoh32(results->version);
 	results->count = dtoh32(results->count);
-	WL_DBG(("results->count = %d\n", results->count));
-	WL_DBG(("results->buflen = %d\n", results->buflen));
+	WL_DBG("results->count = %d\n", results->count);
+	WL_DBG("results->buflen = %d\n", results->buflen);
 	*status = dtoh32(list_buf->status);
 	*bss_list = results;
 
@@ -3053,7 +3047,7 @@
 		err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
 		if (unlikely(err)) {
 			status = WL_SCAN_RESULTS_ABORTED;
-			WL_ERR(("Abort iscan\n"));
+			WL_ERR("Abort iscan\n");
 		}
 		rtnl_unlock();
 		el->handler[status] (wl);
@@ -3062,7 +3056,7 @@
 		del_timer_sync(&iscan->timer);
 		iscan->timer_on = 0;
 	}
-	WL_DBG(("%s was terminated\n", __func__));
+	WL_DBG("%s was terminated\n", __func__);
 
 	return 0;
 }
@@ -3073,7 +3067,7 @@
 
 	if (iscan) {
 		iscan->timer_on = 0;
-		WL_DBG(("timer expired\n"));
+		WL_DBG("timer expired\n");
 		wl_wakeup_iscan(iscan);
 	}
 }
@@ -3088,7 +3082,7 @@
 		sema_init(&iscan->sync, 0);
 		iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
 		if (IS_ERR(iscan->tsk)) {
-			WL_ERR(("Could not create iscan thread\n"));
+			WL_ERR("Could not create iscan thread\n");
 			iscan->tsk = NULL;
 			return -ENOMEM;
 		}
@@ -3123,7 +3117,7 @@
 		sema_init(&iscan->sync, 0);
 		iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
 		if (IS_ERR(iscan->tsk)) {
-			WL_ERR(("Could not create iscan thread\n"));
+			WL_ERR("Could not create iscan thread\n");
 			iscan->tsk = NULL;
 			return -ENOMEM;
 		}
@@ -3192,17 +3186,17 @@
 	s32 err = 0;
 
 	if (unlikely(!ndev)) {
-		WL_ERR(("ndev is invaild\n"));
+		WL_ERR("ndev is invalid\n");
 		return -ENODEV;
 	}
 	wl_cfg80211_dev = kzalloc(sizeof(struct wl_dev), GFP_KERNEL);
 	if (unlikely(!wl_cfg80211_dev)) {
-		WL_ERR(("wl_cfg80211_dev is invalid\n"));
+		WL_ERR("wl_cfg80211_dev is invalid\n");
 		return -ENOMEM;
 	}
-	WL_DBG(("func %p\n", wl_cfg80211_get_sdio_func()));
+	WL_DBG("func %p\n", wl_cfg80211_get_sdio_func());
 	wdev = wl_alloc_wdev(sizeof(struct wl_iface), &wl_cfg80211_get_sdio_func()->dev);
-	if (unlikely(IS_ERR(wdev)))
+	if (IS_ERR(wdev))
 		return -ENOMEM;
 
 	wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
@@ -3216,7 +3210,7 @@
 	wdev->netdev = ndev;
 	err = wl_init_priv(wl);
 	if (unlikely(err)) {
-		WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+		WL_ERR("Failed to init iwm_priv (%d)\n", err);
 		goto cfg80211_attach_out;
 	}
 	wl_set_drvdata(wl_cfg80211_dev, ci);
@@ -3261,19 +3255,19 @@
 			break;
 		e = wl_deq_event(wl);
 		if (unlikely(!e)) {
-			WL_ERR(("eqeue empty..\n"));
+			WL_ERR("event queue empty...\n");
 			BUG();
 		}
-		WL_DBG(("event type (%d)\n", e->etype));
+		WL_DBG("event type (%d)\n", e->etype);
 		if (wl->el.handler[e->etype]) {
 			wl->el.handler[e->etype] (wl, wl_to_ndev(wl), &e->emsg,
 						  e->edata);
 		} else {
-			WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+			WL_DBG("Unknown Event (%d): ignoring\n", e->etype);
 		}
 		wl_put_event(e);
 	}
-	WL_DBG(("%s was terminated\n", __func__));
+	WL_DBG("%s was terminated\n", __func__);
 	return 0;
 }
 
@@ -3286,7 +3280,7 @@
 	s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
 	    wl_dbg_estr[event_type] : (s8 *) "Unknown";
 #endif				/* (WL_DBG_LEVEL > 0) */
-	WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
+	WL_DBG("event_type (%d):" "WLC_E_" "%s\n", event_type, estr);
 	if (likely(!wl_enq_event(wl, event_type, e, data)))
 		wl_wakeup_event(wl);
 }
@@ -3341,7 +3335,7 @@
 
 	e = kzalloc(sizeof(struct wl_event_q), GFP_KERNEL);
 	if (unlikely(!e)) {
-		WL_ERR(("event alloc failed\n"));
+		WL_ERR("event alloc failed\n");
 		return -ENOMEM;
 	}
 
@@ -3385,8 +3379,8 @@
 	switch (iftype) {
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_WDS:
-		WL_ERR(("type (%d) : currently we do not support this mode\n",
-			iftype));
+		WL_ERR("type (%d) : currently we do not support this mode\n",
+		       iftype);
 		err = -EINVAL;
 		return err;
 	case NL80211_IFTYPE_ADHOC:
@@ -3396,20 +3390,20 @@
 		break;
 	default:
 		err = -EINVAL;
-		WL_ERR(("invalid type (%d)\n", iftype));
+		WL_ERR("invalid type (%d)\n", iftype);
 		return err;
 	}
 	infra = htod32(infra);
 	ap = htod32(ap);
-	WL_DBG(("%s ap (%d), infra (%d)\n", ndev->name, ap, infra));
+	WL_DBG("%s ap (%d), infra (%d)\n", ndev->name, ap, infra);
 	err = wl_dev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+		WL_ERR("WLC_SET_INFRA error (%d)\n", err);
 		return err;
 	}
 	err = wl_dev_ioctl(ndev, WLC_SET_AP, &ap, sizeof(ap));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_AP error (%d)\n", err));
+		WL_ERR("WLC_SET_AP error (%d)\n", err);
 		return err;
 	}
 
@@ -3431,7 +3425,7 @@
 
 	err = wl_dev_ioctl(ndev, WLC_UP, &up, sizeof(up));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_UP error (%d)\n", err));
+		WL_ERR("WLC_UP error (%d)\n", err);
 	}
 	return err;
 }
@@ -3442,7 +3436,7 @@
 
 	err = wl_dev_ioctl(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode));
 	if (unlikely(err)) {
-		WL_ERR(("WLC_SET_PM error (%d)\n", err));
+		WL_ERR("WLC_SET_PM error (%d)\n", err);
 	}
 	return err;
 }
@@ -3459,14 +3453,14 @@
 		    sizeof(iovbuf));
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (unlikely(err)) {
-		WL_ERR(("txglomalign error (%d)\n", err));
+		WL_ERR("txglomalign error (%d)\n", err);
 		goto dongle_glom_out;
 	}
 	/* disable glom option per default */
 	bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (unlikely(err)) {
-		WL_ERR(("txglom error (%d)\n", err));
+		WL_ERR("txglom error (%d)\n", err);
 		goto dongle_glom_out;
 	}
 dongle_glom_out:
@@ -3487,7 +3481,7 @@
 			    sizeof(iovbuf));
 		err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 		if (unlikely(err)) {
-			WL_ERR(("bcn_timeout error (%d)\n", err));
+			WL_ERR("bcn_timeout error (%d)\n", err);
 			goto dongle_rom_out;
 		}
 	}
@@ -3496,7 +3490,7 @@
 	bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (unlikely(err)) {
-		WL_ERR(("roam_off error (%d)\n", err));
+		WL_ERR("roam_off error (%d)\n", err);
 		goto dongle_rom_out;
 	}
 dongle_rom_out:
@@ -3516,7 +3510,7 @@
 		    sizeof(iovbuf));
 	err = wl_dev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
 	if (unlikely(err)) {
-		WL_ERR(("Get event_msgs error (%d)\n", err));
+		WL_ERR("Get event_msgs error (%d)\n", err);
 		goto dongle_eventmsg_out;
 	}
 	memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
@@ -3544,7 +3538,7 @@
 		    sizeof(iovbuf));
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (unlikely(err)) {
-		WL_ERR(("Set event_msgs error (%d)\n", err));
+		WL_ERR("Set event_msgs error (%d)\n", err);
 		goto dongle_eventmsg_out;
 	}
 
@@ -3562,9 +3556,9 @@
 			sizeof(scan_assoc_time));
 	if (err) {
 		if (err == -EOPNOTSUPP) {
-			WL_INFO(("Scan assoc time is not supported\n"));
+			WL_INFO("Scan assoc time is not supported\n");
 		} else {
-			WL_ERR(("Scan assoc time error (%d)\n", err));
+			WL_ERR("Scan assoc time error (%d)\n", err);
 		}
 		goto dongle_scantime_out;
 	}
@@ -3572,9 +3566,9 @@
 			sizeof(scan_unassoc_time));
 	if (err) {
 		if (err == -EOPNOTSUPP) {
-			WL_INFO(("Scan unassoc time is not supported\n"));
+			WL_INFO("Scan unassoc time is not supported\n");
 		} else {
-			WL_ERR(("Scan unassoc time error (%d)\n", err));
+			WL_ERR("Scan unassoc time error (%d)\n", err);
 		}
 		goto dongle_scantime_out;
 	}
@@ -3595,9 +3589,9 @@
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (err) {
 		if (err == -EOPNOTSUPP)
-			WL_INFO(("arpoe is not supported\n"));
+			WL_INFO("arpoe is not supported\n");
 		else
-			WL_ERR(("arpoe error (%d)\n", err));
+			WL_ERR("arpoe error (%d)\n", err);
 
 		goto dongle_offload_out;
 	}
@@ -3605,9 +3599,9 @@
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (err) {
 		if (err == -EOPNOTSUPP)
-			WL_INFO(("arp_ol is not supported\n"));
+			WL_INFO("arp_ol is not supported\n");
 		else
-			WL_ERR(("arp_ol error (%d)\n", err));
+			WL_ERR("arp_ol error (%d)\n", err);
 
 		goto dongle_offload_out;
 	}
@@ -3620,12 +3614,12 @@
 {
 	int i;
 	if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
-		WL_ERR(("Mask invalid format. Needs to start with 0x\n"));
+		WL_ERR("Mask invalid format. Needs to start with 0x\n");
 		return -1;
 	}
 	src = src + 2;		/* Skip past 0x */
 	if (strlen(src) % 2 != 0) {
-		WL_ERR(("Mask invalid format. Needs to be of even length\n"));
+		WL_ERR("Mask invalid format. Needs to be of even length\n");
 		return -1;
 	}
 	for (i = 0; *src != '\0'; i++) {
@@ -3684,7 +3678,7 @@
 					      mask_and_pattern[mask_size]));
 
 	if (mask_size != pattern_size) {
-		WL_ERR(("Mask and pattern not the same size\n"));
+		WL_ERR("Mask and pattern not the same size\n");
 		err = -EINVAL;
 		goto dongle_filter_out;
 	}
@@ -3704,9 +3698,9 @@
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, buf, buf_len);
 	if (err) {
 		if (err == -EOPNOTSUPP) {
-			WL_INFO(("filter not supported\n"));
+			WL_INFO("filter not supported\n");
 		} else {
-			WL_ERR(("filter (%d)\n", err));
+			WL_ERR("filter (%d)\n", err);
 		}
 		goto dongle_filter_out;
 	}
@@ -3717,9 +3711,9 @@
 	err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
 	if (err) {
 		if (err == -EOPNOTSUPP) {
-			WL_INFO(("filter_mode not supported\n"));
+			WL_INFO("filter_mode not supported\n");
 		} else {
-			WL_ERR(("filter_mode (%d)\n", err));
+			WL_ERR("filter_mode (%d)\n", err);
 		}
 		goto dongle_filter_out;
 	}
@@ -3800,12 +3794,12 @@
 	err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_PHYLIST, &phy_list,
 			sizeof(phy_list));
 	if (unlikely(err)) {
-		WL_ERR(("error (%d)\n", err));
+		WL_ERR("error (%d)\n", err);
 		return err;
 	}
 
 	phy = ((char *)&phy_list)[1];
-	WL_DBG(("%c phy\n", phy));
+	WL_DBG("%c phy\n", phy);
 	if (phy == 'n' || phy == 'a') {
 		wiphy = wl_to_wiphy(wl);
 		wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
@@ -3911,7 +3905,7 @@
 	case WL_PROF_SSID:
 		return &wl->profile->ssid;
 	}
-	WL_ERR(("invalid item (%d)\n", item));
+	WL_ERR("invalid item (%d)\n", item);
 	return NULL;
 }
 
@@ -3932,9 +3926,9 @@
 		break;
 	case WL_PROF_BSSID:
 		if (data)
-			memcpy(wl->profile->bssid, data, ETHER_ADDR_LEN);
+			memcpy(wl->profile->bssid, data, ETH_ALEN);
 		else
-			memset(wl->profile->bssid, 0, ETHER_ADDR_LEN);
+			memset(wl->profile->bssid, 0, ETH_ALEN);
 		break;
 	case WL_PROF_SEC:
 		memcpy(&wl->profile->sec, data, sizeof(wl->profile->sec));
@@ -3949,7 +3943,7 @@
 		wl->profile->dtim_period = *(u8 *)data;
 		break;
 	default:
-		WL_ERR(("unsupported item (%d)\n", item));
+		WL_ERR("unsupported item (%d)\n", item);
 		err = -EOPNOTSUPP;
 		break;
 	}
@@ -3991,7 +3985,7 @@
 	s32 err = 0;
 
 	if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
-		WL_ERR(("ei crosses buffer boundary\n"));
+		WL_ERR("ei crosses buffer boundary\n");
 		return -ENOSPC;
 	}
 	ie->buf[ie->offset] = t;
@@ -4008,7 +4002,7 @@
 	s32 err = 0;
 
 	if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
-		WL_ERR(("ei_stream crosses buffer boundary\n"));
+		WL_ERR("ei_stream crosses buffer boundary\n");
 		return -ENOSPC;
 	}
 	memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
@@ -4023,7 +4017,7 @@
 	s32 err = 0;
 
 	if (unlikely(ie->offset > dst_size)) {
-		WL_ERR(("dst_size is not enough\n"));
+		WL_ERR("dst_size is not enough\n");
 		return -ENOSPC;
 	}
 	memcpy(dst, &ie->buf[0], ie->offset);
@@ -4123,37 +4117,37 @@
 	const struct firmware *fw_entry = NULL;
 	s32 err = 0;
 
-	WL_DBG(("file name : \"%s\"\n", file_name));
+	WL_DBG("file name : \"%s\"\n", file_name);
 	wl = WL_PRIV_GET();
 
 	if (!test_bit(WL_FW_LOADING_DONE, &wl->fw->status)) {
 		err = request_firmware(&wl->fw->fw_entry, file_name,
 				&wl_cfg80211_get_sdio_func()->dev);
 		if (unlikely(err)) {
-			WL_ERR(("Could not download fw (%d)\n", err));
+			WL_ERR("Could not download fw (%d)\n", err);
 			goto req_fw_out;
 		}
 		set_bit(WL_FW_LOADING_DONE, &wl->fw->status);
 		fw_entry = wl->fw->fw_entry;
 		if (fw_entry) {
-			WL_DBG(("fw size (%zd), data (%p)\n", fw_entry->size,
-				fw_entry->data));
+			WL_DBG("fw size (%zd), data (%p)\n",
+			       fw_entry->size, fw_entry->data);
 		}
 	} else if (!test_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status)) {
 		err = request_firmware(&wl->fw->fw_entry, file_name,
 				&wl_cfg80211_get_sdio_func()->dev);
 		if (unlikely(err)) {
-			WL_ERR(("Could not download nvram (%d)\n", err));
+			WL_ERR("Could not download nvram (%d)\n", err);
 			goto req_fw_out;
 		}
 		set_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status);
 		fw_entry = wl->fw->fw_entry;
 		if (fw_entry) {
-			WL_DBG(("nvram size (%zd), data (%p)\n", fw_entry->size,
-				fw_entry->data));
+			WL_DBG("nvram size (%zd), data (%p)\n",
+			       fw_entry->size, fw_entry->data);
 		}
 	} else {
-		WL_DBG(("Downloading already done. Nothing to do more\n"));
+		WL_DBG("Downloading already done. Nothing to do more\n");
 		err = -EPERM;
 	}
 
@@ -4189,10 +4183,10 @@
 
 	err = wl_dev_intvar_set(ndev, "mpc", mpc);
 	if (unlikely(err)) {
-		WL_ERR(("fail to set mpc\n"));
+		WL_ERR("fail to set mpc\n");
 		return;
 	}
-	WL_DBG(("MPC : %d\n", mpc));
+	WL_DBG("MPC : %d\n", mpc);
 }
 
 static int wl_debugfs_add_netdev_params(struct wl_priv *wl)
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
index 770e63f..482691b 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
@@ -54,34 +54,36 @@
 
 #define WL_DBG_LEVEL 1		/* 0 invalidates all debug messages.
 				 default is 1 */
-#define	WL_ERR(args)									\
-do {										\
-	if (wl_dbg_level & WL_DBG_ERR) {				\
-		if (net_ratelimit()) {						\
-			printk(KERN_ERR "ERROR @%s : ", __func__);	\
-			printk args;						\
-		} 								\
-	}									\
+#define	WL_ERR(fmt, args...)					\
+do {								\
+	if (wl_dbg_level & WL_DBG_ERR) {			\
+		if (net_ratelimit()) {				\
+			printk(KERN_ERR "ERROR @%s : " fmt,	\
+			       __func__, ##args);		\
+		}						\
+	}							\
 } while (0)
-#define	WL_INFO(args)									\
-do {										\
-	if (wl_dbg_level & WL_DBG_INFO) {				\
-		if (net_ratelimit()) {						\
-			printk(KERN_ERR "INFO @%s : ", __func__);	\
-			printk args;						\
-		}								\
-	}									\
+
+#define	WL_INFO(fmt, args...)					\
+do {								\
+	if (wl_dbg_level & WL_DBG_INFO) {			\
+		if (net_ratelimit()) {				\
+			printk(KERN_ERR "INFO @%s : " fmt,	\
+			       __func__, ##args);		\
+		}						\
+	}							\
 } while (0)
+
 #if (WL_DBG_LEVEL > 0)
-#define	WL_DBG(args)								\
-do {									\
+#define	WL_DBG(fmt, args...)					\
+do {								\
 	if (wl_dbg_level & WL_DBG_DBG) {			\
-		printk(KERN_ERR "DEBUG @%s :", __func__);	\
-		printk args;							\
-	}									\
+		printk(KERN_ERR "DEBUG @%s :" fmt,		\
+		       __func__, ##args);			\
+	}							\
 } while (0)
 #else				/* !(WL_DBG_LEVEL > 0) */
-#define	WL_DBG(args)
+#define	WL_DBG(fmt, args...) no_printk(fmt, ##args)
 #endif				/* (WL_DBG_LEVEL > 0) */
 
 #define WL_SCAN_RETRY_MAX	3	/* used for ibss scan */
@@ -237,7 +239,7 @@
 struct wl_profile {
 	u32 mode;
 	struct wlc_ssid ssid;
-	u8 bssid[ETHER_ADDR_LEN];
+	u8 bssid[ETH_ALEN];
 	u16 beacon_interval;
 	u8 dtim_period;
 	struct wl_security sec;
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index 979a494..db6e68e 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -15,8 +15,9 @@
  */
 
 #include <linux/kthread.h>
+#include <linux/semaphore.h>
 #include <bcmdefs.h>
-#include <linuxver.h>
+#include <linux/netdevice.h>
 #include <osl.h>
 #include <wlioctl.h>
 
@@ -31,20 +32,18 @@
 #include <dhd.h>
 #include <dhdioctl.h>
 
-typedef void wlc_info_t;
-typedef void wl_info_t;
 typedef const struct si_pub si_t;
 #include <wlioctl.h>
 
 #include <proto/ethernet.h>
 #include <dngl_stats.h>
 #include <dhd.h>
-#define WL_ERROR(x) printf x
-#define WL_TRACE(x)
-#define WL_ASSOC(x)
-#define WL_INFORM(x)
-#define WL_WSEC(x)
-#define WL_SCAN(x)
+
+#define WL_ERROR(fmt, args...)	printk(fmt, ##args)
+#define WL_TRACE(fmt, args...)	no_printk(fmt, ##args)
+#define WL_INFORM(fmt, args...)	no_printk(fmt, ##args)
+#define WL_WSEC(fmt, args...)	no_printk(fmt, ##args)
+#define WL_SCAN(fmt, args...)	no_printk(fmt, ##args)
 
 #include <wl_iw.h>
 
@@ -187,12 +186,12 @@
 	int ret = -EINVAL;
 
 	if (!dev) {
-		WL_ERROR(("%s: dev is null\n", __func__));
+		WL_ERROR("%s: dev is null\n", __func__);
 		return ret;
 	}
 
-	WL_INFORM(("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, "
-		"len:%d ,\n", __func__, current->pid, cmd, arg, len));
+	WL_INFORM("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d\n",
+		  __func__, current->pid, cmd, arg, len);
 
 	if (g_onoff == G_WLAN_SET_ON) {
 		memset(&ioc, 0, sizeof(ioc));
@@ -205,7 +204,7 @@
 
 		ret = dev_open(dev);
 		if (ret) {
-			WL_ERROR(("%s: Error dev_open: %d\n", __func__, ret));
+			WL_ERROR("%s: Error dev_open: %d\n", __func__, ret);
 			return ret;
 		}
 
@@ -214,7 +213,7 @@
 		ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
 		set_fs(fs);
 	} else {
-		WL_TRACE(("%s: call after driver stop : ignored\n", __func__));
+		WL_TRACE("%s: call after driver stop : ignored\n", __func__);
 	}
 	return ret;
 }
@@ -335,7 +334,7 @@
 	int error;
 	struct sockaddr bssid;
 
-	WL_TRACE(("%s: SIOCSIWCOMMIT\n", dev->name));
+	WL_TRACE("%s: SIOCSIWCOMMIT\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
 	if (error)
@@ -346,11 +345,11 @@
 	if (!ssid.SSID_len)
 		return 0;
 
-	bzero(&bssid, sizeof(struct sockaddr));
-	error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN);
+	memset(&bssid, 0, sizeof(struct sockaddr));
+	error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETH_ALEN);
 	if (error) {
-		WL_ERROR(("%s: WLC_REASSOC to %s failed \n", __func__,
-			  ssid.SSID));
+		WL_ERROR("%s: WLC_REASSOC to %s failed\n",
+			 __func__, ssid.SSID);
 		return error;
 	}
 
@@ -361,7 +360,7 @@
 wl_iw_get_name(struct net_device *dev,
 	       struct iw_request_info *info, char *cwrq, char *extra)
 {
-	WL_TRACE(("%s: SIOCGIWNAME\n", dev->name));
+	WL_TRACE("%s: SIOCGIWNAME\n", dev->name);
 
 	strcpy(cwrq, "IEEE 802.11-DS");
 
@@ -375,7 +374,7 @@
 	int error, chan;
 	uint sf = 0;
 
-	WL_TRACE(("\n %s %s: SIOCSIWFREQ\n", __func__, dev->name));
+	WL_TRACE("\n %s %s: SIOCSIWFREQ\n", __func__, dev->name);
 
 	if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
 		chan = fwrq->m;
@@ -410,7 +409,7 @@
 	channel_info_t ci;
 	int error;
 
-	WL_TRACE(("%s: SIOCGIWFREQ\n", dev->name));
+	WL_TRACE("%s: SIOCGIWFREQ\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci));
 	if (error)
@@ -427,7 +426,7 @@
 {
 	int infra = 0, ap = 0, error = 0;
 
-	WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
+	WL_TRACE("%s: SIOCSIWMODE\n", dev->name);
 
 	switch (*uwrq) {
 	case IW_MODE_MASTER:
@@ -462,7 +461,7 @@
 {
 	int error, infra = 0, ap = 0;
 
-	WL_TRACE(("%s: SIOCGIWMODE\n", dev->name));
+	WL_TRACE("%s: SIOCGIWMODE\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra));
 	if (error)
@@ -501,14 +500,14 @@
 	{30, 60, 90, 120, 180, 240, 270, 300}
 	};
 
-	WL_TRACE(("%s: SIOCGIWRANGE\n", dev->name));
+	WL_TRACE("%s: SIOCGIWRANGE\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
 
 	channels = kmalloc((MAXCHANNEL + 1) * 4, GFP_KERNEL);
 	if (!channels) {
-		WL_ERROR(("Could not alloc channels\n"));
+		WL_ERROR("Could not alloc channels\n");
 		return -ENOMEM;
 	}
 	list = (wl_u32_list_t *) channels;
@@ -684,14 +683,14 @@
 	struct sockaddr *addr = (struct sockaddr *)extra;
 	int i;
 
-	WL_TRACE(("%s: SIOCSIWSPY\n", dev->name));
+	WL_TRACE("%s: SIOCSIWSPY\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
 
 	iw->spy_num = min_t(int, ARRAY_SIZE(iw->spy_addr), dwrq->length);
 	for (i = 0; i < iw->spy_num; i++)
-		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETHER_ADDR_LEN);
+		memcpy(&iw->spy_addr[i], addr[i].sa_data, ETH_ALEN);
 	memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
 
 	return 0;
@@ -706,14 +705,14 @@
 	struct iw_quality *qual = (struct iw_quality *)&addr[iw->spy_num];
 	int i;
 
-	WL_TRACE(("%s: SIOCGIWSPY\n", dev->name));
+	WL_TRACE("%s: SIOCGIWSPY\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
 
 	dwrq->length = iw->spy_num;
 	for (i = 0; i < iw->spy_num; i++) {
-		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETHER_ADDR_LEN);
+		memcpy(addr[i].sa_data, &iw->spy_addr[i], ETH_ALEN);
 		addr[i].sa_family = AF_UNIX;
 		memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
 		iw->spy_qual[i].updated = 0;
@@ -751,8 +750,8 @@
 		join_params->params.chanspec_num =
 		    htod32(join_params->params.chanspec_num);
 
-		WL_TRACE(("%s  join_params->params.chanspec_list[0]= %X\n",
-			  __func__, join_params->params.chanspec_list[0]));
+		WL_TRACE("%s  join_params->params.chanspec_list[0]= %X\n",
+			 __func__, join_params->params.chanspec_list[0]);
 	}
 	return 1;
 }
@@ -765,16 +764,17 @@
 	wl_join_params_t join_params;
 	int join_params_size;
 
-	WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
+	WL_TRACE("%s: SIOCSIWAP\n", dev->name);
 
 	if (awrq->sa_family != ARPHRD_ETHER) {
-		WL_ERROR(("Invalid Header...sa_family\n"));
+		WL_ERROR("Invalid Header...sa_family\n");
 		return -EINVAL;
 	}
 
-	if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
+	if (is_broadcast_ether_addr(awrq->sa_data) ||
+	    is_zero_ether_addr(awrq->sa_data)) {
 		scb_val_t scbval;
-		bzero(&scbval, sizeof(scb_val_t));
+		memset(&scbval, 0, sizeof(scb_val_t));
 		(void)dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval,
 				    sizeof(scb_val_t));
 		return 0;
@@ -785,23 +785,23 @@
 
 	memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
 	join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
-	memcpy(&join_params.params.bssid, awrq->sa_data, ETHER_ADDR_LEN);
+	memcpy(&join_params.params.bssid, awrq->sa_data, ETH_ALEN);
 
-	WL_TRACE(("%s  target_channel=%d\n", __func__,
-		  g_wl_iw_params.target_channel));
+	WL_TRACE("%s  target_channel=%d\n",
+		 __func__, g_wl_iw_params.target_channel);
 	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params,
 			     &join_params_size);
 
 	error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params,
 				join_params_size);
 	if (error) {
-		WL_ERROR(("%s Invalid ioctl data=%d\n", __func__, error));
+		WL_ERROR("%s Invalid ioctl data=%d\n", __func__, error);
 	}
 
 	if (g_ssid.SSID_len) {
-		WL_TRACE(("%s: join SSID=%s BSSID=%pM ch=%d\n",
-			  __func__, g_ssid.SSID, awrq->sa_data,
-			  g_wl_iw_params.target_channel));
+		WL_TRACE("%s: join SSID=%s BSSID=%pM ch=%d\n",
+			 __func__, g_ssid.SSID, awrq->sa_data,
+			 g_wl_iw_params.target_channel);
 	}
 
 	memset(&g_ssid, 0, sizeof(g_ssid));
@@ -812,12 +812,12 @@
 wl_iw_get_wap(struct net_device *dev,
 	      struct iw_request_info *info, struct sockaddr *awrq, char *extra)
 {
-	WL_TRACE(("%s: SIOCGIWAP\n", dev->name));
+	WL_TRACE("%s: SIOCGIWAP\n", dev->name);
 
 	awrq->sa_family = ARPHRD_ETHER;
-	memset(awrq->sa_data, 0, ETHER_ADDR_LEN);
+	memset(awrq->sa_data, 0, ETH_ALEN);
 
-	(void)dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETHER_ADDR_LEN);
+	(void)dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETH_ALEN);
 
 	return 0;
 }
@@ -831,16 +831,16 @@
 	scb_val_t scbval;
 	int error = -EINVAL;
 
-	WL_TRACE(("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name));
+	WL_TRACE("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name);
 
 	mlme = (struct iw_mlme *)extra;
 	if (mlme == NULL) {
-		WL_ERROR(("Invalid ioctl data.\n"));
+		WL_ERROR("Invalid ioctl data\n");
 		return error;
 	}
 
 	scbval.val = mlme->reason_code;
-	bcopy(&mlme->addr.sa_data, &scbval.ea, ETHER_ADDR_LEN);
+	bcopy(&mlme->addr.sa_data, &scbval.ea, ETH_ALEN);
 
 	if (mlme->cmd == IW_MLME_DISASSOC) {
 		scbval.val = htod32(scbval.val);
@@ -853,7 +853,7 @@
 		    dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
 				  &scbval, sizeof(scb_val_t));
 	} else {
-		WL_ERROR(("Invalid ioctl data.\n"));
+		WL_ERROR("Invalid ioctl data\n");
 		return error;
 	}
 
@@ -874,7 +874,7 @@
 	int error, i;
 	uint buflen = dwrq->length;
 
-	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+	WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
@@ -886,7 +886,7 @@
 	list->buflen = htod32(buflen);
 	error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen);
 	if (error) {
-		WL_ERROR(("%d: Scan results error %d\n", __LINE__, error));
+		WL_ERROR("%d: Scan results error %d\n", __LINE__, error);
 		kfree(list);
 		return error;
 	}
@@ -894,8 +894,8 @@
 	list->version = dtoh32(list->version);
 	list->count = dtoh32(list->count);
 	if (list->version != WL_BSS_INFO_VERSION) {
-		WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
-			  __func__, list->version));
+		WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+			 __func__, list->version);
 		kfree(list);
 		return -EINVAL;
 	}
@@ -911,7 +911,7 @@
 		if (!(dtoh16(bi->capability) & DOT11_CAP_ESS))
 			continue;
 
-		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETH_ALEN);
 		addr[dwrq->length].sa_family = ARPHRD_ETHER;
 		qual[dwrq->length].qual = rssi_to_qual(dtoh16(bi->RSSI));
 		qual[dwrq->length].level = 0x100 + dtoh16(bi->RSSI);
@@ -952,13 +952,13 @@
 	wl_bss_info_t *bi = NULL;
 	int i;
 
-	WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
+	WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
 
 	if ((!iscan) || (!iscan->sysioc_tsk)) {
-		WL_ERROR(("%s error\n", __func__));
+		WL_ERROR("%s error\n", __func__);
 		return 0;
 	}
 
@@ -966,9 +966,8 @@
 	while (buf) {
 		list = &((wl_iscan_results_t *) buf->iscan_buf)->results;
 		if (list->version != WL_BSS_INFO_VERSION) {
-			WL_ERROR(("%s : list->version %d != "
-				"WL_BSS_INFO_VERSION\n",
-				__func__, list->version));
+			WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+				 __func__, list->version);
 			return -EINVAL;
 		}
 
@@ -985,7 +984,7 @@
 				continue;
 
 			memcpy(addr[dwrq->length].sa_data, &bi->BSSID,
-			       ETHER_ADDR_LEN);
+			       ETH_ALEN);
 			addr[dwrq->length].sa_family = ARPHRD_ETHER;
 			qual[dwrq->length].qual =
 			    rssi_to_qual(dtoh16(bi->RSSI));
@@ -1016,7 +1015,7 @@
 {
 	int err = 0;
 
-	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+	memcpy(&params->bssid, &ether_bcast, ETH_ALEN);
 	params->bss_type = DOT11_BSSTYPE_ANY;
 	params->scan_type = 0;
 	params->nprobes = -1;
@@ -1043,15 +1042,15 @@
 	iscan->iscan_ex_params_p->action = htod16(action);
 	iscan->iscan_ex_params_p->scan_duration = htod16(0);
 
-	WL_SCAN(("%s : nprobes=%d\n", __func__,
-		 iscan->iscan_ex_params_p->params.nprobes));
-	WL_SCAN(("active_time=%d\n",
-		 iscan->iscan_ex_params_p->params.active_time));
-	WL_SCAN(("passive_time=%d\n",
-		 iscan->iscan_ex_params_p->params.passive_time));
-	WL_SCAN(("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time));
-	WL_SCAN(("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type));
-	WL_SCAN(("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type));
+	WL_SCAN("%s : nprobes=%d\n",
+		__func__, iscan->iscan_ex_params_p->params.nprobes);
+	WL_SCAN("active_time=%d\n",
+		iscan->iscan_ex_params_p->params.active_time);
+	WL_SCAN("passive_time=%d\n",
+		iscan->iscan_ex_params_p->params.passive_time);
+	WL_SCAN("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time);
+	WL_SCAN("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type);
+	WL_SCAN("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type);
 
 	(void)dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p,
 				  iscan->iscan_ex_param_size, iscan->ioctlbuf,
@@ -1066,7 +1065,7 @@
 	if (iscan) {
 		iscan->timer_on = 0;
 		if (iscan->iscan_state != ISCAN_STATE_IDLE) {
-			WL_TRACE(("timer trigger\n"));
+			WL_TRACE("timer trigger\n");
 			up(&iscan->sysioc_sem);
 		}
 	}
@@ -1101,8 +1100,8 @@
 	} else {
 		buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
 		if (!buf) {
-			WL_ERROR(("%s can't alloc iscan_buf_t : going to abort "
-				"currect iscan\n", __func__));
+			WL_ERROR("%s can't alloc iscan_buf_t : going to abort current iscan\n",
+				 __func__);
 			MUTEX_UNLOCK_WL_SCAN_SET();
 			return WL_SCAN_RESULTS_NO_MEM;
 		}
@@ -1135,11 +1134,11 @@
 		results->buflen = dtoh32(results->buflen);
 		results->version = dtoh32(results->version);
 		results->count = dtoh32(results->count);
-		WL_TRACE(("results->count = %d\n", results->count));
-		WL_TRACE(("results->buflen = %d\n", results->buflen));
+		WL_TRACE("results->count = %d\n", results->count);
+		WL_TRACE("results->buflen = %d\n", results->buflen);
 		status = dtoh32(list_buf->status);
 	} else {
-		WL_ERROR(("%s returns error %d\n", __func__, res));
+		WL_ERROR("%s returns error %d\n", __func__, res);
 		status = WL_SCAN_RESULTS_NO_MEM;
 	}
 	MUTEX_UNLOCK_WL_SCAN_SET();
@@ -1148,8 +1147,8 @@
 
 static void wl_iw_force_specific_scan(iscan_info_t *iscan)
 {
-	WL_TRACE(("%s force Specific SCAN for %s\n", __func__,
-		  g_specific_ssid.SSID));
+	WL_TRACE("%s force Specific SCAN for %s\n",
+		 __func__, g_specific_ssid.SSID);
 	rtnl_lock();
 
 	(void)dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid,
@@ -1166,7 +1165,7 @@
 	memset(&wrqu, 0, sizeof(wrqu));
 
 	wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
-	WL_TRACE(("Send Event ISCAN complete\n"));
+	WL_TRACE("Send Event ISCAN complete\n");
 #endif
 }
 
@@ -1190,8 +1189,8 @@
 		status = wl_iw_iscan_get(iscan);
 		rtnl_unlock();
 		if (g_scan_specified_ssid && (iscan_pass_abort == true)) {
-			WL_TRACE(("%s Get results from specific scan "
-				"status = %d\n", __func__, status));
+			WL_TRACE("%s Get results from specific scan status = %d\n",
+				 __func__, status);
 			wl_iw_send_scan_complete(iscan);
 			iscan_pass_abort = false;
 			status = -1;
@@ -1199,7 +1198,7 @@
 
 		switch (status) {
 		case WL_SCAN_RESULTS_PARTIAL:
-			WL_TRACE(("iscanresults incomplete\n"));
+			WL_TRACE("iscanresults incomplete\n");
 			rtnl_lock();
 			wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
 			rtnl_unlock();
@@ -1208,18 +1207,18 @@
 			iscan->timer_on = 1;
 			break;
 		case WL_SCAN_RESULTS_SUCCESS:
-			WL_TRACE(("iscanresults complete\n"));
+			WL_TRACE("iscanresults complete\n");
 			iscan->iscan_state = ISCAN_STATE_IDLE;
 			wl_iw_send_scan_complete(iscan);
 			break;
 		case WL_SCAN_RESULTS_PENDING:
-			WL_TRACE(("iscanresults pending\n"));
+			WL_TRACE("iscanresults pending\n");
 			mod_timer(&iscan->timer,
 				  jiffies + iscan->timer_ms * HZ / 1000);
 			iscan->timer_on = 1;
 			break;
 		case WL_SCAN_RESULTS_ABORTED:
-			WL_TRACE(("iscanresults aborted\n"));
+			WL_TRACE("iscanresults aborted\n");
 			iscan->iscan_state = ISCAN_STATE_IDLE;
 			if (g_scan_specified_ssid == 0)
 				wl_iw_send_scan_complete(iscan);
@@ -1229,12 +1228,12 @@
 			}
 			break;
 		case WL_SCAN_RESULTS_NO_MEM:
-			WL_TRACE(("iscanresults can't alloc memory: skip\n"));
+			WL_TRACE("iscanresults can't alloc memory: skip\n");
 			iscan->iscan_state = ISCAN_STATE_IDLE;
 			break;
 		default:
-			WL_TRACE(("iscanresults returned unknown status %d\n",
-				  status));
+			WL_TRACE("iscanresults returned unknown status %d\n",
+				 status);
 			break;
 		}
 	}
@@ -1253,11 +1252,11 @@
 	       union iwreq_data *wrqu, char *extra)
 {
 	int error;
-	WL_TRACE(("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __func__, dev->name));
+	WL_TRACE("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __func__, dev->name);
 
 	g_set_essid_before_scan = false;
 #if defined(CSCAN)
-	WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __func__));
+	WL_ERROR("%s: Scan from SIOCGIWSCAN not supported\n", __func__);
 	return -EINVAL;
 #endif
 
@@ -1274,9 +1273,8 @@
 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
 			struct iw_scan_req *req = (struct iw_scan_req *)extra;
 			if (g_scan_specified_ssid) {
-				WL_TRACE(("%s Specific SCAN is not done ignore "
-					"scan for = %s\n",
-					__func__, req->essid));
+				WL_TRACE("%s Specific SCAN is not done ignore scan for = %s\n",
+					 __func__, req->essid);
 				return -EBUSY;
 			} else {
 				g_specific_ssid.SSID_len = min_t(size_t,
@@ -1287,9 +1285,9 @@
 				g_specific_ssid.SSID_len =
 				    htod32(g_specific_ssid.SSID_len);
 				g_scan_specified_ssid = 1;
-				WL_TRACE(("### Specific scan ssid=%s len=%d\n",
-					  g_specific_ssid.SSID,
-					  g_specific_ssid.SSID_len));
+				WL_TRACE("### Specific scan ssid=%s len=%d\n",
+					 g_specific_ssid.SSID,
+					 g_specific_ssid.SSID_len);
 			}
 		}
 	}
@@ -1297,8 +1295,8 @@
 	error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid,
 				sizeof(g_specific_ssid));
 	if (error) {
-		WL_TRACE(("#### Set SCAN for %s failed with %d\n",
-			  g_specific_ssid.SSID, error));
+		WL_TRACE("#### Set SCAN for %s failed with %d\n",
+			 g_specific_ssid.SSID, error);
 		g_scan_specified_ssid = 0;
 		return -EBUSY;
 	}
@@ -1317,7 +1315,7 @@
 
 	wl_iw_set_event_mask(dev);
 
-	WL_TRACE(("+++: Set Broadcast ISCAN\n"));
+	WL_TRACE("+++: Set Broadcast ISCAN\n");
 	memset(&ssid, 0, sizeof(ssid));
 
 	iscan->list_cur = iscan->list_hdr;
@@ -1346,20 +1344,20 @@
 	wlc_ssid_t ssid;
 	iscan_info_t *iscan = g_iscan;
 
-	WL_TRACE(("%s: SIOCSIWSCAN : ISCAN\n", dev->name));
+	WL_TRACE("%s: SIOCSIWSCAN : ISCAN\n", dev->name);
 
 #if defined(CSCAN)
-	WL_ERROR(("%s: Scan from SIOCGIWSCAN not supported\n", __func__));
+	WL_ERROR("%s: Scan from SIOCGIWSCAN not supported\n", __func__);
 	return -EINVAL;
 #endif
 
 	if (g_onoff == G_WLAN_SET_OFF) {
-		WL_TRACE(("%s: driver is not up yet after START\n", __func__));
+		WL_TRACE("%s: driver is not up yet after START\n", __func__);
 		return 0;
 	}
 #ifdef PNO_SUPPORT
 	if (dhd_dev_get_pno_status(dev)) {
-		WL_ERROR(("%s: Scan called when PNO is active\n", __func__));
+		WL_ERROR("%s: Scan called when PNO is active\n", __func__);
 	}
 #endif
 
@@ -1367,8 +1365,8 @@
 		return wl_iw_set_scan(dev, info, wrqu, extra);
 
 	if (g_scan_specified_ssid) {
-		WL_TRACE(("%s Specific SCAN already running ignoring BC scan\n",
-			  __func__));
+		WL_TRACE("%s Specific SCAN already running ignoring BC scan\n",
+			 __func__);
 		return EBUSY;
 	}
 
@@ -1386,8 +1384,8 @@
 			g_scan_specified_ssid = 0;
 
 			if (iscan->iscan_state == ISCAN_STATE_SCANING) {
-				WL_TRACE(("%s ISCAN already in progress \n",
-					  __func__));
+				WL_TRACE("%s ISCAN already in progress\n",
+					 __func__);
 				return 0;
 			}
 		}
@@ -1406,7 +1404,7 @@
 	u8 *ie = *wpaie;
 
 	if ((ie[1] >= 6) &&
-	    !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
+	    !memcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
 		return true;
 	}
 
@@ -1422,7 +1420,7 @@
 	u8 *ie = *wpsie;
 
 	if ((ie[1] >= 4) &&
-	    !bcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
+	    !memcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
 		return true;
 	}
 
@@ -1501,9 +1499,8 @@
 
 	for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
 		if (list->version != WL_BSS_INFO_VERSION) {
-			WL_ERROR(("%s : list->version %d != "
-				"WL_BSS_INFO_VERSION\n",
-				__func__, list->version));
+			WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+				 __func__, list->version);
 			return ret;
 		}
 
@@ -1511,11 +1508,11 @@
 					     dtoh32(bi->length)) : list->
 		    bss_info;
 
-		WL_TRACE(("%s : %s\n", __func__, bi->SSID));
+		WL_TRACE("%s : %s\n", __func__, bi->SSID);
 
 		iwe.cmd = SIOCGIWAP;
 		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+		memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETH_ALEN);
 		event =
 		    IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
 					 IW_EV_ADDR_LEN);
@@ -1590,11 +1587,11 @@
 
 	ret = event - extra;
 	if (ret < 0) {
-		WL_ERROR(("==> Wrong size\n"));
+		WL_ERROR("==> Wrong size\n");
 		ret = 0;
 	}
-	WL_TRACE(("%s: size=%d bytes prepared\n", __func__,
-		  (unsigned int)(event - extra)));
+	WL_TRACE("%s: size=%d bytes prepared\n",
+		 __func__, (unsigned int)(event - extra));
 	return (uint)ret;
 }
 
@@ -1614,10 +1611,10 @@
 	iscan_buf_t *p_buf;
 #endif
 
-	WL_TRACE(("%s: buflen_from_user %d: \n", dev->name, buflen_from_user));
+	WL_TRACE("%s: buflen_from_user %d:\n", dev->name, buflen_from_user);
 
 	if (!extra) {
-		WL_TRACE(("%s: wl_iw_get_scan return -EINVAL\n", dev->name));
+		WL_TRACE("%s: wl_iw_get_scan return -EINVAL\n", dev->name);
 		return -EINVAL;
 	}
 
@@ -1631,8 +1628,8 @@
 	if (g_scan_specified_ssid) {
 		list = kmalloc(len, GFP_KERNEL);
 		if (!list) {
-			WL_TRACE(("%s: wl_iw_get_scan return -ENOMEM\n",
-				  dev->name));
+			WL_TRACE("%s: wl_iw_get_scan return -ENOMEM\n",
+				 dev->name);
 			g_scan_specified_ssid = 0;
 			return -ENOMEM;
 		}
@@ -1642,8 +1639,8 @@
 	list->buflen = htod32(len);
 	error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len);
 	if (error) {
-		WL_ERROR(("%s: %s : Scan_results ERROR %d\n", dev->name,
-			  __func__, error));
+		WL_ERROR("%s: %s : Scan_results ERROR %d\n",
+			 dev->name, __func__, error);
 		dwrq->length = len;
 		if (g_scan_specified_ssid) {
 			g_scan_specified_ssid = 0;
@@ -1656,8 +1653,8 @@
 	list->count = dtoh32(list->count);
 
 	if (list->version != WL_BSS_INFO_VERSION) {
-		WL_ERROR(("%s : list->version %d != WL_BSS_INFO_VERSION\n",
-			  __func__, list->version));
+		WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+			 __func__, list->version);
 		if (g_scan_specified_ssid) {
 			g_scan_specified_ssid = 0;
 			kfree(list);
@@ -1666,8 +1663,8 @@
 	}
 
 	if (g_scan_specified_ssid) {
-		WL_TRACE(("%s: Specified scan APs in the list =%d\n",
-			  __func__, list->count));
+		WL_TRACE("%s: Specified scan APs in the list =%d\n",
+			 __func__, list->count);
 		len_ret =
 		    (__u16) wl_iw_get_scan_prep(list, info, extra,
 						buflen_from_user);
@@ -1678,8 +1675,8 @@
 		while (p_buf != iscan->list_cur) {
 			list_merge =
 			    &((wl_iscan_results_t *) p_buf->iscan_buf)->results;
-			WL_TRACE(("%s: Bcast APs list=%d\n", __func__,
-				  list_merge->count));
+			WL_TRACE("%s: Bcast APs list=%d\n",
+				 __func__, list_merge->count);
 			if (list_merge->count > 0)
 				len_ret +=
 				    (__u16) wl_iw_get_scan_prep(list_merge,
@@ -1689,8 +1686,8 @@
 		}
 #else
 		list_merge = (wl_scan_results_t *) g_scan;
-		WL_TRACE(("%s: Bcast APs list=%d\n", __func__,
-			  list_merge->count));
+		WL_TRACE("%s: Bcast APs list=%d\n",
+			 __func__, list_merge->count);
 		if (list_merge->count > 0)
 			len_ret +=
 			    (__u16) wl_iw_get_scan_prep(list_merge, info,
@@ -1714,8 +1711,8 @@
 	dwrq->length = len;
 	dwrq->flags = 0;
 
-	WL_TRACE(("%s return to WE %d bytes APs=%d\n", __func__,
-		  dwrq->length, list->count));
+	WL_TRACE("%s return to WE %d bytes APs=%d\n",
+		 __func__, dwrq->length, list->count);
 	return 0;
 }
 
@@ -1736,26 +1733,26 @@
 	u32 counter = 0;
 	u8 channel;
 
-	WL_TRACE(("%s %s buflen_from_user %d:\n", dev->name, __func__,
-		  dwrq->length));
+	WL_TRACE("%s %s buflen_from_user %d:\n",
+		 dev->name, __func__, dwrq->length);
 
 	if (!extra) {
-		WL_TRACE(("%s: INVALID SIOCGIWSCAN GET bad parameter\n",
-			  dev->name));
+		WL_TRACE("%s: INVALID SIOCGIWSCAN GET bad parameter\n",
+			 dev->name);
 		return -EINVAL;
 	}
 
 	if ((!iscan) || (!iscan->sysioc_tsk)) {
-		WL_ERROR(("%ssysioc_tsk\n", __func__));
+		WL_ERROR("%ssysioc_tsk\n", __func__);
 		return wl_iw_get_scan(dev, info, dwrq, extra);
 	}
 
 	if (iscan->iscan_state == ISCAN_STATE_SCANING) {
-		WL_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
+		WL_TRACE("%s: SIOCGIWSCAN GET still scanning\n", dev->name);
 		return -EAGAIN;
 	}
 
-	WL_TRACE(("%s: SIOCGIWSCAN GET broadcast results\n", dev->name));
+	WL_TRACE("%s: SIOCGIWSCAN GET broadcast results\n", dev->name);
 	apcnt = 0;
 	p_buf = iscan->list_hdr;
 	while (p_buf != iscan->list_cur) {
@@ -1764,9 +1761,8 @@
 		counter += list->count;
 
 		if (list->version != WL_BSS_INFO_VERSION) {
-			WL_ERROR(("%s : list->version %d != "
-				"WL_BSS_INFO_VERSION\n",
-				__func__, list->version));
+			WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
+				 __func__, list->version);
 			return -EINVAL;
 		}
 
@@ -1779,14 +1775,14 @@
 			ASSERT(((unsigned long)bi + dtoh32(bi->length)) <=
 			       ((unsigned long)list + WLC_IW_ISCAN_MAXLEN));
 
-			if (event + ETHER_ADDR_LEN + bi->SSID_len +
+			if (event + ETH_ALEN + bi->SSID_len +
 			    IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >=
 			    end)
 				return -E2BIG;
 			iwe.cmd = SIOCGIWAP;
 			iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
 			memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID,
-			       ETHER_ADDR_LEN);
+			       ETH_ALEN);
 			event =
 			    IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
 						 IW_EV_ADDR_LEN);
@@ -1876,8 +1872,8 @@
 	dwrq->length = event - extra;
 	dwrq->flags = 0;
 
-	WL_TRACE(("%s return to WE %d bytes APs=%d\n", __func__,
-		  dwrq->length, counter));
+	WL_TRACE("%s return to WE %d bytes APs=%d\n",
+		 __func__, dwrq->length, counter);
 
 	if (!dwrq->length)
 		return -EAGAIN;
@@ -1895,7 +1891,7 @@
 	wl_join_params_t join_params;
 	int join_params_size;
 
-	WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
+	WL_TRACE("%s: SIOCSIWESSID\n", dev->name);
 
 	if (g_set_essid_before_scan)
 		return -EAGAIN;
@@ -1923,7 +1919,7 @@
 
 	memcpy(&join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
 	join_params.ssid.SSID_len = htod32(g_ssid.SSID_len);
-	memcpy(&join_params.params.bssid, &ether_bcast, ETHER_ADDR_LEN);
+	memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
 
 	wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params,
 			     &join_params_size);
@@ -1931,11 +1927,11 @@
 	error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params,
 				join_params_size);
 	if (error)
-		WL_ERROR(("Invalid ioctl data=%d\n", error));
+		WL_ERROR("Invalid ioctl data=%d\n", error);
 
 	if (g_ssid.SSID_len) {
-		WL_TRACE(("%s: join SSID=%s ch=%d\n", __func__,
-			  g_ssid.SSID, g_wl_iw_params.target_channel));
+		WL_TRACE("%s: join SSID=%s ch=%d\n",
+			 __func__, g_ssid.SSID, g_wl_iw_params.target_channel);
 	}
 	return 0;
 }
@@ -1948,14 +1944,14 @@
 	wlc_ssid_t ssid;
 	int error;
 
-	WL_TRACE(("%s: SIOCGIWESSID\n", dev->name));
+	WL_TRACE("%s: SIOCGIWESSID\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
 
 	error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
 	if (error) {
-		WL_ERROR(("Error getting the SSID\n"));
+		WL_ERROR("Error getting the SSID\n");
 		return error;
 	}
 
@@ -1976,7 +1972,7 @@
 {
 	wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
 
-	WL_TRACE(("%s: SIOCSIWNICKN\n", dev->name));
+	WL_TRACE("%s: SIOCSIWNICKN\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
@@ -1996,7 +1992,7 @@
 {
 	wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
 
-	WL_TRACE(("%s: SIOCGIWNICKN\n", dev->name));
+	WL_TRACE("%s: SIOCGIWNICKN\n", dev->name);
 
 	if (!extra)
 		return -EINVAL;
@@ -2014,7 +2010,7 @@
 	wl_rateset_t rateset;
 	int error, rate, i, error_bg, error_a;
 
-	WL_TRACE(("%s: SIOCSIWRATE\n", dev->name));
+	WL_TRACE("%s: SIOCSIWRATE\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
 				sizeof(rateset));
@@ -2063,7 +2059,7 @@
 {
 	int error, rate;
 
-	WL_TRACE(("%s: SIOCGIWRATE\n", dev->name));
+	WL_TRACE("%s: SIOCGIWRATE\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate));
 	if (error)
@@ -2080,7 +2076,7 @@
 {
 	int error, rts;
 
-	WL_TRACE(("%s: SIOCSIWRTS\n", dev->name));
+	WL_TRACE("%s: SIOCSIWRTS\n", dev->name);
 
 	if (vwrq->disabled)
 		rts = DOT11_DEFAULT_RTS_LEN;
@@ -2102,7 +2098,7 @@
 {
 	int error, rts;
 
-	WL_TRACE(("%s: SIOCGIWRTS\n", dev->name));
+	WL_TRACE("%s: SIOCGIWRTS\n", dev->name);
 
 	error = dev_wlc_intvar_get(dev, "rtsthresh", &rts);
 	if (error)
@@ -2121,7 +2117,7 @@
 {
 	int error, frag;
 
-	WL_TRACE(("%s: SIOCSIWFRAG\n", dev->name));
+	WL_TRACE("%s: SIOCSIWFRAG\n", dev->name);
 
 	if (vwrq->disabled)
 		frag = DOT11_DEFAULT_FRAG_LEN;
@@ -2143,7 +2139,7 @@
 {
 	int error, fragthreshold;
 
-	WL_TRACE(("%s: SIOCGIWFRAG\n", dev->name));
+	WL_TRACE("%s: SIOCGIWFRAG\n", dev->name);
 
 	error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold);
 	if (error)
@@ -2163,7 +2159,7 @@
 {
 	int error, disable;
 	u16 txpwrmw;
-	WL_TRACE(("%s: SIOCSIWTXPOW\n", dev->name));
+	WL_TRACE("%s: SIOCSIWTXPOW\n", dev->name);
 
 	disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
 	disable += WL_RADIO_SW_DISABLE << 16;
@@ -2200,7 +2196,7 @@
 	int error, disable, txpwrdbm;
 	u8 result;
 
-	WL_TRACE(("%s: SIOCGIWTXPOW\n", dev->name));
+	WL_TRACE("%s: SIOCGIWTXPOW\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable));
 	if (error)
@@ -2229,7 +2225,7 @@
 {
 	int error, lrl, srl;
 
-	WL_TRACE(("%s: SIOCSIWRETRY\n", dev->name));
+	WL_TRACE("%s: SIOCSIWRETRY\n", dev->name);
 
 	if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
 		return -EINVAL;
@@ -2277,7 +2273,7 @@
 {
 	int error, lrl, srl;
 
-	WL_TRACE(("%s: SIOCGIWRETRY\n", dev->name));
+	WL_TRACE("%s: SIOCGIWRETRY\n", dev->name);
 
 	vwrq->disabled = 0;
 
@@ -2317,7 +2313,7 @@
 	wl_wsec_key_t key;
 	int error, val, wsec;
 
-	WL_TRACE(("%s: SIOCSIWENCODE\n", dev->name));
+	WL_TRACE("%s: SIOCSIWENCODE\n", dev->name);
 
 	memset(&key, 0, sizeof(key));
 
@@ -2409,9 +2405,9 @@
 	wl_wsec_key_t key;
 	int error, val, wsec, auth;
 
-	WL_TRACE(("%s: SIOCGIWENCODE\n", dev->name));
+	WL_TRACE("%s: SIOCGIWENCODE\n", dev->name);
 
-	bzero(&key, sizeof(wl_wsec_key_t));
+	memset(&key, 0, sizeof(wl_wsec_key_t));
 
 	if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
 		for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS;
@@ -2465,7 +2461,7 @@
 {
 	int error, pm;
 
-	WL_TRACE(("%s: SIOCSIWPOWER\n", dev->name));
+	WL_TRACE("%s: SIOCSIWPOWER\n", dev->name);
 
 	pm = vwrq->disabled ? PM_OFF : PM_MAX;
 
@@ -2484,7 +2480,7 @@
 {
 	int error, pm;
 
-	WL_TRACE(("%s: SIOCGIWPOWER\n", dev->name));
+	WL_TRACE("%s: SIOCGIWPOWER\n", dev->name);
 
 	error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
 	if (error)
@@ -2503,7 +2499,7 @@
 		struct iw_request_info *info, struct iw_point *iwp, char *extra)
 {
 
-	WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
+	WL_TRACE("%s: SIOCSIWGENIE\n", dev->name);
 
 	CHECK_EXTRA_FOR_NULL(extra);
 
@@ -2516,7 +2512,7 @@
 wl_iw_get_wpaie(struct net_device *dev,
 		struct iw_request_info *info, struct iw_point *iwp, char *extra)
 {
-	WL_TRACE(("%s: SIOCGIWGENIE\n", dev->name));
+	WL_TRACE("%s: SIOCGIWGENIE\n", dev->name);
 	iwp->length = 64;
 	dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
 	return 0;
@@ -2531,7 +2527,7 @@
 	int error;
 	struct iw_encode_ext *iwe;
 
-	WL_TRACE(("%s: SIOCSIWENCODEEXT\n", dev->name));
+	WL_TRACE("%s: SIOCSIWENCODEEXT\n", dev->name);
 
 	CHECK_EXTRA_FOR_NULL(extra);
 
@@ -2548,14 +2544,14 @@
 
 	key.len = iwe->key_len;
 
-	if (!ETHER_ISMULTI(iwe->addr.sa_data))
+	if (!is_multicast_ether_addr(iwe->addr.sa_data))
 		bcopy((void *)&iwe->addr.sa_data, (char *)&key.ea,
-		      ETHER_ADDR_LEN);
+		      ETH_ALEN);
 
 	if (key.len == 0) {
 		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
-			WL_WSEC(("Changing the the primary Key to %d\n",
-				 key.index));
+			WL_WSEC("Changing the the primary Key to %d\n",
+				key.index);
 			key.index = htod32(key.index);
 			error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
 					      &key.index, sizeof(key.index));
@@ -2569,9 +2565,9 @@
 		if (iwe->key_len > sizeof(key.data))
 			return -EINVAL;
 
-		WL_WSEC(("Setting the key index %d\n", key.index));
+		WL_WSEC("Setting the key index %d\n", key.index);
 		if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
-			WL_WSEC(("key is a Primary Key\n"));
+			WL_WSEC("key is a Primary Key\n");
 			key.flags = WL_PRIMARY_KEY;
 		}
 
@@ -2638,15 +2634,15 @@
 	uint i;
 	int ret = 0;
 
-	WL_WSEC(("%s: SIOCSIWPMKSA\n", dev->name));
+	WL_WSEC("%s: SIOCSIWPMKSA\n", dev->name);
 
 	CHECK_EXTRA_FOR_NULL(extra);
 
 	iwpmksa = (struct iw_pmksa *)extra;
 
 	if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
-		WL_WSEC(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
-		bzero((char *)&pmkid_list, sizeof(pmkid_list));
+		WL_WSEC("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n");
+		memset((char *)&pmkid_list, 0, sizeof(pmkid_list));
 	}
 
 	else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
@@ -2656,30 +2652,30 @@
 			pmkidptr = &pmkid;
 
 			bcopy(&iwpmksa->bssid.sa_data[0],
-			      &pmkidptr->pmkid[0].BSSID, ETHER_ADDR_LEN);
+			      &pmkidptr->pmkid[0].BSSID, ETH_ALEN);
 			bcopy(&iwpmksa->pmkid[0], &pmkidptr->pmkid[0].PMKID,
 			      WPA2_PMKID_LEN);
 
-			WL_WSEC(("wl_iw_set_pmksa:IW_PMKSA_REMOVE:PMKID: "
-				"%pM = ", &pmkidptr->pmkid[0].BSSID));
+			WL_WSEC("wl_iw_set_pmksa:IW_PMKSA_REMOVE:PMKID: %pM = ",
+				&pmkidptr->pmkid[0].BSSID);
 			for (j = 0; j < WPA2_PMKID_LEN; j++)
-				WL_WSEC(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
-			WL_WSEC(("\n"));
+				WL_WSEC("%02x ", pmkidptr->pmkid[0].PMKID[j]);
+			WL_WSEC("\n");
 		}
 
 		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
-			if (!bcmp
+			if (!memcmp
 			    (&iwpmksa->bssid.sa_data[0],
-			     &pmkid_list.pmkids.pmkid[i].BSSID, ETHER_ADDR_LEN))
+			     &pmkid_list.pmkids.pmkid[i].BSSID, ETH_ALEN))
 				break;
 
 		if ((pmkid_list.pmkids.npmkid > 0)
 		    && (i < pmkid_list.pmkids.npmkid)) {
-			bzero(&pmkid_list.pmkids.pmkid[i], sizeof(pmkid_t));
+			memset(&pmkid_list.pmkids.pmkid[i], 0, sizeof(pmkid_t));
 			for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
 				bcopy(&pmkid_list.pmkids.pmkid[i + 1].BSSID,
 				      &pmkid_list.pmkids.pmkid[i].BSSID,
-				      ETHER_ADDR_LEN);
+				      ETH_ALEN);
 				bcopy(&pmkid_list.pmkids.pmkid[i + 1].PMKID,
 				      &pmkid_list.pmkids.pmkid[i].PMKID,
 				      WPA2_PMKID_LEN);
@@ -2691,14 +2687,14 @@
 
 	else if (iwpmksa->cmd == IW_PMKSA_ADD) {
 		for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
-			if (!bcmp
+			if (!memcmp
 			    (&iwpmksa->bssid.sa_data[0],
-			     &pmkid_list.pmkids.pmkid[i].BSSID, ETHER_ADDR_LEN))
+			     &pmkid_list.pmkids.pmkid[i].BSSID, ETH_ALEN))
 				break;
 		if (i < MAXPMKID) {
 			bcopy(&iwpmksa->bssid.sa_data[0],
 			      &pmkid_list.pmkids.pmkid[i].BSSID,
-			      ETHER_ADDR_LEN);
+			      ETH_ALEN);
 			bcopy(&iwpmksa->pmkid[0],
 			      &pmkid_list.pmkids.pmkid[i].PMKID,
 			      WPA2_PMKID_LEN);
@@ -2710,25 +2706,25 @@
 			uint j;
 			uint k;
 			k = pmkid_list.pmkids.npmkid;
-			WL_WSEC(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %pM = ",
-				&pmkid_list.pmkids.pmkid[k].BSSID));
+			WL_WSEC("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %pM = ",
+				&pmkid_list.pmkids.pmkid[k].BSSID);
 			for (j = 0; j < WPA2_PMKID_LEN; j++)
-				WL_WSEC(("%02x ",
-					 pmkid_list.pmkids.pmkid[k].PMKID[j]));
-			WL_WSEC(("\n"));
+				WL_WSEC("%02x ",
+					pmkid_list.pmkids.pmkid[k].PMKID[j]);
+			WL_WSEC("\n");
 		}
 	}
-	WL_WSEC(("PRINTING pmkid LIST - No of elements %d\n",
-		 pmkid_list.pmkids.npmkid));
+	WL_WSEC("PRINTING pmkid LIST - No of elements %d\n",
+		pmkid_list.pmkids.npmkid);
 	for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
 		uint j;
-		WL_WSEC(("PMKID[%d]: %pM = ", i,
-			&pmkid_list.pmkids.pmkid[i].BSSID));
+		WL_WSEC("PMKID[%d]: %pM = ",
+			i, &pmkid_list.pmkids.pmkid[i].BSSID);
 		for (j = 0; j < WPA2_PMKID_LEN; j++)
-			WL_WSEC(("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]));
-		WL_WSEC(("\n"));
+			WL_WSEC("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]);
+		WL_WSEC("\n");
 	}
-	WL_WSEC(("\n"));
+	WL_WSEC("\n");
 
 	if (!ret)
 		ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list,
@@ -2742,7 +2738,7 @@
 		    struct iw_request_info *info,
 		    struct iw_param *vwrq, char *extra)
 {
-	WL_TRACE(("%s: SIOCGIWENCODEEXT\n", dev->name));
+	WL_TRACE("%s: SIOCGIWENCODEEXT\n", dev->name);
 	return 0;
 }
 
@@ -2757,13 +2753,13 @@
 	int val = 0;
 	wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
 
-	WL_TRACE(("%s: SIOCSIWAUTH\n", dev->name));
+	WL_TRACE("%s: SIOCSIWAUTH\n", dev->name);
 
 	paramid = vwrq->flags & IW_AUTH_INDEX;
 	paramval = vwrq->value;
 
-	WL_TRACE(("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
-		  dev->name, paramid, paramval));
+	WL_TRACE("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
+		 dev->name, paramid, paramval);
 
 	switch (paramid) {
 	case IW_AUTH_WPA_VERSION:
@@ -2773,8 +2769,8 @@
 			val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
 		else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
 			val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
-		WL_INFORM(("%s: %d: setting wpa_auth to 0x%0x\n", __func__,
-			   __LINE__, val));
+		WL_INFORM("%s: %d: setting wpa_auth to 0x%0x\n",
+			  __func__, __LINE__, val);
 		error = dev_wlc_intvar_set(dev, "wpa_auth", val);
 		if (error)
 			return error;
@@ -2797,20 +2793,19 @@
 		}
 
 		if (iw->privacy_invoked && !val) {
-			WL_WSEC(("%s: %s: 'Privacy invoked' true but clearing "
-				"wsec, assuming " "we're a WPS enrollee\n",
-				dev->name, __func__));
+			WL_WSEC("%s: %s: 'Privacy invoked' true but clearing wsec, assuming we're a WPS enrollee\n",
+				dev->name, __func__);
 			error = dev_wlc_intvar_set(dev, "is_WPS_enrollee",
 							true);
 			if (error) {
-				WL_WSEC(("Failed to set is_WPS_enrollee\n"));
+				WL_WSEC("Failed to set is_WPS_enrollee\n");
 				return error;
 			}
 		} else if (val) {
 			error = dev_wlc_intvar_set(dev, "is_WPS_enrollee",
 							false);
 			if (error) {
-				WL_WSEC(("Failed to clear is_WPS_enrollee\n"));
+				WL_WSEC("Failed to clear is_WPS_enrollee\n");
 				return error;
 			}
 		}
@@ -2837,8 +2832,8 @@
 			else
 				val = WPA2_AUTH_UNSPECIFIED;
 		}
-		WL_INFORM(("%s: %d: setting wpa_auth to %d\n", __func__,
-			   __LINE__, val));
+		WL_INFORM("%s: %d: setting wpa_auth to %d\n",
+			  __func__, __LINE__, val);
 		error = dev_wlc_intvar_set(dev, "wpa_auth", val);
 		if (error)
 			return error;
@@ -2850,7 +2845,7 @@
 		break;
 
 	case IW_AUTH_80211_AUTH_ALG:
-		WL_INFORM(("Setting the D11auth %d\n", paramval));
+		WL_INFORM("Setting the D11auth %d\n", paramval);
 		if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
 			val = 0;
 		else if (paramval == IW_AUTH_ALG_SHARED_KEY)
@@ -2879,8 +2874,8 @@
 				dev_wlc_intvar_set(dev, "wsec", val);
 			}
 			val = 0;
-			WL_INFORM(("%s: %d: setting wpa_auth to %d\n",
-				   __func__, __LINE__, val));
+			WL_INFORM("%s: %d: setting wpa_auth to %d\n",
+				  __func__, __LINE__, val);
 			dev_wlc_intvar_set(dev, "wpa_auth", 0);
 			return error;
 		}
@@ -2897,7 +2892,7 @@
 
 #if WIRELESS_EXT > 17
 	case IW_AUTH_ROAMING_CONTROL:
-		WL_INFORM(("%s: IW_AUTH_ROAMING_CONTROL\n", __func__));
+		WL_INFORM("%s: IW_AUTH_ROAMING_CONTROL\n", __func__);
 		break;
 	case IW_AUTH_PRIVACY_INVOKED:
 		{
@@ -2908,8 +2903,7 @@
 				error = dev_wlc_intvar_set(dev,
 						"is_WPS_enrollee", false);
 				if (error) {
-					WL_WSEC(("Failed to clear iovar "
-						"is_WPS_enrollee\n"));
+					WL_WSEC("Failed to clear iovar is_WPS_enrollee\n");
 					return error;
 				}
 			} else {
@@ -2923,8 +2917,7 @@
 							"is_WPS_enrollee",
 							true);
 					if (error) {
-						WL_WSEC(("Failed to set iovar "
-						"is_WPS_enrollee\n"));
+						WL_WSEC("Failed to set iovar is_WPS_enrollee\n");
 						return error;
 					}
 				} else {
@@ -2932,8 +2925,7 @@
 							"is_WPS_enrollee",
 							false);
 					if (error) {
-						WL_WSEC(("Failed to clear "
-							"is_WPS_enrollee\n"));
+						WL_WSEC("Failed to clear is_WPS_enrollee\n");
 						return error;
 					}
 				}
@@ -2960,7 +2952,7 @@
 	int val;
 	wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
 
-	WL_TRACE(("%s: SIOCGIWAUTH\n", dev->name));
+	WL_TRACE("%s: SIOCGIWAUTH\n", dev->name);
 
 	paramid = vwrq->flags & IW_AUTH_INDEX;
 
@@ -3040,7 +3032,7 @@
 		break;
 #if WIRELESS_EXT > 17
 	case IW_AUTH_ROAMING_CONTROL:
-		WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __func__));
+		WL_ERROR("%s: IW_AUTH_ROAMING_CONTROL\n", __func__);
 		break;
 	case IW_AUTH_PRIVACY_INVOKED:
 		paramval = iw->privacy_invoked;
@@ -3157,19 +3149,19 @@
 	char *extra = NULL;
 	int token_size = 1, max_tokens = 0, ret = 0;
 
-	WL_TRACE(("\n%s, cmd:%x alled via dhd->do_ioctl()entry point\n",
-		  __func__, cmd));
+	WL_TRACE("\n%s, cmd:%x alled via dhd->do_ioctl()entry point\n",
+		 __func__, cmd);
 	if (cmd < SIOCIWFIRST ||
 		IW_IOCTL_IDX(cmd) >= ARRAY_SIZE(wl_iw_handler)) {
-		WL_ERROR(("%s: error in cmd=%x : out of range\n", __func__,
-			cmd));
+		WL_ERROR("%s: error in cmd=%x : out of range\n",
+			 __func__, cmd);
 		return -EOPNOTSUPP;
 	}
 
 	handler = wl_iw_handler[IW_IOCTL_IDX(cmd)];
 	if (!handler) {
-		WL_ERROR(("%s: error in cmd=%x : not supported\n",
-			__func__, cmd));
+		WL_ERROR("%s: error in cmd=%x : not supported\n",
+			 __func__, cmd);
 		return -EOPNOTSUPP;
 	}
 
@@ -3234,9 +3226,8 @@
 
 	if (max_tokens && wrq->u.data.pointer) {
 		if (wrq->u.data.length > max_tokens) {
-			WL_ERROR(("%s: error in cmd=%x wrq->u.data.length=%d "
-			"> max_tokens=%d\n",
-			__func__, cmd, wrq->u.data.length, max_tokens));
+			WL_ERROR("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n",
+				 __func__, cmd, wrq->u.data.length, max_tokens);
 			return -E2BIG;
 		}
 		extra = kmalloc(max_tokens * token_size, GFP_KERNEL);
@@ -3339,7 +3330,7 @@
 		memset(stringBuf, 0, buflen);
 		snprintf(stringBuf, buflen, "%s %s %02d %02d",
 			 name, cause, status, reason);
-		WL_INFORM(("Connection status: %s\n", stringBuf));
+		WL_INFORM("Connection status: %s\n", stringBuf);
 		return true;
 	} else {
 		return false;
@@ -3383,46 +3374,46 @@
 	iw = 0;
 
 	if (!dev) {
-		WL_ERROR(("%s: dev is null\n", __func__));
+		WL_ERROR("%s: dev is null\n", __func__);
 		return;
 	}
 
 	iw = *(wl_iw_t **) netdev_priv(dev);
 
-	WL_TRACE(("%s: dev=%s event=%d\n", __func__, dev->name, event_type));
+	WL_TRACE("%s: dev=%s event=%d\n", __func__, dev->name, event_type);
 
 	switch (event_type) {
 	case WLC_E_TXFAIL:
 		cmd = IWEVTXDROP;
-		memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
 		wrqu.addr.sa_family = ARPHRD_ETHER;
 		break;
 #if WIRELESS_EXT > 14
 	case WLC_E_JOIN:
 	case WLC_E_ASSOC_IND:
 	case WLC_E_REASSOC_IND:
-		memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+		memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
 		wrqu.addr.sa_family = ARPHRD_ETHER;
 		cmd = IWEVREGISTERED;
 		break;
 	case WLC_E_DEAUTH_IND:
 	case WLC_E_DISASSOC_IND:
 		cmd = SIOCGIWAP;
-		bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
+		memset(wrqu.addr.sa_data, 0, ETH_ALEN);
 		wrqu.addr.sa_family = ARPHRD_ETHER;
-		bzero(&extra, ETHER_ADDR_LEN);
+		memset(&extra, 0, ETH_ALEN);
 		break;
 	case WLC_E_LINK:
 	case WLC_E_NDIS_LINK:
 		cmd = SIOCGIWAP;
 		if (!(flags & WLC_EVENT_MSG_LINK)) {
-			bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
-			bzero(&extra, ETHER_ADDR_LEN);
+			memset(wrqu.addr.sa_data, 0, ETH_ALEN);
+			memset(&extra, 0, ETH_ALEN);
 			WAKE_LOCK_TIMEOUT(iw->pub, WAKE_LOCK_LINK_DOWN_TMOUT,
 					  20 * HZ);
 		} else {
-			memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
-			WL_TRACE(("Link UP\n"));
+			memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
+			WL_TRACE("Link UP\n");
 
 		}
 		wrqu.addr.sa_family = ARPHRD_ETHER;
@@ -3433,8 +3424,8 @@
 			wrqu.data.length = datalen + 1;
 			extra[0] = WLC_E_ACTION_FRAME;
 			memcpy(&extra[1], data, datalen);
-			WL_TRACE(("WLC_E_ACTION_FRAME len %d \n",
-				  wrqu.data.length));
+			WL_TRACE("WLC_E_ACTION_FRAME len %d\n",
+				 wrqu.data.length);
 		}
 		break;
 
@@ -3464,7 +3455,7 @@
 			else
 				micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
 			memcpy(micerrevt->src_addr.sa_data, &e->addr,
-			       ETHER_ADDR_LEN);
+			       ETH_ALEN);
 			micerrevt->src_addr.sa_family = ARPHRD_ETHER;
 
 			break;
@@ -3487,14 +3478,14 @@
 				wrqu.data.length = sizeof(struct iw_pmkid_cand);
 				pmkidcand = pmkcandlist->pmkid_cand;
 				while (count) {
-					bzero(iwpmkidcand,
+					memset(iwpmkidcand, 0,
 					      sizeof(struct iw_pmkid_cand));
 					if (pmkidcand->preauth)
 						iwpmkidcand->flags |=
 						    IW_PMKID_CAND_PREAUTH;
 					bcopy(&pmkidcand->BSSID,
 					      &iwpmkidcand->bssid.sa_data,
-					      ETHER_ADDR_LEN);
+					      ETH_ALEN);
 #ifndef SANDGATE2G
 					wireless_send_event(dev, cmd, &wrqu,
 							    extra);
@@ -3515,13 +3506,13 @@
 		} else {
 			cmd = SIOCGIWSCAN;
 			wrqu.data.length = strlen(extra);
-			WL_TRACE(("Event WLC_E_SCAN_COMPLETE from specific "
-				"scan %d\n", g_iscan->iscan_state));
+			WL_TRACE("Event WLC_E_SCAN_COMPLETE from specific scan %d\n",
+				 g_iscan->iscan_state);
 		}
 #else
 		cmd = SIOCGIWSCAN;
 		wrqu.data.length = strlen(extra);
-		WL_TRACE(("Event WLC_E_SCAN_COMPLETE\n"));
+		WL_TRACE("Event WLC_E_SCAN_COMPLETE\n");
 #endif
 		break;
 
@@ -3529,9 +3520,9 @@
 		{
 			wlc_ssid_t *ssid;
 			ssid = (wlc_ssid_t *) data;
-			WL_ERROR(("%s Event WLC_E_PFN_NET_FOUND, send %s up : "
-				"find %s len=%d\n", __func__, PNO_EVENT_UP,
-				ssid->SSID, ssid->SSID_len));
+			WL_ERROR("%s Event WLC_E_PFN_NET_FOUND, send %s up : find %s len=%d\n",
+				 __func__, PNO_EVENT_UP,
+				 ssid->SSID, ssid->SSID_len);
 			WAKE_LOCK_TIMEOUT(iw->pub, WAKE_LOCK_PNO_FIND_TMOUT,
 					  20 * HZ);
 			cmd = IWEVCUSTOM;
@@ -3542,7 +3533,7 @@
 		break;
 
 	default:
-		WL_TRACE(("Unknown Event %d: ignoring\n", event_type));
+		WL_TRACE("Unknown Event %d: ignoring\n", event_type);
 		break;
 	}
 #ifndef SANDGATE2G
@@ -3583,15 +3574,15 @@
 		goto done;
 
 	phy_noise = dtoh32(phy_noise);
-	WL_TRACE(("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise));
+	WL_TRACE("wl_iw_get_wireless_stats phy noise=%d\n", phy_noise);
 
-	bzero(&scb_val, sizeof(scb_val_t));
+	memset(&scb_val, 0, sizeof(scb_val_t));
 	res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
 	if (res)
 		goto done;
 
 	rssi = dtoh32(scb_val.val);
-	WL_TRACE(("wl_iw_get_wireless_stats rssi=%d\n", rssi));
+	WL_TRACE("wl_iw_get_wireless_stats rssi=%d\n", rssi);
 	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
 		wstats->qual.qual = 0;
 	else if (rssi <= WL_IW_RSSI_VERY_LOW)
@@ -3614,23 +3605,21 @@
 #endif
 
 #if WIRELESS_EXT > 11
-	WL_TRACE(("wl_iw_get_wireless_stats counters=%d\n",
-		  (int)sizeof(wl_cnt_t)));
+	WL_TRACE("wl_iw_get_wireless_stats counters=%zu\n", sizeof(wl_cnt_t));
 
 	memset(&cnt, 0, sizeof(wl_cnt_t));
 	res =
 	    dev_wlc_bufvar_get(dev, "counters", (char *)&cnt, sizeof(wl_cnt_t));
 	if (res) {
-		WL_ERROR(("wl_iw_get_wireless_stats counters failed error=%d\n",
-			  res));
+		WL_ERROR("wl_iw_get_wireless_stats counters failed error=%d\n",
+			 res);
 		goto done;
 	}
 
 	cnt.version = dtoh16(cnt.version);
 	if (cnt.version != WL_CNT_T_VERSION) {
-		WL_TRACE(("\tIncorrect version of counters struct: expected "
-			"%d; got %d\n",
-			WL_CNT_T_VERSION, cnt.version));
+		WL_TRACE("\tIncorrect version of counters struct: expected %d; got %d\n",
+			 WL_CNT_T_VERSION, cnt.version);
 		goto done;
 	}
 
@@ -3641,22 +3630,22 @@
 	wstats->discard.misc = dtoh32(cnt.rxrunt) + dtoh32(cnt.rxgiant);
 	wstats->miss.beacon = 0;
 
-	WL_TRACE(("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
-		  dtoh32(cnt.txframe), dtoh32(cnt.txbyte)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
-		  dtoh32(cnt.rxfrmtoolong)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
-		  dtoh32(cnt.rxbadplcp)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters rxundec=%d\n",
-		  dtoh32(cnt.rxundec)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
-		  dtoh32(cnt.rxfragerr)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters txfail=%d\n",
-		  dtoh32(cnt.txfail)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters rxrunt=%d\n",
-		  dtoh32(cnt.rxrunt)));
-	WL_TRACE(("wl_iw_get_wireless_stats counters rxgiant=%d\n",
-		  dtoh32(cnt.rxgiant)));
+	WL_TRACE("wl_iw_get_wireless_stats counters txframe=%d txbyte=%d\n",
+		 dtoh32(cnt.txframe), dtoh32(cnt.txbyte));
+	WL_TRACE("wl_iw_get_wireless_stats counters rxfrmtoolong=%d\n",
+		  dtoh32(cnt.rxfrmtoolong));
+	WL_TRACE("wl_iw_get_wireless_stats counters rxbadplcp=%d\n",
+		  dtoh32(cnt.rxbadplcp));
+	WL_TRACE("wl_iw_get_wireless_stats counters rxundec=%d\n",
+		  dtoh32(cnt.rxundec));
+	WL_TRACE("wl_iw_get_wireless_stats counters rxfragerr=%d\n",
+		  dtoh32(cnt.rxfragerr));
+	WL_TRACE("wl_iw_get_wireless_stats counters txfail=%d\n",
+		  dtoh32(cnt.txfail));
+	WL_TRACE("wl_iw_get_wireless_stats counters rxrunt=%d\n",
+		  dtoh32(cnt.rxrunt));
+	WL_TRACE("wl_iw_get_wireless_stats counters rxgiant=%d\n",
+		  dtoh32(cnt.rxgiant));
 #endif				/* WIRELESS_EXT > 11 */
 
 done:
@@ -3690,8 +3679,7 @@
 		return -ENOMEM;
 	memset(iscan, 0, sizeof(iscan_info_t));
 
-	iscan->iscan_ex_params_p =
-	    (wl_iscan_params_t *) kmalloc(params_size, GFP_KERNEL);
+	iscan->iscan_ex_params_p = kmalloc(params_size, GFP_KERNEL);
 	if (!iscan->iscan_ex_params_p)
 		return -ENOMEM;
 	iscan->iscan_ex_param_size = params_size;
@@ -3723,9 +3711,7 @@
 	priv_dev = dev;
 	MUTEX_LOCK_SOFTAP_SET_INIT(iw->pub);
 #endif
-	g_scan = NULL;
-
-	g_scan = (void *)kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
+	g_scan = kmalloc(G_SCAN_RESULTS, GFP_KERNEL);
 	if (!g_scan)
 		return -ENOMEM;
 
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.h b/drivers/staging/brcm80211/brcmfmac/wl_iw.h
index edbf61f..c8637c5 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.h
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.h
@@ -78,7 +78,7 @@
 
 #define CHECK_EXTRA_FOR_NULL(extra) \
 if (!extra) { \
-	WL_ERROR(("%s: error : extra is null pointer\n", __func__)); \
+	WL_ERROR("%s: error : extra is null pointer\n", __func__);	\
 	return -EINVAL; \
 }
 
diff --git a/drivers/staging/brcm80211/include/bcm_rpc.h b/drivers/staging/brcm80211/include/bcm_rpc.h
deleted file mode 100644
index 77e5d8f..0000000
--- a/drivers/staging/brcm80211/include/bcm_rpc.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCM_RPC_H_
-#define _BCM_RPC_H_
-
-#include <rpc_osl.h>
-
-typedef struct rpc_info rpc_info_t;
-typedef struct rpc_buf rpc_buf_t;
-struct rpc_transport_info;
-typedef void (*rpc_dispatch_cb_t) (void *ctx, struct rpc_buf *buf);
-typedef void (*rpc_resync_cb_t) (void *ctx);
-typedef void (*rpc_down_cb_t) (void *ctx);
-typedef void (*rpc_txdone_cb_t) (void *ctx, struct rpc_buf *buf);
-extern struct rpc_info *bcm_rpc_attach(void *pdev, osl_t *osh,
-				       struct rpc_transport_info *rpc_th);
-
-extern void bcm_rpc_detach(struct rpc_info *rpc);
-extern void bcm_rpc_down(struct rpc_info *rpc);
-extern void bcm_rpc_watchdog(struct rpc_info *rpc);
-
-extern struct rpc_buf *bcm_rpc_buf_alloc(struct rpc_info *rpc, int len);
-extern void bcm_rpc_buf_free(struct rpc_info *rpc, struct rpc_buf *b);
-/* get rpc transport handle */
-extern struct rpc_transport_info *bcm_rpc_tp_get(struct rpc_info *rpc);
-
-/* callback for: data_rx, down, resync */
-extern void bcm_rpc_rxcb_init(struct rpc_info *rpc, void *ctx,
-			      rpc_dispatch_cb_t cb, void *dnctx,
-			      rpc_down_cb_t dncb, rpc_resync_cb_t resync_cb,
-			      rpc_txdone_cb_t);
-extern void bcm_rpc_rxcb_deinit(struct rpc_info *rpci);
-
-/* HOST or CLIENT rpc call, requiring no return value */
-extern int bcm_rpc_call(struct rpc_info *rpc, struct rpc_buf *b);
-
-/* HOST rpc call, demanding return.
- *   The thread may be suspended and control returns back to OS
- *   The thread will resume(waked up) on either the return signal received or timeout
- *     The implementation details depend on OS
- */
-extern struct rpc_buf *bcm_rpc_call_with_return(struct rpc_info *rpc,
-						struct rpc_buf *b);
-
-/* CLIENT rpc call to respond to bcm_rpc_call_with_return, requiring no return value */
-extern int bcm_rpc_call_return(struct rpc_info *rpc, struct rpc_buf *retb);
-
-extern uint bcm_rpc_buf_header_len(struct rpc_info *rpci);
-
-#define RPC_PKTLOG_SIZE		50	/* Depth of the history */
-#define RPC_PKTLOG_RD_LEN	3
-#define RPC_PKTLOG_DUMP_SIZE	150	/* dump size should be more than the product of above two */
-extern int bcm_rpc_pktlog_get(struct rpc_info *rpci, u32 *buf,
-			      uint buf_size, bool send);
-extern int bcm_rpc_dump(rpc_info_t *rpci, struct bcmstrbuf *b);
-
-/* HIGH/BMAC: bit 15-8: RPC module, bit 7-0: TP module */
-#define RPC_ERROR_VAL	0x0001
-#define RPC_TRACE_VAL	0x0002
-#define RPC_PKTTRACE_VAL 0x0004
-#define RPC_PKTLOG_VAL	0x0008
-extern void bcm_rpc_msglevel_set(struct rpc_info *rpci, u16 msglevel,
-				 bool high_low);
-
-#endif				/* _BCM_RPC_H_ */
diff --git a/drivers/staging/brcm80211/include/bcm_rpc_tp.h b/drivers/staging/brcm80211/include/bcm_rpc_tp.h
deleted file mode 100644
index bb8dc6d..0000000
--- a/drivers/staging/brcm80211/include/bcm_rpc_tp.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcm_rpc_tp_h_
-#define _bcm_rpc_tp_h_
-#include <bcm_rpc.h>
-
-#define DBUS_RX_BUFFER_SIZE_RPC	(2100)	/* rxbufsize for dbus_attach, linux only for now */
-
-#define BCM_RPC_TP_ENCAP_LEN	4	/* TP header is 4 bytes */
-
-#define BCM_RPC_TP_HOST_AGG_MASK	0xffff0000
-#define BCM_RPC_TP_HOST_AGG_SHIFT	16
-#define BCM_RPC_TP_HOST_AGG_AMPDU	0x00010000	/* HOST->DNGL ampdu aggregation */
-#define BCM_RPC_TP_HOST_AGG_TEST	0x00100000	/* HOST->DNGL test aggregation */
-#define BCM_RPC_TP_DNGL_AGG_MASK	0x0000ffff
-#define BCM_RPC_TP_DNGL_AGG_DPC		0x00000001	/* DNGL->HOST data aggregation */
-#define BCM_RPC_TP_DNGL_AGG_FLOWCTL	0x00000002	/* DNGL->HOST tx flowcontrol agg */
-#define BCM_RPC_TP_DNGL_AGG_TEST	0x00000010	/* DNGL->HOST test agg */
-
-#define BCM_RPC_TP_DNGL_AGG_MAX_SFRAME	3	/* max agg subframes, must be <= USB_NTXD */
-#define BCM_RPC_TP_DNGL_AGG_MAX_BYTE	4000	/* max agg bytes */
-
-#define BCM_RPC_TP_HOST_AGG_MAX_SFRAME  3	/* max agg subframes, AMPDU only, 3 is enough */
-#define BCM_RPC_TP_HOST_AGG_MAX_BYTE    3400	/* max agg bytes; to fit 2+ tcp/udp pkts. Each one:
-						 * 802.3pkt + 802.11 hdr + rpc hdr + tp hdr < 1700B
-						 * Need to be in sync with dongle usb rx dma
-						 *  rxbufsize(USBBULK_RXBUF_GIANT in usbdev_sb.c)
-						 */
-/* TP-DBUS pkts flowcontrol */
-#define BCM_RPC_TP_DBUS_NTXQ	50	/* queue size for TX on bulk OUT, aggregation possible */
-#define BCM_RPC_TP_DBUS_NRXQ	50	/* queue size for RX on bulk IN, aggregation possible */
-#define BCM_RPC_TP_DBUS_NRXQ_CTRL	1	/* queue size for RX on ctl EP0 */
-
-#define BCM_RPC_TP_DBUS_NRXQ_PKT	(BCM_RPC_TP_DBUS_NRXQ * BCM_RPC_TP_DNGL_AGG_MAX_SFRAME)
-#define BCM_RPC_TP_DBUS_NTXQ_PKT	(BCM_RPC_TP_DBUS_NTXQ * BCM_RPC_TP_HOST_AGG_MAX_SFRAME)
-
-typedef struct rpc_transport_info rpc_tp_info_t;
-
-typedef void (*rpc_tx_complete_fn_t) (void *, rpc_buf_t *, int status);
-typedef void (*rpc_rx_fn_t) (void *, rpc_buf_t *);
-
-#ifdef WLC_LOW
-typedef void (*rpc_txflowctl_cb_t) (void *ctx, bool on);
-#endif
-
-extern rpc_tp_info_t *bcm_rpc_tp_attach(osl_t *osh, void *bus);
-extern void bcm_rpc_tp_detach(rpc_tp_info_t *rpcb);
-extern void bcm_rpc_tp_down(rpc_tp_info_t *rpcb);
-extern void bcm_rpc_tp_watchdog(rpc_tp_info_t *rpcb);
-
-extern int bcm_rpc_tp_buf_send(rpc_tp_info_t *rpcb, rpc_buf_t *buf);
-
-/* callback for tx_complete, rx_pkt */
-extern void bcm_rpc_tp_register_cb(rpc_tp_info_t *rpcb,
-				   rpc_tx_complete_fn_t txcmplt,
-				   void *tx_context, rpc_rx_fn_t rxpkt,
-				   void *rx_context, rpc_osl_t *rpc_osh);
-extern void bcm_rpc_tp_deregister_cb(rpc_tp_info_t *rpcb);
-
-/* Buffer manipulation */
-extern uint bcm_rpc_buf_tp_header_len(rpc_tp_info_t *rpcb);
-extern rpc_buf_t *bcm_rpc_tp_buf_alloc(rpc_tp_info_t *rpcb, int len);
-extern void bcm_rpc_tp_buf_free(rpc_tp_info_t *rpcb, rpc_buf_t *buf);
-extern int bcm_rpc_buf_len_get(rpc_tp_info_t *rpcb, rpc_buf_t *b);
-extern int bcm_rpc_buf_len_set(rpc_tp_info_t *rpcb, rpc_buf_t *b, uint len);
-extern rpc_buf_t *bcm_rpc_buf_next_get(rpc_tp_info_t *rpcb, rpc_buf_t *b);
-extern void bcm_rpc_buf_next_set(rpc_tp_info_t *rpcb, rpc_buf_t *b,
-				 rpc_buf_t *nextb);
-extern unsigned char *bcm_rpc_buf_data(rpc_tp_info_t *rpcb, rpc_buf_t *b);
-extern unsigned char *bcm_rpc_buf_push(rpc_tp_info_t *rpcb, rpc_buf_t *b,
-				       uint delta);
-extern unsigned char *bcm_rpc_buf_pull(rpc_tp_info_t *rpcb, rpc_buf_t *b,
-				       uint delta);
-extern void bcm_rpc_tp_buf_release(rpc_tp_info_t *rpcb, rpc_buf_t *buf);
-extern void bcm_rpc_tp_buf_cnt_adjust(rpc_tp_info_t *rpcb, int adjust);
-/* RPC call_with_return */
-extern int bcm_rpc_tp_recv_rtn(rpc_tp_info_t *rpcb);
-extern int bcm_rpc_tp_get_device_speed(rpc_tp_info_t *rpc_th);
-#ifdef BCMDBG
-extern int bcm_rpc_tp_dump(rpc_tp_info_t *rpcb, struct bcmstrbuf *b);
-#endif
-
-#ifdef WLC_LOW
-/* intercept USB pkt to parse RPC header: USB driver rx-> wl_send -> this -> wl driver */
-extern void bcm_rpc_tp_rx_from_dnglbus(rpc_tp_info_t *rpc_th, struct lbuf *lb);
-
-/* RPC callreturn pkt, go to USB driver tx */
-extern int bcm_rpc_tp_send_callreturn(rpc_tp_info_t *rpc_th, rpc_buf_t *b);
-
-extern void bcm_rpc_tp_dump(rpc_tp_info_t *rpcb);
-extern void bcm_rpc_tp_txflowctl(rpc_tp_info_t *rpcb, bool state, int prio);
-extern void bcm_rpc_tp_txflowctlcb_init(rpc_tp_info_t *rpc_th, void *ctx,
-					rpc_txflowctl_cb_t cb);
-extern void bcm_rpc_tp_txflowctlcb_deinit(rpc_tp_info_t *rpc_th);
-extern void bcm_rpc_tp_txq_wm_set(rpc_tp_info_t *rpc_th, u8 hiwm,
-				  u8 lowm);
-extern void bcm_rpc_tp_txq_wm_get(rpc_tp_info_t *rpc_th, u8 *hiwm,
-				  u8 *lowm);
-#endif				/* WLC_LOW */
-
-extern void bcm_rpc_tp_agg_set(rpc_tp_info_t *rpcb, u32 reason, bool set);
-extern void bcm_rpc_tp_agg_limit_set(rpc_tp_info_t *rpc_th, u8 sf,
-				     u16 bytes);
-extern void bcm_rpc_tp_agg_limit_get(rpc_tp_info_t *rpc_th, u8 *sf,
-				     u16 *bytes);
-
-#define BCM_RPC_TP_MSG_LEVEL_MASK	0x00ff
-/* dongle msg level */
-#define RPC_TP_MSG_DNGL_ERR_VAL		0x0001	/* DNGL TP error msg */
-#define RPC_TP_MSG_DNGL_DBG_VAL		0x0002	/* DNGL TP dbg msg */
-#define RPC_TP_MSG_DNGL_AGG_VAL		0x0004	/* DNGL TP agg msg */
-#define RPC_TP_MSG_DNGL_DEA_VAL		0x0008	/* DNGL TP deag msg */
-
-/* host msg level */
-#define RPC_TP_MSG_HOST_ERR_VAL		0x0001	/* DNGL TP error msg */
-#define RPC_TP_MSG_HOST_DBG_VAL		0x0002	/* DNGL TP dbg msg */
-#define RPC_TP_MSG_HOST_AGG_VAL		0x0004	/* DNGL TP agg msg */
-#define RPC_TP_MSG_HOST_DEA_VAL		0x0008	/* DNGL TP deag msg */
-
-extern void bcm_rpc_tp_msglevel_set(rpc_tp_info_t *rpc_th, u8 msglevel,
-				    bool high_low);
-
-#endif				/* _bcm_rpc_tp_h_ */
diff --git a/drivers/staging/brcm80211/include/bcm_xdr.h b/drivers/staging/brcm80211/include/bcm_xdr.h
deleted file mode 100644
index 50fbd78..0000000
--- a/drivers/staging/brcm80211/include/bcm_xdr.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCM_XDR_H
-#define _BCM_XDR_H
-
-/*
- * bcm_xdr_buf_t
- * Structure used for bookkeeping of a buffer being packed or unpacked.
- * Keeps a current read/write pointer and size as well as
- * the original buffer pointer and size.
- *
- */
-typedef struct {
-	u8 *buf;		/* pointer to current position in origbuf */
-	uint size;		/* current (residual) size in bytes */
-	u8 *origbuf;		/* unmodified pointer to orignal buffer */
-	uint origsize;		/* unmodified orignal buffer size in bytes */
-} bcm_xdr_buf_t;
-
-void bcm_xdr_buf_init(bcm_xdr_buf_t *b, void *buf, size_t len);
-
-int bcm_xdr_pack_u32(bcm_xdr_buf_t *b, u32 val);
-int bcm_xdr_unpack_u32(bcm_xdr_buf_t *b, u32 *pval);
-int bcm_xdr_pack_s32(bcm_xdr_buf_t *b, s32 val);
-int bcm_xdr_unpack_s32(bcm_xdr_buf_t *b, s32 *pval);
-int bcm_xdr_pack_s8(bcm_xdr_buf_t *b, s8 val);
-int bcm_xdr_unpack_s8(bcm_xdr_buf_t *b, s8 *pval);
-int bcm_xdr_pack_opaque(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_unpack_opaque(bcm_xdr_buf_t *b, uint len, void **pdata);
-int bcm_xdr_unpack_opaque_cpy(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_pack_opaque_varlen(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_unpack_opaque_varlen(bcm_xdr_buf_t *b, uint *plen, void **pdata);
-int bcm_xdr_pack_string(bcm_xdr_buf_t *b, char *str);
-int bcm_xdr_unpack_string(bcm_xdr_buf_t *b, uint *plen, char **pstr);
-
-int bcm_xdr_pack_u8_vec(bcm_xdr_buf_t *, u8 *vec, u32 elems);
-int bcm_xdr_unpack_u8_vec(bcm_xdr_buf_t *, u8 *vec, u32 elems);
-int bcm_xdr_pack_u16_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-int bcm_xdr_unpack_u16_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-int bcm_xdr_pack_u32_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-int bcm_xdr_unpack_u32_vec(bcm_xdr_buf_t *b, uint len, void *vec);
-
-int bcm_xdr_pack_opaque_raw(bcm_xdr_buf_t *b, uint len, void *data);
-int bcm_xdr_pack_opaque_pad(bcm_xdr_buf_t *b);
-
-#endif				/* _BCM_XDR_H */
diff --git a/drivers/staging/brcm80211/include/bcmdefs.h b/drivers/staging/brcm80211/include/bcmdefs.h
index dc52e9d..74601fc 100644
--- a/drivers/staging/brcm80211/include/bcmdefs.h
+++ b/drivers/staging/brcm80211/include/bcmdefs.h
@@ -42,9 +42,6 @@
 #define BCMFASTPATH
 #endif
 
-/* Put some library data/code into ROM to reduce RAM requirements */
-#define BCMROMFN(_fn)		_fn
-
 /* Bus types */
 #define	SI_BUS			0	/* SOC Interconnect */
 #define	PCI_BUS			1	/* PCI target */
@@ -54,35 +51,6 @@
 #define SPI_BUS			6	/* gSPI target */
 #define RPC_BUS			7	/* RPC target */
 
-/* Allows size optimization for single-bus image */
-#ifdef BCMBUSTYPE
-#define BUSTYPE(bus) 	(BCMBUSTYPE)
-#else
-#define BUSTYPE(bus) 	(bus)
-#endif
-
-/* Allows size optimization for single-backplane image */
-#ifdef BCMCHIPTYPE
-#define CHIPTYPE(bus) 	(BCMCHIPTYPE)
-#else
-#define CHIPTYPE(bus) 	(bus)
-#endif
-
-/* Allows size optimization for SPROM support */
-#define SPROMBUS	(PCI_BUS)
-
-/* Allows size optimization for single-chip image */
-#ifdef BCMCHIPID
-#define CHIPID(chip)	(BCMCHIPID)
-#else
-#define CHIPID(chip)	(chip)
-#endif
-
-#ifdef BCMCHIPREV
-#define CHIPREV(rev)	(BCMCHIPREV)
-#else
-#define CHIPREV(rev)	(rev)
-#endif
 
 /* Defines for DMA Address Width - Shared between OSL and HNDDMA */
 #define DMADDR_MASK_32 0x0	/* Address mask for 32-bits */
@@ -146,31 +114,11 @@
 
 #define BCMEXTRAHDROOM 172
 
-/* Headroom required for dongle-to-host communication.  Packets allocated
- * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
- * leave this much room in front for low-level message headers which may
- * be needed to get across the dongle bus to the host.  (These messages
- * don't go over the network, so room for the full WL header above would
- * be a waste.).
-*/
-#define BCMDONGLEHDRSZ 12
-#define BCMDONGLEPADSZ 16
-
-#define BCMDONGLEOVERHEAD	(BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
-
 #ifdef BCMDBG
-
-#define BCMDBG_ERR
-
 #ifndef BCMDBG_ASSERT
 #define BCMDBG_ASSERT
-#endif				/* BCMDBG_ASSERT */
-
-#endif				/* BCMDBG */
-
-#if defined(BCMDBG_ASSERT)
-#define BCMASSERT_SUPPORT
-#endif
+#endif	/* BCMDBG_ASSERT */
+#endif	/* BCMDBG */
 
 /* Macros for doing definition and get/set of bitfields
  * Usage example, e.g. a three-bit field (bits 4-6):
@@ -190,11 +138,10 @@
 		(((val) & (~(field ## _M << field ## _S))) | \
 		 ((unsigned)(bits) << field ## _S))
 
-/* define BCMSMALL to remove misc features for memory-constrained environments */
-#define	BCMSPACE
-#define bcmspace	true	/* if (bcmspace) code is retained */
-
 /* Max. nvram variable table size */
 #define	MAXSZ_NVRAM_VARS	4096
 
+/* handle forward declaration */
+struct wl_info;
+
 #endif				/* _bcmdefs_h_ */
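
    The bitfield helper whose tail is visible in the hunk above is conventionally
    paired as GFIELD()/SFIELD() in Broadcom's bcmdefs.h; assuming those names, a
    usage sketch following the three-bit-field example in the header comment
    (FOO_M/FOO_S and example_set_foo are hypothetical):

	#define FOO_M	0x7	/* three-bit mask, pre-shift */
	#define FOO_S	4	/* field occupies bits 4-6 */

	static unsigned int example_set_foo(unsigned int reg, unsigned int bits)
	{
		reg = SFIELD(reg, FOO, bits);	/* clear bits 4-6, then insert 'bits' */
		return GFIELD(reg, FOO);	/* read the field back */
	}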
diff --git a/drivers/staging/brcm80211/include/bcmsdbus.h b/drivers/staging/brcm80211/include/bcmsdbus.h
index ca99495..89059dd 100644
--- a/drivers/staging/brcm80211/include/bcmsdbus.h
+++ b/drivers/staging/brcm80211/include/bcmsdbus.h
@@ -46,8 +46,8 @@
  *  The handler shall be provided by all subsequent calls. No local cache
  *  cfghdl points to the starting address of pci device mapped memory
  */
-extern sdioh_info_t *sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
-extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
+extern sdioh_info_t *sdioh_attach(struct osl_info *osh, void *cfghdl, uint irq);
+extern SDIOH_API_RC sdioh_detach(struct osl_info *osh, sdioh_info_t *si);
 extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si,
 					     sdioh_cb_fn_t fn, void *argh);
 extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
@@ -79,7 +79,7 @@
 					 uint fix_inc, uint rw, uint fnc_num,
 					 u32 addr, uint regwidth,
 					 u32 buflen, u8 *buffer,
-					 void *pkt);
+					 struct sk_buff *pkt);
 
 /* get cis data */
 extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, u8 *cis,
diff --git a/drivers/staging/brcm80211/include/bcmsdh.h b/drivers/staging/brcm80211/include/bcmsdh.h
index 6b80983..0e1f799 100644
--- a/drivers/staging/brcm80211/include/bcmsdh.h
+++ b/drivers/staging/brcm80211/include/bcmsdh.h
@@ -40,11 +40,11 @@
  *    implementation may maintain a single "default" handle (e.g. the first or
  *    most recent one) to enable single-instance implementations to pass NULL.
  */
-extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva,
-				    uint irq);
+extern bcmsdh_info_t *bcmsdh_attach(struct osl_info *osh, void *cfghdl,
+				    void **regsva, uint irq);
 
 /* Detach - freeup resources allocated in attach */
-extern int bcmsdh_detach(osl_t *osh, void *sdh);
+extern int bcmsdh_detach(struct osl_info *osh, void *sdh);
 
 /* Query if SD device interrupts are enabled */
 extern bool bcmsdh_intr_query(void *sdh);
@@ -122,7 +122,7 @@
 			   u8 *buf, uint nbytes, void *pkt,
 			   bcmsdh_cmplt_fn_t complete, void *handle);
 extern int bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
-			   u8 *buf, uint nbytes, void *pkt,
+			   u8 *buf, uint nbytes, struct sk_buff *pkt,
 			   bcmsdh_cmplt_fn_t complete, void *handle);
 
 /* Flags bits */
@@ -174,8 +174,8 @@
 typedef struct {
 	/* attach to device */
 	void *(*attach) (u16 vend_id, u16 dev_id, u16 bus, u16 slot,
-			 u16 func, uint bustype, void *regsva, osl_t *osh,
-			 void *param);
+			 u16 func, uint bustype, void *regsva,
+			 struct osl_info *osh, void *param);
 	/* detach from device */
 	void (*detach) (void *ch);
 } bcmsdh_driver_t;
diff --git a/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h b/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h
index 7d5aa71..4d671dd 100644
--- a/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h
+++ b/drivers/staging/brcm80211/include/bcmsdh_sdmmc.h
@@ -51,7 +51,7 @@
 #define CLIENT_INTR 		0x100	/* Get rid of this! */
 
 struct sdioh_info {
-	osl_t *osh;		/* osh handler */
+	struct osl_info *osh;		/* osh handler */
 	bool client_intr_enabled;	/* interrupt connnected flag */
 	bool intr_handler_valid;	/* client driver interrupt handler valid */
 	sdioh_cb_fn_t intr_handler;	/* registered interrupt handler */
@@ -94,8 +94,8 @@
  */
 
 /* Register mapping routines */
-extern u32 *sdioh_sdmmc_reg_map(osl_t *osh, s32 addr, int size);
-extern void sdioh_sdmmc_reg_unmap(osl_t *osh, s32 addr, int size);
+extern u32 *sdioh_sdmmc_reg_map(struct osl_info *osh, s32 addr, int size);
+extern void sdioh_sdmmc_reg_unmap(struct osl_info *osh, s32 addr, int size);
 
 /* Interrupt (de)registration routines */
 extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
diff --git a/drivers/staging/brcm80211/include/bcmsrom.h b/drivers/staging/brcm80211/include/bcmsrom.h
index 9d53657..cdcef74 100644
--- a/drivers/staging/brcm80211/include/bcmsrom.h
+++ b/drivers/staging/brcm80211/include/bcmsrom.h
@@ -20,15 +20,15 @@
 #include <bcmsrom_fmt.h>
 
 /* Prototypes */
-extern int srom_var_init(si_t *sih, uint bus, void *curmap, osl_t *osh,
-			 char **vars, uint *count);
+extern int srom_var_init(si_t *sih, uint bus, void *curmap,
+			 struct osl_info *osh, char **vars, uint *count);
 
-extern int srom_read(si_t *sih, uint bus, void *curmap, osl_t *osh,
+extern int srom_read(si_t *sih, uint bus, void *curmap, struct osl_info *osh,
 		     uint byteoff, uint nbytes, u16 *buf, bool check_crc);
 
 /* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
  *   and extract from it into name=value pairs
  */
-extern int srom_parsecis(osl_t *osh, u8 **pcis, uint ciscnt,
+extern int srom_parsecis(struct osl_info *osh, u8 **pcis, uint ciscnt,
 			 char **vars, uint *count);
 #endif				/* _bcmsrom_h_ */
diff --git a/drivers/staging/brcm80211/include/bcmutils.h b/drivers/staging/brcm80211/include/bcmutils.h
index b533159..a8f76d8 100644
--- a/drivers/staging/brcm80211/include/bcmutils.h
+++ b/drivers/staging/brcm80211/include/bcmutils.h
@@ -30,7 +30,6 @@
 	};
 
 /* ** driver-only section ** */
-#include <osl.h>
 
 #define GPIO_PIN_NOTDEFINED 	0x20	/* Pin not defined */
 
@@ -56,10 +55,10 @@
 #endif
 
 	typedef struct pktq_prec {
-		void *head;	/* first packet to dequeue */
-		void *tail;	/* last packet to dequeue */
-		u16 len;	/* number of queued packets */
-		u16 max;	/* maximum number of queued packets */
+		struct sk_buff *head;	/* first packet to dequeue */
+		struct sk_buff *tail;	/* last packet to dequeue */
+		u16 len;		/* number of queued packets */
+		u16 max;		/* maximum number of queued packets */
 	} pktq_prec_t;
 
 /* multi-priority pkt queue */
@@ -105,23 +104,26 @@
 #define pktq_ppeek(pq, prec)            ((pq)->q[prec].head)
 #define pktq_ppeek_tail(pq, prec)       ((pq)->q[prec].tail)
 
-	extern void *pktq_penq(struct pktq *pq, int prec, void *p);
-	extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
-	extern void *pktq_pdeq(struct pktq *pq, int prec);
-	extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+extern struct sk_buff *pktq_penq(struct pktq *pq, int prec,
+				 struct sk_buff *p);
+extern struct sk_buff *pktq_penq_head(struct pktq *pq, int prec,
+				      struct sk_buff *p);
+extern struct sk_buff *pktq_pdeq(struct pktq *pq, int prec);
+extern struct sk_buff *pktq_pdeq_tail(struct pktq *pq, int prec);
+
 /* Empty the queue at particular precedence level */
 #ifdef BRCM_FULLMAC
-	extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec,
+	extern void pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec,
 		bool dir);
 #else
-	extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec,
+	extern void pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec,
 		bool dir, ifpkt_cb_t fn, int arg);
 #endif /* BRCM_FULLMAC */
 
 /* operations on a set of precedences in packet queue */
 
-	extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
-	extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern struct sk_buff *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 
 /* operations on packet queue as a whole */
 
@@ -140,20 +142,19 @@
 
 	extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
 /* prec_out may be NULL if caller is not interested in return value */
-	extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+	extern struct sk_buff *pktq_peek_tail(struct pktq *pq, int *prec_out);
 #ifdef BRCM_FULLMAC
-	extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
+	extern void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir);
 #else
-	extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir,
+	extern void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir,
 		ifpkt_cb_t fn, int arg);
 #endif
 
 /* externs */
 /* packet */
-	extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len,
-			       unsigned char *buf);
-	extern uint pktsegcnt(osl_t *osh, void *p);
-	extern uint pkttotlen(osl_t *osh, void *p);
+	extern uint pktfrombuf(struct osl_info *osh, struct sk_buff *p,
+			       uint offset, int len, unsigned char *buf);
+	extern uint pkttotlen(struct osl_info *osh, struct sk_buff *p);
 
 /* ethernet address */
 	extern int bcm_ether_atoe(char *p, struct ether_addr *ea);
@@ -166,7 +167,8 @@
 	extern char *getvar(char *vars, const char *name);
 	extern int getintvar(char *vars, const char *name);
 #ifdef BCMDBG
-	extern void prpkt(const char *msg, osl_t *osh, void *p0);
+	extern void prpkt(const char *msg, struct osl_info *osh,
+			  struct sk_buff *p0);
 #endif				/* BCMDBG */
 #define bcm_perf_enable()
 #define bcmstats(fmt)
@@ -359,7 +361,21 @@
 #define CEIL(x, y)		(((x) + ((y)-1)) / (y))
 #define	ISPOWEROF2(x)		((((x)-1)&(x)) == 0)
 
-/* bit map related macros */
+/* map physical to virtual I/O */
+#if !defined(CONFIG_MMC_MSM7X00A)
+#define REG_MAP(pa, size)       ioremap_nocache((unsigned long)(pa), \
+					(unsigned long)(size))
+#else
+#define REG_MAP(pa, size)       (void *)(0)
+#endif
+
+/* Register operations */
+#define AND_REG(osh, r, v)	W_REG(osh, (r), R_REG(osh, r) & (v))
+#define OR_REG(osh, r, v)	W_REG(osh, (r), R_REG(osh, r) | (v))
+
+#define SET_REG(osh, r, mask, val) \
+		W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
+
 #ifndef setbit
 #ifndef NBBY			/* the BSD family defines NBBY */
 #define	NBBY	8		/* 8 bits per byte */
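
    A usage sketch for the AND_REG/OR_REG/SET_REG helpers added above. 'ctrl' is a
    hypothetical 32-bit register in a region mapped with REG_MAP(); R_REG/W_REG come
    from the reworked osl.h later in this diff.

	static void example_update_ctrl(struct osl_info *osh, u32 *ctrl)
	{
		OR_REG(osh, ctrl, 0x1);			/* set the enable bit */
		AND_REG(osh, ctrl, ~(u32)0x2);		/* clear bit 1 */
		SET_REG(osh, ctrl, 0xf0, 0x30);		/* replace bits 4-7 with 0x3 */
	}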
diff --git a/drivers/staging/brcm80211/include/d11.h b/drivers/staging/brcm80211/include/d11.h
index c07548c..be2d497 100644
--- a/drivers/staging/brcm80211/include/d11.h
+++ b/drivers/staging/brcm80211/include/d11.h
@@ -17,13 +17,6 @@
 #ifndef	_D11_H
 #define	_D11_H
 
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <hndsoc.h>
-#include <sbhndpio.h>
-#include <sbhnddma.h>
-#include <proto/802.11.h>
-
 /* This marks the start of a packed structure section. */
 #include <packed_section_start.h>
 
diff --git a/drivers/staging/brcm80211/include/dbus.h b/drivers/staging/brcm80211/include/dbus.h
deleted file mode 100644
index 81ffea7..0000000
--- a/drivers/staging/brcm80211/include/dbus.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __DBUS_H__
-#define __DBUS_H__
-
-#ifdef BCMDBG
-#define DBUSERR(args)        do { if (net_ratelimit()) printf args; } while (0)
-#define DBUSTRACE(args)
-#define DBUSDBGLOCK(args)
-
-#else
-#define DBUSTRACE(args)
-#define DBUSERR(args)
-#define DBUSDBGLOCK(args)
-#endif
-
-enum {
-	DBUS_OK = 0,
-	DBUS_ERR = -200,
-	DBUS_ERR_TIMEOUT,
-	DBUS_ERR_DISCONNECT,
-	DBUS_ERR_NODEVICE,
-	DBUS_ERR_UNSUPPORTED,
-	DBUS_ERR_PENDING,
-	DBUS_ERR_NOMEM,
-	DBUS_ERR_TXFAIL,
-	DBUS_ERR_TXTIMEOUT,
-	DBUS_ERR_TXDROP,
-	DBUS_ERR_RXFAIL,
-	DBUS_ERR_RXDROP,
-	DBUS_ERR_TXCTLFAIL,
-	DBUS_ERR_RXCTLFAIL,
-	DBUS_ERR_REG_PARAM,
-	DBUS_STATUS_CANCELLED
-};
-
-#define ERR_CBMASK_TXFAIL		0x00000001
-#define ERR_CBMASK_RXFAIL		0x00000002
-#define ERR_CBMASK_ALL			0xFFFFFFFF
-
-#define DBUS_CBCTL_WRITE		0
-#define DBUS_CBCTL_READ			1
-
-#define DBUS_TX_RETRY_LIMIT		3	/* retries for failed txirb */
-#define DBUS_TX_TIMEOUT_INTERVAL	250	/* timeout for txirb complete, in ms */
-
-#define DBUS_BUFFER_SIZE_TX	5000
-#define DBUS_BUFFER_SIZE_RX	5000
-
-#define DBUS_BUFFER_SIZE_TX_NOAGG	2048
-#define DBUS_BUFFER_SIZE_RX_NOAGG	2048
-
-/* DBUS types */
-enum {
-	DBUS_USB,
-	DBUS_SDIO,
-	DBUS_SPI,
-	DBUS_UNKNOWN
-};
-
-enum dbus_state {
-	DBUS_STATE_DL_PENDING,
-	DBUS_STATE_DL_DONE,
-	DBUS_STATE_UP,
-	DBUS_STATE_DOWN,
-	DBUS_STATE_PNP_FWDL,
-	DBUS_STATE_DISCONNECT
-};
-
-enum dbus_pnp_state {
-	DBUS_PNP_DISCONNECT,
-	DBUS_PNP_SLEEP,
-	DBUS_PNP_RESUME
-};
-
-typedef enum _DEVICE_SPEED {
-	INVALID_SPEED = -1,
-	LOW_SPEED = 1,		/* USB 1.1: 1.5 Mbps */
-	FULL_SPEED,		/* USB 1.1: 12  Mbps */
-	HIGH_SPEED,		/* USB 2.0: 480 Mbps */
-	SUPER_SPEED,		/* USB 3.0: 4.8 Gbps */
-} DEVICE_SPEED;
-
-typedef struct {
-	int bustype;
-	int vid;
-	int pid;
-	int devid;
-	int chiprev;		/* chip revsion number */
-	int mtu;
-	int nchan;		/* Data Channels */
-} dbus_attrib_t;
-
-/* FIX: Account for errors related to DBUS;
- * Let upper layer account for packets/bytes
- */
-typedef struct {
-	u32 rx_errors;
-	u32 tx_errors;
-	u32 rx_dropped;
-	u32 tx_dropped;
-} dbus_stats_t;
-
-/*
- * Configurable BUS parameters
- */
-typedef struct {
-	bool rxctl_deferrespok;
-} dbus_config_t;
-
-struct dbus_callbacks;
-struct exec_parms;
-
-typedef void *(*probe_cb_t) (void *arg, const char *desc, u32 bustype,
-			     u32 hdrlen);
-typedef void (*disconnect_cb_t) (void *arg);
-typedef void *(*exec_cb_t) (struct exec_parms *args);
-
-/* Client callbacks registered during dbus_attach() */
-typedef struct dbus_callbacks {
-	void (*send_complete) (void *cbarg, void *info, int status);
-	void (*recv_buf) (void *cbarg, u8 *buf, int len);
-	void (*recv_pkt) (void *cbarg, void *pkt);
-	void (*txflowcontrol) (void *cbarg, bool onoff);
-	void (*errhandler) (void *cbarg, int err);
-	void (*ctl_complete) (void *cbarg, int type, int status);
-	void (*state_change) (void *cbarg, int state);
-	void *(*pktget) (void *cbarg, uint len, bool send);
-	void (*pktfree) (void *cbarg, void *p, bool send);
-} dbus_callbacks_t;
-
-struct dbus_pub;
-struct bcmstrbuf;
-struct dbus_irb;
-struct dbus_irb_rx;
-struct dbus_irb_tx;
-struct dbus_intf_callbacks;
-
-typedef struct {
-	void *(*attach) (struct dbus_pub *pub, void *cbarg,
-			 struct dbus_intf_callbacks *cbs);
-	void (*detach) (struct dbus_pub *pub, void *bus);
-
-	int (*up) (void *bus);
-	int (*down) (void *bus);
-	int (*send_irb) (void *bus, struct dbus_irb_tx *txirb);
-	int (*recv_irb) (void *bus, struct dbus_irb_rx *rxirb);
-	int (*cancel_irb) (void *bus, struct dbus_irb_tx *txirb);
-	int (*send_ctl) (void *bus, u8 *buf, int len);
-	int (*recv_ctl) (void *bus, u8 *buf, int len);
-	int (*get_stats) (void *bus, dbus_stats_t *stats);
-	int (*get_attrib) (void *bus, dbus_attrib_t *attrib);
-
-	int (*pnp) (void *bus, int event);
-	int (*remove) (void *bus);
-	int (*resume) (void *bus);
-	int (*suspend) (void *bus);
-	int (*stop) (void *bus);
-	int (*reset) (void *bus);
-
-	/* Access to bus buffers directly */
-	void *(*pktget) (void *bus, int len);
-	void (*pktfree) (void *bus, void *pkt);
-
-	int (*iovar_op) (void *bus, const char *name, void *params, int plen,
-			 void *arg, int len, bool set);
-	void (*dump) (void *bus, struct bcmstrbuf *strbuf);
-	int (*set_config) (void *bus, dbus_config_t *config);
-	int (*get_config) (void *bus, dbus_config_t *config);
-
-	 bool(*device_exists) (void *bus);
-	 bool(*dlneeded) (void *bus);
-	int (*dlstart) (void *bus, u8 *fw, int len);
-	int (*dlrun) (void *bus);
-	 bool(*recv_needed) (void *bus);
-
-	void *(*exec_rxlock) (void *bus, exec_cb_t func,
-			      struct exec_parms *args);
-	void *(*exec_txlock) (void *bus, exec_cb_t func,
-			      struct exec_parms *args);
-
-	int (*tx_timer_init) (void *bus);
-	int (*tx_timer_start) (void *bus, uint timeout);
-	int (*tx_timer_stop) (void *bus);
-
-	int (*sched_dpc) (void *bus);
-	int (*lock) (void *bus);
-	int (*unlock) (void *bus);
-	int (*sched_probe_cb) (void *bus);
-
-	int (*shutdown) (void *bus);
-
-	int (*recv_stop) (void *bus);
-	int (*recv_resume) (void *bus);
-
-	/* Add from the bottom */
-} dbus_intf_t;
-
-typedef struct dbus_pub {
-	struct osl_info *osh;
-	dbus_stats_t stats;
-	dbus_attrib_t attrib;
-	enum dbus_state busstate;
-	DEVICE_SPEED device_speed;
-	int ntxq, nrxq, rxsize;
-	void *bus;
-	struct shared_info *sh;
-} dbus_pub_t;
-
-#define BUS_INFO(bus, type) (((type *) bus)->pub->bus)
-
-/*
- * Public Bus Function Interface
- */
-extern int dbus_register(int vid, int pid, probe_cb_t prcb,
-			 disconnect_cb_t discb, void *prarg, void *param1,
-			 void *param2);
-extern int dbus_deregister(void);
-
-extern const dbus_pub_t *dbus_attach(struct osl_info *osh, int rxsize, int nrxq,
-				     int ntxq, void *cbarg,
-				     dbus_callbacks_t *cbs,
-				     struct shared_info *sh);
-extern void dbus_detach(const dbus_pub_t *pub);
-
-extern int dbus_up(const dbus_pub_t *pub);
-extern int dbus_down(const dbus_pub_t *pub);
-extern int dbus_stop(const dbus_pub_t *pub);
-extern int dbus_shutdown(const dbus_pub_t *pub);
-extern void dbus_flowctrl_rx(const dbus_pub_t *pub, bool on);
-
-extern int dbus_send_buf(const dbus_pub_t *pub, u8 *buf, int len,
-			 void *info);
-extern int dbus_send_pkt(const dbus_pub_t *pub, void *pkt, void *info);
-extern int dbus_send_ctl(const dbus_pub_t *pub, u8 *buf, int len);
-extern int dbus_recv_ctl(const dbus_pub_t *pub, u8 *buf, int len);
-
-extern int dbus_get_stats(const dbus_pub_t *pub, dbus_stats_t *stats);
-extern int dbus_get_attrib(const dbus_pub_t *pub, dbus_attrib_t *attrib);
-extern int dbus_get_device_speed(const dbus_pub_t *pub);
-extern int dbus_set_config(const dbus_pub_t *pub, dbus_config_t *config);
-extern int dbus_get_config(const dbus_pub_t *pub, dbus_config_t *config);
-
-extern void *dbus_pktget(const dbus_pub_t *pub, int len);
-extern void dbus_pktfree(const dbus_pub_t *pub, void *pkt);
-
-extern int dbus_set_errmask(const dbus_pub_t *pub, u32 mask);
-extern int dbus_pnp_sleep(const dbus_pub_t *pub);
-extern int dbus_pnp_resume(const dbus_pub_t *pub, int *fw_reload);
-extern int dbus_pnp_disconnect(const dbus_pub_t *pub);
-
-extern int dbus_iovar_op(const dbus_pub_t *pub, const char *name,
-			 void *params, int plen, void *arg, int len, bool set);
-#ifdef BCMDBG
-extern void dbus_hist_dump(const dbus_pub_t *pub, struct bcmstrbuf *b);
-#endif				/* BCMDBG */
-/*
- * Private Common Bus Interface
- */
-
-/* IO Request Block (IRB) */
-typedef struct dbus_irb {
-	struct dbus_irb *next;	/* it's casted from dbus_irb_tx or dbus_irb_rx struct */
-} dbus_irb_t;
-
-typedef struct dbus_irb_rx {
-	struct dbus_irb irb;	/* Must be first */
-	u8 *buf;
-	int buf_len;
-	int actual_len;
-	void *pkt;
-	void *info;
-	void *arg;
-} dbus_irb_rx_t;
-
-typedef struct dbus_irb_tx {
-	struct dbus_irb irb;	/* Must be first */
-	u8 *buf;
-	int len;
-	void *pkt;
-	int retry_count;
-	void *info;
-	void *arg;
-} dbus_irb_tx_t;
-
-/* DBUS interface callbacks are different from user callbacks
- * so, internally, different info can be passed to upper layer
- */
-typedef struct dbus_intf_callbacks {
-	void (*send_irb_timeout) (void *cbarg, dbus_irb_tx_t *txirb);
-	void (*send_irb_complete) (void *cbarg, dbus_irb_tx_t *txirb,
-				   int status);
-	void (*recv_irb_complete) (void *cbarg, dbus_irb_rx_t *rxirb,
-				   int status);
-	void (*errhandler) (void *cbarg, int err);
-	void (*ctl_complete) (void *cbarg, int type, int status);
-	void (*state_change) (void *cbarg, int state);
-	 bool(*isr) (void *cbarg, bool *wantdpc);
-	 bool(*dpc) (void *cbarg, bool bounded);
-	void (*watchdog) (void *cbarg);
-	void *(*pktget) (void *cbarg, uint len, bool send);
-	void (*pktfree) (void *cbarg, void *p, bool send);
-	struct dbus_irb *(*getirb) (void *cbarg, bool send);
-	void (*rxerr_indicate) (void *cbarg, bool on);
-} dbus_intf_callbacks_t;
-
-/*
- * Porting: To support new bus, port these functions below
- */
-
-/*
- * Bus specific Interface
- * Implemented by dbus_usb.c/dbus_sdio.c
- */
-extern int dbus_bus_register(int vid, int pid, probe_cb_t prcb,
-			     disconnect_cb_t discb, void *prarg,
-			     dbus_intf_t **intf, void *param1, void *param2);
-extern int dbus_bus_deregister(void);
-
-/*
- * Bus-specific and OS-specific Interface
- * Implemented by dbus_usb_[linux/ndis].c/dbus_sdio_[linux/ndis].c
- */
-extern int dbus_bus_osl_register(int vid, int pid, probe_cb_t prcb,
-				 disconnect_cb_t discb, void *prarg,
-				 dbus_intf_t **intf, void *param1,
-				 void *param2);
-extern int dbus_bus_osl_deregister(void);
-
-/*
- * Bus-specific, OS-specific, HW-specific Interface
- * Mainly for SDIO Host HW controller
- */
-extern int dbus_bus_osl_hw_register(int vid, int pid, probe_cb_t prcb,
-				    disconnect_cb_t discb, void *prarg,
-				    dbus_intf_t **intf);
-extern int dbus_bus_osl_hw_deregister(void);
-
-#endif				/* __DBUS_H__ */
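
    Before its removal, dbus.h defined the bus abstraction sketched below. The
    fragment is based only on the prototypes in this header; the my_* names, the
    Broadcom USB vendor id and the queue depths are illustrative assumptions, not
    code from the deleted driver.

	static void my_send_complete(void *cbarg, void *info, int status) { }
	static void my_state_change(void *cbarg, int state) { }

	static dbus_callbacks_t my_cbs = {
		.send_complete = my_send_complete,
		.state_change  = my_state_change,
		/* other hooks omitted for brevity in this sketch */
	};

	static void *my_probe(void *arg, const char *desc, u32 bustype, u32 hdrlen)
	{
		return arg;		/* would normally allocate per-device state */
	}

	static void my_disconnect(void *arg) { }

	static int example_dbus_client_init(struct osl_info *osh)
	{
		const dbus_pub_t *pub;

		if (dbus_register(0x0a5c /* assumed vid */, 0 /* any pid */,
				  my_probe, my_disconnect, NULL, NULL, NULL) != DBUS_OK)
			return -1;

		pub = dbus_attach(osh, DBUS_BUFFER_SIZE_RX, 8, 8, NULL, &my_cbs, NULL);
		return pub ? dbus_up(pub) : -1;
	}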
diff --git a/drivers/staging/brcm80211/include/epivers.h b/drivers/staging/brcm80211/include/epivers.h
deleted file mode 100644
index 2e6b519..0000000
--- a/drivers/staging/brcm80211/include/epivers.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _epivers_h_
-#define _epivers_h_
-
-#define	EPI_MAJOR_VERSION	5
-
-#define	EPI_MINOR_VERSION	75
-
-#define	EPI_RC_NUMBER		11
-
-#define	EPI_INCREMENTAL_NUMBER	0
-
-#define EPI_BUILD_NUMBER	1
-
-#define	EPI_VERSION		{ 5, 75, 11, 0 }
-
-#ifdef BCMSDIO
-/* EPI_VERSION_NUM must match FW version */
-#define	EPI_VERSION_NUM		0x054b0c00
-#else
-#define	EPI_VERSION_NUM		0x054b0b00
-#endif
-
-#define EPI_VERSION_DEV		5.75.11
-
-/* Driver Version String, ASCII, 32 chars max */
-#define	EPI_VERSION_STR		"5.75.11"
-
-#endif				/* _epivers_h_ */
diff --git a/drivers/staging/brcm80211/include/hnddma.h b/drivers/staging/brcm80211/include/hnddma.h
index bee4c89..4c5462b 100644
--- a/drivers/staging/brcm80211/include/hnddma.h
+++ b/drivers/staging/brcm80211/include/hnddma.h
@@ -19,7 +19,7 @@
 
 #ifndef _hnddma_pub_
 #define _hnddma_pub_
-typedef const struct hnddma_pub hnddma_t;
+struct hnddma_pub;
 #endif				/* _hnddma_pub_ */
 
 /* range param for dma_getnexttxp() and dma_txreclaim */
@@ -30,52 +30,54 @@
 } txd_range_t;
 
 /* dma function type */
-typedef void (*di_detach_t) (hnddma_t *dmah);
-typedef bool(*di_txreset_t) (hnddma_t *dmah);
-typedef bool(*di_rxreset_t) (hnddma_t *dmah);
-typedef bool(*di_rxidle_t) (hnddma_t *dmah);
-typedef void (*di_txinit_t) (hnddma_t *dmah);
-typedef bool(*di_txenabled_t) (hnddma_t *dmah);
-typedef void (*di_rxinit_t) (hnddma_t *dmah);
-typedef void (*di_txsuspend_t) (hnddma_t *dmah);
-typedef void (*di_txresume_t) (hnddma_t *dmah);
-typedef bool(*di_txsuspended_t) (hnddma_t *dmah);
-typedef bool(*di_txsuspendedidle_t) (hnddma_t *dmah);
-typedef int (*di_txfast_t) (hnddma_t *dmah, void *p, bool commit);
-typedef int (*di_txunframed_t) (hnddma_t *dmah, void *p, uint len,
+typedef void (*di_detach_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txreset_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxreset_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxidle_t) (struct hnddma_pub *dmah);
+typedef void (*di_txinit_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txenabled_t) (struct hnddma_pub *dmah);
+typedef void (*di_rxinit_t) (struct hnddma_pub *dmah);
+typedef void (*di_txsuspend_t) (struct hnddma_pub *dmah);
+typedef void (*di_txresume_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txsuspended_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txsuspendedidle_t) (struct hnddma_pub *dmah);
+typedef int (*di_txfast_t) (struct hnddma_pub *dmah, struct sk_buff *p,
+			    bool commit);
+typedef int (*di_txunframed_t) (struct hnddma_pub *dmah, void *p, uint len,
 				bool commit);
-typedef void *(*di_getpos_t) (hnddma_t *di, bool direction);
-typedef void (*di_fifoloopbackenable_t) (hnddma_t *dmah);
-typedef bool(*di_txstopped_t) (hnddma_t *dmah);
-typedef bool(*di_rxstopped_t) (hnddma_t *dmah);
-typedef bool(*di_rxenable_t) (hnddma_t *dmah);
-typedef bool(*di_rxenabled_t) (hnddma_t *dmah);
-typedef void *(*di_rx_t) (hnddma_t *dmah);
-typedef bool(*di_rxfill_t) (hnddma_t *dmah);
-typedef void (*di_txreclaim_t) (hnddma_t *dmah, txd_range_t range);
-typedef void (*di_rxreclaim_t) (hnddma_t *dmah);
-typedef unsigned long (*di_getvar_t) (hnddma_t *dmah, const char *name);
-typedef void *(*di_getnexttxp_t) (hnddma_t *dmah, txd_range_t range);
-typedef void *(*di_getnextrxp_t) (hnddma_t *dmah, bool forceall);
-typedef void *(*di_peeknexttxp_t) (hnddma_t *dmah);
-typedef void *(*di_peeknextrxp_t) (hnddma_t *dmah);
-typedef void (*di_rxparam_get_t) (hnddma_t *dmah, u16 *rxoffset,
+typedef void *(*di_getpos_t) (struct hnddma_pub *di, bool direction);
+typedef void (*di_fifoloopbackenable_t) (struct hnddma_pub *dmah);
+typedef bool(*di_txstopped_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxstopped_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxenable_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxenabled_t) (struct hnddma_pub *dmah);
+typedef void *(*di_rx_t) (struct hnddma_pub *dmah);
+typedef bool(*di_rxfill_t) (struct hnddma_pub *dmah);
+typedef void (*di_txreclaim_t) (struct hnddma_pub *dmah, txd_range_t range);
+typedef void (*di_rxreclaim_t) (struct hnddma_pub *dmah);
+typedef unsigned long (*di_getvar_t) (struct hnddma_pub *dmah,
+				      const char *name);
+typedef void *(*di_getnexttxp_t) (struct hnddma_pub *dmah, txd_range_t range);
+typedef void *(*di_getnextrxp_t) (struct hnddma_pub *dmah, bool forceall);
+typedef void *(*di_peeknexttxp_t) (struct hnddma_pub *dmah);
+typedef void *(*di_peeknextrxp_t) (struct hnddma_pub *dmah);
+typedef void (*di_rxparam_get_t) (struct hnddma_pub *dmah, u16 *rxoffset,
 				  u16 *rxbufsize);
-typedef void (*di_txblock_t) (hnddma_t *dmah);
-typedef void (*di_txunblock_t) (hnddma_t *dmah);
-typedef uint(*di_txactive_t) (hnddma_t *dmah);
-typedef void (*di_txrotate_t) (hnddma_t *dmah);
-typedef void (*di_counterreset_t) (hnddma_t *dmah);
-typedef uint(*di_ctrlflags_t) (hnddma_t *dmah, uint mask, uint flags);
-typedef char *(*di_dump_t) (hnddma_t *dmah, struct bcmstrbuf *b,
+typedef void (*di_txblock_t) (struct hnddma_pub *dmah);
+typedef void (*di_txunblock_t) (struct hnddma_pub *dmah);
+typedef uint(*di_txactive_t) (struct hnddma_pub *dmah);
+typedef void (*di_txrotate_t) (struct hnddma_pub *dmah);
+typedef void (*di_counterreset_t) (struct hnddma_pub *dmah);
+typedef uint(*di_ctrlflags_t) (struct hnddma_pub *dmah, uint mask, uint flags);
+typedef char *(*di_dump_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
 			    bool dumpring);
-typedef char *(*di_dumptx_t) (hnddma_t *dmah, struct bcmstrbuf *b,
+typedef char *(*di_dumptx_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
 			      bool dumpring);
-typedef char *(*di_dumprx_t) (hnddma_t *dmah, struct bcmstrbuf *b,
+typedef char *(*di_dumprx_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
 			      bool dumpring);
-typedef uint(*di_rxactive_t) (hnddma_t *dmah);
-typedef uint(*di_txpending_t) (hnddma_t *dmah);
-typedef uint(*di_txcommitted_t) (hnddma_t *dmah);
+typedef uint(*di_rxactive_t) (struct hnddma_pub *dmah);
+typedef uint(*di_txpending_t) (struct hnddma_pub *dmah);
+typedef uint(*di_txcommitted_t) (struct hnddma_pub *dmah);
 
 /* dma opsvec */
 typedef struct di_fcn_s {
@@ -141,7 +143,8 @@
 	uint txnobuf;		/* tx out of dma descriptors */
 };
 
-extern hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih,
+extern struct hnddma_pub *dma_attach(struct osl_info *osh, char *name,
+			    si_t *sih,
 			    void *dmaregstx, void *dmaregsrx, uint ntxd,
 			    uint nrxd, uint rxbufsize, int rxextheadroom,
 			    uint nrxpost, uint rxoffset, uint *msg_level);
@@ -238,6 +241,6 @@
 extern uint dma_addrwidth(si_t *sih, void *dmaregs);
 
 /* pio helpers */
-extern void dma_txpioloopback(osl_t *osh, dma32regs_t *);
+extern void dma_txpioloopback(struct osl_info *osh, dma32regs_t *);
 
 #endif				/* _hnddma_h_ */
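
    The 'hnddma_t' typedef gives way to a bare forward declaration because callers
    only ever hold a pointer to the public DMA state. A small sketch using the
    di_txfast_t type defined above; example_tx and its arguments are hypothetical.

	static int example_tx(struct hnddma_pub *dmah, struct sk_buff *p,
			      di_txfast_t txfast)
	{
		/* the full struct layout is not needed here, only the pointer */
		return txfast(dmah, p, true);	/* queue and commit immediately */
	}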
diff --git a/drivers/staging/brcm80211/include/hndpmu.h b/drivers/staging/brcm80211/include/hndpmu.h
index bbcf0ee..a0110e4 100644
--- a/drivers/staging/brcm80211/include/hndpmu.h
+++ b/drivers/staging/brcm80211/include/hndpmu.h
@@ -28,44 +28,44 @@
 #define SET_LDO_VOLTAGE_LNLDO1	9
 #define SET_LDO_VOLTAGE_LNLDO2_SEL	10
 
-extern void si_pmu_init(si_t *sih, osl_t *osh);
-extern void si_pmu_chip_init(si_t *sih, osl_t *osh);
-extern void si_pmu_pll_init(si_t *sih, osl_t *osh, u32 xtalfreq);
-extern void si_pmu_res_init(si_t *sih, osl_t *osh);
-extern void si_pmu_swreg_init(si_t *sih, osl_t *osh);
+extern void si_pmu_init(si_t *sih, struct osl_info *osh);
+extern void si_pmu_chip_init(si_t *sih, struct osl_info *osh);
+extern void si_pmu_pll_init(si_t *sih, struct osl_info *osh, u32 xtalfreq);
+extern void si_pmu_res_init(si_t *sih, struct osl_info *osh);
+extern void si_pmu_swreg_init(si_t *sih, struct osl_info *osh);
 
-extern u32 si_pmu_force_ilp(si_t *sih, osl_t *osh, bool force);
+extern u32 si_pmu_force_ilp(si_t *sih, struct osl_info *osh, bool force);
 
-extern u32 si_pmu_si_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_cpu_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_mem_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_alp_clock(si_t *sih, osl_t *osh);
-extern u32 si_pmu_ilp_clock(si_t *sih, osl_t *osh);
+extern u32 si_pmu_si_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_cpu_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_mem_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_alp_clock(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_ilp_clock(si_t *sih, struct osl_info *osh);
 
-extern void si_pmu_set_switcher_voltage(si_t *sih, osl_t *osh,
+extern void si_pmu_set_switcher_voltage(si_t *sih, struct osl_info *osh,
 					u8 bb_voltage, u8 rf_voltage);
-extern void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, u8 ldo,
+extern void si_pmu_set_ldo_voltage(si_t *sih, struct osl_info *osh, u8 ldo,
 				   u8 voltage);
-extern u16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh);
-extern void si_pmu_rcal(si_t *sih, osl_t *osh);
+extern u16 si_pmu_fast_pwrup_delay(si_t *sih, struct osl_info *osh);
+extern void si_pmu_rcal(si_t *sih, struct osl_info *osh);
 extern void si_pmu_pllupd(si_t *sih);
-extern void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid);
+extern void si_pmu_spuravoid(si_t *sih, struct osl_info *osh, u8 spuravoid);
 
-extern bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh);
-extern u32 si_pmu_measure_alpclk(si_t *sih, osl_t *osh);
+extern bool si_pmu_is_otp_powered(si_t *sih, struct osl_info *osh);
+extern u32 si_pmu_measure_alpclk(si_t *sih, struct osl_info *osh);
 
 extern u32 si_pmu_chipcontrol(si_t *sih, uint reg, u32 mask, u32 val);
 extern u32 si_pmu_regcontrol(si_t *sih, uint reg, u32 mask, u32 val);
 extern u32 si_pmu_pllcontrol(si_t *sih, uint reg, u32 mask, u32 val);
 extern void si_pmu_pllupd(si_t *sih);
-extern void si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable);
+extern void si_pmu_sprom_enable(si_t *sih, struct osl_info *osh, bool enable);
 
 extern void si_pmu_radio_enable(si_t *sih, bool enable);
-extern u32 si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh,
+extern u32 si_pmu_waitforclk_on_backplane(si_t *sih, struct osl_info *osh,
 					     u32 clk, u32 delay);
 
-extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
-extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh,
+extern void si_pmu_otp_power(si_t *sih, struct osl_info *osh, bool on);
+extern void si_sdiod_drive_strength_init(si_t *sih, struct osl_info *osh,
 					 u32 drivestrength);
 
 #endif				/* _hndpmu_h_ */
diff --git a/drivers/staging/brcm80211/include/linux_osl.h b/drivers/staging/brcm80211/include/linux_osl.h
deleted file mode 100644
index c9c860b..0000000
--- a/drivers/staging/brcm80211/include/linux_osl.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _linux_osl_h_
-#define _linux_osl_h_
-
-
-/* Linux Kernel: File Operations: start */
-extern void *osl_os_open_image(char *filename);
-extern int osl_os_get_image_block(char *buf, int len, void *image);
-extern void osl_os_close_image(void *image);
-/* Linux Kernel: File Operations: end */
-
-extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
-extern void osl_detach(osl_t *osh);
-
-extern u32 g_assert_type;
-
-#if defined(BCMDBG_ASSERT)
-#define ASSERT(exp) \
-	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
-extern void osl_assert(char *exp, char *file, int line);
-#else
-#ifdef __GNUC__
-#define GCC_VERSION \
-			(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION > 30100
-#define ASSERT(exp)	do {} while (0)
-#else
-			/* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
-#define ASSERT(exp)
-#endif				/* GCC_VERSION > 30100 */
-#endif				/* __GNUC__ */
-#endif				/* defined(BCMDBG_ASSERT) */
-
-/* PCI configuration space access macros */
-#define	OSL_PCI_READ_CONFIG(osh, offset, size) \
-	osl_pci_read_config((osh), (offset), (size))
-#define	OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
-	osl_pci_write_config((osh), (offset), (size), (val))
-extern u32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
-extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
-
-/* PCI device bus # and slot # */
-#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
-#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
-extern uint osl_pci_bus(osl_t *osh);
-extern uint osl_pci_slot(osl_t *osh);
-
-/* Pkttag flag should be part of public information */
-typedef struct {
-	bool pkttag;
-	uint pktalloced;	/* Number of allocated packet buffers */
-	bool mmbus;		/* Bus supports memory-mapped register accesses */
-	pktfree_cb_fn_t tx_fn;	/* Callback function for PKTFREE */
-	void *tx_ctx;		/* Context to the callback function */
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-	osl_rreg_fn_t rreg_fn;	/* Read Register function */
-	osl_wreg_fn_t wreg_fn;	/* Write Register function */
-	void *reg_ctx;		/* Context to the reg callback functions */
-#endif
-} osl_pubinfo_t;
-
-#define PKTFREESETCB(osh, _tx_fn, _tx_ctx)			\
-	do {							\
-		((osl_pubinfo_t *)osh)->tx_fn = _tx_fn;		\
-		((osl_pubinfo_t *)osh)->tx_ctx = _tx_ctx;	\
-	} while (0)
-
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-#define REGOPSSET(osh, rreg, wreg, ctx)			\
-	do {						\
-		((osl_pubinfo_t *)osh)->rreg_fn = rreg;	\
-		((osl_pubinfo_t *)osh)->wreg_fn = wreg;	\
-		((osl_pubinfo_t *)osh)->reg_ctx = ctx;	\
-	} while (0)
-#endif
-
-#define BUS_SWAP32(v)		(v)
-
-#define	DMA_CONSISTENT_ALIGN	osl_dma_consistent_align()
-extern uint osl_dma_consistent_align(void);
-extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, u16 align,
-				      uint *tot, unsigned long *pap);
-
-#ifdef BRCM_FULLMAC
-#define	DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \
-	osl_dma_alloc_consistent((osh), (size), (0), (tot), (pap))
-#else
-#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
-	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
-#endif /* BRCM_FULLMAC */
-
-#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
-	osl_dma_free_consistent((osh), (void *)(va), (size), (pa))
-extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, unsigned long pa);
-
-/* map/unmap direction */
-#define	DMA_TX	1		/* TX direction for DMA */
-#define	DMA_RX	2		/* RX direction for DMA */
-
-/* map/unmap shared (dma-able) memory */
-#define	DMA_MAP(osh, va, size, direction, p, dmah) \
-	osl_dma_map((osh), (va), (size), (direction))
-#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
-	osl_dma_unmap((osh), (pa), (size), (direction))
-extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction);
-extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
-
-/* API for DMA addressing capability */
-#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
-
-/* register access macros */
-#if defined(BCMSDIO)
-#ifdef BRCM_FULLMAC
-#include <bcmsdh.h>
-#endif
-#define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (unsigned long)(r), sizeof(*(r)), (v)))
-#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (unsigned long)(r), sizeof(*(r))))
-#endif
-
-#if defined(BCMSDIO)
-#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t *)(osh))->mmbus) \
-		mmap_op else bus_op
-#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t *)(osh))->mmbus) ? \
-		mmap_op : bus_op
-#else
-#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) mmap_op
-#define SELECT_BUS_READ(osh, mmap_op, bus_op) mmap_op
-#endif
-
-#define OSL_ERROR(bcmerror)	osl_error(bcmerror)
-extern int osl_error(int bcmerror);
-
-/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
-#define	PKTBUFSZ	2048	/* largest reasonable packet buffer, driver uses for ethernet MTU */
-
-#define OSL_SYSUPTIME()		((u32)jiffies * (1000 / HZ))
-#define	printf(fmt, args...)	printk(fmt , ## args)
-#ifdef BRCM_FULLMAC
-#include <linux/kernel.h>	/* for vsn/printf's */
-#include <linux/string.h>	/* for mem*, str* */
-#endif
-/* bcopy's: Linux kernel doesn't provide these (anymore) */
-#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
-#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
-#define	bzero(b, len)		memset((b), '\0', (len))
-
-/* register access macros */
-#if defined(OSLREGOPS)
-#else
-#ifndef IL_BIGENDIAN
-#ifndef __mips__
-#define R_REG(osh, r) (\
-	SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(u8) ? readb((volatile u8*)(r)) : \
-	sizeof(*(r)) == sizeof(u16) ? readw((volatile u16*)(r)) : \
-	readl((volatile u32*)(r)), OSL_READ_REG(osh, r)) \
-)
-#else				/* __mips__ */
-#define R_REG(osh, r) (\
-	SELECT_BUS_READ(osh, \
-		({ \
-			__typeof(*(r)) __osl_v; \
-			__asm__ __volatile__("sync"); \
-			switch (sizeof(*(r))) { \
-			case sizeof(u8): \
-				__osl_v = readb((volatile u8*)(r)); \
-				break; \
-			case sizeof(u16): \
-				__osl_v = readw((volatile u16*)(r)); \
-				break; \
-			case sizeof(u32): \
-				__osl_v = \
-				readl((volatile u32*)(r)); \
-				break; \
-			} \
-			__asm__ __volatile__("sync"); \
-			__osl_v; \
-		}), \
-		({ \
-			__typeof(*(r)) __osl_v; \
-			__asm__ __volatile__("sync"); \
-			__osl_v = OSL_READ_REG(osh, r); \
-			__asm__ __volatile__("sync"); \
-			__osl_v; \
-		})) \
-)
-#endif				/* __mips__ */
-
-#define W_REG(osh, r, v) do { \
-	SELECT_BUS_WRITE(osh,  \
-		switch (sizeof(*(r))) { \
-		case sizeof(u8): \
-			writeb((u8)(v), (volatile u8*)(r)); break; \
-		case sizeof(u16): \
-			writew((u16)(v), (volatile u16*)(r)); break; \
-		case sizeof(u32): \
-			writel((u32)(v), (volatile u32*)(r)); break; \
-		}, \
-		(OSL_WRITE_REG(osh, r, v))); \
-	} while (0)
-#else				/* IL_BIGENDIAN */
-#define R_REG(osh, r) (\
-	SELECT_BUS_READ(osh, \
-		({ \
-			__typeof(*(r)) __osl_v; \
-			switch (sizeof(*(r))) { \
-			case sizeof(u8): \
-				__osl_v = \
-				readb((volatile u8*)((r)^3)); \
-				break; \
-			case sizeof(u16): \
-				__osl_v = \
-				readw((volatile u16*)((r)^2)); \
-				break; \
-			case sizeof(u32): \
-				__osl_v = readl((volatile u32*)(r)); \
-				break; \
-			} \
-			__osl_v; \
-		}), \
-		OSL_READ_REG(osh, r)) \
-)
-#define W_REG(osh, r, v) do { \
-	SELECT_BUS_WRITE(osh,  \
-		switch (sizeof(*(r))) { \
-		case sizeof(u8):	\
-			writeb((u8)(v), \
-			(volatile u8*)((r)^3)); break; \
-		case sizeof(u16):	\
-			writew((u16)(v), \
-			(volatile u16*)((r)^2)); break; \
-		case sizeof(u32):	\
-			writel((u32)(v), \
-			(volatile u32*)(r)); break; \
-		}, \
-		(OSL_WRITE_REG(osh, r, v))); \
-	} while (0)
-#endif				/* IL_BIGENDIAN */
-
-#endif				/* OSLREGOPS */
-
-#define	AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
-#define	OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
-
-/* bcopy, bcmp, and bzero functions */
-#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
-#define	bcmp(b1, b2, len)	memcmp((b1), (b2), (len))
-#define	bzero(b, len)		memset((b), '\0', (len))
-
-/* uncached/cached virtual address */
-#ifdef __mips__
-#include <asm/addrspace.h>
-#define OSL_UNCACHED(va)	((void *)KSEG1ADDR((va)))
-#define OSL_CACHED(va)		((void *)KSEG0ADDR((va)))
-#else
-#define OSL_UNCACHED(va)	((void *)va)
-#define OSL_CACHED(va)		((void *)va)
-#endif				/* mips */
-
-#if defined(mips)
-#define	OSL_GETCYCLES(x)	((x) = read_c0_count() * 2)
-#elif defined(__i386__)
-#define	OSL_GETCYCLES(x)	rdtscl((x))
-#else
-#define OSL_GETCYCLES(x)	((x) = 0)
-#endif				/* defined(mips) */
-
-/* dereference an address that may cause a bus exception */
-#ifdef mips
-#define	BUSPROBE(val, addr)	get_dbe((val), (addr))
-#include <asm/paccess.h>
-#else
-#define	BUSPROBE(val, addr)	({ (val) = R_REG(NULL, (addr)); 0; })
-#endif				/* mips */
-
-/* map/unmap physical to virtual I/O */
-#if !defined(CONFIG_MMC_MSM7X00A)
-#define	REG_MAP(pa, size)	ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
-#else
-#define REG_MAP(pa, size)       (void *)(0)
-#endif				/* !defined(CONFIG_MMC_MSM7X00A */
-#define	REG_UNMAP(va)		iounmap((va))
-
-#define	R_SM(r)			(*(r))
-#define	W_SM(r, v)		(*(r) = (v))
-#define	BZERO_SM(r, len)	memset((r), '\0', (len))
-
-#ifdef BRCM_FULLMAC
-#include <linuxver.h>		/* use current 2.4.x calling conventions */
-#endif
-
-/* packet primitives */
-#define	PKTGET(osh, len, send)		osl_pktget((osh), (len))
-#define	PKTFREE(osh, skb, send)		osl_pktfree((osh), (skb), (send))
-#define	PKTDATA(skb)		(((struct sk_buff *)(skb))->data)
-#define	PKTLEN(skb)		(((struct sk_buff *)(skb))->len)
-#define PKTHEADROOM(skb)		(PKTDATA(skb)-(((struct sk_buff *)(skb))->head))
-#define PKTTAILROOM(skb) ((((struct sk_buff *)(skb))->end)-(((struct sk_buff *)(skb))->tail))
-#define	PKTNEXT(skb)		(((struct sk_buff *)(skb))->next)
-#define	PKTSETNEXT(skb, x)	\
-	(((struct sk_buff *)(skb))->next = (struct sk_buff *)(x))
-#define	PKTSETLEN(skb, len)	__skb_trim((struct sk_buff *)(skb), (len))
-#define	PKTPUSH(skb, bytes)	skb_push((struct sk_buff *)(skb), (bytes))
-#define	PKTPULL(skb, bytes)	skb_pull((struct sk_buff *)(skb), (bytes))
-#define	PKTTAG(skb)		((void *)(((struct sk_buff *)(skb))->cb))
-#define PKTALLOCED(osh)		(((osl_pubinfo_t *)(osh))->pktalloced)
-#define PKTSETPOOL(osh, skb, x, y)	do {} while (0)
-#define PKTPOOL(osh, skb)		false
-extern void *osl_pktget(osl_t *osh, uint len);
-extern void osl_pktfree(osl_t *osh, void *skb, bool send);
-
-#ifdef BRCM_FULLMAC
-extern void *osl_pktget_static(osl_t *osh, uint len);
-extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
-
-static inline void *
-osl_pkt_frmnative(osl_pubinfo_t *osh, struct sk_buff *skb)
-{
-	struct sk_buff *nskb;
-
-	if (osh->pkttag)
-		bzero((void *)skb->cb, OSL_PKTTAG_SZ);
-
-	for (nskb = skb; nskb; nskb = nskb->next)
-		osh->pktalloced++;
-
-	return (void *)skb;
-}
-#define PKTFRMNATIVE(osh, skb)	\
-	osl_pkt_frmnative(((osl_pubinfo_t *)osh), (struct sk_buff*)(skb))
-
-static inline struct sk_buff *
-osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
-{
-	struct sk_buff *nskb;
-
-	if (osh->pkttag)
-		bzero(((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
-
-	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next)
-		osh->pktalloced--;
-
-	return (struct sk_buff *)pkt;
-}
-#define PKTTONATIVE(osh, pkt)	\
-	osl_pkt_tonative((osl_pubinfo_t *)(osh), (pkt))
-#else /* !BRCM_FULLMAC */
-#define PKTUNALLOC(osh)			(((osl_pubinfo_t *)(osh))->pktalloced--)
-
-#define	PKTSETSKIPCT(osh, skb)
-#define	PKTCLRSKIPCT(osh, skb)
-#define	PKTSKIPCT(osh, skb)
-#endif	/* BRCM_FULLMAC */
-
-#define	PKTLINK(skb)			(((struct sk_buff *)(skb))->prev)
-#define	PKTSETLINK(skb, x)		(((struct sk_buff *)(skb))->prev = (struct sk_buff*)(x))
-#define	PKTPRIO(skb)			(((struct sk_buff *)(skb))->priority)
-#define	PKTSETPRIO(skb, x)		(((struct sk_buff *)(skb))->priority = (x))
-#define PKTSUMNEEDED(skb)		(((struct sk_buff *)(skb))->ip_summed == CHECKSUM_PARTIAL)
-#define PKTSETSUMGOOD(skb, x)		(((struct sk_buff *)(skb))->ip_summed = \
-						((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
-/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
-#define PKTSHARED(skb)                  (((struct sk_buff *)(skb))->cloned)
-
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-#define RPC_READ_REG(osh, r) (\
-	sizeof(*(r)) == sizeof(u8) ? osl_readb((osh), (volatile u8*)(r)) : \
-	sizeof(*(r)) == sizeof(u16) ? osl_readw((osh), (volatile u16*)(r)) : \
-	osl_readl((osh), (volatile u32*)(r)) \
-)
-#define RPC_WRITE_REG(osh, r, v) do { \
-	switch (sizeof(*(r))) { \
-	case sizeof(u8): \
-		osl_writeb((osh), (volatile u8*)(r), (u8)(v)); \
-		break; \
-	case sizeof(u16): \
-		osl_writew((osh), (volatile u16*)(r), (u16)(v)); \
-		break; \
-	case sizeof(u32): \
-		osl_writel((osh), (volatile u32*)(r), (u32)(v)); \
-		break; \
-	} \
-} while (0)
-
-extern u8 osl_readb(osl_t *osh, volatile u8 *r);
-extern u16 osl_readw(osl_t *osh, volatile u16 *r);
-extern u32 osl_readl(osl_t *osh, volatile u32 *r);
-extern void osl_writeb(osl_t *osh, volatile u8 *r, u8 v);
-extern void osl_writew(osl_t *osh, volatile u16 *r, u16 v);
-extern void osl_writel(osl_t *osh, volatile u32 *r, u32 v);
-#endif				/* BCMSDIO */
-
-#endif				/* _linux_osl_h_ */
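
    The deleted PKT* wrappers above are one-line aliases over struct sk_buff; the
    open-coded equivalents look like the illustrative fragment below (not part of
    the patch), which is the direction later conversions in this driver take.

	#include <linux/skbuff.h>

	static void example_pkt_ops(struct sk_buff *skb)
	{
		unsigned char *data = skb->data;	/* PKTDATA()  */
		unsigned int len    = skb->len;		/* PKTLEN()   */

		skb_push(skb, 4);		/* PKTPUSH(); assumes 4 bytes of headroom */
		skb_pull(skb, 4);		/* PKTPULL(); restores the original data ptr */
		__skb_trim(skb, len);		/* PKTSETLEN() */
		(void)data;
	}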
diff --git a/drivers/staging/brcm80211/include/linuxver.h b/drivers/staging/brcm80211/include/linuxver.h
deleted file mode 100644
index dc72141..0000000
--- a/drivers/staging/brcm80211/include/linuxver.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _linuxver_h_
-#define _linuxver_h_
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/ieee80211.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-
-#undef IP_TOS
-#include <asm/io.h>
-
-#endif				/* _linuxver_h_ */
diff --git a/drivers/staging/brcm80211/include/nicpci.h b/drivers/staging/brcm80211/include/nicpci.h
index ce146e8..928818d 100644
--- a/drivers/staging/brcm80211/include/nicpci.h
+++ b/drivers/staging/brcm80211/include/nicpci.h
@@ -45,17 +45,17 @@
 #else
 struct sbpcieregs;
 
-extern u8 pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id,
+extern u8 pcicore_find_pci_capability(struct osl_info *osh, u8 req_cap_id,
 					 unsigned char *buf, u32 *buflen);
-extern uint pcie_readreg(osl_t *osh, struct sbpcieregs *pcieregs,
+extern uint pcie_readreg(struct osl_info *osh, struct sbpcieregs *pcieregs,
 			 uint addrtype, uint offset);
-extern uint pcie_writereg(osl_t *osh, struct sbpcieregs *pcieregs,
+extern uint pcie_writereg(struct osl_info *osh, struct sbpcieregs *pcieregs,
 			  uint addrtype, uint offset, uint val);
 
 extern u8 pcie_clkreq(void *pch, u32 mask, u32 val);
 extern u32 pcie_lcreg(void *pch, u32 mask, u32 val);
 
-extern void *pcicore_init(si_t *sih, osl_t *osh, void *regs);
+extern void *pcicore_init(si_t *sih, struct osl_info *osh, void *regs);
 extern void pcicore_deinit(void *pch);
 extern void pcicore_attach(void *pch, char *pvars, int state);
 extern void pcicore_hwup(void *pch);
@@ -70,10 +70,10 @@
 extern u32 pcicore_pciereg(void *pch, u32 offset, u32 mask,
 			      u32 val, uint type);
 
-extern bool pcicore_pmecap_fast(osl_t *osh);
+extern bool pcicore_pmecap_fast(struct osl_info *osh);
 extern void pcicore_pmeen(void *pch);
 extern void pcicore_pmeclr(void *pch);
 extern bool pcicore_pmestat(void *pch);
-#endif				/* defined(BCMSDIO) || (defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)) */
+#endif /* defined(BCMSDIO)||(defined(BCMBUSTYPE) && (BCMBUSTYPE==SI_BUS)) */
 
 #endif				/* _NICPCI_H */
diff --git a/drivers/staging/brcm80211/include/osl.h b/drivers/staging/brcm80211/include/osl.h
index c0ebb3d..b282356 100644
--- a/drivers/staging/brcm80211/include/osl.h
+++ b/drivers/staging/brcm80211/include/osl.h
@@ -18,42 +18,197 @@
 #define _osl_h_
 
 /* osl handle type forward declaration */
-typedef struct osl_info osl_t;
+struct osl_info {
+	uint pktalloced;	/* Number of allocated packet buffers */
+	bool mmbus;		/* Bus supports memory-mapped registers */
+	uint magic;
+	void *pdev;
+	uint bustype;
+};
+
 typedef struct osl_dmainfo osldma_t;
 
-#define OSL_PKTTAG_SZ	32	/* Size of PktTag */
 
-/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
-typedef void (*pktfree_cb_fn_t) (void *ctx, void *pkt, unsigned int status);
+extern struct osl_info *osl_attach(void *pdev, uint bustype);
+extern void osl_detach(struct osl_info *osh);
 
-#ifdef BCMSDIO
-/* Drivers use REGOPSSET() to register register read/write funcitons */
-typedef unsigned int (*osl_rreg_fn_t) (void *ctx, void *reg, unsigned int size);
-typedef void (*osl_wreg_fn_t) (void *ctx, void *reg, unsigned int val,
-			       unsigned int size);
+extern u32 g_assert_type;
+
+#if defined(BCMDBG_ASSERT)
+#define ASSERT(exp) \
+	  do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
+extern void osl_assert(char *exp, char *file, int line);
+#else
+#define ASSERT(exp)	do {} while (0)
+#endif  /* defined(BCMDBG_ASSERT) */
+
+/* PCI device bus # and slot # */
+#define OSL_PCI_BUS(osh)	osl_pci_bus(osh)
+#define OSL_PCI_SLOT(osh)	osl_pci_slot(osh)
+extern uint osl_pci_bus(struct osl_info *osh);
+extern uint osl_pci_slot(struct osl_info *osh);
+
+#define BUS_SWAP32(v)		(v)
+
+extern void *osl_dma_alloc_consistent(struct osl_info *osh, uint size,
+				      u16 align, uint *tot, unsigned long *pap);
+
+#ifdef BRCM_FULLMAC
+#define	DMA_ALLOC_CONSISTENT(osh, size, pap, dmah, alignbits) \
+	osl_dma_alloc_consistent((osh), (size), (0), (tot), (pap))
+#else
+#define	DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
+	osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#endif /* BRCM_FULLMAC */
+
+#define	DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+	osl_dma_free_consistent((osh), (void *)(va), (size), (pa))
+extern void osl_dma_free_consistent(struct osl_info *osh, void *va,
+				    uint size, unsigned long pa);
+
+/* map/unmap direction */
+#define	DMA_TX	1		/* TX direction for DMA */
+#define	DMA_RX	2		/* RX direction for DMA */
+
+/* map/unmap shared (dma-able) memory */
+#define	DMA_MAP(osh, va, size, direction, p, dmah) \
+	osl_dma_map((osh), (va), (size), (direction))
+#define	DMA_UNMAP(osh, pa, size, direction, p, dmah) \
+	osl_dma_unmap((osh), (pa), (size), (direction))
+extern uint osl_dma_map(struct osl_info *osh, void *va, uint size,
+			int direction);
+extern void osl_dma_unmap(struct osl_info *osh, uint pa, uint size,
+			  int direction);
+
+/* register access macros */
+#if defined(BCMSDIO)
+#ifdef BRCM_FULLMAC
+#include <bcmsdh.h>
+#endif
+#define OSL_WRITE_REG(osh, r, v) \
+		(bcmsdh_reg_write(NULL, (unsigned long)(r), sizeof(*(r)), (v)))
+#define OSL_READ_REG(osh, r) \
+		(bcmsdh_reg_read(NULL, (unsigned long)(r), sizeof(*(r))))
 #endif
 
-#include <linux_osl.h>
-
-/* --------------------------------------------------------------------------
-** Register manipulation macros.
-*/
-
-#define	SET_REG(osh, r, mask, val)	W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
-
-#ifndef AND_REG
-#define AND_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) & (v))
-#endif				/* !AND_REG */
-
-#ifndef OR_REG
-#define OR_REG(osh, r, v)		W_REG(osh, (r), R_REG(osh, r) | (v))
-#endif				/* !OR_REG */
-
-#if !defined(OSL_SYSUPTIME)
-#define OSL_SYSUPTIME() (0)
-#define OSL_SYSUPTIME_SUPPORT false
+#if defined(BCMSDIO)
+#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) \
+	if ((osh)->mmbus) \
+		mmap_op else bus_op
+#define SELECT_BUS_READ(osh, mmap_op, bus_op) \
+	((osh)->mmbus) ?  mmap_op : bus_op
 #else
-#define OSL_SYSUPTIME_SUPPORT true
-#endif				/* OSL_SYSUPTIME */
+#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) mmap_op
+#define SELECT_BUS_READ(osh, mmap_op, bus_op) mmap_op
+#endif
 
-#endif				/* _osl_h_ */
+/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */
+#define	PKTBUFSZ	2048
+
+#define OSL_SYSUPTIME()		((u32)jiffies * (1000 / HZ))
+#define	printf(fmt, args...)	printk(fmt , ## args)
+#ifdef BRCM_FULLMAC
+#include <linux/kernel.h>	/* for vsn/printf's */
+#include <linux/string.h>	/* for mem*, str* */
+#endif
+/* bcopy: the Linux kernel does not provide it (anymore) */
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+
+/* register access macros */
+#ifndef IL_BIGENDIAN
+#ifndef __mips__
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(u8) ? \
+	readb((volatile u8*)(r)) : \
+	sizeof(*(r)) == sizeof(u16) ? readw((volatile u16*)(r)) : \
+	readl((volatile u32*)(r)), OSL_READ_REG(osh, r)) \
+)
+#else				/* __mips__ */
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			__asm__ __volatile__("sync"); \
+			switch (sizeof(*(r))) { \
+			case sizeof(u8): \
+				__osl_v = readb((volatile u8*)(r)); \
+				break; \
+			case sizeof(u16): \
+				__osl_v = readw((volatile u16*)(r)); \
+				break; \
+			case sizeof(u32): \
+				__osl_v = \
+				readl((volatile u32*)(r)); \
+				break; \
+			} \
+			__asm__ __volatile__("sync"); \
+			__osl_v; \
+		}), \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			__asm__ __volatile__("sync"); \
+			__osl_v = OSL_READ_REG(osh, r); \
+			__asm__ __volatile__("sync"); \
+			__osl_v; \
+		})) \
+)
+#endif				/* __mips__ */
+
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh,  \
+		switch (sizeof(*(r))) { \
+		case sizeof(u8): \
+			writeb((u8)(v), (volatile u8*)(r)); break; \
+		case sizeof(u16): \
+			writew((u16)(v), (volatile u16*)(r)); break; \
+		case sizeof(u32): \
+			writel((u32)(v), (volatile u32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#else				/* IL_BIGENDIAN */
+#define R_REG(osh, r) (\
+	SELECT_BUS_READ(osh, \
+		({ \
+			__typeof(*(r)) __osl_v; \
+			switch (sizeof(*(r))) { \
+			case sizeof(u8): \
+				__osl_v = \
+				readb((volatile u8*)((r)^3)); \
+				break; \
+			case sizeof(u16): \
+				__osl_v = \
+				readw((volatile u16*)((r)^2)); \
+				break; \
+			case sizeof(u32): \
+				__osl_v = readl((volatile u32*)(r)); \
+				break; \
+			} \
+			__osl_v; \
+		}), \
+		OSL_READ_REG(osh, r)) \
+)
+#define W_REG(osh, r, v) do { \
+	SELECT_BUS_WRITE(osh,  \
+		switch (sizeof(*(r))) { \
+		case sizeof(u8):	\
+			writeb((u8)(v), \
+			(volatile u8*)((r)^3)); break; \
+		case sizeof(u16):	\
+			writew((u16)(v), \
+			(volatile u16*)((r)^2)); break; \
+		case sizeof(u32):	\
+			writel((u32)(v), \
+			(volatile u32*)(r)); break; \
+		}, \
+		(OSL_WRITE_REG(osh, r, v))); \
+	} while (0)
+#endif				/* IL_BIGENDIAN */
+
+#define	bcopy(src, dst, len)	memcpy((dst), (src), (len))
+
+/* packet primitives */
+extern struct sk_buff *pkt_buf_get_skb(struct osl_info *osh, uint len);
+extern void pkt_buf_free_skb(struct osl_info *osh, struct sk_buff *skb, bool send);
+
+#endif /* _osl_h_ */
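
The reworked osl.h above routes all register access through R_REG()/W_REG(), which dispatch purely on sizeof(*(r)): byte, halfword and word accesses map to readb()/readw()/readl() (or to the bcmsdh_reg_* helpers via SELECT_BUS_READ/SELECT_BUS_WRITE when BCMSDIO is set), with explicit "sync" barriers around the access on MIPS and address XORs for the big-endian byte lanes. Worth noting in passing, though this patch does not change it: OSL_SYSUPTIME()'s (1000 / HZ) factor truncates to zero whenever HZ is above 1000 and loses precision when HZ does not divide 1000; jiffies_to_msecs() would avoid that. Below is a minimal sketch of the sizeof-dispatch idea only -- ordinary variables instead of MMIO, a hypothetical REG_READ() name, and the same GNU C statement-expression trick the driver macro relies on:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for device registers (assumption: plain variables here,
	 * so no readb()/readw()/readl() and no bus barriers).
	 */
	static volatile uint16_t fake_reg16 = 0xbeef;
	static volatile uint32_t fake_reg32 = 0xdeadbeef;

	/* Access width picked from the pointed-to type, as R_REG() does. */
	#define REG_READ(r) ({						\
		unsigned long __v;					\
		switch (sizeof(*(r))) {					\
		case sizeof(uint8_t):					\
			__v = *(volatile uint8_t *)(r);			\
			break;						\
		case sizeof(uint16_t):					\
			__v = *(volatile uint16_t *)(r);		\
			break;						\
		default:						\
			__v = *(volatile uint32_t *)(r);		\
			break;						\
		}							\
		__v;							\
	})

	int main(void)
	{
		printf("16-bit read: 0x%04lx\n", REG_READ(&fake_reg16));
		printf("32-bit read: 0x%08lx\n", REG_READ(&fake_reg32));
		return 0;
	}
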
diff --git a/drivers/staging/brcm80211/include/proto/ethernet.h b/drivers/staging/brcm80211/include/proto/ethernet.h
index cc17b42..567407d 100644
--- a/drivers/staging/brcm80211/include/proto/ethernet.h
+++ b/drivers/staging/brcm80211/include/proto/ethernet.h
@@ -17,28 +17,22 @@
 #ifndef _NET_ETHERNET_H_
 #define _NET_ETHERNET_H_
 
+#include <linux/if_ether.h>
+
 #include <packed_section_start.h>
 
-#define	ETHER_ADDR_LEN		6
 #define	ETHER_TYPE_LEN		2
 #define	ETHER_CRC_LEN		4
-#define	ETHER_HDR_LEN		(ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
 #define	ETHER_MIN_LEN		64
 #define	ETHER_MIN_DATA		46
 #define	ETHER_MAX_LEN		1518
 #define	ETHER_MAX_DATA		1500
 
-#define ETHER_TYPE_MIN		0x0600
-#define	ETHER_TYPE_IP		0x0800
-#define ETHER_TYPE_ARP		0x0806
-#define ETHER_TYPE_8021Q	0x8100
 #define	ETHER_TYPE_BRCM		0x886c
-#define	ETHER_TYPE_802_1X	0x888e
-#define	ETHER_TYPE_802_1X_PREAUTH 0x88c7
 
-#define ETHER_DEST_OFFSET	(0 * ETHER_ADDR_LEN)
-#define ETHER_SRC_OFFSET	(1 * ETHER_ADDR_LEN)
-#define ETHER_TYPE_OFFSET	(2 * ETHER_ADDR_LEN)
+#define ETHER_DEST_OFFSET	(0 * ETH_ALEN)
+#define ETHER_SRC_OFFSET	(1 * ETH_ALEN)
+#define ETHER_TYPE_OFFSET	(2 * ETH_ALEN)
 
 #define	ETHER_IS_VALID_LEN(foo)	\
 	((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
@@ -53,50 +47,18 @@
 }
 
 BWL_PRE_PACKED_STRUCT struct ether_header {
-	u8 ether_dhost[ETHER_ADDR_LEN];
-	u8 ether_shost[ETHER_ADDR_LEN];
+	u8 ether_dhost[ETH_ALEN];
+	u8 ether_shost[ETH_ALEN];
 	u16 ether_type;
 } BWL_POST_PACKED_STRUCT;
 
 BWL_PRE_PACKED_STRUCT struct ether_addr {
-	u8 octet[ETHER_ADDR_LEN];
+	u8 octet[ETH_ALEN];
 } BWL_POST_PACKED_STRUCT;
 
-#define ETHER_SET_LOCALADDR(ea)	(((u8 *)(ea))[0] = (((u8 *)(ea))[0] | 2))
-#define ETHER_IS_LOCALADDR(ea) 	(((u8 *)(ea))[0] & 2)
-#define ETHER_CLR_LOCALADDR(ea)	(((u8 *)(ea))[0] = \
-	(((u8 *)(ea))[0] & 0xd))
-#define ETHER_TOGGLE_LOCALADDR(ea)	(((u8 *)(ea))[0] = \
-	(((u8 *)(ea))[0] ^ 2))
-
 #define ETHER_SET_UNICAST(ea)	(((u8 *)(ea))[0] = (((u8 *)(ea))[0] & ~1))
 
-#define ETHER_ISMULTI(ea) (((const u8 *)(ea))[0] & 1)
-
-#define	ether_cmp(a, b)	(!(((short *)a)[0] == ((short *)b)[0]) | \
-			 !(((short *)a)[1] == ((short *)b)[1]) | \
-			 !(((short *)a)[2] == ((short *)b)[2]))
-
-#define	ether_copy(s, d) { \
-		((short *)d)[0] = ((short *)s)[0]; \
-		((short *)d)[1] = ((short *)s)[1]; \
-		((short *)d)[2] = ((short *)s)[2]; }
-
 static const struct ether_addr ether_bcast = { {255, 255, 255, 255, 255, 255} };
-static const struct ether_addr ether_null = { {0, 0, 0, 0, 0, 0} };
-
-#define ETHER_ISBCAST(ea)	((((u8 *)(ea))[0] &		\
-	((u8 *)(ea))[1] &		\
-	((u8 *)(ea))[2] &		\
-	((u8 *)(ea))[3] &		\
-	((u8 *)(ea))[4] &		\
-	((u8 *)(ea))[5]) == 0xff)
-#define ETHER_ISNULLADDR(ea)	((((u8 *)(ea))[0] |		\
-	((u8 *)(ea))[1] |		\
-	((u8 *)(ea))[2] |		\
-	((u8 *)(ea))[3] |		\
-	((u8 *)(ea))[4] |		\
-	((u8 *)(ea))[5]) == 0)
 
 #define ETHER_MOVE_HDR(d, s) \
 do { \
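
In the ethernet.h hunk above, the driver-private ETHER_ADDR_LEN and the ETHER_ISMULTI/ETHER_ISBCAST/ETHER_ISNULLADDR-style helpers give way to <linux/if_ether.h>'s ETH_ALEN; the kernel already ships equivalents for the dropped address tests in <linux/etherdevice.h>. A compile-time sketch of that mapping against the kernel headers -- the helper name is hypothetical, the etherdevice.h calls are existing kernel API:

	#include <linux/etherdevice.h>	/* is_*_ether_addr() helpers */
	#include <linux/if_ether.h>	/* ETH_ALEN */

	/* Hypothetical helper showing the replacements:
	 *   ETHER_ISBCAST(ea)    -> is_broadcast_ether_addr(ea)
	 *   ETHER_ISMULTI(ea)    -> is_multicast_ether_addr(ea)
	 *   ETHER_ISNULLADDR(ea) -> is_zero_ether_addr(ea)
	 *   ether_copy(s, d)     -> memcpy(d, s, ETH_ALEN)
	 */
	static bool unicast_peer_addr(const u8 *ea)
	{
		return !is_multicast_ether_addr(ea) && !is_zero_ether_addr(ea);
	}
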
diff --git a/drivers/staging/brcm80211/include/proto/wpa.h b/drivers/staging/brcm80211/include/proto/wpa.h
index ec84c9f..10c2fb6 100644
--- a/drivers/staging/brcm80211/include/proto/wpa.h
+++ b/drivers/staging/brcm80211/include/proto/wpa.h
@@ -19,95 +19,7 @@
 
 #include <proto/ethernet.h>
 
-#include <packed_section_start.h>
-
-#define DOT11_RC_INVALID_WPA_IE		13
-#define DOT11_RC_MIC_FAILURE		14
-#define DOT11_RC_4WH_TIMEOUT		15
-#define DOT11_RC_GTK_UPDATE_TIMEOUT	16
-#define DOT11_RC_WPA_IE_MISMATCH	17
-#define DOT11_RC_INVALID_MC_CIPHER	18
-#define DOT11_RC_INVALID_UC_CIPHER	19
-#define DOT11_RC_INVALID_AKMP		20
-#define DOT11_RC_BAD_WPA_VERSION	21
-#define DOT11_RC_INVALID_WPA_CAP	22
-#define DOT11_RC_8021X_AUTH_FAIL	23
-
 #define WPA2_PMKID_LEN	16
-
-typedef BWL_PRE_PACKED_STRUCT struct {
-	u8 tag;
-	u8 length;
-	u8 oui[3];
-	u8 oui_type;
-	BWL_PRE_PACKED_STRUCT struct {
-		u8 low;
-		u8 high;
-	} BWL_POST_PACKED_STRUCT version;
-} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
-#define WPA_IE_OUITYPE_LEN	4
-#define WPA_IE_FIXED_LEN	8
-#define WPA_IE_TAG_FIXED_LEN	6
-
-typedef BWL_PRE_PACKED_STRUCT struct {
-	u8 tag;
-	u8 length;
-	BWL_PRE_PACKED_STRUCT struct {
-		u8 low;
-		u8 high;
-	} BWL_POST_PACKED_STRUCT version;
-} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
-#define WPA_RSN_IE_FIXED_LEN	4
-#define WPA_RSN_IE_TAG_FIXED_LEN	2
-typedef u8 wpa_pmkid_t[WPA2_PMKID_LEN];
-
-typedef BWL_PRE_PACKED_STRUCT struct {
-	u8 oui[3];
-	u8 type;
-} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
-#define WPA_SUITE_LEN	4
-
-typedef BWL_PRE_PACKED_STRUCT struct {
-	BWL_PRE_PACKED_STRUCT struct {
-		u8 low;
-		u8 high;
-	} BWL_POST_PACKED_STRUCT count;
-	wpa_suite_t list[1];
-} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t;
-#define WPA_IE_SUITE_COUNT_LEN	2
-typedef BWL_PRE_PACKED_STRUCT struct {
-	BWL_PRE_PACKED_STRUCT struct {
-		u8 low;
-		u8 high;
-	} BWL_POST_PACKED_STRUCT count;
-	wpa_pmkid_t list[1];
-} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
-
-#define WPA_CIPHER_NONE		0
-#define WPA_CIPHER_WEP_40	1
-#define WPA_CIPHER_TKIP		2
-#define WPA_CIPHER_AES_OCB	3
-#define WPA_CIPHER_AES_CCM	4
-#define WPA_CIPHER_WEP_104	5
-
-#define IS_WPA_CIPHER(cipher)	((cipher) == WPA_CIPHER_NONE || \
-				 (cipher) == WPA_CIPHER_WEP_40 || \
-				 (cipher) == WPA_CIPHER_WEP_104 || \
-				 (cipher) == WPA_CIPHER_TKIP || \
-				 (cipher) == WPA_CIPHER_AES_OCB || \
-				 (cipher) == WPA_CIPHER_AES_CCM)
-
-#define WPA_TKIP_CM_DETECT	60
-#define WPA_TKIP_CM_BLOCK	60
-
-#define RSN_CAP_LEN		2
-
-#define RSN_CAP_PREAUTH			0x0001
-#define RSN_CAP_NOPAIRWISE		0x0002
-#define RSN_CAP_PTK_REPLAY_CNTR_MASK	0x000C
-#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT	2
-#define RSN_CAP_GTK_REPLAY_CNTR_MASK	0x0030
-#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT	4
 #define RSN_CAP_1_REPLAY_CNTR		0
 #define RSN_CAP_2_REPLAY_CNTRS		1
 #define RSN_CAP_4_REPLAY_CNTRS		2
@@ -118,10 +30,4 @@
 #define WPA_CAP_REPLAY_CNTR_SHIFT	RSN_CAP_PTK_REPLAY_CNTR_SHIFT
 #define WPA_CAP_REPLAY_CNTR_MASK	RSN_CAP_PTK_REPLAY_CNTR_MASK
 
-#define WPA_CAP_LEN	RSN_CAP_LEN
-
-#define	WPA_CAP_WPA2_PREAUTH		RSN_CAP_PREAUTH
-
-#include <packed_section_end.h>
-
 #endif				/* _proto_wpa_h_ */
diff --git a/drivers/staging/brcm80211/include/rpc_osl.h b/drivers/staging/brcm80211/include/rpc_osl.h
index 4a26480..c59d9ed 100644
--- a/drivers/staging/brcm80211/include/rpc_osl.h
+++ b/drivers/staging/brcm80211/include/rpc_osl.h
@@ -18,7 +18,7 @@
 #define _rpcosl_h_
 
 typedef struct rpc_osl rpc_osl_t;
-extern rpc_osl_t *rpc_osl_attach(osl_t *osh);
+extern rpc_osl_t *rpc_osl_attach(struct osl_info *osh);
 extern void rpc_osl_detach(rpc_osl_t *rpc_osh);
 
 #define RPC_OSL_LOCK(rpc_osh) rpc_osl_lock((rpc_osh))
diff --git a/drivers/staging/brcm80211/include/siutils.h b/drivers/staging/brcm80211/include/siutils.h
index 57c3650..a935092 100644
--- a/drivers/staging/brcm80211/include/siutils.h
+++ b/drivers/staging/brcm80211/include/siutils.h
@@ -19,9 +19,6 @@
 
 #include <hndsoc.h>
 
-#if !defined(WLC_LOW)
-#include "bcm_rpc.h"
-#endif
 /*
  * Data structure to export all chip specific common variables
  *   public (read-only) portion of siutils handle returned by si_attach()
@@ -50,19 +47,12 @@
 	uint socirev;		/* SOC interconnect rev */
 	bool pci_pr32414;
 
-#if !defined(WLC_LOW)
-	rpc_info_t *rpc;
-#endif
 };
 
 /* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver
  * for monolithic driver, it is readonly to prevent accident change
  */
-#if !defined(WLC_LOW)
-typedef struct si_pub si_t;
-#else
 typedef const struct si_pub si_t;
-#endif
 
 /*
  * Many of the routines below take an 'sih' handle as their first arg.
@@ -128,8 +118,8 @@
 #define GPIO_CTRL_EPA_EN_MASK 0x40
 
 /* === exported functions === */
-extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
-		       void *sdh, char **vars, uint *varsz);
+extern si_t *si_attach(uint pcidev, struct osl_info *osh, void *regs,
+		       uint bustype, void *sdh, char **vars, uint *varsz);
 
 extern void si_detach(si_t *sih);
 extern bool si_pci_war16165(si_t *sih);
@@ -138,7 +128,7 @@
 extern uint si_flag(si_t *sih);
 extern uint si_coreidx(si_t *sih);
 extern uint si_corerev(si_t *sih);
-extern void *si_osh(si_t *sih);
+struct osl_info *si_osh(si_t *sih);
 extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask,
 		uint val);
 extern void si_write_wrapperreg(si_t *sih, u32 offset, u32 val);
@@ -227,7 +217,7 @@
 /* misc si info needed by some of the routines */
 typedef struct si_info {
 	struct si_pub pub;	/* back plane public state (must be first field) */
-	void *osh;		/* osl os handle */
+	struct osl_info *osh;		/* osl os handle */
 	void *sdh;		/* bcmsdh handle */
 	uint dev_coreid;	/* the core provides driver functions */
 	void *intr_arg;		/* interrupt callback function arg */
@@ -305,9 +295,9 @@
 #define	ILP_DIV_5MHZ		0	/* ILP = 5 MHz */
 #define	ILP_DIV_1MHZ		4	/* ILP = 1 MHz */
 
-#define PCI(si)		((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+#define PCI(si)		(((si)->pub.bustype == PCI_BUS) &&	\
 			 ((si)->pub.buscoretype == PCI_CORE_ID))
-#define PCIE(si)	((BUSTYPE((si)->pub.bustype) == PCI_BUS) &&	\
+#define PCIE(si)	(((si)->pub.bustype == PCI_BUS) &&	\
 			 ((si)->pub.buscoretype == PCIE_CORE_ID))
 #define PCI_FORCEHT(si)	\
 	(PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
@@ -344,9 +334,9 @@
 char *si_getnvramflvar(si_t *sih, const char *name);
 
 /* AMBA Interconnect exported externs */
-extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
-		       void *sdh, char **vars, uint *varsz);
-extern si_t *ai_kattach(osl_t *osh);
+extern si_t *ai_attach(uint pcidev, struct osl_info *osh, void *regs,
+		       uint bustype, void *sdh, char **vars, uint *varsz);
+extern si_t *ai_kattach(struct osl_info *osh);
 extern void ai_scan(si_t *sih, void *regs, uint devid);
 
 extern uint ai_flag(si_t *sih);
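
A recurring change in rpc_osl.h and siutils.h above (and in the files that follow) is replacing the opaque osl_t typedef with a plain struct osl_info * in every prototype. The gain is ordinary C type checking: a forward-declared struct pointer cannot silently absorb an unrelated pointer the way void * (or a typedef hiding it) can. A compile-only sketch with hypothetical function names:

	struct osl_info;			/* a forward declaration suffices */

	static void takes_osl(struct osl_info *osh) { (void)osh; }
	static void takes_void(void *handle) { (void)handle; }

	void caller(struct osl_info *osh, int *unrelated)
	{
		takes_osl(osh);			/* fine */
		takes_void(unrelated);		/* accepted without a peep */
	#if 0
		takes_osl(unrelated);		/* gcc: incompatible pointer type */
	#endif
	}
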
diff --git a/drivers/staging/brcm80211/include/wlioctl.h b/drivers/staging/brcm80211/include/wlioctl.h
index 96866fb..9be793c 100644
--- a/drivers/staging/brcm80211/include/wlioctl.h
+++ b/drivers/staging/brcm80211/include/wlioctl.h
@@ -33,82 +33,9 @@
 #define BWL_DEFAULT_PACKING
 #include <packed_section_start.h>
 
-/* Legacy structure to help keep backward compatible wl tool and tray app */
-
-#define	LEGACY_WL_BSS_INFO_VERSION	107	/* older version of wl_bss_info struct */
-
-typedef struct wl_bss_info_107 {
-	u32 version;		/* version field */
-	u32 length;		/* byte length of data in this record,
-				 * starting at version and including IEs
-				 */
-	struct ether_addr BSSID;
-	u16 beacon_period;	/* units are Kusec */
-	u16 capability;	/* Capability information */
-	u8 SSID_len;
-	u8 SSID[32];
-	struct {
-		uint count;	/* # rates in this set */
-		u8 rates[16];	/* rates in 500kbps units w/hi bit set if basic */
-	} rateset;		/* supported rates */
-	u8 channel;		/* Channel no. */
-	u16 atim_window;	/* units are Kusec */
-	u8 dtim_period;	/* DTIM period */
-	s16 RSSI;		/* receive signal strength (in dBm) */
-	s8 phy_noise;		/* noise (in dBm) */
-	u32 ie_length;	/* byte length of Information Elements */
-	/* variable length Information Elements */
-} wl_bss_info_107_t;
-
-/*
- * Per-BSS information structure.
- */
-
-#define	LEGACY2_WL_BSS_INFO_VERSION	108	/* old version of wl_bss_info struct */
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in wl_scan_results_t)
- */
-typedef struct wl_bss_info_108 {
-	u32 version;		/* version field */
-	u32 length;		/* byte length of data in this record,
-				 * starting at version and including IEs
-				 */
-	struct ether_addr BSSID;
-	u16 beacon_period;	/* units are Kusec */
-	u16 capability;	/* Capability information */
-	u8 SSID_len;
-	u8 SSID[32];
-	struct {
-		uint count;	/* # rates in this set */
-		u8 rates[16];	/* rates in 500kbps units w/hi bit set if basic */
-	} rateset;		/* supported rates */
-	chanspec_t chanspec;	/* chanspec for bss */
-	u16 atim_window;	/* units are Kusec */
-	u8 dtim_period;	/* DTIM period */
-	s16 RSSI;		/* receive signal strength (in dBm) */
-	s8 phy_noise;		/* noise (in dBm) */
-
-	u8 n_cap;		/* BSS is 802.11N Capable */
-	u32 nbss_cap;	/* 802.11N BSS Capabilities (based on HT_CAP_*) */
-	u8 ctl_ch;		/* 802.11N BSS control channel number */
-	u32 reserved32[1];	/* Reserved for expansion of BSS properties */
-	u8 flags;		/* flags */
-	u8 reserved[3];	/* Reserved for expansion of BSS properties */
-	u8 basic_mcs[MCSSET_LEN];	/* 802.11N BSS required MCS set */
-
-	u16 ie_offset;	/* offset at which IEs start, from beginning */
-	u32 ie_length;	/* byte length of Information Elements */
-	/* Add new fields here */
-	/* variable length Information Elements */
-} wl_bss_info_108_t;
-
 #ifdef BRCM_FULLMAC
+
 #define	WL_BSS_INFO_VERSION	108	/* current ver of wl_bss_info struct */
-#else
-#define	WL_BSS_INFO_VERSION	109	/* current ver of wl_bss_info struct */
-#endif
 
 /* BSS info structure
  * Applications MUST CHECK ie_offset field and length field to access IEs and
@@ -148,12 +75,14 @@
 	/* Add new fields here */
 	/* variable length Information Elements */
 } wl_bss_info_t;
+#endif /* BRCM_FULLMAC */
 
 typedef struct wlc_ssid {
 	u32 SSID_len;
 	unsigned char SSID[32];
 } wlc_ssid_t;
 
+#ifdef BRCM_FULLMAC
 typedef struct chan_scandata {
 	u8 txpower;
 	u8 pad;
@@ -308,6 +237,7 @@
 	struct ether_addr bssid;
 	struct ether_addr mac;
 } wl_probe_params_t;
+#endif /* BRCM_FULLMAC */
 
 #define WL_NUMRATES		16	/* max # of rates in a rateset */
 typedef struct wl_rateset {
@@ -315,6 +245,7 @@
 	u8 rates[WL_NUMRATES];	/* rates in 500kbps units w/hi bit set if basic */
 } wl_rateset_t;
 
+#ifdef BRCM_FULLMAC
 typedef struct wl_rateset_args {
 	u32 count;		/* # rates in this set */
 	u8 rates[WL_NUMRATES];	/* rates in 500kbps units w/hi bit set if basic */
@@ -352,6 +283,8 @@
 } wl_join_params_t;
 #define WL_JOIN_PARAMS_FIXED_SIZE 	(sizeof(wl_join_params_t) - sizeof(chanspec_t))
 
+#endif /* BRCM_FULLMAC */
+
 /* defines used by the nrate iovar */
 #define NRATE_MCS_INUSE	0x00000080	/* MSC in use,indicates b0-6 holds an mcs */
 #define NRATE_RATE_MASK 0x0000007f	/* rate/mcs value */
@@ -391,6 +324,7 @@
 
 #define HIGHEST_SINGLE_STREAM_MCS	7	/* MCS values greater than this enable multiple streams */
 
+#ifdef BRCM_FULLMAC
 #define MAX_CCA_CHANNELS 38	/* Max number of 20 Mhz wide channels */
 #define MAX_CCA_SECS     60	/* CCA keeps this many seconds history */
 
@@ -428,8 +362,11 @@
 	cca_congest_t secs[1];	/* Data */
 } cca_congest_channel_req_t;
 
+#endif /* BRCM_FULLMAC */
+
 #define WLC_CNTRY_BUF_SZ	4	/* Country string is 3 bytes + NUL */
 
+#ifdef BRCM_FULLMAC
 typedef struct wl_country {
 	char country_abbrev[WLC_CNTRY_BUF_SZ];	/* nul-terminated country code used in
 						 * the Country IE
@@ -516,6 +453,7 @@
 	wl_rm_rep_elt_t rep[1];	/* variable length block of reports */
 } wl_rm_rep_t;
 #define WL_RM_REP_FIXED_LEN	8
+#endif /* BRCM_FULLMAC */
 
 /* Enumerate crypto algorithms */
 #define	CRYPTO_ALGO_OFF			0
@@ -621,28 +559,6 @@
 	u8 activehi;
 } wl_led_info_t;
 
-/* flags */
-#define WLC_ASSOC_REQ_IS_REASSOC 0x01	/* assoc req was actually a reassoc */
-
-/* srom read/write struct passed through ioctl */
-typedef struct {
-	uint byteoff;		/* byte offset */
-	uint nbytes;		/* number of bytes */
-	u16 buf[1];
-} srom_rw_t;
-
-/* similar cis (srom or otp) struct [iovar: may not be aligned] */
-typedef struct {
-	u32 source;		/* cis source */
-	u32 byteoff;		/* byte offset */
-	u32 nbytes;		/* number of bytes */
-	/* data follows here */
-} cis_rw_t;
-
-#define WLC_CIS_DEFAULT	0	/* built-in default */
-#define WLC_CIS_SROM	1	/* source is sprom */
-#define WLC_CIS_OTP	2	/* source is otp */
-
 /* R_REG and W_REG struct passed through ioctl */
 typedef struct {
 	u32 byteoff;		/* byte offset of the field in d11regs_t */
@@ -651,102 +567,14 @@
 	uint band;		/* band (optional) */
 } rw_reg_t;
 
-/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
-/* PCL - Power Control Loop */
-/* current gain setting is replaced by user input */
-#define WL_ATTEN_APP_INPUT_PCL_OFF	0	/* turn off PCL, apply supplied input */
-#define WL_ATTEN_PCL_ON			1	/* turn on PCL */
-/* current gain setting is maintained */
-#define WL_ATTEN_PCL_OFF		2	/* turn off PCL. */
 
-typedef struct {
-	u16 auto_ctrl;	/* WL_ATTEN_XX */
-	u16 bb;		/* Baseband attenuation */
-	u16 radio;		/* Radio attenuation */
-	u16 txctl1;		/* Radio TX_CTL1 value */
-} atten_t;
-
-/* Per-AC retry parameters */
-struct wme_tx_params_s {
-	u8 short_retry;
-	u8 short_fallback;
-	u8 long_retry;
-	u8 long_fallback;
-	u16 max_rate;	/* In units of 512 Kbps */
-};
-
-typedef struct wme_tx_params_s wme_tx_params_t;
-
-#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
-
-/* defines used by poweridx iovar - it controls power in a-band */
-/* current gain setting is maintained */
-#define WL_PWRIDX_PCL_OFF	-2	/* turn off PCL.  */
-#define WL_PWRIDX_PCL_ON	-1	/* turn on PCL */
-#define WL_PWRIDX_LOWER_LIMIT	-2	/* lower limit */
-#define WL_PWRIDX_UPPER_LIMIT	63	/* upper limit */
-/* value >= 0 causes
- *	- input to be set to that value
- *	- PCL to be off
- */
-
-/* Used to get specific link/ac parameters */
-typedef struct {
-	int ac;
-	u8 val;
-	struct ether_addr ea;
-} link_val_t;
-
-#define BCM_MAC_STATUS_INDICATION	(0x40010200L)
-
-typedef struct {
-	u16 ver;		/* version of this struct */
-	u16 len;		/* length in bytes of this structure */
-	u16 cap;		/* sta's advertised capabilities */
-	u32 flags;		/* flags defined below */
-	u32 idle;		/* time since data pkt rx'd from sta */
-	struct ether_addr ea;	/* Station address */
-	wl_rateset_t rateset;	/* rateset in use */
-	u32 in;		/* seconds elapsed since associated */
-	u32 listen_interval_inms;	/* Min Listen interval in ms for this STA */
-	u32 tx_pkts;		/* # of packets transmitted */
-	u32 tx_failures;	/* # of packets failed */
-	u32 rx_ucast_pkts;	/* # of unicast packets received */
-	u32 rx_mcast_pkts;	/* # of multicast packets received */
-	u32 tx_rate;		/* Rate of last successful tx frame */
-	u32 rx_rate;		/* Rate of last successful rx frame */
-	u32 rx_decrypt_succeeds;	/* # of packet decrypted successfully */
-	u32 rx_decrypt_failures;	/* # of packet decrypted unsuccessfully */
-} sta_info_t;
-
-#define WL_OLD_STAINFO_SIZE	offsetof(sta_info_t, tx_pkts)
-
-#define WL_STA_VER		3
-
-/* Flags for sta_info_t indicating properties of STA */
-#define WL_STA_BRCM		0x1	/* Running a Broadcom driver */
-#define WL_STA_WME		0x2	/* WMM association */
-#define WL_STA_ABCAP		0x4
-#define WL_STA_AUTHE		0x8	/* Authenticated */
-#define WL_STA_ASSOC		0x10	/* Associated */
-#define WL_STA_AUTHO		0x20	/* Authorized */
-#define WL_STA_WDS		0x40	/* Wireless Distribution System */
-#define WL_STA_WDS_LINKUP	0x80	/* WDS traffic/probes flowing properly */
-#define WL_STA_PS		0x100	/* STA is in power save mode from AP's viewpoint */
-#define WL_STA_APSD_BE		0x200	/* APSD delv/trigger for AC_BE is default enabled */
-#define WL_STA_APSD_BK		0x400	/* APSD delv/trigger for AC_BK is default enabled */
-#define WL_STA_APSD_VI		0x800	/* APSD delv/trigger for AC_VI is default enabled */
-#define WL_STA_APSD_VO		0x1000	/* APSD delv/trigger for AC_VO is default enabled */
-#define WL_STA_N_CAP		0x2000	/* STA 802.11n capable */
-#define WL_STA_SCBSTATS		0x4000	/* Per STA debug stats */
-
-#define WL_WDS_LINKUP		WL_STA_WDS_LINKUP	/* deprecated */
-
+#ifdef BRCM_FULLMAC
 /* Used to get specific STA parameters */
 typedef struct {
 	u32 val;
 	struct ether_addr ea;
 } scb_val_t;
+#endif /* BRCM_FULLMAC */
 
 /* channel encoding */
 typedef struct channel_info {
@@ -770,6 +598,7 @@
 	uint rx_ocast_good_pkt;	/* unicast packets destined for others */
 } get_pktcnt_t;
 
+#ifdef BRCM_FULLMAC
 /* Linux network driver ioctl encoding */
 typedef struct wl_ioctl {
 	uint cmd;		/* common ioctl definition */
@@ -779,11 +608,8 @@
 	uint used;		/* bytes read or written (optional) */
 	uint needed;		/* bytes needed (optional) */
 } wl_ioctl_t;
+#endif /* BRCM_FULLMAC */
 
-/* reference to wl_ioctl_t struct used by usermode driver */
-#define ioctl_subtype	set	/* subtype param */
-#define ioctl_pid	used	/* pid param */
-#define ioctl_status	needed	/* status param */
 
 /*
  * Structure for passing hardware and software
@@ -810,45 +636,11 @@
 
 #define WL_REV_INFO_LEGACY_LENGTH	48
 
-#define WL_BRAND_MAX 10
-typedef struct wl_instance_info {
-	uint instance;
-	char brand[WL_BRAND_MAX];
-} wl_instance_info_t;
-
-/* structure to change size of tx fifo */
-typedef struct wl_txfifo_sz {
-	u16 magic;
-	u16 fifo;
-	u16 size;
-} wl_txfifo_sz_t;
-/* magic pattern used for mismatch driver and wl */
-#define WL_TXFIFO_SZ_MAGIC	0xa5a5
-
-/* Transfer info about an IOVar from the driver */
-/* Max supported IOV name size in bytes, + 1 for nul termination */
-#define WLC_IOV_NAME_LEN 30
-typedef struct wlc_iov_trx_s {
-	u8 module;
-	u8 type;
-	char name[WLC_IOV_NAME_LEN];
-} wlc_iov_trx_t;
-
-/* check this magic number */
-#define WLC_IOCTL_MAGIC		0x14e46c77
-
-#define PROC_ENTRY_NAME "brcm_debug"
-/* bump this number if you change the ioctl interface */
-#define WLC_IOCTL_VERSION	1
-
 #ifdef BRCM_FULLMAC
-#define	WLC_IOCTL_MAXLEN	8192
-#else
-#define	WLC_IOCTL_MAXLEN		3072	/* max length ioctl buffer required */
-#endif
 #define	WLC_IOCTL_SMLEN			256	/* "small" length ioctl buffer required */
 #define WLC_IOCTL_MEDLEN		1536	/* "med" length ioctl buffer required */
-#define WLC_SAMPLECOLLECT_MAXLEN	10240	/* Max Sample Collect buffer for two cores */
+#define	WLC_IOCTL_MAXLEN	8192
+#endif
 
 /* common ioctl definitions */
 #define WLC_GET_MAGIC				0
@@ -1399,23 +1191,6 @@
 #define WL_TX_POWER_MCS40_FIRST	        28
 #define WL_TX_POWER_MCS40_NUM	        17
 
-typedef struct {
-	u32 flags;
-	chanspec_t chanspec;	/* txpwr report for this channel */
-	chanspec_t local_chanspec;	/* channel on which we are associated */
-	u8 local_max;	/* local max according to the AP */
-	u8 local_constraint;	/* local constraint according to the AP */
-	s8 antgain[2];	/* Ant gain for each band - from SROM */
-	u8 rf_cores;		/* count of RF Cores being reported */
-	u8 est_Pout[4];	/* Latest tx power out estimate per RF
-				 * chain without adjustment
-				 */
-	u8 est_Pout_cck;	/* Latest CCK tx power out estimate */
-	u8 user_limit[WL_TX_POWER_RATES_LEGACY];	/* User limit */
-	u8 reg_limit[WL_TX_POWER_RATES_LEGACY];	/* Regulatory power limit */
-	u8 board_limit[WL_TX_POWER_RATES_LEGACY];	/* Max power board can support (SROM) */
-	u8 target[WL_TX_POWER_RATES_LEGACY];	/* Latest target power */
-} tx_power_legacy2_t;
 
 #define WL_TX_POWER_RATES	       101
 #define WL_TX_POWER_CCK_FIRST	       0
@@ -1848,63 +1623,6 @@
 	u8 retry;		/* retry value */
 };
 
-/* structure for addts arguments */
-/* For ioctls that take a list of TSPEC */
-struct tslist {
-	int count;		/* number of tspecs */
-	struct tsinfo_arg tsinfo[1];	/* variable length array of tsinfo */
-};
-
-/* structure for addts/delts arguments */
-typedef struct tspec_arg {
-	u16 version;		/* see definition of TSPEC_ARG_VERSION */
-	u16 length;		/* length of entire structure */
-	uint flag;		/* bit field */
-	/* TSPEC Arguments */
-	struct tsinfo_arg tsinfo;	/* TS Info bit field */
-	u16 nom_msdu_size;	/* (Nominal or fixed) MSDU Size (bytes) */
-	u16 max_msdu_size;	/* Maximum MSDU Size (bytes) */
-	uint min_srv_interval;	/* Minimum Service Interval (us) */
-	uint max_srv_interval;	/* Maximum Service Interval (us) */
-	uint inactivity_interval;	/* Inactivity Interval (us) */
-	uint suspension_interval;	/* Suspension Interval (us) */
-	uint srv_start_time;	/* Service Start Time (us) */
-	uint min_data_rate;	/* Minimum Data Rate (bps) */
-	uint mean_data_rate;	/* Mean Data Rate (bps) */
-	uint peak_data_rate;	/* Peak Data Rate (bps) */
-	uint max_burst_size;	/* Maximum Burst Size (bytes) */
-	uint delay_bound;	/* Delay Bound (us) */
-	uint min_phy_rate;	/* Minimum PHY Rate (bps) */
-	u16 surplus_bw;	/* Surplus Bandwidth Allowance (range 1.0 to 8.0) */
-	u16 medium_time;	/* Medium Time (32 us/s periods) */
-	u8 dialog_token;	/* dialog token */
-} tspec_arg_t;
-
-/* tspec arg for desired station */
-typedef struct tspec_per_sta_arg {
-	struct ether_addr ea;
-	struct tspec_arg ts;
-} tspec_per_sta_arg_t;
-
-/* structure for max bandwidth for each access category */
-typedef struct wme_max_bandwidth {
-	u32 ac[AC_COUNT];	/* max bandwidth for each access category */
-} wme_max_bandwidth_t;
-
-#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t))
-
-/* current version of wl_tspec_arg_t struct */
-#define	TSPEC_ARG_VERSION		2	/* current version of wl_tspec_arg_t struct */
-#define TSPEC_ARG_LENGTH		55	/* argument length from tsinfo to medium_time */
-#define TSPEC_DEFAULT_DIALOG_TOKEN	42	/* default dialog token */
-#define TSPEC_DEFAULT_SBW_FACTOR	0x3000	/* default surplus bw */
-
-/* define for flag */
-#define TSPEC_PENDING		0	/* TSPEC pending */
-#define TSPEC_ACCEPTED		1	/* TSPEC accepted */
-#define TSPEC_REJECTED		2	/* TSPEC rejected */
-#define TSPEC_UNKNOWN		3	/* TSPEC unknown */
-#define TSPEC_STATUS_MASK	7	/* TSPEC status mask */
 
 /* Software feature flag defines used by wlfeatureflag */
 #define WL_SWFL_NOHWRADIO	0x0004
@@ -1913,16 +1631,6 @@
 
 #define WL_LIFETIME_MAX 0xFFFF	/* Max value in ms */
 
-/*
- * Dongle pattern matching filter.
- */
-
-/* Packet filter types. Currently, only pattern matching is supported. */
-typedef enum wl_pkt_filter_type {
-	WL_PKT_FILTER_TYPE_PATTERN_MATCH	/* Pattern matching filter */
-} wl_pkt_filter_type_t;
-
-#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
 
 /* Pattern matching filter. Specifies an offset within received packets to
  * start matching, the pattern to match, the size of the pattern, and a bitmask
@@ -1957,20 +1665,6 @@
 	u32 enable;		/* Enable/disable bool */
 } wl_pkt_filter_enable_t;
 
-/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
-typedef struct wl_pkt_filter_list {
-	u32 num;		/* Number of installed packet filters */
-	wl_pkt_filter_t filter[1];	/* Variable array of packet filters. */
-} wl_pkt_filter_list_t;
-
-#define WL_PKT_FILTER_LIST_FIXED_LEN	  offsetof(wl_pkt_filter_list_t, filter)
-
-/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */
-typedef struct wl_pkt_filter_stats {
-	u32 num_pkts_matched;	/* # filter matches for specified filter id */
-	u32 num_pkts_forwarded;	/* # packets fwded from dongle to host for all filters */
-	u32 num_pkts_discarded;	/* # packets discarded by dongle for all filters */
-} wl_pkt_filter_stats_t;
 
 #define	WLC_RSSI_INVALID	 0	/* invalid RSSI value */
 
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_cmn.c b/drivers/staging/brcm80211/phy/wlc_phy_cmn.c
index 8287261..3bed37c 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_cmn.c
+++ b/drivers/staging/brcm80211/phy/wlc_phy_cmn.c
@@ -20,10 +20,15 @@
 #include <linux/string.h>
 #include <bcmdefs.h>
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <bcmendian.h>
 #include <bcmnvram.h>
 #include <sbchipc.h>
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 
 #include <wlc_phy_int.h>
 #include <wlc_phyreg_n.h>
@@ -163,7 +168,7 @@
 		return NULL;
 
 	for (s = vars; s && *s;) {
-		if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+		if ((memcmp(s, name, len) == 0) && (s[len] == '='))
 			return &s[len + 1];
 
 		while (*s++)
@@ -272,7 +277,7 @@
 
 void write_radio_reg(phy_info_t *pi, u16 addr, u16 val)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 
 	if (NORADIO_ENAB(pi->pubpi))
 		return;
@@ -296,7 +301,7 @@
 		W_REG(osh, &pi->regs->phy4wdatalo, val);
 	}
 
-	if (BUSTYPE(pi->sh->bustype) == PCI_BUS) {
+	if (pi->sh->bustype == PCI_BUS) {
 		if (++pi->phy_wreg >= pi->phy_wreg_limit) {
 			(void)R_REG(osh, &pi->regs->maccontrol);
 			pi->phy_wreg = 0;
@@ -405,7 +410,7 @@
 
 u16 read_phy_reg(phy_info_t *pi, u16 addr)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs;
 
 	osh = pi->sh->osh;
@@ -426,7 +431,7 @@
 
 void write_phy_reg(phy_info_t *pi, u16 addr, u16 val)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs;
 
 	osh = pi->sh->osh;
@@ -441,7 +446,7 @@
 #else
 	W_REG(osh, (volatile u32 *)(&regs->phyregaddr),
 	      addr | (val << 16));
-	if (BUSTYPE(pi->sh->bustype) == PCI_BUS) {
+	if (pi->sh->bustype == PCI_BUS) {
 		if (++pi->phy_wreg >= pi->phy_wreg_limit) {
 			pi->phy_wreg = 0;
 			(void)R_REG(osh, &regs->phyversion);
@@ -452,7 +457,7 @@
 
 void and_phy_reg(phy_info_t *pi, u16 addr, u16 val)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs;
 
 	osh = pi->sh->osh;
@@ -473,7 +478,7 @@
 
 void or_phy_reg(phy_info_t *pi, u16 addr, u16 val)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs;
 
 	osh = pi->sh->osh;
@@ -494,7 +499,7 @@
 
 void mod_phy_reg(phy_info_t *pi, u16 addr, u16 mask, u16 val)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs;
 
 	osh = pi->sh->osh;
@@ -591,7 +596,7 @@
 
 void wlc_phy_shared_detach(shared_phy_t *phy_sh)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 
 	if (phy_sh) {
 		osh = phy_sh->osh;
@@ -609,7 +614,7 @@
 	u32 sflags = 0;
 	uint phyversion;
 	int i;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	osh = sh->osh;
 
@@ -1080,8 +1085,8 @@
 	pi->tbl_data_hi = tblDataHi;
 	pi->tbl_data_lo = tblDataLo;
 
-	if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
-	     CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+	if ((pi->sh->chip == BCM43224_CHIP_ID ||
+	     pi->sh->chip == BCM43421_CHIP_ID) &&
 	    (pi->sh->chiprev == 1)) {
 		pi->tbl_addr = tblAddr;
 		pi->tbl_save_id = tbl_id;
@@ -1093,8 +1098,8 @@
 {
 	ASSERT((width == 8) || (width == 16) || (width == 32));
 
-	if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
-	     CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+	if ((pi->sh->chip == BCM43224_CHIP_ID ||
+	     pi->sh->chip == BCM43421_CHIP_ID) &&
 	    (pi->sh->chiprev == 1) &&
 	    (pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
 		read_phy_reg(pi, pi->tbl_data_lo);
@@ -1132,8 +1137,8 @@
 
 	for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
 
-		if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
-		     CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+		if ((pi->sh->chip == BCM43224_CHIP_ID ||
+		     pi->sh->chip == BCM43421_CHIP_ID) &&
 		    (pi->sh->chiprev == 1) &&
 		    (tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
 			read_phy_reg(pi, tblDataLo);
@@ -1175,8 +1180,8 @@
 
 	for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
 
-		if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID ||
-		     CHIPID(pi->sh->chip) == BCM43421_CHIP_ID) &&
+		if ((pi->sh->chip == BCM43224_CHIP_ID ||
+		     pi->sh->chip == BCM43421_CHIP_ID) &&
 		    (pi->sh->chiprev == 1)) {
 			(void)read_phy_reg(pi, tblDataLo);
 
@@ -1534,7 +1539,7 @@
 
 	ASSERT((band == WLC_BAND_2G) || (band == WLC_BAND_5G));
 
-	bzero(channels, sizeof(chanvec_t));
+	memset(channels, 0, sizeof(chanvec_t));
 
 	for (i = 0; i < ARRAY_SIZE(chan_info_all); i++) {
 		channel = chan_info_all[i].chan;
@@ -1896,7 +1901,7 @@
 		tx_pwr_min = min(tx_pwr_min, tx_pwr_target[rate]);
 	}
 
-	bzero(pi->tx_power_offset, sizeof(pi->tx_power_offset));
+	memset(pi->tx_power_offset, 0, sizeof(pi->tx_power_offset));
 	pi->tx_power_max = tx_pwr_max;
 	pi->tx_power_min = tx_pwr_min;
 	pi->tx_power_max_rate_ind = tx_pwr_max_rate_ind;
@@ -2507,7 +2512,7 @@
 	s8 cmplx_pwr_dbm[PHY_CORE_MAX];
 	u8 i;
 
-	bzero((u8 *) cmplx_pwr_dbm, sizeof(cmplx_pwr_dbm));
+	memset((u8 *) cmplx_pwr_dbm, 0, sizeof(cmplx_pwr_dbm));
 	ASSERT(pi->pubpi.phy_corenum <= PHY_CORE_MAX);
 	wlc_phy_compute_dB(cmplx_pwr, cmplx_pwr_dbm, pi->pubpi.phy_corenum);
 
@@ -2621,9 +2626,9 @@
 			u8 wait_crs = 0;
 			u8 i;
 
-			bzero((u8 *) est, sizeof(est));
-			bzero((u8 *) cmplx_pwr, sizeof(cmplx_pwr));
-			bzero((u8 *) noise_dbm_ant, sizeof(noise_dbm_ant));
+			memset((u8 *) est, 0, sizeof(est));
+			memset((u8 *) cmplx_pwr, 0, sizeof(cmplx_pwr));
+			memset((u8 *) noise_dbm_ant, 0, sizeof(noise_dbm_ant));
 
 			log_num_samps = PHY_NOISE_SAMPLE_LOG_NUM_NPHY;
 			num_samps = 1 << log_num_samps;
@@ -2704,8 +2709,8 @@
 	u8 idx, core;
 
 	ASSERT(pi->pubpi.phy_corenum <= PHY_CORE_MAX);
-	bzero((u8 *) cmplx_pwr, sizeof(cmplx_pwr));
-	bzero((u8 *) noise_dbm_ant, sizeof(noise_dbm_ant));
+	memset((u8 *) cmplx_pwr, 0, sizeof(cmplx_pwr));
+	memset((u8 *) noise_dbm_ant, 0, sizeof(noise_dbm_ant));
 
 	for (idx = 0, core = 0; core < pi->pubpi.phy_corenum; idx += 2, core++) {
 		lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx));
@@ -3325,7 +3330,7 @@
 
 void wlc_lcnphy_epa_switch(phy_info_t *pi, bool mode)
 {
-	if ((CHIPID(pi->sh->chip) == BCM4313_CHIP_ID) &&
+	if ((pi->sh->chip == BCM4313_CHIP_ID) &&
 	    (pi->sh->boardflags & BFL_FEM)) {
 		if (mode) {
 			u16 txant = 0;
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_hal.h b/drivers/staging/brcm80211/phy/wlc_phy_hal.h
index 52260b2..514e15e 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_hal.h
+++ b/drivers/staging/brcm80211/phy/wlc_phy_hal.h
@@ -122,11 +122,7 @@
 
 struct phy_pub;
 
-#ifdef WLC_HIGH_ONLY
-typedef struct wlc_rpc_phy wlc_phy_t;
-#else
 typedef struct phy_pub wlc_phy_t;
-#endif
 
 typedef struct shared_phy_params {
 	void *osh;
@@ -150,7 +146,6 @@
 	u32 boardflags2;
 } shared_phy_params_t;
 
-#ifdef WLC_LOW
 
 extern shared_phy_t *wlc_phy_shared_attach(shared_phy_params_t *shp);
 extern void wlc_phy_shared_detach(shared_phy_t *phy_sh);
@@ -189,7 +184,6 @@
 extern void wlc_phy_switch_radio(wlc_phy_t *ppi, bool on);
 extern void wlc_phy_anacore(wlc_phy_t *ppi, bool on);
 
-#endif				/* WLC_LOW */
 
 extern void wlc_phy_BSSinit(wlc_phy_t *ppi, bool bonlyap, int rssi);
 
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_int.h b/drivers/staging/brcm80211/phy/wlc_phy_int.h
index 9513b87..72eee91 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_int.h
+++ b/drivers/staging/brcm80211/phy/wlc_phy_int.h
@@ -527,7 +527,7 @@
 struct shared_phy {
 	struct phy_info *phy_head;
 	uint unit;
-	osl_t *osh;
+	struct osl_info *osh;
 	si_t *sih;
 	void *physhim;
 	uint corerev;
@@ -1158,7 +1158,7 @@
 	 (pi->ipa5g_on && CHSPEC_IS5G(pi->radio_chanspec)))
 
 #define WLC_PHY_WAR_PR51571(pi) \
-	if ((BUSTYPE((pi)->sh->bustype) == PCI_BUS) && NREV_LT((pi)->pubpi.phy_rev, 3)) \
+	if (((pi)->sh->bustype == PCI_BUS) && NREV_LT((pi)->pubpi.phy_rev, 3)) \
 		(void)R_REG((pi)->sh->osh, &(pi)->regs->maccontrol)
 
 extern void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype);
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_lcn.c b/drivers/staging/brcm80211/phy/wlc_phy_lcn.c
index 3d3112e..3ac2b49 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_lcn.c
+++ b/drivers/staging/brcm80211/phy/wlc_phy_lcn.c
@@ -17,13 +17,18 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 #include <wlc_cfg.h>
 #include <qmath.h>
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/pci.h>
 #include <siutils.h>
 #include <hndpmu.h>
 
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+
 #include <wlc_phy_radio.h>
 #include <wlc_phy_int.h>
 #include <wlc_phy_lcn.h>
@@ -1327,7 +1332,7 @@
 	u32 data_buf[64];
 	phytbl_info_t tab;
 
-	bzero(data_buf, sizeof(data_buf));
+	memset(data_buf, 0, sizeof(data_buf));
 
 	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
 	tab.tbl_width = 32;
@@ -1951,7 +1956,7 @@
 	band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
 
 	cal_gains = *target_gains;
-	bzero(ncorr_override, sizeof(ncorr_override));
+	memset(ncorr_override, 0, sizeof(ncorr_override));
 	for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
 		if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
 			cal_gains.gm_gain =
@@ -2529,7 +2534,7 @@
 	tab.tbl_width = 32;
 	tab.tbl_offset = 0;
 
-	bzero(temp_offset, sizeof(temp_offset));
+	memset(temp_offset, 0, sizeof(temp_offset));
 	for (j = 1; j < 128; j += 2)
 		temp_offset[j] = 0x80000;
 
diff --git a/drivers/staging/brcm80211/phy/wlc_phy_n.c b/drivers/staging/brcm80211/phy/wlc_phy_n.c
index 950008f..c6cce8d 100644
--- a/drivers/staging/brcm80211/phy/wlc_phy_n.c
+++ b/drivers/staging/brcm80211/phy/wlc_phy_n.c
@@ -18,13 +18,18 @@
 #include <linux/string.h>
 #include <bcmdefs.h>
 #include <wlc_cfg.h>
-#include <linuxver.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
 #include <osl.h>
 #include <siutils.h>
 #include <sbchipc.h>
 #include <hndpmu.h>
 #include <bcmendian.h>
 
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+
 #include <wlc_phy_radio.h>
 #include <wlc_phy_int.h>
 #include <wlc_phyreg_n.h>
@@ -14554,7 +14559,7 @@
 		}
 	}
 
-	if ((!PHY_IPA(pi)) && (CHIPID(pi->sh->chip) == BCM5357_CHIP_ID)) {
+	if ((!PHY_IPA(pi)) && (pi->sh->chip == BCM5357_CHIP_ID)) {
 		si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
 				   CCTRL5357_EXTPA);
 	}
@@ -17599,7 +17604,7 @@
 
 	mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x1, 0x1);
 
-	if (CHIPID(pi->sh->chip) == !BCM6362_CHIP_ID) {
+	if (pi->sh->chip == !BCM6362_CHIP_ID) {
 
 		mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x2, 0x2);
 	}
@@ -18007,8 +18012,8 @@
 			write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
 					RADIO_2056_SYN, 0x1f);
 
-			if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
-			    (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID)) {
+			if ((pi->sh->chip == BCM4716_CHIP_ID) ||
+			    (pi->sh->chip == BCM47162_CHIP_ID)) {
 
 				write_radio_reg(pi,
 						RADIO_2056_SYN_PLL_LOOPFILTER4 |
@@ -18070,8 +18075,8 @@
 				WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
 						 PADG_IDAC, 0xcc);
 
-				if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
-				    (CHIPID(pi->sh->chip) ==
+				if ((pi->sh->chip == BCM4716_CHIP_ID) ||
+				    (pi->sh->chip ==
 				     BCM47162_CHIP_ID)) {
 					bias = 0x40;
 					cascbias = 0x45;
@@ -18083,11 +18088,11 @@
 					bias = 0x25;
 					cascbias = 0x20;
 
-					if ((CHIPID(pi->sh->chip) ==
+					if ((pi->sh->chip ==
 					     BCM43224_CHIP_ID)
-					    || (CHIPID(pi->sh->chip) ==
+					    || (pi->sh->chip ==
 						BCM43225_CHIP_ID)
-					    || (CHIPID(pi->sh->chip) ==
+					    || (pi->sh->chip ==
 						BCM43421_CHIP_ID)) {
 						if (pi->sh->chippkg ==
 						    BCM43224_FAB_SMIC) {
@@ -18198,9 +18203,9 @@
 
 			cascbias = 0x30;
 
-			if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID) ||
-			    (CHIPID(pi->sh->chip) == BCM43225_CHIP_ID) ||
-			    (CHIPID(pi->sh->chip) == BCM43421_CHIP_ID)) {
+			if ((pi->sh->chip == BCM43224_CHIP_ID) ||
+			    (pi->sh->chip == BCM43225_CHIP_ID) ||
+			    (pi->sh->chip == BCM43421_CHIP_ID)) {
 				if (pi->sh->chippkg == BCM43224_FAB_SMIC) {
 					cascbias = 0x35;
 				}
@@ -18927,7 +18932,7 @@
 			case 38:
 			case 102:
 			case 118:
-				if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) &&
+				if ((pi->sh->chip == BCM4716_CHIP_ID) &&
 				    (pi->sh->chippkg == BCM4717_PKG_ID)) {
 					nphy_adj_tone_id_buf[0] = 32;
 					nphy_adj_noise_var_buf[0] = 0x21f;
@@ -19062,7 +19067,7 @@
 				if (pi->nphy_aband_spurwar_en &&
 				    ((val == 38) || (val == 102)
 				     || (val == 118))) {
-					if ((CHIPID(pi->sh->chip) ==
+					if ((pi->sh->chip ==
 					     BCM4716_CHIP_ID)
 					    && (pi->sh->chippkg ==
 						BCM4717_PKG_ID)) {
@@ -19077,8 +19082,8 @@
 		if (pi->phy_spuravoid == SPURAVOID_FORCEON)
 			spuravoid = 1;
 
-		if ((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
-		    (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID)) {
+		if ((pi->sh->chip == BCM4716_CHIP_ID) ||
+		    (pi->sh->chip == BCM47162_CHIP_ID)) {
 			si_pmu_spuravoid(pi->sh->sih, pi->sh->osh, spuravoid);
 		} else {
 			wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
@@ -19086,9 +19091,9 @@
 			wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
 		}
 
-		if ((CHIPID(pi->sh->chip) == BCM43224_CHIP_ID) ||
-		    (CHIPID(pi->sh->chip) == BCM43225_CHIP_ID) ||
-		    (CHIPID(pi->sh->chip) == BCM43421_CHIP_ID)) {
+		if ((pi->sh->chip == BCM43224_CHIP_ID) ||
+		    (pi->sh->chip == BCM43225_CHIP_ID) ||
+		    (pi->sh->chip == BCM43421_CHIP_ID)) {
 
 			if (spuravoid == 1) {
 
@@ -19105,8 +19110,8 @@
 			}
 		}
 
-		if (!((CHIPID(pi->sh->chip) == BCM4716_CHIP_ID) ||
-		      (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID))) {
+		if (!((pi->sh->chip == BCM4716_CHIP_ID) ||
+		      (pi->sh->chip == BCM47162_CHIP_ID))) {
 			wlapi_bmac_core_phypll_reset(pi->sh->physhim);
 		}
 
@@ -21062,11 +21067,11 @@
 		wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
 					 &auxADC_rssi_ctrlH_save);
 
-		if (CHIPID(pi->sh->chip) == BCM5357_CHIP_ID) {
+		if (pi->sh->chip == BCM5357_CHIP_ID) {
 			radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
 					 + 88 * (auxADC_Vl) - 27111 +
 					 128) / 256;
-		} else if (CHIPID(pi->sh->chip) == BCM43236_CHIP_ID) {
+		} else if (pi->sh->chip == BCM43236_CHIP_ID) {
 			radio_temp[0] = (198 * (radio_temp[1] + radio_temp2[1])
 					 + 91 * (auxADC_Vl) - 27243 +
 					 128) / 256;
@@ -26277,7 +26282,7 @@
 		} else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
 
 			tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
-			if (CHIPID(pi->sh->chip) == BCM47162_CHIP_ID) {
+			if (pi->sh->chip == BCM47162_CHIP_ID) {
 
 				tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
 			}
@@ -26833,7 +26838,7 @@
 		phy_a2 = 63;
 
 		if (CHSPEC_IS2G(pi->radio_chanspec)) {
-			if (CHIPID(pi->sh->chip) == BCM6362_CHIP_ID) {
+			if (pi->sh->chip == BCM6362_CHIP_ID) {
 				phy_a1 = 35;
 				phy_a3 = 35;
 			} else if ((pi->pubpi.radiorev == 4)
@@ -26946,7 +26951,7 @@
 					if (NREV_GE(pi->pubpi.phy_rev, 6)) {
 						phy_a5 = 0x00f7 | (phy_a4 << 8);
 
-						if (CHIPID(pi->sh->chip) ==
+						if (pi->sh->chip ==
 						    BCM47162_CHIP_ID) {
 							phy_a5 =
 							    0x10f7 | (phy_a4 <<
diff --git a/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c b/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c
index 6ce9e5d..330b881 100644
--- a/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c
+++ b/drivers/staging/brcm80211/phy/wlc_phytbl_lcn.c
@@ -15,6 +15,9 @@
  */
 
 #include <linux/types.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <osl.h>
 #include <wlc_phy_int.h>
 #include <wlc_phytbl_lcn.h>
 
diff --git a/drivers/staging/brcm80211/phy/wlc_phytbl_n.c b/drivers/staging/brcm80211/phy/wlc_phytbl_n.c
index 7cc2c56..a9fc193 100644
--- a/drivers/staging/brcm80211/phy/wlc_phytbl_n.c
+++ b/drivers/staging/brcm80211/phy/wlc_phytbl_n.c
@@ -16,6 +16,9 @@
 
 #include <linux/kernel.h>
 
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <osl.h>
 #include <wlc_phy_int.h>
 #include <wlc_phytbl_n.h>
 
diff --git a/drivers/staging/brcm80211/sys/wl_dbg.h b/drivers/staging/brcm80211/sys/wl_dbg.h
index e63b27e..54af257 100644
--- a/drivers/staging/brcm80211/sys/wl_dbg.h
+++ b/drivers/staging/brcm80211/sys/wl_dbg.h
@@ -20,15 +20,20 @@
 /* wl_msg_level is a bit vector with defs in wlioctl.h */
 extern u32 wl_msg_level;
 
-#define WL_PRINT(args)		printf args
-#define WL_NONE(args)
+#define WL_NONE(fmt, args...) no_printk(fmt, ##args)
+
+#define WL_PRINT(level, fmt, args...)		\
+do {						\
+	if (wl_msg_level & level)		\
+		printk(fmt, ##args);		\
+} while (0)
 
 #ifdef BCMDBG
 
-#define	WL_ERROR(args)		do {if ((wl_msg_level & WL_ERROR_VAL)) WL_PRINT(args); } while (0)
-#define	WL_TRACE(args)		do {if (wl_msg_level & WL_TRACE_VAL) WL_PRINT(args); } while (0)
-#define WL_AMPDU(args)		do {if (wl_msg_level & WL_AMPDU_VAL) WL_PRINT(args); } while (0)
-#define WL_FFPLD(args)		do {if (wl_msg_level & WL_FFPLD_VAL) WL_PRINT(args); } while (0)
+#define	WL_ERROR(fmt, args...)	WL_PRINT(WL_ERROR_VAL, fmt, ##args)
+#define	WL_TRACE(fmt, args...)	WL_PRINT(WL_TRACE_VAL, fmt, ##args)
+#define WL_AMPDU(fmt, args...)	WL_PRINT(WL_AMPDU_VAL, fmt, ##args)
+#define WL_FFPLD(fmt, args...)	WL_PRINT(WL_FFPLD_VAL, fmt, ##args)
 
 #define WL_ERROR_ON()		(wl_msg_level & WL_ERROR_VAL)
 
@@ -44,35 +49,50 @@
 
 extern u32 wl_ampdu_dbg;
 
-#define WL_AMPDU_UPDN(args) do {if (wl_ampdu_dbg & WL_AMPDU_UPDN_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_RX(args) do {if (wl_ampdu_dbg & WL_AMPDU_RX_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_ERR(args) do {if (wl_ampdu_dbg & WL_AMPDU_ERR_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_TX(args) do {if (wl_ampdu_dbg & WL_AMPDU_TX_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_CTL(args) do {if (wl_ampdu_dbg & WL_AMPDU_CTL_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_HW(args) do {if (wl_ampdu_dbg & WL_AMPDU_HW_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_HWTXS(args) do {if (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL) {WL_AMPDU(args); } } while (0)
-#define WL_AMPDU_HWDBG(args) do {if (wl_ampdu_dbg & WL_AMPDU_HWDBG_VAL) {WL_AMPDU(args); } } while (0)
+#define WL_AMPDU_PRINT(level, fmt, args...)	\
+do {						\
+	if (wl_ampdu_dbg & level) {		\
+		WL_AMPDU(fmt, ##args);		\
+	}					\
+} while (0)
+
+#define WL_AMPDU_UPDN(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_UPDN_VAL, fmt, ##args)
+#define WL_AMPDU_RX(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_RX_VAL, fmt, ##args)
+#define WL_AMPDU_ERR(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_ERR_VAL, fmt, ##args)
+#define WL_AMPDU_TX(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_TX_VAL, fmt, ##args)
+#define WL_AMPDU_CTL(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_CTL_VAL, fmt, ##args)
+#define WL_AMPDU_HW(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_HW_VAL, fmt, ##args)
+#define WL_AMPDU_HWTXS(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_HWTXS_VAL, fmt, ##args)
+#define WL_AMPDU_HWDBG(fmt, args...)			\
+	WL_AMPDU_PRINT(WL_AMPDU_HWDBG_VAL, fmt, ##args)
 #define WL_AMPDU_ERR_ON() (wl_ampdu_dbg & WL_AMPDU_ERR_VAL)
 #define WL_AMPDU_HW_ON() (wl_ampdu_dbg & WL_AMPDU_HW_VAL)
 #define WL_AMPDU_HWTXS_ON() (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL)
 
 #else				/* BCMDBG */
 
-#define	WL_ERROR(args)
-#define	WL_TRACE(args)
-#define WL_AMPDU(args)
-#define WL_FFPLD(args)
+#define	WL_ERROR(fmt, args...)		no_printk(fmt, ##args)
+#define	WL_TRACE(fmt, args...)		no_printk(fmt, ##args)
+#define WL_AMPDU(fmt, args...)		no_printk(fmt, ##args)
+#define WL_FFPLD(fmt, args...)		no_printk(fmt, ##args)
 
 #define WL_ERROR_ON()		0
 
-#define WL_AMPDU_UPDN(args)
-#define WL_AMPDU_RX(args)
-#define WL_AMPDU_ERR(args)
-#define WL_AMPDU_TX(args)
-#define WL_AMPDU_CTL(args)
-#define WL_AMPDU_HW(args)
-#define WL_AMPDU_HWTXS(args)
-#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_UPDN(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_RX(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_ERR(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_TX(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_CTL(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_HW(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_HWTXS(fmt, args...)	no_printk(fmt, ##args)
+#define WL_AMPDU_HWDBG(fmt, args...)	no_printk(fmt, ##args)
 #define WL_AMPDU_ERR_ON()       0
 #define WL_AMPDU_HW_ON()        0
 #define WL_AMPDU_HWTXS_ON()     0
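
The wl_dbg.h rewrite above converts the old double-parenthesis convention -- WL_ERROR(("fmt", args)) passing a single parenthesised argument list -- into real variadic macros (fmt, args... with ##args), so call sites lose the extra parentheses, as the wl_mac80211.c hunks below show. In non-debug builds, no_printk() still type-checks the format arguments while generating no output. A minimal userspace model of the same pattern, with a hypothetical DBG() macro and a plain if (0) standing in for no_printk():

	#include <stdio.h>

	#define DBG_BUILD 1

	#if DBG_BUILD
	#define DBG(fmt, args...)	printf(fmt, ##args)
	#else
	/* Arguments are still type-checked, nothing is emitted. */
	#define DBG(fmt, args...)	do { if (0) printf(fmt, ##args); } while (0)
	#endif

	int main(void)
	{
		int err = -5;

		/* old style:  DBG(("wl_up() returned %d\n", err));  */
		DBG("wl_up() returned %d\n", err);	/* new style */
		return 0;
	}
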
diff --git a/drivers/staging/brcm80211/sys/wl_export.h b/drivers/staging/brcm80211/sys/wl_export.h
index 08442f8..aa8b5a3 100644
--- a/drivers/staging/brcm80211/sys/wl_export.h
+++ b/drivers/staging/brcm80211/sys/wl_export.h
@@ -45,10 +45,10 @@
 			 int periodic);
 extern bool wl_del_timer(struct wl_info *wl, struct wl_timer *timer);
 
-extern uint wl_buf_to_pktcopy(osl_t *osh, void *p, unsigned char *buf, int len,
-			      uint offset);
-extern void *wl_get_pktbuffer(osl_t *osh, int len);
-extern int wl_set_pktlen(osl_t *osh, void *p, int len);
+extern uint wl_buf_to_pktcopy(struct osl_info *osh, void *p, unsigned char *buf,
+			      int len, uint offset);
+extern void *wl_get_pktbuffer(struct osl_info *osh, int len);
+extern int wl_set_pktlen(struct osl_info *osh, void *p, int len);
 
 #define wl_sort_bsslist(a, b) false
 
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index d060377..bdd629d 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -21,72 +21,36 @@
 #include <linux/string.h>
 #include <linux/pci_ids.h>
 #include <bcmdefs.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
 #include <osl.h>
 #define WLC_MAXBSSCFG		1	/* single BSS configs */
 
 #include <wlc_cfg.h>
 #include <net/mac80211.h>
-#include <epivers.h>
-#ifndef WLC_HIGH_ONLY
 #include <phy_version.h>
-#endif
 #include <bcmutils.h>
 #include <pcicfg.h>
 #include <wlioctl.h>
 #include <wlc_key.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 #include <wlc_channel.h>
 #include <wlc_pub.h>
 #include <wlc_scb.h>
 #include <wl_dbg.h>
-#ifdef BCMSDIO
-#include <bcmsdh.h>
-#endif
 #include <wl_export.h>
-#ifdef WLC_HIGH_ONLY
-#include "dbus.h"
-#include "bcm_rpc_tp.h"
-#include "bcm_rpc.h"
-#include "bcm_xdr.h"
-#include "wlc_rpc.h"
-#endif
 
 #include <wl_mac80211.h>
 #include <linux/firmware.h>
-#ifndef WLC_HIGH_ONLY
 #include <wl_ucode.h>
 #include <d11ucode_ext.h>
-#endif
 
-#ifdef BCMSDIO
-extern struct device *sdiommc_dev;
-#endif
-
-extern void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg,
-			      bool suspend);
-bool wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw);
-void wlc_mac_bcn_promisc_change(wlc_info_t *wlc, bool promisc);
-void wlc_set_addrmatch(wlc_info_t *wlc, int match_reg_offset,
-		       const struct ether_addr *addr);
 
 static void wl_timer(unsigned long data);
 static void _wl_timer(wl_timer_t *t);
 
-#ifdef WLC_HIGH_ONLY
-#define RPCQ_LOCK(_wl, _flags) spin_lock_irqsave(&(_wl)->rpcq_lock, (_flags))
-#define RPCQ_UNLOCK(_wl, _flags)  spin_unlock_irqrestore(&(_wl)->rpcq_lock, (_flags))
-#define TXQ_LOCK(_wl, _flags) spin_lock_irqsave(&(_wl)->txq_lock, (_flags))
-#define TXQ_UNLOCK(_wl, _flags)  spin_unlock_irqrestore(&(_wl)->txq_lock, (_flags))
-static void wl_rpc_down(void *wlh);
-static void wl_rpcq_free(wl_info_t *wl);
-static void wl_rpcq_dispatch(struct wl_task *task);
-static void wl_rpc_dispatch_schedule(void *ctx, struct rpc_buf *buf);
-static void wl_start_txqwork(struct wl_task *task);
-static void wl_txq_free(wl_info_t *wl);
-static void wl_timer_task(wl_task_t *task);
-static int wl_schedule_task(wl_info_t *wl, void (*fn) (struct wl_task *),
-			    void *context);
-#endif				/* WLC_HIGH_ONLY */
 
 static int ieee_hw_init(struct ieee80211_hw *hw);
 static int ieee_hw_rate_init(struct ieee80211_hw *hw);
@@ -134,16 +98,14 @@
 	u8 rx_hdr[16], tx_hdr[16];
 };
 
-#ifndef WLC_HIGH_ONLY
-#define WL_DEV_IF(dev)		((wl_if_t *)netdev_priv(dev))
-#define	WL_INFO(dev)		((wl_info_t *)(WL_DEV_IF(dev)->wl))	/* points to wl */
-static int wl_request_fw(wl_info_t *wl, struct pci_dev *pdev);
-static void wl_release_fw(wl_info_t *wl);
-#endif
+#define WL_DEV_IF(dev)		((struct wl_if *)netdev_priv(dev))
+#define	WL_INFO(dev)		((struct wl_info *)(WL_DEV_IF(dev)->wl))
+static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
+static void wl_release_fw(struct wl_info *wl);
 
 /* local prototypes */
-static int wl_start(struct sk_buff *skb, wl_info_t *wl);
-static int wl_start_int(wl_info_t *wl, struct ieee80211_hw *hw,
+static int wl_start(struct sk_buff *skb, struct wl_info *wl);
+static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw,
 			struct sk_buff *skb);
 static void wl_dpc(unsigned long data);
 
@@ -152,7 +114,6 @@
 MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
-#ifndef BCMSDIO
 /* recognized PCI IDs */
 static struct pci_device_id wl_id_table[] = {
 	{PCI_VENDOR_ID_BROADCOM, 0x4357, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* 43225 2G */
@@ -163,55 +124,18 @@
 
 MODULE_DEVICE_TABLE(pci, wl_id_table);
 static void wl_remove(struct pci_dev *pdev);
-#endif				/* !BCMSDIO */
 
-#ifdef BCMSDIO
-static uint sd_drivestrength = 6;
-module_param(sd_drivestrength, uint, 0);
-#endif
 
 #ifdef BCMDBG
 static int msglevel = 0xdeadbeef;
 module_param(msglevel, int, 0);
-#ifndef WLC_HIGH_ONLY
 static int phymsglevel = 0xdeadbeef;
 module_param(phymsglevel, int, 0);
-#endif				/* WLC_HIGH_ONLY */
 #endif				/* BCMDBG */
 
-static int oneonly;
-module_param(oneonly, int, 0);
-
-static int piomode;
-module_param(piomode, int, 0);
-
-static int instance_base;	/* Starting instance number */
-module_param(instance_base, int, 0);
-
-#if defined(BCMDBG)
-static char *macaddr;
-module_param(macaddr, charp, S_IRUGO);
-#endif
-
-static int nompc = 1;
-module_param(nompc, int, 0);
-
-static char name[IFNAMSIZ] = "eth%d";
-module_param_string(name, name, IFNAMSIZ, 0);
-
-#ifndef	SRCBASE
-#define	SRCBASE "."
-#endif
-
-#define WL_MAGIC 	0xdeadbeef
-
 #define HW_TO_WL(hw)	 (hw->priv)
 #define WL_TO_HW(wl)	  (wl->pub->ieee_hw)
-#ifdef WLC_HIGH_ONLY
-static int wl_ops_tx_nl(struct ieee80211_hw *hw, struct sk_buff *skb);
-#else
 static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-#endif
 static int wl_ops_start(struct ieee80211_hw *hw);
 static void wl_ops_stop(struct ieee80211_hw *hw);
 static int wl_ops_add_interface(struct ieee80211_hw *hw,
@@ -249,28 +173,13 @@
 			   enum ieee80211_ampdu_mlme_action action,
 			   struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 
-#ifdef WLC_HIGH_ONLY
-static int wl_ops_tx_nl(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-	int status;
-	wl_info_t *wl = hw->priv;
-	if (!wl->pub->up) {
-		WL_ERROR(("ops->tx called while down\n"));
-		status = -ENETDOWN;
-		goto done;
-	}
-	status = wl_start(skb, wl);
- done:
-	return status;
-}
-#else
 static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
 	int status;
-	wl_info_t *wl = hw->priv;
+	struct wl_info *wl = hw->priv;
 	WL_LOCK(wl);
 	if (!wl->pub->up) {
-		WL_ERROR(("ops->tx called while down\n"));
+		WL_ERROR("ops->tx called while down\n");
 		status = -ENETDOWN;
 		goto done;
 	}
@@ -279,13 +188,14 @@
 	WL_UNLOCK(wl);
 	return status;
 }
-#endif				/* WLC_HIGH_ONLY */
 
 static int wl_ops_start(struct ieee80211_hw *hw)
 {
-	wl_info_t *wl = hw->priv;
-	/* struct ieee80211_channel *curchan = hw->conf.channel; */
-	WL_NONE(("%s : Initial channel: %d\n", __func__, curchan->hw_value));
+	struct wl_info *wl = hw->priv;
+	/*
+	  struct ieee80211_channel *curchan = hw->conf.channel;
+	  WL_NONE("%s : Initial channel: %d\n", __func__, curchan->hw_value);
+	*/
 
 	WL_LOCK(wl);
 	ieee80211_wake_queues(hw);
@@ -296,7 +206,7 @@
 
 static void wl_ops_stop(struct ieee80211_hw *hw)
 {
-	wl_info_t *wl = hw->priv;
+	struct wl_info *wl = hw->priv;
 	ASSERT(wl);
 	WL_LOCK(wl);
 	wl_down(wl);
@@ -309,7 +219,7 @@
 static int
 wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-	wl_info_t *wl;
+	struct wl_info *wl;
 	int err;
 
 	/* Just STA for now */
@@ -318,8 +228,8 @@
 	    vif->type != NL80211_IFTYPE_STATION &&
 	    vif->type != NL80211_IFTYPE_WDS &&
 	    vif->type != NL80211_IFTYPE_ADHOC) {
-		WL_ERROR(("%s: Attempt to add type %d, only STA for now\n",
-			  __func__, vif->type));
+		WL_ERROR("%s: Attempt to add type %d, only STA for now\n",
+			 __func__, vif->type);
 		return -EOPNOTSUPP;
 	}
 
@@ -329,7 +239,7 @@
 	WL_UNLOCK(wl);
 
 	if (err != 0)
-		WL_ERROR(("%s: wl_up() returned %d\n", __func__, err));
+		WL_ERROR("%s: wl_up() returned %d\n", __func__, err);
 	return err;
 }
 
@@ -343,7 +253,7 @@
 ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
 		 enum nl80211_channel_type type)
 {
-	wl_info_t *wl = HW_TO_WL(hw);
+	struct wl_info *wl = HW_TO_WL(hw);
 	int err = 0;
 
 	switch (type) {
@@ -355,8 +265,7 @@
 		break;
 	case NL80211_CHAN_HT40MINUS:
 	case NL80211_CHAN_HT40PLUS:
-		WL_ERROR(("%s: Need to implement 40 Mhz Channels!\n",
-			  __func__));
+		WL_ERROR("%s: Need to implement 40 Mhz Channels!\n", __func__);
 		break;
 	}
 
@@ -368,17 +277,17 @@
 static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct ieee80211_conf *conf = &hw->conf;
-	wl_info_t *wl = HW_TO_WL(hw);
+	struct wl_info *wl = HW_TO_WL(hw);
 	int err = 0;
 	int new_int;
 
 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
-		WL_NONE(("%s: Setting listen interval to %d\n",
-			 __func__, conf->listen_interval));
+		WL_NONE("%s: Setting listen interval to %d\n",
+			__func__, conf->listen_interval);
 		if (wlc_iovar_setint
 		    (wl->wlc, "bcn_li_bcn", conf->listen_interval)) {
-			WL_ERROR(("%s: Error setting listen_interval\n",
-				  __func__));
+			WL_ERROR("%s: Error setting listen_interval\n",
+				 __func__);
 			err = -EIO;
 			goto config_out;
 		}
@@ -386,41 +295,42 @@
 		ASSERT(new_int == conf->listen_interval);
 	}
 	if (changed & IEEE80211_CONF_CHANGE_MONITOR)
-		WL_NONE(("Need to set monitor mode\n"));
+		WL_NONE("Need to set monitor mode\n");
 	if (changed & IEEE80211_CONF_CHANGE_PS)
-		WL_NONE(("Need to set Power-save mode\n"));
+		WL_NONE("Need to set Power-save mode\n");
 
 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
-		WL_NONE(("%s: Setting tx power to %d dbm\n", __func__,
-			 conf->power_level));
+		WL_NONE("%s: Setting tx power to %d dbm\n",
+			__func__, conf->power_level);
 		if (wlc_iovar_setint
 		    (wl->wlc, "qtxpower", conf->power_level * 4)) {
-			WL_ERROR(("%s: Error setting power_level\n", __func__));
+			WL_ERROR("%s: Error setting power_level\n", __func__);
 			err = -EIO;
 			goto config_out;
 		}
 		wlc_iovar_getint(wl->wlc, "qtxpower", &new_int);
 		if (new_int != (conf->power_level * 4))
-			WL_ERROR(("%s: Power level req != actual, %d %d\n",
-				  __func__, conf->power_level * 4, new_int));
+			WL_ERROR("%s: Power level req != actual, %d %d\n",
+				 __func__, conf->power_level * 4, new_int);
 	}
 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
 		err = ieee_set_channel(hw, conf->channel, conf->channel_type);
 	}
 	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
-		WL_NONE(("%s: srl %d, lrl %d\n", __func__,
-			 conf->short_frame_max_tx_count,
-			 conf->long_frame_max_tx_count));
+		WL_NONE("%s: srl %d, lrl %d\n",
+			__func__,
+			conf->short_frame_max_tx_count,
+			conf->long_frame_max_tx_count);
 		if (wlc_set
 		    (wl->wlc, WLC_SET_SRL,
 		     conf->short_frame_max_tx_count) < 0) {
-			WL_ERROR(("%s: Error setting srl\n", __func__));
+			WL_ERROR("%s: Error setting srl\n", __func__);
 			err = -EIO;
 			goto config_out;
 		}
 		if (wlc_set(wl->wlc, WLC_SET_LRL, conf->long_frame_max_tx_count)
 		    < 0) {
-			WL_ERROR(("%s: Error setting lrl\n", __func__));
+			WL_ERROR("%s: Error setting lrl\n", __func__);
 			err = -EIO;
 			goto config_out;
 		}
@@ -435,32 +345,29 @@
 			struct ieee80211_vif *vif,
 			struct ieee80211_bss_conf *info, u32 changed)
 {
-	wl_info_t *wl = HW_TO_WL(hw);
+	struct wl_info *wl = HW_TO_WL(hw);
 	int val;
 
-#ifdef WLC_HIGH_ONLY
-	WL_LOCK(wl);
-#endif
 
 	if (changed & BSS_CHANGED_ASSOC) {
-		WL_ERROR(("Associated:\t%s\n", info->assoc ? "True" : "False"));
+		WL_ERROR("Associated:\t%s\n", info->assoc ? "True" : "False");
 		/* association status changed (associated/disassociated)
 		 * also implies a change in the AID.
 		 */
 	}
 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
-		WL_NONE(("Use_cts_prot:\t%s Implement me\n",
-			 info->use_cts_prot ? "True" : "False"));
+		WL_NONE("Use_cts_prot:\t%s Implement me\n",
+			info->use_cts_prot ? "True" : "False");
 		/* CTS protection changed */
 	}
 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
-		WL_NONE(("Short preamble:\t%s Implement me\n",
-			 info->use_short_preamble ? "True" : "False"));
+		WL_NONE("Short preamble:\t%s Implement me\n",
+			info->use_short_preamble ? "True" : "False");
 		/* preamble changed */
 	}
 	if (changed & BSS_CHANGED_ERP_SLOT) {
-		WL_NONE(("Changing short slot:\t%s\n",
-			 info->use_short_slot ? "True" : "False"));
+		WL_NONE("Changing short slot:\t%s\n",
+			info->use_short_slot ? "True" : "False");
 		if (info->use_short_slot)
 			val = 1;
 		else
@@ -470,39 +377,36 @@
 	}
 
 	if (changed & BSS_CHANGED_HT) {
-		WL_NONE(("%s: HT mode - Implement me\n", __func__));
+		WL_NONE("%s: HT mode - Implement me\n", __func__);
 		/* 802.11n parameters changed */
 	}
 	if (changed & BSS_CHANGED_BASIC_RATES) {
-		WL_NONE(("Need to change Basic Rates:\t0x%x! Implement me\n",
-			 (u32) info->basic_rates));
+		WL_NONE("Need to change Basic Rates:\t0x%x! Implement me\n",
+			(u32) info->basic_rates);
 		/* Basic rateset changed */
 	}
 	if (changed & BSS_CHANGED_BEACON_INT) {
-		WL_NONE(("Beacon Interval:\t%d Implement me\n",
-			 info->beacon_int));
+		WL_NONE("Beacon Interval:\t%d Implement me\n",
+			info->beacon_int);
 		/* Beacon interval changed */
 	}
 	if (changed & BSS_CHANGED_BSSID) {
-		WL_NONE(("new BSSID:\taid %d  bss:%pM\n", info->aid,
-			info->bssid));
+		WL_NONE("new BSSID:\taid %d  bss:%pM\n",
+			info->aid, info->bssid);
 		/* BSSID changed, for whatever reason (IBSS and managed mode) */
 		/* FIXME: need to store bssid in bsscfg */
 		wlc_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET,
 				  (struct ether_addr *)info->bssid);
 	}
 	if (changed & BSS_CHANGED_BEACON) {
-		WL_ERROR(("BSS_CHANGED_BEACON\n"));
+		WL_ERROR("BSS_CHANGED_BEACON\n");
 		/* Beacon data changed, retrieve new beacon (beaconing modes) */
 	}
 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
-		WL_ERROR(("Beacon enabled:\t%s\n",
-			  info->enable_beacon ? "True" : "False"));
+		WL_ERROR("Beacon enabled:\t%s\n",
+			 info->enable_beacon ? "True" : "False");
 		/* Beaconing should be enabled/disabled (beaconing modes) */
 	}
-#ifdef WLC_HIGH_ONLY
-	WL_UNLOCK(wl);
-#endif
 	return;
 }
 
@@ -511,27 +415,24 @@
 			unsigned int changed_flags,
 			unsigned int *total_flags, u64 multicast)
 {
-#ifndef WLC_HIGH_ONLY
-	wl_info_t *wl = hw->priv;
-#endif
+	struct wl_info *wl = hw->priv;
 
 	changed_flags &= MAC_FILTERS;
 	*total_flags &= MAC_FILTERS;
 	if (changed_flags & FIF_PROMISC_IN_BSS)
-		WL_ERROR(("FIF_PROMISC_IN_BSS\n"));
+		WL_ERROR("FIF_PROMISC_IN_BSS\n");
 	if (changed_flags & FIF_ALLMULTI)
-		WL_ERROR(("FIF_ALLMULTI\n"));
+		WL_ERROR("FIF_ALLMULTI\n");
 	if (changed_flags & FIF_FCSFAIL)
-		WL_ERROR(("FIF_FCSFAIL\n"));
+		WL_ERROR("FIF_FCSFAIL\n");
 	if (changed_flags & FIF_PLCPFAIL)
-		WL_ERROR(("FIF_PLCPFAIL\n"));
+		WL_ERROR("FIF_PLCPFAIL\n");
 	if (changed_flags & FIF_CONTROL)
-		WL_ERROR(("FIF_CONTROL\n"));
+		WL_ERROR("FIF_CONTROL\n");
 	if (changed_flags & FIF_OTHER_BSS)
-		WL_ERROR(("FIF_OTHER_BSS\n"));
+		WL_ERROR("FIF_OTHER_BSS\n");
 	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
-		WL_NONE(("FIF_BCN_PRBRESP_PROMISC\n"));
-#ifndef WLC_HIGH_ONLY
+		WL_NONE("FIF_BCN_PRBRESP_PROMISC\n");
 		WL_LOCK(wl);
 		if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
 			wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
@@ -541,7 +442,6 @@
 			wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
 		}
 		WL_UNLOCK(wl);
-#endif
 	}
 	return;
 }
@@ -549,25 +449,25 @@
 static int
 wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
 {
-	WL_ERROR(("%s: Enter\n", __func__));
+	WL_ERROR("%s: Enter\n", __func__);
 	return 0;
 }
 
 static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
 {
-	WL_NONE(("Scan Start\n"));
+	WL_NONE("Scan Start\n");
 	return;
 }
 
 static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
 {
-	WL_NONE(("Scan Complete\n"));
+	WL_NONE("Scan Complete\n");
 	return;
 }
 
 static void wl_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf)
 {
-	WL_ERROR(("%s: Enter\n", __func__));
+	WL_ERROR("%s: Enter\n", __func__);
 	return;
 }
 
@@ -575,13 +475,13 @@
 wl_ops_get_stats(struct ieee80211_hw *hw,
 		 struct ieee80211_low_level_stats *stats)
 {
-	WL_ERROR(("%s: Enter\n", __func__));
+	WL_ERROR("%s: Enter\n", __func__);
 	return 0;
 }
 
 static int wl_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
-	WL_ERROR(("%s: Enter\n", __func__));
+	WL_ERROR("%s: Enter\n", __func__);
 	return 0;
 }
 
@@ -589,10 +489,10 @@
 wl_ops_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
 {
-	WL_NONE(("%s: Enter\n", __func__));
+	WL_NONE("%s: Enter\n", __func__);
 	switch (cmd) {
 	default:
-		WL_ERROR(("%s: Uknown cmd = %d\n", __func__, cmd));
+		WL_ERROR("%s: Unknown cmd = %d\n", __func__, cmd);
 		break;
 	}
 	return;
@@ -602,11 +502,11 @@
 wl_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
 	       const struct ieee80211_tx_queue_params *params)
 {
-	wl_info_t *wl = hw->priv;
+	struct wl_info *wl = hw->priv;
 
-	WL_NONE(("%s: Enter (WME config)\n", __func__));
-	WL_NONE(("queue %d, txop %d, cwmin %d, cwmax %d, aifs %d\n", queue,
-		 params->txop, params->cw_min, params->cw_max, params->aifs));
+	WL_NONE("%s: Enter (WME config)\n", __func__);
+	WL_NONE("queue %d, txop %d, cwmin %d, cwmax %d, aifs %d\n", queue,
+		 params->txop, params->cw_min, params->cw_max, params->aifs);
 
 	WL_LOCK(wl);
 	wlc_wme_setparams(wl->wlc, queue, (void *)params, true);
@@ -617,7 +517,7 @@
 
 static u64 wl_ops_get_tsf(struct ieee80211_hw *hw)
 {
-	WL_ERROR(("%s: Enter\n", __func__));
+	WL_ERROR("%s: Enter\n", __func__);
 	return 0;
 }
 
@@ -628,11 +528,11 @@
 	struct scb *scb;
 
 	int i;
-	wl_info_t *wl = hw->priv;
+	struct wl_info *wl = hw->priv;
 
 	/* Init the scb */
 	scb = (struct scb *)sta->drv_priv;
-	bzero(scb, sizeof(struct scb));
+	memset(scb, 0, sizeof(struct scb));
 	for (i = 0; i < NUMPRIO; i++)
 		scb->seqctl[i] = 0xFFFF;
 	scb->seqctl_nonqos = 0xFFFF;
@@ -641,20 +541,12 @@
 	wl->pub->global_scb = scb;
 	wl->pub->global_ampdu = &(scb->scb_ampdu);
 	wl->pub->global_ampdu->scb = scb;
-#ifdef WLC_HIGH_ONLY
-	wl->pub->global_ampdu->max_pdu = AMPDU_NUM_MPDU;
-#else
 	wl->pub->global_ampdu->max_pdu = 16;
-#endif
 	pktq_init(&scb->scb_ampdu.txq, AMPDU_MAX_SCB_TID,
 		  AMPDU_MAX_SCB_TID * PKTQ_LEN_DEFAULT);
 
 	sta->ht_cap.ht_supported = true;
-#ifdef WLC_HIGH_ONLY
-	sta->ht_cap.ampdu_factor = AMPDU_RX_FACTOR_16K;
-#else
 	sta->ht_cap.ampdu_factor = AMPDU_RX_FACTOR_64K;
-#endif
 	sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
 	sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
 	    IEEE80211_HT_CAP_SGI_20 |
@@ -668,7 +560,7 @@
 wl_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	      struct ieee80211_sta *sta)
 {
-	WL_NONE(("%s: Enter\n", __func__));
+	WL_NONE("%s: Enter\n", __func__);
 	return 0;
 }
 
@@ -681,19 +573,19 @@
 #if defined(BCMDBG)
 	struct scb *scb = (struct scb *)sta->drv_priv;
 #endif
-	wl_info_t *wl = hw->priv;
+	struct wl_info *wl = hw->priv;
 
 	ASSERT(scb->magic == SCB_MAGIC);
 	switch (action) {
 	case IEEE80211_AMPDU_RX_START:
-		WL_NONE(("%s: action = IEEE80211_AMPDU_RX_START\n", __func__));
+		WL_NONE("%s: action = IEEE80211_AMPDU_RX_START\n", __func__);
 		break;
 	case IEEE80211_AMPDU_RX_STOP:
-		WL_NONE(("%s: action = IEEE80211_AMPDU_RX_STOP\n", __func__));
+		WL_NONE("%s: action = IEEE80211_AMPDU_RX_STOP\n", __func__);
 		break;
 	case IEEE80211_AMPDU_TX_START:
 		if (!wlc_aggregatable(wl->wlc, tid)) {
-			/* WL_ERROR(("START: tid %d is not agg' able, return FAILURE to stack\n", tid)); */
+			/* WL_ERROR("START: tid %d is not agg' able, return FAILURE to stack\n", tid); */
 			return -1;
 		}
 		/* XXX: Use the starting sequence number provided ... */
@@ -707,22 +599,18 @@
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
 		/* Not sure what to do here */
 		/* Power save wakeup */
-		WL_NONE(("%s: action = IEEE80211_AMPDU_TX_OPERATIONAL\n",
-			 __func__));
+		WL_NONE("%s: action = IEEE80211_AMPDU_TX_OPERATIONAL\n",
+			__func__);
 		break;
 	default:
-		WL_ERROR(("%s: Invalid command, ignoring\n", __func__));
+		WL_ERROR("%s: Invalid command, ignoring\n", __func__);
 	}
 
 	return 0;
 }
 
 static const struct ieee80211_ops wl_ops = {
-#ifdef WLC_HIGH_ONLY
-	.tx = wl_ops_tx_nl,
-#else
 	.tx = wl_ops_tx,
-#endif
 	.start = wl_ops_start,
 	.stop = wl_ops_stop,
 	.add_interface = wl_ops_add_interface,
@@ -744,10 +632,10 @@
 	.ampdu_action = wl_ampdu_action,
 };
 
-static int wl_set_hint(wl_info_t *wl, char *abbrev)
+static int wl_set_hint(struct wl_info *wl, char *abbrev)
 {
-	WL_ERROR(("%s: Sending country code %c%c to MAC80211\n", __func__,
-		  abbrev[0], abbrev[1]));
+	WL_ERROR("%s: Sending country code %c%c to MAC80211\n",
+		 __func__, abbrev[0], abbrev[1]);
 	return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
 }
 
@@ -762,117 +650,61 @@
  * a warning that this function is defined but not used if we declare
  * it as static.
  */
-static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
+static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
 			    uint bustype, void *btparam, uint irq)
 {
-	wl_info_t *wl;
-	osl_t *osh;
+	struct wl_info *wl;
+	struct osl_info *osh;
 	int unit, err;
 
 	unsigned long base_addr;
 	struct ieee80211_hw *hw;
 	u8 perm[ETH_ALEN];
 
-	unit = wl_found + instance_base;
+	unit = wl_found;
 	err = 0;
 
 	if (unit < 0) {
-		WL_ERROR(("wl%d: unit number overflow, exiting\n", unit));
+		WL_ERROR("wl%d: unit number overflow, exiting\n", unit);
 		return NULL;
 	}
 
-	if (oneonly && (unit != instance_base)) {
-		WL_ERROR(("wl%d: wl_attach: oneonly is set, exiting\n", unit));
-		return NULL;
-	}
-
-	/* Requires pkttag feature */
-	osh = osl_attach(btparam, bustype, true);
+	osh = osl_attach(btparam, bustype);
 	ASSERT(osh);
 
-#ifdef WLC_HIGH_ONLY
-	hw = ieee80211_alloc_hw(sizeof(wl_info_t), &wl_ops);
-	if (!hw) {
-		WL_ERROR(("%s: ieee80211_alloc_hw failed\n", __func__));
-		ASSERT(0);
-	}
-
-	bzero(hw->priv, sizeof(*wl));
-	wl = hw->priv;
-#else
 	/* allocate private info */
 	hw = pci_get_drvdata(btparam);	/* btparam == pdev */
 	wl = hw->priv;
-#endif
 	ASSERT(wl);
 
-	wl->magic = WL_MAGIC;
 	wl->osh = osh;
 	atomic_set(&wl->callbacks, 0);
 
 	/* setup the bottom half handler */
 	tasklet_init(&wl->tasklet, wl_dpc, (unsigned long) wl);
 
-#ifdef WLC_HIGH_ONLY
-	wl->rpc_th = bcm_rpc_tp_attach(osh, NULL);
-	if (wl->rpc_th == NULL) {
-		WL_ERROR(("wl%d: %s: bcm_rpc_tp_attach failed!\n", unit,
-			  __func__));
-		goto fail;
-	}
 
-	wl->rpc = bcm_rpc_attach(NULL, osh, wl->rpc_th);
-	if (wl->rpc == NULL) {
-		WL_ERROR(("wl%d: %s: bcm_rpc_attach failed!\n", unit,
-			  __func__));
-		goto fail;
-	}
-
-	/* init tx work queue for wl_start/send pkt; no need to destroy workitem  */
-	INIT_WORK(&wl->txq_task.work, (work_func_t) wl_start_txqwork);
-	wl->txq_task.context = wl;
-#endif				/* WLC_HIGH_ONLY */
-
-#ifdef BCMSDIO
-	SET_IEEE80211_DEV(hw, sdiommc_dev);
-#endif
 
 	base_addr = regs;
 
 	if (bustype == PCI_BUS) {
-		/* piomode can be overwritten by command argument */
-		wl->piomode = piomode;
-		WL_TRACE(("PCI/%s\n", wl->piomode ? "PIO" : "DMA"));
+		wl->piomode = false;
 	} else if (bustype == RPC_BUS) {
 		/* Do nothing */
 	} else {
 		bustype = PCI_BUS;
-		WL_TRACE(("force to PCI\n"));
+		WL_TRACE("force to PCI\n");
 	}
 	wl->bcm_bustype = bustype;
 
-#ifdef WLC_HIGH_ONLY
-	if (wl->bcm_bustype == RPC_BUS) {
-		wl->regsva = (void *)0;
-		btparam = wl->rpc;
-	} else
-#endif
 	wl->regsva = ioremap_nocache(base_addr, PCI_BAR0_WINSZ);
 	if (wl->regsva == NULL) {
-		WL_ERROR(("wl%d: ioremap() failed\n", unit));
+		WL_ERROR("wl%d: ioremap() failed\n", unit);
 		goto fail;
 	}
-#ifdef WLC_HIGH_ONLY
-	spin_lock_init(&wl->rpcq_lock);
-	spin_lock_init(&wl->txq_lock);
-
-	sema_init(&wl->sem, 1);
-#else
 	spin_lock_init(&wl->lock);
 	spin_lock_init(&wl->isr_lock);
-#endif
 
-#ifndef WLC_HIGH_ONLY
 	/* prepare ucode */
 	if (wl_request_fw(wl, (struct pci_dev *)btparam)) {
 		printf("%s: Failed to find firmware usually in %s\n",
@@ -881,17 +713,14 @@
 		wl_remove((struct pci_dev *)btparam);
 		goto fail1;
 	}
-#endif
 
 	/* common load-time initialization */
 	wl->wlc = wlc_attach((void *)wl, vendor, device, unit, wl->piomode, osh,
 			     wl->regsva, wl->bcm_bustype, btparam, &err);
-#ifndef WLC_HIGH_ONLY
 	wl_release_fw(wl);
-#endif
 	if (!wl->wlc) {
-		printf("%s: %s wlc_attach() failed with code %d\n",
-			KBUILD_MODNAME, EPI_VERSION_STR, err);
+		printf("%s: wlc_attach() failed with code %d\n",
+			KBUILD_MODNAME, err);
 		goto fail;
 	}
 	wl->pub = wlc_pub(wl->wlc);
@@ -900,52 +729,35 @@
 	ASSERT(wl->pub->ieee_hw);
 	ASSERT(wl->pub->ieee_hw->priv == wl);
 
-#ifdef WLC_HIGH_ONLY
-	REGOPSSET(osh, (osl_rreg_fn_t) wlc_reg_read,
-		  (osl_wreg_fn_t) wlc_reg_write, wl->wlc);
-	wl->rpc_dispatch_ctx.rpc = wl->rpc;
-	wl->rpc_dispatch_ctx.wlc = wl->wlc;
-	bcm_rpc_rxcb_init(wl->rpc, wl, wl_rpc_dispatch_schedule, wl,
-			  wl_rpc_down, NULL, NULL);
-#endif				/* WLC_HIGH_ONLY */
 
-	if (nompc) {
-		if (wlc_iovar_setint(wl->wlc, "mpc", 0)) {
-			WL_ERROR(("wl%d: Error setting MPC variable to 0\n",
-				  unit));
-		}
+	if (wlc_iovar_setint(wl->wlc, "mpc", 0)) {
+		WL_ERROR("wl%d: Error setting MPC variable to 0\n", unit);
 	}
-#ifdef BCMSDIO
-	/* Set SDIO drive strength */
-	wlc_iovar_setint(wl->wlc, "sd_drivestrength", sd_drivestrength);
-#endif
 
-#ifdef WLC_LOW
 	/* register our interrupt handler */
 	if (request_irq(irq, wl_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
-		WL_ERROR(("wl%d: request_irq() failed\n", unit));
+		WL_ERROR("wl%d: request_irq() failed\n", unit);
 		goto fail;
 	}
 	wl->irq = irq;
-#endif				/* WLC_LOW */
 
 	/* register module */
 	wlc_module_register(wl->pub, NULL, "linux", wl, NULL, wl_linux_watchdog,
 			    NULL);
 
 	if (ieee_hw_init(hw)) {
-		WL_ERROR(("wl%d: %s: ieee_hw_init failed!\n", unit, __func__));
+		WL_ERROR("wl%d: %s: ieee_hw_init failed!\n", unit, __func__);
 		goto fail;
 	}
 
-	bcopy(&wl->pub->cur_etheraddr, perm, ETHER_ADDR_LEN);
+	bcopy(&wl->pub->cur_etheraddr, perm, ETH_ALEN);
 	ASSERT(is_valid_ether_addr(perm));
 	SET_IEEE80211_PERM_ADDR(hw, perm);
 
 	err = ieee80211_register_hw(hw);
 	if (err) {
-		WL_ERROR(("%s: ieee80211_register_hw failed, status %d\n",
-			  __func__, err));
+		WL_ERROR("%s: ieee80211_register_hw failed, status %d\n",
+			 __func__, err);
 	}
 
 	if (wl->pub->srom_ccode[0])
@@ -953,19 +765,14 @@
 	else
 		err = wl_set_hint(wl, "US");
 	if (err) {
-		WL_ERROR(("%s: regulatory_hint failed, status %d\n", __func__,
-			  err));
+		WL_ERROR("%s: regulatory_hint failed, status %d\n",
+			 __func__, err);
 	}
-#ifndef WLC_HIGH_ONLY
-	WL_ERROR(("wl%d: Broadcom BCM43xx 802.11 MAC80211 Driver "
-		  EPI_VERSION_STR " (" PHY_VERSION_STR ")", unit));
-#else
-	WL_ERROR(("wl%d: Broadcom BCM43xx 802.11 MAC80211 Driver "
-		  EPI_VERSION_STR, unit));
-#endif
+	WL_ERROR("wl%d: Broadcom BCM43xx 802.11 MAC80211 Driver (" PHY_VERSION_STR ")",
+		 unit);
 
 #ifdef BCMDBG
-	printf(" (Compiled in " SRCBASE " at " __TIME__ " on " __DATE__ ")");
+	printf(" (Compiled at " __TIME__ " on " __DATE__ ")");
 #endif				/* BCMDBG */
 	printf("\n");
 
@@ -978,54 +785,6 @@
 	return NULL;
 }
 
-#ifdef WLC_HIGH_ONLY
-static void *wl_dbus_probe_cb(void *arg, const char *desc, u32 bustype,
-			      u32 hdrlen)
-{
-	wl_info_t *wl;
-	WL_ERROR(("%s:\n", __func__));
-
-	wl = wl_attach(BCM_DNGL_VID, BCM_DNGL_BDC_PID, (unsigned long) NULL, RPC_BUS,
-		NULL, 0);
-	if (!wl) {
-		WL_ERROR(("%s: wl_attach failed\n", __func__));
-	}
-
-	/* This is later passed to wl_dbus_disconnect_cb */
-	return wl;
-}
-
-static void wl_dbus_disconnect_cb(void *arg)
-{
-	wl_info_t *wl = arg;
-
-	WL_ERROR(("%s:\n", __func__));
-
-	if (wl) {
-#ifdef WLC_HIGH_ONLY
-		if (wl->pub->ieee_hw) {
-			ieee80211_unregister_hw(wl->pub->ieee_hw);
-			WL_ERROR(("%s: Back from down\n", __func__));
-		}
-		wlc_device_removed(wl->wlc);
-		wlc_bmac_dngl_reboot(wl->rpc);
-		bcm_rpc_down(wl->rpc);
-#endif
-		WL_LOCK(wl);
-		wl_down(wl);
-		WL_UNLOCK(wl);
-#ifdef WLC_HIGH_ONLY
-		if (wl->pub->ieee_hw) {
-			ieee80211_free_hw(wl->pub->ieee_hw);
-			WL_ERROR(("%s: Back from ieee80211_free_hw\n",
-				  __func__));
-			wl->pub->ieee_hw = NULL;
-		}
-#endif
-		wl_free(wl);
-	}
-}
-#endif				/* WLC_HIGH_ONLY */
 
 
 #define CHAN2GHZ(channel, freqency, chflags)  { \
@@ -1163,29 +922,13 @@
 		   .cap = IEEE80211_HT_CAP_GRN_FLD |
 		   IEEE80211_HT_CAP_SGI_20 |
 		   IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
-#ifdef WLC_HIGH_ONLY
-		   .ht_supported = true,
-		   .ampdu_factor = AMPDU_RX_FACTOR_16K,
-#else
 		   .ht_supported = true,
 		   .ampdu_factor = AMPDU_RX_FACTOR_64K,
-#endif
 		   .ampdu_density = AMPDU_DEF_MPDU_DENSITY,
 		   .mcs = {
 			   /* placeholders for now */
-#ifdef WLC_HIGH_ONLY
-			   /*
-			    * rx_mask[0] = 0xff by default
-			    * rx_mask[1] = 0xff if number of rx chain >=2
-			    * rx_mask[2] = 0xff if number of rx chain >=3
-			    * rx_mask[4] = 1 if 40Mhz is supported
-			    */
-			   .rx_mask = {0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-			   .rx_highest = 72,	/* max rate of single stream */
-#else
 			   .rx_mask = {0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0},
 			   .rx_highest = 500,
-#endif
 			   .tx_params = IEEE80211_HT_MCS_TX_DEFINED}
 		   }
 };
@@ -1212,7 +955,7 @@
 
 static int ieee_hw_rate_init(struct ieee80211_hw *hw)
 {
-	wl_info_t *wl = HW_TO_WL(hw);
+	struct wl_info *wl = HW_TO_WL(hw);
 	int has_5g;
 	char phy_list[4];
 
@@ -1222,20 +965,16 @@
 	hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 
 	if (wlc_get(wl->wlc, WLC_GET_PHYLIST, (int *)&phy_list) < 0) {
-		WL_ERROR(("Phy list failed\n"));
+		WL_ERROR("Phy list failed\n");
 	}
-	WL_NONE(("%s: phylist = %c\n", __func__, phy_list[0]));
+	WL_NONE("%s: phylist = %c\n", __func__, phy_list[0]);
 
-#ifndef WLC_HIGH_ONLY
 	if (phy_list[0] == 'n' || phy_list[0] == 'c') {
 		if (phy_list[0] == 'c') {
 			/* Single stream */
 			wl_band_2GHz_nphy.ht_cap.mcs.rx_mask[1] = 0;
 			wl_band_2GHz_nphy.ht_cap.mcs.rx_highest = 72;
 		}
-#else
-	if (phy_list[0] == 's') {
-#endif
 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl_band_2GHz_nphy;
 	} else {
 		BUG();
@@ -1245,11 +984,7 @@
 	/* Assume all bands use the same phy.  True for 11n devices. */
 	if (NBANDS_PUB(wl->pub) > 1) {
 		has_5g++;
-#ifndef WLC_HIGH_ONLY
 		if (phy_list[0] == 'n' || phy_list[0] == 'c') {
-#else
-		if (phy_list[0] == 's') {
-#endif
 			hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
 			    &wl_band_5GHz_nphy;
 		} else {
@@ -1257,7 +992,7 @@
 		}
 	}
 
-	WL_NONE(("%s: 2ghz = %d, 5ghz = %d\n", __func__, 1, has_5g));
+	WL_NONE("%s: 2ghz = %d, 5ghz = %d\n", __func__, 1, has_5g);
 
 	return 0;
 }
@@ -1288,7 +1023,6 @@
 	return ieee_hw_rate_init(hw);
 }
 
-#ifndef BCMSDIO
 /**
  * determines if a device is a WL device, and if so, attaches it.
  *
@@ -1300,15 +1034,15 @@
 wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int rc;
-	wl_info_t *wl;
+	struct wl_info *wl;
 	struct ieee80211_hw *hw;
 	u32 val;
 
 	ASSERT(pdev);
 
-	WL_TRACE(("%s: bus %d slot %d func %d irq %d\n", __func__,
-		  pdev->bus->number, PCI_SLOT(pdev->devfn),
-		  PCI_FUNC(pdev->devfn), pdev->irq));
+	WL_TRACE("%s: bus %d slot %d func %d irq %d\n",
+		 __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn), pdev->irq);
 
 	if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
 	    (((pdev->device & 0xff00) != 0x4300) &&
@@ -1318,9 +1052,9 @@
 
 	rc = pci_enable_device(pdev);
 	if (rc) {
-		WL_ERROR(("%s: Cannot enable device %d-%d_%d\n", __func__,
-			  pdev->bus->number, PCI_SLOT(pdev->devfn),
-			  PCI_FUNC(pdev->devfn)));
+		WL_ERROR("%s: Cannot enable device %d-%d_%d\n",
+			 __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
+			 PCI_FUNC(pdev->devfn));
 		return -ENODEV;
 	}
 	pci_set_master(pdev);
@@ -1329,9 +1063,9 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-	hw = ieee80211_alloc_hw(sizeof(wl_info_t), &wl_ops);
+	hw = ieee80211_alloc_hw(sizeof(struct wl_info), &wl_ops);
 	if (!hw) {
-		WL_ERROR(("%s: ieee80211_alloc_hw failed\n", __func__));
+		WL_ERROR("%s: ieee80211_alloc_hw failed\n", __func__);
 		rc = -ENOMEM;
 		goto err_1;
 	}
@@ -1340,34 +1074,34 @@
 
 	pci_set_drvdata(pdev, hw);
 
-	bzero(hw->priv, sizeof(*wl));
+	memset(hw->priv, 0, sizeof(*wl));
 
 	wl = wl_attach(pdev->vendor, pdev->device, pci_resource_start(pdev, 0),
 		       PCI_BUS, pdev, pdev->irq);
 
 	if (!wl) {
-		WL_ERROR(("%s: %s: wl_attach failed!\n",
-			KBUILD_MODNAME, __func__));
+		WL_ERROR("%s: %s: wl_attach failed!\n",
+			 KBUILD_MODNAME, __func__);
 		return -ENODEV;
 	}
 	return 0;
  err_1:
-	WL_ERROR(("%s: err_1: Major hoarkage\n", __func__));
+	WL_ERROR("%s: err_1: Major hoarkage\n", __func__);
 	return 0;
 }
 
 #ifdef LINUXSTA_PS
 static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
 {
-	wl_info_t *wl;
+	struct wl_info *wl;
 	struct ieee80211_hw *hw;
 
-	WL_TRACE(("wl: wl_suspend\n"));
+	WL_TRACE("wl: wl_suspend\n");
 
 	hw = pci_get_drvdata(pdev);
 	wl = HW_TO_WL(hw);
 	if (!wl) {
-		WL_ERROR(("wl: wl_suspend: pci_get_drvdata failed\n"));
+		WL_ERROR("wl: wl_suspend: pci_get_drvdata failed\n");
 		return -ENODEV;
 	}
 
@@ -1382,16 +1116,16 @@
 
 static int wl_resume(struct pci_dev *pdev)
 {
-	wl_info_t *wl;
+	struct wl_info *wl;
 	struct ieee80211_hw *hw;
 	int err = 0;
 	u32 val;
 
-	WL_TRACE(("wl: wl_resume\n"));
+	WL_TRACE("wl: wl_resume\n");
 	hw = pci_get_drvdata(pdev);
 	wl = HW_TO_WL(hw);
 	if (!wl) {
-		WL_ERROR(("wl: wl_resume: pci_get_drvdata failed\n"));
+		WL_ERROR("wl: wl_resume: pci_get_drvdata failed\n");
 		return -ENODEV;
 	}
 
@@ -1421,17 +1155,17 @@
 
 static void wl_remove(struct pci_dev *pdev)
 {
-	wl_info_t *wl;
+	struct wl_info *wl;
 	struct ieee80211_hw *hw;
 
 	hw = pci_get_drvdata(pdev);
 	wl = HW_TO_WL(hw);
 	if (!wl) {
-		WL_ERROR(("wl: wl_remove: pci_get_drvdata failed\n"));
+		WL_ERROR("wl: wl_remove: pci_get_drvdata failed\n");
 		return;
 	}
 	if (!wlc_chipmatch(pdev->vendor, pdev->device)) {
-		WL_ERROR(("wl: wl_remove: wlc_chipmatch failed\n"));
+		WL_ERROR("wl: wl_remove: wlc_chipmatch failed\n");
 		return;
 	}
 	if (wl->wlc) {
@@ -1439,7 +1173,7 @@
 		WL_LOCK(wl);
 		wl_down(wl);
 		WL_UNLOCK(wl);
-		WL_NONE(("%s: Down\n", __func__));
+		WL_NONE("%s: Down\n", __func__);
 	}
 	pci_disable_device(pdev);
 
@@ -1459,7 +1193,6 @@
  .remove   = __devexit_p(wl_remove),
  .id_table = wl_id_table,
 };
-#endif				/* !BCMSDIO */
 
 /**
  * This is the main entry point for the WL driver.
@@ -1480,7 +1213,6 @@
 		if (var)
 			wl_msg_level = simple_strtoul(var, NULL, 0);
 	}
-#ifndef WLC_HIGH_ONLY
 	{
 		extern u32 phyhal_msg_level;
 
@@ -1492,25 +1224,13 @@
 				phyhal_msg_level = simple_strtoul(var, NULL, 0);
 		}
 	}
-#endif				/* WLC_HIGH_ONLY */
 #endif				/* BCMDBG */
 
-#ifndef BCMSDIO
 	error = pci_register_driver(&wl_pci_driver);
 	if (!error)
 		return 0;
 
-#endif				/* !BCMSDIO */
 
-#ifdef WLC_HIGH_ONLY
-	/* BMAC_NOTE: define hardcode number, why NODEVICE is ok ? */
-	error =
-	    dbus_register(BCM_DNGL_VID, 0, wl_dbus_probe_cb,
-			  wl_dbus_disconnect_cb, NULL, NULL, NULL);
-	if (error == DBUS_ERR_NODEVICE) {
-		error = DBUS_OK;
-	}
-#endif				/* WLC_HIGH_ONLY */
 
 	return error;
 }
@@ -1524,13 +1244,8 @@
  */
 static void __exit wl_module_exit(void)
 {
-#ifndef BCMSDIO
 	pci_unregister_driver(&wl_pci_driver);
-#endif				/* !BCMSDIO */
 
-#ifdef WLC_HIGH_ONLY
-	dbus_deregister();
-#endif				/* WLC_HIGH_ONLY */
 }
 
 module_init(wl_module_init);
@@ -1543,19 +1258,17 @@
  * by the wl parameter.
  *
  */
-void wl_free(wl_info_t *wl)
+void wl_free(struct wl_info *wl)
 {
 	wl_timer_t *t, *next;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	ASSERT(wl);
-#ifndef WLC_HIGH_ONLY
 	/* free ucode data */
 	if (wl->fw.fw_cnt)
 		wl_ucode_data_free();
 	if (wl->irq)
 		free_irq(wl->irq, wl);
-#endif
 
 	/* kill dpc */
 	tasklet_kill(&wl->tasklet);
@@ -1593,103 +1306,50 @@
 	 * unregister_netdev() calls get_stats() which may read chip registers
 	 * so we cannot unmap the chip registers until after calling unregister_netdev() .
 	 */
-	if (wl->regsva && BUSTYPE(wl->bcm_bustype) != SDIO_BUS &&
-	    BUSTYPE(wl->bcm_bustype) != JTAG_BUS) {
+	if (wl->regsva && wl->bcm_bustype != SDIO_BUS &&
+	    wl->bcm_bustype != JTAG_BUS) {
 		iounmap((void *)wl->regsva);
 	}
 	wl->regsva = NULL;
 
-#ifdef WLC_HIGH_ONLY
-	wl_rpcq_free(wl);
-
-	wl_txq_free(wl);
-
-	if (wl->rpc) {
-		bcm_rpc_detach(wl->rpc);
-		wl->rpc = NULL;
-	}
-
-	if (wl->rpc_th) {
-		bcm_rpc_tp_detach(wl->rpc_th);
-		wl->rpc_th = NULL;
-	}
-#endif				/* WLC_HIGH_ONLY */
 
 	osl_detach(osh);
 }
 
-#ifdef WLC_LOW
 /* transmit a packet */
-static int BCMFASTPATH wl_start(struct sk_buff *skb, wl_info_t *wl)
+static int BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl)
 {
 	if (!wl)
 		return -ENETDOWN;
 
 	return wl_start_int(wl, WL_TO_HW(wl), skb);
 }
-#endif				/* WLC_LOW */
 
 static int BCMFASTPATH
-wl_start_int(wl_info_t *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
+wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-#ifdef WLC_HIGH_ONLY
-	WL_LOCK(wl);
-#endif
 	wlc_sendpkt_mac80211(wl->wlc, skb, hw);
-#ifdef WLC_HIGH_ONLY
-	WL_UNLOCK(wl);
-#endif
 	return NETDEV_TX_OK;
 }
 
-void wl_txflowcontrol(wl_info_t *wl, struct wl_if *wlif, bool state, int prio)
+void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
+		      int prio)
 {
-	WL_ERROR(("Shouldn't be here %s\n", __func__));
+	WL_ERROR("Shouldn't be here %s\n", __func__);
 }
 
-#if defined(WLC_HIGH_ONLY)
-/* Schedule a completion handler to run at safe time */
-static int
-wl_schedule_task(wl_info_t *wl, void (*fn) (struct wl_task *task),
-		 void *context)
+void wl_init(struct wl_info *wl)
 {
-	wl_task_t *task;
-
-	WL_TRACE(("wl%d: wl_schedule_task\n", wl->pub->unit));
-
-	task = kmalloc(sizeof(wl_task_t), GFP_ATOMIC);
-	if (!task) {
-		WL_ERROR(("wl%d: wl_schedule_task: out of memory\n", wl->pub->unit));
-		return -ENOMEM;
-	}
-
-	INIT_WORK(&task->work, (work_func_t) fn);
-	task->context = context;
-
-	if (!schedule_work(&task->work)) {
-		WL_ERROR(("wl%d: schedule_work() failed\n", wl->pub->unit));
-		kfree(task);
-		return -ENOMEM;
-	}
-
-	atomic_inc(&wl->callbacks);
-
-	return 0;
-}
-#endif				/* defined(WLC_HIGH_ONLY) */
-
-void wl_init(wl_info_t *wl)
-{
-	WL_TRACE(("wl%d: wl_init\n", wl->pub->unit));
+	WL_TRACE("wl%d: wl_init\n", wl->pub->unit);
 
 	wl_reset(wl);
 
 	wlc_init(wl->wlc);
 }
 
-uint wl_reset(wl_info_t *wl)
+uint wl_reset(struct wl_info *wl)
 {
-	WL_TRACE(("wl%d: wl_reset\n", wl->pub->unit));
+	WL_TRACE("wl%d: wl_reset\n", wl->pub->unit);
 
 	wlc_reset(wl->wlc);
 
@@ -1703,25 +1363,22 @@
  * These are interrupt on/off entry points. Disable interrupts
  * during interrupt state transition.
  */
-void BCMFASTPATH wl_intrson(wl_info_t *wl)
+void BCMFASTPATH wl_intrson(struct wl_info *wl)
 {
-#if defined(WLC_LOW)
 	unsigned long flags;
 
 	INT_LOCK(wl, flags);
 	wlc_intrson(wl->wlc);
 	INT_UNLOCK(wl, flags);
-#endif				/* WLC_LOW */
 }
 
-bool wl_alloc_dma_resources(wl_info_t *wl, uint addrwidth)
+bool wl_alloc_dma_resources(struct wl_info *wl, uint addrwidth)
 {
 	return true;
 }
 
-u32 BCMFASTPATH wl_intrsoff(wl_info_t *wl)
+u32 BCMFASTPATH wl_intrsoff(struct wl_info *wl)
 {
-#if defined(WLC_LOW)
 	unsigned long flags;
 	u32 status;
 
@@ -1729,23 +1386,18 @@
 	status = wlc_intrsoff(wl->wlc);
 	INT_UNLOCK(wl, flags);
 	return status;
-#else
-	return 0;
-#endif				/* WLC_LOW */
 }
 
-void wl_intrsrestore(wl_info_t *wl, u32 macintmask)
+void wl_intrsrestore(struct wl_info *wl, u32 macintmask)
 {
-#if defined(WLC_LOW)
 	unsigned long flags;
 
 	INT_LOCK(wl, flags);
 	wlc_intrsrestore(wl->wlc, macintmask);
 	INT_UNLOCK(wl, flags);
-#endif				/* WLC_LOW */
 }
 
-int wl_up(wl_info_t *wl)
+int wl_up(struct wl_info *wl)
 {
 	int error = 0;
 
@@ -1757,7 +1409,7 @@
 	return error;
 }
 
-void wl_down(wl_info_t *wl)
+void wl_down(struct wl_info *wl)
 {
 	uint callbacks, ret_val = 0;
 
@@ -1768,24 +1420,21 @@
 	/* wait for down callbacks to complete */
 	WL_UNLOCK(wl);
 
-#ifndef WLC_HIGH_ONLY
 	/* For HIGH_only driver, it's important to actually schedule other work,
 	 * not just spin wait since everything runs at schedule level
 	 */
 	SPINWAIT((atomic_read(&wl->callbacks) > callbacks), 100 * 1000);
-#endif				/* WLC_HIGH_ONLY */
 
 	WL_LOCK(wl);
 }
 
 irqreturn_t BCMFASTPATH wl_isr(int irq, void *dev_id)
 {
-#if defined(WLC_LOW)
-	wl_info_t *wl;
+	struct wl_info *wl;
 	bool ours, wantdpc;
 	unsigned long flags;
 
-	wl = (wl_info_t *) dev_id;
+	wl = (struct wl_info *) dev_id;
 
 	WL_ISRLOCK(wl, flags);
 
@@ -1805,17 +1454,13 @@
 	WL_ISRUNLOCK(wl, flags);
 
 	return IRQ_RETVAL(ours);
-#else
-	return IRQ_RETVAL(0);
-#endif				/* WLC_LOW */
 }
 
 static void BCMFASTPATH wl_dpc(unsigned long data)
 {
-#ifdef WLC_LOW
-	wl_info_t *wl;
+	struct wl_info *wl;
 
-	wl = (wl_info_t *) data;
+	wl = (struct wl_info *) data;
 
 	WL_LOCK(wl);
 
@@ -1846,20 +1491,19 @@
 
  done:
 	WL_UNLOCK(wl);
-#endif				/* WLC_LOW */
 }
 
-static void wl_link_up(wl_info_t *wl, char *ifname)
+static void wl_link_up(struct wl_info *wl, char *ifname)
 {
-	WL_ERROR(("wl%d: link up (%s)\n", wl->pub->unit, ifname));
+	WL_ERROR("wl%d: link up (%s)\n", wl->pub->unit, ifname);
 }
 
-static void wl_link_down(wl_info_t *wl, char *ifname)
+static void wl_link_down(struct wl_info *wl, char *ifname)
 {
-	WL_ERROR(("wl%d: link down (%s)\n", wl->pub->unit, ifname));
+	WL_ERROR("wl%d: link down (%s)\n", wl->pub->unit, ifname);
 }
 
-void wl_event(wl_info_t *wl, char *ifname, wlc_event_t *e)
+void wl_event(struct wl_info *wl, char *ifname, wlc_event_t *e)
 {
 
 	switch (e->event.event_type) {
@@ -1877,12 +1521,7 @@
 
 static void wl_timer(unsigned long data)
 {
-#ifndef WLC_HIGH_ONLY
 	_wl_timer((wl_timer_t *) data);
-#else
-	wl_timer_t *t = (wl_timer_t *) data;
-	wl_schedule_task(t->wl, wl_timer_task, t);
-#endif				/* WLC_HIGH_ONLY */
 }
 
 static void _wl_timer(wl_timer_t *t)
@@ -1906,18 +1545,18 @@
 	WL_UNLOCK(t->wl);
 }
 
-wl_timer_t *wl_init_timer(wl_info_t *wl, void (*fn) (void *arg), void *arg,
+wl_timer_t *wl_init_timer(struct wl_info *wl, void (*fn) (void *arg), void *arg,
 			  const char *name)
 {
 	wl_timer_t *t;
 
 	t = kmalloc(sizeof(wl_timer_t), GFP_ATOMIC);
 	if (!t) {
-		WL_ERROR(("wl%d: wl_init_timer: out of memory\n", wl->pub->unit));
+		WL_ERROR("wl%d: wl_init_timer: out of memory\n", wl->pub->unit);
 		return 0;
 	}
 
-	bzero(t, sizeof(wl_timer_t));
+	memset(t, 0, sizeof(wl_timer_t));
 
 	init_timer(&t->timer);
 	t->timer.data = (unsigned long) t;
@@ -1940,12 +1579,12 @@
 /* BMAC_NOTE: Add timer adds only the kernel timer since it's going to be more accurate
  * as well as it's easier to make it periodic
  */
-void wl_add_timer(wl_info_t *wl, wl_timer_t *t, uint ms, int periodic)
+void wl_add_timer(struct wl_info *wl, wl_timer_t *t, uint ms, int periodic)
 {
 #ifdef BCMDBG
 	if (t->set) {
-		WL_ERROR(("%s: Already set. Name: %s, per %d\n",
-			  __func__, t->name, periodic));
+		WL_ERROR("%s: Already set. Name: %s, per %d\n",
+			 __func__, t->name, periodic);
 	}
 #endif
 	ASSERT(!t->set);
@@ -1960,7 +1599,7 @@
 }
 
 /* return true if timer successfully deleted, false if still pending */
-bool wl_del_timer(wl_info_t *wl, wl_timer_t *t)
+bool wl_del_timer(struct wl_info *wl, wl_timer_t *t)
 {
 	if (t->set) {
 		t->set = false;
@@ -1973,7 +1612,7 @@
 	return true;
 }
 
-void wl_free_timer(wl_info_t *wl, wl_timer_t *t)
+void wl_free_timer(struct wl_info *wl, wl_timer_t *t)
 {
 	wl_timer_t *tmp;
 
@@ -2009,7 +1648,7 @@
 
 static int wl_linux_watchdog(void *ctx)
 {
-	wl_info_t *wl = (wl_info_t *) ctx;
+	struct wl_info *wl = (struct wl_info *) ctx;
 	struct net_device_stats *stats = NULL;
 	uint id;
 	/* refresh stats */
@@ -2049,233 +1688,12 @@
 	u32 idx;
 };
 
-#ifdef WLC_HIGH_ONLY
-static void wl_rpc_down(void *wlh)
-{
-	wl_info_t *wl = (wl_info_t *) (wlh);
-
-	wlc_device_removed(wl->wlc);
-
-	wl_rpcq_free(wl);
-}
-
-static int BCMFASTPATH wl_start(struct sk_buff *skb, wl_info_t *wl)
-{
-
-	unsigned long flags;
-
-	skb->prev = NULL;
-
-	/* Lock the queue as tasklet could be running at this time */
-	TXQ_LOCK(wl, flags);
-	if (wl->txq_head == NULL)
-		wl->txq_head = skb;
-	else {
-		wl->txq_tail->prev = skb;
-	}
-	wl->txq_tail = skb;
-
-	if (wl->txq_dispatched == false) {
-		wl->txq_dispatched = true;
-
-		if (schedule_work(&wl->txq_task.work)) {
-			atomic_inc(&wl->callbacks);
-		} else {
-			WL_ERROR(("wl%d: wl_start/schedule_work failed\n",
-				  wl->pub->unit));
-		}
-	}
-
-	TXQ_UNLOCK(wl, flags);
-
-	return 0;
-
-}
-
-static void wl_start_txqwork(struct wl_task *task)
-{
-	wl_info_t *wl = (wl_info_t *) task->context;
-	struct sk_buff *skb;
-	unsigned long flags;
-	uint count = 0;
-
-	WL_TRACE(("wl%d: wl_start_txqwork\n", wl->pub->unit));
-
-	/* First remove an entry then go for execution */
-	TXQ_LOCK(wl, flags);
-	while (wl->txq_head) {
-		skb = wl->txq_head;
-		wl->txq_head = skb->prev;
-		skb->prev = NULL;
-		if (wl->txq_head == NULL)
-			wl->txq_tail = NULL;
-		TXQ_UNLOCK(wl, flags);
-
-		/* it has WL_LOCK/WL_UNLOCK inside */
-		wl_start_int(wl, WL_TO_HW(wl), skb);
-
-		/* bounded our execution, reshedule ourself next */
-		if (++count >= 10)
-			break;
-
-		TXQ_LOCK(wl, flags);
-	}
-
-	if (count >= 10) {
-		if (!schedule_work(&wl->txq_task.work)) {
-			WL_ERROR(("wl%d: wl_start/schedule_work failed\n",
-				  wl->pub->unit));
-			atomic_dec(&wl->callbacks);
-		}
-	} else {
-		wl->txq_dispatched = false;
-		TXQ_UNLOCK(wl, flags);
-		atomic_dec(&wl->callbacks);
-	}
-
-	return;
-}
-
-static void wl_txq_free(wl_info_t *wl)
-{
-	struct sk_buff *skb;
-
-	if (wl->txq_head == NULL) {
-		ASSERT(wl->txq_tail == NULL);
-		return;
-	}
-
-	while (wl->txq_head) {
-		skb = wl->txq_head;
-		wl->txq_head = skb->prev;
-		PKTFREE(wl->osh, skb, true);
-	}
-
-	wl->txq_tail = NULL;
-}
-
-static void wl_rpcq_free(wl_info_t *wl)
-{
-	rpc_buf_t *buf;
-
-	if (wl->rpcq_head == NULL) {
-		ASSERT(wl->rpcq_tail == NULL);
-		return;
-	}
-
-	while (wl->rpcq_head) {
-		buf = wl->rpcq_head;
-		wl->rpcq_head = bcm_rpc_buf_next_get(wl->rpc_th, buf);
-		bcm_rpc_buf_free(wl->rpc_dispatch_ctx.rpc, buf);
-	}
-
-	wl->rpcq_tail = NULL;
-}
-
-static void wl_rpcq_dispatch(struct wl_task *task)
-{
-	wl_info_t *wl = (wl_info_t *) task->context;
-	rpc_buf_t *buf;
-	unsigned long flags;
-
-	/* First remove an entry then go for execution */
-	RPCQ_LOCK(wl, flags);
-	while (wl->rpcq_head) {
-		buf = wl->rpcq_head;
-		wl->rpcq_head = bcm_rpc_buf_next_get(wl->rpc_th, buf);
-
-		if (wl->rpcq_head == NULL)
-			wl->rpcq_tail = NULL;
-		RPCQ_UNLOCK(wl, flags);
-
-		WL_LOCK(wl);
-		wlc_rpc_high_dispatch(&wl->rpc_dispatch_ctx, buf);
-		WL_UNLOCK(wl);
-
-		RPCQ_LOCK(wl, flags);
-	}
-
-	wl->rpcq_dispatched = false;
-
-	RPCQ_UNLOCK(wl, flags);
-
-	kfree(task);
-	atomic_dec(&wl->callbacks);
-}
-
-static void wl_rpcq_add(wl_info_t *wl, rpc_buf_t *buf)
-{
-	unsigned long flags;
-
-	bcm_rpc_buf_next_set(wl->rpc_th, buf, NULL);
-
-	/* Lock the queue as tasklet could be running at this time */
-	RPCQ_LOCK(wl, flags);
-	if (wl->rpcq_head == NULL)
-		wl->rpcq_head = buf;
-	else
-		bcm_rpc_buf_next_set(wl->rpc_th, wl->rpcq_tail, buf);
-
-	wl->rpcq_tail = buf;
-
-	if (wl->rpcq_dispatched == false) {
-		wl->rpcq_dispatched = true;
-		wl_schedule_task(wl, wl_rpcq_dispatch, wl);
-	}
-
-	RPCQ_UNLOCK(wl, flags);
-}
-
-#if defined(BCMDBG)
-static const struct name_entry rpc_name_tbl[] = RPC_ID_TABLE;
-#endif				/* BCMDBG */
-
-/* dongle-side rpc dispatch routine */
-static void wl_rpc_dispatch_schedule(void *ctx, struct rpc_buf *buf)
-{
-	bcm_xdr_buf_t b;
-	wl_info_t *wl = (wl_info_t *) ctx;
-	wlc_rpc_id_t rpc_id;
-	int err;
-
-	bcm_xdr_buf_init(&b, bcm_rpc_buf_data(wl->rpc_th, buf),
-			 bcm_rpc_buf_len_get(wl->rpc_th, buf));
-
-	err = bcm_xdr_unpack_u32(&b, &rpc_id);
-	ASSERT(!err);
-	WL_TRACE(("%s: Dispatch id %s\n", __func__,
-		  WLC_RPC_ID_LOOKUP(rpc_name_tbl, rpc_id)));
-
-	/* Handle few emergency ones */
-	switch (rpc_id) {
-	default:
-		wl_rpcq_add(wl, buf);
-		break;
-	}
-}
-
-static void wl_timer_task(wl_task_t *task)
-{
-	wl_timer_t *t = (wl_timer_t *) task->context;
-
-	_wl_timer(t);
-	kfree(task);
-
-	/* This dec is for the task_schedule. The timer related
-	 * callback is decremented in _wl_timer
-	 */
-	atomic_dec(&t->wl->callbacks);
-}
-#endif				/* WLC_HIGH_ONLY */
-
-#ifndef WLC_HIGH_ONLY
 char *wl_firmwares[WL_MAX_FW] = {
 	"brcm/bcm43xx",
 	NULL
 };
 
-#ifdef WLC_LOW
-int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, u32 idx)
+int wl_ucode_init_buf(struct wl_info *wl, void **pbuf, u32 idx)
 {
 	int i, entry;
 	const u8 *pdata;
@@ -2301,7 +1719,7 @@
 	return -1;
 }
 
-int wl_ucode_init_uint(wl_info_t *wl, u32 *data, u32 idx)
+int wl_ucode_init_uint(struct wl_info *wl, u32 *data, u32 idx)
 {
 	int i, entry;
 	const u8 *pdata;
@@ -2321,22 +1739,21 @@
 	printf("ERROR: ucode tag:%d can not be found!\n", idx);
 	return -1;
 }
-#endif				/* WLC_LOW */
 
-static int wl_request_fw(wl_info_t *wl, struct pci_dev *pdev)
+static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev)
 {
 	int status;
 	struct device *device = &pdev->dev;
 	char fw_name[100];
 	int i;
 
-	bzero((void *)&wl->fw, sizeof(struct wl_firmware));
+	memset((void *)&wl->fw, 0, sizeof(struct wl_firmware));
 	for (i = 0; i < WL_MAX_FW; i++) {
 		if (wl_firmwares[i] == NULL)
 			break;
 		sprintf(fw_name, "%s-%d.fw", wl_firmwares[i],
 			UCODE_LOADER_API_VER);
-		WL_NONE(("request fw %s\n", fw_name));
+		WL_NONE("request fw %s\n", fw_name);
 		status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
 		if (status) {
 			printf("%s: fail to load firmware %s\n",
@@ -2344,7 +1761,7 @@
 			wl_release_fw(wl);
 			return status;
 		}
-		WL_NONE(("request fw %s\n", fw_name));
+		WL_NONE("request fw %s\n", fw_name);
 		sprintf(fw_name, "%s_hdr-%d.fw", wl_firmwares[i],
 			UCODE_LOADER_API_VER);
 		status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
@@ -2356,22 +1773,19 @@
 		}
 		wl->fw.hdr_num_entries[i] =
 		    wl->fw.fw_hdr[i]->size / (sizeof(struct wl_fw_hdr));
-		WL_NONE(("request fw %s find: %d entries\n", fw_name,
-			 wl->fw.hdr_num_entries[i]));
+		WL_NONE("request fw %s find: %d entries\n",
+			fw_name, wl->fw.hdr_num_entries[i]);
 	}
 	wl->fw.fw_cnt = i;
-	wl_ucode_data_init(wl);
-	return 0;
+	return wl_ucode_data_init(wl);
 }
 
-#ifdef WLC_LOW
 void wl_ucode_free_buf(void *p)
 {
 	kfree(p);
 }
-#endif				/* WLC_LOW */
 
-static void wl_release_fw(wl_info_t *wl)
+static void wl_release_fw(struct wl_info *wl)
 {
 	int i;
 	for (i = 0; i < WL_MAX_FW; i++) {
@@ -2379,4 +1793,54 @@
 		release_firmware(wl->fw.fw_hdr[i]);
 	}
 }
-#endif				/* WLC_HIGH_ONLY */
+
+
+/*
+ * checks validity of all firmware images loaded from user space
+ */
+int wl_check_firmwares(struct wl_info *wl)
+{
+	int i;
+	int entry;
+	int rc = 0;
+	const struct firmware *fw;
+	const struct firmware *fw_hdr;
+	struct wl_fw_hdr *ucode_hdr;
+	for (i = 0; i < WL_MAX_FW && rc == 0; i++) {
+		fw =  wl->fw.fw_bin[i];
+		fw_hdr = wl->fw.fw_hdr[i];
+		if (fw == NULL && fw_hdr == NULL) {
+			break;
+		} else if (fw == NULL || fw_hdr == NULL) {
+			WL_ERROR("%s: invalid bin/hdr fw\n", __func__);
+			rc = -EBADF;
+		} else if (fw_hdr->size % sizeof(struct wl_fw_hdr)) {
+			WL_ERROR("%s: non integral fw hdr file size %d/%zu\n",
+				 __func__, fw_hdr->size,
+				 sizeof(struct wl_fw_hdr));
+			rc = -EBADF;
+		} else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
+			WL_ERROR("%s: out of bounds fw file size %d\n",
+				 __func__, fw->size);
+			rc = -EBADF;
+		} else {
+			/* check if ucode section overruns firmware image */
+			ucode_hdr = (struct wl_fw_hdr *)fw_hdr->data;
+			for (entry = 0; entry < wl->fw.hdr_num_entries[i] &&
+			     rc == 0; entry++, ucode_hdr++) {
+				if (ucode_hdr->offset + ucode_hdr->len >
+				    fw->size) {
+					WL_ERROR("%s: conflicting bin/hdr\n",
+						 __func__);
+					rc = -EBADF;
+				}
+			}
+		}
+	}
+	if (rc == 0 && wl->fw.fw_cnt != i) {
+		WL_ERROR("%s: invalid fw_cnt=%d\n", __func__, wl->fw.fw_cnt);
+		rc = -EBADF;
+	}
+	return rc;
+}
+
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.h b/drivers/staging/brcm80211/sys/wl_mac80211.h
index 78cee44..bb39b77 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.h
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.h
@@ -60,58 +60,32 @@
 };
 
 struct wl_info {
-	wlc_pub_t *pub;		/* pointer to public wlc state */
+	struct wlc_pub *pub;		/* pointer to public wlc state */
 	void *wlc;		/* pointer to private common os-independent data */
-	osl_t *osh;		/* pointer to os handler */
+	struct osl_info *osh;		/* pointer to os handler */
 	u32 magic;
 
 	int irq;
 
-#ifdef WLC_HIGH_ONLY
-	struct semaphore sem;	/* use semaphore to allow sleep */
-#else
 	spinlock_t lock;	/* per-device perimeter lock */
 	spinlock_t isr_lock;	/* per-device ISR synchronization lock */
-#endif
 	uint bcm_bustype;	/* bus type */
 	bool piomode;		/* set from insmod argument */
 	void *regsva;		/* opaque chip registers virtual address */
 	atomic_t callbacks;	/* # outstanding callback functions */
 	struct wl_timer *timers;	/* timer cleanup queue */
 	struct tasklet_struct tasklet;	/* dpc tasklet */
-#ifdef BCMSDIO
-	bcmsdh_info_t *sdh;	/* pointer to sdio bus handler */
-	unsigned long flags;		/* current irq flags */
-#endif				/* BCMSDIO */
 	bool resched;		/* dpc needs to be and is rescheduled */
 #ifdef LINUXSTA_PS
 	u32 pci_psstate[16];	/* pci ps-state save/restore */
 #endif
 	/* RPC, handle, lock, txq, workitem */
-#ifdef WLC_HIGH_ONLY
-	rpc_info_t *rpc;	/* RPC handle */
-	rpc_tp_info_t *rpc_th;	/* RPC transport handle */
-	wlc_rpc_ctx_t rpc_dispatch_ctx;
-
-	bool rpcq_dispatched;	/* Avoid scheduling multiple tasks */
-	spinlock_t rpcq_lock;	/* Lock for the queue */
-	rpc_buf_t *rpcq_head;	/* RPC Q */
-	rpc_buf_t *rpcq_tail;	/* Points to the last buf */
-
-	bool txq_dispatched;	/* Avoid scheduling multiple tasks */
-	spinlock_t txq_lock;	/* Lock for the queue */
-	struct sk_buff *txq_head;	/* TX Q */
-	struct sk_buff *txq_tail;	/* Points to the last buf */
-
-	wl_task_t txq_task;	/* work queue for wl_start() */
-#endif				/* WLC_HIGH_ONLY */
 	uint stats_id;		/* the current set of stats */
 	/* ping-pong stats counters updated by Linux watchdog */
 	struct net_device_stats stats_watchdog[2];
 	struct wl_firmware fw;
 };
 
-#ifndef WLC_HIGH_ONLY
 #define WL_LOCK(wl)	spin_lock_bh(&(wl)->lock)
 #define WL_UNLOCK(wl)	spin_unlock_bh(&(wl)->lock)
 
@@ -122,17 +96,6 @@
 /* locking under WL_LOCK() to synchronize with wl_isr */
 #define INT_LOCK(wl, flags)	spin_lock_irqsave(&(wl)->isr_lock, flags)
 #define INT_UNLOCK(wl, flags)	spin_unlock_irqrestore(&(wl)->isr_lock, flags)
-#else				/* BCMSDIO */
-
-#define WL_LOCK(wl)	down(&(wl)->sem)
-#define WL_UNLOCK(wl)	up(&(wl)->sem)
-
-#define WL_ISRLOCK(wl)
-#define WL_ISRUNLOCK(wl)
-#endif				/* WLC_HIGH_ONLY */
-
-/* handle forward declaration */
-typedef struct wl_info wl_info_t;
 
 #ifndef PCI_D0
 #define PCI_D0		0
@@ -148,14 +111,7 @@
 
 extern int __devinit wl_pci_probe(struct pci_dev *pdev,
 				  const struct pci_device_id *ent);
-extern void wl_free(wl_info_t *wl);
+extern void wl_free(struct wl_info *wl);
 extern int wl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
-extern int wl_ucode_data_init(wl_info_t *wl);
-extern void wl_ucode_data_free(void);
-#ifdef WLC_LOW
-extern void wl_ucode_free_buf(void *);
-extern int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, u32 idx);
-extern int wl_ucode_init_uint(wl_info_t *wl, u32 *data, u32 idx);
-#endif				/* WLC_LOW */
 
 #endif				/* _wl_mac80211_h_ */
diff --git a/drivers/staging/brcm80211/sys/wl_ucode.h b/drivers/staging/brcm80211/sys/wl_ucode.h
index a1ba372..2a0f402 100644
--- a/drivers/staging/brcm80211/sys/wl_ucode.h
+++ b/drivers/staging/brcm80211/sys/wl_ucode.h
@@ -14,6 +14,9 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#define MIN_FW_SIZE 40000	/* minimum firmware file size in bytes */
+#define MAX_FW_SIZE 150000
+
 typedef struct d11init {
 	u16 addr;
 	u16 size;
@@ -35,3 +38,12 @@
 extern u32 bcm43xx_24_lcnsz;
 extern u32 *bcm43xx_bommajor;
 extern u32 *bcm43xx_bomminor;
+
+extern int wl_ucode_data_init(struct wl_info *wl);
+extern void wl_ucode_data_free(void);
+
+extern int wl_ucode_init_buf(struct wl_info *wl, void **pbuf, unsigned int idx);
+extern int wl_ucode_init_uint(struct wl_info *wl, unsigned *data,
+			      unsigned int idx);
+extern void wl_ucode_free_buf(void *);
+extern int  wl_check_firmwares(struct wl_info *wl);
diff --git a/drivers/staging/brcm80211/sys/wl_ucode_loader.c b/drivers/staging/brcm80211/sys/wl_ucode_loader.c
index 0b41a9c..23e10f3 100644
--- a/drivers/staging/brcm80211/sys/wl_ucode_loader.c
+++ b/drivers/staging/brcm80211/sys/wl_ucode_loader.c
@@ -14,17 +14,12 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-typedef struct wl_info wl_info_t;
 #include <linux/types.h>
 #include <bcmdefs.h>
 #include <d11ucode_ext.h>
 #include <wl_ucode.h>
 
-extern int wl_ucode_init_buf(wl_info_t *wl, void **pbuf, unsigned int idx);
-extern int wl_ucode_init_uint(wl_info_t *wl, unsigned *data, unsigned int idx);
-extern int wl_ucode_data_init(wl_info_t *wl);
-extern void wl_ucode_data_free(void);
-extern void wl_ucode_free_buf(void *);
+
 
 d11init_t *d11lcn0bsinitvals24;
 d11init_t *d11lcn0initvals24;
@@ -42,8 +37,12 @@
 u32 *bcm43xx_bommajor;
 u32 *bcm43xx_bomminor;
 
-int wl_ucode_data_init(wl_info_t *wl)
+int wl_ucode_data_init(struct wl_info *wl)
 {
+	int rc;
+	rc = wl_check_firmwares(wl);
+	if (rc < 0)
+		return rc;
 	wl_ucode_init_buf(wl, (void **)&d11lcn0bsinitvals24,
 			  D11LCN0BSINITVALS24);
 	wl_ucode_init_buf(wl, (void **)&d11lcn0initvals24, D11LCN0INITVALS24);
diff --git a/drivers/staging/brcm80211/sys/wlc_alloc.c b/drivers/staging/brcm80211/sys/wlc_alloc.c
index 2dc89f9..746439e 100644
--- a/drivers/staging/brcm80211/sys/wlc_alloc.c
+++ b/drivers/staging/brcm80211/sys/wlc_alloc.c
@@ -17,28 +17,33 @@
 #include <linux/string.h>
 #include <bcmdefs.h>
 #include <wlc_cfg.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <wlioctl.h>
 #include <wlc_pub.h>
 #include <wlc_key.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_alloc.h>
+#include <wl_dbg.h>
 
-static wlc_pub_t *wlc_pub_malloc(osl_t *osh, uint unit, uint *err,
-				 uint devid);
-static void wlc_pub_mfree(osl_t *osh, wlc_pub_t *pub);
+static struct wlc_pub *wlc_pub_malloc(struct osl_info *osh, uint unit,
+				      uint *err, uint devid);
+static void wlc_pub_mfree(struct osl_info *osh, struct wlc_pub *pub);
 static void wlc_tunables_init(wlc_tunables_t *tunables, uint devid);
 
-void *wlc_calloc(osl_t *osh, uint unit, uint size)
+void *wlc_calloc(struct osl_info *osh, uint unit, uint size)
 {
 	void *item;
 
 	item = kzalloc(size, GFP_ATOMIC);
 	if (item == NULL)
-		WL_ERROR(("wl%d: %s: out of memory\n", unit, __func__));
+		WL_ERROR("wl%d: %s: out of memory\n", unit, __func__);
 	return item;
 }
 
@@ -58,18 +63,14 @@
 	tunables->ampdudatahiwat = WLC_AMPDUDATAHIWAT;
 	tunables->rxbnd = RXBND;
 	tunables->txsbnd = TXSBND;
-#if defined(WLC_HIGH_ONLY) && defined(NTXD_USB_4319)
-	if (devid == BCM4319_CHIP_ID) {
-		tunables->ntxd = NTXD_USB_4319;
-	}
-#endif				/* WLC_HIGH_ONLY */
 }
 
-static wlc_pub_t *wlc_pub_malloc(osl_t *osh, uint unit, uint *err, uint devid)
+static struct wlc_pub *wlc_pub_malloc(struct osl_info *osh, uint unit,
+				      uint *err, uint devid)
 {
-	wlc_pub_t *pub;
+	struct wlc_pub *pub;
 
-	pub = (wlc_pub_t *) wlc_calloc(osh, unit, sizeof(wlc_pub_t));
+	pub = (struct wlc_pub *) wlc_calloc(osh, unit, sizeof(struct wlc_pub));
 	if (pub == NULL) {
 		*err = 1001;
 		goto fail;
@@ -99,7 +100,7 @@
 	return NULL;
 }
 
-static void wlc_pub_mfree(osl_t *osh, wlc_pub_t *pub)
+static void wlc_pub_mfree(struct osl_info *osh, struct wlc_pub *pub)
 {
 	if (pub == NULL)
 		return;
@@ -114,7 +115,7 @@
 	kfree(pub);
 }
 
-wlc_bsscfg_t *wlc_bsscfg_malloc(osl_t *osh, uint unit)
+wlc_bsscfg_t *wlc_bsscfg_malloc(struct osl_info *osh, uint unit)
 {
 	wlc_bsscfg_t *cfg;
 
@@ -134,7 +135,7 @@
 	return NULL;
 }
 
-void wlc_bsscfg_mfree(osl_t *osh, wlc_bsscfg_t *cfg)
+void wlc_bsscfg_mfree(struct osl_info *osh, wlc_bsscfg_t *cfg)
 {
 	if (cfg == NULL)
 		return;
@@ -155,7 +156,7 @@
 	kfree(cfg);
 }
 
-void wlc_bsscfg_ID_assign(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg)
+void wlc_bsscfg_ID_assign(struct wlc_info *wlc, wlc_bsscfg_t *bsscfg)
 {
 	bsscfg->ID = wlc->next_bsscfg_ID;
 	wlc->next_bsscfg_ID++;
@@ -164,11 +165,13 @@
 /*
  * The common driver entry routine. Error codes should be unique
  */
-wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err, uint devid)
+struct wlc_info *wlc_attach_malloc(struct osl_info *osh, uint unit, uint *err,
+			      uint devid)
 {
-	wlc_info_t *wlc;
+	struct wlc_info *wlc;
 
-	wlc = (wlc_info_t *) wlc_calloc(osh, unit, sizeof(wlc_info_t));
+	wlc = (struct wlc_info *) wlc_calloc(osh, unit,
+					     sizeof(struct wlc_info));
 	if (wlc == NULL) {
 		*err = 1002;
 		goto fail;
@@ -176,7 +179,7 @@
 
 	wlc->hwrxoff = WL_HWRXOFF;
 
-	/* allocate wlc_pub_t state structure */
+	/* allocate struct wlc_pub state structure */
 	wlc->pub = wlc_pub_malloc(osh, unit, err, devid);
 	if (wlc->pub == NULL) {
 		*err = 1003;
@@ -184,17 +187,16 @@
 	}
 	wlc->pub->wlc = wlc;
 
-	/* allocate wlc_hw_info_t state structure */
+	/* allocate struct wlc_hw_info state structure */
 
-	wlc->hw = (wlc_hw_info_t *)wlc_calloc(osh, unit,
-		sizeof(wlc_hw_info_t));
+	wlc->hw = (struct wlc_hw_info *)wlc_calloc(osh, unit,
+		sizeof(struct wlc_hw_info));
 	if (wlc->hw == NULL) {
 		*err = 1005;
 		goto fail;
 	}
 	wlc->hw->wlc = wlc;
 
-#ifdef WLC_LOW
 	wlc->hw->bandstate[0] = (wlc_hwband_t *)wlc_calloc(osh, unit,
 		(sizeof(wlc_hwband_t) * MAXBANDS));
 	if (wlc->hw->bandstate[0] == NULL) {
@@ -209,7 +211,6 @@
 			     (sizeof(wlc_hwband_t) * i));
 		}
 	}
-#endif				/* WLC_LOW */
 
 	wlc->modulecb = (modulecb_t *)wlc_calloc(osh, unit,
 		sizeof(modulecb_t) * WLC_MAXMODULES);
@@ -266,8 +267,8 @@
 		goto fail;
 	}
 
-	wlc->bandstate[0] = (wlcband_t *)wlc_calloc(osh, unit,
-				(sizeof(wlcband_t) * MAXBANDS));
+	wlc->bandstate[0] = (struct wlcband *)wlc_calloc(osh, unit,
+				(sizeof(struct wlcband)*MAXBANDS));
 	if (wlc->bandstate[0] == NULL) {
 		*err = 1025;
 		goto fail;
@@ -276,12 +277,13 @@
 
 		for (i = 1; i < MAXBANDS; i++) {
 			wlc->bandstate[i] =
-			    (wlcband_t *) ((unsigned long)wlc->bandstate[0] +
-					   (sizeof(wlcband_t) * i));
+			    (struct wlcband *) ((unsigned long)wlc->bandstate[0]
+			    + (sizeof(struct wlcband)*i));
 		}
 	}
 
-	wlc->corestate = (wlccore_t *)wlc_calloc(osh, unit, sizeof(wlccore_t));
+	wlc->corestate = (struct wlccore *)wlc_calloc(osh, unit,
+						      sizeof(struct wlccore));
 	if (wlc->corestate == NULL) {
 		*err = 1026;
 		goto fail;
@@ -301,7 +303,7 @@
 	return NULL;
 }
 
-void wlc_detach_mfree(wlc_info_t *wlc, osl_t *osh)
+void wlc_detach_mfree(struct wlc_info *wlc, struct osl_info *osh)
 {
 	if (wlc == NULL)
 		return;
@@ -355,12 +357,10 @@
 	}
 
 	if (wlc->hw) {
-#ifdef WLC_LOW
 		if (wlc->hw->bandstate[0]) {
 			kfree(wlc->hw->bandstate[0]);
 			wlc->hw->bandstate[0] = NULL;
 		}
-#endif
 
 		/* free hw struct */
 		kfree(wlc->hw);
diff --git a/drivers/staging/brcm80211/sys/wlc_alloc.h b/drivers/staging/brcm80211/sys/wlc_alloc.h
index 678a2b9..ac34f78 100644
--- a/drivers/staging/brcm80211/sys/wlc_alloc.h
+++ b/drivers/staging/brcm80211/sys/wlc_alloc.h
@@ -14,12 +14,12 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-extern void *wlc_calloc(osl_t *osh, uint unit, uint size);
+extern void *wlc_calloc(struct osl_info *osh, uint unit, uint size);
 
-extern wlc_info_t *wlc_attach_malloc(osl_t *osh, uint unit, uint *err,
-				     uint devid);
-extern void wlc_detach_mfree(wlc_info_t *wlc, osl_t *osh);
+extern struct wlc_info *wlc_attach_malloc(struct osl_info *osh, uint unit,
+					  uint *err, uint devid);
+extern void wlc_detach_mfree(struct wlc_info *wlc, struct osl_info *osh);
 
 struct wlc_bsscfg;
-extern struct wlc_bsscfg *wlc_bsscfg_malloc(osl_t *osh, uint unit);
-extern void wlc_bsscfg_mfree(osl_t *osh, struct wlc_bsscfg *cfg);
+extern struct wlc_bsscfg *wlc_bsscfg_malloc(struct osl_info *osh, uint unit);
+extern void wlc_bsscfg_mfree(struct osl_info *osh, struct wlc_bsscfg *cfg);
diff --git a/drivers/staging/brcm80211/sys/wlc_ampdu.c b/drivers/staging/brcm80211/sys/wlc_ampdu.c
index a4e49f3..d749917 100644
--- a/drivers/staging/brcm80211/sys/wlc_ampdu.c
+++ b/drivers/staging/brcm80211/sys/wlc_ampdu.c
@@ -16,19 +16,19 @@
 #include <linux/kernel.h>
 #include <wlc_cfg.h>
 #include <bcmdefs.h>
-#include <linuxver.h>
-#include <bcmdefs.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <bcmendian.h>
 #include <wlioctl.h>
+#include <sbhndpio.h>
 #include <sbhnddma.h>
 #include <hnddma.h>
 #include <d11.h>
 #include <wlc_rate.h>
 #include <wlc_pub.h>
 #include <wlc_key.h>
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_phy_hal.h>
 #include <wlc_antsel.h>
@@ -36,11 +36,8 @@
 #include <net/mac80211.h>
 #include <wlc_ampdu.h>
 #include <wl_export.h>
+#include <wl_dbg.h>
 
-#ifdef WLC_HIGH_ONLY
-#include <bcm_rpc_tp.h>
-#include <wlc_rpctx.h>
-#endif
 
 #define AMPDU_MAX_MPDU		32	/* max number of mpdus in an ampdu */
 #define AMPDU_NUM_MPDU_LEGACY	16	/* max number of mpdus in an ampdu to a legacy */
@@ -101,7 +98,7 @@
 
 /* AMPDU module specific state */
 struct ampdu_info {
-	wlc_info_t *wlc;	/* pointer to main wlc structure */
+	struct wlc_info *wlc;	/* pointer to main wlc structure */
 	int scb_handle;		/* scb cubby handle to retrieve data from scb */
 	u8 ini_enable[AMPDU_MAX_SCB_TID];	/* per-tid initiator enable/disable of ampdu */
 	u8 ba_tx_wsize;	/* Tx ba window size (in pdu) */
@@ -125,11 +122,6 @@
 				 */
 	wlc_fifo_info_t fifo_tb[NUM_FFPLD_FIFO];	/* table of fifo infos  */
 
-#ifdef WLC_HIGH_ONLY
-	void *p;
-	tx_status_t txs;
-	bool waiting_status;	/* To help sanity checks */
-#endif
 };
 
 #define AMPDU_CLEANUPFLAG_RX   (0x1)
@@ -138,38 +130,39 @@
 #define SCB_AMPDU_CUBBY(ampdu, scb) (&(scb->scb_ampdu))
 #define SCB_AMPDU_INI(scb_ampdu, tid) (&(scb_ampdu->ini[tid]))
 
-static void wlc_ffpld_init(ampdu_info_t *ampdu);
-static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int f);
-static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f);
+static void wlc_ffpld_init(struct ampdu_info *ampdu);
+static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int f);
+static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f);
 
-static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
+static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
 						   scb_ampdu_t *scb_ampdu,
 						   u8 tid, bool override);
-static void ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu,
+static void ampdu_cleanup_tid_ini(struct ampdu_info *ampdu,
+				  scb_ampdu_t *scb_ampdu,
 				  u8 tid, bool force);
-static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur);
-static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb);
-static void scb_ampdu_update_config_all(ampdu_info_t *ampdu);
+static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur);
+static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb);
+static void scb_ampdu_update_config_all(struct ampdu_info *ampdu);
 
 #define wlc_ampdu_txflowcontrol(a, b, c)	do {} while (0)
 
-static void wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb,
-					  void *p, tx_status_t *txs,
-					  u32 frmtxstatus,
-					  u32 frmtxstatus2);
+static void wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu,
+					  struct scb *scb,
+					  struct sk_buff *p, tx_status_t *txs,
+					  u32 frmtxstatus, u32 frmtxstatus2);
 
-static inline u16 pkt_txh_seqnum(wlc_info_t *wlc, void *p)
+static inline u16 pkt_txh_seqnum(struct wlc_info *wlc, struct sk_buff *p)
 {
 	d11txh_t *txh;
 	struct dot11_header *h;
-	txh = (d11txh_t *) PKTDATA(p);
+	txh = (d11txh_t *) p->data;
 	h = (struct dot11_header *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
 	return ltoh16(h->seq) >> SEQNUM_SHIFT;
 }
 
-ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc)
+struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc)
 {
-	ampdu_info_t *ampdu;
+	struct ampdu_info *ampdu;
 	int i;
 
 	/* some code depends on packed structures */
@@ -179,9 +172,10 @@
 	ASSERT(wlc->pub->tunables->ampdunummpdu <= AMPDU_MAX_MPDU);
 	ASSERT(wlc->pub->tunables->ampdunummpdu > 0);
 
-	ampdu = kzalloc(sizeof(ampdu_info_t), GFP_ATOMIC);
+	ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
 	if (!ampdu) {
-		WL_ERROR(("wl%d: wlc_ampdu_attach: out of mem\n", wlc->pub->unit));
+		WL_ERROR("wl%d: wlc_ampdu_attach: out of mem\n",
+			 wlc->pub->unit);
 		return NULL;
 	}
 	ampdu->wlc = wlc;
@@ -209,10 +203,6 @@
 		ampdu->rx_factor = AMPDU_RX_FACTOR_32K;
 	else
 		ampdu->rx_factor = AMPDU_RX_FACTOR_64K;
-#ifdef WLC_HIGH_ONLY
-	/* Restrict to smaller rcv size for BMAC dongle */
-	ampdu->rx_factor = AMPDU_RX_FACTOR_32K;
-#endif
 	ampdu->retry_limit = AMPDU_DEF_RETRY_LIMIT;
 	ampdu->rr_retry_limit = AMPDU_DEF_RR_RETRY_LIMIT;
 
@@ -232,7 +222,7 @@
 	return ampdu;
 }
 
-void wlc_ampdu_detach(ampdu_info_t *ampdu)
+void wlc_ampdu_detach(struct ampdu_info *ampdu)
 {
 	int i;
 
@@ -250,12 +240,12 @@
 	kfree(ampdu);
 }
 
-void scb_ampdu_cleanup(ampdu_info_t *ampdu, struct scb *scb)
+void scb_ampdu_cleanup(struct ampdu_info *ampdu, struct scb *scb)
 {
 	scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
 	u8 tid;
 
-	WL_AMPDU_UPDN(("scb_ampdu_cleanup: enter\n"));
+	WL_AMPDU_UPDN("scb_ampdu_cleanup: enter\n");
 	ASSERT(scb_ampdu);
 
 	for (tid = 0; tid < AMPDU_MAX_SCB_TID; tid++) {
@@ -266,12 +256,12 @@
 /* reset the ampdu state machine so that it can gracefully handle packets that were
  * freed from the dma and tx queues during reinit
  */
-void wlc_ampdu_reset(ampdu_info_t *ampdu)
+void wlc_ampdu_reset(struct ampdu_info *ampdu)
 {
-	WL_NONE(("%s: Entering\n", __func__));
+	WL_NONE("%s: Entering\n", __func__);
 }
 
-static void scb_ampdu_update_config(ampdu_info_t *ampdu, struct scb *scb)
+static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb)
 {
 	scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
 	int i;
@@ -301,12 +291,12 @@
 	ASSERT(scb_ampdu->release);
 }
 
-void scb_ampdu_update_config_all(ampdu_info_t *ampdu)
+void scb_ampdu_update_config_all(struct ampdu_info *ampdu)
 {
 	scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
 }
 
-static void wlc_ffpld_init(ampdu_info_t *ampdu)
+static void wlc_ffpld_init(struct ampdu_info *ampdu)
 {
 	int i, j;
 	wlc_fifo_info_t *fifo;
@@ -330,9 +320,9 @@
  * Return 1 if pre-loading not active, -1 if not an underflow event,
  * 0 if pre-loading module took care of the event.
  */
-static int wlc_ffpld_check_txfunfl(wlc_info_t *wlc, int fid)
+static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
 {
-	ampdu_info_t *ampdu = wlc->ampdu;
+	struct ampdu_info *ampdu = wlc->ampdu;
 	u32 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
 	u32 txunfl_ratio;
 	u8 max_mpdu;
@@ -349,7 +339,7 @@
 			 M_UCODE_MACSTAT + offsetof(macstat_t, txfunfl[fid]));
 	new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
 	if (new_txunfl == 0) {
-		WL_FFPLD(("check_txunfl : TX status FRAG set but no tx underflows\n"));
+		WL_FFPLD("check_txunfl : TX status FRAG set but no tx underflows\n");
 		return -1;
 	}
 	fifo->prev_txfunfl = cur_txunfl;
@@ -359,7 +349,7 @@
 
 	/* check if fifo is big enough */
 	if (wlc_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz)) {
-		WL_FFPLD(("check_txunfl : get xmtfifo_sz failed.\n"));
+		WL_FFPLD("check_txunfl : get xmtfifo_sz failed\n");
 		return -1;
 	}
 
@@ -373,8 +363,8 @@
 	if (fifo->accum_txfunfl < 10)
 		return 0;
 
-	WL_FFPLD(("ampdu_count %d  tx_underflows %d\n",
-		  current_ampdu_cnt, fifo->accum_txfunfl));
+	WL_FFPLD("ampdu_count %d  tx_underflows %d\n",
+		 current_ampdu_cnt, fifo->accum_txfunfl);
 
 	/*
 	   compute the current ratio of tx unfl per ampdu.
@@ -427,8 +417,8 @@
 		      (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
 		     / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
 
-		WL_FFPLD(("DMA estimated transfer rate %d; pre-load size %d\n",
-			  fifo->dmaxferrate, fifo->ampdu_pld_size));
+		WL_FFPLD("DMA estimated transfer rate %d; pre-load size %d\n",
+			 fifo->dmaxferrate, fifo->ampdu_pld_size);
 	} else {
 
 		/* decrease ampdu size */
@@ -450,7 +440,7 @@
 	return 0;
 }
 
-static void wlc_ffpld_calc_mcs2ampdu_table(ampdu_info_t *ampdu, int f)
+static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
 {
 	int i;
 	u32 phy_rate, dma_rate, tmp;
@@ -483,11 +473,12 @@
 }
 
 static void BCMFASTPATH
-wlc_ampdu_agg(ampdu_info_t *ampdu, struct scb *scb, void *p, uint prec)
+wlc_ampdu_agg(struct ampdu_info *ampdu, struct scb *scb, struct sk_buff *p,
+	      uint prec)
 {
 	scb_ampdu_t *scb_ampdu;
 	scb_ampdu_tid_ini_t *ini;
-	u8 tid = (u8) PKTPRIO(p);
+	u8 tid = (u8) (p->priority);
 
 	scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
 
@@ -500,11 +491,12 @@
 }
 
 int BCMFASTPATH
-wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **pdu, int prec)
+wlc_sendampdu(struct ampdu_info *ampdu, wlc_txq_info_t *qi,
+	      struct sk_buff **pdu, int prec)
 {
-	wlc_info_t *wlc;
-	osl_t *osh;
-	void *p, *pkt[AMPDU_MAX_MPDU];
+	struct wlc_info *wlc;
+	struct osl_info *osh;
+	struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
 	u8 tid, ndelim;
 	int err = 0;
 	u8 preamble_type = WLC_GF_PREAMBLE;
@@ -540,7 +532,7 @@
 
 	ASSERT(p);
 
-	tid = (u8) PKTPRIO(p);
+	tid = (u8) (p->priority);
 	ASSERT(tid < AMPDU_MAX_SCB_TID);
 
 	f = ampdu->fifo_tb + prio2fifo[tid];
@@ -561,7 +553,7 @@
 	wlc_ampdu_agg(ampdu, scb, p, tid);
 
 	if (wlc->block_datafifo) {
-		WL_ERROR(("%s: Fifo blocked\n", __func__));
+		WL_ERROR("%s: Fifo blocked\n", __func__);
 		return BCME_BUSY;
 	}
 	rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
@@ -576,7 +568,7 @@
 		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 			err = wlc_prep_pdu(wlc, p, &fifo);
 		} else {
-			WL_ERROR(("%s: AMPDU flag is off!\n", __func__));
+			WL_ERROR("%s: AMPDU flag is off!\n", __func__);
 			*pdu = NULL;
 			err = 0;
 			break;
@@ -584,14 +576,16 @@
 
 		if (err) {
 			if (err == BCME_BUSY) {
-				WL_ERROR(("wl%d: wlc_sendampdu: prep_xdu retry; seq 0x%x\n", wlc->pub->unit, seq));
+				WL_ERROR("wl%d: wlc_sendampdu: prep_xdu retry; seq 0x%x\n",
+					 wlc->pub->unit, seq);
 				WLCNTINCR(ampdu->cnt->sduretry);
 				*pdu = p;
 				break;
 			}
 
 			/* error in the packet; reject it */
-			WL_AMPDU_ERR(("wl%d: wlc_sendampdu: prep_xdu rejected; seq 0x%x\n", wlc->pub->unit, seq));
+			WL_AMPDU_ERR("wl%d: wlc_sendampdu: prep_xdu rejected; seq 0x%x\n",
+				     wlc->pub->unit, seq);
 			WLCNTINCR(ampdu->cnt->sdurejected);
 
 			*pdu = NULL;
@@ -600,7 +594,7 @@
 
 		/* pkt is good to be aggregated */
 		ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
-		txh = (d11txh_t *) PKTDATA(p);
+		txh = (d11txh_t *) p->data;
 		plcp = (u8 *) (txh + 1);
 		h = (struct dot11_header *)(plcp + D11_PHY_HDR_LEN);
 		seq = ltoh16(h->seq) >> SEQNUM_SHIFT;
@@ -633,8 +627,8 @@
 		ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
 		seg_cnt += 1;
 
-		WL_AMPDU_TX(("wl%d: wlc_sendampdu: mpdu %d plcp_len %d\n",
-			     wlc->pub->unit, count, len));
+		WL_AMPDU_TX("wl%d: wlc_sendampdu: mpdu %d plcp_len %d\n",
+			    wlc->pub->unit, count, len);
 
 		/*
 		 * aggregateable mpdu. For ucode/hw agg,
@@ -665,7 +659,8 @@
 
 		dma_len += (u16) pkttotlen(osh, p);
 
-		WL_AMPDU_TX(("wl%d: wlc_sendampdu: ampdu_len %d seg_cnt %d null delim %d\n", wlc->pub->unit, ampdu_len, seg_cnt, ndelim));
+		WL_AMPDU_TX("wl%d: wlc_sendampdu: ampdu_len %d seg_cnt %d null delim %d\n",
+			    wlc->pub->unit, ampdu_len, seg_cnt, ndelim);
 
 		txh->MacTxControlLow = htol16(mcl);
 
@@ -695,8 +690,8 @@
 			    min(scb_ampdu->max_rxlen,
 				ampdu->max_txlen[mcs][is40][sgi]);
 
-			WL_NONE(("sendampdu: sgi %d, is40 %d, mcs %d\n", sgi,
-				 is40, mcs));
+			WL_NONE("sendampdu: sgi %d, is40 %d, mcs %d\n",
+				sgi, is40, mcs);
 
 			maxlen = 64 * 1024;	/* XXX Fix me to honor real max_rxlen */
 
@@ -739,13 +734,14 @@
 		/* test whether to add more */
 		if ((MCS_RATE(mcs, true, false) >= f->dmaxferrate) &&
 		    (count == f->mcs2ampdu_table[mcs])) {
-			WL_AMPDU_ERR(("wl%d: PR 37644: stopping ampdu at %d for mcs %d", wlc->pub->unit, count, mcs));
+			WL_AMPDU_ERR("wl%d: PR 37644: stopping ampdu at %d for mcs %d\n",
+				     wlc->pub->unit, count, mcs);
 			break;
 		}
 
 		if (count == scb_ampdu->max_pdu) {
-			WL_NONE(("Stop taking from q, reached %d deep\n",
-				 scb_ampdu->max_pdu));
+			WL_NONE("Stop taking from q, reached %d deep\n",
+				scb_ampdu->max_pdu);
 			break;
 		}
 
@@ -755,7 +751,7 @@
 
 		if (p) {
 			if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
-			    ((u8) PKTPRIO(p) == tid)) {
+			    ((u8) (p->priority) == tid)) {
 
 				plen =
 				    pkttotlen(osh, p) + AMPDU_MAX_MPDU_OVERHEAD;
@@ -763,15 +759,16 @@
 
 				if ((plen + ampdu_len) > maxlen) {
 					p = NULL;
-					WL_ERROR(("%s: Bogus plen #1\n",
-						  __func__));
+					WL_ERROR("%s: Bogus plen #1\n",
+						 __func__);
 					ASSERT(3 == 4);
 					continue;
 				}
 
 				/* check if there are enough descriptors available */
 				if (TXAVAIL(wlc, fifo) <= (seg_cnt + 1)) {
-					WL_ERROR(("%s: No fifo space   !!!!!!\n", __func__));
+					WL_ERROR("%s: No fifo space   !!!!!!\n",
+						 __func__);
 					p = NULL;
 					continue;
 				}
@@ -789,7 +786,7 @@
 		WLCNTADD(ampdu->cnt->txmpdu, count);
 
 		/* patch up the last txh */
-		txh = (d11txh_t *) PKTDATA(pkt[count - 1]);
+		txh = (d11txh_t *) pkt[count - 1]->data;
 		mcl = ltoh16(txh->MacTxControlLow);
 		mcl &= ~TXC_AMPDU_MASK;
 		mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
@@ -807,7 +804,7 @@
 		ampdu_len -= roundup(len, 4) - len;
 
 		/* patch up the first txh & plcp */
-		txh = (d11txh_t *) PKTDATA(pkt[0]);
+		txh = (d11txh_t *) pkt[0]->data;
 		plcp = (u8 *) (txh + 1);
 
 		WLC_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
@@ -878,27 +875,18 @@
 			WLC_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
 		}
 
-		WL_AMPDU_TX(("wl%d: wlc_sendampdu: count %d ampdu_len %d\n",
-			     wlc->pub->unit, count, ampdu_len));
+		WL_AMPDU_TX("wl%d: wlc_sendampdu: count %d ampdu_len %d\n",
+			    wlc->pub->unit, count, ampdu_len);
 
 		/* inform rate_sel if it this is a rate probe pkt */
 		frameid = ltoh16(txh->TxFrameID);
 		if (frameid & TXFID_RATE_PROBE_MASK) {
-			WL_ERROR(("%s: XXX what to do with TXFID_RATE_PROBE_MASK!?\n", __func__));
+			WL_ERROR("%s: XXX what to do with TXFID_RATE_PROBE_MASK!?\n",
+				 __func__);
 		}
-#ifdef WLC_HIGH_ONLY
-		if (wlc->rpc_agg & BCM_RPC_TP_HOST_AGG_AMPDU)
-			bcm_rpc_tp_agg_set(bcm_rpc_tp_get(wlc->rpc),
-					   BCM_RPC_TP_HOST_AGG_AMPDU, true);
-#endif
 		for (i = 0; i < count; i++)
 			wlc_txfifo(wlc, fifo, pkt[i], i == (count - 1),
 				   ampdu->txpkt_weight);
-#ifdef WLC_HIGH_ONLY
-		if (wlc->rpc_agg & BCM_RPC_TP_HOST_AGG_AMPDU)
-			bcm_rpc_tp_agg_set(bcm_rpc_tp_get(wlc->rpc),
-					   BCM_RPC_TP_HOST_AGG_AMPDU, false);
-#endif
 
 	}
 	/* endif (count) */
@@ -906,11 +894,11 @@
 }
 
 void BCMFASTPATH
-wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
-		     tx_status_t *txs)
+wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+		     struct sk_buff *p, tx_status_t *txs)
 {
 	scb_ampdu_t *scb_ampdu;
-	wlc_info_t *wlc = ampdu->wlc;
+	struct wlc_info *wlc = ampdu->wlc;
 	scb_ampdu_tid_ini_t *ini;
 	u32 s1 = 0, s2 = 0;
 	struct ieee80211_tx_info *tx_info;
@@ -922,7 +910,7 @@
 	ASSERT(txs->status & TX_STATUS_AMPDU);
 	scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
 	ASSERT(scb_ampdu);
-	ini = SCB_AMPDU_INI(scb_ampdu, PKTPRIO(p));
+	ini = SCB_AMPDU_INI(scb_ampdu, p->priority);
 	ASSERT(ini->scb == scb);
 
 	/* BMAC_NOTE: For the split driver, second level txstatus comes later
@@ -930,7 +918,6 @@
 	 * call the first one
 	 */
 	if (txs->status & TX_STATUS_ACK_RCV) {
-#ifdef WLC_LOW
 		u8 status_delay = 0;
 
 		/* wait till the next 8 bytes of txstatus is available */
@@ -948,54 +935,14 @@
 		ASSERT(!(s1 & TX_STATUS_INTERMEDIATE));
 		ASSERT(s1 & TX_STATUS_AMPDU);
 		s2 = R_REG(wlc->osh, &wlc->regs->frmtxstatus2);
-#else				/* WLC_LOW */
-
-		/* Store the relevant information in ampdu structure */
-		WL_AMPDU_TX(("wl%d: wlc_ampdu_dotxstatus: High Recvd\n",
-			     wlc->pub->unit));
-
-		ASSERT(!ampdu->p);
-		ampdu->p = p;
-		bcopy(txs, &ampdu->txs, sizeof(tx_status_t));
-		ampdu->waiting_status = true;
-		return;
-#endif				/* WLC_LOW */
 	}
 
 	wlc_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
 	wlc_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
 }
 
-#ifdef WLC_HIGH_ONLY
-void wlc_ampdu_txstatus_complete(ampdu_info_t *ampdu, u32 s1, u32 s2)
-{
-	WL_AMPDU_TX(("wl%d: wlc_ampdu_txstatus_complete: High Recvd 0x%x 0x%x p:%p\n", ampdu->wlc->pub->unit, s1, s2, ampdu->p));
-
-	ASSERT(ampdu->waiting_status);
-
-	/* The packet may have been freed if the SCB went away, if so, then still free the
-	 * DMA chain
-	 */
-	if (ampdu->p) {
-		struct ieee80211_tx_info *tx_info;
-		struct scb *scb;
-
-		tx_info = IEEE80211_SKB_CB(ampdu->p);
-		scb = (struct scb *)tx_info->control.sta->drv_priv;
-
-		wlc_ampdu_dotxstatus_complete(ampdu, scb, ampdu->p, &ampdu->txs,
-					      s1, s2);
-		ampdu->p = NULL;
-	}
-
-	ampdu->waiting_status = false;
-}
-#endif				/* WLC_HIGH_ONLY */
-void rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
-		 tx_status_t *txs, u8 mcs);
-
 void
-rate_status(wlc_info_t *wlc, struct ieee80211_tx_info *tx_info,
+rate_status(struct wlc_info *wlc, struct ieee80211_tx_info *tx_info,
 	    tx_status_t *txs, u8 mcs)
 {
 	struct ieee80211_tx_rate *txrate = tx_info->status.rates;
@@ -1008,17 +955,15 @@
 	}
 }
 
-extern void wlc_txq_enq(wlc_info_t *wlc, struct scb *scb, void *sdu,
-			uint prec);
-
 #define SHORTNAME "AMPDU status"
 
 static void BCMFASTPATH
-wlc_ampdu_dotxstatus_complete(ampdu_info_t *ampdu, struct scb *scb, void *p,
-			      tx_status_t *txs, u32 s1, u32 s2)
+wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+			      struct sk_buff *p, tx_status_t *txs,
+			      u32 s1, u32 s2)
 {
 	scb_ampdu_t *scb_ampdu;
-	wlc_info_t *wlc = ampdu->wlc;
+	struct wlc_info *wlc = ampdu->wlc;
 	scb_ampdu_tid_ini_t *ini;
 	u8 bitmap[8], queue, tid;
 	d11txh_t *txh;
@@ -1037,7 +982,7 @@
 
 #ifdef BCMDBG
 	u8 hole[AMPDU_MAX_MPDU];
-	bzero(hole, sizeof(hole));
+	memset(hole, 0, sizeof(hole));
 #endif
 
 	ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
@@ -1046,7 +991,7 @@
 	scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
 	ASSERT(scb_ampdu);
 
-	tid = (u8) PKTPRIO(p);
+	tid = (u8) (p->priority);
 
 	ini = SCB_AMPDU_INI(scb_ampdu, tid);
 	retry_limit = ampdu->retry_limit_tid[tid];
@@ -1054,7 +999,7 @@
 
 	ASSERT(ini->scb == scb);
 
-	bzero(bitmap, sizeof(bitmap));
+	memset(bitmap, 0, sizeof(bitmap));
 	queue = txs->frameid & TXFID_QUEUE_MASK;
 	ASSERT(queue < AC_COUNT);
 
@@ -1091,13 +1036,16 @@
 		if (supr_status) {
 			update_rate = false;
 			if (supr_status == TX_STATUS_SUPR_BADCH) {
-				WL_ERROR(("%s: Pkt tx suppressed, illegal channel possibly %d\n", __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)));
+				WL_ERROR("%s: Pkt tx suppressed, illegal channel possibly %d\n",
+					 __func__,
+					 CHSPEC_CHANNEL(wlc->default_bss->chanspec));
 			} else {
 				if (supr_status == TX_STATUS_SUPR_FRAG)
-					WL_NONE(("%s: AMPDU frag err\n",
-						 __func__));
+					WL_NONE("%s: AMPDU frag err\n",
+						__func__);
 				else
-					WL_ERROR(("%s: wlc_ampdu_dotxstatus: supr_status 0x%x\n", __func__, supr_status));
+					WL_ERROR("%s: wlc_ampdu_dotxstatus: supr_status 0x%x\n",
+						 __func__, supr_status);
 			}
 			/* no need to retry for badch; will fail again */
 			if (supr_status == TX_STATUS_SUPR_BADCH ||
@@ -1116,22 +1064,18 @@
 				if (wlc_ffpld_check_txfunfl(wlc, prio2fifo[tid])
 				    > 0) {
 					tx_error = true;
-#ifdef WLC_HIGH_ONLY
-					/* With BMAC, TX Underflows should not happen */
-					WL_ERROR(("wl%d: BMAC TX Underflow?",
-						  wlc->pub->unit));
-#endif
 				}
 			}
 		} else if (txs->phyerr) {
 			update_rate = false;
 			WLCNTINCR(wlc->pub->_cnt->txphyerr);
-			WL_ERROR(("wl%d: wlc_ampdu_dotxstatus: tx phy error (0x%x)\n", wlc->pub->unit, txs->phyerr));
+			WL_ERROR("wl%d: wlc_ampdu_dotxstatus: tx phy error (0x%x)\n",
+				 wlc->pub->unit, txs->phyerr);
 
 #ifdef BCMDBG
 			if (WL_ERROR_ON()) {
 				prpkt("txpkt (AMPDU)", wlc->osh, p);
-				wlc_print_txdesc((d11txh_t *) PKTDATA(p));
+				wlc_print_txdesc((d11txh_t *) p->data);
 				wlc_print_txstatus(txs);
 			}
 #endif				/* BCMDBG */
@@ -1142,7 +1086,7 @@
 	while (p) {
 		tx_info = IEEE80211_SKB_CB(p);
 		ASSERT(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
-		txh = (d11txh_t *) PKTDATA(p);
+		txh = (d11txh_t *) p->data;
 		mcl = ltoh16(txh->MacTxControlLow);
 		plcp = (u8 *) (txh + 1);
 		h = (struct dot11_header *)(plcp + D11_PHY_HDR_LEN);
@@ -1158,10 +1102,9 @@
 		if (ba_recd) {
 			bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
 
-			WL_AMPDU_TX(("%s: tid %d seq is %d, start_seq is %d, "
-				     "bindex is %d set %d, index %d\n",
-				     __func__, tid, seq, start_seq, bindex,
-				     isset(bitmap, bindex), index));
+			WL_AMPDU_TX("%s: tid %d seq is %d, start_seq is %d, bindex is %d set %d, index %d\n",
+				    __func__, tid, seq, start_seq, bindex,
+				    isset(bitmap, bindex), index);
 
 			/* if acked then clear bit and free packet */
 			if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
@@ -1186,8 +1129,8 @@
 				     status & TX_STATUS_FRM_RTX_MASK) >>
 				    TX_STATUS_FRM_RTX_SHIFT;
 
-				PKTPULL(p, D11_PHY_HDR_LEN);
-				PKTPULL(p, D11_TXH_LEN);
+				skb_pull(p, D11_PHY_HDR_LEN);
+				skb_pull(p, D11_TXH_LEN);
 
 				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
 							    p);
@@ -1212,9 +1155,10 @@
 				ieee80211_tx_info_clear_status(tx_info);
 				tx_info->flags |=
 				    IEEE80211_TX_STAT_AMPDU_NO_BACK;
-				PKTPULL(p, D11_PHY_HDR_LEN);
-				PKTPULL(p, D11_TXH_LEN);
-				WL_ERROR(("%s: BA Timeout, seq %d, in_transit %d\n", SHORTNAME, seq, ini->tx_in_transit));
+				skb_pull(p, D11_PHY_HDR_LEN);
+				skb_pull(p, D11_TXH_LEN);
+				WL_ERROR("%s: BA Timeout, seq %d, in_transit %d\n",
+					 SHORTNAME, seq, ini->tx_in_transit);
 				ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
 							    p);
 			}
@@ -1242,7 +1186,7 @@
 }
 
 static void
-ampdu_cleanup_tid_ini(ampdu_info_t *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
+ampdu_cleanup_tid_ini(struct ampdu_info *ampdu, scb_ampdu_t *scb_ampdu, u8 tid,
 		      bool force)
 {
 	scb_ampdu_tid_ini_t *ini;
@@ -1250,8 +1194,8 @@
 	if (!ini)
 		return;
 
-	WL_AMPDU_CTL(("wl%d: ampdu_cleanup_tid_ini: tid %d\n",
-		      ampdu->wlc->pub->unit, tid));
+	WL_AMPDU_CTL("wl%d: ampdu_cleanup_tid_ini: tid %d\n",
+		     ampdu->wlc->pub->unit, tid);
 
 	if (ini->tx_in_transit && !force)
 		return;
@@ -1264,7 +1208,7 @@
 }
 
 /* initialize the initiator code for tid */
-static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(ampdu_info_t *ampdu,
+static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
 						   scb_ampdu_t *scb_ampdu,
 						   u8 tid, bool override)
 {
@@ -1277,7 +1221,7 @@
 
 	/* check for per-tid control of ampdu */
 	if (!ampdu->ini_enable[tid]) {
-		WL_ERROR(("%s: Rejecting tid %d\n", __func__, tid));
+		WL_ERROR("%s: Rejecting tid %d\n", __func__, tid);
 		return NULL;
 	}
 
@@ -1290,21 +1234,21 @@
 	return ini;
 }
 
-int wlc_ampdu_set(ampdu_info_t *ampdu, bool on)
+int wlc_ampdu_set(struct ampdu_info *ampdu, bool on)
 {
-	wlc_info_t *wlc = ampdu->wlc;
+	struct wlc_info *wlc = ampdu->wlc;
 
 	wlc->pub->_ampdu = false;
 
 	if (on) {
 		if (!N_ENAB(wlc->pub)) {
-			WL_AMPDU_ERR(("wl%d: driver not nmode enabled\n",
-				      wlc->pub->unit));
+			WL_AMPDU_ERR("wl%d: driver not nmode enabled\n",
+				     wlc->pub->unit);
 			return BCME_UNSUPPORTED;
 		}
 		if (!wlc_ampdu_cap(ampdu)) {
-			WL_AMPDU_ERR(("wl%d: device not ampdu capable\n",
-				      wlc->pub->unit));
+			WL_AMPDU_ERR("wl%d: device not ampdu capable\n",
+				     wlc->pub->unit);
 			return BCME_UNSUPPORTED;
 		}
 		wlc->pub->_ampdu = on;
@@ -1313,7 +1257,7 @@
 	return 0;
 }
 
-bool wlc_ampdu_cap(ampdu_info_t *ampdu)
+bool wlc_ampdu_cap(struct ampdu_info *ampdu)
 {
 	if (WLC_PHY_11N_CAP(ampdu->wlc->band))
 		return true;
@@ -1321,7 +1265,7 @@
 		return false;
 }
 
-static void ampdu_update_max_txlen(ampdu_info_t *ampdu, u8 dur)
+static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
 {
 	u32 rate, mcs;
 
@@ -1343,7 +1287,7 @@
 }
 
 u8 BCMFASTPATH
-wlc_ampdu_null_delim_cnt(ampdu_info_t *ampdu, struct scb *scb,
+wlc_ampdu_null_delim_cnt(struct ampdu_info *ampdu, struct scb *scb,
 			 ratespec_t rspec, int phylen)
 {
 	scb_ampdu_t *scb_ampdu;
@@ -1379,25 +1323,25 @@
 		return 0;
 }
 
-void wlc_ampdu_macaddr_upd(wlc_info_t *wlc)
+void wlc_ampdu_macaddr_upd(struct wlc_info *wlc)
 {
 	char template[T_RAM_ACCESS_SZ * 2];
 
 	/* driver needs to write the ta in the template; ta is at offset 16 */
-	bzero(template, sizeof(template));
-	bcopy((char *)wlc->pub->cur_etheraddr.octet, template, ETHER_ADDR_LEN);
+	memset(template, 0, sizeof(template));
+	bcopy((char *)wlc->pub->cur_etheraddr.octet, template, ETH_ALEN);
 	wlc_write_template_ram(wlc, (T_BA_TPL_BASE + 16), (T_RAM_ACCESS_SZ * 2),
 			       template);
 }
 
-bool wlc_aggregatable(wlc_info_t *wlc, u8 tid)
+bool wlc_aggregatable(struct wlc_info *wlc, u8 tid)
 {
 	return wlc->ampdu->ini_enable[tid];
 }
 
-void wlc_ampdu_shm_upd(ampdu_info_t *ampdu)
+void wlc_ampdu_shm_upd(struct ampdu_info *ampdu)
 {
-	wlc_info_t *wlc = ampdu->wlc;
+	struct wlc_info *wlc = ampdu->wlc;
 
 	/* Extend ucode internal watchdog timer to match larger received frames */
 	if ((ampdu->rx_factor & HT_PARAMS_RX_FACTOR_MASK) ==
diff --git a/drivers/staging/brcm80211/sys/wlc_ampdu.h b/drivers/staging/brcm80211/sys/wlc_ampdu.h
index c721b16..03457f6 100644
--- a/drivers/staging/brcm80211/sys/wlc_ampdu.h
+++ b/drivers/staging/brcm80211/sys/wlc_ampdu.h
@@ -17,24 +17,20 @@
 #ifndef _wlc_ampdu_h_
 #define _wlc_ampdu_h_
 
-extern ampdu_info_t *wlc_ampdu_attach(wlc_info_t *wlc);
-extern void wlc_ampdu_detach(ampdu_info_t *ampdu);
-extern bool wlc_ampdu_cap(ampdu_info_t *ampdu);
-extern int wlc_ampdu_set(ampdu_info_t *ampdu, bool on);
-extern int wlc_sendampdu(ampdu_info_t *ampdu, wlc_txq_info_t *qi, void **aggp,
-			 int prec);
-extern void wlc_ampdu_dotxstatus(ampdu_info_t *ampdu, struct scb *scb, void *p,
-				 tx_status_t *txs);
-extern void wlc_ampdu_reset(ampdu_info_t *ampdu);
-extern void wlc_ampdu_macaddr_upd(wlc_info_t *wlc);
-extern void wlc_ampdu_shm_upd(ampdu_info_t *ampdu);
+extern struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc);
+extern void wlc_ampdu_detach(struct ampdu_info *ampdu);
+extern bool wlc_ampdu_cap(struct ampdu_info *ampdu);
+extern int wlc_ampdu_set(struct ampdu_info *ampdu, bool on);
+extern int wlc_sendampdu(struct ampdu_info *ampdu, wlc_txq_info_t *qi,
+			 struct sk_buff **aggp, int prec);
+extern void wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+				 struct sk_buff *p, tx_status_t *txs);
+extern void wlc_ampdu_reset(struct ampdu_info *ampdu);
+extern void wlc_ampdu_macaddr_upd(struct wlc_info *wlc);
+extern void wlc_ampdu_shm_upd(struct ampdu_info *ampdu);
 
-extern u8 wlc_ampdu_null_delim_cnt(ampdu_info_t *ampdu, struct scb *scb,
+extern u8 wlc_ampdu_null_delim_cnt(struct ampdu_info *ampdu, struct scb *scb,
 				      ratespec_t rspec, int phylen);
-extern void scb_ampdu_cleanup(ampdu_info_t *ampdu, struct scb *scb);
-#ifdef WLC_HIGH_ONLY
-extern void wlc_ampdu_txstatus_complete(ampdu_info_t *ampdu, u32 s1,
-					u32 s2);
-#endif
+extern void scb_ampdu_cleanup(struct ampdu_info *ampdu, struct scb *scb);
 
 #endif				/* _wlc_ampdu_h_ */
diff --git a/drivers/staging/brcm80211/sys/wlc_antsel.c b/drivers/staging/brcm80211/sys/wlc_antsel.c
index 5ff8831..402ddf8 100644
--- a/drivers/staging/brcm80211/sys/wlc_antsel.c
+++ b/drivers/staging/brcm80211/sys/wlc_antsel.c
@@ -19,18 +19,23 @@
 #ifdef WLANTSEL
 
 #include <linux/kernel.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <bcmdefs.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <wlioctl.h>
 
+#include <bcmdevs.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 #include <d11.h>
 #include <wlc_rate.h>
 #include <wlc_key.h>
 #include <wlc_pub.h>
 #include <wl_dbg.h>
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_bmac.h>
 #include <wlc_phy_hal.h>
@@ -58,10 +63,11 @@
 #define ANT_SELCFG_DEF_2x4	0x02	/* default antenna configuration */
 
 /* static functions */
-static int wlc_antsel_cfgupd(antsel_info_t *asi, wlc_antselcfg_t *antsel);
-static u8 wlc_antsel_id2antcfg(antsel_info_t *asi, u8 id);
-static u16 wlc_antsel_antcfg2antsel(antsel_info_t *asi, u8 ant_cfg);
-static void wlc_antsel_init_cfg(antsel_info_t *asi, wlc_antselcfg_t *antsel,
+static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel);
+static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id);
+static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg);
+static void wlc_antsel_init_cfg(struct antsel_info *asi,
+				wlc_antselcfg_t *antsel,
 				bool auto_sel);
 
 const u16 mimo_2x4_div_antselpat_tbl[] = {
@@ -88,14 +94,15 @@
 	0, 0, 0, 0, 0, 0, 0, 0	/* pat to antselid */
 };
 
-antsel_info_t *wlc_antsel_attach(wlc_info_t *wlc, osl_t *osh,
-						  wlc_pub_t *pub,
-						  wlc_hw_info_t *wlc_hw) {
-	antsel_info_t *asi;
+struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc,
+				      struct osl_info *osh,
+				      struct wlc_pub *pub,
+				      struct wlc_hw_info *wlc_hw) {
+	struct antsel_info *asi;
 
-	asi = kzalloc(sizeof(antsel_info_t), GFP_ATOMIC);
+	asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
 	if (!asi) {
-		WL_ERROR(("wl%d: wlc_antsel_attach: out of mem\n", pub->unit));
+		WL_ERROR("wl%d: wlc_antsel_attach: out of mem\n", pub->unit);
 		return NULL;
 	}
 
@@ -124,7 +131,7 @@
 				asi->antsel_avail = false;
 			} else {
 				asi->antsel_avail = false;
-				WL_ERROR(("wlc_antsel_attach: 2o3 board cfg invalid\n"));
+				WL_ERROR("wlc_antsel_attach: 2o3 board cfg invalid\n");
 				ASSERT(0);
 			}
 			break;
@@ -152,7 +159,7 @@
 	return asi;
 }
 
-void wlc_antsel_detach(antsel_info_t *asi)
+void wlc_antsel_detach(struct antsel_info *asi)
 {
 	if (!asi)
 		return;
@@ -160,7 +167,7 @@
 	kfree(asi);
 }
 
-void wlc_antsel_init(antsel_info_t *asi)
+void wlc_antsel_init(struct antsel_info *asi)
 {
 	if ((asi->antsel_type == ANTSEL_2x3) ||
 	    (asi->antsel_type == ANTSEL_2x4))
@@ -169,7 +176,7 @@
 
 /* boardlevel antenna selection: init antenna selection structure */
 static void
-wlc_antsel_init_cfg(antsel_info_t *asi, wlc_antselcfg_t *antsel,
+wlc_antsel_init_cfg(struct antsel_info *asi, wlc_antselcfg_t *antsel,
 		    bool auto_sel)
 {
 	if (asi->antsel_type == ANTSEL_2x3) {
@@ -200,7 +207,7 @@
 }
 
 void BCMFASTPATH
-wlc_antsel_antcfg_get(antsel_info_t *asi, bool usedef, bool sel,
+wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
 		      u8 antselid, u8 fbantselid, u8 *antcfg,
 		      u8 *fbantcfg)
 {
@@ -232,7 +239,7 @@
 }
 
 /* boardlevel antenna selection: convert mimo_antsel (ucode interface) to id */
-u8 wlc_antsel_antsel2id(antsel_info_t *asi, u16 antsel)
+u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel)
 {
 	u8 antselid = 0;
 
@@ -251,7 +258,7 @@
 }
 
 /* boardlevel antenna selection: convert id to ant_cfg */
-static u8 wlc_antsel_id2antcfg(antsel_info_t *asi, u8 id)
+static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id)
 {
 	u8 antcfg = ANT_SELCFG_DEF_2x2;
 
@@ -270,7 +277,7 @@
 }
 
 /* boardlevel antenna selection: convert ant_cfg to mimo_antsel (ucode interface) */
-static u16 wlc_antsel_antcfg2antsel(antsel_info_t *asi, u8 ant_cfg)
+static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
 {
 	u8 idx = WLC_ANTIDX_11N(WLC_ANTSEL_11N(ant_cfg));
 	u16 mimo_antsel = 0;
@@ -290,9 +297,9 @@
 }
 
 /* boardlevel antenna selection: ucode interface control */
-static int wlc_antsel_cfgupd(antsel_info_t *asi, wlc_antselcfg_t *antsel)
+static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel)
 {
-	wlc_info_t *wlc = asi->wlc;
+	struct wlc_info *wlc = asi->wlc;
 	u8 ant_cfg;
 	u16 mimo_antsel;
 
diff --git a/drivers/staging/brcm80211/sys/wlc_antsel.h b/drivers/staging/brcm80211/sys/wlc_antsel.h
index 1d048bb..8875b58 100644
--- a/drivers/staging/brcm80211/sys/wlc_antsel.h
+++ b/drivers/staging/brcm80211/sys/wlc_antsel.h
@@ -16,13 +16,15 @@
 
 #ifndef _wlc_antsel_h_
 #define _wlc_antsel_h_
-extern antsel_info_t *wlc_antsel_attach(wlc_info_t *wlc, osl_t *osh,
-					wlc_pub_t *pub,
-					wlc_hw_info_t *wlc_hw);
-extern void wlc_antsel_detach(antsel_info_t *asi);
-extern void wlc_antsel_init(antsel_info_t *asi);
-extern void wlc_antsel_antcfg_get(antsel_info_t *asi, bool usedef, bool sel,
+extern struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc,
+					struct osl_info *osh,
+					struct wlc_pub *pub,
+					struct wlc_hw_info *wlc_hw);
+extern void wlc_antsel_detach(struct antsel_info *asi);
+extern void wlc_antsel_init(struct antsel_info *asi);
+extern void wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
+				  bool sel,
 				  u8 id, u8 fbid, u8 *antcfg,
 				  u8 *fbantcfg);
-extern u8 wlc_antsel_antsel2id(antsel_info_t *asi, u16 antsel);
+extern u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
 #endif				/* _wlc_antsel_h_ */
diff --git a/drivers/staging/brcm80211/sys/wlc_bmac.c b/drivers/staging/brcm80211/sys/wlc_bmac.c
index b70f9d0..69f600a 100644
--- a/drivers/staging/brcm80211/sys/wlc_bmac.c
+++ b/drivers/staging/brcm80211/sys/wlc_bmac.c
@@ -14,13 +14,13 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#ifndef WLC_LOW
-#error "This file needs WLC_LOW"
-#endif
 
 #include <linux/kernel.h>
 #include <wlc_cfg.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <bcmdefs.h>
 #include <osl.h>
 #include <proto/802.11.h>
@@ -42,12 +42,14 @@
 #include <wlc_channel.h>
 #include <bcmsrom.h>
 #include <wlc_key.h>
+#include <bcmdevs.h>
 /* BMAC_NOTE: a WLC_HIGH compile include of wlc.h adds in more structures and type
  * dependencies. Need to include these to files to allow a clean include of wlc.h
  * with WLC_HIGH defined.
  * At some point we may be able to skip the include of wlc.h and instead just
  * define a stub wlc_info and band struct to allow rpc calls to get the rpc handle.
  */
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_bmac.h>
 #include <wlc_phy_shim.h>
@@ -55,9 +57,6 @@
 #include <wl_export.h>
 #include "wl_ucode.h"
 #include "d11ucode_ext.h"
-#ifdef BCMSDIO
-#include <bcmsdh.h>
-#endif
 #include <bcmotp.h>
 
 /* BMAC_NOTE: With WLC_HIGH defined, some fns in this file make calls to high level
@@ -69,6 +68,7 @@
 #include <pcie_core.h>
 
 #include <wlc_alloc.h>
+#include <wl_dbg.h>
 
 #define	TIMER_INTERVAL_WATCHDOG_BMAC	1000	/* watchdog timer, in unit of ms */
 
@@ -113,64 +113,65 @@
 	{9, 58, 22, 14, 14, 5},	/* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
 };
 
-static void wlc_clkctl_clk(wlc_hw_info_t *wlc, uint mode);
-static void wlc_coreinit(wlc_info_t *wlc);
+static void wlc_clkctl_clk(struct wlc_hw_info *wlc, uint mode);
+static void wlc_coreinit(struct wlc_info *wlc);
 
 /* used by wlc_wakeucode_init() */
-static void wlc_write_inits(wlc_hw_info_t *wlc_hw, const d11init_t *inits);
-static void wlc_ucode_write(wlc_hw_info_t *wlc_hw, const u32 ucode[],
+static void wlc_write_inits(struct wlc_hw_info *wlc_hw, const d11init_t *inits);
+static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
 			    const uint nbytes);
-static void wlc_ucode_download(wlc_hw_info_t *wlc);
-static void wlc_ucode_txant_set(wlc_hw_info_t *wlc_hw);
+static void wlc_ucode_download(struct wlc_hw_info *wlc);
+static void wlc_ucode_txant_set(struct wlc_hw_info *wlc_hw);
 
 /* used by wlc_dpc() */
-static bool wlc_bmac_dotxstatus(wlc_hw_info_t *wlc, tx_status_t *txs,
+static bool wlc_bmac_dotxstatus(struct wlc_hw_info *wlc, tx_status_t *txs,
 				u32 s2);
-static bool wlc_bmac_txstatus_corerev4(wlc_hw_info_t *wlc);
-static bool wlc_bmac_txstatus(wlc_hw_info_t *wlc, bool bound, bool *fatal);
-static bool wlc_bmac_recv(wlc_hw_info_t *wlc_hw, uint fifo, bool bound);
+static bool wlc_bmac_txstatus_corerev4(struct wlc_hw_info *wlc);
+static bool wlc_bmac_txstatus(struct wlc_hw_info *wlc, bool bound, bool *fatal);
+static bool wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound);
 
 /* used by wlc_down() */
-static void wlc_flushqueues(wlc_info_t *wlc);
+static void wlc_flushqueues(struct wlc_info *wlc);
 
-static void wlc_write_mhf(wlc_hw_info_t *wlc_hw, u16 *mhfs);
-static void wlc_mctrl_reset(wlc_hw_info_t *wlc_hw);
-static void wlc_corerev_fifofixup(wlc_hw_info_t *wlc_hw);
+static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs);
+static void wlc_mctrl_reset(struct wlc_hw_info *wlc_hw);
+static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw);
 
 /* Low Level Prototypes */
-static u16 wlc_bmac_read_objmem(wlc_hw_info_t *wlc_hw, uint offset,
+static u16 wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset,
 				   u32 sel);
-static void wlc_bmac_write_objmem(wlc_hw_info_t *wlc_hw, uint offset, u16 v,
-				  u32 sel);
-static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme);
-static void wlc_bmac_detach_dmapio(wlc_hw_info_t *wlc_hw);
-static void wlc_ucode_bsinit(wlc_hw_info_t *wlc_hw);
-static bool wlc_validboardtype(wlc_hw_info_t *wlc);
-static bool wlc_isgoodchip(wlc_hw_info_t *wlc_hw);
-static char *wlc_get_macaddr(wlc_hw_info_t *wlc_hw);
-static void wlc_mhfdef(wlc_info_t *wlc, u16 *mhfs, u16 mhf2_init);
-static void wlc_mctrl_write(wlc_hw_info_t *wlc_hw);
-static void wlc_ucode_mute_override_set(wlc_hw_info_t *wlc_hw);
-static void wlc_ucode_mute_override_clear(wlc_hw_info_t *wlc_hw);
-static u32 wlc_wlintrsoff(wlc_info_t *wlc);
-static void wlc_wlintrsrestore(wlc_info_t *wlc, u32 macintmask);
-static void wlc_gpio_init(wlc_info_t *wlc);
-static void wlc_write_hw_bcntemplate0(wlc_hw_info_t *wlc_hw, void *bcn,
+static void wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset,
+				  u16 v, u32 sel);
+static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme);
+static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw);
+static void wlc_ucode_bsinit(struct wlc_hw_info *wlc_hw);
+static bool wlc_validboardtype(struct wlc_hw_info *wlc);
+static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw);
+static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw);
+static void wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init);
+static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw);
+static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw);
+static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw);
+static u32 wlc_wlintrsoff(struct wlc_info *wlc);
+static void wlc_wlintrsrestore(struct wlc_info *wlc, u32 macintmask);
+static void wlc_gpio_init(struct wlc_info *wlc);
+static void wlc_write_hw_bcntemplate0(struct wlc_hw_info *wlc_hw, void *bcn,
 				      int len);
-static void wlc_write_hw_bcntemplate1(wlc_hw_info_t *wlc_hw, void *bcn,
+static void wlc_write_hw_bcntemplate1(struct wlc_hw_info *wlc_hw, void *bcn,
 				      int len);
-static void wlc_bmac_bsinit(wlc_info_t *wlc, chanspec_t chanspec);
-static u32 wlc_setband_inact(wlc_info_t *wlc, uint bandunit);
-static void wlc_bmac_setband(wlc_hw_info_t *wlc_hw, uint bandunit,
+static void wlc_bmac_bsinit(struct wlc_info *wlc, chanspec_t chanspec);
+static u32 wlc_setband_inact(struct wlc_info *wlc, uint bandunit);
+static void wlc_bmac_setband(struct wlc_hw_info *wlc_hw, uint bandunit,
 			     chanspec_t chanspec);
-static void wlc_bmac_update_slot_timing(wlc_hw_info_t *wlc_hw, bool shortslot);
-static void wlc_upd_ofdm_pctl1_table(wlc_hw_info_t *wlc_hw);
-static u16 wlc_bmac_ofdm_ratetable_offset(wlc_hw_info_t *wlc_hw,
+static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
+					bool shortslot);
+static void wlc_upd_ofdm_pctl1_table(struct wlc_hw_info *wlc_hw);
+static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw,
 					     u8 rate);
 
 /* === Low Level functions === */
 
-void wlc_bmac_set_shortslot(wlc_hw_info_t *wlc_hw, bool shortslot)
+void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot)
 {
 	wlc_hw->shortslot = shortslot;
 
@@ -186,9 +187,10 @@
  * or shortslot 11g (9us slots)
  * The PSM needs to be suspended for this call.
  */
-static void wlc_bmac_update_slot_timing(wlc_hw_info_t *wlc_hw, bool shortslot)
+static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
+					bool shortslot)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs;
 
 	osh = wlc_hw->osh;
@@ -205,7 +207,7 @@
 	}
 }
 
-static void WLBANDINITFN(wlc_ucode_bsinit) (wlc_hw_info_t *wlc_hw)
+static void WLBANDINITFN(wlc_ucode_bsinit) (struct wlc_hw_info *wlc_hw)
 {
 	/* init microcode host flags */
 	wlc_write_mhf(wlc_hw, wlc_hw->band->mhfs);
@@ -215,30 +217,32 @@
 		if (WLCISNPHY(wlc_hw->band)) {
 			wlc_write_inits(wlc_hw, d11n0bsinitvals16);
 		} else {
-			WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
-				  __func__, wlc_hw->unit, wlc_hw->corerev));
+			WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+				 __func__, wlc_hw->unit, wlc_hw->corerev);
 		}
 	} else {
 		if (D11REV_IS(wlc_hw->corerev, 24)) {
 			if (WLCISLCNPHY(wlc_hw->band)) {
 				wlc_write_inits(wlc_hw, d11lcn0bsinitvals24);
 			} else
-				WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n", __func__, wlc_hw->unit, wlc_hw->corerev));
+				WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+					 __func__, wlc_hw->unit,
+					 wlc_hw->corerev);
 		} else {
-			WL_ERROR(("%s: wl%d: unsupported corerev %d\n",
-				  __func__, wlc_hw->unit, wlc_hw->corerev));
+			WL_ERROR("%s: wl%d: unsupported corerev %d\n",
+				 __func__, wlc_hw->unit, wlc_hw->corerev);
 		}
 	}
 }
 
 /* switch to new band but leave it inactive */
-static u32 WLBANDINITFN(wlc_setband_inact) (wlc_info_t *wlc, uint bandunit)
+static u32 WLBANDINITFN(wlc_setband_inact) (struct wlc_info *wlc, uint bandunit)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	u32 macintmask;
 	u32 tmp;
 
-	WL_TRACE(("wl%d: wlc_setband_inact\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_setband_inact\n", wlc_hw->unit);
 
 	ASSERT(bandunit != wlc_hw->band->bandunit);
 	ASSERT(si_iscoreup(wlc_hw->sih));
@@ -269,24 +273,24 @@
  * Param 'bound' indicates max. # frames to process before break out.
  */
 static bool BCMFASTPATH
-wlc_bmac_recv(wlc_hw_info_t *wlc_hw, uint fifo, bool bound)
+wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound)
 {
-	void *p;
-	void *head = NULL;
-	void *tail = NULL;
+	struct sk_buff *p;
+	struct sk_buff *head = NULL;
+	struct sk_buff *tail = NULL;
 	uint n = 0;
 	uint bound_limit = bound ? wlc_hw->wlc->pub->tunables->rxbnd : -1;
 	u32 tsf_h, tsf_l;
 	wlc_d11rxhdr_t *wlc_rxhdr = NULL;
 
-	WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
 	/* gather received frames */
 	while ((p = dma_rx(wlc_hw->di[fifo]))) {
 
 		if (!tail)
 			head = tail = p;
 		else {
-			PKTSETLINK(tail, p);
+			tail->prev = p;
 			tail = p;
 		}
 
@@ -303,11 +307,11 @@
 
 	/* process each frame */
 	while ((p = head) != NULL) {
-		head = PKTLINK(head);
-		PKTSETLINK(p, NULL);
+		head = head->prev;
+		p->prev = NULL;
 
 		/* record the tsf_l in wlc_rxd11hdr */
-		wlc_rxhdr = (wlc_d11rxhdr_t *) PKTDATA(p);
+		wlc_rxhdr = (wlc_d11rxhdr_t *) p->data;
 		wlc_rxhdr->tsf_l = htol32(tsf_l);
 
 		/* compute the RSSI from d11rxhdr and record it in wlc_rxd11hr */
@@ -323,15 +327,15 @@
  *   Return true if another dpc needs to be re-scheduled. false otherwise.
  *   Param 'bounded' indicates if applicable loops should be bounded.
  */
-bool BCMFASTPATH wlc_dpc(wlc_info_t *wlc, bool bounded)
+bool BCMFASTPATH wlc_dpc(struct wlc_info *wlc, bool bounded)
 {
 	u32 macintstatus;
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	d11regs_t *regs = wlc_hw->regs;
 	bool fatal = false;
 
 	if (DEVICEREMOVED(wlc)) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
 		wl_down(wlc->wl);
 		return false;
 	}
@@ -340,8 +344,8 @@
 	macintstatus = wlc->macintstatus;
 	wlc->macintstatus = 0;
 
-	WL_TRACE(("wl%d: wlc_dpc: macintstatus 0x%x\n", wlc_hw->unit,
-		  macintstatus));
+	WL_TRACE("wl%d: wlc_dpc: macintstatus 0x%x\n",
+		 wlc_hw->unit, macintstatus);
 
 	if (macintstatus & MI_PRQ) {
 		/* Process probe request FIFO */
@@ -364,7 +368,7 @@
 		if (wlc_bmac_txstatus(wlc->hw, bounded, &fatal))
 			wlc->macintstatus |= MI_TFS;
 		if (fatal) {
-			WL_ERROR(("MI_TFS: fatal\n"));
+			WL_ERROR("MI_TFS: fatal\n");
 			goto fatal;
 		}
 	}
@@ -374,7 +378,7 @@
 
 	/* ATIM window end */
 	if (macintstatus & MI_ATIMWINEND) {
-		WL_TRACE(("wlc_isr: end of ATIM window\n"));
+		WL_TRACE("wlc_isr: end of ATIM window\n");
 
 		OR_REG(wlc_hw->osh, &regs->maccommand, wlc->qvalid);
 		wlc->qvalid = 0;
@@ -395,7 +399,7 @@
 	/* TX FIFO suspend/flush completion */
 	if (macintstatus & MI_TXSTOP) {
 		if (wlc_bmac_tx_fifo_suspended(wlc_hw, TX_DATA_FIFO)) {
-			/*      WL_ERROR(("dpc: fifo_suspend_comlete\n")); */
+			/*      WL_ERROR("dpc: fifo_suspend_complete\n"); */
 		}
 	}
 
@@ -405,11 +409,12 @@
 	}
 
 	if (macintstatus & MI_GP0) {
-		WL_ERROR(("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now));
+		WL_ERROR("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n",
+			 wlc_hw->unit, wlc_hw->now);
 
 		printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
-			    __func__, CHIPID(wlc_hw->sih->chip),
-			    CHIPREV(wlc_hw->sih->chiprev));
+					__func__, wlc_hw->sih->chip,
+					wlc_hw->sih->chiprev);
 
 		WLCNTINCR(wlc->pub->_cnt->psmwds);
 
@@ -427,7 +432,8 @@
 		u32 rfd = R_REG(wlc_hw->osh, &regs->phydebug) & PDBG_RFD;
 #endif
 
-		WL_ERROR(("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n", wlc_hw->unit, rfd));
+		WL_ERROR("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n",
+			 wlc_hw->unit, rfd);
 
 		WLCNTINCR(wlc->pub->_cnt->rfdisable);
 	}
@@ -452,10 +458,10 @@
 /* common low-level watchdog code */
 void wlc_bmac_watchdog(void *arg)
 {
-	wlc_info_t *wlc = (wlc_info_t *) arg;
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_info *wlc = (struct wlc_info *) arg;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 
-	WL_TRACE(("wl%d: wlc_bmac_watchdog\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_watchdog\n", wlc_hw->unit);
 
 	if (!wlc_hw->up)
 		return;
@@ -476,13 +482,13 @@
 }
 
 void
-wlc_bmac_set_chanspec(wlc_hw_info_t *wlc_hw, chanspec_t chanspec, bool mute,
-		      struct txpwr_limits *txpwr)
+wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
+		      bool mute, struct txpwr_limits *txpwr)
 {
 	uint bandunit;
 
-	WL_TRACE(("wl%d: wlc_bmac_set_chanspec 0x%x\n", wlc_hw->unit,
-		  chanspec));
+	WL_TRACE("wl%d: wlc_bmac_set_chanspec 0x%x\n",
+		 wlc_hw->unit, chanspec);
 
 	wlc_hw->chanspec = chanspec;
 
@@ -520,7 +526,8 @@
 	}
 }
 
-int wlc_bmac_revinfo_get(wlc_hw_info_t *wlc_hw, wlc_bmac_revinfo_t *revinfo)
+int wlc_bmac_revinfo_get(struct wlc_hw_info *wlc_hw,
+			 wlc_bmac_revinfo_t *revinfo)
 {
 	si_t *sih = wlc_hw->sih;
 	uint idx;
@@ -558,20 +565,20 @@
 	return 0;
 }
 
-int wlc_bmac_state_get(wlc_hw_info_t *wlc_hw, wlc_bmac_state_t *state)
+int wlc_bmac_state_get(struct wlc_hw_info *wlc_hw, wlc_bmac_state_t *state)
 {
 	state->machwcap = wlc_hw->machwcap;
 
 	return 0;
 }
 
-static bool wlc_bmac_attach_dmapio(wlc_info_t *wlc, uint j, bool wme)
+static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
 {
 	uint i;
 	char name[8];
 	/* ucode host flag 2 needed for pio mode, independent of band and fifo */
 	u16 pio_mhf2 = 0;
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	uint unit = wlc_hw->unit;
 	wlc_tunables_t *tune = wlc->pub->tunables;
 
@@ -581,7 +588,7 @@
 	if (wlc_hw->di[0] == 0) {	/* Init FIFOs */
 		uint addrwidth;
 		int dma_attach_err = 0;
-		osl_t *osh = wlc_hw->osh;
+		struct osl_info *osh = wlc_hw->osh;
 
 		/* Find out the DMA addressing capability and let OS know
 		 * All the channels within one DMA core have 'common-minimum' same
@@ -589,10 +596,10 @@
 		 */
 		addrwidth =
 		    dma_addrwidth(wlc_hw->sih, DMAREG(wlc_hw, DMA_TX, 0));
-		OSL_DMADDRWIDTH(osh, addrwidth);
 
 		if (!wl_alloc_dma_resources(wlc_hw->wlc->wl, addrwidth)) {
-			WL_ERROR(("wl%d: wlc_attach: alloc_dma_resources failed\n", unit));
+			WL_ERROR("wl%d: wlc_attach: alloc_dma_resources failed\n",
+				 unit);
 			return false;
 		}
 
@@ -665,8 +672,7 @@
 /* Cleaner to leave this as if with AP defined */
 
 		if (dma_attach_err) {
-			WL_ERROR(("wl%d: wlc_attach: dma_attach failed\n",
-				  unit));
+			WL_ERROR("wl%d: wlc_attach: dma_attach failed\n", unit);
 			return false;
 		}
 
@@ -684,7 +690,7 @@
 	return true;
 }
 
-static void wlc_bmac_detach_dmapio(wlc_hw_info_t *wlc_hw)
+static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw)
 {
 	uint j;
 
@@ -702,11 +708,11 @@
  *    initialize software state for each core and band
  *    put the whole chip in reset(driver down state), no clock
  */
-int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
-		    bool piomode, osl_t *osh, void *regsva, uint bustype,
-		    void *btparam)
+int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
+		    bool piomode, struct osl_info *osh, void *regsva,
+		    uint bustype, void *btparam)
 {
-	wlc_hw_info_t *wlc_hw;
+	struct wlc_hw_info *wlc_hw;
 	d11regs_t *regs;
 	char *macaddr = NULL;
 	char *vars;
@@ -715,8 +721,8 @@
 	bool wme = false;
 	shared_phy_params_t sha_params;
 
-	WL_TRACE(("wl%d: wlc_bmac_attach: vendor 0x%x device 0x%x\n", unit,
-		  vendor, device));
+	WL_TRACE("wl%d: wlc_bmac_attach: vendor 0x%x device 0x%x\n",
+		 unit, vendor, device);
 
 	ASSERT(sizeof(wlc_d11rxhdr_t) <= WL_HWRXOFF);
 
@@ -729,7 +735,7 @@
 	wlc_hw->band = wlc_hw->bandstate[0];
 	wlc_hw->_piomode = piomode;
 
-	/* populate wlc_hw_info_t with default values  */
+	/* populate struct wlc_hw_info with default values  */
 	wlc_bmac_info_init(wlc_hw);
 
 	/*
@@ -740,7 +746,7 @@
 	wlc_hw->sih = si_attach((uint) device, osh, regsva, bustype, btparam,
 				&wlc_hw->vars, &wlc_hw->vars_size);
 	if (wlc_hw->sih == NULL) {
-		WL_ERROR(("wl%d: wlc_bmac_attach: si_attach failed\n", unit));
+		WL_ERROR("wl%d: wlc_bmac_attach: si_attach failed\n", unit);
 		err = 11;
 		goto fail;
 	}
@@ -760,21 +766,22 @@
 		var = getvar(vars, "vendid");
 		if (var) {
 			vendor = (u16) simple_strtoul(var, NULL, 0);
-			WL_ERROR(("Overriding vendor id = 0x%x\n", vendor));
+			WL_ERROR("Overriding vendor id = 0x%x\n", vendor);
 		}
 		var = getvar(vars, "devid");
 		if (var) {
 			u16 devid = (u16) simple_strtoul(var, NULL, 0);
 			if (devid != 0xffff) {
 				device = devid;
-				WL_ERROR(("Overriding device id = 0x%x\n",
-					  device));
+				WL_ERROR("Overriding device id = 0x%x\n",
+					 device);
 			}
 		}
 
 		/* verify again the device is supported */
 		if (!wlc_chipmatch(vendor, device)) {
-			WL_ERROR(("wl%d: wlc_bmac_attach: Unsupported vendor/device (0x%x/0x%x)\n", unit, vendor, device));
+			WL_ERROR("wl%d: wlc_bmac_attach: Unsupported vendor/device (0x%x/0x%x)\n",
+				 unit, vendor, device);
 			err = 12;
 			goto fail;
 		}
@@ -809,7 +816,8 @@
 	wlc_bmac_corereset(wlc_hw, WLC_USE_COREFLAGS);
 
 	if (!wlc_bmac_validate_chip_access(wlc_hw)) {
-		WL_ERROR(("wl%d: wlc_bmac_attach: validate_chip_access failed\n", unit));
+		WL_ERROR("wl%d: wlc_bmac_attach: validate_chip_access failed\n",
+			 unit);
 		err = 14;
 		goto fail;
 	}
@@ -821,7 +829,8 @@
 		j = BOARDREV_PROMOTED;
 	wlc_hw->boardrev = (u16) j;
 	if (!wlc_validboardtype(wlc_hw)) {
-		WL_ERROR(("wl%d: wlc_bmac_attach: Unsupported Broadcom board type (0x%x)" " or revision level (0x%x)\n", unit, wlc_hw->sih->boardtype, wlc_hw->boardrev));
+		WL_ERROR("wl%d: wlc_bmac_attach: Unsupported Broadcom board type (0x%x) or revision level (0x%x)\n",
+			 unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
 		err = 15;
 		goto fail;
 	}
@@ -833,7 +842,7 @@
 	    || (wlc_hw->boardflags & BFL_NOPLLDOWN))
 		wlc_bmac_pllreq(wlc_hw, true, WLC_PLLREQ_SHARED);
 
-	if ((BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+	if ((wlc_hw->sih->bustype == PCI_BUS)
 	    && (si_pci_war16165(wlc_hw->sih)))
 		wlc->war16165 = true;
 
@@ -844,7 +853,7 @@
 	} else
 		wlc_hw->_nbands = 1;
 
-	if ((CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID))
+	if (wlc_hw->sih->chip == BCM43225_CHIP_ID)
 		wlc_hw->_nbands = 1;
 
 	/* BMAC_NOTE: remove init of pub values when wlc_attach() unconditionally does the
@@ -863,8 +872,8 @@
 	wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
 
 	if (wlc_hw->physhim == NULL) {
-		WL_ERROR(("wl%d: wlc_bmac_attach: wlc_phy_shim_attach failed\n",
-			  unit));
+		WL_ERROR("wl%d: wlc_bmac_attach: wlc_phy_shim_attach failed\n",
+			 unit);
 		err = 25;
 		goto fail;
 	}
@@ -931,7 +940,8 @@
 		wlc_hw->band->pi = wlc_phy_attach(wlc_hw->phy_sh,
 			(void *)regs, wlc_hw->band->bandtype, vars);
 		if (wlc_hw->band->pi == NULL) {
-			WL_ERROR(("wl%d: wlc_bmac_attach: wlc_phy_attach failed\n", unit));
+			WL_ERROR("wl%d: wlc_bmac_attach: wlc_phy_attach failed\n",
+				 unit);
 			err = 17;
 			goto fail;
 		}
@@ -961,7 +971,9 @@
 				goto bad_phy;
 		} else {
  bad_phy:
-			WL_ERROR(("wl%d: wlc_bmac_attach: unsupported phy type/rev (%d/%d)\n", unit, wlc_hw->band->phytype, wlc_hw->band->phyrev));
+			WL_ERROR("wl%d: wlc_bmac_attach: unsupported phy type/rev (%d/%d)\n",
+				 unit,
+				 wlc_hw->band->phytype, wlc_hw->band->phyrev);
 			err = 18;
 			goto fail;
 		}
@@ -993,7 +1005,7 @@
 	wlc_coredisable(wlc_hw);
 
 	/* Match driver "down" state */
-	if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+	if (wlc_hw->sih->bustype == PCI_BUS)
 		si_pci_down(wlc_hw->sih);
 
 	/* register sb interrupt callback functions */
@@ -1016,27 +1028,27 @@
 	/* init etheraddr state variables */
 	macaddr = wlc_get_macaddr(wlc_hw);
 	if (macaddr == NULL) {
-		WL_ERROR(("wl%d: wlc_bmac_attach: macaddr not found\n", unit));
+		WL_ERROR("wl%d: wlc_bmac_attach: macaddr not found\n", unit);
 		err = 21;
 		goto fail;
 	}
 	bcm_ether_atoe(macaddr, &wlc_hw->etheraddr);
-	if (ETHER_ISBCAST((char *)&wlc_hw->etheraddr) ||
-	    ETHER_ISNULLADDR((char *)&wlc_hw->etheraddr)) {
-		WL_ERROR(("wl%d: wlc_bmac_attach: bad macaddr %s\n", unit,
-			  macaddr));
+	if (is_broadcast_ether_addr(wlc_hw->etheraddr.octet) ||
+	    is_zero_ether_addr(wlc_hw->etheraddr.octet)) {
+		WL_ERROR("wl%d: wlc_bmac_attach: bad macaddr %s\n",
+			 unit, macaddr);
 		err = 22;
 		goto fail;
 	}
 
-	WL_ERROR(("%s:: deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
-		  __func__, wlc_hw->deviceid, wlc_hw->_nbands,
-		  wlc_hw->sih->boardtype, macaddr));
+	WL_ERROR("%s:: deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+		 __func__, wlc_hw->deviceid, wlc_hw->_nbands,
+		 wlc_hw->sih->boardtype, macaddr);
 
 	return err;
 
  fail:
-	WL_ERROR(("wl%d: wlc_bmac_attach: failed with err %d\n", unit, err));
+	WL_ERROR("wl%d: wlc_bmac_attach: failed with err %d\n", unit, err);
 	return err;
 }
 
@@ -1045,9 +1057,9 @@
  * may get overrides later in this function
  *  BMAC_NOTES, move low out and resolve the dangling ones
  */
-void wlc_bmac_info_init(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw)
 {
-	wlc_info_t *wlc = wlc_hw->wlc;
+	struct wlc_info *wlc = wlc_hw->wlc;
 
 	/* set default sw macintmask value */
 	wlc->defmacintmask = DEF_MACINTMASK;
@@ -1067,11 +1079,11 @@
 /*
  * low level detach
  */
-int wlc_bmac_detach(wlc_info_t *wlc)
+int wlc_bmac_detach(struct wlc_info *wlc)
 {
 	uint i;
 	wlc_hwband_t *band;
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	int callbacks;
 
 	callbacks = 0;
@@ -1082,7 +1094,7 @@
 		 */
 		si_deregister_intr_callback(wlc_hw->sih);
 
-		if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+		if (wlc_hw->sih->bustype == PCI_BUS)
 			si_pci_sleep(wlc_hw->sih);
 	}
 
@@ -1118,9 +1130,9 @@
 
 }
 
-void wlc_bmac_reset(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_reset(struct wlc_hw_info *wlc_hw)
 {
-	WL_TRACE(("wl%d: wlc_bmac_reset\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_reset\n", wlc_hw->unit);
 
 	WLCNTINCR(wlc_hw->wlc->pub->_cnt->reset);
 
@@ -1135,13 +1147,13 @@
 }
 
 void
-wlc_bmac_init(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
+wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
 			  bool mute) {
 	u32 macintmask;
 	bool fastclk;
-	wlc_info_t *wlc = wlc_hw->wlc;
+	struct wlc_info *wlc = wlc_hw->wlc;
 
-	WL_TRACE(("wl%d: wlc_bmac_init\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_init\n", wlc_hw->unit);
 
 	/* request FAST clock if not on */
 	fastclk = wlc_hw->forcefastclk;
@@ -1186,11 +1198,11 @@
 		wlc_clkctl_clk(wlc_hw, CLK_DYNAMIC);
 }
 
-int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw)
 {
 	uint coremask;
 
-	WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
 
 	ASSERT(wlc_hw->wlc->pub->hw_up && wlc_hw->wlc->macintmask == 0);
 
@@ -1208,7 +1220,7 @@
 	 */
 	coremask = (1 << wlc_hw->wlc->core->coreidx);
 
-	if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+	if (wlc_hw->sih->bustype == PCI_BUS)
 		si_pci_setup(wlc_hw->sih, coremask);
 
 	ASSERT(si_coreid(wlc_hw->sih) == D11_CORE_ID);
@@ -1219,13 +1231,13 @@
 	 */
 	if (wlc_bmac_radio_read_hwdisabled(wlc_hw)) {
 		/* put SB PCI in down state again */
-		if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+		if (wlc_hw->sih->bustype == PCI_BUS)
 			si_pci_down(wlc_hw->sih);
 		wlc_bmac_xtal(wlc_hw, OFF);
 		return BCME_RADIOOFF;
 	}
 
-	if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+	if (wlc_hw->sih->bustype == PCI_BUS)
 		si_pci_up(wlc_hw->sih);
 
 	/* reset the d11 core */
@@ -1234,9 +1246,9 @@
 	return 0;
 }
 
-int wlc_bmac_up_finish(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw)
 {
-	WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
 
 	wlc_hw->up = true;
 	wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
@@ -1248,12 +1260,12 @@
 	return 0;
 }
 
-int wlc_bmac_down_prep(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw)
 {
 	bool dev_gone;
 	uint callbacks = 0;
 
-	WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
 
 	if (!wlc_hw->up)
 		return callbacks;
@@ -1276,12 +1288,12 @@
 	return callbacks;
 }
 
-int wlc_bmac_down_finish(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw)
 {
 	uint callbacks = 0;
 	bool dev_gone;
 
-	WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
 
 	if (!wlc_hw->up)
 		return callbacks;
@@ -1311,7 +1323,7 @@
 
 		/* turn off primary xtal and pll */
 		if (!wlc_hw->noreset) {
-			if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS)
+			if (wlc_hw->sih->bustype == PCI_BUS)
 				si_pci_down(wlc_hw->sih);
 			wlc_bmac_xtal(wlc_hw, OFF);
 		}
@@ -1320,7 +1332,7 @@
 	return callbacks;
 }
 
-void wlc_bmac_wait_for_wake(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_wait_for_wake(struct wlc_hw_info *wlc_hw)
 {
 	if (D11REV_IS(wlc_hw->corerev, 4))	/* no slowclock */
 		udelay(5);
@@ -1336,29 +1348,30 @@
 	ASSERT(wlc_bmac_read_shm(wlc_hw, M_UCODE_DBGST) != DBGST_ASLEEP);
 }
 
-void wlc_bmac_hw_etheraddr(wlc_hw_info_t *wlc_hw, struct ether_addr *ea)
+void wlc_bmac_hw_etheraddr(struct wlc_hw_info *wlc_hw, struct ether_addr *ea)
 {
-	bcopy(&wlc_hw->etheraddr, ea, ETHER_ADDR_LEN);
+	bcopy(&wlc_hw->etheraddr, ea, ETH_ALEN);
 }
 
-void wlc_bmac_set_hw_etheraddr(wlc_hw_info_t *wlc_hw, struct ether_addr *ea)
+void wlc_bmac_set_hw_etheraddr(struct wlc_hw_info *wlc_hw,
+			       struct ether_addr *ea)
 {
-	bcopy(ea, &wlc_hw->etheraddr, ETHER_ADDR_LEN);
+	bcopy(ea, &wlc_hw->etheraddr, ETH_ALEN);
 }
 
-int wlc_bmac_bandtype(wlc_hw_info_t *wlc_hw)
+int wlc_bmac_bandtype(struct wlc_hw_info *wlc_hw)
 {
 	return wlc_hw->band->bandtype;
 }
 
-void *wlc_cur_phy(wlc_info_t *wlc)
+void *wlc_cur_phy(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	return (void *)wlc_hw->band->pi;
 }
 
 /* control chip clock to save power, enable dynamic clock or force fast clock */
-static void wlc_clkctl_clk(wlc_hw_info_t *wlc_hw, uint mode)
+static void wlc_clkctl_clk(struct wlc_hw_info *wlc_hw, uint mode)
 {
 	if (PMUCTL_ENAB(wlc_hw->sih)) {
 		/* new chips with PMU, CCS_FORCEHT will distribute the HT clock on backplane,
@@ -1455,11 +1468,11 @@
 
 /* set initial host flags value */
 static void
-wlc_mhfdef(wlc_info_t *wlc, u16 *mhfs, u16 mhf2_init)
+wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 
-	bzero(mhfs, sizeof(u16) * MHFMAX);
+	memset(mhfs, 0, MHFMAX * sizeof(u16));
 
 	mhfs[MHF2] |= mhf2_init;
 
@@ -1485,7 +1498,7 @@
  *                   WLC_BAND_ALL  <--- All bands
  */
 void
-wlc_bmac_mhf(wlc_hw_info_t *wlc_hw, u8 idx, u16 mask, u16 val,
+wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
 	     int bands)
 {
 	u16 save;
@@ -1539,7 +1552,7 @@
 	}
 }
 
-u16 wlc_bmac_mhf_get(wlc_hw_info_t *wlc_hw, u8 idx, int bands)
+u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands)
 {
 	wlc_hwband_t *band;
 	ASSERT(idx < MHFMAX);
@@ -1565,7 +1578,7 @@
 	return band->mhfs[idx];
 }
 
-static void wlc_write_mhf(wlc_hw_info_t *wlc_hw, u16 *mhfs)
+static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs)
 {
 	u8 idx;
 	u16 addr[] = {
@@ -1583,7 +1596,7 @@
 /* set the maccontrol register to desired reset state and
  * initialize the sw cache of the register
  */
-static void wlc_mctrl_reset(wlc_hw_info_t *wlc_hw)
+static void wlc_mctrl_reset(struct wlc_hw_info *wlc_hw)
 {
 	/* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */
 	wlc_hw->maccontrol = 0;
@@ -1594,7 +1607,7 @@
 }
 
 /* set or clear maccontrol bits */
-void wlc_bmac_mctrl(wlc_hw_info_t *wlc_hw, u32 mask, u32 val)
+void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val)
 {
 	u32 maccontrol;
 	u32 new_maccontrol;
@@ -1616,7 +1629,7 @@
 }
 
 /* write the software state of maccontrol and overrides to the maccontrol register */
-static void wlc_mctrl_write(wlc_hw_info_t *wlc_hw)
+static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw)
 {
 	u32 maccontrol = wlc_hw->maccontrol;
 
@@ -1633,7 +1646,7 @@
 	W_REG(wlc_hw->osh, &wlc_hw->regs->maccontrol, maccontrol);
 }
 
-void wlc_ucode_wake_override_set(wlc_hw_info_t *wlc_hw, u32 override_bit)
+void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw, u32 override_bit)
 {
 	ASSERT((wlc_hw->wake_override & override_bit) == 0);
 
@@ -1650,7 +1663,7 @@
 	return;
 }
 
-void wlc_ucode_wake_override_clear(wlc_hw_info_t *wlc_hw, u32 override_bit)
+void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw, u32 override_bit)
 {
 	ASSERT(wlc_hw->wake_override & override_bit);
 
@@ -1671,7 +1684,7 @@
  * STA               0              1 <--- This will ensure no beacons
  * IBSS              0              0
  */
-static void wlc_ucode_mute_override_set(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw)
 {
 	wlc_hw->mute_override = 1;
 
@@ -1687,7 +1700,7 @@
 }
 
 /* Clear the override on AP and INFRA bits */
-static void wlc_ucode_mute_override_clear(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw)
 {
 	if (wlc_hw->mute_override == 0)
 		return;
@@ -1707,16 +1720,16 @@
  * Write a MAC address to the rcmta structure
  */
 void
-wlc_bmac_set_rcmta(wlc_hw_info_t *wlc_hw, int idx,
+wlc_bmac_set_rcmta(struct wlc_hw_info *wlc_hw, int idx,
 		   const struct ether_addr *addr)
 {
 	d11regs_t *regs = wlc_hw->regs;
 	volatile u16 *objdata16 = (volatile u16 *)&regs->objdata;
 	u32 mac_hm;
 	u16 mac_l;
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
 
 	ASSERT(wlc_hw->corerev > 4);
 
@@ -1740,16 +1753,16 @@
  * Write a MAC address to the given match reg offset in the RXE match engine.
  */
 void
-wlc_bmac_set_addrmatch(wlc_hw_info_t *wlc_hw, int match_reg_offset,
+wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw, int match_reg_offset,
 		       const struct ether_addr *addr)
 {
 	d11regs_t *regs;
 	u16 mac_l;
 	u16 mac_m;
 	u16 mac_h;
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: wlc_bmac_set_addrmatch\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_set_addrmatch\n", wlc_hw->unit);
 
 	ASSERT((match_reg_offset < RCM_SIZE) || (wlc_hw->corerev == 4));
 
@@ -1769,7 +1782,7 @@
 }
 
 void
-wlc_bmac_write_template_ram(wlc_hw_info_t *wlc_hw, int offset, int len,
+wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset, int len,
 			    void *buf)
 {
 	d11regs_t *regs;
@@ -1778,9 +1791,9 @@
 #ifdef IL_BIGENDIAN
 	volatile u16 *dptr = NULL;
 #endif				/* IL_BIGENDIAN */
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: wlc_bmac_write_template_ram\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_write_template_ram\n", wlc_hw->unit);
 
 	regs = wlc_hw->regs;
 	osh = wlc_hw->osh;
@@ -1812,9 +1825,9 @@
 	}
 }
 
-void wlc_bmac_set_cwmin(wlc_hw_info_t *wlc_hw, u16 newmin)
+void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 
 	osh = wlc_hw->osh;
 	wlc_hw->band->CWmin = newmin;
@@ -1824,9 +1837,9 @@
 	W_REG(osh, &wlc_hw->regs->objdata, newmin);
 }
 
-void wlc_bmac_set_cwmax(wlc_hw_info_t *wlc_hw, u16 newmax)
+void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 
 	osh = wlc_hw->osh;
 	wlc_hw->band->CWmax = newmax;
@@ -1836,7 +1849,7 @@
 	W_REG(osh, &wlc_hw->regs->objdata, newmax);
 }
 
-void wlc_bmac_bw_set(wlc_hw_info_t *wlc_hw, u16 bw)
+void wlc_bmac_bw_set(struct wlc_hw_info *wlc_hw, u16 bw)
 {
 	bool fastclk;
 	u32 tmp;
@@ -1861,7 +1874,7 @@
 }
 
 static void
-wlc_write_hw_bcntemplate0(wlc_hw_info_t *wlc_hw, void *bcn, int len)
+wlc_write_hw_bcntemplate0(struct wlc_hw_info *wlc_hw, void *bcn, int len)
 {
 	d11regs_t *regs = wlc_hw->regs;
 
@@ -1875,7 +1888,7 @@
 }
 
 static void
-wlc_write_hw_bcntemplate1(wlc_hw_info_t *wlc_hw, void *bcn, int len)
+wlc_write_hw_bcntemplate1(struct wlc_hw_info *wlc_hw, void *bcn, int len)
 {
 	d11regs_t *regs = wlc_hw->regs;
 
@@ -1890,7 +1903,7 @@
 
 /* mac is assumed to be suspended at this point */
 void
-wlc_bmac_write_hw_bcntemplates(wlc_hw_info_t *wlc_hw, void *bcn, int len,
+wlc_bmac_write_hw_bcntemplates(struct wlc_hw_info *wlc_hw, void *bcn, int len,
 			       bool both)
 {
 	d11regs_t *regs = wlc_hw->regs;
@@ -1911,10 +1924,10 @@
 	}
 }
 
-static void WLBANDINITFN(wlc_bmac_upd_synthpu) (wlc_hw_info_t *wlc_hw)
+static void WLBANDINITFN(wlc_bmac_upd_synthpu) (struct wlc_hw_info *wlc_hw)
 {
 	u16 v;
-	wlc_info_t *wlc = wlc_hw->wlc;
+	struct wlc_info *wlc = wlc_hw->wlc;
 	/* update SYNTHPU_DLY */
 
 	if (WLCISLCNPHY(wlc->band)) {
@@ -1930,12 +1943,12 @@
 
 /* band-specific init */
 static void
-WLBANDINITFN(wlc_bmac_bsinit) (wlc_info_t *wlc, chanspec_t chanspec)
+WLBANDINITFN(wlc_bmac_bsinit) (struct wlc_info *wlc, chanspec_t chanspec)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 
-	WL_TRACE(("wl%d: wlc_bmac_bsinit: bandunit %d\n", wlc_hw->unit,
-		  wlc_hw->band->bandunit));
+	WL_TRACE("wl%d: wlc_bmac_bsinit: bandunit %d\n",
+		 wlc_hw->unit, wlc_hw->band->bandunit);
 
 	/* sanity check */
 	if (PHY_TYPE(R_REG(wlc_hw->osh, &wlc_hw->regs->phyversion)) !=
@@ -1969,9 +1982,9 @@
 	wlc_bmac_upd_synthpu(wlc_hw);
 }
 
-void wlc_bmac_core_phy_clk(wlc_hw_info_t *wlc_hw, bool clk)
+void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk)
 {
-	WL_TRACE(("wl%d: wlc_bmac_core_phy_clk: clk %d\n", wlc_hw->unit, clk));
+	WL_TRACE("wl%d: wlc_bmac_core_phy_clk: clk %d\n", wlc_hw->unit, clk);
 
 	wlc_hw->phyclk = clk;
 
@@ -1994,9 +2007,9 @@
 }
 
 /* Perform a soft reset of the PHY PLL */
-void wlc_bmac_core_phypll_reset(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw)
 {
-	WL_TRACE(("wl%d: wlc_bmac_core_phypll_reset\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_core_phypll_reset\n", wlc_hw->unit);
 
 	si_corereg(wlc_hw->sih, SI_CC_IDX,
 		   offsetof(chipcregs_t, chipcontrol_addr), ~0, 0);
@@ -2015,7 +2028,7 @@
 /* light way to turn on phy clock without reset for NPHY only
  *  refer to wlc_bmac_core_phy_clk for full version
  */
-void wlc_bmac_phyclk_fgc(wlc_hw_info_t *wlc_hw, bool clk)
+void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk)
 {
 	/* support(necessary for NPHY and HYPHY) only */
 	if (!WLCISNPHY(wlc_hw->band))
@@ -2028,7 +2041,7 @@
 
 }
 
-void wlc_bmac_macphyclk_set(wlc_hw_info_t *wlc_hw, bool clk)
+void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk)
 {
 	if (ON == clk)
 		si_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
@@ -2036,13 +2049,13 @@
 		si_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
 }
 
-void wlc_bmac_phy_reset(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
 {
 	wlc_phy_t *pih = wlc_hw->band->pi;
 	u32 phy_bw_clkbits;
 	bool phy_in_reset = false;
 
-	WL_TRACE(("wl%d: wlc_bmac_phy_reset\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_phy_reset\n", wlc_hw->unit);
 
 	if (pih == NULL)
 		return;
@@ -2080,9 +2093,9 @@
 
 /* switch to and initialize new band */
 static void
-WLBANDINITFN(wlc_bmac_setband) (wlc_hw_info_t *wlc_hw, uint bandunit,
+WLBANDINITFN(wlc_bmac_setband) (struct wlc_hw_info *wlc_hw, uint bandunit,
 				chanspec_t chanspec) {
-	wlc_info_t *wlc = wlc_hw->wlc;
+	struct wlc_info *wlc = wlc_hw->wlc;
 	u32 macintmask;
 
 	ASSERT(NBANDS_HW(wlc_hw) > 1);
@@ -2122,9 +2135,9 @@
 }
 
 /* low-level band switch utility routine */
-void WLBANDINITFN(wlc_setxband) (wlc_hw_info_t *wlc_hw, uint bandunit)
+void WLBANDINITFN(wlc_setxband) (struct wlc_hw_info *wlc_hw, uint bandunit)
 {
-	WL_TRACE(("wl%d: wlc_setxband: bandunit %d\n", wlc_hw->unit, bandunit));
+	WL_TRACE("wl%d: wlc_setxband: bandunit %d\n", wlc_hw->unit, bandunit);
 
 	wlc_hw->band = wlc_hw->bandstate[bandunit];
 
@@ -2138,19 +2151,19 @@
 	}
 }
 
-static bool wlc_isgoodchip(wlc_hw_info_t *wlc_hw)
+static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw)
 {
 
 	/* reject unsupported corerev */
 	if (!VALID_COREREV(wlc_hw->corerev)) {
-		WL_ERROR(("unsupported core rev %d\n", wlc_hw->corerev));
+		WL_ERROR("unsupported core rev %d\n", wlc_hw->corerev);
 		return false;
 	}
 
 	return true;
 }
 
-static bool wlc_validboardtype(wlc_hw_info_t *wlc_hw)
+static bool wlc_validboardtype(struct wlc_hw_info *wlc_hw)
 {
 	bool goodboard = true;
 	uint boardrev = wlc_hw->boardrev;
@@ -2174,7 +2187,7 @@
 	return goodboard;
 }
 
-static char *wlc_get_macaddr(wlc_hw_info_t *wlc_hw)
+static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw)
 {
 	const char *varname = "macaddr";
 	char *macaddr;
@@ -2191,7 +2204,8 @@
 
 	macaddr = getvar(wlc_hw->vars, varname);
 	if (macaddr == NULL) {
-		WL_ERROR(("wl%d: wlc_get_macaddr: macaddr getvar(%s) not found\n", wlc_hw->unit, varname));
+		WL_ERROR("wl%d: wlc_get_macaddr: macaddr getvar(%s) not found\n",
+			 wlc_hw->unit, varname);
 	}
 
 	return macaddr;
@@ -2203,7 +2217,7 @@
  * this function could be called when driver is down and w/o clock
  * it operates on different registers depending on corerev and boardflag.
  */
-bool wlc_bmac_radio_read_hwdisabled(wlc_hw_info_t *wlc_hw)
+bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw)
 {
 	bool v, clk, xtal;
 	u32 resetbits = 0, flags = 0;
@@ -2226,9 +2240,9 @@
 			flags |= SICF_PCLKE;
 
 		/* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
-		if ((CHIPID(wlc_hw->sih->chip) == BCM43224_CHIP_ID) ||
-		    (CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID) ||
-		    (CHIPID(wlc_hw->sih->chip) == BCM43421_CHIP_ID))
+		if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+		    (wlc_hw->sih->chip == BCM43225_CHIP_ID) ||
+		    (wlc_hw->sih->chip == BCM43421_CHIP_ID))
 			wlc_hw->regs =
 			    (d11regs_t *) si_setcore(wlc_hw->sih, D11_CORE_ID,
 						     0);
@@ -2249,12 +2263,12 @@
 }
 
 /* Initialize just the hardware when coming out of POR or S3/S5 system states */
-void wlc_bmac_hw_up(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw)
 {
 	if (wlc_hw->wlc->pub->hw_up)
 		return;
 
-	WL_TRACE(("wl%d: %s:\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc_hw->unit, __func__);
 
 	/*
 	 * Enable pll and xtal, initialize the power control registers,
@@ -2264,13 +2278,13 @@
 	si_clkctl_init(wlc_hw->sih);
 	wlc_clkctl_clk(wlc_hw, CLK_FAST);
 
-	if (BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS) {
+	if (wlc_hw->sih->bustype == PCI_BUS) {
 		si_pci_fixcfg(wlc_hw->sih);
 
 		/* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
-		if ((CHIPID(wlc_hw->sih->chip) == BCM43224_CHIP_ID) ||
-		    (CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID) ||
-		    (CHIPID(wlc_hw->sih->chip) == BCM43421_CHIP_ID))
+		if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+		    (wlc_hw->sih->chip == BCM43225_CHIP_ID) ||
+		    (wlc_hw->sih->chip == BCM43421_CHIP_ID))
 			wlc_hw->regs =
 			    (d11regs_t *) si_setcore(wlc_hw->sih, D11_CORE_ID,
 						     0);
@@ -2283,7 +2297,7 @@
 	wlc_hw->wlc->pub->hw_up = true;
 
 	if ((wlc_hw->boardflags & BFL_FEM)
-	    && (CHIPID(wlc_hw->sih->chip) == BCM4313_CHIP_ID)) {
+	    && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
 		if (!
 		    (wlc_hw->boardrev >= 0x1250
 		     && (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -2291,10 +2305,10 @@
 	}
 }
 
-static bool wlc_dma_rxreset(wlc_hw_info_t *wlc_hw, uint fifo)
+static bool wlc_dma_rxreset(struct wlc_hw_info *wlc_hw, uint fifo)
 {
-	hnddma_t *di = wlc_hw->di[fifo];
-	osl_t *osh;
+	struct hnddma_pub *di = wlc_hw->di[fifo];
+	struct osl_info *osh;
 
 	if (D11REV_LT(wlc_hw->corerev, 12)) {
 		bool rxidle = true;
@@ -2309,7 +2323,8 @@
 			 50000);
 
 		if (!rxidle && (rcv_frm_cnt != 0))
-			WL_ERROR(("wl%d: %s: rxdma[%d] not idle && rcv_frm_cnt(%d) not zero\n", wlc_hw->unit, __func__, fifo, rcv_frm_cnt));
+			WL_ERROR("wl%d: %s: rxdma[%d] not idle && rcv_frm_cnt(%d) not zero\n",
+				 wlc_hw->unit, __func__, fifo, rcv_frm_cnt);
 		mdelay(2);
 	}
 
@@ -2324,7 +2339,7 @@
  *   clear software macintstatus for fresh new start
  * one testing hack wlc_hw->noreset will bypass the d11/phy reset
  */
-void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags)
+void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags)
 {
 	d11regs_t *regs;
 	uint i;
@@ -2334,7 +2349,7 @@
 	if (flags == WLC_USE_COREFLAGS)
 		flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
 
-	WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
 
 	regs = wlc_hw->regs;
 
@@ -2347,17 +2362,20 @@
 	if (si_iscoreup(wlc_hw->sih)) {
 		for (i = 0; i < NFIFO; i++)
 			if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) {
-				WL_ERROR(("wl%d: %s: dma_txreset[%d]: cannot stop dma\n", wlc_hw->unit, __func__, i));
+				WL_ERROR("wl%d: %s: dma_txreset[%d]: cannot stop dma\n",
+					 wlc_hw->unit, __func__, i);
 			}
 
 		if ((wlc_hw->di[RX_FIFO])
 		    && (!wlc_dma_rxreset(wlc_hw, RX_FIFO))) {
-			WL_ERROR(("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n", wlc_hw->unit, __func__, RX_FIFO));
+			WL_ERROR("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n",
+				 wlc_hw->unit, __func__, RX_FIFO);
 		}
 		if (D11REV_IS(wlc_hw->corerev, 4)
 		    && wlc_hw->di[RX_TXSTATUS_FIFO]
 		    && (!wlc_dma_rxreset(wlc_hw, RX_TXSTATUS_FIFO))) {
-			WL_ERROR(("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n", wlc_hw->unit, __func__, RX_TXSTATUS_FIFO));
+			WL_ERROR("wl%d: %s: dma_rxreset[%d]: cannot stop dma\n",
+				 wlc_hw->unit, __func__, RX_TXSTATUS_FIFO);
 		}
 	}
 	/* if noreset, just stop the psm and return */
@@ -2413,14 +2431,14 @@
  * txfifo sizes needs to be modified(increased) since the newer cores
  * have more memory.
  */
-static void wlc_corerev_fifofixup(wlc_hw_info_t *wlc_hw)
+static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw)
 {
 	d11regs_t *regs = wlc_hw->regs;
 	u16 fifo_nu;
 	u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
 	u16 txfifo_def, txfifo_def1;
 	u16 txfifo_cmd;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	if (D11REV_LT(wlc_hw->corerev, 9))
 		goto exit;
@@ -2473,22 +2491,22 @@
  *   config other core registers
  *   init dma
  */
-static void wlc_coreinit(wlc_info_t *wlc)
+static void wlc_coreinit(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	d11regs_t *regs;
 	u32 sflags;
 	uint bcnint_us;
 	uint i = 0;
 	bool fifosz_fixup = false;
-	osl_t *osh;
+	struct osl_info *osh;
 	int err = 0;
 	u16 buf[NFIFO];
 
 	regs = wlc_hw->regs;
 	osh = wlc_hw->osh;
 
-	WL_TRACE(("wl%d: wlc_coreinit\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_coreinit\n", wlc_hw->unit);
 
 	/* reset PSM */
 	wlc_bmac_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
@@ -2511,8 +2529,8 @@
 	SPINWAIT(((R_REG(osh, &regs->macintstatus) & MI_MACSSPNDD) == 0),
 		 1000 * 1000);
 	if ((R_REG(osh, &regs->macintstatus) & MI_MACSSPNDD) == 0)
-		WL_ERROR(("wl%d: wlc_coreinit: ucode did not self-suspend!\n",
-			  wlc_hw->unit));
+		WL_ERROR("wl%d: wlc_coreinit: ucode did not self-suspend!\n",
+			 wlc_hw->unit);
 
 	wlc_gpio_init(wlc);
 
@@ -2522,18 +2540,18 @@
 		if (WLCISNPHY(wlc_hw->band))
 			wlc_write_inits(wlc_hw, d11n0initvals16);
 		else
-			WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
-				  __func__, wlc_hw->unit, wlc_hw->corerev));
+			WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+				 __func__, wlc_hw->unit, wlc_hw->corerev);
 	} else if (D11REV_IS(wlc_hw->corerev, 24)) {
 		if (WLCISLCNPHY(wlc_hw->band)) {
 			wlc_write_inits(wlc_hw, d11lcn0initvals24);
 		} else {
-			WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
-				  __func__, wlc_hw->unit, wlc_hw->corerev));
+			WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+				 __func__, wlc_hw->unit, wlc_hw->corerev);
 		}
 	} else {
-		WL_ERROR(("%s: wl%d: unsupported corerev %d\n",
-			  __func__, wlc_hw->unit, wlc_hw->corerev));
+		WL_ERROR("%s: wl%d: unsupported corerev %d\n",
+			 __func__, wlc_hw->unit, wlc_hw->corerev);
 	}
 
 	/* For old ucode, txfifo sizes needs to be modified(increased) for Corerev >= 9 */
@@ -2575,7 +2593,8 @@
 		err = -1;
 	}
 	if (err != 0) {
-		WL_ERROR(("wlc_coreinit: txfifo mismatch: ucode size %d driver size %d index %d\n", buf[i], wlc_hw->xmtfifo_sz[i], i));
+		WL_ERROR("wlc_coreinit: txfifo mismatch: ucode size %d driver size %d index %d\n",
+			 buf[i], wlc_hw->xmtfifo_sz[i], i);
 		/* DO NOT ASSERT corerev < 4 even there is a mismatch
 		 * shmem, since driver don't overwrite those chip and
 		 * ucode initialize data will be used.
@@ -2684,15 +2703,15 @@
  *  - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
  */
 
-void wlc_bmac_switch_macfreq(wlc_hw_info_t *wlc_hw, u8 spurmode)
+void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode)
 {
 	d11regs_t *regs;
-	osl_t *osh;
+	struct osl_info *osh;
 	regs = wlc_hw->regs;
 	osh = wlc_hw->osh;
 
-	if ((CHIPID(wlc_hw->sih->chip) == BCM43224_CHIP_ID) ||
-	    (CHIPID(wlc_hw->sih->chip) == BCM43225_CHIP_ID)) {
+	if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
+	    (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
 		if (spurmode == WL_SPURAVOID_ON2) {	/* 126Mhz */
 			W_REG(osh, &regs->tsf_clk_frac_l, 0x2082);
 			W_REG(osh, &regs->tsf_clk_frac_h, 0x8);
@@ -2715,12 +2734,12 @@
 }
 
 /* Initialize GPIOs that are controlled by D11 core */
-static void wlc_gpio_init(wlc_info_t *wlc)
+static void wlc_gpio_init(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	d11regs_t *regs;
 	u32 gc, gm;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	regs = wlc_hw->regs;
 	osh = wlc_hw->osh;
@@ -2780,9 +2799,9 @@
 	si_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
 }
 
-static void wlc_ucode_download(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_download(struct wlc_hw_info *wlc_hw)
 {
-	wlc_info_t *wlc;
+	struct wlc_info *wlc;
 	wlc = wlc_hw->wlc;
 
 	if (wlc_hw->ucode_loaded)
@@ -2794,30 +2813,30 @@
 					bcm43xx_16_mimosz);
 			wlc_hw->ucode_loaded = true;
 		} else
-			WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
-				  __func__, wlc_hw->unit, wlc_hw->corerev));
+			WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+				 __func__, wlc_hw->unit, wlc_hw->corerev);
 	} else if (D11REV_IS(wlc_hw->corerev, 24)) {
 		if (WLCISLCNPHY(wlc_hw->band)) {
 			wlc_ucode_write(wlc_hw, bcm43xx_24_lcn,
 					bcm43xx_24_lcnsz);
 			wlc_hw->ucode_loaded = true;
 		} else {
-			WL_ERROR(("%s: wl%d: unsupported phy in corerev %d\n",
-				  __func__, wlc_hw->unit, wlc_hw->corerev));
+			WL_ERROR("%s: wl%d: unsupported phy in corerev %d\n",
+				 __func__, wlc_hw->unit, wlc_hw->corerev);
 		}
 	}
 }
 
-static void wlc_ucode_write(wlc_hw_info_t *wlc_hw, const u32 ucode[],
+static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
 			      const uint nbytes) {
-	osl_t *osh;
+	struct osl_info *osh;
 	d11regs_t *regs = wlc_hw->regs;
 	uint i;
 	uint count;
 
 	osh = wlc_hw->osh;
 
-	WL_TRACE(("wl%d: wlc_ucode_write\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_ucode_write\n", wlc_hw->unit);
 
 	ASSERT(IS_ALIGNED(nbytes, sizeof(u32)));
 
@@ -2829,13 +2848,13 @@
 		W_REG(osh, &regs->objdata, ucode[i]);
 }
 
-static void wlc_write_inits(wlc_hw_info_t *wlc_hw, const d11init_t *inits)
+static void wlc_write_inits(struct wlc_hw_info *wlc_hw, const d11init_t *inits)
 {
 	int i;
-	osl_t *osh;
+	struct osl_info *osh;
 	volatile u8 *base;
 
-	WL_TRACE(("wl%d: wlc_write_inits\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_write_inits\n", wlc_hw->unit);
 
 	osh = wlc_hw->osh;
 	base = (volatile u8 *)wlc_hw->regs;
@@ -2852,7 +2871,7 @@
 	}
 }
 
-static void wlc_ucode_txant_set(wlc_hw_info_t *wlc_hw)
+static void wlc_ucode_txant_set(struct wlc_hw_info *wlc_hw)
 {
 	u16 phyctl;
 	u16 phytxant = wlc_hw->bmac_phytxant;
@@ -2869,7 +2888,7 @@
 	wlc_bmac_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
 }
 
-void wlc_bmac_txant_set(wlc_hw_info_t *wlc_hw, u16 phytxant)
+void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant)
 {
 	/* update sw state */
 	wlc_hw->bmac_phytxant = phytxant;
@@ -2881,12 +2900,12 @@
 
 }
 
-u16 wlc_bmac_get_txant(wlc_hw_info_t *wlc_hw)
+u16 wlc_bmac_get_txant(struct wlc_hw_info *wlc_hw)
 {
 	return (u16) wlc_hw->wlc->stf->txant;
 }
 
-void wlc_bmac_antsel_type_set(wlc_hw_info_t *wlc_hw, u8 antsel_type)
+void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw, u8 antsel_type)
 {
 	wlc_hw->antsel_type = antsel_type;
 
@@ -2894,7 +2913,7 @@
 	wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
 }
 
-void wlc_bmac_fifoerrors(wlc_hw_info_t *wlc_hw)
+void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw)
 {
 	bool fatal = false;
 	uint unit;
@@ -2911,44 +2930,45 @@
 		if (!intstatus)
 			continue;
 
-		WL_TRACE(("wl%d: wlc_bmac_fifoerrors: intstatus%d 0x%x\n", unit,
-			  idx, intstatus));
+		WL_TRACE("wl%d: wlc_bmac_fifoerrors: intstatus%d 0x%x\n",
+			 unit, idx, intstatus);
 
 		if (intstatus & I_RO) {
-			WL_ERROR(("wl%d: fifo %d: receive fifo overflow\n",
-				  unit, idx));
+			WL_ERROR("wl%d: fifo %d: receive fifo overflow\n",
+				 unit, idx);
 			WLCNTINCR(wlc_hw->wlc->pub->_cnt->rxoflo);
 			fatal = true;
 		}
 
 		if (intstatus & I_PC) {
-			WL_ERROR(("wl%d: fifo %d: descriptor error\n", unit,
-				  idx));
+			WL_ERROR("wl%d: fifo %d: descriptor error\n",
+				 unit, idx);
 			WLCNTINCR(wlc_hw->wlc->pub->_cnt->dmade);
 			fatal = true;
 		}
 
 		if (intstatus & I_PD) {
-			WL_ERROR(("wl%d: fifo %d: data error\n", unit, idx));
+			WL_ERROR("wl%d: fifo %d: data error\n", unit, idx);
 			WLCNTINCR(wlc_hw->wlc->pub->_cnt->dmada);
 			fatal = true;
 		}
 
 		if (intstatus & I_DE) {
-			WL_ERROR(("wl%d: fifo %d: descriptor protocol error\n",
-				  unit, idx));
+			WL_ERROR("wl%d: fifo %d: descriptor protocol error\n",
+				 unit, idx);
 			WLCNTINCR(wlc_hw->wlc->pub->_cnt->dmape);
 			fatal = true;
 		}
 
 		if (intstatus & I_RU) {
-			WL_ERROR(("wl%d: fifo %d: receive descriptor underflow\n", unit, idx));
+			WL_ERROR("wl%d: fifo %d: receive descriptor underflow\n",
+				 idx, unit);
 			WLCNTINCR(wlc_hw->wlc->pub->_cnt->rxuflo[idx]);
 		}
 
 		if (intstatus & I_XU) {
-			WL_ERROR(("wl%d: fifo %d: transmit fifo underflow\n",
-				  idx, unit));
+			WL_ERROR("wl%d: fifo %d: transmit fifo underflow\n",
+				 idx, unit);
 			WLCNTINCR(wlc_hw->wlc->pub->_cnt->txuflo);
 			fatal = true;
 		}
@@ -2962,9 +2982,9 @@
 	}
 }
 
-void wlc_intrson(wlc_info_t *wlc)
+void wlc_intrson(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	ASSERT(wlc->defmacintmask);
 	wlc->macintmask = wlc->defmacintmask;
 	W_REG(wlc_hw->osh, &wlc_hw->regs->macintmask, wlc->macintmask);
@@ -2975,7 +2995,7 @@
  *  but also because per-port code may require sync with valid interrupt.
  */
 
-static u32 wlc_wlintrsoff(wlc_info_t *wlc)
+static u32 wlc_wlintrsoff(struct wlc_info *wlc)
 {
 	if (!wlc->hw->up)
 		return 0;
@@ -2983,7 +3003,7 @@
 	return wl_intrsoff(wlc->wl);
 }
 
-static void wlc_wlintrsrestore(wlc_info_t *wlc, u32 macintmask)
+static void wlc_wlintrsrestore(struct wlc_info *wlc, u32 macintmask)
 {
 	if (!wlc->hw->up)
 		return;
@@ -2991,9 +3011,9 @@
 	wl_intrsrestore(wlc->wl, macintmask);
 }
 
-u32 wlc_intrsoff(wlc_info_t *wlc)
+u32 wlc_intrsoff(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	u32 macintmask;
 
 	if (!wlc_hw->clk)
@@ -3010,9 +3030,9 @@
 	return wlc->macintstatus ? 0 : macintmask;
 }
 
-void wlc_intrsrestore(wlc_info_t *wlc, u32 macintmask)
+void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	if (!wlc_hw->clk)
 		return;
 
@@ -3020,7 +3040,7 @@
 	W_REG(wlc_hw->osh, &wlc_hw->regs->macintmask, wlc->macintmask);
 }
 
-void wlc_bmac_mute(wlc_hw_info_t *wlc_hw, bool on, mbool flags)
+void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags)
 {
 	struct ether_addr null_ether_addr = { {0, 0, 0, 0, 0, 0} };
 
@@ -3056,12 +3076,12 @@
 		wlc_ucode_mute_override_clear(wlc_hw);
 }
 
-void wlc_bmac_set_deaf(wlc_hw_info_t *wlc_hw, bool user_flag)
+void wlc_bmac_set_deaf(struct wlc_hw_info *wlc_hw, bool user_flag)
 {
 	wlc_phy_set_deaf(wlc_hw->band->pi, user_flag);
 }
 
-int wlc_bmac_xmtfifo_sz_get(wlc_hw_info_t *wlc_hw, uint fifo, uint *blocks)
+int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo, uint *blocks)
 {
 	if (fifo >= NFIFO)
 		return BCME_RANGE;
@@ -3071,7 +3091,7 @@
 	return 0;
 }
 
-int wlc_bmac_xmtfifo_sz_set(wlc_hw_info_t *wlc_hw, uint fifo, uint blocks)
+int wlc_bmac_xmtfifo_sz_set(struct wlc_hw_info *wlc_hw, uint fifo, uint blocks)
 {
 	if (fifo >= NFIFO || blocks > 299)
 		return BCME_RANGE;
@@ -3091,7 +3111,7 @@
  * be pulling data into a tx fifo, by the time the MAC acks the suspend
  * request.
  */
-bool wlc_bmac_tx_fifo_suspended(wlc_hw_info_t *wlc_hw, uint tx_fifo)
+bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw, uint tx_fifo)
 {
 	/* check that a suspend has been requested and is no longer pending */
 
@@ -3110,7 +3130,7 @@
 	return false;
 }
 
-void wlc_bmac_tx_fifo_suspend(wlc_hw_info_t *wlc_hw, uint tx_fifo)
+void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo)
 {
 	u8 fifo = 1 << tx_fifo;
 
@@ -3141,7 +3161,7 @@
 	}
 }
 
-void wlc_bmac_tx_fifo_resume(wlc_hw_info_t *wlc_hw, uint tx_fifo)
+void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo)
 {
 	/* BMAC_NOTE: WLC_TX_FIFO_ENAB is done in wlc_dpc() for DMA case but need to be done
 	 * here for PIO otherwise the watchdog will catch the inconsistency and fire
@@ -3169,20 +3189,20 @@
  *   0 if the interrupt is not for us, or we are in some special cases;
  *   device interrupt status bits otherwise.
  */
-static inline u32 wlc_intstatus(wlc_info_t *wlc, bool in_isr)
+static inline u32 wlc_intstatus(struct wlc_info *wlc, bool in_isr)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	d11regs_t *regs = wlc_hw->regs;
 	u32 macintstatus;
 	u32 intstatus_rxfifo, intstatus_txsfifo;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	osh = wlc_hw->osh;
 
 	/* macintstatus includes a DMA interrupt summary bit */
 	macintstatus = R_REG(osh, &regs->macintstatus);
 
-	WL_TRACE(("wl%d: macintstatus: 0x%x\n", wlc_hw->unit, macintstatus));
+	WL_TRACE("wl%d: macintstatus: 0x%x\n", wlc_hw->unit, macintstatus);
 
 	/* detect cardbus removed, in power down(suspend) and in reset */
 	if (DEVICEREMOVED(wlc))
@@ -3207,9 +3227,7 @@
 	 */
 	/* turn off the interrupts */
 	W_REG(osh, &regs->macintmask, 0);
-#ifndef BCMSDIO
 	(void)R_REG(osh, &regs->macintmask);	/* sync readback */
-#endif
 	wlc->macintmask = 0;
 
 	/* clear device interrupts */
@@ -3224,7 +3242,9 @@
 			    R_REG(osh,
 				  &regs->intctrlregs[RX_TXSTATUS_FIFO].
 				  intstatus);
-			WL_TRACE(("wl%d: intstatus_rxfifo 0x%x, intstatus_txsfifo 0x%x\n", wlc_hw->unit, intstatus_rxfifo, intstatus_txsfifo));
+			WL_TRACE("wl%d: intstatus_rxfifo 0x%x, intstatus_txsfifo 0x%x\n",
+				 wlc_hw->unit,
+				 intstatus_rxfifo, intstatus_txsfifo);
 
 			/* defer unsolicited interrupt hints */
 			intstatus_rxfifo &= DEF_RXINTMASK;
@@ -3261,7 +3281,7 @@
 
 /* Update wlc->macintstatus and wlc->intstatus[]. */
 /* Return true if they are updated successfully. false otherwise */
-bool wlc_intrsupd(wlc_info_t *wlc)
+bool wlc_intrsupd(struct wlc_info *wlc)
 {
 	u32 macintstatus;
 
@@ -3286,9 +3306,9 @@
  * *wantdpc will be set to true if further wlc_dpc() processing is required,
  * false otherwise.
  */
-bool BCMFASTPATH wlc_isr(wlc_info_t *wlc, bool *wantdpc)
+bool BCMFASTPATH wlc_isr(struct wlc_info *wlc, bool *wantdpc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	u32 macintstatus;
 
 	*wantdpc = false;
@@ -3300,7 +3320,7 @@
 	macintstatus = wlc_intstatus(wlc, true);
 
 	if (macintstatus == 0xffffffff)
-		WL_ERROR(("DEVICEREMOVED detected in the ISR code path.\n"));
+		WL_ERROR("DEVICEREMOVED detected in the ISR code path\n");
 
 	/* it is not for us */
 	if (macintstatus == 0)
@@ -3317,20 +3337,20 @@
 }
 
 /* process tx completion events for corerev < 5 */
-static bool wlc_bmac_txstatus_corerev4(wlc_hw_info_t *wlc_hw)
+static bool wlc_bmac_txstatus_corerev4(struct wlc_hw_info *wlc_hw)
 {
-	void *status_p;
+	struct sk_buff *status_p;
 	tx_status_t *txs;
-	osl_t *osh;
+	struct osl_info *osh;
 	bool fatal = false;
 
-	WL_TRACE(("wl%d: wlc_txstatusrecv\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_txstatusrecv\n", wlc_hw->unit);
 
 	osh = wlc_hw->osh;
 
 	while (!fatal && (status_p = dma_rx(wlc_hw->di[RX_TXSTATUS_FIFO]))) {
 
-		txs = (tx_status_t *) PKTDATA(status_p);
+		txs = (tx_status_t *) status_p->data;
 		/* MAC uses little endian only */
 		ltoh16_buf((void *)txs, sizeof(tx_status_t));
 
@@ -3340,7 +3360,7 @@
 
 		fatal = wlc_bmac_dotxstatus(wlc_hw, txs, 0);
 
-		PKTFREE(osh, status_p, false);
+		pkt_buf_free_skb(osh, status_p, false);
 	}
 
 	if (fatal)
@@ -3353,7 +3373,7 @@
 }
 
 static bool BCMFASTPATH
-wlc_bmac_dotxstatus(wlc_hw_info_t *wlc_hw, tx_status_t *txs, u32 s2)
+wlc_bmac_dotxstatus(struct wlc_hw_info *wlc_hw, tx_status_t *txs, u32 s2)
 {
 	/* discard intermediate indications for ucode with one legitimate case:
 	 *   e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, but the subsequent
@@ -3372,12 +3392,12 @@
  * Return true if more tx status need to be processed. false otherwise.
  */
 static bool BCMFASTPATH
-wlc_bmac_txstatus(wlc_hw_info_t *wlc_hw, bool bound, bool *fatal)
+wlc_bmac_txstatus(struct wlc_hw_info *wlc_hw, bool bound, bool *fatal)
 {
 	bool morepending = false;
-	wlc_info_t *wlc = wlc_hw->wlc;
+	struct wlc_info *wlc = wlc_hw->wlc;
 
-	WL_TRACE(("wl%d: wlc_bmac_txstatus\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_txstatus\n", wlc_hw->unit);
 
 	if (D11REV_IS(wlc_hw->corerev, 4)) {
 		/* to retire soon */
@@ -3388,7 +3408,7 @@
 	} else {
 		/* corerev >= 5 */
 		d11regs_t *regs;
-		osl_t *osh;
+		struct osl_info *osh;
 		tx_status_t txstatus, *txs;
 		u32 s1, s2;
 		uint n = 0;
@@ -3402,8 +3422,8 @@
 		       && (s1 = R_REG(osh, &regs->frmtxstatus)) & TXS_V) {
 
 			if (s1 == 0xffffffff) {
-				WL_ERROR(("wl%d: %s: dead chip\n",
-					  wlc_hw->unit, __func__));
+				WL_ERROR("wl%d: %s: dead chip\n",
+					 wlc_hw->unit, __func__);
 				ASSERT(s1 != 0xffffffff);
 				return morepending;
 			}
@@ -3436,15 +3456,15 @@
 	return morepending;
 }
 
-void wlc_suspend_mac_and_wait(wlc_info_t *wlc)
+void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	d11regs_t *regs = wlc_hw->regs;
 	u32 mc, mi;
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: wlc_suspend_mac_and_wait: bandunit %d\n", wlc_hw->unit,
-		  wlc_hw->band->bandunit));
+	WL_TRACE("wl%d: wlc_suspend_mac_and_wait: bandunit %d\n",
+		 wlc_hw->unit, wlc_hw->band->bandunit);
 
 	/*
 	 * Track overlapping suspend requests
@@ -3461,7 +3481,7 @@
 	mc = R_REG(osh, &regs->maccontrol);
 
 	if (mc == 0xffffffff) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
 		wl_down(wlc->wl);
 		return;
 	}
@@ -3471,7 +3491,7 @@
 
 	mi = R_REG(osh, &regs->macintstatus);
 	if (mi == 0xffffffff) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
 		wl_down(wlc->wl);
 		return;
 	}
@@ -3483,15 +3503,18 @@
 		 WLC_MAX_MAC_SUSPEND);
 
 	if (!(R_REG(osh, &regs->macintstatus) & MI_MACSSPNDD)) {
-		WL_ERROR(("wl%d: wlc_suspend_mac_and_wait: waited %d uS and "
-			  "MI_MACSSPNDD is still not on.\n",
-			  wlc_hw->unit, WLC_MAX_MAC_SUSPEND));
-		WL_ERROR(("wl%d: psmdebug 0x%08x, phydebug 0x%08x, psm_brc 0x%04x\n", wlc_hw->unit, R_REG(osh, &regs->psmdebug), R_REG(osh, &regs->phydebug), R_REG(osh, &regs->psm_brc)));
+		WL_ERROR("wl%d: wlc_suspend_mac_and_wait: waited %d uS and MI_MACSSPNDD is still not on.\n",
+			 wlc_hw->unit, WLC_MAX_MAC_SUSPEND);
+		WL_ERROR("wl%d: psmdebug 0x%08x, phydebug 0x%08x, psm_brc 0x%04x\n",
+			 wlc_hw->unit,
+			 R_REG(osh, &regs->psmdebug),
+			 R_REG(osh, &regs->phydebug),
+			 R_REG(osh, &regs->psm_brc));
 	}
 
 	mc = R_REG(osh, &regs->maccontrol);
 	if (mc == 0xffffffff) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc_hw->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc_hw->unit, __func__);
 		wl_down(wlc->wl);
 		return;
 	}
@@ -3500,15 +3523,15 @@
 	ASSERT(!(mc & MCTL_EN_MAC));
 }
 
-void wlc_enable_mac(wlc_info_t *wlc)
+void wlc_enable_mac(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	d11regs_t *regs = wlc_hw->regs;
 	u32 mc, mi;
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: wlc_enable_mac: bandunit %d\n", wlc_hw->unit,
-		  wlc->band->bandunit));
+	WL_TRACE("wl%d: wlc_enable_mac: bandunit %d\n",
+		 wlc_hw->unit, wlc->band->bandunit);
 
 	/*
 	 * Track overlapping suspend requests
@@ -3539,7 +3562,7 @@
 	wlc_ucode_wake_override_clear(wlc_hw, WLC_WAKE_OVERRIDE_MACSUSPEND);
 }
 
-void wlc_bmac_ifsctl_edcrs_set(wlc_hw_info_t *wlc_hw, bool abie, bool isht)
+void wlc_bmac_ifsctl_edcrs_set(struct wlc_hw_info *wlc_hw, bool abie, bool isht)
 {
 	if (!(WLCISNPHY(wlc_hw->band) && (D11REV_GE(wlc_hw->corerev, 16))))
 		return;
@@ -3575,7 +3598,7 @@
 	}
 }
 
-static void wlc_upd_ofdm_pctl1_table(wlc_hw_info_t *wlc_hw)
+static void wlc_upd_ofdm_pctl1_table(struct wlc_hw_info *wlc_hw)
 {
 	u8 rate;
 	u8 rates[8] = {
@@ -3609,7 +3632,7 @@
 	}
 }
 
-static u16 wlc_bmac_ofdm_ratetable_offset(wlc_hw_info_t *wlc_hw, u8 rate)
+static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw, u8 rate)
 {
 	uint i;
 	u8 plcp_rate = 0;
@@ -3642,7 +3665,7 @@
 	return 2 * wlc_bmac_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
 }
 
-void wlc_bmac_band_stf_ss_set(wlc_hw_info_t *wlc_hw, u8 stf_mode)
+void wlc_bmac_band_stf_ss_set(struct wlc_hw_info *wlc_hw, u8 stf_mode)
 {
 	wlc_hw->hw_stf_ss_opmode = stf_mode;
 
@@ -3651,7 +3674,7 @@
 }
 
 void BCMFASTPATH
-wlc_bmac_read_tsf(wlc_hw_info_t *wlc_hw, u32 *tsf_l_ptr,
+wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
 		  u32 *tsf_h_ptr)
 {
 	d11regs_t *regs = wlc_hw->regs;
@@ -3663,14 +3686,14 @@
 	return;
 }
 
-bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw)
+bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw)
 {
 	d11regs_t *regs;
 	u32 w, val;
 	volatile u16 *reg16;
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: validate_chip_access\n", wlc_hw->unit));
+	WL_TRACE("wl%d: validate_chip_access\n", wlc_hw->unit);
 
 	regs = wlc_hw->regs;
 	osh = wlc_hw->osh;
@@ -3690,7 +3713,8 @@
 	(void)R_REG(osh, &regs->objaddr);
 	val = R_REG(osh, &regs->objdata);
 	if (val != (u32) 0xaa5555aa) {
-		WL_ERROR(("wl%d: validate_chip_access: SHM = 0x%x, expected 0xaa5555aa\n", wlc_hw->unit, val));
+		WL_ERROR("wl%d: validate_chip_access: SHM = 0x%x, expected 0xaa5555aa\n",
+			 wlc_hw->unit, val);
 		return false;
 	}
 
@@ -3702,7 +3726,8 @@
 	(void)R_REG(osh, &regs->objaddr);
 	val = R_REG(osh, &regs->objdata);
 	if (val != (u32) 0x55aaaa55) {
-		WL_ERROR(("wl%d: validate_chip_access: SHM = 0x%x, expected 0x55aaaa55\n", wlc_hw->unit, val));
+		WL_ERROR("wl%d: validate_chip_access: SHM = 0x%x, expected 0x55aaaa55\n",
+			 wlc_hw->unit, val);
 		return false;
 	}
 
@@ -3732,12 +3757,14 @@
 		/* verify with the 16 bit registers that have no side effects */
 		val = R_REG(osh, &regs->tsf_cfpstrt_l);
 		if (val != (uint) 0xBBBB) {
-			WL_ERROR(("wl%d: validate_chip_access: tsf_cfpstrt_l = 0x%x, expected" " 0x%x\n", wlc_hw->unit, val, 0xBBBB));
+			WL_ERROR("wl%d: validate_chip_access: tsf_cfpstrt_l = 0x%x, expected 0x%x\n",
+				 wlc_hw->unit, val, 0xBBBB);
 			return false;
 		}
 		val = R_REG(osh, &regs->tsf_cfpstrt_h);
 		if (val != (uint) 0xCCCC) {
-			WL_ERROR(("wl%d: validate_chip_access: tsf_cfpstrt_h = 0x%x, expected" " 0x%x\n", wlc_hw->unit, val, 0xCCCC));
+			WL_ERROR("wl%d: validate_chip_access: tsf_cfpstrt_h = 0x%x, expected 0x%x\n",
+				 wlc_hw->unit, val, 0xCCCC);
 			return false;
 		}
 
@@ -3749,7 +3776,10 @@
 	w = R_REG(osh, &regs->maccontrol);
 	if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
 	    (w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
-		WL_ERROR(("wl%d: validate_chip_access: maccontrol = 0x%x, expected 0x%x or 0x%x\n", wlc_hw->unit, w, (MCTL_IHR_EN | MCTL_WAKE), (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE)));
+		WL_ERROR("wl%d: validate_chip_access: maccontrol = 0x%x, expected 0x%x or 0x%x\n",
+			 wlc_hw->unit, w,
+			 (MCTL_IHR_EN | MCTL_WAKE),
+			 (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE));
 		return false;
 	}
 
@@ -3758,13 +3788,13 @@
 
 #define PHYPLL_WAIT_US	100000
 
-void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on)
+void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on)
 {
 	d11regs_t *regs;
-	osl_t *osh;
+	struct osl_info *osh;
 	u32 tmp;
 
-	WL_TRACE(("wl%d: wlc_bmac_core_phypll_ctl\n", wlc_hw->unit));
+	WL_TRACE("wl%d: wlc_bmac_core_phypll_ctl\n", wlc_hw->unit);
 
 	tmp = 0;
 	regs = wlc_hw->regs;
@@ -3785,8 +3815,8 @@
 			tmp = R_REG(osh, &regs->clk_ctl_st);
 			if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
 			    (CCS_ERSRC_AVAIL_HT)) {
-				WL_ERROR(("%s: turn on PHY PLL failed\n",
-					  __func__));
+				WL_ERROR("%s: turn on PHY PLL failed\n",
+					 __func__);
 				ASSERT(0);
 			}
 		} else {
@@ -3803,8 +3833,8 @@
 			     (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
 			    !=
 			    (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) {
-				WL_ERROR(("%s: turn on PHY PLL failed\n",
-					  __func__));
+				WL_ERROR("%s: turn on PHY PLL failed\n",
+					 __func__);
 				ASSERT(0);
 			}
 		}
@@ -3817,11 +3847,11 @@
 	}
 }
 
-void wlc_coredisable(wlc_hw_info_t *wlc_hw)
+void wlc_coredisable(struct wlc_hw_info *wlc_hw)
 {
 	bool dev_gone;
 
-	WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
 
 	ASSERT(!wlc_hw->up);
 
@@ -3857,9 +3887,9 @@
 }
 
 /* power both the pll and external oscillator on/off */
-void wlc_bmac_xtal(wlc_hw_info_t *wlc_hw, bool want)
+void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want)
 {
-	WL_TRACE(("wl%d: wlc_bmac_xtal: want %d\n", wlc_hw->unit, want));
+	WL_TRACE("wl%d: wlc_bmac_xtal: want %d\n", wlc_hw->unit, want);
 
 	/* dont power down if plldown is false or we must poll hw radio disable */
 	if (!want && wlc_hw->pllreq)
@@ -3876,9 +3906,9 @@
 	}
 }
 
-static void wlc_flushqueues(wlc_info_t *wlc)
+static void wlc_flushqueues(struct wlc_info *wlc)
 {
-	wlc_hw_info_t *wlc_hw = wlc->hw;
+	struct wlc_hw_info *wlc_hw = wlc->hw;
 	uint i;
 
 	wlc->txpend16165war = 0;
@@ -3888,8 +3918,8 @@
 		if (wlc_hw->di[i]) {
 			dma_txreclaim(wlc_hw->di[i], HNDDMA_RANGE_ALL);
 			TXPKTPENDCLR(wlc, i);
-			WL_TRACE(("wlc_flushqueues: pktpend fifo %d cleared\n",
-				  i));
+			WL_TRACE("wlc_flushqueues: pktpend fifo %d cleared\n",
+				 i);
 		}
 
 	/* free any posted rx packets */
@@ -3898,12 +3928,12 @@
 		dma_rxreclaim(wlc_hw->di[RX_TXSTATUS_FIFO]);
 }
 
-u16 wlc_bmac_read_shm(wlc_hw_info_t *wlc_hw, uint offset)
+u16 wlc_bmac_read_shm(struct wlc_hw_info *wlc_hw, uint offset)
 {
 	return wlc_bmac_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
 }
 
-void wlc_bmac_write_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v)
+void wlc_bmac_write_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v)
 {
 	wlc_bmac_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
 }
@@ -3912,7 +3942,7 @@
  * SHM 'offset' needs to be an even address and
  * Buffer length 'len' must be an even number of bytes
  */
-void wlc_bmac_set_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v, int len)
+void wlc_bmac_set_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v, int len)
 {
 	int i;
 
@@ -3929,7 +3959,7 @@
 }
 
 static u16
-wlc_bmac_read_objmem(wlc_hw_info_t *wlc_hw, uint offset, u32 sel)
+wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset, u32 sel)
 {
 	d11regs_t *regs = wlc_hw->regs;
 	volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
@@ -3950,7 +3980,7 @@
 }
 
 static void
-wlc_bmac_write_objmem(wlc_hw_info_t *wlc_hw, uint offset, u16 v, u32 sel)
+wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset, u16 v, u32 sel)
 {
 	d11regs_t *regs = wlc_hw->regs;
 	volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
@@ -3973,7 +4003,7 @@
  * 'sel' selects the type of memory
  */
 void
-wlc_bmac_copyto_objmem(wlc_hw_info_t *wlc_hw, uint offset, const void *buf,
+wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw, uint offset, const void *buf,
 		       int len, u32 sel)
 {
 	u16 v;
@@ -3999,7 +4029,7 @@
  * 'sel' selects the type of memory
  */
 void
-wlc_bmac_copyfrom_objmem(wlc_hw_info_t *wlc_hw, uint offset, void *buf,
+wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset, void *buf,
 			 int len, u32 sel)
 {
 	u16 v;
@@ -4020,16 +4050,16 @@
 	}
 }
 
-void wlc_bmac_copyfrom_vars(wlc_hw_info_t *wlc_hw, char **buf, uint *len)
+void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf, uint *len)
 {
-	WL_TRACE(("wlc_bmac_copyfrom_vars, nvram vars totlen=%d\n",
-		  wlc_hw->vars_size));
+	WL_TRACE("wlc_bmac_copyfrom_vars, nvram vars totlen=%d\n",
+		 wlc_hw->vars_size);
 
 	*buf = wlc_hw->vars;
 	*len = wlc_hw->vars_size;
 }
 
-void wlc_bmac_retrylimit_upd(wlc_hw_info_t *wlc_hw, u16 SRL, u16 LRL)
+void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL, u16 LRL)
 {
 	wlc_hw->SRL = SRL;
 	wlc_hw->LRL = LRL;
@@ -4047,17 +4077,17 @@
 	}
 }
 
-void wlc_bmac_set_noreset(wlc_hw_info_t *wlc_hw, bool noreset_flag)
+void wlc_bmac_set_noreset(struct wlc_hw_info *wlc_hw, bool noreset_flag)
 {
 	wlc_hw->noreset = noreset_flag;
 }
 
-void wlc_bmac_set_ucode_loaded(wlc_hw_info_t *wlc_hw, bool ucode_loaded)
+void wlc_bmac_set_ucode_loaded(struct wlc_hw_info *wlc_hw, bool ucode_loaded)
 {
 	wlc_hw->ucode_loaded = ucode_loaded;
 }
 
-void wlc_bmac_pllreq(wlc_hw_info_t *wlc_hw, bool set, mbool req_bit)
+void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set, mbool req_bit)
 {
 	ASSERT(req_bit);
 
@@ -4088,7 +4118,7 @@
 	return;
 }
 
-void wlc_bmac_set_clk(wlc_hw_info_t *wlc_hw, bool on)
+void wlc_bmac_set_clk(struct wlc_hw_info *wlc_hw, bool on)
 {
 	if (on) {
 		/* power up pll and oscillator */
@@ -4110,7 +4140,7 @@
 }
 
 /* this will be true for all ai chips */
-bool wlc_bmac_taclear(wlc_hw_info_t *wlc_hw, bool ta_ok)
+bool wlc_bmac_taclear(struct wlc_hw_info *wlc_hw, bool ta_ok)
 {
 	return true;
 }
@@ -4118,7 +4148,7 @@
 /* Lower down relevant GPIOs like LED when going down w/o
  * doing PCI config cycles or touching interrupts
  */
-void wlc_gpio_fast_deinit(wlc_hw_info_t *wlc_hw)
+void wlc_gpio_fast_deinit(struct wlc_hw_info *wlc_hw)
 {
 	if ((wlc_hw == NULL) || (wlc_hw->sih == NULL))
 		return;
@@ -4126,17 +4156,17 @@
 	/* Only chips with internal bus or PCIE cores or certain PCI cores
 	 * are able to switch cores w/o disabling interrupts
 	 */
-	if (!((BUSTYPE(wlc_hw->sih->bustype) == SI_BUS) ||
-	      ((BUSTYPE(wlc_hw->sih->bustype) == PCI_BUS) &&
+	if (!((wlc_hw->sih->bustype == SI_BUS) ||
+	      ((wlc_hw->sih->bustype == PCI_BUS) &&
 	       ((wlc_hw->sih->buscoretype == PCIE_CORE_ID) ||
 		(wlc_hw->sih->buscorerev >= 13)))))
 		return;
 
-	WL_TRACE(("wl%d: %s\n", wlc_hw->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc_hw->unit, __func__);
 	return;
 }
 
-bool wlc_bmac_radio_hw(wlc_hw_info_t *wlc_hw, bool enable)
+bool wlc_bmac_radio_hw(struct wlc_hw_info *wlc_hw, bool enable)
 {
 	/* Do not access Phy registers if core is not up */
 	if (si_iscoreup(wlc_hw->sih) == false)
@@ -4171,7 +4201,7 @@
 	return true;
 }
 
-u16 wlc_bmac_rate_shm_offset(wlc_hw_info_t *wlc_hw, u8 rate)
+u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate)
 {
 	u16 table_ptr;
 	u8 phy_rate, index;
@@ -4195,12 +4225,12 @@
 	return 2 * wlc_bmac_read_shm(wlc_hw, table_ptr + (index * 2));
 }
 
-void wlc_bmac_set_txpwr_percent(wlc_hw_info_t *wlc_hw, u8 val)
+void wlc_bmac_set_txpwr_percent(struct wlc_hw_info *wlc_hw, u8 val)
 {
 	wlc_phy_txpwr_percent_set(wlc_hw->band->pi, val);
 }
 
-void wlc_bmac_antsel_set(wlc_hw_info_t *wlc_hw, u32 antsel_avail)
+void wlc_bmac_antsel_set(struct wlc_hw_info *wlc_hw, u32 antsel_avail)
 {
 	wlc_hw->antsel_avail = antsel_avail;
 }
diff --git a/drivers/staging/brcm80211/sys/wlc_bmac.h b/drivers/staging/brcm80211/sys/wlc_bmac.h
index 872bc8d..98150aa 100644
--- a/drivers/staging/brcm80211/sys/wlc_bmac.h
+++ b/drivers/staging/brcm80211/sys/wlc_bmac.h
@@ -57,7 +57,8 @@
 	} band[MAXBANDS];
 } wlc_bmac_revinfo_t;
 
-/* dup state between BMAC(wlc_hw_info_t) and HIGH(wlc_info_t) driver */
+/* dup state between BMAC(struct wlc_hw_info) and HIGH(struct wlc_info)
+   driver */
 typedef struct wlc_bmac_state {
 	u32 machwcap;	/* mac hw capibility */
 	u32 preamble_ovr;	/* preamble override */
@@ -130,148 +131,143 @@
 	WLCHW_STATE_LAST
 } wlc_bmac_state_id_t;
 
-extern int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device,
-			   uint unit, bool piomode, osl_t *osh, void *regsva,
-			   uint bustype, void *btparam);
-extern int wlc_bmac_detach(wlc_info_t *wlc);
+extern int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device,
+			   uint unit, bool piomode, struct osl_info *osh,
+			   void *regsva, uint bustype, void *btparam);
+extern int wlc_bmac_detach(struct wlc_info *wlc);
 extern void wlc_bmac_watchdog(void *arg);
-extern void wlc_bmac_info_init(wlc_hw_info_t *wlc_hw);
+extern void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw);
 
 /* up/down, reset, clk */
-#ifdef WLC_LOW
-extern void wlc_bmac_xtal(wlc_hw_info_t *wlc_hw, bool want);
-#endif
+extern void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want);
 
-extern void wlc_bmac_copyto_objmem(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw,
 				   uint offset, const void *buf, int len,
 				   u32 sel);
-extern void wlc_bmac_copyfrom_objmem(wlc_hw_info_t *wlc_hw, uint offset,
+extern void wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset,
 				     void *buf, int len, u32 sel);
 #define wlc_bmac_copyfrom_shm(wlc_hw, offset, buf, len)                 \
 	wlc_bmac_copyfrom_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
 #define wlc_bmac_copyto_shm(wlc_hw, offset, buf, len)                   \
 	wlc_bmac_copyto_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
 
-extern void wlc_bmac_core_phy_clk(wlc_hw_info_t *wlc_hw, bool clk);
-extern void wlc_bmac_core_phypll_reset(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_core_phypll_ctl(wlc_hw_info_t *wlc_hw, bool on);
-extern void wlc_bmac_phyclk_fgc(wlc_hw_info_t *wlc_hw, bool clk);
-extern void wlc_bmac_macphyclk_set(wlc_hw_info_t *wlc_hw, bool clk);
-extern void wlc_bmac_phy_reset(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags);
-extern void wlc_bmac_reset(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_init(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
+extern void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk);
+extern void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on);
+extern void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk);
+extern void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk);
+extern void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags);
+extern void wlc_bmac_reset(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
 			  bool mute);
-extern int wlc_bmac_up_prep(wlc_hw_info_t *wlc_hw);
-extern int wlc_bmac_up_finish(wlc_hw_info_t *wlc_hw);
-extern int wlc_bmac_down_prep(wlc_hw_info_t *wlc_hw);
-extern int wlc_bmac_down_finish(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_corereset(wlc_hw_info_t *wlc_hw, u32 flags);
-extern void wlc_bmac_switch_macfreq(wlc_hw_info_t *wlc_hw, u8 spurmode);
+extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw);
+extern int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw);
+extern int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw);
+extern int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags);
+extern void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode);
 
 /* chanspec, ucode interface */
-extern int wlc_bmac_bandtype(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_set_chanspec(wlc_hw_info_t *wlc_hw, chanspec_t chanspec,
+extern int wlc_bmac_bandtype(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw,
+				  chanspec_t chanspec,
 				  bool mute, struct txpwr_limits *txpwr);
 
-extern void wlc_bmac_txfifo(wlc_hw_info_t *wlc_hw, uint fifo, void *p,
+extern void wlc_bmac_txfifo(struct wlc_hw_info *wlc_hw, uint fifo, void *p,
 			    bool commit, u16 frameid, u8 txpktpend);
-extern int wlc_bmac_xmtfifo_sz_get(wlc_hw_info_t *wlc_hw, uint fifo,
+extern int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo,
 				   uint *blocks);
-extern void wlc_bmac_mhf(wlc_hw_info_t *wlc_hw, u8 idx, u16 mask,
+extern void wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask,
 			 u16 val, int bands);
-extern void wlc_bmac_mctrl(wlc_hw_info_t *wlc_hw, u32 mask, u32 val);
-extern u16 wlc_bmac_mhf_get(wlc_hw_info_t *wlc_hw, u8 idx, int bands);
-extern int wlc_bmac_xmtfifo_sz_set(wlc_hw_info_t *wlc_hw, uint fifo,
+extern void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val);
+extern u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands);
+extern int wlc_bmac_xmtfifo_sz_set(struct wlc_hw_info *wlc_hw, uint fifo,
 				   uint blocks);
-extern void wlc_bmac_txant_set(wlc_hw_info_t *wlc_hw, u16 phytxant);
-extern u16 wlc_bmac_get_txant(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_antsel_type_set(wlc_hw_info_t *wlc_hw, u8 antsel_type);
-extern int wlc_bmac_revinfo_get(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant);
+extern u16 wlc_bmac_get_txant(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw,
+				     u8 antsel_type);
+extern int wlc_bmac_revinfo_get(struct wlc_hw_info *wlc_hw,
 				wlc_bmac_revinfo_t *revinfo);
-extern int wlc_bmac_state_get(wlc_hw_info_t *wlc_hw, wlc_bmac_state_t *state);
-extern void wlc_bmac_write_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v);
-extern u16 wlc_bmac_read_shm(wlc_hw_info_t *wlc_hw, uint offset);
-extern void wlc_bmac_set_shm(wlc_hw_info_t *wlc_hw, uint offset, u16 v,
+extern int wlc_bmac_state_get(struct wlc_hw_info *wlc_hw,
+			      wlc_bmac_state_t *state);
+extern void wlc_bmac_write_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v);
+extern u16 wlc_bmac_read_shm(struct wlc_hw_info *wlc_hw, uint offset);
+extern void wlc_bmac_set_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v,
 			     int len);
-extern void wlc_bmac_write_template_ram(wlc_hw_info_t *wlc_hw, int offset,
+extern void wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset,
 					int len, void *buf);
-extern void wlc_bmac_copyfrom_vars(wlc_hw_info_t *wlc_hw, char **buf,
+extern void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf,
 				   uint *len);
 
-extern void wlc_bmac_process_ps_switch(wlc_hw_info_t *wlc,
+extern void wlc_bmac_process_ps_switch(struct wlc_hw_info *wlc,
 				       struct ether_addr *ea, s8 ps_on);
-extern void wlc_bmac_hw_etheraddr(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_hw_etheraddr(struct wlc_hw_info *wlc_hw,
 				  struct ether_addr *ea);
-extern void wlc_bmac_set_hw_etheraddr(wlc_hw_info_t *wlc_hw,
+extern void wlc_bmac_set_hw_etheraddr(struct wlc_hw_info *wlc_hw,
 				      struct ether_addr *ea);
-extern bool wlc_bmac_validate_chip_access(wlc_hw_info_t *wlc_hw);
+extern bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw);
 
-extern bool wlc_bmac_radio_read_hwdisabled(wlc_hw_info_t *wlc_hw);
-extern void wlc_bmac_set_shortslot(wlc_hw_info_t *wlc_hw, bool shortslot);
-extern void wlc_bmac_mute(wlc_hw_info_t *wlc_hw, bool want, mbool flags);
-extern void wlc_bmac_set_deaf(wlc_hw_info_t *wlc_hw, bool user_flag);
-extern void wlc_bmac_band_stf_ss_set(wlc_hw_info_t *wlc_hw, u8 stf_mode);
+extern bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw);
+extern void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot);
+extern void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags);
+extern void wlc_bmac_set_deaf(struct wlc_hw_info *wlc_hw, bool user_flag);
+extern void wlc_bmac_band_stf_ss_set(struct wlc_hw_info *wlc_hw, u8 stf_mode);
 
-extern void wlc_bmac_wait_for_wake(wlc_hw_info_t *wlc_hw);
-extern bool wlc_bmac_tx_fifo_suspended(wlc_hw_info_t *wlc_hw, uint tx_fifo);
-extern void wlc_bmac_tx_fifo_suspend(wlc_hw_info_t *wlc_hw, uint tx_fifo);
-extern void wlc_bmac_tx_fifo_resume(wlc_hw_info_t *wlc_hw, uint tx_fifo);
+extern void wlc_bmac_wait_for_wake(struct wlc_hw_info *wlc_hw);
+extern bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw,
+				       uint tx_fifo);
+extern void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo);
+extern void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo);
 
-extern void wlc_ucode_wake_override_set(wlc_hw_info_t *wlc_hw,
+extern void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw,
 					u32 override_bit);
-extern void wlc_ucode_wake_override_clear(wlc_hw_info_t *wlc_hw,
+extern void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw,
 					  u32 override_bit);
 
-extern void wlc_bmac_set_rcmta(wlc_hw_info_t *wlc_hw, int idx,
+extern void wlc_bmac_set_rcmta(struct wlc_hw_info *wlc_hw, int idx,
 			       const struct ether_addr *addr);
-extern void wlc_bmac_set_addrmatch(wlc_hw_info_t *wlc_hw, int match_reg_offset,
+extern void wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw,
+				   int match_reg_offset,
 				   const struct ether_addr *addr);
-extern void wlc_bmac_write_hw_bcntemplates(wlc_hw_info_t *wlc_hw, void *bcn,
-					   int len, bool both);
+extern void wlc_bmac_write_hw_bcntemplates(struct wlc_hw_info *wlc_hw,
+					   void *bcn, int len, bool both);
 
-extern void wlc_bmac_read_tsf(wlc_hw_info_t *wlc_hw, u32 *tsf_l_ptr,
+extern void wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
 			      u32 *tsf_h_ptr);
-extern void wlc_bmac_set_cwmin(wlc_hw_info_t *wlc_hw, u16 newmin);
-extern void wlc_bmac_set_cwmax(wlc_hw_info_t *wlc_hw, u16 newmax);
-extern void wlc_bmac_set_noreset(wlc_hw_info_t *wlc, bool noreset_flag);
-extern void wlc_bmac_set_ucode_loaded(wlc_hw_info_t *wlc, bool ucode_loaded);
+extern void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin);
+extern void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax);
+extern void wlc_bmac_set_noreset(struct wlc_hw_info *wlc, bool noreset_flag);
+extern void wlc_bmac_set_ucode_loaded(struct wlc_hw_info *wlc,
+				      bool ucode_loaded);
 
-extern void wlc_bmac_retrylimit_upd(wlc_hw_info_t *wlc_hw, u16 SRL,
+extern void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL,
 				    u16 LRL);
 
-extern void wlc_bmac_fifoerrors(wlc_hw_info_t *wlc_hw);
+extern void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw);
 
-#ifdef WLC_HIGH_ONLY
-extern void wlc_bmac_dngl_reboot(rpc_info_t *);
-extern void wlc_bmac_dngl_rpc_agg(rpc_info_t *, u16 agg);
-extern void wlc_bmac_dngl_rpc_msglevel(rpc_info_t *, u16 level);
-extern void wlc_bmac_dngl_rpc_txq_wm_set(rpc_info_t *rpc, u32 wm);
-extern void wlc_bmac_dngl_rpc_txq_wm_get(rpc_info_t *rpc, u32 *wm);
-extern void wlc_bmac_dngl_rpc_agg_limit_set(rpc_info_t *rpc, u32 val);
-extern void wlc_bmac_dngl_rpc_agg_limit_get(rpc_info_t *rpc, u32 *pval);
-extern int wlc_bmac_debug_template(wlc_hw_info_t *wlc_hw);
-#endif
 
 /* API for BMAC driver (e.g. wlc_phy.c etc) */
 
-extern void wlc_bmac_bw_set(wlc_hw_info_t *wlc_hw, u16 bw);
-extern void wlc_bmac_pllreq(wlc_hw_info_t *wlc_hw, bool set, mbool req_bit);
-extern void wlc_bmac_set_clk(wlc_hw_info_t *wlc_hw, bool on);
-extern bool wlc_bmac_taclear(wlc_hw_info_t *wlc_hw, bool ta_ok);
+extern void wlc_bmac_bw_set(struct wlc_hw_info *wlc_hw, u16 bw);
+extern void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set,
+			    mbool req_bit);
+extern void wlc_bmac_set_clk(struct wlc_hw_info *wlc_hw, bool on);
+extern bool wlc_bmac_taclear(struct wlc_hw_info *wlc_hw, bool ta_ok);
 extern void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw);
 
-extern void wlc_bmac_dump(wlc_hw_info_t *wlc_hw, struct bcmstrbuf *b,
+extern void wlc_bmac_dump(struct wlc_hw_info *wlc_hw, struct bcmstrbuf *b,
 			  wlc_bmac_dump_id_t dump_id);
-extern void wlc_gpio_fast_deinit(wlc_hw_info_t *wlc_hw);
+extern void wlc_gpio_fast_deinit(struct wlc_hw_info *wlc_hw);
 
-extern bool wlc_bmac_radio_hw(wlc_hw_info_t *wlc_hw, bool enable);
-extern u16 wlc_bmac_rate_shm_offset(wlc_hw_info_t *wlc_hw, u8 rate);
+extern bool wlc_bmac_radio_hw(struct wlc_hw_info *wlc_hw, bool enable);
+extern u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate);
 
-extern void wlc_bmac_assert_type_set(wlc_hw_info_t *wlc_hw, u32 type);
-extern void wlc_bmac_set_txpwr_percent(wlc_hw_info_t *wlc_hw, u8 val);
-extern void wlc_bmac_blink_sync(wlc_hw_info_t *wlc_hw, u32 led_pins);
-extern void wlc_bmac_ifsctl_edcrs_set(wlc_hw_info_t *wlc_hw, bool abie,
+extern void wlc_bmac_assert_type_set(struct wlc_hw_info *wlc_hw, u32 type);
+extern void wlc_bmac_set_txpwr_percent(struct wlc_hw_info *wlc_hw, u8 val);
+extern void wlc_bmac_blink_sync(struct wlc_hw_info *wlc_hw, u32 led_pins);
+extern void wlc_bmac_ifsctl_edcrs_set(struct wlc_hw_info *wlc_hw, bool abie,
 				      bool isht);
 
-extern void wlc_bmac_antsel_set(wlc_hw_info_t *wlc_hw, u32 antsel_avail);
+extern void wlc_bmac_antsel_set(struct wlc_hw_info *wlc_hw, u32 antsel_avail);
diff --git a/drivers/staging/brcm80211/sys/wlc_bsscfg.h b/drivers/staging/brcm80211/sys/wlc_bsscfg.h
index ae5542a..d6a1971 100644
--- a/drivers/staging/brcm80211/sys/wlc_bsscfg.h
+++ b/drivers/staging/brcm80211/sys/wlc_bsscfg.h
@@ -34,7 +34,8 @@
 #define MAXMACLIST		64	/* max # source MAC matches */
 #define BCN_TEMPLATE_COUNT 	2
 
-/* Iterator for "associated" STA bss configs:  (wlc_info_t *wlc, int idx, wlc_bsscfg_t *cfg) */
+/* Iterator for "associated" STA bss configs:
+   (struct wlc_info *wlc, int idx, wlc_bsscfg_t *cfg) */
 #define FOREACH_AS_STA(wlc, idx, cfg) \
 	for (idx = 0; (int) idx < WLC_MAXBSSCFG; idx++) \
 		if ((cfg = (wlc)->bsscfg[idx]) && BSSCFG_STA(cfg) && cfg->associated)
diff --git a/drivers/staging/brcm80211/sys/wlc_cfg.h b/drivers/staging/brcm80211/sys/wlc_cfg.h
index a415e1f..3decb7d 100644
--- a/drivers/staging/brcm80211/sys/wlc_cfg.h
+++ b/drivers/staging/brcm80211/sys/wlc_cfg.h
@@ -23,14 +23,6 @@
 
 #define IS_SINGLEBAND_5G(device)	0
 
-/* Keep WLC_HIGH_ONLY, WLC_SPLIT for USB extension later on */
-#if !defined(WLC_LOW)
-#define WLC_HIGH_ONLY
-#endif
-#if !defined(WLC_LOW)
-#define WLC_SPLIT
-#endif
-
 /* **** Core type/rev defaults **** */
 #define D11_DEFAULT	0x0fffffb0	/* Supported  D11 revs: 4, 5, 7-27
 					 * also need to update wlc.h MAXCOREREV
@@ -61,22 +53,6 @@
 					 *      3       5356a0
 					 */
 
-#ifdef BCMSDIO
-#define D11CONF 0x100000
-#define SSLPNCONF 2
-#define GCCONF	0
-#define ACCONF	0
-#define NCONF	0
-#define LPCONF	0
-#define LCNCONF	0
-#define NTXD 32
-#define NRXD 16
-#define NRXBUFPOST 8
-#define WLC_DATAHIWAT 32
-#define RXBND 8
-#define MAXPKTCB 64
-#define AMPDU_NUM_MPDU 8
-#endif
 
 /* For undefined values, use defaults */
 #ifndef D11CONF
diff --git a/drivers/staging/brcm80211/sys/wlc_channel.c b/drivers/staging/brcm80211/sys/wlc_channel.c
index 5092803..a35c152 100644
--- a/drivers/staging/brcm80211/sys/wlc_channel.c
+++ b/drivers/staging/brcm80211/sys/wlc_channel.c
@@ -19,16 +19,21 @@
 #include <bcmdefs.h>
 #include <wlc_cfg.h>
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <bcmutils.h>
 #include <siutils.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 #include <wlioctl.h>
 #include <wlc_pub.h>
 #include <wlc_key.h>
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_bmac.h>
 #include <wlc_stf.h>
 #include <wlc_channel.h>
+#include <wl_dbg.h>
 
 typedef struct wlc_cm_band {
 	u8 locale_flags;	/* locale_info_t flags */
@@ -39,8 +44,8 @@
 } wlc_cm_band_t;
 
 struct wlc_cm_info {
-	wlc_pub_t *pub;
-	wlc_info_t *wlc;
+	struct wlc_pub *pub;
+	struct wlc_info *wlc;
 	char srom_ccode[WLC_CNTRY_BUF_SZ];	/* Country Code in SROM */
 	uint srom_regrev;	/* Regulatory Rev for the SROM ccode */
 	const country_info_t *country;	/* current country def */
@@ -377,7 +382,7 @@
 {
 	u8 i;
 
-	bzero(channels, sizeof(chanvec_t));
+	memset(channels, 0, sizeof(chanvec_t));
 
 	for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
 		if (locale->valid_channels & (1 << i)) {
@@ -562,8 +567,8 @@
 const locale_info_t *wlc_get_locale_2g(u8 locale_idx)
 {
 	if (locale_idx >= ARRAY_SIZE(g_locale_2g_table)) {
-		WL_ERROR(("%s: locale 2g index size out of range %d\n",
-			  __func__, locale_idx));
+		WL_ERROR("%s: locale 2g index size out of range %d\n",
+			 __func__, locale_idx);
 		ASSERT(locale_idx < ARRAY_SIZE(g_locale_2g_table));
 		return NULL;
 	}
@@ -573,8 +578,8 @@
 const locale_info_t *wlc_get_locale_5g(u8 locale_idx)
 {
 	if (locale_idx >= ARRAY_SIZE(g_locale_5g_table)) {
-		WL_ERROR(("%s: locale 5g index size out of range %d\n",
-			  __func__, locale_idx));
+		WL_ERROR("%s: locale 5g index size out of range %d\n",
+			 __func__, locale_idx);
 		ASSERT(locale_idx < ARRAY_SIZE(g_locale_5g_table));
 		return NULL;
 	}
@@ -584,8 +589,8 @@
 const locale_mimo_info_t *wlc_get_mimo_2g(u8 locale_idx)
 {
 	if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) {
-		WL_ERROR(("%s: mimo 2g index size out of range %d\n", __func__,
-			  locale_idx));
+		WL_ERROR("%s: mimo 2g index size out of range %d\n",
+			 __func__, locale_idx);
 		return NULL;
 	}
 	return g_mimo_2g_table[locale_idx];
@@ -594,26 +599,26 @@
 const locale_mimo_info_t *wlc_get_mimo_5g(u8 locale_idx)
 {
 	if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) {
-		WL_ERROR(("%s: mimo 5g index size out of range %d\n", __func__,
-			  locale_idx));
+		WL_ERROR("%s: mimo 5g index size out of range %d\n",
+			 __func__, locale_idx);
 		return NULL;
 	}
 	return g_mimo_5g_table[locale_idx];
 }
 
-wlc_cm_info_t *wlc_channel_mgr_attach(wlc_info_t *wlc)
+wlc_cm_info_t *wlc_channel_mgr_attach(struct wlc_info *wlc)
 {
 	wlc_cm_info_t *wlc_cm;
 	char country_abbrev[WLC_CNTRY_BUF_SZ];
 	const country_info_t *country;
-	wlc_pub_t *pub = wlc->pub;
+	struct wlc_pub *pub = wlc->pub;
 	char *ccode;
 
-	WL_TRACE(("wl%d: wlc_channel_mgr_attach\n", wlc->pub->unit));
+	WL_TRACE("wl%d: wlc_channel_mgr_attach\n", wlc->pub->unit);
 
 	wlc_cm = kzalloc(sizeof(wlc_cm_info_t), GFP_ATOMIC);
 	if (wlc_cm == NULL) {
-		WL_ERROR(("wl%d: %s: out of memory", pub->unit, __func__));
+		WL_ERROR("wl%d: %s: out of memory", pub->unit, __func__);
 		return NULL;
 	}
 	wlc_cm->pub = pub;
@@ -624,12 +629,13 @@
 	ccode = getvar(wlc->pub->vars, "ccode");
 	if (ccode) {
 		strncpy(wlc->pub->srom_ccode, ccode, WLC_CNTRY_BUF_SZ - 1);
-		WL_NONE(("%s: SROM country code is %c%c\n", __func__,
-			 wlc->pub->srom_ccode[0], wlc->pub->srom_ccode[1]));
+		WL_NONE("%s: SROM country code is %c%c\n",
+			__func__,
+			wlc->pub->srom_ccode[0], wlc->pub->srom_ccode[1]);
 	}
 
 	/* internal country information which must match regulatory constraints in firmware */
-	bzero(country_abbrev, WLC_CNTRY_BUF_SZ);
+	memset(country_abbrev, 0, WLC_CNTRY_BUF_SZ);
 	strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
 	country = wlc_country_lookup(wlc, country_abbrev);
 
@@ -659,7 +665,7 @@
 
 u8 wlc_channel_locale_flags(wlc_cm_info_t *wlc_cm)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 
 	return wlc_cm->bandstate[wlc->band->bandunit].locale_flags;
 }
@@ -711,7 +717,9 @@
 	char mapped_ccode[WLC_CNTRY_BUF_SZ];
 	uint mapped_regrev;
 
-	WL_NONE(("%s: (country_abbrev \"%s\", ccode \"%s\", regrev %d) SPROM \"%s\"/%u\n", __func__, country_abbrev, ccode, regrev, wlc_cm->srom_ccode, wlc_cm->srom_regrev));
+	WL_NONE("%s: (country_abbrev \"%s\", ccode \"%s\", regrev %d) SPROM \"%s\"/%u\n",
+		__func__, country_abbrev, ccode, regrev,
+		wlc_cm->srom_ccode, wlc_cm->srom_regrev);
 
 	/* if regrev is -1, lookup the mapped country code,
 	 * otherwise use the ccode and regrev directly
@@ -750,7 +758,7 @@
 {
 	const locale_mimo_info_t *li_mimo;
 	const locale_info_t *locale;
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	char prev_country_abbrev[WLC_CNTRY_BUF_SZ];
 
 	ASSERT(country != NULL);
@@ -758,7 +766,7 @@
 	/* save current country state */
 	wlc_cm->country = country;
 
-	bzero(&prev_country_abbrev, WLC_CNTRY_BUF_SZ);
+	memset(&prev_country_abbrev, 0, WLC_CNTRY_BUF_SZ);
 	strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
 		WLC_CNTRY_BUF_SZ - 1);
 
@@ -814,7 +822,7 @@
 						 char *mapped_ccode,
 						 uint *mapped_regrev)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	const country_info_t *country;
 	uint srom_regrev = wlc_cm->srom_regrev;
 	const char *srom_ccode = wlc_cm->srom_ccode;
@@ -822,8 +830,8 @@
 
 	/* check for currently supported ccode size */
 	if (strlen(ccode) > (WLC_CNTRY_BUF_SZ - 1)) {
-		WL_ERROR(("wl%d: %s: ccode \"%s\" too long for match\n",
-			  wlc->pub->unit, __func__, ccode));
+		WL_ERROR("wl%d: %s: ccode \"%s\" too long for match\n",
+			 wlc->pub->unit, __func__, ccode);
 		return NULL;
 	}
 
@@ -838,7 +846,7 @@
 	if (!strcmp(srom_ccode, ccode)) {
 		*mapped_regrev = srom_regrev;
 		mapped = 0;
-		WL_ERROR(("srom_code == ccode %s\n", __func__));
+		WL_ERROR("srom_code == ccode %s\n", __func__);
 		ASSERT(0);
 	} else {
 		mapped =
@@ -890,7 +898,7 @@
 		}
 	}
 
-	WL_ERROR(("%s: Returning NULL\n", __func__));
+	WL_ERROR("%s: Returning NULL\n", __func__);
 	ASSERT(0);
 	return NULL;
 }
@@ -898,9 +906,9 @@
 static int
 wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	uint i, j;
-	wlcband_t *band;
+	struct wlcband *band;
 	const locale_info_t *li;
 	chanvec_t sup_chan;
 	const locale_mimo_info_t *li_mimo;
@@ -952,7 +960,7 @@
  */
 static void wlc_channels_commit(wlc_cm_info_t *wlc_cm)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	uint chan;
 	struct txpwr_limits txpwr;
 
@@ -969,7 +977,9 @@
 	if (chan == INVCHANNEL) {
 		/* country/locale with no valid channels, set the radio disable bit */
 		mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
-		WL_ERROR(("wl%d: %s: no valid channel for \"%s\" nbands %d bandlocked %d\n", wlc->pub->unit, __func__, wlc_cm->country_abbrev, NBANDS(wlc), wlc->bandlocked));
+		WL_ERROR("wl%d: %s: no valid channel for \"%s\" nbands %d bandlocked %d\n",
+			 wlc->pub->unit, __func__,
+			 wlc_cm->country_abbrev, NBANDS(wlc), wlc->bandlocked);
 	} else
 	    if (mboolisset(wlc->pub->radio_disabled,
 		WL_RADIO_COUNTRY_DISABLE)) {
@@ -998,12 +1008,12 @@
 /* reset the quiet channels vector to the union of the restricted and radar channel sets */
 void wlc_quiet_channels_reset(wlc_cm_info_t *wlc_cm)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	uint i, j;
-	wlcband_t *band;
+	struct wlcband *band;
 	const chanvec_t *chanvec;
 
-	bzero(&wlc_cm->quiet_channels, sizeof(chanvec_t));
+	memset(&wlc_cm->quiet_channels, 0, sizeof(chanvec_t));
 
 	band = wlc->band;
 	for (i = 0; i < NBANDS(wlc);
@@ -1036,7 +1046,7 @@
  */
 bool wlc_valid_channel20_db(wlc_cm_info_t *wlc_cm, uint val)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 
 	return VALID_CHANNEL20(wlc, val) ||
 		(!wlc->bandlocked
@@ -1054,7 +1064,7 @@
 /* Is the channel valid for the current locale and current band? */
 bool wlc_valid_channel20(wlc_cm_info_t *wlc_cm, uint val)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 
 	return ((val < MAXCHANNEL) &&
 		isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
@@ -1064,7 +1074,7 @@
 /* Is the 40 MHz allowed for the current locale and specified band? */
 bool wlc_valid_40chanspec_in_band(wlc_cm_info_t *wlc_cm, uint bandunit)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 
 	return (((wlc_cm->bandstate[bandunit].
 		  locale_flags & (WLC_NO_MIMO | WLC_NO_40MHZ)) == 0)
@@ -1162,7 +1172,7 @@
 wlc_channel_set_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
 			 u8 local_constraint_qdbm)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	struct txpwr_limits txpwr;
 
 	wlc_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -1179,7 +1189,7 @@
 wlc_channel_set_txpower_limit(wlc_cm_info_t *wlc_cm,
 			      u8 local_constraint_qdbm)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	struct txpwr_limits txpwr;
 
 	wlc_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
@@ -1299,13 +1309,13 @@
 wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
 		       txpwr_limits_t *txpwr)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	uint i;
 	uint chan;
 	int maxpwr;
 	int delta;
 	const country_info_t *country;
-	wlcband_t *band;
+	struct wlcband *band;
 	const locale_info_t *li;
 	int conducted_max;
 	int conducted_ofdm_max;
@@ -1314,7 +1324,7 @@
 	int maxpwr_idx;
 	uint j;
 
-	bzero(txpwr, sizeof(txpwr_limits_t));
+	memset(txpwr, 0, sizeof(txpwr_limits_t));
 
 	if (!wlc_valid_chanspec_db(wlc_cm, chanspec)) {
 		country = wlc_country_lookup(wlc, wlc->autocountry_default);
@@ -1528,13 +1538,13 @@
 static bool
 wlc_valid_chanspec_ext(wlc_cm_info_t *wlc_cm, chanspec_t chspec, bool dualband)
 {
-	wlc_info_t *wlc = wlc_cm->wlc;
+	struct wlc_info *wlc = wlc_cm->wlc;
 	u8 channel = CHSPEC_CHANNEL(chspec);
 
 	/* check the chanspec */
 	if (wf_chspec_malformed(chspec)) {
-		WL_ERROR(("wl%d: malformed chanspec 0x%x\n", wlc->pub->unit,
-			  chspec));
+		WL_ERROR("wl%d: malformed chanspec 0x%x\n",
+			 wlc->pub->unit, chspec);
 		ASSERT(0);
 		return false;
 	}
diff --git a/drivers/staging/brcm80211/sys/wlc_event.c b/drivers/staging/brcm80211/sys/wlc_event.c
index 7e1bf0e..dabd709 100644
--- a/drivers/staging/brcm80211/sys/wlc_event.c
+++ b/drivers/staging/brcm80211/sys/wlc_event.c
@@ -16,9 +16,13 @@
 
 #include <linux/kernel.h>
 #include <bcmdefs.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <osl.h>
 #include <bcmutils.h>
 #include <siutils.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 #include <wlioctl.h>
 #include <wlc_cfg.h>
 #include <wlc_pub.h>
@@ -32,6 +36,7 @@
 #ifdef MSGTRACE
 #include <msgtrace.h>
 #endif
+#include <wl_dbg.h>
 
 /* Local prototypes */
 static void wlc_timer_cb(void *arg);
@@ -42,7 +47,7 @@
 	wlc_event_t *tail;
 	struct wlc_info *wlc;
 	void *wl;
-	wlc_pub_t *pub;
+	struct wlc_pub *pub;
 	bool tpending;
 	bool workpending;
 	struct wl_timer *timer;
@@ -53,7 +58,8 @@
 /*
  * Export functions
  */
-wlc_eventq_t *wlc_eventq_attach(wlc_pub_t *pub, struct wlc_info *wlc, void *wl,
+wlc_eventq_t *wlc_eventq_attach(struct wlc_pub *pub, struct wlc_info *wlc,
+				void *wl,
 				wlc_eventq_cb_t cb)
 {
 	wlc_eventq_t *eq;
@@ -69,8 +75,8 @@
 
 	eq->timer = wl_init_timer(eq->wl, wlc_timer_cb, eq, "eventq");
 	if (!eq->timer) {
-		WL_ERROR(("wl%d: wlc_eventq_attach: timer failed\n",
-			  pub->unit));
+		WL_ERROR("wl%d: wlc_eventq_attach: timer failed\n",
+			 pub->unit);
 		kfree(eq);
 		return NULL;
 	}
diff --git a/drivers/staging/brcm80211/sys/wlc_event.h b/drivers/staging/brcm80211/sys/wlc_event.h
index e443dae..e75582d 100644
--- a/drivers/staging/brcm80211/sys/wlc_event.h
+++ b/drivers/staging/brcm80211/sys/wlc_event.h
@@ -21,7 +21,8 @@
 
 typedef void (*wlc_eventq_cb_t) (void *arg);
 
-extern wlc_eventq_t *wlc_eventq_attach(wlc_pub_t *pub, struct wlc_info *wlc,
+extern wlc_eventq_t *wlc_eventq_attach(struct wlc_pub *pub,
+				       struct wlc_info *wlc,
 				       void *wl, wlc_eventq_cb_t cb);
 extern int wlc_eventq_detach(wlc_eventq_t *eq);
 extern int wlc_eventq_down(wlc_eventq_t *eq);
@@ -38,7 +39,7 @@
 extern int wlc_eventq_test_ind(wlc_eventq_t *eq, int et);
 extern int wlc_eventq_set_ind(wlc_eventq_t *eq, uint et, bool on);
 extern void wlc_eventq_flush(wlc_eventq_t *eq);
-extern void wlc_assign_event_msg(wlc_info_t *wlc, wl_event_msg_t *msg,
+extern void wlc_assign_event_msg(struct wlc_info *wlc, wl_event_msg_t *msg,
 				 const wlc_event_t *e, u8 *data,
 				 u32 len);
 
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index feaffcc..1d5d01a 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -15,9 +15,10 @@
  */
 #include <linux/kernel.h>
 #include <linux/ctype.h>
+#include <linux/etherdevice.h>
 #include <bcmdefs.h>
+#include <bcmdevs.h>
 #include <wlc_cfg.h>
-#include <linuxver.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <bcmwifi.h>
@@ -27,7 +28,7 @@
 #include <pcicfg.h>
 #include <bcmsrom.h>
 #include <wlioctl.h>
-#include <epivers.h>
+#include <sbhndpio.h>
 #include <sbhnddma.h>
 #include <hnddma.h>
 #include <hndpmu.h>
@@ -37,6 +38,7 @@
 #include <wlc_key.h>
 #include <wlc_bsscfg.h>
 #include <wlc_channel.h>
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_bmac.h>
 #include <wlc_scb.h>
@@ -47,27 +49,11 @@
 #include <wlc_ampdu.h>
 #include <wlc_event.h>
 #include <wl_export.h>
-#ifdef BCMSDIO
-#include <bcmsdh.h>
-#else
 #include "d11ucode_ext.h"
-#endif
-#ifdef WLC_HIGH_ONLY
-#include <bcm_rpc_tp.h>
-#include <bcm_rpc.h>
-#include <bcm_xdr.h>
-#include <wlc_rpc.h>
-#include <wlc_rpctx.h>
-#endif				/* WLC_HIGH_ONLY */
 #include <wlc_alloc.h>
 #include <net/mac80211.h>
+#include <wl_dbg.h>
 
-#ifdef WLC_HIGH_ONLY
-#undef R_REG
-#undef W_REG
-#define R_REG(osh, r) RPC_READ_REG(osh, r)
-#define W_REG(osh, r, v) RPC_WRITE_REG(osh, r, v)
-#endif
 
 /*
  * buffer length needed for wlc_format_ssid
@@ -107,12 +93,8 @@
 /* To inform the ucode of the last mcast frame posted so that it can clear moredata bit */
 #define BCMCFID(wlc, fid) wlc_bmac_write_shm((wlc)->hw, M_BCMC_FID, (fid))
 
-#ifndef WLC_HIGH_ONLY
-#define WLC_WAR16165(wlc) (BUSTYPE(wlc->pub->sih->bustype) == PCI_BUS && \
+#define WLC_WAR16165(wlc) (wlc->pub->sih->bustype == PCI_BUS && \
 				(!AP_ENAB(wlc->pub)) && (wlc->war16165))
-#else
-#define WLC_WAR16165(wlc) (false)
-#endif				/* WLC_HIGH_ONLY */
 
 /* debug/trace */
 uint wl_msg_level =
@@ -135,9 +117,11 @@
 
 #define SCAN_IN_PROGRESS(x)	0
 
+#define EPI_VERSION_NUM		0x054b0b00
+
 #ifdef BCMDBG
 /* pointer to most recently allocated wl/wlc */
-static wlc_info_t *wlc_info_dbg = (wlc_info_t *) (NULL);
+static struct wlc_info *wlc_info_dbg = (struct wlc_info *) (NULL);
 #endif
 
 /* IOVar table */
@@ -238,91 +222,91 @@
 #define WLC_REPLAY_CNTRS_VALUE	WPA_CAP_16_REPLAY_CNTRS
 
 /* local prototypes */
-extern void wlc_txq_enq(void *ctx, struct scb *scb, void *sdu, uint prec);
-static u16 BCMFASTPATH wlc_d11hdrs_mac80211(wlc_info_t *wlc,
-					       struct ieee80211_hw *hw, void *p,
+static u16 BCMFASTPATH wlc_d11hdrs_mac80211(struct wlc_info *wlc,
+					       struct ieee80211_hw *hw,
+					       struct sk_buff *p,
 					       struct scb *scb, uint frag,
 					       uint nfrags, uint queue,
 					       uint next_frag_len,
 					       wsec_key_t *key,
 					       ratespec_t rspec_override);
-bool wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw);
-void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend);
-static void wlc_bss_default_init(wlc_info_t *wlc);
-static void wlc_ucode_mac_upd(wlc_info_t *wlc);
-static ratespec_t mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band,
-					 u32 int_val);
-static void wlc_tx_prec_map_init(wlc_info_t *wlc);
+
+static void wlc_bss_default_init(struct wlc_info *wlc);
+static void wlc_ucode_mac_upd(struct wlc_info *wlc);
+static ratespec_t mac80211_wlc_set_nrate(struct wlc_info *wlc,
+					 struct wlcband *cur_band, u32 int_val);
+static void wlc_tx_prec_map_init(struct wlc_info *wlc);
 static void wlc_watchdog(void *arg);
 static void wlc_watchdog_by_timer(void *arg);
-static int wlc_set_rateset(wlc_info_t *wlc, wlc_rateset_t *rs_arg);
-static int wlc_iovar_rangecheck(wlc_info_t *wlc, u32 val,
+static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg);
+static int wlc_iovar_rangecheck(struct wlc_info *wlc, u32 val,
 				const bcm_iovar_t *vi);
-static u8 wlc_local_constraint_qdbm(wlc_info_t *wlc);
+static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc);
 
 /* send and receive */
-static wlc_txq_info_t *wlc_txq_alloc(wlc_info_t *wlc, osl_t *osh);
-static void wlc_txq_free(wlc_info_t *wlc, osl_t *osh, wlc_txq_info_t *qi);
-static void wlc_txflowcontrol_signal(wlc_info_t *wlc, wlc_txq_info_t *qi,
+static wlc_txq_info_t *wlc_txq_alloc(struct wlc_info *wlc,
+				     struct osl_info *osh);
+static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
+			 wlc_txq_info_t *qi);
+static void wlc_txflowcontrol_signal(struct wlc_info *wlc, wlc_txq_info_t *qi,
 				     bool on, int prio);
-static void wlc_txflowcontrol_reset(wlc_info_t *wlc);
-static u16 wlc_compute_airtime(wlc_info_t *wlc, ratespec_t rspec,
+static void wlc_txflowcontrol_reset(struct wlc_info *wlc);
+static u16 wlc_compute_airtime(struct wlc_info *wlc, ratespec_t rspec,
 				  uint length);
 static void wlc_compute_cck_plcp(ratespec_t rate, uint length, u8 *plcp);
 static void wlc_compute_ofdm_plcp(ratespec_t rate, uint length, u8 *plcp);
 static void wlc_compute_mimo_plcp(ratespec_t rate, uint length, u8 *plcp);
-static u16 wlc_compute_frame_dur(wlc_info_t *wlc, ratespec_t rate,
+static u16 wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate,
 				    u8 preamble_type, uint next_frag_len);
-static void wlc_recvctl(wlc_info_t *wlc, osl_t *osh, d11rxhdr_t *rxh,
-			void *p);
-static uint wlc_calc_frame_len(wlc_info_t *wlc, ratespec_t rate,
+static void wlc_recvctl(struct wlc_info *wlc, struct osl_info *osh,
+			d11rxhdr_t *rxh, struct sk_buff *p);
+static uint wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t rate,
 			       u8 preamble_type, uint dur);
-static uint wlc_calc_ack_time(wlc_info_t *wlc, ratespec_t rate,
+static uint wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rate,
 			      u8 preamble_type);
-static uint wlc_calc_cts_time(wlc_info_t *wlc, ratespec_t rate,
+static uint wlc_calc_cts_time(struct wlc_info *wlc, ratespec_t rate,
 			      u8 preamble_type);
 /* interrupt, up/down, band */
-static void wlc_setband(wlc_info_t *wlc, uint bandunit);
-static chanspec_t wlc_init_chanspec(wlc_info_t *wlc);
-static void wlc_bandinit_ordered(wlc_info_t *wlc, chanspec_t chanspec);
-static void wlc_bsinit(wlc_info_t *wlc);
-static int wlc_duty_cycle_set(wlc_info_t *wlc, int duty_cycle, bool isOFDM,
+static void wlc_setband(struct wlc_info *wlc, uint bandunit);
+static chanspec_t wlc_init_chanspec(struct wlc_info *wlc);
+static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec);
+static void wlc_bsinit(struct wlc_info *wlc);
+static int wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
 			      bool writeToShm);
-static void wlc_radio_hwdisable_upd(wlc_info_t *wlc);
-static bool wlc_radio_monitor_start(wlc_info_t *wlc);
+static void wlc_radio_hwdisable_upd(struct wlc_info *wlc);
+static bool wlc_radio_monitor_start(struct wlc_info *wlc);
 static void wlc_radio_timer(void *arg);
-static void wlc_radio_enable(wlc_info_t *wlc);
-static void wlc_radio_upd(wlc_info_t *wlc);
+static void wlc_radio_enable(struct wlc_info *wlc);
+static void wlc_radio_upd(struct wlc_info *wlc);
 
 /* scan, association, BSS */
-static uint wlc_calc_ba_time(wlc_info_t *wlc, ratespec_t rate,
+static uint wlc_calc_ba_time(struct wlc_info *wlc, ratespec_t rate,
 			     u8 preamble_type);
-static void wlc_update_mimo_band_bwcap(wlc_info_t *wlc, u8 bwcap);
-static void wlc_ht_update_sgi_rx(wlc_info_t *wlc, int val);
-void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode);
-static void wlc_ht_update_ldpc(wlc_info_t *wlc, s8 val);
-static void wlc_war16165(wlc_info_t *wlc, bool tx);
+static void wlc_update_mimo_band_bwcap(struct wlc_info *wlc, u8 bwcap);
+static void wlc_ht_update_sgi_rx(struct wlc_info *wlc, int val);
+static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val);
+static void wlc_war16165(struct wlc_info *wlc, bool tx);
 
 static void wlc_process_eventq(void *arg);
-static void wlc_wme_retries_write(wlc_info_t *wlc);
-static bool wlc_attach_stf_ant_init(wlc_info_t *wlc);
-static uint wlc_attach_module(wlc_info_t *wlc);
-static void wlc_detach_module(wlc_info_t *wlc);
-static void wlc_timers_deinit(wlc_info_t *wlc);
-static void wlc_down_led_upd(wlc_info_t *wlc);
-static uint wlc_down_del_timer(wlc_info_t *wlc);
-static void wlc_ofdm_rateset_war(wlc_info_t *wlc);
-static int _wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len,
+static void wlc_wme_retries_write(struct wlc_info *wlc);
+static bool wlc_attach_stf_ant_init(struct wlc_info *wlc);
+static uint wlc_attach_module(struct wlc_info *wlc);
+static void wlc_detach_module(struct wlc_info *wlc);
+static void wlc_timers_deinit(struct wlc_info *wlc);
+static void wlc_down_led_upd(struct wlc_info *wlc);
+static uint wlc_down_del_timer(struct wlc_info *wlc);
+static void wlc_ofdm_rateset_war(struct wlc_info *wlc);
+static int _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
 		      struct wlc_if *wlcif);
 
 #if defined(BCMDBG)
-void wlc_get_rcmta(wlc_info_t *wlc, int idx, struct ether_addr *addr)
+void wlc_get_rcmta(struct wlc_info *wlc, int idx, struct ether_addr *addr)
 {
 	d11regs_t *regs = wlc->regs;
 	u32 v32;
-	osl_t *osh;
+	struct osl_info *osh;
 
-	WL_TRACE(("wl%d: %s\n", WLCWLUNIT(wlc), __func__));
+	WL_TRACE("wl%d: %s\n", WLCWLUNIT(wlc), __func__);
 
 	ASSERT(wlc->pub->corerev > 4);
 
@@ -344,14 +328,14 @@
 #endif				/* defined(BCMDBG) */
 
 /* keep the chip awake if needed */
-bool wlc_stay_awake(wlc_info_t *wlc)
+bool wlc_stay_awake(struct wlc_info *wlc)
 {
 	return true;
 }
 
 /* conditions under which the PM bit should be set in outgoing frames and STAY_AWAKE is meaningful
  */
-bool wlc_ps_allowed(wlc_info_t *wlc)
+bool wlc_ps_allowed(struct wlc_info *wlc)
 {
 	int idx;
 	wlc_bsscfg_t *cfg;
@@ -378,9 +362,9 @@
 	return true;
 }
 
-void wlc_reset(wlc_info_t *wlc)
+void wlc_reset(struct wlc_info *wlc)
 {
-	WL_TRACE(("wl%d: wlc_reset\n", wlc->pub->unit));
+	WL_TRACE("wl%d: wlc_reset\n", wlc->pub->unit);
 
 	wlc->check_for_unaligned_tbtt = false;
 
@@ -389,34 +373,19 @@
 		wlc_statsupd(wlc);
 
 		/* reset our snapshot of macstat counters */
-		bzero((char *)wlc->core->macstat_snapshot, sizeof(macstat_t));
+		memset((char *)wlc->core->macstat_snapshot, 0,
+			sizeof(macstat_t));
 	}
 
 	wlc_bmac_reset(wlc->hw);
 	wlc_ampdu_reset(wlc->ampdu);
 	wlc->txretried = 0;
 
-#ifdef WLC_HIGH_ONLY
-	/* Need to set a flag(to be cleared asynchronously by BMAC driver with high call)
-	 *  in order to prevent wlc_rpctx_txreclaim() from screwing wlc_rpctx_getnexttxp(),
-	 *  which could be invoked by already QUEUED high call(s) from BMAC driver before
-	 *  wlc_bmac_reset() finishes.
-	 * It's not needed before in monolithic driver model because d11core interrupts would
-	 *  have been cleared instantly in wlc_bmac_reset() and no txstatus interrupt
-	 *  will come to driver to fetch those flushed dma pkt pointers.
-	 */
-	wlc->reset_bmac_pending = true;
-
-	wlc_rpctx_txreclaim(wlc->rpctx);
-
-	wlc_stf_phy_txant_upd(wlc);
-	wlc_phy_ant_rxdiv_set(wlc->band->pi, wlc->stf->ant_rx_ovr);
-#endif
 }
 
-void wlc_fatal_error(wlc_info_t *wlc)
+void wlc_fatal_error(struct wlc_info *wlc)
 {
-	WL_ERROR(("wl%d: fatal error, reinitializing\n", wlc->pub->unit));
+	WL_ERROR("wl%d: fatal error, reinitializing\n", wlc->pub->unit);
 	wl_init(wlc->wl);
 }
 
@@ -425,7 +394,7 @@
  * if other configurations are in conflict (bandlocked, 11n mode disabled,
  * invalid channel for current country, etc.)
  */
-static chanspec_t wlc_init_chanspec(wlc_info_t *wlc)
+static chanspec_t wlc_init_chanspec(struct wlc_info *wlc)
 {
 	chanspec_t chanspec =
 	    1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
@@ -441,7 +410,7 @@
 
 struct scb global_scb;
 
-static void wlc_init_scb(wlc_info_t *wlc, struct scb *scb)
+static void wlc_init_scb(struct wlc_info *wlc, struct scb *scb)
 {
 	int i;
 	scb->flags = SCB_WMECAP | SCB_HTCAP;
@@ -449,7 +418,7 @@
 		scb->seqnum[i] = 0;
 }
 
-void wlc_init(wlc_info_t *wlc)
+void wlc_init(struct wlc_info *wlc)
 {
 	d11regs_t *regs;
 	chanspec_t chanspec;
@@ -457,7 +426,7 @@
 	wlc_bsscfg_t *bsscfg;
 	bool mute = false;
 
-	WL_TRACE(("wl%d: wlc_init\n", wlc->pub->unit));
+	WL_TRACE("wl%d: wlc_init\n", wlc->pub->unit);
 
 	regs = wlc->regs;
 
@@ -583,13 +552,13 @@
 	}
 }
 
-void wlc_mac_bcn_promisc_change(wlc_info_t *wlc, bool promisc)
+void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc)
 {
 	wlc->bcnmisc_monitor = promisc;
 	wlc_mac_bcn_promisc(wlc);
 }
 
-void wlc_mac_bcn_promisc(wlc_info_t *wlc)
+void wlc_mac_bcn_promisc(struct wlc_info *wlc)
 {
 	if ((AP_ENAB(wlc->pub) && (N_ENAB(wlc->pub) || wlc->band->gmode)) ||
 	    wlc->bcnmisc_ibss || wlc->bcnmisc_scan || wlc->bcnmisc_monitor)
@@ -599,7 +568,7 @@
 }
 
 /* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-void wlc_mac_promisc(wlc_info_t *wlc)
+void wlc_mac_promisc(struct wlc_info *wlc)
 {
 	u32 promisc_bits = 0;
 
@@ -621,7 +590,7 @@
 }
 
 /* check if hps and wake states of sw and hw are in sync */
-bool wlc_ps_check(wlc_info_t *wlc)
+bool wlc_ps_check(struct wlc_info *wlc)
 {
 	bool res = true;
 	bool hps, wake;
@@ -636,8 +605,8 @@
 		 * to avoid assert
 		 */
 		if (tmp == 0xffffffff) {
-			WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit,
-				  __func__));
+			WL_ERROR("wl%d: %s: dead chip\n",
+				 wlc->pub->unit, __func__);
 			return DEVICEREMOVED(wlc);
 		}
 
@@ -646,7 +615,8 @@
 		if (hps != ((tmp & MCTL_HPS) != 0)) {
 			int idx;
 			wlc_bsscfg_t *cfg;
-			WL_ERROR(("wl%d: hps not sync, sw %d, maccontrol 0x%x\n", wlc->pub->unit, hps, tmp));
+			WL_ERROR("wl%d: hps not sync, sw %d, maccontrol 0x%x\n",
+				 wlc->pub->unit, hps, tmp);
 			FOREACH_BSS(wlc, idx, cfg) {
 				if (!BSSCFG_STA(cfg))
 					continue;
@@ -654,23 +624,14 @@
 
 			res = false;
 		}
-#ifdef WLC_LOW
 		/* For a monolithic build the wake check can be exact since it looks at wake
 		 * override bits. The MCTL_WAKE bit should match the 'wake' value.
 		 */
 		wake = STAY_AWAKE(wlc) || wlc->hw->wake_override;
 		wake_ok = (wake == ((tmp & MCTL_WAKE) != 0));
-#else
-		/* For a split build we will not have access to any wake overrides from the low
-		 * level. The check can only make sure the MCTL_WAKE bit is on if the high
-		 * level 'wake' value is true. If the high level 'wake' is false, the MCTL_WAKE
-		 * may be either true or false due to the low level override.
-		 */
-		wake = STAY_AWAKE(wlc);
-		wake_ok = (wake && ((tmp & MCTL_WAKE) != 0)) || !wake;
-#endif
 		if (hps && !wake_ok) {
-			WL_ERROR(("wl%d: wake not sync, sw %d maccontrol 0x%x\n", wlc->pub->unit, wake, tmp));
+			WL_ERROR("wl%d: wake not sync, sw %d maccontrol 0x%x\n",
+				 wlc->pub->unit, wake, tmp);
 			res = false;
 		}
 	}
@@ -679,7 +640,7 @@
 }
 
 /* push sw hps and wake state through hardware */
-void wlc_set_ps_ctrl(wlc_info_t *wlc)
+void wlc_set_ps_ctrl(struct wlc_info *wlc)
 {
 	u32 v1, v2;
 	bool hps, wake;
@@ -688,8 +649,8 @@
 	hps = PS_ALLOWED(wlc);
 	wake = hps ? (STAY_AWAKE(wlc)) : true;
 
-	WL_TRACE(("wl%d: wlc_set_ps_ctrl: hps %d wake %d\n", wlc->pub->unit,
-		  hps, wake));
+	WL_TRACE("wl%d: wlc_set_ps_ctrl: hps %d wake %d\n",
+		 wlc->pub->unit, hps, wake);
 
 	v1 = R_REG(wlc->osh, &wlc->regs->maccontrol);
 	v2 = 0;
@@ -714,7 +675,7 @@
 int wlc_set_mac(wlc_bsscfg_t *cfg)
 {
 	int err = 0;
-	wlc_info_t *wlc = cfg->wlc;
+	struct wlc_info *wlc = cfg->wlc;
 
 	if (cfg == wlc->cfg) {
 		/* enter the MAC addr into the RXE match registers */
@@ -731,7 +692,7 @@
  */
 void wlc_set_bssid(wlc_bsscfg_t *cfg)
 {
-	wlc_info_t *wlc = cfg->wlc;
+	struct wlc_info *wlc = cfg->wlc;
 
 	/* if primary config, we need to update BSSID in RXE match registers */
 	if (cfg == wlc->cfg) {
@@ -748,7 +709,7 @@
  * Suspend the MAC and update the slot timing
  * for standard 11b/g (20us slots) or shortslot 11g (9us slots).
  */
-void wlc_switch_shortslot(wlc_info_t *wlc, bool shortslot)
+void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot)
 {
 	int idx;
 	wlc_bsscfg_t *cfg;
@@ -776,7 +737,7 @@
 	wlc_bmac_set_shortslot(wlc->hw, shortslot);
 }
 
-static u8 wlc_local_constraint_qdbm(wlc_info_t *wlc)
+static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc)
 {
 	u8 local;
 	s16 local_max;
@@ -803,7 +764,7 @@
 }
 
 /* propagate home chanspec to all bsscfgs in case bsscfg->current_bss->chanspec is referenced */
-void wlc_set_home_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
+void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
 {
 	if (wlc->home_chanspec != chanspec) {
 		int idx;
@@ -821,7 +782,7 @@
 	}
 }
 
-static void wlc_set_phy_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
+static void wlc_set_phy_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
 {
 	/* Save our copy of the chanspec */
 	wlc->chanspec = chanspec;
@@ -840,15 +801,15 @@
 
 }
 
-void wlc_set_chanspec(wlc_info_t *wlc, chanspec_t chanspec)
+void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
 {
 	uint bandunit;
 	bool switchband = false;
 	chanspec_t old_chanspec = wlc->chanspec;
 
 	if (!wlc_valid_chanspec_db(wlc->cmi, chanspec)) {
-		WL_ERROR(("wl%d: %s: Bad channel %d\n",
-			  wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)));
+		WL_ERROR("wl%d: %s: Bad channel %d\n",
+			 wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
 		ASSERT(wlc_valid_chanspec_db(wlc->cmi, chanspec));
 		return;
 	}
@@ -859,7 +820,9 @@
 		if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
 			switchband = true;
 			if (wlc->bandlocked) {
-				WL_ERROR(("wl%d: %s: chspec %d band is locked!\n", wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)));
+				WL_ERROR("wl%d: %s: chspec %d band is locked!\n",
+					 wlc->pub->unit, __func__,
+					 CHSPEC_CHANNEL(chanspec));
 				return;
 			}
 			/* BMAC_NOTE: should the setband call come after the wlc_bmac_chanspec() ?
@@ -895,7 +858,7 @@
 }
 
 #if defined(BCMDBG)
-static int wlc_get_current_txpwr(wlc_info_t *wlc, void *pwr, uint len)
+static int wlc_get_current_txpwr(struct wlc_info *wlc, void *pwr, uint len)
 {
 	txpwr_limits_t txpwr;
 	tx_power_t power;
@@ -909,7 +872,7 @@
 	else if (len < sizeof(tx_power_t))
 		return BCME_BUFTOOSHORT;
 
-	bzero(&power, sizeof(tx_power_t));
+	memset(&power, 0, sizeof(tx_power_t));
 
 	power.chanspec = WLC_BAND_PI_RADIO_CHANSPEC;
 	if (wlc->pub->associated)
@@ -1031,7 +994,7 @@
 	} else {
 		int band_idx = CHSPEC_IS2G(power.chanspec) ? 0 : 1;
 
-		bzero(old_power, sizeof(tx_power_legacy_t));
+		memset(old_power, 0, sizeof(tx_power_legacy_t));
 
 		old_power->txpwr_local_max = power.local_max;
 		old_power->txpwr_local_constraint = power.local_constraint;
@@ -1064,7 +1027,7 @@
 }
 #endif				/* defined(BCMDBG) */
 
-static u32 wlc_watchdog_backup_bi(wlc_info_t *wlc)
+static u32 wlc_watchdog_backup_bi(struct wlc_info *wlc)
 {
 	u32 bi;
 	bi = 2 * wlc->cfg->current_bss->dtim_period *
@@ -1083,7 +1046,7 @@
 /* Change to run the watchdog either from a periodic timer or from tbtt handler.
  * Call watchdog from tbtt handler if tbtt is true, watchdog timer otherwise.
  */
-void wlc_watchdog_upd(wlc_info_t *wlc, bool tbtt)
+void wlc_watchdog_upd(struct wlc_info *wlc, bool tbtt)
 {
 	/* make sure changing watchdog driver is allowed */
 	if (!wlc->pub->up || !wlc->pub->align_wd_tbtt)
@@ -1112,7 +1075,7 @@
 	}
 }
 
-ratespec_t wlc_lowest_basic_rspec(wlc_info_t *wlc, wlc_rateset_t *rs)
+ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc, wlc_rateset_t *rs)
 {
 	ratespec_t lowest_basic_rspec;
 	uint i;
@@ -1140,7 +1103,7 @@
  *  ratespec     CCK		ant = wlc->stf->txant
  *  		OFDM		ant = 3
  */
-void wlc_beacon_phytxctl_txant_upd(wlc_info_t *wlc, ratespec_t bcn_rspec)
+void wlc_beacon_phytxctl_txant_upd(struct wlc_info *wlc, ratespec_t bcn_rspec)
 {
 	u16 phyctl;
 	u16 phytxant = wlc->stf->phytxant;
@@ -1159,9 +1122,9 @@
 /* centralized protection config change function to simplify debugging, no consistency checking
  * this should be called only on changes to avoid overhead in periodic function
 */
-void wlc_protection_upd(wlc_info_t *wlc, uint idx, int val)
+void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val)
 {
-	WL_TRACE(("wlc_protection_upd: idx %d, val %d\n", idx, val));
+	WL_TRACE("wlc_protection_upd: idx %d, val %d\n", idx, val);
 
 	switch (idx) {
 	case WLC_PROT_G_SPEC:
@@ -1205,7 +1168,7 @@
 
 }
 
-static void wlc_ht_update_sgi_rx(wlc_info_t *wlc, int val)
+static void wlc_ht_update_sgi_rx(struct wlc_info *wlc, int val)
 {
 	wlc->ht_cap.cap &= ~(HT_CAP_SHORT_GI_20 | HT_CAP_SHORT_GI_40);
 	wlc->ht_cap.cap |= (val & WLC_N_SGI_20) ? HT_CAP_SHORT_GI_20 : 0;
@@ -1217,7 +1180,7 @@
 	}
 }
 
-static void wlc_ht_update_ldpc(wlc_info_t *wlc, s8 val)
+static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val)
 {
 	wlc->stf->ldpc = val;
 
@@ -1236,7 +1199,7 @@
  * ucode, hwmac update
  *    Channel dependent updates for ucode and hw
  */
-static void wlc_ucode_mac_upd(wlc_info_t *wlc)
+static void wlc_ucode_mac_upd(struct wlc_info *wlc)
 {
 	/* enable or disable any active IBSSs depending on whether or not
 	 * we are on the home channel
@@ -1263,13 +1226,13 @@
 	wlc_mac_promisc(wlc);
 }
 
-static void wlc_bandinit_ordered(wlc_info_t *wlc, chanspec_t chanspec)
+static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec)
 {
 	wlc_rateset_t default_rateset;
 	uint parkband;
 	uint i, band_order[2];
 
-	WL_TRACE(("wl%d: wlc_bandinit_ordered\n", wlc->pub->unit));
+	WL_TRACE("wl%d: wlc_bandinit_ordered\n", wlc->pub->unit);
 	/*
 	 * We might have been bandlocked during down and the chip power-cycled (hibernate).
 	 * figure out the right band to park on
@@ -1310,10 +1273,10 @@
 }
 
 /* band-specific init */
-static void WLBANDINITFN(wlc_bsinit) (wlc_info_t *wlc)
+static void WLBANDINITFN(wlc_bsinit) (struct wlc_info *wlc)
 {
-	WL_TRACE(("wl%d: wlc_bsinit: bandunit %d\n", wlc->pub->unit,
-		  wlc->band->bandunit));
+	WL_TRACE("wl%d: wlc_bsinit: bandunit %d\n",
+		 wlc->pub->unit, wlc->band->bandunit);
 
 	/* write ucode ACK/CTS rate table */
 	wlc_set_ratetable(wlc);
@@ -1328,7 +1291,7 @@
 }
 
 /* switch to and initialize new band */
-static void WLBANDINITFN(wlc_setband) (wlc_info_t *wlc, uint bandunit)
+static void WLBANDINITFN(wlc_setband) (struct wlc_info *wlc, uint bandunit)
 {
 	int idx;
 	wlc_bsscfg_t *cfg;
@@ -1353,7 +1316,7 @@
 }
 
 /* Initialize a WME Parameter Info Element with default STA parameters from WMM Spec, Table 12 */
-void wlc_wme_initparams_sta(wlc_info_t *wlc, wme_param_ie_t *pe)
+void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe)
 {
 	static const wme_param_ie_t stadef = {
 		WME_OUI,
@@ -1378,7 +1341,7 @@
 	memcpy(pe, &stadef, sizeof(*pe));
 }
 
-void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg, bool suspend)
+void wlc_wme_setparams(struct wlc_info *wlc, u16 aci, void *arg, bool suspend)
 {
 	int i;
 	shm_acparams_t acp_shm;
@@ -1389,7 +1352,7 @@
 
 	/* Only apply params if the core is out of reset and has clocks */
 	if (!wlc->clk) {
-		WL_ERROR(("wl%d: %s : no-clock\n", wlc->pub->unit, __func__));
+		WL_ERROR("wl%d: %s : no-clock\n", wlc->pub->unit, __func__);
 		return;
 	}
 
@@ -1402,7 +1365,7 @@
 	wlc->wme_admctl = 0;
 
 	do {
-		bzero((char *)&acp_shm, sizeof(shm_acparams_t));
+		memset((char *)&acp_shm, 0, sizeof(shm_acparams_t));
 		/* find out which ac this set of params applies to */
 		ASSERT(aci < AC_COUNT);
 		/* set the admission control policy for this AC */
@@ -1421,8 +1384,8 @@
 
 		if (acp_shm.aifs < EDCF_AIFSN_MIN
 		    || acp_shm.aifs > EDCF_AIFSN_MAX) {
-			WL_ERROR(("wl%d: wlc_edcf_setparams: bad aifs %d\n",
-				  wlc->pub->unit, acp_shm.aifs));
+			WL_ERROR("wl%d: wlc_edcf_setparams: bad aifs %d\n",
+				 wlc->pub->unit, acp_shm.aifs);
 			continue;
 		}
 
@@ -1459,7 +1422,7 @@
 
 void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend)
 {
-	wlc_info_t *wlc = cfg->wlc;
+	struct wlc_info *wlc = cfg->wlc;
 	uint aci, i, j;
 	edcf_acparam_t *edcf_acp;
 	shm_acparams_t acp_shm;
@@ -1483,7 +1446,7 @@
 	wlc->wme_admctl = 0;
 
 	for (i = 0; i < AC_COUNT; i++, edcf_acp++) {
-		bzero((char *)&acp_shm, sizeof(shm_acparams_t));
+		memset((char *)&acp_shm, 0, sizeof(shm_acparams_t));
 		/* find out which ac this set of params applies to */
 		aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
 		ASSERT(aci < AC_COUNT);
@@ -1505,8 +1468,8 @@
 
 		if (acp_shm.aifs < EDCF_AIFSN_MIN
 		    || acp_shm.aifs > EDCF_AIFSN_MAX) {
-			WL_ERROR(("wl%d: wlc_edcf_setparams: bad aifs %d\n",
-				  wlc->pub->unit, acp_shm.aifs));
+			WL_ERROR("wl%d: wlc_edcf_setparams: bad aifs %d\n",
+				 wlc->pub->unit, acp_shm.aifs);
 			continue;
 		}
 
@@ -1548,20 +1511,19 @@
 
 }
 
-bool wlc_timers_init(wlc_info_t *wlc, int unit)
+bool wlc_timers_init(struct wlc_info *wlc, int unit)
 {
 	wlc->wdtimer = wl_init_timer(wlc->wl, wlc_watchdog_by_timer,
 		wlc, "watchdog");
 	if (!wlc->wdtimer) {
-		WL_ERROR(("wl%d:  wl_init_timer for wdtimer failed\n", unit));
+		WL_ERROR("wl%d:  wl_init_timer for wdtimer failed\n", unit);
 		goto fail;
 	}
 
 	wlc->radio_timer = wl_init_timer(wlc->wl, wlc_radio_timer,
 		wlc, "radio");
 	if (!wlc->radio_timer) {
-		WL_ERROR(("wl%d:  wl_init_timer for radio_timer failed\n",
-			  unit));
+		WL_ERROR("wl%d:  wl_init_timer for radio_timer failed\n", unit);
 		goto fail;
 	}
 
@@ -1575,7 +1537,7 @@
  * Initialize wlc_info default values ...
  * may get overrides later in this function
  */
-void wlc_info_init(wlc_info_t *wlc, int unit)
+void wlc_info_init(struct wlc_info *wlc, int unit)
 {
 	int i;
 	/* Assume the device is there until proven otherwise */
@@ -1686,7 +1648,7 @@
 	wlc->pr80838_war = true;
 }
 
-static bool wlc_state_bmac_sync(wlc_info_t *wlc)
+static bool wlc_state_bmac_sync(struct wlc_info *wlc)
 {
 	wlc_bmac_state_t state_bmac;
 
@@ -1700,7 +1662,7 @@
 	return true;
 }
 
-static uint wlc_attach_module(wlc_info_t *wlc)
+static uint wlc_attach_module(struct wlc_info *wlc)
 {
 	uint err = 0;
 	uint unit;
@@ -1708,15 +1670,14 @@
 
 	wlc->asi = wlc_antsel_attach(wlc, wlc->osh, wlc->pub, wlc->hw);
 	if (wlc->asi == NULL) {
-		WL_ERROR(("wl%d: wlc_attach: wlc_antsel_attach failed\n",
-			  unit));
+		WL_ERROR("wl%d: wlc_attach: wlc_antsel_attach failed\n", unit);
 		err = 44;
 		goto fail;
 	}
 
 	wlc->ampdu = wlc_ampdu_attach(wlc);
 	if (wlc->ampdu == NULL) {
-		WL_ERROR(("wl%d: wlc_attach: wlc_ampdu_attach failed\n", unit));
+		WL_ERROR("wl%d: wlc_attach: wlc_ampdu_attach failed\n", unit);
 		err = 50;
 		goto fail;
 	}
@@ -1725,13 +1686,13 @@
 	wlc->eventq =
 	    wlc_eventq_attach(wlc->pub, wlc, wlc->wl, wlc_process_eventq);
 	if (wlc->eventq == NULL) {
-		WL_ERROR(("wl%d: wlc_attach: wlc_eventq_attachfailed\n", unit));
+		WL_ERROR("wl%d: wlc_attach: wlc_eventq_attachfailed\n", unit);
 		err = 57;
 		goto fail;
 	}
 
 	if ((wlc_stf_attach(wlc) != 0)) {
-		WL_ERROR(("wl%d: wlc_attach: wlc_stf_attach failed\n", unit));
+		WL_ERROR("wl%d: wlc_attach: wlc_stf_attach failed\n", unit);
 		err = 68;
 		goto fail;
 	}
@@ -1739,9 +1700,9 @@
 	return err;
 }
 
-wlc_pub_t *wlc_pub(void *wlc)
+struct wlc_pub *wlc_pub(void *wlc)
 {
-	return ((wlc_info_t *) wlc)->pub;
+	return ((struct wlc_info *) wlc)->pub;
 }
 
 #define CHIP_SUPPORTS_11N(wlc) 	1
@@ -1750,25 +1711,25 @@
  * The common driver entry routine. Error codes should be unique
  */
 void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit, bool piomode,
-		 osl_t *osh, void *regsva, uint bustype, void *btparam,
-		 uint *perr)
+		 struct osl_info *osh, void *regsva, uint bustype,
+		 void *btparam, uint *perr)
 {
-	wlc_info_t *wlc;
+	struct wlc_info *wlc;
 	uint err = 0;
 	uint j;
-	wlc_pub_t *pub;
+	struct wlc_pub *pub;
 	wlc_txq_info_t *qi;
 	uint n_disabled;
 
-	WL_NONE(("wl%d: %s: vendor 0x%x device 0x%x\n", unit, __func__, vendor,
-		 device));
+	WL_NONE("wl%d: %s: vendor 0x%x device 0x%x\n",
+		unit, __func__, vendor, device);
 
 	ASSERT(WSEC_MAX_RCMTA_KEYS <= WSEC_MAX_KEYS);
 	ASSERT(WSEC_MAX_DEFAULT_KEYS == WLC_DEFAULT_KEYS);
 
 	/* some code depends on packed structures */
-	ASSERT(sizeof(struct ether_addr) == ETHER_ADDR_LEN);
-	ASSERT(sizeof(struct ether_header) == ETHER_HDR_LEN);
+	ASSERT(sizeof(struct ether_addr) == ETH_ALEN);
+	ASSERT(sizeof(struct ether_header) == ETH_HLEN);
 	ASSERT(sizeof(d11regs_t) == SI_CORE_SIZE);
 	ASSERT(sizeof(ofdm_phy_hdr_t) == D11_PHY_HDR_LEN);
 	ASSERT(sizeof(cck_phy_hdr_t) == D11_PHY_HDR_LEN);
@@ -1780,8 +1741,10 @@
 	ASSERT(sizeof(struct dot11_bcn_prb) == DOT11_BCN_PRB_LEN);
 	ASSERT(sizeof(tx_status_t) == TXSTATUS_LEN);
 	ASSERT(sizeof(ht_cap_ie_t) == HT_CAP_IE_LEN);
+#ifdef BRCM_FULLMAC
 	ASSERT(offsetof(wl_scan_params_t, channel_list) ==
 	       WL_SCAN_PARAMS_FIXED_SIZE);
+#endif
 	ASSERT(IS_ALIGNED(offsetof(wsec_key_t, data), sizeof(u32)));
 	ASSERT(ISPOWEROF2(MA_WINDOW_SZ));
 
@@ -1797,8 +1760,8 @@
 	       || (WPA_CAP_4_REPLAY_CNTRS == WLC_REPLAY_CNTRS_VALUE
 		   && 4 == WLC_NUMRXIVS));
 
-	/* allocate wlc_info_t state and its substructures */
-	wlc = (wlc_info_t *) wlc_attach_malloc(osh, unit, &err, device);
+	/* allocate struct wlc_info state and its substructures */
+	wlc = (struct wlc_info *) wlc_attach_malloc(osh, unit, &err, device);
 	if (wlc == NULL)
 		goto fail;
 	wlc->osh = osh;
@@ -1819,7 +1782,7 @@
 	/* By default restrict TKIP associations from 11n STA's */
 	wlc->ht_wsec_restriction = WLC_HT_TKIP_RESTRICT;
 
-	/* populate wlc_info_t with default values  */
+	/* populate struct wlc_info with default values  */
 	wlc_info_init(wlc, unit);
 
 	/* update sta/ap related parameters */
@@ -1851,10 +1814,6 @@
 	/* propagate *vars* from BMAC driver to high driver */
 	wlc_bmac_copyfrom_vars(wlc->hw, &pub->vars, &wlc->vars_size);
 
-#ifdef WLC_HIGH_ONLY
-	WL_TRACE(("nvram : vars %p , vars_size %d\n", pub->vars,
-		  wlc->vars_size));
-#endif
 
 	/* set maximum allowed duty cycle */
 	wlc->tx_duty_cycle_ofdm =
@@ -1872,19 +1831,17 @@
 	wlc_phy_stf_chain_init(wlc->band->pi, wlc->stf->hw_txchain,
 			       wlc->stf->hw_rxchain);
 
-#ifdef WLC_LOW
 	/* pull up some info resulting from the low attach */
 	{
 		int i;
 		for (i = 0; i < NFIFO; i++)
 			wlc->core->txavail[i] = wlc->hw->txavail[i];
 	}
-#endif				/* WLC_LOW */
 
 	wlc_bmac_hw_etheraddr(wlc->hw, &wlc->perm_etheraddr);
 
 	bcopy((char *)&wlc->perm_etheraddr, (char *)&pub->cur_etheraddr,
-	      ETHER_ADDR_LEN);
+	      ETH_ALEN);
 
 	for (j = 0; j < NBANDS(wlc); j++) {
 		/* Use band 1 for single band 11a */
@@ -1942,7 +1899,7 @@
 		goto fail;
 
 	if (!wlc_timers_init(wlc, unit)) {
-		WL_ERROR(("wl%d: %s: wlc_init_timer failed\n", unit, __func__));
+		WL_ERROR("wl%d: %s: wlc_init_timer failed\n", unit, __func__);
 		err = 32;
 		goto fail;
 	}
@@ -1950,8 +1907,8 @@
 	/* depend on rateset, gmode */
 	wlc->cmi = wlc_channel_mgr_attach(wlc);
 	if (!wlc->cmi) {
-		WL_ERROR(("wl%d: %s: wlc_channel_mgr_attach failed\n", unit,
-			  __func__));
+		WL_ERROR("wl%d: %s: wlc_channel_mgr_attach failed\n",
+			 unit, __func__);
 		err = 33;
 		goto fail;
 	}
@@ -1966,8 +1923,8 @@
 	/* allocate our initial queue */
 	qi = wlc_txq_alloc(wlc, osh);
 	if (qi == NULL) {
-		WL_ERROR(("wl%d: %s: failed to malloc tx queue\n", unit,
-			  __func__));
+		WL_ERROR("wl%d: %s: failed to malloc tx queue\n",
+			 unit, __func__);
 		err = 100;
 		goto fail;
 	}
@@ -2037,7 +1994,7 @@
 	wlc_radio_mpc_upd(wlc);
 
 	if (WLANTSEL_ENAB(wlc)) {
-		if ((CHIPID(wlc->pub->sih->chip)) == BCM43235_CHIP_ID) {
+		if ((wlc->pub->sih->chip) == BCM43235_CHIP_ID) {
 			if ((getintvar(wlc->pub->vars, "aa2g") == 7) ||
 			    (getintvar(wlc->pub->vars, "aa5g") == 7)) {
 				wlc_bmac_antsel_set(wlc->hw, 1);
@@ -2053,7 +2010,7 @@
 	return (void *)wlc;
 
  fail:
-	WL_ERROR(("wl%d: %s: failed with err %d\n", unit, __func__, err));
+	WL_ERROR("wl%d: %s: failed with err %d\n", unit, __func__, err);
 	if (wlc)
 		wlc_detach(wlc);
 
@@ -2062,7 +2019,7 @@
 	return NULL;
 }
 
-static void wlc_attach_antgain_init(wlc_info_t *wlc)
+static void wlc_attach_antgain_init(struct wlc_info *wlc)
 {
 	uint unit;
 	unit = wlc->pub->unit;
@@ -2071,7 +2028,8 @@
 		/* default antenna gain for srom rev 1 is 2 dBm (8 qdbm) */
 		wlc->band->antgain = 8;
 	} else if (wlc->band->antgain == -1) {
-		WL_ERROR(("wl%d: %s: Invalid antennas available in srom, using 2dB\n", unit, __func__));
+		WL_ERROR("wl%d: %s: Invalid antennas available in srom, using 2dB\n",
+			 unit, __func__);
 		wlc->band->antgain = 8;
 	} else {
 		s8 gain, fract;
@@ -2093,7 +2051,7 @@
 	}
 }
 
-static bool wlc_attach_stf_ant_init(wlc_info_t *wlc)
+static bool wlc_attach_stf_ant_init(struct wlc_info *wlc)
 {
 	int aa;
 	uint unit;
@@ -2110,7 +2068,8 @@
 		aa = (s8) getintvar(vars,
 				      (BAND_5G(bandtype) ? "aa1" : "aa0"));
 	if ((aa < 1) || (aa > 15)) {
-		WL_ERROR(("wl%d: %s: Invalid antennas available in srom (0x%x), using 3.\n", unit, __func__, aa));
+		WL_ERROR("wl%d: %s: Invalid antennas available in srom (0x%x), using 3\n",
+			 unit, __func__, aa);
 		aa = 3;
 	}
 
@@ -2132,136 +2091,8 @@
 	return true;
 }
 
-#ifdef WLC_HIGH_ONLY
-/* HIGH_ONLY bmac_attach, which sync over LOW_ONLY bmac_attach states */
-int wlc_bmac_attach(wlc_info_t *wlc, u16 vendor, u16 device, uint unit,
-		    bool piomode, osl_t *osh, void *regsva, uint bustype,
-		    void *btparam)
-{
-	wlc_bmac_revinfo_t revinfo;
-	uint idx = 0;
-	rpc_info_t *rpc = (rpc_info_t *) btparam;
 
-	ASSERT(bustype == RPC_BUS);
-
-	/* install the rpc handle in the various state structures used by stub RPC functions */
-	wlc->rpc = rpc;
-	wlc->hw->rpc = rpc;
-	wlc->hw->osh = osh;
-
-	wlc->regs = 0;
-
-	wlc->rpctx = wlc_rpctx_attach(wlc->pub, wlc);
-	if (wlc->rpctx == NULL)
-		return -1;
-
-	/*
-	 * FIFO 0
-	 * TX: TX_AC_BK_FIFO (TX AC Background data packets)
-	 */
-	/* Always initialized */
-	ASSERT(NRPCTXBUFPOST <= NTXD);
-	wlc_rpctx_fifoinit(wlc->rpctx, TX_DATA_FIFO, NRPCTXBUFPOST);
-	wlc_rpctx_fifoinit(wlc->rpctx, TX_CTL_FIFO, NRPCTXBUFPOST);
-	wlc_rpctx_fifoinit(wlc->rpctx, TX_BCMC_FIFO, NRPCTXBUFPOST);
-
-	/* VI and BK inited only if WME */
-	if (WME_ENAB(wlc->pub)) {
-		wlc_rpctx_fifoinit(wlc->rpctx, TX_AC_BK_FIFO, NRPCTXBUFPOST);
-		wlc_rpctx_fifoinit(wlc->rpctx, TX_AC_VI_FIFO, NRPCTXBUFPOST);
-	}
-
-	/* Allocate SB handle */
-	wlc->pub->sih = osl_malloc(wlc->osh, sizeof(si_t));
-	if (!wlc->pub->sih)
-		return -1;
-	bzero(wlc->pub->sih, sizeof(si_t));
-
-	/* sync up revinfo with BMAC */
-	bzero(&revinfo, sizeof(wlc_bmac_revinfo_t));
-	if (wlc_bmac_revinfo_get(wlc->hw, &revinfo) != 0)
-		return -1;
-	wlc->vendorid = (u16) revinfo.vendorid;
-	wlc->deviceid = (u16) revinfo.deviceid;
-
-	wlc->pub->boardrev = (u16) revinfo.boardrev;
-	wlc->pub->corerev = revinfo.corerev;
-	wlc->pub->sromrev = (u8) revinfo.sromrev;
-	wlc->pub->sih->chiprev = revinfo.chiprev;
-	wlc->pub->sih->chip = revinfo.chip;
-	wlc->pub->sih->chippkg = revinfo.chippkg;
-	wlc->pub->sih->boardtype = revinfo.boardtype;
-	wlc->pub->sih->boardvendor = revinfo.boardvendor;
-	wlc->pub->sih->bustype = revinfo.bustype;
-	wlc->pub->sih->buscoretype = revinfo.buscoretype;
-	wlc->pub->sih->buscorerev = revinfo.buscorerev;
-	wlc->pub->sih->issim = (bool) revinfo.issim;
-	wlc->pub->sih->rpc = rpc;
-
-	if (revinfo.nbands == 0 || revinfo.nbands > 2)
-		return -1;
-	wlc->pub->_nbands = revinfo.nbands;
-
-	for (idx = 0; idx < wlc->pub->_nbands; idx++) {
-		uint bandunit, bandtype;	/* To access bandstate */
-		wlc_phy_t *pi = osl_malloc(wlc->osh, sizeof(wlc_phy_t));
-
-		if (!pi)
-			return -1;
-		bzero(pi, sizeof(wlc_phy_t));
-		pi->rpc = rpc;
-
-		bandunit = revinfo.band[idx].bandunit;
-		bandtype = revinfo.band[idx].bandtype;
-		wlc->bandstate[bandunit]->radiorev =
-		    (u8) revinfo.band[idx].radiorev;
-		wlc->bandstate[bandunit]->phytype =
-		    (u16) revinfo.band[idx].phytype;
-		wlc->bandstate[bandunit]->phyrev =
-		    (u16) revinfo.band[idx].phyrev;
-		wlc->bandstate[bandunit]->radioid =
-		    (u16) revinfo.band[idx].radioid;
-		wlc->bandstate[bandunit]->abgphy_encore =
-		    revinfo.band[idx].abgphy_encore;
-
-		wlc->bandstate[bandunit]->pi = pi;
-		wlc->bandstate[bandunit]->bandunit = bandunit;
-		wlc->bandstate[bandunit]->bandtype = bandtype;
-	}
-
-	/* misc stuff */
-
-	return 0;
-}
-
-/* Free the convenience handles */
-int wlc_bmac_detach(wlc_info_t *wlc)
-{
-	uint idx;
-
-	if (wlc->pub->sih) {
-		osl_mfree(wlc->osh, (void *)wlc->pub->sih, sizeof(si_t));
-		wlc->pub->sih = NULL;
-	}
-
-	for (idx = 0; idx < MAXBANDS; idx++)
-		if (wlc->bandstate[idx]->pi) {
-			kfree(wlc->bandstate[idx]->pi);
-			wlc->bandstate[idx]->pi = NULL;
-		}
-
-	if (wlc->rpctx) {
-		wlc_rpctx_detach(wlc->rpctx);
-		wlc->rpctx = NULL;
-	}
-
-	return 0;
-
-}
-
-#endif				/* WLC_HIGH_ONLY */
-
-static void wlc_timers_deinit(wlc_info_t *wlc)
+static void wlc_timers_deinit(struct wlc_info *wlc)
 {
 	/* free timer state */
 	if (wlc->wdtimer) {
@@ -2274,7 +2105,7 @@
 	}
 }
 
-static void wlc_detach_module(wlc_info_t *wlc)
+static void wlc_detach_module(struct wlc_info *wlc)
 {
 	if (wlc->asi) {
 		wlc_antsel_detach(wlc->asi);
@@ -2297,7 +2128,7 @@
  *    One exception is sb register access, which is possible if crystal is turned on
  * After "down" state, driver should avoid software timer with the exception of radio_monitor.
  */
-uint wlc_detach(wlc_info_t *wlc)
+uint wlc_detach(struct wlc_info *wlc)
 {
 	uint i;
 	uint callbacks = 0;
@@ -2305,7 +2136,7 @@
 	if (wlc == NULL)
 		return 0;
 
-	WL_TRACE(("wl%d: %s\n", wlc->pub->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc->pub->unit, __func__);
 
 	ASSERT(!wlc->pub->up);
 
@@ -2328,15 +2159,6 @@
 
 	/* free other state */
 
-#ifdef WLC_HIGH_ONLY
-	/* High-Only driver has an allocated copy of vars, monolithic just
-	 * references the wlc->hw->vars which is freed in wlc_bmac_detach()
-	 */
-	if (wlc->pub->vars) {
-		kfree(wlc->pub->vars);
-		wlc->pub->vars = NULL;
-	}
-#endif
 
 #ifdef BCMDBG
 	if (wlc->country_ie_override) {
@@ -2360,13 +2182,6 @@
 	/* Detach from iovar manager */
 	wlc_module_unregister(wlc->pub, "wlc_iovars", wlc);
 
-	/*
-	   if (wlc->ap) {
-	   wlc_ap_detach(wlc->ap);
-	   wlc->ap = NULL;
-	   }
-	 */
-
 	while (wlc->tx_queues != NULL) {
 		wlc_txq_free(wlc, wlc->osh, wlc->tx_queues);
 	}
@@ -2383,7 +2198,7 @@
 }
 
 /* update state that depends on the current value of "ap" */
-void wlc_ap_upd(wlc_info_t *wlc)
+void wlc_ap_upd(struct wlc_info *wlc)
 {
 	if (AP_ENAB(wlc->pub))
 		wlc->PLCPHdr_override = WLC_PLCP_AUTO;	/* AP: short not allowed, but not enforced */
@@ -2398,7 +2213,7 @@
 }
 
 /* read hwdisable state and propagate to wlc flag */
-static void wlc_radio_hwdisable_upd(wlc_info_t *wlc)
+static void wlc_radio_hwdisable_upd(struct wlc_info *wlc)
 {
 	if (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO || wlc->pub->hw_off)
 		return;
@@ -2411,17 +2226,17 @@
 }
 
 /* return true if Minimum Power Consumption should be entered, false otherwise */
-bool wlc_is_non_delay_mpc(wlc_info_t *wlc)
+bool wlc_is_non_delay_mpc(struct wlc_info *wlc)
 {
 	return false;
 }
 
-bool wlc_ismpc(wlc_info_t *wlc)
+bool wlc_ismpc(struct wlc_info *wlc)
 {
 	return (wlc->mpc_delay_off == 0) && (wlc_is_non_delay_mpc(wlc));
 }
 
-void wlc_radio_mpc_upd(wlc_info_t *wlc)
+void wlc_radio_mpc_upd(struct wlc_info *wlc)
 {
 	bool mpc_radio, radio_state;
 
@@ -2477,7 +2292,7 @@
  * centralized radio disable/enable function,
  * invoke radio enable/disable after updating hwradio status
  */
-static void wlc_radio_upd(wlc_info_t *wlc)
+static void wlc_radio_upd(struct wlc_info *wlc)
 {
 	if (wlc->pub->radio_disabled)
 		wlc_radio_disable(wlc);
@@ -2486,7 +2301,7 @@
 }
 
 /* maintain LED behavior in down state */
-static void wlc_down_led_upd(wlc_info_t *wlc)
+static void wlc_down_led_upd(struct wlc_info *wlc)
 {
 	ASSERT(!wlc->pub->up);
 
@@ -2499,7 +2314,7 @@
 	}
 }
 
-void wlc_radio_disable(wlc_info_t *wlc)
+void wlc_radio_disable(struct wlc_info *wlc)
 {
 	if (!wlc->pub->up) {
 		wlc_down_led_upd(wlc);
@@ -2510,7 +2325,7 @@
 	wl_down(wlc->wl);
 }
 
-static void wlc_radio_enable(wlc_info_t *wlc)
+static void wlc_radio_enable(struct wlc_info *wlc)
 {
 	if (wlc->pub->up)
 		return;
@@ -2526,10 +2341,10 @@
 /* periodical query hw radio button while driver is "down" */
 static void wlc_radio_timer(void *arg)
 {
-	wlc_info_t *wlc = (wlc_info_t *) arg;
+	struct wlc_info *wlc = (struct wlc_info *) arg;
 
 	if (DEVICEREMOVED(wlc)) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc->pub->unit, __func__);
 		wl_down(wlc->wl);
 		return;
 	}
@@ -2544,7 +2359,7 @@
 	wlc_radio_upd(wlc);
 }
 
-static bool wlc_radio_monitor_start(wlc_info_t *wlc)
+static bool wlc_radio_monitor_start(struct wlc_info *wlc)
 {
 	/* Don't start the timer if HWRADIO feature is disabled */
 	if (wlc->radio_monitor || (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO))
@@ -2556,7 +2371,7 @@
 	return true;
 }
 
-bool wlc_radio_monitor_stop(wlc_info_t *wlc)
+bool wlc_radio_monitor_stop(struct wlc_info *wlc)
 {
 	if (!wlc->radio_monitor)
 		return true;
@@ -2570,7 +2385,7 @@
 }
 
 /* bring the driver down, but don't reset hardware */
-void wlc_out(wlc_info_t *wlc)
+void wlc_out(struct wlc_info *wlc)
 {
 	wlc_bmac_set_noreset(wlc->hw, true);
 	wlc_radio_upd(wlc);
@@ -2591,7 +2406,7 @@
  * if there is no packet pending for the FIFO, then the corresponding prec bits should be set
  * in prec_map. Of course, ignore this rule when block_datafifo is set
  */
-static bool wlc_tx_prec_map_verify(wlc_info_t *wlc)
+static bool wlc_tx_prec_map_verify(struct wlc_info *wlc)
 {
 	/* For non-WME, both fifos have overlapping prec_map. So it's an error only if both
 	 * fail the check.
@@ -2613,7 +2428,7 @@
 
 static void wlc_watchdog_by_timer(void *arg)
 {
-	wlc_info_t *wlc = (wlc_info_t *) arg;
+	struct wlc_info *wlc = (struct wlc_info *) arg;
 	wlc_watchdog(arg);
 	if (WLC_WATCHDOG_TBTT(wlc)) {
 		/* set to normal osl watchdog period */
@@ -2626,17 +2441,17 @@
 /* common watchdog code */
 static void wlc_watchdog(void *arg)
 {
-	wlc_info_t *wlc = (wlc_info_t *) arg;
+	struct wlc_info *wlc = (struct wlc_info *) arg;
 	int i;
 	wlc_bsscfg_t *cfg;
 
-	WL_TRACE(("wl%d: wlc_watchdog\n", wlc->pub->unit));
+	WL_TRACE("wl%d: wlc_watchdog\n", wlc->pub->unit);
 
 	if (!wlc->pub->up)
 		return;
 
 	if (DEVICEREMOVED(wlc)) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc->pub->unit, __func__);
 		wl_down(wlc->wl);
 		return;
 	}
@@ -2667,13 +2482,7 @@
 	if (wlc->pub->radio_disabled)
 		return;
 
-#ifdef WLC_LOW
 	wlc_bmac_watchdog(wlc);
-#endif
-#ifdef WLC_HIGH_ONLY
-	/* maintenance */
-	wlc_bmac_rpc_watchdog(wlc);
-#endif
 
 	/* occasionally sample mac stat counters to detect 16-bit counter wrap */
 	if ((WLC_UPDATE_STATS(wlc))
@@ -2702,10 +2511,8 @@
 		wlc->tempsense_lasttime = wlc->pub->now;
 		wlc_tempsense_upd(wlc);
 	}
-#ifdef WLC_LOW
 	/* BMAC_NOTE: for HIGH_ONLY driver, this seems being called after RPC bus failed */
 	ASSERT(wlc_bmac_taclear(wlc->hw, true));
-#endif
 
 	/* Verify that tx_prec_map and fifos are in sync to avoid lock ups */
 	ASSERT(wlc_tx_prec_map_verify(wlc));
@@ -2714,9 +2521,9 @@
 }
 
 /* make interface operational */
-int wlc_up(wlc_info_t *wlc)
+int wlc_up(struct wlc_info *wlc)
 {
-	WL_TRACE(("wl%d: %s:\n", wlc->pub->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc->pub->unit, __func__);
 
 	/* HW is turned off so don't try to access it */
 	if (wlc->pub->hw_off || DEVICEREMOVED(wlc))
@@ -2728,7 +2535,7 @@
 	}
 
 	if ((wlc->pub->boardflags & BFL_FEM)
-	    && (CHIPID(wlc->pub->sih->chip) == BCM4313_CHIP_ID)) {
+	    && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
 		if (wlc->pub->boardrev >= 0x1250
 		    && (wlc->pub->boardflags & BFL_FEM_BT)) {
 			wlc_mhf(wlc, MHF5, MHF5_4313_GPIOCTRL,
@@ -2761,7 +2568,8 @@
 					if (!BSSCFG_STA(bsscfg)
 					    || !bsscfg->enable || !bsscfg->BSS)
 						continue;
-					WL_ERROR(("wl%d.%d: wlc_up: rfdisable -> " "wlc_bsscfg_disable()\n", wlc->pub->unit, idx));
+					WL_ERROR("wl%d.%d: wlc_up: rfdisable -> " "wlc_bsscfg_disable()\n",
+						 wlc->pub->unit, idx);
 				}
 			}
 		} else
@@ -2822,10 +2630,10 @@
 }
 
 /* Initialize the base precedence map for dequeueing from txq based on WME settings */
-static void wlc_tx_prec_map_init(wlc_info_t *wlc)
+static void wlc_tx_prec_map_init(struct wlc_info *wlc)
 {
 	wlc->tx_prec_map = WLC_PREC_BMP_ALL;
-	bzero(wlc->fifo2prec_map, sizeof(u16) * NFIFO);
+	memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
 
 	/* For non-WME, both fifos have overlapping MAXPRIO. So just disable all precedences
 	 * if either is full.
@@ -2841,7 +2649,7 @@
 	}
 }
 
-static uint wlc_down_del_timer(wlc_info_t *wlc)
+static uint wlc_down_del_timer(struct wlc_info *wlc)
 {
 	uint callbacks = 0;
 
@@ -2853,7 +2661,7 @@
  * disable the hardware, free any transient buffer state.
  * Return a count of the number of driver callbacks still pending.
  */
-uint wlc_down(wlc_info_t *wlc)
+uint wlc_down(struct wlc_info *wlc)
 {
 
 	uint callbacks = 0;
@@ -2861,12 +2669,12 @@
 	bool dev_gone = false;
 	wlc_txq_info_t *qi;
 
-	WL_TRACE(("wl%d: %s:\n", wlc->pub->unit, __func__));
+	WL_TRACE("wl%d: %s:\n", wlc->pub->unit, __func__);
 
 	/* check if we are already in the going down path */
 	if (wlc->going_down) {
-		WL_ERROR(("wl%d: %s: Driver going down so return\n",
-			  wlc->pub->unit, __func__));
+		WL_ERROR("wl%d: %s: Driver going down so return\n",
+			 wlc->pub->unit, __func__);
 		return 0;
 	}
 	if (!wlc->pub->up)
@@ -2922,14 +2730,11 @@
 	/* wlc_bmac_down_finish has done wlc_coredisable(). so clk is off */
 	wlc->clk = false;
 
-#ifdef WLC_HIGH_ONLY
-	wlc_rpctx_txreclaim(wlc->rpctx);
-#endif
 
 	/* Verify all packets are flushed from the driver */
-	if (PKTALLOCED(wlc->osh) != 0) {
-		WL_ERROR(("%d packets not freed at wlc_down!!!!!!\n",
-			  PKTALLOCED(wlc->osh)));
+	if (wlc->osh->pktalloced != 0) {
+		WL_ERROR("%d packets not freed at wlc_down!!!!!!\n",
+			 wlc->osh->pktalloced);
 	}
 #ifdef BCMDBG
 	/* Since all the packets should have been freed,
@@ -2943,7 +2748,7 @@
 }
 
 /* Set the current gmode configuration */
-int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config)
+int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
 {
 	int ret = 0;
 	uint i;
@@ -2958,7 +2763,7 @@
 	bool preamble_restrict = false;	/* Restrict association to stations that support short
 					 * preambles
 					 */
-	wlcband_t *band;
+	struct wlcband *band;
 
 	/* if N-support is enabled, allow Gmode set as long as requested
 	 * Gmode is not GMODE_LEGACY_B
@@ -2985,10 +2790,10 @@
 		wlc_protection_upd(wlc, WLC_PROT_G_USER, gmode);
 
 	/* Clear supported rates filter */
-	bzero(&wlc->sup_rates_override, sizeof(wlc_rateset_t));
+	memset(&wlc->sup_rates_override, 0, sizeof(wlc_rateset_t));
 
 	/* Clear rateset override */
-	bzero(&rs, sizeof(wlc_rateset_t));
+	memset(&rs, 0, sizeof(wlc_rateset_t));
 
 	switch (gmode) {
 	case GMODE_LEGACY_B:
@@ -3026,8 +2831,8 @@
 
 	default:
 		/* Error */
-		WL_ERROR(("wl%d: %s: invalid gmode %d\n", wlc->pub->unit,
-			  __func__, gmode));
+		WL_ERROR("wl%d: %s: invalid gmode %d\n",
+			 wlc->pub->unit, __func__, gmode);
 		return BCME_UNSUPPORTED;
 	}
 
@@ -3093,7 +2898,7 @@
 	return ret;
 }
 
-static int wlc_nmode_validate(wlc_info_t *wlc, s32 nmode)
+static int wlc_nmode_validate(struct wlc_info *wlc, s32 nmode)
 {
 	int err = 0;
 
@@ -3117,7 +2922,7 @@
 	return err;
 }
 
-int wlc_set_nmode(wlc_info_t *wlc, s32 nmode)
+int wlc_set_nmode(struct wlc_info *wlc, s32 nmode)
 {
 	uint i;
 	int err;
@@ -3176,7 +2981,7 @@
 	return err;
 }
 
-static int wlc_set_rateset(wlc_info_t *wlc, wlc_rateset_t *rs_arg)
+static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg)
 {
 	wlc_rateset_t rs, new;
 	uint bandunit;
@@ -3219,18 +3024,18 @@
 }
 
 /* simplified integer set interface for common ioctl handler */
-int wlc_set(wlc_info_t *wlc, int cmd, int arg)
+int wlc_set(struct wlc_info *wlc, int cmd, int arg)
 {
 	return wlc_ioctl(wlc, cmd, (void *)&arg, sizeof(arg), NULL);
 }
 
 /* simplified integer get interface for common ioctl handler */
-int wlc_get(wlc_info_t *wlc, int cmd, int *arg)
+int wlc_get(struct wlc_info *wlc, int cmd, int *arg)
 {
 	return wlc_ioctl(wlc, cmd, arg, sizeof(int), NULL);
 }
 
-static void wlc_ofdm_rateset_war(wlc_info_t *wlc)
+static void wlc_ofdm_rateset_war(struct wlc_info *wlc)
 {
 	u8 r;
 	bool war = false;
@@ -3246,14 +3051,16 @@
 }
 
 int
-wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
+wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
+	  struct wlc_if *wlcif)
 {
 	return _wlc_ioctl(wlc, cmd, arg, len, wlcif);
 }
 
 /* common ioctl handler. return: 0=ok, -1=error, positive=particular error */
 static int
-_wlc_ioctl(wlc_info_t *wlc, int cmd, void *arg, int len, struct wlc_if *wlcif)
+_wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
+	   struct wlc_if *wlcif)
 {
 	int val, *pval;
 	bool bool_val;
@@ -3265,7 +3072,7 @@
 	uint band;
 	rw_reg_t *r;
 	wlc_bsscfg_t *bsscfg;
-	osl_t *osh;
+	struct osl_info *osh;
 	wlc_bss_info_t *current_bss;
 
 	/* update bsscfg pointer */
@@ -3280,7 +3087,7 @@
 
 	/* If the device is turned off, then it's not "removed" */
 	if (!wlc->pub->hw_off && DEVICEREMOVED(wlc)) {
-		WL_ERROR(("wl%d: %s: dead chip\n", wlc->pub->unit, __func__));
+		WL_ERROR("wl%d: %s: dead chip\n", wlc->pub->unit, __func__);
 		wl_down(wlc->wl);
 		return BCME_ERROR;
 	}
@@ -3300,8 +3107,8 @@
 	bool_val = val != 0;
 
 	if (cmd != WLC_SET_CHANNEL)
-		WL_NONE(("WLC_IOCTL: cmd %d val 0x%x (%d) len %d\n", cmd,
-			 (uint) val, val, len));
+		WL_NONE("WLC_IOCTL: cmd %d val 0x%x (%d) len %d\n",
+			cmd, (uint)val, val, len);
 
 	bcmerror = 0;
 	regs = wlc->regs;
@@ -3321,8 +3128,8 @@
 
 	default:
 		if ((arg == NULL) || (len <= 0)) {
-			WL_ERROR(("wl%d: %s: Command %d needs arguments\n",
-				  wlc->pub->unit, __func__, cmd));
+			WL_ERROR("wl%d: %s: Command %d needs arguments\n",
+				 wlc->pub->unit, __func__, cmd);
 			bcmerror = BCME_BADARG;
 			goto done;
 		}
@@ -3388,10 +3195,6 @@
 				wlc_set_chanspec(wlc, chspec);
 				wlc_enable_mac(wlc);
 			}
-#ifdef WLC_HIGH_ONLY
-			/* delay for channel change */
-			msleep(50);
-#endif
 			break;
 		}
 
@@ -3659,8 +3462,8 @@
 			/* 4322 supports antdiv in phy, no need to set it to ucode */
 			if (WLCISNPHY(wlc->band)
 			    && D11REV_IS(wlc->pub->corerev, 16)) {
-				WL_ERROR(("wl%d: can't set ucantdiv for 4322\n",
-					  wlc->pub->unit));
+				WL_ERROR("wl%d: can't set ucantdiv for 4322\n",
+					 wlc->pub->unit);
 				bcmerror = BCME_UNSUPPORTED;
 			} else
 				wlc_mhf(wlc, MHF1, MHF1_ANTDIV,
@@ -3757,8 +3560,8 @@
 			if ((radiomask == 0) || (radiomask & ~validbits)
 			    || (radioval & ~validbits)
 			    || ((radioval & ~radiomask) != 0)) {
-				WL_ERROR(("SET_RADIO with wrong bits 0x%x\n",
-					  val));
+				WL_ERROR("SET_RADIO with wrong bits 0x%x\n",
+					 val);
 				bcmerror = BCME_RANGE;
 				break;
 			}
@@ -3788,7 +3591,7 @@
 				break;
 			}
 
-			bzero((char *)&key, sizeof(key));
+			memset((char *)&key, 0, sizeof(key));
 			if (src_key) {
 				key.index = src_key->id;
 				key.len = src_key->len;
@@ -3800,7 +3603,7 @@
 					key.flags |= WL_PRIMARY_KEY;
 
 				bcopy(src_key->ea.octet, key.ea.octet,
-				      ETHER_ADDR_LEN);
+				      ETH_ALEN);
 			}
 
 			bcopy((char *)&key, arg, sizeof(key));
@@ -3835,8 +3638,8 @@
 				u16 lo;
 				u32 hi;
 				/* group keys in WPA-NONE (IBSS only, AES and TKIP) use a global TXIV */
-				if ((bsscfg->WPA_auth & WPA_AUTH_NONE)
-				    && ETHER_ISNULLADDR(&key->ea)) {
+				if ((bsscfg->WPA_auth & WPA_AUTH_NONE) &&
+				    is_zero_ether_addr(key->ea.octet)) {
 					lo = bsscfg->wpa_none_txiv.lo;
 					hi = bsscfg->wpa_none_txiv.hi;
 				} else {
@@ -3885,7 +3688,7 @@
 			wlc_rateset_t rs;
 			wl_rateset_t *ret_rs = (wl_rateset_t *) arg;
 
-			bzero(&rs, sizeof(wlc_rateset_t));
+			memset(&rs, 0, sizeof(wlc_rateset_t));
 			wlc_default_rateset(wlc, (wlc_rateset_t *) &rs);
 
 			if (len < (int)(rs.count + sizeof(rs.count))) {
@@ -3913,7 +3716,7 @@
 				break;
 			}
 
-			bzero(&rs, sizeof(wlc_rateset_t));
+			memset(&rs, 0, sizeof(wlc_rateset_t));
 
 			/* Copy only legacy rateset section */
 			rs.count = in_rs->count;
@@ -4247,7 +4050,7 @@
 
 			/* check for an empty rateset to clear the override */
 			if (rs.count == 0) {
-				bzero(&wlc->sup_rates_override,
+				memset(&wlc->sup_rates_override, 0,
 				      sizeof(wlc_rateset_t));
 				break;
 			}
@@ -4394,7 +4197,7 @@
 #endif
 
 	case WLC_LAST:
-		WL_ERROR(("%s: WLC_LAST\n", __func__));
+		WL_ERROR("%s: WLC_LAST\n", __func__);
 	}
  done:
 
@@ -4406,21 +4209,19 @@
 		}
 
 	}
-#ifdef WLC_LOW
 	/* BMAC_NOTE: for HIGH_ONLY driver, this seems being called after RPC bus failed */
 	/* In hw_off condition, IOCTLs that reach here are deemed safe but taclear would
 	 * certainly result in getting -1 for register reads. So skip ta_clear altogether
 	 */
 	if (!(wlc->pub->hw_off))
 		ASSERT(wlc_bmac_taclear(wlc->hw, ta_ok) || !ta_ok);
-#endif
 
 	return bcmerror;
 }
 
 #if defined(BCMDBG)
 /* consolidated register access ioctl error checking */
-int wlc_iocregchk(wlc_info_t *wlc, uint band)
+int wlc_iocregchk(struct wlc_info *wlc, uint band)
 {
 	/* if band is specified, it must be the current band */
 	if ((band != WLC_BAND_AUTO) && (band != (uint) wlc->band->bandtype))
@@ -4440,7 +4241,7 @@
 
 #if defined(BCMDBG)
 /* For some ioctls, make sure that the pi pointer matches the current phy */
-int wlc_iocpichk(wlc_info_t *wlc, uint phytype)
+int wlc_iocpichk(struct wlc_info *wlc, uint phytype)
 {
 	if (wlc->band->phytype != phytype)
 		return BCME_BADBAND;
@@ -4474,21 +4275,21 @@
 }
 
 /* simplified integer get interface for common WLC_GET_VAR ioctl handler */
-int wlc_iovar_getint(wlc_info_t *wlc, const char *name, int *arg)
+int wlc_iovar_getint(struct wlc_info *wlc, const char *name, int *arg)
 {
 	return wlc_iovar_op(wlc, name, NULL, 0, arg, sizeof(s32), IOV_GET,
 			    NULL);
 }
 
 /* simplified integer set interface for common WLC_SET_VAR ioctl handler */
-int wlc_iovar_setint(wlc_info_t *wlc, const char *name, int arg)
+int wlc_iovar_setint(struct wlc_info *wlc, const char *name, int arg)
 {
 	return wlc_iovar_op(wlc, name, NULL, 0, (void *)&arg, sizeof(arg),
 			    IOV_SET, NULL);
 }
 
 /* simplified s8 get interface for common WLC_GET_VAR ioctl handler */
-int wlc_iovar_gets8(wlc_info_t *wlc, const char *name, s8 *arg)
+int wlc_iovar_gets8(struct wlc_info *wlc, const char *name, s8 *arg)
 {
 	int iovar_int;
 	int err;
@@ -4507,11 +4308,11 @@
  * calling function must keep 'iovars' until wlc_module_unregister is called.
  * 'iovar' must have the last entry's name field being NULL as terminator.
  */
-int wlc_module_register(wlc_pub_t *pub, const bcm_iovar_t *iovars,
+int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
 			const char *name, void *hdl, iovar_fn_t i_fn,
 			watchdog_fn_t w_fn, down_fn_t d_fn)
 {
-	wlc_info_t *wlc = (wlc_info_t *) pub->wlc;
+	struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
 	int i;
 
 	ASSERT(name != NULL);
@@ -4537,9 +4338,9 @@
 }
 
 /* unregister module callbacks */
-int wlc_module_unregister(wlc_pub_t *pub, const char *name, void *hdl)
+int wlc_module_unregister(struct wlc_pub *pub, const char *name, void *hdl)
 {
-	wlc_info_t *wlc = (wlc_info_t *) pub->wlc;
+	struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
 	int i;
 
 	if (wlc == NULL)
@@ -4550,7 +4351,7 @@
 	for (i = 0; i < WLC_MAXMODULES; i++) {
 		if (!strcmp(wlc->modulecb[i].name, name) &&
 		    (wlc->modulecb[i].hdl == hdl)) {
-			bzero(&wlc->modulecb[i], sizeof(modulecb_t));
+			memset(&wlc->modulecb[i], 0, sizeof(modulecb_t));
 			return 0;
 		}
 	}
@@ -4560,7 +4361,7 @@
 }
 
 /* Write WME tunable parameters for retransmit/max rate from wlc struct to ucode */
-static void wlc_wme_retries_write(wlc_info_t *wlc)
+static void wlc_wme_retries_write(struct wlc_info *wlc)
 {
 	int ac;
 
@@ -4582,7 +4383,7 @@
  * All pointers may point into the same buffer.
  */
 int
-wlc_iovar_op(wlc_info_t *wlc, const char *name,
+wlc_iovar_op(struct wlc_info *wlc, const char *name,
 	     void *params, int p_len, void *arg, int len,
 	     bool set, struct wlc_if *wlcif)
 {
@@ -4606,8 +4407,8 @@
 
 	if (!set && (len == sizeof(int)) &&
 	    !(IS_ALIGNED((unsigned long)(arg), (uint) sizeof(int)))) {
-		WL_ERROR(("wl%d: %s unaligned get ptr for %s\n",
-			  wlc->pub->unit, __func__, name));
+		WL_ERROR("wl%d: %s unaligned get ptr for %s\n",
+			 wlc->pub->unit, __func__, name);
 		ASSERT(0);
 	}
 
@@ -4622,11 +4423,6 @@
 	/* iovar name not found */
 	if (i >= WLC_MAXMODULES) {
 		err = BCME_UNSUPPORTED;
-#ifdef WLC_HIGH_ONLY
-		err =
-		    bcmsdh_iovar_op(wlc->btparam, name, params, p_len, arg, len,
-				    set);
-#endif
 		goto exit;
 	}
 
@@ -4658,10 +4454,10 @@
 }
 
 int
-wlc_iovar_check(wlc_pub_t *pub, const bcm_iovar_t *vi, void *arg, int len,
+wlc_iovar_check(struct wlc_pub *pub, const bcm_iovar_t *vi, void *arg, int len,
 		bool set)
 {
-	wlc_info_t *wlc = (wlc_info_t *) pub->wlc;
+	struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
 	int err = 0;
 	s32 int_val = 0;
 
@@ -4729,7 +4525,7 @@
 	    const char *name, void *params, uint p_len, void *arg, int len,
 	    int val_size, struct wlc_if *wlcif)
 {
-	wlc_info_t *wlc = hdl;
+	struct wlc_info *wlc = hdl;
 	wlc_bsscfg_t *bsscfg;
 	int err = 0;
 	s32 int_val = 0;
@@ -4739,7 +4535,7 @@
 	bool bool_val2;
 	wlc_bss_info_t *current_bss;
 
-	WL_TRACE(("wl%d: %s\n", wlc->pub->unit, __func__));
+	WL_TRACE("wl%d: %s\n", wlc->pub->unit, __func__);
 
 	bsscfg = NULL;
 	current_bss = NULL;
@@ -4762,8 +4558,8 @@
 	bool_val = (int_val != 0) ? true : false;
 	bool_val2 = (int_val2 != 0) ? true : false;
 
-	WL_TRACE(("wl%d: %s: id %d\n", wlc->pub->unit, __func__,
-		  IOV_ID(actionid)));
+	WL_TRACE("wl%d: %s: id %d\n",
+		 wlc->pub->unit, __func__, IOV_ID(actionid));
 	/* Do the actual parameter implementation */
 	switch (actionid) {
 
@@ -4821,7 +4617,7 @@
 		break;
 
 	default:
-		WL_ERROR(("wl%d: %s: unsupported\n", wlc->pub->unit, __func__));
+		WL_ERROR("wl%d: %s: unsupported\n", wlc->pub->unit, __func__);
 		err = BCME_UNSUPPORTED;
 		break;
 	}
@@ -4833,7 +4629,7 @@
 }
 
 static int
-wlc_iovar_rangecheck(wlc_info_t *wlc, u32 val, const bcm_iovar_t *vi)
+wlc_iovar_rangecheck(struct wlc_info *wlc, u32 val, const bcm_iovar_t *vi)
 {
 	int err = 0;
 	u32 min_val = 0;
@@ -4930,7 +4726,7 @@
 #define MACSTATUPD(name) \
 	wlc_ctrupd_cache(macstats.name, &wlc->core->macstat_snapshot->name, &wlc->pub->_cnt->name)
 
-void wlc_statsupd(wlc_info_t *wlc)
+void wlc_statsupd(struct wlc_info *wlc)
 {
 	int i;
 #ifdef BCMDBG
@@ -4956,8 +4752,8 @@
 	/* check for rx fifo 0 overflow */
 	delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
 	if (delta)
-		WL_ERROR(("wl%d: %u rx fifo 0 overflows!\n", wlc->pub->unit,
-			  delta));
+		WL_ERROR("wl%d: %u rx fifo 0 overflows!\n",
+			 wlc->pub->unit, delta);
 
 	/* check for tx fifo underflows */
 	for (i = 0; i < NFIFO; i++) {
@@ -4965,8 +4761,8 @@
 		    (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
 			      txfunfl[i]);
 		if (delta)
-			WL_ERROR(("wl%d: %u tx fifo %d underflows!\n",
-				  wlc->pub->unit, delta, i));
+			WL_ERROR("wl%d: %u tx fifo %d underflows!\n",
+				 wlc->pub->unit, delta, i);
 	}
 #endif				/* BCMDBG */
 
@@ -5015,7 +4811,7 @@
 bool wlc_chipmatch(u16 vendor, u16 device)
 {
 	if (vendor != VENDOR_BROADCOM) {
-		WL_ERROR(("wlc_chipmatch: unknown vendor id %04x\n", vendor));
+		WL_ERROR("wlc_chipmatch: unknown vendor id %04x\n", vendor);
 		return false;
 	}
 
@@ -5027,7 +4823,7 @@
 	if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
 		return true;
 
-	WL_ERROR(("wlc_chipmatch: unknown device id %04x\n", device));
+	WL_ERROR("wlc_chipmatch: unknown device id %04x\n", device);
 	return false;
 }
 
@@ -5182,20 +4978,12 @@
 }
 #endif				/* defined(BCMDBG) */
 
-u16 wlc_rate_shm_offset(wlc_info_t *wlc, u8 rate)
+u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate)
 {
 	return wlc_bmac_rate_shm_offset(wlc->hw, rate);
 }
 
 /* Callback for device removed */
-#if defined(WLC_HIGH_ONLY)
-void wlc_device_removed(void *arg)
-{
-	wlc_info_t *wlc = (wlc_info_t *) arg;
-
-	wlc->device_present = false;
-}
-#endif				/* WLC_HIGH_ONLY */
 
 /*
  * Attempts to queue a packet onto a multiple-precedence queue,
@@ -5207,16 +4995,16 @@
  * Returns true if packet consumed (queued), false if not.
  */
 bool BCMFASTPATH
-wlc_prec_enq(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec)
+wlc_prec_enq(struct wlc_info *wlc, struct pktq *q, void *pkt, int prec)
 {
 	return wlc_prec_enq_head(wlc, q, pkt, prec, false);
 }
 
 bool BCMFASTPATH
-wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec,
-		  bool head)
+wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q, struct sk_buff *pkt,
+		  int prec, bool head)
 {
-	void *p;
+	struct sk_buff *p;
 	int eprec = -1;		/* precedence to evict from */
 
 	/* Determine precedence from which to evict packet, if any */
@@ -5226,8 +5014,8 @@
 		p = pktq_peek_tail(q, &eprec);
 		ASSERT(p != NULL);
 		if (eprec > prec) {
-			WL_ERROR(("%s: Failing: eprec %d > prec %d\n", __func__,
-				  eprec, prec));
+			WL_ERROR("%s: Failing: eprec %d > prec %d\n",
+				 __func__, eprec, prec);
 			return false;
 		}
 	}
@@ -5243,8 +5031,8 @@
 
 		/* Refuse newer packet unless configured to discard oldest */
 		if (eprec == prec && !discard_oldest) {
-			WL_ERROR(("%s: No where to go, prec == %d\n", __func__,
-				  prec));
+			WL_ERROR("%s: No where to go, prec == %d\n",
+				 __func__, prec);
 			return false;
 		}
 
@@ -5256,14 +5044,14 @@
 		/* Increment wme stats */
 		if (WME_ENAB(wlc->pub)) {
 			WLCNTINCR(wlc->pub->_wme_cnt->
-				  tx_failed[WME_PRIO2AC(PKTPRIO(p))].packets);
+				  tx_failed[WME_PRIO2AC(p->priority)].packets);
 			WLCNTADD(wlc->pub->_wme_cnt->
-				 tx_failed[WME_PRIO2AC(PKTPRIO(p))].bytes,
+				 tx_failed[WME_PRIO2AC(p->priority)].bytes,
 				 pkttotlen(wlc->osh, p));
 		}
 
 		ASSERT(0);
-		PKTFREE(wlc->osh, p, true);
+		pkt_buf_free_skb(wlc->osh, p, true);
 		WLCNTINCR(wlc->pub->_cnt->txnobuf);
 	}
 
@@ -5277,25 +5065,26 @@
 	return true;
 }
 
-void BCMFASTPATH wlc_txq_enq(void *ctx, struct scb *scb, void *sdu, uint prec)
+void BCMFASTPATH wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
+			     uint prec)
 {
-	wlc_info_t *wlc = (wlc_info_t *) ctx;
+	struct wlc_info *wlc = (struct wlc_info *) ctx;
 	wlc_txq_info_t *qi = wlc->active_queue;	/* Check me */
 	struct pktq *q = &qi->q;
 	int prio;
 
-	prio = PKTPRIO(sdu);
+	prio = sdu->priority;
 
 	ASSERT(pktq_max(q) >= wlc->pub->tunables->datahiwat);
 
 	if (!wlc_prec_enq(wlc, q, sdu, prec)) {
 		if (!EDCF_ENAB(wlc->pub)
 		    || (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL))
-			WL_ERROR(("wl%d: wlc_txq_enq: txq overflow\n",
-				  wlc->pub->unit));
+			WL_ERROR("wl%d: wlc_txq_enq: txq overflow\n",
+				 wlc->pub->unit);
 
 		/* ASSERT(9 == 8); *//* XXX we might hit this condtion in case packet flooding from mac80211 stack */
-		PKTFREE(wlc->osh, sdu, true);
+		pkt_buf_free_skb(wlc->osh, sdu, true);
 		WLCNTINCR(wlc->pub->_cnt->txnobuf);
 	}
 
@@ -5317,13 +5106,14 @@
 }
 
 bool BCMFASTPATH
-wlc_sendpkt_mac80211(wlc_info_t *wlc, void *sdu, struct ieee80211_hw *hw)
+wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
+		     struct ieee80211_hw *hw)
 {
 	u8 prio;
 	uint fifo;
 	void *pkt;
 	struct scb *scb = &global_scb;
-	struct dot11_header *d11_header = (struct dot11_header *)PKTDATA(sdu);
+	struct dot11_header *d11_header = (struct dot11_header *)(sdu->data);
 	u16 type, fc;
 
 	ASSERT(sdu);
@@ -5332,13 +5122,13 @@
 	type = FC_TYPE(fc);
 
 	/* 802.11 standard requires management traffic to go at highest priority */
-	prio = (type == FC_TYPE_DATA ? PKTPRIO(sdu) : MAXPRIO);
+	prio = (type == FC_TYPE_DATA ? sdu->priority : MAXPRIO);
 	fifo = prio2fifo[prio];
 
-	ASSERT((uint) PKTHEADROOM(sdu) >= TXOFF);
-	ASSERT(!PKTSHARED(sdu));
-	ASSERT(!PKTNEXT(sdu));
-	ASSERT(!PKTLINK(sdu));
+	ASSERT((uint) skb_headroom(sdu) >= TXOFF);
+	ASSERT(!(sdu->cloned));
+	ASSERT(!(sdu->next));
+	ASSERT(!(sdu->prev));
 	ASSERT(fifo < NFIFO);
 
 	pkt = sdu;
@@ -5352,9 +5142,9 @@
 	return 0;
 }
 
-void BCMFASTPATH wlc_send_q(wlc_info_t *wlc, wlc_txq_info_t *qi)
+void BCMFASTPATH wlc_send_q(struct wlc_info *wlc, wlc_txq_info_t *qi)
 {
-	void *pkt[DOT11_MAXNUMFRAGS];
+	struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
 	int prec;
 	u16 prec_map;
 	int err = 0, i, count;
@@ -5427,7 +5217,7 @@
  * for MC frames so is used as part of the sequence number.
  */
 static inline u16
-bcmc_fid_generate(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg, d11txh_t *txh)
+bcmc_fid_generate(struct wlc_info *wlc, wlc_bsscfg_t *bsscfg, d11txh_t *txh)
 {
 	u16 frameid;
 
@@ -5441,13 +5231,14 @@
 }
 
 void BCMFASTPATH
-wlc_txfifo(wlc_info_t *wlc, uint fifo, void *p, bool commit, s8 txpktpend)
+wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p, bool commit,
+	   s8 txpktpend)
 {
 	u16 frameid = INVALIDFID;
 	d11txh_t *txh;
 
 	ASSERT(fifo < NFIFO);
-	txh = (d11txh_t *) PKTDATA(p);
+	txh = (d11txh_t *) (p->data);
 
 	/* When a BC/MC frame is being committed to the BCMC fifo via DMA (NOT PIO), update
 	 * ucode or BSS info as appropriate.
@@ -5460,21 +5251,14 @@
 	if (WLC_WAR16165(wlc))
 		wlc_war16165(wlc, true);
 
-#ifdef WLC_HIGH_ONLY
-	if (RPCTX_ENAB(wlc->pub)) {
-		(void)wlc_rpctx_tx(wlc->rpctx, fifo, p, commit, frameid,
-				   txpktpend);
-		return;
-	}
-#else
 
 	/* Bump up pending count for if not using rpc. If rpc is used, this will be handled
 	 * in wlc_bmac_txfifo()
 	 */
 	if (commit) {
 		TXPKTPENDINC(wlc, fifo, txpktpend);
-		WL_TRACE(("wlc_txfifo, pktpend inc %d to %d\n", txpktpend,
-			  TXPKTPENDGET(wlc, fifo)));
+		WL_TRACE("wlc_txfifo, pktpend inc %d to %d\n",
+			 txpktpend, TXPKTPENDGET(wlc, fifo));
 	}
 
 	/* Commit BCMC sequence number in the SHM frame ID location */
@@ -5482,13 +5266,12 @@
 		BCMCFID(wlc, frameid);
 
 	if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0) {
-		WL_ERROR(("wlc_txfifo: fatal, toss frames !!!\n"));
+		WL_ERROR("wlc_txfifo: fatal, toss frames !!!\n");
 	}
-#endif				/* WLC_HIGH_ONLY */
 }
 
 static u16
-wlc_compute_airtime(wlc_info_t *wlc, ratespec_t rspec, uint length)
+wlc_compute_airtime(struct wlc_info *wlc, ratespec_t rspec, uint length)
 {
 	u16 usec = 0;
 	uint mac_rate = RSPEC2RATE(rspec);
@@ -5523,7 +5306,8 @@
 			usec = (length << 3) / 11;
 			break;
 		default:
-			WL_ERROR(("wl%d: wlc_compute_airtime: unsupported rspec 0x%x\n", wlc->pub->unit, rspec));
+			WL_ERROR("wl%d: wlc_compute_airtime: unsupported rspec 0x%x\n",
+				 wlc->pub->unit, rspec);
 			ASSERT((const char *)"Bad phy_rate" == NULL);
 			break;
 		}
@@ -5533,7 +5317,7 @@
 }
 
 void BCMFASTPATH
-wlc_compute_plcp(wlc_info_t *wlc, ratespec_t rspec, uint length, u8 *plcp)
+wlc_compute_plcp(struct wlc_info *wlc, ratespec_t rspec, uint length, u8 *plcp)
 {
 	if (IS_MCS(rspec)) {
 		wlc_compute_mimo_plcp(rspec, length, plcp);
@@ -5574,7 +5358,7 @@
 	rate_signal = rate_info[rate] & RATE_MASK;
 	ASSERT(rate_signal != 0);
 
-	bzero(plcp, D11_PHY_HDR_LEN);
+	memset(plcp, 0, D11_PHY_HDR_LEN);
 	D11A_PHY_HDR_SRATE((ofdm_phy_hdr_t *) plcp, rate_signal);
 
 	tmp = (length & 0xfff) << 5;
@@ -5619,7 +5403,7 @@
 		break;
 
 	default:
-		WL_ERROR(("wlc_cck_plcp_set: unsupported rate %d\n", rate_500));
+		WL_ERROR("wlc_cck_plcp_set: unsupported rate %d\n", rate_500);
 		rate_500 = WLC_RATE_1M;
 		usec = length << 3;
 		break;
@@ -5657,7 +5441,7 @@
  * preamble_type	use short/GF or long/MM PLCP header
  */
 static u16 BCMFASTPATH
-wlc_compute_frame_dur(wlc_info_t *wlc, ratespec_t rate, u8 preamble_type,
+wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate, u8 preamble_type,
 		      uint next_frag_len)
 {
 	u16 dur, sifs;
@@ -5691,7 +5475,7 @@
  * frame_len		next MPDU frame length in bytes
  */
 u16 BCMFASTPATH
-wlc_compute_rtscts_dur(wlc_info_t *wlc, bool cts_only, ratespec_t rts_rate,
+wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only, ratespec_t rts_rate,
 		       ratespec_t frame_rate, u8 rts_preamble_type,
 		       u8 frame_preamble_type, uint frame_len, bool ba)
 {
@@ -5722,7 +5506,7 @@
 	return dur;
 }
 
-static bool wlc_phy_rspec_check(wlc_info_t *wlc, u16 bw, ratespec_t rspec)
+static bool wlc_phy_rspec_check(struct wlc_info *wlc, u16 bw, ratespec_t rspec)
 {
 	if (IS_MCS(rspec)) {
 		uint mcs = rspec & RSPEC_RATE_MASK;
@@ -5748,7 +5532,7 @@
 	return true;
 }
 
-u16 BCMFASTPATH wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec)
+u16 BCMFASTPATH wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec)
 {
 	u16 phyctl1 = 0;
 	u16 bw;
@@ -5759,7 +5543,8 @@
 		bw = RSPEC_GET_BW(rspec);
 		/* 10Mhz is not supported yet */
 		if (bw < PHY_TXC1_BW_20MHZ) {
-			WL_ERROR(("wlc_phytxctl1_calc: bw %d is not supported yet, set to 20L\n", bw));
+			WL_ERROR("wlc_phytxctl1_calc: bw %d is not supported yet, set to 20L\n",
+				 bw);
 			bw = PHY_TXC1_BW_20MHZ;
 		}
 
@@ -5784,7 +5569,7 @@
 		/* get the phyctl byte from rate phycfg table */
 		phycfg = wlc_rate_legacy_phyctl(RSPEC2RATE(rspec));
 		if (phycfg == -1) {
-			WL_ERROR(("wlc_phytxctl1_calc: wrong legacy OFDM/CCK rate\n"));
+			WL_ERROR("wlc_phytxctl1_calc: wrong legacy OFDM/CCK rate\n");
 			ASSERT(0);
 			phycfg = 0;
 		}
@@ -5798,16 +5583,14 @@
 	/* phy clock must support 40Mhz if tx descriptor uses it */
 	if ((phyctl1 & PHY_TXC1_BW_MASK) >= PHY_TXC1_BW_40MHZ) {
 		ASSERT(CHSPEC_WLC_BW(wlc->chanspec) == WLC_40_MHZ);
-#ifndef WLC_HIGH_ONLY
 		ASSERT(wlc->chanspec == wlc_phy_chanspec_get(wlc->band->pi));
-#endif
 	}
 #endif				/* BCMDBG */
 	return phyctl1;
 }
 
 ratespec_t BCMFASTPATH
-wlc_rspec_to_rts_rspec(wlc_info_t *wlc, ratespec_t rspec, bool use_rspec,
+wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec, bool use_rspec,
 		       u16 mimo_ctlchbw)
 {
 	ratespec_t rts_rspec = 0;
@@ -5863,15 +5646,15 @@
  *
  */
 static u16 BCMFASTPATH
-wlc_d11hdrs_mac80211(wlc_info_t *wlc, struct ieee80211_hw *hw,
-		     void *p, struct scb *scb, uint frag,
+wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
+		     struct sk_buff *p, struct scb *scb, uint frag,
 		     uint nfrags, uint queue, uint next_frag_len,
 		     wsec_key_t *key, ratespec_t rspec_override)
 {
 	struct dot11_header *h;
 	d11txh_t *txh;
 	u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
-	osl_t *osh;
+	struct osl_info *osh;
 	int len, phylen, rts_phylen;
 	u16 fc, type, frameid, mch, phyctl, xfts, mainrates;
 	u16 seq = 0, mcl = 0, status = 0;
@@ -5911,7 +5694,7 @@
 	osh = wlc->osh;
 
 	/* locate 802.11 MAC header */
-	h = (struct dot11_header *)PKTDATA(p);
+	h = (struct dot11_header *)(p->data);
 	fc = ltoh16(h->fc);
 	type = FC_TYPE(fc);
 
@@ -5935,29 +5718,29 @@
 	ASSERT(tx_info);
 
 	/* add PLCP */
-	plcp = PKTPUSH(p, D11_PHY_HDR_LEN);
+	plcp = skb_push(p, D11_PHY_HDR_LEN);
 
 	/* add Broadcom tx descriptor header */
-	txh = (d11txh_t *) PKTPUSH(p, D11_TXH_LEN);
-	bzero((char *)txh, D11_TXH_LEN);
+	txh = (d11txh_t *) skb_push(p, D11_TXH_LEN);
+	memset((char *)txh, 0, D11_TXH_LEN);
 
 	/* setup frameid */
 	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
 		/* non-AP STA should never use BCMC queue */
 		ASSERT(queue != TX_BCMC_FIFO);
 		if (queue == TX_BCMC_FIFO) {
-			WL_ERROR(("wl%d: %s: ASSERT queue == TX_BCMC!\n",
-				  WLCWLUNIT(wlc), __func__));
+			WL_ERROR("wl%d: %s: ASSERT queue == TX_BCMC!\n",
+				 WLCWLUNIT(wlc), __func__);
 			frameid = bcmc_fid_generate(wlc, NULL, txh);
 		} else {
 			/* Increment the counter for first fragment */
 			if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
-				SCB_SEQNUM(scb, PKTPRIO(p))++;
+				SCB_SEQNUM(scb, p->priority)++;
 			}
 
 			/* extract fragment number from frame first */
 			seq = ltoh16(seq) & FRAGNUM_MASK;
-			seq |= (SCB_SEQNUM(scb, PKTPRIO(p)) << SEQNUM_SHIFT);
+			seq |= (SCB_SEQNUM(scb, p->priority) << SEQNUM_SHIFT);
 			h->seq = htol16(seq);
 
 			frameid = ((seq << TXFID_SEQ_SHIFT) & TXFID_SEQ_MASK) |
@@ -5981,13 +5764,6 @@
 	if (txrate[1]->idx < 0) {
 		txrate[1] = txrate[0];
 	}
-#ifdef WLC_HIGH_ONLY
-	/* Double protection , just in case */
-	if (txrate[0]->idx > HIGHEST_SINGLE_STREAM_MCS)
-		txrate[0]->idx = HIGHEST_SINGLE_STREAM_MCS;
-	if (txrate[1]->idx > HIGHEST_SINGLE_STREAM_MCS)
-		txrate[1]->idx = HIGHEST_SINGLE_STREAM_MCS;
-#endif
 
 	for (k = 0; k < hw->max_rates; k++) {
 		is_mcs[k] =
@@ -6034,7 +5810,8 @@
 			ASSERT(RSPEC_ACTIVE(rspec[k]));
 			rspec[k] = WLC_RATE_1M;
 		} else {
-			if (WLANTSEL_ENAB(wlc) && !ETHER_ISMULTI(&h->a1)) {
+			if (WLANTSEL_ENAB(wlc) &&
+			    !is_multicast_ether_addr(h->a1.octet)) {
 				/* set tx antenna config */
 				wlc_antsel_antcfg_get(wlc->asi, false, false, 0,
 						      0, &antcfg, &fbantcfg);
@@ -6131,7 +5908,8 @@
 
 			if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
 			    && (!IS_MCS(rspec[k]))) {
-				WL_ERROR(("wl%d: %s: IEEE80211_TX_RC_MCS != IS_MCS(rspec)\n", WLCWLUNIT(wlc), __func__));
+				WL_ERROR("wl%d: %s: IEEE80211_TX_RC_MCS != IS_MCS(rspec)\n",
+					 WLCWLUNIT(wlc), __func__);
 				ASSERT(0 && "Rate mismatch");
 			}
 
@@ -6195,7 +5973,8 @@
 	    plcp[0];
 
 	/* DUR field for main rate */
-	if ((fc != FC_PS_POLL) && !ETHER_ISMULTI(&h->a1) && !use_rifs) {
+	if ((fc != FC_PS_POLL) &&
+	    !is_multicast_ether_addr(h->a1.octet) && !use_rifs) {
 		durid =
 		    wlc_compute_frame_dur(wlc, rspec[0], preamble_type[0],
 					  next_frag_len);
@@ -6213,7 +5992,7 @@
 	/* DUR field for fallback rate */
 	if (fc == FC_PS_POLL)
 		txh->FragDurFallback = h->durid;
-	else if (ETHER_ISMULTI(&h->a1) || use_rifs)
+	else if (is_multicast_ether_addr(h->a1.octet) || use_rifs)
 		txh->FragDurFallback = 0;
 	else {
 		durid = wlc_compute_frame_dur(wlc, rspec[1],
@@ -6225,7 +6004,7 @@
 	if (frag == 0)
 		mcl |= TXC_STARTMSDU;
 
-	if (!ETHER_ISMULTI(&h->a1))
+	if (!is_multicast_ether_addr(h->a1.octet))
 		mcl |= TXC_IMMEDACK;
 
 	if (BAND_5G(wlc->band->bandtype))
@@ -6260,7 +6039,7 @@
 	txh->TxFesTimeFallback = htol16(0);
 
 	/* TxFrameRA */
-	bcopy((char *)&h->a1, (char *)&txh->TxFrameRA, ETHER_ADDR_LEN);
+	bcopy((char *)&h->a1, (char *)&txh->TxFrameRA, ETH_ALEN);
 
 	/* TxFrameID */
 	txh->TxFrameID = htol16(frameid);
@@ -6347,11 +6126,11 @@
 
 		if (use_cts) {
 			rts->fc = htol16(FC_CTS);
-			bcopy((char *)&h->a2, (char *)&rts->ra, ETHER_ADDR_LEN);
+			bcopy((char *)&h->a2, (char *)&rts->ra, ETH_ALEN);
 		} else {
 			rts->fc = htol16((u16) FC_RTS);
 			bcopy((char *)&h->a1, (char *)&rts->ra,
-			      2 * ETHER_ADDR_LEN);
+			      2 * ETH_ALEN);
 		}
 
 		/* mainrate
@@ -6362,9 +6141,10 @@
 			      D11A_PHY_HDR_GRATE((ofdm_phy_hdr_t *) rts_plcp) :
 			      rts_plcp[0]) << 8;
 	} else {
-		bzero((char *)txh->RTSPhyHeader, D11_PHY_HDR_LEN);
-		bzero((char *)&txh->rts_frame, sizeof(struct dot11_rts_frame));
-		bzero((char *)txh->RTSPLCPFallback,
+		memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
+		memset((char *)&txh->rts_frame, 0,
+			sizeof(struct dot11_rts_frame));
+		memset((char *)txh->RTSPLCPFallback, 0,
 		      sizeof(txh->RTSPLCPFallback));
 		txh->RTSDurFallback = 0;
 	}
@@ -6453,7 +6233,7 @@
 	if (SCB_WME(scb) && qos && wlc->edcf_txop[ac]) {
 		uint frag_dur, dur, dur_fallback;
 
-		ASSERT(!ETHER_ISMULTI(&h->a1));
+		ASSERT(!is_multicast_ether_addr(h->a1.octet));
 
 		/* WME: Update TXOP threshold */
 		if ((!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) && (frag == 0)) {
@@ -6523,19 +6303,23 @@
 					}
 				}
 			} else
-				WL_ERROR(("wl%d: %s txop invalid for rate %d\n",
-					  wlc->pub->unit, fifo_names[queue],
-					  RSPEC2RATE(rspec[0])));
+				WL_ERROR("wl%d: %s txop invalid for rate %d\n",
+					 wlc->pub->unit, fifo_names[queue],
+					 RSPEC2RATE(rspec[0]));
 
 			if (dur > wlc->edcf_txop[ac])
-				WL_ERROR(("wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n", wlc->pub->unit, __func__, fifo_names[queue], phylen, wlc->fragthresh[queue], dur, wlc->edcf_txop[ac]));
+				WL_ERROR("wl%d: %s: %s txop exceeded phylen %d/%d dur %d/%d\n",
+					 wlc->pub->unit, __func__,
+					 fifo_names[queue],
+					 phylen, wlc->fragthresh[queue],
+					 dur, wlc->edcf_txop[ac]);
 		}
 	}
 
 	return 0;
 }
 
-void wlc_tbtt(wlc_info_t *wlc, d11regs_t *regs)
+void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs)
 {
 	wlc_bsscfg_t *cfg = wlc->cfg;
 
@@ -6571,19 +6355,19 @@
 }
 
 /* GP timer is a freerunning 32 bit counter, decrements at 1 us rate */
-void wlc_hwtimer_gptimer_set(wlc_info_t *wlc, uint us)
+void wlc_hwtimer_gptimer_set(struct wlc_info *wlc, uint us)
 {
 	ASSERT(wlc->pub->corerev >= 3);	/* no gptimer in earlier revs */
 	W_REG(wlc->osh, &wlc->regs->gptimer, us);
 }
 
-void wlc_hwtimer_gptimer_abort(wlc_info_t *wlc)
+void wlc_hwtimer_gptimer_abort(struct wlc_info *wlc)
 {
 	ASSERT(wlc->pub->corerev >= 3);
 	W_REG(wlc->osh, &wlc->regs->gptimer, 0);
 }
 
-static void wlc_hwtimer_gptimer_cb(wlc_info_t *wlc)
+static void wlc_hwtimer_gptimer_cb(struct wlc_info *wlc)
 {
 	/* when interrupt is generated, the counter is loaded with last value
 	 * written and continue to decrement. So it has to be cleaned first
@@ -6596,7 +6380,7 @@
  * POLICY: no macinstatus change, no bounding loop.
  *         All dpc bounding should be handled in BMAC dpc, like txstatus and rxint
  */
-void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus)
+void wlc_high_dpc(struct wlc_info *wlc, u32 macintstatus)
 {
 	d11regs_t *regs = wlc->regs;
 #ifdef BCMDBG
@@ -6634,8 +6418,8 @@
 	if (macintstatus & ~(MI_TBTT | MI_TXSTOP)) {
 		bcm_format_flags(int_flags, macintstatus, flagstr,
 				 sizeof(flagstr));
-		WL_TRACE(("wl%d: macintstatus 0x%x %s\n", wlc->pub->unit,
-			  macintstatus, flagstr));
+		WL_TRACE("wl%d: macintstatus 0x%x %s\n",
+			 wlc->pub->unit, macintstatus, flagstr);
 	}
 #endif				/* BCMDBG */
 
@@ -6650,11 +6434,12 @@
 		wlc_tbtt(wlc, regs);
 
 	if (macintstatus & MI_GP0) {
-		WL_ERROR(("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n", wlc->pub->unit, wlc->pub->now));
+		WL_ERROR("wl%d: PSM microcode watchdog fired at %d (seconds). Resetting.\n",
+			 wlc->pub->unit, wlc->pub->now);
 
 		printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
-			    __func__, CHIPID(wlc->pub->sih->chip),
-			    CHIPREV(wlc->pub->sih->chiprev));
+					__func__, wlc->pub->sih->chip,
+					wlc->pub->sih->chiprev);
 
 		WLCNTINCR(wlc->pub->_cnt->psmwds);
 
@@ -6668,7 +6453,9 @@
 	}
 
 	if (macintstatus & MI_RFDISABLE) {
-		WL_ERROR(("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n", wlc->pub->unit, R_REG(wlc->osh, &regs->phydebug) & PDBG_RFD));
+		WL_ERROR("wl%d: MAC Detected a change on the RF Disable Input 0x%x\n",
+			 wlc->pub->unit,
+			 R_REG(wlc->osh, &regs->phydebug) & PDBG_RFD);
 		/* delay the cleanup to wl_down in IBSS case */
 		if ((R_REG(wlc->osh, &regs->phydebug) & PDBG_RFD)) {
 			int idx;
@@ -6677,7 +6464,8 @@
 				if (!BSSCFG_STA(bsscfg) || !bsscfg->enable
 				    || !bsscfg->BSS)
 					continue;
-				WL_ERROR(("wl%d: wlc_dpc: rfdisable -> wlc_bsscfg_disable()\n", wlc->pub->unit));
+				WL_ERROR("wl%d: wlc_dpc: rfdisable -> wlc_bsscfg_disable()\n",
+					 wlc->pub->unit);
 			}
 		}
 	}
@@ -6686,14 +6474,12 @@
 	if (!pktq_empty(&wlc->active_queue->q))
 		wlc_send_q(wlc, wlc->active_queue);
 
-#ifndef WLC_HIGH_ONLY
 	ASSERT(wlc_ps_check(wlc));
-#endif
 }
 
-static void *wlc_15420war(wlc_info_t *wlc, uint queue)
+static void *wlc_15420war(struct wlc_info *wlc, uint queue)
 {
-	hnddma_t *di;
+	struct hnddma_pub *di;
 	void *p;
 
 	ASSERT(queue < NFIFO);
@@ -6715,13 +6501,14 @@
 	if (dma_txactive(wlc->hw->di[queue]) == 0) {
 		WLCNTINCR(wlc->pub->_cnt->txdmawar);
 		if (!dma_txreset(di))
-			WL_ERROR(("wl%d: %s: dma_txreset[%d]: cannot stop dma\n", wlc->pub->unit, __func__, queue));
+			WL_ERROR("wl%d: %s: dma_txreset[%d]: cannot stop dma\n",
+				 wlc->pub->unit, __func__, queue);
 		dma_txinit(di);
 	}
 	return p;
 }
 
-static void wlc_war16165(wlc_info_t *wlc, bool tx)
+static void wlc_war16165(struct wlc_info *wlc, bool tx)
 {
 	if (tx) {
 		/* the post-increment is used in STAY_AWAKE macro */
@@ -6737,14 +6524,14 @@
 /* process an individual tx_status_t */
 /* WLC_HIGH_API */
 bool BCMFASTPATH
-wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2)
+wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
 {
-	void *p;
+	struct sk_buff *p;
 	uint queue;
 	d11txh_t *txh;
 	struct scb *scb = NULL;
 	bool free_pdu;
-	osl_t *osh;
+	struct osl_info *osh;
 	int tx_rts, tx_frame_count, tx_rts_count;
 	uint totlen, supr_status;
 	bool lastframe;
@@ -6768,7 +6555,7 @@
 			 ((txs->
 			   status & TX_STATUS_FRM_RTX_MASK) >>
 			  TX_STATUS_FRM_RTX_SHIFT));
-		WL_ERROR(("%s: INTERMEDIATE but not AMPDU\n", __func__));
+		WL_ERROR("%s: INTERMEDIATE but not AMPDU\n", __func__);
 		return false;
 	}
 
@@ -6789,12 +6576,12 @@
 	if (p == NULL)
 		goto fatal;
 
-	txh = (d11txh_t *) PKTDATA(p);
+	txh = (d11txh_t *) (p->data);
 	mcl = ltoh16(txh->MacTxControlLow);
 
 	if (txs->phyerr) {
-		WL_ERROR(("phyerr 0x%x, rate 0x%x\n", txs->phyerr,
-			  txh->MainRates));
+		WL_ERROR("phyerr 0x%x, rate 0x%x\n",
+			 txs->phyerr, txh->MainRates);
 		wlc_print_txdesc(txh);
 		wlc_print_txstatus(txs);
 	}
@@ -6825,8 +6612,8 @@
 
 	supr_status = txs->status & TX_STATUS_SUPR_MASK;
 	if (supr_status == TX_STATUS_SUPR_BADCH)
-		WL_NONE(("%s: Pkt tx suppressed, possibly channel %d\n",
-			 __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)));
+		WL_NONE("%s: Pkt tx suppressed, possibly channel %d\n",
+			__func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec));
 
 	tx_rts = htol16(txh->MacTxControlLow) & TXC_SENDRTS;
 	tx_frame_count =
@@ -6837,7 +6624,7 @@
 	lastframe = (fc & FC_MOREFRAG) == 0;
 
 	if (!lastframe) {
-		WL_ERROR(("Not last frame!\n"));
+		WL_ERROR("Not last frame!\n");
 	} else {
 		u16 sfbl, lfbl;
 		ieee80211_tx_info_clear_status(tx_info);
@@ -6879,17 +6666,17 @@
 	wlc_txfifo_complete(wlc, queue, 1);
 
 	if (lastframe) {
-		PKTSETNEXT(p, NULL);
-		PKTSETLINK(p, NULL);
+		p->next = NULL;
+		p->prev = NULL;
 		wlc->txretried = 0;
 		/* remove PLCP & Broadcom tx descriptor header */
-		PKTPULL(p, D11_PHY_HDR_LEN);
-		PKTPULL(p, D11_TXH_LEN);
+		skb_pull(p, D11_PHY_HDR_LEN);
+		skb_pull(p, D11_TXH_LEN);
 		ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
 		WLCNTINCR(wlc->pub->_cnt->ieee_tx_status);
 	} else {
-		WL_ERROR(("%s: Not last frame => not calling tx_status\n",
-			  __func__));
+		WL_ERROR("%s: Not last frame => not calling tx_status\n",
+			 __func__);
 	}
 
 	return false;
@@ -6897,24 +6684,18 @@
  fatal:
 	ASSERT(0);
 	if (p)
-		PKTFREE(osh, p, true);
+		pkt_buf_free_skb(osh, p, true);
 
-#ifdef WLC_HIGH_ONLY
-	/* If this is a split driver, do the big-hammer here.
-	 * If this is a monolithic driver, wlc_bmac.c:wlc_dpc() will do the big-hammer.
-	 */
-	wl_init(wlc->wl);
-#endif
 	return true;
 
 }
 
 void BCMFASTPATH
-wlc_txfifo_complete(wlc_info_t *wlc, uint fifo, s8 txpktpend)
+wlc_txfifo_complete(struct wlc_info *wlc, uint fifo, s8 txpktpend)
 {
 	TXPKTPENDDEC(wlc, fifo, txpktpend);
-	WL_TRACE(("wlc_txfifo_complete, pktpend dec %d to %d\n", txpktpend,
-		  TXPKTPENDGET(wlc, fifo)));
+	WL_TRACE("wlc_txfifo_complete, pktpend dec %d to %d\n",
+		 txpktpend, TXPKTPENDGET(wlc, fifo));
 
 	/* There is more room; mark precedences related to this FIFO sendable */
 	WLC_TX_FIFO_ENAB(wlc, fifo);
@@ -7014,7 +6795,7 @@
 }
 
 /* Update beacon listen interval in shared memory */
-void wlc_bcn_li_upd(wlc_info_t *wlc)
+void wlc_bcn_li_upd(struct wlc_info *wlc)
 {
 	if (AP_ENAB(wlc->pub))
 		return;
@@ -7028,7 +6809,7 @@
 }
 
 static void
-prep_mac80211_status(wlc_info_t *wlc, d11rxhdr_t *rxh, void *p,
+prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
 		     struct ieee80211_rx_status *rx_status)
 {
 	u32 tsf_l, tsf_h;
@@ -7061,7 +6842,7 @@
 	/* qual */
 	rx_status->antenna = (rxh->PhyRxStatus_0 & PRXS0_RXANT_UPSUBBAND) ? 1 : 0;	/* ant */
 
-	plcp = PKTDATA(p);
+	plcp = p->data;
 
 	rspec = wlc_compute_rspec(rxh, plcp);
 	if (IS_MCS(rspec)) {
@@ -7108,19 +6889,19 @@
 			rx_status->rate_idx = 11;
 			break;
 		default:
-			WL_ERROR(("%s: Unknown rate\n", __func__));
+			WL_ERROR("%s: Unknown rate\n", __func__);
 		}
 
 		/* Determine short preamble and rate_idx */
 		preamble = 0;
 		if (IS_CCK(rspec)) {
 			if (rxh->PhyRxStatus_0 & PRXS0_SHORTH)
-				WL_ERROR(("Short CCK\n"));
+				WL_ERROR("Short CCK\n");
 			rx_status->flag |= RX_FLAG_SHORTPRE;
 		} else if (IS_OFDM(rspec)) {
 			rx_status->flag |= RX_FLAG_SHORTPRE;
 		} else {
-			WL_ERROR(("%s: Unknown modulation\n", __func__));
+			WL_ERROR("%s: Unknown modulation\n", __func__);
 		}
 	}
 
@@ -7129,16 +6910,17 @@
 
 	if (rxh->RxStatus1 & RXS_DECERR) {
 		rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
-		WL_ERROR(("%s:  RX_FLAG_FAILED_PLCP_CRC\n", __func__));
+		WL_ERROR("%s:  RX_FLAG_FAILED_PLCP_CRC\n", __func__);
 	}
 	if (rxh->RxStatus1 & RXS_FCSERR) {
 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-		WL_ERROR(("%s:  RX_FLAG_FAILED_FCS_CRC\n", __func__));
+		WL_ERROR("%s:  RX_FLAG_FAILED_FCS_CRC\n", __func__);
 	}
 }
 
 static void
-wlc_recvctl(wlc_info_t *wlc, osl_t *osh, d11rxhdr_t *rxh, void *p)
+wlc_recvctl(struct wlc_info *wlc, struct osl_info *osh, d11rxhdr_t *rxh,
+	    struct sk_buff *p)
 {
 	int len_mpdu;
 	struct ieee80211_rx_status rx_status;
@@ -7155,12 +6937,12 @@
 	prep_mac80211_status(wlc, rxh, p, &rx_status);
 
 	/* mac header+body length, exclude CRC and plcp header */
-	len_mpdu = PKTLEN(p) - D11_PHY_HDR_LEN - DOT11_FCS_LEN;
-	PKTPULL(p, D11_PHY_HDR_LEN);
-	PKTSETLEN(p, len_mpdu);
+	len_mpdu = p->len - D11_PHY_HDR_LEN - DOT11_FCS_LEN;
+	skb_pull(p, D11_PHY_HDR_LEN);
+	__skb_trim(p, len_mpdu);
 
-	ASSERT(!PKTNEXT(p));
-	ASSERT(!PKTLINK(p));
+	ASSERT(!(p->next));
+	ASSERT(!(p->prev));
 
 	ASSERT(IS_ALIGNED((unsigned long)skb->data, 2));
 
@@ -7168,17 +6950,17 @@
 	ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
 
 	WLCNTINCR(wlc->pub->_cnt->ieee_rx);
-	PKTUNALLOC(osh);
+	osh->pktalloced--;
 	return;
 }
 
-void wlc_bss_list_free(wlc_info_t *wlc, wlc_bss_list_t *bss_list)
+void wlc_bss_list_free(struct wlc_info *wlc, wlc_bss_list_t *bss_list)
 {
 	uint index;
 	wlc_bss_info_t *bi;
 
 	if (!bss_list) {
-		WL_ERROR(("%s: Attempting to free NULL list\n", __func__));
+		WL_ERROR("%s: Attempting to free NULL list\n", __func__);
 		return;
 	}
 	/* inspect all BSS descriptor */
@@ -7201,48 +6983,48 @@
  * Param 'bound' indicates max. # frames to process before break out.
  */
 /* WLC_HIGH_API */
-void BCMFASTPATH wlc_recv(wlc_info_t *wlc, void *p)
+void BCMFASTPATH wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
 {
 	d11rxhdr_t *rxh;
 	struct dot11_header *h;
-	osl_t *osh;
+	struct osl_info *osh;
 	u16 fc;
 	uint len;
 	bool is_amsdu;
 
-	WL_TRACE(("wl%d: wlc_recv\n", wlc->pub->unit));
+	WL_TRACE("wl%d: wlc_recv\n", wlc->pub->unit);
 
 	osh = wlc->osh;
 
 	/* frame starts with rxhdr */
-	rxh = (d11rxhdr_t *) PKTDATA(p);
+	rxh = (d11rxhdr_t *) (p->data);
 
 	/* strip off rxhdr */
-	PKTPULL(p, wlc->hwrxoff);
+	skb_pull(p, wlc->hwrxoff);
 
 	/* fixup rx header endianness */
 	ltoh16_buf((void *)rxh, sizeof(d11rxhdr_t));
 
 	/* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
 	if (rxh->RxStatus1 & RXS_PBPRES) {
-		if (PKTLEN(p) < 2) {
+		if (p->len < 2) {
 			WLCNTINCR(wlc->pub->_cnt->rxrunt);
-			WL_ERROR(("wl%d: wlc_recv: rcvd runt of len %d\n",
-				  wlc->pub->unit, PKTLEN(p)));
+			WL_ERROR("wl%d: wlc_recv: rcvd runt of len %d\n",
+				 wlc->pub->unit, p->len);
 			goto toss;
 		}
-		PKTPULL(p, 2);
+		skb_pull(p, 2);
 	}
 
-	h = (struct dot11_header *)(PKTDATA(p) + D11_PHY_HDR_LEN);
-	len = PKTLEN(p);
+	h = (struct dot11_header *)(p->data + D11_PHY_HDR_LEN);
+	len = p->len;
 
 	if (rxh->RxStatus1 & RXS_FCSERR) {
 		if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
-			WL_ERROR(("FCSERR while scanning******* - tossing\n"));
+			WL_ERROR("FCSERR while scanning******* - tossing\n");
 			goto toss;
 		} else {
-			WL_ERROR(("RCSERR!!!\n"));
+			WL_ERROR("RCSERR!!!\n");
 			goto toss;
 		}
 	}
@@ -7261,10 +7043,10 @@
 	if (!is_amsdu) {
 		/* CTS and ACK CTL frames are w/o a2 */
 		if (FC_TYPE(fc) == FC_TYPE_DATA || FC_TYPE(fc) == FC_TYPE_MNG) {
-			if ((ETHER_ISNULLADDR(&h->a2) || ETHER_ISMULTI(&h->a2))) {
-				WL_ERROR(("wl%d: %s: dropping a frame with "
-					"invalid src mac address, a2: %pM\n",
-					wlc->pub->unit, __func__, &h->a2));
+			if ((is_zero_ether_addr(h->a2.octet) ||
+			     is_multicast_ether_addr(h->a2.octet))) {
+				WL_ERROR("wl%d: %s: dropping a frame with invalid src mac address, a2: %pM\n",
+					 wlc->pub->unit, __func__, &h->a2);
 				WLCNTINCR(wlc->pub->_cnt->rxbadsrcmac);
 				goto toss;
 			}
@@ -7279,7 +7061,7 @@
 	}
 
 	if (is_amsdu) {
-		WL_ERROR(("%s: is_amsdu causing toss\n", __func__));
+		WL_ERROR("%s: is_amsdu causing toss\n", __func__);
 		goto toss;
 	}
 
@@ -7287,7 +7069,7 @@
 	return;
 
  toss:
-	PKTFREE(osh, p, false);
+	pkt_buf_free_skb(osh, p, false);
 }
 
 /* calculate frame duration for Mixed-mode L-SIG spoofing, return
@@ -7297,12 +7079,12 @@
  *   len = 3(nsyms + nstream + 3) - 3
  */
 u16 BCMFASTPATH
-wlc_calc_lsig_len(wlc_info_t *wlc, ratespec_t ratespec, uint mac_len)
+wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec, uint mac_len)
 {
 	uint nsyms, len = 0, kNdps;
 
-	WL_TRACE(("wl%d: wlc_calc_lsig_len: rate %d, len%d\n", wlc->pub->unit,
-		  RSPEC2RATE(ratespec), mac_len));
+	WL_TRACE("wl%d: wlc_calc_lsig_len: rate %d, len%d\n",
+		 wlc->pub->unit, RSPEC2RATE(ratespec), mac_len);
 
 	if (IS_MCS(ratespec)) {
 		uint mcs = ratespec & RSPEC_RATE_MASK;
@@ -7338,7 +7120,7 @@
 
 /* calculate frame duration of a given rate and length, return time in usec unit */
 uint BCMFASTPATH
-wlc_calc_frame_time(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
+wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
 		    uint mac_len)
 {
 	uint nsyms, dur = 0, Ndps, kNdps;
@@ -7346,11 +7128,12 @@
 
 	if (rate == 0) {
 		ASSERT(0);
-		WL_ERROR(("wl%d: WAR: using rate of 1 mbps\n", wlc->pub->unit));
+		WL_ERROR("wl%d: WAR: using rate of 1 mbps\n", wlc->pub->unit);
 		rate = WLC_RATE_1M;
 	}
 
-	WL_TRACE(("wl%d: wlc_calc_frame_time: rspec 0x%x, preamble_type %d, len%d\n", wlc->pub->unit, ratespec, preamble_type, mac_len));
+	WL_TRACE("wl%d: wlc_calc_frame_time: rspec 0x%x, preamble_type %d, len%d\n",
+		 wlc->pub->unit, ratespec, preamble_type, mac_len);
 
 	if (IS_MCS(ratespec)) {
 		uint mcs = ratespec & RSPEC_RATE_MASK;
@@ -7408,13 +7191,14 @@
 
 /* The opposite of wlc_calc_frame_time */
 static uint
-wlc_calc_frame_len(wlc_info_t *wlc, ratespec_t ratespec, u8 preamble_type,
+wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
 		   uint dur)
 {
 	uint nsyms, mac_len, Ndps, kNdps;
 	uint rate = RSPEC2RATE(ratespec);
 
-	WL_TRACE(("wl%d: wlc_calc_frame_len: rspec 0x%x, preamble_type %d, dur %d\n", wlc->pub->unit, ratespec, preamble_type, dur));
+	WL_TRACE("wl%d: wlc_calc_frame_len: rspec 0x%x, preamble_type %d, dur %d\n",
+		 wlc->pub->unit, ratespec, preamble_type, dur);
 
 	if (IS_MCS(ratespec)) {
 		uint mcs = ratespec & RSPEC_RATE_MASK;
@@ -7454,10 +7238,10 @@
 }
 
 static uint
-wlc_calc_ba_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
+wlc_calc_ba_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
 {
-	WL_TRACE(("wl%d: wlc_calc_ba_time: rspec 0x%x, preamble_type %d\n",
-		  wlc->pub->unit, rspec, preamble_type));
+	WL_TRACE("wl%d: wlc_calc_ba_time: rspec 0x%x, preamble_type %d\n",
+		 wlc->pub->unit, rspec, preamble_type);
 	/* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
 	 * or equal to the rate of the immediately previous frame in the FES
 	 */
@@ -7471,12 +7255,12 @@
 }
 
 static uint BCMFASTPATH
-wlc_calc_ack_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
+wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
 {
 	uint dur = 0;
 
-	WL_TRACE(("wl%d: wlc_calc_ack_time: rspec 0x%x, preamble_type %d\n",
-		  wlc->pub->unit, rspec, preamble_type));
+	WL_TRACE("wl%d: wlc_calc_ack_time: rspec 0x%x, preamble_type %d\n",
+		 wlc->pub->unit, rspec, preamble_type);
 	/* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
 	 * or equal to the rate of the immediately previous frame in the FES
 	 */
@@ -7491,15 +7275,15 @@
 }
 
 static uint
-wlc_calc_cts_time(wlc_info_t *wlc, ratespec_t rspec, u8 preamble_type)
+wlc_calc_cts_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
 {
-	WL_TRACE(("wl%d: wlc_calc_cts_time: ratespec 0x%x, preamble_type %d\n",
-		  wlc->pub->unit, rspec, preamble_type));
+	WL_TRACE("wl%d: wlc_calc_cts_time: ratespec 0x%x, preamble_type %d\n",
+		 wlc->pub->unit, rspec, preamble_type);
 	return wlc_calc_ack_time(wlc, rspec, preamble_type);
 }
 
 /* derive wlc->band->basic_rate[] table from 'rateset' */
-void wlc_rate_lookup_init(wlc_info_t *wlc, wlc_rateset_t *rateset)
+void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset)
 {
 	u8 rate;
 	u8 mandatory;
@@ -7509,7 +7293,7 @@
 	uint i;
 
 	/* incoming rates are in 500kbps units as in 802.11 Supported Rates */
-	bzero(br, WLC_MAXRATE + 1);
+	memset(br, 0, WLC_MAXRATE + 1);
 
 	/* For each basic rate in the rates list, make an entry in the
 	 * best basic lookup.
@@ -7523,7 +7307,8 @@
 		rate = (rateset->rates[i] & RATE_MASK);
 
 		if (rate > WLC_MAXRATE) {
-			WL_ERROR(("wlc_rate_lookup_init: invalid rate 0x%X in rate set\n", rateset->rates[i]));
+			WL_ERROR("wlc_rate_lookup_init: invalid rate 0x%X in rate set\n",
+				 rateset->rates[i]);
 			continue;
 		}
 
@@ -7588,7 +7373,7 @@
 	}
 }
 
-static void wlc_write_rate_shm(wlc_info_t *wlc, u8 rate, u8 basic_rate)
+static void wlc_write_rate_shm(struct wlc_info *wlc, u8 rate, u8 basic_rate)
 {
 	u8 phy_rate, index;
 	u8 basic_phy_rate, basic_index;
@@ -7621,7 +7406,7 @@
 	wlc_write_shm(wlc, (basic_table + index * 2), basic_ptr);
 }
 
-static const wlc_rateset_t *wlc_rateset_get_hwrs(wlc_info_t *wlc)
+static const wlc_rateset_t *wlc_rateset_get_hwrs(struct wlc_info *wlc)
 {
 	const wlc_rateset_t *rs_dflt;
 
@@ -7638,7 +7423,7 @@
 	return rs_dflt;
 }
 
-void wlc_set_ratetable(wlc_info_t *wlc)
+void wlc_set_ratetable(struct wlc_info *wlc)
 {
 	const wlc_rateset_t *rs_dflt;
 	wlc_rateset_t rs;
@@ -7674,7 +7459,8 @@
  * Return true if the specified rate is supported by the specified band.
  * WLC_BAND_AUTO indicates the current band.
  */
-bool wlc_valid_rate(wlc_info_t *wlc, ratespec_t rspec, int band, bool verbose)
+bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rspec, int band,
+		    bool verbose)
 {
 	wlc_rateset_t *hw_rateset;
 	uint i;
@@ -7701,16 +7487,17 @@
 			return true;
  error:
 	if (verbose) {
-		WL_ERROR(("wl%d: wlc_valid_rate: rate spec 0x%x not in hw_rateset\n", wlc->pub->unit, rspec));
+		WL_ERROR("wl%d: wlc_valid_rate: rate spec 0x%x not in hw_rateset\n",
+			 wlc->pub->unit, rspec);
 	}
 
 	return false;
 }
 
-static void wlc_update_mimo_band_bwcap(wlc_info_t *wlc, u8 bwcap)
+static void wlc_update_mimo_band_bwcap(struct wlc_info *wlc, u8 bwcap)
 {
 	uint i;
-	wlcband_t *band;
+	struct wlcband *band;
 
 	for (i = 0; i < NBANDS(wlc); i++) {
 		if (IS_SINGLEBAND_5G(wlc->deviceid))
@@ -7734,7 +7521,7 @@
 	wlc->mimo_band_bwcap = bwcap;
 }
 
-void wlc_mod_prb_rsp_rate_table(wlc_info_t *wlc, uint frame_len)
+void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len)
 {
 	const wlc_rateset_t *rs_dflt;
 	wlc_rateset_t rs;
@@ -7777,14 +7564,14 @@
 }
 
 u16
-wlc_compute_bcntsfoff(wlc_info_t *wlc, ratespec_t rspec, bool short_preamble,
-		      bool phydelay)
+wlc_compute_bcntsfoff(struct wlc_info *wlc, ratespec_t rspec,
+		      bool short_preamble, bool phydelay)
 {
 	uint bcntsfoff = 0;
 
 	if (IS_MCS(rspec)) {
-		WL_ERROR(("wl%d: recd beacon with mcs rate; rspec 0x%x\n",
-			  wlc->pub->unit, rspec));
+		WL_ERROR("wl%d: recd beacon with mcs rate; rspec 0x%x\n",
+			 wlc->pub->unit, rspec);
 	} else if (IS_OFDM(rspec)) {
 		/* tx delay from MAC through phy to air (2.1 usec) +
 		 * phy header time (preamble + PLCP SIGNAL == 20 usec) +
@@ -7824,7 +7611,7 @@
  *	and included up to, but not including, the 4 byte FCS.
  */
 static void
-wlc_bcn_prb_template(wlc_info_t *wlc, uint type, ratespec_t bcn_rspec,
+wlc_bcn_prb_template(struct wlc_info *wlc, uint type, ratespec_t bcn_rspec,
 		     wlc_bsscfg_t *cfg, u16 *buf, int *len)
 {
 	cck_phy_hdr_t *plcp;
@@ -7843,7 +7630,7 @@
 	*len = hdr_len + body_len;	/* return actual size */
 
 	/* format PHY and MAC headers */
-	bzero((char *)buf, hdr_len);
+	memset((char *)buf, 0, hdr_len);
 
 	plcp = (cck_phy_hdr_t *) buf;
 
@@ -7872,9 +7659,9 @@
 	/* A1 filled in by MAC for prb resp, broadcast for bcn */
 	if (type == FC_BEACON)
 		bcopy((const char *)&ether_bcast, (char *)&h->da,
-		      ETHER_ADDR_LEN);
-	bcopy((char *)&cfg->cur_etheraddr, (char *)&h->sa, ETHER_ADDR_LEN);
-	bcopy((char *)&cfg->BSSID, (char *)&h->bssid, ETHER_ADDR_LEN);
+		      ETH_ALEN);
+	bcopy((char *)&cfg->cur_etheraddr, (char *)&h->sa, ETH_ALEN);
+	bcopy((char *)&cfg->BSSID, (char *)&h->bssid, ETH_ALEN);
 
 	/* SEQ filled in by MAC */
 
@@ -7891,7 +7678,7 @@
  * template updated.
  * Otherwise, it updates the hardware template.
  */
-void wlc_bss_update_beacon(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
+void wlc_bss_update_beacon(struct wlc_info *wlc, wlc_bsscfg_t *cfg)
 {
 	int len = BCN_TMPL_LEN;
 
@@ -7907,7 +7694,7 @@
 		u16 bcn[BCN_TMPL_LEN / 2];
 		u32 both_valid = MCMD_BCN0VLD | MCMD_BCN1VLD;
 		d11regs_t *regs = wlc->regs;
-		osl_t *osh = NULL;
+		struct osl_info *osh = NULL;
 
 		osh = wlc->osh;
 
@@ -7944,7 +7731,7 @@
 /*
  * Update all beacons for the system.
  */
-void wlc_update_beacon(wlc_info_t *wlc)
+void wlc_update_beacon(struct wlc_info *wlc)
 {
 	int idx;
 	wlc_bsscfg_t *bsscfg;
@@ -7957,14 +7744,14 @@
 }
 
 /* Write ssid into shared memory */
-void wlc_shm_ssid_upd(wlc_info_t *wlc, wlc_bsscfg_t *cfg)
+void wlc_shm_ssid_upd(struct wlc_info *wlc, wlc_bsscfg_t *cfg)
 {
 	u8 *ssidptr = cfg->SSID;
 	u16 base = M_SSID;
 	u8 ssidbuf[DOT11_MAX_SSID_LEN];
 
 	/* padding the ssid with zero and copy it into shm */
-	bzero(ssidbuf, DOT11_MAX_SSID_LEN);
+	memset(ssidbuf, 0, DOT11_MAX_SSID_LEN);
 	bcopy(ssidptr, ssidbuf, cfg->SSID_len);
 
 	wlc_copyto_shm(wlc, base, ssidbuf, DOT11_MAX_SSID_LEN);
@@ -7973,7 +7760,7 @@
 		wlc_write_shm(wlc, M_SSIDLEN, (u16) cfg->SSID_len);
 }
 
-void wlc_update_probe_resp(wlc_info_t *wlc, bool suspend)
+void wlc_update_probe_resp(struct wlc_info *wlc, bool suspend)
 {
 	int idx;
 	wlc_bsscfg_t *bsscfg;
@@ -7986,7 +7773,7 @@
 }
 
 void
-wlc_bss_update_probe_resp(wlc_info_t *wlc, wlc_bsscfg_t *cfg, bool suspend)
+wlc_bss_update_probe_resp(struct wlc_info *wlc, wlc_bsscfg_t *cfg, bool suspend)
 {
 	u16 prb_resp[BCN_TMPL_LEN / 2];
 	int len = BCN_TMPL_LEN;
@@ -8027,9 +7814,9 @@
 }
 
 /* prepares pdu for transmission. returns BCM error codes */
-int wlc_prep_pdu(wlc_info_t *wlc, void *pdu, uint *fifop)
+int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifop)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	uint fifo;
 	d11txh_t *txh;
 	struct dot11_header *h;
@@ -8039,7 +7826,7 @@
 	osh = wlc->osh;
 
 	ASSERT(pdu);
-	txh = (d11txh_t *) PKTDATA(pdu);
+	txh = (d11txh_t *) (pdu->data);
 	ASSERT(txh);
 	h = (struct dot11_header *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
 	ASSERT(h);
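The two casts above imply the layout that a prepared tx skb is expected to have; a sketch of what the pointer arithmetic assumes (not new code):

	/*
	 * pdu->data
	 *   |
	 *   v
	 *   +-------------+--------------------+----------------------+------
	 *   |  d11txh_t   |  PHY/PLCP header   | struct dot11_header  | body
	 *   |  (tx descr) | (D11_PHY_HDR_LEN)  |  (802.11 MAC header) |  ...
	 *   +-------------+--------------------+----------------------+------
	 *
	 *   txh = (d11txh_t *)pdu->data;
	 *   h   = (struct dot11_header *)((u8 *)(txh + 1) + D11_PHY_HDR_LEN);
	 */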
@@ -8066,7 +7853,7 @@
 }
 
 /* init tx reported rate mechanism */
-void wlc_reprate_init(wlc_info_t *wlc)
+void wlc_reprate_init(struct wlc_info *wlc)
 {
 	int i;
 	wlc_bsscfg_t *bsscfg;
@@ -8080,13 +7867,13 @@
 void wlc_bsscfg_reprate_init(wlc_bsscfg_t *bsscfg)
 {
 	bsscfg->txrspecidx = 0;
-	bzero((char *)bsscfg->txrspec, sizeof(bsscfg->txrspec));
+	memset((char *)bsscfg->txrspec, 0, sizeof(bsscfg->txrspec));
 }
 
 /* Retrieve a consolidated set of revision information,
  * typically for the WLC_GET_REVINFO ioctl
  */
-int wlc_get_revision_info(wlc_info_t *wlc, void *buf, uint len)
+int wlc_get_revision_info(struct wlc_info *wlc, void *buf, uint len)
 {
 	wlc_rev_info_t *rinfo = (wlc_rev_info_t *) buf;
 
@@ -8120,7 +7907,7 @@
 	return BCME_OK;
 }
 
-void wlc_default_rateset(wlc_info_t *wlc, wlc_rateset_t *rs)
+void wlc_default_rateset(struct wlc_info *wlc, wlc_rateset_t *rs)
 {
 	wlc_rateset_default(rs, NULL, wlc->band->phytype, wlc->band->bandtype,
 			    false, RATE_MASK_FULL, (bool) N_ENAB(wlc->pub),
@@ -8128,14 +7915,14 @@
 			    wlc->stf->txstreams);
 }
 
-static void wlc_bss_default_init(wlc_info_t *wlc)
+static void wlc_bss_default_init(struct wlc_info *wlc)
 {
 	chanspec_t chanspec;
-	wlcband_t *band;
+	struct wlcband *band;
 	wlc_bss_info_t *bi = wlc->default_bss;
 
 	/* init default and target BSS with some sane initial values */
-	bzero((char *)(bi), sizeof(wlc_bss_info_t));
+	memset((char *)(bi), 0, sizeof(wlc_bss_info_t));
 	bi->beacon_period = ISSIM_ENAB(wlc->pub->sih) ? BEACON_INTERVAL_DEF_QT :
 	    BEACON_INTERVAL_DEFAULT;
 	bi->dtim_period = ISSIM_ENAB(wlc->pub->sih) ? DTIM_INTERVAL_DEF_QT :
@@ -8166,7 +7953,7 @@
 /* Deferred event processing */
 static void wlc_process_eventq(void *arg)
 {
-	wlc_info_t *wlc = (wlc_info_t *) arg;
+	struct wlc_info *wlc = (struct wlc_info *) arg;
 	wlc_event_t *etmp;
 
 	while ((etmp = wlc_eventq_deq(wlc->eventq))) {
@@ -8192,7 +7979,8 @@
 }
 
 static ratespec_t
-mac80211_wlc_set_nrate(wlc_info_t *wlc, wlcband_t *cur_band, u32 int_val)
+mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
+		       u32 int_val)
 {
 	u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
 	u8 rate = int_val & NRATE_RATE_MASK;
@@ -8211,8 +7999,8 @@
 	if (N_ENAB(wlc->pub) && ismcs) {
 		/* mcs only allowed when nmode */
 		if (stf > PHY_TXC1_MODE_SDM) {
-			WL_ERROR(("wl%d: %s: Invalid stf\n", WLCWLUNIT(wlc),
-				  __func__));
+			WL_ERROR("wl%d: %s: Invalid stf\n",
+				 WLCWLUNIT(wlc), __func__);
 			bcmerror = BCME_RANGE;
 			goto done;
 		}
@@ -8222,8 +8010,8 @@
 			if (!CHSPEC_IS40(wlc->home_chanspec) ||
 			    ((stf != PHY_TXC1_MODE_SISO)
 			     && (stf != PHY_TXC1_MODE_CDD))) {
-				WL_ERROR(("wl%d: %s: Invalid mcs 32\n",
-					  WLCWLUNIT(wlc), __func__));
+				WL_ERROR("wl%d: %s: Invalid mcs 32\n",
+					 WLCWLUNIT(wlc), __func__);
 				bcmerror = BCME_RANGE;
 				goto done;
 			}
@@ -8231,7 +8019,8 @@
 		} else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
 			/* mcs > 7 must use stf SDM */
 			if (stf != PHY_TXC1_MODE_SDM) {
-				WL_TRACE(("wl%d: %s: enabling SDM mode for mcs %d\n", WLCWLUNIT(wlc), __func__, rate));
+				WL_TRACE("wl%d: %s: enabling SDM mode for mcs %d\n",
+					 WLCWLUNIT(wlc), __func__, rate);
 				stf = PHY_TXC1_MODE_SDM;
 			}
 		} else {
@@ -8239,37 +8028,37 @@
 			if ((stf > PHY_TXC1_MODE_STBC) ||
 			    (!WLC_STBC_CAP_PHY(wlc)
 			     && (stf == PHY_TXC1_MODE_STBC))) {
-				WL_ERROR(("wl%d: %s: Invalid STBC\n",
-					  WLCWLUNIT(wlc), __func__));
+				WL_ERROR("wl%d: %s: Invalid STBC\n",
+					 WLCWLUNIT(wlc), __func__);
 				bcmerror = BCME_RANGE;
 				goto done;
 			}
 		}
 	} else if (IS_OFDM(rate)) {
 		if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
-			WL_ERROR(("wl%d: %s: Invalid OFDM\n", WLCWLUNIT(wlc),
-				  __func__));
+			WL_ERROR("wl%d: %s: Invalid OFDM\n",
+				 WLCWLUNIT(wlc), __func__);
 			bcmerror = BCME_RANGE;
 			goto done;
 		}
 	} else if (IS_CCK(rate)) {
 		if ((cur_band->bandtype != WLC_BAND_2G)
 		    || (stf != PHY_TXC1_MODE_SISO)) {
-			WL_ERROR(("wl%d: %s: Invalid CCK\n", WLCWLUNIT(wlc),
-				  __func__));
+			WL_ERROR("wl%d: %s: Invalid CCK\n",
+				 WLCWLUNIT(wlc), __func__);
 			bcmerror = BCME_RANGE;
 			goto done;
 		}
 	} else {
-		WL_ERROR(("wl%d: %s: Unknown rate type\n", WLCWLUNIT(wlc),
-			  __func__));
+		WL_ERROR("wl%d: %s: Unknown rate type\n",
+			 WLCWLUNIT(wlc), __func__);
 		bcmerror = BCME_RANGE;
 		goto done;
 	}
 	/* make sure multiple antennae are available for non-siso rates */
 	if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
-		WL_ERROR(("wl%d: %s: SISO antenna but !SISO request\n",
-			  WLCWLUNIT(wlc), __func__));
+		WL_ERROR("wl%d: %s: SISO antenna but !SISO request\n",
+			 WLCWLUNIT(wlc), __func__);
 		bcmerror = BCME_RANGE;
 		goto done;
 	}
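The decode at the top of this function ((int_val & NRATE_RATE_MASK), (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT) suggests how a caller composes an nrate word; the sketch below only uses the masks visible in this hunk and deliberately omits the MCS flag, which is not shown here:

	/* Illustrative only: pack an (stf mode, legacy rate or mcs index)
	 * pair into the nrate format decoded above. */
	static u32 make_nrate_example(u8 stf, u8 rate_or_mcs)
	{
		return (((u32)stf << NRATE_STF_SHIFT) & NRATE_STF_MASK) |
		       (u32)(rate_or_mcs & NRATE_RATE_MASK);
	}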
@@ -8300,13 +8089,13 @@
 
 	return rspec;
  done:
-	WL_ERROR(("Hoark\n"));
+	WL_ERROR("Hoark\n");
 	return rate;
 }
 
 /* formula:  IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
 static int
-wlc_duty_cycle_set(wlc_info_t *wlc, int duty_cycle, bool isOFDM,
+wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
 		   bool writeToShm)
 {
 	int idle_busy_ratio_x_16 = 0;
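A quick worked check of the formula in the comment above:

	/*
	 *   duty_cycle = 25  ->  (100 - 25) / 25 * 16 = 48
	 *   duty_cycle = 50  ->  (100 - 50) / 50 * 16 = 16
	 *   duty_cycle = 100 ->  (100 - 100) / 100 * 16 = 0   (no idle time enforced)
	 */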
@@ -8314,8 +8103,7 @@
 	    isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
 	    M_TX_IDLE_BUSY_RATIO_X_16_CCK;
 	if (duty_cycle > 100 || duty_cycle < 0) {
-		WL_ERROR(("wl%d:  duty cycle value off limit\n",
-			  wlc->pub->unit));
+		WL_ERROR("wl%d:  duty cycle value off limit\n", wlc->pub->unit);
 		return BCME_RANGE;
 	}
 	if (duty_cycle)
@@ -8335,7 +8123,7 @@
 /* Read a single u16 from shared memory.
  * SHM 'offset' needs to be an even address
  */
-u16 wlc_read_shm(wlc_info_t *wlc, uint offset)
+u16 wlc_read_shm(struct wlc_info *wlc, uint offset)
 {
 	return wlc_bmac_read_shm(wlc->hw, offset);
 }
@@ -8343,7 +8131,7 @@
 /* Write a single u16 to shared memory.
  * SHM 'offset' needs to be an even address
  */
-void wlc_write_shm(wlc_info_t *wlc, uint offset, u16 v)
+void wlc_write_shm(struct wlc_info *wlc, uint offset, u16 v)
 {
 	wlc_bmac_write_shm(wlc->hw, offset, v);
 }
@@ -8352,7 +8140,7 @@
  * SHM 'offset' needs to be an even address and
  * Range length 'len' must be an even number of bytes
  */
-void wlc_set_shm(wlc_info_t *wlc, uint offset, u16 v, int len)
+void wlc_set_shm(struct wlc_info *wlc, uint offset, u16 v, int len)
 {
 	/* offset and len need to be even */
 	ASSERT((offset & 1) == 0);
@@ -8368,7 +8156,7 @@
  * SHM 'offset' needs to be an even address and
  * Buffer length 'len' must be an even number of bytes
  */
-void wlc_copyto_shm(wlc_info_t *wlc, uint offset, const void *buf, int len)
+void wlc_copyto_shm(struct wlc_info *wlc, uint offset, const void *buf, int len)
 {
 	/* offset and len need to be even */
 	ASSERT((offset & 1) == 0);
@@ -8384,7 +8172,7 @@
  * SHM 'offset' needs to be an even address and
  * Buffer length 'len' must be an even number of bytes
  */
-void wlc_copyfrom_shm(wlc_info_t *wlc, uint offset, void *buf, int len)
+void wlc_copyfrom_shm(struct wlc_info *wlc, uint offset, void *buf, int len)
 {
 	/* offset and len need to be even */
 	ASSERT((offset & 1) == 0);
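Because both the SHM offset and the length must be even, callers with odd-sized data typically bounce it through a zero-padded buffer first (the same pattern wlc_shm_ssid_upd() uses above). A minimal sketch, with copyto_shm_padded_example() and its arguments being hypothetical:

	static void copyto_shm_padded_example(struct wlc_info *wlc, uint base,
					      const u8 *src, uint srclen)
	{
		u8 tmp[64];				/* assumed large enough here */
		uint padded = (srclen + 1) & ~1u;	/* round length up to even */

		memset(tmp, 0, sizeof(tmp));
		memcpy(tmp, src, srclen);
		wlc_copyto_shm(wlc, base, tmp, (int)padded);	/* base must be even */
	}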
@@ -8397,71 +8185,73 @@
 }
 
 /* wrapper BMAC functions to for HIGH driver access */
-void wlc_mctrl(wlc_info_t *wlc, u32 mask, u32 val)
+void wlc_mctrl(struct wlc_info *wlc, u32 mask, u32 val)
 {
 	wlc_bmac_mctrl(wlc->hw, mask, val);
 }
 
-void wlc_corereset(wlc_info_t *wlc, u32 flags)
+void wlc_corereset(struct wlc_info *wlc, u32 flags)
 {
 	wlc_bmac_corereset(wlc->hw, flags);
 }
 
-void wlc_mhf(wlc_info_t *wlc, u8 idx, u16 mask, u16 val, int bands)
+void wlc_mhf(struct wlc_info *wlc, u8 idx, u16 mask, u16 val, int bands)
 {
 	wlc_bmac_mhf(wlc->hw, idx, mask, val, bands);
 }
 
-u16 wlc_mhf_get(wlc_info_t *wlc, u8 idx, int bands)
+u16 wlc_mhf_get(struct wlc_info *wlc, u8 idx, int bands)
 {
 	return wlc_bmac_mhf_get(wlc->hw, idx, bands);
 }
 
-int wlc_xmtfifo_sz_get(wlc_info_t *wlc, uint fifo, uint *blocks)
+int wlc_xmtfifo_sz_get(struct wlc_info *wlc, uint fifo, uint *blocks)
 {
 	return wlc_bmac_xmtfifo_sz_get(wlc->hw, fifo, blocks);
 }
 
-void wlc_write_template_ram(wlc_info_t *wlc, int offset, int len, void *buf)
+void wlc_write_template_ram(struct wlc_info *wlc, int offset, int len,
+			    void *buf)
 {
 	wlc_bmac_write_template_ram(wlc->hw, offset, len, buf);
 }
 
-void wlc_write_hw_bcntemplates(wlc_info_t *wlc, void *bcn, int len, bool both)
+void wlc_write_hw_bcntemplates(struct wlc_info *wlc, void *bcn, int len,
+			       bool both)
 {
 	wlc_bmac_write_hw_bcntemplates(wlc->hw, bcn, len, both);
 }
 
 void
-wlc_set_addrmatch(wlc_info_t *wlc, int match_reg_offset,
+wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
 		  const struct ether_addr *addr)
 {
 	wlc_bmac_set_addrmatch(wlc->hw, match_reg_offset, addr);
 }
 
-void wlc_set_rcmta(wlc_info_t *wlc, int idx, const struct ether_addr *addr)
+void wlc_set_rcmta(struct wlc_info *wlc, int idx, const struct ether_addr *addr)
 {
 	wlc_bmac_set_rcmta(wlc->hw, idx, addr);
 }
 
-void wlc_read_tsf(wlc_info_t *wlc, u32 *tsf_l_ptr, u32 *tsf_h_ptr)
+void wlc_read_tsf(struct wlc_info *wlc, u32 *tsf_l_ptr, u32 *tsf_h_ptr)
 {
 	wlc_bmac_read_tsf(wlc->hw, tsf_l_ptr, tsf_h_ptr);
 }
 
-void wlc_set_cwmin(wlc_info_t *wlc, u16 newmin)
+void wlc_set_cwmin(struct wlc_info *wlc, u16 newmin)
 {
 	wlc->band->CWmin = newmin;
 	wlc_bmac_set_cwmin(wlc->hw, newmin);
 }
 
-void wlc_set_cwmax(wlc_info_t *wlc, u16 newmax)
+void wlc_set_cwmax(struct wlc_info *wlc, u16 newmax)
 {
 	wlc->band->CWmax = newmax;
 	wlc_bmac_set_cwmax(wlc->hw, newmax);
 }
 
-void wlc_fifoerrors(wlc_info_t *wlc)
+void wlc_fifoerrors(struct wlc_info *wlc)
 {
 
 	wlc_bmac_fifoerrors(wlc->hw);
@@ -8469,19 +8259,16 @@
 
 /* Search mem rw utilities */
 
-void wlc_pllreq(wlc_info_t *wlc, bool set, mbool req_bit)
+void wlc_pllreq(struct wlc_info *wlc, bool set, mbool req_bit)
 {
 	wlc_bmac_pllreq(wlc->hw, set, req_bit);
 }
 
-void wlc_reset_bmac_done(wlc_info_t *wlc)
+void wlc_reset_bmac_done(struct wlc_info *wlc)
 {
-#ifdef WLC_HIGH_ONLY
-	wlc->reset_bmac_pending = false;
-#endif
 }
 
-void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode)
+void wlc_ht_mimops_cap_update(struct wlc_info *wlc, u8 mimops_mode)
 {
 	wlc->ht_cap.cap &= ~HT_CAP_MIMO_PS_MASK;
 	wlc->ht_cap.cap |= (mimops_mode << HT_CAP_MIMO_PS_SHIFT);
@@ -8494,7 +8281,7 @@
 
 /* check for the particular priority flow control bit being set */
 bool
-wlc_txflowcontrol_prio_isset(wlc_info_t *wlc, wlc_txq_info_t *q, int prio)
+wlc_txflowcontrol_prio_isset(struct wlc_info *wlc, wlc_txq_info_t *q, int prio)
 {
 	uint prio_mask;
 
@@ -8509,12 +8296,13 @@
 }
 
 /* propagate the flow control to all interfaces using the given tx queue */
-void wlc_txflowcontrol(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on, int prio)
+void wlc_txflowcontrol(struct wlc_info *wlc, wlc_txq_info_t *qi,
+		       bool on, int prio)
 {
 	uint prio_bits;
 	uint cur_bits;
 
-	WL_ERROR(("%s: flow contro kicks in\n", __func__));
+	WL_ERROR("%s: flow control kicks in\n", __func__);
 
 	if (prio == ALLPRIO) {
 		prio_bits = TXQ_STOP_FOR_PRIOFC_MASK;
@@ -8551,7 +8339,7 @@
 }
 
 void
-wlc_txflowcontrol_override(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
+wlc_txflowcontrol_override(struct wlc_info *wlc, wlc_txq_info_t *qi, bool on,
 			   uint override)
 {
 	uint prev_override;
@@ -8598,7 +8386,7 @@
 	}
 }
 
-static void wlc_txflowcontrol_reset(wlc_info_t *wlc)
+static void wlc_txflowcontrol_reset(struct wlc_info *wlc)
 {
 	wlc_txq_info_t *qi;
 
@@ -8611,10 +8399,10 @@
 }
 
 static void
-wlc_txflowcontrol_signal(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
+wlc_txflowcontrol_signal(struct wlc_info *wlc, wlc_txq_info_t *qi, bool on,
 			 int prio)
 {
-	wlc_if_t *wlcif;
+	struct wlc_if *wlcif;
 
 	for (wlcif = wlc->wlcif_list; wlcif != NULL; wlcif = wlcif->next) {
 		if (wlcif->qi == qi && wlcif->flags & WLC_IF_LINKED)
@@ -8622,7 +8410,7 @@
 	}
 }
 
-static wlc_txq_info_t *wlc_txq_alloc(wlc_info_t *wlc, osl_t *osh)
+static wlc_txq_info_t *wlc_txq_alloc(struct wlc_info *wlc, struct osl_info *osh)
 {
 	wlc_txq_info_t *qi, *p;
 
@@ -8652,7 +8440,8 @@
 	return qi;
 }
 
-static void wlc_txq_free(wlc_info_t *wlc, osl_t *osh, wlc_txq_info_t *qi)
+static void wlc_txq_free(struct wlc_info *wlc, struct osl_info *osh,
+			 wlc_txq_info_t *qi)
 {
 	wlc_txq_info_t *p;
 
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.h b/drivers/staging/brcm80211/sys/wlc_mac80211.h
index 6a77591..5df996b 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.h
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.h
@@ -17,19 +17,10 @@
 #ifndef _wlc_h_
 #define _wlc_h_
 
-#include <wlc_types.h>
-
-#include <wl_dbg.h>
 #include <wlioctl.h>
-#include <wlc_event.h>
 #include <wlc_phy_hal.h>
 #include <wlc_channel.h>
-#ifdef WLC_SPLIT
-#include <bcm_rpc.h>
-#endif
-
 #include <wlc_bsscfg.h>
-
 #include <wlc_scb.h>
 
 #define MA_WINDOW_SZ		8	/* moving average window size */
@@ -220,15 +211,11 @@
  * (some platforms return all 0).
  * If clocks are present, call the sb routine which will figure out if the device is removed.
  */
-#ifdef WLC_HIGH_ONLY
-#define DEVICEREMOVED(wlc)	(!wlc->device_present)
-#else
 #define DEVICEREMOVED(wlc)      \
 	((wlc->hw->clk) ?   \
 	((R_REG(wlc->hw->osh, &wlc->hw->regs->maccontrol) & \
 	(MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN) : \
 	(si_deviceremoved(wlc->hw->sih)))
-#endif				/* WLC_HIGH_ONLY */
 
 #define WLCWLUNIT(wlc)		((wlc)->pub->unit)
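DEVICEREMOVED() above is meant as an early-out before touching the hardware once the card may be gone; a hypothetical caller-side sketch (the surrounding function is illustrative, only the macro and wlc_fatal_error() come from this patch):

	static void watchdog_example(struct wlc_info *wlc)
	{
		if (DEVICEREMOVED(wlc)) {
			wlc_fatal_error(wlc);	/* declared later in this header */
			return;
		}
		/* ... normal periodic work ... */
	}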
 
@@ -315,22 +302,20 @@
 /*
  * core state (mac)
  */
-typedef struct wlccore {
-#ifdef WLC_LOW
+struct wlccore {
 	uint coreidx;		/* # sb enumerated core */
 
 	/* fifo */
 	uint *txavail[NFIFO];	/* # tx descriptors available */
 	s16 txpktpend[NFIFO];	/* tx admission control */
-#endif				/* WLC_LOW */
 
 	macstat_t *macstat_snapshot;	/* mac hw prev read values */
-} wlccore_t;
+};
 
 /*
  * band state (phy+ana+radio)
  */
-typedef struct wlcband {
+struct wlcband {
 	int bandtype;		/* WLC_BAND_2G, WLC_BAND_5G */
 	uint bandunit;		/* bandstate[] index */
 
@@ -359,13 +344,13 @@
 	u16 CWmin;		/* The minimum size of contention window, in unit of aSlotTime */
 	u16 CWmax;		/* The maximum size of contention window, in unit of aSlotTime */
 	u16 bcntsfoff;	/* beacon tsf offset */
-} wlcband_t;
+};
 
 /* generic function callback takes just one arg */
 typedef void (*cb_fn_t) (void *);
 
 /* tx completion callback takes 3 args */
-typedef void (*pkcb_fn_t) (wlc_info_t *wlc, uint txstatus, void *arg);
+typedef void (*pkcb_fn_t) (struct wlc_info *wlc, uint txstatus, void *arg);
 
 typedef struct pkt_cb {
 	pkcb_fn_t fn;		/* function to call when tx frame completes */
@@ -398,14 +383,14 @@
 
 /* virtual interface */
 struct wlc_if {
-	wlc_if_t *next;
+	struct wlc_if *next;
 	u8 type;		/* WLC_IFTYPE_BSS or WLC_IFTYPE_WDS */
 	u8 index;		/* assigned in wl_add_if(), index of the wlif if any,
 				 * not necessarily corresponding to bsscfg._idx or
 				 * AID2PVBMAP(scb).
 				 */
 	u8 flags;		/* flags for the interface */
-	wl_if_t *wlif;		/* pointer to wlif */
+	struct wl_if *wlif;		/* pointer to wlif */
 	struct wlc_txq_info *qi;	/* pointer to associated tx queue */
 	union {
 		struct scb *scb;	/* pointer to scb if WLC_IFTYPE_WDS */
@@ -416,7 +401,6 @@
 /* flags for the interface */
 #define WLC_IF_LINKED		0x02	/* this interface is linked to a wl_if */
 
-#ifdef WLC_LOW
 typedef struct wlc_hwband {
 	int bandtype;		/* WLC_BAND_2G, WLC_BAND_5G */
 	uint bandunit;		/* bandstate[] index */
@@ -433,20 +417,15 @@
 	wlc_phy_t *pi;		/* pointer to phy specific information */
 	bool abgphy_encore;
 } wlc_hwband_t;
-#endif				/* WLC_LOW */
 
 struct wlc_hw_info {
-#ifdef WLC_SPLIT
-	rpc_info_t *rpc;	/* Handle to RPC module */
-#endif
-	osl_t *osh;		/* pointer to os handle */
+	struct osl_info *osh;		/* pointer to os handle */
 	bool _piomode;		/* true if pio mode */
-	wlc_info_t *wlc;
+	struct wlc_info *wlc;
 
 	/* fifo */
-	hnddma_t *di[NFIFO];	/* hnddma handles, per fifo */
+	struct hnddma_pub *di[NFIFO];	/* hnddma handles, per fifo */
 
-#ifdef WLC_LOW
 	uint unit;		/* device instance number */
 
 	/* version info */
@@ -497,31 +476,21 @@
 	bool forcefastclk;	/* true if the h/w is forcing the use of fast clk */
 	bool clk;		/* core is out of reset and has clock */
 	bool sbclk;		/* sb has clock */
-	bmac_pmq_t *bmac_pmq;	/*  bmac PM states derived from ucode PMQ */
+	struct bmac_pmq *bmac_pmq; /*  bmac PM states derived from ucode PMQ */
 	bool phyclk;		/* phy is out of reset and has clock */
 	bool dma_lpbk;		/* core is in DMA loopback */
 
-#ifdef BCMSDIO
-	void *sdh;
-#endif
 	bool ucode_loaded;	/* true after ucode downloaded */
 
-#ifdef WLC_LOW_ONLY
-	struct wl_timer *wdtimer;	/* timer for watchdog routine */
-	struct ether_addr orig_etheraddr;	/* original hw ethernet address */
-	u16 rpc_dngl_agg;	/* rpc agg control for dongle */
-	u32 mem_required_def;	/* memory required to replenish RX DMA ring */
-	u32 mem_required_lower;	/* memory required with lower RX bound */
-	u32 mem_required_least;	/* minimum memory requirement to handle RX */
-
-#endif				/* WLC_LOW_ONLY */
 
 	u8 hw_stf_ss_opmode;	/* STF single stream operation mode */
 	u8 antsel_type;	/* Type of boardlevel mimo antenna switch-logic
 				 * 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
 				 */
-	u32 antsel_avail;	/* put antsel_info_t here if more info is needed */
-#endif				/* WLC_LOW */
+	u32 antsel_avail;	/*
+				 * put struct antsel_info here if more info is
+				 * needed
+				 */
 };
 
 /* TX Queue information
@@ -541,15 +510,12 @@
  * Principal common (os-independent) software data structure.
  */
 struct wlc_info {
-	wlc_pub_t *pub;		/* pointer to wlc public state */
-	osl_t *osh;		/* pointer to os handle */
+	struct wlc_pub *pub;		/* pointer to wlc public state */
+	struct osl_info *osh;		/* pointer to os handle */
 	struct wl_info *wl;	/* pointer to os-specific private state */
 	d11regs_t *regs;	/* pointer to device registers */
 
-	wlc_hw_info_t *hw;	/* HW related state used primarily by BMAC */
-#ifdef WLC_SPLIT
-	rpc_info_t *rpc;	/* Handle to RPC module */
-#endif
+	struct wlc_hw_info *hw;	/* HW related state used primarily by BMAC */
 
 	/* clock */
 	int clkreq_override;	/* setting for clkreq for PCIE : Auto, 0, 1 */
@@ -566,10 +532,11 @@
 	bool clk;		/* core is out of reset and has clock */
 
 	/* multiband */
-	wlccore_t *core;	/* pointer to active io core */
-	wlcband_t *band;	/* pointer to active per-band state */
-	wlccore_t *corestate;	/* per-core state (one per hw core) */
-	wlcband_t *bandstate[MAXBANDS];	/* per-band state (one per phy/radio) */
+	struct wlccore *core;	/* pointer to active io core */
+	struct wlcband *band;	/* pointer to active per-band state */
+	struct wlccore *corestate;	/* per-core state (one per hw core) */
+	/* per-band state (one per phy/radio): */
+	struct wlcband *bandstate[MAXBANDS];
 
 	bool war16165;		/* PCI slow clock 16165 war flag */
 
@@ -584,15 +551,9 @@
 	s8 txpwr_local_max;	/* regulatory local txpwr max */
 	u8 txpwr_local_constraint;	/* local power constraint in dB */
 
-#ifdef WLC_HIGH_ONLY
-	rpctx_info_t *rpctx;	/* RPC TX module */
-	bool reset_bmac_pending;	/* bmac reset is in progressing */
-	u32 rpc_agg;		/* host agg: bit 16-31, bmac agg: bit 0-15 */
-	u32 rpc_msglevel;	/* host rpc: bit 16-31, bmac rpc: bit 0-15 */
-#endif
 
-	ampdu_info_t *ampdu;	/* ampdu module handler */
-	antsel_info_t *asi;	/* antsel module handler */
+	struct ampdu_info *ampdu;	/* ampdu module handler */
+	struct antsel_info *asi;	/* antsel module handler */
 	wlc_cm_info_t *cmi;	/* channel manager module handler */
 
 	void *btparam;		/* bus type specific cookie */
@@ -792,8 +753,6 @@
 	ac_bitmap_t apsd_trigger_ac;	/* Permissible Access Category in which APSD Null
 					 * Trigger frames can be sent
 					 */
-	wlc_ap_info_t *ap;
-
 	u8 htphy_membership;	/* HT PHY membership */
 
 	bool _regulatory_domain;	/* 802.11d enabled? */
@@ -811,7 +770,7 @@
 
 	u16 next_bsscfg_ID;
 
-	wlc_if_t *wlcif_list;	/* linked list of wlc_if structs */
+	struct wlc_if *wlcif_list;	/* linked list of wlc_if structs */
 	wlc_txq_info_t *active_queue;	/* txq for the currently active transmit context */
 	u32 mpc_dur;		/* total time (ms) in mpc mode except for the
 				 * portion since radio is turned off last time
@@ -825,8 +784,8 @@
 
 /* antsel module specific state */
 struct antsel_info {
-	wlc_info_t *wlc;	/* pointer to main wlc structure */
-	wlc_pub_t *pub;		/* pointer to public fn */
+	struct wlc_info *wlc;	/* pointer to main wlc structure */
+	struct wlc_pub *pub;		/* pointer to public fn */
 	u8 antsel_type;	/* Type of boardlevel mimo antenna switch-logic
 				 * 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
 				 */
@@ -842,23 +801,9 @@
 #define IS_MBAND_UNLOCKED(wlc) \
 	((NBANDS(wlc) > 1) && !(wlc)->bandlocked)
 
-#ifdef WLC_LOW
 #define WLC_BAND_PI_RADIO_CHANSPEC wlc_phy_chanspec_get(wlc->band->pi)
-#else
-#define WLC_BAND_PI_RADIO_CHANSPEC (wlc->chanspec)
-#endif
 
 /* sum the individual fifo tx pending packet counts */
-#if defined(WLC_HIGH_ONLY)
-#define TXPKTPENDTOT(wlc)		(wlc_rpctx_txpktpend((wlc)->rpctx, 0, true))
-#define TXPKTPENDGET(wlc, fifo)		(wlc_rpctx_txpktpend((wlc)->rpctx, (fifo), false))
-#define TXPKTPENDINC(wlc, fifo, val)	(wlc_rpctx_txpktpendinc((wlc)->rpctx, (fifo), (val)))
-#define TXPKTPENDDEC(wlc, fifo, val)	(wlc_rpctx_txpktpenddec((wlc)->rpctx, (fifo), (val)))
-#define TXPKTPENDCLR(wlc, fifo)		(wlc_rpctx_txpktpendclr((wlc)->rpctx, (fifo)))
-#define TXAVAIL(wlc, fifo)		(wlc_rpctx_txavail((wlc)->rpctx, (fifo)))
-#define GETNEXTTXP(wlc, _queue)		(wlc_rpctx_getnexttxp((wlc)->rpctx, (_queue)))
-
-#else
 #define	TXPKTPENDTOT(wlc) ((wlc)->core->txpktpend[0] + (wlc)->core->txpktpend[1] + \
 	(wlc)->core->txpktpend[2] + (wlc)->core->txpktpend[3])
 #define TXPKTPENDGET(wlc, fifo)		((wlc)->core->txpktpend[(fifo)])
@@ -868,48 +813,49 @@
 #define TXAVAIL(wlc, fifo)		(*(wlc)->core->txavail[(fifo)])
 #define GETNEXTTXP(wlc, _queue)								\
 		dma_getnexttxp((wlc)->hw->di[(_queue)], HNDDMA_RANGE_TRANSMITTED)
-#endif				/* WLC_HIGH_ONLY */
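These macros carry the per-FIFO tx admission control: a sender checks TXAVAIL() before posting and balances every TXPKTPENDINC() with a TXPKTPENDDEC() in tx-status handling. A sketch under the assumption that the non-RPC TXPKTPENDINC() counterpart (elided between these hunks) takes (wlc, fifo, val) like the removed RPC variant did:

	/* Illustrative only, not code from this patch. */
	static bool try_post_example(struct wlc_info *wlc, uint fifo,
				     struct sk_buff *p, s8 npkts)
	{
		if (TXAVAIL(wlc, fifo) == 0)
			return false;			/* DMA ring full, hold off */

		TXPKTPENDINC(wlc, fifo, npkts);		/* account before commit */
		wlc_txfifo(wlc, fifo, p, true, npkts);	/* declared below */
		return true;
	}
	/* the matching TXPKTPENDDEC() happens via wlc_txfifo_complete() */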
 
 #define WLC_IS_MATCH_SSID(wlc, ssid1, ssid2, len1, len2) \
-	((len1 == len2) && !bcmp(ssid1, ssid2, len1))
+	((len1 == len2) && !memcmp(ssid1, ssid2, len1))
 
-/* API shared by both WLC_HIGH and WLC_LOW driver */
-extern void wlc_high_dpc(wlc_info_t *wlc, u32 macintstatus);
-extern void wlc_fatal_error(wlc_info_t *wlc);
-extern void wlc_bmac_rpc_watchdog(wlc_info_t *wlc);
-extern void wlc_recv(wlc_info_t *wlc, void *p);
-extern bool wlc_dotxstatus(wlc_info_t *wlc, tx_status_t *txs, u32 frm_tx2);
-extern void wlc_txfifo(wlc_info_t *wlc, uint fifo, void *p, bool commit,
-		       s8 txpktpend);
-extern void wlc_txfifo_complete(wlc_info_t *wlc, uint fifo, s8 txpktpend);
-extern void wlc_info_init(wlc_info_t *wlc, int unit);
+extern void wlc_high_dpc(struct wlc_info *wlc, u32 macintstatus);
+extern void wlc_fatal_error(struct wlc_info *wlc);
+extern void wlc_bmac_rpc_watchdog(struct wlc_info *wlc);
+extern void wlc_recv(struct wlc_info *wlc, struct sk_buff *p);
+extern bool wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2);
+extern void wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p,
+		       bool commit, s8 txpktpend);
+extern void wlc_txfifo_complete(struct wlc_info *wlc, uint fifo, s8 txpktpend);
+extern void wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
+			uint prec);
+extern void wlc_info_init(struct wlc_info *wlc, int unit);
 extern void wlc_print_txstatus(tx_status_t *txs);
-extern int wlc_xmtfifo_sz_get(wlc_info_t *wlc, uint fifo, uint *blocks);
-extern void wlc_write_template_ram(wlc_info_t *wlc, int offset, int len,
+extern int wlc_xmtfifo_sz_get(struct wlc_info *wlc, uint fifo, uint *blocks);
+extern void wlc_write_template_ram(struct wlc_info *wlc, int offset, int len,
 				   void *buf);
-extern void wlc_write_hw_bcntemplates(wlc_info_t *wlc, void *bcn, int len,
+extern void wlc_write_hw_bcntemplates(struct wlc_info *wlc, void *bcn, int len,
 				      bool both);
 #if defined(BCMDBG)
-extern void wlc_get_rcmta(wlc_info_t *wlc, int idx, struct ether_addr *addr);
+extern void wlc_get_rcmta(struct wlc_info *wlc, int idx,
+			  struct ether_addr *addr);
 #endif
-extern void wlc_set_rcmta(wlc_info_t *wlc, int idx,
+extern void wlc_set_rcmta(struct wlc_info *wlc, int idx,
 			  const struct ether_addr *addr);
-extern void wlc_set_addrmatch(wlc_info_t *wlc, int match_reg_offset,
+extern void wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
 			      const struct ether_addr *addr);
-extern void wlc_read_tsf(wlc_info_t *wlc, u32 *tsf_l_ptr,
+extern void wlc_read_tsf(struct wlc_info *wlc, u32 *tsf_l_ptr,
 			 u32 *tsf_h_ptr);
-extern void wlc_set_cwmin(wlc_info_t *wlc, u16 newmin);
-extern void wlc_set_cwmax(wlc_info_t *wlc, u16 newmax);
-extern void wlc_fifoerrors(wlc_info_t *wlc);
-extern void wlc_pllreq(wlc_info_t *wlc, bool set, mbool req_bit);
-extern void wlc_reset_bmac_done(wlc_info_t *wlc);
-extern void wlc_protection_upd(wlc_info_t *wlc, uint idx, int val);
-extern void wlc_hwtimer_gptimer_set(wlc_info_t *wlc, uint us);
-extern void wlc_hwtimer_gptimer_abort(wlc_info_t *wlc);
+extern void wlc_set_cwmin(struct wlc_info *wlc, u16 newmin);
+extern void wlc_set_cwmax(struct wlc_info *wlc, u16 newmax);
+extern void wlc_fifoerrors(struct wlc_info *wlc);
+extern void wlc_pllreq(struct wlc_info *wlc, bool set, mbool req_bit);
+extern void wlc_reset_bmac_done(struct wlc_info *wlc);
+extern void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val);
+extern void wlc_hwtimer_gptimer_set(struct wlc_info *wlc, uint us);
+extern void wlc_hwtimer_gptimer_abort(struct wlc_info *wlc);
 
 #if defined(BCMDBG)
 extern void wlc_print_rxh(d11rxhdr_t *rxh);
-extern void wlc_print_hdrs(wlc_info_t *wlc, const char *prefix, u8 *frame,
+extern void wlc_print_hdrs(struct wlc_info *wlc, const char *prefix, u8 *frame,
 			   d11txh_t *txh, d11rxhdr_t *rxh, uint len);
 extern void wlc_print_txdesc(d11txh_t *txh);
 #endif
@@ -917,124 +863,126 @@
 extern void wlc_print_dot11_mac_hdr(u8 *buf, int len);
 #endif
 
-#ifdef WLC_LOW
-extern void wlc_setxband(wlc_hw_info_t *wlc_hw, uint bandunit);
-extern void wlc_coredisable(wlc_hw_info_t *wlc_hw);
-#endif
+extern void wlc_setxband(struct wlc_hw_info *wlc_hw, uint bandunit);
+extern void wlc_coredisable(struct wlc_hw_info *wlc_hw);
 
-extern bool wlc_valid_rate(wlc_info_t *wlc, ratespec_t rate, int band,
+extern bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rate, int band,
 			   bool verbose);
-extern void wlc_ap_upd(wlc_info_t *wlc);
+extern void wlc_ap_upd(struct wlc_info *wlc);
 
 /* helper functions */
-extern void wlc_shm_ssid_upd(wlc_info_t *wlc, wlc_bsscfg_t *cfg);
-extern int wlc_set_gmode(wlc_info_t *wlc, u8 gmode, bool config);
+extern void wlc_shm_ssid_upd(struct wlc_info *wlc, wlc_bsscfg_t *cfg);
+extern int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config);
 
-extern void wlc_mac_bcn_promisc_change(wlc_info_t *wlc, bool promisc);
-extern void wlc_mac_bcn_promisc(wlc_info_t *wlc);
-extern void wlc_mac_promisc(wlc_info_t *wlc);
-extern void wlc_txflowcontrol(wlc_info_t *wlc, wlc_txq_info_t *qi, bool on,
+extern void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc);
+extern void wlc_mac_bcn_promisc(struct wlc_info *wlc);
+extern void wlc_mac_promisc(struct wlc_info *wlc);
+extern void wlc_txflowcontrol(struct wlc_info *wlc, wlc_txq_info_t *qi, bool on,
 			      int prio);
-extern void wlc_txflowcontrol_override(wlc_info_t *wlc, wlc_txq_info_t *qi,
+extern void wlc_txflowcontrol_override(struct wlc_info *wlc, wlc_txq_info_t *qi,
 				       bool on, uint override);
-extern bool wlc_txflowcontrol_prio_isset(wlc_info_t *wlc, wlc_txq_info_t *qi,
-					 int prio);
-extern void wlc_send_q(wlc_info_t *wlc, wlc_txq_info_t *qi);
-extern int wlc_prep_pdu(wlc_info_t *wlc, void *pdu, uint *fifo);
+extern bool wlc_txflowcontrol_prio_isset(struct wlc_info *wlc,
+					 wlc_txq_info_t *qi, int prio);
+extern void wlc_send_q(struct wlc_info *wlc, wlc_txq_info_t *qi);
+extern int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifo);
 
-extern u16 wlc_calc_lsig_len(wlc_info_t *wlc, ratespec_t ratespec,
+extern u16 wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec,
 				uint mac_len);
-extern ratespec_t wlc_rspec_to_rts_rspec(wlc_info_t *wlc, ratespec_t rspec,
+extern ratespec_t wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec,
 					 bool use_rspec, u16 mimo_ctlchbw);
-extern u16 wlc_compute_rtscts_dur(wlc_info_t *wlc, bool cts_only,
+extern u16 wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only,
 				     ratespec_t rts_rate, ratespec_t frame_rate,
 				     u8 rts_preamble_type,
 				     u8 frame_preamble_type, uint frame_len,
 				     bool ba);
 
-extern void wlc_tbtt(wlc_info_t *wlc, d11regs_t *regs);
+extern void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs);
 
 #if defined(BCMDBG)
-extern void wlc_dump_ie(wlc_info_t *wlc, bcm_tlv_t *ie, struct bcmstrbuf *b);
+extern void wlc_dump_ie(struct wlc_info *wlc, bcm_tlv_t *ie,
+			struct bcmstrbuf *b);
 #endif
 
-extern bool wlc_ps_check(wlc_info_t *wlc);
-extern void wlc_reprate_init(wlc_info_t *wlc);
+extern bool wlc_ps_check(struct wlc_info *wlc);
+extern void wlc_reprate_init(struct wlc_info *wlc);
 extern void wlc_bsscfg_reprate_init(wlc_bsscfg_t *bsscfg);
 extern void wlc_uint64_sub(u32 *a_high, u32 *a_low, u32 b_high,
 			   u32 b_low);
 extern u32 wlc_calc_tbtt_offset(u32 bi, u32 tsf_h, u32 tsf_l);
 
 /* Shared memory access */
-extern void wlc_write_shm(wlc_info_t *wlc, uint offset, u16 v);
-extern u16 wlc_read_shm(wlc_info_t *wlc, uint offset);
-extern void wlc_set_shm(wlc_info_t *wlc, uint offset, u16 v, int len);
-extern void wlc_copyto_shm(wlc_info_t *wlc, uint offset, const void *buf,
+extern void wlc_write_shm(struct wlc_info *wlc, uint offset, u16 v);
+extern u16 wlc_read_shm(struct wlc_info *wlc, uint offset);
+extern void wlc_set_shm(struct wlc_info *wlc, uint offset, u16 v, int len);
+extern void wlc_copyto_shm(struct wlc_info *wlc, uint offset, const void *buf,
 			   int len);
-extern void wlc_copyfrom_shm(wlc_info_t *wlc, uint offset, void *buf, int len);
+extern void wlc_copyfrom_shm(struct wlc_info *wlc, uint offset, void *buf,
+			     int len);
 
-extern void wlc_update_beacon(wlc_info_t *wlc);
-extern void wlc_bss_update_beacon(wlc_info_t *wlc, struct wlc_bsscfg *bsscfg);
+extern void wlc_update_beacon(struct wlc_info *wlc);
+extern void wlc_bss_update_beacon(struct wlc_info *wlc,
+				  struct wlc_bsscfg *bsscfg);
 
-extern void wlc_update_probe_resp(wlc_info_t *wlc, bool suspend);
-extern void wlc_bss_update_probe_resp(wlc_info_t *wlc, wlc_bsscfg_t *cfg,
+extern void wlc_update_probe_resp(struct wlc_info *wlc, bool suspend);
+extern void wlc_bss_update_probe_resp(struct wlc_info *wlc, wlc_bsscfg_t *cfg,
 				      bool suspend);
 
-extern bool wlc_ismpc(wlc_info_t *wlc);
-extern bool wlc_is_non_delay_mpc(wlc_info_t *wlc);
-extern void wlc_radio_mpc_upd(wlc_info_t *wlc);
-extern bool wlc_prec_enq(wlc_info_t *wlc, struct pktq *q, void *pkt, int prec);
-extern bool wlc_prec_enq_head(wlc_info_t *wlc, struct pktq *q, void *pkt,
-			      int prec, bool head);
-extern u16 wlc_phytxctl1_calc(wlc_info_t *wlc, ratespec_t rspec);
-extern void wlc_compute_plcp(wlc_info_t *wlc, ratespec_t rate, uint length,
+extern bool wlc_ismpc(struct wlc_info *wlc);
+extern bool wlc_is_non_delay_mpc(struct wlc_info *wlc);
+extern void wlc_radio_mpc_upd(struct wlc_info *wlc);
+extern bool wlc_prec_enq(struct wlc_info *wlc, struct pktq *q, void *pkt,
+			 int prec);
+extern bool wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q,
+			      struct sk_buff *pkt, int prec, bool head);
+extern u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec);
+extern void wlc_compute_plcp(struct wlc_info *wlc, ratespec_t rate, uint length,
 			     u8 *plcp);
-extern uint wlc_calc_frame_time(wlc_info_t *wlc, ratespec_t ratespec,
+extern uint wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec,
 				u8 preamble_type, uint mac_len);
 
-extern void wlc_set_chanspec(wlc_info_t *wlc, chanspec_t chanspec);
+extern void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec);
 
-extern bool wlc_timers_init(wlc_info_t *wlc, int unit);
+extern bool wlc_timers_init(struct wlc_info *wlc, int unit);
 
 extern const bcm_iovar_t wlc_iovars[];
 
 extern int wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
 		       const char *name, void *params, uint p_len, void *arg,
-		       int len, int val_size, wlc_if_t *wlcif);
+		       int len, int val_size, struct wlc_if *wlcif);
 
 #if defined(BCMDBG)
-extern void wlc_print_ies(wlc_info_t *wlc, u8 *ies, uint ies_len);
+extern void wlc_print_ies(struct wlc_info *wlc, u8 *ies, uint ies_len);
 #endif
 
-extern int wlc_set_nmode(wlc_info_t *wlc, s32 nmode);
-extern void wlc_ht_mimops_cap_update(wlc_info_t *wlc, u8 mimops_mode);
-extern void wlc_mimops_action_ht_send(wlc_info_t *wlc, wlc_bsscfg_t *bsscfg,
-				      u8 mimops_mode);
+extern int wlc_set_nmode(struct wlc_info *wlc, s32 nmode);
+extern void wlc_ht_mimops_cap_update(struct wlc_info *wlc, u8 mimops_mode);
+extern void wlc_mimops_action_ht_send(struct wlc_info *wlc,
+				      wlc_bsscfg_t *bsscfg, u8 mimops_mode);
 
-extern void wlc_switch_shortslot(wlc_info_t *wlc, bool shortslot);
+extern void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot);
 extern void wlc_set_bssid(wlc_bsscfg_t *cfg);
 extern void wlc_edcf_setparams(wlc_bsscfg_t *cfg, bool suspend);
-extern void wlc_wme_setparams(wlc_info_t *wlc, u16 aci, void *arg,
-			      bool suspend);
 
-extern void wlc_set_ratetable(wlc_info_t *wlc);
+extern void wlc_set_ratetable(struct wlc_info *wlc);
 extern int wlc_set_mac(wlc_bsscfg_t *cfg);
-extern void wlc_beacon_phytxctl_txant_upd(wlc_info_t *wlc,
+extern void wlc_beacon_phytxctl_txant_upd(struct wlc_info *wlc,
 					  ratespec_t bcn_rate);
-extern void wlc_mod_prb_rsp_rate_table(wlc_info_t *wlc, uint frame_len);
-extern ratespec_t wlc_lowest_basic_rspec(wlc_info_t *wlc, wlc_rateset_t *rs);
-extern u16 wlc_compute_bcntsfoff(wlc_info_t *wlc, ratespec_t rspec,
+extern void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len);
+extern ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc,
+					 wlc_rateset_t *rs);
+extern u16 wlc_compute_bcntsfoff(struct wlc_info *wlc, ratespec_t rspec,
 				    bool short_preamble, bool phydelay);
-extern void wlc_radio_disable(wlc_info_t *wlc);
-extern void wlc_bcn_li_upd(wlc_info_t *wlc);
+extern void wlc_radio_disable(struct wlc_info *wlc);
+extern void wlc_bcn_li_upd(struct wlc_info *wlc);
 
-extern int wlc_get_revision_info(wlc_info_t *wlc, void *buf, uint len);
-extern void wlc_out(wlc_info_t *wlc);
-extern void wlc_set_home_chanspec(wlc_info_t *wlc, chanspec_t chanspec);
-extern void wlc_watchdog_upd(wlc_info_t *wlc, bool tbtt);
-extern bool wlc_ps_allowed(wlc_info_t *wlc);
-extern bool wlc_stay_awake(wlc_info_t *wlc);
-extern void wlc_wme_initparams_sta(wlc_info_t *wlc, wme_param_ie_t *pe);
+extern int wlc_get_revision_info(struct wlc_info *wlc, void *buf, uint len);
+extern void wlc_out(struct wlc_info *wlc);
+extern void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec);
+extern void wlc_watchdog_upd(struct wlc_info *wlc, bool tbtt);
+extern bool wlc_ps_allowed(struct wlc_info *wlc);
+extern bool wlc_stay_awake(struct wlc_info *wlc);
+extern void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe);
 
-extern void wlc_bss_list_free(wlc_info_t *wlc, wlc_bss_list_t *bss_list);
+extern void wlc_bss_list_free(struct wlc_info *wlc, wlc_bss_list_t *bss_list);
+extern void wlc_ht_mimops_cap_update(struct wlc_info *wlc, u8 mimops_mode);
 #endif				/* _wlc_h_ */
diff --git a/drivers/staging/brcm80211/sys/wlc_phy_shim.c b/drivers/staging/brcm80211/sys/wlc_phy_shim.c
index bf8e2e1..8bd4ede 100644
--- a/drivers/staging/brcm80211/sys/wlc_phy_shim.c
+++ b/drivers/staging/brcm80211/sys/wlc_phy_shim.c
@@ -24,9 +24,10 @@
 #include <linux/kernel.h>
 #include <bcmdefs.h>
 #include <wlc_cfg.h>
-#include <linuxver.h>
-#include <bcmutils.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <osl.h>
+#include <bcmutils.h>
 
 #include <proto/802.11.h>
 #include <bcmwifi.h>
@@ -46,6 +47,7 @@
 #include <wlc_channel.h>
 #include <bcmsrom.h>
 #include <wlc_key.h>
+#include <wlc_event.h>
 
 #include <wlc_mac80211.h>
 
@@ -53,21 +55,23 @@
 #include <wlc_phy_shim.h>
 #include <wlc_phy_hal.h>
 #include <wl_export.h>
+#include <wl_dbg.h>
 
 /* PHY SHIM module specific state */
 struct wlc_phy_shim_info {
-	wlc_hw_info_t *wlc_hw;	/* pointer to main wlc_hw structure */
+	struct wlc_hw_info *wlc_hw;	/* pointer to main wlc_hw structure */
 	void *wlc;		/* pointer to main wlc structure */
 	void *wl;		/* pointer to os-specific private state */
 };
 
-wlc_phy_shim_info_t *wlc_phy_shim_attach(wlc_hw_info_t *wlc_hw,
+wlc_phy_shim_info_t *wlc_phy_shim_attach(struct wlc_hw_info *wlc_hw,
 						       void *wl, void *wlc) {
 	wlc_phy_shim_info_t *physhim = NULL;
 
 	physhim = kzalloc(sizeof(wlc_phy_shim_info_t), GFP_ATOMIC);
 	if (!physhim) {
-		WL_ERROR(("wl%d: wlc_phy_shim_attach: out of mem\n", wlc_hw->unit));
+		WL_ERROR("wl%d: wlc_phy_shim_attach: out of mem\n",
+			 wlc_hw->unit);
 		return NULL;
 	}
 	physhim->wlc_hw = wlc_hw;
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index a6a8c33..146a690 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -251,7 +251,7 @@
  * Public portion of "common" os-independent state structure.
  * The wlc handle points at this.
  */
-typedef struct wlc_pub {
+struct wlc_pub {
 	void *wlc;
 
 	struct ieee80211_hw *ieee_hw;
@@ -260,7 +260,7 @@
 	uint mac80211_state;
 	uint unit;		/* device instance number */
 	uint corerev;		/* core revision */
-	osl_t *osh;		/* pointer to os handle */
+	struct osl_info *osh;		/* pointer to os handle */
 	si_t *sih;		/* SB handle (cookie for siutils calls) */
 	char *vars;		/* "environment" name=value */
 	bool up;		/* interface up and running */
@@ -318,9 +318,6 @@
 				 * is implemented properly in osl of that port
 				 * when it enables this Power Save feature.
 				 */
-#ifdef BCMSDIO
-	uint sdiod_drive_strength;	/* SDIO drive strength */
-#endif				/* BCMSDIO */
 
 	u16 boardrev;	/* version # of particular board */
 	u8 sromrev;		/* version # of the srom */
@@ -333,7 +330,7 @@
 	bool _lmacproto;	/* lmac protocol module included and enabled */
 	bool phy_11ncapable;	/* the PHY/HW is capable of 802.11N */
 	bool _ampdumac;		/* mac assist ampdu enabled or not */
-} wlc_pub_t;
+};
 
 /* wl_monitor rx status per packet */
 typedef struct wl_rxsts {
@@ -437,17 +434,13 @@
 #define EDCF_ENAB(pub) (WME_ENAB(pub))
 #define QOS_ENAB(pub) (WME_ENAB(pub) || N_ENAB(pub))
 
-#define MONITOR_ENAB(wlc)	(bcmspace && (wlc)->monitor)
+#define MONITOR_ENAB(wlc)	((wlc)->monitor)
 
-#define PROMISC_ENAB(wlc)	(bcmspace && (wlc)->promisc)
-
-extern void wlc_pkttag_info_move(wlc_pub_t *pub, void *pkt_from, void *pkt_to);
-
-#define WLPKTTAGSCB(p) (WLPKTTAG(p)->_scb)
+#define PROMISC_ENAB(wlc)	((wlc)->promisc)
 
 #define	WLC_PREC_COUNT		16	/* Max precedence level implemented */
 
-/* pri is PKTPRIO encoded in the packet. This maps the Packet priority to
+/* pri is priority encoded in the packet. This maps the Packet priority to
  * enqueue precedence as defined in wlc_prec_map
  */
 extern const u8 wlc_prio2prec_map[];
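Consumers of the exported table look it up by packet priority and hand the result to the precedence-aware enqueue helpers declared in wlc_mac80211.h; a sketch assuming the table is indexed directly by 802.1D priority (0..7):

	static bool enq_by_prio_example(struct wlc_info *wlc, struct pktq *q,
					struct sk_buff *pkt, int prio)
	{
		int prec = wlc_prio2prec_map[prio & 7];

		return wlc_prec_enq(wlc, q, pkt, prec);	/* illustrative only */
	}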
@@ -497,8 +490,8 @@
 
 /* common functions for every port */
 extern void *wlc_attach(void *wl, u16 vendor, u16 device, uint unit,
-			bool piomode, osl_t *osh, void *regsva, uint bustype,
-			void *btparam, uint *perr);
+			bool piomode, struct osl_info *osh, void *regsva,
+			uint bustype, void *btparam, uint *perr);
 extern uint wlc_detach(struct wlc_info *wlc);
 extern int wlc_up(struct wlc_info *wlc);
 extern uint wlc_down(struct wlc_info *wlc);
@@ -517,8 +510,10 @@
 extern bool wlc_intrsupd(struct wlc_info *wlc);
 extern bool wlc_isr(struct wlc_info *wlc, bool *wantdpc);
 extern bool wlc_dpc(struct wlc_info *wlc, bool bounded);
-extern bool wlc_send80211_raw(struct wlc_info *wlc, wlc_if_t *wlcif, void *p,
-			      uint ac);
+extern bool wlc_send80211_raw(struct wlc_info *wlc, struct wlc_if *wlcif,
+			      void *p, uint ac);
+extern bool wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
+				 struct ieee80211_hw *hw);
 extern int wlc_iovar_op(struct wlc_info *wlc, const char *name, void *params,
 			int p_len, void *arg, int len, bool set,
 			struct wlc_if *wlcif);
@@ -527,8 +522,13 @@
 /* helper functions */
 extern void wlc_statsupd(struct wlc_info *wlc);
 extern int wlc_get_header_len(void);
+extern void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc);
+extern void wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
+			      const struct ether_addr *addr);
+extern void wlc_wme_setparams(struct wlc_info *wlc, u16 aci, void *arg,
+			      bool suspend);
 
-extern wlc_pub_t *wlc_pub(void *wlc);
+extern struct wlc_pub *wlc_pub(void *wlc);
 
 /* common functions for every port */
 extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw);
@@ -554,13 +554,15 @@
 /* ioctl */
 extern int wlc_iovar_gets8(struct wlc_info *wlc, const char *name,
 			     s8 *arg);
-extern int wlc_iovar_check(wlc_pub_t *pub, const bcm_iovar_t *vi, void *arg,
+extern int wlc_iovar_check(struct wlc_pub *pub, const bcm_iovar_t *vi,
+			   void *arg,
 			   int len, bool set);
 
-extern int wlc_module_register(wlc_pub_t *pub, const bcm_iovar_t *iovars,
+extern int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
 			       const char *name, void *hdl, iovar_fn_t iovar_fn,
 			       watchdog_fn_t watchdog_fn, down_fn_t down_fn);
-extern int wlc_module_unregister(wlc_pub_t *pub, const char *name, void *hdl);
+extern int wlc_module_unregister(struct wlc_pub *pub, const char *name,
+				 void *hdl);
 extern void wlc_event_if(struct wlc_info *wlc, struct wlc_bsscfg *cfg,
 			 wlc_event_t *e, const struct ether_addr *addr);
 extern void wlc_suspend_mac_and_wait(struct wlc_info *wlc);
@@ -617,10 +619,6 @@
 #define BAND_2G_NAME		"2.4G"
 #define BAND_5G_NAME		"5G"
 
-#if defined(BCMSDIO) || defined(WLC_HIGH_ONLY)
-void wlc_device_removed(void *arg);
-#endif
-
 /* BMAC RPC: 7 u32 params: pkttotlen, fifo, commit, fid, txpktpend, pktflag, rpc_id */
 #define WLC_RPCTX_PARAMS		32
 
diff --git a/drivers/staging/brcm80211/sys/wlc_rate.c b/drivers/staging/brcm80211/sys/wlc_rate.c
index d2d7256..ab7d0be 100644
--- a/drivers/staging/brcm80211/sys/wlc_rate.c
+++ b/drivers/staging/brcm80211/sys/wlc_rate.c
@@ -17,12 +17,14 @@
 #include <bcmdefs.h>
 #include <wlc_cfg.h>
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <bcmendian.h>
 #include <wlioctl.h>
 
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 #include <proto/802.11.h>
 #include <d11.h>
 #include <wlc_rate.h>
@@ -297,7 +299,7 @@
 	uint count;
 	uint i;
 
-	bzero(rateset, sizeof(rateset));
+	memset(rateset, 0, sizeof(rateset));
 	count = rs->count;
 
 	for (i = 0; i < count; i++) {
diff --git a/drivers/staging/brcm80211/sys/wlc_rpc.h b/drivers/staging/brcm80211/sys/wlc_rpc.h
deleted file mode 100644
index db39645..0000000
--- a/drivers/staging/brcm80211/sys/wlc_rpc.h
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _WLC_RPC_H_
-#define _WLC_RPC_H_
-
-#include <wlc_types.h>
-
-/* RPC IDs, reordering is OK. This needs to be in sync with RPC_ID_TABLE below */
-typedef enum {
-	WLRPC_NULL_ID = 0,
-	WLRPC_WLC_REG_READ_ID,
-	WLRPC_WLC_REG_WRITE_ID,
-	WLRPC_WLC_MHF_SET_ID,
-	WLRPC_WLC_MHF_GET_ID,
-	WLRPC_WLC_BMAC_UP_PREP_ID,
-	WLRPC_WLC_BMAC_UP_FINISH_ID,
-	WLRPC_WLC_BMAC_DOWN_PREP_ID,
-	WLRPC_WLC_BMAC_DOWN_FINISH_ID,
-	WLRPC_WLC_BMAC_WRITE_HW_BCNTEMPLATES_ID,
-	WLRPC_WLC_BMAC_RESET_ID,
-	WLRPC_WLC_DNGL_REBOOT_ID,
-	WLRPC_WLC_BMAC_RPC_TXQ_WM_SET_ID,
-	WLRPC_WLC_BMAC_RPC_TXQ_WM_GET_ID,
-	WLRPC_WLC_BMAC_RPC_AGG_SET_ID,
-	WLRPC_WLC_BMAC_RPC_MSGLEVEL_SET_ID,
-	WLRPC_WLC_BMAC_RPC_AGG_LIMIT_SET_ID,
-	WLRPC_WLC_BMAC_RPC_AGG_LIMIT_GET_ID,
-	WLRPC_WLC_BMAC_INIT_ID,
-	WLRPC_WLC_BMAC_SET_CWMIN_ID,
-	WLRPC_WLC_BMAC_MUTE_ID,
-	WLRPC_WLC_PHY_DOIOVAR_ID,
-	WLRPC_WLC_PHY_HOLD_UPD_ID,
-	WLRPC_WLC_PHY_MUTE_UPD_ID,
-	WLRPC_WLC_PHY_CLEAR_TSSI_ID,
-	WLRPC_WLC_PHY_ANT_RXDIV_GET_ID,
-	WLRPC_WLC_PHY_ANT_RXDIV_SET_ID,
-	WLRPC_WLC_PHY_PREAMBLE_SET_ID,
-	WLRPC_WLC_PHY_FREQTRACK_END_ID,
-	WLRPC_WLC_PHY_FREQTRACK_START_ID,
-	WLRPC_WLC_PHY_IOCTL_ID,
-	WLRPC_WLC_PHY_NOISE_SAMPLE_REQUEST_ID,
-	WLRPC_WLC_PHY_CAL_PERICAL_ID,
-	WLRPC_WLC_PHY_TXPOWER_GET_ID,
-	WLRPC_WLC_PHY_TXPOWER_SET_ID,
-	WLRPC_WLC_PHY_TXPOWER_SROMLIMIT_ID,
-	WLRPC_WLC_PHY_RADAR_DETECT_ENABLE_ID,
-	WLRPC_WLC_PHY_RADAR_DETECT_RUN_ID,
-	WLRPC_WLC_PHY_TEST_ISON_ID,
-	WLRPC_WLC_BMAC_COPYFROM_OBJMEM_ID,
-	WLRPC_WLC_BMAC_COPYTO_OBJMEM_ID,
-	WLRPC_WLC_ENABLE_MAC_ID,
-	WLRPC_WLC_MCTRL_ID,
-	WLRPC_WLC_CORERESET_ID,
-	WLRPC_WLC_BMAC_READ_SHM_ID,
-	WLRPC_WLC_BMAC_READ_TSF_ID,
-	WLRPC_WLC_BMAC_SET_ADDRMATCH_ID,
-	WLRPC_WLC_BMAC_SET_CWMAX_ID,
-	WLRPC_WLC_BMAC_SET_RCMTA_ID,
-	WLRPC_WLC_BMAC_SET_SHM_ID,
-	WLRPC_WLC_SUSPEND_MAC_AND_WAIT_ID,
-	WLRPC_WLC_BMAC_WRITE_SHM_ID,
-	WLRPC_WLC_BMAC_WRITE_TEMPLATE_RAM_ID,
-	WLRPC_WLC_TX_FIFO_SUSPEND_ID,
-	WLRPC_WLC_TX_FIFO_RESUME_ID,
-	WLRPC_WLC_TX_FIFO_SUSPENDED_ID,
-	WLRPC_WLC_HW_ETHERADDR_ID,
-	WLRPC_WLC_SET_HW_ETHERADDR_ID,
-	WLRPC_WLC_BMAC_CHANSPEC_SET_ID,
-	WLRPC_WLC_BMAC_TXANT_SET_ID,
-	WLRPC_WLC_BMAC_ANTSEL_TYPE_SET_ID,
-	WLRPC_WLC_BMAC_TXFIFO_ID,
-	WLRPC_WLC_RADIO_READ_HWDISABLED_ID,
-	WLRPC_WLC_RM_CCA_MEASURE_ID,
-	WLRPC_WLC_SET_SHORTSLOT_ID,
-	WLRPC_WLC_WAIT_FOR_WAKE_ID,
-	WLRPC_WLC_PHY_TXPOWER_GET_CURRENT_ID,
-	WLRPC_WLC_PHY_TXPOWER_HW_CTRL_GET_ID,
-	WLRPC_WLC_PHY_TXPOWER_HW_CTRL_SET_ID,
-	WLRPC_WLC_PHY_BSSINIT_ID,
-	WLRPC_WLC_BAND_STF_SS_SET_ID,
-	WLRPC_WLC_PHY_BAND_FIRST_CHANSPEC_ID,
-	WLRPC_WLC_PHY_TXPOWER_LIMIT_SET_ID,
-	WLRPC_WLC_PHY_BAND_CHANNELS_ID,
-	WLRPC_WLC_BMAC_REVINFO_GET_ID,
-	WLRPC_WLC_BMAC_STATE_GET_ID,
-	WLRPC_WLC_BMAC_XMTFIFO_SZ_GET_ID,
-	WLRPC_WLC_BMAC_XMTFIFO_SZ_SET_ID,
-	WLRPC_WLC_BMAC_VALIDATE_CHIP_ACCESS_ID,
-	WLRPC_WLC_RM_CCA_COMPLETE_ID,
-	WLRPC_WLC_RECV_ID,
-	WLRPC_WLC_DOTXSTATUS_ID,
-	WLRPC_WLC_HIGH_DPC_ID,
-	WLRPC_WLC_FATAL_ERROR_ID,
-	WLRPC_WLC_PHY_SET_CHANNEL_14_WIDE_FILTER_ID,
-	WLRPC_WLC_PHY_NOISE_AVG_ID,
-	WLRPC_WLC_PHYCHAIN_INIT_ID,
-	WLRPC_WLC_PHYCHAIN_SET_ID,
-	WLRPC_WLC_PHYCHAIN_GET_ID,
-	WLRPC_WLC_PHY_TKIP_RIFS_WAR_ID,
-	WLRPC_WLC_BMAC_COPYFROM_VARS_ID,
-	WLRPC_WLC_BMAC_RETRYLIMIT_UPD_ID,
-	WLRPC_WLC_BMAC_BTC_MODE_SET_ID,
-	WLRPC_WLC_BMAC_BTC_MODE_GET_ID,
-	WLRPC_WLC_BMAC_BTC_WIRE_SET_ID,
-	WLRPC_WLC_BMAC_BTC_WIRE_GET_ID,
-	WLRPC_WLC_BMAC_SET_NORESET_ID,
-	WLRPC_WLC_AMPDU_TXSTATUS_COMPLETE_ID,
-	WLRPC_WLC_BMAC_FIFOERRORS_ID,
-	WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MIN_ID,
-	WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MAX_ID,
-	WLRPC_WLC_NOISE_CB_ID,
-	WLRPC_WLC_BMAC_LED_HW_DEINIT_ID,
-	WLRPC_WLC_BMAC_LED_HW_MASK_INIT_ID,
-	WLRPC_WLC_PLLREQ_ID,
-	WLRPC_WLC_BMAC_TACLEAR_ID,
-	WLRPC_WLC_BMAC_SET_CLK_ID,
-	WLRPC_WLC_PHY_OFDM_RATESET_WAR_ID,
-	WLRPC_WLC_PHY_BF_PREEMPT_ENABLE_ID,
-	WLRPC_WLC_BMAC_DOIOVARS_ID,
-	WLRPC_WLC_BMAC_DUMP_ID,
-	WLRPC_WLC_CISWRITE_ID,
-	WLRPC_WLC_CISDUMP_ID,
-	WLRPC_WLC_UPDATE_PHY_MODE_ID,
-	WLRPC_WLC_RESET_BMAC_DONE_ID,
-	WLRPC_WLC_BMAC_LED_BLINK_EVENT_ID,
-	WLRPC_WLC_BMAC_LED_SET_ID,
-	WLRPC_WLC_BMAC_LED_BLINK_ID,
-	WLRPC_WLC_BMAC_LED_ID,
-	WLRPC_WLC_BMAC_RATE_SHM_OFFSET_ID,
-	WLRPC_SI_ISCORE_UP_ID,
-	WLRPC_WLC_BMAC_PS_SWITCH_ID,
-	WLRPC_WLC_PHY_STF_SSMODE_GET_ID,
-	WLRPC_WLC_BMAC_DEBUG_ID,
-	WLRPC_WLC_EXTLOG_MSG_ID,
-	WLRPC_WLC_EXTLOG_CFG_ID,
-	WLRPC_BCM_ASSERT_LOG_ID,
-	WLRPC_BCM_ASSERT_TYPE_ID,
-	WLRPC_WLC_BMAC_SET_PHYCAL_CACHE_FLAG_ID,
-	WLRPC_WLC_BMAC_GET_PHYCAL_CACHE_FLAG_ID,
-	WLRPC_WLC_PHY_CAL_CACHE_INIT_ID,
-	WLRPC_WLC_PHY_CAL_CACHE_DEINIT_ID,
-	WLRPC_WLC_BMAC_HW_UP_ID,
-	WLRPC_WLC_BMAC_SET_TXPWR_PERCENT_ID,
-	WLRPC_WLC_PHYCHAIN_ACTIVE_GET_ID,
-	WLRPC_WLC_BMAC_BLINK_SYNC_ID,
-	WLRPC_WLC_BMAC_UCODE_DBGSEL_SET_ID,
-	WLRPC_WLC_BMAC_UCODE_DBGSEL_GET_ID,
-	WLRPC_WLC_PHY_RADAR_DETECT_MODE_SET_ID,
-	WLRPC_WLC_PHY_ACIM_NOISEM_RESET_NPHY_ID,
-	WLRPC_WLC_PHY_INTERFER_SET_NPHY_ID,
-	WLRPC_WLC_BMAC_IFSCTL_EDCRS_SET_ID,
-	WLRPC_WLC_PKTENGTX,
-	WLRPC_WLC_BMAC_SET_DEAF,
-	WLRPC_WLC_BMAC_CLEAR_DEAF,
-	WLRPC_WLC_BMAC_BTC_FLAGS_SET_ID,
-	WLRPC_WLC_BMAC_BTC_FLAGS_GET_ID,
-	WLRPC_WLC_BMAC_SET_RCMTA_TYPE_ID,
-	WLRPC_WLC_BMAC_BTC_FLAGS_UPD_ID,
-	WLRPC_WLC_BMAC_BTC_STUCKWAR_ID,
-	WLRPC_WLC_BMAC_CCA_STATS_READ_ID,
-	WLRPC_WLC_BMAC_ANTSEL_SET_ID,
-	WLRPC_WLC_BMAC_SET_UCODE_LOADED,
-	WLRPC_WLC_PHY_LDPC_SET_ID,
-
-	WLRPC_LAST
-} wlc_rpc_id_t;
-
-#if defined(BCMDBG) | 0
-struct name_entry {
-	int id;
-	char *name;
-};
-
-#define NAME_ENTRY(x) {x, #x}
-
-#define RPC_ID_TABLE { \
-	NAME_ENTRY(WLRPC_WLC_REG_READ_ID),	\
-	NAME_ENTRY(WLRPC_WLC_REG_WRITE_ID),	\
-	NAME_ENTRY(WLRPC_WLC_MHF_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_MHF_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_UP_PREP_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_UP_FINISH_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_DOWN_PREP_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_DOWN_FINISH_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_WRITE_HW_BCNTEMPLATES_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RESET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_DNGL_REBOOT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RPC_TXQ_WM_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RPC_TXQ_WM_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RPC_AGG_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RPC_MSGLEVEL_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RPC_AGG_LIMIT_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_RPC_AGG_LIMIT_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_INIT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_CWMIN_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_MUTE_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_DOIOVAR_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_HOLD_UPD_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_MUTE_UPD_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_CLEAR_TSSI_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_ANT_RXDIV_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_ANT_RXDIV_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_PREAMBLE_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_FREQTRACK_END_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_FREQTRACK_START_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_IOCTL_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_NOISE_SAMPLE_REQUEST_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_CAL_PERICAL_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_SROMLIMIT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_RADAR_DETECT_ENABLE_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_RADAR_DETECT_RUN_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TEST_ISON_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_COPYFROM_OBJMEM_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_COPYTO_OBJMEM_ID),	\
-	NAME_ENTRY(WLRPC_WLC_ENABLE_MAC_ID),	\
-	NAME_ENTRY(WLRPC_WLC_MCTRL_ID),	\
-	NAME_ENTRY(WLRPC_WLC_CORERESET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_READ_SHM_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_READ_TSF_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_ADDRMATCH_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_CWMAX_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_RCMTA_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_SHM_ID),	\
-	NAME_ENTRY(WLRPC_WLC_SUSPEND_MAC_AND_WAIT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_WRITE_SHM_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_WRITE_TEMPLATE_RAM_ID),	\
-	NAME_ENTRY(WLRPC_WLC_TX_FIFO_SUSPEND_ID),	\
-	NAME_ENTRY(WLRPC_WLC_TX_FIFO_RESUME_ID),	\
-	NAME_ENTRY(WLRPC_WLC_TX_FIFO_SUSPENDED_ID),	\
-	NAME_ENTRY(WLRPC_WLC_HW_ETHERADDR_ID),	\
-	NAME_ENTRY(WLRPC_WLC_SET_HW_ETHERADDR_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_CHANSPEC_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_TXANT_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_ANTSEL_TYPE_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_TXFIFO_ID),	\
-	NAME_ENTRY(WLRPC_WLC_RADIO_READ_HWDISABLED_ID),	\
-	NAME_ENTRY(WLRPC_WLC_RM_CCA_MEASURE_ID),	\
-	NAME_ENTRY(WLRPC_WLC_SET_SHORTSLOT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_WAIT_FOR_WAKE_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_CURRENT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_HW_CTRL_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_HW_CTRL_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_BSSINIT_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BAND_STF_SS_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_BAND_FIRST_CHANSPEC_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_LIMIT_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_BAND_CHANNELS_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_REVINFO_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_STATE_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_XMTFIFO_SZ_GET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_XMTFIFO_SZ_SET_ID),	\
-	NAME_ENTRY(WLRPC_WLC_BMAC_VALIDATE_CHIP_ACCESS_ID),	\
-	NAME_ENTRY(WLRPC_WLC_RM_CCA_COMPLETE_ID),	\
-	NAME_ENTRY(WLRPC_WLC_RECV_ID),	\
-	NAME_ENTRY(WLRPC_WLC_DOTXSTATUS_ID),	\
-	NAME_ENTRY(WLRPC_WLC_HIGH_DPC_ID),	\
-	NAME_ENTRY(WLRPC_WLC_FATAL_ERROR_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_SET_CHANNEL_14_WIDE_FILTER_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_NOISE_AVG_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHYCHAIN_INIT_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHYCHAIN_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHYCHAIN_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_TKIP_RIFS_WAR_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_COPYFROM_VARS_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_RETRYLIMIT_UPD_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BTC_MODE_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BTC_MODE_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BTC_WIRE_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BTC_WIRE_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_NORESET_ID), \
-	NAME_ENTRY(WLRPC_WLC_AMPDU_TXSTATUS_COMPLETE_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_FIFOERRORS_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MIN_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_TXPOWER_GET_TARGET_MAX_ID), \
-	NAME_ENTRY(WLRPC_WLC_NOISE_CB_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_LED_HW_DEINIT_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_LED_HW_MASK_INIT_ID), \
-	NAME_ENTRY(WLRPC_WLC_PLLREQ_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_TACLEAR_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_CLK_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_OFDM_RATESET_WAR_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_BF_PREEMPT_ENABLE_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_DOIOVARS_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_DUMP_ID), \
-	NAME_ENTRY(WLRPC_WLC_CISWRITE_ID), \
-	NAME_ENTRY(WLRPC_WLC_CISDUMP_ID), \
-	NAME_ENTRY(WLRPC_WLC_UPDATE_PHY_MODE_ID), \
-	NAME_ENTRY(WLRPC_WLC_RESET_BMAC_DONE_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_LED_BLINK_EVENT_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_LED_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_LED_BLINK_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_LED_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_RATE_SHM_OFFSET_ID), \
-	NAME_ENTRY(WLRPC_SI_ISCORE_UP_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_PS_SWITCH_ID),	\
-	NAME_ENTRY(WLRPC_WLC_PHY_STF_SSMODE_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_DEBUG_ID), \
-	NAME_ENTRY(WLRPC_WLC_EXTLOG_MSG_ID), \
-	NAME_ENTRY(WLRPC_WLC_EXTLOG_CFG_ID), \
-	NAME_ENTRY(WLRPC_BCM_ASSERT_LOG_ID), \
-	NAME_ENTRY(WLRPC_BCM_ASSERT_TYPE_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_PHYCAL_CACHE_FLAG_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_GET_PHYCAL_CACHE_FLAG_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_CAL_CACHE_INIT_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_CAL_CACHE_DEINIT_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_HW_UP_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_TXPWR_PERCENT_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHYCHAIN_ACTIVE_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BLINK_SYNC_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_UCODE_DBGSEL_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_UCODE_DBGSEL_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_RADAR_DETECT_MODE_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_ACIM_NOISEM_RESET_NPHY_ID), \
-	NAME_ENTRY(WLRPC_WLC_PHY_INTERFER_SET_NPHY_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_IFSCTL_EDCRS_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_PKTENGTX), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_DEAF), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_CLEAR_DEAF), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BTC_FLAGS_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_BTC_FLAGS_GET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_RCMTA_TYPE_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_CCA_STATS_READ_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_ANTSEL_SET_ID), \
-	NAME_ENTRY(WLRPC_WLC_BMAC_SET_UCODE_LOADED), \
-	NAME_ENTRY(WLRPC_WLC_PHY_LDPC_SET_ID),	\
-	{0, NULL} \
-	}
-
-static __inline char *_wlc_rpc_id_lookup(const struct name_entry *tbl, int _id)
-{
-	const struct name_entry *elt = tbl;
-	static char __unknown[64];
-	for (; elt->name != NULL; elt++) {
-		if (_id == elt->id)
-			break;
-	}
-	if (_id == elt->id)
-		strncpy(__unknown, elt->name, sizeof(__unknown));
-	else
-		snprintf(__unknown, sizeof(__unknown), "ID:%d", _id);
-	return __unknown;
-}
-
-#define WLC_RPC_ID_LOOKUP(tbl, _id) (_wlc_rpc_id_lookup(tbl, _id))
-
-#endif				/* BCMDBG */
-
-/* refer to txpwr_limits_t for each elements, mcs32 is the at the end for 1 byte */
-#define TXPOWER_XDR_SZ	(roundup(WLC_NUM_RATES_CCK, 4) + roundup(WLC_NUM_RATES_OFDM, 4) * 4 + \
-	roundup(WLC_NUM_RATES_MCS_1_STREAM, 4) * 6 + roundup(WLC_NUM_RATES_MCS_2_STREAM, 4) * 2 + \
-	roundup(1, 4))
-
-#define wlc_rpc_txpwr_limits(b, txpwr, op, err)	\
-	do {											\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->cck, WLC_NUM_RATES_CCK);		\
-		ASSERT(!(err));									\
-												\
-		/* 20 MHz Legacy OFDM rates with SISO transmission */				\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm, WLC_NUM_RATES_OFDM);	\
-		ASSERT(!(err));									\
-												\
-		/* 20 MHz Legacy OFDM rates with CDD transmission */				\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm_cdd, WLC_NUM_RATES_OFDM);   \
-		ASSERT(!(err));									\
-												\
-		/* 40 MHz Legacy OFDM rates with SISO transmission */				\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm_40_siso, WLC_NUM_RATES_OFDM); \
-		ASSERT(!(err));									\
-												\
-		/* 40 MHz Legacy OFDM rates with CDD transmission */				\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->ofdm_40_cdd, WLC_NUM_RATES_OFDM); \
-		ASSERT(!(err));									\
-												\
-		/* 20MHz MCS rates SISO/CDD/STBC/SDM */							 \
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_siso, WLC_NUM_RATES_MCS_1_STREAM); \
-		ASSERT(!(err));									\
-												\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_cdd, WLC_NUM_RATES_MCS_1_STREAM); \
-		ASSERT(!(err));									\
-												\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_stbc, WLC_NUM_RATES_MCS_1_STREAM); \
-		ASSERT(!(err));									\
-												\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_20_mimo, WLC_NUM_RATES_MCS_2_STREAM); \
-		ASSERT(!(err));									\
-												\
-		/* 40MHz MCS rates SISO/CDD/STBC/SDM */							 \
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_siso, WLC_NUM_RATES_MCS_1_STREAM); \
-		ASSERT(!(err));									\
-												\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_cdd, WLC_NUM_RATES_MCS_1_STREAM); \
-		ASSERT(!(err));									\
-												\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_stbc, WLC_NUM_RATES_MCS_1_STREAM); \
-		ASSERT(!(err));									\
-												\
-		(err) = bcm_xdr_##op##_u8_vec((b), (txpwr)->mcs_40_mimo, WLC_NUM_RATES_MCS_2_STREAM); \
-		ASSERT(!(err));									\
-	} while (0)
-
-typedef struct wlc_rpc_ctx {
-	rpc_info_t *rpc;
-	wlc_info_t *wlc;
-	wlc_hw_info_t *wlc_hw;
-} wlc_rpc_ctx_t;
-
-static inline rpc_buf_t *wlc_rpc_buf_alloc(rpc_info_t *rpc, bcm_xdr_buf_t *b,
-					   uint len, wlc_rpc_id_t rpc_id)
-{
-	rpc_buf_t *rpc_buf;
-
-	rpc_buf = bcm_rpc_buf_alloc(rpc, len + sizeof(u32));
-
-	if (!rpc_buf)
-		return NULL;
-
-	bcm_xdr_buf_init(b, bcm_rpc_buf_data(bcm_rpc_tp_get(rpc), rpc_buf),
-			 len + sizeof(u32));
-
-	bcm_xdr_pack_u32(b, rpc_id);
-
-	return rpc_buf;
-}
-
-#if defined(BCMDBG)
-static __inline wlc_rpc_id_t
-wlc_rpc_id_get(struct rpc_info *rpc, rpc_buf_t *buf)
-{
-	wlc_rpc_id_t rpc_id;
-	bcm_xdr_buf_t b;
-
-	bcm_xdr_buf_init(&b, bcm_rpc_buf_data(bcm_rpc_tp_get(rpc), buf),
-			 sizeof(u32));
-
-	bcm_xdr_unpack_u32(&b, (u32 *)((unsigned long) & rpc_id));
-	return rpc_id;
-}
-#endif
-
-static __inline int _wlc_rpc_call(struct rpc_info *rpc, rpc_buf_t *send)
-{
-	int _err = 0;
-#if defined(BCMDBG)
-	wlc_rpc_id_t rpc_id = wlc_rpc_id_get(rpc, send);
-	/* const struct name_entry rpc_name_tbl[] = RPC_ID_TABLE; */
-	static struct name_entry rpc_name_tbl[] = RPC_ID_TABLE;
-	WL_TRACE(("%s: Called id %s\n", __func__,
-		  WLC_RPC_ID_LOOKUP(rpc_name_tbl, rpc_id)));
-#endif
-	_err = bcm_rpc_call(rpc, send);
-	if (_err) {
-#if defined(BCMDBG)
-		WL_ERROR(("%s: Call id %s FAILED\n", __func__,
-			  WLC_RPC_ID_LOOKUP(rpc_name_tbl, rpc_id)));
-#endif
-		_err = 0;
-	}
-	return _err;
-}
-
-#define wlc_rpc_call(rpc, send) (_wlc_rpc_call(rpc, send))
-
-#include <sbhnddma.h>
-#include <sbhndpio.h>
-#include <d11.h>
-
-#ifdef WLC_LOW
-extern void wlc_rpc_bmac_dispatch(wlc_rpc_ctx_t *rpc_ctx, struct rpc_buf *buf);
-extern void wlc_rpc_bmac_dump_txfifohist(wlc_hw_info_t *wlc_hw,
-					 bool dump_clear);
-#else
-extern void wlc_rpc_high_dispatch(wlc_rpc_ctx_t *ctx, struct rpc_buf *buf);
-#endif
-
-/* Packed structure for ease of transport across RPC bus along u32 boundary */
-typedef struct wlc_rpc_txstatus {
-	u32 PAD_framelen;
-	u32 status_frameid;
-	u32 sequence_lasttxtime;
-	u32 ackphyrxsh_phyerr;
-} wlc_rpc_txstatus_t;
-
-static inline
-    void txstatus2rpc_txstatus(tx_status_t *txstatus,
-			       wlc_rpc_txstatus_t *rpc_txstatus)
-{
-	rpc_txstatus->PAD_framelen = txstatus->framelen;
-	rpc_txstatus->status_frameid =
-	    (txstatus->status << 16) | txstatus->frameid;
-	rpc_txstatus->sequence_lasttxtime =
-	    (txstatus->sequence << 16) | txstatus->lasttxtime;
-	rpc_txstatus->ackphyrxsh_phyerr =
-	    (txstatus->ackphyrxsh << 16) | txstatus->phyerr;
-}
-
-static inline
-    void rpc_txstatus2txstatus(wlc_rpc_txstatus_t *rpc_txstatus,
-			       tx_status_t *txstatus)
-{
-	txstatus->framelen = rpc_txstatus->PAD_framelen & 0xffff;
-	txstatus->status = (rpc_txstatus->status_frameid >> 16) & 0xffff;
-	txstatus->frameid = rpc_txstatus->status_frameid & 0xffff;
-	txstatus->sequence = (rpc_txstatus->sequence_lasttxtime >> 16) & 0xffff;
-	txstatus->lasttxtime = rpc_txstatus->sequence_lasttxtime & 0xffff;
-	txstatus->ackphyrxsh = (rpc_txstatus->ackphyrxsh_phyerr >> 16) & 0xffff;
-	txstatus->phyerr = rpc_txstatus->ackphyrxsh_phyerr & 0xffff;
-}
-
-extern void wlc_bmac_dngl_reboot(rpc_info_t *rpc);
-
-#endif				/* WLC_RPC_H */
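The removed wlc_rpc.h packed pairs of 16-bit tx_status_t fields into u32 words so the status crossed the RPC bus on word boundaries, and txstatus2rpc_txstatus()/rpc_txstatus2txstatus() above are exact inverses of each other. Below is a minimal stand-alone sketch of that round trip; the struct layouts and field subset are illustrative only, not the driver's real definitions.

#include <assert.h>
#include <stdint.h>

struct txs  { uint16_t status, frameid, sequence, lasttxtime; };
struct wire { uint32_t status_frameid, sequence_lasttxtime; };

/* pack two 16-bit fields into each 32-bit word, high half first */
static struct wire pack(const struct txs *t)
{
	struct wire w = {
		.status_frameid      = ((uint32_t)t->status << 16) | t->frameid,
		.sequence_lasttxtime = ((uint32_t)t->sequence << 16) | t->lasttxtime,
	};
	return w;
}

/* recover the original fields by shifting and masking */
static struct txs unpack(const struct wire *w)
{
	struct txs t = {
		.status     = (w->status_frameid >> 16) & 0xffff,
		.frameid    = w->status_frameid & 0xffff,
		.sequence   = (w->sequence_lasttxtime >> 16) & 0xffff,
		.lasttxtime = w->sequence_lasttxtime & 0xffff,
	};
	return t;
}

int main(void)
{
	struct txs in = { 0x8001, 0x1234, 0x0042, 0xbeef };
	struct wire w = pack(&in);
	struct txs out = unpack(&w);
	assert(out.status == in.status && out.frameid == in.frameid);
	assert(out.sequence == in.sequence && out.lasttxtime == in.lasttxtime);
	return 0;
}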
diff --git a/drivers/staging/brcm80211/sys/wlc_rpctx.h b/drivers/staging/brcm80211/sys/wlc_rpctx.h
deleted file mode 100644
index 7427154..0000000
--- a/drivers/staging/brcm80211/sys/wlc_rpctx.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_rpctx_h_
-#define _wlc_rpctx_h_
-
-/* forward declaration */
-struct wlc_info;
-
-/* This controls how many packets are given to the dongle. This is required as
- * NTXD needs to be power of 2 but we may not have enough memory to absorb that
- * large number of frames
- */
-#ifndef NRPCTXBUFPOST
-#define NRPCTXBUFPOST NTXD
-#endif
-
-#if defined(WLC_HIGH_ONLY)
-
-struct wlc_rpc_phy {
-	struct rpc_info *rpc;
-};
-
-#define RPCTX_ENAB(pub)		(true)
-extern rpctx_info_t *wlc_rpctx_attach(wlc_pub_t *pub, struct wlc_info *wlc);
-extern int wlc_rpctx_fifoinit(rpctx_info_t *rpctx, uint fifo, uint ntxd);
-extern void wlc_rpctx_detach(rpctx_info_t *rpctx);
-extern int wlc_rpctx_dump(rpctx_info_t *rpctx, struct bcmstrbuf *b);
-extern void *wlc_rpctx_getnexttxp(rpctx_info_t *rpctx, uint fifo);
-extern void wlc_rpctx_txreclaim(rpctx_info_t *rpctx);
-extern uint wlc_rpctx_txavail(rpctx_info_t *rpctx, uint fifo);
-extern int wlc_rpctx_pkteng(rpctx_info_t *rpctx, uint fifo, void *p);
-extern int wlc_rpctx_tx(rpctx_info_t *rpctx, uint fifo, void *p, bool commit,
-			u16 frameid, u8 txpktpend);
-extern void wlc_rpctx_txpktpendinc(rpctx_info_t *rpctx, uint fifo, u8 val);
-extern void wlc_rpctx_txpktpenddec(rpctx_info_t *rpctx, uint fifo, u8 val);
-extern void wlc_rpctx_txpktpendclr(rpctx_info_t *rpctx, uint fifo);
-extern int wlc_rpctx_txpktpend(rpctx_info_t *rpctx, uint fifo, bool all);
-
-#else
-#define	RPCTX_ENAB(pub)			(false)
-#define	wlc_rpctx_attach(pub, wlc)	(NULL)
-#define	wlc_rpctx_fifoinit(rpctx, fifo, ntxd) (0)
-#define	wlc_rpctx_detach(rpctx)		ASSERT(0)
-#define	wlc_rpctx_txavail(rpctx, f)	(false)
-#define	wlc_rpctx_dump(rpctx, b)		(0)
-#define	wlc_rpctx_getnexttxp(rpctx, f)		(NULL)
-#define	wlc_rpctx_txreclaim(rpctx)		ASSERT(0)
-#define	wlc_rpctx_pkteng(rpctx, fifo, p)	do { } while (0)
-#define	wlc_rpctx_tx(rpctx, f, p, c, fid, t)	(0)
-#define	wlc_rpctx_txpktpendinc(rpctx, f, val)	do { } while (0)
-#define	wlc_rpctx_txpktpenddec(rpctx, f, val)	do { } while (0)
-#define	wlc_rpctx_txpktpendclr(rpctx, f)	do { } while (0)
-#define	wlc_rpctx_txpktpend(rpctx, f, all)	(0)
-
-#endif				/* WLC_HIGH */
-
-#endif				/* _wlc_rpctx_h_ */
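The deleted wlc_rpctx.h relied on a common kernel idiom: when the split-MAC transport is compiled out, the whole wlc_rpctx_* API collapses to side-effect-free macros so callers build without any #ifdefs of their own. A hedged, header-only sketch of that pattern follows; CONFIG_FEATURE_FOO and the foo_* names are invented for illustration.

/* feature_foo.h -- illustrative only, not part of the driver */
#ifdef CONFIG_FEATURE_FOO
int  foo_attach(void *ctx);              /* real implementations live elsewhere */
void foo_submit(void *ctx, void *pkt);
#else
/* feature compiled out: callers still compile, the calls cost nothing */
#define foo_attach(ctx)        (0)
#define foo_submit(ctx, pkt)   do { } while (0)
#endif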
diff --git a/drivers/staging/brcm80211/sys/wlc_scb.h b/drivers/staging/brcm80211/sys/wlc_scb.h
index ce26c74..fe84e99 100644
--- a/drivers/staging/brcm80211/sys/wlc_scb.h
+++ b/drivers/staging/brcm80211/sys/wlc_scb.h
@@ -19,7 +19,7 @@
 
 #include <proto/802.1d.h>
 
-extern bool wlc_aggregatable(wlc_info_t *wlc, u8 tid);
+extern bool wlc_aggregatable(struct wlc_info *wlc, u8 tid);
 
 #define AMPDU_TX_BA_MAX_WSIZE	64	/* max Tx ba window size (in pdu) */
 /* structure to store per-tid state for the ampdu initiator */
diff --git a/drivers/staging/brcm80211/sys/wlc_stf.c b/drivers/staging/brcm80211/sys/wlc_stf.c
index 4728ad9..8975b09 100644
--- a/drivers/staging/brcm80211/sys/wlc_stf.c
+++ b/drivers/staging/brcm80211/sys/wlc_stf.c
@@ -15,8 +15,8 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <wlc_cfg.h>
-#include <linuxver.h>
 #include <bcmdefs.h>
 #include <osl.h>
 #include <bcmutils.h>
@@ -25,29 +25,33 @@
 #include <proto/802.11.h>
 #include <wlioctl.h>
 #include <bcmwifi.h>
+#include <sbhndpio.h>
+#include <sbhnddma.h>
 #include <d11.h>
 #include <wlc_rate.h>
 #include <wlc_pub.h>
 #include <wlc_key.h>
 #include <wlc_channel.h>
 #include <wlc_bsscfg.h>
+#include <wlc_event.h>
 #include <wlc_mac80211.h>
 #include <wlc_scb.h>
 #include <wl_export.h>
 #include <wlc_bmac.h>
 #include <wlc_stf.h>
+#include <wl_dbg.h>
 
 #define WLC_STF_SS_STBC_RX(wlc) (WLCISNPHY(wlc->band) && \
 	NREV_GT(wlc->band->phyrev, 3) && NREV_LE(wlc->band->phyrev, 6))
 
-static s8 wlc_stf_stbc_rx_get(wlc_info_t *wlc);
-static bool wlc_stf_stbc_tx_set(wlc_info_t *wlc, s32 int_val);
-static int wlc_stf_txcore_set(wlc_info_t *wlc, u8 Nsts, u8 val);
-static int wlc_stf_spatial_policy_set(wlc_info_t *wlc, int val);
-static void wlc_stf_stbc_rx_ht_update(wlc_info_t *wlc, int val);
+static s8 wlc_stf_stbc_rx_get(struct wlc_info *wlc);
+static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val);
+static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 val);
+static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val);
+static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val);
 
-static void _wlc_stf_phy_txant_upd(wlc_info_t *wlc);
-static u16 _wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec);
+static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc);
+static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec);
 
 #define NSTS_1	1
 #define NSTS_2	2
@@ -61,7 +65,7 @@
 	(0x0f)			/* For Nsts = 4, enable all cores */
 };
 
-static void wlc_stf_stbc_rx_ht_update(wlc_info_t *wlc, int val)
+static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val)
 {
 	ASSERT((val == HT_CAP_RX_STBC_NO)
 	       || (val == HT_CAP_RX_STBC_ONE_STREAM));
@@ -82,7 +86,7 @@
 }
 
 /* every WLC_TEMPSENSE_PERIOD seconds temperature check to decide whether to turn on/off txchain */
-void wlc_tempsense_upd(wlc_info_t *wlc)
+void wlc_tempsense_upd(struct wlc_info *wlc)
 {
 	wlc_phy_t *pi = wlc->band->pi;
 	uint active_chains, txchain;
@@ -106,7 +110,7 @@
 }
 
 void
-wlc_stf_ss_algo_channel_get(wlc_info_t *wlc, u16 *ss_algo_channel,
+wlc_stf_ss_algo_channel_get(struct wlc_info *wlc, u16 *ss_algo_channel,
 			    chanspec_t chanspec)
 {
 	tx_power_t power;
@@ -147,12 +151,12 @@
 		setbit(ss_algo_channel, PHY_TXC1_MODE_STBC);
 }
 
-static s8 wlc_stf_stbc_rx_get(wlc_info_t *wlc)
+static s8 wlc_stf_stbc_rx_get(struct wlc_info *wlc)
 {
 	return (wlc->ht_cap.cap & HT_CAP_RX_STBC_MASK) >> HT_CAP_RX_STBC_SHIFT;
 }
 
-static bool wlc_stf_stbc_tx_set(wlc_info_t *wlc, s32 int_val)
+static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val)
 {
 	if ((int_val != AUTO) && (int_val != OFF) && (int_val != ON)) {
 		return false;
@@ -173,7 +177,7 @@
 	return true;
 }
 
-bool wlc_stf_stbc_rx_set(wlc_info_t *wlc, s32 int_val)
+bool wlc_stf_stbc_rx_set(struct wlc_info *wlc, s32 int_val)
 {
 	if ((int_val != HT_CAP_RX_STBC_NO)
 	    && (int_val != HT_CAP_RX_STBC_ONE_STREAM)) {
@@ -190,10 +194,10 @@
 	return true;
 }
 
-static int wlc_stf_txcore_set(wlc_info_t *wlc, u8 Nsts, u8 core_mask)
+static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 core_mask)
 {
-	WL_TRACE(("wl%d: %s: Nsts %d core_mask %x\n",
-		  wlc->pub->unit, __func__, Nsts, core_mask));
+	WL_TRACE("wl%d: %s: Nsts %d core_mask %x\n",
+		 wlc->pub->unit, __func__, Nsts, core_mask);
 
 	ASSERT((Nsts > 0) && (Nsts <= MAX_STREAMS_SUPPORTED));
 
@@ -227,12 +231,12 @@
 	return BCME_OK;
 }
 
-static int wlc_stf_spatial_policy_set(wlc_info_t *wlc, int val)
+static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val)
 {
 	int i;
 	u8 core_mask = 0;
 
-	WL_TRACE(("wl%d: %s: val %x\n", wlc->pub->unit, __func__, val));
+	WL_TRACE("wl%d: %s: val %x\n", wlc->pub->unit, __func__, val);
 
 	wlc->stf->spatial_policy = (s8) val;
 	for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
@@ -243,7 +247,7 @@
 	return BCME_OK;
 }
 
-int wlc_stf_txchain_set(wlc_info_t *wlc, s32 int_val, bool force)
+int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force)
 {
 	u8 txchain = (u8) int_val;
 	u8 txstreams;
@@ -274,13 +278,15 @@
 				if (RSPEC_STF(wlc->bandstate[i]->rspec_override)
 				    != PHY_TXC1_MODE_SISO) {
 					wlc->bandstate[i]->rspec_override = 0;
-					WL_ERROR(("%s(): temp sense override non-SISO" " rspec_override.\n", __func__));
+					WL_ERROR("%s(): temp sense override non-SISO rspec_override\n",
+						 __func__);
 				}
 				if (RSPEC_STF
 				    (wlc->bandstate[i]->mrspec_override) !=
 				    PHY_TXC1_MODE_SISO) {
 					wlc->bandstate[i]->mrspec_override = 0;
-					WL_ERROR(("%s(): temp sense override non-SISO" " mrspec_override.\n", __func__));
+					WL_ERROR("%s(): temp sense override non-SISO mrspec_override\n",
+						 __func__);
 				}
 			}
 	}
@@ -303,7 +309,7 @@
 	return BCME_OK;
 }
 
-int wlc_stf_rxchain_set(wlc_info_t *wlc, s32 int_val)
+int wlc_stf_rxchain_set(struct wlc_info *wlc, s32 int_val)
 {
 	u8 rxchain_cnt;
 	u8 rxchain = (u8) int_val;
@@ -367,7 +373,7 @@
 }
 
 /* update wlc->stf->ss_opmode which represents the operational stf_ss mode we're using */
-int wlc_stf_ss_update(wlc_info_t *wlc, wlcband_t *band)
+int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band)
 {
 	int ret_code = 0;
 	u8 prev_stf_ss;
@@ -402,7 +408,7 @@
 	return ret_code;
 }
 
-int wlc_stf_attach(wlc_info_t *wlc)
+int wlc_stf_attach(struct wlc_info *wlc)
 {
 	wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_SISO;
 	wlc->bandstate[BAND_5G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_CDD;
@@ -425,11 +431,11 @@
 	return 0;
 }
 
-void wlc_stf_detach(wlc_info_t *wlc)
+void wlc_stf_detach(struct wlc_info *wlc)
 {
 }
 
-int wlc_stf_ant_txant_validate(wlc_info_t *wlc, s8 val)
+int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val)
 {
 	int bcmerror = BCME_OK;
 
@@ -476,7 +482,7 @@
  *    do tx-antenna selection for SISO transmissions
  * for NREV>=7, bit 6 and bit 7 mean antenna 0 and 1 respectively, nit6+bit7 means both cores active
 */
-static void _wlc_stf_phy_txant_upd(wlc_info_t *wlc)
+static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc)
 {
 	s8 txant;
 
@@ -517,12 +523,12 @@
 	wlc_bmac_txant_set(wlc->hw, wlc->stf->phytxant);
 }
 
-void wlc_stf_phy_txant_upd(wlc_info_t *wlc)
+void wlc_stf_phy_txant_upd(struct wlc_info *wlc)
 {
 	_wlc_stf_phy_txant_upd(wlc);
 }
 
-void wlc_stf_phy_chain_calc(wlc_info_t *wlc)
+void wlc_stf_phy_chain_calc(struct wlc_info *wlc)
 {
 	/* get available rx/tx chains */
 	wlc->stf->hw_txchain = (u8) getintvar(wlc->pub->vars, "txchain");
@@ -559,7 +565,7 @@
 	wlc_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
 }
 
-static u16 _wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec)
+static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
 {
 	u16 phytxant = wlc->stf->phytxant;
 
@@ -572,12 +578,12 @@
 	return phytxant;
 }
 
-u16 wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec)
+u16 wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
 {
 	return _wlc_stf_phytxchain_sel(wlc, rspec);
 }
 
-u16 wlc_stf_d11hdrs_phyctl_txant(wlc_info_t *wlc, ratespec_t rspec)
+u16 wlc_stf_d11hdrs_phyctl_txant(struct wlc_info *wlc, ratespec_t rspec)
 {
 	u16 phytxant = wlc->stf->phytxant;
 	u16 mask = PHY_TXC_ANT_MASK;
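The wlc_stf.c hunks above drop the double-parenthesis form WL_TRACE((...)) in favour of a printf-style call, which is what a variadic macro provides. A minimal sketch of such a macro is shown below, gated on a DEBUG switch; the TRACE name is illustrative, while the driver's own WL_TRACE lives in wl_dbg.h.

#include <stdio.h>

#ifdef DEBUG
#define TRACE(fmt, ...) \
	printf("trace: " fmt, ##__VA_ARGS__)   /* GNU ## drops the comma when no args follow */
#else
#define TRACE(fmt, ...) do { } while (0)       /* compiled out: no code, no warnings */
#endif

int main(void)
{
	TRACE("wl%d: %s: Nsts %d core_mask %x\n", 0, __func__, 2, 0x3);
	return 0;
}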
diff --git a/drivers/staging/brcm80211/sys/wlc_stf.h b/drivers/staging/brcm80211/sys/wlc_stf.h
index ee9b02a..8de6382 100644
--- a/drivers/staging/brcm80211/sys/wlc_stf.h
+++ b/drivers/staging/brcm80211/sys/wlc_stf.h
@@ -20,23 +20,24 @@
 #define MIN_SPATIAL_EXPANSION	0
 #define MAX_SPATIAL_EXPANSION	1
 
-extern int wlc_stf_attach(wlc_info_t *wlc);
-extern void wlc_stf_detach(wlc_info_t *wlc);
+extern int wlc_stf_attach(struct wlc_info *wlc);
+extern void wlc_stf_detach(struct wlc_info *wlc);
 
-extern void wlc_tempsense_upd(wlc_info_t *wlc);
-extern void wlc_stf_ss_algo_channel_get(wlc_info_t *wlc,
+extern void wlc_tempsense_upd(struct wlc_info *wlc);
+extern void wlc_stf_ss_algo_channel_get(struct wlc_info *wlc,
 					u16 *ss_algo_channel,
 					chanspec_t chanspec);
-extern int wlc_stf_ss_update(wlc_info_t *wlc, struct wlcband *band);
-extern void wlc_stf_phy_txant_upd(wlc_info_t *wlc);
-extern int wlc_stf_txchain_set(wlc_info_t *wlc, s32 int_val, bool force);
-extern int wlc_stf_rxchain_set(wlc_info_t *wlc, s32 int_val);
-extern bool wlc_stf_stbc_rx_set(wlc_info_t *wlc, s32 int_val);
+extern int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band);
+extern void wlc_stf_phy_txant_upd(struct wlc_info *wlc);
+extern int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force);
+extern int wlc_stf_rxchain_set(struct wlc_info *wlc, s32 int_val);
+extern bool wlc_stf_stbc_rx_set(struct wlc_info *wlc, s32 int_val);
 
-extern int wlc_stf_ant_txant_validate(wlc_info_t *wlc, s8 val);
-extern void wlc_stf_phy_txant_upd(wlc_info_t *wlc);
-extern void wlc_stf_phy_chain_calc(wlc_info_t *wlc);
-extern u16 wlc_stf_phytxchain_sel(wlc_info_t *wlc, ratespec_t rspec);
-extern u16 wlc_stf_d11hdrs_phyctl_txant(wlc_info_t *wlc, ratespec_t rspec);
-extern u16 wlc_stf_spatial_expansion_get(wlc_info_t *wlc, ratespec_t rspec);
+extern int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val);
+extern void wlc_stf_phy_txant_upd(struct wlc_info *wlc);
+extern void wlc_stf_phy_chain_calc(struct wlc_info *wlc);
+extern u16 wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec);
+extern u16 wlc_stf_d11hdrs_phyctl_txant(struct wlc_info *wlc, ratespec_t rspec);
+extern u16 wlc_stf_spatial_expansion_get(struct wlc_info *wlc,
+					 ratespec_t rspec);
 #endif				/* _wlc_stf_h_ */
diff --git a/drivers/staging/brcm80211/sys/wlc_types.h b/drivers/staging/brcm80211/sys/wlc_types.h
index 33047eb..df6e04c 100644
--- a/drivers/staging/brcm80211/sys/wlc_types.h
+++ b/drivers/staging/brcm80211/sys/wlc_types.h
@@ -19,34 +19,19 @@
 
 /* forward declarations */
 
-typedef struct wlc_info wlc_info_t;
-typedef struct wlc_hw_info wlc_hw_info_t;
-typedef struct wlc_if wlc_if_t;
-typedef struct wl_if wl_if_t;
-typedef struct led_info led_info_t;
-typedef struct bmac_led bmac_led_t;
-typedef struct bmac_led_info bmac_led_info_t;
-typedef struct scb_module scb_module_t;
-typedef struct ba_info ba_info_t;
-typedef struct ampdu_info ampdu_info_t;
-typedef struct ratesel_info ratesel_info_t;
-typedef struct wlc_ap_info wlc_ap_info_t;
-typedef struct wlc_auth_info wlc_auth_info_t;
-typedef struct supplicant supplicant_t;
-typedef struct authenticator authenticator_t;
-typedef struct antsel_info antsel_info_t;
-#if !defined(WLC_LOW)
-typedef struct rpctx_info rpctx_info_t;
-#endif
-#ifdef WLC_LOW
-typedef struct bmac_pmq bmac_pmq_t;
-#endif
+struct wlc_info;
+struct wlc_hw_info;
+struct wlc_if;
+struct wl_if;
+struct ampdu_info;
+struct antsel_info;
+struct bmac_pmq;
 
 struct d11init;
 
 #ifndef _hnddma_pub_
 #define _hnddma_pub_
-typedef const struct hnddma_pub hnddma_t;
+struct hnddma_pub;
 #endif				/* _hnddma_pub_ */
 
 #endif				/* _wlc_types_h_ */
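wlc_types.h above replaces a block of typedefs with plain forward declarations: prototypes only need pointers to an incomplete struct type, and the opaque type no longer needs a *_t alias. A small illustrative sketch of the idiom, with hypothetical names:

/* widget.h -- only a forward declaration is exposed */
struct widget;                           /* incomplete type: callers hold pointers only */
struct widget *widget_create(int id);
void widget_destroy(struct widget *w);

/* widget.c -- the full definition stays private to one translation unit */
struct widget {
	int id;
	int refcount;
};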
diff --git a/drivers/staging/brcm80211/util/aiutils.c b/drivers/staging/brcm80211/util/aiutils.c
index 75a7e3a..ddd2f9d 100644
--- a/drivers/staging/brcm80211/util/aiutils.c
+++ b/drivers/staging/brcm80211/util/aiutils.c
@@ -14,11 +14,16 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <bcmdefs.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <hndsoc.h>
@@ -26,8 +31,8 @@
 #include <pcicfg.h>
 #include <bcmdevs.h>
 
-#define BCM47162_DMP() ((CHIPID(sih->chip) == BCM47162_CHIP_ID) && \
-		(CHIPREV(sih->chiprev) == 0) && \
+#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
+		(sih->chiprev == 0) && \
 		(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
 
 /* EROM parsing */
@@ -115,7 +120,7 @@
 
 	erombase = R_REG(sii->osh, &cc->eromptr);
 
-	switch (BUSTYPE(sih->bustype)) {
+	switch (sih->bustype) {
 	case SI_BUS:
 		eromptr = (u32 *) REG_MAP(erombase, SI_CORE_SIZE);
 		break;
@@ -125,7 +130,7 @@
 		sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
 
 		/* Now point the window at the erom */
-		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
+		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, erombase);
 		eromptr = regs;
 		break;
 
@@ -330,7 +335,7 @@
 	ASSERT((sii->intrsenabled_fn == NULL)
 	       || !(*(sii)->intrsenabled_fn) ((sii)->intr_arg));
 
-	switch (BUSTYPE(sih->bustype)) {
+	switch (sih->bustype) {
 	case SI_BUS:
 		/* map new one */
 		if (!sii->regs[coreidx]) {
@@ -347,10 +352,10 @@
 
 	case PCI_BUS:
 		/* point bar0 window */
-		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, addr);
 		regs = sii->curmap;
 		/* point bar0 2nd 4KB window */
-		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN2, wrap);
 		break;
 
 #ifdef BCMSDIO
@@ -504,7 +509,7 @@
 	if (coreidx >= SI_MAXCORES)
 		return 0;
 
-	if (BUSTYPE(sih->bustype) == SI_BUS) {
+	if (sih->bustype == SI_BUS) {
 		/* If internal bus, we can always get at everything */
 		fast = true;
 		/* map if does not exist */
@@ -514,7 +519,7 @@
 			ASSERT(GOODREGS(sii->regs[coreidx]));
 		}
 		r = (u32 *) ((unsigned char *) sii->regs[coreidx] + regoff);
-	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+	} else if (sih->bustype == PCI_BUS) {
 		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
 
 		if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
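The aiutils.c hunks above swap the OSL wrapper OSL_PCI_WRITE_CONFIG() for the kernel's own pci_write_config_dword(), which takes the struct pci_dev, a config-space offset, and a 32-bit value. A hedged sketch of such a BAR0 window move in plain kernel style; the helper and its context are invented, and the register value shown is only a placeholder for the driver's pcicfg.h definition.

#include <linux/pci.h>

#define EXAMPLE_BAR0_WIN	0x80	/* illustrative; the real offset comes from the driver's pcicfg.h */

/* point the BAR0 window at a new backplane address; illustrative helper only */
static void example_set_bar0_window(struct pci_dev *pdev, u32 backplane_addr)
{
	/* pci_write_config_dword() returns a PCIBIOS_* code; callers here ignore it */
	pci_write_config_dword(pdev, EXAMPLE_BAR0_WIN, backplane_addr);
}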
diff --git a/drivers/staging/brcm80211/util/bcmotp.c b/drivers/staging/brcm80211/util/bcmotp.c
index c909832..d820e7b 100644
--- a/drivers/staging/brcm80211/util/bcmotp.c
+++ b/drivers/staging/brcm80211/util/bcmotp.c
@@ -14,11 +14,13 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <bcmdefs.h>
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <bcmdevs.h>
 #include <bcmutils.h>
 #include <siutils.h>
@@ -77,7 +79,7 @@
 	uint ccrev;		/* chipc revision */
 	otp_fn_t *fn;		/* OTP functions */
 	si_t *sih;		/* Saved sb handle */
-	osl_t *osh;
+	struct osl_info *osh;
 
 #ifdef BCMIPXOTP
 	/* IPX OTP section */
@@ -221,7 +223,7 @@
 {
 	int ret = 0;
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 		ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -271,8 +273,8 @@
 	/* Read OTP lock bits and subregion programmed indication bits */
 	oi->status = R_REG(oi->osh, &cc->otpstatus);
 
-	if ((CHIPID(oi->sih->chip) == BCM43224_CHIP_ID)
-	    || (CHIPID(oi->sih->chip) == BCM43225_CHIP_ID)) {
+	if ((oi->sih->chip == BCM43224_CHIP_ID)
+	    || (oi->sih->chip == BCM43225_CHIP_ID)) {
 		u32 p_bits;
 		p_bits =
 		    (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
@@ -569,7 +571,7 @@
 static u16 hndotp_otpr(void *oh, chipcregs_t *cc, uint wn)
 {
 	otpinfo_t *oi = (otpinfo_t *) oh;
-	osl_t *osh;
+	struct osl_info *osh;
 	volatile u16 *ptr;
 
 	ASSERT(wn < ((oi->size / 2) + OTP_RC_LIM_OFF));
@@ -584,7 +586,7 @@
 static u16 hndotp_otproff(void *oh, chipcregs_t *cc, int woff)
 {
 	otpinfo_t *oi = (otpinfo_t *) oh;
-	osl_t *osh;
+	struct osl_info *osh;
 	volatile u16 *ptr;
 
 	ASSERT(woff >= (-((int)oi->size / 2)));
@@ -603,7 +605,7 @@
 	otpinfo_t *oi = (otpinfo_t *) oh;
 	uint k, row, col;
 	u32 otpp, st;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	osh = si_osh(oi->sih);
 	row = idx / 65;
@@ -636,7 +638,7 @@
 	otpinfo_t *oi;
 	u32 cap = 0, clkdiv, otpdiv = 0;
 	void *ret = NULL;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	oi = &otpinfo;
 
@@ -900,7 +902,7 @@
 	void *ret = NULL;
 
 	oi = &otpinfo;
-	bzero(oi, sizeof(otpinfo_t));
+	memset(oi, 0, sizeof(otpinfo_t));
 
 	oi->ccrev = sih->ccrev;
 
diff --git a/drivers/staging/brcm80211/util/bcmsrom.c b/drivers/staging/brcm80211/util/bcmsrom.c
index 1282ef7..19d4502 100644
--- a/drivers/staging/brcm80211/util/bcmsrom.c
+++ b/drivers/staging/brcm80211/util/bcmsrom.c
@@ -15,9 +15,11 @@
  */
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/etherdevice.h>
 #include <bcmdefs.h>
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <stdarg.h>
 #include <bcmutils.h>
 #include <hndsoc.h>
@@ -66,29 +68,30 @@
 
 #define SROM_CIS_SINGLE	1
 
-static int initvars_srom_si(si_t *sih, osl_t *osh, void *curmap, char **vars,
-			    uint *count);
+static int initvars_srom_si(si_t *sih, struct osl_info *osh, void *curmap,
+			    char **vars, uint *count);
 static void _initvars_srom_pci(u8 sromrev, u16 *srom, uint off,
 			       varbuf_t *b);
 static int initvars_srom_pci(si_t *sih, void *curmap, char **vars,
 			     uint *count);
 static int initvars_flash_si(si_t *sih, char **vars, uint *count);
 #ifdef BCMSDIO
-static int initvars_cis_sdio(osl_t *osh, char **vars, uint *count);
-static int sprom_cmd_sdio(osl_t *osh, u8 cmd);
-static int sprom_read_sdio(osl_t *osh, u16 addr, u16 *data);
+static int initvars_cis_sdio(struct osl_info *osh, char **vars, uint *count);
+static int sprom_cmd_sdio(struct osl_info *osh, u8 cmd);
+static int sprom_read_sdio(struct osl_info *osh, u16 addr, u16 *data);
 #endif				/* BCMSDIO */
-static int sprom_read_pci(osl_t *osh, si_t *sih, u16 *sprom, uint wordoff,
-			  u16 *buf, uint nwords, bool check_crc);
+static int sprom_read_pci(struct osl_info *osh, si_t *sih, u16 *sprom,
+			  uint wordoff, u16 *buf, uint nwords, bool check_crc);
 #if defined(BCMNVRAMR)
-static int otp_read_pci(osl_t *osh, si_t *sih, u16 *buf, uint bufsz);
+static int otp_read_pci(struct osl_info *osh, si_t *sih, u16 *buf, uint bufsz);
 #endif
-static u16 srom_cc_cmd(si_t *sih, osl_t *osh, void *ccregs, u32 cmd,
+static u16 srom_cc_cmd(si_t *sih, struct osl_info *osh, void *ccregs, u32 cmd,
 			  uint wordoff, u16 data);
 
-static int initvars_table(osl_t *osh, char *start, char *end, char **vars,
-			  uint *count);
-static int initvars_flash(si_t *sih, osl_t *osh, char **vp, uint len);
+static int initvars_table(struct osl_info *osh, char *start, char *end,
+			  char **vars, uint *count);
+static int initvars_flash(si_t *sih, struct osl_info *osh, char **vp,
+			  uint len);
 
 /* Initialization of varbuf structure */
 static void varbuf_init(varbuf_t *b, char *buf, uint size)
@@ -129,7 +132,7 @@
 	if (s != NULL) {
 		len = (size_t) (s - b->buf);
 		for (s = b->base; s < b->buf;) {
-			if ((bcmp(s, b->buf, len) == 0) && s[len] == '=') {
+			if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
 				len = strlen(s) + 1;
 				memmove(s, (s + len),
 					((b->buf + r + 1) - (s + len)));
@@ -155,21 +158,21 @@
  * Initialize local vars from the right source for this platform.
  * Return 0 on success, nonzero on error.
  */
-int srom_var_init(si_t *sih, uint bustype, void *curmap, osl_t *osh,
+int srom_var_init(si_t *sih, uint bustype, void *curmap, struct osl_info *osh,
 		  char **vars, uint *count)
 {
 	uint len;
 
 	len = 0;
 
-	ASSERT(bustype == BUSTYPE(bustype));
+	ASSERT(bustype == bustype);
 	if (vars == NULL || count == NULL)
 		return 0;
 
 	*vars = NULL;
 	*count = 0;
 
-	switch (BUSTYPE(bustype)) {
+	switch (bustype) {
 	case SI_BUS:
 	case JTAG_BUS:
 		return initvars_srom_si(sih, osh, curmap, vars, count);
@@ -194,7 +197,7 @@
 
 /* support only 16-bit word read from srom */
 int
-srom_read(si_t *sih, uint bustype, void *curmap, osl_t *osh,
+srom_read(si_t *sih, uint bustype, void *curmap, struct osl_info *osh,
 	  uint byteoff, uint nbytes, u16 *buf, bool check_crc)
 {
 	uint off, nw;
@@ -202,7 +205,7 @@
 	uint i;
 #endif				/* BCMSDIO */
 
-	ASSERT(bustype == BUSTYPE(bustype));
+	ASSERT(bustype == bustype);
 
 	/* check input - 16-bit access only */
 	if (byteoff & 1 || nbytes & 1 || (byteoff + nbytes) > SROM_MAX)
@@ -211,7 +214,7 @@
 	off = byteoff / 2;
 	nw = nbytes / 2;
 
-	if (BUSTYPE(bustype) == PCI_BUS) {
+	if (bustype == PCI_BUS) {
 		if (!curmap)
 			return 1;
 
@@ -233,7 +236,7 @@
 		}
 #endif
 #ifdef BCMSDIO
-	} else if (BUSTYPE(bustype) == SDIO_BUS) {
+	} else if (bustype == SDIO_BUS) {
 		off = byteoff / 2;
 		nw = nbytes / 2;
 		for (i = 0; i < nw; i++) {
@@ -242,7 +245,7 @@
 				return 1;
 		}
 #endif				/* BCMSDIO */
-	} else if (BUSTYPE(bustype) == SI_BUS) {
+	} else if (bustype == SI_BUS) {
 		return 1;
 	} else {
 		return 1;
@@ -376,7 +379,8 @@
 /* For dongle HW, accept partial calibration parameters */
 #define BCMDONGLECASE(n)
 
-int srom_parsecis(osl_t *osh, u8 *pcis[], uint ciscnt, char **vars, uint *count)
+int srom_parsecis(struct osl_info *osh, u8 *pcis[], uint ciscnt, char **vars,
+		  uint *count)
 {
 	char eabuf[32];
 	char *base;
@@ -402,7 +406,7 @@
 		return -2;
 
 	varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
-	bzero(base, MAXSZ_NVRAM_VARS);
+	memset(base, 0, MAXSZ_NVRAM_VARS);
 	eabuf[0] = '\0';
 	for (cisnum = 0; cisnum < ciscnt; cisnum++) {
 		cis = *pcis++;
@@ -496,12 +500,12 @@
 					break;
 				default:
 					/* set macaddr if HNBU_MACADDR not seen yet */
-					if (eabuf[0] == '\0'
-					    && cis[i] == LAN_NID
-					    && !(ETHER_ISNULLADDR(&cis[i + 2]))
-					    && !(ETHER_ISMULTI(&cis[i + 2]))) {
+					if (eabuf[0] == '\0' &&
+					    cis[i] == LAN_NID &&
+					    !is_zero_ether_addr(&cis[i + 2]) &&
+					    !is_multicast_ether_addr(&cis[i + 2])) {
 						ASSERT(cis[i + 1] ==
-						       ETHER_ADDR_LEN);
+						       ETH_ALEN);
 						snprintf(eabuf, sizeof(eabuf),
 							"%pM", &cis[i + 2]);
 
@@ -970,8 +974,8 @@
 					break;
 
 				case HNBU_MACADDR:
-					if (!(ETHER_ISNULLADDR(&cis[i + 1])) &&
-					    !(ETHER_ISMULTI(&cis[i + 1]))) {
+					if (!is_zero_ether_addr(&cis[i + 1]) &&
+					    !is_multicast_ether_addr(&cis[i + 1])) {
 						snprintf(eabuf, sizeof(eabuf),
 							"%pM", &cis[i + 1]);
 
@@ -1405,8 +1409,8 @@
  * not in the bus cores.
  */
 static u16
-srom_cc_cmd(si_t *sih, osl_t *osh, void *ccregs, u32 cmd, uint wordoff,
-	    u16 data)
+srom_cc_cmd(si_t *sih, struct osl_info *osh, void *ccregs, u32 cmd,
+	    uint wordoff, u16 data)
 {
 	chipcregs_t *cc = (chipcregs_t *) ccregs;
 	uint wait_cnt = 1000;
@@ -1439,7 +1443,7 @@
  * Return 0 on success, nonzero on error.
  */
 static int
-sprom_read_pci(osl_t *osh, si_t *sih, u16 *sprom, uint wordoff,
+sprom_read_pci(struct osl_info *osh, si_t *sih, u16 *sprom, uint wordoff,
 	       u16 *buf, uint nwords, bool check_crc)
 {
 	int err = 0;
@@ -1499,7 +1503,7 @@
 }
 
 #if defined(BCMNVRAMR)
-static int otp_read_pci(osl_t *osh, si_t *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct osl_info *osh, si_t *sih, u16 *buf, uint bufsz)
 {
 	u8 *otp;
 	uint sz = OTP_SZ_MAX / 2;	/* size in words */
@@ -1547,8 +1551,8 @@
 * Create variable table from memory.
 * Return 0 on success, nonzero on error.
 */
-static int initvars_table(osl_t *osh, char *start, char *end, char **vars,
-			  uint *count)
+static int initvars_table(struct osl_info *osh, char *start, char *end,
+			  char **vars, uint *count)
 {
 	int c = (int)(end - start);
 
@@ -1574,7 +1578,8 @@
  * of the table upon enter and to the end of the table upon exit when success.
  * Return 0 on success, nonzero on error.
  */
-static int initvars_flash(si_t *sih, osl_t *osh, char **base, uint len)
+static int initvars_flash(si_t *sih, struct osl_info *osh, char **base,
+			  uint len)
 {
 	char *vp = *base;
 	char *flash;
@@ -1634,7 +1639,7 @@
  */
 static int initvars_flash_si(si_t *sih, char **vars, uint *count)
 {
-	osl_t *osh = si_osh(sih);
+	struct osl_info *osh = si_osh(sih);
 	char *vp, *base;
 	int err;
 
@@ -1845,7 +1850,7 @@
 	u32 sr;
 	varbuf_t b;
 	char *vp, *base = NULL;
-	osl_t *osh = si_osh(sih);
+	struct osl_info *osh = si_osh(sih);
 	bool flash = false;
 	int err = 0;
 
@@ -1986,7 +1991,7 @@
  * Read the SDIO cis and call parsecis to initialize the vars.
  * Return 0 on success, nonzero on error.
  */
-static int initvars_cis_sdio(osl_t *osh, char **vars, uint *count)
+static int initvars_cis_sdio(struct osl_info *osh, char **vars, uint *count)
 {
 	u8 *cis[SBSDIO_NUM_FUNCTION + 1];
 	uint fn, numfn;
@@ -2020,7 +2025,7 @@
 }
 
 /* set SDIO sprom command register */
-static int sprom_cmd_sdio(osl_t *osh, u8 cmd)
+static int sprom_cmd_sdio(struct osl_info *osh, u8 cmd)
 {
 	u8 status = 0;
 	uint wait_cnt = 1000;
@@ -2040,7 +2045,7 @@
 }
 
 /* read a word from the SDIO srom */
-static int sprom_read_sdio(osl_t *osh, u16 addr, u16 *data)
+static int sprom_read_sdio(struct osl_info *osh, u16 addr, u16 *data)
 {
 	u8 addr_l, addr_h, data_l, data_h;
 
@@ -2068,8 +2073,8 @@
 }
 #endif				/* BCMSDIO */
 
-static int initvars_srom_si(si_t *sih, osl_t *osh, void *curmap, char **vars,
-			    uint *varsz)
+static int initvars_srom_si(si_t *sih, struct osl_info *osh, void *curmap,
+			    char **vars, uint *varsz)
 {
 	/* Search flash nvram section for srom variables */
 	return initvars_flash_si(sih, vars, varsz);
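bcmsrom.c above moves from the local ETHER_ISNULLADDR/ETHER_ISMULTI macros to the kernel's is_zero_ether_addr() and is_multicast_ether_addr() from <linux/etherdevice.h>, and from ETHER_ADDR_LEN to ETH_ALEN. A small sketch of those helpers in use; the function name is invented and only mirrors the CIS MAC-address check above.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

/* illustrative: accept a MAC only if it is neither all-zero nor multicast/broadcast */
static inline bool example_mac_usable(const u8 addr[ETH_ALEN])
{
	return !is_zero_ether_addr(addr) && !is_multicast_ether_addr(addr);
}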
diff --git a/drivers/staging/brcm80211/util/bcmutils.c b/drivers/staging/brcm80211/util/bcmutils.c
index 9789ea4..fd30cc6 100644
--- a/drivers/staging/brcm80211/util/bcmutils.c
+++ b/drivers/staging/brcm80211/util/bcmutils.c
@@ -19,8 +19,10 @@
 #include <linux/string.h>
 #include <bcmdefs.h>
 #include <stdarg.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
 #include <osl.h>
-#include <linuxver.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <bcmnvram.h>
@@ -30,26 +32,26 @@
 #include <proto/802.1d.h>
 #include <proto/802.11.h>
 
-
 /* copy a buffer into a pkt buffer chain */
-uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, unsigned char *buf)
+uint pktfrombuf(struct osl_info *osh, struct sk_buff *p, uint offset, int len,
+		unsigned char *buf)
 {
 	uint n, ret = 0;
 
 	/* skip 'offset' bytes */
-	for (; p && offset; p = PKTNEXT(p)) {
-		if (offset < (uint) PKTLEN(p))
+	for (; p && offset; p = p->next) {
+		if (offset < (uint) (p->len))
 			break;
-		offset -= PKTLEN(p);
+		offset -= p->len;
 	}
 
 	if (!p)
 		return 0;
 
 	/* copy the data */
-	for (; p && len; p = PKTNEXT(p)) {
-		n = min((uint) PKTLEN(p) - offset, (uint) len);
-		bcopy(buf, PKTDATA(p) + offset, n);
+	for (; p && len; p = p->next) {
+		n = min((uint) (p->len) - offset, (uint) len);
+		bcopy(buf, p->data + offset, n);
 		buf += n;
 		len -= n;
 		ret += n;
@@ -59,13 +61,13 @@
 	return ret;
 }
 /* return total length of buffer chain */
-uint BCMFASTPATH pkttotlen(osl_t *osh, void *p)
+uint BCMFASTPATH pkttotlen(struct osl_info *osh, struct sk_buff *p)
 {
 	uint total;
 
 	total = 0;
-	for (; p; p = PKTNEXT(p))
-		total += PKTLEN(p);
+	for (; p; p = p->next)
+		total += p->len;
 	return total;
 }
 
@@ -73,12 +75,13 @@
  * osl multiple-precedence packet queue
  * hi_prec is always >= the number of the highest non-empty precedence
  */
-void *BCMFASTPATH pktq_penq(struct pktq *pq, int prec, void *p)
+struct sk_buff *BCMFASTPATH pktq_penq(struct pktq *pq, int prec,
+				      struct sk_buff *p)
 {
 	struct pktq_prec *q;
 
 	ASSERT(prec >= 0 && prec < pq->num_prec);
-	ASSERT(PKTLINK(p) == NULL);	/* queueing chains not allowed */
+	ASSERT(p->prev == NULL);	/* queueing chains not allowed */
 
 	ASSERT(!pktq_full(pq));
 	ASSERT(!pktq_pfull(pq, prec));
@@ -86,7 +89,7 @@
 	q = &pq->q[prec];
 
 	if (q->head)
-		PKTSETLINK(q->tail, p);
+		q->tail->prev = p;
 	else
 		q->head = p;
 
@@ -101,12 +104,13 @@
 	return p;
 }
 
-void *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec, void *p)
+struct sk_buff *BCMFASTPATH pktq_penq_head(struct pktq *pq, int prec,
+					   struct sk_buff *p)
 {
 	struct pktq_prec *q;
 
 	ASSERT(prec >= 0 && prec < pq->num_prec);
-	ASSERT(PKTLINK(p) == NULL);	/* queueing chains not allowed */
+	ASSERT(p->prev == NULL);	/* queueing chains not allowed */
 
 	ASSERT(!pktq_full(pq));
 	ASSERT(!pktq_pfull(pq, prec));
@@ -116,7 +120,7 @@
 	if (q->head == NULL)
 		q->tail = p;
 
-	PKTSETLINK(p, q->head);
+	p->prev = q->head;
 	q->head = p;
 	q->len++;
 
@@ -128,10 +132,10 @@
 	return p;
 }
 
-void *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
+struct sk_buff *BCMFASTPATH pktq_pdeq(struct pktq *pq, int prec)
 {
 	struct pktq_prec *q;
-	void *p;
+	struct sk_buff *p;
 
 	ASSERT(prec >= 0 && prec < pq->num_prec);
 
@@ -141,7 +145,7 @@
 	if (p == NULL)
 		return NULL;
 
-	q->head = PKTLINK(p);
+	q->head = p->prev;
 	if (q->head == NULL)
 		q->tail = NULL;
 
@@ -149,15 +153,15 @@
 
 	pq->len--;
 
-	PKTSETLINK(p, NULL);
+	p->prev = NULL;
 
 	return p;
 }
 
-void *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
+struct sk_buff *BCMFASTPATH pktq_pdeq_tail(struct pktq *pq, int prec)
 {
 	struct pktq_prec *q;
-	void *p, *prev;
+	struct sk_buff *p, *prev;
 
 	ASSERT(prec >= 0 && prec < pq->num_prec);
 
@@ -167,11 +171,11 @@
 	if (p == NULL)
 		return NULL;
 
-	for (prev = NULL; p != q->tail; p = PKTLINK(p))
+	for (prev = NULL; p != q->tail; p = p->prev)
 		prev = p;
 
 	if (prev)
-		PKTSETLINK(prev, NULL);
+		prev->prev = NULL;
 	else
 		q->head = NULL;
 
@@ -184,17 +188,17 @@
 }
 
 #ifdef BRCM_FULLMAC
-void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+void pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec, bool dir)
 {
 	struct pktq_prec *q;
-	void *p;
+	struct sk_buff *p;
 
 	q = &pq->q[prec];
 	p = q->head;
 	while (p) {
-		q->head = PKTLINK(p);
-		PKTSETLINK(p, NULL);
-		PKTFREE(osh, p, dir);
+		q->head = p->prev;
+		p->prev = NULL;
+		pkt_buf_free_skb(osh, p, dir);
 		q->len--;
 		pq->len--;
 		p = q->head;
@@ -203,7 +207,7 @@
 	q->tail = NULL;
 }
 
-void pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir)
 {
 	int prec;
 	for (prec = 0; prec < pq->num_prec; prec++)
@@ -212,11 +216,11 @@
 }
 #else /* !BRCM_FULLMAC */
 void
-pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn,
-	    int arg)
+pktq_pflush(struct osl_info *osh, struct pktq *pq, int prec, bool dir,
+	    ifpkt_cb_t fn, int arg)
 {
 	struct pktq_prec *q;
-	void *p, *prev = NULL;
+	struct sk_buff *p, *prev = NULL;
 
 	q = &pq->q[prec];
 	p = q->head;
@@ -224,17 +228,17 @@
 		if (fn == NULL || (*fn) (p, arg)) {
 			bool head = (p == q->head);
 			if (head)
-				q->head = PKTLINK(p);
+				q->head = p->prev;
 			else
-				PKTSETLINK(prev, PKTLINK(p));
-			PKTSETLINK(p, NULL);
-			PKTFREE(osh, p, dir);
+				prev->prev = p->prev;
+			p->prev = NULL;
+			pkt_buf_free_skb(osh, p, dir);
 			q->len--;
 			pq->len--;
-			p = (head ? q->head : PKTLINK(prev));
+			p = (head ? q->head : prev->prev);
 		} else {
 			prev = p;
-			p = PKTLINK(p);
+			p = p->prev;
 		}
 	}
 
@@ -244,7 +248,8 @@
 	}
 }
 
-void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+void pktq_flush(struct osl_info *osh, struct pktq *pq, bool dir,
+		ifpkt_cb_t fn, int arg)
 {
 	int prec;
 	for (prec = 0; prec < pq->num_prec; prec++)
@@ -261,7 +266,7 @@
 	ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
 
 	/* pq is variable size; only zero out what's requested */
-	bzero(pq,
+	memset(pq, 0,
 	      offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
 
 	pq->num_prec = (u16) num_prec;
@@ -272,7 +277,7 @@
 		pq->q[prec].max = pq->max;
 }
 
-void *pktq_peek_tail(struct pktq *pq, int *prec_out)
+struct sk_buff *pktq_peek_tail(struct pktq *pq, int *prec_out)
 {
 	int prec;
 
@@ -303,10 +308,11 @@
 	return len;
 }
 /* Priority dequeue from a specific set of precedences */
-void *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+struct sk_buff *BCMFASTPATH pktq_mdeq(struct pktq *pq, uint prec_bmp,
+				      int *prec_out)
 {
 	struct pktq_prec *q;
-	void *p;
+	struct sk_buff *p;
 	int prec;
 
 	if (pq->len == 0)
@@ -325,7 +331,7 @@
 	if (p == NULL)
 		return NULL;
 
-	q->head = PKTLINK(p);
+	q->head = p->prev;
 	if (q->head == NULL)
 		q->tail = NULL;
 
@@ -336,7 +342,7 @@
 
 	pq->len--;
 
-	PKTSETLINK(p, NULL);
+	p->prev = NULL;
 
 	return p;
 }
@@ -373,7 +379,7 @@
 
 	/* first look in vars[] */
 	for (s = vars; s && *s;) {
-		if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+		if ((memcmp(s, name, len) == 0) && (s[len] == '='))
 			return &s[len + 1];
 
 		while (*s++)
@@ -404,15 +410,15 @@
 
 #if defined(BCMDBG)
 /* pretty hex print a pkt buffer chain */
-void prpkt(const char *msg, osl_t *osh, void *p0)
+void prpkt(const char *msg, struct osl_info *osh, struct sk_buff *p0)
 {
-	void *p;
+	struct sk_buff *p;
 
 	if (msg && (msg[0] != '\0'))
 		printf("%s:\n", msg);
 
-	for (p = p0; p; p = PKTNEXT(p))
-		prhex(NULL, PKTDATA(p), PKTLEN(p));
+	for (p = p0; p; p = p->next)
+		prhex(NULL, p->data, p->len);
 }
 #endif				/* defined(BCMDBG) */
 
diff --git a/drivers/staging/brcm80211/util/bcmwifi.c b/drivers/staging/brcm80211/util/bcmwifi.c
index 1bb6c78..81e54bd 100644
--- a/drivers/staging/brcm80211/util/bcmwifi.c
+++ b/drivers/staging/brcm80211/util/bcmwifi.c
@@ -15,6 +15,10 @@
  */
 #include <linux/ctype.h>
 #include <linux/kernel.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
+#include <osl.h>
 #include <bcmdefs.h>
 #include <bcmutils.h>
 #include <bcmwifi.h>
diff --git a/drivers/staging/brcm80211/util/hnddma.c b/drivers/staging/brcm80211/util/hnddma.c
index fe503e7..d088692 100644
--- a/drivers/staging/brcm80211/util/hnddma.c
+++ b/drivers/staging/brcm80211/util/hnddma.c
@@ -16,7 +16,8 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linuxver.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
 #include <bcmdefs.h>
 #include <bcmdevs.h>
 #include <osl.h>
@@ -28,6 +29,10 @@
 #include <sbhnddma.h>
 #include <hnddma.h>
 
+#if defined(__mips__)
+#include <asm/addrspace.h>
+#endif
+
 /* debug/trace */
 #ifdef BCMDBG
 #define	DMA_ERROR(args) \
@@ -68,11 +73,12 @@
 
 #define	DI_INFO(dmah)	((dma_info_t *)dmah)
 
+#define R_SM(r)		(*(r))
+#define W_SM(r, v)	(*(r) = (v))
+
 /* dma engine software state */
 typedef struct dma_info {
-	struct hnddma_pub hnddma;	/* exported structure, don't use hnddma_t,
-					 * which could be const
-					 */
+	struct hnddma_pub hnddma; /* exported structure */
 	uint *msg_level;	/* message level pointer */
 	char name[MAXNAMEL];	/* callers name for diag msgs */
 
@@ -222,7 +228,7 @@
 static void _dma_fifoloopbackenable(dma_info_t *di);
 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
 static u8 dma_align_sizetobits(uint size);
-static void *dma_ringalloc(osl_t *osh, u32 boundary, uint size,
+static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
 			   u16 *alignbits, uint *alloced,
 			   dmaaddr_t *descpa, osldma_t **dmah);
 
@@ -231,7 +237,7 @@
 static bool dma32_txreset(dma_info_t *di);
 static bool dma32_rxreset(dma_info_t *di);
 static bool dma32_txsuspendedidle(dma_info_t *di);
-static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
+static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
 static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range);
 static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
 static void dma32_txrotate(dma_info_t *di);
@@ -246,14 +252,14 @@
 static bool dma32_rxstopped(dma_info_t *di);
 static bool dma32_rxenabled(dma_info_t *di);
 
-static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
+static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs);
 
 /* Prototypes for 64-bit routines */
 static bool dma64_alloc(dma_info_t *di, uint direction);
 static bool dma64_txreset(dma_info_t *di);
 static bool dma64_rxreset(dma_info_t *di);
 static bool dma64_txsuspendedidle(dma_info_t *di);
-static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
+static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
 static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
 static void *dma64_getpos(dma_info_t *di, bool direction);
 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
@@ -270,7 +276,7 @@
 static bool dma64_txstopped(dma_info_t *di);
 static bool dma64_rxstopped(dma_info_t *di);
 static bool dma64_rxenabled(dma_info_t *di);
-static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
+static bool _dma64_addrext(struct osl_info *osh, dma64regs_t *dma64regs);
 
 static inline u32 parity32(u32 data);
 
@@ -368,10 +374,10 @@
 	39
 };
 
-hnddma_t *dma_attach(osl_t *osh, char *name, si_t *sih, void *dmaregstx,
-		     void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
-		     int rxextheadroom, uint nrxpost, uint rxoffset,
-		     uint *msg_level)
+struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
+		     void *dmaregstx, void *dmaregsrx, uint ntxd,
+		     uint nrxd, uint rxbufsize, int rxextheadroom,
+		     uint nrxpost, uint rxoffset, uint *msg_level)
 {
 	dma_info_t *di;
 	uint size;
@@ -570,7 +576,7 @@
 		}
 	}
 
-	return (hnddma_t *) di;
+	return (struct hnddma_pub *) di;
 
  fail:
 	_dma_detach(di);
@@ -663,7 +669,7 @@
 	}
 }
 
-static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
+static bool _dma32_addrext(struct osl_info *osh, dma32regs_t *dma32regs)
 {
 	u32 w;
 
@@ -902,7 +908,7 @@
 
 	/* clear rx descriptor ring */
 	if (DMA64_ENAB(di) && DMA64_MODE(di)) {
-		BZERO_SM((void *)di->rxd64,
+		memset((void *)di->rxd64, '\0',
 			 (di->nrxd * sizeof(dma64dd_t)));
 
 		/* DMA engine with out alignment requirement requires table to be inited
@@ -916,7 +922,7 @@
 		if (di->aligndesc_4k)
 			_dma_ddtable_init(di, DMA_RX, di->rxdpa);
 	} else if (DMA32_ENAB(di)) {
-		BZERO_SM((void *)di->rxd32,
+		memset((void *)di->rxd32, '\0',
 			 (di->nrxd * sizeof(dma32dd_t)));
 		_dma_rxenable(di);
 		_dma_ddtable_init(di, DMA_RX, di->rxdpa);
@@ -978,7 +984,7 @@
  */
 static void *BCMFASTPATH _dma_rx(dma_info_t *di)
 {
-	void *p, *head, *tail;
+	struct sk_buff *p, *head, *tail;
 	uint len;
 	uint pkt_len;
 	int resid = 0;
@@ -988,30 +994,31 @@
 	if (head == NULL)
 		return NULL;
 
-	len = ltoh16(*(u16 *) (PKTDATA(head)));
+	len = ltoh16(*(u16 *) (head->data));
 	DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
 
 #if defined(__mips__)
+#define OSL_UNCACHED(va)        ((void *)KSEG1ADDR((va)))
 	if (!len) {
-		while (!(len = *(u16 *) OSL_UNCACHED(PKTDATA(head))))
+		while (!(len = *(u16 *) OSL_UNCACHED(head->data)))
 			udelay(1);
 
-		*(u16 *) PKTDATA(head) = htol16((u16) len);
+		*(u16 *) (head->data) = htol16((u16) len);
 	}
 #endif				/* defined(__mips__) */
 
 	/* set actual length */
 	pkt_len = min((di->rxoffset + len), di->rxbufsize);
-	PKTSETLEN(head, pkt_len);
+	__skb_trim(head, pkt_len);
 	resid = len - (di->rxbufsize - di->rxoffset);
 
 	/* check for single or multi-buffer rx */
 	if (resid > 0) {
 		tail = head;
 		while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
-			PKTSETNEXT(tail, p);
+			tail->next = p;
 			pkt_len = min(resid, (int)di->rxbufsize);
-			PKTSETLEN(p, pkt_len);
+			__skb_trim(p, pkt_len);
 
 			tail = p;
 			resid -= di->rxbufsize;
@@ -1037,7 +1044,7 @@
 		if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
 			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
 				   di->name, len));
-			PKTFREE(di->osh, head, false);
+			pkt_buf_free_skb(di->osh, head, false);
 			di->hnddma.rxgiants++;
 			goto next_frame;
 		}
@@ -1053,7 +1060,7 @@
  */
 static bool BCMFASTPATH _dma_rxfill(dma_info_t *di)
 {
-	void *p;
+	struct sk_buff *p;
 	u16 rxin, rxout;
 	u32 flags = 0;
 	uint n;
@@ -1085,7 +1092,7 @@
 		   size to be allocated
 		 */
 
-		p = osl_pktget(di->osh, di->rxbufsize + extra_offset);
+		p = pkt_buf_get_skb(di->osh, di->rxbufsize + extra_offset);
 
 		if (p == NULL) {
 			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
@@ -1109,17 +1116,18 @@
 		}
 		/* reserve an extra headroom, if applicable */
 		if (extra_offset)
-			PKTPULL(p, extra_offset);
+			skb_pull(p, extra_offset);
 
 		/* Do a cached write instead of uncached write since DMA_MAP
 		 * will flush the cache.
 		 */
-		*(u32 *) (PKTDATA(p)) = 0;
+		*(u32 *) (p->data) = 0;
 
 		if (DMASGLIST_ENAB)
-			bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t));
+			memset(&di->rxp_dmah[rxout], 0,
+				sizeof(hnddma_seg_map_t));
 
-		pa = DMA_MAP(di->osh, PKTDATA(p),
+		pa = DMA_MAP(di->osh, p->data,
 			     di->rxbufsize, DMA_RX, p, &di->rxp_dmah[rxout]);
 
 		ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));
@@ -1220,15 +1228,10 @@
 {
 	void *p;
 
-	/* "unused local" warning suppression for OSLs that
-	 * define PKTFREE() without using the di->osh arg
-	 */
-	di = di;
-
 	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
 
 	while ((p = _dma_getnextrxp(di, true)))
-		PKTFREE(di->osh, p, false);
+		pkt_buf_free_skb(di->osh, p, false);
 }
 
 static void *BCMFASTPATH _dma_getnextrxp(dma_info_t *di, bool forceall)
@@ -1372,7 +1375,7 @@
 	return 0;
 }
 
-void dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
+void dma_txpioloopback(struct osl_info *osh, dma32regs_t *regs)
 {
 	OR_REG(osh, &regs->control, XC_LE);
 }
@@ -1395,7 +1398,7 @@
  * descriptor ring size aligned location. This will ensure that the ring will
  * not cross page boundary
  */
-static void *dma_ringalloc(osl_t *osh, u32 boundary, uint size,
+static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
 			   u16 *alignbits, uint *alloced,
 			   dmaaddr_t *descpa, osldma_t **dmah)
 {
@@ -1434,7 +1437,7 @@
 	di->hnddma.txavail = di->ntxd - 1;
 
 	/* clear tx descriptor ring */
-	BZERO_SM((void *)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
+	memset((void *)di->txd32, '\0', (di->ntxd * sizeof(dma32dd_t)));
 
 	if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
 		control |= XC_PD;
@@ -1491,7 +1494,7 @@
 		return;
 
 	while ((p = dma32_getnexttxp(di, range)))
-		PKTFREE(di->osh, p, true);
+		pkt_buf_free_skb(di->osh, p, true);
 }
 
 static bool dma32_txstopped(dma_info_t *di)
@@ -1651,9 +1654,9 @@
  * WARNING: the caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
  */
-static int dma32_txfast(dma_info_t *di, void *p0, bool commit)
+static int dma32_txfast(dma_info_t *di, struct sk_buff *p0, bool commit)
 {
-	void *p, *next;
+	struct sk_buff *p, *next;
 	unsigned char *data;
 	uint len;
 	u16 txout;
@@ -1672,12 +1675,12 @@
 		uint nsegs, j;
 		hnddma_seg_map_t *map;
 
-		data = PKTDATA(p);
-		len = PKTLEN(p);
+		data = p->data;
+		len = p->len;
 #ifdef BCM_DMAPAD
 		len += PKTDMAPAD(di->osh, p);
 #endif
-		next = PKTNEXT(p);
+		next = p->next;
 
 		/* return nonzero if out of tx descriptors */
 		if (NEXTTXD(txout) == di->txin)
@@ -1687,7 +1690,8 @@
 			continue;
 
 		if (DMASGLIST_ENAB)
-			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
+			memset(&di->txp_dmah[txout], 0,
+				sizeof(hnddma_seg_map_t));
 
 		/* get physical address of buffer start */
 		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
@@ -1761,7 +1765,7 @@
 
  outoftxd:
 	DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
-	PKTFREE(di->osh, p0, true);
+	pkt_buf_free_skb(di->osh, p0, true);
 	di->hnddma.txavail = 0;
 	di->hnddma.txnobuf++;
 	return -1;
@@ -1959,7 +1963,7 @@
 		if (DMASGLIST_ENAB) {
 			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
 			      sizeof(hnddma_seg_map_t));
-			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
+			memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
 		}
 
 		di->txp[old] = NULL;
@@ -1989,7 +1993,7 @@
 	di->hnddma.txavail = di->ntxd - 1;
 
 	/* clear tx descriptor ring */
-	BZERO_SM((void *)di->txd64, (di->ntxd * sizeof(dma64dd_t)));
+	memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));
 
 	/* DMA engine without alignment requirement requires table to be inited
 	 * before enabling the engine
@@ -2060,7 +2064,7 @@
 	while ((p = dma64_getnexttxp(di, range))) {
 		/* For unframed data, we don't have any packets to free */
 		if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
-			PKTFREE(di->osh, p, true);
+			pkt_buf_free_skb(di->osh, p, true);
 	}
 }
 
@@ -2300,9 +2304,10 @@
  * WARNING: the caller must check the return value for error.
 *   the error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
  */
-static int BCMFASTPATH dma64_txfast(dma_info_t *di, void *p0, bool commit)
+static int BCMFASTPATH dma64_txfast(dma_info_t *di, struct sk_buff *p0,
+				    bool commit)
 {
-	void *p, *next;
+	struct sk_buff *p, *next;
 	unsigned char *data;
 	uint len;
 	u16 txout;
@@ -2321,12 +2326,12 @@
 		uint nsegs, j;
 		hnddma_seg_map_t *map;
 
-		data = PKTDATA(p);
-		len = PKTLEN(p);
+		data = p->data;
+		len = p->len;
 #ifdef BCM_DMAPAD
 		len += PKTDMAPAD(di->osh, p);
 #endif				/* BCM_DMAPAD */
-		next = PKTNEXT(p);
+		next = p->next;
 
 		/* return nonzero if out of tx descriptors */
 		if (NEXTTXD(txout) == di->txin)
@@ -2337,7 +2342,8 @@
 
 		/* get physical address of buffer start */
 		if (DMASGLIST_ENAB)
-			bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t));
+			memset(&di->txp_dmah[txout], 0,
+				sizeof(hnddma_seg_map_t));
 
 		pa = DMA_MAP(di->osh, data, len, DMA_TX, p,
 			     &di->txp_dmah[txout]);
@@ -2409,7 +2415,7 @@
 
  outoftxd:
 	DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
-	PKTFREE(di->osh, p0, true);
+	pkt_buf_free_skb(di->osh, p0, true);
 	di->hnddma.txavail = 0;
 	di->hnddma.txnobuf++;
 	return -1;
@@ -2563,7 +2569,7 @@
 	return rxp;
 }
 
-static bool _dma64_addrext(osl_t *osh, dma64regs_t * dma64regs)
+static bool _dma64_addrext(struct osl_info *osh, dma64regs_t * dma64regs)
 {
 	u32 w;
 	OR_REG(osh, &dma64regs->control, D64_XC_AE);
@@ -2635,7 +2641,7 @@
 		if (DMASGLIST_ENAB) {
 			bcopy(&di->txp_dmah[old], &di->txp_dmah[new],
 			      sizeof(hnddma_seg_map_t));
-			bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t));
+			memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
 		}
 
 		di->txp[old] = NULL;
@@ -2654,7 +2660,7 @@
 uint dma_addrwidth(si_t *sih, void *dmaregs)
 {
 	dma32regs_t *dma32regs;
-	osl_t *osh;
+	struct osl_info *osh;
 
 	osh = si_osh(sih);
 
@@ -2664,8 +2670,8 @@
 		/* backplane are 64-bit capable */
 		if (si_backplane64(sih))
 			/* If bus is System Backplane or PCIE then we can access 64-bits */
-			if ((BUSTYPE(sih->bustype) == SI_BUS) ||
-			    ((BUSTYPE(sih->bustype) == PCI_BUS) &&
+			if ((sih->bustype == SI_BUS) ||
+			    ((sih->bustype == PCI_BUS) &&
 			     (sih->buscoretype == PCIE_CORE_ID)))
 				return DMADDRWIDTH_64;
 
@@ -2679,8 +2685,8 @@
 	dma32regs = (dma32regs_t *) dmaregs;
 
 	/* For System Backplane, PCIE bus or addrext feature, 32-bits ok */
-	if ((BUSTYPE(sih->bustype) == SI_BUS) ||
-	    ((BUSTYPE(sih->bustype) == PCI_BUS)
+	if ((sih->bustype == SI_BUS) ||
+	    ((sih->bustype == PCI_BUS)
 	     && sih->buscoretype == PCIE_CORE_ID)
 	    || (_dma32_addrext(osh, dma32regs)))
 		return DMADDRWIDTH_32;
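Taken together, the hnddma.c hunks rename the public handle to struct hnddma_pub and the OSL handle to struct osl_info. A caller-side sketch against the new dma_attach() prototype, assuming the driver's osl.h/hnddma.h headers; the name string and the ring sizes are placeholders, not values taken from the driver:

/* Caller-side sketch against the new dma_attach() signature. */
static struct hnddma_pub *my_dma_setup(struct osl_info *osh, si_t *sih,
				       void *dmaregstx, void *dmaregsrx,
				       uint *msg_level)
{
	return dma_attach(osh, "wl0: dma", sih, dmaregstx, dmaregsrx,
			  64,		/* ntxd */
			  256,		/* nrxd */
			  2048,		/* rxbufsize */
			  0,		/* rxextheadroom */
			  32,		/* nrxpost */
			  0,		/* rxoffset */
			  msg_level);
}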
diff --git a/drivers/staging/brcm80211/util/hndpmu.c b/drivers/staging/brcm80211/util/hndpmu.c
index a8f3306..6cc59a8 100644
--- a/drivers/staging/brcm80211/util/hndpmu.c
+++ b/drivers/staging/brcm80211/util/hndpmu.c
@@ -13,9 +13,14 @@
  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
 #include <bcmdefs.h>
 #include <osl.h>
 #include <bcmutils.h>
@@ -40,23 +45,23 @@
 #define	PMU_NONE(args)
 
 /* PLL controls/clocks */
-static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc,
+static void si_pmu1_pllinit0(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
 			     u32 xtal);
-static u32 si_pmu1_cpuclk0(si_t *sih, osl_t *osh, chipcregs_t *cc);
-static u32 si_pmu1_alpclk0(si_t *sih, osl_t *osh, chipcregs_t *cc);
+static u32 si_pmu1_cpuclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc);
+static u32 si_pmu1_alpclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc);
 
 /* PMU resources */
 static bool si_pmu_res_depfltr_bb(si_t *sih);
 static bool si_pmu_res_depfltr_ncb(si_t *sih);
 static bool si_pmu_res_depfltr_paldo(si_t *sih);
 static bool si_pmu_res_depfltr_npaldo(si_t *sih);
-static u32 si_pmu_res_deps(si_t *sih, osl_t *osh, chipcregs_t *cc,
+static u32 si_pmu_res_deps(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
 			      u32 rsrcs, bool all);
-static uint si_pmu_res_uptime(si_t *sih, osl_t *osh, chipcregs_t *cc,
+static uint si_pmu_res_uptime(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
 			      u8 rsrc);
 static void si_pmu_res_masks(si_t *sih, u32 * pmin, u32 * pmax);
 static void si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc,
-				       osl_t *osh, u8 spuravoid);
+				       struct osl_info *osh, u8 spuravoid);
 
 static void si_pmu_set_4330_plldivs(si_t *sih);
 
@@ -101,7 +106,7 @@
 }
 
 /* Setup switcher voltage */
-void si_pmu_set_switcher_voltage(si_t *sih, osl_t *osh, u8 bb_voltage,
+void si_pmu_set_switcher_voltage(si_t *sih, struct osl_info *osh, u8 bb_voltage,
 				 u8 rf_voltage)
 {
 	chipcregs_t *cc;
@@ -124,14 +129,14 @@
 	si_setcoreidx(sih, origidx);
 }
 
-void si_pmu_set_ldo_voltage(si_t *sih, osl_t *osh, u8 ldo, u8 voltage)
+void si_pmu_set_ldo_voltage(si_t *sih, struct osl_info *osh, u8 ldo, u8 voltage)
 {
 	u8 sr_cntl_shift = 0, rc_shift = 0, shift = 0, mask = 0;
 	u8 addr = 0;
 
 	ASSERT(sih->cccaps & CC_CAP_PMU);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4336_CHIP_ID:
 		switch (ldo) {
 		case SET_LDO_VOLTAGE_CLDO_PWM:
@@ -182,7 +187,7 @@
 /* d11 slow to fast clock transition time in slow clock cycles */
 #define D11SCC_SLOW2FAST_TRANSITION	2
 
-u16 si_pmu_fast_pwrup_delay(si_t *sih, osl_t *osh)
+u16 si_pmu_fast_pwrup_delay(si_t *sih, struct osl_info *osh)
 {
 	uint delay = PMU_MAX_TRANSITION_DLY;
 	chipcregs_t *cc;
@@ -199,7 +204,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 	case BCM43421_CHIP_ID:
@@ -259,7 +264,7 @@
 	return (u16) delay;
 }
 
-u32 si_pmu_force_ilp(si_t *sih, osl_t *osh, bool force)
+u32 si_pmu_force_ilp(si_t *sih, struct osl_info *osh, bool force)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -599,7 +604,7 @@
 	rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
 
 	/* determine min/max rsrc masks */
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 	case BCM43421_CHIP_ID:
@@ -677,7 +682,7 @@
 }
 
 /* initialize PMU resources */
-void si_pmu_res_init(si_t *sih, osl_t *osh)
+void si_pmu_res_init(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -696,7 +701,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		/* Optimize resources up/down timers */
 		if (ISSIM_ENAB(sih)) {
@@ -1095,7 +1100,7 @@
 #ifdef BCMDBG
 	char chn[8];
 #endif
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		return pmu1_xtaltab0_880_4329;
 	case BCM4319_CHIP_ID:
@@ -1123,7 +1128,7 @@
 	char chn[8];
 #endif
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		/* Default to 38400Khz */
 		return &pmu1_xtaltab0_880_4329[PMU1_XTALTAB0_880_38400K];
@@ -1155,7 +1160,7 @@
 	char chn[8];
 #endif
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		return FVCO_880;
 	case BCM4319_CHIP_ID:
@@ -1178,7 +1183,7 @@
 
 /* query alp/xtal clock frequency */
 static u32
-si_pmu1_alpclk0(si_t *sih, osl_t *osh, chipcregs_t *cc)
+si_pmu1_alpclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc)
 {
 	const pmu1_xtaltab0_t *xt;
 	u32 xf;
@@ -1203,7 +1208,8 @@
  * case the xtal frequency is unknown to the s/w so we need to call
  * si_pmu1_xtaldef0() wherever it is needed to return a default value.
  */
-static void si_pmu1_pllinit0(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 xtal)
+static void si_pmu1_pllinit0(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
+			     u32 xtal)
 {
 	const pmu1_xtaltab0_t *xt;
 	u32 tmp;
@@ -1233,8 +1239,8 @@
 	 */
 	if ((((R_REG(osh, &cc->pmucontrol) & PCTL_XTALFREQ_MASK) >>
 	      PCTL_XTALFREQ_SHIFT) == xt->xf) &&
-	    !((CHIPID(sih->chip) == BCM4319_CHIP_ID)
-	      || (CHIPID(sih->chip) == BCM4330_CHIP_ID))) {
+	    !((sih->chip == BCM4319_CHIP_ID)
+	      || (sih->chip == BCM4330_CHIP_ID))) {
 		PMU_MSG(("PLL already programmed for %d.%d MHz\n",
 			 xt->fref / 1000, xt->fref % 1000));
 		return;
@@ -1244,7 +1250,7 @@
 	PMU_MSG(("Programming PLL for %d.%d MHz\n", xt->fref / 1000,
 		 xt->fref % 1000));
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		/* Change the BBPLL drive strength to 8 for all channels */
 		buf_strength = 0x888888;
@@ -1351,11 +1357,11 @@
 	      p2div << PMU1_PLL0_PC0_P2DIV_SHIFT) & PMU1_PLL0_PC0_P2DIV_MASK);
 	W_REG(osh, &cc->pllcontrol_data, tmp);
 
-	if ((CHIPID(sih->chip) == BCM4330_CHIP_ID))
+	if (sih->chip == BCM4330_CHIP_ID)
 		si_pmu_set_4330_plldivs(sih);
 
-	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID)
-	    && (CHIPREV(sih->chiprev) == 0)) {
+	if ((sih->chip == BCM4329_CHIP_ID)
+	    && (sih->chiprev == 0)) {
 
 		W_REG(osh, &cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
 		tmp = R_REG(osh, &cc->pllcontrol_data);
@@ -1363,9 +1369,9 @@
 		tmp = tmp | DOT11MAC_880MHZ_CLK_DIVISOR_VAL;
 		W_REG(osh, &cc->pllcontrol_data, tmp);
 	}
-	if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
-	    (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
-	    (CHIPID(sih->chip) == BCM4330_CHIP_ID))
+	if ((sih->chip == BCM4319_CHIP_ID) ||
+	    (sih->chip == BCM4336_CHIP_ID) ||
+	    (sih->chip == BCM4330_CHIP_ID))
 		ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MFB;
 	else
 		ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MASH;
@@ -1407,7 +1413,7 @@
 	/* to operate the 4319 usb in 24MHz/48MHz; chipcontrol[2][84:83] needs
 	 * to be updated.
 	 */
-	if ((CHIPID(sih->chip) == BCM4319_CHIP_ID)
+	if ((sih->chip == BCM4319_CHIP_ID)
 	    && (xt->fref != XTAL_FREQ_30000MHZ)) {
 		W_REG(osh, &cc->chipcontrol_addr, PMU1_PLL0_CHIPCTL2);
 		tmp =
@@ -1436,8 +1442,8 @@
 		PCTL_ILP_DIV_MASK) |
 	    ((xt->xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
 
-	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID)
-	    && CHIPREV(sih->chiprev) == 0) {
+	if ((sih->chip == BCM4329_CHIP_ID)
+	    && sih->chiprev == 0) {
 		/* clear the htstretch before clearing HTReqEn */
 		AND_REG(osh, &cc->clkstretch, ~CSTRETCH_HT);
 		tmp &= ~PCTL_HT_REQ_EN;
@@ -1448,7 +1454,7 @@
 
 /* query the CPU clock frequency */
 static u32
-si_pmu1_cpuclk0(si_t *sih, osl_t *osh, chipcregs_t *cc)
+si_pmu1_cpuclk0(si_t *sih, struct osl_info *osh, chipcregs_t *cc)
 {
 	u32 tmp, m1div;
 #ifdef BCMDBG
@@ -1502,7 +1508,7 @@
 }
 
 /* initialize PLL */
-void si_pmu_pll_init(si_t *sih, osl_t *osh, uint xtalfreq)
+void si_pmu_pll_init(si_t *sih, struct osl_info *osh, uint xtalfreq)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -1517,7 +1523,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		if (xtalfreq == 0)
 			xtalfreq = 38400;
@@ -1555,7 +1561,7 @@
 }
 
 /* query alp/xtal clock frequency */
-u32 si_pmu_alp_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_alp_clock(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -1571,7 +1577,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 	case BCM43421_CHIP_ID:
@@ -1616,7 +1622,7 @@
  * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
  */
 static u32
-si_pmu5_clock(si_t *sih, osl_t *osh, chipcregs_t *cc, uint pll0,
+si_pmu5_clock(si_t *sih, struct osl_info *osh, chipcregs_t *cc, uint pll0,
 			  uint m) {
 	u32 tmp, div, ndiv, p1, p2, fc;
 
@@ -1631,7 +1637,7 @@
 		return 0;
 	}
 
-	if (CHIPID(sih->chip) == BCM5357_CHIP_ID) {
+	if (sih->chip == BCM5357_CHIP_ID) {
 		/* Detect failure in clock setting */
 		if ((R_REG(osh, &cc->chipstatus) & 0x40000) != 0) {
 			return 133 * 1000000;
@@ -1669,7 +1675,7 @@
 /* For designs that feed the same clock to both backplane
  * and CPU just return the CPU clock speed.
  */
-u32 si_pmu_si_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_si_clock(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -1685,7 +1691,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM43224_CHIP_ID:
 	case BCM43225_CHIP_ID:
 	case BCM43421_CHIP_ID:
@@ -1702,7 +1708,7 @@
 				  PMU5_MAINPLL_SI);
 		break;
 	case BCM4329_CHIP_ID:
-		if (CHIPREV(sih->chiprev) == 0)
+		if (sih->chiprev == 0)
 			clock = 38400 * 1000;
 		else
 			clock = si_pmu1_cpuclk0(sih, osh, cc);
@@ -1748,7 +1754,7 @@
 }
 
 /* query CPU clock frequency */
-u32 si_pmu_cpu_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_cpu_clock(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -1757,14 +1763,14 @@
 	ASSERT(sih->cccaps & CC_CAP_PMU);
 
 	if ((sih->pmurev >= 5) &&
-	    !((CHIPID(sih->chip) == BCM4329_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM43236_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM4330_CHIP_ID))) {
+	    !((sih->chip == BCM4329_CHIP_ID) ||
+	      (sih->chip == BCM4319_CHIP_ID) ||
+	      (sih->chip == BCM43236_CHIP_ID) ||
+	      (sih->chip == BCM4336_CHIP_ID) ||
+	      (sih->chip == BCM4330_CHIP_ID))) {
 		uint pll;
 
-		switch (CHIPID(sih->chip)) {
+		switch (sih->chip) {
 		case BCM5356_CHIP_ID:
 			pll = PMU5356_MAINPLL_PLL0;
 			break;
@@ -1792,7 +1798,7 @@
 }
 
 /* query memory clock frequency */
-u32 si_pmu_mem_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_mem_clock(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -1801,14 +1807,14 @@
 	ASSERT(sih->cccaps & CC_CAP_PMU);
 
 	if ((sih->pmurev >= 5) &&
-	    !((CHIPID(sih->chip) == BCM4329_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM4319_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM4330_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM4336_CHIP_ID) ||
-	      (CHIPID(sih->chip) == BCM43236_CHIP_ID))) {
+	    !((sih->chip == BCM4329_CHIP_ID) ||
+	      (sih->chip == BCM4319_CHIP_ID) ||
+	      (sih->chip == BCM4330_CHIP_ID) ||
+	      (sih->chip == BCM4336_CHIP_ID) ||
+	      (sih->chip == BCM43236_CHIP_ID))) {
 		uint pll;
 
-		switch (CHIPID(sih->chip)) {
+		switch (sih->chip) {
 		case BCM5356_CHIP_ID:
 			pll = PMU5356_MAINPLL_PLL0;
 			break;
@@ -1841,7 +1847,7 @@
 
 static u32 ilpcycles_per_sec;
 
-u32 si_pmu_ilp_clock(si_t *sih, osl_t *osh)
+u32 si_pmu_ilp_clock(si_t *sih, struct osl_info *osh)
 {
 	if (ISSIM_ENAB(sih))
 		return ILP_CLOCK;
@@ -1905,7 +1911,7 @@
 #define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))
 
 void
-si_sdiod_drive_strength_init(si_t *sih, osl_t *osh,
+si_sdiod_drive_strength_init(si_t *sih, struct osl_info *osh,
 					 u32 drivestrength) {
 	chipcregs_t *cc;
 	uint origidx, intr_val = 0;
@@ -1976,7 +1982,7 @@
 }
 
 /* initialize PMU */
-void si_pmu_init(si_t *sih, osl_t *osh)
+void si_pmu_init(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -1993,7 +1999,7 @@
 	else if (sih->pmurev >= 2)
 		OR_REG(osh, &cc->pmucontrol, PCTL_NOILP_ON_WAIT);
 
-	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 2)) {
+	if ((sih->chip == BCM4329_CHIP_ID) && (sih->chiprev == 2)) {
 		/* Fix for 4329b0 bad LPOM state. */
 		W_REG(osh, &cc->regcontrol_addr, 2);
 		OR_REG(osh, &cc->regcontrol_data, 0x100);
@@ -2008,7 +2014,7 @@
 
 /* Return up time in ILP cycles for the given resource. */
 static uint
-si_pmu_res_uptime(si_t *sih, osl_t *osh, chipcregs_t *cc,
+si_pmu_res_uptime(si_t *sih, struct osl_info *osh, chipcregs_t *cc,
 			      u8 rsrc) {
 	u32 deps;
 	uint up, i, dup, dmax;
@@ -2045,7 +2051,7 @@
 
 /* Return dependencies (direct or all/indirect) for the given resources */
 static u32
-si_pmu_res_deps(si_t *sih, osl_t *osh, chipcregs_t *cc, u32 rsrcs,
+si_pmu_res_deps(si_t *sih, struct osl_info *osh, chipcregs_t *cc, u32 rsrcs,
 		bool all)
 {
 	u32 deps = 0;
@@ -2065,7 +2071,7 @@
 }
 
 /* power up/down OTP through PMU resources */
-void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on)
+void si_pmu_otp_power(si_t *sih, struct osl_info *osh, bool on)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -2084,7 +2090,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		rsrcs = PMURES_BIT(RES4329_OTP_PU);
 		break;
@@ -2135,7 +2141,7 @@
 	si_setcoreidx(sih, origidx);
 }
 
-void si_pmu_rcal(si_t *sih, osl_t *osh)
+void si_pmu_rcal(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
@@ -2147,7 +2153,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:{
 			u8 rcal_code;
 			u32 val;
@@ -2218,7 +2224,7 @@
 	si_setcoreidx(sih, origidx);
 }
 
-void si_pmu_spuravoid(si_t *sih, osl_t *osh, u8 spuravoid)
+void si_pmu_spuravoid(si_t *sih, struct osl_info *osh, u8 spuravoid)
 {
 	chipcregs_t *cc;
 	uint origidx, intr_val;
@@ -2230,7 +2236,7 @@
 	ASSERT(cc != NULL);
 
 	/* force the HT off  */
-	if (CHIPID(sih->chip) == BCM4336_CHIP_ID) {
+	if (sih->chip == BCM4336_CHIP_ID) {
 		tmp = R_REG(osh, &cc->max_res_mask);
 		tmp &= ~RES4336_HT_AVAIL;
 		W_REG(osh, &cc->max_res_mask, tmp);
@@ -2244,7 +2250,7 @@
 	si_pmu_spuravoid_pllupdate(sih, cc, osh, spuravoid);
 
 	/* enable HT back on  */
-	if (CHIPID(sih->chip) == BCM4336_CHIP_ID) {
+	if (sih->chip == BCM4336_CHIP_ID) {
 		tmp = R_REG(osh, &cc->max_res_mask);
 		tmp |= RES4336_HT_AVAIL;
 		W_REG(osh, &cc->max_res_mask, tmp);
@@ -2255,7 +2261,7 @@
 }
 
 static void
-si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, osl_t *osh,
+si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, struct osl_info *osh,
 			   u8 spuravoid)
 {
 	u32 tmp = 0;
@@ -2263,14 +2269,14 @@
 	u8 bcm5357_bcm43236_p1div[] = { 0x1, 0x5, 0x5 };
 	u8 bcm5357_bcm43236_ndiv[] = { 0x30, 0xf6, 0xfc };
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM5357_CHIP_ID:
 	case BCM43235_CHIP_ID:
 	case BCM43236_CHIP_ID:
 	case BCM43238_CHIP_ID:
 
 		/* BCM5357 needs to touch PLL1_PLLCTL[02], so offset PLL0_PLLCTL[02] by 6 */
-		phypll_offset = (CHIPID(sih->chip) == BCM5357_CHIP_ID) ? 6 : 0;
+		phypll_offset = (sih->chip == BCM5357_CHIP_ID) ? 6 : 0;
 
 		/* RMW only the P1 divider */
 		W_REG(osh, &cc->pllcontrol_addr,
@@ -2451,7 +2457,7 @@
 	W_REG(osh, &cc->pmucontrol, tmp);
 }
 
-bool si_pmu_is_otp_powered(si_t *sih, osl_t *osh)
+bool si_pmu_is_otp_powered(si_t *sih, struct osl_info *osh)
 {
 	uint idx;
 	chipcregs_t *cc;
@@ -2462,7 +2468,7 @@
 	cc = si_setcoreidx(sih, SI_CC_IDX);
 	ASSERT(cc != NULL);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		st = (R_REG(osh, &cc->res_state) & PMURES_BIT(RES4329_OTP_PU))
 		    != 0;
@@ -2503,9 +2509,9 @@
 
 void
 #if defined(BCMDBG)
-si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable)
+si_pmu_sprom_enable(si_t *sih, struct osl_info *osh, bool enable)
 #else
-si_pmu_sprom_enable(si_t *sih, osl_t *osh, bool enable)
+si_pmu_sprom_enable(si_t *sih, struct osl_info *osh, bool enable)
 #endif
 {
 	chipcregs_t *cc;
@@ -2521,7 +2527,7 @@
 }
 
 /* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(si_t *sih, osl_t *osh)
+void si_pmu_chip_init(si_t *sih, struct osl_info *osh)
 {
 	uint origidx;
 
@@ -2543,11 +2549,11 @@
 }
 
 /* initialize PMU switch/regulators */
-void si_pmu_swreg_init(si_t *sih, osl_t *osh)
+void si_pmu_swreg_init(si_t *sih, struct osl_info *osh)
 {
 	ASSERT(sih->cccaps & CC_CAP_PMU);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4336_CHIP_ID:
 		/* Reduce CLDO PWM output voltage to 1.2V */
 		si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_CLDO_PWM, 0xe);
@@ -2556,7 +2562,7 @@
 				       0xe);
 		/* Reduce LNLDO1 output voltage to 1.2V */
 		si_pmu_set_ldo_voltage(sih, osh, SET_LDO_VOLTAGE_LNLDO1, 0xe);
-		if (CHIPREV(sih->chiprev) == 0)
+		if (sih->chiprev == 0)
 			si_pmu_regcontrol(sih, 2, 0x400000, 0x400000);
 		break;
 
@@ -2573,7 +2579,7 @@
 {
 	ASSERT(sih->cccaps & CC_CAP_PMU);
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4319_CHIP_ID:
 		if (enable)
 			si_write_wrapperreg(sih, AI_OOBSELOUTB74,
@@ -2587,7 +2593,7 @@
 
 /* Wait for a particular clock level to be on the backplane */
 u32
-si_pmu_waitforclk_on_backplane(si_t *sih, osl_t *osh, u32 clk,
+si_pmu_waitforclk_on_backplane(si_t *sih, struct osl_info *osh, u32 clk,
 			       u32 delay)
 {
 	chipcregs_t *cc;
@@ -2616,7 +2622,7 @@
 
 #define EXT_ILP_HZ 32768
 
-u32 si_pmu_measure_alpclk(si_t *sih, osl_t *osh)
+u32 si_pmu_measure_alpclk(si_t *sih, struct osl_info *osh)
 {
 	chipcregs_t *cc;
 	uint origidx;
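Throughout hndpmu.c the CHIPID()/CHIPREV() wrapper macros are dropped and sih->chip / sih->chiprev are compared directly. A minimal sketch of that style of dispatch; the chip_info structure and the IDs below are made-up stand-ins for si_t and the BCMxxxx_CHIP_ID constants, and the returned frequencies are illustrative only:

/* Stand-ins for the si_t fields and chip IDs; values are illustrative only. */
struct chip_info {
	unsigned int chip;
	unsigned int chiprev;
};

#define CHIP_A	0x4329
#define CHIP_B	0x4336

static unsigned int xtal_default_khz(const struct chip_info *ci)
{
	switch (ci->chip) {
	case CHIP_A:
		return ci->chiprev == 0 ? 38400 : 26000;
	case CHIP_B:
		return 26000;
	default:
		return 0;	/* unknown chip: let the caller decide */
	}
}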
diff --git a/drivers/staging/brcm80211/util/linux_osl.c b/drivers/staging/brcm80211/util/linux_osl.c
index 2bb5b87..e6716e8 100644
--- a/drivers/staging/brcm80211/util/linux_osl.c
+++ b/drivers/staging/brcm80211/util/linux_osl.c
@@ -20,145 +20,57 @@
 #include <asm/paccess.h>
 #endif				/* mips */
 #include <bcmendian.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
 #include <bcmdefs.h>
 #include <osl.h>
 #include <bcmutils.h>
 #include <pcicfg.h>
 
 
-#define PCI_CFG_RETRY 		10
-
 #define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognise osh */
 #define BCM_MEM_FILENAME_LEN 	24	/* Mem. filename length */
 
-struct osl_info {
-	osl_pubinfo_t pub;
-	uint magic;
-	void *pdev;
-	uint failed;
-	uint bustype;
-};
-
 /* Global ASSERT type flag */
 u32 g_assert_type;
 
-#ifdef BRCM_FULLMAC
-static s16 linuxbcmerrormap[] = { 0,	/* 0 */
-	-EINVAL,		/* BCME_ERROR */
-	-EINVAL,		/* BCME_BADARG */
-	-EINVAL,		/* BCME_BADOPTION */
-	-EINVAL,		/* BCME_NOTUP */
-	-EINVAL,		/* BCME_NOTDOWN */
-	-EINVAL,		/* BCME_NOTAP */
-	-EINVAL,		/* BCME_NOTSTA */
-	-EINVAL,		/* BCME_BADKEYIDX */
-	-EINVAL,		/* BCME_RADIOOFF */
-	-EINVAL,		/* BCME_NOTBANDLOCKED */
-	-EINVAL,		/* BCME_NOCLK */
-	-EINVAL,		/* BCME_BADRATESET */
-	-EINVAL,		/* BCME_BADBAND */
-	-E2BIG,			/* BCME_BUFTOOSHORT */
-	-E2BIG,			/* BCME_BUFTOOLONG */
-	-EBUSY,			/* BCME_BUSY */
-	-EINVAL,		/* BCME_NOTASSOCIATED */
-	-EINVAL,		/* BCME_BADSSIDLEN */
-	-EINVAL,		/* BCME_OUTOFRANGECHAN */
-	-EINVAL,		/* BCME_BADCHAN */
-	-EFAULT,		/* BCME_BADADDR */
-	-ENOMEM,		/* BCME_NORESOURCE */
-	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
-	-EMSGSIZE,		/* BCME_BADLENGTH */
-	-EINVAL,		/* BCME_NOTREADY */
-	-EPERM,			/* BCME_NOTPERMITTED */
-	-ENOMEM,		/* BCME_NOMEM */
-	-EINVAL,		/* BCME_ASSOCIATED */
-	-ERANGE,		/* BCME_RANGE */
-	-EINVAL,		/* BCME_NOTFOUND */
-	-EINVAL,		/* BCME_WME_NOT_ENABLED */
-	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
-	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
-	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
-	-EIO,			/* BCME_SDIO_ERROR */
-	-ENODEV,		/* BCME_DONGLE_DOWN */
-	-EINVAL,		/* BCME_VERSION */
-	-EIO,			/* BCME_TXFAIL */
-	-EIO,			/* BCME_RXFAIL */
-	-EINVAL,		/* BCME_NODEVICE */
-	-EINVAL,		/* BCME_NMODE_DISABLED */
-	-ENODATA,		/* BCME_NONRESIDENT */
-
-/* When an new error code is added to bcmutils.h, add os
- * spcecific error translation here as well
- */
-/* check if BCME_LAST changed since the last time this function was updated */
-#if BCME_LAST != -42
-#error "You need to add a OS error translation in the linuxbcmerrormap \
-	for new error code defined in bcmutils.h"
-#endif
-};
-
-/* translate bcmerrors into linux errors */
-int osl_error(int bcmerror)
+struct osl_info *osl_attach(void *pdev, uint bustype)
 {
-	if (bcmerror > 0)
-		bcmerror = 0;
-	else if (bcmerror < BCME_LAST)
-		bcmerror = BCME_ERROR;
+	struct osl_info *osh;
 
-	/* Array bounds covered by ASSERT in osl_attach */
-	return linuxbcmerrormap[-bcmerror];
-}
-#endif /* BRCM_FULLMAC */
-
-osl_t *osl_attach(void *pdev, uint bustype, bool pkttag)
-{
-	osl_t *osh;
-
-	osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+	osh = kmalloc(sizeof(struct osl_info), GFP_ATOMIC);
 	ASSERT(osh);
 
-	bzero(osh, sizeof(osl_t));
-
-#ifdef BRCM_FULLMAC
-	/* Check that error map has the right number of entries in it */
-	ASSERT(ABS(BCME_LAST) == (ARRAY_SIZE(linuxbcmerrormap) - 1));
-#endif /* BRCM_FULLMAC */
+	memset(osh, 0, sizeof(struct osl_info));
 
 	osh->magic = OS_HANDLE_MAGIC;
-	osh->failed = 0;
 	osh->pdev = pdev;
-	osh->pub.pkttag = pkttag;
 	osh->bustype = bustype;
 
 	switch (bustype) {
 	case PCI_BUS:
 	case SI_BUS:
 	case PCMCIA_BUS:
-		osh->pub.mmbus = true;
+		osh->mmbus = true;
 		break;
 	case JTAG_BUS:
 	case SDIO_BUS:
 	case USB_BUS:
 	case SPI_BUS:
 	case RPC_BUS:
-		osh->pub.mmbus = false;
+		osh->mmbus = false;
 		break;
 	default:
 		ASSERT(false);
 		break;
 	}
 
-#if defined(BCMDBG) && !defined(BRCM_FULLMAC)
-	if (pkttag) {
-		struct sk_buff *skb;
-		ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
-	}
-#endif
 	return osh;
 }
 
-void osl_detach(osl_t *osh)
+void osl_detach(struct osl_info *osh)
 {
 	if (osh == NULL)
 		return;
@@ -167,8 +79,7 @@
 	kfree(osh);
 }
 
-/* Return a new packet. zero out pkttag */
-void *BCMFASTPATH osl_pktget(osl_t *osh, uint len)
+struct sk_buff *BCMFASTPATH pkt_buf_get_skb(struct osl_info *osh, uint len)
 {
 	struct sk_buff *skb;
 
@@ -177,24 +88,20 @@
 		skb_put(skb, len);
 		skb->priority = 0;
 
-		osh->pub.pktalloced++;
+		osh->pktalloced++;
 	}
 
-	return (void *)skb;
+	return skb;
 }
 
 /* Free the driver packet. Free the tag if present */
-void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
+void BCMFASTPATH pkt_buf_free_skb(struct osl_info *osh, struct sk_buff *skb, bool send)
 {
-	struct sk_buff *skb, *nskb;
+	struct sk_buff *nskb;
 	int nest = 0;
 
-	skb = (struct sk_buff *)p;
 	ASSERT(skb);
 
-	if (send && osh->pub.tx_fn)
-		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
-
 	/* perversion: we use skb->next to chain multi-skb packets */
 	while (skb) {
 		nskb = skb->next;
@@ -211,63 +118,14 @@
 			 */
 			dev_kfree_skb(skb);
 
-		osh->pub.pktalloced--;
+		osh->pktalloced--;
 		nest++;
 		skb = nskb;
 	}
 }
 
-u32 osl_pci_read_config(osl_t *osh, uint offset, uint size)
-{
-	uint val = 0;
-	uint retry = PCI_CFG_RETRY;
-
-	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
-	/* only 4byte access supported */
-	ASSERT(size == 4);
-
-	do {
-		pci_read_config_dword(osh->pdev, offset, &val);
-		if (val != 0xffffffff)
-			break;
-	} while (retry--);
-
-#ifdef BCMDBG
-	if (retry < PCI_CFG_RETRY)
-		printk("PCI CONFIG READ access to %d required %d retries\n",
-		       offset, (PCI_CFG_RETRY - retry));
-#endif				/* BCMDBG */
-
-	return val;
-}
-
-void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
-{
-	uint retry = PCI_CFG_RETRY;
-
-	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
-	/* only 4byte access supported */
-	ASSERT(size == 4);
-
-	do {
-		pci_write_config_dword(osh->pdev, offset, val);
-		if (offset != PCI_BAR0_WIN)
-			break;
-		if (osl_pci_read_config(osh, offset, size) == val)
-			break;
-	} while (retry--);
-
-#if defined(BCMDBG) && !defined(BRCM_FULLMAC)
-	if (retry < PCI_CFG_RETRY)
-		printk("PCI CONFIG WRITE access to %d required %d retries\n",
-		       offset, (PCI_CFG_RETRY - retry));
-#endif				/* BCMDBG */
-}
-
 /* return bus # for the pci device pointed by osh->pdev */
-uint osl_pci_bus(osl_t *osh)
+uint osl_pci_bus(struct osl_info *osh)
 {
 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
 
@@ -275,40 +133,37 @@
 }
 
 /* return slot # for the pci device pointed by osh->pdev */
-uint osl_pci_slot(osl_t *osh)
+uint osl_pci_slot(struct osl_info *osh)
 {
 	ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
 
 	return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
 }
 
-uint osl_dma_consistent_align(void)
-{
-	return PAGE_SIZE;
-}
-
-void *osl_dma_alloc_consistent(osl_t *osh, uint size, u16 align_bits,
+void *osl_dma_alloc_consistent(struct osl_info *osh, uint size, u16 align_bits,
 			       uint *alloced, unsigned long *pap)
 {
 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
 
 	if (align_bits) {
 		u16 align = (1 << align_bits);
-		if (!IS_ALIGNED(DMA_CONSISTENT_ALIGN, align))
+		if (!IS_ALIGNED(PAGE_SIZE, align))
 			size += align;
 		*alloced = size;
 	}
 	return pci_alloc_consistent(osh->pdev, size, (dma_addr_t *) pap);
 }
 
-void osl_dma_free_consistent(osl_t *osh, void *va, uint size, unsigned long pa)
+void osl_dma_free_consistent(struct osl_info *osh, void *va, uint size,
+			     unsigned long pa)
 {
 	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
 
 	pci_free_consistent(osh->pdev, size, va, (dma_addr_t) pa);
 }
 
-uint BCMFASTPATH osl_dma_map(osl_t *osh, void *va, uint size, int direction)
+uint BCMFASTPATH osl_dma_map(struct osl_info *osh, void *va, uint size,
+			     int direction)
 {
 	int dir;
 
@@ -317,7 +172,8 @@
 	return pci_map_single(osh->pdev, va, size, dir);
 }
 
-void BCMFASTPATH osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
+void BCMFASTPATH osl_dma_unmap(struct osl_info *osh, uint pa, uint size,
+			       int direction)
 {
 	int dir;
 
@@ -373,52 +229,3 @@
 }
 #endif				/* defined(BCMDBG_ASSERT) */
 
-#if defined(BCMSDIO) && !defined(BRCM_FULLMAC)
-u8 osl_readb(osl_t *osh, volatile u8 *r)
-{
-	osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
-	void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
-	return (u8) ((rreg) (ctx, (void *)r, sizeof(u8)));
-}
-
-u16 osl_readw(osl_t *osh, volatile u16 *r)
-{
-	osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
-	void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
-	return (u16) ((rreg) (ctx, (void *)r, sizeof(u16)));
-}
-
-u32 osl_readl(osl_t *osh, volatile u32 *r)
-{
-	osl_rreg_fn_t rreg = ((osl_pubinfo_t *) osh)->rreg_fn;
-	void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
-	return (u32) ((rreg) (ctx, (void *)r, sizeof(u32)));
-}
-
-void osl_writeb(osl_t *osh, volatile u8 *r, u8 v)
-{
-	osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
-	void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
-	((wreg) (ctx, (void *)r, v, sizeof(u8)));
-}
-
-void osl_writew(osl_t *osh, volatile u16 *r, u16 v)
-{
-	osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
-	void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
-	((wreg) (ctx, (void *)r, v, sizeof(u16)));
-}
-
-void osl_writel(osl_t *osh, volatile u32 *r, u32 v)
-{
-	osl_wreg_fn_t wreg = ((osl_pubinfo_t *) osh)->wreg_fn;
-	void *ctx = ((osl_pubinfo_t *) osh)->reg_ctx;
-
-	((wreg) (ctx, (void *)r, v, sizeof(u32)));
-}
-#endif	/* BCMSDIO */
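With osl_pktget()/osl_pktfree() renamed and retyped above, callers now pass struct sk_buff pointers around directly. A caller-side sketch of the allocate/free pair, assuming the driver's osl.h for the pkt_buf_* and struct osl_info declarations; the 2048-byte size is a placeholder:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Allocate a receive buffer, hand it to the hardware, release it again. */
static int rx_buf_roundtrip(struct osl_info *osh)
{
	struct sk_buff *skb;

	skb = pkt_buf_get_skb(osh, 2048);
	if (skb == NULL)
		return -ENOMEM;

	/* ... point the DMA engine at skb->data here ... */

	pkt_buf_free_skb(osh, skb, false);	/* false: not a transmit buffer */
	return 0;
}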
diff --git a/drivers/staging/brcm80211/util/nicpci.c b/drivers/staging/brcm80211/util/nicpci.c
index 23f86dd..56e658c 100644
--- a/drivers/staging/brcm80211/util/nicpci.c
+++ b/drivers/staging/brcm80211/util/nicpci.c
@@ -14,8 +14,9 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/delay.h>
 #include <linux/string.h>
-#include <linuxver.h>
+#include <linux/pci.h>
 #include <bcmdefs.h>
 #include <osl.h>
 #include <bcmutils.h>
@@ -35,7 +36,7 @@
 	} regs;			/* Memory mapped register to the core */
 
 	si_t *sih;		/* System interconnect handle */
-	osl_t *osh;		/* OSL handle */
+	struct osl_info *osh;		/* OSL handle */
 	u8 pciecap_lcreg_offset;	/* PCIE capability LCreg offset in the config space */
 	bool pcie_pr42767;
 	u8 pcie_polarity;
@@ -47,7 +48,8 @@
 
 /* debug/trace */
 #define	PCI_ERROR(args)
-#define PCIE_PUB(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
+#define PCIE_PUB(sih) \
+	(((sih)->bustype == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
 
 /* routines to access mdio slave device registers */
 static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk);
@@ -71,35 +73,6 @@
 
 #define PCIE_ASPM(sih)	((PCIE_PUB(sih)) && (((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))
 
-#define DWORD_ALIGN(x)  (x & ~(0x03))
-#define BYTE_POS(x) (x & 0x3)
-#define WORD_POS(x) (x & 0x1)
-
-#define BYTE_SHIFT(x)  (8 * BYTE_POS(x))
-#define WORD_SHIFT(x)  (16 * WORD_POS(x))
-
-#define BYTE_VAL(a, x) ((a >> BYTE_SHIFT(x)) & 0xFF)
-#define WORD_VAL(a, x) ((a >> WORD_SHIFT(x)) & 0xFFFF)
-
-#define read_pci_cfg_byte(a) \
-	(BYTE_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xff)
-
-#define read_pci_cfg_word(a) \
-	(WORD_VAL(OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4), a) & 0xffff)
-
-#define write_pci_cfg_byte(a, val) do { \
-	u32 tmpval; \
-	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFF << BYTE_POS(a)) | \
-		val << BYTE_POS(a); \
-	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
-	} while (0)
-
-#define write_pci_cfg_word(a, val) do { \
-	u32 tmpval; \
-	tmpval = (OSL_PCI_READ_CONFIG(osh, DWORD_ALIGN(a), 4) & ~0xFFFF << WORD_POS(a)) | \
-		val << WORD_POS(a); \
-	OSL_PCI_WRITE_CONFIG(osh, DWORD_ALIGN(a), 4, tmpval); \
-	} while (0)
 
 /* delay needed between the mdio control/ mdiodata register data access */
 #define PR28829_DELAY() udelay(10)
@@ -107,7 +80,7 @@
 /* Initialize the PCI core. It is the caller's responsibility to make sure that this is done
  * only once
  */
-void *pcicore_init(si_t *sih, osl_t *osh, void *regs)
+void *pcicore_init(si_t *sih, struct osl_info *osh, void *regs)
 {
 	pcicore_info_t *pi;
 
@@ -149,8 +122,8 @@
 /* return cap_offset if requested capability exists in the PCI config space */
 /* Note that it is the caller's responsibility to make sure it's a pci bus */
 u8
-pcicore_find_pci_capability(osl_t *osh, u8 req_cap_id, unsigned char *buf,
-			    u32 *buflen)
+pcicore_find_pci_capability(struct osl_info *osh, u8 req_cap_id,
+			    unsigned char *buf, u32 *buflen)
 {
 	u8 cap_id;
 	u8 cap_ptr = 0;
@@ -158,29 +131,29 @@
 	u8 byte_val;
 
 	/* check for Header type 0 */
-	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
+	pci_read_config_byte(osh->pdev, PCI_CFG_HDR, &byte_val);
 	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
 		goto end;
 
 	/* check if the capability pointer field exists */
-	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
+	pci_read_config_byte(osh->pdev, PCI_CFG_STAT, &byte_val);
 	if (!(byte_val & PCI_CAPPTR_PRESENT))
 		goto end;
 
-	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
+	pci_read_config_byte(osh->pdev, PCI_CFG_CAPPTR, &cap_ptr);
 	/* check if the capability pointer is 0x00 */
 	if (cap_ptr == 0x00)
 		goto end;
 
 	/* loop through the capability list and see if the pcie capability exists */
 
-	cap_id = read_pci_cfg_byte(cap_ptr);
+	pci_read_config_byte(osh->pdev, cap_ptr, &cap_id);
 
 	while (cap_id != req_cap_id) {
-		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
+		pci_read_config_byte(osh->pdev, cap_ptr + 1, &cap_ptr);
 		if (cap_ptr == 0x00)
 			break;
-		cap_id = read_pci_cfg_byte(cap_ptr);
+		pci_read_config_byte(osh->pdev, cap_ptr, &cap_id);
 	}
 	if (cap_id != req_cap_id) {
 		goto end;
@@ -199,7 +172,7 @@
 			bufsize = SZPCR - cap_data;
 		*buflen = bufsize;
 		while (bufsize--) {
-			*buf = read_pci_cfg_byte(cap_data);
+			pci_read_config_byte(osh->pdev, cap_data, buf);
 			cap_data++;
 			buf++;
 		}
@@ -210,7 +183,8 @@
 
 /* ***** Register Access API */
 uint
-pcie_readreg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct osl_info *osh, sbpcieregs_t *pcieregs, uint addrtype,
+	     uint offset)
 {
 	uint retval = 0xFFFFFFFF;
 
@@ -236,8 +210,8 @@
 }
 
 uint
-pcie_writereg(osl_t *osh, sbpcieregs_t *pcieregs, uint addrtype, uint offset,
-	      uint val)
+pcie_writereg(struct osl_info *osh, sbpcieregs_t *pcieregs, uint addrtype,
+	      uint offset, uint val)
 {
 	ASSERT(pcieregs != NULL);
 
@@ -373,15 +347,15 @@
 	if (!offset)
 		return 0;
 
-	reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(u32));
+	pci_read_config_dword(pi->osh->pdev, offset, &reg_val);
 	/* set operation */
 	if (mask) {
 		if (val)
 			reg_val |= PCIE_CLKREQ_ENAB;
 		else
 			reg_val &= ~PCIE_CLKREQ_ENAB;
-		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(u32), reg_val);
-		reg_val = OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(u32));
+		pci_write_config_dword(pi->osh->pdev, offset, reg_val);
+		pci_read_config_dword(pi->osh->pdev, offset, &reg_val);
 	}
 	if (reg_val & PCIE_CLKREQ_ENAB)
 		return 1;
@@ -393,7 +367,7 @@
 {
 	u32 w;
 	si_t *sih = pi->sih;
-	osl_t *osh = pi->osh;
+	struct osl_info *osh = pi->osh;
 	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
 
 	if (!PCIE_PUB(sih) || sih->buscorerev < 7)
@@ -502,12 +476,12 @@
 
 		W_REG(pi->osh, reg16, val16);
 
-		w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
-					sizeof(u32));
+		pci_read_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset,
+					&w);
 		w &= ~PCIE_ASPM_ENAB;
 		w |= pi->pcie_war_aspm_ovr;
-		OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
-				     sizeof(u32), w);
+		pci_write_config_dword(pi->osh->pdev,
+					pi->pciecap_lcreg_offset, w);
 	}
 
 	reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
@@ -577,7 +551,7 @@
 static void pcie_war_pci_setup(pcicore_info_t *pi)
 {
 	si_t *sih = pi->sih;
-	osl_t *osh = pi->osh;
+	struct osl_info *osh = pi->osh;
 	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
 	u32 w;
 
@@ -694,11 +668,9 @@
 	if (!pi || !PCIE_ASPM(pi->sih))
 		return;
 
-	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pciecap_lcreg_offset,
-				sizeof(u32));
+	pci_read_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset, &w);
 	w &= ~PCIE_CAP_LCREG_ASPML1;
-	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pciecap_lcreg_offset, sizeof(u32),
-			     w);
+	pci_write_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset, w);
 
 	pi->pcie_pr42767 = false;
 }
@@ -718,7 +690,7 @@
 
 /* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
 /* Just uses PCI config accesses to find out, when needed before sb_attach is done */
-bool pcicore_pmecap_fast(osl_t *osh)
+bool pcicore_pmecap_fast(struct osl_info *osh)
 {
 	u8 cap_ptr;
 	u32 pmecap;
@@ -730,7 +702,7 @@
 	if (!cap_ptr)
 		return false;
 
-	pmecap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(u32));
+	pci_read_config_dword(osh->pdev, cap_ptr, &pmecap);
 
 	return (pmecap & PME_CAP_PM_STATES) != 0;
 }
@@ -753,9 +725,8 @@
 
 		pi->pmecap_offset = cap_ptr;
 
-		pmecap =
-		    OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset,
-					sizeof(u32));
+		pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset,
+					&pmecap);
 
 		/* At least one state can generate PME */
 		pi->pmecap = (pmecap & PME_CAP_PM_STATES) != 0;
@@ -774,11 +745,11 @@
 	if (!pcicore_pmecap(pi))
 		return;
 
-	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
-				sizeof(u32));
+	pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+				&w);
 	w |= (PME_CSR_PME_EN);
-	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
-			     sizeof(u32), w);
+	pci_write_config_dword(pi->osh->pdev,
+				pi->pmecap_offset + PME_CSR_OFFSET, w);
 }
 
 /*
@@ -792,8 +763,8 @@
 	if (!pcicore_pmecap(pi))
 		return false;
 
-	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
-				sizeof(u32));
+	pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+				&w);
 
 	return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
 }
@@ -808,22 +779,23 @@
 	if (!pcicore_pmecap(pi))
 		return;
 
-	w = OSL_PCI_READ_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
-				sizeof(u32));
+	pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+				&w);
 
 	PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
 
 	/* PMESTAT is cleared by writing 1 to it */
 	w &= ~(PME_CSR_PME_EN);
 
-	OSL_PCI_WRITE_CONFIG(pi->osh, pi->pmecap_offset + PME_CSR_OFFSET,
-			     sizeof(u32), w);
+	pci_write_config_dword(pi->osh->pdev,
+				pi->pmecap_offset + PME_CSR_OFFSET, w);
 }
 
 u32 pcie_lcreg(void *pch, u32 mask, u32 val)
 {
 	pcicore_info_t *pi = (pcicore_info_t *) pch;
 	u8 offset;
+	u32 tmpval;
 
 	offset = pi->pciecap_lcreg_offset;
 	if (!offset)
@@ -831,9 +803,10 @@
 
 	/* set operation */
 	if (mask)
-		OSL_PCI_WRITE_CONFIG(pi->osh, offset, sizeof(u32), val);
+		pci_write_config_dword(pi->osh->pdev, offset, val);
 
-	return OSL_PCI_READ_CONFIG(pi->osh, offset, sizeof(u32));
+	pci_read_config_dword(pi->osh->pdev, offset, &tmpval);
+	return tmpval;
 }
 
 u32
@@ -842,7 +815,7 @@
 	u32 reg_val = 0;
 	pcicore_info_t *pi = (pcicore_info_t *) pch;
 	sbpcieregs_t *pcieregs = pi->regs.pcieregs;
-	osl_t *osh = pi->osh;
+	struct osl_info *osh = pi->osh;
 
 	if (mask) {
 		PCI_ERROR(("PCIEREG: 0x%x writeval  0x%x\n", offset, val));
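nicpci.c now reaches PCI config space through the stock accessors rather than the OSL_PCI_READ_CONFIG()/OSL_PCI_WRITE_CONFIG() wrappers. A minimal read-modify-write sketch against a struct pci_dev; the offset and bit below are placeholders:

#include <linux/pci.h>

#define MY_CFG_OFFSET	0xd4		/* placeholder config-space offset */
#define MY_ENABLE_BIT	0x00000100	/* placeholder bit */

/* Set one bit in a dword of PCI config space via the stock accessors. */
static void cfg_set_bit(struct pci_dev *pdev)
{
	u32 w;

	pci_read_config_dword(pdev, MY_CFG_OFFSET, &w);
	w |= MY_ENABLE_BIT;
	pci_write_config_dword(pdev, MY_CFG_OFFSET, w);
}

The open-coded capability walk in pcicore_find_pci_capability() keeps its byte-at-a-time pci_read_config_byte() loop; the stock pci_find_capability() helper could arguably do that lookup, but the hunks above stick to a straight wrapper-for-accessor substitution.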
diff --git a/drivers/staging/brcm80211/util/nvram/nvram_ro.c b/drivers/staging/brcm80211/util/nvram/nvram_ro.c
index f80375c..e4d41ee 100644
--- a/drivers/staging/brcm80211/util/nvram/nvram_ro.c
+++ b/drivers/staging/brcm80211/util/nvram/nvram_ro.c
@@ -49,7 +49,7 @@
 /* copy flash to ram */
 static void get_flash_nvram(si_t *sih, struct nvram_header *nvh)
 {
-	osl_t *osh;
+	struct osl_info *osh;
 	uint nvs, bufsz;
 	vars_t *new;
 
@@ -133,7 +133,7 @@
 	len = strlen(name);
 
 	for (s = vars; (s < lim) && *s;) {
-		if ((bcmp(s, name, len) == 0) && (s[len] == '='))
+		if ((memcmp(s, name, len) == 0) && (s[len] == '='))
 			return &s[len + 1];
 
 		while (*s++)
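The nvram_ro.c lookup above swaps bcmp() for memcmp() while keeping the packed "name=value" layout. A self-contained sketch of the same scan over a block of NUL-separated pairs that ends with an empty string:

#include <linux/string.h>

/* Find "name" in a block of consecutive "name=value" strings that is
 * terminated by an empty string; returns the value or NULL.
 */
static const char *find_var(const char *vars, const char *lim,
			    const char *name)
{
	const char *s;
	size_t len = strlen(name);

	for (s = vars; s < lim && *s;) {
		if (memcmp(s, name, len) == 0 && s[len] == '=')
			return &s[len + 1];
		while (*s++)
			;	/* skip past this pair's terminating NUL */
	}
	return NULL;
}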
diff --git a/drivers/staging/brcm80211/util/sbutils.c b/drivers/staging/brcm80211/util/sbutils.c
index e4c0bab..63c3ab1 100644
--- a/drivers/staging/brcm80211/util/sbutils.c
+++ b/drivers/staging/brcm80211/util/sbutils.c
@@ -16,6 +16,9 @@
 
 #include <linux/types.h>
 #include <bcmdefs.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
 #include <osl.h>
 #include <bcmutils.h>
 #include <siutils.h>
@@ -87,7 +90,7 @@
 {
 	u32 sbaddr = 0;
 
-	switch (BUSTYPE(sii->pub.bustype)) {
+	switch (sii->pub.bustype) {
 	case SPI_BUS:
 	case SDIO_BUS:
 		sbaddr = (u32)(unsigned long)sii->curmap;
@@ -248,7 +251,7 @@
 			else {
 				/* Older chips */
 				SI_ERROR(("sb_chip2numcores: unsupported chip "
-					"0x%x\n", CHIPID(sii->pub.chip)));
+						  "0x%x\n", sii->pub.chip));
 				ASSERT(0);
 				numcores = 1;
 			}
@@ -344,7 +347,7 @@
 	u32 sbaddr = sii->coresba[coreidx];
 	void *regs;
 
-	switch (BUSTYPE(sii->pub.bustype)) {
+	switch (sii->pub.bustype) {
 #ifdef BCMSDIO
 	case SPI_BUS:
 	case SDIO_BUS:
@@ -409,8 +412,8 @@
 
 	sii = SI_INFO(sih);
 
-	if ((BUSTYPE(sii->pub.bustype) == SDIO_BUS) ||
-	    (BUSTYPE(sii->pub.bustype) == SPI_BUS)) {
+	if ((sii->pub.bustype == SDIO_BUS) ||
+	    (sii->pub.bustype == SPI_BUS)) {
 
 		INTR_OFF(sii, intr_val);
 		origidx = si_coreidx(sih);
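sbutils.c (and siutils.c below) stop passing the bus type through the BUSTYPE() mask and switch on the stored value directly, the same way osl_attach() classifies buses earlier in this series of hunks. A minimal sketch of that dispatch; the enum below uses placeholder names rather than the real SI_BUS/PCI_BUS/SDIO_BUS constants:

/* Placeholder bus identifiers standing in for SI_BUS, PCI_BUS, SDIO_BUS, ... */
enum bus_type {
	BUS_SI,
	BUS_PCI,
	BUS_SDIO,
	BUS_SPI,
};

static int bus_is_memory_mapped(enum bus_type bustype)
{
	switch (bustype) {
	case BUS_SI:
	case BUS_PCI:
		return 1;	/* registers reachable with plain MMIO */
	case BUS_SDIO:
	case BUS_SPI:
	default:
		return 0;	/* indirect access through the bus driver */
	}
}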
diff --git a/drivers/staging/brcm80211/util/siutils.c b/drivers/staging/brcm80211/util/siutils.c
index f3ea7e1..b66de9b 100644
--- a/drivers/staging/brcm80211/util/siutils.c
+++ b/drivers/staging/brcm80211/util/siutils.c
@@ -14,11 +14,16 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <bcmdefs.h>
+#ifdef BRCM_FULLMAC
+#include <linux/netdevice.h>
+#endif
 #include <osl.h>
-#include <linuxver.h>
+#include <linux/module.h>
+#include <linux/pci.h>
 #include <bcmutils.h>
 #include <siutils.h>
 #include <bcmdevs.h>
@@ -53,7 +58,7 @@
 #endif
 
 /* local prototypes */
-static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
+static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
 			      void *regs, uint bustype, void *sdh, char **vars,
 			      uint *varsz);
 static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid,
@@ -81,8 +86,8 @@
  * vars - pointer to a pointer area for "environment" variables
  * varsz - pointer to int to return the size of the vars
  */
-si_t *si_attach(uint devid, osl_t *osh, void *regs, uint bustype, void *sdh,
-		char **vars, uint *varsz)
+si_t *si_attach(uint devid, struct osl_info *osh, void *regs, uint bustype,
+		void *sdh, char **vars, uint *varsz)
 {
 	si_info_t *sii;
 
@@ -113,12 +118,12 @@
 
 #ifndef BRCM_FULLMAC
 	/* kludge to enable the clock on the 4306 which lacks a slowclock */
-	if (BUSTYPE(bustype) == PCI_BUS && !si_ispcie(sii))
+	if (bustype == PCI_BUS && !si_ispcie(sii))
 		si_clkctl_xtal(&sii->pub, XTAL | PLL, ON);
 #endif
 
 #if defined(BCMSDIO)
-	if (BUSTYPE(bustype) == SDIO_BUS) {
+	if (bustype == SDIO_BUS) {
 		int err;
 		u8 clkset;
 
@@ -220,7 +225,7 @@
 		SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
 			 i, cid, crev, sii->coresba[i], sii->regs[i]));
 
-		if (BUSTYPE(bustype) == PCI_BUS) {
+		if (bustype == PCI_BUS) {
 			if (cid == PCI_CORE_ID) {
 				pciidx = i;
 				pcirev = crev;
@@ -232,8 +237,8 @@
 			}
 		}
 #ifdef BCMSDIO
-		else if (((BUSTYPE(bustype) == SDIO_BUS) ||
-			  (BUSTYPE(bustype) == SPI_BUS)) &&
+		else if (((bustype == SDIO_BUS) ||
+			  (bustype == SPI_BUS)) &&
 			 ((cid == PCMCIA_CORE_ID) || (cid == SDIOD_CORE_ID))) {
 			sii->pub.buscorerev = crev;
 			sii->pub.buscoretype = cid;
@@ -255,7 +260,7 @@
 	* or downloaded code was
 	* already running.
 	*/
-	if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
+	if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
 		if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
 			si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
 			si_core_disable(&sii->pub, 0);
@@ -281,7 +286,7 @@
 		 sii->pub.buscoretype, sii->pub.buscorerev));
 
 	/* fixup necessary chip/core configurations */
-	if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+	if (sii->pub.bustype == PCI_BUS) {
 		if (SI_FAST(sii)) {
 			if (!sii->pch) {
 				sii->pch = (void *)pcicore_init(
@@ -308,10 +313,10 @@
 	uint w = 0;
 
 	/* get boardtype and boardrev */
-	switch (BUSTYPE(sii->pub.bustype)) {
+	switch (sii->pub.bustype) {
 	case PCI_BUS:
 		/* do a pci config read to get subsystem id and subvendor id */
-		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_SVID, sizeof(u32));
+		pci_read_config_dword(sii->osh->pdev, PCI_CFG_SVID, &w);
 		/* Let nvram variables override subsystem Vend/ID */
 		sii->pub.boardvendor = (u16)si_getdevpathintvar(&sii->pub,
 			"boardvendor");
@@ -364,7 +369,7 @@
 /* this will make Sonics calls directly, since Sonics is no longer supported in the Si abstraction */
 /* this has been customized for the bcm 4329 ONLY */
 #ifdef BCMSDIO
-static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
+static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
 			      void *regs, uint bustype, void *sdh,
 			      char **vars, uint *varsz)
 {
@@ -376,7 +381,7 @@
 
 	ASSERT(GOODREGS(regs));
 
-	bzero((unsigned char *) sii, sizeof(si_info_t));
+	memset((unsigned char *) sii, 0, sizeof(si_info_t));
 
 	savewin = 0;
 
@@ -390,11 +395,6 @@
 	cc = (chipcregs_t *) sii->curmap;
 	sih->bustype = bustype;
 
-	if (bustype != BUSTYPE(bustype)) {
-		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype)));
-		return NULL;
-	}
-
 	/* bus/core/clk setup for register access */
 	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
 		SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
@@ -414,7 +414,7 @@
 	sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
 	sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
 
-	if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) &&
+	if ((sih->chip == BCM4329_CHIP_ID) &&
 		(sih->chippkg != BCM4329_289PIN_PKG_ID))
 			sih->chippkg = BCM4329_182PIN_PKG_ID;
 
@@ -444,7 +444,7 @@
 
 	/* Init nvram from sprom/otp if they exist */
 	if (srom_var_init
-	    (&sii->pub, BUSTYPE(bustype), regs, sii->osh, vars, varsz)) {
+	    (&sii->pub, bustype, regs, sii->osh, vars, varsz)) {
 		SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
 		goto exit;
 	}
@@ -499,7 +499,7 @@
 }
 
 #else				/* BCMSDIO */
-static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh,
+static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
 			      void *regs, uint bustype, void *sdh,
 			      char **vars, uint *varsz)
 {
@@ -511,7 +511,7 @@
 
 	ASSERT(GOODREGS(regs));
 
-	bzero((unsigned char *) sii, sizeof(si_info_t));
+	memset((unsigned char *) sii, 0, sizeof(si_info_t));
 
 	savewin = 0;
 
@@ -522,30 +522,29 @@
 	sii->osh = osh;
 
 	/* check to see if we are a si core mimic'ing a pci core */
-	if ((bustype == PCI_BUS) &&
-	    (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(u32)) ==
-	     0xffffffff)) {
-		SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI " "devid:0x%x\n", __func__, devid));
-		bustype = SI_BUS;
+	if (bustype == PCI_BUS) {
+		pci_read_config_dword(sii->osh->pdev, PCI_SPROM_CONTROL, &w);
+		if (w == 0xffffffff) {
+			SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
+				"switching to SI devid:0x%x\n",
+				__func__, devid));
+			bustype = SI_BUS;
+		}
 	}
 
 	/* find Chipcommon address */
 	if (bustype == PCI_BUS) {
-		savewin =
-		    OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(u32));
+		pci_read_config_dword(sii->osh->pdev, PCI_BAR0_WIN, &savewin);
 		if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
 			savewin = SI_ENUM_BASE;
-		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
+		pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN,
+				       SI_ENUM_BASE);
 		cc = (chipcregs_t *) regs;
 	} else {
 		cc = (chipcregs_t *) REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
 	}
 
 	sih->bustype = bustype;
-	if (bustype != BUSTYPE(bustype)) {
-		SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", bustype, BUSTYPE(bustype)));
-		return NULL;
-	}
 
 	/* bus/core/clk setup for register access */
 	if (!si_buscore_prep(sii, bustype, devid, sdh)) {
@@ -569,7 +568,7 @@
 	sih->issim = IS_SIM(sih->chippkg);
 
 	/* scan for cores */
-	if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
+	if (sii->pub.socitype == SOCI_AI) {
 		SI_MSG(("Found chip type AI (0x%08x)\n", w));
 		/* pass chipc address instead of original core base */
 		ai_scan(&sii->pub, (void *)cc, devid);
@@ -592,10 +591,10 @@
 	/* assume current core is CC */
 	if ((sii->pub.ccrev == 0x25)
 	    &&
-	    ((CHIPID(sih->chip) == BCM43236_CHIP_ID
-	      || CHIPID(sih->chip) == BCM43235_CHIP_ID
-	      || CHIPID(sih->chip) == BCM43238_CHIP_ID)
-	     && (CHIPREV(sii->pub.chiprev) <= 2))) {
+	    ((sih->chip == BCM43236_CHIP_ID
+	      || sih->chip == BCM43235_CHIP_ID
+	      || sih->chip == BCM43238_CHIP_ID)
+	     && (sii->pub.chiprev <= 2))) {
 
 		if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
 			uint clkdiv;
@@ -613,7 +612,7 @@
 
 	/* Init nvram from sprom/otp if they exist */
 	if (srom_var_init
-	    (&sii->pub, BUSTYPE(bustype), regs, sii->osh, vars, varsz)) {
+	    (&sii->pub, bustype, regs, sii->osh, vars, varsz)) {
 		SI_ERROR(("si_doattach: srom_var_init failed: bad srom\n"));
 		goto exit;
 	}
@@ -651,10 +650,10 @@
 		pcicore_attach(sii->pch, pvars, SI_DOATTACH);
 	}
 
-	if ((CHIPID(sih->chip) == BCM43224_CHIP_ID) ||
-	    (CHIPID(sih->chip) == BCM43421_CHIP_ID)) {
+	if ((sih->chip == BCM43224_CHIP_ID) ||
+	    (sih->chip == BCM43421_CHIP_ID)) {
 		/* enable 12 mA drive strength for 43224 and set chipControl register bit 15 */
-		if (CHIPREV(sih->chiprev) == 0) {
+		if (sih->chiprev == 0) {
 			SI_MSG(("Applying 43224A0 WARs\n"));
 			si_corereg(sih, SI_CC_IDX,
 				   offsetof(chipcregs_t, chipcontrol),
@@ -663,28 +662,28 @@
 			si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
 					   CCTRL_43224A0_12MA_LED_DRIVE);
 		}
-		if (CHIPREV(sih->chiprev) >= 1) {
+		if (sih->chiprev >= 1) {
 			SI_MSG(("Applying 43224B0+ WARs\n"));
 			si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
 					   CCTRL_43224B0_12MA_LED_DRIVE);
 		}
 	}
 
-	if (CHIPID(sih->chip) == BCM4313_CHIP_ID) {
+	if (sih->chip == BCM4313_CHIP_ID) {
 		/* enable 12 mA drive strength for 4313 and set chipControl register bit 1 */
 		SI_MSG(("Applying 4313 WARs\n"));
 		si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
 				   CCTRL_4313_12MA_LED_DRIVE);
 	}
 
-	if (CHIPID(sih->chip) == BCM4331_CHIP_ID) {
+	if (sih->chip == BCM4331_CHIP_ID) {
 		/* Enable Ext PA lines depending on chip package option */
 		si_chipcontrl_epa4331(sih, true);
 	}
 
 	return sii;
  exit:
-	if (BUSTYPE(sih->bustype) == PCI_BUS) {
+	if (sih->bustype == PCI_BUS) {
 		if (sii->pch)
 			pcicore_deinit(sii->pch);
 		sii->pch = NULL;
@@ -708,17 +707,17 @@
 	if (sii == NULL)
 		return;
 
-	if (BUSTYPE(sih->bustype) == SI_BUS)
+	if (sih->bustype == SI_BUS)
 		for (idx = 0; idx < SI_MAXCORES; idx++)
 			if (sii->regs[idx]) {
-				REG_UNMAP(sii->regs[idx]);
+				iounmap(sii->regs[idx]);
 				sii->regs[idx] = NULL;
 			}
 
 #ifndef BRCM_FULLMAC
 	nvram_exit((void *)si_local);	/* free up nvram buffers */
 
-	if (BUSTYPE(sih->bustype) == PCI_BUS) {
+	if (sih->bustype == PCI_BUS) {
 		if (sii->pch)
 			pcicore_deinit(sii->pch);
 		sii->pch = NULL;
@@ -730,7 +729,7 @@
 		kfree(sii);
 }
 
-void *si_osh(si_t *sih)
+struct osl_info *si_osh(si_t *sih)
 {
 	si_info_t *sii;
 
@@ -766,7 +765,7 @@
 
 uint si_flag(si_t *sih)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_flag(sih);
 	else {
 		ASSERT(0);
@@ -776,7 +775,7 @@
 
 void si_setint(si_t *sih, int siflag)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		ai_setint(sih, siflag);
 	else
 		ASSERT(0);
@@ -808,7 +807,7 @@
 #ifndef BCMSDIO
 uint si_corerev(si_t *sih)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_corerev(sih);
 	else {
 		ASSERT(0);
@@ -851,7 +850,7 @@
 	if (!GOODIDX(idx))
 		return NULL;
 
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_setcoreidx(sih, idx);
 	else {
 #ifdef BCMSDIO
@@ -866,7 +865,7 @@
 #ifndef BCMSDIO
 void *si_setcoreidx(si_t *sih, uint coreidx)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_setcoreidx(sih, coreidx);
 	else {
 		ASSERT(0);
@@ -918,7 +917,7 @@
 
 u32 si_core_cflags(si_t *sih, u32 mask, u32 val)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_core_cflags(sih, mask, val);
 	else {
 		ASSERT(0);
@@ -928,7 +927,7 @@
 
 u32 si_core_sflags(si_t *sih, u32 mask, u32 val)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_core_sflags(sih, mask, val);
 	else {
 		ASSERT(0);
@@ -938,7 +937,7 @@
 
 bool si_iscoreup(si_t *sih)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_iscoreup(sih);
 	else {
 #ifdef BCMSDIO
@@ -953,7 +952,7 @@
 void si_write_wrapperreg(si_t *sih, u32 offset, u32 val)
 {
 	/* only for 4319, no requirement for SOCI_SB */
-	if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+	if (sih->socitype == SOCI_AI) {
 		ai_write_wrap_reg(sih, offset, val);
 	}
 }
@@ -961,7 +960,7 @@
 uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
 {
 
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		return ai_corereg(sih, coreidx, regoff, mask, val);
 	else {
 #ifdef BCMSDIO
@@ -976,7 +975,7 @@
 void si_core_disable(si_t *sih, u32 bits)
 {
 
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		ai_core_disable(sih, bits);
 #ifdef BCMSDIO
 	else
@@ -986,7 +985,7 @@
 
 void si_core_reset(si_t *sih, u32 bits, u32 resetbits)
 {
-	if (CHIPTYPE(sih->socitype) == SOCI_AI)
+	if (sih->socitype == SOCI_AI)
 		ai_core_reset(sih, bits, resetbits);
 #ifdef BCMSDIO
 	else
@@ -1043,8 +1042,8 @@
 
 	if (PMUCTL_ENAB(sih)) {
 
-		if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
-		    (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
+		if ((sih->chip == BCM4319_CHIP_ID) &&
+		    (sih->chiprev == 0) && (ticks != 0)) {
 			si_corereg(sih, SI_CC_IDX,
 				   offsetof(chipcregs_t, clk_ctl_st), ~0, 0x2);
 			si_setcore(sih, USB20D_CORE_ID, 0);
@@ -1085,16 +1084,18 @@
 static uint si_slowclk_src(si_info_t *sii)
 {
 	chipcregs_t *cc;
+	u32 val;
 
 	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);
 
 	if (sii->pub.ccrev < 6) {
-		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
-		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(u32))
-		     & PCI_CFG_GPIO_SCS))
-			return SCC_SS_PCI;
-		else
-			return SCC_SS_XTAL;
+		if (sii->pub.bustype == PCI_BUS) {
+			pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUT,
+					      &val);
+			if (val & PCI_CFG_GPIO_SCS)
+				return SCC_SS_PCI;
+		}
+		return SCC_SS_XTAL;
 	} else if (sii->pub.ccrev < 10) {
 		cc = (chipcregs_t *) si_setcoreidx(&sii->pub, sii->curidx);
 		return R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK;
@@ -1264,7 +1265,7 @@
 
 	sii = SI_INFO(sih);
 
-	switch (BUSTYPE(sih->bustype)) {
+	switch (sih->bustype) {
 
 #ifdef BCMSDIO
 	case SDIO_BUS:
@@ -1276,12 +1277,9 @@
 		if (PCIE(sii))
 			return -1;
 
-		in = OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_IN, sizeof(u32));
-		out =
-		    OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(u32));
-		outen =
-		    OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUTEN,
-					sizeof(u32));
+		pci_read_config_dword(sii->osh->pdev, PCI_GPIO_IN, &in);
+		pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUT, &out);
+		pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUTEN, &outen);
 
 		/*
 		 * Avoid glitching the clock if GPRS is already using it.
@@ -1302,18 +1300,18 @@
 				out |= PCI_CFG_GPIO_XTAL;
 				if (what & PLL)
 					out |= PCI_CFG_GPIO_PLL;
-				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
-						     sizeof(u32), out);
-				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
-						     sizeof(u32), outen);
+				pci_write_config_dword(sii->osh->pdev,
+						       PCI_GPIO_OUT, out);
+				pci_write_config_dword(sii->osh->pdev,
+						       PCI_GPIO_OUTEN, outen);
 				udelay(XTAL_ON_DELAY);
 			}
 
 			/* turn pll on */
 			if (what & PLL) {
 				out &= ~PCI_CFG_GPIO_PLL;
-				OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
-						     sizeof(u32), out);
+				pci_write_config_dword(sii->osh->pdev,
+						       PCI_GPIO_OUT, out);
 				mdelay(2);
 			}
 		} else {
@@ -1321,10 +1319,10 @@
 				out &= ~PCI_CFG_GPIO_XTAL;
 			if (what & PLL)
 				out |= PCI_CFG_GPIO_PLL;
-			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUT,
-					     sizeof(u32), out);
-			OSL_PCI_WRITE_CONFIG(sii->osh, PCI_GPIO_OUTEN,
-					     sizeof(u32), outen);
+			pci_write_config_dword(sii->osh->pdev,
+					       PCI_GPIO_OUT, out);
+			pci_write_config_dword(sii->osh->pdev,
+					       PCI_GPIO_OUTEN, outen);
 		}
 
 	default:
@@ -1378,7 +1376,7 @@
 		INTR_OFF(sii, intr_val);
 		origidx = sii->curidx;
 
-		if ((BUSTYPE(sii->pub.bustype) == SI_BUS) &&
+		if ((sii->pub.bustype == SI_BUS) &&
 		    si_setcore(&sii->pub, MIPS33_CORE_ID, 0) &&
 		    (si_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10))
 			goto done;
@@ -1460,7 +1458,7 @@
 	if (!path || size <= 0)
 		return -1;
 
-	switch (BUSTYPE(sih->bustype)) {
+	switch (sih->bustype) {
 	case SI_BUS:
 	case JTAG_BUS:
 		slen = snprintf(path, (size_t) size, "sb/%u/", si_coreidx(sih));
@@ -1550,7 +1548,7 @@
 {
 	u8 cap_ptr;
 
-	if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+	if (sii->pub.bustype != PCI_BUS)
 		return false;
 
 	cap_ptr =
@@ -1617,7 +1615,7 @@
 	sii = SI_INFO(sih);
 
 	/* if not pci bus, we're done */
-	if (BUSTYPE(sih->bustype) != PCI_BUS)
+	if (sih->bustype != PCI_BUS)
 		return;
 
 	if (PCI_FORCEHT(sii))
@@ -1646,7 +1644,7 @@
 	sii = SI_INFO(sih);
 
 	/* if not pci bus, we're done */
-	if (BUSTYPE(sih->bustype) != PCI_BUS)
+	if (sih->bustype != PCI_BUS)
 		return;
 
 	/* release FORCEHT since chip is going to "down" state */
@@ -1669,7 +1667,7 @@
 
 	sii = SI_INFO(sih);
 
-	if (BUSTYPE(sii->pub.bustype) != PCI_BUS)
+	if (sii->pub.bustype != PCI_BUS)
 		return;
 
 	ASSERT(PCI(sii) || PCIE(sii));
@@ -1692,9 +1690,9 @@
 	 */
 	if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
 		/* pci config write to set this core bit in PCIIntMask */
-		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_INT_MASK, sizeof(u32));
+		pci_read_config_dword(sii->osh->pdev, PCI_INT_MASK, &w);
 		w |= (coremask << PCI_SBIM_SHIFT);
-		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_INT_MASK, sizeof(u32), w);
+		pci_write_config_dword(sii->osh->pdev, PCI_INT_MASK, w);
 	} else {
 		/* set sbintvec bit for our flag number */
 		si_setint(sih, siflag);
@@ -1731,7 +1729,7 @@
 
 	si_info_t *sii = SI_INFO(sih);
 
-	ASSERT(BUSTYPE(sii->pub.bustype) == PCI_BUS);
+	ASSERT(sii->pub.bustype == PCI_BUS);
 
 	/* Fixup PI in SROM shadow area to enable the correct PCI core access */
 	/* save the current index */
@@ -1777,7 +1775,7 @@
 	 * ignore reservation if it's high priority (e.g., test apps)
 	 */
 	if ((priority != GPIO_HI_PRIORITY) &&
-	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
+	    (sih->bustype == SI_BUS) && (val || mask)) {
 		mask = priority ? (si_gpioreservation & mask) :
 		    ((si_gpioreservation | mask) & ~(si_gpioreservation));
 		val &= mask;
@@ -1929,10 +1927,10 @@
 
 	sii = SI_INFO(sih);
 
-	switch (BUSTYPE(sih->bustype)) {
+	switch (sih->bustype) {
 	case PCI_BUS:
 		ASSERT(sii->osh != NULL);
-		w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(u32));
+		pci_read_config_dword(sii->osh->pdev, PCI_CFG_VID, &w);
 		if ((w & 0xFFFF) != VENDOR_BROADCOM)
 			return true;
 		break;
@@ -1959,7 +1957,7 @@
 		return sromctrl & SRC_PRESENT;
 	}
 
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		return (sih->chipst & CST4329_SPROM_SEL) != 0;
 	case BCM4319_CHIP_ID:
@@ -1979,7 +1977,7 @@
 
 bool si_is_otp_disabled(si_t *sih)
 {
-	switch (CHIPID(sih->chip)) {
+	switch (sih->chip) {
 	case BCM4329_CHIP_ID:
 		return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
 		    CST4329_OTP_PWRDN;
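The brcm80211 hunks above drop the CHIPID/CHIPREV/BUSTYPE/CHIPTYPE wrapper macros and replace OSL_PCI_READ_CONFIG()/OSL_PCI_WRITE_CONFIG() with the standard PCI config-space accessors. A minimal sketch of the converted read pattern, assuming a valid struct pci_dev; the helper name is illustrative, not part of the driver:

#include <linux/pci.h>

/* The old OSL wrapper returned the value directly; pci_read_config_dword()
 * instead fills a caller-provided u32 and returns 0 on success. */
static u32 read_cfg_dword(struct pci_dev *pdev, int offset)
{
	u32 val = 0;

	if (pci_read_config_dword(pdev, offset, &val))
		val = 0xffffffff;	/* conventional all-ones value on error */
	return val;
}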
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 4a29ed7..dca861e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -117,8 +117,7 @@
 
 	for (driv = comedi_drivers; driv; driv = driv->next) {
 		if (!try_module_get(driv->module)) {
-			printk
-			    (KERN_INFO "comedi: failed to increment module count, skipping\n");
+			printk(KERN_INFO "comedi: failed to increment module count, skipping\n");
 			continue;
 		}
 		if (driv->num_names) {
@@ -205,9 +204,8 @@
 		mutex_lock(&dev->mutex);
 		if (dev->attached && dev->driver == driver) {
 			if (dev->use_count)
-				printk
-				    (KERN_WARNING "BUG! detaching device with use_count=%d\n",
-				     dev->use_count);
+				printk(KERN_WARNING "BUG! detaching device with use_count=%d\n",
+						dev->use_count);
 			comedi_device_detach(dev);
 		}
 		mutex_unlock(&dev->mutex);
@@ -442,7 +440,9 @@
 		unsigned i;
 		for (i = 0; i < async->n_buf_pages; ++i) {
 			if (async->buf_page_list[i].virt_addr) {
-				clear_bit(PG_reserved, &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
+				clear_bit(PG_reserved,
+					&(virt_to_page(async->buf_page_list[i].
+							virt_addr)->flags));
 				if (s->async_dma_dir != DMA_NONE) {
 					dma_free_coherent(dev->hw_dev,
 							  PAGE_SIZE,
@@ -470,10 +470,8 @@
 		struct page **pages = NULL;
 
 		async->buf_page_list =
-		    vmalloc(sizeof(struct comedi_buf_page) * n_pages);
+		    vzalloc(sizeof(struct comedi_buf_page) * n_pages);
 		if (async->buf_page_list) {
-			memset(async->buf_page_list, 0,
-			       sizeof(struct comedi_buf_page) * n_pages);
 			pages = vmalloc(sizeof(struct page *) * n_pages);
 		}
 		if (pages) {
@@ -496,8 +494,10 @@
 					break;
 
 				set_bit(PG_reserved,
-					&(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
-				pages[i] = virt_to_page(async->buf_page_list[i].virt_addr);
+					&(virt_to_page(async->buf_page_list[i].
+							virt_addr)->flags));
+				pages[i] = virt_to_page(async->buf_page_list[i].
+								virt_addr);
 			}
 		}
 		if (i == n_pages) {
@@ -514,7 +514,10 @@
 					    NULL) {
 						break;
 					}
-					clear_bit(PG_reserved, &(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
+					clear_bit(PG_reserved,
+						&(virt_to_page(async->
+							buf_page_list[i].
+							virt_addr)->flags));
 					if (s->async_dma_dir != DMA_NONE) {
 						dma_free_coherent(dev->hw_dev,
 								  PAGE_SIZE,
@@ -646,8 +649,7 @@
 {
 	if ((int)(async->buf_write_count + nbytes -
 		  async->buf_write_alloc_count) > 0) {
-		printk
-		    (KERN_INFO "comedi: attempted to write-free more bytes than have been write-allocated.\n");
+		printk(KERN_INFO "comedi: attempted to write-free more bytes than have been write-allocated.\n");
 		nbytes = async->buf_write_alloc_count - async->buf_write_count;
 	}
 	async->buf_write_count += nbytes;
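One drivers.c hunk above swaps a vmalloc() followed by memset() for a single vzalloc() call. A minimal sketch of the equivalence, with illustrative function names:

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Before: allocate, then zero explicitly. */
static void *buf_page_list_alloc_old(unsigned long nbytes)
{
	void *p = vmalloc(nbytes);

	if (p)
		memset(p, 0, nbytes);
	return p;
}

/* After: vzalloc() returns already-zeroed memory, so the memset() goes away. */
static void *buf_page_list_alloc_new(unsigned long nbytes)
{
	return vzalloc(nbytes);
}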
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
index 7361d50..0e6affd 100644
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
+++ b/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
@@ -1008,7 +1008,7 @@
 	b_ExternGate = (unsigned char) data[3];
 	b_CycleMode = (unsigned char) data[4];
 	b_InterruptEnable = (unsigned char) data[5];
-	i_ReturnValue = insn->n;;
+	i_ReturnValue = insn->n;
 	devpriv->tsk_Current = current;	/*  Save the current process task structure */
 	/**************************/
 	/* Test the module number */
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 93d7c05..76f2483 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -2710,10 +2710,10 @@
 			} else {
 				outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
 			}
-			/*  Enable the interrupt for the controler */
+			/*  Enable the interrupt for the controller */
 			dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
 			outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
-			printk("\nEnable the interrupt for the controler");
+			printk("\nEnable the interrupt for the controller");
 		}
 		printk("\nRead Eeprom");
 		i_EepromReadMainHeader(io_addr[0], this_board->pc_EepromChip,
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
index 912bc0f..a76ed25 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
@@ -225,7 +225,7 @@
 
 	devpriv->s_BoardInfos.b_BoardVersion = 1;
 
-	/*  Enable the interrupt for the controler */
+	/*  Enable the interrupt for the controller */
 	dw_Dummy = inl(devpriv->s_BoardInfos.ui_Address + 0x38);
 	outl(dw_Dummy | 0x2000, devpriv->s_BoardInfos.ui_Address + 0x38);
 
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 2a8a6c7..62f421a 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -2850,7 +2850,7 @@
 	i_Logic = 0;
 	i_CounterLogic = 0;
 	i_InterruptMask = 0;
-	i_InputChannel = 0;;
+	i_InputChannel = 0;
 	i_TimerCounter1Enabled = 0;
 	i_TimerCounter2Enabled = 0;
 	i_WatchdogCounter3Enabled = 0;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c
index 12fcc35..8a584a0 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1516.c
@@ -335,7 +335,7 @@
 			return -EINVAL;
 		}		/* if else data[3]==1) */
 	}			/* if else data[3]==0) */
-	return (insn->n);;
+	return (insn->n);
 }
 
 /*
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index b943a06..a93e234 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -3011,7 +3011,7 @@
 
 	outl(0x83838383, devpriv->i_IobaseAmcc + 0x60);
 
-	/*  Enable the interrupt for the controler */
+	/*  Enable the interrupt for the controller */
 	dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38);
 	outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38);
 	outl(0, devpriv->i_IobaseAddon);	/* Resets the output */
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index 356a189..acaceb0 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -339,7 +339,7 @@
 int i_APCI3501_WriteAnalogOutput(struct comedi_device *dev, struct comedi_subdevice *s,
 	struct comedi_insn *insn, unsigned int *data)
 {
-	unsigned int ul_Command1 = 0, ul_Channel_no, ul_Polarity, ul_DAC_Ready = 0;;
+	unsigned int ul_Command1 = 0, ul_Channel_no, ul_Polarity, ul_DAC_Ready = 0;
 
 	ul_Channel_no = CR_CHAN(insn->chanspec);
 
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 5d06457..7edeb11 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -971,7 +971,7 @@
 	if (thisboard->ao_chans > 0) {
 		s->type = COMEDI_SUBD_AO;
 		s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
-		s->n_chan = thisboard->ao_chans;;
+		s->n_chan = thisboard->ao_chans;
 		s->maxdata = (1 << thisboard->ao_bits) - 1;
 		s->range_table = &pci230_ao_range;
 		s->insn_write = &pci230_ao_winsn;
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 0345b4c..bb93685 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -169,7 +169,7 @@
 	if (!link)
 		return -EIO;
 
-	dev->iobase = link->resource[0]->start;;
+	dev->iobase = link->resource[0]->start;
 	printk("I/O base=0x%04lx ", dev->iobase);
 
 	printk("fingerprint:\n");
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index cfcbd9b..d8aefb2 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -370,7 +370,7 @@
 	struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS];
 
 	memset(devs_opened, 0, sizeof(devs_opened));
-	devpriv->name[0] = 0;;
+	devpriv->name[0] = 0;
 	/* Loop through all comedi devices specified on the command-line,
 	   building our device list */
 	for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index 39a6a85..e4711ef 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -19,7 +19,7 @@
  *				- 16 bit
  *
  *	only ONE PCI-20341 module possible
- * 	only ONE PCI-20006 module possible
+ *	only ONE PCI-20006 module possible
  *	no extern trigger implemented
  *
  *	NOT WORKING (but soon) only 4 on-board differential channels supported
@@ -83,11 +83,11 @@
 #include "../comedidev.h"
 
 #define PCI20000_ID			0x1d
-#define PCI20341_ID    			0x77
-#define PCI20006_ID      		0xe3
+#define PCI20341_ID			0x77
+#define PCI20006_ID			0xe3
 #define PCI20xxx_EMPTY_ID		0xff
 
-#define PCI20000_OFFSET 		0x100
+#define PCI20000_OFFSET			0x100
 #define PCI20000_MODULES		3
 
 #define PCI20000_DIO_0			0x80
@@ -246,7 +246,7 @@
 			pci20006_init(dev, s, it->options[2 * i + 2],
 				      it->options[2 * i + 3]);
 			printk(KERN_INFO "comedi%d: "
-			       "ii_pci20kc PCI-20006 module in slot %d \n",
+			       "ii_pci20kc PCI-20006 module in slot %d\n",
 			       dev->minor, i + 1);
 			break;
 		case PCI20341_ID:
@@ -255,7 +255,7 @@
 			pci20341_init(dev, s, it->options[2 * i + 2],
 				      it->options[2 * i + 3]);
 			printk(KERN_INFO "comedi%d: "
-			       "ii_pci20kc PCI-20341 module in slot %d \n",
+			       "ii_pci20kc PCI-20341 module in slot %d\n",
 			       dev->minor, i + 1);
 			break;
 		default:
@@ -376,9 +376,20 @@
 static const int pci20341_timebase[] = { 0x00, 0x00, 0x00, 0x04 };
 static const int pci20341_settling_time[] = { 0x58, 0x58, 0x93, 0x99 };
 
-static const struct comedi_lrange range_bipolar0_5 = { 1, {BIP_RANGE(0.5)} };
-static const struct comedi_lrange range_bipolar0_05 = { 1, {BIP_RANGE(0.05)} };
-static const struct comedi_lrange range_bipolar0_025 = { 1, {BIP_RANGE(0.025)} };
+static const struct comedi_lrange range_bipolar0_5 = {
+	1,
+	{BIP_RANGE(0.5)}
+};
+
+static const struct comedi_lrange range_bipolar0_05 = {
+	1,
+	{BIP_RANGE(0.05)}
+};
+
+static const struct comedi_lrange range_bipolar0_025 = {
+	1,
+	{BIP_RANGE(0.025)}
+};
 
 static const struct comedi_lrange *const pci20341_ranges[] = {
 	&range_bipolar5,
@@ -408,12 +419,18 @@
 	s->maxdata = 0xffff;
 	s->range_table = pci20341_ranges[opt0];
 
-	option = sdp->pci20341.timebase | PCI20341_REPMODE;	/* depends on gain, trigger, repetition mode */
+	/* depends on gain, trigger, repetition mode */
+	option = sdp->pci20341.timebase | PCI20341_REPMODE;
 
-	writeb(PCI20341_INIT, sdp->iobase + PCI20341_CONFIG_REG);	/* initialize Module */
-	writeb(PCI20341_PACER, sdp->iobase + PCI20341_MOD_STATUS);	/* set Pacer */
-	writeb(option, sdp->iobase + PCI20341_OPT_REG);	/* option register */
-	writeb(sdp->pci20341.settling_time, sdp->iobase + PCI20341_SET_TIME_REG);	/* settling time counter */
+	/* initialize Module */
+	writeb(PCI20341_INIT, sdp->iobase + PCI20341_CONFIG_REG);
+	/* set Pacer */
+	writeb(PCI20341_PACER, sdp->iobase + PCI20341_MOD_STATUS);
+	/* option register */
+	writeb(option, sdp->iobase + PCI20341_OPT_REG);
+	/* settling time counter */
+	writeb(sdp->pci20341.settling_time,
+		sdp->iobase + PCI20341_SET_TIME_REG);
 	/* trigger not implemented */
 	return 0;
 }
@@ -429,11 +446,15 @@
 	unsigned int clb;	/* channel list byte */
 	unsigned int boarddata;
 
-	writeb(1, sdp->iobase + PCI20341_LCHAN_ADDR_REG);	/* write number of input channels */
+	/* write number of input channels */
+	writeb(1, sdp->iobase + PCI20341_LCHAN_ADDR_REG);
 	clb = PCI20341_DAISY_CHAIN | PCI20341_MUX | (sdp->pci20341.ai_gain << 3)
 	    | CR_CHAN(insn->chanspec);
 	writeb(clb, sdp->iobase + PCI20341_CHAN_LIST);
-	writeb(0x00, sdp->iobase + PCI20341_CC_RESET);	/* reset settling time counter and trigger delay counter */
+
+	/* reset settling time counter and trigger delay counter */
+	writeb(0x00, sdp->iobase + PCI20341_CC_RESET);
+
 	writeb(0x00, sdp->iobase + PCI20341_CHAN_RESET);
 
 	/* generate Pacer */
@@ -444,9 +465,12 @@
 		 * the whole interrupt stuff
 		 */
 		j = 0;
-		readb(sdp->iobase + PCI20341_SOFT_PACER);	/* generate Pacer */
+		/* generate Pacer */
+		readb(sdp->iobase + PCI20341_SOFT_PACER);
+
 		eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
-		while ((eoc < 0x80) && j < 100) {	/* poll Interrupt Flag */
+		/* poll Interrupt Flag */
+		while ((eoc < 0x80) && j < 100) {
 			j++;
 			eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
 		}
@@ -460,7 +484,9 @@
 		lo = readb(sdp->iobase + PCI20341_LDATA);
 		hi = readb(sdp->iobase + PCI20341_LDATA + 1);
 		boarddata = lo + 0x100 * hi;
-		data[i] = (short)((boarddata + 0x8000) & 0xffff);	/* board-data -> comedi-data */
+
+		/* board-data -> comedi-data */
+		data[i] = (short)((boarddata + 0x8000) & 0xffff);
 	}
 
 	return i;
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 8b383ee..5c6c727 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -54,6 +54,7 @@
 
 #define PCI_VENDOR_ID_JR3 0x1762
 #define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111
+#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111
 #define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112
 #define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113
 #define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114
@@ -73,6 +74,8 @@
 	{
 	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
 		    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
+		    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
 	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
 		    PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
 	PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
@@ -807,6 +810,10 @@
 					devpriv->n_channels = 1;
 				}
 				break;
+			case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{
+					devpriv->n_channels = 1;
+				}
+				break;
 			case PCI_DEVICE_ID_JR3_2_CHANNEL:{
 					devpriv->n_channels = 2;
 				}
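The jr3_pci.c hunks above add a second PCI device ID for the 1-channel board. A hedged sketch of the general pattern for supporting a new ID: define it, list it in the driver's id table so the PCI core can match it, and handle it wherever behaviour depends on the ID. The table and names below are illustrative, not the driver's actual definitions:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1762	/* JR3 vendor ID from the hunk above */
#define EXAMPLE_DEVICE_ID_NEW	0x1111	/* newly supported 1-channel device */

static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID_NEW) },
	{ 0 }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_table);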
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index b0d44b5..a961158 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -62,11 +62,10 @@
 	unsigned input_select_bits = 0;
 
 	if (enable) {
-		if (read_not_write) {
+		if (read_not_write)
 			input_select_bits |= Gi_Read_Acknowledges_Irq;
-		} else {
+		else
 			input_select_bits |= Gi_Write_Acknowledges_Irq;
-		}
 	}
 	ni_tio_set_bits(counter,
 			NITIO_Gi_Input_Select_Reg(counter->counter_index),
@@ -84,9 +83,8 @@
 				gi_dma_config_bits |= Gi_DMA_Enable_Bit;
 				gi_dma_config_bits |= Gi_DMA_Int_Bit;
 			}
-			if (read_not_write == 0) {
+			if (read_not_write == 0)
 				gi_dma_config_bits |= Gi_DMA_Write_Bit;
-			}
 			ni_tio_set_bits(counter,
 					NITIO_Gi_DMA_Config_Reg(counter->
 								counter_index),
@@ -174,7 +172,7 @@
 static int ni_tio_output_cmd(struct ni_gpct *counter,
 			     struct comedi_async *async)
 {
-	printk("ni_tio: output commands not yet implemented.\n");
+	printk(KERN_ERR "ni_tio: output commands not yet implemented.\n");
 	return -ENOTSUPP;
 
 	counter->mite_chan->dir = COMEDI_OUTPUT;
@@ -198,9 +196,8 @@
 		set_gate_source = 1;
 		gate_source = cmd->convert_arg;
 	}
-	if (set_gate_source) {
+	if (set_gate_source)
 		retval = ni_tio_set_gate_src(counter, 0, gate_source);
-	}
 	if (cmd->flags & TRIG_WAKE_EOS) {
 		ni_tio_set_bits(counter,
 				NITIO_Gi_Interrupt_Enable_Reg(counter->
@@ -221,22 +218,21 @@
 
 	spin_lock_irqsave(&counter->lock, flags);
 	if (counter->mite_chan == NULL) {
-		printk
-		    ("ni_tio: commands only supported with DMA.  Interrupt-driven commands not yet implemented.\n");
+		printk(KERN_ERR "ni_tio: commands only supported with DMA.  Interrupt-driven commands not yet implemented.\n");
 		retval = -EIO;
 	} else {
 		retval = ni_tio_cmd_setup(counter, async);
 		if (retval == 0) {
-			if (cmd->flags & CMDF_WRITE) {
+			if (cmd->flags & CMDF_WRITE)
 				retval = ni_tio_output_cmd(counter, async);
-			} else {
+			else
 				retval = ni_tio_input_cmd(counter, async);
-			}
 		}
 	}
 	spin_unlock_irqrestore(&counter->lock, flags);
 	return retval;
 }
+EXPORT_SYMBOL_GPL(ni_tio_cmd);
 
 int ni_tio_cmdtest(struct ni_gpct *counter, struct comedi_cmd *cmd)
 {
@@ -342,6 +338,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
 
 int ni_tio_cancel(struct ni_gpct *counter)
 {
@@ -349,9 +346,8 @@
 
 	ni_tio_arm(counter, 0, 0);
 	spin_lock_irqsave(&counter->lock, flags);
-	if (counter->mite_chan) {
+	if (counter->mite_chan)
 		mite_dma_disarm(counter->mite_chan);
-	}
 	spin_unlock_irqrestore(&counter->lock, flags);
 	ni_tio_configure_dma(counter, 0, 0);
 
@@ -361,10 +357,11 @@
 			0x0);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ni_tio_cancel);
 
-	/* During buffered input counter operation for e-series, the gate interrupt is acked
-	   automatically by the dma controller, due to the Gi_Read/Write_Acknowledges_IRQ bits
-	   in the input select register.  */
+	/* During buffered input counter operation for e-series, the gate
+	   interrupt is acked automatically by the dma controller, due to the
+	   Gi_Read/Write_Acknowledges_IRQ bits in the input select register.  */
 static int should_ack_gate(struct ni_gpct *counter)
 {
 	unsigned long flags;
@@ -372,7 +369,10 @@
 
 	switch (counter->counter_dev->variant) {
 	case ni_gpct_variant_m_series:
-	case ni_gpct_variant_660x:	/*  not sure if 660x really supports gate interrupts (the bits are not listed in register-level manual) */
+	/*  not sure if 660x really supports gate
+	    interrupts (the bits are not listed
+	    in register-level manual) */
+	case ni_gpct_variant_660x:
 		return 1;
 		break;
 	case ni_gpct_variant_e_series:
@@ -416,7 +416,8 @@
 	if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
 		ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
 		if (gate_error) {
-			/*660x don't support automatic acknowledgement of gate interrupt via dma read/write
+			/*660x don't support automatic acknowledgement
+			  of gate interrupt via dma read/write
 			   and report bogus gate errors */
 			if (counter->counter_dev->variant !=
 			    ni_gpct_variant_660x) {
@@ -429,9 +430,8 @@
 		if (tc_error)
 			*tc_error = 1;
 	}
-	if (gi_status & Gi_TC_Bit) {
+	if (gi_status & Gi_TC_Bit)
 		ack |= Gi_TC_Interrupt_Ack_Bit;
-	}
 	if (gi_status & Gi_Gate_Interrupt_Bit) {
 		if (should_ack_gate(counter))
 			ack |= Gi_Gate_Interrupt_Ack_Bit;
@@ -452,13 +452,14 @@
 				  NITIO_Gxx_Joint_Status2_Reg
 				  (counter->counter_index)) &
 		    Gi_Permanent_Stale_Bit(counter->counter_index)) {
-			printk("%s: Gi_Permanent_Stale_Data detected.\n",
-			       __FUNCTION__);
+			printk(KERN_INFO "%s: Gi_Permanent_Stale_Data detected.\n",
+			       __func__);
 			if (perm_stale_data)
 				*perm_stale_data = 1;
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm);
 
 void ni_tio_handle_interrupt(struct ni_gpct *counter,
 			     struct comedi_subdevice *s)
@@ -472,20 +473,19 @@
 	ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
 				       &perm_stale_data, NULL);
 	if (gate_error) {
-		printk("%s: Gi_Gate_Error detected.\n", __FUNCTION__);
+		printk(KERN_NOTICE "%s: Gi_Gate_Error detected.\n", __func__);
 		s->async->events |= COMEDI_CB_OVERFLOW;
 	}
-	if (perm_stale_data) {
+	if (perm_stale_data)
 		s->async->events |= COMEDI_CB_ERROR;
-	}
 	switch (counter->counter_dev->variant) {
 	case ni_gpct_variant_m_series:
 	case ni_gpct_variant_660x:
 		if (read_register(counter,
-				  NITIO_Gi_DMA_Status_Reg
-				  (counter->counter_index)) & Gi_DRQ_Error_Bit)
-		{
-			printk("%s: Gi_DRQ_Error detected.\n", __FUNCTION__);
+				NITIO_Gi_DMA_Status_Reg
+				(counter->counter_index)) & Gi_DRQ_Error_Bit) {
+			printk(KERN_NOTICE "%s: Gi_DRQ_Error detected.\n",
+							__func__);
 			s->async->events |= COMEDI_CB_OVERFLOW;
 		}
 		break;
@@ -506,6 +506,7 @@
 	mite_sync_input_dma(counter->mite_chan, s->async);
 	spin_unlock_irqrestore(&counter->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
 
 void ni_tio_set_mite_channel(struct ni_gpct *counter,
 			     struct mite_channel *mite_chan)
@@ -516,6 +517,7 @@
 	counter->mite_chan = mite_chan;
 	spin_unlock_irqrestore(&counter->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ni_tio_set_mite_channel);
 
 static int __init ni_tiocmd_init_module(void)
 {
@@ -529,10 +531,3 @@
 }
 
 module_exit(ni_tiocmd_cleanup_module);
-
-EXPORT_SYMBOL_GPL(ni_tio_cmd);
-EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
-EXPORT_SYMBOL_GPL(ni_tio_cancel);
-EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
-EXPORT_SYMBOL_GPL(ni_tio_set_mite_channel);
-EXPORT_SYMBOL_GPL(ni_tio_acknowledge_and_confirm);
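The ni_tiocmd.c hunks above move each EXPORT_SYMBOL_GPL() from a block at the end of the file to sit directly after the function it exports, the usual kernel placement. A minimal illustration with a made-up function:

#include <linux/module.h>

int example_exported_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL_GPL(example_exported_helper);	/* kept next to its definition */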
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 60ebfc3..aa8aeee 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -753,7 +753,7 @@
 	struct comedi_subdevice *s;
 	struct pci_dev *pcidev;
 	int ret;
-	resource_size_t physLas0;	/* configuation */
+	resource_size_t physLas0;	/* configuration */
 	resource_size_t physLas1;	/* data area */
 	resource_size_t physLcfg;	/* PLX9080 */
 #ifdef USE_DMA
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index 3607aae..2b34dae 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -114,7 +114,7 @@
 };
 
 struct counter_mode_register_t {
-#if defined (__LITTLE_ENDIAN_BITFIELD)
+#if defined(__LITTLE_ENDIAN_BITFIELD)
 	unsigned short coutSource:1;
 	unsigned short coutPolarity:1;
 	unsigned short autoLoadResetRcap:3;
@@ -207,7 +207,9 @@
 
 /* this structure is for data unique to this hardware driver.  If
    several hardware drivers keep similar information in this structure,
-   feel free to suggest moving the variable to the struct comedi_device struct.  */
+   feel free to suggest moving the variable to the struct comedi_device
+   struct.
+*/
 struct s526_private {
 
 	int data;
@@ -304,7 +306,7 @@
 /* int subdev_channel = 0; */
 	union cmReg cmReg;
 
-	printk("comedi%d: s526: ", dev->minor);
+	printk(KERN_INFO "comedi%d: s526: ", dev->minor);
 
 	iobase = it->options[0];
 	if (!iobase || !request_region(iobase, S526_IOSIZE, thisboard->name)) {
@@ -317,7 +319,8 @@
 
 	/*** make it a little quieter, exw, 8/29/06
 	for (i = 0; i < S526_NUM_PORTS; i++) {
-		printk("0x%02x: 0x%04x\n", ADDR_REG(s526_ports[i]), inw(ADDR_REG(s526_ports[i])));
+		printk("0x%02x: 0x%04x\n", ADDR_REG(s526_ports[i]),
+				inw(ADDR_REG(s526_ports[i])));
 	}
 	***/
 
@@ -402,7 +405,7 @@
 		s->type = COMEDI_SUBD_UNUSED;
 	}
 
-	printk("attached\n");
+	printk(KERN_INFO "attached\n");
 
 	return 1;
 
@@ -411,7 +414,7 @@
 	/* One-shot (software trigger) */
 	cmReg.reg.coutSource = 0;	/*  out RCAP */
 	cmReg.reg.coutPolarity = 1;	/*  Polarity inverted */
-	cmReg.reg.autoLoadResetRcap = 1;	/*  Auto load 0:disabled, 1:enabled */
+	cmReg.reg.autoLoadResetRcap = 1;/*  Auto load 0:disabled, 1:enabled */
 	cmReg.reg.hwCtEnableSource = 3;	/*  NOT RCAP */
 	cmReg.reg.ctEnableCtrl = 2;	/*  Hardware */
 	cmReg.reg.clockSource = 2;	/*  Internal */
@@ -426,10 +429,12 @@
 	outw(0x0001, ADDR_CHAN_REG(REG_C0H, subdev_channel));
 	outw(0x3C68, ADDR_CHAN_REG(REG_C0L, subdev_channel));
 
-	outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Reset the counter */
-	outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Load the counter from PR0 */
-
-	outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Reset RCAP (fires one-shot) */
+	/*  Reset the counter */
+	outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+	/*  Load the counter from PR0 */
+	outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+	/*  Reset RCAP (fires one-shot) */
+	outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel));
 
 #else
 
@@ -447,11 +452,12 @@
 	cmReg.reg.reserved = 0;
 
 	n = 0;
-	printk("Mode reg=0x%04x, 0x%04lx\n", cmReg.value, ADDR_CHAN_REG(REG_C0M,
-									n));
+	printk(KERN_INFO "Mode reg=0x%04x, 0x%04lx\n",
+		cmReg.value, ADDR_CHAN_REG(REG_C0M, n));
 	outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, n));
 	udelay(1000);
-	printk("Read back mode reg=0x%04x\n", inw(ADDR_CHAN_REG(REG_C0M, n)));
+	printk(KERN_INFO "Read back mode reg=0x%04x\n",
+		inw(ADDR_CHAN_REG(REG_C0M, n)));
 
 	/*  Load the pre-load register high word */
 /* value = (short) (0x55); */
@@ -466,20 +472,23 @@
 
 	/*  Reset the counter if it is software preload */
 	if (cmReg.reg.autoLoadResetRcap == 0) {
-		outw(0x8000, ADDR_CHAN_REG(REG_C0C, n));	/*  Reset the counter */
-		outw(0x4000, ADDR_CHAN_REG(REG_C0C, n));	/*  Load the counter from PR0 */
+		/*  Reset the counter */
+		outw(0x8000, ADDR_CHAN_REG(REG_C0C, n));
+		/*  Load the counter from PR0 */
+		outw(0x4000, ADDR_CHAN_REG(REG_C0C, n));
 	}
 
 	outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, n));
 	udelay(1000);
-	printk("Read back mode reg=0x%04x\n", inw(ADDR_CHAN_REG(REG_C0M, n)));
+	printk(KERN_INFO "Read back mode reg=0x%04x\n",
+			inw(ADDR_CHAN_REG(REG_C0M, n)));
 
 #endif
-	printk("Current registres:\n");
+	printk(KERN_INFO "Current registres:\n");
 
 	for (i = 0; i < S526_NUM_PORTS; i++) {
-		printk("0x%02lx: 0x%04x\n", ADDR_REG(s526_ports[i]),
-		       inw(ADDR_REG(s526_ports[i])));
+		printk(KERN_INFO "0x%02lx: 0x%04x\n",
+			ADDR_REG(s526_ports[i]), inw(ADDR_REG(s526_ports[i])));
 	}
 	return 1;
 }
@@ -494,7 +503,7 @@
  */
 static int s526_detach(struct comedi_device *dev)
 {
-	printk("comedi%d: s526: remove\n", dev->minor);
+	printk(KERN_INFO "comedi%d: s526: remove\n", dev->minor);
 
 	if (dev->iobase > 0)
 		release_region(dev->iobase, S526_IOSIZE);
@@ -513,7 +522,7 @@
 
 	/*  Check if (n > 0) */
 	if (insn->n <= 0) {
-		printk("s526: INSN_READ: n should be > 0\n");
+		printk(KERN_ERR "s526: INSN_READ: n should be > 0\n");
 		return -EINVAL;
 	}
 	/*  Read the low word first */
@@ -522,7 +531,8 @@
 		datahigh = inw(ADDR_CHAN_REG(REG_C0H, counter_channel));
 		data[i] = (int)(datahigh & 0x00FF);
 		data[i] = (data[i] << 16) | (datalow & 0xFFFF);
-/* printk("s526 GPCT[%d]: %x(0x%04x, 0x%04x)\n", counter_channel, data[i], datahigh, datalow); */
+		/* printk("s526 GPCT[%d]: %x(0x%04x, 0x%04x)\n",
+		   counter_channel, data[i], datahigh, datalow); */
 	}
 	return i;
 }
@@ -536,7 +546,8 @@
 	short value;
 	union cmReg cmReg;
 
-/* printk("s526: GPCT_INSN_CONFIG: Configuring Channel %d\n", subdev_channel); */
+	/* printk("s526: GPCT_INSN_CONFIG: Configuring Channel %d\n",
+						subdev_channel); */
 
 	for (i = 0; i < MAX_GPCT_CONFIG_DATA; i++) {
 		devpriv->s526_gpct_config[subdev_channel].data[i] =
@@ -554,7 +565,7 @@
 		   data[2]: Pre-load Register Value
 		   data[3]: Conter Control Register
 		 */
-		printk("s526: GPCT_INSN_CONFIG: Configuring Encoder\n");
+		printk(KERN_INFO "s526: GPCT_INSN_CONFIG: Configuring Encoder\n");
 		devpriv->s526_gpct_config[subdev_channel].app =
 		    PositionMeasurement;
 
@@ -563,7 +574,7 @@
 		/* One-shot (software trigger) */
 		cmReg.reg.coutSource = 0;	/*  out RCAP */
 		cmReg.reg.coutPolarity = 1;	/*  Polarity inverted */
-		cmReg.reg.autoLoadResetRcap = 0;	/*  Auto load disabled */
+		cmReg.reg.autoLoadResetRcap = 0;/*  Auto load disabled */
 		cmReg.reg.hwCtEnableSource = 3;	/*  NOT RCAP */
 		cmReg.reg.ctEnableCtrl = 2;	/*  Hardware */
 		cmReg.reg.clockSource = 2;	/*  Internal */
@@ -578,10 +589,13 @@
 		outw(0x0001, ADDR_CHAN_REG(REG_C0H, subdev_channel));
 		outw(0x3C68, ADDR_CHAN_REG(REG_C0L, subdev_channel));
 
-		outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Reset the counter */
-		outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Load the counter from PR0 */
+		/*  Reset the counter */
+		outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+		/*  Load the counter from PR0 */
+		outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
 
-		outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Reset RCAP (fires one-shot) */
+		/*  Reset RCAP (fires one-shot) */
+		outw(0x0008, ADDR_CHAN_REG(REG_C0C, subdev_channel));
 
 #endif
 
@@ -594,30 +608,34 @@
 
 		/*  Reset the counter if it is software preload */
 		if (cmReg.reg.autoLoadResetRcap == 0) {
-			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Reset the counter */
-/* outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));    Load the counter from PR0 */
+			/*  Reset the counter */
+			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+			/* Load the counter from PR0
+			 * outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+			 */
 		}
 #else
-		cmReg.reg.countDirCtrl = 0;	/*  0 quadrature, 1 software control */
+		/*  0 quadrature, 1 software control */
+		cmReg.reg.countDirCtrl = 0;
 
 		/*  data[1] contains GPCT_X1, GPCT_X2 or GPCT_X4 */
-		if (insn->data[1] == GPCT_X2) {
+		if (insn->data[1] == GPCT_X2)
 			cmReg.reg.clockSource = 1;
-		} else if (insn->data[1] == GPCT_X4) {
+		else if (insn->data[1] == GPCT_X4)
 			cmReg.reg.clockSource = 2;
-		} else {
+		else
 			cmReg.reg.clockSource = 0;
-		}
 
 		/*  When to take into account the indexpulse: */
-		if (insn->data[2] == GPCT_IndexPhaseLowLow) {
+		/*if (insn->data[2] == GPCT_IndexPhaseLowLow) {
 		} else if (insn->data[2] == GPCT_IndexPhaseLowHigh) {
 		} else if (insn->data[2] == GPCT_IndexPhaseHighLow) {
 		} else if (insn->data[2] == GPCT_IndexPhaseHighHigh) {
-		}
+		}*/
 		/*  Take into account the index pulse? */
 		if (insn->data[3] == GPCT_RESET_COUNTER_ON_INDEX)
-			cmReg.reg.autoLoadResetRcap = 4;	/*  Auto load with INDEX^ */
+			/*  Auto load with INDEX^ */
+			cmReg.reg.autoLoadResetRcap = 4;
 
 		/*  Set Counter Mode Register */
 		cmReg.value = (short)(insn->data[1] & 0xFFFF);
@@ -638,8 +656,10 @@
 		}
 		/*  Reset the counter if it is software preload */
 		if (cmReg.reg.autoLoadResetRcap == 0) {
-			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Reset the counter */
-			outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/*  Load the counter from PR0 */
+			/*  Reset the counter */
+			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+			/*  Load the counter from PR0 */
+			outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));
 		}
 #endif
 		break;
@@ -652,7 +672,7 @@
 		   data[3]: Pre-load Register 1 Value
 		   data[4]: Conter Control Register
 		 */
-		printk("s526: GPCT_INSN_CONFIG: Configuring SPG\n");
+		printk(KERN_INFO "s526: GPCT_INSN_CONFIG: Configuring SPG\n");
 		devpriv->s526_gpct_config[subdev_channel].app =
 		    SinglePulseGeneration;
 
@@ -697,7 +717,7 @@
 		   data[3]: Pre-load Register 1 Value
 		   data[4]: Conter Control Register
 		 */
-		printk("s526: GPCT_INSN_CONFIG: Configuring PTG\n");
+		printk(KERN_INFO "s526: GPCT_INSN_CONFIG: Configuring PTG\n");
 		devpriv->s526_gpct_config[subdev_channel].app =
 		    PulseTrainGeneration;
 
@@ -735,7 +755,7 @@
 		break;
 
 	default:
-		printk("s526: unsupported GPCT_insn_config\n");
+		printk(KERN_ERR "s526: unsupported GPCT_insn_config\n");
 		return -EINVAL;
 		break;
 	}
@@ -751,20 +771,21 @@
 	short value;
 	union cmReg cmReg;
 
-	printk("s526: GPCT_INSN_WRITE on channel %d\n", subdev_channel);
+	printk(KERN_INFO "s526: GPCT_INSN_WRITE on channel %d\n",
+					subdev_channel);
 	cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel));
-	printk("s526: Counter Mode Register: %x\n", cmReg.value);
+	printk(KERN_INFO "s526: Counter Mode Register: %x\n", cmReg.value);
 	/*  Check what Application of Counter this channel is configured for */
 	switch (devpriv->s526_gpct_config[subdev_channel].app) {
 	case PositionMeasurement:
-		printk("S526: INSN_WRITE: PM\n");
+		printk(KERN_INFO "S526: INSN_WRITE: PM\n");
 		outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
 							     subdev_channel));
 		outw(0xFFFF & (*data), ADDR_CHAN_REG(REG_C0L, subdev_channel));
 		break;
 
 	case SinglePulseGeneration:
-		printk("S526: INSN_WRITE: SPG\n");
+		printk(KERN_INFO "S526: INSN_WRITE: SPG\n");
 		outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
 							     subdev_channel));
 		outw(0xFFFF & (*data), ADDR_CHAN_REG(REG_C0L, subdev_channel));
@@ -777,14 +798,14 @@
 		   The above periods must be expressed as a multiple of the
 		   pulse frequency on the selected source
 		 */
-		printk("S526: INSN_WRITE: PTG\n");
+		printk(KERN_INFO "S526: INSN_WRITE: PTG\n");
 		if ((insn->data[1] > insn->data[0]) && (insn->data[0] > 0)) {
 			(devpriv->s526_gpct_config[subdev_channel]).data[0] =
 			    insn->data[0];
 			(devpriv->s526_gpct_config[subdev_channel]).data[1] =
 			    insn->data[1];
 		} else {
-			printk("s526: INSN_WRITE: PTG: Problem with Pulse params -> %d %d\n",
+			printk(KERN_ERR "s526: INSN_WRITE: PTG: Problem with Pulse params -> %d %d\n",
 				insn->data[0], insn->data[1]);
 			return -EINVAL;
 		}
@@ -873,7 +894,7 @@
 		if (i == TIMEOUT) {
 			/* printk() should be used instead of printk()
 			 * whenever the code can be called from real-time. */
-			printk("s526: ADC(0x%04x) timeout\n",
+			printk(KERN_ERR "s526: ADC(0x%04x) timeout\n",
 			       inw(ADDR_REG(REG_ISR)));
 			return -ETIMEDOUT;
 		}
@@ -906,11 +927,14 @@
 	 * very useful, but that's how the interface is defined. */
 	for (i = 0; i < insn->n; i++) {
 		/* a typical programming sequence */
-/* outw(data[i], dev->iobase + REG_ADD);    write the data to preload register */
-		outw(data[i], ADDR_REG(REG_ADD));	/*  write the data to preload register */
+		/* write the data to preload register
+		 * outw(data[i], dev->iobase + REG_ADD);
+		 */
+		/* write the data to preload register */
+		outw(data[i], ADDR_REG(REG_ADD));
 		devpriv->ao_readback[chan] = data[i];
 /* outw(val + 1, dev->iobase + REG_DAC);  starts the D/A conversion. */
-		outw(val + 1, ADDR_REG(REG_DAC));	/*  starts the D/A conversion. */
+		outw(val + 1, ADDR_REG(REG_DAC)); /*starts the D/A conversion.*/
 	}
 
 	/* return the number of samples read/written */
@@ -954,7 +978,7 @@
 
 	/* on return, data[1] contains the value of the digital
 	 * input and output lines. */
-	data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF;	/*  low 8 bits are the data */
+	data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */
 	/* or we could just return the software copy of the output values if
 	 * it was a purely digital output subdevice */
 	/* data[1]=s->state & 0xFF; */
@@ -969,7 +993,7 @@
 	int chan = CR_CHAN(insn->chanspec);
 	int group, mask;
 
-	printk("S526 DIO insn_config\n");
+	printk(KERN_INFO "S526 DIO insn_config\n");
 
 	/* The input or output configuration of each digital line is
 	 * configured by a special insn_config instruction.  chanspec
@@ -980,11 +1004,12 @@
 	mask = 0xF << (group << 2);
 	switch (data[0]) {
 	case INSN_CONFIG_DIO_OUTPUT:
-		s->state |= 1 << (group + 10);  // bit 10/11 set the group 1/2's mode
+		/* bit 10/11 set the group 1/2's mode */
+		s->state |= 1 << (group + 10);
 		s->io_bits |= mask;
 		break;
 	case INSN_CONFIG_DIO_INPUT:
-		s->state &= ~(1 << (group + 10));// 1 is output, 0 is input.
+		s->state &= ~(1 << (group + 10)); /* 1 is output, 0 is input. */
 		s->io_bits &= ~mask;
 		break;
 	case INSN_CONFIG_DIO_QUERY:
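The s526.c hunks above add an explicit KERN_* log level to every printk(). A brief sketch of that convention, together with the equivalent pr_*() shorthand; the messages are illustrative only:

#include <linux/kernel.h>

static void example_log(int minor)
{
	printk(KERN_INFO "comedi%d: example: attached\n", minor);
	pr_err("example: ADC timeout\n");	/* same as printk(KERN_ERR ...) */
}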
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index de784ff..696ee04 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -2398,7 +2398,7 @@
 	usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
 	if (!usbduxsub[index].dux_commands) {
 		dev_err(dev, "comedi_: usbdux: "
-			"error alloc space for dac commands\n");
+			"error alloc space for dux commands\n");
 		tidy_up(&(usbduxsub[index]));
 		up(&start_stop_sem);
 		return -ENOMEM;
diff --git a/drivers/staging/cptm1217/Kconfig b/drivers/staging/cptm1217/Kconfig
new file mode 100644
index 0000000..43b1cc0
--- /dev/null
+++ b/drivers/staging/cptm1217/Kconfig
@@ -0,0 +1,12 @@
+config TOUCHSCREEN_CLEARPAD_TM1217
+	tristate "Synaptics Clearpad TM1217"
+	depends on I2C
+	depends on GPIOLIB
+	depends on INPUT
+	help
+	  Say Y here if you have a Synaptics Clearpad TM1217 Controller
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called clearpad_tm1217.
diff --git a/drivers/staging/cptm1217/Makefile b/drivers/staging/cptm1217/Makefile
new file mode 100644
index 0000000..8961faf
--- /dev/null
+++ b/drivers/staging/cptm1217/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217)	+= clearpad_tm1217.o
+
diff --git a/drivers/staging/cptm1217/TODO b/drivers/staging/cptm1217/TODO
new file mode 100644
index 0000000..3039224
--- /dev/null
+++ b/drivers/staging/cptm1217/TODO
@@ -0,0 +1,5 @@
+- Wait for the official upstream general clearpad drivers as promised over
+  the past few months
+- Merge any device support needed from this driver into it
+- Delete this driver
+
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
new file mode 100644
index 0000000..76e4b78
--- /dev/null
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -0,0 +1,675 @@
+/*
+ * clearpad_tm1217.c - Touch Screen driver for Synaptics Clearpad
+ * TM1217 controller
+ *
+ * Copyright (C) 2008 Intel Corp
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Questions/Comments/Bug fixes to Ramesh Agarwal (ramesh.agarwal@intel.com)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/timer.h>
+#include <linux/gpio.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include "cp_tm1217.h"
+
+#define CPTM1217_DEVICE_NAME		"cptm1217"
+#define CPTM1217_DRIVER_NAME		CPTM1217_DEVICE_NAME
+
+#define MAX_TOUCH_SUPPORTED		2
+#define TOUCH_SUPPORTED			1
+#define SAMPLING_FREQ			80	/* Frequency in HZ */
+#define DELAY_BTWIN_SAMPLE		(1000 / SAMPLING_FREQ)
+#define WAIT_FOR_RESPONSE		5	/* 5msec just works */
+#define MAX_RETRIES			5	/* As above */
+#define INCREMENTAL_DELAY		5	/* As above */
+
+/* Register Definitions */
+#define TMA1217_DEV_STATUS		0x13	/* Device Status */
+#define TMA1217_INT_STATUS		0x14	/* Interrupt Status */
+
+/* The controller can detect up to 2 possible finger touches.
+ * Each finger touch provides 12-bit X and Y co-ordinates; the values are
+ * split across 2 registers, plus an 8-bit Z value. */
+#define TMA1217_FINGER_STATE		0x18 /* Finger State */
+#define TMA1217_FINGER1_X_HIGHER8	0x19 /* Higher 8 bit of X coordinate */
+#define TMA1217_FINGER1_Y_HIGHER8	0x1A /* Higher 8 bit of Y coordinate */
+#define TMA1217_FINGER1_XY_LOWER4	0x1B /* Lower 4 bits of X and Y */
+#define TMA1217_FINGER1_Z_VALUE		0x1D /* 8 bit Z value for finger 1 */
+#define TMA1217_FINGER2_X_HIGHER8	0x1E /* Higher 8 bit of X coordinate */
+#define TMA1217_FINGER2_Y_HIGHER8	0x1F /* Higher 8 bit of Y coordinate */
+#define TMA1217_FINGER2_XY_LOWER4	0x20 /* Lower 4 bits of X and Y */
+#define TMA1217_FINGER2_Z_VALUE		0x22 /* 8 bit Z value for finger 2 */
+#define TMA1217_DEVICE_CTRL		0x23 /* Device Control */
+#define TMA1217_INTERRUPT_ENABLE	0x24 /* Interrupt Enable */
+#define TMA1217_REPORT_MODE		0x2B /* Reporting Mode */
+#define TMA1217_MAX_X_LOWER8		0x31 /* Bit 0-7 for Max X */
+#define TMA1217_MAX_X_HIGHER4		0x32 /* Bit 8-11 for Max X */
+#define TMA1217_MAX_Y_LOWER8		0x33 /* Bit 0-7 for Max Y */
+#define TMA1217_MAX_Y_HIGHER4		0x34 /* Bit 8-11 for Max Y */
+#define TMA1217_DEVICE_CMD_RESET	0x67 /* Device CMD reg for reset */
+#define TMA1217_DEVICE_CMD_REZERO	0x69 /* Device CMD reg for rezero */
+
+#define TMA1217_MANUFACTURER_ID		0x73 /* Manufacturer Id */
+#define TMA1217_PRODUCT_FAMILY		0x75 /* Product Family */
+#define TMA1217_FIRMWARE_REVISION	0x76 /* Firmware Revision */
+#define TMA1217_SERIAL_NO_HIGH		0x7C /* Bit 8-15 of device serial no. */
+#define TMA1217_SERIAL_NO_LOW		0x7D /* Bit 0-7 of device serial no. */
+#define TMA1217_PRODUCT_ID_START	0x7E /* Start address for 10 byte ID */
+#define TMA1217_DEVICE_CAPABILITY	0x8B /* Reporting capability */
+
+
+/*
+ * The touch position structure.
+ */
+struct touch_state {
+	int	x;
+	int	y;
+	bool button;
+};
+
+/* Device Specific info given by the controller */
+struct cp_dev_info {
+	u16	maxX;
+	u16	maxY;
+};
+
+/* Vendor related info given by the controller */
+struct cp_vendor_info {
+	u8	vendor_id;
+	u8	product_family;
+	u8	firmware_rev;
+	u16	serial_no;
+};
+
+/*
+ * Private structure to store the device details
+ */
+struct cp_tm1217_device {
+	struct i2c_client	*client;
+	struct device		*dev;
+	struct cp_vendor_info	vinfo;
+	struct cp_dev_info	dinfo;
+	struct input_dev_info {
+		char			phys[32];
+		char			name[128];
+		struct input_dev	*input;
+		struct touch_state	touch;
+	} cp_input_info[MAX_TOUCH_SUPPORTED];
+
+	int	thread_running;
+	struct mutex	thread_mutex;
+
+	int gpio;
+};
+
+
+/* The following functions are used to read/write registers on the device
+ * as per the RMI protocol. Technically, a page select should be written
+ * before doing read/write but since the register offsets are below 0xFF
+ * we can use the default value of page which is 0x00
+ */
+static int cp_tm1217_read(struct cp_tm1217_device *ts,
+				u8 *req, int size)
+{
+	int i, retval;
+
+	/* Send the address */
+	retval = i2c_master_send(ts->client, &req[0], 1);
+	if (retval != 1) {
+		dev_err(ts->dev, "cp_tm1217: I2C send failed\n");
+		return retval;
+	}
+	msleep(WAIT_FOR_RESPONSE);
+	for (i = 0; i < MAX_RETRIES; i++) {
+		retval = i2c_master_recv(ts->client, &req[1], size);
+		if (retval == size) {
+			break;
+		} else {
+			msleep(INCREMENTAL_DELAY);
+			dev_dbg(ts->dev, "cp_tm1217: Retry count is %d\n", i);
+		}
+	}
+	if (retval != size)
+		dev_err(ts->dev, "cp_tm1217: Read from device failed\n");
+
+	return retval;
+}
+
+static int cp_tm1217_write(struct cp_tm1217_device *ts,
+				u8 *req, int size)
+{
+	int retval;
+
+	/* Send the address and the data to be written */
+	retval = i2c_master_send(ts->client, &req[0], size + 1);
+	if (retval != size + 1) {
+		dev_err(ts->dev, "cp_tm1217: I2C write  failed: %d\n", retval);
+		return retval;
+	}
+	/* Wait for the write to complete. TBD why this is required */
+	msleep(WAIT_FOR_RESPONSE);
+
+	return size;
+}
+
+static int cp_tm1217_mask_interrupt(struct cp_tm1217_device *ts)
+{
+	u8 req[2];
+	int retval;
+
+	req[0] = TMA1217_INTERRUPT_ENABLE;
+	req[1] = 0x0;
+	retval = cp_tm1217_write(ts, req, 1);
+	if (retval != 1)
+		return -EIO;
+
+	return 0;
+}
+
+static int cp_tm1217_unmask_interrupt(struct cp_tm1217_device *ts)
+{
+	u8 req[2];
+	int retval;
+
+	req[0] = TMA1217_INTERRUPT_ENABLE;
+	req[1] = 0xa;
+	retval = cp_tm1217_write(ts, req, 1);
+	if (retval != 1)
+		return -EIO;
+
+	return 0;
+}
+
+static void process_touch(struct cp_tm1217_device *ts, int index)
+{
+	int retval;
+	struct input_dev_info *input_info =
+		(struct input_dev_info *)&ts->cp_input_info[index];
+	u8 xy_data[6];
+
+	if (index == 0)
+		xy_data[0] = TMA1217_FINGER1_X_HIGHER8;
+	else
+		xy_data[0] = TMA1217_FINGER2_X_HIGHER8;
+
+	retval = cp_tm1217_read(ts, xy_data, 5);
+	if (retval < 5) {
+		dev_err(ts->dev, "cp_tm1217: XY read from device failed\n");
+		return;
+	}
+
+	/* Note: Currently not using the Z values, but they may be required
+	   in the future. */
+	input_info->touch.x = (xy_data[1] << 4)
+					| (xy_data[3] & 0x0F);
+	input_info->touch.y = (xy_data[2] << 4)
+					| ((xy_data[3] & 0xF0) >> 4);
+	input_report_abs(input_info->input, ABS_X, input_info->touch.x);
+	input_report_abs(input_info->input, ABS_Y, input_info->touch.y);
+	input_sync(input_info->input);
+}
+
+static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
+{
+	u8 req[2];
+	int retval, i, finger_touched = 0;
+
+	do {
+		req[0] = TMA1217_FINGER_STATE;
+		retval = cp_tm1217_read(ts, req, 1);
+		if (retval != 1) {
+			dev_err(ts->dev,
+				"cp_tm1217: Read from device failed\n");
+			continue;
+		}
+		finger_touched = 0;
+		/* Start sampling until the pressure is below
+		  threshold */
+		for (i = 0; i < TOUCH_SUPPORTED; i++) {
+			if (req[1] & 0x3) {
+				finger_touched++;
+				if (ts->cp_input_info[i].touch.button == 0) {
+					/* send the button touch event */
+					input_report_key(
+						ts->cp_input_info[i].input,
+						BTN_TOUCH, 1);
+					ts->cp_input_info[i].touch.button = 1;
+				}
+				process_touch(ts, i);
+			} else {
+				if (ts->cp_input_info[i].touch.button == 1) {
+					/* send the button release event */
+					input_report_key(
+						ts->cp_input_info[i].input,
+						BTN_TOUCH, 0);
+					input_sync(ts->cp_input_info[i].input);
+					ts->cp_input_info[i].touch.button = 0;
+				}
+			}
+			req[1] = req[1] >> 2;
+		}
+		msleep(DELAY_BTWIN_SAMPLE);
+	} while (finger_touched > 0);
+}
+
+static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
+{
+	struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle;
+	u8 req[2];
+	int retval;
+
+	/* Check if another thread is already running */
+	mutex_lock(&ts->thread_mutex);
+	if (ts->thread_running == 1) {
+		mutex_unlock(&ts->thread_mutex);
+		return IRQ_HANDLED;
+	} else {
+		ts->thread_running = 1;
+		mutex_unlock(&ts->thread_mutex);
+	}
+
+	/* Mask the interrupts */
+	retval = cp_tm1217_mask_interrupt(ts);
+
+	/* Read the Interrupt Status register to find the cause of the
+	   Interrupt */
+	req[0] = TMA1217_INT_STATUS;
+	retval = cp_tm1217_read(ts, req, 1);
+	if (retval != 1)
+		goto exit_thread;
+
+	if (!(req[1] & 0x8))
+		goto exit_thread;
+
+	cp_tm1217_get_data(ts);
+
+exit_thread:
+	/* Unmask the interrupts before going to sleep */
+	retval = cp_tm1217_unmask_interrupt(ts);
+
+	mutex_lock(&ts->thread_mutex);
+	ts->thread_running = 0;
+	mutex_unlock(&ts->thread_mutex);
+
+	return IRQ_HANDLED;
+}
+
+static int cp_tm1217_init_data(struct cp_tm1217_device *ts)
+{
+	int retval;
+	u8	req[2];
+
+	/* Read the vendor id / fw revision etc. The return check is ignored
+	   as this is non-critical info. */
+	req[0] = TMA1217_MANUFACTURER_ID;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->vinfo.vendor_id = req[1];
+
+	req[0] = TMA1217_PRODUCT_FAMILY;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->vinfo.product_family = req[1];
+
+	req[0] = TMA1217_FIRMWARE_REVISION;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->vinfo.firmware_rev = req[1];
+
+	req[0] = TMA1217_SERIAL_NO_HIGH;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->vinfo.serial_no = (req[1] << 8);
+
+	req[0] = TMA1217_SERIAL_NO_LOW;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->vinfo.serial_no = ts->vinfo.serial_no | req[1];
+
+	req[0] = TMA1217_MAX_X_HIGHER4;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->dinfo.maxX = (req[1] & 0xF) << 8;
+
+	req[0] = TMA1217_MAX_X_LOWER8;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->dinfo.maxX = ts->dinfo.maxX | req[1];
+
+	req[0] = TMA1217_MAX_Y_HIGHER4;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->dinfo.maxY = (req[1] & 0xF) << 8;
+
+	req[0] = TMA1217_MAX_Y_LOWER8;
+	retval = cp_tm1217_read(ts, req, 1);
+	ts->dinfo.maxY = ts->dinfo.maxY | req[1];
+
+	return 0;
+
+}
+
+/*
+ *	Set up a GPIO for use as the interrupt. We can't simply do this at
+ *	boot time because the GPIO drivers themselves may not be around at
+ *	boot/firmware set up time to do the work. Instead defer it to driver
+ *	detection.
+ */
+
+static int cp_tm1217_setup_gpio_irq(struct cp_tm1217_device *ts)
+{
+	int retval;
+
+	/* Hook up the irq handler */
+	retval = gpio_request(ts->gpio, "cp_tm1217_touch");
+	if (retval < 0) {
+		dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
+								retval);
+		return retval;
+	}
+
+	retval = gpio_direction_input(ts->gpio);
+	if (retval < 0) {
+		dev_err(ts->dev,
+		"cp_tm1217: GPIO direction configuration failed, error %d\n",
+								retval);
+		gpio_free(ts->gpio);
+		return retval;
+	}
+
+	retval = gpio_to_irq(ts->gpio);
+	if (retval < 0) {
+		dev_err(ts->dev, "cp_tm1217: GPIO to IRQ failedi,"
+		" error %d\n", retval);
+		gpio_free(ts->gpio);
+	}
+	dev_dbg(ts->dev,
+		"cp_tm1217: Got IRQ number is %d for GPIO %d\n",
+		retval, ts->gpio);
+	return retval;
+}
+
+static int cp_tm1217_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct cp_tm1217_device *ts;
+	struct input_dev *input_dev;
+	struct input_dev_info	*input_info;
+	struct cp_tm1217_platform_data *pdata;
+	u8 req[2];
+	int i, retval;
+
+	/* No pdata is fine - we then use "normal" IRQ mode */
+
+	pdata = client->dev.platform_data;
+
+	ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL);
+	if (!ts) {
+		dev_err(&client->dev,
+			"cp_tm1217: Private Device Struct alloc failed\n");
+		return -ENOMEM;
+	}
+
+	ts->client = client;
+	ts->dev = &client->dev;
+	i2c_set_clientdata(client, ts);
+
+	ts->thread_running = 0;
+	mutex_init(&ts->thread_mutex);
+
+	/* Reset the Controller */
+	req[0] = TMA1217_DEVICE_CMD_RESET;
+	req[1] = 0x1;
+	retval = cp_tm1217_write(ts, req, 1);
+	if (retval != 1) {
+		dev_err(ts->dev, "cp_tm1217: Controller reset failed\n");
+		kfree(ts);
+		return -EIO;
+	}
+
+	/* Clear up the interrupt status from reset. */
+	req[0] = TMA1217_INT_STATUS;
+	retval = cp_tm1217_read(ts, req, 1);
+
+	/* Mask all the interrupts */
+	retval = cp_tm1217_mask_interrupt(ts);
+
+	/* Read the controller information */
+	cp_tm1217_init_data(ts);
+
+	/* The following code will register multiple event devices when
+	   multi-pointer is enabled; the code has not been tested
+	   with MPX */
+	for (i = 0; i < TOUCH_SUPPORTED; i++) {
+		input_dev = input_allocate_device();
+		if (input_dev == NULL) {
+			dev_err(ts->dev,
+				"cp_tm1217:Input Device Struct alloc failed\n");
+			kfree(ts);
+			return -ENOMEM;
+		}
+		input_info = &ts->cp_input_info[i];
+		snprintf(input_info->name, sizeof(input_info->name),
+			"cp_tm1217_touchscreen_%d", i);
+		input_dev->name = input_info->name;
+		snprintf(input_info->phys, sizeof(input_info->phys),
+			"%s/input%d", dev_name(&client->dev), i);
+
+		input_dev->phys = input_info->phys;
+		input_dev->id.bustype = BUS_I2C;
+
+		input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+		input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+		input_set_abs_params(input_dev, ABS_X, 0, ts->dinfo.maxX, 0, 0);
+		input_set_abs_params(input_dev, ABS_Y, 0, ts->dinfo.maxY, 0, 0);
+
+		retval = input_register_device(input_dev);
+		if (retval) {
+			dev_err(ts->dev,
+				"Input dev registration failed for %s\n",
+					input_dev->name);
+			goto fail;
+		}
+		input_info->input = input_dev;
+	}
+
+	/* Setup the reporting mode to send an interrupt only when
+	   a finger arrives or departs. */
+	req[0] = TMA1217_REPORT_MODE;
+	req[1] = 0x02;
+	retval = cp_tm1217_write(ts, req, 1);
+
+	/* Set the device to no-sleep mode for now and mark it as configured */
+	req[0] = TMA1217_DEVICE_CTRL;
+	req[1] = 0x84;
+	retval = cp_tm1217_write(ts, req, 1);
+
+	/* Check for the status of the device */
+	req[0] = TMA1217_DEV_STATUS;
+	retval = cp_tm1217_read(ts, req, 1);
+	if (req[1] != 0) {
+		dev_err(ts->dev,
+			"cp_tm1217: Device Status 0x%x != 0: config failed\n",
+			req[1]);
+
+		retval = -EIO;
+		goto fail;
+	}
+
+	if (pdata && pdata->gpio) {
+		ts->gpio = pdata->gpio;
+		retval = cp_tm1217_setup_gpio_irq(ts);
+	} else
+		retval = client->irq;
+
+	if (retval < 0) {
+		dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
+								retval);
+		goto fail;
+	}
+
+	client->irq = retval;
+
+
+	retval = request_threaded_irq(client->irq,
+		NULL, cp_tm1217_sample_thread,
+		IRQF_TRIGGER_FALLING, "cp_tm1217_touch", ts);
+	if (retval < 0) {
+		dev_err(ts->dev, "cp_tm1217: Request IRQ error %d\n", retval);
+		goto fail_gpio;
+	}
+
+	/* Unmask the interrupts */
+	retval = cp_tm1217_unmask_interrupt(ts);
+	if (retval == 0)
+		return 0;
+
+	free_irq(client->irq, ts);
+fail_gpio:
+	if (ts->gpio)
+		gpio_free(ts->gpio);
+fail:
+	/* Clean up before returning failure */
+	for (i = 0; i < TOUCH_SUPPORTED; i++) {
+		if (ts->cp_input_info[i].input) {
+			input_unregister_device(ts->cp_input_info[i].input);
+			input_free_device(ts->cp_input_info[i].input);
+		}
+	}
+	kfree(ts);
+	return retval;
+
+}
+
+/*
+ * cp_tm1217 suspend
+ *
+ */
+static int cp_tm1217_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct cp_tm1217_device *ts = i2c_get_clientdata(client);
+	u8 req[2];
+	int retval;
+
+	/* Put the controller to sleep */
+	req[0] = TMA1217_DEVICE_CTRL;
+	retval = cp_tm1217_read(ts, req, 1);
+	req[1] = (req[1] & 0xF8) | 0x1;
+	retval = cp_tm1217_write(ts, req, 1);
+
+	if (device_may_wakeup(&client->dev))
+		enable_irq_wake(client->irq);
+
+	return 0;
+}
+
+/*
+ * cp_tm1217_resume
+ *
+ */
+static int cp_tm1217_resume(struct i2c_client *client)
+{
+	struct cp_tm1217_device *ts = i2c_get_clientdata(client);
+	u8 req[2];
+	int retval;
+
+	/* Take the controller out of sleep */
+	req[0] = TMA1217_DEVICE_CTRL;
+	retval = cp_tm1217_read(ts, req, 1);
+	req[1] = (req[1] & 0xF8) | 0x4;
+	retval = cp_tm1217_write(ts, req, 1);
+
+	/* Restore the register settings since the power to the
+	   controller could have been cut off */
+
+	/* Setup the reporting mode to send an interrupt only when
+	   finger arrives or departs. */
+	req[0] = TMA1217_REPORT_MODE;
+	req[1] = 0x02;
+	retval = cp_tm1217_write(ts, req, 1);
+
+	/* Setup the device to no sleep mode for now and make it configured */
+	req[0] = TMA1217_DEVICE_CTRL;
+	req[1] = 0x84;
+	retval = cp_tm1217_write(ts, req, 1);
+
+	/* Setup the interrupt mask */
+	retval = cp_tm1217_unmask_interrupt(ts);
+
+	if (device_may_wakeup(&client->dev))
+		disable_irq_wake(client->irq);
+
+	return 0;
+}
+
+/*
+ * cp_tm1217_remove
+ *
+ */
+static int cp_tm1217_remove(struct i2c_client *client)
+{
+	struct cp_tm1217_device *ts = i2c_get_clientdata(client);
+	int i;
+
+	free_irq(client->irq, ts);
+	if (ts->gpio)
+		gpio_free(ts->gpio);
+	for (i = 0; i < TOUCH_SUPPORTED; i++)
+		input_unregister_device(ts->cp_input_info[i].input);
+	kfree(ts);
+	return 0;
+}
+
+static struct i2c_device_id cp_tm1217_idtable[] = {
+	{ CPTM1217_DEVICE_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, cp_tm1217_idtable);
+
+static struct i2c_driver cp_tm1217_driver = {
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= CPTM1217_DRIVER_NAME,
+	},
+	.id_table	= cp_tm1217_idtable,
+	.probe		= cp_tm1217_probe,
+	.remove		= cp_tm1217_remove,
+	.suspend    = cp_tm1217_suspend,
+	.resume     = cp_tm1217_resume,
+};
+
+static int __init clearpad_tm1217_init(void)
+{
+	return i2c_add_driver(&cp_tm1217_driver);
+}
+
+static void __exit clearpad_tm1217_exit(void)
+{
+	i2c_del_driver(&cp_tm1217_driver);
+}
+
+module_init(clearpad_tm1217_init);
+module_exit(clearpad_tm1217_exit);
+
+MODULE_AUTHOR("Ramesh Agarwal <ramesh.agarwal@intel.com>");
+MODULE_DESCRIPTION("Synaptics TM1217 TouchScreen Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/cptm1217/cp_tm1217.h b/drivers/staging/cptm1217/cp_tm1217.h
new file mode 100644
index 0000000..a0ce31d
--- /dev/null
+++ b/drivers/staging/cptm1217/cp_tm1217.h
@@ -0,0 +1,9 @@
+#ifndef __LINUX_I2C_CP_TM1217_H
+#define __LINUX_I2C_CP_TM1217_H
+
+struct cp_tm1217_platform_data
+{
+	int gpio;		/* If not set uses the IRQ resource 0 */
+};
+
+#endif
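
The platform data above carries only an optional interrupt GPIO. A board wanting to use it would typically pass it through the standard I2C board-info mechanism; the sketch below is illustrative only (the bus number, I2C address, GPIO number and the "cp_tm1217" name string are assumptions, not taken from this patch):

#include <linux/init.h>
#include <linux/i2c.h>
#include "cp_tm1217.h"

/* Hypothetical board-support code. */
static struct cp_tm1217_platform_data cp_tm1217_pdata = {
	.gpio = 57,		/* interrupt GPIO; leave 0 to use IRQ resource 0 */
};

static struct i2c_board_info cp_tm1217_info __initdata = {
	I2C_BOARD_INFO("cp_tm1217", 0x20),	/* name must match the driver's id table */
	.platform_data = &cp_tm1217_pdata,
};

static int __init board_add_touch(void)
{
	/* Register on I2C bus 0 before the adapter is instantiated. */
	return i2c_register_board_info(0, &cp_tm1217_info, 1);
}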
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index f631857..153ddbf 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -1711,7 +1711,7 @@
 	}
 
 	BCMLOG(BCMLOG_INFO, "Firmware Downloaded Successfully\n");
-	return BC_STS_SUCCESS;;
+	return BC_STS_SUCCESS;
 }
 
 enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 28c6b8c..719e70b 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -516,7 +516,7 @@
 
 	BCMLOG_ENTER;
 
-	pinfo = (struct crystalhd_adp *) pci_get_drvdata(pdev);
+	pinfo = pci_get_drvdata(pdev);
 	if (!pinfo) {
 		BCMLOG_ERR("could not get adp\n");
 		return;
@@ -626,7 +626,7 @@
 	struct crystalhd_ioctl_data *temp;
 	enum BC_STATUS sts = BC_STS_SUCCESS;
 
-	adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
+	adp = pci_get_drvdata(pdev);
 	if (!adp) {
 		BCMLOG_ERR("could not get adp\n");
 		return -ENODEV;
@@ -660,7 +660,7 @@
 	enum BC_STATUS sts = BC_STS_SUCCESS;
 	int rc;
 
-	adp = (struct crystalhd_adp *)pci_get_drvdata(pdev);
+	adp = pci_get_drvdata(pdev);
 	if (!adp) {
 		BCMLOG_ERR("could not get adp\n");
 		return -ENODEV;
diff --git a/drivers/staging/cs5535_gpio/Kconfig b/drivers/staging/cs5535_gpio/Kconfig
new file mode 100644
index 0000000..a1b3a8d
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Kconfig
@@ -0,0 +1,11 @@
+config CS5535_GPIO
+	tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
+	depends on X86_32
+	help
+	  Note: this driver is DEPRECATED.  Please use the cs5535-gpio module
+	  in the GPIO section instead (CONFIG_GPIO_CS5535).
+
+	  Give userspace access to the GPIO pins on the AMD CS5535 and
+	  CS5536 Geode companion devices.
+
+	  If compiled as a module, it will be called cs5535_gpio.
diff --git a/drivers/staging/cs5535_gpio/Makefile b/drivers/staging/cs5535_gpio/Makefile
new file mode 100644
index 0000000..d67c4b8
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio.o
diff --git a/drivers/staging/cs5535_gpio/TODO b/drivers/staging/cs5535_gpio/TODO
new file mode 100644
index 0000000..98d1cd1
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/TODO
@@ -0,0 +1,6 @@
+This is an obsolete driver for some of the CS5535 and CS5536 southbridge GPIOs.
+It has been replaced by a driver that makes use of the Linux GPIO subsystem.
+Please switch to that driver, and let dilinger@queued.net know if there's
+anything missing from the new driver.
+
+This driver is scheduled for removal in 2.6.40.
diff --git a/drivers/char/cs5535_gpio.c b/drivers/staging/cs5535_gpio/cs5535_gpio.c
similarity index 100%
rename from drivers/char/cs5535_gpio.c
rename to drivers/staging/cs5535_gpio/cs5535_gpio.c
diff --git a/drivers/staging/cx25821/cx25821-alsa.c b/drivers/staging/cx25821/cx25821-alsa.c
index 9a205a3..160f669 100644
--- a/drivers/staging/cx25821/cx25821-alsa.c
+++ b/drivers/staging/cx25821/cx25821-alsa.c
@@ -630,7 +630,7 @@
  * Only boards with eeprom and byte 1 at eeprom=1 have it
  */
 
-static const struct pci_device_id cx25821_audio_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cx25821_audio_pci_tbl) = {
 	{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0,}
 };
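
The switch away from an explicitly __devinitdata-annotated table is mainly a section-placement fix: in kernels of this vintage DEFINE_PCI_DEVICE_TABLE() expands to a const pci_device_id array marked __devinitconst, so the changed line is roughly equivalent to this sketch:

/* Approximate expansion of the DEFINE_PCI_DEVICE_TABLE() line above. */
static const struct pci_device_id cx25821_audio_pci_tbl[] __devinitconst = {
	{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}
};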
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index dcbe6b6..52224cd 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -29,7 +29,7 @@
 #endif
 
 
-extern int  log_level;
+extern int  cxt1e1_log_level;
 
 #define COMET_NUM_SAMPLES   24  /* Number of entries in the waveform table */
 #define COMET_NUM_UNITS     5   /* Number of points per entry in table */
@@ -292,12 +292,12 @@
                                                                  * i.e.FPMODE=0 (@0x20) */
         if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
         {
-            if (log_level >= LOG_SBEBUG12)
+            if (cxt1e1_log_level >= LOG_SBEBUG12)
                 pr_info(">> %s: clockmaster internal clock\n", __func__);
             pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* internal oscillator */
         } else                      /* external clock source */
         {
-            if (log_level >= LOG_SBEBUG12)
+            if (cxt1e1_log_level >= LOG_SBEBUG12)
                 pr_info(">> %s: clockmaster external clock\n", __func__);
             pci_write_32 ((u_int32_t *) &comet->tx_time, 0x09); /* loop timing
                                                                  * (external) */
@@ -312,7 +312,7 @@
             pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x21);        /* Slave Mode (CMODE=1) */
         pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, 0x20);  /* Slave Mode i.e.
                                                                  * FPMODE=1 (@0x20) */
-        if (log_level >= LOG_SBEBUG12)
+        if (cxt1e1_log_level >= LOG_SBEBUG12)
             pr_info(">> %s: clockslave internal clock\n", __func__);
         pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d);     /* oscillator timing */
     }
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index ab399c2..d9a9aa3 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -54,7 +54,7 @@
 
 #endif
 
-extern int  log_level;
+extern int  cxt1e1_log_level;
 extern int  drvr_state;
 
 
@@ -67,7 +67,7 @@
 
     FLUSH_PCI_READ ();
     v = le32_to_cpu (*p);
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
     return v;
 #else
@@ -80,7 +80,7 @@
 pci_write_32 (u_int32_t *p, u_int32_t v)
 {
 #ifdef FLOW_DEBUG
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
 #endif
     *p = cpu_to_le32 (v);
@@ -118,7 +118,7 @@
 
     if (drvr_state != SBE_DRVR_AVAILABLE)
     {
-        if (log_level >= LOG_MONITOR)
+        if (cxt1e1_log_level >= LOG_MONITOR)
             pr_warning("%s: drvr not available (%x)\n", __func__, drvr_state);
         return;
     }
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 89200e7..c517cc2 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -37,7 +37,7 @@
 #define STATIC  static
 #endif
 
-extern int  log_level;
+extern int  cxt1e1_log_level;
 extern int  error_flag;
 extern int  drvr_state;
 
@@ -143,7 +143,7 @@
     if ((hi->promfmt = pmc_verify_cksum (&hi->mfg_info.data)) == PROM_FORMAT_Unk)
     {
         /* bad crc, data is suspect */
-        if (log_level >= LOG_WARN)
+        if (cxt1e1_log_level >= LOG_WARN)
             pr_info("%s: EEPROM cksum error\n", hi->devname);
         hi->mfg_info_sts = EEPROM_CRCERR;
     } else
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index c793028..0f78f89 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -108,9 +108,9 @@
 #endif
 
 int         error_flag;         /* module load error reporting */
-int         log_level = LOG_ERROR;
+int         cxt1e1_log_level = LOG_ERROR;
 int         log_level_default = LOG_ERROR;
-module_param(log_level, int, 0444);
+module_param(cxt1e1_log_level, int, 0444);
 
 int         cxt1e1_max_mru = MUSYCC_MRU;
 int         max_mru_default = MUSYCC_MRU;
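
Because module_param() derives the user-visible parameter name from the variable name, this rename also changes the module option from log_level to cxt1e1_log_level. If keeping the old option name had been desired, module_param_named() could decouple the two; a sketch, not part of this patch:

int cxt1e1_log_level = LOG_ERROR;
/* Exposes the option as "log_level" while using the prefixed C symbol. */
module_param_named(log_level, cxt1e1_log_level, int, 0444);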
@@ -497,7 +497,7 @@
     rtnl_lock ();                   /* needed due to Ioctl calling sequence */
     if (ret)
     {
-        if (log_level >= LOG_WARN)
+        if (cxt1e1_log_level >= LOG_WARN)
             pr_info("%s: create_chan[%d] registration error = %d.\n",
                     ci->devname, cp->channum, ret);
         free_netdev (dev);          /* cleanup */
@@ -722,11 +722,11 @@
 STATIC      status_t
 do_set_loglevel (struct net_device * ndev, void *data)
 {
-    unsigned int log_level;
+    unsigned int cxt1e1_log_level;
 
-    if (copy_from_user (&log_level, data, sizeof (int)))
+    if (copy_from_user (&cxt1e1_log_level, data, sizeof (int)))
         return -EFAULT;
-    sbecom_set_loglevel (log_level);
+    sbecom_set_loglevel (cxt1e1_log_level);
     return 0;
 }
 
@@ -1115,9 +1115,9 @@
         return -rtn;                /* installation failure - see system log */
 
     /* housekeeping notifications */
-    if (log_level != log_level_default)
-        pr_info("NOTE: driver parameter <log_level> changed from default %d to %d.\n",
-                log_level_default, log_level);
+    if (cxt1e1_log_level != log_level_default)
+        pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n",
+                log_level_default, cxt1e1_log_level);
        if (cxt1e1_max_mru != max_mru_default)
                pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n",
                                max_mru_default, cxt1e1_max_mru);
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index fc15610..f274c77 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -97,7 +97,7 @@
 /* global driver variables */
 extern ci_t *c4_list;
 extern int  drvr_state;
-extern int  log_level;
+extern int  cxt1e1_log_level;
 
 extern int  cxt1e1_max_mru;
 extern int  cxt1e1_max_mtu;
@@ -627,7 +627,7 @@
 
     if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT))
     {
-        if (log_level >= LOG_MONITOR)
+        if (cxt1e1_log_level >= LOG_MONITOR)
             pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
                     pi->up->devname, rcnt, req, pi->sr_last, r,
                     (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
@@ -951,7 +951,7 @@
     ch = pi->chan[gchan];
     if (ch == 0 || ch->state != UP)
     {
-        if (log_level >= LOG_ERROR)
+        if (cxt1e1_log_level >= LOG_ERROR)
             pr_info("%s: intr: xmit EOM on uninitialized channel %d\n",
                     pi->up->devname, gchan);
     }
@@ -1002,7 +1002,7 @@
             }
             if (status & MUSYCC_TX_OWNED)
             {
-                if (log_level >= LOG_MONITOR)
+                if (cxt1e1_log_level >= LOG_MONITOR)
                 {
                     pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n",
                             pi->up->devname, pi->portnum, ch->channum,
@@ -1016,7 +1016,7 @@
                 break;              /* Not our mdesc, done */
             } else
             {
-                if (log_level >= LOG_MONITOR)
+                if (cxt1e1_log_level >= LOG_MONITOR)
                     pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n",
                             pi->up->devname, pi->portnum, ch->channum, readCount, md, status);
             }
@@ -1054,7 +1054,7 @@
         }
         md->status = 0;
 #ifdef RLD_TXFULL_DEBUG
-        if (log_level >= LOG_MONITOR2)
+        if (cxt1e1_log_level >= LOG_MONITOR2)
             pr_info("~~ tx_eom: tx_full %x  txd_free %d -> %d\n",
                     ch->tx_full, ch->txd_free, ch->txd_free + 1);
 #endif
@@ -1063,7 +1063,7 @@
 
         if ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE))
         {
-            if (log_level >= LOG_MONITOR)
+            if (cxt1e1_log_level >= LOG_MONITOR)
                 pr_info("%s: Mode (%x) incorrect EOB status (%x)\n",
                         pi->up->devname, ch->p.chan_mode, status);
             if ((status & EOMIRQ_ENABLE) == 0)
@@ -1094,7 +1094,7 @@
         {
 
 #ifdef RLD_TXFULL_DEBUG
-            if (log_level >= LOG_MONITOR2)
+            if (cxt1e1_log_level >= LOG_MONITOR2)
                 pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n",
                         ch->channum,
                         ch->txd_free, ch->txd_num / 2);
@@ -1108,7 +1108,7 @@
 #ifdef RLD_TXFULL_DEBUG
     else if (ch->tx_full)
     {
-        if (log_level >= LOG_MONITOR2)
+        if (cxt1e1_log_level >= LOG_MONITOR2)
             pr_info("tx_eom[%d]: bypass TX enable though room available? (txd_free %d txd_num/2 %d)\n",
                     ch->channum,
                     ch->txd_free, ch->txd_num / 2);
@@ -1138,7 +1138,7 @@
     ch = pi->chan[gchan];
     if (ch == 0 || ch->state != UP)
     {
-        if (log_level > LOG_ERROR)
+        if (cxt1e1_log_level > LOG_ERROR)
             pr_info("%s: intr: receive EOM on uninitialized channel %d\n",
                     pi->up->devname, gchan);
         return;
@@ -1269,7 +1269,7 @@
 
     if (nextInt != INTRPTS_NEXTINT (ci->intlog.this_status_new))
     {
-        if (log_level >= LOG_MONITOR)
+        if (cxt1e1_log_level >= LOG_MONITOR)
         {
             pr_info("%s: note - updated ISD from %08x to %08x\n",
                     ci->devname, status,
@@ -1337,11 +1337,11 @@
     ci->intlog.last_status_new = ci->intlog.this_status_new;
     ci->intlog.this_status_new = currInt;
 
-    if ((log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
+    if ((cxt1e1_log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M))
     {
         pr_info("%s: Interrupt queue full condition occurred\n", ci->devname);
     }
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n",
                 ci->devname, &ci->reg->isd,
         status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1));
@@ -1448,7 +1448,7 @@
         if ((currInt == badInt) || (currInt == badInt2))        /* catch failure of Bug
                                                                  * Fix checking */
         {
-            if (log_level >= LOG_WARN)
+            if (cxt1e1_log_level >= LOG_WARN)
                 pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.)\n",
                         ci->devname, &ci->iqd_p[headx], headx);
 
@@ -1483,7 +1483,7 @@
         ci->iqd_p[headx] = __constant_cpu_to_le32 (INT_EMPTY_ENTRY);
         FLUSH_MEM_WRITE ();
 
-        if (log_level >= LOG_DEBUG)
+        if (cxt1e1_log_level >= LOG_DEBUG)
         {
             if (err != 0)
                 pr_info(" %08x -> err: %2d,", currInt, err);
@@ -1497,7 +1497,7 @@
         switch (event)
         {
         case EVE_SACK:              /* Service Request Acknowledge */
-            if (log_level >= LOG_DEBUG)
+            if (cxt1e1_log_level >= LOG_DEBUG)
             {
                 volatile u_int32_t r;
 
@@ -1534,7 +1534,7 @@
             }
             break;
         default:
-            if (log_level >= LOG_WARN)
+            if (cxt1e1_log_level >= LOG_WARN)
                 pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname,
                         event, headx, currInt, group);
             break;
@@ -1573,9 +1573,9 @@
 
                 {
 #ifdef RLD_TRANS_DEBUG
-                    if (1 || log_level >= LOG_MONITOR)
+                    if (1 || cxt1e1_log_level >= LOG_MONITOR)
 #else
-                    if (log_level >= LOG_MONITOR)
+                    if (cxt1e1_log_level >= LOG_MONITOR)
 #endif
                     {
                         pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n",
@@ -1605,7 +1605,7 @@
                 ch->s.rx_over_errors++;
                 ch->ch_start_rx = CH_START_RX_ONR;
 
-                if (log_level >= LOG_WARN)
+                if (cxt1e1_log_level >= LOG_WARN)
                 {
                     pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n",
                             ci->devname, ch->channum, ch->p.chan_mode);
@@ -1623,7 +1623,7 @@
                  * Per MUSYCC manual, Section  6.4.8.3 [Transmit Errors],
                  * this BUFF error requires Transmit channel reactivation.
                  */
-                if (log_level >= LOG_MONITOR)
+                if (cxt1e1_log_level >= LOG_MONITOR)
                     pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n",
                             ci->devname, ch->channum, ch->p.chan_mode);
             } else                  /* RX buffer overrun */
@@ -1636,7 +1636,7 @@
                  * space for this channel.  Receive channel reactivation is
                  * not required, but data has been lost.
                  */
-                if (log_level >= LOG_WARN)
+                if (cxt1e1_log_level >= LOG_WARN)
                     pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n",
                             ci->devname, ch->channum, ch->p.chan_mode);
                 /*
@@ -1658,7 +1658,7 @@
         }                           /* switch on err */
 
         /* Check for interrupt lost condition */
-        if ((currInt & INTRPT_ILOST_M) && (log_level >= LOG_ERROR))
+        if ((currInt & INTRPT_ILOST_M) && (cxt1e1_log_level >= LOG_ERROR))
         {
             pr_info("%s: Interrupt queue overflow - ILOST asserted\n",
                     ci->devname);
@@ -1667,7 +1667,7 @@
         FLUSH_MEM_WRITE ();
         FLUSH_MEM_READ ();
     }                               /* while */
-    if ((log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
+    if ((cxt1e1_log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx))
     {
         int         bh;
 
@@ -1821,9 +1821,9 @@
         return EROFS;               /* how else to flag unwritable state ? */
 
 #ifdef RLD_TRANS_DEBUGx
-    if (1 || log_level >= LOG_MONITOR2)
+    if (1 || cxt1e1_log_level >= LOG_MONITOR2)
 #else
-    if (log_level >= LOG_MONITOR2)
+    if (cxt1e1_log_level >= LOG_MONITOR2)
 #endif
     {
         pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n",
@@ -1846,7 +1846,7 @@
 
     if (txd_need_cnt == 0)
     {
-        if (log_level >= LOG_MONITOR2)
+        if (cxt1e1_log_level >= LOG_MONITOR2)
             pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum);
         OS_mem_token_free (mem_token);
         return 0;                   /* no data to send */
@@ -1857,7 +1857,7 @@
     if (txd_need_cnt > ch->txd_num) /* never enough descriptors for this
                                      * large a buffer */
     {
-        if (log_level >= LOG_DEBUG)
+        if (cxt1e1_log_level >= LOG_DEBUG)
         {
             pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n",
                     ch->txd_num, txd_need_cnt + 1);
@@ -1874,7 +1874,7 @@
     /************************************************************/
     if (txd_need_cnt > ch->txd_free)
     {
-        if (log_level >= LOG_MONITOR2)
+        if (cxt1e1_log_level >= LOG_MONITOR2)
         {
             pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n",
                     channum, ch->txd_free, ch->txd_num, txd_need_cnt);
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 5c8a3eb..341e7a9 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -135,7 +135,7 @@
 void        musycc_update_timeslots (mpi_t *);
 
 extern void musycc_update_tx_thp (mch_t *);
-extern int  log_level;
+extern int  cxt1e1_log_level;
 extern int  cxt1e1_max_mru;
 extern int  cxt1e1_max_mtu;
 extern int  max_rxdesc_used, max_rxdesc_default;
@@ -168,12 +168,12 @@
                                                          * for card 0 only */
     } else
     {
-        if (log_level != d)
+        if (cxt1e1_log_level != d)
         {
-            pr_info("log level changed from %d to %d\n", log_level, d);
-            log_level = d;          /* set new */
+            pr_info("log level changed from %d to %d\n", cxt1e1_log_level, d);
+            cxt1e1_log_level = d;          /* set new */
         } else
-            pr_info("log level is %d\n", log_level);
+            pr_info("log level is %d\n", cxt1e1_log_level);
     }
 }
 
@@ -513,7 +513,7 @@
             if ((value == 0x1c) || (value == 0x19) || (value == 0x12))
                 c4_loop_port (ci, portnum, COMET_MDIAG_LBOFF);  /* take port out of any
                                                                  * loopbk mode */
-            if (log_level >= LOG_DEBUG)
+            if (cxt1e1_log_level >= LOG_DEBUG)
                 if (value != 0x3f)
                     pr_warning("%s: BOC value = %x on Port %d\n",
                                ci->devname, value, portnum);
@@ -533,7 +533,7 @@
 {
     if (drvr_state != SBE_DRVR_AVAILABLE)
     {
-        if (log_level >= LOG_MONITOR)
+        if (cxt1e1_log_level >= LOG_MONITOR)
             pr_info("drvr not available (%x)\n", drvr_state);
         return;
     }
@@ -794,19 +794,19 @@
         }
 
         pci_write_32 ((u_int32_t *) &comet->mdiag, cmd);
-        if (log_level >= LOG_WARN)
+        if (cxt1e1_log_level >= LOG_WARN)
             pr_info("%s: loopback mode changed to %2x from %2x on Port %d\n",
                     ci->devname, cmd, loopValue, portnum);
         loopValue = pci_read_32 ((u_int32_t *) &comet->mdiag) & COMET_MDIAG_LBMASK;
         if (loopValue != cmd)
         {
-            if (log_level >= LOG_ERROR)
+            if (cxt1e1_log_level >= LOG_ERROR)
                 pr_info("%s: write to loop register failed, unknown state for Port %d\n",
                         ci->devname, portnum);
         }
     } else
     {
-        if (log_level >= LOG_WARN)
+        if (cxt1e1_log_level >= LOG_WARN)
             pr_info("%s: loopback already in that mode (%2x)\n",
                     ci->devname, loopValue);
     }
@@ -997,7 +997,7 @@
     pi = &ci->port[portnum];
     pp = &ci->port[portnum].p;
     e1mode = IS_FRAME_ANY_E1 (pp->port_mode);
-    if (log_level >= LOG_MONITOR2)
+    if (cxt1e1_log_level >= LOG_MONITOR2)
     {
         pr_info("%s: c4_set_port[%d]:  entered, e1mode = %x, openchans %d.\n",
                 ci->devname,
@@ -1278,12 +1278,12 @@
     }
     if (max != *len)
     {
-        if (log_level >= LOG_WARN)
+        if (cxt1e1_log_level >= LOG_WARN)
             pr_info("%s: wanted to allocate %d fifo space, but got only %d\n",
                     pi->up->devname, *len, max);
         *len = max;
     }
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("%s: allocated %d fifo at %d for channel %d/%d\n",
                 pi->up->devname, max, start, chan, pi->p.portnum);
     for (i = maxstart; i < (maxstart + max); i++)
@@ -1296,7 +1296,7 @@
 {
     int         i;
 
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("%s: deallocated fifo for channel %d/%d\n",
                 pi->up->devname, chan, pi->p.portnum);
     for (i = 0; i < 32; i++)
@@ -1321,7 +1321,7 @@
         return ENOENT;
     if (ch->state == UP)
     {
-        if (log_level >= LOG_MONITOR)
+        if (cxt1e1_log_level >= LOG_MONITOR)
             pr_info("%s: channel already UP, graceful early exit\n",
                     ci->devname);
         return 0;
@@ -1334,7 +1334,7 @@
     {
         if (ch->p.bitmask[i] & pi->tsm[i])
         {
-            if (1 || log_level >= LOG_WARN)
+            if (1 || cxt1e1_log_level >= LOG_WARN)
             {
                 pr_info("%s: c4_chan_up[%d] EINVAL (attempt to cfg in-use or unavailable TimeSlot[%d])\n",
                         ci->devname, channum, i);
@@ -1351,7 +1351,7 @@
     nbuf = nts / 8 ? nts / 8 : 1;
     if (!nbuf)
     {
-        /* if( log_level >= LOG_WARN)  */
+        /* if( cxt1e1_log_level >= LOG_WARN)  */
         pr_info("%s: c4_chan_up[%d] ENOBUFS (no TimeSlots assigned)\n",
                 ci->devname, channum);
         return ENOBUFS;             /* this should not happen */
@@ -1420,7 +1420,7 @@
 
 #if 0
     /* DEBUG INFO */
-    if (log_level >= LOG_MONITOR)
+    if (cxt1e1_log_level >= LOG_MONITOR)
         pr_info("%s: mode %x rxnum %d (rxused %d def %d) txnum %d (txused %d def %d)\n",
                 ci->devname, ch->p.chan_mode,
                 rxnum, max_rxdesc_used, max_rxdesc_default,
@@ -1451,7 +1451,7 @@
 
                if (!(m = OS_mem_token_alloc (cxt1e1_max_mru)))
         {
-            if (log_level >= LOG_MONITOR)
+            if (cxt1e1_log_level >= LOG_MONITOR)
                 pr_info("%s: c4_chan_up[%d] - token alloc failure, size = %d.\n",
                                                ci->devname, channum, cxt1e1_max_mru);
             goto errfree;
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 5a72cb5..501a331 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -86,7 +86,7 @@
 
     FLUSH_PCI_READ ();
     v = le32_to_cpu (*p);
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("pci_read : %x = %x\n", (u_int32_t) p, v);
     return v;
 #else
@@ -99,7 +99,7 @@
 pci_write_32 (u_int32_t *p, u_int32_t v)
 {
 #ifdef FLOW_DEBUG
-    if (log_level >= LOG_DEBUG)
+    if (cxt1e1_log_level >= LOG_DEBUG)
         pr_info("pci_write: %x = %x\n", (u_int32_t) p, v);
 #endif
     *p = cpu_to_le32 (v);
diff --git a/drivers/staging/easycap/Kconfig b/drivers/staging/easycap/Kconfig
index 9d5fe4d..bd96f39 100644
--- a/drivers/staging/easycap/Kconfig
+++ b/drivers/staging/easycap/Kconfig
@@ -1,7 +1,6 @@
 config EASYCAP
 	tristate "EasyCAP USB ID 05e1:0408 support"
 	depends on USB && VIDEO_DEV
-	depends on BKL # please fix
 
 	---help---
 	  This is an integrated audio/video driver for EasyCAP cards with
diff --git a/drivers/staging/easycap/Makefile b/drivers/staging/easycap/Makefile
index 8a3d911..f1f2fbe 100644
--- a/drivers/staging/easycap/Makefile
+++ b/drivers/staging/easycap/Makefile
@@ -10,4 +10,5 @@
 ccflags-y += -DEASYCAP_IS_VIDEODEV_CLIENT
 ccflags-y += -DEASYCAP_NEEDS_V4L2_DEVICE_H
 ccflags-y += -DEASYCAP_NEEDS_V4L2_FOPS
+ccflags-y += -DEASYCAP_NEEDS_UNLOCKED_IOCTL
 
diff --git a/drivers/staging/easycap/README b/drivers/staging/easycap/README
index 3775481..6b5ac0d 100644
--- a/drivers/staging/easycap/README
+++ b/drivers/staging/easycap/README
@@ -24,6 +24,9 @@
 BUILD OPTIONS AND DEPENDENCIES
 ------------------------------
 
+Unless EASYCAP_DEBUG is defined during compilation it will not be possible
+to select a debug level at the time of module installation.
+
 If the parameter EASYCAP_IS_VIDEODEV_CLIENT is undefined during compilation
 the built module is entirely independent of the videodev module, and when
 the EasyCAP is physically plugged into a USB port the special files
@@ -33,41 +36,54 @@
 If the parameter EASYCAP_IS_VIDEODEV_CLIENT is defined during compilation
 the built easycap module is configured to register with the videodev module,
 in which case the special files created when the EasyCAP is plugged in are
-/dev/video0 and /dev/easysnd0.  Use of the easycap module as a client of
-the videodev module has received very little testing as of June 2010.
+/dev/video0 and /dev/easysnd0.
 
+During in-tree builds the following should be defined whenever the
+parameter EASYCAP_IS_VIDEODEV_CLIENT is defined:
 
-KNOWN BUILD PROBLEMS
---------------------
+EASYCAP_NEEDS_V4L2_DEVICE_H
+EASYCAP_NEEDS_V4L2_FOPS
+EASYCAP_NEEDS_UNLOCKED_IOCTL
 
-(1) Recent gcc versions may generate the message:
-
-     warning: the frame size of .... bytes is larger than 1024 bytes
-
-This warning can be suppressed by specifying in the Makefile:
-
-     EXTRA_CFLAGS += -Wframe-larger-than=8192
-
-but it would be preferable to remove the cause of the warning.
+If the build is performed out-of-tree against older kernels the parameters
+to be defined depend on the kernel version in a way which will not be
+discussed here.
 
 
 KNOWN RUNTIME ISSUES
 --------------------
 
-(1) Randomly (maybe 5 to 10% of occasions) the driver fails to produce any
-output at start-up.  Closing mplayer (or whatever the user program is) and
-restarting it restores normal performance without any other remedial action
-being necessary.  The reason for this is not known.
+(1) Intentionally, this driver will not stream material which is unambiguously
+identified by the hardware as copy-protected.  Normal video output will be
+present for about a minute but will then freeze when this situation arises.
 
-(2) Intentionally, this driver will not stream material which is unambiguously
-identified by the hardware as copy-protected.  The video output will freeze
-within about a minute when this situation arises.
-
-(3) The controls for luminance, contrast, saturation, hue and volume may not
+(2) The controls for luminance, contrast, saturation, hue and volume may not
 always work properly.
 
-(4) Reduced-resolution S-Video seems to suffer from moire artefacts.  No
-attempt has yet been made to rememdy this.
+(3) Reduced-resolution S-Video seems to suffer from moire artefacts.
+
+
+INPUT NUMBERING
+---------------
+
+For the EasyCAP with S-VIDEO input cable the driver regards a request for
+inputs numbered 0 or 1 as referring to CVBS and a request for input
+numbered 5 as referring to S-VIDEO.
+
+For the EasyCAP with four CVBS inputs the driver expects to be asked for
+any one of inputs numbered 1,2,3,4.  If input 0 is asked for, it is
+interpreted as input 1.
+
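These input numbers are selected through the ordinary V4L2 input ioctl; a minimal userspace sketch (the device path is illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* path is illustrative */
	int input = 5;				/* S-VIDEO on the CVBS+S-VIDEO variant */

	if (fd < 0 || ioctl(fd, VIDIOC_S_INPUT, &input) < 0)
		perror("VIDIOC_S_INPUT");
	return 0;
}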
+
+MODULE PARAMETERS
+-----------------
+
+Three module parameters are defined:
+
+debug      the easycap module is configured at diagnostic level n (0 to 9)
+gain       audio gain level n (0 to 31, default is 16)
+bars       0 =>  no testcard bars when incoming video signal is lost
+           1 =>  testcard bars when incoming video signal is lost (default)
 
 
 SUPPORTED TV STANDARDS AND RESOLUTIONS
@@ -82,18 +98,29 @@
     PAL_60,       NTSC_443,
     PAL_M.
 
+In addition, the driver offers "custom" pseudo-standards with a framerate
+which is 20% of the usual framerate.  These pseudo-standards are named:
+
+    PAL_BGHIN_SLOW,    NTSC_N_443_SLOW,
+    PAL_Nc_SLOW,       NTSC_N_SLOW,
+    SECAM_SLOW,        NTSC_M_SLOW,        NTSC_M_JP_SLOW,
+    PAL_60_SLOW,       NTSC_443_SLOW,
+    PAL_M_SLOW.
+
+
 The available picture sizes are:
 
      at 25 frames per second:   720x576, 704x576, 640x480, 360x288, 320x240;
-     at 30 frames per second:   720x480, 640x480, 360x240, 320x240;
+     at 30 frames per second:   720x480, 640x480, 360x240, 320x240.
 
 
 WHAT'S TESTED AND WHAT'S NOT
 ----------------------------
 
-This driver is known to work with mplayer, mencoder, tvtime and sufficiently
-recent versions of vlc.  An interface to ffmpeg is implemented, but serious
-audio-video synchronization problems remain.
+This driver is known to work with mplayer, mencoder, tvtime, zoneminder,
+xawtv, gstreamer and sufficiently recent versions of vlc.  An interface
+to ffmpeg is implemented, but serious audio-video synchronization problems
+remain.
 
 The driver is designed to support all the TV standards accepted by the
 hardware, but as yet it has actually been tested on only a few of these.
@@ -101,10 +128,7 @@
 I have been unable to test and calibrate the S-video input myself because I
 do not possess any equipment with S-video output.
 
-This driver does not understand the V4L1 IOCTL commands, so programs such
-as camorama are not compatible.  There are reports that the driver does
-work with sufficiently recent (V4L2) versions of zoneminder, but I have not
-attempted to confirm this myself.
+This driver does not understand the V4L1 IOCTL commands.
 
 
 UDEV RULES
@@ -120,6 +144,17 @@
 LABEL="easycap_rules_end"
 
 
+MODPROBE CONFIGURATION
+----------------------
+
+The easycap module is in competition with the module snd-usb-audio for the
+EasyCAP's audio channel, and its installation can be aided by providing a
+file in directory /etc/modprobe.d with content:
+
+options easycap  gain=16 bars=1
+install easycap /sbin/rmmod snd-usb-audio; /sbin/modprobe --ignore-install easycap
+
+
 ACKNOWLEGEMENTS AND REFERENCES
 ------------------------------
 This driver makes use of information contained in the Syntek Semicon DC-1125
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h
index 884263b..8ebf96f 100644
--- a/drivers/staging/easycap/easycap.h
+++ b/drivers/staging/easycap/easycap.h
@@ -33,6 +33,7 @@
  *                EASYCAP_NEEDS_USBVIDEO_H
  *                EASYCAP_NEEDS_V4L2_DEVICE_H
  *                EASYCAP_NEEDS_V4L2_FOPS
+ *                EASYCAP_NEEDS_UNLOCKED_IOCTL
  *
  *  IF REQUIRED THEY MUST BE EXTERNALLY DEFINED, FOR EXAMPLE AS COMPILER
  *  OPTIONS.
@@ -42,35 +43,24 @@
 #if (!defined(EASYCAP_H))
 #define EASYCAP_H
 
-#if defined(EASYCAP_DEBUG)
-#if (9 < EASYCAP_DEBUG)
-#error Debug levels 0 to 9 are okay.\
-  To achieve higher levels, remove this trap manually from easycap.h
-#endif
-#endif /*EASYCAP_DEBUG*/
+/*---------------------------------------------------------------------------*/
+/*
+ *  THESE ARE NORMALLY DEFINED
+ */
+/*---------------------------------------------------------------------------*/
+#define  PATIENCE  500
+#undef   PREFER_NTSC
+#define  PERSEVERE
 /*---------------------------------------------------------------------------*/
 /*
  *  THESE ARE FOR MAINTENANCE ONLY - NORMALLY UNDEFINED:
  */
 /*---------------------------------------------------------------------------*/
-#undef  PREFER_NTSC
 #undef  EASYCAP_TESTCARD
 #undef  EASYCAP_TESTTONE
-#undef  LOCKFRAME
 #undef  NOREADBACK
 #undef  AUDIOTIME
 /*---------------------------------------------------------------------------*/
-/*
- *
- *  DEFINE   BRIDGER   TO ACTIVATE THE ROUTINE FOR BRIDGING VIDEOTAPE DROPOUTS.
- *
- *             *** UNDER DEVELOPMENT/TESTING - NOT READY YET!***
- *
- */
-/*---------------------------------------------------------------------------*/
-#undef  BRIDGER
-/*---------------------------------------------------------------------------*/
-
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
@@ -92,25 +82,14 @@
 
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #if defined(EASYCAP_IS_VIDEODEV_CLIENT)
-#if (!defined(__OLD_VIDIOC_))
-#define __OLD_VIDIOC_
-#endif /* !defined(__OLD_VIDIOC_) */
-
 #include <media/v4l2-dev.h>
-
 #if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
 #include <media/v4l2-device.h>
 #endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
 #endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
 /*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-
-#if (!defined(__OLD_VIDIOC_))
-#define __OLD_VIDIOC_
-#endif /* !defined(__OLD_VIDIOC_) */
 #include <linux/videodev2.h>
-
 #include <linux/soundcard.h>
-
 #if defined(EASYCAP_NEEDS_USBVIDEO_H)
 #include <config/video/usbvideo.h>
 #endif /*EASYCAP_NEEDS_USBVIDEO_H*/
@@ -121,7 +100,6 @@
 
 #define STRINGIZE_AGAIN(x) #x
 #define STRINGIZE(x) STRINGIZE_AGAIN(x)
-
 /*---------------------------------------------------------------------------*/
 /*  VENDOR, PRODUCT:  Syntek Semiconductor Co., Ltd
  *
@@ -135,12 +113,12 @@
 #define USB_EASYCAP_VENDOR_ID	0x05e1
 #define USB_EASYCAP_PRODUCT_ID	0x0408
 
-#define EASYCAP_DRIVER_VERSION "0.8.21"
+#define EASYCAP_DRIVER_VERSION "0.8.41"
 #define EASYCAP_DRIVER_DESCRIPTION "easycapdc60"
 
 #define USB_SKEL_MINOR_BASE     192
-#define VIDEO_DEVICE_MANY 8
-
+#define DONGLE_MANY 8
+#define INPUT_MANY 6
 /*---------------------------------------------------------------------------*/
 /*
  *  DEFAULT LUMINANCE, CONTRAST, SATURATION AND HUE
@@ -164,6 +142,8 @@
 #if (USB_2_0_MAXPACKETSIZE > PAGE_SIZE)
 #error video_isoc_buffer[.] will not be big enough
 #endif
+#define VIDEO_JUNK_TOLERATE VIDEO_ISOC_BUFFER_MANY
+#define VIDEO_LOST_TOLERATE 50
 /*---------------------------------------------------------------------------*/
 /*
  *  VIDEO BUFFERS
@@ -210,7 +190,17 @@
 #define  NTSC_M_JP      5
 #define  PAL_60         7
 #define  PAL_M          9
-#define  STANDARD_MANY 10
+#define  PAL_BGHIN_SLOW    10
+#define  PAL_Nc_SLOW       12
+#define  SECAM_SLOW        14
+#define  NTSC_N_SLOW       16
+#define  NTSC_N_443_SLOW   18
+#define  NTSC_M_SLOW       11
+#define  NTSC_443_SLOW     13
+#define  NTSC_M_JP_SLOW    15
+#define  PAL_60_SLOW       17
+#define  PAL_M_SLOW        19
+#define  STANDARD_MANY 20
 /*---------------------------------------------------------------------------*/
 /*
  *  ENUMS
@@ -238,7 +228,6 @@
 enum {
 FIELD_NONE,
 FIELD_INTERLACED,
-FIELD_ALTERNATE,
 INTERLACE_MANY
 };
 #define SETTINGS_MANY	(STANDARD_MANY * \
@@ -251,11 +240,18 @@
  *  STRUCTURE DEFINITIONS
  */
 /*---------------------------------------------------------------------------*/
+struct easycap_dongle {
+struct easycap *peasycap;
+struct mutex mutex_video;
+struct mutex mutex_audio;
+};
+/*---------------------------------------------------------------------------*/
 struct data_buffer {
 struct list_head list_head;
 void *pgo;
 void *pto;
 __u16 kount;
+__u16 input;
 };
 /*---------------------------------------------------------------------------*/
 struct data_urb {
@@ -274,6 +270,22 @@
 char name[128];
 struct v4l2_format v4l2_format;
 };
+struct inputset {
+int input;
+int input_ok;
+int standard_offset;
+int standard_offset_ok;
+int format_offset;
+int format_offset_ok;
+int brightness;
+int brightness_ok;
+int contrast;
+int contrast_ok;
+int saturation;
+int saturation_ok;
+int hue;
+int hue_ok;
+};
 /*---------------------------------------------------------------------------*/
 /*
  *   easycap.ilk == 0   =>  CVBS+S-VIDEO HARDWARE, AUDIO wMaxPacketSize=256
@@ -282,6 +294,19 @@
  */
 /*---------------------------------------------------------------------------*/
 struct easycap {
+#define TELLTALE "expectedstring"
+char telltale[16];
+int isdongle;
+
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+struct video_device video_device;
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+struct v4l2_device v4l2_device;
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+int status;
 unsigned int audio_pages_per_fragment;
 unsigned int audio_bytes_per_fragment;
 unsigned int audio_buffer_page_many;
@@ -291,26 +316,14 @@
 __s16 oldaudio;
 #endif /*UPSAMPLE*/
 
-struct easycap_format easycap_format[1 + SETTINGS_MANY];
-
 int ilk;
 bool microphone;
 
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
-#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
-struct video_device *pvideo_device;
-#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-
 struct usb_device *pusb_device;
 struct usb_interface *pusb_interface;
 
 struct kref kref;
 
-struct mutex mutex_mmap_video[FRAME_BUFFER_MANY];
-struct mutex mutex_timeval0;
-struct mutex mutex_timeval1;
-
 int queued[FRAME_BUFFER_MANY];
 int done[FRAME_BUFFER_MANY];
 
@@ -321,16 +334,24 @@
 int polled;
 int standard_offset;
 int format_offset;
+struct inputset inputset[INPUT_MANY];
 
+bool ntsc;
 int fps;
 int usec;
 int tolerate;
+int skip;
+int skipped;
+int lost[INPUT_MANY];
 int merit[180];
 
 struct timeval timeval0;
 struct timeval timeval1;
 struct timeval timeval2;
+struct timeval timeval3;
+struct timeval timeval6;
 struct timeval timeval7;
+struct timeval timeval8;
 long long int dnbydt;
 
 int    video_interface;
@@ -347,8 +368,6 @@
 int    video_eof;
 int    video_junk;
 
-int    fudge;
-
 struct data_buffer video_isoc_buffer[VIDEO_ISOC_BUFFER_MANY];
 struct data_buffer \
 	     field_buffer[FIELD_BUFFER_MANY][(FIELD_BUFFER_SIZE/PAGE_SIZE)];
@@ -358,6 +377,13 @@
 struct list_head urb_video_head;
 struct list_head *purb_video_head;
 
+__u8 cache[8];
+__u8 *pcache;
+int video_mt;
+int audio_mt;
+long long audio_bytes;
+__u32 isequence;
+
 int vma_many;
 
 /*---------------------------------------------------------------------------*/
@@ -383,7 +409,6 @@
  */
 /*---------------------------------------------------------------------------*/
 __u32                   pixelformat;
-__u32                   field;
 int                     width;
 int                     height;
 int                     bytesperpixel;
@@ -463,8 +488,10 @@
 void             easycap_complete(struct urb *);
 int              easycap_open(struct inode *, struct file *);
 int              easycap_release(struct inode *, struct file *);
-long             easycap_ioctl(struct file *, unsigned int,  unsigned long);
-
+long             easycap_ioctl_noinode(struct file *, unsigned int, \
+								unsigned long);
+int              easycap_ioctl(struct inode *, struct file *, unsigned int, \
+								unsigned long);
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #if defined(EASYCAP_IS_VIDEODEV_CLIENT)
 int              easycap_open_noinode(struct file *);
@@ -489,12 +516,10 @@
 int              field2frame(struct easycap *);
 int              redaub(struct easycap *, void *, void *, \
 						int, int, __u8, __u8, bool);
-void             debrief(struct easycap *);
-void             sayreadonly(struct easycap *);
 void             easycap_testcard(struct easycap *, int);
-int              explain_ioctl(__u32);
-int              explain_cid(__u32);
 int              fillin_formats(void);
+int              reset(struct easycap *);
+int              newinput(struct easycap *, int);
 int              adjust_standard(struct easycap *, v4l2_std_id);
 int              adjust_format(struct easycap *, __u32, __u32, __u32, \
 								int, bool);
@@ -512,7 +537,10 @@
 ssize_t          easysnd_read(struct file *, char __user *, size_t, loff_t *);
 int              easysnd_open(struct inode *, struct file *);
 int              easysnd_release(struct inode *, struct file *);
-long             easysnd_ioctl(struct file *, unsigned int,  unsigned long);
+long             easysnd_ioctl_noinode(struct file *, unsigned int, \
+								unsigned long);
+int              easysnd_ioctl(struct inode *, struct file *, unsigned int, \
+								unsigned long);
 unsigned int     easysnd_poll(struct file *, poll_table *);
 void             easysnd_delete(struct kref *);
 int              submit_audio_urbs(struct easycap *);
@@ -532,11 +560,11 @@
 int              confirm_resolution(struct usb_device *);
 int              confirm_stream(struct usb_device *);
 
-int              setup_stk(struct usb_device *);
-int              setup_saa(struct usb_device *);
+int              setup_stk(struct usb_device *, bool);
+int              setup_saa(struct usb_device *, bool);
 int              setup_vt(struct usb_device *);
-int              check_stk(struct usb_device *);
-int              check_saa(struct usb_device *);
+int              check_stk(struct usb_device *, bool);
+int              check_saa(struct usb_device *, bool);
 int              ready_saa(struct usb_device *);
 int              merit_saa(struct usb_device *);
 int              check_vt(struct usb_device *);
@@ -554,12 +582,9 @@
 int              write_300(struct usb_device *);
 int              read_vt(struct usb_device *, __u16);
 int              write_vt(struct usb_device *, __u16, __u16);
-
-int              set2to78(struct usb_device *);
-int              set2to93(struct usb_device *);
-
 int              regset(struct usb_device *, __u16, __u16);
 int              regget(struct usb_device *, __u16, void *);
+int		isdongle(struct easycap *);
 /*---------------------------------------------------------------------------*/
 struct signed_div_result {
 long long int quotient;
@@ -587,24 +612,41 @@
 	} \
 } while (0)
 /*---------------------------------------------------------------------------*/
-
+/*
+ *  MACROS SAM(...) AND JOM(...) ALLOW DIAGNOSTIC OUTPUT TO BE TAGGED WITH
+ *  THE IDENTITY OF THE DONGLE TO WHICH IT APPLIES, BUT IF INVOKED WHEN THE
+ *  POINTER peasycap IS INVALID AN Oops IS LIKELY, AND ITS CAUSE MAY NOT BE
+ *  IMMEDIATELY OBVIOUS FROM A CASUAL READING OF THE SOURCE CODE.  BEWARE.
+*/
+/*---------------------------------------------------------------------------*/
 #define SAY(format, args...) do { \
-	printk(KERN_DEBUG "easycap: %s: " format, __func__, ##args); \
+	printk(KERN_DEBUG "easycap:: %s: " \
+			format, __func__, ##args); \
 } while (0)
-
+#define SAM(format, args...) do { \
+	printk(KERN_DEBUG "easycap::%i%s: " \
+			format, peasycap->isdongle, __func__, ##args);\
+} while (0)
 
 #if defined(EASYCAP_DEBUG)
 #define JOT(n, format, args...) do { \
 	if (n <= easycap_debug) { \
-		printk(KERN_DEBUG "easycap: %s: " format, __func__, ##args); \
+		printk(KERN_DEBUG "easycap:: %s: " \
+			format, __func__, ##args);\
 	} \
 } while (0)
+#define JOM(n, format, args...) do { \
+	if (n <= easycap_debug) { \
+		printk(KERN_DEBUG "easycap::%i%s: " \
+			format, peasycap->isdongle, __func__, ##args);\
+	} \
+} while (0)
+
 #else
 #define JOT(n, format, args...) do {} while (0)
+#define JOM(n, format, args...) do {} while (0)
 #endif /*EASYCAP_DEBUG*/
 
-#define POUT JOT(8, ":-(in file %s line %4i\n", __FILE__, __LINE__)
-
 #define MICROSECONDS(X, Y) \
 			((1000000*((long long int)(X.tv_sec - Y.tv_sec))) + \
 					(long long int)(X.tv_usec - Y.tv_usec))
diff --git a/drivers/staging/easycap/easycap_debug.h b/drivers/staging/easycap/easycap_debug.h
index 1d10d7e..b6b5718 100644
--- a/drivers/staging/easycap/easycap_debug.h
+++ b/drivers/staging/easycap/easycap_debug.h
@@ -25,3 +25,5 @@
 */
 /*****************************************************************************/
 extern int easycap_debug;
+extern int easycap_gain;
+extern struct easycap_dongle easycap_dongle[];
diff --git a/drivers/staging/easycap/easycap_ioctl.c b/drivers/staging/easycap/easycap_ioctl.c
index 9a42ae0..447953a 100644
--- a/drivers/staging/easycap/easycap_ioctl.c
+++ b/drivers/staging/easycap/easycap_ioctl.c
@@ -36,53 +36,101 @@
  *  UNLESS THERE IS A PREMATURE ERROR RETURN THIS ROUTINE UPDATES THE
  *  FOLLOWING:
  *          peasycap->standard_offset
+ *          peasycap->inputset[peasycap->input].standard_offset
  *          peasycap->fps
  *          peasycap->usec
  *          peasycap->tolerate
+ *          peasycap->skip
  */
 /*---------------------------------------------------------------------------*/
 int adjust_standard(struct easycap *peasycap, v4l2_std_id std_id)
 {
 struct easycap_standard const *peasycap_standard;
 __u16 reg, set;
-int ir, rc, need;
+int ir, rc, need, k;
 unsigned int itwas, isnow;
+bool resubmit;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 peasycap_standard = &easycap_standard[0];
 while (0xFFFF != peasycap_standard->mask) {
-	if (std_id & peasycap_standard->v4l2_standard.id)
+	if (std_id == peasycap_standard->v4l2_standard.id)
 		break;
 	peasycap_standard++;
 }
 if (0xFFFF == peasycap_standard->mask) {
-	SAY("ERROR: 0x%08X=std_id: standard not found\n", \
+	peasycap_standard = &easycap_standard[0];
+	while (0xFFFF != peasycap_standard->mask) {
+		if (std_id & peasycap_standard->v4l2_standard.id)
+			break;
+		peasycap_standard++;
+	}
+}
+if (0xFFFF == peasycap_standard->mask) {
+	SAM("ERROR: 0x%08X=std_id: standard not found\n", \
 							(unsigned int)std_id);
 	return -EINVAL;
 }
-SAY("user requests standard: %s\n", \
+SAM("selected standard: %s\n", \
 			&(peasycap_standard->v4l2_standard.name[0]));
 if (peasycap->standard_offset == \
 			(int)(peasycap_standard - &easycap_standard[0])) {
-	SAY("requested standard already in effect\n");
+	SAM("requested standard already in effect\n");
 	return 0;
 }
 peasycap->standard_offset = (int)(peasycap_standard - &easycap_standard[0]);
+for (k = 0; k < INPUT_MANY;  k++) {
+	if (!peasycap->inputset[k].standard_offset_ok) {
+			peasycap->inputset[k].standard_offset = \
+						peasycap->standard_offset;
+	}
+}
+if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+	peasycap->inputset[peasycap->input].standard_offset = \
+						peasycap->standard_offset;
+	peasycap->inputset[peasycap->input].standard_offset_ok = 1;
+} else
+	JOM(8, "%i=peasycap->input\n", peasycap->input);
 peasycap->fps = peasycap_standard->v4l2_standard.frameperiod.denominator / \
 		peasycap_standard->v4l2_standard.frameperiod.numerator;
-if (!peasycap->fps) {
-	SAY("MISTAKE: frames-per-second is zero\n");
-	return -EFAULT;
+switch (peasycap->fps) {
+case 6:
+case 30: {
+	peasycap->ntsc = true;
+	break;
 }
-JOT(8, "%i frames-per-second\n", peasycap->fps);
-peasycap->usec = 1000000 / (2 * peasycap->fps);
-peasycap->tolerate = 1000 * (25 / peasycap->fps);
-
-kill_video_urbs(peasycap);
-
+case 5:
+case 25: {
+	peasycap->ntsc = false;
+	break;
+}
+default: {
+	SAM("MISTAKE: %i=frames-per-second\n", peasycap->fps);
+	return -ENOENT;
+}
+}
+JOM(8, "%i frames-per-second\n", peasycap->fps);
+if (0x8000 & peasycap_standard->mask) {
+	peasycap->skip = 5;
+	peasycap->usec = 1000000 / (2 * (5 * peasycap->fps));
+	peasycap->tolerate = 1000 * (25 / (5 * peasycap->fps));
+} else {
+	peasycap->skip = 0;
+	peasycap->usec = 1000000 / (2 * peasycap->fps);
+	peasycap->tolerate = 1000 * (25 / peasycap->fps);
+}
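The usec and tolerate assignments above use integer arithmetic throughout, so the 25/rate factor truncates to zero at the 30 frame-per-second NTSC rates. A standalone sketch (illustration only, not part of the patch) prints the constants produced for each frame rate the switch above accepts:

#include <stdio.h>

int main(void)
{
	/* 25 and 30 are the nominal PAL/NTSC rates; 5 and 6 belong to the
	 * 0x8000 "slow" standards, which stream at five times that rate. */
	int fps_table[] = { 25, 30, 5, 6 };
	int i;

	for (i = 0; i < 4; i++) {
		int fps = fps_table[i];
		int rate = (i >= 2) ? 5 * fps : fps;
		int usec = 1000000 / (2 * rate);	/* half a frame period */
		int tolerate = 1000 * (25 / rate);	/* 0 when rate > 25 */

		printf("fps=%2d rate=%2d usec=%5d tolerate=%4d\n",
		       fps, rate, usec, tolerate);
	}
	return 0;
}

For PAL (rate 25) this yields usec=20000 and tolerate=1000; for NTSC (rate 30) it yields usec=16666 and tolerate=0, because 25/30 truncates.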
+if (peasycap->video_isoc_streaming) {
+	resubmit = true;
+	kill_video_urbs(peasycap);
+} else
+	resubmit = false;
 /*--------------------------------------------------------------------------*/
 /*
  *  SAA7113H DATASHEET PAGE 44, TABLE 42
@@ -94,55 +142,41 @@
 	reg = 0x0A;  set = 0x95;
 	ir = read_saa(peasycap->pusb_device, reg);
 	if (0 > ir)
-		SAY("ERROR: cannot read SAA register 0x%02X\n", reg);
+		SAM("ERROR: cannot read SAA register 0x%02X\n", reg);
 	else
 		itwas = (unsigned int)ir;
-
-
-	set2to78(peasycap->pusb_device);
-
-
 	rc = write_saa(peasycap->pusb_device, reg, set);
 	if (0 != rc)
-		SAY("ERROR: failed to set SAA register " \
+		SAM("ERROR: failed to set SAA register " \
 			"0x%02X to 0x%02X for JP standard\n", reg, set);
 	else {
 		isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
 		if (0 > ir)
-			JOT(8, "SAA register 0x%02X changed " \
+			JOM(8, "SAA register 0x%02X changed " \
 				"to 0x%02X\n", reg, isnow);
 		else
-			JOT(8, "SAA register 0x%02X changed " \
+			JOM(8, "SAA register 0x%02X changed " \
 				"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
-
-		set2to78(peasycap->pusb_device);
-
 	}
 
 	reg = 0x0B;  set = 0x48;
 	ir = read_saa(peasycap->pusb_device, reg);
 	if (0 > ir)
-		SAY("ERROR: cannot read SAA register 0x%02X\n", reg);
+		SAM("ERROR: cannot read SAA register 0x%02X\n", reg);
 	else
 		itwas = (unsigned int)ir;
-
-	set2to78(peasycap->pusb_device);
-
 	rc = write_saa(peasycap->pusb_device, reg, set);
 	if (0 != rc)
-		SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X " \
+		SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X " \
 						"for JP standard\n", reg, set);
 	else {
 		isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
 		if (0 > ir)
-			JOT(8, "SAA register 0x%02X changed " \
+			JOM(8, "SAA register 0x%02X changed " \
 				"to 0x%02X\n", reg, isnow);
 		else
-			JOT(8, "SAA register 0x%02X changed " \
+			JOM(8, "SAA register 0x%02X changed " \
 				"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
-
-		set2to78(peasycap->pusb_device);
-
 	}
 /*--------------------------------------------------------------------------*/
 /*
@@ -176,23 +210,20 @@
 if (need) {
 	ir = read_saa(peasycap->pusb_device, reg);
 	if (0 > ir)
-		SAY("ERROR: failed to read SAA register 0x%02X\n", reg);
+		SAM("ERROR: failed to read SAA register 0x%02X\n", reg);
 	else
 		itwas = (unsigned int)ir;
-
-	set2to78(peasycap->pusb_device);
-
 	rc = write_saa(peasycap->pusb_device, reg, set);
 	if (0 != write_saa(peasycap->pusb_device, reg, set)) {
-		SAY("ERROR: failed to set SAA register " \
+		SAM("ERROR: failed to set SAA register " \
 			"0x%02X to 0x%02X for table 42\n", reg, set);
 	} else {
 		isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
 		if (0 > ir)
-			JOT(8, "SAA register 0x%02X changed " \
+			JOM(8, "SAA register 0x%02X changed " \
 				"to 0x%02X\n", reg, isnow);
 		else
-			JOT(8, "SAA register 0x%02X changed " \
+			JOM(8, "SAA register 0x%02X changed " \
 				"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
 	}
 }
@@ -204,7 +235,7 @@
 reg = 0x08;
 ir = read_saa(peasycap->pusb_device, reg);
 if (0 > ir)
-	SAY("ERROR: failed to read SAA register 0x%02X " \
+	SAM("ERROR: failed to read SAA register 0x%02X " \
 						"so cannot reset\n", reg);
 else {
 	itwas = (unsigned int)ir;
@@ -212,19 +243,18 @@
 		set = itwas | 0x40 ;
 	else
 		set = itwas & ~0x40 ;
-
-set2to78(peasycap->pusb_device);
-
-rc  = write_saa(peasycap->pusb_device, reg, set);
-if (0 != rc)
-	SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set);
-else {
-	isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
-	if (0 > ir)
-		JOT(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow);
-	else
-		JOT(8, "SAA register 0x%02X changed " \
-			"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+	rc  = write_saa(peasycap->pusb_device, reg, set);
+	if (0 != rc)
+		SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+								reg, set);
+	else {
+		isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+		if (0 > ir)
+			JOM(8, "SAA register 0x%02X changed to 0x%02X\n", \
+								reg, isnow);
+		else
+			JOM(8, "SAA register 0x%02X changed " \
+				"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
 	}
 }
 /*--------------------------------------------------------------------------*/
@@ -235,7 +265,7 @@
 reg = 0x40;
 ir = read_saa(peasycap->pusb_device, reg);
 if (0 > ir)
-	SAY("ERROR: failed to read SAA register 0x%02X " \
+	SAM("ERROR: failed to read SAA register 0x%02X " \
 						"so cannot reset\n", reg);
 else {
 	itwas = (unsigned int)ir;
@@ -243,19 +273,18 @@
 		set = itwas | 0x80 ;
 	else
 		set = itwas & ~0x80 ;
-
-set2to78(peasycap->pusb_device);
-
-rc = write_saa(peasycap->pusb_device, reg, set);
-if (0 != rc)
-	SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", reg, set);
-else {
-	isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
-	if (0 > ir)
-		JOT(8, "SAA register 0x%02X changed to 0x%02X\n", reg, isnow);
-	else
-		JOT(8, "SAA register 0x%02X changed " \
-			"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
+	rc = write_saa(peasycap->pusb_device, reg, set);
+	if (0 != rc)
+		SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+								reg, set);
+	else {
+		isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
+		if (0 > ir)
+			JOM(8, "SAA register 0x%02X changed to 0x%02X\n", \
+								reg, isnow);
+		else
+			JOM(8, "SAA register 0x%02X changed " \
+				"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
 	}
 }
 /*--------------------------------------------------------------------------*/
@@ -266,41 +295,39 @@
 reg = 0x5A;
 ir = read_saa(peasycap->pusb_device, reg);
 if (0 > ir)
-	SAY("ERROR: failed to read SAA register 0x%02X but continuing\n", reg);
+	SAM("ERROR: failed to read SAA register 0x%02X but continuing\n", reg);
 	itwas = (unsigned int)ir;
 	if (peasycap_standard->mask & 0x0001)
 		set = 0x0A ;
 	else
 		set = 0x07 ;
-
-	set2to78(peasycap->pusb_device);
-
 	if (0 != write_saa(peasycap->pusb_device, reg, set))
-		SAY("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
+		SAM("ERROR: failed to set SAA register 0x%02X to 0x%02X\n", \
 								reg, set);
 	else {
 		isnow = (unsigned int)read_saa(peasycap->pusb_device, reg);
 		if (0 > ir)
-			JOT(8, "SAA register 0x%02X changed "
+			JOM(8, "SAA register 0x%02X changed "
 				"to 0x%02X\n", reg, isnow);
 		else
-			JOT(8, "SAA register 0x%02X changed "
+			JOM(8, "SAA register 0x%02X changed "
 				"from 0x%02X to 0x%02X\n", reg, itwas, isnow);
 	}
-	if (0 != check_saa(peasycap->pusb_device))
-		SAY("ERROR: check_saa() failed\n");
+if (true == resubmit)
+	submit_video_urbs(peasycap);
 return 0;
 }
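adjust_standard() now pauses isochronous streaming only when it is already running, reprograms the decoder, and resubmits the URBs it stopped; adjust_format() below follows the same shape. A minimal userspace sketch of that stop/reprogram/resume pattern (the type and the helpers stop_urbs(), start_urbs() and reprogram() are simplified stand-ins, not the driver's kill_video_urbs()/submit_video_urbs()):

#include <stdbool.h>
#include <stdio.h>

struct capture {
	bool streaming;
};

static void stop_urbs(struct capture *c)  { c->streaming = false; puts("urbs killed"); }
static void start_urbs(struct capture *c) { c->streaming = true;  puts("urbs resubmitted"); }
static void reprogram(struct capture *c)  { (void)c; puts("decoder reprogrammed"); }

static void change_standard(struct capture *c)
{
	bool resubmit = false;

	if (c->streaming) {		/* pause only if already streaming */
		resubmit = true;
		stop_urbs(c);
	}
	reprogram(c);			/* no isochronous traffic while registers change */
	if (resubmit)
		start_urbs(c);		/* resume only if we paused */
}

int main(void)
{
	struct capture c = { .streaming = true };

	change_standard(&c);
	return 0;
}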
 /*****************************************************************************/
 /*--------------------------------------------------------------------------*/
 /*
- *  THE ALGORITHM FOR RESPONDING TO THE VIDIO_S_FMT IOCTL DEPENDS ON THE
- *  CURRENT VALUE OF peasycap->standard_offset.
+ *  THE ALGORITHM FOR RESPONDING TO THE VIDIOC_S_FMT IOCTL REQUIRES
+ *  A VALID VALUE OF peasycap->standard_offset, OTHERWISE -EBUSY IS RETURNED.
+ *
  *  PROVIDED THE ARGUMENT try IS false AND THERE IS NO PREMATURE ERROR RETURN
  *  THIS ROUTINE UPDATES THE FOLLOWING:
  *          peasycap->format_offset
+ *          peasycap->inputset[peasycap->input].format_offset
  *          peasycap->pixelformat
- *          peasycap->field
  *          peasycap->height
  *          peasycap->width
  *          peasycap->bytesperpixel
@@ -321,39 +348,93 @@
 struct easycap_format *peasycap_format, *peasycap_best_format;
 __u16 mask;
 struct usb_device *p;
-int miss, multiplier, best;
-char bf[5], *pc;
+int miss, multiplier, best, k;
+char bf[5], fo[32], *pc;
 __u32 uc;
+bool resubmit;
 
-if ((struct easycap *)NULL == peasycap) {
+if (NULL == peasycap) {
 	SAY("ERROR: peasycap is NULL\n");
 	return -EFAULT;
 }
+if (0 > peasycap->standard_offset) {
+	JOM(8, "%i=peasycap->standard_offset\n", peasycap->standard_offset);
+	return -EBUSY;
+}
 p = peasycap->pusb_device;
 if ((struct usb_device *)NULL == p) {
-	SAY("ERROR: peaycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 pc = &bf[0];
-uc = pixelformat;  memcpy((void *)pc, (void *)(&uc), 4);  bf[4] = 0;
-mask = easycap_standard[peasycap->standard_offset].mask;
-SAY("sought:    %ix%i,%s(0x%08X),%i=field,0x%02X=std mask\n", \
+uc = pixelformat;
+memcpy((void *)pc, (void *)(&uc), 4);
+bf[4] = 0;
+mask = 0xFF & easycap_standard[peasycap->standard_offset].mask;
+SAM("sought:    %ix%i,%s(0x%08X),%i=field,0x%02X=std mask\n", \
 				width, height, pc, pixelformat, field, mask);
+switch (field) {
+case V4L2_FIELD_ANY: {
+	strcpy(&fo[0], "V4L2_FIELD_ANY ");
+	break;
+}
+case V4L2_FIELD_NONE: {
+	strcpy(&fo[0], "V4L2_FIELD_NONE");
+	break;
+}
+case V4L2_FIELD_TOP: {
+	strcpy(&fo[0], "V4L2_FIELD_TOP");
+	break;
+}
+case V4L2_FIELD_BOTTOM: {
+	strcpy(&fo[0], "V4L2_FIELD_BOTTOM");
+	break;
+}
+case V4L2_FIELD_INTERLACED: {
+	strcpy(&fo[0], "V4L2_FIELD_INTERLACED");
+	break;
+}
+case V4L2_FIELD_SEQ_TB: {
+	strcpy(&fo[0], "V4L2_FIELD_SEQ_TB");
+	break;
+}
+case V4L2_FIELD_SEQ_BT: {
+	strcpy(&fo[0], "V4L2_FIELD_SEQ_BT");
+	break;
+}
+case V4L2_FIELD_ALTERNATE: {
+	strcpy(&fo[0], "V4L2_FIELD_ALTERNATE");
+	break;
+}
+case V4L2_FIELD_INTERLACED_TB: {
+	strcpy(&fo[0], "V4L2_FIELD_INTERLACED_TB");
+	break;
+}
+case V4L2_FIELD_INTERLACED_BT: {
+	strcpy(&fo[0], "V4L2_FIELD_INTERLACED_BT");
+	break;
+}
+default: {
+	strcpy(&fo[0], "V4L2_FIELD_... UNKNOWN  ");
+	break;
+}
+}
+SAM("sought:    %s\n", &fo[0]);
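The switch above only maps the requested field enumeration onto a printable name for the log. For comparison, a table-driven equivalent is sketched below (illustration only, assuming the userspace <linux/videodev2.h> header; the driver keeps the switch):

#include <linux/videodev2.h>
#include <stdio.h>

static const char *const field_names[] = {
	[V4L2_FIELD_ANY]           = "V4L2_FIELD_ANY",
	[V4L2_FIELD_NONE]          = "V4L2_FIELD_NONE",
	[V4L2_FIELD_TOP]           = "V4L2_FIELD_TOP",
	[V4L2_FIELD_BOTTOM]        = "V4L2_FIELD_BOTTOM",
	[V4L2_FIELD_INTERLACED]    = "V4L2_FIELD_INTERLACED",
	[V4L2_FIELD_SEQ_TB]        = "V4L2_FIELD_SEQ_TB",
	[V4L2_FIELD_SEQ_BT]        = "V4L2_FIELD_SEQ_BT",
	[V4L2_FIELD_ALTERNATE]     = "V4L2_FIELD_ALTERNATE",
	[V4L2_FIELD_INTERLACED_TB] = "V4L2_FIELD_INTERLACED_TB",
	[V4L2_FIELD_INTERLACED_BT] = "V4L2_FIELD_INTERLACED_BT",
};

static const char *field_name(unsigned int field)
{
	if (field < sizeof(field_names) / sizeof(field_names[0]) &&
	    field_names[field])
		return field_names[field];
	return "V4L2_FIELD_... UNKNOWN";
}

int main(void)
{
	printf("%s\n", field_name(V4L2_FIELD_INTERLACED));
	return 0;
}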
 if (V4L2_FIELD_ANY == field) {
-	field = V4L2_FIELD_INTERLACED;
-	SAY("prefer:    V4L2_FIELD_INTERLACED=field, was V4L2_FIELD_ANY\n");
+	field = V4L2_FIELD_NONE;
+	SAM("prefer:    V4L2_FIELD_NONE=field, was V4L2_FIELD_ANY\n");
 }
 peasycap_best_format = (struct easycap_format *)NULL;
 peasycap_format = &easycap_format[0];
 while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
-	JOT(16, ".> %i %i 0x%08X %ix%i\n", \
+	JOM(16, ".> %i %i 0x%08X %ix%i\n", \
 		peasycap_format->mask & 0x01,
 		peasycap_format->v4l2_format.fmt.pix.field,
 		peasycap_format->v4l2_format.fmt.pix.pixelformat,
 		peasycap_format->v4l2_format.fmt.pix.width,
 		peasycap_format->v4l2_format.fmt.pix.height);
 
-	if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+	if (((peasycap_format->mask & 0x1F) == (mask & 0x1F)) && \
 		(peasycap_format->v4l2_format.fmt.pix.field == field) && \
 		(peasycap_format->v4l2_format.fmt.pix.pixelformat == \
 							pixelformat) && \
@@ -365,11 +446,11 @@
 	peasycap_format++;
 }
 if (0 == peasycap_format->v4l2_format.fmt.pix.width) {
-	SAY("cannot do: %ix%i with standard mask 0x%02X\n", \
+	SAM("cannot do: %ix%i with standard mask 0x%02X\n", \
 							width, height, mask);
 	peasycap_format = &easycap_format[0];  best = -1;
 	while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
-		if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+		if (((peasycap_format->mask & 0x1F) == (mask & 0x1F)) && \
 				 (peasycap_format->v4l2_format.fmt.pix\
 						.field == field) && \
 				 (peasycap_format->v4l2_format.fmt.pix\
@@ -386,16 +467,16 @@
 		peasycap_format++;
 	}
 	if (-1 == best) {
-		SAY("cannot do %ix... with standard mask 0x%02X\n", \
+		SAM("cannot do %ix... with standard mask 0x%02X\n", \
 								width, mask);
-		SAY("cannot do ...x%i with standard mask 0x%02X\n", \
+		SAM("cannot do ...x%i with standard mask 0x%02X\n", \
 								height, mask);
-		SAY("           %ix%i unmatched\n", width, height);
+		SAM("           %ix%i unmatched\n", width, height);
 		return peasycap->format_offset;
 	}
 }
 if ((struct easycap_format *)NULL == peasycap_best_format) {
-	SAY("MISTAKE: peasycap_best_format is NULL");
+	SAM("MISTAKE: peasycap_best_format is NULL");
 	return -EINVAL;
 }
 peasycap_format = peasycap_best_format;
@@ -406,23 +487,43 @@
 /*...........................................................................*/
 
 if (false != try) {
-	SAY("MISTAKE: true==try where is should be false\n");
+	SAM("MISTAKE: true==try where it should be false\n");
 	return -EINVAL;
 }
-SAY("actioning: %ix%i %s\n", \
+SAM("actioning: %ix%i %s\n", \
 			peasycap_format->v4l2_format.fmt.pix.width, \
 			peasycap_format->v4l2_format.fmt.pix.height,
 			&peasycap_format->name[0]);
 peasycap->height        = peasycap_format->v4l2_format.fmt.pix.height;
 peasycap->width         = peasycap_format->v4l2_format.fmt.pix.width;
 peasycap->pixelformat   = peasycap_format->v4l2_format.fmt.pix.pixelformat;
-peasycap->field         = peasycap_format->v4l2_format.fmt.pix.field;
 peasycap->format_offset = (int)(peasycap_format - &easycap_format[0]);
-peasycap->bytesperpixel = (0x00F0 & peasycap_format->mask) >> 4 ;
+
+
+for (k = 0; k < INPUT_MANY; k++) {
+	if (!peasycap->inputset[k].format_offset_ok) {
+		peasycap->inputset[k].format_offset = \
+						peasycap->format_offset;
+	}
+}
+if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+	peasycap->inputset[peasycap->input].format_offset = \
+						peasycap->format_offset;
+	peasycap->inputset[peasycap->input].format_offset_ok = 1;
+} else
+	JOM(8, "%i=peasycap->input\n", peasycap->input);
+
+
+
+peasycap->bytesperpixel = (0x00E0 & peasycap_format->mask) >> 5 ;
 if (0x0100 & peasycap_format->mask)
 	peasycap->byteswaporder = true;
 else
 	peasycap->byteswaporder = false;
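The mask tests in this block decode bit fields of peasycap_format->mask: bits 0-4 are matched against the standard mask, bits 5-7 carry the bytes per pixel, bit 8 requests byte swapping, bit 9 the five-frame skip and bit 11 pixel decimation. A standalone decode of a hypothetical mask value (0x0A61 is invented for illustration, not taken from easycap_format[]):

#include <stdio.h>

int main(void)
{
	unsigned short mask = 0x0A61;			/* hypothetical */

	int bytesperpixel = (0x00E0 & mask) >> 5;	/* -> 3 */
	int byteswaporder = (0x0100 & mask) ? 1 : 0;	/* -> 0 */
	int skip          = (0x0200 & mask) ? 5 : 0;	/* -> 5 */
	int decimatepixel = (0x0800 & mask) ? 1 : 0;	/* -> 1 */

	printf("std bits=0x%02X bpp=%d swap=%d skip=%d decimate=%d\n",
	       (unsigned int)(mask & 0x1F), bytesperpixel,
	       byteswaporder, skip, decimatepixel);
	return 0;
}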
+if (0x0200 & peasycap_format->mask)
+	peasycap->skip = 5;
+else
+	peasycap->skip = 0;
 if (0x0800 & peasycap_format->mask)
 	peasycap->decimatepixel = true;
 else
@@ -439,27 +540,11 @@
 					multiplier * peasycap->height;
 peasycap->frame_buffer_used = peasycap->bytesperpixel * \
 					peasycap->width * peasycap->height;
-
-if (true == peasycap->offerfields) {
-	SAY("WARNING: %i=peasycap->field is untested: " \
-				"please report problems\n", peasycap->field);
-
-
-/*
- *    FIXME ---- THIS IS UNTESTED, MAY BE (AND PROBABLY IS) INCORRECT:
- *
- *    peasycap->frame_buffer_used = peasycap->frame_buffer_used / 2;
- *
- *    SO DO NOT RISK IT YET.
- *
- */
-
-
-
-}
-
-kill_video_urbs(peasycap);
-
+if (peasycap->video_isoc_streaming) {
+	resubmit = true;
+	kill_video_urbs(peasycap);
+} else
+	resubmit = false;
 /*---------------------------------------------------------------------------*/
 /*
  *  PAL
@@ -474,13 +559,13 @@
 			(288 == \
 			peasycap_format->v4l2_format.fmt.pix.height))) {
 		if (0 != set_resolution(p, 0x0000, 0x0001, 0x05A0, 0x0121)) {
-			SAY("ERROR: set_resolution() failed\n");
+			SAM("ERROR: set_resolution() failed\n");
 			return -EINVAL;
 		}
 	} else if ((704 == peasycap_format->v4l2_format.fmt.pix.width) && \
 			(576 == peasycap_format->v4l2_format.fmt.pix.height)) {
 		if (0 != set_resolution(p, 0x0004, 0x0001, 0x0584, 0x0121)) {
-			SAY("ERROR: set_resolution() failed\n");
+			SAM("ERROR: set_resolution() failed\n");
 			return -EINVAL;
 		}
 	} else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && \
@@ -491,11 +576,11 @@
 			(240 == \
 			peasycap_format->v4l2_format.fmt.pix.height))) {
 		if (0 != set_resolution(p, 0x0014, 0x0020, 0x0514, 0x0110)) {
-			SAY("ERROR: set_resolution() failed\n");
+			SAM("ERROR: set_resolution() failed\n");
 			return -EINVAL;
 		}
 	} else {
-		SAY("MISTAKE: bad format, cannot set resolution\n");
+		SAM("MISTAKE: bad format, cannot set resolution\n");
 		return -EINVAL;
 	}
 /*---------------------------------------------------------------------------*/
@@ -512,7 +597,7 @@
 			(240 == \
 			peasycap_format->v4l2_format.fmt.pix.height))) {
 		if (0 != set_resolution(p, 0x0000, 0x0003, 0x05A0, 0x00F3)) {
-			SAY("ERROR: set_resolution() failed\n");
+			SAM("ERROR: set_resolution() failed\n");
 			return -EINVAL;
 		}
 	} else if (((640 == peasycap_format->v4l2_format.fmt.pix.width) && \
@@ -523,28 +608,31 @@
 			(240 == \
 			peasycap_format->v4l2_format.fmt.pix.height))) {
 		if (0 != set_resolution(p, 0x0014, 0x0003, 0x0514, 0x00F3)) {
-			SAY("ERROR: set_resolution() failed\n");
+			SAM("ERROR: set_resolution() failed\n");
 			return -EINVAL;
 		}
 	} else {
-		SAY("MISTAKE: bad format, cannot set resolution\n");
+		SAM("MISTAKE: bad format, cannot set resolution\n");
 		return -EINVAL;
 	}
 }
 /*---------------------------------------------------------------------------*/
-
-check_stk(peasycap->pusb_device);
-
+if (true == resubmit)
+	submit_video_urbs(peasycap);
 return (int)(peasycap_best_format - &easycap_format[0]);
 }
 /*****************************************************************************/
 int adjust_brightness(struct easycap *peasycap, int value)
 {
 unsigned int mood;
-int i1;
+int i1, k;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 i1 = 0;
@@ -553,37 +641,56 @@
 		if ((easycap_control[i1].minimum > value) || \
 					(easycap_control[i1].maximum < value))
 			value = easycap_control[i1].default_value;
+
+		if ((easycap_control[i1].minimum <= peasycap->brightness) && \
+					(easycap_control[i1].maximum >= \
+						peasycap->brightness)) {
+			if (peasycap->brightness == value) {
+				SAM("unchanged brightness at  0x%02X\n", \
+								value);
+				return 0;
+			}
+		}
 		peasycap->brightness = value;
+		for (k = 0; k < INPUT_MANY; k++) {
+			if (!peasycap->inputset[k].brightness_ok)
+				peasycap->inputset[k].brightness = \
+							peasycap->brightness;
+		}
+		if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+			peasycap->inputset[peasycap->input].brightness = \
+							peasycap->brightness;
+			peasycap->inputset[peasycap->input].brightness_ok = 1;
+		} else
+			JOM(8, "%i=peasycap->input\n", peasycap->input);
 		mood = 0x00FF & (unsigned int)peasycap->brightness;
-
-		set2to78(peasycap->pusb_device);
-
 		if (!write_saa(peasycap->pusb_device, 0x0A, mood)) {
-			SAY("adjusting brightness to  0x%02X\n", mood);
+			SAM("adjusting brightness to  0x%02X\n", mood);
 			return 0;
 		} else {
-			SAY("WARNING: failed to adjust brightness " \
+			SAM("WARNING: failed to adjust brightness " \
 							"to 0x%02X\n", mood);
 			return -ENOENT;
 		}
-
-		set2to78(peasycap->pusb_device);
-
 		break;
 	}
 	i1++;
 }
-SAY("WARNING: failed to adjust brightness: control not found\n");
+SAM("WARNING: failed to adjust brightness: control not found\n");
 return -ENOENT;
 }
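adjust_brightness() above, together with adjust_contrast(), adjust_saturation() and adjust_hue() below, follows one shape: clamp the requested value against the control table, return early when nothing changes, remember the value per input, then program a single SAA7113H register. A simplified standalone sketch of that shape (the types and the write_reg() helper are stand-ins, not the driver's write_saa()):

#include <stdio.h>

#define INPUTS 6

struct control { int id, min, max, def; };
struct state {
	int brightness;
	int input;
	int per_input[INPUTS];
	int per_input_ok[INPUTS];
};

static int write_reg(unsigned char reg, unsigned char val)
{
	printf("write reg 0x%02X <- 0x%02X\n", reg, val);
	return 0;				/* pretend the USB write succeeded */
}

static int adjust(struct state *s, const struct control *c, int value)
{
	int k;

	if (value < c->min || value > c->max)
		value = c->def;			/* out of range: fall back to default */
	if (s->brightness == value)
		return 0;			/* unchanged: skip the hardware write */
	s->brightness = value;
	for (k = 0; k < INPUTS; k++)		/* seed inputs not yet set explicitly */
		if (!s->per_input_ok[k])
			s->per_input[k] = value;
	s->per_input[s->input] = value;		/* remember the current input's setting */
	s->per_input_ok[s->input] = 1;
	return write_reg(0x0A, 0xFF & value);	/* 0x0A is the brightness register */
}

int main(void)
{
	struct control c = { 0, 0, 255, 0x7F };
	struct state s = { .brightness = -1, .input = 0 };

	return adjust(&s, &c, 300);		/* 300 is clamped to the default 0x7F */
}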
 /*****************************************************************************/
 int adjust_contrast(struct easycap *peasycap, int value)
 {
 unsigned int mood;
-int i1;
+int i1, k;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 i1 = 0;
@@ -592,37 +699,58 @@
 		if ((easycap_control[i1].minimum > value) || \
 					(easycap_control[i1].maximum < value))
 			value = easycap_control[i1].default_value;
+
+
+
+		if ((easycap_control[i1].minimum <= peasycap->contrast) && \
+				(easycap_control[i1].maximum >= \
+							peasycap->contrast)) {
+			if (peasycap->contrast == value) {
+				SAM("unchanged contrast at  0x%02X\n", value);
+				return 0;
+			}
+		}
 		peasycap->contrast = value;
+		for (k = 0; k < INPUT_MANY; k++) {
+			if (!peasycap->inputset[k].contrast_ok) {
+				peasycap->inputset[k].contrast = \
+							peasycap->contrast;
+			}
+		}
+		if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+			peasycap->inputset[peasycap->input].contrast = \
+							peasycap->contrast;
+			peasycap->inputset[peasycap->input].contrast_ok = 1;
+		} else
+			JOM(8, "%i=peasycap->input\n", peasycap->input);
 		mood = 0x00FF & (unsigned int) (peasycap->contrast - 128);
-
-		set2to78(peasycap->pusb_device);
-
 		if (!write_saa(peasycap->pusb_device, 0x0B, mood)) {
-			SAY("adjusting contrast to  0x%02X\n", mood);
+			SAM("adjusting contrast to  0x%02X\n", mood);
 			return 0;
 		} else {
-			SAY("WARNING: failed to adjust contrast to " \
+			SAM("WARNING: failed to adjust contrast to " \
 							"0x%02X\n", mood);
 			return -ENOENT;
 		}
-
-		set2to78(peasycap->pusb_device);
-
 		break;
 	}
 	i1++;
 }
-SAY("WARNING: failed to adjust contrast: control not found\n");
+SAM("WARNING: failed to adjust contrast: control not found\n");
 return -ENOENT;
 }
 /*****************************************************************************/
 int adjust_saturation(struct easycap *peasycap, int value)
 {
 unsigned int mood;
-int i1;
+int i1, k;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 i1 = 0;
@@ -631,37 +759,58 @@
 		if ((easycap_control[i1].minimum > value) || \
 					(easycap_control[i1].maximum < value))
 			value = easycap_control[i1].default_value;
+
+
+		if ((easycap_control[i1].minimum <= peasycap->saturation) && \
+					(easycap_control[i1].maximum >= \
+						peasycap->saturation)) {
+			if (peasycap->saturation == value) {
+				SAM("unchanged saturation at  0x%02X\n", \
+								value);
+				return 0;
+			}
+		}
 		peasycap->saturation = value;
+		for (k = 0; k < INPUT_MANY; k++) {
+			if (!peasycap->inputset[k].saturation_ok) {
+				peasycap->inputset[k].saturation = \
+							peasycap->saturation;
+			}
+		}
+		if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+			peasycap->inputset[peasycap->input].saturation = \
+							peasycap->saturation;
+			peasycap->inputset[peasycap->input].saturation_ok = 1;
+		} else
+			JOM(8, "%i=peasycap->input\n", peasycap->input);
 		mood = 0x00FF & (unsigned int) (peasycap->saturation - 128);
-
-		set2to78(peasycap->pusb_device);
-
 		if (!write_saa(peasycap->pusb_device, 0x0C, mood)) {
-			SAY("adjusting saturation to  0x%02X\n", mood);
+			SAM("adjusting saturation to  0x%02X\n", mood);
 			return 0;
 		} else {
-			SAY("WARNING: failed to adjust saturation to " \
+			SAM("WARNING: failed to adjust saturation to " \
 							"0x%02X\n", mood);
 			return -ENOENT;
 		}
 		break;
-
-		set2to78(peasycap->pusb_device);
-
 	}
 	i1++;
 }
-SAY("WARNING: failed to adjust saturation: control not found\n");
+SAM("WARNING: failed to adjust saturation: control not found\n");
 return -ENOENT;
 }
 /*****************************************************************************/
 int adjust_hue(struct easycap *peasycap, int value)
 {
 unsigned int mood;
-int i1, i2;
+int i1, i2, k;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 i1 = 0;
@@ -670,27 +819,40 @@
 		if ((easycap_control[i1].minimum > value) || \
 					(easycap_control[i1].maximum < value))
 			value = easycap_control[i1].default_value;
+
+		if ((easycap_control[i1].minimum <= peasycap->hue) && \
+					(easycap_control[i1].maximum >= \
+							peasycap->hue)) {
+			if (peasycap->hue == value) {
+				SAM("unchanged hue at  0x%02X\n", value);
+				return 0;
+			}
+		}
 		peasycap->hue = value;
+		for (k = 0; k < INPUT_MANY; k++) {
+			if (!peasycap->inputset[k].hue_ok)
+				peasycap->inputset[k].hue = peasycap->hue;
+		}
+		if ((0 <= peasycap->input) && (INPUT_MANY > peasycap->input)) {
+			peasycap->inputset[peasycap->input].hue = \
+							peasycap->hue;
+			peasycap->inputset[peasycap->input].hue_ok = 1;
+		} else
+			JOM(8, "%i=peasycap->input\n", peasycap->input);
 		i2 = peasycap->hue - 128;
 		mood = 0x00FF & ((int) i2);
-
-		set2to78(peasycap->pusb_device);
-
 		if (!write_saa(peasycap->pusb_device, 0x0D, mood)) {
-			SAY("adjusting hue to  0x%02X\n", mood);
+			SAM("adjusting hue to  0x%02X\n", mood);
 			return 0;
 		} else {
-			SAY("WARNING: failed to adjust hue to 0x%02X\n", mood);
+			SAM("WARNING: failed to adjust hue to 0x%02X\n", mood);
 			return -ENOENT;
 		}
-
-		set2to78(peasycap->pusb_device);
-
 		break;
 	}
 	i1++;
 }
-SAY("WARNING: failed to adjust hue: control not found\n");
+SAM("WARNING: failed to adjust hue: control not found\n");
 return -ENOENT;
 }
 /*****************************************************************************/
@@ -699,33 +861,45 @@
 __s8 mood;
 int i1;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 i1 = 0;
 while (0xFFFFFFFF != easycap_control[i1].id) {
 	if (V4L2_CID_AUDIO_VOLUME == easycap_control[i1].id) {
 		if ((easycap_control[i1].minimum > value) || \
-			(easycap_control[i1].maximum < value))
+				(easycap_control[i1].maximum < value))
 			value = easycap_control[i1].default_value;
+		if ((easycap_control[i1].minimum <= peasycap->volume) && \
+					(easycap_control[i1].maximum >= \
+							peasycap->volume)) {
+			if (peasycap->volume == value) {
+				SAM("unchanged volume at  0x%02X\n", value);
+				return 0;
+			}
+		}
 		peasycap->volume = value;
 		mood = (16 > peasycap->volume) ? 16 : \
 			((31 < peasycap->volume) ? 31 : \
 			(__s8) peasycap->volume);
 		if (!audio_gainset(peasycap->pusb_device, mood)) {
-			SAY("adjusting volume to 0x%01X\n", mood);
+			SAM("adjusting volume to 0x%02X\n", mood);
 			return 0;
 		} else {
-			SAY("WARNING: failed to adjust volume to " \
-							"0x%1X\n", mood);
+			SAM("WARNING: failed to adjust volume to " \
+							"0x%02X\n", mood);
 			return -ENOENT;
 		}
 		break;
 	}
 i1++;
 }
-SAY("WARNING: failed to adjust volume: control not found\n");
+SAM("WARNING: failed to adjust volume: control not found\n");
 return -ENOENT;
 }
 /*****************************************************************************/
@@ -744,8 +918,12 @@
 {
 int i1;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 i1 = 0;
@@ -756,13 +934,13 @@
 		case 1: {
 			peasycap->audio_idle = 1;
 			peasycap->timeval0.tv_sec = 0;
-			SAY("adjusting mute: %i=peasycap->audio_idle\n", \
+			SAM("adjusting mute: %i=peasycap->audio_idle\n", \
 							peasycap->audio_idle);
 			return 0;
 		}
 		default: {
 			peasycap->audio_idle = 0;
-			SAY("adjusting mute: %i=peasycap->audio_idle\n", \
+			SAM("adjusting mute: %i=peasycap->audio_idle\n", \
 							peasycap->audio_idle);
 			return 0;
 		}
@@ -771,47 +949,107 @@
 	}
 	i1++;
 }
-SAY("WARNING: failed to adjust mute: control not found\n");
+SAM("WARNING: failed to adjust mute: control not found\n");
 return -ENOENT;
 }
-
-/*--------------------------------------------------------------------------*/
-static int easycap_ioctl_bkl(struct inode *inode, struct file *file,
-			     unsigned int cmd, unsigned long arg)
+/*****************************************************************************/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if ((defined(EASYCAP_IS_VIDEODEV_CLIENT)) || \
+	(defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)))
+long
+easycap_ioctl_noinode(struct file *file, unsigned int cmd, unsigned long arg) {
+	return (long)easycap_ioctl((struct inode *)NULL, file, cmd, arg);
+}
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT||EASYCAP_NEEDS_UNLOCKED_IOCTL*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+int
+easycap_ioctl(struct inode *inode, struct file *file,
+					unsigned int cmd, unsigned long arg)
 {
-static struct easycap *peasycap;
-static struct usb_device *p;
-static __u32 isequence;
+struct easycap *peasycap;
+struct usb_device *p;
+int kd;
 
+if (NULL == file) {
+	SAY("ERROR:  file is NULL\n");
+	return -ERESTARTSYS;
+}
 peasycap = file->private_data;
 if (NULL == peasycap) {
 	SAY("ERROR:  peasycap is NULL\n");
 	return -1;
 }
-p = peasycap->pusb_device;
-if ((struct usb_device *)NULL == p) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap\n");
 	return -EFAULT;
 }
+p = peasycap->pusb_device;
+if (NULL == p) {
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
+	return -EFAULT;
+}
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video)) {
+		SAY("ERROR: cannot lock easycap_dongle[%i].mutex_video\n", kd);
+		return -ERESTARTSYS;
+	}
+	JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
 /*---------------------------------------------------------------------------*/
 /*
- *  MOST OF THE VARIABLES DECLARED static IN THE case{} BLOCKS BELOW ARE SO
- *  DECLARED SIMPLY TO AVOID A COMPILER WARNING OF THE KIND:
- *  easycap_ioctl.c: warning:
- *                       the frame size of ... bytes is larger than 1024 bytes
- */
+ *  MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap,
+ *  IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ *  IF NECESSARY, BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+	if (kd != isdongle(peasycap))
+		return -ERESTARTSYS;
+	if (NULL == file) {
+		SAY("ERROR:  file is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+	peasycap = file->private_data;
+	if (NULL == peasycap) {
+		SAY("ERROR:  peasycap is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+		SAY("ERROR: bad peasycap\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EFAULT;
+	}
+	p = peasycap->pusb_device;
+	if (NULL == peasycap->pusb_device) {
+		SAM("ERROR: peasycap->pusb_device is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+} else {
+/*---------------------------------------------------------------------------*/
+/*
+ *  IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE
+ *  ATTEMPT TO ACQUIRE THE MUTEX, isdongle() WILL HAVE FAILED.  BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+	return -ERESTARTSYS;
+}
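The prologue above is a validate / lock / re-validate sequence: identify the dongle, take its mutex (which may sleep), then confirm that easycap_usb_disconnect() has not freed peasycap while the caller waited. A userspace sketch of the same pattern, with a pthread mutex and a lookup_slot() helper standing in for the kernel mutex and isdongle() (compile with -pthread; illustration only):

#include <pthread.h>
#include <stdio.h>

#define SLOTS 8

struct slot {
	pthread_mutex_t lock;
	void *object;				/* NULL once disconnected */
};

static struct slot slots[SLOTS];

static int lookup_slot(const void *object)	/* like isdongle(): negative if gone */
{
	int k;

	for (k = 0; k < SLOTS; k++)
		if (object && slots[k].object == object)
			return k;
	return -1;
}

static int guarded_ioctl(void *object)
{
	int kd = lookup_slot(object);

	if (kd < 0)
		return -1;			/* already disconnected */
	pthread_mutex_lock(&slots[kd].lock);	/* may block for a long time */
	if (kd != lookup_slot(object)) {	/* object freed while we slept? */
		pthread_mutex_unlock(&slots[kd].lock);
		return -1;
	}
	/* ... the object is safe to use until the unlock ... */
	pthread_mutex_unlock(&slots[kd].lock);
	return 0;
}

int main(void)
{
	int dummy;

	slots[0].object = &dummy;
	pthread_mutex_init(&slots[0].lock, NULL);
	printf("ioctl -> %d\n", guarded_ioctl(&dummy));
	return 0;
}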
 /*---------------------------------------------------------------------------*/
 switch (cmd) {
 case VIDIOC_QUERYCAP: {
-	static struct v4l2_capability v4l2_capability;
-	static char version[16], *p1, *p2;
-	static int i, rc, k[3];
-	static long lng;
+	struct v4l2_capability v4l2_capability;
+	char version[16], *p1, *p2;
+	int i, rc, k[3];
+	long lng;
 
-	JOT(8, "VIDIOC_QUERYCAP\n");
+	JOM(8, "VIDIOC_QUERYCAP\n");
 
 	if (16 <= strlen(EASYCAP_DRIVER_VERSION)) {
-		SAY("ERROR: bad driver version string\n"); return -EINVAL;
+		SAM("ERROR: bad driver version string\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EINVAL;
 	}
 	strcpy(&version[0], EASYCAP_DRIVER_VERSION);
 	for (i = 0; i < 3; i++)
@@ -826,8 +1064,9 @@
 		if (3 > i) {
 			rc = (int) strict_strtol(p1, 10, &lng);
 			if (0 != rc) {
-				SAY("ERROR: %i=strict_strtol(%s,.,,)\n", \
+				SAM("ERROR: %i=strict_strtol(%s,.,,)\n", \
 								rc, p1);
+				mutex_unlock(&easycap_dongle[kd].mutex_video);
 				return -EINVAL;
 			}
 			k[i] = (int)lng;
@@ -844,7 +1083,7 @@
 				V4L2_CAP_AUDIO         | V4L2_CAP_READWRITE;
 
 	v4l2_capability.version = KERNEL_VERSION(k[0], k[1], k[2]);
-	JOT(8, "v4l2_capability.version=(%i,%i,%i)\n", k[0], k[1], k[2]);
+	JOM(8, "v4l2_capability.version=(%i,%i,%i)\n", k[0], k[1], k[2]);
 
 	strlcpy(&v4l2_capability.card[0], "EasyCAP DC60", \
 		sizeof(v4l2_capability.card));
@@ -853,26 +1092,26 @@
 				sizeof(v4l2_capability.bus_info)) < 0) {
 		strlcpy(&v4l2_capability.bus_info[0], "EasyCAP bus_info", \
 					sizeof(v4l2_capability.bus_info));
-		JOT(8, "%s=v4l2_capability.bus_info\n", \
+		JOM(8, "%s=v4l2_capability.bus_info\n", \
 					&v4l2_capability.bus_info[0]);
 	}
 	if (0 != copy_to_user((void __user *)arg, &v4l2_capability, \
 					sizeof(struct v4l2_capability))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_ENUMINPUT: {
-	static struct v4l2_input v4l2_input;
-	static __u32 index;
+	struct v4l2_input v4l2_input;
+	__u32 index;
 
-	JOT(8, "VIDIOC_ENUMINPUT\n");
+	JOM(8, "VIDIOC_ENUMINPUT\n");
 
 	if (0 != copy_from_user(&v4l2_input, (void __user *)arg, \
 					sizeof(struct v4l2_input))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
@@ -889,7 +1128,7 @@
 		v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
 				V4L2_STD_NTSC ;
 		v4l2_input.status = 0;
-		JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
 		break;
 	}
 	case 1: {
@@ -901,7 +1140,7 @@
 		v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
 				V4L2_STD_NTSC ;
 		v4l2_input.status = 0;
-		JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
 		break;
 	}
 	case 2: {
@@ -913,7 +1152,7 @@
 		v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
 				V4L2_STD_NTSC ;
 		v4l2_input.status = 0;
-		JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
 		break;
 	}
 	case 3: {
@@ -925,7 +1164,7 @@
 		v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
 				V4L2_STD_NTSC ;
 		v4l2_input.status = 0;
-		JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
 		break;
 	}
 	case 4: {
@@ -937,7 +1176,7 @@
 		v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
 				V4L2_STD_NTSC ;
 		v4l2_input.status = 0;
-		JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
 		break;
 	}
 	case 5: {
@@ -949,31 +1188,32 @@
 		v4l2_input.std = V4L2_STD_PAL | V4L2_STD_SECAM | \
 				V4L2_STD_NTSC ;
 		v4l2_input.status = 0;
-		JOT(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_input.name[0]);
 		break;
 	}
 	default: {
-		JOT(8, "%i=index: exhausts inputs\n", index);
+		JOM(8, "%i=index: exhausts inputs\n", index);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
 	}
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_input, \
 						sizeof(struct v4l2_input))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_INPUT: {
-	static __u32 index;
+	__u32 index;
 
-	JOT(8, "VIDIOC_G_INPUT\n");
+	JOM(8, "VIDIOC_G_INPUT\n");
 	index = (__u32)peasycap->input;
-	JOT(8, "user is told: %i\n", index);
+	JOM(8, "user is told: %i\n", index);
 	if (0 != copy_to_user((void __user *)arg, &index, sizeof(__u32))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
@@ -981,79 +1221,89 @@
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_S_INPUT:
 	{
-	static __u32 index;
+	__u32 index;
+	int rc;
 
-	JOT(8, "VIDIOC_S_INPUT\n");
+	JOM(8, "VIDIOC_S_INPUT\n");
 
 	if (0 != copy_from_user(&index, (void __user *)arg, sizeof(__u32))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	JOT(8, "user requests input %i\n", index);
+	JOM(8, "user requests input %i\n", index);
 
 	if ((int)index == peasycap->input) {
-		SAY("requested input already in effect\n");
+		SAM("requested input already in effect\n");
 		break;
 	}
 
-	if ((0 > index) || (5 < index)) {
-		JOT(8, "ERROR:  bad requested input: %i\n", index);
+	if ((0 > index) || (INPUT_MANY <= index)) {
+		JOM(8, "ERROR:  bad requested input: %i\n", index);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
-	peasycap->input = (int)index;
 
-	select_input(peasycap->pusb_device, peasycap->input, 9);
-
+	rc = newinput(peasycap, (int)index);
+	if (0 == rc) {
+		JOM(8, "newinput(.,%i) OK\n", (int)index);
+	} else {
+		SAM("ERROR: newinput(.,%i) returned %i\n", (int)index, rc);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EFAULT;
+	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_ENUMAUDIO: {
-	JOT(8, "VIDIOC_ENUMAUDIO\n");
+	JOM(8, "VIDIOC_ENUMAUDIO\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_ENUMAUDOUT: {
-	static struct v4l2_audioout v4l2_audioout;
+	struct v4l2_audioout v4l2_audioout;
 
-	JOT(8, "VIDIOC_ENUMAUDOUT\n");
+	JOM(8, "VIDIOC_ENUMAUDOUT\n");
 
 	if (0 != copy_from_user(&v4l2_audioout, (void __user *)arg, \
 					sizeof(struct v4l2_audioout))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (0 != v4l2_audioout.index)
+	if (0 != v4l2_audioout.index) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
 	memset(&v4l2_audioout, 0, sizeof(struct v4l2_audioout));
 	v4l2_audioout.index = 0;
 	strcpy(&v4l2_audioout.name[0], "Soundtrack");
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_audioout, \
 					sizeof(struct v4l2_audioout))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_QUERYCTRL: {
-	static int i1;
-	static struct v4l2_queryctrl v4l2_queryctrl;
+	int i1;
+	struct v4l2_queryctrl v4l2_queryctrl;
 
-	JOT(8, "VIDIOC_QUERYCTRL\n");
+	JOM(8, "VIDIOC_QUERYCTRL\n");
 
 	if (0 != copy_from_user(&v4l2_queryctrl, (void __user *)arg, \
 					sizeof(struct v4l2_queryctrl))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
 	i1 = 0;
 	while (0xFFFFFFFF != easycap_control[i1].id) {
 		if (easycap_control[i1].id == v4l2_queryctrl.id) {
-			JOT(8, "VIDIOC_QUERYCTRL  %s=easycap_control[%i]" \
+			JOM(8, "VIDIOC_QUERYCTRL  %s=easycap_control[%i]" \
 				".name\n", &easycap_control[i1].name[0], i1);
 			memcpy(&v4l2_queryctrl, &easycap_control[i1], \
 						sizeof(struct v4l2_queryctrl));
@@ -1062,127 +1312,137 @@
 		i1++;
 	}
 	if (0xFFFFFFFF == easycap_control[i1].id) {
-		JOT(8, "%i=index: exhausts controls\n", i1);
+		JOM(8, "%i=index: exhausts controls\n", i1);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
 	if (0 != copy_to_user((void __user *)arg, &v4l2_queryctrl, \
 					sizeof(struct v4l2_queryctrl))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_QUERYMENU: {
-	JOT(8, "VIDIOC_QUERYMENU unsupported\n");
+	JOM(8, "VIDIOC_QUERYMENU unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
-	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_CTRL: {
-	static struct v4l2_control v4l2_control;
+	struct v4l2_control *pv4l2_control;
 
-	JOT(8, "VIDIOC_G_CTRL\n");
-
-	if (0 != copy_from_user(&v4l2_control, (void __user *)arg, \
+	JOM(8, "VIDIOC_G_CTRL\n");
+	pv4l2_control = kzalloc(sizeof(struct v4l2_control), GFP_KERNEL);
+	if (!pv4l2_control) {
+		SAM("ERROR: out of memory\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ENOMEM;
+	}
+	if (0 != copy_from_user(pv4l2_control, (void __user *)arg, \
 					sizeof(struct v4l2_control))) {
-		POUT;
+		kfree(pv4l2_control);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	switch (v4l2_control.id) {
+	switch (pv4l2_control->id) {
 	case V4L2_CID_BRIGHTNESS: {
-		v4l2_control.value = peasycap->brightness;
-		JOT(8, "user enquires brightness: %i\n", v4l2_control.value);
+		pv4l2_control->value = peasycap->brightness;
+		JOM(8, "user enquires brightness: %i\n", pv4l2_control->value);
 		break;
 	}
 	case V4L2_CID_CONTRAST: {
-		v4l2_control.value = peasycap->contrast;
-		JOT(8, "user enquires contrast: %i\n", v4l2_control.value);
+		pv4l2_control->value = peasycap->contrast;
+		JOM(8, "user enquires contrast: %i\n", pv4l2_control->value);
 		break;
 	}
 	case V4L2_CID_SATURATION: {
-		v4l2_control.value = peasycap->saturation;
-		JOT(8, "user enquires saturation: %i\n", v4l2_control.value);
+		pv4l2_control->value = peasycap->saturation;
+		JOM(8, "user enquires saturation: %i\n", pv4l2_control->value);
 		break;
 	}
 	case V4L2_CID_HUE: {
-		v4l2_control.value = peasycap->hue;
-		JOT(8, "user enquires hue: %i\n", v4l2_control.value);
+		pv4l2_control->value = peasycap->hue;
+		JOM(8, "user enquires hue: %i\n", pv4l2_control->value);
 		break;
 	}
 	case V4L2_CID_AUDIO_VOLUME: {
-		v4l2_control.value = peasycap->volume;
-		JOT(8, "user enquires volume: %i\n", v4l2_control.value);
+		pv4l2_control->value = peasycap->volume;
+		JOM(8, "user enquires volume: %i\n", pv4l2_control->value);
 		break;
 	}
 	case V4L2_CID_AUDIO_MUTE: {
 		if (1 == peasycap->mute)
-			v4l2_control.value = true;
+			pv4l2_control->value = true;
 		else
-			v4l2_control.value = false;
-		JOT(8, "user enquires mute: %i\n", v4l2_control.value);
+			pv4l2_control->value = false;
+		JOM(8, "user enquires mute: %i\n", pv4l2_control->value);
 		break;
 	}
 	default: {
-		SAY("ERROR: unknown V4L2 control: 0x%08X=id\n", \
-							v4l2_control.id);
-		explain_cid(v4l2_control.id);
+		SAM("ERROR: unknown V4L2 control: 0x%08X=id\n", \
+							pv4l2_control->id);
+		kfree(pv4l2_control);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
 	}
-	if (0 != copy_to_user((void __user *)arg, &v4l2_control, \
+	if (0 != copy_to_user((void __user *)arg, pv4l2_control, \
 					sizeof(struct v4l2_control))) {
-		POUT;
+		kfree(pv4l2_control);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
+	kfree(pv4l2_control);
 	break;
 }
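The VIDIOC_G_CTRL handler above (and VIDIOC_G_FMT further down) takes its large V4L2 structure off the stack with kzalloc() and releases it on every exit path. A common alternative is a single goto-based cleanup label, sketched here in userspace terms with calloc()/free() standing in for kzalloc()/kfree() and NULL pointers standing in for copy_from_user()/copy_to_user() failures:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct big { char payload[208]; };	/* a deliberately large request structure */

static int handle(const void *user_src, void *user_dst)
{
	struct big *tmp;
	int rc = 0;

	tmp = calloc(1, sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;
	if (!user_src) {		/* copy_from_user() failed */
		rc = -EFAULT;
		goto out;
	}
	memcpy(tmp, user_src, sizeof(*tmp));
	/* ... act on the request ... */
	if (!user_dst) {		/* copy_to_user() failed */
		rc = -EFAULT;
		goto out;
	}
	memcpy(user_dst, tmp, sizeof(*tmp));
out:
	free(tmp);			/* one cleanup path instead of one per return */
	return rc;
}

int main(void)
{
	struct big in = { { 0 } }, out = { { 0 } };

	return handle(&in, &out) ? 1 : 0;
}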
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 #if defined(VIDIOC_S_CTRL_OLD)
 case VIDIOC_S_CTRL_OLD: {
-	JOT(8, "VIDIOC_S_CTRL_OLD required at least for xawtv\n");
+	JOM(8, "VIDIOC_S_CTRL_OLD required at least for xawtv\n");
 }
 #endif /*VIDIOC_S_CTRL_OLD*/
 case VIDIOC_S_CTRL:
 	{
-	static struct v4l2_control v4l2_control;
+	struct v4l2_control v4l2_control;
 
-	JOT(8, "VIDIOC_S_CTRL\n");
+	JOM(8, "VIDIOC_S_CTRL\n");
 
 	if (0 != copy_from_user(&v4l2_control, (void __user *)arg, \
 					sizeof(struct v4l2_control))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
 	switch (v4l2_control.id) {
 	case V4L2_CID_BRIGHTNESS: {
-		JOT(8, "user requests brightness %i\n", v4l2_control.value);
+		JOM(8, "user requests brightness %i\n", v4l2_control.value);
 		if (0 != adjust_brightness(peasycap, v4l2_control.value))
 			;
 		break;
 	}
 	case V4L2_CID_CONTRAST: {
-		JOT(8, "user requests contrast %i\n", v4l2_control.value);
+		JOM(8, "user requests contrast %i\n", v4l2_control.value);
 		if (0 != adjust_contrast(peasycap, v4l2_control.value))
 			;
 		break;
 	}
 	case V4L2_CID_SATURATION: {
-		JOT(8, "user requests saturation %i\n", v4l2_control.value);
+		JOM(8, "user requests saturation %i\n", v4l2_control.value);
 		if (0 != adjust_saturation(peasycap, v4l2_control.value))
 			;
 		break;
 	}
 	case V4L2_CID_HUE: {
-		JOT(8, "user requests hue %i\n", v4l2_control.value);
+		JOM(8, "user requests hue %i\n", v4l2_control.value);
 		if (0 != adjust_hue(peasycap, v4l2_control.value))
 			;
 		break;
 	}
 	case V4L2_CID_AUDIO_VOLUME: {
-		JOT(8, "user requests volume %i\n", v4l2_control.value);
+		JOM(8, "user requests volume %i\n", v4l2_control.value);
 		if (0 != adjust_volume(peasycap, v4l2_control.value))
 			;
 		break;
@@ -1190,40 +1450,41 @@
 	case V4L2_CID_AUDIO_MUTE: {
 		int mute;
 
-		JOT(8, "user requests mute %i\n", v4l2_control.value);
+		JOM(8, "user requests mute %i\n", v4l2_control.value);
 		if (true == v4l2_control.value)
 			mute = 1;
 		else
 			mute = 0;
 
 		if (0 != adjust_mute(peasycap, mute))
-			SAY("WARNING: failed to adjust mute to %i\n", mute);
+			SAM("WARNING: failed to adjust mute to %i\n", mute);
 		break;
 	}
 	default: {
-		SAY("ERROR: unknown V4L2 control: 0x%08X=id\n", \
+		SAM("ERROR: unknown V4L2 control: 0x%08X=id\n", \
 							v4l2_control.id);
-		explain_cid(v4l2_control.id);
-	return -EINVAL;
-			}
-		}
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EINVAL;
+	}
+	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_S_EXT_CTRLS: {
-	JOT(8, "VIDIOC_S_EXT_CTRLS unsupported\n");
+	JOM(8, "VIDIOC_S_EXT_CTRLS unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_ENUM_FMT: {
-	static __u32 index;
-	static struct v4l2_fmtdesc v4l2_fmtdesc;
+	__u32 index;
+	struct v4l2_fmtdesc v4l2_fmtdesc;
 
-	JOT(8, "VIDIOC_ENUM_FMT\n");
+	JOM(8, "VIDIOC_ENUM_FMT\n");
 
 	if (0 != copy_from_user(&v4l2_fmtdesc, (void __user *)arg, \
 					sizeof(struct v4l2_fmtdesc))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
@@ -1238,117 +1499,327 @@
 		v4l2_fmtdesc.flags = 0;
 		strcpy(&v4l2_fmtdesc.description[0], "uyvy");
 		v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_UYVY;
-		JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
 		break;
 	}
 	case 1: {
 		v4l2_fmtdesc.flags = 0;
 		strcpy(&v4l2_fmtdesc.description[0], "yuy2");
 		v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_YUYV;
-		JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
 		break;
 	}
 	case 2: {
 		v4l2_fmtdesc.flags = 0;
 		strcpy(&v4l2_fmtdesc.description[0], "rgb24");
 		v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB24;
-		JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
 		break;
 	}
 	case 3: {
 		v4l2_fmtdesc.flags = 0;
 		strcpy(&v4l2_fmtdesc.description[0], "rgb32");
 		v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_RGB32;
-		JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
 		break;
 	}
 	case 4: {
 		v4l2_fmtdesc.flags = 0;
 		strcpy(&v4l2_fmtdesc.description[0], "bgr24");
 		v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR24;
-		JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
 		break;
 	}
 	case 5: {
 		v4l2_fmtdesc.flags = 0;
 		strcpy(&v4l2_fmtdesc.description[0], "bgr32");
 		v4l2_fmtdesc.pixelformat = V4L2_PIX_FMT_BGR32;
-		JOT(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
+		JOM(8, "%i=index: %s\n", index, &v4l2_fmtdesc.description[0]);
 		break;
 	}
 	default: {
-		JOT(8, "%i=index: exhausts formats\n", index);
+		JOM(8, "%i=index: exhausts formats\n", index);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
 	}
 	if (0 != copy_to_user((void __user *)arg, &v4l2_fmtdesc, \
 					sizeof(struct v4l2_fmtdesc))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ *  THE RESPONSE TO VIDIOC_ENUM_FRAMESIZES MUST BE CONDITIONED ON THE
+ *  CURRENT STANDARD, BECAUSE THAT IS WHAT gstreamer EXPECTS.  BEWARE.
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_ENUM_FRAMESIZES: {
-	JOT(8, "VIDIOC_ENUM_FRAMESIZES unsupported\n");
-	return -EINVAL;
+	__u32 index;
+	struct v4l2_frmsizeenum v4l2_frmsizeenum;
+
+	JOM(8, "VIDIOC_ENUM_FRAMESIZES\n");
+
+	if (0 != copy_from_user(&v4l2_frmsizeenum, (void __user *)arg, \
+					sizeof(struct v4l2_frmsizeenum))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EFAULT;
+	}
+
+	index = v4l2_frmsizeenum.index;
+
+	v4l2_frmsizeenum.type = (__u32) V4L2_FRMSIZE_TYPE_DISCRETE;
+
+	if (true == peasycap->ntsc) {
+		switch (index) {
+		case 0: {
+			v4l2_frmsizeenum.discrete.width = 640;
+			v4l2_frmsizeenum.discrete.height = 480;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						 discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 1: {
+			v4l2_frmsizeenum.discrete.width = 320;
+			v4l2_frmsizeenum.discrete.height = 240;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 2: {
+			v4l2_frmsizeenum.discrete.width = 720;
+			v4l2_frmsizeenum.discrete.height = 480;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 3: {
+			v4l2_frmsizeenum.discrete.width = 360;
+			v4l2_frmsizeenum.discrete.height = 240;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		default: {
+			JOM(8, "%i=index: exhausts framesizes\n", index);
+			mutex_unlock(&easycap_dongle[kd].mutex_video);
+			return -EINVAL;
+		}
+		}
+	} else {
+		switch (index) {
+		case 0: {
+			v4l2_frmsizeenum.discrete.width = 640;
+			v4l2_frmsizeenum.discrete.height = 480;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 1: {
+			v4l2_frmsizeenum.discrete.width = 320;
+			v4l2_frmsizeenum.discrete.height = 240;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 2: {
+			v4l2_frmsizeenum.discrete.width = 704;
+			v4l2_frmsizeenum.discrete.height = 576;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 3: {
+			v4l2_frmsizeenum.discrete.width = 720;
+			v4l2_frmsizeenum.discrete.height = 576;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		case 4: {
+			v4l2_frmsizeenum.discrete.width = 360;
+			v4l2_frmsizeenum.discrete.height = 288;
+			JOM(8, "%i=index: %ix%i\n", index, \
+					(int)(v4l2_frmsizeenum.\
+						discrete.width), \
+					(int)(v4l2_frmsizeenum.\
+						discrete.height));
+			break;
+		}
+		default: {
+			JOM(8, "%i=index: exhausts framesizes\n", index);
+			mutex_unlock(&easycap_dongle[kd].mutex_video);
+			return -EINVAL;
+		}
+		}
+	}
+	if (0 != copy_to_user((void __user *)arg, &v4l2_frmsizeenum, \
+					sizeof(struct v4l2_frmsizeenum))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EFAULT;
+	}
+	break;
 }
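The two switch statements above enumerate four discrete NTSC sizes (640x480, 320x240, 720x480, 360x240) and five PAL sizes (640x480, 320x240, 704x576, 720x576, 360x288). A table-driven equivalent, for comparison only (the driver keeps the switches):

#include <stdio.h>

struct frmsize { int width, height; };

static const struct frmsize ntsc_sizes[] = {
	{ 640, 480 }, { 320, 240 }, { 720, 480 }, { 360, 240 },
};
static const struct frmsize pal_sizes[] = {
	{ 640, 480 }, { 320, 240 }, { 704, 576 }, { 720, 576 }, { 360, 288 },
};

static int enum_framesize(int ntsc, unsigned int index, struct frmsize *out)
{
	const struct frmsize *table = ntsc ? ntsc_sizes : pal_sizes;
	unsigned int count = ntsc ? sizeof(ntsc_sizes) / sizeof(ntsc_sizes[0])
				  : sizeof(pal_sizes) / sizeof(pal_sizes[0]);

	if (index >= count)
		return -1;		/* exhausts framesizes */
	*out = table[index];
	return 0;
}

int main(void)
{
	struct frmsize f;
	unsigned int i;

	for (i = 0; enum_framesize(0, i, &f) == 0; i++)	/* PAL list */
		printf("%u: %dx%d\n", i, f.width, f.height);
	return 0;
}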
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ *  THE RESPONSE TO VIDIOC_ENUM_FRAMEINTERVALS MUST BE CONDITIONED ON THE
+ *  CURRENT STANDARD, BECAUSE THAT IS WHAT gstreamer EXPECTS.  BEWARE.
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_ENUM_FRAMEINTERVALS: {
-	JOT(8, "VIDIOC_ENUM_FRAME_INTERVALS unsupported\n");
-	return -EINVAL;
+	__u32 index;
+	int denominator;
+	struct v4l2_frmivalenum v4l2_frmivalenum;
+
+	JOM(8, "VIDIOC_ENUM_FRAMEINTERVALS\n");
+
+	if (peasycap->fps)
+		denominator = peasycap->fps;
+	else {
+		if (true == peasycap->ntsc)
+			denominator = 30;
+		else
+			denominator = 25;
+	}
+
+	if (0 != copy_from_user(&v4l2_frmivalenum, (void __user *)arg, \
+					sizeof(struct v4l2_frmivalenum))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EFAULT;
+	}
+
+	index = v4l2_frmivalenum.index;
+
+	v4l2_frmivalenum.type = (__u32) V4L2_FRMIVAL_TYPE_DISCRETE;
+
+	switch (index) {
+	case 0: {
+		v4l2_frmivalenum.discrete.numerator = 1;
+		v4l2_frmivalenum.discrete.denominator = denominator;
+		JOM(8, "%i=index: %i/%i\n", index, \
+			(int)(v4l2_frmivalenum.discrete.numerator), \
+			(int)(v4l2_frmivalenum.discrete.denominator));
+		break;
+	}
+	case 1: {
+		v4l2_frmivalenum.discrete.numerator = 1;
+		v4l2_frmivalenum.discrete.denominator = denominator/5;
+		JOM(8, "%i=index: %i/%i\n", index, \
+			(int)(v4l2_frmivalenum.discrete.numerator), \
+			(int)(v4l2_frmivalenum.discrete.denominator));
+		break;
+	}
+	default: {
+		JOM(8, "%i=index: exhausts frameintervals\n", index);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EINVAL;
+	}
+	}
+	if (0 != copy_to_user((void __user *)arg, &v4l2_frmivalenum, \
+					sizeof(struct v4l2_frmivalenum))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EFAULT;
+	}
+	break;
 }
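For example, with the PAL denominator of 25 the two intervals reported are 1/25 s and 1/5 s, and with the NTSC denominator of 30 they are 1/30 s and 1/6 s; the second entry in each case comes from the exact division denominator/5.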
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_FMT: {
-	static struct v4l2_format v4l2_format;
-	static struct v4l2_pix_format v4l2_pix_format;
+	struct v4l2_format *pv4l2_format;
+	struct v4l2_pix_format *pv4l2_pix_format;
 
-	JOT(8, "VIDIOC_G_FMT\n");
-
-	if (0 != copy_from_user(&v4l2_format, (void __user *)arg, \
+	JOM(8, "VIDIOC_G_FMT\n");
+	pv4l2_format = kzalloc(sizeof(struct v4l2_format), GFP_KERNEL);
+	if (!pv4l2_format) {
+		SAM("ERROR: out of memory\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ENOMEM;
+	}
+	pv4l2_pix_format = kzalloc(sizeof(struct v4l2_pix_format), GFP_KERNEL);
+	if (!pv4l2_pix_format) {
+		SAM("ERROR: out of memory\n");
+		kfree(pv4l2_format);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ENOMEM;
+	}
+	if (0 != copy_from_user(pv4l2_format, (void __user *)arg, \
 					sizeof(struct v4l2_format))) {
-		POUT;
+		kfree(pv4l2_format);
+		kfree(pv4l2_pix_format);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (v4l2_format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-		POUT;
+	if (pv4l2_format->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		kfree(pv4l2_format);
+		kfree(pv4l2_pix_format);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
 
-	memset(&v4l2_pix_format, 0, sizeof(struct v4l2_pix_format));
-	v4l2_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	memcpy(&(v4l2_format.fmt.pix), \
-			 &(easycap_format[peasycap->format_offset]\
-			.v4l2_format.fmt.pix), sizeof(v4l2_pix_format));
-	JOT(8, "user is told: %s\n", \
+	memset(pv4l2_pix_format, 0, sizeof(struct v4l2_pix_format));
+	pv4l2_format->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	memcpy(&pv4l2_format->fmt.pix, \
+			 &easycap_format[peasycap->format_offset]\
+			.v4l2_format.fmt.pix, sizeof(struct v4l2_pix_format));
+	JOM(8, "user is told: %s\n", \
 			&easycap_format[peasycap->format_offset].name[0]);
 
-	if (0 != copy_to_user((void __user *)arg, &v4l2_format, \
+	if (0 != copy_to_user((void __user *)arg, pv4l2_format, \
 					sizeof(struct v4l2_format))) {
-		POUT;
+		kfree(pv4l2_format);
+		kfree(pv4l2_pix_format);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
+	kfree(pv4l2_format);
+	kfree(pv4l2_pix_format);
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_TRY_FMT:
 case VIDIOC_S_FMT: {
-	static struct v4l2_format v4l2_format;
-	static struct v4l2_pix_format v4l2_pix_format;
-	static bool try;
-	static int best_format;
+	struct v4l2_format v4l2_format;
+	struct v4l2_pix_format v4l2_pix_format;
+	bool try;
+	int best_format;
 
 	if (VIDIOC_TRY_FMT == cmd) {
-		JOT(8, "VIDIOC_TRY_FMT\n");
+		JOM(8, "VIDIOC_TRY_FMT\n");
 		try = true;
 	} else {
-		JOT(8, "VIDIOC_S_FMT\n");
+		JOM(8, "VIDIOC_S_FMT\n");
 		try = false;
 	}
 
 	if (0 != copy_from_user(&v4l2_format, (void __user *)arg, \
 					sizeof(struct v4l2_format))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
@@ -1359,7 +1830,12 @@
 					v4l2_format.fmt.pix.field, \
 					try);
 	if (0 > best_format) {
-		JOT(8, "WARNING: adjust_format() returned %i\n", best_format);
+		if (-EBUSY == best_format) {
+			mutex_unlock(&easycap_dongle[kd].mutex_video);
+			return -EBUSY;
+		}
+		JOM(8, "WARNING: adjust_format() returned %i\n", best_format);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -ENOENT;
 	}
 /*...........................................................................*/
@@ -1368,29 +1844,29 @@
 
 	memcpy(&(v4l2_format.fmt.pix), &(easycap_format[best_format]\
 			.v4l2_format.fmt.pix), sizeof(v4l2_pix_format));
-	JOT(8, "user is told: %s\n", &easycap_format[best_format].name[0]);
+	JOM(8, "user is told: %s\n", &easycap_format[best_format].name[0]);
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_format, \
 					sizeof(struct v4l2_format))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_CROPCAP: {
-	static struct v4l2_cropcap v4l2_cropcap;
+	struct v4l2_cropcap v4l2_cropcap;
 
-	JOT(8, "VIDIOC_CROPCAP\n");
+	JOM(8, "VIDIOC_CROPCAP\n");
 
 	if (0 != copy_from_user(&v4l2_cropcap, (void __user *)arg, \
 					sizeof(struct v4l2_cropcap))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
 	if (v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
-		JOT(8, "v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
+		JOM(8, "v4l2_cropcap.type != V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
 
 	memset(&v4l2_cropcap, 0, sizeof(struct v4l2_cropcap));
 	v4l2_cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1405,11 +1881,11 @@
 	v4l2_cropcap.pixelaspect.numerator = 1;
 	v4l2_cropcap.pixelaspect.denominator = 1;
 
-	JOT(8, "user is told: %ix%i\n", peasycap->width, peasycap->height);
+	JOM(8, "user is told: %ix%i\n", peasycap->width, peasycap->height);
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_cropcap, \
 					sizeof(struct v4l2_cropcap))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
@@ -1417,13 +1893,15 @@
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_CROP:
 case VIDIOC_S_CROP: {
-	JOT(8, "VIDIOC_G_CROP|VIDIOC_S_CROP  unsupported\n");
+	JOM(8, "VIDIOC_G_CROP|VIDIOC_S_CROP  unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_QUERYSTD: {
-	JOT(8, "VIDIOC_QUERYSTD: " \
+	JOM(8, "VIDIOC_QUERYSTD: " \
 			"EasyCAP is incapable of detecting standard\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 	break;
 }
@@ -1436,16 +1914,16 @@
  */
 /*---------------------------------------------------------------------------*/
 case VIDIOC_ENUMSTD: {
-	static int last0 = -1, last1 = -1, last2 = -1, last3 = -1;
-	static struct v4l2_standard v4l2_standard;
-	static __u32 index;
-	static struct easycap_standard const *peasycap_standard;
+	int last0 = -1, last1 = -1, last2 = -1, last3 = -1;
+	struct v4l2_standard v4l2_standard;
+	__u32 index;
+	struct easycap_standard const *peasycap_standard;
 
-	JOT(8, "VIDIOC_ENUMSTD\n");
+	JOM(8, "VIDIOC_ENUMSTD\n");
 
 	if (0 != copy_from_user(&v4l2_standard, (void __user *)arg, \
 					sizeof(struct v4l2_standard))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	index = v4l2_standard.index;
@@ -1466,10 +1944,11 @@
 		peasycap_standard++;
 	}
 	if (0xFFFF == peasycap_standard->mask) {
-		JOT(8, "%i=index: exhausts standards\n", index);
+		JOM(8, "%i=index: exhausts standards\n", index);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
-	JOT(8, "%i=index: %s\n", index, \
+	JOM(8, "%i=index: %s\n", index, \
 				&(peasycap_standard->v4l2_standard.name[0]));
 	memcpy(&v4l2_standard, &(peasycap_standard->v4l2_standard), \
 					sizeof(struct v4l2_standard));
@@ -1478,87 +1957,101 @@
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_standard, \
 					sizeof(struct v4l2_standard))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_STD: {
-	static v4l2_std_id std_id;
-	static struct easycap_standard const *peasycap_standard;
+	v4l2_std_id std_id;
+	struct easycap_standard const *peasycap_standard;
 
-	JOT(8, "VIDIOC_G_STD\n");
+	JOM(8, "VIDIOC_G_STD\n");
+
+	if (0 > peasycap->standard_offset) {
+		JOM(8, "%i=peasycap->standard_offset\n", \
+					peasycap->standard_offset);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EBUSY;
+	}
 
 	if (0 != copy_from_user(&std_id, (void __user *)arg, \
 						sizeof(v4l2_std_id))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
 	peasycap_standard = &easycap_standard[peasycap->standard_offset];
 	std_id = peasycap_standard->v4l2_standard.id;
 
-	JOT(8, "user is told: %s\n", \
+	JOM(8, "user is told: %s\n", \
 				&peasycap_standard->v4l2_standard.name[0]);
 
 	if (0 != copy_to_user((void __user *)arg, &std_id, \
 						sizeof(v4l2_std_id))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_S_STD: {
-	static v4l2_std_id std_id;
-	static int rc;
+	v4l2_std_id std_id;
+	int rc;
 
-	JOT(8, "VIDIOC_S_STD\n");
+	JOM(8, "VIDIOC_S_STD\n");
 
 	if (0 != copy_from_user(&std_id, (void __user *)arg, \
 						sizeof(v4l2_std_id))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
+	JOM(8, "User requests standard: 0x%08X%08X\n", \
+		(int)((std_id & (((v4l2_std_id)0xFFFFFFFF) << 32)) >> 32), \
+		(int)(std_id & ((v4l2_std_id)0xFFFFFFFF)));
+
 	rc = adjust_standard(peasycap, std_id);
 	if (0 > rc) {
-		JOT(8, "WARNING: adjust_standard() returned %i\n", rc);
+		JOM(8, "WARNING: adjust_standard() returned %i\n", rc);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -ENOENT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_REQBUFS: {
-	static int nbuffers;
-	static struct v4l2_requestbuffers v4l2_requestbuffers;
+	int nbuffers;
+	struct v4l2_requestbuffers v4l2_requestbuffers;
 
-	JOT(8, "VIDIOC_REQBUFS\n");
+	JOM(8, "VIDIOC_REQBUFS\n");
 
 	if (0 != copy_from_user(&v4l2_requestbuffers, (void __user *)arg, \
 				sizeof(struct v4l2_requestbuffers))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (v4l2_requestbuffers.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (v4l2_requestbuffers.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
 	if (v4l2_requestbuffers.memory != V4L2_MEMORY_MMAP) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
 	nbuffers = v4l2_requestbuffers.count;
-	JOT(8, "                   User requests %i buffers ...\n", nbuffers);
+	JOM(8, "                   User requests %i buffers ...\n", nbuffers);
 	if (nbuffers < 2)
 		nbuffers = 2;
 	if (nbuffers > FRAME_BUFFER_MANY)
 		nbuffers = FRAME_BUFFER_MANY;
 	if (v4l2_requestbuffers.count == nbuffers) {
-		JOT(8, "                   ... agree to  %i buffers\n", \
+		JOM(8, "                   ... agree to  %i buffers\n", \
 								nbuffers);
 	} else {
-		JOT(8, "                  ... insist on  %i buffers\n", \
+		JOM(8, "                  ... insist on  %i buffers\n", \
 								nbuffers);
 		v4l2_requestbuffers.count = nbuffers;
 	}
@@ -1566,32 +2059,35 @@
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_requestbuffers, \
 				sizeof(struct v4l2_requestbuffers))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_QUERYBUF: {
-	static __u32 index;
-	static struct v4l2_buffer v4l2_buffer;
+	__u32 index;
+	struct v4l2_buffer v4l2_buffer;
 
-	JOT(8, "VIDIOC_QUERYBUF\n");
+	JOM(8, "VIDIOC_QUERYBUF\n");
 
 	if (peasycap->video_eof) {
-		JOT(8, "returning -1 because  %i=video_eof\n", \
+		JOM(8, "returning -EIO because  %i=video_eof\n", \
 							peasycap->video_eof);
-		return -1;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -EIO;
 	}
 
 	if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
 					sizeof(struct v4l2_buffer))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
 	index = v4l2_buffer.index;
 	if (index < 0 || index >= peasycap->frame_buffer_many)
 		return -EINVAL;
@@ -1602,49 +2098,55 @@
 	v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | \
 						peasycap->done[index] | \
 						peasycap->queued[index];
-	v4l2_buffer.field = peasycap->field;
+	v4l2_buffer.field = V4L2_FIELD_NONE;
 	v4l2_buffer.memory = V4L2_MEMORY_MMAP;
 	v4l2_buffer.m.offset = index * FRAME_BUFFER_SIZE;
 	v4l2_buffer.length = FRAME_BUFFER_SIZE;
 
-	JOT(16, "  %10i=index\n", v4l2_buffer.index);
-	JOT(16, "  0x%08X=type\n", v4l2_buffer.type);
-	JOT(16, "  %10i=bytesused\n", v4l2_buffer.bytesused);
-	JOT(16, "  0x%08X=flags\n", v4l2_buffer.flags);
-	JOT(16, "  %10i=field\n", v4l2_buffer.field);
-	JOT(16, "  %10li=timestamp.tv_usec\n", \
+	JOM(16, "  %10i=index\n", v4l2_buffer.index);
+	JOM(16, "  0x%08X=type\n", v4l2_buffer.type);
+	JOM(16, "  %10i=bytesused\n", v4l2_buffer.bytesused);
+	JOM(16, "  0x%08X=flags\n", v4l2_buffer.flags);
+	JOM(16, "  %10i=field\n", v4l2_buffer.field);
+	JOM(16, "  %10li=timestamp.tv_usec\n", \
 					 (long)v4l2_buffer.timestamp.tv_usec);
-	JOT(16, "  %10i=sequence\n", v4l2_buffer.sequence);
-	JOT(16, "  0x%08X=memory\n", v4l2_buffer.memory);
-	JOT(16, "  %10i=m.offset\n", v4l2_buffer.m.offset);
-	JOT(16, "  %10i=length\n", v4l2_buffer.length);
+	JOM(16, "  %10i=sequence\n", v4l2_buffer.sequence);
+	JOM(16, "  0x%08X=memory\n", v4l2_buffer.memory);
+	JOM(16, "  %10i=m.offset\n", v4l2_buffer.m.offset);
+	JOM(16, "  %10i=length\n", v4l2_buffer.length);
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
 					sizeof(struct v4l2_buffer))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_QBUF: {
-	static struct v4l2_buffer v4l2_buffer;
+	struct v4l2_buffer v4l2_buffer;
 
-	JOT(8, "VIDIOC_QBUF\n");
+	JOM(8, "VIDIOC_QBUF\n");
 
 	if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
 					sizeof(struct v4l2_buffer))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
-	if (v4l2_buffer.memory != V4L2_MEMORY_MMAP)
+	}
+	if (v4l2_buffer.memory != V4L2_MEMORY_MMAP) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
 	if (v4l2_buffer.index < 0 || \
-		 (v4l2_buffer.index >= peasycap->frame_buffer_many))
+		 (v4l2_buffer.index >= peasycap->frame_buffer_many)) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
 	v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED;
 
 	peasycap->done[v4l2_buffer.index]   = 0;
@@ -1652,11 +2154,11 @@
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
 					sizeof(struct v4l2_buffer))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	JOT(8, ".....   user queueing frame buffer %i\n", \
+	JOM(8, ".....   user queueing frame buffer %i\n", \
 						(int)v4l2_buffer.index);
 
 	peasycap->frame_lock = 0;
@@ -1667,36 +2169,60 @@
 case VIDIOC_DQBUF:
 	{
 #if defined(AUDIOTIME)
-	static struct signed_div_result sdr;
-	static long long int above, below, dnbydt, fudge, sll;
-	static unsigned long long int ull;
-	static struct timeval timeval0;
+	struct signed_div_result sdr;
+	long long int above, below, dnbydt, fudge, sll;
+	unsigned long long int ull;
+	struct timeval timeval8;
 	struct timeval timeval1;
 #endif /*AUDIOTIME*/
-	static struct timeval timeval, timeval2;
-	static int i, j;
-	static struct v4l2_buffer v4l2_buffer;
+	struct timeval timeval, timeval2;
+	int i, j;
+	struct v4l2_buffer v4l2_buffer;
+	int rcdq;
+	__u16 input;
 
-	JOT(8, "VIDIOC_DQBUF\n");
+	JOM(8, "VIDIOC_DQBUF\n");
 
 	if ((peasycap->video_idle) || (peasycap->video_eof)) {
-		JOT(8, "returning -EIO because  " \
+		JOM(8, "returning -EIO because  " \
 				"%i=video_idle  %i=video_eof\n", \
 				peasycap->video_idle, peasycap->video_eof);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EIO;
 	}
 
 	if (0 != copy_from_user(&v4l2_buffer, (void __user *)arg, \
 					sizeof(struct v4l2_buffer))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+	if (v4l2_buffer.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
+	}
+
+	if (true == peasycap->offerfields) {
+		/*-----------------------------------------------------------*/
+		/*
+		 *  IN ITS 50 "fps" MODE tvtime SEEMS ALWAYS TO REQUEST
+		 *  V4L2_FIELD_BOTTOM
+		*/
+		/*-----------------------------------------------------------*/
+		if (V4L2_FIELD_TOP == v4l2_buffer.field)
+			JOM(8, "user wants V4L2_FIELD_TOP\n");
+		else if (V4L2_FIELD_BOTTOM == v4l2_buffer.field)
+			JOM(8, "user wants V4L2_FIELD_BOTTOM\n");
+		else if (V4L2_FIELD_ANY == v4l2_buffer.field)
+			JOM(8, "user wants V4L2_FIELD_ANY\n");
+		else
+			JOM(8, "user wants V4L2_FIELD_...UNKNOWN: %i\n", \
+							v4l2_buffer.field);
+	}
 
 	if (!peasycap->video_isoc_streaming) {
-		JOT(16, "returning -EIO because video urbs not streaming\n");
+		JOM(16, "returning -EIO because video urbs not streaming\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EIO;
 	}
 /*---------------------------------------------------------------------------*/
@@ -1708,19 +2234,28 @@
 /*---------------------------------------------------------------------------*/
 
 	if (!peasycap->polled) {
-		if (-EIO == easycap_dqbuf(peasycap, 0))
-			return -EIO;
+		do {
+			rcdq = easycap_dqbuf(peasycap, 0);
+			if (-EIO == rcdq) {
+				JOM(8, "returning -EIO because " \
+						"dqbuf() returned -EIO\n");
+				mutex_unlock(&easycap_dongle[kd].mutex_video);
+				return -EIO;
+			}
+		} while (0 != rcdq);
 	} else {
-		if (peasycap->video_eof)
+		if (peasycap->video_eof) {
+			mutex_unlock(&easycap_dongle[kd].mutex_video);
 			return -EIO;
+		}
 	}
 	if (V4L2_BUF_FLAG_DONE != peasycap->done[peasycap->frame_read]) {
-		SAY("ERROR: V4L2_BUF_FLAG_DONE != 0x%08X\n", \
+		SAM("ERROR: V4L2_BUF_FLAG_DONE != 0x%08X\n", \
 					peasycap->done[peasycap->frame_read]);
 	}
 	peasycap->polled = 0;
 
-	if (!(isequence % 10)) {
+	if (!(peasycap->isequence % 10)) {
 		for (i = 0; i < 179; i++)
 			peasycap->merit[i] = peasycap->merit[i+1];
 		peasycap->merit[179] = merit_saa(peasycap->pusb_device);
@@ -1728,7 +2263,7 @@
 		for (i = 0; i < 180; i++)
 			j += peasycap->merit[i];
 		if (90 < j) {
-			SAY("easycap driver shutting down " \
+			SAM("easycap driver shutting down " \
 							"on condition blue\n");
 			peasycap->video_eof = 1; peasycap->audio_eof = 1;
 		}
@@ -1738,31 +2273,23 @@
 	v4l2_buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	v4l2_buffer.bytesused = peasycap->frame_buffer_used;
 	v4l2_buffer.flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
-	v4l2_buffer.field =  peasycap->field;
-	if (V4L2_FIELD_ALTERNATE == v4l2_buffer.field)
-		v4l2_buffer.field = \
-				0x000F & (peasycap->\
-				frame_buffer[peasycap->frame_read][0].kount);
+	if (true == peasycap->offerfields)
+		v4l2_buffer.field = V4L2_FIELD_BOTTOM;
+	else
+		v4l2_buffer.field = V4L2_FIELD_NONE;
 	do_gettimeofday(&timeval);
 	timeval2 = timeval;
 
 #if defined(AUDIOTIME)
 	if (!peasycap->timeval0.tv_sec) {
-		timeval0 = timeval;
+		timeval8 = timeval;
 		timeval1 = timeval;
 		timeval2 = timeval;
 		dnbydt = 192000;
-
-		if (mutex_lock_interruptible(&(peasycap->mutex_timeval0)))
-			return -ERESTARTSYS;
-		peasycap->timeval0 = timeval0;
-		mutex_unlock(&(peasycap->mutex_timeval0));
+		peasycap->timeval0 = timeval8;
 	} else {
-		if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
-			return -ERESTARTSYS;
 		dnbydt = peasycap->dnbydt;
 		timeval1 = peasycap->timeval1;
-		mutex_unlock(&(peasycap->mutex_timeval1));
 		above = dnbydt * MICROSECONDS(timeval, timeval1);
 		below = 192000;
 		sdr = signed_div(above, below);
@@ -1774,72 +2301,76 @@
 		timeval2.tv_usec = sdr.remainder;
 		timeval2.tv_sec = timeval1.tv_sec + sdr.quotient;
 	}
-	if (!(isequence % 500)) {
+	if (!(peasycap->isequence % 500)) {
 		fudge = ((long long int)(1000000)) * \
 				((long long int)(timeval.tv_sec - \
 						timeval2.tv_sec)) + \
 				(long long int)(timeval.tv_usec - \
-				timeval2.tv_usec);
+						timeval2.tv_usec);
 		sdr = signed_div(fudge, 1000);
 		sll = sdr.quotient;
 		ull = sdr.remainder;
 
-		SAY("%5lli.%-3lli=ms timestamp fudge\n", sll, ull);
+		SAM("%5lli.%-3lli=ms timestamp fudge\n", sll, ull);
 	}
 #endif /*AUDIOTIME*/
 
 	v4l2_buffer.timestamp = timeval2;
-	v4l2_buffer.sequence = isequence++;
+	v4l2_buffer.sequence = peasycap->isequence++;
 	v4l2_buffer.memory = V4L2_MEMORY_MMAP;
 	v4l2_buffer.m.offset = v4l2_buffer.index * FRAME_BUFFER_SIZE;
 	v4l2_buffer.length = FRAME_BUFFER_SIZE;
 
-	JOT(16, "  %10i=index\n", v4l2_buffer.index);
-	JOT(16, "  0x%08X=type\n", v4l2_buffer.type);
-	JOT(16, "  %10i=bytesused\n", v4l2_buffer.bytesused);
-	JOT(16, "  0x%08X=flags\n", v4l2_buffer.flags);
-	JOT(16, "  %10i=field\n", v4l2_buffer.field);
-	JOT(16, "  %10li=timestamp.tv_usec\n", \
+	JOM(16, "  %10i=index\n", v4l2_buffer.index);
+	JOM(16, "  0x%08X=type\n", v4l2_buffer.type);
+	JOM(16, "  %10i=bytesused\n", v4l2_buffer.bytesused);
+	JOM(16, "  0x%08X=flags\n", v4l2_buffer.flags);
+	JOM(16, "  %10i=field\n", v4l2_buffer.field);
+	JOM(16, "  %10li=timestamp.tv_sec\n", \
+					(long)v4l2_buffer.timestamp.tv_sec);
+	JOM(16, "  %10li=timestamp.tv_usec\n", \
 					(long)v4l2_buffer.timestamp.tv_usec);
-	JOT(16, "  %10i=sequence\n", v4l2_buffer.sequence);
-	JOT(16, "  0x%08X=memory\n", v4l2_buffer.memory);
-	JOT(16, "  %10i=m.offset\n", v4l2_buffer.m.offset);
-	JOT(16, "  %10i=length\n", v4l2_buffer.length);
+	JOM(16, "  %10i=sequence\n", v4l2_buffer.sequence);
+	JOM(16, "  0x%08X=memory\n", v4l2_buffer.memory);
+	JOM(16, "  %10i=m.offset\n", v4l2_buffer.m.offset);
+	JOM(16, "  %10i=length\n", v4l2_buffer.length);
 
 	if (0 != copy_to_user((void __user *)arg, &v4l2_buffer, \
 						sizeof(struct v4l2_buffer))) {
-		POUT;
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	JOT(8, "..... user is offered frame buffer %i\n", \
+	input = peasycap->frame_buffer[peasycap->frame_read][0].input;
+	if (0x08 & input) {
+		JOM(8, "user is offered frame buffer %i, input %i\n", \
+					peasycap->frame_read, (0x07 & input));
+	} else {
+		JOM(8, "user is offered frame buffer %i\n", \
 							peasycap->frame_read);
+	}
 	peasycap->frame_lock = 1;
+	JOM(8, "%i=peasycap->frame_fill\n", peasycap->frame_fill);
 	if (peasycap->frame_read == peasycap->frame_fill) {
 		if (peasycap->frame_lock) {
-			JOT(8, "ERROR:  filling frame buffer " \
+			JOM(8, "WORRY:  filling frame buffer " \
 						"while offered to user\n");
 		}
 	}
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
-/*---------------------------------------------------------------------------*/
-/*
- *  AUDIO URBS HAVE ALREADY BEEN SUBMITTED WHEN THIS COMMAND IS RECEIVED;
- *  VIDEO URBS HAVE NOT.
- */
-/*---------------------------------------------------------------------------*/
 case VIDIOC_STREAMON: {
-	static int i;
+	int i;
 
-	JOT(8, "VIDIOC_STREAMON\n");
+	JOM(8, "VIDIOC_STREAMON\n");
 
-	isequence = 0;
+	peasycap->isequence = 0;
 	for (i = 0; i < 180; i++)
 		peasycap->merit[i] = 0;
 	if ((struct usb_device *)NULL == peasycap->pusb_device) {
-		SAY("ERROR: peasycap->pusb_device is NULL\n");
+		SAM("ERROR: peasycap->pusb_device is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 	submit_video_urbs(peasycap);
@@ -1851,10 +2382,11 @@
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_STREAMOFF: {
-	JOT(8, "VIDIOC_STREAMOFF\n");
+	JOM(8, "VIDIOC_STREAMOFF\n");
 
 	if ((struct usb_device *)NULL == peasycap->pusb_device) {
-		SAY("ERROR: peasycap->pusb_device is NULL\n");
+		SAM("ERROR: peasycap->pusb_device is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
@@ -1866,7 +2398,7 @@
  *  THE USERSPACE PROGRAM, E.G. mplayer, MAY HANG ON EXIT.   BEWARE.
  */
 /*---------------------------------------------------------------------------*/
-	JOT(8, "calling wake_up on wq_video and wq_audio\n");
+	JOM(8, "calling wake_up on wq_video and wq_audio\n");
 	wake_up_interruptible(&(peasycap->wq_video));
 	wake_up_interruptible(&(peasycap->wq_audio));
 /*---------------------------------------------------------------------------*/
@@ -1874,111 +2406,200 @@
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_PARM: {
-	static struct v4l2_streamparm v4l2_streamparm;
+	struct v4l2_streamparm *pv4l2_streamparm;
 
-	JOT(8, "VIDIOC_G_PARM\n");
-
-	if (0 != copy_from_user(&v4l2_streamparm, (void __user *)arg, \
+	JOM(8, "VIDIOC_G_PARM\n");
+	pv4l2_streamparm = kzalloc(sizeof(struct v4l2_streamparm), GFP_KERNEL);
+	if (!pv4l2_streamparm) {
+		SAM("ERROR: out of memory\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ENOMEM;
+	}
+	if (0 != copy_from_user(pv4l2_streamparm, (void __user *)arg, \
 					sizeof(struct v4l2_streamparm))) {
-		POUT;
+		kfree(pv4l2_streamparm);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
 
-	if (v4l2_streamparm.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
-		POUT;
+	if (pv4l2_streamparm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		kfree(pv4l2_streamparm);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EINVAL;
 	}
-	v4l2_streamparm.parm.capture.capability = 0;
-	v4l2_streamparm.parm.capture.capturemode = 0;
-	v4l2_streamparm.parm.capture.timeperframe.numerator = 1;
-	v4l2_streamparm.parm.capture.timeperframe.denominator = 30;
-	v4l2_streamparm.parm.capture.readbuffers = peasycap->frame_buffer_many;
-	v4l2_streamparm.parm.capture.extendedmode = 0;
-	if (0 != copy_to_user((void __user *)arg, &v4l2_streamparm, \
+	pv4l2_streamparm->parm.capture.capability = 0;
+	pv4l2_streamparm->parm.capture.capturemode = 0;
+	pv4l2_streamparm->parm.capture.timeperframe.numerator = 1;
+
+	if (peasycap->fps) {
+		pv4l2_streamparm->parm.capture.timeperframe.\
+						denominator = peasycap->fps;
+	} else {
+		if (true == peasycap->ntsc) {
+			pv4l2_streamparm->parm.capture.timeperframe.\
+						denominator = 30;
+		} else {
+			pv4l2_streamparm->parm.capture.timeperframe.\
+						denominator = 25;
+		}
+	}
+
+	pv4l2_streamparm->parm.capture.readbuffers = \
+						peasycap->frame_buffer_many;
+	pv4l2_streamparm->parm.capture.extendedmode = 0;
+	if (0 != copy_to_user((void __user *)arg, pv4l2_streamparm, \
 					sizeof(struct v4l2_streamparm))) {
-		POUT;
+		kfree(pv4l2_streamparm);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
 		return -EFAULT;
 	}
+	kfree(pv4l2_streamparm);
 	break;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_S_PARM: {
-	JOT(8, "VIDIOC_S_PARM unsupported\n");
+	JOM(8, "VIDIOC_S_PARM unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_AUDIO: {
-	JOT(8, "VIDIOC_G_AUDIO unsupported\n");
+	JOM(8, "VIDIOC_G_AUDIO unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_S_AUDIO: {
-	JOT(8, "VIDIOC_S_AUDIO unsupported\n");
+	JOM(8, "VIDIOC_S_AUDIO unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_S_TUNER: {
-	JOT(8, "VIDIOC_S_TUNER unsupported\n");
+	JOM(8, "VIDIOC_S_TUNER unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_FBUF:
 case VIDIOC_S_FBUF:
 case VIDIOC_OVERLAY: {
-	JOT(8, "VIDIOC_G_FBUF|VIDIOC_S_FBUF|VIDIOC_OVERLAY unsupported\n");
+	JOM(8, "VIDIOC_G_FBUF|VIDIOC_S_FBUF|VIDIOC_OVERLAY unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 case VIDIOC_G_TUNER: {
-	JOT(8, "VIDIOC_G_TUNER unsupported\n");
+	JOM(8, "VIDIOC_G_TUNER unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 case VIDIOC_G_FREQUENCY:
 case VIDIOC_S_FREQUENCY: {
-	JOT(8, "VIDIOC_G_FREQUENCY|VIDIOC_S_FREQUENCY unsupported\n");
+	JOM(8, "VIDIOC_G_FREQUENCY|VIDIOC_S_FREQUENCY unsupported\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -EINVAL;
 }
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 default: {
-	JOT(8, "ERROR: unrecognized V4L2 IOCTL command: 0x%08X\n", cmd);
-	explain_ioctl(cmd);
-	POUT;
+	JOM(8, "ERROR: unrecognized V4L2 IOCTL command: 0x%08X\n", cmd);
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
 	return -ENOIOCTLCMD;
 }
 }
+mutex_unlock(&easycap_dongle[kd].mutex_video);
+JOM(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
 return 0;
 }
-
-long easycap_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct inode *inode = file->f_dentry->d_inode;
-	long ret;
-
-	lock_kernel();
-	ret = easycap_ioctl_bkl(inode, file, cmd, arg);
-	unlock_kernel();
-
-	return ret;
+/*****************************************************************************/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if ((defined(EASYCAP_IS_VIDEODEV_CLIENT)) || \
+	(defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)))
+long
+easysnd_ioctl_noinode(struct file *file, unsigned int cmd, unsigned long arg) {
+	return (long)easysnd_ioctl((struct inode *)NULL, file, cmd, arg);
 }
-
-/*--------------------------------------------------------------------------*/
-static int easysnd_ioctl_bkl(struct inode *inode, struct file *file,
-			     unsigned int cmd, unsigned long arg)
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT||EASYCAP_NEEDS_UNLOCKED_IOCTL*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+int
+easysnd_ioctl(struct inode *inode, struct file *file,
+					unsigned int cmd, unsigned long arg)
 {
 struct easycap *peasycap;
 struct usb_device *p;
+int kd;
 
+if (NULL == file) {
+	SAY("ERROR:  file is NULL\n");
+	return -ERESTARTSYS;
+}
 peasycap = file->private_data;
 if (NULL == peasycap) {
 	SAY("ERROR:  peasycap is NULL.\n");
-	return -1;
+	return -EFAULT;
+}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap\n");
+	return -EFAULT;
 }
 p = peasycap->pusb_device;
+if (NULL == p) {
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
+	return -EFAULT;
+}
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_audio)) {
+		SAY("ERROR: cannot lock easycap_dongle[%i].mutex_audio\n", kd);
+		return -ERESTARTSYS;
+	}
+	JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
+/*---------------------------------------------------------------------------*/
+/*
+ *  MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap,
+ *  IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ *  IF NECESSARY, BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+	if (kd != isdongle(peasycap))
+		return -ERESTARTSYS;
+	if (NULL == file) {
+		SAY("ERROR:  file is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+	peasycap = file->private_data;
+	if (NULL == peasycap) {
+		SAY("ERROR:  peasycap is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+		SAY("ERROR: bad peasycap\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -EFAULT;
+	}
+	p = peasycap->pusb_device;
+	if (NULL == peasycap->pusb_device) {
+		SAM("ERROR: peasycap->pusb_device is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+} else {
+/*---------------------------------------------------------------------------*/
+/*
+ *  IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE
+ *  ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL HAVE FAILED.  BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+	return -ERESTARTSYS;
+}
 /*---------------------------------------------------------------------------*/
 switch (cmd) {
 case SNDCTL_DSP_GETCAPS: {
 	int caps;
-	JOT(8, "SNDCTL_DSP_GETCAPS\n");
+	JOM(8, "SNDCTL_DSP_GETCAPS\n");
 
 #if defined(UPSAMPLE)
 	if (true == peasycap->microphone)
@@ -1992,13 +2613,15 @@
 		caps = 0x04400000;
 #endif /*UPSAMPLE*/
 
-	if (0 != copy_to_user((void __user *)arg, &caps, sizeof(int)))
+	if (0 != copy_to_user((void __user *)arg, &caps, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
 case SNDCTL_DSP_GETFMTS: {
 	int incoming;
-	JOT(8, "SNDCTL_DSP_GETFMTS\n");
+	JOM(8, "SNDCTL_DSP_GETFMTS\n");
 
 #if defined(UPSAMPLE)
 	if (true == peasycap->microphone)
@@ -2012,16 +2635,20 @@
 		incoming = AFMT_S16_LE;
 #endif /*UPSAMPLE*/
 
-	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
 case SNDCTL_DSP_SETFMT: {
 	int incoming, outgoing;
-	JOT(8, "SNDCTL_DSP_SETFMT\n");
-	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+	JOM(8, "SNDCTL_DSP_SETFMT\n");
+	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
-	JOT(8, "........... %i=incoming\n", incoming);
+	}
+	JOM(8, "........... %i=incoming\n", incoming);
 
 #if defined(UPSAMPLE)
 	if (true == peasycap->microphone)
@@ -2036,22 +2663,27 @@
 #endif /*UPSAMPLE*/
 
 	if (incoming != outgoing) {
-		JOT(8, "........... %i=outgoing\n", outgoing);
-		JOT(8, "        cf. %i=AFMT_S16_LE\n", AFMT_S16_LE);
-		JOT(8, "        cf. %i=AFMT_U8\n", AFMT_U8);
+		JOM(8, "........... %i=outgoing\n", outgoing);
+		JOM(8, "        cf. %i=AFMT_S16_LE\n", AFMT_S16_LE);
+		JOM(8, "        cf. %i=AFMT_U8\n", AFMT_U8);
 		if (0 != copy_to_user((void __user *)arg, &outgoing, \
-								sizeof(int)))
+								sizeof(int))) {
+			mutex_unlock(&easycap_dongle[kd].mutex_audio);
 			return -EFAULT;
+		}
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EINVAL ;
 	}
 	break;
 }
 case SNDCTL_DSP_STEREO: {
 	int incoming;
-	JOT(8, "SNDCTL_DSP_STEREO\n");
-	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+	JOM(8, "SNDCTL_DSP_STEREO\n");
+	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
-	JOT(8, "........... %i=incoming\n", incoming);
+	}
+	JOM(8, "........... %i=incoming\n", incoming);
 
 #if defined(UPSAMPLE)
 	if (true == peasycap->microphone)
@@ -2065,16 +2697,20 @@
 		incoming = 1;
 #endif /*UPSAMPLE*/
 
-	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
 case SNDCTL_DSP_SPEED: {
 	int incoming;
-	JOT(8, "SNDCTL_DSP_SPEED\n");
-	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+	JOM(8, "SNDCTL_DSP_SPEED\n");
+	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
-	JOT(8, "........... %i=incoming\n", incoming);
+	}
+	JOM(8, "........... %i=incoming\n", incoming);
 
 #if defined(UPSAMPLE)
 	if (true == peasycap->microphone)
@@ -2088,29 +2724,37 @@
 		incoming = 48000;
 #endif /*UPSAMPLE*/
 
-	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
 case SNDCTL_DSP_GETTRIGGER: {
 	int incoming;
-	JOT(8, "SNDCTL_DSP_GETTRIGGER\n");
-	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+	JOM(8, "SNDCTL_DSP_GETTRIGGER\n");
+	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
-	JOT(8, "........... %i=incoming\n", incoming);
+	}
+	JOM(8, "........... %i=incoming\n", incoming);
 
 	incoming = PCM_ENABLE_INPUT;
-	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
 case SNDCTL_DSP_SETTRIGGER: {
 	int incoming;
-	JOT(8, "SNDCTL_DSP_SETTRIGGER\n");
-	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+	JOM(8, "SNDCTL_DSP_SETTRIGGER\n");
+	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
-	JOT(8, "........... %i=incoming\n", incoming);
-	JOT(8, "........... cf 0x%x=PCM_ENABLE_INPUT " \
+	}
+	JOM(8, "........... %i=incoming\n", incoming);
+	JOM(8, "........... cf 0x%x=PCM_ENABLE_INPUT " \
 				"0x%x=PCM_ENABLE_OUTPUT\n", \
 					PCM_ENABLE_INPUT, PCM_ENABLE_OUTPUT);
 	;
@@ -2121,19 +2765,23 @@
 }
 case SNDCTL_DSP_GETBLKSIZE: {
 	int incoming;
-	JOT(8, "SNDCTL_DSP_GETBLKSIZE\n");
-	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int)))
+	JOM(8, "SNDCTL_DSP_GETBLKSIZE\n");
+	if (0 != copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
-	JOT(8, "........... %i=incoming\n", incoming);
+	}
+	JOM(8, "........... %i=incoming\n", incoming);
 	incoming = peasycap->audio_bytes_per_fragment;
-	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int)))
+	if (0 != copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
 case SNDCTL_DSP_GETISPACE: {
 	struct audio_buf_info audio_buf_info;
 
-	JOT(8, "SNDCTL_DSP_GETISPACE\n");
+	JOM(8, "SNDCTL_DSP_GETISPACE\n");
 
 	audio_buf_info.bytes      = peasycap->audio_bytes_per_fragment;
 	audio_buf_info.fragments  = 1;
@@ -2141,555 +2789,31 @@
 	audio_buf_info.fragstotal = 0;
 
 	if (0 != copy_to_user((void __user *)arg, &audio_buf_info, \
-								sizeof(int)))
+								sizeof(int))) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
+	}
 	break;
 }
+case 0x00005401:
+case 0x00005402:
+case 0x00005403:
+case 0x00005404:
+case 0x00005405:
+case 0x00005406: {
+	JOM(8, "SNDCTL_TMR_...: 0x%08X unsupported\n", cmd);
+	mutex_unlock(&easycap_dongle[kd].mutex_audio);
+	return -ENOIOCTLCMD;
+}
 default: {
-	JOT(8, "ERROR: unrecognized DSP IOCTL command: 0x%08X\n", cmd);
-	POUT;
+	JOM(8, "ERROR: unrecognized DSP IOCTL command: 0x%08X\n", cmd);
+	mutex_unlock(&easycap_dongle[kd].mutex_audio);
 	return -ENOIOCTLCMD;
 }
 }
+mutex_unlock(&easycap_dongle[kd].mutex_audio);
 return 0;
 }
-
-long easysnd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct inode *inode = file->f_dentry->d_inode;
-	long ret;
-
-	lock_kernel();
-	ret = easysnd_ioctl_bkl(inode, file, cmd, arg);
-	unlock_kernel();
-
-	return ret;
-}
-
 /*****************************************************************************/
-int explain_ioctl(__u32 wot)
-{
-int k;
-/*---------------------------------------------------------------------------*/
-/*
- *  THE DATA FOR THE ARRAY mess BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
- *  SHELL SCRIPT:
- *  #
- *  cat /usr/src/linux-headers-`uname -r`/include/linux/videodev2.h | \
- *     grep "^#define VIDIOC_" - | grep -v "_OLD" - | \
- *     sed -e "s,_IO.*$,,;p" | sed -e "N;s,\n,, " | \
- *     sed -e "s/^#define /  {/;s/#define /, \"/;s/$/\"},/" | \
- *     sed -e "s,	,,g;s, ,,g" >ioctl.tmp
- *  echo "{0xFFFFFFFF,\"\"}" >>ioctl.tmp
- *  exit 0
- *  #
- * AND REINSTATING THE EXCISED "_OLD" CASES WERE LATER MANUALLY.
- *
- * THE DATA FOR THE ARRAY mess1 BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
- * SHELL SCRIPT:
- *  cat /usr/src/linux-headers-`uname -r`/include/linux/videodev.h | \
- *     grep "^#define VIDIOC" - | grep -v "_OLD" - | \
- *     sed -e "s,_IO.*$,,;p" | sed -e "N;s,\n,, " | \
- *     sed -e "s/^#define /  {/;s/#define /, \"/;s/$/\"},/" | \
- *     sed -e "s,   ,,g;s, ,,g" >ioctl.tmp
- *  echo "{0xFFFFFFFF,\"\"}" >>ioctl.tmp
- *  exit 0
- *  #
- */
-/*---------------------------------------------------------------------------*/
-static struct mess {
-	__u32 command;
-	char  name[64];
-} mess[] = {
-#if defined(VIDIOC_QUERYCAP)
-{VIDIOC_QUERYCAP, "VIDIOC_QUERYCAP"},
-#endif
-#if defined(VIDIOC_RESERVED)
-{VIDIOC_RESERVED, "VIDIOC_RESERVED"},
-#endif
-#if defined(VIDIOC_ENUM_FMT)
-{VIDIOC_ENUM_FMT, "VIDIOC_ENUM_FMT"},
-#endif
-#if defined(VIDIOC_G_FMT)
-{VIDIOC_G_FMT, "VIDIOC_G_FMT"},
-#endif
-#if defined(VIDIOC_S_FMT)
-{VIDIOC_S_FMT, "VIDIOC_S_FMT"},
-#endif
-#if defined(VIDIOC_REQBUFS)
-{VIDIOC_REQBUFS, "VIDIOC_REQBUFS"},
-#endif
-#if defined(VIDIOC_QUERYBUF)
-{VIDIOC_QUERYBUF, "VIDIOC_QUERYBUF"},
-#endif
-#if defined(VIDIOC_G_FBUF)
-{VIDIOC_G_FBUF, "VIDIOC_G_FBUF"},
-#endif
-#if defined(VIDIOC_S_FBUF)
-{VIDIOC_S_FBUF, "VIDIOC_S_FBUF"},
-#endif
-#if defined(VIDIOC_OVERLAY)
-{VIDIOC_OVERLAY, "VIDIOC_OVERLAY"},
-#endif
-#if defined(VIDIOC_QBUF)
-{VIDIOC_QBUF, "VIDIOC_QBUF"},
-#endif
-#if defined(VIDIOC_DQBUF)
-{VIDIOC_DQBUF, "VIDIOC_DQBUF"},
-#endif
-#if defined(VIDIOC_STREAMON)
-{VIDIOC_STREAMON, "VIDIOC_STREAMON"},
-#endif
-#if defined(VIDIOC_STREAMOFF)
-{VIDIOC_STREAMOFF, "VIDIOC_STREAMOFF"},
-#endif
-#if defined(VIDIOC_G_PARM)
-{VIDIOC_G_PARM, "VIDIOC_G_PARM"},
-#endif
-#if defined(VIDIOC_S_PARM)
-{VIDIOC_S_PARM, "VIDIOC_S_PARM"},
-#endif
-#if defined(VIDIOC_G_STD)
-{VIDIOC_G_STD, "VIDIOC_G_STD"},
-#endif
-#if defined(VIDIOC_S_STD)
-{VIDIOC_S_STD, "VIDIOC_S_STD"},
-#endif
-#if defined(VIDIOC_ENUMSTD)
-{VIDIOC_ENUMSTD, "VIDIOC_ENUMSTD"},
-#endif
-#if defined(VIDIOC_ENUMINPUT)
-{VIDIOC_ENUMINPUT, "VIDIOC_ENUMINPUT"},
-#endif
-#if defined(VIDIOC_G_CTRL)
-{VIDIOC_G_CTRL, "VIDIOC_G_CTRL"},
-#endif
-#if defined(VIDIOC_S_CTRL)
-{VIDIOC_S_CTRL, "VIDIOC_S_CTRL"},
-#endif
-#if defined(VIDIOC_G_TUNER)
-{VIDIOC_G_TUNER, "VIDIOC_G_TUNER"},
-#endif
-#if defined(VIDIOC_S_TUNER)
-{VIDIOC_S_TUNER, "VIDIOC_S_TUNER"},
-#endif
-#if defined(VIDIOC_G_AUDIO)
-{VIDIOC_G_AUDIO, "VIDIOC_G_AUDIO"},
-#endif
-#if defined(VIDIOC_S_AUDIO)
-{VIDIOC_S_AUDIO, "VIDIOC_S_AUDIO"},
-#endif
-#if defined(VIDIOC_QUERYCTRL)
-{VIDIOC_QUERYCTRL, "VIDIOC_QUERYCTRL"},
-#endif
-#if defined(VIDIOC_QUERYMENU)
-{VIDIOC_QUERYMENU, "VIDIOC_QUERYMENU"},
-#endif
-#if defined(VIDIOC_G_INPUT)
-{VIDIOC_G_INPUT, "VIDIOC_G_INPUT"},
-#endif
-#if defined(VIDIOC_S_INPUT)
-{VIDIOC_S_INPUT, "VIDIOC_S_INPUT"},
-#endif
-#if defined(VIDIOC_G_OUTPUT)
-{VIDIOC_G_OUTPUT, "VIDIOC_G_OUTPUT"},
-#endif
-#if defined(VIDIOC_S_OUTPUT)
-{VIDIOC_S_OUTPUT, "VIDIOC_S_OUTPUT"},
-#endif
-#if defined(VIDIOC_ENUMOUTPUT)
-{VIDIOC_ENUMOUTPUT, "VIDIOC_ENUMOUTPUT"},
-#endif
-#if defined(VIDIOC_G_AUDOUT)
-{VIDIOC_G_AUDOUT, "VIDIOC_G_AUDOUT"},
-#endif
-#if defined(VIDIOC_S_AUDOUT)
-{VIDIOC_S_AUDOUT, "VIDIOC_S_AUDOUT"},
-#endif
-#if defined(VIDIOC_G_MODULATOR)
-{VIDIOC_G_MODULATOR, "VIDIOC_G_MODULATOR"},
-#endif
-#if defined(VIDIOC_S_MODULATOR)
-{VIDIOC_S_MODULATOR, "VIDIOC_S_MODULATOR"},
-#endif
-#if defined(VIDIOC_G_FREQUENCY)
-{VIDIOC_G_FREQUENCY, "VIDIOC_G_FREQUENCY"},
-#endif
-#if defined(VIDIOC_S_FREQUENCY)
-{VIDIOC_S_FREQUENCY, "VIDIOC_S_FREQUENCY"},
-#endif
-#if defined(VIDIOC_CROPCAP)
-{VIDIOC_CROPCAP, "VIDIOC_CROPCAP"},
-#endif
-#if defined(VIDIOC_G_CROP)
-{VIDIOC_G_CROP, "VIDIOC_G_CROP"},
-#endif
-#if defined(VIDIOC_S_CROP)
-{VIDIOC_S_CROP, "VIDIOC_S_CROP"},
-#endif
-#if defined(VIDIOC_G_JPEGCOMP)
-{VIDIOC_G_JPEGCOMP, "VIDIOC_G_JPEGCOMP"},
-#endif
-#if defined(VIDIOC_S_JPEGCOMP)
-{VIDIOC_S_JPEGCOMP, "VIDIOC_S_JPEGCOMP"},
-#endif
-#if defined(VIDIOC_QUERYSTD)
-{VIDIOC_QUERYSTD, "VIDIOC_QUERYSTD"},
-#endif
-#if defined(VIDIOC_TRY_FMT)
-{VIDIOC_TRY_FMT, "VIDIOC_TRY_FMT"},
-#endif
-#if defined(VIDIOC_ENUMAUDIO)
-{VIDIOC_ENUMAUDIO, "VIDIOC_ENUMAUDIO"},
-#endif
-#if defined(VIDIOC_ENUMAUDOUT)
-{VIDIOC_ENUMAUDOUT, "VIDIOC_ENUMAUDOUT"},
-#endif
-#if defined(VIDIOC_G_PRIORITY)
-{VIDIOC_G_PRIORITY, "VIDIOC_G_PRIORITY"},
-#endif
-#if defined(VIDIOC_S_PRIORITY)
-{VIDIOC_S_PRIORITY, "VIDIOC_S_PRIORITY"},
-#endif
-#if defined(VIDIOC_G_SLICED_VBI_CAP)
-{VIDIOC_G_SLICED_VBI_CAP, "VIDIOC_G_SLICED_VBI_CAP"},
-#endif
-#if defined(VIDIOC_LOG_STATUS)
-{VIDIOC_LOG_STATUS, "VIDIOC_LOG_STATUS"},
-#endif
-#if defined(VIDIOC_G_EXT_CTRLS)
-{VIDIOC_G_EXT_CTRLS, "VIDIOC_G_EXT_CTRLS"},
-#endif
-#if defined(VIDIOC_S_EXT_CTRLS)
-{VIDIOC_S_EXT_CTRLS, "VIDIOC_S_EXT_CTRLS"},
-#endif
-#if defined(VIDIOC_TRY_EXT_CTRLS)
-{VIDIOC_TRY_EXT_CTRLS, "VIDIOC_TRY_EXT_CTRLS"},
-#endif
-#if defined(VIDIOC_ENUM_FRAMESIZES)
-{VIDIOC_ENUM_FRAMESIZES, "VIDIOC_ENUM_FRAMESIZES"},
-#endif
-#if defined(VIDIOC_ENUM_FRAMEINTERVALS)
-{VIDIOC_ENUM_FRAMEINTERVALS, "VIDIOC_ENUM_FRAMEINTERVALS"},
-#endif
-#if defined(VIDIOC_G_ENC_INDEX)
-{VIDIOC_G_ENC_INDEX, "VIDIOC_G_ENC_INDEX"},
-#endif
-#if defined(VIDIOC_ENCODER_CMD)
-{VIDIOC_ENCODER_CMD, "VIDIOC_ENCODER_CMD"},
-#endif
-#if defined(VIDIOC_TRY_ENCODER_CMD)
-{VIDIOC_TRY_ENCODER_CMD, "VIDIOC_TRY_ENCODER_CMD"},
-#endif
-#if defined(VIDIOC_G_CHIP_IDENT)
-{VIDIOC_G_CHIP_IDENT, "VIDIOC_G_CHIP_IDENT"},
-#endif
 
-#if defined(VIDIOC_OVERLAY_OLD)
-{VIDIOC_OVERLAY_OLD, "VIDIOC_OVERLAY_OLD"},
-#endif
-#if defined(VIDIOC_S_PARM_OLD)
-{VIDIOC_S_PARM_OLD, "VIDIOC_S_PARM_OLD"},
-#endif
-#if defined(VIDIOC_S_CTRL_OLD)
-{VIDIOC_S_CTRL_OLD, "VIDIOC_S_CTRL_OLD"},
-#endif
-#if defined(VIDIOC_G_AUDIO_OLD)
-{VIDIOC_G_AUDIO_OLD, "VIDIOC_G_AUDIO_OLD"},
-#endif
-#if defined(VIDIOC_G_AUDOUT_OLD)
-{VIDIOC_G_AUDOUT_OLD, "VIDIOC_G_AUDOUT_OLD"},
-#endif
-#if defined(VIDIOC_CROPCAP_OLD)
-{VIDIOC_CROPCAP_OLD, "VIDIOC_CROPCAP_OLD"},
-#endif
-{0xFFFFFFFF, ""}
-};
 
-static struct mess mess1[] = \
-{
-#if defined(VIDIOCGCAP)
-{VIDIOCGCAP, "VIDIOCGCAP"},
-#endif
-#if defined(VIDIOCGCHAN)
-{VIDIOCGCHAN, "VIDIOCGCHAN"},
-#endif
-#if defined(VIDIOCSCHAN)
-{VIDIOCSCHAN, "VIDIOCSCHAN"},
-#endif
-#if defined(VIDIOCGTUNER)
-{VIDIOCGTUNER, "VIDIOCGTUNER"},
-#endif
-#if defined(VIDIOCSTUNER)
-{VIDIOCSTUNER, "VIDIOCSTUNER"},
-#endif
-#if defined(VIDIOCGPICT)
-{VIDIOCGPICT, "VIDIOCGPICT"},
-#endif
-#if defined(VIDIOCSPICT)
-{VIDIOCSPICT, "VIDIOCSPICT"},
-#endif
-#if defined(VIDIOCCAPTURE)
-{VIDIOCCAPTURE, "VIDIOCCAPTURE"},
-#endif
-#if defined(VIDIOCGWIN)
-{VIDIOCGWIN, "VIDIOCGWIN"},
-#endif
-#if defined(VIDIOCSWIN)
-{VIDIOCSWIN, "VIDIOCSWIN"},
-#endif
-#if defined(VIDIOCGFBUF)
-{VIDIOCGFBUF, "VIDIOCGFBUF"},
-#endif
-#if defined(VIDIOCSFBUF)
-{VIDIOCSFBUF, "VIDIOCSFBUF"},
-#endif
-#if defined(VIDIOCKEY)
-{VIDIOCKEY, "VIDIOCKEY"},
-#endif
-#if defined(VIDIOCGFREQ)
-{VIDIOCGFREQ, "VIDIOCGFREQ"},
-#endif
-#if defined(VIDIOCSFREQ)
-{VIDIOCSFREQ, "VIDIOCSFREQ"},
-#endif
-#if defined(VIDIOCGAUDIO)
-{VIDIOCGAUDIO, "VIDIOCGAUDIO"},
-#endif
-#if defined(VIDIOCSAUDIO)
-{VIDIOCSAUDIO, "VIDIOCSAUDIO"},
-#endif
-#if defined(VIDIOCSYNC)
-{VIDIOCSYNC, "VIDIOCSYNC"},
-#endif
-#if defined(VIDIOCMCAPTURE)
-{VIDIOCMCAPTURE, "VIDIOCMCAPTURE"},
-#endif
-#if defined(VIDIOCGMBUF)
-{VIDIOCGMBUF, "VIDIOCGMBUF"},
-#endif
-#if defined(VIDIOCGUNIT)
-{VIDIOCGUNIT, "VIDIOCGUNIT"},
-#endif
-#if defined(VIDIOCGCAPTURE)
-{VIDIOCGCAPTURE, "VIDIOCGCAPTURE"},
-#endif
-#if defined(VIDIOCSCAPTURE)
-{VIDIOCSCAPTURE, "VIDIOCSCAPTURE"},
-#endif
-#if defined(VIDIOCSPLAYMODE)
-{VIDIOCSPLAYMODE, "VIDIOCSPLAYMODE"},
-#endif
-#if defined(VIDIOCSWRITEMODE)
-{VIDIOCSWRITEMODE, "VIDIOCSWRITEMODE"},
-#endif
-#if defined(VIDIOCGPLAYINFO)
-{VIDIOCGPLAYINFO, "VIDIOCGPLAYINFO"},
-#endif
-#if defined(VIDIOCSMICROCODE)
-{VIDIOCSMICROCODE, "VIDIOCSMICROCODE"},
-#endif
-{0xFFFFFFFF, ""}
-};
-
-k = 0;
-while (mess[k].name[0]) {
-	if (wot == mess[k].command) {
-		JOT(8, "ioctl 0x%08X is %s\n", \
-					mess[k].command, &mess[k].name[0]);
-		return 0;
-	}
-	k++;
-}
-JOT(8, "ioctl 0x%08X is not in videodev2.h\n", wot);
-
-k = 0;
-while (mess1[k].name[0]) {
-	if (wot == mess1[k].command) {
-		JOT(8, "ioctl 0x%08X is %s (V4L1)\n", \
-					mess1[k].command, &mess1[k].name[0]);
-		return 0;
-	}
-	k++;
-}
-JOT(8, "ioctl 0x%08X is not in videodev.h\n", wot);
-return -1;
-}
-/*****************************************************************************/
-int explain_cid(__u32 wot)
-{
-int k;
-/*---------------------------------------------------------------------------*/
-/*
- *  THE DATA FOR THE ARRAY mess BELOW WERE CONSTRUCTED BY RUNNING THE FOLLOWING
- *  SHELL SCRIPT:
- *  #
- *  cat /usr/src/linux-headers-`uname -r`/include/linux/videodev2.h | \
- *     grep "^#define V4L2_CID_" |  \
- *     sed -e "s,(.*$,,;p" | sed -e "N;s,\n,, " | \
- *     sed -e "s/^#define /  {/;s/#define /, \"/;s/$/\"},/" | \
- *     sed -e "s,	,,g;s, ,,g" | grep -v "_BASE" | grep -v "MPEG" >cid.tmp
- *  echo "{0xFFFFFFFF,\"\"}" >>cid.tmp
- *  exit 0
- *  #
- */
-/*---------------------------------------------------------------------------*/
-static struct mess
-{
-__u32 command;
-char  name[64];
-} mess[] = {
-#if defined(V4L2_CID_USER_CLASS)
-{V4L2_CID_USER_CLASS, "V4L2_CID_USER_CLASS"},
-#endif
-#if defined(V4L2_CID_BRIGHTNESS)
-{V4L2_CID_BRIGHTNESS, "V4L2_CID_BRIGHTNESS"},
-#endif
-#if defined(V4L2_CID_CONTRAST)
-{V4L2_CID_CONTRAST, "V4L2_CID_CONTRAST"},
-#endif
-#if defined(V4L2_CID_SATURATION)
-{V4L2_CID_SATURATION, "V4L2_CID_SATURATION"},
-#endif
-#if defined(V4L2_CID_HUE)
-{V4L2_CID_HUE, "V4L2_CID_HUE"},
-#endif
-#if defined(V4L2_CID_AUDIO_VOLUME)
-{V4L2_CID_AUDIO_VOLUME, "V4L2_CID_AUDIO_VOLUME"},
-#endif
-#if defined(V4L2_CID_AUDIO_BALANCE)
-{V4L2_CID_AUDIO_BALANCE, "V4L2_CID_AUDIO_BALANCE"},
-#endif
-#if defined(V4L2_CID_AUDIO_BASS)
-{V4L2_CID_AUDIO_BASS, "V4L2_CID_AUDIO_BASS"},
-#endif
-#if defined(V4L2_CID_AUDIO_TREBLE)
-{V4L2_CID_AUDIO_TREBLE, "V4L2_CID_AUDIO_TREBLE"},
-#endif
-#if defined(V4L2_CID_AUDIO_MUTE)
-{V4L2_CID_AUDIO_MUTE, "V4L2_CID_AUDIO_MUTE"},
-#endif
-#if defined(V4L2_CID_AUDIO_LOUDNESS)
-{V4L2_CID_AUDIO_LOUDNESS, "V4L2_CID_AUDIO_LOUDNESS"},
-#endif
-#if defined(V4L2_CID_BLACK_LEVEL)
-{V4L2_CID_BLACK_LEVEL, "V4L2_CID_BLACK_LEVEL"},
-#endif
-#if defined(V4L2_CID_AUTO_WHITE_BALANCE)
-{V4L2_CID_AUTO_WHITE_BALANCE, "V4L2_CID_AUTO_WHITE_BALANCE"},
-#endif
-#if defined(V4L2_CID_DO_WHITE_BALANCE)
-{V4L2_CID_DO_WHITE_BALANCE, "V4L2_CID_DO_WHITE_BALANCE"},
-#endif
-#if defined(V4L2_CID_RED_BALANCE)
-{V4L2_CID_RED_BALANCE, "V4L2_CID_RED_BALANCE"},
-#endif
-#if defined(V4L2_CID_BLUE_BALANCE)
-{V4L2_CID_BLUE_BALANCE, "V4L2_CID_BLUE_BALANCE"},
-#endif
-#if defined(V4L2_CID_GAMMA)
-{V4L2_CID_GAMMA, "V4L2_CID_GAMMA"},
-#endif
-#if defined(V4L2_CID_WHITENESS)
-{V4L2_CID_WHITENESS, "V4L2_CID_WHITENESS"},
-#endif
-#if defined(V4L2_CID_EXPOSURE)
-{V4L2_CID_EXPOSURE, "V4L2_CID_EXPOSURE"},
-#endif
-#if defined(V4L2_CID_AUTOGAIN)
-{V4L2_CID_AUTOGAIN, "V4L2_CID_AUTOGAIN"},
-#endif
-#if defined(V4L2_CID_GAIN)
-{V4L2_CID_GAIN, "V4L2_CID_GAIN"},
-#endif
-#if defined(V4L2_CID_HFLIP)
-{V4L2_CID_HFLIP, "V4L2_CID_HFLIP"},
-#endif
-#if defined(V4L2_CID_VFLIP)
-{V4L2_CID_VFLIP, "V4L2_CID_VFLIP"},
-#endif
-#if defined(V4L2_CID_HCENTER)
-{V4L2_CID_HCENTER, "V4L2_CID_HCENTER"},
-#endif
-#if defined(V4L2_CID_VCENTER)
-{V4L2_CID_VCENTER, "V4L2_CID_VCENTER"},
-#endif
-#if defined(V4L2_CID_POWER_LINE_FREQUENCY)
-{V4L2_CID_POWER_LINE_FREQUENCY, "V4L2_CID_POWER_LINE_FREQUENCY"},
-#endif
-#if defined(V4L2_CID_HUE_AUTO)
-{V4L2_CID_HUE_AUTO, "V4L2_CID_HUE_AUTO"},
-#endif
-#if defined(V4L2_CID_WHITE_BALANCE_TEMPERATURE)
-{V4L2_CID_WHITE_BALANCE_TEMPERATURE, "V4L2_CID_WHITE_BALANCE_TEMPERATURE"},
-#endif
-#if defined(V4L2_CID_SHARPNESS)
-{V4L2_CID_SHARPNESS, "V4L2_CID_SHARPNESS"},
-#endif
-#if defined(V4L2_CID_BACKLIGHT_COMPENSATION)
-{V4L2_CID_BACKLIGHT_COMPENSATION, "V4L2_CID_BACKLIGHT_COMPENSATION"},
-#endif
-#if defined(V4L2_CID_CHROMA_AGC)
-{V4L2_CID_CHROMA_AGC, "V4L2_CID_CHROMA_AGC"},
-#endif
-#if defined(V4L2_CID_COLOR_KILLER)
-{V4L2_CID_COLOR_KILLER, "V4L2_CID_COLOR_KILLER"},
-#endif
-#if defined(V4L2_CID_LASTP1)
-{V4L2_CID_LASTP1, "V4L2_CID_LASTP1"},
-#endif
-#if defined(V4L2_CID_CAMERA_CLASS)
-{V4L2_CID_CAMERA_CLASS, "V4L2_CID_CAMERA_CLASS"},
-#endif
-#if defined(V4L2_CID_EXPOSURE_AUTO)
-{V4L2_CID_EXPOSURE_AUTO, "V4L2_CID_EXPOSURE_AUTO"},
-#endif
-#if defined(V4L2_CID_EXPOSURE_ABSOLUTE)
-{V4L2_CID_EXPOSURE_ABSOLUTE, "V4L2_CID_EXPOSURE_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_EXPOSURE_AUTO_PRIORITY)
-{V4L2_CID_EXPOSURE_AUTO_PRIORITY, "V4L2_CID_EXPOSURE_AUTO_PRIORITY"},
-#endif
-#if defined(V4L2_CID_PAN_RELATIVE)
-{V4L2_CID_PAN_RELATIVE, "V4L2_CID_PAN_RELATIVE"},
-#endif
-#if defined(V4L2_CID_TILT_RELATIVE)
-{V4L2_CID_TILT_RELATIVE, "V4L2_CID_TILT_RELATIVE"},
-#endif
-#if defined(V4L2_CID_PAN_RESET)
-{V4L2_CID_PAN_RESET, "V4L2_CID_PAN_RESET"},
-#endif
-#if defined(V4L2_CID_TILT_RESET)
-{V4L2_CID_TILT_RESET, "V4L2_CID_TILT_RESET"},
-#endif
-#if defined(V4L2_CID_PAN_ABSOLUTE)
-{V4L2_CID_PAN_ABSOLUTE, "V4L2_CID_PAN_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_TILT_ABSOLUTE)
-{V4L2_CID_TILT_ABSOLUTE, "V4L2_CID_TILT_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_FOCUS_ABSOLUTE)
-{V4L2_CID_FOCUS_ABSOLUTE, "V4L2_CID_FOCUS_ABSOLUTE"},
-#endif
-#if defined(V4L2_CID_FOCUS_RELATIVE)
-{V4L2_CID_FOCUS_RELATIVE, "V4L2_CID_FOCUS_RELATIVE"},
-#endif
-#if defined(V4L2_CID_FOCUS_AUTO)
-{V4L2_CID_FOCUS_AUTO, "V4L2_CID_FOCUS_AUTO"},
-#endif
-{0xFFFFFFFF, ""}
-};
-
-k = 0;
-while (mess[k].name[0]) {
-	if (wot == mess[k].command) {
-		JOT(8, "ioctl 0x%08X is %s\n", \
-					mess[k].command, &mess[k].name[0]);
-		return 0;
-	}
-	k++;
-}
-JOT(8, "cid 0x%08X is not in videodev2.h\n", wot);
-return -1;
-}
-/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_low.c b/drivers/staging/easycap/easycap_low.c
index ad1fc4c..28c4d1e 100644
--- a/drivers/staging/easycap/easycap_low.c
+++ b/drivers/staging/easycap/easycap_low.c
@@ -38,148 +38,209 @@
 */
 /****************************************************************************/
 
-#include "easycap_debug.h"
 #include "easycap.h"
+#include "easycap_debug.h"
 
 /*--------------------------------------------------------------------------*/
-const struct stk1160config { int reg; int set; } stk1160config[256] = {
-	{0x000, 0x0098},
-	{0x002, 0x0093},
+const struct stk1160config { int reg; int set; } stk1160configPAL[256] = {
+		{0x000, 0x0098},
+		{0x002, 0x0093},
 
-	{0x001, 0x0003},
-	{0x003, 0x0080},
-	{0x00D, 0x0000},
-	{0x00F, 0x0002},
-	{0x018, 0x0010},
-	{0x019, 0x0000},
-	{0x01A, 0x0014},
-	{0x01B, 0x000E},
-	{0x01C, 0x0046},
+		{0x001, 0x0003},
+		{0x003, 0x0080},
+		{0x00D, 0x0000},
+		{0x00F, 0x0002},
+		{0x018, 0x0010},
+		{0x019, 0x0000},
+		{0x01A, 0x0014},
+		{0x01B, 0x000E},
+		{0x01C, 0x0046},
 
-	{0x100, 0x0033},
-	{0x103, 0x0000},
-	{0x104, 0x0000},
-	{0x105, 0x0000},
-	{0x106, 0x0000},
+		{0x100, 0x0033},
+		{0x103, 0x0000},
+		{0x104, 0x0000},
+		{0x105, 0x0000},
+		{0x106, 0x0000},
 
-#if defined(PREFER_NTSC)
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ *  RESOLUTION 640x480
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+		{0x110, 0x0008},
+		{0x111, 0x0000},
+		{0x112, 0x0020},
+		{0x113, 0x0000},
+		{0x114, 0x0508},
+		{0x115, 0x0005},
+		{0x116, 0x0110},
+		{0x117, 0x0001},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 
-#undef  OLDMARGIN
-#if defined(OLDMARGIN)
-	{0x110, 0x0008},
-#else
-	{0x110, 0x0014},
-#endif /*OLDMARGIN*/
+		{0x202, 0x000F},
+		{0x203, 0x004A},
+		{0x2FF, 0x0000},
 
-	{0x111, 0x0000},
-	{0x112, 0x0003},
-	{0x113, 0x0000},
-
-#if defined(OLDMARGIN)
-	{0x114, 0x0508},
-#else
-	{0x114, 0x0514},
-#endif /*OLDMARGIN*/
-
-	{0x115, 0x0005},
-	{0x116, 0x00F3},
-	{0x117, 0x0000},
-
-#else /* ! PREFER_NTSC*/
-
-#if defined(OLDMARGIN)
-	{0x110, 0x0008},
-#else
-	{0x110, 0x0014},
-#endif /*OLDMARGIN*/
-
-	{0x111, 0x0000},
-	{0x112, 0x0020},
-	{0x113, 0x0000},
-
-#if defined(OLDMARGIN)
-	{0x114, 0x0508},
-#else
-	{0x114, 0x0514},
-#endif /*OLDMARGIN*/
-
-	{0x115, 0x0005},
-	{0x116, 0x0110},
-	{0x117, 0x0001},
-
-#endif /* ! PREFER_NTSC*/
-
-	{0x202, 0x000F},
-	{0x203, 0x004A},
-	{0x2FF, 0x0000},
-/*---------------------------------------------------------------------------*/
-	{0xFFF, 0xFFFF}
-	};
+		{0xFFF, 0xFFFF}
+};
 /*--------------------------------------------------------------------------*/
-const struct saa7113config { int reg; int set; } saa7113config[256] = {
-	{0x01, 0x08},
-	{0x02, 0x80},
-	{0x03, 0x33},
-	{0x04, 0x00},
-	{0x05, 0x00},
-	{0x06, 0xE9},
-	{0x07, 0x0D},
-#if defined(PREFER_NTSC)
-	{0x08, 0x78},
+const struct stk1160config stk1160configNTSC[256] = {
+		{0x000, 0x0098},
+		{0x002, 0x0093},
+
+		{0x001, 0x0003},
+		{0x003, 0x0080},
+		{0x00D, 0x0000},
+		{0x00F, 0x0002},
+		{0x018, 0x0010},
+		{0x019, 0x0000},
+		{0x01A, 0x0014},
+		{0x01B, 0x000E},
+		{0x01C, 0x0046},
+
+		{0x100, 0x0033},
+		{0x103, 0x0000},
+		{0x104, 0x0000},
+		{0x105, 0x0000},
+		{0x106, 0x0000},
+
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+/*
+ *  RESOLUTION 640x480
+*/
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+		{0x110, 0x0008},
+		{0x111, 0x0000},
+		{0x112, 0x0003},
+		{0x113, 0x0000},
+		{0x114, 0x0508},
+		{0x115, 0x0005},
+		{0x116, 0x00F3},
+		{0x117, 0x0000},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+
+		{0x202, 0x000F},
+		{0x203, 0x004A},
+		{0x2FF, 0x0000},
+
+		{0xFFF, 0xFFFF}
+};
+/*--------------------------------------------------------------------------*/
+const struct saa7113config { int reg; int set; } saa7113configPAL[256] = {
+		{0x01, 0x08},
+#if defined(ANTIALIAS)
+		{0x02, 0xC0},
 #else
-	{0x08, 0x38},
-#endif /* ! PREFER_NTSC*/
-	{0x09, 0x00},
-	{0x0A, SAA_0A_DEFAULT},
-	{0x0B, SAA_0B_DEFAULT},
-	{0x0C, SAA_0C_DEFAULT},
-	{0x0D, SAA_0D_DEFAULT},
-	{0x0E, 0x01},
-	{0x0F, 0x36},
-	{0x10, 0x00},
-	{0x11, 0x0C},
-	{0x12, 0xE7},
-	{0x13, 0x00},
-	{0x15, 0x00},
-	{0x16, 0x00},
-#if defined(PREFER_NTSC)
-	{0x40, 0x82},
+		{0x02, 0x80},
+#endif /*ANTIALIAS*/
+		{0x03, 0x33},
+		{0x04, 0x00},
+		{0x05, 0x00},
+		{0x06, 0xE9},
+		{0x07, 0x0D},
+		{0x08, 0x38},
+		{0x09, 0x00},
+		{0x0A, SAA_0A_DEFAULT},
+		{0x0B, SAA_0B_DEFAULT},
+		{0x0C, SAA_0C_DEFAULT},
+		{0x0D, SAA_0D_DEFAULT},
+		{0x0E, 0x01},
+		{0x0F, 0x36},
+		{0x10, 0x00},
+		{0x11, 0x0C},
+		{0x12, 0xE7},
+		{0x13, 0x00},
+		{0x15, 0x00},
+		{0x16, 0x00},
+		{0x40, 0x02},
+		{0x41, 0xFF},
+		{0x42, 0xFF},
+		{0x43, 0xFF},
+		{0x44, 0xFF},
+		{0x45, 0xFF},
+		{0x46, 0xFF},
+		{0x47, 0xFF},
+		{0x48, 0xFF},
+		{0x49, 0xFF},
+		{0x4A, 0xFF},
+		{0x4B, 0xFF},
+		{0x4C, 0xFF},
+		{0x4D, 0xFF},
+		{0x4E, 0xFF},
+		{0x4F, 0xFF},
+		{0x50, 0xFF},
+		{0x51, 0xFF},
+		{0x52, 0xFF},
+		{0x53, 0xFF},
+		{0x54, 0xFF},
+		{0x55, 0xFF},
+		{0x56, 0xFF},
+		{0x57, 0xFF},
+		{0x58, 0x40},
+		{0x59, 0x54},
+		{0x5A, 0x07},
+		{0x5B, 0x83},
+
+		{0xFF, 0xFF}
+};
+/*--------------------------------------------------------------------------*/
+const struct saa7113config saa7113configNTSC[256] = {
+		{0x01, 0x08},
+#if defined(ANTIALIAS)
+		{0x02, 0xC0},
 #else
-	{0x40, 0x02},
-#endif /* ! PREFER_NTSC*/
-	{0x41, 0xFF},
-	{0x42, 0xFF},
-	{0x43, 0xFF},
-	{0x44, 0xFF},
-	{0x45, 0xFF},
-	{0x46, 0xFF},
-	{0x47, 0xFF},
-	{0x48, 0xFF},
-	{0x49, 0xFF},
-	{0x4A, 0xFF},
-	{0x4B, 0xFF},
-	{0x4C, 0xFF},
-	{0x4D, 0xFF},
-	{0x4E, 0xFF},
-	{0x4F, 0xFF},
-	{0x50, 0xFF},
-	{0x51, 0xFF},
-	{0x52, 0xFF},
-	{0x53, 0xFF},
-	{0x54, 0xFF},
-	{0x55, 0xFF},
-	{0x56, 0xFF},
-	{0x57, 0xFF},
-	{0x58, 0x40},
-	{0x59, 0x54},
-#if defined(PREFER_NTSC)
-	{0x5A, 0x0A},
-#else
-	{0x5A, 0x07},
-#endif /* ! PREFER_NTSC*/
-	{0x5B, 0x83},
-	{0xFF, 0xFF}
-	};
+		{0x02, 0x80},
+#endif /*ANTIALIAS*/
+		{0x03, 0x33},
+		{0x04, 0x00},
+		{0x05, 0x00},
+		{0x06, 0xE9},
+		{0x07, 0x0D},
+		{0x08, 0x78},
+		{0x09, 0x00},
+		{0x0A, SAA_0A_DEFAULT},
+		{0x0B, SAA_0B_DEFAULT},
+		{0x0C, SAA_0C_DEFAULT},
+		{0x0D, SAA_0D_DEFAULT},
+		{0x0E, 0x01},
+		{0x0F, 0x36},
+		{0x10, 0x00},
+		{0x11, 0x0C},
+		{0x12, 0xE7},
+		{0x13, 0x00},
+		{0x15, 0x00},
+		{0x16, 0x00},
+		{0x40, 0x82},
+		{0x41, 0xFF},
+		{0x42, 0xFF},
+		{0x43, 0xFF},
+		{0x44, 0xFF},
+		{0x45, 0xFF},
+		{0x46, 0xFF},
+		{0x47, 0xFF},
+		{0x48, 0xFF},
+		{0x49, 0xFF},
+		{0x4A, 0xFF},
+		{0x4B, 0xFF},
+		{0x4C, 0xFF},
+		{0x4D, 0xFF},
+		{0x4E, 0xFF},
+		{0x4F, 0xFF},
+		{0x50, 0xFF},
+		{0x51, 0xFF},
+		{0x52, 0xFF},
+		{0x53, 0xFF},
+		{0x54, 0xFF},
+		{0x55, 0xFF},
+		{0x56, 0xFF},
+		{0x57, 0xFF},
+		{0x58, 0x40},
+		{0x59, 0x54},
+		{0x5A, 0x0A},
+		{0x5B, 0x83},
+
+		{0xFF, 0xFF}
+};
 /*--------------------------------------------------------------------------*/
 
 /****************************************************************************/
@@ -187,6 +248,9 @@
 confirm_resolution(struct usb_device *p)
 {
 __u8 get0, get1, get2, get3, get4, get5, get6, get7;
+
+if (NULL == p)
+	return -ENODEV;
 GET(p, 0x0110, &get0);
 GET(p, 0x0111, &get1);
 GET(p, 0x0112, &get2);
@@ -227,6 +291,8 @@
 __u16 get2;
 __u8 igot;
 
+if (NULL == p)
+	return -ENODEV;
 GET(p, 0x0100, &igot);  get2 = 0x80 & igot;
 if (0x80 == get2)
 	JOT(8, "confirm_stream:  OK\n");
@@ -236,15 +302,24 @@
 }
 /****************************************************************************/
 int
-setup_stk(struct usb_device *p)
+setup_stk(struct usb_device *p, bool ntsc)
 {
 int i0;
 
+if (NULL == p)
+	return -ENODEV;
 i0 = 0;
-while (0xFFF != stk1160config[i0].reg) {
-	SET(p, stk1160config[i0].reg, stk1160config[i0].set);
-	i0++;
+if (true == ntsc) {
+	while (0xFFF != stk1160configNTSC[i0].reg) {
+		SET(p, stk1160configNTSC[i0].reg, stk1160configNTSC[i0].set);
+		i0++;
 	}
+} else {
+	while (0xFFF != stk1160configPAL[i0].reg) {
+		SET(p, stk1160configPAL[i0].reg, stk1160configPAL[i0].set);
+		i0++;
+	}
+}
 
 write_300(p);
 
@@ -252,19 +327,26 @@
 }
 /****************************************************************************/
 int
-setup_saa(struct usb_device *p)
+setup_saa(struct usb_device *p, bool ntsc)
 {
 int i0, ir;
 
-
-set2to78(p);
-
-
+if (NULL == p)
+	return -ENODEV;
 i0 = 0;
-while (0xFF != saa7113config[i0].reg) {
-	ir = write_saa(p, saa7113config[i0].reg, saa7113config[i0].set);
-	i0++;
+if (true == ntsc) {
+	while (0xFF != saa7113configNTSC[i0].reg) {
+		ir = write_saa(p, saa7113configNTSC[i0].reg, \
+					saa7113configNTSC[i0].set);
+		i0++;
 	}
+} else {
+	while (0xFF != saa7113configPAL[i0].reg) {
+		ir = write_saa(p, saa7113configPAL[i0].reg, \
+					saa7113configPAL[i0].set);
+		i0++;
+	}
+}
 return 0;
 }
 /****************************************************************************/
@@ -273,6 +355,8 @@
 {
 __u8 igot0, igot2;
 
+if (NULL == p)
+	return -ENODEV;
 GET(p, 0x0002, &igot2);
 GET(p, 0x0000, &igot0);
 SET(p, 0x0002, set2);
@@ -283,6 +367,8 @@
 int
 write_saa(struct usb_device *p, __u16 reg0, __u16 set0)
 {
+if (NULL == p)
+	return -ENODEV;
 SET(p, 0x200, 0x00);
 SET(p, 0x204, reg0);
 SET(p, 0x205, set0);
@@ -306,6 +392,8 @@
 __u16 got502, got503;
 __u16 set502, set503;
 
+if (NULL == p)
+	return -ENODEV;
 SET(p, 0x0504, reg0);
 SET(p, 0x0500, 0x008B);
 
@@ -341,6 +429,8 @@
 __u8 igot;
 __u16 got502, got503;
 
+if (NULL == p)
+	return -ENODEV;
 SET(p, 0x0504, reg0);
 SET(p, 0x0500, 0x008B);
 
@@ -360,6 +450,8 @@
 int
 write_300(struct usb_device *p)
 {
+if (NULL == p)
+	return -ENODEV;
 SET(p, 0x300, 0x0012);
 SET(p, 0x350, 0x002D);
 SET(p, 0x351, 0x0001);
@@ -376,24 +468,48 @@
  */
 /*--------------------------------------------------------------------------*/
 int
-check_saa(struct usb_device *p)
+check_saa(struct usb_device *p, bool ntsc)
 {
 int i0, ir, rc;
+
+if (NULL == p)
+	return -ENODEV;
 i0 = 0;
-
 rc = 0;
-while (0xFF != saa7113config[i0].reg) {
-	if (0x0F == saa7113config[i0].reg) {
-		i0++; continue;
-	}
+if (true == ntsc) {
+	while (0xFF != saa7113configNTSC[i0].reg) {
+		if (0x0F == saa7113configNTSC[i0].reg) {
+			i0++;
+			continue;
+		}
 
-	ir = read_saa(p, saa7113config[i0].reg);
-	if (ir != saa7113config[i0].set) {
-		SAY("SAA register 0x%02X has 0x%02X, expected 0x%02X\n", \
-			saa7113config[i0].reg, ir, saa7113config[i0].set);
-		rc--;
+		ir = read_saa(p, saa7113configNTSC[i0].reg);
+		if (ir != saa7113configNTSC[i0].set) {
+			SAY("SAA register 0x%02X has 0x%02X, " \
+						"expected 0x%02X\n", \
+						saa7113configNTSC[i0].reg, \
+						ir, saa7113configNTSC[i0].set);
+			rc--;
+		}
+		i0++;
 	}
-	i0++;
+} else {
+	while (0xFF != saa7113configPAL[i0].reg) {
+		if (0x0F == saa7113configPAL[i0].reg) {
+			i0++;
+			continue;
+		}
+
+		ir = read_saa(p, saa7113configPAL[i0].reg);
+		if (ir != saa7113configPAL[i0].set) {
+			SAY("SAA register 0x%02X has 0x%02X, " \
+						"expected 0x%02X\n", \
+						saa7113configPAL[i0].reg, \
+						ir, saa7113configPAL[i0].set);
+			rc--;
+		}
+		i0++;
+	}
 }
 if (-8 > rc)
 	return rc;
@@ -406,6 +522,8 @@
 {
 int rc;
 
+if (NULL == p)
+	return -ENODEV;
 rc = read_saa(p, 0x1F);
 if ((0 > rc) || (0x02 & rc))
 	return 1 ;
@@ -416,29 +534,46 @@
 int
 ready_saa(struct usb_device *p)
 {
-int j, rc;
-static int max = 10;
-
+int j, rc, rate;
+const int max = 5, marktime = PATIENCE/5;
+/*--------------------------------------------------------------------------*/
+/*
+ *   RETURNS    0     FOR INTERLACED       50 Hz
+ *              1     FOR NON-INTERLACED   50 Hz
+ *              2     FOR INTERLACED       60 Hz
+ *              3     FOR NON-INTERLACED   60 Hz
+*/
+/*--------------------------------------------------------------------------*/
+if (NULL == p)
+	return -ENODEV;
 j = 0;
 while (max > j) {
 	rc = read_saa(p, 0x1F);
 	if (0 <= rc) {
-		if ((1 == (0x01 & rc))&&(0 == (0x40 & rc)))
+		if (0 == (0x40 & rc))
+			break;
+		if (1 == (0x01 & rc))
 			break;
 	}
-	msleep(100);  j++;
+	msleep(marktime);
+	j++;
 }
 if (max == j)
 	return -1;
 else {
-	if (0x20 & rc)
+	if (0x20 & rc) {
+		rate = 2;
 		JOT(8, "hardware detects 60 Hz\n");
-	else
+	} else {
+		rate = 0;
 		JOT(8, "hardware detects 50 Hz\n");
+	}
 	if (0x80 & rc)
 		JOT(8, "hardware detects interlacing\n");
-	else
+	else {
+		rate++;
 		JOT(8, "hardware detects no interlacing\n");
+	}
 }
 return 0;
 }
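
Per the RETURNS comment at the top of ready_saa(), a non-negative result is a two-bit code packing field rate and interlacing; reset() in easycap_main.c later in this patch applies the same rate/2 test. A minimal sketch of the unpacking, assuming the caller receives that 0..3 code:

#include <stdbool.h>

/* Sketch: unpack the 0..3 code documented in the ready_saa() comment.
 * A negative value still means "no sync within PATIENCE ms".           */
static void decode_rate(int rate, bool *is_60hz, bool *is_interlaced)
{
	*is_60hz = (rate / 2 > 0);		/* 2 or 3: 60 Hz field rate, i.e. NTSC */
	*is_interlaced = !(rate & 0x01);	/* 0 or 2: interlaced                  */
}
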
@@ -447,45 +582,80 @@
 /*
  *  NOTE: THE FOLLOWING ARE NOT CHECKED:
  *  REGISTERS 0x000, 0x002:  FUNCTIONALITY IS NOT KNOWN
- *  REGISTER  0x100:  ACCEPT ALSO (0x80 | stk1160config[.].set)
+ *  REGISTER  0x100:  ACCEPT ALSO (0x80 | stk1160config....[.].set)
  */
 /*--------------------------------------------------------------------------*/
 int
-check_stk(struct usb_device *p)
+check_stk(struct usb_device *p, bool ntsc)
 {
 int i0, ir;
+
+if (NULL == p)
+	return -ENODEV;
 i0 = 0;
-while (0xFFF != stk1160config[i0].reg) {
-	if (0x000 == stk1160config[i0].reg) {
-		i0++; continue;
-	}
-	if (0x002 == stk1160config[i0].reg) {
-		i0++; continue;
-	}
-
-	ir = read_stk(p, stk1160config[i0].reg);
-
-	if (0x100 == stk1160config[i0].reg) {
-		if ((ir != (0xFF & stk1160config[i0].set)) && \
-			(ir != (0x80 | (0xFF & stk1160config[i0].set))) && \
-				(0xFFFF != stk1160config[i0].set)) {
-			SAY("STK register 0x%03X has 0x%02X, " \
-					"expected 0x%02X\n", \
-					stk1160config[i0].reg, ir, \
-					stk1160config[i0].set);
+if (true == ntsc) {
+	while (0xFFF != stk1160configNTSC[i0].reg) {
+		if (0x000 == stk1160configNTSC[i0].reg) {
+			i0++; continue;
+		}
+		if (0x002 == stk1160configNTSC[i0].reg) {
+			i0++; continue;
+		}
+		ir = read_stk(p, stk1160configNTSC[i0].reg);
+		if (0x100 == stk1160configNTSC[i0].reg) {
+			if ((ir != (0xFF & stk1160configNTSC[i0].set)) && \
+					(ir != (0x80 | (0xFF & \
+					stk1160configNTSC[i0].set))) && \
+					(0xFFFF != \
+					stk1160configNTSC[i0].set)) {
+				SAY("STK register 0x%03X has 0x%02X, " \
+						"expected 0x%02X\n", \
+						stk1160configNTSC[i0].reg, \
+						ir, stk1160configNTSC[i0].set);
+				}
+			i0++; continue;
 			}
-		i0++; continue;
+		if ((ir != (0xFF & stk1160configNTSC[i0].set)) && \
+				(0xFFFF != stk1160configNTSC[i0].set)) {
+			SAY("STK register 0x%03X has 0x%02X, " \
+						"expected 0x%02X\n", \
+						stk1160configNTSC[i0].reg, \
+						ir, stk1160configNTSC[i0].set);
 		}
-
-	if ((ir != (0xFF & stk1160config[i0].set)) && \
-			(0xFFFF != stk1160config[i0].set)) {
-		SAY("STK register 0x%03X has 0x%02X, " \
-					"expected 0x%02X\n", \
-					stk1160config[i0].reg, ir, \
-					stk1160config[i0].set);
-		}
-	i0++;
+		i0++;
 	}
+} else {
+	while (0xFFF != stk1160configPAL[i0].reg) {
+		if (0x000 == stk1160configPAL[i0].reg) {
+			i0++; continue;
+		}
+		if (0x002 == stk1160configPAL[i0].reg) {
+			i0++; continue;
+		}
+		ir = read_stk(p, stk1160configPAL[i0].reg);
+		if (0x100 == stk1160configPAL[i0].reg) {
+			if ((ir != (0xFF & stk1160configPAL[i0].set)) && \
+					(ir != (0x80 | (0xFF & \
+					stk1160configPAL[i0].set))) && \
+					(0xFFFF != \
+					stk1160configPAL[i0].set)) {
+				SAY("STK register 0x%03X has 0x%02X, " \
+						"expected 0x%02X\n", \
+						stk1160configPAL[i0].reg, \
+						ir, stk1160configPAL[i0].set);
+				}
+			i0++; continue;
+			}
+		if ((ir != (0xFF & stk1160configPAL[i0].set)) && \
+				(0xFFFF != stk1160configPAL[i0].set)) {
+			SAY("STK register 0x%03X has 0x%02X, " \
+						"expected 0x%02X\n", \
+						stk1160configPAL[i0].reg, \
+						ir, stk1160configPAL[i0].set);
+		}
+		i0++;
+	}
+}
 return 0;
 }
 /****************************************************************************/
@@ -494,6 +664,8 @@
 {
 __u8 igot;
 
+if (NULL == p)
+	return -ENODEV;
 SET(p, 0x208, reg0);
 SET(p, 0x200, 0x20);
 if (0 != wait_i2c(p))
@@ -508,12 +680,14 @@
 {
 __u8 igot;
 
+if (NULL == p)
+	return -ENODEV;
 igot = 0;
 GET(p, reg0, &igot);
 return igot;
 }
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
+/****************************************************************************/
+/*--------------------------------------------------------------------------*/
 /*
  *    HARDWARE    USERSPACE INPUT NUMBER   PHYSICAL INPUT   DRIVER input VALUE
  *
@@ -534,81 +708,100 @@
 int
 select_input(struct usb_device *p, int input, int mode)
 {
+int ir;
 
+if (NULL == p)
+	return -ENODEV;
 stop_100(p);
-
-msleep(20);
 switch (input) {
 case 0:
 case 1: {
-	SET(p, 0x0000, 0x0098); break;
+	if (0 != write_saa(p, 0x02, 0x80)) {
+		SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+									input);
+	}
+	SET(p, 0x0000, 0x0098);
+	SET(p, 0x0002, 0x0078);
+	break;
 }
 case 2: {
-	SET(p, 0x0000, 0x0090); break;
+	if (0 != write_saa(p, 0x02, 0x80)) {
+		SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+									input);
+	}
+	SET(p, 0x0000, 0x0090);
+	SET(p, 0x0002, 0x0078);
+	break;
 }
 case 3: {
-	SET(p, 0x0000, 0x0088); break;
+	if (0 != write_saa(p, 0x02, 0x80)) {
+		SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+									input);
+	}
+	SET(p, 0x0000, 0x0088);
+	SET(p, 0x0002, 0x0078);
+	break;
 }
 case 4: {
-	SET(p, 0x0000, 0x0080); break;
+	if (0 != write_saa(p, 0x02, 0x80)) {
+		SAY("ERROR: failed to set SAA register 0x02 for input %i\n", \
+									input);
+	}
+	SET(p, 0x0000, 0x0080);
+	SET(p, 0x0002, 0x0078);
+	break;
 }
 case 5: {
 	if (9 != mode)
 		mode = 7;
 	switch (mode) {
-	case 7:
-		{
+	case 7: {
 		if (0 != write_saa(p, 0x02, 0x87)) {
-			SAY("ERROR: failed to set SAA " \
-				"register 0x02 for input " \
-				"%i\n", input);
+			SAY("ERROR: failed to set SAA register 0x02 " \
+						"for input %i\n", input);
 		}
 		if (0 != write_saa(p, 0x05, 0xFF)) {
-			SAY("ERROR: failed to set SAA " \
-				"register 0x05 for input " \
-				"%i\n", input);
+			SAY("ERROR: failed to set SAA register 0x05 " \
+						"for input %i\n", input);
 		}
 		break;
 	}
-	case 9:
-		{
+	case 9: {
 		if (0 != write_saa(p, 0x02, 0x89)) {
-			SAY("ERROR: failed to set SAA " \
-				"register 0x02 for input " \
-				"%i\n", input);
+			SAY("ERROR: failed to set SAA register 0x02 " \
+						"for input %i\n", input);
 		}
 		if (0 != write_saa(p, 0x05, 0x00)) {
-			SAY("ERROR: failed to set SAA " \
-				"register 0x05 for input " \
-				"%i\n", input);
+			SAY("ERROR: failed to set SAA register 0x05 " \
+						"for input %i\n", input);
 		}
-		break;
+	break;
 	}
-	default:
-		{
+	default: {
 		SAY("MISTAKE:  bad mode: %i\n", mode);
 		return -1;
-		}
+	}
 	}
 	if (0 != write_saa(p, 0x04, 0x00)) {
-		SAY("ERROR: failed to set SAA register 0x04 " \
-					"for input %i\n", input);
+		SAY("ERROR: failed to set SAA register 0x04 for input %i\n", \
+									input);
 	}
 	if (0 != write_saa(p, 0x09, 0x80)) {
-		SAY("ERROR: failed to set SAA register 0x09 " \
-					"for input %i\n", input);
+		SAY("ERROR: failed to set SAA register 0x09 for input %i\n", \
+									input);
 	}
+	SET(p, 0x0002, 0x0093);
 	break;
 }
-default:
-	{
+default: {
 	SAY("ERROR:  bad input: %i\n", input);
 	return -1;
 }
 }
-msleep(20);
-SET(p, 0x0002, 0x0093);
-msleep(20);
+ir = read_stk(p, 0x00);
+JOT(8, "STK register 0x00 has 0x%02X\n", ir);
+ir = read_saa(p, 0x02);
+JOT(8, "SAA register 0x02 has 0x%02X\n", ir);
 
 start_100(p);
 
@@ -621,6 +814,8 @@
 {
 __u16 u0x0111, u0x0113, u0x0115, u0x0117;
 
+if (NULL == p)
+	return -ENODEV;
 u0x0111 = ((0xFF00 & set0) >> 8);
 u0x0113 = ((0xFF00 & set1) >> 8);
 u0x0115 = ((0xFF00 & set2) >> 8);
@@ -641,13 +836,25 @@
 int
 start_100(struct usb_device *p)
 {
-__u16 get0;
-__u8 igot;
+__u16 get116, get117, get0;
+__u8 igot116, igot117, igot;
 
-GET(p, 0x0100, &igot);  get0 = igot;
-msleep(0x1f4);
+if (NULL == p)
+	return -ENODEV;
+GET(p, 0x0116, &igot116);
+get116 = igot116;
+GET(p, 0x0117, &igot117);
+get117 = igot117;
+SET(p, 0x0116, 0x0000);
+SET(p, 0x0117, 0x0000);
+
+GET(p, 0x0100, &igot);
+get0 = igot;
 SET(p, 0x0100, (0x80 | get0));
-msleep(0x1f4);
+
+SET(p, 0x0116, get116);
+SET(p, 0x0117, get117);
+
 return 0;
 }
 /****************************************************************************/
@@ -657,10 +864,11 @@
 __u16 get0;
 __u8 igot;
 
-GET(p, 0x0100, &igot);  get0 = igot;
-msleep(0x1f4);
+if (NULL == p)
+	return -ENODEV;
+GET(p, 0x0100, &igot);
+get0 = igot;
 SET(p, 0x0100, (0x7F & get0));
-msleep(0x1f4);
 return 0;
 }
 /****************************************************************************/
@@ -674,9 +882,11 @@
 {
 __u16 get0;
 __u8 igot;
-const int max = 4;
+const int max = 2;
 int k;
 
+if (NULL == p)
+	return -ENODEV;
 for (k = 0;  k < max;  k++) {
 	GET(p, 0x0201, &igot);  get0 = igot;
 	switch (get0) {
@@ -685,7 +895,7 @@
 		return 0;
 	}
 	case 0x00: {
-		msleep(10);
+		msleep(20);
 		continue;
 	}
 	default: {
@@ -703,8 +913,7 @@
 int rc0, rc1;
 
 if (!pusb_device)
-	return -EFAULT;
-
+	return -ENODEV;
 rc1 = 0;  igot = 0;
 rc0 = usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
 		(__u8)0x01, \
@@ -741,27 +950,14 @@
 case 0x205:
 case 0x350:
 case 0x351: {
-	if (0 != igot) {
+	if (0 != (0xFF & igot)) {
 		JOT(8, "unexpected 0x%02X for STK register 0x%03X\n", \
 								igot, index);
 	}
 break;
 }
-case 0x114:
-case 0x116: {
-	if ((0xFF & value) != igot) {
-		JOT(8, "unexpected 0x%02X != 0x%02X " \
-						"for STK register 0x%03X\n", \
-						igot, value, index);
-	}
-break;
-}
-case 0x200: {
-	if (0 == igot)
-		break;
-}
 default: {
-	if (value != igot) {
+	if ((0xFF & value) != (0xFF & igot)) {
 		JOT(8, "unexpected 0x%02X != 0x%02X " \
 					"for STK register 0x%03X\n", \
 					igot, value, index);
@@ -780,8 +976,7 @@
 int ir;
 
 if (!pusb_device)
-	return -EFAULT;
-
+	return -ENODEV;
 ir = usb_control_msg(pusb_device, usb_rcvctrlpipe(pusb_device, 0), \
 		(__u8)0x00, \
 		(__u8)(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE), \
@@ -796,6 +991,8 @@
 int
 wakeup_device(struct usb_device *pusb_device)
 {
+if (!pusb_device)
+	return -ENODEV;
 return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0), \
 		(__u8)USB_REQ_SET_FEATURE, \
 		(__u8)(USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE), \
@@ -806,6 +1003,12 @@
 		(int)50000);
 }
 /*****************************************************************************/
+int
+audio_setup(struct easycap *peasycap)
+{
+struct usb_device *pusb_device;
+unsigned char buffer[1];
+int rc, id1, id2;
 /*---------------------------------------------------------------------------*/
 /*
  *                                IMPORTANT:
@@ -814,29 +1017,21 @@
  *  TO ENABLE AUDIO  THE VALUE 0x0200 MUST BE SENT.
  */
 /*---------------------------------------------------------------------------*/
-int
-audio_setup(struct easycap *peasycap)
-{
-struct usb_device *pusb_device;
-static __u8 request = 0x01;
-static __u8 requesttype = \
+const __u8 request = 0x01;
+const __u8 requesttype = \
 		(__u8)(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
-
-static __u16 value_unmute = 0x0200;
-static __u16 index = 0x0301;
-
-static unsigned char buffer[1];
-static __u16 length = 1;
-int rc, id1, id2;
+const __u16 value_unmute = 0x0200;
+const __u16 index = 0x0301;
+const __u16 length = 1;
 
 if (NULL == peasycap)
 	return -EFAULT;
 
 pusb_device = peasycap->pusb_device;
 if (NULL == pusb_device)
-	return -EFAULT;
+	return -ENODEV;
 
-JOT(8, "%02X %02X %02X %02X %02X %02X %02X %02X\n",	\
+JOM(8, "%02X %02X %02X %02X %02X %02X %02X %02X\n",	\
 			requesttype, request,		\
 			(0x00FF & value_unmute),	\
 			(0xFF00 & value_unmute) >> 8,	\
@@ -875,41 +1070,25 @@
  *                    THE UPPER BYTE SEEMS TO HAVE NO EFFECT.
  */
 /*--------------------------------------------------------------------------*/
-
 SET(pusb_device, 0x0500, 0x0094);
-
 SET(pusb_device, 0x0500, 0x008C);
-
 SET(pusb_device, 0x0506, 0x0001);
 SET(pusb_device, 0x0507, 0x0000);
-
 id1 = read_vt(pusb_device, 0x007C);
 id2 = read_vt(pusb_device, 0x007E);
-SAY("0x%04X:0x%04X is audio vendor id\n", id1, id2);
-
+SAM("0x%04X:0x%04X is audio vendor id\n", id1, id2);
 /*---------------------------------------------------------------------------*/
 /*
-*   SELECT AUDIO SOURCE "LINE IN" AND SET DEFAULT GAIN TO 0 dB.
-*
-*   THESE COMMANDS SEEM TO BE ACCEPTED (THOUGH POSSIBLY IGNORED) EVEN WHEN
-*   THERE IS NO SEPARATE AUDIO CHIP PRESENT.
+ *  SELECT AUDIO SOURCE "LINE IN" AND SET THE AUDIO GAIN.
 */
 /*---------------------------------------------------------------------------*/
-
-write_vt(pusb_device, 0x0002, 0x8000);
-write_vt(pusb_device, 0x001C, 0x8000);
-
-write_vt(pusb_device, 0x000E, 0x0000);
-write_vt(pusb_device, 0x0010, 0x0000);
-write_vt(pusb_device, 0x0012, 0x8000);
-write_vt(pusb_device, 0x0016, 0x0000);
-
-write_vt(pusb_device, 0x001A, 0x0404);
-write_vt(pusb_device, 0x0002, 0x0000);
-write_vt(pusb_device, 0x001C, 0x0000);
-
+if (31 < easycap_gain)
+	easycap_gain = 31;
+if (0 > easycap_gain)
+	easycap_gain = 0;
+if (0 != audio_gainset(pusb_device, (__s8)easycap_gain))
+	SAY("ERROR: audio_gainset() failed\n");
 check_vt(pusb_device);
-
 return 0;
 }
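
The IMPORTANT note inside audio_setup() says that sending the value 0x0200 to wIndex 0x0301 enables audio; built from the constants declared in that function, the class control transfer it describes looks essentially like the sketch below. The helper name, the payload byte and the timeout are assumptions (the timeout is copied from wakeup_device() above), not values taken from this hunk:

/* Sketch (kernel context, same includes as easycap_low.c assumed) of the
 * unmute transfer described in the IMPORTANT note, using the same
 * constants as audio_setup().                                            */
static int easycap_unmute_sketch(struct usb_device *pusb_device)
{
	const __u8 request = 0x01;
	const __u8 requesttype = (__u8)(USB_DIR_OUT | USB_TYPE_CLASS |
							USB_RECIP_INTERFACE);
	const __u16 value_unmute = 0x0200;
	const __u16 index = 0x0301;
	const __u16 length = 1;
	unsigned char buffer[1] = {0x01};	/* hypothetical payload byte */

	if (NULL == pusb_device)
		return -ENODEV;
	return usb_control_msg(pusb_device, usb_sndctrlpipe(pusb_device, 0),
				request, requesttype, value_unmute, index,
				&buffer[0], length, 50000);
}
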
 /*****************************************************************************/
@@ -918,6 +1097,8 @@
 {
 int igot;
 
+if (!pusb_device)
+	return -ENODEV;
 igot = read_vt(pusb_device, 0x0002);
 if (0 > igot)
 	SAY("ERROR: failed to read VT1612A register 0x02\n");
@@ -942,17 +1123,23 @@
 if (0x8000 & igot)
 	SAY("register 0x%02X muted\n", 0x12);
 
+igot = read_vt(pusb_device, 0x0014);
+if (0 > igot)
+	SAY("ERROR: failed to read VT1612A register 0x14\n");
+if (0x8000 & igot)
+	SAY("register 0x%02X muted\n", 0x14);
+
 igot = read_vt(pusb_device, 0x0016);
 if (0 > igot)
 	SAY("ERROR: failed to read VT1612A register 0x16\n");
 if (0x8000 & igot)
 	SAY("register 0x%02X muted\n", 0x16);
 
-igot = read_vt(pusb_device, 0x001A);
+igot = read_vt(pusb_device, 0x0018);
 if (0 > igot)
-	SAY("ERROR: failed to read VT1612A register 0x1A\n");
+	SAY("ERROR: failed to read VT1612A register 0x18\n");
 if (0x8000 & igot)
-	SAY("register 0x%02X muted\n", 0x1A);
+	SAY("register 0x%02X muted\n", 0x18);
 
 igot = read_vt(pusb_device, 0x001C);
 if (0 > igot)
@@ -964,14 +1151,18 @@
 }
 /*****************************************************************************/
 /*---------------------------------------------------------------------------*/
-/*
- *  NOTE:  THIS DOES INCREASE THE VOLUME DRAMATICALLY:
- *         audio_gainset(pusb_device, 0x000F);
+/*  NOTE:  THIS DOES INCREASE THE VOLUME DRAMATICALLY:
+ *                      audio_gainset(pusb_device, 0x000F);
  *
- *  IF 16<loud<31 VT1621A REGISTER 0x1C IS SET FOR POSITIVE GAIN.
- *  IF loud<=16 VT1621A REGISTER 0x1C IS SET FOR ZERO GAIN.
- *  THERE IS NEVER ANY (ADDITIONAL) ATTENUATION.
- */
+ *       loud        dB  register 0x10      dB register 0x1C    dB total
+ *         0               -34.5                   0             -34.5
+ *        ..                ....                   .              ....
+ *        15                10.5                   0              10.5
+ *        16                12.0                   0              12.0
+ *        17                12.0                   1.5            13.5
+ *        ..                ....                  ....            ....
+ *        31                12.0                  22.5            34.5
+*/
 /*---------------------------------------------------------------------------*/
 int
 audio_gainset(struct usb_device *pusb_device, __s8 loud)
@@ -980,25 +1171,65 @@
 __u8 u8;
 __u16 mute;
 
-if (16 > loud)
-	loud = 16;
-u8 = 0x000F & (__u8)(loud - 16);
+if (NULL == pusb_device)
+	return -ENODEV;
+if (0 > loud)
+	loud = 0;
+if (31 < loud)
+	loud = 31;
 
 write_vt(pusb_device, 0x0002, 0x8000);
+/*---------------------------------------------------------------------------*/
+igot = read_vt(pusb_device, 0x000E);
+if (0 > igot) {
+	SAY("ERROR: failed to read VT1612A register 0x0E\n");
+	mute = 0x0000;
+} else
+	mute = 0x8000 & ((unsigned int)igot);
+mute = 0;
 
+if (16 > loud)
+	u8 = 0x01 | (0x001F & (((__u8)(15 - loud)) << 1));
+else
+	u8 = 0;
+
+JOT(8, "0x%04X=(mute|u8) for VT1612A register 0x0E\n", mute | u8);
+write_vt(pusb_device, 0x000E, (mute | u8));
+/*---------------------------------------------------------------------------*/
+igot = read_vt(pusb_device, 0x0010);
+if (0 > igot) {
+	SAY("ERROR: failed to read VT1612A register 0x10\n");
+	mute = 0x0000;
+} else
+	mute = 0x8000 & ((unsigned int)igot);
+mute = 0;
+
+JOT(8, "0x%04X=(mute|u8|(u8<<8)) for VT1612A register 0x10,...0x18\n", \
+							mute | u8 | (u8 << 8));
+write_vt(pusb_device, 0x0010, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0012, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0014, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0016, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x0018, (mute | u8 | (u8 << 8)));
+/*---------------------------------------------------------------------------*/
 igot = read_vt(pusb_device, 0x001C);
 if (0 > igot) {
 	SAY("ERROR: failed to read VT1612A register 0x1C\n");
 	mute = 0x0000;
 } else
 	mute = 0x8000 & ((unsigned int)igot);
+mute = 0;
 
-JOT(8, "0x%04X=(mute|u8|(u8<<8))\n", mute | u8 | (u8 << 8));
+if (16 <= loud)
+	u8 = 0x000F & (__u8)(loud - 16);
+else
+	u8 = 0;
 
-write_vt(pusb_device, 0x001C, 0x8000);
+JOT(8, "0x%04X=(mute|u8|(u8<<8)) for VT1612A register 0x1C\n", \
+							mute | u8 | (u8 << 8));
 write_vt(pusb_device, 0x001C, (mute | u8 | (u8 << 8)));
+write_vt(pusb_device, 0x001A, 0x0404);
 write_vt(pusb_device, 0x0002, 0x0000);
-
 return 0;
 }
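
The table above works out to 1.5 dB per register step: below 16, audio_gainset() programs the attenuator code 0x01 | ((15 - loud) << 1) into registers 0x0E through 0x18, and at 16 and above it puts loud - 16 into the low nibble of register 0x1C. A standalone sketch of the resulting overall gain, with the 1.5 dB step inferred from the table rather than from a datasheet:

/* Sketch: overall gain in tenths of a dB for a given loud value,
 * reproducing the table documented before audio_gainset().
 * Assumes 1.5 dB per register step, as the table implies.         */
static int easycap_gain_db10(int loud)
{
	if (loud < 0)
		loud = 0;
	if (loud > 31)
		loud = 31;
	if (loud < 16)	/* attenuation via registers 0x0E..0x18 */
		return 120 - 15 * (0x01 | ((15 - loud) << 1));
	/* additional gain via register 0x1C */
	return 120 + 15 * (loud - 16);
}
/* loud =  0  ->  -345  (-34.5 dB)
 * loud = 15  ->   105  (+10.5 dB)
 * loud = 16  ->   120  (+12.0 dB)
 * loud = 31  ->   345  (+34.5 dB)   -- matching the table above  */
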
 /*****************************************************************************/
@@ -1007,35 +1238,11 @@
 {
 int igot;
 
+if (NULL == pusb_device)
+	return -ENODEV;
 igot = read_vt(pusb_device, 0x001C);
 if (0 > igot)
 	SAY("ERROR: failed to read VT1612A register 0x1C\n");
 return igot;
 }
 /*****************************************************************************/
-int
-set2to78(struct usb_device *p)
-{
-int ir;
-
-msleep(20);
-ir = regset(p, 0x0002, 0x0078);
-if (0 > ir)
-	SAY("ERROR: failed to set register 0x0002 to 0x0078\n");
-msleep(20);
-return ir;
-}
-/*****************************************************************************/
-int
-set2to93(struct usb_device *p)
-{
-int ir;
-
-msleep(20);
-ir = regset(p, 0x0002, 0x0093);
-if (0 > ir)
-	SAY("ERROR: failed to set register 0x0002 to 0x0078\n");
-msleep(20);
-return ir;
-}
-/*****************************************************************************/
diff --git a/drivers/staging/easycap/easycap_main.c b/drivers/staging/easycap/easycap_main.c
index 5a4bbd9..acc1f56 100644
--- a/drivers/staging/easycap/easycap_main.c
+++ b/drivers/staging/easycap/easycap_main.c
@@ -30,9 +30,29 @@
 
 #include "easycap.h"
 #include "easycap_standard.h"
+#include "easycap_ioctl.h"
 
-int easycap_debug;
-module_param(easycap_debug, int, S_IRUGO | S_IWUSR);
+static int easycap_debug;
+static int easycap_bars;
+int easycap_gain = 16;
+module_param_named(debug, easycap_debug, int, S_IRUGO | S_IWUSR);
+module_param_named(bars, easycap_bars, int, S_IRUGO | S_IWUSR);
+module_param_named(gain, easycap_gain, int, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+/*
+ *  dongle_this IS INDISPENSABLY static BECAUSE FUNCTION easycap_usb_probe()
+ *  IS CALLED SUCCESSIVELY FOR INTERFACES 0, 1, 2 AND THE POINTER peasycap
+ *  ALLOCATED DURING THE PROBING OF INTERFACE 0 MUST BE REMEMBERED WHEN
+ *  PROBING INTERFACES 1 AND 2.
+ *
+ *  IOCTL LOCKING IS DONE AT MODULE LEVEL, NOT DEVICE LEVEL.
+*/
+/*---------------------------------------------------------------------------*/
+
+struct easycap_dongle easycap_dongle[DONGLE_MANY];
+static int dongle_this;
+static int dongle_done;
 
 /*---------------------------------------------------------------------------*/
 /*
@@ -63,22 +83,25 @@
 	.owner		= THIS_MODULE,
 	.open		= easycap_open,
 	.release	= easycap_release,
-	.unlocked_ioctl	= easycap_ioctl,
+#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
+	.unlocked_ioctl	= easycap_ioctl_noinode,
+#else
+	.ioctl		= easycap_ioctl,
+#endif /*EASYCAP_NEEDS_UNLOCKED_IOCTL*/
 	.poll		= easycap_poll,
 	.mmap		= easycap_mmap,
 	.llseek		= no_llseek,
 };
 struct vm_operations_struct easycap_vm_ops = {
-.open  = easycap_vma_open,
-.close = easycap_vma_close,
-.fault = easycap_vma_fault,
+	.open  = easycap_vma_open,
+	.close = easycap_vma_close,
+	.fault = easycap_vma_fault,
 };
 struct usb_class_driver easycap_class = {
-.name = "usb/easycap%d",
-.fops = &easycap_fops,
-.minor_base = USB_SKEL_MINOR_BASE,
+	.name = "usb/easycap%d",
+	.fops = &easycap_fops,
+	.minor_base = USB_SKEL_MINOR_BASE,
 };
-
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #if defined(EASYCAP_IS_VIDEODEV_CLIENT)
 #if defined(EASYCAP_NEEDS_V4L2_FOPS)
@@ -86,16 +109,17 @@
 	.owner		= THIS_MODULE,
 	.open		= easycap_open_noinode,
 	.release	= easycap_release_noinode,
-	.unlocked_ioctl	= easycap_ioctl,
+#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
+	.unlocked_ioctl	= easycap_ioctl_noinode,
+#else
+	.ioctl		= easycap_ioctl,
+#endif /*EASYCAP_NEEDS_UNLOCKED_IOCTL*/
 	.poll		= easycap_poll,
 	.mmap		= easycap_mmap,
 };
 #endif /*EASYCAP_NEEDS_V4L2_FOPS*/
-int video_device_many /*=0*/;
-struct video_device *pvideo_array[VIDEO_DEVICE_MANY], *pvideo_device;
 #endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
 /*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-
 /*--------------------------------------------------------------------------*/
 /*
  *  PARAMETERS USED WHEN REGISTERING THE AUDIO INTERFACE
@@ -105,7 +129,11 @@
 	.owner		= THIS_MODULE,
 	.open		= easysnd_open,
 	.release	= easysnd_release,
-	.unlocked_ioctl	= easysnd_ioctl,
+#if defined(EASYCAP_NEEDS_UNLOCKED_IOCTL)
+	.unlocked_ioctl	= easysnd_ioctl_noinode,
+#else
+	.ioctl		= easysnd_ioctl,
+#endif /*EASYCAP_NEEDS_UNLOCKED_IOCTL*/
 	.read		= easysnd_read,
 	.llseek		= no_llseek,
 };
@@ -115,17 +143,26 @@
 .minor_base = USB_SKEL_MINOR_BASE,
 };
 /****************************************************************************/
-/*--------------------------------------------------------------------------*/
+/*---------------------------------------------------------------------------*/
 /*
- *  IT IS NOT APPROPRIATE FOR easycap_open() TO SUBMIT THE VIDEO URBS HERE,
- *  BECAUSE THERE WILL ALWAYS BE SUBSEQUENT NEGOTIATION OF TV STANDARD AND
- *  FORMAT BY IOCTL AND IT IS INADVISABLE TO HAVE THE URBS RUNNING WHILE
- *  REGISTERS OF THE SA7113H ARE BEING MANIPULATED.
- *
- *  THE SUBMISSION OF VIDEO URBS IS THEREFORE DELAYED UNTIL THE IOCTL COMMAND
- *  STREAMON IS RECEIVED.
- */
-/*--------------------------------------------------------------------------*/
+ *  THIS ROUTINE DOES NOT DETECT DUPLICATE OCCURRENCES OF POINTER peasycap
+*/
+/*---------------------------------------------------------------------------*/
+int
+isdongle(struct easycap *peasycap)
+{
+int k;
+if (NULL == peasycap)
+	return -2;
+for (k = 0; k < DONGLE_MANY; k++) {
+	if (easycap_dongle[k].peasycap == peasycap) {
+		peasycap->isdongle = k;
+		return k;
+	}
+}
+return -1;
+}
+/*****************************************************************************/
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #if defined(EASYCAP_IS_VIDEODEV_CLIENT)
 int
@@ -140,15 +177,17 @@
 {
 #if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
 struct usb_interface *pusb_interface;
+#else
+struct video_device *pvideo_device;
 #endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-struct usb_device *p;
 struct easycap *peasycap;
-int i, k, m, rc;
+int rc;
 
 JOT(4, "\n");
 SAY("==========OPEN=========\n");
 
 peasycap = (struct easycap *)NULL;
+/*---------------------------------------------------------------------------*/
 #if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
 if ((struct inode *)NULL == inode) {
 	SAY("ERROR: inode is NULL.\n");
@@ -162,161 +201,427 @@
 peasycap = usb_get_intfdata(pusb_interface);
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #else
-for (i = 0;  i < video_device_many;  i++) {
-	pvideo_device = pvideo_array[i];
-	if ((struct video_device *)NULL != pvideo_device) {
-		peasycap = (struct easycap *)video_get_drvdata(pvideo_device);
-		break;
-	}
-}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-if ((struct easycap *)NULL == peasycap) {
-	SAY("MISTAKE: peasycap is NULL\n");
+pvideo_device = video_devdata(file);
+if ((struct video_device *)NULL == pvideo_device) {
+	SAY("ERROR: pvideo_device is NULL.\n");
 	return -EFAULT;
 }
-file->private_data = peasycap;
-/*---------------------------------------------------------------------------*/
-/*
- *  INITIALIZATION
- */
-/*---------------------------------------------------------------------------*/
-JOT(4, "starting initialization\n");
-
-for (k = 0;  k < FRAME_BUFFER_MANY;  k++) {
-	for (m = 0;  m < FRAME_BUFFER_SIZE/PAGE_SIZE;  m++)
-		memset(peasycap->frame_buffer[k][m].pgo, 0, PAGE_SIZE);
+peasycap = (struct easycap *)video_get_drvdata(pvideo_device);
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
 }
-p = peasycap->pusb_device;
-if ((struct usb_device *)NULL == p) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return -EFAULT;
+}
+if (NULL == peasycap->pusb_device) {
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 } else {
-	JOT(16, "0x%08lX=peasycap->pusb_device\n", \
+	JOM(16, "0x%08lX=peasycap->pusb_device\n", \
 					(long int)peasycap->pusb_device);
 }
+file->private_data = peasycap;
 rc = wakeup_device(peasycap->pusb_device);
 if (0 == rc)
-	JOT(8, "wakeup_device() OK\n");
+	JOM(8, "wakeup_device() OK\n");
 else {
-	SAY("ERROR: wakeup_device() returned %i\n", rc);
+	SAM("ERROR: wakeup_device() returned %i\n", rc);
+	if (-ENODEV == rc)
+		SAM("ERROR: wakeup_device() returned -ENODEV\n");
+	else
+		SAM("ERROR: wakeup_device() returned %i\n", rc);
+	return rc;
+}
+peasycap->input = 0;
+rc = reset(peasycap);
+if (0 != rc) {
+	SAM("ERROR: reset() returned %i\n", rc);
 	return -EFAULT;
 }
-rc = setup_stk(p);  peasycap->input = 0;
-if (0 == rc)
-	JOT(8, "setup_stk() OK\n");
-else {
-	SAY("ERROR: setup_stk() returned %i\n", rc);
-	return -EFAULT;
+return 0;
 }
-rc = setup_saa(p);
-if (0 == rc)
-	JOT(8, "setup_saa() OK\n");
-else {
-	SAY("ERROR: setup_saa() returned %i\n", rc);
-	return -EFAULT;
-}
-rc = check_saa(p);
-if (0 == rc)
-	JOT(8, "check_saa() OK\n");
-else if (-8 < rc)
-	SAY("check_saa() returned %i\n", rc);
-else {
-	SAY("ERROR: check_saa() returned %i\n", rc);
-	return -EFAULT;
-}
-peasycap->standard_offset = -1;
+/*****************************************************************************/
 /*---------------------------------------------------------------------------*/
-#if defined(PREFER_NTSC)
-
-rc = adjust_standard(peasycap, V4L2_STD_NTSC_M);
-if (0 == rc)
-	JOT(8, "adjust_standard(.,NTSC_M) OK\n");
-else {
-	SAY("ERROR: adjust_standard(.,NTSC_M) returned %i\n", rc);
-	return -EFAULT;
-}
-rc = adjust_format(peasycap, 640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, \
-									false);
-if (0 <= rc)
-	JOT(8, "adjust_format(.,640,480,UYVY) OK\n");
-else {
-	SAY("ERROR: adjust_format(.,640,480,UYVY) returned %i\n", rc);
-	return -EFAULT;
-}
-
-#else
-
-rc = adjust_standard(peasycap, \
-		(V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
-		V4L2_STD_PAL_I | V4L2_STD_PAL_N));
-if (0 == rc)
-	JOT(8, "adjust_standard(.,PAL_BGHIN) OK\n");
-else {
-	SAY("ERROR: adjust_standard(.,PAL_BGHIN) returned %i\n", rc);
-	return -EFAULT;
-}
-rc = adjust_format(peasycap, 640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, \
-									false);
-if (0 <= rc)
-	JOT(8, "adjust_format(.,640,480,uyvy,false) OK\n");
-else {
-	SAY("ERROR: adjust_format(.,640,480,uyvy,false) returned %i\n", rc);
-	return -EFAULT;
-}
-
-#endif /* !PREFER_NTSC*/
+/*
+ *  RESET THE HARDWARE TO ITS REFERENCE STATE.
+ *
+ *  THIS ROUTINE MAY BE CALLED REPEATEDLY IF easycap_complete() DETECTS
+ *  A BAD VIDEO FRAME SIZE.
+*/
 /*---------------------------------------------------------------------------*/
-rc = adjust_brightness(peasycap, -8192);
-if (0 != rc) {
-	SAY("ERROR: adjust_brightness(default) returned %i\n", rc);
+int
+reset(struct easycap *peasycap)
+{
+struct easycap_standard const *peasycap_standard;
+int i, rc, input, rate;
+bool ntsc, other;
+
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
 	return -EFAULT;
 }
-rc = adjust_contrast(peasycap, -8192);
-if (0 != rc) {
-	SAY("ERROR: adjust_contrast(default) returned %i\n", rc);
-	return -EFAULT;
-}
-rc = adjust_saturation(peasycap, -8192);
-if (0 != rc) {
-	SAY("ERROR: adjust_saturation(default) returned %i\n", rc);
-	return -EFAULT;
-}
-rc = adjust_hue(peasycap, -8192);
-if (0 != rc) {
-	SAY("ERROR: adjust_hue(default) returned %i\n", rc);
-	return -EFAULT;
-}
+input = peasycap->input;
+
 /*---------------------------------------------------------------------------*/
-rc = usb_set_interface(peasycap->pusb_device, peasycap->video_interface, \
-						peasycap->video_altsetting_on);
+/*
+ *  IF THE SAA7113H HAS ALREADY ACQUIRED SYNC, USE ITS HARDWARE-DETECTED
+ *  FIELD FREQUENCY TO DISTINGUISH NTSC FROM PAL.  THIS IS ESSENTIAL FOR
+ *  gstreamer AND OTHER USERSPACE PROGRAMS WHICH MAY NOT ATTEMPT TO INITIATE
+ *  A SWITCH BETWEEN PAL AND NTSC.
+ *
+ *  FUNCTION ready_saa() MAY REQUIRE A SUBSTANTIAL FRACTION OF A SECOND TO
+ *  COMPLETE, SO SHOULD NOT BE INVOKED WITHOUT GOOD REASON.
+*/
+/*---------------------------------------------------------------------------*/
+other = false;
+if (true == peasycap->ntsc)
+	JOM(8, "true=peasycap->ntsc\n");
+else
+	JOM(8, "false=peasycap->ntsc\n");
+rate = ready_saa(peasycap->pusb_device);
+if (0 > rate) {
+	JOM(8, "not ready to capture after %i ms ...\n", PATIENCE);
+	if (true == peasycap->ntsc) {
+		JOM(8, "... trying PAL ...\n");  ntsc = false;
+	} else {
+		JOM(8, "... trying NTSC ...\n"); ntsc = true;
+}
+rc = setup_stk(peasycap->pusb_device, ntsc);
 if (0 == rc)
-	JOT(8, "usb_set_interface(.,%i,%i) OK\n", peasycap->video_interface, \
-						peasycap->video_altsetting_on);
+	JOM(4, "setup_stk() OK\n");
 else {
-	SAY("ERROR: usb_set_interface() returned %i\n", rc);
+	SAM("ERROR: setup_stk() returned %i\n", rc);
 	return -EFAULT;
 }
-rc = start_100(p);
+rc = setup_saa(peasycap->pusb_device, ntsc);
 if (0 == rc)
-	JOT(8, "start_100() OK\n");
+	JOM(4, "setup_saa() OK\n");
 else {
-	SAY("ERROR: start_100() returned %i\n", rc);
+	SAM("ERROR: setup_saa() returned %i\n", rc);
 	return -EFAULT;
 }
-peasycap->video_isoc_sequence = VIDEO_ISOC_BUFFER_MANY - 1;
-peasycap->video_idle = 0;
-peasycap->video_junk = 0;
+rate = ready_saa(peasycap->pusb_device);
+if (0 > rate) {
+	JOM(8, "not ready to capture after %i ms ...\n", PATIENCE);
+	JOM(8, "... saa register 0x1F has 0x%02X\n", \
+				read_saa(peasycap->pusb_device, 0x1F));
+	ntsc = peasycap->ntsc;
+	} else {
+		JOM(8, "... success at second try:  %i=rate\n", rate);
+		ntsc = (0 < (rate/2)) ? true : false ;
+		other = true;
+	}
+} else {
+	JOM(8, "... success at first try:  %i=rate\n", rate);
+	ntsc = (0 < rate/2) ? true : false ;
+}
+if (true == ntsc)
+	JOM(8, "true=ntsc\n");
+else
+	JOM(8, "false=ntsc\n");
+/*---------------------------------------------------------------------------*/
+
+rc = setup_stk(peasycap->pusb_device, ntsc);
+if (0 == rc)
+	JOM(4, "setup_stk() OK\n");
+else {
+	SAM("ERROR: setup_stk() returned %i\n", rc);
+	return -EFAULT;
+}
+rc = setup_saa(peasycap->pusb_device, ntsc);
+if (0 == rc)
+	JOM(4, "setup_saa() OK\n");
+else {
+	SAM("ERROR: setup_saa() returned %i\n", rc);
+	return -EFAULT;
+}
+
 for (i = 0; i < 180; i++)
 	peasycap->merit[i] = 0;
 peasycap->video_eof = 0;
 peasycap->audio_eof = 0;
-
 do_gettimeofday(&peasycap->timeval7);
+/*---------------------------------------------------------------------------*/
+/*
+ * RESTORE INPUT AND FORCE REFRESH OF STANDARD, FORMAT, ETC.
+ *
+ * WHILE THIS PROCEDURE IS IN PROGRESS, SOME IOCTL COMMANDS WILL RETURN -EBUSY.
+*/
+/*---------------------------------------------------------------------------*/
+peasycap->input = -8192;
+peasycap->standard_offset = -8192;
+if (true == other) {
+	peasycap_standard = &easycap_standard[0];
+	while (0xFFFF != peasycap_standard->mask) {
+		if (true == ntsc) {
+			if (NTSC_M == \
+				peasycap_standard->v4l2_standard.index) {
+				peasycap->inputset[input].standard_offset = \
+						peasycap_standard - \
+							&easycap_standard[0];
+				break;
+			}
+		} else {
+			if (PAL_BGHIN == \
+				peasycap_standard->v4l2_standard.index) {
+				peasycap->inputset[input].standard_offset = \
+						peasycap_standard -
+							&easycap_standard[0];
+				break;
+			}
+		}
+		peasycap_standard++;
+	}
+	if (0xFFFF == peasycap_standard->mask) {
+		SAM("ERROR: standard not found\n");
+		return -EINVAL;
+	}
+JOM(8, "%i=peasycap->inputset[%i].standard_offset\n", \
+		peasycap->inputset[input].standard_offset, input);
+}
+peasycap->format_offset = -8192;
+peasycap->brightness = -8192;
+peasycap->contrast = -8192;
+peasycap->saturation = -8192;
+peasycap->hue = -8192;
 
-peasycap->fudge = 0;
+rc = newinput(peasycap, input);
 
-JOT(4, "finished initialization\n");
+if (0 == rc)
+	JOM(4, "restored input, standard and format\n");
+else {
+	SAM("ERROR: newinput(.,%i) returned %i\n", rc, input);
+	return -EFAULT;
+}
+if (true == peasycap->ntsc)
+	JOM(8, "true=peasycap->ntsc\n");
+else
+	JOM(8, "false=peasycap->ntsc\n");
+
+if (0 > peasycap->input) {
+	SAM("MISTAKE:  %i=peasycap->input\n", peasycap->input);
+	return -ENOENT;
+}
+if (0 > peasycap->standard_offset) {
+	SAM("MISTAKE:  %i=peasycap->standard_offset\n", \
+						peasycap->standard_offset);
+	return -ENOENT;
+}
+if (0 > peasycap->format_offset) {
+	SAM("MISTAKE:  %i=peasycap->format_offset\n", \
+						peasycap->format_offset);
+	return -ENOENT;
+}
+if (0 > peasycap->brightness) {
+	SAM("MISTAKE:  %i=peasycap->brightness\n", peasycap->brightness);
+	return -ENOENT;
+}
+if (0 > peasycap->contrast) {
+	SAM("MISTAKE:  %i=peasycap->contrast\n", peasycap->contrast);
+	return -ENOENT;
+}
+if (0 > peasycap->saturation) {
+	SAM("MISTAKE:  %i=peasycap->saturation\n", peasycap->saturation);
+	return -ENOENT;
+}
+if (0 > peasycap->hue) {
+	SAM("MISTAKE:  %i=peasycap->hue\n", peasycap->hue);
+	return -ENOENT;
+}
+return 0;
+}
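
In condensed form, the standard auto-detection performed by reset() above proceeds as follows (a descriptive summary, not additional driver logic):

/*
 *  first ready_saa() succeeds  ->  ntsc = (rate >= 2); keep the standard
 *                                  already stored for the current input
 *  first ready_saa() fails     ->  reprogram the STK1160 and SAA7113H for
 *                                  the other standard and try again:
 *      second call succeeds    ->  ntsc = (rate >= 2), other = true, and
 *                                  inputset[input].standard_offset is forced
 *                                  to NTSC_M or PAL_BGHIN accordingly
 *      second call fails       ->  fall back to the previous peasycap->ntsc
 *  The chips are then programmed once more with the final ntsc value and
 *  newinput() re-applies standard, format, brightness, contrast, saturation
 *  and hue for the restored input.
 */
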
+/*****************************************************************************/
+/*---------------------------------------------------------------------------*/
+/*
+ *  IF THE REQUESTED INPUT IS THE SAME AS THE EXISTING INPUT, DO NOTHING.
+ *  OTHERWISE:
+ *      KILL URBS, CLEAR FIELD AND FRAME BUFFERS AND RESET THEIR
+ *           _read AND _fill POINTERS.
+ *      SELECT THE NEW INPUT.
+ *      ADJUST THE STANDARD, FORMAT, BRIGHTNESS, CONTRAST, SATURATION AND HUE
+ *          ON THE BASIS OF INFORMATION IN STRUCTURE easycap.inputset[input].
+ *      RESUBMIT THE URBS IF STREAMING WAS ALREADY IN PROGRESS.
+ *
+ *  NOTE:
+ *      THIS ROUTINE MAY BE CALLED FREQUENTLY BY ZONEMINDER VIA IOCTL,
+ *      SO IT SHOULD WRITE ONLY SPARINGLY TO THE LOGFILE.
+*/
+/*---------------------------------------------------------------------------*/
+int
+newinput(struct easycap *peasycap, int input)
+{
+int rc, k, m, mood, off;
+int inputnow, video_idlenow, audio_idlenow;
+bool resubmit;
+
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
+JOM(8, "%i=input sought\n", input);
+
+if (0 > input || INPUT_MANY <= input)
+	return -ENOENT;
+inputnow = peasycap->input;
+if (input == inputnow)
+	return 0;
+/*---------------------------------------------------------------------------*/
+/*
+ *  IF STREAMING IS IN PROGRESS THE URBS ARE KILLED AT THIS
+ *  STAGE AND WILL BE RESUBMITTED PRIOR TO EXIT FROM THE ROUTINE.
+ *  IF NO STREAMING IS IN PROGRESS NO URBS WILL BE SUBMITTED BY THE
+ *  ROUTINE.
+*/
+/*---------------------------------------------------------------------------*/
+video_idlenow = peasycap->video_idle;
+audio_idlenow = peasycap->audio_idle;
+
+peasycap->video_idle = 1;
+peasycap->audio_idle = 1;
+if (peasycap->video_isoc_streaming) {
+	resubmit = true;
+	kill_video_urbs(peasycap);
+} else
+	resubmit = false;
+/*---------------------------------------------------------------------------*/
+if (NULL == peasycap->pusb_device) {
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
+	return -ENODEV;
+}
+rc = usb_set_interface(peasycap->pusb_device,
+			peasycap->video_interface, \
+			peasycap->video_altsetting_off);
+if (0 != rc) {
+	SAM("ERROR: usb_set_interface() returned %i\n", rc);
+	return -EFAULT;
+}
+rc = stop_100(peasycap->pusb_device);
+if (0 != rc) {
+	SAM("ERROR: stop_100() returned %i\n", rc);
+	return -EFAULT;
+}
+for (k = 0; k < FIELD_BUFFER_MANY; k++) {
+	for (m = 0; m < FIELD_BUFFER_SIZE/PAGE_SIZE; m++)
+		memset(peasycap->field_buffer[k][m].pgo, 0, PAGE_SIZE);
+}
+for (k = 0; k < FRAME_BUFFER_MANY; k++) {
+	for (m = 0; m < FRAME_BUFFER_SIZE/PAGE_SIZE; m++)
+		memset(peasycap->frame_buffer[k][m].pgo, 0, PAGE_SIZE);
+}
+peasycap->field_page = 0;
+peasycap->field_read = 0;
+peasycap->field_fill = 0;
+
+peasycap->frame_read = 0;
+peasycap->frame_fill = 0;
+for (k = 0; k < peasycap->input; k++) {
+	(peasycap->frame_fill)++;
+	if (peasycap->frame_buffer_many <= peasycap->frame_fill)
+		peasycap->frame_fill = 0;
+}
+peasycap->input = input;
+select_input(peasycap->pusb_device, peasycap->input, 9);
+/*---------------------------------------------------------------------------*/
+if (input == peasycap->inputset[input].input) {
+	off = peasycap->inputset[input].standard_offset;
+	if (off != peasycap->standard_offset) {
+		rc = adjust_standard(peasycap, \
+				easycap_standard[off].v4l2_standard.id);
+		if (0 != rc) {
+			SAM("ERROR: adjust_standard() returned %i\n", rc);
+			return -EFAULT;
+		}
+		JOM(8, "%i=peasycap->standard_offset\n", \
+						peasycap->standard_offset);
+	} else {
+		JOM(8, "%i=peasycap->standard_offset unchanged\n", \
+						peasycap->standard_offset);
+	}
+	off = peasycap->inputset[input].format_offset;
+	if (off != peasycap->format_offset) {
+		rc = adjust_format(peasycap, \
+			easycap_format[off].v4l2_format.fmt.pix.width, \
+			easycap_format[off].v4l2_format.fmt.pix.height, \
+			easycap_format[off].v4l2_format.fmt.pix.pixelformat, \
+			easycap_format[off].v4l2_format.fmt.pix.field, false);
+		if (0 > rc) {
+			SAM("ERROR: adjust_format() returned %i\n", rc);
+			return -EFAULT;
+		}
+		JOM(8, "%i=peasycap->format_offset\n", peasycap->format_offset);
+	} else {
+		JOM(8, "%i=peasycap->format_offset unchanged\n", \
+						peasycap->format_offset);
+	}
+	mood = peasycap->inputset[input].brightness;
+	if (mood != peasycap->brightness) {
+		rc = adjust_brightness(peasycap, mood);
+		if (0 != rc) {
+			SAM("ERROR: adjust_brightness returned %i\n", rc);
+			return -EFAULT;
+		}
+		JOM(8, "%i=peasycap->brightness\n", peasycap->brightness);
+	}
+	mood = peasycap->inputset[input].contrast;
+	if (mood != peasycap->contrast) {
+		rc = adjust_contrast(peasycap, mood);
+		if (0 != rc) {
+			SAM("ERROR: adjust_contrast returned %i\n", rc);
+			return -EFAULT;
+		}
+		JOM(8, "%i=peasycap->contrast\n", peasycap->contrast);
+	}
+	mood = peasycap->inputset[input].saturation;
+	if (mood != peasycap->saturation) {
+		rc = adjust_saturation(peasycap, mood);
+		if (0 != rc) {
+			SAM("ERROR: adjust_saturation returned %i\n", rc);
+			return -EFAULT;
+		}
+		JOM(8, "%i=peasycap->saturation\n", peasycap->saturation);
+	}
+	mood = peasycap->inputset[input].hue;
+	if (mood != peasycap->hue) {
+		rc = adjust_hue(peasycap, mood);
+		if (0 != rc) {
+			SAM("ERROR: adjust_hue returned %i\n", rc);
+			return -EFAULT;
+		}
+		JOM(8, "%i=peasycap->hue\n", peasycap->hue);
+	}
+} else {
+	SAM("MISTAKE: easycap.inputset[%i] unpopulated\n", input);
+	return -ENOENT;
+}
+/*---------------------------------------------------------------------------*/
+if (NULL == peasycap->pusb_device) {
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
+	return -ENODEV;
+}
+rc = usb_set_interface(peasycap->pusb_device,
+			peasycap->video_interface, \
+			peasycap->video_altsetting_on);
+if (0 != rc) {
+	SAM("ERROR: usb_set_interface() returned %i\n", rc);
+	return -EFAULT;
+}
+rc = start_100(peasycap->pusb_device);
+if (0 != rc) {
+	SAM("ERROR: start_100() returned %i\n", rc);
+	return -EFAULT;
+}
+if (true == resubmit)
+	submit_video_urbs(peasycap);
+
+peasycap->video_isoc_sequence = VIDEO_ISOC_BUFFER_MANY - 1;
+peasycap->video_idle = video_idlenow;
+peasycap->audio_idle = audio_idlenow;
+peasycap->video_junk = 0;
+
 return 0;
 }
 /*****************************************************************************/
@@ -326,33 +631,25 @@
 struct data_urb *pdata_urb;
 struct urb *purb;
 struct list_head *plist_head;
-int j, isbad, m, rc;
+int j, isbad, nospc, m, rc;
 int isbuf;
 
-if ((struct list_head *)NULL == peasycap->purb_video_head) {
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
+
+if (NULL == peasycap->purb_video_head) {
 	SAY("ERROR: peasycap->urb_video_head uninitialized\n");
 	return -EFAULT;
 }
-if ((struct usb_device *)NULL == peasycap->pusb_device) {
+if (NULL == peasycap->pusb_device) {
 	SAY("ERROR: peasycap->pusb_device is NULL\n");
-	return -EFAULT;
+	return -ENODEV;
 }
 if (!peasycap->video_isoc_streaming) {
-
-
-
-
-
-
-
-
-	JOT(4, "submission of all video urbs\n");
-	if (0 != ready_saa(peasycap->pusb_device)) {
-		SAY("ERROR: not ready to capture after waiting " \
-							"one second\n");
-		SAY(".....  continuing anyway\n");
-	}
-	isbad = 0;  m = 0;
+	JOM(4, "submission of all video urbs\n");
+	isbad = 0;  nospc = 0;  m = 0;
 	list_for_each(plist_head, (peasycap->purb_video_head)) {
 		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
 		if (NULL != pdata_urb) {
@@ -389,44 +686,57 @@
 				rc = usb_submit_urb(purb, GFP_KERNEL);
 				if (0 != rc) {
 					isbad++;
-					SAY("ERROR: usb_submit_urb() failed " \
+					SAM("ERROR: usb_submit_urb() failed " \
 							"for urb with rc:\n");
 					switch (rc) {
 					case -ENOMEM: {
-						SAY("ENOMEM\n");
+						SAM("ERROR: -ENOMEM=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -ENODEV: {
-						SAY("ENODEV\n");
+						SAM("ERROR: -ENODEV=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -ENXIO: {
-						SAY("ENXIO\n");
+						SAM("ERROR: -ENXIO=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -EINVAL: {
-						SAY("EINVAL\n");
+						SAM("ERROR: -EINVAL=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -EAGAIN: {
-						SAY("EAGAIN\n");
+						SAM("ERROR: -EAGAIN=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -EFBIG: {
-						SAY("EFBIG\n");
+						SAM("ERROR: -EFBIG=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -EPIPE: {
-						SAY("EPIPE\n");
+						SAM("ERROR: -EPIPE=" \
+							"usb_submit_urb()\n");
 						break;
 					}
 					case -EMSGSIZE: {
-						SAY("EMSGSIZE\n");
+						SAM("ERROR: -EMSGSIZE=" \
+							"usb_submit_urb()\n");
+						break;
+					}
+					case -ENOSPC: {
+						nospc++;
 						break;
 					}
 					default: {
-						SAY("unknown error code %i\n",\
-									 rc);
+						SAM("ERROR: %i=" \
+							"usb_submit_urb()\n",\
+							rc);
 						break;
 					}
 					}
@@ -434,14 +744,20 @@
 					m++;
 				}
 				} else {
-					isbad++;
+					 isbad++;
 				}
 			} else {
 				 isbad++;
 			}
 		}
+	if (nospc) {
+		SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
+		SAM(".....  possibly inadequate USB bandwidth\n");
+		peasycap->video_eof = 1;
+	}
+
 	if (isbad) {
-		JOT(4, "attempting cleanup instead of submitting\n");
+		JOM(4, "attempting cleanup instead of submitting\n");
 		list_for_each(plist_head, (peasycap->purb_video_head)) {
 			pdata_urb = list_entry(plist_head, struct data_urb, \
 								list_head);
@@ -454,16 +770,10 @@
 		peasycap->video_isoc_streaming = 0;
 	} else {
 		peasycap->video_isoc_streaming = 1;
-		JOT(4, "submitted %i video urbs\n", m);
+		JOM(4, "submitted %i video urbs\n", m);
 	}
-
-
-
-
-
-
 } else {
-	JOT(4, "already streaming video urbs\n");
+	JOM(4, "already streaming video urbs\n");
 }
 return 0;
 }
@@ -475,35 +785,32 @@
 struct list_head *plist_head;
 struct data_urb *pdata_urb;
 
-if ((struct easycap *)NULL == peasycap) {
+if (NULL == peasycap) {
 	SAY("ERROR: peasycap is NULL\n");
 	return -EFAULT;
 }
 if (peasycap->video_isoc_streaming) {
-
-
-
 	if ((struct list_head *)NULL != peasycap->purb_video_head) {
 		peasycap->video_isoc_streaming = 0;
-		JOT(4, "killing video urbs\n");
+		JOM(4, "killing video urbs\n");
 		m = 0;
 		list_for_each(plist_head, (peasycap->purb_video_head)) {
 			pdata_urb = list_entry(plist_head, struct data_urb, \
 								list_head);
-			if ((struct data_urb *)NULL != pdata_urb) {
-				if ((struct urb *)NULL != pdata_urb->purb) {
+			if (NULL != pdata_urb) {
+				if (NULL != pdata_urb->purb) {
 					usb_kill_urb(pdata_urb->purb);
 					m++;
 				}
 			}
 		}
-		JOT(4, "%i video urbs killed\n", m);
+		JOM(4, "%i video urbs killed\n", m);
 	} else {
-		SAY("ERROR: peasycap->purb_video_head is NULL\n");
+		SAM("ERROR: peasycap->purb_video_head is NULL\n");
 		return -EFAULT;
 	}
 } else {
-	JOT(8, "%i=video_isoc_streaming, no video urbs killed\n", \
+	JOM(8, "%i=video_isoc_streaming, no video urbs killed\n", \
 					peasycap->video_isoc_streaming);
 }
 return 0;
@@ -533,11 +840,15 @@
 	SAY("ending unsuccessfully\n");
 	return -EFAULT;
 }
-if (0 != kill_video_urbs(peasycap)) {
-	SAY("ERROR: kill_video_urbs() failed\n");
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
 	return -EFAULT;
 }
-JOT(4, "ending successfully\n");
+if (0 != kill_video_urbs(peasycap)) {
+	SAM("ERROR: kill_video_urbs() failed\n");
+	return -EFAULT;
+}
+JOM(4, "ending successfully\n");
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #else
 #
@@ -550,63 +861,45 @@
 /*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #if defined(EASYCAP_IS_VIDEODEV_CLIENT)
 int
-videodev_release(struct video_device *pvd)
+videodev_release(struct video_device *pvideo_device)
 {
 struct easycap *peasycap;
-int i, j, k;
 
 JOT(4, "\n");
 
-k = 0;
-for (i = 0;  i < video_device_many;  i++) {
-	pvideo_device = pvideo_array[i];
-	if ((struct video_device *)NULL != pvideo_device) {
-		if (pvd->minor == pvideo_device->minor) {
-			peasycap = (struct easycap *)\
-					video_get_drvdata(pvideo_device);
-			if ((struct easycap *)NULL == peasycap) {
-				SAY("ERROR:  peasycap is NULL\n");
-				SAY("ending unsuccessfully\n");
-				return -EFAULT;
-			}
-			if (0 != kill_video_urbs(peasycap)) {
-				SAY("ERROR: kill_video_urbs() failed\n");
-				return -EFAULT;
-			}
-			JOT(4, "freeing video_device structure: " \
-							"/dev/video%i\n", i);
-			kfree((void *)pvideo_device);
-			for (j = i;  j < (VIDEO_DEVICE_MANY - 1);  j++)
-				pvideo_array[j] = pvideo_array[j + 1];
-			video_device_many--;  k++;
-			break;
-		}
-	}
-}
-if (!k) {
-	SAY("ERROR: lost video_device structure for %i=minor\n", pvd->minor);
-	SAY("cannot free: may cause memory leak\n");
+peasycap = video_get_drvdata(pvideo_device);
+if (NULL == peasycap) {
+	SAY("ERROR:  peasycap is NULL\n");
 	SAY("ending unsuccessfully\n");
 	return -EFAULT;
 }
-
-JOT(4, "ending successfully\n");
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return -EFAULT;
+}
+if (0 != kill_video_urbs(peasycap)) {
+	SAM("ERROR: kill_video_urbs() failed\n");
+	return -EFAULT;
+}
+JOM(4, "ending successfully\n");
 return 0;
 }
 #endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-/****************************************************************************/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*****************************************************************************/
 /*--------------------------------------------------------------------------*/
 /*
- *  THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect().
- *  BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED.
- *  peasycap->pusb_device IS NO LONGER VALID AND SHOULD HAVE BEEN SET TO NULL.
+ *  THIS FUNCTION IS CALLED FROM WITHIN easycap_usb_disconnect() AND IS
+ *  PROTECTED BY SEMAPHORES SET AND CLEARED BY easycap_usb_disconnect().
+ *
+ *  BY THIS STAGE THE DEVICE HAS ALREADY BEEN PHYSICALLY UNPLUGGED, SO
+ *  peasycap->pusb_device IS NO LONGER VALID.
  */
 /*---------------------------------------------------------------------------*/
 void
 easycap_delete(struct kref *pkref)
 {
-int k, m, lost;
+int k, m, gone, kd;
 int allocation_video_urb, allocation_video_page, allocation_video_struct;
 int allocation_audio_urb, allocation_audio_page, allocation_audio_struct;
 int registered_video, registered_audio;
@@ -617,22 +910,27 @@
 JOT(4, "\n");
 
 peasycap = container_of(pkref, struct easycap, kref);
-if ((struct easycap *)NULL == peasycap) {
-	SAY("ERROR: peasycap is NULL: cannot perform deletions\n");
+if (NULL == peasycap) {
+	SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
 	return;
 }
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return;
+}
+kd = isdongle(peasycap);
 /*---------------------------------------------------------------------------*/
 /*
  *  FREE VIDEO.
  */
 /*---------------------------------------------------------------------------*/
 if ((struct list_head *)NULL != peasycap->purb_video_head) {
-	JOT(4, "freeing video urbs\n");
+	JOM(4, "freeing video urbs\n");
 	m = 0;
 	list_for_each(plist_head, (peasycap->purb_video_head)) {
 		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
 		if (NULL == pdata_urb)
-			JOT(4, "ERROR: pdata_urb is NULL\n");
+			JOM(4, "ERROR: pdata_urb is NULL\n");
 		else {
 			if ((struct urb *)NULL != pdata_urb->purb) {
 				usb_free_urb(pdata_urb->purb);
@@ -643,9 +941,9 @@
 		}
 	}
 
-	JOT(4, "%i video urbs freed\n", m);
+	JOM(4, "%i video urbs freed\n", m);
 /*---------------------------------------------------------------------------*/
-	JOT(4, "freeing video data_urb structures.\n");
+	JOM(4, "freeing video data_urb structures.\n");
 	m = 0;
 	list_for_each_safe(plist_head, plist_next, peasycap->purb_video_head) {
 		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
@@ -656,14 +954,12 @@
 			m++;
 		}
 	}
-	JOT(4, "%i video data_urb structures freed\n", m);
-	JOT(4, "setting peasycap->purb_video_head=NULL\n");
+	JOM(4, "%i video data_urb structures freed\n", m);
+	JOM(4, "setting peasycap->purb_video_head=NULL\n");
 	peasycap->purb_video_head = (struct list_head *)NULL;
-	} else {
-JOT(4, "peasycap->purb_video_head is NULL\n");
 }
 /*---------------------------------------------------------------------------*/
-JOT(4, "freeing video isoc buffers.\n");
+JOM(4, "freeing video isoc buffers.\n");
 m = 0;
 for (k = 0;  k < VIDEO_ISOC_BUFFER_MANY;  k++) {
 	if ((void *)NULL != peasycap->video_isoc_buffer[k].pgo) {
@@ -676,10 +972,10 @@
 		m++;
 	}
 }
-JOT(4, "isoc video buffers freed: %i pages\n", m * (0x01 << VIDEO_ISOC_ORDER));
+JOM(4, "isoc video buffers freed: %i pages\n", m * (0x01 << VIDEO_ISOC_ORDER));
 /*---------------------------------------------------------------------------*/
-JOT(4, "freeing video field buffers.\n");
-lost = 0;
+JOM(4, "freeing video field buffers.\n");
+gone = 0;
 for (k = 0;  k < FIELD_BUFFER_MANY;  k++) {
 	for (m = 0;  m < FIELD_BUFFER_SIZE/PAGE_SIZE;  m++) {
 		if ((void *)NULL != peasycap->field_buffer[k][m].pgo) {
@@ -687,14 +983,14 @@
 					(peasycap->field_buffer[k][m].pgo));
 			peasycap->field_buffer[k][m].pgo = (void *)NULL;
 			peasycap->allocation_video_page -= 1;
-			lost++;
+			gone++;
 		}
 	}
 }
-JOT(4, "video field buffers freed: %i pages\n", lost);
+JOM(4, "video field buffers freed: %i pages\n", gone);
 /*---------------------------------------------------------------------------*/
-JOT(4, "freeing video frame buffers.\n");
-lost = 0;
+JOM(4, "freeing video frame buffers.\n");
+gone = 0;
 for (k = 0;  k < FRAME_BUFFER_MANY;  k++) {
 	for (m = 0;  m < FRAME_BUFFER_SIZE/PAGE_SIZE;  m++) {
 		if ((void *)NULL != peasycap->frame_buffer[k][m].pgo) {
@@ -702,23 +998,23 @@
 					(peasycap->frame_buffer[k][m].pgo));
 			peasycap->frame_buffer[k][m].pgo = (void *)NULL;
 			peasycap->allocation_video_page -= 1;
-			lost++;
+			gone++;
 		}
 	}
 }
-JOT(4, "video frame buffers freed: %i pages\n", lost);
+JOM(4, "video frame buffers freed: %i pages\n", gone);
 /*---------------------------------------------------------------------------*/
 /*
  *  FREE AUDIO.
  */
 /*---------------------------------------------------------------------------*/
 if ((struct list_head *)NULL != peasycap->purb_audio_head) {
-	JOT(4, "freeing audio urbs\n");
+	JOM(4, "freeing audio urbs\n");
 	m = 0;
 	list_for_each(plist_head, (peasycap->purb_audio_head)) {
 		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
 		if (NULL == pdata_urb)
-			JOT(4, "ERROR: pdata_urb is NULL\n");
+			JOM(4, "ERROR: pdata_urb is NULL\n");
 		else {
 			if ((struct urb *)NULL != pdata_urb->purb) {
 				usb_free_urb(pdata_urb->purb);
@@ -728,9 +1024,9 @@
 			}
 		}
 	}
-	JOT(4, "%i audio urbs freed\n", m);
+	JOM(4, "%i audio urbs freed\n", m);
 /*---------------------------------------------------------------------------*/
-	JOT(4, "freeing audio data_urb structures.\n");
+	JOM(4, "freeing audio data_urb structures.\n");
 	m = 0;
 	list_for_each_safe(plist_head, plist_next, peasycap->purb_audio_head) {
 		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
@@ -741,14 +1037,12 @@
 			m++;
 		}
 	}
-JOT(4, "%i audio data_urb structures freed\n", m);
-JOT(4, "setting peasycap->purb_audio_head=NULL\n");
+JOM(4, "%i audio data_urb structures freed\n", m);
+JOM(4, "setting peasycap->purb_audio_head=NULL\n");
 peasycap->purb_audio_head = (struct list_head *)NULL;
-} else {
-JOT(4, "peasycap->purb_audio_head is NULL\n");
 }
 /*---------------------------------------------------------------------------*/
-JOT(4, "freeing audio isoc buffers.\n");
+JOM(4, "freeing audio isoc buffers.\n");
 m = 0;
 for (k = 0;  k < AUDIO_ISOC_BUFFER_MANY;  k++) {
 	if ((void *)NULL != peasycap->audio_isoc_buffer[k].pgo) {
@@ -761,22 +1055,22 @@
 		m++;
 	}
 }
-JOT(4, "easysnd_delete(): isoc audio buffers freed: %i pages\n", \
+JOM(4, "easysnd_delete(): isoc audio buffers freed: %i pages\n", \
 					m * (0x01 << AUDIO_ISOC_ORDER));
 /*---------------------------------------------------------------------------*/
-JOT(4, "freeing audio buffers.\n");
-lost = 0;
+JOM(4, "freeing audio buffers.\n");
+gone = 0;
 for (k = 0;  k < peasycap->audio_buffer_page_many;  k++) {
 	if ((void *)NULL != peasycap->audio_buffer[k].pgo) {
 		free_page((unsigned long)(peasycap->audio_buffer[k].pgo));
 		peasycap->audio_buffer[k].pgo = (void *)NULL;
 		peasycap->allocation_audio_page -= 1;
-		lost++;
+		gone++;
 	}
 }
-JOT(4, "easysnd_delete(): audio buffers freed: %i pages\n", lost);
+JOM(4, "easysnd_delete(): audio buffers freed: %i pages\n", gone);
 /*---------------------------------------------------------------------------*/
-JOT(4, "freeing easycap structure.\n");
+JOM(4, "freeing easycap structure.\n");
 allocation_video_urb    = peasycap->allocation_video_urb;
 allocation_video_page   = peasycap->allocation_video_page;
 allocation_video_struct = peasycap->allocation_video_struct;
@@ -785,15 +1079,16 @@
 allocation_audio_page   = peasycap->allocation_audio_page;
 allocation_audio_struct = peasycap->allocation_audio_struct;
 registered_audio        = peasycap->registered_audio;
-m = 0;
-if ((struct easycap *)NULL != peasycap) {
-	kfree(peasycap);  peasycap = (struct easycap *)NULL;
-	allocation_video_struct -= sizeof(struct easycap);
-	m++;
-}
-JOT(4, "%i easycap structure freed\n", m);
-/*---------------------------------------------------------------------------*/
 
+kfree(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+	easycap_dongle[kd].peasycap = (struct easycap *)NULL;
+	JOT(4, "   null-->easycap_dongle[%i].peasycap\n", kd);
+	allocation_video_struct -= sizeof(struct easycap);
+} else {
+	SAY("ERROR: cannot purge easycap_dongle[].peasycap\n");
+}
+/*---------------------------------------------------------------------------*/
 SAY("%8i= video urbs     after all deletions\n", allocation_video_urb);
 SAY("%8i= video pages    after all deletions\n", allocation_video_page);
 SAY("%8i= video structs  after all deletions\n", allocation_video_struct);
@@ -810,27 +1105,85 @@
 unsigned int easycap_poll(struct file *file, poll_table *wait)
 {
 struct easycap *peasycap;
+int rc, kd;
 
 JOT(8, "\n");
 
 if (NULL == ((poll_table *)wait))
 	JOT(8, "WARNING:  poll table pointer is NULL ... continuing\n");
-if (NULL == ((struct file *)file)) {
+if ((struct file *)NULL == file) {
 	SAY("ERROR:  file pointer is NULL\n");
-	return -EFAULT;
+	return -ERESTARTSYS;
 }
 peasycap = file->private_data;
 if (NULL == peasycap) {
 	SAY("ERROR:  peasycap is NULL\n");
 	return -EFAULT;
 }
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return -EFAULT;
+}
+if (NULL == peasycap->pusb_device) {
+	SAY("ERROR:  peasycap->pusb_device is NULL\n");
+	return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video)) {
+		SAY("ERROR: cannot down easycap_dongle[%i].mutex_video\n", kd);
+		return -ERESTARTSYS;
+	}
+	JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
+	/*-------------------------------------------------------------------*/
+	/*
+	 *  MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER
+	 *  peasycap, IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+	 *  IF NECESSARY, BAIL OUT.
+	*/
+	/*-------------------------------------------------------------------*/
+	if (kd != isdongle(peasycap))
+		return -ERESTARTSYS;
+	if (NULL == file) {
+		SAY("ERROR:  file is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+	peasycap = file->private_data;
+	if (NULL == peasycap) {
+		SAY("ERROR:  peasycap is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+		SAY("ERROR: bad peasycap: 0x%08lX\n", \
+						(unsigned long int) peasycap);
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+	if (NULL == peasycap->pusb_device) {
+		SAM("ERROR: peasycap->pusb_device is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return -ERESTARTSYS;
+	}
+} else
+	/*-------------------------------------------------------------------*/
+	/*
+	 *  IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap
+	 *  BEFORE THE ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL
+	 *  HAVE FAILED.  BAIL OUT.
+	*/
+	/*-------------------------------------------------------------------*/
+	return -ERESTARTSYS;
+/*---------------------------------------------------------------------------*/
+rc = easycap_dqbuf(peasycap, 0);
 peasycap->polled = 1;
-
-if (0 == easycap_dqbuf(peasycap, 0))
+mutex_unlock(&easycap_dongle[kd].mutex_video);
+if (0 == rc)
 	return POLLIN | POLLRDNORM;
 else
 	return POLLERR;
-
 }
 /*****************************************************************************/
 /*---------------------------------------------------------------------------*/
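
The locking sequence in easycap_poll() above follows a fixed pattern: map peasycap to its dongle index, take that dongle's mutex_video, then repeat every validity check, because easycap_usb_disconnect() may have freed peasycap while the caller slept on the mutex; this is what the two block comments inside the function describe. A compressed standalone sketch of just that control flow, with pthreads standing in for the kernel mutexes and made-up names throughout:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

#define SLOT_MANY 8			/* plays the role of DONGLE_MANY    */

struct device_state;			/* opaque per-device state          */

struct slot {
	struct device_state *state;	/* NULL once disconnect tore it down */
	pthread_mutex_t lock;		/* pthread_mutex_init() at startup  */
};

static struct slot slots[SLOT_MANY];

static int slot_of(const struct device_state *state)
{
	int k;

	for (k = 0; k < SLOT_MANY; k++)
		if (slots[k].state == state)
			return k;
	return -1;			/* already torn down                */
}

/* Returns the locked slot index, or -EAGAIN if the device went away before
 * or while the lock was being acquired (the driver itself returns
 * -ERESTARTSYS here).  On success the caller must unlock slots[kd].lock.
 */
static int lock_and_revalidate(const struct device_state *state)
{
	int kd = slot_of(state);

	if (kd < 0)
		return -EAGAIN;		/* freed before the attempt         */
	if (pthread_mutex_lock(&slots[kd].lock))
		return -EAGAIN;
	if (slot_of(state) != kd) {	/* freed while sleeping on the lock */
		pthread_mutex_unlock(&slots[kd].lock);
		return -EAGAIN;
	}
	return kd;
}
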
@@ -841,7 +1194,7 @@
 int
 easycap_dqbuf(struct easycap *peasycap, int mode)
 {
-int miss, rc;
+int input, ifield, miss, rc;
 
 JOT(8, "\n");
 
@@ -849,129 +1202,188 @@
 	SAY("ERROR:  peasycap is NULL\n");
 	return -EFAULT;
 }
+if (NULL == peasycap->pusb_device) {
+	SAY("ERROR:  peasycap->pusb_device is NULL\n");
+	return -EFAULT;
+}
+ifield = 0;
+JOM(8, "%i=ifield\n", ifield);
 /*---------------------------------------------------------------------------*/
 /*
- *  WAIT FOR FIELD 0
+ *  CHECK FOR LOST INPUT SIGNAL.
+ *
+ *  FOR THE FOUR-CVBS EasyCAP, THIS DOES NOT WORK AS EXPECTED.
+ *  IF INPUT 0 IS PRESENT AND SYNC ACQUIRED, UNPLUGGING INPUT 4 DOES NOT
+ *  RESULT IN SETTING BIT 0x40 ON REGISTER 0x1F, PRESUMABLY BECAUSE THERE
+ *  IS FLYWHEELING ON INPUT 0.  THE UPSHOT IS:
+ *
+ *    INPUT 0   PLUGGED, INPUT 4   PLUGGED => SCREEN 0 OK,   SCREEN 4 OK
+ *    INPUT 0   PLUGGED, INPUT 4 UNPLUGGED => SCREEN 0 OK,   SCREEN 4 BLACK
+ *    INPUT 0 UNPLUGGED, INPUT 4   PLUGGED => SCREEN 0 BARS, SCREEN 4 OK
+ *    INPUT 0 UNPLUGGED, INPUT 4 UNPLUGGED => SCREEN 0 BARS, SCREEN 4 BARS
+*/
+/*---------------------------------------------------------------------------*/
+input = peasycap->input;
+if (0 <= input && INPUT_MANY > input) {
+	rc = read_saa(peasycap->pusb_device, 0x1F);
+	if (0 <= rc) {
+		if (rc & 0x40)
+			peasycap->lost[input] += 1;
+		else
+			peasycap->lost[input] -= 2;
+
+		if (0 > peasycap->lost[input])
+			peasycap->lost[input] = 0;
+		else if ((2 * VIDEO_LOST_TOLERATE) < peasycap->lost[input])
+			peasycap->lost[input] = (2 * VIDEO_LOST_TOLERATE);
+	}
+}
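
The bookkeeping just above is a small hysteresis counter: each call to easycap_dqbuf() samples SAA register 0x1F, bumps peasycap->lost[input] by 1 while the sync-lost bit 0x40 is set, drops it by 2 otherwise, and clamps it to [0, 2*VIDEO_LOST_TOLERATE]; field2frame() later substitutes the test card (when easycap_bars is set) only once the counter has passed VIDEO_LOST_TOLERATE. A standalone sketch of the same logic, with stand-in names:

#define TOLERATE 16	/* plays the role of VIDEO_LOST_TOLERATE; value is a guess */

static int lost;	/* the driver keeps one such counter per input             */

static void account_sync(int sync_lost)
{
	if (sync_lost)		/* register 0x1F bit 0x40 was set   */
		lost += 1;
	else
		lost -= 2;	/* recovery counts double           */

	if (lost < 0)
		lost = 0;
	else if (lost > 2 * TOLERATE)
		lost = 2 * TOLERATE;
}

static int want_test_card(void)
{
	return lost >= TOLERATE;	/* field2frame() also requires easycap_bars */
}
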
+/*---------------------------------------------------------------------------*/
+/*
+ *  WAIT FOR FIELD ifield  (0 => TOP, 1 => BOTTOM)
  */
 /*---------------------------------------------------------------------------*/
 miss = 0;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
-	return -ERESTARTSYS;
 while ((peasycap->field_read == peasycap->field_fill) || \
 				(0 != (0xFF00 & peasycap->field_buffer\
 					[peasycap->field_read][0].kount)) || \
-				(0 != (0x00FF & peasycap->field_buffer\
+				(ifield != (0x00FF & peasycap->field_buffer\
 					[peasycap->field_read][0].kount))) {
-	mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-
 	if (mode)
 		return -EAGAIN;
 
-	JOT(8, "first wait  on wq_video, " \
+	JOM(8, "first wait  on wq_video, " \
 				"%i=field_read  %i=field_fill\n", \
 				peasycap->field_read, peasycap->field_fill);
 
-	msleep(1);
 	if (0 != (wait_event_interruptible(peasycap->wq_video, \
 			(peasycap->video_idle || peasycap->video_eof  || \
 			((peasycap->field_read != peasycap->field_fill) && \
 				(0 == (0xFF00 & peasycap->field_buffer\
 					[peasycap->field_read][0].kount)) && \
-				(0 == (0x00FF & peasycap->field_buffer\
-					[peasycap->field_read][0].kount))))))){
-		SAY("aborted by signal\n");
+				(ifield == (0x00FF & peasycap->field_buffer\
+					[peasycap->field_read][0].kount))))))) {
+		SAM("aborted by signal\n");
 		return -EIO;
 		}
 	if (peasycap->video_idle) {
-		JOT(8, "%i=peasycap->video_idle\n", peasycap->video_idle);
-		return -EIO;
+		JOM(8, "%i=peasycap->video_idle ... returning -EAGAIN\n", \
+							peasycap->video_idle);
+		return -EAGAIN;
 	}
 	if (peasycap->video_eof) {
-		JOT(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
-		debrief(peasycap);
+		JOM(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
+		#if defined(PERSEVERE)
+		if (1 == peasycap->status) {
+			JOM(8, "persevering ...\n");
+			peasycap->video_eof = 0;
+			peasycap->audio_eof = 0;
+			if (0 != reset(peasycap)) {
+				JOM(8, " ... failed ... returning -EIO\n");
+				peasycap->video_eof = 1;
+				peasycap->audio_eof = 1;
+				kill_video_urbs(peasycap);
+				return -EIO;
+			}
+			peasycap->status = 0;
+			JOM(8, " ... OK ... returning -EAGAIN\n");
+			return -EAGAIN;
+		}
+		#endif /*PERSEVERE*/
+		peasycap->video_eof = 1;
+		peasycap->audio_eof = 1;
 		kill_video_urbs(peasycap);
+		JOM(8, "returning -EIO\n");
 		return -EIO;
 	}
 miss++;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
-	return -ERESTARTSYS;
 }
-mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-JOT(8, "first awakening on wq_video after %i waits\n", miss);
+JOM(8, "first awakening on wq_video after %i waits\n", miss);
 
 rc = field2frame(peasycap);
 if (0 != rc)
-	SAY("ERROR: field2frame() returned %i\n", rc);
-
-if (true == peasycap->offerfields) {
-	peasycap->frame_read = peasycap->frame_fill;
-	(peasycap->frame_fill)++;
-	if (peasycap->frame_buffer_many <= peasycap->frame_fill)
-		peasycap->frame_fill = 0;
-
-	if (0x01 & easycap_standard[peasycap->standard_offset].mask) {
-		peasycap->frame_buffer[peasycap->frame_read][0].kount = \
-							V4L2_FIELD_BOTTOM;
-	} else {
-		peasycap->frame_buffer[peasycap->frame_read][0].kount = \
-							V4L2_FIELD_TOP;
-	}
-JOT(8, "setting:    %i=peasycap->frame_read\n", peasycap->frame_read);
-JOT(8, "bumped to:  %i=peasycap->frame_fill\n", peasycap->frame_fill);
-}
+	SAM("ERROR: field2frame() returned %i\n", rc);
 /*---------------------------------------------------------------------------*/
 /*
- *  WAIT FOR FIELD 1
+ *  WAIT FOR THE OTHER FIELD
  */
 /*---------------------------------------------------------------------------*/
+if (ifield)
+	ifield = 0;
+else
+	ifield = 1;
 miss = 0;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
-	return -ERESTARTSYS;
 while ((peasycap->field_read == peasycap->field_fill) || \
 				(0 != (0xFF00 & peasycap->field_buffer\
 					[peasycap->field_read][0].kount)) || \
-				(0 == (0x00FF & peasycap->field_buffer\
+				(ifield != (0x00FF & peasycap->field_buffer\
 					[peasycap->field_read][0].kount))) {
-	mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-
 	if (mode)
 		return -EAGAIN;
 
-	JOT(8, "second wait on wq_video, " \
+	JOM(8, "second wait on wq_video, " \
 				"%i=field_read  %i=field_fill\n", \
 				peasycap->field_read, peasycap->field_fill);
-	msleep(1);
 	if (0 != (wait_event_interruptible(peasycap->wq_video, \
 			(peasycap->video_idle || peasycap->video_eof  || \
 			((peasycap->field_read != peasycap->field_fill) && \
 				(0 == (0xFF00 & peasycap->field_buffer\
 					[peasycap->field_read][0].kount)) && \
-				(0 != (0x00FF & peasycap->field_buffer\
-					[peasycap->field_read][0].kount))))))){
-		SAY("aborted by signal\n");
+				(ifield == (0x00FF & peasycap->field_buffer\
+					[peasycap->field_read][0].\
+								kount))))))) {
+		SAM("aborted by signal\n");
 		return -EIO;
 	}
 	if (peasycap->video_idle) {
-		JOT(8, "%i=peasycap->video_idle\n", peasycap->video_idle);
-		return -EIO;
+		JOM(8, "%i=peasycap->video_idle ... returning -EAGAIN\n", \
+							peasycap->video_idle);
+		return -EAGAIN;
 	}
 	if (peasycap->video_eof) {
-		JOT(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
-		debrief(peasycap);
+		JOM(8, "%i=peasycap->video_eof\n", peasycap->video_eof);
+		#if defined(PERSEVERE)
+		if (1 == peasycap->status) {
+			JOM(8, "persevering ...\n");
+			peasycap->video_eof = 0;
+			peasycap->audio_eof = 0;
+			if (0 != reset(peasycap)) {
+				JOM(8, " ... failed ... returning -EIO\n");
+				peasycap->video_eof = 1;
+				peasycap->audio_eof = 1;
+				kill_video_urbs(peasycap);
+				return -EIO;
+			}
+			peasycap->status = 0;
+			JOM(8, " ... OK ... returning -EAGAIN\n");
+			return -EAGAIN;
+		}
+		#endif /*PERSEVERE*/
+		peasycap->video_eof = 1;
+		peasycap->audio_eof = 1;
 		kill_video_urbs(peasycap);
+		JOM(8, "returning -EIO\n");
 		return -EIO;
 	}
 miss++;
-if (mutex_lock_interruptible(&(peasycap->mutex_mmap_video[0])))
-	return -ERESTARTSYS;
 }
-mutex_unlock(&(peasycap->mutex_mmap_video[0]));
-JOT(8, "second awakening on wq_video after %i waits\n", miss);
+JOM(8, "second awakening on wq_video after %i waits\n", miss);
 
 rc = field2frame(peasycap);
 if (0 != rc)
-	SAY("ERROR: field2frame() returned %i\n", rc);
-
+	SAM("ERROR: field2frame() returned %i\n", rc);
+/*---------------------------------------------------------------------------*/
+/*
+ *  WASTE THIS FRAME
+*/
+/*---------------------------------------------------------------------------*/
+if (0 != peasycap->skip) {
+	peasycap->skipped++;
+	if (peasycap->skip != peasycap->skipped)
+		return peasycap->skip - peasycap->skipped;
+	peasycap->skipped = 0;
+}
+/*---------------------------------------------------------------------------*/
 peasycap->frame_read = peasycap->frame_fill;
 peasycap->queued[peasycap->frame_read] = 0;
 peasycap->done[peasycap->frame_read]   = V4L2_BUF_FLAG_DONE;
@@ -988,8 +1400,8 @@
 							V4L2_FIELD_BOTTOM;
 }
 
-JOT(8, "setting:    %i=peasycap->frame_read\n", peasycap->frame_read);
-JOT(8, "bumped to:  %i=peasycap->frame_fill\n", peasycap->frame_fill);
+JOM(8, "setting:    %i=peasycap->frame_read\n", peasycap->frame_read);
+JOM(8, "bumped to:  %i=peasycap->frame_fill\n", peasycap->frame_fill);
 
 return 0;
 }
@@ -1003,14 +1415,12 @@
  *  odd==false IS TRANSFERRED TO THE FRAME BUFFER.
  *
  *  THE BOOLEAN PARAMETER offerfields IS true ONLY WHEN THE USER PROGRAM
- *  CHOOSES THE OPTION V4L2_FIELD_ALTERNATE.  NO USERSPACE PROGRAM TESTED
- *  TO DATE HAS DONE THIS.  BUGS ARE LIKELY.
+ *  CHOOSES THE OPTION V4L2_FIELD_INTERLACED.
  */
 /*---------------------------------------------------------------------------*/
 int
 field2frame(struct easycap *peasycap)
 {
-static struct timeval timeval0;
 struct timeval timeval;
 long long int above, below;
 __u32 remainder;
@@ -1019,16 +1429,26 @@
 void *pex, *pad;
 int kex, kad, mex, mad, rex, rad, rad2;
 int c2, c3, w2, w3, cz, wz;
-int rc, bytesperpixel, multiplier, much, more, over, rump, caches;
+int rc, bytesperpixel, multiplier, much, more, over, rump, caches, input;
 __u8 mask, margin;
-bool odd, isuy, decimatepixel, offerfields;
+bool odd, isuy, decimatepixel, offerfields, badinput;
 
-JOT(8, "=====  parity %i, field buffer %i --> frame buffer %i\n", \
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
+
+badinput = false;
+input = 0x07 & peasycap->field_buffer[peasycap->field_read][0].input;
+
+JOM(8, "=====  parity %i, input 0x%02X, field buffer %i --> " \
+						"frame buffer %i\n", \
 			peasycap->field_buffer[peasycap->field_read][0].kount,\
+			peasycap->field_buffer[peasycap->field_read][0].input,\
 			peasycap->field_read, peasycap->frame_fill);
-JOT(8, "=====  %i=bytesperpixel\n", peasycap->bytesperpixel);
+JOM(8, "=====  %i=bytesperpixel\n", peasycap->bytesperpixel);
 if (true == peasycap->offerfields)
-	JOT(8, "===== offerfields\n");
+	JOM(8, "===== offerfields\n");
 
 /*---------------------------------------------------------------------------*/
 /*
@@ -1036,15 +1456,17 @@
  */
 /*---------------------------------------------------------------------------*/
 if (peasycap->field_read == peasycap->field_fill) {
-	SAY("ERROR: on entry, still filling field buffer %i\n", \
+	SAM("ERROR: on entry, still filling field buffer %i\n", \
 							peasycap->field_read);
 	return 0;
 }
 #if defined(EASYCAP_TESTCARD)
 easycap_testcard(peasycap, peasycap->field_read);
 #else
-if (0 != (0x0400 & peasycap->field_buffer[peasycap->field_read][0].kount))
-	easycap_testcard(peasycap, peasycap->field_read);
+if (0 <= input && INPUT_MANY > input) {
+	if (easycap_bars && VIDEO_LOST_TOLERATE <= peasycap->lost[input])
+		easycap_testcard(peasycap, peasycap->field_read);
+}
 #endif /*EASYCAP_TESTCARD*/
 /*---------------------------------------------------------------------------*/
 
@@ -1055,7 +1477,7 @@
 if ((2 != bytesperpixel) && \
 			(3 != bytesperpixel) && \
 			(4 != bytesperpixel)) {
-	SAY("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
+	SAM("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
 	return -EFAULT;
 }
 if (true == decimatepixel)
@@ -1082,8 +1504,8 @@
 else
 	odd = false;
 
-if ((true == odd) && (false == offerfields) &&(false == decimatepixel)) {
-	JOT(8, "  initial skipping    %4i          bytes p.%4i\n", \
+if ((true == odd) && (false == decimatepixel)) {
+	JOM(8, "  initial skipping    %4i          bytes p.%4i\n", \
 							w3/multiplier, mad);
 	pad += (w3 / multiplier);  rad -= (w3 / multiplier);
 }
@@ -1108,7 +1530,7 @@
 			rump = 0;
 
 			if (much % 2) {
-				SAY("MISTAKE: much is odd\n");
+				SAM("MISTAKE: much is odd\n");
 				return -EFAULT;
 			}
 
@@ -1116,13 +1538,11 @@
 					much) / 2;
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 			if (1 < bytesperpixel) {
-				if ((rad * \
-					2) < (much * \
-						bytesperpixel)) {
+				if (rad * 2 < much * bytesperpixel) {
 					/*
 					**   INJUDICIOUS ALTERATION OF THIS
-					**   BLOCK WILL CAUSE BREAKAGE.
-					**   BEWARE.
+					**   STATEMENT BLOCK WILL CAUSE
+					**   BREAKAGE.  BEWARE.
 					**/
 					rad2 = rad + bytesperpixel - 1;
 					much = ((((2 * \
@@ -1145,18 +1565,25 @@
 				}
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 			} else {
-				SAY("MISTAKE: %i=bytesperpixel\n", \
+				SAM("MISTAKE: %i=bytesperpixel\n", \
 						bytesperpixel);
 				return -EFAULT;
 			}
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 			if (rump)
 				caches++;
-
+			if (true == badinput) {
+				JOM(8, "ERROR: 0x%02X=->field_buffer" \
+					"[%i][%i].input, " \
+					"0x%02X=(0x08|->input)\n", \
+					peasycap->field_buffer\
+					[kex][mex].input, kex, mex, \
+					(0x08|peasycap->input));
+			}
 			rc = redaub(peasycap, pad, pex, much, more, \
 							mask, margin, isuy);
 			if (0 > rc) {
-				SAY("ERROR: redaub() failed\n");
+				SAM("ERROR: redaub() failed\n");
 				return -EFAULT;
 			}
 			if (much % 4) {
@@ -1171,6 +1598,9 @@
 				mex++;
 				pex = peasycap->field_buffer[kex][mex].pgo;
 				rex = PAGE_SIZE;
+				if (peasycap->field_buffer[kex][mex].input != \
+						(0x08|peasycap->input))
+					badinput = true;
 			}
 			pad  += more;
 			rad -= more;
@@ -1190,7 +1620,7 @@
  *  UNLESS IT IS THE LAST LINE OF AN ODD FRAME
  */
 /*---------------------------------------------------------------------------*/
-		if (((false == odd) || (cz != wz))&&(false == offerfields)) {
+		if ((false == odd) || (cz != wz)) {
 			over = w3;
 			do {
 				if (!rad) {
@@ -1224,7 +1654,7 @@
 			rump = 0;
 
 			if (much % 2) {
-				SAY("MISTAKE: much is odd\n");
+				SAM("MISTAKE: much is odd\n");
 				return -EFAULT;
 			}
 
@@ -1232,12 +1662,11 @@
 					much) / 4;
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 			if (1 < bytesperpixel) {
-				if ((rad * 4) < (much * \
-						bytesperpixel)) {
+				if (rad * 4 < much * bytesperpixel) {
 					/*
 					**   INJUDICIOUS ALTERATION OF THIS
-					**   BLOCK WILL CAUSE BREAKAGE.
-					**   BEWARE.
+					**   STATEMENT BLOCK WILL CAUSE
+					**   BREAKAGE.  BEWARE.
 					**/
 					rad2 = rad + bytesperpixel - 1;
 					much = ((((2 * rad2)/bytesperpixel)/2)\
@@ -1261,7 +1690,7 @@
 					}
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 				} else {
-					SAY("MISTAKE: %i=bytesperpixel\n", \
+					SAM("MISTAKE: %i=bytesperpixel\n", \
 						bytesperpixel);
 					return -EFAULT;
 				}
@@ -1269,10 +1698,18 @@
 			if (rump)
 				caches++;
 
+			if (true == badinput) {
+				JOM(8, "ERROR: 0x%02X=->field_buffer" \
+					"[%i][%i].input, " \
+					"0x%02X=(0x08|->input)\n", \
+					peasycap->field_buffer\
+					[kex][mex].input, kex, mex, \
+					(0x08|peasycap->input));
+			}
 			rc = redaub(peasycap, pad, pex, much, more, \
 							mask, margin, isuy);
 			if (0 > rc) {
-				SAY("ERROR: redaub() failed\n");
+				SAM("ERROR: redaub() failed\n");
 				return -EFAULT;
 			}
 			over -= much;   cz += much;
@@ -1281,6 +1718,9 @@
 				mex++;
 				pex = peasycap->field_buffer[kex][mex].pgo;
 				rex = PAGE_SIZE;
+				if (peasycap->field_buffer[kex][mex].input != \
+						(0x08|peasycap->input))
+					badinput = true;
 			}
 			pad  += more;
 			rad -= more;
@@ -1307,6 +1747,16 @@
 				mex++;
 				pex = peasycap->field_buffer[kex][mex].pgo;
 				rex = PAGE_SIZE;
+				if (peasycap->field_buffer[kex][mex].input != \
+						(0x08|peasycap->input)) {
+					JOM(8, "ERROR: 0x%02X=->field_buffer"\
+						"[%i][%i].input, " \
+						"0x%02X=(0x08|->input)\n", \
+						peasycap->field_buffer\
+						[kex][mex].input, kex, mex, \
+						(0x08|peasycap->input));
+					badinput = true;
+				}
 			}
 			much = over;
 			if (rex < much)
@@ -1325,39 +1775,39 @@
 /*---------------------------------------------------------------------------*/
 c2 = (mex + 1)*PAGE_SIZE - rex;
 if (cz != c2)
-	SAY("ERROR: discrepancy %i in bytes read\n", c2 - cz);
+	SAM("ERROR: discrepancy %i in bytes read\n", c2 - cz);
 c3 = (mad + 1)*PAGE_SIZE - rad;
 
 if (false == decimatepixel) {
 	if (bytesperpixel * \
 		cz != c3) \
-		SAY("ERROR: discrepancy %i in bytes written\n", \
+		SAM("ERROR: discrepancy %i in bytes written\n", \
 						c3 - (bytesperpixel * \
 									cz));
 } else {
 	if (false == odd) {
 		if (bytesperpixel * \
 			cz != (4 * c3))
-			SAY("ERROR: discrepancy %i in bytes written\n", \
+			SAM("ERROR: discrepancy %i in bytes written\n", \
 						(2*c3)-(bytesperpixel * \
 									cz));
 		} else {
 			if (0 != c3)
-				SAY("ERROR: discrepancy %i " \
+				SAM("ERROR: discrepancy %i " \
 						"in bytes written\n", c3);
 		}
 }
 if (rump)
-	SAY("ERROR: undischarged cache at end of line in frame buffer\n");
+	SAM("WORRY: undischarged cache at end of line in frame buffer\n");
 
-JOT(8, "===== field2frame(): %i bytes --> %i bytes (incl skip)\n", c2, c3);
-JOT(8, "===== field2frame(): %i=mad  %i=rad\n", mad, rad);
+JOM(8, "===== field2frame(): %i bytes --> %i bytes (incl skip)\n", c2, c3);
+JOM(8, "===== field2frame(): %i=mad  %i=rad\n", mad, rad);
 
 if (true == odd)
-	JOT(8, "+++++ field2frame():  frame buffer %i is full\n", kad);
+	JOM(8, "+++++ field2frame():  frame buffer %i is full\n", kad);
 
 if (peasycap->field_read == peasycap->field_fill)
-	SAY("WARNING: on exit, filling field buffer %i\n", \
+	SAM("WARNING: on exit, filling field buffer %i\n", \
 							peasycap->field_read);
 /*---------------------------------------------------------------------------*/
 /*
@@ -1365,23 +1815,24 @@
  */
 /*---------------------------------------------------------------------------*/
 do_gettimeofday(&timeval);
-if (timeval0.tv_sec) {
+if (peasycap->timeval6.tv_sec) {
 	below = ((long long int)(1000000)) * \
-		((long long int)(timeval.tv_sec  - timeval0.tv_sec)) + \
-			 (long long int)(timeval.tv_usec - timeval0.tv_usec);
+		((long long int)(timeval.tv_sec - \
+					peasycap->timeval6.tv_sec)) + \
+		 (long long int)(timeval.tv_usec - peasycap->timeval6.tv_usec);
 	above = (long long int)1000000;
 
 	sdr = signed_div(above, below);
 	above = sdr.quotient;
 	remainder = (__u32)sdr.remainder;
 
-	JOT(8, "video streaming at %3lli.%03i fields per second\n", above, \
+	JOM(8, "video streaming at %3lli.%03i fields per second\n", above, \
 							(remainder/1000));
 }
-timeval0 = timeval;
+peasycap->timeval6 = timeval;
 
 if (caches)
-	JOT(8, "%i=caches\n", caches);
+	JOM(8, "%i=caches\n", caches);
 return 0;
 }
 /*****************************************************************************/
@@ -1434,7 +1885,7 @@
 					__u8 mask, __u8 margin, bool isuy)
 {
 static __s32 ay[256], bu[256], rv[256], gu[256], gv[256];
-static __u8 cache[8], *pcache;
+__u8 *pcache;
 __u8 r, g, b, y, u, v, c, *p2, *p3, *pz, *pr;
 int  bytesperpixel;
 bool byteswaporder, decimatepixel, last;
@@ -1442,7 +1893,7 @@
 __s32 s32;
 
 if (much % 2) {
-	SAY("MISTAKE: much is odd\n");
+	SAM("MISTAKE: much is odd\n");
 	return -EFAULT;
 }
 bytesperpixel = peasycap->bytesperpixel;
@@ -1475,30 +1926,31 @@
 		ay[j] = ay[16];
 	for (j = 236; j < 256; j++)
 		ay[j] = ay[235];
-	JOT(8, "lookup tables are prepared\n");
+	JOM(8, "lookup tables are prepared\n");
 }
-if ((__u8 *)NULL == pcache)
-	pcache = &cache[0];
+pcache = peasycap->pcache;
+if (NULL == pcache)
+	pcache = &peasycap->cache[0];
 /*---------------------------------------------------------------------------*/
 /*
  *  TRANSFER CONTENTS OF CACHE TO THE FRAME BUFFER
  */
 /*---------------------------------------------------------------------------*/
 if (!pcache) {
-	SAY("MISTAKE: pcache is NULL\n");
+	SAM("MISTAKE: pcache is NULL\n");
 	return -EFAULT;
 }
 
-if (pcache != &cache[0])
-	JOT(16, "cache has %i bytes\n", (int)(pcache - &cache[0]));
-p2 = &cache[0];
-p3 = (__u8 *)pad - (int)(pcache - &cache[0]);
+if (pcache != &peasycap->cache[0])
+	JOM(16, "cache has %i bytes\n", (int)(pcache - &peasycap->cache[0]));
+p2 = &peasycap->cache[0];
+p3 = (__u8 *)pad - (int)(pcache - &peasycap->cache[0]);
 while (p2 < pcache) {
 	*p3++ = *p2;  p2++;
 }
-pcache = &cache[0];
+pcache = &peasycap->cache[0];
 if (p3 != pad) {
-	SAY("MISTAKE: pointer misalignment\n");
+	SAM("MISTAKE: pointer misalignment\n");
 	return -EFAULT;
 }
 /*---------------------------------------------------------------------------*/
@@ -1513,7 +1965,7 @@
 	v = *(p2 - 1);
 
 if (rump)
-	JOT(16, "%4i=much  %4i=more  %i=rump\n", much, more, rump);
+	JOM(16, "%4i=much  %4i=more  %i=rump\n", much, more, rump);
 
 /*---------------------------------------------------------------------------*/
 switch (bytesperpixel) {
@@ -1619,7 +2071,7 @@
 							0 : (__u8)s32);
 
 				if ((true == last) && rump) {
-					pcache = &cache[0];
+					pcache = &peasycap->cache[0];
 					switch (bytesperpixel - rump) {
 					case 1: {
 						*p3 = r;
@@ -1634,7 +2086,7 @@
 						break;
 					}
 					default: {
-						SAY("MISTAKE: %i=rump\n", \
+						SAM("MISTAKE: %i=rump\n", \
 							bytesperpixel - rump);
 						return -EFAULT;
 					}
@@ -1692,7 +2144,7 @@
 								0 : (__u8)s32);
 
 				if ((true == last) && rump) {
-					pcache = &cache[0];
+					pcache = &peasycap->cache[0];
 					switch (bytesperpixel - rump) {
 					case 1: {
 						*p3 = b;
@@ -1707,7 +2159,7 @@
 						break;
 					}
 					default: {
-						SAY("MISTAKE: %i=rump\n", \
+						SAM("MISTAKE: %i=rump\n", \
 							bytesperpixel - rump);
 						return -EFAULT;
 					}
@@ -1768,7 +2220,7 @@
 								0 : (__u8)s32);
 
 					if ((true == last) && rump) {
-						pcache = &cache[0];
+						pcache = &peasycap->cache[0];
 						switch (bytesperpixel - rump) {
 						case 1: {
 							*p3 = r;
@@ -1783,7 +2235,7 @@
 							break;
 						}
 						default: {
-							SAY("MISTAKE: " \
+							SAM("MISTAKE: " \
 							"%i=rump\n", \
 							bytesperpixel - rump);
 							return -EFAULT;
@@ -1844,7 +2296,7 @@
 								0 : (__u8)s32);
 
 					if ((true == last) && rump) {
-						pcache = &cache[0];
+						pcache = &peasycap->cache[0];
 						switch (bytesperpixel - rump) {
 						case 1: {
 							*p3 = b;
@@ -1859,7 +2311,7 @@
 							break;
 						}
 						default: {
-							SAY("MISTAKE: " \
+							SAM("MISTAKE: " \
 							"%i=rump\n", \
 							bytesperpixel - rump);
 							return -EFAULT;
@@ -1924,7 +2376,7 @@
 								0 : (__u8)s32);
 
 				if ((true == last) && rump) {
-					pcache = &cache[0];
+					pcache = &peasycap->cache[0];
 					switch (bytesperpixel - rump) {
 					case 1: {
 						*p3 = r;
@@ -1948,7 +2400,7 @@
 						break;
 					}
 					default: {
-						SAY("MISTAKE: %i=rump\n", \
+						SAM("MISTAKE: %i=rump\n", \
 							bytesperpixel - rump);
 						return -EFAULT;
 					}
@@ -2006,7 +2458,7 @@
 								0 : (__u8)s32);
 
 				if ((true == last) && rump) {
-					pcache = &cache[0];
+					pcache = &peasycap->cache[0];
 					switch (bytesperpixel - rump) {
 					case 1: {
 						*p3 = b;
@@ -2030,7 +2482,7 @@
 						break;
 					}
 					default: {
-						SAY("MISTAKE: %i=rump\n", \
+						SAM("MISTAKE: %i=rump\n", \
 							bytesperpixel - rump);
 						return -EFAULT;
 					}
@@ -2093,7 +2545,7 @@
 								0 : (__u8)s32);
 
 					if ((true == last) && rump) {
-						pcache = &cache[0];
+						pcache = &peasycap->cache[0];
 						switch (bytesperpixel - rump) {
 						case 1: {
 							*p3 = r;
@@ -2117,7 +2569,7 @@
 							break;
 						}
 						default: {
-							SAY("MISTAKE: " \
+							SAM("MISTAKE: " \
 							"%i=rump\n", \
 							bytesperpixel - \
 							rump);
@@ -2178,7 +2630,7 @@
 								0 : (__u8)s32);
 
 					if ((true == last) && rump) {
-						pcache = &cache[0];
+						pcache = &peasycap->cache[0];
 						switch (bytesperpixel - rump) {
 						case 1: {
 							*p3 = b;
@@ -2202,7 +2654,7 @@
 							break;
 						}
 						default: {
-							SAY("MISTAKE: " \
+							SAM("MISTAKE: " \
 							"%i=rump\n", \
 							bytesperpixel - rump);
 							return -EFAULT;
@@ -2226,48 +2678,13 @@
 	break;
 	}
 default: {
-	SAY("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
+	SAM("MISTAKE: %i=bytesperpixel\n", bytesperpixel);
 	return -EFAULT;
 	}
 }
 return 0;
 }
 /*****************************************************************************/
-void
-debrief(struct easycap *peasycap)
-{
-if ((struct usb_device *)NULL != peasycap->pusb_device) {
-	check_stk(peasycap->pusb_device);
-	check_saa(peasycap->pusb_device);
-	sayreadonly(peasycap);
-	SAY("%i=peasycap->field_fill\n", peasycap->field_fill);
-	SAY("%i=peasycap->field_read\n", peasycap->field_read);
-	SAY("%i=peasycap->frame_fill\n", peasycap->frame_fill);
-	SAY("%i=peasycap->frame_read\n", peasycap->frame_read);
-}
-return;
-}
-/*****************************************************************************/
-void
-sayreadonly(struct easycap *peasycap)
-{
-static int done;
-int got00, got1F, got60, got61, got62;
-
-if ((!done) && ((struct usb_device *)NULL != peasycap->pusb_device)) {
-	done = 1;
-	got00 = read_saa(peasycap->pusb_device, 0x00);
-	got1F = read_saa(peasycap->pusb_device, 0x1F);
-	got60 = read_saa(peasycap->pusb_device, 0x60);
-	got61 = read_saa(peasycap->pusb_device, 0x61);
-	got62 = read_saa(peasycap->pusb_device, 0x62);
-	SAY("0x%02X=reg0x00  0x%02X=reg0x1F\n", got00, got1F);
-	SAY("0x%02X=reg0x60  0x%02X=reg0x61  0x%02X=reg0x62\n", \
-							got60, got61, got62);
-}
-return;
-}
-/*****************************************************************************/
 /*---------------------------------------------------------------------------*/
 /*
  *  SEE CORBET ET AL. "LINUX DEVICE DRIVERS", 3rd EDITION, PAGES 430-434
@@ -2292,11 +2709,16 @@
 struct easycap *peasycap;
 
 peasycap = pvma->vm_private_data;
-if (NULL != peasycap)
-	peasycap->vma_many++;
-
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return;
+}
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return;
+}
+peasycap->vma_many++;
 JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
-
 return;
 }
 /*****************************************************************************/
@@ -2306,10 +2728,16 @@
 struct easycap *peasycap;
 
 peasycap = pvma->vm_private_data;
-if (NULL != peasycap) {
-	peasycap->vma_many--;
-	JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return;
 }
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return;
+}
+peasycap->vma_many--;
+JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
 return;
 }
 /*****************************************************************************/
@@ -2355,24 +2783,22 @@
 	SAY("ERROR: peasycap is NULL\n");
 	return retcode;
 }
-mutex_lock(&(peasycap->mutex_mmap_video[0]));
 /*---------------------------------------------------------------------------*/
 pbuf = peasycap->frame_buffer[k][m].pgo;
 if (NULL == pbuf) {
-	SAY("ERROR:  pbuf is NULL\n");
+	SAM("ERROR:  pbuf is NULL\n");
 	goto finish;
 }
 page = virt_to_page(pbuf);
 if (NULL == page) {
-	SAY("ERROR:  page is NULL\n");
+	SAM("ERROR:  page is NULL\n");
 	goto finish;
 }
 get_page(page);
 /*---------------------------------------------------------------------------*/
 finish:
-mutex_unlock(&(peasycap->mutex_mmap_video[0]));
 if (NULL == page) {
-	SAY("ERROR:  page is NULL after get_page(page)\n");
+	SAM("ERROR:  page is NULL after get_page(page)\n");
 } else {
 	pvmf->page = page;
 	retcode = VM_FAULT_MINOR;
@@ -2383,7 +2809,7 @@
 /*---------------------------------------------------------------------------*/
 /*
  *  ON COMPLETION OF A VIDEO URB ITS DATA IS COPIED TO THE FIELD BUFFERS
- *  PROVIDED peasycap->video_idle IS ZER0.  REGARDLESS OF THIS BEING TRUE,
+ *  PROVIDED peasycap->video_idle IS ZERO.  REGARDLESS OF THIS BEING TRUE,
  *  IT IS RESUBMITTED PROVIDED peasycap->video_isoc_streaming IS NOT ZERO.
  *
  *  THIS FUNCTION IS AN INTERRUPT SERVICE ROUTINE AND MUST NOT SLEEP.
@@ -2400,7 +2826,8 @@
  *      0 != (kount & 0x8000)   => AT LEAST ONE URB COMPLETED WITH ERRORS
  *      0 != (kount & 0x4000)   => BUFFER HAS TOO MUCH DATA
  *      0 != (kount & 0x2000)   => BUFFER HAS NOT ENOUGH DATA
- *      0 != (kount & 0x0400)   => FIELD WAS SUBMITTED BY BRIDGER ROUTINE
+ *      0 != (kount & 0x1000)   => BUFFER HAS DATA FROM DISPARATE INPUTS
+ *      0 != (kount & 0x0400)   => RESERVED
  *      0 != (kount & 0x0200)   => FIELD BUFFER NOT YET CHECKED
  *      0 != (kount & 0x0100)   => BUFFER HAS TWO EXTRA BYTES - WHY?
  */
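
The status word documented above is what the easycap_dqbuf() wait loops test: a field is accepted only when the whole high byte is clear and the low byte matches the parity being waited for (0 for the top field, 1 for the bottom field, as set at end-of-field below). A small standalone helper showing how the documented bits decode, illustrative only and not part of the driver:

#include <stdio.h>

static void decode_kount(unsigned int kount)
{
	if (kount & 0x8000)
		printf("at least one urb completed with errors\n");
	if (kount & 0x4000)
		printf("buffer has too much data\n");
	if (kount & 0x2000)
		printf("buffer has not enough data\n");
	if (kount & 0x1000)
		printf("buffer holds data from disparate inputs\n");
	if (kount & 0x0200)
		printf("field buffer not yet checked\n");
	if (kount & 0x0100)
		printf("buffer has two extra bytes\n");
	if (0 == (kount & 0xFF00))
		printf("clean field, parity %u (0=top, 1=bottom)\n",
		       kount & 0x00FF);
}
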
@@ -2408,19 +2835,14 @@
 void
 easycap_complete(struct urb *purb)
 {
-static int mt;
 struct easycap *peasycap;
 struct data_buffer *pfield_buffer;
 char errbuf[16];
 int i, more, much, leap, rc, last;
 int videofieldamount;
-unsigned int override;
+unsigned int override, bad;
 int framestatus, framelength, frameactual, frameoffset;
 __u8 *pu;
-#if defined(BRIDGER)
-struct timeval timeval;
-long long usec;
-#endif /*BRIDGER*/
 
 if (NULL == purb) {
 	SAY("ERROR: easycap_complete(): purb is NULL\n");
@@ -2431,74 +2853,78 @@
 	SAY("ERROR: easycap_complete(): peasycap is NULL\n");
 	return;
 }
-
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return;
+}
 if (peasycap->video_eof)
 	return;
-
 for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++)
 	if (purb->transfer_buffer == peasycap->video_isoc_buffer[i].pgo)
 		break;
-JOT(16, "%2i=urb\n", i);
+JOM(16, "%2i=urb\n", i);
 last = peasycap->video_isoc_sequence;
 if ((((VIDEO_ISOC_BUFFER_MANY - 1) == last) && \
 						(0 != i)) || \
 	(((VIDEO_ISOC_BUFFER_MANY - 1) != last) && \
 						((last + 1) != i))) {
-	SAY("ERROR: out-of-order urbs %i,%i ... continuing\n", last, i);
+	JOM(16, "ERROR: out-of-order urbs %i,%i ... continuing\n", last, i);
 }
 peasycap->video_isoc_sequence = i;
 
 if (peasycap->video_idle) {
-	JOT(16, "%i=video_idle  %i=video_isoc_streaming\n", \
+	JOM(16, "%i=video_idle  %i=video_isoc_streaming\n", \
 			peasycap->video_idle, peasycap->video_isoc_streaming);
 	if (peasycap->video_isoc_streaming) {
 		rc = usb_submit_urb(purb, GFP_ATOMIC);
 		if (0 != rc) {
-			SAY("ERROR: while %i=video_idle, " \
-					"usb_submit_urb() failed with rc:\n", \
-							peasycap->video_idle);
 			switch (rc) {
 			case -ENOMEM: {
-				SAY("ENOMEM\n");
+				SAM("ENOMEM\n");
 				break;
 			}
 			case -ENODEV: {
-				SAY("ENODEV\n");
+				SAM("ENODEV\n");
 				break;
 			}
 			case -ENXIO: {
-				SAY("ENXIO\n");
+				SAM("ENXIO\n");
 				break;
 			}
 			case -EINVAL: {
-				SAY("EINVAL\n");
+				SAM("EINVAL\n");
 				break;
 			}
 			case -EAGAIN: {
-				SAY("EAGAIN\n");
+				SAM("EAGAIN\n");
 				break;
 			}
 			case -EFBIG: {
-				SAY("EFBIG\n");
+				SAM("EFBIG\n");
 				break;
 			}
 			case -EPIPE: {
-				SAY("EPIPE\n");
+				SAM("EPIPE\n");
 				break;
 			}
 			case -EMSGSIZE: {
-				SAY("EMSGSIZE\n");
+				SAM("EMSGSIZE\n");
 				break;
 			}
 			case -ENOSPC: {
-				SAY("ENOSPC\n");
+				SAM("ENOSPC\n");
 				break;
 			}
 			default: {
-				SAY("0x%08X\n", rc);
+				SAM("0x%08X\n", rc);
 				break;
 			}
 			}
+			if (-ENODEV != rc) \
+				SAM("ERROR: while %i=video_idle, " \
+							"usb_submit_urb() " \
+							"failed with rc:\n", \
+							peasycap->video_idle);
 		}
 	}
 return;
@@ -2506,80 +2932,80 @@
 override = 0;
 /*---------------------------------------------------------------------------*/
 if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
-	SAY("ERROR: bad peasycap->field_fill\n");
+	SAM("ERROR: bad peasycap->field_fill\n");
 	return;
 }
 if (purb->status) {
 	if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
-		JOT(8, "urb status -ESHUTDOWN or -ENOENT\n");
+		JOM(8, "urb status -ESHUTDOWN or -ENOENT\n");
 		return;
 	}
 
 	(peasycap->field_buffer[peasycap->field_fill][0].kount) |= 0x8000 ;
-	SAY("ERROR: bad urb status:\n");
+	SAM("ERROR: bad urb status:\n");
 	switch (purb->status) {
 	case -EINPROGRESS: {
-		SAY("-EINPROGRESS\n"); break;
+		SAM("-EINPROGRESS\n"); break;
 	}
 	case -ENOSR: {
-		SAY("-ENOSR\n"); break;
+		SAM("-ENOSR\n"); break;
 	}
 	case -EPIPE: {
-		SAY("-EPIPE\n"); break;
+		SAM("-EPIPE\n"); break;
 	}
 	case -EOVERFLOW: {
-		SAY("-EOVERFLOW\n"); break;
+		SAM("-EOVERFLOW\n"); break;
 	}
 	case -EPROTO: {
-		SAY("-EPROTO\n"); break;
+		SAM("-EPROTO\n"); break;
 	}
 	case -EILSEQ: {
-		SAY("-EILSEQ\n"); break;
+		SAM("-EILSEQ\n"); break;
 	}
 	case -ETIMEDOUT: {
-		SAY("-ETIMEDOUT\n"); break;
+		SAM("-ETIMEDOUT\n"); break;
 	}
 	case -EMSGSIZE: {
-		SAY("-EMSGSIZE\n"); break;
+		SAM("-EMSGSIZE\n"); break;
 	}
 	case -EOPNOTSUPP: {
-		SAY("-EOPNOTSUPP\n"); break;
+		SAM("-EOPNOTSUPP\n"); break;
 	}
 	case -EPFNOSUPPORT: {
-		SAY("-EPFNOSUPPORT\n"); break;
+		SAM("-EPFNOSUPPORT\n"); break;
 	}
 	case -EAFNOSUPPORT: {
-		SAY("-EAFNOSUPPORT\n"); break;
+		SAM("-EAFNOSUPPORT\n"); break;
 	}
 	case -EADDRINUSE: {
-		SAY("-EADDRINUSE\n"); break;
+		SAM("-EADDRINUSE\n"); break;
 	}
 	case -EADDRNOTAVAIL: {
-		SAY("-EADDRNOTAVAIL\n"); break;
+		SAM("-EADDRNOTAVAIL\n"); break;
 	}
 	case -ENOBUFS: {
-		SAY("-ENOBUFS\n"); break;
+		SAM("-ENOBUFS\n"); break;
 	}
 	case -EISCONN: {
-		SAY("-EISCONN\n"); break;
+		SAM("-EISCONN\n"); break;
 	}
 	case -ENOTCONN: {
-		SAY("-ENOTCONN\n"); break;
+		SAM("-ENOTCONN\n"); break;
 	}
 	case -ESHUTDOWN: {
-		SAY("-ESHUTDOWN\n"); break;
+		SAM("-ESHUTDOWN\n"); break;
 	}
 	case -ENOENT: {
-		SAY("-ENOENT\n"); break;
+		SAM("-ENOENT\n"); break;
 	}
 	case -ECONNRESET: {
-		SAY("-ECONNRESET\n"); break;
+		SAM("-ECONNRESET\n"); break;
 	}
 	case -ENOSPC: {
-		SAY("ENOSPC\n"); break;
+		SAM("ENOSPC\n"); break;
 	}
 	default: {
-		SAY("unknown error code 0x%08X\n", purb->status); break;
+		SAM("unknown error code 0x%08X\n", purb->status); break;
 	}
 	}
 /*---------------------------------------------------------------------------*/
@@ -2638,7 +3064,7 @@
 				strcpy(&errbuf[0], "-ECONNRESET"); break;
 			}
 			case -ENOSPC: {
-				SAY("ENOSPC\n"); break;
+				SAM("ENOSPC\n"); break;
 			}
 			case -ESHUTDOWN: {
 				strcpy(&errbuf[0], "-ESHUTDOWN"); break;
@@ -2653,7 +3079,7 @@
 		frameactual = purb->iso_frame_desc[i].actual_length;
 		frameoffset = purb->iso_frame_desc[i].offset;
 
-		JOT(16, "frame[%2i]:" \
+		JOM(16, "frame[%2i]:" \
 				"%4i=status "  \
 				"%4i=actual "  \
 				"%4i=length "  \
@@ -2667,19 +3093,20 @@
 				PAGE_SIZE) + \
 				(int)(pfield_buffer->pto - pfield_buffer->pgo);
 		if (4 == more)
-			mt++;
+			peasycap->video_mt++;
 		if (4 < more) {
-			if (mt) {
-				JOT(8, "%4i empty video urb frames\n", mt);
-				mt = 0;
+			if (peasycap->video_mt) {
+				JOM(8, "%4i empty video urb frames\n", \
+							peasycap->video_mt);
+				peasycap->video_mt = 0;
 			}
 			if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
-				SAY("ERROR: bad peasycap->field_fill\n");
+				SAM("ERROR: bad peasycap->field_fill\n");
 				return;
 			}
 			if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
 							peasycap->field_page) {
-				SAY("ERROR: bad peasycap->field_page\n");
+				SAM("ERROR: bad peasycap->field_page\n");
 				return;
 			}
 			pfield_buffer = &peasycap->field_buffer\
@@ -2712,11 +3139,13 @@
 						peasycap->videofieldamount) {
 					if (2 == videofieldamount - \
 							peasycap->\
-							videofieldamount)
+							videofieldamount) {
 						(peasycap->field_buffer\
 						[peasycap->field_fill]\
 							[0].kount) |= 0x0100;
-					else
+						peasycap->video_junk += (1 + \
+							VIDEO_JUNK_TOLERATE);
+					} else
 						(peasycap->field_buffer\
 						[peasycap->field_fill]\
 							[0].kount) |= 0x4000;
@@ -2727,53 +3156,74 @@
 						[peasycap->field_fill]\
 							[0].kount) |= 0x2000;
 					}
-				if (!(0xFF00 & peasycap->field_buffer\
+					bad = 0xFF00 & peasycap->field_buffer\
 						[peasycap->field_fill]\
-						[0].kount)) {
-					(peasycap->video_junk)--;
-					if (-16 > peasycap->video_junk)
-						peasycap->video_junk = -16;
-					peasycap->field_read = \
+						[0].kount;
+					if (!bad) {
+						(peasycap->video_junk)--;
+						if (-VIDEO_JUNK_TOLERATE > \
+							peasycap->video_junk) \
+							peasycap->video_junk =\
+							-VIDEO_JUNK_TOLERATE;
+						peasycap->field_read = \
 							(peasycap->\
 								field_fill)++;
-
-					if (FIELD_BUFFER_MANY <= \
-						peasycap->field_fill)
-						peasycap->field_fill = 0;
-					peasycap->field_page = 0;
-					pfield_buffer = &peasycap->\
-						field_buffer\
-						[peasycap->field_fill]\
-						[peasycap->field_page];
-					pfield_buffer->pto = \
+						if (FIELD_BUFFER_MANY <= \
+								peasycap->\
+								field_fill)
+							peasycap->\
+								field_fill = 0;
+						peasycap->field_page = 0;
+						pfield_buffer = &peasycap->\
+							field_buffer\
+							[peasycap->\
+							field_fill]\
+							[peasycap->\
+							field_page];
+						pfield_buffer->pto = \
 							pfield_buffer->pgo;
-
-					JOT(8, "bumped to: %i=peasycap->" \
-						"field_fill  %i=parity\n", \
-						peasycap->field_fill, \
-						0x00FF & pfield_buffer->kount);
-					JOT(8, "field buffer %i has %i " \
-						"bytes fit to be read\n", \
-						peasycap->field_read, \
-						videofieldamount);
-					JOT(8, "wakeup call to wq_video, " \
-						"%i=field_read %i=field_fill "\
-						"%i=parity\n", \
-						peasycap->field_read, \
-						peasycap->field_fill, \
-						0x00FF & peasycap->\
-						field_buffer[peasycap->\
-						field_read][0].kount);
-					wake_up_interruptible(&(peasycap->\
-								wq_video));
-					do_gettimeofday(&peasycap->timeval7);
+						JOM(8, "bumped to: %i="\
+							"peasycap->" \
+							"field_fill  %i="\
+							"parity\n", \
+							peasycap->field_fill, \
+							0x00FF & \
+							pfield_buffer->kount);
+						JOM(8, "field buffer %i has "\
+							"%i bytes fit to be "\
+							"read\n", \
+							peasycap->field_read, \
+							videofieldamount);
+						JOM(8, "wakeup call to "\
+							"wq_video, " \
+							"%i=field_read "\
+							"%i=field_fill "\
+							"%i=parity\n", \
+							peasycap->field_read, \
+							peasycap->field_fill, \
+							0x00FF & peasycap->\
+							field_buffer\
+							[peasycap->\
+							field_read][0].kount);
+						wake_up_interruptible\
+							(&(peasycap->\
+								 wq_video));
+						do_gettimeofday\
+							(&peasycap->timeval7);
 					} else {
 					peasycap->video_junk++;
-					JOT(8, "field buffer %i had %i " \
-						"bytes, now discarded\n", \
+					if (bad & 0x0010) \
+						peasycap->video_junk += \
+						(1 + VIDEO_JUNK_TOLERATE/2);
+					JOM(8, "field buffer %i had %i " \
+						"bytes, now discarded: "\
+						"0x%04X\n", \
 						peasycap->field_fill, \
-						videofieldamount);
-
+						videofieldamount,\
+						(0xFF00 & \
+						peasycap->field_buffer\
+						[peasycap->field_fill][0].\
+						kount));
 					(peasycap->field_fill)++;
 
 					if (FIELD_BUFFER_MANY <= \
@@ -2787,20 +3237,22 @@
 					pfield_buffer->pto = \
 							pfield_buffer->pgo;
 
-					JOT(8, "bumped to: %i=peasycap->" \
+					JOM(8, "bumped to: %i=peasycap->" \
 						"field_fill  %i=parity\n", \
 						peasycap->field_fill, \
 						0x00FF & pfield_buffer->kount);
 				}
 				if (8 == more) {
-					JOT(8, "end-of-field: received " \
+					JOM(8, "end-of-field: received " \
 						"parity byte 0x%02X\n", \
 						(0xFF & *pu));
 					if (0x40 & *pu)
 						pfield_buffer->kount = 0x0000;
 					else
 						pfield_buffer->kount = 0x0001;
-					JOT(8, "end-of-field: 0x%02X=kount\n",\
+					pfield_buffer->input = 0x08 | \
+						(0x07 & peasycap->input);
+					JOM(8, "end-of-field: 0x%02X=kount\n",\
 						0xFF & pfield_buffer->kount);
 				}
 			}
@@ -2813,12 +3265,12 @@
 			more -= leap;
 
 			if (FIELD_BUFFER_MANY <= peasycap->field_fill) {
-				SAY("ERROR: bad peasycap->field_fill\n");
+				SAM("ERROR: bad peasycap->field_fill\n");
 				return;
 			}
 			if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
 							peasycap->field_page) {
-				SAY("ERROR: bad peasycap->field_page\n");
+				SAM("ERROR: bad peasycap->field_page\n");
 				return;
 			}
 			pfield_buffer = &peasycap->field_buffer\
@@ -2829,7 +3281,7 @@
 						[peasycap->field_page];
 				if (PAGE_SIZE < (pfield_buffer->pto - \
 							pfield_buffer->pgo)) {
-					SAY("ERROR: bad pfield_buffer->pto\n");
+					SAM("ERROR: bad pfield_buffer->pto\n");
 					return;
 				}
 				if (PAGE_SIZE == (pfield_buffer->pto - \
@@ -2837,7 +3289,7 @@
 					(peasycap->field_page)++;
 					if (FIELD_BUFFER_SIZE/PAGE_SIZE <= \
 							peasycap->field_page) {
-						JOT(16, "wrapping peasycap->" \
+						JOM(16, "wrapping peasycap->" \
 							"field_page\n");
 						peasycap->field_page = 0;
 					}
@@ -2847,6 +3299,15 @@
 							[peasycap->field_page];
 					pfield_buffer->pto = \
 							pfield_buffer->pgo;
+					pfield_buffer->input = 0x08 | \
+						(0x07 & peasycap->input);
+					if ((peasycap->field_buffer[peasycap->\
+							field_fill][0]).\
+								input != \
+							pfield_buffer->input)
+						(peasycap->field_buffer\
+							[peasycap->field_fill]\
+							[0]).kount |= 0x1000;
 				}
 
 				much = PAGE_SIZE - (int)(pfield_buffer->pto - \
@@ -2865,55 +3326,6 @@
 }
 /*---------------------------------------------------------------------------*/
 /*
- *
- *
- *             *** UNDER DEVELOPMENT/TESTING - NOT READY YET! ***
- *
- *
- *
- *  VIDEOTAPES MAY HAVE BEEN MANUALLY PAUSED AND RESTARTED DURING RECORDING.
- *  THIS CAUSES LOSS OF SYNC, CONFUSING DOWNSTREAM USERSPACE PROGRAMS WHICH
- *  MAY INTERPRET THE INTERRUPTION AS A SYMPTOM OF LATENCY.  TO OVERCOME THIS
- *  THE DRIVER BRIDGES THE HIATUS BY SENDING DUMMY VIDEO FRAMES AT ROUGHLY
- *  THE RIGHT TIME INTERVALS IN THE HOPE OF PERSUADING THE DOWNSTREAM USERSPACE
- *  PROGRAM TO RESUME NORMAL SERVICE WHEN THE INTERRUPTION IS OVER.
- */
-/*---------------------------------------------------------------------------*/
-#if defined(BRIDGER)
-do_gettimeofday(&timeval);
-if (peasycap->timeval7.tv_sec) {
-	usec = 1000000*(timeval.tv_sec  - peasycap->timeval7.tv_sec) + \
-			(timeval.tv_usec - peasycap->timeval7.tv_usec);
-	if (usec > (peasycap->usec + peasycap->tolerate)) {
-		JOT(8, "bridging hiatus\n");
-		peasycap->video_junk = 0;
-		peasycap->field_buffer[peasycap->field_fill][0].kount |= 0x0400;
-
-		peasycap->field_read = (peasycap->field_fill)++;
-
-		if (FIELD_BUFFER_MANY <= peasycap->field_fill) \
-						peasycap->field_fill = 0;
-		peasycap->field_page = 0;
-		pfield_buffer = &peasycap->field_buffer\
-				[peasycap->field_fill][peasycap->field_page];
-		pfield_buffer->pto = pfield_buffer->pgo;
-
-		JOT(8, "bumped to: %i=peasycap->field_fill  %i=parity\n", \
-			peasycap->field_fill, 0x00FF & pfield_buffer->kount);
-		JOT(8, "field buffer %i has %i bytes to be overwritten\n", \
-			peasycap->field_read, videofieldamount);
-		JOT(8, "wakeup call to wq_video, " \
-			"%i=field_read %i=field_fill %i=parity\n", \
-			peasycap->field_read, peasycap->field_fill, \
-			0x00FF & \
-			peasycap->field_buffer[peasycap->field_read][0].kount);
-		wake_up_interruptible(&(peasycap->wq_video));
-		do_gettimeofday(&peasycap->timeval7);
-	}
-}
-#endif /*BRIDGER*/
-/*---------------------------------------------------------------------------*/
-/*
  *  RESUBMIT THIS URB, UNLESS A SEVERE PERSISTENT ERROR CONDITION EXISTS.
  *
  *  IF THE WAIT QUEUES ARE NOT CLEARED IN RESPONSE TO AN ERROR CONDITION
@@ -2921,51 +3333,57 @@
  */
 /*---------------------------------------------------------------------------*/
 if (VIDEO_ISOC_BUFFER_MANY <= peasycap->video_junk) {
-	SAY("easycap driver shutting down on condition green\n");
+	SAM("easycap driver shutting down on condition green\n");
+	peasycap->status = 1;
 	peasycap->video_eof = 1;
+	peasycap->video_junk = 0;
+	wake_up_interruptible(&peasycap->wq_video);
+#if !defined(PERSEVERE)
 	peasycap->audio_eof = 1;
-	peasycap->video_junk = -VIDEO_ISOC_BUFFER_MANY;
-	wake_up_interruptible(&(peasycap->wq_video));
-	wake_up_interruptible(&(peasycap->wq_audio));
+	wake_up_interruptible(&peasycap->wq_audio);
+#endif /*PERSEVERE*/
 	return;
 }
 if (peasycap->video_isoc_streaming) {
 	rc = usb_submit_urb(purb, GFP_ATOMIC);
 	if (0 != rc) {
-		SAY("ERROR: while %i=video_idle, usb_submit_urb() failed " \
-					"with rc:\n", peasycap->video_idle);
 		switch (rc) {
 		case -ENOMEM: {
-			SAY("ENOMEM\n"); break;
+			SAM("ENOMEM\n"); break;
 		}
 		case -ENODEV: {
-			SAY("ENODEV\n"); break;
+			SAM("ENODEV\n"); break;
 		}
 		case -ENXIO: {
-			SAY("ENXIO\n"); break;
+			SAM("ENXIO\n"); break;
 		}
 		case -EINVAL: {
-			SAY("EINVAL\n"); break;
+			SAM("EINVAL\n"); break;
 		}
 		case -EAGAIN: {
-			SAY("EAGAIN\n"); break;
+			SAM("EAGAIN\n"); break;
 		}
 		case -EFBIG: {
-			SAY("EFBIG\n"); break;
+			SAM("EFBIG\n"); break;
 		}
 		case -EPIPE: {
-			SAY("EPIPE\n"); break;
+			SAM("EPIPE\n"); break;
 		}
 		case -EMSGSIZE: {
-			SAY("EMSGSIZE\n");  break;
+			SAM("EMSGSIZE\n");  break;
 		}
 		case -ENOSPC: {
-			SAY("ENOSPC\n"); break;
+			SAM("ENOSPC\n"); break;
 		}
 		default: {
-			SAY("0x%08X\n", rc); break;
+			SAM("0x%08X\n", rc); break;
 		}
 		}
+		if (-ENODEV != rc) \
+			SAM("ERROR: while %i=video_idle, " \
+						"usb_submit_urb() " \
+						"failed with rc:\n", \
+						peasycap->video_idle);
 	}
 }
 return;
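
The "condition green" shutdown above illustrates the rule stated in the comment before it: when a fatal error stops the URB stream, the wait queues must still be woken, because readers in easycap_dqbuf() sleep until either data arrives or an eof flag is set, and would otherwise never run again. A standalone sketch of that producer/consumer contract, with a pthread condition variable standing in for the driver's wait queue:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static bool data_ready, stream_eof;

/* Mirrors the dqbuf wait loops: sleep until data or end-of-stream. */
static int consumer_wait(void)
{
	pthread_mutex_lock(&lock);
	while (!data_ready && !stream_eof)
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
	return stream_eof ? -1 : 0;	/* -1 plays the role of -EIO       */
}

/* Mirrors the shutdown path: set the flag AND wake the sleepers.
 * Skipping the wakeup is exactly the hang the comment warns about.
 */
static void producer_fatal_error(void)
{
	pthread_mutex_lock(&lock);
	stream_eof = true;		/* analogous to video_eof = 1      */
	pthread_cond_broadcast(&wq);	/* analogous to wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
}
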
@@ -2977,8 +3395,8 @@
  *                                  FIXME
  *
  *
- *  THIS FUNCTION ASSUMES THAT, ON EACH AND EVERY OCCASION THAT THE DEVICE IS
- *  PHYSICALLY PLUGGED IN, INTERFACE 0 IS PROBED FIRST.
+ *  THIS FUNCTION ASSUMES THAT, ON EACH AND EVERY OCCASION THAT THE EasyCAP
+ *  IS PHYSICALLY PLUGGED IN, INTERFACE 0 IS PROBED FIRST.
  *  IF THIS IS NOT TRUE, THERE IS THE POSSIBILITY OF AN Oops.
  *
  *  THIS HAS NEVER BEEN A PROBLEM IN PRACTICE, BUT SOMETHING SEEMS WRONG HERE.
@@ -2994,7 +3412,7 @@
 struct usb_interface_descriptor *pusb_interface_descriptor;
 struct usb_interface_assoc_descriptor *pusb_interface_assoc_descriptor;
 struct urb *purb;
-static struct easycap *peasycap /*=NULL*/;
+struct easycap *peasycap;
 struct data_urb *pdata_urb;
 size_t wMaxPacketSize;
 int ISOCwMaxPacketSize;
@@ -3004,19 +3422,32 @@
 __u8 bEndpointAddress;
 __u8 ISOCbEndpointAddress;
 __u8 INTbEndpointAddress;
-int isin, i, j, k, m;
+int isin, i, j, k, m, rc;
 __u8 bInterfaceNumber;
 __u8 bInterfaceClass;
 __u8 bInterfaceSubClass;
 void *pbuf;
 int okalt[8], isokalt;
-int okepn[8], isokepn;
-int okmps[8], isokmps;
+int okepn[8];
+int okmps[8];
 int maxpacketsize;
-int rc;
+__u16 mask;
+__s32 value;
+struct easycap_format *peasycap_format;
 
 JOT(4, "\n");
 
+if (!dongle_done) {
+	dongle_done = 1;
+	for (k = 0; k < DONGLE_MANY; k++) {
+		easycap_dongle[k].peasycap = (struct easycap *)NULL;
+		mutex_init(&easycap_dongle[k].mutex_video);
+		mutex_init(&easycap_dongle[k].mutex_audio);
+	}
+}
+
+peasycap = (struct easycap *)NULL;
+
 if ((struct usb_interface *)NULL == pusb_interface) {
 	SAY("ERROR: pusb_interface is NULL\n");
 	return -EFAULT;
@@ -3117,46 +3548,83 @@
 /*
  *  A NEW struct easycap IS ALWAYS ALLOCATED WHEN INTERFACE 0 IS PROBED.
  *  IT IS NOT POSSIBLE HERE TO FREE ANY EXISTING struct easycap.  THIS
- *  SHOULD HAVE BEEN DONE BY easycap_delete() WHEN THE DEVICE WAS PHYSICALLY
- *  UNPLUGGED.
- */
+ *  SHOULD HAVE BEEN DONE BY easycap_delete() WHEN THE EasyCAP WAS
+ *  PHYSICALLY UNPLUGGED.
+ *
+ *  THE POINTER peasycap TO THE struct easycap IS REMEMBERED WHEN
+ *  INTERFACES 1 AND 2 ARE PROBED.
+ *
+ *  IF TWO EasyCAPs ARE PLUGGED IN NEARLY SIMULTANEOUSLY THERE WILL
+ *  BE TROUBLE.  BEWARE.
+*/
 /*---------------------------------------------------------------------------*/
 if (0 == bInterfaceNumber) {
 	peasycap = kzalloc(sizeof(struct easycap), GFP_KERNEL);
 	if (NULL == peasycap) {
 		SAY("ERROR: Could not allocate peasycap\n");
 		return -ENOMEM;
-	} else {
-		peasycap->allocation_video_struct = sizeof(struct easycap);
-		peasycap->allocation_video_page = 0;
-		peasycap->allocation_video_urb = 0;
-		peasycap->allocation_audio_struct = 0;
-		peasycap->allocation_audio_page = 0;
-		peasycap->allocation_audio_urb = 0;
 	}
+	SAM("allocated 0x%08lX=peasycap\n", (unsigned long int) peasycap);
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+	SAM("where     0x%08lX=&peasycap->video_device\n", \
+				(unsigned long int) &peasycap->video_device);
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+	SAM("and       0x%08lX=&peasycap->v4l2_device\n", \
+				(unsigned long int) &peasycap->v4l2_device);
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
 /*---------------------------------------------------------------------------*/
 /*
- *  INITIALIZE THE NEW easycap STRUCTURE.
- *  NO PARAMETERS ARE SPECIFIED HERE REQUIRING THE SETTING OF REGISTERS.
- *  THAT IS DONE FIRST BY easycap_open() AND LATER BY easycap_ioctl().
- */
+ *  PERFORM URGENT INITIALIZATIONS ...
+*/
 /*---------------------------------------------------------------------------*/
-	peasycap->pusb_device = pusb_device;
-	peasycap->pusb_interface = pusb_interface;
-
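+	/*
+	 *  THE TELLTALE STRING ALLOWS THE POINTER RETURNED LATER BY
+	 *  usb_get_intfdata() TO BE VERIFIED:  SEE easycap_usb_disconnect().
+	*/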
+	strcpy(&peasycap->telltale[0], TELLTALE);
 	kref_init(&peasycap->kref);
-	JOT(8, "intf[%i]: after kref_init(..._video) " \
+	JOM(8, "intf[%i]: after kref_init(..._video) " \
 			"%i=peasycap->kref.refcount.counter\n", \
 			bInterfaceNumber, peasycap->kref.refcount.counter);
 
-	init_waitqueue_head(&(peasycap->wq_video));
-	init_waitqueue_head(&(peasycap->wq_audio));
+	init_waitqueue_head(&peasycap->wq_video);
+	init_waitqueue_head(&peasycap->wq_audio);
 
-	mutex_init(&(peasycap->mutex_timeval0));
-	mutex_init(&(peasycap->mutex_timeval1));
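+	/*
+	 *  CLAIM THE FIRST FREE, UNLOCKED SLOT IN THE DONGLE TABLE FOR THE
+	 *  NEWLY ALLOCATED struct easycap.
+	*/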
+	for (dongle_this = 0; dongle_this < DONGLE_MANY; dongle_this++) {
+		if (NULL == easycap_dongle[dongle_this].peasycap) {
+			if (0 == mutex_is_locked(&easycap_dongle\
+						[dongle_this].mutex_video)) {
+				if (0 == mutex_is_locked(&easycap_dongle\
+						[dongle_this].mutex_audio)) {
+					easycap_dongle\
+						[dongle_this].peasycap = \
+								peasycap;
+					JOM(8, "intf[%i]: peasycap-->easycap" \
+						"_dongle[%i].peasycap\n", \
+						bInterfaceNumber, dongle_this);
+					break;
+				}
+			}
+		}
+	}
+	if (DONGLE_MANY <= dongle_this) {
+		SAM("ERROR: too many dongles\n");
+		return -ENOMEM;
+	}
 
-	for (k = 0; k < FRAME_BUFFER_MANY; k++)
-		mutex_init(&(peasycap->mutex_mmap_video[k]));
+	peasycap->allocation_video_struct = sizeof(struct easycap);
+	peasycap->allocation_video_page = 0;
+	peasycap->allocation_video_urb = 0;
+	peasycap->allocation_audio_struct = 0;
+	peasycap->allocation_audio_page = 0;
+	peasycap->allocation_audio_urb = 0;
+
+/*---------------------------------------------------------------------------*/
+/*
+ *  ... AND FURTHER INITIALIZE THE STRUCTURE
+*/
+/*---------------------------------------------------------------------------*/
+	peasycap->pusb_device = pusb_device;
+	peasycap->pusb_interface = pusb_interface;
 
 	peasycap->ilk = 0;
 	peasycap->microphone = false;
@@ -3177,46 +3645,172 @@
 
 	peasycap->frame_buffer_many = FRAME_BUFFER_MANY;
 
-	if ((struct mutex *)NULL == &(peasycap->mutex_mmap_video[0])) {
-		SAY("ERROR: &(peasycap->mutex_mmap_video[%i]) is NULL\n", 0);
-		return -EFAULT;
-	}
+	for (k = 0; k < INPUT_MANY; k++)
+		peasycap->lost[k] = 0;
+	peasycap->skip = 0;
+	peasycap->skipped = 0;
+	peasycap->offerfields = 0;
 /*---------------------------------------------------------------------------*/
 /*
- *  DYNAMICALLY FILL IN THE AVAILABLE FORMATS.
+ *  DYNAMICALLY FILL IN THE AVAILABLE FORMATS ...
  */
 /*---------------------------------------------------------------------------*/
 	rc = fillin_formats();
 	if (0 > rc) {
-		SAY("ERROR: fillin_formats() returned %i\n", rc);
+		SAM("ERROR: fillin_formats() returned %i\n", rc);
 		return -EFAULT;
 	}
-	JOT(4, "%i formats available\n", rc);
-	} else {
+	JOM(4, "%i formats available\n", rc);
 /*---------------------------------------------------------------------------*/
-		if ((struct easycap *)NULL == peasycap) {
-			SAY("ERROR: peasycap is NULL " \
-					"when probing interface %i\n", \
-							bInterfaceNumber);
-			return -EFAULT;
+/*
+ *  ... AND POPULATE easycap.inputset[]
+*/
+/*---------------------------------------------------------------------------*/
+	for (k = 0; k < INPUT_MANY; k++) {
+		peasycap->inputset[k].input_ok = 0;
+		peasycap->inputset[k].standard_offset_ok = 0;
+		peasycap->inputset[k].format_offset_ok = 0;
+		peasycap->inputset[k].brightness_ok = 0;
+		peasycap->inputset[k].contrast_ok = 0;
+		peasycap->inputset[k].saturation_ok = 0;
+		peasycap->inputset[k].hue_ok = 0;
+	}
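+	/*
+	 *  POINT EVERY INPUT AT THE NTSC_M OR PAL_BGHIN ENTRY OF
+	 *  easycap_standard[], DEPENDING ON peasycap->ntsc, AND REMEMBER
+	 *  THE CORRESPONDING MASK.
+	*/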
+	if (true == peasycap->ntsc) {
+		i = 0;
+		m = 0;
+		mask = 0;
+		while (0xFFFF != easycap_standard[i].mask) {
+			if (NTSC_M == easycap_standard[i].\
+							v4l2_standard.index) {
+				m++;
+				for (k = 0; k < INPUT_MANY; k++) {
+					peasycap->inputset[k].\
+							standard_offset = i;
+				}
+				mask = easycap_standard[i].mask;
+			}
+			i++;
 		}
+	} else {
+		i = 0;
+		m = 0;
+		mask = 0;
+		while (0xFFFF != easycap_standard[i].mask) {
+			if (PAL_BGHIN == easycap_standard[i].\
+							v4l2_standard.index) {
+				m++;
+				for (k = 0; k < INPUT_MANY; k++) {
+					peasycap->inputset[k].\
+							standard_offset = i;
+				}
+				mask = easycap_standard[i].mask;
+			}
+			i++;
+		}
+	}
 
-	JOT(8, "kref_get() with %i=peasycap->kref.refcount.counter\n", \
-					(int)peasycap->kref.refcount.counter);
-	kref_get(&peasycap->kref);
+	if (1 != m) {
+		SAM("MISTAKE: easycap.inputset[].standard_offset " \
+						"unpopulated, %i=m\n", m);
+		return -ENOENT;
+	}
+
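+	/*
+	 *  LOCATE THE 640x480 UYVY V4L2_FIELD_NONE ENTRY OF easycap_format[]
+	 *  WHICH MATCHES THE MASK JUST FOUND, AND RECORD ITS OFFSET FOR
+	 *  EVERY INPUT.
+	*/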
+	peasycap_format = &easycap_format[0];
+	i = 0;
+	m = 0;
+	while (0 != peasycap_format->v4l2_format.fmt.pix.width) {
+		if (((peasycap_format->mask & 0x0F) == (mask & 0x0F)) && \
+				(peasycap_format->\
+					v4l2_format.fmt.pix.field == \
+							V4L2_FIELD_NONE) && \
+				(peasycap_format->\
+					v4l2_format.fmt.pix.pixelformat == \
+							V4L2_PIX_FMT_UYVY) && \
+				(peasycap_format->\
+					v4l2_format.fmt.pix.width  == \
+							640) && \
+				(peasycap_format->\
+					v4l2_format.fmt.pix.height == 480)) {
+			m++;
+			for (k = 0; k < INPUT_MANY; k++)
+				peasycap->inputset[k].format_offset = i;
+			break;
+		}
+		peasycap_format++;
+		i++;
+	}
+	if (1 != m) {
+		SAM("MISTAKE: easycap.inputset[].format_offset unpopulated\n");
+		return -ENOENT;
+	}
+
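+	/*
+	 *  COPY THE DEFAULT BRIGHTNESS, CONTRAST, SATURATION AND HUE FROM
+	 *  easycap_control[] TO EVERY MEMBER OF easycap.inputset[].
+	*/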
+	i = 0;
+	m = 0;
+	while (0xFFFFFFFF != easycap_control[i].id) {
+		value = easycap_control[i].default_value;
+		if (V4L2_CID_BRIGHTNESS == easycap_control[i].id) {
+			m++;
+			for (k = 0; k < INPUT_MANY; k++)
+				peasycap->inputset[k].brightness = value;
+		} else if (V4L2_CID_CONTRAST == easycap_control[i].id) {
+			m++;
+			for (k = 0; k < INPUT_MANY; k++)
+				peasycap->inputset[k].contrast = value;
+		} else if (V4L2_CID_SATURATION == easycap_control[i].id) {
+			m++;
+			for (k = 0; k < INPUT_MANY; k++)
+				peasycap->inputset[k].saturation = value;
+		} else if (V4L2_CID_HUE == easycap_control[i].id) {
+			m++;
+			for (k = 0; k < INPUT_MANY; k++)
+				peasycap->inputset[k].hue = value;
+		}
+		i++;
+	}
+	if (4 != m) {
+		SAM("MISTAKE: easycap.inputset[].brightness,... " \
+						"underpopulated\n");
+		return -ENOENT;
+	}
+	for (k = 0; k < INPUT_MANY; k++)
+		peasycap->inputset[k].input = k;
+	JOM(4, "populated easycap.inputset[]\n");
+	JOM(4, "finished initialization\n");
+} else {
+/*---------------------------------------------------------------------------*/
+	/*
+	 *  FOR INTERFACES 1 AND 2 THE POINTER peasycap IS OBTAINED BY ASSUMING
+	 *  THAT dongle_this HAS NOT CHANGED SINCE INTERFACE 0 WAS PROBED.  IF
+	 *  THIS IS NOT THE CASE, FOR EXAMPLE WHEN TWO EASYCAPs ARE PLUGGED IN
+	 *  SIMULTANEOUSLY, THERE WILL BE SERIOUS TROUBLE.
+	*/
+/*---------------------------------------------------------------------------*/
+	if ((0 > dongle_this) || (DONGLE_MANY <= dongle_this)) {
+		SAY("ERROR: bad dongle count\n");
+		return -EFAULT;
+	}
+	peasycap = easycap_dongle[dongle_this].peasycap;
+	JOT(8, "intf[%i]: easycap_dongle[%i].peasycap-->peasycap\n", \
+						bInterfaceNumber, dongle_this);
+
+	if ((struct easycap *)NULL == peasycap) {
+		SAY("ERROR: peasycap is NULL when probing interface %i\n", \
+							bInterfaceNumber);
+		return -EFAULT;
+	}
 }
 /*---------------------------------------------------------------------------*/
 if ((USB_CLASS_VIDEO == bInterfaceClass) || \
-	(USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
+		(USB_CLASS_VENDOR_SPEC == bInterfaceClass)) {
 	if (-1 == peasycap->video_interface) {
 		peasycap->video_interface = bInterfaceNumber;
-		JOT(4, "setting peasycap->video_interface=%i\n", \
+		JOM(4, "setting peasycap->video_interface=%i\n", \
 						peasycap->video_interface);
 	} else {
 		if (peasycap->video_interface != bInterfaceNumber) {
-			SAY("ERROR: attempting to reset " \
+			SAM("ERROR: attempting to reset " \
 					"peasycap->video_interface\n");
-			SAY("...... continuing with " \
+			SAM("...... continuing with " \
 					"%i=peasycap->video_interface\n", \
 					peasycap->video_interface);
 		}
@@ -3225,13 +3819,13 @@
 						(0x02 == bInterfaceSubClass)) {
 	if (-1 == peasycap->audio_interface) {
 		peasycap->audio_interface = bInterfaceNumber;
-		JOT(4, "setting peasycap->audio_interface=%i\n", \
+		JOM(4, "setting peasycap->audio_interface=%i\n", \
 						 peasycap->audio_interface);
 	} else {
 		if (peasycap->audio_interface != bInterfaceNumber) {
-			SAY("ERROR: attempting to reset " \
+			SAM("ERROR: attempting to reset " \
 					"peasycap->audio_interface\n");
-			SAY("...... continuing with " \
+			SAM("...... continuing with " \
 					"%i=peasycap->audio_interface\n", \
 					peasycap->audio_interface);
 		}
@@ -3244,37 +3838,35 @@
  */
 /*---------------------------------------------------------------------------*/
 isokalt = 0;
-isokepn = 0;
-isokmps = 0;
 
 for (i = 0; i < pusb_interface->num_altsetting; i++) {
 	pusb_host_interface = &(pusb_interface->altsetting[i]);
 	if ((struct usb_host_interface *)NULL == pusb_host_interface) {
-		SAY("ERROR: pusb_host_interface is NULL\n");
+		SAM("ERROR: pusb_host_interface is NULL\n");
 		return -EFAULT;
 	}
 	pusb_interface_descriptor = &(pusb_host_interface->desc);
 	if ((struct usb_interface_descriptor *)NULL == \
 						pusb_interface_descriptor) {
-		SAY("ERROR: pusb_interface_descriptor is NULL\n");
+		SAM("ERROR: pusb_interface_descriptor is NULL\n");
 		return -EFAULT;
 	}
 
-	JOT(4, "intf[%i]alt[%i]: desc.bDescriptorType=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bDescriptorType=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bDescriptorType);
-	JOT(4, "intf[%i]alt[%i]: desc.bInterfaceNumber=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bInterfaceNumber=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceNumber);
-	JOT(4, "intf[%i]alt[%i]: desc.bAlternateSetting=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bAlternateSetting=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bAlternateSetting);
-	JOT(4, "intf[%i]alt[%i]: desc.bNumEndpoints=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bNumEndpoints=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bNumEndpoints);
-	JOT(4, "intf[%i]alt[%i]: desc.bInterfaceClass=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bInterfaceClass=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceClass);
-	JOT(4, "intf[%i]alt[%i]: desc.bInterfaceSubClass=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bInterfaceSubClass=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceSubClass);
-	JOT(4, "intf[%i]alt[%i]: desc.bInterfaceProtocol=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.bInterfaceProtocol=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->bInterfaceProtocol);
-	JOT(4, "intf[%i]alt[%i]: desc.iInterface=0x%02X\n", \
+	JOM(4, "intf[%i]alt[%i]: desc.iInterface=0x%02X\n", \
 	bInterfaceNumber, i, pusb_interface_descriptor->iInterface);
 
 	ISOCwMaxPacketSize = -1;
@@ -3285,86 +3877,80 @@
 	INTbEndpointAddress = 0;
 
 	if (0 == pusb_interface_descriptor->bNumEndpoints)
-				JOT(4, "intf[%i]alt[%i] has no endpoints\n", \
+				JOM(4, "intf[%i]alt[%i] has no endpoints\n", \
 							bInterfaceNumber, i);
 /*---------------------------------------------------------------------------*/
 	for (j = 0; j < pusb_interface_descriptor->bNumEndpoints; j++) {
 		pepd = &(pusb_host_interface->endpoint[j].desc);
 		if ((struct usb_endpoint_descriptor *)NULL == pepd) {
-			SAY("ERROR:  pepd is NULL.\n");
-			SAY("...... skipping\n");
+			SAM("ERROR:  pepd is NULL.\n");
+			SAM("...... skipping\n");
 			continue;
 		}
 		wMaxPacketSize = le16_to_cpu(pepd->wMaxPacketSize);
 		bEndpointAddress = pepd->bEndpointAddress;
 
-		JOT(4, "intf[%i]alt[%i]end[%i]: bEndpointAddress=0x%X\n", \
+		JOM(4, "intf[%i]alt[%i]end[%i]: bEndpointAddress=0x%X\n", \
 				bInterfaceNumber, i, j, \
 				pepd->bEndpointAddress);
-		JOT(4, "intf[%i]alt[%i]end[%i]: bmAttributes=0x%X\n", \
+		JOM(4, "intf[%i]alt[%i]end[%i]: bmAttributes=0x%X\n", \
 				bInterfaceNumber, i, j, \
 				pepd->bmAttributes);
-		JOT(4, "intf[%i]alt[%i]end[%i]: wMaxPacketSize=%i\n", \
+		JOM(4, "intf[%i]alt[%i]end[%i]: wMaxPacketSize=%i\n", \
 				bInterfaceNumber, i, j, \
 				pepd->wMaxPacketSize);
-		JOT(4, "intf[%i]alt[%i]end[%i]: bInterval=%i\n",
+		JOM(4, "intf[%i]alt[%i]end[%i]: bInterval=%i\n",
 				bInterfaceNumber, i, j, \
 				pepd->bInterval);
 
 		if (pepd->bEndpointAddress & USB_DIR_IN) {
-			JOT(4, "intf[%i]alt[%i]end[%i] is an  IN  endpoint\n",\
+			JOM(4, "intf[%i]alt[%i]end[%i] is an  IN  endpoint\n",\
 						bInterfaceNumber, i, j);
 			isin = 1;
 		} else {
-			JOT(4, "intf[%i]alt[%i]end[%i] is an  OUT endpoint\n",\
+			JOM(4, "intf[%i]alt[%i]end[%i] is an  OUT endpoint\n",\
 						bInterfaceNumber, i, j);
-			SAY("ERROR: OUT endpoint unexpected\n");
-			SAY("...... continuing\n");
+			SAM("ERROR: OUT endpoint unexpected\n");
+			SAM("...... continuing\n");
 			isin = 0;
 		}
 		if ((pepd->bmAttributes & \
 				USB_ENDPOINT_XFERTYPE_MASK) == \
 				USB_ENDPOINT_XFER_ISOC) {
-			JOT(4, "intf[%i]alt[%i]end[%i] is an ISOC endpoint\n",\
+			JOM(4, "intf[%i]alt[%i]end[%i] is an ISOC endpoint\n",\
 						bInterfaceNumber, i, j);
 			if (isin) {
 				switch (bInterfaceClass) {
 				case USB_CLASS_VIDEO:
 				case USB_CLASS_VENDOR_SPEC: {
 					if (!peasycap) {
-						SAY("MISTAKE: " \
+						SAM("MISTAKE: " \
 							"peasycap is NULL\n");
 						return -EFAULT;
 					}
 					if (pepd->wMaxPacketSize) {
 						if (8 > isokalt) {
 							okalt[isokalt] = i;
-							JOT(4,\
+							JOM(4,\
 							"%i=okalt[%i]\n", \
 							okalt[isokalt], \
 							isokalt);
-							isokalt++;
-						}
-						if (8 > isokepn) {
-							okepn[isokepn] = \
+							okepn[isokalt] = \
 							pepd->\
 							bEndpointAddress & \
 							0x0F;
-							JOT(4,\
+							JOM(4,\
 							"%i=okepn[%i]\n", \
-							okepn[isokepn], \
-							isokepn);
-							isokepn++;
-						}
-						if (8 > isokmps) {
-							okmps[isokmps] = \
+							okepn[isokalt], \
+							isokalt);
+							okmps[isokalt] = \
 							le16_to_cpu(pepd->\
 							wMaxPacketSize);
-							JOT(4,\
+							JOM(4,\
 							"%i=okmps[%i]\n", \
-							okmps[isokmps], \
-							isokmps);
-							isokmps++;
+							okmps[isokalt], \
+							isokalt);
+							isokalt++;
 						}
 					} else {
 						if (-1 == peasycap->\
@@ -3372,16 +3958,16 @@
 							peasycap->\
 							video_altsetting_off =\
 									 i;
-							JOT(4, "%i=video_" \
+							JOM(4, "%i=video_" \
 							"altsetting_off " \
 								"<====\n", \
 							peasycap->\
 							video_altsetting_off);
 						} else {
-							SAY("ERROR: peasycap" \
+							SAM("ERROR: peasycap" \
 							"->video_altsetting_" \
 							"off already set\n");
-							SAY("...... " \
+							SAM("...... " \
 							"continuing with " \
 							"%i=peasycap->video_" \
 							"altsetting_off\n", \
@@ -3395,39 +3981,33 @@
 					if (0x02 != bInterfaceSubClass)
 						break;
 					if (!peasycap) {
-						SAY("MISTAKE: " \
+						SAM("MISTAKE: " \
 						"peasycap is NULL\n");
 						return -EFAULT;
 					}
 					if (pepd->wMaxPacketSize) {
 						if (8 > isokalt) {
 							okalt[isokalt] = i ;
-							JOT(4,\
+							JOM(4,\
 							"%i=okalt[%i]\n", \
 							okalt[isokalt], \
 							isokalt);
-							isokalt++;
-						}
-						if (8 > isokepn) {
-							okepn[isokepn] = \
+							okepn[isokalt] = \
 							pepd->\
 							bEndpointAddress & \
 							0x0F;
-							JOT(4,\
+							JOM(4,\
 							"%i=okepn[%i]\n", \
-							okepn[isokepn], \
-							isokepn);
-							isokepn++;
-						}
-						if (8 > isokmps) {
-							okmps[isokmps] = \
+							okepn[isokalt], \
+							isokalt);
+							okmps[isokalt] = \
 							le16_to_cpu(pepd->\
 							wMaxPacketSize);
-							JOT(4,\
+							JOM(4,\
 							"%i=okmps[%i]\n",\
-							okmps[isokmps], \
-							isokmps);
-							isokmps++;
+							okmps[isokalt], \
+							isokalt);
+							isokalt++;
 						}
 					} else {
 						if (-1 == peasycap->\
@@ -3435,16 +4015,16 @@
 							peasycap->\
 							audio_altsetting_off =\
 									 i;
-							JOT(4, "%i=audio_" \
+							JOM(4, "%i=audio_" \
 							"altsetting_off " \
 							"<====\n", \
 							peasycap->\
 							audio_altsetting_off);
 						} else {
-							SAY("ERROR: peasycap" \
+							SAM("ERROR: peasycap" \
 							"->audio_altsetting_" \
 							"off already set\n");
-							SAY("...... " \
+							SAM("...... " \
 							"continuing with " \
 							"%i=peasycap->\
 							audio_altsetting_" \
@@ -3462,19 +4042,19 @@
 		} else if ((pepd->bmAttributes & \
 						USB_ENDPOINT_XFERTYPE_MASK) ==\
 						USB_ENDPOINT_XFER_BULK) {
-			JOT(4, "intf[%i]alt[%i]end[%i] is a  BULK endpoint\n",\
+			JOM(4, "intf[%i]alt[%i]end[%i] is a  BULK endpoint\n",\
 						bInterfaceNumber, i, j);
 		} else if ((pepd->bmAttributes & \
 						USB_ENDPOINT_XFERTYPE_MASK) ==\
 						USB_ENDPOINT_XFER_INT) {
-			JOT(4, "intf[%i]alt[%i]end[%i] is an  INT endpoint\n",\
+			JOM(4, "intf[%i]alt[%i]end[%i] is an  INT endpoint\n",\
 						bInterfaceNumber, i, j);
 		} else {
-			JOT(4, "intf[%i]alt[%i]end[%i] is a  CTRL endpoint\n",\
+			JOM(4, "intf[%i]alt[%i]end[%i] is a  CTRL endpoint\n",\
 						bInterfaceNumber, i, j);
 		}
 		if (0 == pepd->wMaxPacketSize) {
-			JOT(4, "intf[%i]alt[%i]end[%i] " \
+			JOM(4, "intf[%i]alt[%i]end[%i] " \
 						"has zero packet size\n", \
 						bInterfaceNumber, i, j);
 		}
@@ -3485,7 +4065,7 @@
  *  PERFORM INITIALIZATION OF THE PROBED INTERFACE
  */
 /*---------------------------------------------------------------------------*/
-JOT(4, "initialization begins for interface %i\n", \
+JOM(4, "initialization begins for interface %i\n", \
 				pusb_interface_descriptor->bInterfaceNumber);
 switch (bInterfaceNumber) {
 /*---------------------------------------------------------------------------*/
@@ -3495,89 +4075,78 @@
 /*---------------------------------------------------------------------------*/
 case 0: {
 	if (!peasycap) {
-		SAY("MISTAKE: peasycap is NULL\n");
+		SAM("MISTAKE: peasycap is NULL\n");
 		return -EFAULT;
 	}
 	if (!isokalt) {
-		SAY("ERROR:  no viable video_altsetting_on\n");
+		SAM("ERROR:  no viable video_altsetting_on\n");
 		return -ENOENT;
 	} else {
 		peasycap->video_altsetting_on = okalt[isokalt - 1];
-		JOT(4, "%i=video_altsetting_on <====\n", \
+		JOM(4, "%i=video_altsetting_on <====\n", \
 					peasycap->video_altsetting_on);
 	}
-	if (!isokepn) {
-		SAY("ERROR:  no viable video_endpointnumber\n");
-		return -ENOENT;
-	} else {
-		peasycap->video_endpointnumber = okepn[isokepn - 1];
-		JOT(4, "%i=video_endpointnumber\n", \
-					peasycap->video_endpointnumber);
-		}
-	if (!isokmps) {
-		SAY("ERROR:  no viable video_maxpacketsize\n");
-		return -ENOENT;
 /*---------------------------------------------------------------------------*/
 /*
  *  DECIDE THE VIDEO STREAMING PARAMETERS
  */
 /*---------------------------------------------------------------------------*/
+	peasycap->video_endpointnumber = okepn[isokalt - 1];
+	JOM(4, "%i=video_endpointnumber\n", peasycap->video_endpointnumber);
+	maxpacketsize = okmps[isokalt - 1];
+	if (USB_2_0_MAXPACKETSIZE > maxpacketsize) {
+		peasycap->video_isoc_maxframesize = maxpacketsize;
 	} else {
-		maxpacketsize = okmps[isokmps - 1] - 1024;
-		if (USB_2_0_MAXPACKETSIZE > maxpacketsize) {
-			peasycap->video_isoc_maxframesize = maxpacketsize;
-		} else {
-			peasycap->video_isoc_maxframesize = \
-							USB_2_0_MAXPACKETSIZE;
-		}
-		JOT(4, "%i=video_isoc_maxframesize\n", \
-					peasycap->video_isoc_maxframesize);
-		if (0 >= peasycap->video_isoc_maxframesize) {
-			SAY("ERROR:  bad video_isoc_maxframesize\n");
-			return -ENOENT;
-		}
-		peasycap->video_isoc_framesperdesc = VIDEO_ISOC_FRAMESPERDESC;
-		JOT(4, "%i=video_isoc_framesperdesc\n", \
-					peasycap->video_isoc_framesperdesc);
-		if (0 >= peasycap->video_isoc_framesperdesc) {
-			SAY("ERROR:  bad video_isoc_framesperdesc\n");
-			return -ENOENT;
-		}
-		peasycap->video_isoc_buffer_size = \
-					peasycap->video_isoc_maxframesize * \
-					peasycap->video_isoc_framesperdesc;
-		JOT(4, "%i=video_isoc_buffer_size\n", \
-					peasycap->video_isoc_buffer_size);
-		if ((PAGE_SIZE << VIDEO_ISOC_ORDER) < \
-					peasycap->video_isoc_buffer_size) {
-			SAY("MISTAKE: " \
-				"peasycap->video_isoc_buffer_size too big\n");
-			return -EFAULT;
-		}
+		peasycap->video_isoc_maxframesize = \
+						USB_2_0_MAXPACKETSIZE;
+	}
+	JOM(4, "%i=video_isoc_maxframesize\n", \
+				peasycap->video_isoc_maxframesize);
+	if (0 >= peasycap->video_isoc_maxframesize) {
+		SAM("ERROR:  bad video_isoc_maxframesize\n");
+		SAM("        possibly because port is USB 1.1\n");
+		return -ENOENT;
+	}
+	peasycap->video_isoc_framesperdesc = VIDEO_ISOC_FRAMESPERDESC;
+	JOM(4, "%i=video_isoc_framesperdesc\n", \
+				peasycap->video_isoc_framesperdesc);
+	if (0 >= peasycap->video_isoc_framesperdesc) {
+		SAM("ERROR:  bad video_isoc_framesperdesc\n");
+		return -ENOENT;
+	}
+	peasycap->video_isoc_buffer_size = \
+				peasycap->video_isoc_maxframesize * \
+				peasycap->video_isoc_framesperdesc;
+	JOM(4, "%i=video_isoc_buffer_size\n", \
+				peasycap->video_isoc_buffer_size);
+	if ((PAGE_SIZE << VIDEO_ISOC_ORDER) < \
+				peasycap->video_isoc_buffer_size) {
+		SAM("MISTAKE: peasycap->video_isoc_buffer_size too big\n");
+		return -EFAULT;
 	}
 /*---------------------------------------------------------------------------*/
 	if (-1 == peasycap->video_interface) {
-		SAY("MISTAKE:  video_interface is unset\n");
+		SAM("MISTAKE:  video_interface is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->video_altsetting_on) {
-		SAY("MISTAKE:  video_altsetting_on is unset\n");
+		SAM("MISTAKE:  video_altsetting_on is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->video_altsetting_off) {
-		SAY("MISTAKE:  video_interface_off is unset\n");
+		SAM("MISTAKE:  video_interface_off is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->video_endpointnumber) {
-		SAY("MISTAKE:  video_endpointnumber is unset\n");
+		SAM("MISTAKE:  video_endpointnumber is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->video_isoc_maxframesize) {
-		SAY("MISTAKE:  video_isoc_maxframesize is unset\n");
+		SAM("MISTAKE:  video_isoc_maxframesize is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->video_isoc_buffer_size) {
-		SAY("MISTAKE:  video_isoc_buffer_size is unset\n");
+		SAM("MISTAKE:  video_isoc_buffer_size is unset\n");
 		return -EFAULT;
 	}
 /*---------------------------------------------------------------------------*/
@@ -3588,20 +4157,20 @@
 	INIT_LIST_HEAD(&(peasycap->urb_video_head));
 	peasycap->purb_video_head = &(peasycap->urb_video_head);
 /*---------------------------------------------------------------------------*/
-	JOT(4, "allocating %i frame buffers of size %li\n",  \
+	JOM(4, "allocating %i frame buffers of size %li\n",  \
 			FRAME_BUFFER_MANY, (long int)FRAME_BUFFER_SIZE);
-	JOT(4, ".... each scattered over %li pages\n", \
+	JOM(4, ".... each scattered over %li pages\n", \
 						FRAME_BUFFER_SIZE/PAGE_SIZE);
 
 	for (k = 0;  k < FRAME_BUFFER_MANY;  k++) {
 		for (m = 0;  m < FRAME_BUFFER_SIZE/PAGE_SIZE;  m++) {
 			if ((void *)NULL != peasycap->frame_buffer[k][m].pgo)
-				SAY("attempting to reallocate frame " \
+				SAM("attempting to reallocate frame " \
 								" buffers\n");
 			else {
 				pbuf = (void *)__get_free_page(GFP_KERNEL);
 				if ((void *)NULL == pbuf) {
-					SAY("ERROR: Could not allocate frame "\
+					SAM("ERROR: Could not allocate frame "\
 						"buffer %i page %i\n", k, m);
 					return -ENOMEM;
 				} else
@@ -3615,23 +4184,23 @@
 
 	peasycap->frame_fill = 0;
 	peasycap->frame_read = 0;
-	JOT(4, "allocation of frame buffers done:  %i pages\n", k * \
+	JOM(4, "allocation of frame buffers done:  %i pages\n", k * \
 								m);
 /*---------------------------------------------------------------------------*/
-	JOT(4, "allocating %i field buffers of size %li\n",  \
+	JOM(4, "allocating %i field buffers of size %li\n",  \
 			FIELD_BUFFER_MANY, (long int)FIELD_BUFFER_SIZE);
-	JOT(4, ".... each scattered over %li pages\n", \
+	JOM(4, ".... each scattered over %li pages\n", \
 					FIELD_BUFFER_SIZE/PAGE_SIZE);
 
 	for (k = 0;  k < FIELD_BUFFER_MANY;  k++) {
 		for (m = 0;  m < FIELD_BUFFER_SIZE/PAGE_SIZE;  m++) {
 			if ((void *)NULL != peasycap->field_buffer[k][m].pgo) {
-				SAY("ERROR: attempting to reallocate " \
+				SAM("ERROR: attempting to reallocate " \
 							"field buffers\n");
 			} else {
 				pbuf = (void *) __get_free_page(GFP_KERNEL);
 				if ((void *)NULL == pbuf) {
-					SAY("ERROR: Could not allocate field" \
+					SAM("ERROR: Could not allocate field" \
 						" buffer %i page %i\n", k, m);
 					return -ENOMEM;
 					}
@@ -3647,18 +4216,18 @@
 	peasycap->field_fill = 0;
 	peasycap->field_page = 0;
 	peasycap->field_read = 0;
-	JOT(4, "allocation of field buffers done:  %i pages\n", k * \
+	JOM(4, "allocation of field buffers done:  %i pages\n", k * \
 								m);
 /*---------------------------------------------------------------------------*/
-	JOT(4, "allocating %i isoc video buffers of size %i\n",  \
+	JOM(4, "allocating %i isoc video buffers of size %i\n",  \
 					VIDEO_ISOC_BUFFER_MANY, \
 					peasycap->video_isoc_buffer_size);
-	JOT(4, ".... each occupying contiguous memory pages\n");
+	JOM(4, ".... each occupying contiguous memory pages\n");
 
 	for (k = 0;  k < VIDEO_ISOC_BUFFER_MANY; k++) {
 		pbuf = (void *)__get_free_pages(GFP_KERNEL, VIDEO_ISOC_ORDER);
 		if (NULL == pbuf) {
-			SAY("ERROR: Could not allocate isoc video buffer " \
+			SAM("ERROR: Could not allocate isoc video buffer " \
 								"%i\n", k);
 			return -ENOMEM;
 		} else
@@ -3670,26 +4239,26 @@
 					peasycap->video_isoc_buffer_size;
 		peasycap->video_isoc_buffer[k].kount = k;
 	}
-	JOT(4, "allocation of isoc video buffers done: %i pages\n", \
+	JOM(4, "allocation of isoc video buffers done: %i pages\n", \
 					k * (0x01 << VIDEO_ISOC_ORDER));
 /*---------------------------------------------------------------------------*/
 /*
  *  ALLOCATE AND INITIALIZE MULTIPLE struct urb ...
  */
 /*---------------------------------------------------------------------------*/
-	JOT(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
-	JOT(4, "using %i=peasycap->video_isoc_framesperdesc\n", \
+	JOM(4, "allocating %i struct urb.\n", VIDEO_ISOC_BUFFER_MANY);
+	JOM(4, "using %i=peasycap->video_isoc_framesperdesc\n", \
 					peasycap->video_isoc_framesperdesc);
-	JOT(4, "using %i=peasycap->video_isoc_maxframesize\n", \
+	JOM(4, "using %i=peasycap->video_isoc_maxframesize\n", \
 					peasycap->video_isoc_maxframesize);
-	JOT(4, "using %i=peasycap->video_isoc_buffer_sizen", \
+	JOM(4, "using %i=peasycap->video_isoc_buffer_size\n", \
 					peasycap->video_isoc_buffer_size);
 
 	for (k = 0;  k < VIDEO_ISOC_BUFFER_MANY; k++) {
 		purb = usb_alloc_urb(peasycap->video_isoc_framesperdesc, \
 								GFP_KERNEL);
 		if (NULL == purb) {
-			SAY("ERROR: usb_alloc_urb returned NULL for buffer " \
+			SAM("ERROR: usb_alloc_urb returned NULL for buffer " \
 								"%i\n", k);
 			return -ENOMEM;
 		} else
@@ -3697,7 +4266,7 @@
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 		pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
 		if (NULL == pdata_urb) {
-			SAY("ERROR: Could not allocate struct data_urb.\n");
+			SAM("ERROR: Could not allocate struct data_urb.\n");
 			return -ENOMEM;
 		} else
 			peasycap->allocation_video_struct += \
@@ -3714,30 +4283,30 @@
  */
 /*---------------------------------------------------------------------------*/
 		if (!k) {
-			JOT(4, "initializing video urbs thus:\n");
-			JOT(4, "  purb->interval = 1;\n");
-			JOT(4, "  purb->dev = peasycap->pusb_device;\n");
-			JOT(4, "  purb->pipe = usb_rcvisocpipe" \
+			JOM(4, "initializing video urbs thus:\n");
+			JOM(4, "  purb->interval = 1;\n");
+			JOM(4, "  purb->dev = peasycap->pusb_device;\n");
+			JOM(4, "  purb->pipe = usb_rcvisocpipe" \
 					"(peasycap->pusb_device,%i);\n", \
 					peasycap->video_endpointnumber);
-			JOT(4, "  purb->transfer_flags = URB_ISO_ASAP;\n");
-			JOT(4, "  purb->transfer_buffer = peasycap->" \
+			JOM(4, "  purb->transfer_flags = URB_ISO_ASAP;\n");
+			JOM(4, "  purb->transfer_buffer = peasycap->" \
 					"video_isoc_buffer[.].pgo;\n");
-			JOT(4, "  purb->transfer_buffer_length = %i;\n", \
+			JOM(4, "  purb->transfer_buffer_length = %i;\n", \
 					peasycap->video_isoc_buffer_size);
-			JOT(4, "  purb->complete = easycap_complete;\n");
-			JOT(4, "  purb->context = peasycap;\n");
-			JOT(4, "  purb->start_frame = 0;\n");
-			JOT(4, "  purb->number_of_packets = %i;\n", \
+			JOM(4, "  purb->complete = easycap_complete;\n");
+			JOM(4, "  purb->context = peasycap;\n");
+			JOM(4, "  purb->start_frame = 0;\n");
+			JOM(4, "  purb->number_of_packets = %i;\n", \
 					peasycap->video_isoc_framesperdesc);
-			JOT(4, "  for (j = 0; j < %i; j++)\n", \
+			JOM(4, "  for (j = 0; j < %i; j++)\n", \
 					peasycap->video_isoc_framesperdesc);
-			JOT(4, "    {\n");
-			JOT(4, "    purb->iso_frame_desc[j].offset = j*%i;\n",\
+			JOM(4, "    {\n");
+			JOM(4, "    purb->iso_frame_desc[j].offset = j*%i;\n",\
 					peasycap->video_isoc_maxframesize);
-			JOT(4, "    purb->iso_frame_desc[j].length = %i;\n", \
+			JOM(4, "    purb->iso_frame_desc[j].length = %i;\n", \
 					peasycap->video_isoc_maxframesize);
-			JOT(4, "    }\n");
+			JOM(4, "    }\n");
 		}
 
 		purb->interval = 1;
@@ -3759,13 +4328,33 @@
 					peasycap->video_isoc_maxframesize;
 		}
 	}
-	JOT(4, "allocation of %i struct urb done.\n", k);
+	JOM(4, "allocation of %i struct urb done.\n", k);
 /*--------------------------------------------------------------------------*/
 /*
  *  SAVE POINTER peasycap IN THIS INTERFACE.
  */
 /*--------------------------------------------------------------------------*/
 	usb_set_intfdata(pusb_interface, peasycap);
+/*---------------------------------------------------------------------------*/
+/*
+ *  IT IS ESSENTIAL TO INITIALIZE THE HARDWARE BEFORE, RATHER THAN AFTER,
+ *  THE DEVICE IS REGISTERED, BECAUSE SOME VERSIONS OF THE videodev MODULE
+ *  CALL easycap_open() IMMEDIATELY AFTER REGISTRATION, CAUSING A CLASH.
+ *  BEWARE.
+*/
+/*---------------------------------------------------------------------------*/
+#if defined(PREFER_NTSC)
+	peasycap->ntsc = true;
+	JOM(8, "defaulting initially to NTSC\n");
+#else
+	peasycap->ntsc = false;
+	JOM(8, "defaulting initially to PAL\n");
+#endif /*PREFER_NTSC*/
+	rc = reset(peasycap);
+	if (0 != rc) {
+		SAM("ERROR: reset() returned %i\n", rc);
+		return -EFAULT;
+	}
 /*--------------------------------------------------------------------------*/
 /*
  *  THE VIDEO DEVICE CAN BE REGISTERED NOW, AS IT IS READY.
@@ -3776,48 +4365,58 @@
 		err("Not able to get a minor for this device");
 		usb_set_intfdata(pusb_interface, NULL);
 		return -ENODEV;
-	} else
+	} else {
 		(peasycap->registered_video)++;
-	SAY("easycap attached to minor #%d\n", pusb_interface->minor);
-	break;
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
-#else
-	pvideo_device = (struct video_device *)\
-			kzalloc(sizeof(struct video_device), GFP_KERNEL);
-	if ((struct video_device *)NULL == pvideo_device) {
-		SAY("ERROR: Could not allocate structure video_device\n");
-		return -ENOMEM;
+		SAM("easycap attached to minor #%d\n", pusb_interface->minor);
+		break;
 	}
-	if (VIDEO_DEVICE_MANY <= video_device_many) {
-		SAY("ERROR: Too many /dev/videos\n");
-		return -ENOMEM;
-	}
-	pvideo_array[video_device_many] = pvideo_device;  video_device_many++;
-
-	strcpy(&pvideo_device->name[0], "easycapdc60");
-#if defined(EASYCAP_NEEDS_V4L2_FOPS)
-	pvideo_device->fops = &v4l2_fops;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #else
-	pvideo_device->fops = &easycap_fops;
-#endif /*EASYCAP_NEEDS_V4L2_FOPS*/
-	pvideo_device->minor = -1;
-	pvideo_device->release = (void *)(&videodev_release);
-
-	video_set_drvdata(pvideo_device, (void *)peasycap);
-
-	rc = video_register_device(pvideo_device, VFL_TYPE_GRABBER, -1);
-	if (0 != rc) {
-		err("Not able to register with videodev");
-		videodev_release(pvideo_device);
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+	if (0 != (v4l2_device_register(&(pusb_interface->dev), \
+						&(peasycap->v4l2_device)))) {
+		SAM("v4l2_device_register() failed\n");
 		return -ENODEV;
 	} else {
-		peasycap->pvideo_device = pvideo_device;
-		(peasycap->registered_video)++;
-		JOT(4, "registered with videodev: %i=minor\n", \
-							pvideo_device->minor);
+		JOM(4, "registered device instance: %s\n", \
+					&(peasycap->v4l2_device.name[0]));
 	}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+/*
+ *                                   FIXME
+ *
+ *
+ *  THIS IS BELIEVED TO BE HARMLESS, BUT MAY WELL BE UNNECESSARY OR WRONG:
+*/
+/*---------------------------------------------------------------------------*/
+	peasycap->video_device.v4l2_dev = (struct v4l2_device *)NULL;
+/*---------------------------------------------------------------------------*/
+
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+
+	strcpy(&peasycap->video_device.name[0], "easycapdc60");
+#if defined(EASYCAP_NEEDS_V4L2_FOPS)
+	peasycap->video_device.fops = &v4l2_fops;
+#else
+	peasycap->video_device.fops = &easycap_fops;
+#endif /*EASYCAP_NEEDS_V4L2_FOPS*/
+	peasycap->video_device.minor = -1;
+	peasycap->video_device.release = (void *)(&videodev_release);
+
+	video_set_drvdata(&(peasycap->video_device), (void *)peasycap);
+
+	if (0 != (video_register_device(&(peasycap->video_device), \
+						VFL_TYPE_GRABBER, -1))) {
+		err("Not able to register with videodev");
+		videodev_release(&(peasycap->video_device));
+		return -ENODEV;
+	} else {
+		(peasycap->registered_video)++;
+		SAM("registered with videodev: %i=minor\n", \
+						peasycap->video_device.minor);
+	}
 #endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
 	break;
 }
 /*--------------------------------------------------------------------------*/
@@ -3827,125 +4426,118 @@
  */
 /*--------------------------------------------------------------------------*/
 case 1: {
+	if (!peasycap) {
+		SAM("ERROR: peasycap is NULL\n");
+		return -EFAULT;
+	}
 /*--------------------------------------------------------------------------*/
 /*
  *  SAVE POINTER peasycap IN INTERFACE 1
  */
 /*--------------------------------------------------------------------------*/
 	usb_set_intfdata(pusb_interface, peasycap);
-	JOT(4, "no initialization required for interface %i\n", \
+	JOM(4, "no initialization required for interface %i\n", \
 				pusb_interface_descriptor->bInterfaceNumber);
 	break;
 }
 /*--------------------------------------------------------------------------*/
 case 2: {
 	if (!peasycap) {
-		SAY("MISTAKE: peasycap is NULL\n");
+		SAM("MISTAKE: peasycap is NULL\n");
 		return -EFAULT;
 	}
 	if (!isokalt) {
-		SAY("ERROR:  no viable audio_altsetting_on\n");
+		SAM("ERROR:  no viable audio_altsetting_on\n");
 		return -ENOENT;
 	} else {
 		peasycap->audio_altsetting_on = okalt[isokalt - 1];
-		JOT(4, "%i=audio_altsetting_on <====\n", \
+		JOM(4, "%i=audio_altsetting_on <====\n", \
 						peasycap->audio_altsetting_on);
 	}
-	if (!isokepn) {
-		SAY("ERROR:  no viable audio_endpointnumber\n");
-		return -ENOENT;
-	} else {
-		peasycap->audio_endpointnumber = okepn[isokepn - 1];
-		JOT(4, "%i=audio_endpointnumber\n", \
-					peasycap->audio_endpointnumber);
-	}
-	if (!isokmps) {
-		SAY("ERROR:  no viable audio_maxpacketsize\n");
-		return -ENOENT;
-	} else {
-		peasycap->audio_isoc_maxframesize = okmps[isokmps - 1];
-		JOT(4, "%i=audio_isoc_maxframesize\n", \
-					peasycap->audio_isoc_maxframesize);
-		if (0 >= peasycap->audio_isoc_maxframesize) {
-			SAY("ERROR:  bad audio_isoc_maxframesize\n");
-			return -ENOENT;
-		}
-		if (9 == peasycap->audio_isoc_maxframesize) {
-			peasycap->ilk |= 0x02;
-			SAY("hardware is FOUR-CVBS\n");
-			peasycap->microphone = true;
-			peasycap->audio_pages_per_fragment = 4;
-		} else if (256 == peasycap->audio_isoc_maxframesize) {
-			peasycap->ilk &= ~0x02;
-			SAY("hardware is CVBS+S-VIDEO\n");
-			peasycap->microphone = false;
-			peasycap->audio_pages_per_fragment = 4;
-		} else {
-			SAY("hardware is unidentified:\n");
-			SAY("%i=audio_isoc_maxframesize\n", \
-					peasycap->audio_isoc_maxframesize);
-			return -ENOENT;
-		}
 
-		peasycap->audio_bytes_per_fragment = \
+	peasycap->audio_endpointnumber = okepn[isokalt - 1];
+	JOM(4, "%i=audio_endpointnumber\n", peasycap->audio_endpointnumber);
+
+	peasycap->audio_isoc_maxframesize = okmps[isokalt - 1];
+	JOM(4, "%i=audio_isoc_maxframesize\n", \
+					peasycap->audio_isoc_maxframesize);
+	if (0 >= peasycap->audio_isoc_maxframesize) {
+		SAM("ERROR:  bad audio_isoc_maxframesize\n");
+		return -ENOENT;
+	}
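+	/*
+	 *  THE AUDIO wMaxPacketSize IDENTIFIES THE HARDWARE VARIANT:
+	 *  9 BYTES FOR FOUR-CVBS, 256 BYTES FOR CVBS+S-VIDEO.
+	*/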
+	if (9 == peasycap->audio_isoc_maxframesize) {
+		peasycap->ilk |= 0x02;
+		SAM("hardware is FOUR-CVBS\n");
+		peasycap->microphone = true;
+		peasycap->audio_pages_per_fragment = 4;
+	} else if (256 == peasycap->audio_isoc_maxframesize) {
+		peasycap->ilk &= ~0x02;
+		SAM("hardware is CVBS+S-VIDEO\n");
+		peasycap->microphone = false;
+		peasycap->audio_pages_per_fragment = 4;
+	} else {
+		SAM("hardware is unidentified:\n");
+		SAM("%i=audio_isoc_maxframesize\n", \
+					peasycap->audio_isoc_maxframesize);
+		return -ENOENT;
+	}
+
+	peasycap->audio_bytes_per_fragment = \
 					peasycap->audio_pages_per_fragment * \
 								PAGE_SIZE ;
-		peasycap->audio_buffer_page_many = (AUDIO_FRAGMENT_MANY * \
+	peasycap->audio_buffer_page_many = (AUDIO_FRAGMENT_MANY * \
 					peasycap->audio_pages_per_fragment);
 
-		JOT(4, "%6i=AUDIO_FRAGMENT_MANY\n", AUDIO_FRAGMENT_MANY);
-		JOT(4, "%6i=audio_pages_per_fragment\n", \
+	JOM(4, "%6i=AUDIO_FRAGMENT_MANY\n", AUDIO_FRAGMENT_MANY);
+	JOM(4, "%6i=audio_pages_per_fragment\n", \
 					peasycap->audio_pages_per_fragment);
-		JOT(4, "%6i=audio_bytes_per_fragment\n", \
+	JOM(4, "%6i=audio_bytes_per_fragment\n", \
 					peasycap->audio_bytes_per_fragment);
-		JOT(4, "%6i=audio_buffer_page_many\n", \
+	JOM(4, "%6i=audio_buffer_page_many\n", \
 					peasycap->audio_buffer_page_many);
 
-		peasycap->audio_isoc_framesperdesc = 128;
+	peasycap->audio_isoc_framesperdesc = 128;
 
-		JOT(4, "%i=audio_isoc_framesperdesc\n", \
+	JOM(4, "%i=audio_isoc_framesperdesc\n", \
 					peasycap->audio_isoc_framesperdesc);
-		if (0 >= peasycap->audio_isoc_framesperdesc) {
-			SAY("ERROR:  bad audio_isoc_framesperdesc\n");
-			return -ENOENT;
-		}
-
-		peasycap->audio_isoc_buffer_size = \
-				peasycap->audio_isoc_maxframesize * \
-				peasycap->audio_isoc_framesperdesc;
-		JOT(4, "%i=audio_isoc_buffer_size\n", \
-					peasycap->audio_isoc_buffer_size);
-		if (AUDIO_ISOC_BUFFER_SIZE < \
-					peasycap->audio_isoc_buffer_size) {
-			SAY("MISTAKE:  audio_isoc_buffer_size bigger "
-			"than %li=AUDIO_ISOC_BUFFER_SIZE\n", \
-						AUDIO_ISOC_BUFFER_SIZE);
-			return -EFAULT;
-		}
+	if (0 >= peasycap->audio_isoc_framesperdesc) {
+		SAM("ERROR:  bad audio_isoc_framesperdesc\n");
+		return -ENOENT;
 	}
 
+	peasycap->audio_isoc_buffer_size = \
+				peasycap->audio_isoc_maxframesize * \
+				peasycap->audio_isoc_framesperdesc;
+	JOM(4, "%i=audio_isoc_buffer_size\n", \
+					peasycap->audio_isoc_buffer_size);
+	if (AUDIO_ISOC_BUFFER_SIZE < peasycap->audio_isoc_buffer_size) {
+		SAM("MISTAKE:  audio_isoc_buffer_size bigger "
+			"than %li=AUDIO_ISOC_BUFFER_SIZE\n", \
+						AUDIO_ISOC_BUFFER_SIZE);
+		return -EFAULT;
+	}
 	if (-1 == peasycap->audio_interface) {
-		SAY("MISTAKE:  audio_interface is unset\n");
+		SAM("MISTAKE:  audio_interface is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->audio_altsetting_on) {
-		SAY("MISTAKE:  audio_altsetting_on is unset\n");
+		SAM("MISTAKE:  audio_altsetting_on is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->audio_altsetting_off) {
-		SAY("MISTAKE:  audio_interface_off is unset\n");
+		SAM("MISTAKE:  audio_interface_off is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->audio_endpointnumber) {
-		SAY("MISTAKE:  audio_endpointnumber is unset\n");
+		SAM("MISTAKE:  audio_endpointnumber is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->audio_isoc_maxframesize) {
-		SAY("MISTAKE:  audio_isoc_maxframesize is unset\n");
+		SAM("MISTAKE:  audio_isoc_maxframesize is unset\n");
 		return -EFAULT;
 	}
 	if (-1 == peasycap->audio_isoc_buffer_size) {
-		SAY("MISTAKE:  audio_isoc_buffer_size is unset\n");
+		SAM("MISTAKE:  audio_isoc_buffer_size is unset\n");
 		return -EFAULT;
 	}
 /*---------------------------------------------------------------------------*/
@@ -3956,17 +4548,17 @@
 	INIT_LIST_HEAD(&(peasycap->urb_audio_head));
 	peasycap->purb_audio_head = &(peasycap->urb_audio_head);
 
-	JOT(4, "allocating an audio buffer\n");
-	JOT(4, ".... scattered over %i pages\n", \
+	JOM(4, "allocating an audio buffer\n");
+	JOM(4, ".... scattered over %i pages\n", \
 					peasycap->audio_buffer_page_many);
 
 	for (k = 0;  k < peasycap->audio_buffer_page_many;  k++) {
 		if ((void *)NULL != peasycap->audio_buffer[k].pgo) {
-			SAY("ERROR: attempting to reallocate audio buffers\n");
+			SAM("ERROR: attempting to reallocate audio buffers\n");
 		} else {
 			pbuf = (void *) __get_free_page(GFP_KERNEL);
 			if ((void *)NULL == pbuf) {
-				SAY("ERROR: Could not allocate audio " \
+				SAM("ERROR: Could not allocate audio " \
 							"buffer page %i\n", k);
 				return -ENOMEM;
 			} else
@@ -3979,16 +4571,16 @@
 
 	peasycap->audio_fill = 0;
 	peasycap->audio_read = 0;
-	JOT(4, "allocation of audio buffer done:  %i pages\n", k);
+	JOM(4, "allocation of audio buffer done:  %i pages\n", k);
 /*---------------------------------------------------------------------------*/
-	JOT(4, "allocating %i isoc audio buffers of size %i\n",  \
+	JOM(4, "allocating %i isoc audio buffers of size %i\n",  \
 		AUDIO_ISOC_BUFFER_MANY, peasycap->audio_isoc_buffer_size);
-	JOT(4, ".... each occupying contiguous memory pages\n");
+	JOM(4, ".... each occupying contiguous memory pages\n");
 
 	for (k = 0;  k < AUDIO_ISOC_BUFFER_MANY;  k++) {
 		pbuf = (void *)__get_free_pages(GFP_KERNEL, AUDIO_ISOC_ORDER);
 		if (NULL == pbuf) {
-			SAY("ERROR: Could not allocate isoc audio buffer " \
+			SAM("ERROR: Could not allocate isoc audio buffer " \
 							"%i\n", k);
 			return -ENOMEM;
 		} else
@@ -4000,25 +4592,25 @@
 		peasycap->audio_isoc_buffer_size;
 		peasycap->audio_isoc_buffer[k].kount = k;
 	}
-	JOT(4, "allocation of isoc audio buffers done.\n");
+	JOM(4, "allocation of isoc audio buffers done.\n");
 /*---------------------------------------------------------------------------*/
 /*
  *  ALLOCATE AND INITIALIZE MULTIPLE struct urb ...
  */
 /*---------------------------------------------------------------------------*/
-	JOT(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
-	JOT(4, "using %i=peasycap->audio_isoc_framesperdesc\n", \
+	JOM(4, "allocating %i struct urb.\n", AUDIO_ISOC_BUFFER_MANY);
+	JOM(4, "using %i=peasycap->audio_isoc_framesperdesc\n", \
 					peasycap->audio_isoc_framesperdesc);
-	JOT(4, "using %i=peasycap->audio_isoc_maxframesize\n", \
+	JOM(4, "using %i=peasycap->audio_isoc_maxframesize\n", \
 					peasycap->audio_isoc_maxframesize);
-	JOT(4, "using %i=peasycap->audio_isoc_buffer_size\n", \
+	JOM(4, "using %i=peasycap->audio_isoc_buffer_size\n", \
 					peasycap->audio_isoc_buffer_size);
 
 	for (k = 0;  k < AUDIO_ISOC_BUFFER_MANY; k++) {
 		purb = usb_alloc_urb(peasycap->audio_isoc_framesperdesc, \
 								GFP_KERNEL);
 		if (NULL == purb) {
-			SAY("ERROR: usb_alloc_urb returned NULL for buffer " \
+			SAM("ERROR: usb_alloc_urb returned NULL for buffer " \
 							"%i\n", k);
 			return -ENOMEM;
 		} else
@@ -4026,7 +4618,7 @@
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 		pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
 		if (NULL == pdata_urb) {
-			SAY("ERROR: Could not allocate struct data_urb.\n");
+			SAM("ERROR: Could not allocate struct data_urb.\n");
 			return -ENOMEM;
 		} else
 			peasycap->allocation_audio_struct += \
@@ -4043,30 +4635,30 @@
  */
 /*---------------------------------------------------------------------------*/
 		if (!k) {
-			JOT(4, "initializing audio urbs thus:\n");
-			JOT(4, "  purb->interval = 1;\n");
-			JOT(4, "  purb->dev = peasycap->pusb_device;\n");
-			JOT(4, "  purb->pipe = usb_rcvisocpipe(peasycap->" \
+			JOM(4, "initializing audio urbs thus:\n");
+			JOM(4, "  purb->interval = 1;\n");
+			JOM(4, "  purb->dev = peasycap->pusb_device;\n");
+			JOM(4, "  purb->pipe = usb_rcvisocpipe(peasycap->" \
 					"pusb_device,%i);\n", \
 					peasycap->audio_endpointnumber);
-			JOT(4, "  purb->transfer_flags = URB_ISO_ASAP;\n");
-			JOT(4, "  purb->transfer_buffer = " \
+			JOM(4, "  purb->transfer_flags = URB_ISO_ASAP;\n");
+			JOM(4, "  purb->transfer_buffer = " \
 				"peasycap->audio_isoc_buffer[.].pgo;\n");
-			JOT(4, "  purb->transfer_buffer_length = %i;\n", \
+			JOM(4, "  purb->transfer_buffer_length = %i;\n", \
 					peasycap->audio_isoc_buffer_size);
-			JOT(4, "  purb->complete = easysnd_complete;\n");
-			JOT(4, "  purb->context = peasycap;\n");
-			JOT(4, "  purb->start_frame = 0;\n");
-			JOT(4, "  purb->number_of_packets = %i;\n", \
+			JOM(4, "  purb->complete = easysnd_complete;\n");
+			JOM(4, "  purb->context = peasycap;\n");
+			JOM(4, "  purb->start_frame = 0;\n");
+			JOM(4, "  purb->number_of_packets = %i;\n", \
 					peasycap->audio_isoc_framesperdesc);
-			JOT(4, "  for (j = 0; j < %i; j++)\n", \
+			JOM(4, "  for (j = 0; j < %i; j++)\n", \
 					peasycap->audio_isoc_framesperdesc);
-			JOT(4, "    {\n");
-			JOT(4, "    purb->iso_frame_desc[j].offset = j*%i;\n",\
+			JOM(4, "    {\n");
+			JOM(4, "    purb->iso_frame_desc[j].offset = j*%i;\n",\
 					peasycap->audio_isoc_maxframesize);
-			JOT(4, "    purb->iso_frame_desc[j].length = %i;\n", \
+			JOM(4, "    purb->iso_frame_desc[j].length = %i;\n", \
 					peasycap->audio_isoc_maxframesize);
-			JOT(4, "    }\n");
+			JOM(4, "    }\n");
 			}
 
 		purb->interval = 1;
@@ -4088,7 +4680,7 @@
 					peasycap->audio_isoc_maxframesize;
 		}
 	}
-	JOT(4, "allocation of %i struct urb done.\n", k);
+	JOM(4, "allocation of %i struct urb done.\n", k);
 /*---------------------------------------------------------------------------*/
 /*
  *  SAVE POINTER peasycap IN THIS INTERFACE.
@@ -4105,14 +4697,18 @@
 		err("Not able to get a minor for this device.");
 		usb_set_intfdata(pusb_interface, NULL);
 		return -ENODEV;
-	} else
+	} else {
+		JOM(8, "kref_get() with %i=peasycap->kref.refcount.counter\n",\
+					(int)peasycap->kref.refcount.counter);
+		kref_get(&peasycap->kref);
 		(peasycap->registered_audio)++;
+	}
 /*---------------------------------------------------------------------------*/
 /*
  *  LET THE USER KNOW WHAT NODE THE AUDIO DEVICE IS ATTACHED TO.
  */
 /*---------------------------------------------------------------------------*/
-	SAY("easysnd attached to minor #%d\n", pusb_interface->minor);
+	SAM("easysnd attached to minor #%d\n", pusb_interface->minor);
 	break;
 }
 /*---------------------------------------------------------------------------*/
@@ -4121,20 +4717,19 @@
  */
 /*---------------------------------------------------------------------------*/
 default: {
-	JOT(4, "ERROR: unexpected interface %i\n", bInterfaceNumber);
+	JOM(4, "ERROR: unexpected interface %i\n", bInterfaceNumber);
 	return -EINVAL;
 }
 }
-JOT(4, "ends successfully for interface %i\n", \
+JOM(4, "ends successfully for interface %i\n", \
 				pusb_interface_descriptor->bInterfaceNumber);
 return 0;
 }
 /*****************************************************************************/
 /*---------------------------------------------------------------------------*/
 /*
- *  WHEN THIS FUNCTION IS CALLED THE DEVICE HAS ALREADY BEEN PHYSICALLY
- *  UNPLUGGED.
- *  HENCE peasycap->pusb_device IS NO LONGER VALID AND MUST BE SET TO NULL.
+ *  WHEN THIS FUNCTION IS CALLED THE EasyCAP HAS ALREADY BEEN PHYSICALLY
+ *  UNPLUGGED.  HENCE peasycap->pusb_device IS NO LONGER VALID.
  */
 /*---------------------------------------------------------------------------*/
 void
@@ -4147,7 +4742,14 @@
 
 struct list_head *plist_head;
 struct data_urb *pdata_urb;
-int minor, m;
+int minor, m, kd;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+struct v4l2_device *pv4l2_device;
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
 
 JOT(4, "\n");
 
@@ -4169,107 +4771,188 @@
 minor = pusb_interface->minor;
 JOT(4, "intf[%i]: minor=%i\n", bInterfaceNumber, minor);
 
+if (1 == bInterfaceNumber)
+	return;
+
 peasycap = usb_get_intfdata(pusb_interface);
-if ((struct easycap *)NULL == peasycap)
+if (NULL == peasycap) {
 	SAY("ERROR: peasycap is NULL\n");
-else {
-	peasycap->pusb_device = (struct usb_device *)NULL;
-	switch (bInterfaceNumber) {
+	return;
+}
 /*---------------------------------------------------------------------------*/
-	case 0: {
-		if ((struct list_head *)NULL != peasycap->purb_video_head) {
-			JOT(4, "killing video urbs\n");
-			m = 0;
-			list_for_each(plist_head, (peasycap->purb_video_head))
-				{
-				pdata_urb = list_entry(plist_head, \
-						struct data_urb, list_head);
-				if ((struct data_urb *)NULL != pdata_urb) {
-					if ((struct urb *)NULL != \
-							pdata_urb->purb) {
-						usb_kill_urb(pdata_urb->purb);
-						m++;
-					}
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+#
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+/*---------------------------------------------------------------------------*/
+/*
+ *  SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
+ *  BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
+ *  REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
+ *  TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
+*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	pv4l2_device = usb_get_intfdata(pusb_interface);
+	if ((struct v4l2_device *)NULL == pv4l2_device) {
+		SAY("ERROR: pv4l2_device is NULL\n");
+		return;
+	}
+	peasycap = (struct easycap *) \
+		container_of(pv4l2_device, struct easycap, v4l2_device);
+}
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return;
+}
+/*---------------------------------------------------------------------------*/
+/*
+ *  IF THE WAIT QUEUES ARE NOT CLEARED A DEADLOCK IS POSSIBLE.  BEWARE.
+*/
+/*---------------------------------------------------------------------------*/
+peasycap->video_eof = 1;
+peasycap->audio_eof = 1;
+wake_up_interruptible(&(peasycap->wq_video));
+wake_up_interruptible(&(peasycap->wq_audio));
+/*---------------------------------------------------------------------------*/
+switch (bInterfaceNumber) {
+case 0: {
+	if ((struct list_head *)NULL != peasycap->purb_video_head) {
+		JOM(4, "killing video urbs\n");
+		m = 0;
+		list_for_each(plist_head, (peasycap->purb_video_head))
+			{
+			pdata_urb = list_entry(plist_head, \
+					struct data_urb, list_head);
+			if ((struct data_urb *)NULL != pdata_urb) {
+				if ((struct urb *)NULL != \
+						pdata_urb->purb) {
+					usb_kill_urb(pdata_urb->purb);
+					m++;
 				}
 			}
-			JOT(4, "%i video urbs killed\n", m);
-		} else
-			SAY("ERROR: peasycap->purb_video_head is NULL\n");
-		break;
+		}
+		JOM(4, "%i video urbs killed\n", m);
 	}
+	break;
+}
 /*---------------------------------------------------------------------------*/
-	case 2: {
-		if ((struct list_head *)NULL != peasycap->purb_audio_head) {
-			JOT(4, "killing audio urbs\n");
-			m = 0;
-			list_for_each(plist_head, \
-						(peasycap->purb_audio_head)) {
-				pdata_urb = list_entry(plist_head, \
-						struct data_urb, list_head);
-				if ((struct data_urb *)NULL != pdata_urb) {
-					if ((struct urb *)NULL != \
-							pdata_urb->purb) {
-						usb_kill_urb(pdata_urb->purb);
-						m++;
-					}
+case 2: {
+	if ((struct list_head *)NULL != peasycap->purb_audio_head) {
+		JOM(4, "killing audio urbs\n");
+		m = 0;
+		list_for_each(plist_head, \
+					(peasycap->purb_audio_head)) {
+			pdata_urb = list_entry(plist_head, \
+					struct data_urb, list_head);
+			if ((struct data_urb *)NULL != pdata_urb) {
+				if ((struct urb *)NULL != \
+						pdata_urb->purb) {
+					usb_kill_urb(pdata_urb->purb);
+					m++;
 				}
 			}
-			JOT(4, "%i audio urbs killed\n", m);
-		} else
-			SAY("ERROR: peasycap->purb_audio_head is NULL\n");
-		break;
+		}
+		JOM(4, "%i audio urbs killed\n", m);
 	}
+	break;
+}
 /*---------------------------------------------------------------------------*/
-	default:
-		break;
-	}
+default:
+	break;
 }
 /*--------------------------------------------------------------------------*/
 /*
  *  DEREGISTER
+ *
+ *  THIS PROCEDURE WILL BLOCK UNTIL easycap_poll(), VIDEO IOCTL AND AUDIO
+ *  IOCTL ARE ALL UNLOCKED.  IF THIS IS NOT DONE AN Oops CAN OCCUR WHEN
+ *  AN EasyCAP IS UNPLUGGED WHILE THE URBS ARE RUNNING.  BEWARE.
  */
 /*--------------------------------------------------------------------------*/
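+/*
+ *  FIND WHICH SLOT OF THE DONGLE TABLE, IF ANY, HOLDS THIS peasycap, SO
+ *  THAT THE CORRESPONDING MUTEXES CAN BE LOCKED AND UNLOCKED BELOW.
+*/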
+kd = isdongle(peasycap);
 switch (bInterfaceNumber) {
 case 0: {
+	if (0 <= kd && DONGLE_MANY > kd) {
+		wake_up_interruptible(&peasycap->wq_video);
+		JOM(4, "about to lock easycap_dongle[%i].mutex_video\n", kd);
+		if (mutex_lock_interruptible(&easycap_dongle[kd].\
+								mutex_video)) {
+			SAY("ERROR: cannot lock easycap_dongle[%i]." \
+							"mutex_video\n", kd);
+			return;
+		}
+		JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
+	} else
+		SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
+/*---------------------------------------------------------------------------*/
 #if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
 	if ((struct easycap *)NULL == peasycap) {
-		SAY("ERROR: peasycap has become NULL\n");
+		SAM("ERROR: peasycap has become NULL\n");
 	} else {
-		lock_kernel();
 		usb_deregister_dev(pusb_interface, &easycap_class);
 		(peasycap->registered_video)--;
-
-		JOT(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
-		unlock_kernel();
-		SAY("easycap detached from minor #%d\n", minor);
+		JOM(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
+		SAM("easycap detached from minor #%d\n", minor);
 	}
-/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
 #else
-	if ((struct easycap *)NULL == peasycap)
-		SAY("ERROR: peasycap has become NULL\n");
-	else {
-		lock_kernel();
-		video_unregister_device(peasycap->pvideo_device);
-		(peasycap->registered_video)--;
-		unlock_kernel();
-		JOT(4, "unregistered with videodev: %i=minor\n", \
-							pvideo_device->minor);
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+	if (!peasycap->v4l2_device.name[0]) {
+		SAM("ERROR: peasycap->v4l2_device.name is empty\n");
+		if (0 <= kd && DONGLE_MANY > kd)
+			mutex_unlock(&easycap_dongle[kd].mutex_video);
+		return;
 	}
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+	v4l2_device_disconnect(&peasycap->v4l2_device);
+	JOM(4, "v4l2_device_disconnect() OK\n");
+	v4l2_device_unregister(&peasycap->v4l2_device);
+	JOM(4, "v4l2_device_unregister() OK\n");
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+
+	video_unregister_device(&peasycap->video_device);
+	JOM(4, "intf[%i]: video_unregister_device() OK\n", bInterfaceNumber);
+	(peasycap->registered_video)--;
+	JOM(4, "unregistered with videodev: %i=minor\n", minor);
 #endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+
+	if (0 <= kd && DONGLE_MANY > kd) {
+		mutex_unlock(&easycap_dongle[kd].mutex_video);
+		JOM(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
+	}
 	break;
 }
 case 2: {
-	lock_kernel();
+	if (0 <= kd && DONGLE_MANY > kd) {
+		wake_up_interruptible(&peasycap->wq_audio);
+		JOM(4, "about to lock easycap_dongle[%i].mutex_audio\n", kd);
+		if (mutex_lock_interruptible(&easycap_dongle[kd].\
+								mutex_audio)) {
+			SAY("ERROR: cannot lock easycap_dongle[%i]." \
+							"mutex_audio\n", kd);
+			return;
+		}
+		JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
+	} else
+		SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
 
 	usb_deregister_dev(pusb_interface, &easysnd_class);
-	if ((struct easycap *)NULL != peasycap)
-		(peasycap->registered_audio)--;
+	(peasycap->registered_audio)--;
 
-	JOT(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
-	unlock_kernel();
+	JOM(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
+	SAM("easysnd detached from minor #%d\n", minor);
 
-	SAY("easysnd detached from minor #%d\n", minor);
+	if (0 <= kd && DONGLE_MANY > kd) {
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		JOM(4, "unlocked easycap_dongle[%i].mutex_audio\n", kd);
+	}
 	break;
 }
 default:
@@ -4280,25 +4963,42 @@
  *  CALL easycap_delete() IF NO REMAINING REFERENCES TO peasycap
  */
 /*---------------------------------------------------------------------------*/
-if ((struct easycap *)NULL == peasycap) {
-	SAY("ERROR: peasycap has become NULL\n");
-	SAY("cannot call kref_put()\n");
-	SAY("ending unsuccessfully: may cause memory leak\n");
-	return;
-}
 if (!peasycap->kref.refcount.counter) {
-	SAY("ERROR: peasycap->kref.refcount.counter is zero " \
+	SAM("ERROR: peasycap->kref.refcount.counter is zero "
 						"so cannot call kref_put()\n");
-	SAY("ending unsuccessfully: may cause memory leak\n");
+	SAM("ending unsuccessfully: may cause memory leak\n");
 	return;
 }
-JOT(4, "intf[%i]: kref_put() with %i=peasycap->kref.refcount.counter\n", \
+if (0 <= kd && DONGLE_MANY > kd) {
+	JOM(4, "about to lock easycap_dongle[%i].mutex_video\n", kd);
+	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video)) {
+		SAY("ERROR: cannot down easycap_dongle[%i].mutex_video\n", kd);
+		SAM("ending unsuccessfully: may cause memory leak\n");
+		return;
+	}
+	JOM(4, "locked easycap_dongle[%i].mutex_video\n", kd);
+	JOM(4, "about to lock easycap_dongle[%i].mutex_audio\n", kd);
+	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_audio)) {
+		SAY("ERROR: cannot down easycap_dongle[%i].mutex_audio\n", kd);
+		mutex_unlock(&(easycap_dongle[kd].mutex_video));
+		JOM(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
+		SAM("ending unsuccessfully: may cause memory leak\n");
+		return;
+	}
+	JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
+}
+JOM(4, "intf[%i]: %i=peasycap->kref.refcount.counter\n", \
 		bInterfaceNumber, (int)peasycap->kref.refcount.counter);
 kref_put(&peasycap->kref, easycap_delete);
 JOT(4, "intf[%i]: kref_put() done.\n", bInterfaceNumber);
+if (0 <= kd && DONGLE_MANY > kd) {
+	mutex_unlock(&(easycap_dongle[kd].mutex_audio));
+	JOT(4, "unlocked easycap_dongle[%i].mutex_audio\n", kd);
+	mutex_unlock(&easycap_dongle[kd].mutex_video);
+	JOT(4, "unlocked easycap_dongle[%i].mutex_video\n", kd);
+}
 /*---------------------------------------------------------------------------*/
-
-JOT(4, "ends\n");
+JOM(4, "ends\n");
 return;
 }
 /*****************************************************************************/
@@ -4308,7 +5008,8 @@
 int result;
 
 SAY("========easycap=======\n");
-JOT(4, "begins.  %i=debug\n", easycap_debug);
+JOT(4, "begins.  %i=debug %i=bars %i=gain\n", easycap_debug, easycap_bars, \
+						easycap_gain);
 SAY("version: " EASYCAP_DRIVER_VERSION "\n");
 /*---------------------------------------------------------------------------*/
 /*
@@ -4349,6 +5050,9 @@
 MODULE_DESCRIPTION(EASYCAP_DRIVER_DESCRIPTION);
 MODULE_VERSION(EASYCAP_DRIVER_VERSION);
 #if defined(EASYCAP_DEBUG)
-MODULE_PARM_DESC(easycap_debug, "debug: 0 (default), 1, 2,...");
+MODULE_PARM_DESC(debug, "Debug level: 0(default),1,2,...,9");
 #endif /*EASYCAP_DEBUG*/
+MODULE_PARM_DESC(bars, \
+	"Testcard bars on input signal failure: 0=>no, 1=>yes(default)");
+MODULE_PARM_DESC(gain, "Audio gain: 0,...,16(default),...31");
 /*****************************************************************************/
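
The disconnect and release paths above repeat one guard throughout: derive the dongle index with isdongle(), range-check it against DONGLE_MANY, and only then take the corresponding per-dongle mutex, giving up if the wait is interrupted. A minimal kernel-context sketch of that pattern follows; it reuses the easycap_dongle[] table and struct easycap from this driver, but the helper name itself is hypothetical and not part of the patch.

#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical helper: return the locked video-dongle index for this
 * capture device, or a negative errno.  Mirrors the guard used repeatedly
 * in easycap_usb_disconnect() and easysnd_read() above. */
static int easycap_lock_video(struct easycap *peasycap)
{
	int kd = isdongle(peasycap);

	if (kd < 0 || kd >= DONGLE_MANY)
		return -ENODEV;		/* no valid dongle slot */
	if (mutex_lock_interruptible(&easycap_dongle[kd].mutex_video))
		return -ERESTARTSYS;	/* interrupted while waiting */
	return kd;			/* caller must unlock this slot */
}
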
diff --git a/drivers/staging/easycap/easycap_settings.c b/drivers/staging/easycap/easycap_settings.c
index 38d9405..df3f17d 100644
--- a/drivers/staging/easycap/easycap_settings.c
+++ b/drivers/staging/easycap/easycap_settings.c
@@ -33,11 +33,15 @@
  *  THE LEAST SIGNIFICANT BIT OF easycap_standard.mask HAS MEANING:
  *                         0 => 25 fps
  *                         1 => 30 fps
+ *
+ *  THE MOST  SIGNIFICANT BIT OF easycap_standard.mask HAS MEANING:
+ *                         0 => full framerate
+ *                         1 => 20%  framerate
  */
 /*---------------------------------------------------------------------------*/
 const struct easycap_standard easycap_standard[] = {
 {
-.mask = 0x000F & PAL_BGHIN ,
+.mask = 0x00FF & PAL_BGHIN ,
 .v4l2_standard = {
 	.index = PAL_BGHIN,
 	.id = (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
@@ -50,7 +54,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & NTSC_N_443 ,
+.mask = 0x00FF & NTSC_N_443 ,
 .v4l2_standard = {
 	.index = NTSC_N_443,
 	.id = V4L2_STD_UNKNOWN,
@@ -62,7 +66,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & PAL_Nc ,
+.mask = 0x00FF & PAL_Nc ,
 .v4l2_standard = {
 	.index = PAL_Nc,
 	.id = V4L2_STD_PAL_Nc,
@@ -74,7 +78,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & NTSC_N ,
+.mask = 0x00FF & NTSC_N ,
 .v4l2_standard = {
 	.index = NTSC_N,
 	.id = V4L2_STD_UNKNOWN,
@@ -86,7 +90,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & SECAM ,
+.mask = 0x00FF & SECAM ,
 .v4l2_standard = {
 	.index = SECAM,
 	.id = V4L2_STD_SECAM,
@@ -98,7 +102,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & NTSC_M ,
+.mask = 0x00FF & NTSC_M ,
 .v4l2_standard = {
 	.index = NTSC_M,
 	.id = V4L2_STD_NTSC_M,
@@ -110,7 +114,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & NTSC_M_JP ,
+.mask = 0x00FF & NTSC_M_JP ,
 .v4l2_standard = {
 	.index = NTSC_M_JP,
 	.id = V4L2_STD_NTSC_M_JP,
@@ -122,7 +126,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & PAL_60 ,
+.mask = 0x00FF & PAL_60 ,
 .v4l2_standard = {
 	.index = PAL_60,
 	.id = V4L2_STD_PAL_60,
@@ -134,7 +138,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & NTSC_443 ,
+.mask = 0x00FF & NTSC_443 ,
 .v4l2_standard = {
 	.index = NTSC_443,
 	.id = V4L2_STD_NTSC_443,
@@ -146,7 +150,7 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
-.mask = 0x000F & PAL_M ,
+.mask = 0x00FF & PAL_M ,
 .v4l2_standard = {
 	.index = PAL_M,
 	.id = V4L2_STD_PAL_M,
@@ -158,6 +162,128 @@
 },
 /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
 {
+.mask = 0x8000 | (0x00FF & PAL_BGHIN_SLOW),
+.v4l2_standard = {
+	.index = PAL_BGHIN_SLOW,
+	.id = (V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_PAL_H | \
+				V4L2_STD_PAL_I | V4L2_STD_PAL_N | \
+					(((v4l2_std_id)0x01) << 32)),
+	.name = "PAL_BGHIN_SLOW",
+	.frameperiod = {1, 5},
+	.framelines = 625,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_N_443_SLOW),
+.v4l2_standard = {
+	.index = NTSC_N_443_SLOW,
+	.id = (V4L2_STD_UNKNOWN | (((v4l2_std_id)0x11) << 32)),
+	.name = "NTSC_N_443_SLOW",
+	.frameperiod = {1, 5},
+	.framelines = 480,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & PAL_Nc_SLOW),
+.v4l2_standard = {
+	.index = PAL_Nc_SLOW,
+	.id = (V4L2_STD_PAL_Nc | (((v4l2_std_id)0x01) << 32)),
+	.name = "PAL_Nc_SLOW",
+	.frameperiod = {1, 5},
+	.framelines = 625,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_N_SLOW),
+.v4l2_standard = {
+	.index = NTSC_N_SLOW,
+	.id = (V4L2_STD_UNKNOWN | (((v4l2_std_id)0x21) << 32)),
+	.name = "NTSC_N_SLOW",
+	.frameperiod = {1, 5},
+	.framelines = 525,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & SECAM_SLOW),
+.v4l2_standard = {
+	.index = SECAM_SLOW,
+	.id = (V4L2_STD_SECAM | (((v4l2_std_id)0x01) << 32)),
+	.name = "SECAM_SLOW",
+	.frameperiod = {1, 5},
+	.framelines = 625,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_M_SLOW),
+.v4l2_standard = {
+	.index = NTSC_M_SLOW,
+	.id = (V4L2_STD_NTSC_M | (((v4l2_std_id)0x01) << 32)),
+	.name = "NTSC_M_SLOW",
+	.frameperiod = {1, 6},
+	.framelines = 525,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_M_JP_SLOW),
+.v4l2_standard = {
+	.index = NTSC_M_JP_SLOW,
+	.id = (V4L2_STD_NTSC_M_JP | (((v4l2_std_id)0x01) << 32)),
+	.name = "NTSC_M_JP_SLOW",
+	.frameperiod = {1, 6},
+	.framelines = 525,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & PAL_60_SLOW),
+.v4l2_standard = {
+	.index = PAL_60_SLOW,
+	.id = (V4L2_STD_PAL_60 | (((v4l2_std_id)0x01) << 32)),
+	.name = "PAL_60_SLOW",
+	.frameperiod = {1, 6},
+	.framelines = 525,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & NTSC_443_SLOW),
+.v4l2_standard = {
+	.index = NTSC_443_SLOW,
+	.id = (V4L2_STD_NTSC_443 | (((v4l2_std_id)0x01) << 32)),
+	.name = "NTSC_443_SLOW",
+	.frameperiod = {1, 6},
+	.framelines = 525,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
+.mask = 0x8000 | (0x00FF & PAL_M_SLOW),
+.v4l2_standard = {
+	.index = PAL_M_SLOW,
+	.id = (V4L2_STD_PAL_M | (((v4l2_std_id)0x01) << 32)),
+	.name = "PAL_M_SLOW",
+	.frameperiod = {1, 6},
+	.framelines = 525,
+	.reserved = {0, 0, 0, 0}
+}
+},
+/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
+{
 .mask = 0xFFFF
 }
 };
@@ -165,15 +291,16 @@
 /*
  *  THE 16-BIT easycap_format.mask HAS MEANING:
  *    (least significant) BIT  0:     0 => PAL, 25 FPS;   1 => NTSC, 30 FPS
- *                        BITS 1-3:   RESERVED FOR DIFFERENTIATING STANDARDS
- *                        BITS 4-7:   NUMBER OF BYTES PER PIXEL
+ *                        BITS 2-4:   RESERVED FOR DIFFERENTIATING STANDARDS
+ *                        BITS 5-7:   NUMBER OF BYTES PER PIXEL
  *                        BIT  8:     0 => NATIVE BYTE ORDER;  1 => SWAPPED
  *                        BITS 9-10:  RESERVED FOR OTHER BYTE PERMUTATIONS
- *                        BIT 11:     0 => UNDECIMATED;  1 => DECIMATED
- *                        BIT 12:     0 => OFFER FRAMES; 1 => OFFER FIELDS
- *     (most significant) BITS 13-15: RESERVED FOR OTHER FIELD ORDER OPTIONS
+ *                        BIT 11:     0 => UNDECIMATED;    1 => DECIMATED
+ *                        BIT 12:     0 => OFFER FRAMES;   1 => OFFER FIELDS
+ *                        BIT 13:     0 => FULL FRAMERATE; 1 => REDUCED
+ *     (most significant) BITS 14-15: RESERVED FOR OTHER FIELD/FRAME OPTIONS
  *  IT FOLLOWS THAT:
- *     bytesperpixel IS         ((0x00F0 & easycap_format.mask) >> 4)
+ *     bytesperpixel IS         ((0x00E0 & easycap_format.mask) >> 5)
  *     byteswaporder IS true IF (0 != (0x0100 & easycap_format.mask))
  *
  *     decimatepixel IS true IF (0 != (0x0800 & easycap_format.mask))
@@ -197,65 +324,135 @@
 	mask1 = 0x0000;
 	switch (i) {
 	case PAL_BGHIN: {
-		mask1 = PAL_BGHIN;
+		mask1 = 0x1F & PAL_BGHIN;
 		strcpy(&name1[0], "PAL_BGHIN");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
 		break;
 	}
 	case SECAM: {
-		mask1 = SECAM;
+		mask1 = 0x1F & SECAM;
 		strcpy(&name1[0], "SECAM");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
 		break;
 	}
 	case PAL_Nc: {
-		mask1 = PAL_Nc;
+		mask1 = 0x1F & PAL_Nc;
 		strcpy(&name1[0], "PAL_Nc");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
 		break;
 	}
 	case PAL_60: {
-		mask1 = PAL_60;
+		mask1 = 0x1F & PAL_60;
 		strcpy(&name1[0], "PAL_60");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
 		break;
 	}
 	case PAL_M: {
-		mask1 = PAL_M;
+		mask1 = 0x1F & PAL_M;
 		strcpy(&name1[0], "PAL_M");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
 		break;
 	}
 	case NTSC_M: {
-		mask1 = NTSC_M;
+		mask1 = 0x1F & NTSC_M;
 		strcpy(&name1[0], "NTSC_M");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
 		break;
 	}
 	case NTSC_443: {
-		mask1 = NTSC_443;
+		mask1 = 0x1F & NTSC_443;
 		strcpy(&name1[0], "NTSC_443");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
 		break;
 	}
 	case NTSC_M_JP: {
-		mask1 = NTSC_M_JP;
+		mask1 = 0x1F & NTSC_M_JP;
 		strcpy(&name1[0], "NTSC_M_JP");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
 		break;
 	}
 	case NTSC_N: {
-		mask1 = NTSC_M;
+		mask1 = 0x1F & NTSC_M;
 		strcpy(&name1[0], "NTSC_N");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
 		break;
 	}
 	case NTSC_N_443: {
-		mask1 = NTSC_N_443;
+		mask1 = 0x1F & NTSC_N_443;
 		strcpy(&name1[0], "NTSC_N_443");
 		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
 		break;
 	}
+	case PAL_BGHIN_SLOW: {
+		mask1 = 0x001F & PAL_BGHIN_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "PAL_BGHIN_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+		break;
+	}
+	case SECAM_SLOW: {
+		mask1 = 0x001F & SECAM_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "SECAM_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+		break;
+	}
+	case PAL_Nc_SLOW: {
+		mask1 = 0x001F & PAL_Nc_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "PAL_Nc_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+		break;
+	}
+	case PAL_60_SLOW: {
+		mask1 = 0x001F & PAL_60_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "PAL_60_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+		break;
+	}
+	case PAL_M_SLOW: {
+		mask1 = 0x001F & PAL_M_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "PAL_M_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_BG;
+		break;
+	}
+	case NTSC_M_SLOW: {
+		mask1 = 0x001F & NTSC_M_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "NTSC_M_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+		break;
+	}
+	case NTSC_443_SLOW: {
+		mask1 = 0x001F & NTSC_443_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "NTSC_443_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+		break;
+	}
+	case NTSC_M_JP_SLOW: {
+		mask1 = 0x001F & NTSC_M_JP_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "NTSC_M_JP_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+		break;
+	}
+	case NTSC_N_SLOW: {
+		mask1 = 0x001F & NTSC_N_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "NTSC_N_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+		break;
+	}
+	case NTSC_N_443_SLOW: {
+		mask1 = 0x001F & NTSC_N_443_SLOW;
+		mask1 |= 0x0200;
+		strcpy(&name1[0], "NTSC_N_443_SLOW");
+		colorspace = V4L2_COLORSPACE_470_SYSTEM_M;
+		break;
+	}
 	default:
 		return -1;
 	}
@@ -311,39 +508,39 @@
 			case FMT_UYVY: {
 				strcpy(&name3[0], "_" STRINGIZE(FMT_UYVY));
 				pixelformat = V4L2_PIX_FMT_UYVY;
-				mask3 |= (0x02 << 4);
+				mask3 |= (0x02 << 5);
 				break;
 			}
 			case FMT_YUY2: {
 				strcpy(&name3[0], "_" STRINGIZE(FMT_YUY2));
 				pixelformat = V4L2_PIX_FMT_YUYV;
-				mask3 |= (0x02 << 4);
+				mask3 |= (0x02 << 5);
 				mask3 |= 0x0100;
 				break;
 			}
 			case FMT_RGB24: {
 				strcpy(&name3[0], "_" STRINGIZE(FMT_RGB24));
 				pixelformat = V4L2_PIX_FMT_RGB24;
-				mask3 |= (0x03 << 4);
+				mask3 |= (0x03 << 5);
 				break;
 			}
 			case FMT_RGB32: {
 				strcpy(&name3[0], "_" STRINGIZE(FMT_RGB32));
 				pixelformat = V4L2_PIX_FMT_RGB32;
-				mask3 |= (0x04 << 4);
+				mask3 |= (0x04 << 5);
 				break;
 			}
 			case FMT_BGR24: {
 				strcpy(&name3[0], "_" STRINGIZE(FMT_BGR24));
 				pixelformat = V4L2_PIX_FMT_BGR24;
-				mask3 |= (0x03 << 4);
+				mask3 |= (0x03 << 5);
 				mask3 |= 0x0100;
 				break;
 			}
 			case FMT_BGR32: {
 				strcpy(&name3[0], "_" STRINGIZE(FMT_BGR32));
 				pixelformat = V4L2_PIX_FMT_BGR32;
-				mask3 |= (0x04 << 4);
+				mask3 |= (0x04 << 5);
 				mask3 |= 0x0100;
 				break;
 			}
@@ -363,13 +560,8 @@
 				}
 				case FIELD_INTERLACED: {
 					strcpy(&name4[0], "-i");
-					field = V4L2_FIELD_INTERLACED;
-					break;
-				}
-				case FIELD_ALTERNATE: {
-					strcpy(&name4[0], "-a");
 					mask4 |= 0x1000;
-					field = V4L2_FIELD_ALTERNATE;
+					field = V4L2_FIELD_INTERLACED;
 					break;
 				}
 				default:
@@ -413,7 +605,7 @@
 }
 /*---------------------------------------------------------------------------*/
 struct v4l2_queryctrl easycap_control[] = \
- {{
+{{
 .id       = V4L2_CID_BRIGHTNESS,
 .type     = V4L2_CTRL_TYPE_INTEGER,
 .name     = "Brightness",
@@ -485,5 +677,5 @@
 {
 .id = 0xFFFFFFFF
 }
- };
+};
 /*****************************************************************************/
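
The two mask layouts documented in this file decode with plain shifts and tests. Below is a standalone userspace C illustration using only the bit positions stated in the comments above (bit 0 and bit 15 of easycap_standard.mask; bits 5-7, 8 and 11 of easycap_format.mask); the sample values are invented for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int std_mask = 0x8000 | 0x0001;	/* e.g. a 30 fps "SLOW" entry */
	unsigned int fmt_mask = (0x02 << 5) | 0x0100;	/* 2 bytes/pixel, swapped     */

	printf("%s fps, %s framerate\n",
	       (std_mask & 0x0001) ? "30" : "25",
	       (std_mask & 0x8000) ? "reduced" : "full");

	printf("bytesperpixel=%u  byteswaporder=%u  decimatepixel=%u\n",
	       (fmt_mask & 0x00E0) >> 5,
	       (fmt_mask & 0x0100) ? 1 : 0,
	       (fmt_mask & 0x0800) ? 1 : 0);
	return 0;
}
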
diff --git a/drivers/staging/easycap/easycap_sound.c b/drivers/staging/easycap/easycap_sound.c
index 63562bd..24d8bb4 100644
--- a/drivers/staging/easycap/easycap_sound.c
+++ b/drivers/staging/easycap/easycap_sound.c
@@ -36,17 +36,15 @@
 /*---------------------------------------------------------------------------*/
 /*
  *  ON COMPLETION OF AN AUDIO URB ITS DATA IS COPIED TO THE AUDIO BUFFERS
- *  PROVIDED peasycap->audio_idle IS ZER0.  REGARDLESS OF THIS BEING TRUE,
+ *  PROVIDED peasycap->audio_idle IS ZERO.  REGARDLESS OF THIS BEING TRUE,
  *  IT IS RESUBMITTED PROVIDED peasycap->audio_isoc_streaming IS NOT ZERO.
  */
 /*---------------------------------------------------------------------------*/
 void
 easysnd_complete(struct urb *purb)
 {
-static int mt;
 struct easycap *peasycap;
 struct data_buffer *paudio_buffer;
-char errbuf[16];
 __u8 *p1, *p2;
 __s16 s16;
 int i, j, more, much, leap, rc;
@@ -66,48 +64,62 @@
 	SAY("ERROR: peasycap is NULL\n");
 	return;
 }
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap\n");
+	return;
+}
+
 much = 0;
 
-
 if (peasycap->audio_idle) {
-	JOT(16, "%i=audio_idle  %i=audio_isoc_streaming\n", \
+	JOM(16, "%i=audio_idle  %i=audio_isoc_streaming\n", \
 			peasycap->audio_idle, peasycap->audio_isoc_streaming);
 	if (peasycap->audio_isoc_streaming) {
 		rc = usb_submit_urb(purb, GFP_ATOMIC);
 		if (0 != rc) {
-			SAY("ERROR: while %i=audio_idle, " \
+			if (-ENODEV != rc)
+				SAM("ERROR: while %i=audio_idle, " \
 					"usb_submit_urb() failed with rc:\n", \
 							peasycap->audio_idle);
 			switch (rc) {
 			case -ENOMEM: {
-				SAY("ENOMEM\n");    break;
+				SAM("-ENOMEM\n");
+				break;
 			}
 			case -ENODEV: {
-				SAY("ENODEV\n");    break;
+				break;
 			}
 			case -ENXIO: {
-				SAY("ENXIO\n");     break;
+				SAM("-ENXIO\n");
+				break;
 			}
 			case -EINVAL: {
-				SAY("EINVAL\n");    break;
+				SAM("-EINVAL\n");
+				break;
 			}
 			case -EAGAIN: {
-				SAY("EAGAIN\n");    break;
+				SAM("-EAGAIN\n");
+				break;
 			}
 			case -EFBIG: {
-				SAY("EFBIG\n");     break;
+				SAM("-EFBIG\n");
+				break;
 			}
 			case -EPIPE: {
-				SAY("EPIPE\n");     break;
+				SAM("-EPIPE\n");
+				break;
 			}
 			case -EMSGSIZE: {
-				SAY("EMSGSIZE\n");  break;
+				SAM("-EMSGSIZE\n");
+				break;
 			}
 			case -ENOSPC: {
-				SAY("ENOSPC\n");  break;
+				SAM("-ENOSPC\n");
+				break;
 			}
 			default: {
-				SAY("0x%08X\n", rc); break;
+				SAM("unknown error: 0x%08X\n", rc);
+				break;
 			}
 			}
 		}
@@ -116,74 +128,95 @@
 }
 /*---------------------------------------------------------------------------*/
 if (purb->status) {
-	if (-ESHUTDOWN == purb->status) {
-		JOT(16, "immediate return because -ESHUTDOWN=purb->status\n");
+	if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
+		JOM(16, "urb status -ESHUTDOWN or -ENOENT\n");
 		return;
 	}
-	SAY("ERROR: non-zero urb status:\n");
+	SAM("ERROR: non-zero urb status:\n");
 	switch (purb->status) {
 	case -EINPROGRESS: {
-		SAY("-EINPROGRESS\n"); break;
+		SAM("-EINPROGRESS\n");
+		break;
 	}
 	case -ENOSR: {
-		SAY("-ENOSR\n"); break;
+		SAM("-ENOSR\n");
+		break;
 	}
 	case -EPIPE: {
-		SAY("-EPIPE\n"); break;
+		SAM("-EPIPE\n");
+		break;
 	}
 	case -EOVERFLOW: {
-		SAY("-EOVERFLOW\n"); break;
+		SAM("-EOVERFLOW\n");
+		break;
 	}
 	case -EPROTO: {
-		SAY("-EPROTO\n"); break;
+		SAM("-EPROTO\n");
+		break;
 	}
 	case -EILSEQ: {
-		SAY("-EILSEQ\n"); break;
+		SAM("-EILSEQ\n");
+		break;
 	}
 	case -ETIMEDOUT: {
-		SAY("-ETIMEDOUT\n"); break;
+		SAM("-ETIMEDOUT\n");
+		break;
 	}
 	case -EMSGSIZE: {
-		SAY("-EMSGSIZE\n"); break;
+		SAM("-EMSGSIZE\n");
+		break;
 	}
 	case -EOPNOTSUPP: {
-		SAY("-EOPNOTSUPP\n"); break;
+		SAM("-EOPNOTSUPP\n");
+		break;
 	}
 	case -EPFNOSUPPORT: {
-		SAY("-EPFNOSUPPORT\n"); break;
+		SAM("-EPFNOSUPPORT\n");
+		break;
 	}
 	case -EAFNOSUPPORT: {
-		SAY("-EAFNOSUPPORT\n"); break;
+		SAM("-EAFNOSUPPORT\n");
+		break;
 	}
 	case -EADDRINUSE: {
-		SAY("-EADDRINUSE\n"); break;
+		SAM("-EADDRINUSE\n");
+		break;
 	}
 	case -EADDRNOTAVAIL: {
-		SAY("-EADDRNOTAVAIL\n"); break;
+		SAM("-EADDRNOTAVAIL\n");
+		break;
 	}
 	case -ENOBUFS: {
-		SAY("-ENOBUFS\n"); break;
+		SAM("-ENOBUFS\n");
+		break;
 	}
 	case -EISCONN: {
-		SAY("-EISCONN\n"); break;
+		SAM("-EISCONN\n");
+		break;
 	}
 	case -ENOTCONN: {
-		SAY("-ENOTCONN\n"); break;
+		SAM("-ENOTCONN\n");
+		break;
 	}
 	case -ESHUTDOWN: {
-		SAY("-ESHUTDOWN\n"); break;
+		SAM("-ESHUTDOWN\n");
+		break;
 	}
 	case -ENOENT: {
-		SAY("-ENOENT\n"); break;
+		SAM("-ENOENT\n");
+		break;
 	}
 	case -ECONNRESET: {
-		SAY("-ECONNRESET\n"); break;
+		SAM("-ECONNRESET\n");
+		break;
 	}
 	case -ENOSPC: {
-		SAY("ENOSPC\n");  break;
+		SAM("ENOSPC\n");
+		break;
 	}
 	default: {
-		SAY("unknown error code 0x%08X\n", purb->status); break;
+		SAM("unknown error code 0x%08X\n", purb->status);
+		break;
 	}
 	}
 /*---------------------------------------------------------------------------*/
@@ -196,35 +229,43 @@
 	if (peasycap->audio_isoc_streaming) {
 		rc = usb_submit_urb(purb, GFP_ATOMIC);
 		if (0 != rc) {
-			SAY("ERROR: while %i=audio_idle, usb_submit_urb() "
+			SAM("ERROR: while %i=audio_idle, usb_submit_urb() "
 				"failed with rc:\n", peasycap->audio_idle);
 			switch (rc) {
 			case -ENOMEM: {
-				SAY("ENOMEM\n");    break;
+				SAM("-ENOMEM\n");
+				break;
 			}
 			case -ENODEV: {
-				SAY("ENODEV\n");    break;
+				SAM("-ENODEV\n");
+				break;
 			}
 			case -ENXIO: {
-				SAY("ENXIO\n");     break;
+				SAM("-ENXIO\n");
+				break;
 			}
 			case -EINVAL: {
-				SAY("EINVAL\n");    break;
+				SAM("-EINVAL\n");
+				break;
 			}
 			case -EAGAIN: {
-				SAY("EAGAIN\n");    break;
+				SAM("-EAGAIN\n");
+				break;
 			}
 			case -EFBIG: {
-				SAY("EFBIG\n");     break;
+				SAM("-EFBIG\n");
+				break;
 			}
 			case -EPIPE: {
-				SAY("EPIPE\n");     break;
+				SAM("-EPIPE\n");
+				break;
 			}
 			case -EMSGSIZE: {
-				SAY("EMSGSIZE\n");  break;
+				SAM("-EMSGSIZE\n");
+				break;
 			}
 			default: {
-				SAY("0x%08X\n", rc); break;
+				SAM("0x%08X\n", rc); break;
 			}
 			}
 		}
@@ -243,73 +284,81 @@
 for (i = 0;  i < purb->number_of_packets; i++) {
 	switch (purb->iso_frame_desc[i].status) {
 	case  0: {
-		strcpy(&errbuf[0], "OK"); break;
+		break;
 	}
 	case -ENOENT: {
-		strcpy(&errbuf[0], "-ENOENT"); break;
+		SAM("-ENOENT\n");
+		break;
 	}
 	case -EINPROGRESS: {
-		strcpy(&errbuf[0], "-EINPROGRESS"); break;
+		SAM("-EINPROGRESS\n");
+		break;
 	}
 	case -EPROTO: {
-		strcpy(&errbuf[0], "-EPROTO"); break;
+		SAM("-EPROTO\n");
+		break;
 	}
 	case -EILSEQ: {
-		strcpy(&errbuf[0], "-EILSEQ"); break;
+		SAM("-EILSEQ\n");
+		break;
 	}
 	case -ETIME: {
-		strcpy(&errbuf[0], "-ETIME"); break;
+		SAM("-ETIME\n");
+		break;
 	}
 	case -ETIMEDOUT: {
-		strcpy(&errbuf[0], "-ETIMEDOUT"); break;
+		SAM("-ETIMEDOUT\n");
+		break;
 	}
 	case -EPIPE: {
-		strcpy(&errbuf[0], "-EPIPE"); break;
+		SAM("-EPIPE\n");
+		break;
 	}
 	case -ECOMM: {
-		strcpy(&errbuf[0], "-ECOMM"); break;
+		SAM("-ECOMM\n");
+		break;
 	}
 	case -ENOSR: {
-		strcpy(&errbuf[0], "-ENOSR"); break;
+		SAM("-ENOSR\n");
+		break;
 	}
 	case -EOVERFLOW: {
-		strcpy(&errbuf[0], "-EOVERFLOW"); break;
+		SAM("-EOVERFLOW\n");
+		break;
 	}
 	case -EREMOTEIO: {
-		strcpy(&errbuf[0], "-EREMOTEIO"); break;
+		SAM("-EREMOTEIO\n");
+		break;
 	}
 	case -ENODEV: {
-		strcpy(&errbuf[0], "-ENODEV"); break;
+		SAM("-ENODEV\n");
+		break;
 	}
 	case -EXDEV: {
-		strcpy(&errbuf[0], "-EXDEV"); break;
+		SAM("-EXDEV\n");
+		break;
 	}
 	case -EINVAL: {
-		strcpy(&errbuf[0], "-EINVAL"); break;
+		SAM("-EINVAL\n");
+		break;
 	}
 	case -ECONNRESET: {
-		strcpy(&errbuf[0], "-ECONNRESET"); break;
+		SAM("-ECONNRESET\n");
+		break;
 	}
 	case -ENOSPC: {
-		strcpy(&errbuf[0], "-ENOSPC"); break;
+		SAM("-ENOSPC\n");
+		break;
 	}
 	case -ESHUTDOWN: {
-		strcpy(&errbuf[0], "-ESHUTDOWN"); break;
+		SAM("-ESHUTDOWN\n");
+		break;
 	}
 	default: {
-		strcpy(&errbuf[0], "UNKNOWN"); break;
+		SAM("unknown error:0x%08X\n", purb->iso_frame_desc[i].status);
+		break;
 	}
 	}
-	if ((!purb->iso_frame_desc[i].status) && 0) {
-		JOT(16, "frame[%2i]: %i=status{=%16s}  "  \
-						"%5i=actual  "  \
-						"%5i=length  "  \
-						"%3i=offset\n", \
-				i, purb->iso_frame_desc[i].status, &errbuf[0],
-				purb->iso_frame_desc[i].actual_length,
-				purb->iso_frame_desc[i].length,
-				purb->iso_frame_desc[i].offset);
-	}
 	if (!purb->iso_frame_desc[i].status) {
 		more = purb->iso_frame_desc[i].actual_length;
 
@@ -319,11 +368,12 @@
 #endif
 
 		if (!more)
-			mt++;
+			peasycap->audio_mt++;
 		else {
-			if (mt) {
-				JOT(16, "%4i empty audio urb frames\n", mt);
-				mt = 0;
+			if (peasycap->audio_mt) {
+				JOM(16, "%4i empty audio urb frames\n", \
+							peasycap->audio_mt);
+				peasycap->audio_mt = 0;
 			}
 
 			p1 = (__u8 *)(purb->transfer_buffer + \
@@ -340,13 +390,13 @@
 /*---------------------------------------------------------------------------*/
 			while (more) {
 				if (0 > more) {
-					SAY("easysnd_complete: MISTAKE: " \
+					SAM("easysnd_complete: MISTAKE: " \
 							"more is negative\n");
 					return;
 				}
 				if (peasycap->audio_buffer_page_many <= \
 							peasycap->audio_fill) {
-					SAY("ERROR: bad " \
+					SAM("ERROR: bad " \
 						"peasycap->audio_fill\n");
 					return;
 				}
@@ -355,7 +405,7 @@
 							[peasycap->audio_fill];
 				if (PAGE_SIZE < (paudio_buffer->pto - \
 						paudio_buffer->pgo)) {
-					SAY("ERROR: bad paudio_buffer->pto\n");
+					SAM("ERROR: bad paudio_buffer->pto\n");
 					return;
 				}
 				if (PAGE_SIZE == (paudio_buffer->pto - \
@@ -374,7 +424,7 @@
 							peasycap->audio_fill)
 						peasycap->audio_fill = 0;
 
-					JOT(12, "bumped peasycap->" \
+					JOM(12, "bumped peasycap->" \
 							"audio_fill to %i\n", \
 							peasycap->audio_fill);
 
@@ -387,7 +437,7 @@
 					if (!(peasycap->audio_fill % \
 						peasycap->\
 						audio_pages_per_fragment)) {
-						JOT(12, "wakeup call on wq_" \
+						JOM(12, "wakeup call on wq_" \
 						"audio, %i=frag reading  %i" \
 						"=fragment fill\n", \
 						(peasycap->audio_read / \
@@ -414,7 +464,7 @@
 				} else {
 #if defined(UPSAMPLE)
 					if (much % 16)
-						JOT(8, "MISTAKE? much" \
+						JOM(8, "MISTAKE? much" \
 						" is not divisible by 16\n");
 					if (much > (16 * \
 							more))
@@ -468,7 +518,7 @@
 			}
 		}
 	} else {
-		JOT(12, "discarding audio samples because " \
+		JOM(12, "discarding audio samples because " \
 			"%i=purb->iso_frame_desc[i].status\n", \
 				purb->iso_frame_desc[i].status);
 	}
@@ -486,38 +536,50 @@
 if (peasycap->audio_isoc_streaming) {
 	rc = usb_submit_urb(purb, GFP_ATOMIC);
 	if (0 != rc) {
-		SAY("ERROR: while %i=audio_idle, usb_submit_urb() failed " \
+		if (-ENODEV != rc) {
+			SAM("ERROR: while %i=audio_idle, " \
+					"usb_submit_urb() failed " \
 					"with rc:\n", peasycap->audio_idle);
+		}
 		switch (rc) {
 		case -ENOMEM: {
-			SAY("ENOMEM\n");    break;
+			SAM("-ENOMEM\n");
+			break;
 		}
 		case -ENODEV: {
-			SAY("ENODEV\n");    break;
+			break;
 		}
 		case -ENXIO: {
-			SAY("ENXIO\n");     break;
+			SAM("-ENXIO\n");
+			break;
 		}
 		case -EINVAL: {
-			SAY("EINVAL\n");    break;
+			SAM("-EINVAL\n");
+			break;
 		}
 		case -EAGAIN: {
-			SAY("EAGAIN\n");    break;
+			SAM("-EAGAIN\n");
+			break;
 		}
 		case -EFBIG: {
-			SAY("EFBIG\n");     break;
+			SAM("-EFBIG\n");
+			break;
 		}
 		case -EPIPE: {
-			SAY("EPIPE\n");     break;
+			SAM("-EPIPE\n");
+			break;
 		}
 		case -EMSGSIZE: {
-			SAY("EMSGSIZE\n");  break;
+			SAM("-EMSGSIZE\n");
+			break;
 		}
 		case -ENOSPC: {
-			SAY("ENOSPC\n");  break;
+			SAM("-ENOSPC\n");
+			break;
 		}
 		default: {
-			SAY("0x%08X\n", rc); break;
+			SAM("unknown error: 0x%08X\n", rc);
+			break;
 		}
 		}
 	}
@@ -529,8 +591,7 @@
 /*
  *  THE AUDIO URBS ARE SUBMITTED AT THIS EARLY STAGE SO THAT IT IS POSSIBLE TO
  *  STREAM FROM /dev/easysnd1 WITH SIMPLE PROGRAMS SUCH AS cat WHICH DO NOT
- *  HAVE AN IOCTL INTERFACE.  THE VIDEO URBS, BY CONTRAST, MUST BE SUBMITTED
- *  MUCH LATER: SEE COMMENTS IN FILE easycap_main.c.
+ *  HAVE AN IOCTL INTERFACE.
  */
 /*---------------------------------------------------------------------------*/
 int
@@ -539,8 +600,15 @@
 struct usb_interface *pusb_interface;
 struct easycap *peasycap;
 int subminor, rc;
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#if defined(EASYCAP_IS_VIDEODEV_CLIENT)
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+struct v4l2_device *pv4l2_device;
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
 
-JOT(4, "begins.\n");
+JOT(4, "begins\n");
 
 subminor = iminor(inode);
 
@@ -556,70 +624,90 @@
 	SAY("ending unsuccessfully\n");
 	return -1;
 }
+/*---------------------------------------------------------------------------*/
+#if (!defined(EASYCAP_IS_VIDEODEV_CLIENT))
+#
+/*vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv*/
+#else
+#if defined(EASYCAP_NEEDS_V4L2_DEVICE_H)
+/*---------------------------------------------------------------------------*/
+/*
+ *  SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
+ *  BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
+ *  REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
+ *  TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
+*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	pv4l2_device = usb_get_intfdata(pusb_interface);
+	if ((struct v4l2_device *)NULL == pv4l2_device) {
+		SAY("ERROR: pv4l2_device is NULL\n");
+		return -EFAULT;
+	}
+	peasycap = (struct easycap *) \
+		container_of(pv4l2_device, struct easycap, v4l2_device);
+}
+#endif /*EASYCAP_NEEDS_V4L2_DEVICE_H*/
+#
+#endif /*EASYCAP_IS_VIDEODEV_CLIENT*/
+/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
+/*---------------------------------------------------------------------------*/
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return -EFAULT;
+}
+/*---------------------------------------------------------------------------*/
 
 file->private_data = peasycap;
 
 /*---------------------------------------------------------------------------*/
 /*
- *  INITIALIZATION.
+ *  INITIALIZATION
  */
 /*---------------------------------------------------------------------------*/
-JOT(4, "starting initialization\n");
+JOM(4, "starting initialization\n");
 
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
-	return -EFAULT;
-} else {
-	JOT(16, "0x%08lX=peasycap->pusb_device\n", \
-					(long int)peasycap->pusb_device);
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
+	return -ENODEV;
 }
+JOM(16, "0x%08lX=peasycap->pusb_device\n", (long int)peasycap->pusb_device);
 
 rc = audio_setup(peasycap);
 if (0 <= rc)
-	JOT(8, "audio_setup() returned %i\n", rc);
+	JOM(8, "audio_setup() returned %i\n", rc);
 else
-	JOT(8, "easysnd open(): ERROR: audio_setup() returned %i\n", rc);
+	JOM(8, "easysnd open(): ERROR: audio_setup() returned %i\n", rc);
 
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device has become NULL\n");
-	return -EFAULT;
-}
-rc = adjust_volume(peasycap, -8192);
-if (0 != rc) {
-	SAY("ERROR: adjust_volume(default) returned %i\n", rc);
-	return -EFAULT;
+	SAM("ERROR: peasycap->pusb_device has become NULL\n");
+	return -ENODEV;
 }
 /*---------------------------------------------------------------------------*/
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device has become NULL\n");
-	return -EFAULT;
+	SAM("ERROR: peasycap->pusb_device has become NULL\n");
+	return -ENODEV;
 }
 rc = usb_set_interface(peasycap->pusb_device, peasycap->audio_interface, \
 					peasycap->audio_altsetting_on);
-JOT(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface, \
+JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", peasycap->audio_interface, \
 					peasycap->audio_altsetting_on, rc);
 
-if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device has become NULL\n");
-	return -EFAULT;
-}
 rc = wakeup_device(peasycap->pusb_device);
 if (0 == rc)
-	JOT(8, "wakeup_device() returned %i\n", rc);
+	JOM(8, "wakeup_device() returned %i\n", rc);
 else
-	JOT(8, "easysnd open(): ERROR: wakeup_device() returned %i\n", rc);
+	JOM(8, "ERROR: wakeup_device() returned %i\n", rc);
 
-if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device has become NULL\n");
-	return -EFAULT;
-}
-submit_audio_urbs(peasycap);
+peasycap->audio_eof = 0;
 peasycap->audio_idle = 0;
 
 peasycap->timeval1.tv_sec  = 0;
 peasycap->timeval1.tv_usec = 0;
 
-JOT(4, "finished initialization\n");
+submit_audio_urbs(peasycap);
+
+JOM(4, "finished initialization\n");
 return 0;
 }
 /*****************************************************************************/
@@ -635,11 +723,15 @@
 	SAY("ERROR:  peasycap is NULL.\n");
 	return -EFAULT;
 }
-if (0 != kill_audio_urbs(peasycap)) {
-	SAY("ERROR: kill_audio_urbs() failed\n");
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
 	return -EFAULT;
 }
-JOT(4, "ending successfully\n");
+if (0 != kill_audio_urbs(peasycap)) {
+	SAM("ERROR: kill_audio_urbs() failed\n");
+	return -EFAULT;
+}
+JOM(4, "ending successfully\n");
 return 0;
 }
 /*****************************************************************************/
@@ -648,12 +740,11 @@
 						size_t kount, loff_t *poff)
 {
 struct timeval timeval;
-static struct timeval timeval1;
-static long long int audio_bytes, above, below, mean;
+long long int above, below, mean;
 struct signed_div_result sdr;
 unsigned char *p0;
 long int kount1, more, rc, l0, lm;
-int fragment;
+int fragment, kd;
 struct easycap *peasycap;
 struct data_buffer *pdata_buffer;
 size_t szret;
@@ -671,23 +762,89 @@
 
 JOT(8, "===== easysnd_read(): kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
 
-peasycap = (struct easycap *)(file->private_data);
+if (NULL == file) {
+	SAY("ERROR:  file is NULL\n");
+	return -ERESTARTSYS;
+}
+peasycap = file->private_data;
 if (NULL == peasycap) {
 	SAY("ERROR in easysnd_read(): peasycap is NULL\n");
 	return -EFAULT;
 }
+if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+	SAY("ERROR: bad peasycap: 0x%08lX\n", (unsigned long int) peasycap);
+	return -EFAULT;
+}
+if (NULL == peasycap->pusb_device) {
+	SAY("ERROR in easysnd_read(): peasycap->pusb_device is NULL\n");
+	return -EFAULT;
+}
+kd = isdongle(peasycap);
+if (0 <= kd && DONGLE_MANY > kd) {
+	if (mutex_lock_interruptible(&(easycap_dongle[kd].mutex_audio))) {
+		SAY("ERROR: cannot lock easycap_dongle[%i].mutex_audio\n", kd);
+		return -ERESTARTSYS;
+	}
+	JOM(4, "locked easycap_dongle[%i].mutex_audio\n", kd);
 /*---------------------------------------------------------------------------*/
+/*
+ *  MEANWHILE, easycap_usb_disconnect() MAY HAVE FREED POINTER peasycap,
+ *  IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
+ *  IF NECESSARY, BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+	if (kd != isdongle(peasycap))
+		return -ERESTARTSYS;
+	if (NULL == file) {
+		SAY("ERROR:  file is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+	peasycap = file->private_data;
+	if (NULL == peasycap) {
+		SAY("ERROR:  peasycap is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+	if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
+		SAY("ERROR: bad peasycap: 0x%08lX\n", \
+						(unsigned long int) peasycap);
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+	if (NULL == peasycap->pusb_device) {
+		SAM("ERROR: peasycap->pusb_device is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
+		return -ERESTARTSYS;
+	}
+} else {
+/*---------------------------------------------------------------------------*/
+/*
+ *  IF easycap_usb_disconnect() HAS ALREADY FREED POINTER peasycap BEFORE THE
+ *  ATTEMPT TO ACQUIRE THE SEMAPHORE, isdongle() WILL HAVE FAILED.  BAIL OUT.
+*/
+/*---------------------------------------------------------------------------*/
+	return -ERESTARTSYS;
+}
+/*---------------------------------------------------------------------------*/
+if (file->f_flags & O_NONBLOCK)
+	JOT(16, "NONBLOCK  kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
+else
+	JOT(8, "BLOCKING  kount=%i, *poff=%i\n", (int)kount, (int)(*poff));
+
 if ((0 > peasycap->audio_read) || \
 		(peasycap->audio_buffer_page_many <= peasycap->audio_read)) {
-	SAY("ERROR: peasycap->audio_read out of range\n");
+	SAM("ERROR: peasycap->audio_read out of range\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_audio);
 	return -EFAULT;
 }
 pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
 if ((struct data_buffer *)NULL == pdata_buffer) {
-	SAY("ERROR: pdata_buffer is NULL\n");
+	SAM("ERROR: pdata_buffer is NULL\n");
+	mutex_unlock(&easycap_dongle[kd].mutex_audio);
 	return -EFAULT;
 }
-JOT(12, "before wait, %i=frag read  %i=frag fill\n", \
+JOM(12, "before wait, %i=frag read  %i=frag fill\n", \
 		(peasycap->audio_read / peasycap->audio_pages_per_fragment), \
 		(peasycap->audio_fill / peasycap->audio_pages_per_fragment));
 fragment = (peasycap->audio_read / peasycap->audio_pages_per_fragment);
@@ -695,7 +852,8 @@
 				peasycap->audio_pages_per_fragment)) || \
 		(0 == (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo)))) {
 	if (file->f_flags & O_NONBLOCK) {
-		JOT(16, "returning -EAGAIN as instructed\n");
+		JOM(16, "returning -EAGAIN as instructed\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EAGAIN;
 	}
 	rc = wait_event_interruptible(peasycap->wq_audio, \
@@ -704,50 +862,56 @@
 				peasycap->audio_pages_per_fragment)) && \
 		(0 < (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo))))));
 	if (0 != rc) {
-		SAY("aborted by signal\n");
+		SAM("aborted by signal\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -ERESTARTSYS;
 	}
 	if (peasycap->audio_eof) {
-		JOT(8, "returning 0 because  %i=audio_eof\n", \
+		JOM(8, "returning 0 because  %i=audio_eof\n", \
 							peasycap->audio_eof);
 		kill_audio_urbs(peasycap);
-		msleep(500);
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return 0;
 	}
 	if (peasycap->audio_idle) {
-		JOT(16, "returning 0 because  %i=audio_idle\n", \
+		JOM(16, "returning 0 because  %i=audio_idle\n", \
 							peasycap->audio_idle);
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return 0;
 	}
 	if (!peasycap->audio_isoc_streaming) {
-		JOT(16, "returning 0 because audio urbs not streaming\n");
+		JOM(16, "returning 0 because audio urbs not streaming\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return 0;
 	}
 }
-JOT(12, "after  wait, %i=frag read  %i=frag fill\n", \
+JOM(12, "after  wait, %i=frag read  %i=frag fill\n", \
 		(peasycap->audio_read / peasycap->audio_pages_per_fragment), \
 		(peasycap->audio_fill / peasycap->audio_pages_per_fragment));
 szret = (size_t)0;
 while (fragment == (peasycap->audio_read / \
 				peasycap->audio_pages_per_fragment)) {
 	if (NULL == pdata_buffer->pgo) {
-		SAY("ERROR: pdata_buffer->pgo is NULL\n");
+		SAM("ERROR: pdata_buffer->pgo is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
 	}
 	if (NULL == pdata_buffer->pto) {
-		SAY("ERROR: pdata_buffer->pto is NULL\n");
+		SAM("ERROR: pdata_buffer->pto is NULL\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
 	}
 	kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
 	if (0 > kount1) {
-		SAY("easysnd_read: MISTAKE: kount1 is negative\n");
+		SAM("easysnd_read: MISTAKE: kount1 is negative\n");
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -ERESTARTSYS;
 	}
 	if (!kount1) {
 		(peasycap->audio_read)++;
 		if (peasycap->audio_buffer_page_many <= peasycap->audio_read)
 			peasycap->audio_read = 0;
-		JOT(12, "bumped peasycap->audio_read to %i\n", \
+		JOM(12, "bumped peasycap->audio_read to %i\n", \
 						peasycap->audio_read);
 
 		if (fragment != (peasycap->audio_read / \
@@ -757,30 +921,34 @@
 		if ((0 > peasycap->audio_read) || \
 			(peasycap->audio_buffer_page_many <= \
 					peasycap->audio_read)) {
-			SAY("ERROR: peasycap->audio_read out of range\n");
+			SAM("ERROR: peasycap->audio_read out of range\n");
+			mutex_unlock(&easycap_dongle[kd].mutex_audio);
 			return -EFAULT;
 		}
 		pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
 		if ((struct data_buffer *)NULL == pdata_buffer) {
-			SAY("ERROR: pdata_buffer is NULL\n");
+			SAM("ERROR: pdata_buffer is NULL\n");
+			mutex_unlock(&easycap_dongle[kd].mutex_audio);
 			return -EFAULT;
 		}
 		if (NULL == pdata_buffer->pgo) {
-			SAY("ERROR: pdata_buffer->pgo is NULL\n");
+			SAM("ERROR: pdata_buffer->pgo is NULL\n");
+			mutex_unlock(&easycap_dongle[kd].mutex_audio);
 			return -EFAULT;
 		}
 		if (NULL == pdata_buffer->pto) {
-			SAY("ERROR: pdata_buffer->pto is NULL\n");
+			SAM("ERROR: pdata_buffer->pto is NULL\n");
+			mutex_unlock(&easycap_dongle[kd].mutex_audio);
 			return -EFAULT;
 		}
 		kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
 	}
-	JOT(12, "ready  to send %li bytes\n", (long int) kount1);
-	JOT(12, "still  to send %li bytes\n", (long int) kount);
+	JOM(12, "ready  to send %li bytes\n", (long int) kount1);
+	JOM(12, "still  to send %li bytes\n", (long int) kount);
 	more = kount1;
 	if (more > kount)
 		more = kount;
-	JOT(12, "agreed to send %li bytes from page %i\n", \
+	JOM(12, "agreed to send %li bytes from page %i\n", \
 						more, peasycap->audio_read);
 	if (!more)
 		break;
@@ -798,7 +966,8 @@
 /*---------------------------------------------------------------------------*/
 	rc = copy_to_user(puserspacebuffer, pdata_buffer->pto, more);
 	if (0 != rc) {
-		SAY("ERROR: copy_to_user() returned %li\n", rc);
+		SAM("ERROR: copy_to_user() returned %li\n", rc);
+		mutex_unlock(&easycap_dongle[kd].mutex_audio);
 		return -EFAULT;
 	}
 	*poff += (loff_t)more;
@@ -807,11 +976,11 @@
 	puserspacebuffer += more;
 	kount -= (size_t)more;
 }
-JOT(12, "after  read, %i=frag read  %i=frag fill\n", \
+JOM(12, "after  read, %i=frag read  %i=frag fill\n", \
 		(peasycap->audio_read / peasycap->audio_pages_per_fragment), \
 		(peasycap->audio_fill / peasycap->audio_pages_per_fragment));
 if (kount < 0) {
-	SAY("MISTAKE:  %li=kount  %li=szret\n", \
+	SAM("MISTAKE:  %li=kount  %li=szret\n", \
 					(long int)kount, (long int)szret);
 }
 /*---------------------------------------------------------------------------*/
@@ -827,11 +996,11 @@
 	mean = peasycap->audio_niveau;
 	sdr = signed_div(mean, peasycap->audio_sample);
 
-	JOT(8, "%8lli=mean  %8lli=meansquare after %lli samples, =>\n", \
+	JOM(8, "%8lli=mean  %8lli=meansquare after %lli samples, =>\n", \
 				sdr.quotient, above, peasycap->audio_sample);
 
 	sdr = signed_div(above, 32768);
-	JOT(8, "audio dynamic range is roughly %lli\n", sdr.quotient);
+	JOM(8, "audio dynamic range is roughly %lli\n", sdr.quotient);
 }
 /*---------------------------------------------------------------------------*/
 /*
@@ -840,33 +1009,28 @@
 /*---------------------------------------------------------------------------*/
 do_gettimeofday(&timeval);
 if (!peasycap->timeval1.tv_sec) {
-	audio_bytes = 0;
-	timeval1 = timeval;
-
-	if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
-		return -ERESTARTSYS;
-	peasycap->timeval1 = timeval1;
-	mutex_unlock(&(peasycap->mutex_timeval1));
+	peasycap->audio_bytes = 0;
+	peasycap->timeval3 = timeval;
+	peasycap->timeval1 = peasycap->timeval3;
 	sdr.quotient = 192000;
 } else {
-	audio_bytes += (long long int) szret;
+	peasycap->audio_bytes += (long long int) szret;
 	below = ((long long int)(1000000)) * \
-		((long long int)(timeval.tv_sec  - timeval1.tv_sec)) + \
-		(long long int)(timeval.tv_usec - timeval1.tv_usec);
-	above = 1000000 * ((long long int) audio_bytes);
+		((long long int)(timeval.tv_sec  - \
+						peasycap->timeval3.tv_sec)) + \
+		(long long int)(timeval.tv_usec - peasycap->timeval3.tv_usec);
+	above = 1000000 * ((long long int) peasycap->audio_bytes);
 
 	if (below)
 		sdr = signed_div(above, below);
 	else
 		sdr.quotient = 192000;
 }
-JOT(8, "audio streaming at %lli bytes/second\n", sdr.quotient);
-if (mutex_lock_interruptible(&(peasycap->mutex_timeval1)))
-	return -ERESTARTSYS;
+JOM(8, "audio streaming at %lli bytes/second\n", sdr.quotient);
 peasycap->dnbydt = sdr.quotient;
-mutex_unlock(&(peasycap->mutex_timeval1));
 
-JOT(8, "returning %li\n", (long int)szret);
+JOM(8, "returning %li\n", (long int)szret);
+mutex_unlock(&easycap_dongle[kd].mutex_audio);
 return szret;
 }
 /*****************************************************************************/
@@ -881,27 +1045,31 @@
 struct data_urb *pdata_urb;
 struct urb *purb;
 struct list_head *plist_head;
-int j, isbad, m, rc;
+int j, isbad, nospc, m, rc;
 int isbuf;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if ((struct list_head *)NULL == peasycap->purb_audio_head) {
-	SAY("ERROR: peasycap->urb_audio_head uninitialized\n");
+	SAM("ERROR: peasycap->urb_audio_head uninitialized\n");
 	return -EFAULT;
 }
 if ((struct usb_device *)NULL == peasycap->pusb_device) {
-	SAY("ERROR: peasycap->pusb_device is NULL\n");
+	SAM("ERROR: peasycap->pusb_device is NULL\n");
 	return -EFAULT;
 }
 if (!peasycap->audio_isoc_streaming) {
-	JOT(4, "initial submission of all audio urbs\n");
+	JOM(4, "initial submission of all audio urbs\n");
 	rc = usb_set_interface(peasycap->pusb_device,
 					peasycap->audio_interface, \
 					peasycap->audio_altsetting_on);
-	JOT(8, "usb_set_interface(.,%i,%i) returned %i\n", \
+	JOM(8, "usb_set_interface(.,%i,%i) returned %i\n", \
 					peasycap->audio_interface, \
 					peasycap->audio_altsetting_on, rc);
 
-	isbad = 0;  m = 0;
+	isbad = 0;  nospc = 0;  m = 0;
 	list_for_each(plist_head, (peasycap->purb_audio_head)) {
 		pdata_urb = list_entry(plist_head, struct data_urb, list_head);
 		if (NULL != pdata_urb) {
@@ -938,39 +1106,49 @@
 				rc = usb_submit_urb(purb, GFP_KERNEL);
 				if (0 != rc) {
 					isbad++;
-					SAY("ERROR: usb_submit_urb() failed" \
+					SAM("ERROR: usb_submit_urb() failed" \
 							" for urb with rc:\n");
 					switch (rc) {
 					case -ENOMEM: {
-						SAY("ENOMEM\n"); break;
+						SAM("-ENOMEM\n");
+						break;
 					}
 					case -ENODEV: {
-						SAY("ENODEV\n"); break;
+						SAM("-ENODEV\n");
+						break;
 					}
 					case -ENXIO: {
-						SAY("ENXIO\n"); break;
+						SAM("-ENXIO\n");
+						break;
 					}
 					case -EINVAL: {
-						SAY("EINVAL\n"); break;
+						SAM("-EINVAL\n");
+						break;
 					}
 					case -EAGAIN: {
-						SAY("EAGAIN\n"); break;
+						SAM("-EAGAIN\n");
+						break;
 					}
 					case -EFBIG: {
-						SAY("EFBIG\n"); break;
+						SAM("-EFBIG\n");
+						break;
 					}
 					case -EPIPE: {
-						SAY("EPIPE\n"); break;
+						SAM("-EPIPE\n");
+						break;
 					}
 					case -EMSGSIZE: {
-						SAY("EMSGSIZE\n"); break;
+						SAM("-EMSGSIZE\n");
+						break;
 					}
 					case -ENOSPC: {
-						SAY("ENOSPC\n"); break;
+						nospc++;
+						break;
 					}
 					default: {
-						SAY("unknown error code %i\n",\
-								 rc); break;
+						SAM("unknown error code %i\n",\
+								 rc);
+						break;
 					}
 					}
 				} else {
@@ -983,8 +1161,13 @@
 			isbad++;
 		}
 	}
+	if (nospc) {
+		SAM("-ENOSPC=usb_submit_urb() for %i urbs\n", nospc);
+		SAM(".....  possibly inadequate USB bandwidth\n");
+		peasycap->audio_eof = 1;
+	}
 	if (isbad) {
-		JOT(4, "attempting cleanup instead of submitting\n");
+		JOM(4, "attempting cleanup instead of submitting\n");
 		list_for_each(plist_head, (peasycap->purb_audio_head)) {
 			pdata_urb = list_entry(plist_head, struct data_urb, \
 								list_head);
@@ -997,10 +1180,10 @@
 		peasycap->audio_isoc_streaming = 0;
 	} else {
 		peasycap->audio_isoc_streaming = 1;
-		JOT(4, "submitted %i audio urbs\n", m);
+		JOM(4, "submitted %i audio urbs\n", m);
 	}
 } else
-	JOT(4, "already streaming audio urbs\n");
+	JOM(4, "already streaming audio urbs\n");
 
 return 0;
 }
@@ -1017,10 +1200,14 @@
 struct list_head *plist_head;
 struct data_urb *pdata_urb;
 
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return -EFAULT;
+}
 if (peasycap->audio_isoc_streaming) {
 	if ((struct list_head *)NULL != peasycap->purb_audio_head) {
 		peasycap->audio_isoc_streaming = 0;
-		JOT(4, "killing audio urbs\n");
+		JOM(4, "killing audio urbs\n");
 		m = 0;
 		list_for_each(plist_head, (peasycap->purb_audio_head)) {
 			pdata_urb = list_entry(plist_head, struct data_urb,
@@ -1032,13 +1219,13 @@
 				}
 			}
 		}
-		JOT(4, "%i audio urbs killed\n", m);
+		JOM(4, "%i audio urbs killed\n", m);
 	} else {
-		SAY("ERROR: peasycap->purb_audio_head is NULL\n");
+		SAM("ERROR: peasycap->purb_audio_head is NULL\n");
 		return -EFAULT;
 	}
 } else {
-	JOT(8, "%i=audio_isoc_streaming, no audio urbs killed\n", \
+	JOM(8, "%i=audio_isoc_streaming, no audio urbs killed\n", \
 					peasycap->audio_isoc_streaming);
 }
 return 0;
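
The bytes-per-second figure logged near the end of easysnd_read() is simply the cumulative byte count scaled by the elapsed microseconds, with 192000 used as a fallback before any time has passed. A self-contained arithmetic sketch of that calculation in userspace C; the sample values are invented for illustration.

#include <stdio.h>

int main(void)
{
	/* Stand-ins for peasycap->audio_bytes and the interval between the
	 * current do_gettimeofday() reading and peasycap->timeval3. */
	long long audio_bytes  = 96000;		/* bytes copied to userspace so far */
	long long elapsed_usec = 500000;	/* half a second of streaming       */
	long long rate;

	if (elapsed_usec)
		rate = (1000000LL * audio_bytes) / elapsed_usec;
	else
		rate = 192000;			/* fallback used by the driver */

	printf("audio streaming at %lld bytes/second\n", rate);
	return 0;
}
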
diff --git a/drivers/staging/easycap/easycap_testcard.c b/drivers/staging/easycap/easycap_testcard.c
index 3c2ce28..e27dfe9 100644
--- a/drivers/staging/easycap/easycap_testcard.c
+++ b/drivers/staging/easycap/easycap_testcard.c
@@ -29,37 +29,69 @@
 #include "easycap_debug.h"
 
 /*****************************************************************************/
-#define TESTCARD_BYTESPERLINE (2 * 1440)
+#define TESTCARD_BYTESPERLINE (2 * 720)
 void
-easycap_testcard(struct easycap *peasycap, int field_fill)
+easycap_testcard(struct easycap *peasycap, int field)
 {
 int total;
 int y, u, v, r, g, b;
 unsigned char uyvy[4];
-
-int i1, line, k, m, n, more, much, barwidth;
+int i1, line, k, m, n, more, much, barwidth, barheight;
 unsigned char bfbar[TESTCARD_BYTESPERLINE / 8], *p1, *p2;
 struct data_buffer *pfield_buffer;
 
-JOT(8, "%i=field_fill\n", field_fill);
-
-if ((TESTCARD_BYTESPERLINE / 2) < peasycap->width) {
-	SAY("ERROR: image is too wide\n");
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
 	return;
 }
-if (peasycap->width % 16) {
-	SAY("ERROR: indivisible image width\n");
+JOM(8, "%i=field\n", field);
+switch (peasycap->width) {
+case 720:
+case 360: {
+	barwidth = (2 * 720) / 8;
+	break;
+}
+case 704:
+case 352: {
+	barwidth = (2 * 704) / 8;
+	break;
+}
+case 640:
+case 320: {
+	barwidth = (2 * 640) / 8;
+	break;
+}
+default: {
+	SAM("ERROR:  cannot set barwidth\n");
 	return;
 }
-
+}
+if (TESTCARD_BYTESPERLINE < barwidth) {
+	SAM("ERROR: barwidth is too large\n");
+	return;
+}
+switch (peasycap->height) {
+case 576:
+case 288: {
+	barheight = 576;
+	break;
+}
+case 480:
+case 240: {
+	barheight = 480;
+	break;
+}
+default: {
+	SAM("ERROR: cannot set barheight\n");
+	return;
+}
+}
 total = 0;
-barwidth = (2 * peasycap->width) / 8;
-
-k = field_fill;
+k = field;
 m = 0;
 n = 0;
 
-for (line = 0;  line < (peasycap->height / 2);  line++) {
+for (line = 0;  line < (barheight / 2);  line++) {
 	for (i1 = 0;  i1 < 8;  i1++) {
 		r = (i1 * 256)/8;
 		g = (i1 * 256)/8;
@@ -88,15 +120,15 @@
 
 		while (more) {
 			if ((FIELD_BUFFER_SIZE/PAGE_SIZE) <= m) {
-				SAY("ERROR:  bad m reached\n");
+				SAM("ERROR:  bad m reached\n");
 				return;
 			}
 		if (PAGE_SIZE < n) {
-			SAY("ERROR:  bad n reached\n"); return;
+			SAM("ERROR:  bad n reached\n"); return;
 		}
 
 		if (0 > more) {
-			SAY("ERROR:  internal fault\n");
+			SAM("ERROR:  internal fault\n");
 			return;
 		}
 
@@ -117,10 +149,6 @@
 		}
 	}
 }
-
-JOT(8, "%i=total\n", total);
-if (total != peasycap->width * peasycap->height)
-	SAY("ERROR: wrong number of bytes written:  %i\n", total);
 return;
 }
 /*****************************************************************************/
@@ -157,35 +185,35 @@
 		printf("%6i, ", i2);  printf("%6i\n};\n", i2);
 		}
 	}
-return(0);
+return 0;
 }
 -----------------------------------------------------------------------------*/
 int tones[2048] = {
-     0,     0,   502,   502,  1004,  1004,  1505,  1505,  2005,  2005,
-  2503,  2503,  2998,  2998,  3491,  3491,  3980,  3980,  4466,  4466,
-  4948,  4948,  5424,  5424,  5896,  5896,  6362,  6362,  6822,  6822,
-  7276,  7276,  7723,  7723,  8162,  8162,  8594,  8594,  9018,  9018,
-  9434,  9434,  9840,  9840, 10237, 10237, 10625, 10625, 11002, 11002,
- 11370, 11370, 11726, 11726, 12072, 12072, 12406, 12406, 12728, 12728,
- 13038, 13038, 13337, 13337, 13622, 13622, 13895, 13895, 14155, 14155,
- 14401, 14401, 14634, 14634, 14853, 14853, 15058, 15058, 15249, 15249,
- 15426, 15426, 15588, 15588, 15735, 15735, 15868, 15868, 15985, 15985,
- 16088, 16088, 16175, 16175, 16248, 16248, 16305, 16305, 16346, 16346,
- 16372, 16372, 16383, 16383, 16379, 16379, 16359, 16359, 16323, 16323,
- 16272, 16272, 16206, 16206, 16125, 16125, 16028, 16028, 15917, 15917,
- 15790, 15790, 15649, 15649, 15492, 15492, 15322, 15322, 15136, 15136,
- 14937, 14937, 14723, 14723, 14496, 14496, 14255, 14255, 14001, 14001,
- 13733, 13733, 13452, 13452, 13159, 13159, 12854, 12854, 12536, 12536,
- 12207, 12207, 11866, 11866, 11513, 11513, 11150, 11150, 10777, 10777,
- 10393, 10393, 10000, 10000,  9597,  9597,  9185,  9185,  8765,  8765,
-  8336,  8336,  7900,  7900,  7456,  7456,  7005,  7005,  6547,  6547,
-  6083,  6083,  5614,  5614,  5139,  5139,  4659,  4659,  4175,  4175,
-  3687,  3687,  3196,  3196,  2701,  2701,  2204,  2204,  1705,  1705,
-  1205,  1205,   703,   703,   201,   201,  -301,  -301,  -803,  -803,
- -1305, -1305, -1805, -1805, -2304, -2304, -2801, -2801, -3294, -3294,
- -3785, -3785, -4272, -4272, -4756, -4756, -5234, -5234, -5708, -5708,
- -6176, -6176, -6639, -6639, -7095, -7095, -7545, -7545, -7988, -7988,
- -8423, -8423, -8850, -8850, -9268, -9268, -9679, -9679, -10079, -10079,
+0,     0,   502,   502,  1004,  1004,  1505,  1505,  2005,  2005,
+2503,  2503,  2998,  2998,  3491,  3491,  3980,  3980,  4466,  4466,
+4948,  4948,  5424,  5424,  5896,  5896,  6362,  6362,  6822,  6822,
+7276,  7276,  7723,  7723,  8162,  8162,  8594,  8594,  9018,  9018,
+9434,  9434,  9840,  9840, 10237, 10237, 10625, 10625, 11002, 11002,
+11370, 11370, 11726, 11726, 12072, 12072, 12406, 12406, 12728, 12728,
+13038, 13038, 13337, 13337, 13622, 13622, 13895, 13895, 14155, 14155,
+14401, 14401, 14634, 14634, 14853, 14853, 15058, 15058, 15249, 15249,
+15426, 15426, 15588, 15588, 15735, 15735, 15868, 15868, 15985, 15985,
+16088, 16088, 16175, 16175, 16248, 16248, 16305, 16305, 16346, 16346,
+16372, 16372, 16383, 16383, 16379, 16379, 16359, 16359, 16323, 16323,
+16272, 16272, 16206, 16206, 16125, 16125, 16028, 16028, 15917, 15917,
+15790, 15790, 15649, 15649, 15492, 15492, 15322, 15322, 15136, 15136,
+14937, 14937, 14723, 14723, 14496, 14496, 14255, 14255, 14001, 14001,
+13733, 13733, 13452, 13452, 13159, 13159, 12854, 12854, 12536, 12536,
+12207, 12207, 11866, 11866, 11513, 11513, 11150, 11150, 10777, 10777,
+10393, 10393, 10000, 10000,  9597,  9597,  9185,  9185,  8765,  8765,
+8336,  8336,  7900,  7900,  7456,  7456,  7005,  7005,  6547,  6547,
+6083,  6083,  5614,  5614,  5139,  5139,  4659,  4659,  4175,  4175,
+3687,  3687,  3196,  3196,  2701,  2701,  2204,  2204,  1705,  1705,
+1205,  1205,   703,   703,   201,   201,  -301,  -301,  -803,  -803,
+-1305, -1305, -1805, -1805, -2304, -2304, -2801, -2801, -3294, -3294,
+-3785, -3785, -4272, -4272, -4756, -4756, -5234, -5234, -5708, -5708,
+-6176, -6176, -6639, -6639, -7095, -7095, -7545, -7545, -7988, -7988,
+-8423, -8423, -8850, -8850, -9268, -9268, -9679, -9679, -10079, -10079,
 -10471, -10471, -10853, -10853, -11224, -11224, -11585, -11585, -11935, -11935,
 -12273, -12273, -12600, -12600, -12916, -12916, -13219, -13219, -13510, -13510,
 -13788, -13788, -14053, -14053, -14304, -14304, -14543, -14543, -14767, -14767,
@@ -198,35 +226,35 @@
 -14353, -14353, -14104, -14104, -13842, -13842, -13566, -13566, -13278, -13278,
 -12977, -12977, -12665, -12665, -12340, -12340, -12003, -12003, -11656, -11656,
 -11297, -11297, -10928, -10928, -10548, -10548, -10159, -10159, -9759, -9759,
- -9351, -9351, -8934, -8934, -8509, -8509, -8075, -8075, -7634, -7634,
- -7186, -7186, -6731, -6731, -6269, -6269, -5802, -5802, -5329, -5329,
- -4852, -4852, -4369, -4369, -3883, -3883, -3393, -3393, -2900, -2900,
- -2404, -2404, -1905, -1905, -1405, -1405,  -904,  -904,  -402,  -402,
-   100,   100,   603,   603,  1105,  1105,  1605,  1605,  2105,  2105,
-  2602,  2602,  3097,  3097,  3589,  3589,  4078,  4078,  4563,  4563,
-  5043,  5043,  5519,  5519,  5990,  5990,  6455,  6455,  6914,  6914,
-  7366,  7366,  7811,  7811,  8249,  8249,  8680,  8680,  9102,  9102,
-  9516,  9516,  9920,  9920, 10315, 10315, 10701, 10701, 11077, 11077,
- 11442, 11442, 11796, 11796, 12139, 12139, 12471, 12471, 12791, 12791,
- 13099, 13099, 13395, 13395, 13678, 13678, 13948, 13948, 14205, 14205,
- 14449, 14449, 14679, 14679, 14895, 14895, 15098, 15098, 15286, 15286,
- 15459, 15459, 15618, 15618, 15763, 15763, 15892, 15892, 16007, 16007,
- 16107, 16107, 16191, 16191, 16260, 16260, 16314, 16314, 16353, 16353,
- 16376, 16376, 16384, 16384, 16376, 16376, 16353, 16353, 16314, 16314,
- 16260, 16260, 16191, 16191, 16107, 16107, 16007, 16007, 15892, 15892,
- 15763, 15763, 15618, 15618, 15459, 15459, 15286, 15286, 15098, 15098,
- 14895, 14895, 14679, 14679, 14449, 14449, 14205, 14205, 13948, 13948,
- 13678, 13678, 13395, 13395, 13099, 13099, 12791, 12791, 12471, 12471,
- 12139, 12139, 11796, 11796, 11442, 11442, 11077, 11077, 10701, 10701,
- 10315, 10315,  9920,  9920,  9516,  9516,  9102,  9102,  8680,  8680,
-  8249,  8249,  7811,  7811,  7366,  7366,  6914,  6914,  6455,  6455,
-  5990,  5990,  5519,  5519,  5043,  5043,  4563,  4563,  4078,  4078,
-  3589,  3589,  3097,  3097,  2602,  2602,  2105,  2105,  1605,  1605,
-  1105,  1105,   603,   603,   100,   100,  -402,  -402,  -904,  -904,
- -1405, -1405, -1905, -1905, -2404, -2404, -2900, -2900, -3393, -3393,
- -3883, -3883, -4369, -4369, -4852, -4852, -5329, -5329, -5802, -5802,
- -6269, -6269, -6731, -6731, -7186, -7186, -7634, -7634, -8075, -8075,
- -8509, -8509, -8934, -8934, -9351, -9351, -9759, -9759, -10159, -10159,
+-9351, -9351, -8934, -8934, -8509, -8509, -8075, -8075, -7634, -7634,
+-7186, -7186, -6731, -6731, -6269, -6269, -5802, -5802, -5329, -5329,
+-4852, -4852, -4369, -4369, -3883, -3883, -3393, -3393, -2900, -2900,
+-2404, -2404, -1905, -1905, -1405, -1405,  -904,  -904,  -402,  -402,
+100,   100,   603,   603,  1105,  1105,  1605,  1605,  2105,  2105,
+2602,  2602,  3097,  3097,  3589,  3589,  4078,  4078,  4563,  4563,
+5043,  5043,  5519,  5519,  5990,  5990,  6455,  6455,  6914,  6914,
+7366,  7366,  7811,  7811,  8249,  8249,  8680,  8680,  9102,  9102,
+9516,  9516,  9920,  9920, 10315, 10315, 10701, 10701, 11077, 11077,
+11442, 11442, 11796, 11796, 12139, 12139, 12471, 12471, 12791, 12791,
+13099, 13099, 13395, 13395, 13678, 13678, 13948, 13948, 14205, 14205,
+14449, 14449, 14679, 14679, 14895, 14895, 15098, 15098, 15286, 15286,
+15459, 15459, 15618, 15618, 15763, 15763, 15892, 15892, 16007, 16007,
+16107, 16107, 16191, 16191, 16260, 16260, 16314, 16314, 16353, 16353,
+16376, 16376, 16384, 16384, 16376, 16376, 16353, 16353, 16314, 16314,
+16260, 16260, 16191, 16191, 16107, 16107, 16007, 16007, 15892, 15892,
+15763, 15763, 15618, 15618, 15459, 15459, 15286, 15286, 15098, 15098,
+14895, 14895, 14679, 14679, 14449, 14449, 14205, 14205, 13948, 13948,
+13678, 13678, 13395, 13395, 13099, 13099, 12791, 12791, 12471, 12471,
+12139, 12139, 11796, 11796, 11442, 11442, 11077, 11077, 10701, 10701,
+10315, 10315,  9920,  9920,  9516,  9516,  9102,  9102,  8680,  8680,
+8249,  8249,  7811,  7811,  7366,  7366,  6914,  6914,  6455,  6455,
+5990,  5990,  5519,  5519,  5043,  5043,  4563,  4563,  4078,  4078,
+3589,  3589,  3097,  3097,  2602,  2602,  2105,  2105,  1605,  1605,
+1105,  1105,   603,   603,   100,   100,  -402,  -402,  -904,  -904,
+-1405, -1405, -1905, -1905, -2404, -2404, -2900, -2900, -3393, -3393,
+-3883, -3883, -4369, -4369, -4852, -4852, -5329, -5329, -5802, -5802,
+-6269, -6269, -6731, -6731, -7186, -7186, -7634, -7634, -8075, -8075,
+-8509, -8509, -8934, -8934, -9351, -9351, -9759, -9759, -10159, -10159,
 -10548, -10548, -10928, -10928, -11297, -11297, -11656, -11656, -12003, -12003,
 -12340, -12340, -12665, -12665, -12977, -12977, -13278, -13278, -13566, -13566,
 -13842, -13842, -14104, -14104, -14353, -14353, -14589, -14589, -14810, -14810,
@@ -239,35 +267,35 @@
 -14304, -14304, -14053, -14053, -13788, -13788, -13510, -13510, -13219, -13219,
 -12916, -12916, -12600, -12600, -12273, -12273, -11935, -11935, -11585, -11585,
 -11224, -11224, -10853, -10853, -10471, -10471, -10079, -10079, -9679, -9679,
- -9268, -9268, -8850, -8850, -8423, -8423, -7988, -7988, -7545, -7545,
- -7095, -7095, -6639, -6639, -6176, -6176, -5708, -5708, -5234, -5234,
- -4756, -4756, -4272, -4272, -3785, -3785, -3294, -3294, -2801, -2801,
- -2304, -2304, -1805, -1805, -1305, -1305,  -803,  -803,  -301,  -301,
-   201,   201,   703,   703,  1205,  1205,  1705,  1705,  2204,  2204,
-  2701,  2701,  3196,  3196,  3687,  3687,  4175,  4175,  4659,  4659,
-  5139,  5139,  5614,  5614,  6083,  6083,  6547,  6547,  7005,  7005,
-  7456,  7456,  7900,  7900,  8336,  8336,  8765,  8765,  9185,  9185,
-  9597,  9597, 10000, 10000, 10393, 10393, 10777, 10777, 11150, 11150,
- 11513, 11513, 11866, 11866, 12207, 12207, 12536, 12536, 12854, 12854,
- 13159, 13159, 13452, 13452, 13733, 13733, 14001, 14001, 14255, 14255,
- 14496, 14496, 14723, 14723, 14937, 14937, 15136, 15136, 15322, 15322,
- 15492, 15492, 15649, 15649, 15790, 15790, 15917, 15917, 16028, 16028,
- 16125, 16125, 16206, 16206, 16272, 16272, 16323, 16323, 16359, 16359,
- 16379, 16379, 16383, 16383, 16372, 16372, 16346, 16346, 16305, 16305,
- 16248, 16248, 16175, 16175, 16088, 16088, 15985, 15985, 15868, 15868,
- 15735, 15735, 15588, 15588, 15426, 15426, 15249, 15249, 15058, 15058,
- 14853, 14853, 14634, 14634, 14401, 14401, 14155, 14155, 13895, 13895,
- 13622, 13622, 13337, 13337, 13038, 13038, 12728, 12728, 12406, 12406,
- 12072, 12072, 11726, 11726, 11370, 11370, 11002, 11002, 10625, 10625,
- 10237, 10237,  9840,  9840,  9434,  9434,  9018,  9018,  8594,  8594,
-  8162,  8162,  7723,  7723,  7276,  7276,  6822,  6822,  6362,  6362,
-  5896,  5896,  5424,  5424,  4948,  4948,  4466,  4466,  3980,  3980,
-  3491,  3491,  2998,  2998,  2503,  2503,  2005,  2005,  1505,  1505,
-  1004,  1004,   502,   502,     0,     0,  -502,  -502, -1004, -1004,
- -1505, -1505, -2005, -2005, -2503, -2503, -2998, -2998, -3491, -3491,
- -3980, -3980, -4466, -4466, -4948, -4948, -5424, -5424, -5896, -5896,
- -6362, -6362, -6822, -6822, -7276, -7276, -7723, -7723, -8162, -8162,
- -8594, -8594, -9018, -9018, -9434, -9434, -9840, -9840, -10237, -10237,
+-9268, -9268, -8850, -8850, -8423, -8423, -7988, -7988, -7545, -7545,
+-7095, -7095, -6639, -6639, -6176, -6176, -5708, -5708, -5234, -5234,
+-4756, -4756, -4272, -4272, -3785, -3785, -3294, -3294, -2801, -2801,
+-2304, -2304, -1805, -1805, -1305, -1305,  -803,  -803,  -301,  -301,
+201,   201,   703,   703,  1205,  1205,  1705,  1705,  2204,  2204,
+2701,  2701,  3196,  3196,  3687,  3687,  4175,  4175,  4659,  4659,
+5139,  5139,  5614,  5614,  6083,  6083,  6547,  6547,  7005,  7005,
+7456,  7456,  7900,  7900,  8336,  8336,  8765,  8765,  9185,  9185,
+9597,  9597, 10000, 10000, 10393, 10393, 10777, 10777, 11150, 11150,
+11513, 11513, 11866, 11866, 12207, 12207, 12536, 12536, 12854, 12854,
+13159, 13159, 13452, 13452, 13733, 13733, 14001, 14001, 14255, 14255,
+14496, 14496, 14723, 14723, 14937, 14937, 15136, 15136, 15322, 15322,
+15492, 15492, 15649, 15649, 15790, 15790, 15917, 15917, 16028, 16028,
+16125, 16125, 16206, 16206, 16272, 16272, 16323, 16323, 16359, 16359,
+16379, 16379, 16383, 16383, 16372, 16372, 16346, 16346, 16305, 16305,
+16248, 16248, 16175, 16175, 16088, 16088, 15985, 15985, 15868, 15868,
+15735, 15735, 15588, 15588, 15426, 15426, 15249, 15249, 15058, 15058,
+14853, 14853, 14634, 14634, 14401, 14401, 14155, 14155, 13895, 13895,
+13622, 13622, 13337, 13337, 13038, 13038, 12728, 12728, 12406, 12406,
+12072, 12072, 11726, 11726, 11370, 11370, 11002, 11002, 10625, 10625,
+10237, 10237,  9840,  9840,  9434,  9434,  9018,  9018,  8594,  8594,
+8162,  8162,  7723,  7723,  7276,  7276,  6822,  6822,  6362,  6362,
+5896,  5896,  5424,  5424,  4948,  4948,  4466,  4466,  3980,  3980,
+3491,  3491,  2998,  2998,  2503,  2503,  2005,  2005,  1505,  1505,
+1004,  1004,   502,   502,     0,     0,  -502,  -502, -1004, -1004,
+-1505, -1505, -2005, -2005, -2503, -2503, -2998, -2998, -3491, -3491,
+-3980, -3980, -4466, -4466, -4948, -4948, -5424, -5424, -5896, -5896,
+-6362, -6362, -6822, -6822, -7276, -7276, -7723, -7723, -8162, -8162,
+-8594, -8594, -9018, -9018, -9434, -9434, -9840, -9840, -10237, -10237,
 -10625, -10625, -11002, -11002, -11370, -11370, -11726, -11726, -12072, -12072,
 -12406, -12406, -12728, -12728, -13038, -13038, -13337, -13337, -13622, -13622,
 -13895, -13895, -14155, -14155, -14401, -14401, -14634, -14634, -14853, -14853,
@@ -280,35 +308,35 @@
 -14255, -14255, -14001, -14001, -13733, -13733, -13452, -13452, -13159, -13159,
 -12854, -12854, -12536, -12536, -12207, -12207, -11866, -11866, -11513, -11513,
 -11150, -11150, -10777, -10777, -10393, -10393, -10000, -10000, -9597, -9597,
- -9185, -9185, -8765, -8765, -8336, -8336, -7900, -7900, -7456, -7456,
- -7005, -7005, -6547, -6547, -6083, -6083, -5614, -5614, -5139, -5139,
- -4659, -4659, -4175, -4175, -3687, -3687, -3196, -3196, -2701, -2701,
- -2204, -2204, -1705, -1705, -1205, -1205,  -703,  -703,  -201,  -201,
-   301,   301,   803,   803,  1305,  1305,  1805,  1805,  2304,  2304,
-  2801,  2801,  3294,  3294,  3785,  3785,  4272,  4272,  4756,  4756,
-  5234,  5234,  5708,  5708,  6176,  6176,  6639,  6639,  7095,  7095,
-  7545,  7545,  7988,  7988,  8423,  8423,  8850,  8850,  9268,  9268,
-  9679,  9679, 10079, 10079, 10471, 10471, 10853, 10853, 11224, 11224,
- 11585, 11585, 11935, 11935, 12273, 12273, 12600, 12600, 12916, 12916,
- 13219, 13219, 13510, 13510, 13788, 13788, 14053, 14053, 14304, 14304,
- 14543, 14543, 14767, 14767, 14978, 14978, 15175, 15175, 15357, 15357,
- 15525, 15525, 15678, 15678, 15817, 15817, 15940, 15940, 16049, 16049,
- 16142, 16142, 16221, 16221, 16284, 16284, 16331, 16331, 16364, 16364,
- 16381, 16381, 16382, 16382, 16368, 16368, 16339, 16339, 16294, 16294,
- 16234, 16234, 16159, 16159, 16069, 16069, 15963, 15963, 15842, 15842,
- 15707, 15707, 15557, 15557, 15392, 15392, 15212, 15212, 15018, 15018,
- 14810, 14810, 14589, 14589, 14353, 14353, 14104, 14104, 13842, 13842,
- 13566, 13566, 13278, 13278, 12977, 12977, 12665, 12665, 12340, 12340,
- 12003, 12003, 11656, 11656, 11297, 11297, 10928, 10928, 10548, 10548,
- 10159, 10159,  9759,  9759,  9351,  9351,  8934,  8934,  8509,  8509,
-  8075,  8075,  7634,  7634,  7186,  7186,  6731,  6731,  6269,  6269,
-  5802,  5802,  5329,  5329,  4852,  4852,  4369,  4369,  3883,  3883,
-  3393,  3393,  2900,  2900,  2404,  2404,  1905,  1905,  1405,  1405,
-   904,   904,   402,   402,  -100,  -100,  -603,  -603, -1105, -1105,
- -1605, -1605, -2105, -2105, -2602, -2602, -3097, -3097, -3589, -3589,
- -4078, -4078, -4563, -4563, -5043, -5043, -5519, -5519, -5990, -5990,
- -6455, -6455, -6914, -6914, -7366, -7366, -7811, -7811, -8249, -8249,
- -8680, -8680, -9102, -9102, -9516, -9516, -9920, -9920, -10315, -10315,
+-9185, -9185, -8765, -8765, -8336, -8336, -7900, -7900, -7456, -7456,
+-7005, -7005, -6547, -6547, -6083, -6083, -5614, -5614, -5139, -5139,
+-4659, -4659, -4175, -4175, -3687, -3687, -3196, -3196, -2701, -2701,
+-2204, -2204, -1705, -1705, -1205, -1205,  -703,  -703,  -201,  -201,
+301,   301,   803,   803,  1305,  1305,  1805,  1805,  2304,  2304,
+2801,  2801,  3294,  3294,  3785,  3785,  4272,  4272,  4756,  4756,
+5234,  5234,  5708,  5708,  6176,  6176,  6639,  6639,  7095,  7095,
+7545,  7545,  7988,  7988,  8423,  8423,  8850,  8850,  9268,  9268,
+9679,  9679, 10079, 10079, 10471, 10471, 10853, 10853, 11224, 11224,
+11585, 11585, 11935, 11935, 12273, 12273, 12600, 12600, 12916, 12916,
+13219, 13219, 13510, 13510, 13788, 13788, 14053, 14053, 14304, 14304,
+14543, 14543, 14767, 14767, 14978, 14978, 15175, 15175, 15357, 15357,
+15525, 15525, 15678, 15678, 15817, 15817, 15940, 15940, 16049, 16049,
+16142, 16142, 16221, 16221, 16284, 16284, 16331, 16331, 16364, 16364,
+16381, 16381, 16382, 16382, 16368, 16368, 16339, 16339, 16294, 16294,
+16234, 16234, 16159, 16159, 16069, 16069, 15963, 15963, 15842, 15842,
+15707, 15707, 15557, 15557, 15392, 15392, 15212, 15212, 15018, 15018,
+14810, 14810, 14589, 14589, 14353, 14353, 14104, 14104, 13842, 13842,
+13566, 13566, 13278, 13278, 12977, 12977, 12665, 12665, 12340, 12340,
+12003, 12003, 11656, 11656, 11297, 11297, 10928, 10928, 10548, 10548,
+10159, 10159,  9759,  9759,  9351,  9351,  8934,  8934,  8509,  8509,
+8075,  8075,  7634,  7634,  7186,  7186,  6731,  6731,  6269,  6269,
+5802,  5802,  5329,  5329,  4852,  4852,  4369,  4369,  3883,  3883,
+3393,  3393,  2900,  2900,  2404,  2404,  1905,  1905,  1405,  1405,
+904,   904,   402,   402,  -100,  -100,  -603,  -603, -1105, -1105,
+-1605, -1605, -2105, -2105, -2602, -2602, -3097, -3097, -3589, -3589,
+-4078, -4078, -4563, -4563, -5043, -5043, -5519, -5519, -5990, -5990,
+-6455, -6455, -6914, -6914, -7366, -7366, -7811, -7811, -8249, -8249,
+-8680, -8680, -9102, -9102, -9516, -9516, -9920, -9920, -10315, -10315,
 -10701, -10701, -11077, -11077, -11442, -11442, -11796, -11796, -12139, -12139,
 -12471, -12471, -12791, -12791, -13099, -13099, -13395, -13395, -13678, -13678,
 -13948, -13948, -14205, -14205, -14449, -14449, -14679, -14679, -14895, -14895,
@@ -321,35 +349,35 @@
 -14205, -14205, -13948, -13948, -13678, -13678, -13395, -13395, -13099, -13099,
 -12791, -12791, -12471, -12471, -12139, -12139, -11796, -11796, -11442, -11442,
 -11077, -11077, -10701, -10701, -10315, -10315, -9920, -9920, -9516, -9516,
- -9102, -9102, -8680, -8680, -8249, -8249, -7811, -7811, -7366, -7366,
- -6914, -6914, -6455, -6455, -5990, -5990, -5519, -5519, -5043, -5043,
- -4563, -4563, -4078, -4078, -3589, -3589, -3097, -3097, -2602, -2602,
- -2105, -2105, -1605, -1605, -1105, -1105,  -603,  -603,  -100,  -100,
-   402,   402,   904,   904,  1405,  1405,  1905,  1905,  2404,  2404,
-  2900,  2900,  3393,  3393,  3883,  3883,  4369,  4369,  4852,  4852,
-  5329,  5329,  5802,  5802,  6269,  6269,  6731,  6731,  7186,  7186,
-  7634,  7634,  8075,  8075,  8509,  8509,  8934,  8934,  9351,  9351,
-  9759,  9759, 10159, 10159, 10548, 10548, 10928, 10928, 11297, 11297,
- 11656, 11656, 12003, 12003, 12340, 12340, 12665, 12665, 12977, 12977,
- 13278, 13278, 13566, 13566, 13842, 13842, 14104, 14104, 14353, 14353,
- 14589, 14589, 14810, 14810, 15018, 15018, 15212, 15212, 15392, 15392,
- 15557, 15557, 15707, 15707, 15842, 15842, 15963, 15963, 16069, 16069,
- 16159, 16159, 16234, 16234, 16294, 16294, 16339, 16339, 16368, 16368,
- 16382, 16382, 16381, 16381, 16364, 16364, 16331, 16331, 16284, 16284,
- 16221, 16221, 16142, 16142, 16049, 16049, 15940, 15940, 15817, 15817,
- 15678, 15678, 15525, 15525, 15357, 15357, 15175, 15175, 14978, 14978,
- 14767, 14767, 14543, 14543, 14304, 14304, 14053, 14053, 13788, 13788,
- 13510, 13510, 13219, 13219, 12916, 12916, 12600, 12600, 12273, 12273,
- 11935, 11935, 11585, 11585, 11224, 11224, 10853, 10853, 10471, 10471,
- 10079, 10079,  9679,  9679,  9268,  9268,  8850,  8850,  8423,  8423,
-  7988,  7988,  7545,  7545,  7095,  7095,  6639,  6639,  6176,  6176,
-  5708,  5708,  5234,  5234,  4756,  4756,  4272,  4272,  3785,  3785,
-  3294,  3294,  2801,  2801,  2304,  2304,  1805,  1805,  1305,  1305,
-   803,   803,   301,   301,  -201,  -201,  -703,  -703, -1205, -1205,
- -1705, -1705, -2204, -2204, -2701, -2701, -3196, -3196, -3687, -3687,
- -4175, -4175, -4659, -4659, -5139, -5139, -5614, -5614, -6083, -6083,
- -6547, -6547, -7005, -7005, -7456, -7456, -7900, -7900, -8336, -8336,
- -8765, -8765, -9185, -9185, -9597, -9597, -10000, -10000, -10393, -10393,
+-9102, -9102, -8680, -8680, -8249, -8249, -7811, -7811, -7366, -7366,
+-6914, -6914, -6455, -6455, -5990, -5990, -5519, -5519, -5043, -5043,
+-4563, -4563, -4078, -4078, -3589, -3589, -3097, -3097, -2602, -2602,
+-2105, -2105, -1605, -1605, -1105, -1105,  -603,  -603,  -100,  -100,
+402,   402,   904,   904,  1405,  1405,  1905,  1905,  2404,  2404,
+2900,  2900,  3393,  3393,  3883,  3883,  4369,  4369,  4852,  4852,
+5329,  5329,  5802,  5802,  6269,  6269,  6731,  6731,  7186,  7186,
+7634,  7634,  8075,  8075,  8509,  8509,  8934,  8934,  9351,  9351,
+9759,  9759, 10159, 10159, 10548, 10548, 10928, 10928, 11297, 11297,
+11656, 11656, 12003, 12003, 12340, 12340, 12665, 12665, 12977, 12977,
+13278, 13278, 13566, 13566, 13842, 13842, 14104, 14104, 14353, 14353,
+14589, 14589, 14810, 14810, 15018, 15018, 15212, 15212, 15392, 15392,
+15557, 15557, 15707, 15707, 15842, 15842, 15963, 15963, 16069, 16069,
+16159, 16159, 16234, 16234, 16294, 16294, 16339, 16339, 16368, 16368,
+16382, 16382, 16381, 16381, 16364, 16364, 16331, 16331, 16284, 16284,
+16221, 16221, 16142, 16142, 16049, 16049, 15940, 15940, 15817, 15817,
+15678, 15678, 15525, 15525, 15357, 15357, 15175, 15175, 14978, 14978,
+14767, 14767, 14543, 14543, 14304, 14304, 14053, 14053, 13788, 13788,
+13510, 13510, 13219, 13219, 12916, 12916, 12600, 12600, 12273, 12273,
+11935, 11935, 11585, 11585, 11224, 11224, 10853, 10853, 10471, 10471,
+10079, 10079,  9679,  9679,  9268,  9268,  8850,  8850,  8423,  8423,
+7988,  7988,  7545,  7545,  7095,  7095,  6639,  6639,  6176,  6176,
+5708,  5708,  5234,  5234,  4756,  4756,  4272,  4272,  3785,  3785,
+3294,  3294,  2801,  2801,  2304,  2304,  1805,  1805,  1305,  1305,
+803,   803,   301,   301,  -201,  -201,  -703,  -703, -1205, -1205,
+-1705, -1705, -2204, -2204, -2701, -2701, -3196, -3196, -3687, -3687,
+-4175, -4175, -4659, -4659, -5139, -5139, -5614, -5614, -6083, -6083,
+-6547, -6547, -7005, -7005, -7456, -7456, -7900, -7900, -8336, -8336,
+-8765, -8765, -9185, -9185, -9597, -9597, -10000, -10000, -10393, -10393,
 -10777, -10777, -11150, -11150, -11513, -11513, -11866, -11866, -12207, -12207,
 -12536, -12536, -12854, -12854, -13159, -13159, -13452, -13452, -13733, -13733,
 -14001, -14001, -14255, -14255, -14496, -14496, -14723, -14723, -14937, -14937,
@@ -362,10 +390,10 @@
 -14155, -14155, -13895, -13895, -13622, -13622, -13337, -13337, -13038, -13038,
 -12728, -12728, -12406, -12406, -12072, -12072, -11726, -11726, -11370, -11370,
 -11002, -11002, -10625, -10625, -10237, -10237, -9840, -9840, -9434, -9434,
- -9018, -9018, -8594, -8594, -8162, -8162, -7723, -7723, -7276, -7276,
- -6822, -6822, -6362, -6362, -5896, -5896, -5424, -5424, -4948, -4948,
- -4466, -4466, -3980, -3980, -3491, -3491, -2998, -2998, -2503, -2503,
- -2005, -2005, -1505, -1505, -1004, -1004,  -502,  -502
+-9018, -9018, -8594, -8594, -8162, -8162, -7723, -7723, -7276, -7276,
+-6822, -6822, -6362, -6362, -5896, -5896, -5424, -5424, -4948, -4948,
+-4466, -4466, -3980, -3980, -3491, -3491, -2998, -2998, -2503, -2503,
+-2005, -2005, -1505, -1505, -1004, -1004,  -502,  -502
 };
 /*****************************************************************************/
 void
@@ -375,10 +403,12 @@
 unsigned char *p2;
 struct data_buffer *paudio_buffer;
 
-JOT(8, "%i=audio_fill\n", audio_fill);
-
+if (NULL == peasycap) {
+	SAY("ERROR: peasycap is NULL\n");
+	return;
+}
+JOM(8, "%i=audio_fill\n", audio_fill);
 paudio_buffer = &peasycap->audio_buffer[audio_fill];
-
 p2 = (unsigned char *)(paudio_buffer->pgo);
 for (i1 = 0;  i1 < PAGE_SIZE;  i1 += 4, p2 += 4) {
 	*p2       = (unsigned char) (0x00FF & tones[i1/2]);
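A reader's worked check of the barwidth figures in the easycap_testcard() hunk above (arithmetic added for clarity, not part of the patch): UYVY packs two bytes per pixel, so TESTCARD_BYTESPERLINE is 2 * 720 = 1440 bytes and the bfbar[] scratch buffer holds 1440 / 8 = 180 bytes. Splitting a line into eight colour bars gives exactly the values the switch selects, with the half-width modes reusing the full-width figure:

	720 or 360 pixels wide:  barwidth = (2 * 720) / 8 = 180 bytes
	704 or 352 pixels wide:  barwidth = (2 * 704) / 8 = 176 bytes
	640 or 320 pixels wide:  barwidth = (2 * 640) / 8 = 160 bytes

All three fit within the 180-byte bfbar[] buffer; the TESTCARD_BYTESPERLINE < barwidth guard above is a coarser defensive bound of 1440 bytes.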
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 10bcb45..f62ba7a 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -783,7 +783,7 @@
 	/* Retrieve the net_device pointer from the pci_dev struct, as well
 	 * as the private adapter struct
 	 */
-	netdev = (struct net_device *) pci_get_drvdata(pdev);
+	netdev = pci_get_drvdata(pdev);
 	adapter = netdev_priv(netdev);
 
 	/* Perform device cleanup */
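The et131x change above, and the long run of similar FT1000 changes further down, all drop casts on functions that return void *. A minimal illustration of the reasoning (a sketch, not code from the patch; pdev and dev stand for a struct pci_dev * and a struct net_device * assumed to be in scope):

	/* void * converts implicitly to any object pointer type in C, so the
	 * cast is redundant and can silently hide a changed return type.
	 */
	struct net_device *netdev = pci_get_drvdata(pdev);	/* preferred         */
	FT1000_INFO *info = netdev_priv(dev);			/* likewise, no cast */

Both pci_get_drvdata() and netdev_priv() return void *, so the compiler needs no help here; the explicit cast only adds noise.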
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index ef7fbf8..2babb03 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -89,7 +89,7 @@
 
 /* Use our own dbg macro */
 #define dbg_info(dev, format, arg...) do \
-    { if (debug) dev_info(dev , format , ## arg); } while (0)
+	{ if (debug) dev_info(dev , format , ## arg); } while (0)
 
 #define alphatrack_ocmd_info(dev, cmd, format, arg...)
 
@@ -769,7 +769,7 @@
 	}
 
 	dev->write_buffer =
-	    kmalloc(sizeof(struct alphatrack_ocmd) * true_size, GFP_KERNEL);
+	    kmalloc(true_size * sizeof(struct alphatrack_ocmd), GFP_KERNEL);
 
 	if (!dev->write_buffer) {
 		dev_err(&intf->dev, "Couldn't allocate write_buffer\n");
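The alphatrack change above only reorders the kmalloc() arguments into the kernel's conventional count * sizeof(element) form. As an aside (an alternative, not what the patch does), an array allocation like this can also be written with kcalloc(), which performs the multiplication with overflow checking and returns zeroed memory:

	/* Sketch only: allocate true_size elements of struct alphatrack_ocmd,
	 * zero-initialised; the NULL check mirrors the driver's.
	 */
	dev->write_buffer = kcalloc(true_size, sizeof(struct alphatrack_ocmd),
				    GFP_KERNEL);
	if (!dev->write_buffer)
		goto error;	/* hypothetical label; the real error path differs */

kcalloc(n, size, flags) is the long-standing kernel helper for exactly this shape of allocation.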
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index eed7e94..588afd5 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -132,7 +132,7 @@
 //---------------------------------------------------------------------------
 static inline u16 ft1000_read_fifo_len(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 
 	if (info->AsicID == ELECTRABUZZ_ID) {
 		return (ft1000_read_reg(dev, FT1000_REG_UFIFO_STAT) - 16);
@@ -155,7 +155,7 @@
 //---------------------------------------------------------------------------
 u16 ft1000_read_dpram(struct net_device * dev, int offset)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	unsigned long flags;
 	u16 data;
 
@@ -184,7 +184,7 @@
 static inline void ft1000_write_dpram(struct net_device *dev,
 					  int offset, u16 value)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	unsigned long flags;
 
 	// Provide mutual exclusive access while reading ASIC registers.
@@ -208,7 +208,7 @@
 //---------------------------------------------------------------------------
 u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	unsigned long flags;
 	u16 data;
 
@@ -242,7 +242,7 @@
 static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
 						 int offset, u16 value, int Index)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	unsigned long flags;
 
 	// Provide mutual exclusive access while reading ASIC registers.
@@ -270,7 +270,7 @@
 //---------------------------------------------------------------------------
 u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	unsigned long flags;
 	u32 data;
 
@@ -298,7 +298,7 @@
 //---------------------------------------------------------------------------
 void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	unsigned long flags;
 
 	// Provide mutual exclusive access while reading ASIC registers.
@@ -320,7 +320,7 @@
 //---------------------------------------------------------------------------
 static void ft1000_enable_interrupts(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 tempword;
 
 	DEBUG(1, "ft1000_hw:ft1000_enable_interrupts()\n");
@@ -345,7 +345,7 @@
 //---------------------------------------------------------------------------
 static void ft1000_disable_interrupts(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 tempword;
 
 	DEBUG(1, "ft1000_hw: ft1000_disable_interrupts()\n");
@@ -370,7 +370,7 @@
 //---------------------------------------------------------------------------
 static void ft1000_reset_asic(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 tempword;
 
 	DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");
@@ -414,7 +414,7 @@
 //---------------------------------------------------------------------------
 static int ft1000_reset_card(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 tempword;
 	int i;
 	unsigned long flags;
@@ -618,7 +618,7 @@
 	FT1000_INFO *info;
 	USHORT tempword;
 
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
 	if (info->CardReady == 1) {
 		// Perform dsp heartbeat check
@@ -831,7 +831,7 @@
 //---------------------------------------------------------------------------
 void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	int i;
 	u16 tempword;
 	unsigned long flags;
@@ -916,7 +916,7 @@
 //---------------------------------------------------------------------------
 BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16 *pnxtph)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 size;
 	u16 *ppseudohdr;
 	int i;
@@ -1009,7 +1009,7 @@
 //---------------------------------------------------------------------------
 void ft1000_proc_drvmsg(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 msgtype;
 	u16 tempword;
 	PMEDIAMSG pmediamsg;
@@ -1292,7 +1292,7 @@
 //---------------------------------------------------------------------------
 int ft1000_parse_dpram_msg(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 doorbell;
 	u16 portid;
 	u16 nxtph;
@@ -1449,7 +1449,7 @@
 //---------------------------------------------------------------------------
 static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 i;
 	u32 templong;
 	u16 tempword;
@@ -1596,7 +1596,7 @@
 int ft1000_copy_up_pkt(struct net_device *dev)
 {
 	u16 tempword;
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 len;
 	struct sk_buff *skb;
 	u16 i;
@@ -1783,7 +1783,7 @@
 //---------------------------------------------------------------------------
 int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	union {
 		PSEUDO_HDR blk;
 		u16 buff[sizeof(PSEUDO_HDR) >> 1];
@@ -1943,7 +1943,7 @@
 
 static struct net_device_stats *ft1000_stats(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	return (&info->stats);
 }
 
@@ -1967,7 +1967,7 @@
 
 static int ft1000_close(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 
 	DEBUG(0, "ft1000_hw: ft1000_close()\n");
 
@@ -1989,7 +1989,7 @@
 
 static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u8 *pdata;
 
 	DEBUG(1, "ft1000_hw: ft1000_start_xmit()\n");
@@ -2026,7 +2026,7 @@
 static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	u16 tempword;
 	u16 inttype;
 	int cnt;
@@ -2091,7 +2091,7 @@
 
 void stop_ft1000_card(struct net_device *dev)
 {
-	FT1000_INFO *info = (FT1000_INFO *) netdev_priv(dev);
+	FT1000_INFO *info = netdev_priv(dev);
 	PPROV_RECORD ptr;
 //	int cnt;
 
@@ -2127,7 +2127,7 @@
 				   struct ethtool_drvinfo *info)
 {
 	FT1000_INFO *ft_info;
-	ft_info = (FT1000_INFO *) netdev_priv(dev);
+	ft_info = netdev_priv(dev);
 
 	snprintf(info->driver, 32, "ft1000");
 	snprintf(info->bus_info, ETHTOOL_BUSINFO_LEN, "PCMCIA 0x%lx",
@@ -2139,7 +2139,7 @@
 static u32 ft1000_get_link(struct net_device *dev)
 {
 	FT1000_INFO *info;
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 	return info->mediastate;
 }
 
@@ -2185,7 +2185,7 @@
 	}
 
 	SET_NETDEV_DEV(dev, fdev);
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
 	memset(info, 0, sizeof(FT1000_INFO));
 
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index b45de9b..935608e 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -65,7 +65,7 @@
 	time_t delta;
 
 	dev = (struct net_device *)data;
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
 	if (off > 0) {
 		*eof = 1;
@@ -174,7 +174,7 @@
 	struct net_device *dev = ptr;
 	FT1000_INFO *info;
 
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
 	switch (event) {
 	case NETDEV_CHANGENAME:
@@ -195,7 +195,7 @@
 {
 	FT1000_INFO *info;
 
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
 	info->proc_ft1000 = proc_mkdir(FT1000_PROC, init_net.proc_net);
 	create_proc_read_entry(dev->name, 0644, info->proc_ft1000,
@@ -208,7 +208,7 @@
 {
 	FT1000_INFO *info;
 
-	info = (FT1000_INFO *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
 	remove_proc_entry(dev->name, info->proc_ft1000);
 	remove_proc_entry(FT1000_PROC, init_net.proc_net);
diff --git a/drivers/staging/ft1000/ft1000-usb/Makefile b/drivers/staging/ft1000/ft1000-usb/Makefile
index dd87ecd..f0f5240 100644
--- a/drivers/staging/ft1000/ft1000-usb/Makefile
+++ b/drivers/staging/ft1000/ft1000-usb/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_FT1000_USB) += ft1000.o
 
-ft1000-y := ft1000_chdev.o ft1000_download.o ft1000_hw.o ft1000_proc.o ft1000_usb.o
+ft1000-y := ft1000_debug.o ft1000_download.o ft1000_hw.o ft1000_proc.o ft1000_usb.o
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
deleted file mode 100644
index 20d5098..0000000
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ /dev/null
@@ -1,935 +0,0 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2006 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File:         ft1000_chdev.c
-//
-// Description:  Custom character device dispatch routines.
-//
-// History:
-// 8/29/02    Whc                Ported to Linux.
-// 6/05/06    Whc                Porting to Linux 2.6.9
-//
-//---------------------------------------------------------------------------
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/poll.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-
-#include <linux/fs.h>
-#include <linux/kmod.h>
-#include <linux/ioctl.h>
-#include <linux/unistd.h>
-
-#include "ft1000_usb.h"
-//#include "ft1000_ioctl.h"
-
-static int ft1000_flarion_cnt = 0;
-
-//need to looking usage of ft1000Handle
-
-static int ft1000_ChOpen (struct inode *Inode, struct file *File);
-static unsigned int ft1000_ChPoll(struct file *file, poll_table *wait);
-static long ft1000_ChIoctl(struct file *File, unsigned int Command,
-                           unsigned long Argument);
-static int ft1000_ChRelease (struct inode *Inode, struct file *File);
-
-// Global pointer to device object
-static struct ft1000_device *pdevobj[MAX_NUM_CARDS + 2];
-//static devfs_handle_t ft1000Handle[MAX_NUM_CARDS];
-
-// List to free receive command buffer pool
-struct list_head freercvpool;
-
-// lock to arbitrate free buffer list for receive command data
-spinlock_t free_buff_lock;
-
-int numofmsgbuf = 0;
-
-// Global variable to indicate that all provisioning data is sent to DSP
-//BOOLEAN fProvComplete;
-
-//
-// Table of entry-point routines for char device
-//
-static struct file_operations ft1000fops =
-{
-	.unlocked_ioctl	= ft1000_ChIoctl,
-	.poll		= ft1000_ChPoll,
-	.open		= ft1000_ChOpen,
-	.release	= ft1000_ChRelease,
-	.llseek		= no_llseek,
-};
-
-
-
-
-//---------------------------------------------------------------------------
-// Function:    exec_mknod
-//
-// Parameters:
-//
-// Returns:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int exec_mknod (void *pdata)
-{
-	struct ft1000_info *info;
-    char mjnum[4];
-    char minornum[4];
-    char temp[32];
-    int retcode;
-//    int i;					//aelias [-] reason : unused variable
-    char *envp[] = { "HOME=/", "PATH=/usr/bin:/bin", NULL };
-    char *argv[]={"-m 666",temp,"c",mjnum,minornum,NULL};
-
-    info = pdata;
-    DEBUG("ft1000_chdev:exec_mknod is called with major number = %d\n", info->DeviceMajor);
-    sprintf(temp, "%s%s", "/dev/", info->DeviceName) ;
-    sprintf(mjnum, "%d", info->DeviceMajor);
-    sprintf(minornum, "%d", info->CardNumber);
-
-    //char *argv[]={"mknod","-m 666",temp,"c",mjnum,minornum,NULL};
-//    char *argv[]={"-m 666",temp,"c",mjnum,minornum,NULL};
-
-    //for (i=0; i<7;i++)
-    //    DEBUG("argv[%d]=%s\n", i, argv[i]);
-
-
-    retcode = call_usermodehelper ("/bin/mknod", argv, envp, 1);
-    if (retcode) {
-        DEBUG("ft1000_chdev:exec_mknod failed to make the node: retcode = %d\n", retcode);
-    }
-
-
-
-    return retcode;
-
-}
-
-//---------------------------------------------------------------------------
-// Function:    rm_mknod
-//
-// Description: This module removes the FT1000 device file
-//
-//---------------------------------------------------------------------------
-static int rm_mknod (void *pdata)
-{
-
-	struct ft1000_info *info;
-    //char *argv[4]={"rm", "-f", "/dev/FT1000", NULL};
-    int retcode;
-    char temp[32];
-    char *argv[]={"rm", "-f", temp, NULL};
-
-	info = (struct ft1000_info *)pdata;
-    DEBUG("ft1000_chdev:rm_mknod is called for device %s\n", info->DeviceName);
-    sprintf(temp, "%s%s", "/dev/", info->DeviceName) ;
-
-//    char *argv[]={"rm", "-f", temp, NULL};
-
-    retcode = call_usermodehelper ("/bin/rm", argv, NULL, 1);
-    if (retcode) {
-        DEBUG("ft1000_chdev:rm_mknod failed to remove the node: retcode = %d\n", retcode);
-    }
-    else
-        DEBUG("ft1000_chdev:rm_mknod done!\n");
-
-
-    return retcode;
-
-}
-//---------------------------------------------------------------------------
-// Function:    ft1000_get_buffer
-//
-// Parameters:
-//
-// Returns:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
-{
-    unsigned long flags;
-	struct dpram_blk *ptr;
-
-    spin_lock_irqsave(&free_buff_lock, flags);
-    // Check if buffer is available
-    if ( list_empty(bufflist) ) {
-        DEBUG("ft1000_get_buffer:  No more buffer - %d\n", numofmsgbuf);
-        ptr = NULL;
-    }
-    else {
-        numofmsgbuf--;
-	ptr = list_entry(bufflist->next, struct dpram_blk, list);
-        list_del(&ptr->list);
-        //DEBUG("ft1000_get_buffer: number of free msg buffers = %d\n", numofmsgbuf);
-    }
-    spin_unlock_irqrestore(&free_buff_lock, flags);
-
-    return ptr;
-}
-
-
-
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_free_buffer
-//
-// Parameters:
-//
-// Returns:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&free_buff_lock, flags);
-    // Put memory back to list
-    list_add_tail(&pdpram_blk->list, plist);
-    numofmsgbuf++;
-    //DEBUG("ft1000_free_buffer: number of free msg buffers = %d\n", numofmsgbuf);
-    spin_unlock_irqrestore(&free_buff_lock, flags);
-}
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_CreateDevice
-//
-// Parameters:  dev - pointer to adapter object
-//
-// Returns:     0 if successful
-//
-// Description: Creates a private char device.
-//
-// Notes:       Only called by init_module().
-//
-//---------------------------------------------------------------------------
-int ft1000_CreateDevice(struct ft1000_device *dev)
-{
-	struct ft1000_info *info = netdev_priv(dev->net);
-    int result;
-    int i;
-    pid_t pid;
-
-    // make a new device name
-    sprintf(info->DeviceName, "%s%d", "FT100", info->CardNumber);
-
-    // Delete any existing FT1000 node
-    pid = kernel_thread (rm_mknod,(void *)info, 0);
-    msleep(1000);
-
-    DEBUG("ft1000_CreateDevice: number of instance = %d\n", ft1000_flarion_cnt);
-    DEBUG("DeviceCreated = %x\n", info->DeviceCreated);
-
-    //save the device info to global array
-    pdevobj[info->CardNumber] = dev;
-
-    DEBUG("ft1000_CreateDevice: ******SAVED pdevobj[%d]=%p\n", info->CardNumber, pdevobj[info->CardNumber]);	//aelias [+] reason:up
-
-    if (info->DeviceCreated)
-    {
-	DEBUG("ft1000_CreateDevice: \"%s\" already registered\n", info->DeviceName);
-	return -EIO;
-    }
-
-
-    // register the device
-    DEBUG("ft1000_CreateDevice: \"%s\" device registration\n", info->DeviceName);
-    info->DeviceMajor = 0;
-
-    result = register_chrdev(info->DeviceMajor, info->DeviceName, &ft1000fops);
-    if (result < 0)
-    {
-	DEBUG("ft1000_CreateDevice: unable to get major %d\n", info->DeviceMajor);
-	return result;
-    }
-
-    DEBUG("ft1000_CreateDevice: registered char device \"%s\"\n", info->DeviceName);
-
-    // save a dynamic device major number
-    if (info->DeviceMajor == 0)
-    {
-	info->DeviceMajor = result;
-	DEBUG("ft1000_PcdCreateDevice: device major = %d\n", info->DeviceMajor);
-    }
-
-    // Create a thread to call user mode app to mknod
-    pid = kernel_thread (exec_mknod, (void *)info, 0);
-
-    // initialize application information
-
-//    if (ft1000_flarion_cnt == 0) {
-//
-//    	  DEBUG("Initialize free_buff_lock and freercvpool\n");
-//        spin_lock_init(&free_buff_lock);
-//
-//        // initialize a list of buffers to be use for queuing up receive command data
-//        INIT_LIST_HEAD (&freercvpool);
-//
-//        // create list of free buffers
-//        for (i=0; i<NUM_OF_FREE_BUFFERS; i++) {
-//            // Get memory for DPRAM_DATA link list
-//            pdpram_blk = kmalloc ( sizeof(struct dpram_blk), GFP_KERNEL );
-//            // Get a block of memory to store command data
-//            pdpram_blk->pbuffer = kmalloc ( MAX_CMD_SQSIZE, GFP_KERNEL );
-//            // link provisioning data
-//            list_add_tail (&pdpram_blk->list, &freercvpool);
-//        }
-//        numofmsgbuf = NUM_OF_FREE_BUFFERS;
-//    }
-
-
-    // initialize application information
-    info->appcnt = 0;
-    for (i=0; i<MAX_NUM_APP; i++) {
-        info->app_info[i].nTxMsg = 0;
-        info->app_info[i].nRxMsg = 0;
-        info->app_info[i].nTxMsgReject = 0;
-        info->app_info[i].nRxMsgMiss = 0;
-        info->app_info[i].fileobject = NULL;
-        info->app_info[i].app_id = i+1;
-        info->app_info[i].DspBCMsgFlag = 0;
-        info->app_info[i].NumOfMsg = 0;
-        init_waitqueue_head(&info->app_info[i].wait_dpram_msg);
-        INIT_LIST_HEAD (&info->app_info[i].app_sqlist);
-    }
-
-
-
-
-//    ft1000Handle[info->CardNumber] = devfs_register(NULL, info->DeviceName, DEVFS_FL_AUTO_DEVNUM, 0, 0,
-//                                  S_IFCHR | S_IRUGO | S_IWUGO, &ft1000fops, NULL);
-
-
-    info->DeviceCreated = TRUE;
-    ft1000_flarion_cnt++;
-
-    return result;
-}
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_DestroyDeviceDEBUG
-//
-// Parameters:  dev - pointer to adapter object
-//
-// Description: Destroys a private char device.
-//
-// Notes:       Only called by cleanup_module().
-//
-//---------------------------------------------------------------------------
-void ft1000_DestroyDevice(struct net_device *dev)
-{
-	struct ft1000_info *info = netdev_priv(dev);
-    int result = 0;
-    pid_t pid;
-		int i;
-	struct dpram_blk *pdpram_blk;
-	struct dpram_blk *ptr;
-
-    DEBUG("ft1000_chdev:ft1000_DestroyDevice called\n");
-
-
-
-    if (info->DeviceCreated)
-	{
-        ft1000_flarion_cnt--;
-		unregister_chrdev(info->DeviceMajor, info->DeviceName);
-		DEBUG("ft1000_DestroyDevice: unregistered device \"%s\", result = %d\n",
-					   info->DeviceName, result);
-
-       pid = kernel_thread (rm_mknod, (void *)info, 0);
-
-        // Make sure we free any memory reserve for slow Queue
-        for (i=0; i<MAX_NUM_APP; i++) {
-            while (list_empty(&info->app_info[i].app_sqlist) == 0) {
-                pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
-                list_del(&pdpram_blk->list);
-                ft1000_free_buffer(pdpram_blk, &freercvpool);
-
-            }
-            wake_up_interruptible(&info->app_info[i].wait_dpram_msg);
-        }
-
-        // Remove buffer allocated for receive command data
-        if (ft1000_flarion_cnt == 0) {
-            while (list_empty(&freercvpool) == 0) {
-		ptr = list_entry(freercvpool.next, struct dpram_blk, list);
-                list_del(&ptr->list);
-                kfree(ptr->pbuffer);
-                kfree(ptr);
-            }
-        }
-
-//        devfs_unregister(ft1000Handle[info->CardNumber]);
-
-		info->DeviceCreated = FALSE;
-
-		pdevobj[info->CardNumber] = NULL;
-	}
-
-
-}
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_ChOpen
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int ft1000_ChOpen (struct inode *Inode, struct file *File)
-{
-	struct ft1000_info *info;
-    int i,num;
-
-    DEBUG("ft1000_ChOpen called\n");
-    num = (MINOR(Inode->i_rdev) & 0xf);
-    DEBUG("ft1000_ChOpen: minor number=%d\n", num);
-
-    for (i=0; i<5; i++)
-        DEBUG("pdevobj[%d]=%p\n", i, pdevobj[i]); //aelias [+] reason: down
-
-    if ( pdevobj[num] != NULL )
-        //info = (struct ft1000_info *)(pdevobj[num]->net->priv);
-		info = (struct ft1000_info *)netdev_priv(pdevobj[num]->net);
-    else
-    {
-        DEBUG("ft1000_ChOpen: can not find device object %d\n", num);
-        return -1;
-    }
-
-    DEBUG("f_owner = %p number of application = %d\n", (&File->f_owner), info->appcnt );
-
-    // Check if maximum number of application exceeded
-    if (info->appcnt > MAX_NUM_APP) {
-        DEBUG("Maximum number of application exceeded\n");
-        return -EACCES;
-    }
-
-    // Search for available application info block
-    for (i=0; i<MAX_NUM_APP; i++) {
-        if ( (info->app_info[i].fileobject == NULL) ) {
-            break;
-        }
-    }
-
-    // Fail due to lack of application info block
-    if (i == MAX_NUM_APP) {
-        DEBUG("Could not find an application info block\n");
-        return -EACCES;
-    }
-
-    info->appcnt++;
-    info->app_info[i].fileobject = &File->f_owner;
-    info->app_info[i].nTxMsg = 0;
-    info->app_info[i].nRxMsg = 0;
-    info->app_info[i].nTxMsgReject = 0;
-    info->app_info[i].nRxMsgMiss = 0;
-
-    File->private_data = pdevobj[num]->net;
-
-	nonseekable_open(Inode, File);
-    return 0;
-}
-
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_ChPoll
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-
-static unsigned int ft1000_ChPoll(struct file *file, poll_table *wait)
-{
-    struct net_device *dev = file->private_data;
-	struct ft1000_info *info;
-    int i;
-
-    //DEBUG("ft1000_ChPoll called\n");
-    if (ft1000_flarion_cnt == 0) {
-        DEBUG("FT1000:ft1000_ChPoll called when ft1000_flarion_cnt is zero\n");
-        return (-EBADF);
-    }
-
-	info = (struct ft1000_info *) netdev_priv(dev);
-
-    // Search for matching file object
-    for (i=0; i<MAX_NUM_APP; i++) {
-        if ( info->app_info[i].fileobject == &file->f_owner) {
-            //DEBUG("FT1000:ft1000_ChIoctl: Message is for AppId = %d\n", info->app_info[i].app_id);
-            break;
-        }
-    }
-
-    // Could not find application info block
-    if (i == MAX_NUM_APP) {
-        DEBUG("FT1000:ft1000_ChIoctl:Could not find application info block\n");
-        return ( -EACCES );
-    }
-
-    if (list_empty(&info->app_info[i].app_sqlist) == 0) {
-        DEBUG("FT1000:ft1000_ChPoll:Message detected in slow queue\n");
-        return(POLLIN | POLLRDNORM | POLLPRI);
-    }
-
-    poll_wait (file, &info->app_info[i].wait_dpram_msg, wait);
-    //DEBUG("FT1000:ft1000_ChPoll:Polling for data from DSP\n");
-
-    return (0);
-}
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_ChIoctl
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static long ft1000_ChIoctl (struct file *File, unsigned int Command,
-                           unsigned long Argument)
-{
-    void __user *argp = (void __user *)Argument;
-    struct net_device *dev;
-	struct ft1000_info *info;
-    struct ft1000_device *ft1000dev;
-    int result=0;
-    int cmd;
-    int i;
-    u16 tempword;
-    unsigned long flags;
-    struct timeval tv;
-    IOCTL_GET_VER get_ver_data;
-    IOCTL_GET_DSP_STAT get_stat_data;
-    u8 ConnectionMsg[] = {0x00,0x44,0x10,0x20,0x80,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x93,0x64,
-                          0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x0a,
-                          0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-                          0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-                          0x00,0x00,0x02,0x37,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x01,0x00,0x01,0x7f,0x00,
-                          0x00,0x01,0x00,0x00};
-
-    unsigned short ledStat=0;
-    unsigned short conStat=0;
-
-    //DEBUG("ft1000_ChIoctl called\n");
-
-    if (ft1000_flarion_cnt == 0) {
-        DEBUG("FT1000:ft1000_ChIoctl called when ft1000_flarion_cnt is zero\n");
-        return (-EBADF);
-    }
-
-    //DEBUG("FT1000:ft1000_ChIoctl:Command = 0x%x Argument = 0x%8x\n", Command, (u32)Argument);
-
-    dev = File->private_data;
-	info = (struct ft1000_info *) netdev_priv(dev);
-    ft1000dev = info->pFt1000Dev;
-    cmd = _IOC_NR(Command);
-    //DEBUG("FT1000:ft1000_ChIoctl:cmd = 0x%x\n", cmd);
-
-    // process the command
-    switch (cmd) {
-    case IOCTL_REGISTER_CMD:
-            DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_REGISTER called\n");
-            result = get_user(tempword, (__u16 __user*)argp);
-            if (result) {
-                DEBUG("result = %d failed to get_user\n", result);
-                break;
-            }
-            if (tempword == DSPBCMSGID) {
-                // Search for matching file object
-                for (i=0; i<MAX_NUM_APP; i++) {
-                    if ( info->app_info[i].fileobject == &File->f_owner) {
-                        info->app_info[i].DspBCMsgFlag = 1;
-                        DEBUG("FT1000:ft1000_ChIoctl:Registered for broadcast messages\n");
-                        break;
-                    }
-                }
-            }
-            break;
-
-    case IOCTL_GET_VER_CMD:
-        DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_VER called\n");
-
-        get_ver_data.drv_ver = FT1000_DRV_VER;
-
-        if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data)) ) {
-            DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
-            result = -EFAULT;
-            break;
-        }
-
-        DEBUG("FT1000:ft1000_ChIoctl:driver version = 0x%x\n",(unsigned int)get_ver_data.drv_ver);
-
-        break;
-    case IOCTL_CONNECT:
-        // Connect Message
-        DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_CONNECT\n");
-        ConnectionMsg[79] = 0xfc;
-			   CardSendCommand(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
-
-        break;
-    case IOCTL_DISCONNECT:
-        // Disconnect Message
-        DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_DISCONNECT\n");
-        ConnectionMsg[79] = 0xfd;
-			   CardSendCommand(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
-        break;
-    case IOCTL_GET_DSP_STAT_CMD:
-        //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_DSP_STAT called\n");
-	memset(&get_stat_data, 0, sizeof(get_stat_data));
-        memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
-        memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
-        memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
-        memcpy(get_stat_data.eui64, info->eui64, EUISZ);
-
-            if (info->ProgConStat != 0xFF) {
-                ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (PUCHAR)&ledStat, FT1000_MAG_DSP_LED_INDX);
-                get_stat_data.LedStat = ntohs(ledStat);
-                DEBUG("FT1000:ft1000_ChIoctl: LedStat = 0x%x\n", get_stat_data.LedStat);
-                ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (PUCHAR)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
-                get_stat_data.ConStat = ntohs(conStat);
-                DEBUG("FT1000:ft1000_ChIoctl: ConStat = 0x%x\n", get_stat_data.ConStat);
-            }
-            else {
-                get_stat_data.ConStat = 0x0f;
-            }
-
-
-        get_stat_data.nTxPkts = info->stats.tx_packets;
-        get_stat_data.nRxPkts = info->stats.rx_packets;
-        get_stat_data.nTxBytes = info->stats.tx_bytes;
-        get_stat_data.nRxBytes = info->stats.rx_bytes;
-        do_gettimeofday ( &tv );
-        get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
-        DEBUG("Connection Time = %d\n", (int)get_stat_data.ConTm);
-        if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data)) ) {
-            DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
-            result = -EFAULT;
-            break;
-        }
-        DEBUG("ft1000_chioctl: GET_DSP_STAT succeed\n");
-        break;
-    case IOCTL_SET_DPRAM_CMD:
-        {
-            IOCTL_DPRAM_BLK *dpram_data;
-            //IOCTL_DPRAM_COMMAND dpram_command;
-            USHORT qtype;
-            USHORT msgsz;
-		struct pseudo_hdr *ppseudo_hdr;
-            PUSHORT pmsg;
-            USHORT total_len;
-            USHORT app_index;
-            u16 status;
-
-            //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_SET_DPRAM called\n");
-
-
-            if (ft1000_flarion_cnt == 0) {
-                return (-EBADF);
-            }
-
-            if (info->DrvMsgPend) {
-                return (-ENOTTY);
-            }
-
-            if ( (info->DspAsicReset) || (info->fProvComplete == 0) ) {
-                return (-EACCES);
-            }
-
-            info->fAppMsgPend = 1;
-
-            if (info->CardReady) {
-
-               //DEBUG("FT1000:ft1000_ChIoctl: try to SET_DPRAM \n");
-
-                // Get the length field to see how many bytes to copy
-                result = get_user(msgsz, (__u16 __user *)argp);
-                msgsz = ntohs (msgsz);
-                //DEBUG("FT1000:ft1000_ChIoctl: length of message = %d\n", msgsz);
-
-                if (msgsz > MAX_CMD_SQSIZE) {
-                    DEBUG("FT1000:ft1000_ChIoctl: bad message length = %d\n", msgsz);
-                    result = -EINVAL;
-                    break;
-                }
-
-		result = -ENOMEM;
-		dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
-		if (!dpram_data)
-			break;
-
-                //if ( copy_from_user(&(dpram_command.dpram_blk), (PIOCTL_DPRAM_BLK)Argument, msgsz+2) ) {
-                if ( copy_from_user(&dpram_data, argp, msgsz+2) ) {
-                    DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
-                    result = -EFAULT;
-                }
-                else {
-#if 0
-                    // whc - for debugging only
-                    ptr = (char *)&dpram_data;
-                    for (i=0; i<msgsz; i++) {
-                        DEBUG(1,"FT1000:ft1000_ChIoctl: data %d = 0x%x\n", i, *ptr++);
-                    }
-#endif
-                    // Check if this message came from a registered application
-                    for (i=0; i<MAX_NUM_APP; i++) {
-                        if ( info->app_info[i].fileobject == &File->f_owner) {
-                            break;
-                        }
-                    }
-                    if (i==MAX_NUM_APP) {
-                        DEBUG("FT1000:No matching application fileobject\n");
-                        result = -EINVAL;
-			kfree(dpram_data);
-                        break;
-                    }
-                    app_index = i;
-
-                    // Check message qtype type which is the lower byte within qos_class
-                    //qtype = ntohs(dpram_command.dpram_blk.pseudohdr.qos_class) & 0xff;
-                    qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
-                    //DEBUG("FT1000_ft1000_ChIoctl: qtype = %d\n", qtype);
-                    if (qtype) {
-                    }
-                    else {
-                        // Put message into Slow Queue
-                        // Only put a message into the DPRAM if msg doorbell is available
-                        status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
-                        //DEBUG("FT1000_ft1000_ChIoctl: READ REGISTER tempword=%x\n", tempword);
-                        if (tempword & FT1000_DB_DPRAM_TX) {
-                            // Suspend for 2ms and try again due to DSP doorbell busy
-                            mdelay(2);
-                            status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
-                            if (tempword & FT1000_DB_DPRAM_TX) {
-                                // Suspend for 1ms and try again due to DSP doorbell busy
-                                mdelay(1);
-                                status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
-                                if (tempword & FT1000_DB_DPRAM_TX) {
-                                    status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
-                                    if (tempword & FT1000_DB_DPRAM_TX) {
-                                        // Suspend for 3ms and try again due to DSP doorbell busy
-                                        mdelay(3);
-                                        status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
-                                        if (tempword & FT1000_DB_DPRAM_TX) {
-                                            DEBUG("FT1000:ft1000_ChIoctl:Doorbell not available\n");
-                                            result = -ENOTTY;
-						kfree(dpram_data);
-                                            break;
-                                        }
-                                    }
-                                }
-                            }
-                        }
-
-                        //DEBUG("FT1000_ft1000_ChIoctl: finished reading register\n");
-
-                        // Make sure we are within the limits of the slow queue memory limitation
-                        if ( (msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ) ) {
-                            // Need to put sequence number plus new checksum for message
-                            //pmsg = (PUSHORT)&dpram_command.dpram_blk.pseudohdr;
-                            pmsg = (PUSHORT)&dpram_data->pseudohdr;
-				ppseudo_hdr = (struct pseudo_hdr *)pmsg;
-                            total_len = msgsz+2;
-                            if (total_len & 0x1) {
-                                total_len++;
-                            }
-
-                            // Insert slow queue sequence number
-                            ppseudo_hdr->seq_num = info->squeseqnum++;
-                            ppseudo_hdr->portsrc = info->app_info[app_index].app_id;
-                            // Calculate new checksum
-                            ppseudo_hdr->checksum = *pmsg++;
-                            //DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
-                            for (i=1; i<7; i++) {
-                                ppseudo_hdr->checksum ^= *pmsg++;
-                                //DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
-                            }
-                            pmsg++;
-				ppseudo_hdr = (struct pseudo_hdr *)pmsg;
-#if 0
-                            ptr = dpram_data;
-                            DEBUG("FT1000:ft1000_ChIoctl: Command Send\n");
-                            for (i=0; i<total_len; i++) {
-                                DEBUG("FT1000:ft1000_ChIoctl: data %d = 0x%x\n", i, *ptr++);
-                            }
-#endif
-                            //dpram_command.extra = 0;
-
-                            //CardSendCommand(ft1000dev,(unsigned char*)&dpram_command,total_len+2);
-                            CardSendCommand(ft1000dev,(unsigned short*)dpram_data,total_len+2);
-
-
-                            info->app_info[app_index].nTxMsg++;
-                        }
-                        else {
-                            result = -EINVAL;
-                        }
-                    }
-                }
-            }
-            else {
-                DEBUG("FT1000:ft1000_ChIoctl: Card not ready take messages\n");
-                result = -EACCES;
-            }
-	    kfree(dpram_data);
-
-        }
-        break;
-    case IOCTL_GET_DPRAM_CMD:
-        {
-		struct dpram_blk *pdpram_blk;
-            IOCTL_DPRAM_BLK __user *pioctl_dpram;
-            int msglen;
-
-            //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_DPRAM called\n");
-
-            if (ft1000_flarion_cnt == 0) {
-                return (-EBADF);
-            }
-
-            // Search for matching file object
-            for (i=0; i<MAX_NUM_APP; i++) {
-                if ( info->app_info[i].fileobject == &File->f_owner) {
-                    //DEBUG("FT1000:ft1000_ChIoctl: Message is for AppId = %d\n", info->app_info[i].app_id);
-                    break;
-                }
-            }
-
-            // Could not find application info block
-            if (i == MAX_NUM_APP) {
-                DEBUG("FT1000:ft1000_ChIoctl:Could not find application info block\n");
-                result = -EBADF;
-                break;
-            }
-
-            result = 0;
-            pioctl_dpram = argp;
-            if (list_empty(&info->app_info[i].app_sqlist) == 0) {
-                //DEBUG("FT1000:ft1000_ChIoctl:Message detected in slow queue\n");
-                spin_lock_irqsave(&free_buff_lock, flags);
-                pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
-                list_del(&pdpram_blk->list);
-                info->app_info[i].NumOfMsg--;
-                //DEBUG("FT1000:ft1000_ChIoctl:NumOfMsg for app %d = %d\n", i, info->app_info[i].NumOfMsg);
-                spin_unlock_irqrestore(&free_buff_lock, flags);
-                msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
-                result = get_user(msglen, &pioctl_dpram->total_len);
-		if (result)
-			break;
-		msglen = htons(msglen);
-                //DEBUG("FT1000:ft1000_ChIoctl:msg length = %x\n", msglen);
-                if(copy_to_user (&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen))
-				{
-					DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
-	             	result = -EFAULT;
-	             	break;
-				}
-
-                ft1000_free_buffer(pdpram_blk, &freercvpool);
-                result = msglen;
-            }
-            //DEBUG("FT1000:ft1000_ChIoctl: IOCTL_FT1000_GET_DPRAM no message\n");
-        }
-        break;
-
-    default:
-        DEBUG("FT1000:ft1000_ChIoctl:unknown command: 0x%x\n", Command);
-        result = -ENOTTY;
-        break;
-    }
-    info->fAppMsgPend = 0;
-    return result;
-}
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_ChRelease
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int ft1000_ChRelease (struct inode *Inode, struct file *File)
-{
-	struct ft1000_info *info;
-    struct net_device *dev;
-    int i;
-	struct dpram_blk *pdpram_blk;
-
-    DEBUG("ft1000_ChRelease called\n");
-
-    dev = File->private_data;
-	info = (struct ft1000_info *) netdev_priv(dev);
-
-    if (ft1000_flarion_cnt == 0) {
-        info->appcnt--;
-        return (-EBADF);
-    }
-
-    // Search for matching file object
-    for (i=0; i<MAX_NUM_APP; i++) {
-        if ( info->app_info[i].fileobject == &File->f_owner) {
-            //DEBUG("FT1000:ft1000_ChIoctl: Message is for AppId = %d\n", info->app_info[i].app_id);
-            break;
-        }
-    }
-
-    if (i==MAX_NUM_APP)
-	    return 0;
-
-    while (list_empty(&info->app_info[i].app_sqlist) == 0) {
-        DEBUG("Remove and free memory queue up on slow queue\n");
-        pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
-        list_del(&pdpram_blk->list);
-        ft1000_free_buffer(pdpram_blk, &freercvpool);
-    }
-
-    // initialize application information
-    info->appcnt--;
-    DEBUG("ft1000_chdev:%s:appcnt = %d\n", __FUNCTION__, info->appcnt);
-    info->app_info[i].fileobject = NULL;
-
-    return 0;
-}
-
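The IOCTL_SET_DPRAM path above (and its counterpart re-added in ft1000_debug.c below) gates slow-queue writes on the DSP doorbell: it reads FT1000_REG_DOORBELL and, while FT1000_DB_DPRAM_TX is still set, retries after delays of 2 ms, 1 ms, no delay, and 3 ms before giving up with -ENOTTY. A minimal sketch of that cascade as a single bounded poll loop, assuming the driver's ft1000_read_register() helper and register/bit names; the function name and the delay table are illustrative only, not part of the patch:

/* Illustrative helper, not in the driver: poll the DSP doorbell until the
 * DPRAM TX bit clears, mirroring the nested mdelay(2)/mdelay(1)/mdelay(3)
 * retries in the ioctl handler.  The zero entry models the back-to-back
 * re-read in the original code.
 */
static int ft1000_wait_doorbell(struct ft1000_device *ft1000dev)
{
	static const unsigned int delays_ms[] = { 2, 1, 0, 3 };
	unsigned int i;
	u16 doorbell;

	for (i = 0; i <= ARRAY_SIZE(delays_ms); i++) {
		ft1000_read_register(ft1000dev, &doorbell, FT1000_REG_DOORBELL);
		if (!(doorbell & FT1000_DB_DPRAM_TX))
			return 0;		/* doorbell free, safe to send */
		if (i < ARRAY_SIZE(delays_ms))
			mdelay(delays_ms[i]);
	}
	return -ENOTTY;			/* DSP still busy, as in the original */
}
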
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
new file mode 100644
index 0000000..149ba59
--- /dev/null
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -0,0 +1,782 @@
+//---------------------------------------------------------------------------
+// FT1000 driver for Flarion Flash OFDM NIC Device
+//
+// Copyright (C) 2006 Flarion Technologies, All rights reserved.
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 2 of the License, or (at your option) any
+// later version. This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+// or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+// more details. You should have received a copy of the GNU General Public
+// License along with this program; if not, write to the
+// Free Software Foundation, Inc., 59 Temple Place -
+// Suite 330, Boston, MA 02111-1307, USA.
+//---------------------------------------------------------------------------
+//
+// File:         ft1000_chdev.c
+//
+// Description:  Custom character device dispatch routines.
+//
+// History:
+// 8/29/02    Whc                Ported to Linux.
+// 6/05/06    Whc                Porting to Linux 2.6.9
+//
+//---------------------------------------------------------------------------
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/poll.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+
+#include <linux/ioctl.h>
+#include <linux/debugfs.h>
+#include "ft1000_usb.h"
+
+static int ft1000_flarion_cnt = 0;
+
+static int ft1000_open (struct inode *inode, struct file *file);
+static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait);
+static long ft1000_ioctl(struct file *file, unsigned int command,
+                           unsigned long argument);
+static int ft1000_release (struct inode *inode, struct file *file);
+
+// List to free receive command buffer pool
+struct list_head freercvpool;
+
+// lock to arbitrate free buffer list for receive command data
+spinlock_t free_buff_lock;
+
+int numofmsgbuf = 0;
+
+//
+// Table of entry-point routines for char device
+//
+static struct file_operations ft1000fops =
+{
+	.unlocked_ioctl	= ft1000_ioctl,
+	.poll		= ft1000_poll_dev,
+	.open		= ft1000_open,
+	.release	= ft1000_release,
+	.llseek		= no_llseek,
+};
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_get_buffer
+//
+// Parameters:
+//
+// Returns:
+//
+// Description:
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
+{
+    unsigned long flags;
+	struct dpram_blk *ptr;
+
+    spin_lock_irqsave(&free_buff_lock, flags);
+    // Check if buffer is available
+    if ( list_empty(bufflist) ) {
+        DEBUG("ft1000_get_buffer:  No more buffer - %d\n", numofmsgbuf);
+        ptr = NULL;
+    }
+    else {
+        numofmsgbuf--;
+	ptr = list_entry(bufflist->next, struct dpram_blk, list);
+        list_del(&ptr->list);
+        //DEBUG("ft1000_get_buffer: number of free msg buffers = %d\n", numofmsgbuf);
+    }
+    spin_unlock_irqrestore(&free_buff_lock, flags);
+
+    return ptr;
+}
+
+
+
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_free_buffer
+//
+// Parameters:
+//
+// Returns:
+//
+// Description:
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&free_buff_lock, flags);
+    // Put memory back to list
+    list_add_tail(&pdpram_blk->list, plist);
+    numofmsgbuf++;
+    //DEBUG("ft1000_free_buffer: number of free msg buffers = %d\n", numofmsgbuf);
+    spin_unlock_irqrestore(&free_buff_lock, flags);
+}
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_CreateDevice
+//
+// Parameters:  dev - pointer to adapter object
+//
+// Returns:     0 if successful
+//
+// Description: Creates a private char device.
+//
+// Notes:       Only called by init_module().
+//
+//---------------------------------------------------------------------------
+int ft1000_create_dev(struct ft1000_device *dev)
+{
+	struct ft1000_info *info = netdev_priv(dev->net);
+    int result;
+    int i;
+	struct dentry *dir, *file;
+	struct ft1000_debug_dirs *tmp;
+
+    // make a new device name
+    sprintf(info->DeviceName, "%s%d", "FT1000_", info->CardNumber);
+
+    DEBUG("%s: number of instance = %d\n", __func__, ft1000_flarion_cnt);
+    DEBUG("DeviceCreated = %x\n", info->DeviceCreated);
+
+    if (info->DeviceCreated)
+    {
+	DEBUG("%s: \"%s\" already registered\n", __func__, info->DeviceName);
+	return -EIO;
+    }
+
+
+    // register the device
+    DEBUG("%s: \"%s\" debugfs device registration\n", __func__, info->DeviceName);
+
+	tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
+	if (tmp == NULL) {
+		result = -1;
+		goto fail;
+	}
+
+	dir = debugfs_create_dir(info->DeviceName, 0);
+	if (IS_ERR(dir)) {
+		result = PTR_ERR(dir);
+		goto debug_dir_fail;
+	}
+
+	file = debugfs_create_file("device", S_IRUGO | S_IWUSR, dir,
+					dev, &ft1000fops);
+	if (IS_ERR(file)) {
+		result = PTR_ERR(file);
+		goto debug_file_fail;
+	}
+
+	tmp->dent = dir;
+	tmp->file = file;
+	tmp->int_number = info->CardNumber;
+	list_add(&(tmp->list), &(info->nodes.list));
+
+    DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, info->DeviceName);
+
+    // initialize application information
+    info->appcnt = 0;
+    for (i=0; i<MAX_NUM_APP; i++) {
+        info->app_info[i].nTxMsg = 0;
+        info->app_info[i].nRxMsg = 0;
+        info->app_info[i].nTxMsgReject = 0;
+        info->app_info[i].nRxMsgMiss = 0;
+        info->app_info[i].fileobject = NULL;
+        info->app_info[i].app_id = i+1;
+        info->app_info[i].DspBCMsgFlag = 0;
+        info->app_info[i].NumOfMsg = 0;
+        init_waitqueue_head(&info->app_info[i].wait_dpram_msg);
+        INIT_LIST_HEAD (&info->app_info[i].app_sqlist);
+    }
+
+    info->DeviceCreated = TRUE;
+    ft1000_flarion_cnt++;
+
+	return 0;
+
+debug_file_fail:
+	debugfs_remove(dir);
+debug_dir_fail:
+	kfree(tmp);
+fail:
+	return result;
+}
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_DestroyDeviceDEBUG
+//
+// Parameters:  dev - pointer to adapter object
+//
+// Description: Destroys a private char device.
+//
+// Notes:       Only called by cleanup_module().
+//
+//---------------------------------------------------------------------------
+void ft1000_destroy_dev(struct net_device *dev)
+{
+	struct ft1000_info *info = netdev_priv(dev);
+		int i;
+	struct dpram_blk *pdpram_blk;
+	struct dpram_blk *ptr;
+	struct list_head *pos, *q;
+	struct ft1000_debug_dirs *dir;
+
+    DEBUG("%s called\n", __func__);
+
+
+
+    if (info->DeviceCreated)
+	{
+        ft1000_flarion_cnt--;
+		list_for_each_safe(pos, q, &info->nodes.list) {
+			dir = list_entry(pos, struct ft1000_debug_dirs, list);
+			if (dir->int_number == info->CardNumber) {
+				debugfs_remove(dir->file);
+				debugfs_remove(dir->dent);
+				list_del(pos);
+				kfree(dir);
+			}
+		}
+		DEBUG("%s: unregistered device \"%s\"\n", __func__,
+					   info->DeviceName);
+
+        // Make sure we free any memory reserved for the slow queue
+        for (i=0; i<MAX_NUM_APP; i++) {
+            while (list_empty(&info->app_info[i].app_sqlist) == 0) {
+                pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
+                list_del(&pdpram_blk->list);
+                ft1000_free_buffer(pdpram_blk, &freercvpool);
+
+            }
+            wake_up_interruptible(&info->app_info[i].wait_dpram_msg);
+        }
+
+        // Remove buffer allocated for receive command data
+        if (ft1000_flarion_cnt == 0) {
+            while (list_empty(&freercvpool) == 0) {
+		ptr = list_entry(freercvpool.next, struct dpram_blk, list);
+                list_del(&ptr->list);
+                kfree(ptr->pbuffer);
+                kfree(ptr);
+            }
+        }
+		info->DeviceCreated = FALSE;
+	}
+
+
+}
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_open
+//
+// Parameters:
+//
+// Description:
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+static int ft1000_open (struct inode *inode, struct file *file)
+{
+	struct ft1000_info *info;
+	struct ft1000_device *dev = (struct ft1000_device *)inode->i_private;
+    int i,num;
+
+    DEBUG("%s called\n", __func__);
+    num = (MINOR(inode->i_rdev) & 0xf);
+    DEBUG("ft1000_open: minor number=%d\n", num);
+
+	info = file->private_data = netdev_priv(dev->net);
+
+    DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), info->appcnt );
+
+    // Check if maximum number of application exceeded
+    if (info->appcnt > MAX_NUM_APP) {
+        DEBUG("Maximum number of application exceeded\n");
+        return -EACCES;
+    }
+
+    // Search for available application info block
+    for (i=0; i<MAX_NUM_APP; i++) {
+        if ( (info->app_info[i].fileobject == NULL) ) {
+            break;
+        }
+    }
+
+    // Fail due to lack of application info block
+    if (i == MAX_NUM_APP) {
+        DEBUG("Could not find an application info block\n");
+        return -EACCES;
+    }
+
+    info->appcnt++;
+    info->app_info[i].fileobject = &file->f_owner;
+    info->app_info[i].nTxMsg = 0;
+    info->app_info[i].nRxMsg = 0;
+    info->app_info[i].nTxMsgReject = 0;
+    info->app_info[i].nRxMsgMiss = 0;
+
+	nonseekable_open(inode, file);
+    return 0;
+}
+
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_poll_dev
+//
+// Parameters:
+//
+// Description:
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+
+static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
+{
+    struct net_device *dev = file->private_data;
+	struct ft1000_info *info;
+    int i;
+
+    //DEBUG("ft1000_poll_dev called\n");
+    if (ft1000_flarion_cnt == 0) {
+        DEBUG("FT1000:ft1000_poll_dev called when ft1000_flarion_cnt is zero\n");
+        return (-EBADF);
+    }
+
+	info = netdev_priv(dev);
+
+    // Search for matching file object
+    for (i=0; i<MAX_NUM_APP; i++) {
+        if ( info->app_info[i].fileobject == &file->f_owner) {
+            //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+            break;
+        }
+    }
+
+    // Could not find application info block
+    if (i == MAX_NUM_APP) {
+        DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
+        return ( -EACCES );
+    }
+
+    if (list_empty(&info->app_info[i].app_sqlist) == 0) {
+        DEBUG("FT1000:ft1000_poll_dev:Message detected in slow queue\n");
+        return(POLLIN | POLLRDNORM | POLLPRI);
+    }
+
+    poll_wait (file, &info->app_info[i].wait_dpram_msg, wait);
+    //DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n");
+
+    return (0);
+}
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_ioctl
+//
+// Parameters:
+//
+// Description:
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+static long ft1000_ioctl (struct file *file, unsigned int command,
+                           unsigned long argument)
+{
+    void __user *argp = (void __user *)argument;
+	struct ft1000_info *info;
+    struct ft1000_device *ft1000dev;
+    int result=0;
+    int cmd;
+    int i;
+    u16 tempword;
+    unsigned long flags;
+    struct timeval tv;
+    IOCTL_GET_VER get_ver_data;
+    IOCTL_GET_DSP_STAT get_stat_data;
+    u8 ConnectionMsg[] = {0x00,0x44,0x10,0x20,0x80,0x00,0x00,0x00,0x00,0x00,0x03,0x00,0x00,0x00,0x93,0x64,
+                          0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x00,0x00,0x00,0x0a,
+                          0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+                          0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+                          0x00,0x00,0x02,0x37,0x00,0x00,0x00,0x08,0x00,0x00,0x00,0x01,0x00,0x01,0x7f,0x00,
+                          0x00,0x01,0x00,0x00};
+
+    unsigned short ledStat=0;
+    unsigned short conStat=0;
+
+    //DEBUG("ft1000_ioctl called\n");
+
+    if (ft1000_flarion_cnt == 0) {
+        DEBUG("FT1000:ft1000_ioctl called when ft1000_flarion_cnt is zero\n");
+        return (-EBADF);
+    }
+
+    //DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument);
+
+	info = file->private_data;
+	ft1000dev = info->pFt1000Dev;
+    cmd = _IOC_NR(command);
+    //DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd);
+
+    // process the command
+    switch (cmd) {
+    case IOCTL_REGISTER_CMD:
+            DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_REGISTER called\n");
+            result = get_user(tempword, (__u16 __user*)argp);
+            if (result) {
+                DEBUG("result = %d failed to get_user\n", result);
+                break;
+            }
+            if (tempword == DSPBCMSGID) {
+                // Search for matching file object
+                for (i=0; i<MAX_NUM_APP; i++) {
+                    if ( info->app_info[i].fileobject == &file->f_owner) {
+                        info->app_info[i].DspBCMsgFlag = 1;
+                        DEBUG("FT1000:ft1000_ioctl:Registered for broadcast messages\n");
+                        break;
+                    }
+                }
+            }
+            break;
+
+    case IOCTL_GET_VER_CMD:
+        DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_VER called\n");
+
+        get_ver_data.drv_ver = FT1000_DRV_VER;
+
+        if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data)) ) {
+            DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
+            result = -EFAULT;
+            break;
+        }
+
+        DEBUG("FT1000:ft1000_ioctl:driver version = 0x%x\n",(unsigned int)get_ver_data.drv_ver);
+
+        break;
+    case IOCTL_CONNECT:
+        // Connect Message
+        DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_CONNECT\n");
+        ConnectionMsg[79] = 0xfc;
+			   CardSendCommand(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
+
+        break;
+    case IOCTL_DISCONNECT:
+        // Disconnect Message
+        DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_DISCONNECT\n");
+        ConnectionMsg[79] = 0xfd;
+			   CardSendCommand(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
+        break;
+    case IOCTL_GET_DSP_STAT_CMD:
+        //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DSP_STAT called\n");
+	memset(&get_stat_data, 0, sizeof(get_stat_data));
+        memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
+        memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
+        memcpy(get_stat_data.Sku, info->Sku, SKUSZ);
+        memcpy(get_stat_data.eui64, info->eui64, EUISZ);
+
+            if (info->ProgConStat != 0xFF) {
+                ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+                get_stat_data.LedStat = ntohs(ledStat);
+                DEBUG("FT1000:ft1000_ioctl: LedStat = 0x%x\n", get_stat_data.LedStat);
+                ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+                get_stat_data.ConStat = ntohs(conStat);
+                DEBUG("FT1000:ft1000_ioctl: ConStat = 0x%x\n", get_stat_data.ConStat);
+            }
+            else {
+                get_stat_data.ConStat = 0x0f;
+            }
+
+
+        get_stat_data.nTxPkts = info->stats.tx_packets;
+        get_stat_data.nRxPkts = info->stats.rx_packets;
+        get_stat_data.nTxBytes = info->stats.tx_bytes;
+        get_stat_data.nRxBytes = info->stats.rx_bytes;
+        do_gettimeofday ( &tv );
+        get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
+        DEBUG("Connection Time = %d\n", (int)get_stat_data.ConTm);
+        if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data)) ) {
+            DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
+            result = -EFAULT;
+            break;
+        }
+        DEBUG("ft1000_chioctl: GET_DSP_STAT succeed\n");
+        break;
+    case IOCTL_SET_DPRAM_CMD:
+        {
+            IOCTL_DPRAM_BLK *dpram_data = NULL;
+            //IOCTL_DPRAM_COMMAND dpram_command;
+            u16 qtype;
+            u16 msgsz;
+		struct pseudo_hdr *ppseudo_hdr;
+            u16 *pmsg;
+            u16 total_len;
+            u16 app_index;
+            u16 status;
+
+            //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_SET_DPRAM called\n");
+
+
+            if (ft1000_flarion_cnt == 0) {
+                return (-EBADF);
+            }
+
+            if (info->DrvMsgPend) {
+                return (-ENOTTY);
+            }
+
+            if ( (info->DspAsicReset) || (info->fProvComplete == 0) ) {
+                return (-EACCES);
+            }
+
+            info->fAppMsgPend = 1;
+
+            if (info->CardReady) {
+
+               //DEBUG("FT1000:ft1000_ioctl: try to SET_DPRAM \n");
+
+                // Get the length field to see how many bytes to copy
+                result = get_user(msgsz, (__u16 __user *)argp);
+                msgsz = ntohs (msgsz);
+                //DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz);
+
+                if (msgsz > MAX_CMD_SQSIZE) {
+                    DEBUG("FT1000:ft1000_ioctl: bad message length = %d\n", msgsz);
+                    result = -EINVAL;
+                    break;
+                }
+
+		result = -ENOMEM;
+		dpram_data = kmalloc(msgsz + 2, GFP_KERNEL);
+		if (!dpram_data)
+			break;
+
+                if ( copy_from_user(dpram_data, argp, msgsz+2) ) {
+                    DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
+                    result = -EFAULT;
+                }
+                else {
+                    // Check if this message came from a registered application
+                    for (i=0; i<MAX_NUM_APP; i++) {
+                        if ( info->app_info[i].fileobject == &file->f_owner) {
+                            break;
+                        }
+                    }
+                    if (i==MAX_NUM_APP) {
+                        DEBUG("FT1000:No matching application fileobject\n");
+                        result = -EINVAL;
+			kfree(dpram_data);
+                        break;
+                    }
+                    app_index = i;
+
+                    // Check message qtype type which is the lower byte within qos_class
+                    qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
+                    //DEBUG("FT1000_ft1000_ioctl: qtype = %d\n", qtype);
+                    if (qtype) {
+                    }
+                    else {
+                        // Put message into Slow Queue
+                        // Only put a message into the DPRAM if msg doorbell is available
+                        status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+                        //DEBUG("FT1000_ft1000_ioctl: READ REGISTER tempword=%x\n", tempword);
+                        if (tempword & FT1000_DB_DPRAM_TX) {
+                            // Suspend for 2ms and try again due to DSP doorbell busy
+                            mdelay(2);
+                            status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+                            if (tempword & FT1000_DB_DPRAM_TX) {
+                                // Suspend for 1ms and try again due to DSP doorbell busy
+                                mdelay(1);
+                                status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+                                if (tempword & FT1000_DB_DPRAM_TX) {
+                                    status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+                                    if (tempword & FT1000_DB_DPRAM_TX) {
+                                        // Suspend for 3ms and try again due to DSP doorbell busy
+                                        mdelay(3);
+                                        status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
+                                        if (tempword & FT1000_DB_DPRAM_TX) {
+                                            DEBUG("FT1000:ft1000_ioctl:Doorbell not available\n");
+                                            result = -ENOTTY;
+						kfree(dpram_data);
+                                            break;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+
+                        //DEBUG("FT1000_ft1000_ioctl: finished reading register\n");
+
+                        // Make sure we are within the limits of the slow queue memory limitation
+                        if ( (msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ) ) {
+                            // Need to put sequence number plus new checksum for message
+                            pmsg = (u16 *)&dpram_data->pseudohdr;
+				ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+                            total_len = msgsz+2;
+                            if (total_len & 0x1) {
+                                total_len++;
+                            }
+
+                            // Insert slow queue sequence number
+                            ppseudo_hdr->seq_num = info->squeseqnum++;
+                            ppseudo_hdr->portsrc = info->app_info[app_index].app_id;
+                            // Calculate new checksum
+                            ppseudo_hdr->checksum = *pmsg++;
+                            //DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
+                            for (i=1; i<7; i++) {
+                                ppseudo_hdr->checksum ^= *pmsg++;
+                                //DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
+                            }
+                            pmsg++;
+				ppseudo_hdr = (struct pseudo_hdr *)pmsg;
+                           CardSendCommand(ft1000dev,(unsigned short*)dpram_data,total_len+2);
+
+
+                            info->app_info[app_index].nTxMsg++;
+                        }
+                        else {
+                            result = -EINVAL;
+                        }
+                    }
+                }
+            }
+            else {
+                DEBUG("FT1000:ft1000_ioctl: Card not ready to take messages\n");
+                result = -EACCES;
+            }
+	    kfree(dpram_data);
+
+        }
+        break;
+    case IOCTL_GET_DPRAM_CMD:
+        {
+		struct dpram_blk *pdpram_blk;
+            IOCTL_DPRAM_BLK __user *pioctl_dpram;
+            int msglen;
+
+            //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM called\n");
+
+            if (ft1000_flarion_cnt == 0) {
+                return (-EBADF);
+            }
+
+            // Search for matching file object
+            for (i=0; i<MAX_NUM_APP; i++) {
+                if ( info->app_info[i].fileobject == &file->f_owner) {
+                    //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+                    break;
+                }
+            }
+
+            // Could not find application info block
+            if (i == MAX_NUM_APP) {
+                DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
+                result = -EBADF;
+                break;
+            }
+
+            result = 0;
+            pioctl_dpram = argp;
+            if (list_empty(&info->app_info[i].app_sqlist) == 0) {
+                //DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n");
+                spin_lock_irqsave(&free_buff_lock, flags);
+                pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
+                list_del(&pdpram_blk->list);
+                info->app_info[i].NumOfMsg--;
+                //DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, info->app_info[i].NumOfMsg);
+                spin_unlock_irqrestore(&free_buff_lock, flags);
+                msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
+                result = get_user(msglen, &pioctl_dpram->total_len);
+		if (result)
+			break;
+		msglen = htons(msglen);
+                //DEBUG("FT1000:ft1000_ioctl:msg length = %x\n", msglen);
+                if(copy_to_user (&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen))
+				{
+					DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
+	             	result = -EFAULT;
+	             	break;
+				}
+
+                ft1000_free_buffer(pdpram_blk, &freercvpool);
+                result = msglen;
+            }
+            //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM no message\n");
+        }
+        break;
+
+    default:
+        DEBUG("FT1000:ft1000_ioctl:unknown command: 0x%x\n", command);
+        result = -ENOTTY;
+        break;
+    }
+    info->fAppMsgPend = 0;
+    return result;
+}
+
+//---------------------------------------------------------------------------
+// Function:    ft1000_release
+//
+// Parameters:
+//
+// Description:
+//
+// Notes:
+//
+//---------------------------------------------------------------------------
+static int ft1000_release (struct inode *inode, struct file *file)
+{
+	struct ft1000_info *info;
+    struct net_device *dev;
+    int i;
+	struct dpram_blk *pdpram_blk;
+
+    DEBUG("ft1000_release called\n");
+
+    dev = file->private_data;
+	info = netdev_priv(dev);
+
+    if (ft1000_flarion_cnt == 0) {
+        info->appcnt--;
+        return (-EBADF);
+    }
+
+    // Search for matching file object
+    for (i=0; i<MAX_NUM_APP; i++) {
+        if ( info->app_info[i].fileobject == &file->f_owner) {
+            //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", info->app_info[i].app_id);
+            break;
+        }
+    }
+
+    if (i==MAX_NUM_APP)
+	    return 0;
+
+    while (list_empty(&info->app_info[i].app_sqlist) == 0) {
+        DEBUG("Remove and free memory queue up on slow queue\n");
+        pdpram_blk = list_entry(info->app_info[i].app_sqlist.next, struct dpram_blk, list);
+        list_del(&pdpram_blk->list);
+        ft1000_free_buffer(pdpram_blk, &freercvpool);
+    }
+
+    // initialize application information
+    info->appcnt--;
+    DEBUG("ft1000_chdev:%s:appcnt = %d\n", __FUNCTION__, info->appcnt);
+    info->app_info[i].fileobject = NULL;
+
+    return 0;
+}
+
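Before a slow-queue message is handed to CardSendCommand() in the new ft1000_ioctl() above, the handler stamps the pseudo header with a slow-queue sequence number and the caller's application id, then recomputes the checksum as the XOR of the first seven 16-bit words of the header. A minimal sketch of just that preparation step, assuming the driver's struct pseudo_hdr; the helper name and parameter types are illustrative, not part of the patch:

/* Illustrative helper, not in the driver: fill in the slow-queue fields of
 * a pseudo header and recompute its checksum the same way the
 * IOCTL_SET_DPRAM handler above does (XOR of the first seven 16-bit words).
 */
static void ft1000_stamp_pseudo_hdr(struct pseudo_hdr *hdr, u8 seq, u8 app_id)
{
	u16 *word = (u16 *)hdr;
	int i;

	hdr->seq_num = seq;	/* slow-queue sequence number */
	hdr->portsrc = app_id;	/* id of the registered application */

	hdr->checksum = word[0];
	for (i = 1; i < 7; i++)
		hdr->checksum ^= word[i];
}
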
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 4dd456f..17546d8 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -123,11 +123,11 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static ULONG check_usb_db (struct ft1000_device *ft1000dev)
+static u32 check_usb_db (struct ft1000_device *ft1000dev)
 {
    int               loopcnt;
-   USHORT            temp;
-   ULONG             status;
+   u16            temp;
+   u32             status;
 
    loopcnt = 0;
    while (loopcnt < 10)
@@ -190,7 +190,7 @@
 // Function:    get_handshake
 //
 // Parameters:  struct ft1000_device  - device structure
-//              USHORT expected_value - the handshake value expected
+//              u16 expected_value - the handshake value expected
 //
 // Returns:     handshakevalue - success
 //              HANDSHAKE_TIMEOUT_VALUE - failure
@@ -200,11 +200,11 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static USHORT get_handshake(struct ft1000_device *ft1000dev, USHORT expected_value)
+static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value)
 {
-   USHORT            handshake;
+   u16            handshake;
    int               loopcnt;
-   ULONG             status=0;
+   u32             status=0;
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
 
    loopcnt = 0;
@@ -228,7 +228,7 @@
                    status = ft1000_write_register (ft1000dev,  FT1000_DB_DNLD_RX, FT1000_REG_DOORBELL);
                }
 
-                status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (PUCHAR)&handshake, 1);
+                status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
                 //DEBUG("get_handshake: handshake is %x\n", tempx);
                 handshake = ntohs(handshake);
                 //DEBUG("get_handshake: after swap, handshake is %x\n", handshake);
@@ -259,7 +259,7 @@
 // Function:    put_handshake
 //
 // Parameters:  struct ft1000_device  - device structure
-//              USHORT handshake_value - handshake to be written
+//              u16 handshake_value - handshake to be written
 //
 // Returns:     none
 //
@@ -269,30 +269,30 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static void put_handshake(struct ft1000_device *ft1000dev,USHORT handshake_value)
+static void put_handshake(struct ft1000_device *ft1000dev,u16 handshake_value)
 {
-    ULONG tempx;
-    USHORT tempword;
-    ULONG status;
+    u32 tempx;
+    u16 tempword;
+    u32 status;
 
 
 
-        tempx = (ULONG)handshake_value;
+        tempx = (u32)handshake_value;
         tempx = ntohl(tempx);
 
-        tempword = (USHORT)(tempx & 0xffff);
+        tempword = (u16)(tempx & 0xffff);
         status = ft1000_write_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 0);
-        tempword = (USHORT)(tempx >> 16);
+        tempword = (u16)(tempx >> 16);
         status = ft1000_write_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 1);
         status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL);
 }
 
-static USHORT get_handshake_usb(struct ft1000_device *ft1000dev, USHORT expected_value)
+static u16 get_handshake_usb(struct ft1000_device *ft1000dev, u16 expected_value)
 {
-   USHORT            handshake;
+   u16            handshake;
    int               loopcnt;
-   USHORT            temp;
-   ULONG             status=0;
+   u16            temp;
+   u32             status=0;
 
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
    loopcnt = 0;
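put_handshake() above never writes the 16-bit handshake value directly: it widens the value to 32 bits, byte-swaps it with ntohl(), and writes the low and high 16-bit halves to the DPRAM handshake location with two ft1000_write_dpram16() calls (indices 0 and 1) before ringing FT1000_DB_DNLD_TX. A small sketch of just that split; the helper name is illustrative only, not part of the patch:

/* Illustrative helper, not in the driver: widen, byte-swap, and split a
 * handshake value exactly as put_handshake() above does before the two
 * DPRAM writes.
 */
static void split_handshake(u16 handshake_value, u16 *low_word, u16 *high_word)
{
	u32 tempx = ntohl((u32)handshake_value);

	*low_word  = (u16)(tempx & 0xffff);	/* DPRAM handshake word, index 0 */
	*high_word = (u16)(tempx >> 16);	/* DPRAM handshake word, index 1 */
}
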
@@ -300,10 +300,10 @@
    while (loopcnt < 100)
    {
        if (pft1000info->usbboot == 2) {
-           status = ft1000_read_dpram32 (ft1000dev, 0, (PUCHAR)&(pft1000info->tempbuf[0]), 64);
+           status = ft1000_read_dpram32 (ft1000dev, 0, (u8 *)&(pft1000info->tempbuf[0]), 64);
            for (temp=0; temp<16; temp++)
                DEBUG("tempbuf %d = 0x%x\n", temp, pft1000info->tempbuf[temp]);
-           status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (PUCHAR)&handshake, 1);
+           status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
            DEBUG("handshake from read_dpram16 = 0x%x\n", handshake);
            if (pft1000info->dspalive == pft1000info->tempbuf[6])
                handshake = 0;
@@ -313,7 +313,7 @@
            }
        }
        else {
-           status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (PUCHAR)&handshake, 1);
+           status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
        }
        loopcnt++;
        msleep(10);
@@ -327,7 +327,7 @@
    return HANDSHAKE_TIMEOUT_VALUE;
 }
 
-static void put_handshake_usb(struct ft1000_device *ft1000dev,USHORT handshake_value)
+static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_value)
 {
    int i;
 
@@ -346,44 +346,44 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static USHORT get_request_type(struct ft1000_device *ft1000dev)
+static u16 get_request_type(struct ft1000_device *ft1000dev)
 {
-   USHORT   request_type;
-   ULONG    status;
-   USHORT   tempword;
-   ULONG    tempx;
+   u16   request_type;
+   u32    status;
+   u16   tempword;
+   u32    tempx;
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
 
    if ( pft1000info->bootmode == 1)
    {
-       status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempx);
+       status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
        tempx = ntohl(tempx);
    }
    else
    {
        tempx = 0;
 
-       status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempword, 1);
+       status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
        tempx |= (tempword << 16);
        tempx = ntohl(tempx);
    }
-   request_type = (USHORT)tempx;
+   request_type = (u16)tempx;
 
    //DEBUG("get_request_type: request_type is %x\n", request_type);
    return request_type;
 
 }
 
-static USHORT get_request_type_usb(struct ft1000_device *ft1000dev)
+static u16 get_request_type_usb(struct ft1000_device *ft1000dev)
 {
-   USHORT   request_type;
-   ULONG    status;
-   USHORT   tempword;
-   ULONG    tempx;
+   u16   request_type;
+   u32    status;
+   u16   tempword;
+   u32    tempx;
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
    if ( pft1000info->bootmode == 1)
    {
-       status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempx);
+       status = fix_ft1000_read_dpram32 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
        tempx = ntohl(tempx);
    }
    else
@@ -394,12 +394,12 @@
        }
        else {
           tempx = 0;
-          status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (PUCHAR)&tempword, 1);
+          status = ft1000_read_dpram16 (ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
        }
        tempx |= (tempword << 16);
        tempx = ntohl(tempx);
    }
-   request_type = (USHORT)tempx;
+   request_type = (u16)tempx;
 
    //DEBUG("get_request_type: request_type is %x\n", request_type);
    return request_type;
@@ -420,22 +420,22 @@
 //---------------------------------------------------------------------------
 static long get_request_value(struct ft1000_device *ft1000dev)
 {
-   ULONG     value;
-   USHORT   tempword;
-   ULONG    status;
+   u32     value;
+   u16   tempword;
+   u32    status;
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
 
 
        if ( pft1000info->bootmode == 1)
        {
-	   status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&value);
+	   status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
 	   value = ntohl(value);
        }
        else
        {
-	   status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempword, 0);
+	   status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
 	   value = tempword;
-           status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempword, 1);
+           status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
 	   value |= (tempword << 16);
 	   value = ntohl(value);
        }
@@ -449,9 +449,9 @@
 #if 0
 static long get_request_value_usb(struct ft1000_device *ft1000dev)
 {
-   ULONG     value;
-   USHORT   tempword;
-   ULONG    status;
+   u32     value;
+   u16   tempword;
+   u32    status;
    struct ft1000_info * pft1000info = netdev_priv(ft1000dev->net);
 
        if (pft1000info->usbboot == 2) {
@@ -460,7 +460,7 @@
        }
        else {
           value = 0;
-          status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempword, 1);
+          status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
        }
 
        value |= (tempword << 16);
@@ -490,11 +490,11 @@
 //---------------------------------------------------------------------------
 static void put_request_value(struct ft1000_device *ft1000dev, long lvalue)
 {
-   ULONG    tempx;
-   ULONG    status;
+   u32    tempx;
+   u32    status;
 
        tempx = ntohl(lvalue);
-       status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (PUCHAR)&tempx);
+       status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempx);
 
 
 
@@ -516,10 +516,10 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static USHORT hdr_checksum(struct pseudo_hdr *pHdr)
+static u16 hdr_checksum(struct pseudo_hdr *pHdr)
 {
-   USHORT   *usPtr = (USHORT *)pHdr;
-   USHORT   chksum;
+   u16   *usPtr = (u16 *)pHdr;
+   u16   chksum;
 
 
   chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
@@ -533,8 +533,8 @@
 // Function:    write_blk
 //
 // Parameters:  struct ft1000_device  - device structure
-//              USHORT **pUsFile - DSP image file pointer in USHORT
-//              UCHAR  **pUcFile - DSP image file pointer in UCHAR
+//              u16 **pUsFile - DSP image file pointer in u16
+//              u8  **pUcFile - DSP image file pointer in u8
 //              long   word_length - lenght of the buffer to be written
 //                                   to DPRAM
 //
@@ -546,20 +546,20 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static ULONG write_blk (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR **pUcFile, long word_length)
+static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
 {
-   ULONG Status = STATUS_SUCCESS;
-   USHORT dpram;
+   u32 Status = STATUS_SUCCESS;
+   u16 dpram;
    long temp_word_length;
    int loopcnt, i, j;
-   USHORT *pTempFile;
-   USHORT tempword;
-   USHORT tempbuffer[64];
-   USHORT resultbuffer[64];
+   u16 *pTempFile;
+   u16 tempword;
+   u16 tempbuffer[64];
+   u16 resultbuffer[64];
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
 
    //DEBUG("FT1000:download:start word_length = %d\n",(int)word_length);
-   dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+   dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
    tempword = *(*pUsFile);
    (*pUsFile)++;
    Status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0);
@@ -569,7 +569,7 @@
 
    *pUcFile = *pUcFile + 4;
    word_length--;
-   tempword = (USHORT)word_length;
+   tempword = (u16)word_length;
    word_length = (word_length / 16) + 1;
    pTempFile = *pUsFile;
    temp_word_length = word_length;
@@ -602,24 +602,24 @@
 	      if (pft1000info->bootmode == 0)
 	      {
 		 if (dpram >= 0x3F4)
-                     Status = ft1000_write_dpram32 (ft1000dev, dpram, (PUCHAR)&tempbuffer[0], 8);
+                     Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 8);
 	         else
-                    Status = ft1000_write_dpram32 (ft1000dev, dpram, (PUCHAR)&tempbuffer[0], 64);
+                    Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64);
 	      }
 	      else
 	      {
                  for (j=0; j<10; j++)
                  {
-                   Status = ft1000_write_dpram32 (ft1000dev, dpram, (PUCHAR)&tempbuffer[0], 64);
+                   Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64);
 		   if (Status == STATUS_SUCCESS)
 		   {
 		       // Work around for ASIC bit stuffing problem.
 		       if ( (tempbuffer[31] & 0xfe00) == 0xfe00)
 		       {
-      		           Status = ft1000_write_dpram32(ft1000dev, dpram+12, (PUCHAR)&tempbuffer[24], 64);
+      		           Status = ft1000_write_dpram32(ft1000dev, dpram+12, (u8 *)&tempbuffer[24], 64);
 		       }
     		       // Let's check the data written
-	    	       Status = ft1000_read_dpram32 (ft1000dev, dpram, (PUCHAR)&resultbuffer[0], 64);
+	    	       Status = ft1000_read_dpram32 (ft1000dev, dpram, (u8 *)&resultbuffer[0], 64);
 		       if ( (tempbuffer[31] & 0xfe00) == 0xfe00)
 		       {
 		           for (i=0; i<28; i++)
@@ -633,7 +633,7 @@
 				   break;
 				}
 			   }
-   			   Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (PUCHAR)&resultbuffer[0], 64);
+   			   Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (u8 *)&resultbuffer[0], 64);
 		           for (i=0; i<16; i++)
 		           {
     			       if (resultbuffer[i] != tempbuffer[i+24])
@@ -689,8 +689,8 @@
 // Function:    write_blk_fifo
 //
 // Parameters:  struct ft1000_device  - device structure
-//              USHORT **pUsFile - DSP image file pointer in USHORT
-//              UCHAR  **pUcFile - DSP image file pointer in UCHAR
+//              u16 **pUsFile - DSP image file pointer in u16
+//              u8  **pUcFile - DSP image file pointer in u8
 //              long   word_length - lenght of the buffer to be written
 //                                   to DPRAM
 //
@@ -702,9 +702,9 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-static ULONG write_blk_fifo (struct ft1000_device *ft1000dev, USHORT **pUsFile, UCHAR **pUcFile, long word_length)
+static u32 write_blk_fifo (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length)
 {
-   ULONG Status = STATUS_SUCCESS;
+   u32 Status = STATUS_SUCCESS;
    int byte_length;
    long aligncnt;
 
@@ -770,36 +770,36 @@
 //  Returns:    status                  - return code
 //---------------------------------------------------------------------------
 
-u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG  FileLength)
+u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, u32  FileLength)
 {
-   u16                     Status = STATUS_SUCCESS;
-   UINT                    uiState;
-   USHORT                  handshake;
-	struct pseudo_hdr *pHdr;
-   USHORT                  usHdrLength;
+   u16                     status = STATUS_SUCCESS;
+   u32                    state;
+   u16                  handshake;
+	struct pseudo_hdr *pseudo_header;
+   u16                  pseudo_header_len;
    long                    word_length;
-   USHORT                  request;
-   USHORT                  temp;
-   USHORT                  tempword;
+   u16                  request;
+   u16                  temp;
+   u16                  tempword;
 
-	struct dsp_file_hdr *pFileHdr5;
-	struct dsp_image_info *pDspImageInfoV6 = NULL;
+	struct dsp_file_hdr *file_hdr;
+	struct dsp_image_info *dsp_img_info = NULL;
    long                    requested_version;
-   BOOLEAN                 bGoodVersion;
-	struct drv_msg *pMailBoxData;
-   USHORT                  *pUsData = NULL;
-   USHORT                  *pUsFile = NULL;
-   UCHAR                   *pUcFile = NULL;
-   UCHAR                   *pBootEnd = NULL, *pCodeEnd= NULL;
-   int                     imageN;
+   bool                 correct_version;
+	struct drv_msg *mailbox_data;
+   u16                  *data = NULL;
+   u16                  *s_file = NULL;
+   u8                   *c_file = NULL;
+   u8                   *boot_end = NULL, *code_end= NULL;
+   int                     image;
    long                    loader_code_address, loader_code_size = 0;
    long                    run_address = 0, run_size = 0;
 
-   ULONG                   templong;
-   ULONG                   image_chksum = 0;
+   u32                   templong;
+   u32                   image_chksum = 0;
 
-   USHORT                  dpram = 0;
-   PUCHAR                  pbuffer;
+   u16                  dpram = 0;
+   u8 *pbuffer;
 	struct prov_record *pprov_record;
 	struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net);
 
@@ -814,24 +814,24 @@
    // Get version id of file, at first 4 bytes of file, for newer files.
    //
 
-   uiState = STATE_START_DWNLD;
+   state = STATE_START_DWNLD;
 
-   pFileHdr5 = (struct dsp_file_hdr *)pFileStart;
+   file_hdr = (struct dsp_file_hdr *)pFileStart;
 
    ft1000_write_register (ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK);
 
-      pUsFile = (USHORT *)(pFileStart + pFileHdr5->loader_offset);
-      pUcFile = (UCHAR *)(pFileStart + pFileHdr5->loader_offset);
+      s_file = (u16 *)(pFileStart + file_hdr->loader_offset);
+      c_file = (u8 *)(pFileStart + file_hdr->loader_offset);
 
-      pBootEnd = (UCHAR *)(pFileStart + pFileHdr5->loader_code_end);
+      boot_end = (u8 *)(pFileStart + file_hdr->loader_code_end);
 
-      loader_code_address = pFileHdr5->loader_code_address;
-      loader_code_size = pFileHdr5->loader_code_size;
-      bGoodVersion = FALSE;
+      loader_code_address = file_hdr->loader_code_address;
+      loader_code_size = file_hdr->loader_code_size;
+      correct_version = FALSE;
 
-   while ((Status == STATUS_SUCCESS) && (uiState != STATE_DONE_FILE))
+   while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE))
    {
-      switch (uiState)
+      switch (state)
       {
       case  STATE_START_DWNLD:
          DEBUG("FT1000:STATE_START_DWNLD\n");
@@ -848,10 +848,10 @@
          else
          {
             DEBUG("FT1000:download:Download error: Handshake failed\n");
-            Status = STATUS_FAILURE;
+            status = STATUS_FAILURE;
          }
 
-         uiState = STATE_BOOT_DWNLD;
+         state = STATE_BOOT_DWNLD;
 
          break;
 
@@ -878,11 +878,11 @@
             case  REQUEST_DONE_BL:
                DEBUG("FT1000:REQUEST_DONE_BL\n");
                /* Reposition ptrs to beginning of code section */
-               pUsFile = (USHORT *)(pBootEnd);
-               pUcFile = (UCHAR *)(pBootEnd);
-               //DEBUG("FT1000:download:pUsFile = 0x%8x\n", (int)pUsFile);
-               //DEBUG("FT1000:download:pUcFile = 0x%8x\n", (int)pUcFile);
-               uiState = STATE_CODE_DWNLD;
+               s_file = (u16 *)(boot_end);
+               c_file = (u8 *)(boot_end);
+               //DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file);
+               //DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file);
+               state = STATE_CODE_DWNLD;
                pft1000info->fcodeldr = 1;
                break;
             case  REQUEST_CODE_SEGMENT:
@@ -893,33 +893,33 @@
                if (word_length > MAX_LENGTH)
                {
                   DEBUG("FT1000:download:Download error: Max length exceeded\n");
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
-               if ( (word_length*2 + pUcFile) > pBootEnd)
+               if ( (word_length*2 + c_file) > boot_end)
                {
                   /*
                    * Error, beyond boot code range.
                    */
                   DEBUG("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundry.\n",
                                                             (int)word_length);
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
                /*
                 * Position ASIC DPRAM auto-increment pointer.
                 */
-				    dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+				    dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
 					if (word_length & 0x1)
 						word_length++;
 					word_length = word_length / 2;
 
-			Status =   write_blk(ft1000dev, &pUsFile, &pUcFile, word_length);
-			//DEBUG("write_blk returned %d\n", Status);
+			status =   write_blk(ft1000dev, &s_file, &c_file, word_length);
+			//DEBUG("write_blk returned %d\n", status);
                break;
             default:
                DEBUG("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n",request);
-               Status = STATUS_FAILURE;
+               status = STATUS_FAILURE;
                break;
             }
             if (pft1000info->usbboot)
@@ -930,7 +930,7 @@
          else
          {
             DEBUG("FT1000:download:Download error: Handshake failed\n");
-            Status = STATUS_FAILURE;
+            status = STATUS_FAILURE;
          }
 
          break;
@@ -959,7 +959,7 @@
                 break;
             case  REQUEST_RUN_ADDRESS:
                DEBUG("FT1000:download:  REQUEST_RUN_ADDRESS\n");
-               if (bGoodVersion)
+               if (correct_version)
                {
                   DEBUG("FT1000:download:run_address = 0x%8x\n", (int)run_address);
                   put_request_value(ft1000dev, run_address);
@@ -967,13 +967,13 @@
                else
                {
                   DEBUG("FT1000:download:Download error: Got Run address request before image offset request.\n");
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
                break;
             case  REQUEST_CODE_LENGTH:
                DEBUG("FT1000:download:REQUEST_CODE_LENGTH\n");
-               if (bGoodVersion)
+               if (correct_version)
                {
                   DEBUG("FT1000:download:run_size = 0x%8x\n", (int)run_size);
                   put_request_value(ft1000dev, run_size);
@@ -981,23 +981,23 @@
                else
                {
                   DEBUG("FT1000:download:Download error: Got Size request before image offset request.\n");
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
                break;
             case  REQUEST_DONE_CL:
                pft1000info->usbboot = 3;
                /* Reposition ptrs to beginning of provisioning section */
-                  pUsFile = (USHORT *)(pFileStart + pFileHdr5->commands_offset);
-                  pUcFile = (UCHAR *)(pFileStart + pFileHdr5->commands_offset);
-               uiState = STATE_DONE_DWNLD;
+                  s_file = (u16 *)(pFileStart + file_hdr->commands_offset);
+                  c_file = (u8 *)(pFileStart + file_hdr->commands_offset);
+               state = STATE_DONE_DWNLD;
                break;
             case  REQUEST_CODE_SEGMENT:
                //DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n");
-               if (!bGoodVersion)
+               if (!correct_version)
                {
                   DEBUG("FT1000:download:Download error: Got Code Segment request before image offset request.\n");
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
 #if 0
@@ -1011,28 +1011,28 @@
 #endif
                {
                   DEBUG("FT1000:download:Download error: Max length exceeded\n");
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
-               if ( (word_length*2 + pUcFile) > pCodeEnd)
+               if ( (word_length*2 + c_file) > code_end)
                {
                   /*
                    * Error, beyond boot code range.
                    */
                   DEBUG("FT1000:download:Download error: Requested len=%d exceeds DSP code boundry.\n",
                                (int)word_length);
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
                /*
                 * Position ASIC DPRAM auto-increment pointer.
                 */
-		   dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+		   dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
 		   if (word_length & 0x1)
 			word_length++;
 		   word_length = word_length / 2;
 
-   	       write_blk_fifo (ft1000dev, &pUsFile, &pUcFile, word_length);
+   	       write_blk_fifo (ft1000dev, &s_file, &c_file, word_length);
                if (pft1000info->usbboot == 0)
                    pft1000info->usbboot++;
                if (pft1000info->usbboot == 1) {
@@ -1047,14 +1047,14 @@
                // Convert length from byte count to word count. Make sure we round up.
                word_length = (long)(pft1000info->DSPInfoBlklen + 1)/2;
                put_request_value(ft1000dev, word_length);
-		pMailBoxData = (struct drv_msg *)&(pft1000info->DSPInfoBlk[0]);
+		mailbox_data = (struct drv_msg *)&(pft1000info->DSPInfoBlk[0]);
                /*
                 * Position ASIC DPRAM auto-increment pointer.
                 */
 
 
-                   pUsData = (USHORT *)&pMailBoxData->data[0];
-                   dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+                   data = (u16 *)&mailbox_data->data[0];
+                   dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
                    if (word_length & 0x1)
                        word_length++;
 
@@ -1064,25 +1064,25 @@
                for (; word_length > 0; word_length--) /* In words */
                {
 
-                      templong = *pUsData++;
-					  templong |= (*pUsData++ << 16);
-                      Status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (PUCHAR)&templong);
+                      templong = *data++;
+					  templong |= (*data++ << 16);
+                      status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (u8 *)&templong);
 
                }
                break;
 
             case  REQUEST_VERSION_INFO:
                DEBUG("FT1000:download:REQUEST_VERSION_INFO\n");
-               word_length = pFileHdr5->version_data_size;
+               word_length = file_hdr->version_data_size;
                put_request_value(ft1000dev, word_length);
                /*
                 * Position ASIC DPRAM auto-increment pointer.
                 */
 
-               pUsFile = (USHORT *)(pFileStart + pFileHdr5->version_data_offset);
+               s_file = (u16 *)(pFileStart + file_hdr->version_data_offset);
 
 
-                   dpram = (USHORT)DWNLD_MAG1_PS_HDR_LOC;
+                   dpram = (u16)DWNLD_MAG1_PS_HDR_LOC;
                    if (word_length & 0x1)
                        word_length++;
 
@@ -1092,59 +1092,59 @@
                for (; word_length > 0; word_length--) /* In words */
                {
 
-                      templong = ntohs(*pUsFile++);
-					  temp = ntohs(*pUsFile++);
+                      templong = ntohs(*s_file++);
+					  temp = ntohs(*s_file++);
 					  templong |= (temp << 16);
-                      Status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (PUCHAR)&templong);
+                      status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (u8 *)&templong);
 
                }
                break;
 
             case  REQUEST_CODE_BY_VERSION:
                DEBUG("FT1000:download:REQUEST_CODE_BY_VERSION\n");
-               bGoodVersion = FALSE;
+               correct_version = FALSE;
                requested_version = get_request_value(ft1000dev);
 
-                   pDspImageInfoV6 = (struct dsp_image_info *)(pFileStart + sizeof(struct dsp_file_hdr ));
+                   dsp_img_info = (struct dsp_image_info *)(pFileStart + sizeof(struct dsp_file_hdr ));
 
-               for (imageN = 0; imageN < pFileHdr5->nDspImages; imageN++)
+               for (image = 0; image < file_hdr->nDspImages; image++)
                {
 
-                       temp = (USHORT)(pDspImageInfoV6->version);
+                       temp = (u16)(dsp_img_info->version);
                        templong = temp;
-                       temp = (USHORT)(pDspImageInfoV6->version >> 16);
+                       temp = (u16)(dsp_img_info->version >> 16);
                        templong |= (temp << 16);
-                   if (templong == (ULONG)requested_version)
+                   if (templong == (u32)requested_version)
                        {
-                           bGoodVersion = TRUE;
-                           DEBUG("FT1000:download: bGoodVersion is TRUE\n");
-                           pUsFile = (USHORT *)(pFileStart + pDspImageInfoV6->begin_offset);
-                           pUcFile = (UCHAR *)(pFileStart + pDspImageInfoV6->begin_offset);
-                           pCodeEnd = (UCHAR *)(pFileStart + pDspImageInfoV6->end_offset);
-                           run_address = pDspImageInfoV6->run_address;
-                           run_size = pDspImageInfoV6->image_size;
-                           image_chksum = (ULONG)pDspImageInfoV6->checksum;
+                           correct_version = TRUE;
+                           DEBUG("FT1000:download: correct_version is TRUE\n");
+                           s_file = (u16 *)(pFileStart + dsp_img_info->begin_offset);
+                           c_file = (u8 *)(pFileStart + dsp_img_info->begin_offset);
+                           code_end = (u8 *)(pFileStart + dsp_img_info->end_offset);
+                           run_address = dsp_img_info->run_address;
+                           run_size = dsp_img_info->image_size;
+                           image_chksum = (u32)dsp_img_info->checksum;
                            break;
                         }
-                        pDspImageInfoV6++;
+                        dsp_img_info++;
 
 
                } //end of for
 
-               if (!bGoodVersion)
+               if (!correct_version)
                {
                   /*
                    * Error, beyond boot code range.
                    */
                   DEBUG("FT1000:download:Download error: Bad Version Request = 0x%x.\n",(int)requested_version);
-                  Status = STATUS_FAILURE;
+                  status = STATUS_FAILURE;
                   break;
                }
                break;
 
             default:
                DEBUG("FT1000:download:Download error: Bad request type=%d in CODE download state.\n",request);
-               Status = STATUS_FAILURE;
+               status = STATUS_FAILURE;
                break;
             }
             if (pft1000info->usbboot)
@@ -1155,94 +1155,94 @@
          else
          {
             DEBUG("FT1000:download:Download error: Handshake failed\n");
-            Status = STATUS_FAILURE;
+            status = STATUS_FAILURE;
          }
 
          break;
 
       case STATE_DONE_DWNLD:
          DEBUG("FT1000:download:Code loader is done...\n");
-         uiState = STATE_SECTION_PROV;
+         state = STATE_SECTION_PROV;
          break;
 
       case  STATE_SECTION_PROV:
          DEBUG("FT1000:download:STATE_SECTION_PROV\n");
-		pHdr = (struct pseudo_hdr *)pUcFile;
+		pseudo_header = (struct pseudo_hdr *)c_file;
 
-         if (pHdr->checksum == hdr_checksum(pHdr))
+         if (pseudo_header->checksum == hdr_checksum(pseudo_header))
          {
-            if (pHdr->portdest != 0x80 /* Dsp OAM */)
+            if (pseudo_header->portdest != 0x80 /* Dsp OAM */)
             {
-               uiState = STATE_DONE_PROV;
+               state = STATE_DONE_PROV;
                break;
             }
-            usHdrLength = ntohs(pHdr->length);    /* Byte length for PROV records */
+            pseudo_header_len = ntohs(pseudo_header->length);    /* Byte length for PROV records */
 
             // Get buffer for provisioning data
-		pbuffer = kmalloc((usHdrLength + sizeof(struct pseudo_hdr)), GFP_ATOMIC);
+		pbuffer = kmalloc((pseudo_header_len + sizeof(struct pseudo_hdr)), GFP_ATOMIC);
             if (pbuffer) {
-		memcpy(pbuffer, (void *)pUcFile, (UINT)(usHdrLength + sizeof(struct pseudo_hdr)));
+		memcpy(pbuffer, (void *)c_file, (u32)(pseudo_header_len + sizeof(struct pseudo_hdr)));
                 // link provisioning data
 		pprov_record = kmalloc(sizeof(struct prov_record), GFP_ATOMIC);
                 if (pprov_record) {
                     pprov_record->pprov_data = pbuffer;
                     list_add_tail (&pprov_record->list, &pft1000info->prov_list);
                     // Move to next entry if available
-			pUcFile = (UCHAR *)((unsigned long)pUcFile + (UINT)((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
-                    if ( (unsigned long)(pUcFile) - (unsigned long)(pFileStart) >= (unsigned long)FileLength) {
-                       uiState = STATE_DONE_FILE;
+			c_file = (u8 *)((unsigned long)c_file + (u32)((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
+                    if ( (unsigned long)(c_file) - (unsigned long)(pFileStart) >= (unsigned long)FileLength) {
+                       state = STATE_DONE_FILE;
                     }
                 }
                 else {
                     kfree(pbuffer);
-                    Status = STATUS_FAILURE;
+                    status = STATUS_FAILURE;
                 }
             }
             else {
-                Status = STATUS_FAILURE;
+                status = STATUS_FAILURE;
             }
          }
          else
          {
             /* Checksum did not compute */
-            Status = STATUS_FAILURE;
+            status = STATUS_FAILURE;
          }
-         DEBUG("ft1000:download: after STATE_SECTION_PROV, uiState = %d, Status= %d\n", uiState, Status);
+         DEBUG("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n", state, status);
          break;
 
       case  STATE_DONE_PROV:
          DEBUG("FT1000:download:STATE_DONE_PROV\n");
-         uiState = STATE_DONE_FILE;
+         state = STATE_DONE_FILE;
          break;
 
 
       default:
-         Status = STATUS_FAILURE;
+         status = STATUS_FAILURE;
          break;
       } /* End Switch */
 
-      if (Status != STATUS_SUCCESS) {
+      if (status != STATUS_SUCCESS) {
           break;
       }
 
 /****
       // Check if Card is present
-      Status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
-      if ( (Status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
+      status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK);
+      if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) {
           break;
       }
 
-      Status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
-      if ( (Status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
+      status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID);
+      if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) {
           break;
       }
 ****/
 
    } /* End while */
 
-   DEBUG("Download exiting with status = 0x%8x\n", Status);
+   DEBUG("Download exiting with status = 0x%8x\n", status);
    ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL);
 
-   return Status;
+   return status;
 }
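The DPRAM loops in the hunks above pack two consecutive 16-bit words into a single 32-bit value before handing it to fix_ft1000_write_dpram32() (templong = *data++; templong |= (*data++ << 16)). A minimal user-space sketch of that packing, plain C and independent of the driver, not part of the patch itself:

#include <stdint.h>
#include <stdio.h>

/* Combine two 16-bit words the same way the download loops do:
 * first word in the low half, second word in the high half. */
static uint32_t pack_words(const uint16_t *data)
{
	uint32_t templong;

	templong  = data[0];			/* low 16 bits */
	templong |= (uint32_t)data[1] << 16;	/* high 16 bits */
	return templong;
}

int main(void)
{
	uint16_t words[2] = { 0x1234, 0xABCD };

	/* Expect 0xABCD1234: the second word ends up in the upper half. */
	printf("0x%08X\n", (unsigned int)pack_words(words));
	return 0;
}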
 
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index 5b89ee2..643a637 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -41,37 +41,9 @@
 //Jim
 
 static u8 tempbuffer[1600];
-static unsigned long gCardIndex;
 
 #define MAX_RCV_LOOP   100
 
-/****************************************************************
- *     ft1000_control_complete
- ****************************************************************/
-static void ft1000_control_complete(struct urb *urb)
-{
-    struct ft1000_device *ft1000dev = (struct ft1000_device *)urb->context;
-
-    //DEBUG("FT1000_CONTROL_COMPLETE ENTERED\n");
-    if (ft1000dev == NULL )
-    {
-        DEBUG("NULL ft1000dev, failure\n");
-        return ;
-    }
-    else if ( ft1000dev->dev == NULL )
-    {
-        DEBUG("NULL ft1000dev->dev, failure\n");
-        return ;
-    }
-
-    if(waitqueue_active(&ft1000dev->control_wait))
-    {
-        wake_up(&ft1000dev->control_wait);
-    }
-
-    //DEBUG("FT1000_CONTROL_COMPLETE RETURNED\n");
-}
-
 //---------------------------------------------------------------------------
 // Function:    ft1000_control
 //
@@ -187,7 +159,7 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-u16 ft1000_write_register(struct ft1000_device *ft1000dev, USHORT value, u16 nRegIndx)
+u16 ft1000_write_register(struct ft1000_device *ft1000dev, u16 value, u16 nRegIndx)
 {
      u16 ret = STATUS_SUCCESS;
 
@@ -223,7 +195,7 @@
 //
 //---------------------------------------------------------------------------
 
-u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt)
+u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt)
 {
     u16 ret = STATUS_SUCCESS;
 
@@ -262,7 +234,7 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt)
+u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt)
 {
      u16 ret = STATUS_SUCCESS;
 
@@ -299,7 +271,7 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, u8 highlow)
+u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u8 highlow)
 {
     u16 ret = STATUS_SUCCESS;
 
@@ -347,7 +319,7 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, USHORT indx, USHORT value, u8 highlow)
+u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u8 highlow)
 {
      u16 ret = STATUS_SUCCESS;
 
@@ -392,10 +364,10 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer)
+u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer)
 {
-    UCHAR buf[16];
-    USHORT pos;
+    u8 buf[16];
+    u16 pos;
     u16 ret = STATUS_SUCCESS;
 
     //DEBUG("fix_ft1000_read_dpram32: indx: %d  \n", indx);
@@ -441,14 +413,14 @@
 // Notes:
 //
 //---------------------------------------------------------------------------
-u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer)
+u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer)
 {
-    USHORT pos1;
-    USHORT pos2;
-    USHORT i;
-    UCHAR buf[32];
-    UCHAR resultbuffer[32];
-    PUCHAR pdata;
+    u16 pos1;
+    u16 pos2;
+    u16 i;
+    u8 buf[32];
+    u8 resultbuffer[32];
+    u8 *pdata;
     u16 ret  = STATUS_SUCCESS;
 
     //DEBUG("fix_ft1000_write_dpram32: Entered:\n");
@@ -472,7 +444,7 @@
         return ret;
     }
 
-    ret = ft1000_read_dpram32(ft1000dev, pos1, (PUCHAR)&resultbuffer[0], 16);
+    ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
     if (ret == STATUS_SUCCESS)
     {
         buffer = pdata;
@@ -487,8 +459,8 @@
 
     if (ret == STATUS_FAILURE)
     {
-        ret = ft1000_write_dpram32(ft1000dev, pos1, (PUCHAR)&tempbuffer[0], 16);
-        ret = ft1000_read_dpram32(ft1000dev, pos1, (PUCHAR)&resultbuffer[0], 16);
+        ret = ft1000_write_dpram32(ft1000dev, pos1, (u8 *)&tempbuffer[0], 16);
+        ret = ft1000_read_dpram32(ft1000dev, pos1, (u8 *)&resultbuffer[0], 16);
         if (ret == STATUS_SUCCESS)
         {
             buffer = pdata;
@@ -518,10 +490,10 @@
 //
 //  Returns:    None
 //-----------------------------------------------------------------------
-static void card_reset_dsp (struct ft1000_device *ft1000dev, BOOLEAN value)
+static void card_reset_dsp (struct ft1000_device *ft1000dev, bool value)
 {
     u16 status = STATUS_SUCCESS;
-    USHORT tempword;
+    u16 tempword;
 
     status = ft1000_write_register (ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
     status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_SUP_CTRL);
@@ -620,8 +592,8 @@
 int dsp_reload(struct ft1000_device *ft1000dev)
 {
     u16 status;
-    USHORT tempword;
-    ULONG templong;
+    u16 tempword;
+    u32 templong;
 
 	struct ft1000_info *pft1000info;
 
@@ -648,7 +620,7 @@
     status = ft1000_write_register (ft1000dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
 
     // Let's check for FEFE
-    status = ft1000_read_dpram32 (ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX, (PUCHAR)&templong, 4);
+    status = ft1000_read_dpram32 (ft1000dev, FT1000_MAG_DPRAM_FEFE_INDX, (u8 *)&templong, 4);
     DEBUG("templong (fefe) = 0x%8x\n", templong);
 
     // call codeloader
@@ -753,7 +725,7 @@
 
     // Initialize DSP heartbeat area to ho
     ft1000_write_dpram16(ft1000dev, FT1000_MAG_HI_HO, ho_mag, FT1000_MAG_HI_HO_INDX);
-    ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (PCHAR)&tempword, FT1000_MAG_HI_HO_INDX);
+    ft1000_read_dpram16(ft1000dev, FT1000_MAG_HI_HO, (u8 *)&tempword, FT1000_MAG_HI_HO_INDX);
     DEBUG("ft1000_hw:ft1000_reset_card:hi_ho value = 0x%x\n", tempword);
 
 
@@ -800,8 +772,7 @@
 	int i, ret_val;
 	struct list_head *cur, *tmp;
 	char card_nr[2];
-
-	gCardIndex=0; //mbelian
+	unsigned long gCardIndex = 0;
 
     DEBUG("Enter init_ft1000_netdev...\n");
 
@@ -813,7 +784,7 @@
 	return -ENOMEM;
     }
 
-	pInfo = (struct ft1000_info *) netdev_priv(netdev);
+	pInfo = netdev_priv(netdev);
 
     //DEBUG("init_ft1000_netdev: gFt1000Info=%x, netdev=%x, ft1000dev=%x\n", gFt1000Info, netdev, ft1000dev);
 
@@ -821,9 +792,6 @@
 
     dev_alloc_name(netdev, netdev->name);
 
-    //for the first inserted card, decide the card index beginning number, in case there are existing network interfaces
-    if ( gCardIndex == 0 )
-    {
         DEBUG("init_ft1000_netdev: network device name is %s\n", netdev->name);
 
 	if ( strncmp(netdev->name,"eth", 3) == 0) {
@@ -843,13 +811,6 @@
 		ret_val = -ENXIO;
 		goto err_net;
         }
-    }
-    else
-    {
-        //not the first inserted card, increase card number by 1
-        pInfo->CardNumber = gCardIndex;
-        /*DEBUG("card number = %d\n", pInfo->CardNumber);*/ //mbelian
-    }
 
     memset(&pInfo->stats, 0, sizeof(struct net_device_stats) );
 
@@ -862,7 +823,6 @@
     pInfo->mediastate = 0;
     pInfo->fifo_cnt = 0;
     pInfo->DeviceCreated = FALSE;
-    pInfo->DeviceMajor = 0;
     pInfo->CurrentInterruptEnableMask = ISR_DEFAULT_MASK;
     pInfo->InterruptsEnabled = FALSE;
     pInfo->CardReady = 0;
@@ -874,13 +834,11 @@
     pInfo->fCondResetPend = 0;
 	pInfo->usbboot = 0;
 	pInfo->dspalive = 0;
-	for (i=0;i<32 ;i++ )
-	{
-		pInfo->tempbuf[i] = 0;
-	}
+	memset(&pInfo->tempbuf[0], 0, sizeof(pInfo->tempbuf));
 
     INIT_LIST_HEAD(&pInfo->prov_list);
 
+	INIT_LIST_HEAD(&pInfo->nodes.list);
 //mbelian
 #ifdef HAVE_NET_DEVICE_OPS
 	netdev->netdev_ops = &ftnet_ops;
@@ -982,7 +940,7 @@
 
 
     //Create character device, implemented by Jim
-    ft1000_CreateDevice(ft1000dev);
+    ft1000_create_dev(ft1000dev);
 
     DEBUG ("reg_ft1000_netdev returned\n");
 
@@ -1026,178 +984,6 @@
     //DEBUG("Return from ft1000_usb_transmit_complete\n");
 }
 
-
-/****************************************************************
- *     ft1000_control
- ****************************************************************/
-static int ft1000_read_fifo_reg(struct ft1000_device *ft1000dev,unsigned int pipe,
-                          u8 request,
-                          u8 requesttype,
-                          u16 value,
-                          u16 index,
-                          void *data,
-                          u16 size,
-                          int timeout)
-{
-    u16 ret;
-
-    DECLARE_WAITQUEUE(wait, current);
-    struct urb *urb;
-    struct usb_ctrlrequest *dr;
-    int status;
-
-    if (ft1000dev == NULL )
-    {
-        DEBUG("NULL ft1000dev, failure\n");
-        return STATUS_FAILURE;
-    }
-    else if ( ft1000dev->dev == NULL )
-    {
-        DEBUG("NULL ft1000dev->dev, failure\n");
-        return STATUS_FAILURE;
-    }
-
-    spin_lock(&ft1000dev->device_lock);
-
-    if(in_interrupt())
-    {
-        spin_unlock(&ft1000dev->device_lock);
-        return -EBUSY;
-    }
-
-    urb = usb_alloc_urb(0, GFP_KERNEL);
-    dr = kmalloc(sizeof(struct usb_ctrlrequest), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
-
-    if(!urb || !dr)
-    {
-	kfree(dr);
-	usb_free_urb(urb);
-        spin_unlock(&ft1000dev->device_lock);
-        return -ENOMEM;
-    }
-
-
-
-    dr->bRequestType = requesttype;
-    dr->bRequest = request;
-    dr->wValue = value;
-    dr->wIndex = index;
-    dr->wLength = size;
-
-    usb_fill_control_urb(urb, ft1000dev->dev, pipe, (char*)dr, (void*)data, size, (void *)ft1000_control_complete, (void*)ft1000dev);
-
-
-    init_waitqueue_head(&ft1000dev->control_wait);
-
-	set_current_state(TASK_INTERRUPTIBLE);
-
-    add_wait_queue(&ft1000dev->control_wait, &wait);
-
-
-
-
-    status = usb_submit_urb(urb, GFP_KERNEL);
-
-    if(status)
-    {
-        usb_free_urb(urb);
-        kfree(dr);
-        remove_wait_queue(&ft1000dev->control_wait, &wait);
-        spin_unlock(&ft1000dev->device_lock);
-        return status;
-    }
-
-    if(urb->status == -EINPROGRESS)
-    {
-        while(timeout && urb->status == -EINPROGRESS)
-        {
-            status = timeout = schedule_timeout(timeout);
-        }
-    }
-    else
-    {
-        status = 1;
-    }
-
-    remove_wait_queue(&ft1000dev->control_wait, &wait);
-
-    if(!status)
-    {
-        usb_unlink_urb(urb);
-        printk("ft1000 timeout\n");
-        status = -ETIMEDOUT;
-    }
-    else
-    {
-        status = urb->status;
-
-        if(urb->status)
-        {
-            printk("ft1000 control message failed (urb addr: %p) with error number: %i\n", urb, (int)status);
-
-            usb_clear_halt(ft1000dev->dev, usb_rcvctrlpipe(ft1000dev->dev, 0));
-            usb_clear_halt(ft1000dev->dev, usb_sndctrlpipe(ft1000dev->dev, 0));
-            usb_unlink_urb(urb);
-        }
-    }
-
-
-
-    usb_free_urb(urb);
-    kfree(dr);
-    spin_unlock(&ft1000dev->device_lock);
-    return ret;
-
-
-}
-
-//---------------------------------------------------------------------------
-// Function:    ft1000_read_fifo_len
-//
-// Parameters:  ft1000dev - device structure
-//
-//
-// Returns:     none
-//
-// Description: read the fifo length register content
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static inline u16 ft1000_read_fifo_len (struct net_device *dev)
-{
-    u16 temp;
-    u16 ret;
-
-	struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
-    struct ft1000_device *ft1000dev = info->pFt1000Dev;
-//    DEBUG("ft1000_read_fifo_len: enter ft1000dev %x\n", ft1000dev);			//aelias [-] reason: warning: format ‘%x’ expects type ‘unsigned int’, but argument 2 has type ‘struct ft1000_device *’
-    DEBUG("ft1000_read_fifo_len: enter ft1000dev %p\n", ft1000dev);	//aelias [+] reason: up
-
-    ret = STATUS_SUCCESS;
-
-    ret = ft1000_read_fifo_reg(ft1000dev,
-                          usb_rcvctrlpipe(ft1000dev->dev,0),
-                          HARLEY_READ_REGISTER,
-                          HARLEY_READ_OPERATION,
-                          0,
-                          FT1000_REG_MAG_UFSR,
-                          &temp,
-                          2,
-                          LARGE_TIMEOUT);
-
-    if (ret>0)
-        ret = STATUS_SUCCESS;
-    else
-        ret = STATUS_FAILURE;
-
-    DEBUG("ft1000_read_fifo_len: returned %d\n", temp);
-
-    return (temp- 16);
-
-}
-
-
 //---------------------------------------------------------------------------
 //
 // Function:   ft1000_copy_down_pkt
@@ -1219,16 +1005,15 @@
     struct ft1000_device *pFt1000Dev = pInfo->pFt1000Dev;
 
 
-    int i, count, ret;
-    USHORT *pTemp;
-    USHORT checksum;
+	int count, ret;
     u8 *t;
+	struct pseudo_hdr hdr;
 
     if (!pInfo->CardReady)
     {
 
         DEBUG("ft1000_copy_down_pkt::Card Not Ready\n");
-    	return STATUS_FAILURE;
+	return -ENODEV;
 
     }
 
@@ -1240,27 +1025,27 @@
     {
         DEBUG("Error:ft1000_copy_down_pkt:Message Size Overflow!\n");
     	DEBUG("size = %d\n", count);
-    	return STATUS_FAILURE;
+	return -EINVAL;
     }
 
     if ( count % 4)
         count = count + (4- (count %4) );
 
-    pTemp = (PUSHORT)&(pFt1000Dev->tx_buf[0]);
-    *pTemp ++ = ntohs(count);
-    *pTemp ++ = 0x1020;
-    *pTemp ++ = 0x2010;
-    *pTemp ++ = 0x9100;
-    *pTemp ++ = 0;
-    *pTemp ++ = 0;
-    *pTemp ++ = 0;
-    pTemp = (PUSHORT)&(pFt1000Dev->tx_buf[0]);
-    checksum = *pTemp ++;
-    for (i=1; i<7; i++)
-    {
-        checksum ^= *pTemp ++;
-    }
-    *pTemp++ = checksum;
+	memset(&hdr, 0, sizeof(struct pseudo_hdr));
+
+	hdr.length = ntohs(count);
+	hdr.source = 0x10;
+	hdr.destination = 0x20;
+	hdr.portdest = 0x20;
+	hdr.portsrc = 0x10;
+	hdr.sh_str_id = 0x91;
+	hdr.control = 0x00;
+
+	hdr.checksum = hdr.length ^ hdr.source ^ hdr.destination ^
+			hdr.portdest ^ hdr.portsrc ^ hdr.sh_str_id ^
+			hdr.control;
+
+	memcpy(&pFt1000Dev->tx_buf[0], &hdr, sizeof(hdr));
 	memcpy(&(pFt1000Dev->tx_buf[sizeof(struct pseudo_hdr)]), packet, len);
 
     netif_stop_queue(netdev);
@@ -1283,25 +1068,18 @@
     }*/
 
 
-    ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
-    if(ret)
-    {
+	ret = usb_submit_urb(pFt1000Dev->tx_urb, GFP_ATOMIC);
+	if (ret) {
 		DEBUG("ft1000 failed tx_urb %d\n", ret);
-
-		return STATUS_FAILURE;
-
-    }
-    else
-    {
-        //DEBUG("ft1000 sucess tx_urb %d\n", ret);
-
-        pInfo->stats.tx_packets++;
-        pInfo->stats.tx_bytes += (len+14);
-    }
+		return ret;
+	} else {
+		pInfo->stats.tx_packets++;
+		pInfo->stats.tx_bytes += (len+14);
+	}
 
     //DEBUG("ft1000_copy_down_pkt() exit\n");
 
-    return STATUS_SUCCESS;
+	return 0;
 }
 
 //---------------------------------------------------------------------------
@@ -1331,14 +1109,13 @@
     if ( skb == NULL )
     {
         DEBUG ("ft1000_hw: ft1000_start_xmit:skb == NULL!!!\n" );
-        return STATUS_FAILURE;
+	return NETDEV_TX_OK;
     }
 
     if ( pFt1000Dev->status & FT1000_STATUS_CLOSING)
     {
         DEBUG("network driver is closed, return\n");
-        dev_kfree_skb(skb);
-        return STATUS_SUCCESS;
+	goto err;
     }
 
     //DEBUG("ft1000_start_xmit 1:length of packet = %d\n", skb->len);
@@ -1357,28 +1134,24 @@
     {
         /* Drop packet is mediastate is down */
         DEBUG("ft1000_hw:ft1000_start_xmit:mediastate is down\n");
-        dev_kfree_skb(skb);
-        return STATUS_SUCCESS;
+	goto err;
     }
 
     if ( (skb->len < ENET_HEADER_SIZE) || (skb->len > ENET_MAX_SIZE) )
     {
         /* Drop packet which has invalid size */
         DEBUG("ft1000_hw:ft1000_start_xmit:invalid ethernet length\n");
-        dev_kfree_skb(skb);
-        return STATUS_SUCCESS;
+	goto err;
     }
 //mbelian
-    if(ft1000_copy_down_pkt (dev, (pdata+ENET_HEADER_SIZE-2), skb->len - ENET_HEADER_SIZE + 2) == STATUS_FAILURE)
-	{
-    	dev_kfree_skb(skb);
-		return STATUS_SUCCESS;
-	}
+	ft1000_copy_down_pkt(dev, (pdata+ENET_HEADER_SIZE-2),
+				skb->len - ENET_HEADER_SIZE + 2);
 
-    dev_kfree_skb(skb);
+err:
+	dev_kfree_skb(skb);
     //DEBUG(" ft1000_start_xmit() exit\n");
 
-    return 0;
+	return NETDEV_TX_OK;
 }
 
 //---------------------------------------------------------------------------
@@ -1424,7 +1197,7 @@
     //DEBUG("ft1000_copy_up_pkt: transfer_buffer_length=%d, actual_buffer_len=%d\n",
       //       urb->transfer_buffer_length, urb->actual_length);
 
-    chksum = (PUSHORT)ft1000dev->rx_buf;
+    chksum = (u16 *)ft1000dev->rx_buf;
 
     tempword = *chksum++;
     for (i=1; i<7; i++)
@@ -1521,7 +1294,7 @@
     {
         DEBUG("network driver is closed, return\n");
         //usb_kill_urb(pFt1000Dev->rx_urb); //mbelian
-        return STATUS_SUCCESS;
+	return -ENODEV;
     }
 
     usb_fill_bulk_urb(pFt1000Dev->rx_urb,
@@ -1536,12 +1309,12 @@
     if((result = usb_submit_urb(pFt1000Dev->rx_urb, GFP_ATOMIC)))
     {
         printk("ft1000_submit_rx_urb: submitting rx_urb %d failed\n", result);
-        return STATUS_FAILURE;
+	return result;
     }
 
     //DEBUG("ft1000_submit_rx_urb exit: result=%d\n", result);
 
-    return STATUS_SUCCESS;
+	return 0;
 }
 
 //---------------------------------------------------------------------------
@@ -1560,8 +1333,9 @@
 //---------------------------------------------------------------------------
 static int ft1000_open (struct net_device *dev)
 {
-	struct ft1000_info *pInfo = (struct ft1000_info *)netdev_priv(dev);
+	struct ft1000_info *pInfo = netdev_priv(dev);
     struct timeval tv; //mbelian
+	int ret;
 
     DEBUG("ft1000_open is called for card %d\n", pInfo->CardNumber);
     //DEBUG("ft1000_open: dev->addr=%x, dev->addr_len=%d\n", dev->addr, dev->addr_len);
@@ -1579,8 +1353,9 @@
 
     netif_carrier_on(dev); //mbelian
 
-    ft1000_submit_rx_urb(pInfo);
-    return 0;
+	ret = ft1000_submit_rx_urb(pInfo);
+
+	return ret;
 }
 
 //---------------------------------------------------------------------------
@@ -1599,7 +1374,7 @@
 //---------------------------------------------------------------------------
 int ft1000_close(struct net_device *net)
 {
-	struct ft1000_info *pInfo = (struct ft1000_info *) netdev_priv(net);
+	struct ft1000_info *pInfo = netdev_priv(net);
     struct ft1000_device *ft1000dev = pInfo->pFt1000Dev;
 
     //DEBUG ("ft1000_close: netdev->refcnt=%d\n", net->refcnt);
@@ -1622,7 +1397,7 @@
 
 static struct net_device_stats *ft1000_netdev_stats(struct net_device *dev)
 {
-	struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+	struct ft1000_info *info = netdev_priv(dev);
 
 	return &(info->stats); //mbelian
 }
@@ -1648,7 +1423,7 @@
 static int ft1000_chkcard (struct ft1000_device *dev) {
     u16 tempword;
     u16 status;
-	struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+	struct ft1000_info *info = netdev_priv(dev->net);
 
     if (info->fCondResetPend)
     {
@@ -1692,13 +1467,13 @@
 //          = 1 (successful)
 //
 //---------------------------------------------------------------------------
-static BOOLEAN ft1000_receive_cmd (struct ft1000_device *dev, u16 *pbuffer, int maxsz, u16 *pnxtph) {
+static bool ft1000_receive_cmd (struct ft1000_device *dev, u16 *pbuffer, int maxsz, u16 *pnxtph) {
     u16 size, ret;
     u16 *ppseudohdr;
     int i;
     u16 tempword;
 
-    ret = ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (PUCHAR)&size, FT1000_MAG_PH_LEN_INDX);
+    ret = ft1000_read_dpram16(dev, FT1000_MAG_PH_LEN, (u8 *)&size, FT1000_MAG_PH_LEN_INDX);
     size = ntohs(size) + PSEUDOSZ;
     if (size > maxsz) {
         DEBUG("FT1000:ft1000_receive_cmd:Invalid command length = %d\n", size);
@@ -1748,15 +1523,15 @@
 static int ft1000_dsp_prov(void *arg)
 {
     struct ft1000_device *dev = (struct ft1000_device *)arg;
-	struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+	struct ft1000_info *info = netdev_priv(dev->net);
     u16 tempword;
     u16 len;
     u16 i=0;
 	struct prov_record *ptr;
 	struct pseudo_hdr *ppseudo_hdr;
-    PUSHORT pmsg;
+    u16 *pmsg;
     u16 status;
-    USHORT TempShortBuf [256];
+    u16 TempShortBuf [256];
 
     DEBUG("*** DspProv Entered\n");
 
@@ -1792,7 +1567,7 @@
             len = htons(len);
             len += PSEUDOSZ;
 
-            pmsg = (PUSHORT)ptr->pprov_data;
+            pmsg = (u16 *)ptr->pprov_data;
 		ppseudo_hdr = (struct pseudo_hdr *)pmsg;
             // Insert slow queue sequence number
             ppseudo_hdr->seq_num = info->squeseqnum++;
@@ -1809,7 +1584,7 @@
             TempShortBuf[1] = htons (len);
             memcpy(&TempShortBuf[2], ppseudo_hdr, len);
 
-            status = ft1000_write_dpram32 (dev, 0, (PUCHAR)&TempShortBuf[0], (unsigned short)(len+2));
+            status = ft1000_write_dpram32 (dev, 0, (u8 *)&TempShortBuf[0], (unsigned short)(len+2));
             status = ft1000_write_register (dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL);
 
             list_del(&ptr->list);
@@ -1831,7 +1606,7 @@
 
 
 static int ft1000_proc_drvmsg (struct ft1000_device *dev, u16 size) {
-	struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+	struct ft1000_info *info = netdev_priv(dev->net);
     u16 msgtype;
     u16 tempword;
 	struct media_msg *pmediamsg;
@@ -1839,7 +1614,7 @@
 	struct drv_msg *pdrvmsg;
     u16 i;
 	struct pseudo_hdr *ppseudo_hdr;
-    PUSHORT pmsg;
+    u16 *pmsg;
     u16 status;
     union {
         u8  byte[2];
@@ -1971,7 +1746,7 @@
                 tempword = ntohs(pdrvmsg->length);
                 info->DSPInfoBlklen = tempword;
                 if (tempword < (MAX_DSP_SESS_REC-4) ) {
-                    pmsg = (PUSHORT)&pdrvmsg->data[0];
+                    pmsg = (u16 *)&pdrvmsg->data[0];
                     for (i=0; i<((tempword+1)/2); i++) {
                         DEBUG("FT1000:drivermsg:dsp info data = 0x%x\n", *pmsg);
                         info->DSPInfoBlk[i+10] = *pmsg++;
@@ -2003,10 +1778,10 @@
 
                 // Put message into Slow Queue
                 // Form Pseudo header
-                pmsg = (PUSHORT)info->DSPInfoBlk;
+                pmsg = (u16 *)info->DSPInfoBlk;
                 *pmsg++ = 0;
                 *pmsg++ = htons(info->DSPInfoBlklen+20+info->DSPInfoBlklen);
-		ppseudo_hdr = (struct pseudo_hdr *)(PUSHORT)&info->DSPInfoBlk[2];
+		ppseudo_hdr = (struct pseudo_hdr *)(u16 *)&info->DSPInfoBlk[2];
                 ppseudo_hdr->length = htons(info->DSPInfoBlklen+4+info->DSPInfoBlklen);
                 ppseudo_hdr->source = 0x10;
                 ppseudo_hdr->destination = 0x20;
@@ -2028,7 +1803,7 @@
                 }
                 info->DSPInfoBlk[10] = 0x7200;
                 info->DSPInfoBlk[11] = htons(info->DSPInfoBlklen);
-                status = ft1000_write_dpram32 (dev, 0, (PUCHAR)&info->DSPInfoBlk[0], (unsigned short)(info->DSPInfoBlklen+22));
+                status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPInfoBlk[0], (unsigned short)(info->DSPInfoBlklen+22));
                 status = ft1000_write_register (dev, FT1000_DB_DPRAM_TX, FT1000_REG_DOORBELL);
                 info->DrvMsgPend = 0;
 
@@ -2053,7 +1828,7 @@
               if ( (tempword & FT1000_DB_DPRAM_TX) == 0) {
                   // Put message into Slow Queue
                   // Form Pseudo header
-                  pmsg = (PUSHORT)&tempbuffer[0];
+                  pmsg = (u16 *)&tempbuffer[0];
 			ppseudo_hdr = (struct pseudo_hdr *)pmsg;
                   ppseudo_hdr->length = htons(0x0012);
                   ppseudo_hdr->source = 0x10;
@@ -2074,7 +1849,7 @@
                   for (i=1; i<7; i++) {
                       ppseudo_hdr->checksum ^= *pmsg++;
                   }
-                  pmsg = (PUSHORT)&tempbuffer[16];
+                  pmsg = (u16 *)&tempbuffer[16];
                   *pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
                   *pmsg++ = htons(0x000e);
                   *pmsg++ = htons(info->DSP_TIME[0]);
@@ -2089,7 +1864,7 @@
                   *pmsg++ = convert.wrd;
                   *pmsg++ = htons(info->DrvErrNum);
 
-                  CardSendCommand (dev, (unsigned char*)&tempbuffer[0], (USHORT)(0x0012 + PSEUDOSZ));
+                  CardSendCommand (dev, (unsigned char*)&tempbuffer[0], (u16)(0x0012 + PSEUDOSZ));
                   info->DrvErrNum = 0;
               }
               info->DrvMsgPend = 0;
@@ -2114,15 +1889,15 @@
 int ft1000_poll(void* dev_id) {
 
     struct ft1000_device *dev = (struct ft1000_device *)dev_id;
-	struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev->net);
+	struct ft1000_info *info = netdev_priv(dev->net);
 
     u16 tempword;
     u16 status;
     u16 size;
     int i;
-    USHORT data;
-    USHORT modulo;
-    USHORT portid;
+    u16 data;
+    u16 modulo;
+    u16 portid;
     u16 nxtph;
 	struct dpram_blk *pdpram_blk;
 	struct pseudo_hdr *ppseudo_hdr;
@@ -2143,14 +1918,14 @@
         if (tempword & FT1000_DB_DPRAM_RX) {
             //DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type:  FT1000_DB_DPRAM_RX\n");
 
-            status = ft1000_read_dpram16(dev, 0x200, (PUCHAR)&data, 0);
+            status = ft1000_read_dpram16(dev, 0x200, (u8 *)&data, 0);
             //DEBUG("ft1000_poll:FT1000_DB_DPRAM_RX:ft1000_read_dpram16:size = 0x%x\n", data);
             size = ntohs(data) + 16 + 2; //wai
             if (size % 4) {
                 modulo = 4 - (size % 4);
                 size = size + modulo;
             }
-            status = ft1000_read_dpram16(dev, 0x201, (PUCHAR)&portid, 1);
+            status = ft1000_read_dpram16(dev, 0x201, (u8 *)&portid, 1);
             portid &= 0xff;
             //DEBUG("ft1000_poll: FT1000_REG_DOORBELL message type: FT1000_DB_DPRAM_RX : portid 0x%x\n", portid);
 
@@ -2285,7 +2060,7 @@
             status = ft1000_write_register (dev, FT1000_ASIC_RESET_REQ, FT1000_REG_DOORBELL);
             status = ft1000_write_register (dev, HOST_INTF_BE, FT1000_REG_SUP_CTRL);
             // copy dsp session record from Adapter block
-            status = ft1000_write_dpram32 (dev, 0, (PUCHAR)&info->DSPSess.Rec[0], 1024);
+            status = ft1000_write_dpram32 (dev, 0, (u8 *)&info->DSPSess.Rec[0], 1024);
             // Program WMARK register
             status = ft1000_write_register (dev, 0x600, FT1000_REG_MAG_WATERMARK);
             // ring doorbell to tell DSP that ASIC is out of reset
@@ -2299,10 +2074,10 @@
             if (info->fAppMsgPend == 0) {
                // Reset ASIC and DSP
 
-                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (PUCHAR)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
-                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (PUCHAR)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX);
-                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (PUCHAR)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX);
-                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (PUCHAR)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX);
+                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER0, (u8 *)&(info->DSP_TIME[0]), FT1000_MAG_DSP_TIMER0_INDX);
+                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER1, (u8 *)&(info->DSP_TIME[1]), FT1000_MAG_DSP_TIMER1_INDX);
+                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER2, (u8 *)&(info->DSP_TIME[2]), FT1000_MAG_DSP_TIMER2_INDX);
+                status    = ft1000_read_dpram16(dev, FT1000_MAG_DSP_TIMER3, (u8 *)&(info->DSP_TIME[3]), FT1000_MAG_DSP_TIMER3_INDX);
                 info->CardReady = 0;
                 info->DrvErrNum = DSP_CONDRESET_INFO;
                 DEBUG("ft1000_hw:DSP conditional reset requested\n");
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h
index c580741..ab9312f 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.h
@@ -4,7 +4,7 @@
 
 #include "ft1000_usb.h"
 
-extern u16 ft1000_read_register(struct usb_device *dev, PUSHORT Data, u8 nRegIndx);
-extern u16 ft1000_write_register(struct usb_device *dev, USHORT value, u8 nRegIndx);
+extern u16 ft1000_read_register(struct usb_device *dev, u16 *Data, u8 nRegIndx);
+extern u16 ft1000_write_register(struct usb_device *dev, u16 value, u8 nRegIndx);
 
 #endif
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index 36cdd58..b87542a 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -1,3 +1,24 @@
+/*
+ * ft1000_proc.c - ft1000 proc interface
+ *
+ * Copyright	(C) 2009-2010 Quintec
+ *		(C) 2010 Open-nandra
+ *      <marek.belisko@open-nandra.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/proc_fs.h>
@@ -14,219 +35,217 @@
 
 #define PUTX_TO_PAGE(len,page,message,size,var) \
 	len += snprintf(page+len, PAGE_SIZE - len, message); \
-	for(i = 0; i < (size - 1); i++) \
-	{ \
+	for (i = 0; i < (size - 1); i++) {\
 		len += snprintf(page+len, PAGE_SIZE - len, "%02x:", var[i]); \
 	} \
 	len += snprintf(page+len, PAGE_SIZE - len, "%02x\n", var[i])
 
 #define PUTD_TO_PAGE(len,page,message,size,var) \
 	len += snprintf(page+len, PAGE_SIZE - len, message); \
-	for(i = 0; i < (size - 1); i++) \
-	{ \
+	for (i = 0; i < (size - 1); i++) {\
 		len += snprintf(page+len, PAGE_SIZE - len, "%d.", var[i]); \
 	} \
 	len += snprintf(page+len, PAGE_SIZE - len, "%d\n", var[i])
 
 
-
-
-//#ifdef INIT_NET_NS
 #define FTNET_PROC init_net.proc_net
-//#else
-//#define FTNET_PROC proc_net
-//#endif
 
 
-u16 ft1000_read_dpram16 (struct ft1000_device *ft1000dev, USHORT indx,
-			 PUCHAR buffer, u8 highlow);
+u16 ft1000_read_dpram16 (struct ft1000_device *ft1000dev, u16 indx,
+			 u8 *buffer, u8 highlow);
 
 
 static int
-ft1000ReadProc (char *page, char **start, off_t off, int count, int *eof,
+ft1000ReadProc(char *page, char **start, off_t off, int count, int *eof,
 		void *data)
 {
-  struct net_device *dev;
-  int len;
-  int i;
-  unsigned short ledStat;
-  unsigned short conStat;
+	struct net_device *dev;
+	int len;
+	int i;
+	unsigned short ledStat;
+	unsigned short conStat;
 
 	struct ft1000_info *info;
 
-  char *status[] = { "Idle (Disconnect)", "Searching", "Active (Connected)",
-    "Waiting for L2", "Sleep", "No Coverage", "", ""
-  };
+	char *status[] = { 
+		"Idle (Disconnect)", 
+		"Searching",
+		"Active (Connected)",
+		"Waiting for L2",
+		"Sleep",
+		"No Coverage",
+		"",
+		"",
+	};
 
-  char *signal[] = { "", "*", "**", "***", "****" };
-  int strength;
-  int quality;
-  struct timeval tv;
-  time_t delta;
+	char *signal[] = { "", "*", "**", "***", "****" };
+	int strength;
+	int quality;
+	struct timeval tv;
+	time_t delta;
 
-  dev = (struct net_device *) data;
-	info = (struct ft1000_info *) netdev_priv(dev);
+	dev = (struct net_device *) data;
+	info = netdev_priv(dev);
 
-  if (off > 0)
-    {
-      *eof = 1;
-      return 0;
-    }
+	if (off > 0) {
+		*eof = 1;
+		return 0;
+	}
 
 
-  if (info->ProgConStat != 0xFF)
-    {
-      ft1000_read_dpram16 (info->pFt1000Dev, FT1000_MAG_DSP_LED,
-			   (PUCHAR) & ledStat, FT1000_MAG_DSP_LED_INDX);
-      info->LedStat = ntohs (ledStat);
+	if (info->ProgConStat != 0xFF) {
+		ft1000_read_dpram16(info->pFt1000Dev, FT1000_MAG_DSP_LED,
+			   (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+		info->LedStat = ntohs(ledStat);
 
-      ft1000_read_dpram16 (info->pFt1000Dev, FT1000_MAG_DSP_CON_STATE,
-			   (PUCHAR) & conStat, FT1000_MAG_DSP_CON_STATE_INDX);
-      info->ConStat = ntohs (conStat);
-      do_gettimeofday (&tv);
-      delta = (tv.tv_sec - info->ConTm);
-    }
-  else
-    {
-      info->ConStat = 0xf;
-      delta = 0;
-    }
+		ft1000_read_dpram16(info->pFt1000Dev, FT1000_MAG_DSP_CON_STATE,
+			(u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+		info->ConStat = ntohs(conStat);
+		do_gettimeofday(&tv);
+		delta = (tv.tv_sec - info->ConTm);
+	} else {
+		info->ConStat = 0xf;
+		delta = 0;
+	}
 
+	i = (info->LedStat) & 0xf;
+	switch (i) {
+	case 0x1:
+		strength = 1;
+		break;
+	case 0x3:
+		strength = 2;
+		break;
+	case 0x7:
+		strength = 3;
+		break;
+	case 0xf:
+		strength = 4;
+		break;
+	default:
+		strength = 0;
+	}
 
+	i = (info->LedStat >> 8) & 0xf;
+	switch (i) {
+	case 0x1:
+		quality = 1;
+		break;
+	case 0x3:
+		quality = 2;
+		break;
+	case 0x7:
+		quality = 3;
+		break;
+	case 0xf:
+		quality = 4;
+		break;
+	default:
+		quality = 0;
+	}
 
-  i = (info->LedStat) & 0xf;
-  switch (i)
-    {
-    case 0x1:
-      strength = 1;
-      break;
-    case 0x3:
-      strength = 2;
-      break;
-    case 0x7:
-      strength = 3;
-      break;
-    case 0xf:
-      strength = 4;
-      break;
-    default:
-      strength = 0;
-    }
+	len = 0;
+	PUTM_TO_PAGE(len, page, "Connection Time: %02ld:%02ld:%02ld\n",
+      		((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
+	PUTM_TO_PAGE(len, page, "Connection Time[s]: %ld\n", delta);
+	PUTM_TO_PAGE(len, page, "Asic ID: %s\n",
+      	(info->AsicID) ==
+      	ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
+	PUTX_TO_PAGE(len, page, "SKU: ", SKUSZ, info->Sku);
+	PUTX_TO_PAGE(len, page, "EUI64: ", EUISZ, info->eui64);
+	PUTD_TO_PAGE(len, page, "DSP version number: ", DSPVERSZ, info->DspVer);
+	PUTX_TO_PAGE(len, page, "Hardware Serial Number: ", HWSERNUMSZ,
+      		info->HwSerNum);
+	PUTX_TO_PAGE(len, page, "Caliberation Version: ", CALVERSZ,
+      		info->RfCalVer);
+	PUTD_TO_PAGE(len, page, "Caliberation Date: ", CALDATESZ,
+		info->RfCalDate);
+	PUTM_TO_PAGE(len, page, "Media State: %s\n",
+      		(info->mediastate) ? "link" : "no link");
+	PUTM_TO_PAGE(len, page, "Connection Status: %s\n",
+      		status[((info->ConStat) & 0x7)]);
+	PUTM_TO_PAGE(len, page, "RX packets: %ld\n", info->stats.rx_packets);
+	PUTM_TO_PAGE(len, page, "TX packets: %ld\n", info->stats.tx_packets);
+	PUTM_TO_PAGE(len, page, "RX bytes: %ld\n", info->stats.rx_bytes);
+	PUTM_TO_PAGE(len, page, "TX bytes: %ld\n", info->stats.tx_bytes);
+	PUTM_TO_PAGE(len, page, "Signal Strength: %s\n", signal[strength]);
+	PUTM_TO_PAGE(len, page, "Signal Quality: %s\n", signal[quality]);
 
-  i = (info->LedStat >> 8) & 0xf;
-  switch (i)
-    {
-    case 0x1:
-      quality = 1;
-      break;
-    case 0x3:
-      quality = 2;
-      break;
-    case 0x7:
-      quality = 3;
-      break;
-    case 0xf:
-      quality = 4;
-      break;
-    default:
-      quality = 0;
-    }
-
-
-  len = 0;
-  PUTM_TO_PAGE (len, page, "Connection Time: %02ld:%02ld:%02ld\n",
-		((delta / 3600) % 24), ((delta / 60) % 60), (delta % 60));
-  PUTM_TO_PAGE (len, page, "Connection Time[s]: %ld\n", delta);
-  PUTM_TO_PAGE (len, page, "Asic ID: %s\n",
-		(info->AsicID) ==
-		ELECTRABUZZ_ID ? "ELECTRABUZZ ASIC" : "MAGNEMITE ASIC");
-  PUTX_TO_PAGE (len, page, "SKU: ", SKUSZ, info->Sku);
-  PUTX_TO_PAGE (len, page, "EUI64: ", EUISZ, info->eui64);
-  PUTD_TO_PAGE (len, page, "DSP version number: ", DSPVERSZ, info->DspVer);
-  PUTX_TO_PAGE (len, page, "Hardware Serial Number: ", HWSERNUMSZ,
-		info->HwSerNum);
-  PUTX_TO_PAGE (len, page, "Caliberation Version: ", CALVERSZ,
-		info->RfCalVer);
-  PUTD_TO_PAGE (len, page, "Caliberation Date: ", CALDATESZ, info->RfCalDate);
-  PUTM_TO_PAGE (len, page, "Media State: %s\n",
-		(info->mediastate) ? "link" : "no link");
-  PUTM_TO_PAGE (len, page, "Connection Status: %s\n",
-		status[((info->ConStat) & 0x7)]);
-  PUTM_TO_PAGE (len, page, "RX packets: %ld\n", info->stats.rx_packets);
-  PUTM_TO_PAGE (len, page, "TX packets: %ld\n", info->stats.tx_packets);
-  PUTM_TO_PAGE (len, page, "RX bytes: %ld\n", info->stats.rx_bytes);
-  PUTM_TO_PAGE (len, page, "TX bytes: %ld\n", info->stats.tx_bytes);
-  PUTM_TO_PAGE (len, page, "Signal Strength: %s\n", signal[strength]);
-  PUTM_TO_PAGE (len, page, "Signal Quality: %s\n", signal[quality]);
-
-
-
-
-  return len;
+	return len;
 }
 
 static int
-ft1000NotifyProc (struct notifier_block *this, unsigned long event, void *ptr)
+ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr)
 {
-  struct net_device *dev = ptr;
+	struct net_device *dev = ptr;
 	struct ft1000_info *info;
-  struct proc_dir_entry *ft1000_proc_file;
+	struct proc_dir_entry *ft1000_proc_file;
 
-info = (struct ft1000_info *) netdev_priv(dev);
+	info = netdev_priv(dev);
 
+	switch (event) {
+	case NETDEV_CHANGENAME:
+		remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+		ft1000_proc_file = create_proc_read_entry(dev->name, 0644,
+					info->ft1000_proc_dir,
+					ft1000ReadProc, dev);
+		snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
+		break;
+	}
 
-  switch (event)
-    {
-    case NETDEV_CHANGENAME:
-      remove_proc_entry (info->netdevname, info->ft1000_proc_dir);
-      ft1000_proc_file = create_proc_read_entry (dev->name, 0644,
-						 info->ft1000_proc_dir,
-						 ft1000ReadProc, dev);
-      snprintf (info->netdevname, IFNAMSIZ, "%s", dev->name);
-      break;
-    }
-  return NOTIFY_DONE;
+	return NOTIFY_DONE;
 }
 
 static struct notifier_block ft1000_netdev_notifier = {
-  .notifier_call = ft1000NotifyProc
+	.notifier_call = ft1000NotifyProc,
 };
 
 
-void
-ft1000InitProc (struct net_device *dev)
+int ft1000_init_proc(struct net_device *dev)
 {
 	struct ft1000_info *info;
-  struct proc_dir_entry *ft1000_proc_file;
-	info = (struct ft1000_info *) netdev_priv(dev);
+	struct proc_dir_entry *ft1000_proc_file;
+	int ret = 0;
 
+	info = netdev_priv(dev);
 
-  info->ft1000_proc_dir = proc_mkdir (FT1000_PROC_DIR, FTNET_PROC);
-  if (info->ft1000_proc_dir == NULL)
-    {
-      remove_proc_entry (FT1000_PROC_DIR, FTNET_PROC);
-    }
+	info->ft1000_proc_dir = proc_mkdir(FT1000_PROC_DIR, FTNET_PROC);
+	if (info->ft1000_proc_dir == NULL) {
+		printk(KERN_WARNING "Unable to create %s dir.\n",
+			FT1000_PROC_DIR);
+		ret = -EINVAL;
+		goto fail;
+	}
 
+	ft1000_proc_file =
+		create_proc_read_entry(dev->name, 0644,
+			info->ft1000_proc_dir, ft1000ReadProc, dev);
 
-  ft1000_proc_file =
-    create_proc_read_entry (dev->name, 0644, info->ft1000_proc_dir,
-			    ft1000ReadProc, dev);
-  if (ft1000_proc_file == NULL)
-    {
-      remove_proc_entry (info->netdevname, info->ft1000_proc_dir);
-    }
+	if (ft1000_proc_file == NULL) {
+		printk(KERN_WARNING "Unable to create /proc entry.\n");
+		ret = -EINVAL;
+		goto fail_entry;
+	}
 
-  snprintf (info->netdevname, IFNAMSIZ, "%s", dev->name);
-  register_netdevice_notifier (&ft1000_netdev_notifier);
-  return;
+	snprintf(info->netdevname, IFNAMSIZ, "%s", dev->name);
+
+	ret = register_netdevice_notifier(&ft1000_netdev_notifier);
+	if (ret)
+		goto fail_notif;
+
+	return 0;
+
+fail_notif:
+	remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+fail_entry:
+	remove_proc_entry(FT1000_PROC_DIR, FTNET_PROC);
+fail:
+	return ret;
 }
 
-void
-ft1000CleanupProc(struct ft1000_info *info)
+void ft1000_cleanup_proc(struct ft1000_info *info)
 {
-  remove_proc_entry (info->netdevname, info->ft1000_proc_dir);
-  remove_proc_entry (FT1000_PROC_DIR, FTNET_PROC);
-  unregister_netdevice_notifier (&ft1000_netdev_notifier);
-
-  return;
+	remove_proc_entry(info->netdevname, info->ft1000_proc_dir);
+	remove_proc_entry(FT1000_PROC_DIR, FTNET_PROC);
+	unregister_netdevice_notifier(&ft1000_netdev_notifier);
 }
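ft1000_init_proc() above is restructured around labeled error paths: each allocation or registration that fails jumps to a label that releases only what was acquired before it. A minimal user-space sketch of that unwind pattern; the resources and names here are toy stand-ins for the proc dir, proc entry and netdev notifier, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

/* Toy acquire/release helpers standing in for proc_mkdir(),
 * create_proc_read_entry() and register_netdevice_notifier(). */
static void *get_res(const char *name)
{
	(void)name;
	return malloc(1);
}

static void put_res(void *res)
{
	free(res);
}

/* Same unwind shape as the new ft1000_init_proc(): every failure
 * jumps to a label that undoes only the earlier, successful steps. */
static int init_example(void)
{
	void *dir, *entry, *notifier;
	int ret = 0;

	dir = get_res("dir");
	if (!dir) {
		ret = -1;
		goto fail;
	}

	entry = get_res("entry");
	if (!entry) {
		ret = -1;
		goto fail_dir;
	}

	notifier = get_res("notifier");
	if (!notifier) {
		ret = -1;
		goto fail_entry;
	}

	return 0;	/* success: everything stays registered */

fail_entry:
	put_res(entry);
fail_dir:
	put_res(dir);
fail:
	return ret;
}

int main(void)
{
	printf("init_example() = %d\n", init_example());
	return 0;
}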
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 28f55b2..79482ac 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -36,7 +36,7 @@
 
 MODULE_DEVICE_TABLE(usb, id_table);
 
-static BOOLEAN gPollingfailed = FALSE;
+static bool gPollingfailed = FALSE;
 int ft1000_poll_thread(void *arg)
 {
 	int ret = STATUS_SUCCESS;
@@ -64,7 +64,7 @@
 	int i, ret = 0, size;
 
 	struct ft1000_device *ft1000dev;
-	struct ft1000_info *pft1000info;
+	struct ft1000_info *pft1000info = NULL;
 	const struct firmware *dsp_fw;
 
 	ft1000dev = kmalloc(sizeof(struct ft1000_device), GFP_KERNEL);
@@ -84,7 +84,6 @@
 	ft1000dev->dev = dev;
 	ft1000dev->status = 0;
 	ft1000dev->net = NULL;
-	spin_lock_init(&ft1000dev->device_lock);
 	ft1000dev->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
 	ft1000dev->rx_urb = usb_alloc_urb(0, GFP_ATOMIC);
 
@@ -164,7 +163,7 @@
 	if (ret)
 		goto err_load;
 
-	pft1000info = (struct ft1000_info *) netdev_priv(ft1000dev->net);
+	pft1000info = netdev_priv(ft1000dev->net);
 
 	DEBUG("In probe: pft1000info=%p\n", pft1000info);
 	ret = dsp_reload(ft1000dev);
@@ -176,14 +175,18 @@
 	gPollingfailed = FALSE;
 	pft1000info->pPollThread =
 	    kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");
+
+	if (IS_ERR(pft1000info->pPollThread)) {
+		ret = PTR_ERR(pft1000info->pPollThread);
+		goto err_load;
+	}
+
 	msleep(500);
 
 	while (!pft1000info->CardReady) {
 		if (gPollingfailed) {
-			if (pft1000info->pPollThread)
-				kthread_stop(pft1000info->pPollThread);
 			ret = -EIO;
-			goto err_load;
+			goto err_thread;
 		}
 		msleep(100);
 		DEBUG("ft1000_probe::Waiting for Card Ready\n");
@@ -193,14 +196,21 @@
 
 	ret = reg_ft1000_netdev(ft1000dev, interface);
 	if (ret)
-		goto err_load;
+		goto err_thread;
+
+	ret = ft1000_init_proc(ft1000dev->net);
+	if (ret)
+		goto err_proc;
 
 	pft1000info->NetDevRegDone = 1;
 
-	ft1000InitProc(ft1000dev->net);
-
 	return 0;
 
+err_proc:
+	unregister_netdev(ft1000dev->net);
+	free_netdev(ft1000dev->net);
+err_thread:
+	kthread_stop(pft1000info->pPollThread);
 err_load:
 	kfree(pFileStart);
 err_fw:
@@ -218,7 +228,7 @@
 	DEBUG("In disconnect pft1000info=%p\n", pft1000info);
 
 	if (pft1000info) {
-		ft1000CleanupProc(pft1000info);
+		ft1000_cleanup_proc(pft1000info);
 		if (pft1000info->pPollThread)
 			kthread_stop(pft1000info->pPollThread);
 
@@ -226,7 +236,7 @@
 
 		if (pft1000info->pFt1000Dev->net) {
 			DEBUG("ft1000_disconnect: destroy char driver\n");
-			ft1000_DestroyDevice(pft1000info->pFt1000Dev->net);
+			ft1000_destroy_dev(pft1000info->pFt1000Dev->net);
 			unregister_netdev(pft1000info->pFt1000Dev->net);
 			DEBUG
 			    ("ft1000_disconnect: network device unregisterd\n");
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index a9d419a..a143e9c 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -98,16 +98,6 @@
 /*end of Jim*/
 #define DEBUG(args...) printk(KERN_INFO args)
 
-#define UCHAR               u8
-#define USHORT              u16
-#define ULONG               u32 /* WTF ??? */
-#define BOOLEAN             u8
-#define PULONG              u32 *
-#define PUSHORT             u16 *
-#define PUCHAR              u8 *
-#define PCHAR               u8 *
-#define UINT                u32
-
 #define FALSE           0
 #define TRUE            1
 
@@ -372,15 +362,15 @@
 
 
 
-#define ISR_EMPTY			(UCHAR)0x00 	 // no bits set in ISR
+#define ISR_EMPTY			(u8)0x00 	 // no bits set in ISR
 
-#define ISR_DOORBELL_ACK	(UCHAR)0x01		 //  the doorbell I sent has been received.
+#define ISR_DOORBELL_ACK	(u8)0x01		 //  the doorbell I sent has been received.
 
-#define ISR_DOORBELL_PEND	(UCHAR)0x02 	 //  doorbell for me
+#define ISR_DOORBELL_PEND	(u8)0x02 	 //  doorbell for me
 
-#define ISR_RCV				(UCHAR)0x04 	 // packet received with no errors
+#define ISR_RCV				(u8)0x04 	 // packet received with no errors
 
-#define ISR_WATERMARK		(UCHAR)0x08 	 //
+#define ISR_WATERMARK		(u8)0x08 	 //
 
 
 
@@ -466,12 +456,9 @@
 {
 	struct usb_device *dev;
 	struct net_device *net;
-	spinlock_t device_lock;
 
 	u32 status;
 
-	wait_queue_head_t control_wait;
-
 	struct urb *rx_urb;
 	struct urb *tx_urb;
 
@@ -486,6 +473,13 @@
 //	struct net_device_stats stats; //mbelian
 } __attribute__ ((packed));
 
+struct ft1000_debug_dirs {
+	struct list_head list;
+	struct dentry *dent;
+	struct dentry *file;
+	int int_number;
+};
+
 struct ft1000_info {
     struct ft1000_device *pFt1000Dev;
     struct net_device_stats stats;
@@ -497,9 +491,9 @@
 	unsigned char usbboot;
     unsigned short dspalive;
     u16 ASIC_ID;
-    BOOLEAN fProvComplete;
-    BOOLEAN fCondResetPend;
-    BOOLEAN fAppMsgPend;
+    bool fProvComplete;
+    bool fCondResetPend;
+    bool fAppMsgPend;
     char *pfwimg;
     int fwimgsz;
     u16 DrvErrNum;
@@ -520,7 +514,7 @@
     int NetDevRegDone;
     u8 CardNumber;
     u8 DeviceName[15];
-    int DeviceMajor;
+    struct ft1000_debug_dirs nodes;
     int registered;
     int mediastate;
     int dhcpflg;
@@ -567,26 +561,26 @@
 } __attribute__ ((packed));
 
 u16 ft1000_read_register(struct ft1000_device *ft1000dev, u16* Data, u16 nRegIndx);
-u16 ft1000_write_register(struct ft1000_device *ft1000dev, USHORT value, u16 nRegIndx);
-u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt);
-u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, USHORT cnt);
-u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer, u8 highlow);
-u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, USHORT indx, USHORT value, u8 highlow);
-u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer);
-u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, USHORT indx, PUCHAR buffer);
+u16 ft1000_write_register(struct ft1000_device *ft1000dev, u16 value, u16 nRegIndx);
+u16 ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt);
+u16 ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u16 cnt);
+u16 ft1000_read_dpram16(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer, u8 highlow);
+u16 ft1000_write_dpram16(struct ft1000_device *ft1000dev, u16 indx, u16 value, u8 highlow);
+u16 fix_ft1000_read_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer);
+u16 fix_ft1000_write_dpram32(struct ft1000_device *ft1000dev, u16 indx, u8 *buffer);
 
 extern void *pFileStart;
 extern size_t FileLength;
 extern int numofmsgbuf;
 
 int ft1000_close (struct net_device *dev);
-u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, ULONG  FileLength);
+u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, u32  FileLength);
 
 extern struct list_head freercvpool;
 extern spinlock_t free_buff_lock;   // lock to arbitrate free buffer list for receive command data
 
-int ft1000_CreateDevice(struct ft1000_device *dev);
-void ft1000_DestroyDevice(struct net_device *dev);
+int ft1000_create_dev(struct ft1000_device *dev);
+void ft1000_destroy_dev(struct net_device *dev);
 extern void CardSendCommand(struct ft1000_device *ft1000dev, void *ptempbuffer, int size);
 
 struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
@@ -600,8 +594,8 @@
 int reg_ft1000_netdev(struct ft1000_device *ft1000dev, struct usb_interface *intf);
 int ft1000_poll(void* dev_id);
 
-void ft1000InitProc(struct net_device *dev);
-void ft1000CleanupProc(struct ft1000_info *info);
+int ft1000_init_proc(struct net_device *dev);
+void ft1000_cleanup_proc(struct ft1000_info *info);
 
 
 
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 48d4e48..6c9279a 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -624,7 +624,7 @@
 	go->dvd_mode = 0;
 	go->interlace_coding = 0;
 	for (i = 0; i < 4; ++i)
-		go->modet[i].enable = 0;;
+		go->modet[i].enable = 0;
 	for (i = 0; i < 1624; ++i)
 		go->modet_map[i] = 0;
 	go->audio_deliver = NULL;
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index b46349b..acd39bd 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_HYPERV_UTILS)	+= hv_utils.o
 
 hv_vmbus-y := vmbus_drv.o osd.o \
-		 vmbus.o hv.o connection.o channel.o \
+		 hv.o connection.o channel.o \
 		 channel_mgmt.o ring_buffer.o
 hv_storvsc-y := storvsc_drv.o storvsc.o
 hv_blkvsc-y := blkvsc_drv.o blkvsc.o
diff --git a/drivers/staging/hv/blkvsc.c b/drivers/staging/hv/blkvsc.c
index d5b0abd..bc16d91 100644
--- a/drivers/staging/hv/blkvsc.c
+++ b/drivers/staging/hv/blkvsc.c
@@ -25,24 +25,24 @@
 #include "osd.h"
 #include "storvsc.c"
 
-static const char *gBlkDriverName = "blkvsc";
+static const char *g_blk_driver_name = "blkvsc";
 
 /* {32412632-86cb-44a2-9b5c-50d1417354f5} */
-static const struct hv_guid gBlkVscDeviceType = {
+static const struct hv_guid g_blk_device_type = {
 	.data = {
 		0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
 		0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
 	}
 };
 
-static int BlkVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
+static int blk_vsc_on_device_add(struct hv_device *device, void *additional_info)
 {
-	struct storvsc_device_info *deviceInfo;
+	struct storvsc_device_info *device_info;
 	int ret = 0;
 
-	deviceInfo = (struct storvsc_device_info *)AdditionalInfo;
+	device_info = (struct storvsc_device_info *)additional_info;
 
-	ret = StorVscOnDeviceAdd(Device, AdditionalInfo);
+	ret = stor_vsc_on_device_add(device, additional_info);
 	if (ret != 0)
 		return ret;
 
@@ -51,31 +51,31 @@
 	 * id. For IDE devices, the device instance id is formatted as
 	 * <bus id> * - <device id> - 8899 - 000000000000.
 	 */
-	deviceInfo->PathId = Device->deviceInstance.data[3] << 24 |
-			     Device->deviceInstance.data[2] << 16 |
-			     Device->deviceInstance.data[1] << 8  |
-			     Device->deviceInstance.data[0];
+	device_info->path_id = device->deviceInstance.data[3] << 24 |
+			     device->deviceInstance.data[2] << 16 |
+			     device->deviceInstance.data[1] << 8  |
+			     device->deviceInstance.data[0];
 
-	deviceInfo->TargetId = Device->deviceInstance.data[5] << 8 |
-			       Device->deviceInstance.data[4];
+	device_info->target_id = device->deviceInstance.data[5] << 8 |
+			       device->deviceInstance.data[4];
 
 	return ret;
 }
 
-int BlkVscInitialize(struct hv_driver *Driver)
+int blk_vsc_initialize(struct hv_driver *driver)
 {
-	struct storvsc_driver_object *storDriver;
+	struct storvsc_driver_object *stor_driver;
 	int ret = 0;
 
-	storDriver = (struct storvsc_driver_object *)Driver;
+	stor_driver = (struct storvsc_driver_object *)driver;
 
 	/* Make sure we are at least 2 pages since 1 page is used for control */
-	/* ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1)); */
+	/* ASSERT(stor_driver->RingBufferSize >= (PAGE_SIZE << 1)); */
 
-	Driver->name = gBlkDriverName;
-	memcpy(&Driver->deviceType, &gBlkVscDeviceType, sizeof(struct hv_guid));
+	driver->name = g_blk_driver_name;
+	memcpy(&driver->deviceType, &g_blk_device_type, sizeof(struct hv_guid));
 
-	storDriver->RequestExtSize = sizeof(struct storvsc_request_extension);
+	stor_driver->request_ext_size = sizeof(struct storvsc_request_extension);
 
 	/*
 	 * Divide the ring buffer data size (which is 1 page less than the ring
@@ -83,20 +83,20 @@
 	 * by the max request size (which is
 	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
 	 */
-	storDriver->MaxOutstandingRequestsPerChannel =
-		((storDriver->RingBufferSize - PAGE_SIZE) /
+	stor_driver->max_outstanding_req_per_channel =
+		((stor_driver->ring_buffer_size - PAGE_SIZE) /
 		  ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET +
 			   sizeof(struct vstor_packet) + sizeof(u64),
 			   sizeof(u64)));
 
 	DPRINT_INFO(BLKVSC, "max io outstd %u",
-		    storDriver->MaxOutstandingRequestsPerChannel);
+		    stor_driver->max_outstanding_req_per_channel);
 
 	/* Setup the dispatch table */
-	storDriver->Base.OnDeviceAdd = BlkVscOnDeviceAdd;
-	storDriver->Base.OnDeviceRemove = StorVscOnDeviceRemove;
-	storDriver->Base.OnCleanup = StorVscOnCleanup;
-	storDriver->OnIORequest	= StorVscOnIORequest;
+	stor_driver->base.OnDeviceAdd = blk_vsc_on_device_add;
+	stor_driver->base.OnDeviceRemove = stor_vsc_on_device_remove;
+	stor_driver->base.OnCleanup = stor_vsc_on_cleanup;
+	stor_driver->on_io_request = stor_vsc_on_io_request;
 
 	return ret;
 }
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 3f81ca5..b3d05fc 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -177,13 +177,13 @@
 	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
 	int ret;
 
-	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
+	storvsc_drv_obj->ring_buffer_size = blkvsc_ringbuffer_size;
 
 	/* Callback to client driver to complete the initialization */
-	drv_init(&storvsc_drv_obj->Base);
+	drv_init(&storvsc_drv_obj->base);
 
-	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
-	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
+	drv_ctx->driver.name = storvsc_drv_obj->base.name;
+	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->base.deviceType,
 	       sizeof(struct hv_guid));
 
 	drv_ctx->probe = blkvsc_probe;
@@ -230,8 +230,8 @@
 		device_unregister(current_dev);
 	}
 
-	if (storvsc_drv_obj->Base.OnCleanup)
-		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
+	if (storvsc_drv_obj->base.OnCleanup)
+		storvsc_drv_obj->base.OnCleanup(&storvsc_drv_obj->base);
 
 	vmbus_child_driver_unregister(drv_ctx);
 
@@ -262,7 +262,7 @@
 
 	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
 
-	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
+	if (!storvsc_drv_obj->base.OnDeviceAdd) {
 		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
 		ret = -1;
 		goto Cleanup;
@@ -284,7 +284,7 @@
 
 	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
 					sizeof(struct blkvsc_request) +
-					storvsc_drv_obj->RequestExtSize, 0,
+					storvsc_drv_obj->request_ext_size, 0,
 					SLAB_HWCACHE_ALIGN, NULL);
 	if (!blkdev->request_pool) {
 		ret = -ENOMEM;
@@ -293,7 +293,7 @@
 
 
 	/* Call to the vsc driver to add the device */
-	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
+	ret = storvsc_drv_obj->base.OnDeviceAdd(device_obj, &device_info);
 	if (ret != 0) {
 		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
 		goto Cleanup;
@@ -301,9 +301,9 @@
 
 	blkdev->device_ctx = device_ctx;
 	/* this identified the device 0 or 1 */
-	blkdev->target = device_info.TargetId;
+	blkdev->target = device_info.target_id;
 	/* this identified the ide ctrl 0 or 1 */
-	blkdev->path = device_info.PathId;
+	blkdev->path = device_info.path_id;
 
 	dev_set_drvdata(device, blkdev);
 
@@ -391,7 +391,7 @@
 	return ret;
 
 Remove:
-	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+	storvsc_drv_obj->base.OnDeviceRemove(device_obj);
 
 Cleanup:
 	if (blkdev) {
@@ -459,9 +459,9 @@
 	blkvsc_req->req = NULL;
 	blkvsc_req->write = 0;
 
-	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
-	blkvsc_req->request.DataBuffer.Offset = 0;
-	blkvsc_req->request.DataBuffer.Length = 0;
+	blkvsc_req->request.data_buffer.PfnArray[0] = 0;
+	blkvsc_req->request.data_buffer.Offset = 0;
+	blkvsc_req->request.data_buffer.Length = 0;
 
 	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
 	blkvsc_req->cmd_len = 10;
@@ -506,9 +506,9 @@
 	blkvsc_req->req = NULL;
 	blkvsc_req->write = 0;
 
-	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
-	blkvsc_req->request.DataBuffer.Offset = 0;
-	blkvsc_req->request.DataBuffer.Length = 64;
+	blkvsc_req->request.data_buffer.PfnArray[0] = page_to_pfn(page_buf);
+	blkvsc_req->request.data_buffer.Offset = 0;
+	blkvsc_req->request.data_buffer.Length = 64;
 
 	blkvsc_req->cmnd[0] = INQUIRY;
 	blkvsc_req->cmnd[1] = 0x1;		/* Get product data */
@@ -593,9 +593,9 @@
 	blkvsc_req->req = NULL;
 	blkvsc_req->write = 0;
 
-	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
-	blkvsc_req->request.DataBuffer.Offset = 0;
-	blkvsc_req->request.DataBuffer.Length = 8;
+	blkvsc_req->request.data_buffer.PfnArray[0] = page_to_pfn(page_buf);
+	blkvsc_req->request.data_buffer.Offset = 0;
+	blkvsc_req->request.data_buffer.Length = 8;
 
 	blkvsc_req->cmnd[0] = READ_CAPACITY;
 	blkvsc_req->cmd_len = 16;
@@ -614,7 +614,7 @@
 	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
 
 	/* check error */
-	if (blkvsc_req->request.Status) {
+	if (blkvsc_req->request.status) {
 		scsi_normalize_sense(blkvsc_req->sense_buffer,
 				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 
@@ -670,9 +670,9 @@
 	blkvsc_req->req = NULL;
 	blkvsc_req->write = 0;
 
-	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
-	blkvsc_req->request.DataBuffer.Offset = 0;
-	blkvsc_req->request.DataBuffer.Length = 12;
+	blkvsc_req->request.data_buffer.PfnArray[0] = page_to_pfn(page_buf);
+	blkvsc_req->request.data_buffer.Offset = 0;
+	blkvsc_req->request.data_buffer.Length = 12;
 
 	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16; */
 	blkvsc_req->cmd_len = 16;
@@ -691,7 +691,7 @@
 	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
 
 	/* check error */
-	if (blkvsc_req->request.Status) {
+	if (blkvsc_req->request.status) {
 		scsi_normalize_sense(blkvsc_req->sense_buffer,
 				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 		if (sense_hdr.asc == 0x3A) {
@@ -741,14 +741,14 @@
 
 	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
 
-	if (!storvsc_drv_obj->Base.OnDeviceRemove)
+	if (!storvsc_drv_obj->base.OnDeviceRemove)
 		return -1;
 
 	/*
 	 * Call to the vsc driver to let it know that the device is being
 	 * removed
 	 */
-	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+	ret = storvsc_drv_obj->base.OnDeviceRemove(device_obj);
 	if (ret != 0) {
 		/* TODO: */
 		DPRINT_ERR(BLKVSC_DRV,
@@ -865,38 +865,38 @@
 		   (blkvsc_req->write) ? "WRITE" : "READ",
 		   (unsigned long) blkvsc_req->sector_start,
 		   blkvsc_req->sector_count,
-		   blkvsc_req->request.DataBuffer.Offset,
-		   blkvsc_req->request.DataBuffer.Length);
+		   blkvsc_req->request.data_buffer.Offset,
+		   blkvsc_req->request.data_buffer.Length);
 #if 0
-	for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) {
+	for (i = 0; i < (blkvsc_req->request.data_buffer.Length >> 12); i++) {
 		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
 			   "req %p pfn[%d] %llx\n",
 			   blkvsc_req, i,
-			   blkvsc_req->request.DataBuffer.PfnArray[i]);
+			   blkvsc_req->request.data_buffer.PfnArray[i]);
 	}
 #endif
 
 	storvsc_req = &blkvsc_req->request;
-	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
+	storvsc_req->extension = (void *)((unsigned long)blkvsc_req +
 					  sizeof(struct blkvsc_request));
 
-	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;
+	storvsc_req->type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;
 
-	storvsc_req->OnIOCompletion = request_completion;
-	storvsc_req->Context = blkvsc_req;
+	storvsc_req->on_io_completion = request_completion;
+	storvsc_req->context = blkvsc_req;
 
-	storvsc_req->Host = blkdev->port;
-	storvsc_req->Bus = blkdev->path;
-	storvsc_req->TargetId = blkdev->target;
-	storvsc_req->LunId = 0;	 /* this is not really used at all */
+	storvsc_req->host = blkdev->port;
+	storvsc_req->bus = blkdev->path;
+	storvsc_req->target_id = blkdev->target;
+	storvsc_req->lun_id = 0;	 /* this is not really used at all */
 
-	storvsc_req->CdbLen = blkvsc_req->cmd_len;
-	storvsc_req->Cdb = blkvsc_req->cmnd;
+	storvsc_req->cdb_len = blkvsc_req->cmd_len;
+	storvsc_req->cdb = blkvsc_req->cmnd;
 
-	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
-	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
+	storvsc_req->sense_buffer = blkvsc_req->sense_buffer;
+	storvsc_req->sense_buffer_size = SCSI_SENSE_BUFFERSIZE;
 
-	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
+	ret = storvsc_drv_obj->on_io_request(&blkdev->device_ctx->device_obj,
 					   &blkvsc_req->request);
 	if (ret == 0)
 		blkdev->num_outstanding_reqs++;
@@ -992,8 +992,10 @@
 
 					blkvsc_req->dev = blkdev;
 					blkvsc_req->req = req;
-					blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
-					blkvsc_req->request.DataBuffer.Length = 0;
+					blkvsc_req->request.data_buffer.Offset
+						= bvec->bv_offset;
+					blkvsc_req->request.data_buffer.Length
+						= 0;
 
 					/* Add to the group */
 					blkvsc_req->group = group;
@@ -1007,8 +1009,11 @@
 				}
 
 				/* Add the curr bvec/segment to the curr blkvsc_req */
-				blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
-				blkvsc_req->request.DataBuffer.Length += bvec->bv_len;
+				blkvsc_req->request.data_buffer.
+					PfnArray[databuf_idx]
+						= page_to_pfn(bvec->bv_page);
+				blkvsc_req->request.data_buffer.Length
+					+= bvec->bv_len;
 
 				prev_bvec = bvec;
 
@@ -1073,7 +1078,7 @@
 static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
 {
 	struct blkvsc_request *blkvsc_req =
-			(struct blkvsc_request *)request->Context;
+			(struct blkvsc_request *)request->context;
 	struct block_device_context *blkdev =
 			(struct block_device_context *)blkvsc_req->dev;
 	struct scsi_sense_hdr sense_hdr;
@@ -1083,7 +1088,7 @@
 
 	blkdev->num_outstanding_reqs--;
 
-	if (blkvsc_req->request.Status)
+	if (blkvsc_req->request.status)
 		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
 					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
 			scsi_print_sense_hdr("blkvsc", &sense_hdr);
@@ -1095,7 +1100,7 @@
 static void blkvsc_request_completion(struct hv_storvsc_request *request)
 {
 	struct blkvsc_request *blkvsc_req =
-			(struct blkvsc_request *)request->Context;
+			(struct blkvsc_request *)request->context;
 	struct block_device_context *blkdev =
 			(struct block_device_context *)blkvsc_req->dev;
 	unsigned long flags;
@@ -1110,7 +1115,7 @@
 		   (blkvsc_req->write) ? "WRITE" : "READ",
 		   (unsigned long)blkvsc_req->sector_start,
 		   blkvsc_req->sector_count,
-		   blkvsc_req->request.DataBuffer.Length,
+		   blkvsc_req->request.data_buffer.Length,
 		   blkvsc_req->group->outstanding,
 		   blkdev->num_outstanding_reqs);
 
@@ -1137,7 +1142,7 @@
 			list_del(&comp_req->req_entry);
 
 			if (!__blk_end_request(comp_req->req,
-				(!comp_req->request.Status ? 0 : -EIO),
+				(!comp_req->request.status ? 0 : -EIO),
 				comp_req->sector_count * blkdev->sector_size)) {
 				/*
 				 * All the sectors have been xferred ie the
@@ -1195,7 +1200,7 @@
 
 			if (comp_req->req) {
 				ret = __blk_end_request(comp_req->req,
-					(!comp_req->request.Status ? 0 : -EIO),
+					(!comp_req->request.status ? 0 : -EIO),
 					comp_req->sector_count *
 					blkdev->sector_size);
 
@@ -1482,7 +1487,7 @@
 
 	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");
 
-	ret = blkvsc_drv_init(BlkVscInitialize);
+	ret = blkvsc_drv_init(blk_vsc_initialize);
 
 	return ret;
 }
diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
index 26ebc77..45a627d 100644
--- a/drivers/staging/hv/channel.c
+++ b/drivers/staging/hv/channel.c
@@ -43,24 +43,24 @@
 	int j = 0;
 
 	DPRINT_DBG(VMBUS, "monitorPage - %p, trigger state - %d",
-		   MonitorPage, MonitorPage->TriggerState);
+		   MonitorPage, MonitorPage->trigger_state);
 
 	for (i = 0; i < 4; i++)
 		DPRINT_DBG(VMBUS, "trigger group (%d) - %llx", i,
-			   MonitorPage->TriggerGroup[i].AsUINT64);
+			   MonitorPage->trigger_group[i].as_uint64);
 
 	for (i = 0; i < 4; i++) {
 		for (j = 0; j < 32; j++) {
 			DPRINT_DBG(VMBUS, "latency (%d)(%d) - %llx", i, j,
-				   MonitorPage->Latency[i][j]);
+				   MonitorPage->latency[i][j]);
 		}
 	}
 	for (i = 0; i < 4; i++) {
 		for (j = 0; j < 32; j++) {
 			DPRINT_DBG(VMBUS, "param-conn id (%d)(%d) - %d", i, j,
-			       MonitorPage->Parameter[i][j].ConnectionId.Asu32);
+			       MonitorPage->parameter[i][j].connectionid.asu32);
 			DPRINT_DBG(VMBUS, "param-flag (%d)(%d) - %d", i, j,
-				MonitorPage->Parameter[i][j].FlagNumber);
+				MonitorPage->parameter[i][j].flag_number);
 		}
 	}
 }
@@ -74,21 +74,21 @@
 {
 	struct hv_monitor_page *monitorpage;
 
-	if (channel->OfferMsg.MonitorAllocated) {
+	if (channel->offermsg.monitor_allocated) {
 		/* Each u32 represents 32 channels */
-		set_bit(channel->OfferMsg.ChildRelId & 31,
+		set_bit(channel->offermsg.child_relid & 31,
 			(unsigned long *) gVmbusConnection.SendInterruptPage +
-			(channel->OfferMsg.ChildRelId >> 5));
+			(channel->offermsg.child_relid >> 5));
 
 		monitorpage = gVmbusConnection.MonitorPages;
 		monitorpage++; /* Get the child to parent monitor page */
 
-		set_bit(channel->MonitorBit,
-			(unsigned long *)&monitorpage->TriggerGroup
-					[channel->MonitorGroup].Pending);
+		set_bit(channel->monitor_bit,
+			(unsigned long *)&monitorpage->trigger_group
+					[channel->monitor_grp].pending);
 
 	} else {
-		VmbusSetEvent(channel->OfferMsg.ChildRelId);
+		VmbusSetEvent(channel->offermsg.child_relid);
 	}
 }
 
@@ -97,19 +97,19 @@
 {
 	struct hv_monitor_page *monitorPage;
 
-	if (Channel->OfferMsg.MonitorAllocated) {
+	if (Channel->offermsg.monitor_allocated) {
 		/* Each u32 represents 32 channels */
-		clear_bit(Channel->OfferMsg.ChildRelId & 31,
+		clear_bit(Channel->offermsg.child_relid & 31,
 			  (unsigned long *)gVmbusConnection.SendInterruptPage +
-			  (Channel->OfferMsg.ChildRelId >> 5));
+			  (Channel->offermsg.child_relid >> 5));
 
 		monitorPage =
 			(struct hv_monitor_page *)gVmbusConnection.MonitorPages;
 		monitorPage++; /* Get the child to parent monitor page */
 
-		clear_bit(Channel->MonitorBit,
-			  (unsigned long *)&monitorPage->TriggerGroup
-					[Channel->MonitorGroup].Pending);
+		clear_bit(Channel->monitor_bit,
+			  (unsigned long *)&monitorPage->trigger_group
+					[Channel->monitor_grp].Pending);
 	}
 }
 
@@ -121,42 +121,42 @@
 			      struct vmbus_channel_debug_info *debuginfo)
 {
 	struct hv_monitor_page *monitorpage;
-	u8 monitor_group = (u8)channel->OfferMsg.MonitorId / 32;
-	u8 monitor_offset = (u8)channel->OfferMsg.MonitorId % 32;
+	u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
+	u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;
 	/* u32 monitorBit	= 1 << monitorOffset; */
 
-	debuginfo->RelId = channel->OfferMsg.ChildRelId;
-	debuginfo->State = channel->State;
-	memcpy(&debuginfo->InterfaceType,
-	       &channel->OfferMsg.Offer.InterfaceType, sizeof(struct hv_guid));
-	memcpy(&debuginfo->InterfaceInstance,
-	       &channel->OfferMsg.Offer.InterfaceInstance,
+	debuginfo->relid = channel->offermsg.child_relid;
+	debuginfo->state = channel->state;
+	memcpy(&debuginfo->interfacetype,
+	       &channel->offermsg.offer.InterfaceType, sizeof(struct hv_guid));
+	memcpy(&debuginfo->interface_instance,
+	       &channel->offermsg.offer.InterfaceInstance,
 	       sizeof(struct hv_guid));
 
 	monitorpage = (struct hv_monitor_page *)gVmbusConnection.MonitorPages;
 
-	debuginfo->MonitorId = channel->OfferMsg.MonitorId;
+	debuginfo->monitorid = channel->offermsg.monitorid;
 
-	debuginfo->ServerMonitorPending =
-			monitorpage->TriggerGroup[monitor_group].Pending;
-	debuginfo->ServerMonitorLatency =
-			monitorpage->Latency[monitor_group][monitor_offset];
-	debuginfo->ServerMonitorConnectionId =
-			monitorpage->Parameter[monitor_group]
-					[monitor_offset].ConnectionId.u.Id;
+	debuginfo->servermonitor_pending =
+			monitorpage->trigger_group[monitor_group].pending;
+	debuginfo->servermonitor_latency =
+			monitorpage->latency[monitor_group][monitor_offset];
+	debuginfo->servermonitor_connectionid =
+			monitorpage->parameter[monitor_group]
+					[monitor_offset].connectionid.u.id;
 
 	monitorpage++;
 
-	debuginfo->ClientMonitorPending =
-			monitorpage->TriggerGroup[monitor_group].Pending;
-	debuginfo->ClientMonitorLatency =
-			monitorpage->Latency[monitor_group][monitor_offset];
-	debuginfo->ClientMonitorConnectionId =
-			monitorpage->Parameter[monitor_group]
-					[monitor_offset].ConnectionId.u.Id;
+	debuginfo->clientmonitor_pending =
+			monitorpage->trigger_group[monitor_group].pending;
+	debuginfo->clientmonitor_latency =
+			monitorpage->latency[monitor_group][monitor_offset];
+	debuginfo->clientmonitor_connectionid =
+			monitorpage->parameter[monitor_group]
+					[monitor_offset].connectionid.u.id;
 
-	RingBufferGetDebugInfo(&channel->Inbound, &debuginfo->Inbound);
-	RingBufferGetDebugInfo(&channel->Outbound, &debuginfo->Outbound);
+	ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
+	ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
 }
 
 /*
@@ -176,11 +176,11 @@
 	/* ASSERT(!(SendRingBufferSize & (PAGE_SIZE - 1))); */
 	/* ASSERT(!(RecvRingBufferSize & (PAGE_SIZE - 1))); */
 
-	newchannel->OnChannelCallback = onchannelcallback;
-	newchannel->ChannelCallbackContext = context;
+	newchannel->onchannel_callback = onchannelcallback;
+	newchannel->channel_callback_context = context;
 
 	/* Allocate the ring buffer */
-	out = osd_PageAlloc((send_ringbuffer_size + recv_ringbuffer_size)
+	out = osd_page_alloc((send_ringbuffer_size + recv_ringbuffer_size)
 			     >> PAGE_SHIFT);
 	if (!out)
 		return -ENOMEM;
@@ -189,17 +189,17 @@
 
 	in = (void *)((unsigned long)out + send_ringbuffer_size);
 
-	newchannel->RingBufferPages = out;
-	newchannel->RingBufferPageCount = (send_ringbuffer_size +
+	newchannel->ringbuffer_pages = out;
+	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
 					   recv_ringbuffer_size) >> PAGE_SHIFT;
 
-	ret = RingBufferInit(&newchannel->Outbound, out, send_ringbuffer_size);
+	ret = ringbuffer_init(&newchannel->outbound, out, send_ringbuffer_size);
 	if (ret != 0) {
 		err = ret;
 		goto errorout;
 	}
 
-	ret = RingBufferInit(&newchannel->Inbound, in, recv_ringbuffer_size);
+	ret = ringbuffer_init(&newchannel->inbound, in, recv_ringbuffer_size);
 	if (ret != 0) {
 		err = ret;
 		goto errorout;
@@ -210,13 +210,13 @@
 	DPRINT_DBG(VMBUS, "Establishing ring buffer's gpadl for channel %p...",
 		   newchannel);
 
-	newchannel->RingBufferGpadlHandle = 0;
+	newchannel->ringbuffer_gpadlhandle = 0;
 
 	ret = vmbus_establish_gpadl(newchannel,
-					 newchannel->Outbound.RingBuffer,
+					 newchannel->outbound.ring_buffer,
 					 send_ringbuffer_size +
 					 recv_ringbuffer_size,
-					 &newchannel->RingBufferGpadlHandle);
+					 &newchannel->ringbuffer_gpadlhandle);
 
 	if (ret != 0) {
 		err = ret;
@@ -225,12 +225,12 @@
 
 	DPRINT_DBG(VMBUS, "channel %p <relid %d gpadl 0x%x send ring %p "
 		   "size %d recv ring %p size %d, downstreamoffset %d>",
-		   newchannel, newchannel->OfferMsg.ChildRelId,
-		   newchannel->RingBufferGpadlHandle,
-		   newchannel->Outbound.RingBuffer,
-		   newchannel->Outbound.RingSize,
-		   newchannel->Inbound.RingBuffer,
-		   newchannel->Inbound.RingSize,
+		   newchannel, newchannel->offermsg.child_relid,
+		   newchannel->ringbuffer_gpadlhandle,
+		   newchannel->outbound.ring_buffer,
+		   newchannel->outbound.ring_size,
+		   newchannel->inbound.ring_buffer,
+		   newchannel->inbound.ring_size,
 		   send_ringbuffer_size);
 
 	/* Create and init the channel open message */
@@ -242,20 +242,20 @@
 		goto errorout;
 	}
 
-	openInfo->WaitEvent = osd_WaitEventCreate();
-	if (!openInfo->WaitEvent) {
+	openInfo->waitevent = osd_waitevent_create();
+	if (!openInfo->waitevent) {
 		err = -ENOMEM;
 		goto errorout;
 	}
 
-	openMsg = (struct vmbus_channel_open_channel *)openInfo->Msg;
-	openMsg->Header.MessageType = ChannelMessageOpenChannel;
-	openMsg->OpenId = newchannel->OfferMsg.ChildRelId; /* FIXME */
-	openMsg->ChildRelId = newchannel->OfferMsg.ChildRelId;
-	openMsg->RingBufferGpadlHandle = newchannel->RingBufferGpadlHandle;
-	openMsg->DownstreamRingBufferPageOffset = send_ringbuffer_size >>
+	openMsg = (struct vmbus_channel_open_channel *)openInfo->msg;
+	openMsg->header.msgtype = CHANNELMSG_OPENCHANNEL;
+	openMsg->openid = newchannel->offermsg.child_relid; /* FIXME */
+	openMsg->child_relid = newchannel->offermsg.child_relid;
+	openMsg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+	openMsg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
 						  PAGE_SHIFT;
-	openMsg->ServerContextAreaGpadlHandle = 0; /* TODO */
+	openMsg->server_contextarea_gpadlhandle = 0; /* TODO */
 
 	if (userdatalen > MAX_USER_DEFINED_BYTES) {
 		err = -EINVAL;
@@ -263,10 +263,10 @@
 	}
 
 	if (userdatalen)
-		memcpy(openMsg->UserData, userdata, userdatalen);
+		memcpy(openMsg->userdata, userdata, userdatalen);
 
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_add_tail(&openInfo->MsgListEntry,
+	list_add_tail(&openInfo->msglistentry,
 		      &gVmbusConnection.ChannelMsgList);
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
 
@@ -280,27 +280,27 @@
 	}
 
 	/* FIXME: Need to time-out here */
-	osd_WaitEventWait(openInfo->WaitEvent);
+	osd_waitevent_wait(openInfo->waitevent);
 
-	if (openInfo->Response.OpenResult.Status == 0)
+	if (openInfo->response.open_result.status == 0)
 		DPRINT_INFO(VMBUS, "channel <%p> open success!!", newchannel);
 	else
 		DPRINT_INFO(VMBUS, "channel <%p> open failed - %d!!",
-			    newchannel, openInfo->Response.OpenResult.Status);
+			    newchannel, openInfo->response.open_result.status);
 
 Cleanup:
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_del(&openInfo->MsgListEntry);
+	list_del(&openInfo->msglistentry);
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
 
-	kfree(openInfo->WaitEvent);
+	kfree(openInfo->waitevent);
 	kfree(openInfo);
 	return 0;
 
 errorout:
-	RingBufferCleanup(&newchannel->Outbound);
-	RingBufferCleanup(&newchannel->Inbound);
-	osd_PageFree(out, (send_ringbuffer_size + recv_ringbuffer_size)
+	ringbuffer_cleanup(&newchannel->outbound);
+	ringbuffer_cleanup(&newchannel->inbound);
+	osd_page_free(out, (send_ringbuffer_size + recv_ringbuffer_size)
 		     >> PAGE_SHIFT);
 	kfree(openInfo);
 	return err;
@@ -322,7 +322,7 @@
 
 	for (i = 0; i < pfncount; i++)
 		DPRINT_DBG(VMBUS, "gpadl body  - %d) pfn %llu",
-			   i, gpadl->Pfn[i]);
+			   i, gpadl->pfn[i]);
 }
 
 /*
@@ -336,18 +336,18 @@
 
 	DPRINT_DBG(VMBUS,
 		   "gpadl header - relid %d, range count %d, range buflen %d",
-		   gpadl->ChildRelId, gpadl->RangeCount, gpadl->RangeBufLen);
-	for (i = 0; i < gpadl->RangeCount; i++) {
-		pagecount = gpadl->Range[i].ByteCount >> PAGE_SHIFT;
+		   gpadl->child_relid, gpadl->rangecount, gpadl->range_buflen);
+	for (i = 0; i < gpadl->rangecount; i++) {
+		pagecount = gpadl->range[i].ByteCount >> PAGE_SHIFT;
 		pagecount = (pagecount > 26) ? 26 : pagecount;
 
 		DPRINT_DBG(VMBUS, "gpadl range %d - len %d offset %d "
-			   "page count %d", i, gpadl->Range[i].ByteCount,
-			   gpadl->Range[i].ByteOffset, pagecount);
+			   "page count %d", i, gpadl->range[i].ByteCount,
+			   gpadl->range[i].ByteOffset, pagecount);
 
 		for (j = 0; j < pagecount; j++)
 			DPRINT_DBG(VMBUS, "%d) pfn %llu", j,
-				   gpadl->Range[i].PfnArray[j]);
+				   gpadl->range[i].PfnArray[j]);
 	}
 }
 
@@ -391,18 +391,18 @@
 		if (!msgheader)
 			goto nomem;
 
-		INIT_LIST_HEAD(&msgheader->SubMsgList);
-		msgheader->MessageSize = msgsize;
+		INIT_LIST_HEAD(&msgheader->submsglist);
+		msgheader->msgsize = msgsize;
 
 		gpadl_header = (struct vmbus_channel_gpadl_header *)
-			msgheader->Msg;
-		gpadl_header->RangeCount = 1;
-		gpadl_header->RangeBufLen = sizeof(struct gpa_range) +
+			msgheader->msg;
+		gpadl_header->rangecount = 1;
+		gpadl_header->range_buflen = sizeof(struct gpa_range) +
 					 pagecount * sizeof(u64);
-		gpadl_header->Range[0].ByteOffset = 0;
-		gpadl_header->Range[0].ByteCount = size;
+		gpadl_header->range[0].ByteOffset = 0;
+		gpadl_header->range[0].ByteCount = size;
 		for (i = 0; i < pfncount; i++)
-			gpadl_header->Range[0].PfnArray[i] = pfn+i;
+			gpadl_header->range[0].PfnArray[i] = pfn+i;
 		*msginfo = msgheader;
 		*messagecount = 1;
 
@@ -428,10 +428,10 @@
 			/* FIXME: we probably need to do more if this fails */
 			if (!msgbody)
 				goto nomem;
-			msgbody->MessageSize = msgsize;
+			msgbody->msgsize = msgsize;
 			(*messagecount)++;
 			gpadl_body =
-				(struct vmbus_channel_gpadl_body *)msgbody->Msg;
+				(struct vmbus_channel_gpadl_body *)msgbody->msg;
 
 			/*
 			 * FIXME:
@@ -440,11 +440,11 @@
 			 */
 			/* gpadl_body->Gpadl = kbuffer; */
 			for (i = 0; i < pfncurr; i++)
-				gpadl_body->Pfn[i] = pfn + pfnsum + i;
+				gpadl_body->pfn[i] = pfn + pfnsum + i;
 
 			/* add to msg header */
-			list_add_tail(&msgbody->MsgListEntry,
-				      &msgheader->SubMsgList);
+			list_add_tail(&msgbody->msglistentry,
+				      &msgheader->submsglist);
 			pfnsum += pfncurr;
 			pfnleft -= pfncurr;
 		}
@@ -456,17 +456,17 @@
 		msgheader = kzalloc(msgsize, GFP_KERNEL);
 		if (msgheader == NULL)
 			goto nomem;
-		msgheader->MessageSize = msgsize;
+		msgheader->msgsize = msgsize;
 
 		gpadl_header = (struct vmbus_channel_gpadl_header *)
-			msgheader->Msg;
-		gpadl_header->RangeCount = 1;
-		gpadl_header->RangeBufLen = sizeof(struct gpa_range) +
+			msgheader->msg;
+		gpadl_header->rangecount = 1;
+		gpadl_header->range_buflen = sizeof(struct gpa_range) +
 					 pagecount * sizeof(u64);
-		gpadl_header->Range[0].ByteOffset = 0;
-		gpadl_header->Range[0].ByteCount = size;
+		gpadl_header->range[0].ByteOffset = 0;
+		gpadl_header->range[0].ByteCount = size;
 		for (i = 0; i < pagecount; i++)
-			gpadl_header->Range[0].PfnArray[i] = pfn+i;
+			gpadl_header->range[0].PfnArray[i] = pfn+i;
 
 		*msginfo = msgheader;
 		*messagecount = 1;
@@ -508,21 +508,21 @@
 	if (ret)
 		return ret;
 
-	msginfo->WaitEvent = osd_WaitEventCreate();
-	if (!msginfo->WaitEvent) {
+	msginfo->waitevent = osd_waitevent_create();
+	if (!msginfo->waitevent) {
 		ret = -ENOMEM;
 		goto Cleanup;
 	}
 
-	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->Msg;
-	gpadlmsg->Header.MessageType = ChannelMessageGpadlHeader;
-	gpadlmsg->ChildRelId = channel->OfferMsg.ChildRelId;
-	gpadlmsg->Gpadl = next_gpadl_handle;
+	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
+	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
+	gpadlmsg->child_relid = channel->offermsg.child_relid;
+	gpadlmsg->gpadl = next_gpadl_handle;
 
 	dump_gpadl_header(gpadlmsg);
 
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_add_tail(&msginfo->MsgListEntry,
+	list_add_tail(&msginfo->msglistentry,
 		      &gVmbusConnection.ChannelMsgList);
 
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
@@ -530,9 +530,9 @@
 		   kbuffer, size, msgcount);
 
 	DPRINT_DBG(VMBUS, "Sending GPADL Header - len %zd",
-		   msginfo->MessageSize - sizeof(*msginfo));
+		   msginfo->msgsize - sizeof(*msginfo));
 
-	ret = VmbusPostMessage(gpadlmsg, msginfo->MessageSize -
+	ret = VmbusPostMessage(gpadlmsg, msginfo->msgsize -
 			       sizeof(*msginfo));
 	if (ret != 0) {
 		DPRINT_ERR(VMBUS, "Unable to open channel - %d", ret);
@@ -540,48 +540,48 @@
 	}
 
 	if (msgcount > 1) {
-		list_for_each(curr, &msginfo->SubMsgList) {
+		list_for_each(curr, &msginfo->submsglist) {
 
 			/* FIXME: should this use list_entry() instead ? */
 			submsginfo = (struct vmbus_channel_msginfo *)curr;
 			gpadl_body =
-			     (struct vmbus_channel_gpadl_body *)submsginfo->Msg;
+			     (struct vmbus_channel_gpadl_body *)submsginfo->msg;
 
-			gpadl_body->Header.MessageType =
-				ChannelMessageGpadlBody;
-			gpadl_body->Gpadl = next_gpadl_handle;
+			gpadl_body->header.msgtype =
+				CHANNELMSG_GPADL_BODY;
+			gpadl_body->gpadl = next_gpadl_handle;
 
 			DPRINT_DBG(VMBUS, "Sending GPADL Body - len %zd",
-				   submsginfo->MessageSize -
+				   submsginfo->msgsize -
 				   sizeof(*submsginfo));
 
-			dump_gpadl_body(gpadl_body, submsginfo->MessageSize -
+			dump_gpadl_body(gpadl_body, submsginfo->msgsize -
 				      sizeof(*submsginfo));
 			ret = VmbusPostMessage(gpadl_body,
-					       submsginfo->MessageSize -
+					       submsginfo->msgsize -
 					       sizeof(*submsginfo));
 			if (ret != 0)
 				goto Cleanup;
 
 		}
 	}
-	osd_WaitEventWait(msginfo->WaitEvent);
+	osd_waitevent_wait(msginfo->waitevent);
 
 	/* At this point, we received the gpadl created msg */
 	DPRINT_DBG(VMBUS, "Received GPADL created "
 		   "(relid %d, status %d handle %x)",
-		   channel->OfferMsg.ChildRelId,
-		   msginfo->Response.GpadlCreated.CreationStatus,
-		   gpadlmsg->Gpadl);
+		   channel->offermsg.child_relid,
+		   msginfo->response.gpadl_created.creation_status,
+		   gpadlmsg->gpadl);
 
-	*gpadl_handle = gpadlmsg->Gpadl;
+	*gpadl_handle = gpadlmsg->gpadl;
 
 Cleanup:
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_del(&msginfo->MsgListEntry);
+	list_del(&msginfo->msglistentry);
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
 
-	kfree(msginfo->WaitEvent);
+	kfree(msginfo->waitevent);
 	kfree(msginfo);
 	return ret;
 }
@@ -604,20 +604,20 @@
 	if (!info)
 		return -ENOMEM;
 
-	info->WaitEvent = osd_WaitEventCreate();
-	if (!info->WaitEvent) {
+	info->waitevent = osd_waitevent_create();
+	if (!info->waitevent) {
 		kfree(info);
 		return -ENOMEM;
 	}
 
-	msg = (struct vmbus_channel_gpadl_teardown *)info->Msg;
+	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
 
-	msg->Header.MessageType = ChannelMessageGpadlTeardown;
-	msg->ChildRelId = channel->OfferMsg.ChildRelId;
-	msg->Gpadl = gpadl_handle;
+	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
+	msg->child_relid = channel->offermsg.child_relid;
+	msg->gpadl = gpadl_handle;
 
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_add_tail(&info->MsgListEntry,
+	list_add_tail(&info->msglistentry,
 		      &gVmbusConnection.ChannelMsgList);
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
 
@@ -628,14 +628,14 @@
 		/* something... */
 	}
 
-	osd_WaitEventWait(info->WaitEvent);
+	osd_waitevent_wait(info->waitevent);
 
 	/* Received a torndown response */
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_del(&info->MsgListEntry);
+	list_del(&info->msglistentry);
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
 
-	kfree(info->WaitEvent);
+	kfree(info->waitevent);
 	kfree(info);
 	return ret;
 }
@@ -652,7 +652,7 @@
 	int ret;
 
 	/* Stop callback and cancel the timer asap */
-	channel->OnChannelCallback = NULL;
+	channel->onchannel_callback = NULL;
 	del_timer_sync(&channel->poll_timer);
 
 	/* Send a closing message */
@@ -663,11 +663,11 @@
 	if (!info)
 		return;
 
-	/* info->waitEvent = osd_WaitEventCreate(); */
+	/* info->waitEvent = osd_waitevent_create(); */
 
-	msg = (struct vmbus_channel_close_channel *)info->Msg;
-	msg->Header.MessageType = ChannelMessageCloseChannel;
-	msg->ChildRelId = channel->OfferMsg.ChildRelId;
+	msg = (struct vmbus_channel_close_channel *)info->msg;
+	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
+	msg->child_relid = channel->offermsg.child_relid;
 
 	ret = VmbusPostMessage(msg, sizeof(struct vmbus_channel_close_channel));
 	if (ret != 0) {
@@ -676,17 +676,17 @@
 	}
 
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->RingBufferGpadlHandle)
+	if (channel->ringbuffer_gpadlhandle)
 		vmbus_teardown_gpadl(channel,
-					  channel->RingBufferGpadlHandle);
+					  channel->ringbuffer_gpadlhandle);
 
 	/* TODO: Send a msg to release the childRelId */
 
 	/* Cleanup the ring buffers for this channel */
-	RingBufferCleanup(&channel->Outbound);
-	RingBufferCleanup(&channel->Inbound);
+	ringbuffer_cleanup(&channel->outbound);
+	ringbuffer_cleanup(&channel->inbound);
 
-	osd_PageFree(channel->RingBufferPages, channel->RingBufferPageCount);
+	osd_page_free(channel->ringbuffer_pages, channel->ringbuffer_pagecount);
 
 	kfree(info);
 
@@ -696,9 +696,9 @@
 	 * caller will free the channel
 	 */
 
-	if (channel->State == CHANNEL_OPEN_STATE) {
+	if (channel->state == CHANNEL_OPEN_STATE) {
 		spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
-		list_del(&channel->ListEntry);
+		list_del(&channel->listentry);
 		spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
 
 		free_channel(channel);
@@ -752,10 +752,10 @@
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = RingBufferWrite(&channel->Outbound, bufferlist, 3);
+	ret = ringbuffer_write(&channel->outbound, bufferlist, 3);
 
 	/* TODO: We should determine if this is optional */
-	if (ret == 0 && !GetRingBufferInterruptMask(&channel->Outbound))
+	if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
 		vmbus_setevent(channel);
 
 	return ret;
@@ -817,10 +817,10 @@
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		packetlen_aligned - packetlen);
 
-	ret = RingBufferWrite(&channel->Outbound, bufferlist, 3);
+	ret = ringbuffer_write(&channel->outbound, bufferlist, 3);
 
 	/* TODO: We should determine if this is optional */
-	if (ret == 0 && !GetRingBufferInterruptMask(&channel->Outbound))
+	if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
 		vmbus_setevent(channel);
 
 	return ret;
@@ -886,10 +886,10 @@
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		packetlen_aligned - packetlen);
 
-	ret = RingBufferWrite(&channel->Outbound, bufferlist, 3);
+	ret = ringbuffer_write(&channel->outbound, bufferlist, 3);
 
 	/* TODO: We should determine if this is optional */
-	if (ret == 0 && !GetRingBufferInterruptMask(&channel->Outbound))
+	if (ret == 0 && !get_ringbuffer_interrupt_mask(&channel->outbound))
 		vmbus_setevent(channel);
 
 	return ret;
@@ -923,7 +923,7 @@
 
 	spin_lock_irqsave(&channel->inbound_lock, flags);
 
-	ret = RingBufferPeek(&channel->Inbound, &desc,
+	ret = ringbuffer_peek(&channel->inbound, &desc,
 			     sizeof(struct vmpacket_descriptor));
 	if (ret != 0) {
 		spin_unlock_irqrestore(&channel->inbound_lock, flags);
@@ -940,7 +940,7 @@
 
 	DPRINT_DBG(VMBUS, "packet received on channel %p relid %d <type %d "
 		   "flag %d tid %llx pktlen %d datalen %d> ",
-		   channel, channel->OfferMsg.ChildRelId, desc.Type,
+		   channel, channel->offermsg.child_relid, desc.Type,
 		   desc.Flags, desc.TransactionId, packetlen, userlen);
 
 	*buffer_actual_len = userlen;
@@ -956,7 +956,7 @@
 	*requestid = desc.TransactionId;
 
 	/* Copy over the packet to the user buffer */
-	ret = RingBufferRead(&channel->Inbound, buffer, userlen,
+	ret = ringbuffer_read(&channel->inbound, buffer, userlen,
 			     (desc.DataOffset8 << 3));
 
 	spin_unlock_irqrestore(&channel->inbound_lock, flags);
@@ -983,7 +983,7 @@
 
 	spin_lock_irqsave(&channel->inbound_lock, flags);
 
-	ret = RingBufferPeek(&channel->Inbound, &desc,
+	ret = ringbuffer_peek(&channel->inbound, &desc,
 			     sizeof(struct vmpacket_descriptor));
 	if (ret != 0) {
 		spin_unlock_irqrestore(&channel->inbound_lock, flags);
@@ -999,7 +999,7 @@
 
 	DPRINT_DBG(VMBUS, "packet received on channel %p relid %d <type %d "
 		   "flag %d tid %llx pktlen %d datalen %d> ",
-		   channel, channel->OfferMsg.ChildRelId, desc.Type,
+		   channel, channel->offermsg.child_relid, desc.Type,
 		   desc.Flags, desc.TransactionId, packetlen, userlen);
 
 	*buffer_actual_len = packetlen;
@@ -1015,7 +1015,7 @@
 	*requestid = desc.TransactionId;
 
 	/* Copy over the entire packet to the user buffer */
-	ret = RingBufferRead(&channel->Inbound, buffer, packetlen, 0);
+	ret = ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
 
 	spin_unlock_irqrestore(&channel->inbound_lock, flags);
 	return 0;
@@ -1030,7 +1030,7 @@
 	dump_vmbus_channel(channel);
 	/* ASSERT(Channel->OnChannelCallback); */
 
-	channel->OnChannelCallback(channel->ChannelCallbackContext);
+	channel->onchannel_callback(channel->channel_callback_context);
 
 	mod_timer(&channel->poll_timer, jiffies + usecs_to_jiffies(100));
 }
@@ -1042,8 +1042,8 @@
 {
 	struct vmbus_channel *channel = (struct vmbus_channel *)data;
 
-	if (channel->OnChannelCallback)
-		channel->OnChannelCallback(channel->ChannelCallbackContext);
+	if (channel->onchannel_callback)
+		channel->onchannel_callback(channel->channel_callback_context);
 }
 
 /*
@@ -1051,7 +1051,7 @@
  */
 static void dump_vmbus_channel(struct vmbus_channel *channel)
 {
-	DPRINT_DBG(VMBUS, "Channel (%d)", channel->OfferMsg.ChildRelId);
-	DumpRingInfo(&channel->Outbound, "Outbound ");
-	DumpRingInfo(&channel->Inbound, "Inbound ");
+	DPRINT_DBG(VMBUS, "Channel (%d)", channel->offermsg.child_relid);
+	dump_ring_info(&channel->outbound, "Outbound ");
+	dump_ring_info(&channel->inbound, "Inbound ");
 }
diff --git a/drivers/staging/hv/channel_mgmt.c b/drivers/staging/hv/channel_mgmt.c
index 45dbe30..d44d5c3 100644
--- a/drivers/staging/hv/channel_mgmt.c
+++ b/drivers/staging/hv/channel_mgmt.c
@@ -251,8 +251,8 @@
 	channel->poll_timer.data = (unsigned long)channel;
 	channel->poll_timer.function = vmbus_ontimer;
 
-	channel->ControlWQ = create_workqueue("hv_vmbus_ctl");
-	if (!channel->ControlWQ) {
+	channel->controlwq = create_workqueue("hv_vmbus_ctl");
+	if (!channel->controlwq) {
 		kfree(channel);
 		return NULL;
 	}
@@ -263,12 +263,14 @@
 /*
 * release_channel - Release the vmbus channel object itself
  */
-static inline void release_channel(void *context)
+static void release_channel(struct work_struct *work)
 {
-	struct vmbus_channel *channel = context;
+	struct vmbus_channel *channel = container_of(work,
+						     struct vmbus_channel,
+						     work);
 
 	DPRINT_DBG(VMBUS, "releasing channel (%p)", channel);
-	destroy_workqueue(channel->ControlWQ);
+	destroy_workqueue(channel->controlwq);
 	DPRINT_DBG(VMBUS, "channel released (%p)", channel);
 
 	kfree(channel);
@@ -286,8 +288,8 @@
 	 * workqueue/thread context
 	 * ie we can't destroy ourselves.
 	 */
-	osd_schedule_callback(gVmbusConnection.WorkQueue, release_channel,
-			      channel);
+	INIT_WORK(&channel->work, release_channel);
+	queue_work(gVmbusConnection.WorkQueue, &channel->work);
 }
 
 
@@ -308,29 +310,46 @@
 	spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
 }
 
+/*
+ * vmbus_process_rescind_offer -
+ * Rescind the offer by initiating a device removal
+ */
+static void vmbus_process_rescind_offer(struct work_struct *work)
+{
+	struct vmbus_channel *channel = container_of(work,
+						     struct vmbus_channel,
+						     work);
+
+	vmbus_child_device_unregister(channel->device_obj);
+}
 
 /*
  * vmbus_process_offer - Process the offer by creating a channel/device
  * associated with this offer
  */
-static void vmbus_process_offer(void *context)
+static void vmbus_process_offer(struct work_struct *work)
 {
-	struct vmbus_channel *newchannel = context;
+	struct vmbus_channel *newchannel = container_of(work,
+							struct vmbus_channel,
+							work);
 	struct vmbus_channel *channel;
 	bool fnew = true;
 	int ret;
 	int cnt;
 	unsigned long flags;
 
+	/* The next possible work is rescind handling */
+	INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
+
 	/* Make sure this is a new offer */
 	spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
 
-	list_for_each_entry(channel, &gVmbusConnection.ChannelList, ListEntry) {
-		if (!memcmp(&channel->OfferMsg.Offer.InterfaceType,
-			    &newchannel->OfferMsg.Offer.InterfaceType,
+	list_for_each_entry(channel, &gVmbusConnection.ChannelList, listentry) {
+		if (!memcmp(&channel->offermsg.offer.InterfaceType,
+			    &newchannel->offermsg.offer.InterfaceType,
 			    sizeof(struct hv_guid)) &&
-		    !memcmp(&channel->OfferMsg.Offer.InterfaceInstance,
-			    &newchannel->OfferMsg.Offer.InterfaceInstance,
+		    !memcmp(&channel->offermsg.offer.InterfaceInstance,
+			    &newchannel->offermsg.offer.InterfaceInstance,
 			    sizeof(struct hv_guid))) {
 			fnew = false;
 			break;
@@ -338,14 +357,14 @@
 	}
 
 	if (fnew)
-		list_add_tail(&newchannel->ListEntry,
+		list_add_tail(&newchannel->listentry,
 			      &gVmbusConnection.ChannelList);
 
 	spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
 
 	if (!fnew) {
 		DPRINT_DBG(VMBUS, "Ignoring duplicate offer for relid (%d)",
-			   newchannel->OfferMsg.ChildRelId);
+			   newchannel->offermsg.child_relid);
 		free_channel(newchannel);
 		return;
 	}
@@ -355,27 +374,27 @@
 	 * We need to set the DeviceObject field before calling
 	 * VmbusChildDeviceAdd()
 	 */
-	newchannel->DeviceObject = VmbusChildDeviceCreate(
-		&newchannel->OfferMsg.Offer.InterfaceType,
-		&newchannel->OfferMsg.Offer.InterfaceInstance,
+	newchannel->device_obj = vmbus_child_device_create(
+		&newchannel->offermsg.offer.InterfaceType,
+		&newchannel->offermsg.offer.InterfaceInstance,
 		newchannel);
 
 	DPRINT_DBG(VMBUS, "child device object allocated - %p",
-		   newchannel->DeviceObject);
+		   newchannel->device_obj);
 
 	/*
 	 * Add the new device to the bus. This will kick off device-driver
 	 * binding which eventually invokes the device driver's AddDevice()
 	 * method.
 	 */
-	ret = VmbusChildDeviceAdd(newchannel->DeviceObject);
+	ret = VmbusChildDeviceAdd(newchannel->device_obj);
 	if (ret != 0) {
 		DPRINT_ERR(VMBUS,
 			   "unable to add child device object (relid %d)",
-			   newchannel->OfferMsg.ChildRelId);
+			   newchannel->offermsg.child_relid);
 
 		spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
-		list_del(&newchannel->ListEntry);
+		list_del(&newchannel->listentry);
 		spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
 
 		free_channel(newchannel);
@@ -385,11 +404,11 @@
 		 * so that when we do close the channel normally, we
 		 * can cleanup properly
 		 */
-		newchannel->State = CHANNEL_OPEN_STATE;
+		newchannel->state = CHANNEL_OPEN_STATE;
 
 		/* Open IC channels */
 		for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) {
-			if (memcmp(&newchannel->OfferMsg.Offer.InterfaceType,
+			if (memcmp(&newchannel->offermsg.offer.InterfaceType,
 				   &hv_cb_utils[cnt].data,
 				   sizeof(struct hv_guid)) == 0 &&
 				vmbus_open(newchannel, 2 * PAGE_SIZE,
@@ -406,17 +425,6 @@
 }
 
 /*
- * vmbus_process_rescind_offer -
- * Rescind the offer by initiating a device removal
- */
-static void vmbus_process_rescind_offer(void *context)
-{
-	struct vmbus_channel *channel = context;
-
-	VmbusChildDeviceRemove(channel->DeviceObject);
-}
-
-/*
  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
  *
  * We ignore all offers except network and storage offers. For each network and
@@ -434,7 +442,7 @@
 
 	offer = (struct vmbus_channel_offer_channel *)hdr;
 	for (i = 0; i < MAX_NUM_DEVICE_CLASSES_SUPPORTED; i++) {
-		if (memcmp(&offer->Offer.InterfaceType,
+		if (memcmp(&offer->offer.InterfaceType,
 		    &gSupportedDeviceClasses[i], sizeof(struct hv_guid)) == 0) {
 			fsupported = 1;
 			break;
@@ -443,12 +451,12 @@
 
 	if (!fsupported) {
 		DPRINT_DBG(VMBUS, "Ignoring channel offer notification for "
-			   "child relid %d", offer->ChildRelId);
+			   "child relid %d", offer->child_relid);
 		return;
 	}
 
-	guidtype = &offer->Offer.InterfaceType;
-	guidinstance = &offer->Offer.InterfaceInstance;
+	guidtype = &offer->offer.InterfaceType;
+	guidinstance = &offer->offer.InterfaceInstance;
 
 	DPRINT_INFO(VMBUS, "Channel offer notification - "
 		    "child relid %d monitor id %d allocated %d, "
@@ -456,8 +464,8 @@
 		    "%02x%02x%02x%02x%02x%02x%02x%02x} "
 		    "instance {%02x%02x%02x%02x-%02x%02x-%02x%02x-"
 		    "%02x%02x%02x%02x%02x%02x%02x%02x}",
-		    offer->ChildRelId, offer->MonitorId,
-		    offer->MonitorAllocated,
+		    offer->child_relid, offer->monitorid,
+		    offer->monitor_allocated,
 		    guidtype->data[3], guidtype->data[2],
 		    guidtype->data[1], guidtype->data[0],
 		    guidtype->data[5], guidtype->data[4],
@@ -484,14 +492,14 @@
 
 	DPRINT_DBG(VMBUS, "channel object allocated - %p", newchannel);
 
-	memcpy(&newchannel->OfferMsg, offer,
+	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
-	newchannel->MonitorGroup = (u8)offer->MonitorId / 32;
-	newchannel->MonitorBit = (u8)offer->MonitorId % 32;
+	newchannel->monitor_grp = (u8)offer->monitorid / 32;
+	newchannel->monitor_bit = (u8)offer->monitorid % 32;
 
 	/* TODO: Make sure the offer comes from our parent partition */
-	osd_schedule_callback(newchannel->ControlWQ, vmbus_process_offer,
-			      newchannel);
+	INIT_WORK(&newchannel->work, vmbus_process_offer);
+	queue_work(newchannel->controlwq, &newchannel->work);
 }
 
 /*
@@ -505,16 +513,16 @@
 	struct vmbus_channel *channel;
 
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
-	channel = GetChannelFromRelId(rescind->ChildRelId);
+	channel = GetChannelFromRelId(rescind->child_relid);
 	if (channel == NULL) {
 		DPRINT_DBG(VMBUS, "channel not found for relId %d",
-			   rescind->ChildRelId);
+			   rescind->child_relid);
 		return;
 	}
 
-	osd_schedule_callback(channel->ControlWQ,
-			      vmbus_process_rescind_offer,
-			      channel);
+	/* work is initialized for vmbus_process_rescind_offer() from
+	 * vmbus_process_offer() where the channel got created */
+	queue_work(channel->controlwq, &channel->work);
 }
 
 /*
@@ -545,7 +553,7 @@
 	unsigned long flags;
 
 	result = (struct vmbus_channel_open_result *)hdr;
-	DPRINT_DBG(VMBUS, "vmbus open result - %d", result->Status);
+	DPRINT_DBG(VMBUS, "vmbus open result - %d", result->status);
 
 	/*
 	 * Find the open msg, copy the result and signal/unblock the wait event
@@ -556,17 +564,17 @@
 /* FIXME: this should probably use list_entry() instead */
 		msginfo = (struct vmbus_channel_msginfo *)curr;
 		requestheader =
-			(struct vmbus_channel_message_header *)msginfo->Msg;
+			(struct vmbus_channel_message_header *)msginfo->msg;
 
-		if (requestheader->MessageType == ChannelMessageOpenChannel) {
+		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
 			openmsg =
-			(struct vmbus_channel_open_channel *)msginfo->Msg;
-			if (openmsg->ChildRelId == result->ChildRelId &&
-			    openmsg->OpenId == result->OpenId) {
-				memcpy(&msginfo->Response.OpenResult,
+			(struct vmbus_channel_open_channel *)msginfo->msg;
+			if (openmsg->child_relid == result->child_relid &&
+			    openmsg->openid == result->openid) {
+				memcpy(&msginfo->response.open_result,
 				       result,
 				       sizeof(struct vmbus_channel_open_result));
-				osd_WaitEventSet(msginfo->WaitEvent);
+				osd_waitevent_set(msginfo->waitevent);
 				break;
 			}
 		}
@@ -592,7 +600,7 @@
 
 	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
 	DPRINT_DBG(VMBUS, "vmbus gpadl created result - %d",
-		   gpadlcreated->CreationStatus);
+		   gpadlcreated->creation_status);
 
 	/*
 	 * Find the establish msg, copy the result and signal/unblock the wait
@@ -604,19 +612,19 @@
 /* FIXME: this should probably use list_entry() instead */
 		msginfo = (struct vmbus_channel_msginfo *)curr;
 		requestheader =
-			(struct vmbus_channel_message_header *)msginfo->Msg;
+			(struct vmbus_channel_message_header *)msginfo->msg;
 
-		if (requestheader->MessageType == ChannelMessageGpadlHeader) {
+		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
 			gpadlheader =
 			(struct vmbus_channel_gpadl_header *)requestheader;
 
-			if ((gpadlcreated->ChildRelId ==
-			     gpadlheader->ChildRelId) &&
-			    (gpadlcreated->Gpadl == gpadlheader->Gpadl)) {
-				memcpy(&msginfo->Response.GpadlCreated,
+			if ((gpadlcreated->child_relid ==
+			     gpadlheader->child_relid) &&
+			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
+				memcpy(&msginfo->response.gpadl_created,
 				       gpadlcreated,
 				       sizeof(struct vmbus_channel_gpadl_created));
-				osd_WaitEventSet(msginfo->WaitEvent);
+				osd_waitevent_set(msginfo->waitevent);
 				break;
 			}
 		}
@@ -652,17 +660,17 @@
 /* FIXME: this should probably use list_entry() instead */
 		msginfo = (struct vmbus_channel_msginfo *)curr;
 		requestheader =
-			(struct vmbus_channel_message_header *)msginfo->Msg;
+			(struct vmbus_channel_message_header *)msginfo->msg;
 
-		if (requestheader->MessageType == ChannelMessageGpadlTeardown) {
+		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
 			gpadl_teardown =
 			(struct vmbus_channel_gpadl_teardown *)requestheader;
 
-			if (gpadl_torndown->Gpadl == gpadl_teardown->Gpadl) {
-				memcpy(&msginfo->Response.GpadlTorndown,
+			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
+				memcpy(&msginfo->response.gpadl_torndown,
 				       gpadl_torndown,
 				       sizeof(struct vmbus_channel_gpadl_torndown));
-				osd_WaitEventSet(msginfo->WaitEvent);
+				osd_waitevent_set(msginfo->waitevent);
 				break;
 			}
 		}
@@ -694,16 +702,16 @@
 /* FIXME: this should probably use list_entry() instead */
 		msginfo = (struct vmbus_channel_msginfo *)curr;
 		requestheader =
-			(struct vmbus_channel_message_header *)msginfo->Msg;
+			(struct vmbus_channel_message_header *)msginfo->msg;
 
-		if (requestheader->MessageType ==
-		    ChannelMessageInitiateContact) {
+		if (requestheader->msgtype ==
+		    CHANNELMSG_INITIATE_CONTACT) {
 			initiate =
 			(struct vmbus_channel_initiate_contact *)requestheader;
-			memcpy(&msginfo->Response.VersionResponse,
+			memcpy(&msginfo->response.version_response,
 			      version_response,
 			      sizeof(struct vmbus_channel_version_response));
-			osd_WaitEventSet(msginfo->WaitEvent);
+			osd_waitevent_set(msginfo->waitevent);
 		}
 	}
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
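The "FIXME: this should probably use list_entry() instead" notes in these hunks refer to the open-coded casts of the list node pointer back to struct vmbus_channel_msginfo, which only happen to work because msglistentry is the first member. A minimal sketch of the idiomatic form, using the field names introduced by this patch (the sketch itself is not part of the patch):

    struct vmbus_channel_msginfo *msginfo;
    struct vmbus_channel_message_header *requestheader;
    unsigned long flags;

    spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
    list_for_each_entry(msginfo, &gVmbusConnection.ChannelMsgList,
                        msglistentry) {
            /* list_for_each_entry() does the container_of() for us */
            requestheader =
                    (struct vmbus_channel_message_header *)msginfo->msg;
            if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
                    /* match the request against the result, as above */
            }
    }
    spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);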
@@ -711,24 +719,24 @@
 
 /* Channel message dispatch table */
 static struct vmbus_channel_message_table_entry
-	gChannelMessageTable[ChannelMessageCount] = {
-	{ChannelMessageInvalid,			NULL},
-	{ChannelMessageOfferChannel,		vmbus_onoffer},
-	{ChannelMessageRescindChannelOffer,	vmbus_onoffer_rescind},
-	{ChannelMessageRequestOffers,		NULL},
-	{ChannelMessageAllOffersDelivered,	vmbus_onoffers_delivered},
-	{ChannelMessageOpenChannel,		NULL},
-	{ChannelMessageOpenChannelResult,	vmbus_onopen_result},
-	{ChannelMessageCloseChannel,		NULL},
-	{ChannelMessageGpadlHeader,		NULL},
-	{ChannelMessageGpadlBody,		NULL},
-	{ChannelMessageGpadlCreated,		vmbus_ongpadl_created},
-	{ChannelMessageGpadlTeardown,		NULL},
-	{ChannelMessageGpadlTorndown,		vmbus_ongpadl_torndown},
-	{ChannelMessageRelIdReleased,		NULL},
-	{ChannelMessageInitiateContact,		NULL},
-	{ChannelMessageVersionResponse,		vmbus_onversion_response},
-	{ChannelMessageUnload,			NULL},
+	gChannelMessageTable[CHANNELMSG_COUNT] = {
+	{CHANNELMSG_INVALID,			NULL},
+	{CHANNELMSG_OFFERCHANNEL,		vmbus_onoffer},
+	{CHANNELMSG_RESCIND_CHANNELOFFER,	vmbus_onoffer_rescind},
+	{CHANNELMSG_REQUESTOFFERS,		NULL},
+	{CHANNELMSG_ALLOFFERS_DELIVERED,	vmbus_onoffers_delivered},
+	{CHANNELMSG_OPENCHANNEL,		NULL},
+	{CHANNELMSG_OPENCHANNEL_RESULT,	vmbus_onopen_result},
+	{CHANNELMSG_CLOSECHANNEL,		NULL},
+	{CHANNELMSG_GPADL_HEADER,		NULL},
+	{CHANNELMSG_GPADL_BODY,		NULL},
+	{CHANNELMSG_GPADL_CREATED,		vmbus_ongpadl_created},
+	{CHANNELMSG_GPADL_TEARDOWN,		NULL},
+	{CHANNELMSG_GPADL_TORNDOWN,		vmbus_ongpadl_torndown},
+	{CHANNELMSG_RELID_RELEASED,		NULL},
+	{CHANNELMSG_INITIATE_CONTACT,		NULL},
+	{CHANNELMSG_VERSION_RESPONSE,		vmbus_onversion_response},
+	{CHANNELMSG_UNLOAD,			NULL},
 };
 
 /*
@@ -742,29 +750,25 @@
 	struct vmbus_channel_message_header *hdr;
 	int size;
 
-	hdr = (struct vmbus_channel_message_header *)msg->u.Payload;
-	size = msg->Header.PayloadSize;
+	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+	size = msg->header.payload_size;
 
-	DPRINT_DBG(VMBUS, "message type %d size %d", hdr->MessageType, size);
+	DPRINT_DBG(VMBUS, "message type %d size %d", hdr->msgtype, size);
 
-	if (hdr->MessageType >= ChannelMessageCount) {
+	if (hdr->msgtype >= CHANNELMSG_COUNT) {
 		DPRINT_ERR(VMBUS,
 			   "Received invalid channel message type %d size %d",
-			   hdr->MessageType, size);
+			   hdr->msgtype, size);
 		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
-				     (unsigned char *)msg->u.Payload, size);
-		kfree(msg);
+				     (unsigned char *)msg->u.payload, size);
 		return;
 	}
 
-	if (gChannelMessageTable[hdr->MessageType].messageHandler)
-		gChannelMessageTable[hdr->MessageType].messageHandler(hdr);
+	if (gChannelMessageTable[hdr->msgtype].messageHandler)
+		gChannelMessageTable[hdr->msgtype].messageHandler(hdr);
 	else
 		DPRINT_ERR(VMBUS, "Unhandled channel message type %d",
-			   hdr->MessageType);
-
-	/* Free the msg that was allocated in VmbusOnMsgDPC() */
-	kfree(msg);
+			   hdr->msgtype);
 }
 
 /*
@@ -782,15 +786,15 @@
 	if (!msginfo)
 		return -ENOMEM;
 
-	msginfo->WaitEvent = osd_WaitEventCreate();
-	if (!msginfo->WaitEvent) {
+	msginfo->waitevent = osd_waitevent_create();
+	if (!msginfo->waitevent) {
 		kfree(msginfo);
 		return -ENOMEM;
 	}
 
-	msg = (struct vmbus_channel_message_header *)msginfo->Msg;
+	msg = (struct vmbus_channel_message_header *)msginfo->msg;
 
-	msg->MessageType = ChannelMessageRequestOffers;
+	msg->msgtype = CHANNELMSG_REQUESTOFFERS;
 
 	/*SpinlockAcquire(gVmbusConnection.channelMsgLock);
 	INSERT_TAIL_LIST(&gVmbusConnection.channelMsgList,
@@ -808,7 +812,7 @@
 
 		goto Cleanup;
 	}
-	/* osd_WaitEventWait(msgInfo->waitEvent); */
+	/* osd_waitevent_wait(msgInfo->waitEvent); */
 
 	/*SpinlockAcquire(gVmbusConnection.channelMsgLock);
 	REMOVE_ENTRY_LIST(&msgInfo->msgListEntry);
@@ -817,7 +821,7 @@
 
 Cleanup:
 	if (msginfo) {
-		kfree(msginfo->WaitEvent);
+		kfree(msginfo->waitevent);
 		kfree(msginfo);
 	}
 
@@ -837,17 +841,17 @@
 	spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
 
 	list_for_each_entry_safe(channel, pos, &gVmbusConnection.ChannelList,
-				 ListEntry) {
+				 listentry) {
 		if (channel == start)
 			break;
 
-		if (!channel->DeviceObject->Driver) {
-			list_del(&channel->ListEntry);
+		if (!channel->device_obj->Driver) {
+			list_del(&channel->listentry);
 			DPRINT_INFO(VMBUS,
 				    "Releasing unattached device object %p",
-				    channel->DeviceObject);
+				    channel->device_obj);
 
-			VmbusChildDeviceRemove(channel->DeviceObject);
+			vmbus_child_device_unregister(channel->device_obj);
 			free_channel(channel);
 		} else {
 			if (!start)
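The osd_schedule_callback() calls in this file are replaced with the stock workqueue API (INIT_WORK() plus queue_work(), as in vmbus_onoffer() above). A side effect not visible in these hunks is that the handlers now receive a struct work_struct * rather than a void *, so they have to recover the channel with container_of(). A sketch of what vmbus_process_offer() is expected to look like after the conversion, assuming only the work member added to struct vmbus_channel by this patch:

    static void vmbus_process_offer(struct work_struct *work)
    {
            struct vmbus_channel *newchannel =
                    container_of(work, struct vmbus_channel, work);

            /* ... probe, register and start the offered channel ... */
    }

    /* queueing side, as done in vmbus_onoffer() */
    INIT_WORK(&newchannel->work, vmbus_process_offer);
    queue_work(newchannel->controlwq, &newchannel->work);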
diff --git a/drivers/staging/hv/channel_mgmt.h b/drivers/staging/hv/channel_mgmt.h
index d16cc08..de6b2a0 100644
--- a/drivers/staging/hv/channel_mgmt.h
+++ b/drivers/staging/hv/channel_mgmt.h
@@ -33,60 +33,60 @@
 
 /* Version 1 messages */
 enum vmbus_channel_message_type {
-	ChannelMessageInvalid			=  0,
-	ChannelMessageOfferChannel		=  1,
-	ChannelMessageRescindChannelOffer	=  2,
-	ChannelMessageRequestOffers		=  3,
-	ChannelMessageAllOffersDelivered	=  4,
-	ChannelMessageOpenChannel		=  5,
-	ChannelMessageOpenChannelResult		=  6,
-	ChannelMessageCloseChannel		=  7,
-	ChannelMessageGpadlHeader		=  8,
-	ChannelMessageGpadlBody			=  9,
-	ChannelMessageGpadlCreated		= 10,
-	ChannelMessageGpadlTeardown		= 11,
-	ChannelMessageGpadlTorndown		= 12,
-	ChannelMessageRelIdReleased		= 13,
-	ChannelMessageInitiateContact		= 14,
-	ChannelMessageVersionResponse		= 15,
-	ChannelMessageUnload			= 16,
+	CHANNELMSG_INVALID			=  0,
+	CHANNELMSG_OFFERCHANNEL		=  1,
+	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
+	CHANNELMSG_REQUESTOFFERS		=  3,
+	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
+	CHANNELMSG_OPENCHANNEL		=  5,
+	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
+	CHANNELMSG_CLOSECHANNEL		=  7,
+	CHANNELMSG_GPADL_HEADER		=  8,
+	CHANNELMSG_GPADL_BODY			=  9,
+	CHANNELMSG_GPADL_CREATED		= 10,
+	CHANNELMSG_GPADL_TEARDOWN		= 11,
+	CHANNELMSG_GPADL_TORNDOWN		= 12,
+	CHANNELMSG_RELID_RELEASED		= 13,
+	CHANNELMSG_INITIATE_CONTACT		= 14,
+	CHANNELMSG_VERSION_RESPONSE		= 15,
+	CHANNELMSG_UNLOAD			= 16,
 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
-	ChannelMessageViewRangeAdd		= 17,
-	ChannelMessageViewRangeRemove		= 18,
+	CHANNELMSG_VIEWRANGE_ADD		= 17,
+	CHANNELMSG_VIEWRANGE_REMOVE		= 18,
 #endif
-	ChannelMessageCount
+	CHANNELMSG_COUNT
 };
 
 struct vmbus_channel_message_header {
-	enum vmbus_channel_message_type MessageType;
-	u32 Padding;
+	enum vmbus_channel_message_type msgtype;
+	u32 padding;
 } __attribute__((packed));
 
 /* Query VMBus Version parameters */
 struct vmbus_channel_query_vmbus_version {
-	struct vmbus_channel_message_header Header;
-	u32 Version;
+	struct vmbus_channel_message_header header;
+	u32 version;
 } __attribute__((packed));
 
 /* VMBus Version Supported parameters */
 struct vmbus_channel_version_supported {
-	struct vmbus_channel_message_header Header;
-	bool VersionSupported;
+	struct vmbus_channel_message_header header;
+	bool version_supported;
 } __attribute__((packed));
 
 /* Offer Channel parameters */
 struct vmbus_channel_offer_channel {
-	struct vmbus_channel_message_header Header;
-	struct vmbus_channel_offer Offer;
-	u32 ChildRelId;
-	u8 MonitorId;
-	bool MonitorAllocated;
+	struct vmbus_channel_message_header header;
+	struct vmbus_channel_offer offer;
+	u32 child_relid;
+	u8 monitorid;
+	bool monitor_allocated;
 } __attribute__((packed));
 
 /* Rescind Offer parameters */
 struct vmbus_channel_rescind_offer {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
 } __attribute__((packed));
 
 /*
@@ -100,43 +100,43 @@
 
 /* Open Channel parameters */
 struct vmbus_channel_open_channel {
-	struct vmbus_channel_message_header Header;
+	struct vmbus_channel_message_header header;
 
 	/* Identifies the specific VMBus channel that is being opened. */
-	u32 ChildRelId;
+	u32 child_relid;
 
 	/* ID making a particular open request at a channel offer unique. */
-	u32 OpenId;
+	u32 openid;
 
 	/* GPADL for the channel's ring buffer. */
-	u32 RingBufferGpadlHandle;
+	u32 ringbuffer_gpadlhandle;
 
 	/* GPADL for the channel's server context save area. */
-	u32 ServerContextAreaGpadlHandle;
+	u32 server_contextarea_gpadlhandle;
 
 	/*
 	* The upstream ring buffer begins at offset zero in the memory
 	* described by RingBufferGpadlHandle. The downstream ring buffer
 	* follows it at this offset (in pages).
 	*/
-	u32 DownstreamRingBufferPageOffset;
+	u32 downstream_ringbuffer_pageoffset;
 
 	/* User-specific data to be passed along to the server endpoint. */
-	unsigned char UserData[MAX_USER_DEFINED_BYTES];
+	unsigned char userdata[MAX_USER_DEFINED_BYTES];
 } __attribute__((packed));
 
 /* Open Channel Result parameters */
 struct vmbus_channel_open_result {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
-	u32 OpenId;
-	u32 Status;
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
+	u32 openid;
+	u32 status;
 } __attribute__((packed));
 
 /* Close channel parameters; */
 struct vmbus_channel_close_channel {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
 } __attribute__((packed));
 
 /* Channel Message GPADL */
@@ -151,72 +151,72 @@
  * follow-up packet that contains more.
  */
 struct vmbus_channel_gpadl_header {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
-	u32 Gpadl;
-	u16 RangeBufLen;
-	u16 RangeCount;
-	struct gpa_range Range[0];
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
+	u32 gpadl;
+	u16 range_buflen;
+	u16 rangecount;
+	struct gpa_range range[0];
 } __attribute__((packed));
 
 /* This is the followup packet that contains more PFNs. */
 struct vmbus_channel_gpadl_body {
-	struct vmbus_channel_message_header Header;
-	u32 MessageNumber;
-	u32 Gpadl;
-	u64 Pfn[0];
+	struct vmbus_channel_message_header header;
+	u32 msgnumber;
+	u32 gpadl;
+	u64 pfn[0];
 } __attribute__((packed));
 
 struct vmbus_channel_gpadl_created {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
-	u32 Gpadl;
-	u32 CreationStatus;
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
+	u32 gpadl;
+	u32 creation_status;
 } __attribute__((packed));
 
 struct vmbus_channel_gpadl_teardown {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
-	u32 Gpadl;
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
+	u32 gpadl;
 } __attribute__((packed));
 
 struct vmbus_channel_gpadl_torndown {
-	struct vmbus_channel_message_header Header;
-	u32 Gpadl;
+	struct vmbus_channel_message_header header;
+	u32 gpadl;
 } __attribute__((packed));
 
 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
 struct vmbus_channel_view_range_add {
-	struct vmbus_channel_message_header Header;
-	PHYSICAL_ADDRESS ViewRangeBase;
-	u64 ViewRangeLength;
-	u32 ChildRelId;
+	struct vmbus_channel_message_header header;
+	PHYSICAL_ADDRESS viewrange_base;
+	u64 viewrange_length;
+	u32 child_relid;
 } __attribute__((packed));
 
 struct vmbus_channel_view_range_remove {
-	struct vmbus_channel_message_header Header;
-	PHYSICAL_ADDRESS ViewRangeBase;
-	u32 ChildRelId;
+	struct vmbus_channel_message_header header;
+	PHYSICAL_ADDRESS viewrange_base;
+	u32 child_relid;
 } __attribute__((packed));
 #endif
 
 struct vmbus_channel_relid_released {
-	struct vmbus_channel_message_header Header;
-	u32 ChildRelId;
+	struct vmbus_channel_message_header header;
+	u32 child_relid;
 } __attribute__((packed));
 
 struct vmbus_channel_initiate_contact {
-	struct vmbus_channel_message_header Header;
-	u32 VMBusVersionRequested;
-	u32 Padding2;
-	u64 InterruptPage;
-	u64 MonitorPage1;
-	u64 MonitorPage2;
+	struct vmbus_channel_message_header header;
+	u32 vmbus_version_requested;
+	u32 padding2;
+	u64 interrupt_page;
+	u64 monitor_page1;
+	u64 monitor_page2;
 } __attribute__((packed));
 
 struct vmbus_channel_version_response {
-	struct vmbus_channel_message_header Header;
-	bool VersionSupported;
+	struct vmbus_channel_message_header header;
+	bool version_supported;
 } __attribute__((packed));
 
 enum vmbus_channel_state {
@@ -226,54 +226,55 @@
 };
 
 struct vmbus_channel {
-	struct list_head ListEntry;
+	struct list_head listentry;
 
-	struct hv_device *DeviceObject;
+	struct hv_device *device_obj;
 
 	struct timer_list poll_timer; /* SA-111 workaround */
+	struct work_struct work;
 
-	enum vmbus_channel_state State;
+	enum vmbus_channel_state state;
 
-	struct vmbus_channel_offer_channel OfferMsg;
+	struct vmbus_channel_offer_channel offermsg;
 	/*
 	 * These are based on the OfferMsg.MonitorId.
 	 * Save it here for easy access.
 	 */
-	u8 MonitorGroup;
-	u8 MonitorBit;
+	u8 monitor_grp;
+	u8 monitor_bit;
 
-	u32 RingBufferGpadlHandle;
+	u32 ringbuffer_gpadlhandle;
 
 	/* Allocated memory for ring buffer */
-	void *RingBufferPages;
-	u32 RingBufferPageCount;
-	struct hv_ring_buffer_info Outbound;	/* send to parent */
-	struct hv_ring_buffer_info Inbound;	/* receive from parent */
+	void *ringbuffer_pages;
+	u32 ringbuffer_pagecount;
+	struct hv_ring_buffer_info outbound;	/* send to parent */
+	struct hv_ring_buffer_info inbound;	/* receive from parent */
 	spinlock_t inbound_lock;
-	struct workqueue_struct *ControlWQ;
+	struct workqueue_struct *controlwq;
 
 	/* Channel callback are invoked in this workqueue context */
 	/* HANDLE dataWorkQueue; */
 
-	void (*OnChannelCallback)(void *context);
-	void *ChannelCallbackContext;
+	void (*onchannel_callback)(void *context);
+	void *channel_callback_context;
 };
 
 struct vmbus_channel_debug_info {
-	u32 RelId;
-	enum vmbus_channel_state State;
-	struct hv_guid InterfaceType;
-	struct hv_guid InterfaceInstance;
-	u32 MonitorId;
-	u32 ServerMonitorPending;
-	u32 ServerMonitorLatency;
-	u32 ServerMonitorConnectionId;
-	u32 ClientMonitorPending;
-	u32 ClientMonitorLatency;
-	u32 ClientMonitorConnectionId;
+	u32 relid;
+	enum vmbus_channel_state state;
+	struct hv_guid interfacetype;
+	struct hv_guid interface_instance;
+	u32 monitorid;
+	u32 servermonitor_pending;
+	u32 servermonitor_latency;
+	u32 servermonitor_connectionid;
+	u32 clientmonitor_pending;
+	u32 clientmonitor_latency;
+	u32 clientmonitor_connectionid;
 
-	struct hv_ring_buffer_debug_info Inbound;
-	struct hv_ring_buffer_debug_info Outbound;
+	struct hv_ring_buffer_debug_info inbound;
+	struct hv_ring_buffer_debug_info outbound;
 };
 
 /*
@@ -282,28 +283,28 @@
  */
 struct vmbus_channel_msginfo {
 	/* Bookkeeping stuff */
-	struct list_head MsgListEntry;
+	struct list_head msglistentry;
 
 	/* So far, this is only used to handle gpadl body message */
-	struct list_head SubMsgList;
+	struct list_head submsglist;
 
 	/* Synchronize the request/response if needed */
-	struct osd_waitevent *WaitEvent;
+	struct osd_waitevent *waitevent;
 
 	union {
-		struct vmbus_channel_version_supported VersionSupported;
-		struct vmbus_channel_open_result OpenResult;
-		struct vmbus_channel_gpadl_torndown GpadlTorndown;
-		struct vmbus_channel_gpadl_created GpadlCreated;
-		struct vmbus_channel_version_response VersionResponse;
-	} Response;
+		struct vmbus_channel_version_supported version_supported;
+		struct vmbus_channel_open_result open_result;
+		struct vmbus_channel_gpadl_torndown gpadl_torndown;
+		struct vmbus_channel_gpadl_created gpadl_created;
+		struct vmbus_channel_version_response version_response;
+	} response;
 
-	u32 MessageSize;
+	u32 msgsize;
 	/*
 	 * The channel message that goes out on the "wire".
 	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
 	 */
-	unsigned char Msg[0];
+	unsigned char msg[0];
 };
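The msg[0] member that ends struct vmbus_channel_msginfo is a zero-length (flexible) array: callers allocate the bookkeeping header and the wire message as one block and build the message in place. A sketch of the usual allocation pattern with the renamed fields; the open-channel message is only an illustrative choice, not taken from these hunks:

    struct vmbus_channel_msginfo *msginfo;
    struct vmbus_channel_open_channel *openmsg;

    msginfo = kzalloc(sizeof(*msginfo) +
                      sizeof(struct vmbus_channel_open_channel), GFP_KERNEL);
    if (!msginfo)
            return -ENOMEM;

    /* the wire message lives in the trailing msg[] area */
    openmsg = (struct vmbus_channel_open_channel *)msginfo->msg;
    openmsg->header.msgtype = CHANNELMSG_OPENCHANNEL;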
 
 
diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c
index f847707..c2e298f 100644
--- a/drivers/staging/hv/connection.c
+++ b/drivers/staging/hv/connection.c
@@ -66,7 +66,7 @@
 	 * Setup the vmbus event connection for channel interrupt
 	 * abstraction stuff
 	 */
-	gVmbusConnection.InterruptPage = osd_PageAlloc(1);
+	gVmbusConnection.InterruptPage = osd_page_alloc(1);
 	if (gVmbusConnection.InterruptPage == NULL) {
 		ret = -1;
 		goto Cleanup;
@@ -81,7 +81,7 @@
 	 * Setup the monitor notification facility. The 1st page for
 	 * parent->child and the 2nd page for child->parent
 	 */
-	gVmbusConnection.MonitorPages = osd_PageAlloc(2);
+	gVmbusConnection.MonitorPages = osd_page_alloc(2);
 	if (gVmbusConnection.MonitorPages == NULL) {
 		ret = -1;
 		goto Cleanup;
@@ -95,19 +95,19 @@
 		goto Cleanup;
 	}
 
-	msgInfo->WaitEvent = osd_WaitEventCreate();
-	if (!msgInfo->WaitEvent) {
+	msgInfo->waitevent = osd_waitevent_create();
+	if (!msgInfo->waitevent) {
 		ret = -ENOMEM;
 		goto Cleanup;
 	}
 
-	msg = (struct vmbus_channel_initiate_contact *)msgInfo->Msg;
+	msg = (struct vmbus_channel_initiate_contact *)msgInfo->msg;
 
-	msg->Header.MessageType = ChannelMessageInitiateContact;
-	msg->VMBusVersionRequested = VMBUS_REVISION_NUMBER;
-	msg->InterruptPage = virt_to_phys(gVmbusConnection.InterruptPage);
-	msg->MonitorPage1 = virt_to_phys(gVmbusConnection.MonitorPages);
-	msg->MonitorPage2 = virt_to_phys(
+	msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+	msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
+	msg->interrupt_page = virt_to_phys(gVmbusConnection.InterruptPage);
+	msg->monitor_page1 = virt_to_phys(gVmbusConnection.MonitorPages);
+	msg->monitor_page2 = virt_to_phys(
 			(void *)((unsigned long)gVmbusConnection.MonitorPages +
 				 PAGE_SIZE));
 
@@ -116,30 +116,30 @@
 	 * receive the response before returning from this routine
 	 */
 	spin_lock_irqsave(&gVmbusConnection.channelmsg_lock, flags);
-	list_add_tail(&msgInfo->MsgListEntry,
+	list_add_tail(&msgInfo->msglistentry,
 		      &gVmbusConnection.ChannelMsgList);
 
 	spin_unlock_irqrestore(&gVmbusConnection.channelmsg_lock, flags);
 
 	DPRINT_DBG(VMBUS, "Vmbus connection - interrupt pfn %llx, "
 		   "monitor1 pfn %llx,, monitor2 pfn %llx",
-		   msg->InterruptPage, msg->MonitorPage1, msg->MonitorPage2);
+		   msg->interrupt_page, msg->monitor_page1, msg->monitor_page2);
 
 	DPRINT_DBG(VMBUS, "Sending channel initiate msg...");
 	ret = VmbusPostMessage(msg,
 			       sizeof(struct vmbus_channel_initiate_contact));
 	if (ret != 0) {
-		list_del(&msgInfo->MsgListEntry);
+		list_del(&msgInfo->msglistentry);
 		goto Cleanup;
 	}
 
 	/* Wait for the connection response */
-	osd_WaitEventWait(msgInfo->WaitEvent);
+	osd_waitevent_wait(msgInfo->waitevent);
 
-	list_del(&msgInfo->MsgListEntry);
+	list_del(&msgInfo->msglistentry);
 
 	/* Check if successful */
-	if (msgInfo->Response.VersionResponse.VersionSupported) {
+	if (msgInfo->response.version_response.version_supported) {
 		DPRINT_INFO(VMBUS, "Vmbus connected!!");
 		gVmbusConnection.ConnectState = Connected;
 
@@ -151,7 +151,7 @@
 		goto Cleanup;
 	}
 
-	kfree(msgInfo->WaitEvent);
+	kfree(msgInfo->waitevent);
 	kfree(msgInfo);
 	return 0;
 
@@ -162,17 +162,17 @@
 		destroy_workqueue(gVmbusConnection.WorkQueue);
 
 	if (gVmbusConnection.InterruptPage) {
-		osd_PageFree(gVmbusConnection.InterruptPage, 1);
+		osd_page_free(gVmbusConnection.InterruptPage, 1);
 		gVmbusConnection.InterruptPage = NULL;
 	}
 
 	if (gVmbusConnection.MonitorPages) {
-		osd_PageFree(gVmbusConnection.MonitorPages, 2);
+		osd_page_free(gVmbusConnection.MonitorPages, 2);
 		gVmbusConnection.MonitorPages = NULL;
 	}
 
 	if (msgInfo) {
-		kfree(msgInfo->WaitEvent);
+		kfree(msgInfo->waitevent);
 		kfree(msgInfo);
 	}
 
@@ -195,14 +195,14 @@
 	if (!msg)
 		return -ENOMEM;
 
-	msg->MessageType = ChannelMessageUnload;
+	msg->msgtype = CHANNELMSG_UNLOAD;
 
 	ret = VmbusPostMessage(msg,
 			       sizeof(struct vmbus_channel_message_header));
 	if (ret != 0)
 		goto Cleanup;
 
-	osd_PageFree(gVmbusConnection.InterruptPage, 1);
+	osd_page_free(gVmbusConnection.InterruptPage, 1);
 
 	/* TODO: iterate thru the msg list and free up */
 	destroy_workqueue(gVmbusConnection.WorkQueue);
@@ -226,8 +226,8 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
-	list_for_each_entry(channel, &gVmbusConnection.ChannelList, ListEntry) {
-		if (channel->OfferMsg.ChildRelId == relId) {
+	list_for_each_entry(channel, &gVmbusConnection.ChannelList, listentry) {
+		if (channel->offermsg.child_relid == relId) {
 			foundChannel = channel;
 			break;
 		}
@@ -309,9 +309,9 @@
 {
 	union hv_connection_id connId;
 
-	connId.Asu32 = 0;
-	connId.u.Id = VMBUS_MESSAGE_CONNECTION_ID;
-	return HvPostMessage(connId, 1, buffer, bufferLen);
+	connId.asu32 = 0;
+	connId.u.id = VMBUS_MESSAGE_CONNECTION_ID;
+	return hv_post_message(connId, 1, buffer, bufferLen);
 }
 
 /*
@@ -324,5 +324,5 @@
 		(unsigned long *)gVmbusConnection.SendInterruptPage +
 		(childRelId >> 5));
 
-	return HvSignalEvent();
+	return hv_signal_event();
 }
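The connect sequence above is one half of a simple request/response pattern: the request is queued on gVmbusConnection.ChannelMsgList and the caller blocks on msginfo->waitevent, while vmbus_onversion_response() (in the channel_mgmt.c hunks earlier) copies the host's reply into msginfo->response and wakes it. In outline, stitched together from both files purely for illustration (connected is just a placeholder local):

    /* requester side (VmbusConnect, above) */
    osd_waitevent_wait(msginfo->waitevent);
    connected = msginfo->response.version_response.version_supported;

    /* responder side (vmbus_onversion_response, channel_mgmt.c) */
    memcpy(&msginfo->response.version_response, version_response,
           sizeof(struct vmbus_channel_version_response));
    osd_waitevent_set(msginfo->waitevent);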
diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
index 86b1ddd..a34d713 100644
--- a/drivers/staging/hv/hv.c
+++ b/drivers/staging/hv/hv.c
@@ -28,17 +28,18 @@
 #include "vmbus_private.h"
 
 /* The one and only */
-struct hv_context gHvContext = {
-	.SynICInitialized	= false,
-	.HypercallPage		= NULL,
-	.SignalEventParam	= NULL,
-	.SignalEventBuffer	= NULL,
+struct hv_context hv_context = {
+	.synic_initialized	= false,
+	.hypercall_page		= NULL,
+	.signal_event_param	= NULL,
+	.signal_event_buffer	= NULL,
 };
 
 /*
- * HvQueryHypervisorPresence - Query the cpuid for presense of windows hypervisor
+ * query_hypervisor_presence
+ * - Query the cpuid for presence of windows hypervisor
  */
-static int HvQueryHypervisorPresence(void)
+static int query_hypervisor_presence(void)
 {
 	unsigned int eax;
 	unsigned int ebx;
@@ -50,22 +51,22 @@
 	ebx = 0;
 	ecx = 0;
 	edx = 0;
-	op = HvCpuIdFunctionVersionAndFeatures;
+	op = HVCPUID_VERSION_FEATURES;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 
 	return ecx & HV_PRESENT_BIT;
 }
 
 /*
- * HvQueryHypervisorInfo - Get version info of the windows hypervisor
+ * query_hypervisor_info - Get version info of the windows hypervisor
  */
-static int HvQueryHypervisorInfo(void)
+static int query_hypervisor_info(void)
 {
 	unsigned int eax;
 	unsigned int ebx;
 	unsigned int ecx;
 	unsigned int edx;
-	unsigned int maxLeaf;
+	unsigned int max_leaf;
 	unsigned int op;
 
 	/*
@@ -76,7 +77,7 @@
 	ebx = 0;
 	ecx = 0;
 	edx = 0;
-	op = HvCpuIdFunctionHvVendorAndMaxFunction;
+	op = HVCPUID_VENDOR_MAXFUNCTION;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 
 	DPRINT_INFO(VMBUS, "Vendor ID: %c%c%c%c%c%c%c%c%c%c%c%c",
@@ -93,12 +94,12 @@
 		    ((edx >> 16) & 0xFF),
 		    ((edx >> 24) & 0xFF));
 
-	maxLeaf = eax;
+	max_leaf = eax;
 	eax = 0;
 	ebx = 0;
 	ecx = 0;
 	edx = 0;
-	op = HvCpuIdFunctionHvInterface;
+	op = HVCPUID_INTERFACE;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
 
 	DPRINT_INFO(VMBUS, "Interface ID: %c%c%c%c",
@@ -107,12 +108,12 @@
 		    ((eax >> 16) & 0xFF),
 		    ((eax >> 24) & 0xFF));
 
-	if (maxLeaf >= HvCpuIdFunctionMsHvVersion) {
+	if (max_leaf >= HVCPUID_VERSION) {
 		eax = 0;
 		ebx = 0;
 		ecx = 0;
 		edx = 0;
-		op = HvCpuIdFunctionMsHvVersion;
+		op = HVCPUID_VERSION;
 		cpuid(op, &eax, &ebx, &ecx, &edx);
 		DPRINT_INFO(VMBUS, "OS Build:%d-%d.%d-%d-%d.%d",\
 			    eax,
@@ -122,80 +123,81 @@
 			    edx >> 24,
 			    edx & 0xFFFFFF);
 	}
-	return maxLeaf;
+	return max_leaf;
 }
 
 /*
- * HvDoHypercall - Invoke the specified hypercall
+ * do_hypercall - Invoke the specified hypercall
  */
-static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
+static u64 do_hypercall(u64 control, void *input, void *output)
 {
 #ifdef CONFIG_X86_64
-	u64 hvStatus = 0;
-	u64 inputAddress = (Input) ? virt_to_phys(Input) : 0;
-	u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
-	volatile void *hypercallPage = gHvContext.HypercallPage;
+	u64 hv_status = 0;
+	u64 input_address = (input) ? virt_to_phys(input) : 0;
+	u64 output_address = (output) ? virt_to_phys(output) : 0;
+	volatile void *hypercall_page = hv_context.hypercall_page;
 
 	DPRINT_DBG(VMBUS, "Hypercall <control %llx input phys %llx virt %p "
 		   "output phys %llx virt %p hypercall %p>",
-		   Control, inputAddress, Input,
-		   outputAddress, Output, hypercallPage);
+		   control, input_address, input,
+		   output_address, output, hypercall_page);
 
-	__asm__ __volatile__("mov %0, %%r8" : : "r" (outputAddress) : "r8");
-	__asm__ __volatile__("call *%3" : "=a" (hvStatus) :
-			     "c" (Control), "d" (inputAddress),
-			     "m" (hypercallPage));
+	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
+	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
+			     "c" (control), "d" (input_address),
+			     "m" (hypercall_page));
 
-	DPRINT_DBG(VMBUS, "Hypercall <return %llx>",  hvStatus);
+	DPRINT_DBG(VMBUS, "Hypercall <return %llx>",  hv_status);
 
-	return hvStatus;
+	return hv_status;
 
 #else
 
-	u32 controlHi = Control >> 32;
-	u32 controlLo = Control & 0xFFFFFFFF;
-	u32 hvStatusHi = 1;
-	u32 hvStatusLo = 1;
-	u64 inputAddress = (Input) ? virt_to_phys(Input) : 0;
-	u32 inputAddressHi = inputAddress >> 32;
-	u32 inputAddressLo = inputAddress & 0xFFFFFFFF;
-	u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
-	u32 outputAddressHi = outputAddress >> 32;
-	u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
-	volatile void *hypercallPage = gHvContext.HypercallPage;
+	u32 control_hi = control >> 32;
+	u32 control_lo = control & 0xFFFFFFFF;
+	u32 hv_status_hi = 1;
+	u32 hv_status_lo = 1;
+	u64 input_address = (input) ? virt_to_phys(input) : 0;
+	u32 input_address_hi = input_address >> 32;
+	u32 input_address_lo = input_address & 0xFFFFFFFF;
+	u64 output_address = (output) ? virt_to_phys(output) : 0;
+	u32 output_address_hi = output_address >> 32;
+	u32 output_address_lo = output_address & 0xFFFFFFFF;
+	volatile void *hypercall_page = hv_context.hypercall_page;
 
 	DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
-		   Control, Input, Output);
+		   control, input, output);
 
-	__asm__ __volatile__ ("call *%8" : "=d"(hvStatusHi),
-			      "=a"(hvStatusLo) : "d" (controlHi),
-			      "a" (controlLo), "b" (inputAddressHi),
-			      "c" (inputAddressLo), "D"(outputAddressHi),
-			      "S"(outputAddressLo), "m" (hypercallPage));
+	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
+			      "=a"(hv_status_lo) : "d" (control_hi),
+			      "a" (control_lo), "b" (input_address_hi),
+			      "c" (input_address_lo), "D"(output_address_hi),
+			      "S"(output_address_lo), "m" (hypercall_page));
 
 	DPRINT_DBG(VMBUS, "Hypercall <return %llx>",
-		   hvStatusLo | ((u64)hvStatusHi << 32));
+		   hv_status_lo | ((u64)hv_status_hi << 32));
 
-	return hvStatusLo | ((u64)hvStatusHi << 32);
+	return hv_status_lo | ((u64)hv_status_hi << 32);
 #endif /* !x86_64 */
 }
 
 /*
- * HvInit - Main initialization routine.
+ * hv_init - Main initialization routine.
  *
  * This routine must be called before any other routines in here are called
  */
-int HvInit(void)
+int hv_init(void)
 {
 	int ret = 0;
-	int maxLeaf;
-	union hv_x64_msr_hypercall_contents hypercallMsr;
-	void *virtAddr = NULL;
+	int max_leaf;
+	union hv_x64_msr_hypercall_contents hypercall_msr;
+	void *virtaddr = NULL;
 
-	memset(gHvContext.synICEventPage, 0, sizeof(void *) * MAX_NUM_CPUS);
-	memset(gHvContext.synICMessagePage, 0, sizeof(void *) * MAX_NUM_CPUS);
+	memset(hv_context.synic_event_page, 0, sizeof(void *) * MAX_NUM_CPUS);
+	memset(hv_context.synic_message_page, 0,
+	       sizeof(void *) * MAX_NUM_CPUS);
 
-	if (!HvQueryHypervisorPresence()) {
+	if (!query_hypervisor_presence()) {
 		DPRINT_ERR(VMBUS, "No Windows hypervisor detected!!");
 		goto Cleanup;
 	}
@@ -203,146 +205,148 @@
 	DPRINT_INFO(VMBUS,
 		    "Windows hypervisor detected! Retrieving more info...");
 
-	maxLeaf = HvQueryHypervisorInfo();
+	max_leaf = query_hypervisor_info();
 	/* HvQueryHypervisorFeatures(maxLeaf); */
 
 	/*
 	 * We only support running on top of Hyper-V
 	 */
-	rdmsrl(HV_X64_MSR_GUEST_OS_ID, gHvContext.GuestId);
+	rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
 
-	if (gHvContext.GuestId != 0) {
+	if (hv_context.guestid != 0) {
 		DPRINT_ERR(VMBUS, "Unknown guest id (0x%llx)!!",
-				gHvContext.GuestId);
+				hv_context.guestid);
 		goto Cleanup;
 	}
 
 	/* Write our OS info */
 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
-	gHvContext.GuestId = HV_LINUX_GUEST_ID;
+	hv_context.guestid = HV_LINUX_GUEST_ID;
 
 	/* See if the hypercall page is already set */
-	rdmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
 	/*
 	* Allocate the hypercall page memory
-	* virtAddr = osd_PageAlloc(1);
+	* virtaddr = osd_page_alloc(1);
 	*/
-	virtAddr = osd_VirtualAllocExec(PAGE_SIZE);
+	virtaddr = osd_virtual_alloc_exec(PAGE_SIZE);
 
-	if (!virtAddr) {
+	if (!virtaddr) {
 		DPRINT_ERR(VMBUS,
 			   "unable to allocate hypercall page!!");
 		goto Cleanup;
 	}
 
-	hypercallMsr.Enable = 1;
+	hypercall_msr.enable = 1;
 
-	hypercallMsr.GuestPhysicalAddress = vmalloc_to_pfn(virtAddr);
-	wrmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
+	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
 	/* Confirm that hypercall page did get setup. */
-	hypercallMsr.AsUINT64 = 0;
-	rdmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+	hypercall_msr.as_uint64 = 0;
+	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
-	if (!hypercallMsr.Enable) {
+	if (!hypercall_msr.enable) {
 		DPRINT_ERR(VMBUS, "unable to set hypercall page!!");
 		goto Cleanup;
 	}
 
-	gHvContext.HypercallPage = virtAddr;
+	hv_context.hypercall_page = virtaddr;
 
 	DPRINT_INFO(VMBUS, "Hypercall page VA=%p, PA=0x%0llx",
-		    gHvContext.HypercallPage,
-		    (u64)hypercallMsr.GuestPhysicalAddress << PAGE_SHIFT);
+		    hv_context.hypercall_page,
+		    (u64)hypercall_msr.guest_physical_address << PAGE_SHIFT);
 
 	/* Setup the global signal event param for the signal event hypercall */
-	gHvContext.SignalEventBuffer =
+	hv_context.signal_event_buffer =
 			kmalloc(sizeof(struct hv_input_signal_event_buffer),
 				GFP_KERNEL);
-	if (!gHvContext.SignalEventBuffer)
+	if (!hv_context.signal_event_buffer)
 		goto Cleanup;
 
-	gHvContext.SignalEventParam =
+	hv_context.signal_event_param =
 		(struct hv_input_signal_event *)
-			(ALIGN_UP((unsigned long)gHvContext.SignalEventBuffer,
+			(ALIGN_UP((unsigned long)
+				  hv_context.signal_event_buffer,
 				  HV_HYPERCALL_PARAM_ALIGN));
-	gHvContext.SignalEventParam->ConnectionId.Asu32 = 0;
-	gHvContext.SignalEventParam->ConnectionId.u.Id =
+	hv_context.signal_event_param->connectionid.asu32 = 0;
+	hv_context.signal_event_param->connectionid.u.id =
 						VMBUS_EVENT_CONNECTION_ID;
-	gHvContext.SignalEventParam->FlagNumber = 0;
-	gHvContext.SignalEventParam->RsvdZ = 0;
+	hv_context.signal_event_param->flag_number = 0;
+	hv_context.signal_event_param->rsvdz = 0;
 
 	return ret;
 
 Cleanup:
-	if (virtAddr) {
-		if (hypercallMsr.Enable) {
-			hypercallMsr.AsUINT64 = 0;
-			wrmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
+	if (virtaddr) {
+		if (hypercall_msr.enable) {
+			hypercall_msr.as_uint64 = 0;
+			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 		}
 
-		vfree(virtAddr);
+		vfree(virtaddr);
 	}
 	ret = -1;
 	return ret;
 }
 
 /*
- * HvCleanup - Cleanup routine.
+ * hv_cleanup - Cleanup routine.
  *
  * This routine is called normally during driver unloading or exiting.
  */
-void HvCleanup(void)
+void hv_cleanup(void)
 {
-	union hv_x64_msr_hypercall_contents hypercallMsr;
+	union hv_x64_msr_hypercall_contents hypercall_msr;
 
-	kfree(gHvContext.SignalEventBuffer);
-	gHvContext.SignalEventBuffer = NULL;
-	gHvContext.SignalEventParam = NULL;
+	kfree(hv_context.signal_event_buffer);
+	hv_context.signal_event_buffer = NULL;
+	hv_context.signal_event_param = NULL;
 
-	if (gHvContext.HypercallPage) {
-		hypercallMsr.AsUINT64 = 0;
-		wrmsrl(HV_X64_MSR_HYPERCALL, hypercallMsr.AsUINT64);
-		vfree(gHvContext.HypercallPage);
-		gHvContext.HypercallPage = NULL;
+	if (hv_context.hypercall_page) {
+		hypercall_msr.as_uint64 = 0;
+		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+		vfree(hv_context.hypercall_page);
+		hv_context.hypercall_page = NULL;
 	}
 }
 
 /*
- * HvPostMessage - Post a message using the hypervisor message IPC.
+ * hv_post_message - Post a message using the hypervisor message IPC.
  *
  * This involves a hypercall.
  */
-u16 HvPostMessage(union hv_connection_id connectionId,
-		  enum hv_message_type messageType,
-		  void *payload, size_t payloadSize)
+u16 hv_post_message(union hv_connection_id connection_id,
+		  enum hv_message_type message_type,
+		  void *payload, size_t payload_size)
 {
-	struct alignedInput {
+	struct aligned_input {
 		u64 alignment8;
 		struct hv_input_post_message msg;
 	};
 
-	struct hv_input_post_message *alignedMsg;
+	struct hv_input_post_message *aligned_msg;
 	u16 status;
 	unsigned long addr;
 
-	if (payloadSize > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
+	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
 		return -1;
 
-	addr = (unsigned long)kmalloc(sizeof(struct alignedInput), GFP_ATOMIC);
+	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
 	if (!addr)
 		return -1;
 
-	alignedMsg = (struct hv_input_post_message *)
+	aligned_msg = (struct hv_input_post_message *)
 			(ALIGN_UP(addr, HV_HYPERCALL_PARAM_ALIGN));
 
-	alignedMsg->ConnectionId = connectionId;
-	alignedMsg->MessageType = messageType;
-	alignedMsg->PayloadSize = payloadSize;
-	memcpy((void *)alignedMsg->Payload, payload, payloadSize);
+	aligned_msg->connectionid = connection_id;
+	aligned_msg->message_type = message_type;
+	aligned_msg->payload_size = payload_size;
+	memcpy((void *)aligned_msg->payload, payload, payload_size);
 
-	status = HvDoHypercall(HvCallPostMessage, alignedMsg, NULL) & 0xFFFF;
+	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
+		& 0xFFFF;
 
 	kfree((void *)addr);
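The aligned_input wrapper above deliberately over-allocates: the leading u64 guarantees that ALIGN_UP() can place an 8-byte-aligned struct hv_input_post_message inside the kmalloc'd block, since the hypercall parameters are expected to be HV_HYPERCALL_PARAM_ALIGN-aligned (as the macro's use here suggests). The same idiom written generically, as a sketch rather than part of the patch:

    unsigned long addr;
    struct hv_input_post_message *aligned_msg;

    /* over-allocate by the alignment, then round the start address up */
    addr = (unsigned long)kmalloc(sizeof(*aligned_msg) +
                                  HV_HYPERCALL_PARAM_ALIGN, GFP_ATOMIC);
    if (!addr)
            return -1;
    aligned_msg = (struct hv_input_post_message *)
                    ALIGN_UP(addr, HV_HYPERCALL_PARAM_ALIGN);
    /* ... fill aligned_msg, pass it to do_hypercall(), then ... */
    kfree((void *)addr);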
 
@@ -351,38 +355,40 @@
 
 
 /*
- * HvSignalEvent - Signal an event on the specified connection using the hypervisor event IPC.
+ * hv_signal_event -
+ * Signal an event on the specified connection using the hypervisor event IPC.
  *
  * This involves a hypercall.
  */
-u16 HvSignalEvent(void)
+u16 hv_signal_event(void)
 {
 	u16 status;
 
-	status = HvDoHypercall(HvCallSignalEvent, gHvContext.SignalEventParam,
+	status = do_hypercall(HVCALL_SIGNAL_EVENT,
+			       hv_context.signal_event_param,
 			       NULL) & 0xFFFF;
 	return status;
 }
 
 /*
- * HvSynicInit - Initialize the Synthethic Interrupt Controller.
+ * hv_synic_init - Initialize the Synthetic Interrupt Controller.
  *
  * If it is already initialized by another entity (ie x2v shim), we need to
  * retrieve the initialized message and event pages.  Otherwise, we create and
  * initialize the message and event pages.
  */
-void HvSynicInit(void *irqarg)
+void hv_synic_init(void *irqarg)
 {
 	u64 version;
 	union hv_synic_simp simp;
 	union hv_synic_siefp siefp;
-	union hv_synic_sint sharedSint;
+	union hv_synic_sint shared_sint;
 	union hv_synic_scontrol sctrl;
 
-	u32 irqVector = *((u32 *)(irqarg));
+	u32 irq_vector = *((u32 *)(irqarg));
 	int cpu = smp_processor_id();
 
-	if (!gHvContext.HypercallPage)
+	if (!hv_context.hypercall_page)
 		return;
 
 	/* Check the version */
@@ -390,110 +396,112 @@
 
 	DPRINT_INFO(VMBUS, "SynIC version: %llx", version);
 
-	gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
+	hv_context.synic_message_page[cpu] =
+		(void *)get_zeroed_page(GFP_ATOMIC);
 
-	if (gHvContext.synICMessagePage[cpu] == NULL) {
+	if (hv_context.synic_message_page[cpu] == NULL) {
 		DPRINT_ERR(VMBUS,
 			   "unable to allocate SYNIC message page!!");
 		goto Cleanup;
 	}
 
-	gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
+	hv_context.synic_event_page[cpu] =
+		(void *)get_zeroed_page(GFP_ATOMIC);
 
-	if (gHvContext.synICEventPage[cpu] == NULL) {
+	if (hv_context.synic_event_page[cpu] == NULL) {
 		DPRINT_ERR(VMBUS,
 			   "unable to allocate SYNIC event page!!");
 		goto Cleanup;
 	}
 
 	/* Setup the Synic's message page */
-	rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
-	simp.SimpEnabled = 1;
-	simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
+	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+	simp.simp_enabled = 1;
+	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
 		>> PAGE_SHIFT;
 
-	DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", simp.AsUINT64);
+	DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", simp.as_uint64);
 
-	wrmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
+	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
 
 	/* Setup the Synic's event page */
-	rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
-	siefp.SiefpEnabled = 1;
-	siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
+	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+	siefp.siefp_enabled = 1;
+	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
 		>> PAGE_SHIFT;
 
-	DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", siefp.AsUINT64);
+	DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", siefp.as_uint64);
 
-	wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
+	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
 
 	/* Setup the interception SINT. */
 	/* wrmsrl((HV_X64_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX), */
-	/*	  interceptionSint.AsUINT64); */
+	/*	  interceptionSint.as_uint64); */
 
 	/* Setup the shared SINT. */
-	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
-	sharedSint.AsUINT64 = 0;
-	sharedSint.Vector = irqVector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
-	sharedSint.Masked = false;
-	sharedSint.AutoEoi = true;
+	shared_sint.as_uint64 = 0;
+	shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
+	shared_sint.masked = false;
+	shared_sint.auto_eoi = true;
 
 	DPRINT_DBG(VMBUS, "HV_X64_MSR_SINT1 msr set to: %llx",
-		   sharedSint.AsUINT64);
+		   shared_sint.as_uint64);
 
-	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
 	/* Enable the global synic bit */
-	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.AsUINT64);
-	sctrl.Enable = 1;
+	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
+	sctrl.enable = 1;
 
-	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.AsUINT64);
+	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 
-	gHvContext.SynICInitialized = true;
+	hv_context.synic_initialized = true;
 	return;
 
 Cleanup:
-	if (gHvContext.synICEventPage[cpu])
-		osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+	if (hv_context.synic_event_page[cpu])
+		osd_page_free(hv_context.synic_event_page[cpu], 1);
 
-	if (gHvContext.synICMessagePage[cpu])
-		osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
+	if (hv_context.synic_message_page[cpu])
+		osd_page_free(hv_context.synic_message_page[cpu], 1);
 	return;
 }
 
 /*
- * HvSynicCleanup - Cleanup routine for HvSynicInit().
+ * hv_synic_cleanup - Cleanup routine for hv_synic_init().
  */
-void HvSynicCleanup(void *arg)
+void hv_synic_cleanup(void *arg)
 {
-	union hv_synic_sint sharedSint;
+	union hv_synic_sint shared_sint;
 	union hv_synic_simp simp;
 	union hv_synic_siefp siefp;
 	int cpu = smp_processor_id();
 
-	if (!gHvContext.SynICInitialized)
+	if (!hv_context.synic_initialized)
 		return;
 
-	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
-	sharedSint.Masked = 1;
+	shared_sint.masked = 1;
 
 	/* Need to correctly cleanup in the case of SMP!!! */
 	/* Disable the interrupt */
-	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
+	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
-	rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
-	simp.SimpEnabled = 0;
-	simp.BaseSimpGpa = 0;
+	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
+	simp.simp_enabled = 0;
+	simp.base_simp_gpa = 0;
 
-	wrmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
+	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
 
-	rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
-	siefp.SiefpEnabled = 0;
-	siefp.BaseSiefpGpa = 0;
+	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
+	siefp.siefp_enabled = 0;
+	siefp.base_siefp_gpa = 0;
 
-	wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
+	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
 
-	osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
-	osd_PageFree(gHvContext.synICEventPage[cpu], 1);
+	osd_page_free(hv_context.synic_message_page[cpu], 1);
+	osd_page_free(hv_context.synic_event_page[cpu], 1);
 }
diff --git a/drivers/staging/hv/hv.h b/drivers/staging/hv/hv.h
index 41f5ebb..829aff8 100644
--- a/drivers/staging/hv/hv.h
+++ b/drivers/staging/hv/hv.h
@@ -92,49 +92,49 @@
 
 
 struct hv_input_signal_event_buffer {
-	u64 Align8;
-	struct hv_input_signal_event Event;
+	u64 align8;
+	struct hv_input_signal_event event;
 };
 
 struct hv_context {
 	/* We only support running on top of Hyper-V
 	* So at this point this really can only contain the Hyper-V ID
 	*/
-	u64 GuestId;
+	u64 guestid;
 
-	void *HypercallPage;
+	void *hypercall_page;
 
-	bool SynICInitialized;
+	bool synic_initialized;
 
 	/*
 	 * This is used as an input param to HvCallSignalEvent hypercall. The
 	 * input param is immutable in our usage and must be dynamic mem (vs
 	 * stack or global). */
-	struct hv_input_signal_event_buffer *SignalEventBuffer;
+	struct hv_input_signal_event_buffer *signal_event_buffer;
 	/* 8-bytes aligned of the buffer above */
-	struct hv_input_signal_event *SignalEventParam;
+	struct hv_input_signal_event *signal_event_param;
 
-	void *synICMessagePage[MAX_NUM_CPUS];
-	void *synICEventPage[MAX_NUM_CPUS];
+	void *synic_message_page[MAX_NUM_CPUS];
+	void *synic_event_page[MAX_NUM_CPUS];
 };
 
-extern struct hv_context gHvContext;
+extern struct hv_context hv_context;
 
 
 /* Hv Interface */
 
-extern int HvInit(void);
+extern int hv_init(void);
 
-extern void HvCleanup(void);
+extern void hv_cleanup(void);
 
-extern u16 HvPostMessage(union hv_connection_id connectionId,
-			 enum hv_message_type messageType,
-			 void *payload, size_t payloadSize);
+extern u16 hv_post_message(union hv_connection_id connection_id,
+			 enum hv_message_type message_type,
+			 void *payload, size_t payload_size);
 
-extern u16 HvSignalEvent(void);
+extern u16 hv_signal_event(void);
 
-extern void HvSynicInit(void *irqarg);
+extern void hv_synic_init(void *irqarg);
 
-extern void HvSynicCleanup(void *arg);
+extern void hv_synic_cleanup(void *arg);
 
 #endif /* __HV_H__ */
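A note on how this interface is meant to be used by the vmbus driver (the caller is not part of this diff, so treat the sequence as illustrative): hv_init() runs once at load time, while the SynIC routines are per-cpu and are driven through on_each_cpu(), with the IRQ vector passed as the pointer that hv_synic_init() dereferences.

    u32 vmbus_irq_vector;   /* hypothetical: vector obtained for the vmbus IRQ */

    if (hv_init() != 0)
            return -ENODEV;
    on_each_cpu(hv_synic_init, &vmbus_irq_vector, 1);

    /* ... normal operation ... */

    on_each_cpu(hv_synic_cleanup, NULL, 1);
    hv_cleanup();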
diff --git a/drivers/staging/hv/hv_api.h b/drivers/staging/hv/hv_api.h
index 9eb818e..70e863a 100644
--- a/drivers/staging/hv/hv_api.h
+++ b/drivers/staging/hv/hv_api.h
@@ -510,21 +510,21 @@
 
 /*
  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
+ * is set by CPUID(HVCPUID_VERSION_FEATURES).
  */
 enum hv_cpuid_function {
-	HvCpuIdFunctionVersionAndFeatures		= 0x00000001,
-	HvCpuIdFunctionHvVendorAndMaxFunction		= 0x40000000,
-	HvCpuIdFunctionHvInterface			= 0x40000001,
+	HVCPUID_VERSION_FEATURES		= 0x00000001,
+	HVCPUID_VENDOR_MAXFUNCTION		= 0x40000000,
+	HVCPUID_INTERFACE			= 0x40000001,
 
 	/*
 	 * The remaining functions depend on the value of
-	 * HvCpuIdFunctionInterface
+	 * HVCPUID_INTERFACE
 	 */
-	HvCpuIdFunctionMsHvVersion			= 0x40000002,
-	HvCpuIdFunctionMsHvFeatures			= 0x40000003,
-	HvCpuIdFunctionMsHvEnlightenmentInformation	= 0x40000004,
-	HvCpuIdFunctionMsHvImplementationLimits		= 0x40000005,
+	HVCPUID_VERSION			= 0x40000002,
+	HVCPUID_FEATURES			= 0x40000003,
+	HVCPUID_ENLIGHTENMENT_INFO	= 0x40000004,
+	HVCPUID_IMPLEMENTATION_LIMITS		= 0x40000005,
 };
 
 /* Define the virtual APIC registers */
@@ -575,30 +575,30 @@
 
 /* Define hypervisor message types. */
 enum hv_message_type {
-	HvMessageTypeNone			= 0x00000000,
+	HVMSG_NONE			= 0x00000000,
 
 	/* Memory access messages. */
-	HvMessageTypeUnmappedGpa		= 0x80000000,
-	HvMessageTypeGpaIntercept		= 0x80000001,
+	HVMSG_UNMAPPED_GPA		= 0x80000000,
+	HVMSG_GPA_INTERCEPT		= 0x80000001,
 
 	/* Timer notification messages. */
-	HvMessageTimerExpired			= 0x80000010,
+	HVMSG_TIMER_EXPIRED			= 0x80000010,
 
 	/* Error messages. */
-	HvMessageTypeInvalidVpRegisterValue	= 0x80000020,
-	HvMessageTypeUnrecoverableException	= 0x80000021,
-	HvMessageTypeUnsupportedFeature		= 0x80000022,
+	HVMSG_INVALID_VP_REGISTER_VALUE	= 0x80000020,
+	HVMSG_UNRECOVERABLE_EXCEPTION	= 0x80000021,
+	HVMSG_UNSUPPORTED_FEATURE		= 0x80000022,
 
 	/* Trace buffer complete messages. */
-	HvMessageTypeEventLogBufferComplete	= 0x80000040,
+	HVMSG_EVENTLOG_BUFFERCOMPLETE	= 0x80000040,
 
 	/* Platform-specific processor intercept messages. */
-	HvMessageTypeX64IoPortIntercept		= 0x80010000,
-	HvMessageTypeX64MsrIntercept		= 0x80010001,
-	HvMessageTypeX64CpuidIntercept		= 0x80010002,
-	HvMessageTypeX64ExceptionIntercept	= 0x80010003,
-	HvMessageTypeX64ApicEoi			= 0x80010004,
-	HvMessageTypeX64LegacyFpError		= 0x80010005
+	HVMSG_X64_IOPORT_INTERCEPT		= 0x80010000,
+	HVMSG_X64_MSR_INTERCEPT		= 0x80010001,
+	HVMSG_X64_CPUID_INTERCEPT		= 0x80010002,
+	HVMSG_X64_EXCEPTION_INTERCEPT	= 0x80010003,
+	HVMSG_X64_APIC_EOI			= 0x80010004,
+	HVMSG_X64_LEGACY_FP_ERROR		= 0x80010005
 };
 
 /* Define the number of synthetic interrupt sources. */
@@ -610,103 +610,103 @@
 
 /* Define connection identifier type. */
 union hv_connection_id {
-	u32 Asu32;
+	u32 asu32;
 	struct {
-		u32 Id:24;
-		u32 Reserved:8;
+		u32 id:24;
+		u32 reserved:8;
 	} u;
 };
 
 /* Define port identifier type. */
 union hv_port_id {
-	u32 Asu32;
+	u32 asu32;
 	struct {
-		u32 Id:24;
-		u32 Reserved:8;
+		u32 id:24;
+		u32 reserved:8;
 	} u ;
 };
 
 /* Define port type. */
 enum hv_port_type {
-	HvPortTypeMessage	= 1,
-	HvPortTypeEvent		= 2,
-	HvPortTypeMonitor	= 3
+	HVPORT_MSG	= 1,
+	HVPORT_EVENT		= 2,
+	HVPORT_MONITOR	= 3
 };
 
 /* Define port information structure. */
 struct hv_port_info {
-	enum hv_port_type PortType;
-	u32 Padding;
+	enum hv_port_type port_type;
+	u32 padding;
 	union {
 		struct {
-			u32 TargetSint;
-			u32 TargetVp;
-			u64 RsvdZ;
-		} MessagePortInfo;
+			u32 target_sint;
+			u32 target_vp;
+			u64 rsvdz;
+		} message_port_info;
 		struct {
-			u32 TargetSint;
-			u32 TargetVp;
-			u16 BaseFlagNumber;
-			u16 FlagCount;
-			u32 RsvdZ;
-		} EventPortInfo;
+			u32 target_sint;
+			u32 target_vp;
+			u16 base_flag_bumber;
+			u16 flag_count;
+			u32 rsvdz;
+		} event_port_info;
 		struct {
-			u64 MonitorAddress;
-			u64 RsvdZ;
-		} MonitorPortInfo;
+			u64 monitor_address;
+			u64 rsvdz;
+		} monitor_port_info;
 	};
 };
 
 struct hv_connection_info {
-	enum hv_port_type PortType;
-	u32 Padding;
+	enum hv_port_type port_type;
+	u32 padding;
 	union {
 		struct {
-			u64 RsvdZ;
-		} MessageConnectionInfo;
+			u64 rsvdz;
+		} message_connection_info;
 		struct {
-			u64 RsvdZ;
-		} EventConnectionInfo;
+			u64 rsvdz;
+		} event_connection_info;
 		struct {
-			u64 MonitorAddress;
-		} MonitorConnectionInfo;
+			u64 monitor_address;
+		} monitor_connection_info;
 	};
 };
 
 /* Define synthetic interrupt controller message flags. */
 union hv_message_flags {
-	u8 Asu8;
+	u8 asu8;
 	struct {
-		u8 MessagePending:1;
-		u8 Reserved:7;
+		u8 msg_pending:1;
+		u8 reserved:7;
 	};
 };
 
 /* Define synthetic interrupt controller message header. */
 struct hv_message_header {
-	enum hv_message_type MessageType;
-	u8 PayloadSize;
-	union hv_message_flags MessageFlags;
-	u8 Reserved[2];
+	enum hv_message_type message_type;
+	u8 payload_size;
+	union hv_message_flags message_flags;
+	u8 reserved[2];
 	union {
-		u64 Sender;
-		union hv_port_id Port;
+		u64 sender;
+		union hv_port_id port;
 	};
 };
 
 /* Define timer message payload structure. */
 struct hv_timer_message_payload {
-	u32 TimerIndex;
-	u32 Reserved;
-	u64 ExpirationTime;	/* When the timer expired */
-	u64 DeliveryTime;	/* When the message was delivered */
+	u32 timer_index;
+	u32 reserved;
+	u64 expiration_time;	/* When the timer expired */
+	u64 delivery_time;	/* When the message was delivered */
 };
 
 /* Define synthetic interrupt controller message format. */
 struct hv_message {
-	struct hv_message_header Header;
+	struct hv_message_header header;
 	union {
-		u64 Payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+		u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 	} u ;
 };
 
@@ -715,82 +715,82 @@
 
 /* Define the synthetic interrupt message page layout. */
 struct hv_message_page {
-	struct hv_message SintMessage[HV_SYNIC_SINT_COUNT];
+	struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
 };
 
 /* Define the synthetic interrupt controller event flags format. */
 union hv_synic_event_flags {
-	u8 Flags8[HV_EVENT_FLAGS_BYTE_COUNT];
-	u32 Flags32[HV_EVENT_FLAGS_DWORD_COUNT];
+	u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT];
+	u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
 };
 
 /* Define the synthetic interrupt flags page layout. */
 struct hv_synic_event_flags_page {
-	union hv_synic_event_flags SintEventFlags[HV_SYNIC_SINT_COUNT];
+	union hv_synic_event_flags sintevent_flags[HV_SYNIC_SINT_COUNT];
 };
 
 /* Define SynIC control register. */
 union hv_synic_scontrol {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u64 Enable:1;
-		u64 Reserved:63;
+		u64 enable:1;
+		u64 reserved:63;
 	};
 };
 
 /* Define synthetic interrupt source. */
 union hv_synic_sint {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u64 Vector:8;
-		u64 Reserved1:8;
-		u64 Masked:1;
-		u64 AutoEoi:1;
-		u64 Reserved2:46;
+		u64 vector:8;
+		u64 reserved1:8;
+		u64 masked:1;
+		u64 auto_eoi:1;
+		u64 reserved2:46;
 	};
 };
 
 /* Define the format of the SIMP register */
 union hv_synic_simp {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u64 SimpEnabled:1;
-		u64 Preserved:11;
-		u64 BaseSimpGpa:52;
+		u64 simp_enabled:1;
+		u64 preserved:11;
+		u64 base_simp_gpa:52;
 	};
 };
 
 /* Define the format of the SIEFP register */
 union hv_synic_siefp {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u64 SiefpEnabled:1;
-		u64 Preserved:11;
-		u64 BaseSiefpGpa:52;
+		u64 siefp_enabled:1;
+		u64 preserved:11;
+		u64 base_siefp_gpa:52;
 	};
 };
 
 /* Definitions for the monitored notification facility */
 union hv_monitor_trigger_group {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u32 Pending;
-		u32 Armed;
+		u32 pending;
+		u32 armed;
 	};
 };
 
 struct hv_monitor_parameter {
-	union hv_connection_id ConnectionId;
-	u16 FlagNumber;
-	u16 RsvdZ;
+	union hv_connection_id connectionid;
+	u16 flagnumber;
+	u16 rsvdz;
 };
 
 union hv_monitor_trigger_state {
-	u32 Asu32;
+	u32 asu32;
 
 	struct {
-		u32 GroupEnable:4;
-		u32 RsvdZ:28;
+		u32 group_enable:4;
+		u32 rsvdz:28;
 	};
 };
 
@@ -814,42 +814,42 @@
 /* | 840 | Rsvd4[0]                                     | */
 /* ------------------------------------------------------ */
 struct hv_monitor_page {
-	union hv_monitor_trigger_state TriggerState;
-	u32 RsvdZ1;
+	union hv_monitor_trigger_state trigger_state;
+	u32 rsvdz1;
 
-	union hv_monitor_trigger_group TriggerGroup[4];
-	u64 RsvdZ2[3];
+	union hv_monitor_trigger_group trigger_group[4];
+	u64 rsvdz2[3];
 
-	s32 NextCheckTime[4][32];
+	s32 next_checktime[4][32];
 
-	u16 Latency[4][32];
-	u64 RsvdZ3[32];
+	u16 latency[4][32];
+	u64 rsvdz3[32];
 
-	struct hv_monitor_parameter Parameter[4][32];
+	struct hv_monitor_parameter parameter[4][32];
 
-	u8 RsvdZ4[1984];
+	u8 rsvdz4[1984];
 };
 
 /* Declare the various hypercall operations. */
 enum hv_call_code {
-	HvCallPostMessage	= 0x005c,
-	HvCallSignalEvent	= 0x005d,
+	HVCALL_POST_MESSAGE	= 0x005c,
+	HVCALL_SIGNAL_EVENT	= 0x005d,
 };
 
-/* Definition of the HvPostMessage hypercall input structure. */
+/* Definition of the hv_post_message hypercall input structure. */
 struct hv_input_post_message {
-	union hv_connection_id ConnectionId;
-	u32 Reserved;
-	enum hv_message_type MessageType;
-	u32 PayloadSize;
-	u64 Payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+	union hv_connection_id connectionid;
+	u32 reserved;
+	enum hv_message_type message_type;
+	u32 payload_size;
+	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 };
 
-/* Definition of the HvSignalEvent hypercall input structure. */
+/* Definition of the hv_signal_event hypercall input structure. */
 struct hv_input_signal_event {
-	union hv_connection_id ConnectionId;
-	u16 FlagNumber;
-	u16 RsvdZ;
+	union hv_connection_id connectionid;
+	u16 flag_number;
+	u16 rsvdz;
 };
 
 /*
@@ -859,16 +859,16 @@
 
 /* Version info reported by guest OS's */
 enum hv_guest_os_vendor {
-	HvGuestOsVendorMicrosoft	= 0x0001
+	HVGUESTOS_VENDOR_MICROSOFT	= 0x0001
 };
 
 enum hv_guest_os_microsoft_ids {
-	HvGuestOsMicrosoftUndefined	= 0x00,
-	HvGuestOsMicrosoftMSDOS		= 0x01,
-	HvGuestOsMicrosoftWindows3x	= 0x02,
-	HvGuestOsMicrosoftWindows9x	= 0x03,
-	HvGuestOsMicrosoftWindowsNT	= 0x04,
-	HvGuestOsMicrosoftWindowsCE	= 0x05
+	HVGUESTOS_MICROSOFT_UNDEFINED	= 0x00,
+	HVGUESTOS_MICROSOFT_MSDOS		= 0x01,
+	HVGUESTOS_MICROSOFT_WINDOWS3X	= 0x02,
+	HVGUESTOS_MICROSOFT_WINDOWS9X	= 0x03,
+	HVGUESTOS_MICROSOFT_WINDOWSNT	= 0x04,
+	HVGUESTOS_MICROSOFT_WINDOWSCE	= 0x05
 };
 
 /*
@@ -877,14 +877,14 @@
 #define HV_X64_MSR_GUEST_OS_ID	0x40000000
 
 union hv_x64_msr_guest_os_id_contents {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u64 BuildNumber:16;
-		u64 ServiceVersion:8; /* Service Pack, etc. */
-		u64 MinorVersion:8;
-		u64 MajorVersion:8;
-		u64 OsId:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
-		u64 VendorId:16; /* enum hv_guest_os_vendor */
+		u64 build_number:16;
+		u64 service_version:8; /* Service Pack, etc. */
+		u64 minor_version:8;
+		u64 major_version:8;
+		u64 os_id:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
+		u64 vendor_id:16; /* enum hv_guest_os_vendor */
 	};
 };
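 For reference, a minimal userspace sketch (not part of the patch) of how the renamed bitfields above pack into the 64-bit HV_X64_MSR_GUEST_OS_ID value; only the field widths come from the union, the sample version numbers are made-up placeholders.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t build_number    = 2600;	/* bits  0..15 */
	uint64_t service_version = 3;		/* bits 16..23 */
	uint64_t minor_version   = 6;		/* bits 24..31 */
	uint64_t major_version   = 2;		/* bits 32..39 */
	uint64_t os_id           = 0;		/* bits 40..47 */
	uint64_t vendor_id       = 1;		/* bits 48..63, HVGUESTOS_VENDOR_MICROSOFT */

	uint64_t as_uint64 = build_number |
			     (service_version << 16) |
			     (minor_version   << 24) |
			     (major_version   << 32) |
			     (os_id           << 40) |
			     (vendor_id       << 48);

	printf("guest os id msr = 0x%016" PRIx64 "\n", as_uint64);
	return 0;
}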
 
@@ -894,11 +894,11 @@
 #define HV_X64_MSR_HYPERCALL	0x40000001
 
 union hv_x64_msr_hypercall_contents {
-	u64 AsUINT64;
+	u64 as_uint64;
 	struct {
-		u64 Enable:1;
-		u64 Reserved:11;
-		u64 GuestPhysicalAddress:52;
+		u64 enable:1;
+		u64 reserved:11;
+		u64 guest_physical_address:52;
 	};
 };
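 Likewise, a small sketch of the hypercall MSR packing implied by the bitfield layout above: bit 0 enables the hypercall page, bits 1..11 are reserved, and the 52-bit field is conventionally the guest page frame number of the hypercall page (4 KiB pages assumed; the GPA value here is a hypothetical placeholder).

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical, page-aligned guest physical address of the hypercall page */
	uint64_t hypercall_page_gpa = 0x100000;

	uint64_t enable = 1;				/* bit 0       */
	uint64_t gpfn   = hypercall_page_gpa >> 12;	/* bits 12..63 */

	/* bits 1..11 are the reserved field and stay zero */
	uint64_t as_uint64 = enable | (gpfn << 12);

	printf("HV_X64_MSR_HYPERCALL value = 0x%016" PRIx64 "\n", as_uint64);
	return 0;
}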
 
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index a99e900..0074581 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -38,12 +38,14 @@
 #include "vmbus_api.h"
 #include "utils.h"
 
+static u8 *shut_txf_buf;
+static u8 *time_txf_buf;
+static u8 *hbeat_txf_buf;
 
 static void shutdown_onchannelcallback(void *context)
 {
 	struct vmbus_channel *channel = context;
-	u8 *buf;
-	u32 buflen, recvlen;
+	u32 recvlen;
 	u64 requestid;
 	u8  execute_shutdown = false;
 
@@ -52,24 +54,23 @@
 	struct icmsg_hdr *icmsghdrp;
 	struct icmsg_negotiate *negop = NULL;
 
-	buflen = PAGE_SIZE;
-	buf = kmalloc(buflen, GFP_ATOMIC);
-
-	vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
+	vmbus_recvpacket(channel, shut_txf_buf,
+			 PAGE_SIZE, &recvlen, &requestid);
 
 	if (recvlen > 0) {
 		DPRINT_DBG(VMBUS, "shutdown packet: len=%d, requestid=%lld",
 			   recvlen, requestid);
 
-		icmsghdrp = (struct icmsg_hdr *)&buf[
+		icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
 			sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-			prep_negotiate_resp(icmsghdrp, negop, buf);
+			prep_negotiate_resp(icmsghdrp, negop, shut_txf_buf);
 		} else {
-			shutdown_msg = (struct shutdown_msg_data *)&buf[
-				sizeof(struct vmbuspipe_hdr) +
-				sizeof(struct icmsg_hdr)];
+			shutdown_msg =
+				(struct shutdown_msg_data *)&shut_txf_buf[
+					sizeof(struct vmbuspipe_hdr) +
+					sizeof(struct icmsg_hdr)];
 
 			switch (shutdown_msg->flags) {
 			case 0:
@@ -93,13 +94,11 @@
 		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
 			| ICMSGHDRFLAG_RESPONSE;
 
-		vmbus_sendpacket(channel, buf,
+		vmbus_sendpacket(channel, shut_txf_buf,
 				       recvlen, requestid,
 				       VmbusPacketTypeDataInBand, 0);
 	}
 
-	kfree(buf);
-
 	if (execute_shutdown == true)
 		orderly_poweroff(false);
 }
@@ -150,28 +149,25 @@
 static void timesync_onchannelcallback(void *context)
 {
 	struct vmbus_channel *channel = context;
-	u8 *buf;
-	u32 buflen, recvlen;
+	u32 recvlen;
 	u64 requestid;
 	struct icmsg_hdr *icmsghdrp;
 	struct ictimesync_data *timedatap;
 
-	buflen = PAGE_SIZE;
-	buf = kmalloc(buflen, GFP_ATOMIC);
-
-	vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
+	vmbus_recvpacket(channel, time_txf_buf,
+			 PAGE_SIZE, &recvlen, &requestid);
 
 	if (recvlen > 0) {
 		DPRINT_DBG(VMBUS, "timesync packet: recvlen=%d, requestid=%lld",
 			recvlen, requestid);
 
-		icmsghdrp = (struct icmsg_hdr *)&buf[
+		icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
 				sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-			prep_negotiate_resp(icmsghdrp, NULL, buf);
+			prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf);
 		} else {
-			timedatap = (struct ictimesync_data *)&buf[
+			timedatap = (struct ictimesync_data *)&time_txf_buf[
 				sizeof(struct vmbuspipe_hdr) +
 				sizeof(struct icmsg_hdr)];
 			adj_guesttime(timedatap->parenttime, timedatap->flags);
@@ -180,12 +176,10 @@
 		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
 			| ICMSGHDRFLAG_RESPONSE;
 
-		vmbus_sendpacket(channel, buf,
+		vmbus_sendpacket(channel, time_txf_buf,
 				recvlen, requestid,
 				VmbusPacketTypeDataInBand, 0);
 	}
-
-	kfree(buf);
 }
 
 /*
@@ -196,30 +190,28 @@
 static void heartbeat_onchannelcallback(void *context)
 {
 	struct vmbus_channel *channel = context;
-	u8 *buf;
-	u32 buflen, recvlen;
+	u32 recvlen;
 	u64 requestid;
 	struct icmsg_hdr *icmsghdrp;
 	struct heartbeat_msg_data *heartbeat_msg;
 
-	buflen = PAGE_SIZE;
-	buf = kmalloc(buflen, GFP_ATOMIC);
-
-	vmbus_recvpacket(channel, buf, buflen, &recvlen, &requestid);
+	vmbus_recvpacket(channel, hbeat_txf_buf,
+			 PAGE_SIZE, &recvlen, &requestid);
 
 	if (recvlen > 0) {
 		DPRINT_DBG(VMBUS, "heartbeat packet: len=%d, requestid=%lld",
 			   recvlen, requestid);
 
-		icmsghdrp = (struct icmsg_hdr *)&buf[
+		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
 				sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
-			prep_negotiate_resp(icmsghdrp, NULL, buf);
+			prep_negotiate_resp(icmsghdrp, NULL, hbeat_txf_buf);
 		} else {
-			heartbeat_msg = (struct heartbeat_msg_data *)&buf[
-				sizeof(struct vmbuspipe_hdr) +
-				sizeof(struct icmsg_hdr)];
+			heartbeat_msg =
+				(struct heartbeat_msg_data *)&hbeat_txf_buf[
+					sizeof(struct vmbuspipe_hdr) +
+					sizeof(struct icmsg_hdr)];
 
 			DPRINT_DBG(VMBUS, "heartbeat seq = %lld",
 				   heartbeat_msg->seq_num);
@@ -230,12 +222,10 @@
 		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
 			| ICMSGHDRFLAG_RESPONSE;
 
-		vmbus_sendpacket(channel, buf,
+		vmbus_sendpacket(channel, hbeat_txf_buf,
 				       recvlen, requestid,
 				       VmbusPacketTypeDataInBand, 0);
 	}
-
-	kfree(buf);
 }
 
 static const struct pci_device_id __initconst
@@ -268,15 +258,28 @@
 	if (!dmi_check_system(hv_utils_dmi_table))
 		return -ENODEV;
 
-	hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
+	shut_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	time_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	hbeat_txf_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+	if (!shut_txf_buf || !time_txf_buf || !hbeat_txf_buf) {
+		printk(KERN_INFO
+		       "Unable to allocate memory for receive buffer\n");
+		kfree(shut_txf_buf);
+		kfree(time_txf_buf);
+		kfree(hbeat_txf_buf);
+		return -ENOMEM;
+	}
+
+	hv_cb_utils[HV_SHUTDOWN_MSG].channel->onchannel_callback =
 		&shutdown_onchannelcallback;
 	hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback;
 
-	hv_cb_utils[HV_TIMESYNC_MSG].channel->OnChannelCallback =
+	hv_cb_utils[HV_TIMESYNC_MSG].channel->onchannel_callback =
 		&timesync_onchannelcallback;
 	hv_cb_utils[HV_TIMESYNC_MSG].callback = &timesync_onchannelcallback;
 
-	hv_cb_utils[HV_HEARTBEAT_MSG].channel->OnChannelCallback =
+	hv_cb_utils[HV_HEARTBEAT_MSG].channel->onchannel_callback =
 		&heartbeat_onchannelcallback;
 	hv_cb_utils[HV_HEARTBEAT_MSG].callback = &heartbeat_onchannelcallback;
 
@@ -287,17 +290,21 @@
 {
 	printk(KERN_INFO "De-Registered HyperV Utility Driver\n");
 
-	hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
+	hv_cb_utils[HV_SHUTDOWN_MSG].channel->onchannel_callback =
 		&chn_cb_negotiate;
 	hv_cb_utils[HV_SHUTDOWN_MSG].callback = &chn_cb_negotiate;
 
-	hv_cb_utils[HV_TIMESYNC_MSG].channel->OnChannelCallback =
+	hv_cb_utils[HV_TIMESYNC_MSG].channel->onchannel_callback =
 		&chn_cb_negotiate;
 	hv_cb_utils[HV_TIMESYNC_MSG].callback = &chn_cb_negotiate;
 
-	hv_cb_utils[HV_HEARTBEAT_MSG].channel->OnChannelCallback =
+	hv_cb_utils[HV_HEARTBEAT_MSG].channel->onchannel_callback =
 		&chn_cb_negotiate;
 	hv_cb_utils[HV_HEARTBEAT_MSG].callback = &chn_cb_negotiate;
+
+	kfree(shut_txf_buf);
+	kfree(time_txf_buf);
+	kfree(hbeat_txf_buf);
 }
 
 module_init(init_hyperv_utils);
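 A userspace analogue (assumptions: malloc/free stand in for kmalloc/kfree and a fixed 4096-byte size stands in for PAGE_SIZE) of the allocation pattern the patch moves to: the three receive buffers are allocated once at init, reused by the channel callbacks, and freed at exit; because free(NULL), like kfree(NULL), is a no-op, the error path can free all three unconditionally.

#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096	/* stands in for PAGE_SIZE */

static unsigned char *shut_buf, *time_buf, *hbeat_buf;

static int util_init(void)
{
	shut_buf  = malloc(BUF_SIZE);
	time_buf  = malloc(BUF_SIZE);
	hbeat_buf = malloc(BUF_SIZE);

	if (!shut_buf || !time_buf || !hbeat_buf) {
		/* free(NULL) is a no-op, so no need to track which one failed */
		free(shut_buf);
		free(time_buf);
		free(hbeat_buf);
		return -1;
	}
	return 0;
}

static void util_exit(void)
{
	free(shut_buf);
	free(time_buf);
	free(hbeat_buf);
}

int main(void)
{
	if (util_init())
		return 1;
	/* ... callbacks would reuse the three buffers here ... */
	util_exit();
	return 0;
}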
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index 4c2632c..df9cd13 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -31,147 +31,149 @@
 
 
 /* Globals */
-static const char *gDriverName = "netvsc";
+static const char *driver_name = "netvsc";
 
 /* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
-static const struct hv_guid gNetVscDeviceType = {
+static const struct hv_guid netvsc_device_type = {
 	.data = {
 		0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
 		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
 	}
 };
 
-static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo);
+static int netvsc_device_add(struct hv_device *device, void *additional_info);
 
-static int NetVscOnDeviceRemove(struct hv_device *Device);
+static int netvsc_device_remove(struct hv_device *device);
 
-static void NetVscOnCleanup(struct hv_driver *Driver);
+static void netvsc_cleanup(struct hv_driver *driver);
 
-static void NetVscOnChannelCallback(void *context);
+static void netvsc_channel_cb(void *context);
 
-static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device);
+static int netvsc_init_send_buf(struct hv_device *device);
 
-static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device);
+static int netvsc_init_recv_buf(struct hv_device *device);
 
-static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice);
+static int netvsc_destroy_send_buf(struct netvsc_device *net_device);
 
-static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice);
+static int netvsc_destroy_recv_buf(struct netvsc_device *net_device);
 
-static int NetVscConnectToVsp(struct hv_device *Device);
+static int netvsc_connect_vsp(struct hv_device *device);
 
-static void NetVscOnSendCompletion(struct hv_device *Device,
-				   struct vmpacket_descriptor *Packet);
+static void netvsc_send_completion(struct hv_device *device,
+				   struct vmpacket_descriptor *packet);
 
-static int NetVscOnSend(struct hv_device *Device,
-			struct hv_netvsc_packet *Packet);
+static int netvsc_send(struct hv_device *device,
+			struct hv_netvsc_packet *packet);
 
-static void NetVscOnReceive(struct hv_device *Device,
-			    struct vmpacket_descriptor *Packet);
+static void netvsc_receive(struct hv_device *device,
+			    struct vmpacket_descriptor *packet);
 
-static void NetVscOnReceiveCompletion(void *Context);
+static void netvsc_receive_completion(void *context);
 
-static void NetVscSendReceiveCompletion(struct hv_device *Device,
-					u64 TransactionId);
+static void netvsc_send_recv_completion(struct hv_device *device,
+					u64 transaction_id);
 
 
-static struct netvsc_device *AllocNetDevice(struct hv_device *Device)
+static struct netvsc_device *alloc_net_device(struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 
-	netDevice = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
-	if (!netDevice)
+	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
+	if (!net_device)
 		return NULL;
 
 	/* Set to 2 to allow both inbound and outbound traffic */
-	atomic_cmpxchg(&netDevice->RefCount, 0, 2);
+	atomic_cmpxchg(&net_device->refcnt, 0, 2);
 
-	netDevice->Device = Device;
-	Device->Extension = netDevice;
+	net_device->dev = device;
+	device->Extension = net_device;
 
-	return netDevice;
+	return net_device;
 }
 
-static void FreeNetDevice(struct netvsc_device *Device)
+static void free_net_device(struct netvsc_device *device)
 {
-	WARN_ON(atomic_read(&Device->RefCount) == 0);
-	Device->Device->Extension = NULL;
-	kfree(Device);
+	WARN_ON(atomic_read(&device->refcnt) == 0);
+	device->dev->Extension = NULL;
+	kfree(device);
 }
 
 
 /* Get the net device object iff exists and its refcount > 1 */
-static struct netvsc_device *GetOutboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 
-	netDevice = Device->Extension;
-	if (netDevice && atomic_read(&netDevice->RefCount) > 1)
-		atomic_inc(&netDevice->RefCount);
+	net_device = device->Extension;
+	if (net_device && atomic_read(&net_device->refcnt) > 1)
+		atomic_inc(&net_device->refcnt);
 	else
-		netDevice = NULL;
+		net_device = NULL;
 
-	return netDevice;
+	return net_device;
 }
 
 /* Get the net device object iff exists and its refcount > 0 */
-static struct netvsc_device *GetInboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 
-	netDevice = Device->Extension;
-	if (netDevice && atomic_read(&netDevice->RefCount))
-		atomic_inc(&netDevice->RefCount);
+	net_device = device->Extension;
+	if (net_device && atomic_read(&net_device->refcnt))
+		atomic_inc(&net_device->refcnt);
 	else
-		netDevice = NULL;
+		net_device = NULL;
 
-	return netDevice;
+	return net_device;
 }
 
-static void PutNetDevice(struct hv_device *Device)
+static void put_net_device(struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 
-	netDevice = Device->Extension;
+	net_device = device->Extension;
 	/* ASSERT(netDevice); */
 
-	atomic_dec(&netDevice->RefCount);
+	atomic_dec(&net_device->refcnt);
 }
 
-static struct netvsc_device *ReleaseOutboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *release_outbound_net_device(
+		struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 
-	netDevice = Device->Extension;
-	if (netDevice == NULL)
+	net_device = device->Extension;
+	if (net_device == NULL)
 		return NULL;
 
 	/* Busy wait until the ref drop to 2, then set it to 1 */
-	while (atomic_cmpxchg(&netDevice->RefCount, 2, 1) != 2)
+	while (atomic_cmpxchg(&net_device->refcnt, 2, 1) != 2)
 		udelay(100);
 
-	return netDevice;
+	return net_device;
 }
 
-static struct netvsc_device *ReleaseInboundNetDevice(struct hv_device *Device)
+static struct netvsc_device *release_inbound_net_device(
+		struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 
-	netDevice = Device->Extension;
-	if (netDevice == NULL)
+	net_device = device->Extension;
+	if (net_device == NULL)
 		return NULL;
 
 	/* Busy wait until the ref drop to 1, then set it to 0 */
-	while (atomic_cmpxchg(&netDevice->RefCount, 1, 0) != 1)
+	while (atomic_cmpxchg(&net_device->refcnt, 1, 0) != 1)
 		udelay(100);
 
-	Device->Extension = NULL;
-	return netDevice;
+	device->Extension = NULL;
+	return net_device;
 }
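 A standalone sketch (C11 atomics used as a stand-in for the kernel's atomic_t/atomic_cmpxchg) of the refcount state machine these helpers implement: 2 means inbound and outbound traffic are allowed, 1 means outbound has been stopped, 0 means the device is fully released.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refcnt;

static void net_device_alloc(void)
{
	int expected = 0;

	/* set to 2 to allow both inbound and outbound traffic */
	atomic_compare_exchange_strong(&refcnt, &expected, 2);
}

static bool get_outbound(void)
{
	/* mirrors the driver's check-then-increment; the two steps are not
	 * one atomic operation, this only illustrates the state encoding */
	if (atomic_load(&refcnt) > 1) {
		atomic_fetch_add(&refcnt, 1);
		return true;
	}
	return false;
}

static void put_net_device(void)
{
	atomic_fetch_sub(&refcnt, 1);
}

static void release_outbound(void)
{
	int expected;

	/* busy-wait until only the base references remain, then drop 2 -> 1
	 * (the driver additionally udelay()s between attempts) */
	do {
		expected = 2;
	} while (!atomic_compare_exchange_weak(&refcnt, &expected, 1));
}

int main(void)
{
	net_device_alloc();
	if (get_outbound())
		put_net_device();
	release_outbound();
	printf("refcnt now %d\n", atomic_load(&refcnt));
	return 0;
}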
 
 /*
- * NetVscInitialize - Main entry point
+ * netvsc_initialize - Main entry point
  */
-int NetVscInitialize(struct hv_driver *drv)
+int netvsc_initialize(struct hv_driver *drv)
 {
 	struct netvsc_driver *driver = (struct netvsc_driver *)drv;
 
@@ -185,8 +187,8 @@
 	/* Make sure we are at least 2 pages since 1 page is used for control */
 	/* ASSERT(driver->RingBufferSize >= (PAGE_SIZE << 1)); */
 
-	drv->name = gDriverName;
-	memcpy(&drv->deviceType, &gNetVscDeviceType, sizeof(struct hv_guid));
+	drv->name = driver_name;
+	memcpy(&drv->deviceType, &netvsc_device_type, sizeof(struct hv_guid));
 
 	/* Make sure it is set by the caller */
 	/* FIXME: These probably should still be tested in some way */
@@ -194,24 +196,24 @@
 	/* ASSERT(driver->OnLinkStatusChanged); */
 
 	/* Setup the dispatch table */
-	driver->Base.OnDeviceAdd	= NetVscOnDeviceAdd;
-	driver->Base.OnDeviceRemove	= NetVscOnDeviceRemove;
-	driver->Base.OnCleanup		= NetVscOnCleanup;
+	driver->base.OnDeviceAdd	= netvsc_device_add;
+	driver->base.OnDeviceRemove	= netvsc_device_remove;
+	driver->base.OnCleanup		= netvsc_cleanup;
 
-	driver->OnSend			= NetVscOnSend;
+	driver->send			= netvsc_send;
 
-	RndisFilterInit(driver);
+	rndis_filter_init(driver);
 	return 0;
 }
 
-static int NetVscInitializeReceiveBufferWithNetVsp(struct hv_device *Device)
+static int netvsc_init_recv_buf(struct hv_device *device)
 {
 	int ret = 0;
-	struct netvsc_device *netDevice;
-	struct nvsp_message *initPacket;
+	struct netvsc_device *net_device;
+	struct nvsp_message *init_packet;
 
-	netDevice = GetOutboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = get_outbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "unable to get net device..."
 			   "device being destroyed?");
 		return -1;
@@ -220,12 +222,12 @@
 	/* page-size granularity */
 	/* ASSERT((netDevice->ReceiveBufferSize & (PAGE_SIZE - 1)) == 0); */
 
-	netDevice->ReceiveBuffer =
-		osd_PageAlloc(netDevice->ReceiveBufferSize >> PAGE_SHIFT);
-	if (!netDevice->ReceiveBuffer) {
+	net_device->recv_buf =
+		osd_page_alloc(net_device->recv_buf_size >> PAGE_SHIFT);
+	if (!net_device->recv_buf) {
 		DPRINT_ERR(NETVSC,
 			   "unable to allocate receive buffer of size %d",
-			   netDevice->ReceiveBufferSize);
+			   net_device->recv_buf_size);
 		ret = -1;
 		goto Cleanup;
 	}
@@ -240,32 +242,34 @@
 	 * channel.  Note: This call uses the vmbus connection rather
 	 * than the channel to establish the gpadl handle.
 	 */
-	ret = vmbus_establish_gpadl(Device->channel, netDevice->ReceiveBuffer,
-				    netDevice->ReceiveBufferSize,
-				    &netDevice->ReceiveBufferGpadlHandle);
+	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
+				    net_device->recv_buf_size,
+				    &net_device->recv_buf_gpadl_handle);
 	if (ret != 0) {
 		DPRINT_ERR(NETVSC,
 			   "unable to establish receive buffer's gpadl");
 		goto Cleanup;
 	}
 
-	/* osd_WaitEventWait(ext->ChannelInitEvent); */
+	/* osd_waitevent_wait(ext->ChannelInitEvent); */
 
 	/* Notify the NetVsp of the gpadl handle */
 	DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendReceiveBuffer...");
 
-	initPacket = &netDevice->ChannelInitPacket;
+	init_packet = &net_device->channel_init_pkt;
 
-	memset(initPacket, 0, sizeof(struct nvsp_message));
+	memset(init_packet, 0, sizeof(struct nvsp_message));
 
-	initPacket->Header.MessageType = NvspMessage1TypeSendReceiveBuffer;
-	initPacket->Messages.Version1Messages.SendReceiveBuffer.GpadlHandle = netDevice->ReceiveBufferGpadlHandle;
-	initPacket->Messages.Version1Messages.SendReceiveBuffer.Id = NETVSC_RECEIVE_BUFFER_ID;
+	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
+	init_packet->msg.v1_msg.send_recv_buf.
+		gpadl_handle = net_device->recv_buf_gpadl_handle;
+	init_packet->msg.v1_msg.
+		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
 
 	/* Send the gpadl notification request */
-	ret = vmbus_sendpacket(Device->channel, initPacket,
+	ret = vmbus_sendpacket(device->channel, init_packet,
 			       sizeof(struct nvsp_message),
-			       (unsigned long)initPacket,
+			       (unsigned long)init_packet,
 			       VmbusPacketTypeDataInBand,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (ret != 0) {
@@ -274,13 +278,15 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(netDevice->ChannelInitEvent);
+	osd_waitevent_wait(net_device->channel_init_event);
 
 	/* Check the response */
-	if (initPacket->Messages.Version1Messages.SendReceiveBufferComplete.Status != NvspStatusSuccess) {
+	if (init_packet->msg.v1_msg.
+	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
 		DPRINT_ERR(NETVSC, "Unable to complete receive buffer "
 			   "initialization with NetVsp - status %d",
-			   initPacket->Messages.Version1Messages.SendReceiveBufferComplete.Status);
+			   init_packet->msg.v1_msg.
+			   send_recv_buf_complete.status);
 		ret = -1;
 		goto Cleanup;
 	}
@@ -289,32 +295,36 @@
 	/* ASSERT(netDevice->ReceiveSectionCount == 0); */
 	/* ASSERT(netDevice->ReceiveSections == NULL); */
 
-	netDevice->ReceiveSectionCount = initPacket->Messages.Version1Messages.SendReceiveBufferComplete.NumSections;
+	net_device->recv_section_cnt = init_packet->msg.
+		v1_msg.send_recv_buf_complete.num_sections;
 
-	netDevice->ReceiveSections = kmalloc(netDevice->ReceiveSectionCount * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
-	if (netDevice->ReceiveSections == NULL) {
+	net_device->recv_section = kmalloc(net_device->recv_section_cnt
+		* sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
+	if (net_device->recv_section == NULL) {
 		ret = -1;
 		goto Cleanup;
 	}
 
-	memcpy(netDevice->ReceiveSections,
-		initPacket->Messages.Version1Messages.SendReceiveBufferComplete.Sections,
-		netDevice->ReceiveSectionCount * sizeof(struct nvsp_1_receive_buffer_section));
+	memcpy(net_device->recv_section,
+		init_packet->msg.v1_msg.
+	       send_recv_buf_complete.sections,
+		net_device->recv_section_cnt *
+	       sizeof(struct nvsp_1_receive_buffer_section));
 
 	DPRINT_INFO(NETVSC, "Receive sections info (count %d, offset %d, "
 		    "endoffset %d, suballoc size %d, num suballocs %d)",
-		    netDevice->ReceiveSectionCount,
-		    netDevice->ReceiveSections[0].Offset,
-		    netDevice->ReceiveSections[0].EndOffset,
-		    netDevice->ReceiveSections[0].SubAllocationSize,
-		    netDevice->ReceiveSections[0].NumSubAllocations);
+		    net_device->recv_section_cnt,
+		    net_device->recv_section[0].offset,
+		    net_device->recv_section[0].end_offset,
+		    net_device->recv_section[0].sub_alloc_size,
+		    net_device->recv_section[0].num_sub_allocs);
 
 	/*
 	 * For 1st release, there should only be 1 section that represents the
 	 * entire receive buffer
 	 */
-	if (netDevice->ReceiveSectionCount != 1 ||
-	    netDevice->ReceiveSections->Offset != 0) {
+	if (net_device->recv_section_cnt != 1 ||
+	    net_device->recv_section->offset != 0) {
 		ret = -1;
 		goto Cleanup;
 	}
@@ -322,26 +332,26 @@
 	goto Exit;
 
 Cleanup:
-	NetVscDestroyReceiveBuffer(netDevice);
+	netvsc_destroy_recv_buf(net_device);
 
 Exit:
-	PutNetDevice(Device);
+	put_net_device(device);
 	return ret;
 }
 
-static int NetVscInitializeSendBufferWithNetVsp(struct hv_device *Device)
+static int netvsc_init_send_buf(struct hv_device *device)
 {
 	int ret = 0;
-	struct netvsc_device *netDevice;
-	struct nvsp_message *initPacket;
+	struct netvsc_device *net_device;
+	struct nvsp_message *init_packet;
 
-	netDevice = GetOutboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = get_outbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "unable to get net device..."
 			   "device being destroyed?");
 		return -1;
 	}
-	if (netDevice->SendBufferSize <= 0) {
+	if (net_device->send_buf_size <= 0) {
 		ret = -EINVAL;
 		goto Cleanup;
 	}
@@ -349,11 +359,11 @@
 	/* page-size granularity */
 	/* ASSERT((netDevice->SendBufferSize & (PAGE_SIZE - 1)) == 0); */
 
-	netDevice->SendBuffer =
-		osd_PageAlloc(netDevice->SendBufferSize >> PAGE_SHIFT);
-	if (!netDevice->SendBuffer) {
+	net_device->send_buf =
+		osd_page_alloc(net_device->send_buf_size >> PAGE_SHIFT);
+	if (!net_device->send_buf) {
 		DPRINT_ERR(NETVSC, "unable to allocate send buffer of size %d",
-			   netDevice->SendBufferSize);
+			   net_device->send_buf_size);
 		ret = -1;
 		goto Cleanup;
 	}
@@ -367,31 +377,33 @@
 	 * channel.  Note: This call uses the vmbus connection rather
 	 * than the channel to establish the gpadl handle.
 	 */
-	ret = vmbus_establish_gpadl(Device->channel, netDevice->SendBuffer,
-				    netDevice->SendBufferSize,
-				    &netDevice->SendBufferGpadlHandle);
+	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
+				    net_device->send_buf_size,
+				    &net_device->send_buf_gpadl_handle);
 	if (ret != 0) {
 		DPRINT_ERR(NETVSC, "unable to establish send buffer's gpadl");
 		goto Cleanup;
 	}
 
-	/* osd_WaitEventWait(ext->ChannelInitEvent); */
+	/* osd_waitevent_wait(ext->ChannelInitEvent); */
 
 	/* Notify the NetVsp of the gpadl handle */
 	DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendSendBuffer...");
 
-	initPacket = &netDevice->ChannelInitPacket;
+	init_packet = &net_device->channel_init_pkt;
 
-	memset(initPacket, 0, sizeof(struct nvsp_message));
+	memset(init_packet, 0, sizeof(struct nvsp_message));
 
-	initPacket->Header.MessageType = NvspMessage1TypeSendSendBuffer;
-	initPacket->Messages.Version1Messages.SendReceiveBuffer.GpadlHandle = netDevice->SendBufferGpadlHandle;
-	initPacket->Messages.Version1Messages.SendReceiveBuffer.Id = NETVSC_SEND_BUFFER_ID;
+	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
+	init_packet->msg.v1_msg.send_recv_buf.
+		gpadl_handle = net_device->send_buf_gpadl_handle;
+	init_packet->msg.v1_msg.send_recv_buf.id =
+		NETVSC_SEND_BUFFER_ID;
 
 	/* Send the gpadl notification request */
-	ret = vmbus_sendpacket(Device->channel, initPacket,
+	ret = vmbus_sendpacket(device->channel, init_packet,
 			       sizeof(struct nvsp_message),
-			       (unsigned long)initPacket,
+			       (unsigned long)init_packet,
 			       VmbusPacketTypeDataInBand,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (ret != 0) {
@@ -400,32 +412,35 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(netDevice->ChannelInitEvent);
+	osd_waitevent_wait(net_device->channel_init_event);
 
 	/* Check the response */
-	if (initPacket->Messages.Version1Messages.SendSendBufferComplete.Status != NvspStatusSuccess) {
+	if (init_packet->msg.v1_msg.
+	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
 		DPRINT_ERR(NETVSC, "Unable to complete send buffer "
 			   "initialization with NetVsp - status %d",
-			   initPacket->Messages.Version1Messages.SendSendBufferComplete.Status);
+			   init_packet->msg.v1_msg.
+			   send_send_buf_complete.status);
 		ret = -1;
 		goto Cleanup;
 	}
 
-	netDevice->SendSectionSize = initPacket->Messages.Version1Messages.SendSendBufferComplete.SectionSize;
+	net_device->send_section_size = init_packet->
+	msg.v1_msg.send_send_buf_complete.section_size;
 
 	goto Exit;
 
 Cleanup:
-	NetVscDestroySendBuffer(netDevice);
+	netvsc_destroy_send_buf(net_device);
 
 Exit:
-	PutNetDevice(Device);
+	put_net_device(device);
 	return ret;
 }
 
-static int NetVscDestroyReceiveBuffer(struct netvsc_device *NetDevice)
+static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
 {
-	struct nvsp_message *revokePacket;
+	struct nvsp_message *revoke_packet;
 	int ret = 0;
 
 	/*
@@ -434,20 +449,23 @@
 	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
 	 * to send a revoke msg here
 	 */
-	if (NetDevice->ReceiveSectionCount) {
+	if (net_device->recv_section_cnt) {
 		DPRINT_INFO(NETVSC,
 			    "Sending NvspMessage1TypeRevokeReceiveBuffer...");
 
 		/* Send the revoke receive buffer */
-		revokePacket = &NetDevice->RevokePacket;
-		memset(revokePacket, 0, sizeof(struct nvsp_message));
+		revoke_packet = &net_device->revoke_packet;
+		memset(revoke_packet, 0, sizeof(struct nvsp_message));
 
-		revokePacket->Header.MessageType = NvspMessage1TypeRevokeReceiveBuffer;
-		revokePacket->Messages.Version1Messages.RevokeReceiveBuffer.Id = NETVSC_RECEIVE_BUFFER_ID;
+		revoke_packet->hdr.msg_type =
+			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
+		revoke_packet->msg.v1_msg.
+		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
 
-		ret = vmbus_sendpacket(NetDevice->Device->channel, revokePacket,
+		ret = vmbus_sendpacket(net_device->dev->channel,
+				       revoke_packet,
 				       sizeof(struct nvsp_message),
-				       (unsigned long)revokePacket,
+				       (unsigned long)revoke_packet,
 				       VmbusPacketTypeDataInBand, 0);
 		/*
 		 * If we failed here, we might as well return and
@@ -461,11 +479,11 @@
 	}
 
 	/* Teardown the gpadl on the vsp end */
-	if (NetDevice->ReceiveBufferGpadlHandle) {
+	if (net_device->recv_buf_gpadl_handle) {
 		DPRINT_INFO(NETVSC, "Tearing down receive buffer's GPADL...");
 
-		ret = vmbus_teardown_gpadl(NetDevice->Device->channel,
-					   NetDevice->ReceiveBufferGpadlHandle);
+		ret = vmbus_teardown_gpadl(net_device->dev->channel,
+			   net_device->recv_buf_gpadl_handle);
 
 		/* If we failed here, we might as well return and have a leak rather than continue and a bugchk */
 		if (ret != 0) {
@@ -473,30 +491,30 @@
 				   "unable to teardown receive buffer's gpadl");
 			return -1;
 		}
-		NetDevice->ReceiveBufferGpadlHandle = 0;
+		net_device->recv_buf_gpadl_handle = 0;
 	}
 
-	if (NetDevice->ReceiveBuffer) {
+	if (net_device->recv_buf) {
 		DPRINT_INFO(NETVSC, "Freeing up receive buffer...");
 
 		/* Free up the receive buffer */
-		osd_PageFree(NetDevice->ReceiveBuffer,
-			     NetDevice->ReceiveBufferSize >> PAGE_SHIFT);
-		NetDevice->ReceiveBuffer = NULL;
+		osd_page_free(net_device->recv_buf,
+			     net_device->recv_buf_size >> PAGE_SHIFT);
+		net_device->recv_buf = NULL;
 	}
 
-	if (NetDevice->ReceiveSections) {
-		NetDevice->ReceiveSectionCount = 0;
-		kfree(NetDevice->ReceiveSections);
-		NetDevice->ReceiveSections = NULL;
+	if (net_device->recv_section) {
+		net_device->recv_section_cnt = 0;
+		kfree(net_device->recv_section);
+		net_device->recv_section = NULL;
 	}
 
 	return ret;
 }
 
-static int NetVscDestroySendBuffer(struct netvsc_device *NetDevice)
+static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
 {
-	struct nvsp_message *revokePacket;
+	struct nvsp_message *revoke_packet;
 	int ret = 0;
 
 	/*
@@ -505,20 +523,23 @@
 	 *  NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
 	 *  to send a revoke msg here
 	 */
-	if (NetDevice->SendSectionSize) {
+	if (net_device->send_section_size) {
 		DPRINT_INFO(NETVSC,
 			    "Sending NvspMessage1TypeRevokeSendBuffer...");
 
 		/* Send the revoke send buffer */
-		revokePacket = &NetDevice->RevokePacket;
-		memset(revokePacket, 0, sizeof(struct nvsp_message));
+		revoke_packet = &net_device->revoke_packet;
+		memset(revoke_packet, 0, sizeof(struct nvsp_message));
 
-		revokePacket->Header.MessageType = NvspMessage1TypeRevokeSendBuffer;
-		revokePacket->Messages.Version1Messages.RevokeSendBuffer.Id = NETVSC_SEND_BUFFER_ID;
+		revoke_packet->hdr.msg_type =
+			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
+		revoke_packet->msg.v1_msg.
+			revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;
 
-		ret = vmbus_sendpacket(NetDevice->Device->channel, revokePacket,
+		ret = vmbus_sendpacket(net_device->dev->channel,
+				       revoke_packet,
 				       sizeof(struct nvsp_message),
-				       (unsigned long)revokePacket,
+				       (unsigned long)revoke_packet,
 				       VmbusPacketTypeDataInBand, 0);
 		/*
 		 * If we failed here, we might as well return and have a leak
@@ -532,10 +553,10 @@
 	}
 
 	/* Teardown the gpadl on the vsp end */
-	if (NetDevice->SendBufferGpadlHandle) {
+	if (net_device->send_buf_gpadl_handle) {
 		DPRINT_INFO(NETVSC, "Tearing down send buffer's GPADL...");
-		ret = vmbus_teardown_gpadl(NetDevice->Device->channel,
-					   NetDevice->SendBufferGpadlHandle);
+		ret = vmbus_teardown_gpadl(net_device->dev->channel,
+					   net_device->send_buf_gpadl_handle);
 
 		/*
 		 * If we failed here, we might as well return and have a leak
@@ -546,49 +567,51 @@
 				   "gpadl");
 			return -1;
 		}
-		NetDevice->SendBufferGpadlHandle = 0;
+		net_device->send_buf_gpadl_handle = 0;
 	}
 
-	if (NetDevice->SendBuffer) {
+	if (net_device->send_buf) {
 		DPRINT_INFO(NETVSC, "Freeing up send buffer...");
 
 		/* Free up the receive buffer */
-		osd_PageFree(NetDevice->SendBuffer,
-			     NetDevice->SendBufferSize >> PAGE_SHIFT);
-		NetDevice->SendBuffer = NULL;
+		osd_page_free(net_device->send_buf,
+			     net_device->send_buf_size >> PAGE_SHIFT);
+		net_device->send_buf = NULL;
 	}
 
 	return ret;
 }
 
 
-static int NetVscConnectToVsp(struct hv_device *Device)
+static int netvsc_connect_vsp(struct hv_device *device)
 {
 	int ret;
-	struct netvsc_device *netDevice;
-	struct nvsp_message *initPacket;
-	int ndisVersion;
+	struct netvsc_device *net_device;
+	struct nvsp_message *init_packet;
+	int ndis_version;
 
-	netDevice = GetOutboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = get_outbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "unable to get net device..."
 			   "device being destroyed?");
 		return -1;
 	}
 
-	initPacket = &netDevice->ChannelInitPacket;
+	init_packet = &net_device->channel_init_pkt;
 
-	memset(initPacket, 0, sizeof(struct nvsp_message));
-	initPacket->Header.MessageType = NvspMessageTypeInit;
-	initPacket->Messages.InitMessages.Init.MinProtocolVersion = NVSP_MIN_PROTOCOL_VERSION;
-	initPacket->Messages.InitMessages.Init.MaxProtocolVersion = NVSP_MAX_PROTOCOL_VERSION;
+	memset(init_packet, 0, sizeof(struct nvsp_message));
+	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
+	init_packet->msg.init_msg.init.min_protocol_ver =
+		NVSP_MIN_PROTOCOL_VERSION;
+	init_packet->msg.init_msg.init.max_protocol_ver =
+		NVSP_MAX_PROTOCOL_VERSION;
 
 	DPRINT_INFO(NETVSC, "Sending NvspMessageTypeInit...");
 
 	/* Send the init request */
-	ret = vmbus_sendpacket(Device->channel, initPacket,
+	ret = vmbus_sendpacket(device->channel, init_packet,
 			       sizeof(struct nvsp_message),
-			       (unsigned long)initPacket,
+			       (unsigned long)init_packet,
 			       VmbusPacketTypeDataInBand,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 
@@ -597,47 +620,52 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(netDevice->ChannelInitEvent);
+	osd_waitevent_wait(net_device->channel_init_event);
 
 	/* Now, check the response */
 	/* ASSERT(initPacket->Messages.InitMessages.InitComplete.MaximumMdlChainLength <= MAX_MULTIPAGE_BUFFER_COUNT); */
 	DPRINT_INFO(NETVSC, "NvspMessageTypeInit status(%d) max mdl chain (%d)",
-		initPacket->Messages.InitMessages.InitComplete.Status,
-		initPacket->Messages.InitMessages.InitComplete.MaximumMdlChainLength);
+		init_packet->msg.init_msg.init_complete.status,
+		init_packet->msg.init_msg.
+		    init_complete.max_mdl_chain_len);
 
-	if (initPacket->Messages.InitMessages.InitComplete.Status !=
-	    NvspStatusSuccess) {
+	if (init_packet->msg.init_msg.init_complete.status !=
+	    NVSP_STAT_SUCCESS) {
 		DPRINT_ERR(NETVSC,
 			"unable to initialize with netvsp (status 0x%x)",
-			initPacket->Messages.InitMessages.InitComplete.Status);
+			init_packet->msg.init_msg.init_complete.status);
 		ret = -1;
 		goto Cleanup;
 	}
 
-	if (initPacket->Messages.InitMessages.InitComplete.NegotiatedProtocolVersion != NVSP_PROTOCOL_VERSION_1) {
+	if (init_packet->msg.init_msg.init_complete.
+	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
 		DPRINT_ERR(NETVSC, "unable to initialize with netvsp "
 			   "(version expected 1 got %d)",
-			   initPacket->Messages.InitMessages.InitComplete.NegotiatedProtocolVersion);
+			   init_packet->msg.init_msg.
+			   init_complete.negotiated_protocol_ver);
 		ret = -1;
 		goto Cleanup;
 	}
 	DPRINT_INFO(NETVSC, "Sending NvspMessage1TypeSendNdisVersion...");
 
 	/* Send the ndis version */
-	memset(initPacket, 0, sizeof(struct nvsp_message));
+	memset(init_packet, 0, sizeof(struct nvsp_message));
 
-	ndisVersion = 0x00050000;
+	ndis_version = 0x00050000;
 
-	initPacket->Header.MessageType = NvspMessage1TypeSendNdisVersion;
-	initPacket->Messages.Version1Messages.SendNdisVersion.NdisMajorVersion =
-				(ndisVersion & 0xFFFF0000) >> 16;
-	initPacket->Messages.Version1Messages.SendNdisVersion.NdisMinorVersion =
-				ndisVersion & 0xFFFF;
+	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
+	init_packet->msg.v1_msg.
+		send_ndis_ver.ndis_major_ver =
+				(ndis_version & 0xFFFF0000) >> 16;
+	init_packet->msg.v1_msg.
+		send_ndis_ver.ndis_minor_ver =
+				ndis_version & 0xFFFF;
 
 	/* Send the init request */
-	ret = vmbus_sendpacket(Device->channel, initPacket,
+	ret = vmbus_sendpacket(device->channel, init_packet,
 			       sizeof(struct nvsp_message),
-			       (unsigned long)initPacket,
+			       (unsigned long)init_packet,
 			       VmbusPacketTypeDataInBand, 0);
 	if (ret != 0) {
 		DPRINT_ERR(NETVSC,
@@ -651,51 +679,52 @@
 	 * packet) since our Vmbus always set the
 	 * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED flag
 	 */
-	 /* osd_WaitEventWait(NetVscChannel->ChannelInitEvent); */
+	 /* osd_waitevent_wait(NetVscChannel->ChannelInitEvent); */
 
 	/* Post the big receive buffer to NetVSP */
-	ret = NetVscInitializeReceiveBufferWithNetVsp(Device);
+	ret = netvsc_init_recv_buf(device);
 	if (ret == 0)
-		ret = NetVscInitializeSendBufferWithNetVsp(Device);
+		ret = netvsc_init_send_buf(device);
 
 Cleanup:
-	PutNetDevice(Device);
+	put_net_device(device);
 	return ret;
 }
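 A tiny sketch of the version split netvsc_connect_vsp() performs when filling send_ndis_ver: the packed 0x00050000 word (NDIS 5.0, as in the driver) is separated into major and minor fields.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t ndis_version = 0x00050000;	/* NDIS 5.0, as in the driver */

	uint32_t major = (ndis_version & 0xFFFF0000) >> 16;
	uint32_t minor = ndis_version & 0xFFFF;

	printf("send_ndis_ver.ndis_major_ver = %" PRIu32 "\n", major);
	printf("send_ndis_ver.ndis_minor_ver = %" PRIu32 "\n", minor);
	return 0;
}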
 
-static void NetVscDisconnectFromVsp(struct netvsc_device *NetDevice)
+static void NetVscDisconnectFromVsp(struct netvsc_device *net_device)
 {
-	NetVscDestroyReceiveBuffer(NetDevice);
-	NetVscDestroySendBuffer(NetDevice);
+	netvsc_destroy_recv_buf(net_device);
+	netvsc_destroy_send_buf(net_device);
 }
 
 /*
- * NetVscOnDeviceAdd - Callback when the device belonging to this driver is added
+ * netvsc_device_add - Callback when the device belonging to this
+ * driver is added
  */
-static int NetVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
+static int netvsc_device_add(struct hv_device *device, void *additional_info)
 {
 	int ret = 0;
 	int i;
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 	struct hv_netvsc_packet *packet, *pos;
-	struct netvsc_driver *netDriver =
-				(struct netvsc_driver *)Device->Driver;
+	struct netvsc_driver *net_driver =
+				(struct netvsc_driver *)device->Driver;
 
-	netDevice = AllocNetDevice(Device);
-	if (!netDevice) {
+	net_device = alloc_net_device(device);
+	if (!net_device) {
 		ret = -1;
 		goto Cleanup;
 	}
 
-	DPRINT_DBG(NETVSC, "netvsc channel object allocated - %p", netDevice);
+	DPRINT_DBG(NETVSC, "netvsc channel object allocated - %p", net_device);
 
 	/* Initialize the NetVSC channel extension */
-	netDevice->ReceiveBufferSize = NETVSC_RECEIVE_BUFFER_SIZE;
-	spin_lock_init(&netDevice->receive_packet_list_lock);
+	net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+	spin_lock_init(&net_device->recv_pkt_list_lock);
 
-	netDevice->SendBufferSize = NETVSC_SEND_BUFFER_SIZE;
+	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
 
-	INIT_LIST_HEAD(&netDevice->ReceivePacketList);
+	INIT_LIST_HEAD(&net_device->recv_pkt_list);
 
 	for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
 		packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -707,19 +736,19 @@
 				   NETVSC_RECEIVE_PACKETLIST_COUNT, i);
 			break;
 		}
-		list_add_tail(&packet->ListEntry,
-			      &netDevice->ReceivePacketList);
+		list_add_tail(&packet->list_ent,
+			      &net_device->recv_pkt_list);
 	}
-	netDevice->ChannelInitEvent = osd_WaitEventCreate();
-	if (!netDevice->ChannelInitEvent) {
+	net_device->channel_init_event = osd_waitevent_create();
+	if (!net_device->channel_init_event) {
 		ret = -ENOMEM;
 		goto Cleanup;
 	}
 
 	/* Open the channel */
-	ret = vmbus_open(Device->channel, netDriver->RingBufferSize,
-			 netDriver->RingBufferSize, NULL, 0,
-			 NetVscOnChannelCallback, Device);
+	ret = vmbus_open(device->channel, net_driver->ring_buf_size,
+			 net_driver->ring_buf_size, NULL, 0,
+			 netvsc_channel_cb, device);
 
 	if (ret != 0) {
 		DPRINT_ERR(NETVSC, "unable to open channel: %d", ret);
@@ -731,7 +760,7 @@
 	DPRINT_INFO(NETVSC, "*** NetVSC channel opened successfully! ***");
 
 	/* Connect with the NetVsp */
-	ret = NetVscConnectToVsp(Device);
+	ret = netvsc_connect_vsp(device);
 	if (ret != 0) {
 		DPRINT_ERR(NETVSC, "unable to connect to NetVSP - %d", ret);
 		ret = -1;
@@ -745,174 +774,178 @@
 
 close:
 	/* Now, we can close the channel safely */
-	vmbus_close(Device->channel);
+	vmbus_close(device->channel);
 
 Cleanup:
 
-	if (netDevice) {
-		kfree(netDevice->ChannelInitEvent);
+	if (net_device) {
+		kfree(net_device->channel_init_event);
 
 		list_for_each_entry_safe(packet, pos,
-					 &netDevice->ReceivePacketList,
-					 ListEntry) {
-			list_del(&packet->ListEntry);
+					 &net_device->recv_pkt_list,
+					 list_ent) {
+			list_del(&packet->list_ent);
 			kfree(packet);
 		}
 
-		ReleaseOutboundNetDevice(Device);
-		ReleaseInboundNetDevice(Device);
+		release_outbound_net_device(device);
+		release_inbound_net_device(device);
 
-		FreeNetDevice(netDevice);
+		free_net_device(net_device);
 	}
 
 	return ret;
 }
 
 /*
- * NetVscOnDeviceRemove - Callback when the root bus device is removed
+ * netvsc_device_remove - Callback when the root bus device is removed
  */
-static int NetVscOnDeviceRemove(struct hv_device *Device)
+static int netvsc_device_remove(struct hv_device *device)
 {
-	struct netvsc_device *netDevice;
-	struct hv_netvsc_packet *netvscPacket, *pos;
+	struct netvsc_device *net_device;
+	struct hv_netvsc_packet *netvsc_packet, *pos;
 
 	DPRINT_INFO(NETVSC, "Disabling outbound traffic on net device (%p)...",
-		    Device->Extension);
+		    device->Extension);
 
 	/* Stop outbound traffic ie sends and receives completions */
-	netDevice = ReleaseOutboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = release_outbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "No net device present!!");
 		return -1;
 	}
 
 	/* Wait for all send completions */
-	while (atomic_read(&netDevice->NumOutstandingSends)) {
+	while (atomic_read(&net_device->num_outstanding_sends)) {
 		DPRINT_INFO(NETVSC, "waiting for %d requests to complete...",
-			    atomic_read(&netDevice->NumOutstandingSends));
+			    atomic_read(&net_device->num_outstanding_sends));
 		udelay(100);
 	}
 
 	DPRINT_INFO(NETVSC, "Disconnecting from netvsp...");
 
-	NetVscDisconnectFromVsp(netDevice);
+	NetVscDisconnectFromVsp(net_device);
 
 	DPRINT_INFO(NETVSC, "Disabling inbound traffic on net device (%p)...",
-		    Device->Extension);
+		    device->Extension);
 
 	/* Stop inbound traffic ie receives and sends completions */
-	netDevice = ReleaseInboundNetDevice(Device);
+	net_device = release_inbound_net_device(device);
 
 	/* At this point, no one should be accessing netDevice except in here */
-	DPRINT_INFO(NETVSC, "net device (%p) safe to remove", netDevice);
+	DPRINT_INFO(NETVSC, "net device (%p) safe to remove", net_device);
 
 	/* Now, we can close the channel safely */
-	vmbus_close(Device->channel);
+	vmbus_close(device->channel);
 
 	/* Release all resources */
-	list_for_each_entry_safe(netvscPacket, pos,
-				 &netDevice->ReceivePacketList, ListEntry) {
-		list_del(&netvscPacket->ListEntry);
-		kfree(netvscPacket);
+	list_for_each_entry_safe(netvsc_packet, pos,
+				 &net_device->recv_pkt_list, list_ent) {
+		list_del(&netvsc_packet->list_ent);
+		kfree(netvsc_packet);
 	}
 
-	kfree(netDevice->ChannelInitEvent);
-	FreeNetDevice(netDevice);
+	kfree(net_device->channel_init_event);
+	free_net_device(net_device);
 	return 0;
 }
 
 /*
- * NetVscOnCleanup - Perform any cleanup when the driver is removed
+ * netvsc_cleanup - Perform any cleanup when the driver is removed
  */
-static void NetVscOnCleanup(struct hv_driver *drv)
+static void netvsc_cleanup(struct hv_driver *drv)
 {
 }
 
-static void NetVscOnSendCompletion(struct hv_device *Device,
-				   struct vmpacket_descriptor *Packet)
+static void netvsc_send_completion(struct hv_device *device,
+				   struct vmpacket_descriptor *packet)
 {
-	struct netvsc_device *netDevice;
-	struct nvsp_message *nvspPacket;
-	struct hv_netvsc_packet *nvscPacket;
+	struct netvsc_device *net_device;
+	struct nvsp_message *nvsp_packet;
+	struct hv_netvsc_packet *nvsc_packet;
 
-	netDevice = GetInboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = get_inbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "unable to get net device..."
 			   "device being destroyed?");
 		return;
 	}
 
-	nvspPacket = (struct nvsp_message *)((unsigned long)Packet + (Packet->DataOffset8 << 3));
+	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
+			(packet->DataOffset8 << 3));
 
 	DPRINT_DBG(NETVSC, "send completion packet - type %d",
-		   nvspPacket->Header.MessageType);
+		   nvsp_packet->hdr.msg_type);
 
-	if ((nvspPacket->Header.MessageType == NvspMessageTypeInitComplete) ||
-	    (nvspPacket->Header.MessageType ==
-	     NvspMessage1TypeSendReceiveBufferComplete) ||
-	    (nvspPacket->Header.MessageType ==
-	     NvspMessage1TypeSendSendBufferComplete)) {
+	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
+	    (nvsp_packet->hdr.msg_type ==
+	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
+	    (nvsp_packet->hdr.msg_type ==
+	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
 		/* Copy the response back */
-		memcpy(&netDevice->ChannelInitPacket, nvspPacket,
+		memcpy(&net_device->channel_init_pkt, nvsp_packet,
 		       sizeof(struct nvsp_message));
-		osd_WaitEventSet(netDevice->ChannelInitEvent);
-	} else if (nvspPacket->Header.MessageType ==
-		   NvspMessage1TypeSendRNDISPacketComplete) {
+		osd_waitevent_set(net_device->channel_init_event);
+	} else if (nvsp_packet->hdr.msg_type ==
+		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
 		/* Get the send context */
-		nvscPacket = (struct hv_netvsc_packet *)(unsigned long)Packet->TransactionId;
+		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
+			packet->TransactionId;
 		/* ASSERT(nvscPacket); */
 
 		/* Notify the layer above us */
-		nvscPacket->Completion.Send.OnSendCompletion(nvscPacket->Completion.Send.SendCompletionContext);
+		nvsc_packet->completion.send.send_completion(
+			nvsc_packet->completion.send.send_completion_ctx);
 
-		atomic_dec(&netDevice->NumOutstandingSends);
+		atomic_dec(&net_device->num_outstanding_sends);
 	} else {
 		DPRINT_ERR(NETVSC, "Unknown send completion packet type - "
-			   "%d received!!", nvspPacket->Header.MessageType);
+			   "%d received!!", nvsp_packet->hdr.msg_type);
 	}
 
-	PutNetDevice(Device);
+	put_net_device(device);
 }
 
-static int NetVscOnSend(struct hv_device *Device,
-			struct hv_netvsc_packet *Packet)
+static int netvsc_send(struct hv_device *device,
+			struct hv_netvsc_packet *packet)
 {
-	struct netvsc_device *netDevice;
+	struct netvsc_device *net_device;
 	int ret = 0;
 
 	struct nvsp_message sendMessage;
 
-	netDevice = GetOutboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = get_outbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
-			   "ignoring outbound packets", netDevice);
+			   "ignoring outbound packets", net_device);
 		return -2;
 	}
 
-	sendMessage.Header.MessageType = NvspMessage1TypeSendRNDISPacket;
-	if (Packet->IsDataPacket) {
+	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
+	if (packet->is_data_pkt) {
 		/* 0 is RMC_DATA; */
-		sendMessage.Messages.Version1Messages.SendRNDISPacket.ChannelType = 0;
+		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
 	} else {
 		/* 1 is RMC_CONTROL; */
-		sendMessage.Messages.Version1Messages.SendRNDISPacket.ChannelType = 1;
+		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
 	}
 
 	/* Not using send buffer section */
-	sendMessage.Messages.Version1Messages.SendRNDISPacket.SendBufferSectionIndex = 0xFFFFFFFF;
-	sendMessage.Messages.Version1Messages.SendRNDISPacket.SendBufferSectionSize = 0;
+	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
+		0xFFFFFFFF;
+	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
 
-	if (Packet->PageBufferCount) {
-		ret = vmbus_sendpacket_pagebuffer(Device->channel,
-						  Packet->PageBuffers,
-						  Packet->PageBufferCount,
+	if (packet->page_buf_cnt) {
+		ret = vmbus_sendpacket_pagebuffer(device->channel,
+						  packet->page_buf,
+						  packet->page_buf_cnt,
 						  &sendMessage,
 						  sizeof(struct nvsp_message),
-						  (unsigned long)Packet);
+						  (unsigned long)packet);
 	} else {
-		ret = vmbus_sendpacket(Device->channel, &sendMessage,
+		ret = vmbus_sendpacket(device->channel, &sendMessage,
 				       sizeof(struct nvsp_message),
-				       (unsigned long)Packet,
+				       (unsigned long)packet,
 				       VmbusPacketTypeDataInBand,
 				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 
@@ -920,31 +953,31 @@
 
 	if (ret != 0)
 		DPRINT_ERR(NETVSC, "Unable to send packet %p ret %d",
-			   Packet, ret);
+			   packet, ret);
 
-	atomic_inc(&netDevice->NumOutstandingSends);
-	PutNetDevice(Device);
+	atomic_inc(&net_device->num_outstanding_sends);
+	put_net_device(device);
 	return ret;
 }
 
-static void NetVscOnReceive(struct hv_device *Device,
-			    struct vmpacket_descriptor *Packet)
+static void netvsc_receive(struct hv_device *device,
+			    struct vmpacket_descriptor *packet)
 {
-	struct netvsc_device *netDevice;
-	struct vmtransfer_page_packet_header *vmxferpagePacket;
-	struct nvsp_message *nvspPacket;
-	struct hv_netvsc_packet *netvscPacket = NULL;
+	struct netvsc_device *net_device;
+	struct vmtransfer_page_packet_header *vmxferpage_packet;
+	struct nvsp_message *nvsp_packet;
+	struct hv_netvsc_packet *netvsc_packet = NULL;
 	unsigned long start;
-	unsigned long end, endVirtual;
+	unsigned long end, end_virtual;
 	/* struct netvsc_driver *netvscDriver; */
-	struct xferpage_packet *xferpagePacket = NULL;
+	struct xferpage_packet *xferpage_packet = NULL;
 	int i, j;
-	int count = 0, bytesRemain = 0;
+	int count = 0, bytes_remain = 0;
 	unsigned long flags;
 	LIST_HEAD(listHead);
 
-	netDevice = GetInboundNetDevice(Device);
-	if (!netDevice) {
+	net_device = get_inbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "unable to get net device..."
 			   "device being destroyed?");
 		return;
@@ -954,39 +987,40 @@
 	 * All inbound packets other than send completion should be xfer page
 	 * packet
 	 */
-	if (Packet->Type != VmbusPacketTypeDataUsingTransferPages) {
+	if (packet->Type != VmbusPacketTypeDataUsingTransferPages) {
 		DPRINT_ERR(NETVSC, "Unknown packet type received - %d",
-			   Packet->Type);
-		PutNetDevice(Device);
+			   packet->Type);
+		put_net_device(device);
 		return;
 	}
 
-	nvspPacket = (struct nvsp_message *)((unsigned long)Packet +
-			(Packet->DataOffset8 << 3));
+	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
+			(packet->DataOffset8 << 3));
 
 	/* Make sure this is a valid nvsp packet */
-	if (nvspPacket->Header.MessageType != NvspMessage1TypeSendRNDISPacket) {
+	if (nvsp_packet->hdr.msg_type !=
+	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
 		DPRINT_ERR(NETVSC, "Unknown nvsp packet type received - %d",
-			   nvspPacket->Header.MessageType);
-		PutNetDevice(Device);
+			   nvsp_packet->hdr.msg_type);
+		put_net_device(device);
 		return;
 	}
 
 	DPRINT_DBG(NETVSC, "NVSP packet received - type %d",
-		   nvspPacket->Header.MessageType);
+		   nvsp_packet->hdr.msg_type);
 
-	vmxferpagePacket = (struct vmtransfer_page_packet_header *)Packet;
+	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
 
-	if (vmxferpagePacket->TransferPageSetId != NETVSC_RECEIVE_BUFFER_ID) {
+	if (vmxferpage_packet->TransferPageSetId != NETVSC_RECEIVE_BUFFER_ID) {
 		DPRINT_ERR(NETVSC, "Invalid xfer page set id - "
 			   "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
-			   vmxferpagePacket->TransferPageSetId);
-		PutNetDevice(Device);
+			   vmxferpage_packet->TransferPageSetId);
+		put_net_device(device);
 		return;
 	}
 
 	DPRINT_DBG(NETVSC, "xfer page - range count %d",
-		   vmxferpagePacket->RangeCount);
+		   vmxferpage_packet->RangeCount);
 
 	/*
 	 * Grab free packets (range count + 1) to represent this xfer
@@ -994,13 +1028,13 @@
 	 * We grab it here so that we know exactly how many we can
 	 * fulfil
 	 */
-	spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
-	while (!list_empty(&netDevice->ReceivePacketList)) {
-		list_move_tail(netDevice->ReceivePacketList.next, &listHead);
-		if (++count == vmxferpagePacket->RangeCount + 1)
+	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
+	while (!list_empty(&net_device->recv_pkt_list)) {
+		list_move_tail(net_device->recv_pkt_list.next, &listHead);
+		if (++count == vmxferpage_packet->RangeCount + 1)
 			break;
 	}
-	spin_unlock_irqrestore(&netDevice->receive_packet_list_lock, flags);
+	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
 
 	/*
 	 * We need at least 2 netvsc pkts (1 to represent the xfer
@@ -1010,140 +1044,149 @@
 	if (count < 2) {
 		DPRINT_ERR(NETVSC, "Got only %d netvsc pkt...needed %d pkts. "
 			   "Dropping this xfer page packet completely!",
-			   count, vmxferpagePacket->RangeCount + 1);
+			   count, vmxferpage_packet->RangeCount + 1);
 
 		/* Return it to the freelist */
-		spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
+		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
 		for (i = count; i != 0; i--) {
 			list_move_tail(listHead.next,
-				       &netDevice->ReceivePacketList);
+				       &net_device->recv_pkt_list);
 		}
-		spin_unlock_irqrestore(&netDevice->receive_packet_list_lock,
+		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
 				       flags);
 
-		NetVscSendReceiveCompletion(Device,
-					    vmxferpagePacket->d.TransactionId);
+		netvsc_send_recv_completion(device,
+					    vmxferpage_packet->d.TransactionId);
 
-		PutNetDevice(Device);
+		put_net_device(device);
 		return;
 	}
 
 	/* Remove the 1st packet to represent the xfer page packet itself */
-	xferpagePacket = (struct xferpage_packet *)listHead.next;
-	list_del(&xferpagePacket->ListEntry);
+	xferpage_packet = (struct xferpage_packet *)listHead.next;
+	list_del(&xferpage_packet->list_ent);
 
 	/* This is how much we can satisfy */
-	xferpagePacket->Count = count - 1;
+	xferpage_packet->count = count - 1;
 	/* ASSERT(xferpagePacket->Count > 0 && xferpagePacket->Count <= */
 	/* 	vmxferpagePacket->RangeCount); */
 
-	if (xferpagePacket->Count != vmxferpagePacket->RangeCount) {
+	if (xferpage_packet->count != vmxferpage_packet->RangeCount) {
 		DPRINT_INFO(NETVSC, "Needed %d netvsc pkts to satisfy this xfer "
-			    "page...got %d", vmxferpagePacket->RangeCount,
-			    xferpagePacket->Count);
+			    "page...got %d", vmxferpage_packet->RangeCount,
+			    xferpage_packet->count);
 	}
 
 	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
 	for (i = 0; i < (count - 1); i++) {
-		netvscPacket = (struct hv_netvsc_packet *)listHead.next;
-		list_del(&netvscPacket->ListEntry);
+		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
+		list_del(&netvsc_packet->list_ent);
 
 		/* Initialize the netvsc packet */
-		netvscPacket->XferPagePacket = xferpagePacket;
-		netvscPacket->Completion.Recv.OnReceiveCompletion =
-					NetVscOnReceiveCompletion;
-		netvscPacket->Completion.Recv.ReceiveCompletionContext =
-					netvscPacket;
-		netvscPacket->Device = Device;
+		netvsc_packet->xfer_page_pkt = xferpage_packet;
+		netvsc_packet->completion.recv.recv_completion =
+					netvsc_receive_completion;
+		netvsc_packet->completion.recv.recv_completion_ctx =
+					netvsc_packet;
+		netvsc_packet->device = device;
 		/* Save this so that we can send it back */
-		netvscPacket->Completion.Recv.ReceiveCompletionTid =
-					vmxferpagePacket->d.TransactionId;
+		netvsc_packet->completion.recv.recv_completion_tid =
+					vmxferpage_packet->d.TransactionId;
 
-		netvscPacket->TotalDataBufferLength =
-					vmxferpagePacket->Ranges[i].ByteCount;
-		netvscPacket->PageBufferCount = 1;
+		netvsc_packet->total_data_buflen =
+					vmxferpage_packet->Ranges[i].ByteCount;
+		netvsc_packet->page_buf_cnt = 1;
 
 		/* ASSERT(vmxferpagePacket->Ranges[i].ByteOffset + */
 		/* 	vmxferpagePacket->Ranges[i].ByteCount < */
 		/* 	netDevice->ReceiveBufferSize); */
 
-		netvscPacket->PageBuffers[0].Length =
-					vmxferpagePacket->Ranges[i].ByteCount;
+		netvsc_packet->page_buf[0].Length =
+					vmxferpage_packet->Ranges[i].ByteCount;
 
-		start = virt_to_phys((void *)((unsigned long)netDevice->ReceiveBuffer + vmxferpagePacket->Ranges[i].ByteOffset));
+		start = virt_to_phys((void *)((unsigned long)net_device->
+		recv_buf + vmxferpage_packet->Ranges[i].ByteOffset));
 
-		netvscPacket->PageBuffers[0].Pfn = start >> PAGE_SHIFT;
-		endVirtual = (unsigned long)netDevice->ReceiveBuffer
-		    + vmxferpagePacket->Ranges[i].ByteOffset
-		    + vmxferpagePacket->Ranges[i].ByteCount - 1;
-		end = virt_to_phys((void *)endVirtual);
+		netvsc_packet->page_buf[0].Pfn = start >> PAGE_SHIFT;
+		end_virtual = (unsigned long)net_device->recv_buf
+		    + vmxferpage_packet->Ranges[i].ByteOffset
+		    + vmxferpage_packet->Ranges[i].ByteCount - 1;
+		end = virt_to_phys((void *)end_virtual);
 
 		/* Calculate the page relative offset */
-		netvscPacket->PageBuffers[0].Offset =
-			vmxferpagePacket->Ranges[i].ByteOffset & (PAGE_SIZE - 1);
+		netvsc_packet->page_buf[0].Offset =
+			vmxferpage_packet->Ranges[i].ByteOffset &
+			(PAGE_SIZE - 1);
 		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
 			/* Handle frame across multiple pages: */
-			netvscPacket->PageBuffers[0].Length =
-				(netvscPacket->PageBuffers[0].Pfn << PAGE_SHIFT)
+			netvsc_packet->page_buf[0].Length =
+				(netvsc_packet->page_buf[0].Pfn <<
+				 PAGE_SHIFT)
 				+ PAGE_SIZE - start;
-			bytesRemain = netvscPacket->TotalDataBufferLength -
-					netvscPacket->PageBuffers[0].Length;
+			bytes_remain = netvsc_packet->total_data_buflen -
+					netvsc_packet->page_buf[0].Length;
 			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
-				netvscPacket->PageBuffers[j].Offset = 0;
-				if (bytesRemain <= PAGE_SIZE) {
-					netvscPacket->PageBuffers[j].Length = bytesRemain;
-					bytesRemain = 0;
+				netvsc_packet->page_buf[j].Offset = 0;
+				if (bytes_remain <= PAGE_SIZE) {
+					netvsc_packet->page_buf[j].Length =
+						bytes_remain;
+					bytes_remain = 0;
 				} else {
-					netvscPacket->PageBuffers[j].Length = PAGE_SIZE;
-					bytesRemain -= PAGE_SIZE;
+					netvsc_packet->page_buf[j].Length =
+						PAGE_SIZE;
+					bytes_remain -= PAGE_SIZE;
 				}
-				netvscPacket->PageBuffers[j].Pfn =
-				    virt_to_phys((void *)(endVirtual - bytesRemain)) >> PAGE_SHIFT;
-				netvscPacket->PageBufferCount++;
-				if (bytesRemain == 0)
+				netvsc_packet->page_buf[j].Pfn =
+				    virt_to_phys((void *)(end_virtual -
+						bytes_remain)) >> PAGE_SHIFT;
+				netvsc_packet->page_buf_cnt++;
+				if (bytes_remain == 0)
 					break;
 			}
 			/* ASSERT(bytesRemain == 0); */
 		}
 		DPRINT_DBG(NETVSC, "[%d] - (abs offset %u len %u) => "
 			   "(pfn %llx, offset %u, len %u)", i,
-			   vmxferpagePacket->Ranges[i].ByteOffset,
-			   vmxferpagePacket->Ranges[i].ByteCount,
-			   netvscPacket->PageBuffers[0].Pfn,
-			   netvscPacket->PageBuffers[0].Offset,
-			   netvscPacket->PageBuffers[0].Length);
+			   vmxferpage_packet->Ranges[i].ByteOffset,
+			   vmxferpage_packet->Ranges[i].ByteCount,
+			   netvsc_packet->page_buf[0].Pfn,
+			   netvsc_packet->page_buf[0].Offset,
+			   netvsc_packet->page_buf[0].Length);
 
 		/* Pass it to the upper layer */
-		((struct netvsc_driver *)Device->Driver)->OnReceiveCallback(Device, netvscPacket);
+		((struct netvsc_driver *)device->Driver)->
+			recv_cb(device, netvsc_packet);
 
-		NetVscOnReceiveCompletion(netvscPacket->Completion.Recv.ReceiveCompletionContext);
+		netvsc_receive_completion(netvsc_packet->
+				completion.recv.recv_completion_ctx);
 	}
 
 	/* ASSERT(list_empty(&listHead)); */
 
-	PutNetDevice(Device);
+	put_net_device(device);
 }
 
-static void NetVscSendReceiveCompletion(struct hv_device *Device,
-					u64 TransactionId)
+static void netvsc_send_recv_completion(struct hv_device *device,
+					u64 transaction_id)
 {
 	struct nvsp_message recvcompMessage;
 	int retries = 0;
 	int ret;
 
 	DPRINT_DBG(NETVSC, "Sending receive completion pkt - %llx",
-		   TransactionId);
+		   transaction_id);
 
-	recvcompMessage.Header.MessageType =
-				NvspMessage1TypeSendRNDISPacketComplete;
+	recvcompMessage.hdr.msg_type =
+				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
 
 	/* FIXME: Pass in the status */
-	recvcompMessage.Messages.Version1Messages.SendRNDISPacketComplete.Status = NvspStatusSuccess;
+	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
+		NVSP_STAT_SUCCESS;
 
 retry_send_cmplt:
 	/* Send the completion */
-	ret = vmbus_sendpacket(Device->channel, &recvcompMessage,
-			       sizeof(struct nvsp_message), TransactionId,
+	ret = vmbus_sendpacket(device->channel, &recvcompMessage,
+			       sizeof(struct nvsp_message), transaction_id,
 			       VmbusPacketTypeCompletion, 0);
 	if (ret == 0) {
 		/* success */
@@ -1152,7 +1195,7 @@
 		/* no more room...wait a bit and attempt to retry 3 times */
 		retries++;
 		DPRINT_ERR(NETVSC, "unable to send receive completion pkt "
-			   "(tid %llx)...retrying %d", TransactionId, retries);
+			   "(tid %llx)...retrying %d", transaction_id, retries);
 
 		if (retries < 4) {
 			udelay(100);
@@ -1160,22 +1203,22 @@
 		} else {
 			DPRINT_ERR(NETVSC, "unable to send receive completion "
 				  "pkt (tid %llx)...give up retrying",
-				  TransactionId);
+				  transaction_id);
 		}
 	} else {
 		DPRINT_ERR(NETVSC, "unable to send receive completion pkt - "
-			   "%llx", TransactionId);
+			   "%llx", transaction_id);
 	}
 }
 
 /* Send a receive completion packet to RNDIS device (ie NetVsp) */
-static void NetVscOnReceiveCompletion(void *Context)
+static void netvsc_receive_completion(void *context)
 {
-	struct hv_netvsc_packet *packet = Context;
-	struct hv_device *device = (struct hv_device *)packet->Device;
-	struct netvsc_device *netDevice;
-	u64 transactionId = 0;
-	bool fSendReceiveComp = false;
+	struct hv_netvsc_packet *packet = context;
+	struct hv_device *device = (struct hv_device *)packet->device;
+	struct netvsc_device *net_device;
+	u64 transaction_id = 0;
+	bool fsend_receive_comp = false;
 	unsigned long flags;
 
 	/* ASSERT(packet->XferPagePacket); */
@@ -1185,49 +1228,49 @@
 	 * send out receive completion, we are using GetInboundNetDevice()
 	 * since we may have disabled outbound traffic already.
 	 */
-	netDevice = GetInboundNetDevice(device);
-	if (!netDevice) {
+	net_device = get_inbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "unable to get net device..."
 			   "device being destroyed?");
 		return;
 	}
 
 	/* Overloading use of the lock. */
-	spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags);
+	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
 
 	/* ASSERT(packet->XferPagePacket->Count > 0); */
-	packet->XferPagePacket->Count--;
+	packet->xfer_page_pkt->count--;
 
 	/*
 	 * Last one in the line that represents 1 xfer page packet.
 	 * Return the xfer page packet itself to the freelist
 	 */
-	if (packet->XferPagePacket->Count == 0) {
-		fSendReceiveComp = true;
-		transactionId = packet->Completion.Recv.ReceiveCompletionTid;
-		list_add_tail(&packet->XferPagePacket->ListEntry,
-			      &netDevice->ReceivePacketList);
+	if (packet->xfer_page_pkt->count == 0) {
+		fsend_receive_comp = true;
+		transaction_id = packet->completion.recv.recv_completion_tid;
+		list_add_tail(&packet->xfer_page_pkt->list_ent,
+			      &net_device->recv_pkt_list);
 
 	}
 
 	/* Put the packet back */
-	list_add_tail(&packet->ListEntry, &netDevice->ReceivePacketList);
-	spin_unlock_irqrestore(&netDevice->receive_packet_list_lock, flags);
+	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
+	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);
 
 	/* Send a receive completion for the xfer page packet */
-	if (fSendReceiveComp)
-		NetVscSendReceiveCompletion(device, transactionId);
+	if (fsend_receive_comp)
+		netvsc_send_recv_completion(device, transaction_id);
 
-	PutNetDevice(device);
+	put_net_device(device);
 }
 
-static void NetVscOnChannelCallback(void *Context)
+static void netvsc_channel_cb(void *context)
 {
 	int ret;
-	struct hv_device *device = Context;
-	struct netvsc_device *netDevice;
-	u32 bytesRecvd;
-	u64 requestId;
+	struct hv_device *device = context;
+	struct netvsc_device *net_device;
+	u32 bytes_recvd;
+	u64 request_id;
 	unsigned char *packet;
 	struct vmpacket_descriptor *desc;
 	unsigned char *buffer;
@@ -1241,37 +1284,37 @@
 		return;
 	buffer = packet;
 
-	netDevice = GetInboundNetDevice(device);
-	if (!netDevice) {
+	net_device = get_inbound_net_device(device);
+	if (!net_device) {
 		DPRINT_ERR(NETVSC, "net device (%p) shutting down..."
-			   "ignoring inbound packets", netDevice);
+			   "ignoring inbound packets", net_device);
 		goto out;
 	}
 
 	do {
 		ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
-					   &bytesRecvd, &requestId);
+					   &bytes_recvd, &request_id);
 		if (ret == 0) {
-			if (bytesRecvd > 0) {
+			if (bytes_recvd > 0) {
 				DPRINT_DBG(NETVSC, "receive %d bytes, tid %llx",
-					   bytesRecvd, requestId);
+					   bytes_recvd, request_id);
 
 				desc = (struct vmpacket_descriptor *)buffer;
 				switch (desc->Type) {
 				case VmbusPacketTypeCompletion:
-					NetVscOnSendCompletion(device, desc);
+					netvsc_send_completion(device, desc);
 					break;
 
 				case VmbusPacketTypeDataUsingTransferPages:
-					NetVscOnReceive(device, desc);
+					netvsc_receive(device, desc);
 					break;
 
 				default:
 					DPRINT_ERR(NETVSC,
 						   "unhandled packet type %d, "
 						   "tid %llx len %d\n",
-						   desc->Type, requestId,
-						   bytesRecvd);
+						   desc->Type, request_id,
+						   bytes_recvd);
 					break;
 				}
 
@@ -1293,20 +1336,20 @@
 			}
 		} else if (ret == -2) {
 			/* Handle large packet */
-			buffer = kmalloc(bytesRecvd, GFP_ATOMIC);
+			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
 			if (buffer == NULL) {
 				/* Try again next time around */
 				DPRINT_ERR(NETVSC,
 					   "unable to allocate buffer of size "
-					   "(%d)!!", bytesRecvd);
+					   "(%d)!!", bytes_recvd);
 				break;
 			}
 
-			bufferlen = bytesRecvd;
+			bufferlen = bytes_recvd;
 		}
 	} while (1);
 
-	PutNetDevice(device);
+	put_net_device(device);
 out:
 	kfree(buffer);
 	return;
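
The receive path above fans one transfer-page range out over up to NETVSC_PACKET_MAXPAGE page buffers: the first entry runs from the range's byte offset to the end of that page, the middle entries cover whole pages, and the last entry takes whatever remains. The stand-alone sketch below reproduces that arithmetic in user space; the 4 KiB page size, the invented physical address, and the trimmed-down struct are assumptions standing in for the kernel's PAGE_SHIFT, virt_to_phys() and struct hv_page_buffer.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct page_buf {
	uint64_t pfn;
	uint32_t offset;
	uint32_t len;
};

int main(void)
{
	/* Pretend the receive buffer sits at this physical address and the
	 * range starts 3000 bytes in and is 6000 bytes long, so it
	 * straddles three pages. */
	uint64_t recv_buf_phys = 0x100000;
	uint32_t byte_offset = 3000, byte_count = 6000;

	uint64_t start = recv_buf_phys + byte_offset;
	uint64_t end = start + byte_count - 1;
	struct page_buf pb[8];
	uint32_t cnt = 1, remain = 0, j;

	pb[0].pfn = start >> PAGE_SHIFT;
	pb[0].offset = byte_offset & (PAGE_SIZE - 1);
	pb[0].len = byte_count;

	if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
		/* First entry only reaches the end of its own page */
		pb[0].len = (uint32_t)((pb[0].pfn << PAGE_SHIFT) +
				       PAGE_SIZE - start);
		remain = byte_count - pb[0].len;
		for (j = 1; j < 8 && remain; j++) {
			pb[j].offset = 0;
			pb[j].len = remain <= PAGE_SIZE ? remain : PAGE_SIZE;
			remain -= pb[j].len;
			/* page frame of this chunk's last byte, measured
			 * back from the end of the whole range */
			pb[j].pfn = (end - remain) >> PAGE_SHIFT;
			cnt++;
		}
	}

	for (j = 0; j < cnt; j++)
		printf("buf[%u]: pfn 0x%llx offset %u len %u\n", j,
		       (unsigned long long)pb[j].pfn, pb[j].offset, pb[j].len);
	return 0;
}
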
diff --git a/drivers/staging/hv/netvsc.h b/drivers/staging/hv/netvsc.h
index c71dce5..932a77c 100644
--- a/drivers/staging/hv/netvsc.h
+++ b/drivers/staging/hv/netvsc.h
@@ -38,48 +38,48 @@
 #define NVSP_MAX_PROTOCOL_VERSION	NVSP_PROTOCOL_VERSION_1
 
 enum {
-	NvspMessageTypeNone = 0,
+	NVSP_MSG_TYPE_NONE = 0,
 
 	/* Init Messages */
-	NvspMessageTypeInit			= 1,
-	NvspMessageTypeInitComplete		= 2,
+	NVSP_MSG_TYPE_INIT			= 1,
+	NVSP_MSG_TYPE_INIT_COMPLETE		= 2,
 
-	NvspVersionMessageStart			= 100,
+	NVSP_VERSION_MSG_START			= 100,
 
 	/* Version 1 Messages */
-	NvspMessage1TypeSendNdisVersion		= NvspVersionMessageStart,
+	NVSP_MSG1_TYPE_SEND_NDIS_VER		= NVSP_VERSION_MSG_START,
 
-	NvspMessage1TypeSendReceiveBuffer,
-	NvspMessage1TypeSendReceiveBufferComplete,
-	NvspMessage1TypeRevokeReceiveBuffer,
+	NVSP_MSG1_TYPE_SEND_RECV_BUF,
+	NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE,
+	NVSP_MSG1_TYPE_REVOKE_RECV_BUF,
 
-	NvspMessage1TypeSendSendBuffer,
-	NvspMessage1TypeSendSendBufferComplete,
-	NvspMessage1TypeRevokeSendBuffer,
+	NVSP_MSG1_TYPE_SEND_SEND_BUF,
+	NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE,
+	NVSP_MSG1_TYPE_REVOKE_SEND_BUF,
 
-	NvspMessage1TypeSendRNDISPacket,
-	NvspMessage1TypeSendRNDISPacketComplete,
+	NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
+	NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
 
 	/*
 	 * This should be set to the number of messages for the version with
 	 * the maximum number of messages.
 	 */
-	NvspNumMessagePerVersion		= 9,
+	NVSP_NUM_MSG_PER_VERSION		= 9,
 };
 
 enum {
-	NvspStatusNone = 0,
-	NvspStatusSuccess,
-	NvspStatusFailure,
-	NvspStatusProtocolVersionRangeTooNew,
-	NvspStatusProtocolVersionRangeTooOld,
-	NvspStatusInvalidRndisPacket,
-	NvspStatusBusy,
-	NvspStatusMax,
+	NVSP_STAT_NONE = 0,
+	NVSP_STAT_SUCCESS,
+	NVSP_STAT_FAIL,
+	NVSP_STAT_PROTOCOL_TOO_NEW,
+	NVSP_STAT_PROTOCOL_TOO_OLD,
+	NVSP_STAT_INVALID_RNDIS_PKT,
+	NVSP_STAT_BUSY,
+	NVSP_STAT_MAX,
 };
 
 struct nvsp_message_header {
-	u32 MessageType;
+	u32 msg_type;
 };
 
 /* Init Messages */
@@ -90,8 +90,8 @@
  * versioning (i.e. this message will be the same for ever).
  */
 struct nvsp_message_init {
-	u32 MinProtocolVersion;
-	u32 MaxProtocolVersion;
+	u32 min_protocol_ver;
+	u32 max_protocol_ver;
 } __attribute__((packed));
 
 /*
@@ -100,14 +100,14 @@
  * (i.e. this message will be the same for ever).
  */
 struct nvsp_message_init_complete {
-	u32 NegotiatedProtocolVersion;
-	u32 MaximumMdlChainLength;
-	u32 Status;
+	u32 negotiated_protocol_ver;
+	u32 max_mdl_chain_len;
+	u32 status;
 } __attribute__((packed));
 
 union nvsp_message_init_uber {
-	struct nvsp_message_init Init;
-	struct nvsp_message_init_complete InitComplete;
+	struct nvsp_message_init init;
+	struct nvsp_message_init_complete init_complete;
 } __attribute__((packed));
 
 /* Version 1 Messages */
@@ -117,8 +117,8 @@
  * can use this information when handling OIDs sent by the VSC.
  */
 struct nvsp_1_message_send_ndis_version {
-	u32 NdisMajorVersion;
-	u32 NdisMinorVersion;
+	u32 ndis_major_ver;
+	u32 ndis_minor_ver;
 } __attribute__((packed));
 
 /*
@@ -126,15 +126,15 @@
  * can then use the receive buffer to send data to the VSC.
  */
 struct nvsp_1_message_send_receive_buffer {
-	u32 GpadlHandle;
-	u16 Id;
+	u32 gpadl_handle;
+	u16 id;
 } __attribute__((packed));
 
 struct nvsp_1_receive_buffer_section {
-	u32 Offset;
-	u32 SubAllocationSize;
-	u32 NumSubAllocations;
-	u32 EndOffset;
+	u32 offset;
+	u32 sub_alloc_size;
+	u32 num_sub_allocs;
+	u32 end_offset;
 } __attribute__((packed));
 
 /*
@@ -143,8 +143,8 @@
  * buffer.
  */
 struct nvsp_1_message_send_receive_buffer_complete {
-	u32 Status;
-	u32 NumSections;
+	u32 status;
+	u32 num_sections;
 
 	/*
 	 * The receive buffer is split into two parts, a large suballocation
@@ -165,7 +165,7 @@
 	 *  LargeOffset                            SmallOffset
 	 */
 
-	struct nvsp_1_receive_buffer_section Sections[1];
+	struct nvsp_1_receive_buffer_section sections[1];
 } __attribute__((packed));
 
 /*
@@ -174,7 +174,7 @@
  * again.
  */
 struct nvsp_1_message_revoke_receive_buffer {
-	u16 Id;
+	u16 id;
 };
 
 /*
@@ -182,8 +182,8 @@
  * can then use the send buffer to send data to the VSP.
  */
 struct nvsp_1_message_send_send_buffer {
-	u32 GpadlHandle;
-	u16 Id;
+	u32 gpadl_handle;
+	u16 id;
 } __attribute__((packed));
 
 /*
@@ -192,7 +192,7 @@
  * buffer.
  */
 struct nvsp_1_message_send_send_buffer_complete {
-	u32 Status;
+	u32 status;
 
 	/*
 	 * The VSC gets to choose the size of the send buffer and the VSP gets
@@ -200,7 +200,7 @@
 	 * dynamic reconfigurations when the cost of GPA-direct buffers
 	 * decreases.
 	 */
-	u32 SectionSize;
+	u32 section_size;
 } __attribute__((packed));
 
 /*
@@ -208,7 +208,7 @@
  * completes this transaction, the vsp should never use the send buffer again.
  */
 struct nvsp_1_message_revoke_send_buffer {
-	u16 Id;
+	u16 id;
 };
 
 /*
@@ -221,7 +221,7 @@
 	 * channels of communication. However, the Network VSP only has one.
 	 * Therefore, the channel travels with the RNDIS packet.
 	 */
-	u32 ChannelType;
+	u32 channel_type;
 
 	/*
 	 * This field is used to send part or all of the data through a send
@@ -229,8 +229,8 @@
 	 * index is 0xFFFFFFFF, then the send buffer is not being used and all
 	 * of the data was sent through other VMBus mechanisms.
 	 */
-	u32 SendBufferSectionIndex;
-	u32 SendBufferSectionSize;
+	u32 send_buf_section_index;
+	u32 send_buf_section_size;
 } __attribute__((packed));
 
 /*
@@ -239,35 +239,35 @@
  * message cannot use any resources associated with the original RNDIS packet.
  */
 struct nvsp_1_message_send_rndis_packet_complete {
-	u32 Status;
+	u32 status;
 };
 
 union nvsp_1_message_uber {
-	struct nvsp_1_message_send_ndis_version SendNdisVersion;
+	struct nvsp_1_message_send_ndis_version send_ndis_ver;
 
-	struct nvsp_1_message_send_receive_buffer SendReceiveBuffer;
+	struct nvsp_1_message_send_receive_buffer send_recv_buf;
 	struct nvsp_1_message_send_receive_buffer_complete
-						SendReceiveBufferComplete;
-	struct nvsp_1_message_revoke_receive_buffer RevokeReceiveBuffer;
+						send_recv_buf_complete;
+	struct nvsp_1_message_revoke_receive_buffer revoke_recv_buf;
 
-	struct nvsp_1_message_send_send_buffer SendSendBuffer;
-	struct nvsp_1_message_send_send_buffer_complete SendSendBufferComplete;
-	struct nvsp_1_message_revoke_send_buffer RevokeSendBuffer;
+	struct nvsp_1_message_send_send_buffer send_send_buf;
+	struct nvsp_1_message_send_send_buffer_complete send_send_buf_complete;
+	struct nvsp_1_message_revoke_send_buffer revoke_send_buf;
 
-	struct nvsp_1_message_send_rndis_packet SendRNDISPacket;
+	struct nvsp_1_message_send_rndis_packet send_rndis_pkt;
 	struct nvsp_1_message_send_rndis_packet_complete
-						SendRNDISPacketComplete;
+						send_rndis_pkt_complete;
 } __attribute__((packed));
 
 union nvsp_all_messages {
-	union nvsp_message_init_uber InitMessages;
-	union nvsp_1_message_uber Version1Messages;
+	union nvsp_message_init_uber init_msg;
+	union nvsp_1_message_uber v1_msg;
 } __attribute__((packed));
 
 /* ALL Messages */
 struct nvsp_message {
-	struct nvsp_message_header Header;
-	union nvsp_all_messages Messages;
+	struct nvsp_message_header hdr;
+	union nvsp_all_messages msg;
 } __attribute__((packed));
 
 
@@ -293,39 +293,39 @@
 
 /* Per netvsc channel-specific */
 struct netvsc_device {
-	struct hv_device *Device;
+	struct hv_device *dev;
 
-	atomic_t RefCount;
-	atomic_t NumOutstandingSends;
+	atomic_t refcnt;
+	atomic_t num_outstanding_sends;
 	/*
 	 * List of free preallocated hv_netvsc_packet to represent receive
 	 * packet
 	 */
-	struct list_head ReceivePacketList;
-	spinlock_t receive_packet_list_lock;
+	struct list_head recv_pkt_list;
+	spinlock_t recv_pkt_list_lock;
 
 	/* Send buffer allocated by us but managed by NetVSP */
-	void *SendBuffer;
-	u32 SendBufferSize;
-	u32 SendBufferGpadlHandle;
-	u32 SendSectionSize;
+	void *send_buf;
+	u32 send_buf_size;
+	u32 send_buf_gpadl_handle;
+	u32 send_section_size;
 
 	/* Receive buffer allocated by us but managed by NetVSP */
-	void *ReceiveBuffer;
-	u32 ReceiveBufferSize;
-	u32 ReceiveBufferGpadlHandle;
-	u32 ReceiveSectionCount;
-	struct nvsp_1_receive_buffer_section *ReceiveSections;
+	void *recv_buf;
+	u32 recv_buf_size;
+	u32 recv_buf_gpadl_handle;
+	u32 recv_section_cnt;
+	struct nvsp_1_receive_buffer_section *recv_section;
 
 	/* Used for NetVSP initialization protocol */
-	struct osd_waitevent *ChannelInitEvent;
-	struct nvsp_message ChannelInitPacket;
+	struct osd_waitevent *channel_init_event;
+	struct nvsp_message channel_init_pkt;
 
-	struct nvsp_message RevokePacket;
+	struct nvsp_message revoke_packet;
 	/* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
 
 	/* Holds rndis device info */
-	void *Extension;
+	void *extension;
 };
 
 #endif /* _NETVSC_H_ */
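
After the renames in netvsc.h, an NVSP message is addressed as hdr.msg_type plus the msg union, e.g. msg.init_msg.init for the version negotiation request. A minimal sketch of filling that request with the new field names follows; the helper name is hypothetical, and using NVSP_MAX_PROTOCOL_VERSION for both bounds is an assumption since the minimum-version constant is not shown in this hunk.

#include <linux/string.h>
#include "netvsc.h"

/* Hypothetical helper, shown only to illustrate the renamed fields */
static void fill_nvsp_init_request(struct nvsp_message *init_pkt)
{
	memset(init_pkt, 0, sizeof(struct nvsp_message));

	init_pkt->hdr.msg_type = NVSP_MSG_TYPE_INIT;

	/* Offer protocol version 1 only (min == max is an assumption) */
	init_pkt->msg.init_msg.init.min_protocol_ver =
					NVSP_MAX_PROTOCOL_VERSION;
	init_pkt->msg.init_msg.init.max_protocol_ver =
					NVSP_MAX_PROTOCOL_VERSION;
}
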
diff --git a/drivers/staging/hv/netvsc_api.h b/drivers/staging/hv/netvsc_api.h
index 4b5b3ac..b4bed36 100644
--- a/drivers/staging/hv/netvsc_api.h
+++ b/drivers/staging/hv/netvsc_api.h
@@ -32,10 +32,10 @@
 
 /* Represent the xfer page packet which contains 1 or more netvsc packet */
 struct xferpage_packet {
-	struct list_head ListEntry;
+	struct list_head list_ent;
 
 	/* # of netvsc packets this xfer packet contains */
-	u32 Count;
+	u32 count;
 };
 
 /* The number of pages which are enough to cover jumbo frame buffer. */
@@ -47,70 +47,70 @@
  */
 struct hv_netvsc_packet {
 	/* Bookkeeping stuff */
-	struct list_head ListEntry;
+	struct list_head list_ent;
 
-	struct hv_device *Device;
-	bool IsDataPacket;
+	struct hv_device *device;
+	bool is_data_pkt;
 
 	/*
 	 * Valid only for receives when we break a xfer page packet
 	 * into multiple netvsc packets
 	 */
-	struct xferpage_packet *XferPagePacket;
+	struct xferpage_packet *xfer_page_pkt;
 
 	union {
 		struct{
-			u64 ReceiveCompletionTid;
-			void *ReceiveCompletionContext;
-			void (*OnReceiveCompletion)(void *context);
-		} Recv;
+			u64 recv_completion_tid;
+			void *recv_completion_ctx;
+			void (*recv_completion)(void *context);
+		} recv;
 		struct{
-			u64 SendCompletionTid;
-			void *SendCompletionContext;
-			void (*OnSendCompletion)(void *context);
-		} Send;
-	} Completion;
+			u64 send_completion_tid;
+			void *send_completion_ctx;
+			void (*send_completion)(void *context);
+		} send;
+	} completion;
 
-	/* This points to the memory after PageBuffers */
-	void *Extension;
+	/* This points to the memory after page_buf */
+	void *extension;
 
-	u32 TotalDataBufferLength;
+	u32 total_data_buflen;
 	/* Points to the send/receive buffer where the ethernet frame is */
-	u32 PageBufferCount;
-	struct hv_page_buffer PageBuffers[NETVSC_PACKET_MAXPAGE];
+	u32 page_buf_cnt;
+	struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
 };
 
 /* Represents the net vsc driver */
 struct netvsc_driver {
 	/* Must be the first field */
 	/* Which is a bug FIXME! */
-	struct hv_driver Base;
+	struct hv_driver base;
 
-	u32 RingBufferSize;
-	u32 RequestExtSize;
+	u32 ring_buf_size;
+	u32 req_ext_size;
 
 	/*
 	 * This is set by the caller to allow us to callback when we
 	 * receive a packet from the "wire"
 	 */
-	int (*OnReceiveCallback)(struct hv_device *dev,
+	int (*recv_cb)(struct hv_device *dev,
 				 struct hv_netvsc_packet *packet);
-	void (*OnLinkStatusChanged)(struct hv_device *dev, u32 Status);
+	void (*link_status_change)(struct hv_device *dev, u32 status);
 
 	/* Specific to this driver */
-	int (*OnSend)(struct hv_device *dev, struct hv_netvsc_packet *packet);
+	int (*send)(struct hv_device *dev, struct hv_netvsc_packet *packet);
 
-	void *Context;
+	void *ctx;
 };
 
 struct netvsc_device_info {
-    unsigned char MacAddr[6];
-    bool LinkState;	/* 0 - link up, 1 - link down */
+	unsigned char mac_adr[6];
+	bool link_state;	/* 0 - link up, 1 - link down */
 };
 
 /* Interface */
-int NetVscInitialize(struct hv_driver *drv);
-int RndisFilterOnOpen(struct hv_device *Device);
-int RndisFilterOnClose(struct hv_device *Device);
+int netvsc_initialize(struct hv_driver *drv);
+int rndis_filter_open(struct hv_device *dev);
+int rndis_filter_close(struct hv_device *dev);
 
 #endif /* _NETVSC_API_H_ */
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 1415352..0147b40 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -66,6 +66,9 @@
 /* The one and only one */
 static struct netvsc_driver_context g_netvsc_drv;
 
+/* no-op so the netdev core doesn't return -EINVAL when modifying the
+ * multicast address list in SIOCADDMULTI. hv is set up to get all multicast
+ * when it calls RndisFilterOnOpen() */
 static void netvsc_set_multicast_list(struct net_device *net)
 {
 }
@@ -78,7 +81,7 @@
 
 	if (netif_carrier_ok(net)) {
 		/* Open up the device */
-		ret = RndisFilterOnOpen(device_obj);
+		ret = rndis_filter_open(device_obj);
 		if (ret != 0) {
 			DPRINT_ERR(NETVSC_DRV,
 				   "unable to open device (ret %d).", ret);
@@ -101,7 +104,7 @@
 
 	netif_stop_queue(net);
 
-	ret = RndisFilterOnClose(device_obj);
+	ret = rndis_filter_close(device_obj);
 	if (ret != 0)
 		DPRINT_ERR(NETVSC_DRV, "unable to close device (ret %d).", ret);
 
@@ -112,7 +115,7 @@
 {
 	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
 	struct sk_buff *skb = (struct sk_buff *)
-		(unsigned long)packet->Completion.Send.SendCompletionTid;
+		(unsigned long)packet->completion.send.send_completion_tid;
 
 	kfree(packet);
 
@@ -151,7 +154,7 @@
 	/* Allocate a netvsc packet based on # of frags. */
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
 			 (num_pages * sizeof(struct hv_page_buffer)) +
-			 net_drv_obj->RequestExtSize, GFP_ATOMIC);
+			 net_drv_obj->req_ext_size, GFP_ATOMIC);
 	if (!packet) {
 		/* out of memory, silently drop packet */
 		DPRINT_ERR(NETVSC_DRV, "unable to allocate hv_netvsc_packet");
@@ -161,40 +164,40 @@
 		return NETDEV_TX_OK;
 	}
 
-	packet->Extension = (void *)(unsigned long)packet +
+	packet->extension = (void *)(unsigned long)packet +
 				sizeof(struct hv_netvsc_packet) +
 				    (num_pages * sizeof(struct hv_page_buffer));
 
 	/* Setup the rndis header */
-	packet->PageBufferCount = num_pages;
+	packet->page_buf_cnt = num_pages;
 
 	/* TODO: Flush all write buffers/ memory fence ??? */
 	/* wmb(); */
 
 	/* Initialize it from the skb */
-	packet->TotalDataBufferLength	= skb->len;
+	packet->total_data_buflen	= skb->len;
 
 	/* Start filling in the page buffers starting after RNDIS buffer. */
-	packet->PageBuffers[1].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
-	packet->PageBuffers[1].Offset
+	packet->page_buf[1].Pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
+	packet->page_buf[1].Offset
 		= (unsigned long)skb->data & (PAGE_SIZE - 1);
-	packet->PageBuffers[1].Length = skb_headlen(skb);
+	packet->page_buf[1].Length = skb_headlen(skb);
 
 	/* Additional fragments are after SKB data */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
-		packet->PageBuffers[i+2].Pfn = page_to_pfn(f->page);
-		packet->PageBuffers[i+2].Offset = f->page_offset;
-		packet->PageBuffers[i+2].Length = f->size;
+		packet->page_buf[i+2].Pfn = page_to_pfn(f->page);
+		packet->page_buf[i+2].Offset = f->page_offset;
+		packet->page_buf[i+2].Length = f->size;
 	}
 
 	/* Set the completion routine */
-	packet->Completion.Send.OnSendCompletion = netvsc_xmit_completion;
-	packet->Completion.Send.SendCompletionContext = packet;
-	packet->Completion.Send.SendCompletionTid = (unsigned long)skb;
+	packet->completion.send.send_completion = netvsc_xmit_completion;
+	packet->completion.send.send_completion_ctx = packet;
+	packet->completion.send.send_completion_tid = (unsigned long)skb;
 
-	ret = net_drv_obj->OnSend(&net_device_ctx->device_ctx->device_obj,
+	ret = net_drv_obj->send(&net_device_ctx->device_ctx->device_obj,
 				  packet);
 	if (ret == 0) {
 		net->stats.tx_bytes += skb->len;
@@ -260,7 +263,7 @@
 	}
 
 	/* Allocate a skb - TODO direct I/O to pages? */
-	skb = netdev_alloc_skb_ip_align(net, packet->TotalDataBufferLength);
+	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
 	if (unlikely(!skb)) {
 		++net->stats.rx_dropped;
 		return 0;
@@ -273,17 +276,17 @@
 	 * Copy to skb. This copy is needed here since the memory pointed by
 	 * hv_netvsc_packet cannot be deallocated
 	 */
-	for (i = 0; i < packet->PageBufferCount; i++) {
-		data = kmap_atomic(pfn_to_page(packet->PageBuffers[i].Pfn),
+	for (i = 0; i < packet->page_buf_cnt; i++) {
+		data = kmap_atomic(pfn_to_page(packet->page_buf[i].Pfn),
 					       KM_IRQ1);
 		data = (void *)(unsigned long)data +
-				packet->PageBuffers[i].Offset;
+				packet->page_buf[i].Offset;
 
-		memcpy(skb_put(skb, packet->PageBuffers[i].Length), data,
-		       packet->PageBuffers[i].Length);
+		memcpy(skb_put(skb, packet->page_buf[i].Length), data,
+		       packet->page_buf[i].Length);
 
 		kunmap_atomic((void *)((unsigned long)data -
-				       packet->PageBuffers[i].Offset), KM_IRQ1);
+				       packet->page_buf[i].Offset), KM_IRQ1);
 	}
 
 	local_irq_restore(flags);
@@ -346,7 +349,7 @@
 	struct netvsc_device_info device_info;
 	int ret;
 
-	if (!net_drv_obj->Base.OnDeviceAdd)
+	if (!net_drv_obj->base.OnDeviceAdd)
 		return -1;
 
 	net = alloc_etherdev(sizeof(struct net_device_context));
@@ -363,7 +366,7 @@
 	dev_set_drvdata(device, net);
 
 	/* Notify the netvsc driver of the new device */
-	ret = net_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
+	ret = net_drv_obj->base.OnDeviceAdd(device_obj, &device_info);
 	if (ret != 0) {
 		free_netdev(net);
 		dev_set_drvdata(device, NULL);
@@ -382,10 +385,10 @@
 	 * out of sync with the device's link status
 	 */
 	if (!netif_carrier_ok(net))
-		if (!device_info.LinkState)
+		if (!device_info.link_state)
 			netif_carrier_on(net);
 
-	memcpy(net->dev_addr, device_info.MacAddr, ETH_ALEN);
+	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
 	net->netdev_ops = &device_ops;
 
@@ -398,7 +401,7 @@
 	ret = register_netdev(net);
 	if (ret != 0) {
 		/* Remove the device and release the resource */
-		net_drv_obj->Base.OnDeviceRemove(device_obj);
+		net_drv_obj->base.OnDeviceRemove(device_obj);
 		free_netdev(net);
 	}
 
@@ -422,7 +425,7 @@
 		return 0;
 	}
 
-	if (!net_drv_obj->Base.OnDeviceRemove)
+	if (!net_drv_obj->base.OnDeviceRemove)
 		return -1;
 
 	/* Stop outbound asap */
@@ -435,7 +438,7 @@
 	 * Call to the vsc driver to let it know that the device is being
 	 * removed
 	 */
-	ret = net_drv_obj->Base.OnDeviceRemove(device_obj);
+	ret = net_drv_obj->base.OnDeviceRemove(device_obj);
 	if (ret != 0) {
 		/* TODO: */
 		DPRINT_ERR(NETVSC, "unable to remove vsc device (ret %d)", ret);
@@ -481,8 +484,8 @@
 		device_unregister(current_dev);
 	}
 
-	if (netvsc_drv_obj->Base.OnCleanup)
-		netvsc_drv_obj->Base.OnCleanup(&netvsc_drv_obj->Base);
+	if (netvsc_drv_obj->base.OnCleanup)
+		netvsc_drv_obj->base.OnCleanup(&netvsc_drv_obj->base);
 
 	vmbus_child_driver_unregister(drv_ctx);
 
@@ -495,15 +498,15 @@
 	struct driver_context *drv_ctx = &g_netvsc_drv.drv_ctx;
 	int ret;
 
-	net_drv_obj->RingBufferSize = ring_size * PAGE_SIZE;
-	net_drv_obj->OnReceiveCallback = netvsc_recv_callback;
-	net_drv_obj->OnLinkStatusChanged = netvsc_linkstatus_callback;
+	net_drv_obj->ring_buf_size = ring_size * PAGE_SIZE;
+	net_drv_obj->recv_cb = netvsc_recv_callback;
+	net_drv_obj->link_status_change = netvsc_linkstatus_callback;
 
 	/* Callback to client driver to complete the initialization */
-	drv_init(&net_drv_obj->Base);
+	drv_init(&net_drv_obj->base);
 
-	drv_ctx->driver.name = net_drv_obj->Base.name;
-	memcpy(&drv_ctx->class_id, &net_drv_obj->Base.deviceType,
+	drv_ctx->driver.name = net_drv_obj->base.name;
+	memcpy(&drv_ctx->class_id, &net_drv_obj->base.deviceType,
 	       sizeof(struct hv_guid));
 
 	drv_ctx->probe = netvsc_probe;
@@ -536,7 +539,7 @@
 	if (!dmi_check_system(hv_netvsc_dmi_table))
 		return -ENODEV;
 
-	return netvsc_drv_init(NetVscInitialize);
+	return netvsc_drv_init(netvsc_initialize);
 }
 
 static void __exit netvsc_exit(void)
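
The transmit path in netvsc_drv.c above carves a single kzalloc() block into three regions: the struct hv_netvsc_packet header, num_pages page-buffer slots, and req_ext_size bytes for the RNDIS message, with packet->extension pointing at that last region. The hypothetical helper below sketches the same layout under those assumptions; it is an illustration, not driver code.

#include <linux/slab.h>
#include <linux/types.h>
#include "netvsc_api.h"

/* Illustrative helper, not part of the patch */
static struct hv_netvsc_packet *alloc_xmit_packet(u32 num_pages,
						  u32 req_ext_size)
{
	struct hv_netvsc_packet *packet;
	size_t head = sizeof(struct hv_netvsc_packet) +
		      num_pages * sizeof(struct hv_page_buffer);

	packet = kzalloc(head + req_ext_size, GFP_ATOMIC);
	if (!packet)
		return NULL;

	packet->page_buf_cnt = num_pages;
	/* The RNDIS header area begins right after the page-buffer slots */
	packet->extension = (void *)((unsigned long)packet + head);
	return packet;
}
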
diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c
index 8c3eb27..b5a3940 100644
--- a/drivers/staging/hv/osd.c
+++ b/drivers/staging/hv/osd.c
@@ -43,13 +43,7 @@
 #include <linux/slab.h>
 #include "osd.h"
 
-struct osd_callback_struct {
-	struct work_struct work;
-	void (*callback)(void *);
-	void *data;
-};
-
-void *osd_VirtualAllocExec(unsigned int size)
+void *osd_virtual_alloc_exec(unsigned int size)
 {
 #ifdef __x86_64__
 	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
@@ -60,7 +54,7 @@
 }
 
 /**
- * osd_PageAlloc() - Allocate pages
+ * osd_page_alloc() - Allocate pages
  * @count:      Total number of Kernel pages you want to allocate
  *
  * Tries to allocate @count number of consecutive free kernel pages.
@@ -68,7 +62,7 @@
  * If successful it will return a pointer to the @count pages.
  * Mainly used by Hyper-V drivers.
  */
-void *osd_PageAlloc(unsigned int count)
+void *osd_page_alloc(unsigned int count)
 {
 	void *p;
 
@@ -85,26 +79,26 @@
 	/* if (p) memset(p, 0, PAGE_SIZE); */
 	/* return p; */
 }
-EXPORT_SYMBOL_GPL(osd_PageAlloc);
+EXPORT_SYMBOL_GPL(osd_page_alloc);
 
 /**
- * osd_PageFree() - Free pages
+ * osd_page_free() - Free pages
  * @page:       Pointer to the first page to be freed
  * @count:      Total number of Kernel pages you free
  *
- * Frees the pages allocated by osd_PageAlloc()
+ * Frees the pages allocated by osd_page_alloc()
  * Mainly used by Hyper-V drivers.
  */
-void osd_PageFree(void *page, unsigned int count)
+void osd_page_free(void *page, unsigned int count)
 {
 	free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
 	/*struct page* p = virt_to_page(page);
 	__free_page(p);*/
 }
-EXPORT_SYMBOL_GPL(osd_PageFree);
+EXPORT_SYMBOL_GPL(osd_page_free);
 
 /**
- * osd_WaitEventCreate() - Create the event queue
+ * osd_waitevent_create() - Create the event queue
  *
  * Allocates memory for a &struct osd_waitevent. And then calls
  * init_waitqueue_head to set up the wait queue for the event.
@@ -114,7 +108,7 @@
  * Returns pointer to &struct osd_waitevent
  * Mainly used by Hyper-V drivers.
  */
-struct osd_waitevent *osd_WaitEventCreate(void)
+struct osd_waitevent *osd_waitevent_create(void)
 {
 	struct osd_waitevent *wait = kmalloc(sizeof(struct osd_waitevent),
 					     GFP_KERNEL);
@@ -125,14 +119,14 @@
 	init_waitqueue_head(&wait->event);
 	return wait;
 }
-EXPORT_SYMBOL_GPL(osd_WaitEventCreate);
+EXPORT_SYMBOL_GPL(osd_waitevent_create);
 
 
 /**
- * osd_WaitEventSet() - Wake up the process
- * @waitEvent: Structure to event to be woken up
+ * osd_waitevent_set() - Wake up the process
+ * @wait_event: Structure to event to be woken up
  *
- * @waitevent is of type &struct osd_waitevent
+ * @wait_event is of type &struct osd_waitevent
  *
  * Wake up the sleeping process so it can do some work.
  * And set condition indicator in &struct osd_waitevent to indicate
@@ -140,18 +134,18 @@
  *
  * Only used by Network and Storage Hyper-V drivers.
  */
-void osd_WaitEventSet(struct osd_waitevent *waitEvent)
+void osd_waitevent_set(struct osd_waitevent *wait_event)
 {
-	waitEvent->condition = 1;
-	wake_up_interruptible(&waitEvent->event);
+	wait_event->condition = 1;
+	wake_up_interruptible(&wait_event->event);
 }
-EXPORT_SYMBOL_GPL(osd_WaitEventSet);
+EXPORT_SYMBOL_GPL(osd_waitevent_set);
 
 /**
- * osd_WaitEventWait() - Wait for event till condition is true
- * @waitEvent: Structure to event to be put to sleep
+ * osd_waitevent_wait() - Wait for event till condition is true
+ * @wait_event: Structure to event to be put to sleep
  *
- * @waitevent is of type &struct osd_waitevent
+ * @wait_event is of type &struct osd_waitevent
  *
  * Set up the process to sleep until waitEvent->condition becomes true.
  * And set condition indicator in &struct osd_waitevent to indicate
@@ -161,25 +155,25 @@
  *
  * Mainly used by Hyper-V drivers.
  */
-int osd_WaitEventWait(struct osd_waitevent *waitEvent)
+int osd_waitevent_wait(struct osd_waitevent *wait_event)
 {
 	int ret = 0;
 
-	ret = wait_event_interruptible(waitEvent->event,
-				       waitEvent->condition);
-	waitEvent->condition = 0;
+	ret = wait_event_interruptible(wait_event->event,
+				       wait_event->condition);
+	wait_event->condition = 0;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(osd_WaitEventWait);
+EXPORT_SYMBOL_GPL(osd_waitevent_wait);
 
 /**
- * osd_WaitEventWaitEx() - Wait for event or timeout for process wakeup
- * @waitEvent: Structure to event to be put to sleep
- * @TimeoutInMs:       Total number of Milliseconds to wait before waking up
+ * osd_waitevent_waitex() - Wait for event or timeout for process wakeup
+ * @wait_event: Structure to event to be put to sleep
+ * @timeout_in_ms:       Total number of Milliseconds to wait before waking up
  *
- * @waitevent is of type &struct osd_waitevent
+ * @wait_event is of type &struct osd_waitevent
  * Set up the process to sleep until @waitEvent->condition becomes true or
- * @TimeoutInMs (Time out in Milliseconds) has been reached.
+ * @timeout_in_ms (Time out in Milliseconds) has been reached.
  * And set condition indicator in &struct osd_waitevent to indicate
  * the process is in a sleeping state.
  *
@@ -187,42 +181,14 @@
  *
  * Mainly used by Hyper-V drivers.
  */
-int osd_WaitEventWaitEx(struct osd_waitevent *waitEvent, u32 TimeoutInMs)
+int osd_waitevent_waitex(struct osd_waitevent *wait_event, u32 timeout_in_ms)
 {
 	int ret = 0;
 
-	ret = wait_event_interruptible_timeout(waitEvent->event,
-					       waitEvent->condition,
-					       msecs_to_jiffies(TimeoutInMs));
-	waitEvent->condition = 0;
+	ret = wait_event_interruptible_timeout(wait_event->event,
+					       wait_event->condition,
+					       msecs_to_jiffies(timeout_in_ms));
+	wait_event->condition = 0;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(osd_WaitEventWaitEx);
-
-static void osd_callback_work(struct work_struct *work)
-{
-	struct osd_callback_struct *cb = container_of(work,
-						struct osd_callback_struct,
-						work);
-	(cb->callback)(cb->data);
-	kfree(cb);
-}
-
-int osd_schedule_callback(struct workqueue_struct *wq,
-			  void (*func)(void *),
-			  void *data)
-{
-	struct osd_callback_struct *cb;
-
-	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
-	if (!cb) {
-		printk(KERN_ERR "unable to allocate memory in osd_schedule_callback\n");
-		return -1;
-	}
-
-	cb->callback = func;
-	cb->data = data;
-	INIT_WORK(&cb->work, osd_callback_work);
-	return queue_work(wq, &cb->work);
-}
-
+EXPORT_SYMBOL_GPL(osd_waitevent_waitex);
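
The renamed osd_waitevent API keeps its request/response shape: one side allocates the event and sleeps in osd_waitevent_wait() or osd_waitevent_waitex(), while the completion path wakes it with osd_waitevent_set(). A minimal sketch of that pattern follows; the two functions and the 5000 ms timeout are illustrative assumptions, not code from the driver.

#include <linux/errno.h>
#include "osd.h"

static struct osd_waitevent *init_event;

/* Requester side: allocate the event, post a request, then block */
static int send_request_and_wait(void)
{
	int ret;

	init_event = osd_waitevent_create();
	if (!init_event)
		return -ENOMEM;

	/* ... queue the request on the channel here ... */

	/* >0 means signalled, ==0 timeout, <0 interrupted (see osd.h) */
	ret = osd_waitevent_waitex(init_event, 5000);
	return ret > 0 ? 0 : -ETIMEDOUT;
}

/* Completion side: runs from the channel callback when the host replies */
static void on_host_reply(void)
{
	osd_waitevent_set(init_event);
}
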
diff --git a/drivers/staging/hv/osd.h b/drivers/staging/hv/osd.h
index ce064e8..870ef07 100644
--- a/drivers/staging/hv/osd.h
+++ b/drivers/staging/hv/osd.h
@@ -50,21 +50,17 @@
 
 /* Osd routines */
 
-extern void *osd_VirtualAllocExec(unsigned int size);
+extern void *osd_virtual_alloc_exec(unsigned int size);
 
-extern void *osd_PageAlloc(unsigned int count);
-extern void osd_PageFree(void *page, unsigned int count);
+extern void *osd_page_alloc(unsigned int count);
+extern void osd_page_free(void *page, unsigned int count);
 
-extern struct osd_waitevent *osd_WaitEventCreate(void);
-extern void osd_WaitEventSet(struct osd_waitevent *waitEvent);
-extern int osd_WaitEventWait(struct osd_waitevent *waitEvent);
+extern struct osd_waitevent *osd_waitevent_create(void);
+extern void osd_waitevent_set(struct osd_waitevent *wait_event);
+extern int osd_waitevent_wait(struct osd_waitevent *wait_event);
 
-/* If >0, waitEvent got signaled. If ==0, timeout. If < 0, error */
-extern int osd_WaitEventWaitEx(struct osd_waitevent *waitEvent,
-			       u32 TimeoutInMs);
-
-int osd_schedule_callback(struct workqueue_struct *wq,
-			  void (*func)(void *),
-			  void *data);
+/* If >0, wait_event got signaled. If ==0, timeout. If < 0, error */
+extern int osd_waitevent_waitex(struct osd_waitevent *wait_event,
+			       u32 timeout_in_ms);
 
 #endif /* _OSD_H_ */
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index d78c569..4d53392 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -38,7 +38,7 @@
 /*++
 
 Name:
-	GetRingBufferAvailBytes()
+	get_ringbuffer_availbytes()
 
 Description:
 	Get number of bytes available to read and to write to
@@ -46,33 +46,34 @@
 
 --*/
 static inline void
-GetRingBufferAvailBytes(struct hv_ring_buffer_info *rbi, u32 *read, u32 *write)
+get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+			  u32 *read, u32 *write)
 {
 	u32 read_loc, write_loc;
 
 	/* Capture the read/write indices before they changed */
-	read_loc = rbi->RingBuffer->ReadIndex;
-	write_loc = rbi->RingBuffer->WriteIndex;
+	read_loc = rbi->ring_buffer->read_index;
+	write_loc = rbi->ring_buffer->write_index;
 
-	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->RingDataSize);
-	*read = rbi->RingDataSize - *write;
+	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
+	*read = rbi->ring_datasize - *write;
 }
 
 /*++
 
 Name:
-	GetNextWriteLocation()
+	get_next_write_location()
 
 Description:
 	Get the next write location for the specified ring buffer
 
 --*/
 static inline u32
-GetNextWriteLocation(struct hv_ring_buffer_info *RingInfo)
+get_next_write_location(struct hv_ring_buffer_info *ring_info)
 {
-	u32 next = RingInfo->RingBuffer->WriteIndex;
+	u32 next = ring_info->ring_buffer->write_index;
 
-	/* ASSERT(next < RingInfo->RingDataSize); */
+	/* ASSERT(next < ring_info->RingDataSize); */
 
 	return next;
 }
@@ -80,34 +81,34 @@
 /*++
 
 Name:
-	SetNextWriteLocation()
+	set_next_write_location()
 
 Description:
 	Set the next write location for the specified ring buffer
 
 --*/
 static inline void
-SetNextWriteLocation(struct hv_ring_buffer_info *RingInfo,
-		     u32 NextWriteLocation)
+set_next_write_location(struct hv_ring_buffer_info *ring_info,
+		     u32 next_write_location)
 {
-	RingInfo->RingBuffer->WriteIndex = NextWriteLocation;
+	ring_info->ring_buffer->write_index = next_write_location;
 }
 
 /*++
 
 Name:
-	GetNextReadLocation()
+	get_next_read_location()
 
 Description:
 	Get the next read location for the specified ring buffer
 
 --*/
 static inline u32
-GetNextReadLocation(struct hv_ring_buffer_info *RingInfo)
+get_next_read_location(struct hv_ring_buffer_info *ring_info)
 {
-	u32 next = RingInfo->RingBuffer->ReadIndex;
+	u32 next = ring_info->ring_buffer->read_index;
 
-	/* ASSERT(next < RingInfo->RingDataSize); */
+	/* ASSERT(next < ring_info->RingDataSize); */
 
 	return next;
 }
@@ -115,7 +116,7 @@
 /*++
 
 Name:
-	GetNextReadLocationWithOffset()
+	get_next_readlocation_withoffset()
 
 Description:
 	Get the next read location + offset for the specified ring buffer.
@@ -123,13 +124,14 @@
 
 --*/
 static inline u32
-GetNextReadLocationWithOffset(struct hv_ring_buffer_info *RingInfo, u32 Offset)
+get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
+				 u32 offset)
 {
-	u32 next = RingInfo->RingBuffer->ReadIndex;
+	u32 next = ring_info->ring_buffer->read_index;
 
-	/* ASSERT(next < RingInfo->RingDataSize); */
-	next += Offset;
-	next %= RingInfo->RingDataSize;
+	/* ASSERT(next < ring_info->RingDataSize); */
+	next += offset;
+	next %= ring_info->ring_datasize;
 
 	return next;
 }
@@ -137,141 +139,145 @@
 /*++
 
 Name:
-	SetNextReadLocation()
+	set_next_read_location()
 
 Description:
 	Set the next read location for the specified ring buffer
 
 --*/
 static inline void
-SetNextReadLocation(struct hv_ring_buffer_info *RingInfo, u32 NextReadLocation)
+set_next_read_location(struct hv_ring_buffer_info *ring_info,
+		    u32 next_read_location)
 {
-	RingInfo->RingBuffer->ReadIndex = NextReadLocation;
+	ring_info->ring_buffer->read_index = next_read_location;
 }
 
 
 /*++
 
 Name:
-	GetRingBuffer()
+	get_ring_buffer()
 
 Description:
 	Get the start of the ring buffer
 
 --*/
 static inline void *
-GetRingBuffer(struct hv_ring_buffer_info *RingInfo)
+get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 {
-	return (void *)RingInfo->RingBuffer->Buffer;
+	return (void *)ring_info->ring_buffer->buffer;
 }
 
 
 /*++
 
 Name:
-	GetRingBufferSize()
+	get_ring_buffersize()
 
 Description:
 	Get the size of the ring buffer
 
 --*/
 static inline u32
-GetRingBufferSize(struct hv_ring_buffer_info *RingInfo)
+get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
 {
-	return RingInfo->RingDataSize;
+	return ring_info->ring_datasize;
 }
 
 /*++
 
 Name:
-	GetRingBufferIndices()
+	get_ring_bufferindices()
 
 Description:
 	Get the read and write indices as u64 of the specified ring buffer
 
 --*/
 static inline u64
-GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
+get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
 {
-	return (u64)RingInfo->RingBuffer->WriteIndex << 32;
+	return (u64)ring_info->ring_buffer->write_index << 32;
 }
 
 
 /*++
 
 Name:
-	DumpRingInfo()
+	dump_ring_info()
 
 Description:
 	Dump out to console the ring buffer info
 
 --*/
-void DumpRingInfo(struct hv_ring_buffer_info *RingInfo, char *Prefix)
+void dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
 {
-	u32 bytesAvailToWrite;
-	u32 bytesAvailToRead;
+	u32 bytes_avail_towrite;
+	u32 bytes_avail_toread;
 
-	GetRingBufferAvailBytes(RingInfo,
-	&bytesAvailToRead,
-	&bytesAvailToWrite);
+	get_ringbuffer_availbytes(ring_info,
+	&bytes_avail_toread,
+	&bytes_avail_towrite);
 
 	DPRINT(VMBUS,
 		DEBUG_RING_LVL,
 		"%s <<ringinfo %p buffer %p avail write %u "
 		"avail read %u read idx %u write idx %u>>",
-		Prefix,
-		RingInfo,
-		RingInfo->RingBuffer->Buffer,
-		bytesAvailToWrite,
-		bytesAvailToRead,
-		RingInfo->RingBuffer->ReadIndex,
-		RingInfo->RingBuffer->WriteIndex);
+		prefix,
+		ring_info,
+		ring_info->ring_buffer->buffer,
+		bytes_avail_towrite,
+		bytes_avail_toread,
+		ring_info->ring_buffer->read_index,
+		ring_info->ring_buffer->write_index);
 }
 
 
 /* Internal routines */
 
 static u32
-CopyToRingBuffer(
-	struct hv_ring_buffer_info	*RingInfo,
-	u32				StartWriteOffset,
-	void				*Src,
-	u32				SrcLen);
+copyto_ringbuffer(
+	struct hv_ring_buffer_info	*ring_info,
+	u32				start_write_offset,
+	void				*src,
+	u32				srclen);
 
 static u32
-CopyFromRingBuffer(
-	struct hv_ring_buffer_info	*RingInfo,
-	void				*Dest,
-	u32				DestLen,
-	u32				StartReadOffset);
+copyfrom_ringbuffer(
+	struct hv_ring_buffer_info	*ring_info,
+	void				*dest,
+	u32				destlen,
+	u32				start_read_offset);
 
 
 
 /*++
 
 Name:
-	RingBufferGetDebugInfo()
+	ringbuffer_get_debuginfo()
 
 Description:
 	Get various debug metrics for the specified ring buffer
 
 --*/
-void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
+void ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 			    struct hv_ring_buffer_debug_info *debug_info)
 {
-	u32 bytesAvailToWrite;
-	u32 bytesAvailToRead;
+	u32 bytes_avail_towrite;
+	u32 bytes_avail_toread;
 
-	if (RingInfo->RingBuffer) {
-		GetRingBufferAvailBytes(RingInfo,
-					&bytesAvailToRead,
-					&bytesAvailToWrite);
+	if (ring_info->ring_buffer) {
+		get_ringbuffer_availbytes(ring_info,
+					&bytes_avail_toread,
+					&bytes_avail_towrite);
 
-		debug_info->BytesAvailToRead = bytesAvailToRead;
-		debug_info->BytesAvailToWrite = bytesAvailToWrite;
-		debug_info->CurrentReadIndex = RingInfo->RingBuffer->ReadIndex;
-		debug_info->CurrentWriteIndex = RingInfo->RingBuffer->WriteIndex;
-		debug_info->CurrentInterruptMask = RingInfo->RingBuffer->InterruptMask;
+		debug_info->bytes_avail_toread = bytes_avail_toread;
+		debug_info->bytes_avail_towrite = bytes_avail_towrite;
+		debug_info->current_read_index =
+			ring_info->ring_buffer->read_index;
+		debug_info->current_write_index =
+			ring_info->ring_buffer->write_index;
+		debug_info->current_interrupt_mask =
+			ring_info->ring_buffer->interrupt_mask;
 	}
 }
 
@@ -279,40 +285,42 @@
 /*++
 
 Name:
-	GetRingBufferInterruptMask()
+	get_ringbuffer_interrupt_mask()
 
 Description:
 	Get the interrupt mask for the specified ring buffer
 
 --*/
-u32 GetRingBufferInterruptMask(struct hv_ring_buffer_info *rbi)
+u32 get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
 {
-	return rbi->RingBuffer->InterruptMask;
+	return rbi->ring_buffer->interrupt_mask;
 }
 
 /*++
 
 Name:
-	RingBufferInit()
+	ringbuffer_init()
 
 Description:
 	Initialize the ring buffer
 
 --*/
-int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer, u32 BufferLen)
+int ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+		   void *buffer, u32 buflen)
 {
 	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
 		return -EINVAL;
 
-	memset(RingInfo, 0, sizeof(struct hv_ring_buffer_info));
+	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
 
-	RingInfo->RingBuffer = (struct hv_ring_buffer *)Buffer;
-	RingInfo->RingBuffer->ReadIndex = RingInfo->RingBuffer->WriteIndex = 0;
+	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
+	ring_info->ring_buffer->read_index =
+		ring_info->ring_buffer->write_index = 0;
 
-	RingInfo->RingSize = BufferLen;
-	RingInfo->RingDataSize = BufferLen - sizeof(struct hv_ring_buffer);
+	ring_info->ring_size = buflen;
+	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
 
-	spin_lock_init(&RingInfo->ring_lock);
+	spin_lock_init(&ring_info->ring_lock);
 
 	return 0;
 }
@@ -320,97 +328,97 @@
 /*++
 
 Name:
-	RingBufferCleanup()
+	ringbuffer_cleanup()
 
 Description:
 	Cleanup the ring buffer
 
 --*/
-void RingBufferCleanup(struct hv_ring_buffer_info *RingInfo)
+void ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
 }
 
 /*++
 
 Name:
-	RingBufferWrite()
+	ringbuffer_write()
 
 Description:
 	Write to the ring buffer
 
 --*/
-int RingBufferWrite(struct hv_ring_buffer_info *OutRingInfo,
+int ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 		    struct scatterlist *sglist, u32 sgcount)
 {
 	int i = 0;
-	u32 byteAvailToWrite;
-	u32 byteAvailToRead;
-	u32 totalBytesToWrite = 0;
+	u32 bytes_avail_towrite;
+	u32 bytes_avail_toread;
+	u32 totalbytes_towrite = 0;
 
 	struct scatterlist *sg;
-	volatile u32 nextWriteLocation;
-	u64 prevIndices = 0;
+	volatile u32 next_write_location;
+	u64 prev_indices = 0;
 	unsigned long flags;
 
 	for_each_sg(sglist, sg, sgcount, i)
 	{
-		totalBytesToWrite += sg->length;
+		totalbytes_towrite += sg->length;
 	}
 
-	totalBytesToWrite += sizeof(u64);
+	totalbytes_towrite += sizeof(u64);
 
-	spin_lock_irqsave(&OutRingInfo->ring_lock, flags);
+	spin_lock_irqsave(&outring_info->ring_lock, flags);
 
-	GetRingBufferAvailBytes(OutRingInfo,
-				&byteAvailToRead,
-				&byteAvailToWrite);
+	get_ringbuffer_availbytes(outring_info,
+				&bytes_avail_toread,
+				&bytes_avail_towrite);
 
-	DPRINT_DBG(VMBUS, "Writing %u bytes...", totalBytesToWrite);
+	DPRINT_DBG(VMBUS, "Writing %u bytes...", totalbytes_towrite);
 
-	/* DumpRingInfo(OutRingInfo, "BEFORE "); */
+	/* dump_ring_info(outring_info, "BEFORE "); */
 
 	/* If there is only room for the packet, assume it is full. */
 	/* Otherwise, the next time around, we think the ring buffer */
 	/* is empty since the read index == write index */
-	if (byteAvailToWrite <= totalBytesToWrite) {
+	if (bytes_avail_towrite <= totalbytes_towrite) {
 		DPRINT_DBG(VMBUS,
 			"No more space left on outbound ring buffer "
 			"(needed %u, avail %u)",
-			totalBytesToWrite,
-			byteAvailToWrite);
+			totalbytes_towrite,
+			bytes_avail_towrite);
 
-		spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -1;
 	}
 
 	/* Write to the ring buffer */
-	nextWriteLocation = GetNextWriteLocation(OutRingInfo);
+	next_write_location = get_next_write_location(outring_info);
 
 	for_each_sg(sglist, sg, sgcount, i)
 	{
-		nextWriteLocation = CopyToRingBuffer(OutRingInfo,
-						     nextWriteLocation,
+		next_write_location = copyto_ringbuffer(outring_info,
+						     next_write_location,
 						     sg_virt(sg),
 						     sg->length);
 	}
 
 	/* Set previous packet start */
-	prevIndices = GetRingBufferIndices(OutRingInfo);
+	prev_indices = get_ring_bufferindices(outring_info);
 
-	nextWriteLocation = CopyToRingBuffer(OutRingInfo,
-					     nextWriteLocation,
-					     &prevIndices,
+	next_write_location = copyto_ringbuffer(outring_info,
+					     next_write_location,
+					     &prev_indices,
 					     sizeof(u64));
 
 	/* Make sure we flush all writes before updating the writeIndex */
 	mb();
 
 	/* Now, update the write location */
-	SetNextWriteLocation(OutRingInfo, nextWriteLocation);
+	set_next_write_location(outring_info, next_write_location);
 
-	/* DumpRingInfo(OutRingInfo, "AFTER "); */
+	/* dump_ring_info(outring_info, "AFTER "); */
 
-	spin_unlock_irqrestore(&OutRingInfo->ring_lock, flags);
+	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 	return 0;
 }
 
@@ -418,47 +426,48 @@
 /*++
 
 Name:
-	RingBufferPeek()
+	ringbuffer_peek()
 
 Description:
 	Read without advancing the read index
 
 --*/
-int RingBufferPeek(struct hv_ring_buffer_info *InRingInfo, void *Buffer, u32 BufferLen)
+int ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
+		   void *Buffer, u32 buflen)
 {
-	u32 bytesAvailToWrite;
-	u32 bytesAvailToRead;
-	u32 nextReadLocation = 0;
+	u32 bytes_avail_towrite;
+	u32 bytes_avail_toread;
+	u32 next_read_location = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&InRingInfo->ring_lock, flags);
+	spin_lock_irqsave(&Inring_info->ring_lock, flags);
 
-	GetRingBufferAvailBytes(InRingInfo,
-				&bytesAvailToRead,
-				&bytesAvailToWrite);
+	get_ringbuffer_availbytes(Inring_info,
+				&bytes_avail_toread,
+				&bytes_avail_towrite);
 
 	/* Make sure there is something to read */
-	if (bytesAvailToRead < BufferLen) {
+	if (bytes_avail_toread < buflen) {
 		/* DPRINT_DBG(VMBUS,
 			"got callback but not enough to read "
 			"<avail to read %d read size %d>!!",
-			bytesAvailToRead,
+			bytes_avail_toread,
 			BufferLen); */
 
-		spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
 
 		return -1;
 	}
 
 	/* Convert to byte offset */
-	nextReadLocation = GetNextReadLocation(InRingInfo);
+	next_read_location = get_next_read_location(Inring_info);
 
-	nextReadLocation = CopyFromRingBuffer(InRingInfo,
+	next_read_location = copyfrom_ringbuffer(Inring_info,
 						Buffer,
-						BufferLen,
-						nextReadLocation);
+						buflen,
+						next_read_location);
 
-	spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
 
 	return 0;
 }
@@ -467,58 +476,59 @@
 /*++
 
 Name:
-	RingBufferRead()
+	ringbuffer_read()
 
 Description:
 	Read and advance the read index
 
 --*/
-int RingBufferRead(struct hv_ring_buffer_info *InRingInfo, void *Buffer,
-		   u32 BufferLen, u32 Offset)
+int ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+		   u32 buflen, u32 offset)
 {
-	u32 bytesAvailToWrite;
-	u32 bytesAvailToRead;
-	u32 nextReadLocation = 0;
-	u64 prevIndices = 0;
+	u32 bytes_avail_towrite;
+	u32 bytes_avail_toread;
+	u32 next_read_location = 0;
+	u64 prev_indices = 0;
 	unsigned long flags;
 
-	if (BufferLen <= 0)
+	if (buflen <= 0)
 		return -EINVAL;
 
-	spin_lock_irqsave(&InRingInfo->ring_lock, flags);
+	spin_lock_irqsave(&inring_info->ring_lock, flags);
 
-	GetRingBufferAvailBytes(InRingInfo,
-				&bytesAvailToRead,
-				&bytesAvailToWrite);
+	get_ringbuffer_availbytes(inring_info,
+				&bytes_avail_toread,
+				&bytes_avail_towrite);
 
-	DPRINT_DBG(VMBUS, "Reading %u bytes...", BufferLen);
+	DPRINT_DBG(VMBUS, "Reading %u bytes...", buflen);
 
-	/* DumpRingInfo(InRingInfo, "BEFORE "); */
+	/* dump_ring_info(inring_info, "BEFORE "); */
 
 	/* Make sure there is something to read */
-	if (bytesAvailToRead < BufferLen) {
+	if (bytes_avail_toread < buflen) {
 		DPRINT_DBG(VMBUS,
 			"got callback but not enough to read "
 			"<avail to read %d read size %d>!!",
-			bytesAvailToRead,
-			BufferLen);
+			bytes_avail_toread,
+			buflen);
 
-		spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
 		return -1;
 	}
 
-	nextReadLocation = GetNextReadLocationWithOffset(InRingInfo, Offset);
+	next_read_location =
+		get_next_readlocation_withoffset(inring_info, offset);
 
-	nextReadLocation = CopyFromRingBuffer(InRingInfo,
-						Buffer,
-						BufferLen,
-						nextReadLocation);
+	next_read_location = copyfrom_ringbuffer(inring_info,
+						buffer,
+						buflen,
+						next_read_location);
 
-	nextReadLocation = CopyFromRingBuffer(InRingInfo,
-						&prevIndices,
+	next_read_location = copyfrom_ringbuffer(inring_info,
+						&prev_indices,
 						sizeof(u64),
-						nextReadLocation);
+						next_read_location);
 
 	/* Make sure all reads are done before we update the read index since */
 	/* the writer may start writing to the read area once the read index */
@@ -526,11 +536,11 @@
 	mb();
 
 	/* Update the read index */
-	SetNextReadLocation(InRingInfo, nextReadLocation);
+	set_next_read_location(inring_info, next_read_location);
 
-	/* DumpRingInfo(InRingInfo, "AFTER "); */
+	/* dump_ring_info(inring_info, "AFTER "); */
 
-	spin_unlock_irqrestore(&InRingInfo->ring_lock, flags);
+	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
 	return 0;
 }
@@ -539,7 +549,7 @@
 /*++
 
 Name:
-	CopyToRingBuffer()
+	copyto_ringbuffer()
 
 Description:
 	Helper routine to copy from source to ring buffer.
@@ -547,37 +557,37 @@
 
 --*/
 static u32
-CopyToRingBuffer(
-	struct hv_ring_buffer_info	*RingInfo,
-	u32				StartWriteOffset,
-	void				*Src,
-	u32				SrcLen)
+copyto_ringbuffer(
+	struct hv_ring_buffer_info	*ring_info,
+	u32				start_write_offset,
+	void				*src,
+	u32				srclen)
 {
-	void *ringBuffer = GetRingBuffer(RingInfo);
-	u32 ringBufferSize = GetRingBufferSize(RingInfo);
-	u32 fragLen;
+	void *ring_buffer = get_ring_buffer(ring_info);
+	u32 ring_buffer_size = get_ring_buffersize(ring_info);
+	u32 frag_len;
 
 	/* wrap-around detected! */
-	if (SrcLen > ringBufferSize - StartWriteOffset) {
+	if (srclen > ring_buffer_size - start_write_offset) {
 		DPRINT_DBG(VMBUS, "wrap-around detected!");
 
-		fragLen = ringBufferSize - StartWriteOffset;
-		memcpy(ringBuffer + StartWriteOffset, Src, fragLen);
-		memcpy(ringBuffer, Src + fragLen, SrcLen - fragLen);
+		frag_len = ring_buffer_size - start_write_offset;
+		memcpy(ring_buffer + start_write_offset, src, frag_len);
+		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
 	} else
-		memcpy(ringBuffer + StartWriteOffset, Src, SrcLen);
+		memcpy(ring_buffer + start_write_offset, src, srclen);
 
-	StartWriteOffset += SrcLen;
-	StartWriteOffset %= ringBufferSize;
+	start_write_offset += srclen;
+	start_write_offset %= ring_buffer_size;
 
-	return StartWriteOffset;
+	return start_write_offset;
 }
 
 
 /*++
 
 Name:
-	CopyFromRingBuffer()
+	copyfrom_ringbuffer()
 
 Description:
 	Helper routine to copy to source from ring buffer.
@@ -585,34 +595,34 @@
 
 --*/
 static u32
-CopyFromRingBuffer(
-	struct hv_ring_buffer_info	*RingInfo,
-	void				*Dest,
-	u32				DestLen,
-	u32				StartReadOffset)
+copyfrom_ringbuffer(
+	struct hv_ring_buffer_info	*ring_info,
+	void				*dest,
+	u32				destlen,
+	u32				start_read_offset)
 {
-	void *ringBuffer = GetRingBuffer(RingInfo);
-	u32 ringBufferSize = GetRingBufferSize(RingInfo);
+	void *ring_buffer = get_ring_buffer(ring_info);
+	u32 ring_buffer_size = get_ring_buffersize(ring_info);
 
-	u32 fragLen;
+	u32 frag_len;
 
 	/* wrap-around detected at the src */
-	if (DestLen > ringBufferSize - StartReadOffset) {
+	if (destlen > ring_buffer_size - start_read_offset) {
 		DPRINT_DBG(VMBUS, "src wrap-around detected!");
 
-		fragLen = ringBufferSize - StartReadOffset;
+		frag_len = ring_buffer_size - start_read_offset;
 
-		memcpy(Dest, ringBuffer + StartReadOffset, fragLen);
-		memcpy(Dest + fragLen, ringBuffer, DestLen - fragLen);
+		memcpy(dest, ring_buffer + start_read_offset, frag_len);
+		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
 	} else
 
-		memcpy(Dest, ringBuffer + StartReadOffset, DestLen);
+		memcpy(dest, ring_buffer + start_read_offset, destlen);
 
 
-	StartReadOffset += DestLen;
-	StartReadOffset %= ringBufferSize;
+	start_read_offset += destlen;
+	start_read_offset %= ring_buffer_size;
 
-	return StartReadOffset;
+	return start_read_offset;
 }
 
 
diff --git a/drivers/staging/hv/ring_buffer.h b/drivers/staging/hv/ring_buffer.h
index a7f1717..7bd6ecf 100644
--- a/drivers/staging/hv/ring_buffer.h
+++ b/drivers/staging/hv/ring_buffer.h
@@ -29,18 +29,18 @@
 
 struct hv_ring_buffer {
 	/* Offset in bytes from the start of ring data below */
-	volatile u32 WriteIndex;
+	volatile u32 write_index;
 
 	/* Offset in bytes from the start of ring data below */
-	volatile u32 ReadIndex;
+	volatile u32 read_index;
 
-	volatile u32 InterruptMask;
+	volatile u32 interrupt_mask;
 
 	/* Pad it to PAGE_SIZE so that data starts on page boundary */
-	u8	Reserved[4084];
+	u8	reserved[4084];
 
 	/* NOTE:
-	 * The InterruptMask field is used only for channels but since our
+	 * The interrupt_mask field is used only for channels but since our
 	 * vmbus connection also uses this data structure and its data starts
 	 * here, we commented out this field.
 	 */
@@ -50,24 +50,24 @@
 	 * Ring data starts here + RingDataStartOffset
 	 * !!! DO NOT place any fields below this !!!
 	 */
-	u8 Buffer[0];
+	u8 buffer[0];
 } __attribute__((packed));
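
The padding comment above relies on the header being exactly one page: three u32 fields plus the 4084-byte reserved area add up to 4096 bytes, so the flexible buffer[] member starts on a page boundary (assuming 4 KiB pages). A hedged sketch of how that invariant could be checked at compile time, using illustrative stand-in types rather than the driver's own:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the on-page ring header layout. */
	struct ring_header_model {
		volatile uint32_t write_index;
		volatile uint32_t read_index;
		volatile uint32_t interrupt_mask;
		uint8_t reserved[4084];	/* 3 * 4 + 4084 = 4096 bytes */
	} __attribute__((packed));

	/* Fails to compile if the header ever stops being one 4 KiB page. */
	_Static_assert(sizeof(struct ring_header_model) == 4096,
		       "ring header must occupy exactly one page");

	int main(void)
	{
		printf("header size: %zu bytes\n", sizeof(struct ring_header_model));
		return 0;
	}
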
 
 struct hv_ring_buffer_info {
-	struct hv_ring_buffer *RingBuffer;
-	u32 RingSize;			/* Include the shared header */
+	struct hv_ring_buffer *ring_buffer;
+	u32 ring_size;			/* Include the shared header */
 	spinlock_t ring_lock;
 
-	u32 RingDataSize;		/* < ringSize */
-	u32 RingDataStartOffset;
+	u32 ring_datasize;		/* < ring_size */
+	u32 ring_data_startoffset;
 };
 
 struct hv_ring_buffer_debug_info {
-	u32 CurrentInterruptMask;
-	u32 CurrentReadIndex;
-	u32 CurrentWriteIndex;
-	u32 BytesAvailToRead;
-	u32 BytesAvailToWrite;
+	u32 current_interrupt_mask;
+	u32 current_read_index;
+	u32 current_write_index;
+	u32 bytes_avail_toread;
+	u32 bytes_avail_towrite;
 };
 
 
@@ -75,28 +75,28 @@
 /* Interface */
 
 
-int RingBufferInit(struct hv_ring_buffer_info *RingInfo, void *Buffer,
-		   u32 BufferLen);
+int ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
+		   u32 buflen);
 
-void RingBufferCleanup(struct hv_ring_buffer_info *RingInfo);
+void ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
-int RingBufferWrite(struct hv_ring_buffer_info *RingInfo,
+int ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct scatterlist *sglist,
 		    u32 sgcount);
 
-int RingBufferPeek(struct hv_ring_buffer_info *RingInfo, void *Buffer,
-		   u32 BufferLen);
+int ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
+		   u32 buflen);
 
-int RingBufferRead(struct hv_ring_buffer_info *RingInfo,
-		   void *Buffer,
-		   u32 BufferLen,
-		   u32 Offset);
+int ringbuffer_read(struct hv_ring_buffer_info *ring_info,
+		   void *buffer,
+		   u32 buflen,
+		   u32 offset);
 
-u32 GetRingBufferInterruptMask(struct hv_ring_buffer_info *RingInfo);
+u32 get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
 
-void DumpRingInfo(struct hv_ring_buffer_info *RingInfo, char *Prefix);
+void dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix);
 
-void RingBufferGetDebugInfo(struct hv_ring_buffer_info *RingInfo,
+void ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 			    struct hv_ring_buffer_debug_info *debug_info);
 
 #endif /* _RING_BUFFER_H_ */
diff --git a/drivers/staging/hv/rndis.h b/drivers/staging/hv/rndis.h
index 723e1f1..014de04 100644
--- a/drivers/staging/hv/rndis.h
+++ b/drivers/staging/hv/rndis.h
@@ -288,24 +288,24 @@
 #define RNDIS_DF_RAW_DATA			0x00000004
 
 /*  Remote NDIS medium types. */
-#define RNdisMedium802_3			0x00000000
-#define RNdisMedium802_5			0x00000001
-#define RNdisMediumFddi				0x00000002
-#define RNdisMediumWan				0x00000003
-#define RNdisMediumLocalTalk			0x00000004
-#define RNdisMediumArcnetRaw			0x00000006
-#define RNdisMediumArcnet878_2			0x00000007
-#define RNdisMediumAtm				0x00000008
-#define RNdisMediumWirelessWan			0x00000009
-#define RNdisMediumIrda				0x0000000a
-#define RNdisMediumCoWan			0x0000000b
+#define RNDIS_MEDIUM_802_3			0x00000000
+#define RNDIS_MEDIUM_802_5			0x00000001
+#define RNDIS_MEDIUM_FDDI				0x00000002
+#define RNDIS_MEDIUM_WAN				0x00000003
+#define RNDIS_MEDIUM_LOCAL_TALK			0x00000004
+#define RNDIS_MEDIUM_ARCNET_RAW			0x00000006
+#define RNDIS_MEDIUM_ARCNET_878_2			0x00000007
+#define RNDIS_MEDIUM_ATM				0x00000008
+#define RNDIS_MEDIUM_WIRELESS_WAN			0x00000009
+#define RNDIS_MEDIUM_IRDA				0x0000000a
+#define RNDIS_MEDIUM_CO_WAN			0x0000000b
 /* Not a real medium, defined as an upper-bound */
-#define RNdisMediumMax				0x0000000d
+#define RNDIS_MEDIUM_MAX				0x0000000d
 
 
 /* Remote NDIS medium connection states. */
-#define RNdisMediaStateConnected		0x00000000
-#define RNdisMediaStateDisconnected		0x00000001
+#define RNDIS_MEDIA_STATE_CONNECTED		0x00000000
+#define RNDIS_MEDIA_STATE_DISCONNECTED		0x00000001
 
 /*  Remote NDIS version numbers */
 #define RNDIS_MAJOR_VERSION			0x00000001
@@ -314,106 +314,106 @@
 
 /* NdisInitialize message */
 struct rndis_initialize_request {
-	u32 RequestId;
-	u32 MajorVersion;
-	u32 MinorVersion;
-	u32 MaxTransferSize;
+	u32 req_id;
+	u32 major_ver;
+	u32 minor_ver;
+	u32 max_xfer_size;
 };
 
 /* Response to NdisInitialize */
 struct rndis_initialize_complete {
-	u32 RequestId;
-	u32 Status;
-	u32 MajorVersion;
-	u32 MinorVersion;
-	u32 DeviceFlags;
-	u32 Medium;
-	u32 MaxPacketsPerMessage;
-	u32 MaxTransferSize;
-	u32 PacketAlignmentFactor;
-	u32 AFListOffset;
-	u32 AFListSize;
+	u32 req_id;
+	u32 status;
+	u32 major_ver;
+	u32 minor_ver;
+	u32 dev_flags;
+	u32 medium;
+	u32 max_pkt_per_msg;
+	u32 max_xfer_size;
+	u32 pkt_alignment_factor;
+	u32 af_list_offset;
+	u32 af_list_size;
 };
 
 /* Call manager devices only: Information about an address family */
 /* supported by the device is appended to the response to NdisInitialize. */
 struct rndis_co_address_family {
-	u32 AddressFamily;
-	u32 MajorVersion;
-	u32 MinorVersion;
+	u32 address_family;
+	u32 major_ver;
+	u32 minor_ver;
 };
 
 /* NdisHalt message */
 struct rndis_halt_request {
-	u32 RequestId;
+	u32 req_id;
 };
 
 /* NdisQueryRequest message */
 struct rndis_query_request {
-	u32 RequestId;
-	u32 Oid;
-	u32 InformationBufferLength;
-	u32 InformationBufferOffset;
-	u32 DeviceVcHandle;
+	u32 req_id;
+	u32 oid;
+	u32 info_buflen;
+	u32 info_buf_offset;
+	u32 dev_vc_handle;
 };
 
 /* Response to NdisQueryRequest */
 struct rndis_query_complete {
-	u32 RequestId;
-	u32 Status;
-	u32 InformationBufferLength;
-	u32 InformationBufferOffset;
+	u32 req_id;
+	u32 status;
+	u32 info_buflen;
+	u32 info_buf_offset;
 };
 
 /* NdisSetRequest message */
 struct rndis_set_request {
-	u32 RequestId;
-	u32 Oid;
-	u32 InformationBufferLength;
-	u32 InformationBufferOffset;
-	u32 DeviceVcHandle;
+	u32 req_id;
+	u32 oid;
+	u32 info_buflen;
+	u32 info_buf_offset;
+	u32 dev_vc_handle;
 };
 
 /* Response to NdisSetRequest */
 struct rndis_set_complete {
-	u32 RequestId;
-	u32 Status;
+	u32 req_id;
+	u32 status;
 };
 
 /* NdisReset message */
 struct rndis_reset_request {
-	u32 Reserved;
+	u32 reserved;
 };
 
 /* Response to NdisReset */
 struct rndis_reset_complete {
-	u32 Status;
-	u32 AddressingReset;
+	u32 status;
+	u32 addressing_reset;
 };
 
 /* NdisMIndicateStatus message */
 struct rndis_indicate_status {
-	u32 Status;
-	u32 StatusBufferLength;
-	u32 StatusBufferOffset;
+	u32 status;
+	u32 status_buflen;
+	u32 status_buf_offset;
 };
 
 /* Diagnostic information passed as the status buffer in */
 /* struct rndis_indicate_status messages signifying error conditions. */
 struct rndis_diagnostic_info {
-	u32 DiagStatus;
-	u32 ErrorOffset;
+	u32 diag_status;
+	u32 error_offset;
 };
 
 /* NdisKeepAlive message */
 struct rndis_keepalive_request {
-	u32 RequestId;
+	u32 req_id;
 };
 
 /* Response to NdisKeepAlive */
 struct rndis_keepalive_complete {
-	u32 RequestId;
-	u32 Status;
+	u32 req_id;
+	u32 status;
 };
 
 /*
@@ -422,39 +422,39 @@
  * to 0 for connectionless data, otherwise it contains the VC handle.
  */
 struct rndis_packet {
-	u32 DataOffset;
-	u32 DataLength;
-	u32 OOBDataOffset;
-	u32 OOBDataLength;
-	u32 NumOOBDataElements;
-	u32 PerPacketInfoOffset;
-	u32 PerPacketInfoLength;
-	u32 VcHandle;
-	u32 Reserved;
+	u32 data_offset;
+	u32 data_len;
+	u32 oob_data_offset;
+	u32 oob_data_len;
+	u32 num_oob_data_elements;
+	u32 per_pkt_info_offset;
+	u32 per_pkt_info_len;
+	u32 vc_handle;
+	u32 reserved;
 };
 
 /* Optional Out of Band data associated with a Data message. */
 struct rndis_oobd {
-	u32 Size;
-	u32 Type;
-	u32 ClassInformationOffset;
+	u32 size;
+	u32 type;
+	u32 class_info_offset;
 };
 
 /* Packet extension field contents associated with a Data message. */
 struct rndis_per_packet_info {
-	u32 Size;
-	u32 Type;
-	u32 PerPacketInformationOffset;
+	u32 size;
+	u32 type;
+	u32 per_pkt_info_offset;
 };
 
 /* Format of Information buffer passed in a SetRequest for the OID */
 /* OID_GEN_RNDIS_CONFIG_PARAMETER. */
 struct rndis_config_parameter_info {
-	u32 ParameterNameOffset;
-	u32 ParameterNameLength;
-	u32 ParameterType;
-	u32 ParameterValueOffset;
-	u32 ParameterValueLength;
+	u32 parameter_name_offset;
+	u32 parameter_name_length;
+	u32 parameter_type;
+	u32 parameter_value_offset;
+	u32 parameter_value_length;
 };
 
 /* Values for ParameterType in struct rndis_config_parameter_info */
@@ -466,187 +466,188 @@
 
 /* CoNdisMiniportCreateVc message */
 struct rcondis_mp_create_vc {
-	u32 RequestId;
-	u32 NdisVcHandle;
+	u32 req_id;
+	u32 ndis_vc_handle;
 };
 
 /* Response to CoNdisMiniportCreateVc */
 struct rcondis_mp_create_vc_complete {
-	u32 RequestId;
-	u32 DeviceVcHandle;
-	u32 Status;
+	u32 req_id;
+	u32 dev_vc_handle;
+	u32 status;
 };
 
 /* CoNdisMiniportDeleteVc message */
 struct rcondis_mp_delete_vc {
-	u32 RequestId;
-	u32 DeviceVcHandle;
+	u32 req_id;
+	u32 dev_vc_handle;
 };
 
 /* Response to CoNdisMiniportDeleteVc */
 struct rcondis_mp_delete_vc_complete {
-	u32 RequestId;
-	u32 Status;
+	u32 req_id;
+	u32 status;
 };
 
 /* CoNdisMiniportQueryRequest message */
 struct rcondis_mp_query_request {
-	u32 RequestId;
-	u32 RequestType;
-	u32 Oid;
-	u32 DeviceVcHandle;
-	u32 InformationBufferLength;
-	u32 InformationBufferOffset;
+	u32 req_id;
+	u32 request_type;
+	u32 oid;
+	u32 dev_vc_handle;
+	u32 info_buflen;
+	u32 info_buf_offset;
 };
 
 /* CoNdisMiniportSetRequest message */
 struct rcondis_mp_set_request {
-	u32 RequestId;
-	u32 RequestType;
-	u32 Oid;
-	u32 DeviceVcHandle;
-	u32 InformationBufferLength;
-	u32 InformationBufferOffset;
+	u32 req_id;
+	u32 request_type;
+	u32 oid;
+	u32 dev_vc_handle;
+	u32 info_buflen;
+	u32 info_buf_offset;
 };
 
 /* CoNdisIndicateStatus message */
 struct rcondis_indicate_status {
-	u32 NdisVcHandle;
-	u32 Status;
-	u32 StatusBufferLength;
-	u32 StatusBufferOffset;
+	u32 ndis_vc_handle;
+	u32 status;
+	u32 status_buflen;
+	u32 status_buf_offset;
 };
 
 /* CONDIS Call/VC parameters */
 struct rcondis_specific_parameters {
-	u32 ParameterType;
-	u32 ParameterLength;
-	u32 ParameterOffset;
+	u32 parameter_type;
+	u32 parameter_length;
+	u32 parameter_offset;
 };
 
 struct rcondis_media_parameters {
-	u32 Flags;
-	u32 Reserved1;
-	u32 Reserved2;
-	struct rcondis_specific_parameters MediaSpecific;
+	u32 flags;
+	u32 reserved1;
+	u32 reserved2;
+	struct rcondis_specific_parameters media_specific;
 };
 
 struct rndis_flowspec {
-	u32 TokenRate;
-	u32 TokenBucketSize;
-	u32 PeakBandwidth;
-	u32 Latency;
-	u32 DelayVariation;
-	u32 ServiceType;
-	u32 MaxSduSize;
-	u32 MinimumPolicedSize;
+	u32 token_rate;
+	u32 token_bucket_size;
+	u32 peak_bandwidth;
+	u32 latency;
+	u32 delay_variation;
+	u32 service_type;
+	u32 max_sdu_size;
+	u32 minimum_policed_size;
 };
 
 struct rcondis_call_manager_parameters {
-	struct rndis_flowspec Transmit;
-	struct rndis_flowspec Receive;
-	struct rcondis_specific_parameters CallMgrSpecific;
+	struct rndis_flowspec transmit;
+	struct rndis_flowspec receive;
+	struct rcondis_specific_parameters call_mgr_specific;
 };
 
 /* CoNdisMiniportActivateVc message */
 struct rcondis_mp_activate_vc_request {
-	u32 RequestId;
-	u32 Flags;
-	u32 DeviceVcHandle;
-	u32 MediaParamsOffset;
-	u32 MediaParamsLength;
-	u32 CallMgrParamsOffset;
-	u32 CallMgrParamsLength;
+	u32 req_id;
+	u32 flags;
+	u32 dev_vc_handle;
+	u32 media_params_offset;
+	u32 media_params_length;
+	u32 call_mgr_params_offset;
+	u32 call_mgr_params_length;
 };
 
 /* Response to CoNdisMiniportActivateVc */
 struct rcondis_mp_activate_vc_complete {
-	u32 RequestId;
-	u32 Status;
+	u32 req_id;
+	u32 status;
 };
 
 /* CoNdisMiniportDeactivateVc message */
 struct rcondis_mp_deactivate_vc_request {
-	u32 RequestId;
-	u32 Flags;
-	u32 DeviceVcHandle;
+	u32 req_id;
+	u32 flags;
+	u32 dev_vc_handle;
 };
 
 /* Response to CoNdisMiniportDeactivateVc */
 struct rcondis_mp_deactivate_vc_complete {
-	u32 RequestId;
-	u32 Status;
+	u32 req_id;
+	u32 status;
 };
 
 
 /* union with all of the RNDIS messages */
 union rndis_message_container {
-	struct rndis_packet Packet;
-	struct rndis_initialize_request InitializeRequest;
-	struct rndis_halt_request HaltRequest;
-	struct rndis_query_request QueryRequest;
-	struct rndis_set_request SetRequest;
-	struct rndis_reset_request ResetRequest;
-	struct rndis_keepalive_request KeepaliveRequest;
-	struct rndis_indicate_status IndicateStatus;
-	struct rndis_initialize_complete InitializeComplete;
-	struct rndis_query_complete QueryComplete;
-	struct rndis_set_complete SetComplete;
-	struct rndis_reset_complete ResetComplete;
-	struct rndis_keepalive_complete KeepaliveComplete;
-	struct rcondis_mp_create_vc CoMiniportCreateVc;
-	struct rcondis_mp_delete_vc CoMiniportDeleteVc;
-	struct rcondis_indicate_status CoIndicateStatus;
-	struct rcondis_mp_activate_vc_request CoMiniportActivateVc;
-	struct rcondis_mp_deactivate_vc_request CoMiniportDeactivateVc;
-	struct rcondis_mp_create_vc_complete CoMiniportCreateVcComplete;
-	struct rcondis_mp_delete_vc_complete CoMiniportDeleteVcComplete;
-	struct rcondis_mp_activate_vc_complete CoMiniportActivateVcComplete;
-	struct rcondis_mp_deactivate_vc_complete CoMiniportDeactivateVcComplete;
+	struct rndis_packet pkt;
+	struct rndis_initialize_request init_req;
+	struct rndis_halt_request halt_req;
+	struct rndis_query_request query_req;
+	struct rndis_set_request set_req;
+	struct rndis_reset_request reset_req;
+	struct rndis_keepalive_request keep_alive_req;
+	struct rndis_indicate_status indicate_status;
+	struct rndis_initialize_complete init_complete;
+	struct rndis_query_complete query_complete;
+	struct rndis_set_complete set_complete;
+	struct rndis_reset_complete reset_complete;
+	struct rndis_keepalive_complete keep_alive_complete;
+	struct rcondis_mp_create_vc co_miniport_create_vc;
+	struct rcondis_mp_delete_vc co_miniport_delete_vc;
+	struct rcondis_indicate_status co_indicate_status;
+	struct rcondis_mp_activate_vc_request co_miniport_activate_vc;
+	struct rcondis_mp_deactivate_vc_request co_miniport_deactivate_vc;
+	struct rcondis_mp_create_vc_complete co_miniport_create_vc_complete;
+	struct rcondis_mp_delete_vc_complete co_miniport_delete_vc_complete;
+	struct rcondis_mp_activate_vc_complete co_miniport_activate_vc_complete;
+	struct rcondis_mp_deactivate_vc_complete
+		co_miniport_deactivate_vc_complete;
 };
 
 /* Remote NDIS message format */
 struct rndis_message {
-	u32 NdisMessageType;
+	u32 ndis_msg_type;
 
 	/* Total length of this message, from the beginning */
 	/* of the struct rndis_message, in bytes. */
-	u32 MessageLength;
+	u32 msg_len;
 
 	/* Actual message */
-	union rndis_message_container Message;
+	union rndis_message_container msg;
 };
 
 /* Handy macros */
 
 /* get the size of an RNDIS message. Pass in the message type, */
 /* struct rndis_set_request, struct rndis_packet for example */
-#define RNDIS_MESSAGE_SIZE(Message)				\
-	(sizeof(Message) + (sizeof(struct rndis_message) -	\
+#define RNDIS_MESSAGE_SIZE(msg)				\
+	(sizeof(msg) + (sizeof(struct rndis_message) -	\
 	 sizeof(union rndis_message_container)))
 
 /* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(Message)				\
-	(((unsigned char *)(Message)) + Message->InformationBufferOffset)
+#define MESSAGE_TO_INFO_BUFFER(msg)				\
+	(((unsigned char *)(msg)) + msg->info_buf_offset)
 
 /* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(Message)			\
-	(((unsigned char *)(Message)) + Message->StatusBufferOffset)
+#define MESSAGE_TO_STATUS_BUFFER(msg)			\
+	(((unsigned char *)(msg)) + msg->status_buf_offset)
 
 /* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(Message)				\
-	(((unsigned char *)(Message)) + Message->OOBDataOffset)
+#define MESSAGE_TO_OOBD_BUFFER(msg)				\
+	(((unsigned char *)(msg)) + msg->oob_data_offset)
 
 /* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(Message)				\
-	(((unsigned char *)(Message)) + Message->PerPacketInfoOffset)
+#define MESSAGE_TO_DATA_BUFFER(msg)				\
+	(((unsigned char *)(msg)) + msg->per_pkt_info_offset)
 
 /* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(RndisMessage)		\
-	((void *) &RndisMessage->Message)
+#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg)		\
+	((void *) &rndis_msg->msg)
 
 /* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(RndisMessage)	\
-	((void *) RndisMessage)
+#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg)	\
+	((void *) rndis_msg)
 
 #endif /* _RNDIS_H_ */
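
The RNDIS_MESSAGE_SIZE() macro above computes a message's wire size as the specific request/response struct plus the fixed rndis_message header, recovering the header size by subtracting the union from sizeof(struct rndis_message). A sketch of the same arithmetic with simplified stand-in types (illustrative layouts and sizes, not the real RNDIS structures):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins: the real header is ndis_msg_type + msg_len + a message union. */
	struct halt_req_model { uint32_t req_id; };
	struct set_req_model  { uint32_t req_id; uint32_t oid;
				uint32_t info_buflen; uint32_t info_buf_offset;
				uint32_t dev_vc_handle; };

	union container_model {
		struct halt_req_model halt;
		struct set_req_model  set;
	};

	struct rndis_msg_model {
		uint32_t ndis_msg_type;
		uint32_t msg_len;
		union container_model msg;
	};

	/* Same shape as the RNDIS_MESSAGE_SIZE() macro in rndis.h. */
	#define MODEL_MESSAGE_SIZE(m) \
		(sizeof(m) + (sizeof(struct rndis_msg_model) - \
		 sizeof(union container_model)))

	int main(void)
	{
		/* The header is 8 bytes here, so a set request totals 8 + 20 = 28. */
		printf("set request wire size: %zu\n",
		       MODEL_MESSAGE_SIZE(struct set_req_model));
		return 0;
	}
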
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
index fa2141f..53676dc 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/staging/hv/rndis_filter.c
@@ -32,7 +32,7 @@
 /* Data types */
 struct rndis_filter_driver_object {
 	/* The original driver */
-	struct netvsc_driver InnerDriver;
+	struct netvsc_driver inner_drv;
 };
 
 enum rndis_device_state {
@@ -43,63 +43,63 @@
 };
 
 struct rndis_device {
-	struct netvsc_device *NetDevice;
+	struct netvsc_device *net_dev;
 
-	enum rndis_device_state State;
-	u32 LinkStatus;
-	atomic_t NewRequestId;
+	enum rndis_device_state state;
+	u32 link_stat;
+	atomic_t new_req_id;
 
 	spinlock_t request_lock;
-	struct list_head RequestList;
+	struct list_head req_list;
 
-	unsigned char HwMacAddr[ETH_ALEN];
+	unsigned char hw_mac_adr[ETH_ALEN];
 };
 
 struct rndis_request {
-	struct list_head ListEntry;
-	struct osd_waitevent *WaitEvent;
+	struct list_head list_ent;
+	struct osd_waitevent *waitevent;
 
 	/*
 	 * FIXME: We assumed a fixed size response here. If we do ever need to
 	 * handle a bigger response, we can either define a max response
 	 * message or add a response buffer variable above this field
 	 */
-	struct rndis_message ResponseMessage;
+	struct rndis_message response_msg;
 
 	/* Simplify allocation by having a netvsc packet inline */
-	struct hv_netvsc_packet	Packet;
-	struct hv_page_buffer Buffer;
+	struct hv_netvsc_packet	pkt;
+	struct hv_page_buffer buf;
 	/* FIXME: We assumed a fixed size request here. */
-	struct rndis_message RequestMessage;
+	struct rndis_message request_msg;
 };
 
 
 struct rndis_filter_packet {
-	void *CompletionContext;
-	void (*OnCompletion)(void *context);
-	struct rndis_message Message;
+	void *completion_ctx;
+	void (*completion)(void *context);
+	struct rndis_message msg;
 };
 
 
-static int RndisFilterOnDeviceAdd(struct hv_device *Device,
-				  void *AdditionalInfo);
+static int rndis_filter_device_add(struct hv_device *dev,
+				  void *additional_info);
 
-static int RndisFilterOnDeviceRemove(struct hv_device *Device);
+static int rndis_filter_device_remove(struct hv_device *dev);
 
-static void RndisFilterOnCleanup(struct hv_driver *Driver);
+static void rndis_filter_cleanup(struct hv_driver *drv);
 
-static int RndisFilterOnSend(struct hv_device *Device,
-			     struct hv_netvsc_packet *Packet);
+static int rndis_filter_send(struct hv_device *dev,
+			     struct hv_netvsc_packet *pkt);
 
-static void RndisFilterOnSendCompletion(void *Context);
+static void rndis_filter_send_completion(void *ctx);
 
-static void RndisFilterOnSendRequestCompletion(void *Context);
+static void rndis_filter_send_request_completion(void *ctx);
 
 
 /* The one and only */
-static struct rndis_filter_driver_object gRndisFilter;
+static struct rndis_filter_driver_object rndis_filter;
 
-static struct rndis_device *GetRndisDevice(void)
+static struct rndis_device *get_rndis_device(void)
 {
 	struct rndis_device *device;
 
@@ -109,19 +109,19 @@
 
 	spin_lock_init(&device->request_lock);
 
-	INIT_LIST_HEAD(&device->RequestList);
+	INIT_LIST_HEAD(&device->req_list);
 
-	device->State = RNDIS_DEV_UNINITIALIZED;
+	device->state = RNDIS_DEV_UNINITIALIZED;
 
 	return device;
 }
 
-static struct rndis_request *GetRndisRequest(struct rndis_device *Device,
-					     u32 MessageType,
-					     u32 MessageLength)
+static struct rndis_request *get_rndis_request(struct rndis_device *dev,
+					     u32 msg_type,
+					     u32 msg_len)
 {
 	struct rndis_request *request;
-	struct rndis_message *rndisMessage;
+	struct rndis_message *rndis_msg;
 	struct rndis_set_request *set;
 	unsigned long flags;
 
@@ -129,61 +129,61 @@
 	if (!request)
 		return NULL;
 
-	request->WaitEvent = osd_WaitEventCreate();
-	if (!request->WaitEvent) {
+	request->waitevent = osd_waitevent_create();
+	if (!request->waitevent) {
 		kfree(request);
 		return NULL;
 	}
 
-	rndisMessage = &request->RequestMessage;
-	rndisMessage->NdisMessageType = MessageType;
-	rndisMessage->MessageLength = MessageLength;
+	rndis_msg = &request->request_msg;
+	rndis_msg->ndis_msg_type = msg_type;
+	rndis_msg->msg_len = msg_len;
 
 	/*
 	 * Set the request id. This field is always after the rndis header for
 	 * request/response packet types so we just used the SetRequest as a
 	 * template
 	 */
-	set = &rndisMessage->Message.SetRequest;
-	set->RequestId = atomic_inc_return(&Device->NewRequestId);
+	set = &rndis_msg->msg.set_req;
+	set->req_id = atomic_inc_return(&dev->new_req_id);
 
 	/* Add to the request list */
-	spin_lock_irqsave(&Device->request_lock, flags);
-	list_add_tail(&request->ListEntry, &Device->RequestList);
-	spin_unlock_irqrestore(&Device->request_lock, flags);
+	spin_lock_irqsave(&dev->request_lock, flags);
+	list_add_tail(&request->list_ent, &dev->req_list);
+	spin_unlock_irqrestore(&dev->request_lock, flags);
 
 	return request;
 }
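
get_rndis_request() stamps the request id through msg.set_req even when it is building a query, halt, or initialize message; this is safe because every request/response struct in union rndis_message_container begins with a u32 req_id, so the first four bytes alias the same field. A small stand-alone illustration of that common-initial-member aliasing (reduced, hypothetical structs, not the driver's definitions):

	#include <stdint.h>
	#include <stdio.h>

	/* Reduced request structs: each deliberately starts with req_id. */
	struct query_req { uint32_t req_id; uint32_t oid; };
	struct set_req   { uint32_t req_id; uint32_t oid; uint32_t info_buflen; };

	union msg_container {
		struct query_req query;
		struct set_req   set;
	};

	int main(void)
	{
		union msg_container m;

		/* Write the id through one member, read it back through another. */
		m.set.req_id = 42;
		printf("query view sees req_id = %u\n", m.query.req_id);	/* 42 */
		return 0;
	}
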
 
-static void PutRndisRequest(struct rndis_device *Device,
-			    struct rndis_request *Request)
+static void put_rndis_request(struct rndis_device *dev,
+			    struct rndis_request *req)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&Device->request_lock, flags);
-	list_del(&Request->ListEntry);
-	spin_unlock_irqrestore(&Device->request_lock, flags);
+	spin_lock_irqsave(&dev->request_lock, flags);
+	list_del(&req->list_ent);
+	spin_unlock_irqrestore(&dev->request_lock, flags);
 
-	kfree(Request->WaitEvent);
-	kfree(Request);
+	kfree(req->waitevent);
+	kfree(req);
 }
 
-static void DumpRndisMessage(struct rndis_message *RndisMessage)
+static void dump_rndis_message(struct rndis_message *rndis_msg)
 {
-	switch (RndisMessage->NdisMessageType) {
+	switch (rndis_msg->ndis_msg_type) {
 	case REMOTE_NDIS_PACKET_MSG:
 		DPRINT_DBG(NETVSC, "REMOTE_NDIS_PACKET_MSG (len %u, "
 			   "data offset %u data len %u, # oob %u, "
 			   "oob offset %u, oob len %u, pkt offset %u, "
 			   "pkt len %u",
-			   RndisMessage->MessageLength,
-			   RndisMessage->Message.Packet.DataOffset,
-			   RndisMessage->Message.Packet.DataLength,
-			   RndisMessage->Message.Packet.NumOOBDataElements,
-			   RndisMessage->Message.Packet.OOBDataOffset,
-			   RndisMessage->Message.Packet.OOBDataLength,
-			   RndisMessage->Message.Packet.PerPacketInfoOffset,
-			   RndisMessage->Message.Packet.PerPacketInfoLength);
+			   rndis_msg->msg_len,
+			   rndis_msg->msg.pkt.data_offset,
+			   rndis_msg->msg.pkt.data_len,
+			   rndis_msg->msg.pkt.num_oob_data_elements,
+			   rndis_msg->msg.pkt.oob_data_offset,
+			   rndis_msg->msg.pkt.oob_data_len,
+			   rndis_msg->msg.pkt.per_pkt_info_offset,
+			   rndis_msg->msg.pkt.per_pkt_info_len);
 		break;
 
 	case REMOTE_NDIS_INITIALIZE_CMPLT:
@@ -191,147 +191,157 @@
 			"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
 			"device flags %d, max xfer size 0x%x, max pkts %u, "
 			"pkt aligned %u)",
-			RndisMessage->MessageLength,
-			RndisMessage->Message.InitializeComplete.RequestId,
-			RndisMessage->Message.InitializeComplete.Status,
-			RndisMessage->Message.InitializeComplete.MajorVersion,
-			RndisMessage->Message.InitializeComplete.MinorVersion,
-			RndisMessage->Message.InitializeComplete.DeviceFlags,
-			RndisMessage->Message.InitializeComplete.MaxTransferSize,
-			RndisMessage->Message.InitializeComplete.MaxPacketsPerMessage,
-			RndisMessage->Message.InitializeComplete.PacketAlignmentFactor);
+			rndis_msg->msg_len,
+			rndis_msg->msg.init_complete.req_id,
+			rndis_msg->msg.init_complete.status,
+			rndis_msg->msg.init_complete.major_ver,
+			rndis_msg->msg.init_complete.minor_ver,
+			rndis_msg->msg.init_complete.dev_flags,
+			rndis_msg->msg.init_complete.max_xfer_size,
+			rndis_msg->msg.init_complete.
+			   max_pkt_per_msg,
+			rndis_msg->msg.init_complete.
+			   pkt_alignment_factor);
 		break;
 
 	case REMOTE_NDIS_QUERY_CMPLT:
 		DPRINT_DBG(NETVSC, "REMOTE_NDIS_QUERY_CMPLT "
 			"(len %u, id 0x%x, status 0x%x, buf len %u, "
 			"buf offset %u)",
-			RndisMessage->MessageLength,
-			RndisMessage->Message.QueryComplete.RequestId,
-			RndisMessage->Message.QueryComplete.Status,
-			RndisMessage->Message.QueryComplete.InformationBufferLength,
-			RndisMessage->Message.QueryComplete.InformationBufferOffset);
+			rndis_msg->msg_len,
+			rndis_msg->msg.query_complete.req_id,
+			rndis_msg->msg.query_complete.status,
+			rndis_msg->msg.query_complete.
+			   info_buflen,
+			rndis_msg->msg.query_complete.
+			   info_buf_offset);
 		break;
 
 	case REMOTE_NDIS_SET_CMPLT:
 		DPRINT_DBG(NETVSC,
 			"REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)",
-			RndisMessage->MessageLength,
-			RndisMessage->Message.SetComplete.RequestId,
-			RndisMessage->Message.SetComplete.Status);
+			rndis_msg->msg_len,
+			rndis_msg->msg.set_complete.req_id,
+			rndis_msg->msg.set_complete.status);
 		break;
 
 	case REMOTE_NDIS_INDICATE_STATUS_MSG:
 		DPRINT_DBG(NETVSC, "REMOTE_NDIS_INDICATE_STATUS_MSG "
 			"(len %u, status 0x%x, buf len %u, buf offset %u)",
-			RndisMessage->MessageLength,
-			RndisMessage->Message.IndicateStatus.Status,
-			RndisMessage->Message.IndicateStatus.StatusBufferLength,
-			RndisMessage->Message.IndicateStatus.StatusBufferOffset);
+			rndis_msg->msg_len,
+			rndis_msg->msg.indicate_status.status,
+			rndis_msg->msg.indicate_status.status_buflen,
+			rndis_msg->msg.indicate_status.status_buf_offset);
 		break;
 
 	default:
 		DPRINT_DBG(NETVSC, "0x%x (len %u)",
-			RndisMessage->NdisMessageType,
-			RndisMessage->MessageLength);
+			rndis_msg->ndis_msg_type,
+			rndis_msg->msg_len);
 		break;
 	}
 }
 
-static int RndisFilterSendRequest(struct rndis_device *Device,
-				  struct rndis_request *Request)
+static int rndis_filter_send_request(struct rndis_device *dev,
+				  struct rndis_request *req)
 {
 	int ret;
 	struct hv_netvsc_packet *packet;
 
 	/* Setup the packet to send it */
-	packet = &Request->Packet;
+	packet = &req->pkt;
 
-	packet->IsDataPacket = false;
-	packet->TotalDataBufferLength = Request->RequestMessage.MessageLength;
-	packet->PageBufferCount = 1;
+	packet->is_data_pkt = false;
+	packet->total_data_buflen = req->request_msg.msg_len;
+	packet->page_buf_cnt = 1;
 
-	packet->PageBuffers[0].Pfn = virt_to_phys(&Request->RequestMessage) >>
+	packet->page_buf[0].Pfn = virt_to_phys(&req->request_msg) >>
 					PAGE_SHIFT;
-	packet->PageBuffers[0].Length = Request->RequestMessage.MessageLength;
-	packet->PageBuffers[0].Offset =
-		(unsigned long)&Request->RequestMessage & (PAGE_SIZE - 1);
+	packet->page_buf[0].Length = req->request_msg.msg_len;
+	packet->page_buf[0].Offset =
+		(unsigned long)&req->request_msg & (PAGE_SIZE - 1);
 
-	packet->Completion.Send.SendCompletionContext = Request;/* packet; */
-	packet->Completion.Send.OnSendCompletion =
-		RndisFilterOnSendRequestCompletion;
-	packet->Completion.Send.SendCompletionTid = (unsigned long)Device;
+	packet->completion.send.send_completion_ctx = req;/* packet; */
+	packet->completion.send.send_completion =
+		rndis_filter_send_request_completion;
+	packet->completion.send.send_completion_tid = (unsigned long)dev;
 
-	ret = gRndisFilter.InnerDriver.OnSend(Device->NetDevice->Device, packet);
+	ret = rndis_filter.inner_drv.send(dev->net_dev->dev, packet);
 	return ret;
 }
 
-static void RndisFilterReceiveResponse(struct rndis_device *Device,
-				       struct rndis_message *Response)
+static void rndis_filter_receive_response(struct rndis_device *dev,
+				       struct rndis_message *resp)
 {
 	struct rndis_request *request = NULL;
 	bool found = false;
 	unsigned long flags;
 
-	spin_lock_irqsave(&Device->request_lock, flags);
-	list_for_each_entry(request, &Device->RequestList, ListEntry) {
+	spin_lock_irqsave(&dev->request_lock, flags);
+	list_for_each_entry(request, &dev->req_list, list_ent) {
 		/*
 		 * All request/response messages contain req_id as the first
 		 * field
 		 */
-		if (request->RequestMessage.Message.InitializeRequest.RequestId
-		    == Response->Message.InitializeComplete.RequestId) {
+		if (request->request_msg.msg.init_req.req_id
+		    == resp->msg.init_complete.req_id) {
 			DPRINT_DBG(NETVSC, "found rndis request for "
 				"this response (id 0x%x req type 0x%x res "
 				"type 0x%x)",
-				request->RequestMessage.Message.InitializeRequest.RequestId,
-				request->RequestMessage.NdisMessageType,
-				Response->NdisMessageType);
+				request->request_msg.msg.
+				   init_req.req_id,
+				request->request_msg.ndis_msg_type,
+				resp->ndis_msg_type);
 
 			found = true;
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&Device->request_lock, flags);
+	spin_unlock_irqrestore(&dev->request_lock, flags);
 
 	if (found) {
-		if (Response->MessageLength <= sizeof(struct rndis_message)) {
-			memcpy(&request->ResponseMessage, Response,
-			       Response->MessageLength);
+		if (resp->msg_len <= sizeof(struct rndis_message)) {
+			memcpy(&request->response_msg, resp,
+			       resp->msg_len);
 		} else {
 			DPRINT_ERR(NETVSC, "rndis response buffer overflow "
 				  "detected (size %u max %zu)",
-				  Response->MessageLength,
+				  resp->msg_len,
 				  sizeof(struct rndis_filter_packet));
 
-			if (Response->NdisMessageType ==
+			if (resp->ndis_msg_type ==
 			    REMOTE_NDIS_RESET_CMPLT) {
 				/* does not have a request id field */
-				request->ResponseMessage.Message.ResetComplete.Status = STATUS_BUFFER_OVERFLOW;
+				request->response_msg.msg.reset_complete.
+					status = STATUS_BUFFER_OVERFLOW;
 			} else {
-				request->ResponseMessage.Message.InitializeComplete.Status = STATUS_BUFFER_OVERFLOW;
+				request->response_msg.msg.
+				init_complete.status =
+					STATUS_BUFFER_OVERFLOW;
 			}
 		}
 
-		osd_WaitEventSet(request->WaitEvent);
+		osd_waitevent_set(request->waitevent);
 	} else {
 		DPRINT_ERR(NETVSC, "no rndis request found for this response "
 			   "(id 0x%x res type 0x%x)",
-			   Response->Message.InitializeComplete.RequestId,
-			   Response->NdisMessageType);
+			   resp->msg.init_complete.req_id,
+			   resp->ndis_msg_type);
 	}
 }
 
-static void RndisFilterReceiveIndicateStatus(struct rndis_device *Device,
-					     struct rndis_message *Response)
+static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
+					     struct rndis_message *resp)
 {
 	struct rndis_indicate_status *indicate =
-			&Response->Message.IndicateStatus;
+			&resp->msg.indicate_status;
 
-	if (indicate->Status == RNDIS_STATUS_MEDIA_CONNECT) {
-		gRndisFilter.InnerDriver.OnLinkStatusChanged(Device->NetDevice->Device, 1);
-	} else if (indicate->Status == RNDIS_STATUS_MEDIA_DISCONNECT) {
-		gRndisFilter.InnerDriver.OnLinkStatusChanged(Device->NetDevice->Device, 0);
+	if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
+		rndis_filter.inner_drv.link_status_change(
+			dev->net_dev->dev, 1);
+	} else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
+		rndis_filter.inner_drv.link_status_change(
+			dev->net_dev->dev, 0);
 	} else {
 		/*
 		 * TODO:
@@ -339,18 +349,18 @@
 	}
 }
 
-static void RndisFilterReceiveData(struct rndis_device *Device,
-				   struct rndis_message *Message,
-				   struct hv_netvsc_packet *Packet)
+static void rndis_filter_receive_data(struct rndis_device *dev,
+				   struct rndis_message *msg,
+				   struct hv_netvsc_packet *pkt)
 {
-	struct rndis_packet *rndisPacket;
-	u32 dataOffset;
+	struct rndis_packet *rndis_pkt;
+	u32 data_offset;
 
 	/* empty ethernet frame ?? */
 	/* ASSERT(Packet->PageBuffers[0].Length > */
 	/* 	RNDIS_MESSAGE_SIZE(struct rndis_packet)); */
 
-	rndisPacket = &Message->Message.Packet;
+	rndis_pkt = &msg->msg.pkt;
 
 	/*
 	 * FIXME: Handle multiple rndis pkt msgs that maybe enclosed in this
@@ -358,48 +368,48 @@
 	 */
 
 	/* Remove the rndis header and pass it back up the stack */
-	dataOffset = RNDIS_HEADER_SIZE + rndisPacket->DataOffset;
+	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
 
-	Packet->TotalDataBufferLength -= dataOffset;
-	Packet->PageBuffers[0].Offset += dataOffset;
-	Packet->PageBuffers[0].Length -= dataOffset;
+	pkt->total_data_buflen -= data_offset;
+	pkt->page_buf[0].Offset += data_offset;
+	pkt->page_buf[0].Length -= data_offset;
 
-	Packet->IsDataPacket = true;
+	pkt->is_data_pkt = true;
 
-	gRndisFilter.InnerDriver.OnReceiveCallback(Device->NetDevice->Device,
-						   Packet);
+	rndis_filter.inner_drv.recv_cb(dev->net_dev->dev,
+						   pkt);
 }
 
-static int RndisFilterOnReceive(struct hv_device *Device,
-				struct hv_netvsc_packet	*Packet)
+static int rndis_filter_receive(struct hv_device *dev,
+				struct hv_netvsc_packet	*pkt)
 {
-	struct netvsc_device *netDevice = Device->Extension;
-	struct rndis_device *rndisDevice;
-	struct rndis_message rndisMessage;
-	struct rndis_message *rndisHeader;
+	struct netvsc_device *net_dev = dev->Extension;
+	struct rndis_device *rndis_dev;
+	struct rndis_message rndis_msg;
+	struct rndis_message *rndis_hdr;
 
-	if (!netDevice)
+	if (!net_dev)
 		return -EINVAL;
 
 	/* Make sure the rndis device state is initialized */
-	if (!netDevice->Extension) {
+	if (!net_dev->extension) {
 		DPRINT_ERR(NETVSC, "got rndis message but no rndis device..."
 			  "dropping this message!");
 		return -1;
 	}
 
-	rndisDevice = (struct rndis_device *)netDevice->Extension;
-	if (rndisDevice->State == RNDIS_DEV_UNINITIALIZED) {
+	rndis_dev = (struct rndis_device *)net_dev->extension;
+	if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
 		DPRINT_ERR(NETVSC, "got rndis message but rndis device "
 			   "uninitialized...dropping this message!");
 		return -1;
 	}
 
-	rndisHeader = (struct rndis_message *)kmap_atomic(
-			pfn_to_page(Packet->PageBuffers[0].Pfn), KM_IRQ0);
+	rndis_hdr = (struct rndis_message *)kmap_atomic(
+			pfn_to_page(pkt->page_buf[0].Pfn), KM_IRQ0);
 
-	rndisHeader = (void *)((unsigned long)rndisHeader +
-			Packet->PageBuffers[0].Offset);
+	rndis_hdr = (void *)((unsigned long)rndis_hdr +
+			pkt->page_buf[0].Offset);
 
 	/* Make sure we got a valid rndis message */
 	/*
@@ -408,39 +418,39 @@
 	 * range shows 52 bytes
 	 * */
 #if 0
-	if (Packet->TotalDataBufferLength != rndisHeader->MessageLength) {
-		kunmap_atomic(rndisHeader - Packet->PageBuffers[0].Offset,
+	if (pkt->total_data_buflen != rndis_hdr->msg_len) {
+		kunmap_atomic(rndis_hdr - pkt->page_buf[0].Offset,
 			      KM_IRQ0);
 
 		DPRINT_ERR(NETVSC, "invalid rndis message? (expected %u "
 			   "bytes got %u)...dropping this message!",
-			   rndisHeader->MessageLength,
-			   Packet->TotalDataBufferLength);
+			   rndis_hdr->msg_len,
+			   pkt->total_data_buflen);
 		return -1;
 	}
 #endif
 
-	if ((rndisHeader->NdisMessageType != REMOTE_NDIS_PACKET_MSG) &&
-	    (rndisHeader->MessageLength > sizeof(struct rndis_message))) {
+	if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
+	    (rndis_hdr->msg_len > sizeof(struct rndis_message))) {
 		DPRINT_ERR(NETVSC, "incoming rndis message buffer overflow "
 			   "detected (got %u, max %zu)...marking it an error!",
-			   rndisHeader->MessageLength,
+			   rndis_hdr->msg_len,
 			   sizeof(struct rndis_message));
 	}
 
-	memcpy(&rndisMessage, rndisHeader,
-		(rndisHeader->MessageLength > sizeof(struct rndis_message)) ?
+	memcpy(&rndis_msg, rndis_hdr,
+		(rndis_hdr->msg_len > sizeof(struct rndis_message)) ?
 			sizeof(struct rndis_message) :
-			rndisHeader->MessageLength);
+			rndis_hdr->msg_len);
 
-	kunmap_atomic(rndisHeader - Packet->PageBuffers[0].Offset, KM_IRQ0);
+	kunmap_atomic(rndis_hdr - pkt->page_buf[0].Offset, KM_IRQ0);
 
-	DumpRndisMessage(&rndisMessage);
+	dump_rndis_message(&rndis_msg);
 
-	switch (rndisMessage.NdisMessageType) {
+	switch (rndis_msg.ndis_msg_type) {
 	case REMOTE_NDIS_PACKET_MSG:
 		/* data msg */
-		RndisFilterReceiveData(rndisDevice, &rndisMessage, Packet);
+		rndis_filter_receive_data(rndis_dev, &rndis_msg, pkt);
 		break;
 
 	case REMOTE_NDIS_INITIALIZE_CMPLT:
@@ -449,37 +459,37 @@
 	/* case REMOTE_NDIS_RESET_CMPLT: */
 	/* case REMOTE_NDIS_KEEPALIVE_CMPLT: */
 		/* completion msgs */
-		RndisFilterReceiveResponse(rndisDevice, &rndisMessage);
+		rndis_filter_receive_response(rndis_dev, &rndis_msg);
 		break;
 
 	case REMOTE_NDIS_INDICATE_STATUS_MSG:
 		/* notification msgs */
-		RndisFilterReceiveIndicateStatus(rndisDevice, &rndisMessage);
+		rndis_filter_receive_indicate_status(rndis_dev, &rndis_msg);
 		break;
 	default:
 		DPRINT_ERR(NETVSC, "unhandled rndis message (type %u len %u)",
-			   rndisMessage.NdisMessageType,
-			   rndisMessage.MessageLength);
+			   rndis_msg.ndis_msg_type,
+			   rndis_msg.msg_len);
 		break;
 	}
 
 	return 0;
 }
 
-static int RndisFilterQueryDevice(struct rndis_device *Device, u32 Oid,
-				  void *Result, u32 *ResultSize)
+static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
+				  void *result, u32 *result_size)
 {
 	struct rndis_request *request;
-	u32 inresultSize = *ResultSize;
+	u32 inresult_size = *result_size;
 	struct rndis_query_request *query;
-	struct rndis_query_complete *queryComplete;
+	struct rndis_query_complete *query_complete;
 	int ret = 0;
 
-	if (!Result)
+	if (!result)
 		return -EINVAL;
 
-	*ResultSize = 0;
-	request = GetRndisRequest(Device, REMOTE_NDIS_QUERY_MSG,
+	*result_size = 0;
+	request = get_rndis_request(dev, REMOTE_NDIS_QUERY_MSG,
 			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
 	if (!request) {
 		ret = -1;
@@ -487,71 +497,71 @@
 	}
 
 	/* Setup the rndis query */
-	query = &request->RequestMessage.Message.QueryRequest;
-	query->Oid = Oid;
-	query->InformationBufferOffset = sizeof(struct rndis_query_request);
-	query->InformationBufferLength = 0;
-	query->DeviceVcHandle = 0;
+	query = &request->request_msg.msg.query_req;
+	query->oid = oid;
+	query->info_buf_offset = sizeof(struct rndis_query_request);
+	query->info_buflen = 0;
+	query->dev_vc_handle = 0;
 
-	ret = RndisFilterSendRequest(Device, request);
+	ret = rndis_filter_send_request(dev, request);
 	if (ret != 0)
 		goto Cleanup;
 
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->waitevent);
 
 	/* Copy the response back */
-	queryComplete = &request->ResponseMessage.Message.QueryComplete;
+	query_complete = &request->response_msg.msg.query_complete;
 
-	if (queryComplete->InformationBufferLength > inresultSize) {
+	if (query_complete->info_buflen > inresult_size) {
 		ret = -1;
 		goto Cleanup;
 	}
 
-	memcpy(Result,
-	       (void *)((unsigned long)queryComplete +
-			 queryComplete->InformationBufferOffset),
-	       queryComplete->InformationBufferLength);
+	memcpy(result,
+	       (void *)((unsigned long)query_complete +
+			 query_complete->info_buf_offset),
+	       query_complete->info_buflen);
 
-	*ResultSize = queryComplete->InformationBufferLength;
+	*result_size = query_complete->info_buflen;
 
 Cleanup:
 	if (request)
-		PutRndisRequest(Device, request);
+		put_rndis_request(dev, request);
 
 	return ret;
 }
 
-static int RndisFilterQueryDeviceMac(struct rndis_device *Device)
+static int rndis_filter_query_device_mac(struct rndis_device *dev)
 {
 	u32 size = ETH_ALEN;
 
-	return RndisFilterQueryDevice(Device,
+	return rndis_filter_query_device(dev,
 				      RNDIS_OID_802_3_PERMANENT_ADDRESS,
-				      Device->HwMacAddr, &size);
+				      dev->hw_mac_adr, &size);
 }
 
-static int RndisFilterQueryDeviceLinkStatus(struct rndis_device *Device)
+static int rndis_filter_query_device_link_status(struct rndis_device *dev)
 {
 	u32 size = sizeof(u32);
 
-	return RndisFilterQueryDevice(Device,
+	return rndis_filter_query_device(dev,
 				      RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
-				      &Device->LinkStatus, &size);
+				      &dev->link_stat, &size);
 }
 
-static int RndisFilterSetPacketFilter(struct rndis_device *Device,
-				      u32 NewFilter)
+static int rndis_filter_set_packet_filter(struct rndis_device *dev,
+				      u32 new_filter)
 {
 	struct rndis_request *request;
 	struct rndis_set_request *set;
-	struct rndis_set_complete *setComplete;
+	struct rndis_set_complete *set_complete;
 	u32 status;
 	int ret;
 
 	/* ASSERT(RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32) <= */
 	/* 	sizeof(struct rndis_message)); */
 
-	request = GetRndisRequest(Device, REMOTE_NDIS_SET_MSG,
+	request = get_rndis_request(dev, REMOTE_NDIS_SET_MSG,
 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
 			sizeof(u32));
 	if (!request) {
@@ -560,19 +570,19 @@
 	}
 
 	/* Setup the rndis set */
-	set = &request->RequestMessage.Message.SetRequest;
-	set->Oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
-	set->InformationBufferLength = sizeof(u32);
-	set->InformationBufferOffset = sizeof(struct rndis_set_request);
+	set = &request->request_msg.msg.set_req;
+	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
+	set->info_buflen = sizeof(u32);
+	set->info_buf_offset = sizeof(struct rndis_set_request);
 
 	memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
-	       &NewFilter, sizeof(u32));
+	       &new_filter, sizeof(u32));
 
-	ret = RndisFilterSendRequest(Device, request);
+	ret = rndis_filter_send_request(dev, request);
 	if (ret != 0)
 		goto Cleanup;
 
-	ret = osd_WaitEventWaitEx(request->WaitEvent, 2000/*2sec*/);
+	ret = osd_waitevent_waitex(request->waitevent, 2000/*2sec*/);
 	if (!ret) {
 		ret = -1;
 		DPRINT_ERR(NETVSC, "timeout before we got a set response...");
@@ -584,27 +594,27 @@
 	} else {
 		if (ret > 0)
 			ret = 0;
-		setComplete = &request->ResponseMessage.Message.SetComplete;
-		status = setComplete->Status;
+		set_complete = &request->response_msg.msg.set_complete;
+		status = set_complete->status;
 	}
 
 Cleanup:
 	if (request)
-		PutRndisRequest(Device, request);
+		put_rndis_request(dev, request);
 Exit:
 	return ret;
 }
 
-int RndisFilterInit(struct netvsc_driver *Driver)
+int rndis_filter_init(struct netvsc_driver *drv)
 {
 	DPRINT_DBG(NETVSC, "sizeof(struct rndis_filter_packet) == %zd",
 		   sizeof(struct rndis_filter_packet));
 
-	Driver->RequestExtSize = sizeof(struct rndis_filter_packet);
+	drv->req_ext_size = sizeof(struct rndis_filter_packet);
 
 	/* Driver->Context = rndisDriver; */
 
-	memset(&gRndisFilter, 0, sizeof(struct rndis_filter_driver_object));
+	memset(&rndis_filter, 0, sizeof(struct rndis_filter_driver_object));
 
 	/*rndisDriver->Driver = Driver;
 
@@ -612,38 +622,38 @@
 	rndisDriver->OnLinkStatusChanged = Driver->OnLinkStatusChanged;*/
 
 	/* Save the original dispatch handlers before we override it */
-	gRndisFilter.InnerDriver.Base.OnDeviceAdd = Driver->Base.OnDeviceAdd;
-	gRndisFilter.InnerDriver.Base.OnDeviceRemove =
-					Driver->Base.OnDeviceRemove;
-	gRndisFilter.InnerDriver.Base.OnCleanup = Driver->Base.OnCleanup;
+	rndis_filter.inner_drv.base.OnDeviceAdd = drv->base.OnDeviceAdd;
+	rndis_filter.inner_drv.base.OnDeviceRemove =
+					drv->base.OnDeviceRemove;
+	rndis_filter.inner_drv.base.OnCleanup = drv->base.OnCleanup;
 
 	/* ASSERT(Driver->OnSend); */
 	/* ASSERT(Driver->OnReceiveCallback); */
-	gRndisFilter.InnerDriver.OnSend = Driver->OnSend;
-	gRndisFilter.InnerDriver.OnReceiveCallback = Driver->OnReceiveCallback;
-	gRndisFilter.InnerDriver.OnLinkStatusChanged =
-					Driver->OnLinkStatusChanged;
+	rndis_filter.inner_drv.send = drv->send;
+	rndis_filter.inner_drv.recv_cb = drv->recv_cb;
+	rndis_filter.inner_drv.link_status_change =
+					drv->link_status_change;
 
 	/* Override */
-	Driver->Base.OnDeviceAdd = RndisFilterOnDeviceAdd;
-	Driver->Base.OnDeviceRemove = RndisFilterOnDeviceRemove;
-	Driver->Base.OnCleanup = RndisFilterOnCleanup;
-	Driver->OnSend = RndisFilterOnSend;
+	drv->base.OnDeviceAdd = rndis_filter_device_add;
+	drv->base.OnDeviceRemove = rndis_filter_device_remove;
+	drv->base.OnCleanup = rndis_filter_cleanup;
+	drv->send = rndis_filter_send;
 	/* Driver->QueryLinkStatus = RndisFilterQueryDeviceLinkStatus; */
-	Driver->OnReceiveCallback = RndisFilterOnReceive;
+	drv->recv_cb = rndis_filter_receive;
 
 	return 0;
 }
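
rndis_filter_init() implements a classic filter pattern: it saves the netvsc driver's original handlers in rndis_filter.inner_drv, then installs its own entry points so every add/remove/send/receive call passes through the filter before being delegated. A minimal sketch of that save-and-override idea with plain function pointers (generic names, not the hv driver's types):

	#include <stdio.h>

	struct driver_ops {
		int (*send)(int pkt);
	};

	/* The "inner" handler we want to wrap. */
	static int inner_send(int pkt)
	{
		printf("inner send %d\n", pkt);
		return 0;
	}

	static struct driver_ops saved;	/* plays the role of inner_drv */

	/* Filter entry point: do filter work, then delegate to the original. */
	static int filter_send(int pkt)
	{
		printf("filter: adding header to %d\n", pkt);
		return saved.send(pkt);
	}

	static void filter_init(struct driver_ops *drv)
	{
		saved = *drv;			/* save the original dispatch table */
		drv->send = filter_send;	/* override with the filter's handler */
	}

	int main(void)
	{
		struct driver_ops drv = { .send = inner_send };

		filter_init(&drv);
		drv.send(7);	/* goes through filter_send, then inner_send */
		return 0;
	}

The driver applies the same idea per-callback rather than per-table, keeping the saved pointers in the file-scope rndis_filter object.
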
 
-static int RndisFilterInitDevice(struct rndis_device *Device)
+static int rndis_filter_init_device(struct rndis_device *dev)
 {
 	struct rndis_request *request;
 	struct rndis_initialize_request *init;
-	struct rndis_initialize_complete *initComplete;
+	struct rndis_initialize_complete *init_complete;
 	u32 status;
 	int ret;
 
-	request = GetRndisRequest(Device, REMOTE_NDIS_INITIALIZE_MSG,
+	request = get_rndis_request(dev, REMOTE_NDIS_INITIALIZE_MSG,
 			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
 	if (!request) {
 		ret = -1;
@@ -651,105 +661,105 @@
 	}
 
 	/* Setup the rndis set */
-	init = &request->RequestMessage.Message.InitializeRequest;
-	init->MajorVersion = RNDIS_MAJOR_VERSION;
-	init->MinorVersion = RNDIS_MINOR_VERSION;
+	init = &request->request_msg.msg.init_req;
+	init->major_ver = RNDIS_MAJOR_VERSION;
+	init->minor_ver = RNDIS_MINOR_VERSION;
 	/* FIXME: Use 1536 - rounded ethernet frame size */
-	init->MaxTransferSize = 2048;
+	init->max_xfer_size = 2048;
 
-	Device->State = RNDIS_DEV_INITIALIZING;
+	dev->state = RNDIS_DEV_INITIALIZING;
 
-	ret = RndisFilterSendRequest(Device, request);
+	ret = rndis_filter_send_request(dev, request);
 	if (ret != 0) {
-		Device->State = RNDIS_DEV_UNINITIALIZED;
+		dev->state = RNDIS_DEV_UNINITIALIZED;
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->waitevent);
 
-	initComplete = &request->ResponseMessage.Message.InitializeComplete;
-	status = initComplete->Status;
+	init_complete = &request->response_msg.msg.init_complete;
+	status = init_complete->status;
 	if (status == RNDIS_STATUS_SUCCESS) {
-		Device->State = RNDIS_DEV_INITIALIZED;
+		dev->state = RNDIS_DEV_INITIALIZED;
 		ret = 0;
 	} else {
-		Device->State = RNDIS_DEV_UNINITIALIZED;
+		dev->state = RNDIS_DEV_UNINITIALIZED;
 		ret = -1;
 	}
 
 Cleanup:
 	if (request)
-		PutRndisRequest(Device, request);
+		put_rndis_request(dev, request);
 
 	return ret;
 }
 
-static void RndisFilterHaltDevice(struct rndis_device *Device)
+static void rndis_filter_halt_device(struct rndis_device *dev)
 {
 	struct rndis_request *request;
 	struct rndis_halt_request *halt;
 
 	/* Attempt to do a rndis device halt */
-	request = GetRndisRequest(Device, REMOTE_NDIS_HALT_MSG,
+	request = get_rndis_request(dev, REMOTE_NDIS_HALT_MSG,
 				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
 	if (!request)
 		goto Cleanup;
 
 	/* Setup the rndis set */
-	halt = &request->RequestMessage.Message.HaltRequest;
-	halt->RequestId = atomic_inc_return(&Device->NewRequestId);
+	halt = &request->request_msg.msg.halt_req;
+	halt->req_id = atomic_inc_return(&dev->new_req_id);
 
 	/* Ignore return since this msg is optional. */
-	RndisFilterSendRequest(Device, request);
+	rndis_filter_send_request(dev, request);
 
-	Device->State = RNDIS_DEV_UNINITIALIZED;
+	dev->state = RNDIS_DEV_UNINITIALIZED;
 
 Cleanup:
 	if (request)
-		PutRndisRequest(Device, request);
+		put_rndis_request(dev, request);
 	return;
 }
 
-static int RndisFilterOpenDevice(struct rndis_device *Device)
+static int rndis_filter_open_device(struct rndis_device *dev)
 {
 	int ret;
 
-	if (Device->State != RNDIS_DEV_INITIALIZED)
+	if (dev->state != RNDIS_DEV_INITIALIZED)
 		return 0;
 
-	ret = RndisFilterSetPacketFilter(Device,
+	ret = rndis_filter_set_packet_filter(dev,
 					 NDIS_PACKET_TYPE_BROADCAST |
 					 NDIS_PACKET_TYPE_ALL_MULTICAST |
 					 NDIS_PACKET_TYPE_DIRECTED);
 	if (ret == 0)
-		Device->State = RNDIS_DEV_DATAINITIALIZED;
+		dev->state = RNDIS_DEV_DATAINITIALIZED;
 
 	return ret;
 }
 
-static int RndisFilterCloseDevice(struct rndis_device *Device)
+static int rndis_filter_close_device(struct rndis_device *dev)
 {
 	int ret;
 
-	if (Device->State != RNDIS_DEV_DATAINITIALIZED)
+	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
 		return 0;
 
-	ret = RndisFilterSetPacketFilter(Device, 0);
+	ret = rndis_filter_set_packet_filter(dev, 0);
 	if (ret == 0)
-		Device->State = RNDIS_DEV_INITIALIZED;
+		dev->state = RNDIS_DEV_INITIALIZED;
 
 	return ret;
 }
 
-static int RndisFilterOnDeviceAdd(struct hv_device *Device,
-				  void *AdditionalInfo)
+static int rndis_filter_device_add(struct hv_device *dev,
+				  void *additional_info)
 {
 	int ret;
 	struct netvsc_device *netDevice;
 	struct rndis_device *rndisDevice;
-	struct netvsc_device_info *deviceInfo = AdditionalInfo;
+	struct netvsc_device_info *deviceInfo = additional_info;
 
-	rndisDevice = GetRndisDevice();
+	rndisDevice = get_rndis_device();
 	if (!rndisDevice)
 		return -1;
 
@@ -760,7 +770,7 @@
 	 * NOTE! Once the channel is created, we may get a receive callback
 	 * (rndis_filter_receive()) before this call is completed
 	 */
-	ret = gRndisFilter.InnerDriver.Base.OnDeviceAdd(Device, AdditionalInfo);
+	ret = rndis_filter.inner_drv.base.OnDeviceAdd(dev, additional_info);
 	if (ret != 0) {
 		kfree(rndisDevice);
 		return ret;
@@ -768,15 +778,15 @@
 
 
 	/* Initialize the rndis device */
-	netDevice = Device->Extension;
+	netDevice = dev->Extension;
 	/* ASSERT(netDevice); */
 	/* ASSERT(netDevice->Device); */
 
-	netDevice->Extension = rndisDevice;
-	rndisDevice->NetDevice = netDevice;
+	netDevice->extension = rndisDevice;
+	rndisDevice->net_dev = netDevice;
 
 	/* Send the rndis initialization message */
-	ret = RndisFilterInitDevice(rndisDevice);
+	ret = rndis_filter_init_device(rndisDevice);
 	if (ret != 0) {
 		/*
 		 * TODO: If rndis init failed, we will need to shut down the
@@ -785,7 +795,7 @@
 	}
 
 	/* Get the mac address */
-	ret = RndisFilterQueryDeviceMac(rndisDevice);
+	ret = rndis_filter_query_device_mac(rndisDevice);
 	if (ret != 0) {
 		/*
 		 * TODO: shutdown rndis device and the channel
@@ -793,62 +803,62 @@
 	}
 
 	DPRINT_INFO(NETVSC, "Device 0x%p mac addr %pM",
-		    rndisDevice, rndisDevice->HwMacAddr);
+		    rndisDevice, rndisDevice->hw_mac_adr);
 
-	memcpy(deviceInfo->MacAddr, rndisDevice->HwMacAddr, ETH_ALEN);
+	memcpy(deviceInfo->mac_adr, rndisDevice->hw_mac_adr, ETH_ALEN);
 
-	RndisFilterQueryDeviceLinkStatus(rndisDevice);
+	rndis_filter_query_device_link_status(rndisDevice);
 
-	deviceInfo->LinkState = rndisDevice->LinkStatus;
+	deviceInfo->link_state = rndisDevice->link_stat;
 	DPRINT_INFO(NETVSC, "Device 0x%p link state %s", rndisDevice,
-		    ((deviceInfo->LinkState) ? ("down") : ("up")));
+		    ((deviceInfo->link_state) ? ("down") : ("up")));
 
 	return ret;
 }
 
-static int RndisFilterOnDeviceRemove(struct hv_device *Device)
+static int rndis_filter_device_remove(struct hv_device *dev)
 {
-	struct netvsc_device *netDevice = Device->Extension;
-	struct rndis_device *rndisDevice = netDevice->Extension;
+	struct netvsc_device *net_dev = dev->Extension;
+	struct rndis_device *rndis_dev = net_dev->extension;
 
 	/* Halt and release the rndis device */
-	RndisFilterHaltDevice(rndisDevice);
+	rndis_filter_halt_device(rndis_dev);
 
-	kfree(rndisDevice);
-	netDevice->Extension = NULL;
+	kfree(rndis_dev);
+	net_dev->extension = NULL;
 
 	/* Pass control to inner driver to remove the device */
-	gRndisFilter.InnerDriver.Base.OnDeviceRemove(Device);
+	rndis_filter.inner_drv.base.OnDeviceRemove(dev);
 
 	return 0;
 }
 
-static void RndisFilterOnCleanup(struct hv_driver *Driver)
+static void rndis_filter_cleanup(struct hv_driver *drv)
 {
 }
 
-int RndisFilterOnOpen(struct hv_device *Device)
+int rndis_filter_open(struct hv_device *dev)
 {
-	struct netvsc_device *netDevice = Device->Extension;
+	struct netvsc_device *netDevice = dev->Extension;
 
 	if (!netDevice)
 		return -EINVAL;
 
-	return RndisFilterOpenDevice(netDevice->Extension);
+	return rndis_filter_open_device(netDevice->extension);
 }
 
-int RndisFilterOnClose(struct hv_device *Device)
+int rndis_filter_close(struct hv_device *dev)
 {
-	struct netvsc_device *netDevice = Device->Extension;
+	struct netvsc_device *netDevice = dev->Extension;
 
 	if (!netDevice)
 		return -EINVAL;
 
-	return RndisFilterCloseDevice(netDevice->Extension);
+	return rndis_filter_close_device(netDevice->extension);
 }
 
-static int RndisFilterOnSend(struct hv_device *Device,
-			     struct hv_netvsc_packet *Packet)
+static int rndis_filter_send(struct hv_device *dev,
+			     struct hv_netvsc_packet *pkt)
 {
 	int ret;
 	struct rndis_filter_packet *filterPacket;
@@ -857,62 +867,62 @@
 	u32 rndisMessageSize;
 
 	/* Add the rndis header */
-	filterPacket = (struct rndis_filter_packet *)Packet->Extension;
+	filterPacket = (struct rndis_filter_packet *)pkt->extension;
 	/* ASSERT(filterPacket); */
 
 	memset(filterPacket, 0, sizeof(struct rndis_filter_packet));
 
-	rndisMessage = &filterPacket->Message;
+	rndisMessage = &filterPacket->msg;
 	rndisMessageSize = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 
-	rndisMessage->NdisMessageType = REMOTE_NDIS_PACKET_MSG;
-	rndisMessage->MessageLength = Packet->TotalDataBufferLength +
+	rndisMessage->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;
+	rndisMessage->msg_len = pkt->total_data_buflen +
 				      rndisMessageSize;
 
-	rndisPacket = &rndisMessage->Message.Packet;
-	rndisPacket->DataOffset = sizeof(struct rndis_packet);
-	rndisPacket->DataLength = Packet->TotalDataBufferLength;
+	rndisPacket = &rndisMessage->msg.pkt;
+	rndisPacket->data_offset = sizeof(struct rndis_packet);
+	rndisPacket->data_len = pkt->total_data_buflen;
 
-	Packet->IsDataPacket = true;
-	Packet->PageBuffers[0].Pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
-	Packet->PageBuffers[0].Offset =
+	pkt->is_data_pkt = true;
+	pkt->page_buf[0].Pfn = virt_to_phys(rndisMessage) >> PAGE_SHIFT;
+	pkt->page_buf[0].Offset =
 			(unsigned long)rndisMessage & (PAGE_SIZE-1);
-	Packet->PageBuffers[0].Length = rndisMessageSize;
+	pkt->page_buf[0].Length = rndisMessageSize;
 
 	/* Save the packet send completion and context */
-	filterPacket->OnCompletion = Packet->Completion.Send.OnSendCompletion;
-	filterPacket->CompletionContext =
-				Packet->Completion.Send.SendCompletionContext;
+	filterPacket->completion = pkt->completion.send.send_completion;
+	filterPacket->completion_ctx =
+				pkt->completion.send.send_completion_ctx;
 
 	/* Use ours */
-	Packet->Completion.Send.OnSendCompletion = RndisFilterOnSendCompletion;
-	Packet->Completion.Send.SendCompletionContext = filterPacket;
+	pkt->completion.send.send_completion = rndis_filter_send_completion;
+	pkt->completion.send.send_completion_ctx = filterPacket;
 
-	ret = gRndisFilter.InnerDriver.OnSend(Device, Packet);
+	ret = rndis_filter.inner_drv.send(dev, pkt);
 	if (ret != 0) {
 		/*
 		 * Reset the completion to originals to allow retries from
 		 * above
 		 */
-		Packet->Completion.Send.OnSendCompletion =
-				filterPacket->OnCompletion;
-		Packet->Completion.Send.SendCompletionContext =
-				filterPacket->CompletionContext;
+		pkt->completion.send.send_completion =
+				filterPacket->completion;
+		pkt->completion.send.send_completion_ctx =
+				filterPacket->completion_ctx;
 	}
 
 	return ret;
 }
 
-static void RndisFilterOnSendCompletion(void *Context)
+static void rndis_filter_send_completion(void *ctx)
 {
-	struct rndis_filter_packet *filterPacket = Context;
+	struct rndis_filter_packet *filterPacket = ctx;
 
 	/* Pass it back to the original handler */
-	filterPacket->OnCompletion(filterPacket->CompletionContext);
+	filterPacket->completion(filterPacket->completion_ctx);
 }
 
 
-static void RndisFilterOnSendRequestCompletion(void *Context)
+static void rndis_filter_send_request_completion(void *ctx)
 {
 	/* Noop */
 }
diff --git a/drivers/staging/hv/rndis_filter.h b/drivers/staging/hv/rndis_filter.h
index 764b9bf..4da18f3 100644
--- a/drivers/staging/hv/rndis_filter.h
+++ b/drivers/staging/hv/rndis_filter.h
@@ -50,6 +50,6 @@
 
 /* Interface */
 
-extern int RndisFilterInit(struct netvsc_driver *driver);
+extern int rndis_filter_init(struct netvsc_driver *driver);
 
 #endif /* _RNDISFILTER_H_ */
diff --git a/drivers/staging/hv/storvsc.c b/drivers/staging/hv/storvsc.c
index 19e87f6..9295113 100644
--- a/drivers/staging/hv/storvsc.c
+++ b/drivers/staging/hv/storvsc.c
@@ -34,43 +34,43 @@
 struct storvsc_request_extension {
 	/* LIST_ENTRY ListEntry; */
 
-	struct hv_storvsc_request *Request;
-	struct hv_device *Device;
+	struct hv_storvsc_request *request;
+	struct hv_device *device;
 
 	/* Synchronize the request/response if needed */
-	struct osd_waitevent *WaitEvent;
+	struct osd_waitevent *wait_event;
 
-	struct vstor_packet VStorPacket;
+	struct vstor_packet vstor_packet;
 };
 
 /* A storvsc device is a device object that contains a vmbus channel */
 struct storvsc_device {
-	struct hv_device *Device;
+	struct hv_device *device;
 
 	/* 0 indicates the device is being destroyed */
-	atomic_t RefCount;
+	atomic_t ref_count;
 
-	atomic_t NumOutstandingRequests;
+	atomic_t num_outstanding_req;
 
 	/*
 	 * Each unique Port/Path/Target represents 1 channel ie scsi
 	 * controller. In reality, the pathid, targetid is always 0
 	 * and the port is set by us
 	 */
-	unsigned int PortNumber;
-	unsigned char PathId;
-	unsigned char TargetId;
+	unsigned int port_number;
+	unsigned char path_id;
+	unsigned char target_id;
 
 	/* LIST_ENTRY OutstandingRequestList; */
 	/* HANDLE OutstandingRequestLock; */
 
 	/* Used for vsc/vsp channel reset process */
-	struct storvsc_request_extension InitRequest;
-	struct storvsc_request_extension ResetRequest;
+	struct storvsc_request_extension init_request;
+	struct storvsc_request_extension reset_request;
 };
 
 
-static const char *gDriverName = "storvsc";
+static const char *g_driver_name = "storvsc";
 
 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
 static const struct hv_guid gStorVscDeviceType = {
@@ -81,131 +81,133 @@
 };
 
 
-static inline struct storvsc_device *AllocStorDevice(struct hv_device *Device)
+static inline struct storvsc_device *alloc_stor_device(struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
-	if (!storDevice)
+	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
+	if (!stor_device)
 		return NULL;
 
 	/* Set to 2 to allow both inbound and outbound traffics */
-	/* (ie GetStorDevice() and MustGetStorDevice()) to proceed. */
-	atomic_cmpxchg(&storDevice->RefCount, 0, 2);
+	/* (ie get_stor_device() and must_get_stor_device()) to proceed. */
+	atomic_cmpxchg(&stor_device->ref_count, 0, 2);
 
-	storDevice->Device = Device;
-	Device->Extension = storDevice;
+	stor_device->device = device;
+	device->Extension = stor_device;
 
-	return storDevice;
+	return stor_device;
 }
 
-static inline void FreeStorDevice(struct storvsc_device *Device)
+static inline void free_stor_device(struct storvsc_device *device)
 {
-	/* ASSERT(atomic_read(&Device->RefCount) == 0); */
-	kfree(Device);
+	/* ASSERT(atomic_read(&device->ref_count) == 0); */
+	kfree(device);
 }
 
 /* Get the stordevice object iff exists and its refcount > 1 */
-static inline struct storvsc_device *GetStorDevice(struct hv_device *Device)
+static inline struct storvsc_device *get_stor_device(struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = (struct storvsc_device *)Device->Extension;
-	if (storDevice && atomic_read(&storDevice->RefCount) > 1)
-		atomic_inc(&storDevice->RefCount);
+	stor_device = (struct storvsc_device *)device->Extension;
+	if (stor_device && atomic_read(&stor_device->ref_count) > 1)
+		atomic_inc(&stor_device->ref_count);
 	else
-		storDevice = NULL;
+		stor_device = NULL;
 
-	return storDevice;
+	return stor_device;
 }
 
 /* Get the stordevice object iff exists and its refcount > 0 */
-static inline struct storvsc_device *MustGetStorDevice(struct hv_device *Device)
+static inline struct storvsc_device *must_get_stor_device(
+					struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = (struct storvsc_device *)Device->Extension;
-	if (storDevice && atomic_read(&storDevice->RefCount))
-		atomic_inc(&storDevice->RefCount);
+	stor_device = (struct storvsc_device *)device->Extension;
+	if (stor_device && atomic_read(&stor_device->ref_count))
+		atomic_inc(&stor_device->ref_count);
 	else
-		storDevice = NULL;
+		stor_device = NULL;
 
-	return storDevice;
+	return stor_device;
 }
 
-static inline void PutStorDevice(struct hv_device *Device)
+static inline void put_stor_device(struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = (struct storvsc_device *)Device->Extension;
-	/* ASSERT(storDevice); */
+	stor_device = (struct storvsc_device *)device->Extension;
+	/* ASSERT(stor_device); */
 
-	atomic_dec(&storDevice->RefCount);
-	/* ASSERT(atomic_read(&storDevice->RefCount)); */
+	atomic_dec(&stor_device->ref_count);
+	/* ASSERT(atomic_read(&stor_device->ref_count)); */
 }
 
-/* Drop ref count to 1 to effectively disable GetStorDevice() */
-static inline struct storvsc_device *ReleaseStorDevice(struct hv_device *Device)
+/* Drop ref count to 1 to effectively disable get_stor_device() */
+static inline struct storvsc_device *release_stor_device(
+					struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = (struct storvsc_device *)Device->Extension;
-	/* ASSERT(storDevice); */
+	stor_device = (struct storvsc_device *)device->Extension;
+	/* ASSERT(stor_device); */
 
 	/* Busy wait until the ref drop to 2, then set it to 1 */
-	while (atomic_cmpxchg(&storDevice->RefCount, 2, 1) != 2)
+	while (atomic_cmpxchg(&stor_device->ref_count, 2, 1) != 2)
 		udelay(100);
 
-	return storDevice;
+	return stor_device;
 }
 
-/* Drop ref count to 0. No one can use StorDevice object. */
-static inline struct storvsc_device *FinalReleaseStorDevice(
-			struct hv_device *Device)
+/* Drop ref count to 0. No one can use stor_device object. */
+static inline struct storvsc_device *final_release_stor_device(
+			struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = (struct storvsc_device *)Device->Extension;
-	/* ASSERT(storDevice); */
+	stor_device = (struct storvsc_device *)device->Extension;
+	/* ASSERT(stor_device); */
 
 	/* Busy wait until the ref drop to 1, then set it to 0 */
-	while (atomic_cmpxchg(&storDevice->RefCount, 1, 0) != 1)
+	while (atomic_cmpxchg(&stor_device->ref_count, 1, 0) != 1)
 		udelay(100);
 
-	Device->Extension = NULL;
-	return storDevice;
+	device->Extension = NULL;
+	return stor_device;
 }
 
-static int StorVscChannelInit(struct hv_device *Device)
+static int stor_vsc_channel_init(struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 	struct storvsc_request_extension *request;
-	struct vstor_packet *vstorPacket;
+	struct vstor_packet *vstor_packet;
 	int ret;
 
-	storDevice = GetStorDevice(Device);
-	if (!storDevice) {
+	stor_device = get_stor_device(device);
+	if (!stor_device) {
 		DPRINT_ERR(STORVSC, "unable to get stor device..."
 			   "device being destroyed?");
 		return -1;
 	}
 
-	request = &storDevice->InitRequest;
-	vstorPacket = &request->VStorPacket;
+	request = &stor_device->init_request;
+	vstor_packet = &request->vstor_packet;
 
 	/*
 	 * Now, initiate the vsc/vsp initialization protocol on the open
 	 * channel
 	 */
 	memset(request, 0, sizeof(struct storvsc_request_extension));
-	request->WaitEvent = osd_WaitEventCreate();
-	if (!request->WaitEvent) {
+	request->wait_event = osd_waitevent_create();
+	if (!request->wait_event) {
 		ret = -ENOMEM;
 		goto nomem;
 	}
 
-	vstorPacket->Operation = VStorOperationBeginInitialization;
-	vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
+	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
+	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
 	/*SpinlockAcquire(gDriverExt.packetListLock);
 	INSERT_TAIL_LIST(&gDriverExt.packetList, &packet->listEntry.entry);
@@ -213,7 +215,7 @@
 
 	DPRINT_INFO(STORVSC, "BEGIN_INITIALIZATION_OPERATION...");
 
-	ret = vmbus_sendpacket(Device->channel, vstorPacket,
+	ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       sizeof(struct vstor_packet),
 			       (unsigned long)request,
 			       VmbusPacketTypeDataInBand,
@@ -224,27 +226,27 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->wait_event);
 
-	if (vstorPacket->Operation != VStorOperationCompleteIo ||
-	    vstorPacket->Status != 0) {
+	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+	    vstor_packet->status != 0) {
 		DPRINT_ERR(STORVSC, "BEGIN_INITIALIZATION_OPERATION failed "
 			   "(op %d status 0x%x)",
-			   vstorPacket->Operation, vstorPacket->Status);
+			   vstor_packet->operation, vstor_packet->status);
 		goto Cleanup;
 	}
 
 	DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION...");
 
 	/* reuse the packet for version range supported */
-	memset(vstorPacket, 0, sizeof(struct vstor_packet));
-	vstorPacket->Operation = VStorOperationQueryProtocolVersion;
-	vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
+	memset(vstor_packet, 0, sizeof(struct vstor_packet));
+	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
-	vstorPacket->Version.MajorMinor = VMSTOR_PROTOCOL_VERSION_CURRENT;
-	FILL_VMSTOR_REVISION(vstorPacket->Version.Revision);
+	vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
+	FILL_VMSTOR_REVISION(vstor_packet->version.revision);
 
-	ret = vmbus_sendpacket(Device->channel, vstorPacket,
+	ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       sizeof(struct vstor_packet),
 			       (unsigned long)request,
 			       VmbusPacketTypeDataInBand,
@@ -255,27 +257,27 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->wait_event);
 
 	/* TODO: Check returned version */
-	if (vstorPacket->Operation != VStorOperationCompleteIo ||
-	    vstorPacket->Status != 0) {
+	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+	    vstor_packet->status != 0) {
 		DPRINT_ERR(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION failed "
 			   "(op %d status 0x%x)",
-			   vstorPacket->Operation, vstorPacket->Status);
+			   vstor_packet->operation, vstor_packet->status);
 		goto Cleanup;
 	}
 
 	/* Query channel properties */
 	DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION...");
 
-	memset(vstorPacket, 0, sizeof(struct vstor_packet));
-	vstorPacket->Operation = VStorOperationQueryProperties;
-	vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
-	vstorPacket->StorageChannelProperties.PortNumber =
-					storDevice->PortNumber;
+	memset(vstor_packet, 0, sizeof(struct vstor_packet));
+	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
+	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+	vstor_packet->storage_channel_properties.port_number =
+					stor_device->port_number;
 
-	ret = vmbus_sendpacket(Device->channel, vstorPacket,
+	ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       sizeof(struct vstor_packet),
 			       (unsigned long)request,
 			       VmbusPacketTypeDataInBand,
@@ -287,31 +289,32 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->wait_event);
 
 	/* TODO: Check returned version */
-	if (vstorPacket->Operation != VStorOperationCompleteIo ||
-	    vstorPacket->Status != 0) {
+	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+	    vstor_packet->status != 0) {
 		DPRINT_ERR(STORVSC, "QUERY_PROPERTIES_OPERATION failed "
 			   "(op %d status 0x%x)",
-			   vstorPacket->Operation, vstorPacket->Status);
+			   vstor_packet->operation, vstor_packet->status);
 		goto Cleanup;
 	}
 
-	storDevice->PathId = vstorPacket->StorageChannelProperties.PathId;
-	storDevice->TargetId = vstorPacket->StorageChannelProperties.TargetId;
+	stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
+	stor_device->target_id
+		= vstor_packet->storage_channel_properties.target_id;
 
 	DPRINT_DBG(STORVSC, "channel flag 0x%x, max xfer len 0x%x",
-		   vstorPacket->StorageChannelProperties.Flags,
-		   vstorPacket->StorageChannelProperties.MaxTransferBytes);
+		   vstor_packet->storage_channel_properties.flags,
+		   vstor_packet->storage_channel_properties.max_transfer_bytes);
 
 	DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION...");
 
-	memset(vstorPacket, 0, sizeof(struct vstor_packet));
-	vstorPacket->Operation = VStorOperationEndInitialization;
-	vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
+	memset(vstor_packet, 0, sizeof(struct vstor_packet));
+	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
+	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
-	ret = vmbus_sendpacket(Device->channel, vstorPacket,
+	ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       sizeof(struct vstor_packet),
 			       (unsigned long)request,
 			       VmbusPacketTypeDataInBand,
@@ -323,125 +326,125 @@
 		goto Cleanup;
 	}
 
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->wait_event);
 
-	if (vstorPacket->Operation != VStorOperationCompleteIo ||
-	    vstorPacket->Status != 0) {
+	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
+	    vstor_packet->status != 0) {
 		DPRINT_ERR(STORVSC, "END_INITIALIZATION_OPERATION failed "
 			   "(op %d status 0x%x)",
-			   vstorPacket->Operation, vstorPacket->Status);
+			   vstor_packet->operation, vstor_packet->status);
 		goto Cleanup;
 	}
 
 	DPRINT_INFO(STORVSC, "**** storage channel up and running!! ****");
 
 Cleanup:
-	kfree(request->WaitEvent);
-	request->WaitEvent = NULL;
+	kfree(request->wait_event);
+	request->wait_event = NULL;
 nomem:
-	PutStorDevice(Device);
+	put_stor_device(device);
 	return ret;
 }
 
-static void StorVscOnIOCompletion(struct hv_device *Device,
-				  struct vstor_packet *VStorPacket,
-				  struct storvsc_request_extension *RequestExt)
+static void stor_vsc_on_io_completion(struct hv_device *device,
+				  struct vstor_packet *vstor_packet,
+				  struct storvsc_request_extension *request_ext)
 {
 	struct hv_storvsc_request *request;
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
-	storDevice = MustGetStorDevice(Device);
-	if (!storDevice) {
+	stor_device = must_get_stor_device(device);
+	if (!stor_device) {
 		DPRINT_ERR(STORVSC, "unable to get stor device..."
 			   "device being destroyed?");
 		return;
 	}
 
 	DPRINT_DBG(STORVSC, "IO_COMPLETE_OPERATION - request extension %p "
-		   "completed bytes xfer %u", RequestExt,
-		   VStorPacket->VmSrb.DataTransferLength);
+		   "completed bytes xfer %u", request_ext,
+		   vstor_packet->vm_srb.data_transfer_length);
 
-	/* ASSERT(RequestExt != NULL); */
-	/* ASSERT(RequestExt->Request != NULL); */
+	/* ASSERT(request_ext != NULL); */
+	/* ASSERT(request_ext->request != NULL); */
 
-	request = RequestExt->Request;
+	request = request_ext->request;
 
 	/* ASSERT(request->OnIOCompletion != NULL); */
 
 	/* Copy over the status...etc */
-	request->Status = VStorPacket->VmSrb.ScsiStatus;
+	request->status = vstor_packet->vm_srb.scsi_status;
 
-	if (request->Status != 0 || VStorPacket->VmSrb.SrbStatus != 1) {
+	if (request->status != 0 || vstor_packet->vm_srb.srb_status != 1) {
 		DPRINT_WARN(STORVSC,
 			    "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
-			    request->Cdb[0], VStorPacket->VmSrb.ScsiStatus,
-			    VStorPacket->VmSrb.SrbStatus);
+			    request->cdb[0], vstor_packet->vm_srb.scsi_status,
+			    vstor_packet->vm_srb.srb_status);
 	}
 
-	if ((request->Status & 0xFF) == 0x02) {
+	if ((request->status & 0xFF) == 0x02) {
 		/* CHECK_CONDITION */
-		if (VStorPacket->VmSrb.SrbStatus & 0x80) {
+		if (vstor_packet->vm_srb.srb_status & 0x80) {
 			/* autosense data available */
 			DPRINT_WARN(STORVSC, "storvsc pkt %p autosense data "
-				    "valid - len %d\n", RequestExt,
-				    VStorPacket->VmSrb.SenseInfoLength);
+				    "valid - len %d\n", request_ext,
+				    vstor_packet->vm_srb.sense_info_length);
 
-			/* ASSERT(VStorPacket->VmSrb.SenseInfoLength <= */
+			/* ASSERT(vstor_packet->vm_srb.sense_info_length <= */
 			/* 	request->SenseBufferSize); */
-			memcpy(request->SenseBuffer,
-			       VStorPacket->VmSrb.SenseData,
-			       VStorPacket->VmSrb.SenseInfoLength);
+			memcpy(request->sense_buffer,
+			       vstor_packet->vm_srb.sense_data,
+			       vstor_packet->vm_srb.sense_info_length);
 
-			request->SenseBufferSize =
-					VStorPacket->VmSrb.SenseInfoLength;
+			request->sense_buffer_size =
+					vstor_packet->vm_srb.sense_info_length;
 		}
 	}
 
 	/* TODO: */
-	request->BytesXfer = VStorPacket->VmSrb.DataTransferLength;
+	request->bytes_xfer = vstor_packet->vm_srb.data_transfer_length;
 
-	request->OnIOCompletion(request);
+	request->on_io_completion(request);
 
-	atomic_dec(&storDevice->NumOutstandingRequests);
+	atomic_dec(&stor_device->num_outstanding_req);
 
-	PutStorDevice(Device);
+	put_stor_device(device);
 }
 
-static void StorVscOnReceive(struct hv_device *Device,
-			     struct vstor_packet *VStorPacket,
-			     struct storvsc_request_extension *RequestExt)
+static void stor_vsc_on_receive(struct hv_device *device,
+			     struct vstor_packet *vstor_packet,
+			     struct storvsc_request_extension *request_ext)
 {
-	switch (VStorPacket->Operation) {
-	case VStorOperationCompleteIo:
+	switch (vstor_packet->operation) {
+	case VSTOR_OPERATION_COMPLETE_IO:
 		DPRINT_DBG(STORVSC, "IO_COMPLETE_OPERATION");
-		StorVscOnIOCompletion(Device, VStorPacket, RequestExt);
+		stor_vsc_on_io_completion(device, vstor_packet, request_ext);
 		break;
-	case VStorOperationRemoveDevice:
+	case VSTOR_OPERATION_REMOVE_DEVICE:
 		DPRINT_INFO(STORVSC, "REMOVE_DEVICE_OPERATION");
 		/* TODO: */
 		break;
 
 	default:
 		DPRINT_INFO(STORVSC, "Unknown operation received - %d",
-			    VStorPacket->Operation);
+			    vstor_packet->operation);
 		break;
 	}
 }
 
-static void StorVscOnChannelCallback(void *context)
+static void stor_vsc_on_channel_callback(void *context)
 {
 	struct hv_device *device = (struct hv_device *)context;
-	struct storvsc_device *storDevice;
-	u32 bytesRecvd;
-	u64 requestId;
+	struct storvsc_device *stor_device;
+	u32 bytes_recvd;
+	u64 request_id;
 	unsigned char packet[ALIGN_UP(sizeof(struct vstor_packet), 8)];
 	struct storvsc_request_extension *request;
 	int ret;
 
 	/* ASSERT(device); */
 
-	storDevice = MustGetStorDevice(device);
-	if (!storDevice) {
+	stor_device = must_get_stor_device(device);
+	if (!stor_device) {
 		DPRINT_ERR(STORVSC, "unable to get stor device..."
 			   "device being destroyed?");
 		return;
@@ -450,32 +453,33 @@
 	do {
 		ret = vmbus_recvpacket(device->channel, packet,
 				       ALIGN_UP(sizeof(struct vstor_packet), 8),
-				       &bytesRecvd, &requestId);
-		if (ret == 0 && bytesRecvd > 0) {
+				       &bytes_recvd, &request_id);
+		if (ret == 0 && bytes_recvd > 0) {
 			DPRINT_DBG(STORVSC, "receive %d bytes - tid %llx",
-				   bytesRecvd, requestId);
+				   bytes_recvd, request_id);
 
-			/* ASSERT(bytesRecvd == sizeof(struct vstor_packet)); */
+			/* ASSERT(bytes_recvd ==
+					sizeof(struct vstor_packet)); */
 
 			request = (struct storvsc_request_extension *)
-					(unsigned long)requestId;
+					(unsigned long)request_id;
 			/* ASSERT(request);c */
 
-			/* if (vstorPacket.Flags & SYNTHETIC_FLAG) */
-			if ((request == &storDevice->InitRequest) ||
-			    (request == &storDevice->ResetRequest)) {
+			/* if (vstor_packet.Flags & SYNTHETIC_FLAG) */
+			if ((request == &stor_device->init_request) ||
+			    (request == &stor_device->reset_request)) {
 				/* DPRINT_INFO(STORVSC,
 				 *             "reset completion - operation "
 				 *             "%u status %u",
-				 *             vstorPacket.Operation,
-				 *             vstorPacket.Status); */
+				 *             vstor_packet.Operation,
+				 *             vstor_packet.Status); */
 
-				memcpy(&request->VStorPacket, packet,
+				memcpy(&request->vstor_packet, packet,
 				       sizeof(struct vstor_packet));
 
-				osd_WaitEventSet(request->WaitEvent);
+				osd_waitevent_set(request->wait_event);
 			} else {
-				StorVscOnReceive(device,
+				stor_vsc_on_receive(device,
 						(struct vstor_packet *)packet,
 						request);
 			}
@@ -485,52 +489,55 @@
 		}
 	} while (1);
 
-	PutStorDevice(device);
+	put_stor_device(device);
 	return;
 }
 
-static int StorVscConnectToVsp(struct hv_device *Device)
+static int stor_vsc_connect_to_vsp(struct hv_device *device)
 {
 	struct vmstorage_channel_properties props;
-	struct storvsc_driver_object *storDriver;
+	struct storvsc_driver_object *stor_driver;
 	int ret;
 
-	storDriver = (struct storvsc_driver_object *)Device->Driver;
+	stor_driver = (struct storvsc_driver_object *)device->Driver;
 	memset(&props, 0, sizeof(struct vmstorage_channel_properties));
 
 	/* Open the channel */
-	ret = vmbus_open(Device->channel,
-			 storDriver->RingBufferSize, storDriver->RingBufferSize,
+	ret = vmbus_open(device->channel,
+			 stor_driver->ring_buffer_size,
+			 stor_driver->ring_buffer_size,
 			 (void *)&props,
 			 sizeof(struct vmstorage_channel_properties),
-			 StorVscOnChannelCallback, Device);
+			 stor_vsc_on_channel_callback, device);
 
 	DPRINT_DBG(STORVSC, "storage props: path id %d, tgt id %d, max xfer %d",
-		   props.PathId, props.TargetId, props.MaxTransferBytes);
+		   props.path_id, props.target_id, props.max_transfer_bytes);
 
 	if (ret != 0) {
 		DPRINT_ERR(STORVSC, "unable to open channel: %d", ret);
 		return -1;
 	}
 
-	ret = StorVscChannelInit(Device);
+	ret = stor_vsc_channel_init(device);
 
 	return ret;
 }
 
 /*
- * StorVscOnDeviceAdd - Callback when the device belonging to this driver is added
+ * stor_vsc_on_device_add - Callback when the device belonging to this driver
+ * is added
  */
-static int StorVscOnDeviceAdd(struct hv_device *Device, void *AdditionalInfo)
+static int stor_vsc_on_device_add(struct hv_device *device,
+					void *additional_info)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 	/* struct vmstorage_channel_properties *props; */
-	struct storvsc_device_info *deviceInfo;
+	struct storvsc_device_info *device_info;
 	int ret = 0;
 
-	deviceInfo = (struct storvsc_device_info *)AdditionalInfo;
-	storDevice = AllocStorDevice(Device);
-	if (!storDevice) {
+	device_info = (struct storvsc_device_info *)additional_info;
+	stor_device = alloc_stor_device(device);
+	if (!stor_device) {
 		ret = -1;
 		goto Cleanup;
 	}
@@ -550,103 +557,103 @@
 	storChannel->PathId = props->PathId;
 	storChannel->TargetId = props->TargetId; */
 
-	storDevice->PortNumber = deviceInfo->PortNumber;
+	stor_device->port_number = device_info->port_number;
 	/* Send it back up */
-	ret = StorVscConnectToVsp(Device);
+	ret = stor_vsc_connect_to_vsp(device);
 
-	/* deviceInfo->PortNumber = storDevice->PortNumber; */
-	deviceInfo->PathId = storDevice->PathId;
-	deviceInfo->TargetId = storDevice->TargetId;
+	/* device_info->PortNumber = stor_device->PortNumber; */
+	device_info->path_id = stor_device->path_id;
+	device_info->target_id = stor_device->target_id;
 
 	DPRINT_DBG(STORVSC, "assigned port %u, path %u target %u\n",
-		   storDevice->PortNumber, storDevice->PathId,
-		   storDevice->TargetId);
+		   stor_device->port_number, stor_device->path_id,
+		   stor_device->target_id);
 
 Cleanup:
 	return ret;
 }
 
 /*
- * StorVscOnDeviceRemove - Callback when the our device is being removed
+ * stor_vsc_on_device_remove - Callback when our device is being removed
  */
-static int StorVscOnDeviceRemove(struct hv_device *Device)
+static int stor_vsc_on_device_remove(struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 
 	DPRINT_INFO(STORVSC, "disabling storage device (%p)...",
-		    Device->Extension);
+		    device->Extension);
 
-	storDevice = ReleaseStorDevice(Device);
+	stor_device = release_stor_device(device);
 
 	/*
 	 * At this point, all outbound traffic should be disabled. We
 	 * only allow inbound traffic (responses) to proceed so that
 	 * outstanding requests can be completed.
 	 */
-	while (atomic_read(&storDevice->NumOutstandingRequests)) {
+	while (atomic_read(&stor_device->num_outstanding_req)) {
 		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
-			    atomic_read(&storDevice->NumOutstandingRequests));
+			    atomic_read(&stor_device->num_outstanding_req));
 		udelay(100);
 	}
 
 	DPRINT_INFO(STORVSC, "removing storage device (%p)...",
-		    Device->Extension);
+		    device->Extension);
 
-	storDevice = FinalReleaseStorDevice(Device);
+	stor_device = final_release_stor_device(device);
 
-	DPRINT_INFO(STORVSC, "storage device (%p) safe to remove", storDevice);
+	DPRINT_INFO(STORVSC, "storage device (%p) safe to remove", stor_device);
 
 	/* Close the channel */
-	vmbus_close(Device->channel);
+	vmbus_close(device->channel);
 
-	FreeStorDevice(storDevice);
+	free_stor_device(stor_device);
 	return 0;
 }
 
-int StorVscOnHostReset(struct hv_device *Device)
+int stor_vsc_on_host_reset(struct hv_device *device)
 {
-	struct storvsc_device *storDevice;
+	struct storvsc_device *stor_device;
 	struct storvsc_request_extension *request;
-	struct vstor_packet *vstorPacket;
+	struct vstor_packet *vstor_packet;
 	int ret;
 
 	DPRINT_INFO(STORVSC, "resetting host adapter...");
 
-	storDevice = GetStorDevice(Device);
-	if (!storDevice) {
+	stor_device = get_stor_device(device);
+	if (!stor_device) {
 		DPRINT_ERR(STORVSC, "unable to get stor device..."
 			   "device being destroyed?");
 		return -1;
 	}
 
-	request = &storDevice->ResetRequest;
-	vstorPacket = &request->VStorPacket;
+	request = &stor_device->reset_request;
+	vstor_packet = &request->vstor_packet;
 
-	request->WaitEvent = osd_WaitEventCreate();
-	if (!request->WaitEvent) {
+	request->wait_event = osd_waitevent_create();
+	if (!request->wait_event) {
 		ret = -ENOMEM;
 		goto Cleanup;
 	}
 
-	vstorPacket->Operation = VStorOperationResetBus;
-	vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
-	vstorPacket->VmSrb.PathId = storDevice->PathId;
+	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
+	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+	vstor_packet->vm_srb.path_id = stor_device->path_id;
 
-	ret = vmbus_sendpacket(Device->channel, vstorPacket,
+	ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       sizeof(struct vstor_packet),
-			       (unsigned long)&storDevice->ResetRequest,
+			       (unsigned long)&stor_device->reset_request,
 			       VmbusPacketTypeDataInBand,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (ret != 0) {
 		DPRINT_ERR(STORVSC, "Unable to send reset packet %p ret %d",
-			   vstorPacket, ret);
+			   vstor_packet, ret);
 		goto Cleanup;
 	}
 
 	/* FIXME: Add a timeout */
-	osd_WaitEventWait(request->WaitEvent);
+	osd_waitevent_wait(request->wait_event);
 
-	kfree(request->WaitEvent);
+	kfree(request->wait_event);
 	DPRINT_INFO(STORVSC, "host adapter reset completed");
 
 	/*
@@ -655,118 +662,118 @@
 	 */
 
 Cleanup:
-	PutStorDevice(Device);
+	put_stor_device(device);
 	return ret;
 }
 
 /*
- * StorVscOnIORequest - Callback to initiate an I/O request
+ * stor_vsc_on_io_request - Callback to initiate an I/O request
  */
-static int StorVscOnIORequest(struct hv_device *Device,
-			      struct hv_storvsc_request *Request)
+static int stor_vsc_on_io_request(struct hv_device *device,
+			      struct hv_storvsc_request *request)
 {
-	struct storvsc_device *storDevice;
-	struct storvsc_request_extension *requestExtension;
-	struct vstor_packet *vstorPacket;
+	struct storvsc_device *stor_device;
+	struct storvsc_request_extension *request_extension;
+	struct vstor_packet *vstor_packet;
 	int ret = 0;
 
-	requestExtension =
-		(struct storvsc_request_extension *)Request->Extension;
-	vstorPacket = &requestExtension->VStorPacket;
-	storDevice = GetStorDevice(Device);
+	request_extension =
+		(struct storvsc_request_extension *)request->extension;
+	vstor_packet = &request_extension->vstor_packet;
+	stor_device = get_stor_device(device);
 
 	DPRINT_DBG(STORVSC, "enter - Device %p, DeviceExt %p, Request %p, "
-		   "Extension %p", Device, storDevice, Request,
-		   requestExtension);
+		   "Extension %p", device, stor_device, request,
+		   request_extension);
 
 	DPRINT_DBG(STORVSC, "req %p len %d bus %d, target %d, lun %d cdblen %d",
-		   Request, Request->DataBuffer.Length, Request->Bus,
-		   Request->TargetId, Request->LunId, Request->CdbLen);
+		   request, request->data_buffer.Length, request->bus,
+		   request->target_id, request->lun_id, request->cdb_len);
 
-	if (!storDevice) {
+	if (!stor_device) {
 		DPRINT_ERR(STORVSC, "unable to get stor device..."
 			   "device being destroyed?");
 		return -2;
 	}
 
-	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, Request->Cdb,
-	 *			Request->CdbLen); */
+	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, request->Cdb,
+	 *			request->CdbLen); */
 
-	requestExtension->Request = Request;
-	requestExtension->Device  = Device;
+	request_extension->request = request;
+	request_extension->device  = device;
 
-	memset(vstorPacket, 0 , sizeof(struct vstor_packet));
+	memset(vstor_packet, 0 , sizeof(struct vstor_packet));
 
-	vstorPacket->Flags |= REQUEST_COMPLETION_FLAG;
+	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
 
-	vstorPacket->VmSrb.Length = sizeof(struct vmscsi_request);
+	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
 
-	vstorPacket->VmSrb.PortNumber = Request->Host;
-	vstorPacket->VmSrb.PathId = Request->Bus;
-	vstorPacket->VmSrb.TargetId = Request->TargetId;
-	vstorPacket->VmSrb.Lun = Request->LunId;
+	vstor_packet->vm_srb.port_number = request->host;
+	vstor_packet->vm_srb.path_id = request->bus;
+	vstor_packet->vm_srb.target_id = request->target_id;
+	vstor_packet->vm_srb.lun = request->lun_id;
 
-	vstorPacket->VmSrb.SenseInfoLength = SENSE_BUFFER_SIZE;
+	vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
 
 	/* Copy over the scsi command descriptor block */
-	vstorPacket->VmSrb.CdbLength = Request->CdbLen;
-	memcpy(&vstorPacket->VmSrb.Cdb, Request->Cdb, Request->CdbLen);
+	vstor_packet->vm_srb.cdb_length = request->cdb_len;
+	memcpy(&vstor_packet->vm_srb.cdb, request->cdb, request->cdb_len);
 
-	vstorPacket->VmSrb.DataIn = Request->Type;
-	vstorPacket->VmSrb.DataTransferLength = Request->DataBuffer.Length;
+	vstor_packet->vm_srb.data_in = request->type;
+	vstor_packet->vm_srb.data_transfer_length = request->data_buffer.Length;
 
-	vstorPacket->Operation = VStorOperationExecuteSRB;
+	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
 
 	DPRINT_DBG(STORVSC, "srb - len %d port %d, path %d, target %d, "
 		   "lun %d senselen %d cdblen %d",
-		   vstorPacket->VmSrb.Length,
-		   vstorPacket->VmSrb.PortNumber,
-		   vstorPacket->VmSrb.PathId,
-		   vstorPacket->VmSrb.TargetId,
-		   vstorPacket->VmSrb.Lun,
-		   vstorPacket->VmSrb.SenseInfoLength,
-		   vstorPacket->VmSrb.CdbLength);
+		   vstor_packet->vm_srb.length,
+		   vstor_packet->vm_srb.port_number,
+		   vstor_packet->vm_srb.path_id,
+		   vstor_packet->vm_srb.target_id,
+		   vstor_packet->vm_srb.lun,
+		   vstor_packet->vm_srb.sense_info_length,
+		   vstor_packet->vm_srb.cdb_length);
 
-	if (requestExtension->Request->DataBuffer.Length) {
-		ret = vmbus_sendpacket_multipagebuffer(Device->channel,
-				&requestExtension->Request->DataBuffer,
-				vstorPacket,
+	if (request_extension->request->data_buffer.Length) {
+		ret = vmbus_sendpacket_multipagebuffer(device->channel,
+				&request_extension->request->data_buffer,
+				vstor_packet,
 				sizeof(struct vstor_packet),
-				(unsigned long)requestExtension);
+				(unsigned long)request_extension);
 	} else {
-		ret = vmbus_sendpacket(Device->channel, vstorPacket,
+		ret = vmbus_sendpacket(device->channel, vstor_packet,
 				       sizeof(struct vstor_packet),
-				       (unsigned long)requestExtension,
+				       (unsigned long)request_extension,
 				       VmbusPacketTypeDataInBand,
 				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	}
 
 	if (ret != 0) {
 		DPRINT_DBG(STORVSC, "Unable to send packet %p ret %d",
-			   vstorPacket, ret);
+			   vstor_packet, ret);
 	}
 
-	atomic_inc(&storDevice->NumOutstandingRequests);
+	atomic_inc(&stor_device->num_outstanding_req);
 
-	PutStorDevice(Device);
+	put_stor_device(device);
 	return ret;
 }
 
 /*
- * StorVscOnCleanup - Perform any cleanup when the driver is removed
+ * stor_vsc_on_cleanup - Perform any cleanup when the driver is removed
  */
-static void StorVscOnCleanup(struct hv_driver *Driver)
+static void stor_vsc_on_cleanup(struct hv_driver *driver)
 {
 }
 
 /*
- * StorVscInitialize - Main entry point
+ * stor_vsc_initialize - Main entry point
  */
-int StorVscInitialize(struct hv_driver *Driver)
+int stor_vsc_initialize(struct hv_driver *driver)
 {
-	struct storvsc_driver_object *storDriver;
+	struct storvsc_driver_object *stor_driver;
 
-	storDriver = (struct storvsc_driver_object *)Driver;
+	stor_driver = (struct storvsc_driver_object *)driver;
 
 	DPRINT_DBG(STORVSC, "sizeof(STORVSC_REQUEST)=%zd "
 		   "sizeof(struct storvsc_request_extension)=%zd "
@@ -778,13 +785,14 @@
 		   sizeof(struct vmscsi_request));
 
 	/* Make sure we are at least 2 pages since 1 page is used for control */
-	/* ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1)); */
+	/* ASSERT(stor_driver->RingBufferSize >= (PAGE_SIZE << 1)); */
 
-	Driver->name = gDriverName;
-	memcpy(&Driver->deviceType, &gStorVscDeviceType,
+	driver->name = g_driver_name;
+	memcpy(&driver->deviceType, &gStorVscDeviceType,
 	       sizeof(struct hv_guid));
 
-	storDriver->RequestExtSize = sizeof(struct storvsc_request_extension);
+	stor_driver->request_ext_size =
+			sizeof(struct storvsc_request_extension);
 
 	/*
 	 * Divide the ring buffer data size (which is 1 page less
@@ -792,22 +800,22 @@
 	 * the ring buffer indices) by the max request size (which is
 	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
 	 */
-	storDriver->MaxOutstandingRequestsPerChannel =
-		((storDriver->RingBufferSize - PAGE_SIZE) /
+	stor_driver->max_outstanding_req_per_channel =
+		((stor_driver->ring_buffer_size - PAGE_SIZE) /
 		  ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET +
 			   sizeof(struct vstor_packet) + sizeof(u64),
 			   sizeof(u64)));
 
 	DPRINT_INFO(STORVSC, "max io %u, currently %u\n",
-		    storDriver->MaxOutstandingRequestsPerChannel,
+		    stor_driver->max_outstanding_req_per_channel,
 		    STORVSC_MAX_IO_REQUESTS);
 
 	/* Setup the dispatch table */
-	storDriver->Base.OnDeviceAdd	= StorVscOnDeviceAdd;
-	storDriver->Base.OnDeviceRemove	= StorVscOnDeviceRemove;
-	storDriver->Base.OnCleanup	= StorVscOnCleanup;
+	stor_driver->base.OnDeviceAdd	= stor_vsc_on_device_add;
+	stor_driver->base.OnDeviceRemove	= stor_vsc_on_device_remove;
+	stor_driver->base.OnCleanup	= stor_vsc_on_cleanup;
 
-	storDriver->OnIORequest		= StorVscOnIORequest;
+	stor_driver->on_io_request	= stor_vsc_on_io_request;
 
 	return 0;
 }
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 8505a1c..fbf5755 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -53,58 +53,58 @@
 };
 
 struct hv_storvsc_request {
-	enum storvsc_request_type Type;
-	u32 Host;
-	u32 Bus;
-	u32 TargetId;
-	u32 LunId;
-	u8 *Cdb;
-	u32 CdbLen;
-	u32 Status;
-	u32 BytesXfer;
+	enum storvsc_request_type type;
+	u32 host;
+	u32 bus;
+	u32 target_id;
+	u32 lun_id;
+	u8 *cdb;
+	u32 cdb_len;
+	u32 status;
+	u32 bytes_xfer;
 
-	unsigned char *SenseBuffer;
-	u32 SenseBufferSize;
+	unsigned char *sense_buffer;
+	u32 sense_buffer_size;
 
-	void *Context;
+	void *context;
 
-	void (*OnIOCompletion)(struct hv_storvsc_request *Request);
+	void (*on_io_completion)(struct hv_storvsc_request *request);
 
 	/* This points to the memory after DataBuffer */
-	void *Extension;
+	void *extension;
 
-	struct hv_multipage_buffer DataBuffer;
+	struct hv_multipage_buffer data_buffer;
 };
 
 /* Represents the block vsc driver */
 struct storvsc_driver_object {
 	/* Must be the first field */
 	/* Which is a bug FIXME! */
-	struct hv_driver Base;
+	struct hv_driver base;
 
 	/* Set by caller (in bytes) */
-	u32 RingBufferSize;
+	u32 ring_buffer_size;
 
 	/* Allocate this much private extension for each I/O request */
-	u32 RequestExtSize;
+	u32 request_ext_size;
 
 	/* Maximum # of requests in flight per channel/device */
-	u32 MaxOutstandingRequestsPerChannel;
+	u32 max_outstanding_req_per_channel;
 
 	/* Specific to this driver */
-	int (*OnIORequest)(struct hv_device *Device,
-			   struct hv_storvsc_request *Request);
+	int (*on_io_request)(struct hv_device *device,
+			   struct hv_storvsc_request *request);
 };
 
 struct storvsc_device_info {
-	unsigned int PortNumber;
-	unsigned char PathId;
-	unsigned char TargetId;
+	unsigned int port_number;
+	unsigned char path_id;
+	unsigned char target_id;
 };
 
 /* Interface */
-int StorVscInitialize(struct hv_driver *driver);
-int StorVscOnHostReset(struct hv_device *Device);
-int BlkVscInitialize(struct hv_driver *driver);
+int stor_vsc_initialize(struct hv_driver *driver);
+int stor_vsc_on_host_reset(struct hv_device *device);
+int blk_vsc_initialize(struct hv_driver *driver);
 
 #endif /* _STORVSC_API_H_ */
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 6f8d67d..17f1b34 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -140,28 +140,28 @@
 	struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
 	struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;
 
-	storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
+	storvsc_drv_obj->ring_buffer_size = storvsc_ringbuffer_size;
 
 	/* Callback to client driver to complete the initialization */
-	drv_init(&storvsc_drv_obj->Base);
+	drv_init(&storvsc_drv_obj->base);
 
 	DPRINT_INFO(STORVSC_DRV,
 		    "request extension size %u, max outstanding reqs %u",
-		    storvsc_drv_obj->RequestExtSize,
-		    storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
+		    storvsc_drv_obj->request_ext_size,
+		    storvsc_drv_obj->max_outstanding_req_per_channel);
 
-	if (storvsc_drv_obj->MaxOutstandingRequestsPerChannel <
+	if (storvsc_drv_obj->max_outstanding_req_per_channel <
 	    STORVSC_MAX_IO_REQUESTS) {
 		DPRINT_ERR(STORVSC_DRV,
 			   "The number of outstanding io requests (%d) "
 			   "is larger than that supported (%d) internally.",
 			   STORVSC_MAX_IO_REQUESTS,
-			   storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
+			   storvsc_drv_obj->max_outstanding_req_per_channel);
 		return -1;
 	}
 
-	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
-	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
+	drv_ctx->driver.name = storvsc_drv_obj->base.name;
+	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->base.deviceType,
 	       sizeof(struct hv_guid));
 
 	drv_ctx->probe = storvsc_probe;
@@ -206,8 +206,8 @@
 		device_unregister(current_dev);
 	}
 
-	if (storvsc_drv_obj->Base.OnCleanup)
-		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
+	if (storvsc_drv_obj->base.OnCleanup)
+		storvsc_drv_obj->base.OnCleanup(&storvsc_drv_obj->base);
 
 	vmbus_child_driver_unregister(drv_ctx);
 	return;
@@ -231,7 +231,7 @@
 	struct host_device_context *host_device_ctx;
 	struct storvsc_device_info device_info;
 
-	if (!storvsc_drv_obj->Base.OnDeviceAdd)
+	if (!storvsc_drv_obj->base.OnDeviceAdd)
 		return -1;
 
 	host = scsi_host_alloc(&scsi_driver,
@@ -252,7 +252,7 @@
 	host_device_ctx->request_pool =
 				kmem_cache_create(dev_name(&device_ctx->device),
 					sizeof(struct storvsc_cmd_request) +
-					storvsc_drv_obj->RequestExtSize, 0,
+					storvsc_drv_obj->request_ext_size, 0,
 					SLAB_HWCACHE_ALIGN, NULL);
 
 	if (!host_device_ctx->request_pool) {
@@ -260,9 +260,9 @@
 		return -ENOMEM;
 	}
 
-	device_info.PortNumber = host->host_no;
+	device_info.port_number = host->host_no;
 	/* Call to the vsc driver to add the device */
-	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj,
+	ret = storvsc_drv_obj->base.OnDeviceAdd(device_obj,
 						(void *)&device_info);
 	if (ret != 0) {
 		DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
@@ -272,8 +272,8 @@
 	}
 
 	/* host_device_ctx->port = device_info.PortNumber; */
-	host_device_ctx->path = device_info.PathId;
-	host_device_ctx->target = device_info.TargetId;
+	host_device_ctx->path = device_info.path_id;
+	host_device_ctx->target = device_info.target_id;
 
 	/* max # of devices per target */
 	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
@@ -287,7 +287,7 @@
 	if (ret != 0) {
 		DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");
 
-		storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+		storvsc_drv_obj->base.OnDeviceRemove(device_obj);
 
 		kmem_cache_destroy(host_device_ctx->request_pool);
 		scsi_host_put(host);
@@ -317,14 +317,14 @@
 			(struct host_device_context *)host->hostdata;
 
 
-	if (!storvsc_drv_obj->Base.OnDeviceRemove)
+	if (!storvsc_drv_obj->base.OnDeviceRemove)
 		return -1;
 
 	/*
 	 * Call to the vsc driver to let it know that the device is being
 	 * removed
 	 */
-	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
+	ret = storvsc_drv_obj->base.OnDeviceRemove(device_obj);
 	if (ret != 0) {
 		/* TODO: */
 		DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)",
@@ -350,7 +350,7 @@
 static void storvsc_commmand_completion(struct hv_storvsc_request *request)
 {
 	struct storvsc_cmd_request *cmd_request =
-		(struct storvsc_cmd_request *)request->Context;
+		(struct storvsc_cmd_request *)request->context;
 	struct scsi_cmnd *scmnd = cmd_request->cmd;
 	struct host_device_context *host_device_ctx =
 		(struct host_device_context *)scmnd->device->host->hostdata;
@@ -375,16 +375,17 @@
 				      cmd_request->bounce_sgl_count);
 	}
 
-	scmnd->result = request->Status;
+	scmnd->result = request->status;
 
 	if (scmnd->result) {
 		if (scsi_normalize_sense(scmnd->sense_buffer,
-					 request->SenseBufferSize, &sense_hdr))
+				request->sense_buffer_size, &sense_hdr))
 			scsi_print_sense_hdr("storvsc", &sense_hdr);
 	}
 
-	/* ASSERT(request->BytesXfer <= request->DataBuffer.Length); */
-	scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);
+	/* ASSERT(request->BytesXfer <= request->data_buffer.Length); */
+	scsi_set_resid(scmnd,
+		request->data_buffer.Length - request->bytes_xfer);
 
 	scsi_done_fn = scmnd->scsi_done;
 
@@ -657,42 +658,42 @@
 
 	request = &cmd_request->request;
 
-	request->Extension =
+	request->extension =
 		(void *)((unsigned long)cmd_request + request_size);
 	DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size,
-		   storvsc_drv_obj->RequestExtSize);
+		   storvsc_drv_obj->request_ext_size);
 
 	/* Build the SRB */
 	switch (scmnd->sc_data_direction) {
 	case DMA_TO_DEVICE:
-		request->Type = WRITE_TYPE;
+		request->type = WRITE_TYPE;
 		break;
 	case DMA_FROM_DEVICE:
-		request->Type = READ_TYPE;
+		request->type = READ_TYPE;
 		break;
 	default:
-		request->Type = UNKNOWN_TYPE;
+		request->type = UNKNOWN_TYPE;
 		break;
 	}
 
-	request->OnIOCompletion = storvsc_commmand_completion;
-	request->Context = cmd_request;/* scmnd; */
+	request->on_io_completion = storvsc_commmand_completion;
+	request->context = cmd_request;/* scmnd; */
 
 	/* request->PortId = scmnd->device->channel; */
-	request->Host = host_device_ctx->port;
-	request->Bus = scmnd->device->channel;
-	request->TargetId = scmnd->device->id;
-	request->LunId = scmnd->device->lun;
+	request->host = host_device_ctx->port;
+	request->bus = scmnd->device->channel;
+	request->target_id = scmnd->device->id;
+	request->lun_id = scmnd->device->lun;
 
 	/* ASSERT(scmnd->cmd_len <= 16); */
-	request->CdbLen = scmnd->cmd_len;
-	request->Cdb = scmnd->cmnd;
+	request->cdb_len = scmnd->cmd_len;
+	request->cdb = scmnd->cmnd;
 
-	request->SenseBuffer = scmnd->sense_buffer;
-	request->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
+	request->sense_buffer = scmnd->sense_buffer;
+	request->sense_buffer_size = SCSI_SENSE_BUFFERSIZE;
 
 
-	request->DataBuffer.Length = scsi_bufflen(scmnd);
+	request->data_buffer.Length = scsi_bufflen(scmnd);
 	if (scsi_sg_count(scmnd)) {
 		sgl = (struct scatterlist *)scsi_sglist(scmnd);
 		sg_count = scsi_sg_count(scmnd);
@@ -733,25 +734,25 @@
 			sg_count = cmd_request->bounce_sgl_count;
 		}
 
-		request->DataBuffer.Offset = sgl[0].offset;
+		request->data_buffer.Offset = sgl[0].offset;
 
 		for (i = 0; i < sg_count; i++) {
 			DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
 				   i, sgl[i].length, sgl[i].offset);
-			request->DataBuffer.PfnArray[i] =
+			request->data_buffer.PfnArray[i] =
 				page_to_pfn(sg_page((&sgl[i])));
 		}
 	} else if (scsi_sglist(scmnd)) {
 		/* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
-		request->DataBuffer.Offset =
+		request->data_buffer.Offset =
 			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
-		request->DataBuffer.PfnArray[0] =
+		request->data_buffer.PfnArray[0] =
 			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
 	}
 
 retry_request:
 	/* Invokes the vsc to start an IO */
-	ret = storvsc_drv_obj->OnIORequest(&device_ctx->device_obj,
+	ret = storvsc_drv_obj->on_io_request(&device_ctx->device_obj,
 					   &cmd_request->request);
 	if (ret == -1) {
 		/* no more space */
@@ -844,7 +845,7 @@
 		    scmnd->device, &device_ctx->device_obj);
 
 	/* Invokes the vsc to reset the host/bus */
-	ret = StorVscOnHostReset(&device_ctx->device_obj);
+	ret = stor_vsc_on_host_reset(&device_ctx->device_obj);
 	if (ret != 0)
 		return ret;
 
@@ -939,7 +940,7 @@
 	int ret;
 
 	DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
-	ret = storvsc_drv_init(StorVscInitialize);
+	ret = storvsc_drv_init(stor_vsc_initialize);
 	return ret;
 }
 
diff --git a/drivers/staging/hv/vmbus.c b/drivers/staging/hv/vmbus.c
deleted file mode 100644
index d449daf..0000000
--- a/drivers/staging/hv/vmbus.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Authors:
- *   Haiyang Zhang <haiyangz@microsoft.com>
- *   Hank Janssen  <hjanssen@microsoft.com>
- *
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include "osd.h"
-#include "logging.h"
-#include "version_info.h"
-#include "vmbus_private.h"
-
-static const char *gDriverName = "vmbus";
-
-/*
- * Windows vmbus does not defined this.
- * We defined this to be consistent with other devices
- */
-/* {c5295816-f63a-4d5f-8d1a-4daf999ca185} */
-static const struct hv_guid gVmbusDeviceType = {
-	.data = {
-		0x16, 0x58, 0x29, 0xc5, 0x3a, 0xf6, 0x5f, 0x4d,
-		0x8d, 0x1a, 0x4d, 0xaf, 0x99, 0x9c, 0xa1, 0x85
-	}
-};
-
-/* {ac3760fc-9adf-40aa-9427-a70ed6de95c5} */
-static const struct hv_guid gVmbusDeviceId = {
-	.data = {
-		0xfc, 0x60, 0x37, 0xac, 0xdf, 0x9a, 0xaa, 0x40,
-		0x94, 0x27, 0xa7, 0x0e, 0xd6, 0xde, 0x95, 0xc5
-	}
-};
-
-static struct hv_driver *gDriver; /* vmbus driver object */
-static struct hv_device *gDevice; /* vmbus root device */
-
-/*
- * VmbusGetChannelOffers - Retrieve the channel offers from the parent partition
- */
-static void VmbusGetChannelOffers(void)
-{
-	vmbus_request_offers();
-}
-
-/*
- * VmbusCreateChildDevice - Creates the child device on the bus that represents the channel offer
- */
-struct hv_device *VmbusChildDeviceCreate(struct hv_guid *DeviceType,
-					 struct hv_guid *DeviceInstance,
-					 struct vmbus_channel *channel)
-{
-	struct vmbus_driver *vmbusDriver = (struct vmbus_driver *)gDriver;
-
-	return vmbusDriver->OnChildDeviceCreate(DeviceType, DeviceInstance,
-						channel);
-}
-
-/*
- * VmbusChildDeviceAdd - Registers the child device with the vmbus
- */
-int VmbusChildDeviceAdd(struct hv_device *ChildDevice)
-{
-	struct vmbus_driver *vmbusDriver = (struct vmbus_driver *)gDriver;
-
-	return vmbusDriver->OnChildDeviceAdd(gDevice, ChildDevice);
-}
-
-/*
- * VmbusChildDeviceRemove Unregisters the child device from the vmbus
- */
-void VmbusChildDeviceRemove(struct hv_device *ChildDevice)
-{
-	struct vmbus_driver *vmbusDriver = (struct vmbus_driver *)gDriver;
-
-	vmbusDriver->OnChildDeviceRemove(ChildDevice);
-}
-
-/*
- * VmbusOnDeviceAdd - Callback when the root bus device is added
- */
-static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
-{
-	u32 *irqvector = AdditionalInfo;
-	int ret;
-
-	gDevice = dev;
-
-	memcpy(&gDevice->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
-	memcpy(&gDevice->deviceInstance, &gVmbusDeviceId,
-	       sizeof(struct hv_guid));
-
-	/* strcpy(dev->name, "vmbus"); */
-	/* SynIC setup... */
-	on_each_cpu(HvSynicInit, (void *)irqvector, 1);
-
-	/* Connect to VMBus in the root partition */
-	ret = VmbusConnect();
-
-	/* VmbusSendEvent(device->localPortId+1); */
-	return ret;
-}
-
-/*
- * VmbusOnDeviceRemove - Callback when the root bus device is removed
- */
-static int VmbusOnDeviceRemove(struct hv_device *dev)
-{
-	int ret = 0;
-
-	vmbus_release_unattached_channels();
-	VmbusDisconnect();
-	on_each_cpu(HvSynicCleanup, NULL, 1);
-	return ret;
-}
-
-/*
- * VmbusOnCleanup - Perform any cleanup when the driver is removed
- */
-static void VmbusOnCleanup(struct hv_driver *drv)
-{
-	/* struct vmbus_driver *driver = (struct vmbus_driver *)drv; */
-
-	HvCleanup();
-}
-
-/*
- * VmbusOnMsgDPC - DPC routine to handle messages from the hypervisior
- */
-static void VmbusOnMsgDPC(struct hv_driver *drv)
-{
-	int cpu = smp_processor_id();
-	void *page_addr = gHvContext.synICMessagePage[cpu];
-	struct hv_message *msg = (struct hv_message *)page_addr +
-				  VMBUS_MESSAGE_SINT;
-	struct hv_message *copied;
-
-	while (1) {
-		if (msg->Header.MessageType == HvMessageTypeNone) {
-			/* no msg */
-			break;
-		} else {
-			copied = kmemdup(msg, sizeof(*copied), GFP_ATOMIC);
-			if (copied == NULL)
-				continue;
-
-			osd_schedule_callback(gVmbusConnection.WorkQueue,
-					      vmbus_onmessage,
-					      (void *)copied);
-		}
-
-		msg->Header.MessageType = HvMessageTypeNone;
-
-		/*
-		 * Make sure the write to MessageType (ie set to
-		 * HvMessageTypeNone) happens before we read the
-		 * MessagePending and EOMing. Otherwise, the EOMing
-		 * will not deliver any more messages since there is
-		 * no empty slot
-		 */
-		mb();
-
-		if (msg->Header.MessageFlags.MessagePending) {
-			/*
-			 * This will cause message queue rescan to
-			 * possibly deliver another msg from the
-			 * hypervisor
-			 */
-			wrmsrl(HV_X64_MSR_EOM, 0);
-		}
-	}
-}
-
-/*
- * VmbusOnEventDPC - DPC routine to handle events from the hypervisior
- */
-static void VmbusOnEventDPC(struct hv_driver *drv)
-{
-	/* TODO: Process any events */
-	VmbusOnEvents();
-}
-
-/*
- * VmbusOnISR - ISR routine
- */
-static int VmbusOnISR(struct hv_driver *drv)
-{
-	int ret = 0;
-	int cpu = smp_processor_id();
-	void *page_addr;
-	struct hv_message *msg;
-	union hv_synic_event_flags *event;
-
-	page_addr = gHvContext.synICMessagePage[cpu];
-	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
-
-	/* Check if there are actual msgs to be process */
-	if (msg->Header.MessageType != HvMessageTypeNone) {
-		DPRINT_DBG(VMBUS, "received msg type %d size %d",
-				msg->Header.MessageType,
-				msg->Header.PayloadSize);
-		ret |= 0x1;
-	}
-
-	/* TODO: Check if there are events to be process */
-	page_addr = gHvContext.synICEventPage[cpu];
-	event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
-
-	/* Since we are a child, we only need to check bit 0 */
-	if (test_and_clear_bit(0, (unsigned long *) &event->Flags32[0])) {
-		DPRINT_DBG(VMBUS, "received event %d", event->Flags32[0]);
-		ret |= 0x2;
-	}
-
-	return ret;
-}
-
-/*
- * VmbusInitialize - Main entry point
- */
-int VmbusInitialize(struct hv_driver *drv)
-{
-	struct vmbus_driver *driver = (struct vmbus_driver *)drv;
-	int ret;
-
-	DPRINT_INFO(VMBUS, "+++++++ HV Driver version = %s +++++++",
-		    HV_DRV_VERSION);
-	DPRINT_INFO(VMBUS, "+++++++ Vmbus supported version = %d +++++++",
-			VMBUS_REVISION_NUMBER);
-	DPRINT_INFO(VMBUS, "+++++++ Vmbus using SINT %d +++++++",
-			VMBUS_MESSAGE_SINT);
-	DPRINT_DBG(VMBUS, "sizeof(vmbus_channel_packet_page_buffer)=%zd, "
-			"sizeof(VMBUS_CHANNEL_PACKET_MULITPAGE_BUFFER)=%zd",
-			sizeof(struct vmbus_channel_packet_page_buffer),
-			sizeof(struct vmbus_channel_packet_multipage_buffer));
-
-	drv->name = gDriverName;
-	memcpy(&drv->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
-
-	/* Setup dispatch table */
-	driver->Base.OnDeviceAdd	= VmbusOnDeviceAdd;
-	driver->Base.OnDeviceRemove	= VmbusOnDeviceRemove;
-	driver->Base.OnCleanup		= VmbusOnCleanup;
-	driver->OnIsr			= VmbusOnISR;
-	driver->OnMsgDpc		= VmbusOnMsgDPC;
-	driver->OnEventDpc		= VmbusOnEventDPC;
-	driver->GetChannelOffers	= VmbusGetChannelOffers;
-
-	/* Hypervisor initialization...setup hypercall page..etc */
-	ret = HvInit();
-	if (ret != 0)
-		DPRINT_ERR(VMBUS, "Unable to initialize the hypervisor - 0x%x",
-				ret);
-	gDriver = drv;
-
-	return ret;
-}
diff --git a/drivers/staging/hv/vmbus_api.h b/drivers/staging/hv/vmbus_api.h
index 2af42e5..2da3f52 100644
--- a/drivers/staging/hv/vmbus_api.h
+++ b/drivers/staging/hv/vmbus_api.h
@@ -115,28 +115,4 @@
 	void *Extension;
 };
 
-/* Vmbus driver object */
-struct vmbus_driver {
-	/* !! Must be the 1st field !! */
-	/* FIXME if ^, then someone is doing somthing stupid */
-	struct hv_driver Base;
-
-	/* Set by the caller */
-	struct hv_device * (*OnChildDeviceCreate)(struct hv_guid *DeviceType,
-						struct hv_guid *DeviceInstance,
-						struct vmbus_channel *channel);
-	void (*OnChildDeviceDestroy)(struct hv_device *device);
-	int (*OnChildDeviceAdd)(struct hv_device *RootDevice,
-				struct hv_device *ChildDevice);
-	void (*OnChildDeviceRemove)(struct hv_device *device);
-
-	/* Set by the callee */
-	int (*OnIsr)(struct hv_driver *driver);
-	void (*OnMsgDpc)(struct hv_driver *driver);
-	void (*OnEventDpc)(struct hv_driver *driver);
-	void (*GetChannelOffers)(void);
-};
-
-int VmbusInitialize(struct hv_driver *drv);
-
 #endif /* _VMBUS_API_H_ */
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index 0d9f3a4..84fdb64 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
 #include "logging.h"
 #include "vmbus.h"
 #include "channel.h"
+#include "vmbus_private.h"
 
 
 /* FIXME! We need to do this dynamically for PIC and APIC system */
@@ -46,7 +47,7 @@
 	/* The driver field is not used in here. Instead, the bus field is */
 	/* used to represent the driver */
 	struct driver_context drv_ctx;
-	struct vmbus_driver drv_obj;
+	struct hv_driver drv_obj;
 
 	struct bus_type bus;
 	struct tasklet_struct msg_dpc;
@@ -69,13 +70,6 @@
 static void vmbus_device_release(struct device *device);
 static void vmbus_bus_release(struct device *device);
 
-static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
-						   struct hv_guid *instance,
-						   struct vmbus_channel *channel);
-static void vmbus_child_device_destroy(struct hv_device *device_obj);
-static int vmbus_child_device_register(struct hv_device *root_device_obj,
-				       struct hv_device *child_device_obj);
-static void vmbus_child_device_unregister(struct hv_device *child_device_obj);
 static ssize_t vmbus_show_device_attr(struct device *dev,
 				      struct device_attribute *dev_attr,
 				      char *buf);
@@ -129,6 +123,182 @@
 	.bus.dev_attrs =	vmbus_device_attrs,
 };
 
+static const char *gDriverName = "hyperv";
+
+/*
+ * Windows vmbus does not define this.
+ * We define this to be consistent with other devices
+ */
+/* {c5295816-f63a-4d5f-8d1a-4daf999ca185} */
+static const struct hv_guid gVmbusDeviceType = {
+	.data = {
+		0x16, 0x58, 0x29, 0xc5, 0x3a, 0xf6, 0x5f, 0x4d,
+		0x8d, 0x1a, 0x4d, 0xaf, 0x99, 0x9c, 0xa1, 0x85
+	}
+};
+
+/* {ac3760fc-9adf-40aa-9427-a70ed6de95c5} */
+static const struct hv_guid gVmbusDeviceId = {
+	.data = {
+		0xfc, 0x60, 0x37, 0xac, 0xdf, 0x9a, 0xaa, 0x40,
+		0x94, 0x27, 0xa7, 0x0e, 0xd6, 0xde, 0x95, 0xc5
+	}
+};
+
+static struct hv_device *gDevice; /* vmbus root device */
+
+/*
+ * VmbusChildDeviceAdd - Registers the child device with the vmbus
+ */
+int VmbusChildDeviceAdd(struct hv_device *ChildDevice)
+{
+	return vmbus_child_device_register(gDevice, ChildDevice);
+}
+
+/*
+ * VmbusOnDeviceAdd - Callback when the root bus device is added
+ */
+static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
+{
+	u32 *irqvector = AdditionalInfo;
+	int ret;
+
+	gDevice = dev;
+
+	memcpy(&gDevice->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
+	memcpy(&gDevice->deviceInstance, &gVmbusDeviceId,
+	       sizeof(struct hv_guid));
+
+	/* strcpy(dev->name, "vmbus"); */
+	/* SynIC setup... */
+	on_each_cpu(hv_synic_init, (void *)irqvector, 1);
+
+	/* Connect to VMBus in the root partition */
+	ret = VmbusConnect();
+
+	/* VmbusSendEvent(device->localPortId+1); */
+	return ret;
+}
+
+/*
+ * VmbusOnDeviceRemove - Callback when the root bus device is removed
+ */
+static int VmbusOnDeviceRemove(struct hv_device *dev)
+{
+	int ret = 0;
+
+	vmbus_release_unattached_channels();
+	VmbusDisconnect();
+	on_each_cpu(hv_synic_cleanup, NULL, 1);
+	return ret;
+}
+
+/*
+ * VmbusOnCleanup - Perform any cleanup when the driver is removed
+ */
+static void VmbusOnCleanup(struct hv_driver *drv)
+{
+	/* struct vmbus_driver *driver = (struct vmbus_driver *)drv; */
+
+	hv_cleanup();
+}
+
+struct onmessage_work_context {
+	struct work_struct work;
+	struct hv_message msg;
+};
+
+static void vmbus_onmessage_work(struct work_struct *work)
+{
+	struct onmessage_work_context *ctx;
+
+	ctx = container_of(work, struct onmessage_work_context,
+			   work);
+	vmbus_onmessage(&ctx->msg);
+	kfree(ctx);
+}
+
+/*
+ * vmbus_on_msg_dpc - DPC routine to handle messages from the hypervisor
+ */
+static void vmbus_on_msg_dpc(struct hv_driver *drv)
+{
+	int cpu = smp_processor_id();
+	void *page_addr = hv_context.synic_message_page[cpu];
+	struct hv_message *msg = (struct hv_message *)page_addr +
+				  VMBUS_MESSAGE_SINT;
+	struct onmessage_work_context *ctx;
+
+	while (1) {
+		if (msg->header.message_type == HVMSG_NONE) {
+			/* no msg */
+			break;
+		} else {
+			ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+			if (ctx == NULL)
+				continue;
+			INIT_WORK(&ctx->work, vmbus_onmessage_work);
+			memcpy(&ctx->msg, msg, sizeof(*msg));
+			queue_work(gVmbusConnection.WorkQueue, &ctx->work);
+		}
+
+		msg->header.message_type = HVMSG_NONE;
+
+		/*
+		 * Make sure the write to MessageType (ie set to
+		 * HVMSG_NONE) happens before we read the
+		 * MessagePending and EOMing. Otherwise, the EOMing
+		 * will not deliver any more messages since there is
+		 * no empty slot
+		 */
+		mb();
+
+		if (msg->header.message_flags.msg_pending) {
+			/*
+			 * This will cause message queue rescan to
+			 * possibly deliver another msg from the
+			 * hypervisor
+			 */
+			wrmsrl(HV_X64_MSR_EOM, 0);
+		}
+	}
+}
+
+/*
+ * vmbus_on_isr - ISR routine
+ */
+static int vmbus_on_isr(struct hv_driver *drv)
+{
+	int ret = 0;
+	int cpu = smp_processor_id();
+	void *page_addr;
+	struct hv_message *msg;
+	union hv_synic_event_flags *event;
+
+	page_addr = hv_context.synic_message_page[cpu];
+	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+
+	/* Check if there are actual msgs to be processed */
+	if (msg->header.message_type != HVMSG_NONE) {
+		DPRINT_DBG(VMBUS, "received msg type %d size %d",
+				msg->header.message_type,
+				msg->header.payload_size);
+		ret |= 0x1;
+	}
+
+	/* TODO: Check if there are events to be processed */
+	page_addr = hv_context.synic_event_page[cpu];
+	event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+
+	/* Since we are a child, we only need to check bit 0 */
+	if (test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+		DPRINT_DBG(VMBUS, "received event %d", event->flags32[0]);
+		ret |= 0x2;
+	}
+
+	return ret;
+}
+
 static void get_channel_info(struct hv_device *device,
 			     struct hv_device_info *info)
 {
@@ -139,35 +309,38 @@
 
 	vmbus_get_debug_info(device->channel, &debug_info);
 
-	info->ChannelId = debug_info.RelId;
-	info->ChannelState = debug_info.State;
-	memcpy(&info->ChannelType, &debug_info.InterfaceType,
+	info->ChannelId = debug_info.relid;
+	info->ChannelState = debug_info.state;
+	memcpy(&info->ChannelType, &debug_info.interfacetype,
 	       sizeof(struct hv_guid));
-	memcpy(&info->ChannelInstance, &debug_info.InterfaceInstance,
+	memcpy(&info->ChannelInstance, &debug_info.interface_instance,
 	       sizeof(struct hv_guid));
 
-	info->MonitorId = debug_info.MonitorId;
+	info->MonitorId = debug_info.monitorid;
 
-	info->ServerMonitorPending = debug_info.ServerMonitorPending;
-	info->ServerMonitorLatency = debug_info.ServerMonitorLatency;
-	info->ServerMonitorConnectionId = debug_info.ServerMonitorConnectionId;
+	info->ServerMonitorPending = debug_info.servermonitor_pending;
+	info->ServerMonitorLatency = debug_info.servermonitor_latency;
+	info->ServerMonitorConnectionId = debug_info.servermonitor_connectionid;
 
-	info->ClientMonitorPending = debug_info.ClientMonitorPending;
-	info->ClientMonitorLatency = debug_info.ClientMonitorLatency;
-	info->ClientMonitorConnectionId = debug_info.ClientMonitorConnectionId;
+	info->ClientMonitorPending = debug_info.clientmonitor_pending;
+	info->ClientMonitorLatency = debug_info.clientmonitor_latency;
+	info->ClientMonitorConnectionId = debug_info.clientmonitor_connectionid;
 
-	info->Inbound.InterruptMask = debug_info.Inbound.CurrentInterruptMask;
-	info->Inbound.ReadIndex = debug_info.Inbound.CurrentReadIndex;
-	info->Inbound.WriteIndex = debug_info.Inbound.CurrentWriteIndex;
-	info->Inbound.BytesAvailToRead = debug_info.Inbound.BytesAvailToRead;
-	info->Inbound.BytesAvailToWrite = debug_info.Inbound.BytesAvailToWrite;
+	info->Inbound.InterruptMask = debug_info.inbound.current_interrupt_mask;
+	info->Inbound.ReadIndex = debug_info.inbound.current_read_index;
+	info->Inbound.WriteIndex = debug_info.inbound.current_write_index;
+	info->Inbound.BytesAvailToRead = debug_info.inbound.bytes_avail_toread;
+	info->Inbound.BytesAvailToWrite =
+		debug_info.inbound.bytes_avail_towrite;
 
-	info->Outbound.InterruptMask = debug_info.Outbound.CurrentInterruptMask;
-	info->Outbound.ReadIndex = debug_info.Outbound.CurrentReadIndex;
-	info->Outbound.WriteIndex = debug_info.Outbound.CurrentWriteIndex;
-	info->Outbound.BytesAvailToRead = debug_info.Outbound.BytesAvailToRead;
+	info->Outbound.InterruptMask =
+		debug_info.outbound.current_interrupt_mask;
+	info->Outbound.ReadIndex = debug_info.outbound.current_read_index;
+	info->Outbound.WriteIndex = debug_info.outbound.current_write_index;
+	info->Outbound.BytesAvailToRead =
+		debug_info.outbound.bytes_avail_toread;
 	info->Outbound.BytesAvailToWrite =
-		debug_info.Outbound.BytesAvailToWrite;
+		debug_info.outbound.bytes_avail_towrite;
 }
 
 /*
@@ -286,44 +459,55 @@
  *	- setup the vmbus root device
  *	- retrieve the channel offers
  */
-static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
+static int vmbus_bus_init(void)
 {
 	struct vmbus_driver_context *vmbus_drv_ctx = &g_vmbus_drv;
-	struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
+	struct hv_driver *driver = &g_vmbus_drv.drv_obj;
 	struct vm_device *dev_ctx = &g_vmbus_drv.device_ctx;
 	int ret;
 	unsigned int vector;
 
-	/*
-	 * Set this up to allow lower layer to callback to add/remove child
-	 * devices on the bus
-	 */
-	vmbus_drv_obj->OnChildDeviceCreate = vmbus_child_device_create;
-	vmbus_drv_obj->OnChildDeviceDestroy = vmbus_child_device_destroy;
-	vmbus_drv_obj->OnChildDeviceAdd = vmbus_child_device_register;
-	vmbus_drv_obj->OnChildDeviceRemove = vmbus_child_device_unregister;
+	DPRINT_INFO(VMBUS, "+++++++ HV Driver version = %s +++++++",
+		    HV_DRV_VERSION);
+	DPRINT_INFO(VMBUS, "+++++++ Vmbus supported version = %d +++++++",
+			VMBUS_REVISION_NUMBER);
+	DPRINT_INFO(VMBUS, "+++++++ Vmbus using SINT %d +++++++",
+			VMBUS_MESSAGE_SINT);
+	DPRINT_DBG(VMBUS, "sizeof(vmbus_channel_packet_page_buffer)=%zd, "
+			"sizeof(VMBUS_CHANNEL_PACKET_MULTIPAGE_BUFFER)=%zd",
+			sizeof(struct vmbus_channel_packet_page_buffer),
+			sizeof(struct vmbus_channel_packet_multipage_buffer));
 
-	/* Call to bus driver to initialize */
-	ret = drv_init(&vmbus_drv_obj->Base);
+	driver->name = gDriverName;
+	memcpy(&driver->deviceType, &gVmbusDeviceType, sizeof(struct hv_guid));
+
+	/* Setup dispatch table */
+	driver->OnDeviceAdd	= VmbusOnDeviceAdd;
+	driver->OnDeviceRemove	= VmbusOnDeviceRemove;
+	driver->OnCleanup	= VmbusOnCleanup;
+
+	/* Hypervisor initialization...setup hypercall page..etc */
+	ret = hv_init();
 	if (ret != 0) {
-		DPRINT_ERR(VMBUS_DRV, "Unable to initialize vmbus (%d)", ret);
+		DPRINT_ERR(VMBUS, "Unable to initialize the hypervisor - 0x%x",
+				ret);
 		goto cleanup;
 	}
 
 	/* Sanity checks */
-	if (!vmbus_drv_obj->Base.OnDeviceAdd) {
+	if (!driver->OnDeviceAdd) {
 		DPRINT_ERR(VMBUS_DRV, "OnDeviceAdd() routine not set");
 		ret = -1;
 		goto cleanup;
 	}
 
-	vmbus_drv_ctx->bus.name = vmbus_drv_obj->Base.name;
+	vmbus_drv_ctx->bus.name = driver->name;
 
 	/* Initialize the bus context */
 	tasklet_init(&vmbus_drv_ctx->msg_dpc, vmbus_msg_dpc,
-		     (unsigned long)vmbus_drv_obj);
+		     (unsigned long)driver);
 	tasklet_init(&vmbus_drv_ctx->event_dpc, vmbus_event_dpc,
-		     (unsigned long)vmbus_drv_obj);
+		     (unsigned long)driver);
 
 	/* Now, register the bus driver with LDM */
 	ret = bus_register(&vmbus_drv_ctx->bus);
@@ -334,7 +518,7 @@
 
 	/* Get the interrupt resource */
 	ret = request_irq(vmbus_irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
-			  vmbus_drv_obj->Base.name, NULL);
+			  driver->name, NULL);
 
 	if (ret != 0) {
 		DPRINT_ERR(VMBUS_DRV, "ERROR - Unable to request IRQ %d",
@@ -352,7 +536,7 @@
 	/* Call to bus driver to add the root device */
 	memset(dev_ctx, 0, sizeof(struct vm_device));
 
-	ret = vmbus_drv_obj->Base.OnDeviceAdd(&dev_ctx->device_obj, &vector);
+	ret = driver->OnDeviceAdd(&dev_ctx->device_obj, &vector);
 	if (ret != 0) {
 		DPRINT_ERR(VMBUS_DRV,
 			   "ERROR - Unable to add vmbus root device");
@@ -392,9 +576,7 @@
 		goto cleanup;
 	}
 
-
-	vmbus_drv_obj->GetChannelOffers();
-
+	vmbus_request_offers();
 	wait_for_completion(&hv_channel_ready);
 
 cleanup:
@@ -408,17 +590,17 @@
  */
 static void vmbus_bus_exit(void)
 {
-	struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
+	struct hv_driver *driver = &g_vmbus_drv.drv_obj;
 	struct vmbus_driver_context *vmbus_drv_ctx = &g_vmbus_drv;
 
 	struct vm_device *dev_ctx = &g_vmbus_drv.device_ctx;
 
 	/* Remove the root device */
-	if (vmbus_drv_obj->Base.OnDeviceRemove)
-		vmbus_drv_obj->Base.OnDeviceRemove(&dev_ctx->device_obj);
+	if (driver->OnDeviceRemove)
+		driver->OnDeviceRemove(&dev_ctx->device_obj);
 
-	if (vmbus_drv_obj->Base.OnCleanup)
-		vmbus_drv_obj->Base.OnCleanup(&vmbus_drv_obj->Base);
+	if (driver->OnCleanup)
+		driver->OnCleanup(driver);
 
 	/* Unregister the root bus device */
 	device_unregister(&dev_ctx->device);
@@ -446,7 +628,6 @@
  */
 int vmbus_child_driver_register(struct driver_context *driver_ctx)
 {
-	struct vmbus_driver *vmbus_drv_obj = &g_vmbus_drv.drv_obj;
 	int ret;
 
 	DPRINT_INFO(VMBUS_DRV, "child driver (%p) registering - name %s",
@@ -457,7 +638,7 @@
 
 	ret = driver_register(&driver_ctx->driver);
 
-	vmbus_drv_obj->GetChannelOffers();
+	vmbus_request_offers();
 
 	return ret;
 }
@@ -489,9 +670,9 @@
  * vmbus_child_device_create - Creates and registers a new child device
  * on the vmbus.
  */
-static struct hv_device *vmbus_child_device_create(struct hv_guid *type,
-						   struct hv_guid *instance,
-						   struct vmbus_channel *channel)
+struct hv_device *vmbus_child_device_create(struct hv_guid *type,
+					    struct hv_guid *instance,
+					    struct vmbus_channel *channel)
 {
 	struct vm_device *child_device_ctx;
 	struct hv_device *child_device_obj;
@@ -538,8 +719,8 @@
 /*
  * vmbus_child_device_register - Register the child device on the specified bus
  */
-static int vmbus_child_device_register(struct hv_device *root_device_obj,
-				       struct hv_device *child_device_obj)
+int vmbus_child_device_register(struct hv_device *root_device_obj,
+				struct hv_device *child_device_obj)
 {
 	int ret = 0;
 	struct vm_device *root_device_ctx =
@@ -583,7 +764,7 @@
  * vmbus_child_device_unregister - Remove the specified child device
  * from the vmbus.
  */
-static void vmbus_child_device_unregister(struct hv_device *device_obj)
+void vmbus_child_device_unregister(struct hv_device *device_obj)
 {
 	struct vm_device *device_ctx = to_vm_device(device_obj);
 
@@ -601,13 +782,6 @@
 }
 
 /*
- * vmbus_child_device_destroy - Destroy the specified child device on the vmbus.
- */
-static void vmbus_child_device_destroy(struct hv_device *device_obj)
-{
-}
-
-/*
  * vmbus_uevent - add uevent for our device
  *
  * This routine is invoked when a device is added or removed on the vmbus to
@@ -701,7 +875,7 @@
 		struct vmbus_driver_context *vmbus_drv_ctx =
 			(struct vmbus_driver_context *)driver_ctx;
 
-		device_ctx->device_obj.Driver = &vmbus_drv_ctx->drv_obj.Base;
+		device_ctx->device_obj.Driver = &vmbus_drv_ctx->drv_obj;
 		DPRINT_INFO(VMBUS_DRV,
 			    "device object (%p) set to driver object (%p)",
 			    &device_ctx->device_obj,
@@ -849,7 +1023,6 @@
 {
 	struct vm_device *device_ctx = device_to_vm_device(device);
 
-	/* vmbus_child_device_destroy(&device_ctx->device_obj); */
 	kfree(device_ctx);
 
 	/* !!DO NOT REFERENCE device_ctx anymore at this point!! */
@@ -860,36 +1033,28 @@
  */
 static void vmbus_msg_dpc(unsigned long data)
 {
-	struct vmbus_driver *vmbus_drv_obj = (struct vmbus_driver *)data;
-
-	/* ASSERT(vmbus_drv_obj->OnMsgDpc != NULL); */
+	struct hv_driver *driver = (struct hv_driver *)data;
 
 	/* Call to bus driver to handle interrupt */
-	vmbus_drv_obj->OnMsgDpc(&vmbus_drv_obj->Base);
+	vmbus_on_msg_dpc(driver);
 }
 
 /*
- * vmbus_msg_dpc - Tasklet routine to handle hypervisor events
+ * vmbus_event_dpc - Tasklet routine to handle hypervisor events
  */
 static void vmbus_event_dpc(unsigned long data)
 {
-	struct vmbus_driver *vmbus_drv_obj = (struct vmbus_driver *)data;
-
-	/* ASSERT(vmbus_drv_obj->OnEventDpc != NULL); */
-
 	/* Call to bus driver to handle interrupt */
-	vmbus_drv_obj->OnEventDpc(&vmbus_drv_obj->Base);
+	VmbusOnEvents();
 }
 
 static irqreturn_t vmbus_isr(int irq, void *dev_id)
 {
-	struct vmbus_driver *vmbus_driver_obj = &g_vmbus_drv.drv_obj;
+	struct hv_driver *driver = &g_vmbus_drv.drv_obj;
 	int ret;
 
-	/* ASSERT(vmbus_driver_obj->OnIsr != NULL); */
-
 	/* Call to bus driver to handle interrupt */
-	ret = vmbus_driver_obj->OnIsr(&vmbus_driver_obj->Base);
+	ret = vmbus_on_isr(driver);
 
 	/* Schedules a dpc if necessary */
 	if (ret > 0) {
@@ -928,7 +1093,7 @@
 	if (!dmi_check_system(microsoft_hv_dmi_table))
 		return -ENODEV;
 
-	return vmbus_bus_init(VmbusInitialize);
+	return vmbus_bus_init();
 }
 
 static void __exit vmbus_exit(void)
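
For orientation, not part of the patch: the hunks above hard-wire the interrupt path. vmbus_on_isr() inspects the per-CPU SynIC message and event pages and returns a bitmask (0x1 when a message is pending, 0x2 when an event is pending); vmbus_isr() then defers the real work to the msg/event tasklets, which hand messages off to a workqueue via onmessage_work_context. The exact tasklet scheduling calls fall outside the lines shown here, so the sketch below is only a stand-alone user-space analogue of the return-a-bitmask-then-defer shape, with invented names:

#include <stdio.h>

#define PENDING_MSG	0x1
#define PENDING_EVENT	0x2

/* Stand-in for vmbus_on_isr(): report what is pending, do no real work. */
static int fake_isr(int has_msg, int has_event)
{
	int ret = 0;

	if (has_msg)
		ret |= PENDING_MSG;
	if (has_event)
		ret |= PENDING_EVENT;
	return ret;
}

static void msg_dpc(void)
{
	printf("deferred: handle messages\n");
}

static void event_dpc(void)
{
	printf("deferred: handle events\n");
}

int main(void)
{
	int ret = fake_isr(1, 1);

	/* Mirrors the "schedule a dpc if necessary" step after the ISR. */
	if (ret & PENDING_MSG)
		msg_dpc();
	if (ret & PENDING_EVENT)
		event_dpc();
	return 0;
}
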
diff --git a/drivers/staging/hv/vmbus_private.h b/drivers/staging/hv/vmbus_private.h
index 09eaec9..07f6d22 100644
--- a/drivers/staging/hv/vmbus_private.h
+++ b/drivers/staging/hv/vmbus_private.h
@@ -102,13 +102,14 @@
 
 /* General vmbus interface */
 
-struct hv_device *VmbusChildDeviceCreate(struct hv_guid *deviceType,
+struct hv_device *vmbus_child_device_create(struct hv_guid *deviceType,
 					 struct hv_guid *deviceInstance,
 					 struct vmbus_channel *channel);
 
 int VmbusChildDeviceAdd(struct hv_device *Device);
-
-void VmbusChildDeviceRemove(struct hv_device *Device);
+int vmbus_child_device_register(struct hv_device *root_device_obj,
+				struct hv_device *child_device_obj);
+void vmbus_child_device_unregister(struct hv_device *device_obj);
 
 /* static void */
 /* VmbusChildDeviceDestroy( */
diff --git a/drivers/staging/hv/vstorage.h b/drivers/staging/hv/vstorage.h
index 4ea597d..ae8be84 100644
--- a/drivers/staging/hv/vstorage.h
+++ b/drivers/staging/hv/vstorage.h
@@ -27,15 +27,17 @@
 
 #define REVISION_STRING(REVISION_) #REVISION_
 #define FILL_VMSTOR_REVISION(RESULT_LVALUE_)				\
-{									\
-	char *revisionString = REVISION_STRING($Revision : 6 $) + 11;	\
-	RESULT_LVALUE_ = 0;						\
-	while (*revisionString >= '0' && *revisionString <= '9') {	\
-		RESULT_LVALUE_ *= 10;					\
-		RESULT_LVALUE_ += *revisionString - '0';		\
-		revisionString++;					\
-	}								\
-}
+	do {								\
+		char *revision_string					\
+			= REVISION_STRING($Rev : 6 $) + 6;		\
+		RESULT_LVALUE_ = 0;					\
+		while (*revision_string >= '0'				\
+			&& *revision_string <= '9') {			\
+			RESULT_LVALUE_ *= 10;				\
+			RESULT_LVALUE_ += *revision_string - '0';	\
+			revision_string++;				\
+		}							\
+	} while (0)
 
 /* Major/minor macros.  Minor version is in LSB, meaning that earlier flat */
 /* version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1). */
@@ -65,17 +67,17 @@
 
 /*  Packet structure describing virtual storage requests. */
 enum vstor_packet_operation {
-	VStorOperationCompleteIo            = 1,
-	VStorOperationRemoveDevice          = 2,
-	VStorOperationExecuteSRB            = 3,
-	VStorOperationResetLun              = 4,
-	VStorOperationResetAdapter          = 5,
-	VStorOperationResetBus              = 6,
-	VStorOperationBeginInitialization   = 7,
-	VStorOperationEndInitialization     = 8,
-	VStorOperationQueryProtocolVersion  = 9,
-	VStorOperationQueryProperties       = 10,
-	VStorOperationMaximum               = 10
+	VSTOR_OPERATION_COMPLETE_IO		= 1,
+	VSTOR_OPERATION_REMOVE_DEVICE		= 2,
+	VSTOR_OPERATION_EXECUTE_SRB		= 3,
+	VSTOR_OPERATION_RESET_LUN		= 4,
+	VSTOR_OPERATION_RESET_ADAPTER		= 5,
+	VSTOR_OPERATION_RESET_BUS		= 6,
+	VSTOR_OPERATION_BEGIN_INITIALIZATION	= 7,
+	VSTOR_OPERATION_END_INITIALIZATION	= 8,
+	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
+	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
+	VSTOR_OPERATION_MAXIMUM			= 10
 };
 
 /*
@@ -89,31 +91,29 @@
 #define SENSE_BUFFER_SIZE			0x12
 #endif
 
-#define MAX_DATA_BUFFER_LENGTH_WITH_PADDING	0x14
+#define MAX_DATA_BUF_LEN_WITH_PADDING		0x14
 
 struct vmscsi_request {
-	unsigned short Length;
-	unsigned char SrbStatus;
-	unsigned char ScsiStatus;
+	unsigned short length;
+	unsigned char srb_status;
+	unsigned char scsi_status;
 
-	unsigned char PortNumber;
-	unsigned char PathId;
-	unsigned char TargetId;
-	unsigned char Lun;
+	unsigned char port_number;
+	unsigned char path_id;
+	unsigned char target_id;
+	unsigned char lun;
 
-	unsigned char CdbLength;
-	unsigned char SenseInfoLength;
-	unsigned char DataIn;
-	unsigned char Reserved;
+	unsigned char cdb_length;
+	unsigned char sense_info_length;
+	unsigned char data_in;
+	unsigned char reserved;
 
-	unsigned int DataTransferLength;
+	unsigned int data_transfer_length;
 
 	union {
-	unsigned char Cdb[CDB16GENERIC_LENGTH];
-
-	unsigned char SenseData[SENSE_BUFFER_SIZE];
-
-	unsigned char ReservedArray[MAX_DATA_BUFFER_LENGTH_WITH_PADDING];
+		unsigned char cdb[CDB16GENERIC_LENGTH];
+		unsigned char sense_data[SENSE_BUFFER_SIZE];
+		unsigned char reserved_array[MAX_DATA_BUF_LEN_WITH_PADDING];
 	};
 } __attribute((packed));
 
@@ -123,24 +123,24 @@
  * properties of the channel.
  */
 struct vmstorage_channel_properties {
-	unsigned short ProtocolVersion;
-	unsigned char  PathId;
-	unsigned char  TargetId;
+	unsigned short protocol_version;
+	unsigned char path_id;
+	unsigned char target_id;
 
 	/* Note: port number is only really known on the client side */
-	unsigned int  PortNumber;
-	unsigned int  Flags;
-	unsigned int  MaxTransferBytes;
+	unsigned int port_number;
+	unsigned int flags;
+	unsigned int max_transfer_bytes;
 
 	/*  This id is unique for each channel and will correspond with */
 	/*  vendor specific data in the inquirydata */
-	unsigned long long UniqueId;
+	unsigned long long unique_id;
 } __attribute__((packed));
 
 /*  This structure is sent during the storage protocol negotiations. */
 struct vmstorage_protocol_version {
 	/* Major (MSW) and minor (LSW) version numbers. */
-	unsigned short MajorMinor;
+	unsigned short major_minor;
 
 	/*
 	 * Revision number is auto-incremented whenever this file is changed
@@ -148,7 +148,7 @@
 	 * definitely indicate incompatibility--but it does indicate mismatched
 	 * builds.
 	 */
-	unsigned short Revision;
+	unsigned short revision;
 } __attribute__((packed));
 
 /* Channel Property Flags */
@@ -157,13 +157,13 @@
 
 struct vstor_packet {
 	/* Requested operation type */
-	enum vstor_packet_operation Operation;
+	enum vstor_packet_operation operation;
 
 	/*  Flags - see below for values */
-	unsigned int     Flags;
+	unsigned int flags;
 
 	/* Status of the request returned from the server side. */
-	unsigned int     Status;
+	unsigned int status;
 
 	/* Data payload area */
 	union {
@@ -171,13 +171,13 @@
 		 * Structure used to forward SCSI commands from the
 		 * client to the server.
 		 */
-		struct vmscsi_request VmSrb;
+		struct vmscsi_request vm_srb;
 
 		/* Structure used to query channel properties. */
-		struct vmstorage_channel_properties StorageChannelProperties;
+		struct vmstorage_channel_properties storage_channel_properties;
 
 		/* Used during version negotiations. */
-		struct vmstorage_protocol_version Version;
+		struct vmstorage_protocol_version version;
 	};
 } __attribute__((packed));
 
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio b/drivers/staging/iio/Documentation/sysfs-bus-iio
index fdb017a..2dde97d 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio
@@ -1,11 +1,12 @@
-What:		/sys/bus/iio/devices/device[n]
+What:		/sys/bus/iio/devices/deviceX
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Hardware chip or device accessed by one communication port.
-		Corresponds to a grouping of sensor channels.
+		Corresponds to a grouping of sensor channels. X is the IIO
+		index of the device.
 
-What:		/sys/bus/iio/devices/trigger[n]
+What:		/sys/bus/iio/devices/triggerX
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -13,25 +14,26 @@
 		May be provided by a device driver that also has an IIO device
 		based on hardware generated events (e.g. data ready) or
 		provided by a separate driver for other hardware (e.g.
-		periodic timer, gpio or high resolution timer).
+		periodic timer, GPIO or high resolution timer).
 		Contains trigger type specific elements. These do not
 		generalize well and hence are not documented in this file.
+		X is the IIO index of the trigger.
 
-What:		/sys/bus/iio/devices/device[n]:buffer
+What:		/sys/bus/iio/devices/deviceX:buffer
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Link to /sys/class/iio/device[n]/device[n]:buffer. n indicates
+		Link to /sys/class/iio/deviceX/deviceX:buffer. X indicates
 		the device with which this buffer buffer is associated.
 
-What:		/sys/.../device[n]/name
+What:		/sys/bus/iio/devices/deviceX/name
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Description of the physical chip / device. Typically a part
-		number.
+		Description of the physical chip / device for device X.
+		Typically a part number.
 
-What:		/sys/.../device[n]/sampling_frequency
+What:		/sys/bus/iio/devices/deviceX/sampling_frequency
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -44,90 +46,49 @@
 		relevant directories.  If it affects all of the above
 		then it is to be found in the base device directory as here.
 
-What:		/sys/.../device[n]/sampling_frequency_available
+What:		/sys/bus/iio/devices/deviceX/sampling_frequency_available
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		When the internal sampling clock can only take a small
-		discrete set of values, this file lists those availale.
+		discrete set of values, this file lists those available.
 
-What:		/sys/.../device[n]/in[m][_name]_raw
+What:		/sys/bus/iio/devices/deviceX/inY_raw
+What:		/sys/bus/iio/devices/deviceX/inY_supply_raw
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Raw (unscaled no bias removal etc) voltage measurement from
-		channel m. name is used in special cases where this does
-		not correspond to externally available input (e.g. supply
-		voltage monitoring in which case the file is in_supply_raw).
-		If the device supports events on this channel then m must be
-		specified (even on named channels) so as to allow the source
-		of event codes to be identified.
+		channel Y. In special cases where the channel does not
+		correspond to externally available input, one of the named
+		versions may be used. The number must always be specified and
+		unique to allow association with event codes.
 
-What:		/sys/.../device[n]/in[m][_name]_offset
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		If known for a device, offset to be added to in[m]_raw prior
-		to scaling by in[_name][m]_scale in order to obtain voltage in
-		millivolts.  Not present if the offset is always 0 or unknown.
-		If m is not present, then voltage offset applies to all in
-		channels. May be writable if a variable offset is controlled
-		by the device. Note that this is different to calibbias which
-		is for devices that apply offsets to compensate for variation
-		between different instances of the part, typically adjusted by
-		using some hardware supported calibration procedure.
-
-What:		/sys/.../device[n]/in[m][_name]_offset_available
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		If a small number of discrete offset values are available, this
-		will be a space separated list.  If these are independant (but
-		options the same) for individual offsets then m should not be
-		present.
-
-What:		/sys/.../device[n]/in[m][_name]_offset_[min|max]
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		If a more or less continuous range of voltage offsets are
-		supported then these specify the minimum and maximum.  If shared
-		by all in channels then m is not present.
-
-What:		/sys/.../device[n]/in[m][_name]_calibbias
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Hardware applied calibration offset. (assumed to fix production
-		inaccuracies)
-
-What		/sys/.../device[n]/in[m][_name]_calibscale
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Hardware applied calibration scale factor. (assumed to fix
-		production inaccuracies)
-
-What:		/sys/.../device[n]/in[m][_name]_scale
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		If known for a device, scale to be applied to volt[m]_raw post
-		addition of in[_name][m]_offset in order to obtain the measured
-		voltage in millivolts.  If shared across all in channels then
-		m is not present.
-
-What:		/sys/.../device[n]/in[m]-in[o]_raw
+What:		/sys/bus/iio/devices/deviceX/inY-inZ_raw
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Raw (unscaled) differential voltage measurement equivalent to
-		channel m - channel o where these channel numbers apply to the
+		channel Y - channel Z where these channel numbers apply to the
 		physically equivalent inputs when non differential readings are
 		separately available. In differential only parts, then all that
-		is required is a consistent labelling.
+		is required is a consistent labeling.
 
-What:		/sys/.../device[n]/accel[_x|_y|_z][m]_raw
+What:		/sys/bus/iio/devices/deviceX/temp_raw
+What:		/sys/bus/iio/devices/deviceX/temp_x_raw
+What:		/sys/bus/iio/devices/deviceX/temp_y_raw
+What:		/sys/bus/iio/devices/deviceX/temp_z_raw
+KernelVersion:	2.6.35
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Raw (unscaled no bias removal etc) temperature measurement.
+		If an axis is specified it generally means that the temperature
+		sensor is associated with one part of a compound device (e.g.
+		a gyroscope axis).
+
+What:		/sys/bus/iio/devices/deviceX/accel_x_raw
+What:		/sys/bus/iio/devices/deviceX/accel_y_raw
+What:		/sys/bus/iio/devices/deviceX/accel_z_raw
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -135,29 +96,32 @@
 		but should match other such assignments on device)
 		channel m (not present if only one accelerometer channel at
 		this orientation). Has all of the equivalent parameters as per
-		in[m]. Units after application of scale and offset are m/s^2.
+		inY. Units after application of scale and offset are m/s^2.
 
-What:		/sys/.../device[n]/gyro[_x|_y|_z][m]_raw
+What:		/sys/bus/iio/devices/deviceX/gyro_x_raw
+What:		/sys/bus/iio/devices/deviceX/gyro_y_raw
+What:		/sys/bus/iio/devices/deviceX/gyro_z_raw
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Angular velocity about axis x, y or z (may be arbitrarily
-		assigned) channel m (not present if only one gyroscope at
-		this orientation).
-		Data converted by application of offset then scale to
+		assigned). Data converted by application of offset then scale to
 		radians per second. Has all the equivalent parameters as
-		per in[m].
+		per inY.
 
-What:		/sys/.../device[n]/incli[_x|_y|_z][m]_raw
+What:		/sys/bus/iio/devices/deviceX/incli_x_raw
+What:		/sys/bus/iio/devices/deviceX/incli_y_raw
+What:		/sys/bus/iio/devices/deviceX/incli_z_raw
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Inclination raw reading about axis x, y or z (may be arbitarily
-		assigned) channel m (not present if only one inclinometer at
-		this orientation).  Data converted by application of offset
+		Inclination raw reading about axis x, y or z (may be
+		arbitrarily assigned). Data converted by application of offset
 		and scale to Degrees.
 
-What:		/sys/.../device[n]/magn[_x|_y|_z][m]_raw
+What:		/sys/bus/iio/devices/deviceX/magn_x_raw
+What:		/sys/bus/iio/devices/deviceX/magn_y_raw
+What:		/sys/bus/iio/devices/deviceX/magn_z_raw
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -165,37 +129,150 @@
 		assigned) channel m (not present if only one magnetometer
 		at this orientation).  Data converted by application of
 		offset then scale to Gauss. Has all the equivalent modifiers
-		as per in[m].
+		as per inY.
 
-What:		/sys/.../device[n]/device[n]:event[m]
+What:		/sys/bus/iio/devices/deviceX/accel_x_peak_raw
+What:		/sys/bus/iio/devices/deviceX/accel_y_peak_raw
+What:		/sys/bus/iio/devices/deviceX/accel_z_peak_raw
+KernelVersion:	2.6.36
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Some devices provide a store of the highest value seen since
+		some reset condition.  These attributes allow access to this
+		and are otherwise the direct equivalent of the
+		<type>Y[_name]_raw attributes.
+
+What:		/sys/bus/iio/devices/deviceX/accel_xyz_squared_peak_raw
+KernelVersion:	2.6.36
+Contact:	linux-iio@vger.kernel.org
+Description:
+		A computed peak value based on the sum squared magnitude of
+		the underlying value in the specified directions.
+
+What:		/sys/bus/iio/devices/deviceX/accel_offset
+What:		/sys/bus/iio/devices/deviceX/temp_offset
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Configuration of which hardware generated events are passed up to
-		userspace. Some of these are a bit complex to generalize so this
-		section is a work in progress.
+		If known for a device, offset to be added to <type>[Y]_raw prior
+		to scaling by <type>[Y]_scale in order to obtain value in the
+		<type> units as specified in <type>[Y]_raw documentation.
+		Not present if the offset is always 0 or unknown. If Y is not
+		present, then the offset applies to all channels of <type>.
+		May be writable if a variable offset can be applied on the
+		device. Note that this is different to calibbias which
+		is for devices (or drivers) that apply offsets to compensate
+		for variation between different instances of the part, typically
+		adjusted by using some hardware supported calibration procedure.
 
-What:		/sys/.../device[n]:event[m]/dev
+What:		/sys/bus/iio/devices/deviceX/inY_scale
+What:		/sys/bus/iio/devices/deviceX/inY_supply_scale
+What:		/sys/bus/iio/devices/deviceX/in_scale
+What:		/sys/bus/iio/devices/deviceX/accel_scale
+What:		/sys/bus/iio/devices/deviceX/accel_peak_scale
+What:		/sys/bus/iio/devices/deviceX/gyro_scale
+What:		/sys/bus/iio/devices/deviceX/magn_scale
+What:		/sys/bus/iio/devices/deviceX/magn_x_scale
+What:		/sys/bus/iio/devices/deviceX/magn_y_scale
+What:		/sys/bus/iio/devices/deviceX/magn_z_scale
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		major:minor character device numbers for the event line.
+		If known for a device, scale to be applied to <type>Y[_name]_raw
+		post addition of <type>[Y][_name]_offset in order to obtain the
+		measured value in <type> units as specified in
+		<type>[Y][_name]_raw documentation.  If shared across all
+		channels then Y is not present and the value is called
+		<type>[Y][_name]_scale. The peak modifier means this value
+		is applied to <type>Y[_name]_peak_raw values.
 
-Taking accel_x0 as an example
+What:		/sys/bus/iio/devices/deviceX/accel_x_calibbias
+What:		/sys/bus/iio/devices/deviceX/accel_y_calibbias
+What:		/sys/bus/iio/devices/deviceX/accel_z_calibbias
+What:		/sys/bus/iio/devices/deviceX/gyro_x_calibbias
+What:		/sys/bus/iio/devices/deviceX/gyro_y_calibbias
+What:		/sys/bus/iio/devices/deviceX/gyro_z_calibbias
+KernelVersion:	2.6.35
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Hardware applied calibration offset. (assumed to fix production
+		inaccuracies). If shared across all channels, <type>_calibbias
+		is used.
 
-What:		/sys/.../device[n]:event[m]/accel_x0_thresh[_rising|_falling]_en
+What		/sys/bus/iio/devices/deviceX/inY_calibscale
+What		/sys/bus/iio/devices/deviceX/inY_supply_calibscale
+What		/sys/bus/iio/devices/deviceX/in_calibscale
+What		/sys/bus/iio/devices/deviceX/accel_x_calibscale
+What		/sys/bus/iio/devices/deviceX/accel_y_calibscale
+What		/sys/bus/iio/devices/deviceX/accel_z_calibscale
+What		/sys/bus/iio/devices/deviceX/gyro_x_calibscale
+What		/sys/bus/iio/devices/deviceX/gyro_y_calibscale
+What		/sys/bus/iio/devices/deviceX/gyro_z_calibscale
+KernelVersion:	2.6.35
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Hardware applied calibration scale factor. (assumed to fix
+		production inaccuracies).  If shared across all channels,
+		<type>_calibscale is used.
+
+What:		/sys/bus/iio/devices/deviceX/accel_scale_available
+KernelVersion:	2.6.35
+Contact:	linux-iio@vger.kernel.org
+Description:
+		If a discrete set of scale values are available, they
+		are listed in this attribute.
+
+What:		/sys/bus/iio/devices/deviceX/deviceX:eventY
+KernelVersion:	2.6.35
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Configuration of which hardware generated events are passed up
+		to user-space.
+
+What:		/sys/bus/iio/devices/deviceX:event/dev
+What:		/sys/bus/iio/devices/deviceX:eventY/dev
+KernelVersion:	2.6.35
+Contact:	linux-iio@vger.kernel.org
+Description:
+		major:minor character device numbers for the event line Y of
+		device X.
+
+What:		/sys/.../deviceX:eventY/accel_x_thresh_rising_en
+What:		/sys/.../deviceX:eventY/accel_x_thresh_falling_en
+What:		/sys/.../deviceX:eventY/accel_y_thresh_rising_en
+What:		/sys/.../deviceX:eventY/accel_y_thresh_falling_en
+What:		/sys/.../deviceX:eventY/accel_z_thresh_rising_en
+What:		/sys/.../deviceX:eventY/accel_z_thresh_falling_en
+What:		/sys/.../deviceX:eventY/gyro_x_thresh_rising_en
+What:		/sys/.../deviceX:eventY/gyro_x_thresh_falling_en
+What:		/sys/.../deviceX:eventY/gyro_y_thresh_rising_en
+What:		/sys/.../deviceX:eventY/gyro_y_thresh_falling_en
+What:		/sys/.../deviceX:eventY/gyro_z_thresh_rising_en
+What:		/sys/.../deviceX:eventY/gyro_z_thresh_falling_en
+What:		/sys/.../deviceX:eventY/magn_x_thresh_rising_en
+What:		/sys/.../deviceX:eventY/magn_x_thresh_falling_en
+What:		/sys/.../deviceX:eventY/magn_y_thresh_rising_en
+What:		/sys/.../deviceX:eventY/magn_y_thresh_falling_en
+What:		/sys/.../deviceX:eventY/magn_z_thresh_rising_en
+What:		/sys/.../deviceX:eventY/magn_z_thresh_falling_en
+What:		/sys/.../deviceX:eventY/inZ_supply_thresh_rising_en
+What:		/sys/.../deviceX:eventY/inZ_supply_thresh_falling_en
+What:		/sys/.../deviceX:eventY/inZ_thresh_rising_en
+What:		/sys/.../deviceX:eventY/inZ_thresh_falling_en
+What:		/sys/.../deviceX:eventY/temp_thresh_rising_en
+What:		/sys/.../deviceX:eventY/temp_thresh_falling_en
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Event generated when accel_x0 passes a threshold in the specfied
+		Event generated when channel passes a threshold in the specified
 		(_rising|_falling) direction. If the direction is not specified,
 		then either the device will report an event which ever direction
 		a single threshold value is called in (e.g.
-		accel_x0_<raw|input>_thresh_value) or
-		accel_x0_<raw|input>_thresh_rising_value and
-		accel_x0_<raw|input>_thresh_falling_value may take different
-		values, but the device can only enable both thresholds or
-		neither.
+		<type>[Z][_name]_<raw|input>_thresh_value) or
+		<type>[Z][_name]_<raw|input>_thresh_rising_value and
+		<type>[Z][_name]_<raw|input>_thresh_falling_value may take
+		different values, but the device can only enable both thresholds
+		or neither.
 		Note the driver will assume the last p events requested are
 		to be enabled where p is however many it supports (which may
 		vary depending on the exact set requested. So if you want to be
@@ -205,186 +282,338 @@
 		a given event type is enabled a future point (and not those for
 		whatever event was previously enabled).
 
-What:		/sys/.../accel_x0_<raw|input>_thresh[_rising|_falling]_value
+What:		/sys/.../deviceX:eventY/accel_x_roc_rising_en
+What:		/sys/.../deviceX:eventY/accel_x_roc_falling_en
+What:		/sys/.../deviceX:eventY/accel_y_roc_rising_en
+What:		/sys/.../deviceX:eventY/accel_y_roc_falling_en
+What:		/sys/.../deviceX:eventY/accel_z_roc_rising_en
+What:		/sys/.../deviceX:eventY/accel_z_roc_falling_en
+What:		/sys/.../deviceX:eventY/gyro_x_roc_rising_en
+What:		/sys/.../deviceX:eventY/gyro_x_roc_falling_en
+What:		/sys/.../deviceX:eventY/gyro_y_roc_rising_en
+What:		/sys/.../deviceX:eventY/gyro_y_roc_falling_en
+What:		/sys/.../deviceX:eventY/gyro_z_roc_rising_en
+What:		/sys/.../deviceX:eventY/gyro_z_roc_falling_en
+What:		/sys/.../deviceX:eventY/magn_x_roc_rising_en
+What:		/sys/.../deviceX:eventY/magn_x_roc_falling_en
+What:		/sys/.../deviceX:eventY/magn_y_roc_rising_en
+What:		/sys/.../deviceX:eventY/magn_y_roc_falling_en
+What:		/sys/.../deviceX:eventY/magn_z_roc_rising_en
+What:		/sys/.../deviceX:eventY/magn_z_roc_falling_en
+What:		/sys/.../deviceX:eventY/inZ_supply_roc_rising_en
+What:		/sys/.../deviceX:eventY/inZ_supply_roc_falling_en
+What:		/sys/.../deviceX:eventY/inZ_roc_rising_en
+What:		/sys/.../deviceX:eventY/inZ_roc_falling_en
+What:		/sys/.../deviceX:eventY/temp_roc_rising_en
+What:		/sys/.../deviceX:eventY/temp_roc_falling_en
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Event generated when channel passes a threshold on the rate of
+		change (1st differential) in the specified (_rising|_falling)
+		direction. If the direction is not specified, then either the
+		device will report an event whichever direction a single
+		threshold value is called in (e.g.
+		<type>[Z][_name]_<raw|input>_roc_value) or
+		<type>[Z][_name]_<raw|input>_roc_rising_value and
+		<type>[Z][_name]_<raw|input>_roc_falling_value may take
+		different values, but the device can only enable both rate of
+		change thresholds or neither.
+		Note the driver will assume the last p events requested are
+		to be enabled where p is however many it supports (which may
+		vary depending on the exact set requested). So if you want to be
+		sure you have set what you think you have, check the contents of
+		these attributes after everything is configured. Drivers may
+		have to buffer any parameters so that they are consistent when
+		a given event type is enabled at a future point (and not those for
+		whatever event was previously enabled).
+
+What:		/sys/.../deviceX:eventY/accel_x_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/accel_x_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/accel_y_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/accel_y_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/accel_z_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/accel_z_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/gyro_x_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/gyro_x_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/gyro_y_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/gyro_y_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/gyro_z_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/gyro_z_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/magn_x_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/magn_x_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/magn_y_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/magn_y_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/magn_z_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/magn_z_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/inZ_supply_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/inZ_supply_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/inZ_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/inZ_raw_thresh_falling_value
+What:		/sys/.../deviceX:eventY/temp_raw_thresh_rising_value
+What:		/sys/.../deviceX:eventY/temp_raw_thresh_falling_value
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Specifies the value of threshold that the device is comparing
 		against for the events enabled by
-		accel_x0_<raw|input>_thresh[_rising|falling]_en.
-		If seperate exist for the two directions, but direction is
-		not specified for this attribute, then a single threshold value
-		applies to both directions.
+		<type>Z[_name]_thresh[_rising|falling]_en.
+		If separate attributes exist for the two directions, but
+		direction is not specified for this attribute, then a single
+		threshold value applies to both directions.
 		The raw or input element of the name indicates whether the
 		value is in raw device units or in processed units (as _raw
 		and _input do on sysfs direct channel read attributes).
 
-What:		/sys/.../accel_x0_thresh[_rising|_falling]_meanperiod
+What:		/sys/.../deviceX:eventY/accel_x_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/accel_x_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/accel_y_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/accel_y_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/accel_z_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/accel_z_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/gyro_x_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/gyro_x_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/gyro_y_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/gyro_y_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/gyro_z_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/gyro_z_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/magn_x_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/magn_x_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/magn_y_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/magn_y_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/magn_z_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/magn_z_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/inZ_supply_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/inZ_supply_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/inZ_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/inZ_raw_roc_falling_value
+What:		/sys/.../deviceX:eventY/temp_raw_roc_rising_value
+What:		/sys/.../deviceX:eventY/temp_raw_roc_falling_value
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Period of time (in seconds) over which the raw channel value
-		is averaged before being compared with the threshold set in
-		accel_x0_thresh[_rising|_falling]_meanperiod.  If direction is
-		not specified then this mean period applies to both directions.
+		Specifies the value of rate of change threshold that the
+		device is comparing against for the events enabled by
+		<type>[Z][_name]_roc[_rising|falling]_en.
+		If separate attributes exist for the two directions,
+		but direction is not specified for this attribute,
+		then a single threshold value applies to both directions.
+		The raw or input element of the name indicates whether the
+		value is in raw device units or in processed units (as _raw
+		and _input do on sysfs direct channel read attributes).
 
-What:		/sys/.../accel_x0_thresh[_rising|_falling]_period
+What:		/sys/.../deviceX:eventY/accel_x_thresh_rising_period
+What:		/sys/.../deviceX:eventY/accel_x_thresh_falling_period
+What:		/sys/.../deviceX:eventY/accel_x_roc_rising_period
+What:		/sys/.../deviceX:eventY/accel_x_roc_falling_period
+What:		/sys/.../deviceX:eventY/accel_y_thresh_rising_period
+What:		/sys/.../deviceX:eventY/accel_y_thresh_falling_period
+What:		/sys/.../deviceX:eventY/accel_y_roc_rising_period
+What:		/sys/.../deviceX:eventY/accel_y_roc_falling_period
+What:		/sys/.../deviceX:eventY/accel_z_thresh_rising_period
+What:		/sys/.../deviceX:eventY/accel_z_thresh_falling_period
+What:		/sys/.../deviceX:eventY/accel_z_roc_rising_period
+What:		/sys/.../deviceX:eventY/accel_z_roc_falling_period
+What:		/sys/.../deviceX:eventY/gyro_x_thresh_rising_period
+What:		/sys/.../deviceX:eventY/gyro_x_thresh_falling_period
+What:		/sys/.../deviceX:eventY/gyro_x_roc_rising_period
+What:		/sys/.../deviceX:eventY/gyro_x_roc_falling_period
+What:		/sys/.../deviceX:eventY/gyro_y_thresh_rising_period
+What:		/sys/.../deviceX:eventY/gyro_y_thresh_falling_period
+What:		/sys/.../deviceX:eventY/gyro_y_roc_rising_period
+What:		/sys/.../deviceX:eventY/gyro_y_roc_falling_period
+What:		/sys/.../deviceX:eventY/gyro_z_thresh_rising_period
+What:		/sys/.../deviceX:eventY/gyro_z_thresh_falling_period
+What:		/sys/.../deviceX:eventY/gyro_z_roc_rising_period
+What:		/sys/.../deviceX:eventY/gyro_z_roc_falling_period
+What:		/sys/.../deviceX:eventY/magn_x_thresh_rising_period
+What:		/sys/.../deviceX:eventY/magn_x_thresh_falling_period
+What:		/sys/.../deviceX:eventY/magn_x_roc_rising_period
+What:		/sys/.../deviceX:eventY/magn_x_roc_falling_period
+What:		/sys/.../deviceX:eventY/magn_y_thresh_rising_period
+What:		/sys/.../deviceX:eventY/magn_y_thresh_falling_period
+What:		/sys/.../deviceX:eventY/magn_y_roc_rising_period
+What:		/sys/.../deviceX:eventY/magn_y_roc_falling_period
+What:		/sys/.../deviceX:eventY/magn_z_thresh_rising_period
+What:		/sys/.../deviceX:eventY/magn_z_thresh_falling_period
+What:		/sys/.../deviceX:eventY/magn_z_roc_rising_period
+What:		/sys/.../deviceX:eventY/magn_z_roc_falling_period
+What:		/sys/.../deviceX:eventY/inZ_supply_thresh_rising_period
+What:		/sys/.../deviceX:eventY/inZ_supply_thresh_falling_period
+What:		/sys/.../deviceX:eventY/inZ_supply_roc_rising_period
+What:		/sys/.../deviceX:eventY/inZ_supply_roc_falling_period
+What:		/sys/.../deviceX:eventY/inZ_thresh_rising_period
+What:		/sys/.../deviceX:eventY/inZ_thresh_falling_period
+What:		/sys/.../deviceX:eventY/inZ_roc_rising_period
+What:		/sys/.../deviceX:eventY/inZ_roc_falling_period
+What:		/sys/.../deviceX:eventY/temp_thresh_rising_period
+What:		/sys/.../deviceX:eventY/temp_thresh_falling_period
+What:		/sys/.../deviceX:eventY/temp_roc_rising_period
+What:		/sys/.../deviceX:eventY/temp_roc_falling_period
+What:		/sys/.../deviceX:eventY/accel_x&y&z_mag_falling_period
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Period of time (in seconds) for which the threshold must be
-		passed before an event is generated. If direction is not
+		Period of time (in seconds) for which the condition must be
+		met before an event is generated. If direction is not
 		specified then this period applies to both directions.
 
-What:		/sys/.../device[n]:event[m]/accel_x0_mag[_rising|_falling]_en
+What:		/sys/.../deviceX:eventY/accel_mag_en
+What:		/sys/.../deviceX:eventY/accel_mag_rising_en
+What:		/sys/.../deviceX:eventY/accel_mag_falling_en
+What:		/sys/.../deviceX:eventY/accel_x_mag_en
+What:		/sys/.../deviceX:eventY/accel_x_mag_rising_en
+What:		/sys/.../deviceX:eventY/accel_x_mag_falling_en
+What:		/sys/.../deviceX:eventY/accel_y_mag_en
+What:		/sys/.../deviceX:eventY/accel_y_mag_rising_en
+What:		/sys/.../deviceX:eventY/accel_y_mag_falling_en
+What:		/sys/.../deviceX:eventY/accel_z_mag_en
+What:		/sys/.../deviceX:eventY/accel_z_mag_rising_en
+What:		/sys/.../deviceX:eventY/accel_z_mag_falling_en
+What:		/sys/.../deviceX:eventY/accel_x&y&z_mag_rising_en
+What:		/sys/.../deviceX:eventY/accel_x&y&z_mag_falling_en
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Similar to accel_x0_thresh[_rising|_falling]_en, but here the
+		Similar to accel_x_thresh[_rising|_falling]_en, but here the
 		magnitude of the channel is compared to the threshold, not its
 		signed value.
 
-What:		/sys/.../accel_x0_<raw|input>_mag[_rising|_falling]_value
+What:		/sys/.../accel_raw_mag_value
+What:		/sys/.../accel_x_raw_mag_rising_value
+What:		/sys/.../accel_y_raw_mag_rising_value
+What:		/sys/.../accel_z_raw_mag_rising_value
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
-		The value to which the magnitude of the channel is compared.
+		The value to which the magnitude of the channel is compared. If
+		number or direction is not specified, applies to all channels of
+		this type.
 
-What:		/sys/.../accel_x0_mag[_rising|_falling]_meanperiod
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Period of time (in seconds) over which the value of the channel
-		is averaged before being compared to the threshold
-
-What:		/sys/.../accel_x0_mag[_rising|_falling]_period
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Period of time (in seconds) for which the condition must be true
-		before an event occurs.
-
-What:		/sys/.../device[n]:event[m]/accel_x0_roc[_rising|_falling]_en
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Similar to accel_x0_thresh[_rising|_falling]_en, but here the
-		first differential is compared with the threshold.
-
-What:		/sys/.../accel_x0_<raw|input>_roc[_rising|_falling]_value
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		The value to which the first differential of the channel is
-		compared.
-
-What:		/sys/.../accel_x0_roc[_rising|_falling]_meanperiod
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Period of time (in seconds) over which the value of the channel
-		is averaged before being compared to the threshold
-
-What:		/sys/.../accel_x0_roc[_rising|_falling]_period
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Period of time (in seconds) for which the condition must be true
-		before an event occurs.
-
-What:		/sys/.../device[n]/device[n]:buffer:event/dev
+What:		/sys/bus/iio/devices/deviceX:buffer:event/dev
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Buffer for device n event character device major:minor numbers.
+		Buffer for device X event character device major:minor numbers.
 
-What:		/sys/.../device[n]/device[n]:buffer:access/dev
+What:		/sys/bus/iio/devices/deviceX:buffer:access/dev
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
-		Buffer for device n access character device o major:minor numbers.
+		Buffer for device X access character device major:minor numbers.
 
-What:		/sys/.../device[n]:buffer/trigger
+What:		/sys/bus/iio/devices/deviceX:buffer/trigger
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		The name of the trigger source being used, as per string given
-		in /sys/class/iio/trigger[n]/name.
+		in /sys/class/iio/triggerY/name.
 
-What:		/sys/.../device[n]:buffer/length
+What:		/sys/bus/iio/devices/deviceX:buffer/length
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Number of scans contained by the buffer.
 
-What:		/sys/.../device[n]:buffer/bytes_per_datum
+What:		/sys/bus/iio/devices/deviceX:buffer/bytes_per_datum
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Bytes per scan.  Due to alignment fun, the scan may be larger
 		than implied directly by the scan_element parameters.
 
-What:		/sys/.../device[n]:buffer/enable
+What:		/sys/bus/iio/devices/deviceX:buffer/enable
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Actually start the buffer capture up.  Will start trigger
 		if first device and appropriate.
 
-What:		/sys/.../device[n]:buffer/alignment
-KernelVersion:	2.6.35
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Minimum data alignment.  Scan elements larger than this are
-		aligned to the nearest power of 2 times this.  (may not be
-		true in weird hardware buffers that pack data well)
-
-What:		/sys/.../device[n]/buffer/scan_elements
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Directory containing interfaces for elements that will be
 		captured for a single triggered sample set in the buffer.
 
-What:		/sys/.../device[n]/buffer/scan_elements/accel_x0_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_x_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_y_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_z_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_x_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_y_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_z_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_x_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_y_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_z_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/timestamp_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_supply_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/inY-inZ_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_x_en
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_y_en
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Scan element control for triggered data capture.
 
-What:		/sys/.../device[n]/buffer/scan_elements/accel[_x0]_type
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_type
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_type
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_type
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_type
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_type
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/in-in_type
+What:		/sys/.../deviceX:buffer/scan_elements/inY_supply_type
+What:		/sys/.../deviceX:buffer/scan_elements/timestamp_type
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		Description of the scan element data storage within the buffer
-		and hence the form in which it is read from userspace.
-		Form is [s|u]bits/storagebits.  s or u specifies if signed
-		(2's complement) or unsigned. bits is the number of bits of
-		data and storagebits is the space (after padding) that it
-		occupies in the buffer.  Note that some devices will have
-		additional information in the unused bits so to get a clean
-		value, the bits value must be used to mask the buffer output
-		value appropriately.  The storagebits value also specifies the
-		data alignment.  So s48/64 will be a signed 48 bit integer
-		stored in a 64 bit location aligned to a a64 bit boundary.
+		and hence the form in which it is read from user-space.
+		Form is [s|u]bits/storagebits[>>shift].  s or u specifies if
+		signed (2's complement) or unsigned. bits is the number of bits
+		of data and storagebits is the space (after padding) that it
+		occupies in the buffer. shift, if specified, is the shift that
+		needs to be applied prior to masking out unused bits. Some
+		devices put their data in the middle of the transferred elements
+		with additional information on both sides.  Note that some
+		devices will have additional information in the unused bits
+		so to get a clean value, the bits value must be used to mask
+		the buffer output value appropriately.  The storagebits value
+		also specifies the data alignment.  So s48/64>>2 will be a
+		signed 48 bit integer stored in a 64 bit location aligned to
+		a 64 bit boundary. To obtain the clean value, shift right 2
+		and apply a mask to zero the top 16 bits of the result.
 		For other storage combinations this attribute will be extended
 		appropriately.
 
-What:		/sys/.../device[n]/buffer/scan_elements/accel[_x0]_index
+What:		/sys/.../deviceX:buffer/scan_elements/accel_type_available
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		If the type parameter can take one of a small set of values,
+		this attribute lists them.
+
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/inY_index
+What:		/sys/.../deviceX:buffer/scan_elements/inY_supply_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_x_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_y_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/accel_z_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_x_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_y_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/gyro_z_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_x_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_y_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/magn_z_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_x_index
+What:		/sys/bus/iio/devices/deviceX:buffer/scan_elements/incli_y_index
+What:		/sys/.../deviceX:buffer/scan_elements/timestamp_index
 KernelVersion:	2.6.37
 Contact:	linux-iio@vger.kernel.org
 Description:
 		A single positive integer specifying the position of this
-		scan element in the buffer. Note these are not dependant on
-		what is enabled and may not be contiguous. Thus for userspace
+		scan element in the buffer. Note these are not dependent on
+		what is enabled and may not be contiguous. Thus for user-space
 		to establish the full layout these must be used in conjunction
 		with all _en attributes to establish which channels are present,
 		and the relevant _type attributes to establish the data storage
 		format.
-
-What:		/sys/.../device[n]/buffer/scan_elements/accel[_x0]_shift
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		A bit shift (to right) that must be applied prior to
-		extracting the bits specified by accel[_x0]_precision.
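To make the _type description above concrete, here is a minimal user-space
sketch of decoding one scan element declared as s48/64>>2. It assumes the
64-bit storage word has already been read out of the buffer into host byte
order; the function name is illustrative and not part of the ABI.

	#include <stdint.h>

	/*
	 * Decode an element described as s48/64>>2: shift right by 2 first,
	 * mask down to the 48 data bits, then sign-extend from bit 47.
	 */
	static int64_t decode_s48_64_shift2(uint64_t raw)
	{
		uint64_t v = raw >> 2;			/* apply the >>shift */

		v &= (1ULL << 48) - 1;			/* keep the 48 data bits */
		if (v & (1ULL << 47))			/* 's' means 2's complement */
			v |= ~((1ULL << 48) - 1);
		return (int64_t)v;
	}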
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-dds b/drivers/staging/iio/Documentation/sysfs-bus-iio-dds
new file mode 100644
index 0000000..ffdd547
--- /dev/null
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-dds
@@ -0,0 +1,93 @@
+
+What:		/sys/bus/iio/devices/.../ddsX_freqY
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Stores frequency into tuning word Y.
+		There will be more than one ddsX_freqY file, which allows for
+		pin-controlled FSK (Frequency Shift Keying) while
+		ddsX_pincontrol_freq_en is active, or the user can control
+		the desired active tuning word by writing Y to the
+		ddsX_freqsymbol file.
+
+What:		/sys/bus/iio/devices/.../ddsX_freqY_scale
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Scale to be applied to ddsX_freqY in order to obtain the
+		desired value in Hz. If shared across all frequency registers
+		Y is not present. It is also possible X is not present if
+		shared across all channels.
+
+What:		/sys/bus/iio/devices/.../ddsX_freqsymbol
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Specifies the active output frequency tuning word. The value
+		corresponds to the Y in ddsX_freqY. To exit this mode the user
+		can write to the ddsX_pincontrol_freq_en or ddsX_out_enable file.
+
+What:		/sys/bus/iio/devices/.../ddsX_phaseY
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Stores phase into Y.
+		There will be more than one ddsX_phaseY file, which allows for
+		pin-controlled PSK (Phase Shift Keying) while
+		ddsX_pincontrol_phase_en is active, or the user can
+		control the desired phase Y which is added to the phase
+		accumulator output by writing Y to the en_phase file.
+
+What:		/sys/bus/iio/devices/.../ddsX_phaseY_scale
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Scale to be applied to ddsX_phaseY in order to obtain the
+		desired value in rad. If shared across all phase registers
+		Y is not present. It is also possible X is not present if
+		shared across all channels.
+
+What:		/sys/bus/iio/devices/.../ddsX_phasesymbol
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Specifies the active phase Y which is added to the phase
+		accumulator output. The value corresponds to the Y in
+		ddsX_phaseY. To exit this mode the user can write to the
+		ddsX_pincontrol_phase_en or the disable file.
+
+What:		/sys/bus/iio/devices/.../ddsX_pincontrol_en
+What:		/sys/bus/iio/devices/.../ddsX_pincontrol_freq_en
+What:		/sys/bus/iio/devices/.../ddsX_pincontrol_phase_en
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		ddsX_pincontrol_en: Both the active frequency and the active
+		phase are controlled by the respective frequency and phase
+		control inputs. In case the device in question allows
+		independent control, there are dedicated files
+		(ddsX_pincontrol_freq_en, ddsX_pincontrol_phase_en).
+
+What:		/sys/bus/iio/devices/.../ddsX_out_enable
+What:		/sys/bus/iio/devices/.../ddsX_outY_enable
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		ddsX_outY_enable controls signal generation on output Y of
+		channel X. Y may be suppressed if all channels are
+		controlled together.
+
+What:		/sys/bus/iio/devices/.../ddsX_outY_wavetype
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Specifies the output waveform.
+		(sine, triangle, ramp, square, ...)
+		For a list of available output waveform options read
+		available_output_modes.
+
+What:		/sys/bus/iio/devices/.../ddsX_outY_wavetype_available
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Lists all available output waveform options.
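A short user-space sketch of the intended usage of the frequency attributes
above: program a tuning word and then select it as the active one. The sysfs
directory, channel number and tuning word index are hypothetical; the value
written to ddsX_freqY is the raw tuning word to which ddsX_freqY_scale applies.

	#include <stdio.h>

	static int dds_select_freq0(const char *dev_dir, unsigned long tuning_word)
	{
		char path[256];
		FILE *f;

		/* write the raw tuning word into tuning word 0 of dds0 */
		snprintf(path, sizeof(path), "%s/dds0_freq0", dev_dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%lu\n", tuning_word);
		fclose(f);

		/* make tuning word 0 the active one */
		snprintf(path, sizeof(path), "%s/dds0_freqsymbol", dev_dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "0\n");
		fclose(f);
		return 0;
	}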
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index ed48815..e2ac07d 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -42,11 +42,15 @@
 
 source "drivers/staging/iio/accel/Kconfig"
 source "drivers/staging/iio/adc/Kconfig"
+source "drivers/staging/iio/addac/Kconfig"
+source "drivers/staging/iio/dac/Kconfig"
+source "drivers/staging/iio/dds/Kconfig"
 source "drivers/staging/iio/gyro/Kconfig"
 source "drivers/staging/iio/imu/Kconfig"
 source "drivers/staging/iio/light/Kconfig"
 source "drivers/staging/iio/magnetometer/Kconfig"
-
+source "drivers/staging/iio/meter/Kconfig"
+source "drivers/staging/iio/resolver/Kconfig"
 source "drivers/staging/iio/trigger/Kconfig"
 
 endif # IIO
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index e909674..f9b5fb2 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -11,8 +11,13 @@
 
 obj-y += accel/
 obj-y += adc/
+obj-y += addac/
+obj-y += dac/
+obj-y += dds/
 obj-y += gyro/
 obj-y += imu/
 obj-y += light/
-obj-y += trigger/
 obj-y += magnetometer/
+obj-y += meter/
+obj-y += resolver/
+obj-y += trigger/
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 898cba1..d1ad35e 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -61,6 +61,10 @@
 files. (avoided at the moment to keep the driver set
 contained in staging).
 
+ADI Drivers:
+CC the device-drivers-devel@blackfin.uclinux.org mailing list when
+e-mailing the normal IIO list (see below).
+
 Documentation
 1) Lots of cleanup and expansion.
 2) Some devices require individual docs.
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 5926c03..a34f1d3 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -3,6 +3,33 @@
 #
 comment "Accelerometers"
 
+config ADIS16201
+	tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
+	depends on SPI
+	select IIO_TRIGGER if IIO_RING_BUFFER
+	select IIO_SW_RING if IIO_RING_BUFFER
+	help
+	  Say yes here to build support for Analog Devices adis16201 dual-axis
+	  digital inclinometer and accelerometer.
+
+config ADIS16203
+	tristate "Analog Devices ADIS16203 Programmable 360 Degrees Inclinometer"
+	depends on SPI
+	select IIO_TRIGGER if IIO_RING_BUFFER
+	select IIO_SW_RING if IIO_RING_BUFFER
+	help
+	  Say yes here to build support for Analog Devices adis16203 Programmable
+	  360 Degrees Inclinometer.
+
+config ADIS16204
+	tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
+	depends on SPI
+	select IIO_TRIGGER if IIO_RING_BUFFER
+	select IIO_SW_RING if IIO_RING_BUFFER
+	help
+	  Say yes here to build support for Analog Devices adis16204 Programmable
+	  High-g Digital Impact Sensor and Recorder.
+
 config ADIS16209
 	tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
 	depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index ff84703..1b2a6d3 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -2,6 +2,18 @@
 # Makefile for industrial I/O accelerometer drivers
 #
 
+adis16201-y             := adis16201_core.o
+adis16201-$(CONFIG_IIO_RING_BUFFER) += adis16201_ring.o adis16201_trigger.o
+obj-$(CONFIG_ADIS16201) += adis16201.o
+
+adis16203-y             := adis16203_core.o
+adis16203-$(CONFIG_IIO_RING_BUFFER) += adis16203_ring.o adis16203_trigger.o
+obj-$(CONFIG_ADIS16203) += adis16203.o
+
+adis16204-y             := adis16204_core.o
+adis16204-$(CONFIG_IIO_RING_BUFFER) += adis16204_ring.o adis16204_trigger.o
+obj-$(CONFIG_ADIS16204) += adis16204.o
+
 adis16209-y             := adis16209_core.o
 adis16209-$(CONFIG_IIO_RING_BUFFER) += adis16209_ring.o adis16209_trigger.o
 obj-$(CONFIG_ADIS16209) += adis16209.o
diff --git a/drivers/staging/iio/accel/accel.h b/drivers/staging/iio/accel/accel.h
index f5f61b2..50651f8 100644
--- a/drivers/staging/iio/accel/accel.h
+++ b/drivers/staging/iio/accel/accel.h
@@ -65,3 +65,23 @@
 #define IIO_DEV_ATTR_ACCEL_Z(_show, _addr)			\
 	IIO_DEVICE_ATTR(accel_z_raw, S_IRUGO, _show, NULL, _addr)
 
+#define IIO_DEV_ATTR_ACCEL_XY(_show, _addr)			\
+	IIO_DEVICE_ATTR(accel_xy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_PEAK(_show, _addr)			\
+	IIO_DEVICE_ATTR(accel_peak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_XPEAK(_show, _addr)			\
+	IIO_DEVICE_ATTR(accel_xpeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_YPEAK(_show, _addr)			\
+	IIO_DEVICE_ATTR(accel_ypeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_ZPEAK(_show, _addr)			\
+	IIO_DEVICE_ATTR(accel_zpeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_XYPEAK(_show, _addr)		\
+	IIO_DEVICE_ATTR(accel_xypeak, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ACCEL_XYZPEAK(_show, _addr)		\
+	IIO_DEVICE_ATTR(accel_xyzpeak, S_IRUGO, _show, NULL, _addr)
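The new *PEAK helpers follow the same pattern as the existing accel_x/y/z
macros: each expands to an IIO_DEVICE_ATTR named after the attribute. Below is
a sketch of how a driver might use one of them; it assumes accel.h and the IIO
sysfs header are included, and the read callback and register address
(FOO_XYZPEAK_REG) are hypothetical rather than taken from any real driver.

	#define FOO_XYZPEAK_REG 0x40	/* hypothetical register address */

	static ssize_t foo_read_peak(struct device *dev,
				     struct device_attribute *attr, char *buf);

	static IIO_DEV_ATTR_ACCEL_XYZPEAK(foo_read_peak, FOO_XYZPEAK_REG);

	static struct attribute *foo_attributes[] = {
		&iio_dev_attr_accel_xyzpeak.dev_attr.attr,
		NULL
	};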
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
new file mode 100644
index 0000000..c9bf22c
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -0,0 +1,150 @@
+#ifndef SPI_ADIS16201_H_
+#define SPI_ADIS16201_H_
+
+#define ADIS16201_STARTUP_DELAY	220 /* ms */
+
+#define ADIS16201_READ_REG(a)    a
+#define ADIS16201_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16201_FLASH_CNT      0x00 /* Flash memory write count */
+#define ADIS16201_SUPPLY_OUT     0x02 /* Output, power supply */
+#define ADIS16201_XACCL_OUT      0x04 /* Output, x-axis accelerometer */
+#define ADIS16201_YACCL_OUT      0x06 /* Output, y-axis accelerometer */
+#define ADIS16201_AUX_ADC        0x08 /* Output, auxiliary ADC input */
+#define ADIS16201_TEMP_OUT       0x0A /* Output, temperature */
+#define ADIS16201_XINCL_OUT      0x0C /* Output, x-axis inclination */
+#define ADIS16201_YINCL_OUT      0x0E /* Output, y-axis inclination */
+#define ADIS16201_XACCL_OFFS     0x10 /* Calibration, x-axis acceleration offset */
+#define ADIS16201_YACCL_OFFS     0x12 /* Calibration, y-axis acceleration offset */
+#define ADIS16201_XACCL_SCALE    0x14 /* x-axis acceleration scale factor */
+#define ADIS16201_YACCL_SCALE    0x16 /* y-axis acceleration scale factor */
+#define ADIS16201_XINCL_OFFS     0x18 /* Calibration, x-axis inclination offset */
+#define ADIS16201_YINCL_OFFS     0x1A /* Calibration, y-axis inclination offset */
+#define ADIS16201_XINCL_SCALE    0x1C /* x-axis inclination scale factor */
+#define ADIS16201_YINCL_SCALE    0x1E /* y-axis inclination scale factor */
+#define ADIS16201_ALM_MAG1       0x20 /* Alarm 1 amplitude threshold */
+#define ADIS16201_ALM_MAG2       0x22 /* Alarm 2 amplitude threshold */
+#define ADIS16201_ALM_SMPL1      0x24 /* Alarm 1, sample period */
+#define ADIS16201_ALM_SMPL2      0x26 /* Alarm 2, sample period */
+#define ADIS16201_ALM_CTRL       0x28 /* Alarm control */
+#define ADIS16201_AUX_DAC        0x30 /* Auxiliary DAC data */
+#define ADIS16201_GPIO_CTRL      0x32 /* General-purpose digital input/output control */
+#define ADIS16201_MSC_CTRL       0x34 /* Miscellaneous control */
+#define ADIS16201_SMPL_PRD       0x36 /* Internal sample period (rate) control */
+#define ADIS16201_AVG_CNT        0x38 /* Operation, filter configuration */
+#define ADIS16201_SLP_CNT        0x3A /* Operation, sleep mode control */
+#define ADIS16201_DIAG_STAT      0x3C /* Diagnostics, system status register */
+#define ADIS16201_GLOB_CMD       0x3E /* Operation, system command register */
+
+#define ADIS16201_OUTPUTS        7
+
+/* MSC_CTRL */
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN	        (1 << 8)  /* Self-test enable */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN	        (1 << 2)  /* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_ACTIVE_HIGH	        (1 << 1)  /* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1	(1 << 0)  /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* DIAG_STAT */
+#define ADIS16201_DIAG_STAT_ALARM2        (1<<9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM1        (1<<8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_SPI_FAIL	  (1<<3) /* SPI communications failure */
+#define ADIS16201_DIAG_STAT_FLASH_UPT	  (1<<2) /* Flash update failure */
+#define ADIS16201_DIAG_STAT_POWER_HIGH	  (1<<1) /* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW	  (1<<0) /* Power supply below 3.15 V */
+
+/* GLOB_CMD */
+#define ADIS16201_GLOB_CMD_SW_RESET	(1<<7)
+#define ADIS16201_GLOB_CMD_FACTORY_CAL	(1<<1)
+
+#define ADIS16201_MAX_TX 14
+#define ADIS16201_MAX_RX 14
+
+#define ADIS16201_ERROR_ACTIVE          (1<<14)
+
+/**
+ * struct adis16201_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @inter:		used to check if new interrupt has been triggered
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16201_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+
+int adis16201_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16201_scan {
+	ADIS16201_SCAN_SUPPLY,
+	ADIS16201_SCAN_ACC_X,
+	ADIS16201_SCAN_ACC_Y,
+	ADIS16201_SCAN_AUX_ADC,
+	ADIS16201_SCAN_TEMP,
+	ADIS16201_SCAN_INCLI_X,
+	ADIS16201_SCAN_INCLI_Y,
+};
+
+void adis16201_remove_trigger(struct iio_dev *indio_dev);
+int adis16201_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16201_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+int adis16201_configure_ring(struct iio_dev *indio_dev);
+void adis16201_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16201_initialize_ring(struct iio_ring_buffer *ring);
+void adis16201_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16201_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16201_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16201_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16201_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16201_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16201_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16201_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16201_H_ */
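The register map above uses the usual ADIS convention: bit 7 of the address
selects a write, and each 16-bit register is accessed as two consecutive 8-bit
locations. As a restatement of what adis16201_core.c does below, a sketch of
framing a 16-bit write into the four tx bytes sent as two 2-byte transfers
(assumes the ADIS16201_WRITE_REG() macro from the header above):

	static void adis16201_frame_write16(u8 *tx, u8 reg, u16 value)
	{
		tx[0] = ADIS16201_WRITE_REG(reg);	/* low byte, write bit set */
		tx[1] = value & 0xFF;
		tx[2] = ADIS16201_WRITE_REG(reg + 1);	/* high byte at reg + 1 */
		tx[3] = (value >> 8) & 0xFF;
	}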
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
new file mode 100644
index 0000000..79b785a
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -0,0 +1,659 @@
+/*
+ * ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "inclinometer.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16201.h"
+
+#define DRIVER_NAME		"adis16201"
+
+static int adis16201_check_status(struct device *dev);
+
+/**
+ * adis16201_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16201_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16201_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16201_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16201_spi_write_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.tx_buf = st->tx + 2,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16201_WRITE_REG(lower_reg_address);
+	st->tx[1] = value & 0xFF;
+	st->tx[2] = ADIS16201_WRITE_REG(lower_reg_address + 1);
+	st->tx[3] = (value >> 8) & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16201_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16201_spi_read_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+			.delay_usecs = 20,
+		}, {
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+			.delay_usecs = 20,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16201_READ_REG(lower_reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				lower_reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t adis16201_read_12bit_unsigned(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = adis16201_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	if (val & ADIS16201_ERROR_ACTIVE) {
+		ret = adis16201_check_status(dev);
+		if (ret)
+			return ret;
+	}
+
+	return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16201_read_temp(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u16 val;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16201_spi_read_reg_16(dev, ADIS16201_TEMP_OUT, (u16 *)&val);
+	if (ret)
+		goto error_ret;
+
+	if (val & ADIS16201_ERROR_ACTIVE) {
+		ret = adis16201_check_status(dev);
+		if (ret)
+			goto error_ret;
+	}
+
+	val &= 0xFFF;
+	ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+	return ret;
+}
+
+static ssize_t adis16201_read_9bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s16 val = 0;
+	ssize_t ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16201_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (!ret) {
+		if (val & ADIS16201_ERROR_ACTIVE) {
+			ret = adis16201_check_status(dev);
+			if (ret)
+				goto error_ret;
+		}
+		val = ((s16)(val << 7) >> 7);
+		ret = sprintf(buf, "%d\n", val);
+	}
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16201_read_12bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s16 val = 0;
+	ssize_t ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16201_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (!ret) {
+		if (val & ADIS16201_ERROR_ACTIVE) {
+			ret = adis16201_check_status(dev);
+			if (ret)
+				goto error_ret;
+		}
+
+		val = ((s16)(val << 4) >> 4);
+		ret = sprintf(buf, "%d\n", val);
+	}
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16201_read_14bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s16 val = 0;
+	ssize_t ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16201_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (!ret) {
+		if (val & ADIS16201_ERROR_ACTIVE) {
+			ret = adis16201_check_status(dev);
+			if (ret)
+				goto error_ret;
+		}
+
+		val = ((s16)(val << 2) >> 2);
+		ret = sprintf(buf, "%d\n", val);
+	}
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16201_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16201_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int adis16201_reset(struct device *dev)
+{
+	int ret;
+	ret = adis16201_spi_write_reg_8(dev,
+			ADIS16201_GLOB_CMD,
+			ADIS16201_GLOB_CMD_SW_RESET);
+	if (ret)
+		dev_err(dev, "problem resetting device");
+
+	return ret;
+}
+
+static ssize_t adis16201_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -EINVAL;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return adis16201_reset(dev);
+	}
+	return -EINVAL;
+}
+
+int adis16201_set_irq(struct device *dev, bool enable)
+{
+	int ret = 0;
+	u16 msc;
+
+	ret = adis16201_spi_read_reg_16(dev, ADIS16201_MSC_CTRL, &msc);
+	if (ret)
+		goto error_ret;
+
+	msc |= ADIS16201_MSC_CTRL_ACTIVE_HIGH;
+	msc &= ~ADIS16201_MSC_CTRL_DATA_RDY_DIO1;
+	if (enable)
+		msc |= ADIS16201_MSC_CTRL_DATA_RDY_EN;
+	else
+		msc &= ~ADIS16201_MSC_CTRL_DATA_RDY_EN;
+
+	ret = adis16201_spi_write_reg_16(dev, ADIS16201_MSC_CTRL, msc);
+
+error_ret:
+	return ret;
+}
+
+static int adis16201_check_status(struct device *dev)
+{
+	u16 status;
+	int ret;
+
+	ret = adis16201_spi_read_reg_16(dev, ADIS16201_DIAG_STAT, &status);
+	if (ret < 0) {
+		dev_err(dev, "Reading status failed\n");
+		goto error_ret;
+	}
+	ret = status & 0xF;
+	if (ret)
+		ret = -EFAULT;
+
+	if (status & ADIS16201_DIAG_STAT_SPI_FAIL)
+		dev_err(dev, "SPI failure\n");
+	if (status & ADIS16201_DIAG_STAT_FLASH_UPT)
+		dev_err(dev, "Flash update failed\n");
+	if (status & ADIS16201_DIAG_STAT_POWER_HIGH)
+		dev_err(dev, "Power supply above 3.625V\n");
+	if (status & ADIS16201_DIAG_STAT_POWER_LOW)
+		dev_err(dev, "Power supply below 3.15V\n");
+
+error_ret:
+	return ret;
+}
+
+static int adis16201_self_test(struct device *dev)
+{
+	int ret;
+	ret = adis16201_spi_write_reg_16(dev,
+			ADIS16201_MSC_CTRL,
+			ADIS16201_MSC_CTRL_SELF_TEST_EN);
+	if (ret) {
+		dev_err(dev, "problem starting self test");
+		goto err_ret;
+	}
+
+	ret = adis16201_check_status(dev);
+
+err_ret:
+	return ret;
+}
+
+static int adis16201_initial_setup(struct adis16201_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* Disable IRQ */
+	ret = adis16201_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	/* Do self test */
+	ret = adis16201_self_test(dev);
+	if (ret) {
+		dev_err(dev, "self test failure");
+		goto err_ret;
+	}
+
+	/* Read status register to check the result */
+	ret = adis16201_check_status(dev);
+	if (ret) {
+		adis16201_reset(dev);
+		dev_err(dev, "device not playing ball -> reset");
+		msleep(ADIS16201_STARTUP_DELAY);
+		ret = adis16201_check_status(dev);
+		if (ret) {
+			dev_err(dev, "giving up");
+			goto err_ret;
+		}
+	}
+
+	printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+			st->us->chip_select, st->us->irq);
+
+err_ret:
+	return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16201_read_12bit_unsigned,
+		ADIS16201_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.00122");
+static IIO_DEV_ATTR_IN_RAW(1, adis16201_read_12bit_unsigned,
+		ADIS16201_AUX_ADC);
+static IIO_CONST_ATTR(in1_scale, "0.00061");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16201_read_14bit_signed,
+		ADIS16201_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16201_read_14bit_signed,
+		ADIS16201_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+		adis16201_read_12bit_signed,
+		adis16201_write_16bit,
+		ADIS16201_XACCL_OFFS);
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+		adis16201_read_12bit_signed,
+		adis16201_write_16bit,
+		ADIS16201_YACCL_OFFS);
+static IIO_CONST_ATTR(accel_scale, "0.4625");
+
+static IIO_DEV_ATTR_INCLI_X(adis16201_read_14bit_signed,
+		ADIS16201_XINCL_OUT);
+static IIO_DEV_ATTR_INCLI_Y(adis16201_read_14bit_signed,
+		ADIS16201_YINCL_OUT);
+static IIO_DEV_ATTR_INCLI_X_OFFSET(S_IWUSR | S_IRUGO,
+		adis16201_read_9bit_signed,
+		adis16201_write_16bit,
+		ADIS16201_XACCL_OFFS);
+static IIO_DEV_ATTR_INCLI_Y_OFFSET(S_IWUSR | S_IRUGO,
+		adis16201_read_9bit_signed,
+		adis16201_write_16bit,
+		ADIS16201_YACCL_OFFS);
+static IIO_CONST_ATTR(incli_scale, "0.1");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16201_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16201_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16201");
+
+static struct attribute *adis16201_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16201_event_attribute_group = {
+	.attrs = adis16201_event_attributes,
+};
+
+static struct attribute *adis16201_attributes[] = {
+	&iio_dev_attr_in0_supply_raw.dev_attr.attr,
+	&iio_const_attr_in0_supply_scale.dev_attr.attr,
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_in1_raw.dev_attr.attr,
+	&iio_const_attr_in1_scale.dev_attr.attr,
+	&iio_dev_attr_accel_x_raw.dev_attr.attr,
+	&iio_dev_attr_accel_y_raw.dev_attr.attr,
+	&iio_dev_attr_accel_x_offset.dev_attr.attr,
+	&iio_dev_attr_accel_y_offset.dev_attr.attr,
+	&iio_const_attr_accel_scale.dev_attr.attr,
+	&iio_dev_attr_incli_x_raw.dev_attr.attr,
+	&iio_dev_attr_incli_y_raw.dev_attr.attr,
+	&iio_dev_attr_incli_x_offset.dev_attr.attr,
+	&iio_dev_attr_incli_y_offset.dev_attr.attr,
+	&iio_const_attr_incli_scale.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16201_attribute_group = {
+	.attrs = adis16201_attributes,
+};
+
+static int __devinit adis16201_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16201_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16201_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16201_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16201_event_attribute_group;
+	st->indio_dev->attrs = &adis16201_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis16201_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16201_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16201");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16201_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = adis16201_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	adis16201_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (spi->irq)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16201_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16201_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int adis16201_remove(struct spi_device *spi)
+{
+	struct adis16201_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	adis16201_remove_trigger(indio_dev);
+	if (spi->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16201_uninitialize_ring(indio_dev->ring);
+	iio_device_unregister(indio_dev);
+	adis16201_unconfigure_ring(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver adis16201_driver = {
+	.driver = {
+		.name = "adis16201",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16201_probe,
+	.remove = __devexit_p(adis16201_remove),
+};
+
+static __init int adis16201_init(void)
+{
+	return spi_register_driver(&adis16201_driver);
+}
+module_init(adis16201_init);
+
+static __exit void adis16201_exit(void)
+{
+	spi_unregister_driver(&adis16201_driver);
+}
+module_exit(adis16201_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer driver");
+MODULE_LICENSE("GPL v2");
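The 9/12/14-bit readers above all use the same shift-left/arithmetic-shift-right
idiom to sign-extend a field held in the low bits of a 16-bit word. Generalized,
it looks like this (a sketch in kernel style, not part of the driver;
adis16201_read_14bit_signed() is equivalent to calling it with bits = 14):

	/* Sign-extend the low 'bits' bits of a 16-bit raw register value. */
	static inline s16 adis_sign_extend16(u16 raw, unsigned int bits)
	{
		unsigned int shift = 16 - bits;

		return (s16)(raw << shift) >> shift;
	}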
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
new file mode 100644
index 0000000..e6870a2
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -0,0 +1,218 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16201.h"
+
+static IIO_SCAN_EL_C(in_supply, ADIS16201_SCAN_SUPPLY, ADIS16201_SUPPLY_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in_supply, u, 12, 16);
+static IIO_SCAN_EL_C(accel_x, ADIS16201_SCAN_ACC_X, ADIS16201_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16201_SCAN_ACC_Y, ADIS16201_YACCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(accel, s, 14, 16);
+static IIO_SCAN_EL_C(in0, ADIS16201_SCAN_AUX_ADC, ADIS16201_AUX_ADC, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in0, u, 12, 16);
+static IIO_SCAN_EL_C(temp, ADIS16201_SCAN_TEMP, ADIS16201_TEMP_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(temp, u, 12, 16);
+static IIO_SCAN_EL_C(incli_x, ADIS16201_SCAN_INCLI_X,
+		     ADIS16201_XINCL_OUT, NULL);
+static IIO_SCAN_EL_C(incli_y, ADIS16201_SCAN_INCLI_Y,
+		     ADIS16201_YINCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(incli, s, 14, 16);
+static IIO_SCAN_EL_TIMESTAMP(7);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *adis16201_scan_el_attrs[] = {
+	&iio_scan_el_in_supply.dev_attr.attr,
+	&iio_const_attr_in_supply_index.dev_attr.attr,
+	&iio_const_attr_in_supply_type.dev_attr.attr,
+	&iio_scan_el_accel_x.dev_attr.attr,
+	&iio_const_attr_accel_x_index.dev_attr.attr,
+	&iio_scan_el_accel_y.dev_attr.attr,
+	&iio_const_attr_accel_y_index.dev_attr.attr,
+	&iio_const_attr_accel_type.dev_attr.attr,
+	&iio_scan_el_in0.dev_attr.attr,
+	&iio_const_attr_in0_index.dev_attr.attr,
+	&iio_const_attr_in0_type.dev_attr.attr,
+	&iio_scan_el_temp.dev_attr.attr,
+	&iio_const_attr_temp_index.dev_attr.attr,
+	&iio_const_attr_temp_type.dev_attr.attr,
+	&iio_scan_el_incli_x.dev_attr.attr,
+	&iio_const_attr_incli_x_index.dev_attr.attr,
+	&iio_scan_el_incli_y.dev_attr.attr,
+	&iio_const_attr_incli_y_index.dev_attr.attr,
+	&iio_const_attr_incli_type.dev_attr.attr,
+	&iio_scan_el_timestamp.dev_attr.attr,
+	&iio_const_attr_timestamp_index.dev_attr.attr,
+	&iio_const_attr_timestamp_type.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adis16201_scan_el_group = {
+	.attrs = adis16201_scan_el_attrs,
+	.name = "scan_elements",
+};
+
+/**
+ * adis16201_poll_func_th() top half interrupt handler called by trigger
+ * @private_data:	iio_dev
+ **/
+static void adis16201_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+	struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+	st->last_timestamp = time;
+	schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16201_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16201_read_ring_data(struct device *dev, u8 *rx)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[ADIS16201_OUTPUTS + 1];
+	int ret;
+	int i;
+
+	mutex_lock(&st->buf_lock);
+
+	spi_message_init(&msg);
+
+	memset(xfers, 0, sizeof(xfers));
+	for (i = 0; i <= ADIS16201_OUTPUTS; i++) {
+		xfers[i].bits_per_word = 8;
+		xfers[i].cs_change = 1;
+		xfers[i].len = 2;
+		xfers[i].delay_usecs = 20;
+		xfers[i].tx_buf = st->tx + 2 * i;
+		st->tx[2 * i] = ADIS16201_READ_REG(ADIS16201_SUPPLY_OUT + 2 * i);
+		st->tx[2 * i + 1] = 0;
+		if (i >= 1)
+			xfers[i].rx_buf = rx + 2 * (i - 1);
+		spi_message_add_tail(&xfers[i], &msg);
+	}
+
+	ret = spi_sync(st->us, &msg);
+	if (ret)
+		dev_err(&st->us->dev, "problem when burst reading");
+
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16201_trigger_bh_to_ring(struct work_struct *work_s)
+{
+	struct adis16201_state *st
+		= container_of(work_s, struct adis16201_state,
+			       work_trigger_to_ring);
+	struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+	int i = 0;
+	s16 *data;
+	size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+	data = kmalloc(datasize, GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&st->us->dev, "memory alloc failed in ring bh");
+		return;
+	}
+
+	if (ring->scan_count)
+		if (adis16201_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+			for (; i < ring->scan_count; i++)
+				data[i] = be16_to_cpup(
+					(__be16 *)&(st->rx[i*2]));
+
+	/* Guaranteed to be aligned with 8 byte boundary */
+	if (ring->scan_timestamp)
+		*((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+	ring->access.store_to(ring,
+			      (u8 *)data,
+			      st->last_timestamp);
+
+	iio_trigger_notify_done(st->indio_dev->trig);
+	kfree(data);
+
+	return;
+}
+
+void adis16201_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	kfree(indio_dev->pollfunc);
+	iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16201_configure_ring(struct iio_dev *indio_dev)
+{
+	int ret = 0;
+	struct adis16201_state *st = indio_dev->dev_data;
+	struct iio_ring_buffer *ring;
+	INIT_WORK(&st->work_trigger_to_ring, adis16201_trigger_bh_to_ring);
+
+	ring = iio_sw_rb_allocate(indio_dev);
+	if (!ring) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	indio_dev->ring = ring;
+	/* Effectively select the ring buffer implementation */
+	iio_ring_sw_register_funcs(&ring->access);
+	ring->bpe = 2;
+	ring->scan_el_attrs = &adis16201_scan_el_group;
+	ring->scan_timestamp = true;
+	ring->preenable = &iio_sw_ring_preenable;
+	ring->postenable = &iio_triggered_ring_postenable;
+	ring->predisable = &iio_triggered_ring_predisable;
+	ring->owner = THIS_MODULE;
+
+	/* Set default scan mode */
+	iio_scan_mask_set(ring, iio_scan_el_in_supply.number);
+	iio_scan_mask_set(ring, iio_scan_el_accel_x.number);
+	iio_scan_mask_set(ring, iio_scan_el_accel_y.number);
+	iio_scan_mask_set(ring, iio_scan_el_temp.number);
+	iio_scan_mask_set(ring, iio_scan_el_in0.number);
+	iio_scan_mask_set(ring, iio_scan_el_incli_x.number);
+	iio_scan_mask_set(ring, iio_scan_el_incli_y.number);
+
+	ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16201_poll_func_th);
+	if (ret)
+		goto error_iio_sw_rb_free;
+
+	indio_dev->modes |= INDIO_RING_TRIGGERED;
+	return 0;
+
+error_iio_sw_rb_free:
+	iio_sw_rb_free(indio_dev->ring);
+	return ret;
+}
+
+int adis16201_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16201_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+	iio_ring_buffer_unregister(ring);
+}
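adis16201_trigger_bh_to_ring() above packs scan_count 16-bit samples first and
then stores the 64-bit timestamp at the next 8-byte boundary. A reader can
compute that offset the same way; this sketch assumes, as in this driver, that
every enabled channel is 16 bits wide.

	/*
	 * Byte offset of the s64 timestamp after 'scan_count' 16-bit samples,
	 * rounded up to an 8-byte boundary (matches ((i + 3)/4)*4 in s16 units).
	 */
	static inline size_t adis16201_ts_offset(unsigned int scan_count)
	{
		return (scan_count * 2 + 7) & ~7UL;
	}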
diff --git a/drivers/staging/iio/accel/adis16201_trigger.c b/drivers/staging/iio/accel/adis16201_trigger.c
new file mode 100644
index 0000000..8a9cea19
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16201_trigger.c
@@ -0,0 +1,122 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16201.h"
+
+/**
+ * adis16201_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16201_data_rdy_trig_poll(struct iio_dev *dev_info,
+				       int index,
+				       s64 timestamp,
+				       int no_test)
+{
+	struct adis16201_state *st = iio_dev_get_devdata(dev_info);
+	struct iio_trigger *trig = st->trig;
+
+	iio_trigger_poll(trig, timestamp);
+
+	return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16201_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16201_trigger_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+
+static const struct attribute_group adis16201_trigger_attr_group = {
+	.attrs = adis16201_trigger_attrs,
+};
+
+/**
+ * adis16201_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16201_data_rdy_trigger_set_state(struct iio_trigger *trig,
+						bool state)
+{
+	struct adis16201_state *st = trig->private_data;
+	struct iio_dev *indio_dev = st->indio_dev;
+	int ret = 0;
+
+	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+	ret = adis16201_set_irq(&st->indio_dev->dev, state);
+	if (state == false) {
+		iio_remove_event_from_list(&iio_event_data_rdy_trig,
+					   &indio_dev->interrupts[0]
+					   ->ev_list);
+		flush_scheduled_work();
+	} else {
+		iio_add_event_to_list(&iio_event_data_rdy_trig,
+				      &indio_dev->interrupts[0]->ev_list);
+	}
+	return ret;
+}
+
+/**
+ * adis16201_trig_try_reen() try re-enabling irq for data rdy trigger
+ * @trig:	the datardy trigger
+ **/
+static int adis16201_trig_try_reen(struct iio_trigger *trig)
+{
+	struct adis16201_state *st = trig->private_data;
+	enable_irq(st->us->irq);
+	return 0;
+}
+
+int adis16201_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct adis16201_state *st = indio_dev->dev_data;
+
+	st->trig = iio_allocate_trigger();
+	st->trig->name = kasprintf(GFP_KERNEL,
+				"adis16201-dev%d",
+				indio_dev->id);
+	if (!st->trig->name) {
+		ret = -ENOMEM;
+		goto error_free_trig;
+	}
+	st->trig->dev.parent = &st->us->dev;
+	st->trig->owner = THIS_MODULE;
+	st->trig->private_data = st;
+	st->trig->set_trigger_state = &adis16201_data_rdy_trigger_set_state;
+	st->trig->try_reenable = &adis16201_trig_try_reen;
+	st->trig->control_attrs = &adis16201_trigger_attr_group;
+	ret = iio_trigger_register(st->trig);
+
+	/* select default trigger */
+	indio_dev->trig = st->trig;
+	if (ret)
+		goto error_free_trig_name;
+
+	return 0;
+
+error_free_trig_name:
+	kfree(st->trig->name);
+error_free_trig:
+	iio_free_trigger(st->trig);
+
+	return ret;
+}
+
+void adis16201_remove_trigger(struct iio_dev *indio_dev)
+{
+	struct adis16201_state *state = indio_dev->dev_data;
+
+	iio_trigger_unregister(state->trig);
+	kfree(state->trig->name);
+	iio_free_trigger(state->trig);
+}
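The trigger registered above is named "adis16201-dev%d"; to use it, user space
points the device's current_trigger attribute at that name before enabling the
ring. The sysfs path used below (trigger/current_trigger under the device
directory) is an assumption about the IIO ABI of this era and should be checked
against the Documentation/ABI entries on the kernel in use.

	#include <stdio.h>

	static int select_datardy_trigger(const char *dev_dir, int dev_id)
	{
		char path[256];
		FILE *f;

		/* e.g. dev_dir = "/sys/bus/iio/devices/device0" (assumed layout) */
		snprintf(path, sizeof(path), "%s/trigger/current_trigger", dev_dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "adis16201-dev%d\n", dev_id);
		fclose(f);
		return 0;
	}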
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
new file mode 100644
index 0000000..b39323e
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -0,0 +1,143 @@
+#ifndef SPI_ADIS16203_H_
+#define SPI_ADIS16203_H_
+
+#define ADIS16203_STARTUP_DELAY	220 /* ms */
+
+#define ADIS16203_READ_REG(a)    a
+#define ADIS16203_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16203_FLASH_CNT      0x00 /* Flash memory write count */
+#define ADIS16203_SUPPLY_OUT     0x02 /* Output, power supply */
+#define ADIS16203_AUX_ADC        0x08 /* Output, auxiliary ADC input */
+#define ADIS16203_TEMP_OUT       0x0A /* Output, temperature */
+#define ADIS16203_XINCL_OUT      0x0C /* Output, x-axis inclination */
+#define ADIS16203_YINCL_OUT      0x0E /* Output, y-axis inclination */
+#define ADIS16203_INCL_NULL      0x18 /* Incline null calibration */
+#define ADIS16203_ALM_MAG1       0x20 /* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG2       0x22 /* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_SMPL1      0x24 /* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL2      0x26 /* Alarm 2, sample period */
+#define ADIS16203_ALM_CTRL       0x28 /* Alarm control */
+#define ADIS16203_AUX_DAC        0x30 /* Auxiliary DAC data */
+#define ADIS16203_GPIO_CTRL      0x32 /* General-purpose digital input/output control */
+#define ADIS16203_MSC_CTRL       0x34 /* Miscellaneous control */
+#define ADIS16203_SMPL_PRD       0x36 /* Internal sample period (rate) control */
+#define ADIS16203_AVG_CNT        0x38 /* Operation, filter configuration */
+#define ADIS16203_SLP_CNT        0x3A /* Operation, sleep mode control */
+#define ADIS16203_DIAG_STAT      0x3C /* Diagnostics, system status register */
+#define ADIS16203_GLOB_CMD       0x3E /* Operation, system command register */
+
+#define ADIS16203_OUTPUTS        5
+
+/* MSC_CTRL */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST	(1 << 10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN	(1 << 9)  /* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN	        (1 << 8)  /* Self-test enable */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN	        (1 << 2)  /* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH	        (1 << 1)  /* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1	(1 << 0)  /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* DIAG_STAT */
+#define ADIS16203_DIAG_STAT_ALARM2        (1<<9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1        (1<<8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL (1<<5) /* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SPI_FAIL	  (1<<3) /* SPI communications failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT	  (1<<2) /* Flash update failure */
+#define ADIS16203_DIAG_STAT_POWER_HIGH	  (1<<1) /* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW	  (1<<0) /* Power supply below 3.15 V */
+
+/* GLOB_CMD */
+#define ADIS16203_GLOB_CMD_SW_RESET	(1<<7)
+#define ADIS16203_GLOB_CMD_CLEAR_STAT	(1<<4)
+#define ADIS16203_GLOB_CMD_FACTORY_CAL	(1<<1)
+
+#define ADIS16203_MAX_TX 12
+#define ADIS16203_MAX_RX 10
+
+#define ADIS16203_ERROR_ACTIVE          (1<<14)
+
+/**
+ * struct adis16203_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @inter:		used to check if new interrupt has been triggered
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16203_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+
+int adis16203_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16203_scan {
+	ADIS16203_SCAN_SUPPLY,
+	ADIS16203_SCAN_AUX_ADC,
+	ADIS16203_SCAN_TEMP,
+	ADIS16203_SCAN_INCLI_X,
+	ADIS16203_SCAN_INCLI_Y,
+};
+
+void adis16203_remove_trigger(struct iio_dev *indio_dev);
+int adis16203_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16203_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+int adis16203_configure_ring(struct iio_dev *indio_dev);
+void adis16203_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16203_initialize_ring(struct iio_ring_buffer *ring);
+void adis16203_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16203_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16203_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16203_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16203_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16203_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16203_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16203_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16203_H_ */
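One control bit that is new relative to the adis16201 map is
ADIS16203_MSC_CTRL_REVERSE_ROT_EN, which flips the rotation sense of both
inclination outputs. Below is a sketch of setting it with the same
read-modify-write pattern the driver applies to MSC_CTRL; it would have to
live in adis16203_core.c below, since the register accessors there are static.

	static int adis16203_set_reverse_rotation(struct device *dev, bool enable)
	{
		u16 msc;
		int ret;

		ret = adis16203_spi_read_reg_16(dev, ADIS16203_MSC_CTRL, &msc);
		if (ret)
			return ret;

		if (enable)
			msc |= ADIS16203_MSC_CTRL_REVERSE_ROT_EN;
		else
			msc &= ~ADIS16203_MSC_CTRL_REVERSE_ROT_EN;

		return adis16203_spi_write_reg_16(dev, ADIS16203_MSC_CTRL, msc);
	}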
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
new file mode 100644
index 0000000..b57f190
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -0,0 +1,568 @@
+/*
+ * ADIS16203 Programmable 360 Degrees Inclinometer driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "inclinometer.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16203.h"
+
+#define DRIVER_NAME		"adis16203"
+
+static int adis16203_check_status(struct device *dev);
+
+/**
+ * adis16203_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16203_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16203_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16203_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16203_spi_write_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.tx_buf = st->tx + 2,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16203_WRITE_REG(lower_reg_address);
+	st->tx[1] = value & 0xFF;
+	st->tx[2] = ADIS16203_WRITE_REG(lower_reg_address + 1);
+	st->tx[3] = (value >> 8) & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16203_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16203_spi_read_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+			.delay_usecs = 20,
+		}, {
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+			.delay_usecs = 20,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16203_READ_REG(lower_reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				lower_reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t adis16203_read_12bit_unsigned(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = adis16203_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	if (val & ADIS16203_ERROR_ACTIVE)
+		adis16203_check_status(dev);
+
+	return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16203_read_temp(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u16 val;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16203_spi_read_reg_16(dev, ADIS16203_TEMP_OUT, (u16 *)&val);
+	if (ret)
+		goto error_ret;
+
+	if (val & ADIS16203_ERROR_ACTIVE)
+		adis16203_check_status(dev);
+
+	val &= 0xFFF;
+	ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+	return ret;
+}
+
+static ssize_t adis16203_read_14bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s16 val = 0;
+	ssize_t ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16203_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (!ret) {
+		if (val & ADIS16203_ERROR_ACTIVE)
+			adis16203_check_status(dev);
+
+		val = ((s16)(val << 2) >> 2);
+		ret = sprintf(buf, "%d\n", val);
+	}
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16203_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16203_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int adis16203_reset(struct device *dev)
+{
+	int ret;
+	ret = adis16203_spi_write_reg_8(dev,
+			ADIS16203_GLOB_CMD,
+			ADIS16203_GLOB_CMD_SW_RESET);
+	if (ret)
+		dev_err(dev, "problem resetting device");
+
+	return ret;
+}
+
+static ssize_t adis16203_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -EINVAL;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return adis16203_reset(dev);
+	}
+	return -EINVAL;
+}
+
+int adis16203_set_irq(struct device *dev, bool enable)
+{
+	int ret = 0;
+	u16 msc;
+
+	ret = adis16203_spi_read_reg_16(dev, ADIS16203_MSC_CTRL, &msc);
+	if (ret)
+		goto error_ret;
+
+	msc |= ADIS16203_MSC_CTRL_ACTIVE_HIGH;
+	msc &= ~ADIS16203_MSC_CTRL_DATA_RDY_DIO1;
+	if (enable)
+		msc |= ADIS16203_MSC_CTRL_DATA_RDY_EN;
+	else
+		msc &= ~ADIS16203_MSC_CTRL_DATA_RDY_EN;
+
+	ret = adis16203_spi_write_reg_16(dev, ADIS16203_MSC_CTRL, msc);
+
+error_ret:
+	return ret;
+}
+
+static int adis16203_check_status(struct device *dev)
+{
+	u16 status;
+	int ret;
+
+	ret = adis16203_spi_read_reg_16(dev, ADIS16203_DIAG_STAT, &status);
+	if (ret < 0) {
+		dev_err(dev, "Reading status failed\n");
+		goto error_ret;
+	}
+	ret = status & 0x1F;
+
+	if (status & ADIS16203_DIAG_STAT_SELFTEST_FAIL)
+		dev_err(dev, "Self test failure\n");
+	if (status & ADIS16203_DIAG_STAT_SPI_FAIL)
+		dev_err(dev, "SPI failure\n");
+	if (status & ADIS16203_DIAG_STAT_FLASH_UPT)
+		dev_err(dev, "Flash update failed\n");
+	if (status & ADIS16203_DIAG_STAT_POWER_HIGH)
+		dev_err(dev, "Power supply above 3.625V\n");
+	if (status & ADIS16203_DIAG_STAT_POWER_LOW)
+		dev_err(dev, "Power supply below 3.15V\n");
+
+error_ret:
+	return ret;
+}
+
+static int adis16203_self_test(struct device *dev)
+{
+	int ret;
+	ret = adis16203_spi_write_reg_16(dev,
+			ADIS16203_MSC_CTRL,
+			ADIS16203_MSC_CTRL_SELF_TEST_EN);
+	if (ret) {
+		dev_err(dev, "problem starting self test");
+		goto err_ret;
+	}
+
+	adis16203_check_status(dev);
+
+err_ret:
+	return ret;
+}
+
+static int adis16203_initial_setup(struct adis16203_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* Disable IRQ */
+	ret = adis16203_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	/* Do self test */
+	ret = adis16203_self_test(dev);
+	if (ret) {
+		dev_err(dev, "self test failure");
+		goto err_ret;
+	}
+
+	/* Read status register to check the result */
+	ret = adis16203_check_status(dev);
+	if (ret) {
+		adis16203_reset(dev);
+		dev_err(dev, "device not playing ball -> reset");
+		msleep(ADIS16203_STARTUP_DELAY);
+		ret = adis16203_check_status(dev);
+		if (ret) {
+			dev_err(dev, "giving up");
+			goto err_ret;
+		}
+	}
+
+	printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+			st->us->chip_select, st->us->irq);
+
+err_ret:
+	return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16203_read_12bit_unsigned,
+		ADIS16203_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.00122");
+static IIO_DEV_ATTR_IN_RAW(1, adis16203_read_12bit_unsigned,
+		ADIS16203_AUX_ADC);
+static IIO_CONST_ATTR(in1_scale, "0.00061");
+
+static IIO_DEV_ATTR_INCLI_X(adis16203_read_14bit_signed,
+		ADIS16203_XINCL_OUT);
+static IIO_DEV_ATTR_INCLI_Y(adis16203_read_14bit_signed,
+		ADIS16203_YINCL_OUT);
+static IIO_DEV_ATTR_INCLI_X_OFFSET(S_IWUSR | S_IRUGO,
+		adis16203_read_14bit_signed,
+		adis16203_write_16bit,
+		ADIS16203_INCL_NULL);
+static IIO_CONST_ATTR(incli_scale, "0.025");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16203_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16203_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16203");
+
+static struct attribute *adis16203_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16203_event_attribute_group = {
+	.attrs = adis16203_event_attributes,
+};
+
+static struct attribute *adis16203_attributes[] = {
+	&iio_dev_attr_in0_supply_raw.dev_attr.attr,
+	&iio_const_attr_in0_supply_scale.dev_attr.attr,
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_in1_raw.dev_attr.attr,
+	&iio_const_attr_in1_scale.dev_attr.attr,
+	&iio_dev_attr_incli_x_raw.dev_attr.attr,
+	&iio_dev_attr_incli_y_raw.dev_attr.attr,
+	&iio_dev_attr_incli_x_offset.dev_attr.attr,
+	&iio_const_attr_incli_scale.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16203_attribute_group = {
+	.attrs = adis16203_attributes,
+};
+
+static int __devinit adis16203_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16203_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16203_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16203_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16203_event_attribute_group;
+	st->indio_dev->attrs = &adis16203_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis16203_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16203_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16203");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16203_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = adis16203_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	adis16203_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (spi->irq)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16203_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16203_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int adis16203_remove(struct spi_device *spi)
+{
+	struct adis16203_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	adis16203_remove_trigger(indio_dev);
+	if (spi->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16203_uninitialize_ring(indio_dev->ring);
+	iio_device_unregister(indio_dev);
+	adis16203_unconfigure_ring(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver adis16203_driver = {
+	.driver = {
+		.name = "adis16203",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16203_probe,
+	.remove = __devexit_p(adis16203_remove),
+};
+
+static __init int adis16203_init(void)
+{
+	return spi_register_driver(&adis16203_driver);
+}
+module_init(adis16203_init);
+
+static __exit void adis16203_exit(void)
+{
+	spi_unregister_driver(&adis16203_driver);
+}
+module_exit(adis16203_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
new file mode 100644
index 0000000..3d774f7
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -0,0 +1,211 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16203.h"
+
+static IIO_SCAN_EL_C(in_supply, ADIS16203_SCAN_SUPPLY, ADIS16203_SUPPLY_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in_supply, u, 12, 16);
+static IIO_SCAN_EL_C(in0, ADIS16203_SCAN_AUX_ADC, ADIS16203_AUX_ADC, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in0, u, 12, 16);
+static IIO_SCAN_EL_C(temp, ADIS16203_SCAN_TEMP, ADIS16203_TEMP_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(temp, u, 12, 16);
+static IIO_SCAN_EL_C(incli_x, ADIS16203_SCAN_INCLI_X,
+		     ADIS16203_XINCL_OUT, NULL);
+static IIO_SCAN_EL_C(incli_y, ADIS16203_SCAN_INCLI_Y,
+		     ADIS16203_YINCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(incli, s, 14, 16);
+static IIO_SCAN_EL_TIMESTAMP(5);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *adis16203_scan_el_attrs[] = {
+	&iio_scan_el_in_supply.dev_attr.attr,
+	&iio_const_attr_in_supply_index.dev_attr.attr,
+	&iio_const_attr_in_supply_type.dev_attr.attr,
+	&iio_scan_el_in0.dev_attr.attr,
+	&iio_const_attr_in0_index.dev_attr.attr,
+	&iio_const_attr_in0_type.dev_attr.attr,
+	&iio_scan_el_temp.dev_attr.attr,
+	&iio_const_attr_temp_index.dev_attr.attr,
+	&iio_const_attr_temp_type.dev_attr.attr,
+	&iio_scan_el_incli_x.dev_attr.attr,
+	&iio_const_attr_incli_x_index.dev_attr.attr,
+	&iio_scan_el_incli_y.dev_attr.attr,
+	&iio_const_attr_incli_y_index.dev_attr.attr,
+	&iio_const_attr_incli_type.dev_attr.attr,
+	&iio_scan_el_timestamp.dev_attr.attr,
+	&iio_const_attr_timestamp_index.dev_attr.attr,
+	&iio_const_attr_timestamp_type.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adis16203_scan_el_group = {
+	.attrs = adis16203_scan_el_attrs,
+	.name = "scan_elements",
+};
+
+/**
+ * adis16203_poll_func_th() top half interrupt handler called by trigger
+ * @private_data:	iio_dev
+ **/
+static void adis16203_poll_func_th(struct iio_dev *indio_dev, s64 timestamp)
+{
+	struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+	st->last_timestamp = timestamp;
+	schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16203_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16203_read_ring_data(struct device *dev, u8 *rx)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[ADIS16203_OUTPUTS + 1];
+	int ret;
+	int i;
+
+	mutex_lock(&st->buf_lock);
+
+	spi_message_init(&msg);
+
+	memset(xfers, 0, sizeof(xfers));
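+	/*
+	 * Each response is clocked out during the following transfer, so an
+	 * extra transfer is queued and every rx buffer lags its tx command
+	 * by one position (rx + 2 * (i - 1) below).
+	 */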
+	for (i = 0; i <= ADIS16203_OUTPUTS; i++) {
+		xfers[i].bits_per_word = 8;
+		xfers[i].cs_change = 1;
+		xfers[i].len = 2;
+		xfers[i].delay_usecs = 20;
+		xfers[i].tx_buf = st->tx + 2 * i;
+		if (i < 1) /* SUPPLY_OUT: 0x02, AUX_ADC: 0x08 */
+			st->tx[2 * i] = ADIS16203_READ_REG(ADIS16203_SUPPLY_OUT + 2 * i);
+		else
+			st->tx[2 * i] = ADIS16203_READ_REG(ADIS16203_SUPPLY_OUT + 2 * i + 6);
+		st->tx[2 * i + 1] = 0;
+		if (i >= 1)
+			xfers[i].rx_buf = rx + 2 * (i - 1);
+		spi_message_add_tail(&xfers[i], &msg);
+	}
+
+	ret = spi_sync(st->us, &msg);
+	if (ret)
+		dev_err(&st->us->dev, "problem when burst reading");
+
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16203_trigger_bh_to_ring(struct work_struct *work_s)
+{
+	struct adis16203_state *st
+		= container_of(work_s, struct adis16203_state,
+			       work_trigger_to_ring);
+	struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+	int i = 0;
+	s16 *data;
+	size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+	data = kmalloc(datasize, GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&st->us->dev, "memory alloc failed in ring bh");
+		return;
+	}
+
+	if (ring->scan_count)
+		if (adis16203_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+			for (; i < ring->scan_count; i++)
+				data[i] = be16_to_cpup(
+					(__be16 *)&(st->rx[i*2]));
+
+	/* Guaranteed to be aligned to an 8 byte boundary */
+	if (ring->scan_timestamp)
+		*((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+	ring->access.store_to(ring,
+			      (u8 *)data,
+			      st->last_timestamp);
+
+	iio_trigger_notify_done(st->indio_dev->trig);
+	kfree(data);
+
+	return;
+}
+
+void adis16203_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	kfree(indio_dev->pollfunc);
+	iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16203_configure_ring(struct iio_dev *indio_dev)
+{
+	int ret = 0;
+	struct adis16203_state *st = indio_dev->dev_data;
+	struct iio_ring_buffer *ring;
+	INIT_WORK(&st->work_trigger_to_ring, adis16203_trigger_bh_to_ring);
+
+	ring = iio_sw_rb_allocate(indio_dev);
+	if (!ring) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	indio_dev->ring = ring;
+	/* Effectively select the ring buffer implementation */
+	iio_ring_sw_register_funcs(&ring->access);
+	ring->bpe = 2;
+	ring->scan_el_attrs = &adis16203_scan_el_group;
+	ring->scan_timestamp = true;
+	ring->preenable = &iio_sw_ring_preenable;
+	ring->postenable = &iio_triggered_ring_postenable;
+	ring->predisable = &iio_triggered_ring_predisable;
+	ring->owner = THIS_MODULE;
+
+	/* Set default scan mode */
+	iio_scan_mask_set(ring, iio_scan_el_in_supply.number);
+	iio_scan_mask_set(ring, iio_scan_el_temp.number);
+	iio_scan_mask_set(ring, iio_scan_el_in0.number);
+	iio_scan_mask_set(ring, iio_scan_el_incli_x.number);
+	iio_scan_mask_set(ring, iio_scan_el_incli_y.number);
+
+	ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16203_poll_func_th);
+	if (ret)
+		goto error_iio_sw_rb_free;
+
+	indio_dev->modes |= INDIO_RING_TRIGGERED;
+	return 0;
+
+error_iio_sw_rb_free:
+	iio_sw_rb_free(indio_dev->ring);
+	return ret;
+}
+
+int adis16203_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16203_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+	iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16203_trigger.c b/drivers/staging/iio/accel/adis16203_trigger.c
new file mode 100644
index 0000000..50be51c
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16203_trigger.c
@@ -0,0 +1,122 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16203.h"
+
+/**
+ * adis16203_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16203_data_rdy_trig_poll(struct iio_dev *dev_info,
+				       int index,
+				       s64 timestamp,
+				       int no_test)
+{
+	struct adis16203_state *st = iio_dev_get_devdata(dev_info);
+	struct iio_trigger *trig = st->trig;
+
+	iio_trigger_poll(trig, timestamp);
+
+	return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16203_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16203_trigger_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+
+static const struct attribute_group adis16203_trigger_attr_group = {
+	.attrs = adis16203_trigger_attrs,
+};
+
+/**
+ * adis16203_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16203_data_rdy_trigger_set_state(struct iio_trigger *trig,
+						bool state)
+{
+	struct adis16203_state *st = trig->private_data;
+	struct iio_dev *indio_dev = st->indio_dev;
+	int ret = 0;
+
+	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+	ret = adis16203_set_irq(&st->indio_dev->dev, state);
+	if (state == false) {
+		iio_remove_event_from_list(&iio_event_data_rdy_trig,
+					   &indio_dev->interrupts[0]
+					   ->ev_list);
+		flush_scheduled_work();
+	} else {
+		iio_add_event_to_list(&iio_event_data_rdy_trig,
+				      &indio_dev->interrupts[0]->ev_list);
+	}
+	return ret;
+}
+
+/**
+ * adis16203_trig_try_reen() - try re-enabling irq for data rdy trigger
+ * @trig:	the datardy trigger
+ **/
+static int adis16203_trig_try_reen(struct iio_trigger *trig)
+{
+	struct adis16203_state *st = trig->private_data;
+	enable_irq(st->us->irq);
+	return 0;
+}
+
+int adis16203_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct adis16203_state *st = indio_dev->dev_data;
+
+	st->trig = iio_allocate_trigger();
+	st->trig->name = kasprintf(GFP_KERNEL,
+				"adis16203-dev%d",
+				indio_dev->id);
+	if (!st->trig->name) {
+		ret = -ENOMEM;
+		goto error_free_trig;
+	}
+	st->trig->dev.parent = &st->us->dev;
+	st->trig->owner = THIS_MODULE;
+	st->trig->private_data = st;
+	st->trig->set_trigger_state = &adis16203_data_rdy_trigger_set_state;
+	st->trig->try_reenable = &adis16203_trig_try_reen;
+	st->trig->control_attrs = &adis16203_trigger_attr_group;
+	ret = iio_trigger_register(st->trig);
+
+	/* select default trigger */
+	indio_dev->trig = st->trig;
+	if (ret)
+		goto error_free_trig_name;
+
+	return 0;
+
+error_free_trig_name:
+	kfree(st->trig->name);
+error_free_trig:
+	iio_free_trigger(st->trig);
+
+	return ret;
+}
+
+void adis16203_remove_trigger(struct iio_dev *indio_dev)
+{
+	struct adis16203_state *state = indio_dev->dev_data;
+
+	iio_trigger_unregister(state->trig);
+	kfree(state->trig->name);
+	iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
new file mode 100644
index 0000000..e9ed7cb
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -0,0 +1,151 @@
+#ifndef SPI_ADIS16204_H_
+#define SPI_ADIS16204_H_
+
+#define ADIS16204_STARTUP_DELAY	220 /* ms */
+
+#define ADIS16204_READ_REG(a)    a
+#define ADIS16204_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16204_FLASH_CNT      0x00 /* Flash memory write count */
+#define ADIS16204_SUPPLY_OUT     0x02 /* Output, power supply */
+#define ADIS16204_XACCL_OUT      0x04 /* Output, x-axis accelerometer */
+#define ADIS16204_YACCL_OUT      0x06 /* Output, y-axis accelerometer */
+#define ADIS16204_AUX_ADC        0x08 /* Output, auxiliary ADC input */
+#define ADIS16204_TEMP_OUT       0x0A /* Output, temperature */
+#define ADIS16204_X_PEAK_OUT     0x0C /* Twos complement */
+#define ADIS16204_Y_PEAK_OUT     0x0E /* Twos complement */
+#define ADIS16204_XACCL_NULL     0x10 /* Calibration, x-axis acceleration offset null */
+#define ADIS16204_YACCL_NULL     0x12 /* Calibration, y-axis acceleration offset null */
+#define ADIS16204_XACCL_SCALE    0x14 /* X-axis scale factor calibration register */
+#define ADIS16204_YACCL_SCALE    0x16 /* Y-axis scale factor calibration register */
+#define ADIS16204_XY_RSS_OUT     0x18 /* XY combined acceleration (RSS) */
+#define ADIS16204_XY_PEAK_OUT    0x1A /* Peak, XY combined output (RSS) */
+#define ADIS16204_CAP_BUF_1      0x1C /* Capture buffer output register 1 */
+#define ADIS16204_CAP_BUF_2      0x1E /* Capture buffer output register 2 */
+#define ADIS16204_ALM_MAG1       0x20 /* Alarm 1 amplitude threshold */
+#define ADIS16204_ALM_MAG2       0x22 /* Alarm 2 amplitude threshold */
+#define ADIS16204_ALM_CTRL       0x28 /* Alarm control */
+#define ADIS16204_CAPT_PNTR      0x2A /* Capture register address pointer */
+#define ADIS16204_AUX_DAC        0x30 /* Auxiliary DAC data */
+#define ADIS16204_GPIO_CTRL      0x32 /* General-purpose digital input/output control */
+#define ADIS16204_MSC_CTRL       0x34 /* Miscellaneous control */
+#define ADIS16204_SMPL_PRD       0x36 /* Internal sample period (rate) control */
+#define ADIS16204_AVG_CNT        0x38 /* Operation, filter configuration */
+#define ADIS16204_SLP_CNT        0x3A /* Operation, sleep mode control */
+#define ADIS16204_DIAG_STAT      0x3C /* Diagnostics, system status register */
+#define ADIS16204_GLOB_CMD       0x3E /* Operation, system command register */
+
+#define ADIS16204_OUTPUTS        5
+
+/* MSC_CTRL */
+#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST	(1 << 10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16204_MSC_CTRL_SELF_TEST_EN	        (1 << 8)  /* Self-test enable */
+#define ADIS16204_MSC_CTRL_DATA_RDY_EN	        (1 << 2)  /* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16204_MSC_CTRL_ACTIVE_HIGH	        (1 << 1)  /* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2	(1 << 0)  /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
+
+/* DIAG_STAT */
+#define ADIS16204_DIAG_STAT_ALARM2        (1<<9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16204_DIAG_STAT_ALARM1        (1<<8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16204_DIAG_STAT_SELFTEST_FAIL (1<<5) /* Self-test diagnostic error flag: 1 = error condition,
+						0 = normal operation */
+#define ADIS16204_DIAG_STAT_SPI_FAIL	  (1<<3) /* SPI communications failure */
+#define ADIS16204_DIAG_STAT_FLASH_UPT	  (1<<2) /* Flash update failure */
+#define ADIS16204_DIAG_STAT_POWER_HIGH	  (1<<1) /* Power supply above 3.625 V */
+#define ADIS16204_DIAG_STAT_POWER_LOW	  (1<<0) /* Power supply below 2.975 V */
+
+/* GLOB_CMD */
+#define ADIS16204_GLOB_CMD_SW_RESET	(1<<7)
+#define ADIS16204_GLOB_CMD_CLEAR_STAT	(1<<4)
+#define ADIS16204_GLOB_CMD_FACTORY_CAL	(1<<1)
+
+#define ADIS16204_MAX_TX 24
+#define ADIS16204_MAX_RX 24
+
+#define ADIS16204_ERROR_ACTIVE          (1<<14)
+
+/**
+ * struct adis16204_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16204_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+
+int adis16204_set_irq(struct device *dev, bool enable);
+
+#ifdef CONFIG_IIO_RING_BUFFER
+enum adis16204_scan {
+	ADIS16204_SCAN_SUPPLY,
+	ADIS16204_SCAN_ACC_X,
+	ADIS16204_SCAN_ACC_Y,
+	ADIS16204_SCAN_AUX_ADC,
+	ADIS16204_SCAN_TEMP,
+};
+
+void adis16204_remove_trigger(struct iio_dev *indio_dev);
+int adis16204_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16204_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+int adis16204_configure_ring(struct iio_dev *indio_dev);
+void adis16204_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16204_initialize_ring(struct iio_ring_buffer *ring);
+void adis16204_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16204_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16204_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16204_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16204_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16204_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16204_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
new file mode 100644
index 0000000..cc15e40
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -0,0 +1,613 @@
+/*
+ * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "accel.h"
+#include "../gyro/gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16204.h"
+
+#define DRIVER_NAME		"adis16204"
+
+static int adis16204_check_status(struct device *dev);
+
+/**
+ * adis16204_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+static int adis16204_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16204_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16204_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16204_spi_write_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.tx_buf = st->tx + 2,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16204_WRITE_REG(lower_reg_address);
+	st->tx[1] = value & 0xFF;
+	st->tx[2] = ADIS16204_WRITE_REG(lower_reg_address + 1);
+	st->tx[3] = (value >> 8) & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16204_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16204_spi_read_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+			.delay_usecs = 20,
+		}, {
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+			.delay_usecs = 20,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16204_READ_REG(lower_reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				lower_reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t adis16204_read_12bit_unsigned(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = adis16204_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	if (val & ADIS16204_ERROR_ACTIVE)
+		adis16204_check_status(dev);
+
+	return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16204_read_temp(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	ssize_t ret;
+	u16 val;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16204_spi_read_reg_16(dev, ADIS16204_TEMP_OUT, (u16 *)&val);
+	if (ret)
+		goto error_ret;
+
+	if (val & ADIS16204_ERROR_ACTIVE)
+		adis16204_check_status(dev);
+
+	val &= 0xFFF;
+	ret = sprintf(buf, "%d\n", val);
+
+error_ret:
+	mutex_unlock(&indio_dev->mlock);
+	return ret;
+}
+
+static ssize_t adis16204_read_12bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s16 val = 0;
+	ssize_t ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16204_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (!ret) {
+		if (val & ADIS16204_ERROR_ACTIVE)
+			adis16204_check_status(dev);
+
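+		/* Sign extend the 12 bit two's complement reading to 16 bits */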
+		val = ((s16)(val << 4) >> 4);
+		ret = sprintf(buf, "%d\n", val);
+	}
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16204_read_14bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	s16 val = 0;
+	ssize_t ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = adis16204_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (!ret) {
+		if (val & ADIS16204_ERROR_ACTIVE)
+			adis16204_check_status(dev);
+
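+		/* Sign extend the 14 bit two's complement reading to 16 bits */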
+		val = ((s16)(val << 2) >> 2);
+		ret = sprintf(buf, "%d\n", val);
+	}
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16204_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16204_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int adis16204_reset(struct device *dev)
+{
+	int ret;
+	ret = adis16204_spi_write_reg_8(dev,
+			ADIS16204_GLOB_CMD,
+			ADIS16204_GLOB_CMD_SW_RESET);
+	if (ret)
+		dev_err(dev, "problem resetting device");
+
+	return ret;
+}
+
+static ssize_t adis16204_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -EINVAL;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return adis16204_reset(dev);
+	}
+	return -EINVAL;
+}
+
+int adis16204_set_irq(struct device *dev, bool enable)
+{
+	int ret = 0;
+	u16 msc;
+
+	ret = adis16204_spi_read_reg_16(dev, ADIS16204_MSC_CTRL, &msc);
+	if (ret)
+		goto error_ret;
+
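+	/* Data ready signal: active high, routed to DIO1 (DATA_RDY_DIO2 cleared) */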
+	msc |= ADIS16204_MSC_CTRL_ACTIVE_HIGH;
+	msc &= ~ADIS16204_MSC_CTRL_DATA_RDY_DIO2;
+	if (enable)
+		msc |= ADIS16204_MSC_CTRL_DATA_RDY_EN;
+	else
+		msc &= ~ADIS16204_MSC_CTRL_DATA_RDY_EN;
+
+	ret = adis16204_spi_write_reg_16(dev, ADIS16204_MSC_CTRL, msc);
+
+error_ret:
+	return ret;
+}
+
+static int adis16204_check_status(struct device *dev)
+{
+	u16 status;
+	int ret;
+
+	ret = adis16204_spi_read_reg_16(dev, ADIS16204_DIAG_STAT, &status);
+	if (ret < 0) {
+		dev_err(dev, "Reading status failed\n");
+		goto error_ret;
+	}
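+	/* Non-zero return if any error flag is set in the low DIAG_STAT bits */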
+	ret = status & 0x1F;
+
+	if (status & ADIS16204_DIAG_STAT_SELFTEST_FAIL)
+		dev_err(dev, "Self test failure\n");
+	if (status & ADIS16204_DIAG_STAT_SPI_FAIL)
+		dev_err(dev, "SPI failure\n");
+	if (status & ADIS16204_DIAG_STAT_FLASH_UPT)
+		dev_err(dev, "Flash update failed\n");
+	if (status & ADIS16204_DIAG_STAT_POWER_HIGH)
+		dev_err(dev, "Power supply above 3.625V\n");
+	if (status & ADIS16204_DIAG_STAT_POWER_LOW)
+		dev_err(dev, "Power supply below 2.975V\n");
+
+error_ret:
+	return ret;
+}
+
+static int adis16204_self_test(struct device *dev)
+{
+	int ret;
+	ret = adis16204_spi_write_reg_16(dev,
+			ADIS16204_MSC_CTRL,
+			ADIS16204_MSC_CTRL_SELF_TEST_EN);
+	if (ret) {
+		dev_err(dev, "problem starting self test");
+		goto err_ret;
+	}
+
+	adis16204_check_status(dev);
+
+err_ret:
+	return ret;
+}
+
+static int adis16204_initial_setup(struct adis16204_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* Disable IRQ */
+	ret = adis16204_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	/* Do self test */
+	ret = adis16204_self_test(dev);
+	if (ret) {
+		dev_err(dev, "self test failure");
+		goto err_ret;
+	}
+
+	/* Read status register to check the result */
+	ret = adis16204_check_status(dev);
+	if (ret) {
+		adis16204_reset(dev);
+		dev_err(dev, "device not playing ball -> reset");
+		msleep(ADIS16204_STARTUP_DELAY);
+		ret = adis16204_check_status(dev);
+		if (ret) {
+			dev_err(dev, "giving up");
+			goto err_ret;
+		}
+	}
+
+	printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+			st->us->chip_select, st->us->irq);
+
+err_ret:
+	return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16204_read_12bit_unsigned,
+		ADIS16204_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.00122");
+static IIO_DEV_ATTR_IN_RAW(1, adis16204_read_12bit_unsigned,
+		ADIS16204_AUX_ADC);
+static IIO_CONST_ATTR(in1_scale, "0.00061");
+
+static IIO_DEV_ATTR_ACCEL_X(adis16204_read_14bit_signed,
+		ADIS16204_XACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_Y(adis16204_read_14bit_signed,
+		ADIS16204_YACCL_OUT);
+static IIO_DEV_ATTR_ACCEL_XY(adis16204_read_14bit_signed,
+		ADIS16204_XY_RSS_OUT);
+static IIO_DEV_ATTR_ACCEL_XPEAK(adis16204_read_14bit_signed,
+		ADIS16204_X_PEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_YPEAK(adis16204_read_14bit_signed,
+		ADIS16204_Y_PEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_XYPEAK(adis16204_read_14bit_signed,
+		ADIS16204_XY_PEAK_OUT);
+static IIO_DEV_ATTR_ACCEL_X_OFFSET(S_IWUSR | S_IRUGO,
+		adis16204_read_12bit_signed,
+		adis16204_write_16bit,
+		ADIS16204_XACCL_NULL);
+static IIO_DEV_ATTR_ACCEL_Y_OFFSET(S_IWUSR | S_IRUGO,
+		adis16204_read_12bit_signed,
+		adis16204_write_16bit,
+		ADIS16204_YACCL_NULL);
+static IIO_CONST_ATTR(accel_x_scale, "0.017125");
+static IIO_CONST_ATTR(accel_y_scale, "0.008407");
+static IIO_CONST_ATTR(accel_xy_scale, "0.017125");
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16204_read_temp);
+static IIO_CONST_ATTR(temp_offset, "25");
+static IIO_CONST_ATTR(temp_scale, "-0.47");
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16204_write_reset, 0);
+
+static IIO_CONST_ATTR(name, "adis16204");
+
+static struct attribute *adis16204_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16204_event_attribute_group = {
+	.attrs = adis16204_event_attributes,
+};
+
+static struct attribute *adis16204_attributes[] = {
+	&iio_dev_attr_in0_supply_raw.dev_attr.attr,
+	&iio_const_attr_in0_supply_scale.dev_attr.attr,
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_in1_raw.dev_attr.attr,
+	&iio_const_attr_in1_scale.dev_attr.attr,
+	&iio_dev_attr_accel_x_raw.dev_attr.attr,
+	&iio_dev_attr_accel_y_raw.dev_attr.attr,
+	&iio_dev_attr_accel_xy.dev_attr.attr,
+	&iio_dev_attr_accel_xpeak.dev_attr.attr,
+	&iio_dev_attr_accel_ypeak.dev_attr.attr,
+	&iio_dev_attr_accel_xypeak.dev_attr.attr,
+	&iio_dev_attr_accel_x_offset.dev_attr.attr,
+	&iio_dev_attr_accel_y_offset.dev_attr.attr,
+	&iio_const_attr_accel_x_scale.dev_attr.attr,
+	&iio_const_attr_accel_y_scale.dev_attr.attr,
+	&iio_const_attr_accel_xy_scale.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16204_attribute_group = {
+	.attrs = adis16204_attributes,
+};
+
+static int __devinit adis16204_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16204_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16204_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16204_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16204_event_attribute_group;
+	st->indio_dev->attrs = &adis16204_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis16204_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16204_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16204");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16204_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = adis16204_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	adis16204_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (spi->irq)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16204_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16204_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int adis16204_remove(struct spi_device *spi)
+{
+	struct adis16204_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	adis16204_remove_trigger(indio_dev);
+	if (spi->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16204_uninitialize_ring(indio_dev->ring);
+	iio_device_unregister(indio_dev);
+	adis16204_unconfigure_ring(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver adis16204_driver = {
+	.driver = {
+		.name = "adis16204",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16204_probe,
+	.remove = __devexit_p(adis16204_remove),
+};
+
+static __init int adis16204_init(void)
+{
+	return spi_register_driver(&adis16204_driver);
+}
+module_init(adis16204_init);
+
+static __exit void adis16204_exit(void)
+{
+	spi_unregister_driver(&adis16204_driver);
+}
+module_exit(adis16204_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
new file mode 100644
index 0000000..420b160f
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -0,0 +1,206 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "accel.h"
+#include "../trigger.h"
+#include "adis16204.h"
+
+static IIO_SCAN_EL_C(in_supply, ADIS16204_SCAN_SUPPLY, ADIS16204_SUPPLY_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in_supply, u, 12, 16);
+static IIO_SCAN_EL_C(accel_x, ADIS16204_SCAN_ACC_X, ADIS16204_XACCL_OUT, NULL);
+static IIO_SCAN_EL_C(accel_y, ADIS16204_SCAN_ACC_Y, ADIS16204_YACCL_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(accel, s, 14, 16);
+static IIO_SCAN_EL_C(in0, ADIS16204_SCAN_AUX_ADC, ADIS16204_AUX_ADC, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(in0, u, 12, 16);
+static IIO_SCAN_EL_C(temp, ADIS16204_SCAN_TEMP, ADIS16204_TEMP_OUT, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(temp, u, 12, 16);
+static IIO_SCAN_EL_TIMESTAMP(5);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *adis16204_scan_el_attrs[] = {
+	&iio_scan_el_in_supply.dev_attr.attr,
+	&iio_const_attr_in_supply_index.dev_attr.attr,
+	&iio_const_attr_in_supply_type.dev_attr.attr,
+	&iio_scan_el_accel_x.dev_attr.attr,
+	&iio_const_attr_accel_x_index.dev_attr.attr,
+	&iio_scan_el_accel_y.dev_attr.attr,
+	&iio_const_attr_accel_y_index.dev_attr.attr,
+	&iio_const_attr_accel_type.dev_attr.attr,
+	&iio_scan_el_in0.dev_attr.attr,
+	&iio_const_attr_in0_index.dev_attr.attr,
+	&iio_const_attr_in0_type.dev_attr.attr,
+	&iio_scan_el_temp.dev_attr.attr,
+	&iio_const_attr_temp_index.dev_attr.attr,
+	&iio_const_attr_temp_type.dev_attr.attr,
+	&iio_scan_el_timestamp.dev_attr.attr,
+	&iio_const_attr_timestamp_index.dev_attr.attr,
+	&iio_const_attr_timestamp_type.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adis16204_scan_el_group = {
+	.attrs = adis16204_scan_el_attrs,
+	.name = "scan_elements",
+};
+
+/**
+ * adis16204_poll_func_th() top half interrupt handler called by trigger
+ * @private_data:	iio_dev
+ **/
+static void adis16204_poll_func_th(struct iio_dev *indio_dev, s64 timestamp)
+{
+	struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+	st->last_timestamp = timestamp;
+	schedule_work(&st->work_trigger_to_ring);
+}
+
+/**
+ * adis16204_read_ring_data() read data registers which will be placed into ring
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read
+ **/
+static int adis16204_read_ring_data(struct device *dev, u8 *rx)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[ADIS16204_OUTPUTS + 1];
+	int ret;
+	int i;
+
+	mutex_lock(&st->buf_lock);
+
+	spi_message_init(&msg);
+
+	memset(xfers, 0, sizeof(xfers));
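+	/*
+	 * Each response is clocked out during the following transfer, so an
+	 * extra transfer is queued and every rx buffer lags its tx command
+	 * by one position (rx + 2 * (i - 1) below).
+	 */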
+	for (i = 0; i <= ADIS16204_OUTPUTS; i++) {
+		xfers[i].bits_per_word = 8;
+		xfers[i].cs_change = 1;
+		xfers[i].len = 2;
+		xfers[i].delay_usecs = 20;
+		xfers[i].tx_buf = st->tx + 2 * i;
+		st->tx[2 * i] = ADIS16204_READ_REG(ADIS16204_SUPPLY_OUT + 2 * i);
+		st->tx[2 * i + 1] = 0;
+		if (i >= 1)
+			xfers[i].rx_buf = rx + 2 * (i - 1);
+		spi_message_add_tail(&xfers[i], &msg);
+	}
+
+	ret = spi_sync(st->us, &msg);
+	if (ret)
+		dev_err(&st->us->dev, "problem when burst reading");
+
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
+ * specific to be rolled into the core.
+ */
+static void adis16204_trigger_bh_to_ring(struct work_struct *work_s)
+{
+	struct adis16204_state *st
+		= container_of(work_s, struct adis16204_state,
+			       work_trigger_to_ring);
+	struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+	int i = 0;
+	s16 *data;
+	size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+	data = kmalloc(datasize, GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&st->us->dev, "memory alloc failed in ring bh");
+		return;
+	}
+
+	if (ring->scan_count)
+		if (adis16204_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+			for (; i < ring->scan_count; i++)
+				data[i] = be16_to_cpup(
+					(__be16 *)&(st->rx[i*2]));
+
+	/* Guaranteed to be aligned to an 8 byte boundary */
+	if (ring->scan_timestamp)
+		*((s64 *)(data + ((i + 3)/4)*4)) = st->last_timestamp;
+
+	ring->access.store_to(ring,
+			      (u8 *)data,
+			      st->last_timestamp);
+
+	iio_trigger_notify_done(st->indio_dev->trig);
+	kfree(data);
+
+	return;
+}
+
+void adis16204_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	kfree(indio_dev->pollfunc);
+	iio_sw_rb_free(indio_dev->ring);
+}
+
+int adis16204_configure_ring(struct iio_dev *indio_dev)
+{
+	int ret = 0;
+	struct adis16204_state *st = indio_dev->dev_data;
+	struct iio_ring_buffer *ring;
+	INIT_WORK(&st->work_trigger_to_ring, adis16204_trigger_bh_to_ring);
+
+	ring = iio_sw_rb_allocate(indio_dev);
+	if (!ring) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	indio_dev->ring = ring;
+	/* Effectively select the ring buffer implementation */
+	iio_ring_sw_register_funcs(&ring->access);
+	ring->bpe = 2;
+	ring->scan_el_attrs = &adis16204_scan_el_group;
+	ring->scan_timestamp = true;
+	ring->preenable = &iio_sw_ring_preenable;
+	ring->postenable = &iio_triggered_ring_postenable;
+	ring->predisable = &iio_triggered_ring_predisable;
+	ring->owner = THIS_MODULE;
+
+	/* Set default scan mode */
+	iio_scan_mask_set(ring, iio_scan_el_in_supply.number);
+	iio_scan_mask_set(ring, iio_scan_el_accel_x.number);
+	iio_scan_mask_set(ring, iio_scan_el_accel_y.number);
+	iio_scan_mask_set(ring, iio_scan_el_temp.number);
+	iio_scan_mask_set(ring, iio_scan_el_in0.number);
+
+	ret = iio_alloc_pollfunc(indio_dev, NULL, &adis16204_poll_func_th);
+	if (ret)
+		goto error_iio_sw_rb_free;
+
+	indio_dev->modes |= INDIO_RING_TRIGGERED;
+	return 0;
+
+error_iio_sw_rb_free:
+	iio_sw_rb_free(indio_dev->ring);
+	return ret;
+}
+
+int adis16204_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return iio_ring_buffer_register(ring, 0);
+}
+
+void adis16204_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+	iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/accel/adis16204_trigger.c b/drivers/staging/iio/accel/adis16204_trigger.c
new file mode 100644
index 0000000..8e9db90
--- /dev/null
+++ b/drivers/staging/iio/accel/adis16204_trigger.c
@@ -0,0 +1,122 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "adis16204.h"
+
+/**
+ * adis16204_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int adis16204_data_rdy_trig_poll(struct iio_dev *dev_info,
+				       int index,
+				       s64 timestamp,
+				       int no_test)
+{
+	struct adis16204_state *st = iio_dev_get_devdata(dev_info);
+	struct iio_trigger *trig = st->trig;
+
+	iio_trigger_poll(trig, timestamp);
+
+	return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &adis16204_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *adis16204_trigger_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+
+static const struct attribute_group adis16204_trigger_attr_group = {
+	.attrs = adis16204_trigger_attrs,
+};
+
+/**
+ * adis16204_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int adis16204_data_rdy_trigger_set_state(struct iio_trigger *trig,
+						bool state)
+{
+	struct adis16204_state *st = trig->private_data;
+	struct iio_dev *indio_dev = st->indio_dev;
+	int ret = 0;
+
+	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+	ret = adis16204_set_irq(&st->indio_dev->dev, state);
+	if (state == false) {
+		iio_remove_event_from_list(&iio_event_data_rdy_trig,
+					   &indio_dev->interrupts[0]
+					   ->ev_list);
+		flush_scheduled_work();
+	} else {
+		iio_add_event_to_list(&iio_event_data_rdy_trig,
+				      &indio_dev->interrupts[0]->ev_list);
+	}
+	return ret;
+}
+
+/**
+ * adis16204_trig_try_reen() - try re-enabling irq for data rdy trigger
+ * @trig:	the datardy trigger
+ **/
+static int adis16204_trig_try_reen(struct iio_trigger *trig)
+{
+	struct adis16204_state *st = trig->private_data;
+	enable_irq(st->us->irq);
+	return 0;
+}
+
+int adis16204_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct adis16204_state *st = indio_dev->dev_data;
+
+	st->trig = iio_allocate_trigger();
+	st->trig->name = kasprintf(GFP_KERNEL,
+				"adis16204-dev%d",
+				indio_dev->id);
+	if (!st->trig->name) {
+		ret = -ENOMEM;
+		goto error_free_trig;
+	}
+	st->trig->dev.parent = &st->us->dev;
+	st->trig->owner = THIS_MODULE;
+	st->trig->private_data = st;
+	st->trig->set_trigger_state = &adis16204_data_rdy_trigger_set_state;
+	st->trig->try_reenable = &adis16204_trig_try_reen;
+	st->trig->control_attrs = &adis16204_trigger_attr_group;
+	ret = iio_trigger_register(st->trig);
+
+	/* select default trigger */
+	indio_dev->trig = st->trig;
+	if (ret)
+		goto error_free_trig_name;
+
+	return 0;
+
+error_free_trig_name:
+	kfree(st->trig->name);
+error_free_trig:
+	iio_free_trigger(st->trig);
+
+	return ret;
+}
+
+void adis16204_remove_trigger(struct iio_dev *indio_dev)
+{
+	struct adis16204_state *state = indio_dev->dev_data;
+
+	iio_trigger_unregister(state->trig);
+	kfree(state->trig->name);
+	iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 033135c..8eba0af 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -105,7 +105,7 @@
 		xfers[i].bits_per_word = 8;
 		xfers[i].cs_change = 1;
 		xfers[i].len = 2;
-		xfers[i].delay_usecs = 20;
+		xfers[i].delay_usecs = 30;
 		xfers[i].tx_buf = st->tx + 2 * i;
 		st->tx[2 * i]
 			= ADIS16209_READ_REG(ADIS16209_SUPPLY_OUT + 2 * i);
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 330d5d6..1fd088a 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -517,7 +517,7 @@
 
 	ret = iio_alloc_pollfunc(indio_dev, NULL, &lis3l02dq_poll_func_th);
 	if (ret)
-		goto error_iio_sw_rb_free;;
+		goto error_iio_sw_rb_free;
 	indio_dev->modes |= INDIO_RING_TRIGGERED;
 	return 0;
 
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index acb6767..86869cd 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -27,6 +27,41 @@
 	  Say yes here to include ring buffer support in the MAX1363
 	  ADC driver.
 
+config AD7150
+	tristate "Analog Devices ad7150/1/6 capacitive sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for Analog Devices capacitive sensors
+	  (ad7150, ad7151, ad7156). Provides direct access via sysfs.
+
+config AD7152
+	tristate "Analog Devices ad7152/3 capacitive sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for Analog Devices capacitive sensors
+	  (ad7152, ad7153). Provides direct access via sysfs.
+
+config AD7291
+	tristate "Analog Devices AD7291 temperature sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for Analog Devices AD7291
+	  temperature sensors.
+
+config AD7298
+	tristate "Analog Devices AD7298 temperature sensor and ADC driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices AD7298
+	  temperature sensors and ADC.
+
+config AD7314
+	tristate "Analog Devices AD7314 temperature sensor driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices AD7314
+	  temperature sensors.
+
 config AD799X
 	tristate "Analog Devices AD799x ADC driver"
 	depends on I2C
@@ -50,9 +85,9 @@
 config AD7476
 	tristate "Analog Devices AD7475/6/7/8 AD7466/7/8 and AD7495 ADC driver"
 	depends on SPI
-	select IIO_RING_BUFFER	
+	select IIO_RING_BUFFER
 	select IIO_SW_RING
-	select IIO_TRIGGER 	
+	select IIO_TRIGGER
 	help
 	  Say yes here to build support for Analog Devices
 	  AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468, AD7495
@@ -61,3 +96,55 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad7476.
+
+config AD7887
+	tristate "Analog Devices AD7887 ADC driver"
+	depends on SPI
+	select IIO_RING_BUFFER
+	select IIO_SW_RING
+	select IIO_TRIGGER
+	help
+	  Say yes here to build support for Analog Devices
+	  AD7887 SPI analog to digital converter (ADC).
+	  If unsure, say N (but it's safe to say "Y").
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad7887.
+
+config AD7745
+	tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for Analog Devices capacitive sensors
+	  (AD7745, AD7746, AD7747). Provides direct access via sysfs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad7745.
+
+config AD7816
+	tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices AD7816/7/8
+	  temperature sensors and ADC.
+
+config ADT75
+	tristate "Analog Devices ADT75 temperature sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for Analog Devices ADT75
+	  temperature sensors.
+
+config ADT7310
+	tristate "Analog Devices ADT7310 temperature sensor driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices ADT7310
+	  temperature sensors.
+
+config ADT7410
+	tristate "Analog Devices ADT7410 temperature sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for Analog Devices ADT7410
+	  temperature sensors.
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index b62c319b..6f231a2 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -14,3 +14,18 @@
 ad7476-y := ad7476_core.o
 ad7476-$(CONFIG_IIO_RING_BUFFER) += ad7476_ring.o
 obj-$(CONFIG_AD7476) += ad7476.o
+
+ad7887-y := ad7887_core.o
+ad7887-$(CONFIG_IIO_RING_BUFFER) += ad7887_ring.o
+obj-$(CONFIG_AD7887) += ad7887.o
+
+obj-$(CONFIG_AD7150) += ad7150.o
+obj-$(CONFIG_AD7152) += ad7152.o
+obj-$(CONFIG_AD7291) += ad7291.o
+obj-$(CONFIG_AD7298) += ad7298.o
+obj-$(CONFIG_AD7314) += ad7314.o
+obj-$(CONFIG_AD7745) += ad7745.o
+obj-$(CONFIG_AD7816) += ad7816.o
+obj-$(CONFIG_ADT75) += adt75.o
+obj-$(CONFIG_ADT7310) += adt7310.o
+obj-$(CONFIG_ADT7410) += adt7410.o
diff --git a/drivers/staging/iio/adc/ad7150.c b/drivers/staging/iio/adc/ad7150.c
new file mode 100644
index 0000000..8555766
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7150.c
@@ -0,0 +1,877 @@
+/*
+ * AD7150 capacitive sensor driver supporting AD7150/1/6
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7150 registers definition
+ */
+
+#define AD7150_STATUS              0
+#define AD7150_STATUS_OUT1         (1 << 3)
+#define AD7150_STATUS_OUT2         (1 << 5)
+#define AD7150_CH1_DATA_HIGH       1
+#define AD7150_CH1_DATA_LOW        2
+#define AD7150_CH2_DATA_HIGH       3
+#define AD7150_CH2_DATA_LOW        4
+#define AD7150_CH1_AVG_HIGH        5
+#define AD7150_CH1_AVG_LOW         6
+#define AD7150_CH2_AVG_HIGH        7
+#define AD7150_CH2_AVG_LOW         8
+#define AD7150_CH1_SENSITIVITY     9
+#define AD7150_CH1_THR_HOLD_H      9
+#define AD7150_CH1_TIMEOUT         10
+#define AD7150_CH1_THR_HOLD_L      10
+#define AD7150_CH1_SETUP           11
+#define AD7150_CH2_SENSITIVITY     12
+#define AD7150_CH2_THR_HOLD_H      12
+#define AD7150_CH2_TIMEOUT         13
+#define AD7150_CH2_THR_HOLD_L      13
+#define AD7150_CH2_SETUP           14
+#define AD7150_CFG                 15
+#define AD7150_CFG_FIX             (1 << 7)
+#define AD7150_PD_TIMER            16
+#define AD7150_CH1_CAPDAC          17
+#define AD7150_CH2_CAPDAC          18
+#define AD7150_SN3                 19
+#define AD7150_SN2                 20
+#define AD7150_SN1                 21
+#define AD7150_SN0                 22
+#define AD7150_ID                  23
+
+#define AD7150_MAX_CONV_MODE       4
+
+/*
+ * struct ad7150_chip_info - chip specific information
+ */
+
+struct ad7150_chip_info {
+	const char *name;
+	struct i2c_client *client;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	bool inter;
+	s64 last_timestamp;
+	u16 ch1_threshold;     /* Ch1 Threshold (in fixed threshold mode) */
+	u8  ch1_sensitivity;   /* Ch1 Sensitivity (in adaptive threshold mode) */
+	u8  ch1_timeout;       /* Ch1 Timeout (in adaptive threshold mode) */
+	u8  ch1_setup;
+	u16 ch2_threshold;     /* Ch2 Threshold (in fixed threshold mode) */
+	u8  ch2_sensitivity;   /* Ch2 Sensitivity (in adaptive threshold mode) */
+	u8  ch2_timeout;       /* Ch2 Timeout (in adaptive threshold mode) */
+	u8  ch2_setup;
+	u8  powerdown_timer;
+	char threshold_mode[10]; /* adaptive/fixed threshold mode */
+	int old_state;
+	char *conversion_mode;
+};
+
+struct ad7150_conversion_mode {
+	char *name;
+	u8 reg_cfg;
+};
+
+struct ad7150_conversion_mode ad7150_conv_mode_table[AD7150_MAX_CONV_MODE] = {
+	{ "idle", 0 },
+	{ "continuous-conversion", 1 },
+	{ "single-conversion", 2 },
+	{ "power-down", 3 },
+};
+
+/*
+ * ad7150 register access by I2C
+ */
+
+static int ad7150_i2c_read(struct ad7150_chip_info *chip, u8 reg, u8 *data, int len)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
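+	/* Write the register address, then read len bytes back from the chip */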
+	ret = i2c_master_send(client, &reg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C write error\n");
+		return ret;
+	}
+
+	ret = i2c_master_recv(client, data, len);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read error\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int ad7150_i2c_write(struct ad7150_chip_info *chip, u8 reg, u8 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	u8 tx[2] = {
+		reg,
+		data,
+	};
+
+	ret = i2c_master_send(client, tx, 2);
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
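The two helpers above capture the AD7150's register access pattern: a one-byte write selects the register address, after which a multi-byte read returns consecutive registers starting at that address. As a minimal sketch of how a 16-bit channel value can be assembled from the HIGH register and the one that follows it (ad7150_read_u16 is a hypothetical helper name, not part of this patch; the ch1_value/ch2_value show functions below do the same thing inline):

static int ad7150_read_u16(struct ad7150_chip_info *chip, u8 reg_high, u16 *val)
{
	u8 data[2];
	int ret;

	/* read the HIGH register and the LOW register that follows it */
	ret = ad7150_i2c_read(chip, reg_high, data, 2);
	if (ret < 0)
		return ret;

	*val = ((u16)data[0] << 8) | data[1];	/* MSB is transferred first */
	return 0;
}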
+
+/*
+ * sysfs nodes
+ */
+
+#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show)				\
+	IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_AVAIL_THRESHOLD_MODES(_show)				\
+	IIO_DEVICE_ATTR(available_threshold_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_THRESHOLD_MODE(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(threshold_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_THRESHOLD(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(ch1_threshold, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_THRESHOLD(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(ch2_threshold, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_SENSITIVITY(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch1_sensitivity, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_SENSITIVITY(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch2_sensitivity, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_TIMEOUT(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch1_timeout, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_TIMEOUT(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch2_timeout, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_VALUE(_show)		\
+	IIO_DEVICE_ATTR(ch1_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH2_VALUE(_show)		\
+	IIO_DEVICE_ATTR(ch2_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH1_SETUP(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch1_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_SETUP(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(ch2_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_POWERDOWN_TIMER(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(powerdown_timer, _mode, _show, _store, 0)
+
+static ssize_t ad7150_show_conversion_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int i;
+	int len = 0;
+
+	for (i = 0; i < AD7150_MAX_CONV_MODE; i++)
+		len += sprintf(buf + len, "%s\n", ad7150_conv_mode_table[i].name);
+
+	return len;
+}
+
+static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad7150_show_conversion_modes);
+
+static ssize_t ad7150_show_conversion_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%s\n", chip->conversion_mode);
+}
+
+static ssize_t ad7150_store_conversion_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	u8 cfg;
+	int i;
+
+	ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
+
+	for (i = 0; i < AD7150_MAX_CONV_MODE; i++) {
+		if (strncmp(buf, ad7150_conv_mode_table[i].name,
+				strlen(ad7150_conv_mode_table[i].name) - 1) == 0) {
+			chip->conversion_mode = ad7150_conv_mode_table[i].name;
+			cfg |= 0x18 | ad7150_conv_mode_table[i].reg_cfg;
+			ad7150_i2c_write(chip, AD7150_CFG, cfg);
+			return len;
+		}
+	}
+
+	dev_err(dev, "unsupported conversion mode\n");
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
+		ad7150_show_conversion_mode,
+		ad7150_store_conversion_mode);
+
+static ssize_t ad7150_show_threshold_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "adaptive\nfixed\n");
+}
+
+static IIO_DEV_ATTR_AVAIL_THRESHOLD_MODES(ad7150_show_threshold_modes);
+
+static ssize_t ad7150_show_ch1_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	u8 data[2];
+
+	ad7150_i2c_read(chip, AD7150_CH1_DATA_HIGH, data, 2);
+	return sprintf(buf, "%d\n", ((int) data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH1_VALUE(ad7150_show_ch1_value);
+
+static ssize_t ad7150_show_ch2_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	u8 data[2];
+
+	ad7150_i2c_read(chip, AD7150_CH2_DATA_HIGH, data, 2);
+	return sprintf(buf, "%d\n", ((int) data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH2_VALUE(ad7150_show_ch2_value);
+
+static ssize_t ad7150_show_threshold_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%s\n", chip->threshold_mode);
+}
+
+static ssize_t ad7150_store_threshold_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	u8 cfg;
+
+	ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
+
+	if (strncmp(buf, "fixed", 5) == 0) {
+		strcpy(chip->threshold_mode, "fixed");
+		cfg |= AD7150_CFG_FIX;
+		ad7150_i2c_write(chip, AD7150_CFG, cfg);
+
+		return len;
+	} else if (strncmp(buf, "adaptive", 8) == 0) {
+		strcpy(chip->threshold_mode, "adaptive");
+		cfg &= ~AD7150_CFG_FIX;
+		ad7150_i2c_write(chip, AD7150_CFG, cfg);
+
+		return len;
+	}
+
+	dev_err(dev, "unsupported threshold mode\n");
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_THRESHOLD_MODE(S_IRUGO | S_IWUSR,
+		ad7150_show_threshold_mode,
+		ad7150_store_threshold_mode);
+
+static ssize_t ad7150_show_ch1_threshold(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch1_threshold);
+}
+
+static ssize_t ad7150_store_ch1_threshold(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad7150_i2c_write(chip, AD7150_CH1_THR_HOLD_H, data >> 8);
+		ad7150_i2c_write(chip, AD7150_CH1_THR_HOLD_L, data);
+		chip->ch1_threshold = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_THRESHOLD(S_IRUGO | S_IWUSR,
+		ad7150_show_ch1_threshold,
+		ad7150_store_ch1_threshold);
+
+static ssize_t ad7150_show_ch2_threshold(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch2_threshold);
+}
+
+static ssize_t ad7150_store_ch2_threshold(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad7150_i2c_write(chip, AD7150_CH2_THR_HOLD_H, data >> 8);
+		ad7150_i2c_write(chip, AD7150_CH2_THR_HOLD_L, data);
+		chip->ch2_threshold = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_THRESHOLD(S_IRUGO | S_IWUSR,
+		ad7150_show_ch2_threshold,
+		ad7150_store_ch2_threshold);
+
+static ssize_t ad7150_show_ch1_sensitivity(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch1_sensitivity);
+}
+
+static ssize_t ad7150_store_ch1_sensitivity(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7150_i2c_write(chip, AD7150_CH1_SENSITIVITY, data);
+		chip->ch1_sensitivity = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_SENSITIVITY(S_IRUGO | S_IWUSR,
+		ad7150_show_ch1_sensitivity,
+		ad7150_store_ch1_sensitivity);
+
+static ssize_t ad7150_show_ch2_sensitivity(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch2_sensitivity);
+}
+
+static ssize_t ad7150_store_ch2_sensitivity(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7150_i2c_write(chip, AD7150_CH2_SENSITIVITY, data);
+		chip->ch2_sensitivity = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_SENSITIVITY(S_IRUGO | S_IWUSR,
+		ad7150_show_ch2_sensitivity,
+		ad7150_store_ch2_sensitivity);
+
+static ssize_t ad7150_show_ch1_timeout(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch1_timeout);
+}
+
+static ssize_t ad7150_store_ch1_timeout(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7150_i2c_write(chip, AD7150_CH1_TIMEOUT, data);
+		chip->ch1_timeout = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_TIMEOUT(S_IRUGO | S_IWUSR,
+		ad7150_show_ch1_timeout,
+		ad7150_store_ch1_timeout);
+
+static ssize_t ad7150_show_ch2_timeout(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch2_timeout);
+}
+
+static ssize_t ad7150_store_ch2_timeout(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7150_i2c_write(chip, AD7150_CH2_TIMEOUT, data);
+		chip->ch2_timeout = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_TIMEOUT(S_IRUGO | S_IWUSR,
+		ad7150_show_ch2_timeout,
+		ad7150_store_ch2_timeout);
+
+static ssize_t ad7150_show_ch1_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->ch1_setup);
+}
+
+static ssize_t ad7150_store_ch1_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7150_i2c_write(chip, AD7150_CH1_SETUP, data);
+		chip->ch1_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_SETUP(S_IRUGO | S_IWUSR,
+		ad7150_show_ch1_setup,
+		ad7150_store_ch1_setup);
+
+static ssize_t ad7150_show_ch2_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->ch2_setup);
+}
+
+static ssize_t ad7150_store_ch2_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7150_i2c_write(chip, AD7150_CH2_SETUP, data);
+		chip->ch2_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_SETUP(S_IRUGO | S_IWUSR,
+		ad7150_show_ch2_setup,
+		ad7150_store_ch2_setup);
+
+static ssize_t ad7150_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7150_show_name, NULL, 0);
+
+static ssize_t ad7150_show_powerdown_timer(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->powerdown_timer);
+}
+
+static ssize_t ad7150_store_powerdown_timer(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x40)) {
+		chip->powerdown_timer = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_POWERDOWN_TIMER(S_IRUGO | S_IWUSR,
+		ad7150_show_powerdown_timer,
+		ad7150_store_powerdown_timer);
+
+static struct attribute *ad7150_attributes[] = {
+	&iio_dev_attr_available_threshold_modes.dev_attr.attr,
+	&iio_dev_attr_threshold_mode.dev_attr.attr,
+	&iio_dev_attr_ch1_threshold.dev_attr.attr,
+	&iio_dev_attr_ch2_threshold.dev_attr.attr,
+	&iio_dev_attr_ch1_timeout.dev_attr.attr,
+	&iio_dev_attr_ch2_timeout.dev_attr.attr,
+	&iio_dev_attr_ch1_setup.dev_attr.attr,
+	&iio_dev_attr_ch2_setup.dev_attr.attr,
+	&iio_dev_attr_ch1_sensitivity.dev_attr.attr,
+	&iio_dev_attr_ch2_sensitivity.dev_attr.attr,
+	&iio_dev_attr_powerdown_timer.dev_attr.attr,
+	&iio_dev_attr_ch1_value.dev_attr.attr,
+	&iio_dev_attr_ch2_value.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7150_attribute_group = {
+	.attrs = ad7150_attributes,
+};
+
+/*
+ * threshold events
+ */
+
+#define IIO_EVENT_CODE_CH1_HIGH    IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_CH1_LOW     IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_CH2_HIGH    IIO_BUFFER_EVENT_CODE(2)
+#define IIO_EVENT_CODE_CH2_LOW     IIO_BUFFER_EVENT_CODE(3)
+
+#define IIO_EVENT_ATTR_CH1_HIGH_SH(_evlist, _show, _store, _mask)	\
+	IIO_EVENT_ATTR_SH(ch1_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_CH2_HIGH_SH(_evlist, _show, _store, _mask)	\
+	IIO_EVENT_ATTR_SH(ch2_high, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_CH1_LOW_SH(_evlist, _show, _store, _mask)	\
+	IIO_EVENT_ATTR_SH(ch1_low, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_CH2_LOW_SH(_evlist, _show, _store, _mask)	\
+	IIO_EVENT_ATTR_SH(ch2_low, _evlist, _show, _store, _mask)
+
+static void ad7150_interrupt_handler_bh(struct work_struct *work_s)
+{
+	struct ad7150_chip_info *chip =
+		container_of(work_s, struct ad7150_chip_info, thresh_work);
+	u8 int_status;
+
+	enable_irq(chip->client->irq);
+
+	ad7150_i2c_read(chip, AD7150_STATUS, &int_status, 1);
+
+	if ((int_status & AD7150_STATUS_OUT1) && !(chip->old_state & AD7150_STATUS_OUT1))
+		iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_CH1_HIGH,
+				chip->last_timestamp);
+	else if ((!(int_status & AD7150_STATUS_OUT1)) && (chip->old_state & AD7150_STATUS_OUT1))
+		iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_CH1_LOW,
+				chip->last_timestamp);
+
+	if ((int_status & AD7150_STATUS_OUT2) && !(chip->old_state & AD7150_STATUS_OUT2))
+		iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_CH2_HIGH,
+				chip->last_timestamp);
+	else if ((!(int_status & AD7150_STATUS_OUT2)) && (chip->old_state & AD7150_STATUS_OUT2))
+		iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_CH2_LOW,
+				chip->last_timestamp);
+}
+
+static int ad7150_interrupt_handler_th(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct ad7150_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(threshold, &ad7150_interrupt_handler_th);
+
+static ssize_t ad7150_query_out_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	/*
+	 * The AD7150 provides two logic output channels, which can be used as
+	 * interrupts, but the pins are not configurable
+	 */
+	return sprintf(buf, "1\n");
+}
+
+static ssize_t ad7150_set_out_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return len;
+}
+
+IIO_EVENT_ATTR_CH1_HIGH_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+IIO_EVENT_ATTR_CH2_HIGH_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+IIO_EVENT_ATTR_CH1_LOW_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+IIO_EVENT_ATTR_CH2_LOW_SH(iio_event_threshold, ad7150_query_out_mode, ad7150_set_out_mode, 0);
+
+static struct attribute *ad7150_event_attributes[] = {
+	&iio_event_attr_ch1_high.dev_attr.attr,
+	&iio_event_attr_ch2_high.dev_attr.attr,
+	&iio_event_attr_ch1_low.dev_attr.attr,
+	&iio_event_attr_ch2_low.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ad7150_event_attribute_group = {
+	.attrs = ad7150_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7150_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret = 0, regdone = 0;
+	struct ad7150_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, chip);
+
+	chip->client = client;
+	chip->name = id->name;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	/* Establish that the iio_dev is a child of the i2c device */
+	chip->indio_dev->dev.parent = &client->dev;
+	chip->indio_dev->attrs = &ad7150_attribute_group;
+	chip->indio_dev->event_attrs = &ad7150_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)(chip);
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = 1;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+	regdone = 1;
+
+	if (client->irq && gpio_is_valid(irq_to_gpio(client->irq)) > 0) {
+		ret = iio_register_interrupt_line(client->irq,
+				chip->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"ad7150");
+		if (ret)
+			goto error_free_dev;
+
+		iio_add_event_to_list(iio_event_attr_ch2_low.listel,
+				&chip->indio_dev->interrupts[0]->ev_list);
+
+		INIT_WORK(&chip->thresh_work, ad7150_interrupt_handler_bh);
+	}
+
+	dev_info(&client->dev, "%s capacitive sensor registered, irq: %d\n", id->name, client->irq);
+
+	return 0;
+
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(chip->indio_dev);
+	else
+		iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad7150_remove(struct i2c_client *client)
+{
+	struct ad7150_chip_info *chip = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	if (client->irq && gpio_is_valid(irq_to_gpio(client->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct i2c_device_id ad7150_id[] = {
+	{ "ad7150", 0 },
+	{ "ad7151", 0 },
+	{ "ad7156", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7150_id);
+
+static struct i2c_driver ad7150_driver = {
+	.driver = {
+		.name = "ad7150",
+	},
+	.probe = ad7150_probe,
+	.remove = __devexit_p(ad7150_remove),
+	.id_table = ad7150_id,
+};
+
+static __init int ad7150_init(void)
+{
+	return i2c_add_driver(&ad7150_driver);
+}
+
+static __exit void ad7150_exit(void)
+{
+	i2c_del_driver(&ad7150_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ad7150/1/6 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7150_init);
+module_exit(ad7150_exit);
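Everything the driver exposes goes through the IIO sysfs attributes defined above, so a userspace consumer only needs plain file I/O. A rough sketch of reading a channel value follows; the sysfs path is an assumption, since the actual directory depends on the IIO core version and on the device number assigned at registration (look it up under /sys/class/iio or /sys/bus/iio on the target system):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* illustrative path only; adjust to the registered iio device */
	const char *path = "/sys/class/iio/device0/ch1_value";
	FILE *f = fopen(path, "r");
	int value;

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &value) == 1)
		printf("ch1 capacitance reading: %d\n", value);
	fclose(f);
	return 0;
}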
diff --git a/drivers/staging/iio/adc/ad7152.c b/drivers/staging/iio/adc/ad7152.c
new file mode 100644
index 0000000..fa7f8406
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7152.c
@@ -0,0 +1,610 @@
+/*
+ * AD7152 capacitive sensor driver supporting AD7152/3
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7152 registers definition
+ */
+
+#define AD7152_STATUS              0
+#define AD7152_STATUS_RDY1         (1 << 0)
+#define AD7152_STATUS_RDY2         (1 << 1)
+#define AD7152_CH1_DATA_HIGH       1
+#define AD7152_CH1_DATA_LOW        2
+#define AD7152_CH2_DATA_HIGH       3
+#define AD7152_CH2_DATA_LOW        4
+#define AD7152_CH1_OFFS_HIGH       5
+#define AD7152_CH1_OFFS_LOW        6
+#define AD7152_CH2_OFFS_HIGH       7
+#define AD7152_CH2_OFFS_LOW        8
+#define AD7152_CH1_GAIN_HIGH       9
+#define AD7152_CH1_GAIN_LOW        10
+#define AD7152_CH1_SETUP           11
+#define AD7152_CH2_GAIN_HIGH       12
+#define AD7152_CH2_GAIN_LOW        13
+#define AD7152_CH2_SETUP           14
+#define AD7152_CFG                 15
+#define AD7152_RESERVED            16
+#define AD7152_CAPDAC_POS          17
+#define AD7152_CAPDAC_NEG          18
+#define AD7152_CFG2                26
+
+#define AD7152_MAX_CONV_MODE       6
+
+/*
+ * struct ad7152_chip_info - chip specific information
+ */
+
+struct ad7152_chip_info {
+	const char *name;
+	struct i2c_client *client;
+	struct iio_dev *indio_dev;
+	u16 ch1_offset;     /* Channel 1 offset calibration coefficient */
+	u16 ch1_gain;       /* Channel 1 gain coefficient */
+	u8  ch1_setup;
+	u16 ch2_offset;     /* Channel 2 offset calibration coefficient */
+	u16 ch2_gain;       /* Channel 2 gain coefficient */
+	u8  ch2_setup;
+	u8  filter_rate_setup; /* Capacitive channel digital filter setup; conversion time/update rate setup per channel */
+	char *conversion_mode;
+};
+
+struct ad7152_conversion_mode {
+	char *name;
+	u8 reg_cfg;
+};
+
+struct ad7152_conversion_mode ad7152_conv_mode_table[AD7152_MAX_CONV_MODE] = {
+	{ "idle", 0 },
+	{ "continuous-conversion", 1 },
+	{ "single-conversion", 2 },
+	{ "power-down", 3 },
+	{ "offset-calibration", 5 },
+	{ "gain-calibration", 6 },
+};
+
+/*
+ * ad7152 register access by I2C
+ */
+
+static int ad7152_i2c_read(struct ad7152_chip_info *chip, u8 reg, u8 *data, int len)
+{
+	struct i2c_client *client = chip->client;
+	int ret;
+
+	ret = i2c_master_send(client, &reg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C write error\n");
+		return ret;
+	}
+
+	ret = i2c_master_recv(client, data, len);
+	if (ret < 0)
+		dev_err(&client->dev, "I2C read error\n");
+
+	return ret;
+}
+
+static int ad7152_i2c_write(struct ad7152_chip_info *chip, u8 reg, u8 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret;
+
+	u8 tx[2] = {
+		reg,
+		data,
+	};
+
+	ret = i2c_master_send(client, tx, 2);
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
+
+/*
+ * sysfs nodes
+ */
+
+#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show)				\
+	IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_OFFSET(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch1_offset, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_OFFSET(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch2_offset, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_GAIN(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch1_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_GAIN(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch2_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH1_VALUE(_show)		\
+	IIO_DEVICE_ATTR(ch1_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH2_VALUE(_show)		\
+	IIO_DEVICE_ATTR(ch2_value, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CH1_SETUP(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(ch1_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CH2_SETUP(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(ch2_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_FILTER_RATE_SETUP(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(filter_rate_setup, _mode, _show, _store, 0)
+
+static ssize_t ad7152_show_conversion_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int i;
+	int len = 0;
+
+	for (i = 0; i < AD7152_MAX_CONV_MODE; i++)
+		len += sprintf(buf + len, "%s ", ad7152_conv_mode_table[i].name);
+
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad7152_show_conversion_modes);
+
+static ssize_t ad7152_show_ch1_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	u8 data[2];
+
+	ad7152_i2c_read(chip, AD7152_CH1_DATA_HIGH, data, 2);
+	return sprintf(buf, "%d\n", ((int)data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH1_VALUE(ad7152_show_ch1_value);
+
+static ssize_t ad7152_show_ch2_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	u8 data[2];
+
+	ad7152_i2c_read(chip, AD7152_CH2_DATA_HIGH, data, 2);
+	return sprintf(buf, "%d\n", ((int)data[0] << 8) | data[1]);
+}
+
+static IIO_DEV_ATTR_CH2_VALUE(ad7152_show_ch2_value);
+
+static ssize_t ad7152_show_conversion_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%s\n", chip->conversion_mode);
+}
+
+static ssize_t ad7152_store_conversion_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	u8 cfg;
+	int i;
+
+	ad7152_i2c_read(chip, AD7152_CFG, &cfg, 1);
+
+	for (i = 0; i < AD7152_MAX_CONV_MODE; i++)
+		if (strncmp(buf, ad7152_conv_mode_table[i].name,
+				strlen(ad7152_conv_mode_table[i].name) - 1) == 0) {
+			chip->conversion_mode = ad7152_conv_mode_table[i].name;
+			cfg |= 0x18 | ad7152_conv_mode_table[i].reg_cfg;
+			ad7152_i2c_write(chip, AD7152_CFG, cfg);
+			return len;
+		}
+
+	dev_err(dev, "unsupported conversion mode\n");
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
+		ad7152_show_conversion_mode,
+		ad7152_store_conversion_mode);
+
+static ssize_t ad7152_show_ch1_offset(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch1_offset);
+}
+
+static ssize_t ad7152_store_ch1_offset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad7152_i2c_write(chip, AD7152_CH1_OFFS_HIGH, data >> 8);
+		ad7152_i2c_write(chip, AD7152_CH1_OFFS_LOW, data);
+		chip->ch1_offset = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_OFFSET(S_IRUGO | S_IWUSR,
+		ad7152_show_ch1_offset,
+		ad7152_store_ch1_offset);
+
+static ssize_t ad7152_show_ch2_offset(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch2_offset);
+}
+
+static ssize_t ad7152_store_ch2_offset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad7152_i2c_write(chip, AD7152_CH2_OFFS_HIGH, data >> 8);
+		ad7152_i2c_write(chip, AD7152_CH2_OFFS_LOW, data);
+		chip->ch2_offset = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_OFFSET(S_IRUGO | S_IWUSR,
+		ad7152_show_ch2_offset,
+		ad7152_store_ch2_offset);
+
+static ssize_t ad7152_show_ch1_gain(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch1_gain);
+}
+
+static ssize_t ad7152_store_ch1_gain(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad7152_i2c_write(chip, AD7152_CH1_GAIN_HIGH, data >> 8);
+		ad7152_i2c_write(chip, AD7152_CH1_GAIN_LOW, data);
+		chip->ch1_gain = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_GAIN(S_IRUGO | S_IWUSR,
+		ad7152_show_ch1_gain,
+		ad7152_store_ch1_gain);
+
+static ssize_t ad7152_show_ch2_gain(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->ch2_gain);
+}
+
+static ssize_t ad7152_store_ch2_gain(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad7152_i2c_write(chip, AD7152_CH2_GAIN_HIGH, data >> 8);
+		ad7152_i2c_write(chip, AD7152_CH2_GAIN_LOW, data);
+		chip->ch2_gain = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_GAIN(S_IRUGO | S_IWUSR,
+		ad7152_show_ch2_gain,
+		ad7152_store_ch2_gain);
+
+static ssize_t ad7152_show_ch1_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->ch1_setup);
+}
+
+static ssize_t ad7152_store_ch1_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7152_i2c_write(chip, AD7152_CH1_SETUP, data);
+		chip->ch1_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH1_SETUP(S_IRUGO | S_IWUSR,
+		ad7152_show_ch1_setup,
+		ad7152_store_ch1_setup);
+
+static ssize_t ad7152_show_ch2_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->ch2_setup);
+}
+
+static ssize_t ad7152_store_ch2_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7152_i2c_write(chip, AD7152_CH2_SETUP, data);
+		chip->ch2_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CH2_SETUP(S_IRUGO | S_IWUSR,
+		ad7152_show_ch2_setup,
+		ad7152_store_ch2_setup);
+
+static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->filter_rate_setup);
+}
+
+static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad7152_i2c_write(chip, AD7152_CFG2, data);
+		chip->filter_rate_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_FILTER_RATE_SETUP(S_IRUGO | S_IWUSR,
+		ad7152_show_filter_rate_setup,
+		ad7152_store_filter_rate_setup);
+
+static ssize_t ad7152_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7152_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7152_show_name, NULL, 0);
+
+static struct attribute *ad7152_attributes[] = {
+	&iio_dev_attr_available_conversion_modes.dev_attr.attr,
+	&iio_dev_attr_conversion_mode.dev_attr.attr,
+	&iio_dev_attr_ch1_gain.dev_attr.attr,
+	&iio_dev_attr_ch2_gain.dev_attr.attr,
+	&iio_dev_attr_ch1_offset.dev_attr.attr,
+	&iio_dev_attr_ch2_offset.dev_attr.attr,
+	&iio_dev_attr_ch1_value.dev_attr.attr,
+	&iio_dev_attr_ch2_value.dev_attr.attr,
+	&iio_dev_attr_ch1_setup.dev_attr.attr,
+	&iio_dev_attr_ch2_setup.dev_attr.attr,
+	&iio_dev_attr_filter_rate_setup.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7152_attribute_group = {
+	.attrs = ad7152_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7152_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret = 0;
+	struct ad7152_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, chip);
+
+	chip->client = client;
+	chip->name = id->name;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	/* Establish that the iio_dev is a child of the i2c device */
+	chip->indio_dev->dev.parent = &client->dev;
+	chip->indio_dev->attrs = &ad7152_attribute_group;
+	chip->indio_dev->dev_data = (void *)(chip);
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	dev_info(&client->dev, "%s capacitive sensor registered\n", id->name);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad7152_remove(struct i2c_client *client)
+{
+	struct ad7152_chip_info *chip = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	if (client->irq && gpio_is_valid(irq_to_gpio(client->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct i2c_device_id ad7152_id[] = {
+	{ "ad7152", 0 },
+	{ "ad7153", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7152_id);
+
+static struct i2c_driver ad7152_driver = {
+	.driver = {
+		.name = "ad7152",
+	},
+	.probe = ad7152_probe,
+	.remove = __devexit_p(ad7152_remove),
+	.id_table = ad7152_id,
+};
+
+static __init int ad7152_init(void)
+{
+	return i2c_add_driver(&ad7152_driver);
+}
+
+static __exit void ad7152_exit(void)
+{
+	i2c_del_driver(&ad7152_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ad7152/3 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7152_init);
+module_exit(ad7152_exit);
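The offset and gain store functions above each split a 16-bit value into two byte writes to adjacent HIGH/LOW registers. If that pattern grew, it could be factored into a helper along these lines (a sketch only; ad7152_i2c_write_u16 is a hypothetical name, and it relies on the LOW register immediately following the HIGH one, which holds for all of the offset and gain register pairs defined above):

static int ad7152_i2c_write_u16(struct ad7152_chip_info *chip, u8 reg_high, u16 val)
{
	int ret;

	ret = ad7152_i2c_write(chip, reg_high, val >> 8);	/* HIGH byte first */
	if (ret < 0)
		return ret;

	return ad7152_i2c_write(chip, reg_high + 1, val & 0xff);	/* then the LOW byte */
}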
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
new file mode 100644
index 0000000..34041a7
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -0,0 +1,1039 @@
+/*
+ * AD7291 digital temperature sensor driver supporting AD7291
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7291 registers definition
+ */
+#define AD7291_COMMAND			0
+#define AD7291_VOLTAGE			1
+#define AD7291_T_SENSE			2
+#define AD7291_T_AVERAGE		3
+#define AD7291_VOLTAGE_LIMIT_BASE	4
+#define AD7291_VOLTAGE_LIMIT_COUNT	8
+#define AD7291_T_SENSE_HIGH		0x1c
+#define AD7291_T_SENSE_LOW		0x1d
+#define AD7291_T_SENSE_HYST		0x1e
+#define AD7291_VOLTAGE_ALERT_STATUS	0x1f
+#define AD7291_T_ALERT_STATUS		0x20
+
+/*
+ * AD7291 command
+ */
+#define AD7291_AUTOCYCLE		0x1
+#define AD7291_RESET			0x2
+#define AD7291_ALART_CLEAR		0x4
+#define AD7291_ALART_POLARITY		0x8
+#define AD7291_EXT_REF			0x10
+#define AD7291_NOISE_DELAY		0x20
+#define AD7291_T_SENSE_MASK		0x40
+#define AD7291_VOLTAGE_MASK		0xff00
+#define AD7291_VOLTAGE_OFFSET		0x8
+
+/*
+ * AD7291 value masks
+ */
+#define AD7291_CHANNEL_MASK		0xf000
+#define AD7291_VALUE_MASK		0xfff
+#define AD7291_T_VALUE_SIGN		0x400
+#define AD7291_T_VALUE_FLOAT_OFFSET	2
+#define AD7291_T_VALUE_FLOAT_MASK	0x2
+
+/*
+ * struct ad7291_chip_info - chip specific information
+ */
+
+struct ad7291_chip_info {
+	const char *name;
+	struct i2c_client *client;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	s64 last_timestamp;
+	u16 command;
+	u8  channels;	/* Active voltage channels */
+};
+
+/*
+ * struct ad7291_limit_regs - limit registers (data high/low and hysteresis)
+ */
+
+struct ad7291_limit_regs {
+	u16	data_high;
+	u16	data_low;
+	u16	hysteresis;
+};
+
+/*
+ * ad7291 register access by I2C
+ */
+static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret = i2c_smbus_read_word_data(client, reg);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read error\n");
+		return ret;
+	}
+
+	*data = swab16((u16)ret);
+
+	return 0;
+}
+
+static int ad7291_i2c_write(struct ad7291_chip_info *chip, u8 reg, u16 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret = i2c_smbus_write_word_data(client, reg, swab16(data));
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
+
+/* Returns negative errno, or else the number of words read. */
+static int ad7291_i2c_read_data(struct ad7291_chip_info *chip, u8 reg, u16 *data)
+{
+	struct i2c_client *client = chip->client;
+	u8 commands[4];
+	int ret = 0;
+	int i, count;
+
+	if (reg == AD7291_T_SENSE || reg == AD7291_T_AVERAGE)
+		count = 2;
+	else if (reg == AD7291_VOLTAGE) {
+		if (!chip->channels) {
+			dev_err(&client->dev, "No voltage channel is selected.\n");
+			return -EINVAL;
+		}
+		count = 2 + chip->channels * 2;
+	} else {
+		dev_err(&client->dev, "I2C wrong data register\n");
+		return -EINVAL;
+	}
+
+	commands[0] = 0;
+	commands[1] = (chip->command >> 8) & 0xff;
+	commands[2] = chip->command & 0xff;
+	commands[3] = reg;
+
+	ret = i2c_master_send(client, commands, 4);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C master send error\n");
+		return ret;
+	}
+
+	ret = i2c_master_recv(client, (u8 *)data, count);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C master receive error\n");
+		return ret;
+	}
+	ret >>= 2;
+
+	for (i = 0; i < ret; i++)
+		data[i] = swab16(data[i]);
+
+	return ret;
+}
+
+static ssize_t ad7291_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+
+	if (chip->command & AD7291_AUTOCYCLE)
+		return sprintf(buf, "autocycle\n");
+	else
+		return sprintf(buf, "command\n");
+}
+
+static ssize_t ad7291_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	int ret;
+
+	command = chip->command & (~AD7291_AUTOCYCLE);
+	if (strncmp(buf, "autocycle", 9) == 0)
+		command |= AD7291_AUTOCYCLE;
+
+	ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+	if (ret)
+		return -EIO;
+
+	chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		ad7291_show_mode,
+		ad7291_store_mode,
+		0);
+
+static ssize_t ad7291_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "command\nautocycle\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7291_show_available_modes, NULL, 0);
+
+static ssize_t ad7291_store_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	int ret;
+
+	command = chip->command | AD7291_RESET;
+
+	ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+		NULL,
+		ad7291_store_reset,
+		0);
+
+static ssize_t ad7291_show_ext_ref(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->command & AD7291_EXT_REF));
+}
+
+static ssize_t ad7291_store_ext_ref(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	int ret;
+
+	command = chip->command & (~AD7291_EXT_REF);
+	if (strncmp(buf, "1", 1) == 0)
+		command |= AD7291_EXT_REF;
+
+	ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+	if (ret)
+		return -EIO;
+
+	chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(ext_ref, S_IRUGO | S_IWUSR,
+		ad7291_show_ext_ref,
+		ad7291_store_ext_ref,
+		0);
+
+static ssize_t ad7291_show_noise_delay(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->command & AD7291_NOISE_DELAY));
+}
+
+static ssize_t ad7291_store_noise_delay(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	int ret;
+
+	command = chip->command & (~AD7291_NOISE_DELAY);
+	if (strncmp(buf, "1", 1) == 0)
+		command |= AD7291_NOISE_DELAY;
+
+	ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+	if (ret)
+		return -EIO;
+
+	chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(noise_delay, S_IRUGO | S_IWUSR,
+		ad7291_show_noise_delay,
+		ad7291_store_noise_delay,
+		0);
+
+static ssize_t ad7291_show_t_sense(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	ret = ad7291_i2c_read_data(chip, AD7291_T_SENSE, &data);
+	if (ret)
+		return -EIO;
+
+	if (data & AD7291_T_VALUE_SIGN) {
+		/* convert two's complement to positive value */
+		data = (AD7291_T_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.2d\n", sign,
+		(data >> AD7291_T_VALUE_FLOAT_OFFSET),
+		(data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_sense, S_IRUGO, ad7291_show_t_sense, NULL, 0);
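For reference, here is a worked decode of one raw temperature word, following exactly the arithmetic in ad7291_show_t_sense() above. The raw value 0x7fe is an arbitrary illustration, not a reading from hardware, and the fragment is not compilable on its own since it reuses the register-field macros defined at the top of this file:

	u16 raw  = 0x7fe & AD7291_VALUE_MASK;		/* sign bit AD7291_T_VALUE_SIGN (0x400) is set */
	u16 data = (AD7291_T_VALUE_SIGN << 1) - raw;	/* 0x800 - 0x7fe = 2, reported with a '-' sign */
	int ipart = data >> AD7291_T_VALUE_FLOAT_OFFSET;	/* 2 >> 2 = 0 */
	int frac  = (data & AD7291_T_VALUE_FLOAT_MASK) * 25;	/* (2 & 0x2) * 25 = 50 */
	/* the t_sense attribute therefore reads back as "-0.50" */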
+
+static ssize_t ad7291_show_t_average(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	ret = ad7291_i2c_read_data(chip, AD7291_T_AVERAGE, &data);
+	if (ret)
+		return -EIO;
+
+	if (data & AD7291_T_VALUE_SIGN) {
+		/* convert two's complement to positive value */
+		data = (AD7291_T_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.2d\n", sign,
+		(data >> AD7291_T_VALUE_FLOAT_OFFSET),
+		(data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_average, S_IRUGO, ad7291_show_t_average, NULL, 0);
+
+static ssize_t ad7291_show_voltage(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 data[AD7291_VOLTAGE_LIMIT_COUNT];
+	int i, size = 0, ret;
+
+	ret = ad7291_i2c_read_data(chip, AD7291_VOLTAGE, data);
+	if (ret)
+		return -EIO;
+
+	for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (AD7291_T_SENSE_MASK << i)) {
+			ret = sprintf(buf, "channel[%d]=%d\n", i,
+					data[i] & AD7291_VALUE_MASK);
+			if (ret < 0)
+				break;
+			buf += ret;
+			size += ret;
+		}
+	}
+
+	return size;
+}
+
+static IIO_DEVICE_ATTR(voltage, S_IRUGO, ad7291_show_voltage, NULL, 0);
+
+static ssize_t ad7291_show_channel_mask(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%x\n", (chip->command & AD7291_VOLTAGE_MASK) >>
+			AD7291_VOLTAGE_OFFSET);
+}
+
+static ssize_t ad7291_store_channel_mask(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	unsigned long data;
+	int i, ret;
+
+	ret = strict_strtoul(buf, 16, &data);
+	if (ret || data > 0xff)
+		return -EINVAL;
+
+	command = chip->command & (~AD7291_VOLTAGE_MASK);
+	command |= data << AD7291_VOLTAGE_OFFSET;
+
+	ret = ad7291_i2c_write(chip, AD7291_COMMAND, command);
+	if (ret)
+		return -EIO;
+
+	chip->command = command;
+
+	for (i = 0, chip->channels = 0; i < AD7291_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (AD7291_T_SENSE_MASK << i))
+			chip->channels++;
+	}
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(channel_mask, S_IRUGO | S_IWUSR,
+		ad7291_show_channel_mask,
+		ad7291_store_channel_mask,
+		0);
+
+static ssize_t ad7291_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7291_show_name, NULL, 0);
+
+static struct attribute *ad7291_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_dev_attr_ext_ref.dev_attr.attr,
+	&iio_dev_attr_noise_delay.dev_attr.attr,
+	&iio_dev_attr_t_sense.dev_attr.attr,
+	&iio_dev_attr_t_average.dev_attr.attr,
+	&iio_dev_attr_voltage.dev_attr.attr,
+	&iio_dev_attr_channel_mask.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7291_attribute_group = {
+	.attrs = ad7291_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_AD7291_T_SENSE_HIGH  IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_AD7291_T_SENSE_LOW   IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_AD7291_T_AVG_HIGH    IIO_BUFFER_EVENT_CODE(2)
+#define IIO_EVENT_CODE_AD7291_T_AVG_LOW     IIO_BUFFER_EVENT_CODE(3)
+#define IIO_EVENT_CODE_AD7291_VOLTAGE_BASE  IIO_BUFFER_EVENT_CODE(4)
+
+static void ad7291_interrupt_bh(struct work_struct *work_s)
+{
+	struct ad7291_chip_info *chip =
+		container_of(work_s, struct ad7291_chip_info, thresh_work);
+	u16 t_status, v_status;
+	u16 command;
+	int i;
+
+	if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
+		return;
+
+	if (ad7291_i2c_read(chip, AD7291_VOLTAGE_ALERT_STATUS, &v_status))
+		return;
+
+	if (!(t_status || v_status))
+		return;
+
+	command = chip->command | AD7291_ALART_CLEAR;
+	ad7291_i2c_write(chip, AD7291_COMMAND, command);
+
+	command = chip->command & ~AD7291_ALART_CLEAR;
+	ad7291_i2c_write(chip, AD7291_COMMAND, command);
+
+	enable_irq(chip->client->irq);
+
+	for (i = 0; i < 4; i++) {
+		if (t_status & (1 << i))
+			iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_AD7291_T_SENSE_HIGH + i,
+				chip->last_timestamp);
+	}
+
+	for (i = 0; i < AD7291_VOLTAGE_LIMIT_COUNT*2; i++) {
+		if (v_status & (1 << i))
+			iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_AD7291_VOLTAGE_BASE + i,
+				chip->last_timestamp);
+	}
+}
+
+static int ad7291_interrupt(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(ad7291, &ad7291_interrupt);
+
+static inline ssize_t ad7291_show_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	ret = ad7291_i2c_read(chip, bound_reg, &data);
+	if (ret)
+		return -EIO;
+
+	data &= AD7291_VALUE_MASK;
+	if (data & AD7291_T_VALUE_SIGN) {
+		/* convert two's complement to positive value */
+		data = (AD7291_T_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.2d\n", sign,
+			data >> AD7291_T_VALUE_FLOAT_OFFSET,
+			(data & AD7291_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static inline ssize_t ad7291_set_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	long tmp1, tmp2 = 0;
+	u16 data;
+	char *pos;
+	int ret;
+
+	pos = strchr(buf, '.');
+
+	ret = strict_strtol(buf, 10, &tmp1);
+
+	if (ret || tmp1 > 127 || tmp1 < -128)
+		return -EINVAL;
+
+	if (pos) {
+		len = strlen(pos);
+		if (len > AD7291_T_VALUE_FLOAT_OFFSET)
+			len = AD7291_T_VALUE_FLOAT_OFFSET;
+		pos[len] = 0;
+		ret = strict_strtol(pos, 10, &tmp2);
+
+		if (!ret)
+			tmp2 = (tmp2 / 25) * 25;
+	}
+
+	if (tmp1 < 0)
+		data = (u16)(-tmp1);
+	else
+		data = (u16)tmp1;
+	data = (data << AD7291_T_VALUE_FLOAT_OFFSET) |
+		(tmp2 & AD7291_T_VALUE_FLOAT_MASK);
+	if (tmp1 < 0)
+		/* convert the positive value back to two's complement */
+		data = (AD7291_T_VALUE_SIGN << 1) - data;
+
+	ret = ad7291_i2c_write(chip, bound_reg, data);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t ad7291_show_t_sense_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return ad7291_show_t_bound(dev, attr,
+			AD7291_T_SENSE_HIGH, buf);
+}
+
+static inline ssize_t ad7291_set_t_sense_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return ad7291_set_t_bound(dev, attr,
+			AD7291_T_SENSE_HIGH, buf, len);
+}
+
+static ssize_t ad7291_show_t_sense_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return ad7291_show_t_bound(dev, attr,
+			AD7291_T_SENSE_LOW, buf);
+}
+
+static inline ssize_t ad7291_set_t_sense_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return ad7291_set_t_bound(dev, attr,
+			AD7291_T_SENSE_LOW, buf, len);
+}
+
+static ssize_t ad7291_show_t_sense_hyst(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return ad7291_show_t_bound(dev, attr,
+			AD7291_T_SENSE_HYST, buf);
+}
+
+static inline ssize_t ad7291_set_t_sense_hyst(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return ad7291_set_t_bound(dev, attr,
+			AD7291_T_SENSE_HYST, buf, len);
+}
+
+static inline ssize_t ad7291_show_v_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	int ret;
+
+	if (bound_reg < AD7291_VOLTAGE_LIMIT_BASE ||
+		bound_reg >= AD7291_VOLTAGE_LIMIT_BASE +
+		AD7291_VOLTAGE_LIMIT_COUNT)
+		return -EINVAL;
+
+	ret = ad7291_i2c_read(chip, bound_reg, &data);
+	if (ret)
+		return -EIO;
+
+	data &= AD7291_VALUE_MASK;
+
+	return sprintf(buf, "%d\n", data);
+}
+
+static inline ssize_t ad7291_set_v_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7291_chip_info *chip = dev_info->dev_data;
+	unsigned long value;
+	u16 data;
+	int ret;
+
+	if (bound_reg < AD7291_VOLTAGE_LIMIT_BASE ||
+		bound_reg >= AD7291_VOLTAGE_LIMIT_BASE +
+		AD7291_VOLTAGE_LIMIT_COUNT)
+		return -EINVAL;
+
+	ret = strict_strtoul(buf, 10, &value);
+
+	if (ret || value >= 4096)
+		return -EINVAL;
+
+	data = (u16)value;
+	ret = ad7291_i2c_write(chip, bound_reg, data);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static int ad7291_get_voltage_limit_regs(const char *channel)
+{
+	int index;
+
+	if (strlen(channel) < 3 || channel[0] != 'v')
+		return -EINVAL;
+
+	index = channel[1] - '0';
+	if (index >= AD7291_VOLTAGE_LIMIT_COUNT)
+		return -EINVAL;
+
+	/* each channel has a high, low and hysteresis limit register */
+	return AD7291_VOLTAGE_LIMIT_BASE + index * 3;
+}
+
+static ssize_t ad7291_show_voltage_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int regs;
+
+	regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+	if (regs < 0)
+		return regs;
+
+	return ad7291_show_t_bound(dev, attr, regs, buf);
+}
+
+static inline ssize_t ad7291_set_voltage_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	int regs;
+
+	regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+	if (regs < 0)
+		return regs;
+
+	return ad7291_set_t_bound(dev, attr, regs, buf, len);
+}
+
+static ssize_t ad7291_show_voltage_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int regs;
+
+	regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+	if (regs < 0)
+		return regs;
+
+	return ad7291_show_t_bound(dev, attr, regs+1, buf);
+}
+
+static inline ssize_t ad7291_set_voltage_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	int regs;
+
+	regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+	if (regs < 0)
+		return regs;
+
+	return ad7291_set_t_bound(dev, attr, regs+1, buf, len);
+}
+
+static ssize_t ad7291_show_voltage_hyst(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int regs;
+
+	regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+	if (regs < 0)
+		return regs;
+
+	return ad7291_show_t_bound(dev, attr, regs+2, buf);
+}
+
+static inline ssize_t ad7291_set_voltage_hyst(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	int regs;
+
+	regs = ad7291_get_voltage_limit_regs(attr->attr.name);
+
+	if (regs < 0)
+		return regs;
+
+	return ad7291_set_t_bound(dev, attr, regs+2, buf, len);
+}
+
+IIO_EVENT_ATTR_SH(t_sense_high, iio_event_ad7291,
+		ad7291_show_t_sense_high, ad7291_set_t_sense_high, 0);
+IIO_EVENT_ATTR_SH(t_sense_low, iio_event_ad7291,
+		ad7291_show_t_sense_low, ad7291_set_t_sense_low, 0);
+IIO_EVENT_ATTR_SH(t_sense_hyst, iio_event_ad7291,
+		ad7291_show_t_sense_hyst, ad7291_set_t_sense_hyst, 0);
+
+IIO_EVENT_ATTR_SH(v0_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v0_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v0_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v1_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v1_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v1_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v2_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v2_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v2_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v3_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v3_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v3_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v4_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v4_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v4_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v5_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v5_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v5_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v6_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v6_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v6_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+IIO_EVENT_ATTR_SH(v7_high, iio_event_ad7291,
+		ad7291_show_voltage_high, ad7291_set_voltage_high, 0);
+IIO_EVENT_ATTR_SH(v7_low, iio_event_ad7291,
+		ad7291_show_voltage_low, ad7291_set_voltage_low, 0);
+IIO_EVENT_ATTR_SH(v7_hyst, iio_event_ad7291,
+		ad7291_show_voltage_hyst, ad7291_set_voltage_hyst, 0);
+
+static struct attribute *ad7291_event_attributes[] = {
+	&iio_event_attr_t_sense_high.dev_attr.attr,
+	&iio_event_attr_t_sense_low.dev_attr.attr,
+	&iio_event_attr_t_sense_hyst.dev_attr.attr,
+	&iio_event_attr_v0_high.dev_attr.attr,
+	&iio_event_attr_v0_low.dev_attr.attr,
+	&iio_event_attr_v0_hyst.dev_attr.attr,
+	&iio_event_attr_v1_high.dev_attr.attr,
+	&iio_event_attr_v1_low.dev_attr.attr,
+	&iio_event_attr_v1_hyst.dev_attr.attr,
+	&iio_event_attr_v2_high.dev_attr.attr,
+	&iio_event_attr_v2_low.dev_attr.attr,
+	&iio_event_attr_v2_hyst.dev_attr.attr,
+	&iio_event_attr_v3_high.dev_attr.attr,
+	&iio_event_attr_v3_low.dev_attr.attr,
+	&iio_event_attr_v3_hyst.dev_attr.attr,
+	&iio_event_attr_v4_high.dev_attr.attr,
+	&iio_event_attr_v4_low.dev_attr.attr,
+	&iio_event_attr_v4_hyst.dev_attr.attr,
+	&iio_event_attr_v5_high.dev_attr.attr,
+	&iio_event_attr_v5_low.dev_attr.attr,
+	&iio_event_attr_v5_hyst.dev_attr.attr,
+	&iio_event_attr_v6_high.dev_attr.attr,
+	&iio_event_attr_v6_low.dev_attr.attr,
+	&iio_event_attr_v6_hyst.dev_attr.attr,
+	&iio_event_attr_v7_high.dev_attr.attr,
+	&iio_event_attr_v7_low.dev_attr.attr,
+	&iio_event_attr_v7_hyst.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ad7291_event_attribute_group = {
+	.attrs = ad7291_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7291_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct ad7291_chip_info *chip;
+	int ret = 0;
+
+	chip = kzalloc(sizeof(struct ad7291_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, chip);
+
+	chip->client = client;
+	chip->name = id->name;
+	chip->command = AD7291_NOISE_DELAY | AD7291_T_SENSE_MASK;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	chip->indio_dev->dev.parent = &client->dev;
+	chip->indio_dev->attrs = &ad7291_attribute_group;
+	chip->indio_dev->event_attrs = &ad7291_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = 1;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	if (client->irq > 0) {
+		ret = iio_register_interrupt_line(client->irq,
+				chip->indio_dev,
+				0,
+				IRQF_TRIGGER_LOW,
+				chip->name);
+		if (ret)
+			goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_ad7291.
+		 * All event attributes bind to the same event handler,
+		 * so the handler is registered only once.
+		 */
+		iio_add_event_to_list(&iio_event_ad7291,
+				&chip->indio_dev->interrupts[0]->ev_list);
+
+		INIT_WORK(&chip->thresh_work, ad7291_interrupt_bh);
+
+		/* set irq polarity low level */
+		chip->command |= AD7291_ALART_POLARITY;
+	}
+
+	ret = ad7291_i2c_write(chip, AD7291_COMMAND, chip->command);
+	if (ret) {
+		ret = -EIO;
+		goto error_unreg_irq;
+	}
+
+	dev_info(&client->dev, "%s temperature sensor registered.\n",
+			 id->name);
+
+	return 0;
+
+error_unreg_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+	iio_device_unregister(chip->indio_dev);
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit ad7291_remove(struct i2c_client *client)
+{
+	struct ad7291_chip_info *chip = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	if (client->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct i2c_device_id ad7291_id[] = {
+	{ "ad7291", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7291_id);
+
+static struct i2c_driver ad7291_driver = {
+	.driver = {
+		.name = "ad7291",
+	},
+	.probe = ad7291_probe,
+	.remove = __devexit_p(ad7291_remove),
+	.id_table = ad7291_id,
+};
+
+static __init int ad7291_init(void)
+{
+	return i2c_add_driver(&ad7291_driver);
+}
+
+static __exit void ad7291_exit(void)
+{
+	i2c_del_driver(&ad7291_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7291 digital"
+			" temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7291_init);
+module_exit(ad7291_exit);
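
As a point of reference, here is a minimal user-space sketch of how the event threshold attributes registered above (t_sense_high, v0_high and friends) might be read and written through sysfs. It is not part of the patch; the device directory name below is an assumption, since IIO device numbering is assigned at runtime, and error handling is kept to a minimum.

#include <stdio.h>

/* Assumed sysfs location of the registered ad7291 IIO device. */
#define AD7291_SYSFS_DIR	"/sys/bus/iio/devices/device0"

static int read_attr(const char *attr, char *out, int len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), AD7291_SYSFS_DIR "/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(out, len, f))
		out[0] = '\0';
	fclose(f);
	return 0;
}

static int write_attr(const char *attr, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), AD7291_SYSFS_DIR "/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(value, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char buf[32];

	if (!read_attr("v0_high", buf, sizeof(buf)))
		printf("current v0_high limit: %s", buf);

	/* limits are raw 12-bit codes, so 2048 is mid-scale */
	return write_attr("v0_high", "2048\n");
}
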
diff --git a/drivers/staging/iio/adc/ad7298.c b/drivers/staging/iio/adc/ad7298.c
new file mode 100644
index 0000000..1a080c9
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7298.c
@@ -0,0 +1,501 @@
+/*
+ * AD7298 digital temperature sensor and ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7298 command
+ */
+#define AD7298_PD			0x1
+#define AD7298_T_AVG_MASK		0x2
+#define AD7298_EXT_REF			0x4
+#define AD7298_T_SENSE_MASK		0x20
+#define AD7298_VOLTAGE_MASK		0x3fc0
+#define AD7298_VOLTAGE_OFFSET		0x6
+#define AD7298_VOLTAGE_LIMIT_COUNT	8
+#define AD7298_REPEAT			0x40
+#define AD7298_WRITE			0x80
+
+/*
+ * AD7298 value masks
+ */
+#define AD7298_CHANNEL_MASK		0xf000
+#define AD7298_VALUE_MASK		0xfff
+#define AD7298_T_VALUE_SIGN		0x400
+#define AD7298_T_VALUE_FLOAT_OFFSET	2
+#define AD7298_T_VALUE_FLOAT_MASK	0x2
+
+/*
+ * struct ad7298_chip_info - chip specific information
+ */
+
+struct ad7298_chip_info {
+	const char *name;
+	struct spi_device *spi_dev;
+	struct iio_dev *indio_dev;
+	u16 command;
+	u16 busy_pin;
+	u8  channels;	/* Active voltage channels */
+};
+
+/*
+ * ad7298 register access by SPI
+ */
+static int ad7298_spi_write(struct ad7298_chip_info *chip, u16 data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	int ret = 0;
+
+	data |= AD7298_WRITE;
+	data = cpu_to_be16(data);
+	ret = spi_write(spi_dev, (u8 *)&data, sizeof(data));
+	if (ret < 0)
+		dev_err(&spi_dev->dev, "SPI write error\n");
+
+	return ret;
+}
+
+static int ad7298_spi_read(struct ad7298_chip_info *chip, u16 mask, u16 *data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	int ret = 0;
+	u8 count = chip->channels;
+	u16 command;
+	int i;
+
+	if (mask & AD7298_T_SENSE_MASK) {
+		command = chip->command & ~(AD7298_T_AVG_MASK | AD7298_VOLTAGE_MASK);
+		command |= AD7298_T_SENSE_MASK;
+		count = 1;
+	} else if (mask & AD7298_T_AVG_MASK) {
+		command = chip->command & ~AD7298_VOLTAGE_MASK;
+		command |= AD7298_T_SENSE_MASK | AD7298_T_AVG_MASK;
+		count = 2;
+	} else if (mask & AD7298_VOLTAGE_MASK) {
+		command = chip->command & ~(AD7298_T_AVG_MASK | AD7298_T_SENSE_MASK);
+		count = chip->channels;
+	}
+
+	ret = ad7298_spi_write(chip, command);
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI write command error\n");
+		return ret;
+	}
+
+	ret = spi_read(spi_dev, (u8 *)&command, sizeof(command));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI read error\n");
+		return ret;
+	}
+
+	i = 10000;
+	while (i && gpio_get_value(chip->busy_pin)) {
+		cpu_relax();
+		i--;
+	}
+	if (!i) {
+		dev_err(&spi_dev->dev, "Timed out waiting for conversion.\n");
+		return -EBUSY;
+	}
+
+	for (i = 0; i < count; i++) {
+		ret = spi_read(spi_dev, (u8 *)&data[i], sizeof(data[i]));
+		if (ret < 0) {
+			dev_err(&spi_dev->dev, "SPI read error\n");
+			return ret;
+		}
+		data[i] = be16_to_cpu(data[i]);
+	}
+
+	return 0;
+}
+
+static ssize_t ad7298_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+
+	if (chip->command & AD7298_REPEAT)
+		return sprintf(buf, "repeat\n");
+	else
+		return sprintf(buf, "normal\n");
+}
+
+static ssize_t ad7298_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+
+	if (sysfs_streq(buf, "repeat"))
+		chip->command |= AD7298_REPEAT;
+	else
+		chip->command &= ~AD7298_REPEAT;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		ad7298_show_mode,
+		ad7298_store_mode,
+		0);
+
+static ssize_t ad7298_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "normal\nrepeat\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7298_show_available_modes, NULL, 0);
+
+static ssize_t ad7298_store_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	int ret;
+
+	command = chip->command & ~AD7298_PD;
+
+	ret = ad7298_spi_write(chip, command);
+	if (ret)
+		return -EIO;
+
+	command = chip->command | AD7298_PD;
+
+	ret = ad7298_spi_write(chip, command);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+		NULL,
+		ad7298_store_reset,
+		0);
+
+static ssize_t ad7298_show_ext_ref(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->command & AD7298_EXT_REF));
+}
+
+static ssize_t ad7298_store_ext_ref(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	u16 command;
+	int ret;
+
+	command = chip->command & (~AD7298_EXT_REF);
+	if (sysfs_streq(buf, "1"))
+		command |= AD7298_EXT_REF;
+
+	ret = ad7298_spi_write(chip, command);
+	if (ret)
+		return -EIO;
+
+	chip->command = command;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(ext_ref, S_IRUGO | S_IWUSR,
+		ad7298_show_ext_ref,
+		ad7298_store_ext_ref,
+		0);
+
+static ssize_t ad7298_show_t_sense(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	ret = ad7298_spi_read(chip, AD7298_T_SENSE_MASK, &data);
+	if (ret)
+		return -EIO;
+
+	if (data & AD7298_T_VALUE_SIGN) {
+		/* convert two's complement to a positive value */
+		data = (AD7298_T_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.2d\n", sign,
+		(data >> AD7298_T_VALUE_FLOAT_OFFSET),
+		(data & AD7298_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_sense, S_IRUGO, ad7298_show_t_sense, NULL, 0);
+
+static ssize_t ad7298_show_t_average(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	u16 data[2];
+	char sign = ' ';
+	int ret;
+
+	ret = ad7298_spi_read(chip, AD7298_T_AVG_MASK, data);
+	if (ret)
+		return -EIO;
+
+	if (data[1] & AD7298_T_VALUE_SIGN) {
+		/* convert two's complement to a positive value */
+		data[1] = (AD7298_T_VALUE_SIGN << 1) - data[1];
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.2d\n", sign,
+		(data[1] >> AD7298_T_VALUE_FLOAT_OFFSET),
+		(data[1] & AD7298_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static IIO_DEVICE_ATTR(t_average, S_IRUGO, ad7298_show_t_average, NULL, 0);
+
+static ssize_t ad7298_show_voltage(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	u16 data[AD7298_VOLTAGE_LIMIT_COUNT];
+	int i, size = 0, ret;
+
+	ret = ad7298_spi_read(chip, AD7298_VOLTAGE_MASK, data);
+	if (ret)
+		return -EIO;
+
+	for (i = 0; i < AD7298_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (1 << (AD7298_VOLTAGE_OFFSET + i))) {
+			ret = sprintf(buf, "channel[%d]=%d\n", i,
+					data[i] & AD7298_VALUE_MASK);
+			if (ret < 0)
+				break;
+			buf += ret;
+			size += ret;
+		}
+	}
+
+	return size;
+}
+
+static IIO_DEVICE_ATTR(voltage, S_IRUGO, ad7298_show_voltage, NULL, 0);
+
+static ssize_t ad7298_show_channel_mask(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%x\n", (chip->command & AD7298_VOLTAGE_MASK) >>
+			AD7298_VOLTAGE_OFFSET);
+}
+
+static ssize_t ad7298_store_channel_mask(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int i, ret;
+
+	ret = strict_strtoul(buf, 16, &data);
+	if (ret || data > 0xff)
+		return -EINVAL;
+
+	chip->command &= (~AD7298_VOLTAGE_MASK);
+	chip->command |= data << AD7298_VOLTAGE_OFFSET;
+
+	for (i = 0, chip->channels = 0; i < AD7298_VOLTAGE_LIMIT_COUNT; i++) {
+		if (chip->command & (1 << (AD7298_VOLTAGE_OFFSET + i)))
+			chip->channels++;
+	}
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(channel_mask, S_IRUGO | S_IWUSR,
+		ad7298_show_channel_mask,
+		ad7298_store_channel_mask,
+		0);
+
+static ssize_t ad7298_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7298_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7298_show_name, NULL, 0);
+
+static struct attribute *ad7298_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_dev_attr_ext_ref.dev_attr.attr,
+	&iio_dev_attr_t_sense.dev_attr.attr,
+	&iio_dev_attr_t_average.dev_attr.attr,
+	&iio_dev_attr_voltage.dev_attr.attr,
+	&iio_dev_attr_channel_mask.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7298_attribute_group = {
+	.attrs = ad7298_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+static int __devinit ad7298_probe(struct spi_device *spi_dev)
+{
+	struct ad7298_chip_info *chip;
+	unsigned short *pins = spi_dev->dev.platform_data;
+	int ret = 0;
+
+	chip = kzalloc(sizeof(struct ad7298_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	dev_set_drvdata(&spi_dev->dev, chip);
+
+	chip->spi_dev = spi_dev;
+	chip->name = spi_dev->modalias;
+	chip->busy_pin = pins[0];
+
+	ret = gpio_request(chip->busy_pin, chip->name);
+	if (ret) {
+		dev_err(&spi_dev->dev, "Failed to request busy GPIO %d.\n",
+			chip->busy_pin);
+		goto error_free_chip;
+	}
+	gpio_direction_input(chip->busy_pin);
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_gpio;
+	}
+
+	chip->indio_dev->dev.parent = &spi_dev->dev;
+	chip->indio_dev->attrs = &ad7298_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
+			 chip->name);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_gpio:
+	gpio_free(chip->busy_pin);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit ad7298_remove(struct spi_device *spi_dev)
+{
+	struct ad7298_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	dev_set_drvdata(&spi_dev->dev, NULL);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	gpio_free(chip->busy_pin);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct spi_device_id ad7298_id[] = {
+	{ "ad7298", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(spi, ad7298_id);
+
+static struct spi_driver ad7298_driver = {
+	.driver = {
+		.name = "ad7298",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad7298_probe,
+	.remove = __devexit_p(ad7298_remove),
+	.id_table = ad7298_id,
+};
+
+static __init int ad7298_init(void)
+{
+	return spi_register_driver(&ad7298_driver);
+}
+
+static __exit void ad7298_exit(void)
+{
+	spi_unregister_driver(&ad7298_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7298 digital"
+			" temperature sensor and ADC driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7298_init);
+module_exit(ad7298_exit);
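
The sign handling in ad7298_show_t_sense() and ad7298_show_t_average() above relies on the identity that, for an n-bit two's complement code whose sign bit is S, (S << 1) - code is the magnitude of a negative reading. A small host-side sketch (not part of the patch, constant copied from the driver) makes the arithmetic concrete.

#include <assert.h>
#include <stdio.h>

#define AD7298_T_VALUE_SIGN	0x400	/* sign bit of the 11-bit temperature code */

int main(void)
{
	unsigned int code = 0x7fe;	/* 11-bit two's complement for -2 LSB */
	unsigned int magnitude = code;
	char sign = ' ';

	if (code & AD7298_T_VALUE_SIGN) {
		/* same expression as the driver: 0x800 - 0x7fe = 2 */
		magnitude = (AD7298_T_VALUE_SIGN << 1) - code;
		sign = '-';
	}

	assert(magnitude == 2);
	printf("%c%u LSB\n", sign, magnitude);
	return 0;
}
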
diff --git a/drivers/staging/iio/adc/ad7314.c b/drivers/staging/iio/adc/ad7314.c
new file mode 100644
index 0000000..8c17b1f
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7314.c
@@ -0,0 +1,308 @@
+/*
+ * AD7314 digital temperature sensor driver for AD7314, ADT7301 and ADT7302
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7314 power mode
+ */
+#define AD7314_PD		0x2000
+
+/*
+ * AD7314 temperature masks
+ */
+#define AD7314_TEMP_SIGN		0x200
+#define AD7314_TEMP_MASK		0x7FE0
+#define AD7314_TEMP_OFFSET		5
+#define AD7314_TEMP_FLOAT_OFFSET	2
+#define AD7314_TEMP_FLOAT_MASK		0x3
+
+/*
+ * ADT7301 and ADT7302 temperature masks
+ */
+#define ADT7301_TEMP_SIGN		0x2000
+#define ADT7301_TEMP_MASK		0x2FFF
+#define ADT7301_TEMP_FLOAT_OFFSET	5
+#define ADT7301_TEMP_FLOAT_MASK		0x1F
+
+/*
+ * struct ad7314_chip_info - chip specific information
+ */
+
+struct ad7314_chip_info {
+	const char *name;
+	struct spi_device *spi_dev;
+	struct iio_dev *indio_dev;
+	s64 last_timestamp;
+	u8  mode;
+};
+
+/*
+ * ad7314 register access by SPI
+ */
+
+static int ad7314_spi_read(struct ad7314_chip_info *chip, u16 *data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	int ret = 0;
+	u16 value;
+
+	ret = spi_read(spi_dev, (u8 *)&value, sizeof(value));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI read error\n");
+		return ret;
+	}
+
+	*data = be16_to_cpu((u16)value);
+
+	return ret;
+}
+
+static int ad7314_spi_write(struct ad7314_chip_info *chip, u16 data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	int ret = 0;
+	u16 value = cpu_to_be16(data);
+
+	ret = spi_write(spi_dev, (u8 *)&value, sizeof(value));
+	if (ret < 0)
+		dev_err(&spi_dev->dev, "SPI write error\n");
+
+	return ret;
+}
+
+static ssize_t ad7314_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7314_chip_info *chip = dev_info->dev_data;
+
+	if (chip->mode)
+		return sprintf(buf, "power-save\n");
+	else
+		return sprintf(buf, "full\n");
+}
+
+static ssize_t ad7314_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7314_chip_info *chip = dev_info->dev_data;
+	u16 mode = 0;
+	int ret;
+
+	if (sysfs_streq(buf, "power-save"))
+		mode = AD7314_PD;
+
+	ret = ad7314_spi_write(chip, mode);
+	if (ret)
+		return -EIO;
+
+	chip->mode = mode;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		ad7314_show_mode,
+		ad7314_store_mode,
+		0);
+
+static ssize_t ad7314_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "full\npower-save\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7314_show_available_modes, NULL, 0);
+
+static ssize_t ad7314_show_temperature(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7314_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	if (chip->mode) {
+		ret = ad7314_spi_write(chip, 0);
+		if (ret)
+			return -EIO;
+	}
+
+	ret = ad7314_spi_read(chip, &data);
+	if (ret)
+		return -EIO;
+
+	if (chip->mode)
+		ad7314_spi_write(chip, chip->mode);
+
+	if (!strcmp(chip->name, "ad7314")) {
+		data = (data & AD7314_TEMP_MASK) >>
+			AD7314_TEMP_OFFSET;
+		if (data & AD7314_TEMP_SIGN) {
+			data = (AD7314_TEMP_SIGN << 1) - data;
+			sign = '-';
+		}
+
+		return sprintf(buf, "%c%d.%.2d\n", sign,
+				data >> AD7314_TEMP_FLOAT_OFFSET,
+				(data & AD7314_TEMP_FLOAT_MASK) * 25);
+	} else {
+		data &= ADT7301_TEMP_MASK;
+		if (data & ADT7301_TEMP_SIGN) {
+			data = (ADT7301_TEMP_SIGN << 1) - data;
+			sign = '-';
+		}
+
+		return sprintf(buf, "%c%d.%.5d\n", sign,
+				data >> ADT7301_TEMP_FLOAT_OFFSET,
+				(data & ADT7301_TEMP_FLOAT_MASK) * 3125);
+	}
+}
+
+static IIO_DEVICE_ATTR(temperature, S_IRUGO, ad7314_show_temperature, NULL, 0);
+
+static ssize_t ad7314_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7314_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL, 0);
+
+static struct attribute *ad7314_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_temperature.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7314_attribute_group = {
+	.attrs = ad7314_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7314_probe(struct spi_device *spi_dev)
+{
+	struct ad7314_chip_info *chip;
+	int ret = 0;
+
+	chip = kzalloc(sizeof(struct ad7314_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	dev_set_drvdata(&spi_dev->dev, chip);
+
+	chip->spi_dev = spi_dev;
+	chip->name = spi_dev->modalias;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	chip->indio_dev->dev.parent = &spi_dev->dev;
+	chip->indio_dev->attrs = &ad7314_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
+			 chip->name);
+
+	return 0;
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit ad7314_remove(struct spi_device *spi_dev)
+{
+	struct ad7314_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	dev_set_drvdata(&spi_dev->dev, NULL);
+	if (spi_dev->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct spi_device_id ad7314_id[] = {
+	{ "adt7301", 0 },
+	{ "adt7302", 0 },
+	{ "ad7314", 0 },
+	{}
+};
+
+static struct spi_driver ad7314_driver = {
+	.driver = {
+		.name = "ad7314",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad7314_probe,
+	.remove = __devexit_p(ad7314_remove),
+	.id_table = ad7314_id,
+};
+
+static __init int ad7314_init(void)
+{
+	return spi_register_driver(&ad7314_driver);
+}
+
+static __exit void ad7314_exit(void)
+{
+	spi_unregister_driver(&ad7314_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7314, ADT7301 and ADT7302 digital"
+			" temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7314_init);
+module_exit(ad7314_exit);
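
For the ADT7301/ADT7302 branch of ad7314_show_temperature() above, the code carries a 0x2000 sign bit and five fractional bits, i.e. 1/32 degree C per LSB, which is where the * 3125 and the five-digit fraction come from. The host-side sketch below (not part of the patch) expresses the same scaling as signed millidegrees; it uses plain sign extension rather than the driver's sign-character/magnitude formatting.

#include <stdio.h>

#define ADT7301_TEMP_SIGN	0x2000	/* sign bit of the 14-bit code */

/* Convert a raw ADT7301/ADT7302 code to millidegrees Celsius. */
static long adt7301_code_to_mdegc(unsigned int code)
{
	long value = code & (ADT7301_TEMP_SIGN - 1);	/* magnitude bits */

	if (code & ADT7301_TEMP_SIGN)
		value -= ADT7301_TEMP_SIGN;	/* sign-extend the 14-bit code */

	return value * 1000 / 32;		/* 1 LSB = 31.25 mC */
}

int main(void)
{
	printf("%ld mC\n", adt7301_code_to_mdegc(0x0320));	/* +25000 */
	printf("%ld mC\n", adt7301_code_to_mdegc(0x3ce0));	/* -25000 */
	return 0;
}
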
diff --git a/drivers/staging/iio/adc/ad7745.c b/drivers/staging/iio/adc/ad7745.c
new file mode 100644
index 0000000..ab7ef84
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7745.c
@@ -0,0 +1,734 @@
+/*
+ * AD774X capacitive sensor driver supporting AD7745/6/7
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD774X registers definition
+ */
+
+#define AD774X_STATUS		0
+#define AD774X_STATUS_RDY	(1 << 2)
+#define AD774X_STATUS_RDYVT	(1 << 1)
+#define AD774X_STATUS_RDYCAP	(1 << 0)
+#define AD774X_CAP_DATA_HIGH	1
+#define AD774X_CAP_DATA_MID	2
+#define AD774X_CAP_DATA_LOW	3
+#define AD774X_VT_DATA_HIGH	4
+#define AD774X_VT_DATA_MID	5
+#define AD774X_VT_DATA_LOW	6
+#define AD774X_CAP_SETUP	7
+#define AD774X_VT_SETUP		8
+#define AD774X_EXEC_SETUP	9
+#define AD774X_CFG		10
+#define AD774X_CAPDACA		11
+#define AD774X_CAPDACB		12
+#define AD774X_CAPDAC_EN	(1 << 7)
+#define AD774X_CAP_OFFH		13
+#define AD774X_CAP_OFFL		14
+#define AD774X_CAP_GAINH	15
+#define AD774X_CAP_GAINL	16
+#define AD774X_VOLT_GAINH	17
+#define AD774X_VOLT_GAINL	18
+
+#define AD774X_MAX_CONV_MODE	6
+
+/*
+ * struct ad774x_chip_info - chip specific information
+ */
+
+struct ad774x_chip_info {
+	const char *name;
+	struct i2c_client *client;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	bool inter;
+	s64 last_timestamp;
+	u16 cap_offs;                   /* Capacitive offset */
+	u16 cap_gain;                   /* Capacitive gain calibration */
+	u16 volt_gain;                  /* Voltage gain calibration */
+	u8  cap_setup;
+	u8  vt_setup;
+	u8  exec_setup;
+
+	char *conversion_mode;
+};
+
+struct ad774x_conversion_mode {
+	char *name;
+	u8 reg_cfg;
+};
+
+static struct ad774x_conversion_mode ad774x_conv_mode_table[AD774X_MAX_CONV_MODE] = {
+	{ "idle", 0 },
+	{ "continuous-conversion", 1 },
+	{ "single-conversion", 2 },
+	{ "power-down", 3 },
+	{ "offset-calibration", 5 },
+	{ "gain-calibration", 6 },
+};
+
+/*
+ * ad774x register access by I2C
+ */
+
+static int ad774x_i2c_read(struct ad774x_chip_info *chip, u8 reg, u8 *data, int len)
+{
+	struct i2c_client *client = chip->client;
+	int ret;
+
+	ret = i2c_master_send(client, &reg, 1);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C write error\n");
+		return ret;
+	}
+
+	ret = i2c_master_recv(client, data, len);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read error\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int ad774x_i2c_write(struct ad774x_chip_info *chip, u8 reg, u8 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret;
+
+	u8 tx[2] = {
+		reg,
+		data,
+	};
+
+	ret = i2c_master_send(client, tx, 2);
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
+
+/*
+ * sysfs nodes
+ */
+
+#define IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(_show)				\
+	IIO_DEVICE_ATTR(available_conversion_modes, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_CONVERSION_MODE(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(conversion_mode, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_SETUP(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(cap_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_VT_SETUP(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(in0_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_EXEC_SETUP(_mode, _show, _store)              \
+	IIO_DEVICE_ATTR(exec_setup, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_VOLT_GAIN(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(in0_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_OFFS(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(cap_offs, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_GAIN(_mode, _show, _store)		\
+	IIO_DEVICE_ATTR(cap_gain, _mode, _show, _store, 0)
+#define IIO_DEV_ATTR_CAP_DATA(_show)		\
+	IIO_DEVICE_ATTR(cap0_raw, S_IRUGO, _show, NULL, 0)
+#define IIO_DEV_ATTR_VT_DATA(_show)		\
+	IIO_DEVICE_ATTR(in0_raw, S_IRUGO, _show, NULL, 0)
+
+static ssize_t ad774x_show_conversion_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int i;
+	int len = 0;
+
+	for (i = 0; i < AD774X_MAX_CONV_MODE; i++)
+		len += sprintf(buf + len, "%s ", ad774x_conv_mode_table[i].name);
+
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+static IIO_DEV_ATTR_AVAIL_CONVERSION_MODES(ad774x_show_conversion_modes);
+
+static ssize_t ad774x_show_conversion_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%s\n", chip->conversion_mode);
+}
+
+static ssize_t ad774x_store_conversion_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	u8 cfg;
+	int i;
+
+	ad774x_i2c_read(chip, AD774X_CFG, &cfg, 1);
+
+	for (i = 0; i < AD774X_MAX_CONV_MODE; i++) {
+		if (strncmp(buf, ad774x_conv_mode_table[i].name,
+				strlen(ad774x_conv_mode_table[i].name) - 1) == 0) {
+			chip->conversion_mode = ad774x_conv_mode_table[i].name;
+			cfg |= 0x18 | ad774x_conv_mode_table[i].reg_cfg;
+			ad774x_i2c_write(chip, AD774X_CFG, cfg);
+			return len;
+		}
+	}
+
+	dev_err(dev, "unsupported conversion mode\n");
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CONVERSION_MODE(S_IRUGO | S_IWUSR,
+		ad774x_show_conversion_mode,
+		ad774x_store_conversion_mode);
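
To make the register write performed by ad774x_store_conversion_mode() above concrete: the current CFG byte is read back and then ORed with 0x18 (the bits the driver always sets) plus the mode code from ad774x_conv_mode_table. A trivial host-side sketch (not part of the patch) of the value written when "single-conversion", whose reg_cfg is 2, is selected:

#include <assert.h>

int main(void)
{
	unsigned char cfg = 0x00;	/* pretend read-back of AD774X_CFG */

	cfg |= 0x18 | 2;		/* "single-conversion" entry above */

	assert(cfg == 0x1a);
	return 0;
}
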
+
+static ssize_t ad774x_show_dac_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	u8 data;
+
+	ad774x_i2c_read(chip, this_attr->address, &data, 1);
+
+	return sprintf(buf, "%02x\n", data & 0x7F);
+}
+
+static ssize_t ad774x_store_dac_value(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if (!ret) {
+		ad774x_i2c_write(chip, this_attr->address,
+			(data ? AD774X_CAPDAC_EN : 0) | (data & 0x7F));
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(capdac0_raw, S_IRUGO | S_IWUSR,
+			ad774x_show_dac_value,
+			ad774x_store_dac_value,
+			AD774X_CAPDACA);
+
+static IIO_DEVICE_ATTR(capdac1_raw, S_IRUGO | S_IWUSR,
+			ad774x_show_dac_value,
+			ad774x_store_dac_value,
+			AD774X_CAPDACB);
+
+static ssize_t ad774x_show_cap_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->cap_setup);
+}
+
+static ssize_t ad774x_store_cap_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad774x_i2c_write(chip, AD774X_CAP_SETUP, data);
+		chip->cap_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CAP_SETUP(S_IRUGO | S_IWUSR,
+		ad774x_show_cap_setup,
+		ad774x_store_cap_setup);
+
+static ssize_t ad774x_show_vt_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->vt_setup);
+}
+
+static ssize_t ad774x_store_vt_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad774x_i2c_write(chip, AD774X_VT_SETUP, data);
+		chip->vt_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_VT_SETUP(S_IRUGO | S_IWUSR,
+		ad774x_show_vt_setup,
+		ad774x_store_vt_setup);
+
+static ssize_t ad774x_show_exec_setup(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%02x\n", chip->exec_setup);
+}
+
+static ssize_t ad774x_store_exec_setup(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x100)) {
+		ad774x_i2c_write(chip, AD774X_EXEC_SETUP, data);
+		chip->exec_setup = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_EXEC_SETUP(S_IRUGO | S_IWUSR,
+		ad774x_show_exec_setup,
+		ad774x_store_exec_setup);
+
+static ssize_t ad774x_show_volt_gain(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->volt_gain);
+}
+
+static ssize_t ad774x_store_volt_gain(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad774x_i2c_write(chip, AD774X_VOLT_GAINH, data >> 8);
+		ad774x_i2c_write(chip, AD774X_VOLT_GAINL, data);
+		chip->volt_gain = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_VOLT_GAIN(S_IRUGO | S_IWUSR,
+		ad774x_show_volt_gain,
+		ad774x_store_volt_gain);
+
+static ssize_t ad774x_show_cap_data(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	u8 tmp[3];
+
+	ad774x_i2c_read(chip, AD774X_CAP_DATA_HIGH, tmp, 3);
+	data = ((int)tmp[0] << 16) | ((int)tmp[1] << 8) | (int)tmp[2];
+
+	return sprintf(buf, "%ld\n", data);
+}
+
+static IIO_DEV_ATTR_CAP_DATA(ad774x_show_cap_data);
+
+static ssize_t ad774x_show_vt_data(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	u8 tmp[3];
+
+	ad774x_i2c_read(chip, AD774X_VT_DATA_HIGH, tmp, 3);
+	data = ((int)tmp[0] << 16) | ((int)tmp[1] << 8) | (int)tmp[2];
+
+	return sprintf(buf, "%ld\n", data);
+}
+
+static IIO_DEV_ATTR_VT_DATA(ad774x_show_vt_data);
+
+static ssize_t ad774x_show_cap_offs(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->cap_offs);
+}
+
+static ssize_t ad774x_store_cap_offs(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad774x_i2c_write(chip, AD774X_CAP_OFFH, data >> 8);
+		ad774x_i2c_write(chip, AD774X_CAP_OFFL, data);
+		chip->cap_offs = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CAP_OFFS(S_IRUGO | S_IWUSR,
+		ad774x_show_cap_offs,
+		ad774x_store_cap_offs);
+
+static ssize_t ad774x_show_cap_gain(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->cap_gain);
+}
+
+static ssize_t ad774x_store_cap_gain(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if ((!ret) && (data < 0x10000)) {
+		ad774x_i2c_write(chip, AD774X_CAP_GAINH, data >> 8);
+		ad774x_i2c_write(chip, AD774X_CAP_GAINL, data);
+		chip->cap_gain = data;
+		return len;
+	}
+
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_CAP_GAIN(S_IRUGO | S_IWUSR,
+		ad774x_show_cap_gain,
+		ad774x_store_cap_gain);
+
+static ssize_t ad774x_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad774x_show_name, NULL, 0);
+
+static struct attribute *ad774x_attributes[] = {
+	&iio_dev_attr_available_conversion_modes.dev_attr.attr,
+	&iio_dev_attr_conversion_mode.dev_attr.attr,
+	&iio_dev_attr_cap_setup.dev_attr.attr,
+	&iio_dev_attr_in0_setup.dev_attr.attr,
+	&iio_dev_attr_exec_setup.dev_attr.attr,
+	&iio_dev_attr_cap_offs.dev_attr.attr,
+	&iio_dev_attr_cap_gain.dev_attr.attr,
+	&iio_dev_attr_in0_gain.dev_attr.attr,
+	&iio_dev_attr_in0_raw.dev_attr.attr,
+	&iio_dev_attr_cap0_raw.dev_attr.attr,
+	&iio_dev_attr_capdac0_raw.dev_attr.attr,
+	&iio_dev_attr_capdac1_raw.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad774x_attribute_group = {
+	.attrs = ad774x_attributes,
+};
+
+/*
+ * data ready events
+ */
+
+#define IIO_EVENT_CODE_CAP_RDY     IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_VT_RDY      IIO_BUFFER_EVENT_CODE(1)
+
+#define IIO_EVENT_ATTR_CAP_RDY_SH(_evlist, _show, _store, _mask)	\
+	IIO_EVENT_ATTR_SH(cap_rdy, _evlist, _show, _store, _mask)
+
+#define IIO_EVENT_ATTR_VT_RDY_SH(_evlist, _show, _store, _mask)	\
+	IIO_EVENT_ATTR_SH(vt_rdy, _evlist, _show, _store, _mask)
+
+static void ad774x_interrupt_handler_bh(struct work_struct *work_s)
+{
+	struct ad774x_chip_info *chip =
+		container_of(work_s, struct ad774x_chip_info, thresh_work);
+	u8 int_status;
+
+	enable_irq(chip->client->irq);
+
+	ad774x_i2c_read(chip, AD774X_STATUS, &int_status, 1);
+
+	if (int_status & AD774X_STATUS_RDYCAP)
+		iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_CAP_RDY,
+				chip->last_timestamp);
+
+	if (int_status & AD774X_STATUS_RDYVT)
+		iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_VT_RDY,
+				chip->last_timestamp);
+}
+
+static int ad774x_interrupt_handler_th(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct ad774x_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(data_rdy, &ad774x_interrupt_handler_th);
+
+static ssize_t ad774x_query_out_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	/*
+	 * The AD774X provides one /RDY pin, which can be used as an
+	 * interrupt, but the pin is not configurable.
+	 */
+	return sprintf(buf, "1\n");
+}
+
+static ssize_t ad774x_set_out_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return len;
+}
+
+IIO_EVENT_ATTR_CAP_RDY_SH(iio_event_data_rdy, ad774x_query_out_mode, ad774x_set_out_mode, 0);
+IIO_EVENT_ATTR_VT_RDY_SH(iio_event_data_rdy, ad774x_query_out_mode, ad774x_set_out_mode, 0);
+
+static struct attribute *ad774x_event_attributes[] = {
+	&iio_event_attr_cap_rdy.dev_attr.attr,
+	&iio_event_attr_vt_rdy.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ad774x_event_attribute_group = {
+	.attrs = ad774x_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad774x_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret = 0, regdone = 0;
+	struct ad774x_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (chip == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, chip);
+
+	chip->client = client;
+	chip->name = id->name;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	/* Establish that the iio_dev is a child of the i2c device */
+	chip->indio_dev->dev.parent = &client->dev;
+	chip->indio_dev->attrs = &ad774x_attribute_group;
+	chip->indio_dev->event_attrs = &ad774x_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)(chip);
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = 1;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+	regdone = 1;
+
+	if (client->irq) {
+		ret = iio_register_interrupt_line(client->irq,
+				chip->indio_dev,
+				0,
+				IRQF_TRIGGER_FALLING,
+				"ad774x");
+		if (ret)
+			goto error_free_dev;
+
+		iio_add_event_to_list(iio_event_attr_cap_rdy.listel,
+				&chip->indio_dev->interrupts[0]->ev_list);
+
+		INIT_WORK(&chip->thresh_work, ad774x_interrupt_handler_bh);
+	}
+
+	dev_info(&client->dev, "%s capacitive sensor registered, irq: %d\n", id->name, client->irq);
+
+	return 0;
+
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(chip->indio_dev);
+	else
+		iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad774x_remove(struct i2c_client *client)
+{
+	struct ad774x_chip_info *chip = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	if (client->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct i2c_device_id ad774x_id[] = {
+	{ "ad7745", 0 },
+	{ "ad7746", 0 },
+	{ "ad7747", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad774x_id);
+
+static struct i2c_driver ad774x_driver = {
+	.driver = {
+		.name = "ad774x",
+	},
+	.probe = ad774x_probe,
+	.remove = __devexit_p(ad774x_remove),
+	.id_table = ad774x_id,
+};
+
+static __init int ad774x_init(void)
+{
+	return i2c_add_driver(&ad774x_driver);
+}
+
+static __exit void ad774x_exit(void)
+{
+	i2c_del_driver(&ad774x_driver);
+}
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ad7745/6/7 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad774x_init);
+module_exit(ad774x_exit);
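
The cap0_raw and in0_raw attributes above export the 24-bit conversion result exactly as assembled from the three data registers in ad774x_show_cap_data() and ad774x_show_vt_data(). The sketch below (not part of the patch) repeats that assembly in user space; treating the capacitance code as offset binary around 0x800000 is an assumption about the converter's output coding, not something this driver itself does.

#include <stdio.h>

/* Combine the HIGH/MID/LOW data register bytes, as the driver does. */
static long ad774x_assemble(unsigned char high, unsigned char mid,
			    unsigned char low)
{
	return ((long)high << 16) | ((long)mid << 8) | low;
}

int main(void)
{
	long raw = ad774x_assemble(0x80, 0x00, 0x00);	/* mid-scale sample */
	long centered = raw - 0x800000;			/* assumed offset-binary zero */

	printf("raw=%ld centered=%ld\n", raw, centered);
	return 0;
}
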
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
new file mode 100644
index 0000000..ad7415a
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -0,0 +1,535 @@
+/*
+ * AD7816 digital temperature sensor driver supporting AD7816/7/8
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * AD7816 config masks
+ */
+#define AD7816_FULL			0x1
+#define AD7816_PD			0x2
+#define AD7816_CS_MASK			0x7
+#define AD7816_CS_MAX			0x4
+
+/*
+ * AD7816 temperature masks
+ */
+#define AD7816_VALUE_OFFSET		6
+#define AD7816_BOUND_VALUE_BASE		0x8
+#define AD7816_BOUND_VALUE_MIN		-95
+#define AD7816_BOUND_VALUE_MAX		152
+#define AD7816_TEMP_FLOAT_OFFSET	2
+#define AD7816_TEMP_FLOAT_MASK		0x3
+
+
+/*
+ * struct ad7816_chip_info - chip specific information
+ */
+
+struct ad7816_chip_info {
+	const char *name;
+	struct spi_device *spi_dev;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	s64 last_timestamp;
+	u16 rdwr_pin;
+	u16 convert_pin;
+	u16 busy_pin;
+	u8  oti_data[AD7816_CS_MAX+1];
+	u8  channel_id;	/* channel 0 is always temperature */
+	u8  mode;
+};
+
+/*
+ * ad7816 data access by SPI
+ */
+static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	int ret = 0;
+
+	gpio_set_value(chip->rdwr_pin, 1);
+	gpio_set_value(chip->rdwr_pin, 0);
+	ret = spi_write(spi_dev, &chip->channel_id, sizeof(chip->channel_id));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI channel setting error\n");
+		return ret;
+	}
+	gpio_set_value(chip->rdwr_pin, 1);
+
+
+	if (chip->mode == AD7816_PD) { /* operating mode 2 */
+		gpio_set_value(chip->convert_pin, 1);
+		gpio_set_value(chip->convert_pin, 0);
+	} else { /* operating mode 1 */
+		gpio_set_value(chip->convert_pin, 0);
+		gpio_set_value(chip->convert_pin, 1);
+	}
+
+	while (gpio_get_value(chip->busy_pin))
+		cpu_relax();
+
+	gpio_set_value(chip->rdwr_pin, 0);
+	gpio_set_value(chip->rdwr_pin, 1);
+	ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI data read error\n");
+		return ret;
+	}
+
+	*data = be16_to_cpu(*data);
+
+	return ret;
+}
+
+static int ad7816_spi_write(struct ad7816_chip_info *chip, u8 data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	int ret = 0;
+
+	gpio_set_value(chip->rdwr_pin, 1);
+	gpio_set_value(chip->rdwr_pin, 0);
+	ret = spi_write(spi_dev, &data, sizeof(data));
+	if (ret < 0)
+		dev_err(&spi_dev->dev, "SPI oti data write error\n");
+
+	return ret;
+}
+
+static ssize_t ad7816_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+
+	if (chip->mode)
+		return sprintf(buf, "power-save\n");
+	else
+		return sprintf(buf, "full\n");
+}
+
+static ssize_t ad7816_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+
+	if (sysfs_streq(buf, "full")) {
+		gpio_set_value(chip->rdwr_pin, 1);
+		chip->mode = AD7816_FULL;
+	} else {
+		gpio_set_value(chip->rdwr_pin, 0);
+		chip->mode = AD7816_PD;
+	}
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		ad7816_show_mode,
+		ad7816_store_mode,
+		0);
+
+static ssize_t ad7816_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "full\npower-save\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, ad7816_show_available_modes, NULL, 0);
+
+static ssize_t ad7816_show_channel(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", chip->channel_id);
+}
+
+static ssize_t ad7816_store_channel(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+	if (data > AD7816_CS_MAX && data != AD7816_CS_MASK) {
+		dev_err(&chip->spi_dev->dev, "Invalid channel id %lu for %s.\n",
+			data, chip->name);
+		return -EINVAL;
+	} else if (strcmp(chip->name, "ad7818") == 0 && data > 1) {
+		dev_err(&chip->spi_dev->dev,
+			"Invalid channel id %lu for ad7818.\n", data);
+		return -EINVAL;
+	} else if (strcmp(chip->name, "ad7816") == 0 && data > 0) {
+		dev_err(&chip->spi_dev->dev,
+			"Invalid channel id %lu for ad7816.\n", data);
+		return -EINVAL;
+	}
+
+	chip->channel_id = data;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(channel, S_IRUGO | S_IWUSR,
+		ad7816_show_channel,
+		ad7816_store_channel,
+		0);
+
+
+static ssize_t ad7816_show_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	s8 value;
+	int ret;
+
+	ret = ad7816_spi_read(chip, &data);
+	if (ret)
+		return -EIO;
+
+	data >>= AD7816_VALUE_OFFSET;
+
+	if (chip->channel_id == 0) {
+		value = (s8)((data >> AD7816_TEMP_FLOAT_OFFSET) - 103);
+		data &= AD7816_TEMP_FLOAT_MASK;
+		if (value < 0)
+			data = (1 << AD7816_TEMP_FLOAT_OFFSET) - data;
+		return sprintf(buf, "%d.%.2d\n", value, data * 25);
+	} else
+		return sprintf(buf, "%u\n", data);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, ad7816_show_value, NULL, 0);
+
+static ssize_t ad7816_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7816_show_name, NULL, 0);
+
+static struct attribute *ad7816_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_channel.dev_attr.attr,
+	&iio_dev_attr_value.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7816_attribute_group = {
+	.attrs = ad7816_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_AD7816_OTI    IIO_BUFFER_EVENT_CODE(0)
+
+static void ad7816_interrupt_bh(struct work_struct *work_s)
+{
+	struct ad7816_chip_info *chip =
+		container_of(work_s, struct ad7816_chip_info, thresh_work);
+
+	enable_irq(chip->spi_dev->irq);
+
+	iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_AD7816_OTI,
+			chip->last_timestamp);
+}
+
+static int ad7816_interrupt(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(ad7816, &ad7816_interrupt);
+
+static ssize_t ad7816_show_oti(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+	int value;
+
+	if (chip->channel_id > AD7816_CS_MAX) {
+		dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
+		return -EINVAL;
+	} else if (chip->channel_id == 0) {
+		value = AD7816_BOUND_VALUE_MIN +
+			(chip->oti_data[chip->channel_id] -
+			AD7816_BOUND_VALUE_BASE);
+		return sprintf(buf, "%d\n", value);
+	} else
+		return sprintf(buf, "%u\n", chip->oti_data[chip->channel_id]);
+}
+
+static inline ssize_t ad7816_set_oti(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7816_chip_info *chip = dev_info->dev_data;
+	long value;
+	u8 data;
+	int ret;
+
+	ret = strict_strtol(buf, 10, &value);
+
+	if (chip->channel_id > AD7816_CS_MAX) {
+		dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
+		return -EINVAL;
+	} else if (chip->channel_id == 0) {
+		if (ret || value < AD7816_BOUND_VALUE_MIN ||
+			value > AD7816_BOUND_VALUE_MAX)
+			return -EINVAL;
+
+		data = (u8)(value - AD7816_BOUND_VALUE_MIN +
+			AD7816_BOUND_VALUE_BASE);
+	} else {
+		if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255)
+			return -EINVAL;
+
+		data = (u8)value;
+	}
+
+	ret = ad7816_spi_write(chip, data);
+	if (ret)
+		return -EIO;
+
+	chip->oti_data[chip->channel_id] = data;
+
+	return len;
+}
+
+IIO_EVENT_ATTR_SH(oti, iio_event_ad7816,
+		ad7816_show_oti, ad7816_set_oti, 0);
+
+static struct attribute *ad7816_event_attributes[] = {
+	&iio_event_attr_oti.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ad7816_event_attribute_group = {
+	.attrs = ad7816_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7816_probe(struct spi_device *spi_dev)
+{
+	struct ad7816_chip_info *chip;
+	unsigned short *pins = spi_dev->dev.platform_data;
+	int ret = 0;
+	int i;
+
+	if (!pins) {
+		dev_err(&spi_dev->dev, "Missing GPIO platform data.\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(struct ad7816_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	dev_set_drvdata(&spi_dev->dev, chip);
+
+	chip->spi_dev = spi_dev;
+	chip->name = spi_dev->modalias;
+	for (i = 0; i <= AD7816_CS_MAX; i++)
+		chip->oti_data[i] = 203;
+	chip->rdwr_pin = pins[0];
+	chip->convert_pin = pins[1];
+	chip->busy_pin = pins[2];
+
+	ret = gpio_request(chip->rdwr_pin, chip->name);
+	if (ret) {
+		dev_err(&spi_dev->dev, "Failed to request rdwr GPIO %d.\n",
+			chip->rdwr_pin);
+		goto error_free_chip;
+	}
+	gpio_direction_input(chip->rdwr_pin);
+	ret = gpio_request(chip->convert_pin, chip->name);
+	if (ret) {
+		dev_err(&spi_dev->dev, "Failed to request convert GPIO %d.\n",
+			chip->convert_pin);
+		goto error_free_gpio_rdwr;
+	}
+	gpio_direction_input(chip->convert_pin);
+	ret = gpio_request(chip->busy_pin, chip->name);
+	if (ret) {
+		dev_err(&spi_dev->dev, "Failed to request busy GPIO %d.\n",
+			chip->busy_pin);
+		goto error_free_gpio_convert;
+	}
+	gpio_direction_input(chip->busy_pin);
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_gpio;
+	}
+
+	chip->indio_dev->dev.parent = &spi_dev->dev;
+	chip->indio_dev->attrs = &ad7816_attribute_group;
+	chip->indio_dev->event_attrs = &ad7816_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = 1;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	if (spi_dev->irq) {
+		/* Only low trigger is supported in ad7816/7/8 */
+		ret = iio_register_interrupt_line(spi_dev->irq,
+				chip->indio_dev,
+				0,
+				IRQF_TRIGGER_LOW,
+				chip->name);
+		if (ret)
+			goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_ad7816.
+		 * All event attributes bind to the same event handler.
+		 * So, only register event handler once.
+		 */
+		iio_add_event_to_list(&iio_event_ad7816,
+				&chip->indio_dev->interrupts[0]->ev_list);
+
+		INIT_WORK(&chip->thresh_work, ad7816_interrupt_bh);
+	}
+
+	dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
+			 chip->name);
+
+	return 0;
+
+error_unreg_dev:
+	iio_device_unregister(chip->indio_dev);
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_gpio:
+	gpio_free(chip->busy_pin);
+error_free_gpio_convert:
+	gpio_free(chip->convert_pin);
+error_free_gpio_rdwr:
+	gpio_free(chip->rdwr_pin);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit ad7816_remove(struct spi_device *spi_dev)
+{
+	struct ad7816_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	dev_set_drvdata(&spi_dev->dev, NULL);
+	if (spi_dev->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	gpio_free(chip->busy_pin);
+	gpio_free(chip->convert_pin);
+	gpio_free(chip->rdwr_pin);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct spi_device_id ad7816_id[] = {
+	{ "ad7816", 0 },
+	{ "ad7817", 0 },
+	{ "ad7818", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(spi, ad7816_id);
+
+static struct spi_driver ad7816_driver = {
+	.driver = {
+		.name = "ad7816",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad7816_probe,
+	.remove = __devexit_p(ad7816_remove),
+	.id_table = ad7816_id,
+};
+
+static __init int ad7816_init(void)
+{
+	return spi_register_driver(&ad7816_driver);
+}
+
+static __exit void ad7816_exit(void)
+{
+	spi_unregister_driver(&ad7816_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7816/7/8 digital"
+			" temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7816_init);
+module_exit(ad7816_exit);
diff --git a/drivers/staging/iio/adc/ad7887.h b/drivers/staging/iio/adc/ad7887.h
new file mode 100644
index 0000000..8c2a218
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7887.h
@@ -0,0 +1,105 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_ADC_AD7887_H_
+#define IIO_ADC_AD7887_H_
+
+#define AD7887_REF_DIS		(1 << 5) /* on-chip reference disable */
+#define AD7887_DUAL		(1 << 4) /* dual-channel mode */
+#define AD7887_CH_AIN1		(1 << 3) /* convert on channel 1, DUAL=1 */
+#define AD7887_CH_AIN0		(0 << 3) /* convert on channel 0, DUAL=0,1 */
+#define AD7887_PM_MODE1		(0)	 /* CS based shutdown */
+#define AD7887_PM_MODE2		(1)	 /* full on */
+#define AD7887_PM_MODE3		(2)	 /* auto shutdown after conversion */
+#define AD7887_PM_MODE4		(3)	 /* standby mode */
+
+enum ad7887_channels {
+	AD7887_CH0,
+	AD7887_CH0_CH1,
+	AD7887_CH1,
+};
+
+#define RES_MASK(bits)	((1 << (bits)) - 1) /* TODO: move this into a common header */
+
+/*
+ * TODO: struct ad7887_platform_data needs to go into include/linux/iio
+ */
+
+struct ad7887_platform_data {
+	/* External Vref voltage applied */
+	u16				vref_mv;
+	/*
+	 * AD7887:
+	 * In single channel mode (en_dual = false) the AIN1/Vref pin assumes
+	 * its Vref function. In dual channel mode (en_dual = true) AIN1
+	 * becomes the second input channel, and Vref is internally connected
+	 * to Vdd.
+	 */
+	bool				en_dual;
+	/*
+	 * AD7887:
+	 * If use_onchip_ref = true, Vref is internally connected to the 2.500 V
+	 * voltage reference. If use_onchip_ref = false, the reference voltage
+	 * is supplied by AIN1/Vref.
+	 */
+	bool				use_onchip_ref;
+};
+
+struct ad7887_chip_info {
+	u8				bits;		/* number of ADC bits */
+	u8				storagebits;	/* number of bits read from the ADC */
+	u8				left_shift;	/* number of bits the sample must be shifted */
+	char				sign;		/* [s]igned or [u]nsigned */
+	u16				int_vref_mv;	/* internal reference voltage */
+};
+
+struct ad7887_state {
+	struct iio_dev			*indio_dev;
+	struct spi_device		*spi;
+	const struct ad7887_chip_info	*chip_info;
+	struct regulator		*reg;
+	struct work_struct		poll_work;
+	atomic_t			protect_ring;
+	u16				int_vref_mv;
+	bool				en_dual;
+	struct spi_transfer		xfer[4];
+	struct spi_message		msg[3];
+	struct spi_message		*ring_msg;
+	unsigned char			tx_cmd_buf[8];
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+
+	unsigned char			data[4] ____cacheline_aligned;
+};
+
+enum ad7887_supported_device_ids {
+	ID_AD7887
+};
+
+#ifdef CONFIG_IIO_RING_BUFFER
+int ad7887_scan_from_ring(struct ad7887_state *st, long mask);
+int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev);
+void ad7887_ring_cleanup(struct iio_dev *indio_dev);
+#else /* CONFIG_IIO_RING_BUFFER */
+static inline int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
+{
+	return 0;
+}
+
+static inline int
+ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void ad7887_ring_cleanup(struct iio_dev *indio_dev)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* IIO_ADC_AD7887_H_ */
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
new file mode 100644
index 0000000..6859089
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -0,0 +1,305 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_generic.h"
+#include "adc.h"
+
+#include "ad7887.h"
+
+static int ad7887_scan_direct(struct ad7887_state *st, unsigned ch)
+{
+	int ret = spi_sync(st->spi, &st->msg[ch]);
+	if (ret)
+		return ret;
+
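+	/* combine the two received bytes, MSB first, into the raw result */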
+	return (st->data[(ch * 2)] << 8) | st->data[(ch * 2) + 1];
+}
+
+static ssize_t ad7887_scan(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7887_state *st = dev_info->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+
+	mutex_lock(&dev_info->mlock);
+	if (iio_ring_enabled(dev_info))
+		ret = ad7887_scan_from_ring(st, 1 << this_attr->address);
+	else
+		ret = ad7887_scan_direct(st, this_attr->address);
+	mutex_unlock(&dev_info->mlock);
+
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", (ret >> st->chip_info->left_shift) &
+		       RES_MASK(st->chip_info->bits));
+}
+static IIO_DEV_ATTR_IN_RAW(0, ad7887_scan, 0);
+static IIO_DEV_ATTR_IN_RAW(1, ad7887_scan, 1);
+
+static ssize_t ad7887_show_scale(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	/* Driver currently only supports internal vref */
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7887_state *st = iio_dev_get_devdata(dev_info);
+	/* Corresponds to Vref / 2^(bits) */
+	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
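+	/* e.g. 2500 mV reference and 12 bits: 2500000 >> 12 = 610 uV per LSB */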
+
+	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+}
+static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0);
+
+static ssize_t ad7887_show_name(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7887_state *st = iio_dev_get_devdata(dev_info);
+
+	return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
+}
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad7887_show_name, NULL, 0);
+
+static struct attribute *ad7887_attributes[] = {
+	&iio_dev_attr_in0_raw.dev_attr.attr,
+	&iio_dev_attr_in1_raw.dev_attr.attr,
+	&iio_dev_attr_in_scale.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static mode_t ad7887_attr_is_visible(struct kobject *kobj,
+				     struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad7887_state *st = iio_dev_get_devdata(dev_info);
+
+	mode_t mode = attr->mode;
+
+	if ((attr == &iio_dev_attr_in1_raw.dev_attr.attr) && !st->en_dual)
+			mode = 0;
+
+	return mode;
+}
+
+static const struct attribute_group ad7887_attribute_group = {
+	.attrs = ad7887_attributes,
+	.is_visible = ad7887_attr_is_visible,
+};
+
+static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
+	/*
+	 * More devices added in future
+	 */
+	[ID_AD7887] = {
+		.bits = 12,
+		.storagebits = 16,
+		.left_shift = 0,
+		.sign = IIO_SCAN_EL_TYPE_UNSIGNED,
+		.int_vref_mv = 2500,
+	},
+};
+
+static int __devinit ad7887_probe(struct spi_device *spi)
+{
+	struct ad7887_platform_data *pdata = spi->dev.platform_data;
+	struct ad7887_state *st;
+	int ret, voltage_uv = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	st->reg = regulator_get(&spi->dev, "vcc");
+	if (!IS_ERR(st->reg)) {
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_put_reg;
+
+		voltage_uv = regulator_get_voltage(st->reg);
+	}
+
+	st->chip_info =
+		&ad7887_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+	spi_set_drvdata(spi, st);
+
+	atomic_set(&st->protect_ring, 0);
+	st->spi = spi;
+
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_disable_reg;
+	}
+
+	/* Establish that the iio_dev is a child of the spi device */
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->attrs = &ad7887_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Setup default message */
+
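+	/*
+	 * Default command: convert on CH0 in PM mode 4 (standby); the on-chip
+	 * reference is disabled unless the platform data asks for it.
+	 */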
+	st->tx_cmd_buf[0] = AD7887_CH_AIN0 | AD7887_PM_MODE4 |
+			    ((pdata && pdata->use_onchip_ref) ?
+			    0 : AD7887_REF_DIS);
+
+	st->xfer[0].rx_buf = &st->data[0];
+	st->xfer[0].tx_buf = &st->tx_cmd_buf[0];
+	st->xfer[0].len = 2;
+
+	spi_message_init(&st->msg[AD7887_CH0]);
+	spi_message_add_tail(&st->xfer[0], &st->msg[AD7887_CH0]);
+
+	if (pdata && pdata->en_dual) {
+		st->tx_cmd_buf[0] |= AD7887_DUAL | AD7887_REF_DIS;
+
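+		/*
+		 * Three messages are prepared for dual mode: CH0 only,
+		 * CH0 followed by CH1, and CH1 only.  See the dummy read in
+		 * ad7887_ring_preenable(), which pushes the CH1 setting down
+		 * to the hardware before buffered capture starts.
+		 */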
+		st->tx_cmd_buf[2] = AD7887_CH_AIN1 | AD7887_DUAL |
+				    AD7887_REF_DIS | AD7887_PM_MODE4;
+		st->tx_cmd_buf[4] = AD7887_CH_AIN0 | AD7887_DUAL |
+				    AD7887_REF_DIS | AD7887_PM_MODE4;
+		st->tx_cmd_buf[6] = AD7887_CH_AIN1 | AD7887_DUAL |
+				    AD7887_REF_DIS | AD7887_PM_MODE4;
+
+		st->xfer[1].rx_buf = &st->data[0];
+		st->xfer[1].tx_buf = &st->tx_cmd_buf[2];
+		st->xfer[1].len = 2;
+
+		st->xfer[2].rx_buf = &st->data[2];
+		st->xfer[2].tx_buf = &st->tx_cmd_buf[4];
+		st->xfer[2].len = 2;
+
+		spi_message_init(&st->msg[AD7887_CH0_CH1]);
+		spi_message_add_tail(&st->xfer[1], &st->msg[AD7887_CH0_CH1]);
+		spi_message_add_tail(&st->xfer[2], &st->msg[AD7887_CH0_CH1]);
+
+		st->xfer[3].rx_buf = &st->data[0];
+		st->xfer[3].tx_buf = &st->tx_cmd_buf[6];
+		st->xfer[3].len = 2;
+
+		spi_message_init(&st->msg[AD7887_CH1]);
+		spi_message_add_tail(&st->xfer[3], &st->msg[AD7887_CH1]);
+
+		st->en_dual = true;
+
+		if (pdata && pdata->vref_mv)
+			st->int_vref_mv = pdata->vref_mv;
+		else if (voltage_uv)
+			st->int_vref_mv = voltage_uv / 1000;
+		else
+			dev_warn(&spi->dev, "reference voltage unspecified\n");
+
+	} else {
+		if (pdata && pdata->vref_mv)
+			st->int_vref_mv = pdata->vref_mv;
+		else if (pdata && pdata->use_onchip_ref)
+			st->int_vref_mv = st->chip_info->int_vref_mv;
+		else
+			dev_warn(&spi->dev, "reference voltage unspecified\n");
+	}
+
+
+	ret = ad7887_register_ring_funcs_and_init(st->indio_dev);
+	if (ret)
+		goto error_free_device;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_free_device;
+
+	ret = iio_ring_buffer_register(st->indio_dev->ring, 0);
+	if (ret)
+		goto error_cleanup_ring;
+	return 0;
+
+error_cleanup_ring:
+	ad7887_ring_cleanup(st->indio_dev);
+	iio_device_unregister(st->indio_dev);
+error_free_device:
+	iio_free_device(st->indio_dev);
+error_disable_reg:
+	if (!IS_ERR(st->reg))
+		regulator_disable(st->reg);
+error_put_reg:
+	if (!IS_ERR(st->reg))
+		regulator_put(st->reg);
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad7887_remove(struct spi_device *spi)
+{
+	struct ad7887_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+	iio_ring_buffer_unregister(indio_dev->ring);
+	ad7887_ring_cleanup(indio_dev);
+	iio_device_unregister(indio_dev);
+	if (!IS_ERR(st->reg)) {
+		regulator_disable(st->reg);
+		regulator_put(st->reg);
+	}
+	kfree(st);
+	return 0;
+}
+
+static const struct spi_device_id ad7887_id[] = {
+	{"ad7887", ID_AD7887},
+	{}
+};
+
+static struct spi_driver ad7887_driver = {
+	.driver = {
+		.name	= "ad7887",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad7887_probe,
+	.remove		= __devexit_p(ad7887_remove),
+	.id_table	= ad7887_id,
+};
+
+static int __init ad7887_init(void)
+{
+	return spi_register_driver(&ad7887_driver);
+}
+module_init(ad7887_init);
+
+static void __exit ad7887_exit(void)
+{
+	spi_unregister_driver(&ad7887_driver);
+}
+module_exit(ad7887_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7887");
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
new file mode 100644
index 0000000..6b9cb1f
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2010 Analog Devices Inc.
+ * Copyright (C) 2008 Jonathan Cameron
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * ad7887_ring.c
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../ring_generic.h"
+#include "../ring_sw.h"
+#include "../trigger.h"
+#include "../sysfs.h"
+
+#include "ad7887.h"
+
+static IIO_SCAN_EL_C(in0, 0, 0, NULL);
+static IIO_SCAN_EL_C(in1, 1, 0, NULL);
+
+static ssize_t ad7887_show_type(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = ring->indio_dev;
+	struct ad7887_state *st = indio_dev->dev_data;
+
+	return sprintf(buf, "%c%d/%d>>%d\n", st->chip_info->sign,
+		       st->chip_info->bits, st->chip_info->storagebits,
+		       st->chip_info->left_shift);
+}
+static IIO_DEVICE_ATTR(in_type, S_IRUGO, ad7887_show_type, NULL, 0);
+
+static struct attribute *ad7887_scan_el_attrs[] = {
+	&iio_scan_el_in0.dev_attr.attr,
+	&iio_const_attr_in0_index.dev_attr.attr,
+	&iio_scan_el_in1.dev_attr.attr,
+	&iio_const_attr_in1_index.dev_attr.attr,
+	&iio_dev_attr_in_type.dev_attr.attr,
+	NULL,
+};
+
+static mode_t ad7887_scan_el_attr_is_visible(struct kobject *kobj,
+				     struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
+	struct iio_dev *indio_dev = ring->indio_dev;
+	struct ad7887_state *st = indio_dev->dev_data;
+
+	mode_t mode = attr->mode;
+
+	if ((attr == &iio_scan_el_in1.dev_attr.attr) ||
+		(attr == &iio_const_attr_in1_index.dev_attr.attr))
+		if (!st->en_dual)
+			mode = 0;
+
+	return mode;
+}
+
+static struct attribute_group ad7887_scan_el_group = {
+	.name = "scan_elements",
+	.attrs = ad7887_scan_el_attrs,
+	.is_visible = ad7887_scan_el_attr_is_visible,
+};
+
+int ad7887_scan_from_ring(struct ad7887_state *st, long mask)
+{
+	struct iio_ring_buffer *ring = st->indio_dev->ring;
+	int count = 0, ret;
+	u16 *ring_data;
+
+	if (!(ring->scan_mask & mask)) {
+		ret = -EBUSY;
+		goto error_ret;
+	}
+
+	ring_data = kmalloc(ring->access.get_bytes_per_datum(ring), GFP_KERNEL);
+	if (ring_data == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	ret = ring->access.read_last(ring, (u8 *) ring_data);
+	if (ret)
+		goto error_free_ring_data;
+
+	/*
+	 * A single channel scan stores its result at offset 0; only when
+	 * both channels are scanned does CH1 end up at offset 1.
+	 */
+	if ((ring->scan_mask == ((1 << 1) | (1 << 0))) && (mask == (1 << 1)))
+		count = 1;
+
+	ret = be16_to_cpu(ring_data[count]);
+
+error_free_ring_data:
+	kfree(ring_data);
+error_ret:
+	return ret;
+}
+
+/**
+ * ad7887_ring_preenable() - set up the parameters of the ring before enabling
+ *
+ * Setting the number of bytes per datum is slightly involved because this
+ * driver currently ensures that the timestamp is stored at an 8 byte
+ * boundary.
+ **/
+static int ad7887_ring_preenable(struct iio_dev *indio_dev)
+{
+	struct ad7887_state *st = indio_dev->dev_data;
+	struct iio_ring_buffer *ring = indio_dev->ring;
+	size_t d_size;
+
+	if (indio_dev->ring->access.set_bytes_per_datum) {
+		d_size = st->chip_info->storagebits / 8 + sizeof(s64);
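+		/* pad so the s64 timestamp lands on an 8 byte boundary */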
+		if (d_size % 8)
+			d_size += 8 - (d_size % 8);
+		indio_dev->ring->access.set_bytes_per_datum(indio_dev->ring,
+							    d_size);
+	}
+
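+	/* pick the SPI message that matches the enabled scan elements */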
+	switch (ring->scan_mask) {
+	case (1 << 0):
+		st->ring_msg = &st->msg[AD7887_CH0];
+		break;
+	case (1 << 1):
+		st->ring_msg = &st->msg[AD7887_CH1];
+		/* Dummy read: push CH1 setting down to hardware */
+		spi_sync(st->spi, st->ring_msg);
+		break;
+	case ((1 << 1) | (1 << 0)):
+		st->ring_msg = &st->msg[AD7887_CH0_CH1];
+		break;
+	}
+
+	return 0;
+}
+
+static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
+{
+	struct ad7887_state *st = indio_dev->dev_data;
+
+	/* dummy read: restore the default CH0 setting */
+	return spi_sync(st->spi, &st->msg[AD7887_CH0]);
+}
+
+/**
+ * ad7887_poll_func_th() - top half of trigger launched polling to ring buffer
+ *
+ * As sampling only occurs when the SPI transfer takes place, leave
+ * timestamping until then.  Some triggers will generate their own
+ * timestamp.  Currently there is no way of notifying them when no one
+ * cares.
+ **/
+static void ad7887_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+	struct ad7887_state *st = indio_dev->dev_data;
+
+	schedule_work(&st->poll_work);
+	return;
+}
+/**
+ * ad7887_poll_bh_to_ring() - bottom half of trigger launched polling to ring buffer
+ * @work_s:	the work struct through which this was scheduled
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ * Only one copy of this function is allowed to run at a time, to avoid
+ * problems if the trigger rate is set far too high and the reads then
+ * lock up the machine.
+ **/
+static void ad7887_poll_bh_to_ring(struct work_struct *work_s)
+{
+	struct ad7887_state *st = container_of(work_s, struct ad7887_state,
+						  poll_work);
+	struct iio_dev *indio_dev = st->indio_dev;
+	struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
+	struct iio_ring_buffer *ring = indio_dev->ring;
+	s64 time_ns;
+	__u8 *buf;
+	int b_sent;
+	size_t d_size;
+
+	unsigned int bytes = ring->scan_count * st->chip_info->storagebits / 8;
+
+	/* Ensure the timestamp is 8 byte aligned */
+	d_size = bytes + sizeof(s64);
+	if (d_size % sizeof(s64))
+		d_size += sizeof(s64) - (d_size % sizeof(s64));
+
+	/* Ensure only one copy of this function running at a time */
+	if (atomic_inc_return(&st->protect_ring) > 1)
+		return;
+
+	buf = kzalloc(d_size, GFP_KERNEL);
+	if (buf == NULL)
+		return;
+
+	b_sent = spi_sync(st->spi, st->ring_msg);
+	if (b_sent)
+		goto done;
+
+	time_ns = iio_get_time_ns();
+
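+	/* samples first, the timestamp goes in the last 8 bytes of the datum */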
+	memcpy(buf, st->data, bytes);
+	memcpy(buf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+
+	indio_dev->ring->access.store_to(&sw_ring->buf, buf, time_ns);
+done:
+	kfree(buf);
+	atomic_dec(&st->protect_ring);
+}
+
+int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+	struct ad7887_state *st = indio_dev->dev_data;
+	int ret;
+
+	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
+	if (!indio_dev->ring) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	/* Effectively select the ring buffer implementation */
+	iio_ring_sw_register_funcs(&indio_dev->ring->access);
+	ret = iio_alloc_pollfunc(indio_dev, NULL, &ad7887_poll_func_th);
+	if (ret)
+		goto error_deallocate_sw_rb;
+
+	/* Ring buffer functions - here trigger setup related */
+
+	indio_dev->ring->preenable = &ad7887_ring_preenable;
+	indio_dev->ring->postenable = &iio_triggered_ring_postenable;
+	indio_dev->ring->predisable = &iio_triggered_ring_predisable;
+	indio_dev->ring->postdisable = &ad7887_ring_postdisable;
+	indio_dev->ring->scan_el_attrs = &ad7887_scan_el_group;
+
+	INIT_WORK(&st->poll_work, &ad7887_poll_bh_to_ring);
+
+	/* Flag that polled ring buffering is possible */
+	indio_dev->modes |= INDIO_RING_TRIGGERED;
+	return 0;
+error_deallocate_sw_rb:
+	iio_sw_rb_free(indio_dev->ring);
+error_ret:
+	return ret;
+}
+
+void ad7887_ring_cleanup(struct iio_dev *indio_dev)
+{
+	/* ensure that the trigger has been detached */
+	if (indio_dev->trig) {
+		iio_put_trigger(indio_dev->trig);
+		iio_trigger_dettach_poll_func(indio_dev->trig,
+					      indio_dev->pollfunc);
+	}
+	kfree(indio_dev->pollfunc);
+	iio_sw_rb_free(indio_dev->ring);
+}
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
new file mode 100644
index 0000000..771a409
--- /dev/null
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -0,0 +1,952 @@
+/*
+ * ADT7310 digital temperature sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * ADT7310 registers definition
+ */
+
+#define ADT7310_STATUS			0
+#define ADT7310_CONFIG			1
+#define ADT7310_TEMPERATURE		2
+#define ADT7310_ID			3
+#define ADT7310_T_CRIT			4
+#define ADT7310_T_HYST			5
+#define ADT7310_T_ALARM_HIGH		6
+#define ADT7310_T_ALARM_LOW		7
+
+/*
+ * ADT7310 status
+ */
+#define ADT7310_STAT_T_LOW		0x10
+#define ADT7310_STAT_T_HIGH		0x20
+#define ADT7310_STAT_T_CRIT		0x40
+#define ADT7310_STAT_NOT_RDY		0x80
+
+/*
+ * ADT7310 config
+ */
+#define ADT7310_FAULT_QUEUE_MASK	0x3
+#define ADT7310_CT_POLARITY		0x4
+#define ADT7310_INT_POLARITY		0x8
+#define ADT7310_EVENT_MODE		0x10
+#define ADT7310_MODE_MASK		0x60
+#define ADT7310_ONESHOT			0x20
+#define ADT7310_SPS			0x40
+#define ADT7310_PD			0x60
+#define ADT7310_RESOLUTION		0x80
+
+/*
+ * ADT7310 masks
+ */
+#define ADT7310_T16_VALUE_SIGN			0x8000
+#define ADT7310_T16_VALUE_FLOAT_OFFSET		7
+#define ADT7310_T16_VALUE_FLOAT_MASK		0x7F
+#define ADT7310_T13_VALUE_SIGN			0x1000
+#define ADT7310_T13_VALUE_OFFSET		3
+#define ADT7310_T13_VALUE_FLOAT_OFFSET		4
+#define ADT7310_T13_VALUE_FLOAT_MASK		0xF
+#define ADT7310_T_HYST_MASK			0xF
+#define ADT7310_DEVICE_ID_MASK			0x7
+#define ADT7310_MANUFACTORY_ID_MASK		0xF8
+#define ADT7310_MANUFACTORY_ID_OFFSET		3
+
+
+#define ADT7310_CMD_REG_MASK			0x28
+#define ADT7310_CMD_REG_OFFSET			3
+#define ADT7310_CMD_READ			0x40
+#define ADT7310_CMD_CON_READ			0x4
+
+#define ADT7310_IRQS				2
+
+/*
+ * struct adt7310_chip_info - chip specific information
+ */
+
+struct adt7310_chip_info {
+	const char *name;
+	struct spi_device *spi_dev;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	s64 last_timestamp;
+	u8  config;
+};
+
+/*
+ * adt7310 register access by SPI
+ */
+
+static int adt7310_spi_read_word(struct adt7310_chip_info *chip, u8 reg, u16 *data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	u8 command = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+	int ret = 0;
+
+	command |= ADT7310_CMD_READ;
+	ret = spi_write(spi_dev, &command, sizeof(command));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI write command error\n");
+		return ret;
+	}
+
+	ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI read word error\n");
+		return ret;
+	}
+
+	*data = be16_to_cpu(*data);
+
+	return 0;
+}
+
+static int adt7310_spi_write_word(struct adt7310_chip_info *chip, u8 reg, u16 data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	u8 buf[3];
+	int ret = 0;
+
+	buf[0] = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+	buf[1] = (u8)(data >> 8);
+	buf[2] = (u8)(data & 0xFF);
+
+	ret = spi_write(spi_dev, buf, 3);
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI write word error\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int adt7310_spi_read_byte(struct adt7310_chip_info *chip, u8 reg, u8 *data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	u8 command = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+	int ret = 0;
+
+	command |= ADT7310_CMD_READ;
+	ret = spi_write(spi_dev, &command, sizeof(command));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI write command error\n");
+		return ret;
+	}
+
+	ret = spi_read(spi_dev, data, sizeof(*data));
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI read byte error\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int adt7310_spi_write_byte(struct adt7310_chip_info *chip, u8 reg, u8 data)
+{
+	struct spi_device *spi_dev = chip->spi_dev;
+	u8 buf[2];
+	int ret = 0;
+
+	buf[0] = (reg << ADT7310_CMD_REG_OFFSET) & ADT7310_CMD_REG_MASK;
+	buf[1] = data;
+
+	ret = spi_write(spi_dev, buf, 2);
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI write byte error\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static ssize_t adt7310_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	u8 config;
+
+	config = chip->config & ADT7310_MODE_MASK;
+
+	switch (config) {
+	case ADT7310_PD:
+		return sprintf(buf, "power-down\n");
+	case ADT7310_ONESHOT:
+		return sprintf(buf, "one-shot\n");
+	case ADT7310_SPS:
+		return sprintf(buf, "sps\n");
+	default:
+		return sprintf(buf, "full\n");
+	}
+}
+
+static ssize_t adt7310_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	u16 config;
+	int ret;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & (~ADT7310_MODE_MASK);
+	if (strcmp(buf, "power-down"))
+		config |= ADT7310_PD;
+	else if (strcmp(buf, "one-shot"))
+		config |= ADT7310_ONESHOT;
+	else if (strcmp(buf, "sps"))
+		config |= ADT7310_SPS;
+
+	ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		adt7310_show_mode,
+		adt7310_store_mode,
+		0);
+
+static ssize_t adt7310_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "full\none-shot\nsps\npower-down\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt7310_show_available_modes, NULL, 0);
+
+static ssize_t adt7310_show_resolution(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	int ret;
+	int bits;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	if (chip->config & ADT7310_RESOLUTION)
+		bits = 16;
+	else
+		bits = 13;
+
+	return sprintf(buf, "%d bits\n", bits);
+}
+
+static ssize_t adt7310_store_resolution(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	u16 config;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & (~ADT7310_RESOLUTION);
+	if (data)
+		config |= ADT7310_RESOLUTION;
+
+	ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(resolution, S_IRUGO | S_IWUSR,
+		adt7310_show_resolution,
+		adt7310_store_resolution,
+		0);
+
+static ssize_t adt7310_show_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	u8 id;
+	int ret;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_ID, &id);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "device id: 0x%x\nmanufactory id: 0x%x\n",
+			id & ADT7310_DEVICE_ID_MASK,
+			(id & ADT7310_MANUFACTORY_ID_MASK) >> ADT7310_MANUFACTORY_ID_OFFSET);
+}
+
+static IIO_DEVICE_ATTR(id, S_IRUGO,
+		adt7310_show_id,
+		NULL,
+		0);
+
+static ssize_t adt7310_convert_temperature(struct adt7310_chip_info *chip,
+		u16 data, char *buf)
+{
+	char sign = ' ';
+
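+	/*
+	 * 16 bit readings carry 7 fractional bits (1 LSB = 0.0078125 C) and
+	 * 13 bit readings carry 4 (1 LSB = 0.0625 C), hence the 78125 and
+	 * 625 multipliers below.
+	 */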
+	if (chip->config & ADT7310_RESOLUTION) {
+		if (data & ADT7310_T16_VALUE_SIGN) {
+			/* convert two's complement to a positive value */
+			data = (u16)((ADT7310_T16_VALUE_SIGN << 1) - (u32)data);
+			sign = '-';
+		}
+		return sprintf(buf, "%c%d.%.7d\n", sign,
+				(data >> ADT7310_T16_VALUE_FLOAT_OFFSET),
+				(data & ADT7310_T16_VALUE_FLOAT_MASK) * 78125);
+	} else {
+		if (data & ADT7310_T13_VALUE_SIGN) {
+			/* convert two's complement to a positive value */
+			data >>= ADT7310_T13_VALUE_OFFSET;
+			data = (ADT7310_T13_VALUE_SIGN << 1) - data;
+			sign = '-';
+		}
+		return sprintf(buf, "%c%d.%.4d\n", sign,
+				(data >> ADT7310_T13_VALUE_FLOAT_OFFSET),
+				(data & ADT7310_T13_VALUE_FLOAT_MASK) * 625);
+	}
+}
+
+static ssize_t adt7310_show_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	u8 status;
+	u16 data;
+	int ret, i = 0;
+
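+	/* poll until the NOT_RDY status bit clears; give up after 10000 reads */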
+	do {
+		ret = adt7310_spi_read_byte(chip, ADT7310_STATUS, &status);
+		if (ret)
+			return -EIO;
+		i++;
+		if (i == 10000)
+			return -EIO;
+	} while (status & ADT7310_STAT_NOT_RDY);
+
+	ret = adt7310_spi_read_word(chip, ADT7310_TEMPERATURE, &data);
+	if (ret)
+		return -EIO;
+
+	return adt7310_convert_temperature(chip, data, buf);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, adt7310_show_value, NULL, 0);
+
+static ssize_t adt7310_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt7310_show_name, NULL, 0);
+
+static struct attribute *adt7310_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_resolution.dev_attr.attr,
+	&iio_dev_attr_id.dev_attr.attr,
+	&iio_dev_attr_value.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group adt7310_attribute_group = {
+	.attrs = adt7310_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT7310_ABOVE_ALARM    IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_ADT7310_BELLOW_ALARM   IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_ADT7310_ABOVE_CRIT     IIO_BUFFER_EVENT_CODE(2)
+
+static void adt7310_interrupt_bh(struct work_struct *work_s)
+{
+	struct adt7310_chip_info *chip =
+		container_of(work_s, struct adt7310_chip_info, thresh_work);
+	u8 status;
+
+	if (adt7310_spi_read_byte(chip, ADT7310_STATUS, &status))
+		return;
+
+	if (status & ADT7310_STAT_T_HIGH)
+		iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT7310_ABOVE_ALARM,
+			chip->last_timestamp);
+	if (status & ADT7310_STAT_T_LOW)
+		iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT7310_BELLOW_ALARM,
+			chip->last_timestamp);
+	if (status & ADT7310_STAT_T_CRIT)
+		iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT7310_ABOVE_CRIT,
+			chip->last_timestamp);
+}
+
+static int adt7310_interrupt(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(adt7310, &adt7310_interrupt);
+IIO_EVENT_SH(adt7310_ct, &adt7310_interrupt);
+
+static ssize_t adt7310_show_event_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	if (chip->config & ADT7310_EVENT_MODE)
+		return sprintf(buf, "interrupt\n");
+	else
+		return sprintf(buf, "comparator\n");
+}
+
+static ssize_t adt7310_set_event_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	u16 config;
+	int ret;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config &= ~ADT7310_EVENT_MODE;
+	if (strcmp(buf, "comparator") != 0)
+		config |= ADT7310_EVENT_MODE;
+
+	ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static ssize_t adt7310_show_available_event_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "comparator\ninterrupt\n");
+}
+
+static ssize_t adt7310_show_fault_queue(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", chip->config & ADT7310_FAULT_QUEUE_MASK);
+}
+
+static ssize_t adt7310_set_fault_queue(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+	u8 config;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret || data > 3)
+		return -EINVAL;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT7310_FAULT_QUEUE_MASK;
+	config |= data;
+	ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static inline ssize_t adt7310_show_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	int ret;
+
+	ret = adt7310_spi_read_word(chip, bound_reg, &data);
+	if (ret)
+		return -EIO;
+
+	return adt7310_convert_temperature(chip, data, buf);
+}
+
+static inline ssize_t adt7310_set_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	long tmp1, tmp2 = 0;
+	u16 data;
+	char *pos;
+	int ret;
+
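+	/*
+	 * The bound is expected as a decimal string; the integer part is
+	 * parsed first and any digits after the '.' are quantised to the
+	 * register step size further down.
+	 */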
+	pos = strchr(buf, '.');
+
+	ret = strict_strtol(buf, 10, &tmp1);
+
+	if (ret || tmp1 > 127 || tmp1 < -128)
+		return -EINVAL;
+
+	if (pos) {
+		len = strlen(pos);
+
+		if (chip->config & ADT7310_RESOLUTION) {
+			if (len > ADT7310_T16_VALUE_FLOAT_OFFSET)
+				len = ADT7310_T16_VALUE_FLOAT_OFFSET;
+			pos[len] = 0;
+			ret = strict_strtol(pos, 10, &tmp2);
+
+			if (!ret)
+				tmp2 = (tmp2 / 78125) * 78125;
+		} else {
+			if (len > ADT7310_T13_VALUE_FLOAT_OFFSET)
+				len = ADT7310_T13_VALUE_FLOAT_OFFSET;
+			pos[len] = 0;
+			ret = strict_strtol(pos, 10, &tmp2);
+
+			if (!ret)
+				tmp2 = (tmp2 / 625) * 625;
+		}
+	}
+
+	if (tmp1 < 0)
+		data = (u16)(-tmp1);
+	else
+		data = (u16)tmp1;
+
+	if (chip->config & ADT7310_RESOLUTION) {
+		data = (data << ADT7310_T16_VALUE_FLOAT_OFFSET) |
+			(tmp2 & ADT7310_T16_VALUE_FLOAT_MASK);
+
+		if (tmp1 < 0)
+			/* convert the positive value to two's complement */
+			data = (u16)((ADT7310_T16_VALUE_SIGN << 1) - (u32)data);
+	} else {
+		data = (data << ADT7310_T13_VALUE_FLOAT_OFFSET) |
+			(tmp2 & ADT7310_T13_VALUE_FLOAT_MASK);
+
+		if (tmp1 < 0)
+			/* convert the positive value to two's complement */
+			data = (ADT7310_T13_VALUE_SIGN << 1) - data;
+		data <<= ADT7310_T13_VALUE_OFFSET;
+	}
+
+	ret = adt7310_spi_write_word(chip, bound_reg, data);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t adt7310_show_t_alarm_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7310_show_t_bound(dev, attr,
+			ADT7310_T_ALARM_HIGH, buf);
+}
+
+static inline ssize_t adt7310_set_t_alarm_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7310_set_t_bound(dev, attr,
+			ADT7310_T_ALARM_HIGH, buf, len);
+}
+
+static ssize_t adt7310_show_t_alarm_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7310_show_t_bound(dev, attr,
+			ADT7310_T_ALARM_LOW, buf);
+}
+
+static inline ssize_t adt7310_set_t_alarm_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7310_set_t_bound(dev, attr,
+			ADT7310_T_ALARM_LOW, buf, len);
+}
+
+static ssize_t adt7310_show_t_crit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7310_show_t_bound(dev, attr,
+			ADT7310_T_CRIT, buf);
+}
+
+static inline ssize_t adt7310_set_t_crit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7310_set_t_bound(dev, attr,
+			ADT7310_T_CRIT, buf, len);
+}
+
+static ssize_t adt7310_show_t_hyst(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	int ret;
+	u8 t_hyst;
+
+	ret = adt7310_spi_read_byte(chip, ADT7310_T_HYST, &t_hyst);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", t_hyst & ADT7310_T_HYST_MASK);
+}
+
+static inline ssize_t adt7310_set_t_hyst(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7310_chip_info *chip = dev_info->dev_data;
+	int ret;
+	unsigned long data;
+	u8 t_hyst;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if (ret || data > ADT7310_T_HYST_MASK)
+		return -EINVAL;
+
+	t_hyst = (u8)data;
+
+	ret = adt7310_spi_write_byte(chip, ADT7310_T_HYST, t_hyst);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+IIO_EVENT_ATTR_SH(event_mode, iio_event_adt7310,
+		adt7310_show_event_mode, adt7310_set_event_mode, 0);
+IIO_EVENT_ATTR_SH(available_event_modes, iio_event_adt7310,
+		adt7310_show_available_event_modes, NULL, 0);
+IIO_EVENT_ATTR_SH(fault_queue, iio_event_adt7310,
+		adt7310_show_fault_queue, adt7310_set_fault_queue, 0);
+IIO_EVENT_ATTR_SH(t_alarm_high, iio_event_adt7310,
+		adt7310_show_t_alarm_high, adt7310_set_t_alarm_high, 0);
+IIO_EVENT_ATTR_SH(t_alarm_low, iio_event_adt7310,
+		adt7310_show_t_alarm_low, adt7310_set_t_alarm_low, 0);
+IIO_EVENT_ATTR_SH(t_crit, iio_event_adt7310_ct,
+		adt7310_show_t_crit, adt7310_set_t_crit, 0);
+IIO_EVENT_ATTR_SH(t_hyst, iio_event_adt7310,
+		adt7310_show_t_hyst, adt7310_set_t_hyst, 0);
+
+static struct attribute *adt7310_event_int_attributes[] = {
+	&iio_event_attr_event_mode.dev_attr.attr,
+	&iio_event_attr_available_event_modes.dev_attr.attr,
+	&iio_event_attr_fault_queue.dev_attr.attr,
+	&iio_event_attr_t_alarm_high.dev_attr.attr,
+	&iio_event_attr_t_alarm_low.dev_attr.attr,
+	&iio_event_attr_t_hyst.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute *adt7310_event_ct_attributes[] = {
+	&iio_event_attr_event_mode.dev_attr.attr,
+	&iio_event_attr_available_event_modes.dev_attr.attr,
+	&iio_event_attr_fault_queue.dev_attr.attr,
+	&iio_event_attr_t_crit.dev_attr.attr,
+	&iio_event_attr_t_hyst.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adt7310_event_attribute_group[ADT7310_IRQS] = {
+	{
+		.attrs = adt7310_event_int_attributes,
+	},
+	{
+		.attrs = adt7310_event_ct_attributes,
+	}
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7310_probe(struct spi_device *spi_dev)
+{
+	struct adt7310_chip_info *chip;
+	int ret = 0;
+	unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
+	unsigned long irq_flags;
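+	/*
+	 * Platform data layout, as used below:
+	 *   [0] IRQ number for the INT (alarm) line, 0 if unused
+	 *   [1] IRQ trigger flags for the INT line
+	 *   [2] IRQ trigger flags for the CT line (0 selects IRQF_TRIGGER_LOW)
+	 */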
+
+	chip = kzalloc(sizeof(struct adt7310_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	dev_set_drvdata(&spi_dev->dev, chip);
+
+	chip->spi_dev = spi_dev;
+	chip->name = spi_dev->modalias;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	chip->indio_dev->dev.parent = &spi_dev->dev;
+	chip->indio_dev->attrs = &adt7310_attribute_group;
+	chip->indio_dev->event_attrs = adt7310_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = ADT7310_IRQS;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	/* CT critical temperature event, line 0 */
+	if (spi_dev->irq) {
+		if (adt7310_platform_data[2])
+			irq_flags = adt7310_platform_data[2];
+		else
+			irq_flags = IRQF_TRIGGER_LOW;
+		ret = iio_register_interrupt_line(spi_dev->irq,
+				chip->indio_dev,
+				0,
+				irq_flags,
+				chip->name);
+		if (ret)
+			goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_adt7310.
+		 * All event attributes bind to the same event handler.
+		 * One event handler can only be added to one event list.
+		 */
+		iio_add_event_to_list(&iio_event_adt7310,
+				&chip->indio_dev->interrupts[0]->ev_list);
+	}
+
+	/* INT bound temperature alarm event. line 1 */
+	if (adt7310_platform_data[0]) {
+		ret = iio_register_interrupt_line(adt7310_platform_data[0],
+				chip->indio_dev,
+				1,
+				adt7310_platform_data[1],
+				chip->name);
+		if (ret)
+			goto error_unreg_ct_irq;
+
+		/*
+		 * The event handler list element refers to iio_event_adt7310_ct.
+		 * All event attributes bind to the same event handler.
+		 * One event handler can only be added to one event list.
+		 */
+		iio_add_event_to_list(&iio_event_adt7310_ct,
+				&chip->indio_dev->interrupts[1]->ev_list);
+	}
+
+	if (spi_dev->irq && adt7310_platform_data[0]) {
+		INIT_WORK(&chip->thresh_work, adt7310_interrupt_bh);
+
+		ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
+		if (ret) {
+			ret = -EIO;
+			goto error_unreg_int_irq;
+		}
+
+		/* set irq polarity low level */
+		chip->config &= ~ADT7310_CT_POLARITY;
+
+		if (adt7310_platform_data[1] & IRQF_TRIGGER_HIGH)
+			chip->config |= ADT7310_INT_POLARITY;
+		else
+			chip->config &= ~ADT7310_INT_POLARITY;
+
+		ret = adt7310_spi_write_byte(chip, ADT7310_CONFIG, chip->config);
+		if (ret) {
+			ret = -EIO;
+			goto error_unreg_int_irq;
+		}
+	}
+
+	dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
+			chip->name);
+
+	return 0;
+
+error_unreg_int_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 1);
+error_unreg_ct_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+	iio_device_unregister(chip->indio_dev);
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit adt7310_remove(struct spi_device *spi_dev)
+{
+	struct adt7310_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
+	struct iio_dev *indio_dev = chip->indio_dev;
+	unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
+
+	dev_set_drvdata(&spi_dev->dev, NULL);
+	if (adt7310_platform_data[0])
+		iio_unregister_interrupt_line(indio_dev, 1);
+	if (spi_dev->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct spi_device_id adt7310_id[] = {
+	{ "adt7310", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(spi, adt7310_id);
+
+static struct spi_driver adt7310_driver = {
+	.driver = {
+		.name = "adt7310",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+	.probe = adt7310_probe,
+	.remove = __devexit_p(adt7310_remove),
+	.id_table = adt7310_id,
+};
+
+static __init int adt7310_init(void)
+{
+	return spi_register_driver(&adt7310_driver);
+}
+
+static __exit void adt7310_exit(void)
+{
+	spi_unregister_driver(&adt7310_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT7310 digital"
+			" temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7310_init);
+module_exit(adt7310_exit);
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
new file mode 100644
index 0000000..c345f27
--- /dev/null
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -0,0 +1,915 @@
+/*
+ * ADT7410 digital temperature sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * ADT7410 registers definition
+ */
+
+#define ADT7410_TEMPERATURE		0
+#define ADT7410_STATUS			2
+#define ADT7410_CONFIG			3
+#define ADT7410_T_ALARM_HIGH		4
+#define ADT7410_T_ALARM_LOW		6
+#define ADT7410_T_CRIT			8
+#define ADT7410_T_HYST			0xA
+#define ADT7410_ID			0xB
+#define ADT7410_RESET			0x2F
+
+/*
+ * ADT7410 status
+ */
+#define ADT7410_STAT_T_LOW		0x10
+#define ADT7410_STAT_T_HIGH		0x20
+#define ADT7410_STAT_T_CRIT		0x40
+#define ADT7410_STAT_NOT_RDY		0x80
+
+/*
+ * ADT7410 config
+ */
+#define ADT7410_FAULT_QUEUE_MASK	0x3
+#define ADT7410_CT_POLARITY		0x4
+#define ADT7410_INT_POLARITY		0x8
+#define ADT7410_EVENT_MODE		0x10
+#define ADT7410_MODE_MASK		0x60
+#define ADT7410_ONESHOT			0x20
+#define ADT7410_SPS			0x40
+#define ADT7410_PD			0x60
+#define ADT7410_RESOLUTION		0x80
+
+/*
+ * ADT7410 masks
+ */
+#define ADT7410_T16_VALUE_SIGN			0x8000
+#define ADT7410_T16_VALUE_FLOAT_OFFSET		7
+#define ADT7410_T16_VALUE_FLOAT_MASK		0x7F
+#define ADT7410_T13_VALUE_SIGN			0x1000
+#define ADT7410_T13_VALUE_OFFSET		3
+#define ADT7410_T13_VALUE_FLOAT_OFFSET		4
+#define ADT7410_T13_VALUE_FLOAT_MASK		0xF
+#define ADT7410_T_HYST_MASK			0xF
+#define ADT7410_DEVICE_ID_MASK			0xF
+#define ADT7410_MANUFACTORY_ID_MASK		0xF0
+#define ADT7410_MANUFACTORY_ID_OFFSET		4
+
+#define ADT7410_IRQS				2
+
+/*
+ * struct adt7410_chip_info - chip specific information
+ */
+
+struct adt7410_chip_info {
+	const char *name;
+	struct i2c_client *client;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	s64 last_timestamp;
+	u8  config;
+};
+
+/*
+ * adt7410 register access by I2C
+ */
+
+static int adt7410_i2c_read_word(struct adt7410_chip_info *chip, u8 reg, u16 *data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret = i2c_smbus_read_word_data(client, reg);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read error\n");
+		return ret;
+	}
+
+	*data = swab16((u16)ret);
+
+	return 0;
+}
+
+static int adt7410_i2c_write_word(struct adt7410_chip_info *chip, u8 reg, u16 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret = i2c_smbus_write_word_data(client, reg, swab16(data));
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
+
+static int adt7410_i2c_read_byte(struct adt7410_chip_info *chip, u8 reg, u8 *data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read error\n");
+		return ret;
+	}
+
+	*data = (u8)ret;
+
+	return 0;
+}
+
+static int adt7410_i2c_write_byte(struct adt7410_chip_info *chip, u8 reg, u8 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret = i2c_smbus_write_byte_data(client, reg, data);
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
+
+static ssize_t adt7410_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	u8 config;
+
+	config = chip->config & ADT7410_MODE_MASK;
+
+	switch (config) {
+	case ADT7410_PD:
+		return sprintf(buf, "power-down\n");
+	case ADT7410_ONESHOT:
+		return sprintf(buf, "one-shot\n");
+	case ADT7410_SPS:
+		return sprintf(buf, "sps\n");
+	default:
+		return sprintf(buf, "full\n");
+	}
+}
+
+static ssize_t adt7410_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	u16 config;
+	int ret;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & (~ADT7410_MODE_MASK);
+	if (strcmp(buf, "power-down"))
+		config |= ADT7410_PD;
+	else if (strcmp(buf, "one-shot"))
+		config |= ADT7410_ONESHOT;
+	else if (strcmp(buf, "sps"))
+		config |= ADT7410_SPS;
+
+	ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		adt7410_show_mode,
+		adt7410_store_mode,
+		0);
+
+static ssize_t adt7410_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "full\none-shot\nsps\npower-down\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt7410_show_available_modes, NULL, 0);
+
+static ssize_t adt7410_show_resolution(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	int ret;
+	int bits;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	if (chip->config & ADT7410_RESOLUTION)
+		bits = 16;
+	else
+		bits = 13;
+
+	return sprintf(buf, "%d bits\n", bits);
+}
+
+static ssize_t adt7410_store_resolution(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	u16 config;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & (~ADT7410_RESOLUTION);
+	if (data)
+		config |= ADT7410_RESOLUTION;
+
+	ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(resolution, S_IRUGO | S_IWUSR,
+		adt7410_show_resolution,
+		adt7410_store_resolution,
+		0);
+
+static ssize_t adt7410_show_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	u8 id;
+	int ret;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_ID, &id);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "device id: 0x%x\nmanufactory id: 0x%x\n",
+			id & ADT7410_DEVICE_ID_MASK,
+			(id & ADT7410_MANUFACTORY_ID_MASK) >> ADT7410_MANUFACTORY_ID_OFFSET);
+}
+
+static IIO_DEVICE_ATTR(id, S_IRUGO,
+		adt7410_show_id,
+		NULL,
+		0);
+
+static ssize_t adt7410_convert_temperature(struct adt7410_chip_info *chip,
+		u16 data, char *buf)
+{
+	char sign = ' ';
+
+	if (chip->config & ADT7410_RESOLUTION) {
+		if (data & ADT7410_T16_VALUE_SIGN) {
+			/* convert two's complement to a positive value */
+			data = (u16)((ADT7410_T16_VALUE_SIGN << 1) - (u32)data);
+			sign = '-';
+		}
+		return sprintf(buf, "%c%d.%.7d\n", sign,
+				(data >> ADT7410_T16_VALUE_FLOAT_OFFSET),
+				(data & ADT7410_T16_VALUE_FLOAT_MASK) * 78125);
+	} else {
+		if (data & ADT7410_T13_VALUE_SIGN) {
+			/* convert two's complement to a positive value */
+			data >>= ADT7410_T13_VALUE_OFFSET;
+			data = (ADT7410_T13_VALUE_SIGN << 1) - data;
+			sign = '-';
+		}
+		return sprintf(buf, "%c%d.%.4d\n", sign,
+				(data >> ADT7410_T13_VALUE_FLOAT_OFFSET),
+				(data & ADT7410_T13_VALUE_FLOAT_MASK) * 625);
+	}
+}
+
+static ssize_t adt7410_show_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	u8 status;
+	u16 data;
+	int ret, i = 0;
+
+	do {
+		ret = adt7410_i2c_read_byte(chip, ADT7410_STATUS, &status);
+		if (ret)
+			return -EIO;
+		i++;
+		if (i == 10000)
+			return -EIO;
+	} while (status & ADT7410_STAT_NOT_RDY);
+
+	ret = adt7410_i2c_read_word(chip, ADT7410_TEMPERATURE, &data);
+	if (ret)
+		return -EIO;
+
+	return adt7410_convert_temperature(chip, data, buf);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, adt7410_show_value, NULL, 0);
+
+static ssize_t adt7410_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt7410_show_name, NULL, 0);
+
+static struct attribute *adt7410_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_resolution.dev_attr.attr,
+	&iio_dev_attr_id.dev_attr.attr,
+	&iio_dev_attr_value.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group adt7410_attribute_group = {
+	.attrs = adt7410_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT7410_ABOVE_ALARM    IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_ADT7410_BELLOW_ALARM   IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_ADT7410_ABOVE_CRIT     IIO_BUFFER_EVENT_CODE(2)
+
+static void adt7410_interrupt_bh(struct work_struct *work_s)
+{
+	struct adt7410_chip_info *chip =
+		container_of(work_s, struct adt7410_chip_info, thresh_work);
+	u8 status;
+
+	if (adt7410_i2c_read_byte(chip, ADT7410_STATUS, &status))
+		return;
+
+	enable_irq(chip->client->irq);
+
+	if (status & ADT7410_STAT_T_HIGH)
+		iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT7410_ABOVE_ALARM,
+			chip->last_timestamp);
+	if (status & ADT7410_STAT_T_LOW)
+		iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT7410_BELLOW_ALARM,
+			chip->last_timestamp);
+	if (status & ADT7410_STAT_T_CRIT)
+		iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT7410_ABOVE_CRIT,
+			chip->last_timestamp);
+}
+
+static int adt7410_interrupt(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(adt7410, &adt7410_interrupt);
+IIO_EVENT_SH(adt7410_ct, &adt7410_interrupt);
+
+static ssize_t adt7410_show_event_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	if (chip->config & ADT7410_EVENT_MODE)
+		return sprintf(buf, "interrupt\n");
+	else
+		return sprintf(buf, "comparator\n");
+}
+
+static ssize_t adt7410_set_event_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	u16 config;
+	int ret;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT7410_EVENT_MODE;
+	if (strcmp(buf, "comparator") != 0)
+		config |= ADT7410_EVENT_MODE;
+
+	ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static ssize_t adt7410_show_available_event_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "comparator\ninterrupt\n");
+}
+
+static ssize_t adt7410_show_fault_queue(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", chip->config & ADT7410_FAULT_QUEUE_MASK);
+}
+
+static ssize_t adt7410_set_fault_queue(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+	u8 config;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret || data > 3)
+		return -EINVAL;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT7410_FAULT_QUEUE_MASK;
+	config |= data;
+	ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static inline ssize_t adt7410_show_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	int ret;
+
+	ret = adt7410_i2c_read_word(chip, bound_reg, &data);
+	if (ret)
+		return -EIO;
+
+	return adt7410_convert_temperature(chip, data, buf);
+}
+
+static inline ssize_t adt7410_set_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	long tmp1, tmp2 = 0;
+	u16 data;
+	char *pos;
+	int ret;
+
+	pos = strchr(buf, '.');
+
+	ret = strict_strtol(buf, 10, &tmp1);
+
+	if (ret || tmp1 > 127 || tmp1 < -128)
+		return -EINVAL;
+
+	if (pos) {
+		len = strlen(pos);
+
+		if (chip->config & ADT7410_RESOLUTION) {
+			if (len > ADT7410_T16_VALUE_FLOAT_OFFSET)
+				len = ADT7410_T16_VALUE_FLOAT_OFFSET;
+			pos[len] = 0;
+			ret = strict_strtol(pos, 10, &tmp2);
+
+			if (!ret)
+				tmp2 = (tmp2 / 78125) * 78125;
+		} else {
+			if (len > ADT7410_T13_VALUE_FLOAT_OFFSET)
+				len = ADT7410_T13_VALUE_FLOAT_OFFSET;
+			pos[len] = 0;
+			ret = strict_strtol(pos, 10, &tmp2);
+
+			if (!ret)
+				tmp2 = (tmp2 / 625) * 625;
+		}
+	}
+
+	if (tmp1 < 0)
+		data = (u16)(-tmp1);
+	else
+		data = (u16)tmp1;
+
+	if (chip->config & ADT7410_RESOLUTION) {
+		data = (data << ADT7410_T16_VALUE_FLOAT_OFFSET) |
+			(tmp2 & ADT7410_T16_VALUE_FLOAT_MASK);
+
+		if (tmp1 < 0)
+			/* convert a positive value to two's complement */
+			data = (u16)((ADT7410_T16_VALUE_SIGN << 1) - (u32)data);
+	} else {
+		data = (data << ADT7410_T13_VALUE_FLOAT_OFFSET) |
+			(tmp2 & ADT7410_T13_VALUE_FLOAT_MASK);
+
+		if (tmp1 < 0)
+			/* convert a positive value to two's complement */
+			data = (ADT7410_T13_VALUE_SIGN << 1) - data;
+		data <<= ADT7410_T13_VALUE_OFFSET;
+	}
+
+	ret = adt7410_i2c_write_word(chip, bound_reg, data);
+	if (ret)
+		return -EIO;
+
+	return ret;
+}
+
+static ssize_t adt7410_show_t_alarm_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7410_show_t_bound(dev, attr,
+			ADT7410_T_ALARM_HIGH, buf);
+}
+
+static inline ssize_t adt7410_set_t_alarm_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7410_set_t_bound(dev, attr,
+			ADT7410_T_ALARM_HIGH, buf, len);
+}
+
+static ssize_t adt7410_show_t_alarm_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7410_show_t_bound(dev, attr,
+			ADT7410_T_ALARM_LOW, buf);
+}
+
+static inline ssize_t adt7410_set_t_alarm_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7410_set_t_bound(dev, attr,
+			ADT7410_T_ALARM_LOW, buf, len);
+}
+
+static ssize_t adt7410_show_t_crit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7410_show_t_bound(dev, attr,
+			ADT7410_T_CRIT, buf);
+}
+
+static inline ssize_t adt7410_set_t_crit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7410_set_t_bound(dev, attr,
+			ADT7410_T_CRIT, buf, len);
+}
+
+static ssize_t adt7410_show_t_hyst(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	int ret;
+	u8 t_hyst;
+
+	ret = adt7410_i2c_read_byte(chip, ADT7410_T_HYST, &t_hyst);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", t_hyst & ADT7410_T_HYST_MASK);
+}
+
+static inline ssize_t adt7410_set_t_hyst(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7410_chip_info *chip = dev_info->dev_data;
+	int ret;
+	unsigned long data;
+	u8 t_hyst;
+
+	ret = strict_strtoul(buf, 10, &data);
+
+	if (ret || data > ADT7410_T_HYST_MASK)
+		return -EINVAL;
+
+	t_hyst = (u8)data;
+
+	ret = adt7410_i2c_write_byte(chip, ADT7410_T_HYST, t_hyst);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+IIO_EVENT_ATTR_SH(event_mode, iio_event_adt7410,
+		adt7410_show_event_mode, adt7410_set_event_mode, 0);
+IIO_EVENT_ATTR_SH(available_event_modes, iio_event_adt7410,
+		adt7410_show_available_event_modes, NULL, 0);
+IIO_EVENT_ATTR_SH(fault_queue, iio_event_adt7410,
+		adt7410_show_fault_queue, adt7410_set_fault_queue, 0);
+IIO_EVENT_ATTR_SH(t_alarm_high, iio_event_adt7410,
+		adt7410_show_t_alarm_high, adt7410_set_t_alarm_high, 0);
+IIO_EVENT_ATTR_SH(t_alarm_low, iio_event_adt7410,
+		adt7410_show_t_alarm_low, adt7410_set_t_alarm_low, 0);
+IIO_EVENT_ATTR_SH(t_crit, iio_event_adt7410_ct,
+		adt7410_show_t_crit, adt7410_set_t_crit, 0);
+IIO_EVENT_ATTR_SH(t_hyst, iio_event_adt7410,
+		adt7410_show_t_hyst, adt7410_set_t_hyst, 0);
+
+static struct attribute *adt7410_event_int_attributes[] = {
+	&iio_event_attr_event_mode.dev_attr.attr,
+	&iio_event_attr_available_event_modes.dev_attr.attr,
+	&iio_event_attr_fault_queue.dev_attr.attr,
+	&iio_event_attr_t_alarm_high.dev_attr.attr,
+	&iio_event_attr_t_alarm_low.dev_attr.attr,
+	&iio_event_attr_t_hyst.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute *adt7410_event_ct_attributes[] = {
+	&iio_event_attr_event_mode.dev_attr.attr,
+	&iio_event_attr_available_event_modes.dev_attr.attr,
+	&iio_event_attr_fault_queue.dev_attr.attr,
+	&iio_event_attr_t_crit.dev_attr.attr,
+	&iio_event_attr_t_hyst.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adt7410_event_attribute_group[ADT7410_IRQS] = {
+	{
+		.attrs = adt7410_event_int_attributes,
+	},
+	{
+		.attrs = adt7410_event_ct_attributes,
+	}
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7410_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct adt7410_chip_info *chip;
+	int ret = 0;
+	unsigned long *adt7410_platform_data = client->dev.platform_data;
+
+	chip = kzalloc(sizeof(struct adt7410_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, chip);
+
+	chip->client = client;
+	chip->name = id->name;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	chip->indio_dev->dev.parent = &client->dev;
+	chip->indio_dev->attrs = &adt7410_attribute_group;
+	chip->indio_dev->event_attrs = adt7410_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = ADT7410_IRQS;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	/* CT critical temperature event, line 0 */
+	if (client->irq) {
+		ret = iio_register_interrupt_line(client->irq,
+				chip->indio_dev,
+				0,
+				IRQF_TRIGGER_LOW,
+				chip->name);
+		if (ret)
+			goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_adt7410.
+		 * All event attributes bind to the same event handler.
+		 * One event handler can only be added to one event list.
+		 */
+		iio_add_event_to_list(&iio_event_adt7410,
+				&chip->indio_dev->interrupts[0]->ev_list);
+	}
+
+	/* INT temperature bound alarm event, line 1 */
+	if (adt7410_platform_data && adt7410_platform_data[0]) {
+		ret = iio_register_interrupt_line(adt7410_platform_data[0],
+				chip->indio_dev,
+				1,
+				adt7410_platform_data[1],
+				chip->name);
+		if (ret)
+			goto error_unreg_ct_irq;
+
+		/*
+		 * The event handler list element refers to iio_event_adt7410_ct.
+		 * All event attributes bind to the same event handler.
+		 * One event handler can only be added to one event list.
+		 */
+		iio_add_event_to_list(&iio_event_adt7410_ct,
+				&chip->indio_dev->interrupts[1]->ev_list);
+	}
+
+	if (client->irq && adt7410_platform_data &&
+		adt7410_platform_data[0]) {
+		INIT_WORK(&chip->thresh_work, adt7410_interrupt_bh);
+
+		ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
+		if (ret) {
+			ret = -EIO;
+			goto error_unreg_int_irq;
+		}
+
+		/* set irq polarity low level */
+		chip->config &= ~ADT7410_CT_POLARITY;
+
+		if (adt7410_platform_data[1] & IRQF_TRIGGER_HIGH)
+			chip->config |= ADT7410_INT_POLARITY;
+		else
+			chip->config &= ~ADT7410_INT_POLARITY;
+
+		ret = adt7410_i2c_write_byte(chip, ADT7410_CONFIG, chip->config);
+		if (ret) {
+			ret = -EIO;
+			goto error_unreg_int_irq;
+		}
+	}
+
+	dev_info(&client->dev, "%s temperature sensor registered.\n",
+			 id->name);
+
+	return 0;
+
+error_unreg_int_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 1);
+error_unreg_ct_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+	iio_device_unregister(chip->indio_dev);
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit adt7410_remove(struct i2c_client *client)
+{
+	struct adt7410_chip_info *chip = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = chip->indio_dev;
+	unsigned long *adt7410_platform_data = client->dev.platform_data;
+
+	if (adt7410_platform_data && adt7410_platform_data[0])
+		iio_unregister_interrupt_line(indio_dev, 1);
+	if (client->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct i2c_device_id adt7410_id[] = {
+	{ "adt7410", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, adt7410_id);
+
+static struct i2c_driver adt7410_driver = {
+	.driver = {
+		.name = "adt7410",
+	},
+	.probe = adt7410_probe,
+	.remove = __devexit_p(adt7410_remove),
+	.id_table = adt7410_id,
+};
+
+static __init int adt7410_init(void)
+{
+	return i2c_add_driver(&adt7410_driver);
+}
+
+static __exit void adt7410_exit(void)
+{
+	i2c_del_driver(&adt7410_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT7410 digital"
+			" temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7410_init);
+module_exit(adt7410_exit);
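
The probe above takes the CT line interrupt from client->irq and the INT line from client->dev.platform_data, which it treats as an unsigned long pair: element [0] is the INT irq number and element [1] its trigger flags. A minimal board-file sketch of that wiring (not part of this patch; the irq numbers are made-up placeholders and the I2C address must match the board's A0/A1 strapping) could look like:

#include <linux/i2c.h>
#include <linux/interrupt.h>

/* [0] = irq of the INT pin (line 1), [1] = its trigger flags; both hypothetical */
static unsigned long adt7410_int_pdata[2] = {
	42,			/* hypothetical INT pin irq */
	IRQF_TRIGGER_LOW,
};

static struct i2c_board_info adt7410_board_info __initdata = {
	I2C_BOARD_INFO("adt7410", 0x48),	/* one of 0x48..0x4b, pin-selectable */
	.irq = 43,				/* hypothetical CT pin irq (line 0) */
	.platform_data = adt7410_int_pdata,
};

The entry would be handed to i2c_register_board_info() during board init; the platform data array is kept non-__initdata because the driver dereferences it again at remove time.
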
diff --git a/drivers/staging/iio/adc/adt75.c b/drivers/staging/iio/adc/adt75.c
new file mode 100644
index 0000000..aff4d31
--- /dev/null
+++ b/drivers/staging/iio/adc/adt75.c
@@ -0,0 +1,732 @@
+/*
+ * ADT75 digital temperature sensor driver supporting ADT75
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+/*
+ * ADT75 registers definition
+ */
+
+#define ADT75_TEMPERATURE		0
+#define ADT75_CONFIG			1
+#define ADT75_T_HYST			2
+#define ADT75_T_OS			3
+#define ADT75_ONESHOT			4
+
+/*
+ * ADT75 config
+ */
+#define ADT75_PD			0x1
+#define ADT75_OS_INT			0x2
+#define ADT75_OS_POLARITY		0x4
+#define ADT75_FAULT_QUEUE_MASK		0x18
+#define ADT75_FAULT_QUEUE_OFFSET	3
+#define ADT75_SMBUS_ALART		0x8
+
+/*
+ * ADT75 masks
+ */
+#define ADT75_VALUE_SIGN		0x800
+#define ADT75_VALUE_OFFSET		4
+#define ADT75_VALUE_FLOAT_OFFSET	4
+#define ADT75_VALUE_FLOAT_MASK		0xF
+
+
+/*
+ * struct adt75_chip_info - chip specific information
+ */
+
+struct adt75_chip_info {
+	const char *name;
+	struct i2c_client *client;
+	struct iio_dev *indio_dev;
+	struct work_struct thresh_work;
+	s64 last_timestamp;
+	u8  config;
+};
+
+/*
+ * adt75 register access by I2C
+ */
+
+static int adt75_i2c_read(struct adt75_chip_info *chip, u8 reg, u8 *data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0, len;
+
+	ret = i2c_smbus_write_byte(client, reg);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read register address error\n");
+		return ret;
+	}
+
+	if (reg == ADT75_CONFIG || reg == ADT75_ONESHOT)
+		len = 1;
+	else
+		len = 2;
+
+	ret = i2c_master_recv(client, data, len);
+	if (ret < 0) {
+		dev_err(&client->dev, "I2C read error\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int adt75_i2c_write(struct adt75_chip_info *chip, u8 reg, u8 data)
+{
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	if (reg == ADT75_CONFIG || reg == ADT75_ONESHOT)
+		ret = i2c_smbus_write_byte_data(client, reg, data);
+	else
+		ret = i2c_smbus_write_word_data(client, reg, data);
+
+	if (ret < 0)
+		dev_err(&client->dev, "I2C write error\n");
+
+	return ret;
+}
+
+static ssize_t adt75_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+
+	if (chip->config & ADT75_PD)
+		return sprintf(buf, "power-down\n");
+	else
+		return sprintf(buf, "full\n");
+}
+
+static ssize_t adt75_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	int ret;
+	u8 config;
+
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT75_PD;
+	if (strncmp(buf, "full", 4))
+		config |= ADT75_PD;
+
+	ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		adt75_show_mode,
+		adt75_store_mode,
+		0);
+
+static ssize_t adt75_show_available_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "full\npower-down\n");
+}
+
+static IIO_DEVICE_ATTR(available_modes, S_IRUGO, adt75_show_available_modes, NULL, 0);
+
+static ssize_t adt75_show_oneshot(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->config & ADT75_ONESHOT));
+}
+
+static ssize_t adt75_store_oneshot(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	unsigned long data = 0;
+	int ret;
+	u8 config;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT75_ONESHOT;
+	if (data)
+		config |= ADT75_ONESHOT;
+
+	ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(oneshot, S_IRUGO | S_IWUSR,
+		adt75_show_oneshot,
+		adt75_store_oneshot,
+		0);
+
+static ssize_t adt75_show_value(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	if (chip->config & ADT75_PD) {
+		dev_err(dev, "Can't read value in power-down mode.\n");
+		return -EIO;
+	}
+
+	if (chip->config & ADT75_ONESHOT) {
+		/* trigger a one-shot conversion */
+		ret = i2c_smbus_write_byte(chip->client, ADT75_ONESHOT);
+		if (ret)
+			return -EIO;
+	}
+
+	ret = adt75_i2c_read(chip, ADT75_TEMPERATURE, (u8 *)&data);
+	if (ret)
+		return -EIO;
+
+	data = swab16(data) >> ADT75_VALUE_OFFSET;
+	if (data & ADT75_VALUE_SIGN) {
+		/* convert two's complement to a positive value */
+		data = (ADT75_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.4d\n", sign,
+		(data >> ADT75_VALUE_FLOAT_OFFSET),
+		(data & ADT75_VALUE_FLOAT_MASK) * 625);
+}
+
+static IIO_DEVICE_ATTR(value, S_IRUGO, adt75_show_value, NULL, 0);
+
+static ssize_t adt75_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt75_show_name, NULL, 0);
+
+static struct attribute *adt75_attributes[] = {
+	&iio_dev_attr_available_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_oneshot.dev_attr.attr,
+	&iio_dev_attr_value.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group adt75_attribute_group = {
+	.attrs = adt75_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
+#define IIO_EVENT_CODE_ADT75_OTI    IIO_BUFFER_EVENT_CODE(0)
+
+static void adt75_interrupt_bh(struct work_struct *work_s)
+{
+	struct adt75_chip_info *chip =
+		container_of(work_s, struct adt75_chip_info, thresh_work);
+
+	enable_irq(chip->client->irq);
+
+	iio_push_event(chip->indio_dev, 0,
+			IIO_EVENT_CODE_ADT75_OTI,
+			chip->last_timestamp);
+}
+
+static int adt75_interrupt(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct adt75_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(adt75, &adt75_interrupt);
+
+static ssize_t adt75_show_oti_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	/* refresh the cached config register */
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	if (chip->config & ADT75_OS_INT)
+		return sprintf(buf, "interrupt\n");
+	else
+		return sprintf(buf, "comparator\n");
+}
+
+static ssize_t adt75_set_oti_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	int ret;
+	u8 config;
+
+	/* refresh the cached config register */
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT75_OS_INT;
+	if (strcmp(buf, "comparator") != 0)
+		config |= ADT75_OS_INT;
+
+	ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static ssize_t adt75_show_available_oti_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "comparator\ninterrupt\n");
+}
+
+static ssize_t adt75_show_smbus_alart(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	/* refresh the cached config register */
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", !!(chip->config & ADT75_SMBUS_ALART));
+}
+
+static ssize_t adt75_set_smbus_alart(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	unsigned long data = 0;
+	int ret;
+	u8 config;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+	/* refresh the cached config register */
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT75_SMBUS_ALART;
+	if (data)
+		config |= ADT75_SMBUS_ALART;
+
+	ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static ssize_t adt75_show_fault_queue(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	int ret;
+
+	/* refresh the cached config register */
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", (chip->config & ADT75_FAULT_QUEUE_MASK) >>
+				ADT75_FAULT_QUEUE_OFFSET);
+}
+
+static ssize_t adt75_set_fault_queue(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+	u8 config;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret || data > 3)
+		return -EINVAL;
+
+	/* refresh the cached config register */
+	ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+	if (ret)
+		return -EIO;
+
+	config = chip->config & ~ADT75_FAULT_QUEUE_MASK;
+	config |= (data << ADT75_FAULT_QUEUE_OFFSET);
+	ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+	if (ret)
+		return -EIO;
+
+	chip->config = config;
+
+	return len;
+}
+
+static inline ssize_t adt75_show_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	u16 data;
+	char sign = ' ';
+	int ret;
+
+	ret = adt75_i2c_read(chip, bound_reg, (u8 *)&data);
+	if (ret)
+		return -EIO;
+
+	data = swab16(data) >> ADT75_VALUE_OFFSET;
+	if (data & ADT75_VALUE_SIGN) {
+		/* convert two's complement to a positive value */
+		data = (ADT75_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.4d\n", sign,
+		(data >> ADT75_VALUE_FLOAT_OFFSET),
+		(data & ADT75_VALUE_FLOAT_MASK) * 625);
+}
+
+static inline ssize_t adt75_set_t_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt75_chip_info *chip = dev_info->dev_data;
+	long tmp1, tmp2 = 0;
+	u16 data;
+	char *pos;
+	int ret;
+
+	pos = strchr(buf, '.');
+
+	ret = strict_strtol(buf, 10, &tmp1);
+
+	if (ret || tmp1 > 127 || tmp1 < -128)
+		return -EINVAL;
+
+	if (pos) {
+		len = strlen(pos);
+		if (len > ADT75_VALUE_FLOAT_OFFSET)
+			len = ADT75_VALUE_FLOAT_OFFSET;
+		pos[len] = 0;
+		ret = strict_strtol(pos, 10, &tmp2);
+
+		if (!ret)
+			tmp2 = (tmp2 / 625) * 625;
+	}
+
+	if (tmp1 < 0)
+		data = (u16)(-tmp1);
+	else
+		data = (u16)tmp1;
+	data = (data << ADT75_VALUE_FLOAT_OFFSET) | (tmp2 & ADT75_VALUE_FLOAT_MASK);
+	if (tmp1 < 0)
+		/* convert a positive value to two's complement */
+		data = (ADT75_VALUE_SIGN << 1) - data;
+	data <<= ADT75_VALUE_OFFSET;
+	data = swab16(data);
+
+	ret = adt75_i2c_write(chip, bound_reg, (u8)data);
+	if (ret)
+		return -EIO;
+
+	return ret;
+}
+
+static ssize_t adt75_show_t_os(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt75_show_t_bound(dev, attr,
+			ADT75_T_OS, buf);
+}
+
+static inline ssize_t adt75_set_t_os(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt75_set_t_bound(dev, attr,
+			ADT75_T_OS, buf, len);
+}
+
+static ssize_t adt75_show_t_hyst(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt75_show_t_bound(dev, attr,
+			ADT75_T_HYST, buf);
+}
+
+static inline ssize_t adt75_set_t_hyst(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt75_set_t_bound(dev, attr,
+			ADT75_T_HYST, buf, len);
+}
+
+IIO_EVENT_ATTR_SH(oti_mode, iio_event_adt75,
+		adt75_show_oti_mode, adt75_set_oti_mode, 0);
+IIO_EVENT_ATTR_SH(available_oti_modes, iio_event_adt75,
+		adt75_show_available_oti_modes, NULL, 0);
+IIO_EVENT_ATTR_SH(smbus_alart, iio_event_adt75,
+		adt75_show_smbus_alart, adt75_set_smbus_alart, 0);
+IIO_EVENT_ATTR_SH(fault_queue, iio_event_adt75,
+		adt75_show_fault_queue, adt75_set_fault_queue, 0);
+IIO_EVENT_ATTR_SH(t_os, iio_event_adt75,
+		adt75_show_t_os, adt75_set_t_os, 0);
+IIO_EVENT_ATTR_SH(t_hyst, iio_event_adt75,
+		adt75_show_t_hyst, adt75_set_t_hyst, 0);
+
+static struct attribute *adt75_event_attributes[] = {
+	&iio_event_attr_oti_mode.dev_attr.attr,
+	&iio_event_attr_available_oti_modes.dev_attr.attr,
+	&iio_event_attr_smbus_alart.dev_attr.attr,
+	&iio_event_attr_fault_queue.dev_attr.attr,
+	&iio_event_attr_t_os.dev_attr.attr,
+	&iio_event_attr_t_hyst.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adt75_event_attribute_group = {
+	.attrs = adt75_event_attributes,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt75_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct adt75_chip_info *chip;
+	int ret = 0;
+
+	chip = kzalloc(sizeof(struct adt75_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	i2c_set_clientdata(client, chip);
+
+	chip->client = client;
+	chip->name = id->name;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	chip->indio_dev->dev.parent = &client->dev;
+	chip->indio_dev->attrs = &adt75_attribute_group;
+	chip->indio_dev->event_attrs = &adt75_event_attribute_group;
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = 1;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	if (client->irq > 0) {
+		ret = iio_register_interrupt_line(client->irq,
+				chip->indio_dev,
+				0,
+				IRQF_TRIGGER_LOW,
+				chip->name);
+		if (ret)
+			goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_adt75.
+		 * All event attributes bind to the same event handler,
+		 * so the handler only needs to be registered once.
+		 */
+		iio_add_event_to_list(&iio_event_adt75,
+				&chip->indio_dev->interrupts[0]->ev_list);
+
+		INIT_WORK(&chip->thresh_work, adt75_interrupt_bh);
+
+		ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+		if (ret) {
+			ret = -EIO;
+			goto error_unreg_irq;
+		}
+
+		/* set irq polarity low level */
+		chip->config &= ~ADT75_OS_POLARITY;
+
+		ret = adt75_i2c_write(chip, ADT75_CONFIG, chip->config);
+		if (ret) {
+			ret = -EIO;
+			goto error_unreg_irq;
+		}
+	}
+
+	dev_info(&client->dev, "%s temperature sensor registered.\n",
+			 id->name);
+
+	return 0;
+error_unreg_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+	iio_device_unregister(chip->indio_dev);
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+
+static int __devexit adt75_remove(struct i2c_client *client)
+{
+	struct adt75_chip_info *chip = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	if (client->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(chip->indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+
+static const struct i2c_device_id adt75_id[] = {
+	{ "adt75", 0 },
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, adt75_id);
+
+static struct i2c_driver adt75_driver = {
+	.driver = {
+		.name = "adt75",
+	},
+	.probe = adt75_probe,
+	.remove = __devexit_p(adt75_remove),
+	.id_table = adt75_id,
+};
+
+static __init int adt75_init(void)
+{
+	return i2c_add_driver(&adt75_driver);
+}
+
+static __exit void adt75_exit(void)
+{
+	i2c_del_driver(&adt75_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT75 digital"
+			" temperature sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt75_init);
+module_exit(adt75_exit);
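
Both temperature drivers expose their readings through the IIO sysfs attributes registered above (value, mode, oneshot, and so on). A minimal userspace sketch that reads the adt75 value attribute is shown below; the device index under /sys/bus/iio/devices depends on registration order, so "device0" is only an assumed placeholder.

#include <stdio.h>

int main(void)
{
	/* device index depends on registration order; "device0" is a guess */
	FILE *f = fopen("/sys/bus/iio/devices/device0/value", "r");
	char buf[32];

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("temperature: %s", buf);	/* e.g. " 25.0625" degrees C */
	fclose(f);
	return 0;
}
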
diff --git a/drivers/staging/iio/addac/Kconfig b/drivers/staging/iio/addac/Kconfig
new file mode 100644
index 0000000..9847baf
--- /dev/null
+++ b/drivers/staging/iio/addac/Kconfig
@@ -0,0 +1,25 @@
+#
+# ADDAC drivers
+#
+comment "Analog-to-digital and digital-to-analog converters"
+
+config ADT7316
+	tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver"
+	help
+	  Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318
+	  and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC.
+
+config ADT7316_SPI
+	tristate "support SPI bus connection"
+	depends on SPI && ADT7316
+	default y
+	help
+	  Say yes here to build SPI bus support for Analog Devices ADT7316/7/8
+	  and ADT7516/7/9.
+
+config ADT7316_I2C
+	tristate "support I2C bus connection"
+	depends on I2C && ADT7316
+	help
+	  Say yes here to build I2C bus support for Analog Devices ADT7316/7/8
+	  and ADT7516/7/9.
diff --git a/drivers/staging/iio/addac/Makefile b/drivers/staging/iio/addac/Makefile
new file mode 100644
index 0000000..4c76861
--- /dev/null
+++ b/drivers/staging/iio/addac/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for industrial I/O ADDAC drivers
+#
+
+obj-$(CONFIG_ADT7316) += adt7316.o
+obj-$(CONFIG_ADT7316_SPI) += adt7316-spi.o
+obj-$(CONFIG_ADT7316_I2C) += adt7316-i2c.o
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
new file mode 100644
index 0000000..52d1ea3
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -0,0 +1,170 @@
+/*
+ * I2C bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
+ * sensor, ADC and DAC
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include "adt7316.h"
+
+/*
+ * adt7316 register access by I2C
+ */
+static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
+{
+	struct i2c_client *cl = client;
+	int ret = 0;
+
+	ret = i2c_smbus_write_byte(cl, reg);
+	if (ret < 0) {
+		dev_err(&cl->dev, "I2C fail to select reg\n");
+		return ret;
+	}
+
+	ret = i2c_smbus_read_byte(client);
+	if (ret < 0) {
+		dev_err(&cl->dev, "I2C read error\n");
+		return ret;
+	}
+
+	*data = (u8)ret;
+
+	return 0;
+}
+
+static int adt7316_i2c_write(void *client, u8 reg, u8 data)
+{
+	struct i2c_client *cl = client;
+	int ret = 0;
+
+	ret = i2c_smbus_write_byte_data(cl, reg, data);
+	if (ret < 0)
+		dev_err(&cl->dev, "I2C write error\n");
+
+	return ret;
+}
+
+static int adt7316_i2c_multi_read(void *client, u8 reg, u8 count, u8 *data)
+{
+	struct i2c_client *cl = client;
+	int i, ret = 0;
+
+	if (count > ADT7316_REG_MAX_ADDR)
+		count = ADT7316_REG_MAX_ADDR;
+
+	for (i = 0; i < count; i++) {
+		ret = adt7316_i2c_read(cl, reg, &data[i]);
+		if (ret < 0) {
+			dev_err(&cl->dev, "I2C multi read error\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int adt7316_i2c_multi_write(void *client, u8 reg, u8 count, u8 *data)
+{
+	struct i2c_client *cl = client;
+	int i, ret = 0;
+
+	if (count > ADT7316_REG_MAX_ADDR)
+		count = ADT7316_REG_MAX_ADDR;
+
+	for (i = 0; i < count; i++) {
+		ret = adt7316_i2c_write(cl, reg, data[i]);
+		if (ret < 0) {
+			dev_err(&cl->dev, "I2C multi write error\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7316_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct adt7316_bus bus = {
+		.client = client,
+		.irq = client->irq,
+		.irq_flags = IRQF_TRIGGER_LOW,
+		.read = adt7316_i2c_read,
+		.write = adt7316_i2c_write,
+		.multi_read = adt7316_i2c_multi_read,
+		.multi_write = adt7316_i2c_multi_write,
+	};
+
+	return adt7316_probe(&client->dev, &bus, id->name);
+}
+
+static int __devexit adt7316_i2c_remove(struct i2c_client *client)
+{
+	return adt7316_remove(&client->dev);
+}
+
+static const struct i2c_device_id adt7316_i2c_id[] = {
+	{ "adt7316", 0 },
+	{ "adt7317", 0 },
+	{ "adt7318", 0 },
+	{ "adt7516", 0 },
+	{ "adt7517", 0 },
+	{ "adt7519", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, adt7316_i2c_id);
+
+#ifdef CONFIG_PM
+static int adt7316_i2c_suspend(struct i2c_client *client, pm_message_t message)
+{
+	return adt7316_disable(&client->dev);
+}
+
+static int adt7316_i2c_resume(struct i2c_client *client)
+{
+	return adt7316_enable(&client->dev);
+}
+#else
+# define adt7316_i2c_suspend NULL
+# define adt7316_i2c_resume  NULL
+#endif
+
+static struct i2c_driver adt7316_driver = {
+	.driver = {
+		.name = "adt7316",
+		.owner  = THIS_MODULE,
+	},
+	.probe = adt7316_i2c_probe,
+	.remove = __devexit_p(adt7316_i2c_remove),
+	.suspend = adt7316_i2c_suspend,
+	.resume = adt7316_i2c_resume,
+	.id_table = adt7316_i2c_id,
+};
+
+static __init int adt7316_i2c_init(void)
+{
+	return i2c_add_driver(&adt7316_driver);
+}
+
+static __exit void adt7316_i2c_exit(void)
+{
+	i2c_del_driver(&adt7316_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/8 and "
+			"ADT7516/7/9 digital temperature sensor, ADC and DAC");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7316_i2c_init);
+module_exit(adt7316_i2c_exit);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
new file mode 100644
index 0000000..369d4d0
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -0,0 +1,180 @@
+/*
+ * SPI bus driver for ADT7316/7/8 ADT7516/7/9 digital temperature
+ * sensor, ADC and DAC
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+
+#include "adt7316.h"
+
+#define ADT7316_SPI_MAX_FREQ_HZ		5000000
+#define ADT7316_SPI_CMD_READ		0x91
+#define ADT7316_SPI_CMD_WRITE		0x90
+
+/*
+ * adt7316 register access by SPI
+ */
+
+static int adt7316_spi_multi_read(void *client, u8 reg, u8 count, u8 *data)
+{
+	struct spi_device *spi_dev = client;
+	u8 cmd[2];
+	int ret = 0;
+
+	if (count > ADT7316_REG_MAX_ADDR)
+		count = ADT7316_REG_MAX_ADDR;
+
+	cmd[0] = ADT7316_SPI_CMD_WRITE;
+	cmd[1] = reg;
+
+	ret = spi_write(spi_dev, cmd, 2);
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI fail to select reg\n");
+		return ret;
+	}
+
+	cmd[0] = ADT7316_SPI_CMD_READ;
+
+	ret = spi_write_then_read(spi_dev, cmd, 1, data, count);
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI read data error\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int adt7316_spi_multi_write(void *client, u8 reg, u8 count, u8 *data)
+{
+	struct spi_device *spi_dev = client;
+	u8 buf[ADT7316_REG_MAX_ADDR + 2];
+	int i, ret = 0;
+
+	if (count > ADT7316_REG_MAX_ADDR)
+		count = ADT7316_REG_MAX_ADDR;
+
+	buf[0] = ADT7316_SPI_CMD_WRITE;
+	buf[1] = reg;
+	for (i = 0; i < count; i++)
+		buf[i + 2] = data[i];
+
+	ret = spi_write(spi_dev, buf, count + 2);
+	if (ret < 0) {
+		dev_err(&spi_dev->dev, "SPI write error\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static int adt7316_spi_read(void *client, u8 reg, u8 *data)
+{
+	return adt7316_spi_multi_read(client, reg, 1, data);
+}
+
+static int adt7316_spi_write(void *client, u8 reg, u8 val)
+{
+	return adt7316_spi_multi_write(client, reg, 1, &val);
+}
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit adt7316_spi_probe(struct spi_device *spi_dev)
+{
+	struct adt7316_bus bus = {
+		.client = spi_dev,
+		.irq = spi_dev->irq,
+		.irq_flags = IRQF_TRIGGER_LOW,
+		.read = adt7316_spi_read,
+		.write = adt7316_spi_write,
+		.multi_read = adt7316_spi_multi_read,
+		.multi_write = adt7316_spi_multi_write,
+	};
+
+	/* don't exceed max specified SPI CLK frequency */
+	if (spi_dev->max_speed_hz > ADT7316_SPI_MAX_FREQ_HZ) {
+		dev_err(&spi_dev->dev, "SPI clock %u Hz exceeds the %d Hz limit\n",
+			spi_dev->max_speed_hz, ADT7316_SPI_MAX_FREQ_HZ);
+		return -EINVAL;
+	}
+
+	/* switch from default I2C protocol to SPI protocol */
+	adt7316_spi_write(spi_dev, 0, 0);
+	adt7316_spi_write(spi_dev, 0, 0);
+	adt7316_spi_write(spi_dev, 0, 0);
+
+	return adt7316_probe(&spi_dev->dev, &bus, spi_dev->modalias);
+}
+
+static int __devexit adt7316_spi_remove(struct spi_device *spi_dev)
+{
+	return adt7316_remove(&spi_dev->dev);
+}
+
+static const struct spi_device_id adt7316_spi_id[] = {
+	{ "adt7316", 0 },
+	{ "adt7317", 0 },
+	{ "adt7318", 0 },
+	{ "adt7516", 0 },
+	{ "adt7517", 0 },
+	{ "adt7519", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(spi, adt7316_spi_id);
+
+#ifdef CONFIG_PM
+static int adt7316_spi_suspend(struct spi_device *spi_dev, pm_message_t message)
+{
+	return adt7316_disable(&spi_dev->dev);
+}
+
+static int adt7316_spi_resume(struct spi_device *spi_dev)
+{
+	return adt7316_enable(&spi_dev->dev);
+}
+#else
+# define adt7316_spi_suspend NULL
+# define adt7316_spi_resume  NULL
+#endif
+
+static struct spi_driver adt7316_driver = {
+	.driver = {
+		.name = "adt7316",
+		.bus = &spi_bus_type,
+		.owner = THIS_MODULE,
+	},
+	.probe = adt7316_spi_probe,
+	.remove = __devexit_p(adt7316_spi_remove),
+	.suspend = adt7316_spi_suspend,
+	.resume = adt7316_spi_resume,
+	.id_table = adt7316_spi_id,
+};
+
+static __init int adt7316_spi_init(void)
+{
+	return spi_register_driver(&adt7316_driver);
+}
+
+static __exit void adt7316_spi_exit(void)
+{
+	spi_unregister_driver(&adt7316_driver);
+}
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and "
+			"ADT7516/7/9 digital temperature sensor, ADC and DAC");
+MODULE_LICENSE("GPL v2");
+
+module_init(adt7316_spi_init);
+module_exit(adt7316_spi_exit);
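
The SPI front end above rejects masters clocked faster than ADT7316_SPI_MAX_FREQ_HZ and takes its interrupt line from the SPI board info. A hedged board-file sketch follows; the bus number, chip select and irq are hypothetical and must be replaced with the real wiring.

#include <linux/spi/spi.h>

static struct spi_board_info adt7316_spi_board_info[] __initdata = {
	{
		.modalias	= "adt7316",
		.max_speed_hz	= 5000000,	/* keep at or below ADT7316_SPI_MAX_FREQ_HZ */
		.bus_num	= 0,		/* hypothetical SPI controller */
		.chip_select	= 1,		/* hypothetical chip select */
		.irq		= 42,		/* hypothetical INT line irq */
	},
};

The array would be registered during board init with spi_register_board_info(adt7316_spi_board_info, ARRAY_SIZE(adt7316_spi_board_info)).
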
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
new file mode 100644
index 0000000..d1b5b13
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -0,0 +1,2402 @@
+/*
+ * ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9
+ *
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "adt7316.h"
+
+/*
+ * ADT7316 registers definition
+ */
+#define ADT7316_INT_STAT1		0x0
+#define ADT7316_INT_STAT2		0x1
+#define ADT7316_LSB_IN_TEMP_VDD		0x3
+#define ADT7316_LSB_IN_TEMP_MASK	0x3
+#define ADT7316_LSB_VDD_MASK		0xC
+#define ADT7316_LSB_VDD_OFFSET		2
+#define ADT7316_LSB_EX_TEMP_AIN		0x4
+#define ADT7316_LSB_EX_TEMP_MASK	0x3
+#define ADT7516_LSB_AIN_SHIFT		2
+#define ADT7316_AD_MSB_DATA_BASE        0x6
+#define ADT7316_AD_MSB_DATA_REGS        3
+#define ADT7516_AD_MSB_DATA_REGS        6
+#define ADT7316_MSB_VDD			0x6
+#define ADT7316_MSB_IN_TEMP		0x7
+#define ADT7316_MSB_EX_TEMP		0x8
+#define ADT7516_MSB_AIN1		0x8
+#define ADT7516_MSB_AIN2		0x9
+#define ADT7516_MSB_AIN3		0xA
+#define ADT7516_MSB_AIN4		0xB
+#define ADT7316_DA_DATA_BASE		0x10
+#define ADT7316_DA_MSB_DATA_REGS	4
+#define ADT7316_LSB_DAC_A		0x10
+#define ADT7316_MSB_DAC_A		0x11
+#define ADT7316_LSB_DAC_B		0x12
+#define ADT7316_MSB_DAC_B		0x13
+#define ADT7316_LSB_DAC_C		0x14
+#define ADT7316_MSB_DAC_C		0x15
+#define ADT7316_LSB_DAC_D		0x16
+#define ADT7316_MSB_DAC_D		0x17
+#define ADT7316_CONFIG1			0x18
+#define ADT7316_CONFIG2			0x19
+#define ADT7316_CONFIG3			0x1A
+#define ADT7316_LDAC_CONFIG		0x1B
+#define ADT7316_DAC_CONFIG		0x1C
+#define ADT7316_INT_MASK1		0x1D
+#define ADT7316_INT_MASK2		0x1E
+#define ADT7316_IN_TEMP_OFFSET		0x1F
+#define ADT7316_EX_TEMP_OFFSET		0x20
+#define ADT7316_IN_ANALOG_TEMP_OFFSET	0x21
+#define ADT7316_EX_ANALOG_TEMP_OFFSET	0x22
+#define ADT7316_VDD_HIGH		0x23
+#define ADT7316_VDD_LOW			0x24
+#define ADT7316_IN_TEMP_HIGH		0x25
+#define ADT7316_IN_TEMP_LOW		0x26
+#define ADT7316_EX_TEMP_HIGH		0x27
+#define ADT7316_EX_TEMP_LOW		0x28
+#define ADT7516_AIN2_HIGH		0x2B
+#define ADT7516_AIN2_LOW		0x2C
+#define ADT7516_AIN3_HIGH		0x2D
+#define ADT7516_AIN3_LOW		0x2E
+#define ADT7516_AIN4_HIGH		0x2F
+#define ADT7516_AIN4_LOW		0x30
+#define ADT7316_DEVICE_ID		0x4D
+#define ADT7316_MANUFACTURE_ID		0x4E
+#define ADT7316_DEVICE_REV		0x4F
+#define ADT7316_SPI_LOCK_STAT		0x7F
+
+/*
+ * ADT7316 config1
+ */
+#define ADT7316_EN			0x1
+#define ADT7516_SEL_EX_TEMP		0x4
+#define ADT7516_SEL_AIN1_2_EX_TEMP_MASK	0x6
+#define ADT7516_SEL_AIN3		0x8
+#define ADT7316_INT_EN			0x20
+#define ADT7316_INT_POLARITY		0x40
+#define ADT7316_PD			0x80
+
+/*
+ * ADT7316 config2
+ */
+#define ADT7316_AD_SINGLE_CH_MASK	0x3
+#define ADT7516_AD_SINGLE_CH_MASK	0x7
+#define ADT7316_AD_SINGLE_CH_VDD	0
+#define ADT7316_AD_SINGLE_CH_IN		1
+#define ADT7316_AD_SINGLE_CH_EX		2
+#define ADT7516_AD_SINGLE_CH_AIN1	2
+#define ADT7516_AD_SINGLE_CH_AIN2	3
+#define ADT7516_AD_SINGLE_CH_AIN3	4
+#define ADT7516_AD_SINGLE_CH_AIN4	5
+#define ADT7316_AD_SINGLE_CH_MODE	0x10
+#define ADT7316_DISABLE_AVERAGING	0x20
+#define ADT7316_EN_SMBUS_TIMEOUT	0x40
+#define ADT7316_RESET			0x80
+
+/*
+ * ADT7316 config3
+ */
+#define ADT7316_ADCLK_22_5		0x1
+#define ADT7316_DA_HIGH_RESOLUTION	0x2
+#define ADT7316_DA_EN_VIA_DAC_LDCA	0x4
+#define ADT7516_AIN_IN_VREF		0x10
+#define ADT7316_EN_IN_TEMP_PROP_DACA	0x20
+#define ADT7316_EN_EX_TEMP_PROP_DACB	0x40
+
+/*
+ * ADT7316 DAC config
+ */
+#define ADT7316_DA_2VREF_CH_MASK	0xF
+#define ADT7316_DA_EN_MODE_MASK		0x30
+#define ADT7316_DA_EN_MODE_SINGLE	0x00
+#define ADT7316_DA_EN_MODE_AB_CD	0x10
+#define ADT7316_DA_EN_MODE_ABCD		0x20
+#define ADT7316_DA_EN_MODE_LDAC		0x30
+#define ADT7316_VREF_BYPASS_DAC_AB	0x40
+#define ADT7316_VREF_BYPASS_DAC_CD	0x80
+
+/*
+ * ADT7316 LDAC config
+ */
+#define ADT7316_LDAC_EN_DA_MASK		0xF
+#define ADT7316_DAC_IN_VREF		0x10
+#define ADT7516_DAC_AB_IN_VREF		0x10
+#define ADT7516_DAC_CD_IN_VREF		0x20
+#define ADT7516_DAC_IN_VREF_OFFSET	4
+#define ADT7516_DAC_IN_VREF_MASK	0x30
+
+/*
+ * ADT7316 INT_MASK2
+ */
+#define ADT7316_INT_MASK2_VDD		0x10
+
+/*
+ * ADT7316 value masks
+ */
+#define ADT7316_VALUE_MASK		0xfff
+#define ADT7316_T_VALUE_SIGN		0x400
+#define ADT7316_T_VALUE_FLOAT_OFFSET	2
+#define ADT7316_T_VALUE_FLOAT_MASK	0x2
+
+/*
+ * Chip ID
+ */
+#define ID_ADT7316		0x1
+#define ID_ADT7317		0x2
+#define ID_ADT7318		0x3
+#define ID_ADT7516		0x11
+#define ID_ADT7517		0x12
+#define ID_ADT7519		0x14
+
+#define ID_FAMILY_MASK		0xF0
+#define ID_ADT73XX		0x0
+#define ID_ADT75XX		0x10
+
+/*
+ * struct adt7316_chip_info - chip specific information
+ */
+
+struct adt7316_chip_info {
+	const char		*name;
+	struct iio_dev		*indio_dev;
+	struct work_struct	thresh_work;
+	s64			last_timestamp;
+	struct adt7316_bus	bus;
+	u16			ldac_pin;
+	u16			int_mask;	/* 0x2f */
+	u8			config1;
+	u8			config2;
+	u8			config3;
+	u8			dac_config;	/* DAC config */
+	u8			ldac_config;	/* LDAC config */
+	u8			dac_bits;	/* 8, 10, 12 */
+	u8			id;		/* chip id */
+};
+
+/*
+ * Logic interrupt mask for user application to enable
+ * interrupts.
+ */
+#define ADT7316_IN_TEMP_HIGH_INT_MASK	0x1
+#define ADT7316_IN_TEMP_LOW_INT_MASK	0x2
+#define ADT7316_EX_TEMP_HIGH_INT_MASK	0x4
+#define ADT7316_EX_TEMP_LOW_INT_MASK	0x8
+#define ADT7316_EX_TEMP_FAULT_INT_MASK	0x10
+#define ADT7516_AIN1_INT_MASK		0x4
+#define ADT7516_AIN2_INT_MASK		0x20
+#define ADT7516_AIN3_INT_MASK		0x40
+#define ADT7516_AIN4_INT_MASK		0x80
+#define ADT7316_VDD_INT_MASK		0x100
+#define ADT7316_TEMP_INT_MASK		0x1F
+#define ADT7516_AIN_INT_MASK		0xE0
+#define ADT7316_TEMP_AIN_INT_MASK	\
+	(ADT7316_TEMP_INT_MASK | ADT7516_AIN_INT_MASK)
+
+/*
+ * struct adt7316_limit_regs - temperature/voltage limit registers
+ */
+
+struct adt7316_limit_regs {
+	u16	data_high;
+	u16	data_low;
+};
+
+static ssize_t adt7316_show_enabled(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_EN));
+}
+
+static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip,
+		int enable)
+{
+	u8 config1;
+	int ret;
+
+	if (enable)
+		config1 = chip->config1 | ADT7316_EN;
+	else
+		config1 = chip->config1 & ~ADT7316_EN;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+	if (ret)
+		return -EIO;
+
+	chip->config1 = config1;
+
+	return ret;
+
+}
+
+static ssize_t adt7316_store_enabled(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	int enable;
+
+	if (!memcmp(buf, "1", 1))
+		enable = 1;
+	else
+		enable = 0;
+
+	if (_adt7316_store_enabled(chip, enable) < 0)
+		return -EIO;
+	else
+		return len;
+}
+
+static IIO_DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR,
+		adt7316_show_enabled,
+		adt7316_store_enabled,
+		0);
+
+static ssize_t adt7316_show_select_ex_temp(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+		return -EPERM;
+
+	return sprintf(buf, "%d\n", !!(chip->config1 & ADT7516_SEL_EX_TEMP));
+}
+
+static ssize_t adt7316_store_select_ex_temp(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config1;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+		return -EPERM;
+
+	config1 = chip->config1 & (~ADT7516_SEL_EX_TEMP);
+	if (!memcmp(buf, "1", 1))
+		config1 |= ADT7516_SEL_EX_TEMP;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+	if (ret)
+		return -EIO;
+
+	chip->config1 = config1;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(select_ex_temp, S_IRUGO | S_IWUSR,
+		adt7316_show_select_ex_temp,
+		adt7316_store_select_ex_temp,
+		0);
+
+static ssize_t adt7316_show_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if (chip->config2 & ADT7316_AD_SINGLE_CH_MODE)
+		return sprintf(buf, "single_channel\n");
+	else
+		return sprintf(buf, "round_robin\n");
+}
+
+static ssize_t adt7316_store_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config2;
+	int ret;
+
+	config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MODE);
+	if (!memcmp(buf, "single_channel", 14))
+		config2 |= ADT7316_AD_SINGLE_CH_MODE;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+	if (ret)
+		return -EIO;
+
+	chip->config2 = config2;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+		adt7316_show_mode,
+		adt7316_store_mode,
+		0);
+
+static ssize_t adt7316_show_all_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return sprintf(buf, "single_channel\nround_robin\n");
+}
+
+static IIO_DEVICE_ATTR(all_modes, S_IRUGO, adt7316_show_all_modes, NULL, 0);
+
+static ssize_t adt7316_show_ad_channel(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
+		return -EPERM;
+
+	switch (chip->config2 & ADT7516_AD_SINGLE_CH_MASK) {
+	case ADT7316_AD_SINGLE_CH_VDD:
+		return sprintf(buf, "0 - VDD\n");
+	case ADT7316_AD_SINGLE_CH_IN:
+		return sprintf(buf, "1 - Internal Temperature\n");
+	case ADT7316_AD_SINGLE_CH_EX:
+		if (((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) &&
+			(chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
+			return sprintf(buf, "2 - AIN1\n");
+		else
+			return sprintf(buf, "2 - External Temperature\n");
+	case ADT7516_AD_SINGLE_CH_AIN2:
+		if ((chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)
+			return sprintf(buf, "3 - AIN2\n");
+		else
+			return sprintf(buf, "N/A\n");
+	case ADT7516_AD_SINGLE_CH_AIN3:
+		if (chip->config1 & ADT7516_SEL_AIN3)
+			return sprintf(buf, "4 - AIN3\n");
+		else
+			return sprintf(buf, "N/A\n");
+	case ADT7516_AD_SINGLE_CH_AIN4:
+		return sprintf(buf, "5 - AIN4\n");
+	default:
+		return sprintf(buf, "N/A\n");
+	}
+}
+
+static ssize_t adt7316_store_ad_channel(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config2;
+	unsigned long data = 0;
+	int ret;
+
+	if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
+		return -EPERM;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
+		if (data > 5)
+			return -EINVAL;
+
+		config2 = chip->config2 & (~ADT7516_AD_SINGLE_CH_MASK);
+	} else {
+		if (data > 2)
+			return -EINVAL;
+
+		config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MASK);
+	}
+
+
+	config2 |= data;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+	if (ret)
+		return -EIO;
+
+	chip->config2 = config2;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(ad_channel, S_IRUGO | S_IWUSR,
+		adt7316_show_ad_channel,
+		adt7316_store_ad_channel,
+		0);
+
+static ssize_t adt7316_show_all_ad_channels(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
+		return -EPERM;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
+				"2 - External Temperature or AIN1\n"
+				"3 - AIN2\n4 - AIN3\n5 - AIN4\n");
+	else
+		return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
+				"2 - External Temperature\n");
+}
+
+static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO,
+		adt7316_show_all_ad_channels, NULL, 0);
+
+static ssize_t adt7316_show_disable_averaging(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->config2 & ADT7316_DISABLE_AVERAGING));
+}
+
+static ssize_t adt7316_store_disable_averaging(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config2;
+	int ret;
+
+	config2 = chip->config2 & (~ADT7316_DISABLE_AVERAGING);
+	if (!memcmp(buf, "1", 1))
+		config2 |= ADT7316_DISABLE_AVERAGING;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+	if (ret)
+		return -EIO;
+
+	chip->config2 = config2;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(disable_averaging, S_IRUGO | S_IWUSR,
+		adt7316_show_disable_averaging,
+		adt7316_store_disable_averaging,
+		0);
+
+static ssize_t adt7316_show_enable_smbus_timeout(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->config2 & ADT7316_EN_SMBUS_TIMEOUT));
+}
+
+static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config2;
+	int ret;
+
+	config2 = chip->config2 & (~ADT7316_EN_SMBUS_TIMEOUT);
+	if (!memcmp(buf, "1", 1))
+		config2 |= ADT7316_EN_SMBUS_TIMEOUT;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+	if (ret)
+		return -EIO;
+
+	chip->config2 = config2;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR,
+		adt7316_show_enable_smbus_timeout,
+		adt7316_store_enable_smbus_timeout,
+		0);
+
+
+static ssize_t adt7316_store_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config2;
+	int ret;
+
+	config2 = chip->config2 | ADT7316_RESET;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+		NULL,
+		adt7316_store_reset,
+		0);
+
+static ssize_t adt7316_show_powerdown(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_PD));
+}
+
+static ssize_t adt7316_store_powerdown(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config1;
+	int ret;
+
+	config1 = chip->config1 & (~ADT7316_PD);
+	if (!memcmp(buf, "1", 1))
+		config1 |= ADT7316_PD;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+	if (ret)
+		return -EIO;
+
+	chip->config1 = config1;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(powerdown, S_IRUGO | S_IWUSR,
+		adt7316_show_powerdown,
+		adt7316_store_powerdown,
+		0);
+
+static ssize_t adt7316_show_fast_ad_clock(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5));
+}
+
+static ssize_t adt7316_store_fast_ad_clock(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config3;
+	int ret;
+
+	config3 = chip->config3 & (~ADT7316_ADCLK_22_5);
+	if (!memcmp(buf, "1", 1))
+		config3 |= ADT7316_ADCLK_22_5;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+	if (ret)
+		return -EIO;
+
+	chip->config3 = config3;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(fast_ad_clock, S_IRUGO | S_IWUSR,
+		adt7316_show_fast_ad_clock,
+		adt7316_store_fast_ad_clock,
+		0);
+
+static ssize_t adt7316_show_da_high_resolution(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
+		if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
+			return sprintf(buf, "1 (12 bits)\n");
+		else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+			return sprintf(buf, "1 (10 bits)\n");
+	}
+
+	return sprintf(buf, "0 (8 bits)\n");
+}
+
+static ssize_t adt7316_store_da_high_resolution(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config3;
+	u8 dac_bits = 8;
+	int ret;
+
+	if (!memcmp(buf, "1", 1)) {
+		config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
+		if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
+			dac_bits = 12;
+		else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+			dac_bits = 10;
+	} else {
+		config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
+	}
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+	if (ret)
+		return -EIO;
+
+	/* only update the cached resolution after a successful write */
+	chip->dac_bits = dac_bits;
+	chip->config3 = config3;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(da_high_resolution, S_IRUGO | S_IWUSR,
+		adt7316_show_da_high_resolution,
+		adt7316_store_da_high_resolution,
+		0);
+
+static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+		return -EPERM;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->config3 & ADT7516_AIN_IN_VREF));
+}
+
+static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config3;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
+		return -EPERM;
+
+	if (memcmp(buf, "1", 1))
+		config3 = chip->config3 & (~ADT7516_AIN_IN_VREF);
+	else
+		config3 = chip->config3 | ADT7516_AIN_IN_VREF;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+	if (ret)
+		return -EIO;
+
+	chip->config3 = config3;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(AIN_internal_Vref, S_IRUGO | S_IWUSR,
+		adt7316_show_AIN_internal_Vref,
+		adt7316_store_AIN_internal_Vref,
+		0);
+
+static ssize_t adt7316_show_enable_prop_DACA(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA));
+}
+
+static ssize_t adt7316_store_enable_prop_DACA(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config3;
+	int ret;
+
+	config3 = chip->config3 & (~ADT7316_EN_IN_TEMP_PROP_DACA);
+	if (!memcmp(buf, "1", 1))
+		config3 |= ADT7316_EN_IN_TEMP_PROP_DACA;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+	if (ret)
+		return -EIO;
+
+	chip->config3 = config3;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(enable_proportion_DACA, S_IRUGO | S_IWUSR,
+		adt7316_show_enable_prop_DACA,
+		adt7316_store_enable_prop_DACA,
+		0);
+
+static ssize_t adt7316_show_enable_prop_DACB(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB));
+}
+
+static ssize_t adt7316_store_enable_prop_DACB(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config3;
+	int ret;
+
+	config3 = chip->config3 & (~ADT7316_EN_EX_TEMP_PROP_DACB);
+	if (!memcmp(buf, "1", 1))
+		config3 |= ADT7316_EN_EX_TEMP_PROP_DACB;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
+	if (ret)
+		return -EIO;
+
+	chip->config3 = config3;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(enable_proportion_DACB, S_IRUGO | S_IWUSR,
+		adt7316_show_enable_prop_DACB,
+		adt7316_store_enable_prop_DACB,
+		0);
+
+static ssize_t adt7316_show_DAC_2Vref_ch_mask(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%x\n",
+		chip->dac_config & ADT7316_DA_2VREF_CH_MASK);
+}
+
+static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 dac_config;
+	unsigned long data = 0;
+	int ret;
+
+	ret = strict_strtoul(buf, 16, &data);
+	if (ret || data > ADT7316_DA_2VREF_CH_MASK)
+		return -EINVAL;
+
+	dac_config = chip->dac_config & (~ADT7316_DA_2VREF_CH_MASK);
+	dac_config |= data;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+	if (ret)
+		return -EIO;
+
+	chip->dac_config = dac_config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, S_IRUGO | S_IWUSR,
+		adt7316_show_DAC_2Vref_ch_mask,
+		adt7316_store_DAC_2Vref_ch_mask,
+		0);
+
+static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+		return sprintf(buf, "manual\n");
+	else {
+		switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) {
+		case ADT7316_DA_EN_MODE_SINGLE:
+			return sprintf(buf, "0 - auto at any MSB DAC writing\n");
+		case ADT7316_DA_EN_MODE_AB_CD:
+			return sprintf(buf, "1 - auto at MSB DAC AB and CD writing\n");
+		case ADT7316_DA_EN_MODE_ABCD:
+			return sprintf(buf, "2 - auto at MSB DAC ABCD writing\n");
+		default: /* ADT7316_DA_EN_MODE_LDAC */
+			return sprintf(buf, "3 - manual\n");
+		}
+	}
+}
+
+static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 dac_config;
+	unsigned long data;
+	int ret;
+
+	if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
+		return -EPERM;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret || data > ADT7316_DA_EN_MODE_MASK)
+		return -EINVAL;
+
+	dac_config = chip->dac_config & (~ADT7316_DA_EN_MODE_MASK);
+	dac_config |= data;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+	if (ret)
+		return -EIO;
+
+	chip->dac_config = dac_config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(DAC_update_mode, S_IRUGO | S_IWUSR,
+		adt7316_show_DAC_update_mode,
+		adt7316_store_DAC_update_mode,
+		0);
+
+static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)
+		return sprintf(buf, "0 - auto at any MSB DAC writing\n"
+				"1 - auto at MSB DAC AB and CD writing\n"
+				"2 - auto at MSB DAC ABCD writing\n"
+				"3 - manual\n");
+	else
+		return sprintf(buf, "manual\n");
+}
+
+static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO,
+		adt7316_show_all_DAC_update_modes, NULL, 0);
+
+static ssize_t adt7316_store_update_DAC(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 ldac_config;
+	unsigned long data;
+	int ret;
+
+	if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) {
+		if ((chip->dac_config & ADT7316_DA_EN_MODE_MASK) !=
+			ADT7316_DA_EN_MODE_LDAC)
+			return -EPERM;
+
+		ret = strict_strtoul(buf, 16, &data);
+		if (ret || data > ADT7316_LDAC_EN_DA_MASK)
+			return -EINVAL;
+
+		ldac_config = chip->ldac_config & (~ADT7316_LDAC_EN_DA_MASK);
+		ldac_config |= data;
+
+		ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG,
+			ldac_config);
+		if (ret)
+			return -EIO;
+	} else {
+		gpio_set_value(chip->ldac_pin, 0);
+		gpio_set_value(chip->ldac_pin, 1);
+	}
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(update_DAC, S_IRUGO | S_IWUSR,
+		NULL,
+		adt7316_store_update_DAC,
+		0);
+
+static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		return -EPERM;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_AB));
+}
+
+static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 dac_config;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		return -EPERM;
+
+	dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_AB);
+	if (!memcmp(buf, "1", 1))
+		dac_config |= ADT7316_VREF_BYPASS_DAC_AB;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+	if (ret)
+		return -EIO;
+
+	chip->dac_config = dac_config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, S_IRUGO | S_IWUSR,
+		adt7316_show_DA_AB_Vref_bypass,
+		adt7316_store_DA_AB_Vref_bypass,
+		0);
+
+static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		return -EPERM;
+
+	return sprintf(buf, "%d\n",
+		!!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_CD));
+}
+
+static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 dac_config;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		return -EPERM;
+
+	dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_CD);
+	if (!memcmp(buf, "1", 1))
+		dac_config |= ADT7316_VREF_BYPASS_DAC_CD;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config);
+	if (ret)
+		return -EIO;
+
+	chip->dac_config = dac_config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, S_IRUGO | S_IWUSR,
+		adt7316_show_DA_CD_Vref_bypass,
+		adt7316_store_DA_CD_Vref_bypass,
+		0);
+
+static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		return sprintf(buf, "0x%x\n",
+			(chip->dac_config & ADT7516_DAC_IN_VREF_MASK) >>
+			ADT7516_DAC_IN_VREF_OFFSET);
+	else
+		return sprintf(buf, "%d\n",
+			!!(chip->dac_config & ADT7316_DAC_IN_VREF));
+}
+
+static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 ldac_config;
+	unsigned long data;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
+		ret = strict_strtoul(buf, 16, &data);
+		if (ret || data > 3)
+			return -EINVAL;
+
+		ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
+		if (data & 0x1)
+			ldac_config |= ADT7516_DAC_AB_IN_VREF;
+		if (data & 0x2)
+			ldac_config |= ADT7516_DAC_CD_IN_VREF;
+	} else {
+		ret = strict_strtoul(buf, 16, &data);
+		if (ret)
+			return -EINVAL;
+
+		ldac_config = chip->ldac_config & (~ADT7316_DAC_IN_VREF);
+		if (data)
+			ldac_config |= ADT7316_DAC_IN_VREF;
+	}
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config);
+	if (ret)
+		return -EIO;
+
+	chip->ldac_config = ldac_config;
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(DAC_internal_Vref, S_IRUGO | S_IWUSR,
+		adt7316_show_DAC_internal_Vref,
+		adt7316_store_DAC_internal_Vref,
+		0);
+
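+/*
+ * Read one A/D channel: the LSB register is read before the matching MSB
+ * register and the two are combined; temperature readings are converted
+ * from two's complement and printed with a 0.25 degree fractional step.
+ */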
+static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip,
+		int channel, char *buf)
+{
+	u16 data;
+	u8 msb, lsb;
+	char sign = ' ';
+	int ret;
+
+	if ((chip->config2 & ADT7316_AD_SINGLE_CH_MODE) &&
+		channel != (chip->config2 & ADT7516_AD_SINGLE_CH_MASK))
+		return -EPERM;
+
+	switch (channel) {
+	case ADT7316_AD_SINGLE_CH_IN:
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_LSB_IN_TEMP_VDD, &lsb);
+		if (ret)
+			return -EIO;
+
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_AD_MSB_DATA_BASE + channel, &msb);
+		if (ret)
+			return -EIO;
+
+		data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
+		data |= lsb & ADT7316_LSB_IN_TEMP_MASK;
+		break;
+	case ADT7316_AD_SINGLE_CH_VDD:
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_LSB_IN_TEMP_VDD, &lsb);
+		if (ret)
+			return -EIO;
+
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_AD_MSB_DATA_BASE + channel, &msb);
+		if (ret)
+			return -EIO;
+
+		data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
+		data |= (lsb & ADT7316_LSB_VDD_MASK) >> ADT7316_LSB_VDD_OFFSET;
+		return sprintf(buf, "%d\n", data);
+	default: /* ex_temp and ain */
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_LSB_EX_TEMP_AIN, &lsb);
+		if (ret)
+			return -EIO;
+
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_AD_MSB_DATA_BASE + channel, &msb);
+		if (ret)
+			return -EIO;
+
+		data = msb << ADT7316_T_VALUE_FLOAT_OFFSET;
+		data |= lsb & (ADT7316_LSB_EX_TEMP_MASK <<
+			(ADT7516_LSB_AIN_SHIFT * (channel -
+			(ADT7316_MSB_EX_TEMP - ADT7316_AD_MSB_DATA_BASE))));
+
+		if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+			return sprintf(buf, "%d\n", data);
+		else
+			break;
+	}
+
+	if (data & ADT7316_T_VALUE_SIGN) {
+		/* convert supplement to positive value */
+		data = (ADT7316_T_VALUE_SIGN << 1) - data;
+		sign = '-';
+	}
+
+	return sprintf(buf, "%c%d.%.2d\n", sign,
+		(data >> ADT7316_T_VALUE_FLOAT_OFFSET),
+		(data & ADT7316_T_VALUE_FLOAT_MASK) * 25);
+}
+
+static ssize_t adt7316_show_VDD(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf);
+}
+static IIO_DEVICE_ATTR(VDD, S_IRUGO, adt7316_show_VDD, NULL, 0);
+
+static ssize_t adt7316_show_in_temp(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf);
+}
+
+static IIO_DEVICE_ATTR(in_temp, S_IRUGO, adt7316_show_in_temp, NULL, 0);
+
+static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
+}
+
+static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0);
+
+static ssize_t adt7316_show_AIN2(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf);
+}
+static IIO_DEVICE_ATTR(AIN2, S_IRUGO, adt7316_show_AIN2, NULL, 0);
+
+static ssize_t adt7316_show_AIN3(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf);
+}
+static IIO_DEVICE_ATTR(AIN3, S_IRUGO, adt7316_show_AIN3, NULL, 0);
+
+static ssize_t adt7316_show_AIN4(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf);
+}
+static IIO_DEVICE_ATTR(AIN4, S_IRUGO, adt7316_show_AIN4, NULL, 0);
+
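+/*
+ * The temperature offset registers hold a signed 8-bit value; the helpers
+ * below convert between the register's two's complement encoding and a
+ * decimal value in the range -128 to 127.
+ */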
+static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip,
+		int offset_addr, char *buf)
+{
+	int data;
+	u8 val;
+	int ret;
+
+	ret = chip->bus.read(chip->bus.client, offset_addr, &val);
+	if (ret)
+		return -EIO;
+
+	data = (int)val;
+	if (val & 0x80)
+		data -= 256;
+
+	return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip,
+		int offset_addr, const char *buf, size_t len)
+{
+	long data;
+	u8 val;
+	int ret;
+
+	ret = strict_strtol(buf, 10, &data);
+	if (ret || data > 127 || data < -128)
+		return -EINVAL;
+
+	if (data < 0)
+		data += 256;
+
+	val = (u8)data;
+
+	ret = chip->bus.write(chip->bus.client, offset_addr, val);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t adt7316_show_in_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_in_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR,
+		adt7316_show_in_temp_offset,
+		adt7316_store_in_temp_offset, 0);
+
+static ssize_t adt7316_show_ex_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR,
+		adt7316_show_ex_temp_offset,
+		adt7316_store_ex_temp_offset, 0);
+
+static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_temp_offset(chip,
+			ADT7316_IN_ANALOG_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_temp_offset(chip,
+			ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(in_analog_temp_offset, S_IRUGO | S_IWUSR,
+		adt7316_show_in_analog_temp_offset,
+		adt7316_store_in_analog_temp_offset, 0);
+
+static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_temp_offset(chip,
+			ADT7316_EX_ANALOG_TEMP_OFFSET, buf);
+}
+
+static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_temp_offset(chip,
+			ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len);
+}
+
+static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR,
+		adt7316_show_ex_analog_temp_offset,
+		adt7316_store_ex_analog_temp_offset, 0);
+
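+/*
+ * DAC data access: for resolutions above 8 bits the extra low order bits
+ * live in a separate LSB register, so both the LSB and MSB data registers
+ * are accessed. Channels driven by the thermal proportional mode (DAC A/B)
+ * cannot be read or written directly.
+ */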
+static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
+		int channel, char *buf)
+{
+	u16 data;
+	u8 msb, lsb = 0, offset;
+	int ret;
+
+	if (channel >= ADT7316_DA_MSB_DATA_REGS ||
+		(channel == 0 &&
+		(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
+		(channel == 1 &&
+		(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
+		return -EPERM;
+
+	offset = chip->dac_bits - 8;
+
+	if (chip->dac_bits > 8) {
+		ret = chip->bus.read(chip->bus.client,
+			ADT7316_DA_DATA_BASE + channel * 2, &lsb);
+		if (ret)
+			return -EIO;
+	}
+
+	ret = chip->bus.read(chip->bus.client,
+		ADT7316_DA_DATA_BASE + 1 + channel * 2, &msb);
+	if (ret)
+		return -EIO;
+
+	data = (msb << offset) + (lsb & ((1 << offset) - 1));
+
+	return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
+		int channel, const char *buf, size_t len)
+{
+	u8 msb, lsb, offset;
+	unsigned long data;
+	int ret;
+
+	if (channel >= ADT7316_DA_MSB_DATA_REGS ||
+		(channel == 0 &&
+		(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) ||
+		(channel == 1 &&
+		(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)))
+		return -EPERM;
+
+	offset = chip->dac_bits - 8;
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret || data >= (1 << chip->dac_bits))
+		return -EINVAL;
+
+	if (chip->dac_bits > 8) {
+		lsb = data & ((1 << offset) - 1);
+		ret = chip->bus.write(chip->bus.client,
+			ADT7316_DA_DATA_BASE + channel * 2, lsb);
+		if (ret)
+			return -EIO;
+	}
+
+	msb = data >> offset;
+	ret = chip->bus.write(chip->bus.client,
+		ADT7316_DA_DATA_BASE + 1 + channel * 2, msb);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t adt7316_show_DAC_A(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_DAC(chip, 0, buf);
+}
+
+static ssize_t adt7316_store_DAC_A(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_DAC(chip, 0, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_A, S_IRUGO | S_IWUSR, adt7316_show_DAC_A,
+		adt7316_store_DAC_A, 0);
+
+static ssize_t adt7316_show_DAC_B(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_DAC(chip, 1, buf);
+}
+
+static ssize_t adt7316_store_DAC_B(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_DAC(chip, 1, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_B, S_IRUGO | S_IWUSR, adt7316_show_DAC_B,
+		adt7316_store_DAC_B, 0);
+
+static ssize_t adt7316_show_DAC_C(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_DAC(chip, 2, buf);
+}
+
+static ssize_t adt7316_store_DAC_C(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_DAC(chip, 2, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_C, S_IRUGO | S_IWUSR, adt7316_show_DAC_C,
+		adt7316_store_DAC_C, 0);
+
+static ssize_t adt7316_show_DAC_D(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_show_DAC(chip, 3, buf);
+}
+
+static ssize_t adt7316_store_DAC_D(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return adt7316_store_DAC(chip, 3, buf, len);
+}
+
+static IIO_DEVICE_ATTR(DAC_D, S_IRUGO | S_IWUSR, adt7316_show_DAC_D,
+		adt7316_store_DAC_D, 0);
+
+static ssize_t adt7316_show_device_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 id;
+	int ret;
+
+	ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_ID, &id);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", id);
+}
+
+static IIO_DEVICE_ATTR(device_id, S_IRUGO, adt7316_show_device_id, NULL, 0);
+
+static ssize_t adt7316_show_manufacturer_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 id;
+	int ret;
+
+	ret = chip->bus.read(chip->bus.client, ADT7316_MANUFACTURE_ID, &id);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", id);
+}
+
+static IIO_DEVICE_ATTR(manufacturer_id, S_IRUGO,
+		adt7316_show_manufacturer_id, NULL, 0);
+
+static ssize_t adt7316_show_device_rev(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 rev;
+	int ret;
+
+	ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_REV, &rev);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%d\n", rev);
+}
+
+static IIO_DEVICE_ATTR(device_rev, S_IRUGO, adt7316_show_device_rev, NULL, 0);
+
+static ssize_t adt7316_show_bus_type(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 stat;
+	int ret;
+
+	ret = chip->bus.read(chip->bus.client, ADT7316_SPI_LOCK_STAT, &stat);
+	if (ret)
+		return -EIO;
+
+	if (stat)
+		return sprintf(buf, "spi\n");
+	else
+		return sprintf(buf, "i2c\n");
+}
+
+static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0);
+
+static ssize_t adt7316_show_name(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%s\n", chip->name);
+}
+
+static IIO_DEVICE_ATTR(name, S_IRUGO, adt7316_show_name, NULL, 0);
+
+static struct attribute *adt7316_attributes[] = {
+	&iio_dev_attr_all_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_dev_attr_enabled.dev_attr.attr,
+	&iio_dev_attr_ad_channel.dev_attr.attr,
+	&iio_dev_attr_all_ad_channels.dev_attr.attr,
+	&iio_dev_attr_disable_averaging.dev_attr.attr,
+	&iio_dev_attr_enable_smbus_timeout.dev_attr.attr,
+	&iio_dev_attr_powerdown.dev_attr.attr,
+	&iio_dev_attr_fast_ad_clock.dev_attr.attr,
+	&iio_dev_attr_da_high_resolution.dev_attr.attr,
+	&iio_dev_attr_enable_proportion_DACA.dev_attr.attr,
+	&iio_dev_attr_enable_proportion_DACB.dev_attr.attr,
+	&iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr,
+	&iio_dev_attr_DAC_update_mode.dev_attr.attr,
+	&iio_dev_attr_all_DAC_update_modes.dev_attr.attr,
+	&iio_dev_attr_update_DAC.dev_attr.attr,
+	&iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr,
+	&iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr,
+	&iio_dev_attr_DAC_internal_Vref.dev_attr.attr,
+	&iio_dev_attr_VDD.dev_attr.attr,
+	&iio_dev_attr_in_temp.dev_attr.attr,
+	&iio_dev_attr_ex_temp.dev_attr.attr,
+	&iio_dev_attr_in_temp_offset.dev_attr.attr,
+	&iio_dev_attr_ex_temp_offset.dev_attr.attr,
+	&iio_dev_attr_in_analog_temp_offset.dev_attr.attr,
+	&iio_dev_attr_ex_analog_temp_offset.dev_attr.attr,
+	&iio_dev_attr_DAC_A.dev_attr.attr,
+	&iio_dev_attr_DAC_B.dev_attr.attr,
+	&iio_dev_attr_DAC_C.dev_attr.attr,
+	&iio_dev_attr_DAC_D.dev_attr.attr,
+	&iio_dev_attr_device_id.dev_attr.attr,
+	&iio_dev_attr_manufacturer_id.dev_attr.attr,
+	&iio_dev_attr_device_rev.dev_attr.attr,
+	&iio_dev_attr_bus_type.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group adt7316_attribute_group = {
+	.attrs = adt7316_attributes,
+};
+
+static struct attribute *adt7516_attributes[] = {
+	&iio_dev_attr_all_modes.dev_attr.attr,
+	&iio_dev_attr_mode.dev_attr.attr,
+	&iio_dev_attr_select_ex_temp.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_dev_attr_enabled.dev_attr.attr,
+	&iio_dev_attr_ad_channel.dev_attr.attr,
+	&iio_dev_attr_all_ad_channels.dev_attr.attr,
+	&iio_dev_attr_disable_averaging.dev_attr.attr,
+	&iio_dev_attr_enable_smbus_timeout.dev_attr.attr,
+	&iio_dev_attr_powerdown.dev_attr.attr,
+	&iio_dev_attr_fast_ad_clock.dev_attr.attr,
+	&iio_dev_attr_AIN_internal_Vref.dev_attr.attr,
+	&iio_dev_attr_da_high_resolution.dev_attr.attr,
+	&iio_dev_attr_enable_proportion_DACA.dev_attr.attr,
+	&iio_dev_attr_enable_proportion_DACB.dev_attr.attr,
+	&iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr,
+	&iio_dev_attr_DAC_update_mode.dev_attr.attr,
+	&iio_dev_attr_all_DAC_update_modes.dev_attr.attr,
+	&iio_dev_attr_update_DAC.dev_attr.attr,
+	&iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr,
+	&iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr,
+	&iio_dev_attr_DAC_internal_Vref.dev_attr.attr,
+	&iio_dev_attr_VDD.dev_attr.attr,
+	&iio_dev_attr_in_temp.dev_attr.attr,
+	&iio_dev_attr_ex_temp_AIN1.dev_attr.attr,
+	&iio_dev_attr_AIN2.dev_attr.attr,
+	&iio_dev_attr_AIN3.dev_attr.attr,
+	&iio_dev_attr_AIN4.dev_attr.attr,
+	&iio_dev_attr_in_temp_offset.dev_attr.attr,
+	&iio_dev_attr_ex_temp_offset.dev_attr.attr,
+	&iio_dev_attr_in_analog_temp_offset.dev_attr.attr,
+	&iio_dev_attr_ex_analog_temp_offset.dev_attr.attr,
+	&iio_dev_attr_DAC_A.dev_attr.attr,
+	&iio_dev_attr_DAC_B.dev_attr.attr,
+	&iio_dev_attr_DAC_C.dev_attr.attr,
+	&iio_dev_attr_DAC_D.dev_attr.attr,
+	&iio_dev_attr_device_id.dev_attr.attr,
+	&iio_dev_attr_manufacturer_id.dev_attr.attr,
+	&iio_dev_attr_device_rev.dev_attr.attr,
+	&iio_dev_attr_bus_type.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group adt7516_attribute_group = {
+	.attrs = adt7516_attributes,
+};
+
+/*
+ * temperature bound events
+ */
+
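+/*
+ * The codes below follow the bit order of the interrupt status registers,
+ * so the bottom half can derive a code by adding the status bit index to
+ * IIO_EVENT_CODE_ADT7316_IN_TEMP_HIGH.
+ */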
+#define IIO_EVENT_CODE_ADT7316_IN_TEMP_HIGH   IIO_BUFFER_EVENT_CODE(0)
+#define IIO_EVENT_CODE_ADT7316_IN_TEMP_LOW    IIO_BUFFER_EVENT_CODE(1)
+#define IIO_EVENT_CODE_ADT7316_EX_TEMP_HIGH   IIO_BUFFER_EVENT_CODE(2)
+#define IIO_EVENT_CODE_ADT7316_EX_TEMP_LOW    IIO_BUFFER_EVENT_CODE(3)
+#define IIO_EVENT_CODE_ADT7316_EX_TEMP_FAULT  IIO_BUFFER_EVENT_CODE(4)
+#define IIO_EVENT_CODE_ADT7516_AIN1           IIO_BUFFER_EVENT_CODE(5)
+#define IIO_EVENT_CODE_ADT7516_AIN2           IIO_BUFFER_EVENT_CODE(6)
+#define IIO_EVENT_CODE_ADT7516_AIN3           IIO_BUFFER_EVENT_CODE(7)
+#define IIO_EVENT_CODE_ADT7516_AIN4           IIO_BUFFER_EVENT_CODE(8)
+#define IIO_EVENT_CODE_ADT7316_VDD            IIO_BUFFER_EVENT_CODE(9)
+
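+/*
+ * Threshold event bottom half: read both interrupt status registers, push
+ * an IIO event for every asserted status bit and re-enable the device IRQ.
+ */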
+static void adt7316_interrupt_bh(struct work_struct *work_s)
+{
+	struct adt7316_chip_info *chip =
+		container_of(work_s, struct adt7316_chip_info, thresh_work);
+	u8 stat1, stat2;
+	int i, ret, count;
+
+	ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT1, &stat1);
+	if (!ret) {
+		if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+			count = 8;
+		else
+			count = 5;
+
+		for (i = 0; i < count; i++) {
+			if (stat1 & (1 << i))
+				iio_push_event(chip->indio_dev, 0,
+					IIO_EVENT_CODE_ADT7316_IN_TEMP_HIGH + i,
+					chip->last_timestamp);
+		}
+	}
+
+	ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT2, &stat2);
+	if (!ret) {
+		if (stat2 & ADT7316_INT_MASK2_VDD)
+			iio_push_event(chip->indio_dev, 0,
+				IIO_EVENT_CODE_ADT7316_VDD,
+				chip->last_timestamp);
+	}
+
+	enable_irq(chip->bus.irq);
+}
+
+static int adt7316_interrupt(struct iio_dev *dev_info,
+		int index,
+		s64 timestamp,
+		int no_test)
+{
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	chip->last_timestamp = timestamp;
+	schedule_work(&chip->thresh_work);
+
+	return 0;
+}
+
+IIO_EVENT_SH(adt7316, &adt7316_interrupt);
+
+/*
+ * Show the mask of enabled interrupts in hex.
+ */
+static ssize_t adt7316_show_int_mask(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "0x%x\n", chip->int_mask);
+}
+
+/*
+ * Write the interrupt mask in hex; setting a bit to 1 enables the
+ * corresponding interrupt.
+ */
+static ssize_t adt7316_set_int_mask(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	unsigned long data;
+	int ret;
+	u8 mask;
+
+	ret = strict_strtoul(buf, 16, &data);
+	if (ret || data >= ADT7316_VDD_INT_MASK + 1)
+		return -EINVAL;
+
+	if (data & ADT7316_VDD_INT_MASK)
+		mask = 0;			/* enable vdd int */
+	else
+		mask = ADT7316_INT_MASK2_VDD;	/* disable vdd int */
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK2, mask);
+	if (!ret) {
+		chip->int_mask &= ~ADT7316_VDD_INT_MASK;
+		chip->int_mask |= data & ADT7316_VDD_INT_MASK;
+	}
+
+	/* the register mask is inverted: set a bit to 1 to disable */
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX)
+		mask = (~data) & ADT7316_TEMP_INT_MASK;
+	else
+		mask = (~data) & ADT7316_TEMP_AIN_INT_MASK;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK1, mask);
+	if (!ret) {
+		chip->int_mask &= ADT7316_VDD_INT_MASK;
+		chip->int_mask |= data & ADT7316_TEMP_AIN_INT_MASK;
+	}
+
+	return len;
+}
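+
+/*
+ * Helpers for the A/D threshold ("bound") registers. The ADT73xx parts only
+ * provide temperature bounds; on the ADT75xx parts the value is treated as
+ * unsigned when ADT7516_SEL_AIN1_2_EX_TEMP_MASK is cleared in config1.
+ */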
+static inline ssize_t adt7316_show_ad_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 val;
+	int data;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
+		bound_reg > ADT7316_EX_TEMP_LOW)
+		return -EPERM;
+
+	ret = chip->bus.read(chip->bus.client, bound_reg, &val);
+	if (ret)
+		return -EIO;
+
+	data = (int)val;
+
+	if (!((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
+		(chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)) {
+		if (data & 0x80)
+			data -= 256;
+	}
+
+	return sprintf(buf, "%d\n", data);
+}
+
+static inline ssize_t adt7316_set_ad_bound(struct device *dev,
+		struct device_attribute *attr,
+		u8 bound_reg,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	long data;
+	u8 val;
+	int ret;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
+		bound_reg > ADT7316_EX_TEMP_LOW)
+		return -EPERM;
+
+	ret = strict_strtol(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
+		(chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) {
+		if (data > 255 || data < 0)
+			return -EINVAL;
+	} else {
+		if (data > 127 || data < -128)
+			return -EINVAL;
+
+		if (data < 0)
+			data += 256;
+	}
+
+	val = (u8)data;
+
+	ret = chip->bus.write(chip->bus.client, bound_reg, val);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t adt7316_show_in_temp_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7316_IN_TEMP_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_in_temp_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7316_IN_TEMP_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_in_temp_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7316_IN_TEMP_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_in_temp_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7316_IN_TEMP_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ex_temp_ain1_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7316_EX_TEMP_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ex_temp_ain1_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7316_EX_TEMP_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ex_temp_ain1_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7316_EX_TEMP_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ex_temp_ain1_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7316_EX_TEMP_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ain2_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7516_AIN2_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ain2_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7516_AIN2_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ain2_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7516_AIN2_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ain2_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7516_AIN2_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ain3_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7516_AIN3_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ain3_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7516_AIN3_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ain3_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7516_AIN3_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ain3_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7516_AIN3_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_ain4_high(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7516_AIN4_HIGH, buf);
+}
+
+static inline ssize_t adt7316_set_ain4_high(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7516_AIN4_HIGH, buf, len);
+}
+
+static ssize_t adt7316_show_ain4_low(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return adt7316_show_ad_bound(dev, attr,
+			ADT7516_AIN4_LOW, buf);
+}
+
+static inline ssize_t adt7316_set_ain4_low(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	return adt7316_set_ad_bound(dev, attr,
+			ADT7516_AIN4_LOW, buf, len);
+}
+
+static ssize_t adt7316_show_int_enabled(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN));
+}
+
+static ssize_t adt7316_set_int_enabled(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	u8 config1;
+	int ret;
+
+	config1 = chip->config1 & (~ADT7316_INT_EN);
+	if (!memcmp(buf, "1", 1))
+		config1 |= ADT7316_INT_EN;
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1);
+	if (ret)
+		return -EIO;
+
+	chip->config1 = config1;
+
+	return len;
+}
+
+IIO_EVENT_ATTR_SH(int_mask, iio_event_adt7316,
+		adt7316_show_int_mask, adt7316_set_int_mask, 0);
+IIO_EVENT_ATTR_SH(in_temp_high, iio_event_adt7316,
+		adt7316_show_in_temp_high, adt7316_set_in_temp_high, 0);
+IIO_EVENT_ATTR_SH(in_temp_low, iio_event_adt7316,
+		adt7316_show_in_temp_low, adt7316_set_in_temp_low, 0);
+IIO_EVENT_ATTR_SH(ex_temp_high, iio_event_adt7316,
+		adt7316_show_ex_temp_ain1_high,
+		adt7316_set_ex_temp_ain1_high, 0);
+IIO_EVENT_ATTR_SH(ex_temp_low, iio_event_adt7316,
+		adt7316_show_ex_temp_ain1_low,
+		adt7316_set_ex_temp_ain1_low, 0);
+IIO_EVENT_ATTR_SH(ex_temp_ain1_high, iio_event_adt7316,
+		adt7316_show_ex_temp_ain1_high,
+		adt7316_set_ex_temp_ain1_high, 0);
+IIO_EVENT_ATTR_SH(ex_temp_ain1_low, iio_event_adt7316,
+		adt7316_show_ex_temp_ain1_low,
+		adt7316_set_ex_temp_ain1_low, 0);
+IIO_EVENT_ATTR_SH(ain2_high, iio_event_adt7316,
+		adt7316_show_ain2_high, adt7316_set_ain2_high, 0);
+IIO_EVENT_ATTR_SH(ain2_low, iio_event_adt7316,
+		adt7316_show_ain2_low, adt7316_set_ain2_low, 0);
+IIO_EVENT_ATTR_SH(ain3_high, iio_event_adt7316,
+		adt7316_show_ain3_high, adt7316_set_ain3_high, 0);
+IIO_EVENT_ATTR_SH(ain3_low, iio_event_adt7316,
+		adt7316_show_ain3_low, adt7316_set_ain3_low, 0);
+IIO_EVENT_ATTR_SH(ain4_high, iio_event_adt7316,
+		adt7316_show_ain4_high, adt7316_set_ain4_high, 0);
+IIO_EVENT_ATTR_SH(ain4_low, iio_event_adt7316,
+		adt7316_show_ain4_low, adt7316_set_ain4_low, 0);
+IIO_EVENT_ATTR_SH(int_enabled, iio_event_adt7316,
+		adt7316_show_int_enabled, adt7316_set_int_enabled, 0);
+
+static struct attribute *adt7316_event_attributes[] = {
+	&iio_event_attr_int_mask.dev_attr.attr,
+	&iio_event_attr_in_temp_high.dev_attr.attr,
+	&iio_event_attr_in_temp_low.dev_attr.attr,
+	&iio_event_attr_ex_temp_high.dev_attr.attr,
+	&iio_event_attr_ex_temp_low.dev_attr.attr,
+	&iio_event_attr_int_enabled.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adt7316_event_attribute_group = {
+	.attrs = adt7316_event_attributes,
+};
+
+static struct attribute *adt7516_event_attributes[] = {
+	&iio_event_attr_int_mask.dev_attr.attr,
+	&iio_event_attr_in_temp_high.dev_attr.attr,
+	&iio_event_attr_in_temp_low.dev_attr.attr,
+	&iio_event_attr_ex_temp_ain1_high.dev_attr.attr,
+	&iio_event_attr_ex_temp_ain1_low.dev_attr.attr,
+	&iio_event_attr_ain2_high.dev_attr.attr,
+	&iio_event_attr_ain2_low.dev_attr.attr,
+	&iio_event_attr_ain3_high.dev_attr.attr,
+	&iio_event_attr_ain3_low.dev_attr.attr,
+	&iio_event_attr_ain4_high.dev_attr.attr,
+	&iio_event_attr_ain4_low.dev_attr.attr,
+	&iio_event_attr_int_enabled.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group adt7516_event_attribute_group = {
+	.attrs = adt7516_event_attributes,
+};
+
+#ifdef CONFIG_PM
+int adt7316_disable(struct device *dev)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return _adt7316_store_enabled(chip, 0);
+}
+EXPORT_SYMBOL(adt7316_disable);
+
+int adt7316_enable(struct device *dev)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+
+	return _adt7316_store_enabled(chip, 1);
+}
+EXPORT_SYMBOL(adt7316_enable);
+#endif
+
+/*
+ * device probe and remove
+ */
+int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
+		const char *name)
+{
+	struct adt7316_chip_info *chip;
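+	/*
+	 * Platform data layout as used below: entry [0] holds optional IRQ
+	 * trigger flags, entry [1] the LDAC GPIO number (0 when unused).
+	 */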
+	unsigned short *adt7316_platform_data = dev->platform_data;
+	int ret = 0;
+
+	chip = kzalloc(sizeof(struct adt7316_chip_info), GFP_KERNEL);
+
+	if (chip == NULL)
+		return -ENOMEM;
+
+	/* this is only used for device removal purposes */
+	dev_set_drvdata(dev, chip);
+
+	chip->bus = *bus;
+	chip->name = name;
+
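+	/*
+	 * Derive the chip ID from the device name: character 4 selects the
+	 * ADT73xx or ADT75xx family and character 6 the part within the
+	 * family, e.g. "adt7316" maps to ID_ADT7316.
+	 */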
+	if (name[4] == '3')
+		chip->id = ID_ADT7316 + (name[6] - '6');
+	else if (name[4] == '5')
+		chip->id = ID_ADT7516 + (name[6] - '6');
+	else
+		return -ENODEV;
+
+	chip->ldac_pin = adt7316_platform_data[1];
+	if (chip->ldac_pin) {
+		chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDCA;
+		if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+			chip->config1 |= ADT7516_SEL_AIN3;
+	}
+	chip->int_mask = ADT7316_TEMP_INT_MASK | ADT7316_VDD_INT_MASK;
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
+		chip->int_mask |= ADT7516_AIN_INT_MASK;
+
+	chip->indio_dev = iio_allocate_device();
+	if (chip->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_chip;
+	}
+
+	chip->indio_dev->dev.parent = dev;
+	if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) {
+		chip->indio_dev->attrs = &adt7516_attribute_group;
+		chip->indio_dev->event_attrs = &adt7516_event_attribute_group;
+	} else {
+		chip->indio_dev->attrs = &adt7316_attribute_group;
+		chip->indio_dev->event_attrs = &adt7316_event_attribute_group;
+	}
+	chip->indio_dev->dev_data = (void *)chip;
+	chip->indio_dev->driver_module = THIS_MODULE;
+	chip->indio_dev->num_interrupt_lines = 1;
+	chip->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(chip->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	if (chip->bus.irq > 0) {
+		if (adt7316_platform_data[0])
+			chip->bus.irq_flags = adt7316_platform_data[0];
+
+		ret = iio_register_interrupt_line(chip->bus.irq,
+				chip->indio_dev,
+				0,
+				chip->bus.irq_flags,
+				chip->name);
+		if (ret)
+			goto error_unreg_dev;
+
+		/*
+		 * The event handler list element refers to iio_event_adt7316.
+		 * All event attributes bind to the same event handler, so the
+		 * handler only needs to be registered once.
+		 */
+		iio_add_event_to_list(&iio_event_adt7316,
+				&chip->indio_dev->interrupts[0]->ev_list);
+
+		INIT_WORK(&chip->thresh_work, adt7316_interrupt_bh);
+
+		if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH)
+			chip->config1 |= ADT7316_INT_POLARITY;
+	}
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1);
+	if (ret) {
+		ret = -EIO;
+		goto error_unreg_irq;
+	}
+
+	ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, chip->config3);
+	if (ret) {
+		ret = -EIO;
+		goto error_unreg_irq;
+	}
+
+	dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n",
+			chip->name);
+
+	return 0;
+
+error_unreg_irq:
+	iio_unregister_interrupt_line(chip->indio_dev, 0);
+error_unreg_dev:
+	iio_device_unregister(chip->indio_dev);
+error_free_dev:
+	iio_free_device(chip->indio_dev);
+error_free_chip:
+	kfree(chip);
+
+	return ret;
+}
+EXPORT_SYMBOL(adt7316_probe);
+
+int __devexit adt7316_remove(struct device *dev)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct adt7316_chip_info *chip = dev_info->dev_data;
+	struct iio_dev *indio_dev = chip->indio_dev;
+
+	dev_set_drvdata(dev, NULL);
+	if (chip->bus.irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+	iio_device_unregister(indio_dev);
+	iio_free_device(indio_dev);
+	kfree(chip);
+
+	return 0;
+}
+EXPORT_SYMBOL(adt7316_remove);
+
+MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital"
+			" temperature sensor, ADC and DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/addac/adt7316.h b/drivers/staging/iio/addac/adt7316.h
new file mode 100644
index 0000000..d34bd67
--- /dev/null
+++ b/drivers/staging/iio/addac/adt7316.h
@@ -0,0 +1,33 @@
+/*
+ * ADT7316 digital temperature sensor driver supporting ADT7316/7/8 and ADT7516/7/9
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _ADT7316_H_
+#define _ADT7316_H_
+
+#include <linux/types.h>
+
+#define ADT7316_REG_MAX_ADDR		0x3F
+
+struct adt7316_bus {
+	void *client;
+	int irq;
+	int irq_flags;
+	int (*read) (void *client, u8 reg, u8 *data);
+	int (*write) (void *client, u8 reg, u8 val);
+	int (*multi_read) (void *client, u8 first_reg, u8 count, u8 *data);
+	int (*multi_write) (void *client, u8 first_reg, u8 count, u8 *data);
+};
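+
+/*
+ * A minimal sketch of bus glue (hypothetical, for illustration only): the
+ * I2C or SPI front end fills this structure with its own register
+ * accessors before calling adt7316_probe(), for example
+ *
+ *	static int example_i2c_read(void *client, u8 reg, u8 *data)
+ *	{
+ *		int ret = i2c_smbus_read_byte_data(client, reg);
+ *
+ *		if (ret < 0)
+ *			return ret;
+ *		*data = ret;
+ *		return 0;
+ *	}
+ *
+ *	struct adt7316_bus bus = {
+ *		.client = client,
+ *		.irq = client->irq,
+ *		.read = example_i2c_read,
+ *	};
+ */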
+
+#ifdef CONFIG_PM
+int adt7316_disable(struct device *dev);
+int adt7316_enable(struct device *dev);
+#endif
+int adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name);
+int adt7316_remove(struct device *dev);
+
+#endif
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
new file mode 100644
index 0000000..9191bd2
--- /dev/null
+++ b/drivers/staging/iio/dac/Kconfig
@@ -0,0 +1,21 @@
+#
+# DAC drivers
+#
+comment "Digital to analog converters"
+
+config AD5624R_SPI
+	tristate "Analog Devices AD5624/44/64R DAC SPI driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices AD5624R, AD5644R and
+	  AD5664R converters (DACs). This driver uses the common SPI interface.
+
+config AD5446
+	tristate "Analog Devices AD5444/6, AD5620/40/60 and AD5541A/12A DAC SPI driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices AD5444, AD5446,
+	  AD5620, AD5640, AD5660 and AD5541A, AD5512A DACs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad5446.
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
new file mode 100644
index 0000000..7cf331b
--- /dev/null
+++ b/drivers/staging/iio/dac/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O DAC drivers
+#
+
+obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
+obj-$(CONFIG_AD5446) += ad5446.o
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
new file mode 100644
index 0000000..e3387cd
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -0,0 +1,323 @@
+/*
+ * AD5446 SPI DAC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#include "ad5446.h"
+
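+/*
+ * Chip specific helpers that pack a raw sample into the frame expected by
+ * each part: the AD5446/AD5620 style parts carry control bits in the upper
+ * bits of a 16 bit word, the AD5542 style parts use the plain shifted value
+ * and the AD5660 uses a 24 bit frame.
+ */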
+static void ad5446_store_sample(struct ad5446_state *st, unsigned val)
+{
+	st->data.d16 = cpu_to_be16(AD5446_LOAD |
+					(val << st->chip_info->left_shift));
+}
+
+static void ad5542_store_sample(struct ad5446_state *st, unsigned val)
+{
+	st->data.d16 = cpu_to_be16(val << st->chip_info->left_shift);
+}
+
+static void ad5620_store_sample(struct ad5446_state *st, unsigned val)
+{
+	st->data.d16 = cpu_to_be16(AD5620_LOAD |
+					(val << st->chip_info->left_shift));
+}
+
+static void ad5660_store_sample(struct ad5446_state *st, unsigned val)
+{
+	val |= AD5660_LOAD;
+	st->data.d24[0] = (val >> 16) & 0xFF;
+	st->data.d24[1] = (val >> 8) & 0xFF;
+	st->data.d24[2] = val & 0xFF;
+}
+
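+/*
+ * out0_raw write path: the value is range checked against the DAC
+ * resolution, packed by the chip specific store_sample helper and sent in
+ * a single SPI transfer under the device mutex.
+ */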
+static ssize_t ad5446_write(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad5446_state *st = dev_info->dev_data;
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+
+	if (val < 0 || val > RES_MASK(st->chip_info->bits)) {
+		ret = -EINVAL;
+		goto error_ret;
+	}
+
+	mutex_lock(&dev_info->mlock);
+	st->chip_info->store_sample(st, val);
+	ret = spi_sync(st->spi, &st->msg);
+	mutex_unlock(&dev_info->mlock);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_OUT_RAW(0, ad5446_write, 0);
+
+static ssize_t ad5446_show_scale(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad5446_state *st = iio_dev_get_devdata(dev_info);
+	/* Corresponds to Vref / 2^(bits) */
+	unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
+
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
+}
+static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
+
+static ssize_t ad5446_show_name(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad5446_state *st = iio_dev_get_devdata(dev_info);
+
+	return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
+}
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad5446_show_name, NULL, 0);
+
+static struct attribute *ad5446_attributes[] = {
+	&iio_dev_attr_out0_raw.dev_attr.attr,
+	&iio_dev_attr_out_scale.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad5446_attribute_group = {
+	.attrs = ad5446_attributes,
+};
+
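+/*
+ * Per part constants, indexed by the driver_data value of the matching
+ * entry in ad5446_id[].
+ */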
+static const struct ad5446_chip_info ad5446_chip_info_tbl[] = {
+	[ID_AD5444] = {
+		.bits = 12,
+		.storagebits = 16,
+		.left_shift = 2,
+		.store_sample = ad5446_store_sample,
+	},
+	[ID_AD5446] = {
+		.bits = 14,
+		.storagebits = 16,
+		.left_shift = 0,
+		.store_sample = ad5446_store_sample,
+	},
+	[ID_AD5542A] = {
+		.bits = 16,
+		.storagebits = 16,
+		.left_shift = 0,
+		.store_sample = ad5542_store_sample,
+	},
+	[ID_AD5512A] = {
+		.bits = 12,
+		.storagebits = 16,
+		.left_shift = 4,
+		.store_sample = ad5542_store_sample,
+	},
+	[ID_AD5620_2500] = {
+		.bits = 12,
+		.storagebits = 16,
+		.left_shift = 2,
+		.int_vref_mv = 2500,
+		.store_sample = ad5620_store_sample,
+	},
+	[ID_AD5620_1250] = {
+		.bits = 12,
+		.storagebits = 16,
+		.left_shift = 2,
+		.int_vref_mv = 1250,
+		.store_sample = ad5620_store_sample,
+	},
+	[ID_AD5640_2500] = {
+		.bits = 14,
+		.storagebits = 16,
+		.left_shift = 0,
+		.int_vref_mv = 2500,
+		.store_sample = ad5620_store_sample,
+	},
+	[ID_AD5640_1250] = {
+		.bits = 14,
+		.storagebits = 16,
+		.left_shift = 0,
+		.int_vref_mv = 1250,
+		.store_sample = ad5620_store_sample,
+	},
+	[ID_AD5660_2500] = {
+		.bits = 16,
+		.storagebits = 24,
+		.left_shift = 0,
+		.int_vref_mv = 2500,
+		.store_sample = ad5660_store_sample,
+	},
+	[ID_AD5660_1250] = {
+		.bits = 16,
+		.storagebits = 24,
+		.left_shift = 0,
+		.int_vref_mv = 1250,
+		.store_sample = ad5660_store_sample,
+	},
+};
+
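+/*
+ * The "vcc" supply is optional: without it the internal reference is used
+ * on the AD5620/40/60 variants, otherwise the driver warns that the
+ * reference voltage is unspecified.
+ */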
+static int __devinit ad5446_probe(struct spi_device *spi)
+{
+	struct ad5446_state *st;
+	int ret, voltage_uv = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	st->reg = regulator_get(&spi->dev, "vcc");
+	if (!IS_ERR(st->reg)) {
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_put_reg;
+
+		voltage_uv = regulator_get_voltage(st->reg);
+	}
+
+	st->chip_info =
+		&ad5446_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+	spi_set_drvdata(spi, st);
+
+	st->spi = spi;
+
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_disable_reg;
+	}
+
+	/* Establish that the iio_dev is a child of the spi device */
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->attrs = &ad5446_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Setup default message */
+
+	st->xfer.tx_buf = &st->data;
+	st->xfer.len = st->chip_info->storagebits / 8;
+
+	spi_message_init(&st->msg);
+	spi_message_add_tail(&st->xfer, &st->msg);
+
+	switch (spi_get_device_id(spi)->driver_data) {
+	case ID_AD5620_2500:
+	case ID_AD5620_1250:
+	case ID_AD5640_2500:
+	case ID_AD5640_1250:
+	case ID_AD5660_2500:
+	case ID_AD5660_1250:
+		st->vref_mv = st->chip_info->int_vref_mv;
+		break;
+	default:
+		if (voltage_uv)
+			st->vref_mv = voltage_uv / 1000;
+		else
+			dev_warn(&spi->dev,
+				 "reference voltage unspecified\n");
+	}
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_free_device;
+
+	return 0;
+
+error_free_device:
+	iio_free_device(st->indio_dev);
+error_disable_reg:
+	if (!IS_ERR(st->reg))
+		regulator_disable(st->reg);
+error_put_reg:
+	if (!IS_ERR(st->reg))
+		regulator_put(st->reg);
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int ad5446_remove(struct spi_device *spi)
+{
+	struct ad5446_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	iio_device_unregister(indio_dev);
+	if (!IS_ERR(st->reg)) {
+		regulator_disable(st->reg);
+		regulator_put(st->reg);
+	}
+	kfree(st);
+	return 0;
+}
+
+static const struct spi_device_id ad5446_id[] = {
+	{"ad5444", ID_AD5444},
+	{"ad5446", ID_AD5446},
+	{"ad5542a", ID_AD5542A},
+	{"ad5512a", ID_AD5512A},
+	{"ad5620-2500", ID_AD5620_2500}, /* AD5620/40/60: */
+	{"ad5620-1250", ID_AD5620_1250}, /* part numbers may look different */
+	{"ad5640-2500", ID_AD5640_2500},
+	{"ad5640-1250", ID_AD5640_1250},
+	{"ad5660-2500", ID_AD5660_2500},
+	{"ad5660-1250", ID_AD5660_1250},
+	{}
+};
+
+static struct spi_driver ad5446_driver = {
+	.driver = {
+		.name	= "ad5446",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad5446_probe,
+	.remove		= __devexit_p(ad5446_remove),
+	.id_table	= ad5446_id,
+};
+
+static int __init ad5446_init(void)
+{
+	return spi_register_driver(&ad5446_driver);
+}
+module_init(ad5446_init);
+
+static void __exit ad5446_exit(void)
+{
+	spi_unregister_driver(&ad5446_driver);
+}
+module_exit(ad5446_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad5446");
diff --git a/drivers/staging/iio/dac/ad5446.h b/drivers/staging/iio/dac/ad5446.h
new file mode 100644
index 0000000..902542e
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5446.h
@@ -0,0 +1,96 @@
+/*
+ * AD5446 SPI DAC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DAC_AD5446_H_
+#define IIO_DAC_AD5446_H_
+
+/* DAC Control Bits */
+
+#define AD5446_LOAD		(0x0 << 14) /* Load and update */
+#define AD5446_SDO_DIS		(0x1 << 14) /* Disable SDO */
+#define AD5446_NOP		(0x2 << 14) /* No operation */
+#define AD5446_CLK_RISING	(0x3 << 14) /* Clock data on rising edge */
+
+#define AD5620_LOAD		(0x0 << 14) /* Load and update Norm Operation*/
+#define AD5620_PWRDWN_1k	(0x1 << 14) /* Power-down: 1kOhm to GND */
+#define AD5620_PWRDWN_100k	(0x2 << 14) /* Power-down: 100kOhm to GND */
+#define AD5620_PWRDWN_TRISTATE	(0x3 << 14) /* Power-down: Three-state */
+
+#define AD5660_LOAD		(0x0 << 16) /* Load and update Norm Operation*/
+#define AD5660_PWRDWN_1k	(0x1 << 16) /* Power-down: 1kOhm to GND */
+#define AD5660_PWRDWN_100k	(0x2 << 16) /* Power-down: 100kOhm to GND */
+#define AD5660_PWRDWN_TRISTATE	(0x3 << 16) /* Power-down: Three-state */
+
+#define RES_MASK(bits)	((1 << (bits)) - 1)
+
+/**
+ * struct ad5446_state - driver instance specific data
+ * @indio_dev:		the industrial I/O device
+ * @spi:		spi_device
+ * @chip_info:		chip model specific constants, available modes etc
+ * @reg:		supply regulator
+ * @poll_work:		bottom half of polling interrupt handler
+ * @vref_mv:		actual reference voltage used
+ * @xfer:		default spi transfer
+ * @msg:		default spi message
+ * @data:		spi transmit buffer
+ */
+
+struct ad5446_state {
+	struct iio_dev			*indio_dev;
+	struct spi_device		*spi;
+	const struct ad5446_chip_info	*chip_info;
+	struct regulator		*reg;
+	struct work_struct		poll_work;
+	unsigned short			vref_mv;
+	struct spi_transfer		xfer;
+	struct spi_message		msg;
+	union {
+		unsigned short		d16;
+		unsigned char		d24[3];
+	} data;
+};
+
+/**
+ * struct ad5446_chip_info - chip specific information
+ * @bits:		accuracy of the DAC in bits
+ * @storagebits:	number of bits written to the DAC
+ * @left_shift:		number of bits the datum must be shifted
+ * @int_vref_mv:	AD5620/40/60: the internal reference voltage
+ * @store_sample:	chip specific helper function to store the datum
+ */
+
+struct ad5446_chip_info {
+	u8				bits;
+	u8				storagebits;
+	u8				left_shift;
+	u16				int_vref_mv;
+	void (*store_sample)		(struct ad5446_state *st, unsigned val);
+};
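To illustrate how @bits, @storagebits and @left_shift interact, a hypothetical 14-bit part carried in a 16-bit frame with left_shift = 2 would have a store_sample helper along these lines (a sketch for illustration only, not one of the driver's callbacks):

	/* Sketch: mask the code to 14 bits, shift it into position in the
	 * 16-bit frame and store it big-endian for the SPI transfer. */
	static void example_store_sample(struct ad5446_state *st, unsigned val)
	{
		st->data.d16 = cpu_to_be16((val & RES_MASK(14)) << 2);
	}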
+
+/**
+ * ad5446_supported_device_ids:
+ * The AD5620/40/60 parts are available in different fixed internal reference
+ * voltage options. The actual part numbers may look different
+ * (and a bit cryptic); however, this naming is used to make clear
+ * which parts are supported here.
+ */
+
+enum ad5446_supported_device_ids {
+	ID_AD5444,
+	ID_AD5446,
+	ID_AD5542A,
+	ID_AD5512A,
+	ID_AD5620_2500,
+	ID_AD5620_1250,
+	ID_AD5640_2500,
+	ID_AD5640_1250,
+	ID_AD5660_2500,
+	ID_AD5660_1250,
+};
+
+#endif /* IIO_DAC_AD5446_H_ */
diff --git a/drivers/staging/iio/dac/ad5624r.h b/drivers/staging/iio/dac/ad5624r.h
new file mode 100644
index 0000000..ce518be
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5624r.h
@@ -0,0 +1,21 @@
+#ifndef SPI_AD5624R_H_
+#define SPI_AD5624R_H_
+
+#define AD5624R_DAC_CHANNELS	4
+
+#define AD5624R_ADDR_DAC0	0x0
+#define AD5624R_ADDR_DAC1	0x1
+#define AD5624R_ADDR_DAC2	0x2
+#define AD5624R_ADDR_DAC3	0x3
+#define AD5624R_ADDR_ALL_DAC	0x7
+
+#define AD5624R_CMD_WRITE_INPUT_N             0x0
+#define AD5624R_CMD_UPDATE_DAC_N              0x1
+#define AD5624R_CMD_WRITE_INPUT_N_UPDATE_ALL  0x2
+#define AD5624R_CMD_WRITE_INPUT_N_UPDATE_N    0x3
+#define AD5624R_CMD_POWERDOWN_DAC             0x4
+#define AD5624R_CMD_RESET                     0x5
+#define AD5624R_CMD_LDAC_SETUP                0x6
+#define AD5624R_CMD_INTERNAL_REFER_SETUP      0x7
+
+#endif
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
new file mode 100644
index 0000000..2b1c6dd
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -0,0 +1,300 @@
+/*
+ * AD5624R, AD5644R, AD5664R digital-to-analog converter SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+#include "ad5624r.h"
+
+/**
+ * struct ad5624r_state - device related storage
+ * @indio_dev:		associated industrial IO device
+ * @us:			spi device
+ * @data_len:		DAC resolution in bits (12, 14 or 16)
+ * @ldac_mode:		cached LDAC setup register value
+ * @dac_power_mode:	cached per-channel power-down mode
+ * @internal_ref:	non-zero if the internal reference is enabled
+ */
+struct ad5624r_state {
+	struct iio_dev *indio_dev;
+	struct spi_device *us;
+	int data_len;
+	int ldac_mode;
+	int dac_power_mode[AD5624R_DAC_CHANNELS];
+	int internal_ref;
+};
+
+static int ad5624r_spi_write(struct spi_device *spi,
+			     u8 cmd, u8 addr, u16 val, u8 len)
+{
+	u32 data;
+	u8 msg[3];
+
+	/*
+	 * The input shift register is 24 bits wide.  The first two bits
+	 * are don't care bits.  The next three are the command bits,
+	 * C2 to C0, followed by the 3-bit DAC address, A2 to A0, and then
+	 * the 16-, 14- or 12-bit data word.  The data word comprises the
+	 * 16-, 14- or 12-bit input code followed by 0, 2 or 4 don't care
+	 * bits, for the AD5664R, AD5644R and AD5624R respectively.
+	 */
+	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+	msg[0] = data >> 16;
+	msg[1] = data >> 8;
+	msg[2] = data;
+
+	return spi_write(spi, msg, 3);
+}
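A worked example of the packing above for the 12-bit AD5624R (write-and-update command 0x3, DAC A at address 0, code 0xABC, len = 12):

	data = (0x3 << 19) | (0x0 << 16) | (0xABC << (16 - 12))
	     = 0x180000    | 0x000000    | 0x00ABC0
	     = 0x18ABC0
	msg  = { 0x18, 0xAB, 0xC0 }   /* sent MSB first */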
+
+static ssize_t ad5624r_write_dac(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len)
+{
+	long readin;
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = strict_strtol(buf, 10, &readin);
+	if (ret)
+		return ret;
+
+	ret = ad5624r_spi_write(st->us, AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
+				this_attr->address, readin, st->data_len);
+	return ret ? ret : len;
+}
+
+static ssize_t ad5624r_read_ldac_mode(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+
+	return sprintf(buf, "%x\n", st->ldac_mode);
+}
+
+static ssize_t ad5624r_write_ldac_mode(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	long readin;
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+
+	ret = strict_strtol(buf, 16, &readin);
+	if (ret)
+		return ret;
+
+	ret = ad5624r_spi_write(st->us, AD5624R_CMD_LDAC_SETUP, 0,
+				readin & 0xF, 16);
+	st->ldac_mode = readin & 0xF;
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad5624r_read_dac_power_mode(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	return sprintf(buf, "%d\n", st->dac_power_mode[this_attr->address]);
+}
+
+static ssize_t ad5624r_write_dac_power_mode(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t len)
+{
+	long readin;
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = strict_strtol(buf, 10, &readin);
+	if (ret)
+		return ret;
+
+	ret = ad5624r_spi_write(st->us, AD5624R_CMD_POWERDOWN_DAC, 0,
+				((readin & 0x3) << 4) |
+				(1 << this_attr->address), 16);
+
+	st->dac_power_mode[this_attr->address] = readin & 0x3;
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad5624r_read_internal_ref_mode(struct device *dev,
+					      struct device_attribute *attr,
+					      char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+
+	return sprintf(buf, "%d\n", st->internal_ref);
+}
+
+static ssize_t ad5624r_write_internal_ref_mode(struct device *dev,
+					       struct device_attribute *attr,
+					       const char *buf, size_t len)
+{
+	long readin;
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad5624r_state *st = indio_dev->dev_data;
+
+	ret = strict_strtol(buf, 10, &readin);
+	if (ret)
+		return ret;
+
+	ret = ad5624r_spi_write(st->us, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
+				!!readin, 16);
+
+	st->internal_ref = !!readin;
+
+	return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0);
+static IIO_DEV_ATTR_OUT_RAW(1, ad5624r_write_dac, AD5624R_ADDR_DAC1);
+static IIO_DEV_ATTR_OUT_RAW(2, ad5624r_write_dac, AD5624R_ADDR_DAC2);
+static IIO_DEV_ATTR_OUT_RAW(3, ad5624r_write_dac, AD5624R_ADDR_DAC3);
+
+static IIO_DEVICE_ATTR(ldac_mode, S_IRUGO | S_IWUSR, ad5624r_read_ldac_mode,
+		       ad5624r_write_ldac_mode, 0);
+static IIO_DEVICE_ATTR(internal_ref, S_IRUGO | S_IWUSR,
+		       ad5624r_read_internal_ref_mode,
+		       ad5624r_write_internal_ref_mode, 0);
+
+#define IIO_DEV_ATTR_DAC_POWER_MODE(_num, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(dac_power_mode_##_num, S_IRUGO | S_IWUSR, _show, _store, _addr)
+
+static IIO_DEV_ATTR_DAC_POWER_MODE(0, ad5624r_read_dac_power_mode,
+				   ad5624r_write_dac_power_mode, 0);
+static IIO_DEV_ATTR_DAC_POWER_MODE(1, ad5624r_read_dac_power_mode,
+				   ad5624r_write_dac_power_mode, 1);
+static IIO_DEV_ATTR_DAC_POWER_MODE(2, ad5624r_read_dac_power_mode,
+				   ad5624r_write_dac_power_mode, 2);
+static IIO_DEV_ATTR_DAC_POWER_MODE(3, ad5624r_read_dac_power_mode,
+				   ad5624r_write_dac_power_mode, 3);
+
+static struct attribute *ad5624r_attributes[] = {
+	&iio_dev_attr_out0_raw.dev_attr.attr,
+	&iio_dev_attr_out1_raw.dev_attr.attr,
+	&iio_dev_attr_out2_raw.dev_attr.attr,
+	&iio_dev_attr_out3_raw.dev_attr.attr,
+	&iio_dev_attr_dac_power_mode_0.dev_attr.attr,
+	&iio_dev_attr_dac_power_mode_1.dev_attr.attr,
+	&iio_dev_attr_dac_power_mode_2.dev_attr.attr,
+	&iio_dev_attr_dac_power_mode_3.dev_attr.attr,
+	&iio_dev_attr_ldac_mode.dev_attr.attr,
+	&iio_dev_attr_internal_ref.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad5624r_attribute_group = {
+	.attrs = ad5624r_attributes,
+};
+
+static int __devinit ad5624r_probe(struct spi_device *spi)
+{
+	struct ad5624r_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	st->data_len = spi_get_device_id(spi)->driver_data;
+
+	st->us = spi;
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 0;
+	st->indio_dev->event_attrs = NULL;
+
+	st->indio_dev->attrs = &ad5624r_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	spi->mode = SPI_MODE_0;
+	spi_setup(spi);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->indio_dev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad5624r_remove(struct spi_device *spi)
+{
+	struct ad5624r_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->indio_dev);
+	kfree(st);
+
+	return 0;
+}
+
+static const struct spi_device_id ad5624r_id[] = {
+	{"ad5624r", 12},
+	{"ad5644r", 14},
+	{"ad5664r", 16},
+	{}
+};
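The driver_data value in this table is the DAC resolution in bits, which probe() copies into st->data_len for ad5624r_spi_write(). A board binds one of these names through its SPI board info; a purely hypothetical example (bus and chip-select numbers are made up) would look like:

	static struct spi_board_info ad5644r_board_info __initdata = {
		.modalias	= "ad5644r",	/* 14-bit variant */
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	};

registered via spi_register_board_info(&ad5644r_board_info, 1) from board init code.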
+
+static struct spi_driver ad5624r_driver = {
+	.driver = {
+		.name = "ad5624r",
+		.owner = THIS_MODULE,
+	},
+	.probe = ad5624r_probe,
+	.remove = __devexit_p(ad5624r_remove),
+	.id_table = ad5624r_id,
+};
+
+static __init int ad5624r_spi_init(void)
+{
+	return spi_register_driver(&ad5624r_driver);
+}
+module_init(ad5624r_spi_init);
+
+static __exit void ad5624r_spi_exit(void)
+{
+	spi_unregister_driver(&ad5624r_driver);
+}
+module_exit(ad5624r_spi_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD5624R/AD5644R/AD5664R DAC SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/dac.h b/drivers/staging/iio/dac/dac.h
new file mode 100644
index 0000000..1d82f35
--- /dev/null
+++ b/drivers/staging/iio/dac/dac.h
@@ -0,0 +1,6 @@
+/*
+ * dac.h - sysfs attributes associated with DACs
+ */
+
+#define IIO_DEV_ATTR_OUT_RAW(_num, _store, _addr)				\
+	IIO_DEVICE_ATTR(out##_num##_raw, S_IWUSR, NULL, _store, _addr)
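For reference, IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0) expands (via IIO_DEVICE_ATTR from the staging sysfs.h) to roughly the following, which is why the attribute arrays above can reference iio_dev_attr_out0_raw; this is an approximate expansion for illustration, not verbatim preprocessor output:

	static struct iio_dev_attr iio_dev_attr_out0_raw = {
		.dev_attr = __ATTR(out0_raw, S_IWUSR, NULL, ad5624r_write_dac),
		.address  = AD5624R_ADDR_DAC0,	/* picked up via to_iio_dev_attr() */
	};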
diff --git a/drivers/staging/iio/dds/Kconfig b/drivers/staging/iio/dds/Kconfig
new file mode 100644
index 0000000..a047da6
--- /dev/null
+++ b/drivers/staging/iio/dds/Kconfig
@@ -0,0 +1,56 @@
+#
+# Direct Digital Synthesis drivers
+#
+comment "Direct Digital Synthesis"
+
+config AD5930
+	tristate "Analog Devices ad5930/5932 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD5930/AD5932
+	  DDS chips; the driver provides direct access via sysfs.
+
+config AD9832
+	tristate "Analog Devices ad9832/5 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD9832 and
+	  AD9835 DDS chips; the driver provides direct access via sysfs.
+
+config AD9834
+	tristate "Analog Devices ad9833/4 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD9833 and
+	  AD9834 DDS chips; the driver provides direct access via sysfs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ad9834.
+
+config AD9850
+	tristate "Analog Devices ad9850/1 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD9850/AD9851
+	  DDS chips; the driver provides direct access via sysfs.
+
+config AD9852
+	tristate "Analog Devices ad9852/4 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD9852/AD9854
+	  DDS chips; the driver provides direct access via sysfs.
+
+config AD9910
+	tristate "Analog Devices ad9910 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD9910 DDS
+	  chip; the driver provides direct access via sysfs.
+
+config AD9951
+	tristate "Analog Devices ad9951 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD9951 DDS
+	  chip; the driver provides direct access via sysfs.
diff --git a/drivers/staging/iio/dds/Makefile b/drivers/staging/iio/dds/Makefile
new file mode 100644
index 0000000..1477461
--- /dev/null
+++ b/drivers/staging/iio/dds/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Direct Digital Synthesis drivers
+#
+
+obj-$(CONFIG_AD5930) += ad5930.o
+obj-$(CONFIG_AD9832) += ad9832.o
+obj-$(CONFIG_AD9834) += ad9834.o
+obj-$(CONFIG_AD9850) += ad9850.o
+obj-$(CONFIG_AD9852) += ad9852.o
+obj-$(CONFIG_AD9910) += ad9910.o
+obj-$(CONFIG_AD9951) += ad9951.o
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
new file mode 100644
index 0000000..f80039c
--- /dev/null
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -0,0 +1,170 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad5930
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad5930"
+
+#define value_mask (u16)0xf000
+#define addr_shift 12
+
+/* Register format: 4 bits addr + 12 bits value */
+struct ad5903_config {
+	u16 control;
+	u16 incnum;
+	u16 frqdelt[2];
+	u16 incitvl;
+	u16 buritvl;
+	u16 strtfrq[2];
+};
+
+struct ad5930_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+};
+
+static ssize_t ad5930_set_parameter(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	struct ad5903_config *config = (struct ad5903_config *)buf;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad5930_state *st = idev->dev_data;
+
+	config->control = (config->control & ~value_mask);
+	config->incnum = (config->incnum & ~value_mask) | (1 << addr_shift);
+	config->frqdelt[0] = (config->frqdelt[0] & ~value_mask) | (2 << addr_shift);
+	config->frqdelt[1] = (config->frqdelt[1] & ~value_mask) | (3 << addr_shift);
+	config->incitvl = (config->incitvl & ~value_mask) | (4 << addr_shift);
+	config->buritvl = (config->buritvl & ~value_mask) | (8 << addr_shift);
+	config->strtfrq[0] = (config->strtfrq[0] & ~value_mask) | (0xc << addr_shift);
+	config->strtfrq[1] = (config->strtfrq[1] & ~value_mask) | (0xd << addr_shift);
+
+	xfer.len = len;
+	xfer.tx_buf = config;
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
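With the per-field masking above, each 16-bit register word is the caller-supplied 12-bit value with the 4-bit register address placed in the top nibble. For example, a start-frequency low word of 0x123 destined for register 0xc becomes:

	(0x123 & 0x0fff) | (0xc << addr_shift) = 0x123 | 0xc000 = 0xc123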
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad5930_set_parameter, 0);
+
+static struct attribute *ad5930_attributes[] = {
+	&iio_dev_attr_dds.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad5930_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad5930_attributes,
+};
+
+static int __devinit ad5930_probe(struct spi_device *spi)
+{
+	struct ad5930_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad5930_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+	spi->max_speed_hz = 2000000;
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 16;
+	spi_setup(spi);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad5930_remove(struct spi_device *spi)
+{
+	struct ad5930_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad5930_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad5930_probe,
+	.remove = __devexit_p(ad5930_remove),
+};
+
+static __init int ad5930_spi_init(void)
+{
+	return spi_register_driver(&ad5930_driver);
+}
+module_init(ad5930_spi_init);
+
+static __exit void ad5930_spi_exit(void)
+{
+	spi_unregister_driver(&ad5930_driver);
+}
+module_exit(ad5930_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad5930 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
new file mode 100644
index 0000000..e911893
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -0,0 +1,264 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9832
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9832"
+
+#define value_mask (u16)0xf000
+#define cmd_shift 12
+#define add_shift 8
+#define AD9832_SYNC (1 << 13)
+#define AD9832_SELSRC (1 << 12)
+#define AD9832_SLEEP (1 << 13)
+#define AD9832_RESET (1 << 12)
+#define AD9832_CLR (1 << 11)
+
+#define ADD_FREQ0LL 0x0
+#define ADD_FREQ0HL 0x1
+#define ADD_FREQ0LM 0x2
+#define ADD_FREQ0HM 0x3
+#define ADD_FREQ1LL 0x4
+#define ADD_FREQ1HL 0x5
+#define ADD_FREQ1LM 0x6
+#define ADD_FREQ1HM 0x7
+#define ADD_PHASE0L 0x8
+#define ADD_PHASE0H 0x9
+#define ADD_PHASE1L 0xa
+#define ADD_PHASE1H 0xb
+#define ADD_PHASE2L 0xc
+#define ADD_PHASE2H 0xd
+#define ADD_PHASE3L 0xe
+#define ADD_PHASE3H 0xf
+
+#define CMD_PHA8BITSW 0x1
+#define CMD_PHA16BITSW 0x0
+#define CMD_FRE8BITSW 0x3
+#define CMD_FRE16BITSW 0x2
+#define CMD_SELBITSCTL 0x6
+
+struct ad9832_setting {
+	u16 freq0[4];
+	u16 freq1[4];
+	u16 phase0[2];
+	u16 phase1[2];
+	u16 phase2[2];
+	u16 phase3[2];
+};
+
+struct ad9832_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+};
+
+static ssize_t ad9832_set_parameter(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	struct ad9832_setting config;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad9832_state *st = idev->dev_data;
+
+	config.freq0[0] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ0LL << add_shift | buf[0]);
+	config.freq0[1] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ0HL << add_shift | buf[1]);
+	config.freq0[2] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ0LM << add_shift | buf[2]);
+	config.freq0[3] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ0HM << add_shift | buf[3]);
+	config.freq1[0] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ1LL << add_shift | buf[4]);
+	config.freq1[1] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ1HL << add_shift | buf[5]);
+	config.freq1[2] = (CMD_FRE8BITSW << cmd_shift | ADD_FREQ1LM << add_shift | buf[6]);
+	config.freq1[3] = (CMD_FRE16BITSW << cmd_shift | ADD_FREQ1HM << add_shift | buf[7]);
+
+	config.phase0[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE0L << add_shift | buf[9]);
+	config.phase0[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE0H << add_shift | buf[10]);
+	config.phase1[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE1L << add_shift | buf[11]);
+	config.phase1[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE1H << add_shift | buf[12]);
+	config.phase2[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE2L << add_shift | buf[13]);
+	config.phase2[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE2H << add_shift | buf[14]);
+	config.phase3[0] = (CMD_PHA8BITSW << cmd_shift | ADD_PHASE3L << add_shift | buf[15]);
+	config.phase3[1] = (CMD_PHA16BITSW << cmd_shift | ADD_PHASE3H << add_shift | buf[16]);
+
+	xfer.len = 2 * len;
+	xfer.tx_buf = &config;
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
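Each AD9832 serial word is 16 bits: a 4-bit command in D15..D12 (hence cmd_shift), a 4-bit destination address in D11..D8 (add_shift) and 8 data bits. For example, writing the data byte 0x5a to the FREQ0 LL register:

	CMD_FRE8BITSW << cmd_shift | ADD_FREQ0LL << add_shift | 0x5a
	  = (0x3 << 12) | (0x0 << 8) | 0x5a
	  = 0x305a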
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9832_set_parameter, 0);
+
+static struct attribute *ad9832_attributes[] = {
+	&iio_dev_attr_dds.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad9832_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad9832_attributes,
+};
+
+static void ad9832_init(struct ad9832_state *st)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	u16 config = 0;
+
+	config = 0x3 << 14 | AD9832_SLEEP | AD9832_RESET | AD9832_CLR;
+
+	mutex_lock(&st->lock);
+
+	xfer.len = 2;
+	xfer.tx_buf = &config;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	config = 0x2 << 14 | AD9832_SYNC | AD9832_SELSRC;
+	xfer.len = 2;
+	xfer.tx_buf = &config;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	config = CMD_SELBITSCTL << cmd_shift;
+	xfer.len = 2;
+	xfer.tx_buf = &config;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	config = 0x3 << 14;
+
+	xfer.len = 2;
+	xfer.tx_buf = &config;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+}
+
+static int __devinit ad9832_probe(struct spi_device *spi)
+{
+	struct ad9832_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad9832_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+	spi->max_speed_hz = 2000000;
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 16;
+	spi_setup(spi);
+	ad9832_init(st);
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad9832_remove(struct spi_device *spi)
+{
+	struct ad9832_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad9832_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad9832_probe,
+	.remove = __devexit_p(ad9832_remove),
+};
+
+static __init int ad9832_spi_init(void)
+{
+	return spi_register_driver(&ad9832_driver);
+}
+module_init(ad9832_spi_init);
+
+static __exit void ad9832_spi_exit(void)
+{
+	spi_unregister_driver(&ad9832_driver);
+}
+module_exit(ad9832_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9832 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
new file mode 100644
index 0000000..eb1a681
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -0,0 +1,477 @@
+/*
+ * AD9834 SPI DAC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <asm/div64.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dds.h"
+
+#include "ad9834.h"
+
+static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
+{
+	unsigned long long freqreg = (u64) fout * (u64) (1 << AD9834_FREQ_BITS);
+	do_div(freqreg, mclk);
+	return freqreg;
+}
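The tuning word is freqreg = fout * 2^28 / mclk. A worked example for a 25 MHz master clock and a 1 kHz output:

	freqreg = 1000 * 2^28 / 25000000
	        = 268435456000 / 25000000
	        = 10737 (0x29f1, after do_div() truncation)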
+
+static int ad9834_write_frequency(struct ad9834_state *st,
+				  unsigned long addr, unsigned long fout)
+{
+	unsigned long regval;
+
+	if (fout > (st->mclk / 2))
+		return -EINVAL;
+
+	regval = ad9834_calc_freqreg(st->mclk, fout);
+
+	st->freq_data[0] = cpu_to_be16(addr | (regval &
+				       RES_MASK(AD9834_FREQ_BITS / 2)));
+	st->freq_data[1] = cpu_to_be16(addr | ((regval >>
+				       (AD9834_FREQ_BITS / 2)) &
+				       RES_MASK(AD9834_FREQ_BITS / 2)));
+
+	return spi_sync(st->spi, &st->freq_msg);
+}
+
+static int ad9834_write_phase(struct ad9834_state *st,
+				  unsigned long addr, unsigned long phase)
+{
+	if (phase > (1 << AD9834_PHASE_BITS))
+		return -EINVAL;
+	st->data = cpu_to_be16(addr | phase);
+
+	return spi_sync(st->spi, &st->msg);
+}
+
+static ssize_t ad9834_write(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad9834_state *st = dev_info->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+
+	mutex_lock(&dev_info->mlock);
+	switch (this_attr->address) {
+	case AD9834_REG_FREQ0:
+	case AD9834_REG_FREQ1:
+		ret = ad9834_write_frequency(st, this_attr->address, val);
+		break;
+	case AD9834_REG_PHASE0:
+	case AD9834_REG_PHASE1:
+		ret = ad9834_write_phase(st, this_attr->address, val);
+		break;
+	case AD9834_OPBITEN:
+		if (st->control & AD9834_MODE) {
+			ret = -EINVAL;  /* AD9834 reserved mode */
+			break;
+		}
+
+		if (val)
+			st->control |= AD9834_OPBITEN;
+		else
+			st->control &= ~AD9834_OPBITEN;
+
+		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	case AD9834_PIN_SW:
+		if (val)
+			st->control |= AD9834_PIN_SW;
+		else
+			st->control &= ~AD9834_PIN_SW;
+		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	case AD9834_FSEL:
+	case AD9834_PSEL:
+		if (val == 0)
+			st->control &= ~(this_attr->address | AD9834_PIN_SW);
+		else if (val == 1) {
+			st->control |= this_attr->address;
+			st->control &= ~AD9834_PIN_SW;
+		} else {
+			ret = -EINVAL;
+			break;
+		}
+		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	case AD9834_RESET:
+		if (val)
+			st->control &= ~AD9834_RESET;
+		else
+			st->control |= AD9834_RESET;
+
+		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+		ret = spi_sync(st->spi, &st->msg);
+		break;
+	default:
+		ret = -ENODEV;
+	}
+	mutex_unlock(&dev_info->mlock);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ad9834_store_wavetype(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad9834_state *st = dev_info->dev_data;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret = 0;
+	bool is_ad9833 = st->devid == ID_AD9833;
+
+	mutex_lock(&dev_info->mlock);
+
+	switch (this_attr->address) {
+	case 0:
+		if (sysfs_streq(buf, "sine")) {
+			st->control &= ~AD9834_MODE;
+			if (is_ad9833)
+				st->control &= ~AD9834_OPBITEN;
+		} else if (sysfs_streq(buf, "triangle")) {
+			if (is_ad9833) {
+				st->control &= ~AD9834_OPBITEN;
+				st->control |= AD9834_MODE;
+			} else if (st->control & AD9834_OPBITEN) {
+				ret = -EINVAL;	/* AD9834 reserved mode */
+			} else {
+				st->control |= AD9834_MODE;
+			}
+		} else if (is_ad9833 && sysfs_streq(buf, "square")) {
+			st->control &= ~AD9834_MODE;
+			st->control |= AD9834_OPBITEN;
+		} else {
+			ret = -EINVAL;
+		}
+
+		break;
+	case 1:
+		if (sysfs_streq(buf, "square") &&
+			!(st->control & AD9834_MODE)) {
+			st->control &= ~AD9834_MODE;
+			st->control |= AD9834_OPBITEN;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!ret) {
+		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+		ret = spi_sync(st->spi, &st->msg);
+	}
+	mutex_unlock(&dev_info->mlock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad9834_show_name(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+
+	return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
+}
+static IIO_DEVICE_ATTR(name, S_IRUGO, ad9834_show_name, NULL, 0);
+
+static ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+	char *str;
+
+	if (st->devid == ID_AD9833)
+		str = "sine triangle square";
+	else if (st->control & AD9834_OPBITEN)
+		str = "sine";
+	else
+		str = "sine triangle";
+
+	return sprintf(buf, "%s\n", str);
+}
+
+
+static IIO_DEVICE_ATTR(dds0_out0_wavetype_available, S_IRUGO,
+		       ad9834_show_out0_wavetype_available, NULL, 0);
+
+static ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+	char *str;
+
+	if (st->control & AD9834_MODE)
+		str = "";
+	else
+		str = "square";
+
+	return sprintf(buf, "%s\n", str);
+}
+
+static IIO_DEVICE_ATTR(dds0_out1_wavetype_available, S_IRUGO,
+		       ad9834_show_out1_wavetype_available, NULL, 0);
+
+/**
+ * see dds.h for further information
+ */
+
+static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0);
+static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1);
+static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL);
+static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */
+
+static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0);
+static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1);
+static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL);
+static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/
+
+static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
+	ad9834_write, AD9834_PIN_SW);
+static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET);
+static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL,
+	ad9834_write, AD9834_OPBITEN);
+static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0);
+static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1);
+
+static struct attribute *ad9834_attributes[] = {
+	&iio_dev_attr_dds0_freq0.dev_attr.attr,
+	&iio_dev_attr_dds0_freq1.dev_attr.attr,
+	&iio_const_attr_dds0_freq_scale.dev_attr.attr,
+	&iio_dev_attr_dds0_phase0.dev_attr.attr,
+	&iio_dev_attr_dds0_phase1.dev_attr.attr,
+	&iio_const_attr_dds0_phase_scale.dev_attr.attr,
+	&iio_dev_attr_dds0_pincontrol_en.dev_attr.attr,
+	&iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
+	&iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
+	&iio_dev_attr_dds0_out_enable.dev_attr.attr,
+	&iio_dev_attr_dds0_out1_enable.dev_attr.attr,
+	&iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
+	&iio_dev_attr_dds0_out1_wavetype.dev_attr.attr,
+	&iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
+	&iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr,
+	&iio_dev_attr_name.dev_attr.attr,
+	NULL,
+};
+
+static mode_t ad9834_attr_is_visible(struct kobject *kobj,
+				     struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct iio_dev *dev_info = dev_get_drvdata(dev);
+	struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+
+	mode_t mode = attr->mode;
+
+	if (st->devid == ID_AD9834)
+		return mode;
+
+	if ((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
+		(attr == &iio_dev_attr_dds0_out1_wavetype.dev_attr.attr) ||
+		(attr ==
+		&iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr))
+		mode = 0;
+
+	return mode;
+}
+
+static const struct attribute_group ad9834_attribute_group = {
+	.attrs = ad9834_attributes,
+	.is_visible = ad9834_attr_is_visible,
+};
+
+static int __devinit ad9834_probe(struct spi_device *spi)
+{
+	struct ad9834_platform_data *pdata = spi->dev.platform_data;
+	struct ad9834_state *st;
+	int ret;
+
+	if (!pdata) {
+		dev_dbg(&spi->dev, "no platform data?\n");
+		return -ENODEV;
+	}
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	st->reg = regulator_get(&spi->dev, "vcc");
+	if (!IS_ERR(st->reg)) {
+		ret = regulator_enable(st->reg);
+		if (ret)
+			goto error_put_reg;
+	}
+
+	st->mclk = pdata->mclk;
+
+	spi_set_drvdata(spi, st);
+
+	st->spi = spi;
+	st->devid = spi_get_device_id(spi)->driver_data;
+
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_disable_reg;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->attrs = &ad9834_attribute_group;
+	st->indio_dev->dev_data = (void *) st;
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	/* Setup default messages */
+
+	st->xfer.tx_buf = &st->data;
+	st->xfer.len = 2;
+
+	spi_message_init(&st->msg);
+	spi_message_add_tail(&st->xfer, &st->msg);
+
+	st->freq_xfer[0].tx_buf = &st->freq_data[0];
+	st->freq_xfer[0].len = 2;
+	st->freq_xfer[0].cs_change = 1;
+	st->freq_xfer[1].tx_buf = &st->freq_data[1];
+	st->freq_xfer[1].len = 2;
+
+	spi_message_init(&st->freq_msg);
+	spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
+	spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);
+
+	st->control = AD9834_B28 | AD9834_RESET;
+
+	if (!pdata->en_div2)
+		st->control |= AD9834_DIV2;
+
+	if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
+		st->control |= AD9834_SIGN_PIB;
+
+	st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
+	ret = spi_sync(st->spi, &st->msg);
+	if (ret) {
+		dev_err(&spi->dev, "device init failed\n");
+		goto error_free_device;
+	}
+
+	ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
+	if (ret)
+		goto error_free_device;
+
+	ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
+	if (ret)
+		goto error_free_device;
+
+	ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
+	if (ret)
+		goto error_free_device;
+
+	ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
+	if (ret)
+		goto error_free_device;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_free_device;
+
+	return 0;
+
+error_free_device:
+	iio_free_device(st->indio_dev);
+error_disable_reg:
+	if (!IS_ERR(st->reg))
+		regulator_disable(st->reg);
+error_put_reg:
+	if (!IS_ERR(st->reg))
+		regulator_put(st->reg);
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad9834_remove(struct spi_device *spi)
+{
+	struct ad9834_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->indio_dev);
+	if (!IS_ERR(st->reg)) {
+		regulator_disable(st->reg);
+		regulator_put(st->reg);
+	}
+	kfree(st);
+	return 0;
+}
+
+static const struct spi_device_id ad9834_id[] = {
+	{"ad9833", ID_AD9833},
+	{"ad9834", ID_AD9834},
+	{}
+};
+
+static struct spi_driver ad9834_driver = {
+	.driver = {
+		.name	= "ad9834",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+	.probe		= ad9834_probe,
+	.remove		= __devexit_p(ad9834_remove),
+	.id_table	= ad9834_id,
+};
+
+static int __init ad9834_init(void)
+{
+	return spi_register_driver(&ad9834_driver);
+}
+module_init(ad9834_init);
+
+static void __exit ad9834_exit(void)
+{
+	spi_unregister_driver(&ad9834_driver);
+}
+module_exit(ad9834_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD9833/AD9834 DDS");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad9834");
diff --git a/drivers/staging/iio/dds/ad9834.h b/drivers/staging/iio/dds/ad9834.h
new file mode 100644
index 0000000..0fc3b88
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9834.h
@@ -0,0 +1,112 @@
+/*
+ * AD9834 SPI DDS driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DDS_AD9834_H_
+#define IIO_DDS_AD9834_H_
+
+/* Registers */
+
+#define AD9834_REG_CMD		(0 << 14)
+#define AD9834_REG_FREQ0	(1 << 14)
+#define AD9834_REG_FREQ1	(2 << 14)
+#define AD9834_REG_PHASE0	(6 << 13)
+#define AD9834_REG_PHASE1	(7 << 13)
+
+/* Command Control Bits */
+
+#define AD9834_B28		(1 << 13)
+#define AD9834_HLB		(1 << 12)
+#define AD9834_FSEL		(1 << 11)
+#define AD9834_PSEL		(1 << 10)
+#define AD9834_PIN_SW		(1 << 9)
+#define AD9834_RESET		(1 << 8)
+#define AD9834_SLEEP1		(1 << 7)
+#define AD9834_SLEEP12		(1 << 6)
+#define AD9834_OPBITEN		(1 << 5)
+#define AD9834_SIGN_PIB		(1 << 4)
+#define AD9834_DIV2		(1 << 3)
+#define AD9834_MODE		(1 << 1)
+
+#define AD9834_FREQ_BITS	28
+#define AD9834_PHASE_BITS	12
+
+#define RES_MASK(bits)	((1 << (bits)) - 1)
+
+/**
+ * struct ad9834_state - driver instance specific data
+ * @indio_dev:		the industrial I/O device
+ * @spi:		spi_device
+ * @reg:		supply regulator
+ * @mclk:		external master clock
+ * @control:		cached control word
+ * @xfer:		default spi transfer
+ * @msg:		default spi message
+ * @freq_xfer:		tuning word spi transfer
+ * @freq_msg:		tuning word spi message
+ * @data:		spi transmit buffer
+ * @freq_data:		tuning word spi transmit buffer
+ */
+
+struct ad9834_state {
+	struct iio_dev			*indio_dev;
+	struct spi_device		*spi;
+	struct regulator		*reg;
+	unsigned int			mclk;
+	unsigned short			control;
+	unsigned short			devid;
+	struct spi_transfer		xfer;
+	struct spi_message		msg;
+	struct spi_transfer		freq_xfer[2];
+	struct spi_message		freq_msg;
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 */
+	unsigned short			data ____cacheline_aligned;
+	unsigned short			freq_data[2];
+};
+
+
+/*
+ * TODO: struct ad9834_platform_data needs to go into include/linux/iio
+ */
+
+/**
+ * struct ad9834_platform_data - platform specific information
+ * @mclk:		master clock in Hz
+ * @freq0:		power-up output frequency for FREQ0, in Hz
+ * @freq1:		power-up output frequency for FREQ1, in Hz
+ * @phase0:		power-up phase0 value [0..4095], mapping onto 0..2PI
+ * @phase1:		power-up phase1 value [0..4095], mapping onto 0..2PI
+ * @en_div2:		digital output/2 is passed to the SIGN BIT OUT pin
+ * @en_signbit_msb_out:	the MSB (or MSB/2) of the DAC data is connected to the
+ *			SIGN BIT OUT pin. en_div2 controls whether it is the MSB
+ *			or MSB/2 that is output. If en_signbit_msb_out=false,
+ *			the on-board comparator is connected to SIGN BIT OUT.
+ */
+
+struct ad9834_platform_data {
+	unsigned int		mclk;
+	unsigned int		freq0;
+	unsigned int		freq1;
+	unsigned short		phase0;
+	unsigned short		phase1;
+	bool			en_div2;
+	bool			en_signbit_msb_out;
+};
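A hypothetical platform-data instance for a board with a 25 MHz MCLK (all values are illustrative only; the phase fields map 0..4095 onto 0..2PI):

	static struct ad9834_platform_data board_ad9834_pdata = {
		.mclk			= 25000000,
		.freq0			= 1000000,	/* 1 MHz at power-up */
		.freq1			= 500000,
		.phase0			= 0,
		.phase1			= 2048,		/* PI */
		.en_div2		= false,
		.en_signbit_msb_out	= false,
	};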
+
+/**
+ * ad9834_supported_device_ids:
+ */
+
+enum ad9834_supported_device_ids {
+	ID_AD9833,
+	ID_AD9834,
+};
+
+#endif /* IIO_DDS_AD9834_H_ */
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
new file mode 100644
index 0000000..b259bfe
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -0,0 +1,156 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9850
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9850"
+
+#define value_mask (u16)0xf000
+#define addr_shift 12
+
+/* 40-bit update word: 32-bit frequency tuning word plus phase/control bits */
+struct ad9850_config {
+	u8 control[5];
+};
+
+struct ad9850_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+};
+
+static ssize_t ad9850_set_parameter(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	struct ad9850_config *config = (struct ad9850_config *)buf;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad9850_state *st = idev->dev_data;
+
+	xfer.len = len;
+	xfer.tx_buf = config;
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
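The 5-byte blob passed through this attribute is the AD9850's 40-bit update word: a 32-bit frequency tuning word (FTW) plus phase and control bits. Assuming the standard DDS relationship fout = FTW * refclk / 2^32, a 1 MHz output from a 125 MHz reference needs approximately:

	FTW = 1000000 * 2^32 / 125000000
	    = 2^32 / 125
	    = 34359738 (0x020c49ba, rounded down)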
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9850_set_parameter, 0);
+
+static struct attribute *ad9850_attributes[] = {
+	&iio_dev_attr_dds.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad9850_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad9850_attributes,
+};
+
+static int __devinit ad9850_probe(struct spi_device *spi)
+{
+	struct ad9850_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad9850_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+	spi->max_speed_hz = 2000000;
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 16;
+	spi_setup(spi);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad9850_remove(struct spi_device *spi)
+{
+	struct ad9850_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad9850_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad9850_probe,
+	.remove = __devexit_p(ad9850_remove),
+};
+
+static __init int ad9850_spi_init(void)
+{
+	return spi_register_driver(&ad9850_driver);
+}
+module_init(ad9850_spi_init);
+
+static __exit void ad9850_spi_exit(void)
+{
+	spi_unregister_driver(&ad9850_driver);
+}
+module_exit(ad9850_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9850 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
new file mode 100644
index 0000000..594fb6a
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -0,0 +1,305 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9852
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9852"
+
+#define addr_phaad1 0x0
+#define addr_phaad2 0x1
+#define addr_fretu1 0x2
+#define addr_fretu2 0x3
+#define addr_delfre 0x4
+#define addr_updclk 0x5
+#define addr_ramclk 0x6
+#define addr_contrl 0x7
+#define addr_optskm 0x8
+#define addr_optskr 0xa
+#define addr_dacctl 0xb
+
+#define COMPPD		(1 << 4)
+#define REFMULT2	(1 << 2)
+#define BYPPLL		(1 << 5)
+#define PLLRANG		(1 << 6)
+#define IEUPCLK		(1)
+#define OSKEN		(1 << 5)
+
+#define read_bit	(1 << 7)
+
+/* Register format: 1 byte addr + value */
+struct ad9852_config {
+	u8 phajst0[3];
+	u8 phajst1[3];
+	u8 fretun1[6];
+	u8 fretun2[6];
+	u8 dltafre[6];
+	u8 updtclk[5];
+	u8 ramprat[4];
+	u8 control[5];
+	u8 outpskm[3];
+	u8 outpskr[2];
+	u8 daccntl[3];
+};
+
+struct ad9852_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+};
+
+static ssize_t ad9852_set_parameter(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	struct ad9852_config *config = (struct ad9852_config *)buf;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad9852_state *st = idev->dev_data;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->phajst0[0];
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->phajst1[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 6;
+	xfer.tx_buf = &config->fretun1[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 6;
+	xfer.tx_buf = &config->fretun2[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 6;
+	xfer.tx_buf = &config->dltafre[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->updtclk[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 4;
+	xfer.tx_buf = &config->ramprat[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->control[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->outpskm[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 2;
+	xfer.tx_buf = &config->outpskr[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->daccntl[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9852_set_parameter, 0);
+
+static void ad9852_init(struct ad9852_state *st)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	u8 config[5];
+
+	config[0] = addr_contrl;
+	config[1] = COMPPD;
+	config[2] = REFMULT2 | BYPPLL | PLLRANG;
+	config[3] = IEUPCLK;
+	config[4] = OSKEN;
+
+	mutex_lock(&st->lock);
+
+	xfer.len = 5;
+	xfer.tx_buf = &config;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	mutex_unlock(&st->lock);
+}
+
+static struct attribute *ad9852_attributes[] = {
+	&iio_dev_attr_dds.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad9852_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad9852_attributes,
+};
+
+static int __devinit ad9852_probe(struct spi_device *spi)
+{
+	struct ad9852_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad9852_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+	spi->max_speed_hz = 2000000;
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+	ad9852_init(st);
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad9852_remove(struct spi_device *spi)
+{
+	struct ad9852_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad9852_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad9852_probe,
+	.remove = __devexit_p(ad9852_remove),
+};
+
+static __init int ad9852_spi_init(void)
+{
+	return spi_register_driver(&ad9852_driver);
+}
+module_init(ad9852_spi_init);
+
+static __exit void ad9852_spi_exit(void)
+{
+	spi_unregister_driver(&ad9852_driver);
+}
+module_exit(ad9852_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9852 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
new file mode 100644
index 0000000..e8fb75c
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -0,0 +1,440 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9910
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9910"
+
+#define CFR1 0x0
+#define CFR2 0x1
+#define CFR3 0x2
+
+#define AUXDAC 0x3
+#define IOUPD 0x4
+#define FTW 0x7
+#define POW 0x8
+#define ASF 0x9
+#define MULTC 0x0A
+#define DIG_RAMPL 0x0B
+#define DIG_RAMPS 0x0C
+#define DIG_RAMPR 0x0D
+#define SIN_TONEP0 0x0E
+#define SIN_TONEP1 0x0F
+#define SIN_TONEP2 0x10
+#define SIN_TONEP3 0x11
+#define SIN_TONEP4 0x12
+#define SIN_TONEP5 0x13
+#define SIN_TONEP6 0x14
+#define SIN_TONEP7 0x15
+
+#define RAM_ENABLE	(1 << 7)
+
+#define MANUAL_OSK	(1 << 7)
+#define INVSIC		(1 << 6)
+#define DDS_SINEOP	(1)
+
+#define AUTO_OSK	(1)
+#define OSKEN		(1 << 1)
+#define LOAD_ARR	(1 << 2)
+#define CLR_PHA		(1 << 3)
+#define CLR_DIG		(1 << 4)
+#define ACLR_PHA	(1 << 5)
+#define ACLR_DIG	(1 << 6)
+#define LOAD_LRR	(1 << 7)
+
+#define LSB_FST		(1)
+#define SDIO_IPT	(1 << 1)
+#define EXT_PWD		(1 << 3)
+#define ADAC_PWD	(1 << 4)
+#define REFCLK_PWD	(1 << 5)
+#define DAC_PWD		(1 << 6)
+#define DIG_PWD		(1 << 7)
+
+#define ENA_AMP		(1)
+#define READ_FTW	(1)
+#define DIGR_LOW	(1 << 1)
+#define DIGR_HIGH	(1 << 2)
+#define DIGR_ENA	(1 << 3)
+#define SYNCCLK_ENA	(1 << 6)
+#define ITER_IOUPD	(1 << 7)
+
+#define TX_ENA		(1 << 1)
+#define PDCLK_INV	(1 << 2)
+#define PDCLK_ENB	(1 << 3)
+
+#define PARA_ENA	(1 << 4)
+#define SYNC_DIS	(1 << 5)
+#define DATA_ASS	(1 << 6)
+#define MATCH_ENA	(1 << 7)
+
+#define PLL_ENA		(1)
+#define PFD_RST		(1 << 2)
+#define REFCLK_RST	(1 << 6)
+#define REFCLK_BYP	(1 << 7)
+
+/* Register format: 1 byte addr + value */
+struct ad9910_config {
+	u8 auxdac[5];
+	u8 ioupd[5];
+	u8 ftw[5];
+	u8 pow[3];
+	u8 asf[5];
+	u8 multc[5];
+	u8 dig_rampl[9];
+	u8 dig_ramps[9];
+	u8 dig_rampr[5];
+	u8 sin_tonep0[9];
+	u8 sin_tonep1[9];
+	u8 sin_tonep2[9];
+	u8 sin_tonep3[9];
+	u8 sin_tonep4[9];
+	u8 sin_tonep5[9];
+	u8 sin_tonep6[9];
+	u8 sin_tonep7[9];
+};
+
+struct ad9910_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+};
+
+static ssize_t ad9910_set_parameter(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	struct ad9910_config *config = (struct ad9910_config *)buf;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad9910_state *st = idev->dev_data;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->auxdac[0];
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->ioupd[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->ftw[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->pow[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->asf[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->multc[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->dig_rampl[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->dig_ramps[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->dig_rampr[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep0[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep1[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep2[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep3[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep4[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep5[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep6[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 9;
+	xfer.tx_buf = &config->sin_tonep7[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9910_set_parameter, 0);
+
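+/* Program CFR1..CFR3 with the driver's default control word values. */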
+static void ad9910_init(struct ad9910_state *st)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer = { };
+	int ret;
+	u8 cfr[5];
+
+	cfr[0] = CFR1;
+	cfr[1] = 0;
+	cfr[2] = MANUAL_OSK | INVSIC | DDS_SINEOP;
+	cfr[3] = AUTO_OSK | OSKEN | ACLR_PHA | ACLR_DIG | LOAD_LRR;
+	cfr[4] = 0;
+
+	mutex_lock(&st->lock);
+
+	xfer.len = 5;
+	xfer.tx_buf = &cfr;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	cfr[0] = CFR2;
+	cfr[1] = ENA_AMP;
+	cfr[2] = READ_FTW | DIGR_ENA | ITER_IOUPD;
+	cfr[3] = TX_ENA | PDCLK_INV | PDCLK_ENB;
+	cfr[4] = PARA_ENA;
+
+	xfer.len = 5;
+	xfer.tx_buf = &cfr;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	cfr[0] = CFR3;
+	cfr[1] = PLL_ENA;
+	cfr[2] = 0;
+	cfr[3] = REFCLK_RST | REFCLK_BYP;
+	cfr[4] = 0;
+
+	xfer.len = 5;
+	xfer.tx_buf = &cfr;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	mutex_unlock(&st->lock);
+}
+
+static struct attribute *ad9910_attributes[] = {
+	&iio_dev_attr_dds.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad9910_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad9910_attributes,
+};
+
+static int __devinit ad9910_probe(struct spi_device *spi)
+{
+	struct ad9910_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad9910_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+	spi->max_speed_hz = 2000000;
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+	ad9910_init(st);
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad9910_remove(struct spi_device *spi)
+{
+	struct ad9910_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad9910_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad9910_probe,
+	.remove = __devexit_p(ad9910_remove),
+};
+
+static __init int ad9910_spi_init(void)
+{
+	return spi_register_driver(&ad9910_driver);
+}
+module_init(ad9910_spi_init);
+
+static __exit void ad9910_spi_exit(void)
+{
+	spi_unregister_driver(&ad9910_driver);
+}
+module_exit(ad9910_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9910 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
new file mode 100644
index 0000000..57eddf6
--- /dev/null
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -0,0 +1,249 @@
+/*
+ * Driver for ADI Direct Digital Synthesis ad9951
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad9951"
+
+#define CFR1 0x0
+#define CFR2 0x1
+
+#define AUTO_OSK	(1)
+#define OSKEN		(1 << 1)
+#define LOAD_ARR	(1 << 2)
+
+#define AUTO_SYNC	(1 << 7)
+
+#define LSB_FST		(1)
+#define SDIO_IPT	(1 << 1)
+#define CLR_PHA		(1 << 2)
+#define SINE_OPT	(1 << 4)
+#define ACLR_PHA	(1 << 5)
+
+#define VCO_RANGE	(1 << 2)
+
+#define CRS_OPT		(1 << 1)
+#define HMANU_SYNC	(1 << 2)
+#define HSPD_SYNC	(1 << 3)
+
+/* Register format: 1 byte addr + value */
+struct ad9951_config {
+	u8 asf[3];
+	u8 arr[2];
+	u8 ftw0[5];
+	u8 ftw1[3];
+};
+
+struct ad9951_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+};
+
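+/*
+ * Writes to the "dds" sysfs attribute are interpreted as a packed
+ * struct ad9951_config; each member (one address byte plus its data
+ * bytes) is sent to the device as a separate SPI transfer.
+ */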
+static ssize_t ad9951_set_parameter(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t len)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer = { };
+	int ret;
+	struct ad9951_config *config = (struct ad9951_config *)buf;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad9951_state *st = idev->dev_data;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->asf[0];
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 2;
+	xfer.tx_buf = &config->arr[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 5;
+	xfer.tx_buf = &config->ftw0[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	xfer.len = 3;
+	xfer.tx_buf = &config->ftw1[0];
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(dds, S_IWUSR, NULL, ad9951_set_parameter, 0);
+
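+/* Program CFR1 and CFR2 with the driver's default control word values. */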
+static void ad9951_init(struct ad9951_state *st)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer = { };
+	int ret;
+	u8 cfr[5];
+
+	cfr[0] = CFR1;
+	cfr[1] = 0;
+	cfr[2] = LSB_FST | CLR_PHA | SINE_OPT | ACLR_PHA;
+	cfr[3] = AUTO_OSK | OSKEN | LOAD_ARR;
+	cfr[4] = 0;
+
+	mutex_lock(&st->lock);
+
+	xfer.len = 5;
+	xfer.tx_buf = &cfr;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+	cfr[0] = CFR2;
+	cfr[1] = VCO_RANGE;
+	cfr[2] = HSPD_SYNC;
+	cfr[3] = 0;
+
+	xfer.len = 4;
+	xfer.tx_buf = &cfr;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	mutex_unlock(&st->lock);
+}
+
+static struct attribute *ad9951_attributes[] = {
+	&iio_dev_attr_dds.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad9951_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad9951_attributes,
+};
+
+static int __devinit ad9951_probe(struct spi_device *spi)
+{
+	struct ad9951_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad9951_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+	spi->max_speed_hz = 2000000;
+	spi->mode = SPI_MODE_3;
+	spi->bits_per_word = 8;
+	spi_setup(spi);
+	ad9951_init(st);
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad9951_remove(struct spi_device *spi)
+{
+	struct ad9951_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad9951_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad9951_probe,
+	.remove = __devexit_p(ad9951_remove),
+};
+
+static __init int ad9951_spi_init(void)
+{
+	return spi_register_driver(&ad9951_driver);
+}
+module_init(ad9951_spi_init);
+
+static __exit void ad9951_spi_exit(void)
+{
+	spi_unregister_driver(&ad9951_driver);
+}
+module_exit(ad9951_spi_exit);
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION("Analog Devices ad9951 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dds/dds.h b/drivers/staging/iio/dds/dds.h
new file mode 100644
index 0000000..d8ac3a9
--- /dev/null
+++ b/drivers/staging/iio/dds/dds.h
@@ -0,0 +1,110 @@
+/*
+ * dds.h - sysfs attributes associated with DDS devices
+ *
+ * Copyright (c) 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_freqY
+ */
+
+#define IIO_DEV_ATTR_FREQ(_channel, _num, _mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(dds##_channel##_freq##_num,			\
+			_mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_freqY_scale
+ */
+
+#define IIO_CONST_ATTR_FREQ_SCALE(_channel, _string)			\
+	IIO_CONST_ATTR(dds##_channel##_freq_scale, _string)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_freqsymbol
+ */
+
+#define IIO_DEV_ATTR_FREQSYMBOL(_channel, _mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(dds##_channel##_freqsymbol,			\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_phaseY
+ */
+
+#define IIO_DEV_ATTR_PHASE(_channel, _num, _mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(dds##_channel##_phase##_num,			\
+			_mode, _show, _store, _addr)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_phaseY_scale
+ */
+
+#define IIO_CONST_ATTR_PHASE_SCALE(_channel, _string)			\
+	IIO_CONST_ATTR(dds##_channel##_phase_scale, _string)
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_phasesymbol
+ */
+
+#define IIO_DEV_ATTR_PHASESYMBOL(_channel, _mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(dds##_channel##_phasesymbol,			\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_pincontrol_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_EN(_channel, _mode, _show, _store, _addr)\
+	IIO_DEVICE_ATTR(dds##_channel##_pincontrol_en,			\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_pincontrol_freq_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_FREQ_EN(_channel, _mode, _show, _store, _addr)\
+	IIO_DEVICE_ATTR(dds##_channel##_pincontrol_freq_en,		\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_pincontrol_phase_en
+ */
+
+#define IIO_DEV_ATTR_PINCONTROL_PHASE_EN(_channel, _mode, _show, _store, _addr)\
+	IIO_DEVICE_ATTR(dds##_channel##_pincontrol_phase_en,		\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_out_enable
+ */
+
+#define IIO_DEV_ATTR_OUT_ENABLE(_channel, _mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(dds##_channel##_out_enable,			\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_outY_enable
+ */
+
+#define IIO_DEV_ATTR_OUTY_ENABLE(_channel, _output,			\
+			_mode, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(dds##_channel##_out##_output##_enable,		\
+			_mode, _show, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_outY_wavetype
+ */
+
+#define IIO_DEV_ATTR_OUT_WAVETYPE(_channel, _output, _store, _addr)	\
+	IIO_DEVICE_ATTR(dds##_channel##_out##_output##_wavetype,	\
+			S_IWUSR, NULL, _store, _addr);
+
+/**
+ * /sys/bus/iio/devices/.../ddsX_outY_wavetype_available
+ */
+
+#define IIO_CONST_ATTR_OUT_WAVETYPES_AVAILABLE(_channel, _output, _modes)\
+	IIO_CONST_ATTR(dds##_channel##_out##_output##_wavetype_available,\
+			_modes);
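+
+/*
+ * Example (illustrative only, names are hypothetical): a DDS driver can
+ * expose frequency register 0 of channel 0 as a write-only attribute with
+ *
+ *	static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, my_dds_store_freq, 0);
+ *
+ * which creates /sys/bus/iio/devices/.../dds0_freq0.
+ */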
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index c404361..236f15f 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -3,11 +3,45 @@
 #
 comment "Digital gyroscope sensors"
 
+config ADIS16060
+	tristate "Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices adis16060 wide bandwidth
+	  yaw rate gyroscope with SPI.
+
+config ADIS16080
+	tristate "Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices adis16080/100 Yaw Rate
+	  Gyroscope with SPI.
+
+config ADIS16130
+	tristate "Analog Devices ADIS16130 High Precision Angular Rate Sensor driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices ADIS16130 High Precision
+	  Angular Rate Sensor driver.
+
 config ADIS16260
-	tristate "Analog Devices ADIS16260/5 Digital Gyroscope Sensor SPI driver"
+	tristate "Analog Devices ADIS16260 ADIS16265 Digital Gyroscope Sensor SPI driver"
 	depends on SPI
 	select IIO_TRIGGER if IIO_RING_BUFFER
 	select IIO_SW_RING if IIO_RING_BUFFER
 	help
-	  Say yes here to build support for Analog Devices adis16260/5
+	  Say yes here to build support for Analog Devices ADIS16260 ADIS16265
 	  programmable digital gyroscope sensor.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called adis16260.
+
+config ADIS16251
+	tristate "Analog Devices ADIS16251 Digital Gyroscope Sensor SPI driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices ADIS16251 programmable
+	  digital gyroscope sensor.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called adis16251.
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index b5f0dc0..2764c15 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -2,6 +2,18 @@
 # Makefile for digital gyroscope sensor drivers
 #
 
+adis16060-y             := adis16060_core.o
+obj-$(CONFIG_ADIS16060) += adis16060.o
+
+adis16080-y             := adis16080_core.o
+obj-$(CONFIG_ADIS16080) += adis16080.o
+
+adis16130-y             := adis16130_core.o
+obj-$(CONFIG_ADIS16130) += adis16130.o
+
 adis16260-y             := adis16260_core.o
 adis16260-$(CONFIG_IIO_RING_BUFFER) += adis16260_ring.o adis16260_trigger.o
 obj-$(CONFIG_ADIS16260) += adis16260.o
+
+adis16251-y             := adis16251_core.o
+obj-$(CONFIG_ADIS16251) += adis16251.o
diff --git a/drivers/staging/iio/gyro/adis16060.h b/drivers/staging/iio/gyro/adis16060.h
new file mode 100644
index 0000000..5c00e53
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16060.h
@@ -0,0 +1,101 @@
+#ifndef SPI_ADIS16060_H_
+#define SPI_ADIS16060_H_
+
+#define ADIS16060_GYRO       0x20 /* Measure Angular Rate (Gyro) */
+#define ADIS16060_SUPPLY_OUT 0x10 /* Measure Temperature */
+#define ADIS16060_AIN2       0x80 /* Measure AIN2 */
+#define ADIS16060_AIN1       0x40 /* Measure AIN1 */
+#define ADIS16060_TEMP_OUT   0x22 /* Set Positive Self-Test and Output for Angular Rate */
+#define ADIS16060_ANGL_OUT   0x21 /* Set Negative Self-Test and Output for Angular Rate */
+
+#define ADIS16060_MAX_TX     3
+#define ADIS16060_MAX_RX     3
+
+/**
+ * struct adis16060_state - device instance specific data
+ * @us_w:			actual spi_device used for writes
+ * @us_r:			actual spi_device used for reads
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16060_state {
+	struct spi_device		*us_w;
+	struct spi_device		*us_r;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16060_scan {
+	ADIS16060_SCAN_GYRO,
+	ADIS16060_SCAN_TEMP,
+	ADIS16060_SCAN_ADC_1,
+	ADIS16060_SCAN_ADC_2,
+};
+
+void adis16060_remove_trigger(struct iio_dev *indio_dev);
+int adis16060_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16060_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+
+int adis16060_configure_ring(struct iio_dev *indio_dev);
+void adis16060_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16060_initialize_ring(struct iio_ring_buffer *ring);
+void adis16060_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16060_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16060_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16060_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16060_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16060_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16060_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16060_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16060_H_ */
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
new file mode 100644
index 0000000..fc48aca
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -0,0 +1,319 @@
+/*
+ * ADIS16060 Wide Bandwidth Yaw Rate Gyroscope with SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16060.h"
+
+#define DRIVER_NAME		"adis16060"
+
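+/*
+ * The part is accessed through two SPI devices: "adis16060_r" for reads
+ * and "adis16060_w" for writes.  Both probe routines below share this
+ * single state instance.
+ */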
+static struct adis16060_state *adis16060_st;
+
+int adis16060_spi_write(struct device *dev,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16060_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = 0;
+	st->tx[1] = 0;
+	st->tx[2] = val; /* The last 8 bits clocked in are latched */
+
+	ret = spi_write(st->us_w, st->tx, 3);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+int adis16060_spi_read(struct device *dev,
+		u16 *val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16060_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+
+	ret = spi_read(st->us_r, st->rx, 3);
+
+	/* The internal successive approximation ADC begins the conversion process
+	 * on the falling edge of MSEL1 and starts to place data MSB first on the
+	 * DOUT line at the 6th falling edge of SCLK
+	 */
+	if (ret == 0)
+		*val = ((st->rx[0] & 0x3) << 12) | (st->rx[1] << 4) | ((st->rx[2] >> 4) & 0xF);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static ssize_t adis16060_read(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	u16 val;
+	ssize_t ret;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+	ret =  adis16060_spi_read(dev, &val);
+	mutex_unlock(&indio_dev->mlock);
+
+	if (ret == 0)
+		return sprintf(buf, "%d\n", val);
+	else
+		return ret;
+}
+
+static ssize_t adis16060_write(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 16, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16060_spi_write(dev, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+#define IIO_DEV_ATTR_IN(_show)				\
+	IIO_DEVICE_ATTR(in, S_IRUGO, _show, NULL, 0)
+
+#define IIO_DEV_ATTR_OUT(_store)				\
+	IIO_DEVICE_ATTR(out, S_IWUSR, NULL, _store, 0)
+
+static IIO_DEV_ATTR_IN(adis16060_read);
+static IIO_DEV_ATTR_OUT(adis16060_write);
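+/*
+ * Usage note: write a hex channel-select code (e.g. ADIS16060_GYRO) to
+ * the "out" attribute, then read "in" to obtain the corresponding
+ * 14-bit conversion result.
+ */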
+
+static IIO_CONST_ATTR(name, "adis16060");
+
+static struct attribute *adis16060_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16060_event_attribute_group = {
+	.attrs = adis16060_event_attributes,
+};
+
+static struct attribute *adis16060_attributes[] = {
+	&iio_dev_attr_in.dev_attr.attr,
+	&iio_dev_attr_out.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16060_attribute_group = {
+	.attrs = adis16060_attributes,
+};
+
+static int __devinit adis16060_r_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16060_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16060_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16060_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us_r = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16060_event_attribute_group;
+	st->indio_dev->attrs = &adis16060_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis16060_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16060_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16060");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16060_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	adis16060_st = st;
+	return 0;
+
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16060_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16060_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16060_r_remove(struct spi_device *spi)
+{
+	struct adis16060_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	adis16060_remove_trigger(indio_dev);
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16060_uninitialize_ring(indio_dev->ring);
+	adis16060_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+static int __devinit adis16060_w_probe(struct spi_device *spi)
+{
+	int ret;
+	struct adis16060_state *st = adis16060_st;
+	if (!st) {
+		ret =  -ENODEV;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+	st->us_w = spi;
+	return 0;
+
+error_ret:
+	return ret;
+}
+
+static int adis16060_w_remove(struct spi_device *spi)
+{
+	return 0;
+}
+
+static struct spi_driver adis16060_r_driver = {
+	.driver = {
+		.name = "adis16060_r",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16060_r_probe,
+	.remove = __devexit_p(adis16060_r_remove),
+};
+
+static struct spi_driver adis16060_w_driver = {
+	.driver = {
+		.name = "adis16060_w",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16060_w_probe,
+	.remove = __devexit_p(adis16060_w_remove),
+};
+
+static __init int adis16060_init(void)
+{
+	int ret;
+
+	ret = spi_register_driver(&adis16060_r_driver);
+	if (ret < 0)
+		return ret;
+
+	ret = spi_register_driver(&adis16060_w_driver);
+	if (ret < 0) {
+		spi_unregister_driver(&adis16060_r_driver);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(adis16060_init);
+
+static __exit void adis16060_exit(void)
+{
+	spi_unregister_driver(&adis16060_w_driver);
+	spi_unregister_driver(&adis16060_r_driver);
+}
+module_exit(adis16060_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16060 Yaw Rate Gyroscope with SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16080.h b/drivers/staging/iio/gyro/adis16080.h
new file mode 100644
index 0000000..3fcbe67
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16080.h
@@ -0,0 +1,102 @@
+#ifndef SPI_ADIS16080_H_
+#define SPI_ADIS16080_H_
+
+#define ADIS16080_DIN_CODE   4 /* Output data format setting. 0: Twos complement. 1: Offset binary. */
+#define ADIS16080_DIN_GYRO   (0 << 10) /* Gyroscope output */
+#define ADIS16080_DIN_TEMP   (1 << 10) /* Temperature output */
+#define ADIS16080_DIN_AIN1   (2 << 10)
+#define ADIS16080_DIN_AIN2   (3 << 10)
+#define ADIS16080_DIN_WRITE  (1 << 15) /* 1: Write contents on DIN to control register.
+					* 0: No changes to control register.
+					*/
+
+#define ADIS16080_MAX_TX     2
+#define ADIS16080_MAX_RX     2
+
+/**
+ * struct adis16080_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16080_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16080_scan {
+	ADIS16080_SCAN_GYRO,
+	ADIS16080_SCAN_TEMP,
+	ADIS16080_SCAN_ADC_1,
+	ADIS16080_SCAN_ADC_2,
+};
+
+void adis16080_remove_trigger(struct iio_dev *indio_dev);
+int adis16080_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16080_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+
+int adis16080_configure_ring(struct iio_dev *indio_dev);
+void adis16080_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16080_initialize_ring(struct iio_ring_buffer *ring);
+void adis16080_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16080_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16080_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16080_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16080_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16080_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16080_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16080_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16080_H_ */
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
new file mode 100644
index 0000000..0efb768
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -0,0 +1,271 @@
+/*
+ * ADIS16080/100 Yaw Rate Gyroscope with SPI driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16080.h"
+
+#define DRIVER_NAME		"adis16080"
+
+static struct adis16080_state *adis16080_st;
+
+int adis16080_spi_write(struct device *dev,
+		u16 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16080_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = val >> 8;
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+int adis16080_spi_read(struct device *dev,
+		u16 *val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16080_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+
+	ret = spi_read(st->us, st->rx, 2);
+
+	if (ret == 0)
+		*val = ((st->rx[0] & 0xF) << 8) | st->rx[1];
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static ssize_t adis16080_read(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	u16 val;
+	ssize_t ret;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+	ret =  adis16080_spi_read(dev, &val);
+	mutex_unlock(&indio_dev->mlock);
+
+	if (ret == 0)
+		return sprintf(buf, "%d\n", val);
+	else
+		return ret;
+}
+
+static ssize_t adis16080_write(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 16, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16080_spi_write(dev, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+#define IIO_DEV_ATTR_IN(_show)				\
+	IIO_DEVICE_ATTR(in, S_IRUGO, _show, NULL, 0)
+
+#define IIO_DEV_ATTR_OUT(_store)				\
+	IIO_DEVICE_ATTR(out, S_IWUSR, NULL, _store, 0)
+
+static IIO_DEV_ATTR_IN(adis16080_read);
+static IIO_DEV_ATTR_OUT(adis16080_write);
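+/*
+ * Usage note: the "out" attribute takes a hex DIN control word, e.g.
+ * (ADIS16080_DIN_WRITE | ADIS16080_DIN_TEMP) to select the temperature
+ * channel; a subsequent read of "in" returns the selected 12-bit sample.
+ */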
+
+static IIO_CONST_ATTR(name, "adis16080");
+
+static struct attribute *adis16080_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16080_event_attribute_group = {
+	.attrs = adis16080_event_attributes,
+};
+
+static struct attribute *adis16080_attributes[] = {
+	&iio_dev_attr_in.dev_attr.attr,
+	&iio_dev_attr_out.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16080_attribute_group = {
+	.attrs = adis16080_attributes,
+};
+
+static int __devinit adis16080_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16080_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16080_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16080_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16080_event_attribute_group;
+	st->indio_dev->attrs = &adis16080_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis16080_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16080_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16080");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16080_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	adis16080_st = st;
+	return 0;
+
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16080_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16080_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16080_remove(struct spi_device *spi)
+{
+	struct adis16080_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	adis16080_remove_trigger(indio_dev);
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16080_uninitialize_ring(indio_dev->ring);
+	adis16080_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver adis16080_driver = {
+	.driver = {
+		.name = "adis16080",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16080_probe,
+	.remove = __devexit_p(adis16080_remove),
+};
+
+static __init int adis16080_init(void)
+{
+	return spi_register_driver(&adis16080_driver);
+}
+module_init(adis16080_init);
+
+static __exit void adis16080_exit(void)
+{
+	spi_unregister_driver(&adis16080_driver);
+}
+module_exit(adis16080_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope with SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16130.h b/drivers/staging/iio/gyro/adis16130.h
new file mode 100644
index 0000000..ab80ef6
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16130.h
@@ -0,0 +1,108 @@
+#ifndef SPI_ADIS16130_H_
+#define SPI_ADIS16130_H_
+
+#define ADIS16130_CON         0x0
+#define ADIS16130_CON_RD      (1 << 6)
+#define ADIS16130_IOP         0x1
+#define ADIS16130_IOP_ALL_RDY (1 << 3) /* 1 = data-ready signal low when unread data on all channels; */
+#define ADIS16130_IOP_SYNC    (1 << 0) /* 1 = synchronization enabled */
+#define ADIS16130_RATEDATA    0x8 /* Gyroscope output, rate of rotation */
+#define ADIS16130_TEMPDATA    0xA /* Temperature output */
+#define ADIS16130_RATECS      0x28 /* Gyroscope channel setup */
+#define ADIS16130_RATECS_EN   (1 << 3) /* 1 = channel enable; */
+#define ADIS16130_TEMPCS      0x2A /* Temperature channel setup */
+#define ADIS16130_TEMPCS_EN   (1 << 3)
+#define ADIS16130_RATECONV    0x30
+#define ADIS16130_TEMPCONV    0x32
+#define ADIS16130_MODE        0x38
+#define ADIS16130_MODE_24BIT  (1 << 1) /* 1 = 24-bit resolution; */
+
+#define ADIS16130_MAX_TX     4
+#define ADIS16130_MAX_RX     4
+
+/**
+ * struct adis16130_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @mode:		1 = 24-bit resolution, 0 = 16-bit resolution
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16130_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	u32                             mode; /* 1: 24bits mode 0:16bits mode */
+	struct mutex			buf_lock;
+};
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16130_scan {
+	ADIS16130_SCAN_GYRO,
+	ADIS16130_SCAN_TEMP,
+};
+
+void adis16130_remove_trigger(struct iio_dev *indio_dev);
+int adis16130_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16130_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+
+int adis16130_configure_ring(struct iio_dev *indio_dev);
+void adis16130_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16130_initialize_ring(struct iio_ring_buffer *ring);
+void adis16130_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16130_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16130_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16130_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16130_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16130_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16130_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16130_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16130_H_ */
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
new file mode 100644
index 0000000..49ffc7b
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -0,0 +1,313 @@
+/*
+ * ADIS16130 Digital Output, High Precision Angular Rate Sensor driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16130.h"
+
+#define DRIVER_NAME		"adis16130"
+
+static struct adis16130_state *adis16130_st;
+
+int adis16130_spi_write(struct device *dev, u8 reg_addr,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = reg_addr;
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+int adis16130_spi_read(struct device *dev, u8 reg_addr,
+		u32 *val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+
+	st->tx[0] = ADIS16130_CON_RD | reg_addr;
+	if (st->mode)
+		ret = spi_read(st->us, st->rx, 4);
+	else
+		ret = spi_read(st->us, st->rx, 3);
+
+	if (ret == 0) {
+		if (st->mode)
+			*val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+		else
+			*val = (st->rx[1] << 8) | st->rx[2];
+	}
+
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static ssize_t adis16130_gyro_read(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	u32 val;
+	ssize_t ret;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+	ret =  adis16130_spi_read(dev, ADIS16130_RATEDATA, &val);
+	mutex_unlock(&indio_dev->mlock);
+
+	if (ret == 0)
+		return sprintf(buf, "%d\n", val);
+	else
+		return ret;
+}
+
+static ssize_t adis16130_temp_read(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	u32 val;
+	ssize_t ret;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+	ret =  adis16130_spi_read(dev, ADIS16130_TEMPDATA, &val);
+	mutex_unlock(&indio_dev->mlock);
+
+	if (ret == 0)
+		return sprintf(buf, "%d\n", val);
+	else
+		return ret;
+}
+
+static ssize_t adis16130_bitsmode_read(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+
+	return sprintf(buf, "%d\n", st->mode);
+}
+
+static ssize_t adis16130_bitsmode_write(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 16, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16130_spi_write(dev, ADIS16130_MODE, !!val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16130_temp_read);
+
+static IIO_CONST_ATTR(name, "adis16130");
+
+static IIO_DEV_ATTR_GYRO(adis16130_gyro_read,
+		ADIS16130_RATEDATA);
+
+#define IIO_DEV_ATTR_BITS_MODE(_mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(bits_mode, _mode, _show, _store, _addr)
+
+static IIO_DEV_ATTR_BITS_MODE(S_IWUSR | S_IRUGO, adis16130_bitsmode_read, adis16130_bitsmode_write,
+			ADIS16130_MODE);
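+/*
+ * bits_mode reflects st->mode: adis16130_spi_read() clocks in 4 bytes and
+ * assembles a 24-bit result when it is set, otherwise 3 bytes for a
+ * 16-bit result.
+ */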
+
+static struct attribute *adis16130_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16130_event_attribute_group = {
+	.attrs = adis16130_event_attributes,
+};
+
+static struct attribute *adis16130_attributes[] = {
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_gyro_raw.dev_attr.attr,
+	&iio_dev_attr_bits_mode.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16130_attribute_group = {
+	.attrs = adis16130_attributes,
+};
+
+static int __devinit adis16130_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16130_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16130_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16130_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16130_event_attribute_group;
+	st->indio_dev->attrs = &adis16130_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+	st->mode = 1;
+
+	ret = adis16130_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16130_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16130");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16130_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	adis16130_st = st;
+	return 0;
+
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16130_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16130_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16130_remove(struct spi_device *spi)
+{
+	struct adis16130_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	adis16130_remove_trigger(indio_dev);
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16130_uninitialize_ring(indio_dev->ring);
+	adis16130_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver adis16130_driver = {
+	.driver = {
+		.name = "adis16130",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16130_probe,
+	.remove = __devexit_p(adis16130_remove),
+};
+
+static __init int adis16130_init(void)
+{
+	return spi_register_driver(&adis16130_driver);
+}
+module_init(adis16130_init);
+
+static __exit void adis16130_exit(void)
+{
+	spi_unregister_driver(&adis16130_driver);
+}
+module_exit(adis16130_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16251.h b/drivers/staging/iio/gyro/adis16251.h
new file mode 100644
index 0000000..d23852c
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16251.h
@@ -0,0 +1,185 @@
+#ifndef SPI_ADIS16251_H_
+#define SPI_ADIS16251_H_
+
+#define ADIS16251_STARTUP_DELAY	220 /* ms */
+
+#define ADIS16251_READ_REG(a)    a
+#define ADIS16251_WRITE_REG(a) ((a) | 0x80)
+
+#define ADIS16251_ENDURANCE  0x00 /* Flash memory write count */
+#define ADIS16251_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16251_GYRO_OUT   0x04 /* X-axis gyroscope output */
+#define ADIS16251_AUX_ADC    0x0A /* analog input channel measurement */
+#define ADIS16251_TEMP_OUT   0x0C /* internal temperature measurement */
+#define ADIS16251_ANGL_OUT   0x0E /* angle displacement */
+#define ADIS16251_GYRO_OFF   0x14 /* Calibration, offset/bias adjustment */
+#define ADIS16251_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
+#define ADIS16251_ALM_MAG1   0x20 /* Alarm 1 magnitude/polarity setting */
+#define ADIS16251_ALM_MAG2   0x22 /* Alarm 2 magnitude/polarity setting */
+#define ADIS16251_ALM_SMPL1  0x24 /* Alarm 1 dynamic rate of change setting */
+#define ADIS16251_ALM_SMPL2  0x26 /* Alarm 2 dynamic rate of change setting */
+#define ADIS16251_ALM_CTRL   0x28 /* Alarm control */
+#define ADIS16251_AUX_DAC    0x30 /* Auxiliary DAC data */
+#define ADIS16251_GPIO_CTRL  0x32 /* Control, digital I/O line */
+#define ADIS16251_MSC_CTRL   0x34 /* Control, data ready, self-test settings */
+#define ADIS16251_SMPL_PRD   0x36 /* Control, internal sample rate */
+#define ADIS16251_SENS_AVG   0x38 /* Control, dynamic range, filtering */
+#define ADIS16251_SLP_CNT    0x3A /* Control, sleep mode initiation */
+#define ADIS16251_DIAG_STAT  0x3C /* Diagnostic, error flags */
+#define ADIS16251_GLOB_CMD   0x3E /* Control, global commands */
+
+#define ADIS16251_ERROR_ACTIVE			(1<<14)
+#define ADIS16251_NEW_DATA			(1<<14)
+
+/* MSC_CTRL */
+#define ADIS16251_MSC_CTRL_INT_SELF_TEST	(1<<10) /* Internal self-test enable */
+#define ADIS16251_MSC_CTRL_NEG_SELF_TEST	(1<<9)
+#define ADIS16251_MSC_CTRL_POS_SELF_TEST	(1<<8)
+#define ADIS16251_MSC_CTRL_DATA_RDY_EN		(1<<2)
+#define ADIS16251_MSC_CTRL_DATA_RDY_POL_HIGH	(1<<1)
+#define ADIS16251_MSC_CTRL_DATA_RDY_DIO2	(1<<0)
+
+/* SMPL_PRD */
+#define ADIS16251_SMPL_PRD_TIME_BASE	(1<<7) /* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
+#define ADIS16251_SMPL_PRD_DIV_MASK	0x7F
+
+/* SLP_CNT */
+#define ADIS16251_SLP_CNT_POWER_OFF     0x80
+
+/* DIAG_STAT */
+#define ADIS16251_DIAG_STAT_ALARM2	(1<<9)
+#define ADIS16251_DIAG_STAT_ALARM1	(1<<8)
+#define ADIS16251_DIAG_STAT_SELF_TEST	(1<<5)
+#define ADIS16251_DIAG_STAT_OVERFLOW	(1<<4)
+#define ADIS16251_DIAG_STAT_SPI_FAIL	(1<<3)
+#define ADIS16251_DIAG_STAT_FLASH_UPT	(1<<2)
+#define ADIS16251_DIAG_STAT_POWER_HIGH	(1<<1)
+#define ADIS16251_DIAG_STAT_POWER_LOW	(1<<0)
+
+#define ADIS16251_DIAG_STAT_ERR_MASK (ADIS16251_DIAG_STAT_ALARM2 | \
+				      ADIS16251_DIAG_STAT_ALARM1 | \
+				      ADIS16251_DIAG_STAT_SELF_TEST | \
+				      ADIS16251_DIAG_STAT_OVERFLOW | \
+				      ADIS16251_DIAG_STAT_SPI_FAIL | \
+				      ADIS16251_DIAG_STAT_FLASH_UPT | \
+				      ADIS16251_DIAG_STAT_POWER_HIGH | \
+				      ADIS16251_DIAG_STAT_POWER_LOW)
+
+/* GLOB_CMD */
+#define ADIS16251_GLOB_CMD_SW_RESET	(1<<7)
+#define ADIS16251_GLOB_CMD_FLASH_UPD	(1<<3)
+#define ADIS16251_GLOB_CMD_DAC_LATCH	(1<<2)
+#define ADIS16251_GLOB_CMD_FAC_CALIB	(1<<1)
+#define ADIS16251_GLOB_CMD_AUTO_NULL	(1<<0)
+
+#define ADIS16251_MAX_TX 24
+#define ADIS16251_MAX_RX 24
+
+#define ADIS16251_SPI_SLOW	(u32)(300 * 1000)
+#define ADIS16251_SPI_BURST	(u32)(1000 * 1000)
+#define ADIS16251_SPI_FAST	(u32)(2000 * 1000)
+
+/**
+ * struct adis16251_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct adis16251_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+
+int adis16251_spi_write_reg_8(struct device *dev,
+			      u8 reg_address,
+			      u8 val);
+
+int adis16251_spi_read_burst(struct device *dev, u8 *rx);
+
+int adis16251_spi_read_sequence(struct device *dev,
+				      u8 *tx, u8 *rx, int num);
+
+int adis16251_set_irq(struct device *dev, bool enable);
+
+int adis16251_reset(struct device *dev);
+
+int adis16251_stop_device(struct device *dev);
+
+int adis16251_check_status(struct device *dev);
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum adis16251_scan {
+	ADIS16251_SCAN_SUPPLY,
+	ADIS16251_SCAN_GYRO,
+	ADIS16251_SCAN_TEMP,
+	ADIS16251_SCAN_ADC_0,
+};
+
+void adis16251_remove_trigger(struct iio_dev *indio_dev);
+int adis16251_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t adis16251_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+
+int adis16251_configure_ring(struct iio_dev *indio_dev);
+void adis16251_unconfigure_ring(struct iio_dev *indio_dev);
+
+int adis16251_initialize_ring(struct iio_ring_buffer *ring);
+void adis16251_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void adis16251_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16251_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+adis16251_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int adis16251_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void adis16251_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis16251_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+
+static inline void adis16251_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+
+#endif /* CONFIG_IIO_RING_BUFFER */
+#endif /* SPI_ADIS16251_H_ */
diff --git a/drivers/staging/iio/gyro/adis16251_core.c b/drivers/staging/iio/gyro/adis16251_core.c
new file mode 100644
index 0000000..a0d400f
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16251_core.c
@@ -0,0 +1,777 @@
+/*
+ * ADIS16251 Programmable Digital Gyroscope Sensor Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "gyro.h"
+#include "../adc/adc.h"
+
+#include "adis16251.h"
+
+#define DRIVER_NAME		"adis16251"
+
+/* At the moment the spi framework doesn't allow global setting of cs_change.
+ * It's in the likely to be added comment at the top of spi.h.
+ * This means that use cannot be made of spi_write etc.
+ */
+
+/**
+ * adis16251_spi_write_reg_8() - write single byte to a register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @reg_address: the address of the register to be written
+ * @val: the value to write
+ **/
+int adis16251_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16251_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16251_spi_write_reg_16() - write 2 bytes to a pair of registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. Second
+ *                     register is assumed to have address one greater.
+ * @value: value to be written
+ **/
+static int adis16251_spi_write_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.tx_buf = st->tx + 2,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16251_WRITE_REG(lower_reg_address);
+	st->tx[1] = value & 0xFF;
+	st->tx[2] = ADIS16251_WRITE_REG(lower_reg_address + 1);
+	st->tx[3] = (value >> 8) & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/**
+ * adis16251_spi_read_reg_16() - read 2 bytes from a 16-bit register
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @lower_reg_address: the address of the lower of the two registers. The
+ *                     second register is assumed to have the address one
+ *                     greater.
+ * @val: somewhere to pass back the value read
+ **/
+static int adis16251_spi_read_reg_16(struct device *dev,
+		u8 lower_reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		}, {
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 1,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16251_READ_REG(lower_reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+	st->tx[3] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				lower_reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[0] << 8) | st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/**
+ * adis16251_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+int adis16251_spi_read_burst(struct device *dev, u8 *rx)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+	u32 old_speed_hz = st->us->max_speed_hz;
+	int ret;
+
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 2,
+			.cs_change = 0,
+		}, {
+			.rx_buf = rx,
+			.bits_per_word = 8,
+			.len = 24,
+			.cs_change = 1,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADIS16251_READ_REG(ADIS16251_GLOB_CMD);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+
+	st->us->max_speed_hz = min(ADIS16251_SPI_BURST, old_speed_hz);
+	spi_setup(st->us);
+
+	ret = spi_sync(st->us, &msg);
+	if (ret)
+		dev_err(&st->us->dev, "problem when burst reading");
+
+	st->us->max_speed_hz = old_speed_hz;
+	spi_setup(st->us);
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+/**
+ * adis16251_spi_read_sequence() - read a sequence of 16-bit registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @tx: register addresses in bytes 0,2,4,6... (min size is 2*num bytes)
+ * @rx: somewhere to pass back the value read (min size is 2*num bytes)
+ **/
+int adis16251_spi_read_sequence(struct device *dev,
+		u8 *tx, u8 *rx, int num)
+{
+	struct spi_message msg;
+	struct spi_transfer *xfers;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+	int ret, i;
+
+	xfers = kzalloc((num + 1) * sizeof(*xfers), GFP_KERNEL);
+	if (xfers == NULL) {
+		dev_err(&st->us->dev, "memory alloc failed");
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+
+	/* tx: |addr1|addr2|addr3|...|addrN |zero|
+	 * rx: |zero|res1 |res2 |...|resN-1|resN| */
+	spi_message_init(&msg);
+	for (i = 0; i < num + 1; i++) {
+		if (i > 0)
+			xfers[i].rx_buf = st->rx + 2*(i - 1);
+		if (i < num)
+			xfers[i].tx_buf = st->tx + 2*i;
+		xfers[i].bits_per_word = 8;
+		xfers[i].len = 2;
+		xfers[i].cs_change = 1;
+		spi_message_add_tail(&xfers[i], &msg);
+	}
+
+	mutex_lock(&st->buf_lock);
+
+	ret = spi_sync(st->us, &msg);
+	if (ret)
+		dev_err(&st->us->dev, "problem when reading sequence");
+
+	mutex_unlock(&st->buf_lock);
+	kfree(xfers);
+
+error_ret:
+	return ret;
+}
+
+static ssize_t adis16251_spi_read_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf,
+		unsigned bits)
+{
+	int ret;
+	s16 val = 0;
+	unsigned shift = 16 - bits;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = adis16251_spi_read_reg_16(dev, this_attr->address, (u16 *)&val);
+	if (ret)
+		return ret;
+
+	if (val & ADIS16251_ERROR_ACTIVE)
+		adis16251_check_status(dev);
+	val = ((s16)(val << shift) >> shift);
+	return sprintf(buf, "%d\n", val);
+}
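
The shift pair above sign-extends an N-bit two's-complement field to a full s16.
A standalone sketch of the same trick (illustrative only, not part of the patch),
with concrete 14-bit values:

#include <stdio.h>

/* Sign-extend the low 'bits' bits of 'raw' exactly as the driver does:
 * shift the field up to the sign bit, then arithmetic-shift it back down. */
static short sign_extend(unsigned short raw, unsigned bits)
{
	unsigned shift = 16 - bits;

	return (short)(raw << shift) >> shift;
}

int main(void)
{
	/* 14-bit examples: 0x1FFF is the largest positive value (+8191),
	 * 0x2000 is the most negative (-8192), 0x3FFF is -1. */
	printf("%d %d %d\n",
	       sign_extend(0x1FFF, 14),
	       sign_extend(0x2000, 14),
	       sign_extend(0x3FFF, 14));
	return 0;
}
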
+
+static ssize_t adis16251_read_12bit_unsigned(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = adis16251_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	if (val & ADIS16251_ERROR_ACTIVE)
+		adis16251_check_status(dev);
+
+	return sprintf(buf, "%u\n", val & 0x0FFF);
+}
+
+static ssize_t adis16251_read_14bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+	ret =  adis16251_spi_read_signed(dev, attr, buf, 14);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16251_read_12bit_signed(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	ssize_t ret;
+
+	/* Take the iio_dev status lock */
+	mutex_lock(&indio_dev->mlock);
+	ret =  adis16251_spi_read_signed(dev, attr, buf, 12);
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret;
+}
+
+static ssize_t adis16251_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = adis16251_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t adis16251_read_frequency(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret, len = 0;
+	u16 t;
+	int sps;
+	ret = adis16251_spi_read_reg_16(dev,
+			ADIS16251_SMPL_PRD,
+			&t);
+	if (ret)
+		return ret;
+	sps =  (t & ADIS16251_SMPL_PRD_TIME_BASE) ? 8 : 256;
+	sps /= (t & ADIS16251_SMPL_PRD_DIV_MASK) + 1;
+	len = sprintf(buf, "%d SPS\n", sps);
+	return len;
+}
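
Worked example of the SMPL_PRD decode above, as a standalone sketch (illustrative
only): the base rate is 256 SPS on the fast time base or 8 SPS on the slow one,
divided by (divider + 1).

#include <stdio.h>

/* Standalone sketch of the sample-rate formula used in
 * adis16251_read_frequency(). */
static int adis16251_sps(int slow_time_base, unsigned divider)
{
	return (slow_time_base ? 8 : 256) / (divider + 1);
}

int main(void)
{
	printf("%d\n", adis16251_sps(0, 0));	/* 256 SPS */
	printf("%d\n", adis16251_sps(0, 1));	/* 128 SPS */
	printf("%d\n", adis16251_sps(1, 3));	/* 2 SPS   */
	return 0;
}
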
+
+static ssize_t adis16251_write_frequency(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16251_state *st = iio_dev_get_devdata(indio_dev);
+	long val;
+	int ret;
+	u8 t;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+	if (val == 0)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+
+	t = (256 / val);
+	if (t > 0)
+		t--;
+	t &= ADIS16251_SMPL_PRD_DIV_MASK;
+	if ((t & ADIS16251_SMPL_PRD_DIV_MASK) >= 0x0A)
+		st->us->max_speed_hz = ADIS16251_SPI_SLOW;
+	else
+		st->us->max_speed_hz = ADIS16251_SPI_FAST;
+
+	ret = adis16251_spi_write_reg_8(dev,
+			ADIS16251_SMPL_PRD,
+			t);
+
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t adis16251_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -1;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return adis16251_reset(dev);
+	}
+	return -1;
+}
+
+
+
+int adis16251_set_irq(struct device *dev, bool enable)
+{
+	int ret;
+	u16 msc;
+	ret = adis16251_spi_read_reg_16(dev, ADIS16251_MSC_CTRL, &msc);
+	if (ret)
+		goto error_ret;
+
+	msc |= ADIS16251_MSC_CTRL_DATA_RDY_POL_HIGH;
+	if (enable)
+		msc |= ADIS16251_MSC_CTRL_DATA_RDY_EN;
+	else
+		msc &= ~ADIS16251_MSC_CTRL_DATA_RDY_EN;
+
+	ret = adis16251_spi_write_reg_16(dev, ADIS16251_MSC_CTRL, msc);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	return ret;
+}
+
+int adis16251_reset(struct device *dev)
+{
+	int ret;
+	ret = adis16251_spi_write_reg_8(dev,
+			ADIS16251_GLOB_CMD,
+			ADIS16251_GLOB_CMD_SW_RESET);
+	if (ret)
+		dev_err(dev, "problem resetting device");
+
+	return ret;
+}
+
+/* Power down the device */
+int adis16251_stop_device(struct device *dev)
+{
+	int ret;
+	u16 val = ADIS16251_SLP_CNT_POWER_OFF;
+
+	ret = adis16251_spi_write_reg_16(dev, ADIS16251_SLP_CNT, val);
+	if (ret)
+		dev_err(dev, "problem with turning device off: SLP_CNT");
+
+	return ret;
+}
+
+static int adis16251_self_test(struct device *dev)
+{
+	int ret;
+
+	ret = adis16251_spi_write_reg_16(dev,
+			ADIS16251_MSC_CTRL,
+			ADIS16251_MSC_CTRL_INT_SELF_TEST);
+	if (ret) {
+		dev_err(dev, "problem starting self test");
+		goto err_ret;
+	}
+
+	adis16251_check_status(dev);
+
+err_ret:
+	return ret;
+}
+
+int adis16251_check_status(struct device *dev)
+{
+	u16 status;
+	int ret;
+
+	ret = adis16251_spi_read_reg_16(dev, ADIS16251_DIAG_STAT, &status);
+
+	if (ret < 0) {
+		dev_err(dev, "Reading status failed\n");
+		goto error_ret;
+	}
+
+	if (!(status & ADIS16251_DIAG_STAT_ERR_MASK)) {
+		ret = 0;
+		goto error_ret;
+	}
+
+	ret = -EFAULT;
+
+	if (status & ADIS16251_DIAG_STAT_ALARM2)
+		dev_err(dev, "Alarm 2 active\n");
+	if (status & ADIS16251_DIAG_STAT_ALARM1)
+		dev_err(dev, "Alarm 1 active\n");
+	if (status & ADIS16251_DIAG_STAT_SELF_TEST)
+		dev_err(dev, "Self test error\n");
+	if (status & ADIS16251_DIAG_STAT_OVERFLOW)
+		dev_err(dev, "Sensor overrange\n");
+	if (status & ADIS16251_DIAG_STAT_SPI_FAIL)
+		dev_err(dev, "SPI failure\n");
+	if (status & ADIS16251_DIAG_STAT_FLASH_UPT)
+		dev_err(dev, "Flash update failed\n");
+	if (status & ADIS16251_DIAG_STAT_POWER_HIGH)
+		dev_err(dev, "Power supply above 5.25V\n");
+	if (status & ADIS16251_DIAG_STAT_POWER_LOW)
+		dev_err(dev, "Power supply below 4.75V\n");
+
+error_ret:
+	return ret;
+}
+
+static int adis16251_initial_setup(struct adis16251_state *st)
+{
+	int ret;
+	u16 smp_prd;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* use low spi speed for init */
+	st->us->max_speed_hz = ADIS16251_SPI_SLOW;
+	st->us->mode = SPI_MODE_3;
+	spi_setup(st->us);
+
+	/* Disable IRQ */
+	ret = adis16251_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	/* Do self test */
+	/* Do self test */
+	ret = adis16251_self_test(dev);
+	if (ret) {
+		dev_err(dev, "self test failed");
+		goto err_ret;
+	}
+	/* Read status register to check the result */
+	ret = adis16251_check_status(dev);
+	if (ret) {
+		adis16251_reset(dev);
+		dev_err(dev, "device not playing ball -> reset");
+		msleep(ADIS16251_STARTUP_DELAY);
+		ret = adis16251_check_status(dev);
+		if (ret) {
+			dev_err(dev, "giving up");
+			goto err_ret;
+		}
+	}
+
+	printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
+			st->us->chip_select, st->us->irq);
+
+	/* use high spi speed if possible */
+	ret = adis16251_spi_read_reg_16(dev, ADIS16251_SMPL_PRD, &smp_prd);
+	if (!ret && (smp_prd & ADIS16251_SMPL_PRD_DIV_MASK) < 0x0A) {
+		st->us->max_speed_hz = ADIS16251_SPI_FAST;
+		spi_setup(st->us);
+	}
+
+err_ret:
+	return ret;
+}
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(0, supply, adis16251_read_12bit_signed,
+		ADIS16251_SUPPLY_OUT);
+static IIO_CONST_ATTR(in0_supply_scale, "0.0018315");
+
+static IIO_DEV_ATTR_GYRO(adis16251_read_14bit_signed,
+		ADIS16251_GYRO_OUT);
+static IIO_DEV_ATTR_GYRO_SCALE(S_IWUSR | S_IRUGO,
+		adis16251_read_12bit_signed,
+		adis16251_write_16bit,
+		ADIS16251_GYRO_SCALE);
+static IIO_DEV_ATTR_GYRO_OFFSET(S_IWUSR | S_IRUGO,
+		adis16251_read_12bit_signed,
+		adis16251_write_16bit,
+		ADIS16251_GYRO_OFF);
+
+static IIO_DEV_ATTR_TEMP_RAW(adis16251_read_12bit_signed);
+static IIO_CONST_ATTR(temp_offset, "25 K");
+static IIO_CONST_ATTR(temp_scale, "0.1453 K");
+
+static IIO_DEV_ATTR_IN_NAMED_RAW(1, aux, adis16251_read_12bit_unsigned,
+		ADIS16251_AUX_ADC);
+static IIO_CONST_ATTR(in1_aux_scale, "0.0006105");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+		adis16251_read_frequency,
+		adis16251_write_frequency);
+static IIO_DEV_ATTR_ANGL(adis16251_read_14bit_signed,
+		ADIS16251_ANGL_OUT);
+
+static IIO_DEV_ATTR_RESET(adis16251_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("0.129 ~ 256");
+
+static IIO_CONST_ATTR(name, "adis16251");
+
+static struct attribute *adis16251_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group adis16251_event_attribute_group = {
+	.attrs = adis16251_event_attributes,
+};
+
+static struct attribute *adis16251_attributes[] = {
+	&iio_dev_attr_in0_supply_raw.dev_attr.attr,
+	&iio_const_attr_in0_supply_scale.dev_attr.attr,
+	&iio_dev_attr_gyro_raw.dev_attr.attr,
+	&iio_dev_attr_gyro_scale.dev_attr.attr,
+	&iio_dev_attr_gyro_offset.dev_attr.attr,
+	&iio_dev_attr_angl_raw.dev_attr.attr,
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_in1_aux_raw.dev_attr.attr,
+	&iio_const_attr_in1_aux_scale.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group adis16251_attribute_group = {
+	.attrs = adis16251_attributes,
+};
+
+static int __devinit adis16251_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct adis16251_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADIS16251_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADIS16251_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &adis16251_event_attribute_group;
+	st->indio_dev->attrs = &adis16251_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = adis16251_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = adis16251_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_RISING,
+				"adis16251");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = adis16251_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = adis16251_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		adis16251_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	adis16251_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	adis16251_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int adis16251_remove(struct spi_device *spi)
+{
+	int ret;
+	struct adis16251_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	ret = adis16251_stop_device(&(indio_dev->dev));
+	if (ret)
+		goto err_ret;
+
+	flush_scheduled_work();
+
+	adis16251_remove_trigger(indio_dev);
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	adis16251_uninitialize_ring(indio_dev->ring);
+	adis16251_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+
+err_ret:
+	return ret;
+}
+
+static struct spi_driver adis16251_driver = {
+	.driver = {
+		.name = "adis16251",
+		.owner = THIS_MODULE,
+	},
+	.probe = adis16251_probe,
+	.remove = __devexit_p(adis16251_remove),
+};
+
+static __init int adis16251_init(void)
+{
+	return spi_register_driver(&adis16251_driver);
+}
+module_init(adis16251_init);
+
+static __exit void adis16251_exit(void)
+{
+	spi_unregister_driver(&adis16251_driver);
+}
+module_exit(adis16251_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16251 Digital Gyroscope Sensor SPI driver");
+MODULE_LICENSE("GPL v2");
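
For reference, a hypothetical user-space sketch that combines the gyro_raw and
gyro_scale attributes registered above. The sysfs path is an assumption (it
depends on the IIO core revision and probe order); only the attribute names
come from the driver.

#include <stdio.h>

#define ADIS16251_SYSFS "/sys/bus/iio/devices/device0/"	/* assumed path */

static int read_sysfs_double(const char *file, double *val)
{
	char path[256];
	FILE *f;
	int n;

	snprintf(path, sizeof(path), ADIS16251_SYSFS "%s", file);
	f = fopen(path, "r");
	if (!f)
		return -1;
	n = fscanf(f, "%lf", val);
	fclose(f);
	return n == 1 ? 0 : -1;
}

int main(void)
{
	double raw, scale;

	if (read_sysfs_double("gyro_raw", &raw) ||
	    read_sysfs_double("gyro_scale", &scale))
		return 1;
	printf("angular rate: %f (raw %f * scale %f)\n",
	       raw * scale, raw, scale);
	return 0;
}
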
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 812440a..c1fd4364 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -1,5 +1,6 @@
 #ifndef SPI_ADIS16260_H_
 #define SPI_ADIS16260_H_
+#include "adis16260_platform_data.h"
 
 #define ADIS16260_STARTUP_DELAY	220 /* ms */
 
@@ -92,6 +93,7 @@
  * @tx:			transmit buffer
 * @rx:			receive buffer
  * @buf_lock:		mutex to protect tx and rx
+ * @negate:		negate the scale parameter
  **/
 struct adis16260_state {
 	struct spi_device		*us;
@@ -102,6 +104,7 @@
 	u8				*tx;
 	u8				*rx;
 	struct mutex			buf_lock;
+	unsigned			negate:1;
 };
 
 int adis16260_set_irq(struct device *dev, bool enable);
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 7d7716e..045e27d 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -1,5 +1,5 @@
 /*
- * ADIS16260 Programmable Digital Gyroscope Sensor Driver
+ * ADIS16260/ADIS16265 Programmable Digital Gyroscope Sensor Driver
  *
  * Copyright 2010 Analog Devices Inc.
  *
@@ -134,8 +134,6 @@
 	mutex_lock(&st->buf_lock);
 	st->tx[0] = ADIS16260_READ_REG(lower_reg_address);
 	st->tx[1] = 0;
-	st->tx[2] = 0;
-	st->tx[3] = 0;
 
 	spi_message_init(&msg);
 	spi_message_add_tail(&xfers[0], &msg);
@@ -293,6 +291,22 @@
 	return ret ? ret : len;
 }
 
+static ssize_t adis16260_read_gyro_scale(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+	ssize_t ret = 0;
+
+	if (st->negate)
+		ret = sprintf(buf, "-");
+	ret += sprintf(buf + ret, "%s\n", "0.00127862821");
+
+	return ret;
+}
+
 static int adis16260_reset(struct device *dev)
 {
 	int ret;
@@ -447,18 +461,6 @@
 				ADIS16260_SUPPLY_OUT);
 static IIO_CONST_ATTR_IN_NAMED_SCALE(0, supply, "0.0018315");
 
-static IIO_DEV_ATTR_GYRO(adis16260_read_14bit_signed,
-		ADIS16260_GYRO_OUT);
-static IIO_CONST_ATTR_GYRO_SCALE("0.00127862821");
-static IIO_DEV_ATTR_GYRO_CALIBSCALE(S_IWUSR | S_IRUGO,
-		adis16260_read_14bit_signed,
-		adis16260_write_16bit,
-		ADIS16260_GYRO_SCALE);
-static IIO_DEV_ATTR_GYRO_CALIBBIAS(S_IWUSR | S_IRUGO,
-		adis16260_read_12bit_signed,
-		adis16260_write_16bit,
-		ADIS16260_GYRO_OFF);
-
 static IIO_DEV_ATTR_TEMP_RAW(adis16260_read_12bit_unsigned);
 static IIO_CONST_ATTR_TEMP_OFFSET("25");
 static IIO_CONST_ATTR_TEMP_SCALE("0.1453");
@@ -470,8 +472,6 @@
 static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
 		adis16260_read_frequency,
 		adis16260_write_frequency);
-static IIO_DEV_ATTR_ANGL(adis16260_read_14bit_signed,
-		ADIS16260_ANGL_OUT);
 
 static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16260_write_reset, 0);
 
@@ -487,38 +487,70 @@
 	.attrs = adis16260_event_attributes,
 };
 
-static struct attribute *adis16260_attributes[] = {
-	&iio_dev_attr_in0_supply_raw.dev_attr.attr,
-	&iio_const_attr_in0_supply_scale.dev_attr.attr,
-	&iio_dev_attr_gyro_raw.dev_attr.attr,
-	&iio_const_attr_gyro_scale.dev_attr.attr,
-	&iio_dev_attr_gyro_calibscale.dev_attr.attr,
-	&iio_dev_attr_gyro_calibbias.dev_attr.attr,
-	&iio_dev_attr_angl_raw.dev_attr.attr,
-	&iio_dev_attr_temp_raw.dev_attr.attr,
-	&iio_const_attr_temp_offset.dev_attr.attr,
-	&iio_const_attr_temp_scale.dev_attr.attr,
-	&iio_dev_attr_in1_raw.dev_attr.attr,
-	&iio_const_attr_in1_scale.dev_attr.attr,
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
-	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
-	&iio_dev_attr_reset.dev_attr.attr,
-	&iio_const_attr_name.dev_attr.attr,
-	NULL
-};
+#define ADIS16260_GYRO_ATTR_SET(axis)					\
+	IIO_DEV_ATTR_GYRO##axis(adis16260_read_14bit_signed,		\
+				ADIS16260_GYRO_OUT);			\
+	static IIO_DEV_ATTR_GYRO##axis##_SCALE(S_IRUGO,			\
+					adis16260_read_gyro_scale,	\
+					NULL,				\
+					0);				\
+	static IIO_DEV_ATTR_GYRO##axis##_CALIBSCALE(S_IRUGO | S_IWUSR,	\
+					adis16260_read_12bit_unsigned,	\
+					adis16260_write_16bit,		\
+					ADIS16260_GYRO_SCALE);		\
+	static IIO_DEV_ATTR_GYRO##axis##_CALIBBIAS(S_IWUSR | S_IRUGO,	\
+					adis16260_read_12bit_signed,	\
+					adis16260_write_16bit,		\
+					ADIS16260_GYRO_OFF);		\
+	static IIO_DEV_ATTR_ANGL##axis(adis16260_read_14bit_signed,	\
+				       ADIS16260_ANGL_OUT);
 
-static const struct attribute_group adis16260_attribute_group = {
-	.attrs = adis16260_attributes,
-};
+static ADIS16260_GYRO_ATTR_SET();
+static ADIS16260_GYRO_ATTR_SET(_X);
+static ADIS16260_GYRO_ATTR_SET(_Y);
+static ADIS16260_GYRO_ATTR_SET(_Z);
+
+#define ADIS16260_ATTR_GROUP(axis)					\
+	struct attribute *adis16260_attributes##axis[] = {		\
+		&iio_dev_attr_in0_supply_raw.dev_attr.attr,		\
+		&iio_const_attr_in0_supply_scale.dev_attr.attr,		\
+		&iio_dev_attr_gyro##axis##_raw.dev_attr.attr,		\
+		&iio_dev_attr_gyro##axis##_scale.dev_attr.attr,		\
+		&iio_dev_attr_gyro##axis##_calibscale.dev_attr.attr,	\
+		&iio_dev_attr_gyro##axis##_calibbias.dev_attr.attr,	\
+		&iio_dev_attr_angl##axis##_raw.dev_attr.attr,		\
+		&iio_dev_attr_temp_raw.dev_attr.attr,			\
+		&iio_const_attr_temp_offset.dev_attr.attr,		\
+		&iio_const_attr_temp_scale.dev_attr.attr,		\
+		&iio_dev_attr_in1_raw.dev_attr.attr,			\
+		&iio_const_attr_in1_scale.dev_attr.attr,		\
+		&iio_dev_attr_sampling_frequency.dev_attr.attr,		\
+		&iio_const_attr_sampling_frequency_available.dev_attr.attr, \
+		&iio_dev_attr_reset.dev_attr.attr,			\
+		&iio_const_attr_name.dev_attr.attr,			\
+		NULL							\
+	};								\
+	static const struct attribute_group adis16260_attribute_group##axis \
+	= {								\
+		.attrs = adis16260_attributes##axis,			\
+	};
+
+static ADIS16260_ATTR_GROUP();
+static ADIS16260_ATTR_GROUP(_x);
+static ADIS16260_ATTR_GROUP(_y);
+static ADIS16260_ATTR_GROUP(_z);
 
 static int __devinit adis16260_probe(struct spi_device *spi)
 {
 	int ret, regdone = 0;
+	struct adis16260_platform_data *pd = spi->dev.platform_data;
 	struct adis16260_state *st = kzalloc(sizeof *st, GFP_KERNEL);
 	if (!st) {
 		ret =  -ENOMEM;
 		goto error_ret;
 	}
+	if (pd)
+		st->negate = pd->negate;
 	/* this is only used for removal purposes */
 	spi_set_drvdata(spi, st);
 
@@ -545,7 +577,24 @@
 	st->indio_dev->dev.parent = &spi->dev;
 	st->indio_dev->num_interrupt_lines = 1;
 	st->indio_dev->event_attrs = &adis16260_event_attribute_group;
-	st->indio_dev->attrs = &adis16260_attribute_group;
+	if (pd && pd->direction)
+		switch (pd->direction) {
+		case 'x':
+			st->indio_dev->attrs = &adis16260_attribute_group_x;
+			break;
+		case 'y':
+			st->indio_dev->attrs = &adis16260_attribute_group_y;
+			break;
+		case 'z':
+			st->indio_dev->attrs = &adis16260_attribute_group_z;
+			break;
+		default:
+			st->indio_dev->attrs = &adis16260_attribute_group;
+			break;
+		}
+	else
+		st->indio_dev->attrs = &adis16260_attribute_group;
+
 	st->indio_dev->dev_data = (void *)(st);
 	st->indio_dev->driver_module = THIS_MODULE;
 	st->indio_dev->modes = INDIO_DIRECT_MODE;
@@ -635,6 +684,18 @@
 	return ret;
 }
 
+/*
+ * These parts do not need to be differentiated until someone adds
+ * support for the on chip filtering.
+ */
+static const struct spi_device_id adis16260_id[] = {
+	{"adis16260", 0},
+	{"adis16265", 0},
+	{"adis16250", 0},
+	{"adis16255", 0},
+	{}
+};
+
 static struct spi_driver adis16260_driver = {
 	.driver = {
 		.name = "adis16260",
@@ -642,6 +703,7 @@
 	},
 	.probe = adis16260_probe,
 	.remove = __devexit_p(adis16260_remove),
+	.id_table = adis16260_id,
 };
 
 static __init int adis16260_init(void)
diff --git a/drivers/staging/iio/gyro/adis16260_platform_data.h b/drivers/staging/iio/gyro/adis16260_platform_data.h
new file mode 100644
index 0000000..12802e9
--- /dev/null
+++ b/drivers/staging/iio/gyro/adis16260_platform_data.h
@@ -0,0 +1,19 @@
+/*
+ * ADIS16260 Programmable Digital Gyroscope Sensor Driver Platform Data
+ *
+ * Based on adis16255.h Matthia Brugger <m_brugger&web.de>
+ *
+ * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+/**
+ * struct adis16260_platform_data - instance specific data
+ * @direction: 'x', 'y' or 'z'
+ * @negate: flag to indicate value should be inverted.
+ **/
+struct adis16260_platform_data {
+	char direction;
+	unsigned negate:1;
+};
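
A hypothetical board-support sketch showing how this platform data would be
supplied; the bus number, chip select, SPI clock and include path are made-up
values for illustration only.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include "adis16260_platform_data.h"	/* illustrative include path */

static struct adis16260_platform_data board_adis16260_pd = {
	.direction = 'y',	/* part is mounted along the board's Y axis */
	.negate    = 1,		/* rotation sense is inverted on this board */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "adis16260",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.platform_data	= &board_adis16260_pd,
	},
};

/* Registered from the machine init code, e.g.:
 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
 */
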
diff --git a/drivers/staging/iio/gyro/gyro.h b/drivers/staging/iio/gyro/gyro.h
index 98b837b..b4ea5bf 100644
--- a/drivers/staging/iio/gyro/gyro.h
+++ b/drivers/staging/iio/gyro/gyro.h
@@ -71,3 +71,12 @@
 
 #define IIO_DEV_ATTR_ANGL(_show, _addr)                         \
 	IIO_DEVICE_ATTR(angl_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL_X(_show, _addr)				\
+	IIO_DEVICE_ATTR(angl_x_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL_Y(_show, _addr)				\
+	IIO_DEVICE_ATTR(angl_y_raw, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGL_Z(_show, _addr)				\
+	IIO_DEVICE_ATTR(angl_z_raw, S_IRUGO, _show, NULL, _addr)
diff --git a/drivers/staging/iio/imu/adis16350_core.c b/drivers/staging/iio/imu/adis16350_core.c
index 97c1ec8..cf7176b 100644
--- a/drivers/staging/iio/imu/adis16350_core.c
+++ b/drivers/staging/iio/imu/adis16350_core.c
@@ -570,6 +570,7 @@
 	&iio_dev_attr_temp_y_raw.dev_attr.attr,
 	&iio_dev_attr_temp_z_raw.dev_attr.attr,
 	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
 	&iio_dev_attr_in1_raw.dev_attr.attr,
 	&iio_const_attr_in1_scale.dev_attr.attr,
 	&iio_dev_attr_sampling_frequency.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/Kconfig b/drivers/staging/iio/meter/Kconfig
new file mode 100644
index 0000000..12e36e4
--- /dev/null
+++ b/drivers/staging/iio/meter/Kconfig
@@ -0,0 +1,61 @@
+#
+# IIO meter drivers configuration
+#
+comment "Active energy metering IC"
+
+config ADE7753
+	tristate "Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices ADE7753 Single-Phase Multifunction
+	  Metering IC with di/dt Sensor Interface.
+
+config ADE7754
+	tristate "Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices ADE7754 Polyphase
+	  Multifunction Energy Metering IC Driver.
+
+config ADE7758
+	tristate "Analog Devices ADE7758 Poly Phase Multifunction Energy Metering IC Driver"
+	depends on SPI
+	select IIO_TRIGGER if IIO_RING_BUFFER
+	select IIO_SW_RING if IIO_RING_BUFFER
+	help
+	  Say yes here to build support for Analog Devices ADE7758 Polyphase
+	  Multifunction Energy Metering IC with Per Phase Information Driver.
+
+config ADE7759
+	tristate "Analog Devices ADE7759 Active Energy Metering IC Driver"
+	depends on SPI
+	help
+	  Say yes here to build support for Analog Devices ADE7759 Active Energy
+	  Metering IC with di/dt Sensor Interface.
+
+config ADE7854
+	tristate "Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver"
+	depends on SPI || I2C
+	help
+	  Say yes here to build support for Analog Devices ADE7854/58/68/78 Polyphase
+	  Multifunction Energy Metering IC Driver.
+
+config ADE7854_I2C
+	tristate "support I2C bus connection"
+	depends on ADE7854 && I2C
+	default y
+	help
+	  Say Y here if you have ADE7854/58/68/78 hooked to an I2C bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ade7854-i2c.
+
+config ADE7854_SPI
+	tristate "support SPI bus connection"
+	depends on ADE7854 && SPI
+	default y
+	help
+	  Say Y here if you have ADE7854/58/68/78 hooked to an SPI bus.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ade7854-spi.
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
new file mode 100644
index 0000000..0cc7d51
--- /dev/null
+++ b/drivers/staging/iio/meter/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for metering ic drivers
+#
+
+obj-$(CONFIG_ADE7753) += ade7753.o
+obj-$(CONFIG_ADE7754) += ade7754.o
+
+ade7758-y             := ade7758_core.o
+ade7758-$(CONFIG_IIO_RING_BUFFER) += ade7758_ring.o ade7758_trigger.o
+obj-$(CONFIG_ADE7758) += ade7758.o
+
+obj-$(CONFIG_ADE7759) += ade7759.o
+obj-$(CONFIG_ADE7854) += ade7854.o
+obj-$(CONFIG_ADE7854_I2C) += ade7854-i2c.o
+obj-$(CONFIG_ADE7854_SPI) += ade7854-spi.o
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
new file mode 100644
index 0000000..e72afbd
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -0,0 +1,730 @@
+/*
+ * ADE7753 Single-Phase Multifunction Metering IC with di/dt Sensor Interface Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7753.h"
+
+int ade7753_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7753_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7753_spi_write_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 3,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7753_WRITE_REG(reg_address);
+	st->tx[1] = (value >> 8) & 0xFF;
+	st->tx[2] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7753_spi_read_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7753_READ_REG(reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7753_spi_read_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 3,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7753_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7753_spi_read_reg_24(struct device *dev,
+		u8 reg_address,
+		u32 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 4,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7753_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+	st->tx[3] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t ade7753_read_8bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u8 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7753_spi_read_reg_8(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7753_read_16bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7753_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7753_read_24bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u32 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7753_spi_read_reg_24(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7753_write_8bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7753_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7753_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7753_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int ade7753_reset(struct device *dev)
+{
+	int ret;
+	u16 val;
+
+	ret = ade7753_spi_read_reg_16(dev,
+			ADE7753_MODE,
+			&val);
+	if (ret)
+		return ret;
+
+	val |= 1 << 6; /* Software Chip Reset */
+	ret = ade7753_spi_write_reg_16(dev,
+			ADE7753_MODE,
+			val);
+
+	return ret;
+}
+
+static ssize_t ade7753_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -1;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return ade7753_reset(dev);
+	}
+	return -1;
+}
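
A hypothetical user-space sketch that exercises the reset attribute backed by
the handler above; the sysfs path is an assumption, while the accepted values
('1', 'y', 'Y') follow ade7753_write_reset().

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* assumed path; the device index depends on probe order */
	int fd = open("/sys/bus/iio/devices/device0/reset", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1) {	/* request a software reset */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
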
+
+static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
+static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
+static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
+static IIO_DEV_ATTR_LVAENERGY(ade7753_read_24bit, ADE7753_LVAENERGY);
+static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_CFDEN);
+static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_CFNUM);
+static IIO_DEV_ATTR_CHKSUM(ade7753_read_8bit, ADE7753_CHKSUM);
+static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_PHCAL);
+static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_APOS);
+static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_SAGCYC);
+static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_SAGLVL);
+static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_LINECYC);
+static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_WDIV);
+static IIO_DEV_ATTR_IRMS(S_IRUGO,
+		ade7753_read_24bit,
+		NULL,
+		ADE7753_IRMS);
+static IIO_DEV_ATTR_VRMS(S_IRUGO,
+		ade7753_read_24bit,
+		NULL,
+		ADE7753_VRMS);
+static IIO_DEV_ATTR_IRMSOS(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_IRMSOS);
+static IIO_DEV_ATTR_VRMSOS(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_VRMSOS);
+static IIO_DEV_ATTR_WGAIN(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_WGAIN);
+static IIO_DEV_ATTR_VAGAIN(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_VAGAIN);
+static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
+		ade7753_read_16bit,
+		ade7753_write_16bit,
+		ADE7753_GAIN);
+static IIO_DEV_ATTR_IPKLVL(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_IPKLVL);
+static IIO_DEV_ATTR_VPKLVL(S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_VPKLVL);
+static IIO_DEV_ATTR_IPEAK(S_IRUGO,
+		ade7753_read_24bit,
+		NULL,
+		ADE7753_IPEAK);
+static IIO_DEV_ATTR_VPEAK(S_IRUGO,
+		ade7753_read_24bit,
+		NULL,
+		ADE7753_VPEAK);
+static IIO_DEV_ATTR_VPERIOD(S_IRUGO,
+		ade7753_read_16bit,
+		NULL,
+		ADE7753_PERIOD);
+static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_CH1OS);
+static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
+		ade7753_read_8bit,
+		ade7753_write_8bit,
+		ADE7753_CH2OS);
+
+static int ade7753_set_irq(struct device *dev, bool enable)
+{
+	int ret;
+	u8 irqen;
+	ret = ade7753_spi_read_reg_8(dev, ADE7753_IRQEN, &irqen);
+	if (ret)
+		goto error_ret;
+
+	if (enable)
+		irqen |= 1 << 3; /* Enable an interrupt when data is
+				    present in the waveform register */
+	else
+		irqen &= ~(1 << 3);
+
+	ret = ade7753_spi_write_reg_8(dev, ADE7753_IRQEN, irqen);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	return ret;
+}
+
+/* Power down the device */
+int ade7753_stop_device(struct device *dev)
+{
+	int ret;
+	u16 val;
+
+	ret = ade7753_spi_read_reg_16(dev,
+			ADE7753_MODE,
+			&val);
+	if (ret)
+		return ret;
+
+	val |= 1 << 4;  /* suspend the A/D converters */
+	ret = ade7753_spi_write_reg_16(dev,
+			ADE7753_MODE,
+			val);
+
+	return ret;
+}
+
+static int ade7753_initial_setup(struct ade7753_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* use low spi speed for init */
+	st->us->mode = SPI_MODE_3;
+	spi_setup(st->us);
+
+	/* Disable IRQ */
+	ret = ade7753_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	ade7753_reset(dev);
+	msleep(ADE7753_STARTUP_DELAY);
+
+err_ret:
+	return ret;
+}
+
+static ssize_t ade7753_read_frequency(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret, len = 0;
+	u16 t;
+	int sps;
+	ret = ade7753_spi_read_reg_16(dev,
+			ADE7753_MODE,
+			&t);
+	if (ret)
+		return ret;
+
+	t = (t >> 11) & 0x3;
+	sps = 27900 / (1 + t);
+
+	len = sprintf(buf, "%d SPS\n", sps);
+	return len;
+}
+
+static ssize_t ade7753_write_frequency(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+	long val;
+	int ret;
+	u16 reg, t;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+	if (val == 0)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+
+	t = (27900 / val);
+	if (t > 0)
+		t--;
+
+	if (t > 1)
+		st->us->max_speed_hz = ADE7753_SPI_SLOW;
+	else
+		st->us->max_speed_hz = ADE7753_SPI_FAST;
+
+	ret = ade7753_spi_read_reg_16(dev,
+			ADE7753_MODE,
+			&reg);
+	if (ret)
+		goto out;
+
+	reg &= ~(3 << 11);
+	reg |= t << 11;
+
+	ret = ade7753_spi_write_reg_16(dev,
+			ADE7753_MODE,
+			reg);
+
+out:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
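
Standalone sketch (illustrative only) of the read-modify-write done above on
bits 11 and 12 of the MODE register: clear the two data-rate bits, then insert
the new code.

#include <stdio.h>

static unsigned short set_mode_data_rate(unsigned short mode, unsigned t)
{
	mode &= ~(3u << 11);		/* clear the two data-rate bits */
	mode |= (t & 3u) << 11;		/* insert the new rate code     */
	return mode;
}

int main(void)
{
	/* e.g. a MODE value of 0x000C with rate code 2 becomes 0x100C */
	printf("0x%04X\n", set_mode_data_rate(0x000C, 2));
	return 0;
}
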
+static IIO_DEV_ATTR_TEMP_RAW(ade7753_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "-25 C");
+static IIO_CONST_ATTR(temp_scale, "0.67 C");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+		ade7753_read_frequency,
+		ade7753_write_frequency);
+
+static IIO_DEV_ATTR_RESET(ade7753_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
+
+static IIO_CONST_ATTR(name, "ade7753");
+
+static struct attribute *ade7753_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group ade7753_event_attribute_group = {
+	.attrs = ade7753_event_attributes,
+};
+
+static struct attribute *ade7753_attributes[] = {
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_phcal.dev_attr.attr,
+	&iio_dev_attr_cfden.dev_attr.attr,
+	&iio_dev_attr_aenergy.dev_attr.attr,
+	&iio_dev_attr_laenergy.dev_attr.attr,
+	&iio_dev_attr_vaenergy.dev_attr.attr,
+	&iio_dev_attr_lvaenergy.dev_attr.attr,
+	&iio_dev_attr_cfnum.dev_attr.attr,
+	&iio_dev_attr_apos.dev_attr.attr,
+	&iio_dev_attr_sagcyc.dev_attr.attr,
+	&iio_dev_attr_saglvl.dev_attr.attr,
+	&iio_dev_attr_linecyc.dev_attr.attr,
+	&iio_dev_attr_chksum.dev_attr.attr,
+	&iio_dev_attr_pga_gain.dev_attr.attr,
+	&iio_dev_attr_wgain.dev_attr.attr,
+	&iio_dev_attr_choff_1.dev_attr.attr,
+	&iio_dev_attr_choff_2.dev_attr.attr,
+	&iio_dev_attr_wdiv.dev_attr.attr,
+	&iio_dev_attr_irms.dev_attr.attr,
+	&iio_dev_attr_vrms.dev_attr.attr,
+	&iio_dev_attr_irmsos.dev_attr.attr,
+	&iio_dev_attr_vrmsos.dev_attr.attr,
+	&iio_dev_attr_vagain.dev_attr.attr,
+	&iio_dev_attr_ipklvl.dev_attr.attr,
+	&iio_dev_attr_vpklvl.dev_attr.attr,
+	&iio_dev_attr_ipeak.dev_attr.attr,
+	&iio_dev_attr_vpeak.dev_attr.attr,
+	&iio_dev_attr_vperiod.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ade7753_attribute_group = {
+	.attrs = ade7753_attributes,
+};
+
+static int __devinit ade7753_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct ade7753_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADE7753_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADE7753_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &ade7753_event_attribute_group;
+	st->indio_dev->attrs = &ade7753_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = ade7753_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = ade7753_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_FALLING,
+				"ade7753");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = ade7753_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = ade7753_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		ade7753_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	ade7753_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	ade7753_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int ade7753_remove(struct spi_device *spi)
+{
+	int ret;
+	struct ade7753_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	ret = ade7753_stop_device(&(indio_dev->dev));
+	if (ret)
+		goto err_ret;
+
+	flush_scheduled_work();
+
+	ade7753_remove_trigger(indio_dev);
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	ade7753_uninitialize_ring(indio_dev->ring);
+	ade7753_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+
+err_ret:
+	return ret;
+}
+
+static struct spi_driver ade7753_driver = {
+	.driver = {
+		.name = "ade7753",
+		.owner = THIS_MODULE,
+	},
+	.probe = ade7753_probe,
+	.remove = __devexit_p(ade7753_remove),
+};
+
+static __init int ade7753_init(void)
+{
+	return spi_register_driver(&ade7753_driver);
+}
+module_init(ade7753_init);
+
+static __exit void ade7753_exit(void)
+{
+	spi_unregister_driver(&ade7753_driver);
+}
+module_exit(ade7753_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Metering IC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7753.h b/drivers/staging/iio/meter/ade7753.h
new file mode 100644
index 0000000..a3722b8
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7753.h
@@ -0,0 +1,140 @@
+#ifndef _ADE7753_H
+#define _ADE7753_H
+
+#define ADE7753_WAVEFORM   0x01
+#define ADE7753_AENERGY    0x02
+#define ADE7753_RAENERGY   0x03
+#define ADE7753_LAENERGY   0x04
+#define ADE7753_VAENERGY   0x05
+#define ADE7753_RVAENERGY  0x06
+#define ADE7753_LVAENERGY  0x07
+#define ADE7753_LVARENERGY 0x08
+#define ADE7753_MODE       0x09
+#define ADE7753_IRQEN      0x0A
+#define ADE7753_STATUS     0x0B
+#define ADE7753_RSTSTATUS  0x0C
+#define ADE7753_CH1OS      0x0D
+#define ADE7753_CH2OS      0x0E
+#define ADE7753_GAIN       0x0F
+#define ADE7753_PHCAL      0x10
+#define ADE7753_APOS       0x11
+#define ADE7753_WGAIN      0x12
+#define ADE7753_WDIV       0x13
+#define ADE7753_CFNUM      0x14
+#define ADE7753_CFDEN      0x15
+#define ADE7753_IRMS       0x16
+#define ADE7753_VRMS       0x17
+#define ADE7753_IRMSOS     0x18
+#define ADE7753_VRMSOS     0x19
+#define ADE7753_VAGAIN     0x1A
+#define ADE7753_VADIV      0x1B
+#define ADE7753_LINECYC    0x1C
+#define ADE7753_ZXTOUT     0x1D
+#define ADE7753_SAGCYC     0x1E
+#define ADE7753_SAGLVL     0x1F
+#define ADE7753_IPKLVL     0x20
+#define ADE7753_VPKLVL     0x21
+#define ADE7753_IPEAK      0x22
+#define ADE7753_RSTIPEAK   0x23
+#define ADE7753_VPEAK      0x24
+#define ADE7753_RSTVPEAK   0x25
+#define ADE7753_TEMP       0x26
+#define ADE7753_PERIOD     0x27
+#define ADE7753_TMODE      0x3D
+#define ADE7753_CHKSUM     0x3E
+#define ADE7753_DIEREV     0x3F
+
+#define ADE7753_READ_REG(a)    a
+#define ADE7753_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7753_MAX_TX    4
+#define ADE7753_MAX_RX    4
+#define ADE7753_STARTUP_DELAY 1
+
+#define ADE7753_SPI_SLOW	(u32)(300 * 1000)
+#define ADE7753_SPI_BURST	(u32)(1000 * 1000)
+#define ADE7753_SPI_FAST	(u32)(2000 * 1000)
+
+#define DRIVER_NAME		"ade7753"
+
+/**
+ * struct ade7753_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct ade7753_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7753_scan {
+	ADE7753_SCAN_ACTIVE_POWER,
+	ADE7753_SCAN_CH1,
+	ADE7753_SCAN_CH2,
+};
+
+void ade7753_remove_trigger(struct iio_dev *indio_dev);
+int ade7753_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7753_read_data_from_ring(struct device *dev,
+		struct device_attribute *attr,
+		char *buf);
+
+
+int ade7753_configure_ring(struct iio_dev *indio_dev);
+void ade7753_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7753_initialize_ring(struct iio_ring_buffer *ring);
+void ade7753_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7753_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7753_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+ade7753_read_data_from_ring(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return 0;
+}
+
+static inline int ade7753_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+static inline void ade7753_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7753_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+static inline void ade7753_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
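
Standalone sketch (illustrative only) of the SPI framing these macros imply: a
write sets the top bit of the address byte, a read leaves it clear, and 16-bit
values go out MSB first, as in ade7753_spi_write_reg_16().

#include <stdio.h>

#define ADE7753_MODE		0x09
#define ADE7753_WRITE_REG(a)	((a) | 0x80)

int main(void)
{
	unsigned char frame[3];
	unsigned short value = 0x1234;

	frame[0] = ADE7753_WRITE_REG(ADE7753_MODE);	/* 0x89 */
	frame[1] = (value >> 8) & 0xFF;			/* 0x12 */
	frame[2] = value & 0xFF;			/* 0x34 */

	printf("%02X %02X %02X\n", frame[0], frame[1], frame[2]);
	return 0;
}
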
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
new file mode 100644
index 0000000..23dedfa
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -0,0 +1,756 @@
+/*
+ * ADE7754 Polyphase Multifunction Energy Metering IC Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7754.h"
+
+static int ade7754_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7754_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7754_spi_write_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 3,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7754_WRITE_REG(reg_address);
+	st->tx[1] = (value >> 8) & 0xFF;
+	st->tx[2] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7754_spi_read_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7754_READ_REG(reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7754_spi_read_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 3,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7754_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7754_spi_read_reg_24(struct device *dev,
+		u8 reg_address,
+		u32 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 4,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7754_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+	st->tx[3] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t ade7754_read_8bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u8 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7754_spi_read_reg_8(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7754_read_16bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7754_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7754_read_24bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u32 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7754_spi_read_reg_24(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7754_write_8bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7754_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7754_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7754_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int ade7754_reset(struct device *dev)
+{
+	int ret;
+	u8 val;
+
+	ret = ade7754_spi_read_reg_8(dev,
+			ADE7754_OPMODE,
+			&val);
+	if (ret)
+		return ret;
+
+	val |= 1 << 6; /* Software Chip Reset */
+	ret = ade7754_spi_write_reg_8(dev,
+			ADE7754_OPMODE,
+			val);
+
+	return ret;
+}
+
+
+static ssize_t ade7754_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -EINVAL;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return ade7754_reset(dev);
+	}
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
+static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
+static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
+static IIO_DEV_ATTR_LVAENERGY(ade7754_read_24bit, ADE7754_LVAENERGY);
+static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_VPEAK);
+static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_IPEAK);
+static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_APHCAL);
+static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_BPHCAL);
+static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_CPHCAL);
+static IIO_DEV_ATTR_AAPOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_AAPOS);
+static IIO_DEV_ATTR_BAPOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_BAPOS);
+static IIO_DEV_ATTR_CAPOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_CAPOS);
+static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_WDIV);
+static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
+		ade7754_read_8bit,
+		ade7754_write_8bit,
+		ADE7754_VADIV);
+static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_CFNUM);
+static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_CFDEN);
+static IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_AAPGAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_BAPGAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_CAPGAIN);
+static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+		ade7754_read_24bit,
+		NULL,
+		ADE7754_AIRMS);
+static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+		ade7754_read_24bit,
+		NULL,
+		ADE7754_BIRMS);
+static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+		ade7754_read_24bit,
+		NULL,
+		ADE7754_CIRMS);
+static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+		ade7754_read_24bit,
+		NULL,
+		ADE7754_AVRMS);
+static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+		ade7754_read_24bit,
+		NULL,
+		ADE7754_BVRMS);
+static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+		ade7754_read_24bit,
+		NULL,
+		ADE7754_CVRMS);
+static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_AIRMSOS);
+static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_BIRMSOS);
+static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_CIRMSOS);
+static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_AVRMSOS);
+static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_BVRMSOS);
+static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
+		ade7754_read_16bit,
+		ade7754_write_16bit,
+		ADE7754_CVRMSOS);
+
+static int ade7754_set_irq(struct device *dev, bool enable)
+{
+	int ret;
+	u16 irqen;
+	ret = ade7754_spi_read_reg_16(dev, ADE7754_IRQEN, &irqen);
+	if (ret)
+		goto error_ret;
+
+	if (enable)
+		irqen |= 1 << 14; /* Enables an interrupt when data is
+				     present in the waveform register */
+	else
+		irqen &= ~(1 << 14);
+
+	ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	return ret;
+}
+
+/* Power down the device */
+static int ade7754_stop_device(struct device *dev)
+{
+	int ret;
+	u8 val;
+	ade7754_spi_read_reg_8(dev,
+			ADE7754_OPMODE,
+			&val);
+	val |= 7 << 3;  /* ADE7754 powered down */
+	ret = ade7754_spi_write_reg_8(dev,
+			ADE7754_OPMODE,
+			val);
+
+	return ret;
+}
+
+static int ade7754_initial_setup(struct ade7754_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* use low spi speed for init */
+	st->us->mode = SPI_MODE_3;
+	spi_setup(st->us);
+
+	/* Disable IRQ */
+	ret = ade7754_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	ade7754_reset(dev);
+	msleep(ADE7754_STARTUP_DELAY);
+
+err_ret:
+	return ret;
+}
+
+static ssize_t ade7754_read_frequency(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret, len = 0;
+	u8 t;
+	int sps;
+	ret = ade7754_spi_read_reg_8(dev,
+			ADE7754_WAVMODE,
+			&t);
+	if (ret)
+		return ret;
+
+	t = (t >> 3) & 0x3;
+	sps = 26000 / (1 + t);
+
+	len = sprintf(buf, "%d SPS\n", sps);
+	return len;
+}
+
+static ssize_t ade7754_write_frequency(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+	unsigned long val;
+	int ret;
+	u8 reg, t;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+
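+	/* The WAVMODE data rate field encodes the sample rate as
+	 * 26000 / (1 + t) SPS (see ade7754_read_frequency() above),
+	 * so invert that relation to get the 2-bit divider. */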
+	t = (26000 / val);
+	if (t > 0)
+		t--;
+
+	if (t > 1)
+		st->us->max_speed_hz = ADE7754_SPI_SLOW;
+	else
+		st->us->max_speed_hz = ADE7754_SPI_FAST;
+
+	ret = ade7754_spi_read_reg_8(dev,
+			ADE7754_WAVMODE,
+			&reg);
+	if (ret)
+		goto out;
+
+	reg &= ~(3 << 3);
+	reg |= t << 3;
+
+	ret = ade7754_spi_write_reg_8(dev,
+			ADE7754_WAVMODE,
+			reg);
+
+out:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+static IIO_DEV_ATTR_TEMP_RAW(ade7754_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "129 C");
+static IIO_CONST_ATTR(temp_scale, "4 C");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+		ade7754_read_frequency,
+		ade7754_write_frequency);
+
+static IIO_DEV_ATTR_RESET(ade7754_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 8667 6500");
+
+static IIO_CONST_ATTR(name, "ade7754");
+
+static struct attribute *ade7754_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group ade7754_event_attribute_group = {
+	.attrs = ade7754_event_attributes,
+};
+
+static struct attribute *ade7754_attributes[] = {
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_aenergy.dev_attr.attr,
+	&iio_dev_attr_laenergy.dev_attr.attr,
+	&iio_dev_attr_vaenergy.dev_attr.attr,
+	&iio_dev_attr_lvaenergy.dev_attr.attr,
+	&iio_dev_attr_vpeak.dev_attr.attr,
+	&iio_dev_attr_ipeak.dev_attr.attr,
+	&iio_dev_attr_aphcal.dev_attr.attr,
+	&iio_dev_attr_bphcal.dev_attr.attr,
+	&iio_dev_attr_cphcal.dev_attr.attr,
+	&iio_dev_attr_aapos.dev_attr.attr,
+	&iio_dev_attr_bapos.dev_attr.attr,
+	&iio_dev_attr_capos.dev_attr.attr,
+	&iio_dev_attr_wdiv.dev_attr.attr,
+	&iio_dev_attr_vadiv.dev_attr.attr,
+	&iio_dev_attr_cfnum.dev_attr.attr,
+	&iio_dev_attr_cfden.dev_attr.attr,
+	&iio_dev_attr_active_power_a_gain.dev_attr.attr,
+	&iio_dev_attr_active_power_b_gain.dev_attr.attr,
+	&iio_dev_attr_active_power_c_gain.dev_attr.attr,
+	&iio_dev_attr_airms.dev_attr.attr,
+	&iio_dev_attr_birms.dev_attr.attr,
+	&iio_dev_attr_cirms.dev_attr.attr,
+	&iio_dev_attr_avrms.dev_attr.attr,
+	&iio_dev_attr_bvrms.dev_attr.attr,
+	&iio_dev_attr_cvrms.dev_attr.attr,
+	&iio_dev_attr_airmsos.dev_attr.attr,
+	&iio_dev_attr_birmsos.dev_attr.attr,
+	&iio_dev_attr_cirmsos.dev_attr.attr,
+	&iio_dev_attr_avrmsos.dev_attr.attr,
+	&iio_dev_attr_bvrmsos.dev_attr.attr,
+	&iio_dev_attr_cvrmsos.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ade7754_attribute_group = {
+	.attrs = ade7754_attributes,
+};
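+
+/*
+ * All of the attributes above surface as files under the IIO device's sysfs
+ * directory. A typical interaction from userspace (paths are illustrative,
+ * the device index depends on probe order) looks like:
+ *
+ *	cat /sys/bus/iio/devices/device0/avrms
+ *	echo 13000 > /sys/bus/iio/devices/device0/sampling_frequency
+ */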
+
+
+
+static int __devinit ade7754_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct ade7754_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADE7754_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADE7754_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &ade7754_event_attribute_group;
+	st->indio_dev->attrs = &ade7754_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = ade7754_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = ade7754_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_FALLING,
+				"ade7754");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = ade7754_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = ade7754_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		ade7754_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	ade7754_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	ade7754_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int ade7754_remove(struct spi_device *spi)
+{
+	int ret;
+	struct ade7754_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	ret = ade7754_stop_device(&(indio_dev->dev));
+	if (ret)
+		goto err_ret;
+
+	flush_scheduled_work();
+
+	ade7754_remove_trigger(indio_dev);
+	if (spi->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	ade7754_uninitialize_ring(indio_dev->ring);
+	ade7754_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+
+err_ret:
+	return ret;
+}
+
+static struct spi_driver ade7754_driver = {
+	.driver = {
+		.name = "ade7754",
+		.owner = THIS_MODULE,
+	},
+	.probe = ade7754_probe,
+	.remove = __devexit_p(ade7754_remove),
+};
+
+static __init int ade7754_init(void)
+{
+	return spi_register_driver(&ade7754_driver);
+}
+module_init(ade7754_init);
+
+static __exit void ade7754_exit(void)
+{
+	spi_unregister_driver(&ade7754_driver);
+}
+module_exit(ade7754_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7754.h b/drivers/staging/iio/meter/ade7754.h
new file mode 100644
index 0000000..f6a3e4b
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7754.h
@@ -0,0 +1,161 @@
+#ifndef _ADE7754_H
+#define _ADE7754_H
+
+#define ADE7754_AENERGY   0x01
+#define ADE7754_RAENERGY  0x02
+#define ADE7754_LAENERGY  0x03
+#define ADE7754_VAENERGY  0x04
+#define ADE7754_RVAENERGY 0x05
+#define ADE7754_LVAENERGY 0x06
+#define ADE7754_PERIOD    0x07
+#define ADE7754_TEMP      0x08
+#define ADE7754_WFORM     0x09
+#define ADE7754_OPMODE    0x0A
+#define ADE7754_MMODE     0x0B
+#define ADE7754_WAVMODE   0x0C
+#define ADE7754_WATMODE   0x0D
+#define ADE7754_VAMODE    0x0E
+#define ADE7754_IRQEN     0x0F
+#define ADE7754_STATUS    0x10
+#define ADE7754_RSTATUS   0x11
+#define ADE7754_ZXTOUT    0x12
+#define ADE7754_LINCYC    0x13
+#define ADE7754_SAGCYC    0x14
+#define ADE7754_SAGLVL    0x15
+#define ADE7754_VPEAK     0x16
+#define ADE7754_IPEAK     0x17
+#define ADE7754_GAIN      0x18
+#define ADE7754_AWG       0x19
+#define ADE7754_BWG       0x1A
+#define ADE7754_CWG       0x1B
+#define ADE7754_AVAG      0x1C
+#define ADE7754_BVAG      0x1D
+#define ADE7754_CVAG      0x1E
+#define ADE7754_APHCAL    0x1F
+#define ADE7754_BPHCAL    0x20
+#define ADE7754_CPHCAL    0x21
+#define ADE7754_AAPOS     0x22
+#define ADE7754_BAPOS     0x23
+#define ADE7754_CAPOS     0x24
+#define ADE7754_CFNUM     0x25
+#define ADE7754_CFDEN     0x26
+#define ADE7754_WDIV      0x27
+#define ADE7754_VADIV     0x28
+#define ADE7754_AIRMS     0x29
+#define ADE7754_BIRMS     0x2A
+#define ADE7754_CIRMS     0x2B
+#define ADE7754_AVRMS     0x2C
+#define ADE7754_BVRMS     0x2D
+#define ADE7754_CVRMS     0x2E
+#define ADE7754_AIRMSOS   0x2F
+#define ADE7754_BIRMSOS   0x30
+#define ADE7754_CIRMSOS   0x31
+#define ADE7754_AVRMSOS   0x32
+#define ADE7754_BVRMSOS   0x33
+#define ADE7754_CVRMSOS   0x34
+#define ADE7754_AAPGAIN   0x35
+#define ADE7754_BAPGAIN   0x36
+#define ADE7754_CAPGAIN   0x37
+#define ADE7754_AVGAIN    0x38
+#define ADE7754_BVGAIN    0x39
+#define ADE7754_CVGAIN    0x3A
+#define ADE7754_CHKSUM    0x3E
+#define ADE7754_VERSION   0x3F
+
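+/* Communication register command byte: bit 7 clear selects a read of the
+ * addressed register, bit 7 set selects a write. */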
+#define ADE7754_READ_REG(a)    a
+#define ADE7754_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7754_MAX_TX    4
+#define ADE7754_MAX_RX    4
+#define ADE7754_STARTUP_DELAY 1
+
+#define ADE7754_SPI_SLOW	(u32)(300 * 1000)
+#define ADE7754_SPI_BURST	(u32)(1000 * 1000)
+#define ADE7754_SPI_FAST	(u32)(2000 * 1000)
+
+#define DRIVER_NAME		"ade7754"
+
+/**
+ * struct ade7754_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct ade7754_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
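+/* Note: nothing in this patch defines THIS_HAS_RING_BUFFER_SUPPORT, so the
+ * stub versions further down are what actually get built for the ADE7754. */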
+
+enum ade7754_scan {
+	ADE7754_SCAN_PHA_V,
+	ADE7754_SCAN_PHB_V,
+	ADE7754_SCAN_PHC_V,
+	ADE7754_SCAN_PHA_I,
+	ADE7754_SCAN_PHB_I,
+	ADE7754_SCAN_PHC_I,
+};
+
+void ade7754_remove_trigger(struct iio_dev *indio_dev);
+int ade7754_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7754_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+
+int ade7754_configure_ring(struct iio_dev *indio_dev);
+void ade7754_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7754_initialize_ring(struct iio_ring_buffer *ring);
+void ade7754_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7754_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7754_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+ade7754_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int ade7754_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+static inline void ade7754_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7754_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+static inline void ade7754_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
new file mode 100644
index 0000000..df5bb7b
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -0,0 +1,171 @@
+#ifndef _ADE7758_H
+#define _ADE7758_H
+
+#define ADE7758_AWATTHR   0x01
+#define ADE7758_BWATTHR   0x02
+#define ADE7758_CWATTHR   0x03
+#define ADE7758_AVARHR    0x04
+#define ADE7758_BVARHR    0x05
+#define ADE7758_CVARHR    0x06
+#define ADE7758_AVAHR     0x07
+#define ADE7758_BVAHR     0x08
+#define ADE7758_CVAHR     0x09
+#define ADE7758_AIRMS     0x0A
+#define ADE7758_BIRMS     0x0B
+#define ADE7758_CIRMS     0x0C
+#define ADE7758_AVRMS     0x0D
+#define ADE7758_BVRMS     0x0E
+#define ADE7758_CVRMS     0x0F
+#define ADE7758_FREQ      0x10
+#define ADE7758_TEMP      0x11
+#define ADE7758_WFORM     0x12
+#define ADE7758_OPMODE    0x13
+#define ADE7758_MMODE     0x14
+#define ADE7758_WAVMODE   0x15
+#define ADE7758_COMPMODE  0x16
+#define ADE7758_LCYCMODE  0x17
+#define ADE7758_MASK      0x18
+#define ADE7758_STATUS    0x19
+#define ADE7758_RSTATUS   0x1A
+#define ADE7758_ZXTOUT    0x1B
+#define ADE7758_LINECYC   0x1C
+#define ADE7758_SAGCYC    0x1D
+#define ADE7758_SAGLVL    0x1E
+#define ADE7758_VPINTLVL  0x1F
+#define ADE7758_IPINTLVL  0x20
+#define ADE7758_VPEAK     0x21
+#define ADE7758_IPEAK     0x22
+#define ADE7758_GAIN      0x23
+#define ADE7758_AVRMSGAIN 0x24
+#define ADE7758_BVRMSGAIN 0x25
+#define ADE7758_CVRMSGAIN 0x26
+#define ADE7758_AIGAIN    0x27
+#define ADE7758_BIGAIN    0x28
+#define ADE7758_CIGAIN    0x29
+#define ADE7758_AWG       0x2A
+#define ADE7758_BWG       0x2B
+#define ADE7758_CWG       0x2C
+#define ADE7758_AVARG     0x2D
+#define ADE7758_BVARG     0x2E
+#define ADE7758_CVARG     0x2F
+#define ADE7758_AVAG      0x30
+#define ADE7758_BVAG      0x31
+#define ADE7758_CVAG      0x32
+#define ADE7758_AVRMSOS   0x33
+#define ADE7758_BVRMSOS   0x34
+#define ADE7758_CVRMSOS   0x35
+#define ADE7758_AIRMSOS   0x36
+#define ADE7758_BIRMSOS   0x37
+#define ADE7758_CIRMSOS   0x38
+#define ADE7758_AWAITOS   0x39
+#define ADE7758_BWAITOS   0x3A
+#define ADE7758_CWAITOS   0x3B
+#define ADE7758_AVAROS    0x3C
+#define ADE7758_BVAROS    0x3D
+#define ADE7758_CVAROS    0x3E
+#define ADE7758_APHCAL    0x3F
+#define ADE7758_BPHCAL    0x40
+#define ADE7758_CPHCAL    0x41
+#define ADE7758_WDIV      0x42
+#define ADE7758_VADIV     0x44
+#define ADE7758_VARDIV    0x43
+#define ADE7758_APCFNUM   0x45
+#define ADE7758_APCFDEN   0x46
+#define ADE7758_VARCFNUM  0x47
+#define ADE7758_VARCFDEN  0x48
+#define ADE7758_CHKSUM    0x7E
+#define ADE7758_VERSION   0x7F
+
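+/* Communication register command byte: bit 7 clear selects a read of the
+ * addressed register, bit 7 set selects a write. */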
+#define ADE7758_READ_REG(a)    a
+#define ADE7758_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7758_MAX_TX    8
+#define ADE7758_MAX_RX    4
+#define ADE7758_STARTUP_DELAY 1
+
+#define ADE7758_SPI_SLOW	(u32)(300 * 1000)
+#define ADE7758_SPI_BURST	(u32)(1000 * 1000)
+#define ADE7758_SPI_FAST	(u32)(2000 * 1000)
+
+#define DRIVER_NAME		"ade7758"
+
+/**
+ * struct ade7758_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct ade7758_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+#ifdef CONFIG_IIO_RING_BUFFER
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7758_scan {
+	ADE7758_SCAN_WFORM,
+};
+
+void ade7758_remove_trigger(struct iio_dev *indio_dev);
+int ade7758_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7758_read_data_from_ring(struct device *dev,
+		struct device_attribute *attr,
+		char *buf);
+
+
+int ade7758_configure_ring(struct iio_dev *indio_dev);
+void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7758_initialize_ring(struct iio_ring_buffer *ring);
+void ade7758_uninitialize_ring(struct iio_ring_buffer *ring);
+int ade7758_set_irq(struct device *dev, bool enable);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+ade7758_read_data_from_ring(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return 0;
+}
+
+static inline int ade7758_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+static inline void ade7758_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
new file mode 100644
index 0000000..b7634cb
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -0,0 +1,866 @@
+/*
+ * ADE7758 Polyphase Multifunction Energy Metering IC Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7758.h"
+
+int ade7758_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7758_spi_write_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 3,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_WRITE_REG(reg_address);
+	st->tx[1] = (value >> 8) & 0xFF;
+	st->tx[2] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7758_spi_write_reg_24(struct device *dev,
+		u8 reg_address,
+		u32 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 4,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_WRITE_REG(reg_address);
+	st->tx[1] = (value >> 16) & 0xFF;
+	st->tx[2] = (value >> 8) & 0xFF;
+	st->tx[3] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7758_spi_read_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_READ_REG(reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7758_spi_read_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 3,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7758_spi_read_reg_24(struct device *dev,
+		u8 reg_address,
+		u32 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 4,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+	st->tx[3] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 16) | (st->rx[2] << 8) | st->rx[3];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t ade7758_read_8bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u8 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7758_spi_read_reg_8(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7758_read_16bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7758_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7758_read_24bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u32 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7758_spi_read_reg_24(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7758_write_8bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7758_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7758_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7758_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+int ade7758_reset(struct device *dev)
+{
+	int ret;
+	u8 val;
+	ade7758_spi_read_reg_8(dev,
+			ADE7758_OPMODE,
+			&val);
+	val |= 1 << 6; /* Software Chip Reset */
+	ret = ade7758_spi_write_reg_8(dev,
+			ADE7758_OPMODE,
+			val);
+
+	return ret;
+}
+
+static ssize_t ade7758_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	if (len < 1)
+		return -EINVAL;
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		return ade7758_reset(dev);
+	}
+	return -EINVAL;
+}
+
+static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_VPEAK);
+static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_IPEAK);
+static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_APHCAL);
+static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_BPHCAL);
+static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_CPHCAL);
+static IIO_DEV_ATTR_WDIV(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_WDIV);
+static IIO_DEV_ATTR_VADIV(S_IWUSR | S_IRUGO,
+		ade7758_read_8bit,
+		ade7758_write_8bit,
+		ADE7758_VADIV);
+static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+		ade7758_read_24bit,
+		NULL,
+		ADE7758_AIRMS);
+static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+		ade7758_read_24bit,
+		NULL,
+		ADE7758_BIRMS);
+static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+		ade7758_read_24bit,
+		NULL,
+		ADE7758_CIRMS);
+static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+		ade7758_read_24bit,
+		NULL,
+		ADE7758_AVRMS);
+static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+		ade7758_read_24bit,
+		NULL,
+		ADE7758_BVRMS);
+static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+		ade7758_read_24bit,
+		NULL,
+		ADE7758_CVRMS);
+static IIO_DEV_ATTR_AIRMSOS(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_AIRMSOS);
+static IIO_DEV_ATTR_BIRMSOS(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_BIRMSOS);
+static IIO_DEV_ATTR_CIRMSOS(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_CIRMSOS);
+static IIO_DEV_ATTR_AVRMSOS(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_AVRMSOS);
+static IIO_DEV_ATTR_BVRMSOS(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_BVRMSOS);
+static IIO_DEV_ATTR_CVRMSOS(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_CVRMSOS);
+static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_AIGAIN);
+static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_BIGAIN);
+static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_CIGAIN);
+static IIO_DEV_ATTR_AVRMSGAIN(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_AVRMSGAIN);
+static IIO_DEV_ATTR_BVRMSGAIN(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_BVRMSGAIN);
+static IIO_DEV_ATTR_CVRMSGAIN(S_IWUSR | S_IRUGO,
+		ade7758_read_16bit,
+		ade7758_write_16bit,
+		ADE7758_CVRMSGAIN);
+
+int ade7758_set_irq(struct device *dev, bool enable)
+{
+	int ret;
+	u32 irqen;
+	ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen);
+	if (ret)
+		goto error_ret;
+
+	if (enable)
+		irqen |= 1 << 16; /* Enables an interrupt when data is
+				     present in the waveform register */
+	else
+		irqen &= ~(1 << 16);
+
+	ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen);
+	if (ret)
+		goto error_ret;
+
+error_ret:
+	return ret;
+}
+
+/* Power down the device */
+static int ade7758_stop_device(struct device *dev)
+{
+	int ret;
+	u8 val;
+	ade7758_spi_read_reg_8(dev,
+			ADE7758_OPMODE,
+			&val);
+	val |= 7 << 3;  /* ADE7758 powered down */
+	ret = ade7758_spi_write_reg_8(dev,
+			ADE7758_OPMODE,
+			val);
+
+	return ret;
+}
+
+static int ade7758_initial_setup(struct ade7758_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* use low spi speed for init */
+	st->us->mode = SPI_MODE_3;
+	spi_setup(st->us);
+
+	/* Disable IRQ */
+	ret = ade7758_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	ade7758_reset(dev);
+	msleep(ADE7758_STARTUP_DELAY);
+
+err_ret:
+	return ret;
+}
+
+static ssize_t ade7758_read_frequency(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret, len = 0;
+	u8 t;
+	int sps;
+	ret = ade7758_spi_read_reg_8(dev,
+			ADE7758_WAVMODE,
+			&t);
+	if (ret)
+		return ret;
+
+	t = (t >> 5) & 0x3;
+	sps = 26040 / (1 << t);
+
+	len = sprintf(buf, "%d SPS\n", sps);
+	return len;
+}
+
+static ssize_t ade7758_write_frequency(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	unsigned long val;
+	int ret;
+	u8 reg, t;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+
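+	/* WAVMODE bits 6:5 (DTRT) encode the sample rate as
+	 * 26040 / 2^DTRT SPS (see ade7758_read_frequency() above). */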
+	t = (26040 / val);
+	if (t > 0)
+		t >>= 1;
+
+	if (t > 1)
+		st->us->max_speed_hz = ADE7758_SPI_SLOW;
+	else
+		st->us->max_speed_hz = ADE7758_SPI_FAST;
+
+	ret = ade7758_spi_read_reg_8(dev,
+			ADE7758_WAVMODE,
+			&reg);
+	if (ret)
+		goto out;
+
+	reg &= ~(3 << 5);
+	reg |= t << 5;
+
+	ret = ade7758_spi_write_reg_8(dev,
+			ADE7758_WAVMODE,
+			reg);
+
+out:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t ade7758_read_waveform_type(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret, len = 0;
+	u8 t;
+	ret = ade7758_spi_read_reg_8(dev,
+			ADE7758_WAVMODE,
+			&t);
+	if (ret)
+		return ret;
+
+	t = (t >> 2) & 0x7;
+
+	len = sprintf(buf, "%d\n", t);
+
+	return len;
+}
+
+static ssize_t ade7758_write_waveform_type(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	unsigned long val;
+	int ret;
+	u8 reg;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	if (val > 4)
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+
+	ret = ade7758_spi_read_reg_8(dev,
+			ADE7758_WAVMODE,
+			&reg);
+	if (ret)
+		goto out;
+
+	reg &= ~(7 << 2);
+	reg |= val << 2;
+
+	ret = ade7758_spi_write_reg_8(dev,
+			ADE7758_WAVMODE,
+			reg);
+
+out:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "129 C");
+static IIO_CONST_ATTR(temp_scale, "4 C");
+
+static IIO_DEV_ATTR_AWATTHR(ade7758_read_16bit,
+		ADE7758_AWATTHR);
+static IIO_DEV_ATTR_BWATTHR(ade7758_read_16bit,
+		ADE7758_BWATTHR);
+static IIO_DEV_ATTR_CWATTHR(ade7758_read_16bit,
+		ADE7758_CWATTHR);
+static IIO_DEV_ATTR_AVARHR(ade7758_read_16bit,
+		ADE7758_AVARHR);
+static IIO_DEV_ATTR_BVARHR(ade7758_read_16bit,
+		ADE7758_BVARHR);
+static IIO_DEV_ATTR_CVARHR(ade7758_read_16bit,
+		ADE7758_CVARHR);
+static IIO_DEV_ATTR_AVAHR(ade7758_read_16bit,
+		ADE7758_AVAHR);
+static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
+		ADE7758_BVAHR);
+static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
+		ADE7758_CVAHR);
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+		ade7758_read_frequency,
+		ade7758_write_frequency);
+
+/**
+ * IIO_DEV_ATTR_WAVEFORM_TYPE - set the type of waveform.
+ * @_mode: sysfs file mode/permissions
+ * @_show: output method for the attribute
+ * @_store: input method for the attribute
+ **/
+#define IIO_DEV_ATTR_WAVEFORM_TYPE(_mode, _show, _store)			\
+	IIO_DEVICE_ATTR(waveform_type, _mode, _show, _store, 0)
+
+static IIO_DEV_ATTR_WAVEFORM_TYPE(S_IWUSR | S_IRUGO,
+		ade7758_read_waveform_type,
+		ade7758_write_waveform_type);
+
+static IIO_DEV_ATTR_RESET(ade7758_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
+
+static IIO_CONST_ATTR(name, "ade7758");
+
+static struct attribute *ade7758_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group ade7758_event_attribute_group = {
+	.attrs = ade7758_event_attributes,
+};
+
+static struct attribute *ade7758_attributes[] = {
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_dev_attr_waveform_type.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_awatthr.dev_attr.attr,
+	&iio_dev_attr_bwatthr.dev_attr.attr,
+	&iio_dev_attr_cwatthr.dev_attr.attr,
+	&iio_dev_attr_avarhr.dev_attr.attr,
+	&iio_dev_attr_bvarhr.dev_attr.attr,
+	&iio_dev_attr_cvarhr.dev_attr.attr,
+	&iio_dev_attr_avahr.dev_attr.attr,
+	&iio_dev_attr_bvahr.dev_attr.attr,
+	&iio_dev_attr_cvahr.dev_attr.attr,
+	&iio_dev_attr_vpeak.dev_attr.attr,
+	&iio_dev_attr_ipeak.dev_attr.attr,
+	&iio_dev_attr_aphcal.dev_attr.attr,
+	&iio_dev_attr_bphcal.dev_attr.attr,
+	&iio_dev_attr_cphcal.dev_attr.attr,
+	&iio_dev_attr_wdiv.dev_attr.attr,
+	&iio_dev_attr_vadiv.dev_attr.attr,
+	&iio_dev_attr_airms.dev_attr.attr,
+	&iio_dev_attr_birms.dev_attr.attr,
+	&iio_dev_attr_cirms.dev_attr.attr,
+	&iio_dev_attr_avrms.dev_attr.attr,
+	&iio_dev_attr_bvrms.dev_attr.attr,
+	&iio_dev_attr_cvrms.dev_attr.attr,
+	&iio_dev_attr_aigain.dev_attr.attr,
+	&iio_dev_attr_bigain.dev_attr.attr,
+	&iio_dev_attr_cigain.dev_attr.attr,
+	&iio_dev_attr_avrmsgain.dev_attr.attr,
+	&iio_dev_attr_bvrmsgain.dev_attr.attr,
+	&iio_dev_attr_cvrmsgain.dev_attr.attr,
+	&iio_dev_attr_airmsos.dev_attr.attr,
+	&iio_dev_attr_birmsos.dev_attr.attr,
+	&iio_dev_attr_cirmsos.dev_attr.attr,
+	&iio_dev_attr_avrmsos.dev_attr.attr,
+	&iio_dev_attr_bvrmsos.dev_attr.attr,
+	&iio_dev_attr_cvrmsos.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ade7758_attribute_group = {
+	.attrs = ade7758_attributes,
+};
+
+
+
+static int __devinit ade7758_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct ade7758_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADE7758_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADE7758_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &ade7758_event_attribute_group;
+	st->indio_dev->attrs = &ade7758_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = ade7758_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = ade7758_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_FALLING,
+				"ade7758");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = ade7758_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = ade7758_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		ade7758_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	ade7758_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	ade7758_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int ade7758_remove(struct spi_device *spi)
+{
+	int ret;
+	struct ade7758_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	ret = ade7758_stop_device(&(indio_dev->dev));
+	if (ret)
+		goto err_ret;
+
+	flush_scheduled_work();
+
+	ade7758_remove_trigger(indio_dev);
+	if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	ade7758_uninitialize_ring(indio_dev->ring);
+	iio_device_unregister(indio_dev);
+	ade7758_unconfigure_ring(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+
+err_ret:
+	return ret;
+}
+
+static struct spi_driver ade7758_driver = {
+	.driver = {
+		.name = "ade7758",
+		.owner = THIS_MODULE,
+	},
+	.probe = ade7758_probe,
+	.remove = __devexit_p(ade7758_remove),
+};
+
+static __init int ade7758_init(void)
+{
+	return spi_register_driver(&ade7758_driver);
+}
+module_init(ade7758_init);
+
+static __exit void ade7758_exit(void)
+{
+	spi_unregister_driver(&ade7758_driver);
+}
+module_exit(ade7758_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
new file mode 100644
index 0000000..274b4a0
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -0,0 +1,212 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_sw.h"
+#include "../accel/accel.h"
+#include "../trigger.h"
+#include "ade7758.h"
+
+/**
+ * combine_8_to_32() utility function to combine three u8s into a u32
+ **/
+static inline u32 combine_8_to_32(u8 lower, u8 mid, u8 upper)
+{
+	u32 _lower = lower;
+	u32 _mid = mid;
+	u32 _upper = upper;
+
+	return _lower | (_mid << 8) | (_upper << 16);
+}
+
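+/* Scan layout: one signed 24-bit waveform sample stored in a 32-bit element,
+ * followed by an optional 64-bit capture timestamp. */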
+static IIO_SCAN_EL_C(wform, ADE7758_SCAN_WFORM, ADE7758_WFORM, NULL);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(wform, s, 24, 32);
+static IIO_SCAN_EL_TIMESTAMP(1);
+static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
+
+static struct attribute *ade7758_scan_el_attrs[] = {
+	&iio_scan_el_wform.dev_attr.attr,
+	&iio_const_attr_wform_index.dev_attr.attr,
+	&iio_const_attr_wform_type.dev_attr.attr,
+	&iio_scan_el_timestamp.dev_attr.attr,
+	&iio_const_attr_timestamp_index.dev_attr.attr,
+	&iio_const_attr_timestamp_type.dev_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ade7758_scan_el_group = {
+	.attrs = ade7758_scan_el_attrs,
+	.name = "scan_elements",
+};
+
+/**
+ * ade7758_poll_func_th() top half interrupt handler called by trigger
+ * @private_data:	iio_dev
+ **/
+static void ade7758_poll_func_th(struct iio_dev *indio_dev, s64 time)
+{
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	st->last_timestamp = time;
+	schedule_work(&st->work_trigger_to_ring);
+	/* Indicate that this interrupt is being handled */
+
+	/* Technically this is trigger related, but without this
+	 * handler running there is currently no way for the interrupt
+	 * to clear.
+	 */
+}
+
+/**
+ * ade7758_spi_read_burst() - read all data registers
+ * @dev: device associated with child of actual device (iio_dev or iio_trig)
+ * @rx: somewhere to pass back the value read (min size is 24 bytes)
+ **/
+static int ade7758_spi_read_burst(struct device *dev, u8 *rx)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7758_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = rx,
+			.bits_per_word = 8,
+			.len = 4,
+		}, {
+			.tx_buf = st->tx + 4,
+			.rx_buf = rx,
+			.bits_per_word = 8,
+			.len = 4,
+		},
+	};
+
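+	/* Two chained transfers: the first reads RSTATUS (which also clears
+	 * the interrupt status), the second clocks out the 24-bit WFORM
+	 * sample into the caller's rx buffer. */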
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7758_READ_REG(ADE7758_RSTATUS);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+	st->tx[3] = 0;
+	st->tx[4] = ADE7758_READ_REG(ADE7758_WFORM);
+	st->tx[5] = 0;
+	st->tx[6] = 0;
+	st->tx[7] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfers[0], &msg);
+	spi_message_add_tail(&xfers[1], &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret)
+		dev_err(&st->us->dev, "problem when reading WFORM value\n");
+
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
+ * specific to be rolled into the core.
+ */
+static void ade7758_trigger_bh_to_ring(struct work_struct *work_s)
+{
+	struct ade7758_state *st
+		= container_of(work_s, struct ade7758_state,
+			       work_trigger_to_ring);
+	struct iio_ring_buffer *ring = st->indio_dev->ring;
+
+	int i = 0;
+	s32 *data;
+	size_t datasize = ring->access.get_bytes_per_datum(ring);
+
+	data = kmalloc(datasize, GFP_KERNEL);
+	if (data == NULL) {
+		dev_err(&st->us->dev, "memory alloc failed in ring bh");
+		return;
+	}
+
+	if (ring->scan_count)
+		if (ade7758_spi_read_burst(&st->indio_dev->dev, st->rx) >= 0)
+			for (; i < ring->scan_count; i++)
+				data[i] = combine_8_to_32(st->rx[i*2+2],
+						st->rx[i*2+1],
+						st->rx[i*2]);
+
+	/* Guaranteed to be aligned with 8 byte boundary */
+	if (ring->scan_timestamp)
+		*((s64 *)
+		(((unsigned long)data + 4 * ring->scan_count + 4) & ~0x7)) =
+			st->last_timestamp;
+
+	ring->access.store_to(ring,
+			      (u8 *)data,
+			      st->last_timestamp);
+
+	iio_trigger_notify_done(st->indio_dev->trig);
+	kfree(data);
+
+	return;
+}
+
+void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	kfree(indio_dev->pollfunc);
+	iio_sw_rb_free(indio_dev->ring);
+}
+
+int ade7758_configure_ring(struct iio_dev *indio_dev)
+{
+	int ret = 0;
+	struct ade7758_state *st = indio_dev->dev_data;
+	struct iio_ring_buffer *ring;
+	INIT_WORK(&st->work_trigger_to_ring, ade7758_trigger_bh_to_ring);
+
+	ring = iio_sw_rb_allocate(indio_dev);
+	if (!ring) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	indio_dev->ring = ring;
+	/* Effectively select the ring buffer implementation */
+	iio_ring_sw_register_funcs(&ring->access);
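+	/* Each scan element occupies 4 bytes (24-bit sample in a 32-bit slot) */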
+	ring->bpe = 4;
+	ring->scan_el_attrs = &ade7758_scan_el_group;
+	ring->scan_timestamp = true;
+	ring->preenable = &iio_sw_ring_preenable;
+	ring->postenable = &iio_triggered_ring_postenable;
+	ring->predisable = &iio_triggered_ring_predisable;
+	ring->owner = THIS_MODULE;
+
+	/* Set default scan mode */
+	iio_scan_mask_set(ring, iio_scan_el_wform.number);
+
+	ret = iio_alloc_pollfunc(indio_dev, NULL, &ade7758_poll_func_th);
+	if (ret)
+		goto error_iio_sw_rb_free;
+
+	indio_dev->modes |= INDIO_RING_TRIGGERED;
+	return 0;
+
+error_iio_sw_rb_free:
+	iio_sw_rb_free(indio_dev->ring);
+	return ret;
+}
+
+int ade7758_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return iio_ring_buffer_register(ring, 0);
+}
+
+void ade7758_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+	iio_ring_buffer_unregister(ring);
+}
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
new file mode 100644
index 0000000..60abca0
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -0,0 +1,125 @@
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+#include <linux/spi/spi.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../trigger.h"
+#include "ade7758.h"
+
+/**
+ * ade7758_data_rdy_trig_poll() the event handler for the data rdy trig
+ **/
+static int ade7758_data_rdy_trig_poll(struct iio_dev *dev_info,
+				       int index,
+				       s64 timestamp,
+				       int no_test)
+{
+	struct ade7758_state *st = iio_dev_get_devdata(dev_info);
+	struct iio_trigger *trig = st->trig;
+
+	iio_trigger_poll(trig, timestamp);
+
+	return IRQ_HANDLED;
+}
+
+IIO_EVENT_SH(data_rdy_trig, &ade7758_data_rdy_trig_poll);
+
+static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);
+
+static struct attribute *ade7758_trigger_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+
+static const struct attribute_group ade7758_trigger_attr_group = {
+	.attrs = ade7758_trigger_attrs,
+};
+
+/**
+ * ade7758_data_rdy_trigger_set_state() set datardy interrupt state
+ **/
+static int ade7758_data_rdy_trigger_set_state(struct iio_trigger *trig,
+						bool state)
+{
+	struct ade7758_state *st = trig->private_data;
+	struct iio_dev *indio_dev = st->indio_dev;
+	int ret = 0;
+
+	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
+	ret = ade7758_set_irq(&st->indio_dev->dev, state);
+	if (state == false) {
+		iio_remove_event_from_list(&iio_event_data_rdy_trig,
+					   &indio_dev->interrupts[0]
+					   ->ev_list);
+		/* possible quirk with handler currently worked around
+		   by ensuring the work queue is empty */
+		flush_scheduled_work();
+	} else {
+		iio_add_event_to_list(&iio_event_data_rdy_trig,
+				      &indio_dev->interrupts[0]->ev_list);
+	}
+	return ret;
+}
+
+/**
+ * ade7758_trig_try_reen() try re-enabling the irq for the data rdy trigger
+ * @trig:	the datardy trigger
+ **/
+static int ade7758_trig_try_reen(struct iio_trigger *trig)
+{
+	struct ade7758_state *st = trig->private_data;
+	enable_irq(st->us->irq);
+	/* irq reenabled so success! */
+	return 0;
+}
+
+int ade7758_probe_trigger(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct ade7758_state *st = indio_dev->dev_data;
+
+	st->trig = iio_allocate_trigger();
+	if (st->trig == NULL)
+		return -ENOMEM;
+	st->trig->name = kasprintf(GFP_KERNEL,
+				"ade7758-dev%d",
+				indio_dev->id);
+	if (!st->trig->name) {
+		ret = -ENOMEM;
+		goto error_free_trig;
+	}
+	st->trig->dev.parent = &st->us->dev;
+	st->trig->owner = THIS_MODULE;
+	st->trig->private_data = st;
+	st->trig->set_trigger_state = &ade7758_data_rdy_trigger_set_state;
+	st->trig->try_reenable = &ade7758_trig_try_reen;
+	st->trig->control_attrs = &ade7758_trigger_attr_group;
+	ret = iio_trigger_register(st->trig);
+
+	/* select default trigger */
+	indio_dev->trig = st->trig;
+	if (ret)
+		goto error_free_trig_name;
+
+	return 0;
+
+error_free_trig_name:
+	kfree(st->trig->name);
+error_free_trig:
+	iio_free_trigger(st->trig);
+
+	return ret;
+}
+
+void ade7758_remove_trigger(struct iio_dev *indio_dev)
+{
+	struct ade7758_state *state = indio_dev->dev_data;
+
+	iio_trigger_unregister(state->trig);
+	kfree(state->trig->name);
+	iio_free_trigger(state->trig);
+}
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
new file mode 100644
index 0000000..fafc3c1
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -0,0 +1,670 @@
+/*
+ * ADE7759 Active Energy Metering IC with di/dt Sensor Interface Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7759.h"
+
+int ade7759_spi_write_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 val)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7759_WRITE_REG(reg_address);
+	st->tx[1] = val;
+
+	ret = spi_write(st->us, st->tx, 2);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7759_spi_write_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 3,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7759_WRITE_REG(reg_address);
+	st->tx[1] = (value >> 8) & 0xFF;
+	st->tx[2] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7759_spi_read_reg_8(struct device *dev,
+		u8 reg_address,
+		u8 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 2,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7759_READ_REG(reg_address);
+	st->tx[1] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = st->rx[1];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7759_spi_read_reg_16(struct device *dev,
+		u8 reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 3,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7759_READ_REG(reg_address);
+	st->tx[1] = 0;
+	st->tx[2] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[1] << 8) | st->rx[2];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7759_spi_read_reg_40(struct device *dev,
+		u8 reg_address,
+		u64 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 6,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7759_READ_REG(reg_address);
+	memset(&st->tx[1], 0, 5);
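+	/* One command byte plus five dummy bytes: the 40-bit energy value is
+	 * clocked back into st->rx[1]..st->rx[5] during the same transfer. */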
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->us, &msg);
+	if (ret) {
+		dev_err(&st->us->dev, "problem when reading 40 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
+		(st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static ssize_t ade7759_read_8bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u8 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7759_spi_read_reg_8(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7759_read_16bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7759_spi_read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7759_read_40bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u64 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = ade7759_spi_read_reg_40(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%llu\n", val);
+}
+
+static ssize_t ade7759_write_8bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7759_spi_write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7759_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = ade7759_spi_write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int ade7759_reset(struct device *dev)
+{
+	int ret;
+	u16 val;
+	ret = ade7759_spi_read_reg_16(dev,
+			ADE7759_MODE,
+			&val);
+	if (ret)
+		return ret;
+
+	val |= 1 << 6; /* Software Chip Reset */
+	ret = ade7759_spi_write_reg_16(dev,
+			ADE7759_MODE,
+			val);
+
+	return ret;
+}
+
+static ssize_t ade7759_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	int ret;
+
+	if (len < 1)
+		return -EINVAL;
+
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		ret = ade7759_reset(dev);
+		return ret ? ret : len;
+	}
+
+	return -EINVAL;
+}
+
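+/*
+ * sysfs attributes giving direct access to the ADE7759 registers; the last
+ * macro argument is the register address handed to the show/store callback.
+ */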
+static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
+static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
+		ade7759_read_16bit,
+		ade7759_write_16bit,
+		ADE7759_CFDEN);
+static IIO_DEV_ATTR_CFNUM(S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_CFNUM);
+static IIO_DEV_ATTR_CHKSUM(ade7759_read_8bit, ADE7759_CHKSUM);
+static IIO_DEV_ATTR_PHCAL(S_IWUSR | S_IRUGO,
+		ade7759_read_16bit,
+		ade7759_write_16bit,
+		ADE7759_PHCAL);
+static IIO_DEV_ATTR_APOS(S_IWUSR | S_IRUGO,
+		ade7759_read_16bit,
+		ade7759_write_16bit,
+		ADE7759_APOS);
+static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_SAGCYC);
+static IIO_DEV_ATTR_SAGLVL(S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_SAGLVL);
+static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_LINECYC);
+static IIO_DEV_ATTR_LENERGY(ade7759_read_40bit, ADE7759_LENERGY);
+static IIO_DEV_ATTR_PGA_GAIN(S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_GAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_GAIN(S_IWUSR | S_IRUGO,
+		ade7759_read_16bit,
+		ade7759_write_16bit,
+		ADE7759_APGAIN);
+static IIO_DEV_ATTR_CH_OFF(1, S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_CH1OS);
+static IIO_DEV_ATTR_CH_OFF(2, S_IWUSR | S_IRUGO,
+		ade7759_read_8bit,
+		ade7759_write_8bit,
+		ADE7759_CH2OS);
+
+static int ade7759_set_irq(struct device *dev, bool enable)
+{
+	int ret;
+	u8 irqen;
+	ret = ade7759_spi_read_reg_8(dev, ADE7759_IRQEN, &irqen);
+	if (ret)
+		goto error_ret;
+
+	if (enable)
+		irqen |= 1 << 3; /* Enables an interrupt when a data is
+				    present in the waveform register */
+	else
+		irqen &= ~(1 << 3);
+
+	ret = ade7759_spi_write_reg_8(dev, ADE7759_IRQEN, irqen);
+
+error_ret:
+	return ret;
+}
+
+/* Power down the device */
+static int ade7759_stop_device(struct device *dev)
+{
+	int ret;
+	u16 val;
+	ret = ade7759_spi_read_reg_16(dev,
+			ADE7759_MODE,
+			&val);
+	if (ret)
+		return ret;
+
+	val |= 1 << 4;  /* AD converters can be turned off */
+	ret = ade7759_spi_write_reg_16(dev,
+			ADE7759_MODE,
+			val);
+
+	return ret;
+}
+
+static int ade7759_initial_setup(struct ade7759_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* configure the SPI bus mode required by the chip before first access */
+	st->us->mode = SPI_MODE_3;
+	spi_setup(st->us);
+
+	/* Disable IRQ */
+	ret = ade7759_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	ade7759_reset(dev);
+	msleep(ADE7759_STARTUP_DELAY);
+
+err_ret:
+	return ret;
+}
+
+static ssize_t ade7759_read_frequency(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret, len = 0;
+	u16 t;
+	int sps;
+	ret = ade7759_spi_read_reg_16(dev,
+			ADE7759_MODE,
+			&t);
+	if (ret)
+		return ret;
+
+	t = (t >> 3) & 0x3;
+	sps = 27900 / (1 + t);
+
+	len = sprintf(buf, "%d SPS\n", sps);
+	return len;
+}
+
+static ssize_t ade7759_write_frequency(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+	long val;
+	int ret;
+	u16 reg, t;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	mutex_lock(&indio_dev->mlock);
+
+	if (val <= 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	t = 27900 / val;
+	if (t > 0)
+		t--;
+
+	if (t > 1)
+		st->us->max_speed_hz = ADE7759_SPI_SLOW;
+	else
+		st->us->max_speed_hz = ADE7759_SPI_FAST;
+
+	ret = ade7759_spi_read_reg_16(dev,
+			ADE7759_MODE,
+			&reg);
+	if (ret)
+		goto out;
+
+	reg &= ~(3 << 13);
+	reg |= t << 13;
+
+	ret = ade7759_spi_write_reg_16(dev,
+			ADE7759_MODE,
+			reg);
+
+out:
+	mutex_unlock(&indio_dev->mlock);
+
+	return ret ? ret : len;
+}
+static IIO_DEV_ATTR_TEMP_RAW(ade7759_read_8bit);
+static IIO_CONST_ATTR(temp_offset, "70 C");
+static IIO_CONST_ATTR(temp_scale, "1 C");
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+		ade7759_read_frequency,
+		ade7759_write_frequency);
+
+static IIO_DEV_ATTR_RESET(ade7759_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
+
+static IIO_CONST_ATTR(name, "ade7759");
+
+static struct attribute *ade7759_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group ade7759_event_attribute_group = {
+	.attrs = ade7759_event_attributes,
+};
+
+static struct attribute *ade7759_attributes[] = {
+	&iio_dev_attr_temp_raw.dev_attr.attr,
+	&iio_const_attr_temp_offset.dev_attr.attr,
+	&iio_const_attr_temp_scale.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_phcal.dev_attr.attr,
+	&iio_dev_attr_cfden.dev_attr.attr,
+	&iio_dev_attr_aenergy.dev_attr.attr,
+	&iio_dev_attr_cfnum.dev_attr.attr,
+	&iio_dev_attr_apos.dev_attr.attr,
+	&iio_dev_attr_sagcyc.dev_attr.attr,
+	&iio_dev_attr_saglvl.dev_attr.attr,
+	&iio_dev_attr_linecyc.dev_attr.attr,
+	&iio_dev_attr_lenergy.dev_attr.attr,
+	&iio_dev_attr_chksum.dev_attr.attr,
+	&iio_dev_attr_pga_gain.dev_attr.attr,
+	&iio_dev_attr_active_power_gain.dev_attr.attr,
+	&iio_dev_attr_choff_1.dev_attr.attr,
+	&iio_dev_attr_choff_2.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ade7759_attribute_group = {
+	.attrs = ade7759_attributes,
+};
+
+static int __devinit ade7759_probe(struct spi_device *spi)
+{
+	int ret, regdone = 0;
+	struct ade7759_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		goto error_ret;
+	}
+	/* this is only used for removal purposes */
+	spi_set_drvdata(spi, st);
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADE7759_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADE7759_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	st->us = spi;
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = &spi->dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &ade7759_event_attribute_group;
+	st->indio_dev->attrs = &ade7759_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = ade7759_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = ade7759_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (spi->irq) {
+		ret = iio_register_interrupt_line(spi->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_FALLING,
+				"ade7759");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = ade7759_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+
+	/* Get the device into a sane initial state */
+	ret = ade7759_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+	return 0;
+
+error_remove_trigger:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		ade7759_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	ade7759_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	ade7759_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+/* fixme, confirm ordering in this function */
+static int ade7759_remove(struct spi_device *spi)
+{
+	int ret;
+	struct ade7759_state *st = spi_get_drvdata(spi);
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	ret = ade7759_stop_device(&(indio_dev->dev));
+	if (ret)
+		goto err_ret;
+
+	flush_scheduled_work();
+
+	ade7759_remove_trigger(indio_dev);
+	if (spi->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	ade7759_uninitialize_ring(indio_dev->ring);
+	ade7759_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+
+err_ret:
+	return ret;
+}
+
+static struct spi_driver ade7759_driver = {
+	.driver = {
+		.name = "ade7759",
+		.owner = THIS_MODULE,
+	},
+	.probe = ade7759_probe,
+	.remove = __devexit_p(ade7759_remove),
+};
+
+static __init int ade7759_init(void)
+{
+	return spi_register_driver(&ade7759_driver);
+}
+module_init(ade7759_init);
+
+static __exit void ade7759_exit(void)
+{
+	spi_unregister_driver(&ade7759_driver);
+}
+module_exit(ade7759_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7759.h b/drivers/staging/iio/meter/ade7759.h
new file mode 100644
index 0000000..813dea2
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7759.h
@@ -0,0 +1,122 @@
+#ifndef _ADE7759_H
+#define _ADE7759_H
+
+#define ADE7759_WAVEFORM  0x01
+#define ADE7759_AENERGY   0x02
+#define ADE7759_RSTENERGY 0x03
+#define ADE7759_STATUS    0x04
+#define ADE7759_RSTSTATUS 0x05
+#define ADE7759_MODE      0x06
+#define ADE7759_CFDEN     0x07
+#define ADE7759_CH1OS     0x08
+#define ADE7759_CH2OS     0x09
+#define ADE7759_GAIN      0x0A
+#define ADE7759_APGAIN    0x0B
+#define ADE7759_PHCAL     0x0C
+#define ADE7759_APOS      0x0D
+#define ADE7759_ZXTOUT    0x0E
+#define ADE7759_SAGCYC    0x0F
+#define ADE7759_IRQEN     0x10
+#define ADE7759_SAGLVL    0x11
+#define ADE7759_TEMP      0x12
+#define ADE7759_LINECYC   0x13
+#define ADE7759_LENERGY   0x14
+#define ADE7759_CFNUM     0x15
+#define ADE7759_CHKSUM    0x1E
+#define ADE7759_DIEREV    0x1F
+
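+/*
+ * The register address is carried in the first byte of each SPI frame;
+ * the top bit set marks the access as a write, cleared as a read.
+ */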
+#define ADE7759_READ_REG(a)    a
+#define ADE7759_WRITE_REG(a) ((a) | 0x80)
+
+#define ADE7759_MAX_TX    6
+#define ADE7759_MAX_RX    6
+#define ADE7759_STARTUP_DELAY 1
+
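+/* SPI clock ceilings; the sampling frequency store picks the slow or fast
+ * rate to match the selected data rate.
+ */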
+#define ADE7759_SPI_SLOW	(u32)(300 * 1000)
+#define ADE7759_SPI_BURST	(u32)(1000 * 1000)
+#define ADE7759_SPI_FAST	(u32)(2000 * 1000)
+
+#define DRIVER_NAME		"ade7759"
+
+/**
+ * struct ade7759_state - device instance specific data
+ * @us:			actual spi_device
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct ade7759_state {
+	struct spi_device		*us;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	struct mutex			buf_lock;
+};
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7759_scan {
+	ADE7759_SCAN_ACTIVE_POWER,
+	ADE7759_SCAN_CH1_CH2,
+	ADE7759_SCAN_CH1,
+	ADE7759_SCAN_CH2,
+};
+
+void ade7759_remove_trigger(struct iio_dev *indio_dev);
+int ade7759_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7759_read_data_from_ring(struct device *dev,
+		struct device_attribute *attr,
+		char *buf);
+
+
+int ade7759_configure_ring(struct iio_dev *indio_dev);
+void ade7759_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7759_initialize_ring(struct iio_ring_buffer *ring);
+void ade7759_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7759_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7759_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+ade7759_read_data_from_ring(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return 0;
+}
+
+static inline int ade7759_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+static inline void ade7759_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7759_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+static inline void ade7759_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
new file mode 100644
index 0000000..4578e7b
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -0,0 +1,272 @@
+/*
+ * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (I2C Bus)
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
+#include "../iio.h"
+#include "ade7854.h"
+
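+/*
+ * All I2C accesses start with the 16 bit register address, MSB first,
+ * followed by the payload, also MSB first.
+ */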
+static int ade7854_i2c_write_reg_8(struct device *dev,
+		u16 reg_address,
+		u8 value)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+	st->tx[2] = value;
+
+	ret = i2c_master_send(st->i2c, st->tx, 3);
+	mutex_unlock(&st->buf_lock);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int ade7854_i2c_write_reg_16(struct device *dev,
+		u16 reg_address,
+		u16 value)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+	st->tx[2] = (value >> 8) & 0xFF;
+	st->tx[3] = value & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 4);
+	mutex_unlock(&st->buf_lock);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int ade7854_i2c_write_reg_24(struct device *dev,
+		u16 reg_address,
+		u32 value)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+	st->tx[2] = (value >> 16) & 0xFF;
+	st->tx[3] = (value >> 8) & 0xFF;
+	st->tx[4] = value & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 5);
+	mutex_unlock(&st->buf_lock);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int ade7854_i2c_write_reg_32(struct device *dev,
+		u16 reg_address,
+		u32 value)
+{
+	int ret;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+	st->tx[2] = (value >> 24) & 0xFF;
+	st->tx[3] = (value >> 16) & 0xFF;
+	st->tx[4] = (value >> 8) & 0xFF;
+	st->tx[5] = value & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 6);
+	mutex_unlock(&st->buf_lock);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int ade7854_i2c_read_reg_8(struct device *dev,
+		u16 reg_address,
+		u8 *val)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 2);
+	if (ret < 0)
+		goto out;
+
+	ret = i2c_master_recv(st->i2c, st->rx, 1);
+	if (ret < 0)
+		goto out;
+
+	*val = st->rx[0];
+	ret = 0;
+out:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7854_i2c_read_reg_16(struct device *dev,
+		u16 reg_address,
+		u16 *val)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 2);
+	if (ret < 0)
+		goto out;
+
+	ret = i2c_master_recv(st->i2c, st->rx, 2);
+	if (ret < 0)
+		goto out;
+
+	*val = (st->rx[0] << 8) | st->rx[1];
+	ret = 0;
+out:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7854_i2c_read_reg_24(struct device *dev,
+		u16 reg_address,
+		u32 *val)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 2);
+	if (ret < 0)
+		goto out;
+
+	ret = i2c_master_recv(st->i2c, st->rx, 3);
+	if (ret < 0)
+		goto out;
+
+	*val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
+	ret = 0;
+out:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7854_i2c_read_reg_32(struct device *dev,
+		u16 reg_address,
+		u32 *val)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = (reg_address >> 8) & 0xFF;
+	st->tx[1] = reg_address & 0xFF;
+
+	ret = i2c_master_send(st->i2c, st->tx, 2);
+	if (ret < 0)
+		goto out;
+
+	ret = i2c_master_recv(st->i2c, st->rx, 4);
+	if (ret < 0)
+		goto out;
+
+	*val = ((u32)st->rx[0] << 24) | (st->rx[1] << 16) |
+		(st->rx[2] << 8) | st->rx[3];
+	ret = 0;
+out:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int __devinit ade7854_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int ret;
+	struct ade7854_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		return ret;
+	}
+
+	i2c_set_clientdata(client, st);
+	st->read_reg_8 = ade7854_i2c_read_reg_8;
+	st->read_reg_16 = ade7854_i2c_read_reg_16;
+	st->read_reg_24 = ade7854_i2c_read_reg_24;
+	st->read_reg_32 = ade7854_i2c_read_reg_32;
+	st->write_reg_8 = ade7854_i2c_write_reg_8;
+	st->write_reg_16 = ade7854_i2c_write_reg_16;
+	st->write_reg_24 = ade7854_i2c_write_reg_24;
+	st->write_reg_32 = ade7854_i2c_write_reg_32;
+	st->i2c = client;
+	st->irq = client->irq;
+
+	ret = ade7854_probe(st, &client->dev);
+	if (ret)
+		return ret;	/* ade7854_probe() frees st on failure */
+
+	return ret;
+}
+
+static int __devexit ade7854_i2c_remove(struct i2c_client *client)
+{
+	return ade7854_remove(i2c_get_clientdata(client));
+}
+
+static const struct i2c_device_id ade7854_id[] = {
+	{ "ade7854", 0 },
+	{ "ade7858", 0 },
+	{ "ade7868", 0 },
+	{ "ade7878", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ade7854_id);
+
+static struct i2c_driver ade7854_i2c_driver = {
+	.driver = {
+		.name = "ade7854",
+	},
+	.probe    = ade7854_i2c_probe,
+	.remove   = __devexit_p(ade7854_i2c_remove),
+	.id_table = ade7854_id,
+};
+
+static __init int ade7854_i2c_init(void)
+{
+	return i2c_add_driver(&ade7854_i2c_driver);
+}
+module_init(ade7854_i2c_init);
+
+static __exit void ade7854_i2c_exit(void)
+{
+	i2c_del_driver(&ade7854_i2c_driver);
+}
+module_exit(ade7854_i2c_exit);
+
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC I2C Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
new file mode 100644
index 0000000..fe58103e
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -0,0 +1,360 @@
+/*
+ * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver (SPI Bus)
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+
+#include "../iio.h"
+#include "ade7854.h"
+
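+/*
+ * Each SPI frame starts with a read/write flag byte (ADE7854_READ_REG or
+ * ADE7854_WRITE_REG), then the 16 bit register address MSB first, followed
+ * by the data bytes, also MSB first.
+ */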
+static int ade7854_spi_write_reg_8(struct device *dev,
+		u16 reg_address,
+		u8 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 4,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7854_WRITE_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7854_spi_write_reg_16(struct device *dev,
+		u16 reg_address,
+		u16 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 5,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7854_WRITE_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = (value >> 8) & 0xFF;
+	st->tx[4] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7854_spi_write_reg_24(struct device *dev,
+		u16 reg_address,
+		u32 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 6,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7854_WRITE_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = (value >> 16) & 0xFF;
+	st->tx[4] = (value >> 8) & 0xFF;
+	st->tx[5] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7854_spi_write_reg_32(struct device *dev,
+		u16 reg_address,
+		u32 value)
+{
+	int ret;
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.bits_per_word = 8,
+			.len = 7,
+		}
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7854_WRITE_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = (value >> 24) & 0xFF;
+	st->tx[4] = (value >> 16) & 0xFF;
+	st->tx[5] = (value >> 8) & 0xFF;
+	st->tx[6] = value & 0xFF;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	mutex_unlock(&st->buf_lock);
+
+	return ret;
+}
+
+static int ade7854_spi_read_reg_8(struct device *dev,
+		u16 reg_address,
+		u8 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 4,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+
+	st->tx[0] = ADE7854_READ_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	if (ret) {
+		dev_err(&st->spi->dev, "problem when reading 8 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = st->rx[3];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7854_spi_read_reg_16(struct device *dev,
+		u16 reg_address,
+		u16 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 5,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+	st->tx[0] = ADE7854_READ_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = 0;
+	st->tx[4] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	if (ret) {
+		dev_err(&st->spi->dev, "problem when reading 16 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[3] << 8) | st->rx[4];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7854_spi_read_reg_24(struct device *dev,
+		u16 reg_address,
+		u32 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 6,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+
+	st->tx[0] = ADE7854_READ_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = 0;
+	st->tx[4] = 0;
+	st->tx[5] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	if (ret) {
+		dev_err(&st->spi->dev, "problem when reading 24 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int ade7854_spi_read_reg_32(struct device *dev,
+		u16 reg_address,
+		u32 *val)
+{
+	struct spi_message msg;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	int ret;
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = st->tx,
+			.rx_buf = st->rx,
+			.bits_per_word = 8,
+			.len = 7,
+		},
+	};
+
+	mutex_lock(&st->buf_lock);
+
+	st->tx[0] = ADE7854_READ_REG;
+	st->tx[1] = (reg_address >> 8) & 0xFF;
+	st->tx[2] = reg_address & 0xFF;
+	st->tx[3] = 0;
+	st->tx[4] = 0;
+	st->tx[5] = 0;
+	st->tx[6] = 0;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(xfers, &msg);
+	ret = spi_sync(st->spi, &msg);
+	if (ret) {
+		dev_err(&st->spi->dev, "problem when reading 32 bit register 0x%02X",
+				reg_address);
+		goto error_ret;
+	}
+	*val = (st->rx[3] << 24) | (st->rx[4] << 16) | (st->rx[5] << 8) | st->rx[6];
+
+error_ret:
+	mutex_unlock(&st->buf_lock);
+	return ret;
+}
+
+static int __devinit ade7854_spi_probe(struct spi_device *spi)
+{
+	int ret;
+	struct ade7854_state *st = kzalloc(sizeof *st, GFP_KERNEL);
+	if (!st) {
+		ret =  -ENOMEM;
+		return ret;
+	}
+
+	spi_set_drvdata(spi, st);
+	st->read_reg_8 = ade7854_spi_read_reg_8;
+	st->read_reg_16 = ade7854_spi_read_reg_16;
+	st->read_reg_24 = ade7854_spi_read_reg_24;
+	st->read_reg_32 = ade7854_spi_read_reg_32;
+	st->write_reg_8 = ade7854_spi_write_reg_8;
+	st->write_reg_16 = ade7854_spi_write_reg_16;
+	st->write_reg_24 = ade7854_spi_write_reg_24;
+	st->write_reg_32 = ade7854_spi_write_reg_32;
+	st->irq = spi->irq;
+	st->spi = spi;
+
+	ret = ade7854_probe(st, &spi->dev);
+	if (ret)
+		return ret;	/* ade7854_probe() frees st on failure */
+
+	return 0;
+}
+
+static int ade7854_spi_remove(struct spi_device *spi)
+{
+	ade7854_remove(spi_get_drvdata(spi));
+
+	return 0;
+}
+
+static struct spi_driver ade7854_driver = {
+	.driver = {
+		.name = "ade7854",
+		.owner = THIS_MODULE,
+	},
+	.probe = ade7854_spi_probe,
+	.remove = __devexit_p(ade7854_spi_remove),
+};
+
+static __init int ade7854_init(void)
+{
+	return spi_register_driver(&ade7854_driver);
+}
+module_init(ade7854_init);
+
+static __exit void ade7854_exit(void)
+{
+	spi_unregister_driver(&ade7854_driver);
+}
+module_exit(ade7854_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC SPI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
new file mode 100644
index 0000000..a13d504
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -0,0 +1,680 @@
+/*
+ * ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/list.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "meter.h"
+#include "ade7854.h"
+
+static ssize_t ade7854_read_8bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u8 val = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = st->read_reg_8(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7854_read_16bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u16 val = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = st->read_reg_16(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7854_read_24bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u32 val = 0;
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+	ret = st->read_reg_24(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val & 0xFFFFFF);
+}
+
+static ssize_t ade7854_read_32bit(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	int ret;
+	u32 val = 0;
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	ret = st->read_reg_32(dev, this_attr->address, &val);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t ade7854_write_8bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = st->write_reg_8(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7854_write_16bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = st->write_reg_16(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7854_write_24bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = st->write_reg_24(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static ssize_t ade7854_write_32bit(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf,
+		size_t len)
+{
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	int ret;
+	long val;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret)
+		goto error_ret;
+	ret = st->write_reg_32(dev, this_attr->address, val);
+
+error_ret:
+	return ret ? ret : len;
+}
+
+static int ade7854_reset(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	int ret;
+	u16 val;
+
+	ret = st->read_reg_16(dev, ADE7854_CONFIG, &val);
+	if (ret)
+		return ret;
+
+	val |= 1 << 7; /* Software Chip Reset */
+	ret = st->write_reg_16(dev, ADE7854_CONFIG, val);
+
+	return ret;
+}
+
+
+static ssize_t ade7854_write_reset(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	int ret;
+
+	if (len < 1)
+		return -EINVAL;
+
+	switch (buf[0]) {
+	case '1':
+	case 'y':
+	case 'Y':
+		ret = ade7854_reset(dev);
+		return ret ? ret : len;
+	}
+
+	return -EINVAL;
+}
+
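+/*
+ * Gain, offset, energy and measurement registers exposed as direct sysfs
+ * attributes; the last macro argument is the register address used by the
+ * show/store callback.
+ */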
+static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_AIGAIN);
+static IIO_DEV_ATTR_BIGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_BIGAIN);
+static IIO_DEV_ATTR_CIGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_CIGAIN);
+static IIO_DEV_ATTR_NIGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_NIGAIN);
+static IIO_DEV_ATTR_AVGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_AVGAIN);
+static IIO_DEV_ATTR_BVGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_BVGAIN);
+static IIO_DEV_ATTR_CVGAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_CVGAIN);
+static IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_AVAGAIN);
+static IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_BVAGAIN);
+static IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_CVAGAIN);
+static IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_AWATTOS);
+static IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_BWATTOS);
+static IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_CWATTOS);
+static IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_AVARGAIN);
+static IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_BVARGAIN);
+static IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_CVARGAIN);
+static IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_AVAROS);
+static IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_BVAROS);
+static IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(S_IWUSR | S_IRUGO,
+		ade7854_read_24bit,
+		ade7854_write_24bit,
+		ADE7854_CVAROS);
+static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
+		ade7854_read_32bit,
+		ade7854_write_32bit,
+		ADE7854_VPEAK);
+static IIO_DEV_ATTR_IPEAK(S_IWUSR | S_IRUGO,
+		ade7854_read_32bit,
+		ade7854_write_32bit,
+		ADE7854_IPEAK);
+static IIO_DEV_ATTR_APHCAL(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_APHCAL);
+static IIO_DEV_ATTR_BPHCAL(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_BPHCAL);
+static IIO_DEV_ATTR_CPHCAL(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_CPHCAL);
+static IIO_DEV_ATTR_CF1DEN(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_CF1DEN);
+static IIO_DEV_ATTR_CF2DEN(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_CF2DEN);
+static IIO_DEV_ATTR_CF3DEN(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_CF3DEN);
+static IIO_DEV_ATTR_LINECYC(S_IWUSR | S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_LINECYC);
+static IIO_DEV_ATTR_SAGCYC(S_IWUSR | S_IRUGO,
+		ade7854_read_8bit,
+		ade7854_write_8bit,
+		ADE7854_SAGCYC);
+static IIO_DEV_ATTR_CFCYC(S_IWUSR | S_IRUGO,
+		ade7854_read_8bit,
+		ade7854_write_8bit,
+		ADE7854_CFCYC);
+static IIO_DEV_ATTR_PEAKCYC(S_IWUSR | S_IRUGO,
+		ade7854_read_8bit,
+		ade7854_write_8bit,
+		ADE7854_PEAKCYC);
+static IIO_DEV_ATTR_CHKSUM(ade7854_read_24bit,
+		ADE7854_CHECKSUM);
+static IIO_DEV_ATTR_ANGLE0(ade7854_read_24bit,
+		ADE7854_ANGLE0);
+static IIO_DEV_ATTR_ANGLE1(ade7854_read_24bit,
+		ADE7854_ANGLE1);
+static IIO_DEV_ATTR_ANGLE2(ade7854_read_24bit,
+		ADE7854_ANGLE2);
+static IIO_DEV_ATTR_AIRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_AIRMS);
+static IIO_DEV_ATTR_BIRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_BIRMS);
+static IIO_DEV_ATTR_CIRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_CIRMS);
+static IIO_DEV_ATTR_NIRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_NIRMS);
+static IIO_DEV_ATTR_AVRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_AVRMS);
+static IIO_DEV_ATTR_BVRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_BVRMS);
+static IIO_DEV_ATTR_CVRMS(S_IRUGO,
+		ade7854_read_24bit,
+		NULL,
+		ADE7854_CVRMS);
+static IIO_DEV_ATTR_AIRMSOS(S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_AIRMSOS);
+static IIO_DEV_ATTR_BIRMSOS(S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_BIRMSOS);
+static IIO_DEV_ATTR_CIRMSOS(S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_CIRMSOS);
+static IIO_DEV_ATTR_AVRMSOS(S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_AVRMSOS);
+static IIO_DEV_ATTR_BVRMSOS(S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_BVRMSOS);
+static IIO_DEV_ATTR_CVRMSOS(S_IRUGO,
+		ade7854_read_16bit,
+		ade7854_write_16bit,
+		ADE7854_CVRMSOS);
+static IIO_DEV_ATTR_VOLT_A(ade7854_read_24bit,
+		ADE7854_VAWV);
+static IIO_DEV_ATTR_VOLT_B(ade7854_read_24bit,
+		ADE7854_VBWV);
+static IIO_DEV_ATTR_VOLT_C(ade7854_read_24bit,
+		ADE7854_VCWV);
+static IIO_DEV_ATTR_CURRENT_A(ade7854_read_24bit,
+		ADE7854_IAWV);
+static IIO_DEV_ATTR_CURRENT_B(ade7854_read_24bit,
+		ADE7854_IBWV);
+static IIO_DEV_ATTR_CURRENT_C(ade7854_read_24bit,
+		ADE7854_ICWV);
+static IIO_DEV_ATTR_AWATTHR(ade7854_read_32bit,
+		ADE7854_AWATTHR);
+static IIO_DEV_ATTR_BWATTHR(ade7854_read_32bit,
+		ADE7854_BWATTHR);
+static IIO_DEV_ATTR_CWATTHR(ade7854_read_32bit,
+		ADE7854_CWATTHR);
+static IIO_DEV_ATTR_AFWATTHR(ade7854_read_32bit,
+		ADE7854_AFWATTHR);
+static IIO_DEV_ATTR_BFWATTHR(ade7854_read_32bit,
+		ADE7854_BFWATTHR);
+static IIO_DEV_ATTR_CFWATTHR(ade7854_read_32bit,
+		ADE7854_CFWATTHR);
+static IIO_DEV_ATTR_AVARHR(ade7854_read_32bit,
+		ADE7854_AVARHR);
+static IIO_DEV_ATTR_BVARHR(ade7854_read_32bit,
+		ADE7854_BVARHR);
+static IIO_DEV_ATTR_CVARHR(ade7854_read_32bit,
+		ADE7854_CVARHR);
+static IIO_DEV_ATTR_AVAHR(ade7854_read_32bit,
+		ADE7854_AVAHR);
+static IIO_DEV_ATTR_BVAHR(ade7854_read_32bit,
+		ADE7854_BVAHR);
+static IIO_DEV_ATTR_CVAHR(ade7854_read_32bit,
+		ADE7854_CVAHR);
+
+static int ade7854_set_irq(struct device *dev, bool enable)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+
+	int ret;
+	u32 irqen;
+
+	ret = st->read_reg_32(dev, ADE7854_MASK0, &irqen);
+	if (ret)
+		goto error_ret;
+
+	if (enable)
+		irqen |= 1 << 17; /* 1: interrupt enabled when all periodical
+				     (at 8 kHz rate) DSP computations finish. */
+	else
+		irqen &= ~(1 << 17);
+
+	ret = st->write_reg_32(dev, ADE7854_MASK0, irqen);
+
+error_ret:
+	return ret;
+}
+
+static int ade7854_initial_setup(struct ade7854_state *st)
+{
+	int ret;
+	struct device *dev = &st->indio_dev->dev;
+
+	/* Disable IRQ */
+	ret = ade7854_set_irq(dev, false);
+	if (ret) {
+		dev_err(dev, "disable irq failed");
+		goto err_ret;
+	}
+
+	ade7854_reset(dev);
+	msleep(ADE7854_STARTUP_DELAY);
+
+err_ret:
+	return ret;
+}
+
+static IIO_DEV_ATTR_RESET(ade7854_write_reset);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("8000");
+
+static IIO_CONST_ATTR(name, "ade7854");
+
+static struct attribute *ade7854_event_attributes[] = {
+	NULL
+};
+
+static struct attribute_group ade7854_event_attribute_group = {
+	.attrs = ade7854_event_attributes,
+};
+
+static struct attribute *ade7854_attributes[] = {
+	&iio_dev_attr_aigain.dev_attr.attr,
+	&iio_dev_attr_bigain.dev_attr.attr,
+	&iio_dev_attr_cigain.dev_attr.attr,
+	&iio_dev_attr_nigain.dev_attr.attr,
+	&iio_dev_attr_avgain.dev_attr.attr,
+	&iio_dev_attr_bvgain.dev_attr.attr,
+	&iio_dev_attr_cvgain.dev_attr.attr,
+	&iio_dev_attr_linecyc.dev_attr.attr,
+	&iio_dev_attr_sagcyc.dev_attr.attr,
+	&iio_dev_attr_cfcyc.dev_attr.attr,
+	&iio_dev_attr_peakcyc.dev_attr.attr,
+	&iio_dev_attr_chksum.dev_attr.attr,
+	&iio_dev_attr_apparent_power_a_gain.dev_attr.attr,
+	&iio_dev_attr_apparent_power_b_gain.dev_attr.attr,
+	&iio_dev_attr_apparent_power_c_gain.dev_attr.attr,
+	&iio_dev_attr_active_power_a_offset.dev_attr.attr,
+	&iio_dev_attr_active_power_b_offset.dev_attr.attr,
+	&iio_dev_attr_active_power_c_offset.dev_attr.attr,
+	&iio_dev_attr_reactive_power_a_gain.dev_attr.attr,
+	&iio_dev_attr_reactive_power_b_gain.dev_attr.attr,
+	&iio_dev_attr_reactive_power_c_gain.dev_attr.attr,
+	&iio_dev_attr_reactive_power_a_offset.dev_attr.attr,
+	&iio_dev_attr_reactive_power_b_offset.dev_attr.attr,
+	&iio_dev_attr_reactive_power_c_offset.dev_attr.attr,
+	&iio_dev_attr_awatthr.dev_attr.attr,
+	&iio_dev_attr_bwatthr.dev_attr.attr,
+	&iio_dev_attr_cwatthr.dev_attr.attr,
+	&iio_dev_attr_afwatthr.dev_attr.attr,
+	&iio_dev_attr_bfwatthr.dev_attr.attr,
+	&iio_dev_attr_cfwatthr.dev_attr.attr,
+	&iio_dev_attr_avarhr.dev_attr.attr,
+	&iio_dev_attr_bvarhr.dev_attr.attr,
+	&iio_dev_attr_cvarhr.dev_attr.attr,
+	&iio_dev_attr_angle0.dev_attr.attr,
+	&iio_dev_attr_angle1.dev_attr.attr,
+	&iio_dev_attr_angle2.dev_attr.attr,
+	&iio_dev_attr_avahr.dev_attr.attr,
+	&iio_dev_attr_bvahr.dev_attr.attr,
+	&iio_dev_attr_cvahr.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_const_attr_name.dev_attr.attr,
+	&iio_dev_attr_vpeak.dev_attr.attr,
+	&iio_dev_attr_ipeak.dev_attr.attr,
+	&iio_dev_attr_aphcal.dev_attr.attr,
+	&iio_dev_attr_bphcal.dev_attr.attr,
+	&iio_dev_attr_cphcal.dev_attr.attr,
+	&iio_dev_attr_cf1den.dev_attr.attr,
+	&iio_dev_attr_cf2den.dev_attr.attr,
+	&iio_dev_attr_cf3den.dev_attr.attr,
+	&iio_dev_attr_airms.dev_attr.attr,
+	&iio_dev_attr_birms.dev_attr.attr,
+	&iio_dev_attr_cirms.dev_attr.attr,
+	&iio_dev_attr_nirms.dev_attr.attr,
+	&iio_dev_attr_avrms.dev_attr.attr,
+	&iio_dev_attr_bvrms.dev_attr.attr,
+	&iio_dev_attr_cvrms.dev_attr.attr,
+	&iio_dev_attr_airmsos.dev_attr.attr,
+	&iio_dev_attr_birmsos.dev_attr.attr,
+	&iio_dev_attr_cirmsos.dev_attr.attr,
+	&iio_dev_attr_avrmsos.dev_attr.attr,
+	&iio_dev_attr_bvrmsos.dev_attr.attr,
+	&iio_dev_attr_cvrmsos.dev_attr.attr,
+	&iio_dev_attr_volt_a.dev_attr.attr,
+	&iio_dev_attr_volt_b.dev_attr.attr,
+	&iio_dev_attr_volt_c.dev_attr.attr,
+	&iio_dev_attr_current_a.dev_attr.attr,
+	&iio_dev_attr_current_b.dev_attr.attr,
+	&iio_dev_attr_current_c.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ade7854_attribute_group = {
+	.attrs = ade7854_attributes,
+};
+
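+/*
+ * Common probe shared by the SPI and I2C front ends; the caller allocates
+ * st and fills in the bus specific register accessors before calling this.
+ */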
+int ade7854_probe(struct ade7854_state *st, struct device *dev)
+{
+	int ret, regdone = 0;
+
+	/* Allocate the comms buffers */
+	st->rx = kzalloc(sizeof(*st->rx)*ADE7854_MAX_RX, GFP_KERNEL);
+	if (st->rx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->tx = kzalloc(sizeof(*st->tx)*ADE7854_MAX_TX, GFP_KERNEL);
+	if (st->tx == NULL) {
+		ret = -ENOMEM;
+		goto error_free_rx;
+	}
+	mutex_init(&st->buf_lock);
+	/* setup the industrialio driver allocated elements */
+	st->indio_dev = iio_allocate_device();
+	if (st->indio_dev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_tx;
+	}
+
+	st->indio_dev->dev.parent = dev;
+	st->indio_dev->num_interrupt_lines = 1;
+	st->indio_dev->event_attrs = &ade7854_event_attribute_group;
+	st->indio_dev->attrs = &ade7854_attribute_group;
+	st->indio_dev->dev_data = (void *)(st);
+	st->indio_dev->driver_module = THIS_MODULE;
+	st->indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = ade7854_configure_ring(st->indio_dev);
+	if (ret)
+		goto error_free_dev;
+
+	ret = iio_device_register(st->indio_dev);
+	if (ret)
+		goto error_unreg_ring_funcs;
+	regdone = 1;
+
+	ret = ade7854_initialize_ring(st->indio_dev->ring);
+	if (ret) {
+		printk(KERN_ERR "failed to initialize the ring\n");
+		goto error_unreg_ring_funcs;
+	}
+
+	if (st->irq) {
+		ret = iio_register_interrupt_line(st->irq,
+				st->indio_dev,
+				0,
+				IRQF_TRIGGER_FALLING,
+				"ade7854");
+		if (ret)
+			goto error_uninitialize_ring;
+
+		ret = ade7854_probe_trigger(st->indio_dev);
+		if (ret)
+			goto error_unregister_line;
+	}
+	/* Get the device into a sane initial state */
+	ret = ade7854_initial_setup(st);
+	if (ret)
+		goto error_remove_trigger;
+
+	return 0;
+
+error_remove_trigger:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		ade7854_remove_trigger(st->indio_dev);
+error_unregister_line:
+	if (st->indio_dev->modes & INDIO_RING_TRIGGERED)
+		iio_unregister_interrupt_line(st->indio_dev, 0);
+error_uninitialize_ring:
+	ade7854_uninitialize_ring(st->indio_dev->ring);
+error_unreg_ring_funcs:
+	ade7854_unconfigure_ring(st->indio_dev);
+error_free_dev:
+	if (regdone)
+		iio_device_unregister(st->indio_dev);
+	else
+		iio_free_device(st->indio_dev);
+error_free_tx:
+	kfree(st->tx);
+error_free_rx:
+	kfree(st->rx);
+error_free_st:
+	kfree(st);
+	return ret;
+
+}
+EXPORT_SYMBOL(ade7854_probe);
+
+int ade7854_remove(struct ade7854_state *st)
+{
+	struct iio_dev *indio_dev = st->indio_dev;
+
+	flush_scheduled_work();
+
+	ade7854_remove_trigger(indio_dev);
+	if (st->irq)
+		iio_unregister_interrupt_line(indio_dev, 0);
+
+	ade7854_uninitialize_ring(indio_dev->ring);
+	ade7854_unconfigure_ring(indio_dev);
+	iio_device_unregister(indio_dev);
+	kfree(st->tx);
+	kfree(st->rx);
+	kfree(st);
+
+	return 0;
+}
+EXPORT_SYMBOL(ade7854_remove);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
new file mode 100644
index 0000000..47690e5
--- /dev/null
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -0,0 +1,245 @@
+#ifndef _ADE7854_H
+#define _ADE7854_H
+
+#define ADE7854_AIGAIN    0x4380
+#define ADE7854_AVGAIN    0x4381
+#define ADE7854_BIGAIN    0x4382
+#define ADE7854_BVGAIN    0x4383
+#define ADE7854_CIGAIN    0x4384
+#define ADE7854_CVGAIN    0x4385
+#define ADE7854_NIGAIN    0x4386
+#define ADE7854_AIRMSOS   0x4387
+#define ADE7854_AVRMSOS   0x4388
+#define ADE7854_BIRMSOS   0x4389
+#define ADE7854_BVRMSOS   0x438A
+#define ADE7854_CIRMSOS   0x438B
+#define ADE7854_CVRMSOS   0x438C
+#define ADE7854_NIRMSOS   0x438D
+#define ADE7854_AVAGAIN   0x438E
+#define ADE7854_BVAGAIN   0x438F
+#define ADE7854_CVAGAIN   0x4390
+#define ADE7854_AWGAIN    0x4391
+#define ADE7854_AWATTOS   0x4392
+#define ADE7854_BWGAIN    0x4393
+#define ADE7854_BWATTOS   0x4394
+#define ADE7854_CWGAIN    0x4395
+#define ADE7854_CWATTOS   0x4396
+#define ADE7854_AVARGAIN  0x4397
+#define ADE7854_AVAROS    0x4398
+#define ADE7854_BVARGAIN  0x4399
+#define ADE7854_BVAROS    0x439A
+#define ADE7854_CVARGAIN  0x439B
+#define ADE7854_CVAROS    0x439C
+#define ADE7854_AFWGAIN   0x439D
+#define ADE7854_AFWATTOS  0x439E
+#define ADE7854_BFWGAIN   0x439F
+#define ADE7854_BFWATTOS  0x43A0
+#define ADE7854_CFWGAIN   0x43A1
+#define ADE7854_CFWATTOS  0x43A2
+#define ADE7854_AFVARGAIN 0x43A3
+#define ADE7854_AFVAROS   0x43A4
+#define ADE7854_BFVARGAIN 0x43A5
+#define ADE7854_BFVAROS   0x43A6
+#define ADE7854_CFVARGAIN 0x43A7
+#define ADE7854_CFVAROS   0x43A8
+#define ADE7854_VATHR1    0x43A9
+#define ADE7854_VATHR0    0x43AA
+#define ADE7854_WTHR1     0x43AB
+#define ADE7854_WTHR0     0x43AC
+#define ADE7854_VARTHR1   0x43AD
+#define ADE7854_VARTHR0   0x43AE
+#define ADE7854_RSV       0x43AF
+#define ADE7854_VANOLOAD  0x43B0
+#define ADE7854_APNOLOAD  0x43B1
+#define ADE7854_VARNOLOAD 0x43B2
+#define ADE7854_VLEVEL    0x43B3
+#define ADE7854_DICOEFF   0x43B5
+#define ADE7854_HPFDIS    0x43B6
+#define ADE7854_ISUMLVL   0x43B8
+#define ADE7854_ISUM      0x43BF
+#define ADE7854_AIRMS     0x43C0
+#define ADE7854_AVRMS     0x43C1
+#define ADE7854_BIRMS     0x43C2
+#define ADE7854_BVRMS     0x43C3
+#define ADE7854_CIRMS     0x43C4
+#define ADE7854_CVRMS     0x43C5
+#define ADE7854_NIRMS     0x43C6
+#define ADE7854_RUN       0xE228
+#define ADE7854_AWATTHR   0xE400
+#define ADE7854_BWATTHR   0xE401
+#define ADE7854_CWATTHR   0xE402
+#define ADE7854_AFWATTHR  0xE403
+#define ADE7854_BFWATTHR  0xE404
+#define ADE7854_CFWATTHR  0xE405
+#define ADE7854_AVARHR    0xE406
+#define ADE7854_BVARHR    0xE407
+#define ADE7854_CVARHR    0xE408
+#define ADE7854_AFVARHR   0xE409
+#define ADE7854_BFVARHR   0xE40A
+#define ADE7854_CFVARHR   0xE40B
+#define ADE7854_AVAHR     0xE40C
+#define ADE7854_BVAHR     0xE40D
+#define ADE7854_CVAHR     0xE40E
+#define ADE7854_IPEAK     0xE500
+#define ADE7854_VPEAK     0xE501
+#define ADE7854_STATUS0   0xE502
+#define ADE7854_STATUS1   0xE503
+#define ADE7854_OILVL     0xE507
+#define ADE7854_OVLVL     0xE508
+#define ADE7854_SAGLVL    0xE509
+#define ADE7854_MASK0     0xE50A
+#define ADE7854_MASK1     0xE50B
+#define ADE7854_IAWV      0xE50C
+#define ADE7854_IBWV      0xE50D
+#define ADE7854_ICWV      0xE50E
+#define ADE7854_VAWV      0xE510
+#define ADE7854_VBWV      0xE511
+#define ADE7854_VCWV      0xE512
+#define ADE7854_AWATT     0xE513
+#define ADE7854_BWATT     0xE514
+#define ADE7854_CWATT     0xE515
+#define ADE7854_AVA       0xE519
+#define ADE7854_BVA       0xE51A
+#define ADE7854_CVA       0xE51B
+#define ADE7854_CHECKSUM  0xE51F
+#define ADE7854_VNOM      0xE520
+#define ADE7854_PHSTATUS  0xE600
+#define ADE7854_ANGLE0    0xE601
+#define ADE7854_ANGLE1    0xE602
+#define ADE7854_ANGLE2    0xE603
+#define ADE7854_PERIOD    0xE607
+#define ADE7854_PHNOLOAD  0xE608
+#define ADE7854_LINECYC   0xE60C
+#define ADE7854_ZXTOUT    0xE60D
+#define ADE7854_COMPMODE  0xE60E
+#define ADE7854_GAIN      0xE60F
+#define ADE7854_CFMODE    0xE610
+#define ADE7854_CF1DEN    0xE611
+#define ADE7854_CF2DEN    0xE612
+#define ADE7854_CF3DEN    0xE613
+#define ADE7854_APHCAL    0xE614
+#define ADE7854_BPHCAL    0xE615
+#define ADE7854_CPHCAL    0xE616
+#define ADE7854_PHSIGN    0xE617
+#define ADE7854_CONFIG    0xE618
+#define ADE7854_MMODE     0xE700
+#define ADE7854_ACCMODE   0xE701
+#define ADE7854_LCYCMODE  0xE702
+#define ADE7854_PEAKCYC   0xE703
+#define ADE7854_SAGCYC    0xE704
+#define ADE7854_CFCYC     0xE705
+#define ADE7854_HSDC_CFG  0xE706
+#define ADE7854_CONFIG2   0xEC01
+
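+/* First byte of an SPI frame: 0x1 selects a read, 0x0 a write (unused on I2C) */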
+#define ADE7854_READ_REG   0x1
+#define ADE7854_WRITE_REG  0x0
+
+#define ADE7854_MAX_TX    7
+#define ADE7854_MAX_RX    7
+#define ADE7854_STARTUP_DELAY 1
+
+#define ADE7854_SPI_SLOW	(u32)(300 * 1000)
+#define ADE7854_SPI_BURST	(u32)(1000 * 1000)
+#define ADE7854_SPI_FAST	(u32)(2000 * 1000)
+
+#define DRIVER_NAME		"ade7854"
+
+/**
+ * struct ade7854_state - device instance specific data
+ * @spi:		actual spi_device (when probed over SPI)
+ * @i2c:		actual i2c_client (when probed over I2C)
+ * @work_trigger_to_ring: bh for triggered event handling
+ * @last_timestamp:	passing timestamp from th to bh of interrupt handler
+ * @indio_dev:		industrial I/O device structure
+ * @trig:		data ready trigger registered with iio
+ * @tx:			transmit buffer
+ * @rx:			receive buffer
+ * @irq:		irq line used by the bus specific front end
+ * @buf_lock:		mutex to protect tx and rx
+ **/
+struct ade7854_state {
+	struct spi_device		*spi;
+	struct i2c_client               *i2c;
+	struct work_struct		work_trigger_to_ring;
+	s64				last_timestamp;
+	struct iio_dev			*indio_dev;
+	struct iio_trigger		*trig;
+	u8				*tx;
+	u8				*rx;
+	int				(*read_reg_8) (struct device *, u16, u8 *);
+	int				(*read_reg_16) (struct device *, u16, u16 *);
+	int				(*read_reg_24) (struct device *, u16, u32 *);
+	int				(*read_reg_32) (struct device *, u16, u32 *);
+	int				(*write_reg_8) (struct device *, u16, u8);
+	int				(*write_reg_16) (struct device *, u16, u16);
+	int				(*write_reg_24) (struct device *, u16, u32);
+	int				(*write_reg_32) (struct device *, u16, u32);
+	int                             irq;
+	struct mutex			buf_lock;
+};
+
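+/* implemented in ade7854.c and shared by the I2C and SPI bus front ends */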
+extern int ade7854_probe(struct ade7854_state *st, struct device *dev);
+extern int ade7854_remove(struct ade7854_state *st);
+
+#if defined(CONFIG_IIO_RING_BUFFER) && defined(THIS_HAS_RING_BUFFER_SUPPORT)
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
+
+enum ade7854_scan {
+	ADE7854_SCAN_PHA_V,
+	ADE7854_SCAN_PHB_V,
+	ADE7854_SCAN_PHC_V,
+	ADE7854_SCAN_PHA_I,
+	ADE7854_SCAN_PHB_I,
+	ADE7854_SCAN_PHC_I,
+};
+
+void ade7854_remove_trigger(struct iio_dev *indio_dev);
+int ade7854_probe_trigger(struct iio_dev *indio_dev);
+
+ssize_t ade7854_read_data_from_ring(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf);
+
+
+int ade7854_configure_ring(struct iio_dev *indio_dev);
+void ade7854_unconfigure_ring(struct iio_dev *indio_dev);
+
+int ade7854_initialize_ring(struct iio_ring_buffer *ring);
+void ade7854_uninitialize_ring(struct iio_ring_buffer *ring);
+#else /* CONFIG_IIO_RING_BUFFER */
+
+static inline void ade7854_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7854_probe_trigger(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline ssize_t
+ade7854_read_data_from_ring(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	return 0;
+}
+
+static inline int ade7854_configure_ring(struct iio_dev *indio_dev)
+{
+	return 0;
+}
+
+static inline void ade7854_unconfigure_ring(struct iio_dev *indio_dev)
+{
+}
+static inline int ade7854_initialize_ring(struct iio_ring_buffer *ring)
+{
+	return 0;
+}
+static inline void ade7854_uninitialize_ring(struct iio_ring_buffer *ring)
+{
+}
+#endif /* CONFIG_IIO_RING_BUFFER */
+
+#endif
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
new file mode 100644
index 0000000..142c50d
--- /dev/null
+++ b/drivers/staging/iio/meter/meter.h
@@ -0,0 +1,396 @@
+#include "../sysfs.h"
+
+/* metering ic types of attribute */
+
+#define IIO_DEV_ATTR_CURRENT_A_OFFSET(_mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(current_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_B_OFFSET(_mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(current_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_C_OFFSET(_mode, _show, _store, _addr)	\
+	IIO_DEVICE_ATTR(current_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOLT_A_OFFSET(_mode, _show, _store, _addr)      \
+	IIO_DEVICE_ATTR(volt_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOLT_B_OFFSET(_mode, _show, _store, _addr)      \
+	IIO_DEVICE_ATTR(volt_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOLT_C_OFFSET(_mode, _show, _store, _addr)      \
+	IIO_DEVICE_ATTR(volt_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_A_OFFSET(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(reactive_power_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_B_OFFSET(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(reactive_power_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_C_OFFSET(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(reactive_power_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_A_OFFSET(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_a_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_B_OFFSET(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_b_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_C_OFFSET(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_c_offset, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_A_GAIN(_mode, _show, _store, _addr)		\
+	IIO_DEVICE_ATTR(current_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_B_GAIN(_mode, _show, _store, _addr)		\
+	IIO_DEVICE_ATTR(current_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_C_GAIN(_mode, _show, _store, _addr)		\
+	IIO_DEVICE_ATTR(current_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APPARENT_POWER_A_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(apparent_power_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APPARENT_POWER_B_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(apparent_power_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APPARENT_POWER_C_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(apparent_power_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_A_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_B_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_ACTIVE_POWER_C_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(active_power_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_A_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(reactive_power_a_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_B_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(reactive_power_b_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_REACTIVE_POWER_C_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(reactive_power_c_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_A(_show, _addr)			\
+	IIO_DEVICE_ATTR(current_a, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_B(_show, _addr)			\
+	IIO_DEVICE_ATTR(current_b, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CURRENT_C(_show, _addr)			\
+	IIO_DEVICE_ATTR(current_c, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VOLT_A(_show, _addr)			\
+	IIO_DEVICE_ATTR(volt_a, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VOLT_B(_show, _addr)			\
+	IIO_DEVICE_ATTR(volt_b, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VOLT_C(_show, _addr)			\
+	IIO_DEVICE_ATTR(volt_c, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(aenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(lenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_RAENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(raenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LAENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(laenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_VAENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(vaenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LVAENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(lvaenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_RVAENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(rvaenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_LVARENERGY(_show, _addr)			\
+	IIO_DEVICE_ATTR(lvarenergy, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CHKSUM(_show, _addr)                       \
+	IIO_DEVICE_ATTR(chksum, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGLE0(_show, _addr)                       \
+	IIO_DEVICE_ATTR(angle0, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGLE1(_show, _addr)                       \
+	IIO_DEVICE_ATTR(angle1, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_ANGLE2(_show, _addr)                       \
+	IIO_DEVICE_ATTR(angle2, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AWATTHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(awatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BWATTHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(bwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CWATTHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(cwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AFWATTHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(afwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BFWATTHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(bfwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CFWATTHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(cfwatthr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AVARHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(avarhr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BVARHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(bvarhr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CVARHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(cvarhr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_AVAHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(avahr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_BVAHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(bvahr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_CVAHR(_show, _addr)			\
+	IIO_DEVICE_ATTR(cvahr, S_IRUGO, _show, NULL, _addr)
+
+#define IIO_DEV_ATTR_IOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(ios, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(vos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_PHCAL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(phcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APHCAL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(aphcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BPHCAL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bphcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CPHCAL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cphcal, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_APOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(apos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AAPOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(aapos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BAPOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bapos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CAPOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(capos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVRMSGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(avrmsgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVRMSGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bvrmsgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVRMSGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cvrmsgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AIGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(aigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BIGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CIGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_NIGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(nigain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(avgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bvgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cvgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_WGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(wgain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_WDIV(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(wdiv, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CFNUM(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cfnum, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CFDEN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cfden, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CF1DEN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cf1den, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CF2DEN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cf2den, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CF3DEN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cf3den, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(irms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(vrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AIRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(airms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BIRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(birms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CIRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cirms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_NIRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(nirms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(avrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bvrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVRMS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cvrms, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(irmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(vrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AIRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(airmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BIRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(birmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CIRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cirmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_AVRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(avrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_BVRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(bvrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CVRMSOS(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cvrmsos, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VAGAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(vagain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_PGA_GAIN(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(pga_gain, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VADIV(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(vadiv, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_LINECYC(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(linecyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_SAGCYC(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(sagcyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CFCYC(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(cfcyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_PEAKCYC(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(peakcyc, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_SAGLVL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(saglvl, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IPKLVL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(ipklvl, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VPKLVL(_mode, _show, _store, _addr)                \
+	IIO_DEVICE_ATTR(vpklvl, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_IPEAK(_mode, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(ipeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_RIPEAK(_mode, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(ripeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VPEAK(_mode, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(vpeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_RVPEAK(_mode, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(rvpeak, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_VPERIOD(_mode, _show, _store, _addr)			\
+	IIO_DEVICE_ATTR(vperiod, _mode, _show, _store, _addr)
+
+#define IIO_DEV_ATTR_CH_OFF(_num, _mode, _show, _store, _addr)			\
+  IIO_DEVICE_ATTR(choff_##_num, _mode, _show, _store, _addr)
+
+/* active energy register, AENERGY, is more than half full */
+#define IIO_EVENT_ATTR_AENERGY_HALF_FULL(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(aenergy_half_full, _evlist, _show, _store, _mask)
+
+/* a SAG on the line voltage */
+#define IIO_EVENT_ATTR_LINE_VOLT_SAG(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(line_volt_sag, _evlist, _show, _store, _mask)
+
+/*
+ * Indicates the end of energy accumulation over an integer number
+ * of half line cycles
+ */
+#define IIO_EVENT_ATTR_CYCEND(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(cycend, _evlist, _show, _store, _mask)
+
+/* on the rising and falling edges of the voltage waveform */
+#define IIO_EVENT_ATTR_ZERO_CROSS(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(zero_cross, _evlist, _show, _store, _mask)
+
+/* the active energy register has overflowed */
+#define IIO_EVENT_ATTR_AENERGY_OVERFLOW(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(aenergy_overflow, _evlist, _show, _store, _mask)
+
+/* the apparent energy register has overflowed */
+#define IIO_EVENT_ATTR_VAENERGY_OVERFLOW(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(vaenergy_overflow, _evlist, _show, _store, _mask)
+
+/* the apparent energy register, VAENERGY, is more than half full */
+#define IIO_EVENT_ATTR_VAENERGY_HALF_FULL(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(vaenergy_half_full, _evlist, _show, _store, _mask)
+
+/* the power has gone from negative to positive */
+#define IIO_EVENT_ATTR_PPOS(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(ppos, _evlist, _show, _store, _mask)
+
+/* the power has gone from positive to negative */
+#define IIO_EVENT_ATTR_PNEG(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(pneg, _evlist, _show, _store, _mask)
+
+/* waveform sample from Channel 1 has exceeded the IPKLVL value */
+#define IIO_EVENT_ATTR_IPKLVL_EXC(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(ipklvl_exc, _evlist, _show, _store, _mask)
+
+/* waveform sample from Channel 2 has exceeded the VPKLVL value */
+#define IIO_EVENT_ATTR_VPKLVL_EXC(_evlist, _show, _store, _mask) \
+	IIO_EVENT_ATTR_SH(vpklvl_exc, _evlist, _show, _store, _mask)
+
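
The macros above are thin wrappers around IIO_DEVICE_ATTR() from sysfs.h; a meter driver instantiates them with its own show/store callbacks and a register address. A minimal usage sketch follows — the ade7854_read_24bit/ade7854_write_24bit helpers and the ADE7854_* register names are assumed here for illustration, not taken from this patch:

static IIO_DEV_ATTR_CURRENT_A(ade7854_read_24bit, ADE7854_IAWV);
static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
		ade7854_read_24bit, ade7854_write_24bit, ADE7854_AIGAIN);

The resulting iio_dev_attr_current_a.dev_attr.attr and iio_dev_attr_aigain.dev_attr.attr entries then go into the driver's attribute group.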
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
new file mode 100644
index 0000000..a4a3634
--- /dev/null
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -0,0 +1,54 @@
+#
+# Resolver/Synchro drivers
+#
+comment "Resolver to digital converters"
+
+config AD2S90
+	tristate "Analog Devices ad2s90 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD2S90 SPI
+	  resolver to digital converter. It provides direct access via sysfs.
+
+config AD2S120X
+	tristate "Analog Devices ad2s120x driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD2S1200 and
+	  AD2S1205 SPI resolver to digital converters. They provide direct
+	  access via sysfs.
+
+config AD2S1210
+	tristate "Analog Devices ad2s1210 driver"
+	depends on SPI
+	help
+	  Say yes here to build support for the Analog Devices AD2S1210 SPI
+	  resolver to digital converter. It provides direct access via sysfs.
+
+choice
+	prompt "Resolution Control"
+	depends on AD2S1210
+	default AD2S1210_GPIO_NONE
+	help
+	  In normal mode, the resolution of the digital output is selected
+	  using the RES0 and RES1 input pins. In configuration mode, the
+	  resolution is selected by setting the RES0 and RES1 bits in the
+	  control register. These options select how the driver keeps the
+	  two settings consistent when switching between the modes.
+
+config AD2S1210_GPIO_INPUT
+	bool "read resolution from gpio pins"
+	help
+	  The RES0 and RES1 pins are sampled through GPIOs; the driver reads
+	  the resolution setting back from those GPIO pins.
+
+config AD2S1210_GPIO_OUTPUT
+	bool "set gpio pins to set resolution"
+	help
+	  The RES0 and RES1 pins are driven by GPIOs; the driver sets those
+	  GPIO pins to select the resolution.
+
+config AD2S1210_GPIO_NONE
+	bool "resolution is managed by the user"
+
+endchoice
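
The resolver drivers configured above take their GPIO assignments through SPI platform data: ad2s120x expects a two-element pin array (SAMPLE, RDVEL) and ad2s1210 a five-element array (SAMPLE, A0, A1, RES0, RES1), as read back in their probe routines below. A minimal board-file sketch, where the GPIO numbers, bus number and chip select are purely illustrative:

/* illustrative board wiring; adjust GPIO numbers, bus_num and chip_select */
static unsigned short ad2s1210_pins[] = {
	10,	/* SAMPLE */
	11,	/* A0 */
	12,	/* A1 */
	13,	/* RES0 */
	14,	/* RES1 */
};

static struct spi_board_info board_spi_devs[] __initdata = {
	{
		.modalias	= "ad2s1210",
		.max_speed_hz	= 8192000,	/* AD2S1210_DEF_CLKIN */
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_3,
		.platform_data	= ad2s1210_pins,
	},
};

This would be registered from the machine init code with spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs)).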
diff --git a/drivers/staging/iio/resolver/Makefile b/drivers/staging/iio/resolver/Makefile
new file mode 100644
index 0000000..0b84a89
--- /dev/null
+++ b/drivers/staging/iio/resolver/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Resolver/Synchro drivers
+#
+
+obj-$(CONFIG_AD2S90) += ad2s90.o
+obj-$(CONFIG_AD2S120X) += ad2s120x.o
+obj-$(CONFIG_AD2S1210) += ad2s1210.o
diff --git a/drivers/staging/iio/resolver/ad2s120x.c b/drivers/staging/iio/resolver/ad2s120x.c
new file mode 100644
index 0000000..8f497a2
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s120x.c
@@ -0,0 +1,310 @@
+/*
+ * ad2s120x.c simple support for the ADI Resolver to Digital Converters: AD2S1200/1205
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s120x"
+
+/* input pins SAMPLE and RDVEL are controlled by the driver */
+#define AD2S120X_PN	2
+
+/* input clock on serial interface */
+#define AD2S120X_HZ	8192000
+/* clock period in nanoseconds */
+#define AD2S120X_TSCLK	(1000000000/AD2S120X_HZ)
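+/*
+ * At 8.192 MHz this works out to roughly 122 ns per clock, so the
+ * "(6 * AD2S120X_TSCLK + 20) ns" sample pulse required below comes to
+ * roughly 750 ns, which is why udelay(1) is sufficient there.
+ */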
+
+struct ad2s120x_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+	unsigned short sample;
+	unsigned short rdvel;
+	u8 rx[2];
+	u8 tx[2];
+};
+
+static ssize_t ad2s120x_show_pos_vel(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret = 0;
+	ssize_t len = 0;
+	u16 pos;
+	s16 vel;
+	u8 status;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s120x_state *st = idev->dev_data;
+
+	xfer.len = 1;
+	xfer.tx_buf = st->tx;
+	xfer.rx_buf = st->rx;
+	mutex_lock(&st->lock);
+
+	gpio_set_value(st->sample, 0);
+	/* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
+	udelay(1);
+	gpio_set_value(st->sample, 1);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	status = st->rx[1];
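+	/*
+	 * The 16-bit frame carries 12 data bits followed by 4 status bits:
+	 * rx[0] holds the upper eight data bits, the high nibble of rx[1]
+	 * the lower four, and the low nibble of rx[1] the status flags
+	 * decoded into the output string below.
+	 */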
+	pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+	len = sprintf(buf, "%d %c%c%c%c ", pos,
+				(status & 0x8) ? 'P' : 'V',
+				(status & 0x4) ? 'd' : '_',
+				(status & 0x2) ? 'l' : '_',
+				(status & 0x1) ? '1' : '0');
+
+	/* delay 18 ns */
+	/* ndelay(18); */
+
+	gpio_set_value(st->rdvel, 0);
+	/* ndelay(5);*/
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	status = st->rx[1];
+	vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
+	vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+	len += sprintf(buf + len, "%d %c%c%c%c\n", vel,
+				(status & 0x8) ? 'P' : 'V',
+				(status & 0x4) ? 'd' : '_',
+				(status & 0x2) ? 'l' : '_',
+				(status & 0x1) ? '1' : '0');
+error_ret:
+	gpio_set_value(st->rdvel, 1);
+	/* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
+	udelay(1);
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad2s120x_show_pos(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret = 0;
+	ssize_t len = 0;
+	u16 pos;
+	u8 status;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s120x_state *st = idev->dev_data;
+
+	xfer.len = 1;
+	xfer.tx_buf = st->tx;
+	xfer.rx_buf = st->rx;
+	mutex_lock(&st->lock);
+
+	gpio_set_value(st->sample, 0);
+	/* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
+	udelay(1);
+	gpio_set_value(st->sample, 1);
+	gpio_set_value(st->rdvel, 1);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	status = st->rx[1];
+	pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+	len = sprintf(buf, "%d %c%c%c%c ", pos,
+				(status & 0x8) ? 'P' : 'V',
+				(status & 0x4) ? 'd' : '_',
+				(status & 0x2) ? 'l' : '_',
+				(status & 0x1) ? '1' : '0');
+error_ret:
+	/* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
+	udelay(1);
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad2s120x_show_vel(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret = 0;
+	ssize_t len = 0;
+	s16 vel;
+	u8 status;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s120x_state *st = idev->dev_data;
+
+	xfer.len = 1;
+	xfer.tx_buf = st->tx;
+	xfer.rx_buf = st->rx;
+	mutex_lock(&st->lock);
+
+	gpio_set_value(st->sample, 0);
+	/* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
+	udelay(1);
+	gpio_set_value(st->sample, 1);
+
+	gpio_set_value(st->rdvel, 0);
+	/* ndelay(5);*/
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	status = st->rx[1];
+	vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
+	vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+	len += sprintf(buf + len, "%d %c%c%c%c\n", vel,
+				(status & 0x8) ? 'P' : 'V',
+				(status & 0x4) ? 'd' : '_',
+				(status & 0x2) ? 'l' : '_',
+				(status & 0x1) ? '1' : '0');
+error_ret:
+	gpio_set_value(st->rdvel, 1);
+	/* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
+	udelay(1);
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(description,
+	"12-Bit R/D Converter with Reference Oscillator");
+static IIO_DEVICE_ATTR(pos_vel, S_IRUGO, ad2s120x_show_pos_vel, NULL, 0);
+static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_pos, NULL, 0);
+static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_vel, NULL, 0);
+
+static struct attribute *ad2s120x_attributes[] = {
+	&iio_const_attr_description.dev_attr.attr,
+	&iio_dev_attr_pos_vel.dev_attr.attr,
+	&iio_dev_attr_pos.dev_attr.attr,
+	&iio_dev_attr_vel.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad2s120x_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad2s120x_attributes,
+};
+
+static int __devinit ad2s120x_probe(struct spi_device *spi)
+{
+	struct ad2s120x_state *st;
+	int pn, ret = 0;
+	unsigned short *pins = spi->dev.platform_data;
+
+	for (pn = 0; pn < AD2S120X_PN; pn++) {
+		ret = gpio_request(pins[pn], DRV_NAME);
+		if (ret) {
+			pr_err("%s: request gpio pin %d failed\n",
+						DRV_NAME, pins[pn]);
+			goto error_ret;
+		}
+		gpio_direction_output(pins[pn], 1);
+	}
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+	st->sample = pins[0];
+	st->rdvel = pins[1];
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad2s120x_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+
+	spi->max_speed_hz = AD2S120X_HZ;
+	spi->mode = SPI_MODE_3;
+	spi_setup(spi);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	for (--pn; pn >= 0; pn--)
+		gpio_free(pins[pn]);
+	return ret;
+}
+
+static int __devexit ad2s120x_remove(struct spi_device *spi)
+{
+	struct ad2s120x_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad2s120x_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad2s120x_probe,
+	.remove = __devexit_p(ad2s120x_remove),
+};
+
+static __init int ad2s120x_spi_init(void)
+{
+	return spi_register_driver(&ad2s120x_driver);
+}
+module_init(ad2s120x_spi_init);
+
+static __exit void ad2s120x_spi_exit(void)
+{
+	spi_unregister_driver(&ad2s120x_driver);
+}
+module_exit(ad2s120x_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
new file mode 100644
index 0000000..c12f64c
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -0,0 +1,872 @@
+/*
+ * ad2s1210.c support for the ADI Resolver to Digital Converters: AD2S1210
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s1210"
+
+#define DEF_CONTROL		0x7E
+
+#define MSB_IS_HIGH		0x80
+#define MSB_IS_LOW		0x7F
+#define PHASE_LOCK_RANGE_44	0x20
+#define ENABLE_HYSTERESIS	0x10
+#define SET_ENRES1		0x08
+#define SET_ENRES0		0x04
+#define SET_RES1		0x02
+#define SET_RES0		0x01
+
+#define SET_ENRESOLUTION	(SET_ENRES1 | SET_ENRES0)
+#define SET_RESOLUTION		(SET_RES1 | SET_RES0)
+
+#define REG_POSITION		0x80
+#define REG_VELOCITY		0x82
+#define REG_LOS_THRD		0x88
+#define REG_DOS_OVR_THRD	0x89
+#define REG_DOS_MIS_THRD	0x8A
+#define REG_DOS_RST_MAX_THRD	0x8B
+#define REG_DOS_RST_MIN_THRD	0x8C
+#define REG_LOT_HIGH_THRD	0x8D
+#define REG_LOT_LOW_THRD	0x8E
+#define REG_EXCIT_FREQ		0x91
+#define REG_CONTROL		0x92
+#define REG_SOFT_RESET		0xF0
+#define REG_FAULT		0xFF
+
+/* pins SAMPLE, A0, A1, RES0 and RES1 are controlled by the driver */
+#define AD2S1210_SAA		3
+#if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+# define AD2S1210_RES		2
+#else
+# define AD2S1210_RES		0
+#endif
+#define AD2S1210_PN		(AD2S1210_SAA + AD2S1210_RES)
+
+#define AD2S1210_MIN_CLKIN	6144000
+#define AD2S1210_MAX_CLKIN	10240000
+#define AD2S1210_MIN_EXCIT	2000
+#define AD2S1210_MAX_EXCIT	20000
+#define AD2S1210_MIN_FCW	0x4
+#define AD2S1210_MAX_FCW	0x50
+
+/* default input clock on serial interface */
+#define AD2S1210_DEF_CLKIN	8192000
+/* clock period in nanoseconds */
+#define AD2S1210_DEF_TCK	(1000000000/AD2S1210_DEF_CLKIN)
+#define AD2S1210_DEF_EXCIT	10000
+
+enum ad2s1210_mode {
+	MOD_POS = 0,
+	MOD_VEL,
+	MOD_RESERVED,
+	MOD_CONFIG,
+};
+
+enum ad2s1210_res {
+	RES_10 = 10,
+	RES_12 = 12,
+	RES_14 = 14,
+	RES_16 = 16,
+};
+
+static unsigned int resolution_value[] = {
+		RES_10, RES_12, RES_14, RES_16};
+
+struct ad2s1210_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+	struct spi_transfer xfer;
+	unsigned int hysteresis;
+	unsigned int old_data;
+	enum ad2s1210_mode mode;
+	enum ad2s1210_res resolution;
+	unsigned int fclkin;
+	unsigned int fexcit;
+	unsigned short sample;
+	unsigned short a0;
+	unsigned short a1;
+	unsigned short res0;
+	unsigned short res1;
+	u8 rx[3];
+	u8 tx[3];
+};
+
+static inline void start_sample(struct ad2s1210_state *st)
+{
+	gpio_set_value(st->sample, 0);
+}
+
+static inline void stop_sample(struct ad2s1210_state *st)
+{
+	gpio_set_value(st->sample, 1);
+}
+
+static inline void set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st)
+{
+	switch (mode) {
+	case MOD_POS:
+		gpio_set_value(st->a0, 0);
+		gpio_set_value(st->a1, 0);
+		break;
+	case MOD_VEL:
+		gpio_set_value(st->a0, 0);
+		gpio_set_value(st->a1, 1);
+		break;
+	case MOD_CONFIG:
+		gpio_set_value(st->a0, 1);
+		gpio_set_value(st->a1, 1);
+		break;
+	default:
+		/* set to reserved mode */
+		gpio_set_value(st->a0, 1);
+		gpio_set_value(st->a1, 0);
+	}
+	st->mode = mode;
+}
+
+/* write 1 byte (address or data) to the chip */
+static int config_write(struct ad2s1210_state *st,
+					unsigned char data)
+{
+	struct spi_message msg;
+	int ret = 0;
+
+	st->xfer.len = 1;
+	set_mode(MOD_CONFIG, st);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&st->xfer, &msg);
+	st->tx[0] = data;
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		return ret;
+	st->old_data = 1;
+	return ret;
+}
+
+/* read value from one of the registers */
+static int config_read(struct ad2s1210_state *st,
+				unsigned char address,
+					unsigned char *data)
+{
+	struct spi_message msg;
+	int ret = 0;
+
+	st->xfer.len = 2;
+	set_mode(MOD_CONFIG, st);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&st->xfer, &msg);
+	st->tx[0] = address | MSB_IS_HIGH;
+	st->tx[1] = REG_FAULT;
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		return ret;
+	*data = st->rx[1];
+	st->old_data = 1;
+	return ret;
+}
+
+static inline void update_frequency_control_word(struct ad2s1210_state *st)
+{
+	unsigned char fcw;
+	fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin);
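+	/*
+	 * With the defaults (fexcit = 10000 Hz, fclkin = 8192000 Hz) this
+	 * gives fcw = 10000 * 32768 / 8192000 = 40 (0x28), inside the
+	 * 0x4..0x50 window checked below.
+	 */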
+	if (fcw >= AD2S1210_MIN_FCW && fcw <= AD2S1210_MAX_FCW) {
+		config_write(st, REG_EXCIT_FREQ);
+		config_write(st, fcw);
+	} else
+		pr_err("ad2s1210: FCW out of range\n");
+}
+
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+static inline unsigned char read_resolution_pin(struct ad2s1210_state *st)
+{
+	unsigned int data;
+	data = (gpio_get_value(st->res0) << 1)  |
+			gpio_get_value(st->res1);
+	return resolution_value[data];
+}
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+static inline void set_resolution_pin(struct ad2s1210_state *st)
+{
+	switch (st->resolution) {
+	case RES_10:
+		gpio_set_value(st->res0, 0);
+		gpio_set_value(st->res1, 0);
+		break;
+	case RES_12:
+		gpio_set_value(st->res0, 0);
+		gpio_set_value(st->res1, 1);
+		break;
+	case RES_14:
+		gpio_set_value(st->res0, 1);
+		gpio_set_value(st->res1, 0);
+		break;
+	case RES_16:
+		gpio_set_value(st->res0, 1);
+		gpio_set_value(st->res1, 1);
+		break;
+	}
+}
+#endif
+
+static inline void soft_reset(struct ad2s1210_state *st)
+{
+	config_write(st, REG_SOFT_RESET);
+	config_write(st, 0x0);
+}
+
+
+/* return the data clocked back during the last configuration write/read */
+static ssize_t ad2s1210_show_raw(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	int ret;
+
+	mutex_lock(&st->lock);
+	if (st->old_data) {
+		ret = sprintf(buf, "0x%x\n", st->rx[0]);
+		st->old_data = 0;
+	} else
+		ret = 0;
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static ssize_t ad2s1210_store_raw(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned long udata;
+	unsigned char data;
+	int ret;
+
+	ret = strict_strtoul(buf, 16, &udata);
+	if (ret)
+		return -EINVAL;
+	data = udata & 0xff;
+	mutex_lock(&st->lock);
+	config_write(st, data);
+	mutex_unlock(&st->lock);
+	return len;
+}
+
+static ssize_t ad2s1210_store_softreset(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	mutex_lock(&st->lock);
+	soft_reset(st);
+	mutex_unlock(&st->lock);
+	return len;
+}
+
+static ssize_t ad2s1210_show_fclkin(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	return sprintf(buf, "%d\n", st->fclkin);
+}
+
+static ssize_t ad2s1210_store_fclkin(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned long fclkin;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &fclkin);
+	if (!ret && fclkin >= AD2S1210_MIN_CLKIN &&
+				fclkin <= AD2S1210_MAX_CLKIN) {
+		mutex_lock(&st->lock);
+		st->fclkin = fclkin;
+	} else {
+		pr_err("ad2s1210: fclkin out of range\n");
+		return -EINVAL;
+	}
+	update_frequency_control_word(st);
+	soft_reset(st);
+	mutex_unlock(&st->lock);
+	return len;
+}
+
+static ssize_t ad2s1210_show_fexcit(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	return sprintf(buf, "%d\n", st->fexcit);
+}
+
+static ssize_t ad2s1210_store_fexcit(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned long fexcit;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &fexcit);
+	if (!ret && fexcit >= AD2S1210_MIN_EXCIT &&
+				fexcit <= AD2S1210_MAX_EXCIT) {
+		mutex_lock(&st->lock);
+		st->fexcit = fexcit;
+	} else {
+		pr_err("ad2s1210: excitation frequency out of range\n");
+		return -EINVAL;
+	}
+	update_frequency_control_word(st);
+	soft_reset(st);
+	mutex_unlock(&st->lock);
+	return len;
+}
+
+static ssize_t ad2s1210_show_control(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned char data;
+	mutex_lock(&st->lock);
+	config_read(st, REG_CONTROL, &data);
+	mutex_unlock(&st->lock);
+	return sprintf(buf, "0x%x\n", data);
+}
+
+static ssize_t ad2s1210_store_control(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned long udata;
+	unsigned char data;
+	int ret;
+
+	ret = strict_strtoul(buf, 16, &udata);
+	if (ret) {
+		ret = -EINVAL;
+		goto error_ret;
+	}
+	mutex_lock(&st->lock);
+	config_write(st, REG_CONTROL);
+	data = udata & MSB_IS_LOW;
+	config_write(st, data);
+	config_read(st, REG_CONTROL, &data);
+	if (data & MSB_IS_HIGH) {
+		ret = -EIO;
+		pr_err("ad2s1210: failed to write control register\n");
+		goto error_ret;
+	}
+	st->resolution = resolution_value[data & SET_RESOLUTION];
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+	data = read_resolution_pin(st);
+	if (data != st->resolution)
+		pr_warning("ad2s1210: resolution settings do not match\n");
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+	set_resolution_pin(st);
+#endif
+	ret = len;
+	if (data & ENABLE_HYSTERESIS)
+		st->hysteresis = 1;
+	else
+		st->hysteresis = 0;
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static ssize_t ad2s1210_show_resolution(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	return sprintf(buf, "%d\n", st->resolution);
+}
+
+static ssize_t ad2s1210_store_resolution(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned char data;
+	unsigned long udata;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &udata);
+	if (ret || udata < RES_10 || udata > RES_16) {
+		pr_err("ad2s1210: resolution out of range\n");
+		return -EINVAL;
+	}
+	mutex_lock(&st->lock);
+	config_read(st, REG_CONTROL, &data);
+	data &= ~SET_RESOLUTION;
+	data |= (udata - RES_10) >> 1;
+	config_write(st, REG_CONTROL);
+	config_write(st, data & MSB_IS_LOW);
+	config_read(st, REG_CONTROL, &data);
+	if (data & MSB_IS_HIGH) {
+		ret = -EIO;
+		pr_err("ad2s1210: failed to set resolution\n");
+		goto error_ret;
+	}
+	st->resolution = resolution_value[data & SET_RESOLUTION];
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+	data = read_resolution_pin(st);
+	if (data != st->resolution)
+		pr_warning("ad2s1210: resolution settings do not match\n");
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+	set_resolution_pin(st);
+#endif
+	ret = len;
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+/* read the fault register, i.e. the faults latched since the last sample */
+static ssize_t ad2s1210_show_fault(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+	ssize_t len = 0;
+	unsigned char data;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+
+	mutex_lock(&st->lock);
+	ret = config_read(st, REG_FAULT, &data);
+
+	if (ret)
+		goto error_ret;
+	len = sprintf(buf, "0x%x\n", data);
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret ? ret : len;
+}
+
+static ssize_t ad2s1210_clear_fault(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned char data;
+
+	mutex_lock(&st->lock);
+	start_sample(st);
+	/* delay (2 * tck + 20) nano seconds */
+	udelay(1);
+	stop_sample(st);
+	config_read(st, REG_FAULT, &data);
+	start_sample(st);
+	stop_sample(st);
+	mutex_unlock(&st->lock);
+
+	return len;
+}
+
+static ssize_t ad2s1210_show_reg(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned char data;
+	struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+
+	mutex_lock(&st->lock);
+	config_read(st, iattr->address, &data);
+	mutex_unlock(&st->lock);
+	return sprintf(buf, "%d\n", data);
+}
+
+static ssize_t ad2s1210_store_reg(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+	unsigned long data;
+	int ret;
+	struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+
+	ret = strict_strtoul(buf, 10, &data);
+	if (ret)
+		return -EINVAL;
+	mutex_lock(&st->lock);
+	config_write(st, iattr->address);
+	config_write(st, data & MSB_IS_LOW);
+	mutex_unlock(&st->lock);
+	return len;
+}
+
+static ssize_t ad2s1210_show_pos(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	int ret = 0;
+	ssize_t len = 0;
+	u16 pos;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+
+	st->xfer.len = 2;
+	mutex_lock(&st->lock);
+	start_sample(st);
+	/* delay (6 * tck + 20) nano seconds */
+	udelay(1);
+
+	set_mode(MOD_POS, st);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&st->xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
+	if (st->hysteresis)
+		pos >>= 16 - st->resolution;
+	len = sprintf(buf, "%d\n", pos);
+error_ret:
+	stop_sample(st);
+	/* delay (2 * tck + 20) nano seconds */
+	udelay(1);
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad2s1210_show_vel(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	unsigned short negative;
+	int ret = 0;
+	ssize_t len = 0;
+	s16 vel;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+
+	st->xfer.len = 2;
+	mutex_lock(&st->lock);
+	start_sample(st);
+	/* delay (6 * tck + 20) nano seconds */
+	udelay(1);
+
+	set_mode(MOD_VEL, st);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&st->xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	negative = st->rx[0] & 0x80;
+	vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
+	vel >>= 16 - st->resolution;
+	if (negative) {
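+		/*
+		 * Sign-extend: at 12-bit resolution the mask below is
+		 * (0xffff >> 12) << 12 = 0xf000, filling the unused top bits.
+		 */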
+		negative = (0xffff >> st->resolution) << st->resolution;
+		vel |= negative;
+	}
+	len = sprintf(buf, "%d\n", vel);
+error_ret:
+	stop_sample(st);
+	/* delay (2 * tck + 20) nano seconds */
+	udelay(1);
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t ad2s1210_show_pos_vel(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	unsigned short negative;
+	int ret = 0;
+	ssize_t len = 0;
+	u16 pos;
+	s16 vel;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s1210_state *st = idev->dev_data;
+
+	st->xfer.len = 2;
+	mutex_lock(&st->lock);
+	start_sample(st);
+	/* delay (6 * tck + 20) nano seconds */
+	udelay(1);
+
+	set_mode(MOD_POS, st);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&st->xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
+	if (st->hysteresis)
+		pos >>= 16 - st->resolution;
+	len = sprintf(buf, "%d ", pos);
+
+	st->xfer.len = 2;
+	set_mode(MOD_VEL, st);
+	spi_message_init(&msg);
+	spi_message_add_tail(&st->xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	negative = st->rx[0] & 0x80;
+	vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
+	vel >>= 16 - st->resolution;
+	if (negative) {
+		negative = (0xffff >> st->resolution) << st->resolution;
+		vel |= negative;
+	}
+	len += sprintf(buf + len, "%d\n", vel);
+error_ret:
+	stop_sample(st);
+	/* delay (2 * tck + 20) nano seconds */
+	udelay(1);
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(description,
+	"Variable Resolution, 10-Bit to 16-Bit R/D\n\
+Converter with Reference Oscillator");
+static IIO_DEVICE_ATTR(raw_io, S_IRUGO | S_IWUSR,
+		ad2s1210_show_raw, ad2s1210_store_raw, 0);
+static IIO_DEVICE_ATTR(reset, S_IWUSR,
+		NULL, ad2s1210_store_softreset, 0);
+static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
+		ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
+static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
+		ad2s1210_show_fexcit,	ad2s1210_store_fexcit, 0);
+static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR,
+		ad2s1210_show_control, ad2s1210_store_control, 0);
+static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
+		ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
+static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
+		ad2s1210_show_fault, ad2s1210_clear_fault, 0);
+static IIO_DEVICE_ATTR(pos, S_IRUGO,
+		ad2s1210_show_pos, NULL, 0);
+static IIO_DEVICE_ATTR(vel, S_IRUGO,
+		ad2s1210_show_vel, NULL, 0);
+static IIO_DEVICE_ATTR(pos_vel, S_IRUGO,
+		ad2s1210_show_pos_vel, NULL, 0);
+static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_LOS_THRD);
+static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_OVR_THRD);
+static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_MIS_THRD);
+static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MAX_THRD);
+static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MIN_THRD);
+static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_HIGH_THRD);
+static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
+		ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_LOW_THRD);
+
+static struct attribute *ad2s1210_attributes[] = {
+	&iio_const_attr_description.dev_attr.attr,
+	&iio_dev_attr_raw_io.dev_attr.attr,
+	&iio_dev_attr_reset.dev_attr.attr,
+	&iio_dev_attr_fclkin.dev_attr.attr,
+	&iio_dev_attr_fexcit.dev_attr.attr,
+	&iio_dev_attr_control.dev_attr.attr,
+	&iio_dev_attr_bits.dev_attr.attr,
+	&iio_dev_attr_fault.dev_attr.attr,
+	&iio_dev_attr_pos.dev_attr.attr,
+	&iio_dev_attr_vel.dev_attr.attr,
+	&iio_dev_attr_pos_vel.dev_attr.attr,
+	&iio_dev_attr_los_thrd.dev_attr.attr,
+	&iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
+	&iio_dev_attr_dos_mis_thrd.dev_attr.attr,
+	&iio_dev_attr_dos_rst_max_thrd.dev_attr.attr,
+	&iio_dev_attr_dos_rst_min_thrd.dev_attr.attr,
+	&iio_dev_attr_lot_high_thrd.dev_attr.attr,
+	&iio_dev_attr_lot_low_thrd.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad2s1210_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad2s1210_attributes,
+};
+
+static int __devinit ad2s1210_initial(struct ad2s1210_state *st)
+{
+	unsigned char data;
+	int ret;
+
+	mutex_lock(&st->lock);
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+	st->resolution = read_resolution_pin(st);
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+	set_resolution_pin(st);
+#endif
+
+	config_write(st, REG_CONTROL);
+	data = DEF_CONTROL & ~(SET_RESOLUTION);
+	data |= (st->resolution - RES_10) >> 1;
+	config_write(st, data);
+	ret = config_read(st, REG_CONTROL, &data);
+	if (ret)
+		goto error_ret;
+
+	if (data & MSB_IS_HIGH) {
+		ret = -EIO;
+		goto error_ret;
+	}
+
+	update_frequency_control_word(st);
+	soft_reset(st);
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static int __devinit ad2s1210_probe(struct spi_device *spi)
+{
+	struct ad2s1210_state *st;
+	int pn, ret = 0;
+	unsigned short *pins = spi->dev.platform_data;
+
+	for (pn = 0; pn < AD2S1210_PN; pn++) {
+		ret = gpio_request(pins[pn], DRV_NAME);
+		if (ret) {
+			pr_err("%s: request gpio pin %d failed\n",
+						DRV_NAME, pins[pn]);
+			goto error_ret;
+		}
+		if (pn < AD2S1210_SAA)
+			gpio_direction_output(pins[pn], 1);
+		else {
+#if defined(CONFIG_AD2S1210_GPIO_INPUT)
+			gpio_direction_input(pins[pn]);
+#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
+			gpio_direction_output(pins[pn], 1);
+#endif
+		}
+	}
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+	st->xfer.tx_buf = st->tx;
+	st->xfer.rx_buf = st->rx;
+	st->hysteresis = 1;
+	st->mode = MOD_CONFIG;
+	st->resolution = RES_12;
+	st->fclkin = AD2S1210_DEF_CLKIN;
+	st->fexcit = AD2S1210_DEF_EXCIT;
+	st->sample = pins[0];
+	st->a0 = pins[1];
+	st->a1 = pins[2];
+	st->res0 = pins[3];
+	st->res1 = pins[4];
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad2s1210_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+
+	if (spi->max_speed_hz != AD2S1210_DEF_CLKIN)
+		st->fclkin = spi->max_speed_hz;
+	spi->mode = SPI_MODE_3;
+	spi_setup(spi);
+
+	ad2s1210_initial(st);
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	for (--pn; pn >= 0; pn--)
+		gpio_free(pins[pn]);
+	return ret;
+}
+
+static int __devexit ad2s1210_remove(struct spi_device *spi)
+{
+	struct ad2s1210_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad2s1210_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad2s1210_probe,
+	.remove = __devexit_p(ad2s1210_remove),
+};
+
+static __init int ad2s1210_spi_init(void)
+{
+	return spi_register_driver(&ad2s1210_driver);
+}
+module_init(ad2s1210_spi_init);
+
+static __exit void ad2s1210_spi_exit(void)
+{
+	spi_unregister_driver(&ad2s1210_driver);
+}
+module_exit(ad2s1210_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
new file mode 100644
index 0000000..4143535
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -0,0 +1,159 @@
+/*
+ * ad2s90.c simple support for the ADI Resolver to Digital Converters: AD2S90
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#define DRV_NAME "ad2s90"
+
+struct ad2s90_state {
+	struct mutex lock;
+	struct iio_dev *idev;
+	struct spi_device *sdev;
+	u8 rx[2];
+	u8 tx[2];
+};
+
+static ssize_t ad2s90_show_angular(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct spi_message msg;
+	struct spi_transfer xfer;
+	int ret;
+	ssize_t len = 0;
+	u16 val;
+	struct iio_dev *idev = dev_get_drvdata(dev);
+	struct ad2s90_state *st = idev->dev_data;
+
+	xfer.len = 1;
+	xfer.tx_buf = st->tx;
+	xfer.rx_buf = st->rx;
+	mutex_lock(&st->lock);
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+	ret = spi_sync(st->sdev, &msg);
+	if (ret)
+		goto error_ret;
+	val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+	len = sprintf(buf, "%d\n", val);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret ? ret : len;
+}
+
+#define IIO_DEV_ATTR_SIMPLE_RESOLVER(_show) \
+	IIO_DEVICE_ATTR(angular, S_IRUGO, _show, NULL, 0)
+
+static IIO_CONST_ATTR(description,
+	"Low Cost, Complete 12-Bit Resolver-to-Digital Converter");
+static IIO_DEV_ATTR_SIMPLE_RESOLVER(ad2s90_show_angular);
+
+static struct attribute *ad2s90_attributes[] = {
+	&iio_const_attr_description.dev_attr.attr,
+	&iio_dev_attr_angular.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad2s90_attribute_group = {
+	.name = DRV_NAME,
+	.attrs = ad2s90_attributes,
+};
+
+static int __devinit ad2s90_probe(struct spi_device *spi)
+{
+	struct ad2s90_state *st;
+	int ret = 0;
+
+	st = kzalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL) {
+		ret = -ENOMEM;
+		goto error_ret;
+	}
+	spi_set_drvdata(spi, st);
+
+	mutex_init(&st->lock);
+	st->sdev = spi;
+
+	st->idev = iio_allocate_device();
+	if (st->idev == NULL) {
+		ret = -ENOMEM;
+		goto error_free_st;
+	}
+	st->idev->dev.parent = &spi->dev;
+	st->idev->num_interrupt_lines = 0;
+	st->idev->event_attrs = NULL;
+
+	st->idev->attrs = &ad2s90_attribute_group;
+	st->idev->dev_data = (void *)(st);
+	st->idev->driver_module = THIS_MODULE;
+	st->idev->modes = INDIO_DIRECT_MODE;
+
+	ret = iio_device_register(st->idev);
+	if (ret)
+		goto error_free_dev;
+
+	/* need 600ns between CS and the first falling edge of SCLK */
+	spi->max_speed_hz = 830000;
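+	/* 830 kHz gives an SCLK period of roughly 1.2 us, i.e. ~600 ns per half period */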
+	spi->mode = SPI_MODE_3;
+	spi_setup(spi);
+
+	return 0;
+
+error_free_dev:
+	iio_free_device(st->idev);
+error_free_st:
+	kfree(st);
+error_ret:
+	return ret;
+}
+
+static int __devexit ad2s90_remove(struct spi_device *spi)
+{
+	struct ad2s90_state *st = spi_get_drvdata(spi);
+
+	iio_device_unregister(st->idev);
+	kfree(st);
+
+	return 0;
+}
+
+static struct spi_driver ad2s90_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = ad2s90_probe,
+	.remove = __devexit_p(ad2s90_remove),
+};
+
+static __init int ad2s90_spi_init(void)
+{
+	return spi_register_driver(&ad2s90_driver);
+}
+module_init(ad2s90_spi_init);
+
+static __exit void ad2s90_spi_exit(void)
+{
+	spi_unregister_driver(&ad2s90_driver);
+}
+module_exit(ad2s90_spi_exit);
+
+MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index ee91a95..24b74dd 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -108,6 +108,12 @@
 	IIO_DEVICE_ATTR(name, S_IRUGO, _show, NULL, 0)
 
 /**
+ * IIO_DEV_ATTR_RESET: resets the device
+ **/
+#define IIO_DEV_ATTR_RESET(_store)			\
+	IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, _store, 0)
+
+/**
  * IIO_CONST_ATTR_NAME - constant identifier
  * @_string: the name
  **/
diff --git a/drivers/staging/intel_sst/Kconfig b/drivers/staging/intel_sst/Kconfig
index b46bd9d..8239107 100644
--- a/drivers/staging/intel_sst/Kconfig
+++ b/drivers/staging/intel_sst/Kconfig
@@ -8,6 +8,7 @@
 
 config SND_INTELMID
 	tristate "Intel MID sound card driver"
+	depends on SOUND && SND
 	select SND_PCM
 	select SND_SEQUENCER
 	select SND_JACK
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
index 24d3928..ce4a9f7 100644
--- a/drivers/staging/intel_sst/intel_sst.c
+++ b/drivers/staging/intel_sst/intel_sst.c
@@ -29,11 +29,14 @@
  *  This file contains all init functions
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
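+/* pr_fmt() prepends KBUILD_MODNAME, replacing the open-coded "sst: " prefixes below */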
+
 #include <linux/pci.h>
 #include <linux/fs.h>
 #include <linux/interrupt.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/pm_runtime.h>
 #include <asm/mrst.h>
 #include "intel_sst.h"
 #include "intel_sst_ioctl.h"
@@ -169,17 +172,17 @@
 {
 	int i, ret = 0;
 
-	pr_debug("sst: Probe for DID %x\n", pci->device);
+	pr_debug("Probe for DID %x\n", pci->device);
 	mutex_lock(&drv_ctx_lock);
 	if (sst_drv_ctx) {
-		pr_err("sst: Only one sst handle is supported\n");
+		pr_err("Only one sst handle is supported\n");
 		mutex_unlock(&drv_ctx_lock);
 		return -EBUSY;
 	}
 
 	sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
 	if (!sst_drv_ctx) {
-		pr_err("sst: intel_sst malloc fail\n");
+		pr_err("malloc fail\n");
 		mutex_unlock(&drv_ctx_lock);
 		return -ENOMEM;
 	}
@@ -226,7 +229,7 @@
 	spin_lock_init(&sst_drv_ctx->list_spin_lock);
 
 	sst_drv_ctx->max_streams = pci_id->driver_data;
-	pr_debug("sst: Got drv data max stream %d\n",
+	pr_debug("Got drv data max stream %d\n",
 				sst_drv_ctx->max_streams);
 	for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
 		struct stream_info *stream = &sst_drv_ctx->streams[i];
@@ -241,18 +244,18 @@
 			sst_drv_ctx->mmap_mem =
 				kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
 			if (sst_drv_ctx->mmap_mem) {
-				pr_debug("sst: Got memory %p size 0x%x\n",
+				pr_debug("Got memory %p size 0x%x\n",
 					sst_drv_ctx->mmap_mem,
 					sst_drv_ctx->mmap_len);
 				break;
 			}
 			if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
-				pr_err("sst: mem alloc fail...abort!!\n");
+				pr_err("mem alloc fail...abort!!\n");
 				ret = -ENOMEM;
 				goto free_process_reply_wq;
 			}
 			sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
-			pr_debug("sst:mem alloc failed...trying %d\n",
+			pr_debug("mem alloc failed...trying %d\n",
 						sst_drv_ctx->mmap_len);
 		}
 	}
@@ -260,7 +263,7 @@
 	/* Init the device */
 	ret = pci_enable_device(pci);
 	if (ret) {
-		pr_err("sst: device cant be enabled\n");
+		pr_err("device can't be enabled\n");
 		goto do_free_mem;
 	}
 	sst_drv_ctx->pci = pci_dev_get(pci);
@@ -273,25 +276,25 @@
 	sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
 	if (!sst_drv_ctx->shim)
 		goto do_release_regions;
-	pr_debug("sst: SST Shim Ptr %p\n", sst_drv_ctx->shim);
+	pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
 
 	/* Shared SRAM */
 	sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
 	if (!sst_drv_ctx->mailbox)
 		goto do_unmap_shim;
-	pr_debug("sst: SRAM Ptr %p\n", sst_drv_ctx->mailbox);
+	pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
 
 	/* IRAM */
 	sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
 	if (!sst_drv_ctx->iram)
 		goto do_unmap_sram;
-	pr_debug("sst:IRAM Ptr %p\n", sst_drv_ctx->iram);
+	pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
 
 	/* DRAM */
 	sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
 	if (!sst_drv_ctx->dram)
 		goto do_unmap_iram;
-	pr_debug("sst: DRAM Ptr %p\n", sst_drv_ctx->dram);
+	pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
 
 	mutex_lock(&sst_drv_ctx->sst_lock);
 	sst_drv_ctx->sst_state = SST_UN_INIT;
@@ -301,26 +304,31 @@
 		IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
 	if (ret)
 		goto do_unmap_dram;
-	pr_debug("sst: Registered IRQ 0x%x\n", pci->irq);
+	pr_debug("Registered IRQ 0x%x\n", pci->irq);
+
+	/*Register LPE Control as misc driver*/
+	ret = misc_register(&lpe_ctrl);
+	if (ret) {
+		pr_err("couldn't register control device\n");
+		goto do_free_irq;
+	}
 
 	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
 		ret = misc_register(&lpe_dev);
 		if (ret) {
-			pr_err("sst: couldn't register LPE device\n");
-			goto do_free_irq;
-		}
-
-		/*Register LPE Control as misc driver*/
-		ret = misc_register(&lpe_ctrl);
-		if (ret) {
-			pr_err("sst: couldn't register misc driver\n");
-			goto do_free_irq;
-		}
+			pr_err("couldn't register misc driver\n");
+			goto do_free_misc;
+		}
 	}
 	sst_drv_ctx->lpe_stalled = 0;
-	pr_debug("sst: ...successfully done!!!\n");
+	pm_runtime_set_active(&pci->dev);
+	pm_runtime_enable(&pci->dev);
+	pm_runtime_allow(&pci->dev);
+	pr_debug("...successfully done!!!\n");
 	return ret;
 
+do_free_misc:
+	misc_deregister(&lpe_ctrl);
 do_free_irq:
 	free_irq(pci->irq, sst_drv_ctx);
 do_unmap_dram:
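The new do_free_misc label keeps the probe error path unwinding in strict reverse order of acquisition: a failure while registering lpe_dev now deregisters the already-registered lpe_ctrl before falling through to the IRQ and iounmap labels. A small self-contained sketch of the pattern with hypothetical resources (not the driver's own helpers):

static int acquire_a(void) { return 0; }	/* hypothetical resources */
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int example_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;
	ret = acquire_b();
	if (ret)
		goto free_a;
	ret = acquire_c();
	if (ret)
		goto free_b;
	return 0;

free_b:			/* labels undo in reverse order of acquisition */
	release_b();
free_a:
	release_a();
	return ret;
}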
@@ -347,7 +355,7 @@
 	destroy_workqueue(sst_drv_ctx->mad_wq);
 do_free_drv_ctx:
 	kfree(sst_drv_ctx);
-	pr_err("sst: Probe failed with 0x%x\n", ret);
+	pr_err("Probe failed with 0x%x\n", ret);
 	return ret;
 }
 
@@ -365,10 +373,9 @@
 	mutex_lock(&sst_drv_ctx->sst_lock);
 	sst_drv_ctx->sst_state = SST_UN_INIT;
 	mutex_unlock(&sst_drv_ctx->sst_lock);
-	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
+	misc_deregister(&lpe_ctrl);
+	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
 		misc_deregister(&lpe_dev);
-		misc_deregister(&lpe_ctrl);
-	}
 	free_irq(pci->irq, sst_drv_ctx);
 	iounmap(sst_drv_ctx->dram);
 	iounmap(sst_drv_ctx->iram);
@@ -404,10 +411,12 @@
 {
 	union config_status_reg csr;
 
-	pr_debug("sst: intel_sst_suspend called\n");
+	pr_debug("intel_sst_suspend called\n");
 
-	if (sst_drv_ctx->pb_streams != 0 || sst_drv_ctx->cp_streams != 0)
-		return -EPERM;
+	if (sst_drv_ctx->stream_cnt) {
+		pr_err("active streams, not able to suspend\n");
+		return -EBUSY;
+	}
 	/*Assert RESET on LPE Processor*/
 	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
 	csr.full = csr.full | 0x2;
@@ -434,17 +443,17 @@
 {
 	int ret = 0;
 
-	pr_debug("sst: intel_sst_resume called\n");
+	pr_debug("intel_sst_resume called\n");
 	if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
-		pr_err("sst: SST is not in suspended state\n");
-		return -EPERM;
+		pr_err("SST is not in suspended state\n");
+		return 0;
 	}
 	sst_drv_ctx = pci_get_drvdata(pci);
 	pci_set_power_state(pci, PCI_D0);
 	pci_restore_state(pci);
 	ret = pci_enable_device(pci);
 	if (ret)
-		pr_err("sst: device cant be enabled\n");
+		pr_err("device can't be enabled\n");
 
 	mutex_lock(&sst_drv_ctx->sst_lock);
 	sst_drv_ctx->sst_state = SST_UN_INIT;
@@ -452,6 +461,34 @@
 	return 0;
 }
 
+static int intel_sst_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	pr_debug("runtime_suspend called\n");
+	return intel_sst_suspend(pci_dev, PMSG_SUSPEND);
+}
+
+static int intel_sst_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	pr_debug("runtime_resume called\n");
+	return intel_sst_resume(pci_dev);
+}
+
+static int intel_sst_runtime_idle(struct device *dev)
+{
+	pr_debug("runtime_idle called\n");
+	if (sst_drv_ctx->stream_cnt == 0 && sst_drv_ctx->am_cnt == 0)
+		pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
+	return -EBUSY;
+}
+
+static const struct dev_pm_ops intel_sst_pm = {
+	.runtime_suspend = intel_sst_runtime_suspend,
+	.runtime_resume = intel_sst_runtime_resume,
+	.runtime_idle = intel_sst_runtime_idle,
+};
+
 /* PCI Routines */
 static struct pci_device_id intel_sst_ids[] = {
 	{ PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), 3},
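The runtime-PM callbacks added here reuse the existing system suspend/resume handlers and add an idle hook that only schedules a delayed suspend once no streams or audio-manager handles are open (SST_SUSPEND_DELAY is defined as 2000 ms in intel_sst_common.h further down). A minimal sketch of how such hooks are typically wired up, with hypothetical callbacks rather than the SST ones:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical callbacks; a real driver would quiesce and wake the hardware. */
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev) { return 0; }

static int example_runtime_idle(struct device *dev)
{
	/* Queue a delayed suspend rather than suspending synchronously. */
	pm_schedule_suspend(dev, 2000);		/* delay in milliseconds */
	return -EBUSY;
}

static const struct dev_pm_ops example_pm_ops = {
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume  = example_runtime_resume,
	.runtime_idle    = example_runtime_idle,
};

After probe succeeds, the pm_runtime_set_active()/pm_runtime_enable()/pm_runtime_allow() calls seen above mark the device active and let user space permit runtime suspend.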
@@ -468,6 +505,9 @@
 #ifdef CONFIG_PM
 	.suspend = intel_sst_suspend,
 	.resume = intel_sst_resume,
+	.driver = {
+		.pm = &intel_sst_pm,
+	},
 #endif
 };
 
@@ -482,14 +522,14 @@
 {
 	/* Init all variables, data structure etc....*/
 	int ret = 0;
-	pr_debug("sst: INFO: ******** SST DRIVER loading.. Ver: %s\n",
+	pr_debug("INFO: ******** SST DRIVER loading.. Ver: %s\n",
 				       SST_DRIVER_VERSION);
 
 	mutex_init(&drv_ctx_lock);
 	/* Register with PCI */
 	ret = pci_register_driver(&driver);
 	if (ret)
-		pr_err("sst: PCI register failed\n");
+		pr_err("PCI register failed\n");
 	return ret;
 }
 
@@ -504,7 +544,7 @@
 {
 	pci_unregister_driver(&driver);
 
-	pr_debug("sst: driver unloaded\n");
+	pr_debug("driver unloaded\n");
 	return;
 }
 
diff --git a/drivers/staging/intel_sst/intel_sst.h b/drivers/staging/intel_sst/intel_sst.h
index 1f19f0d..cb03ff7 100644
--- a/drivers/staging/intel_sst/intel_sst.h
+++ b/drivers/staging/intel_sst/intel_sst.h
@@ -29,6 +29,7 @@
  *	and middleware.
  *  This file is shared between the SST and MAD drivers
  */
+#include "intel_sst_ioctl.h"
 
 #define SST_CARD_NAMES "intel_mid_card"
 
@@ -107,10 +108,15 @@
 	int (*power_down_pmic) (void);
 };
 
+struct intel_sst_pcm_control {
+	int (*open) (struct snd_sst_params *str_param);
+	int (*device_control) (int cmd, void *arg);
+	int (*close) (unsigned int str_id);
+};
 struct intel_sst_card_ops {
 	char *module_name;
 	unsigned int  vendor_id;
-	int (*control_set) (int control_element, void *value);
+	struct intel_sst_pcm_control *pcm_control;
 	struct snd_pmic_ops *scard_ops;
 };
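Replacing the single control_set callback with struct intel_sst_pcm_control turns the card interface into a small ops table: open() allocates a stream and returns its id, device_control() multiplexes the SST_SND_* commands, and close() frees the stream. A hedged sketch of a hypothetical in-kernel consumer, assuming the SST_SND_START command value from the shared header and the semantics implemented by sst_open_pcm_stream()/sst_device_control()/sst_close_pcm_stream() in intel_sst_drv_interface.c further down:

#include <linux/errno.h>
#include "intel_sst.h"

/* "card" is the intel_sst_card_ops the sound-card driver registered. */
static int example_play(struct intel_sst_card_ops *card,
			struct snd_sst_params *params)
{
	int str_id, ret;

	str_id = card->pcm_control->open(params);
	if (str_id <= 0)
		return str_id ? str_id : -EINVAL;

	ret = card->pcm_control->device_control(SST_SND_START, &str_id);
	if (ret)
		return ret;
	return card->pcm_control->close(str_id);
}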
 
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 9914400..a367991 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -27,12 +27,15 @@
  *  Upper layer interfaces (MAD driver, MMF) to SST driver
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/fs.h>
 #include <linux/uio.h>
 #include <linux/aio.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/pm_runtime.h>
 #include <linux/ioctl.h>
 #ifdef CONFIG_MRST_RAR_HANDLER
 #include <linux/rar_register.h>
@@ -58,14 +61,14 @@
 {
 	int retval = 0;
 	if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
-		pr_warn("sst: Sound card not availble\n ");
+		pr_warn("Sound card not available\n");
 		return -EIO;
 	}
 	if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
-		pr_debug("sst: Resuming from Suspended state\n");
+		pr_debug("Resuming from Suspended state\n");
 		retval = intel_sst_resume(sst_drv_ctx->pci);
 		if (retval) {
-			pr_debug("sst: Resume Failed= %#x,abort\n", retval);
+			pr_debug("Resume Failed= %#x,abort\n", retval);
 			return retval;
 		}
 	}
@@ -97,15 +100,22 @@
  */
 int intel_sst_open(struct inode *i_node, struct file *file_ptr)
 {
-	int retval = intel_sst_check_device();
-	if (retval)
-		return retval;
+	unsigned int retval;
 
 	mutex_lock(&sst_drv_ctx->stream_lock);
+	pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
+	retval = intel_sst_check_device();
+	if (retval) {
+		pm_runtime_put(&sst_drv_ctx->pci->dev);
+		mutex_unlock(&sst_drv_ctx->stream_lock);
+		return retval;
+	}
+
 	if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
 		struct ioctl_pvt_data *data =
 			kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
 		if (!data) {
+			pm_runtime_put(&sst_drv_ctx->pci->dev);
 			mutex_unlock(&sst_drv_ctx->stream_lock);
 			return -ENOMEM;
 		}
@@ -115,9 +125,10 @@
 		data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
 		data->str_id = 0;
 		file_ptr->private_data = (void *)data;
-		pr_debug("sst: pvt_id handle = %d!\n", data->pvt_id);
+		pr_debug("pvt_id handle = %d!\n", data->pvt_id);
 	} else {
 		retval = -EUSERS;
+		pm_runtime_put(&sst_drv_ctx->pci->dev);
 		mutex_unlock(&sst_drv_ctx->stream_lock);
 	}
 	return retval;
@@ -136,18 +147,26 @@
  */
 int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
 {
-	int retval = intel_sst_check_device();
-	if (retval)
-		return retval;
+	unsigned int retval;
 
 	/* audio manager open */
 	mutex_lock(&sst_drv_ctx->stream_lock);
+	pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
+	retval = intel_sst_check_device();
+	if (retval) {
+		pm_runtime_put(&sst_drv_ctx->pci->dev);
+		mutex_unlock(&sst_drv_ctx->stream_lock);
+		return retval;
+	}
+
 	if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
 		sst_drv_ctx->am_cnt++;
-		pr_debug("sst: AM handle opened...\n");
+		pr_debug("AM handle opened...\n");
 		file_ptr->private_data = NULL;
-	} else
+	} else {
 		retval = -EACCES;
+		pm_runtime_put(&sst_drv_ctx->pci->dev);
+	}
 
 	mutex_unlock(&sst_drv_ctx->stream_lock);
 	return retval;
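Both open paths now take a runtime-PM reference with pm_runtime_get_sync() before touching the device and drop it with pm_runtime_put() on every failure branch, pairing with the put in the release handlers below so the usage count stays balanced. A minimal sketch of the invariant, using a hypothetical check instead of intel_sst_check_device():

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_check_device(void) { return 0; }	/* hypothetical */

static int example_open(struct device *dev)
{
	int ret;

	pm_runtime_get_sync(dev);	/* resume the device and take a reference */
	ret = example_check_device();
	if (ret)
		pm_runtime_put(dev);	/* every error exit must drop the reference */
	return ret;
}

static int example_release(struct device *dev)
{
	pm_runtime_put(dev);		/* pairs with the get in example_open() */
	return 0;
}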
@@ -166,10 +185,11 @@
 {
 	struct ioctl_pvt_data *data = file_ptr->private_data;
 
-	pr_debug("sst: Release called, closing app handle\n");
+	pr_debug("Release called, closing app handle\n");
 	mutex_lock(&sst_drv_ctx->stream_lock);
 	sst_drv_ctx->encoded_cnt--;
 	sst_drv_ctx->stream_cnt--;
+	pm_runtime_put(&sst_drv_ctx->pci->dev);
 	mutex_unlock(&sst_drv_ctx->stream_lock);
 	free_stream_context(data->str_id);
 	kfree(data);
@@ -181,8 +201,9 @@
 	/* audio manager close */
 	mutex_lock(&sst_drv_ctx->stream_lock);
 	sst_drv_ctx->am_cnt--;
+	pm_runtime_put(&sst_drv_ctx->pci->dev);
 	mutex_unlock(&sst_drv_ctx->stream_lock);
-	pr_debug("sst: AM handle closed\n");
+	pr_debug("AM handle closed\n");
 	return 0;
 }
 
@@ -208,7 +229,7 @@
 		return -EINVAL;
 
 	length = vma->vm_end - vma->vm_start;
-	pr_debug("sst: called for stream %d length 0x%x\n", str_id, length);
+	pr_debug("called for stream %d length 0x%x\n", str_id, length);
 
 	if (length > sst_drv_ctx->mmap_len)
 		return -ENOMEM;
@@ -231,7 +252,7 @@
 	else
 		sst_drv_ctx->streams[str_id].mmapped = true;
 
-	pr_debug("sst: mmap ret 0x%x\n", retval);
+	pr_debug("mmap ret 0x%x\n", retval);
 	return retval;
 }
 
@@ -245,7 +266,7 @@
 	struct snd_sst_mmap_buff_entry *buf_entry;
 	struct snd_sst_mmap_buff_entry *tmp_buf;
 
-	pr_debug("sst:called for str_id %d\n", str_id);
+	pr_debug("called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
@@ -270,7 +291,7 @@
 		goto out_free;
 	}
 
-	pr_debug("sst:new buffers count %d status %d\n",
+	pr_debug("new buffers count %d status %d\n",
 			mmap_buf->entries, stream->status);
 	buf_entry = tmp_buf;
 	for (i = 0; i < mmap_buf->entries; i++) {
@@ -300,14 +321,14 @@
 		stream->status = STREAM_RUNNING;
 		if (stream->ops == STREAM_OPS_PLAYBACK) {
 			if (sst_play_frame(str_id) < 0) {
-				pr_warn("sst: play frames fail\n");
+				pr_warn("play frames fail\n");
 				mutex_unlock(&stream->lock);
 				retval = -EIO;
 				goto out_free;
 			}
 		} else if (stream->ops == STREAM_OPS_CAPTURE) {
 			if (sst_capture_frame(str_id) < 0) {
-				pr_warn("sst: capture frame fail\n");
+				pr_warn("capture frame fail\n");
 				mutex_unlock(&stream->lock);
 				retval = -EIO;
 				goto out_free;
@@ -324,7 +345,7 @@
 
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
-	pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
+	pr_debug("end of play/rec ioctl bytes = %d!!\n", retval);
 
 out_free:
 	kfree(tmp_buf);
@@ -349,7 +370,7 @@
 
 	if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
 		/* stream is not started yet */
-		pr_debug("sst: Stream isn't in started state %d, prev %d\n",
+		pr_debug("Stream isn't in started state %d, prev %d\n",
 			stream->status, stream->prev);
 	} else if ((stream->status == STREAM_RUNNING ||
 			stream->status == STREAM_PAUSED) &&
@@ -358,13 +379,13 @@
 		if (stream->ops == STREAM_OPS_PLAYBACK ||
 				stream->ops == STREAM_OPS_PLAYBACK_DRM) {
 			if (sst_play_frame(str_id) < 0) {
-				pr_warn("sst: play frames failed\n");
+				pr_warn("play frames failed\n");
 				mutex_unlock(&stream->lock);
 				return -EIO;
 			}
 		} else if (stream->ops == STREAM_OPS_CAPTURE) {
 			if (sst_capture_frame(str_id) < 0) {
-				pr_warn("sst: capture frames failed\n ");
+				pr_warn("capture frames failed\n");
 				mutex_unlock(&stream->lock);
 				return -EIO;
 			}
@@ -379,7 +400,7 @@
 	retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
 	if (retval) {
 		stream->status = STREAM_INIT;
-		pr_debug("sst: wait returned error...\n");
+		pr_debug("wait returned error...\n");
 	}
 	return retval;
 }
@@ -477,7 +498,7 @@
 		if (((unsigned long)iovec[index].iov_base
 				+ iovec[index].iov_len) <
 				((unsigned long)iovec[index].iov_base)) {
-			pr_debug("sst: Buffer overflows");
+			pr_debug("Buffer overflows\n");
 			kfree(stream_bufs);
 			return -EINVAL;
 		}
@@ -490,7 +511,7 @@
 		}
 
 		copied_size += size;
-		pr_debug("sst: copied_size - %lx\n", copied_size);
+		pr_debug("copied_size - %lx\n", copied_size);
 		if ((copied_size >= mmap_len) ||
 				(stream->sg_index == nr_segs)) {
 			add_to_list = 1;
@@ -520,7 +541,7 @@
 	int retval = 0;
 
 	/* copy sent buffers */
-	pr_debug("sst: capture stream copying to user now...\n");
+	pr_debug("capture stream copying to user now...\n");
 	list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
 		if (kbufs->in_use == true) {
 			/* copy to user */
@@ -538,7 +559,7 @@
 			}
 		}
 	}
-	pr_debug("sst: end of cap copy\n");
+	pr_debug("end of cap copy\n");
 	return retval;
 }
 
@@ -590,7 +611,7 @@
 		return -EINVAL;
 	stream = &sst_drv_ctx->streams[str_id];
 	if (stream->mmapped == true) {
-		pr_warn("sst: user write and stream is mapped");
+		pr_warn("user write and stream is mapped\n");
 		return -EIO;
 	}
 	if (!count)
@@ -598,7 +619,7 @@
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
 	/* copy user buf details */
-	pr_debug("sst: new buffers %p, copy size %d, status %d\n" ,
+	pr_debug("new buffers %p, copy size %d, status %d\n",
 			buf, (int) count, (int) stream->status);
 
 	stream->buf_type = SST_BUF_USER_STATIC;
@@ -618,7 +639,7 @@
 	stream->cur_ptr = NULL;
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
-	pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
+	pr_debug("end of play/rec bytes = %d!!\n", retval);
 	return retval;
 }
 
@@ -639,7 +660,7 @@
 	int str_id = data->str_id;
 	struct stream_info *stream = &sst_drv_ctx->streams[str_id];
 
-	pr_debug("sst: called for %d\n", str_id);
+	pr_debug("called for %d\n", str_id);
 	if (stream->status == STREAM_UN_INIT ||
 		stream->status == STREAM_DECODE) {
 		return -EBADRQC;
@@ -665,12 +686,12 @@
 	int str_id = data->str_id;
 	struct stream_info *stream;
 
-	pr_debug("sst: entry - %ld\n", nr_segs);
+	pr_debug("entry - %ld\n", nr_segs);
 
 	if (is_sync_kiocb(kiocb) == false)
 		return -EINVAL;
 
-	pr_debug("sst: called for str_id %d\n", str_id);
+	pr_debug("called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
@@ -683,7 +704,7 @@
 	}
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
-	pr_debug("sst: new segs %ld, offset %d, status %d\n" ,
+	pr_debug("new segs %ld, offset %d, status %d\n",
 			nr_segs, (int) offset, (int) stream->status);
 	stream->buf_type = SST_BUF_USER_STATIC;
 	do {
@@ -698,7 +719,7 @@
 	stream->cur_ptr = NULL;
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
-	pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
+	pr_debug("end of play/rec bytes = %d!!\n", retval);
 	return retval;
 }
 
@@ -719,7 +740,7 @@
 	int str_id = data->str_id;
 	struct stream_info *stream = &sst_drv_ctx->streams[str_id];
 
-	pr_debug("sst: called for %d\n", str_id);
+	pr_debug("called for %d\n", str_id);
 	if (stream->status == STREAM_UN_INIT ||
 			stream->status == STREAM_DECODE)
 		return -EBADRQC;
@@ -744,14 +765,14 @@
 	int str_id = data->str_id;
 	struct stream_info *stream;
 
-	pr_debug("sst: entry - %ld\n", nr_segs);
+	pr_debug("entry - %ld\n", nr_segs);
 
 	if (is_sync_kiocb(kiocb) == false) {
-		pr_debug("sst: aio_read from user space is not allowed\n");
+		pr_debug("aio_read from user space is not allowed\n");
 		return -EINVAL;
 	}
 
-	pr_debug("sst: called for str_id %d\n", str_id);
+	pr_debug("called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
@@ -764,7 +785,7 @@
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
 
-	pr_debug("sst: new segs %ld, offset %d, status %d\n" ,
+	pr_debug("new segs %ld, offset %d, status %d\n",
 			nr_segs, (int) offset, (int) stream->status);
 	stream->buf_type = SST_BUF_USER_STATIC;
 	do {
@@ -779,34 +800,169 @@
 	stream->cur_ptr = NULL;
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
-	pr_debug("sst: end of play/rec bytes = %d!!\n", retval);
+	pr_debug("end of play/rec bytes = %d!!\n", retval);
 	return retval;
 }
 
 /* sst_print_stream_params - prints the stream parameters (debug fn)*/
 static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
 {
-	pr_debug("sst: codec params:result =%d\n",
+	pr_debug("codec params:result = %d\n",
 				get_prm->codec_params.result);
-	pr_debug("sst: codec params:stream = %d\n",
+	pr_debug("codec params:stream = %d\n",
 				get_prm->codec_params.stream_id);
-	pr_debug("sst: codec params:codec = %d\n",
+	pr_debug("codec params:codec = %d\n",
 				get_prm->codec_params.codec);
-	pr_debug("sst: codec params:ops = %d\n",
+	pr_debug("codec params:ops = %d\n",
 				get_prm->codec_params.ops);
-	pr_debug("sst: codec params:stream_type= %d\n",
+	pr_debug("codec params:stream_type = %d\n",
 				get_prm->codec_params.stream_type);
-	pr_debug("sst: pcmparams:sfreq= %d\n",
+	pr_debug("pcmparams:sfreq = %d\n",
 				get_prm->pcm_params.sfreq);
-	pr_debug("sst: pcmparams:num_chan= %d\n",
+	pr_debug("pcmparams:num_chan = %d\n",
 				get_prm->pcm_params.num_chan);
-	pr_debug("sst: pcmparams:pcm_wd_sz= %d\n",
+	pr_debug("pcmparams:pcm_wd_sz = %d\n",
 				get_prm->pcm_params.pcm_wd_sz);
 	return;
 }
 
 /**
- * intel_sst_ioctl - recieves the device ioctl's
+ * sst_create_algo_ipc - create ipc msg for algorithm parameters
+ *
+ * @algo_params: Algorithm parameters
+ * @msg: post msg pointer
+ *
+ * This function is called to create ipc msg
+ */
+int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
+					struct ipc_post **msg)
+{
+	if (sst_create_large_msg(msg))
+		return -ENOMEM;
+	sst_fill_header(&(*msg)->header,
+			IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
+	(*msg)->header.part.data = sizeof(u32) +
+			sizeof(*algo_params) + algo_params->size;
+	memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
+	memcpy((*msg)->mailbox_data + sizeof(u32),
+				algo_params, sizeof(*algo_params));
+	return 0;
+}
+
+/**
+ * sst_send_algo_ipc - send ipc msg for algorithm parameters
+ *
+ * @msg: post msg pointer
+ *
+ * This function is called to send ipc msg
+ */
+int sst_send_algo_ipc(struct ipc_post **msg)
+{
+	sst_drv_ctx->ppp_params_blk.condition = false;
+	sst_drv_ctx->ppp_params_blk.ret_code = 0;
+	sst_drv_ctx->ppp_params_blk.on = true;
+	sst_drv_ctx->ppp_params_blk.data = NULL;
+	spin_lock(&sst_drv_ctx->list_spin_lock);
+	list_add_tail(&(*msg)->node, &sst_drv_ctx->ipc_dispatch_list);
+	spin_unlock(&sst_drv_ctx->list_spin_lock);
+	sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
+	return sst_wait_interruptible_timeout(sst_drv_ctx,
+			&sst_drv_ctx->ppp_params_blk, SST_BLOCK_TIMEOUT);
+}
+
+/**
+ * intel_sst_ioctl_dsp - receives the device ioctl's
+ *
+ * @cmd:Ioctl cmd
+ * @arg:data
+ *
+ * This function is called when a user space component
+ * sends a DSP Ioctl to SST driver
+ */
+long intel_sst_ioctl_dsp(unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	struct snd_ppp_params algo_params;
+	struct snd_ppp_params *algo_params_copied;
+	struct ipc_post *msg;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(SNDRV_SST_SET_ALGO):
+		if (copy_from_user(&algo_params, (void __user *)arg,
+							sizeof(algo_params)))
+			return -EFAULT;
+		if (algo_params.size > SST_MAILBOX_SIZE)
+			return -EMSGSIZE;
+
+		pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+			algo_params.algo_id, algo_params.str_id,
+			algo_params.enable, algo_params.size);
+		retval = sst_create_algo_ipc(&algo_params, &msg);
+		if (retval)
+			break;
+		algo_params.reserved = 0;
+		if (copy_from_user(msg->mailbox_data + sizeof(algo_params),
+				algo_params.params, algo_params.size))
+			return -EFAULT;
+
+		retval = sst_send_algo_ipc(&msg);
+		if (retval) {
+			pr_debug("Error in sst_set_algo = %d\n", retval);
+			retval = -EIO;
+		}
+		break;
+
+	case _IOC_NR(SNDRV_SST_GET_ALGO):
+		if (copy_from_user(&algo_params, (void __user *)arg,
+							sizeof(algo_params)))
+			return -EFAULT;
+		pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
+			algo_params.algo_id, algo_params.str_id,
+			algo_params.enable, algo_params.size);
+		retval = sst_create_algo_ipc(&algo_params, &msg);
+		if (retval)
+			break;
+		algo_params.reserved = 1;
+		retval = sst_send_algo_ipc(&msg);
+		if (retval) {
+			pr_debug("Error in sst_get_algo = %d\n", retval);
+			retval = -EIO;
+			break;
+		}
+		algo_params_copied = (struct snd_ppp_params *)
+					sst_drv_ctx->ppp_params_blk.data;
+		if (algo_params_copied->size > algo_params.size) {
+			pr_debug("mem insufficient to copy\n");
+			retval = -EMSGSIZE;
+			goto free_mem;
+		} else {
+			char __user *tmp;
+
+			if (copy_to_user(algo_params.params,
+					algo_params_copied->params,
+					algo_params_copied->size)) {
+				retval = -EFAULT;
+				goto free_mem;
+			}
+			tmp = (char __user *)arg + offsetof(
+					struct snd_ppp_params, size);
+			if (copy_to_user(tmp, &algo_params_copied->size,
+						 sizeof(__u32))) {
+				retval = -EFAULT;
+				goto free_mem;
+			}
+
+		}
+free_mem:
+		kfree(algo_params_copied->params);
+		kfree(algo_params_copied);
+		break;
+	}
+	return retval;
+}
+
+/**
+ * intel_sst_ioctl - receives the device ioctl's
  * @file_ptr:pointer to file
  * @cmd:Ioctl cmd
  * @arg:data
@@ -832,7 +988,7 @@
 
 	switch (_IOC_NR(cmd)) {
 	case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
-		pr_debug("sst: IOCTL_PAUSE recieved for %d!\n", str_id);
+		pr_debug("IOCTL_PAUSE received for %d!\n", str_id);
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
@@ -841,7 +997,7 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_RESUME):
-		pr_debug("sst: SNDRV_SST_IOCTL_RESUME recieved!\n");
+		pr_debug("SNDRV_SST_IOCTL_RESUME received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
@@ -852,7 +1008,7 @@
 	case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
 		struct snd_sst_params str_param;
 
-		pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
+		pr_debug("IOCTL_SET_PARAMS received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
@@ -884,7 +1040,7 @@
 					retval = -EINVAL;
 			}
 		} else {
-			pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
+			pr_debug("SET_STREAM_PARAMS received!\n");
 			/* allocated set params only */
 			retval = sst_set_stream_param(str_id, &str_param);
 			/* Block the call for reply */
@@ -907,14 +1063,14 @@
 
 		if (copy_from_user(&set_vol, (void __user *)arg,
 				sizeof(set_vol))) {
-			pr_debug("sst: copy failed\n");
+			pr_debug("copy failed\n");
 			retval = -EFAULT;
 			break;
 		}
-		pr_debug("sst: SET_VOLUME recieved for %d!\n",
+		pr_debug("SET_VOLUME received for %d!\n",
 				set_vol.stream_id);
 		if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
-			pr_debug("sst: invalid operation!\n");
+			pr_debug("invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
@@ -929,10 +1085,10 @@
 			retval = -EFAULT;
 			break;
 		}
-		pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
+		pr_debug("IOCTL_GET_VOLUME received for stream = %d!\n",
 				get_vol.stream_id);
 		if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
-			pr_debug("sst: invalid operation!\n");
+			pr_debug("invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
@@ -941,7 +1097,7 @@
 			retval = -EIO;
 			break;
 		}
-		pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
+		pr_debug("id:%d, vol:%d, ramp_dur:%d, ramp_type:%d\n",
 				get_vol.stream_id, get_vol.volume,
 				get_vol.ramp_duration, get_vol.ramp_type);
 		if (copy_to_user((struct snd_sst_vol __user *)arg,
@@ -961,7 +1117,7 @@
 			retval = -EFAULT;
 			break;
 		}
-		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
+		pr_debug("SNDRV_SST_SET_VOLUME received for %d!\n",
 			set_mute.stream_id);
 		if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
 			retval = -EPERM;
@@ -973,7 +1129,7 @@
 	case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
 		struct snd_sst_get_stream_params get_params;
 
-		pr_debug("sst: IOCTL_GET_PARAMS recieved!\n");
+		pr_debug("IOCTL_GET_PARAMS received!\n");
 		if (minor != 0) {
 			retval = -EBADRQC;
 			break;
@@ -997,7 +1153,7 @@
 	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
 		struct snd_sst_mmap_buffs mmap_buf;
 
-		pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
+		pr_debug("SNDRV_SST_MMAP_PLAY/CAPTURE received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
@@ -1011,7 +1167,7 @@
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_STREAM_DROP):
-		pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
+		pr_debug("SNDRV_SST_IOCTL_DROP received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EINVAL;
 			break;
@@ -1023,7 +1179,7 @@
 		struct snd_sst_tstamp tstamp = {0};
 		unsigned long long time, freq, mod;
 
-		pr_debug("sst: SNDRV_SST_STREAM_GET_TSTAMP recieved!\n");
+		pr_debug("SNDRV_SST_STREAM_GET_TSTAMP received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
@@ -1044,7 +1200,7 @@
 	case _IOC_NR(SNDRV_SST_STREAM_START):{
 		struct stream_info *stream;
 
-		pr_debug("sst: SNDRV_SST_STREAM_START recieved!\n");
+		pr_debug("SNDRV_SST_STREAM_START received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EINVAL;
 			break;
@@ -1083,7 +1239,7 @@
 	case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
 		struct snd_sst_target_device target_device;
 
-		pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
+		pr_debug("SET_TARGET_DEVICE received!\n");
 		if (copy_from_user(&target_device, (void __user *)arg,
 				sizeof(target_device))) {
 			retval = -EFAULT;
@@ -1100,7 +1256,7 @@
 	case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
 		struct snd_sst_driver_info info;
 
-		pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
+		pr_debug("SNDRV_SST_DRIVER_INFO received\n");
 		info.version = SST_VERSION_NUM;
 		/* hard coding, shud get sumhow later */
 		info.active_pcm_streams = sst_drv_ctx->stream_cnt -
@@ -1122,7 +1278,7 @@
 		struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
 		char __user *dest;
 
-		pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
+		pr_debug("SNDRV_SST_STREAM_DECODE received\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
@@ -1197,7 +1353,7 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
-		pr_debug("sst: SNDRV_SST_STREAM_DRAIN recived\n");
+		pr_debug("SNDRV_SST_STREAM_DRAIN received\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EINVAL;
 			break;
@@ -1209,7 +1365,7 @@
 		unsigned long long __user *bytes = (unsigned long long __user *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 
-		pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
+		pr_debug("STREAM_BYTES_DECODED received!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EINVAL;
 			break;
@@ -1225,7 +1381,7 @@
 	case _IOC_NR(SNDRV_SST_FW_INFO): {
 		struct snd_sst_fw_info *fw_info;
 
-		pr_debug("sst: SNDRV_SST_FW_INFO recived\n");
+		pr_debug("SNDRV_SST_FW_INFO received\n");
 
 		fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
 		if (!fw_info) {
@@ -1248,10 +1404,18 @@
 		kfree(fw_info);
 		break;
 	}
+	case _IOC_NR(SNDRV_SST_GET_ALGO):
+	case _IOC_NR(SNDRV_SST_SET_ALGO):
+		if (minor != AM_MODULE) {
+			retval = -EBADRQC;
+			break;
+		}
+		retval = intel_sst_ioctl_dsp(cmd, arg);
+		break;
 	default:
 		retval = -EINVAL;
 	}
-	pr_debug("sst: intel_sst_ioctl:complete ret code = %d\n", retval);
+	pr_debug("intel_sst_ioctl: complete ret code = %d\n", retval);
 	return retval;
 }
 
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index bf0ead7..0a60e86 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -28,15 +28,15 @@
  *  Common private declarations for SST
  */
 
-#define SST_DRIVER_VERSION "1.2.05"
-#define SST_VERSION_NUM 0x1205
+#define SST_DRIVER_VERSION "1.2.09"
+#define SST_VERSION_NUM 0x1209
 
 /* driver names */
 #define SST_DRV_NAME "intel_sst_driver"
-#define SST_FW_FILENAME_MRST "fw_sst_080a.bin"
-#define SST_FW_FILENAME_MFLD "fw_sst_082f.bin"
 #define SST_MRST_PCI_ID 0x080A
 #define SST_MFLD_PCI_ID 0x082F
+#define PCI_ID_LENGTH 4
+#define SST_SUSPEND_DELAY 2000
 
 enum sst_states {
 	SST_FW_LOADED = 1,
@@ -392,7 +392,7 @@
 
 	struct stream_info	streams[MAX_NUM_STREAMS];
 	struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
-	struct sst_block	tgt_dev_blk, fw_info_blk,
+	struct sst_block	tgt_dev_blk, fw_info_blk, ppp_params_blk,
 				vol_info_blk, mute_info_blk, hs_info_blk;
 	struct mutex		list_lock;/* mutex for IPC list locking */
 	spinlock_t	list_spin_lock; /* mutex for IPC list locking */
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
index 669e298..ea8e251 100644
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_drv_interface.c
@@ -26,10 +26,13 @@
  *  Upper layer interfaces (MAD driver, MMF) to SST driver
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/fs.h>
 #include <linux/firmware.h>
+#include <linux/pm_runtime.h>
 #include "intel_sst.h"
 #include "intel_sst_ioctl.h"
 #include "intel_sst_fw_ipc.h"
@@ -45,17 +48,18 @@
 {
 	int retval;
 	const struct firmware *fw_sst;
-	const char *name;
+	char name[20];
+
 	if (sst_drv_ctx->sst_state != SST_UN_INIT)
 		return -EPERM;
-	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
-		name = SST_FW_FILENAME_MRST;
-	else
-		name = SST_FW_FILENAME_MFLD;
-	pr_debug("sst: Downloading %s FW now...\n", name);
+
+	snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
+					sst_drv_ctx->pci_id, ".bin");
+
+	pr_debug("Downloading %s FW now...\n", name);
 	retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
 	if (retval) {
-		pr_err("sst: request fw failed %d\n", retval);
+		pr_err("request fw failed %d\n", retval);
 		return retval;
 	}
 	sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
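Deriving the firmware name from the PCI device ID removes the two hard-coded SST_FW_FILENAME_* macros: %04x prints lower-case hex, so the known IDs map back to the exact names the macros used to contain. A small illustration of the string that sst_download_fw() builds:

#include <linux/kernel.h>

/* Illustration only; mirrors the snprintf() in sst_download_fw(). */
static void example_fw_name(unsigned int pci_id, char *name, size_t len)
{
	snprintf(name, len, "%s%04x%s", "fw_sst_", pci_id, ".bin");
	/* 0x080A -> "fw_sst_080a.bin", 0x082F -> "fw_sst_082f.bin" */
}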
@@ -66,7 +70,7 @@
 
 	retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
 	if (retval)
-		pr_err("sst: fw download failed %d\n" , retval);
+		pr_err("fw download failed %d\n", retval);
 end_restore:
 	release_firmware(fw_sst);
 	sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
@@ -90,7 +94,7 @@
 
 		retry--;
 	}
-	pr_debug("sst: in Stalled State\n");
+	pr_debug("in Stalled State\n");
 	return retval;
 }
 
@@ -138,23 +142,23 @@
 	retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
 				str_param->codec, str_param->device_type);
 	if (retval < 0) {
-		pr_err("sst: sst_alloc_stream failed %d\n", retval);
+		pr_err("sst_alloc_stream failed %d\n", retval);
 		return retval;
 	}
-	pr_debug("sst: Stream allocated %d\n", retval);
+	pr_debug("Stream allocated %d\n", retval);
 	str_id = retval;
 	str_info = &sst_drv_ctx->streams[str_id];
 	/* Block the call for reply */
 	retval = sst_wait_interruptible_timeout(sst_drv_ctx,
 			&str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
 	if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
-		pr_debug("sst: FW alloc failed retval %d, ret_code %d\n",
+		pr_debug("FW alloc failed retval %d, ret_code %d\n",
 				retval, str_info->ctrl_blk.ret_code);
 		str_id = -str_info->ctrl_blk.ret_code; /*return error*/
 		*lib_dnld = str_info->ctrl_blk.data;
 		sst_clean_stream(str_info);
 	} else
-		pr_debug("sst: FW Stream allocated sucess\n");
+		pr_debug("FW Stream allocated successfully\n");
 	return str_id; /*will ret either error (in above if) or correct str id*/
 }
 
@@ -171,9 +175,9 @@
 	case SST_CODEC_TYPE_MP3:
 		return str_param->sparams.uc.mp3_params.sfreq;
 	case SST_CODEC_TYPE_AAC:
-		return str_param->sparams.uc.aac_params.sfreq;;
+		return str_param->sparams.uc.aac_params.sfreq;
 	case SST_CODEC_TYPE_WMA9:
-		return str_param->sparams.uc.wma_params.sfreq;;
+		return str_param->sparams.uc.wma_params.sfreq;
 	default:
 		return 0;
 	}
@@ -196,14 +200,14 @@
 		/* codec download is required */
 		struct snd_sst_alloc_response *response;
 
-		pr_debug("sst: Codec is required.... trying that\n");
+		pr_debug("Codec is required.... trying that\n");
 		if (lib_dnld == NULL) {
-			pr_err("sst: lib download null!!! abort\n");
+			pr_err("lib download null!!! abort\n");
 			return -EIO;
 		}
 		i = sst_get_block_stream(sst_drv_ctx);
 		response = sst_drv_ctx->alloc_block[i].ops_block.data;
-		pr_debug("sst: alloc block allocated = %d\n", i);
+		pr_debug("alloc block allocated = %d\n", i);
 		if (i < 0) {
 			kfree(lib_dnld);
 			return -ENOMEM;
@@ -213,15 +217,15 @@
 
 		sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
 		if (!retval) {
-			pr_debug("sst: codec was downloaded sucesfully\n");
+			pr_debug("codec was downloaded successfully\n");
 
 			retval = sst_get_stream_allocated(str_param, &lib_dnld);
 			if (retval <= 0)
 				goto err;
 
-			pr_debug("sst: Alloc done stream id %d\n", retval);
+			pr_debug("Alloc done stream id %d\n", retval);
 		} else {
-			pr_debug("sst: codec download failed\n");
+			pr_debug("codec download failed\n");
 			retval = -EIO;
 			goto err;
 		}
@@ -279,97 +283,138 @@
 		retval = sst_start_stream(mad_ops->stream_id);
 		break;
 	case SST_SND_STREAM_PROCESS:
-		pr_debug("sst: play/capt frames...\n");
+		pr_debug("play/capt frames...\n");
 		break;
 	default:
-		pr_err("sst:  wrong control_ops reported\n");
+		pr_err("wrong control_ops reported\n");
 	}
 	return;
 }
-/*
- * sst_control_set - Set Control params
- *
- * @control_list: list of controls to be set
- *
- * This function is called by MID sound card driver to set
- * SST/Sound card controls. This is registered with MID driver
- */
-int sst_control_set(int control_element, void *value)
+
+void send_intial_rx_timeslot(void)
 {
-	int retval = 0, str_id = 0;
-	struct stream_info *stream;
+	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
+			sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
+			&& sst_drv_ctx->pmic_vendor != SND_NC)
+		sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
+}
+
+/*
+ * sst_open_pcm_stream - Open PCM interface
+ *
+ * @str_param: parameters of pcm stream
+ *
+ * This function is called by MID sound card driver to open
+ * a new pcm interface
+ */
+int sst_open_pcm_stream(struct snd_sst_params *str_param)
+{
+	struct stream_info *str_info;
+	int retval;
+
+	pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
 
 	if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
-		/*LPE is suspended, resume it before proceding*/
-		pr_debug("sst: Resuming from Suspended state\n");
+		/* LPE is suspended, resume it before proceding*/
+		pr_debug("Resuming from Suspended state\n");
 		retval = intel_sst_resume(sst_drv_ctx->pci);
 		if (retval) {
-			pr_err("sst: Resume Failed = %#x, abort\n", retval);
+			pr_err("Resume Failed = %#x, abort\n", retval);
+			pm_runtime_put(&sst_drv_ctx->pci->dev);
 			return retval;
 		}
 	}
 	if (sst_drv_ctx->sst_state == SST_UN_INIT) {
 		/* FW is not downloaded */
-		pr_debug("sst: DSP Downloading FW now...\n");
+		pr_debug("DSP Downloading FW now...\n");
 		retval = sst_download_fw();
 		if (retval) {
-			pr_err("sst: FW download fail %x, abort\n", retval);
+			pr_err("FW download fail %x, abort\n", retval);
+			pm_runtime_put(&sst_drv_ctx->pci->dev);
 			return retval;
 		}
-		if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
-			sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
-				&& sst_drv_ctx->pmic_vendor != SND_NC)
-			sst_enable_rx_timeslot(
-					sst_drv_ctx->rx_time_slot_status);
+		send_intial_rx_timeslot();
 	}
 
-	switch (control_element) {
-	case SST_SND_ALLOC: {
-		struct snd_sst_params *str_param;
-		struct stream_info *str_info;
+	if (!str_param) {
+		pm_runtime_put(&sst_drv_ctx->pci->dev);
+		return -EINVAL;
+	}
 
-		str_param = (struct snd_sst_params *)value;
-		BUG_ON(!str_param);
-		retval = sst_get_stream(str_param);
-		if (retval >= 0)
-			sst_drv_ctx->stream_cnt++;
+	retval = sst_get_stream(str_param);
+	if (retval > 0) {
+		sst_drv_ctx->stream_cnt++;
 		str_info = &sst_drv_ctx->streams[retval];
 		str_info->src = MAD_DRV;
-		break;
-	}
+	} else
+		pm_runtime_put(&sst_drv_ctx->pci->dev);
 
+	return retval;
+}
+
+/*
+ * sst_close_pcm_stream - Close PCM interface
+ *
+ * @str_id: stream id to be closed
+ *
+ * This function is called by MID sound card driver to close
+ * an existing pcm interface
+ */
+int sst_close_pcm_stream(unsigned int str_id)
+{
+	struct stream_info *stream;
+
+	pr_debug("stream free called\n");
+	if (sst_validate_strid(str_id))
+		return -EINVAL;
+	stream = &sst_drv_ctx->streams[str_id];
+	free_stream_context(str_id);
+	stream->pcm_substream = NULL;
+	stream->status = STREAM_UN_INIT;
+	stream->period_elapsed = NULL;
+	sst_drv_ctx->stream_cnt--;
+	pr_debug("will call runtime put now\n");
+	pm_runtime_put(&sst_drv_ctx->pci->dev);
+	return 0;
+}
+
+/*
+ * sst_device_control - Set Control params
+ *
+ * @cmd: control cmd to be set
+ * @arg: command argument
+ *
+ * This function is called by MID sound card driver to set
+ * SST/Sound card controls for an opened stream.
+ * This is registered with MID driver
+ */
+int sst_device_control(int cmd, void *arg)
+{
+	int retval = 0, str_id = 0;
+
+	switch (cmd) {
 	case SST_SND_PAUSE:
 	case SST_SND_RESUME:
 	case SST_SND_DROP:
 	case SST_SND_START:
-		sst_drv_ctx->mad_ops.control_op = control_element;
-		sst_drv_ctx->mad_ops.stream_id = *(int *)value;
+		sst_drv_ctx->mad_ops.control_op = cmd;
+		sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
 		queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
 		break;
 
-	case SST_SND_FREE:
-		str_id = *(int *)value;
-		stream = &sst_drv_ctx->streams[str_id];
-		free_stream_context(str_id);
-		stream->pcm_substream = NULL;
-		stream->status = STREAM_UN_INIT;
-		stream->period_elapsed = NULL;
-		sst_drv_ctx->stream_cnt--;
-		break;
-
 	case SST_SND_STREAM_INIT: {
 		struct pcm_stream_info *str_info;
 		struct stream_info *stream;
 
-		pr_debug("sst: stream init called\n");
-		str_info = (struct pcm_stream_info *)value;
+		pr_debug("stream init called\n");
+		str_info = (struct pcm_stream_info *)arg;
 		str_id = str_info->str_id;
 		retval = sst_validate_strid(str_id);
 		if (retval)
 			break;
 
 		stream = &sst_drv_ctx->streams[str_id];
-		pr_debug("sst: setting the period ptrs\n");
+		pr_debug("setting the period ptrs\n");
 		stream->pcm_substream = str_info->mad_substream;
 		stream->period_elapsed = str_info->period_elapsed;
 		stream->sfreq = str_info->sfreq;
@@ -384,7 +429,7 @@
 		struct stream_info *stream;
 
 
-		stream_info = (struct pcm_stream_info *)value;
+		stream_info = (struct pcm_stream_info *)arg;
 		str_id = stream_info->str_id;
 		retval = sst_validate_strid(str_id);
 		if (retval)
@@ -398,26 +443,26 @@
 			+(str_id * sizeof(fw_tstamp))),
 			sizeof(fw_tstamp));
 
-		pr_debug("sst: Pointer Query on strid = %d ops %d\n",
+		pr_debug("Pointer Query on strid = %d ops %d\n",
 						str_id, stream->ops);
 
 		if (stream->ops == STREAM_OPS_PLAYBACK)
 			stream_info->buffer_ptr = fw_tstamp.samples_rendered;
 		else
 			stream_info->buffer_ptr = fw_tstamp.samples_processed;
-		pr_debug("sst: Samples rendered = %llu, buffer ptr %llu\n",
+		pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
 			fw_tstamp.samples_rendered, stream_info->buffer_ptr);
 		break;
 	}
 	case SST_ENABLE_RX_TIME_SLOT: {
-		int status = *(int *)value;
+		int status = *(int *)arg;
 		sst_drv_ctx->rx_time_slot_status = status ;
 		sst_enable_rx_timeslot(status);
 		break;
 	}
 	default:
 		/* Illegal case */
-		pr_warn("sst: illegal req\n");
+		pr_warn("illegal req\n");
 		return -EINVAL;
 	}
 
@@ -425,8 +470,14 @@
 }
 
 
+struct intel_sst_pcm_control pcm_ops = {
+	.open = sst_open_pcm_stream,
+	.device_control = sst_device_control,
+	.close = sst_close_pcm_stream,
+};
+
 struct intel_sst_card_ops sst_pmic_ops = {
-	.control_set = sst_control_set,
+	.pcm_control = &pcm_ops,
 };
 
 /*
@@ -439,12 +490,12 @@
 int register_sst_card(struct intel_sst_card_ops *card)
 {
 	if (!sst_drv_ctx) {
-		pr_err("sst: No SST driver register card reject\n");
+		pr_err("No SST driver register card reject\n");
 		return -ENODEV;
 	}
 
 	if (!card || !card->module_name) {
-		pr_err("sst: Null Pointer Passed\n");
+		pr_err("Null Pointer Passed\n");
 		return -EINVAL;
 	}
 	if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
@@ -456,17 +507,17 @@
 			sst_pmic_ops.module_name = card->module_name;
 			sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
 			sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
-			card->control_set = sst_pmic_ops.control_set;
+			card->pcm_control = sst_pmic_ops.pcm_control;
 			sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
 			return 0;
 		} else {
-			pr_err("sst: strcmp fail %s\n", card->module_name);
+			pr_err("strcmp fail %s\n", card->module_name);
 			return -EINVAL;
 		}
 
 	} else {
 		/* already registered a driver */
-		pr_err("sst: Repeat for registeration..denied\n");
+		pr_err("Repeat for registration..denied\n");
 		return -EBADRQC;
 	}
 	return 0;
@@ -482,11 +533,11 @@
  */
 void unregister_sst_card(struct intel_sst_card_ops *card)
 {
-	if (sst_pmic_ops.control_set == card->control_set) {
+	if (sst_pmic_ops.pcm_control == card->pcm_control) {
 		/* unreg */
 		sst_pmic_ops.module_name = "";
 		sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
-		pr_debug("sst: Unregistered %s\n", card->module_name);
+		pr_debug("Unregistered %s\n", card->module_name);
 	}
 	return;
 }
diff --git a/drivers/staging/intel_sst/intel_sst_dsp.c b/drivers/staging/intel_sst/intel_sst_dsp.c
index d80a6ee..6e5c915 100644
--- a/drivers/staging/intel_sst/intel_sst_dsp.c
+++ b/drivers/staging/intel_sst/intel_sst_dsp.c
@@ -29,6 +29,9 @@
  *  This file contains all dsp controlling functions like firmware download,
  * setting/resetting dsp cores, etc
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/fs.h>
 #include <linux/firmware.h>
@@ -47,8 +50,9 @@
 {
 	union config_status_reg csr;
 
-	pr_debug("sst: Resetting the DSP in mrst\n");
-	csr.full = 0x3a2;
+	pr_debug("Resetting the DSP in mrst\n");
+	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
+	csr.full |= 0x382;
 	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
 	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
 	csr.part.strb_cntr_rst = 0;
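Reading SST_CSR back and OR-ing in the reset/stall bits, instead of writing the absolute 0x3a2 value, preserves whatever else firmware or earlier code left in the register. A generic sketch of the read-modify-write idiom (plain readl()/writel(), not the sst_shim_read()/sst_shim_write() accessors):

#include <linux/io.h>
#include <linux/types.h>

static void example_set_bits(void __iomem *reg, u32 mask)
{
	u32 val;

	val = readl(reg);	/* read the current contents */
	val |= mask;		/* touch only the bits of interest */
	writel(val, reg);	/* write the merged value back */
}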
@@ -68,7 +72,7 @@
 {
 	union config_status_reg csr;
 
-	pr_debug("sst: Resetting the DSP in medfield\n");
+	pr_debug("Resetting the DSP in medfield\n");
 	csr.full = 0x048303E2;
 	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
 
@@ -90,7 +94,7 @@
 	csr.part.run_stall = 0;
 	csr.part.sst_reset = 0;
 	csr.part.strb_cntr_rst = 1;
-	pr_debug("sst: Setting SST to execute_mrst 0x%x\n", csr.full);
+	pr_debug("Setting SST to execute_mrst 0x%x\n", csr.full);
 	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
 
 	return 0;
@@ -111,7 +115,7 @@
 	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
 	csr.full = 0x04830061;
 	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-	pr_debug("sst: Starting the DSP_medfld\n");
+	pr_debug("Starting the DSP_medfld\n");
 
 	return 0;
 }
@@ -130,16 +134,16 @@
 	u32 count;
 	void __iomem *ram;
 
-	pr_debug("sst: module sign %s size %x blocks %x type %x\n",
+	pr_debug("module sign %s size %x blocks %x type %x\n",
 			module->signature, module->mod_size,
 			module->blocks, module->type);
-	pr_debug("sst: module entrypoint 0x%x\n", module->entry_point);
+	pr_debug("module entrypoint 0x%x\n", module->entry_point);
 
 	block = (void *)module + sizeof(*module);
 
 	for (count = 0; count < module->blocks; count++) {
 		if (block->size <= 0) {
-			pr_err("sst: block size invalid\n");
+			pr_err("block size invalid\n");
 			return -EINVAL;
 		}
 		switch (block->type) {
@@ -150,7 +154,7 @@
 			ram = sst_drv_ctx->dram;
 			break;
 		default:
-			pr_err("sst: wrong ram type0x%x in block0x%x\n",
+			pr_err("wrong ram type0x%x in block0x%x\n",
 					block->type, count);
 			return -EINVAL;
 		}
@@ -184,10 +188,10 @@
 	if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
 			(sst_fw->size != header->file_size + sizeof(*header))) {
 		/* Invalid FW signature */
-		pr_err("sst: InvalidFW sign/filesize mismatch\n");
+		pr_err("Invalid FW sign/filesize mismatch\n");
 		return -EINVAL;
 	}
-	pr_debug("sst: header sign=%s size=%x modules=%x fmt=%x size=%x\n",
+	pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%x\n",
 			header->signature, header->file_size, header->modules,
 			header->file_format, sizeof(*header));
 	module = (void *)sst_fw->data + sizeof(*header);
@@ -214,7 +218,7 @@
 {
 	int ret_val;
 
-	pr_debug("sst: load_fw called\n");
+	pr_debug("load_fw called\n");
 	BUG_ON(!fw);
 
 	if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
@@ -239,7 +243,7 @@
 	if (ret_val)
 		return ret_val;
 
-	pr_debug("sst: fw loaded successful!!!\n");
+	pr_debug("fw loaded successful!!!\n");
 	return ret_val;
 }
 
@@ -261,7 +265,7 @@
 
 	pvt_id = sst_assign_pvt_id(sst_drv_ctx);
 	i = sst_get_block_stream(sst_drv_ctx);
-	pr_debug("sst: alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
+	pr_debug("alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
 	if (i < 0) {
 		kfree(msg);
 		return -ENOMEM;
@@ -281,11 +285,11 @@
 	if (retval) {
 		/* error */
 		sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
-		pr_err("sst: Prep codec downloaded failed %d\n",
+		pr_err("Prep codec downloaded failed %d\n",
 				retval);
 		return -EIO;
 	}
-	pr_debug("sst: FW responded, ready for download now...\n");
+	pr_debug("FW responded, ready for download now...\n");
 	/* downloading on success */
 	mutex_lock(&sst_drv_ctx->sst_lock);
 	sst_drv_ctx->sst_state = SST_FW_LOADED;
@@ -325,7 +329,7 @@
 	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
 	spin_unlock(&sst_drv_ctx->list_spin_lock);
 	sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
-	pr_debug("sst: Waiting for FW response Download complete\n");
+	pr_debug("Waiting for FW response Download complete\n");
 	sst_drv_ctx->alloc_block[i].ops_block.condition = false;
 	retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
 	if (retval) {
@@ -337,7 +341,7 @@
 		return -EIO;
 	}
 
-	pr_debug("sst: FW sucess on Download complete\n");
+	pr_debug("FW success on Download complete\n");
 	sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
 	mutex_lock(&sst_drv_ctx->sst_lock);
 	sst_drv_ctx->sst_state = SST_FW_RUNNING;
@@ -360,14 +364,14 @@
 
 	header = (struct fw_header *)fw_lib->data;
 	if (header->modules != 1) {
-		pr_err("sst: Module no mismatch found\n ");
+		pr_err("Module number mismatch found\n");
 		err = -EINVAL;
 		goto exit;
 	}
 	module = (void *)fw_lib->data + sizeof(*header);
 	*entry_point = module->entry_point;
-	pr_debug("sst: Module entry point 0x%x\n", *entry_point);
-	pr_debug("sst: Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
+	pr_debug("Module entry point 0x%x\n", *entry_point);
+	pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
 			module->signature, module->mod_size,
 			module->blocks, module->type);
 
@@ -381,20 +385,20 @@
 			dsize += block->size;
 			break;
 		default:
-			pr_err("sst: Invalid block type for 0x%x\n", n_blk);
+			pr_err("Invalid block type for 0x%x\n", n_blk);
 			err = -EINVAL;
 			goto exit;
 		}
 		block = (void *)block + sizeof(*block) + block->size;
 	}
 	if (isize > slot->iram_size || dsize > slot->dram_size) {
-		pr_err("sst: library exceeds size allocated\n");
+		pr_err("library exceeds size allocated\n");
 		err = -EINVAL;
 		goto exit;
 	} else
-		pr_debug("sst: Library is safe for download...\n");
+		pr_debug("Library is safe for download...\n");
 
-	pr_debug("sst: iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
+	pr_debug("iram 0x%x, dram 0x%x, iram 0x%x, dram 0x%x\n",
 			isize, dsize, slot->iram_size, slot->dram_size);
 exit:
 	return err;
@@ -414,15 +418,15 @@
 
 	memset(buf, 0, sizeof(buf));
 
-	pr_debug("sst: Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
+	pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
 			lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
-	pr_debug("sst: Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
+	pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
 		lib->lib_info.lib_version, lib->lib_info.lib_name,
 		lib->lib_info.lib_caps, lib->lib_info.media_type);
 
-	pr_debug("sst: IRAM Size 0x%x, offset 0x%x\n",
+	pr_debug("IRAM Size 0x%x, offset 0x%x\n",
 		lib->slot_info.iram_size, lib->slot_info.iram_offset);
-	pr_debug("sst: DRAM Size 0x%x, offset 0x%x\n",
+	pr_debug("DRAM Size 0x%x, offset 0x%x\n",
 		lib->slot_info.dram_size, lib->slot_info.dram_offset);
 
 	switch (lib->lib_info.lib_type) {
@@ -442,7 +446,7 @@
 		type = "wma9_";
 		break;
 	default:
-		pr_err("sst: Invalid codec type\n");
+		pr_err("Invalid codec type\n");
 		error = -EINVAL;
 		goto wake;
 	}
@@ -458,11 +462,11 @@
 			lib->slot_info.slot_num);
 	len += snprintf(buf + len, sizeof(buf) - len, ".bin");
 
-	pr_debug("sst: Requesting %s\n", buf);
+	pr_debug("Requesting %s\n", buf);
 
 	error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
 	if (error) {
-		pr_err("sst: library load failed %d\n", error);
+		pr_err("library load failed %d\n", error);
 		goto wake;
 	}
 	error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
@@ -476,7 +480,7 @@
 		goto wake_free;
 
 	/* lib is downloaded and init send alloc again */
-	pr_debug("sst: Library is downloaded now...\n");
+	pr_debug("Library is downloaded now...\n");
 wake_free:
 	/* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
 	release_firmware(fw_lib);
diff --git a/drivers/staging/intel_sst/intel_sst_fw_ipc.h b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
index 9d3c368..8df313d 100644
--- a/drivers/staging/intel_sst/intel_sst_fw_ipc.h
+++ b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
@@ -31,6 +31,7 @@
 */
 
 #define MAX_NUM_STREAMS_MRST 3
+#define MAX_NUM_STREAMS_MFLD 6
 #define MAX_NUM_STREAMS 6
 #define MAX_DBG_RW_BYTES 80
 #define MAX_NUM_SCATTER_BUFFERS 8
@@ -67,6 +68,8 @@
 #define IPC_IA_CAPT_VOICE 0x17
 #define IPC_IA_DECODE_FRAMES 0x18
 
+#define IPC_IA_ALG_PARAMS 0x1A
+
 /* I2L Stream config/control msgs */
 #define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
 #define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
@@ -141,73 +144,87 @@
 	/* Error code,response to msgId: Description */
 	/* Common error codes */
 	SST_SUCCESS = 0,	/* Success */
-	SST_ERR_INVALID_STREAM_ID, /* Invalid stream ID */
-	SST_ERR_INVALID_MSG_ID,	/* Invalid message ID */
-	SST_ERR_INVALID_STREAM_OP, /* Invalid stream operation request */
-	SST_ERR_INVALID_PARAMS,	/* Invalid params */
-	SST_ERR_INVALID_CODEC,	/* Invalid codec type */
-	SST_ERR_INVALID_MEDIA_TYPE, /* Invalid media type */
-	SST_ERR_STREAM_ERR,  /* ANY: Stream control or config or
-					processing error */
+	SST_ERR_INVALID_STREAM_ID = 1,
+	SST_ERR_INVALID_MSG_ID = 2,
+	SST_ERR_INVALID_STREAM_OP = 3,
+	SST_ERR_INVALID_PARAMS = 4,
+	SST_ERR_INVALID_CODEC = 5,
+	SST_ERR_INVALID_MEDIA_TYPE = 6,
+	SST_ERR_STREAM_ERR = 7,
 
 	/* IPC specific error codes */
-	SST_IPC_ERR_CALL_BACK_NOT_REGD, /* Call back for msg not regd */
-	SST_IPC_ERR_STREAM_NOT_ALLOCATED, /* Stream is not allocated  */
-	SST_IPC_ERR_STREAM_ALLOC_FAILED, /* ALLOC:Stream alloc failed */
-	SST_IPC_ERR_GET_STREAM_FAILED, /* ALLOC:Get stream id failed*/
-	SST_ERR_MOD_NOT_AVAIL, /* SET/GET: Mod(AEC/AGC/ALC) not available */
-	SST_ERR_MOD_DNLD_RQD, /* SET/GET: Mod(AEC/AGC/ALC) download required */
-	SST_ERR_STREAM_STOPPED,		/* ANY: Stream is in stopped state */
-	SST_ERR_STREAM_IN_USE, /* ANY: Stream is already in use */
+	SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
+	SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
+	SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
+	SST_IPC_ERR_GET_STREAM_FAILED = 11,
+	SST_ERR_MOD_NOT_AVAIL = 12,
+	SST_ERR_MOD_DNLD_RQD = 13,
+	SST_ERR_STREAM_STOPPED = 14,
+	SST_ERR_STREAM_IN_USE = 15,
 
 	/* Capture specific error codes */
-	SST_CAP_ERR_INCMPLTE_CAPTURE_MSG,/* ANY:Incomplete message */
-	SST_CAP_ERR_CAPTURE_FAIL, /* ANY:Capture op failed */
-	SST_CAP_ERR_GET_DDR_NEW_SGLIST,
-	SST_CAP_ERR_UNDER_RUN,	/* lack of input data */
-	SST_CAP_ERR_OVERFLOW,	/* lack of output space */
+	SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
+	SST_CAP_ERR_CAPTURE_FAIL = 17,
+	SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
+	SST_CAP_ERR_UNDER_RUN = 19,
+	SST_CAP_ERR_OVERFLOW = 20,
 
 	/* Playback specific error codes*/
-	SST_PB_ERR_INCMPLTE_PLAY_MSG, /* ANY: Incomplete message */
-	SST_PB_ERR_PLAY_FAIL, /* ANY: Playback operation failed */
-	SST_PB_ERR_GET_DDR_NEW_SGLIST,
+	SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
+	SST_PB_ERR_PLAY_FAIL = 22,
+	SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
 
 	/* Codec manager specific error codes */
-	SST_LIB_ERR_LIB_DNLD_REQUIRED, /* ALLOC: Codec download required */
-	SST_LIB_ERR_LIB_NOT_SUPPORTED, /* Library is not supported */
+	SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
+	SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
 
 	/* Library manager specific error codes */
-	SST_SCC_ERR_PREP_DNLD_FAILED, /* Failed to prepare for codec download */
-	SST_SCC_ERR_LIB_DNLD_RES_FAILED, /* Lib download resume failed */
+	SST_SCC_ERR_PREP_DNLD_FAILED = 26,
+	SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
 	/* Scheduler specific error codes */
-	SST_SCH_ERR_FAIL, /* REPORT: */
+	SST_SCH_ERR_FAIL = 28,
 
 	/* DMA specific error codes */
-	SST_DMA_ERR_NO_CHNL_AVAILABLE, /* DMA Ch not available */
-	SST_DMA_ERR_INVALID_INPUT_PARAMS, /* Invalid input params */
-	SST_DMA_ERR_CHNL_ALREADY_SUSPENDED, /* Ch is suspended */
-	SST_DMA_ERR_CHNL_ALREADY_STARTED, /* Ch already started */
-	SST_DMA_ERR_CHNL_NOT_ENABLED, /* Ch not enabled */
-	SST_DMA_ERR_TRANSFER_FAILED, /* Transfer failed */
-	SST_SSP_ERR_ALREADY_ENABLED, /* REPORT: SSP already enabled */
-	SST_SSP_ERR_ALREADY_DISABLED, /* REPORT: SSP already disabled */
-	SST_SSP_ERR_NOT_INITIALIZED,
+	SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
+	SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
+	SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
+	SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
+	SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
+	SST_DMA_ERR_TRANSFER_FAILED = 34,
+
+	SST_SSP_ERR_ALREADY_ENABLED = 35,
+	SST_SSP_ERR_ALREADY_DISABLED = 36,
+	SST_SSP_ERR_NOT_INITIALIZED = 37,
+	SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
 
 	/* Other error codes */
-	SST_ERR_MOD_INIT_FAIL,	/* Firmware Module init failed */
+	SST_ERR_MOD_INIT_FAIL = 39,
 
 	/* FW init error codes */
-	SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED,
-	SST_RDR_ERR_ROUTE_ALREADY_STARTED,
-	SST_RDR_PREP_CODEC_DNLD_FAILED,
+	SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
+	SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
+	SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
+	SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
 
 	/* Memory debug error codes */
-	SST_ERR_DBG_MEM_READ_FAIL,
-	SST_ERR_DBG_MEM_WRITE_FAIL,
+	SST_ERR_DBG_MEM_READ_FAIL = 44,
+	SST_ERR_DBG_MEM_WRITE_FAIL = 45,
+	SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
+	SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
 
-	/* Decode error codes */
-	SST_ERR_DEC_NEED_INPUT_BUF,
+	SST_ERR_BUFFER_NOT_AVAILABLE = 48,
+	SST_ERR_BUFFER_NOT_ALLOCATED = 49,
+	SST_ERR_INVALID_REGION_TYPE = 50,
+	SST_ERR_NULL_PTR = 51,
+	SST_ERR_INVALID_BUFFER_SIZE = 52,
+	SST_ERR_INVALID_BUFFER_INDEX = 53,
 
+	/*IIPC specific error codes */
+	SST_IIPC_QUEUE_FULL = 54,
+	SST_IIPC_ERR_MSG_SND_FAILED = 55,
+	SST_PB_ERR_UNDERRUN_OCCURED = 56,
+	SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
+	SST_INVALID_TIME_SLOTS = 58,
 };
 
 enum dbg_mem_data_type {
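Spelling out the numeric value of every firmware error code, rather than relying on implicit enumeration, keeps the kernel's view of the reply codes explicit and reviewable now that new codes are inserted in the middle of the list. A tiny illustration of the hazard with a hypothetical protocol enum:

/* Implicit numbering: inserting EX_NEW silently shifts EX_LATE from 2 to 3,
 * breaking any peer that still sends 2 to mean EX_LATE. */
enum example_status_implicit { EX_OK, EX_EARLY, EX_NEW, EX_LATE };

/* Explicit numbering keeps every value visibly pinned and easy to review. */
enum example_status_explicit {
	EXP_OK		= 0,
	EXP_EARLY	= 1,
	EXP_NEW		= 2,
	EXP_LATE	= 3,
};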
diff --git a/drivers/staging/intel_sst/intel_sst_ioctl.h b/drivers/staging/intel_sst/intel_sst_ioctl.h
index 03b9316..bebc395 100644
--- a/drivers/staging/intel_sst/intel_sst_ioctl.h
+++ b/drivers/staging/intel_sst/intel_sst_ioctl.h
@@ -190,21 +190,15 @@
 	__u32 reserved;	/* No pre-processing defined yet */
 };
 
-struct snd_params_block {
-	__u32 type;		/*Type of the parameter*/
-	__u32 size;		/*size of the parameters in the block*/
-	__u8 params[0];	/*Parameters of the algorithm*/
-};
-
 /* Pre and post processing params structure */
 struct snd_ppp_params {
-	enum sst_algo_types	algo_id;/* Post/Pre processing algorithm ID  */
+	__u8			algo_id;/* Post/Pre processing algorithm ID  */
 	__u8			str_id;	/*Only 5 bits used 0 - 31 are valid*/
 	__u8			enable;	/* 0= disable, 1= enable*/
 	__u8			reserved;
 	__u32			size;	/*Size of parameters for all blocks*/
-	struct snd_params_block	params[0];
-};
+	void			*params;
+} __attribute__ ((packed));
 
 struct snd_sst_postproc_info {
 	__u32 src_min;		/* Supported SRC Min sampling freq */
@@ -431,5 +425,8 @@
 #define SNDRV_SST_FW_INFO	_IOR('L', 0x20,  struct snd_sst_fw_info *)
 #define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
 					struct snd_sst_target_device *)
+/*DSP Ioctls on /dev/intel_sst_ctrl only*/
+#define SNDRV_SST_SET_ALGO	_IOW('L', 0x30,  struct snd_ppp_params *)
+#define SNDRV_SST_GET_ALGO	_IOWR('L', 0x31,  struct snd_ppp_params *)
 
 #endif /* __INTEL_SST_IOCTL_H__ */
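The two DSP ioctls added here are issued on the control device rather than a stream node (the comment above names /dev/intel_sst_ctrl), with struct snd_ppp_params describing the algorithm block and params pointing at a user buffer of size bytes. A hedged user-space sketch, assuming this header is available to the application and that the control node is named as in the comment; the algorithm ID and payload are purely hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "intel_sst_ioctl.h"

int example_set_algo(void)
{
	unsigned char blob[16] = { 0 };		/* hypothetical parameter payload */
	struct snd_ppp_params pp = {
		.algo_id = 1,			/* hypothetical algorithm id */
		.str_id  = 0,
		.enable  = 1,
		.size    = sizeof(blob),
		.params  = blob,
	};
	int fd, ret;

	fd = open("/dev/intel_sst_ctrl", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, SNDRV_SST_SET_ALGO, &pp);
	if (ret < 0)
		perror("SNDRV_SST_SET_ALGO");

	close(fd);
	return ret;
}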
diff --git a/drivers/staging/intel_sst/intel_sst_ipc.c b/drivers/staging/intel_sst/intel_sst_ipc.c
index 39c67fa..0742dde 100644
--- a/drivers/staging/intel_sst/intel_sst_ipc.c
+++ b/drivers/staging/intel_sst/intel_sst_ipc.c
@@ -26,6 +26,8 @@
  *  This file defines all ipc functions
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/sched.h>
@@ -75,16 +77,16 @@
 	/*To check if LPE is in stalled state.*/
 	retval = sst_stalled();
 	if (retval < 0) {
-		pr_err("sst: in stalled state\n");
+		pr_err("in stalled state\n");
 		return;
 	}
-	pr_debug("sst: post message called\n");
+	pr_debug("post message called\n");
 	spin_lock(&sst_drv_ctx->list_spin_lock);
 
 	/* check list */
 	if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
 		/* list is empty, mask imr */
-		pr_debug("sst: Empty msg queue... masking\n");
+		pr_debug("Empty msg queue... masking\n");
 		imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
 		imr.part.done_interrupt = 1;
 		/* dummy register for shim workaround */
@@ -97,7 +99,7 @@
 	header.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCX);
 	if (header.part.busy) {
 		/* busy, unmask */
-		pr_debug("sst: Busy not free... unmasking\n");
+		pr_debug("Busy not free... unmasking\n");
 		imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
 		imr.part.done_interrupt = 0;
 		/* dummy register for shim workaround */
@@ -109,8 +111,8 @@
 	msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
 			struct ipc_post, node);
 	list_del(&msg->node);
-	pr_debug("sst: Post message: header = %x\n", msg->header.full);
-	pr_debug("sst: size: = %x\n", msg->header.part.data);
+	pr_debug("Post message: header = %x\n", msg->header.full);
+	pr_debug("size: = %x\n", msg->header.part.data);
 	if (msg->header.part.large)
 		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
 			msg->mailbox_data, msg->header.part.data);
@@ -166,13 +168,13 @@
 		(struct ipc_header_fw_init *)msg->mailbox;
 	int retval = 0;
 
-	pr_debug("sst: *** FW Init msg came***\n");
+	pr_debug("*** FW Init msg came***\n");
 	if (init->result) {
 		mutex_lock(&sst_drv_ctx->sst_lock);
 		sst_drv_ctx->sst_state = SST_ERROR;
 		mutex_unlock(&sst_drv_ctx->sst_lock);
-		pr_debug("sst: FW Init failed, Error %x\n", init->result);
-		pr_err("sst: FW Init failed, Error %x\n", init->result);
+		pr_debug("FW Init failed, Error %x\n", init->result);
+		pr_err("FW Init failed, Error %x\n", init->result);
 		retval = -init->result;
 		return retval;
 	}
@@ -180,12 +182,13 @@
 		sst_send_sound_card_type();
 	mutex_lock(&sst_drv_ctx->sst_lock);
 	sst_drv_ctx->sst_state = SST_FW_RUNNING;
+	sst_drv_ctx->lpe_stalled = 0;
 	mutex_unlock(&sst_drv_ctx->sst_lock);
-	pr_debug("sst: FW Version %x.%x\n",
+	pr_debug("FW Version %x.%x\n",
 			init->fw_version.major, init->fw_version.minor);
-	pr_debug("sst: Build No %x Type %x\n",
+	pr_debug("Build No %x Type %x\n",
 			init->fw_version.build, init->fw_version.type);
-	pr_debug("sst:  Build date %s Time %s\n",
+	pr_debug("Build date %s Time %s\n",
 			init->build_info.date, init->build_info.time);
 	sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, retval, NULL);
 	return retval;
@@ -204,19 +207,19 @@
 			container_of(work, struct sst_ipc_msg_wq, wq);
 	int str_id = msg->header.part.str_id;
 
-	pr_debug("sst: IPC process for %x\n", msg->header.full);
+	pr_debug("IPC process for %x\n", msg->header.full);
 
 	/* based on msg in list call respective handler */
 	switch (msg->header.part.msg_id) {
 	case IPC_SST_BUF_UNDER_RUN:
 	case IPC_SST_BUF_OVER_RUN:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst:  stream id %d invalid\n", str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
-		pr_err("sst: Buffer under/overrun for%d\n",
+		pr_err("Buffer under/overrun for %d\n",
 				msg->header.part.str_id);
-		pr_err("sst: Got Underrun & not to send data...ignore\n");
+		pr_err("Got Underrun & not to send data...ignore\n");
 		break;
 
 	case IPC_SST_GET_PLAY_FRAMES:
@@ -224,35 +227,35 @@
 			struct stream_info *stream ;
 
 			if (sst_validate_strid(str_id)) {
-				pr_err("sst: strid %d invalid\n", str_id);
+				pr_err("strid %d invalid\n", str_id);
 				break;
 			}
 			/* call sst_play_frame */
 			stream = &sst_drv_ctx->streams[str_id];
-			pr_debug("sst: sst_play_frames for %d\n",
+			pr_debug("sst_play_frames for %d\n",
 					msg->header.part.str_id);
 			mutex_lock(&sst_drv_ctx->streams[str_id].lock);
 			sst_play_frame(msg->header.part.str_id);
 			mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
 			break;
 		} else
-			pr_err("sst: sst_play_frames for Penwell!!\n");
+			pr_err("sst_play_frames for Penwell!!\n");
 
 	case IPC_SST_GET_CAPT_FRAMES:
 		if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
 			struct stream_info *stream;
 			/* call sst_capture_frame */
 			if (sst_validate_strid(str_id)) {
-				pr_err("sst: str id %d invalid\n", str_id);
+				pr_err("str id %d invalid\n", str_id);
 				break;
 			}
 			stream = &sst_drv_ctx->streams[str_id];
-			pr_debug("sst: sst_capture_frames for %d\n",
+			pr_debug("sst_capture_frames for %d\n",
 					msg->header.part.str_id);
 			mutex_lock(&stream->lock);
 			if (stream->mmapped == false &&
 					stream->src == SST_DRV) {
-				pr_debug("sst: waking up block for copy.\n");
+				pr_debug("waking up block for copy.\n");
 				stream->data_blk.ret_code = 0;
 				stream->data_blk.condition = true;
 				stream->data_blk.on = false;
@@ -261,11 +264,11 @@
 				sst_capture_frame(msg->header.part.str_id);
 			mutex_unlock(&stream->lock);
 		} else
-			pr_err("sst: sst_play_frames for Penwell!!\n");
+			pr_err("sst_play_frames for Penwell!!\n");
 		break;
 
 	case IPC_IA_PRINT_STRING:
-		pr_debug("sst: been asked to print something by fw\n");
+		pr_debug("been asked to print something by fw\n");
 		/* TBD */
 		break;
 
@@ -277,12 +280,12 @@
 
 	case IPC_SST_STREAM_PROCESS_FATAL_ERR:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst: stream id %d invalid\n", str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
-		pr_err("sst: codec fatal error %x stream %d...\n",
+		pr_err("codec fatal error %x stream %d...\n",
 				msg->header.full, msg->header.part.str_id);
-		pr_err("sst: Dropping the stream\n");
+		pr_err("Dropping the stream\n");
 		sst_drop_stream(msg->header.part.str_id);
 		break;
 	case IPC_IA_LPE_GETTING_STALLED:
@@ -293,7 +296,7 @@
 		break;
 	default:
 		/* Illegal case */
-		pr_err("sst: Unhandled msg %x header %x\n",
+		pr_err("Unhandled msg %x header %x\n",
 		msg->header.part.msg_id, msg->header.full);
 	}
 	sst_clear_interrupt();
@@ -322,7 +325,7 @@
 		if (!msg->header.part.data) {
 			sst_drv_ctx->tgt_dev_blk.ret_code = 0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 			msg->header.part.msg_id, msg->header.part.data);
 			sst_drv_ctx->tgt_dev_blk.ret_code =
 					-msg->header.part.data;
@@ -333,6 +336,55 @@
 				wake_up(&sst_drv_ctx->wait_queue);
 		}
 		break;
+	case IPC_IA_ALG_PARAMS: {
+		pr_debug("IPC_ALG_PARAMS response %x\n", msg->header.full);
+		pr_debug("data value %x\n", msg->header.part.data);
+		pr_debug("large value %x\n", msg->header.part.large);
+
+		if (!msg->header.part.large) {
+			if (!msg->header.part.data) {
+				pr_debug("alg set success\n");
+				sst_drv_ctx->ppp_params_blk.ret_code = 0;
+			} else {
+				pr_debug("alg set failed\n");
+				sst_drv_ctx->ppp_params_blk.ret_code =
+							-msg->header.part.data;
+			}
+
+		} else if (msg->header.part.data) {
+			struct snd_ppp_params *mailbox_params, *get_params;
+			char *params;
+
+			pr_debug("alg get success\n");
+			mailbox_params = (struct snd_ppp_params *)msg->mailbox;
+			get_params = kzalloc(sizeof(*get_params), GFP_KERNEL);
+			if (get_params == NULL) {
+				pr_err("out of memory for ALG PARAMS\n");
+				break;
+			}
+			memcpy_fromio(get_params, mailbox_params,
+							sizeof(*get_params));
+			get_params->params = kzalloc(mailbox_params->size,
+							GFP_KERNEL);
+			if (get_params->params == NULL) {
+				kfree(get_params);
+				pr_err("out of memory for ALG PARAMS block\n");
+				break;
+			}
+			params = msg->mailbox;
+			params = params + sizeof(*mailbox_params) - sizeof(u32);
+			memcpy_fromio(get_params->params, params,
+							get_params->size);
+			sst_drv_ctx->ppp_params_blk.ret_code = 0;
+			sst_drv_ctx->ppp_params_blk.data = get_params;
+		}
+
+		if (sst_drv_ctx->ppp_params_blk.on == true) {
+			sst_drv_ctx->ppp_params_blk.condition = true;
+			wake_up(&sst_drv_ctx->wait_queue);
+		}
+		break;
+	}
 	case IPC_IA_GET_FW_INFO: {
 		struct snd_sst_fw_info *fw_info =
 			(struct snd_sst_fw_info *)msg->mailbox;
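
In the GET reply handling added in the previous hunk, the non-obvious part is the offset arithmetic: the payload copy starts at sizeof(*mailbox_params) - sizeof(u32), which appears to rely on the params pointer being 4 bytes on this 32-bit platform, so the data begins where the in-memory struct keeps its params pointer. A condensed sketch of that copy with illustrative names and trimmed error handling (offsetof spells the same offset without the pointer-size assumption):

	#include <linux/io.h>
	#include <linux/slab.h>
	#include <linux/stddef.h>
	#include "intel_sst_ioctl.h"

	/* Sketch only: copy the fixed header, then hdr->size payload bytes. */
	static void *copy_algo_reply(void __iomem *mailbox, struct snd_ppp_params *hdr)
	{
		void *payload;

		memcpy_fromio(hdr, mailbox, sizeof(*hdr));
		payload = kzalloc(hdr->size, GFP_KERNEL);
		if (!payload)
			return NULL;
		memcpy_fromio(payload,
			      (char __iomem *)mailbox +
				offsetof(struct snd_ppp_params, params),
			      hdr->size);
		hdr->params = payload;	/* replace the wire bytes with a real pointer */
		return payload;
	}
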
@@ -340,7 +392,7 @@
 			int major = fw_info->fw_version.major;
 			int minor = fw_info->fw_version.minor;
 			int build = fw_info->fw_version.build;
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Msg succeeded %x\n",
 				       msg->header.part.msg_id);
 			pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
 					major, minor, build);
@@ -349,13 +401,13 @@
 				sizeof(struct snd_sst_fw_info));
 			sst_drv_ctx->fw_info_blk.ret_code = 0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 			msg->header.part.msg_id, msg->header.part.data);
 			sst_drv_ctx->fw_info_blk.ret_code =
 					-msg->header.part.data;
 		}
 		if (sst_drv_ctx->fw_info_blk.on == true) {
-			pr_debug("sst: Memcopy succedded\n");
+			pr_debug("Memcopy succeeded\n");
 			sst_drv_ctx->fw_info_blk.on = false;
 			sst_drv_ctx->fw_info_blk.condition = true;
 			wake_up(&sst_drv_ctx->wait_queue);
@@ -364,11 +416,11 @@
 	}
 	case IPC_IA_SET_STREAM_MUTE:
 		if (!msg->header.part.data) {
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Msg succeeded %x\n",
 				       msg->header.part.msg_id);
 			sst_drv_ctx->mute_info_blk.ret_code = 0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 			msg->header.part.msg_id, msg->header.part.data);
 			sst_drv_ctx->mute_info_blk.ret_code =
 					-msg->header.part.data;
@@ -382,11 +434,11 @@
 		break;
 	case IPC_IA_SET_STREAM_VOL:
 		if (!msg->header.part.data) {
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Msg succeeded %x\n",
 				       msg->header.part.msg_id);
 			sst_drv_ctx->vol_info_blk.ret_code = 0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 					msg->header.part.msg_id,
 			msg->header.part.data);
 			sst_drv_ctx->vol_info_blk.ret_code =
@@ -402,15 +454,15 @@
 		break;
 	case IPC_IA_GET_STREAM_VOL:
 		if (msg->header.part.large) {
-			pr_debug("sst: Large Msg Received Successfully\n");
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Large Msg Received Successfully\n");
+			pr_debug("Msg succeeded %x\n",
 				       msg->header.part.msg_id);
 			memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
 				(void *) msg->mailbox,
 				sizeof(struct snd_sst_vol));
 			sst_drv_ctx->vol_info_blk.ret_code = 0;
 		} else {
-			pr_err("sst: Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 			msg->header.part.msg_id, msg->header.part.data);
 			sst_drv_ctx->vol_info_blk.ret_code =
 					-msg->header.part.data;
@@ -424,18 +476,18 @@
 
 	case IPC_IA_GET_STREAM_PARAMS:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst: stream id %d invalid\n", str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
 		str_info = &sst_drv_ctx->streams[str_id];
 		if (msg->header.part.large) {
-			pr_debug("sst: Get stream large success\n");
+			pr_debug("Get stream large success\n");
 			memcpy_fromio(str_info->ctrl_blk.data,
 				((void *)(msg->mailbox)),
 				sizeof(struct snd_sst_fw_get_stream_params));
 			str_info->ctrl_blk.ret_code = 0;
 		} else {
-			pr_err("sst: Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 				msg->header.part.msg_id, msg->header.part.data);
 			str_info->ctrl_blk.ret_code = -msg->header.part.data;
 		}
@@ -447,19 +499,19 @@
 		break;
 	case IPC_IA_DECODE_FRAMES:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst: stream id %d invalid\n", str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
 		str_info = &sst_drv_ctx->streams[str_id];
 		if (msg->header.part.large) {
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Msg succeeded %x\n",
 				       msg->header.part.msg_id);
 			memcpy_fromio(str_info->data_blk.data,
 					((void *)(msg->mailbox)),
 					sizeof(struct snd_sst_decode_info));
 			str_info->data_blk.ret_code = 0;
 		} else {
-			pr_err("sst: Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 				msg->header.part.msg_id, msg->header.part.data);
 			str_info->data_blk.ret_code = -msg->header.part.data;
 		}
@@ -471,17 +523,17 @@
 		break;
 	case IPC_IA_DRAIN_STREAM:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst: stream id %d invalid\n", str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
 		str_info = &sst_drv_ctx->streams[str_id];
 		if (!msg->header.part.data) {
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Msg succeeded %x\n",
 					msg->header.part.msg_id);
 			str_info->ctrl_blk.ret_code = 0;
 
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 				msg->header.part.msg_id, msg->header.part.data);
 			str_info->ctrl_blk.ret_code = -msg->header.part.data;
 
@@ -496,7 +548,7 @@
 
 	case IPC_IA_DROP_STREAM:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst: str id %d invalid\n", str_id);
+			pr_err("str id %d invalid\n", str_id);
 			break;
 		}
 		str_info = &sst_drv_ctx->streams[str_id];
@@ -504,12 +556,12 @@
 			struct snd_sst_drop_response *drop_resp =
 				(struct snd_sst_drop_response *)msg->mailbox;
 
-			pr_debug("sst: Drop ret bytes %x\n", drop_resp->bytes);
+			pr_debug("Drop ret bytes %x\n", drop_resp->bytes);
 
 			str_info->curr_bytes = drop_resp->bytes;
 			str_info->ctrl_blk.ret_code =  0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 				msg->header.part.msg_id, msg->header.part.data);
 			str_info->ctrl_blk.ret_code = -msg->header.part.data;
 		}
@@ -521,10 +573,10 @@
 		break;
 	case IPC_IA_ENABLE_RX_TIME_SLOT:
 		if (!msg->header.part.data) {
-			pr_debug("sst: RX_TIME_SLOT success\n");
+			pr_debug("RX_TIME_SLOT success\n");
 			sst_drv_ctx->hs_info_blk.ret_code = 0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 				msg->header.part.msg_id,
 				msg->header.part.data);
 			sst_drv_ctx->hs_info_blk.ret_code =
@@ -541,17 +593,17 @@
 	case IPC_IA_SET_STREAM_PARAMS:
 		str_info = &sst_drv_ctx->streams[str_id];
 		if (!msg->header.part.data) {
-			pr_debug("sst: Msg succedded %x\n",
+			pr_debug("Msg succeeded %x\n",
 					msg->header.part.msg_id);
 			str_info->ctrl_blk.ret_code = 0;
 		} else {
-			pr_err("sst:  Msg %x reply error %x\n",
+			pr_err("Msg %x reply error %x\n",
 					msg->header.part.msg_id,
 					msg->header.part.data);
 			str_info->ctrl_blk.ret_code = -msg->header.part.data;
 		}
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst:  stream id %d invalid\n", str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
 
@@ -564,9 +616,9 @@
 
 	case IPC_IA_FREE_STREAM:
 		if (!msg->header.part.data) {
-			pr_debug("sst: Stream %d freed\n", str_id);
+			pr_debug("Stream %d freed\n", str_id);
 		} else {
-			pr_err("sst: Free for %d ret error %x\n",
+			pr_err("Free for %d ret error %x\n",
 				       str_id, msg->header.part.data);
 		}
 		break;
@@ -575,7 +627,7 @@
 		struct snd_sst_alloc_response *resp =
 				(struct snd_sst_alloc_response *)msg->mailbox;
 		if (resp->str_type.result)
-			pr_err("sst: error alloc stream = %x\n",
+			pr_err("error alloc stream = %x\n",
 				       resp->str_type.result);
 		sst_alloc_stream_response(str_id, resp);
 		break;
@@ -584,21 +636,21 @@
 	case IPC_IA_PLAY_FRAMES:
 	case IPC_IA_CAPT_FRAMES:
 		if (sst_validate_strid(str_id)) {
-			pr_err("sst: stream id %d invalid\n" , str_id);
+			pr_err("stream id %d invalid\n", str_id);
 			break;
 		}
-		pr_debug("sst: Ack for play/capt frames recived\n");
+		pr_debug("Ack for play/capt frames received\n");
 		break;
 
 	case IPC_IA_PREP_LIB_DNLD: {
 		struct snd_sst_str_type *str_type =
 			(struct snd_sst_str_type *)msg->mailbox;
-		pr_debug("sst: Prep Lib download %x\n",
+		pr_debug("Prep Lib download %x\n",
 				msg->header.part.msg_id);
 		if (str_type->result)
-			pr_err("sst: Prep lib download %x\n", str_type->result);
+			pr_err("Prep lib download %x\n", str_type->result);
 		else
-			pr_debug("sst: Can download codec now...\n");
+			pr_debug("Can download codec now...\n");
 		sst_wake_up_alloc_block(sst_drv_ctx, str_id,
 				str_type->result, NULL);
 		break;
@@ -609,12 +661,12 @@
 			(struct snd_sst_lib_download_info *)msg->mailbox;
 		int retval = resp->result;
 
-		pr_debug("sst: Lib downloaded %x\n", msg->header.part.msg_id);
+		pr_debug("Lib downloaded %x\n", msg->header.part.msg_id);
 		if (resp->result) {
-			pr_err("sst: err in lib dload %x\n", resp->result);
+			pr_err("err in lib dload %x\n", resp->result);
 		} else {
-			pr_debug("sst: Codec download complete...\n");
-			pr_debug("sst: codec Type %d Ver %d Built %s: %s\n",
+			pr_debug("Codec download complete...\n");
+			pr_debug("codec Type %d Ver %d Built %s: %s\n",
 				resp->dload_lib.lib_info.lib_type,
 				resp->dload_lib.lib_info.lib_version,
 				resp->dload_lib.lib_info.b_date,
@@ -639,17 +691,17 @@
 	case IPC_IA_GET_FW_BUILD_INF: {
 		struct sst_fw_build_info *build =
 			(struct sst_fw_build_info *)msg->mailbox;
-		pr_debug("sst: Build date:%sTime:%s", build->date, build->time);
+		pr_debug("Build date:%s Time:%s\n", build->date, build->time);
 		break;
 	}
 	case IPC_IA_SET_PMIC_TYPE:
 		break;
 	case IPC_IA_START_STREAM:
-		pr_debug("sst: reply for START STREAM %x\n", msg->header.full);
+		pr_debug("reply for START STREAM %x\n", msg->header.full);
 		break;
 	default:
 		/* Illegal case */
-		pr_err("sst: process reply:default = %x\n", msg->header.full);
+		pr_err("process reply:default = %x\n", msg->header.full);
 	}
 	sst_clear_interrupt();
 	return;
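
Most of the churn in this file is mechanical: the hand-written "sst: " prefixes are dropped because the pr_fmt() definition added at the top of the file now prepends the module name to every pr_*() call. Roughly how that works (the exact expansion lives in include/linux/printk.h; this is a sketch):

	/* Must be defined before the first include that drags in <linux/printk.h>,
	 * otherwise the default empty pr_fmt() has already been chosen. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	/* After this, a call such as
	 *	pr_debug("post message called\n");
	 * behaves like
	 *	printk(KERN_DEBUG KBUILD_MODNAME ": " "post message called\n");
	 * so hard-coding "sst: " in each format string would print the prefix twice.
	 */
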
diff --git a/drivers/staging/intel_sst/intel_sst_pvt.c b/drivers/staging/intel_sst/intel_sst_pvt.c
index 6487e19..01f8c3b 100644
--- a/drivers/staging/intel_sst/intel_sst_pvt.c
+++ b/drivers/staging/intel_sst/intel_sst_pvt.c
@@ -29,6 +29,8 @@
  *  This file contains all private functions
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/fs.h>
 #include <linux/firmware.h>
@@ -60,7 +62,7 @@
 		}
 	}
 	if (i == MAX_ACTIVE_STREAM) {
-		pr_err("sst: max alloc_stream reached");
+		pr_err("max alloc_stream reached\n");
 		i = -EBUSY; /* active stream limit reached */
 	}
 	return i;
@@ -84,14 +86,14 @@
 				block->condition)) {
 		/* event wake */
 		if (block->ret_code < 0) {
-			pr_err("sst: stream failed %d\n", block->ret_code);
+			pr_err("stream failed %d\n", block->ret_code);
 			retval = -EBUSY;
 		} else {
-			pr_debug("sst: event up\n");
+			pr_debug("event up\n");
 			retval = 0;
 		}
 	} else {
-		pr_err("sst: signal interrupted\n");
+		pr_err("signal interrupted\n");
 		retval = -EINTR;
 	}
 	return retval;
@@ -115,18 +117,18 @@
 {
 	int retval = 0;
 
-	pr_debug("sst: sst_wait_interruptible_timeout - waiting....\n");
+	pr_debug("sst_wait_interruptible_timeout - waiting....\n");
 	if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
 						block->condition,
 						msecs_to_jiffies(timeout))) {
 		if (block->ret_code < 0)
-			pr_err("sst: stream failed %d\n", block->ret_code);
+			pr_err("stream failed %d\n", block->ret_code);
 		else
-			pr_debug("sst: event up\n");
+			pr_debug("event up\n");
 		retval = block->ret_code;
 	} else {
 		block->on = false;
-		pr_err("sst: timeout occured...\n");
+		pr_err("timeout occurred...\n");
 		/*setting firmware state as uninit so that the
 		firmware will get re-downloaded on next request
 		this is because firmare not responding for 5 sec
@@ -156,18 +158,18 @@
 	/* NOTE:
 	Observed that FW processes the alloc msg and replies even
 	before the alloc thread has finished execution */
-	pr_debug("sst: waiting for %x, condition %x\n",
+	pr_debug("waiting for %x, condition %x\n",
 		       block->sst_id, block->ops_block.condition);
 	if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
 				block->ops_block.condition,
 				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
 		/* event wake */
-		pr_debug("sst: Event wake %x\n", block->ops_block.condition);
-		pr_debug("sst: message ret: %d\n", block->ops_block.ret_code);
+		pr_debug("Event wake %x\n", block->ops_block.condition);
+		pr_debug("message ret: %d\n", block->ops_block.ret_code);
 		retval = block->ops_block.ret_code;
 	} else {
 		block->ops_block.on = false;
-		pr_err("sst: Wait timed-out %x\n", block->ops_block.condition);
+		pr_err("Wait timed-out %x\n", block->ops_block.condition);
 		/* settign firmware state as uninit so that the
 		firmware will get redownloaded on next request
 		this is because firmare not responding for 5 sec
@@ -192,14 +194,14 @@
 
 	msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
 	if (!msg) {
-		pr_err("sst: kzalloc msg failed\n");
+		pr_err("kzalloc msg failed\n");
 		return -ENOMEM;
 	}
 
 	msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
 	if (!msg->mailbox_data) {
 		kfree(msg);
-		pr_err("sst: kzalloc mailbox_data failed");
+		pr_err("kzalloc mailbox_data failed");
 		return -ENOMEM;
 	};
 	*arg = msg;
@@ -219,7 +221,7 @@
 
 	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
 	if (!msg) {
-		pr_err("sst: kzalloc msg failed\n");
+		pr_err("kzalloc msg failed\n");
 		return -ENOMEM;
 	}
 	msg->mailbox_data = NULL;
@@ -290,10 +292,10 @@
 	struct ipc_post *msg = NULL;
 
 	if (sst_create_short_msg(&msg)) {
-		pr_err("sst: mem allocation failed\n");
+		pr_err("mem allocation failed\n");
 			return -ENOMEM;
 	}
-	pr_debug("sst: ipc message sending: ENABLE_RX_TIME_SLOT\n");
+	pr_debug("ipc message sending: ENABLE_RX_TIME_SLOT\n");
 	sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
 	msg->header.part.data = status;
 	sst_drv_ctx->hs_info_blk.condition = false;
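
The helpers in intel_sst_pvt.c all follow one request/response pattern: clear a block's condition flag, queue the IPC message, sleep on the shared wait queue with a timeout, and let the reply handler fill ret_code, set condition and wake the queue. A stripped-down sketch of that handshake with placeholder demo_* names rather than the driver's own structures:

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/types.h>
	#include <linux/wait.h>

	struct demo_block {
		bool condition;		/* set by the reply handler */
		int  ret_code;		/* result filled in by the reply handler */
	};

	static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

	static int demo_wait_for_reply(struct demo_block *blk, unsigned int timeout_ms)
	{
		long left;

		blk->condition = false;
		/* ...post the IPC message to the dispatch list here... */
		left = wait_event_interruptible_timeout(demo_waitq, blk->condition,
							msecs_to_jiffies(timeout_ms));
		if (left == 0)
			return -ETIMEDOUT;	/* firmware never answered */
		if (left < 0)
			return left;		/* interrupted by a signal */
		return blk->ret_code;
	}

	static void demo_reply_handler(struct demo_block *blk, int result)
	{
		blk->ret_code = result;
		blk->condition = true;
		wake_up(&demo_waitq);
	}
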
diff --git a/drivers/staging/intel_sst/intel_sst_stream.c b/drivers/staging/intel_sst/intel_sst_stream.c
index b2c4b70..795e42a 100644
--- a/drivers/staging/intel_sst/intel_sst_stream.c
+++ b/drivers/staging/intel_sst/intel_sst_stream.c
@@ -26,6 +26,8 @@
  *  This file contains the stream operations of SST driver
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/sched.h>
@@ -45,8 +47,8 @@
  */
 int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
 {
-	if (device >= MAX_NUM_STREAMS) {
-		pr_debug("sst: device type invalid %d\n", device);
+	if (device > MAX_NUM_STREAMS_MFLD) {
+		pr_debug("device type invalid %d\n", device);
 		return -EINVAL;
 	}
 	if (sst_drv_ctx->streams[device].status == STREAM_UN_INIT) {
@@ -71,15 +73,15 @@
 		else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 4)
 			*pcm_slot = 0x0F;
 		else {
-			pr_debug("sst: No condition satisfied.. ret err\n");
+			pr_debug("No condition satisfied.. ret err\n");
 			return -EINVAL;
 		}
 	} else {
-		pr_debug("sst: this stream state is not uni-init, is %d\n",
+		pr_debug("this stream state is not un-init, is %d\n",
 				sst_drv_ctx->streams[device].status);
 		return -EBADRQC;
 	}
-	pr_debug("sst: returning slot %x\n", *pcm_slot);
+	pr_debug("returning slot %x\n", *pcm_slot);
 	return 0;
 }
 /**
@@ -96,7 +98,7 @@
 		if (sst_drv_ctx->streams[i].status == STREAM_UN_INIT)
 			return i;
 	}
-	pr_debug("sst: Didnt find empty stream for mrst\n");
+	pr_debug("Didn't find empty stream for mrst\n");
 	return -EBUSY;
 }
 
@@ -305,7 +307,7 @@
 		if (str_info->prev == STREAM_UN_INIT)
 			return -EBADRQC;
 		if (str_info->ctrl_blk.on == true) {
-			pr_err("SST ERR: control path is in use\n ");
+			pr_err("SST ERR: control path is in use\n");
 			return -EINVAL;
 		}
 		if (sst_create_short_msg(&msg))
@@ -333,7 +335,7 @@
 		}
 	} else {
 		retval = -EBADRQC;
-		pr_err("SST ERR:BADQRC for stream\n ");
+		pr_err("SST ERR: BADQRC for stream\n");
 	}
 
 	return retval;
@@ -468,7 +470,7 @@
 		}
 	} else {
 		retval = -EBADRQC;
-		pr_err("SST ERR:BADQRC for stream\n");
+		pr_err("SST ERR: BADQRC for stream\n");
 	}
 	return retval;
 }
diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
index 5c45560..85789ba 100644
--- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c
+++ b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
@@ -26,13 +26,15 @@
  *  This file contains the stream operations of SST driver
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/syscalls.h>
 #include <linux/firmware.h>
 #include <linux/sched.h>
-#include <linux/rar_register.h>
 #ifdef CONFIG_MRST_RAR_HANDLER
-#include "../../../drivers/staging/memrar/memrar.h"
+#include <linux/rar_register.h>
+#include "../memrar/memrar.h"
 #endif
 #include "intel_sst_ioctl.h"
 #include "intel_sst.h"
@@ -53,7 +55,7 @@
 	struct stream_info *str_info;
 	struct snd_sst_fw_get_stream_params *fw_params;
 
-	pr_debug("sst: get_stream for %d\n", str_id);
+	pr_debug("get_stream for %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return retval;
@@ -61,16 +63,16 @@
 	str_info = &sst_drv_ctx->streams[str_id];
 	if (str_info->status != STREAM_UN_INIT) {
 		if (str_info->ctrl_blk.on == true) {
-			pr_err("sst: control path in use\n");
+			pr_err("control path in use\n");
 			return -EINVAL;
 		}
 		if (sst_create_short_msg(&msg)) {
-			pr_err("sst: message creation failed\n");
+			pr_err("message creation failed\n");
 			return -ENOMEM;
 		}
 		fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
 		if (!fw_params) {
-			pr_err("sst: mem allcoation failed\n ");
+			pr_err("mem allocation failed\n");
 			kfree(msg);
 			return -ENOMEM;
 		}
@@ -104,7 +106,7 @@
 		get_params->codec_params.stream_type = str_info->str_type;
 		kfree(fw_params);
 	} else {
-		pr_debug("sst: Stream is not in the init state\n");
+		pr_debug("Stream is not in the init state\n");
 	}
 	return retval;
 }
@@ -125,17 +127,17 @@
 
 	BUG_ON(!str_param);
 	if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
-		pr_err("sst: Invalid operation\n");
+		pr_err("Invalid operation\n");
 		return -EINVAL;
 	}
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return retval;
-	pr_debug("sst: set_stream for %d\n", str_id);
+	pr_debug("set_stream for %d\n", str_id);
 	str_info =  &sst_drv_ctx->streams[str_id];
 	if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
 		if (str_info->ctrl_blk.on == true) {
-			pr_err("sst: control path in use\n");
+			pr_err("control path in use\n");
 			return -EAGAIN;
 		}
 		if (sst_create_large_msg(&msg))
@@ -163,7 +165,7 @@
 		}
 	} else {
 		retval = -EBADRQC;
-		pr_err("sst: BADQRC for stream\n");
+		pr_err("BADQRC for stream\n");
 	}
 	return retval;
 }
@@ -183,7 +185,7 @@
 	struct snd_sst_vol *fw_get_vol;
 	int str_id = get_vol->stream_id;
 
-	pr_debug("sst: get vol called\n");
+	pr_debug("get vol called\n");
 
 	if (sst_create_short_msg(&msg))
 		return -ENOMEM;
@@ -195,7 +197,7 @@
 	sst_drv_ctx->vol_info_blk.on = true;
 	fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
 	if (!fw_get_vol) {
-		pr_err("sst: mem allocation failed\n");
+		pr_err("mem allocation failed\n");
 		kfree(msg);
 		return -ENOMEM;
 	}
@@ -209,10 +211,10 @@
 	if (retval)
 		retval = -EIO;
 	else {
-		pr_debug("sst: stream id %d\n", fw_get_vol->stream_id);
-		pr_debug("sst: volume %d\n", fw_get_vol->volume);
-		pr_debug("sst: ramp duration %d\n", fw_get_vol->ramp_duration);
-		pr_debug("sst: ramp_type %d\n", fw_get_vol->ramp_type);
+		pr_debug("stream id %d\n", fw_get_vol->stream_id);
+		pr_debug("volume %d\n", fw_get_vol->volume);
+		pr_debug("ramp duration %d\n", fw_get_vol->ramp_duration);
+		pr_debug("ramp_type %d\n", fw_get_vol->ramp_type);
 		memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
 	}
 	return retval;
@@ -231,10 +233,10 @@
 	int retval = 0;
 	struct ipc_post *msg = NULL;
 
-	pr_debug("sst: set vol called\n");
+	pr_debug("set vol called\n");
 
 	if (sst_create_large_msg(&msg)) {
-		pr_err("sst: message creation failed\n");
+		pr_err("message creation failed\n");
 		return -ENOMEM;
 	}
 	sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
@@ -254,7 +256,7 @@
 	retval = sst_wait_interruptible_timeout(sst_drv_ctx,
 			&sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
 	if (retval) {
-		pr_err("sst: error in set_vol = %d\n", retval);
+		pr_err("error in set_vol = %d\n", retval);
 		retval = -EIO;
 	}
 	return retval;
@@ -273,10 +275,10 @@
 	int retval = 0;
 	struct ipc_post *msg = NULL;
 
-	pr_debug("sst: set mute called\n");
+	pr_debug("set mute called\n");
 
 	if (sst_create_large_msg(&msg)) {
-		pr_err("sst: message creation failed\n");
+		pr_err("message creation failed\n");
 		return -ENOMEM;
 	}
 	sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
@@ -297,7 +299,7 @@
 	retval = sst_wait_interruptible_timeout(sst_drv_ctx,
 			&sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
 	if (retval) {
-		pr_err("sst: error in set_mute = %d\n", retval);
+		pr_err("error in set_mute = %d\n", retval);
 		retval = -EIO;
 	}
 	return retval;
@@ -358,20 +360,20 @@
 		slot->device_type == SND_SST_DEVICE_PCM) {
 			retval = sst_activate_target(slot);
 			if (retval)
-				pr_err("sst: SST_Activate_target_fail\n");
+				pr_err("SST_Activate_target_fail\n");
 			else
-				pr_err("sst: SST_Activate_target_pass\n");
+				pr_err("SST_Activate_target_pass\n");
 		return retval;
 	} else if (slot->action == SND_SST_PORT_PREPARE &&
 			slot->device_type == SND_SST_DEVICE_PCM) {
 				retval = sst_prepare_target(slot);
 			if (retval)
-				pr_err("sst: SST_prepare_target_fail\n");
+				pr_err("SST_prepare_target_fail\n");
 			else
-				pr_err("sst: SST_prepare_target_pass\n");
+				pr_err("SST_prepare_target_pass\n");
 			return retval;
 	} else {
-		pr_err("sst: slot_action : %d, device_type: %d\n",
+		pr_err("slot_action : %d, device_type: %d\n",
 				slot->action, slot->device_type);
 		return retval;
 	}
@@ -383,7 +385,7 @@
 	struct ipc_post *msg;
 
 	if (sst_create_large_msg(&msg)) {
-		pr_err("sst: message creation failed\n");
+		pr_err("message creation failed\n");
 		return -ENOMEM;
 	}
 	sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
@@ -399,11 +401,11 @@
 	list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
 	spin_unlock(&sst_drv_ctx->list_spin_lock);
 	sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
-	pr_debug("sst: message sent- waiting\n");
+	pr_debug("message sent- waiting\n");
 	retval = sst_wait_interruptible_timeout(sst_drv_ctx,
 			&sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
 	if (retval)
-		pr_err("sst: target device ipc failed = 0x%x\n", retval);
+		pr_err("target device ipc failed = 0x%x\n", retval);
 	return retval;
 
 }
@@ -439,7 +441,7 @@
 					goto err;
 			} else {
 err:
-				pr_err("sst: i/p params incorrect\n");
+				pr_err("i/p params incorrect\n");
 				return -EINVAL;
 			}
 		}
@@ -460,15 +462,15 @@
 {
 	int retval, i, prepare_count = 0;
 
-	pr_debug("sst: Target Device Select\n");
+	pr_debug("Target Device Select\n");
 
 	if (target->device_route < 0 || target->device_route > 2) {
-		pr_err("sst: device route is invalid\n");
+		pr_err("device route is invalid\n");
 		return -EINVAL;
 	}
 
 	if (target->device_route != 0) {
-		pr_err("sst: Unsupported config\n");
+		pr_err("Unsupported config\n");
 		return -EIO;
 	}
 	retval = sst_target_device_validate(target);
@@ -480,18 +482,18 @@
 		return retval;
 	for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
 		if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
-			pr_debug("sst: activate called in %d\n", i);
+			pr_debug("activate called in %d\n", i);
 			retval = sst_parse_target(&target->devices[i]);
 			if (retval)
 				return retval;
 		} else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
-			pr_debug("sst: PREPARE in %d, Forwading\n", i);
+			pr_debug("PREPARE in %d, Forwarding\n", i);
 			retval = sst_parse_target(&target->devices[i]);
 			if (retval) {
-				pr_err("sst: Parse Target fail %d", retval);
+				pr_err("Parse Target fail %d\n", retval);
 				return retval;
 			}
-			pr_debug("sst: Parse Target successful %d", retval);
+			pr_debug("Parse Target successful %d\n", retval);
 			if (target->devices[i].device_type ==
 						SND_SST_DEVICE_PCM)
 				prepare_count++;
@@ -512,11 +514,11 @@
 	rar_status = rar_handle_to_bus(buffers, count);
 
 	if (count != rar_status) {
-		pr_err("sst: The rar CALL Failed");
+		pr_err("The RAR call failed\n");
 		retval = -EIO;
 	}
 	if (buffers->info.type != RAR_TYPE_AUDIO) {
-		pr_err("sst: Invalid RAR type\n");
+		pr_err("Invalid RAR type\n");
 		return -EINVAL;
 	}
 	return retval;
@@ -539,10 +541,10 @@
 		if (kbufs->in_use == false) {
 #ifdef CONFIG_MRST_RAR_HANDLER
 			if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
-				pr_debug("sst: DRM playback handling\n");
+				pr_debug("DRM playback handling\n");
 				rar_buffers.info.handle = (__u32)kbufs->addr;
 				rar_buffers.info.size = kbufs->size;
-				pr_debug("sst: rar handle 0x%x size=0x%x",
+				pr_debug("rar handle 0x%x size=0x%x\n",
 					rar_buffers.info.handle,
 					rar_buffers.info.size);
 				retval =  sst_get_RAR(&rar_buffers, 1);
@@ -552,7 +554,7 @@
 				sg_list->addr[i].addr = rar_buffers.bus_address;
 				/* rar_buffers.info.size; */
 				sg_list->addr[i].size = (__u32)kbufs->size;
-				pr_debug("sst: phyaddr[%d] 0x%x Size:0x%x\n"
+				pr_debug("phyaddr[%d] 0x%x Size:0x%x\n"
 					, i, sg_list->addr[i].addr,
 					sg_list->addr[i].size);
 			}
@@ -562,7 +564,7 @@
 					virt_to_phys((void *)
 						kbufs->addr + kbufs->offset);
 				sg_list->addr[i].size = kbufs->size;
-				pr_debug("sst: phyaddr[%d]:0x%x Size:0x%x\n"
+				pr_debug("phyaddr[%d]:0x%x Size:0x%x\n"
 				, i , sg_list->addr[i].addr, kbufs->size);
 			}
 			stream->curr_bytes += sg_list->addr[i].size;
@@ -574,7 +576,7 @@
 	}
 
 	sg_list->num_entries = i;
-	pr_debug("sst:sg list entries = %d\n", sg_list->num_entries);
+	pr_debug("sg list entries = %d\n", sg_list->num_entries);
 	return i;
 }
 
@@ -595,7 +597,7 @@
 	struct sst_stream_bufs *kbufs = NULL, *_kbufs;
 	struct stream_info *stream;
 
-	pr_debug("sst: play frame for %d\n", str_id);
+	pr_debug("play frame for %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return retval;
@@ -615,14 +617,14 @@
 	stream->curr_bytes = 0;
 	if (list_empty(&stream->bufs)) {
 		/* no user buffer available */
-		pr_debug("sst: Null buffer stream status %d\n", stream->status);
+		pr_debug("Null buffer stream status %d\n", stream->status);
 		stream->prev = stream->status;
 		stream->status = STREAM_INIT;
-		pr_debug("sst:new stream status = %d\n", stream->status);
+		pr_debug("new stream status = %d\n", stream->status);
 		if (stream->need_draining == true) {
-			pr_debug("sst:draining stream\n");
+			pr_debug("draining stream\n");
 			if (sst_create_short_msg(&msg)) {
-				pr_err("sst: mem alloc failed\n");
+				pr_err("mem allocation failed\n");
 				return -ENOMEM;
 			}
 			sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
@@ -633,7 +635,7 @@
 			spin_unlock(&sst_drv_ctx->list_spin_lock);
 			sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
 		} else if (stream->data_blk.on == true) {
-			pr_debug("sst:user list empty.. wake\n");
+			pr_debug("user list empty.. wake\n");
 			/* unblock */
 			stream->data_blk.ret_code = 0;
 			stream->data_blk.condition = true;
@@ -678,7 +680,7 @@
 	struct stream_info *stream;
 
 
-	pr_debug("sst:capture frame for %d\n", str_id);
+	pr_debug("capture frame for %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return retval;
@@ -688,19 +690,19 @@
 		if (kbufs->in_use == true) {
 			list_del(&kbufs->node);
 			kfree(kbufs);
-			pr_debug("sst:del node\n");
+			pr_debug("del node\n");
 		}
 	}
 	if (list_empty(&stream->bufs)) {
 		/* no user buffer available */
-		pr_debug("sst:Null buffer!!!!stream status %d\n",
+		pr_debug("Null buffer!!!!stream status %d\n",
 			       stream->status);
 		stream->prev = stream->status;
 		stream->status = STREAM_INIT;
-		pr_debug("sst:new stream status = %d\n",
+		pr_debug("new stream status = %d\n",
 			       stream->status);
 		if (stream->data_blk.on == true) {
-			pr_debug("sst:user list empty.. wake\n");
+			pr_debug("user list empty.. wake\n");
 			/* unblock */
 			stream->data_blk.ret_code = 0;
 			stream->data_blk.condition = true;
@@ -731,7 +733,7 @@
 	stream->cumm_bytes += stream->curr_bytes;
 	stream->curr_bytes = 0;
 
-    pr_debug("sst:Cum bytes  = %d\n", stream->cumm_bytes);
+	pr_debug("Cum bytes = %d\n", stream->cumm_bytes);
 	return 0;
 }
 
@@ -743,7 +745,7 @@
 		if (bufs->buff_entry[i].size < min_val)
 			min_val = bufs->buff_entry[i].size;
 	}
-	pr_debug("sst:min_val = %d\n", min_val);
+	pr_debug("min_val = %d\n", min_val);
 	return min_val;
 }
 
@@ -754,7 +756,7 @@
 		if (bufs->buff_entry[i].size > max_val)
 			max_val = bufs->buff_entry[i].size;
 	}
-	pr_debug("sst:max_val = %d\n", max_val);
+	pr_debug("max_val = %d\n", max_val);
 	return max_val;
 }
 
@@ -773,7 +775,7 @@
 			if (dbufs->ibufs->entries == dbufs->obufs->entries)
 				return 0;
 			else {
-				pr_err("sst: RAR entries dont match\n");
+				pr_err("RAR entries don't match\n");
 				 return -EINVAL;
 			}
 		} else
@@ -783,26 +785,26 @@
 	}
 #endif
 	if (!str_info->decode_ibuf) {
-		pr_debug("sst:no i/p buffers, trying full size\n");
+		pr_debug("no i/p buffers, trying full size\n");
 		str_info->decode_isize = cum_input_given;
 		str_info->decode_ibuf = kzalloc(str_info->decode_isize,
 						GFP_KERNEL);
 		str_info->idecode_alloc = str_info->decode_isize;
 	}
 	if (!str_info->decode_ibuf) {
-		pr_debug("sst:buff alloc failed, try max size\n");
+		pr_debug("buff alloc failed, try max size\n");
 		str_info->decode_isize = calculate_max_size(dbufs->ibufs);
 		str_info->decode_ibuf = kzalloc(
 				str_info->decode_isize, GFP_KERNEL);
 		str_info->idecode_alloc = str_info->decode_isize;
 	}
 	if (!str_info->decode_ibuf) {
-		pr_debug("sst:buff alloc failed, try min size\n");
+		pr_debug("buff alloc failed, try min size\n");
 		str_info->decode_isize = calculate_min_size(dbufs->ibufs);
 		str_info->decode_ibuf = kzalloc(str_info->decode_isize,
 						GFP_KERNEL);
 		if (!str_info->decode_ibuf) {
-			pr_err("sst: mem allocation failed\n");
+			pr_err("mem allocation failed\n");
 			return -ENOMEM;
 		}
 		str_info->idecode_alloc = str_info->decode_isize;
@@ -820,7 +822,7 @@
 	struct ipc_post *msg = NULL;
 	int retval = 0;
 
-	pr_debug("SST DBGsst_set_mute:called\n");
+	pr_debug("SST DBG:sst_set_mute:called\n");
 
 	if (str_info->decode_ibuf_type == SST_BUF_RAR) {
 #ifdef CONFIG_MRST_RAR_HANDLER
@@ -857,7 +859,7 @@
 	dec_info->input_bytes_consumed = 0;
 	dec_info->output_bytes_produced = 0;
 	if (sst_create_large_msg(&msg)) {
-		pr_err("sst: message creation failed\n");
+		pr_err("message creation failed\n");
 		return -ENOMEM;
 	}
 
@@ -878,13 +880,13 @@
 	return retval;
 }
 
+#ifdef CONFIG_MRST_RAR_HANDLER
 static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
 			struct snd_sst_dbufs *dbufs,
 			int *input_index, int *in_copied,
 			int *input_index_valid_size, int *new_entry_flag)
 {
 	int retval = 0;
-#ifdef CONFIG_MRST_RAR_HANDLER
 	int i;
 
 	if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
@@ -894,7 +896,7 @@
 				dbufs->ibufs->buff_entry[i].buffer,
 				sizeof(__u32));
 		if (retval) {
-			pr_err("sst:cpy from user fail\n");
+			pr_err("cpy from user fail\n");
 			return -EAGAIN;
 		}
 		rar_buffers.info.type = dbufs->ibufs->type;
@@ -919,9 +921,10 @@
 		str_info->decode_ibuf_type = dbufs->ibufs->type;
 		*in_copied = str_info->decode_isize;
 	}
-#endif
 	return retval;
 }
+#endif
+
 /*This function is used to prepare the kernel input buffers with contents
 before sending for decode*/
 static int sst_prepare_input_buffers(struct stream_info *str_info,
@@ -931,7 +934,7 @@
 {
 	int i, cpy_size, retval = 0;
 
-	pr_debug("sst:input_index = %d, input entries = %d\n",
+	pr_debug("input_index = %d, input entries = %d\n",
 			 *input_index, dbufs->ibufs->entries);
 	for (i = *input_index; i < dbufs->ibufs->entries; i++) {
 #ifdef CONFIG_MRST_RAR_HANDLER
@@ -939,7 +942,7 @@
 			dbufs, input_index, in_copied,
 				input_index_valid_size, new_entry_flag);
 		if (retval) {
-			pr_err("sst: In prepare input buffers for RAR\n");
+			pr_err("In prepare input buffers for RAR\n");
 			return -EIO;
 		}
 #endif
@@ -947,10 +950,10 @@
 		if (*input_index_valid_size == 0)
 			*input_index_valid_size =
 				dbufs->ibufs->buff_entry[i].size;
-		pr_debug("sst:inout addr = %p, size = %d\n",
+		pr_debug("inout addr = %p, size = %d\n",
 			dbufs->ibufs->buff_entry[i].buffer,
 			*input_index_valid_size);
-		pr_debug("sst:decode_isize = %d, in_copied %d\n",
+		pr_debug("decode_isize = %d, in_copied %d\n",
 			str_info->decode_isize, *in_copied);
 		if (*input_index_valid_size <=
 					(str_info->decode_isize - *in_copied))
@@ -958,12 +961,12 @@
 		else
 			cpy_size = str_info->decode_isize - *in_copied;
 
-		pr_debug("sst:cpy size = %d\n", cpy_size);
+		pr_debug("cpy size = %d\n", cpy_size);
 		if (!dbufs->ibufs->buff_entry[i].buffer) {
-			pr_err("sst: i/p buffer is null\n");
+			pr_err("i/p buffer is null\n");
 			return -EINVAL;
 		}
-		pr_debug("sst:Try copy To %p, From %p, size %d\n",
+		pr_debug("Try copy To %p, From %p, size %d\n",
 				str_info->decode_ibuf + *in_copied,
 				dbufs->ibufs->buff_entry[i].buffer, cpy_size);
 
@@ -972,22 +975,22 @@
 				(void *) dbufs->ibufs->buff_entry[i].buffer,
 				cpy_size);
 		if (retval) {
-			pr_err("sst: copy from user failed\n");
+			pr_err("copy from user failed\n");
 			return -EIO;
 		}
 		*in_copied += cpy_size;
 		*input_index_valid_size -= cpy_size;
-		pr_debug("sst:in buff size = %d, in_copied = %d\n",
+		pr_debug("in buff size = %d, in_copied = %d\n",
 			*input_index_valid_size, *in_copied);
 		if (*input_index_valid_size != 0) {
-			pr_debug("sst:more input buffers left\n");
+			pr_debug("more input buffers left\n");
 			dbufs->ibufs->buff_entry[i].buffer += cpy_size;
 			break;
 		}
 		if (*in_copied == str_info->decode_isize &&
 			*input_index_valid_size == 0 &&
 			(i+1) <= dbufs->ibufs->entries) {
-			pr_debug("sst:all input buffers copied\n");
+			pr_debug("all input buffers copied\n");
 			*new_entry_flag = true;
 			*input_index = i + 1;
 			break;
@@ -1005,23 +1008,23 @@
 
 {
 	int i, cpy_size, retval = 0;
-	pr_debug("sst:output_index = %d, output entries = %d\n",
+	pr_debug("output_index = %d, output entries = %d\n",
 				*output_index,
 				dbufs->obufs->entries);
 	for (i = *output_index; i < dbufs->obufs->entries; i++) {
 		*output_index = i;
-		pr_debug("sst:output addr = %p, size = %d\n",
+		pr_debug("output addr = %p, size = %d\n",
 			dbufs->obufs->buff_entry[i].buffer,
 			dbufs->obufs->buff_entry[i].size);
-		pr_debug("sst:output_size = %d, out_copied = %d\n",
+		pr_debug("output_size = %d, out_copied = %d\n",
 				output_size, *out_copied);
 		if (dbufs->obufs->buff_entry[i].size <
 				(output_size - *out_copied))
 			cpy_size = dbufs->obufs->buff_entry[i].size;
 		else
 			cpy_size = output_size - *out_copied;
-		pr_debug("sst:cpy size = %d\n", cpy_size);
-		pr_debug("sst:Try copy To: %p, From %p, size %d\n",
+		pr_debug("cpy size = %d\n", cpy_size);
+		pr_debug("Try copy To: %p, From %p, size %d\n",
 				dbufs->obufs->buff_entry[i].buffer,
 				sst_drv_ctx->mmap_mem + *out_copied,
 				cpy_size);
@@ -1029,13 +1032,13 @@
 					sst_drv_ctx->mmap_mem + *out_copied,
 					cpy_size);
 		if (retval) {
-			pr_err("sst: copy to user failed\n");
+			pr_err("copy to user failed\n");
 			return -EIO;
 		} else
-			pr_debug("sst:copy to user passed\n");
+			pr_debug("copy to user passed\n");
 		*out_copied += cpy_size;
 		dbufs->obufs->buff_entry[i].size -= cpy_size;
-		pr_debug("sst:o/p buff size %d, out_copied %d\n",
+		pr_debug("o/p buff size %d, out_copied %d\n",
 			dbufs->obufs->buff_entry[i].size, *out_copied);
 		if (dbufs->obufs->buff_entry[i].size != 0) {
 			*output_index = i;
@@ -1073,7 +1076,7 @@
 	unsigned long long input_bytes, output_bytes;
 
 	sst_drv_ctx->scard_ops->power_down_pmic();
-	pr_debug("sst: Powering_down_PMIC...\n");
+	pr_debug("Powering_down_PMIC...\n");
 
 	retval = sst_validate_strid(str_id);
 	if (retval)
@@ -1081,7 +1084,7 @@
 
 	str_info = &sst_drv_ctx->streams[str_id];
 	if (str_info->status != STREAM_INIT) {
-		pr_err("sst: invalid stream state = %d\n",
+		pr_err("invalid stream state = %d\n",
 			       str_info->status);
 		return -EINVAL;
 	}
@@ -1098,7 +1101,7 @@
 	retval =  sst_allocate_decode_buf(str_info, dbufs,
 				cum_input_given, cum_output_given);
 	if (retval) {
-		pr_err("sst: mem allocation failed, abort!!!\n");
+		pr_err("mem allocation failed, abort!!!\n");
 		retval = -ENOMEM;
 		goto finish;
 	}
@@ -1114,7 +1117,7 @@
 			dbufs, &input_index, &in_copied,
 			&input_index_valid_size, &new_entry_flag);
 		if (retval) {
-			pr_err("sst: prepare in buffers failed\n");
+			pr_err("prepare in buffers failed\n");
 			goto finish;
 		}
 
@@ -1145,8 +1148,8 @@
 				str_info->decode_osize = dbufs->obufs->
 					buff_entry[output_index].size;
 				str_info->decode_obuf_type = dbufs->obufs->type;
-				pr_debug("sst:DRM handling\n");
-				pr_debug("o/p_add=0x%lu Size=0x%x",
+				pr_debug("DRM handling\n");
+				pr_debug("o/p_add=0x%lx Size=0x%x\n",
 					(unsigned long) str_info->decode_obuf,
 					str_info->decode_osize);
 			} else {
@@ -1160,7 +1163,7 @@
 		if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
 			if (str_info->decode_isize > in_copied) {
 				str_info->decode_isize = in_copied;
-				pr_debug("sst:i/p size = %d\n",
+				pr_debug("i/p size = %d\n",
 						str_info->decode_isize);
 			}
 		}
@@ -1168,20 +1171,19 @@
 
 		retval = sst_send_decode_mess(str_id, str_info, &dec_info);
 		if (retval || dec_info.input_bytes_consumed == 0) {
-			pr_err(
-				"SST ERR: mess failed or no input consumed\n");
+			pr_err("SST ERR: mess failed or no input consumed\n");
 			goto finish;
 		}
 		input_bytes = dec_info.input_bytes_consumed;
 		output_bytes = dec_info.output_bytes_produced;
 
-		pr_debug("sst:in_copied=%d, con=%lld, prod=%lld\n",
+		pr_debug("in_copied=%d, con=%lld, prod=%lld\n",
 			in_copied, input_bytes, output_bytes);
 		if (dbufs->obufs->type == SST_BUF_RAR) {
 			output_index += 1;
 			if (output_index == dbufs->obufs->entries) {
 				copy_in_done = true;
-				pr_debug("sst:all i/p cpy done\n");
+				pr_debug("all i/p cpy done\n");
 			}
 			total_output += output_bytes;
 		} else {
@@ -1190,14 +1192,14 @@
 			retval = sst_prepare_output_buffers(str_info, dbufs,
 				&output_index, output_size, &out_copied);
 			if (retval) {
-				pr_err("sst:prep out buff fail\n");
+				pr_err("prep out buff fail\n");
 				goto finish;
 			}
 			if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
 				if (in_copied != input_bytes) {
 					int bytes_left = in_copied -
 								input_bytes;
-					pr_debug("sst:bytes %d\n",
+					pr_debug("bytes %d\n",
 							bytes_left);
 					if (new_entry_flag == true)
 						input_index--;
@@ -1237,7 +1239,7 @@
 			total_output += out_copied;
 			if (str_info->decode_osize != out_copied) {
 				str_info->decode_osize -= out_copied;
-				pr_debug("sst:output size modified = %d\n",
+				pr_debug("output size modified = %d\n",
 						str_info->decode_osize);
 			}
 		}
@@ -1251,16 +1253,16 @@
 		} else {
 			if (total_output == cum_output_given) {
 				copy_out_done = true;
-				pr_debug("sst:all o/p cpy done\n");
+				pr_debug("all o/p cpy done\n");
 			}
 
 			if (total_input == cum_input_given) {
 				copy_in_done = true;
-				pr_debug("sst:all i/p cpy done\n");
+				pr_debug("all i/p cpy done\n");
 			}
 		}
 
-		pr_debug("sst:copy_out = %d, copy_in = %d\n",
+		pr_debug("copy_out = %d, copy_in = %d\n",
 				copy_out_done, copy_in_done);
 	}
 
diff --git a/drivers/staging/intel_sst/intelmid.c b/drivers/staging/intel_sst/intelmid.c
index 4c0264c..fb22921 100644
--- a/drivers/staging/intel_sst/intelmid.c
+++ b/drivers/staging/intel_sst/intelmid.c
@@ -24,6 +24,9 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  * ALSA driver for Intel MID sound card chipset
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
@@ -101,12 +104,10 @@
 static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
 					int cmd)
 {
-	int ret_val = 0;
+	int ret_val = 0, str_id;
 	struct snd_intelmad *intelmaddata;
 	struct mad_stream_pvt *stream;
-	/*struct stream_buffer buffer_to_sst;*/
-
-
+	struct intel_sst_pcm_control *sst_ops;
 
 	WARN_ON(!substream);
 
@@ -115,38 +116,35 @@
 
 	WARN_ON(!intelmaddata->sstdrv_ops);
 	WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
+	sst_ops  = intelmaddata->sstdrv_ops->pcm_control;
+	str_id = stream->stream_info.str_id;
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		pr_debug("sst: Trigger Start\n");
-		ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_START,
-				&stream->stream_info.str_id);
+		pr_debug("Trigger Start\n");
+		ret_val = sst_ops->device_control(SST_SND_START, &str_id);
 		if (ret_val)
 			return ret_val;
 		stream->stream_status = RUNNING;
 		stream->substream = substream;
-		stream->stream_status = RUNNING;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
-		pr_debug("sst: in stop\n");
-		ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
-				&stream->stream_info.str_id);
+		pr_debug("in stop\n");
+		ret_val = sst_ops->device_control(SST_SND_DROP, &str_id);
 		if (ret_val)
 			return ret_val;
 		stream->stream_status = DROPPED;
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		pr_debug("sst: in pause\n");
-		ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_PAUSE,
-				&stream->stream_info.str_id);
+		pr_debug("in pause\n");
+		ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id);
 		if (ret_val)
 			return ret_val;
 		stream->stream_status = PAUSED;
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		pr_debug("sst: in pause release\n");
-		ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_RESUME,
-						&stream->stream_info.str_id);
+		pr_debug("in pause release\n");
+		ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id);
 		if (ret_val)
 			return ret_val;
 		stream->stream_status = RUNNING;
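
The trigger path above is rewritten to go through the sstdrv_ops->pcm_control ops table instead of the old control_set() entry point, so the ALSA side no longer cares which SST backend is wired in. A hedged sketch of the same command mapping, assuming only the device_control(int, void *) shape visible in this hunk and the usual <sound/pcm.h> and driver headers for the names used:

	/* Illustrative only: map ALSA trigger commands onto SST control ops. */
	static int demo_trigger_to_sst(struct intel_sst_pcm_control *ops, int cmd,
				       int str_id)
	{
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
			return ops->device_control(SST_SND_START, &str_id);
		case SNDRV_PCM_TRIGGER_STOP:
			return ops->device_control(SST_SND_DROP, &str_id);
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			return ops->device_control(SST_SND_PAUSE, &str_id);
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
			return ops->device_control(SST_SND_RESUME, &str_id);
		default:
			return -EINVAL;
		}
	}
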
@@ -170,19 +168,19 @@
 	int ret_val = 0;
 	struct snd_intelmad *intelmaddata;
 
-	pr_debug("sst: pcm_prepare called\n");
+	pr_debug("pcm_prepare called\n");
 
 	WARN_ON(!substream);
 	stream = substream->runtime->private_data;
 	intelmaddata = snd_pcm_substream_chip(substream);
-	pr_debug("sst: pb cnt = %d cap cnt = %d\n",\
+	pr_debug("pb cnt = %d cap cnt = %d\n",\
 		intelmaddata->playback_cnt,
 		intelmaddata->capture_cnt);
 
 	if (stream->stream_info.str_id) {
-		pr_debug("sst: Prepare called for already set stream\n");
-		ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_DROP,
-					&stream->stream_info.str_id);
+		pr_debug("Prepare called for already set stream\n");
+		ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
+				SST_SND_DROP, &stream->stream_info.str_id);
 		return ret_val;
 	}
 
@@ -197,7 +195,7 @@
 	/* return back the stream id */
 	snprintf(substream->pcm->id, sizeof(substream->pcm->id),
 			"%d", stream->stream_info.str_id);
-	pr_debug("sst: stream id to user = %s\n",
+	pr_debug("stream id to user = %s\n",
 			substream->pcm->id);
 
 	ret_val = snd_intelmad_init_stream(substream);
@@ -212,7 +210,7 @@
 {
 	int ret_val;
 
-	pr_debug("sst: snd_intelmad_hw_params called\n");
+	pr_debug("snd_intelmad_hw_params called\n");
 	ret_val = snd_pcm_lib_malloc_pages(substream,
 			params_buffer_bytes(hw_params));
 	memset(substream->runtime->dma_area, 0,
@@ -223,7 +221,7 @@
 
 static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
 {
-	pr_debug("sst: snd_intelmad_hw_free called\n");
+	pr_debug("snd_intelmad_hw_free called\n");
 	return snd_pcm_lib_free_pages(substream);
 }
 
@@ -250,15 +248,15 @@
 	if (stream->stream_status == INIT)
 		return 0;
 
-	ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_BUFFER_POINTER,
-				&stream->stream_info);
+	ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
+			SST_SND_BUFFER_POINTER, &stream->stream_info);
 	if (ret_val) {
-		pr_err("sst: error code = 0x%x\n", ret_val);
+		pr_err("error code = 0x%x\n", ret_val);
 		return ret_val;
 	}
-	pr_debug("sst: samples reported out 0x%llx\n",
+	pr_debug("samples reported out 0x%llx\n",
 			stream->stream_info.buffer_ptr);
-	pr_debug("sst: Frame bits:: %d period_count :: %d\n",
+	pr_debug("Frame bits:: %d period_count :: %d\n",
 			(int)substream->runtime->frame_bits,
 			(int)substream->runtime->period_size);
 
@@ -277,26 +275,26 @@
 {
 	struct snd_intelmad *intelmaddata;
 	struct mad_stream_pvt *stream;
-	int ret_val = 0;
+	int ret_val = 0, str_id;
 
 	WARN_ON(!substream);
 
 	stream = substream->runtime->private_data;
+	str_id = stream->stream_info.str_id;
 
-	pr_debug("sst: snd_intelmad_close called\n");
+	pr_debug("snd_intelmad_close called for %d\n", str_id);
 	intelmaddata = snd_pcm_substream_chip(substream);
 
-	pr_debug("sst: str id = %d\n", stream->stream_info.str_id);
+	pr_debug("str id = %d\n", stream->stream_info.str_id);
 	if (stream->stream_info.str_id) {
 		/* SST API to actually stop/free the stream */
-		ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_FREE,
-				&stream->stream_info.str_id);
+		ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id);
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 			intelmaddata->playback_cnt--;
 		else
 			intelmaddata->capture_cnt--;
 	}
-	pr_debug("sst: snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
+	pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
 		intelmaddata->playback_cnt, intelmaddata->capture_cnt);
 	kfree(substream->runtime->private_data);
 	return ret_val;
@@ -319,7 +317,7 @@
 
 	WARN_ON(!substream);
 
-	pr_debug("sst: snd_intelmad_open called\n");
+	pr_debug("snd_intelmad_open called\n");
 
 	intelmaddata = snd_pcm_substream_chip(substream);
 	runtime = substream->runtime;
@@ -456,17 +454,17 @@
 {
 
 	if (!jack) {
-		pr_debug("sst: MAD error jack empty\n");
+		pr_debug("MAD error jack empty\n");
 
 	} else {
-		pr_debug("sst: MAD send jack report for = %d!!!\n", status);
-		pr_debug("sst: MAD send jack report %d\n", jack->type);
+		pr_debug("MAD send jack report for = %d!!!\n", status);
+		pr_debug("MAD send jack report %d\n", jack->type);
 		snd_jack_report(jack, status);
 
 		/*button pressed and released */
 		if (buttonpressevent)
 			snd_jack_report(jack, 0);
-		pr_debug("sst: MAD sending jack report Done !!!\n");
+		pr_debug("MAD sending jack report Done !!!\n");
 	}
 
 
@@ -490,7 +488,7 @@
 	if (intsts & 0x4) {
 
 		if (!(intelmid_audio_interrupt_enable)) {
-			pr_debug("sst: Audio interrupt enable\n");
+			pr_debug("Audio interrupt enable\n");
 			sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
 
 			sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
@@ -500,7 +498,7 @@
 
 		}
 		/* send headphone detect */
-		pr_debug("sst: MAD headphone %d\n", intsts & 0x4);
+		pr_debug("MAD headphone %d\n", intsts & 0x4);
 		jack = &intelmaddata->jack[0].jack;
 		present = !(intelmaddata->jack[0].jack_status);
 		intelmaddata->jack[0].jack_status = present;
@@ -510,7 +508,7 @@
 
 	if (intsts & 0x2) {
 		/* send short push */
-		pr_debug("sst: MAD short push %d\n", intsts & 0x2);
+		pr_debug("MAD short push %d\n", intsts & 0x2);
 		jack = &intelmaddata->jack[2].jack;
 		present = 1;
 		jack_event_flag = 1;
@@ -518,7 +516,7 @@
 	}
 	if (intsts & 0x1) {
 		/* send long push */
-		pr_debug("sst: MAD long push %d\n", intsts & 0x1);
+		pr_debug("MAD long push %d\n", intsts & 0x1);
 		jack = &intelmaddata->jack[3].jack;
 		present = 1;
 		jack_event_flag = 1;
@@ -526,7 +524,7 @@
 	}
 	if (intsts & 0x8) {
 		if (!(intelmid_audio_interrupt_enable)) {
-			pr_debug("sst: Audio interrupt enable\n");
+			pr_debug("Audio interrupt enable\n");
 			sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
 
 			sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
@@ -535,7 +533,7 @@
 			intelmaddata->jack[1].jack_status = 0;
 		}
 		/* send headset detect */
-		pr_debug("sst: MAD headset = %d\n", intsts & 0x8);
+		pr_debug("MAD headset = %d\n", intsts & 0x8);
 		jack = &intelmaddata->jack[1].jack;
 		present = !(intelmaddata->jack[1].jack_status);
 		intelmaddata->jack[1].jack_status = present;
@@ -558,10 +556,10 @@
 
 	scard_ops = intelmaddata->sstdrv_ops->scard_ops;
 
-	pr_debug("sst: previous value: %x\n", intelmaddata->jack_prev_state);
+	pr_debug("previous value: %x\n", intelmaddata->jack_prev_state);
 
 	if (!(intelmid_audio_interrupt_enable)) {
-		pr_debug("sst: Audio interrupt enable\n");
+		pr_debug("Audio interrupt enable\n");
 		intelmaddata->jack_prev_state = 0xC0;
 		intelmid_audio_interrupt_enable = 1;
 	}
@@ -572,12 +570,12 @@
 			sc_access_read.reg_addr = 0x201;
 			sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
 			value = (sc_access_read.value);
-			pr_debug("sst: value returned = 0x%x\n", value);
+			pr_debug("value returned = 0x%x\n", value);
 		}
 
 		if (jack_prev_state == 0xc0 && value == 0x40) {
 			/*headset detected. */
-			pr_debug("sst: MAD headset inserted\n");
+			pr_debug("MAD headset inserted\n");
 			jack = &intelmaddata->jack[1].jack;
 			present = 1;
 			jack_event_flag = 1;
@@ -587,7 +585,7 @@
 
 		if (jack_prev_state == 0xc0 && value == 0x00) {
 			/* headphone  detected. */
-			pr_debug("sst: MAD headphone inserted\n");
+			pr_debug("MAD headphone inserted\n");
 			jack = &intelmaddata->jack[0].jack;
 			present = 1;
 			jack_event_flag = 1;
@@ -596,9 +594,9 @@
 
 		if (jack_prev_state == 0x40 && value == 0xc0) {
 			/*headset  removed*/
-			pr_debug("sst: Jack headset status %d\n",
+			pr_debug("Jack headset status %d\n",
 				intelmaddata->jack[1].jack_status);
-			pr_debug("sst: MAD headset removed\n");
+			pr_debug("MAD headset removed\n");
 			jack = &intelmaddata->jack[1].jack;
 			present = 0;
 			jack_event_flag = 1;
@@ -607,9 +605,9 @@
 
 		if (jack_prev_state == 0x00 && value == 0xc0) {
 			/* headphone  detected. */
-			pr_debug("sst: Jack headphone status %d\n",
+			pr_debug("Jack headphone status %d\n",
 					intelmaddata->jack[0].jack_status);
-			pr_debug("sst: headphone removed\n");
+			pr_debug("headphone removed\n");
 			jack = &intelmaddata->jack[0].jack;
 			present = 0;
 			jack_event_flag = 1;
@@ -618,7 +616,7 @@
 		if (jack_prev_state == 0x40 && value == 0x00) {
 			/*button pressed*/
 			do_gettimeofday(&intelmaddata->jack[1].buttonpressed);
-			pr_debug("sst: MAD button press detected n");
+			pr_debug("MAD button press detected\n");
 		}
 
 
@@ -628,19 +626,19 @@
 				do_gettimeofday(
 					&intelmaddata->jack[1].buttonreleased);
 				/*button pressed */
-				pr_debug("sst: Button Released detected\n");
+				pr_debug("Button Released detected\n");
 				timediff = intelmaddata->jack[1].
 					buttonreleased.tv_sec - intelmaddata->
 					jack[1].buttonpressed.tv_sec;
 				buttonpressflag = 1;
 				if (timediff > 1) {
-					pr_debug("sst: long press detected\n");
+					pr_debug("long press detected\n");
 					/* send headphone detect/undetect */
 					jack = &intelmaddata->jack[3].jack;
 					present = 1;
 					jack_event_flag = 1;
 				} else {
-					pr_debug("sst: short press detected\n");
+					pr_debug("short press detected\n");
 					/* send headphone detect/undetect */
 					jack = &intelmaddata->jack[2].jack;
 					present = 1;
@@ -667,24 +665,24 @@
 		sc_access_read.reg_addr = 0x132;
 		sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
 		value = (sc_access_read.value);
-		pr_debug("sst: value returned = 0x%x\n", value);
+		pr_debug("value returned = 0x%x\n", value);
 	}
 	if (intsts & 0x1) {
-		pr_debug("sst: headset detected\n");
+		pr_debug("headset detected\n");
 		/* send headset detect/undetect */
 		jack = &intelmaddata->jack[1].jack;
 		present = (value == 0x1) ? 1 : 0;
 		jack_event_flag = 1;
 	}
 	if (intsts & 0x2) {
-		pr_debug("sst: headphone detected\n");
+		pr_debug("headphone detected\n");
 		/* send headphone detect/undetect */
 		jack = &intelmaddata->jack[0].jack;
 		present = (value == 0x2) ? 1 : 0;
 		jack_event_flag = 1;
 	}
 	if (intsts & 0x4) {
-		pr_debug("sst: short push detected\n");
+		pr_debug("short push detected\n");
 		/* send short push */
 		jack = &intelmaddata->jack[2].jack;
 		present = 1;
@@ -692,7 +690,7 @@
 		buttonpressflag = 1;
 	}
 	if (intsts & 0x8) {
-		pr_debug("sst: long push detected\n");
+		pr_debug("long push detected\n");
 		/* send long push */
 		jack = &intelmaddata->jack[3].jack;
 		present = 1;
@@ -738,12 +736,12 @@
 	u32 regbase = AUDINT_BASE, regsize = 8;
 	char *drv_name;
 
-	pr_debug("sst: irq reg done, regbase 0x%x, regsize 0x%x\n",
+	pr_debug("irq reg done, regbase 0x%x, regsize 0x%x\n",
 					regbase, regsize);
 	intelmaddata->int_base = ioremap_nocache(regbase, regsize);
 	if (!intelmaddata->int_base)
-		pr_err("sst: Mapping of cache failed\n");
-	pr_debug("sst: irq = 0x%x\n", intelmaddata->irq);
+		pr_err("Mapping of cache failed\n");
+	pr_debug("irq = 0x%x\n", intelmaddata->irq);
 	if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
 		drv_name = DRIVER_NAME_MFLD;
 	else
@@ -753,7 +751,7 @@
 				IRQF_SHARED, drv_name,
 				intelmaddata);
 	if (ret_val)
-		pr_err("sst: cannot register IRQ\n");
+		pr_err("cannot register IRQ\n");
 	return ret_val;
 }
 
@@ -775,10 +773,10 @@
 		if (ret_val)
 			return ret_val;
 		sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
-		pr_debug("sst: orginal n extrated vendor id = 0x%x %d\n",
+	pr_debug("original and extracted vendor id = 0x%x %d\n",
 				vendor_addr.value, sst_card_vendor_id);
 		if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
-			pr_err("sst: vendor card not supported!!\n");
+			pr_err("vendor card not supported!!\n");
 			return -EIO;
 		}
 	} else
@@ -801,7 +799,7 @@
 	/* registering with SST driver to get access to SST APIs to use */
 	ret_val = register_sst_card(intelmaddata->sstdrv_ops);
 	if (ret_val) {
-		pr_err("sst: sst card registration failed\n");
+		pr_err("sst card registration failed\n");
 		return ret_val;
 	}
 
@@ -832,7 +830,7 @@
 	char name[32] = INTEL_MAD;
 	struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
 
-	pr_debug("sst: called for pb %d, cp %d, idx %d\n", pb, cap, index);
+	pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index);
 	ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
 	if (ret_val)
 		return ret_val;
@@ -878,7 +876,7 @@
 
 	WARN_ON(!card);
 	WARN_ON(!intelmaddata);
-	pr_debug("sst: snd_intelmad_pcm called\n");
+	pr_debug("snd_intelmad_pcm called\n");
 	ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
 	if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
 		return ret_val;
@@ -903,7 +901,7 @@
 	struct snd_jack *jack;
 	int retval;
 
-	pr_debug("sst: snd_intelmad_jack called\n");
+	pr_debug("snd_intelmad_jack called\n");
 	jack = &intelmaddata->jack[0].jack;
 	retval = snd_jack_new(intelmaddata->card, "Headphone",
 				SND_JACK_HEADPHONE, &jack);
@@ -982,9 +980,9 @@
 		ret_val = snd_ctl_add(card,
 				snd_ctl_new1(&controls[idx],
 				intelmaddata));
-		pr_debug("sst: mixer[idx]=%d added\n", idx);
+		pr_debug("mixer[idx]=%d added\n", idx);
 		if (ret_val) {
-			pr_err("sst: in adding of control index = %d\n", idx);
+			pr_err("in adding of control index = %d\n", idx);
 			break;
 		}
 	}
@@ -999,7 +997,7 @@
 
 	intelmaddata = device->device_data;
 
-	pr_debug("sst: snd_intelmad_dev_free called\n");
+	pr_debug("snd_intelmad_dev_free called\n");
 	snd_card_free(intelmaddata->card);
 	/*genl_unregister_family(&audio_event_genl_family);*/
 	unregister_sst_card(intelmaddata->sstdrv_ops);
@@ -1040,23 +1038,23 @@
 	const struct platform_device_id *id = platform_get_device_id(pdev);
 	unsigned int cpu_id = (unsigned int)id->driver_data;
 
-	pr_debug("sst: probe for %s cpu_id %d\n", pdev->name, cpu_id);
+	pr_debug("probe for %s cpu_id %d\n", pdev->name, cpu_id);
 	if (!strcmp(pdev->name, DRIVER_NAME_MRST))
-		pr_debug("sst: detected MRST\n");
+		pr_debug("detected MRST\n");
 	else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
-		pr_debug("sst: detected MFLD\n");
+		pr_debug("detected MFLD\n");
 	else {
-		pr_err("sst: detected unknown device abort!!\n");
+		pr_err("detected unknown device abort!!\n");
 		return -EIO;
 	}
 	if ((cpu_id < CPU_CHIP_LINCROFT) || (cpu_id > CPU_CHIP_PENWELL)) {
-		pr_err("sst: detected unknown cpu_id abort!!\n");
+		pr_err("detected unknown cpu_id abort!!\n");
 		return -EIO;
 	}
 	/* allocate memory for saving internal context and working */
 	intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
 	if (!intelmaddata) {
-		pr_debug("sst: mem alloctn fail\n");
+		pr_debug("mem allocation fail\n");
 		return -ENOMEM;
 	}
 
@@ -1064,7 +1062,7 @@
 	intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
 					GFP_KERNEL);
 	if (!intelmaddata->sstdrv_ops) {
-		pr_err("sst: mem allocation for ops fail\n");
+		pr_err("mem allocation for ops fail\n");
 		kfree(intelmaddata);
 		return -ENOMEM;
 	}
@@ -1073,7 +1071,7 @@
 	/* create a card instance with ALSA framework */
 	ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
 	if (ret_val) {
-		pr_err("sst: snd_card_create fail\n");
+		pr_err("snd_card_create fail\n");
 		goto free_allocs;
 	}
 
@@ -1092,7 +1090,7 @@
 	/* registering with LPE driver to get access to SST APIs to use */
 	ret_val = snd_intelmad_sst_register(intelmaddata);
 	if (ret_val) {
-		pr_err("sst: snd_intelmad_sst_register failed\n");
+		pr_err("snd_intelmad_sst_register failed\n");
 		goto free_allocs;
 	}
 
@@ -1100,19 +1098,19 @@
 
 	ret_val = snd_intelmad_pcm(card, intelmaddata);
 	if (ret_val) {
-		pr_err("sst: snd_intelmad_pcm failed\n");
+		pr_err("snd_intelmad_pcm failed\n");
 		goto free_allocs;
 	}
 
 	ret_val = snd_intelmad_mixer(intelmaddata);
 	if (ret_val) {
-		pr_err("sst: snd_intelmad_mixer failed\n");
+		pr_err("snd_intelmad_mixer failed\n");
 		goto free_allocs;
 	}
 
 	ret_val = snd_intelmad_jack(intelmaddata);
 	if (ret_val) {
-		pr_err("sst: snd_intelmad_jack failed\n");
+		pr_err("snd_intelmad_jack failed\n");
 		goto free_allocs;
 	}
 
@@ -1126,31 +1124,31 @@
 
 	ret_val = snd_intelmad_register_irq(intelmaddata);
 	if (ret_val) {
-		pr_err("sst: snd_intelmad_register_irq fail\n");
+		pr_err("snd_intelmad_register_irq fail\n");
 		goto free_allocs;
 	}
 
 	/* internal function call to register device with ALSA */
 	ret_val = snd_intelmad_create(intelmaddata, card);
 	if (ret_val) {
-		pr_err("sst: snd_intelmad_create failed\n");
+		pr_err("snd_intelmad_create failed\n");
 		goto free_allocs;
 	}
 	card->private_data = &intelmaddata;
 	snd_card_set_dev(card, &pdev->dev);
 	ret_val = snd_card_register(card);
 	if (ret_val) {
-		pr_err("sst: snd_card_register failed\n");
+		pr_err("snd_card_register failed\n");
 		goto free_allocs;
 	}
 
-	pr_debug("sst:snd_intelmad_probe complete\n");
+	pr_debug("snd_intelmad_probe complete\n");
 	return ret_val;
 
 free_mad_jack_wq:
 	destroy_workqueue(intelmaddata->mad_jack_wq);
 free_allocs:
-	pr_err("sst: probe failed\n");
+	pr_err("probe failed\n");
 	snd_card_free(card);
 	kfree(intelmaddata->sstdrv_ops);
 	kfree(intelmaddata);
@@ -1200,7 +1198,7 @@
  */
 static int __init alsa_card_intelmad_init(void)
 {
-	pr_debug("sst: mad_init called\n");
+	pr_debug("mad_init called\n");
 	return platform_driver_register(&snd_intelmad_driver);
 }
 
@@ -1211,7 +1209,7 @@
  */
 static void __exit alsa_card_intelmad_exit(void)
 {
-	pr_debug("sst:mad_exit called\n");
+	pr_debug("mad_exit called\n");
 	return platform_driver_unregister(&snd_intelmad_driver);
 }
 
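The hunks above drop the hand-written "sst: " prefixes from the log messages. That only works because the companion hunks in this series add a pr_fmt() define at the top of each file (visible below for intelmid_ctrl.c, intelmid_pvt.c and the control files), so every pr_*() call gets the module name prepended automatically. A minimal sketch of the mechanism, with a made-up message rather than anything from this driver:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void example(void)
{
	/* Emitted as "<module name>: example event" at debug level. */
	pr_debug("example event\n");
}

Defining pr_fmt() before the first include matters: linux/kernel.h only installs the default (empty) prefix when the macro is not already set.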
diff --git a/drivers/staging/intel_sst/intelmid.h b/drivers/staging/intel_sst/intelmid.h
index 81e7448..0ce1031 100644
--- a/drivers/staging/intel_sst/intelmid.h
+++ b/drivers/staging/intel_sst/intelmid.h
@@ -178,9 +178,4 @@
 extern struct snd_kcontrol_new snd_intelmad_controls_mfld[];
 extern struct snd_pmic_ops *intelmad_vendor_ops[];
 
-/* This is an enabler hook as the platform detection logic isn't yet
-   present and depends on some firmware and DMI support to detect AAVA
-   devices. It will vanish once the AAVA platform support is merged */
-#define is_aava()	0
-
 #endif /* __INTELMID_H */
diff --git a/drivers/staging/intel_sst/intelmid_ctrl.c b/drivers/staging/intel_sst/intelmid_ctrl.c
index 03b4ece..69af070 100644
--- a/drivers/staging/intel_sst/intelmid_ctrl.c
+++ b/drivers/staging/intel_sst/intelmid_ctrl.c
@@ -24,6 +24,9 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *  ALSA driver handling mixer controls for Intel MAD chipset
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <sound/core.h>
 #include <sound/control.h>
 #include "jack.h"
@@ -216,7 +219,7 @@
 	struct snd_intelmad *intelmaddata;
 	struct snd_pmic_ops *scard_ops;
 
-	pr_debug("sst: snd_intelmad_volume_get called\n");
+	pr_debug("snd_intelmad_volume_get called\n");
 
 	WARN_ON(!uval);
 	WARN_ON(!kcontrol);
@@ -273,7 +276,7 @@
 	struct snd_intelmad *intelmaddata;
 	struct snd_pmic_ops *scard_ops;
 
-	pr_debug("sst: Mute_get called\n");
+	pr_debug("Mute_get called\n");
 
 	WARN_ON(!uval);
 	WARN_ON(!kcontrol);
@@ -332,7 +335,7 @@
 	struct snd_intelmad *intelmaddata;
 	struct snd_pmic_ops *scard_ops;
 
-	pr_debug("sst: volume set called:%ld %ld\n",
+	pr_debug("volume set called:%ld %ld\n",
 			uval->value.integer.value[0],
 			uval->value.integer.value[1]);
 
@@ -387,7 +390,7 @@
 	struct snd_intelmad *intelmaddata;
 	struct snd_pmic_ops *scard_ops;
 
-	pr_debug("sst: snd_intelmad_mute_set called\n");
+	pr_debug("snd_intelmad_mute_set called\n");
 
 	WARN_ON(!uval);
 	WARN_ON(!kcontrol);
@@ -455,7 +458,7 @@
 {
 	struct snd_intelmad *intelmaddata;
 	struct snd_pmic_ops *scard_ops;
-	pr_debug("sst: device_get called\n");
+	pr_debug("device_get called\n");
 
 	WARN_ON(!uval);
 	WARN_ON(!kcontrol);
@@ -491,8 +494,9 @@
 	struct snd_intelmad *intelmaddata;
 	struct snd_pmic_ops *scard_ops;
 	int ret_val = 0, vendor, status;
+	struct intel_sst_pcm_control *pcm_control;
 
-	pr_debug("sst: snd_intelmad_device_set called\n");
+	pr_debug("snd_intelmad_device_set called\n");
 
 	WARN_ON(!uval);
 	WARN_ON(!kcontrol);
@@ -518,15 +522,13 @@
 	case INPUT_SEL:
 		vendor = intelmaddata->sstdrv_ops->vendor_id;
 		if ((vendor == SND_MX) || (vendor == SND_FS)) {
-			if (uval->value.enumerated.item[0] == HS_MIC) {
+			pcm_control = intelmaddata->sstdrv_ops->pcm_control;
+			if (uval->value.enumerated.item[0] == HS_MIC)
 				status = 1;
-				intelmaddata->sstdrv_ops->
-				control_set(SST_ENABLE_RX_TIME_SLOT, &status);
-			} else {
+			else
 				status = 0;
-				intelmaddata->sstdrv_ops->
-				control_set(SST_ENABLE_RX_TIME_SLOT, &status);
-			}
+			pcm_control->device_control(
+					SST_ENABLE_RX_TIME_SLOT, &status);
 		}
 		ret_val = scard_ops->set_input_dev(
 				uval->value.enumerated.item[0]);
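The mixer code above now reaches the SST driver through the pcm_control ops table instead of the removed control_set() hook. The real struct intel_sst_pcm_control lives in intel_sst.h and is not part of this hunk; the sketch below only illustrates the indirection, and the member signatures are assumptions inferred from how the callers use open(), close() and device_control() in this series:

struct snd_sst_params;			/* defined elsewhere in the driver */

/* Assumed shape of the ops table, for illustration only. */
struct example_pcm_control {
	int (*open)(struct snd_sst_params *str_param);
	int (*device_control)(int cmd, void *arg);
	int (*close)(unsigned int str_id);
};

/* Roughly what the old control_set(cmd, &status) calls turn into. */
static int example_set_control(struct example_pcm_control *pcm, int cmd, int status)
{
	/* e.g. cmd == SST_ENABLE_RX_TIME_SLOT in the hunk above */
	return pcm->device_control(cmd, &status);
}

The apparent idea is to give stream open/close their own typed entry points and keep device_control() only for the remaining ad-hoc commands.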
diff --git a/drivers/staging/intel_sst/intelmid_msic_control.c b/drivers/staging/intel_sst/intelmid_msic_control.c
index 4d1755e..da093ed 100644
--- a/drivers/staging/intel_sst/intelmid_msic_control.c
+++ b/drivers/staging/intel_sst/intelmid_msic_control.c
@@ -24,6 +24,8 @@
  * This file contains the control operations of msic vendors
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/file.h>
 #include "intel_sst.h"
@@ -83,7 +85,7 @@
 	snd_msic_ops.cap_on = 0;
 	snd_msic_ops.input_dev_id = DMIC; /*def dev*/
 	snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
-	pr_debug("sst: msic init complete!!\n");
+	pr_debug("msic init complete!!\n");
 	return 0;
 }
 
@@ -173,7 +175,7 @@
 			return retval;
 	}
 
-	pr_debug("sst: powering up pb.... Device %d\n", device);
+	pr_debug("powering up pb.... Device %d\n", device);
 	sst_sc_reg_access(sc_access1, PMIC_WRITE, 4);
 	switch (device) {
 	case SND_SST_DEVICE_HEADSET:
@@ -205,7 +207,7 @@
 		break;
 
 	default:
-		pr_warn("sst: Wrong Device %d, selected %d\n",
+		pr_warn("Wrong Device %d, selected %d\n",
 			       device, snd_msic_ops.output_dev_id);
 	}
 	return sst_sc_reg_access(sc_access_pcm2, PMIC_READ_MODIFY, 1);
@@ -268,7 +270,7 @@
 			return retval;
 	}
 
-	pr_debug("sst: powering up cp....%d\n", snd_msic_ops.input_dev_id);
+	pr_debug("powering up cp....%d\n", snd_msic_ops.input_dev_id);
 	sst_sc_reg_access(sc_access2, PMIC_READ_MODIFY, 1);
 	snd_msic_ops.cap_on = 1;
 	if (snd_msic_ops.input_dev_id == AMIC)
@@ -283,7 +285,7 @@
 {
 	int retval = 0;
 
-	pr_debug("sst: powering dn msic\n");
+	pr_debug("powering dn msic\n");
 	snd_msic_ops.pb_on = 0;
 	snd_msic_ops.cap_on = 0;
 	return retval;
@@ -293,7 +295,7 @@
 {
 	int retval = 0;
 
-	pr_debug("sst: powering dn pb....\n");
+	pr_debug("powering dn pb....\n");
 	snd_msic_ops.pb_on = 0;
 	return retval;
 }
@@ -302,7 +304,7 @@
 {
 	int retval = 0;
 
-	pr_debug("sst: powering dn cp....\n");
+	pr_debug("powering dn cp....\n");
 	snd_msic_ops.cap_on = 0;
 	return retval;
 }
@@ -311,7 +313,7 @@
 {
 	int retval = 0;
 
-	pr_debug("sst: msic set selected output:%d\n", value);
+	pr_debug("msic set selected output:%d\n", value);
 	snd_msic_ops.output_dev_id = value;
 	if (snd_msic_ops.pb_on)
 		msic_power_up_pb(SND_SST_DEVICE_HEADSET);
@@ -330,15 +332,15 @@
 	};
 	int retval = 0;
 
-	pr_debug("sst: msic_set_selected_input_dev:%d\n", value);
+	pr_debug("msic_set_selected_input_dev:%d\n", value);
 	snd_msic_ops.input_dev_id = value;
 	switch (value) {
 	case AMIC:
-		pr_debug("sst: Selecting AMIC1\n");
+		pr_debug("Selecting AMIC1\n");
 		retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
 		break;
 	case DMIC:
-		pr_debug("sst: Selecting DMIC1\n");
+		pr_debug("Selecting DMIC1\n");
 		retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
 		break;
 	default:
diff --git a/drivers/staging/intel_sst/intelmid_pvt.c b/drivers/staging/intel_sst/intelmid_pvt.c
index 9ed9475..3ba9daf 100644
--- a/drivers/staging/intel_sst/intelmid_pvt.c
+++ b/drivers/staging/intel_sst/intelmid_pvt.c
@@ -23,6 +23,9 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  * ALSA driver for Intel MID sound card chipset - holding private functions
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/io.h>
 #include <asm/intel_scu_ipc.h>
 #include <sound/core.h>
@@ -50,7 +53,7 @@
 
 	if (stream->stream_status != RUNNING)
 		return;
-	pr_debug("sst: calling period elapsed\n");
+	pr_debug("calling period elapsed\n");
 	snd_pcm_period_elapsed(substream);
 	return;
 }
@@ -76,8 +79,8 @@
 	param.uc.pcm_params.period_count = substream->runtime->period_size;
 	param.uc.pcm_params.ring_buffer_addr =
 				virt_to_phys(substream->runtime->dma_area);
-	pr_debug("sst: period_cnt = %d\n", param.uc.pcm_params.period_count);
-	pr_debug("sst: sfreq= %d, wd_sz = %d\n",
+	pr_debug("period_cnt = %d\n", param.uc.pcm_params.period_count);
+	pr_debug("sfreq= %d, wd_sz = %d\n",
 		 param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
 
 	str_params.sparams = param;
@@ -85,24 +88,22 @@
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		str_params.ops = STREAM_OPS_PLAYBACK;
-		pr_debug("sst: Playbck stream,Device %d\n", stream->device);
+		pr_debug("Playback stream, Device %d\n", stream->device);
 	} else {
 		str_params.ops = STREAM_OPS_CAPTURE;
 		stream->device = SND_SST_DEVICE_CAPTURE;
-		pr_debug("sst: Capture stream,Device %d\n", stream->device);
+		pr_debug("Capture stream,Device %d\n", stream->device);
 	}
 	str_params.device_type = stream->device;
-	ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_ALLOC,
-					&str_params);
-	pr_debug("sst: SST_SND_PLAY/CAPTURE ret_val = %x\n",
-			ret_val);
+	ret_val = intelmaddata->sstdrv_ops->pcm_control->open(&str_params);
+	pr_debug("SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
 	if (ret_val < 0)
 		return ret_val;
 
 	stream->stream_info.str_id = ret_val;
 	stream->stream_status = INIT;
 	stream->stream_info.buffer_ptr = 0;
-	pr_debug("sst: str id :  %d\n", stream->stream_info.str_id);
+	pr_debug("str id :  %d\n", stream->stream_info.str_id);
 
 	return ret_val;
 }
@@ -113,15 +114,15 @@
 	struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
 	int ret_val;
 
-	pr_debug("sst: setting buffer ptr param\n");
+	pr_debug("setting buffer ptr param\n");
 	stream->stream_info.period_elapsed = period_elapsed;
 	stream->stream_info.mad_substream = substream;
 	stream->stream_info.buffer_ptr = 0;
 	stream->stream_info.sfreq = substream->runtime->rate;
-	ret_val = intelmaddata->sstdrv_ops->control_set(SST_SND_STREAM_INIT,
-					&stream->stream_info);
+	ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
+			SST_SND_STREAM_INIT, &stream->stream_info);
 	if (ret_val)
-		pr_err("sst: control_set ret error %d\n", ret_val);
+		pr_err("control_set ret error %d\n", ret_val);
 	return ret_val;
 
 }
@@ -145,30 +146,29 @@
 		for (i = 0; i < num_val; i++) {
 			retval = intel_scu_ipc_iowrite8(sc_access[i].reg_addr,
 							sc_access[i].value);
-			if (retval) {
-				pr_err("sst: IPC write failed!!! %d\n", retval);
-				return retval;
-			}
+			if (retval)
+				goto err;
 		}
 	} else if (type == PMIC_READ) {
 		for (i = 0; i < num_val; i++) {
 			retval = intel_scu_ipc_ioread8(sc_access[i].reg_addr,
 							&(sc_access[i].value));
-			if (retval) {
-				pr_err("sst: IPC read failed!!!!!%d\n", retval);
-				return retval;
-			}
+			if (retval)
+				goto err;
 		}
 	} else {
 		for (i = 0; i < num_val; i++) {
 			retval = intel_scu_ipc_update_register(
 				sc_access[i].reg_addr, sc_access[i].value,
 				sc_access[i].mask);
-			if (retval) {
-				pr_err("sst: IPC Modify failed!!!%d\n", retval);
-				return retval;
-			}
+			if (retval)
+				goto err;
 		}
 	}
-	return retval;
+	return 0;
+err:
+	pr_err("IPC failed for cmd %d, error %d\n", type, retval);
+	pr_err("reg:0x%2x value:0x%2x\n",
+		sc_access[i].reg_addr, sc_access[i].value);
+	return retval;
 }
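The sst_sc_reg_access() rework above folds three identical error reports into one goto label, so each loop only carries the bail-out and the logging happens in a single place. A stripped-down sketch of that shape, reduced to the write case and assuming the driver's usual includes (asm/intel_scu_ipc.h for the IPC helper, the local header for struct sc_reg_access):

static int example_write_regs(struct sc_reg_access *sc, int num)
{
	int i, retval;

	for (i = 0; i < num; i++) {
		retval = intel_scu_ipc_iowrite8(sc[i].reg_addr, sc[i].value);
		if (retval)
			goto err;	/* stop on the first failure */
	}
	return 0;
err:
	/* one place reports which register and which error code */
	pr_err("IPC write failed at reg 0x%x: %d\n", sc[i].reg_addr, retval);
	return retval;
}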
diff --git a/drivers/staging/intel_sst/intelmid_v0_control.c b/drivers/staging/intel_sst/intelmid_v0_control.c
index f586d62..7859225 100644
--- a/drivers/staging/intel_sst/intelmid_v0_control.c
+++ b/drivers/staging/intel_sst/intelmid_v0_control.c
@@ -26,6 +26,8 @@
  *  This file contains the control operations of vendor 1
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/file.h>
 #include "intel_sst.h"
@@ -151,7 +153,7 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: in fs power up pb\n");
+	pr_debug("in fs power up pb\n");
 	return fs_enable_audiodac(UNMUTE);
 }
 
@@ -173,7 +175,7 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: in fsl power down pb\n");
+	pr_debug("in fsl power down pb\n");
 	return fs_enable_audiodac(UNMUTE);
 }
 
@@ -380,7 +382,7 @@
 		sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
 
 	}
-	pr_debug("sst: sfreq:%d,Register value = %x\n", sfreq, config1);
+	pr_debug("sfreq:%d,Register value = %x\n", sfreq, config1);
 
 	if (word_size == 24) {
 		sc_access[0].reg_addr  = AUDIOPORT1;
@@ -438,18 +440,18 @@
 
 	switch (value) {
 	case AMIC:
-		pr_debug("sst: Selecting amic not supported in mono cfg\n");
+		pr_debug("Selecting amic not supported in mono cfg\n");
 		return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
 		break;
 
 	case HS_MIC:
-		pr_debug("sst: Selecting hsmic\n");
+		pr_debug("Selecting hsmic\n");
 		return sst_sc_reg_access(sc_access_hsmic,
 				PMIC_READ_MODIFY, 2);
 		break;
 
 	case DMIC:
-		pr_debug("sst: Selecting dmic\n");
+		pr_debug("Selecting dmic\n");
 		return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
 		break;
 
@@ -505,7 +507,7 @@
 		return retval;
 
 
-	pr_debug("sst: dev_id:0x%x value:0x%x\n", dev_id, value);
+	pr_debug("dev_id:0x%x value:0x%x\n", dev_id, value);
 	switch (dev_id) {
 	case PMIC_SND_DMIC_MUTE:
 		sc_access[0].reg_addr = MICCTRL;
@@ -606,7 +608,7 @@
 
 	switch (dev_id) {
 	case PMIC_SND_LEFT_PB_VOL:
-		pr_debug("sst: PMIC_SND_LEFT_PB_VOL:%d\n", value);
+		pr_debug("PMIC_SND_LEFT_PB_VOL:%d\n", value);
 		sc_access[0].value = sc_access[1].value = value;
 		sc_access[0].reg_addr = AUD16;
 		sc_access[1].reg_addr = AUD15;
@@ -616,7 +618,7 @@
 		break;
 
 	case PMIC_SND_RIGHT_PB_VOL:
-		pr_debug("sst: PMIC_SND_RIGHT_PB_VOL:%d\n", value);
+		pr_debug("PMIC_SND_RIGHT_PB_VOL:%d\n", value);
 		sc_access[0].value = sc_access[1].value = value;
 		sc_access[0].reg_addr = AUD17;
 		sc_access[1].reg_addr = AUD15;
@@ -629,7 +631,7 @@
 		reg_num = 2;
 		break;
 	case PMIC_SND_CAPTURE_VOL:
-		pr_debug("sst: PMIC_SND_CAPTURE_VOL:%d\n", value);
+		pr_debug("PMIC_SND_CAPTURE_VOL:%d\n", value);
 		sc_access[0].reg_addr = MICLICTRL1;
 		sc_access[1].reg_addr = MICLICTRL2;
 		sc_access[2].reg_addr = DMICCTRL1;
@@ -726,17 +728,17 @@
 
 	switch (dev_id) {
 	case PMIC_SND_CAPTURE_VOL:
-		pr_debug("sst: PMIC_SND_CAPTURE_VOL\n");
+		pr_debug("PMIC_SND_CAPTURE_VOL\n");
 		sc_access.reg_addr = MICLICTRL1;
 		mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
 		break;
 	case PMIC_SND_LEFT_PB_VOL:
-		pr_debug("sst: PMIC_SND_LEFT_PB_VOL\n");
+		pr_debug("PMIC_SND_LEFT_PB_VOL\n");
 		sc_access.reg_addr = AUD16;
 		mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
 		break;
 	case PMIC_SND_RIGHT_PB_VOL:
-		pr_debug("sst: PMIC_SND_RT_PB_VOL\n");
+		pr_debug("PMIC_SND_RT_PB_VOL\n");
 		sc_access.reg_addr = AUD17;
 		mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
 		break;
@@ -745,9 +747,9 @@
 	}
 
 	retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
-	pr_debug("sst: value read = 0x%x\n", sc_access.value);
+	pr_debug("value read = 0x%x\n", sc_access.value);
 	*value = (int) (sc_access.value & mask);
-	pr_debug("sst: value returned = 0x%x\n", *value);
+	pr_debug("value returned = 0x%x\n", *value);
 	return retval;
 }
 
diff --git a/drivers/staging/intel_sst/intelmid_v1_control.c b/drivers/staging/intel_sst/intelmid_v1_control.c
index 9de86b2..478cfec 100644
--- a/drivers/staging/intel_sst/intelmid_v1_control.c
+++ b/drivers/staging/intel_sst/intelmid_v1_control.c
@@ -25,6 +25,8 @@
  *  This file contains the control operations of vendor 2
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/file.h>
 #include <asm/mrst.h>
@@ -132,56 +134,6 @@
 	return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
 }
 
-static int mx_init_capture_card(void)
-{
-	struct sc_reg_access sc_access[] = {
-		{0x206, 0x5a, 0x0},
-		{0x207, 0xbe, 0x0},
-		{0x208, 0x90, 0x0},
-		{0x209, 0x32, 0x0},
-		{0x20e, 0x22, 0x0},
-		{0x210, 0x84, 0x0},
-		{0x223, 0x20, 0x0},
-		{0x226, 0xC0, 0x0},
-	};
-
-	int retval = 0;
-
-	retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 8);
-	if (0 != retval) {
-		/* pmic communication fails */
-		pr_debug("sst: pmic commn failed\n");
-		return retval;
-	}
-
-	pr_debug("sst: Capture configuration complete!!\n");
-	return 0;
-}
-
-static int mx_init_playback_card(void)
-{
-	struct sc_reg_access sc_access[] = {
-		{0x206, 0x00, 0x0},
-		{0x207, 0x00, 0x0},
-		{0x208, 0x00, 0x0},
-		{0x209, 0x51, 0x0},
-		{0x20e, 0x51, 0x0},
-		{0x210, 0x21, 0x0},
-		{0x223, 0x01, 0x0},
-	};
-	int retval = 0;
-
-	retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 9);
-	if (0 != retval) {
-		/* pmic communication fails */
-		pr_debug("sst: pmic commn failed\n");
-		return retval;
-	}
-
-	pr_debug("sst: Playback configuration complete!!\n");
-	return 0;
-}
-
 static int mx_enable_audiodac(int value)
 {
 	struct sc_reg_access sc_access[3];
@@ -204,7 +156,7 @@
 	retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
 	if (retval)
 		return retval;
-	pr_debug("sst: mute status = %d", snd_pmic_ops_mx.mute_status);
+	pr_debug("mute status = %d\n", snd_pmic_ops_mx.mute_status);
 	if (snd_pmic_ops_mx.mute_status == MUTE ||
 				snd_pmic_ops_mx.master_mute == MUTE)
 		return retval;
@@ -412,7 +364,7 @@
 		if (retval)
 			return retval;
 	}
-	pr_debug("sst: SST DBG mx_set_pcm_voice_params called\n");
+	pr_debug("SST DBG: mx_set_pcm_voice_params called\n");
 	return sst_sc_reg_access(sc_access, PMIC_WRITE, 44);
 }
 
@@ -529,7 +481,7 @@
 			return retval;
 	}
 
-	pr_debug("sst: mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
+	pr_debug("mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
 	snd_pmic_ops_mx.output_dev_id = dev_id;
 	switch (dev_id) {
 	case STEREO_HEADPHONE:
@@ -549,7 +501,7 @@
 		num_reg = 1;
 		break;
 	case RECEIVER:
-		pr_debug("sst: RECEIVER Koski selected\n");
+		pr_debug("RECEIVER Koski selected\n");
 
 		/* configuration - AS enable, receiver enable */
 		sc_access[0].reg_addr = 0xFF;
@@ -559,7 +511,7 @@
 		num_reg = 1;
 		break;
 	default:
-		pr_err("sst: Not a valid output dev\n");
+		pr_err("Not a valid output dev\n");
 		return 0;
 	}
 	return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
@@ -598,7 +550,7 @@
 			return retval;
 	}
 	snd_pmic_ops_mx.input_dev_id = dev_id;
-	pr_debug("sst: mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
+	pr_debug("mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
 
 	switch (dev_id) {
 	case AMIC:
@@ -646,7 +598,7 @@
 	}
 
 
-	pr_debug("sst: set_mute dev_id:0x%x , value:%d\n", dev_id, value);
+	pr_debug("set_mute dev_id:0x%x , value:%d\n", dev_id, value);
 
 	switch (dev_id) {
 	case PMIC_SND_DMIC_MUTE:
@@ -760,7 +712,7 @@
 		if (retval)
 			return retval;
 	}
-	pr_debug("sst: set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
+	pr_debug("set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
 	switch (dev_id) {
 	case PMIC_SND_RECEIVER_VOL:
 		return 0;
@@ -875,7 +827,7 @@
 	if (retval)
 		return retval;
 	*value = -(sc_access.value & mask);
-	pr_debug("sst: get volume value extracted %d\n", *value);
+	pr_debug("get volume value extracted %d\n", *value);
 	return retval;
 }
 
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index 3a7de76..e38e89d 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -26,6 +26,8 @@
  *  This file contains the control operations of vendor 3
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/file.h>
 #include "intel_sst.h"
@@ -120,7 +122,7 @@
 	snd_pmic_ops_nc.master_mute = UNMUTE;
 	snd_pmic_ops_nc.mute_status = UNMUTE;
 	sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
-	pr_debug("sst: init complete!!\n");
+	pr_debug("init complete!!\n");
 	return 0;
 }
 
@@ -169,7 +171,7 @@
 	nc_enable_audiodac(MUTE);
 	msleep(30);
 
-	pr_debug("sst: powering up pb....\n");
+	pr_debug("powering up pb....\n");
 
 	sc_access[0].reg_addr = VAUDIOCNT;
 	sc_access[0].value = 0x27;
@@ -222,7 +224,7 @@
 		return retval;
 
 
-	pr_debug("sst: powering up cp....\n");
+	pr_debug("powering up cp....\n");
 
 	if (port == 0xFF)
 		return 0;
@@ -275,7 +277,7 @@
 	nc_enable_audiodac(MUTE);
 
 
-	pr_debug("sst: powering dn nc_power_down ....\n");
+	pr_debug("powering dn nc_power_down ....\n");
 
 	msleep(30);
 
@@ -324,7 +326,7 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: powering dn pb....\n");
+	pr_debug("powering dn pb....\n");
 
 	nc_enable_audiodac(MUTE);
 
@@ -370,7 +372,7 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: powering dn cp....\n");
+	pr_debug("powering dn cp....\n");
 	return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
 }
 
@@ -400,7 +402,7 @@
 		return retval;
 
 	sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
-	pr_debug("sst: Voice parameters set successfully!!\n");
+	pr_debug("Voice parameters set successfully!!\n");
 	return 0;
 }
 
@@ -451,20 +453,20 @@
 
 		sc_access.value = 0x07;
 		sc_access.reg_addr = RMUTE;
-		pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_access.value);
+		pr_debug("RIGHT_HP_MUTE value%d\n", sc_access.value);
 		sc_access.mask = MASK2;
 		sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
 	} else {
 		sc_access.value = 0x00;
 		sc_access.reg_addr = RMUTE;
-		pr_debug("sst: RIGHT_HP_MUTE value %d\n", sc_access.value);
+		pr_debug("RIGHT_HP_MUTE value %d\n", sc_access.value);
 		sc_access.mask = MASK2;
 		sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
 
 
 	}
 
-	pr_debug("sst: word_size = %d\n", word_size);
+	pr_debug("word_size = %d\n", word_size);
 
 	if (word_size == 24) {
 		sc_access.reg_addr = AUDIOPORT2;
@@ -477,7 +479,7 @@
 	}
 	sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
 
-	pr_debug("sst: word_size = %d\n", word_size);
+	pr_debug("word_size = %d\n", word_size);
 	sc_access.reg_addr = AUDIOPORT1;
 	sc_access.mask = MASK5|MASK4|MASK1|MASK0;
 	if (word_size == 16)
@@ -508,7 +510,7 @@
 		retval = nc_init_card();
 	if (retval)
 		return retval;
-	pr_debug("sst: nc set selected output:%d\n", value);
+	pr_debug("nc set selected output:%d\n", value);
 	switch (value) {
 	case STEREO_HEADPHONE:
 		retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
@@ -517,7 +519,7 @@
 		retval = sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 2);
 		break;
 	default:
-		pr_err("sst: rcvd illegal request: %d\n", value);
+		pr_err("rcvd illegal request: %d\n", value);
 		return -EINVAL;
 	}
 	return retval;
@@ -541,7 +543,7 @@
 	};
 
 	sst_sc_reg_access(sc_access, PMIC_WRITE, 12);
-	pr_debug("sst: Audio Init successfully!!\n");
+	pr_debug("Audio Init successful!!\n");
 
 	/*set output device */
 	nc_set_selected_output_dev(snd_pmic_ops_nc.output_dev_id);
@@ -549,13 +551,13 @@
 	if (snd_pmic_ops_nc.num_channel == 1) {
 		sc_acces.value = 0x07;
 		sc_acces.reg_addr = RMUTE;
-		pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_acces.value);
+		pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
 		sc_acces.mask = MASK2;
 		sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
 	} else {
 		sc_acces.value = 0x00;
 		sc_acces.reg_addr = RMUTE;
-		pr_debug("sst: RIGHT_HP_MUTE value%d\n", sc_acces.value);
+		pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
 		sc_acces.mask = MASK2;
 		sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
 	}
@@ -629,11 +631,11 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: set device id::%d, value %d\n", dev_id, value);
+	pr_debug("set device id::%d, value %d\n", dev_id, value);
 
 	switch (dev_id) {
 	case PMIC_SND_MUTE_ALL:
-		pr_debug("sst: PMIC_SND_MUTE_ALL value %d\n", value);
+		pr_debug("PMIC_SND_MUTE_ALL value %d\n", value);
 		snd_pmic_ops_nc.mute_status = value;
 		snd_pmic_ops_nc.master_mute = value;
 		if (value == UNMUTE) {
@@ -669,7 +671,7 @@
 		}
 		break;
 	case PMIC_SND_HP_MIC_MUTE:
-		pr_debug("sst: PMIC_SND_HPMIC_MUTE value %d\n", value);
+		pr_debug("PMIC_SND_HPMIC_MUTE value %d\n", value);
 		if (value == UNMUTE) {
 			/* unmute the system, set the 6th bit to one */
 			sc_access[0].value = 0x00;
@@ -682,7 +684,7 @@
 		retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
 		break;
 	case PMIC_SND_AMIC_MUTE:
-		pr_debug("sst: PMIC_SND_AMIC_MUTE value %d\n", value);
+		pr_debug("PMIC_SND_AMIC_MUTE value %d\n", value);
 		if (value == UNMUTE) {
 			/* unmute the system, set the 6th bit to one */
 			sc_access[0].value = 0x00;
@@ -696,7 +698,7 @@
 		break;
 
 	case PMIC_SND_DMIC_MUTE:
-		pr_debug("sst: INPUT_MUTE_DMIC value%d\n", value);
+		pr_debug("INPUT_MUTE_DMIC value%d\n", value);
 		if (value == UNMUTE) {
 			/* unmute the system, set the 6th bit to one */
 			sc_access[1].value = 0x00;
@@ -724,13 +726,13 @@
 
 		if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
 			sc_access[0].reg_addr = LMUTE;
-			pr_debug("sst: LEFT_HP_MUTE value %d\n",
+			pr_debug("LEFT_HP_MUTE value %d\n",
 					sc_access[0].value);
 		} else {
 			if (snd_pmic_ops_nc.num_channel == 1)
 				sc_access[0].value = 0x04;
 			sc_access[0].reg_addr = RMUTE;
-			pr_debug("sst: RIGHT_HP_MUTE value %d\n",
+			pr_debug("RIGHT_HP_MUTE value %d\n",
 					sc_access[0].value);
 		}
 		sc_access[0].mask = MASK2;
@@ -743,7 +745,7 @@
 		else
 			sc_access[0].value = 0x03;
 		sc_access[0].reg_addr = LMUTE;
-		pr_debug("sst: SPEAKER_MUTE %d\n", sc_access[0].value);
+		pr_debug("SPEAKER_MUTE %d\n", sc_access[0].value);
 		sc_access[0].mask = MASK1;
 		retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
 		break;
@@ -764,10 +766,10 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: set volume:%d\n", dev_id);
+	pr_debug("set volume:%d\n", dev_id);
 	switch (dev_id) {
 	case PMIC_SND_CAPTURE_VOL:
-		pr_debug("sst: PMIC_SND_CAPTURE_VOL:value::%d\n", value);
+		pr_debug("PMIC_SND_CAPTURE_VOL:value::%d\n", value);
 		sc_access[0].value = sc_access[1].value =
 					sc_access[2].value = -value;
 		sc_access[0].mask = sc_access[1].mask = sc_access[2].mask =
@@ -779,7 +781,7 @@
 		break;
 
 	case PMIC_SND_LEFT_PB_VOL:
-		pr_debug("sst: PMIC_SND_LEFT_HP_VOL %d\n", value);
+		pr_debug("PMIC_SND_LEFT_HP_VOL %d\n", value);
 		sc_access[0].value = -value;
 		sc_access[0].reg_addr  = AUDIOLVOL;
 		sc_access[0].mask =
@@ -788,7 +790,7 @@
 		break;
 
 	case PMIC_SND_RIGHT_PB_VOL:
-		pr_debug("sst: PMIC_SND_RIGHT_HP_VOL value %d\n", value);
+		pr_debug("PMIC_SND_RIGHT_HP_VOL value %d\n", value);
 		if (snd_pmic_ops_nc.num_channel == 1) {
 			sc_access[0].value = 0x04;
 		    sc_access[0].reg_addr = RMUTE;
@@ -821,11 +823,11 @@
 		return retval;
 	snd_pmic_ops_nc.input_dev_id = value;
 
-	pr_debug("sst: nc set selected input:%d\n", value);
+	pr_debug("nc set selected input:%d\n", value);
 
 	switch (value) {
 	case AMIC:
-		pr_debug("sst: Selecting AMIC\n");
+		pr_debug("Selecting AMIC\n");
 		sc_access[0].reg_addr = 0x107;
 		sc_access[0].value = 0x40;
 		sc_access[0].mask =  MASK6|MASK4|MASK3|MASK1|MASK0;
@@ -842,7 +844,7 @@
 		break;
 
 	case HS_MIC:
-		pr_debug("sst: Selecting HS_MIC\n");
+		pr_debug("Selecting HS_MIC\n");
 		sc_access[0].reg_addr = 0x107;
 		sc_access[0].mask =  MASK6|MASK4|MASK3|MASK1|MASK0;
 		sc_access[0].value = 0x10;
@@ -859,7 +861,7 @@
 		break;
 
 	case DMIC:
-		pr_debug("sst: DMIC\n");
+		pr_debug("DMIC\n");
 		sc_access[0].reg_addr = 0x107;
 		sc_access[0].mask = MASK6|MASK4|MASK3|MASK1|MASK0;
 		sc_access[0].value = 0x0B;
@@ -871,7 +873,7 @@
 		sc_access[2].mask = MASK6;
 		sc_access[3].reg_addr = 0x109;
 		sc_access[3].mask = MASK6;
-		sc_access[3].value = 0x40;
+		sc_access[3].value = 0x00;
 		num_val = 4;
 		break;
 	default:
@@ -890,23 +892,23 @@
 	if (retval)
 		return retval;
 
-	pr_debug("sst: get mute::%d\n", dev_id);
+	pr_debug("get mute::%d\n", dev_id);
 
 	switch (dev_id) {
 	case PMIC_SND_AMIC_MUTE:
-		pr_debug("sst: PMIC_SND_INPUT_MUTE_MIC1\n");
+		pr_debug("PMIC_SND_INPUT_MUTE_MIC1\n");
 		sc_access.reg_addr = LILSEL;
 		mask = MASK6;
 		break;
 	case PMIC_SND_HP_MIC_MUTE:
-		pr_debug("sst: PMIC_SND_INPUT_MUTE_MIC2\n");
+		pr_debug("PMIC_SND_INPUT_MUTE_MIC2\n");
 		sc_access.reg_addr = LIRSEL;
 		mask = MASK6;
 		break;
 	case PMIC_SND_LEFT_HP_MUTE:
 	case PMIC_SND_RIGHT_HP_MUTE:
 		mask = MASK2;
-		pr_debug("sst: PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
+		pr_debug("PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
 		if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
 			sc_access.reg_addr = RMUTE;
 		else
@@ -914,12 +916,12 @@
 		break;
 
 	case PMIC_SND_LEFT_SPEAKER_MUTE:
-		pr_debug("sst: PMIC_MONO_EARPIECE_MUTE\n");
+		pr_debug("PMIC_MONO_EARPIECE_MUTE\n");
 		sc_access.reg_addr = RMUTE;
 		mask = MASK1;
 		break;
 	case PMIC_SND_DMIC_MUTE:
-		pr_debug("sst: PMIC_SND_INPUT_MUTE_DMIC\n");
+		pr_debug("PMIC_SND_INPUT_MUTE_DMIC\n");
 		sc_access.reg_addr = 0x105;
 		mask = MASK6;
 		break;
@@ -928,16 +930,16 @@
 
 	}
 	retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
-	pr_debug("sst: reg value = %d\n", sc_access.value);
+	pr_debug("reg value = %d\n", sc_access.value);
 	if (retval)
 		return retval;
 	*value = (sc_access.value) & mask;
-	pr_debug("sst: masked value = %d\n", *value);
+	pr_debug("masked value = %d\n", *value);
 	if (*value)
 		*value = 0;
 	else
 		*value = 1;
-	pr_debug("sst: value returned = 0x%x\n", *value);
+	pr_debug("value returned = 0x%x\n", *value);
 	return retval;
 }
 
@@ -953,19 +955,19 @@
 
 	switch (dev_id) {
 	case PMIC_SND_CAPTURE_VOL:
-		pr_debug("sst: PMIC_SND_INPUT_CAPTURE_VOL\n");
+		pr_debug("PMIC_SND_INPUT_CAPTURE_VOL\n");
 		sc_access.reg_addr =  LILSEL;
 		mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
 		break;
 
 	case PMIC_SND_RIGHT_PB_VOL:
-		pr_debug("sst: GET_VOLUME_PMIC_LEFT_HP_VOL\n");
+		pr_debug("GET_VOLUME_PMIC_LEFT_HP_VOL\n");
 		sc_access.reg_addr = AUDIOLVOL;
 		mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
 		break;
 
 	case PMIC_SND_LEFT_PB_VOL:
-		pr_debug("sst: GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
+		pr_debug("GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
 		sc_access.reg_addr = AUDIORVOL;
 		mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
 		break;
@@ -975,9 +977,9 @@
 
 	}
 	retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
-	pr_debug("sst: value read = 0x%x\n", sc_access.value);
+	pr_debug("value read = 0x%x\n", sc_access.value);
 	*value = -((sc_access.value) & mask);
-	pr_debug("sst: get vol value returned = %d\n", *value);
+	pr_debug("get vol value returned = %d\n", *value);
 	return retval;
 }
 
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 978bf87..515e448 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -11,43 +11,37 @@
 #include "transport.h"
 #include "init.h"
 
-BYTE		IsSSFDCCompliance;
-BYTE		IsXDCompliance;
-extern DWORD MediaChange;
-extern int	Check_D_MediaFmt(struct us_data *);
+BYTE IsSSFDCCompliance;
+BYTE IsXDCompliance;
 
-//----- ENE_InitMedia() ----------------------------------------
+/*
+ * ENE_InitMedia():
+ */
 int ENE_InitMedia(struct us_data *us)
 {
 	int	result;
 	BYTE	MiscReg03 = 0;
 
-	printk("--- Init Media ---\n");
+	printk(KERN_INFO "--- Init Media ---\n");
 	result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Read register fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Read register fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
-	printk("MiscReg03 = %x\n", MiscReg03);
+	printk(KERN_INFO "MiscReg03 = %x\n", MiscReg03);
 
-	if (MiscReg03 & 0x01)
-	{
-		if (!us->SD_Status.Ready)
-		{
+	if (MiscReg03 & 0x01) {
+		if (!us->SD_Status.Ready) {
 			result = ENE_SDInit(us);
 			if (result != USB_STOR_XFER_GOOD)
 				return USB_STOR_TRANSPORT_ERROR;
 		}
 	}
 
-	if (MiscReg03 & 0x02)
-	{
-		if (!us->SM_Status.Ready && !us->MS_Status.Ready)
-		{
+	if (MiscReg03 & 0x02) {
+		if (!us->SM_Status.Ready && !us->MS_Status.Ready) {
 			result = ENE_SMInit(us);
-			if (result != USB_STOR_XFER_GOOD)
-			{
+			if (result != USB_STOR_XFER_GOOD) {
 				result = ENE_MSInit(us);
 				if (result != USB_STOR_XFER_GOOD)
 					return USB_STOR_TRANSPORT_ERROR;
@@ -58,7 +52,9 @@
 	return result;
 }
 
-//----- ENE_Read_BYTE() ----------------------------------------
+/*
+ * ENE_Read_BYTE() :
+ */
 int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
@@ -76,19 +72,20 @@
 	return result;
 }
 
-//----- ENE_SDInit() ---------------------
+/*
+ * ENE_SDInit():
+ */
 int ENE_SDInit(struct us_data *us)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int	result;
 	BYTE	buf[0x200];
 
-	printk("transport --- ENE_SDInit\n");
-	// SD Init Part-1
+	printk(KERN_INFO "transport --- ENE_SDInit\n");
+	/* SD Init Part-1 */
 	result = ENE_LoadBinCode(us, SD_INIT1_PATTERN);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Load SD Init Code Part-1 Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Load SD Init Code Part-1 Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -98,17 +95,15 @@
 	bcb->CDB[0] = 0xF2;
 
 	result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Exection SD Init Code Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Execution SD Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	// SD Init Part-2
+	/* SD Init Part-2 */
 	result = ENE_LoadBinCode(us, SD_INIT2_PATTERN);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Load SD Init Code Part-2 Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Load SD Init Code Part-2 Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -119,45 +114,41 @@
 	bcb->CDB[0]			= 0xF1;
 
 	result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Exection SD Init Code Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Execution SD Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
 	us->SD_Status =  *(PSD_STATUS)&buf[0];
-	if (us->SD_Status.Insert && us->SD_Status.Ready)
-	{
+	if (us->SD_Status.Insert && us->SD_Status.Ready) {
 		ENE_ReadSDReg(us, (PBYTE)&buf);
-		printk("Insert     = %x\n", us->SD_Status.Insert);
-		printk("Ready      = %x\n", us->SD_Status.Ready);
-		printk("IsMMC      = %x\n", us->SD_Status.IsMMC);
-		printk("HiCapacity = %x\n", us->SD_Status.HiCapacity);
-		printk("HiSpeed    = %x\n", us->SD_Status.HiSpeed);
-		printk("WtP        = %x\n", us->SD_Status.WtP);
-	}
-	else
-	{
-		printk("SD Card Not Ready --- %x\n", buf[0]);
+		printk(KERN_INFO "Insert     = %x\n", us->SD_Status.Insert);
+		printk(KERN_INFO "Ready      = %x\n", us->SD_Status.Ready);
+		printk(KERN_INFO "IsMMC      = %x\n", us->SD_Status.IsMMC);
+		printk(KERN_INFO "HiCapacity = %x\n", us->SD_Status.HiCapacity);
+		printk(KERN_INFO "HiSpeed    = %x\n", us->SD_Status.HiSpeed);
+		printk(KERN_INFO "WtP        = %x\n", us->SD_Status.WtP);
+	} else {
+		printk(KERN_ERR "SD Card Not Ready --- %x\n", buf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- ENE_MSInit() ----------------------------------------
+/*
+ * ENE_MSInit():
+ */
 int ENE_MSInit(struct us_data *us)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int	result;
 	BYTE	buf[0x200];
 	WORD	MSP_BlockSize, MSP_UserAreaBlocks;
-	
 
-	printk("transport --- ENE_MSInit\n");
+	printk(KERN_INFO "transport --- ENE_MSInit\n");
 	result = ENE_LoadBinCode(us, MS_INIT_PATTERN);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Load MS Init Code Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Load MS Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -169,53 +160,49 @@
 	bcb->CDB[1]			= 0x01;
 
 	result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Exection MS Init Code Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Execution MS Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
 	us->MS_Status = *(PMS_STATUS)&buf[0];
 
-	if (us->MS_Status.Insert && us->MS_Status.Ready)
-	{
-		printk("Insert     = %x\n", us->MS_Status.Insert);
-		printk("Ready      = %x\n", us->MS_Status.Ready);
-		printk("IsMSPro    = %x\n", us->MS_Status.IsMSPro);
-		printk("IsMSPHG    = %x\n", us->MS_Status.IsMSPHG);
-		printk("WtP        = %x\n", us->MS_Status.WtP);
-		if (us->MS_Status.IsMSPro)
-		{
-			MSP_BlockSize      = (buf[6] <<8) | buf[7];
-			MSP_UserAreaBlocks = (buf[10]<<8) | buf[11];
+	if (us->MS_Status.Insert && us->MS_Status.Ready) {
+		printk(KERN_INFO "Insert     = %x\n", us->MS_Status.Insert);
+		printk(KERN_INFO "Ready      = %x\n", us->MS_Status.Ready);
+		printk(KERN_INFO "IsMSPro    = %x\n", us->MS_Status.IsMSPro);
+		printk(KERN_INFO "IsMSPHG    = %x\n", us->MS_Status.IsMSPHG);
+		printk(KERN_INFO "WtP        = %x\n", us->MS_Status.WtP);
+		if (us->MS_Status.IsMSPro) {
+			MSP_BlockSize      = (buf[6] << 8) | buf[7];
+			MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
 			us->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
-		}
-		else
+		} else {
 			MS_CardInit(us);
-		printk("MS Init Code OK !!\n");
-	}
-	else
-	{
-		printk("MS Card Not Ready --- %x\n", buf[0]);
+		}
+		printk(KERN_INFO "MS Init Code OK !!\n");
+	} else {
+		printk(KERN_INFO "MS Card Not Ready --- %x\n", buf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- ENE_SMInit() ----------------------------------------
+/*
+ * ENE_SMInit()
+ */
 int ENE_SMInit(struct us_data *us)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int	result;
 	BYTE	buf[0x200];
 
-	printk("transport --- ENE_SMInit\n");
+	printk(KERN_INFO "transport --- ENE_SMInit\n");
 
 	result = ENE_LoadBinCode(us, SM_INIT_PATTERN);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Load SM Init Code Fail !!\n");
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "Load SM Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -227,9 +214,9 @@
 	bcb->CDB[1]			= 0x01;
 
 	result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("Exection SM Init Code Fail !! result = %x\n", result);
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR
+		       "Execution SM Init Code Fail !! result = %x\n", result);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -238,32 +225,31 @@
 	us->SM_DeviceID = buf[1];
 	us->SM_CardID   = buf[2];
 
-	if (us->SM_Status.Insert && us->SM_Status.Ready)
-	{
-		printk("Insert     = %x\n", us->SM_Status.Insert);
-		printk("Ready      = %x\n", us->SM_Status.Ready);
-		printk("WtP        = %x\n", us->SM_Status.WtP);
-		printk("DeviceID   = %x\n", us->SM_DeviceID);
-		printk("CardID     = %x\n", us->SM_CardID);
+	if (us->SM_Status.Insert && us->SM_Status.Ready) {
+		printk(KERN_INFO "Insert     = %x\n", us->SM_Status.Insert);
+		printk(KERN_INFO "Ready      = %x\n", us->SM_Status.Ready);
+		printk(KERN_INFO "WtP        = %x\n", us->SM_Status.WtP);
+		printk(KERN_INFO "DeviceID   = %x\n", us->SM_DeviceID);
+		printk(KERN_INFO "CardID     = %x\n", us->SM_CardID);
 		MediaChange = 1;
 		Check_D_MediaFmt(us);
-	}
-	else
-	{
-		printk("SM Card Not Ready --- %x\n", buf[0]);
+	} else {
+		printk(KERN_ERR "SM Card Not Ready --- %x\n", buf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- ENE_ReadSDReg() ----------------------------------------------
+/*
+ * ENE_ReadSDReg()
+ */
 int ENE_ReadSDReg(struct us_data *us, u8 *RdBuf)
 {
 	WORD	tmpreg;
 	DWORD	reg4b;
-	
-	//printk("transport --- ENE_ReadSDReg\n");
+
+	/* printk(KERN_INFO "transport --- ENE_ReadSDReg\n"); */
 	reg4b = *(PDWORD)&RdBuf[0x18];
 	us->SD_READ_BL_LEN = (BYTE)((reg4b >> 8) & 0x0f);
 
@@ -277,74 +263,75 @@
 	if (us->SD_Status.HiCapacity && us->SD_Status.IsMMC)
 		us->HC_C_SIZE = *(PDWORD)(&RdBuf[0x100]);
 
-	if (us->SD_READ_BL_LEN > SD_BLOCK_LEN)
-	{
-		us->SD_Block_Mult = 1 << (us->SD_READ_BL_LEN - SD_BLOCK_LEN);		us->SD_READ_BL_LEN = SD_BLOCK_LEN;
-	}
-	else
-	{		us->SD_Block_Mult = 1;
+	if (us->SD_READ_BL_LEN > SD_BLOCK_LEN) {
+		us->SD_Block_Mult =
+			1 << (us->SD_READ_BL_LEN - SD_BLOCK_LEN);
+		us->SD_READ_BL_LEN = SD_BLOCK_LEN;
+	} else {
+		us->SD_Block_Mult = 1;
 	}
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- ENE_LoadBinCode() ---------------------
+/*
+ * ENE_LoadBinCode()
+ */
 int ENE_LoadBinCode(struct us_data *us, BYTE flag)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
-	//void *buf;
+	/* void *buf; */
 	PBYTE buf;
 
-	//printk("transport --- ENE_LoadBinCode\n");
+	/* printk(KERN_INFO "transport --- ENE_LoadBinCode\n"); */
 	if (us->BIN_FLAG == flag)
 		return USB_STOR_TRANSPORT_GOOD;
 
 	buf = kmalloc(0x800, GFP_KERNEL);
 	if (buf == NULL)
 		return USB_STOR_TRANSPORT_ERROR;
-	switch ( flag )
-	{
-		// For SD
-		case SD_INIT1_PATTERN:
-			printk("SD_INIT1_PATTERN\n");
-			memcpy(buf, SD_Init1, 0x800);
+	switch (flag) {
+	/* For SD */
+	case SD_INIT1_PATTERN:
+		printk(KERN_INFO "SD_INIT1_PATTERN\n");
+		memcpy(buf, SD_Init1, 0x800);
 		break;
-		case SD_INIT2_PATTERN:
-			printk("SD_INIT2_PATTERN\n");
-			memcpy(buf, SD_Init2, 0x800);
+	case SD_INIT2_PATTERN:
+		printk(KERN_INFO "SD_INIT2_PATTERN\n");
+		memcpy(buf, SD_Init2, 0x800);
 		break;
-		case SD_RW_PATTERN:
-			printk("SD_RW_PATTERN\n");
-			memcpy(buf, SD_Rdwr, 0x800);
+	case SD_RW_PATTERN:
+		printk(KERN_INFO "SD_RW_PATTERN\n");
+		memcpy(buf, SD_Rdwr, 0x800);
 		break;
-		// For MS
-		case MS_INIT_PATTERN:
-			printk("MS_INIT_PATTERN\n");
-			memcpy(buf, MS_Init, 0x800);
+	/* For MS */
+	case MS_INIT_PATTERN:
+		printk(KERN_INFO "MS_INIT_PATTERN\n");
+		memcpy(buf, MS_Init, 0x800);
 		break;
-		case MSP_RW_PATTERN:
-			printk("MSP_RW_PATTERN\n");
-			memcpy(buf, MSP_Rdwr, 0x800);
+	case MSP_RW_PATTERN:
+		printk(KERN_INFO "MSP_RW_PATTERN\n");
+		memcpy(buf, MSP_Rdwr, 0x800);
 		break;
-		case MS_RW_PATTERN:
-			printk("MS_RW_PATTERN\n");
-			memcpy(buf, MS_Rdwr, 0x800);
+	case MS_RW_PATTERN:
+		printk(KERN_INFO "MS_RW_PATTERN\n");
+		memcpy(buf, MS_Rdwr, 0x800);
 		break;
-		// For SS
-		case SM_INIT_PATTERN:
-			printk("SM_INIT_PATTERN\n");
-			memcpy(buf, SM_Init, 0x800);
+	/* For SS */
+	case SM_INIT_PATTERN:
+		printk(KERN_INFO "SM_INIT_PATTERN\n");
+		memcpy(buf, SM_Init, 0x800);
 		break;
-		case SM_RW_PATTERN:
-			printk("SM_RW_PATTERN\n");
-			memcpy(buf, SM_Rdwr, 0x800);
+	case SM_RW_PATTERN:
+		printk(KERN_INFO "SM_RW_PATTERN\n");
+		memcpy(buf, SM_Rdwr, 0x800);
 		break;
 	}
 
 	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x800;
-	bcb->Flags =0x00;
+	bcb->Flags = 0x00;
 	bcb->CDB[0] = 0xEF;
 
 	result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
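The keucr hunks in this file replace bare printk() calls with explicitly levelled ones; without a KERN_* marker a message is logged at the build-time default level, so informational chatter and genuine errors cannot be filtered apart. The pattern, with a made-up message (pr_info()/pr_err() would be the terser modern spelling):

#include <linux/kernel.h>

static void example_report(int result)
{
	printk(KERN_INFO "transport --- example step\n");
	if (result)
		printk(KERN_ERR "example step failed: %x\n", result);
}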
@@ -354,54 +341,63 @@
 	return result;
 }
 
-//----- ENE_SendScsiCmd() ---------------------
+/*
+ * ENE_SendScsiCmd():
+ */
 int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
 
 	int result;
-	unsigned int transfer_length=bcb->DataTransferLength, cswlen=0, partial=0;
+	unsigned int transfer_length = bcb->DataTransferLength,
+		     cswlen = 0, partial = 0;
 	unsigned int residue;
 
-	//printk("transport --- ENE_SendScsiCmd\n");
-	// send cmd to out endpoint
-	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL);
-	if (result != USB_STOR_XFER_GOOD)
-	{
-		printk("send cmd to out endpoint fail ---\n");
+	/* printk(KERN_INFO "transport --- ENE_SendScsiCmd\n"); */
+	/* send cmd to out endpoint */
+	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
+					    bcb, US_BULK_CB_WRAP_LEN, NULL);
+	if (result != USB_STOR_XFER_GOOD) {
+		printk(KERN_ERR "send cmd to out endpoint fail ---\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	if (buf)
-	{
-		unsigned int pipe = fDir == FDIR_READ ? us->recv_bulk_pipe : us->send_bulk_pipe;
-		// Bulk
+	if (buf) {
+		unsigned int pipe = fDir;
+
+		if (fDir == FDIR_READ)
+			pipe = us->recv_bulk_pipe;
+		else
+			pipe = us->send_bulk_pipe;
+
+		/* Bulk */
 		if (use_sg)
 			result = usb_stor_bulk_srb(us, pipe, us->srb);
 		else
-			result = usb_stor_bulk_transfer_sg(us, pipe, buf, transfer_length, 0, &partial);
-		if (result != USB_STOR_XFER_GOOD)
-		{
-			printk("data transfer fail ---\n");
+			result = usb_stor_bulk_transfer_sg(us, pipe, buf,
+						transfer_length, 0, &partial);
+		if (result != USB_STOR_XFER_GOOD) {
+			printk(KERN_ERR "data transfer fail ---\n");
 			return USB_STOR_TRANSPORT_ERROR;
 		}
 	}
 
-	// Get CSW for device status
-	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen);
+	/* Get CSW for device status */
+	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
+						US_BULK_CS_WRAP_LEN, &cswlen);
 
-	if (result == USB_STOR_XFER_SHORT && cswlen == 0)
-	{
-		printk("Received 0-length CSW; retrying...\n");
-		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen);
+	if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
+		printk(KERN_WARNING "Received 0-length CSW; retrying...\n");
+		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+					bcs, US_BULK_CS_WRAP_LEN, &cswlen);
 	}
 
-	if (result == USB_STOR_XFER_STALLED)
-	{
+	if (result == USB_STOR_XFER_STALLED) {
 		/* get the status again */
-		printk("Attempting to get CSW (2nd try)...\n");
-		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL);
+		printk(KERN_WARNING "Attempting to get CSW (2nd try)...\n");
+		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+						bcs, US_BULK_CS_WRAP_LEN, NULL);
 	}
 
 	if (result != USB_STOR_XFER_GOOD)
@@ -410,12 +406,14 @@
 	/* check bulk status */
 	residue = le32_to_cpu(bcs->Residue);
 
-	/* try to compute the actual residue, based on how much data
-	 * was really transferred and what the device tells us */
-	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE))
-	{
+	/*
+	 * try to compute the actual residue, based on how much data
+	 * was really transferred and what the device tells us
+	 */
+	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
 		residue = min(residue, transfer_length);
-		scsi_set_resid(us->srb, max(scsi_get_resid(us->srb), (int) residue));
+		scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
+							(int) residue));
 	}
 
 	if (bcs->Status != US_BULK_STAT_OK)
@@ -424,35 +422,40 @@
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- ENE_Read_Data() ---------------------
+/*
+ * ENE_Read_Data()
+ */
 int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
 	int result;
 
-	//printk("transport --- ENE_Read_Data\n");
-	// set up the command wrapper
+	/* printk(KERN_INFO "transport --- ENE_Read_Data\n"); */
+	/* set up the command wrapper */
 	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
-	bcb->Flags =0x80;
+	bcb->Flags = 0x80;
 	bcb->CDB[0] = 0xED;
 	bcb->CDB[2] = 0xFF;
 	bcb->CDB[3] = 0x81;
 
-	// send cmd to out endpoint
-	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL);
+	/* send cmd to out endpoint */
+	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
+						US_BULK_CB_WRAP_LEN, NULL);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	// R/W data
-	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, buf, length, NULL);
+	/* R/W data */
+	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
+						buf, length, NULL);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	// Get CSW for device status
-	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL);
+	/* Get CSW for device status */
+	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
+						US_BULK_CS_WRAP_LEN, NULL);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 	if (bcs->Status != US_BULK_STAT_OK)
@@ -461,35 +464,40 @@
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- ENE_Write_Data() ---------------------
+/*
+ * ENE_Write_Data():
+ */
 int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
 	int result;
 
-	//printk("transport --- ENE_Write_Data\n");
-	// set up the command wrapper
+	/* printk("transport --- ENE_Write_Data\n"); */
+	/* set up the command wrapper */
 	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
-	bcb->Flags =0x00;
+	bcb->Flags = 0x00;
 	bcb->CDB[0] = 0xEE;
 	bcb->CDB[2] = 0xFF;
 	bcb->CDB[3] = 0x81;
 
-	// send cmd to out endpoint
-	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, NULL);
+	/* send cmd to out endpoint */
+	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
+						US_BULK_CB_WRAP_LEN, NULL);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	// R/W data
-	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, buf, length, NULL);
+	/* R/W data */
+	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
+						buf, length, NULL);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	// Get CSW for device status
-	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, NULL);
+	/* Get CSW for device status */
+	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
+						US_BULK_CS_WRAP_LEN, NULL);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 	if (bcs->Status != US_BULK_STAT_OK)
@@ -498,42 +506,52 @@
 	return USB_STOR_TRANSPORT_GOOD;
 }
 
-//----- usb_stor_print_cmd() ---------------------
+/*
+ * usb_stor_print_cmd():
+ */
 void usb_stor_print_cmd(struct scsi_cmnd *srb)
 {
 	PBYTE	Cdb = srb->cmnd;
 	DWORD	cmd = Cdb[0];
-	DWORD	bn  =	((Cdb[2]<<24) & 0xff000000) | ((Cdb[3]<<16) & 0x00ff0000) |
-			((Cdb[4]<< 8) & 0x0000ff00) | ((Cdb[5]<< 0) & 0x000000ff);
-	WORD	blen = ((Cdb[7]<< 8) & 0xff00) | ((Cdb[8]<< 0) & 0x00ff);
+	DWORD	bn  =	((Cdb[2] << 24) & 0xff000000) |
+			((Cdb[3] << 16) & 0x00ff0000) |
+			((Cdb[4] << 8) & 0x0000ff00) |
+			((Cdb[5] << 0) & 0x000000ff);
+	WORD	blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
 
 	switch (cmd) {
 	case TEST_UNIT_READY:
-		//printk("scsi cmd %X --- SCSIOP_TEST_UNIT_READY\n", cmd);
+		/* printk(KERN_INFO
+			 "scsi cmd %X --- SCSIOP_TEST_UNIT_READY\n", cmd); */
 		break;
 	case INQUIRY:
-		printk("scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
+		printk(KERN_INFO "scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
 		break;
 	case MODE_SENSE:
-		printk("scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
+		printk(KERN_INFO "scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
 		break;
 	case START_STOP:
-		printk("scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
+		printk(KERN_INFO "scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
 		break;
 	case READ_CAPACITY:
-		printk("scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
+		printk(KERN_INFO "scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
 		break;
 	case READ_10:
-		//printk("scsi cmd %X --- SCSIOP_READ, bn = %X, blen = %X\n", cmd, bn, blen);
+		/* printk(KERN_INFO
+			  "scsi cmd %X --- SCSIOP_READ, bn = %X, blen = %X\n",
+			  cmd, bn, blen); */
 		break;
 	case WRITE_10:
-		//printk("scsi cmd %X --- SCSIOP_WRITE, bn = %X, blen = %X\n", cmd, bn, blen);
+		/* printk(KERN_INFO
+			  "scsi cmd %X --- SCSIOP_WRITE, bn = %X, blen = %X\n",
+			  cmd, bn, blen); */
 		break;
 	case ALLOW_MEDIUM_REMOVAL:
-		printk("scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
+		printk(KERN_INFO
+			"scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
 		break;
 	default:
-		printk("scsi cmd %X --- Other cmd\n", cmd);
+		printk(KERN_INFO "scsi cmd %X --- Other cmd\n", cmd);
 		break;
 	}
 	bn = 0;
diff --git a/drivers/staging/keucr/init.h b/drivers/staging/keucr/init.h
index cd199fc..5223132 100644
--- a/drivers/staging/keucr/init.h
+++ b/drivers/staging/keucr/init.h
@@ -1,5 +1,8 @@
 #include "common.h"
 
+extern DWORD MediaChange;
+extern int Check_D_MediaFmt(struct us_data *);
+
 BYTE SD_Init1[] = {
 0x90, 0xFF, 0x09, 0xE0, 0x30, 0xE1, 0x06, 0x90,
 0xFF, 0x23, 0x74, 0x80, 0xF0, 0x90, 0xFF, 0x09,
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index 9a3fdb4..452ea8f 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -347,7 +347,7 @@
 	BYTE                     *PageBuffer;
 	MS_LibTypeExtdat         ExtraData;
 
-	if ((PageBuffer = (BYTE *)kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL))==NULL)
+	if ((PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL))==NULL)
 		return (DWORD)-1;
 
 	result = (DWORD)-1;
@@ -480,8 +480,8 @@
 	DWORD  i;
 
 
-	us->MS_Lib.Phy2LogMap = (WORD *)kmalloc(us->MS_Lib.NumberOfPhyBlock * sizeof(WORD), GFP_KERNEL);
-	us->MS_Lib.Log2PhyMap = (WORD *)kmalloc(us->MS_Lib.NumberOfLogBlock * sizeof(WORD), GFP_KERNEL);
+	us->MS_Lib.Phy2LogMap = kmalloc(us->MS_Lib.NumberOfPhyBlock * sizeof(WORD), GFP_KERNEL);
+	us->MS_Lib.Log2PhyMap = kmalloc(us->MS_Lib.NumberOfLogBlock * sizeof(WORD), GFP_KERNEL);
 
 	if ((us->MS_Lib.Phy2LogMap == NULL) || (us->MS_Lib.Log2PhyMap == NULL))
 	{
@@ -610,8 +610,8 @@
 {
 	us->MS_Lib.wrtblk = (WORD)-1;
 
-	us->MS_Lib.blkpag = (BYTE *)kmalloc(us->MS_Lib.PagesPerBlock * us->MS_Lib.BytesPerSector, GFP_KERNEL);
-	us->MS_Lib.blkext = (MS_LibTypeExtdat *)kmalloc(us->MS_Lib.PagesPerBlock * sizeof(MS_LibTypeExtdat), GFP_KERNEL);
+	us->MS_Lib.blkpag = kmalloc(us->MS_Lib.PagesPerBlock * us->MS_Lib.BytesPerSector, GFP_KERNEL);
+	us->MS_Lib.blkext = kmalloc(us->MS_Lib.PagesPerBlock * sizeof(MS_LibTypeExtdat), GFP_KERNEL);
 
 	if ((us->MS_Lib.blkpag == NULL) || (us->MS_Lib.blkext == NULL))
 	{
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index bdfbf76..2cbe9f8 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -153,9 +153,9 @@
 	WORD len, bn;
 
 	//if (Check_D_MediaPower())        ; ¦b 6250 don't care
-	//    return(ErrCode);             ;
+	//    return(ErrCode);
 	//if (Check_D_MediaFmt(fdoExt))    ;
-	//    return(ErrCode);             ;
+	//    return(ErrCode);
 	if (Conv_D_MediaAddr(us, start))
 		return(ErrCode);
 
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 1b52535..ce10cf2 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -763,8 +763,8 @@
 	bcb->CDB[7]			= (BYTE)addr;
 	bcb->CDB[6]			= (BYTE)(addr/0x0100);
 	bcb->CDB[5]			= Media.Zone/2;
-	bcb->CDB[8]			= *(redundant+REDT_ADDR1H);;
-	bcb->CDB[9]			= *(redundant+REDT_ADDR1L);;
+	bcb->CDB[8]			= *(redundant+REDT_ADDR1H);
+	bcb->CDB[9]			= *(redundant+REDT_ADDR1L);
 
 	result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
 	if (result != USB_STOR_XFER_GOOD)
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 1e3bb14..9647154 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -147,7 +147,7 @@
 	const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
 	int frames = fsize / bytes_per_frame;
 
-	if (runtime == 0)
+	if (runtime == NULL)
 		return;
 
 	if (line6pcm->pos_in_done + frames > runtime->buffer_size) {
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index ab67e88..e554a2d 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -127,7 +127,7 @@
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
 
-	if (urb == 0) {
+	if (urb == NULL) {
 		dev_err(line6->ifcdev, "Out of memory\n");
 		return -ENOMEM;
 	}
@@ -137,7 +137,7 @@
 
 	transfer_buffer = kmalloc(length, GFP_ATOMIC);
 
-	if (transfer_buffer == 0) {
+	if (transfer_buffer == NULL) {
 		usb_free_urb(urb);
 		dev_err(line6->ifcdev, "Out of memory\n");
 		return -ENOMEM;
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 29940fd..10c5438 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -246,7 +246,7 @@
 
 	change_volume(urb_out, line6pcm->volume_playback, bytes_per_frame);
 
-	if (line6pcm->prev_fbuf != 0) {
+	if (line6pcm->prev_fbuf != NULL) {
 #ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
 		if (line6pcm->flags & MASK_PCM_IMPULSE) {
 			create_impulse_test_signal(line6pcm, urb_out,
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 971844b..9bcf149 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -377,7 +377,7 @@
 	duty_cycle = new_duty_cycle;
 	freq = new_freq;
 
-	loops_per_sec = current_cpu_data.loops_per_jiffy;
+	loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
 	loops_per_sec *= HZ;
 
 	/* How many clocks in a microsecond?, avoiding long long divide */
@@ -398,7 +398,7 @@
 	dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
 		"clk/jiffy=%ld, pulse=%ld, space=%ld, "
 		"conv_us_to_clocks=%ld\n",
-		freq, duty_cycle, current_cpu_data.loops_per_jiffy,
+		freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
 		pulse_width, space_width, conv_us_to_clocks);
 	return 0;
 }
diff --git a/drivers/staging/memrar/memrar.h b/drivers/staging/memrar/memrar.h
index 0b735b8..0feb73b 100644
--- a/drivers/staging/memrar/memrar.h
+++ b/drivers/staging/memrar/memrar.h
@@ -95,6 +95,7 @@
 	dma_addr_t bus_address;
 };
 
+#if defined(CONFIG_MRST_RAR_HANDLER)
 /**
  * rar_reserve() - reserve RAR buffers
  * @buffers:	array of RAR_buffers where type and size of buffers to
@@ -149,7 +150,25 @@
 extern size_t rar_handle_to_bus(struct RAR_buffer *buffers,
 				size_t count);
 
+#else
 
+extern inline size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
+{
+	return 0;
+}
+
+extern inline size_t rar_release(struct RAR_buffer *buffers, size_t count)
+{
+	return 0;
+}
+
+extern inline size_t rar_handle_to_bus(struct RAR_buffer *buffers,
+				size_t count)
+{
+	return 0;
+}
+
+#endif  /* CONFIG_MRST_RAR_HANDLER */
 #endif  /* __KERNEL__ */
 
 #endif  /* _MEMRAR_H */
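A minimal sketch of why the stubs above help: callers can invoke the RAR helpers unconditionally and treat a zero return as "nothing reserved" when CONFIG_MRST_RAR_HANDLER is off. The function below is hypothetical, not part of this patch, and assumes <linux/errno.h> for -ENOMEM.

/* Hypothetical caller; compiles with or without CONFIG_MRST_RAR_HANDLER
 * because the header supplies no-op stubs that return 0 when it is off. */
static int example_grab_rar(struct RAR_buffer *buf)
{
	size_t n = rar_reserve(buf, 1);		/* 0 => nothing reserved */

	if (n != 1)
		return -ENOMEM;
	/* ... use buf->bus_address here ... */
	rar_release(buf, 1);
	return 0;
}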
diff --git a/drivers/staging/msm/Makefile b/drivers/staging/msm/Makefile
index bb3606f..07a89ec 100644
--- a/drivers/staging/msm/Makefile
+++ b/drivers/staging/msm/Makefile
@@ -41,11 +41,11 @@
 obj-$(CONFIG_FB_MSM_LCDC) += lcdc.o
 
 # MDDI
-msm_mddi-objs := mddi.o mddihost.o mddihosti.o
+msm_mddi-y := mddi.o mddihost.o mddihosti.o
 obj-$(CONFIG_FB_MSM_MDDI) += msm_mddi.o
 
 # External MDDI
-msm_mddi_ext-objs := mddihost_e.o mddi_ext.o
+msm_mddi_ext-y := mddihost_e.o mddi_ext.o
 obj-$(CONFIG_FB_MSM_EXTMDDI) += msm_mddi_ext.o
 
 # TVEnc
diff --git a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
index 864d7c1..edba78a 100644
--- a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
+++ b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
@@ -77,7 +77,7 @@
 
 	/* followed by parameter bytes */
 	if (num) {
-		bp = (char *)&data;;
+		bp = (char *)&data;
 		bp += (num - 1);
 		while (num) {
 			toshiba_spi_write_byte(1, *bp);
diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c
index 033fc94..2a80775 100644
--- a/drivers/staging/msm/msm_fb_bl.c
+++ b/drivers/staging/msm/msm_fb_bl.c
@@ -42,7 +42,7 @@
 	return 0;
 }
 
-static struct backlight_ops msm_fb_bl_ops = {
+static const struct backlight_ops msm_fb_bl_ops = {
 	.get_brightness = msm_fb_bl_get_brightness,
 	.update_status = msm_fb_bl_update_status,
 };
diff --git a/drivers/staging/msm/tvenc.c b/drivers/staging/msm/tvenc.c
index f41c5ac..4fbb77b 100644
--- a/drivers/staging/msm/tvenc.c
+++ b/drivers/staging/msm/tvenc.c
@@ -279,12 +279,13 @@
 
 	if (IS_ERR(tvenc_clk)) {
 		printk(KERN_ERR "error: can't get tvenc_clk!\n");
-		return IS_ERR(tvenc_clk);
+		return PTR_ERR(tvenc_clk);
 	}
 
 	if (IS_ERR(tvdac_clk)) {
 		printk(KERN_ERR "error: can't get tvdac_clk!\n");
-		return IS_ERR(tvdac_clk);
+		clk_put(tvenc_clk);
+		return PTR_ERR(tvdac_clk);
 	}
 
 //	pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ , "tvenc",
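The fix above matters because IS_ERR() only reports whether a pointer encodes an error, while PTR_ERR() recovers the actual negative errno; it also releases the already-acquired clock before failing on the second one. A small sketch of that general pattern follows (illustrative only, not the driver's exact code; it assumes <linux/clk.h> and <linux/err.h>, and a real driver would keep the clocks instead of dropping them at the end).

/* Sketch of the IS_ERR()/PTR_ERR() plus unwind pattern used above. */
static int example_get_clocks(struct device *dev)
{
	struct clk *enc, *dac;

	enc = clk_get(dev, "tvenc_clk");
	if (IS_ERR(enc))
		return PTR_ERR(enc);	/* propagate the real errno */

	dac = clk_get(dev, "tvdac_clk");
	if (IS_ERR(dac)) {
		clk_put(enc);		/* undo the first clk_get() */
		return PTR_ERR(dac);
	}

	/* a real driver would stash enc/dac; released here to keep the sketch leak-free */
	clk_put(dac);
	clk_put(enc);
	return 0;
}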
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index ac2d3d0..35f9cda 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,6 +1,5 @@
 TODO:
 	- checkpatch.pl cleanups
-	- port geode gpio calls to newer cs5535 API
 	- see if vx855 gpio API can be made similar enough to cs5535 so we can
 	  share more code
 	- allow simultaneous XO-1 and XO-1.5 support
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 4ca45ec..9f26dc9 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -27,7 +27,6 @@
 #include <asm/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/reboot.h>
-#include <linux/gpio.h>
 #include <asm/tsc.h>
 #include <asm/olpc.h>
 
@@ -49,7 +48,7 @@
 	int (*init)(void);
 	void (*bus_stabilize_wiggle)(void);
 	void (*set_dconload)(int);
-	int (*read_status)(void);
+	u8 (*read_status)(void);
 };
 
 static struct dcon_platform_data *pdata;
@@ -615,7 +614,7 @@
 	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
 };
 
-static struct backlight_ops dcon_bl_ops = {
+static const struct backlight_ops dcon_bl_ops = {
 	.get_brightness = dconbl_get,
 	.update_status = dconbl_set
 };
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 6453ca4..e566d21 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -29,26 +29,6 @@
 #define DCON_REG_SCAN_INT	9
 #define DCON_REG_BRIGHT		10
 
-/* GPIO registers (CS5536) */
-
-#define MSR_LBAR_GPIO		0x5140000C
-
-#define GPIOx_OUT_VAL     0x00
-#define GPIOx_OUT_EN      0x04
-#define GPIOx_IN_EN       0x20
-#define GPIOx_INV_EN      0x24
-#define GPIOx_IN_FLTR_EN  0x28
-#define GPIOx_EVNTCNT_EN  0x2C
-#define GPIOx_READ_BACK   0x30
-#define GPIOx_EVNT_EN     0x38
-#define GPIOx_NEGEDGE_EN  0x44
-#define GPIOx_NEGEDGE_STS 0x4C
-#define GPIO_FLT7_AMNT    0xD8
-#define GPIO_MAP_X        0xE0
-#define GPIO_MAP_Y        0xE4
-#define GPIO_FE7_SEL      0xF7
-
-
 /* Status values */
 
 #define DCONSTAT_SCANINT	0
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 779fb7d..043198d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -10,54 +10,70 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
-
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
 #include <asm/olpc.h>
 
 #include "olpc_dcon.h"
 
-/* Base address of the GPIO registers */
-static unsigned long gpio_base;
-
-/*
- * List of GPIOs that we care about:
- * (in)  GPIO12   -- DCONBLANK
- * (in)  GPIO[56] -- DCONSTAT[01]
- * (out) GPIO11   -- DCONLOAD
- */
-
-#define IN_GPIOS ((1<<5) | (1<<6) | (1<<7) | (1<<12))
-#define OUT_GPIOS (1<<11)
-
 static int dcon_init_xo_1(void)
 {
-	unsigned long lo, hi;
 	unsigned char lob;
 
-	rdmsr(MSR_LBAR_GPIO, lo, hi);
-
-	/* Check the mask and whether GPIO is enabled (sanity check) */
-	if (hi != 0x0000f001) {
-		printk(KERN_ERR "GPIO not enabled -- cannot use DCON\n");
-		return -ENODEV;
+	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request STAT0 GPIO\n");
+		return -EIO;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request STAT1 GPIO\n");
+		goto err_gp_stat1;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request IRQ GPIO\n");
+		goto err_gp_irq;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request LOAD GPIO\n");
+		goto err_gp_load;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request BLANK GPIO\n");
+		goto err_gp_blank;
 	}
 
-	/* Mask off the IO base address */
-	gpio_base = lo & 0x0000ff00;
-
 	/* Turn off the event enable for GPIO7 just to be safe */
-	outl(1 << (16+7), gpio_base + GPIOx_EVNT_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+	/*
+	 * Determine the current state by reading the GPIO bit; earlier
+	 * stages of the boot process have established the state.
+	 *
+	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+	 * this is because OFW will disable input for the pin and set a value.
+	 * READ_BACK will only contain a valid value if input is enabled and
+	 * then a value is set.  So, future readings of the pin can use
+	 * READ_BACK, but the first one cannot.  Awesome, huh?
+	 */
+	dcon_source = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+		? DCON_SOURCE_CPU
+		: DCON_SOURCE_DCON;
+	dcon_pending = dcon_source;
 
 	/* Set the directions for the GPIO pins */
-	outl(OUT_GPIOS | (IN_GPIOS << 16), gpio_base + GPIOx_OUT_EN);
-	outl(IN_GPIOS | (OUT_GPIOS << 16), gpio_base + GPIOx_IN_EN);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+			dcon_source == DCON_SOURCE_CPU);
 
 	/* Set up the interrupt mappings */
 
 	/* Set the IRQ to pair 2 */
-	geode_gpio_event_irq(OLPC_GPIO_DCON_IRQ, 2);
+	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
 
 	/* Enable group 2 to trigger the DCON interrupt */
-	geode_gpio_set_irq(2, DCON_IRQ);
+	cs5535_gpio_set_irq(2, DCON_IRQ);
 
 	/* Select edge level for interrupt (in PIC) */
 	lob = inb(0x4d0);
@@ -65,52 +81,61 @@
 	outb(lob, 0x4d0);
 
 	/* Register the interupt handler */
-	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver))
-		return -EIO;
+	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver)) {
+		printk(KERN_ERR "olpc-dcon: failed to request DCON's irq\n");
+		goto err_req_irq;
+	}
 
 	/* Clear INV_EN for GPIO7 (DCONIRQ) */
-	outl((1<<(16+7)), gpio_base + GPIOx_INV_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
 
 	/* Enable filter for GPIO12 (DCONBLANK) */
-	outl(1<<(12), gpio_base + GPIOx_IN_FLTR_EN);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
 
 	/* Disable filter for GPIO7 */
-	outl(1<<(16+7), gpio_base + GPIOx_IN_FLTR_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
 
 	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-
-	outl(1<<(16+7), gpio_base + GPIOx_EVNTCNT_EN);
-	outl(1<<(16+12), gpio_base + GPIOx_EVNTCNT_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
 
 	/* Add GPIO12 to the Filter Event Pair #7 */
-	outb(12, gpio_base + GPIO_FE7_SEL);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
 
 	/* Turn off negative Edge Enable for GPIO12 */
-	outl(1<<(16+12), gpio_base + GPIOx_NEGEDGE_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
 
 	/* Enable negative Edge Enable for GPIO7 */
-	outl(1<<7, gpio_base + GPIOx_NEGEDGE_EN);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
 
 	/* Zero the filter amount for Filter Event Pair #7 */
-	outw(0, gpio_base + GPIO_FLT7_AMNT);
+	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
 
 	/* Clear the negative edge status for GPIO7 and GPIO12 */
-	outl((1<<7) | (1<<12), gpio_base+0x4c);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
 
 	/* FIXME:  Clear the posiitive status as well, just to be sure */
-	outl((1<<7) | (1<<12), gpio_base+0x48);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
 
 	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-	outl((1<<(7))|(1<<12), gpio_base + GPIOx_EVNT_EN);
-
-	/* Determine the current state by reading the GPIO bit */
-	/* Earlier stages of the boot process have established the state */
-	dcon_source = inl(gpio_base + GPIOx_OUT_VAL) & (1<<11)
-		? DCON_SOURCE_CPU
-		: DCON_SOURCE_DCON;
-	dcon_pending = dcon_source;
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
 
 	return 0;
+
+err_req_irq:
+	gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+	gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+	gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+	gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+	gpio_free(OLPC_GPIO_DCON_STAT0);
+	return -EIO;
 }
 
 static void dcon_wiggle_xo_1(void)
@@ -128,37 +153,44 @@
 	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
 	 * GPIO15.
  	 */
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
 
 	for (x = 0; x < 16; x++) {
 		udelay(5);
-		geode_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
 		udelay(5);
-		geode_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
 	}
 	udelay(5);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
 }
 
 static void dcon_set_dconload_1(int val)
 {
-	if (val)	
-		outl(1<<11, gpio_base + GPIOx_OUT_VAL);
-	else
-		outl(1<<(11 + 16), gpio_base + GPIOx_OUT_VAL);
+	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
 }
 
-static int dcon_read_status_xo_1(void)
+static u8 dcon_read_status_xo_1(void)
 {
-	int status = inl(gpio_base + GPIOx_READ_BACK) >> 5;
-	
+	u8 status;
+
+	status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+	status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
 	/* Clear the negative edge status for GPIO7 */
-	outl(1 << 7, gpio_base + GPIOx_NEGEDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
 
 	return status;
 }
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index cca6a23..4f56098 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -195,9 +195,9 @@
 	}
 }
 
-static int dcon_read_status_xo_1_5(void) 
+static u8 dcon_read_status_xo_1_5(void)
 {
-	int status;
+	u8 status;
 	
 	if (!dcon_was_irq())
 		return -1;
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 6771520..683657c 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -69,7 +69,7 @@
 	return ret;
 }
 
-static const struct pci_device_id phison_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
 	{ PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000, PCI_ANY_ID, PCI_ANY_ID,
 	  PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
 	{ 0, },
diff --git a/drivers/staging/pohmelfs/crypto.c b/drivers/staging/pohmelfs/crypto.c
index 2fdb3e0..6540864 100644
--- a/drivers/staging/pohmelfs/crypto.c
+++ b/drivers/staging/pohmelfs/crypto.c
@@ -130,10 +130,8 @@
 
 void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e)
 {
-	if (e->hash)
-		crypto_free_hash(e->hash);
-	if (e->cipher)
-		crypto_free_ablkcipher(e->cipher);
+	crypto_free_hash(e->hash);
+	crypto_free_ablkcipher(e->cipher);
 	kfree(e->data);
 }
 
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 61685cc..56d3a4e 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -826,6 +826,13 @@
 	.set_page_dirty 	= __set_page_dirty_nobuffers,
 };
 
+static void pohmelfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
+}
+
 /*
  * ->detroy_inode() callback. Deletes inode from the caches
  *  and frees private data.
@@ -842,8 +849,8 @@
 
 	dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
 		__func__, pi, &pi->vfs_inode, pi->ino);
-	kmem_cache_free(pohmelfs_inode_cache, pi);
 	atomic_long_dec(&psb->total_inodes);
+	call_rcu(&inode->i_rcu, pohmelfs_i_callback);
 }
 
 /*
@@ -1318,8 +1325,8 @@
 	}
 
 	psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
-	cancel_rearming_delayed_work(&psb->dwork);
-	cancel_rearming_delayed_work(&psb->drop_dwork);
+	cancel_delayed_work_sync(&psb->dwork);
+	cancel_delayed_work_sync(&psb->drop_dwork);
 	flush_scheduled_work();
 
 	dprintk("%s: stopped workqueues.\n", __func__);
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
index 9279897..b2e9186 100644
--- a/drivers/staging/pohmelfs/net.c
+++ b/drivers/staging/pohmelfs/net.c
@@ -413,7 +413,7 @@
 				if (dentry) {
 					alias = d_materialise_unique(dentry, &npi->vfs_inode);
 					if (alias)
-						dput(dentry);
+						dput(alias);
 				}
 
 				dput(dentry);
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index 8ec83d2..400a9fc 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -83,10 +83,11 @@
 int pohmelfs_path_length(struct pohmelfs_inode *pi)
 {
 	struct dentry *d, *root, *first;
-	int len = 1; /* Root slash */
+	int len;
+	unsigned seq;
 
-	first = d = d_find_alias(&pi->vfs_inode);
-	if (!d) {
+	first = d_find_alias(&pi->vfs_inode);
+	if (!first) {
 		dprintk("%s: ino: %llu, mode: %o.\n", __func__, pi->ino, pi->vfs_inode.i_mode);
 		return -ENOENT;
 	}
@@ -95,7 +96,11 @@
 	root = dget(current->fs->root.dentry);
 	spin_unlock(&current->fs->lock);
 
-	spin_lock(&dcache_lock);
+rename_retry:
+	len = 1; /* Root slash */
+	d = first;
+	seq = read_seqbegin(&rename_lock);
+	rcu_read_lock();
 
 	if (!IS_ROOT(d) && d_unhashed(d))
 		len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */
@@ -104,7 +109,9 @@
 		len += d->d_name.len + 1; /* Plus slash */
 		d = d->d_parent;
 	}
-	spin_unlock(&dcache_lock);
+	rcu_read_unlock();
+	if (read_seqretry(&rename_lock, seq))
+		goto rename_retry;
 
 	dput(root);
 	dput(first);
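The hunk above replaces the old dcache_lock serialization with the lockless read side of rename_lock: take a sequence snapshot, walk d_parent under RCU, and redo the whole walk if a rename raced with it. A minimal sketch of that retry idiom, assuming only the generic seqlock/dcache APIs from <linux/dcache.h> and <linux/rcupdate.h> (the function itself is illustrative, not from this patch).

/* Illustrative read-side retry loop; the length is recomputed from scratch
 * whenever read_seqretry() reports that a rename ran concurrently. */
static int example_path_len(struct dentry *start)
{
	struct dentry *d;
	unsigned seq;
	int len;

retry:
	len = 1;				/* leading '/' */
	d = start;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (; !IS_ROOT(d); d = d->d_parent)
		len += d->d_name.len + 1;	/* name plus a slash */
	rcu_read_unlock();
	if (read_seqretry(&rename_lock, seq))
		goto retry;
	return len;
}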
diff --git a/drivers/staging/rt2860/chip/mac_pci.h b/drivers/staging/rt2860/chip/mac_pci.h
index 9f25ef0..b8868a5 100644
--- a/drivers/staging/rt2860/chip/mac_pci.h
+++ b/drivers/staging/rt2860/chip/mac_pci.h
@@ -30,7 +30,8 @@
     Abstract:
 
     Revision History:
-    Who          When          What
+    Who          	When            What
+    Justin P. Mattock	11/07/2010	Fix some typos
     ---------    ----------    ----------------------------------------------
  */
 
@@ -45,7 +46,7 @@
 
 /* */
 /* Device ID & Vendor ID related definitions, */
-/* NOTE: you should not add the new VendorID/DeviceID here unless you not sure it belongs to what chip. */
+/* NOTE: you should not add a new VendorID/DeviceID here unless you know for sure which chip it belongs to. */
 /* */
 #define NIC_PCI_VENDOR_ID		0x1814
 #define PCIBUS_INTEL_VENDOR	0x8086
@@ -83,7 +84,7 @@
 	u32 SDPtr1;
 	/*Word3 */
 	u32 rsv2:24;
-	u32 WIV:1;		/* Wireless Info Valid. 1 if Driver already fill WI,  o if DMA needs to copy WI to correctposition */
+	u32 WIV:1;		/* Wireless Info Valid. 1 if Driver already filled WI, 0 if DMA needs to copy WI to correct position */
 	u32 QSEL:2;		/* select on-chip FIFO ID for 2nd-stage output scheduler.0:MGMT, 1:HCCA 2:EDCA */
 	u32 rsv:2;
 	u32 TCO:1;		/* */
diff --git a/drivers/staging/rt2860/chip/mac_usb.h b/drivers/staging/rt2860/chip/mac_usb.h
index ed0c0b4..e8158fb 100644
--- a/drivers/staging/rt2860/chip/mac_usb.h
+++ b/drivers/staging/rt2860/chip/mac_usb.h
@@ -30,7 +30,8 @@
     Abstract:
 
     Revision History:
-    Who          When          What
+    Who          	When            What
+    Justin P. Mattock	11/07/2010	Fix a typo
     ---------    ----------    ----------------------------------------------
  */
 
@@ -93,7 +94,7 @@
 	/* Word 0 */
 	u32 USBDMATxPktLen:16;	/*used ONLY in USB bulk Aggregation,  Total byte counts of all sub-frame. */
 	u32 rsv:8;
-	u32 WIV:1;		/* Wireless Info Valid. 1 if Driver already fill WI,  o if DMA needs to copy WI to correctposition */
+	u32 WIV:1;		/* Wireless Info Valid. 1 if Driver already filled WI, 0 if DMA needs to copy WI to correct position */
 	u32 QSEL:2;		/* select on-chip FIFO ID for 2nd-stage output scheduler.0:MGMT, 1:HCCA 2:EDCA */
 	u32 SwUseLastRound:1;	/* Software use. */
 	u32 rsv2:2;		/* Software use. */
diff --git a/drivers/staging/rt2860/chip/rtmp_mac.h b/drivers/staging/rt2860/chip/rtmp_mac.h
index e8f7172..3d1e491 100644
--- a/drivers/staging/rt2860/chip/rtmp_mac.h
+++ b/drivers/staging/rt2860/chip/rtmp_mac.h
@@ -32,6 +32,7 @@
 
 	Revision History:
 	Who			When		  What
+	Justin P. Mattock	11/07/2010	  Fix comments and typos
 	--------	----------	  ----------------------------------------------
 */
 
@@ -43,7 +44,7 @@
 /* ================================================================================= */
 
 /* the first 24-byte in TXD is called TXINFO and will be DMAed to MAC block through TXFIFO. */
-/* MAC block use this TXINFO to control the transmission behavior of this frame. */
+/* MAC block uses this TXINFO to control the transmission behavior of this frame. */
 #define FIFO_MGMT                 0
 #define FIFO_HCCA                 1
 #define FIFO_EDCA                 2
@@ -458,8 +459,8 @@
 /* */
 typedef union _RF_CSR_CFG0_STRUC {
 	struct {
-		u32 RegIdAndContent:24;	/* Register     value to program into BBP */
-		u32 bitwidth:5;	/* Selected     BBP     register */
+		u32 RegIdAndContent:24;	/* Register value to program into BBP */
+		u32 bitwidth:5;	/* Selected BBP register */
 		u32 StandbyMode:1;	/* 0: high when stand by 1: low when standby */
 		u32 Sel:1;	/* 0:RF_LE0 activate  1:RF_LE1 activate */
 		u32 Busy:1;	/* 0: idle 1: 8busy */
@@ -469,7 +470,7 @@
 #define RF_CSR_CFG1			0x1024
 typedef union _RF_CSR_CFG1_STRUC {
 	struct {
-		u32 RegIdAndContent:24;	/* Register     value to program into BBP */
+		u32 RegIdAndContent:24;	/* Register value to program into BBP */
 		u32 RFGap:5;	/* Gap between BB_CONTROL_RF and RF_LE. 0: 3 system clock cycle (37.5usec) 1: 5 system clock cycle (62.5usec) */
 		u32 rsv:7;	/* 0: idle 1: 8busy */
 	} field;
@@ -478,7 +479,7 @@
 #define RF_CSR_CFG2			0x1028	/* */
 typedef union _RF_CSR_CFG2_STRUC {
 	struct {
-		u32 RegIdAndContent:24;	/* Register     value to program into BBP */
+		u32 RegIdAndContent:24;	/* Register value to program into BBP */
 		u32 rsv:8;	/* 0: idle 1: 8busy */
 	} field;
 	u32 word;
@@ -490,7 +491,7 @@
 		u32 OffPeriod:8;	/* blinking off period unit 1ms */
 		u32 SlowBlinkPeriod:6;	/* slow blinking period. unit:1ms */
 		u32 rsv:2;
-		u32 RLedMode:2;	/* red Led Mode    0: off1: blinking upon TX2: periodic slow blinking3: always on */
+		u32 RLedMode:2;	/* red Led Mode 0: off1: blinking upon TX2: periodic slow blinking3: always on */
 		u32 GLedMode:2;	/* green Led Mode */
 		u32 YLedMode:2;	/* yellow Led Mode */
 		u32 LedPolar:1;	/* Led Polarity.  0: active low1: active high */
@@ -621,9 +622,9 @@
 #define TX_RTY_CFG	0x134c
 typedef union PACKED _TX_RTY_CFG_STRUC {
 	struct {
-		u32 ShortRtyLimit:8;	/*  short retry limit */
-		u32 LongRtyLimit:8;	/*long retry limit */
-		u32 LongRtyThre:12;	/* Long retry threshoold */
+		u32 ShortRtyLimit:8;	/* short retry limit */
+		u32 LongRtyLimit:8;	/* long retry limit */
+		u32 LongRtyThre:12;	/* Long retry threshold */
 		u32 NonAggRtyMode:1;	/* Non-Aggregate MPDU retry mode.  0:expired by retry limit, 1: expired by mpdu life timer */
 		u32 AggRtyMode:1;	/* Aggregate MPDU retry mode.  0:expired by retry limit, 1: expired by mpdu life timer */
 		u32 TxautoFBEnable:1;	/* Tx retry PHY rate auto fallback enable */
diff --git a/drivers/staging/rt2860/chip/rtmp_phy.h b/drivers/staging/rt2860/chip/rtmp_phy.h
index 9f924ea..98454df 100644
--- a/drivers/staging/rt2860/chip/rtmp_phy.h
+++ b/drivers/staging/rt2860/chip/rtmp_phy.h
@@ -247,7 +247,7 @@
 			}						\
 		}							\
 		if (BbpCsr.field.Busy == BUSY) {			\
-			DBGPRINT_ERR(("BBP(viaMCU=%d) read R%d fail\n", (_bViaMCU), _bbpID));	\
+			DBGPRINT_ERR("BBP(viaMCU=%d) read R%d fail\n", (_bViaMCU), _bbpID);	\
 			*(_pV) = (_pAd)->BbpWriteLatch[_bbpID];               \
 			if ((_bViaMCU) == TRUE) {			\
 				RTMP_IO_READ32(_pAd, _regID, &BbpCsr.word);				\
@@ -336,11 +336,11 @@
 			}						\
 		}							\
 	} else {							\
-		DBGPRINT_ERR((" , brt30xxBanMcuCmd = %d, Read BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I)));	\
+		DBGPRINT_ERR(" , brt30xxBanMcuCmd = %d, Read BBP %d \n", (_A)->brt30xxBanMcuCmd, (_I));	\
 		*(_pV) = (_A)->BbpWriteLatch[_I];			\
 	}								\
 	if ((BbpCsr.field.Busy == BUSY) || ((_A)->bPCIclkOff == TRUE)) { \
-		DBGPRINT_ERR(("BBP read R%d=0x%x fail\n", _I, BbpCsr.word)); \
+		DBGPRINT_ERR("BBP read R%d=0x%x fail\n", _I, BbpCsr.word); \
 		*(_pV) = (_A)->BbpWriteLatch[_I];			\
 	}								\
 }
@@ -378,7 +378,7 @@
 			break;						\
 		}							\
 		if (_busyCnt == MAX_BUSY_COUNT) {			\
-			DBGPRINT_ERR(("BBP write R%d fail\n", _bbpID));	\
+			DBGPRINT_ERR("BBP write R%d fail\n", _bbpID);	\
 			if ((_bViaMCU) == TRUE) {			\
 				RTMP_IO_READ32(_pAd, H2M_BBP_AGENT, &BbpCsr.word);	\
 				BbpCsr.field.Busy = 0;			\
@@ -459,15 +459,15 @@
 				break;															\
 			}																	\
 		} else {						\
-			DBGPRINT_ERR(("  brt30xxBanMcuCmd = %d. Write BBP %d \n",  (_A)->brt30xxBanMcuCmd, (_I)));	\
+			DBGPRINT_ERR("  brt30xxBanMcuCmd = %d. Write BBP %d \n",  (_A)->brt30xxBanMcuCmd, (_I));	\
 		}																	\
 		if ((BusyCnt == MAX_BUSY_COUNT) || ((_A)->bPCIclkOff == TRUE)) { \
 			if (BusyCnt == MAX_BUSY_COUNT)				\
 				(_A)->AccessBBPFailCount++;					\
-			DBGPRINT_ERR(("BBP write R%d=0x%x fail. BusyCnt= %d.bPCIclkOff = %d. \n", _I, BbpCsr.word, BusyCnt, (_A)->bPCIclkOff));	\
+			DBGPRINT_ERR("BBP write R%d=0x%x fail. BusyCnt= %d.bPCIclkOff = %d. \n", _I, BbpCsr.word, BusyCnt, (_A)->bPCIclkOff);	\
 		}																	\
 	} else {							\
-		DBGPRINT_ERR(("****** BBP_Write_Latch Buffer exceeds max boundry ****** \n"));	\
+		DBGPRINT_ERR("****** BBP_Write_Latch Buffer exceeds max boundry ****** \n");	\
 	}																		\
 }
 #endif /* RTMP_MAC_PCI // */
diff --git a/drivers/staging/rt2860/chips/rt3090.c b/drivers/staging/rt2860/chips/rt3090.c
index c2933c6..334720e 100644
--- a/drivers/staging/rt2860/chips/rt3090.c
+++ b/drivers/staging/rt2860/chips/rt3090.c
@@ -28,10 +28,11 @@
 	rt3090.c
 
 	Abstract:
-	Specific funcitons and variables for RT3070
+	Specific functions and variables for RT3070
 
 	Revision History:
-	Who         When          What
+	Who         		When            What
+	Justin P. Mattock	11/07/2010	Fix a typo
 	--------    ----------    ----------------------------------------------
 */
 
@@ -51,7 +52,8 @@
 	if (IS_RT3090(pAd)) {
 		/* Init RF calibration */
 		/* Driver should toggle RF R30 bit7 before init RF registers */
-		u32 RfReg = 0, data;
+		u8 RfReg;
+		u32 data;
 
 		RT30xxReadRFRegister(pAd, RF_R30, (u8 *)&RfReg);
 		RfReg |= 0x80;
diff --git a/drivers/staging/rt2860/chips/rt30xx.c b/drivers/staging/rt2860/chips/rt30xx.c
index 4367a19..354debf 100644
--- a/drivers/staging/rt2860/chips/rt30xx.c
+++ b/drivers/staging/rt2860/chips/rt30xx.c
@@ -28,10 +28,11 @@
 	rt30xx.c
 
 	Abstract:
-	Specific funcitons and variables for RT30xx.
+	Specific functions and variables for RT30xx.
 
 	Revision History:
-	Who         When          What
+	Who         		When            What
+	Justin P. Mattock	11/07/2010	Fix some typos
 	--------    ----------    ----------------------------------------------
 */
 
@@ -53,7 +54,7 @@
 	,
 	{RF_R06, 0x02}
 	,
-	{RF_R07, 0x70}
+	{RF_R07, 0x60}
 	,
 	{RF_R09, 0x0F}
 	,
@@ -89,7 +90,7 @@
 
 u8 NUM_RF_REG_PARMS = (sizeof(RT30xx_RFRegTable) / sizeof(struct rt_reg_pair));
 
-/* Antenna divesity use GPIO3 and EESK pin for control */
+/* Antenna diversity uses GPIO3 and EESK pin for control */
 /* Antenna and EEPROM access are both using EESK pin, */
 /* Therefor we should avoid accessing EESK at the same time */
 /* Then restore antenna after EEPROM access */
@@ -243,7 +244,7 @@
 				break;
 			}
 
-			/* prevent infinite loop cause driver hang. */
+			/* prevent an infinite loop, which would cause a driver hang. */
 			if (loopcnt++ > 100) {
 				DBGPRINT(RT_DEBUG_ERROR,
 					 ("RTMPFilterCalibration - can't find a valid value, loopcnt=%d stop calibrating",
@@ -441,7 +442,7 @@
 
 		/* VCO_IC, RF R7 register Bit 4 & Bit 5 to 1 */
 		RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
-		RFValue |= 0x30;
+		RFValue |= 0x20;
 		RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
 
 		/* Idoh, RF R9 register Bit 1, Bit 2 & Bit 3 to 1 */
diff --git a/drivers/staging/rt2860/common/ba_action.c b/drivers/staging/rt2860/common/ba_action.c
index 8eef82d..b046c2b 100644
--- a/drivers/staging/rt2860/common/ba_action.c
+++ b/drivers/staging/rt2860/common/ba_action.c
@@ -799,8 +799,8 @@
 			/* force send specified TID DelBA */
 			struct rt_mlme_delba_req DelbaReq;
 			struct rt_mlme_queue_elem *Elem =
-			    (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
-							MEM_ALLOC_FLAG);
+				kmalloc(sizeof(struct rt_mlme_queue_elem),
+					MEM_ALLOC_FLAG);
 			if (Elem != NULL) {
 				NdisZeroMemory(&DelbaReq, sizeof(DelbaReq));
 				NdisZeroMemory(Elem, sizeof(struct rt_mlme_queue_elem));
@@ -839,8 +839,8 @@
 	    && (pBAEntry->ORI_BA_Status == Originator_Done)) {
 		struct rt_mlme_delba_req DelbaReq;
 		struct rt_mlme_queue_elem *Elem =
-		    (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
-						MEM_ALLOC_FLAG);
+			kmalloc(sizeof(struct rt_mlme_queue_elem),
+				MEM_ALLOC_FLAG);
 		if (Elem != NULL) {
 			NdisZeroMemory(&DelbaReq, sizeof(DelbaReq));
 			NdisZeroMemory(Elem, sizeof(struct rt_mlme_queue_elem));
@@ -908,8 +908,8 @@
 		/* */
 		if (bPassive == FALSE) {
 			struct rt_mlme_queue_elem *Elem =
-			    (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
-							MEM_ALLOC_FLAG);
+				kmalloc(sizeof(struct rt_mlme_queue_elem),
+					MEM_ALLOC_FLAG);
 			if (Elem != NULL) {
 				NdisZeroMemory(&DelbaReq, sizeof(DelbaReq));
 				NdisZeroMemory(Elem, sizeof(struct rt_mlme_queue_elem));
@@ -1270,13 +1270,13 @@
 
 	/* First check the size, it MUST not exceed the mlme queue size */
 	if (MsgLen > MGMT_DMA_BUFFER_SIZE) {
-		DBGPRINT_ERR(("CntlEnqueueForRecv: frame too large, size = %ld \n", MsgLen));
+		DBGPRINT_ERR("CntlEnqueueForRecv: frame too large, size = %ld \n", MsgLen);
 		return FALSE;
 	} else if (MsgLen != sizeof(struct rt_frame_ba_req)) {
-		DBGPRINT_ERR(("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen));
+		DBGPRINT_ERR("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen);
 		return FALSE;
 	} else if (MsgLen != sizeof(struct rt_frame_ba_req)) {
-		DBGPRINT_ERR(("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen));
+		DBGPRINT_ERR("CntlEnqueueForRecv: BlockAck Request frame length size = %ld incorrect\n", MsgLen);
 		return FALSE;
 	}
 
diff --git a/drivers/staging/rt2860/common/cmm_data.c b/drivers/staging/rt2860/common/cmm_data.c
index 93a5347..2204c2b 100644
--- a/drivers/staging/rt2860/common/cmm_data.c
+++ b/drivers/staging/rt2860/common/cmm_data.c
@@ -1366,7 +1366,7 @@
 	/* R66 should not be 0 */
 	if (pAd->BbpTuning.R66CurrentValue == 0) {
 		pAd->BbpTuning.R66CurrentValue = 0x38;
-		DBGPRINT_ERR(("RTMPResumeMsduTransmission, R66CurrentValue=0...\n"));
+		DBGPRINT_ERR("RTMPResumeMsduTransmission, R66CurrentValue=0...\n");
 	}
 
 	RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66,
diff --git a/drivers/staging/rt2860/common/cmm_data_pci.c b/drivers/staging/rt2860/common/cmm_data_pci.c
index 43d73a0..7af59ff 100644
--- a/drivers/staging/rt2860/common/cmm_data_pci.c
+++ b/drivers/staging/rt2860/common/cmm_data_pci.c
@@ -137,7 +137,7 @@
 
 	pTxD->SDPtr0 = BufBasePaLow;
 	pTxD->SDLen0 = TXINFO_SIZE + TXWI_SIZE + hwHeaderLen;	/* include padding */
-	pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);;
+	pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);
 	pTxD->SDLen1 = pTxBlk->SrcBufLen;
 	pTxD->LastSec0 = 0;
 	pTxD->LastSec1 = (bIsLast) ? 1 : 0;
@@ -215,7 +215,7 @@
 
 	pTxD->SDPtr0 = BufBasePaLow;
 	pTxD->SDLen0 = firstDMALen;	/* include padding */
-	pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);;
+	pTxD->SDPtr1 = PCI_MAP_SINGLE(pAd, pTxBlk, 0, 1, PCI_DMA_TODEVICE);
 	pTxD->SDLen1 = pTxBlk->SrcBufLen;
 	pTxD->LastSec0 = 0;
 	pTxD->LastSec1 = (bIsLast) ? 1 : 0;
diff --git a/drivers/staging/rt2860/common/cmm_mac_pci.c b/drivers/staging/rt2860/common/cmm_mac_pci.c
index e26ba49..850f0fb 100644
--- a/drivers/staging/rt2860/common/cmm_mac_pci.c
+++ b/drivers/staging/rt2860/common/cmm_mac_pci.c
@@ -89,7 +89,7 @@
 
 			if (pAd->TxDescRing[num].AllocVa == NULL) {
 				ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
-				DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+				DBGPRINT_ERR("Failed to allocate a big buffer\n");
 				Status = NDIS_STATUS_RESOURCES;
 				break;
 			}
@@ -121,7 +121,7 @@
 
 			if (pAd->TxBufSpace[num].AllocVa == NULL) {
 				ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
-				DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+				DBGPRINT_ERR("Failed to allocate a big buffer\n");
 				Status = NDIS_STATUS_RESOURCES;
 				break;
 			}
@@ -197,7 +197,7 @@
 
 		if (pAd->MgmtDescRing.AllocVa == NULL) {
 			ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
-			DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+			DBGPRINT_ERR("Failed to allocate a big buffer\n");
 			Status = NDIS_STATUS_RESOURCES;
 			break;
 		}
@@ -251,7 +251,7 @@
 
 		if (pAd->RxDescRing.AllocVa == NULL) {
 			ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
-			DBGPRINT_ERR(("Failed to allocate a big buffer\n"));
+			DBGPRINT_ERR("Failed to allocate a big buffer\n");
 			Status = NDIS_STATUS_RESOURCES;
 			break;
 		}
@@ -304,7 +304,7 @@
 			/* Error handling */
 			if (pDmaBuf->AllocVa == NULL) {
 				ErrorValue = ERRLOG_OUT_OF_SHARED_MEMORY;
-				DBGPRINT_ERR(("Failed to allocate RxRing's 1st buffer\n"));
+				DBGPRINT_ERR("Failed to allocate RxRing's 1st buffer\n");
 				Status = NDIS_STATUS_RESOURCES;
 				break;
 			}
diff --git a/drivers/staging/rt2860/common/cmm_mac_usb.c b/drivers/staging/rt2860/common/cmm_mac_usb.c
index 72731cb..64a65a4 100644
--- a/drivers/staging/rt2860/common/cmm_mac_usb.c
+++ b/drivers/staging/rt2860/common/cmm_mac_usb.c
@@ -236,7 +236,7 @@
 		os_alloc_mem(pAd, (u8 **) (&pAd->MgmtDescRing.AllocVa),
 			     pAd->MgmtDescRing.AllocSize);
 		if (pAd->MgmtDescRing.AllocVa == NULL) {
-			DBGPRINT_ERR(("Failed to allocate a big buffer for MgmtDescRing!\n"));
+			DBGPRINT_ERR("Failed to allocate a big buffer for MgmtDescRing!\n");
 			Status = NDIS_STATUS_RESOURCES;
 			goto out1;
 		}
diff --git a/drivers/staging/rt2860/common/cmm_wpa.c b/drivers/staging/rt2860/common/cmm_wpa.c
index e37b64b..0040f45 100644
--- a/drivers/staging/rt2860/common/cmm_wpa.c
+++ b/drivers/staging/rt2860/common/cmm_wpa.c
@@ -2794,7 +2794,7 @@
 
 	/* Check length */
 	if ((len <= 0) || (pEid->Len != len)) {
-		DBGPRINT_ERR(("%s : The length is invalid\n", __func__));
+		DBGPRINT_ERR("%s : The length is invalid\n", __func__);
 		return NULL;
 	}
 	/* Check WPA or WPA2 */
@@ -2803,14 +2803,13 @@
 		u16 ucount;
 
 		if (len < sizeof(struct rt_rsnie)) {
-			DBGPRINT_ERR(("%s : The length is too short for WPA\n",
-				      __func__));
+			DBGPRINT_ERR("%s : The length is too short for WPA\n", __func__);
 			return NULL;
 		}
 		/* Get the count of pairwise cipher */
 		ucount = cpu2le16(pRsnie->ucount);
 		if (ucount > 2) {
-			DBGPRINT_ERR(("%s : The count(%d) of pairwise cipher is invlaid\n", __func__, ucount));
+			DBGPRINT_ERR("%s : The count(%d) of pairwise cipher is invlaid\n", __func__, ucount);
 			return NULL;
 		}
 		/* Get the group cipher */
@@ -2836,14 +2835,13 @@
 		isWPA2 = TRUE;
 
 		if (len < sizeof(struct rt_rsnie2)) {
-			DBGPRINT_ERR(("%s : The length is too short for WPA2\n",
-				      __func__));
+			DBGPRINT_ERR("%s : The length is too short for WPA2\n", __func__);
 			return NULL;
 		}
 		/* Get the count of pairwise cipher */
 		ucount = cpu2le16(pRsnie->ucount);
 		if (ucount > 2) {
-			DBGPRINT_ERR(("%s : The count(%d) of pairwise cipher is invlaid\n", __func__, ucount));
+			DBGPRINT_ERR("%s : The count(%d) of pairwise cipher is invlaid\n", __func__, ucount);
 			return NULL;
 		}
 		/* Get the group cipher */
@@ -2863,7 +2861,7 @@
 		offset = sizeof(struct rt_rsnie2) + (4 * (ucount - 1));
 
 	} else {
-		DBGPRINT_ERR(("%s : Unknown IE (%d)\n", __func__, pEid->Eid));
+		DBGPRINT_ERR("%s : Unknown IE (%d)\n", __func__, pEid->Eid);
 		return NULL;
 	}
 
@@ -2872,8 +2870,7 @@
 	len -= offset;
 
 	if (len < sizeof(struct rt_rsnie_auth)) {
-		DBGPRINT_ERR(("%s : The length of RSNIE is too short\n",
-			      __func__));
+		DBGPRINT_ERR("%s : The length of RSNIE is too short\n", __func__);
 		return NULL;
 	}
 	/* pointer to AKM count */
@@ -2882,8 +2879,7 @@
 	/* Get the count of pairwise cipher */
 	acount = cpu2le16(pAkm->acount);
 	if (acount > 2) {
-		DBGPRINT_ERR(("%s : The count(%d) of AKM is invlaid\n",
-			      __func__, acount));
+		DBGPRINT_ERR("%s : The count(%d) of AKM is invlaid\n", __func__, acount);
 		return NULL;
 	}
 	/* Get the AKM suite */
@@ -2910,7 +2906,7 @@
 			return pBuf;
 		}
 	} else {
-		DBGPRINT_ERR(("%s : it can't get any more information beyond AKM \n", __func__));
+		DBGPRINT_ERR("%s : it can't get any more information beyond AKM \n", __func__);
 		return NULL;
 	}
 
diff --git a/drivers/staging/rt2860/common/ee_efuse.c b/drivers/staging/rt2860/common/ee_efuse.c
index 03412f5..fed0ba4 100644
--- a/drivers/staging/rt2860/common/ee_efuse.c
+++ b/drivers/staging/rt2860/common/ee_efuse.c
@@ -264,7 +264,7 @@
 		if (i == EFUSE_USAGE_MAP_END)
 			efusefreenum = 0;
 	}
-	printk("efuseFreeNumber is %d\n", efusefreenum);
+	printk(KERN_DEBUG "efuseFreeNumber is %d\n", efusefreenum);
 	return TRUE;
 }
 
@@ -274,16 +274,23 @@
 	int i = 0;
 	if (!pAd->bUseEfuse)
 		return FALSE;
+
+	printk(KERN_DEBUG "Block 0: ");
+
 	for (i = 0; i < EFUSE_USAGE_MAP_END / 2; i++) {
 		InBuf[0] = 2 * i;
 		InBuf[1] = 2;
 		InBuf[2] = 0x0;
 
 		eFuseReadPhysical(pAd, &InBuf[0], 4, &InBuf[2], 2);
-		if (i % 4 == 0)
-			printk("\nBlock %x:", i / 8);
-		printk("%04x ", InBuf[2]);
+		if (i && i % 4 == 0) {
+			printk(KERN_CONT "\n");
+			printk(KERN_DEBUG "Block %x:", i / 8);
+		}
+		printk(KERN_CONT "%04x ", InBuf[2]);
 	}
+	printk(KERN_CONT "\n");
+
 	return TRUE;
 }
 
diff --git a/drivers/staging/rt2860/common/mlme.c b/drivers/staging/rt2860/common/mlme.c
index 7300c6e..d9c3fd5 100644
--- a/drivers/staging/rt2860/common/mlme.c
+++ b/drivers/staging/rt2860/common/mlme.c
@@ -550,7 +550,7 @@
 			Elem->MsgLen = 0;
 
 		} else {
-			DBGPRINT_ERR(("MlmeHandler: MlmeQueue empty\n"));
+			DBGPRINT_ERR("MlmeHandler: MlmeQueue empty\n");
 		}
 	}
 
@@ -4698,8 +4698,7 @@
 
 	/* First check the size, it MUST not exceed the mlme queue size */
 	if (MsgLen > MGMT_DMA_BUFFER_SIZE) {
-		DBGPRINT_ERR(("MlmeEnqueue: msg too large, size = %ld \n",
-			      MsgLen));
+		DBGPRINT_ERR("MlmeEnqueue: msg too large, size = %ld \n", MsgLen);
 		return FALSE;
 	}
 
@@ -4762,12 +4761,12 @@
 	if (RTMP_TEST_FLAG
 	    (pAd,
 	     fRTMP_ADAPTER_HALT_IN_PROGRESS | fRTMP_ADAPTER_NIC_NOT_EXIST)) {
-		DBGPRINT_ERR(("MlmeEnqueueForRecv: fRTMP_ADAPTER_HALT_IN_PROGRESS\n"));
+		DBGPRINT_ERR("MlmeEnqueueForRecv: fRTMP_ADAPTER_HALT_IN_PROGRESS\n");
 		return FALSE;
 	}
 	/* First check the size, it MUST not exceed the mlme queue size */
 	if (MsgLen > MGMT_DMA_BUFFER_SIZE) {
-		DBGPRINT_ERR(("MlmeEnqueueForRecv: frame too large, size = %ld \n", MsgLen));
+		DBGPRINT_ERR("MlmeEnqueueForRecv: frame too large, size = %ld \n", MsgLen);
 		return FALSE;
 	}
 
@@ -4777,7 +4776,7 @@
 
 	{
 		if (!MsgTypeSubst(pAd, pFrame, &Machine, &MsgType)) {
-			DBGPRINT_ERR(("MlmeEnqueueForRecv: un-recongnized mgmt->subtype=%d\n", pFrame->Hdr.FC.SubType));
+			DBGPRINT_ERR("MlmeEnqueueForRecv: un-recongnized mgmt->subtype=%d\n", pFrame->Hdr.FC.SubType);
 			return FALSE;
 		}
 	}
@@ -4867,7 +4866,7 @@
 			Elem->MsgLen = 0;
 
 		} else {
-			DBGPRINT_ERR(("MlmeRestartStateMachine: MlmeQueue empty\n"));
+			DBGPRINT_ERR("MlmeRestartStateMachine: MlmeQueue empty\n");
 		}
 	}
 #endif /* RTMP_MAC_PCI // */
diff --git a/drivers/staging/rt2860/common/rt_rf.c b/drivers/staging/rt2860/common/rt_rf.c
index 519121d..2895447 100644
--- a/drivers/staging/rt2860/common/rt_rf.c
+++ b/drivers/staging/rt2860/common/rt_rf.c
@@ -131,8 +131,7 @@
 		}
 	}
 	if (rfcsr.field.RF_CSR_KICK == BUSY) {
-		DBGPRINT_ERR(("RF read R%d=0x%x fail, i[%d], k[%d]\n", regID,
-			      rfcsr.word, i, k));
+		DBGPRINT_ERR("RF read R%d=0x%x fail, i[%d], k[%d]\n", regID, rfcsr.word, i, k);
 		return STATUS_UNSUCCESSFUL;
 	}
 
diff --git a/drivers/staging/rt2860/common/rtmp_init.c b/drivers/staging/rt2860/common/rtmp_init.c
index 3628e85..d359a14 100644
--- a/drivers/staging/rt2860/common/rtmp_init.c
+++ b/drivers/staging/rt2860/common/rtmp_init.c
@@ -169,14 +169,14 @@
 		pBeaconBuf = kmalloc(MAX_BEACON_SIZE, MEM_ALLOC_FLAG);
 		if (pBeaconBuf == NULL) {
 			Status = NDIS_STATUS_FAILURE;
-			DBGPRINT_ERR(("Failed to allocate memory - BeaconBuf!\n"));
+			DBGPRINT_ERR("Failed to allocate memory - BeaconBuf!\n");
 			break;
 		}
 		NdisZeroMemory(pBeaconBuf, MAX_BEACON_SIZE);
 
 		Status = AdapterBlockAllocateMemory(handle, (void **) & pAd);
 		if (Status != NDIS_STATUS_SUCCESS) {
-			DBGPRINT_ERR(("Failed to allocate memory - ADAPTER\n"));
+			DBGPRINT_ERR("Failed to allocate memory - ADAPTER\n");
 			break;
 		}
 		pAd->BeaconBuf = pBeaconBuf;
@@ -785,8 +785,7 @@
 		  Version.field.Version, Version.field.FaeReleaseNumber));
 
 	if (Version.field.Version > VALID_EEPROM_VERSION) {
-		DBGPRINT_ERR(("E2PROM: WRONG VERSION 0x%x, should be %d\n",
-			      Version.field.Version, VALID_EEPROM_VERSION));
+		DBGPRINT_ERR("E2PROM: WRONG VERSION 0x%x, should be %d\n", Version.field.Version, VALID_EEPROM_VERSION);
 		/*pAd->SystemErrorBitmap |= 0x00000001;
 
 		   // hard-code default value when no proper E2PROM installed
@@ -2911,7 +2910,7 @@
 			RTMP_OS_Add_Timer(&pTimer->TimerObj, Value);
 		}
 	} else {
-		DBGPRINT_ERR(("RTMPSetTimer failed, Timer hasn't been initialize!\n"));
+		DBGPRINT_ERR("RTMPSetTimer failed, Timer hasn't been initialize!\n");
 	}
 }
 
@@ -2947,7 +2946,7 @@
 			RTMP_OS_Mod_Timer(&pTimer->TimerObj, Value);
 		}
 	} else {
-		DBGPRINT_ERR(("RTMPModTimer failed, Timer hasn't been initialize!\n"));
+		DBGPRINT_ERR("RTMPModTimer failed, Timer hasn't been initialize!\n");
 	}
 }
 
@@ -2989,7 +2988,7 @@
 		RtmpTimerQRemove(pTimer->pAd, pTimer);
 #endif /* RTMP_TIMER_TASK_SUPPORT // */
 	} else {
-		DBGPRINT_ERR(("RTMPCancelTimer failed, Timer hasn't been initialize!\n"));
+		DBGPRINT_ERR("RTMPCancelTimer failed, Timer hasn't been initialize!\n");
 	}
 }
 
@@ -3251,8 +3250,7 @@
 	/* Load 8051 firmware */
 	Status = NICLoadFirmware(pAd);
 	if (Status != NDIS_STATUS_SUCCESS) {
-		DBGPRINT_ERR(("NICLoadFirmware failed, Status[=0x%08x]\n",
-			      Status));
+		DBGPRINT_ERR("NICLoadFirmware failed, Status[=0x%08x]\n", Status);
 		goto err1;
 	}
 
@@ -3268,8 +3266,7 @@
 
 	Status = RTMPAllocTxRxRingMemory(pAd);
 	if (Status != NDIS_STATUS_SUCCESS) {
-		DBGPRINT_ERR(("RTMPAllocDMAMemory failed, Status[=0x%08x]\n",
-			      Status));
+		DBGPRINT_ERR("RTMPAllocDMAMemory failed, Status[=0x%08x]\n", Status);
 		goto err1;
 	}
 
@@ -3284,7 +3281,7 @@
 
 	Status = MlmeInit(pAd);
 	if (Status != NDIS_STATUS_SUCCESS) {
-		DBGPRINT_ERR(("MlmeInit failed, Status[=0x%08x]\n", Status));
+		DBGPRINT_ERR("MlmeInit failed, Status[=0x%08x]\n", Status);
 		goto err2;
 	}
 	/* Initialize pAd->StaCfg, pAd->ApCfg, pAd->CommonCfg to manufacture default */
@@ -3309,8 +3306,7 @@
 	/* */
 	Status = NICInitializeAdapter(pAd, TRUE);
 	if (Status != NDIS_STATUS_SUCCESS) {
-		DBGPRINT_ERR(("NICInitializeAdapter failed, Status[=0x%08x]\n",
-			      Status));
+		DBGPRINT_ERR("NICInitializeAdapter failed, Status[=0x%08x]\n", Status);
 		if (Status != NDIS_STATUS_SUCCESS)
 			goto err3;
 	}
diff --git a/drivers/staging/rt2860/common/rtmp_mcu.c b/drivers/staging/rt2860/common/rtmp_mcu.c
index 844d4b9..80fa416 100644
--- a/drivers/staging/rt2860/common/rtmp_mcu.c
+++ b/drivers/staging/rt2860/common/rtmp_mcu.c
@@ -267,7 +267,7 @@
 		} while (i++ < 100);
 
 		if (i > 100) {
-			DBGPRINT_ERR(("H2M_MAILBOX still hold by MCU. command fail\n"));
+			DBGPRINT_ERR("H2M_MAILBOX still hold by MCU. command fail\n");
 			return FALSE;
 		}
 
@@ -296,7 +296,7 @@
 #ifdef RTMP_MAC_PCI
 #endif /* RTMP_MAC_PCI // */
 			{
-				DBGPRINT_ERR(("H2M_MAILBOX still hold by MCU. command fail\n"));
+				DBGPRINT_ERR("H2M_MAILBOX still hold by MCU. command fail\n");
 			}
 			return FALSE;
 		}
diff --git a/drivers/staging/rt2860/common/spectrum.c b/drivers/staging/rt2860/common/spectrum.c
index 2d5f847..1dfb802 100644
--- a/drivers/staging/rt2860/common/spectrum.c
+++ b/drivers/staging/rt2860/common/spectrum.c
@@ -1837,7 +1837,7 @@
 			}
 
 			if (index >= pAd->ChannelListNum) {
-				DBGPRINT_ERR(("&&&&&&&&&&&&&&&&&&&&&&&&&&PeerChSwAnnAction(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum));
+				DBGPRINT_ERR("&&&&&&&&&&&&&&&&&&&&&&&&&&PeerChSwAnnAction(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum);
 			}
 		}
 	}
diff --git a/drivers/staging/rt2860/mlme.h b/drivers/staging/rt2860/mlme.h
index 01414c3..cd1ee3d 100644
--- a/drivers/staging/rt2860/mlme.h
+++ b/drivers/staging/rt2860/mlme.h
@@ -32,8 +32,9 @@
 	Revision History:
 	Who			When			What
 	--------	----------		------------------------------
-	John Chang	2003-08-28		Created
-	John Chang  2004-09-06      modified for RT2600
+	John Chang		2003-08-28		Created
+	John Chang  		2004-09-06      	modified for RT2600
+	Justin P. Mattock	11/07/2010		Fix typos in comments
 
 */
 #ifndef __MLME_H__
@@ -41,7 +42,7 @@
 
 #include "rtmp_dot11.h"
 
-/* maximum supported capability information - */
+/* maximum supported capability information */
 /* ESS, IBSS, Privacy, Short Preamble, Spectrum mgmt, Short Slot */
 #define SUPPORTED_CAPABILITY_INFO   0x0533
 
@@ -77,7 +78,7 @@
 #define CW_MAX_IN_BITS              10	/* actual CwMax = 2^CW_MAX_IN_BITS - 1 */
 
 /* Note: RSSI_TO_DBM_OFFSET has been changed to variable for new RF (2004-0720). */
-/* SHould not refer to this constant anymore */
+/* Should not refer to this constant anymore */
 /*#define RSSI_TO_DBM_OFFSET          120 // for RT2530 RSSI-115 = dBm */
 #define RSSI_FOR_MID_TX_POWER       -55	/* -55 db is considered mid-distance */
 #define RSSI_FOR_LOW_TX_POWER       -45	/* -45 db is considered very short distance and */
@@ -123,7 +124,7 @@
 #define TID_MAC_HASH_INDEX(Addr, TID)      (TID_MAC_HASH(Addr, TID) % HASH_TABLE_SIZE)
 
 /* LED Control */
-/* assoiation ON. one LED ON. another blinking when TX, OFF when idle */
+/* association ON. one LED ON. another blinking when TX, OFF when idle */
 /* no association, both LED off */
 #define ASIC_LED_ACT_ON(pAd)        RTMP_IO_WRITE32(pAd, MAC_CSR14, 0x00031e46)
 #define ASIC_LED_ACT_OFF(pAd)       RTMP_IO_WRITE32(pAd, MAC_CSR14, 0x00001e46)
@@ -284,8 +285,8 @@
 
 /* 802.11n draft3 related structure definitions. */
 /* 7.3.2.60 */
-#define dot11OBSSScanPassiveDwell							20	/* in TU. min amount of time that the STA continously scans each channel when performing an active OBSS scan. */
-#define dot11OBSSScanActiveDwell							10	/* in TU.min amount of time that the STA continously scans each channel when performing an passive OBSS scan. */
+#define dot11OBSSScanPassiveDwell							20	/* in TU. min amount of time that the STA continuously scans each channel when performing an active OBSS scan. */
+#define dot11OBSSScanActiveDwell							10	/* in TU. min amount of time that the STA continuously scans each channel when performing a passive OBSS scan. */
 #define dot11BSSWidthTriggerScanInterval					300	/* in sec. max interval between scan operations to be performed to detect BSS channel width trigger events. */
 #define dot11OBSSScanPassiveTotalPerChannel					200	/* in TU. min total amount of time that the STA scans each channel when performing a passive OBSS scan. */
 #define dot11OBSSScanActiveTotalPerChannel					20	/*in TU. min total amount of time that the STA scans each channel when performing a active OBSS scan */
@@ -325,7 +326,7 @@
 };
 
 /* 20/40 trigger event table */
-/* If one Event A delete or created, or if Event B is detected or not detected, STA should send 2040BSSCoexistence to AP. */
+/* If one Event (A) is deleted or created, or if Event (B) is detected or not detected, STA should send 2040BSSCoexistence to AP. */
 #define MAX_TRIGGER_EVENT		64
 struct rt_trigger_event_tab {
 	u8 EventANo;
@@ -357,14 +358,14 @@
 	u8 ChList[0];
 };
 
-/* The structure for channel switch annoucement IE. This is in 802.11n D3.03 */
+/* The structure for channel switch announcement IE. This is in 802.11n D3.03 */
 struct PACKED rt_cha_switch_announce_ie {
 	u8 SwitchMode;	/*channel switch mode */
 	u8 NewChannel;	/* */
 	u8 SwitchCount;	/* */
 };
 
-/* The structure for channel switch annoucement IE. This is in 802.11n D3.03 */
+/* The structure for channel switch announcement IE. This is in 802.11n D3.03 */
 struct PACKED rt_sec_cha_offset_ie {
 	u8 SecondaryChannelOffset;	/* 1: Secondary above, 3: Secondary below, 0: no Secondary */
 };
@@ -377,7 +378,7 @@
 	u8 MCSSet[16];
 };
 
-/*This structure substracts ralink supports from all 802.11n-related features. */
+/*This structure extracts the features Ralink supports from all 802.11n-related features. */
 /*Features not listed here but contained in 802.11n spec are not supported in rt2860. */
 struct rt_ht_capability {
 	u16 ChannelWidth:1;
@@ -387,14 +388,14 @@
 	u16 ShortGIfor40:1;	/*for40MHz */
 	u16 TxSTBC:1;
 	u16 RxSTBC:2;	/* 2 bits */
-	u16 AmsduEnable:1;	/* Enable to transmit A-MSDU. Suggest disable. We should use A-MPDU to gain best benifit of 802.11n */
+	u16 AmsduEnable:1;	/* Enable to transmit A-MSDU. Suggest disable. We should use A-MPDU to gain best benefit of 802.11n */
 	u16 AmsduSize:1;	/* Max receiving A-MSDU size */
 	u16 rsv:5;
 
 	/*Substract from Addiont HT INFO IE */
 	u8 MaxRAmpduFactor:2;
 	u8 MpduDensity:3;
-	u8 ExtChanOffset:2;	/* Please not the difference with following     u8   NewExtChannelOffset; from 802.11n */
+	u8 ExtChanOffset:2;	/* Please note the difference with following     u8   NewExtChannelOffset; from 802.11n */
 	u8 RecomWidth:1;
 
 	u16 OperaionMode:2;
@@ -481,7 +482,7 @@
 	u16 AMSDUSupported:1;	/* 0: not permitted             1: permitted */
 	u16 BAPolicy:1;	/* 1: immediately BA    0:delayed BA */
 	u16 TID:4;		/* value of TC os TS */
-	u16 BufSize:10;	/* number of buffe of size 2304 octetsr */
+	u16 BufSize:10;	/* number of buffers of size 2304 octets */
 };
 
 /* 2-byte BA Starting Seq CONTROL field */
@@ -551,7 +552,7 @@
 	BASEQ_CONTROL BAStartingSeq;
 };
 
-/* Compressed format is mandantory in HT STA */
+/* Compressed format is mandatory in HT STA */
 struct PACKED rt_frame_mtba {
 	struct rt_frame_control FC;
 	u16 Duration;
@@ -647,7 +648,7 @@
 	u8 bitmask[8];
 };
 
-/* Radio Measuement Request Frame Format */
+/* Radio Measurement Request Frame Format */
 struct PACKED rt_frame_rm_req_action {
 	struct rt_header_802_11 Hdr;
 	u8 Category;
@@ -709,7 +710,7 @@
 	u8 Cwmin[4];
 	u8 Cwmax[4];
 	u16 Txop[4];		/* in unit of 32-us */
-	BOOLEAN bACM[4];	/* 1: Admission Control of AC_BK is mandattory */
+	BOOLEAN bACM[4];	/* 1: Admission Control of AC_BK is mandatory */
 };
 
 /* QBSS LOAD information from QAP's BEACON/ProbeRsp */
@@ -757,7 +758,7 @@
 struct rt_bss_entry {
 	u8 Bssid[MAC_ADDR_LEN];
 	u8 Channel;
-	u8 CentralChannel;	/*Store the wide-band central channel for 40MHz.  .used in 40MHz AP. Or this is the same as Channel. */
+	u8 CentralChannel;	/*Store the wide-band central channel for 40MHz. used in 40MHz AP. Or this is the same as Channel. */
 	u8 BssType;
 	u16 AtimWin;
 	u16 BeaconPeriod;
@@ -855,7 +856,7 @@
 	STATE_MACHINE_FUNC *TransFunc;
 };
 
-/* MLME AUX data structure that hold temporarliy settings during a connection attempt. */
+/* MLME AUX data structure that holds temporary settings during a connection attempt. */
 /* Once this attemp succeeds, all settings will be copy to pAd->StaActive. */
 /* A connection attempt (user set OID, roaming, CCX fast roaming,..) consists of */
 /* several steps (JOIN, AUTH, ASSOC or REASSOC) and may fail at any step. We purposely */
@@ -996,7 +997,7 @@
 #define MAC_TABLE_ASSOC_TIMEOUT			5	/* unit: sec */
 #define MAC_TABLE_FULL(Tab)				((Tab).size == MAX_LEN_OF_MAC_TABLE)
 
-/* AP shall drop the sta if contine Tx fail count reach it. */
+/* AP shall drop the sta if the continuous Tx fail count reaches this. */
 #define MAC_ENTRY_LIFE_CHECK_CNT		20	/* packet cnt. */
 
 /* Value domain of pMacEntry->Sst */
diff --git a/drivers/staging/rt2860/oid.h b/drivers/staging/rt2860/oid.h
index 1704c27..5a25f0d 100644
--- a/drivers/staging/rt2860/oid.h
+++ b/drivers/staging/rt2860/oid.h
@@ -32,7 +32,8 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	Name		Date			Modification logs
+	Name			Date			Modification logs
+	Justin P. Mattock 	11/07/2010	Fix typos in comments
 */
 #ifndef _OID_H_
 #define _OID_H_
@@ -78,7 +79,7 @@
 #define NDIS_802_11_LENGTH_RATES        8
 #define NDIS_802_11_LENGTH_RATES_EX     16
 #define MAC_ADDR_LENGTH                 6
-/*#define MAX_NUM_OF_CHS                                        49 // 14 channels @2.4G +  12@UNII + 4 @MMAC + 11 @HiperLAN2 + 7 @Japan + 1 as NULL terminationc */
+/*#define MAX_NUM_OF_CHS                                        49 // 14 channels @2.4G +  12@UNII + 4 @MMAC + 11 @HiperLAN2 + 7 @Japan + 1 as NULL termination */
 #define MAX_NUM_OF_CHS				54	/* 14 channels @2.4G +  12@UNII(lower/middle) + 16@HiperLAN2 + 11@UNII(upper) + 0 @Japan + 1 as NULL termination */
 #define MAX_NUMBER_OF_EVENT				10	/* entry # in EVENT table */
 #define MAX_NUMBER_OF_MAC				32	/* if MAX_MBSSID_NUM is 8, this value can't be larger than 211 */
@@ -87,7 +88,7 @@
 #define MAX_NUMBER_OF_DLS_ENTRY			4
 
 #define RT_QUERY_SIGNAL_CONTEXT				0x0402
-#define RT_SET_IAPP_PID                 	0x0404
+#define RT_SET_IAPP_PID				0x0404
 #define RT_SET_APD_PID						0x0405
 #define RT_SET_DEL_MAC_ENTRY				0x0406
 #define RT_QUERY_EVENT_TABLE			0x0407
@@ -610,7 +611,7 @@
 
 struct rt_802_11_event_table {
 	unsigned long Num;
-	unsigned long Rsv;		/* to align Log[] at LARGE_INEGER boundary */
+	unsigned long Rsv;		/* to align Log[] at LARGE_INTEGER boundary */
 	struct rt_802_11_event_log Log[MAX_NUMBER_OF_EVENT];
 };
 
@@ -721,9 +722,9 @@
 #define	AUTH_FAIL				0x4	/* Open authentication fail */
 #define	AUTH_FAIL_KEYS			0x5	/* Shared authentication fail */
 #define	ASSOC_FAIL				0x6	/* Association failed */
-#define	EAP_MIC_FAILURE			0x7	/* Deauthencation because MIC failure */
-#define	EAP_4WAY_TIMEOUT		0x8	/* Deauthencation on 4-way handshake timeout */
-#define	EAP_GROUP_KEY_TIMEOUT	0x9	/* Deauthencation on group key handshake timeout */
+#define	EAP_MIC_FAILURE			0x7	/* Deauthentication because MIC failure */
+#define	EAP_4WAY_TIMEOUT		0x8	/* Deauthentication on 4-way handshake timeout */
+#define	EAP_GROUP_KEY_TIMEOUT	0x9	/* Deauthentication on group key handshake timeout */
 #define	EAP_SUCCESS				0xa	/* EAP succeed */
 #define	DETECT_RADAR_SIGNAL		0xb	/* Radar signal occur in current channel */
 #define EXTRA_INFO_MAX			0xb	/* Indicate Last OID */
diff --git a/drivers/staging/rt2860/pci_main_dev.c b/drivers/staging/rt2860/pci_main_dev.c
index 321facd..25fbb18 100644
--- a/drivers/staging/rt2860/pci_main_dev.c
+++ b/drivers/staging/rt2860/pci_main_dev.c
@@ -31,7 +31,8 @@
     Create and register network interface for PCI based chipsets in Linux platform.
 
     Revision History:
-    Who         When            What
+    Who         	When            What
+    Justin P. Mattock	11/07/2010	Fix typos in some comments
     --------    ----------      ----------------------------------------------
 */
 
@@ -40,8 +41,8 @@
 #include <linux/slab.h>
 
 /* Following information will be show when you run 'modinfo' */
-/* *** If you have a solution for the bug in current version of driver, please mail to me. */
-/* Otherwise post to forum in ralinktech's web site(www.ralinktech.com) and let all users help you. *** */
+/* If you have a solution for a bug in the current version of the driver, please e-mail me. */
+/* Otherwise post to the forum on Ralink's web site (www.ralinktech.com) and let all users help you. */
 MODULE_AUTHOR("Jett Chen <jett_chen@ralinktech.com>");
 MODULE_DESCRIPTION("RT2860/RT3090 Wireless Lan Linux Driver");
 MODULE_LICENSE("GPL");
@@ -50,9 +51,6 @@
 /* */
 /* Function declarations */
 /* */
-extern int rt28xx_close(IN struct net_device *net_dev);
-extern int rt28xx_open(struct net_device *net_dev);
-
 static void __devexit rt2860_remove_one(struct pci_dev *pci_dev);
 static int __devinit rt2860_probe(struct pci_dev *pci_dev,
 				  const struct pci_device_id *ent);
@@ -205,7 +203,7 @@
 
 	/* initialize device before it's used by a driver */
 	if (pci_enable_device(pci_dev)) {
-		printk("pci enable fail!\n");
+		printk(KERN_ERR "rt2860: pci enable fail!\n");
 		return 0;
 	}
 
@@ -599,7 +597,7 @@
 		DBGPRINT_RAW(RT_DEBUG_ERROR,
 			     (" AUX_CTRL = 0x%32x\n", MacValue));
 
-		/* for RT30xx F and after, PCIe infterface, and for power solution 3 */
+		/* for RT30xx F and after, PCIe interface, and for power solution 3 */
 		if ((IS_VERSION_AFTER_F(pAd))
 		    && (pAd->StaCfg.PSControl.field.rt30xxPowerMode >= 2)
 		    && (pAd->StaCfg.PSControl.field.rt30xxPowerMode <= 3)) {
@@ -902,7 +900,7 @@
 				  Configuration);
 		if ((Configuration != 0) && (Configuration != 0xFFFF)) {
 			Configuration &= 0xfefc;
-			/* If call from interface down, restore to orginial setting. */
+			/* If call from interface down, restore to original setting. */
 			if (Level == RESTORE_CLOSE)
 				Configuration |= pAd->HostLnkCtrlConfiguration;
 			else
@@ -924,7 +922,7 @@
 				  Configuration);
 		if ((Configuration != 0) && (Configuration != 0xFFFF)) {
 			Configuration &= 0xfefc;
-			/* If call from interface down, restore to orginial setting. */
+			/* If call from interface down, restore to original setting. */
 			if (Level == RESTORE_CLOSE)
 				Configuration |= pAd->RLnkCtrlConfiguration;
 			else
@@ -1106,12 +1104,12 @@
 		if (pos != 0)
 			pAd->HostLnkCtrlOffset = pos + PCI_EXP_LNKCTL;
 
-		/* If configurared to turn on L1. */
+		/* If configured to turn on L1. */
 		HostConfiguration = 0;
 		if (pAd->StaCfg.PSControl.field.rt30xxForceASPMTest == 1) {
 			DBGPRINT(RT_DEBUG_TRACE, ("Enter,PSM : Force ASPM\n"));
 
-			/* Skip non-exist deice right away */
+			/* Skip non-existent device right away */
 			if ((pAd->HostLnkCtrlOffset != 0)) {
 				PCI_REG_READ_WORD(pObj->parent_pci_dev,
 						  pAd->HostLnkCtrlOffset,
diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c
index abfeea1..728864e 100644
--- a/drivers/staging/rt2860/rt_linux.c
+++ b/drivers/staging/rt2860/rt_linux.c
@@ -321,7 +321,7 @@
 
 	RTMP_SET_PACKET_SOURCE(OSPKT_TO_RTPKT(pkt), PKTSRC_NDIS);
 
-	printk("###Clone###\n");
+	printk(KERN_DEBUG "###Clone###\n");
 
 	return NDIS_STATUS_SUCCESS;
 }
@@ -343,9 +343,8 @@
 					   RTMP_PKT_TAIL_PADDING);
 	if (pPacket == NULL) {
 		*ppPacket = NULL;
-#ifdef DEBUG
-		printk("RTMPAllocateNdisPacket Fail\n");
-#endif
+		pr_devel("RTMPAllocateNdisPacket Fail\n");
+
 		return NDIS_STATUS_FAILURE;
 	}
 	/* 2. clone the frame content */
@@ -601,15 +600,15 @@
 		return;
 
 	pt = pSrcBufVA;
-	printk("%s: %p, len = %d\n", str, pSrcBufVA, SrcBufLen);
+	printk(KERN_DEBUG "%s: %p, len = %d\n", str, pSrcBufVA, SrcBufLen);
 	for (x = 0; x < SrcBufLen; x++) {
 		if (x % 16 == 0)
-			printk("0x%04x : ", x);
-		printk("%02x ", ((unsigned char)pt[x]));
+			printk(KERN_DEBUG "0x%04x : ", x);
+		printk(KERN_DEBUG "%02x ", ((unsigned char)pt[x]));
 		if (x % 16 == 15)
-			printk("\n");
+			printk(KERN_CONT "\n");
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 /*
@@ -767,13 +766,13 @@
 		/* QOS */
 		if (pRxBlk->pHeader->FC.SubType & 0x08) {
 			header_len += 2;
-			/* Data skip QOS contorl field */
+			/* Data skip QOS control field */
 			pRxBlk->DataSize -= 2;
 		}
 		/* Order bit: A-Ralink or HTC+ */
 		if (pRxBlk->pHeader->FC.Order) {
 			header_len += 4;
-			/* Data skip HTC contorl field */
+			/* Data skip HTC control field */
 			pRxBlk->DataSize -= 4;
 		}
 		/* Copy Header */
@@ -854,7 +853,7 @@
 									 RSSI1,
 									 RSSI_1),
 				    ConvertToRssi(pAd, pRxBlk->pRxWI->RSSI2,
-						  RSSI_2));;
+						  RSSI_2));
 
 	ph->signal.did = DIDmsg_lnxind_wlansniffrm_signal;
 	ph->signal.status = 0;
@@ -926,7 +925,7 @@
 		    request_irq(_pObj->pci_dev->irq, rt2860_interrupt, SA_SHIRQ,
 				(net_dev)->name, (net_dev));
 		if (retval != 0)
-			printk("RT2860: request_irq  ERROR(%d)\n", retval);
+			printk(KERN_ERR "rt2860: request_irq  ERROR(%d)\n", retval);
 	}
 
 	return retval;
@@ -1022,7 +1021,7 @@
 	}
 #else
 	CHECK_PID_LEGALITY(pTask->taskPID) {
-		printk("Terminate the task(%s) with pid(%d)!\n",
+		printk(KERN_INFO "Terminate the task(%s) with pid(%d)!\n",
 		       pTask->taskName, GET_PID_NUMBER(pTask->taskPID));
 		mb();
 		pTask->task_killed = 1;
@@ -1175,7 +1174,7 @@
 	net_dev = pNetDev;
 	GET_PAD_FROM_NET_DEV(pAd, net_dev);
 
-	/* work-around for the SuSE due to it has it's own interface name management system. */
+	/* work-around for SuSE, which has its own interface name management system. */
 	{
 		NdisZeroMemory(pAd->StaCfg.dev_name, 16);
 		NdisMoveMemory(pAd->StaCfg.dev_name, net_dev->name,
@@ -1300,7 +1299,7 @@
 	int ret, rtnl_locked = FALSE;
 
 	DBGPRINT(RT_DEBUG_TRACE, ("RtmpOSNetDevAttach()--->\n"));
-	/* If we need hook some callback function to the net device structrue, now do it. */
+	/* If we need to hook some callback functions to the net device structure, do it now. */
 	if (pDevOpHook) {
 		struct rt_rtmp_adapter *pAd = NULL;
 
@@ -1351,10 +1350,10 @@
 		return NULL;
 	}
 
-	/* find a available interface name, max 32 interfaces */
+	/* find an available interface name, max 32 interfaces */
 	status = RtmpOSNetDevRequestName(pAd, pNetDev, pNamePrefix, devNum);
 	if (status != NDIS_STATUS_SUCCESS) {
-		/* error! no any available ra name can be used! */
+		/* error! no available ra name can be used! */
 		DBGPRINT(RT_DEBUG_ERROR,
 			 ("Assign interface name (%s with suffix 0~32) failed...\n",
 			  pNamePrefix));
diff --git a/drivers/staging/rt2860/rt_linux.h b/drivers/staging/rt2860/rt_linux.h
index 5acedf1..92ff543 100644
--- a/drivers/staging/rt2860/rt_linux.h
+++ b/drivers/staging/rt2860/rt_linux.h
@@ -30,7 +30,8 @@
     Abstract:
 
     Revision History:
-    Who          When          What
+    Who          	When         	What
+    Justin P. Mattock	11/07/2010 	Fix typo in a comment
     ---------    ----------    ----------------------------------------------
 */
 
@@ -422,11 +423,7 @@
 
 #define DBGPRINT(Level, Fmt)    DBGPRINT_RAW(Level, Fmt)
 
-#define DBGPRINT_ERR(Fmt)           \
-{                                   \
-    printk("ERROR! ");          \
-    printk Fmt;                  \
-}
+#define DBGPRINT_ERR(fmt, args...) printk(KERN_ERR fmt, ##args)
 
 #define DBGPRINT_S(Status, Fmt)		\
 {									\
@@ -726,7 +723,7 @@
 #define RTMP_GET_PACKET_MOREDATA(_p)				(RTPKT_TO_OSPKT(_p)->cb[CB_OFF+7])
 
 /* */
-/*      Sepcific Pakcet Type definition */
+/*      Specific Packet Type definition */
 /* */
 #define RTMP_PACKET_SPECIFIC_CB_OFFSET	11
 
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
index ad60cea..701561d 100644
--- a/drivers/staging/rt2860/rt_main_dev.c
+++ b/drivers/staging/rt2860/rt_main_dev.c
@@ -31,7 +31,8 @@
     Create and register network interface.
 
     Revision History:
-    Who         When            What
+    Who         	When            What
+    Justin P. Mattock	11/07/2010	Fix typos in comments
     --------    ----------      ----------------------------------------------
 */
 
@@ -101,8 +102,8 @@
 		    (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))) {
 			struct rt_mlme_disassoc_req DisReq;
 			struct rt_mlme_queue_elem *MsgElem =
-			    (struct rt_mlme_queue_elem *)kmalloc(sizeof(struct rt_mlme_queue_elem),
-							MEM_ALLOC_FLAG);
+				kmalloc(sizeof(struct rt_mlme_queue_elem),
+					MEM_ALLOC_FLAG);
 
 			if (MsgElem) {
 				COPY_MAC_ADDR(DisReq.Addr,
@@ -234,7 +235,7 @@
 		RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_CLOSE);
 #endif /* RTMP_MAC_PCI // */
 
-		/* If dirver doesn't wake up firmware here, */
+		/* If driver doesn't wake up firmware here, */
 		/* NICLoadFirmware will hang forever when interface is up again. */
 		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)) {
 			AsicForceWakeup(pAd, TRUE);
@@ -310,8 +311,8 @@
 			RTMP_ASIC_INTERRUPT_DISABLE(pAd);
 		}
 		/* Receive packets to clear DMA index after disable interrupt. */
-		/*RTMPHandleRxDoneInterrupt(pAd); */
-		/* put to radio off to save power when driver unload.  After radiooff, can't write /read register.  So need to finish all */
+		/* RTMPHandleRxDoneInterrupt(pAd); */
+		/* put radio off to save power when the driver unloads.  After radio off, can't write/read registers, so need to finish all */
 		/* register access before Radio off. */
 
 		brc = RT28xxPciAsicRadioOff(pAd, RTMP_HALT, 0);
@@ -420,7 +421,7 @@
 	{
 		u32 reg = 0;
 		RTMP_IO_READ32(pAd, 0x1300, &reg);	/* clear garbage interrupts */
-		printk("0x1300 = %08x\n", reg);
+		printk(KERN_DEBUG "0x1300 = %08x\n", reg);
 	}
 
 	{
@@ -724,7 +725,8 @@
 int AdapterBlockAllocateMemory(void *handle, void ** ppAd)
 {
 
-	*ppAd = (void *)vmalloc(sizeof(struct rt_rtmp_adapter));	/*pci_alloc_consistent(pci_dev, sizeof(struct rt_rtmp_adapter), phy_addr); */
+	*ppAd = vmalloc(sizeof(struct rt_rtmp_adapter));
+	/* pci_alloc_consistent(pci_dev, sizeof(struct rt_rtmp_adapter), phy_addr); */
 
 	if (*ppAd) {
 		NdisZeroMemory(*ppAd, sizeof(struct rt_rtmp_adapter));
diff --git a/drivers/staging/rt2860/rt_pci_rbus.c b/drivers/staging/rt2860/rt_pci_rbus.c
index 3004be6..e5fb67c 100644
--- a/drivers/staging/rt2860/rt_pci_rbus.c
+++ b/drivers/staging/rt2860/rt_pci_rbus.c
@@ -31,7 +31,8 @@
     Create and register network interface.
 
     Revision History:
-    Who         When            What
+    Who         	When            What
+    Justin P. Mattock	11/07/2010	Fix a typo
     --------    ----------      ----------------------------------------------
 */
 
@@ -356,7 +357,7 @@
 
 	RTMPHandleMgmtRingDmaDoneInterrupt(pAd);
 
-	/* if you use RTMP_SEM_LOCK, sometimes kernel will hang up, no any */
+	/* if you use RTMP_SEM_LOCK, sometimes kernel will hang up, without any */
 	/* bug report output */
 	RTMP_INT_LOCK(&pAd->irq_lock, flags);
 	/*
@@ -787,7 +788,7 @@
 }
 
 /*
- * invaild or writeback cache
+ * invalidate or write back the cache
  * and convert virtual address to physical address
  */
 dma_addr_t linux_pci_map_single(struct rt_rtmp_adapter *pAd, void *ptr,
diff --git a/drivers/staging/rt2860/rt_usb.c b/drivers/staging/rt2860/rt_usb.c
index bcfc0f5..eb037d2 100644
--- a/drivers/staging/rt2860/rt_usb.c
+++ b/drivers/staging/rt2860/rt_usb.c
@@ -32,7 +32,8 @@
 	Revision History:
 	Who			When		What
 	--------	----------	----------------------------------------------
-	Name		Date		Modification logs
+	Name			Date		Modification logs
+	Justin P. Mattock	11/07/2010	Fix some typos.
 
 */
 
@@ -40,25 +41,25 @@
 
 void dump_urb(struct urb *purb)
 {
-	printk("urb                  :0x%08lx\n", (unsigned long)purb);
-	printk("\tdev                   :0x%08lx\n", (unsigned long)purb->dev);
-	printk("\t\tdev->state          :0x%d\n", purb->dev->state);
-	printk("\tpipe                  :0x%08x\n", purb->pipe);
-	printk("\tstatus                :%d\n", purb->status);
-	printk("\ttransfer_flags        :0x%08x\n", purb->transfer_flags);
-	printk("\ttransfer_buffer       :0x%08lx\n",
+	printk(KERN_DEBUG "urb                  :0x%08lx\n", (unsigned long)purb);
+	printk(KERN_DEBUG "\tdev                   :0x%08lx\n", (unsigned long)purb->dev);
+	printk(KERN_DEBUG "\t\tdev->state          :0x%d\n", purb->dev->state);
+	printk(KERN_DEBUG "\tpipe                  :0x%08x\n", purb->pipe);
+	printk(KERN_DEBUG "\tstatus                :%d\n", purb->status);
+	printk(KERN_DEBUG "\ttransfer_flags        :0x%08x\n", purb->transfer_flags);
+	printk(KERN_DEBUG "\ttransfer_buffer       :0x%08lx\n",
 	       (unsigned long)purb->transfer_buffer);
-	printk("\ttransfer_buffer_length:%d\n", purb->transfer_buffer_length);
-	printk("\tactual_length         :%d\n", purb->actual_length);
-	printk("\tsetup_packet          :0x%08lx\n",
+	printk(KERN_DEBUG "\ttransfer_buffer_length:%d\n", purb->transfer_buffer_length);
+	printk(KERN_DEBUG "\tactual_length         :%d\n", purb->actual_length);
+	printk(KERN_DEBUG "\tsetup_packet          :0x%08lx\n",
 	       (unsigned long)purb->setup_packet);
-	printk("\tstart_frame           :%d\n", purb->start_frame);
-	printk("\tnumber_of_packets     :%d\n", purb->number_of_packets);
-	printk("\tinterval              :%d\n", purb->interval);
-	printk("\terror_count           :%d\n", purb->error_count);
-	printk("\tcontext               :0x%08lx\n",
+	printk(KERN_DEBUG "\tstart_frame           :%d\n", purb->start_frame);
+	printk(KERN_DEBUG "\tnumber_of_packets     :%d\n", purb->number_of_packets);
+	printk(KERN_DEBUG "\tinterval              :%d\n", purb->interval);
+	printk(KERN_DEBUG "\terror_count           :%d\n", purb->error_count);
+	printk(KERN_DEBUG "\tcontext               :0x%08lx\n",
 	       (unsigned long)purb->context);
-	printk("\tcomplete              :0x%08lx\n\n",
+	printk(KERN_DEBUG "\tcomplete              :0x%08lx\n\n",
 	       (unsigned long)purb->complete);
 }
 
@@ -279,7 +280,7 @@
 	    && !RTUSB_TEST_BULK_FLAG(pAd,
 				     (fRTUSB_BULK_OUT_DATA_FRAG <<
 				      BulkOutPipeId))) {
-		/* Indicate There is data avaliable */
+		/* Indicate There is data available */
 		RTUSB_SET_BULK_FLAG(pAd,
 				    (fRTUSB_BULK_OUT_DATA_NORMAL <<
 				     BulkOutPipeId));
@@ -335,7 +336,7 @@
 	}
 
 	/* Always call Bulk routine, even reset bulk. */
-	/* The protectioon of rest bulk should be in BulkOut routine */
+	/* The protection of rest bulk should be in BulkOut routine */
 	RTUSBKickBulkOut(pAd);
 }
 
@@ -383,7 +384,7 @@
 	RTMP_SEM_UNLOCK(&pAd->BulkOutLock[pRTSContext->BulkOutPipeId]);
 
 	/* Always call Bulk routine, even reset bulk. */
-	/* The protectioon of rest bulk should be in BulkOut routine */
+	/* The protection of rest bulk should be in BulkOut routine */
 	RTUSBKickBulkOut(pAd);
 
 }
@@ -427,7 +428,7 @@
 	RTMP_SEM_UNLOCK(&pAd->BulkOutLock[0]);
 
 	/* Always call Bulk routine, even reset bulk. */
-	/* The protectioon of rest bulk should be in BulkOut routine */
+	/* The protection of rest bulk should be in BulkOut routine */
 	RTUSBKickBulkOut(pAd);
 
 }
@@ -575,7 +576,7 @@
 		} else {
 
 			/* Always call Bulk routine, even reset bulk. */
-			/* The protectioon of rest bulk should be in BulkOut routine */
+			/* The protection of rest bulk should be in BulkOut routine */
 			if (pAd->MgmtRing.TxSwFreeIdx <
 			    MGMT_RING_SIZE
 			    /* pMLMEContext->bWaitingBulkOut == TRUE */) {
diff --git a/drivers/staging/rt2860/rtmp.h b/drivers/staging/rt2860/rtmp.h
index ca54e53..70daaa4 100644
--- a/drivers/staging/rt2860/rtmp.h
+++ b/drivers/staging/rt2860/rtmp.h
@@ -31,11 +31,12 @@
     Miniport generic portion header file
 
     Revision History:
-    Who         When          What
+    Who         	When          	What
     --------    ----------    ----------------------------------------------
-    Paul Lin    2002-08-01    created
-    James Tan   2002-09-06    modified (Revise NTCRegTable)
-    John Chang  2004-09-06    modified for RT2600
+    Paul Lin    	2002-08-01    	created
+    James Tan   	2002-09-06    	modified (Revise NTCRegTable)
+    John Chang  	2004-09-06    	modified for RT2600
+    Justin P. Mattock	11/07/2010	Fix some typos
 */
 #ifndef __RTMP_H__
 #define __RTMP_H__
@@ -337,7 +338,7 @@
 #define LEAP_ON(_p)                 (((_p)->StaCfg.LeapAuthMode) == CISCO_AuthModeLEAP)
 #define LEAP_CCKM_ON(_p)            ((((_p)->StaCfg.LeapAuthMode) == CISCO_AuthModeLEAP) && ((_p)->StaCfg.LeapAuthInfo.CCKM == TRUE))
 
-/* if orginal Ethernet frame contains no LLC/SNAP, then an extra LLC/SNAP encap is required */
+/* if original Ethernet frame contains no LLC/SNAP, then an extra LLC/SNAP encap is required */
 #define EXTRA_LLCSNAP_ENCAP_FROM_PKT_START(_pBufVA, _pExtraLlcSnapEncap)		\
 {																\
 	if (((*(_pBufVA + 12) << 8) + *(_pBufVA + 13)) > 1500) {	\
@@ -466,7 +467,7 @@
 /* Control block (Descriptor) for all ring descriptor DMA operation, buffer must be */
 /* contiguous physical memory. char stored the binding Rx packet descriptor */
 /* which won't be released, driver has to wait until upper layer return the packet */
-/* before giveing up this rx ring descriptor to ASIC. NDIS_BUFFER is assocaited pair */
+/* before giving up this rx ring descriptor to ASIC. NDIS_BUFFER is associated pair */
 /* to describe the packet buffer. For Tx, char stored the tx packet descriptor */
 /* which driver should ACK upper layer when the tx is physically done or failed. */
 /* */
@@ -602,7 +603,7 @@
 };
 
 struct rt_counter_drs {
-	/* to record the each TX rate's quality. 0 is best, the bigger the worse. */
+	/* record each TX rate's quality. 0 is best, the bigger the worse. */
 	u16 TxQuality[MAX_STEP_OF_TX_RATE_SWITCH];
 	u8 PER[MAX_STEP_OF_TX_RATE_SWITCH];
 	u8 TxRateUpPenalty;	/* extra # of second penalty due to last unstable condition */
@@ -719,7 +720,7 @@
 /* Packet information for NdisQueryPacket */
 /* */
 struct rt_packet_info {
-	u32 PhysicalBufferCount;	/* Physical breaks of buffer descripor chained */
+	u32 PhysicalBufferCount;	/* Physical breaks of buffer descriptor chained */
 	u32 BufferCount;	/* Number of Buffer descriptor chained */
 	u32 TotalPacketLength;	/* Self explained */
 	char *pFirstBuffer;	/* Pointer to first buffer descriptor */
@@ -846,8 +847,8 @@
 /* Power save method control */
 typedef union _PS_CONTROL {
 	struct {
-		unsigned long EnablePSinIdle:1;	/* Enable radio off when not connect to AP. radio on only when sitesurvey, */
-		unsigned long EnableNewPS:1;	/* Enable new  Chip power save fucntion . New method can only be applied in chip version after 2872. and PCIe. */
+		unsigned long EnablePSinIdle:1;	/* Enable radio off when not connected to AP. radio on only when sitesurvey, */
+		unsigned long EnableNewPS:1;	/* Enable the new chip power save function. The new method can only be applied to chip versions after 2872, and PCIe. */
 		unsigned long rt30xxPowerMode:2;	/* Power Level Mode for rt30xx chip */
 		unsigned long rt30xxFollowHostASPM:1;	/* Card Follows Host's setting for rt30xx chip. */
 		unsigned long rt30xxForceASPMTest:1;	/* Force enable L1 for rt30xx chip. This has higher priority than rt30xxFollowHostASPM Mode. */
@@ -1117,8 +1118,8 @@
 	unsigned long TimIELocationInBeacon[HW_BEACON_MAX_COUNT];
 	unsigned long CapabilityInfoLocationInBeacon[HW_BEACON_MAX_COUNT];
 	BOOLEAN EnableBeacon;	/* trigger to enable beacon transmission. */
-	u8 BeaconBitMap;	/* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter need to change. */
-	u8 DtimBitOn;	/* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter need to change. */
+	u8 BeaconBitMap;	/* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter needs to change. */
+	u8 DtimBitOn;	/* NOTE: If the MAX_MBSSID_NUM is larger than 8, this parameter needs to change. */
 };
 #endif /* RTMP_MAC_USB // */
 
@@ -1211,7 +1212,7 @@
 	/*BOOLEAN               bAutoTxRateSwitch; */
 	u8 MinTxRate;	/* RATE_1, RATE_2, RATE_5_5, RATE_11 */
 	u8 RtsRate;		/* RATE_xxx */
-	HTTRANSMIT_SETTING MlmeTransmit;	/* MGMT frame PHY rate setting when operatin at Ht rate. */
+	HTTRANSMIT_SETTING MlmeTransmit;	/* MGMT frame PHY rate setting when operating at HT rate. */
 	u8 MlmeRate;		/* RATE_xxx, used to send MLME frames */
 	u8 BasicMlmeRate;	/* Default Rate for sending MLME frames */
 
@@ -1264,7 +1265,7 @@
 	struct rt_ht_capability_ie HtCapability;
 	struct rt_add_ht_info_ie AddHTInfo;	/* Useful as AP. */
 	/*This IE is used with channel switch announcement element when changing to a new 40MHz. */
-	/*This IE is included in channel switch ammouncement frames 7.4.1.5, beacons, probe Rsp. */
+	/*This IE is included in channel switch announcement frames 7.4.1.5, beacons, probe Rsp. */
 	struct rt_new_ext_chan_ie NewExtChanOffset;	/*7.3.2.20A, 1 if extension channel is above the control channel, 3 if below, 0 if not present */
 
 	BOOLEAN bHTProtect;
@@ -1329,7 +1330,7 @@
 	/* GROUP 1 - */
 	/*   User configuration loaded from Registry, E2PROM or OID_xxx. These settings describe */
 	/*   the user intended configuration, but not necessary fully equal to the final */
-	/*   settings in ACTIVE BSS after negotiation/compromize with the BSS holder (either */
+	/*   settings in ACTIVE BSS after negotiation/compromise with the BSS holder (either */
 	/*   AP or IBSS holder). */
 	/*   Once initialized, user configuration can only be changed via OID_xxx */
 	u8 BssType;		/* BSS_INFRA or BSS_ADHOC */
@@ -1386,12 +1387,12 @@
 
 	/* For WPA countermeasures */
 	unsigned long LastMicErrorTime;	/* record last MIC error time */
-	unsigned long MicErrCnt;	/* Should be 0, 1, 2, then reset to zero (after disassoiciation). */
+	unsigned long MicErrCnt;	/* Should be 0, 1, 2, then reset to zero (after disassociation). */
 	BOOLEAN bBlockAssoc;	/* Block associate attempt for 60 seconds after counter measure occurred. */
 	/* For WPA-PSK supplicant state */
 	WPA_STATE WpaState;	/* Default is SS_NOTUSE and handled by microsoft 802.1x */
 	u8 ReplayCounter[8];
-	u8 ANonce[32];	/* ANonce for WPA-PSK from aurhenticator */
+	u8 ANonce[32];	/* ANonce for WPA-PSK from authenticator */
 	u8 SNonce[32];	/* SNonce for WPA-PSK */
 
 	u8 LastSNR0;		/* last received BEACON's SNR */
@@ -1423,7 +1424,7 @@
 	u8 RSNIE_Len;
 	u8 RSN_IE[MAX_LEN_OF_RSNIE];	/* The content saved here should be little-endian format. */
 
-	unsigned long CLBusyBytes;	/* Save the total bytes received durning channel load scan time */
+	unsigned long CLBusyBytes;	/* Save the total bytes received during channel load scan time */
 	u16 RPIDensity[8];	/* Array for RPI density collection */
 
 	u8 RMReqCnt;		/* Number of measurement request saved. */
@@ -1489,9 +1490,9 @@
 	BOOLEAN bForceTxBurst;	/* 1: force enble TX PACKET BURST, 0: disable */
 };
 
-/* This data structure keep the current active BSS/IBSS's configuration that this STA */
+/* This data structure keeps the current active BSS/IBSS's configuration that this STA */
 /* had agreed upon joining the network. Which means these parameters are usually decided */
-/* by the BSS/IBSS creator instead of user configuration. Data in this data structurre */
+/* by the BSS/IBSS creator instead of user configuration. Data in this data structure */
 /* is valid only when either ADHOC_ON(pAd) or INFRA_ON(pAd) is TRUE. */
 /* Normally, after SCAN or failed roaming attempts, we need to recover back to */
 /* the current active settings. */
@@ -1519,7 +1520,7 @@
 	/*Choose 1 from ValidAsWDS and ValidAsCLI  to validize. */
 	BOOLEAN ValidAsCLI;	/* Sta mode, set this TRUE after Linkup,too. */
 	BOOLEAN ValidAsWDS;	/* This is WDS Entry. only for AP mode. */
-	BOOLEAN ValidAsApCli;	/*This is a AP-Client entry, only for AP mode which enable AP-Client functions. */
+	BOOLEAN ValidAsApCli;	/* This is an AP-Client entry, only for AP mode which enables AP-Client functions. */
 	BOOLEAN ValidAsMesh;
 	BOOLEAN ValidAsDls;	/* This is DLS Entry. only for STA mode. */
 	BOOLEAN isCached;
@@ -1527,7 +1528,7 @@
 
 	u8 EnqueueEapolStartTimerRunning;	/* Enqueue EAPoL-Start for triggering EAP SM */
 	/*jan for wpa */
-	/* record which entry revoke MIC Failure , if it leaves the BSS itself, AP won't update aMICFailTime MIB */
+	/* record which entry revoke MIC Failure, if it leaves the BSS itself, AP won't update aMICFailTime MIB */
 	u8 CMTimerRunning;
 	u8 apidx;		/* MBSS number */
 	u8 RSNIE_Len;
@@ -1722,7 +1723,7 @@
 	unsigned long Rt3xxRalinkLinkCtrl;	/* USed for 3090F chip */
 	u16 DeviceID;	/* Read from PCI config */
 	unsigned long AccessBBPFailCount;
-	BOOLEAN bPCIclkOff;	/* flag that indicate if the PICE power status in Configuration SPace.. */
+	BOOLEAN bPCIclkOff;	/* flag that indicates the PCIe power status in Configuration Space. */
 	BOOLEAN bPCIclkOffDisableTx;	/* */
 
 	BOOLEAN brt30xxBanMcuCmd;	/*when = 0xff means all commands are ok to set . */
@@ -1871,9 +1872,9 @@
 	/* ---------------------------- */
 	u8 RfIcType;		/* RFIC_xxx */
 	unsigned long RfFreqOffset;	/* Frequency offset for channel switching */
-	struct rt_rtmp_rf_regs LatchRfRegs;	/* latch th latest RF programming value since RF IC doesn't support READ */
+	struct rt_rtmp_rf_regs LatchRfRegs;	/* latch the latest RF programming value since RF IC doesn't support READ */
 
-	EEPROM_ANTENNA_STRUC Antenna;	/* Since ANtenna definition is different for a & g. We need to save it for future reference. */
+	EEPROM_ANTENNA_STRUC Antenna;	/* Since Antenna definition is different for a & g. We need to save it for future reference. */
 	EEPROM_NIC_CONFIG2_STRUC NicConfig2;
 
 	/* This soft Rx Antenna Diversity mechanism is used only when user set */
@@ -1990,7 +1991,7 @@
 	struct rt_common_config CommonCfg;
 	struct rt_mlme Mlme;
 
-	/* AP needs those vaiables for site survey feature. */
+	/* AP needs those variables for site survey feature. */
 	struct rt_mlme_aux MlmeAux;	/* temporary settings used during MLME state machine */
 	struct rt_bss_table ScanTab;	/* store the latest SCAN result */
 
@@ -2012,7 +2013,7 @@
 	/* various Counters */
 	struct rt_counter_802_3 Counters8023;	/* 802.3 counters */
 	struct rt_counter_802_11 WlanCounters;	/* 802.11 MIB counters */
-	struct rt_counter_ralink RalinkCounters;	/* Ralink propriety counters */
+	struct rt_counter_ralink RalinkCounters;	/* Ralink proprietary counters */
 	struct rt_counter_drs DrsCounters;	/* counters for Dynamic TX Rate Switching */
 	struct rt_private PrivateInfo;	/* Private information & counters */
 
@@ -2024,7 +2025,7 @@
 	u16 Sequence;
 
 	/* Control disconnect / connect event generation */
-	/*+++Didn't used anymore */
+	/*+++Not used anymore */
 	unsigned long LinkDownTime;
 	/*--- */
 	unsigned long LastRxRate;
@@ -2036,7 +2037,7 @@
 	unsigned long ExtraInfo;	/* Extra information for displaying status */
 	unsigned long SystemErrorBitmap;	/* b0: E2PROM version error */
 
-	/*+++Didn't used anymore */
+	/*+++Not used anymore */
 	unsigned long MacIcVersion;	/* MAC/BBP serial interface issue solved after ver.D */
 	/*--- */
 
@@ -2089,7 +2090,7 @@
 	unsigned long BulkOutReq;
 	unsigned long BulkOutComplete;
 	unsigned long BulkOutCompleteOther;
-	unsigned long BulkOutCompleteCancel;	/* seems not use now? */
+	unsigned long BulkOutCompleteCancel;	/* seems not used now? */
 	unsigned long BulkInReq;
 	unsigned long BulkInComplete;
 	unsigned long BulkInCompleteFail;
@@ -2196,9 +2197,9 @@
 struct rt_tx_blk {
 	u8 QueIdx;
 	u8 TxFrameType;	/* Indicate the Transmission type of the all frames in one batch */
-	u8 TotalFrameNum;	/* Total frame number want to send-out in one batch */
+	u8 TotalFrameNum;	/* Total number of frames to send out in one batch */
 	u16 TotalFragNum;	/* Total frame fragments required in one batch */
-	u16 TotalFrameLen;	/* Total length of all frames want to send-out in one batch */
+	u16 TotalFrameLen;	/* Total length of all frames to send out in one batch */
 
 	struct rt_queue_header TxPacketList;
 	struct rt_mac_table_entry *pMacEntry;	/* NULL: packet with 802.11 RA field is multicast/broadcast address */
@@ -2207,7 +2208,7 @@
 	/* Following structure used for the characteristics of a specific packet. */
 	void *pPacket;
 	u8 *pSrcBufHeader;	/* Reference to the head of sk_buff->data */
-	u8 *pSrcBufData;	/* Reference to the sk_buff->data, will changed depends on hanlding progresss */
+	u8 *pSrcBufData;	/* Reference to the sk_buff->data, will change depending on the handling progress */
 	u32 SrcBufLen;		/* Length of packet payload which not including Layer 2 header */
 	u8 *pExtraLlcSnapEncap;	/* NULL means no extra LLC/SNAP is required */
 	u8 HeaderBuf[128];	/* TempBuffer for TX_INFO + TX_WI + 802.11 Header + padding + AMSDU SubHeader + LLC/SNAP */
@@ -2219,7 +2220,7 @@
 	u8 apidx;		/* The interface associated to this packet */
 	u8 Wcid;		/* The MAC entry associated to this packet */
 	u8 UserPriority;	/* priority class of packet */
-	u8 FrameGap;		/* what kind of IFS this packet use */
+	u8 FrameGap;		/* what kind of IFS does this packet use */
 	u8 MpduReqNum;	/* number of fragments of this frame */
 	u8 TxRate;		/* TODO: Obsoleted? Should change to MCS? */
 	u8 CipherAlg;	/* cipher alogrithm */
@@ -2978,7 +2979,7 @@
 
 void IterateOnBssTab(struct rt_rtmp_adapter *pAd);
 
-void IterateOnBssTab2(struct rt_rtmp_adapter *pAd);;
+void IterateOnBssTab2(struct rt_rtmp_adapter *pAd);
 
 void JoinParmFill(struct rt_rtmp_adapter *pAd,
 		  struct rt_mlme_join_req *JoinReq, unsigned long BssIdx);
diff --git a/drivers/staging/rt2860/rtmp_def.h b/drivers/staging/rt2860/rtmp_def.h
index 9c54bac..6ac617e 100644
--- a/drivers/staging/rt2860/rtmp_def.h
+++ b/drivers/staging/rt2860/rtmp_def.h
@@ -31,10 +31,11 @@
     Miniport related definition header
 
     Revision History:
-    Who         When          What
+    Who        	 	When          	What
     --------    ----------    ----------------------------------------------
-    Paul Lin    08-01-2002    created
-    John Chang  08-05-2003    add definition for 11g & other drafts
+    Paul Lin    	08-01-2002    	created
+    John Chang  	08-05-2003    	add definition for 11g & other drafts
+    Justin P. Mattock	11/07/2010	Fix some typos
 */
 #ifndef __RTMP_DEF_H__
 #define __RTMP_DEF_H__
@@ -111,11 +112,11 @@
 	WMM Note: If memory of your system is not much, please reduce the definition;
 	or when you do WMM test, the queue for low priority AC will be full, i.e.
 	TX_RING_SIZE + MAX_PACKETS_IN_QUEUE packets for the AC will be buffered in
-	WLAN, maybe no any packet buffer can be got in Ethernet driver.
+	WLAN, so the Ethernet driver may not be able to get any packet buffer.
 
-	Sometimes no packet buffer can be got in Ethernet driver, the system will
+	Sometimes, when the Ethernet driver cannot get any packet buffer, the system will
 	send flow control packet to the sender to slow down its sending rate.
-	So no WMM can be saw in the air.
+	So no WMM can be seen in the air.
 */
 
 /*
@@ -125,7 +126,7 @@
 	And in rt_main_end.c, clConfig.clNum = RX_RING_SIZE * 3; is changed to
 	clConfig.clNum = RX_RING_SIZE * 4;
 */
-/* TODO: For VxWorks the size is 256. Shall we cahnge the value as 256 for all OS????? */
+/* TODO: For VxWorks the size is 256. Shall we change the value to 256 for all OS? */
 #define MAX_PACKETS_IN_QUEUE				(512)	/*(512)    // to pass WMM A5-WPAPSK */
 
 #define MAX_PACKETS_IN_MCAST_PS_QUEUE		32
@@ -171,7 +172,7 @@
 #define fRTMP_ADAPTER_SCAN_2040 			0x04000000
 #define	fRTMP_ADAPTER_RADIO_MEASUREMENT		0x08000000
 
-#define fRTMP_ADAPTER_START_UP         		0x10000000	/*Devive already initialized and enabled Tx/Rx. */
+#define fRTMP_ADAPTER_START_UP         		0x10000000	/*Device already initialized and enabled Tx/Rx. */
 #define fRTMP_ADAPTER_MEDIA_STATE_CHANGE    0x20000000
 #define fRTMP_ADAPTER_IDLE_RADIO_OFF        0x40000000
 
@@ -205,8 +206,8 @@
 #define fRTMP_PS_SET_PCI_CLK_OFF_COMMAND          0x00000002
 /* Indicate driver should disable kick off hardware to send packets from now. */
 #define fRTMP_PS_DISABLE_TX         0x00000004
-/* Indicate driver should IMMEDIATELY fo to sleep after receiving AP's beacon in which  doesn't indicate unicate nor multicast packets for me */
-/*. This flag is used ONLY in RTMPHandleRxDoneInterrupt routine. */
+/* Indicate driver should IMMEDIATELY go to sleep after receiving an AP's beacon which doesn't indicate unicast nor multicast packets for me */
+/* This flag is used ONLY in RTMPHandleRxDoneInterrupt routine. */
 #define fRTMP_PS_GO_TO_SLEEP_NOW         0x00000008
 #define fRTMP_PS_TOGGLE_L1		0x00000010	/* Use Toggle L1 mechanism for rt28xx PCIe */
 
@@ -303,7 +304,7 @@
 
 /* WDS definition */
 #define	MAX_WDS_ENTRY               4
-#define WDS_PAIRWISE_KEY_OFFSET     60	/* WDS links uses pairwise key#60 ~ 63 in ASIC pairwise key table */
+#define WDS_PAIRWISE_KEY_OFFSET     60	/* WDS links use pairwise key#60 ~ 63 in ASIC pairwise key table */
 
 #define	WDS_DISABLE_MODE            0
 #define	WDS_RESTRICT_MODE           1
@@ -559,7 +560,7 @@
 #define IE_ADD_HT2                        53	/* 802.11n d1. ADDITIONAL HT CAPABILITY. ELEMENT ID TBD */
 
 /* For 802.11n D3.03 */
-/*#define IE_NEW_EXT_CHA_OFFSET             62    // 802.11n d1. New extension channel offset elemet */
+/*#define IE_NEW_EXT_CHA_OFFSET             62    // 802.11n d1. New extension channel offset element */
 #define IE_SECONDARY_CH_OFFSET		62	/* 802.11n D3.03        Secondary Channel Offset element */
 #define IE_WAPI							68	/* WAPI information element */
 #define IE_2040_BSS_COEXIST               72	/* 802.11n D3.0.3 */
@@ -678,7 +679,7 @@
 
 #define ACT_MACHINE_BASE              0
 
-/*Those PEER_xx_CATE number is based on real Categary value in IEEE spec. Please don'es modify it by your self. */
+/*These PEER_xx_CATE numbers are based on the real Category value in the IEEE spec. Please do not modify them yourself. */
 /*Category */
 #define MT2_PEER_SPECTRUM_CATE              0
 #define MT2_PEER_QOS_CATE              1
@@ -748,7 +749,7 @@
 
 #define ACT_FUNC_SIZE                 (MAX_ACT_STATE * MAX_ACT_MSG)
 /* */
-/* STA's AUTHENTICATION state machine: states, evvents, total function # */
+/* STA's AUTHENTICATION state machine: states, events, total function # */
 /* */
 #define AUTH_REQ_IDLE                   0
 #define AUTH_WAIT_SEQ2                  1
@@ -948,7 +949,7 @@
 #define BLOCK_ACK                   0x60	/* b6:5 = 11 */
 
 /* */
-/* rtmp_data.c use these definition */
+/* rtmp_data.c uses these definitions */
 /* */
 #define LENGTH_802_11               24
 #define LENGTH_802_11_AND_H         30
@@ -1288,7 +1289,7 @@
 #define IW_STA_LINKDOWN_EVENT_FLAG					0x0210
 #define IW_SCAN_COMPLETED_EVENT_FLAG				0x0211
 #define IW_SCAN_ENQUEUE_FAIL_EVENT_FLAG				0x0212
-/* if add new system event flag, please upadte the IW_SYS_EVENT_FLAG_END */
+/* if adding a new system event flag, please update the IW_SYS_EVENT_FLAG_END */
 #define	IW_SYS_EVENT_FLAG_END                       0x0212
 #define	IW_SYS_EVENT_TYPE_NUM						(IW_SYS_EVENT_FLAG_END - IW_SYS_EVENT_FLAG_START + 1)
 /* For system event - end */
@@ -1305,7 +1306,7 @@
 #define IW_SPOOF_DEAUTH_EVENT_FLAG					0x0307
 #define IW_SPOOF_UNKNOWN_MGMT_EVENT_FLAG			0x0308
 #define IW_REPLAY_ATTACK_EVENT_FLAG					0x0309
-/* if add new spoof attack event flag, please upadte the IW_SPOOF_EVENT_FLAG_END */
+/* if adding a new spoof attack event flag, please update the IW_SPOOF_EVENT_FLAG_END */
 #define	IW_SPOOF_EVENT_FLAG_END                     0x0309
 #define	IW_SPOOF_EVENT_TYPE_NUM						(IW_SPOOF_EVENT_FLAG_END - IW_SPOOF_EVENT_FLAG_START + 1)
 /* For spoof attack event - end */
@@ -1319,7 +1320,7 @@
 #define IW_FLOOD_DISASSOC_EVENT_FLAG				0x0404
 #define IW_FLOOD_DEAUTH_EVENT_FLAG					0x0405
 #define IW_FLOOD_EAP_REQ_EVENT_FLAG					0x0406
-/* if add new flooding attack event flag, please upadte the IW_FLOOD_EVENT_FLAG_END */
+/* if adding a new flooding attack event flag, please update the IW_FLOOD_EVENT_FLAG_END */
 #define	IW_FLOOD_EVENT_FLAG_END                   	0x0406
 #define	IW_FLOOD_EVENT_TYPE_NUM						(IW_FLOOD_EVENT_FLAG_END - IW_FLOOD_EVENT_FLAG_START + 1)
 /* For flooding attack - end */
diff --git a/drivers/staging/rt2860/rtmp_timer.h b/drivers/staging/rt2860/rtmp_timer.h
index 28b8ac6..15b6287 100644
--- a/drivers/staging/rt2860/rtmp_timer.h
+++ b/drivers/staging/rt2860/rtmp_timer.h
@@ -28,13 +28,14 @@
 	rtmp_timer.h
 
     Abstract:
-	Ralink Wireless Driver timer related data structures and delcarations
+	Ralink Wireless Driver timer related data structures and declarations
 
     Revision History:
-	Who           When                What
+	Who          		When                 What
 	--------    ----------      ----------------------------------------------
-	Name          Date                 Modification logs
-	Shiang Tu    Aug-28-2008	init version
+	Name          		Date                 Modification logs
+	Shiang Tu    		Aug-28-2008 	     init version
+	Justin P. Mattock	11/07/2010	     Fix a typo
 
 */
 
@@ -51,8 +52,8 @@
 
 /* ----------------- Timer Related MARCO ---------------*/
 /* In some os or chipset, we have a lot of timer functions and will read/write register, */
-/*   it's not allowed in Linux USB sub-system to do it ( because of sleep issue when */
-/*  submit to ctrl pipe). So we need a wrapper function to take care it. */
+/* it's not allowed in the Linux USB sub-system to do so (because of a sleep issue when */
+/* submitting to the ctrl pipe). So we need a wrapper function to take care of it. */
 
 #ifdef RTMP_TIMER_TASK_SUPPORT
 typedef void(*RTMP_TIMER_TASK_HANDLE) (void *SystemSpecific1,
diff --git a/drivers/staging/rt2860/spectrum.h b/drivers/staging/rt2860/spectrum.h
index 648fd63..4c325ba 100644
--- a/drivers/staging/rt2860/spectrum.h
+++ b/drivers/staging/rt2860/spectrum.h
@@ -37,7 +37,7 @@
 	==========================================================================
 	Description:
 		Prepare Measurement request action frame and enqueue it into
-		management queue waiting for transmition.
+		management queue waiting for transmission.
 
 	Parametrs:
 		1. the destination mac address of the frame.
@@ -60,7 +60,7 @@
 	==========================================================================
 	Description:
 		Prepare Measurement report action frame and enqueue it into
-		management queue waiting for transmition.
+		management queue waiting for transmission.
 
 	Parametrs:
 		1. the destination mac address of the frame.
@@ -80,7 +80,7 @@
 	==========================================================================
 	Description:
 		Prepare TPC Request action frame and enqueue it into
-		management queue waiting for transmition.
+		management queue waiting for transmission.
 
 	Parametrs:
 		1. the destination mac address of the frame.
@@ -94,7 +94,7 @@
 	==========================================================================
 	Description:
 		Prepare TPC Report action frame and enqueue it into
-		management queue waiting for transmition.
+		management queue waiting for transmission.
 
 	Parametrs:
 		1. the destination mac address of the frame.
@@ -110,7 +110,7 @@
 	==========================================================================
 	Description:
 		Prepare Channel Switch Announcement action frame and enqueue it into
-		management queue waiting for transmition.
+		management queue waiting for transmission.
 
 	Parametrs:
 		1. the destination mac address of the frame.
@@ -126,7 +126,7 @@
 /*
 	==========================================================================
 	Description:
-		Spectrun action frames Handler such as channel switch annoucement,
+		Spectrum action frames handler, such as channel switch announcement,
 		measurement report, measurement request actions frames.
 
 	Parametrs:
diff --git a/drivers/staging/rt2860/sta/assoc.c b/drivers/staging/rt2860/sta/assoc.c
index b7efb0b..59e931c 100644
--- a/drivers/staging/rt2860/sta/assoc.c
+++ b/drivers/staging/rt2860/sta/assoc.c
@@ -32,7 +32,8 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	John		2004-9-3		porting from RT2500
+	John			2004-9-3		porting from RT2500
+	Justin P. Mattock	11/07/2010		Fix typos
 */
 #include "../rt_config.h"
 
@@ -277,10 +278,10 @@
 	u16 VarIesOffset;
 	u16 Status;
 
-	/* Block all authentication request durning WPA block period */
+	/* Block all authentication request during WPA block period */
 	if (pAd->StaCfg.bBlockAssoc == TRUE) {
 		DBGPRINT(RT_DEBUG_TRACE,
-			 ("ASSOC - Block Assoc request durning WPA block period!\n"));
+			 ("ASSOC - Block Assoc request during WPA block period!\n"));
 		pAd->Mlme.AssocMachine.CurrState = ASSOC_IDLE;
 		Status = MLME_STATE_MACHINE_REJECT;
 		MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_ASSOC_CONF, 2,
@@ -605,10 +606,10 @@
 	u8 *pOutBuffer = NULL;
 	u16 Status;
 
-	/* Block all authentication request durning WPA block period */
+	/* Block all authentication request during WPA block period */
 	if (pAd->StaCfg.bBlockAssoc == TRUE) {
 		DBGPRINT(RT_DEBUG_TRACE,
-			 ("ASSOC - Block ReAssoc request durning WPA block period!\n"));
+			 ("ASSOC - Block ReAssoc request during WPA block period!\n"));
 		pAd->Mlme.AssocMachine.CurrState = ASSOC_IDLE;
 		Status = MLME_STATE_MACHINE_REJECT;
 		MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_REASSOC_CONF, 2,
@@ -1001,7 +1002,7 @@
 	pAd->MlmeAux.CapabilityInfo =
 	    CapabilityInfo & SUPPORTED_CAPABILITY_INFO;
 
-	/* Some HT AP might lost WMM IE. We add WMM ourselves. beacuase HT requires QoS on. */
+	/* Some HT AP might lose the WMM IE. We add WMM ourselves, because HT requires QoS on. */
 	if ((HtCapabilityLen > 0) && (pEdcaParm->bValid == FALSE)) {
 		pEdcaParm->bValid = TRUE;
 		pEdcaParm->Aifsn[0] = 3;
@@ -1054,7 +1055,7 @@
 	/* Set New WPA information */
 	Idx = BssTableSearch(&pAd->ScanTab, pAddr2, pAd->MlmeAux.Channel);
 	if (Idx == BSS_NOT_FOUND) {
-		DBGPRINT_ERR(("ASSOC - Can't find BSS after receiving Assoc response\n"));
+		DBGPRINT_ERR("ASSOC - Can't find BSS after receiving Assoc response\n");
 	} else {
 		/* Init variable */
 		pAd->MacTab.Content[BSSID_WCID].RSNIE_Len = 0;
diff --git a/drivers/staging/rt2860/sta/auth.c b/drivers/staging/rt2860/sta/auth.c
index 404bd22..23ea00b 100644
--- a/drivers/staging/rt2860/sta/auth.c
+++ b/drivers/staging/rt2860/sta/auth.c
@@ -32,7 +32,8 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	John		2004-9-3		porting from RT2500
+	John			2004-9-3		porting from RT2500
+	Justin P. Mattock	11/07/2010		Fix typos
 */
 #include "../rt_config.h"
 
@@ -455,10 +456,10 @@
 	u8 *pOutBuffer = NULL;
 	unsigned long FrameLen = 0, tmp = 0;
 
-	/* Block all authentication request durning WPA block period */
+	/* Block all authentication request during WPA block period */
 	if (pAd->StaCfg.bBlockAssoc == TRUE) {
 		DBGPRINT(RT_DEBUG_TRACE,
-			 ("%s - Block Auth request durning WPA block period!\n",
+			 ("%s - Block Auth request during WPA block period!\n",
 			  pSMName));
 		pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE;
 		Status = MLME_STATE_MACHINE_REJECT;
@@ -508,8 +509,7 @@
 		RTMPSetTimer(pAuthTimer, Timeout);
 		return TRUE;
 	} else {
-		DBGPRINT_ERR(("%s - MlmeAuthReqAction() sanity check failed\n",
-			      pSMName));
+		DBGPRINT_ERR("%s - MlmeAuthReqAction() sanity check failed\n", pSMName);
 		return FALSE;
 	}
 
diff --git a/drivers/staging/rt2860/sta/connect.c b/drivers/staging/rt2860/sta/connect.c
index c380551..4996258 100644
--- a/drivers/staging/rt2860/sta/connect.c
+++ b/drivers/staging/rt2860/sta/connect.c
@@ -32,7 +32,8 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	John			2004-08-08			Major modification from RT2560
+	John			2004-08-08		Major modification from RT2560
+	Justin P. Mattock	11/07/2010		Fix typos
 */
 #include "../rt_config.h"
 
@@ -64,7 +65,7 @@
 
 /* The following MACRO is called after 1. starting an new IBSS, 2. successfully JOIN an IBSS, */
 /* or 3. successfully ASSOCIATE to a BSS, 4. successfully RE_ASSOCIATE to a BSS */
-/* All settings successfuly negotiated furing MLME state machines become final settings */
+/* All settings successfully negotiated during the MLME state machines become final settings */
 /* and are copied to pAd->StaActive */
 #define COPY_SETTINGS_FROM_MLME_AUX_TO_ACTIVE_CFG(_pAd)                                 \
 {                                                                                       \
@@ -214,8 +215,7 @@
 		break;
 #endif /* RTMP_MAC_USB // */
 	default:
-		DBGPRINT_ERR(("ERROR! CNTL - Illegal message type(=%ld)",
-			      Elem->MsgType));
+		DBGPRINT_ERR("ERROR! CNTL - Illegal message type(=%ld)", Elem->MsgType);
 		break;
 	}
 }
@@ -553,7 +553,7 @@
 	NdisMoveMemory(&pAd->MlmeAux.SsidBssTab.BssEntry[0],
 		       &pAd->ScanTab.BssEntry[BssIdx], sizeof(struct rt_bss_entry));
 
-	/* Add SSID into MlmeAux for site surey joining hidden SSID */
+	/* Add SSID into MlmeAux for site survey joining hidden SSID */
 	pAd->MlmeAux.SsidLen = pAd->ScanTab.BssEntry[BssIdx].SsidLen;
 	NdisMoveMemory(pAd->MlmeAux.Ssid, pAd->ScanTab.BssEntry[BssIdx].Ssid,
 		       pAd->MlmeAux.SsidLen);
@@ -666,7 +666,7 @@
 }
 
 /* Roaming is the only external request triggering CNTL state machine */
-/* despite of other "SET OID" operation. All "SET OID" related oerations */
+/* despite of other "SET OID" operation. All "SET OID" related operations */
 /* happen in sequence, because no other SET OID will be sent to this device */
 /* until the the previous SET operation is complete (successful o failed). */
 /* So, how do we quarantee this ROAMING request won't corrupt other "SET OID"? */
@@ -1224,7 +1224,7 @@
 	/* Change to AP channel */
 	if ((pAd->CommonCfg.CentralChannel > pAd->CommonCfg.Channel)
 	    && (pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth == BW_40)) {
-		/* Must using 40MHz. */
+		/* Must use 40MHz. */
 		pAd->CommonCfg.BBPCurrentBW = BW_40;
 		AsicSwitchChannel(pAd, pAd->CommonCfg.CentralChannel, FALSE);
 		AsicLockChannel(pAd, pAd->CommonCfg.CentralChannel);
@@ -1259,7 +1259,7 @@
 	} else if ((pAd->CommonCfg.CentralChannel < pAd->CommonCfg.Channel)
 		   && (pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth ==
 		       BW_40)) {
-		/* Must using 40MHz. */
+		/* Must use 40MHz. */
 		pAd->CommonCfg.BBPCurrentBW = BW_40;
 		AsicSwitchChannel(pAd, pAd->CommonCfg.CentralChannel, FALSE);
 		AsicLockChannel(pAd, pAd->CommonCfg.CentralChannel);
@@ -1343,12 +1343,12 @@
 	AsicSetSlotTime(pAd, TRUE);
 	AsicSetEdcaParm(pAd, &pAd->CommonCfg.APEdcaParm);
 
-	/* Call this for RTS protectionfor legacy rate, we will always enable RTS threshold, but normally it will not hit */
+	/* Call this for RTS protection for legacy rate, we will always enable RTS threshold, but normally it will not hit */
 	AsicUpdateProtect(pAd, 0, (OFDMSETPROTECT | CCKSETPROTECT), TRUE,
 			  FALSE);
 
 	if ((pAd->StaActive.SupportedPhyInfo.bHtEnable == TRUE)) {
-		/* Update HT protectionfor based on AP's operating mode. */
+		/* Update HT protection based on AP's operating mode. */
 		if (pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent == 1) {
 			AsicUpdateProtect(pAd,
 					  pAd->MlmeAux.AddHtInfo.AddHtInfo2.
@@ -1530,7 +1530,7 @@
 		/* Add BSSID to WCID search table */
 		AsicUpdateRxWCIDTable(pAd, BSSID_WCID, pAd->CommonCfg.Bssid);
 
-		/* If WEP is enabled, add paiewise and shared key */
+		/* If WEP is enabled, add pairwise and shared key */
 		if (((pAd->StaCfg.WpaSupplicantUP) &&
 		     (pAd->StaCfg.WepStatus == Ndis802_11WEPEnabled) &&
 		     (pAd->StaCfg.PortSecured == WPA_802_1X_PORT_SECURED)) ||
@@ -1681,9 +1681,9 @@
 	pAd->Mlme.PeriodicRound = 0;
 	pAd->Mlme.OneSecPeriodicRound = 0;
 	pAd->bConfigChanged = FALSE;	/* Reset config flag */
-	pAd->ExtraInfo = GENERAL_LINK_UP;	/* Update extra information to link is up */
+	pAd->ExtraInfo = GENERAL_LINK_UP;	/* Update extra information to indicate the link is up */
 
-	/* Set asic auto fall back */
+	/* Set ASIC auto fallback */
 	{
 		u8 *pTable;
 		u8 TableSize = 0;
@@ -1854,8 +1854,8 @@
 	Note:
 		We need more information to know it's this requst from AP.
 		If yes! we need to do extra handling, for example, remove the WPA key.
-		Otherwise on 4-way handshaking will faied, since the WPA key didn't be
-		remove while auto reconnect.
+		Otherwise the 4-way handshaking will fail, since the WPA key didn't get
+		removed during auto reconnect.
 		Disconnect request from AP, it means we will start afresh 4-way handshaking
 		on WPA mode.
 
@@ -1870,9 +1870,9 @@
 		return;
 
 	RTMP_CLEAR_PSFLAG(pAd, fRTMP_PS_GO_TO_SLEEP_NOW);
-	/*Comment the codes, beasue the line 2291 call the same function. */
-	/*RTMPCancelTimer(&pAd->Mlme.PsPollTimer,               &Cancelled); */
-	/* Not allow go to sleep within linkdown function. */
+	/* Comment out the code, because line 2291 calls the same function. */
+	/* RTMPCancelTimer(&pAd->Mlme.PsPollTimer, &Cancelled); */
+	/* Not allowed to go to sleep within the linkdown function. */
 	RTMP_CLEAR_PSFLAG(pAd, fRTMP_PS_CAN_GO_SLEEP);
 
 	if (pAd->CommonCfg.bWirelessEvent) {
@@ -1970,7 +1970,7 @@
 	/* Set LED */
 	RTMPSetLED(pAd, LED_LINK_DOWN);
 	pAd->LedIndicatorStrength = 0xF0;
-	RTMPSetSignalLED(pAd, -100);	/* Force signal strength Led to be turned off, firmware is not done it. */
+	RTMPSetSignalLED(pAd, -100);	/* Force the signal strength LED to be turned off; firmware has not done it. */
 
 	AsicDisableSync(pAd);
 
diff --git a/drivers/staging/rt2860/sta/rtmp_data.c b/drivers/staging/rt2860/sta/rtmp_data.c
index 23879b7..e82c6b6 100644
--- a/drivers/staging/rt2860/sta/rtmp_data.c
+++ b/drivers/staging/rt2860/sta/rtmp_data.c
@@ -31,7 +31,8 @@
 	Data path subroutines
 
 	Revision History:
-	Who 		When			What
+	Who 	  		When		What
+	Justin P. Mattock	11/07/2010	Fix typos
 	--------	----------		----------------------------------------------
 */
 #include "../rt_config.h"
@@ -257,8 +258,8 @@
 		    && (pAd->CommonCfg.bDisableReordering == 0)) {
 			Indicate_AMPDU_Packet(pAd, pRxBlk, FromWhichBSSID);
 		} else {
-			/* Determin the destination of the EAP frame */
-			/*  to WPA state machine or upper layer */
+			/* Determine the destination of the EAP frame */
+			/* to WPA state machine or upper layer */
 			STARxEAPOLFrameIndicate(pAd, pEntry, pRxBlk,
 						FromWhichBSSID);
 		}
@@ -644,7 +645,7 @@
 
 		/* First check the size, it MUST not exceed the mlme queue size */
 		if (pRxWI->MPDUtotalByteCount > MGMT_DMA_BUFFER_SIZE) {
-			DBGPRINT_ERR(("STAHandleRxMgmtFrame: frame too large, size = %d \n", pRxWI->MPDUtotalByteCount));
+			DBGPRINT_ERR("STAHandleRxMgmtFrame: frame too large, size = %d \n", pRxWI->MPDUtotalByteCount);
 			break;
 		}
 
@@ -853,7 +854,7 @@
 	NONE
 
 Note:
-	This function do early checking and classification for send-out packet.
+	This function does early checking and classification for send-out packet.
 	You only can put OS-depened & STA related code in here.
 ========================================================================
 */
@@ -943,7 +944,7 @@
 		DBGPRINT(RT_DEBUG_ERROR,
 			 ("STASendPacket --> pSrcBufVA == NULL !SrcBufLen=%x\n",
 			  SrcBufLen));
-		/* Resourece is low, system did not allocate virtual address */
+		/* Resource is low, system did not allocate virtual address */
 		/* return NDIS_STATUS_FAILURE directly to upper layer */
 		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
 		return NDIS_STATUS_FAILURE;
@@ -979,7 +980,7 @@
 		DBGPRINT(RT_DEBUG_ERROR,
 			("STASendPacket->Cannot find pEntry(%pM) in MacTab!\n",
 				pSrcBufVA));
-		/* Resourece is low, system did not allocate virtual address */
+		/* Resource is low, system did not allocate virtual address */
 		/* return NDIS_STATUS_FAILURE directly to upper layer */
 		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
 		return NDIS_STATUS_FAILURE;
@@ -1057,9 +1058,9 @@
 
 	/* STEP 2. Check the requirement of RTS: */
 	/*         If multiple fragment required, RTS is required only for the first fragment */
-	/*         if the fragment size large than RTS threshold */
+	/*         if the fragment size is larger than RTS threshold */
 	/*     For RT28xx, Let ASIC send RTS/CTS */
-/*      RTMP_SET_PACKET_RTS(pPacket, 0); */
+	/*      RTMP_SET_PACKET_RTS(pPacket, 0); */
 	if (NumberOfFrag > 1)
 		RTSRequired =
 		    (pAd->CommonCfg.FragmentThreshold >
@@ -1171,8 +1172,8 @@
 	========================================================================
 
 	Routine Description:
-		This subroutine will scan through releative ring descriptor to find
-		out avaliable free ring descriptor and compare with request size.
+		This subroutine will scan through the relevant ring descriptors to find
+		out the available free ring descriptors and compare with the requested size.
 
 	Arguments:
 		pAd Pointer to our adapter
@@ -1588,7 +1589,7 @@
 		pHeaderBufPtr += 2;
 		pTxBlk->MpduHeaderLen += 2;
 	}
-	/* padding at front of LLC header. LLC header should at 4-bytes aligment. */
+	/* padding at front of LLC header. LLC header should be at 4-byte alignment. */
 	pTxBlk->HdrPadLen = (unsigned long)pHeaderBufPtr;
 	pHeaderBufPtr = (u8 *)ROUND_UP(pHeaderBufPtr, 4);
 	pTxBlk->HdrPadLen = (unsigned long)(pHeaderBufPtr - pTxBlk->HdrPadLen);
@@ -2014,7 +2015,7 @@
 		pHeaderBufPtr += 2;
 		pTxBlk->MpduHeaderLen += 2;
 	}
-	/* The remaining content of MPDU header should locate at 4-octets aligment */
+	/* The remaining content of the MPDU header should be located at 4-octet alignment */
 	pTxBlk->HdrPadLen = (unsigned long)pHeaderBufPtr;
 	pHeaderBufPtr = (u8 *)ROUND_UP(pHeaderBufPtr, 4);
 	pTxBlk->HdrPadLen = (unsigned long)(pHeaderBufPtr - pTxBlk->HdrPadLen);
@@ -2114,7 +2115,7 @@
 			    STA_Build_ARalink_Frame_Header(pAd, pTxBlk);
 
 			/* It's ok write the TxWI here, because the TxWI->MPDUtotalByteCount */
-			/*      will be updated after final frame was handled. */
+			/* will be updated after final frame was handled. */
 			RTMPWriteTxWI_Data(pAd,
 					   (struct rt_txwi *) (&pTxBlk->
 							  HeaderBuf
@@ -2291,8 +2292,8 @@
 				      pTxBlk->pExtraLlcSnapEncap, pTxBlk->pKey,
 				      0);
 
-		/* NOTE: DON'T refer the skb->len directly after following copy. Becasue the length is not adjust */
-		/*                      to correct lenght, refer to pTxBlk->SrcBufLen for the packet length in following progress. */
+		/* NOTE: DON'T refer to skb->len directly after the following copy, because the length is not adjusted */
+		/*                      to the correct length; refer to pTxBlk->SrcBufLen for the packet length in the following process. */
 		NdisMoveMemory(pTxBlk->pSrcBufData + pTxBlk->SrcBufLen,
 			       &pAd->PrivateInfo.Tx.MIC[0], 8);
 		/*skb_put((RTPKT_TO_OSPKT(pTxBlk->pPacket))->tail, 8); */
@@ -2301,7 +2302,7 @@
 		pTxBlk->CipherAlg = CIPHER_TKIP_NO_MIC;
 	}
 	/* */
-	/* calcuate the overhead bytes that encryption algorithm may add. This */
+	/* calculate the overhead bytes that encryption algorithm may add. This */
 	/* affects the calculate of "duration" field */
 	/* */
 	if ((pTxBlk->CipherAlg == CIPHER_WEP64)
diff --git a/drivers/staging/rt2860/sta/sanity.c b/drivers/staging/rt2860/sta/sanity.c
index 8f9fd19..0c32604 100644
--- a/drivers/staging/rt2860/sta/sanity.c
+++ b/drivers/staging/rt2860/sta/sanity.c
@@ -32,7 +32,8 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	John Chang  2004-09-01      add WMM support
+	John Chang  		2004-09-01      add WMM support
+	Justin P. Mattock	11/07/2010	Fix typos
 */
 #include "../rt_config.h"
 
@@ -118,7 +119,7 @@
 	NdisMoveMemory(pAid, &pFrame->Octet[4], 2);
 	Length += 2;
 
-	/* Aid already swaped byte order in RTMPFrameEndianChange() for big endian platform */
+	/* Aid already swapped byte order in RTMPFrameEndianChange() for big endian platform */
 	*pAid = (*pAid) & 0x3fff;	/* AID is low 14-bit */
 
 	/* -- get supported rates from payload and advance the pointer */
diff --git a/drivers/staging/rt2860/sta/sync.c b/drivers/staging/rt2860/sta/sync.c
index 747d3c6..7054ba1 100644
--- a/drivers/staging/rt2860/sta/sync.c
+++ b/drivers/staging/rt2860/sta/sync.c
@@ -32,8 +32,9 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	John Chang	2004-09-01      modified for rt2561/2661
-	Jan Lee		2006-08-01      modified for rt2860 for 802.11n
+	John Chang		2004-09-01      	modified for rt2561/2661
+	Jan Lee			2006-08-01      	modified for rt2860 for 802.11n
+	Justin P. Mattock	11/07/2010		Fix typos
 */
 #include "../rt_config.h"
 
@@ -233,9 +234,9 @@
 		RTMPSuspendMsduTransmission(pAd);
 
 		/* */
-		/* To prevent data lost. */
-		/* Send an NULL data with turned PSM bit on to current associated AP before SCAN progress. */
-		/* And should send an NULL data with turned PSM bit off to AP, when scan progress done */
+		/* To prevent data loss. */
+		/* Send a NULL data frame with the PSM bit turned on to the currently associated AP before the SCAN progresses. */
+		/* And send a NULL data frame with the PSM bit turned off to the AP when the scan progress is done. */
 		/* */
 		if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED)
 		    && (INFRA_ON(pAd))) {
@@ -283,7 +284,7 @@
 		DBGPRINT(RT_DEBUG_TRACE, ("SYNC - BBP R4 to 20MHz.l\n"));
 		ScanNextChannel(pAd);
 	} else {
-		DBGPRINT_ERR(("SYNC - MlmeScanReqAction() sanity check fail\n"));
+		DBGPRINT_ERR("SYNC - MlmeScanReqAction() sanity check fail\n");
 		pAd->Mlme.SyncMachine.CurrState = SYNC_IDLE;
 		Status = MLME_INVALID_FORMAT;
 		MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_SCAN_CONF, 2,
@@ -535,7 +536,7 @@
 		MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_START_CONF, 2,
 			    &Status);
 	} else {
-		DBGPRINT_ERR(("SYNC - MlmeStartReqAction() sanity check fail.\n"));
+		DBGPRINT_ERR("SYNC - MlmeStartReqAction() sanity check fail.\n");
 		pAd->Mlme.SyncMachine.CurrState = SYNC_IDLE;
 		Status = MLME_INVALID_FORMAT;
 		MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_START_CONF, 2,
@@ -750,9 +751,9 @@
 
 		/* BEACON from desired BSS/IBSS found. We should be able to decide most */
 		/* BSS parameters here. */
-		/* Q. But what happen if this JOIN doesn't conclude a successful ASSOCIATEION? */
-		/*    Do we need to receover back all parameters belonging to previous BSS? */
-		/* A. Should be not. There's no back-door recover to previous AP. It still need */
+		/* Q. But what happens if this JOIN doesn't conclude a successful ASSOCIATION? */
+		/*    Do we need to recover all parameters belonging to the previous BSS? */
+		/* A. No. There is no back-door recovery to the previous AP. It still needs */
 		/*    a new JOIN-AUTH-ASSOC sequence. */
 		if (MAC_ADDR_EQUAL(pAd->MlmeAux.Bssid, Bssid)) {
 			DBGPRINT(RT_DEBUG_TRACE,
@@ -876,7 +877,7 @@
 			pAd->MlmeAux.CfpMaxDuration = Cf.CfpMaxDuration;
 			pAd->MlmeAux.APRalinkIe = RalinkIe;
 
-			/* Copy AP's supported rate to MlmeAux for creating assoication request */
+			/* Copy AP's supported rate to MlmeAux for creating association request */
 			/* Also filter out not supported rate */
 			pAd->MlmeAux.SupRateLen = SupRateLen;
 			NdisMoveMemory(pAd->MlmeAux.SupRate, SupRate,
@@ -1207,7 +1208,7 @@
 			}
 
 			if (index >= pAd->ChannelListNum) {
-				DBGPRINT_ERR(("PeerBeacon(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum));
+				DBGPRINT_ERR("PeerBeacon(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum);
 			}
 		}
 		/* if the ssid matched & bssid unmatched, we should select the bssid with large value. */
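
The connect.c, rtmp_data.c and sync.c hunks above change calls of the form
DBGPRINT_ERR(("fmt", args)) into DBGPRINT_ERR("fmt", args), dropping the extra
pair of parentheses that old single-argument debug macros required. That only
works if the macro itself has been made variadic in the driver headers, which
is not shown in these hunks; a minimal sketch of the shape such a macro would
take, with an assumed printk() back end:

	/* Sketch only - the real definition lives in the rt2860 headers and
	 * is not part of these hunks. */
	#define DBGPRINT_ERR(fmt, args...) \
		printk(KERN_ERR "ERROR! " fmt, ##args)
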
diff --git a/drivers/staging/rt2860/sta/wpa.c b/drivers/staging/rt2860/sta/wpa.c
index 69b8a24..ff34832 100644
--- a/drivers/staging/rt2860/sta/wpa.c
+++ b/drivers/staging/rt2860/sta/wpa.c
@@ -33,7 +33,8 @@
 	Who			When			What
 	--------	----------		----------------------------------------------
 	Jan	Lee		03-07-22		Initial
-	Paul Lin	03-11-28		Modify for supplicant
+	Paul Lin		03-11-28		Modify for supplicant
+	Justin P. Mattock	11/07/2010		Fix typos
 */
 #include "../rt_config.h"
 
@@ -86,7 +87,7 @@
 			/* Violate MIC error counts, MIC countermeasures kicks in */
 			pAd->StaCfg.MicErrCnt++;
 			/* We shall block all reception */
-			/* We shall clean all Tx ring and disassoicate from AP after next EAPOL frame */
+			/* We shall clean all Tx ring and disassociate from AP after next EAPOL frame */
 			/* */
 			/* No necessary to clean all Tx ring, on RTMPHardTransmit will stop sending non-802.1X EAPOL packets */
 			/* if pAd->StaCfg.MicErrCnt greater than 2. */
diff --git a/drivers/staging/rt2860/sta_ioctl.c b/drivers/staging/rt2860/sta_ioctl.c
index e095a44..5717e12 100644
--- a/drivers/staging/rt2860/sta_ioctl.c
+++ b/drivers/staging/rt2860/sta_ioctl.c
@@ -31,10 +31,11 @@
     IOCTL related subroutines
 
     Revision History:
-    Who         When          What
+    	Who        		 When          What
     --------    ----------    ----------------------------------------------
-    Rory Chen   01-03-2003    created
-	Rory Chen   02-14-2005    modify to support RT61
+   	Rory Chen   		01-03-2003    	created
+	Rory Chen   		02-14-2005    	modify to support RT61
+	Justin P. Mattock	11/07/2010	Fix typos
 */
 
 #include	"rt_config.h"
@@ -851,7 +852,7 @@
 
 		/*
 		   Protocol:
-		   it will show scanned AP's WirelessMode .
+		   it will show scanned AP's WirelessMode.
 		   it might be
 		   802.11a
 		   802.11a/n
@@ -875,13 +876,13 @@
 					strcpy(iwe.u.name, "802.11a");
 			} else {
 				/*
-				   if one of non B mode rate is set supported rate . it mean G only.
+				   if one of the non-B mode rates is set as a supported rate, it means G only.
 				 */
 				for (rateCnt = 0;
 				     rateCnt < pBssEntry->SupRateLen;
 				     rateCnt++) {
 					/*
-					   6Mbps(140) 9Mbps(146) and >=12Mbps(152) are supported rate , it mean G only.
+					   6Mbps(140) 9Mbps(146) and >=12Mbps(152) are supported rates, it means G only.
 					 */
 					if (pBssEntry->SupRate[rateCnt] == 140
 					    || pBssEntry->SupRate[rateCnt] ==
@@ -1417,7 +1418,7 @@
 		if ((index >= 0) && (index < 4)) {
 			pAdapter->StaCfg.DefaultKeyId = index;
 		} else
-			/* Don't complain if only change the mode */
+			/* Don't complain if only the mode is changed */
 		if (!(erq->flags & IW_ENCODE_MODE))
 			return -EINVAL;
 	}
@@ -2732,8 +2733,8 @@
 			}
 			if (INFRA_ON(pAdapter)) {
 				/*BOOLEAN Cancelled; */
-				/* Set the AutoReconnectSsid to prevent it reconnect to old SSID */
-				/* Since calling this indicate user don't want to connect to that SSID anymore. */
+				/* Set the AutoReconnectSsid to prevent it from reconnecting to the old SSID */
+				/* Since calling this indicates users don't want to connect to that SSID anymore. */
 				pAdapter->MlmeAux.AutoReconnectSsidLen = 32;
 				NdisZeroMemory(pAdapter->MlmeAux.
 					       AutoReconnectSsid,
@@ -2766,8 +2767,8 @@
 				LinkDown(pAdapter, FALSE);
 			}
 			if (ADHOC_ON(pAdapter)) {
-				/* Set the AutoReconnectSsid to prevent it reconnect to old SSID */
-				/* Since calling this indicate user don't want to connect to that SSID anymore. */
+				/* Set the AutoReconnectSsid to prevent it from reconnecting to the old SSID */
+				/* Since calling this indicates users don't want to connect to that SSID anymore. */
 				pAdapter->MlmeAux.AutoReconnectSsidLen = 32;
 				NdisZeroMemory(pAdapter->MlmeAux.
 					       AutoReconnectSsid,
@@ -2884,7 +2885,7 @@
 		}
 		/* Enable Rx with promiscuous reception */
 		RTMP_IO_WRITE32(pAdapter, RX_FILTR_CFG, 0x3);
-		/* ASIC supporsts sniffer function with replacing RSSI with timestamp. */
+		/* ASIC supports sniffer function with replacing RSSI with timestamp. */
 		/*RTMP_IO_READ32(pAdapter, MAC_SYS_CTRL, &Value); */
 		/*Value |= (0x80); */
 		/*RTMP_IO_WRITE32(pAdapter, MAC_SYS_CTRL, Value); */
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index cd15daa..ee68d51 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -27,8 +27,8 @@
 #include "rt_config.h"
 
 /* Following information will be show when you run 'modinfo' */
-/* *** If you have a solution for the bug in current version of driver, please mail to me. */
-/* Otherwise post to forum in ralinktech's web site(www.ralinktech.com) and let all users help you. *** */
+/* If you have a solution for the bug in current version of driver, please e-mail me. */
+/* Otherwise post to the forum at ralinktech's web site(www.ralinktech.com) and let all users help you. */
 MODULE_AUTHOR("Paul Lin <paul_lin@ralinktech.com>");
 MODULE_DESCRIPTION("RT2870/RT3070 Wireless Lan Linux Driver");
 MODULE_LICENSE("GPL");
@@ -233,7 +233,7 @@
 	for (i = 0; i < rtusb_usb_id_len; i++) {
 		if (dev_p->descriptor.idVendor == rtusb_usb_id[i].idVendor &&
 		    dev_p->descriptor.idProduct == rtusb_usb_id[i].idProduct) {
-			printk("rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
+			printk(KERN_INFO "rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
 			       dev_p->descriptor.idVendor,
 			       dev_p->descriptor.idProduct);
 			break;
@@ -241,7 +241,7 @@
 	}
 
 	if (i == rtusb_usb_id_len) {
-		printk("rt2870: Error! Device Descriptor not matching!\n");
+		printk(KERN_ERR "rt2870: Error! Device Descriptor not matching!\n");
 		return FALSE;
 	}
 
@@ -323,7 +323,7 @@
 
 	if (!(pAd->BulkInEpAddr && pAd->BulkOutEpAddr[0])) {
 		printk
-		    ("%s: Could not find both bulk-in and bulk-out endpoints\n",
+		    (KERN_ERR "%s: Could not find both bulk-in and bulk-out endpoints\n",
 		     __FUNCTION__);
 		return FALSE;
 	}
@@ -423,7 +423,7 @@
 /* Init driver module */
 int __init rtusb_init(void)
 {
-	printk("rtusb init --->\n");
+	printk(KERN_DEBUG "rtusb init --->\n");
 	return usb_register(&rtusb_driver);
 }
 
@@ -431,7 +431,7 @@
 void __exit rtusb_exit(void)
 {
 	usb_deregister(&rtusb_driver);
-	printk("<--- rtusb exit\n");
+	printk(KERN_DEBUG "<--- rtusb exit\n");
 }
 
 module_init(rtusb_init);
@@ -814,7 +814,7 @@
 		  dev->bus->bus_name, dev->devpath));
 	if (!pAd) {
 		usb_put_dev(dev);
-		printk("rtusb_disconnect: pAd == NULL!\n");
+		printk(KERN_ERR "rtusb_disconnect: pAd == NULL!\n");
 		return;
 	}
 	RTMP_SET_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST);
@@ -884,8 +884,8 @@
 	if (net_dev == NULL)
 		goto err_out_free_radev;
 
-	/* Here are the net_device structure with usb specific parameters. */
-	/* for supporting Network Manager.
+	/* Here is the net_device structure with usb specific parameters
+	 * for supporting Network Manager.
 	 * Set the sysfs physical device reference for the network logical device if set prior to registration will
 	 * cause a symlink during initialization.
 	 */
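
The usb_main_dev.c hunks above add explicit KERN_INFO/KERN_ERR/KERN_DEBUG
levels to bare printk() calls so the messages are filtered at the intended log
level. An equally valid spelling, shown here only as an illustration, uses the
pr_*() helpers, which expand to printk() with the matching level prepended:

	/* Illustration only - same messages as in the hunk, via pr_*() */
	pr_info("rt2870: idVendor = 0x%x, idProduct = 0x%x\n",
		dev_p->descriptor.idVendor, dev_p->descriptor.idProduct);
	pr_err("rt2870: Error! Device Descriptor not matching!\n");
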
diff --git a/drivers/staging/rt2860/wpa.h b/drivers/staging/rt2860/wpa.h
index 6199ae6..116fc2c 100644
--- a/drivers/staging/rt2860/wpa.h
+++ b/drivers/staging/rt2860/wpa.h
@@ -32,13 +32,14 @@
 	Revision History:
 	Who			When			What
 	--------	----------		----------------------------------------------
-	Name		Date			Modification logs
+	Name			Date			Modification logs
+	Justin P. Mattock	11/07/2010		Fix a typo
 */
 
 #ifndef	__WPA_H__
 #define	__WPA_H__
 
-/* EAPOL Key descripter frame format related length */
+/* EAPOL Key descriptor frame format related length */
 #define LEN_KEY_DESC_NONCE			32
 #define LEN_KEY_DESC_IV				16
 #define LEN_KEY_DESC_RSC			8
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
index dd8a221..b26b5a8 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
@@ -822,7 +822,7 @@
 		{
 			txb->queue_index = UP2AC(skb->priority);
 		} else {
-			txb->queue_index = WME_AC_BK;;
+			txb->queue_index = WME_AC_BK;
 		}
 
 
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index b1786dc..fac4eee 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -89,7 +89,7 @@
                         // 	COMP_INTR       |
 				COMP_ERR ; //always open err flags on
 
-static const struct pci_device_id rtl8192_pci_id_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8192_pci_id_tbl) = {
 #ifdef RTL8190P
 	/* Realtek */
 	/* Dlink */
@@ -2283,9 +2283,7 @@
 				IMR_TXFOVW | IMR_BcnInt | IMR_TBDOK | IMR_TBDER);
 
 	priv->AcmControl = 0;
-	priv->pFirmware = (rt_firmware*)vmalloc(sizeof(rt_firmware));
-	if (priv->pFirmware)
-	memset(priv->pFirmware, 0, sizeof(rt_firmware));
+	priv->pFirmware = vzalloc(sizeof(rt_firmware));
 
 	/* rx related queue */
         skb_queue_head_init(&priv->rx_queue);
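
The hunk above collapses a vmalloc() followed by a NULL-guarded memset() into a
single vzalloc() (declared in <linux/vmalloc.h>), which returns already-zeroed
memory. The allocation can still fail, so the result still has to be checked
before use; a minimal sketch of that pattern (the -ENOMEM return is an
assumption here, since the surrounding error handling is not shown in the hunk):

	/* Sketch of the vzalloc() pattern, with an assumed failure path */
	priv->pFirmware = vzalloc(sizeof(rt_firmware));
	if (priv->pFirmware == NULL)
		return -ENOMEM;
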
diff --git a/drivers/staging/rtl8192e/r819xE_phy.c b/drivers/staging/rtl8192e/r819xE_phy.c
index d83bcbc..50cd0e5 100644
--- a/drivers/staging/rtl8192e/r819xE_phy.c
+++ b/drivers/staging/rtl8192e/r819xE_phy.c
@@ -2596,7 +2596,7 @@
 			break;
 	}
 
-	return ret;;
+	return ret;
 
 }
 /******************************************************************************
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index 206d924..eefc657 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -9,7 +9,6 @@
 #ccflags-y += -DUSB_TX_DRIVER_AGGREGATION_ENABLE
 #ccflags-y += -DUSB_RX_AGGREGATION_SUPPORT
 ccflags-y += -DUSE_ONE_PIPE
-ccflags-y += -DENABLE_DOT11D
 ccflags-y += -Idrivers/staging/rtl8192u/ieee80211
 
 r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o		\
diff --git a/drivers/staging/rtl8192u/dot11d.h b/drivers/staging/rtl8192u/dot11d.h
index d99cc03..92e7a00 100644
--- a/drivers/staging/rtl8192u/dot11d.h
+++ b/drivers/staging/rtl8192u/dot11d.h
@@ -1,7 +1,6 @@
 #ifndef __INC_DOT11D_H
 #define __INC_DOT11D_H
 
-#ifdef ENABLE_DOT11D
 #include "ieee80211/ieee80211.h"
 
 
@@ -98,5 +97,4 @@
 	struct ieee80211_device *dev,
 	u8 channel
 );
-#endif /* ENABLE_DOT11D */
 #endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
index 45704f8..0775c55 100644
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ b/drivers/staging/rtl8192u/ieee80211/Makefile
@@ -20,7 +20,6 @@
 endif
 #ccflags-y := -DJOHN_NOCPY
 #flags to enable or disble 80211D feature
-ccflags-y += -DENABLE_DOT11D
 ieee80211-rsl-objs := ieee80211_rx.o \
 		      ieee80211_softmac.o \
 		      ieee80211_tx.o \
@@ -75,7 +74,6 @@
 CFLAGS += -DMODVERSIONS -DEXPORT_SYMTAB -include $(KSRC)/include/linux/modversions.h
 #Kernel 2.4.20
 #CFLAGS += -D__NO_VERSION__ -DEXPORT_SYMTAB
-#CFLAGS += -DENABLE_DOT11D
 SMP := $(shell $(CC) $(MODCFLAGS) -E -dM $(CONFIG_FILE) | \
    grep CONFIG_SMP | awk '{print $$3}')
 ifneq ($(SMP),1)
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index b91cbfc..ce63fc3 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -1,4 +1,3 @@
-#ifdef ENABLE_DOT11D
 //-----------------------------------------------------------------------------
 //	File:
 //		Dot11d.c
@@ -220,4 +219,3 @@
 EXPORT_SYMBOL(IsLegalChannel);
 EXPORT_SYMBOL(ToLegalChannel);
 
-#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
index 15b7a4b..54f2b4c 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.h
@@ -1,10 +1,8 @@
 #ifndef __INC_DOT11D_H
 #define __INC_DOT11D_H
 
-#ifdef ENABLE_DOT11D
 #include "ieee80211.h"
 
-//#define ENABLE_DOT11D
 
 //#define DOT11D_MAX_CHNL_NUM 83
 
@@ -98,5 +96,4 @@
 	struct ieee80211_device * dev,
 	u8 channel
 );
-#endif //ENABLE_DOT11D
 #endif // #ifndef __INC_DOT11D_H
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index e1216b7..c0b844d 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1572,10 +1572,8 @@
 #ifdef THOMAS_TURBO
 	u8 Turbo_Enable;//enable turbo mode, added by thomas
 #endif
-#ifdef ENABLE_DOT11D
 	u16 CountryIeLen;
 	u8 CountryIeBuf[MAX_IE_LEN];
-#endif
 	// HT Related, by amy, 2008.04.29
 	BSS_HT	bssht;
 	// Add to handle broadcom AP management frame CCK rate.
@@ -1769,7 +1767,6 @@
 #define RF_CHANGE_BY_IPS BIT28
 #define RF_CHANGE_BY_INIT	0	// Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
 
-#ifdef ENABLE_DOT11D
 typedef enum
 {
 	COUNTRY_CODE_FCC = 0,
@@ -1784,7 +1781,6 @@
 	COUNTRY_CODE_MIC,
 	COUNTRY_CODE_GLOBAL_DOMAIN
 }country_code_type_t;
-#endif
 
 #define RT_MAX_LD_SLOT_NUM	10
 typedef struct _RT_LINK_DETECT_T{
@@ -1970,12 +1966,8 @@
 
 	/* map of allowed channels. 0 is dummy */
 	// FIXME: remeber to default to a basic channel plan depending of the PHY type
-#ifdef ENABLE_DOT11D
 	void* pDot11dInfo;
 	bool bGlobalDomain;
-#else
-	int channel_map[MAX_CHANNEL_NUMBER+1];
-#endif
 	int rate;       /* current rate */
 	int basic_rate;
 	//FIXME: pleace callback, see if redundant with softmac_features
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index c8ca9d8..1ea8da3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -44,9 +44,7 @@
 #include <linux/ctype.h>
 
 #include "ieee80211.h"
-#ifdef ENABLE_DOT11D
 #include "dot11d.h"
-#endif
 static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
 					struct sk_buff *skb,
 					struct ieee80211_rx_stats *rx_stats)
@@ -1599,7 +1597,6 @@
 }
 #endif
 
-#ifdef ENABLE_DOT11D
 static inline void ieee80211_extract_country_ie(
 	struct ieee80211_device *ieee,
 	struct ieee80211_info_element *info_element,
@@ -1632,7 +1629,6 @@
 	}
 
 }
-#endif
 
 int ieee80211_parse_info_param(struct ieee80211_device *ieee,
 		struct ieee80211_info_element *info_element,
@@ -2086,14 +2082,12 @@
 			       "QoS Error need to parse QOS_PARAMETER IE\n");
 			break;
 
-#ifdef ENABLE_DOT11D
 		case MFIE_TYPE_COUNTRY:
 			IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
 					     info_element->len);
 			//printk("=====>Receive <%s> Country IE\n",network->ssid);
 			ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP
 			break;
-#endif
 /* TODO */
 		default:
 			IEEE80211_DEBUG_MGMT
@@ -2229,10 +2223,8 @@
 #ifdef THOMAS_TURBO
 	network->Turbo_Enable = 0;
 #endif
-#ifdef ENABLE_DOT11D
 	network->CountryIeLen = 0;
 	memset(network->CountryIeBuf, 0, MAX_IE_LEN);
-#endif
 //Initialize HT parameters
 	//ieee80211_ht_initialize(&network->bssht);
 	HTInitializeBssDesc(&network->bssht);
@@ -2399,10 +2391,8 @@
 	dst->Turbo_Enable = src->Turbo_Enable;
 #endif
 
-#ifdef ENABLE_DOT11D
 	dst->CountryIeLen = src->CountryIeLen;
 	memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
-#endif
 
 	//added by amy for LEAP
 	dst->bWithAironetIE = src->bWithAironetIE;
@@ -2470,7 +2460,6 @@
 		return;
 	}
 
-#ifdef ENABLE_DOT11D
 	// For Asus EeePc request,
 	// (1) if wireless adapter receive get any 802.11d country code in AP beacon,
 	//	   wireless adapter should follow the country code.
@@ -2527,7 +2516,6 @@
 			}
 		}
 	}
-#endif
 
 	/* The network parsed correctly -- so now we scan our known networks
 	 * to see if we can find it in our list.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index bc8c425..20f8c34 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -21,9 +21,7 @@
 #include <linux/slab.h>
 #include <linux/version.h>
 #include <asm/uaccess.h>
-#ifdef ENABLE_DOT11D
 #include "dot11d.h"
-#endif
 
 u8 rsn_authen_cipher_suite[16][4] = {
 	{0x00,0x0F,0xAC,0x00}, //Use group key, //Reserved
@@ -430,10 +428,8 @@
 void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
 {
 	short ch = 0;
-#ifdef ENABLE_DOT11D
 	u8 channel_map[MAX_CHANNEL_NUMBER+1];
 	memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-#endif
 	down(&ieee->scan_sem);
 
 	while(1)
@@ -443,11 +439,7 @@
 			ch++;
 			if (ch > MAX_CHANNEL_NUMBER)
 				goto out; /* scan completed */
-#ifdef ENABLE_DOT11D
 		}while(!channel_map[ch]);
-#else
-		}while(!ieee->channel_map[ch]);
-#endif
 
 		/* this function can be called in two situations
 		 * 1- We have switched to ad-hoc mode and we are
@@ -471,9 +463,7 @@
 		if (ieee->state == IEEE80211_LINKED)
 			goto out;
 		ieee->set_chan(ieee->dev, ch);
-#ifdef ENABLE_DOT11D
 		if(channel_map[ch] == 1)
-#endif
 		ieee80211_send_probe_requests(ieee);
 
 		/* this prevent excessive time wait when we
@@ -496,10 +486,8 @@
 	}
 	else{
 	ieee->sync_scan_hurryup = 0;
-#ifdef ENABLE_DOT11D
 	if(IS_DOT11D_ENABLE(ieee))
 		DOT11D_ScanComplete(ieee);
-#endif
 	up(&ieee->scan_sem);
 }
 }
@@ -510,10 +498,8 @@
 	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
 	struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
 	static short watchdog = 0;
-#ifdef ENABLE_DOT11D
 	u8 channel_map[MAX_CHANNEL_NUMBER+1];
 	memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-#endif
 	if(!ieee->ieee_up)
 		return;
 	down(&ieee->scan_sem);
@@ -523,25 +509,16 @@
 		if (watchdog++ > MAX_CHANNEL_NUMBER)
 		{
 		//if current channel is not in channel map, set to default channel.
-		#ifdef ENABLE_DOT11D
-			if (!channel_map[ieee->current_network.channel]);
-		#else
-			if (!ieee->channel_map[ieee->current_network.channel]);
-		#endif
+			if (!channel_map[ieee->current_network.channel]) {
 				ieee->current_network.channel = 6;
 				goto out; /* no good chans */
+			}
 		}
-#ifdef ENABLE_DOT11D
 	}while(!channel_map[ieee->current_network.channel]);
-#else
-	}while(!ieee->channel_map[ieee->current_network.channel]);
-#endif
 	if (ieee->scanning == 0 )
 		goto out;
 	ieee->set_chan(ieee->dev, ieee->current_network.channel);
-#ifdef ENABLE_DOT11D
 	if(channel_map[ieee->current_network.channel] == 1)
-#endif
 	ieee80211_send_probe_requests(ieee);
 
 
@@ -550,10 +527,8 @@
 	up(&ieee->scan_sem);
 	return;
 out:
-#ifdef ENABLE_DOT11D
 	if(IS_DOT11D_ENABLE(ieee))
 		DOT11D_ScanComplete(ieee);
-#endif
 	ieee->actscanning = false;
 	watchdog = 0;
 	ieee->scanning = 0;
@@ -635,7 +610,6 @@
 /* called with ieee->lock held */
 void ieee80211_start_scan(struct ieee80211_device *ieee)
 {
-#ifdef ENABLE_DOT11D
 	if(IS_DOT11D_ENABLE(ieee) )
 	{
 		if(IS_COUNTRY_IE_VALID(ieee))
@@ -643,7 +617,6 @@
 			RESET_CIE_WATCHDOG(ieee);
 		}
 	}
-#endif
 	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
 		if (ieee->scanning == 0){
 			ieee->scanning = 1;
@@ -657,7 +630,6 @@
 /* called with wx_sem held */
 void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
 {
-#ifdef ENABLE_DOT11D
 	if(IS_DOT11D_ENABLE(ieee) )
 	{
 		if(IS_COUNTRY_IE_VALID(ieee))
@@ -665,7 +637,6 @@
 			RESET_CIE_WATCHDOG(ieee);
 		}
 	}
-#endif
 	ieee->sync_scan_hurryup = 0;
 	if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
 		ieee80211_softmac_scan_syncro(ieee);
@@ -2390,11 +2361,9 @@
 	ieee80211_softmac_check_all_nets(ieee);
 
 
-#ifdef ENABLE_DOT11D //if creating an ad-hoc, set its channel to 10 temporarily--this is the requirement for ASUS, not 11D, so disable 11d.
 //	if((IS_DOT11D_ENABLE(ieee)) && (ieee->state == IEEE80211_NOLINK))
 	if (ieee->state == IEEE80211_NOLINK)
 		ieee->current_network.channel = 6;
-#endif
 	/* if not then the state is not linked. Maybe the user swithced to
 	 * ad-hoc mode just after being in monitor mode, or just after
 	 * being very few time in managed mode (so the card have had no
@@ -2483,7 +2452,6 @@
 void ieee80211_start_bss(struct ieee80211_device *ieee)
 {
 	unsigned long flags;
-#ifdef ENABLE_DOT11D
 	//
 	// Ref: 802.11d 11.1.3.3
 	// STA shall not start a BSS unless properly formed Beacon frame including a Country IE.
@@ -2495,7 +2463,6 @@
 			return;
 		}
 	}
-#endif
 	/* check if we have already found the net we
 	 * are interested in (if any).
 	 * if not (we are disassociated and we are not
@@ -2530,10 +2497,8 @@
 
 	if (ieee->data_hard_stop)
 			ieee->data_hard_stop(ieee->dev);
-#ifdef ENABLE_DOT11D
 	if(IS_DOT11D_ENABLE(ieee))
 		Dot11d_Reset(ieee);
-#endif
 	ieee->state = IEEE80211_NOLINK;
 	ieee->is_set_key = false;
 	ieee->link_change(ieee->dev);
@@ -2669,11 +2634,7 @@
 			ch++;
 			if (ch > MAX_CHANNEL_NUMBER)
 				return; /* no channel found */
-#ifdef ENABLE_DOT11D
 		}while(!GET_DOT11D_INFO(ieee)->channel_map[ch]);
-#else
-		}while(!ieee->channel_map[ch]);
-#endif
 		ieee->current_network.channel = ch;
 	}
 
@@ -2721,11 +2682,9 @@
 	for(i = 0; i < 5; i++) {
 	  ieee->seq_ctrl[i] = 0;
 	}
-#ifdef ENABLE_DOT11D
 	ieee->pDot11dInfo = kzalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
 	if (!ieee->pDot11dInfo)
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "can't alloc memory for DOT11D\n");
-#endif
 	//added for  AP roaming
 	ieee->LinkDetectInfo.SlotNum = 2;
 	ieee->LinkDetectInfo.NumRecvBcnInPeriod=0;
@@ -2796,13 +2755,11 @@
 void ieee80211_softmac_free(struct ieee80211_device *ieee)
 {
 	down(&ieee->wx_sem);
-#ifdef ENABLE_DOT11D
 	if(NULL != ieee->pDot11dInfo)
 	{
 		kfree(ieee->pDot11dInfo);
 		ieee->pDot11dInfo = NULL;
 	}
-#endif
 	del_timer_sync(&ieee->associate_timer);
 
 	cancel_delayed_work(&ieee->associate_retry_wq);
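
One of the softmac_scan_wq hunks above is a real bug fix rather than a cleanup:
a stray semicolon right after the if condition ended the statement early, so
the watchdog fallback ran unconditionally. Reduced to its essentials, the
change is:

	/* Before: the ';' ends the if, so the fallback always executed */
	if (!channel_map[ieee->current_network.channel]);
		ieee->current_network.channel = 6;

	/* After: the fallback and the goto only run when the channel is
	 * not present in the channel map */
	if (!channel_map[ieee->current_network.channel]) {
		ieee->current_network.channel = 6;
		goto out;
	}
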
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index f335c25..cb5a3c3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -15,9 +15,7 @@
 
 
 #include "ieee80211.h"
-#ifdef ENABLE_DOT11D
 #include "dot11d.h"
-#endif
 /* FIXME: add A freqs */
 
 const long ieee80211_wlan_frequencies[] = {
@@ -63,12 +61,10 @@
 
 	}else { /* Set the channel */
 
-#ifdef ENABLE_DOT11D
 		if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) {
 			ret = -EINVAL;
 			goto out;
 		}
-#endif
 		ieee->current_network.channel = fwrq->m;
 		ieee->set_chan(ieee->dev, ieee->current_network.channel);
 
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 81aa2ed..ec7845e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -754,7 +754,7 @@
 		{
 			txb->queue_index = UP2AC(skb->priority);
 		} else {
-			txb->queue_index = WME_AC_BK;;
+			txb->queue_index = WME_AC_BK;
 		}
 
 
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 494f180..ae4f2b9 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -77,9 +77,7 @@
 #include "r8192_pm.h"
 #endif
 
-#ifdef ENABLE_DOT11D
 #include "dot11d.h"
-#endif
 //set here to open your trace code. //WB
 u32 rt_global_debug_component = \
 			//	COMP_INIT    	|
@@ -166,7 +164,6 @@
 #endif
 };
 
-#ifdef ENABLE_DOT11D
 
 typedef struct _CHANNEL_LIST
 {
@@ -242,9 +239,7 @@
 	}
 	return;
 }
-#endif
 
-#define eqMacAddr(a,b) ( ((a)[0]==(b)[0] && (a)[1]==(b)[1] && (a)[2]==(b)[2] && (a)[3]==(b)[3] && (a)[4]==(b)[4] && (a)[5]==(b)[5]) ? 1:0 )
 
 #define 	rx_hal_is_cck_rate(_pdrvinfo)\
 			(_pdrvinfo->RxRate == DESC90_RATE1M ||\
@@ -1507,7 +1502,7 @@
 	{
 		//
 		// Handle HW Beacon:
-		// We had transfer our beacon frame to host controler at this moment.
+		// We had transfer our beacon frame to host controller at this moment.
 		//
 		//
 		// Caution:
@@ -2203,6 +2198,8 @@
 
 	priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB+1),
 				GFP_KERNEL);
+	if (priv->rx_urb == NULL)
+		return -ENOMEM;
 
 #ifndef JACKSON_NEW_RX
 	for(i=0;i<(MAX_RX_URB+1);i++){
@@ -3155,7 +3152,6 @@
 short rtl8192_get_channel_map(struct net_device * dev)
 {
 	struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef ENABLE_DOT11D
 	if(priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN){
 		printk("rtl8180_init:Error channel plan! Set to default.\n");
 		priv->ChannelPlan= 0;
@@ -3163,21 +3159,6 @@
 	RT_TRACE(COMP_INIT, "Channel plan is %d\n",priv->ChannelPlan);
 
 	rtl819x_set_channel_map(priv->ChannelPlan, priv);
-#else
-	int ch,i;
-	//Set Default Channel Plan
-	if(!channels){
-		DMESG("No channels, aborting");
-		return -1;
-	}
-	ch=channels;
-	priv->ChannelPlan= 0;//hikaru
-	 // set channels 1..14 allowed in given locale
-	for (i=1; i<=14; i++) {
-		(priv->ieee80211->channel_map)[i] = (u8)(ch & 0x01);
-		ch >>= 1;
-	}
-#endif
 	return 0;
 }
 
@@ -5085,7 +5066,7 @@
 			//Get Rx snr value in DB
 			tmp_rxsnr =	pofdm_buf->rxsnr_X[i];
 			rx_snrX = (char)(tmp_rxsnr);
-			//rx_snrX >>= 1;;
+			//rx_snrX >>= 1;
 			rx_snrX /= 2;
 			priv->stats.rxSNRdB[i] = (long)rx_snrX;
 
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 25d5c87..f6408f9 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -21,9 +21,7 @@
 #include "r8192U.h"
 #include "r8192U_hw.h"
 
-#ifdef ENABLE_DOT11D
 #include "dot11d.h"
-#endif
 
 #define RATE_COUNT 12
 u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000,
@@ -458,11 +456,7 @@
 	for (i = 0, val = 0; i < 14; i++) {
 
 		// Include only legal frequencies for some countries
-#ifdef ENABLE_DOT11D
 		if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
-#else
-		if ((priv->ieee80211->channel_map)[i+1]) {
-#endif
 			range->freq[val].i = i + 1;
 			range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
 			range->freq[val].e = 1;
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index a3adaed..41684e8 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -6,9 +6,7 @@
 #include "r8192U_dm.h"
 #include "r819xU_firmware_img.h"
 
-#ifdef ENABLE_DOT11D
 #include "dot11d.h"
-#endif
 static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
 	0,
 	0x085c, //2412 1
@@ -1011,7 +1009,7 @@
 			break;
 	}
 
-	return ret;;
+	return ret;
 
 }
 /******************************************************************************
@@ -1257,13 +1255,11 @@
 
 	RT_TRACE(COMP_CH, "====>%s()====stage:%d, step:%d, channel:%d\n", __FUNCTION__, *stage, *step, channel);
 //	RT_ASSERT(IsLegalChannel(Adapter, channel), ("illegal channel: %d\n", channel));
-#ifdef ENABLE_DOT11D
 	if (!IsLegalChannel(priv->ieee80211, channel))
 	{
 		RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel);
 		return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop.
 	}
-#endif
 //FIXME:need to check whether channel is legal or not here.WB
 
 
diff --git a/drivers/staging/rtl8712/TODO b/drivers/staging/rtl8712/TODO
index 5c88821..2aa5deb 100644
--- a/drivers/staging/rtl8712/TODO
+++ b/drivers/staging/rtl8712/TODO
@@ -3,7 +3,6 @@
 - switch to use LIB80211
 - switch to use MAC80211
 - checkpatch.pl fixes - only a few remain
-- sparse fixes
 - switch from large inline firmware file to use the firmware interface
   and add the file to the linux-firmware package.
 
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 831d81e..36eeb5a 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -195,10 +195,7 @@
 
 static inline u8 *_malloc(u32 sz)
 {
-	u8 *pbuf;
-
-	pbuf =	kmalloc(sz, GFP_ATOMIC);
-	return pbuf;
+	return	kmalloc(sz, GFP_ATOMIC);
 }
 
 static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
@@ -220,34 +217,22 @@
 
 static inline u32 _RND8(u32 sz)
 {
-	u32	val;
-
-	val = ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
-	return val;
+	return ((sz >> 3) + ((sz & 7) ? 1 : 0)) << 3;
 }
 
 static inline u32 _RND128(u32 sz)
 {
-	u32	val;
-
-	val = ((sz >> 7) + ((sz & 127) ? 1 : 0)) << 7;
-	return val;
+	return ((sz >> 7) + ((sz & 127) ? 1 : 0)) << 7;
 }
 
 static inline u32 _RND256(u32 sz)
 {
-	u32	val;
-
-	val = ((sz >> 8) + ((sz & 255) ? 1 : 0)) << 8;
-	return val;
+	return ((sz >> 8) + ((sz & 255) ? 1 : 0)) << 8;
 }
 
 static inline u32 _RND512(u32 sz)
 {
-	u32	val;
-
-	val = ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
-	return val;
+	return ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
 }
 
 #define STRUCT_PACKED __attribute__ ((packed))
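
The osdep_service.h hunk above simplifies the _RND8/_RND128/_RND256/_RND512
helpers, which round a size up to the next multiple of a power of two. For
reference, the kernel's ALIGN() macro from <linux/kernel.h> computes the same
thing for power-of-two boundaries; a hypothetical equivalent of _RND8, shown
purely as an illustration:

	/* _RND8_alt is a made-up name for illustration; both forms map
	 * 13 to 16 and leave 16 unchanged. */
	static inline u32 _RND8_alt(u32 sz)
	{
		return ALIGN(sz, 8);
	}
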
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 9730ae1..1dc12b7 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -428,7 +428,7 @@
 		      u16 cnts, u8 *data)
 {
 	int i;
-	u8 res = true;;
+	u8 res = true;
 
 	if (start_addr > EFUSE_MAX_SIZE)
 		return false;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index 8edc518..88a1504 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -148,7 +148,7 @@
 		case 0x11:
 		case 0x12:
 		case 0x13:
-			addr = RTL8712_DMA_H2CCMD;;
+			addr = RTL8712_DMA_H2CCMD;
 			break;
 		default:
 			addr = RTL8712_DMA_BEQ;/*RTL8712_EP_LO;*/
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index b8195e3..75f1a6b 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -372,7 +372,7 @@
 					   0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
 					   0x0, 0x0};
 			datalen = pattrib->pktlen - pattrib->hdrlen;
-			pframe = pxmitframe->buf_addr + TXDESC_OFFSET;;
+			pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
 			if (bmcst) {
 				if (!memcmp(psecuritypriv->XGrptxmickey
 				   [psecuritypriv->XGrpKeyid].skey,
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index f1f0c63..a692ee8 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -527,7 +527,7 @@
 static void r871xu_dev_remove(struct usb_interface *pusb_intf)
 {
 	struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
-	struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
+	struct _adapter *padapter = netdev_priv(pnetdev);
 	struct usb_device *udev = interface_to_usbdev(pusb_intf);
 
 	if (padapter) {
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index ac2bf11..701e8d5 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -269,7 +269,7 @@
 	return 0;
 }
 
-static struct backlight_ops backlight_ops = {
+static const struct backlight_ops backlight_ops = {
 	.get_brightness	= get_brightness,
 	.update_status	= update_status,
 };
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
new file mode 100644
index 0000000..92bf166
--- /dev/null
+++ b/drivers/staging/sep/Kconfig
@@ -0,0 +1,10 @@
+config DX_SEP
+	tristate "Discretix SEP driver"
+	depends on PCI
+	help
+	  Discretix SEP driver; used for the security processor subsystem
+	  on board the Intel Mobile Internet Device.
+
+	  The driver's name is sep_driver.
+
+	  If unsure, select N.
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
new file mode 100644
index 0000000..628d5f9
--- /dev/null
+++ b/drivers/staging/sep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DX_SEP) := sep_driver.o
+
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
new file mode 100644
index 0000000..089c240
--- /dev/null
+++ b/drivers/staging/sep/TODO
@@ -0,0 +1,5 @@
+TODOs so far (from Alan Cox)
+- Check whether it can be plugged into any of the kernel crypto API
+  interfaces - Crypto API 'glue' is still not ready to submit
+- Clean up unused ioctls - Needs vendor help
+- Clean up unused fields in ioctl structures - Needs vendor help
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
new file mode 100644
index 0000000..0ffe68c
--- /dev/null
+++ b/drivers/staging/sep/sep_dev.h
@@ -0,0 +1,156 @@
+#ifndef __SEP_DEV_H__
+#define __SEP_DEV_H__
+
+/*
+ *
+ *  sep_dev.h - Security Processor Device Structures
+ *
+ *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ *  Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *  CONTACTS:
+ *
+ *  Mark Allyn		mark.a.allyn@intel.com
+ *  Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ *  CHANGES
+ *  2010.09.14  upgrade to Medfield
+ */
+
+struct sep_device {
+	/* pointer to pci dev */
+	struct pci_dev *pdev;
+
+	/* character device file */
+	struct cdev sep_cdev;
+	struct cdev sep_daemon_cdev;
+	struct cdev sep_singleton_cdev;
+
+	/* devices (using misc dev) */
+	struct miscdevice miscdev_sep;
+	struct miscdevice miscdev_singleton;
+	struct miscdevice miscdev_daemon;
+
+	/* major / minor numbers of device */
+	dev_t sep_devno;
+	dev_t sep_daemon_devno;
+	dev_t sep_singleton_devno;
+
+	struct mutex sep_mutex;
+	struct mutex ioctl_mutex;
+	spinlock_t snd_rply_lck;
+
+	/* flags to indicate use and lock status of sep */
+	u32 pid_doing_transaction;
+	unsigned long in_use_flags;
+
+	/* request daemon already open */
+	unsigned long request_daemon_open;
+
+	/* 1 = Moorestown; 0 = Medfield */
+	int mrst;
+
+	/* address of the shared memory allocated during init for SEP driver
+	   (coherent alloc) */
+	dma_addr_t shared_bus;
+	size_t shared_size;
+	void *shared_addr;
+
+	/* restricted access region (coherent alloc) */
+	dma_addr_t rar_bus;
+	size_t rar_size;
+	void *rar_addr;
+
+	/* Firmware regions; cache is at rar for Moorestown and
+	   resident is at rar for Medfield */
+	dma_addr_t cache_bus;
+	size_t cache_size;
+	void *cache_addr;
+
+	dma_addr_t resident_bus;
+	size_t resident_size;
+	void *resident_addr;
+
+	/* sep's scratchpad */
+	dma_addr_t dcache_bus;
+	size_t dcache_size;
+	void *dcache_addr;
+
+	/* Only used on Medfield */
+	dma_addr_t extapp_bus;
+	size_t extapp_size;
+	void *extapp_addr;
+
+	/* start address of the access to the SEP registers from driver */
+	dma_addr_t reg_physical_addr;
+	dma_addr_t reg_physical_end;
+	void __iomem *reg_addr;
+
+	/* wait queue head (event) of the driver */
+	wait_queue_head_t event;
+	wait_queue_head_t event_request_daemon;
+	wait_queue_head_t event_mmap;
+
+	struct sep_caller_id_entry
+		caller_id_table[SEP_CALLER_ID_TABLE_NUM_ENTRIES];
+
+	/* access flag for singleton device */
+	unsigned long singleton_access_flag;
+
+	/* transaction counter that coordinates the
+	   transactions between SEP and HOST */
+	unsigned long send_ct;
+	/* counter for the messages from sep */
+	unsigned long reply_ct;
+	/* counter for the number of bytes allocated in the pool for the
+	   current transaction */
+	long data_pool_bytes_allocated;
+
+	u32 num_of_data_allocations;
+
+	/* number of the lli tables created in the current transaction */
+	u32     num_lli_tables_created;
+
+	/* number of data control blocks */
+	u32 nr_dcb_creat;
+
+	struct sep_dma_resource dma_res_arr[SEP_MAX_NUM_SYNC_DMA_OPS];
+
+};
+
+static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
+{
+	void __iomem *addr = dev->reg_addr + reg;
+	writel(value, addr);
+}
+
+static inline u32 sep_read_reg(struct sep_device *dev, int reg)
+{
+	void __iomem *addr = dev->reg_addr + reg;
+	return readl(addr);
+}
+
+/* wait for SRAM write complete (indirect write) */
+static inline void sep_wait_sram_write(struct sep_device *dev)
+{
+	u32 reg_val;
+	do {
+		reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
+	} while (!(reg_val & 1));
+}
+
+
+#endif
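
sep_wait_sram_write() above busy-waits on bit 0 of HW_SRAM_DATA_READY_REG_ADDR
with no pause and no bound. A hedged variant of the same loop, adding
cpu_relax() and an illustrative timeout (the 100 ms figure is an assumption,
not a documented SEP requirement):

	/* Sketch only: same poll as sep_wait_sram_write(), with a bound */
	static inline int sep_wait_sram_write_timeout(struct sep_device *dev)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(100);

		while (!(sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR) & 1)) {
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;
			cpu_relax();
		}
		return 0;
	}
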
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
new file mode 100644
index 0000000..ac5d569
--- /dev/null
+++ b/drivers/staging/sep/sep_driver.c
@@ -0,0 +1,3577 @@
+/*
+ *
+ *  sep_driver.c - Security Processor Driver main group of functions
+ *
+ *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ *  Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *  CONTACTS:
+ *
+ *  Mark Allyn		mark.a.allyn@intel.com
+ *  Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ *  CHANGES:
+ *
+ *  2009.06.26	Initial publish
+ *  2010.09.14  Upgrade to Medfield
+ *
+ */
+#define DEBUG
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/kdev_t.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+#include <asm/current.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/rar_register.h>
+
+#include "../memrar/memrar.h"
+
+#include "sep_driver_hw_defs.h"
+#include "sep_driver_config.h"
+#include "sep_driver_api.h"
+#include "sep_dev.h"
+
+/*----------------------------------------
+	DEFINES
+-----------------------------------------*/
+
+#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
+
+/*--------------------------------------------
+	GLOBAL variables
+--------------------------------------------*/
+
+/* Keep this a single static object for now to keep the conversion easy */
+
+static struct sep_device *sep_dev;
+
+/**
+ *	sep_load_firmware - copy firmware cache/resident
+ *	@sep: pointer to struct sep_device we are loading
+ *
+ *	This function copies the cache and resident from their source
+ *	location into destination shared memory.
+ */
+static int sep_load_firmware(struct sep_device *sep)
+{
+	const struct firmware *fw;
+	char *cache_name = "cache.image.bin";
+	char *res_name = "resident.image.bin";
+	char *extapp_name = "extapp.image.bin";
+	int error;
+	unsigned long work1, work2, work3;
+
+	/* Set addresses and load resident */
+	sep->resident_bus = sep->rar_bus;
+	sep->resident_addr = sep->rar_addr;
+
+	error = request_firmware(&fw, res_name, &sep->pdev->dev);
+	if (error) {
+		dev_warn(&sep->pdev->dev, "can't request resident fw\n");
+		return error;
+	}
+
+	memcpy(sep->resident_addr, (void *)fw->data, fw->size);
+	sep->resident_size = fw->size;
+	release_firmware(fw);
+
+	dev_dbg(&sep->pdev->dev, "resident virtual is %p\n",
+		sep->resident_addr);
+	dev_dbg(&sep->pdev->dev, "resident bus is %lx\n",
+		(unsigned long)sep->resident_bus);
+	dev_dbg(&sep->pdev->dev, "resident size is %08zx\n",
+		sep->resident_size);
+
+	/* Set addresses for dcache (no loading needed) */
+	work1 = (unsigned long)sep->resident_bus;
+	work2 = (unsigned long)sep->resident_size;
+	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
+	sep->dcache_bus = (dma_addr_t)work3;
+
+	work1 = (unsigned long)sep->resident_addr;
+	work2 = (unsigned long)sep->resident_size;
+	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
+	sep->dcache_addr = (void *)work3;
+
+	sep->dcache_size = 1024 * 128;
+
+	/* Set addresses and load cache */
+	sep->cache_bus = sep->dcache_bus + sep->dcache_size;
+	sep->cache_addr = sep->dcache_addr + sep->dcache_size;
+
+	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
+	if (error) {
+		dev_warn(&sep->pdev->dev, "Unable to request cache firmware\n");
+		return error;
+	}
+
+	memcpy(sep->cache_addr, (void *)fw->data, fw->size);
+	sep->cache_size = fw->size;
+	release_firmware(fw);
+
+	dev_dbg(&sep->pdev->dev, "cache virtual is %p\n",
+		sep->cache_addr);
+	dev_dbg(&sep->pdev->dev, "cache bus is %08lx\n",
+		(unsigned long)sep->cache_bus);
+	dev_dbg(&sep->pdev->dev, "cache size is %08zx\n",
+		sep->cache_size);
+
+	/* Set addresses and load extapp */
+	sep->extapp_bus = sep->cache_bus + (1024 * 370);
+	sep->extapp_addr = sep->cache_addr + (1024 * 370);
+
+	error = request_firmware(&fw, extapp_name, &sep->pdev->dev);
+	if (error) {
+		dev_warn(&sep->pdev->dev, "Unable to request extapp firmware\n");
+		return error;
+	}
+
+	memcpy(sep->extapp_addr, (void *)fw->data, fw->size);
+	sep->extapp_size = fw->size;
+	release_firmware(fw);
+
+	dev_dbg(&sep->pdev->dev, "extapp virtual is %p\n",
+		sep->extapp_addr);
+	dev_dbg(&sep->pdev->dev, "extapp bus is %08llx\n",
+		(unsigned long long)sep->extapp_bus);
+	dev_dbg(&sep->pdev->dev, "extapp size is %08zx\n",
+		sep->extapp_size);
+
+	return error;
+}
+
+MODULE_FIRMWARE("sep/cache.image.bin");
+MODULE_FIRMWARE("sep/resident.image.bin");
+MODULE_FIRMWARE("sep/extapp.image.bin");
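
The dcache placement in sep_load_firmware() above relies on the expression
(base + size + 4 KB) & 0xfffff000, which lands on the first 4 KB boundary
strictly above the end of the resident image (an already aligned end still
skips to the next page). A hypothetical helper that just names that
computation, with worked values in the comment:

	/* Illustration only - mirrors the inline arithmetic above:
	 *   end = 0x1234 -> 0x2000, end = 0x2000 -> 0x3000 */
	static inline unsigned long sep_next_page_above(unsigned long end)
	{
		return (end + 4096) & 0xfffff000;
	}
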
+
+/**
+ *	sep_dump_message - dump the message that is pending
+ *	@sep: SEP device
+ */
+static void sep_dump_message(struct sep_device *sep)
+{
+	int count;
+	u32 *p = sep->shared_addr;
+	for (count = 0; count < 12 * 4; count += 4)
+		dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
+								count, *p++);
+}
+
+/**
+ *	sep_map_and_alloc_shared_area -	allocate shared block
+ *	@sep: security processor (the size is taken from sep->shared_size)
+ */
+static int sep_map_and_alloc_shared_area(struct sep_device *sep)
+{
+	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
+		sep->shared_size,
+		&sep->shared_bus, GFP_KERNEL);
+
+	if (!sep->shared_addr) {
+		dev_warn(&sep->pdev->dev,
+			"shared memory dma_alloc_coherent failed\n");
+		return -ENOMEM;
+	}
+	dev_dbg(&sep->pdev->dev,
+		"shared_addr %zx bytes @%p (bus %llx)\n",
+				sep->shared_size, sep->shared_addr,
+				(unsigned long long)sep->shared_bus);
+	return 0;
+}
+
+/**
+ *	sep_unmap_and_free_shared_area - free shared block
+ *	@sep: security processor
+ */
+static void sep_unmap_and_free_shared_area(struct sep_device *sep)
+{
+	dev_dbg(&sep->pdev->dev, "shared area unmap and free\n");
+	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
+				sep->shared_addr, sep->shared_bus);
+}
+
+/**
+ *	sep_shared_bus_to_virt - convert bus/virt addresses
+ *	@sep: pointer to struct sep_device
+ *	@bus_address: address to convert
+ *
+ *	Returns virtual address inside the shared area according
+ *	to the bus address.
+ */
+static void *sep_shared_bus_to_virt(struct sep_device *sep,
+						dma_addr_t bus_address)
+{
+	return sep->shared_addr + (bus_address - sep->shared_bus);
+}
+
+/**
+ *	open function for the singleton driver
+ *	sep_singleton_open - open function for the singleton driver
+ *	@inode_ptr: struct inode *
+ *	@file_ptr: struct file *
+ *	Called when the user opens the singleton device interface
+ */
+static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
+{
+	int error = 0;
+	struct sep_device *sep;
+
+	/*
+	 * Get the SEP device structure and use it for the
+	 * private_data field in filp for other methods
+	 */
+	sep = sep_dev;
+
+	file_ptr->private_data = sep;
+
+	dev_dbg(&sep->pdev->dev, "Singleton open for pid %d\n", current->pid);
+
+	dev_dbg(&sep->pdev->dev, "calling test and set for singleton 0\n");
+	if (test_and_set_bit(0, &sep->singleton_access_flag)) {
+		error = -EBUSY;
+		goto end_function;
+	}
+
+	dev_dbg(&sep->pdev->dev, "sep_singleton_open end\n");
+end_function:
+	return error;
+}
+
+/**
+ *	sep_open - device open method
+ *	@inode: inode of SEP device
+ *	@filp: file handle to SEP device
+ *
+ *	Open method for the SEP device. Called when userspace opens
+ *	the SEP device node.
+ *
+ *	Returns zero on success otherwise an error code.
+ */
+static int sep_open(struct inode *inode, struct file *filp)
+{
+	struct sep_device *sep;
+
+	/*
+	 * Get the SEP device structure and use it for the
+	 * private_data field in filp for other methods
+	 */
+	sep = sep_dev;
+	filp->private_data = sep;
+
+	dev_dbg(&sep->pdev->dev, "Open for pid %d\n", current->pid);
+
+	/* Anyone can open; locking takes place at transaction level */
+	return 0;
+}
+
+/**
+ *	sep_singleton_release - close a SEP singleton device
+ *	@inode: inode of SEP device
+ *	@filp: file handle being closed
+ *
+ *	Called on the final close of a SEP device. As the open protects against
+ *	multiple simultaneous opens, this method is called when the
+ *	final reference to the open handle is dropped.
+ */
+static int sep_singleton_release(struct inode *inode, struct file *filp)
+{
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "Singleton release for pid %d\n",
+							current->pid);
+	clear_bit(0, &sep->singleton_access_flag);
+	return 0;
+}
+
+/**
+ *	sep_request_daemon_open - request daemon open method
+ *	@inode: inode of SEP device
+ *	@filp: file handle to SEP device
+ *
+ *	Open method for the SEP request daemon. Called when
+ *	request daemon in userspace opens the SEP device node.
+ *
+ *	Returns zero on success otherwise an error code.
+ */
+static int sep_request_daemon_open(struct inode *inode, struct file *filp)
+{
+	struct sep_device *sep = sep_dev;
+	int error = 0;
+
+	filp->private_data = sep;
+
+	dev_dbg(&sep->pdev->dev, "Request daemon open for pid %d\n",
+		current->pid);
+
+	/* There is supposed to be only one request daemon */
+	dev_dbg(&sep->pdev->dev, "calling test and set for req_dmon open 0\n");
+	if (test_and_set_bit(0, &sep->request_daemon_open))
+		error = -EBUSY;
+	return error;
+}
+
+/**
+ *	sep_request_daemon_release - close a SEP daemon
+ *	@inode: inode of SEP device
+ *	@filp: file handle being closed
+ *
+ *	Called on the final close of a SEP daemon.
+ */
+static int sep_request_daemon_release(struct inode *inode, struct file *filp)
+{
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "Reques daemon release for pid %d\n",
+		current->pid);
+
+	/* Clear the request_daemon_open flag */
+	clear_bit(0, &sep->request_daemon_open);
+	return 0;
+}
+
+/**
+ *	sep_req_daemon_send_reply_command_handler - poke the SEP
+ *	@sep: struct sep_device *
+ *
+ *	This function raises an interrupt to the SEP that signals that it has
+ *	a new command from the host
+ */
+static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
+{
+	unsigned long lck_flags;
+
+	dev_dbg(&sep->pdev->dev,
+		"sep_req_daemon_send_reply_command_handler start\n");
+
+	sep_dump_message(sep);
+
+	/* The counters are protected by the send/reply spinlock */
+	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+	sep->send_ct++;
+	sep->reply_ct++;
+
+	/* Send the interrupt to SEP */
+	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
+	sep->send_ct++;
+
+	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+	dev_dbg(&sep->pdev->dev,
+		"sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
+		sep->send_ct, sep->reply_ct);
+
+	dev_dbg(&sep->pdev->dev,
+		"sep_req_daemon_send_reply_command_handler end\n");
+
+	return 0;
+}
+
+
+/**
+ *	sep_free_dma_table_data_handler - free DMA table
+ *	@sep: pointer to struct sep_device
+ *
+ *	Handles the request to free the DMA table for synchronous actions
+ */
+static int sep_free_dma_table_data_handler(struct sep_device *sep)
+{
+	int count;
+	int dcb_counter;
+	/* Pointer to the current dma_resource struct */
+	struct sep_dma_resource *dma;
+
+	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler start\n");
+
+	for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
+		dma = &sep->dma_res_arr[dcb_counter];
+
+		/* Unmap and free input map array */
+		if (dma->in_map_array) {
+			for (count = 0; count < dma->in_num_pages; count++) {
+				dma_unmap_page(&sep->pdev->dev,
+					dma->in_map_array[count].dma_addr,
+					dma->in_map_array[count].size,
+					DMA_TO_DEVICE);
+			}
+			kfree(dma->in_map_array);
+		}
+
+		/* Unmap output map array, DON'T free it yet */
+		if (dma->out_map_array) {
+			for (count = 0; count < dma->out_num_pages; count++) {
+				dma_unmap_page(&sep->pdev->dev,
+					dma->out_map_array[count].dma_addr,
+					dma->out_map_array[count].size,
+					DMA_FROM_DEVICE);
+			}
+			kfree(dma->out_map_array);
+		}
+
+		/* Free page cache for input */
+		if (dma->in_page_array) {
+			for (count = 0; count < dma->in_num_pages; count++) {
+				flush_dcache_page(dma->in_page_array[count]);
+				page_cache_release(dma->in_page_array[count]);
+			}
+			kfree(dma->in_page_array);
+		}
+
+		if (dma->out_page_array) {
+			for (count = 0; count < dma->out_num_pages; count++) {
+				if (!PageReserved(dma->out_page_array[count]))
+					SetPageDirty(dma->out_page_array[count]);
+				flush_dcache_page(dma->out_page_array[count]);
+				page_cache_release(dma->out_page_array[count]);
+			}
+			kfree(dma->out_page_array);
+		}
+
+		/* Reset all the values */
+		dma->in_page_array = NULL;
+		dma->out_page_array = NULL;
+		dma->in_num_pages = 0;
+		dma->out_num_pages = 0;
+		dma->in_map_array = NULL;
+		dma->out_map_array = NULL;
+		dma->in_map_num_entries = 0;
+		dma->out_map_num_entries = 0;
+	}
+
+	sep->nr_dcb_creat = 0;
+	sep->num_lli_tables_created = 0;
+
+	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler end\n");
+	return 0;
+}
+
+/**
+ *	sep_request_daemon_mmap - maps the shared area to user space
+ *	@filp: pointer to struct file
+ *	@vma: pointer to vm_area_struct
+ *
+ *	Called by the kernel when the daemon attempts an mmap() syscall
+ *	using our handle.
+ */
+static int sep_request_daemon_mmap(struct file  *filp,
+	struct vm_area_struct  *vma)
+{
+	struct sep_device *sep = filp->private_data;
+	dma_addr_t bus_address;
+	int error = 0;
+
+	dev_dbg(&sep->pdev->dev, "daemon mmap start\n");
+
+	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+		error = -EINVAL;
+		goto end_function;
+	}
+
+	/* Get physical address */
+	bus_address = sep->shared_bus;
+
+	dev_dbg(&sep->pdev->dev, "bus_address is %08lx\n",
+					(unsigned long)bus_address);
+
+	if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
+		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+
+		dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
+		error = -EAGAIN;
+		goto end_function;
+	}
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "daemon mmap end\n");
+	return error;
+}
+
+/**
+ *	sep_request_daemon_poll - poll implementation
+ *	@filp: struct file * for open file
+ *	@wait: poll_table * for poll
+ *
+ *	Called when our device is part of a poll() or select() syscall
+ */
+static unsigned int sep_request_daemon_poll(struct file *filp,
+	poll_table  *wait)
+{
+	u32	mask = 0;
+	/* GPR2 register */
+	u32	retval2;
+	unsigned long lck_flags;
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "daemon poll: start\n");
+
+	poll_wait(filp, &sep->event_request_daemon, wait);
+
+	dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
+						sep->send_ct, sep->reply_ct);
+
+	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+	/* Check if the data is ready */
+	if (sep->send_ct == sep->reply_ct) {
+		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+		dev_dbg(&sep->pdev->dev,
+			"daemon poll: data check (GPR2) is %x\n", retval2);
+
+		/* Check if PRINT request */
+		if ((retval2 >> 30) & 0x1) {
+			dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
+			mask |= POLLIN;
+			goto end_function;
+		}
+		/* Check if NVS request */
+		if (retval2 >> 31) {
+			dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
+			mask |= POLLPRI | POLLWRNORM;
+		}
+	} else {
+		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+		dev_dbg(&sep->pdev->dev,
+			"daemon poll: no reply received; returning 0\n");
+		mask = 0;
+	}
+end_function:
+	dev_dbg(&sep->pdev->dev, "daemon poll: exit\n");
+	return mask;
+}
+
+/**
+ *	sep_release - close a SEP device
+ *	@inode: inode of SEP device
+ *	@filp: file handle being closed
+ *
+ *	Called on the final close of a SEP device.
+ */
+static int sep_release(struct inode *inode, struct file *filp)
+{
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);
+
+	mutex_lock(&sep->sep_mutex);
+	/* Is this the process that has a transaction open?
+	 * If so, reset pid_doing_transaction to 0 and
+	 * clear the in use flags, and then wake up sep->event
+	 * so that other processes can do transactions
+	 */
+	dev_dbg(&sep->pdev->dev, "waking up event and mmap_event\n");
+	if (sep->pid_doing_transaction == current->pid) {
+		clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
+		clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
+		sep_free_dma_table_data_handler(sep);
+		wake_up(&sep->event);
+		sep->pid_doing_transaction = 0;
+	}
+
+	mutex_unlock(&sep->sep_mutex);
+	return 0;
+}
+
+/**
+ *	sep_mmap - maps the shared area to user space
+ *	@filp: pointer to struct file
+ *	@vma: pointer to vm_area_struct
+ *
+ *	Called on an mmap of our space via the normal SEP device
+ */
+static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	dma_addr_t bus_addr;
+	struct sep_device *sep = filp->private_data;
+	unsigned long error = 0;
+
+	dev_dbg(&sep->pdev->dev, "mmap start\n");
+
+	/* Set the transaction busy (own the device) */
+	wait_event_interruptible(sep->event,
+		test_and_set_bit(SEP_MMAP_LOCK_BIT,
+		&sep->in_use_flags) == 0);
+
+	if (signal_pending(current)) {
+		error = -EINTR;
+		goto end_function_with_error;
+	}
+	/*
+	 * The pid_doing_transaction indicates that this process
+	 * now owns the facilities to perform a transaction with
+	 * the SEP. While this process is performing a transaction,
+	 * no other process who has the SEP device open can perform
+	 * any transactions. This method allows more than one process
+	 * to have the device open at any given time, which provides
+	 * finer granularity for device utilization by multiple
+	 * processes.
+	 */
+	mutex_lock(&sep->sep_mutex);
+	sep->pid_doing_transaction = current->pid;
+	mutex_unlock(&sep->sep_mutex);
+
+	/* Zero the pools and the number of data pool allocation pointers */
+	sep->data_pool_bytes_allocated = 0;
+	sep->num_of_data_allocations = 0;
+
+	/*
+	 * Check that the size of the mapped range does not exceed the size
+	 * of the message shared area
+	 */
+	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
+		error = -EINVAL;
+		goto end_function_with_error;
+	}
+
+	dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);
+
+	/* Get bus address */
+	bus_addr = sep->shared_bus;
+
+	dev_dbg(&sep->pdev->dev,
+		"bus_address is %lx\n", (unsigned long)bus_addr);
+
+	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
+		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		dev_warn(&sep->pdev->dev, "remap_page_range failed\n");
+		error = -EAGAIN;
+		goto end_function_with_error;
+	}
+	dev_dbg(&sep->pdev->dev, "mmap end\n");
+	goto end_function;
+
+end_function_with_error:
+	/* Clear the bit */
+	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
+	mutex_lock(&sep->sep_mutex);
+	sep->pid_doing_transaction = 0;
+	mutex_unlock(&sep->sep_mutex);
+
+	/* Raise event for stuck contexts */
+
+	dev_warn(&sep->pdev->dev, "mmap error - waking up event\n");
+	wake_up(&sep->event);
+
+end_function:
+	return error;
+}
+
+/**
+ *	sep_poll - poll handler
+ *	@filp: pointer to struct file
+ *	@wait: pointer to poll_table
+ *
+ *	Called by the OS when the kernel is asked to do a poll on
+ *	a SEP file handle.
+ */
+static unsigned int sep_poll(struct file *filp, poll_table *wait)
+{
+	u32 mask = 0;
+	u32 retval = 0;
+	u32 retval2 = 0;
+	unsigned long lck_flags;
+
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "poll: start\n");
+
+	/* Am I the process that owns the transaction? */
+	mutex_lock(&sep->sep_mutex);
+	if (current->pid != sep->pid_doing_transaction) {
+		dev_warn(&sep->pdev->dev, "poll; wrong pid\n");
+		mask = POLLERR;
+		mutex_unlock(&sep->sep_mutex);
+		goto end_function;
+	}
+	mutex_unlock(&sep->sep_mutex);
+
+	/* Check if send command or send_reply were activated previously */
+	if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
+		dev_warn(&sep->pdev->dev, "poll; lock bit set\n");
+		mask = POLLERR;
+		goto end_function;
+	}
+
+	/* Add the event to the polling wait table */
+	dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");
+
+	poll_wait(filp, &sep->event, wait);
+
+	dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
+		sep->send_ct, sep->reply_ct);
+
+	/* Check if error occurred during poll */
+	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+	if (retval2 != 0x0) {
+		dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
+		mask |= POLLERR;
+		goto end_function;
+	}
+
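+	/* The data is ready once the send and reply counters match */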
+	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+
+	if (sep->send_ct == sep->reply_ct) {
+		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+		dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2)  %x\n",
+			retval);
+
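+		/*
+		 * GPR2 bit 30 flags a SEP printf request and bit 31 a SEP
+		 * request; otherwise the register carries the reply
+		 */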
+		/* Check if printf request  */
+		if ((retval >> 30) & 0x1) {
+			dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
+			wake_up(&sep->event_request_daemon);
+			goto end_function;
+		}
+
+		/* Check if this is a SEP reply or request */
+		if (retval >> 31) {
+			dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
+			wake_up(&sep->event_request_daemon);
+		} else {
+			dev_dbg(&sep->pdev->dev, "poll: normal return\n");
+			/* It may be set again by send_reply_command */
+			clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
+			sep_dump_message(sep);
+			dev_dbg(&sep->pdev->dev,
+				"poll; SEP reply POLLIN | POLLRDNORM\n");
+			mask |= POLLIN | POLLRDNORM;
+		}
+	} else {
+		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+		dev_dbg(&sep->pdev->dev,
+			"poll; no reply received; returning mask of 0\n");
+		mask = 0;
+	}
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "poll: end\n");
+	return mask;
+}
+
+/**
+ *	sep_time_address - address in SEP memory of time
+ *	@sep: SEP device we want the address from
+ *
+ *	Return the address of the two dwords in memory used for time
+ *	setting.
+ */
+static u32 *sep_time_address(struct sep_device *sep)
+{
+	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
+}
+
+/**
+ *	sep_set_time - set the SEP time
+ *	@sep: the SEP we are setting the time for
+ *
+ *	Calculates time and sets it at the predefined address.
+ *	Called with the SEP mutex held.
+ */
+static unsigned long sep_set_time(struct sep_device *sep)
+{
+	struct timeval time;
+	u32 *time_addr;	/* Address of time as seen by the kernel */
+
+	dev_dbg(&sep->pdev->dev, "sep_set_time start\n");
+
+	do_gettimeofday(&time);
+
+	/* Set value in the SYSTEM MEMORY offset */
+	time_addr = sep_time_address(sep);
+
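+	/* Word 0: the time token; word 1: the current time in seconds */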
+	time_addr[0] = SEP_TIME_VAL_TOKEN;
+	time_addr[1] = time.tv_sec;
+
+	dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
+	dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
+	dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);
+
+	return time.tv_sec;
+}
+
+/**
+ *	sep_set_caller_id_handler - insert caller id entry
+ *	@sep: SEP device
+ *	@arg: pointer to struct caller_id_struct
+ *
+ *	Inserts the data into the caller id table. Note that this function
+ *	falls under the ioctl lock
+ */
+static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg)
+{
+	void __user *hash;
+	int   error = 0;
+	int   i;
+	struct caller_id_struct command_args;
+
+	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler start\n");
+
+	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+		if (sep->caller_id_table[i].pid == 0)
+			break;
+	}
+
+	if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
+		dev_warn(&sep->pdev->dev, "no more caller id entries left\n");
+		dev_warn(&sep->pdev->dev, "maximum number is %d\n",
+					SEP_CALLER_ID_TABLE_NUM_ENTRIES);
+		error = -EUSERS;
+		goto end_function;
+	}
+
+	/* Copy the data */
+	if (copy_from_user(&command_args, (void __user *)arg,
+		sizeof(command_args))) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	hash = (void __user *)(unsigned long)command_args.callerIdAddress;
+
+	if (!command_args.pid || !command_args.callerIdSizeInBytes) {
+		error = -EINVAL;
+		goto end_function;
+	}
+
+	dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
+	dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
+		command_args.callerIdSizeInBytes);
+
+	if (command_args.callerIdSizeInBytes >
+					SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
+		error = -EMSGSIZE;
+		goto end_function;
+	}
+
+	sep->caller_id_table[i].pid = command_args.pid;
+
+	if (copy_from_user(sep->caller_id_table[i].callerIdHash,
+		hash, command_args.callerIdSizeInBytes))
+		error = -EFAULT;
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_set_current_caller_id - set the caller id
+ *	@sep: pointer to struct_sep_device
+ *
+ *	Set the caller ID (if it exists) to the SEP. Note that this
+ *	function falls under the ioctl lock
+ */
+static int sep_set_current_caller_id(struct sep_device *sep)
+{
+	int i;
+	u32 *hash_buf_ptr;
+
+	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id start\n");
+	dev_dbg(&sep->pdev->dev, "current process is %d\n", current->pid);
+
+	/* Zero the previous value */
+	memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
+					0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+
+	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
+		if (sep->caller_id_table[i].pid == current->pid) {
+			dev_dbg(&sep->pdev->dev, "Caller Id found\n");
+
+			memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
+				(void *)(sep->caller_id_table[i].callerIdHash),
+				SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
+			break;
+		}
+	}
+	/* Ensure data is in little endian */
+	hash_buf_ptr = (u32 *)sep->shared_addr +
+		SEP_CALLER_ID_OFFSET_BYTES;
+
+	for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++)
+		hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]);
+
+	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id end\n");
+	return 0;
+}
+
+/**
+ *	sep_send_command_handler - kick off a command
+ *	@sep: SEP being signalled
+ *
+ *	This function raises interrupt to SEP that signals that is has a new
+ *	command from the host
+ *
+ *      Note that this function does fall under the ioctl lock
+ */
+static int sep_send_command_handler(struct sep_device *sep)
+{
+	unsigned long lck_flags;
+	int error = 0;
+
+	dev_dbg(&sep->pdev->dev, "sep_send_command_handler start\n");
+
+	if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
+		error = -EPROTO;
+		goto end_function;
+	}
+	sep_set_time(sep);
+
+	sep_set_current_caller_id(sep);
+
+	sep_dump_message(sep);
+
+	/* Update counter */
+	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+	sep->send_ct++;
+	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+	dev_dbg(&sep->pdev->dev,
+		"sep_send_command_handler send_ct %lx reply_ct %lx\n",
+						sep->send_ct, sep->reply_ct);
+
+	/* Send interrupt to SEP */
+	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_send_command_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_allocate_data_pool_memory_handler - allocate pool memory
+ *	@sep: pointer to struct sep_device
+ *	@arg: pointer to struct alloc_struct
+ *
+ *	This function handles the allocate data pool memory request.
+ *	This function calculates the bus address of the
+ *	allocated memory, and the offset of this area from the mapped address.
+ *	Therefore, the FVOs in user space can calculate the exact virtual
+ *	address of this allocated memory
+ */
+static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
+	unsigned long arg)
+{
+	int error = 0;
+	struct alloc_struct command_args;
+
+	/* Holds the allocated buffer address in the system memory pool */
+	u32 *token_addr;
+
+	dev_dbg(&sep->pdev->dev,
+		"sep_allocate_data_pool_memory_handler start\n");
+
+	if (copy_from_user(&command_args, (void __user *)arg,
+					sizeof(struct alloc_struct))) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	/* Allocate memory */
+	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
+		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
+		error = -ENOMEM;
+		goto end_function;
+	}
+
+	dev_dbg(&sep->pdev->dev,
+		"bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
+	dev_dbg(&sep->pdev->dev,
+		"offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
+	/* Set the virtual and bus address */
+	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+		sep->data_pool_bytes_allocated;
+
+	dev_dbg(&sep->pdev->dev,
+		"command_args.offset: %x\n", command_args.offset);
+
+	/* Place in the shared area that is known by the SEP */
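+	/*
+	 * Each allocation is recorded as a (token, bus address) pair so
+	 * that the SEP can locate the region handed back to user space
+	 */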
+	token_addr = (u32 *)(sep->shared_addr +
+		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
+		(sep->num_of_data_allocations)*2*sizeof(u32));
+
+	dev_dbg(&sep->pdev->dev, "allocation offset: %x\n",
+		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES);
+	dev_dbg(&sep->pdev->dev, "data pool token addr is %p\n", token_addr);
+
+	token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
+	token_addr[1] = (u32)sep->shared_bus +
+		SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
+		sep->data_pool_bytes_allocated;
+
+	dev_dbg(&sep->pdev->dev, "data pool token [0] %x\n", token_addr[0]);
+	dev_dbg(&sep->pdev->dev, "data pool token [1] %x\n", token_addr[1]);
+
+	/* Write the memory back to the user space */
+	error = copy_to_user((void __user *)arg, (void *)&command_args,
+		sizeof(struct alloc_struct));
+	if (error) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	/* Update the allocation */
+	sep->data_pool_bytes_allocated += command_args.num_bytes;
+	sep->num_of_data_allocations += 1;
+
+	dev_dbg(&sep->pdev->dev, "data_allocations %d\n",
+		sep->num_of_data_allocations);
+	dev_dbg(&sep->pdev->dev, "bytes allocated  %d\n",
+		(int)sep->data_pool_bytes_allocated);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_allocate_data_pool_memory_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_lock_kernel_pages - map kernel pages for DMA
+ *	@sep: pointer to struct sep_device
+ *	@kernel_virt_addr: address of data buffer in kernel
+ *	@data_size: size of data
+ *	@lli_array_ptr: lli array
+ *	@in_out_flag: input into device or output from device
+ *
+ *	This function maps the kernel virtual buffer for DMA and constructs
+ *	a basic lli array, where each entry holds the bus address and the
+ *	size of the data it covers. This function is used only during kernel
+ *	crypto module calls from within the kernel (when ioctl is not used)
+ */
+static int sep_lock_kernel_pages(struct sep_device *sep,
+	unsigned long kernel_virt_addr,
+	u32 data_size,
+	struct sep_lli_entry **lli_array_ptr,
+	int in_out_flag)
+
+{
+	int error = 0;
+	/* Array of lli */
+	struct sep_lli_entry *lli_array;
+	/* Map array */
+	struct sep_dma_map *map_array;
+
+	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages start\n");
+	dev_dbg(&sep->pdev->dev, "kernel_virt_addr is %08lx\n",
+				(unsigned long)kernel_virt_addr);
+	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
+
+	lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
+	if (!lli_array) {
+		error = -ENOMEM;
+		goto end_function;
+	}
+	map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
+	if (!map_array) {
+		error = -ENOMEM;
+		goto end_function_with_error;
+	}
+
+	map_array[0].dma_addr =
+		dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
+		data_size, DMA_BIDIRECTIONAL);
+	map_array[0].size = data_size;
+
+
+	/*
+	 * Set the start address of the first page - app data may start not at
+	 * the beginning of the page
+	 */
+	lli_array[0].bus_address = (u32)map_array[0].dma_addr;
+	lli_array[0].block_size = map_array[0].size;
+
+	dev_dbg(&sep->pdev->dev,
+	"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
+		(unsigned long)lli_array[0].bus_address,
+		lli_array[0].block_size);
+
+	/* Set the output parameters */
+	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+		*lli_array_ptr = lli_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
+	} else {
+		*lli_array_ptr = lli_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
+	}
+	goto end_function;
+
+end_function_with_error:
+	kfree(lli_array);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages end\n");
+	return error;
+}
+
+/**
+ *	sep_lock_user_pages - lock and map user pages for DMA
+ *	@sep: pointer to struct sep_device
+ *	@app_virt_addr: user memory data buffer
+ *	@data_size: size of data buffer
+ *	@lli_array_ptr: lli array
+ *	@in_out_flag: input or output to device
+ *
+ *	This function locks all the physical pages of the application
+ *	virtual buffer and constructs a basic lli array, where each entry
+ *	holds the physical page address and the size of the application
+ *	data held in that page
+ */
+static int sep_lock_user_pages(struct sep_device *sep,
+	u32 app_virt_addr,
+	u32 data_size,
+	struct sep_lli_entry **lli_array_ptr,
+	int in_out_flag)
+
+{
+	int error = 0;
+	u32 count;
+	int result;
+	/* The page of the end address of the user space buffer */
+	u32 end_page;
+	/* The page of the start address of the user space buffer */
+	u32 start_page;
+	/* The range in pages */
+	u32 num_pages;
+	/* Array of pointers to page */
+	struct page **page_array;
+	/* Array of lli */
+	struct sep_lli_entry *lli_array;
+	/* Map array */
+	struct sep_dma_map *map_array;
+	/* Direction of the DMA mapping for locked pages */
+	enum dma_data_direction	dir;
+
+	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages start\n");
+
+	/* Set start and end pages  and num pages */
+	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
+	start_page = app_virt_addr >> PAGE_SHIFT;
+	num_pages = end_page - start_page + 1;
+
+	dev_dbg(&sep->pdev->dev, "app_virt_addr is %x\n", app_virt_addr);
+	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
+	dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
+	dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
+	dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);
+
+	dev_dbg(&sep->pdev->dev, "starting page_array malloc\n");
+
+	/* Allocate array of pages structure pointers */
+	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
+	if (!page_array) {
+		error = -ENOMEM;
+		goto end_function;
+	}
+	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
+	if (!map_array) {
+		dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
+		error = -ENOMEM;
+		goto end_function_with_error1;
+	}
+
+	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
+		GFP_ATOMIC);
+
+	if (!lli_array) {
+		dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
+		error = -ENOMEM;
+		goto end_function_with_error2;
+	}
+
+	dev_dbg(&sep->pdev->dev, "starting get_user_pages\n");
+
+	/* Convert the application virtual address into a set of physical */
+	down_read(&current->mm->mmap_sem);
+	result = get_user_pages(current, current->mm, app_virt_addr,
+		num_pages,
+		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
+		0, page_array, NULL);
+
+	up_read(&current->mm->mmap_sem);
+
+	/* Check the number of pages locked - if not all then exit with error */
+	if (result != num_pages) {
+		dev_warn(&sep->pdev->dev,
+			"not all pages locked by get_user_pages\n");
+		error = -ENOMEM;
+		goto end_function_with_error3;
+	}
+
+	dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");
+
+	/* Set direction */
+	if (in_out_flag == SEP_DRIVER_IN_FLAG)
+		dir = DMA_TO_DEVICE;
+	else
+		dir = DMA_FROM_DEVICE;
+
+	/*
+	 * Fill the array using page array data and
+	 * map the pages - this action will also flush the cache as needed
+	 */
+	for (count = 0; count < num_pages; count++) {
+		/* Fill the map array */
+		map_array[count].dma_addr =
+			dma_map_page(&sep->pdev->dev, page_array[count],
+			0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);
+
+		map_array[count].size = PAGE_SIZE;
+
+		/* Fill the lli array entry */
+		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
+		lli_array[count].block_size = PAGE_SIZE;
+
+		dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
+			count, (unsigned long)lli_array[count].bus_address,
+			count, lli_array[count].block_size);
+	}
+
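+	/*
+	 * The buffer need not start or end on a page boundary, so the first
+	 * entry is advanced by the in-page offset and its size trimmed, and
+	 * the last entry is trimmed to the bytes used in the final page
+	 */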
+	/* Check the offset for the first page */
+	lli_array[0].bus_address =
+		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
+
+	/* Check that not all the data is in the first page only */
+	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
+		lli_array[0].block_size = data_size;
+	else
+		lli_array[0].block_size =
+			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
+
+	dev_dbg(&sep->pdev->dev,
+		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
+		(unsigned long)lli_array[0].bus_address,
+		lli_array[0].block_size);
+
+	/* Check the size of the last page */
+	if (num_pages > 1) {
+		lli_array[num_pages - 1].block_size =
+			(app_virt_addr + data_size) & (~PAGE_MASK);
+
+		dev_warn(&sep->pdev->dev,
+			"lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
+			num_pages - 1,
+			(unsigned long)lli_array[num_pages - 1].bus_address,
+			num_pages - 1,
+			lli_array[num_pages - 1].block_size);
+	}
+
+	/* Set output params according to the in_out flag */
+	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
+		*lli_array_ptr = lli_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
+								num_pages;
+	} else {
+		*lli_array_ptr = lli_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
+								page_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
+		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
+								num_pages;
+	}
+	goto end_function;
+
+end_function_with_error3:
+	/* Free lli array */
+	kfree(lli_array);
+
+end_function_with_error2:
+	kfree(map_array);
+
+end_function_with_error1:
+	/* Free page array */
+	kfree(page_array);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages end\n");
+	return error;
+}
+
+/**
+ *	sep_calculate_lli_table_max_size - size the LLI table
+ *	@sep: pointer to struct sep_device
+ *	@lli_in_array_ptr: pointer to the lli array
+ *	@num_array_entries: number of entries in the lli array
+ *	@last_table_flag: output; set when this will be the last table
+ *
+ *	This function calculates the size of data that can be inserted into
+ *	the lli table from this array, such that either the table is full
+ *	(all entries are entered), or there are no more entries in the
+ *	lli array
+ */
+static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
+	struct sep_lli_entry *lli_in_array_ptr,
+	u32 num_array_entries,
+	u32 *last_table_flag)
+{
+	u32 counter;
+	/* Table data size */
+	u32 table_data_size = 0;
+	/* Data size for the next table */
+	u32 next_table_data_size;
+
+	*last_table_flag = 0;
+
+	/*
+	 * Calculate the data in the out lli table till we fill the whole
+	 * table or till the data has ended
+	 */
+	for (counter = 0;
+		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
+			(counter < num_array_entries); counter++)
+		table_data_size += lli_in_array_ptr[counter].block_size;
+
+	/*
+	 * Check if we reached the last entry,
+	 * meaning this is the last table to build,
+	 * and no need to check the block alignment
+	 */
+	if (counter == num_array_entries) {
+		/* Set the last table flag */
+		*last_table_flag = 1;
+		goto end_function;
+	}
+
+	/*
+	 * Calculate the data size of the next table.
+	 * Stop if no entries are left or if the data size exceeds the DMA restriction
+	 */
+	next_table_data_size = 0;
+	for (; counter < num_array_entries; counter++) {
+		next_table_data_size += lli_in_array_ptr[counter].block_size;
+		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+			break;
+	}
+
+	/*
+	 * Check if the next table data size is less than the DMA restriction.
+	 * If it is, recalculate the current table size so that the next
+	 * table data size will be adequate for DMA
+	 */
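+	/*
+	 * For example (illustrative sizes): if the data left for the next
+	 * table is 1KB but the DMA minimum per table is 4KB, 3KB is given
+	 * back from the current table so the next one meets the minimum
+	 */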
+	if (next_table_data_size &&
+		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
+
+		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
+			next_table_data_size);
+
+	dev_dbg(&sep->pdev->dev, "table data size is %x\n",
+							table_data_size);
+end_function:
+	return table_data_size;
+}
+
+/**
+ *	sep_build_lli_table - build an lli array for the given table
+ *	@sep: pointer to struct sep_device
+ *	@lli_array_ptr: pointer to lli array
+ *	@lli_table_ptr: pointer to lli table
+ *	@num_processed_entries_ptr: pointer to number of entries
+ *	@num_table_entries_ptr: pointer to number of entries in the table
+ *	@table_data_size: total data size
+ *
+ *	Builds an lli table from the lli_array according to
+ *	the given size of data
+ */
+static void sep_build_lli_table(struct sep_device *sep,
+	struct sep_lli_entry	*lli_array_ptr,
+	struct sep_lli_entry	*lli_table_ptr,
+	u32 *num_processed_entries_ptr,
+	u32 *num_table_entries_ptr,
+	u32 table_data_size)
+{
+	/* Current table data size */
+	u32 curr_table_data_size;
+	/* Counter of lli array entry */
+	u32 array_counter;
+
+	dev_dbg(&sep->pdev->dev, "sep_build_lli_table start\n");
+
+	/* Init current table data size and lli array entry counter */
+	curr_table_data_size = 0;
+	array_counter = 0;
+	*num_table_entries_ptr = 1;
+
+	dev_dbg(&sep->pdev->dev, "table_data_size is %x\n", table_data_size);
+
+	/* Fill the table till table size reaches the needed amount */
+	while (curr_table_data_size < table_data_size) {
+		/* Update the number of entries in table */
+		(*num_table_entries_ptr)++;
+
+		lli_table_ptr->bus_address =
+			cpu_to_le32(lli_array_ptr[array_counter].bus_address);
+
+		lli_table_ptr->block_size =
+			cpu_to_le32(lli_array_ptr[array_counter].block_size);
+
+		curr_table_data_size += lli_array_ptr[array_counter].block_size;
+
+		dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
+								lli_table_ptr);
+		dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
+				(unsigned long)lli_table_ptr->bus_address);
+		dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
+			lli_table_ptr->block_size);
+
+		/* Check for overflow of the table data */
+		if (curr_table_data_size > table_data_size) {
+			dev_dbg(&sep->pdev->dev,
+				"curr_table_data_size too large\n");
+
+			/* Update the size of block in the table */
+			lli_table_ptr->block_size -=
+			cpu_to_le32((curr_table_data_size - table_data_size));
+
+			/* Update the physical address in the lli array */
+			lli_array_ptr[array_counter].bus_address +=
+			cpu_to_le32(lli_table_ptr->block_size);
+
+			/* Update the block size left in the lli array */
+			lli_array_ptr[array_counter].block_size =
+				(curr_table_data_size - table_data_size);
+		} else
+			/* Advance to the next entry in the lli_array */
+			array_counter++;
+
+		dev_dbg(&sep->pdev->dev,
+			"lli_table_ptr->bus_address is %08lx\n",
+				(unsigned long)lli_table_ptr->bus_address);
+		dev_dbg(&sep->pdev->dev,
+			"lli_table_ptr->block_size is %x\n",
+			lli_table_ptr->block_size);
+
+		/* Move to the next entry in table */
+		lli_table_ptr++;
+	}
+
+	/* Set the info entry to default */
+	lli_table_ptr->bus_address = 0xffffffff;
+	lli_table_ptr->block_size = 0;
+
+	dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", lli_table_ptr);
+	dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
+				(unsigned long)lli_table_ptr->bus_address);
+	dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
+						lli_table_ptr->block_size);
+
+	/* Set the output parameter */
+	*num_processed_entries_ptr += array_counter;
+
+	dev_dbg(&sep->pdev->dev, "num_processed_entries_ptr is %x\n",
+		*num_processed_entries_ptr);
+
+	dev_dbg(&sep->pdev->dev, "sep_build_lli_table end\n");
+}
+
+/**
+ *	sep_shared_area_virt_to_bus - map shared area to bus address
+ *	@sep: pointer to struct sep_device
+ *	@virt_address: virtual address to convert
+ *
+ *	This function returns the physical address inside the shared area
+ *	according to the virtual address. It can be either on the external
+ *	RAM device (ioremapped), or on the system RAM.
+ *	This implementation is for the external RAM
+ */
+static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
+	void *virt_address)
+{
+	dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
+	dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
+		(unsigned long)
+		sep->shared_bus + (virt_address - sep->shared_addr));
+
+	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
+}
+
+/**
+ *	sep_shared_area_bus_to_virt - map shared area bus address to kernel
+ *	@sep: pointer to struct sep_device
+ *	@bus_address: bus address to convert
+ *
+ *	This function returns the virtual address inside the shared area
+ *	according to the physical address. It can be either on the
+ *	external RAM device (ioremapped), or on the system RAM.
+ *	This implementation is for the external RAM
+ */
+static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
+	dma_addr_t bus_address)
+{
+	dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n",
+		(unsigned long)bus_address, (unsigned long)(sep->shared_addr +
+			(size_t)(bus_address - sep->shared_bus)));
+
+	return sep->shared_addr	+ (size_t)(bus_address - sep->shared_bus);
+}
+
+/**
+ *	sep_debug_print_lli_tables - dump LLI table
+ *	@sep: pointer to struct sep_device
+ *	@lli_table_ptr: pointer to sep_lli_entry
+ *	@num_table_entries: number of entries
+ *	@table_data_size: total data size
+ *
+ *	Walk the list of the created tables and print all the data
+ */
+static void sep_debug_print_lli_tables(struct sep_device *sep,
+	struct sep_lli_entry *lli_table_ptr,
+	unsigned long num_table_entries,
+	unsigned long table_data_size)
+{
+	unsigned long table_count = 1;
+	unsigned long entries_count = 0;
+
+	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");
+
+	while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
+		dev_dbg(&sep->pdev->dev,
+			"lli table %08lx, table_data_size is %lu\n",
+			table_count, table_data_size);
+		dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
+							num_table_entries);
+
+		/* Print entries of the table (without info entry) */
+		for (entries_count = 0; entries_count < num_table_entries;
+			entries_count++, lli_table_ptr++) {
+
+			dev_dbg(&sep->pdev->dev,
+				"lli_table_ptr address is %08lx\n",
+				(unsigned long) lli_table_ptr);
+
+			dev_dbg(&sep->pdev->dev,
+				"phys address is %08lx block size is %x\n",
+				(unsigned long)lli_table_ptr->bus_address,
+				lli_table_ptr->block_size);
+		}
+		/* Point to the info entry */
+		lli_table_ptr--;
+
+		dev_dbg(&sep->pdev->dev,
+			"phys lli_table_ptr->block_size is %x\n",
+			lli_table_ptr->block_size);
+
+		dev_dbg(&sep->pdev->dev,
+			"phys lli_table_ptr->physical_address is %08lu\n",
+			(unsigned long)lli_table_ptr->bus_address);
+
+
+		table_data_size = lli_table_ptr->block_size & 0xffffff;
+		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
+
+		dev_dbg(&sep->pdev->dev,
+			"phys table_data_size is %lu num_table_entries is"
+			" %lu bus_address is%lu\n", table_data_size,
+			num_table_entries, (unsigned long)lli_table_ptr->bus_address);
+
+		if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
+			lli_table_ptr = (struct sep_lli_entry *)
+				sep_shared_bus_to_virt(sep,
+				(unsigned long)lli_table_ptr->bus_address);
+
+		table_count++;
+	}
+	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
+}
+
+
+/**
+ *	sep_prepare_empty_lli_table - create a blank LLI table
+ *	@sep: pointer to struct sep_device
+ *	@lli_table_addr_ptr: pointer to lli table
+ *	@num_entries_ptr: pointer to number of entries
+ *	@table_data_size_ptr: pointer to table data size
+ *
+ *	This function creates empty lli tables when there is no data
+ */
+static void sep_prepare_empty_lli_table(struct sep_device *sep,
+		dma_addr_t *lli_table_addr_ptr,
+		u32 *num_entries_ptr,
+		u32 *table_data_size_ptr)
+{
+	struct sep_lli_entry *lli_table_ptr;
+
+	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");
+
+	/* Find the area for new table */
+	lli_table_ptr =
+		(struct sep_lli_entry *)(sep->shared_addr +
+		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
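+	/* First entry: zero data; second entry: the 0xffffffff info terminator */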
+	lli_table_ptr->bus_address = 0;
+	lli_table_ptr->block_size = 0;
+
+	lli_table_ptr++;
+	lli_table_ptr->bus_address = 0xFFFFFFFF;
+	lli_table_ptr->block_size = 0;
+
+	/* Set the output parameter value */
+	*lli_table_addr_ptr = sep->shared_bus +
+		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+		sep->num_lli_tables_created *
+		sizeof(struct sep_lli_entry) *
+		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+	/* Set the num of entries and table data size for empty table */
+	*num_entries_ptr = 2;
+	*table_data_size_ptr = 0;
+
+	/* Update the number of created tables */
+	sep->num_lli_tables_created++;
+
+	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");
+
+}
+
+/**
+ *	sep_prepare_input_dma_table - prepare input DMA mappings
+ *	@sep: pointer to struct sep_device
+ *	@app_virt_addr: virtual address of the data buffer
+ *	@data_size: size of the data
+ *	@block_size: block size of the operation
+ *	@lli_table_ptr: returned bus address of the first lli table
+ *	@num_entries_ptr: returned number of entries in the first table
+ *	@table_data_size_ptr: returned data size of the first table
+ *	@is_kva: set for kernel data (kernel crypto call)
+ *
+ *	This function prepares only the input DMA table for synchronous
+ *	symmetric operations (HASH)
+ *	Note that all bus addresses that are passed to the SEP
+ *	are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_dma_table(struct sep_device *sep,
+	unsigned long app_virt_addr,
+	u32 data_size,
+	u32 block_size,
+	dma_addr_t *lli_table_ptr,
+	u32 *num_entries_ptr,
+	u32 *table_data_size_ptr,
+	bool is_kva)
+{
+	int error = 0;
+	/* Pointer to the info entry of the table - the last entry */
+	struct sep_lli_entry *info_entry_ptr;
+	/* Array of pointers to page */
+	struct sep_lli_entry *lli_array_ptr;
+	/* Points to the first entry to be processed in the lli_in_array */
+	u32 current_entry = 0;
+	/* Num entries in the virtual buffer */
+	u32 sep_lli_entries = 0;
+	/* Lli table pointer */
+	struct sep_lli_entry *in_lli_table_ptr;
+	/* The total data in one table */
+	u32 table_data_size = 0;
+	/* Flag for last table */
+	u32 last_table_flag = 0;
+	/* Number of entries in lli table */
+	u32 num_entries_in_table = 0;
+	/* Next table address */
+	void *lli_table_alloc_addr = 0;
+
+	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table start\n");
+	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
+	dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);
+
+	/* Initialize the pages pointers */
+	sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
+	sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;
+
+	/* Set the kernel address for first table to be allocated */
+	lli_table_alloc_addr = (void *)(sep->shared_addr +
+		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
+		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+	if (data_size == 0) {
+		/* Special case - create empty table - 2 entries, zero data */
+		sep_prepare_empty_lli_table(sep, lli_table_ptr,
+				num_entries_ptr, table_data_size_ptr);
+		goto update_dcb_counter;
+	}
+
+	/* Check if the pages are in Kernel Virtual Address layout */
+	if (is_kva == true)
+		/* Lock the pages in the kernel */
+		error = sep_lock_kernel_pages(sep, app_virt_addr,
+			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
+	else
+		/*
+		 * Lock the pages of the user buffer
+		 * and translate them to pages
+		 */
+		error = sep_lock_user_pages(sep, app_virt_addr,
+			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
+
+	if (error)
+		goto end_function;
+
+	dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
+		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
+
+	current_entry = 0;
+	info_entry_ptr = NULL;
+
+	sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;
+
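+	/*
+	 * Each pass below carves one lli table out of the shared area; the
+	 * info (last) entry of the previous table is then patched to point
+	 * at the next table, forming a linked list of tables
+	 */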
+	/* Loop until all the entries in the in array are processed */
+	while (current_entry < sep_lli_entries) {
+
+		/* Set the new input and output tables */
+		in_lli_table_ptr =
+			(struct sep_lli_entry *)lli_table_alloc_addr;
+
+		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+		if (lli_table_alloc_addr >
+			((void *)sep->shared_addr +
+			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+			error = -ENOMEM;
+			goto end_function_error;
+
+		}
+
+		/* Update the number of created tables */
+		sep->num_lli_tables_created++;
+
+		/* Calculate the maximum size of data for input table */
+		table_data_size = sep_calculate_lli_table_max_size(sep,
+			&lli_array_ptr[current_entry],
+			(sep_lli_entries - current_entry),
+			&last_table_flag);
+
+		/*
+		 * If this is not the last table -
+		 * then align it to the block size
+		 */
+		if (!last_table_flag)
+			table_data_size =
+				(table_data_size / block_size) * block_size;
+
+		dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
+							table_data_size);
+
+		/* Construct input lli table */
+		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
+			in_lli_table_ptr,
+			&current_entry, &num_entries_in_table, table_data_size);
+
+		if (info_entry_ptr == NULL) {
+
+			/* Set the output parameters to physical addresses */
+			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
+				in_lli_table_ptr);
+			*num_entries_ptr = num_entries_in_table;
+			*table_data_size_ptr = table_data_size;
+
+			dev_dbg(&sep->pdev->dev,
+				"output lli_table_in_ptr is %08lx\n",
+				(unsigned long)*lli_table_ptr);
+
+		} else {
+			/* Update the info entry of the previous in table */
+			info_entry_ptr->bus_address =
+				sep_shared_area_virt_to_bus(sep,
+							in_lli_table_ptr);
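+			/*
+			 * block_size of an info entry packs the next table's
+			 * entry count in the top 8 bits and its data size in
+			 * the low 24 bits
+			 */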
+			info_entry_ptr->block_size =
+				((num_entries_in_table) << 24) |
+				(table_data_size);
+		}
+		/* Save the pointer to the info entry of the current tables */
+		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
+	}
+	/* Print input tables */
+	sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
+		sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
+		*num_entries_ptr, *table_data_size_ptr);
+	/* The array of the pages */
+	kfree(lli_array_ptr);
+
+update_dcb_counter:
+	/* Update DCB counter */
+	sep->nr_dcb_creat++;
+	goto end_function;
+
+end_function_error:
+	/* Free all the allocated resources */
+	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
+	kfree(lli_array_ptr);
+	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table end\n");
+	return error;
+
+}
+
+/**
+ *	sep_construct_dma_tables_from_lli - prepare AES/DES mappings
+ *	@sep: pointer to struct sep_device
+ *	@lli_in_array: array of input lli entries
+ *	@sep_in_lli_entries: number of entries in lli_in_array
+ *	@lli_out_array: array of output lli entries
+ *	@sep_out_lli_entries: number of entries in lli_out_array
+ *	@block_size: block size of the operation
+ *	@lli_table_in_ptr: returned bus address of the first input table
+ *	@lli_table_out_ptr: returned bus address of the first output table
+ *	@in_num_entries_ptr: returned number of entries in the first input table
+ *	@out_num_entries_ptr: returned number of entries in the first output table
+ *	@table_data_size_ptr: returned data size of the first table
+ *
+ *	This function creates the input and output DMA tables for
+ *	symmetric operations (AES/DES) according to the block
+ *	size from LLI arrays
+ *	Note that all bus addresses that are passed to the SEP
+ *	are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_construct_dma_tables_from_lli(
+	struct sep_device *sep,
+	struct sep_lli_entry *lli_in_array,
+	u32	sep_in_lli_entries,
+	struct sep_lli_entry *lli_out_array,
+	u32	sep_out_lli_entries,
+	u32	block_size,
+	dma_addr_t *lli_table_in_ptr,
+	dma_addr_t *lli_table_out_ptr,
+	u32	*in_num_entries_ptr,
+	u32	*out_num_entries_ptr,
+	u32	*table_data_size_ptr)
+{
+	/* Points to the area where next lli table can be allocated */
+	void *lli_table_alloc_addr = 0;
+	/* Input lli table */
+	struct sep_lli_entry *in_lli_table_ptr = NULL;
+	/* Output lli table */
+	struct sep_lli_entry *out_lli_table_ptr = NULL;
+	/* Pointer to the info entry of the table - the last entry */
+	struct sep_lli_entry *info_in_entry_ptr = NULL;
+	/* Pointer to the info entry of the table - the last entry */
+	struct sep_lli_entry *info_out_entry_ptr = NULL;
+	/* Points to the first entry to be processed in the lli_in_array */
+	u32 current_in_entry = 0;
+	/* Points to the first entry to be processed in the lli_out_array */
+	u32 current_out_entry = 0;
+	/* Max size of the input table */
+	u32 in_table_data_size = 0;
+	/* Max size of the output table */
+	u32 out_table_data_size = 0;
+	/* Flag that signifies if this is the last table to build */
+	u32 last_table_flag = 0;
+	/* The data size that should be in table */
+	u32 table_data_size = 0;
+	/* Number of entries in the input table */
+	u32 num_entries_in_table = 0;
+	/* Number of entries in the output table */
+	u32 num_entries_out_table = 0;
+
+	dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli start\n");
+
+	/* Initiate to point after the message area */
+	lli_table_alloc_addr = (void *)(sep->shared_addr +
+		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+		(sep->num_lli_tables_created *
+		(sizeof(struct sep_lli_entry) *
+		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
+
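+	/*
+	 * Each pass allocates an input table and an output table back to
+	 * back in the shared area and fills both to the same table data
+	 * size so the SEP processes matching amounts of input and output
+	 */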
+	/* Loop until all the entries in the in array are processed */
+	while (current_in_entry < sep_in_lli_entries) {
+		/* Set the new input and output tables */
+		in_lli_table_ptr =
+			(struct sep_lli_entry *)lli_table_alloc_addr;
+
+		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+		/* Set the first output tables */
+		out_lli_table_ptr =
+			(struct sep_lli_entry *)lli_table_alloc_addr;
+
+		/* Check if the DMA table area limit was overrun */
+		if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
+			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
+			((void *)sep->shared_addr +
+			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
+			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
+
+			dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
+			return -ENOMEM;
+		}
+
+		/* Update the number of the lli tables created */
+		sep->num_lli_tables_created += 2;
+
+		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
+			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
+
+		/* Calculate the maximum size of data for input table */
+		in_table_data_size =
+			sep_calculate_lli_table_max_size(sep,
+			&lli_in_array[current_in_entry],
+			(sep_in_lli_entries - current_in_entry),
+			&last_table_flag);
+
+		/* Calculate the maximum size of data for output table */
+		out_table_data_size =
+			sep_calculate_lli_table_max_size(sep,
+			&lli_out_array[current_out_entry],
+			(sep_out_lli_entries - current_out_entry),
+			&last_table_flag);
+
+		dev_dbg(&sep->pdev->dev,
+			"in_table_data_size is %x\n",
+			in_table_data_size);
+
+		dev_dbg(&sep->pdev->dev,
+			"out_table_data_size is %x\n",
+			out_table_data_size);
+
+		table_data_size = in_table_data_size;
+
+		if (!last_table_flag) {
+			/*
+			 * If this is not the last table,
+			 * then we must take the smaller of the two data sizes
+			 * and then align it to the block size
+			 */
+			if (table_data_size > out_table_data_size)
+				table_data_size = out_table_data_size;
+
+			/*
+			 * Now calculate the table size so that
+			 * it will be a multiple of the block size
+			 */
+			table_data_size = (table_data_size / block_size) *
+				block_size;
+		}
+
+		dev_dbg(&sep->pdev->dev, "table_data_size is %x\n",
+							table_data_size);
+
+		/* Construct input lli table */
+		sep_build_lli_table(sep, &lli_in_array[current_in_entry],
+			in_lli_table_ptr,
+			&current_in_entry,
+			&num_entries_in_table,
+			table_data_size);
+
+		/* Construct output lli table */
+		sep_build_lli_table(sep, &lli_out_array[current_out_entry],
+			out_lli_table_ptr,
+			&current_out_entry,
+			&num_entries_out_table,
+			table_data_size);
+
+		/* If info entry is null - this is the first table built */
+		if (info_in_entry_ptr == NULL) {
+			/* Set the output parameters to physical addresses */
+			*lli_table_in_ptr =
+			sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
+
+			*in_num_entries_ptr = num_entries_in_table;
+
+			*lli_table_out_ptr =
+				sep_shared_area_virt_to_bus(sep,
+				out_lli_table_ptr);
+
+			*out_num_entries_ptr = num_entries_out_table;
+			*table_data_size_ptr = table_data_size;
+
+			dev_dbg(&sep->pdev->dev,
+			"output lli_table_in_ptr is %08lx\n",
+				(unsigned long)*lli_table_in_ptr);
+			dev_dbg(&sep->pdev->dev,
+			"output lli_table_out_ptr is %08lx\n",
+				(unsigned long)*lli_table_out_ptr);
+		} else {
+			/* Update the info entry of the previous in table */
+			info_in_entry_ptr->bus_address =
+				sep_shared_area_virt_to_bus(sep,
+				in_lli_table_ptr);
+
+			info_in_entry_ptr->block_size =
+				((num_entries_in_table) << 24) |
+				(table_data_size);
+
+			/* Update the info entry of the previous in table */
+			info_out_entry_ptr->bus_address =
+				sep_shared_area_virt_to_bus(sep,
+				out_lli_table_ptr);
+
+			info_out_entry_ptr->block_size =
+				((num_entries_out_table) << 24) |
+				(table_data_size);
+
+			dev_dbg(&sep->pdev->dev,
+				"output lli_table_in_ptr:%08lx %08x\n",
+				(unsigned long)info_in_entry_ptr->bus_address,
+				info_in_entry_ptr->block_size);
+
+			dev_dbg(&sep->pdev->dev,
+				"output lli_table_out_ptr:%08lx  %08x\n",
+				(unsigned long)info_out_entry_ptr->bus_address,
+				info_out_entry_ptr->block_size);
+		}
+
+		/* Save the pointer to the info entry of the current tables */
+		info_in_entry_ptr = in_lli_table_ptr +
+			num_entries_in_table - 1;
+		info_out_entry_ptr = out_lli_table_ptr +
+			num_entries_out_table - 1;
+
+		dev_dbg(&sep->pdev->dev,
+			"output num_entries_out_table is %x\n",
+			(u32)num_entries_out_table);
+		dev_dbg(&sep->pdev->dev,
+			"output info_in_entry_ptr is %lx\n",
+			(unsigned long)info_in_entry_ptr);
+		dev_dbg(&sep->pdev->dev,
+			"output info_out_entry_ptr is %lx\n",
+			(unsigned long)info_out_entry_ptr);
+	}
+
+	/* Print input tables */
+	sep_debug_print_lli_tables(sep,
+	(struct sep_lli_entry *)
+	sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
+	*in_num_entries_ptr,
+	*table_data_size_ptr);
+
+	/* Print output tables */
+	sep_debug_print_lli_tables(sep,
+	(struct sep_lli_entry *)
+	sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
+	*out_num_entries_ptr,
+	*table_data_size_ptr);
+
+	dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli end\n");
+	return 0;
+}
+
+/**
+ *	sep_prepare_input_output_dma_table - prepare DMA I/O table
+ *	@sep: pointer to struct sep_device
+ *	@app_virt_in_addr: virtual address of the input buffer
+ *	@app_virt_out_addr: virtual address of the output buffer
+ *	@data_size: size of the data
+ *	@block_size: block size of the operation
+ *	@lli_table_in_ptr: returned bus address of the first input table
+ *	@lli_table_out_ptr: returned bus address of the first output table
+ *	@in_num_entries_ptr: returned number of entries in the first input table
+ *	@out_num_entries_ptr: returned number of entries in the first output table
+ *	@table_data_size_ptr: returned data size of the first table
+ *	@is_kva: set for kernel data; used only for kernel crypto module
+ *
+ *	This function builds input and output DMA tables for synchronous
+ *	symmetric operations (AES, DES, HASH). It also checks that each table
+ *	is of the modular block size
+ *	Note that all bus addresses that are passed to the SEP
+ *	are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table(struct sep_device *sep,
+	unsigned long app_virt_in_addr,
+	unsigned long app_virt_out_addr,
+	u32 data_size,
+	u32 block_size,
+	dma_addr_t *lli_table_in_ptr,
+	dma_addr_t *lli_table_out_ptr,
+	u32 *in_num_entries_ptr,
+	u32 *out_num_entries_ptr,
+	u32 *table_data_size_ptr,
+	bool is_kva)
+
+{
+	int error = 0;
+	/* Array of pointers of page */
+	struct sep_lli_entry *lli_in_array;
+	/* Array of pointers of page */
+	struct sep_lli_entry *lli_out_array;
+
+	dev_dbg(&sep->pdev->dev, "sep_prepare_input_output_dma_table start\n");
+
+	if (data_size == 0) {
+		/* Prepare empty table for input and output */
+		sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
+			in_num_entries_ptr, table_data_size_ptr);
+
+		sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
+			out_num_entries_ptr, table_data_size_ptr);
+
+		goto update_dcb_counter;
+	}
+
+	/* Initialize the pages pointers */
+	sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
+	sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
+
+	/* Lock the pages of the buffer and translate them to pages */
+	if (is_kva == true) {
+		error = sep_lock_kernel_pages(sep, app_virt_in_addr,
+			data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
+
+		if (error) {
+			dev_warn(&sep->pdev->dev,
+				"lock kernel for in failed\n");
+			goto end_function;
+		}
+
+		error = sep_lock_kernel_pages(sep, app_virt_out_addr,
+			data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
+
+		if (error) {
+			dev_warn(&sep->pdev->dev,
+				"lock kernel for out failed\n");
+			goto end_function;
+		}
+	} else {
+		error = sep_lock_user_pages(sep, app_virt_in_addr,
+				data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
+		if (error) {
+			dev_warn(&sep->pdev->dev,
+				"sep_lock_user_pages for input virtual buffer failed\n");
+			goto end_function;
+		}
+
+		error = sep_lock_user_pages(sep, app_virt_out_addr,
+			data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
+
+		if (error) {
+			dev_warn(&sep->pdev->dev,
+				"sep_lock_user_pages for output virtual buffer failed\n");
+			goto end_function_free_lli_in;
+		}
+	}
+
+	dev_dbg(&sep->pdev->dev, "sep_in_num_pages is %x\n",
+		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
+	dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
+		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
+	dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
+		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
+
+	/* Call the function that creates the tables from the LLI arrays */
+	error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
+		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
+		lli_out_array,
+		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
+		block_size, lli_table_in_ptr, lli_table_out_ptr,
+		in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
+
+	if (error) {
+		dev_warn(&sep->pdev->dev,
+			"sep_construct_dma_tables_from_lli failed\n");
+		goto end_function_with_error;
+	}
+
+	kfree(lli_out_array);
+	kfree(lli_in_array);
+
+update_dcb_counter:
+	/* Update DCB counter */
+	sep->nr_dcb_creat++;
+	/* Fall through - free the lli entry arrays */
+	dev_dbg(&sep->pdev->dev, "in_num_entries_ptr is %08x\n",
+						*in_num_entries_ptr);
+	dev_dbg(&sep->pdev->dev, "out_num_entries_ptr is %08x\n",
+						*out_num_entries_ptr);
+	dev_dbg(&sep->pdev->dev, "table_data_size_ptr is %08x\n",
+						*table_data_size_ptr);
+
+	goto end_function;
+
+end_function_with_error:
+	kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
+	kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
+	kfree(lli_out_array);
+
+
+end_function_free_lli_in:
+	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
+	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
+	kfree(lli_in_array);
+
+end_function:
+	dev_dbg(&sep->pdev->dev,
+		"sep_prepare_input_output_dma_table end result = %d\n", error);
+
+	return error;
+
+}
+
+/**
+ *	sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
+ *	@app_in_address: unsigned long; for data buffer in (user space)
+ *	@app_out_address: unsigned long; for data buffer out (user space)
+ *	@data_in_size: u32; for size of data
+ *	@block_size: u32; for block size
+ *	@tail_block_size: u32; for size of tail block
+ *	@isapplet: bool; to indicate external app
+ *	@is_kva: bool; kernel buffer; only used for kernel crypto module
+ *
+ *	This function prepares the linked DMA tables and puts the
+ *	address of the linked list of tables into a DCB (data control
+ *	block), the address of which is known by the SEP hardware.
+ *	Note that all bus addresses that are passed to the SEP
+ *	are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
+	unsigned long  app_in_address,
+	unsigned long  app_out_address,
+	u32  data_in_size,
+	u32  block_size,
+	u32  tail_block_size,
+	bool isapplet,
+	bool	is_kva)
+{
+	int error = 0;
+	/* Size of tail */
+	u32 tail_size = 0;
+	/* Address of the created DCB table */
+	struct sep_dcblock *dcb_table_ptr = NULL;
+	/* The physical address of the first input DMA table */
+	dma_addr_t in_first_mlli_address = 0;
+	/* Number of entries in the first input DMA table */
+	u32  in_first_num_entries = 0;
+	/* The physical address of the first output DMA table */
+	dma_addr_t  out_first_mlli_address = 0;
+	/* Number of entries in the first output DMA table */
+	u32  out_first_num_entries = 0;
+	/* Data in the first input/output table */
+	u32  first_data_size = 0;
+
+	dev_dbg(&sep->pdev->dev, "prepare_input_output_dma_table_in_dcb start\n");
+
+	if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
+		/* No more DCBs to allocate */
+		dev_warn(&sep->pdev->dev, "no more DCBs available\n");
+		error = -ENOSPC;
+		goto end_function;
+	}
+
+	/* Allocate new DCB */
+	dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
+		SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
+		(sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
+
+	/* Set the default values in the DCB */
+	dcb_table_ptr->input_mlli_address = 0;
+	dcb_table_ptr->input_mlli_num_entries = 0;
+	dcb_table_ptr->input_mlli_data_size = 0;
+	dcb_table_ptr->output_mlli_address = 0;
+	dcb_table_ptr->output_mlli_num_entries = 0;
+	dcb_table_ptr->output_mlli_data_size = 0;
+	dcb_table_ptr->tail_data_size = 0;
+	dcb_table_ptr->out_vr_tail_pt = 0;
+
+	if (isapplet == true) {
+		tail_size = data_in_size % block_size;
+		if (tail_size) {
+			if (data_in_size < tail_block_size) {
+				dev_warn(&sep->pdev->dev, "data in size smaller than tail block size\n");
+				error = -ENOSPC;
+				goto end_function;
+			}
+			if (tail_block_size)
+				/*
+				 * In this case the tail size may be
+				 * bigger than the real block size
+				 */
+				tail_size = tail_block_size +
+					((data_in_size -
+						tail_block_size) % block_size);
+		}
+
+		/* Check if there is enough data for DMA operation */
+		if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
+			if (is_kva == true) {
+				memcpy(dcb_table_ptr->tail_data,
+					(void *)app_in_address, data_in_size);
+			} else {
+				if (copy_from_user(dcb_table_ptr->tail_data,
+					(void __user *)app_in_address,
+					data_in_size)) {
+					error = -EFAULT;
+					goto end_function;
+				}
+			}
+
+			dcb_table_ptr->tail_data_size = data_in_size;
+
+			/* Set the output user-space address for mem2mem op */
+			if (app_out_address)
+				dcb_table_ptr->out_vr_tail_pt =
+							(u32)app_out_address;
+
+			/*
+			 * Update both data length parameters in order to avoid
+			 * second data copy and allow building of empty mlli
+			 * tables
+			 */
+			tail_size = 0x0;
+			data_in_size = 0x0;
+		}
+		if (tail_size) {
+			if (is_kva == true) {
+				memcpy(dcb_table_ptr->tail_data,
+					(void *)(app_in_address + data_in_size -
+					tail_size), tail_size);
+			} else {
+				/* We have tail data - copy it to DCB */
+				if (copy_from_user(dcb_table_ptr->tail_data,
+					(void *)(app_in_address +
+					data_in_size - tail_size), tail_size)) {
+					error = -EFAULT;
+					goto end_function;
+				}
+			}
+			if (app_out_address)
+				/*
+				 * Calculate the output address
+				 * according to tail data size
+				 */
+				dcb_table_ptr->out_vr_tail_pt =
+					(u32)app_out_address + data_in_size
+					- tail_size;
+
+			/* Save the real tail data size */
+			dcb_table_ptr->tail_data_size = tail_size;
+			/*
+			 * Update the data size to exclude the tail;
+			 * this is the data that actually goes through DMA
+			 */
+			data_in_size = (data_in_size - tail_size);
+		}
+	}
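+	/*
+	 * At this point, for applet requests, any partial trailing block has
+	 * been copied into the DCB tail buffer and data_in_size covers only
+	 * the block-aligned portion that goes through DMA.
+	 */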
+	/* Check if we need to build only input table or input/output */
+	if (app_out_address) {
+		/* Prepare input/output tables */
+		error = sep_prepare_input_output_dma_table(sep,
+			app_in_address,
+			app_out_address,
+			data_in_size,
+			block_size,
+			&in_first_mlli_address,
+			&out_first_mlli_address,
+			&in_first_num_entries,
+			&out_first_num_entries,
+			&first_data_size,
+			is_kva);
+	} else {
+		/* Prepare input tables */
+		error = sep_prepare_input_dma_table(sep,
+			app_in_address,
+			data_in_size,
+			block_size,
+			&in_first_mlli_address,
+			&in_first_num_entries,
+			&first_data_size,
+			is_kva);
+	}
+
+	if (error) {
+		dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
+		goto end_function;
+	}
+
+	/* Set the DCB values */
+	dcb_table_ptr->input_mlli_address = in_first_mlli_address;
+	dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
+	dcb_table_ptr->input_mlli_data_size = first_data_size;
+	dcb_table_ptr->output_mlli_address = out_first_mlli_address;
+	dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
+	dcb_table_ptr->output_mlli_data_size = first_data_size;
+
+end_function:
+	dev_dbg(&sep->pdev->dev,
+		"sep_prepare_input_output_dma_table_in_dcb end\n");
+	return error;
+
+}
+
+
+/**
+ *	sep_create_sync_dma_tables_handler - create sync DMA tables
+ *	@sep: pointer to struct sep_device
+ *	@arg: pointer to struct bld_syn_tab_struct
+ *
+ *	Handle the request for creation of the DMA tables for the synchronic
+ *	symmetric operations (AES,DES). Note that all bus addresses that are
+ *	passed to the SEP are in 32 bit format; the SEP is a 32 bit device
+ */
+static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
+						unsigned long arg)
+{
+	int error = 0;
+
+	/* Command arguments */
+	struct bld_syn_tab_struct command_args;
+
+	dev_dbg(&sep->pdev->dev,
+		"sep_create_sync_dma_tables_handler start\n");
+
+	if (copy_from_user(&command_args, (void __user *)arg,
+					sizeof(struct bld_syn_tab_struct))) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
+						command_args.app_in_address);
+	dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
+						command_args.app_out_address);
+	dev_dbg(&sep->pdev->dev, "data_size is %u\n",
+						command_args.data_in_size);
+	dev_dbg(&sep->pdev->dev, "block_size is %u\n",
+						command_args.block_size);
+
+	/* Validate user parameters */
+	if (!command_args.app_in_address) {
+		error = -EINVAL;
+		goto end_function;
+	}
+
+	error = sep_prepare_input_output_dma_table_in_dcb(sep,
+		(unsigned long)command_args.app_in_address,
+		(unsigned long)command_args.app_out_address,
+		command_args.data_in_size,
+		command_args.block_size,
+		0x0,
+		false,
+		false);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_create_sync_dma_tables_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_free_dma_tables_and_dcb - free DMA tables and DCBs
+ *	@sep: pointer to struct sep_device
+ *	@isapplet: indicates external application (used for kernel access)
+ *	@is_kva: indicates kernel addresses (only used for kernel crypto)
+ *
+ *	This function frees the DMA tables and DCB
+ */
+static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
+	bool is_kva)
+{
+	int i = 0;
+	int error = 0;
+	int error_temp = 0;
+	struct sep_dcblock *dcb_table_ptr;
+	unsigned long pt_hold;
+	void *tail_pt;
+
+	dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb start\n");
+
+	if (isapplet == true) {
+		/* Set pointer to first DCB table */
+		dcb_table_ptr = (struct sep_dcblock *)
+			(sep->shared_addr +
+			SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
+
+		/* Go over each DCB and see if tail pointer must be updated */
+		for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
+			if (dcb_table_ptr->out_vr_tail_pt) {
+				pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt;
+				tail_pt = (void *)pt_hold;
+				if (is_kva == true) {
+					memcpy(tail_pt,
+						dcb_table_ptr->tail_data,
+						dcb_table_ptr->tail_data_size);
+				} else {
+					error_temp = copy_to_user(
+						tail_pt,
+						dcb_table_ptr->tail_data,
+						dcb_table_ptr->tail_data_size);
+				}
+				if (error_temp) {
+					/* Release the DMA resource */
+					error = -EFAULT;
+					break;
+				}
+			}
+		}
+	}
+	/* Free the output pages, if any */
+	sep_free_dma_table_data_handler(sep);
+
+	dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb end\n");
+	return error;
+}
+
+/**
+ *	sep_get_static_pool_addr_handler - get static pool address
+ *	@sep: pointer to struct sep_device
+ *
+ *	This function sets the bus and virtual addresses of the static pool
+ */
+static int sep_get_static_pool_addr_handler(struct sep_device *sep)
+{
+	u32 *static_pool_addr = NULL;
+
+	dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler start\n");
+
+	static_pool_addr = (u32 *)(sep->shared_addr +
+		SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
+
+	static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
+	static_pool_addr[1] = (u32)sep->shared_bus +
+		SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
+
+	dev_dbg(&sep->pdev->dev, "static pool: physical %x\n",
+		(u32)static_pool_addr[1]);
+
+	dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler end\n");
+
+	return 0;
+}
+
+/**
+ *	sep_start_handler - start device
+ *	@sep: pointer to struct sep_device
+ */
+static int sep_start_handler(struct sep_device *sep)
+{
+	unsigned long reg_val;
+	unsigned long error = 0;
+
+	dev_dbg(&sep->pdev->dev, "sep_start_handler start\n");
+
+	/* Wait in polling for message from SEP */
+	do {
+		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+	} while (!reg_val);
+
+	/* Check the value */
+	if (reg_val == 0x1)
+		/* Fatal error - read error status from GPR0 */
+		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+	dev_dbg(&sep->pdev->dev, "sep_start_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_check_sum_calc - checksum messages
+ *	@data: buffer to checksum
+ *	@length: buffer size
+ *
+ *	This function performs a checksum for messages that are sent
+ *	to the SEP.
+ */
+static u32 sep_check_sum_calc(u8 *data, u32 length)
+{
+	u32 sum = 0;
+	u16 *Tdata = (u16 *)data;
+
+	while (length > 1) {
+		/*  This is the inner loop */
+		sum += *Tdata++;
+		length -= 2;
+	}
+
+	/*  Add left-over byte, if any */
+	if (length > 0)
+		sum += *(u8 *)Tdata;
+
+	/*  Fold 32-bit sum to 16 bits */
+	while (sum >> 16)
+		sum = (sum & 0xffff) + (sum >> 16);
+
+	return ~sum & 0xFFFF;
+}
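+
+/*
+ * For example (illustrative only, assuming a little-endian host such as
+ * Medfield): for the four bytes { 0x01, 0x02, 0x03, 0x04 } the 16-bit
+ * words are 0x0201 and 0x0403, the sum is 0x0604 with no carry to fold,
+ * and the function returns ~0x0604 & 0xFFFF == 0xF9FB.
+ */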
+
+/**
+ *	sep_init_handler - handle the SEP initialization ioctl
+ *	@sep: pointer to struct sep_device
+ *	@arg: parameters from user space application
+ *
+ *	Handles the request for SEP initialization.
+ *	Note that this will go away for Medfield once the SCU
+ *	SEP initialization is complete.
+ *	Also note that the message to the SEP has components
+ *	from user space as well as components written by the driver.
+ *	This is because the portions of the message that pertain to
+ *	physical addresses must be set by the driver after the message
+ *	leaves custody of the user space application, for security
+ *	reasons.
+ */
+static int sep_init_handler(struct sep_device *sep, unsigned long arg)
+{
+	u32 message_buff[14];
+	u32 counter;
+	int error = 0;
+	u32 reg_val;
+	dma_addr_t new_base_addr;
+	unsigned long addr_hold;
+	struct init_struct command_args;
+
+	dev_dbg(&sep->pdev->dev, "sep_init_handler start\n");
+
+	/* Make sure that we have not initialized already */
+	reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+
+	if (reg_val != 0x2) {
+		error = SEP_ALREADY_INITIALIZED_ERR;
+		dev_warn(&sep->pdev->dev, "init; device already initialized\n");
+		goto end_function;
+	}
+
+	/* Only root can initialize */
+	if (!capable(CAP_SYS_ADMIN)) {
+		error = -EACCES;
+		goto end_function;
+	}
+
+	/* Copy in the parameters */
+	error = copy_from_user(&command_args, (void __user *)arg,
+		sizeof(struct init_struct));
+
+	if (error) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	/* Validate parameters */
+	if (!command_args.message_addr || !command_args.sep_sram_addr ||
+		command_args.message_size_in_words > 14) {
+		error = -EINVAL;
+		goto end_function;
+	}
+
+	/* Copy in the SEP init message */
+	addr_hold = (unsigned long)command_args.message_addr;
+	error = copy_from_user(message_buff,
+		(void __user *)addr_hold,
+		command_args.message_size_in_words*sizeof(u32));
+
+	if (error) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	/* Load resident, cache, and extapp firmware */
+	error = sep_load_firmware(sep);
+
+	if (error) {
+		dev_warn(&sep->pdev->dev,
+			"init; sep_load_firmware failed %x\n", error);
+		goto end_function;
+	}
+
+	/* Compute the base address */
+	new_base_addr = sep->shared_bus;
+
+	if (sep->resident_bus < new_base_addr)
+		new_base_addr = sep->resident_bus;
+
+	if (sep->cache_bus < new_base_addr)
+		new_base_addr = sep->cache_bus;
+
+	if (sep->dcache_bus < new_base_addr)
+		new_base_addr = sep->dcache_bus;
+
+	/* Put physical addresses in SEP message */
+	message_buff[3] = (u32)new_base_addr;
+	message_buff[4] = (u32)sep->shared_bus;
+	message_buff[6] = (u32)sep->resident_bus;
+	message_buff[7] = (u32)sep->cache_bus;
+	message_buff[8] = (u32)sep->dcache_bus;
+
+	message_buff[command_args.message_size_in_words - 1] = 0x0;
+	message_buff[command_args.message_size_in_words - 1] =
+		sep_check_sum_calc((u8 *)message_buff,
+		command_args.message_size_in_words*sizeof(u32));
+
+	/* Debug print of message */
+	for (counter = 0; counter < command_args.message_size_in_words;
+								counter++)
+		dev_dbg(&sep->pdev->dev, "init; SEP message word %d is %x\n",
+			counter, message_buff[counter]);
+
+	/* Tell the SEP the sram address */
+	sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, command_args.sep_sram_addr);
+
+	/* Push the message to the SEP */
+	for (counter = 0; counter < command_args.message_size_in_words;
+								counter++) {
+		sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR,
+						message_buff[counter]);
+		sep_wait_sram_write(sep);
+	}
+
+	/* Signal SEP that message is ready and to init */
+	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
+
+	/* Wait for acknowledge */
+	dev_dbg(&sep->pdev->dev, "init; waiting for msg response\n");
+
+	do {
+		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+	} while (!(reg_val & 0xFFFFFFFD));
+
+	if (reg_val == 0x1) {
+		dev_warn(&sep->pdev->dev, "init; device int failed\n");
+		error = sep_read_reg(sep, 0x8060);
+		dev_warn(&sep->pdev->dev, "init; sw monitor is %x\n", error);
+		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
+		dev_warn(&sep->pdev->dev, "init; error is %x\n", error);
+		goto end_function;
+	}
+	dev_dbg(&sep->pdev->dev, "init; end CC INIT, reg_val is %x\n", reg_val);
+
+	/* Signal SEP to zero the GPR3 */
+	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
+
+	/* Wait for response */
+	dev_dbg(&sep->pdev->dev, "init; waiting for zero set response\n");
+
+	do {
+		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
+	} while (reg_val != 0);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "init is done\n");
+	return error;
+}
+
+/**
+ *	sep_end_transaction_handler - end transaction
+ *	@sep: pointer to struct sep_device
+ *
+ *	This API handles the end transaction request
+ */
+static int sep_end_transaction_handler(struct sep_device *sep)
+{
+	dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler start\n");
+
+	/* Clear the data pool pointers Token */
+	memset((void *)(sep->shared_addr +
+		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
+		0, sep->num_of_data_allocations*2*sizeof(u32));
+
+	/* Check that all the DMA resources were freed */
+	sep_free_dma_table_data_handler(sep);
+
+	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
+
+	/*
+	 * We are now through with the transaction. Let's
+	 * allow other processes who have the device open
+	 * to perform transactions
+	 */
+	mutex_lock(&sep->sep_mutex);
+	sep->pid_doing_transaction = 0;
+	mutex_unlock(&sep->sep_mutex);
+	/* Raise event for stuck contexts */
+	wake_up(&sep->event);
+
+	dev_dbg(&sep->pdev->dev, "waking up event\n");
+	dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler end\n");
+
+	return 0;
+}
+
+/**
+ *	sep_prepare_dcb_handler - prepare a control block
+ *	@sep: pointer to struct sep_device
+ *	@arg: pointer to user parameters
+ *
+ *	This function retrieves the DCB parameters from user space and
+ *	prepares the DCB and the DMA tables for the requested operation.
+ */
+static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
+{
+	int error;
+	/* Command arguments */
+	struct build_dcb_struct command_args;
+
+	dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
+
+	/* Get the command arguments */
+	if (copy_from_user(&command_args, (void __user *)arg,
+					sizeof(struct build_dcb_struct))) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
+						command_args.app_in_address);
+	dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
+						command_args.app_out_address);
+	dev_dbg(&sep->pdev->dev, "data_size is %x\n",
+						command_args.data_in_size);
+	dev_dbg(&sep->pdev->dev, "block_size is %x\n",
+						command_args.block_size);
+	dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
+						command_args.tail_block_size);
+
+	error = sep_prepare_input_output_dma_table_in_dcb(sep,
+		(unsigned long)command_args.app_in_address,
+		(unsigned long)command_args.app_out_address,
+		command_args.data_in_size, command_args.block_size,
+		command_args.tail_block_size, true, false);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler end\n");
+	return error;
+
+}
+
+/**
+ *	sep_free_dcb_handler - free control block resources
+ *	@sep: pointer to struct sep_device
+ *
+ *	This function frees the DCB resources and updates the needed
+ *	user-space buffers.
+ */
+static int sep_free_dcb_handler(struct sep_device *sep)
+{
+	int error;
+
+	dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler start\n");
+	dev_dbg(&sep->pdev->dev, "num of DCBs %x\n", sep->nr_dcb_creat);
+
+	error = sep_free_dma_tables_and_dcb(sep, false, false);
+
+	dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_rar_prepare_output_msg_handler - prepare an output message
+ *	@sep: pointer to struct sep_device
+ *	@arg: pointer to user parameters
+ *
+ *	This function will retrieve the RAR buffer physical addresses, type
+ *	& size corresponding to the RAR handles provided in the buffers vector.
+ */
+static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
+	unsigned long arg)
+{
+	int error = 0;
+	/* Command args */
+	struct rar_hndl_to_bus_struct command_args;
+	struct RAR_buffer rar_buf;
+	/* Bus address */
+	dma_addr_t  rar_bus = 0;
+	/* Holds the RAR address in the system memory offset */
+	u32 *rar_addr;
+
+	dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
+
+	/* Copy the data */
+	if (copy_from_user(&command_args, (void __user *)arg,
+						sizeof(command_args))) {
+		error = -EFAULT;
+		goto end_function;
+	}
+
+	/* Call to translation function only if user handle is not NULL */
+	if (command_args.rar_handle) {
+		memset(&rar_buf, 0, sizeof(rar_buf));
+		rar_buf.info.handle = (u32)command_args.rar_handle;
+
+		if (rar_handle_to_bus(&rar_buf, 1) != 1) {
+			dev_dbg(&sep->pdev->dev, "rar_handle_to_bus failure\n");
+			error = -EFAULT;
+			goto end_function;
+		}
+		rar_bus = rar_buf.bus_address;
+	}
+	dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
+
+	/* Set value in the SYSTEM MEMORY offset */
+	rar_addr = (u32 *)(sep->shared_addr +
+		SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
+
+	/* Copy the physical address to the System Area for the SEP */
+	rar_addr[0] = SEP_RAR_VAL_TOKEN;
+	rar_addr[1] = rar_bus;
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler end\n");
+	return error;
+}
+
+/**
+ *	sep_realloc_ext_cache_handler - report location of extcache
+ *	@sep: pointer to struct sep_device
+ *	@arg: pointer to user parameters
+ *
+ *	This function tells the SEP where the extapp is located
+ */
+static int sep_realloc_ext_cache_handler(struct sep_device *sep,
+	unsigned long arg)
+{
+	/* Holds the new ext cache address in the system memory offset */
+	u32 *system_addr;
+
+	/* Set value in the SYSTEM MEMORY offset */
+	system_addr = (u32 *)(sep->shared_addr +
+		SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES);
+
+	/* Copy the physical address to the System Area for the SEP */
+	system_addr[0] = SEP_EXT_CACHE_ADDR_VAL_TOKEN;
+	dev_dbg(&sep->pdev->dev, "ext cache init; system addr 0 is %x\n",
+							system_addr[0]);
+	system_addr[1] = sep->extapp_bus;
+	dev_dbg(&sep->pdev->dev, "ext cache init; system addr 1 is %x\n",
+							system_addr[1]);
+
+	return 0;
+}
+
+/**
+ *	sep_ioctl - ioctl api
+ *	@filp: pointer to struct file
+ *	@cmd: command
+ *	@arg: pointer to argument structure
+ *
+ *	Implement the ioctl methods available on the SEP device.
+ */
+static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int error = 0;
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "ioctl start\n");
+
+	dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
+
+	/* Make sure we own this device */
+	mutex_lock(&sep->sep_mutex);
+	if ((current->pid != sep->pid_doing_transaction) &&
+				(sep->pid_doing_transaction != 0)) {
+		dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
+		mutex_unlock(&sep->sep_mutex);
+		error = -EACCES;
+		goto end_function;
+	}
+
+	mutex_unlock(&sep->sep_mutex);
+
+	/* Check that the command is for SEP device */
+	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+		error = -ENOTTY;
+		goto end_function;
+	}
+
+	/* Lock to prevent the daemon from interfering with the operation */
+	mutex_lock(&sep->ioctl_mutex);
+
+	switch (cmd) {
+	case SEP_IOCSENDSEPCOMMAND:
+		/* Send command to SEP */
+		error = sep_send_command_handler(sep);
+		break;
+	case SEP_IOCALLOCDATAPOLL:
+		/* Allocate data pool */
+		error = sep_allocate_data_pool_memory_handler(sep, arg);
+		break;
+	case SEP_IOCCREATESYMDMATABLE:
+		/* Create DMA table for synchronic operation */
+		error = sep_create_sync_dma_tables_handler(sep, arg);
+		break;
+	case SEP_IOCFREEDMATABLEDATA:
+		/* Free the pages */
+		error = sep_free_dma_table_data_handler(sep);
+		break;
+	case SEP_IOCSEPSTART:
+		/* Start command to SEP */
+		if (sep->pdev->revision == 0) /* Only for old chip */
+			error = sep_start_handler(sep);
+		else
+			error = -EPERM; /* Not permitted on new chip */
+		break;
+	case SEP_IOCSEPINIT:
+		/* Init command to SEP */
+		if (sep->pdev->revision == 0) /* Only for old chip */
+			error = sep_init_handler(sep, arg);
+		else
+			error = -EPERM; /* Not permitted on new chip */
+		break;
+	case SEP_IOCGETSTATICPOOLADDR:
+		/* Inform the SEP the bus address of the static pool */
+		error = sep_get_static_pool_addr_handler(sep);
+		break;
+	case SEP_IOCENDTRANSACTION:
+		error = sep_end_transaction_handler(sep);
+		break;
+	case SEP_IOCREALLOCEXTCACHE:
+		if (sep->pdev->revision == 0) /* Only for old chip */
+			error = sep_realloc_ext_cache_handler(sep, arg);
+		else
+			error = -EPERM; /* Not permitted on new chip */
+		break;
+	case SEP_IOCRARPREPAREMESSAGE:
+		error = sep_rar_prepare_output_msg_handler(sep, arg);
+		break;
+	case SEP_IOCPREPAREDCB:
+		error = sep_prepare_dcb_handler(sep, arg);
+		break;
+	case SEP_IOCFREEDCB:
+		error = sep_free_dcb_handler(sep);
+		break;
+	default:
+		dev_dbg(&sep->pdev->dev, "invalid ioctl %x\n", cmd);
+		error = -ENOTTY;
+		break;
+	}
+	mutex_unlock(&sep->ioctl_mutex);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "ioctl end\n");
+	return error;
+}
+
+/**
+ *	sep_singleton_ioctl - ioctl api for singleton interface
+ *	@filp: pointer to struct file
+ *	@cmd: command
+ *	@arg: pointer to argument structure
+ *
+ *	Implement the additional ioctls for the singleton device
+ */
+static long sep_singleton_ioctl(struct file  *filp, u32 cmd, unsigned long arg)
+{
+	long error = 0;
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "singleton_ioctl start\n");
+	dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
+
+	/* Check that the command is for the SEP device */
+	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+		error =  -ENOTTY;
+		goto end_function;
+	}
+
+	/* Make sure we own this device */
+	mutex_lock(&sep->sep_mutex);
+	if ((current->pid != sep->pid_doing_transaction) &&
+				(sep->pid_doing_transaction != 0)) {
+		dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
+		mutex_unlock(&sep->sep_mutex);
+		error = -EACCES;
+		goto end_function;
+	}
+
+	mutex_unlock(&sep->sep_mutex);
+
+	switch (cmd) {
+	case SEP_IOCTLSETCALLERID:
+		mutex_lock(&sep->ioctl_mutex);
+		error = sep_set_caller_id_handler(sep, arg);
+		mutex_unlock(&sep->ioctl_mutex);
+		break;
+	default:
+		error = sep_ioctl(filp, cmd, arg);
+		break;
+	}
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "singleton ioctl end\n");
+	return error;
+}
+
+/**
+ *	sep_request_daemon_ioctl - ioctl for daemon
+ *	@filp: pointer to struct file
+ *	@cmd: command
+ *	@arg: pointer to argument structure
+ *
+ *	Called by the request daemon to perform ioctls on the daemon device
+ */
+static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
+	unsigned long arg)
+{
+	long error;
+	struct sep_device *sep = filp->private_data;
+
+	dev_dbg(&sep->pdev->dev, "daemon ioctl: start\n");
+	dev_dbg(&sep->pdev->dev, "daemon ioctl: cmd is %x\n", cmd);
+
+	/* Check that the command is for SEP device */
+	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
+		error = -ENOTTY;
+		goto end_function;
+	}
+
+	/* Only one process can access ioctl at any given time */
+	mutex_lock(&sep->ioctl_mutex);
+
+	switch (cmd) {
+	case SEP_IOCSENDSEPRPLYCOMMAND:
+		/* Send reply command to SEP */
+		error = sep_req_daemon_send_reply_command_handler(sep);
+		break;
+	case SEP_IOCENDTRANSACTION:
+		/*
+		 * End req daemon transaction, do nothing
+		 * will be removed upon update in middleware
+		 * API library
+		 */
+		error = 0;
+		break;
+	default:
+		dev_dbg(&sep->pdev->dev, "daemon ioctl: no such IOCTL\n");
+		error = -ENOTTY;
+	}
+	mutex_unlock(&sep->ioctl_mutex);
+
+end_function:
+	dev_dbg(&sep->pdev->dev, "daemon ioctl: end\n");
+	return error;
+
+}
+
+/**
+ *	sep_inthandler - interrupt handler
+ *	@irq: interrupt
+ *	@dev_id: device id
+ */
+static irqreturn_t sep_inthandler(int irq, void *dev_id)
+{
+	irqreturn_t int_error = IRQ_HANDLED;
+	unsigned long lck_flags;
+	u32 reg_val, reg_val2 = 0;
+	struct sep_device *sep = dev_id;
+
+	/* Read the IRR register to check if this is SEP interrupt */
+	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
+	dev_dbg(&sep->pdev->dev, "SEP Interrupt - reg is %08x\n", reg_val);
+
+	if (reg_val & (0x1 << 13)) {
+		/* Lock and update the counter of reply messages */
+		spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
+		sep->reply_ct++;
+		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
+
+		dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
+					sep->send_ct, sep->reply_ct);
+
+		/* Is this printf or daemon request? */
+		reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+		dev_dbg(&sep->pdev->dev,
+			"SEP Interrupt - reg2 is %08x\n", reg_val2);
+
+		if ((reg_val2 >> 30) & 0x1) {
+			dev_dbg(&sep->pdev->dev, "int: printf request\n");
+			wake_up(&sep->event_request_daemon);
+		} else if (reg_val2 >> 31) {
+			dev_dbg(&sep->pdev->dev, "int: daemon request\n");
+			wake_up(&sep->event_request_daemon);
+		} else {
+			dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
+			wake_up(&sep->event);
+		}
+	} else {
+		dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
+		int_error = IRQ_NONE;
+	}
+	if (int_error == IRQ_HANDLED)
+		sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
+
+	return int_error;
+}
+
+/**
+ *	sep_reconfig_shared_area - reconfigure shared area
+ *	@sep: pointer to struct sep_device
+ *
+ *	Reconfig the shared area between HOST and SEP - needed in case
+ *	the DX_CC_Init function was called before OS loading.
+ */
+static int sep_reconfig_shared_area(struct sep_device *sep)
+{
+	int ret_val;
+
+	/* Used to limit waiting for SEP */
+	unsigned long end_time;
+
+	dev_dbg(&sep->pdev->dev, "reconfig shared area start\n");
+
+	/* Send the new SHARED MESSAGE AREA to the SEP */
+	dev_dbg(&sep->pdev->dev, "sending %08llx to sep\n",
+				(unsigned long long)sep->shared_bus);
+
+	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
+
+	/* Poll for SEP response */
+	ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+	end_time = jiffies + (WAIT_TIME * HZ);
+
+	while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
+		(ret_val != sep->shared_bus))
+		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
+
+	/* Check the return value (register) */
+	if (ret_val != sep->shared_bus) {
+		dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
+		dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
+		ret_val = -ENOMEM;
+	} else
+		ret_val = 0;
+
+	dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
+	return ret_val;
+}
+
+/* File operation for singleton SEP operations */
+static const struct file_operations singleton_file_operations = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = sep_singleton_ioctl,
+	.poll = sep_poll,
+	.open = sep_singleton_open,
+	.release = sep_singleton_release,
+	.mmap = sep_mmap,
+};
+
+/* File operation for daemon operations */
+static const struct file_operations daemon_file_operations = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = sep_request_daemon_ioctl,
+	.poll = sep_request_daemon_poll,
+	.open = sep_request_daemon_open,
+	.release = sep_request_daemon_release,
+	.mmap = sep_request_daemon_mmap,
+};
+
+/* The files operations structure of the driver */
+static const struct file_operations sep_file_operations = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = sep_ioctl,
+	.poll = sep_poll,
+	.open = sep_open,
+	.release = sep_release,
+	.mmap = sep_mmap,
+};
+
+/**
+ *	sep_register_driver_with_fs - register misc devices
+ *	@sep: pointer to struct sep_device
+ *
+ *	This function registers the driver with the file system
+ */
+static int sep_register_driver_with_fs(struct sep_device *sep)
+{
+	int ret_val;
+
+	sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
+	sep->miscdev_sep.name = SEP_DEV_NAME;
+	sep->miscdev_sep.fops = &sep_file_operations;
+
+	sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
+	sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
+	sep->miscdev_singleton.fops = &singleton_file_operations;
+
+	sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
+	sep->miscdev_daemon.name = SEP_DEV_DAEMON;
+	sep->miscdev_daemon.fops = &daemon_file_operations;
+
+	ret_val = misc_register(&sep->miscdev_sep);
+	if (ret_val) {
+		dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
+			ret_val);
+		return ret_val;
+	}
+
+	ret_val = misc_register(&sep->miscdev_singleton);
+	if (ret_val) {
+		dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
+			ret_val);
+		misc_deregister(&sep->miscdev_sep);
+		return ret_val;
+	}
+
+	ret_val = misc_register(&sep->miscdev_daemon);
+	if (ret_val) {
+		dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
+			ret_val);
+		misc_deregister(&sep->miscdev_sep);
+		misc_deregister(&sep->miscdev_singleton);
+
+		return ret_val;
+	}
+	return ret_val;
+}
+
+
+/**
+ *	sep_probe - probe a matching PCI device
+ *	@pdev: pci_device
+ *	@ent: pci_device_id
+ *
+ *	Attempt to set up and configure a SEP device that has been
+ *	discovered by the PCI layer.
+ */
+static int __devinit sep_probe(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	int error = 0;
+	struct sep_device *sep;
+
+	pr_debug("SEP pci probe starting\n");
+	if (sep_dev != NULL) {
+		dev_warn(&pdev->dev, "only one SEP supported.\n");
+		return -EBUSY;
+	}
+
+	/* Enable the device */
+	error = pci_enable_device(pdev);
+	if (error) {
+		dev_warn(&pdev->dev, "error enabling pci device\n");
+		goto end_function;
+	}
+
+	/* Allocate the sep_device structure for this device */
+	sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
+	if (sep_dev == NULL) {
+		dev_warn(&pdev->dev,
+			"can't allocate the sep_device structure\n");
+		error = -ENOMEM;
+		goto end_function_disable_device;
+	}
+
+	/*
+	 * We're going to use another variable for actually
+	 * working with the device; this way, if we have
+	 * multiple devices in the future, it would be easier
+	 * to make appropriate changes
+	 */
+	sep = sep_dev;
+
+	sep->pdev = pci_dev_get(pdev);
+
+	init_waitqueue_head(&sep->event);
+	init_waitqueue_head(&sep->event_request_daemon);
+	spin_lock_init(&sep->snd_rply_lck);
+	mutex_init(&sep->sep_mutex);
+	mutex_init(&sep->ioctl_mutex);
+
+	dev_dbg(&sep->pdev->dev, "PCI obtained, device being prepared\n");
+	dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
+
+	/* Set up our register area */
+	sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
+	if (!sep->reg_physical_addr) {
+		dev_warn(&sep->pdev->dev, "Error getting register start\n");
+		error = -ENODEV;
+		goto end_function_free_sep_dev;
+	}
+
+	sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
+	if (!sep->reg_physical_end) {
+		dev_warn(&sep->pdev->dev, "Error getting register end\n");
+		error = -ENODEV;
+		goto end_function_free_sep_dev;
+	}
+
+	sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
+		(size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
+	if (!sep->reg_addr) {
+		dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
+		error = -ENODEV;
+		goto end_function_free_sep_dev;
+	}
+
+	dev_dbg(&sep->pdev->dev,
+		"Register area start %llx end %llx virtual %p\n",
+		(unsigned long long)sep->reg_physical_addr,
+		(unsigned long long)sep->reg_physical_end,
+		sep->reg_addr);
+
+	/* Allocate the shared area */
+	sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
+		SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
+		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
+		SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
+		SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
+
+	if (sep_map_and_alloc_shared_area(sep)) {
+		error = -ENOMEM;
+		/* Allocation failed */
+		goto end_function_error;
+	}
+
+	sep->rar_size = FAKE_RAR_SIZE;
+	sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
+		sep->rar_size, &sep->rar_bus, GFP_KERNEL);
+	if (sep->rar_addr == NULL) {
+		dev_warn(&sep->pdev->dev, "can't allocate mfld rar\n");
+		error = -ENOMEM;
+		goto end_function_deallocate_sep_shared_area;
+	}
+
+	dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx,"
+		" size is %zx\n", sep->rar_addr,
+		(unsigned long long)sep->rar_bus,
+		sep->rar_size);
+
+	dev_dbg(&sep->pdev->dev, "about to write IMR and ICR REG_ADDR\n");
+
+	/* Clear ICR register */
+	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
+
+	/* Set the IMR register - open only GPR 2 */
+	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
+
+	/* Read send/receive counters from SEP */
+	sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
+	sep->reply_ct &= 0x3FFFFFFF;
+	sep->send_ct = sep->reply_ct;
+
+	dev_dbg(&sep->pdev->dev, "about to call request_irq\n");
+	/* Get the interrupt line */
+	error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
+		"sep_driver", sep);
+
+	if (error)
+		goto end_function_dealloc_rar;
+
+	/* The new chip requires a shared area reconfiguration */
+	if (sep->pdev->revision == 4) { /* Only for new chip */
+		error = sep_reconfig_shared_area(sep);
+		if (error)
+			goto end_function_free_irq;
+	}
+	/* Finally magic up the device nodes */
+	/* Register driver with the fs */
+	error = sep_register_driver_with_fs(sep);
+	if (error == 0)
+		/* Success */
+		return 0;
+
+end_function_free_irq:
+	free_irq(pdev->irq, sep);
+
+end_function_dealloc_rar:
+	if (sep->rar_addr)
+		dma_free_coherent(&sep->pdev->dev, sep->rar_size,
+			sep->rar_addr, sep->rar_bus);
+	goto end_function;
+
+end_function_deallocate_sep_shared_area:
+	/* De-allocate shared area */
+	sep_unmap_and_free_shared_area(sep);
+
+end_function_error:
+	iounmap(sep->reg_addr);
+
+end_function_free_sep_dev:
+	pci_dev_put(sep_dev->pdev);
+	kfree(sep_dev);
+	sep_dev = NULL;
+
+end_function_disable_device:
+	pci_disable_device(pdev);
+
+end_function:
+	return error;
+}
+
+static void sep_remove(struct pci_dev *pdev)
+{
+	struct sep_device *sep = sep_dev;
+
+	/* Unregister from fs */
+	misc_deregister(&sep->miscdev_sep);
+	misc_deregister(&sep->miscdev_singleton);
+	misc_deregister(&sep->miscdev_daemon);
+
+	/* Free the irq */
+	free_irq(sep->pdev->irq, sep);
+
+	/* Free the shared area  */
+	sep_unmap_and_free_shared_area(sep_dev);
+	iounmap((void *) sep_dev->reg_addr);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
+
+/* Field for registering driver to PCI device */
+static struct pci_driver sep_pci_driver = {
+	.name = "sep_sec_driver",
+	.id_table = sep_pci_id_tbl,
+	.probe = sep_probe,
+	.remove = sep_remove
+};
+
+
+/**
+ *	sep_init - init function
+ *
+ *	Module load time. Register the PCI device driver.
+ */
+static int __init sep_init(void)
+{
+	return pci_register_driver(&sep_pci_driver);
+}
+
+
+/**
+ *	sep_exit - called to unload driver
+ *
+ *	Drop the misc devices then remove and unmap the various resources
+ *	that are not released by the driver remove method.
+ */
+static void __exit sep_exit(void)
+{
+	pci_unregister_driver(&sep_pci_driver);
+}
+
+
+module_init(sep_init);
+module_exit(sep_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
new file mode 100644
index 0000000..fbbfa23
--- /dev/null
+++ b/drivers/staging/sep/sep_driver_api.h
@@ -0,0 +1,297 @@
+/*
+ *
+ *  sep_driver_api.h - Security Processor Driver api definitions
+ *
+ *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ *  Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *  CONTACTS:
+ *
+ *  Mark Allyn		mark.a.allyn@intel.com
+ *  Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ *  CHANGES:
+ *
+ *  2010.09.14  Upgrade to Medfield
+ *
+ */
+
+#ifndef __SEP_DRIVER_API_H__
+#define __SEP_DRIVER_API_H__
+
+/* Type of request from device */
+#define SEP_DRIVER_SRC_REPLY		1
+#define SEP_DRIVER_SRC_REQ		2
+#define SEP_DRIVER_SRC_PRINTF		3
+
+
+/*-------------------------------------------
+    TYPEDEFS
+----------------------------------------------*/
+
+/*
+ * Note that several members of these structures are only here
+ * for compatibility with the middleware; they are not used
+ * by this driver.
+ * All user space buffer addresses are set to aligned u64
+ * in order to ensure compatibility with 64 bit systems
+ */
+
+/*
+  init command struct; this will go away when SCU does init
+*/
+struct init_struct {
+	/* address that SEP can access for message */
+	aligned_u64   message_addr;
+
+	/* message size */
+	u32   message_size_in_words;
+
+	/* offset of the init message in the sep sram */
+	u32   sep_sram_addr;
+
+	/* -not used- resident size in bytes*/
+	u32   unused_resident_size_in_bytes;
+
+	/* -not used- cache size in bytes*/
+	u32   unused_cache_size_in_bytes;
+
+	/* -not used- ext cache current address */
+	aligned_u64   unused_extcache_addr;
+
+	/* -not used- ext cache size in bytes*/
+	u32   unused_extcache_size_in_bytes;
+};
+
+struct realloc_ext_struct {
+	/* -not used- current external cache address */
+	aligned_u64   unused_ext_cache_addr;
+
+	/* -not used- external cache size in bytes*/
+	u32   unused_ext_cache_size_in_bytes;
+};
+
+struct alloc_struct {
+	/* offset from start of shared pool area */
+	u32  offset;
+	/* number of bytes to allocate */
+	u32  num_bytes;
+};
+
+/*
+	Note that all app addresses are cast as u32; the sep
+	middleware sends them as fixed 32 bit words
+*/
+struct bld_syn_tab_struct {
+	/* address value of the data in (user space addr) */
+	aligned_u64 app_in_address;
+
+	/* size of data in */
+	u32 data_in_size;
+
+	/* address of the data out (user space addr) */
+	aligned_u64 app_out_address;
+
+	/* the size of the block of the operation - if needed,
+	   every table will be modulo this parameter */
+	u32 block_size;
+
+	/* -not used- distinct user/kernel layout */
+	bool isKernelVirtualAddress;
+
+};
+
+/* command struct for getting caller id value and address */
+struct caller_id_struct {
+	/* pid of the process */
+	u32 pid;
+	/* virtual address of the caller id hash */
+	aligned_u64 callerIdAddress;
+	/* caller id hash size in bytes */
+	u32 callerIdSizeInBytes;
+};
+
+/*
+  structure that represents DCB
+*/
+struct sep_dcblock {
+	/* physical address of the first input mlli */
+	u32	input_mlli_address;
+	/* num of entries in the first input mlli */
+	u32	input_mlli_num_entries;
+	/* size of data in the first input mlli */
+	u32	input_mlli_data_size;
+	/* physical address of the first output mlli */
+	u32	output_mlli_address;
+	/* num of entries in the first output mlli */
+	u32	output_mlli_num_entries;
+	/* size of data in the first output mlli */
+	u32	output_mlli_data_size;
+	/* pointer to the output virtual tail */
+	u32	out_vr_tail_pt;
+	/* size of tail data */
+	u32	tail_data_size;
+	/* input tail data array */
+	u8	tail_data[64];
+};
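+
+/*
+ * The DCBs live in the shared area starting at
+ * SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES; the driver creates up to
+ * SEP_MAX_NUM_SYNC_DMA_OPS of them per transaction (see
+ * sep_prepare_input_output_dma_table_in_dcb()).
+ */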
+
+struct sep_caller_id_entry {
+	int pid;
+	unsigned char callerIdHash[SEP_CALLER_ID_HASH_SIZE_IN_BYTES];
+};
+
+/*
+	command structure for building dcb block (currently for ext app only)
+*/
+struct build_dcb_struct {
+	/* address value of the data in */
+	aligned_u64 app_in_address;
+	/* size of data in */
+	u32  data_in_size;
+	/* address of the data out */
+	aligned_u64 app_out_address;
+	/* the size of the block of the operation - if needed,
+	every table will be modulo this parameter */
+	u32  block_size;
+	/* the size of the tail block of the operation - if needed,
+	every table will be modulo this parameter */
+	u32  tail_block_size;
+};
+
+/*
+ * struct sep_dma_map
+ *
+ * Structure that contains all information needed for mapping the user
+ * pages or kernel buffers for dma operations
+ */
+struct sep_dma_map {
+	/* mapped dma address */
+	dma_addr_t    dma_addr;
+	/* size of the mapped data */
+	size_t        size;
+};
+
+struct sep_dma_resource {
+	/* array of pointers to the pages that represent
+	input data for the synchronic DMA action */
+	struct page **in_page_array;
+
+	/* array of pointers to the pages that represent out
+	data for the synchronic DMA action */
+	struct page **out_page_array;
+
+	/* number of pages in the sep_in_page_array */
+	u32 in_num_pages;
+
+	/* number of pages in the sep_out_page_array */
+	u32 out_num_pages;
+
+	/* map array of the input data */
+	struct sep_dma_map *in_map_array;
+
+	/* map array of the output data */
+	struct sep_dma_map *out_map_array;
+
+	/* number of entries in the input map array */
+	u32 in_map_num_entries;
+
+	/* number of entries in the output map array */
+	u32 out_map_num_entries;
+};
+
+
+/* command struct for translating rar handle to bus address
+   and setting it at predefined location */
+struct rar_hndl_to_bus_struct {
+
+	/* rar handle */
+	aligned_u64 rar_handle;
+};
+
+/*
+  structure that represents one entry in the DMA LLI table
+*/
+struct sep_lli_entry {
+	/* physical address */
+	u32 bus_address;
+
+	/* block size */
+	u32 block_size;
+};
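+
+/*
+ * Note that in the info (last) entry of each LLI table the driver packs
+ * the number of table entries into the top byte of block_size, i.e.
+ * block_size = (num_entries << 24) | table_data_size, as done in
+ * sep_construct_dma_tables_from_lli().
+ */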
+
+/*----------------------------------------------------------------
+	IOCTL command defines
+	-----------------------------------------------------------------*/
+
+/* magic number of the sep IOCTL commands */
+#define SEP_IOC_MAGIC_NUMBER	                     's'
+
+/* sends interrupt to sep that message is ready */
+#define SEP_IOCSENDSEPCOMMAND	 \
+	_IO(SEP_IOC_MAGIC_NUMBER, 0)
+
+/* sends interrupt to sep that the reply message is ready */
+#define SEP_IOCSENDSEPRPLYCOMMAND	 \
+	_IO(SEP_IOC_MAGIC_NUMBER, 1)
+
+/* allocate memory in data pool */
+#define SEP_IOCALLOCDATAPOLL	\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 2, struct alloc_struct)
+
+/* create sym dma lli tables */
+#define SEP_IOCCREATESYMDMATABLE	\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 5, struct bld_syn_tab_struct)
+
+/* free dynamic data allocated during table creation */
+#define SEP_IOCFREEDMATABLEDATA	 \
+	_IO(SEP_IOC_MAGIC_NUMBER, 7)
+
+/* get the static pool area addresses (physical and virtual) */
+#define SEP_IOCGETSTATICPOOLADDR	\
+	_IO(SEP_IOC_MAGIC_NUMBER, 8)
+
+/* start sep command */
+#define SEP_IOCSEPSTART	 \
+	_IO(SEP_IOC_MAGIC_NUMBER, 12)
+
+/* init sep command */
+#define SEP_IOCSEPINIT	\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 13, struct init_struct)
+
+/* end transaction command */
+#define SEP_IOCENDTRANSACTION	 \
+	_IO(SEP_IOC_MAGIC_NUMBER, 15)
+
+/* reallocate external app; unused structure still needed for
+ * compatibility with middleware */
+#define SEP_IOCREALLOCEXTCACHE	\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 18, struct realloc_ext_struct)
+
+#define SEP_IOCRARPREPAREMESSAGE	\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 20, struct rar_hndl_to_bus_struct)
+
+#define SEP_IOCTLSETCALLERID	\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 34, struct caller_id_struct)
+
+#define SEP_IOCPREPAREDCB					\
+	_IOW(SEP_IOC_MAGIC_NUMBER, 35, struct build_dcb_struct)
+
+#define SEP_IOCFREEDCB					\
+	_IO(SEP_IOC_MAGIC_NUMBER, 36)
+
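+/*
+ * Illustrative user space flow (a sketch only; the exact sequence and the
+ * device node names depend on the middleware driving this device):
+ *
+ *	int fd = open("/dev/sep_sec_driver", O_RDWR);
+ *	struct build_dcb_struct dcb = { ... };
+ *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb);
+ *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);
+ *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
+ */
+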
+#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
new file mode 100644
index 0000000..b18625d
--- /dev/null
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -0,0 +1,239 @@
+/*
+ *
+ *  sep_driver_config.h - Security Processor Driver configuration
+ *
+ *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ *  Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *  CONTACTS:
+ *
+ *  Mark Allyn		mark.a.allyn@intel.com
+ *  Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ *  CHANGES:
+ *
+ *  2010.06.26	Upgrade to Medfield
+ *
+ */
+
+#ifndef __SEP_DRIVER_CONFIG_H__
+#define __SEP_DRIVER_CONFIG_H__
+
+
+/*--------------------------------------
+  DRIVER CONFIGURATION FLAGS
+  -------------------------------------*/
+
+/* if the flag is on, the driver runs in polling mode rather
+	than interrupt mode */
+#define SEP_DRIVER_POLLING_MODE                         0
+
+/* flag which defines if the shared area address should be
+	reconfigured (sent to the SEP anew) during init of the driver */
+#define SEP_DRIVER_RECONFIG_MESSAGE_AREA                0
+
+/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
+#define SEP_DRIVER_ARM_DEBUG_MODE                       0
+
+/*-------------------------------------------
+	INTERNAL DATA CONFIGURATION
+	-------------------------------------------*/
+
+/* flag for the input array */
+#define SEP_DRIVER_IN_FLAG                              0
+
+/* flag for output array */
+#define SEP_DRIVER_OUT_FLAG                             1
+
+/* maximum number of entries in one LLI table */
+#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP             31
+
+/* minimum data size of the MLLI table */
+#define SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE		16
+
+/* flag that signifies that the lock is
+currently held by the process (struct file) */
+#define SEP_DRIVER_OWN_LOCK_FLAG                        1
+
+/* flag that signifies that the lock is currently NOT
+held by the process (struct file) */
+#define SEP_DRIVER_DISOWN_LOCK_FLAG                     0
+
+/* indicates whether driver has mapped/unmapped shared area */
+#define SEP_REQUEST_DAEMON_MAPPED 1
+#define SEP_REQUEST_DAEMON_UNMAPPED 0
+
+/*--------------------------------------------------------
+	SHARED AREA  memory total size is 36K
+	it is divided as follows:
+
+	SHARED_MESSAGE_AREA                     8K         }
+									}
+	STATIC_POOL_AREA                        4K         } MAPPED AREA ( 24 K)
+									}
+	DATA_POOL_AREA                          12K        }
+
+	SYNCHRONIC_DMA_TABLES_AREA              5K
+
+	placeholder until driver changes
+	FLOW_DMA_TABLES_AREA                    4K
+
+	SYSTEM_MEMORY_AREA                      3k
+
+	SYSTEM_MEMORY total size is 3k
+	it is divided as follows:
+
+	TIME_MEMORY_AREA                     8B
+-----------------------------------------------------------*/
+
+#define SEP_DEV_NAME "sep_sec_driver"
+#define SEP_DEV_SINGLETON "sep_sec_singleton_driver"
+#define SEP_DEV_DAEMON "sep_req_daemon_driver"
+
+
+/*
+	the maximum length of the message - the rest of the message shared
+	area will be dedicated to the dma lli tables
+*/
+#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES			(8 * 1024)
+
+/* the size of the message shared area in bytes */
+#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES		(8 * 1024)
+
+/* the size of the static pool area in bytes */
+#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES			(4 * 1024)
+
+/* the size of the data pool shared area in bytes */
+#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES		(16 * 1024)
+
+/* the size of the synchronic DMA tables area in bytes */
+#define SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES	(1024 * 5)
+
+/* Placeholder until driver changes */
+#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES		(1024 * 4)
+
+/* system data (time, caller id, etc.) pool */
+#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES		(1024 * 3)
+
+/* the size in bytes of the time memory */
+#define SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES			8
+
+/* the size in bytes of the RAR parameters memory */
+#define SEP_DRIVER_SYSTEM_RAR_MEMORY_SIZE_IN_BYTES		8
+
+/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
+	DATA POOL areas; the area must be a multiple of 4k */
+#define SEP_DRIVER_MMMAP_AREA_SIZE				(1024 * 28)
+
+/*-----------------------------------------------
+	offsets of the areas starting from the shared area start address
+*/
+
+/* message area offset */
+#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES			0
+
+/* static pool area offset */
+#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
+	(SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
+
+/* data pool area offset */
+#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
+	(SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
+	SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
+
+/* synchronic dma tables area offset */
+#define SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES \
+	(SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
+	SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
+
+/* system memory offset in bytes */
+#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
+	(SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + \
+	SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)
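+
+/*
+ * With the sizes defined above these offsets work out to: static pool at
+ * 8K, data pool at 12K, synchronic DMA tables at 28K and system data
+ * memory at 33K from the start of the shared area.
+ */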
+
+/* offset of the time area */
+#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
+	(SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
+
+/* offset of the RAR area */
+#define SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES \
+	(SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES + \
+	SEP_DRIVER_TIME_MEMORY_SIZE_IN_BYTES)
+
+/* offset of the caller id area */
+#define SEP_CALLER_ID_OFFSET_BYTES \
+	(SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES + \
+    SEP_DRIVER_SYSTEM_RAR_MEMORY_SIZE_IN_BYTES)
+
+/* offset of the DCB area */
+#define SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES \
+	(SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES + \
+	0x400)
+
+/* offset of the ext cache area */
+#define SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES \
+	SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES
+
+/* offset of the allocation data pointer area */
+#define SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES \
+	(SEP_CALLER_ID_OFFSET_BYTES + \
+	SEP_CALLER_ID_HASH_SIZE_IN_BYTES)
+
+/* the token that defines the start of time address */
+#define SEP_TIME_VAL_TOKEN                                    0x12345678
+
+#define FAKE_RAR_SIZE (1024*1024) /* used only for mfld */
+/* DEBUG LEVEL MASKS */
+
+/* size of the caller id hash (sha2) */
+#define SEP_CALLER_ID_HASH_SIZE_IN_BYTES                      32
+
+/* size of the caller id hash (sha2) in 32 bit words */
+#define SEP_CALLER_ID_HASH_SIZE_IN_WORDS                8
+
+/* maximum number of entries in the caller id table */
+#define SEP_CALLER_ID_TABLE_NUM_ENTRIES                       20
+
+/* maximum number of symmetric operations (that require a DMA resource)
+	per one message */
+#define SEP_MAX_NUM_SYNC_DMA_OPS			16
+
+/* the token that defines the RAR parameters address */
+#define SEP_RAR_VAL_TOKEN                                     0xABABABAB
+
+/* ioctl error that should be returned when trying
+   to realloc the cache/resident second time */
+#define SEP_ALREADY_INITIALIZED_ERR                           12
+
+/* bit that locks access to the shared area */
+#define SEP_MMAP_LOCK_BIT                                     0
+
+/* bit that locks access to the poll - after send_command */
+#define SEP_SEND_MSG_LOCK_BIT                                 1
+
+/* the token that defines the static pool address */
+#define SEP_STATIC_POOL_VAL_TOKEN                             0xABBAABBA
+
+/* the token that defines the data pool pointers address */
+#define SEP_DATA_POOL_POINTERS_VAL_TOKEN                      0xEDDEEDDE
+
+/* the token that defines the extended cache address */
+#define SEP_EXT_CACHE_ADDR_VAL_TOKEN                          0xBABABABA
+
+/* Time limit for SEP to finish */
+#define WAIT_TIME 10
+
+#endif /* SEP DRIVER CONFIG */
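Illustrative sketch (not part of the patch): the offset macros above simply chain, each area starting where the previous one ends. The sizes below are placeholders standing in for the *_SIZE_IN_BYTES values defined earlier in sep_driver_config.h; only SYNC_DMA_SIZE matches a value visible here.

/* Standalone example of the shared-area offset arithmetic (assumed sizes). */
#include <stdio.h>

#define MSG_SIZE        (1024 * 8)   /* assumed message area size */
#define STATIC_SIZE     (1024 * 4)   /* assumed static pool size */
#define DATA_POOL_SIZE  (1024 * 16)  /* assumed data pool size */
#define SYNC_DMA_SIZE   (1024 * 5)   /* SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES */

int main(void)
{
	unsigned long msg_off    = 0;
	unsigned long static_off = msg_off + MSG_SIZE;
	unsigned long pool_off   = static_off + STATIC_SIZE;
	unsigned long dma_off    = pool_off + DATA_POOL_SIZE;
	unsigned long sys_off    = dma_off + SYNC_DMA_SIZE;

	printf("system data area starts %lu bytes into the shared area\n",
	       sys_off);
	return 0;
}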
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
new file mode 100644
index 0000000..300f909
--- /dev/null
+++ b/drivers/staging/sep/sep_driver_hw_defs.h
@@ -0,0 +1,233 @@
+/*
+ *
+ *  sep_driver_hw_defs.h - Security Processor Driver hardware definitions
+ *
+ *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
+ *  Contributions(c) 2009,2010 Discretix. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  You should have received a copy of the GNU General Public License along with
+ *  this program; if not, write to the Free Software Foundation, Inc., 59
+ *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *  CONTACTS:
+ *
+ *  Mark Allyn		mark.a.allyn@intel.com
+ *  Jayant Mangalampalli jayant.mangalampalli@intel.com
+ *
+ *  CHANGES:
+ *
+ *  2010.09.20	Upgrade to Medfield
+ *
+ */
+
+#ifndef SEP_DRIVER_HW_DEFS__H
+#define SEP_DRIVER_HW_DEFS__H
+
+/* PCI ID's */
+#define MFLD_PCI_DEVICE_ID 0x0826
+
+/*------------------------*/
+/* HW Registers Defines.  */
+/*                        */
+/*------------------------*/
+
+
+/* cf registers */
+#define		HW_R0B_ADDR_0_REG_ADDR			0x0000UL
+#define		HW_R0B_ADDR_1_REG_ADDR			0x0004UL
+#define		HW_R0B_ADDR_2_REG_ADDR			0x0008UL
+#define		HW_R0B_ADDR_3_REG_ADDR			0x000cUL
+#define		HW_R0B_ADDR_4_REG_ADDR			0x0010UL
+#define		HW_R0B_ADDR_5_REG_ADDR			0x0014UL
+#define		HW_R0B_ADDR_6_REG_ADDR			0x0018UL
+#define		HW_R0B_ADDR_7_REG_ADDR			0x001cUL
+#define		HW_R0B_ADDR_8_REG_ADDR			0x0020UL
+#define		HW_R2B_ADDR_0_REG_ADDR			0x0080UL
+#define		HW_R2B_ADDR_1_REG_ADDR			0x0084UL
+#define		HW_R2B_ADDR_2_REG_ADDR			0x0088UL
+#define		HW_R2B_ADDR_3_REG_ADDR			0x008cUL
+#define		HW_R2B_ADDR_4_REG_ADDR			0x0090UL
+#define		HW_R2B_ADDR_5_REG_ADDR			0x0094UL
+#define		HW_R2B_ADDR_6_REG_ADDR			0x0098UL
+#define		HW_R2B_ADDR_7_REG_ADDR			0x009cUL
+#define		HW_R2B_ADDR_8_REG_ADDR			0x00a0UL
+#define		HW_R3B_REG_ADDR				0x00C0UL
+#define		HW_R4B_REG_ADDR				0x0100UL
+#define		HW_CSA_ADDR_0_REG_ADDR			0x0140UL
+#define		HW_CSA_ADDR_1_REG_ADDR			0x0144UL
+#define		HW_CSA_ADDR_2_REG_ADDR			0x0148UL
+#define		HW_CSA_ADDR_3_REG_ADDR			0x014cUL
+#define		HW_CSA_ADDR_4_REG_ADDR			0x0150UL
+#define		HW_CSA_ADDR_5_REG_ADDR			0x0154UL
+#define		HW_CSA_ADDR_6_REG_ADDR			0x0158UL
+#define		HW_CSA_ADDR_7_REG_ADDR			0x015cUL
+#define		HW_CSA_ADDR_8_REG_ADDR			0x0160UL
+#define		HW_CSA_REG_ADDR				0x0140UL
+#define		HW_SINB_REG_ADDR			0x0180UL
+#define		HW_SOUTB_REG_ADDR			0x0184UL
+#define		HW_PKI_CONTROL_REG_ADDR			0x01C0UL
+#define		HW_PKI_STATUS_REG_ADDR			0x01C4UL
+#define		HW_PKI_BUSY_REG_ADDR			0x01C8UL
+#define		HW_PKI_A_1025_REG_ADDR			0x01CCUL
+#define		HW_PKI_SDMA_CTL_REG_ADDR		0x01D0UL
+#define		HW_PKI_SDMA_OFFSET_REG_ADDR		0x01D4UL
+#define		HW_PKI_SDMA_POINTERS_REG_ADDR		0x01D8UL
+#define		HW_PKI_SDMA_DLENG_REG_ADDR		0x01DCUL
+#define		HW_PKI_SDMA_EXP_POINTERS_REG_ADDR	0x01E0UL
+#define		HW_PKI_SDMA_RES_POINTERS_REG_ADDR	0x01E4UL
+#define		HW_PKI_CLR_REG_ADDR			0x01E8UL
+#define		HW_PKI_SDMA_BUSY_REG_ADDR		0x01E8UL
+#define		HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR	0x01ECUL
+#define		HW_PKI_SDMA_MUL_BY1_REG_ADDR		0x01F0UL
+#define		HW_PKI_SDMA_RMUL_SEL_REG_ADDR		0x01F4UL
+#define		HW_DES_KEY_0_REG_ADDR			0x0208UL
+#define		HW_DES_KEY_1_REG_ADDR			0x020CUL
+#define		HW_DES_KEY_2_REG_ADDR			0x0210UL
+#define		HW_DES_KEY_3_REG_ADDR			0x0214UL
+#define		HW_DES_KEY_4_REG_ADDR			0x0218UL
+#define		HW_DES_KEY_5_REG_ADDR			0x021CUL
+#define		HW_DES_CONTROL_0_REG_ADDR		0x0220UL
+#define		HW_DES_CONTROL_1_REG_ADDR		0x0224UL
+#define		HW_DES_IV_0_REG_ADDR			0x0228UL
+#define		HW_DES_IV_1_REG_ADDR			0x022CUL
+#define		HW_AES_KEY_0_ADDR_0_REG_ADDR		0x0400UL
+#define		HW_AES_KEY_0_ADDR_1_REG_ADDR		0x0404UL
+#define		HW_AES_KEY_0_ADDR_2_REG_ADDR		0x0408UL
+#define		HW_AES_KEY_0_ADDR_3_REG_ADDR		0x040cUL
+#define		HW_AES_KEY_0_ADDR_4_REG_ADDR		0x0410UL
+#define		HW_AES_KEY_0_ADDR_5_REG_ADDR		0x0414UL
+#define		HW_AES_KEY_0_ADDR_6_REG_ADDR		0x0418UL
+#define		HW_AES_KEY_0_ADDR_7_REG_ADDR		0x041cUL
+#define		HW_AES_KEY_0_REG_ADDR			0x0400UL
+#define		HW_AES_IV_0_ADDR_0_REG_ADDR		0x0440UL
+#define		HW_AES_IV_0_ADDR_1_REG_ADDR		0x0444UL
+#define		HW_AES_IV_0_ADDR_2_REG_ADDR		0x0448UL
+#define		HW_AES_IV_0_ADDR_3_REG_ADDR		0x044cUL
+#define		HW_AES_IV_0_REG_ADDR			0x0440UL
+#define		HW_AES_CTR1_ADDR_0_REG_ADDR		0x0460UL
+#define		HW_AES_CTR1_ADDR_1_REG_ADDR		0x0464UL
+#define		HW_AES_CTR1_ADDR_2_REG_ADDR		0x0468UL
+#define		HW_AES_CTR1_ADDR_3_REG_ADDR		0x046cUL
+#define		HW_AES_CTR1_REG_ADDR			0x0460UL
+#define		HW_AES_SK_REG_ADDR			0x0478UL
+#define		HW_AES_MAC_OK_REG_ADDR			0x0480UL
+#define		HW_AES_PREV_IV_0_ADDR_0_REG_ADDR	0x0490UL
+#define		HW_AES_PREV_IV_0_ADDR_1_REG_ADDR	0x0494UL
+#define		HW_AES_PREV_IV_0_ADDR_2_REG_ADDR	0x0498UL
+#define		HW_AES_PREV_IV_0_ADDR_3_REG_ADDR	0x049cUL
+#define		HW_AES_PREV_IV_0_REG_ADDR		0x0490UL
+#define		HW_AES_CONTROL_REG_ADDR			0x04C0UL
+#define		HW_HASH_H0_REG_ADDR			0x0640UL
+#define		HW_HASH_H1_REG_ADDR			0x0644UL
+#define		HW_HASH_H2_REG_ADDR			0x0648UL
+#define		HW_HASH_H3_REG_ADDR			0x064CUL
+#define		HW_HASH_H4_REG_ADDR			0x0650UL
+#define		HW_HASH_H5_REG_ADDR			0x0654UL
+#define		HW_HASH_H6_REG_ADDR			0x0658UL
+#define		HW_HASH_H7_REG_ADDR			0x065CUL
+#define		HW_HASH_H8_REG_ADDR			0x0660UL
+#define		HW_HASH_H9_REG_ADDR			0x0664UL
+#define		HW_HASH_H10_REG_ADDR			0x0668UL
+#define		HW_HASH_H11_REG_ADDR			0x066CUL
+#define		HW_HASH_H12_REG_ADDR			0x0670UL
+#define		HW_HASH_H13_REG_ADDR			0x0674UL
+#define		HW_HASH_H14_REG_ADDR			0x0678UL
+#define		HW_HASH_H15_REG_ADDR			0x067CUL
+#define		HW_HASH_CONTROL_REG_ADDR		0x07C0UL
+#define		HW_HASH_PAD_EN_REG_ADDR			0x07C4UL
+#define		HW_HASH_PAD_CFG_REG_ADDR		0x07C8UL
+#define		HW_HASH_CUR_LEN_0_REG_ADDR		0x07CCUL
+#define		HW_HASH_CUR_LEN_1_REG_ADDR		0x07D0UL
+#define		HW_HASH_CUR_LEN_2_REG_ADDR		0x07D4UL
+#define		HW_HASH_CUR_LEN_3_REG_ADDR		0x07D8UL
+#define		HW_HASH_PARAM_REG_ADDR			0x07DCUL
+#define		HW_HASH_INT_BUSY_REG_ADDR		0x07E0UL
+#define		HW_HASH_SW_RESET_REG_ADDR		0x07E4UL
+#define		HW_HASH_ENDIANESS_REG_ADDR		0x07E8UL
+#define		HW_HASH_DATA_REG_ADDR			0x07ECUL
+#define		HW_DRNG_CONTROL_REG_ADDR		0x0800UL
+#define		HW_DRNG_VALID_REG_ADDR			0x0804UL
+#define		HW_DRNG_DATA_REG_ADDR			0x0808UL
+#define		HW_RND_SRC_EN_REG_ADDR			0x080CUL
+#define		HW_AES_CLK_ENABLE_REG_ADDR		0x0810UL
+#define		HW_DES_CLK_ENABLE_REG_ADDR		0x0814UL
+#define		HW_HASH_CLK_ENABLE_REG_ADDR		0x0818UL
+#define		HW_PKI_CLK_ENABLE_REG_ADDR		0x081CUL
+#define		HW_CLK_STATUS_REG_ADDR			0x0824UL
+#define		HW_CLK_ENABLE_REG_ADDR			0x0828UL
+#define		HW_DRNG_SAMPLE_REG_ADDR			0x0850UL
+#define		HW_RND_SRC_CTL_REG_ADDR			0x0858UL
+#define		HW_CRYPTO_CTL_REG_ADDR			0x0900UL
+#define		HW_CRYPTO_STATUS_REG_ADDR		0x090CUL
+#define		HW_CRYPTO_BUSY_REG_ADDR			0x0910UL
+#define		HW_AES_BUSY_REG_ADDR			0x0914UL
+#define		HW_DES_BUSY_REG_ADDR			0x0918UL
+#define		HW_HASH_BUSY_REG_ADDR			0x091CUL
+#define		HW_CONTENT_REG_ADDR			0x0924UL
+#define		HW_VERSION_REG_ADDR			0x0928UL
+#define		HW_CONTEXT_ID_REG_ADDR			0x0930UL
+#define		HW_DIN_BUFFER_REG_ADDR			0x0C00UL
+#define		HW_DIN_MEM_DMA_BUSY_REG_ADDR		0x0c20UL
+#define		HW_SRC_LLI_MEM_ADDR_REG_ADDR		0x0c24UL
+#define		HW_SRC_LLI_WORD0_REG_ADDR		0x0C28UL
+#define		HW_SRC_LLI_WORD1_REG_ADDR		0x0C2CUL
+#define		HW_SRAM_SRC_ADDR_REG_ADDR		0x0c30UL
+#define		HW_DIN_SRAM_BYTES_LEN_REG_ADDR		0x0c34UL
+#define		HW_DIN_SRAM_DMA_BUSY_REG_ADDR		0x0C38UL
+#define		HW_WRITE_ALIGN_REG_ADDR			0x0C3CUL
+#define		HW_OLD_DATA_REG_ADDR			0x0C48UL
+#define		HW_WRITE_ALIGN_LAST_REG_ADDR		0x0C4CUL
+#define		HW_DOUT_BUFFER_REG_ADDR			0x0C00UL
+#define		HW_DST_LLI_WORD0_REG_ADDR		0x0D28UL
+#define		HW_DST_LLI_WORD1_REG_ADDR		0x0D2CUL
+#define		HW_DST_LLI_MEM_ADDR_REG_ADDR		0x0D24UL
+#define		HW_DOUT_MEM_DMA_BUSY_REG_ADDR		0x0D20UL
+#define		HW_SRAM_DEST_ADDR_REG_ADDR		0x0D30UL
+#define		HW_DOUT_SRAM_BYTES_LEN_REG_ADDR		0x0D34UL
+#define		HW_DOUT_SRAM_DMA_BUSY_REG_ADDR		0x0D38UL
+#define		HW_READ_ALIGN_REG_ADDR			0x0D3CUL
+#define		HW_READ_LAST_DATA_REG_ADDR		0x0D44UL
+#define		HW_RC4_THRU_CPU_REG_ADDR		0x0D4CUL
+#define		HW_AHB_SINGLE_REG_ADDR			0x0E00UL
+#define		HW_SRAM_DATA_REG_ADDR			0x0F00UL
+#define		HW_SRAM_ADDR_REG_ADDR			0x0F04UL
+#define		HW_SRAM_DATA_READY_REG_ADDR		0x0F08UL
+#define		HW_HOST_IRR_REG_ADDR			0x0A00UL
+#define		HW_HOST_IMR_REG_ADDR			0x0A04UL
+#define		HW_HOST_ICR_REG_ADDR			0x0A08UL
+#define		HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR	0x0A10UL
+#define		HW_HOST_SEP_BUSY_REG_ADDR		0x0A14UL
+#define		HW_HOST_SEP_LCS_REG_ADDR		0x0A18UL
+#define		HW_HOST_CC_SW_RST_REG_ADDR		0x0A40UL
+#define		HW_HOST_SEP_SW_RST_REG_ADDR		0x0A44UL
+#define		HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR	0x0A80UL
+#define		HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR	0x0A84UL
+#define		HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR	0x0A88UL
+#define		HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR	0x0A8cUL
+#define		HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR	0x0A90UL
+#define		HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR	0x0A94UL
+#define		HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR	0x0A98UL
+#define		HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR	0x0A9cUL
+#define		HW_HOST_SEP_HOST_GPR0_REG_ADDR		0x0B00UL
+#define		HW_HOST_SEP_HOST_GPR1_REG_ADDR		0x0B04UL
+#define		HW_HOST_SEP_HOST_GPR2_REG_ADDR		0x0B08UL
+#define		HW_HOST_SEP_HOST_GPR3_REG_ADDR		0x0B0CUL
+#define		HW_HOST_HOST_SEP_GPR0_REG_ADDR		0x0B80UL
+#define		HW_HOST_HOST_SEP_GPR1_REG_ADDR		0x0B84UL
+#define		HW_HOST_HOST_SEP_GPR2_REG_ADDR		0x0B88UL
+#define		HW_HOST_HOST_SEP_GPR3_REG_ADDR		0x0B8CUL
+#define		HW_HOST_HOST_ENDIAN_REG_ADDR		0x0B90UL
+#define		HW_HOST_HOST_COMM_CLK_EN_REG_ADDR	0x0B94UL
+#define		HW_CLR_SRAM_BUSY_REG_REG_ADDR		0x0F0CUL
+#define		HW_CC_SRAM_BASE_ADDRESS			0x5800UL
+
+#endif		/* SEP_DRIVER_HW_DEFS__H */
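Illustrative sketch (not part of the patch): the constants above are byte offsets into the SEP MMIO window. Assuming a base mapped with pci_ioremap_bar(), and assuming the ICR register is write-to-clear (an assumption, not confirmed by this header), a read/ack helper would look roughly like this:

/* Hypothetical helper, for illustration only. */
static u32 sep_irq_pending(void __iomem *base)
{
	u32 irr = readl(base + HW_HOST_IRR_REG_ADDR);	/* pending sources */

	writel(irr, base + HW_HOST_ICR_REG_ADDR);	/* assumed write-to-clear */
	return irr;
}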
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 24f47d6..0bc113c 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -996,7 +996,7 @@
 
 
 /* Jason (08/11/2009) PCI_DRV wrapper essential structs */
-static const struct pci_device_id smtcfb_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
 	{0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -1071,7 +1071,7 @@
 	/* when resuming, restore pci data and fb cursor */
 	if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
 		retv = pci_set_power_state(pdev, PCI_D0);
-		retv = pci_restore_state(pdev);
+		pci_restore_state(pdev);
 		if (pci_enable_device(pdev))
 			return -1;
 		pci_set_master(pdev);
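Aside (not part of the patch): DEFINE_PCI_DEVICE_TABLE() in kernels of this vintage expands to a const struct pci_device_id array with an init-const annotation, so the conversion above keeps the table const while gaining the annotation. A minimal hedged example of the usual pairing with MODULE_DEVICE_TABLE(), using a made-up device ID:

#include <linux/module.h>
#include <linux/pci.h>

/* 0x0750 is a hypothetical device ID, for illustration only */
static DEFINE_PCI_DEVICE_TABLE(example_pci_table) = {
	{ PCI_DEVICE(0x126f, 0x0750) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_pci_table);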
diff --git a/drivers/staging/smbfs/cache.c b/drivers/staging/smbfs/cache.c
index dbb9865..f2a1323 100644
--- a/drivers/staging/smbfs/cache.c
+++ b/drivers/staging/smbfs/cache.c
@@ -62,7 +62,7 @@
 	struct list_head *next;
 	struct dentry *dentry;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -70,7 +70,7 @@
 		smb_age_dentry(server, dentry);
 		next = next->next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 }
 
 /*
@@ -96,13 +96,13 @@
 	}
 
 	/* If a pointer is invalid, we search the dentry. */
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dent = list_entry(next, struct dentry, d_u.d_child);
 		if ((unsigned long)dent->d_fsdata == fpos) {
 			if (dent->d_inode)
-				dget_locked(dent);
+				dget(dent);
 			else
 				dent = NULL;
 			goto out_unlock;
@@ -111,7 +111,7 @@
 	}
 	dent = NULL;
 out_unlock:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 	return dent;
 }
 
@@ -134,7 +134,7 @@
 	qname->hash = full_name_hash(qname->name, qname->len);
 
 	if (dentry->d_op && dentry->d_op->d_hash)
-		if (dentry->d_op->d_hash(dentry, qname) != 0)
+		if (dentry->d_op->d_hash(dentry, inode, qname) != 0)
 			goto end_advance;
 
 	newdent = d_lookup(dentry, qname);
@@ -145,8 +145,8 @@
 			goto end_advance;
 	} else {
 		hashed = 1;
-		memcpy((char *) newdent->d_name.name, qname->name,
-		       newdent->d_name.len);
+		/* dir i_mutex is locked because we're in readdir */
+		dentry_update_name_case(newdent, qname);
 	}
 
 	if (!newdent->d_inode) {
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index f088ea2..87a3a9b 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/net.h>
 #include <linux/sched.h>
+#include <linux/namei.h>
 
 #include "smb_fs.h"
 #include "smb_mount.h"
@@ -274,9 +275,13 @@
  * Dentry operations routines
  */
 static int smb_lookup_validate(struct dentry *, struct nameidata *);
-static int smb_hash_dentry(struct dentry *, struct qstr *);
-static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
-static int smb_delete_dentry(struct dentry *);
+static int smb_hash_dentry(const struct dentry *, const struct inode *,
+		struct qstr *);
+static int smb_compare_dentry(const struct dentry *,
+		const struct inode *,
+		const struct dentry *, const struct inode *,
+		unsigned int, const char *, const struct qstr *);
+static int smb_delete_dentry(const struct dentry *);
 
 static const struct dentry_operations smbfs_dentry_operations =
 {
@@ -297,13 +302,20 @@
  * This is the callback when the dcache has a lookup hit.
  */
 static int
-smb_lookup_validate(struct dentry * dentry, struct nameidata *nd)
+smb_lookup_validate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct smb_sb_info *server = server_from_dentry(dentry);
-	struct inode * inode = dentry->d_inode;
-	unsigned long age = jiffies - dentry->d_time;
+	struct smb_sb_info *server;
+	struct inode *inode;
+	unsigned long age;
 	int valid;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	server = server_from_dentry(dentry);
+	inode = dentry->d_inode;
+	age = jiffies - dentry->d_time;
+
 	/*
 	 * The default validation is based on dentry age:
 	 * we believe in dentries for a few seconds.  (But each
@@ -333,7 +345,8 @@
 }
 
 static int 
-smb_hash_dentry(struct dentry *dir, struct qstr *this)
+smb_hash_dentry(const struct dentry *dir, const struct inode *inode,
+		struct qstr *this)
 {
 	unsigned long hash;
 	int i;
@@ -347,14 +360,17 @@
 }
 
 static int
-smb_compare_dentry(struct dentry *dir, struct qstr *a, struct qstr *b)
+smb_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
 	int i, result = 1;
 
-	if (a->len != b->len)
+	if (len != name->len)
 		goto out;
-	for (i=0; i < a->len; i++) {
-		if (tolower(a->name[i]) != tolower(b->name[i]))
+	for (i=0; i < len; i++) {
+		if (tolower(str[i]) != tolower(name->name[i]))
 			goto out;
 	}
 	result = 0;
@@ -367,7 +383,7 @@
  * We use this to unhash dentries with bad inodes.
  */
 static int
-smb_delete_dentry(struct dentry * dentry)
+smb_delete_dentry(const struct dentry *dentry)
 {
 	if (dentry->d_inode) {
 		if (is_bad_inode(dentry->d_inode)) {
@@ -387,12 +403,6 @@
 void
 smb_new_dentry(struct dentry *dentry)
 {
-	struct smb_sb_info *server = server_from_dentry(dentry);
-
-	if (server->mnt->flags & SMB_MOUNT_CASE)
-		dentry->d_op = &smbfs_dentry_operations_case;
-	else
-		dentry->d_op = &smbfs_dentry_operations;
 	dentry->d_time = jiffies;
 }
 
@@ -424,7 +434,6 @@
 	struct smb_fattr finfo;
 	struct inode *inode;
 	int error;
-	struct smb_sb_info *server;
 
 	error = -ENAMETOOLONG;
 	if (dentry->d_name.len > SMB_MAXNAMELEN)
@@ -452,12 +461,6 @@
 		inode = smb_iget(dir->i_sb, &finfo);
 		if (inode) {
 	add_entry:
-			server = server_from_dentry(dentry);
-			if (server->mnt->flags & SMB_MOUNT_CASE)
-				dentry->d_op = &smbfs_dentry_operations_case;
-			else
-				dentry->d_op = &smbfs_dentry_operations;
-
 			d_add(dentry, inode);
 			smb_renew_times(dentry);
 			error = 0;
diff --git a/drivers/staging/smbfs/file.c b/drivers/staging/smbfs/file.c
index 5dcd19c..31372e7 100644
--- a/drivers/staging/smbfs/file.c
+++ b/drivers/staging/smbfs/file.c
@@ -407,11 +407,14 @@
  * privileges, so we need our own check for this.
  */
 static int
-smb_file_permission(struct inode *inode, int mask)
+smb_file_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	int mode = inode->i_mode;
 	int error = 0;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	VERBOSE("mode=%x, mask=%x\n", mode, mask);
 
 	/* Look at user permissions */
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index 540a984..0778589 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -62,9 +62,16 @@
 	return &ei->vfs_inode;
 }
 
+static void smb_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(smb_inode_cachep, SMB_I(inode));
+}
+
 static void smb_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(smb_inode_cachep, SMB_I(inode));
+	call_rcu(&inode->i_rcu, smb_i_callback);
 }
 
 static void init_once(void *foo)
@@ -607,6 +614,10 @@
 		printk(KERN_ERR "smbfs: failed to start smbiod\n");
 		goto out_no_smbiod;
 	}
+	if (server->mnt->flags & SMB_MOUNT_CASE)
+		sb->s_d_op = &smbfs_dentry_operations_case;
+	else
+		sb->s_d_op = &smbfs_dentry_operations;
 
 	/*
 	 * Keep the super block locked while we get the root inode.
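Aside (not part of the patch): the inode.c changes pair two VFS conversions, RCU-delayed inode freeing and a per-superblock s_d_op that replaces assigning dentry->d_op in every lookup. A condensed sketch of the same pattern for a hypothetical filesystem (the examplefs_* names are placeholders, not smbfs symbols):

static void examplefs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(examplefs_inode_cachep, EXAMPLEFS_I(inode));
}

static void examplefs_destroy_inode(struct inode *inode)
{
	/* free only after an RCU grace period, so RCU-walk never sees it */
	call_rcu(&inode->i_rcu, examplefs_i_callback);
}

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* one assignment replaces setting dentry->d_op in every lookup */
	sb->s_d_op = &examplefs_dentry_operations;
	return 0;	/* remaining mount setup omitted */
}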
diff --git a/drivers/staging/smbfs/proto.h b/drivers/staging/smbfs/proto.h
index 05939a6..3883cb1 100644
--- a/drivers/staging/smbfs/proto.h
+++ b/drivers/staging/smbfs/proto.h
@@ -38,6 +38,8 @@
 extern const struct file_operations smb_dir_operations;
 extern const struct inode_operations smb_dir_inode_operations;
 extern const struct inode_operations smb_dir_inode_operations_unix;
+extern const struct dentry_operations smbfs_dentry_operations_case;
+extern const struct dentry_operations smbfs_dentry_operations;
 extern void smb_new_dentry(struct dentry *dentry);
 extern void smb_renew_times(struct dentry *dentry);
 /* cache.c */
diff --git a/drivers/staging/solo6x10/Kconfig b/drivers/staging/solo6x10/Kconfig
index d96398c..2cf77c9 100644
--- a/drivers/staging/solo6x10/Kconfig
+++ b/drivers/staging/solo6x10/Kconfig
@@ -1,7 +1,7 @@
 config SOLO6X10
 	tristate "Softlogic 6x10 MPEG codec cards"
-	depends on PCI && VIDEO_DEV && SND
-	select VIDEOBUF_DMA_CONTIG
+	depends on PCI && VIDEO_DEV && SND && I2C
+	select VIDEOBUF_DMA_SG
 	---help---
 	  This driver supports the Softlogic based MPEG-4 and h.264 codec
 	  cards.
diff --git a/drivers/staging/solo6x10/TODO b/drivers/staging/solo6x10/TODO
index e6a2ee2..7e6c4fa 100644
--- a/drivers/staging/solo6x10/TODO
+++ b/drivers/staging/solo6x10/TODO
@@ -1,7 +1,5 @@
 TODO (staging => main):
 
-	* checkpatch.pl (haven't run it yet)
-	* Lindent (should be clean, but check)
 	* Motion detection flags need to be moved to v4l2
 	* Some private CIDs need to be moved to v4l2
 
@@ -21,8 +19,6 @@
 	  - implement playback via external sound jack
 	  - implement loopback of external sound jack with incoming audio?
 	  - implement pause/resume
-	  - check into jacking sound from tx28xx chips directly (to avoid
-	    g.723/8khz limitations)
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc Ben Collins
 <bcollins@bluecherry.net>
diff --git a/drivers/staging/solo6x10/solo6010-core.c b/drivers/staging/solo6x10/solo6010-core.c
index 4a051cd..c433136 100644
--- a/drivers/staging/solo6x10/solo6010-core.c
+++ b/drivers/staging/solo6x10/solo6010-core.c
@@ -136,6 +136,7 @@
 	int ret;
 	int sdram;
 	u8 chip_id;
+
 	solo_dev = kzalloc(sizeof(*solo_dev), GFP_KERNEL);
 	if (solo_dev == NULL)
 		return -ENOMEM;
@@ -163,21 +164,21 @@
 	chip_id = solo_reg_read(solo_dev, SOLO_CHIP_OPTION) &
 					SOLO_CHIP_ID_MASK;
 	switch (chip_id) {
-		case 7:
-			solo_dev->nr_chans = 16;
-			solo_dev->nr_ext = 5;
-			break;
-		case 6:
-			solo_dev->nr_chans = 8;
-			solo_dev->nr_ext = 2;
-			break;
-		default:
-			dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, "
-				 "defaulting to 4 channels\n",
-				 chip_id);
-		case 5:
-			solo_dev->nr_chans = 4;
-			solo_dev->nr_ext = 1;
+	case 7:
+		solo_dev->nr_chans = 16;
+		solo_dev->nr_ext = 5;
+		break;
+	case 6:
+		solo_dev->nr_chans = 8;
+		solo_dev->nr_ext = 2;
+		break;
+	default:
+		dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, "
+			 "defaulting to 4 channels\n",
+			 chip_id);
+	case 5:
+		solo_dev->nr_chans = 4;
+		solo_dev->nr_ext = 1;
 	}
 
 	/* Disable all interrupts to start */
@@ -261,13 +262,18 @@
 }
 
 static struct pci_device_id solo6010_id_table[] = {
+	/* 6010 based cards */
 	{PCI_DEVICE(PCI_VENDOR_ID_SOFTLOGIC, PCI_DEVICE_ID_SOLO6010)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_4)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_9)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_NEUSOLO_16)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_4)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_9)},
-	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_COMMSOLO_16)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_4)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_9)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_SOLO_16)},
+	/* 6110 based cards */
+	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_4)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_8)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BLUECHERRY, PCI_DEVICE_ID_BC_6110_16)},
 	{0,}
 };
 
diff --git a/drivers/staging/solo6x10/solo6010-disp.c b/drivers/staging/solo6x10/solo6010-disp.c
index 555f024..f866f84 100644
--- a/drivers/staging/solo6x10/solo6010-disp.c
+++ b/drivers/staging/solo6x10/solo6010-disp.c
@@ -198,12 +198,12 @@
 	}
 
 	/* Default motion settings */
-        solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) |
+	solo_reg_write(solo_dev, SOLO_VI_MOT_ADR, SOLO_VI_MOTION_EN(0) |
 		       (SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
 	solo_reg_write(solo_dev, SOLO_VI_MOT_CTRL,
 		       SOLO_VI_MOTION_FRAME_COUNT(3) |
 		       SOLO_VI_MOTION_SAMPLE_LENGTH(solo_dev->video_hsize / 16)
-		       | //SOLO_VI_MOTION_INTR_START_STOP |
+		       | /* SOLO_VI_MOTION_INTR_START_STOP | */
 		       SOLO_VI_MOTION_SAMPLE_COUNT(10));
 
 	solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
@@ -264,7 +264,7 @@
 	solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(0), 0);
 	solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(0), 0);
 	solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(0), 0);
-	
+
 	solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_CTRL(1), 0);
 	solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_START(1), 0);
 	solo_reg_write(solo_dev, SOLO_VO_RECTANGLE_STOP(1), 0);
diff --git a/drivers/staging/solo6x10/solo6010-enc.c b/drivers/staging/solo6x10/solo6010-enc.c
index a6cf0a8..481a492 100644
--- a/drivers/staging/solo6x10/solo6010-enc.c
+++ b/drivers/staging/solo6x10/solo6010-enc.c
@@ -22,7 +22,7 @@
 #include "solo6010.h"
 #include "solo6010-osd-font.h"
 
-#define CAPTURE_MAX_BANDWIDTH		32	// D1 4channel (D1 == 4)
+#define CAPTURE_MAX_BANDWIDTH		32	/* D1 4channel (D1 == 4) */
 #define OSG_BUFFER_SIZE			1024
 
 #define VI_PROG_HSIZE			(1280 - 16)
@@ -145,8 +145,8 @@
 
 	solo_p2m_dma(solo_dev, 0, 1, buf, SOLO_EOSD_EXT_ADDR(solo_dev) +
 		     (solo_enc->ch * SOLO_EOSD_EXT_SIZE), SOLO_EOSD_EXT_SIZE);
-        reg |= (1 << solo_enc->ch);
-        solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
+	reg |= (1 << solo_enc->ch);
+	solo_reg_write(solo_dev, SOLO_VE_OSD_CH, reg);
 
 	kfree(buf);
 
diff --git a/drivers/staging/solo6x10/solo6010-g723.c b/drivers/staging/solo6x10/solo6010-g723.c
index 82fbcb8..254b46a 100644
--- a/drivers/staging/solo6x10/solo6010-g723.c
+++ b/drivers/staging/solo6x10/solo6010-g723.c
@@ -47,7 +47,7 @@
  * is broken down to 20 * 48 byte regions (one for each channel possible)
  * with the rest of the page being dummy data. */
 #define MAX_BUFFER		(G723_PERIOD_BYTES * PERIODS_MAX)
-#define IRQ_PAGES		4 // 0 - 4
+#define IRQ_PAGES		4 /* 0 - 4 */
 #define PERIODS_MIN		(1 << IRQ_PAGES)
 #define PERIODS_MAX		G723_FDMA_PAGES
 
@@ -158,7 +158,7 @@
 	snd_pcm_substream_chip(ss) = solo_pcm->solo_dev;
 	kfree(solo_pcm);
 
-        return 0;
+	return 0;
 }
 
 static int snd_solo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
@@ -197,7 +197,7 @@
 
 static int snd_solo_pcm_prepare(struct snd_pcm_substream *ss)
 {
-        return 0;
+	return 0;
 }
 
 static snd_pcm_uframes_t snd_solo_pcm_pointer(struct snd_pcm_substream *ss)
@@ -271,7 +271,7 @@
 
 	value->value.integer.value[0] = tw28_get_audio_gain(solo_dev, ch);
 
-        return 0;
+	return 0;
 }
 
 static int snd_solo_capture_volume_put(struct snd_kcontrol *kcontrol,
@@ -279,15 +279,15 @@
 {
 	struct solo6010_dev *solo_dev = snd_kcontrol_chip(kcontrol);
 	u8 ch = value->id.numid - 1;
-        u8 old_val;
+	u8 old_val;
 
-        old_val = tw28_get_audio_gain(solo_dev, ch);
+	old_val = tw28_get_audio_gain(solo_dev, ch);
 	if (old_val == value->value.integer.value[0])
 		return 0;
 
 	tw28_set_audio_gain(solo_dev, ch, value->value.integer.value[0]);
 
-        return 1;
+	return 1;
 }
 
 static struct snd_kcontrol_new snd_solo_capture_volume = {
@@ -368,14 +368,16 @@
 	strcpy(card->mixername, "SOLO-6010");
 	kctl = snd_solo_capture_volume;
 	kctl.count = solo_dev->nr_chans;
-        ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
+	ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
 	if (ret < 0)
 		return ret;
 
-	if ((ret = solo_snd_pcm_init(solo_dev)) < 0)
+	ret = solo_snd_pcm_init(solo_dev);
+	if (ret < 0)
 		goto snd_error;
 
-	if ((ret = snd_card_register(card)) < 0)
+	ret = snd_card_register(card);
+	if (ret < 0)
 		goto snd_error;
 
 	solo_g723_config(solo_dev);
diff --git a/drivers/staging/solo6x10/solo6010-gpio.c b/drivers/staging/solo6x10/solo6010-gpio.c
index 46f7a71..8869b88 100644
--- a/drivers/staging/solo6x10/solo6010-gpio.c
+++ b/drivers/staging/solo6x10/solo6010-gpio.c
@@ -92,8 +92,8 @@
 
 int solo_gpio_init(struct solo6010_dev *solo_dev)
 {
-        solo_gpio_config(solo_dev);
-        return 0;
+	solo_gpio_config(solo_dev);
+	return 0;
 }
 
 void solo_gpio_exit(struct solo6010_dev *solo_dev)
diff --git a/drivers/staging/solo6x10/solo6010-i2c.c b/drivers/staging/solo6x10/solo6010-i2c.c
index cadd512..60b69cd 100644
--- a/drivers/staging/solo6x10/solo6010-i2c.c
+++ b/drivers/staging/solo6x10/solo6010-i2c.c
@@ -46,7 +46,7 @@
 
 	i2c_transfer(&solo_dev->i2c_adap[id], msgs, 2);
 
-        return data;
+	return data;
 }
 
 void solo_i2c_writebyte(struct solo6010_dev *solo_dev, int id, u8 addr,
@@ -225,9 +225,9 @@
 	}
 
 	if (i == SOLO_I2C_ADAPTERS)
-		return num; // XXX Right return value for failure?
+		return num; /* XXX Right return value for failure? */
 
-	down(&solo_dev->i2c_sem);
+	mutex_lock(&solo_dev->i2c_mutex);
 	solo_dev->i2c_id = i;
 	solo_dev->i2c_msg = msgs;
 	solo_dev->i2c_msg_num = num;
@@ -258,7 +258,7 @@
 	solo_dev->i2c_state = IIC_STATE_IDLE;
 	solo_dev->i2c_id = -1;
 
-	up(&solo_dev->i2c_sem);
+	mutex_unlock(&solo_dev->i2c_mutex);
 
 	return ret;
 }
@@ -284,7 +284,7 @@
 	solo_dev->i2c_id = -1;
 	solo_dev->i2c_state = IIC_STATE_IDLE;
 	init_waitqueue_head(&solo_dev->i2c_wait);
-	sema_init(&solo_dev->i2c_sem, 1);
+	mutex_init(&solo_dev->i2c_mutex);
 
 	for (i = 0; i < SOLO_I2C_ADAPTERS; i++) {
 		struct i2c_adapter *adap = &solo_dev->i2c_adap[i];
@@ -296,7 +296,8 @@
 		adap->retries = 1;
 		adap->dev.parent = &solo_dev->pdev->dev;
 
-		if ((ret = i2c_add_adapter(adap))) {
+		ret = i2c_add_adapter(adap);
+		if (ret) {
 			adap->algo_data = NULL;
 			break;
 		}
diff --git a/drivers/staging/solo6x10/solo6010-osd-font.h b/drivers/staging/solo6x10/solo6010-osd-font.h
index d6f565b..d72efbb 100644
--- a/drivers/staging/solo6x10/solo6010-osd-font.h
+++ b/drivers/staging/solo6x10/solo6010-osd-font.h
@@ -22,7 +22,7 @@
 
 static const unsigned int solo_osd_font[] = {
 	0x00000000, 0x0000c0c8, 0xccfefe0c, 0x08000000,
-	0x00000000, 0x10103838, 0x7c7cfefe, 0x00000000,	// 0
+	0x00000000, 0x10103838, 0x7c7cfefe, 0x00000000,	/* 0 */
 	0x00000000, 0xfefe7c7c, 0x38381010, 0x10000000,
 	0x00000000, 0x7c82fefe, 0xfefefe7c, 0x00000000,
 	0x00000000, 0x00001038, 0x10000000, 0x00000000,
@@ -54,67 +54,67 @@
 	0x0000003f, 0x7f404c52, 0x524c407f, 0x00000000,
 	0x0000007c, 0x82ba82ba, 0x82ba82fe, 0x00000000,
 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0x183c3c3c, 0x18180018, 0x18000000,	// 32   !
+	0x00000000, 0x183c3c3c, 0x18180018, 0x18000000,	/* 32   ! */
 	0x00000066, 0x66240000, 0x00000000, 0x00000000,
-	0x00000000, 0x6c6cfe6c, 0x6c6cfe6c, 0x6c000000,	// 34 " #
+	0x00000000, 0x6c6cfe6c, 0x6c6cfe6c, 0x6c000000,	/* 34 " # */
 	0x00001010, 0x7cd6d616, 0x7cd0d6d6, 0x7c101000,
-	0x00000000, 0x0086c660, 0x30180cc6, 0xc2000000,	// 36 $ %
+	0x00000000, 0x0086c660, 0x30180cc6, 0xc2000000,	/* 36 $ % */
 	0x00000000, 0x386c6c38, 0xdc766666, 0xdc000000,
-	0x0000000c, 0x0c0c0600, 0x00000000, 0x00000000,	// 38 & '
+	0x0000000c, 0x0c0c0600, 0x00000000, 0x00000000,	/* 38 & ' */
 	0x00000000, 0x30180c0c, 0x0c0c0c18, 0x30000000,
-	0x00000000, 0x0c183030, 0x30303018, 0x0c000000,	// 40 ( )
+	0x00000000, 0x0c183030, 0x30303018, 0x0c000000,	/* 40 ( ) */
 	0x00000000, 0x0000663c, 0xff3c6600, 0x00000000,
-	0x00000000, 0x00001818, 0x7e181800, 0x00000000,	// 42 * +
+	0x00000000, 0x00001818, 0x7e181800, 0x00000000,	/* 42 * + */
 	0x00000000, 0x00000000, 0x00000e0e, 0x0c060000,
-	0x00000000, 0x00000000, 0x7e000000, 0x00000000,	// 44 , -
+	0x00000000, 0x00000000, 0x7e000000, 0x00000000,	/* 44 , - */
 	0x00000000, 0x00000000, 0x00000006, 0x06000000,
-	0x00000000, 0x80c06030, 0x180c0602, 0x00000000,	// 46 . /
+	0x00000000, 0x80c06030, 0x180c0602, 0x00000000,	/* 46 . / */
 	0x0000007c, 0xc6e6f6de, 0xcec6c67c, 0x00000000,
-	0x00000030, 0x383c3030, 0x303030fc, 0x00000000,	// 48 0 1
+	0x00000030, 0x383c3030, 0x303030fc, 0x00000000,	/* 48 0 1 */
 	0x0000007c, 0xc6c06030, 0x180cc6fe, 0x00000000,
-	0x0000007c, 0xc6c0c07c, 0xc0c0c67c, 0x00000000,	// 50 2 3
+	0x0000007c, 0xc6c0c07c, 0xc0c0c67c, 0x00000000,	/* 50 2 3 */
 	0x00000060, 0x70786c66, 0xfe6060f0, 0x00000000,
-	0x000000fe, 0x0606067e, 0xc0c0c67c, 0x00000000,	// 52 4 5
+	0x000000fe, 0x0606067e, 0xc0c0c67c, 0x00000000,	/* 52 4 5 */
 	0x00000038, 0x0c06067e, 0xc6c6c67c, 0x00000000,
-	0x000000fe, 0xc6c06030, 0x18181818, 0x00000000,	// 54 6 7
+	0x000000fe, 0xc6c06030, 0x18181818, 0x00000000,	/* 54 6 7 */
 	0x0000007c, 0xc6c6c67c, 0xc6c6c67c, 0x00000000,
-	0x0000007c, 0xc6c6c6fc, 0xc0c06038, 0x00000000,	// 56 8 9
+	0x0000007c, 0xc6c6c6fc, 0xc0c06038, 0x00000000,	/* 56 8 9 */
 	0x00000000, 0x18180000, 0x00181800, 0x00000000,
-	0x00000000, 0x18180000, 0x0018180c, 0x00000000,	// 58 : ;
+	0x00000000, 0x18180000, 0x0018180c, 0x00000000,	/* 58 : ; */
 	0x00000060, 0x30180c06, 0x0c183060, 0x00000000,
 	0x00000000, 0x007e0000, 0x007e0000, 0x00000000,
 	0x00000006, 0x0c183060, 0x30180c06, 0x00000000,
 	0x0000007c, 0xc6c66030, 0x30003030, 0x00000000,
 	0x0000007c, 0xc6f6d6d6, 0x7606067c, 0x00000000,
-	0x00000010, 0x386cc6c6, 0xfec6c6c6, 0x00000000,	// 64 @ A
+	0x00000010, 0x386cc6c6, 0xfec6c6c6, 0x00000000,	/* 64 @ A */
 	0x0000007e, 0xc6c6c67e, 0xc6c6c67e, 0x00000000,
-	0x00000078, 0xcc060606, 0x0606cc78, 0x00000000,	// 66
+	0x00000078, 0xcc060606, 0x0606cc78, 0x00000000,	/* 66 */
 	0x0000003e, 0x66c6c6c6, 0xc6c6663e, 0x00000000,
-	0x000000fe, 0x0606063e, 0x060606fe, 0x00000000,	// 68
+	0x000000fe, 0x0606063e, 0x060606fe, 0x00000000,	/* 68 */
 	0x000000fe, 0x0606063e, 0x06060606, 0x00000000,
-	0x00000078, 0xcc060606, 0xf6c6ccb8, 0x00000000,	// 70
+	0x00000078, 0xcc060606, 0xf6c6ccb8, 0x00000000,	/* 70 */
 	0x000000c6, 0xc6c6c6fe, 0xc6c6c6c6, 0x00000000,
-	0x0000003c, 0x18181818, 0x1818183c, 0x00000000,	// 72
+	0x0000003c, 0x18181818, 0x1818183c, 0x00000000,	/* 72 */
 	0x00000060, 0x60606060, 0x6066663c, 0x00000000,
-	0x000000c6, 0xc666361e, 0x3666c6c6, 0x00000000,	// 74
+	0x000000c6, 0xc666361e, 0x3666c6c6, 0x00000000,	/* 74 */
 	0x00000006, 0x06060606, 0x060606fe, 0x00000000,
-	0x000000c6, 0xeefed6c6, 0xc6c6c6c6, 0x00000000,	// 76
+	0x000000c6, 0xeefed6c6, 0xc6c6c6c6, 0x00000000,	/* 76 */
 	0x000000c6, 0xcedefef6, 0xe6c6c6c6, 0x00000000,
-	0x00000038, 0x6cc6c6c6, 0xc6c66c38, 0x00000000,	// 78
+	0x00000038, 0x6cc6c6c6, 0xc6c66c38, 0x00000000,	/* 78 */
 	0x0000007e, 0xc6c6c67e, 0x06060606, 0x00000000,
-	0x00000038, 0x6cc6c6c6, 0xc6d67c38, 0x60000000,	// 80
+	0x00000038, 0x6cc6c6c6, 0xc6d67c38, 0x60000000,	/* 80 */
 	0x0000007e, 0xc6c6c67e, 0x66c6c6c6, 0x00000000,
-	0x0000007c, 0xc6c60c38, 0x60c6c67c, 0x00000000,	// 82
+	0x0000007c, 0xc6c60c38, 0x60c6c67c, 0x00000000,	/* 82 */
 	0x0000007e, 0x18181818, 0x18181818, 0x00000000,
-	0x000000c6, 0xc6c6c6c6, 0xc6c6c67c, 0x00000000,	// 84
+	0x000000c6, 0xc6c6c6c6, 0xc6c6c67c, 0x00000000,	/* 84 */
 	0x000000c6, 0xc6c6c6c6, 0xc66c3810, 0x00000000,
-	0x000000c6, 0xc6c6c6c6, 0xd6d6fe6c, 0x00000000,	// 86
+	0x000000c6, 0xc6c6c6c6, 0xd6d6fe6c, 0x00000000,	/* 86 */
 	0x000000c6, 0xc6c66c38, 0x6cc6c6c6, 0x00000000,
-	0x00000066, 0x66666666, 0x3c181818, 0x00000000,	// 88
+	0x00000066, 0x66666666, 0x3c181818, 0x00000000,	/* 88 */
 	0x000000fe, 0xc0603018, 0x0c0606fe, 0x00000000,
-	0x0000003c, 0x0c0c0c0c, 0x0c0c0c3c, 0x00000000,	// 90
+	0x0000003c, 0x0c0c0c0c, 0x0c0c0c3c, 0x00000000,	/* 90 */
 	0x00000002, 0x060c1830, 0x60c08000, 0x00000000,
-	0x0000003c, 0x30303030, 0x3030303c, 0x00000000,	// 92
+	0x0000003c, 0x30303030, 0x3030303c, 0x00000000,	/* 92 */
 	0x00001038, 0x6cc60000, 0x00000000, 0x00000000,
 	0x00000000, 0x00000000, 0x00000000, 0x00fe0000,
 	0x00001818, 0x30000000, 0x00000000, 0x00000000,
diff --git a/drivers/staging/solo6x10/solo6010-p2m.c b/drivers/staging/solo6x10/solo6010-p2m.c
index 7ed3ed4..956dea0 100644
--- a/drivers/staging/solo6x10/solo6010-p2m.c
+++ b/drivers/staging/solo6x10/solo6010-p2m.c
@@ -18,10 +18,11 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/scatterlist.h>
 
 #include "solo6010.h"
 
-// #define SOLO_TEST_P2M
+/* #define SOLO_TEST_P2M */
 
 int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
 		 void *sys_addr, u32 ext_addr, u32 size)
@@ -30,8 +31,9 @@
 	int ret;
 
 	WARN_ON(!size);
-	WARN_ON(id >= SOLO_NR_P2M);
-	if (!size || id >= SOLO_NR_P2M)
+	BUG_ON(id >= SOLO_NR_P2M);
+
+	if (!size)
 		return -EINVAL;
 
 	dma_addr = pci_map_single(solo_dev->pdev, sys_addr, size,
@@ -48,41 +50,136 @@
 int solo_p2m_dma_t(struct solo6010_dev *solo_dev, u8 id, int wr,
 		   dma_addr_t dma_addr, u32 ext_addr, u32 size)
 {
-	struct solo_p2m_dev *p2m_dev;
-	unsigned int timeout = 0;
+	struct p2m_desc *desc = kzalloc(sizeof(*desc) * 2, GFP_DMA);
+	int ret;
 
-	WARN_ON(!size);
-	WARN_ON(id >= SOLO_NR_P2M);
-	if (!size || id >= SOLO_NR_P2M)
-		return -EINVAL;
+	if (desc == NULL)
+		return -ENOMEM;
+
+	solo_p2m_push_desc(&desc[1], wr, dma_addr, ext_addr, size, 0, 0);
+	ret = solo_p2m_dma_desc(solo_dev, id, desc, 2);
+	kfree(desc);
+
+	return ret;
+}
+
+void solo_p2m_push_desc(struct p2m_desc *desc, int wr, dma_addr_t dma_addr,
+			u32 ext_addr, u32 size, int repeat, u32 ext_size)
+{
+	desc->ta = dma_addr;
+	desc->fa = ext_addr;
+
+	desc->ext = SOLO_P2M_COPY_SIZE(size >> 2);
+	desc->ctrl = SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) |
+		(wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON;
+
+	/* Ext size only matters when we're repeating */
+	if (repeat) {
+		desc->ext |= SOLO_P2M_EXT_INC(ext_size >> 2);
+		desc->ctrl |=  SOLO_P2M_PCI_INC(size >> 2) |
+			SOLO_P2M_REPEAT(repeat);
+	}
+}
+
+int solo_p2m_dma_desc(struct solo6010_dev *solo_dev, u8 id,
+		      struct p2m_desc *desc, int desc_count)
+{
+	struct solo_p2m_dev *p2m_dev;
+	unsigned int timeout;
+	int ret = 0;
+	u32 config = 0;
+	dma_addr_t desc_dma = 0;
+
+	BUG_ON(id >= SOLO_NR_P2M);
+	BUG_ON(!desc_count || desc_count > SOLO_NR_P2M_DESC);
 
 	p2m_dev = &solo_dev->p2m_dev[id];
 
-	down(&p2m_dev->sem);
+	mutex_lock(&p2m_dev->mutex);
 
-start_dma:
+	solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
+
 	INIT_COMPLETION(p2m_dev->completion);
 	p2m_dev->error = 0;
-	solo_reg_write(solo_dev, SOLO_P2M_TAR_ADR(id), dma_addr);
-	solo_reg_write(solo_dev, SOLO_P2M_EXT_ADR(id), ext_addr);
-	solo_reg_write(solo_dev, SOLO_P2M_EXT_CFG(id),
-		       SOLO_P2M_COPY_SIZE(size >> 2));
-	solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id),
-		       SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) |
-		       (wr ? SOLO_P2M_WRITE : 0) | SOLO_P2M_TRANS_ON);
 
+	/* Enable the descriptors */
+	config = solo_reg_read(solo_dev, SOLO_P2M_CONFIG(id));
+	desc_dma = pci_map_single(solo_dev->pdev, desc,
+				  desc_count * sizeof(*desc),
+				  PCI_DMA_TODEVICE);
+	solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(id), desc_dma);
+	solo_reg_write(solo_dev, SOLO_P2M_DESC_ID(id), desc_count - 1);
+	solo_reg_write(solo_dev, SOLO_P2M_CONFIG(id), config |
+		       SOLO_P2M_DESC_MODE);
+
+	/* Should have all descriptors completed from one interrupt */
 	timeout = wait_for_completion_timeout(&p2m_dev->completion, HZ);
 
 	solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), 0);
 
-	/* XXX Really looks to me like we will get stuck here if a
-	 * real PCI P2M error occurs */
+	/* Reset back to non-descriptor mode */
+	solo_reg_write(solo_dev, SOLO_P2M_CONFIG(id), config);
+	solo_reg_write(solo_dev, SOLO_P2M_DESC_ID(id), 0);
+	solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(id), 0);
+	pci_unmap_single(solo_dev->pdev, desc_dma,
+			 desc_count * sizeof(*desc),
+			 PCI_DMA_TODEVICE);
+
 	if (p2m_dev->error)
-		goto start_dma;
+		ret = -EIO;
+	else if (timeout == 0)
+		ret = -EAGAIN;
 
-	up(&p2m_dev->sem);
+	mutex_unlock(&p2m_dev->mutex);
 
-	return (timeout == 0) ? -EAGAIN : 0;
+	WARN_ON_ONCE(ret);
+
+	return ret;
+}
+
+int solo_p2m_dma_sg(struct solo6010_dev *solo_dev, u8 id,
+		    struct p2m_desc *pdesc, int wr,
+		    struct scatterlist *sg, u32 sg_off,
+		    u32 ext_addr, u32 size)
+{
+	int i;
+	int idx;
+
+	BUG_ON(id >= SOLO_NR_P2M);
+
+	if (WARN_ON_ONCE(!size))
+		return -EINVAL;
+
+	memset(pdesc, 0, sizeof(*pdesc));
+
+	/* Should rewrite this to handle > SOLO_NR_P2M_DESC transactions */
+	for (i = 0, idx = 1; idx < SOLO_NR_P2M_DESC && sg && size > 0;
+	     i++, sg = sg_next(sg)) {
+		struct p2m_desc *desc = &pdesc[idx];
+		u32 sg_len = sg_dma_len(sg);
+		u32 len;
+
+		if (sg_off >= sg_len) {
+			sg_off -= sg_len;
+			continue;
+		}
+
+		sg_len -= sg_off;
+		len = min(sg_len, size);
+
+		solo_p2m_push_desc(desc, wr, sg_dma_address(sg) + sg_off,
+				   ext_addr, len, 0, 0);
+
+		size -= len;
+		ext_addr += len;
+		idx++;
+
+		sg_off = 0;
+	}
+
+	WARN_ON_ONCE(size || i >= SOLO_NR_P2M_DESC);
+
+	return solo_p2m_dma_desc(solo_dev, id, pdesc, idx);
 }
 
 #ifdef SOLO_TEST_P2M
@@ -147,13 +244,16 @@
 	return;
 }
 #else
-#define run_p2m_test(__solo)	do{}while(0)
+#define run_p2m_test(__solo)	do {} while (0)
 #endif
 
 void solo_p2m_isr(struct solo6010_dev *solo_dev, int id)
 {
+	struct solo_p2m_dev *p2m_dev = &solo_dev->p2m_dev[id];
+
 	solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_P2M(id));
-	complete(&solo_dev->p2m_dev[id].completion);
+
+	complete(&p2m_dev->completion);
 }
 
 void solo_p2m_error_isr(struct solo6010_dev *solo_dev, u32 status)
@@ -188,16 +288,14 @@
 	for (i = 0; i < SOLO_NR_P2M; i++) {
 		p2m_dev = &solo_dev->p2m_dev[i];
 
-		sema_init(&p2m_dev->sem, 1);
+		mutex_init(&p2m_dev->mutex);
 		init_completion(&p2m_dev->completion);
 
-		solo_reg_write(solo_dev, SOLO_P2M_DES_ADR(i),
-			       __pa(p2m_dev->desc));
-
 		solo_reg_write(solo_dev, SOLO_P2M_CONTROL(i), 0);
 		solo_reg_write(solo_dev, SOLO_P2M_CONFIG(i),
 			       SOLO_P2M_CSC_16BIT_565 |
-			       SOLO_P2M_DMA_INTERVAL(0) |
+			       SOLO_P2M_DMA_INTERVAL(3) |
+			       SOLO_P2M_DESC_INTR_OPT |
 			       SOLO_P2M_PCI_MASTER_MODE);
 		solo6010_irq_on(solo_dev, SOLO_IRQ_P2M(i));
 	}
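Aside (not part of the patch): with the descriptor-mode conversion above, a caller builds an array of p2m_desc entries (slot 0 stays zeroed and real entries start at index 1, as in solo_p2m_dma_t()) and hands it to solo_p2m_dma_desc(), or lets solo_p2m_dma_sg() fill the slots from a scatterlist. A hedged sketch of a two-chunk transfer using the helpers added here; dma0/dma1 are assumed to be bus addresses from pci_map_single():

/* Hypothetical caller, for illustration only. */
static int example_p2m_copy(struct solo6010_dev *solo_dev, u8 id,
			    dma_addr_t dma0, dma_addr_t dma1,
			    u32 ext_addr, u32 len)
{
	struct p2m_desc *desc;
	int ret;

	/* descriptor 0 is a placeholder; real entries start at index 1 */
	desc = kzalloc(sizeof(*desc) * 3, GFP_DMA);
	if (desc == NULL)
		return -ENOMEM;

	solo_p2m_push_desc(&desc[1], 1, dma0, ext_addr, len, 0, 0);
	solo_p2m_push_desc(&desc[2], 1, dma1, ext_addr + len, len, 0, 0);

	ret = solo_p2m_dma_desc(solo_dev, id, desc, 3);
	kfree(desc);
	return ret;
}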
diff --git a/drivers/staging/solo6x10/solo6010-tw28.c b/drivers/staging/solo6x10/solo6010-tw28.c
index 0159c83..905a6ad 100644
--- a/drivers/staging/solo6x10/solo6010-tw28.c
+++ b/drivers/staging/solo6x10/solo6010-tw28.c
@@ -35,107 +35,107 @@
 #define DEFAULT_VACTIVE_PAL		(312-DEFAULT_VDELAY_PAL)
 
 static u8 tbl_tw2864_template[] = {
-	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x00
+	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x00 */
 	0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x10
+	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x10 */
 	0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x20
+	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x20 */
 	0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, // 0x30
+	0x00, 0x00, 0x80, 0x10, 0x80, 0x80, 0x00, 0x02, /* 0x30 */
 	0x12, 0xf5, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x40
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x70
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00,
-	0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, // 0x80
+	0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */
 	0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00,
-	0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, // 0x90
+	0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */
 	0x00, 0x28, 0x44, 0x44, 0xa0, 0x88, 0x5a, 0x01,
-	0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, // 0xa0
+	0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */
 	0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44,
-	0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, // 0xb0
+	0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
 	0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00,
-	0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, // 0xd0
+	0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */
 	0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
-	0x10, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, // 0xe0
+	0x10, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */
 	0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
-	0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, // 0xf0
+	0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */
 	0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00,
 };
 
 static u8 tbl_tw2865_ntsc_template[] = {
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x00
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */
 	0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x10
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */
 	0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, // 0x20
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */
 	0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, // 0x30
+	0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, /* 0x30 */
 	0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f,
-	0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, // 0x40
+	0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, /* 0x40 */
 	0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+	0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43,
-	0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, // 0x70
+	0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */
 	0xE9, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80,
-	0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, // 0x80
+	0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */
 	0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00,
-	0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, // 0x90
+	0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */
 	0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13,
-	0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, // 0xa0
+	0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, /* 0xa0 */
 	0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44,
-	0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, // 0xb0
+	0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */
 	0xFF, 0xE7, 0xE9, 0xE9, 0xEB, 0xFF, 0xD6, 0xD8,
-	0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+	0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
 	0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80,
-	0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, // 0xd0
+	0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */
 	0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
-	0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, // 0xe0
+	0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */
 	0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
-	0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, // 0xf0
+	0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */
 	0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0,
 };
 
 static u8 tbl_tw2865_pal_template[] = {
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x00
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */
 	0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x10
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */
 	0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x20
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */
 	0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
-	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, // 0x30
+	0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */
 	0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f,
-	0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, // 0x40
+	0x00, 0x94, 0x90, 0x48, 0x00, 0x38, 0x7F, 0x80, /* 0x40 */
 	0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x50
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0x60
+	0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43,
-	0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, // 0x70
+	0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */
 	0xEA, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80,
-	0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, // 0x80
+	0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */
 	0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00,
-	0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, // 0x90
+	0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */
 	0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13,
-	0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, // 0xa0
+	0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, /* 0xa0 */
 	0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44,
-	0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, // 0xb0
+	0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */
 	0xFF, 0xE7, 0xE9, 0xE9, 0xE9, 0xFF, 0xD7, 0xD8,
-	0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // 0xc0
+	0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
 	0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80,
-	0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, // 0xd0
+	0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */
 	0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81,
-	0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, // 0xe0
+	0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */
 	0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00,
-	0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, // 0xf0
+	0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, /* 0xf0 */
 	0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0,
 };
 
@@ -181,8 +181,8 @@
 		msleep_interruptible(1);
 	}
 
-//	printk("solo6010/tw28: Error writing register: %02x->%02x [%02x]\n",
-//		addr, off, val);
+/*	printk("solo6010/tw28: Error writing register: %02x->%02x [%02x]\n",
+		addr, off, val); */
 }
 
 static int tw2865_setup(struct solo6010_dev *solo_dev, u8 dev_addr)
@@ -217,7 +217,7 @@
 
 	for (i = 0; i < 0xff; i++) {
 		/* Skip read only registers */
-		if (i >= 0xb8 && i <= 0xc1 )
+		if (i >= 0xb8 && i <= 0xc1)
 			continue;
 		if ((i & ~0x30) == 0x00 ||
 		    (i & ~0x30) == 0x0c ||
@@ -302,7 +302,7 @@
 
 	for (i = 0; i < 0xff; i++) {
 		/* Skip read only registers */
-		if (i >= 0xb8 && i <= 0xc1 )
+		if (i >= 0xb8 && i <= 0xc1)
 			continue;
 		if ((i & ~0x30) == 0x00 ||
 		    (i & ~0x30) == 0x0c ||
@@ -334,13 +334,13 @@
 	};
 
 	u8 tbl_tw2815_sfr[] = {
-		0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, // 0x00
+		0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, /* 0x00 */
 		0x64, 0x80, 0x80, 0x82, 0x82, 0x00, 0x00, 0x00,
-		0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, // 0x10
+		0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, /* 0x10 */
 		0x00, 0x00, 0x00, 0xff, 0x8f, 0x00, 0x00, 0x00,
-		0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, // 0x20
+		0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, /* 0x20 */
 		0x31, 0x75, 0xb9, 0xfd, 0x00, 0x00, 0x88, 0x88,
-		0x88, 0x11, 0x00, 0x88, 0x88, 0x00,		// 0x30
+		0x88, 0x11, 0x00, 0x88, 0x88, 0x00,		/* 0x30 */
 	};
 	u8 *tbl_tw2815_common;
 	int i;
@@ -459,7 +459,7 @@
 
 		for (i = 0; i < 0x0f; i++) {
 			if (i == 0x00)
-				continue;	// read-only
+				continue;	/* read-only */
 			solo_i2c_writebyte(solo_dev, SOLO_I2C_TW,
 					   dev_addr, (ch * 0x10) + i,
 					   tbl_tw2815_common[i]);
@@ -597,7 +597,7 @@
 	return 0;
 }
 
-/* 
+/*
  * We accessed the video status signal in the Techwell chip through
  * iic/i2c because the video status reported by register REG_VI_STATUS1
  * (address 0x012C) of the SOLO6010 chip doesn't give the correct video
@@ -751,7 +751,7 @@
 		rval = tw_readbyte(solo_dev, chip_num,
 				   TW286x_BRIGHTNESS_ADDR(ch),
 				   TW_BRIGHTNESS_ADDR(ch));
-		if (is_tw286x(solo_dev, chip_num)) 
+		if (is_tw286x(solo_dev, chip_num))
 			*val = (s32)((char)rval) + 128;
 		else
 			*val = rval;
diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
index 097e82b..7bbb940 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2-enc.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
@@ -24,7 +24,7 @@
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-common.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
 
 #include "solo6010.h"
 #include "solo6010-tw28.h"
@@ -47,13 +47,14 @@
 	struct videobuf_queue	vidq;
 	struct list_head	vidq_active;
 	struct task_struct	*kthread;
+	struct p2m_desc		desc[SOLO_NR_P2M_DESC];
 };
 
 static unsigned char vid_vop_header[] = {
 	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x20,
 	0x02, 0x48, 0x05, 0xc0, 0x00, 0x40, 0x00, 0x40,
 	0x00, 0x40, 0x00, 0x80, 0x00, 0x97, 0x53, 0x04,
-	0x1f, 0x4c, 0x58, 0x10, 0x78, 0x51, 0x18, 0x3e,
+	0x1f, 0x4c, 0x58, 0x10, 0x78, 0x51, 0x18, 0x3f,
 };
 
 /*
@@ -151,6 +152,11 @@
 	else
 		solo_dev->motion_mask &= ~(1 << ch);
 
+	/* Do this regardless of whether we are turning on or off */
+	solo_reg_write(solo_enc->solo_dev, SOLO_VI_MOT_CLEAR,
+		       1 << solo_enc->ch);
+	solo_enc->motion_detected = 0;
+
 	solo_reg_write(solo_dev, SOLO_VI_MOT_ADR,
 		       SOLO_VI_MOTION_EN(solo_dev->motion_mask) |
 		       (SOLO_MOTION_EXT_ADDR(solo_dev) >> 16));
@@ -184,7 +190,7 @@
 		solo_enc->bw_weight <<= 2;
 		break;
 	default:
-		WARN(1, "mode is unknown");
+		WARN(1, "mode is unknown\n");
 	}
 }
 
@@ -211,11 +217,6 @@
 			solo_dev->enc_bw_remain -= solo_enc->bw_weight;
 	}
 
-	fh->kthread = kthread_run(solo_enc_thread, fh, SOLO6010_NAME "_enc");
-
-	if (IS_ERR(fh->kthread))
-		return PTR_ERR(fh->kthread);
-
 	fh->enc_on = 1;
 	fh->rd_idx = solo_enc->solo_dev->enc_wr_idx;
 
@@ -279,6 +280,24 @@
 	solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(solo_enc->ch), 0);
 }
 
+static int solo_start_fh_thread(struct solo_enc_fh *fh)
+{
+	struct solo_enc_dev *solo_enc = fh->enc;
+
+	fh->kthread = kthread_run(solo_enc_thread, fh, SOLO6010_NAME "_enc");
+
+	/* Oops, we had a problem */
+	if (IS_ERR(fh->kthread)) {
+		spin_lock(&solo_enc->lock);
+		solo_enc_off(fh);
+		spin_unlock(&solo_enc->lock);
+
+		return PTR_ERR(fh->kthread);
+	}
+
+	return 0;
+}
+
 static void enc_reset_gop(struct solo6010_dev *solo_dev, u8 ch)
 {
 	BUG_ON(ch >= solo_dev->nr_chans);
@@ -299,22 +318,68 @@
 	return 0;
 }
 
-static int enc_get_mpeg_dma_t(struct solo6010_dev *solo_dev, dma_addr_t buf,
-			      unsigned int off, unsigned int size)
+static void enc_write_sg(struct scatterlist *sglist, void *buf, int size)
+{
+	struct scatterlist *sg;
+	u8 *src = buf;
+
+	for (sg = sglist; sg && size > 0; sg = sg_next(sg)) {
+		u8 *p = sg_virt(sg);
+		size_t len = sg_dma_len(sg);
+		int i;
+
+		for (i = 0; i < len && size; i++)
+			p[i] = *(src++);
+	}
+}
+
+static int enc_get_mpeg_dma_sg(struct solo6010_dev *solo_dev,
+			       struct p2m_desc *desc,
+			       struct scatterlist *sglist, int skip,
+			       unsigned int off, unsigned int size)
 {
 	int ret;
 
 	if (off > SOLO_MP4E_EXT_SIZE(solo_dev))
 		return -EINVAL;
 
-	if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev))
+	if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) {
+		return solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E,
+				       desc, 0, sglist, skip,
+				       SOLO_MP4E_EXT_ADDR(solo_dev) + off, size);
+	}
+
+	/* Buffer wrap */
+	ret = solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0,
+			      sglist, skip, SOLO_MP4E_EXT_ADDR(solo_dev) + off,
+			      SOLO_MP4E_EXT_SIZE(solo_dev) - off);
+
+	ret |= solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_MP4E, desc, 0,
+			       sglist, skip + SOLO_MP4E_EXT_SIZE(solo_dev) - off,
+			       SOLO_MP4E_EXT_ADDR(solo_dev),
+			       size + off - SOLO_MP4E_EXT_SIZE(solo_dev));
+
+	return ret;
+}
+
+static int enc_get_mpeg_dma_t(struct solo6010_dev *solo_dev,
+			      dma_addr_t buf, unsigned int off,
+			      unsigned int size)
+{
+	int ret;
+
+	if (off > SOLO_MP4E_EXT_SIZE(solo_dev))
+		return -EINVAL;
+
+	if (off + size <= SOLO_MP4E_EXT_SIZE(solo_dev)) {
 		return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf,
 				      SOLO_MP4E_EXT_ADDR(solo_dev) + off, size);
+	}
 
 	/* Buffer wrap */
 	ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0, buf,
-			    SOLO_MP4E_EXT_ADDR(solo_dev) + off,
-			    SOLO_MP4E_EXT_SIZE(solo_dev) - off);
+			     SOLO_MP4E_EXT_ADDR(solo_dev) + off,
+			     SOLO_MP4E_EXT_SIZE(solo_dev) - off);
 
 	ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_MP4E, 0,
 			      buf + SOLO_MP4E_EXT_SIZE(solo_dev) - off,
@@ -337,70 +402,108 @@
 	return ret;
 }
 
-static int enc_get_jpeg_dma(struct solo6010_dev *solo_dev, dma_addr_t buf,
-			    unsigned int off, unsigned int size)
+static int enc_get_jpeg_dma_sg(struct solo6010_dev *solo_dev,
+			       struct p2m_desc *desc,
+			       struct scatterlist *sglist, int skip,
+			       unsigned int off, unsigned int size)
 {
 	int ret;
 
 	if (off > SOLO_JPEG_EXT_SIZE(solo_dev))
 		return -EINVAL;
 
-	if (off + size <= SOLO_JPEG_EXT_SIZE(solo_dev))
-		return solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0, buf,
-				      SOLO_JPEG_EXT_ADDR(solo_dev) + off, size);
+	if (off + size <= SOLO_JPEG_EXT_SIZE(solo_dev)) {
+		return solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG,
+				       desc, 0, sglist, skip,
+				       SOLO_JPEG_EXT_ADDR(solo_dev) + off, size);
+	}
 
 	/* Buffer wrap */
-	ret = solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0, buf,
-			     SOLO_JPEG_EXT_ADDR(solo_dev) + off,
-			     SOLO_JPEG_EXT_SIZE(solo_dev) - off);
+	ret = solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG, desc, 0,
+			      sglist, skip, SOLO_JPEG_EXT_ADDR(solo_dev) + off,
+			      SOLO_JPEG_EXT_SIZE(solo_dev) - off);
 
-	ret |= solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_JPEG, 0,
-			      buf + SOLO_JPEG_EXT_SIZE(solo_dev) - off,
-			      SOLO_JPEG_EXT_ADDR(solo_dev),
-			      size + off - SOLO_JPEG_EXT_SIZE(solo_dev));
+	ret |= solo_p2m_dma_sg(solo_dev, SOLO_P2M_DMA_ID_JPEG, desc, 0,
+			       sglist, skip + SOLO_JPEG_EXT_SIZE(solo_dev) - off,
+			       SOLO_JPEG_EXT_ADDR(solo_dev),
+			       size + off - SOLO_JPEG_EXT_SIZE(solo_dev));
 
 	return ret;
 }
 
-static int solo_fill_jpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
-			  struct videobuf_buffer *vb, dma_addr_t vbuf)
+/* Returns true if __chk is within the first __range bytes of __off */
+#define OFF_IN_RANGE(__off, __range, __chk) \
+	((__off <= __chk) && ((__off + __range) >= __chk))
+
+static void solo_jpeg_header(struct solo_enc_dev *solo_enc,
+			     struct videobuf_dmabuf *vbuf)
 {
-	struct solo_enc_dev *solo_enc = fh->enc;
-	struct solo6010_dev *solo_dev = solo_enc->solo_dev;
-	u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+	struct scatterlist *sg;
+	void *src = jpeg_header;
+	size_t copied = 0;
+	size_t to_copy = sizeof(jpeg_header);
 
-	memcpy(p, jpeg_header, sizeof(jpeg_header));
-	p[SOF0_START + 5] = 0xff & (solo_enc->height >> 8);
-	p[SOF0_START + 6] = 0xff & solo_enc->height;
-	p[SOF0_START + 7] = 0xff & (solo_enc->width >> 8);
-	p[SOF0_START + 8] = 0xff & solo_enc->width;
+	for (sg = vbuf->sglist; sg && copied < to_copy; sg = sg_next(sg)) {
+		size_t this_copy = min(sg_dma_len(sg),
+				       (unsigned int)(to_copy - copied));
+		u8 *p = sg_virt(sg);
 
-	vbuf += sizeof(jpeg_header);
-	vb->size = enc_buf->jpeg_size + sizeof(jpeg_header);
+		memcpy(p, src + copied, this_copy);
 
-	return enc_get_jpeg_dma(solo_dev, vbuf, enc_buf->jpeg_off,
-				enc_buf->jpeg_size);
+		if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 5))
+			p[(SOF0_START + 5) - copied] =
+				0xff & (solo_enc->height >> 8);
+		if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 6))
+			p[(SOF0_START + 6) - copied] = 0xff & solo_enc->height;
+		if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 7))
+			p[(SOF0_START + 7) - copied] =
+				0xff & (solo_enc->width >> 8);
+		if (OFF_IN_RANGE(copied, this_copy, SOF0_START + 8))
+			p[(SOF0_START + 8) - copied] = 0xff & solo_enc->width;
+
+		copied += this_copy;
+	}
+}
+
+static int solo_fill_jpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
+			  struct videobuf_buffer *vb,
+			  struct videobuf_dmabuf *vbuf)
+{
+	struct solo6010_dev *solo_dev = fh->enc->solo_dev;
+	int size = enc_buf->jpeg_size;
+
+	/* Copy the header first (direct write) */
+	solo_jpeg_header(fh->enc, vbuf);
+
+	vb->size = size + sizeof(jpeg_header);
+
+	/* Grab the jpeg frame */
+	return enc_get_jpeg_dma_sg(solo_dev, fh->desc, vbuf->sglist,
+				   sizeof(jpeg_header),
+				   enc_buf->jpeg_off, size);
 }
 
 static int solo_fill_mpeg(struct solo_enc_fh *fh, struct solo_enc_buf *enc_buf,
-			  struct videobuf_buffer *vb, dma_addr_t vbuf)
+			  struct videobuf_buffer *vb,
+			  struct videobuf_dmabuf *vbuf)
 {
 	struct solo_enc_dev *solo_enc = fh->enc;
 	struct solo6010_dev *solo_dev = solo_enc->solo_dev;
 	struct vop_header vh;
 	int ret;
 	int frame_size, frame_off;
+	int skip = 0;
 
 	if (WARN_ON_ONCE(enc_buf->size <= sizeof(vh)))
-		return -1;
+		return -EINVAL;
 
 	/* First get the hardware vop header (not real mpeg) */
 	ret = enc_get_mpeg_dma(solo_dev, &vh, enc_buf->off, sizeof(vh));
-	if (ret)
-		return -1;
+	if (WARN_ON_ONCE(ret))
+		return ret;
 
 	if (WARN_ON_ONCE(vh.size > enc_buf->size))
-		return -1;
+		return -EINVAL;
 
 	vb->width = vh.hsize << 4;
 	vb->height = vh.vsize << 4;
@@ -410,9 +513,9 @@
 	if (!enc_buf->vop) {
 		u16 fps = solo_dev->fps * 1000;
 		u16 interval = solo_enc->interval * 1000;
-		u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
+		u8 p[sizeof(vid_vop_header)];
 
-		memcpy(p, vid_vop_header, sizeof(vid_vop_header));
+		memcpy(p, vid_vop_header, sizeof(p));
 
 		if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
 			p[10] |= ((XVID_PAR_43_NTSC << 3) & 0x78);
@@ -434,43 +537,49 @@
 		if (vh.interlace)
 			p[29] |= 0x20;
 
+		enc_write_sg(vbuf->sglist, p, sizeof(p));
+
 		/* Adjust the dma buffer past this header */
 		vb->size += sizeof(vid_vop_header);
-		vbuf += sizeof(vid_vop_header);
+		skip = sizeof(vid_vop_header);
 	}
 
 	/* Now get the actual mpeg payload */
 	frame_off = (enc_buf->off + sizeof(vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
 	frame_size = enc_buf->size - sizeof(vh);
-	ret = enc_get_mpeg_dma_t(solo_dev, vbuf, frame_off, frame_size);
-	if (WARN_ON_ONCE(ret))
-		return -1;
 
-	return 0;
+	ret = enc_get_mpeg_dma_sg(solo_dev, fh->desc, vbuf->sglist,
+				  skip, frame_off, frame_size);
+	WARN_ON_ONCE(ret);
+
+	return ret;
 }
 
-/* On successful return (0), leaves solo_enc->lock unlocked */
-static int solo_enc_fillbuf(struct solo_enc_fh *fh,
+static void solo_enc_fillbuf(struct solo_enc_fh *fh,
 			    struct videobuf_buffer *vb)
 {
 	struct solo_enc_dev *solo_enc = fh->enc;
 	struct solo6010_dev *solo_dev = solo_enc->solo_dev;
 	struct solo_enc_buf *enc_buf = NULL;
-	dma_addr_t vbuf;
+	struct videobuf_dmabuf *vbuf;
 	int ret;
+	int error = 1;
 	u16 idx = fh->rd_idx;
 
 	while (idx != solo_dev->enc_wr_idx) {
 		struct solo_enc_buf *ebuf = &solo_dev->enc_buf[idx];
+
 		idx = (idx + 1) % SOLO_NR_RING_BUFS;
+
+		if (ebuf->ch != solo_enc->ch)
+			continue;
+
 		if (fh->fmt == V4L2_PIX_FMT_MPEG) {
-			if (fh->type != ebuf->type)
-				continue;
-			if (ebuf->ch == solo_enc->ch) {
+			if (fh->type == ebuf->type) {
 				enc_buf = ebuf;
 				break;
 			}
-		} else if (ebuf->ch == solo_enc->ch) {
+		} else {
 			/* For mjpeg, keep reading to the newest frame */
 			enc_buf = ebuf;
 		}
@@ -478,48 +587,55 @@
 
 	fh->rd_idx = idx;
 
-	if (!enc_buf)
-		return -1;
+	if (WARN_ON_ONCE(!enc_buf))
+		goto buf_err;
 
 	if ((fh->fmt == V4L2_PIX_FMT_MPEG &&
 	     vb->bsize < enc_buf->size) ||
 	    (fh->fmt == V4L2_PIX_FMT_MJPEG &&
 	     vb->bsize < (enc_buf->jpeg_size + sizeof(jpeg_header)))) {
-		return -1;
+		WARN_ON_ONCE(1);
+		goto buf_err;
 	}
 
-	if (!(vbuf = videobuf_to_dma_contig(vb)))
-		return -1;
-
-	/* Is it ok that we mess with this buffer out of lock? */
-	spin_unlock(&solo_enc->lock);
+	vbuf = videobuf_to_dma(vb);
+	if (WARN_ON_ONCE(!vbuf))
+		goto buf_err;
 
 	if (fh->fmt == V4L2_PIX_FMT_MPEG)
 		ret = solo_fill_mpeg(fh, enc_buf, vb, vbuf);
 	else
 		ret = solo_fill_jpeg(fh, enc_buf, vb, vbuf);
 
-	if (ret) // Ignore failures
-		return 0;
+	if (!ret)
+		error = 0;
 
-	list_del(&vb->queue);
-	vb->field_count++;
-	vb->ts = enc_buf->ts;
-	vb->state = VIDEOBUF_DONE;
+buf_err:
+	if (error) {
+		vb->state = VIDEOBUF_ERROR;
+	} else {
+		vb->field_count++;
+		vb->ts = enc_buf->ts;
+		vb->state = VIDEOBUF_DONE;
+	}
 
 	wake_up(&vb->done);
 
-	return 0;
+	return;
 }
 
 static void solo_enc_thread_try(struct solo_enc_fh *fh)
 {
 	struct solo_enc_dev *solo_enc = fh->enc;
+	struct solo6010_dev *solo_dev = solo_enc->solo_dev;
 	struct videobuf_buffer *vb;
 
 	for (;;) {
 		spin_lock(&solo_enc->lock);
 
+		if (fh->rd_idx == solo_dev->enc_wr_idx)
+			break;
+
 		if (list_empty(&fh->vidq_active))
 			break;
 
@@ -529,9 +645,11 @@
 		if (!waitqueue_active(&vb->done))
 			break;
 
-		/* On success, returns with solo_enc->lock unlocked */
-		if (solo_enc_fillbuf(fh, vb))
-			break;
+		list_del(&vb->queue);
+
+		spin_unlock(&solo_enc->lock);
+
+		solo_enc_fillbuf(fh, vb);
 	}
 
 	assert_spin_locked(&solo_enc->lock);
@@ -557,7 +675,7 @@
 
 	remove_wait_queue(&solo_enc->thread_wait, &wait);
 
-        return 0;
+	return 0;
 }
 
 void solo_motion_isr(struct solo6010_dev *solo_dev)
@@ -614,7 +732,8 @@
 		jpeg_next = solo_reg_read(solo_dev,
 					SOLO_VE_JPEG_QUE(solo_dev->enc_idx));
 
-		if ((ch = (mpeg_current >> 24) & 0x1f) >= SOLO_MAX_CHANNELS) {
+		ch = (mpeg_current >> 24) & 0x1f;
+		if (ch >= SOLO_MAX_CHANNELS) {
 			ch -= SOLO_MAX_CHANNELS;
 			enc_type = SOLO_ENC_TYPE_EXT;
 		} else
@@ -669,12 +788,12 @@
 static int solo_enc_buf_setup(struct videobuf_queue *vq, unsigned int *count,
 			      unsigned int *size)
 {
-        *size = FRAME_BUF_SIZE;
+	*size = FRAME_BUF_SIZE;
 
-        if (*count < MIN_VID_BUFFERS)
+	if (*count < MIN_VID_BUFFERS)
 		*count = MIN_VID_BUFFERS;
 
-        return 0;
+	return 0;
 }
 
 static int solo_enc_buf_prepare(struct videobuf_queue *vq,
@@ -696,7 +815,9 @@
 	if (vb->state == VIDEOBUF_NEEDS_INIT) {
 		int rc = videobuf_iolock(vq, vb, NULL);
 		if (rc < 0) {
-			videobuf_dma_contig_free(vq, vb);
+			struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+			videobuf_dma_unmap(vq->dev, dma);
+			videobuf_dma_free(dma);
 			vb->state = VIDEOBUF_NEEDS_INIT;
 			return rc;
 		}
@@ -719,7 +840,10 @@
 static void solo_enc_buf_release(struct videobuf_queue *vq,
 				 struct videobuf_buffer *vb)
 {
-	videobuf_dma_contig_free(vq, vb);
+	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+	videobuf_dma_unmap(vq->dev, dma);
+	videobuf_dma_free(dma);
 	vb->state = VIDEOBUF_NEEDS_INIT;
 }
 
@@ -750,25 +874,22 @@
 	struct solo_enc_dev *solo_enc = video_drvdata(file);
 	struct solo_enc_fh *fh;
 
-	if ((fh = kzalloc(sizeof(*fh), GFP_KERNEL)) == NULL)
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (fh == NULL)
 		return -ENOMEM;
 
-	spin_lock(&solo_enc->lock);
-
 	fh->enc = solo_enc;
 	file->private_data = fh;
 	INIT_LIST_HEAD(&fh->vidq_active);
 	fh->fmt = V4L2_PIX_FMT_MPEG;
 	fh->type = SOLO_ENC_TYPE_STD;
 
-	videobuf_queue_dma_contig_init(&fh->vidq, &solo_enc_video_qops,
-				    &solo_enc->solo_dev->pdev->dev,
-				    &solo_enc->lock,
-				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
-				    V4L2_FIELD_INTERLACED,
-				    sizeof(struct videobuf_buffer), fh, NULL);
-
-	spin_unlock(&solo_enc->lock);
+	videobuf_queue_sg_init(&fh->vidq, &solo_enc_video_qops,
+			       &solo_enc->solo_dev->pdev->dev,
+			       &solo_enc->lock,
+			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			       V4L2_FIELD_INTERLACED,
+			       sizeof(struct videobuf_buffer), fh, NULL);
 
 	return 0;
 }
@@ -785,7 +906,11 @@
 
 		spin_lock(&solo_enc->lock);
 		ret = solo_enc_on(fh);
-	        spin_unlock(&solo_enc->lock);
+		spin_unlock(&solo_enc->lock);
+		if (ret)
+			return ret;
+
+		ret = solo_start_fh_thread(fh);
 		if (ret)
 			return ret;
 	}
@@ -797,10 +922,15 @@
 static int solo_enc_release(struct file *file)
 {
 	struct solo_enc_fh *fh = file->private_data;
+	struct solo_enc_dev *solo_enc = fh->enc;
 
 	videobuf_stop(&fh->vidq);
 	videobuf_mmap_free(&fh->vidq);
+
+	spin_lock(&solo_enc->lock);
 	solo_enc_off(fh);
+	spin_unlock(&solo_enc->lock);
+
 	kfree(fh);
 
 	return 0;
@@ -842,7 +972,7 @@
 	if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
 		input->std = V4L2_STD_NTSC_M;
 	else
-		input->std = V4L2_STD_PAL_M;
+		input->std = V4L2_STD_PAL_B;
 
 	if (!tw28_get_video_status(solo_dev, solo_enc->ch))
 		input->status = V4L2_IN_ST_NO_SIGNAL;
@@ -915,9 +1045,8 @@
 
 	if (pix->field == V4L2_FIELD_ANY)
 		pix->field = V4L2_FIELD_INTERLACED;
-	else if (pix->field != V4L2_FIELD_INTERLACED) {
+	else if (pix->field != V4L2_FIELD_INTERLACED)
 		pix->field = V4L2_FIELD_INTERLACED;
-	}
 
 	/* Just set these */
 	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -937,7 +1066,8 @@
 
 	spin_lock(&solo_enc->lock);
 
-	if ((ret = solo_enc_try_fmt_cap(file, priv, f))) {
+	ret = solo_enc_try_fmt_cap(file, priv, f);
+	if (ret) {
 		spin_unlock(&solo_enc->lock);
 		return ret;
 	}
@@ -956,7 +1086,10 @@
 
 	spin_unlock(&solo_enc->lock);
 
-	return ret;
+	if (ret)
+		return ret;
+
+	return solo_start_fh_thread(fh);
 }
 
 static int solo_enc_get_fmt_cap(struct file *file, void *priv,
@@ -977,7 +1110,7 @@
 	return 0;
 }
 
-static int solo_enc_reqbufs(struct file *file, void *priv, 
+static int solo_enc_reqbufs(struct file *file, void *priv,
 			    struct v4l2_requestbuffers *req)
 {
 	struct solo_enc_fh *fh = priv;
@@ -1014,6 +1147,10 @@
 		spin_unlock(&solo_enc->lock);
 		if (ret)
 			return ret;
+
+		ret = solo_start_fh_thread(fh);
+		if (ret)
+			return ret;
 	}
 
 	ret = videobuf_dqbuf(&fh->vidq, buf, file->f_flags & O_NONBLOCK);
@@ -1033,12 +1170,16 @@
 
 	/* Check for key frame on mpeg data */
 	if (fh->fmt == V4L2_PIX_FMT_MPEG) {
-		struct videobuf_buffer *vb = fh->vidq.bufs[buf->index];
-		u8 *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
-		if (p[3] == 0x00)
-			buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
-		else
-			buf->flags |= V4L2_BUF_FLAG_PFRAME;
+		struct videobuf_dmabuf *vbuf =
+				videobuf_to_dma(fh->vidq.bufs[buf->index]);
+
+		if (vbuf) {
+			u8 *p = sg_virt(vbuf->sglist);
+			if (p[3] == 0x00)
+				buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
+			else
+				buf->flags |= V4L2_BUF_FLAG_PFRAME;
+		}
 	}
 
 	return 0;
@@ -1136,7 +1277,7 @@
 	/* XXX: Shouldn't we be able to get/set this from videobuf? */
 	cp->readbuffers = 2;
 
-        return 0;
+	return 0;
 }
 
 static int solo_s_parm(struct file *file, void *priv,
@@ -1176,7 +1317,7 @@
 
 	spin_unlock(&solo_enc->lock);
 
-        return 0;
+	return 0;
 }
 
 static int solo_queryctrl(struct file *file, void *priv,
@@ -1240,7 +1381,7 @@
 		return 0;
 	}
 
-        return -EINVAL;
+	return -EINVAL;
 }
 
 static int solo_querymenu(struct file *file, void *priv,
@@ -1250,7 +1391,8 @@
 	int err;
 
 	qctrl.id = qmenu->id;
-	if ((err = solo_queryctrl(file, priv, &qctrl)))
+	err = solo_queryctrl(file, priv, &qctrl);
+	if (err)
 		return err;
 
 	return v4l2_ctrl_query_menu(qmenu, &qctrl, NULL);
@@ -1350,9 +1492,9 @@
 		switch (ctrl->id) {
 		case V4L2_CID_RDS_TX_RADIO_TEXT:
 			if (ctrl->size - 1 > OSD_TEXT_MAX)
-                                err = -ERANGE;
+				err = -ERANGE;
 			else {
-                        	err = copy_from_user(solo_enc->osd_text,
+				err = copy_from_user(solo_enc->osd_text,
 						     ctrl->string,
 						     OSD_TEXT_MAX);
 				solo_enc->osd_text[OSD_TEXT_MAX] = '\0';
@@ -1459,7 +1601,7 @@
 	.minor			= -1,
 	.release		= video_device_release,
 
-	.tvnorms		= V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
+	.tvnorms		= V4L2_STD_NTSC_M | V4L2_STD_PAL_B,
 	.current_norm		= V4L2_STD_NTSC_M,
 };
 
@@ -1505,7 +1647,7 @@
 	atomic_set(&solo_enc->readers, 0);
 
 	solo_enc->qp = SOLO_DEFAULT_QP;
-        solo_enc->gop = solo_dev->fps;
+	solo_enc->gop = solo_dev->fps;
 	solo_enc->interval = 1;
 	solo_enc->mode = SOLO_ENC_MODE_CIF;
 	solo_enc->motion_thresh = SOLO_DEF_MOT_THRESH;
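The enc_get_mpeg_dma_sg()/enc_get_jpeg_dma_sg() helpers above treat the external MP4E/JPEG memory as a ring: a request that runs past the end of the region is issued as two scatter-gather transfers, one up to the end and one from offset zero. Below is a minimal sketch of that split with hypothetical names; region_size stands in for SOLO_MP4E_EXT_SIZE()/SOLO_JPEG_EXT_SIZE() and this is illustration, not driver code.

/* Illustrative only: split a transfer of `size` bytes at offset `off` in a
 * circular region of `region_size` bytes into (up to) two linear chunks,
 * mirroring the "Buffer wrap" branches in the driver. */
static void split_wrapped(unsigned int off, unsigned int size,
			  unsigned int region_size,
			  unsigned int *first_len, unsigned int *second_len)
{
	if (off + size <= region_size) {
		*first_len = size;			/* no wrap needed */
		*second_len = 0;
	} else {
		*first_len = region_size - off;		/* tail of the region */
		*second_len = size - *first_len;	/* rest, from offset 0 */
	}
}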
diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c
index 6ffd21d..a8491dc 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2.c
@@ -24,14 +24,13 @@
 
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-common.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf-dma-sg.h>
 
 #include "solo6010.h"
 #include "solo6010-tw28.h"
 
 #define SOLO_HW_BPL		2048
 #define SOLO_DISP_PIX_FIELD	V4L2_FIELD_INTERLACED
-#define SOLO_DISP_BUF_SIZE	(64 * 1024) // 64k
 
 /* Image size is two fields, SOLO_HW_BPL is one horizontal line */
 #define solo_vlines(__solo)	(__solo->video_vsize * 2)
@@ -49,6 +48,8 @@
 	spinlock_t		slock;
 	int			old_write;
 	struct list_head	vidq_active;
+	struct p2m_desc		desc[SOLO_NR_P2M_DESC];
+	int			desc_idx;
 };
 
 unsigned video_nr = -1;
@@ -96,7 +97,7 @@
 		       SOLO_VI_WIN_EX(ex) |
 		       SOLO_VI_WIN_SCALE(scale));
 
-        solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
+	solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
 		       SOLO_VI_WIN_SY(sy) |
 		       SOLO_VI_WIN_EY(ey));
 }
@@ -203,50 +204,149 @@
 	return 0;
 }
 
+static void disp_reset_desc(struct solo_filehandle *fh)
+{
+	/* We use desc mode, which ignores desc 0 */
+	memset(fh->desc, 0, sizeof(*fh->desc));
+	fh->desc_idx = 1;
+}
+
+static int disp_flush_descs(struct solo_filehandle *fh)
+{
+	int ret;
+
+	if (!fh->desc_idx)
+		return 0;
+
+	ret = solo_p2m_dma_desc(fh->solo_dev, SOLO_P2M_DMA_ID_DISP,
+				fh->desc, fh->desc_idx);
+	disp_reset_desc(fh);
+
+	return ret;
+}
+
+static int disp_push_desc(struct solo_filehandle *fh, dma_addr_t dma_addr,
+		      u32 ext_addr, int size, int repeat, int ext_size)
+{
+	if (fh->desc_idx >= SOLO_NR_P2M_DESC) {
+		int ret = disp_flush_descs(fh);
+		if (ret)
+			return ret;
+	}
+
+	solo_p2m_push_desc(&fh->desc[fh->desc_idx], 0, dma_addr, ext_addr,
+			   size, repeat, ext_size);
+	fh->desc_idx++;
+
+	return 0;
+}
+
 static void solo_fillbuf(struct solo_filehandle *fh,
 			 struct videobuf_buffer *vb)
 {
 	struct solo6010_dev *solo_dev = fh->solo_dev;
-	dma_addr_t vbuf;
+	struct videobuf_dmabuf *vbuf;
 	unsigned int fdma_addr;
-	int frame_size;
 	int error = 1;
 	int i;
+	struct scatterlist *sg;
+	dma_addr_t sg_dma;
+	int sg_size_left;
 
-	if (!(vbuf = videobuf_to_dma_contig(vb)))
+	vbuf = videobuf_to_dma(vb);
+	if (!vbuf)
 		goto finish_buf;
 
 	if (erase_off(solo_dev)) {
-		void *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
-		int image_size = solo_image_size(solo_dev);
-		for (i = 0; i < image_size; i += 2) {
-			((u8 *)p)[i] = 0x80;
-			((u8 *)p)[i + 1] = 0x00;
+		int i;
+
+		/* Just blit to the entire sg list, ignoring size */
+		for_each_sg(vbuf->sglist, sg, vbuf->sglen, i) {
+			void *p = sg_virt(sg);
+			size_t len = sg_dma_len(sg);
+			size_t j;
+
+			/* Write the 0x80/0x00 erase pattern; use a separate
+			 * counter so the for_each_sg() index above is not
+			 * clobbered. */
+			for (j = 0; j < len; j += 2) {
+				((u8 *)p)[j] = 0x80;
+				((u8 *)p)[j + 1] = 0x00;
+			}
 		}
+
 		error = 0;
 		goto finish_buf;
 	}
 
-	frame_size = SOLO_HW_BPL * solo_vlines(solo_dev);
-	fdma_addr = SOLO_DISP_EXT_ADDR(solo_dev) + (fh->old_write * frame_size);
+	disp_reset_desc(fh);
+	sg = vbuf->sglist;
+	sg_dma = sg_dma_address(sg);
+	sg_size_left = sg_dma_len(sg);
 
-	for (i = 0; i < frame_size / SOLO_DISP_BUF_SIZE; i++) {
-		int j;
-		for (j = 0; j < (SOLO_DISP_BUF_SIZE / SOLO_HW_BPL); j++) {
-			if (solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_DISP, 0,
-					   vbuf, fdma_addr + (j * SOLO_HW_BPL),
-					   solo_bytesperline(solo_dev)))
+	fdma_addr = SOLO_DISP_EXT_ADDR(solo_dev) + (fh->old_write *
+			(SOLO_HW_BPL * solo_vlines(solo_dev)));
+
+	for (i = 0; i < solo_vlines(solo_dev); i++) {
+		int line_len = solo_bytesperline(solo_dev);
+		int lines;
+
+		if (!sg_size_left) {
+			sg = sg_next(sg);
+			if (sg == NULL)
 				goto finish_buf;
-			vbuf += solo_bytesperline(solo_dev);
+			sg_dma = sg_dma_address(sg);
+			sg_size_left = sg_dma_len(sg);
 		}
-		fdma_addr += SOLO_DISP_BUF_SIZE;
+
+		/* No room for an entire line, so chunk it up */
+		if (sg_size_left < line_len) {
+			int this_addr = fdma_addr;
+
+			while (line_len > 0) {
+				int this_write;
+
+				if (!sg_size_left) {
+					sg = sg_next(sg);
+					if (sg == NULL)
+						goto finish_buf;
+					sg_dma = sg_dma_address(sg);
+					sg_size_left = sg_dma_len(sg);
+				}
+
+				this_write = min(sg_size_left, line_len);
+
+				if (disp_push_desc(fh, sg_dma, this_addr,
+						   this_write, 0, 0))
+					goto finish_buf;
+
+				line_len -= this_write;
+				sg_size_left -= this_write;
+				sg_dma += this_write;
+				this_addr += this_write;
+			}
+
+			fdma_addr += SOLO_HW_BPL;
+			continue;
+		}
+
+		/* Shove as many lines into a repeating descriptor as possible */
+		lines = min(sg_size_left / line_len,
+			    solo_vlines(solo_dev) - i);
+
+		if (disp_push_desc(fh, sg_dma, fdma_addr, line_len,
+				   lines - 1, SOLO_HW_BPL))
+			goto finish_buf;
+
+		i += lines - 1;
+		fdma_addr += SOLO_HW_BPL * lines;
+		sg_dma += lines * line_len;
+		sg_size_left -= lines * line_len;
 	}
-	error = 0;
+
+	error = disp_flush_descs(fh);
 
 finish_buf:
 	if (error) {
 		vb->state = VIDEOBUF_ERROR;
 	} else {
+		vb->size = solo_vlines(solo_dev) * solo_bytesperline(solo_dev);
 		vb->state = VIDEOBUF_DONE;
 		vb->field_count++;
 		do_gettimeofday(&vb->ts);
@@ -275,7 +375,7 @@
 			break;
 
 		cur_write = SOLO_VI_STATUS0_PAGE(solo_reg_read(fh->solo_dev,
-							SOLO_VI_STATUS0));
+						 SOLO_VI_STATUS0));
 		if (cur_write == fh->old_write)
 			break;
 
@@ -310,7 +410,7 @@
 
 	remove_wait_queue(&solo_dev->disp_thread_wait, &wait);
 
-        return 0;
+	return 0;
 }
 
 static int solo_start_thread(struct solo_filehandle *fh)
@@ -337,12 +437,12 @@
 	struct solo_filehandle *fh = vq->priv_data;
 	struct solo6010_dev *solo_dev  = fh->solo_dev;
 
-        *size = solo_image_size(solo_dev);
+	*size = solo_image_size(solo_dev);
 
-        if (*count < MIN_VID_BUFFERS)
+	if (*count < MIN_VID_BUFFERS)
 		*count = MIN_VID_BUFFERS;
 
-        return 0;
+	return 0;
 }
 
 static int solo_buf_prepare(struct videobuf_queue *vq,
@@ -364,7 +464,9 @@
 	if (vb->state == VIDEOBUF_NEEDS_INIT) {
 		int rc = videobuf_iolock(vq, vb, NULL);
 		if (rc < 0) {
-			videobuf_dma_contig_free(vq, vb);
+			struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+			videobuf_dma_unmap(vq->dev, dma);
+			videobuf_dma_free(dma);
 			vb->state = VIDEOBUF_NEEDS_INIT;
 			return rc;
 		}
@@ -388,7 +490,10 @@
 static void solo_buf_release(struct videobuf_queue *vq,
 			     struct videobuf_buffer *vb)
 {
-	videobuf_dma_contig_free(vq, vb);
+	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+
+	videobuf_dma_unmap(vq->dev, dma);
+	videobuf_dma_free(dma);
 	vb->state = VIDEOBUF_NEEDS_INIT;
 }
 
@@ -404,7 +509,7 @@
 {
 	struct solo_filehandle *fh = file->private_data;
 
-        return videobuf_poll_stream(file, &fh->vidq, wait);
+	return videobuf_poll_stream(file, &fh->vidq, wait);
 }
 
 static int solo_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -420,7 +525,8 @@
 	struct solo_filehandle *fh;
 	int ret;
 
-	if ((fh = kzalloc(sizeof(*fh), GFP_KERNEL)) == NULL)
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (fh == NULL)
 		return -ENOMEM;
 
 	spin_lock_init(&fh->slock);
@@ -428,16 +534,17 @@
 	fh->solo_dev = solo_dev;
 	file->private_data = fh;
 
-	if ((ret = solo_start_thread(fh))) {
+	ret = solo_start_thread(fh);
+	if (ret) {
 		kfree(fh);
 		return ret;
 	}
 
-	videobuf_queue_dma_contig_init(&fh->vidq, &solo_video_qops,
-				    &solo_dev->pdev->dev, &fh->slock,
-				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
-				    SOLO_DISP_PIX_FIELD,
-				    sizeof(struct videobuf_buffer), fh, NULL);
+	videobuf_queue_sg_init(&fh->vidq, &solo_video_qops,
+			       &solo_dev->pdev->dev, &fh->slock,
+			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
+			       SOLO_DISP_PIX_FIELD,
+			       sizeof(struct videobuf_buffer), fh, NULL);
 
 	return 0;
 }
@@ -530,7 +637,7 @@
 	if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
 		input->std = V4L2_STD_NTSC_M;
 	else
-		input->std = V4L2_STD_PAL_M;
+		input->std = V4L2_STD_PAL_B;
 
 	return 0;
 }
@@ -622,7 +729,7 @@
 	return 0;
 }
 
-static int solo_reqbufs(struct file *file, void *priv, 
+static int solo_reqbufs(struct file *file, void *priv,
 			struct v4l2_requestbuffers *req)
 {
 	struct solo_filehandle *fh = priv;
@@ -781,11 +888,11 @@
 	.vidioc_qbuf			= solo_qbuf,
 	.vidioc_dqbuf			= solo_dqbuf,
 	.vidioc_streamon		= solo_streamon,
-        .vidioc_streamoff		= solo_streamoff,
+	.vidioc_streamoff		= solo_streamoff,
 	/* Controls */
 	.vidioc_queryctrl		= solo_disp_queryctrl,
-        .vidioc_g_ctrl			= solo_disp_g_ctrl,
-        .vidioc_s_ctrl			= solo_disp_s_ctrl,
+	.vidioc_g_ctrl			= solo_disp_g_ctrl,
+	.vidioc_s_ctrl			= solo_disp_s_ctrl,
 };
 
 static struct video_device solo_v4l2_template = {
@@ -795,7 +902,7 @@
 	.minor			= -1,
 	.release		= video_device_release,
 
-	.tvnorms		= V4L2_STD_NTSC_M | V4L2_STD_PAL_M,
+	.tvnorms		= V4L2_STD_NTSC_M | V4L2_STD_PAL_B,
 	.current_norm		= V4L2_STD_NTSC_M,
 };
 
@@ -836,13 +943,13 @@
 	for (i = 0; i < solo_dev->nr_chans; i++) {
 		solo_v4l2_set_ch(solo_dev, i);
 		while (erase_off(solo_dev))
-			;// Do nothing
+			;/* Do nothing */
 	}
 
 	/* Set the default display channel */
 	solo_v4l2_set_ch(solo_dev, 0);
 	while (erase_off(solo_dev))
-		;// Do nothing
+		;/* Do nothing */
 
 	solo6010_irq_on(solo_dev, SOLO_IRQ_VIDEO_IN);
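The disp_reset_desc()/disp_push_desc()/disp_flush_descs() trio above batches P2M descriptors: slot 0 is reserved (the hardware's descriptor mode skips it), pushes fill successive slots, and the array is submitted whenever it fills or at the end of the frame. A rough sketch of the pattern follows; submit() is a hypothetical stand-in for solo_p2m_dma_desc().

#define NR_DESC 256			/* mirrors SOLO_NR_P2M_DESC */

struct desc { unsigned int ctrl, ext, ta, fa; };

/* Queue one descriptor, flushing the batch first if the array is full. */
static int push_desc(struct desc *descs, int *idx, struct desc d,
		     int (*submit)(struct desc *descs, int count))
{
	if (*idx >= NR_DESC) {
		int ret = submit(descs, *idx);	/* hand the batch to the DMA engine */
		if (ret)
			return ret;
		*idx = 1;			/* slot 0 stays unused in desc mode */
	}
	descs[(*idx)++] = d;
	return 0;
}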
 
diff --git a/drivers/staging/solo6x10/solo6010.h b/drivers/staging/solo6x10/solo6010.h
index dca8e3e..9c930f3 100644
--- a/drivers/staging/solo6x10/solo6010.h
+++ b/drivers/staging/solo6x10/solo6010.h
@@ -26,8 +26,8 @@
 #include <linux/semaphore.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
-#include <linux/delay.h>
 #include <linux/wait.h>
+#include <linux/delay.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
 
@@ -48,10 +48,14 @@
 #define PCI_DEVICE_ID_NEUSOLO_4		0x4304
 #define PCI_DEVICE_ID_NEUSOLO_9		0x4309
 #define PCI_DEVICE_ID_NEUSOLO_16	0x4310
-/* Commell Softlogic 6010 based cards */
-#define PCI_DEVICE_ID_COMMSOLO_4	0x4E04
-#define PCI_DEVICE_ID_COMMSOLO_9	0x4E09
-#define PCI_DEVICE_ID_COMMSOLO_16	0x4E10
+/* Bluecherry Softlogic 6010 based cards */
+#define PCI_DEVICE_ID_BC_SOLO_4		0x4E04
+#define PCI_DEVICE_ID_BC_SOLO_9		0x4E09
+#define PCI_DEVICE_ID_BC_SOLO_16	0x4E10
+/* Bluecherry Softlogic 6110 based cards */
+#define PCI_DEVICE_ID_BC_6110_4		0x5304
+#define PCI_DEVICE_ID_BC_6110_8		0x5308
+#define PCI_DEVICE_ID_BC_6110_16	0x5310
 #endif /* Bluecherry */
 
 #define SOLO6010_NAME			"solo6010"
@@ -64,7 +68,7 @@
 #define SOLO6010_VER_MINOR		0
 #define SOLO6010_VER_SUB		0
 #define SOLO6010_VER_NUM \
-    KERNEL_VERSION(SOLO6010_VER_MAJOR, SOLO6010_VER_MINOR, SOLO6010_VER_SUB)
+	KERNEL_VERSION(SOLO6010_VER_MAJOR, SOLO6010_VER_MINOR, SOLO6010_VER_SUB)
 
 /*
  * The SOLO6010 actually has 8 i2c channels, but we only use 2.
@@ -78,7 +82,6 @@
 /* DMA Engine setup */
 #define SOLO_NR_P2M			4
 #define SOLO_NR_P2M_DESC		256
-#define SOLO_P2M_DESC_SIZE		(SOLO_NR_P2M_DESC * 16)
 /* MPEG and JPEG share the same interrupt and locks so they must be together
  * in the same dma channel. */
 #define SOLO_P2M_DMA_ID_MP4E		0
@@ -123,11 +126,17 @@
 	IIC_STATE_STOP
 };
 
+struct p2m_desc {
+	u32 ctrl;
+	u32 ext;
+	u32 ta;
+	u32 fa;
+};
+
 struct solo_p2m_dev {
-	struct semaphore	sem;
+	struct mutex		mutex;
 	struct completion	completion;
 	int			error;
-	u8			desc[SOLO_P2M_DESC_SIZE];
 };
 
 #define OSD_TEXT_MAX		30
@@ -185,7 +194,7 @@
 	/* i2c related items */
 	struct i2c_adapter	i2c_adap[SOLO_I2C_ADAPTERS];
 	enum SOLO_I2C_STATE	i2c_state;
-	struct semaphore	i2c_sem;
+	struct mutex		i2c_mutex;
 	int			i2c_id;
 	wait_queue_head_t	i2c_wait;
 	struct i2c_msg		*i2c_msg;
@@ -212,7 +221,7 @@
 	struct solo_enc_buf	enc_buf[SOLO_NR_RING_BUFS];
 
 	/* Current video settings */
-	u32 			video_type;
+	u32			video_type;
 	u16			video_hsize, video_vsize;
 	u16			vout_hstart, vout_vstart;
 	u16			vin_hstart, vin_vstart;
@@ -306,6 +315,14 @@
 		   dma_addr_t dma_addr, u32 ext_addr, u32 size);
 int solo_p2m_dma(struct solo6010_dev *solo_dev, u8 id, int wr,
 		 void *sys_addr, u32 ext_addr, u32 size);
+int solo_p2m_dma_sg(struct solo6010_dev *solo_dev, u8 id,
+		    struct p2m_desc *pdesc, int wr,
+		    struct scatterlist *sglist, u32 sg_off,
+		    u32 ext_addr, u32 size);
+void solo_p2m_push_desc(struct p2m_desc *desc, int wr, dma_addr_t dma_addr,
+			u32 ext_addr, u32 size, int repeat, u32 ext_size);
+int solo_p2m_dma_desc(struct solo6010_dev *solo_dev, u8 id,
+		      struct p2m_desc *desc, int desc_count);
 
 /* Set the threshold for motion detection */
 void solo_set_motion_threshold(struct solo6010_dev *solo_dev, u8 ch, u16 val);
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 65b2311..1b34a87 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -78,10 +78,10 @@
 	/* don't change CPU */
 	preempt_disable();
 
-	__get_cpu_var(reporting_keystroke) = true;
+	__this_cpu_write(reporting_keystroke, true);
 	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
 	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
-	__get_cpu_var(reporting_keystroke) = false;
+	__this_cpu_write(reporting_keystroke, false);
 
 	/* reenable preemption */
 	preempt_enable();
@@ -95,10 +95,5 @@
 	 */
 bool speakup_fake_key_pressed(void)
 {
-	bool is_pressed;
-
-	is_pressed = get_cpu_var(reporting_keystroke);
-	put_cpu_var(reporting_keystroke);
-
-	return is_pressed;
+	return this_cpu_read(reporting_keystroke);
 }
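For context on the fakekey.c hunk above: __this_cpu_write() and this_cpu_read() access the running CPU's instance of a per-cpu variable directly, which is why the old get_cpu_var()/put_cpu_var() pair and its local copy can be dropped. A small hedged example of the same pattern; example_flag and the helpers are hypothetical.

#include <linux/percpu.h>

static DEFINE_PER_CPU(bool, example_flag);

static void set_flag(bool val)
{
	/* caller is expected to have preemption disabled, as in fakekey.c */
	__this_cpu_write(example_flag, val);
}

static bool get_flag(void)
{
	/* this_cpu_read() is safe to call without an explicit get/put pair */
	return this_cpu_read(example_flag);
}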
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index cc79f9e..408bb9b 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -984,8 +984,10 @@
 	 * not known ahead of time.
 	 */
 	accessibility_kobj = kobject_create_and_add("accessibility", NULL);
-	if (!accessibility_kobj)
-		return -ENOMEM;
+	if (!accessibility_kobj) {
+		retval = -ENOMEM;
+		goto out;
+	}
 
 	speakup_kobj = kobject_create_and_add("speakup", accessibility_kobj);
 	if (!speakup_kobj) {
@@ -1002,7 +1004,7 @@
 	if (retval)
 		goto err_group;
 
-	return 0;
+	goto out;
 
 err_group:
 	sysfs_remove_group(speakup_kobj, &main_attr_group);
@@ -1010,6 +1012,7 @@
 	kobject_put(speakup_kobj);
 err_acc:
 	kobject_put(accessibility_kobj);
+out:
 	return retval;
 }
 
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 4b7a9c2..3cd0039 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2253,17 +2253,17 @@
 
 	err = speakup_add_virtual_keyboard();
 	if (err)
-		return err;
+		goto out;
 
 	initialize_msgs();	/* Initialize arrays for i18n. */
 	first_console = kzalloc(sizeof(*first_console), GFP_KERNEL);
-	if (!first_console)
-		return -ENOMEM;
-	err = speakup_kobj_init();
-	if (err) {
-		kfree(first_console);
-		return err;
+	if (!first_console) {
+		err = -ENOMEM;
+		goto err_cons;
 	}
+	err = speakup_kobj_init();
+	if (err)
+		goto err_kobject;
 
 	reset_default_chars();
 	reset_default_chartab();
@@ -2299,11 +2299,20 @@
 
 	speakup_task = kthread_create(speakup_thread, NULL, "speakup");
 	set_user_nice(speakup_task, 10);
-	if (!IS_ERR(speakup_task))
-		wake_up_process(speakup_task);
-	else
-		return -ENOMEM;
-	return 0;
+	if (IS_ERR(speakup_task)) {
+		err = -ENOMEM;
+		goto err_kobject;
+	}
+	wake_up_process(speakup_task);
+	goto out;
+
+err_kobject:
+	speakup_kobj_exit();
+	kfree(first_console);
+err_cons:
+	speakup_remove_virtual_keyboard();
+out:
+	return err;
 }
 
 module_init(speakup_init);
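Both speakup hunks above convert early returns into a single unwind path. The general shape is sketched below with hypothetical step_a()/step_b()/undo_a() helpers; each label undoes only what succeeded before the failure, in reverse order.

static int example_init(void)
{
	int err;

	err = step_a();			/* e.g. add the virtual keyboard */
	if (err)
		goto out;

	err = step_b();			/* e.g. set up kobjects */
	if (err)
		goto err_a;		/* undo step_a only */

	return 0;

err_a:
	undo_a();
out:
	return err;
}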
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index 840bddb..d36c90e 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -52,7 +52,7 @@
 
 #define COLOR_BUFFER_SIZE 160
 
-struct spk_highlight_color_track{
+struct spk_highlight_color_track {
 	/* Count of each background color */
 	unsigned int bgcount[8];
 	/* Buffer for characters drawn with each background color */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index 63a9d0a..007b24b 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -28,6 +28,7 @@
 #include <linux/log2.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/async.h>
 
 /**** Helper functions used for Div, Remainder operation on u64 ****/
 
@@ -729,34 +730,16 @@
 }
 */
 
-static int GLOB_SBD_init(void)
+static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
 {
 	int i;
 
-	/* Set debug output level (0~3) here. 3 is most verbose */
-	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
-
-	mutex_init(&spectra_lock);
-
-	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
-	if (GLOB_SBD_majornum <= 0) {
-		printk(KERN_ERR "Unable to get the major %d for Spectra",
-		       GLOB_SBD_majornum);
-		return -EBUSY;
-	}
-
-	if (PASS != GLOB_FTL_Flash_Init()) {
-		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
-		       "Aborting\n");
-		goto out_flash_register;
-	}
-
 	/* create_sysfs_entry(&dev->dev); */
 
 	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
 		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
 		       "Aborting\n");
-		goto out_flash_register;
+		return;
 	} else {
 		nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
 			       "Num blocks=%d, pagesperblock=%d, "
@@ -775,24 +758,50 @@
 	}
 	printk(KERN_ALERT "Spectra: block table has been found.\n");
 
+	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
+	if (GLOB_SBD_majornum <= 0) {
+		printk(KERN_ERR "Unable to get the major %d for Spectra",
+		       GLOB_SBD_majornum);
+		goto out_ftl_flash_register;
+	}
+
 	for (i = 0; i < NUM_DEVICES; i++)
 		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
-			goto out_ftl_flash_register;
+			goto out_blk_register;
 
 	nand_dbg_print(NAND_DBG_DEBUG,
 		       "Spectra: module loaded with major number %d\n",
 		       GLOB_SBD_majornum);
 
-	return 0;
+	return;
 
+out_blk_register:
+	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
 out_ftl_flash_register:
 	GLOB_FTL_Cache_Release();
-out_flash_register:
-	GLOB_FTL_Flash_Release();
-	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
 	printk(KERN_ERR "Spectra: Module load failed.\n");
+}
 
-	return -ENOMEM;
+int register_spectra_ftl()
+{
+	async_schedule(register_spectra_ftl_async, NULL);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_spectra_ftl);
+
+static int GLOB_SBD_init(void)
+{
+	/* Set debug output level (0~3) here. 3 is most verbose */
+	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
+
+	mutex_init(&spectra_lock);
+
+	if (PASS != GLOB_FTL_Flash_Init()) {
+		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
+		       "Aborting\n");
+		return -ENODEV;
+	}
+	return 0;
 }
 
 static void __exit GLOB_SBD_exit(void)
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
index 6c5d90c..85c0750 100644
--- a/drivers/staging/spectra/ffsport.h
+++ b/drivers/staging/spectra/ffsport.h
@@ -80,5 +80,6 @@
 extern int GLOB_Calc_Used_Bits(u32 n);
 extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
 extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
+extern int register_spectra_ftl(void);
 
 #endif /* _FFSPORT_ */
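The ffsport.c change above defers the slow FTL probing to an async helper so module init returns quickly; register_spectra_ftl() merely queues register_spectra_ftl_async(). A minimal sketch of the same async API usage, with slow_probe_async() as a hypothetical stand-in:

#include <linux/async.h>

static void slow_probe_async(void *unused, async_cookie_t cookie)
{
	/* long-running work: identify the device, set up block devices */
}

static int queue_probe(void)
{
	/* runs slow_probe_async() from the async helper threads */
	async_schedule(slow_probe_async, NULL);
	return 0;	/* caller does not wait for the probe to finish */
}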
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
index 4e6e451..fb39c8e 100644
--- a/drivers/staging/spectra/flash.c
+++ b/drivers/staging/spectra/flash.c
@@ -1258,9 +1258,7 @@
 
 	g_SBDCmdIndex = 0;
 
-	GLOB_LLD_Flash_Init();
-
-	status = GLOB_LLD_Read_Device_ID();
+	status = GLOB_LLD_Flash_Init();
 
 	return status;
 }
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
index 6733bbf..095f2f0 100644
--- a/drivers/staging/spectra/lld_emu.c
+++ b/drivers/staging/spectra/lld_emu.c
@@ -180,10 +180,8 @@
 	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
 		       __FILE__, __LINE__, __func__);
 
-	flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
-						   GLOB_LLD_BLOCKS *
-						   GLOB_LLD_PAGES *
-						   sizeof(u8));
+	flash_memory[0] = vmalloc(GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS *
+				  GLOB_LLD_PAGES * sizeof(u8));
 	if (!flash_memory[0]) {
 		printk(KERN_ERR "Fail to allocate memory "
 		       "for nand emulator!\n");
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
index 0d647a8f..2263d3e 100644
--- a/drivers/staging/spectra/lld_nand.c
+++ b/drivers/staging/spectra/lld_nand.c
@@ -2395,112 +2395,9 @@
 	unsigned long csr_base;
 	unsigned long csr_len;
 	struct mrst_nand_info *pndev = &info;
-
-	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-		       __FILE__, __LINE__, __func__);
-
-	ret = pci_enable_device(dev);
-	if (ret) {
-		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
-		return ret;
-	}
-
-	pci_set_master(dev);
-	pndev->dev = dev;
-
-	csr_base = pci_resource_start(dev, 0);
-	if (!csr_base) {
-		printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
-		ret = -ENODEV;
-		goto failed_req_csr;
-	}
-
-	csr_len = pci_resource_len(dev, 0);
-	if (!csr_len) {
-		printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
-		ret = -ENODEV;
-		goto failed_req_csr;
-	}
-
-	ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
-	if (ret) {
-		printk(KERN_ERR "Spectra: Unable to request "
-		       "memory region\n");
-		goto failed_req_csr;
-	}
-
-	pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
-	if (!pndev->ioaddr) {
-		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
-		ret = -ENOMEM;
-		goto failed_remap_csr;
-	}
-	nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
-		       csr_base, pndev->ioaddr, csr_len);
-
-	init_completion(&pndev->complete);
-	nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
-
-#if CMD_DMA
-	if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
-			SPECTRA_NAND_NAME, &info)) {
-		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
-		ret = -ENODEV;
-		iounmap(pndev->ioaddr);
-		goto failed_remap_csr;
-	}
-#else
-	if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
-			SPECTRA_NAND_NAME, &info)) {
-		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
-		ret = -ENODEV;
-		iounmap(pndev->ioaddr);
-		goto failed_remap_csr;
-	}
-#endif
-
-	pci_set_drvdata(dev, pndev);
-
-	return 0;
-
-failed_remap_csr:
-	pci_release_regions(dev);
-failed_req_csr:
-	pci_disable_device(dev);
-
-	return ret;
-}
-
-static void nand_pci_remove(struct pci_dev *dev)
-{
-	struct mrst_nand_info *pndev = pci_get_drvdata(dev);
-
-	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
-		       __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
-	free_irq(dev->irq, pndev);
-#endif
-	iounmap(pndev->ioaddr);
-	pci_release_regions(dev);
-	pci_disable_device(dev);
-}
-
-MODULE_DEVICE_TABLE(pci, nand_pci_ids);
-
-static struct pci_driver nand_pci_driver = {
-	.name = SPECTRA_NAND_NAME,
-	.id_table = nand_pci_ids,
-	.probe = nand_pci_probe,
-	.remove = nand_pci_remove,
-};
-
-int NAND_Flash_Init(void)
-{
-	int retval;
 	u32 int_mask;
 
-	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
 		       __FILE__, __LINE__, __func__);
 
 	FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
@@ -2582,6 +2479,122 @@
 	iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
 	iowrite32(1, FlashReg + ECC_ENABLE);
 	enable_ecc = 1;
+	ret = pci_enable_device(dev);
+	if (ret) {
+		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+		goto failed_req_csr;
+	}
+
+	pci_set_master(dev);
+	pndev->dev = dev;
+
+	csr_base = pci_resource_start(dev, 0);
+	if (!csr_base) {
+		printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
+		ret = -ENODEV;
+		goto failed_req_csr;
+	}
+
+	csr_len = pci_resource_len(dev, 0);
+	if (!csr_len) {
+		printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
+		ret = -ENODEV;
+		goto failed_req_csr;
+	}
+
+	ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
+	if (ret) {
+		printk(KERN_ERR "Spectra: Unable to request "
+		       "memory region\n");
+		goto failed_req_csr;
+	}
+
+	pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
+	if (!pndev->ioaddr) {
+		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
+		ret = -ENOMEM;
+		goto failed_remap_csr;
+	}
+	nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
+		       csr_base, pndev->ioaddr, csr_len);
+
+	init_completion(&pndev->complete);
+	nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
+
+#if CMD_DMA
+	if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
+			SPECTRA_NAND_NAME, &info)) {
+		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+		ret = -ENODEV;
+		iounmap(pndev->ioaddr);
+		goto failed_remap_csr;
+	}
+#else
+	if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
+			SPECTRA_NAND_NAME, &info)) {
+		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
+		ret = -ENODEV;
+		iounmap(pndev->ioaddr);
+		goto failed_remap_csr;
+	}
+#endif
+
+	pci_set_drvdata(dev, pndev);
+
+	ret = GLOB_LLD_Read_Device_ID();
+	if (ret) {
+		iounmap(pndev->ioaddr);
+		goto failed_remap_csr;
+	}
+
+	ret = register_spectra_ftl();
+	if (ret) {
+		iounmap(pndev->ioaddr);
+		goto failed_remap_csr;
+	}
+
+	return 0;
+
+failed_remap_csr:
+	pci_release_regions(dev);
+failed_req_csr:
+	pci_disable_device(dev);
+	iounmap(FlashMem);
+	iounmap(FlashReg);
+
+	return ret;
+}
+
+static void nand_pci_remove(struct pci_dev *dev)
+{
+	struct mrst_nand_info *pndev = pci_get_drvdata(dev);
+
+	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
+		       __FILE__, __LINE__, __func__);
+
+#if CMD_DMA
+	free_irq(dev->irq, pndev);
+#endif
+	iounmap(pndev->ioaddr);
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, nand_pci_ids);
+
+static struct pci_driver nand_pci_driver = {
+	.name = SPECTRA_NAND_NAME,
+	.id_table = nand_pci_ids,
+	.probe = nand_pci_probe,
+	.remove = nand_pci_remove,
+};
+
+int NAND_Flash_Init(void)
+{
+	int retval;
+
+	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
+		       __FILE__, __LINE__, __func__);
 
 	retval = pci_register_driver(&nand_pci_driver);
 	if (retval)
diff --git a/drivers/staging/ste_rmi4/Kconfig b/drivers/staging/ste_rmi4/Kconfig
new file mode 100644
index 0000000..e867950
--- /dev/null
+++ b/drivers/staging/ste_rmi4/Kconfig
@@ -0,0 +1,9 @@
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+	tristate "Synaptics i2c rmi4 touchscreen"
+	depends on I2C && INPUT
+	help
+	  Say Y here if you have a Synaptics RMI4 touchscreen controller
+	  and want to enable support for the built-in touchscreen.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_rmi4_ts.
diff --git a/drivers/staging/ste_rmi4/Makefile b/drivers/staging/ste_rmi4/Makefile
new file mode 100644
index 0000000..6cce2ed
--- /dev/null
+++ b/drivers/staging/ste_rmi4/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the RMI4 touchscreen driver.
+#
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
diff --git a/drivers/staging/ste_rmi4/TODO b/drivers/staging/ste_rmi4/TODO
new file mode 100644
index 0000000..9be2437
--- /dev/null
+++ b/drivers/staging/ste_rmi4/TODO
@@ -0,0 +1,7 @@
+TODO
+----
+
+Wait for the official upstream synaptics rmi4 clearpad drivers as promised over the past few months
+Merge any device support needed from this driver into it
+Delete this driver
+
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
new file mode 100644
index 0000000..e8f047e
--- /dev/null
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -0,0 +1,1179 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include "synaptics_i2c_rmi4.h"
+
+/* TODO: for multiple device support will need a per-device mutex */
+#define DRIVER_NAME "synaptics_rmi4_i2c"
+
+#define MAX_ERROR_REPORT	6
+#define MAX_TOUCH_MAJOR		15
+#define MAX_RETRY_COUNT		5
+#define STD_QUERY_LEN		21
+#define PAGE_LEN		2
+#define DATA_BUF_LEN		32
+#define BUF_LEN			37
+#define QUERY_LEN		9
+#define DATA_LEN		12
+#define HAS_TAP			0x01
+#define HAS_PALMDETECT		0x01
+#define HAS_ROTATE		0x02
+#define HAS_TAPANDHOLD		0x02
+#define HAS_DOUBLETAP		0x04
+#define HAS_EARLYTAP		0x08
+#define HAS_RELEASE		0x08
+#define HAS_FLICK		0x10
+#define HAS_PRESS		0x20
+#define HAS_PINCH		0x40
+
+#define MASK_16BIT		0xFFFF
+#define MASK_8BIT		0xFF
+#define MASK_7BIT		0x7F
+#define MASK_5BIT		0x1F
+#define MASK_4BIT		0x0F
+#define MASK_3BIT		0x07
+#define MASK_2BIT		0x03
+#define TOUCHPAD_CTRL_INTR	0x8
+#define PDT_START_SCAN_LOCATION (0x00E9)
+#define PDT_END_SCAN_LOCATION	(0x000A)
+#define PDT_ENTRY_SIZE		(0x0006)
+#define RMI4_NUMBER_OF_MAX_FINGERS		(8)
+#define SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM	(0x11)
+#define SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM	(0x01)
+
+/**
+ * struct synaptics_rmi4_fn_desc - contains the function descriptor information
+ * @query_base_addr: base address for query
+ * @cmd_base_addr: base address for command
+ * @ctrl_base_addr: base address for control
+ * @data_base_addr: base address for data
+ * @intr_src_count: count for the interrupt source
+ * @fn_number: function number
+ *
+ * This structure holds the function descriptor information
+ * for a particular function.
+ */
+struct synaptics_rmi4_fn_desc {
+	unsigned char	query_base_addr;
+	unsigned char	cmd_base_addr;
+	unsigned char	ctrl_base_addr;
+	unsigned char	data_base_addr;
+	unsigned char	intr_src_count;
+	unsigned char	fn_number;
+};
+
+/**
+ * struct synaptics_rmi4_fn - contains the function information
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: number of fingers touched
+ * @size_of_data_register_block: data register block size
+ * @index_to_intr_reg: index for interrupt register
+ * @intr_mask: interrupt mask value
+ * @fn_desc: variable for function descriptor structure
+ * @link: linked list for function descriptors
+ *
+ * This structure gives information about the number of data sources and
+ * the number of data registers associated with the function.
+ */
+struct synaptics_rmi4_fn {
+	unsigned char		fn_number;
+	unsigned char		num_of_data_sources;
+	unsigned char		num_of_data_points;
+	unsigned char		size_of_data_register_block;
+	unsigned char		index_to_intr_reg;
+	unsigned char		intr_mask;
+	struct synaptics_rmi4_fn_desc	fn_desc;
+	struct list_head	link;
+};
+
+/**
+ * struct synaptics_rmi4_device_info - contains the rmi4 device information
+ * @version_major: protocol major version number
+ * @version_minor: protocol minor version number
+ * @manufacturer_id: manufacturer identification byte
+ * @product_props: product properties information
+ * @product_info: product info array
+ * @date_code: device manufacture date
+ * @tester_id: tester id array
+ * @serial_number: serial number for that device
+ * @product_id_string: product id for the device
+ * @support_fn_list: linked list for device information
+ *
+ * This structure gives the identification information reported by the
+ * rmi4 device and the list of functions it supports.
+ */
+struct synaptics_rmi4_device_info {
+	unsigned int		version_major;
+	unsigned int		version_minor;
+	unsigned char		manufacturer_id;
+	unsigned char		product_props;
+	unsigned char		product_info[2];
+	unsigned char		date_code[3];
+	unsigned short		tester_id;
+	unsigned short		serial_number;
+	unsigned char		product_id_string[11];
+	struct list_head	support_fn_list;
+};
+
+/**
+ * struct synaptics_rmi4_data - contains the rmi4 device data
+ * @rmi4_mod_info: structure variable for rmi4 device info
+ * @input_dev: pointer for input device
+ * @i2c_client: pointer for i2c client
+ * @board: constant pointer for touch platform data
+ * @fn_list_mutex: mutex for the function list
+ * @rmi4_page_mutex: mutex for rmi4 page
+ * @current_page: currently selected register page
+ * @number_of_interrupt_register: interrupt registers count
+ * @fn01_ctrl_base_addr: control base address for fn01
+ * @fn01_query_base_addr: query base address for fn01
+ * @fn01_data_base_addr: data base address for fn01
+ * @sensor_max_x: sensor maximum x value
+ * @sensor_max_y: sensor maximum y value
+ * @regulator: pointer to the regulator structure
+ * @wait: wait queue structure variable
+ * @touch_stopped: flag to stop the thread function
+ *
+ * This structure gives the device data information.
+ */
+struct synaptics_rmi4_data {
+	struct synaptics_rmi4_device_info rmi4_mod_info;
+	struct input_dev	*input_dev;
+	struct i2c_client	*i2c_client;
+	const struct synaptics_rmi4_platform_data *board;
+	struct mutex		fn_list_mutex;
+	struct mutex		rmi4_page_mutex;
+	int			current_page;
+	unsigned int		number_of_interrupt_register;
+	unsigned short		fn01_ctrl_base_addr;
+	unsigned short		fn01_query_base_addr;
+	unsigned short		fn01_data_base_addr;
+	int			sensor_max_x;
+	int			sensor_max_y;
+	struct regulator	*regulator;
+	wait_queue_head_t	wait;
+	bool			touch_stopped;
+};
+
+/**
+ * synaptics_rmi4_set_page() - set the register page
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @address: register address whose page should be selected
+ *
+ * This function selects the register page used for subsequent accesses.
+ */
+static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *pdata,
+					unsigned int address)
+{
+	unsigned char	txbuf[PAGE_LEN];
+	int		retval;
+	unsigned int	page;
+	struct i2c_client *i2c = pdata->i2c_client;
+
+	page	= ((address >> 8) & MASK_8BIT);
+	if (page != pdata->current_page) {
+		txbuf[0]	= MASK_8BIT;
+		txbuf[1]	= page;
+		retval	= i2c_master_send(i2c, txbuf, PAGE_LEN);
+		if (retval != PAGE_LEN)
+			dev_err(&i2c->dev, "%s:failed:%d\n", __func__, retval);
+		else
+			pdata->current_page = page;
+	} else
+		retval = PAGE_LEN;
+	return retval;
+}
+/**
+ * synaptics_rmi4_i2c_block_read() - read a block of data
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @address: offset to start reading from
+ * @valp: pointer to the buffer that receives the data
+ * @size: number of bytes to read
+ *
+ * This function reads a block of data and returns the number of bytes
+ * transferred, or an error value on failure.
+ */
+static int synaptics_rmi4_i2c_block_read(struct synaptics_rmi4_data *pdata,
+						unsigned short address,
+						unsigned char *valp, int size)
+{
+	int retval = 0;
+	int retry_count = 0;
+	int index;
+	struct i2c_client *i2c = pdata->i2c_client;
+
+	mutex_lock(&(pdata->rmi4_page_mutex));
+	retval = synaptics_rmi4_set_page(pdata, address);
+	if (retval != PAGE_LEN)
+		goto exit;
+	index = address & MASK_8BIT;
+retry:
+	retval = i2c_smbus_read_i2c_block_data(i2c, index, size, valp);
+	if (retval != size) {
+		if (++retry_count == MAX_RETRY_COUNT)
+			dev_err(&i2c->dev,
+				"%s:address 0x%04x size %d failed:%d\n",
+					__func__, address, size, retval);
+		else {
+			synaptics_rmi4_set_page(pdata, address);
+			goto retry;
+		}
+	}
+exit:
+	mutex_unlock(&(pdata->rmi4_page_mutex));
+	return retval;
+}
+
+/**
+ * synaptics_rmi4_i2c_byte_write() - write a single byte of data
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @address: offset to write the byte to
+ * @data: byte to be written
+ *
+ * This function writes a single byte of data and returns the transfer status.
+ */
+static int synaptics_rmi4_i2c_byte_write(struct synaptics_rmi4_data *pdata,
+						unsigned short address,
+						unsigned char data)
+{
+	unsigned char txbuf[2];
+	int retval = 0;
+	struct i2c_client *i2c = pdata->i2c_client;
+
+	/* Can't have anyone else changing the page behind our backs */
+	mutex_lock(&(pdata->rmi4_page_mutex));
+
+	retval = synaptics_rmi4_set_page(pdata, address);
+	if (retval != PAGE_LEN)
+		goto exit;
+	txbuf[0]	= address & MASK_8BIT;
+	txbuf[1]	= data;
+	retval		= i2c_master_send(pdata->i2c_client, txbuf, 2);
+	/* Add in retry on writes, but only for certain error return values */
+	if (retval != 2) {
+		dev_err(&i2c->dev, "%s:failed:%d\n", __func__, retval);
+		retval = -EIO;
+	} else
+		retval = 1;
+exit:
+	mutex_unlock(&(pdata->rmi4_page_mutex));
+	return retval;
+}
+
+/**
+ * synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn structure
+ *
+ * This function reports finger data for the rmi4 touchpad device.
+ */
+static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
+						struct synaptics_rmi4_fn *rfi)
+{
+	/* number of touch points - fingers down in this case */
+	int	touch_count = 0;
+	int	finger;
+	int	fingers_supported;
+	int	finger_registers;
+	int	reg;
+	int	finger_shift;
+	int	finger_status;
+	int	retval;
+	unsigned short	data_base_addr;
+	unsigned short	data_offset;
+	unsigned char	data_reg_blk_size;
+	unsigned char	values[2];
+	unsigned char	data[DATA_LEN];
+	int	x[RMI4_NUMBER_OF_MAX_FINGERS];
+	int	y[RMI4_NUMBER_OF_MAX_FINGERS];
+	int	wx[RMI4_NUMBER_OF_MAX_FINGERS];
+	int	wy[RMI4_NUMBER_OF_MAX_FINGERS];
+	struct	i2c_client *client = pdata->i2c_client;
+
+	/* get 2D sensor finger data */
+	/*
+	 * First get the finger status field - the size of the finger status
+	 * field is determined by the number of fingers supported - 2 bits per
+	 * finger, so the number of registers to read is:
+	 * registerCount = ceil(numberOfFingers/4).
+	 * Read the required number of registers and check each 2 bit field to
+	 * determine if a finger is down:
+	 *	00 = finger not present,
+	 *	01 = finger present and data accurate,
+	 *	10 = finger present but data may not be accurate,
+	 *	11 = reserved for product use.
+	 */
+	fingers_supported	= rfi->num_of_data_points;
+	finger_registers	= (fingers_supported + 3)/4;
+	data_base_addr		= rfi->fn_desc.data_base_addr;
+	retval = synaptics_rmi4_i2c_block_read(pdata, data_base_addr, values,
+							finger_registers);
+	if (retval != finger_registers) {
+		dev_err(&client->dev, "%s:read status registers failed\n",
+								__func__);
+		return 0;
+	}
+	/*
+	 * For each finger present, read the proper number of registers
+	 * to get absolute data.
+	 */
+	data_reg_blk_size = rfi->size_of_data_register_block;
+	for (finger = 0; finger < fingers_supported; finger++) {
+		/* determine which data byte the finger status is in */
+		reg = finger/4;
+		/* bit shift to get finger's status */
+		finger_shift	= (finger % 4) * 2;
+		finger_status	= (values[reg] >> finger_shift) & 3;
+		/*
+		 * if finger status indicates a finger is present then
+		 * read the finger data and report it
+		 */
+		if (finger_status == 1 || finger_status == 2) {
+			/* Read the finger data */
+			data_offset = data_base_addr +
+					((finger * data_reg_blk_size) +
+					finger_registers);
+			retval = synaptics_rmi4_i2c_block_read(pdata,
+						data_offset, data,
+						data_reg_blk_size);
+			if (retval != data_reg_blk_size) {
+				printk(KERN_ERR "%s:read data failed\n",
+								__func__);
+				return 0;
+			} else {
+				x[touch_count]	=
+					(data[0] << 4) | (data[2] & MASK_4BIT);
+				y[touch_count]	=
+					(data[1] << 4) |
+					((data[2] >> 4) & MASK_4BIT);
+				wy[touch_count]	=
+						(data[3] >> 4) & MASK_4BIT;
+				wx[touch_count]	=
+						(data[3] & MASK_4BIT);
+
+				if (pdata->board->x_flip)
+					x[touch_count] =
+						pdata->sensor_max_x -
+								x[touch_count];
+				if (pdata->board->y_flip)
+					y[touch_count] =
+						pdata->sensor_max_y -
+								y[touch_count];
+			}
+			/* number of active touch points */
+			touch_count++;
+		}
+	}
+
+	/* report to input subsystem */
+	if (touch_count) {
+		for (finger = 0; finger < touch_count; finger++) {
+			input_report_abs(pdata->input_dev, ABS_MT_TOUCH_MAJOR,
+						max(wx[finger] , wy[finger]));
+			input_report_abs(pdata->input_dev, ABS_MT_POSITION_X,
+								x[finger]);
+			input_report_abs(pdata->input_dev, ABS_MT_POSITION_Y,
+								y[finger]);
+			input_mt_sync(pdata->input_dev);
+		}
+	} else
+		input_mt_sync(pdata->input_dev);
+
+	/* sync after groups of events */
+	input_sync(pdata->input_dev);
+	/* return the number of touch points */
+	return touch_count;
+}
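The finger-status handling in synpatics_rmi4_touchpad_report() packs one 2-bit state per finger, four fingers per byte, as the comment inside the function describes. A compact, illustrative helper equivalent to the reg/finger_shift arithmetic above:

/* Decode finger n's 2-bit state from the status register block:
 * 0 = not present, 1 = present/accurate, 2 = present/inaccurate. */
static inline int rmi4_finger_state(const unsigned char *status, int finger)
{
	return (status[finger / 4] >> ((finger % 4) * 2)) & 3;
}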
+
+/**
+ * synaptics_rmi4_report_device() - reports the rmi4 device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn
+ *
+ * This function is used to call the report function of the rmi4 device.
+ */
+static int synaptics_rmi4_report_device(struct synaptics_rmi4_data *pdata,
+					struct synaptics_rmi4_fn *rfi)
+{
+	int touch = 0;
+	struct	i2c_client *client = pdata->i2c_client;
+	static int num_error_reports;
+	if (rfi->fn_number != SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
+		num_error_reports++;
+		if (num_error_reports < MAX_ERROR_REPORT)
+			dev_err(&client->dev, "%s:report not supported\n",
+								__func__);
+	} else
+		touch = synpatics_rmi4_touchpad_report(pdata, rfi);
+	return touch;
+}
+/**
+ * synaptics_rmi4_sensor_report() - reports to input subsystem
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function reads in all data sources and reports
+ * them to the input subsystem.
+ */
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *pdata)
+{
+	unsigned char	intr_status[4];
+	/* number of touch points - fingers or buttons */
+	int touch = 0;
+	unsigned int retval;
+	struct synaptics_rmi4_fn		*rfi;
+	struct synaptics_rmi4_device_info	*rmi;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	/*
+	 * Get the interrupt status from the function $01
+	 * data register+1 to find which source(s) were interrupting
+	 * so we can read the data from the source(s) (2D sensor, buttons..)
+	 */
+	retval = synaptics_rmi4_i2c_block_read(pdata,
+					pdata->fn01_data_base_addr + 1,
+					intr_status,
+					pdata->number_of_interrupt_register);
+	if (retval != pdata->number_of_interrupt_register) {
+		dev_err(&client->dev,
+				"could not read interrupt status registers\n");
+		return 0;
+	}
+	/*
+	 * Check each function that has data sources and, if the interrupt
+	 * for that function triggered, call that RMI4 function's report()
+	 * function to gather data and report it to the input subsystem.
+	 */
+	rmi = &(pdata->rmi4_mod_info);
+	list_for_each_entry(rfi, &rmi->support_fn_list, link) {
+		if (rfi->num_of_data_sources) {
+			if (intr_status[rfi->index_to_intr_reg] &
+							rfi->intr_mask)
+				touch = synaptics_rmi4_report_device(pdata,
+									rfi);
+		}
+	}
+	/* return the number of touch points */
+	return touch;
+}
+
+/**
+ * synaptics_rmi4_irq() - thread function for rmi4 attention line
+ * @irq: irq value
+ * @data: void pointer
+ *
+ * This function is the interrupt thread function. It reads the touch
+ * data and reports it to the input subsystem while attention is asserted.
+ */
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+	struct synaptics_rmi4_data *pdata = data;
+	int touch_count;
+	do {
+		touch_count = synaptics_rmi4_sensor_report(pdata);
+		if (touch_count)
+			wait_event_timeout(pdata->wait, pdata->touch_stopped,
+							msecs_to_jiffies(1));
+		else
+			break;
+	} while (!pdata->touch_stopped);
+	return IRQ_HANDLED;
+}
+
+/**
+ * synpatics_rmi4_touchpad_detect() - detects the rmi4 touchpad device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn structure
+ * @fd: pointer to synaptics_rmi4_fn_desc structure
+ * @interruptcount: number of interrupts counted so far
+ *
+ * This function detects the rmi4 touchpad device.
+ */
+static int synpatics_rmi4_touchpad_detect(struct synaptics_rmi4_data *pdata,
+					struct synaptics_rmi4_fn *rfi,
+					struct synaptics_rmi4_fn_desc *fd,
+					unsigned int interruptcount)
+{
+	unsigned char	queries[QUERY_LEN];
+	unsigned short	intr_offset;
+	unsigned char	abs_data_size;
+	unsigned char	abs_data_blk_size;
+	unsigned char	egr_0, egr_1;
+	unsigned int	all_data_blk_size;
+	int	has_pinch, has_flick, has_tap;
+	int	has_tapandhold, has_doubletap;
+	int	has_earlytap, has_press;
+	int	has_palmdetect, has_rotate;
+	int	has_rel;
+	int	i;
+	int	retval;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	rfi->fn_desc.query_base_addr	= fd->query_base_addr;
+	rfi->fn_desc.data_base_addr	= fd->data_base_addr;
+	rfi->fn_desc.intr_src_count	= fd->intr_src_count;
+	rfi->fn_desc.fn_number		= fd->fn_number;
+	rfi->fn_number			= fd->fn_number;
+	rfi->num_of_data_sources	= fd->intr_src_count;
+	rfi->fn_desc.ctrl_base_addr	= fd->ctrl_base_addr;
+	rfi->fn_desc.cmd_base_addr	= fd->cmd_base_addr;
+
+	/*
+	 * need to get number of fingers supported, data size, etc.
+	 * to be used when getting data since the number of registers to
+	 * read depends on the number of fingers supported and data size.
+	 */
+	retval = synaptics_rmi4_i2c_block_read(pdata, fd->query_base_addr,
+							queries,
+							sizeof(queries));
+	if (retval != sizeof(queries)) {
+		dev_err(&client->dev, "%s:read function query registers\n",
+							__func__);
+		return retval;
+	}
+	/*
+	 * 2D data sources have only 3 bits for the number of fingers
+ * supported - so the encoding is a bit weird.
+	 */
+	if ((queries[1] & MASK_3BIT) <= 4)
+		/* add 1 since zero based */
+		rfi->num_of_data_points = (queries[1] & MASK_3BIT) + 1;
+	else {
+		/*
+		 * a value of 5 is up to 10 fingers - 6 and 7 are reserved
+		 * (shouldn't get these in a normal 2D source).
+		 */
+		if ((queries[1] & MASK_3BIT) == 5)
+			rfi->num_of_data_points = 10;
+	}
+	/* Need to get interrupt info for handling interrupts */
+	rfi->index_to_intr_reg = (interruptcount + 7)/8;
+	if (rfi->index_to_intr_reg != 0)
+		rfi->index_to_intr_reg -= 1;
+	/*
+	 * loop through interrupts for each source in fn $11
+	 * and or in a bit to the interrupt mask for each.
+	 */
+	intr_offset = interruptcount % 8;
+	rfi->intr_mask = 0;
+	for (i = intr_offset;
+		i < ((fd->intr_src_count & MASK_3BIT) + intr_offset); i++)
+		rfi->intr_mask |= 1 << i;
+
+	/* Size of just the absolute data for one finger */
+	abs_data_size	= queries[5] & MASK_2BIT;
+	/* One each for X and Y, one for LSB for X & Y, one for W, one for Z */
+	abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
+	rfi->size_of_data_register_block = abs_data_blk_size;
+
+	/*
+	 * need to determine the size of data to read - this depends on
+	 * conditions such as whether Relative data is reported and if Gesture
+	 * data is reported.
+	 */
+	egr_0 = queries[7];
+	egr_1 = queries[8];
+
+	/*
+	 * Get info about what EGR data is supported, whether it has
+	 * Relative data supported, etc.
+	 */
+	has_pinch	= egr_0 & HAS_PINCH;
+	has_flick	= egr_0 & HAS_FLICK;
+	has_tap		= egr_0 & HAS_TAP;
+	has_earlytap	= egr_0 & HAS_EARLYTAP;
+	has_press	= egr_0 & HAS_PRESS;
+	has_rotate	= egr_1 & HAS_ROTATE;
+	has_rel		= queries[1] & HAS_RELEASE;
+	has_tapandhold	= egr_0 & HAS_TAPANDHOLD;
+	has_doubletap	= egr_0 & HAS_DOUBLETAP;
+	has_palmdetect	= egr_1 & HAS_PALMDETECT;
+
+	/*
+	 * Size of all data including finger status, absolute data for each
+	 * finger, relative data and EGR data
+	 */
+	all_data_blk_size =
+		/* finger status, four fingers per register */
+		((rfi->num_of_data_points + 3) / 4) +
+		/* absolute data, per finger times number of fingers */
+		(abs_data_blk_size * rfi->num_of_data_points) +
+		/*
+		 * two relative registers (if relative is being reported)
+		 */
+		2 * has_rel +
+		/*
+		 * F11_2D_data8 is only present if the egr_0
+		 * register is non-zero.
+		 */
+		!!(egr_0) +
+		/*
+		 * F11_2D_data9 is only present if either egr_0 or
+		 * egr_1 registers are non-zero.
+		 */
+		(egr_0 || egr_1) +
+		/*
+		 * F11_2D_data10 is only present if EGR_PINCH or EGR_FLICK of
+		 * egr_0 reports as 1.
+		 */
+		!!(has_pinch | has_flick) +
+		/*
+		 * F11_2D_data11 and F11_2D_data12 are only present if
+		 * EGR_FLICK of egr_0 reports as 1.
+		 */
+		2 * !!(has_flick);
+	return retval;
+}
+
+/**
+ * synpatics_rmi4_touchpad_config() - configures the rmi4 touchpad device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ * @rfi: pointer to synaptics_rmi4_fn structure
+ *
+ * This function configures the rmi4 touchpad device.
+ */
+int synpatics_rmi4_touchpad_config(struct synaptics_rmi4_data *pdata,
+						struct synaptics_rmi4_fn *rfi)
+{
+	/*
+	 * For the data source - print info and do any
+	 * source specific configuration.
+	 */
+	unsigned char data[BUF_LEN];
+	int retval = 0;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	/* Get and print some info about the data source... */
+	/* To Query 2D devices we need to read from the address obtained
+	 * from the function descriptor stored in the RMI function info.
+	 */
+	retval = synaptics_rmi4_i2c_block_read(pdata,
+						rfi->fn_desc.query_base_addr,
+						data, QUERY_LEN);
+	if (retval != QUERY_LEN)
+		dev_err(&client->dev, "%s:read query registers failed\n",
+								__func__);
+	else {
+		retval = synaptics_rmi4_i2c_block_read(pdata,
+						rfi->fn_desc.ctrl_base_addr,
+						data, DATA_BUF_LEN);
+		if (retval != DATA_BUF_LEN) {
+			dev_err(&client->dev,
+				"%s:read control registers failed\n",
+								__func__);
+			return retval;
+		}
+		/* Store these for use later */
+		pdata->sensor_max_x = ((data[6] & MASK_8BIT) << 0) |
+						((data[7] & MASK_4BIT) << 8);
+		pdata->sensor_max_y = ((data[8] & MASK_5BIT) << 0) |
+						((data[9] & MASK_4BIT) << 8);
+	}
+	return retval;
+}
+
+/**
+ * synaptics_rmi4_i2c_query_device() - query the rmi4 device
+ * @pdata: pointer to synaptics_rmi4_data structure
+ *
+ * This function is used to query the rmi4 device.
+ */
+static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
+{
+	int i;
+	int retval;
+	unsigned char std_queries[STD_QUERY_LEN];
+	unsigned char intr_count = 0;
+	int data_sources = 0;
+	unsigned int ctrl_offset;
+	struct synaptics_rmi4_fn *rfi;
+	struct synaptics_rmi4_fn_desc	rmi_fd;
+	struct synaptics_rmi4_device_info *rmi;
+	struct	i2c_client *client = pdata->i2c_client;
+
+	/*
+	 * init the physical drivers RMI module
+	 * info list of functions
+	 */
+	INIT_LIST_HEAD(&pdata->rmi4_mod_info.support_fn_list);
+
+	/*
+	 * Read the Page Descriptor Table to determine what functions
+	 * are present
+	 */
+	for (i = PDT_START_SCAN_LOCATION; i > PDT_END_SCAN_LOCATION;
+						i -= PDT_ENTRY_SIZE) {
+		retval = synaptics_rmi4_i2c_block_read(pdata, i,
+						(unsigned char *)&rmi_fd,
+						sizeof(rmi_fd));
+		if (retval != sizeof(rmi_fd)) {
+			/* failed to read next PDT entry */
+			dev_err(&client->dev, "%s: read error\n", __func__);
+			return -EIO;
+		}
+		rfi = NULL;
+		if (rmi_fd.fn_number) {
+			switch (rmi_fd.fn_number & MASK_8BIT) {
+			case SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM:
+				pdata->fn01_query_base_addr =
+						rmi_fd.query_base_addr;
+				pdata->fn01_ctrl_base_addr =
+						rmi_fd.ctrl_base_addr;
+				pdata->fn01_data_base_addr =
+						rmi_fd.data_base_addr;
+				break;
+			case SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM:
+				if (rmi_fd.intr_src_count) {
+					rfi = kmalloc(sizeof(*rfi),
+								GFP_KERNEL);
+					if (!rfi) {
+						dev_err(&client->dev,
+							"%s:kmalloc failed\n",
+								__func__);
+							return -ENOMEM;
+					}
+					retval = synpatics_rmi4_touchpad_detect
+								(pdata,	rfi,
+								&rmi_fd,
+								intr_count);
+					if (retval < 0)
+						return retval;
+				}
+				break;
+			}
+			/* interrupt count for next iteration */
+			intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
+			/*
+			 * We only want to add functions to the list
+			 * that have data associated with them.
+			 */
+			if (rfi && rmi_fd.intr_src_count) {
+				/* link this function info to the RMI module */
+				mutex_lock(&(pdata->fn_list_mutex));
+				list_add_tail(&rfi->link,
+					&pdata->rmi4_mod_info.support_fn_list);
+				mutex_unlock(&(pdata->fn_list_mutex));
+			}
+		} else {
+			/*
+			 * A zero in the function number
+			 * signals the end of the PDT
+			 */
+			dev_dbg(&client->dev,
+				"%s:end of PDT\n", __func__);
+			break;
+		}
+	}
+	/*
+	 * calculate the interrupt register count - used in the
+	 * ISR to read the correct number of interrupt registers
+	 */
+	pdata->number_of_interrupt_register = (intr_count + 7) / 8;
+	/*
+	 * Function $01 will be used to query the product properties
+	 * and product ID, so the PDT had to be read above first to get
+	 * the Fn $01 query address before filling in the product
+	 * info. NOTE: Even an unflashed device will still have FN $01.
+	 */
+
+	/* Load up the standard queries and get the RMI4 module info */
+	retval = synaptics_rmi4_i2c_block_read(pdata,
+					pdata->fn01_query_base_addr,
+					std_queries,
+					sizeof(std_queries));
+	if (retval != sizeof(std_queries)) {
+		dev_err(&client->dev, "%s:Failed reading queries\n",
+							__func__);
+		 return -EIO;
+	}
+
+	/* Currently supported RMI version is 4.0 */
+	pdata->rmi4_mod_info.version_major	= 4;
+	pdata->rmi4_mod_info.version_minor	= 0;
+	/*
+	 * get manufacturer id, product_props, product info,
+	 * date code, tester id, serial num and product id (name)
+	 */
+	pdata->rmi4_mod_info.manufacturer_id	= std_queries[0];
+	pdata->rmi4_mod_info.product_props	= std_queries[1];
+	pdata->rmi4_mod_info.product_info[0]	= std_queries[2];
+	pdata->rmi4_mod_info.product_info[1]	= std_queries[3];
+	/* year - 2001-2032 */
+	pdata->rmi4_mod_info.date_code[0]	= std_queries[4] & MASK_5BIT;
+	/* month - 1-12 */
+	pdata->rmi4_mod_info.date_code[1]	= std_queries[5] & MASK_4BIT;
+	/* day - 1-31 */
+	pdata->rmi4_mod_info.date_code[2]	= std_queries[6] & MASK_5BIT;
+	pdata->rmi4_mod_info.tester_id = ((std_queries[7] & MASK_7BIT) << 8) |
+						(std_queries[8] & MASK_7BIT);
+	pdata->rmi4_mod_info.serial_number =
+		((std_queries[9] & MASK_7BIT) << 8) |
+				(std_queries[10] & MASK_7BIT);
+	memcpy(pdata->rmi4_mod_info.product_id_string, &std_queries[11], 10);
+
+	/* Check if this is a Synaptics device - report if not. */
+	if (pdata->rmi4_mod_info.manufacturer_id != 1)
+		dev_err(&client->dev, "%s: non-Synaptics mfg id:%d\n",
+			__func__, pdata->rmi4_mod_info.manufacturer_id);
+
+	list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
+		data_sources += rfi->num_of_data_sources;
+	if (data_sources) {
+		rmi = &(pdata->rmi4_mod_info);
+		list_for_each_entry(rfi, &rmi->support_fn_list, link) {
+			if (rfi->num_of_data_sources) {
+				if (rfi->fn_number ==
+					SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
+					retval = synpatics_rmi4_touchpad_config
+								(pdata, rfi);
+					if (retval < 0)
+						return retval;
+				} else
+					dev_err(&client->dev,
+						"%s:fn_number not supported\n",
+								__func__);
+				/*
+				 * Turn on interrupts for this
+				 * function's data sources.
+				 */
+				ctrl_offset = pdata->fn01_ctrl_base_addr + 1 +
+							rfi->index_to_intr_reg;
+				retval = synaptics_rmi4_i2c_byte_write(pdata,
+							ctrl_offset,
+							rfi->intr_mask);
+				if (retval < 0)
+					return retval;
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
+ * @i2c: i2c client structure pointer
+ * @id: i2c device id pointer
+ *
+ * This function allocates and initializes the instance data, requests
+ * the irq, stores the instance data in the i2c client and then
+ * registers the physical driver, which scans the rmi4 Physical Device
+ * Table and enumerates any rmi4 functions that have data sources
+ * associated with them.
+ */
+static int __devinit synaptics_rmi4_probe
+	(struct i2c_client *client, const struct i2c_device_id *dev_id)
+{
+	int retval;
+	unsigned char intr_status[4];
+	struct synaptics_rmi4_data *rmi4_data;
+	const struct synaptics_rmi4_platform_data *platformdata =
+						client->dev.platform_data;
+
+	if (!i2c_check_functionality(client->adapter,
+					I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev, "i2c smbus byte data not supported\n");
+		return -EIO;
+	}
+
+	if (!platformdata) {
+		dev_err(&client->dev, "%s: no platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Allocate and initialize the instance data for this client */
+	rmi4_data = kzalloc(sizeof(struct synaptics_rmi4_data) * 2,
+							GFP_KERNEL);
+	if (!rmi4_data) {
+		dev_err(&client->dev, "%s: no memory allocated\n", __func__);
+		return -ENOMEM;
+	}
+
+	rmi4_data->input_dev = input_allocate_device();
+	if (rmi4_data->input_dev == NULL) {
+		dev_err(&client->dev, "%s:input device alloc failed\n",
+						__func__);
+		retval = -ENOMEM;
+		goto err_input;
+	}
+
+	dev_set_name(&client->dev, platformdata->name);
+
+	if (platformdata->regulator_en) {
+		rmi4_data->regulator = regulator_get(&client->dev, "v-touch");
+		if (IS_ERR(rmi4_data->regulator)) {
+			dev_err(&client->dev, "%s:get regulator failed\n",
+								__func__);
+			retval = PTR_ERR(rmi4_data->regulator);
+			goto err_regulator;
+		}
+		regulator_enable(rmi4_data->regulator);
+	}
+
+	init_waitqueue_head(&rmi4_data->wait);
+	/*
+	 * Copy the i2c_client pointer into the driver data's i2c_client
+	 * pointer for later use in rmi4_read, rmi4_write, etc.
+	 */
+	rmi4_data->i2c_client		= client;
+	/* So we set the page correctly the first time */
+	rmi4_data->current_page		= MASK_16BIT;
+	rmi4_data->board		= platformdata;
+	rmi4_data->touch_stopped	= false;
+
+	/* init the mutexes for maintain the lists */
+	mutex_init(&(rmi4_data->fn_list_mutex));
+	mutex_init(&(rmi4_data->rmi4_page_mutex));
+
+	/*
+	 * Register physical driver - this will call the detect function that
+	 * will then scan the device and determine the supported
+	 * rmi4 functions.
+	 */
+	retval = synaptics_rmi4_i2c_query_device(rmi4_data);
+	if (retval) {
+		dev_err(&client->dev, "%s: rmi4 query device failed\n",
+							__func__);
+		goto err_query_dev;
+	}
+
+	/* Store the instance data in the i2c_client */
+	i2c_set_clientdata(client, rmi4_data);
+
+	/* initialize the input device parameters */
+	rmi4_data->input_dev->name	= DRIVER_NAME;
+	rmi4_data->input_dev->phys	= "Synaptics_Clearpad";
+	rmi4_data->input_dev->id.bustype = BUS_I2C;
+	rmi4_data->input_dev->dev.parent = &client->dev;
+	input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+	/* Initialize the function handlers for rmi4 */
+	set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+	set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+	set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+
+	input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, 0,
+					rmi4_data->sensor_max_x, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
+					rmi4_data->sensor_max_y, 0, 0);
+	input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
+						MAX_TOUCH_MAJOR, 0, 0);
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(&client->dev, "%s:input register failed\n", __func__);
+		goto err_input_register;
+	}
+
+	/* Clear interrupts */
+	synaptics_rmi4_i2c_block_read(rmi4_data,
+			rmi4_data->fn01_data_base_addr + 1, intr_status,
+				rmi4_data->number_of_interrupt_register);
+	retval = request_threaded_irq(platformdata->irq_number, NULL,
+					synaptics_rmi4_irq,
+					platformdata->irq_type,
+					platformdata->name, rmi4_data);
+	if (retval) {
+		dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
+				__func__, platformdata->irq_number);
+		goto err_request_irq;
+	}
+
+	return retval;
+
+err_request_irq:
+	free_irq(platformdata->irq_number, rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+err_input_register:
+	i2c_set_clientdata(client, NULL);
+err_query_dev:
+	if (platformdata->regulator_en) {
+		regulator_disable(rmi4_data->regulator);
+		regulator_put(rmi4_data->regulator);
+	}
+err_regulator:
+	input_free_device(rmi4_data->input_dev);
+	rmi4_data->input_dev = NULL;
+err_input:
+	kfree(rmi4_data);
+
+	return retval;
+}
+/**
+ * synaptics_rmi4_remove() - Removes the i2c-client touchscreen driver
+ * @client: i2c client structure pointer
+ *
+ * This function is used to remove the i2c-client
+ * touchscreen driver and returns an integer.
+ */
+static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
+{
+	struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
+	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+	rmi4_data->touch_stopped = true;
+	wake_up(&rmi4_data->wait);
+	free_irq(pdata->irq_number, rmi4_data);
+	input_unregister_device(rmi4_data->input_dev);
+	if (pdata->regulator_en) {
+		regulator_disable(rmi4_data->regulator);
+		regulator_put(rmi4_data->regulator);
+	}
+	kfree(rmi4_data);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/**
+ * synaptics_rmi4_suspend() - suspend the touch screen controller
+ * @dev: pointer to device structure
+ *
+ * This function is used to suspend the
+ * touch panel controller and returns an integer.
+ */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+	/* Touch sleep mode */
+	int retval;
+	unsigned char intr_status;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+	rmi4_data->touch_stopped = true;
+	disable_irq(pdata->irq_number);
+
+	retval = synaptics_rmi4_i2c_block_read(rmi4_data,
+				rmi4_data->fn01_data_base_addr + 1,
+				&intr_status,
+				rmi4_data->number_of_interrupt_register);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
+					rmi4_data->fn01_ctrl_base_addr + 1,
+					(intr_status & ~TOUCHPAD_CTRL_INTR));
+	if (retval < 0)
+		return retval;
+
+	if (pdata->regulator_en)
+		regulator_disable(rmi4_data->regulator);
+
+	return 0;
+}
+/**
+ * synaptics_rmi4_resume() - resume the touch screen controller
+ * @dev: pointer to device structure
+ *
+ * This function is used to resume the touch panel
+ * controller and returns an integer.
+ */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+	int retval;
+	unsigned char intr_status;
+	struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+	const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+	if (pdata->regulator_en)
+		regulator_enable(rmi4_data->regulator);
+
+	enable_irq(pdata->irq_number);
+	rmi4_data->touch_stopped = false;
+
+	retval = synaptics_rmi4_i2c_block_read(rmi4_data,
+				rmi4_data->fn01_data_base_addr + 1,
+				&intr_status,
+				rmi4_data->number_of_interrupt_register);
+	if (retval < 0)
+		return retval;
+
+	retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
+					rmi4_data->fn01_ctrl_base_addr + 1,
+					(intr_status | TOUCHPAD_CTRL_INTR));
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+	.suspend = synaptics_rmi4_suspend,
+	.resume  = synaptics_rmi4_resume,
+};
+#endif
+
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+	{ DRIVER_NAME, 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+static struct i2c_driver synaptics_rmi4_driver = {
+	.driver = {
+		.name	=	DRIVER_NAME,
+		.owner	=	THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	=	&synaptics_rmi4_dev_pm_ops,
+#endif
+	},
+	.probe		=	synaptics_rmi4_probe,
+	.remove		=	__devexit_p(synaptics_rmi4_remove),
+	.id_table	=	synaptics_rmi4_id_table,
+};
+/**
+ * synaptics_rmi4_init() - Initialize the touchscreen driver
+ *
+ * This function initializes the synaptics
+ * touchscreen driver and returns an integer.
+ */
+static int __init synaptics_rmi4_init(void)
+{
+	return i2c_add_driver(&synaptics_rmi4_driver);
+}
+/**
+ * synaptics_rmi4_exit() - De-initialize the touchscreen driver
+ *
+ * This function de-initializes the synaptics
+ * touchscreen driver and returns nothing.
+ */
+static void __exit synaptics_rmi4_exit(void)
+{
+	i2c_del_driver(&synaptics_rmi4_driver);
+}
+
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("naveen.gaddipati@stericsson.com, js.ha@stericsson.com");
+MODULE_DESCRIPTION("synaptics rmi4 i2c touch Driver");
+MODULE_ALIAS("i2c:synaptics_rmi4_ts");
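The F11 finger-status and coordinate packing used by synpatics_rmi4_touchpad_report() above can be exercised in isolation. The following is a minimal stand-alone user-space sketch (not kernel code); the register bytes are invented sample data, and the unpacking simply mirrors the driver: two status bits per finger, four fingers per status byte, and 12-bit X/Y values split across three data bytes.

/*
 * Stand-alone illustration of the RMI4 F11 unpacking shown above.
 * The status/data bytes are invented sample values; a real read would
 * fetch a separate data block per finger from the device.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t status[2] = { 0x09, 0x00 };	/* fingers 0 and 1 present */
	uint8_t data[5]   = { 0x12, 0x34, 0x56, 0x78, 0x00 };
	int fingers_supported = 5;
	int finger;

	for (finger = 0; finger < fingers_supported; finger++) {
		int reg    = finger / 4;		/* status byte index */
		int shift  = (finger % 4) * 2;		/* 2 bits per finger */
		int fstate = (status[reg] >> shift) & 3;

		if (fstate == 1 || fstate == 2) {
			/* 12-bit X/Y: high 8 bits plus a nibble from data[2] */
			int x = (data[0] << 4) | (data[2] & 0x0f);
			int y = (data[1] << 4) | ((data[2] >> 4) & 0x0f);

			printf("finger %d: x=%d y=%d\n", finger, x, y);
		}
	}
	return 0;
}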
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
new file mode 100644
index 0000000..820ae27
--- /dev/null
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -0,0 +1,50 @@
+/**
+ *
+ * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
+ * Copyright (c) 2007-2010, Synaptics Incorporated
+ *
+ * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * Copyright 2010 (c) ST-Ericsson AB
+ */
+/*
+ * This file is licensed under the GPL2 license.
+ *
+ *#############################################################################
+ * GPL
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ *#############################################################################
+ */
+
+#ifndef _SYNAPTICS_RMI4_H_INCLUDED_
+#define _SYNAPTICS_RMI4_H_INCLUDED_
+
+/**
+ * struct synaptics_rmi4_platform_data - contains the rmi4 platform data
+ * @name: device name
+ * @irq_number: irq number
+ * @irq_type: irq type
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @regulator_en: regulator enable flag
+ *
+ * This structure gives platform data for rmi4.
+ */
+struct synaptics_rmi4_platform_data {
+	const char *name;
+	int irq_number;
+	int irq_type;
+	bool x_flip;
+	bool y_flip;
+	bool regulator_en;
+};
+
+#endif
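Board code is expected to hand a struct synaptics_rmi4_platform_data like the one above to the driver through the i2c_client's platform data. Below is a minimal board-file sketch of such a registration; the bus number, slave address, IRQ and the "synaptics_rmi4_ts" device name are placeholders (the name is assumed to match the driver's i2c_device_id / DRIVER_NAME), so treat this as an illustration rather than a drop-in board file.

/*
 * Hypothetical board-file sketch (not part of the driver): registers the
 * RMI4 touchscreen on an I2C bus.  Bus number, address, IRQ and device
 * name are placeholders for illustration only.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>

#include "synaptics_i2c_rmi4.h"

static struct synaptics_rmi4_platform_data rmi4_ts_pdata = {
	.name		= "synaptics_rmi4_ts",	/* assumed to match DRIVER_NAME */
	.irq_number	= 123,			/* placeholder attention IRQ */
	.irq_type	= IRQF_TRIGGER_FALLING,
	.x_flip		= false,
	.y_flip		= false,
	.regulator_en	= true,
};

static struct i2c_board_info rmi4_ts_i2c_info[] __initdata = {
	{
		I2C_BOARD_INFO("synaptics_rmi4_ts", 0x4b),	/* placeholder address */
		.platform_data = &rmi4_ts_pdata,
	},
};

static int __init board_rmi4_ts_init(void)
{
	/*
	 * Must run from early board init, before the I2C adapter for
	 * bus 2 (placeholder) is registered.
	 */
	return i2c_register_board_info(2, rmi4_ts_i2c_info,
				       ARRAY_SIZE(rmi4_ts_i2c_info));
}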
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index 1c1f157..1159a500 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -19,8 +19,19 @@
 #ifndef _TIOMAP_
 #define _TIOMAP_
 
-#include <plat/powerdomain.h>
-#include <plat/clockdomain.h>
+/*
+ * XXX These powerdomain.h/clockdomain.h includes are wrong and should
+ * be removed.  No driver should call pwrdm_* or clkdm_* functions
+ * directly; they should rely on OMAP core code to do this.
+ */
+#include <mach-omap2/powerdomain.h>
+#include <mach-omap2/clockdomain.h>
+/*
+ * XXX These mach-omap2/ includes are wrong and should be removed.  No
+ * driver should read or write to PRM/CM registers directly; they
+ * should rely on OMAP core code to do this.
+ */
+#include <mach-omap2/cm2xxx_3xxx.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
 #include <dspbridge/devdefs.h>
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 1be081f..a3b0a18 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -596,7 +596,7 @@
 		dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
 		if (dsp_debug)
 			while (__raw_readw(dw_sync_addr))
-				;;
+				;
 
 		/* Wait for DSP to clear word in shared memory */
 		/* Read the Location */
@@ -1671,7 +1671,7 @@
 			/* Find a free L2 PT. */
 			for (i = 0; (i < pt->l2_num_pages) &&
 			     (pt->pg_info[i].num_entries != 0); i++)
-				;;
+				;
 			if (i < pt->l2_num_pages) {
 				l2_page_num = i;
 				l2_base_pa = pt->l2_base_pa + (l2_page_num *
diff --git a/drivers/staging/tidspbridge/rmgr/nldr.c b/drivers/staging/tidspbridge/rmgr/nldr.c
index a6ae007..28354bb 100644
--- a/drivers/staging/tidspbridge/rmgr/nldr.c
+++ b/drivers/staging/tidspbridge/rmgr/nldr.c
@@ -943,7 +943,7 @@
 
 	/* Determine which phase this section belongs to */
 	for (pch = sect_name + 1; *pch && *pch != seps; pch++)
-		;;
+		;
 
 	if (*pch) {
 		pch++;		/* Skip over the ':' */
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 832608d..08bd26a 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -799,20 +799,6 @@
 		spin_unlock_irqrestore(&vdev->priv_lock, flags2);
 	}
 
-
-	if (!vdev->ud.tcp_socket) {
-		/* tcp connection is closed */
-		usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n",
-									urb);
-
-		usb_hcd_unlink_urb_from_ep(hcd, urb);
-
-		spin_unlock_irqrestore(&the_controller->lock, flags);
-		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
-								urb->status);
-		spin_lock_irqsave(&the_controller->lock, flags);
-	}
-
 	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usbip_dbg_vhci_hc("leave\n");
diff --git a/drivers/staging/vme/bridges/Module.symvers b/drivers/staging/vme/bridges/Module.symvers
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/drivers/staging/vme/bridges/Module.symvers
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 4d74562..42de83e 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -44,7 +44,7 @@
 
 static char driver_name[] = "vme_ca91cx42";
 
-static const struct pci_device_id ca91cx42_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
 	{ },
 };
@@ -58,7 +58,7 @@
 
 static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
 {
-	wake_up(&(bridge->dma_queue));
+	wake_up(&bridge->dma_queue);
 
 	return CA91CX42_LINT_DMA;
 }
@@ -82,14 +82,14 @@
 /* XXX This needs to be split into 4 queues */
 static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
 {
-	wake_up(&(bridge->mbox_queue));
+	wake_up(&bridge->mbox_queue);
 
 	return CA91CX42_LINT_MBOX;
 }
 
 static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
 {
-	wake_up(&(bridge->iack_queue));
+	wake_up(&bridge->iack_queue);
 
 	return CA91CX42_LINT_SW_IACK;
 }
@@ -207,9 +207,9 @@
 	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
 
 	/* Initialise list for VME bus errors */
-	INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));
+	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);
 
-	mutex_init(&(ca91cx42_bridge->irq_mtx));
+	mutex_init(&ca91cx42_bridge->irq_mtx);
 
 	/* Disable interrupts from PCI to VME */
 	iowrite32(0, bridge->base + VINT_EN);
@@ -259,8 +259,8 @@
 /*
  * Set up an VME interrupt
  */
-void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
-	int sync)
+static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
+	int state, int sync)
 
 {
 	struct pci_dev *pdev;
@@ -287,7 +287,7 @@
 	}
 }
 
-int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
+static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
 	int statid)
 {
 	u32 tmp;
@@ -299,7 +299,7 @@
 	if (statid & 1)
 		return -EINVAL;
 
-	mutex_lock(&(bridge->vme_int));
+	mutex_lock(&bridge->vme_int);
 
 	tmp = ioread32(bridge->base + VINT_EN);
 
@@ -318,12 +318,12 @@
 	tmp = tmp & ~(1 << (level + 24));
 	iowrite32(tmp, bridge->base + VINT_EN);
 
-	mutex_unlock(&(bridge->vme_int));
+	mutex_unlock(&bridge->vme_int);
 
 	return 0;
 }
 
-int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
+static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
 	unsigned long long vme_base, unsigned long long size,
 	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
 {
@@ -429,7 +429,7 @@
 	return 0;
 }
 
-int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
+static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
 	unsigned long long *vme_base, unsigned long long *size,
 	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
 {
@@ -518,8 +518,8 @@
 		image->kern_base = NULL;
 		if (image->bus_resource.name != NULL)
 			kfree(image->bus_resource.name);
-		release_resource(&(image->bus_resource));
-		memset(&(image->bus_resource), 0, sizeof(struct resource));
+		release_resource(&image->bus_resource);
+		memset(&image->bus_resource, 0, sizeof(struct resource));
 	}
 
 	if (image->bus_resource.name == NULL) {
@@ -540,7 +540,7 @@
 	image->bus_resource.flags = IORESOURCE_MEM;
 
 	retval = pci_bus_alloc_resource(pdev->bus,
-		&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
+		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
 		0, NULL, NULL);
 	if (retval) {
 		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
@@ -563,10 +563,10 @@
 	iounmap(image->kern_base);
 	image->kern_base = NULL;
 err_remap:
-	release_resource(&(image->bus_resource));
+	release_resource(&image->bus_resource);
 err_resource:
 	kfree(image->bus_resource.name);
-	memset(&(image->bus_resource), 0, sizeof(struct resource));
+	memset(&image->bus_resource, 0, sizeof(struct resource));
 err_name:
 	return retval;
 }
@@ -578,13 +578,13 @@
 {
 	iounmap(image->kern_base);
 	image->kern_base = NULL;
-	release_resource(&(image->bus_resource));
+	release_resource(&image->bus_resource);
 	kfree(image->bus_resource.name);
-	memset(&(image->bus_resource), 0, sizeof(struct resource));
+	memset(&image->bus_resource, 0, sizeof(struct resource));
 }
 
 
-int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
+static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
 	unsigned long long vme_base, unsigned long long size,
 	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
 {
@@ -620,7 +620,7 @@
 		goto err_window;
 	}
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	/*
 	 * Let's allocate the resource here rather than further up the stack as
@@ -628,7 +628,7 @@
 	 */
 	retval = ca91cx42_alloc_resource(image, size);
 	if (retval) {
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
 			"for resource name\n");
 		retval = -ENOMEM;
@@ -672,7 +672,7 @@
 		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
 		break;
 	default:
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
 		retval = -EINVAL;
 		goto err_dwidth;
@@ -704,7 +704,7 @@
 	case VME_USER3:
 	case VME_USER4:
 	default:
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
 		retval = -EINVAL;
 		goto err_aspace;
@@ -730,7 +730,7 @@
 
 	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
 
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 	return 0;
 
 err_aspace:
@@ -741,8 +741,8 @@
 	return retval;
 }
 
-int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
-	unsigned long long *vme_base, unsigned long long *size,
+static int __ca91cx42_master_get(struct vme_master_resource *image,
+	int *enabled, unsigned long long *vme_base, unsigned long long *size,
 	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
 {
 	unsigned int i, ctl;
@@ -828,24 +828,24 @@
 	return 0;
 }
 
-int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
+static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
 	unsigned long long *vme_base, unsigned long long *size,
 	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
 {
 	int retval;
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
 		cycle, dwidth);
 
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
 	return retval;
 }
 
-ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
-	size_t count, loff_t offset)
+static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
+	void *buf, size_t count, loff_t offset)
 {
 	ssize_t retval;
 	void *addr = image->kern_base + offset;
@@ -855,7 +855,7 @@
 	if (count == 0)
 		return 0;
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	/* The following code handles VME address alignment problem
 	 * in order to assure the maximal data width cycle.
@@ -899,13 +899,13 @@
 	}
 out:
 	retval = count;
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
 	return retval;
 }
 
-ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
-	size_t count, loff_t offset)
+static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
+	void *buf, size_t count, loff_t offset)
 {
 	ssize_t retval;
 	void *addr = image->kern_base + offset;
@@ -915,7 +915,7 @@
 	if (count == 0)
 		return 0;
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	/* Here we apply for the same strategy we do in master_read
 	 * function in order to assure D16 cycle when required.
@@ -954,11 +954,12 @@
 out:
 	retval = count;
 
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
+
 	return retval;
 }
 
-unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
+static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
 	unsigned int mask, unsigned int compare, unsigned int swap,
 	loff_t offset)
 {
@@ -974,10 +975,10 @@
 	i = image->number;
 
 	/* Locking as we can only do one of these at a time */
-	mutex_lock(&(bridge->vme_rmw));
+	mutex_lock(&bridge->vme_rmw);
 
 	/* Lock image */
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	pci_addr = (u32)image->kern_base + offset;
 
@@ -1007,15 +1008,15 @@
 	iowrite32(0, bridge->base + SCYC_CTL);
 
 out:
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
-	mutex_unlock(&(bridge->vme_rmw));
+	mutex_unlock(&bridge->vme_rmw);
 
 	return result;
 }
 
-int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
-	struct vme_dma_attr *dest, size_t count)
+static int ca91cx42_dma_list_add(struct vme_dma_list *list,
+	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
 {
 	struct ca91cx42_dma_entry *entry, *prev;
 	struct vme_dma_pci *pci_attr;
@@ -1036,14 +1037,14 @@
 	}
 
 	/* Test descriptor alignment */
-	if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
+	if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
 		dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
-			"required: %p\n", &(entry->descriptor));
+			"required: %p\n", &entry->descriptor);
 		retval = -EINVAL;
 		goto err_align;
 	}
 
-	memset(&(entry->descriptor), 0, sizeof(struct ca91cx42_dma_descriptor));
+	memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
 
 	if (dest->type == VME_DMA_VME) {
 		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
@@ -1138,14 +1139,14 @@
 	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
 
 	/* Add to list */
-	list_add_tail(&(entry->list), &(list->entries));
+	list_add_tail(&entry->list, &list->entries);
 
 	/* Fill out previous descriptors "Next Address" */
-	if (entry->list.prev != &(list->entries)) {
+	if (entry->list.prev != &list->entries) {
 		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
 			list);
 		/* We need the bus address for the pointer */
-		desc_ptr = virt_to_bus(&(entry->descriptor));
+		desc_ptr = virt_to_bus(&entry->descriptor);
 		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
 	}
 
@@ -1175,7 +1176,7 @@
 		return 1;
 }
 
-int ca91cx42_dma_list_exec(struct vme_dma_list *list)
+static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
 {
 	struct vme_dma_resource *ctrlr;
 	struct ca91cx42_dma_entry *entry;
@@ -1190,28 +1191,28 @@
 	bridge = ctrlr->parent->driver_priv;
 	dev = ctrlr->parent->parent;
 
-	mutex_lock(&(ctrlr->mtx));
+	mutex_lock(&ctrlr->mtx);
 
-	if (!(list_empty(&(ctrlr->running)))) {
+	if (!(list_empty(&ctrlr->running))) {
 		/*
 		 * XXX We have an active DMA transfer and currently haven't
 		 *     sorted out the mechanism for "pending" DMA transfers.
 		 *     Return busy.
 		 */
 		/* Need to add to pending here */
-		mutex_unlock(&(ctrlr->mtx));
+		mutex_unlock(&ctrlr->mtx);
 		return -EBUSY;
 	} else {
-		list_add(&(list->list), &(ctrlr->running));
+		list_add(&list->list, &ctrlr->running);
 	}
 
 	/* Get first bus address and write into registers */
-	entry = list_first_entry(&(list->entries), struct ca91cx42_dma_entry,
+	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
 		list);
 
-	bus_addr = virt_to_bus(&(entry->descriptor));
+	bus_addr = virt_to_bus(&entry->descriptor);
 
-	mutex_unlock(&(ctrlr->mtx));
+	mutex_unlock(&ctrlr->mtx);
 
 	iowrite32(0, bridge->base + DTBC);
 	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
@@ -1249,21 +1250,21 @@
 	}
 
 	/* Remove list from running list */
-	mutex_lock(&(ctrlr->mtx));
-	list_del(&(list->list));
-	mutex_unlock(&(ctrlr->mtx));
+	mutex_lock(&ctrlr->mtx);
+	list_del(&list->list);
+	mutex_unlock(&ctrlr->mtx);
 
 	return retval;
 
 }
 
-int ca91cx42_dma_list_empty(struct vme_dma_list *list)
+static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
 {
 	struct list_head *pos, *temp;
 	struct ca91cx42_dma_entry *entry;
 
 	/* detach and free each entry */
-	list_for_each_safe(pos, temp, &(list->entries)) {
+	list_for_each_safe(pos, temp, &list->entries) {
 		list_del(pos);
 		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
 		kfree(entry);
@@ -1279,8 +1280,8 @@
  * This does not enable the LM monitor - that should be done when the first
  * callback is attached and disabled when the last callback is removed.
  */
-int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
-	vme_address_t aspace, vme_cycle_t cycle)
+static int ca91cx42_lm_set(struct vme_lm_resource *lm,
+	unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
 {
 	u32 temp_base, lm_ctl = 0;
 	int i;
@@ -1298,12 +1299,12 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* If we already have a callback attached, we can't move it! */
 	for (i = 0; i < lm->monitors; i++) {
 		if (bridge->lm_callback[i] != NULL) {
-			mutex_unlock(&(lm->mtx));
+			mutex_unlock(&lm->mtx);
 			dev_err(dev, "Location monitor callback attached, "
 				"can't reset\n");
 			return -EBUSY;
@@ -1321,7 +1322,7 @@
 		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
 		break;
 	default:
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 		dev_err(dev, "Invalid address space\n");
 		return -EINVAL;
 		break;
@@ -1339,7 +1340,7 @@
 	iowrite32(lm_base, bridge->base + LM_BS);
 	iowrite32(lm_ctl, bridge->base + LM_CTL);
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return 0;
 }
@@ -1347,15 +1348,15 @@
 /* Get configuration of the callback monitor and return whether it is enabled
  * or disabled.
  */
-int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
-	vme_address_t *aspace, vme_cycle_t *cycle)
+static int ca91cx42_lm_get(struct vme_lm_resource *lm,
+	unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
 {
 	u32 lm_ctl, enabled = 0;
 	struct ca91cx42_driver *bridge;
 
 	bridge = lm->parent->driver_priv;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
 	lm_ctl = ioread32(bridge->base + LM_CTL);
@@ -1380,7 +1381,7 @@
 	if (lm_ctl & CA91CX42_LM_CTL_DATA)
 		*cycle |= VME_DATA;
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return enabled;
 }
@@ -1390,7 +1391,7 @@
  *
  * Callback will be passed the monitor triggered.
  */
-int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
+static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
 	void (*callback)(int))
 {
 	u32 lm_ctl, tmp;
@@ -1400,19 +1401,19 @@
 	bridge = lm->parent->driver_priv;
 	dev = lm->parent->parent;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* Ensure that the location monitor is configured - need PGM or DATA */
 	lm_ctl = ioread32(bridge->base + LM_CTL);
 	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 		dev_err(dev, "Location monitor not properly configured\n");
 		return -EINVAL;
 	}
 
 	/* Check that a callback isn't already attached */
 	if (bridge->lm_callback[monitor] != NULL) {
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 		dev_err(dev, "Existing callback attached\n");
 		return -EBUSY;
 	}
@@ -1431,7 +1432,7 @@
 		iowrite32(lm_ctl, bridge->base + LM_CTL);
 	}
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return 0;
 }
@@ -1439,14 +1440,14 @@
 /*
 * Detach a callback function from a specific location monitor.
  */
-int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
+static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
 {
 	u32 tmp;
 	struct ca91cx42_driver *bridge;
 
 	bridge = lm->parent->driver_priv;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* Disable Location Monitor and ensure previous interrupts are clear */
 	tmp = ioread32(bridge->base + LINT_EN);
@@ -1467,12 +1468,12 @@
 		iowrite32(tmp, bridge->base + LM_CTL);
 	}
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return 0;
 }
 
-int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
+static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
 {
 	u32 slot = 0;
 	struct ca91cx42_driver *bridge;
@@ -1526,7 +1527,7 @@
 
 	/* Allocate mem for CR/CSR image */
 	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
-		&(bridge->crcsr_bus));
+		&bridge->crcsr_bus);
 	if (bridge->crcsr_kernel == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
 			"image\n");
@@ -1632,12 +1633,12 @@
 	}
 
 	/* Initialize wait queues & mutual exclusion flags */
-	init_waitqueue_head(&(ca91cx42_device->dma_queue));
-	init_waitqueue_head(&(ca91cx42_device->iack_queue));
-	mutex_init(&(ca91cx42_device->vme_int));
-	mutex_init(&(ca91cx42_device->vme_rmw));
+	init_waitqueue_head(&ca91cx42_device->dma_queue);
+	init_waitqueue_head(&ca91cx42_device->iack_queue);
+	mutex_init(&ca91cx42_device->vme_int);
+	mutex_init(&ca91cx42_device->vme_rmw);
 
-	ca91cx42_bridge->parent = &(pdev->dev);
+	ca91cx42_bridge->parent = &pdev->dev;
 	strcpy(ca91cx42_bridge->name, driver_name);
 
 	/* Setup IRQ */
@@ -1648,7 +1649,7 @@
 	}
 
 	/* Add master windows to list */
-	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
+	INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
 	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
 		master_image = kmalloc(sizeof(struct vme_master_resource),
 			GFP_KERNEL);
@@ -1659,7 +1660,7 @@
 			goto err_master;
 		}
 		master_image->parent = ca91cx42_bridge;
-		spin_lock_init(&(master_image->lock));
+		spin_lock_init(&master_image->lock);
 		master_image->locked = 0;
 		master_image->number = i;
 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
@@ -1667,15 +1668,15 @@
 		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
 			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
 		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
-		memset(&(master_image->bus_resource), 0,
+		memset(&master_image->bus_resource, 0,
 			sizeof(struct resource));
 		master_image->kern_base  = NULL;
-		list_add_tail(&(master_image->list),
-			&(ca91cx42_bridge->master_resources));
+		list_add_tail(&master_image->list,
+			&ca91cx42_bridge->master_resources);
 	}
 
 	/* Add slave windows to list */
-	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
+	INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
 	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
 		slave_image = kmalloc(sizeof(struct vme_slave_resource),
 			GFP_KERNEL);
@@ -1686,7 +1687,7 @@
 			goto err_slave;
 		}
 		slave_image->parent = ca91cx42_bridge;
-		mutex_init(&(slave_image->mtx));
+		mutex_init(&slave_image->mtx);
 		slave_image->locked = 0;
 		slave_image->number = i;
 		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
@@ -1698,12 +1699,12 @@
 
 		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
 			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
-		list_add_tail(&(slave_image->list),
-			&(ca91cx42_bridge->slave_resources));
+		list_add_tail(&slave_image->list,
+			&ca91cx42_bridge->slave_resources);
 	}
 
 	/* Add dma engines to list */
-	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
+	INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
 	for (i = 0; i < CA91C142_MAX_DMA; i++) {
 		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
 			GFP_KERNEL);
@@ -1714,19 +1715,19 @@
 			goto err_dma;
 		}
 		dma_ctrlr->parent = ca91cx42_bridge;
-		mutex_init(&(dma_ctrlr->mtx));
+		mutex_init(&dma_ctrlr->mtx);
 		dma_ctrlr->locked = 0;
 		dma_ctrlr->number = i;
 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
 			VME_DMA_MEM_TO_VME;
-		INIT_LIST_HEAD(&(dma_ctrlr->pending));
-		INIT_LIST_HEAD(&(dma_ctrlr->running));
-		list_add_tail(&(dma_ctrlr->list),
-			&(ca91cx42_bridge->dma_resources));
+		INIT_LIST_HEAD(&dma_ctrlr->pending);
+		INIT_LIST_HEAD(&dma_ctrlr->running);
+		list_add_tail(&dma_ctrlr->list,
+			&ca91cx42_bridge->dma_resources);
 	}
 
 	/* Add location monitor to list */
-	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
+	INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
 	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
 	if (lm == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate memory for "
@@ -1735,11 +1736,11 @@
 		goto err_lm;
 	}
 	lm->parent = ca91cx42_bridge;
-	mutex_init(&(lm->mtx));
+	mutex_init(&lm->mtx);
 	lm->locked = 0;
 	lm->number = 1;
 	lm->monitors = 4;
-	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
+	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
 
 	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
 	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
@@ -1786,28 +1787,28 @@
 	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
 err_lm:
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->lm_resources) {
 		lm = list_entry(pos, struct vme_lm_resource, list);
 		list_del(pos);
 		kfree(lm);
 	}
 err_dma:
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->dma_resources) {
 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
 		list_del(pos);
 		kfree(dma_ctrlr);
 	}
 err_slave:
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->slave_resources) {
 		slave_image = list_entry(pos, struct vme_slave_resource, list);
 		list_del(pos);
 		kfree(slave_image);
 	}
 err_master:
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->master_resources) {
 		master_image = list_entry(pos, struct vme_master_resource,
 			list);
 		list_del(pos);
@@ -1831,7 +1832,7 @@
 
 }
 
-void ca91cx42_remove(struct pci_dev *pdev)
+static void ca91cx42_remove(struct pci_dev *pdev)
 {
 	struct list_head *pos = NULL;
 	struct vme_master_resource *master_image;
@@ -1870,28 +1871,28 @@
 	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->lm_resources) {
 		lm = list_entry(pos, struct vme_lm_resource, list);
 		list_del(pos);
 		kfree(lm);
 	}
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->dma_resources) {
 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
 		list_del(pos);
 		kfree(dma_ctrlr);
 	}
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->slave_resources) {
 		slave_image = list_entry(pos, struct vme_slave_resource, list);
 		list_del(pos);
 		kfree(slave_image);
 	}
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
+	list_for_each(pos, &ca91cx42_bridge->master_resources) {
 		master_image = list_entry(pos, struct vme_master_resource,
 			list);
 		list_del(pos);
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.h b/drivers/staging/vme/bridges/vme_ca91cx42.h
index e72c65b..02a7c79 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.h
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.h
@@ -39,7 +39,7 @@
 
 /* Structure used to hold driver specific information */
 struct ca91cx42_driver {
-	void *base;	/* Base Address of device registers */
+	void __iomem *base;	/* Base Address of device registers */
 	wait_queue_head_t dma_queue;
 	wait_queue_head_t iack_queue;
 	wait_queue_head_t mbox_queue;
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 492ddb2..26ea42f 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -46,7 +46,7 @@
 
 static char driver_name[] = "vme_tsi148";
 
-static const struct pci_device_id tsi148_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
 	{ },
 };
@@ -81,11 +81,11 @@
 	u32 serviced = 0;
 
 	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
-		wake_up(&(bridge->dma_queue[0]));
+		wake_up(&bridge->dma_queue[0]);
 		serviced |= TSI148_LCSR_INTC_DMA0C;
 	}
 	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
-		wake_up(&(bridge->dma_queue[1]));
+		wake_up(&bridge->dma_queue[1]);
 		serviced |= TSI148_LCSR_INTC_DMA1C;
 	}
 
@@ -191,7 +191,7 @@
 	if (error) {
 		error->address = error_addr;
 		error->attributes = error_attrib;
-		list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
+		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
 	} else {
 		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
 			"VMEbus Error reporting\n");
@@ -210,7 +210,7 @@
  */
 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
 {
-	wake_up(&(bridge->iack_queue));
+	wake_up(&bridge->iack_queue);
 
 	return TSI148_LCSR_INTC_IACKC;
 }
@@ -320,9 +320,9 @@
 	bridge = tsi148_bridge->driver_priv;
 
 	/* Initialise list for VME bus errors */
-	INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
+	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
 
-	mutex_init(&(tsi148_bridge->irq_mtx));
+	mutex_init(&tsi148_bridge->irq_mtx);
 
 	result = request_irq(pdev->irq,
 			     tsi148_irqhandler,
@@ -374,8 +374,11 @@
 	return 0;
 }
 
-static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
+static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
+	struct pci_dev *pdev)
 {
+	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
+
 	/* Turn off interrupts */
 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
@@ -384,13 +387,13 @@
 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
 
 	/* Detach interrupt handler */
-	free_irq(pdev->irq, pdev);
+	free_irq(pdev->irq, tsi148_bridge);
 }
 
 /*
  * Check to see if an IACk has been received, return true (1) or false (0).
  */
-int tsi148_iack_received(struct tsi148_driver *bridge)
+static int tsi148_iack_received(struct tsi148_driver *bridge)
 {
 	u32 tmp;
 
@@ -405,7 +408,7 @@
 /*
  * Configure VME interrupt
  */
-void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
+static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
 	int state, int sync)
 {
 	struct pci_dev *pdev;
@@ -445,14 +448,15 @@
  * Generate a VME bus interrupt at the requested level & vector. Wait for
  * interrupt to be acked.
  */
-int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
+static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
+	int statid)
 {
 	u32 tmp;
 	struct tsi148_driver *bridge;
 
 	bridge = tsi148_bridge->driver_priv;
 
-	mutex_lock(&(bridge->vme_int));
+	mutex_lock(&bridge->vme_int);
 
 	/* Read VICR register */
 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
@@ -470,7 +474,7 @@
 	wait_event_interruptible(bridge->iack_queue,
 		tsi148_iack_received(bridge));
 
-	mutex_unlock(&(bridge->vme_int));
+	mutex_unlock(&bridge->vme_int);
 
 	return 0;
 }
@@ -496,7 +500,7 @@
 	 */
 	err_pos = NULL;
 	/* Iterate through errors */
-	list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
+	list_for_each(err_pos, &tsi148_bridge->vme_errors) {
 		vme_err = list_entry(err_pos, struct vme_bus_error, list);
 		if ((vme_err->address >= address) &&
 			(vme_err->address < bound)) {
@@ -530,7 +534,7 @@
 	 */
 	err_pos = NULL;
 	/* Iterate through errors */
-	list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
+	list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
 		vme_err = list_entry(err_pos, struct vme_bus_error, list);
 
 		if ((vme_err->address >= address) &&
@@ -545,7 +549,7 @@
 /*
  * Initialize a slave window with the requested attributes.
  */
-int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
+static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
 	unsigned long long vme_base, unsigned long long size,
 	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
 {
@@ -695,7 +699,7 @@
 /*
  * Get slave window configuration.
  */
-int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
+static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
 	unsigned long long *vme_base, unsigned long long *size,
 	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
 {
@@ -819,8 +823,8 @@
 		image->kern_base = NULL;
 		if (image->bus_resource.name != NULL)
 			kfree(image->bus_resource.name);
-		release_resource(&(image->bus_resource));
-		memset(&(image->bus_resource), 0, sizeof(struct resource));
+		release_resource(&image->bus_resource);
+		memset(&image->bus_resource, 0, sizeof(struct resource));
 	}
 
 	/* Exit here if size is zero */
@@ -845,7 +849,7 @@
 	image->bus_resource.flags = IORESOURCE_MEM;
 
 	retval = pci_bus_alloc_resource(pdev->bus,
-		&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
+		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
 		0, NULL, NULL);
 	if (retval) {
 		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
@@ -868,10 +872,10 @@
 	iounmap(image->kern_base);
 	image->kern_base = NULL;
 err_remap:
-	release_resource(&(image->bus_resource));
+	release_resource(&image->bus_resource);
 err_resource:
 	kfree(image->bus_resource.name);
-	memset(&(image->bus_resource), 0, sizeof(struct resource));
+	memset(&image->bus_resource, 0, sizeof(struct resource));
 err_name:
 	return retval;
 }
@@ -883,15 +887,15 @@
 {
 	iounmap(image->kern_base);
 	image->kern_base = NULL;
-	release_resource(&(image->bus_resource));
+	release_resource(&image->bus_resource);
 	kfree(image->bus_resource.name);
-	memset(&(image->bus_resource), 0, sizeof(struct resource));
+	memset(&image->bus_resource, 0, sizeof(struct resource));
 }
 
 /*
  * Set the attributes of an outbound window.
  */
-int tsi148_master_set(struct vme_master_resource *image, int enabled,
+static int tsi148_master_set(struct vme_master_resource *image, int enabled,
 	unsigned long long vme_base, unsigned long long size,
 	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
 {
@@ -924,7 +928,7 @@
 		goto err_window;
 	}
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	/* Let's allocate the resource here rather than further up the stack as
 	 * it avoids pushing loads of bus dependent stuff up the stack. If size
@@ -932,7 +936,7 @@
 	 */
 	retval = tsi148_alloc_resource(image, size);
 	if (retval) {
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
 			"resource\n");
 		goto err_res;
@@ -959,19 +963,19 @@
 	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
 
 	if (pci_base_low & 0xFFFF) {
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
 		retval = -EINVAL;
 		goto err_gran;
 	}
 	if (pci_bound_low & 0xFFFF) {
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
 		retval = -EINVAL;
 		goto err_gran;
 	}
 	if (vme_offset_low & 0xFFFF) {
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
 			"alignment\n");
 		retval = -EINVAL;
@@ -1035,7 +1039,7 @@
 		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
 		break;
 	default:
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(tsi148_bridge->parent, "Invalid data width\n");
 		retval = -EINVAL;
 		goto err_dwidth;
@@ -1072,7 +1076,7 @@
 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
 		break;
 	default:
-		spin_unlock(&(image->lock));
+		spin_unlock(&image->lock);
 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
 		retval = -EINVAL;
 		goto err_aspace;
@@ -1109,7 +1113,7 @@
 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
 		TSI148_LCSR_OFFSET_OTAT);
 
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 	return 0;
 
 err_aspace:
@@ -1127,7 +1131,7 @@
  *
  * XXX Not parsing prefetch information.
  */
-int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
+static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
 	unsigned long long *vme_base, unsigned long long *size,
 	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
 {
@@ -1237,23 +1241,23 @@
 }
 
 
-int tsi148_master_get(struct vme_master_resource *image, int *enabled,
+static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
 	unsigned long long *vme_base, unsigned long long *size,
 	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
 {
 	int retval;
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
 		cycle, dwidth);
 
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
 	return retval;
 }
 
-ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
+static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
 	size_t count, loff_t offset)
 {
 	int retval, enabled;
@@ -1266,7 +1270,7 @@
 
 	tsi148_bridge = image->parent;
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
 	retval = count;
@@ -1289,13 +1293,13 @@
 	}
 
 skip_chk:
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
 	return retval;
 }
 
 
-ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
+static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
 	size_t count, loff_t offset)
 {
 	int retval = 0, enabled;
@@ -1312,7 +1316,7 @@
 
 	bridge = tsi148_bridge->driver_priv;
 
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
 	retval = count;
@@ -1352,7 +1356,7 @@
 	}
 
 skip_chk:
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
 	return retval;
 }
@@ -1362,7 +1366,7 @@
  *
  * Requires a previously configured master window, returns final value.
  */
-unsigned int tsi148_master_rmw(struct vme_master_resource *image,
+static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
 	unsigned int mask, unsigned int compare, unsigned int swap,
 	loff_t offset)
 {
@@ -1378,10 +1382,10 @@
 	i = image->number;
 
 	/* Locking as we can only do one of these at a time */
-	mutex_lock(&(bridge->vme_rmw));
+	mutex_lock(&bridge->vme_rmw);
 
 	/* Lock image */
-	spin_lock(&(image->lock));
+	spin_lock(&image->lock);
 
 	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
 		TSI148_LCSR_OFFSET_OTSAU);
@@ -1411,9 +1415,9 @@
 	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
 
-	spin_unlock(&(image->lock));
+	spin_unlock(&image->lock);
 
-	mutex_unlock(&(bridge->vme_rmw));
+	mutex_unlock(&bridge->vme_rmw);
 
 	return result;
 }
@@ -1609,8 +1613,8 @@
 /*
  * Add a link list descriptor to the list
  */
-int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
-	struct vme_dma_attr *dest, size_t count)
+static int tsi148_dma_list_add(struct vme_dma_list *list,
+	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
 {
 	struct tsi148_dma_entry *entry, *prev;
 	u32 address_high, address_low;
@@ -1633,10 +1637,10 @@
 	}
 
 	/* Test descriptor alignment */
-	if ((unsigned long)&(entry->descriptor) & 0x7) {
+	if ((unsigned long)&entry->descriptor & 0x7) {
 		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
 			"byte boundary as required: %p\n",
-			&(entry->descriptor));
+			&entry->descriptor);
 		retval = -EINVAL;
 		goto err_align;
 	}
@@ -1644,7 +1648,7 @@
 	/* Given we are going to fill out the structure, we probably don't
 	 * need to zero it, but better safe than sorry for now.
 	 */
-	memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
+	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
 
 	/* Fill out source part */
 	switch (src->type) {
@@ -1681,7 +1685,7 @@
 		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
 
 		retval = tsi148_dma_set_vme_src_attributes(
-			tsi148_bridge->parent, &(entry->descriptor.dsat),
+			tsi148_bridge->parent, &entry->descriptor.dsat,
 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
 		if (retval < 0)
 			goto err_source;
@@ -1719,7 +1723,7 @@
 		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
 
 		retval = tsi148_dma_set_vme_dest_attributes(
-			tsi148_bridge->parent, &(entry->descriptor.ddat),
+			tsi148_bridge->parent, &entry->descriptor.ddat,
 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
 		if (retval < 0)
 			goto err_dest;
@@ -1735,16 +1739,16 @@
 	entry->descriptor.dcnt = (u32)count;
 
 	/* Add to list */
-	list_add_tail(&(entry->list), &(list->entries));
+	list_add_tail(&entry->list, &list->entries);
 
 	/* Fill out previous descriptors "Next Address" */
-	if (entry->list.prev != &(list->entries)) {
+	if (entry->list.prev != &list->entries) {
 		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
 			list);
 		/* We need the bus address for the pointer */
-		desc_ptr = virt_to_bus(&(entry->descriptor));
-		reg_split(desc_ptr, &(prev->descriptor.dnlau),
-			&(prev->descriptor.dnlal));
+		desc_ptr = virt_to_bus(&entry->descriptor);
+		reg_split(desc_ptr, &prev->descriptor.dnlau,
+			&prev->descriptor.dnlal);
 	}
 
 	return 0;
@@ -1782,7 +1786,7 @@
  *
  * XXX Need to provide control register configuration.
  */
-int tsi148_dma_list_exec(struct vme_dma_list *list)
+static int tsi148_dma_list_exec(struct vme_dma_list *list)
 {
 	struct vme_dma_resource *ctrlr;
 	int channel, retval = 0;
@@ -1799,30 +1803,30 @@
 
 	bridge = tsi148_bridge->driver_priv;
 
-	mutex_lock(&(ctrlr->mtx));
+	mutex_lock(&ctrlr->mtx);
 
 	channel = ctrlr->number;
 
-	if (!list_empty(&(ctrlr->running))) {
+	if (!list_empty(&ctrlr->running)) {
 		/*
 		 * XXX We have an active DMA transfer and currently haven't
 		 *     sorted out the mechanism for "pending" DMA transfers.
 		 *     Return busy.
 		 */
 		/* Need to add to pending here */
-		mutex_unlock(&(ctrlr->mtx));
+		mutex_unlock(&ctrlr->mtx);
 		return -EBUSY;
 	} else {
-		list_add(&(list->list), &(ctrlr->running));
+		list_add(&list->list, &ctrlr->running);
 	}
 
 	/* Get first bus address and write into registers */
-	entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
+	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
 		list);
 
-	bus_addr = virt_to_bus(&(entry->descriptor));
+	bus_addr = virt_to_bus(&entry->descriptor);
 
-	mutex_unlock(&(ctrlr->mtx));
+	mutex_unlock(&ctrlr->mtx);
 
 	reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
 
@@ -1850,9 +1854,9 @@
 	}
 
 	/* Remove list from running list */
-	mutex_lock(&(ctrlr->mtx));
-	list_del(&(list->list));
-	mutex_unlock(&(ctrlr->mtx));
+	mutex_lock(&ctrlr->mtx);
+	list_del(&list->list);
+	mutex_unlock(&ctrlr->mtx);
 
 	return retval;
 }
@@ -1862,13 +1866,13 @@
  *
  * We have a separate function, don't assume that the chain can't be reused.
  */
-int tsi148_dma_list_empty(struct vme_dma_list *list)
+static int tsi148_dma_list_empty(struct vme_dma_list *list)
 {
 	struct list_head *pos, *temp;
 	struct tsi148_dma_entry *entry;
 
 	/* detach and free each entry */
-	list_for_each_safe(pos, temp, &(list->entries)) {
+	list_for_each_safe(pos, temp, &list->entries) {
 		list_del(pos);
 		entry = list_entry(pos, struct tsi148_dma_entry, list);
 		kfree(entry);
@@ -1884,7 +1888,7 @@
  * This does not enable the LM monitor - that should be done when the first
  * callback is attached and disabled when the last callback is removed.
  */
-int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
+static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
 	vme_address_t aspace, vme_cycle_t cycle)
 {
 	u32 lm_base_high, lm_base_low, lm_ctl = 0;
@@ -1896,12 +1900,12 @@
 
 	bridge = tsi148_bridge->driver_priv;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* If we already have a callback attached, we can't move it! */
 	for (i = 0; i < lm->monitors; i++) {
 		if (bridge->lm_callback[i] != NULL) {
-			mutex_unlock(&(lm->mtx));
+			mutex_unlock(&lm->mtx);
 			dev_err(tsi148_bridge->parent, "Location monitor "
 				"callback attached, can't reset\n");
 			return -EBUSY;
@@ -1922,7 +1926,7 @@
 		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
 		break;
 	default:
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
 		return -EINVAL;
 		break;
@@ -1943,7 +1947,7 @@
 	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
 	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return 0;
 }
@@ -1951,15 +1955,15 @@
 /* Get configuration of the callback monitor and return whether it is enabled
  * or disabled.
  */
-int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
-	vme_address_t *aspace, vme_cycle_t *cycle)
+static int tsi148_lm_get(struct vme_lm_resource *lm,
+	unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
 {
 	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
 	struct tsi148_driver *bridge;
 
 	bridge = lm->parent->driver_priv;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
 	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
@@ -1992,7 +1996,7 @@
 	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
 		*cycle |= VME_DATA;
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return enabled;
 }
@@ -2002,7 +2006,7 @@
  *
  * Callback will be passed the monitor triggered.
  */
-int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
+static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
 	void (*callback)(int))
 {
 	u32 lm_ctl, tmp;
@@ -2013,12 +2017,12 @@
 
 	bridge = tsi148_bridge->driver_priv;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* Ensure that the location monitor is configured - need PGM or DATA */
 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
 	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 		dev_err(tsi148_bridge->parent, "Location monitor not properly "
 			"configured\n");
 		return -EINVAL;
@@ -2026,7 +2030,7 @@
 
 	/* Check that a callback isn't already attached */
 	if (bridge->lm_callback[monitor] != NULL) {
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
 		return -EBUSY;
 	}
@@ -2049,7 +2053,7 @@
 		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
 	}
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return 0;
 }
@@ -2057,14 +2061,14 @@
 /*
  * Detach a callback function from a specific location monitor.
  */
-int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
+static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
 {
 	u32 lm_en, tmp;
 	struct tsi148_driver *bridge;
 
 	bridge = lm->parent->driver_priv;
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* Disable Location Monitor and ensure previous interrupts are clear */
 	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
@@ -2089,7 +2093,7 @@
 		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
 	}
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	return 0;
 }
@@ -2097,7 +2101,7 @@
 /*
  * Determine Geographical Addressing
  */
-int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
+static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
 {
 	u32 slot = 0;
 	struct tsi148_driver *bridge;
@@ -2142,7 +2146,7 @@
 
 	/* Allocate mem for CR/CSR image */
 	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
-		&(bridge->crcsr_bus));
+		&bridge->crcsr_bus);
 	if (bridge->crcsr_kernel == NULL) {
 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
 			"CR/CSR image\n");
@@ -2280,13 +2284,13 @@
 	}
 
 	/* Initialize wait queues & mutual exclusion flags */
-	init_waitqueue_head(&(tsi148_device->dma_queue[0]));
-	init_waitqueue_head(&(tsi148_device->dma_queue[1]));
-	init_waitqueue_head(&(tsi148_device->iack_queue));
-	mutex_init(&(tsi148_device->vme_int));
-	mutex_init(&(tsi148_device->vme_rmw));
+	init_waitqueue_head(&tsi148_device->dma_queue[0]);
+	init_waitqueue_head(&tsi148_device->dma_queue[1]);
+	init_waitqueue_head(&tsi148_device->iack_queue);
+	mutex_init(&tsi148_device->vme_int);
+	mutex_init(&tsi148_device->vme_rmw);
 
-	tsi148_bridge->parent = &(pdev->dev);
+	tsi148_bridge->parent = &pdev->dev;
 	strcpy(tsi148_bridge->name, driver_name);
 
 	/* Setup IRQ */
@@ -2314,7 +2318,7 @@
 			goto err_master;
 		}
 		tsi148_device->flush_image->parent = tsi148_bridge;
-		spin_lock_init(&(tsi148_device->flush_image->lock));
+		spin_lock_init(&tsi148_device->flush_image->lock);
 		tsi148_device->flush_image->locked = 1;
 		tsi148_device->flush_image->number = master_num;
 		tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
@@ -2324,13 +2328,13 @@
 			VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
 			VME_USER | VME_PROG | VME_DATA;
 		tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
-		memset(&(tsi148_device->flush_image->bus_resource), 0,
+		memset(&tsi148_device->flush_image->bus_resource, 0,
 			sizeof(struct resource));
 		tsi148_device->flush_image->kern_base  = NULL;
 	}
 
 	/* Add master windows to list */
-	INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
+	INIT_LIST_HEAD(&tsi148_bridge->master_resources);
 	for (i = 0; i < master_num; i++) {
 		master_image = kmalloc(sizeof(struct vme_master_resource),
 			GFP_KERNEL);
@@ -2341,7 +2345,7 @@
 			goto err_master;
 		}
 		master_image->parent = tsi148_bridge;
-		spin_lock_init(&(master_image->lock));
+		spin_lock_init(&master_image->lock);
 		master_image->locked = 0;
 		master_image->number = i;
 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
@@ -2351,15 +2355,15 @@
 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
 			VME_PROG | VME_DATA;
 		master_image->width_attr = VME_D16 | VME_D32;
-		memset(&(master_image->bus_resource), 0,
+		memset(&master_image->bus_resource, 0,
 			sizeof(struct resource));
 		master_image->kern_base  = NULL;
-		list_add_tail(&(master_image->list),
-			&(tsi148_bridge->master_resources));
+		list_add_tail(&master_image->list,
+			&tsi148_bridge->master_resources);
 	}
 
 	/* Add slave windows to list */
-	INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
+	INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
 	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
 		slave_image = kmalloc(sizeof(struct vme_slave_resource),
 			GFP_KERNEL);
@@ -2370,7 +2374,7 @@
 			goto err_slave;
 		}
 		slave_image->parent = tsi148_bridge;
-		mutex_init(&(slave_image->mtx));
+		mutex_init(&slave_image->mtx);
 		slave_image->locked = 0;
 		slave_image->number = i;
 		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
@@ -2380,12 +2384,12 @@
 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
 			VME_PROG | VME_DATA;
-		list_add_tail(&(slave_image->list),
-			&(tsi148_bridge->slave_resources));
+		list_add_tail(&slave_image->list,
+			&tsi148_bridge->slave_resources);
 	}
 
 	/* Add dma engines to list */
-	INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
+	INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
 	for (i = 0; i < TSI148_MAX_DMA; i++) {
 		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
 			GFP_KERNEL);
@@ -2396,21 +2400,21 @@
 			goto err_dma;
 		}
 		dma_ctrlr->parent = tsi148_bridge;
-		mutex_init(&(dma_ctrlr->mtx));
+		mutex_init(&dma_ctrlr->mtx);
 		dma_ctrlr->locked = 0;
 		dma_ctrlr->number = i;
 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
 			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
 			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
 			VME_DMA_PATTERN_TO_MEM;
-		INIT_LIST_HEAD(&(dma_ctrlr->pending));
-		INIT_LIST_HEAD(&(dma_ctrlr->running));
-		list_add_tail(&(dma_ctrlr->list),
-			&(tsi148_bridge->dma_resources));
+		INIT_LIST_HEAD(&dma_ctrlr->pending);
+		INIT_LIST_HEAD(&dma_ctrlr->running);
+		list_add_tail(&dma_ctrlr->list,
+			&tsi148_bridge->dma_resources);
 	}
 
 	/* Add location monitor to list */
-	INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
+	INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
 	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
 	if (lm == NULL) {
 		dev_err(&pdev->dev, "Failed to allocate memory for "
@@ -2419,11 +2423,11 @@
 		goto err_lm;
 	}
 	lm->parent = tsi148_bridge;
-	mutex_init(&(lm->mtx));
+	mutex_init(&lm->mtx);
 	lm->locked = 0;
 	lm->number = 1;
 	lm->monitors = 4;
-	list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
+	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
 
 	tsi148_bridge->slave_get = tsi148_slave_get;
 	tsi148_bridge->slave_set = tsi148_slave_set;
@@ -2477,41 +2481,40 @@
 
 	return 0;
 
-	vme_unregister_bridge(tsi148_bridge);
 err_reg:
 	tsi148_crcsr_exit(tsi148_bridge, pdev);
 err_crcsr:
 err_lm:
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->lm_resources)) {
+	list_for_each(pos, &tsi148_bridge->lm_resources) {
 		lm = list_entry(pos, struct vme_lm_resource, list);
 		list_del(pos);
 		kfree(lm);
 	}
 err_dma:
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->dma_resources)) {
+	list_for_each(pos, &tsi148_bridge->dma_resources) {
 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
 		list_del(pos);
 		kfree(dma_ctrlr);
 	}
 err_slave:
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->slave_resources)) {
+	list_for_each(pos, &tsi148_bridge->slave_resources) {
 		slave_image = list_entry(pos, struct vme_slave_resource, list);
 		list_del(pos);
 		kfree(slave_image);
 	}
 err_master:
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->master_resources)) {
+	list_for_each(pos, &tsi148_bridge->master_resources) {
 		master_image = list_entry(pos, struct vme_master_resource,
 			list);
 		list_del(pos);
 		kfree(master_image);
 	}
 
-	tsi148_irq_exit(tsi148_device, pdev);
+	tsi148_irq_exit(tsi148_bridge, pdev);
 err_irq:
 err_test:
 	iounmap(tsi148_device->base);
@@ -2531,6 +2534,7 @@
 static void tsi148_remove(struct pci_dev *pdev)
 {
 	struct list_head *pos = NULL;
+	struct list_head *tmplist;
 	struct vme_master_resource *master_image;
 	struct vme_slave_resource *slave_image;
 	struct vme_dma_resource *dma_ctrlr;
@@ -2582,36 +2586,34 @@
 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
 
-	tsi148_irq_exit(bridge, pdev);
+	tsi148_irq_exit(tsi148_bridge, pdev);
 
 	vme_unregister_bridge(tsi148_bridge);
 
 	tsi148_crcsr_exit(tsi148_bridge, pdev);
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->dma_resources)) {
+	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
 		list_del(pos);
 		kfree(dma_ctrlr);
 	}
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->slave_resources)) {
+	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
 		slave_image = list_entry(pos, struct vme_slave_resource, list);
 		list_del(pos);
 		kfree(slave_image);
 	}
 
 	/* resources are stored in link list */
-	list_for_each(pos, &(tsi148_bridge->master_resources)) {
+	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
 		master_image = list_entry(pos, struct vme_master_resource,
 			list);
 		list_del(pos);
 		kfree(master_image);
 	}
 
-	tsi148_irq_exit(bridge, pdev);
-
 	iounmap(bridge->base);
 
 	pci_release_regions(pdev);
diff --git a/drivers/staging/vme/bridges/vme_tsi148.h b/drivers/staging/vme/bridges/vme_tsi148.h
index bda64ef..9f97fa8 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.h
+++ b/drivers/staging/vme/bridges/vme_tsi148.h
@@ -35,7 +35,7 @@
 
 /* Structure used to hold driver specific information */
 struct tsi148_driver {
-	void *base;	/* Base Address of device registers */
+	void __iomem *base;	/* Base Address of device registers */
 	wait_queue_head_t dma_queue[2];
 	wait_queue_head_t iack_queue;
 	void (*lm_callback[4])(int);	/* Called in interrupt handler */
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 71bbc52..a571173 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -44,7 +44,7 @@
 static char driver_name[] = "vme_user";
 
 static int bus[USER_BUS_MAX];
-static int bus_num;
+static unsigned int bus_num;
 
 /* Currently Documentation/devices.txt defines the following for VME:
  *
@@ -92,7 +92,7 @@
  * Structure to handle image related parameters.
  */
 typedef struct {
-	void __iomem *kern_buf;	/* Buffer address in kernel space */
+	void *kern_buf;	/* Buffer address in kernel space */
 	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
 	unsigned long long size_buf;	/* Buffer size */
 	struct semaphore sem;	/* Semaphore for locking image */
@@ -114,9 +114,9 @@
 } driver_stats_t;
 static driver_stats_t statistics;
 
-struct cdev *vme_user_cdev;		/* Character device */
-struct class *vme_user_sysfs_class;	/* Sysfs class */
-struct device *vme_user_bridge;		/* Pointer to the bridge device */
+static struct cdev *vme_user_cdev;		/* Character device */
+static struct class *vme_user_sysfs_class;	/* Sysfs class */
+static struct device *vme_user_bridge;		/* Pointer to bridge device */
 
 
 static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
@@ -129,13 +129,14 @@
 
 static int vme_user_open(struct inode *, struct file *);
 static int vme_user_release(struct inode *, struct file *);
-static ssize_t vme_user_read(struct file *, char *, size_t, loff_t *);
-static ssize_t vme_user_write(struct file *, const char *, size_t, loff_t *);
+static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
+static ssize_t vme_user_write(struct file *, const char __user *, size_t,
+	loff_t *);
 static loff_t vme_user_llseek(struct file *, loff_t, int);
 static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
 
-static int __init vme_user_probe(struct device *, int, int);
-static int __exit vme_user_remove(struct device *, int, int);
+static int __devinit vme_user_probe(struct device *, int, int);
+static int __devexit vme_user_remove(struct device *, int, int);
 
 static struct file_operations vme_user_fops = {
 	.open = vme_user_open,
@@ -246,7 +247,7 @@
  * page) transfers will lock the user space buffer into memory and then
  * transfer the data directly from the user space buffers out to VME.
  */
-static ssize_t resource_from_user(unsigned int minor, const char *buf,
+static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
 	size_t count, loff_t *ppos)
 {
 	ssize_t retval;
@@ -277,7 +278,7 @@
 static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
 	size_t count, loff_t *ppos)
 {
-	void __iomem *image_ptr;
+	void *image_ptr;
 	ssize_t retval;
 
 	image_ptr = image[minor].kern_buf + *ppos;
@@ -293,10 +294,10 @@
 	return retval;
 }
 
-static ssize_t buffer_from_user(unsigned int minor, const char *buf,
+static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
 	size_t count, loff_t *ppos)
 {
-	void __iomem *image_ptr;
+	void *image_ptr;
 	size_t retval;
 
 	image_ptr = image[minor].kern_buf + *ppos;
@@ -312,7 +313,7 @@
 	return retval;
 }
 
-static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
+static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
 			loff_t *ppos)
 {
 	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
@@ -356,8 +357,8 @@
 	return retval;
 }
 
-static ssize_t vme_user_write(struct file *file, const char *buf, size_t count,
-			 loff_t *ppos)
+static ssize_t vme_user_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
 {
 	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
 	ssize_t retval;
@@ -455,6 +456,7 @@
 	unsigned int minor = MINOR(inode->i_rdev);
 	int retval;
 	dma_addr_t pci_addr;
+	void __user *argp = (void __user *)arg;
 
 	statistics.ioctls++;
 
@@ -470,11 +472,11 @@
 			 *	to userspace as they are
 			 */
 			retval = vme_master_get(image[minor].resource,
-				&(master.enable), &(master.vme_addr),
-				&(master.size), &(master.aspace),
-				&(master.cycle), &(master.dwidth));
+				&master.enable, &master.vme_addr,
+				&master.size, &master.aspace,
+				&master.cycle, &master.dwidth);
 
-			copied = copy_to_user((char *)arg, &master,
+			copied = copy_to_user(argp, &master,
 				sizeof(struct vme_master));
 			if (copied != 0) {
 				printk(KERN_WARNING "Partial copy to "
@@ -487,8 +489,7 @@
 
 		case VME_SET_MASTER:
 
-			copied = copy_from_user(&master, (char *)arg,
-				sizeof(master));
+			copied = copy_from_user(&master, argp, sizeof(master));
 			if (copied != 0) {
 				printk(KERN_WARNING "Partial copy from "
 					"userspace\n");
@@ -514,11 +515,11 @@
 			 *	to userspace as they are
 			 */
 			retval = vme_slave_get(image[minor].resource,
-				&(slave.enable), &(slave.vme_addr),
-				&(slave.size), &pci_addr, &(slave.aspace),
-				&(slave.cycle));
+				&slave.enable, &slave.vme_addr,
+				&slave.size, &pci_addr, &slave.aspace,
+				&slave.cycle);
 
-			copied = copy_to_user((char *)arg, &slave,
+			copied = copy_to_user(argp, &slave,
 				sizeof(struct vme_slave));
 			if (copied != 0) {
 				printk(KERN_WARNING "Partial copy to "
@@ -531,8 +532,7 @@
 
 		case VME_SET_SLAVE:
 
-			copied = copy_from_user(&slave, (char *)arg,
-				sizeof(slave));
+			copied = copy_from_user(&slave, argp, sizeof(slave));
 			if (copied != 0) {
 				printk(KERN_WARNING "Partial copy from "
 					"userspace\n");
@@ -596,7 +596,7 @@
 static struct vme_driver vme_user_driver = {
 	.name = driver_name,
 	.probe = vme_user_probe,
-	.remove = vme_user_remove,
+	.remove = __devexit_p(vme_user_remove),
 };
 
 
@@ -611,6 +611,7 @@
 	if (bus_num == 0) {
 		printk(KERN_ERR "%s: No cards, skipping registration\n",
 			driver_name);
+		retval = -ENODEV;
 		goto err_nocard;
 	}
 
@@ -629,6 +630,7 @@
 	if (ids == NULL) {
 		printk(KERN_ERR "%s: Unable to allocate ID table\n",
 			driver_name);
+		retval = -ENOMEM;
 		goto err_id;
 	}
 
@@ -652,7 +654,6 @@
 
 	return retval;
 
-	vme_unregister_driver(&vme_user_driver);
 err_reg:
 	kfree(ids);
 err_id:
@@ -665,7 +666,8 @@
  * as practical. We will therefore reserve the buffers and request the images
  * here so that we don't have to do it later.
  */
-static int __init vme_user_probe(struct device *dev, int cur_bus, int cur_slot)
+static int __devinit vme_user_probe(struct device *dev, int cur_bus,
+	int cur_slot)
 {
 	int i, err;
 	char name[12];
@@ -683,7 +685,7 @@
 	for (i = 0; i < VME_DEVS; i++) {
 		image[i].kern_buf = NULL;
 		image[i].pci_buf = 0;
-		sema_init(&(image[i].sem), 1);
+		sema_init(&image[i].sem, 1);
 		image[i].device = NULL;
 		image[i].resource = NULL;
 		image[i].users = 0;
@@ -727,7 +729,7 @@
 		}
 		image[i].size_buf = PCI_BUF_SIZE;
 		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
-			image[i].size_buf, &(image[i].pci_buf));
+			image[i].size_buf, &image[i].pci_buf);
 		if (image[i].kern_buf == NULL) {
 			printk(KERN_WARNING "Unable to allocate memory for "
 				"buffer\n");
@@ -828,8 +830,8 @@
 err_slave:
 	while (i > SLAVE_MINOR) {
 		i--;
-		vme_slave_free(image[i].resource);
 		buf_unalloc(i);
+		vme_slave_free(image[i].resource);
 	}
 err_class:
 	cdev_del(vme_user_cdev);
@@ -840,7 +842,8 @@
 	return err;
 }
 
-static int __exit vme_user_remove(struct device *dev, int cur_bus, int cur_slot)
+static int __devexit vme_user_remove(struct device *dev, int cur_bus,
+	int cur_slot)
 {
 	int i;
 
@@ -849,13 +852,15 @@
 		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
 	class_destroy(vme_user_sysfs_class);
 
-	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
+	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
 		kfree(image[i].kern_buf);
+		vme_master_free(image[i].resource);
+	}
 
 	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
 		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
-		vme_slave_free(image[i].resource);
 		buf_unalloc(i);
+		vme_slave_free(image[i].resource);
 	}
 
 	/* Unregister device driver */
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index 093fbff..d9fc864 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -245,7 +245,7 @@
 	}
 
 	/* Loop through slave resources */
-	list_for_each(slave_pos, &(bridge->slave_resources)) {
+	list_for_each(slave_pos, &bridge->slave_resources) {
 		slave_image = list_entry(slave_pos,
 			struct vme_slave_resource, list);
 
@@ -255,17 +255,17 @@
 		}
 
 		/* Find an unlocked and compatible image */
-		mutex_lock(&(slave_image->mtx));
+		mutex_lock(&slave_image->mtx);
 		if (((slave_image->address_attr & address) == address) &&
 			((slave_image->cycle_attr & cycle) == cycle) &&
 			(slave_image->locked == 0)) {
 
 			slave_image->locked = 1;
-			mutex_unlock(&(slave_image->mtx));
+			mutex_unlock(&slave_image->mtx);
 			allocated_image = slave_image;
 			break;
 		}
-		mutex_unlock(&(slave_image->mtx));
+		mutex_unlock(&slave_image->mtx);
 	}
 
 	/* No free image */
@@ -278,15 +278,15 @@
 		goto err_alloc;
 	}
 	resource->type = VME_SLAVE;
-	resource->entry = &(allocated_image->list);
+	resource->entry = &allocated_image->list;
 
 	return resource;
 
 err_alloc:
 	/* Unlock image */
-	mutex_lock(&(slave_image->mtx));
+	mutex_lock(&slave_image->mtx);
 	slave_image->locked = 0;
-	mutex_unlock(&(slave_image->mtx));
+	mutex_unlock(&slave_image->mtx);
 err_image:
 err_bus:
 	return NULL;
@@ -369,12 +369,12 @@
 	}
 
 	/* Unlock image */
-	mutex_lock(&(slave_image->mtx));
+	mutex_lock(&slave_image->mtx);
 	if (slave_image->locked == 0)
 		printk(KERN_ERR "Image is already free\n");
 
 	slave_image->locked = 0;
-	mutex_unlock(&(slave_image->mtx));
+	mutex_unlock(&slave_image->mtx);
 
 	/* Free up resource memory */
 	kfree(resource);
@@ -401,7 +401,7 @@
 	}
 
 	/* Loop through master resources */
-	list_for_each(master_pos, &(bridge->master_resources)) {
+	list_for_each(master_pos, &bridge->master_resources) {
 		master_image = list_entry(master_pos,
 			struct vme_master_resource, list);
 
@@ -411,18 +411,18 @@
 		}
 
 		/* Find an unlocked and compatible image */
-		spin_lock(&(master_image->lock));
+		spin_lock(&master_image->lock);
 		if (((master_image->address_attr & address) == address) &&
 			((master_image->cycle_attr & cycle) == cycle) &&
 			((master_image->width_attr & dwidth) == dwidth) &&
 			(master_image->locked == 0)) {
 
 			master_image->locked = 1;
-			spin_unlock(&(master_image->lock));
+			spin_unlock(&master_image->lock);
 			allocated_image = master_image;
 			break;
 		}
-		spin_unlock(&(master_image->lock));
+		spin_unlock(&master_image->lock);
 	}
 
 	/* Check to see if we found a resource */
@@ -437,16 +437,16 @@
 		goto err_alloc;
 	}
 	resource->type = VME_MASTER;
-	resource->entry = &(allocated_image->list);
+	resource->entry = &allocated_image->list;
 
 	return resource;
 
 	kfree(resource);
 err_alloc:
 	/* Unlock image */
-	spin_lock(&(master_image->lock));
+	spin_lock(&master_image->lock);
 	master_image->locked = 0;
-	spin_unlock(&(master_image->lock));
+	spin_unlock(&master_image->lock);
 err_image:
 err_bus:
 	return NULL;
@@ -628,12 +628,12 @@
 	}
 
 	/* Unlock image */
-	spin_lock(&(master_image->lock));
+	spin_lock(&master_image->lock);
 	if (master_image->locked == 0)
 		printk(KERN_ERR "Image is already free\n");
 
 	master_image->locked = 0;
-	spin_unlock(&(master_image->lock));
+	spin_unlock(&master_image->lock);
 
 	/* Free up resource memory */
 	kfree(resource);
@@ -662,7 +662,7 @@
 	}
 
 	/* Loop through DMA resources */
-	list_for_each(dma_pos, &(bridge->dma_resources)) {
+	list_for_each(dma_pos, &bridge->dma_resources) {
 		dma_ctrlr = list_entry(dma_pos,
 			struct vme_dma_resource, list);
 
@@ -672,16 +672,16 @@
 		}
 
 		/* Find an unlocked and compatible controller */
-		mutex_lock(&(dma_ctrlr->mtx));
+		mutex_lock(&dma_ctrlr->mtx);
 		if (((dma_ctrlr->route_attr & route) == route) &&
 			(dma_ctrlr->locked == 0)) {
 
 			dma_ctrlr->locked = 1;
-			mutex_unlock(&(dma_ctrlr->mtx));
+			mutex_unlock(&dma_ctrlr->mtx);
 			allocated_ctrlr = dma_ctrlr;
 			break;
 		}
-		mutex_unlock(&(dma_ctrlr->mtx));
+		mutex_unlock(&dma_ctrlr->mtx);
 	}
 
 	/* Check to see if we found a resource */
@@ -694,15 +694,15 @@
 		goto err_alloc;
 	}
 	resource->type = VME_DMA;
-	resource->entry = &(allocated_ctrlr->list);
+	resource->entry = &allocated_ctrlr->list;
 
 	return resource;
 
 err_alloc:
 	/* Unlock image */
-	mutex_lock(&(dma_ctrlr->mtx));
+	mutex_lock(&dma_ctrlr->mtx);
 	dma_ctrlr->locked = 0;
-	mutex_unlock(&(dma_ctrlr->mtx));
+	mutex_unlock(&dma_ctrlr->mtx);
 err_ctrlr:
 err_bus:
 	return NULL;
@@ -729,9 +729,9 @@
 		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
 		return NULL;
 	}
-	INIT_LIST_HEAD(&(dma_list->entries));
+	INIT_LIST_HEAD(&dma_list->entries);
 	dma_list->parent = ctrlr;
-	mutex_init(&(dma_list->mtx));
+	mutex_init(&dma_list->mtx);
 
 	return dma_list;
 }
@@ -880,14 +880,14 @@
 		return -EINVAL;
 	}
 
-	if (!mutex_trylock(&(list->mtx))) {
+	if (!mutex_trylock(&list->mtx)) {
 		printk(KERN_ERR "Link List already submitted\n");
 		return -EINVAL;
 	}
 
 	retval = bridge->dma_list_add(list, src, dest, count);
 
-	mutex_unlock(&(list->mtx));
+	mutex_unlock(&list->mtx);
 
 	return retval;
 }
@@ -903,11 +903,11 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&(list->mtx));
+	mutex_lock(&list->mtx);
 
 	retval = bridge->dma_list_exec(list);
 
-	mutex_unlock(&(list->mtx));
+	mutex_unlock(&list->mtx);
 
 	return retval;
 }
@@ -923,7 +923,7 @@
 		return -EINVAL;
 	}
 
-	if (!mutex_trylock(&(list->mtx))) {
+	if (!mutex_trylock(&list->mtx)) {
 		printk(KERN_ERR "Link List in use\n");
 		return -EINVAL;
 	}
@@ -935,10 +935,10 @@
 	retval = bridge->dma_list_empty(list);
 	if (retval) {
 		printk(KERN_ERR "Unable to empty link-list entries\n");
-		mutex_unlock(&(list->mtx));
+		mutex_unlock(&list->mtx);
 		return retval;
 	}
-	mutex_unlock(&(list->mtx));
+	mutex_unlock(&list->mtx);
 	kfree(list);
 
 	return retval;
@@ -956,20 +956,20 @@
 
 	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
 
-	if (!mutex_trylock(&(ctrlr->mtx))) {
+	if (!mutex_trylock(&ctrlr->mtx)) {
 		printk(KERN_ERR "Resource busy, can't free\n");
 		return -EBUSY;
 	}
 
-	if (!(list_empty(&(ctrlr->pending)) && list_empty(&(ctrlr->running)))) {
+	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
 		printk(KERN_WARNING "Resource still processing transfers\n");
-		mutex_unlock(&(ctrlr->mtx));
+		mutex_unlock(&ctrlr->mtx);
 		return -EBUSY;
 	}
 
 	ctrlr->locked = 0;
 
-	mutex_unlock(&(ctrlr->mtx));
+	mutex_unlock(&ctrlr->mtx);
 
 	return 0;
 }
@@ -1013,10 +1013,10 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&(bridge->irq_mtx));
+	mutex_lock(&bridge->irq_mtx);
 
 	if (bridge->irq[level - 1].callback[statid].func) {
-		mutex_unlock(&(bridge->irq_mtx));
+		mutex_unlock(&bridge->irq_mtx);
 		printk(KERN_WARNING "VME Interrupt already taken\n");
 		return -EBUSY;
 	}
@@ -1028,7 +1028,7 @@
 	/* Enable IRQ level */
 	bridge->irq_set(bridge, level, 1, 1);
 
-	mutex_unlock(&(bridge->irq_mtx));
+	mutex_unlock(&bridge->irq_mtx);
 
 	return 0;
 }
@@ -1054,7 +1054,7 @@
 		return;
 	}
 
-	mutex_lock(&(bridge->irq_mtx));
+	mutex_lock(&bridge->irq_mtx);
 
 	bridge->irq[level - 1].count--;
 
@@ -1065,7 +1065,7 @@
 	bridge->irq[level - 1].callback[statid].func = NULL;
 	bridge->irq[level - 1].callback[statid].priv_data = NULL;
 
-	mutex_unlock(&(bridge->irq_mtx));
+	mutex_unlock(&bridge->irq_mtx);
 }
 EXPORT_SYMBOL(vme_irq_free);
 
@@ -1111,7 +1111,7 @@
 	}
 
 	/* Loop through DMA resources */
-	list_for_each(lm_pos, &(bridge->lm_resources)) {
+	list_for_each(lm_pos, &bridge->lm_resources) {
 		lm = list_entry(lm_pos,
 			struct vme_lm_resource, list);
 
@@ -1122,14 +1122,14 @@
 		}
 
 		/* Find an unlocked controller */
-		mutex_lock(&(lm->mtx));
+		mutex_lock(&lm->mtx);
 		if (lm->locked == 0) {
 			lm->locked = 1;
-			mutex_unlock(&(lm->mtx));
+			mutex_unlock(&lm->mtx);
 			allocated_lm = lm;
 			break;
 		}
-		mutex_unlock(&(lm->mtx));
+		mutex_unlock(&lm->mtx);
 	}
 
 	/* Check to see if we found a resource */
@@ -1142,15 +1142,15 @@
 		goto err_alloc;
 	}
 	resource->type = VME_LM;
-	resource->entry = &(allocated_lm->list);
+	resource->entry = &allocated_lm->list;
 
 	return resource;
 
 err_alloc:
 	/* Unlock image */
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 	lm->locked = 0;
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 err_lm:
 err_bus:
 	return NULL;
@@ -1270,7 +1270,7 @@
 
 	lm = list_entry(resource->entry, struct vme_lm_resource, list);
 
-	mutex_lock(&(lm->mtx));
+	mutex_lock(&lm->mtx);
 
 	/* XXX
 	 * Check to see that there aren't any callbacks still attached, if
@@ -1279,7 +1279,7 @@
 
 	lm->locked = 0;
 
-	mutex_unlock(&(lm->mtx));
+	mutex_unlock(&lm->mtx);
 
 	kfree(resource);
 }
@@ -1326,7 +1326,7 @@
 static void vme_free_bus_num(int bus)
 {
 	mutex_lock(&vme_bus_num_mtx);
-	vme_bus_numbers |= ~(0x1 << bus);
+	vme_bus_numbers &= ~(0x1 << bus);
 	mutex_unlock(&vme_bus_num_mtx);
 }
 
@@ -1343,11 +1343,11 @@
 	 * specification.
 	 */
 	for (i = 0; i < VME_SLOTS_MAX; i++) {
-		dev = &(bridge->dev[i]);
+		dev = &bridge->dev[i];
 		memset(dev, 0, sizeof(struct device));
 
 		dev->parent = bridge->parent;
-		dev->bus = &(vme_bus_type);
+		dev->bus = &vme_bus_type;
 		/*
 		 * We save a pointer to the bridge in platform_data so that we
 		 * can get to it later. We keep driver_data for use by the
@@ -1366,7 +1366,7 @@
 	i = VME_SLOTS_MAX;
 err_reg:
 	while (i > -1) {
-		dev = &(bridge->dev[i]);
+		dev = &bridge->dev[i];
 		device_unregister(dev);
 	}
 	vme_free_bus_num(bridge->num);
@@ -1381,7 +1381,7 @@
 
 
 	for (i = 0; i < VME_SLOTS_MAX; i++) {
-		dev = &(bridge->dev[i]);
+		dev = &bridge->dev[i];
 		device_unregister(dev);
 	}
 	vme_free_bus_num(bridge->num);
@@ -1418,7 +1418,7 @@
 	/* Determine slot number */
 	num = 0;
 	while (num < VME_SLOTS_MAX) {
-		if (&(bridge->dev[num]) == dev)
+		if (&bridge->dev[num] == dev)
 			break;
 
 		num++;
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index b653ec0..4c6ec31 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -20,7 +20,7 @@
 	vme_cycle_t cycle_attr;
 	vme_width_t width_attr;
 	struct resource bus_resource;
-	void *kern_base;
+	void __iomem *kern_base;
 };
 
 struct vme_slave_resource {
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 32d095c..951a3a8 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -2058,7 +2058,7 @@
 QWORD CARDqGetTSFOffset (unsigned char byRxRate, QWORD qwTSF1, QWORD qwTSF2)
 {
     QWORD   qwTSFOffset;
-    unsigned short wRxBcnTSFOffst= 0;;
+    unsigned short wRxBcnTSFOffst= 0;
 
     HIDWORD(qwTSFOffset) = 0;
     LODWORD(qwTSFOffset) = 0;
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 92e3399..5e425d1 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -2073,7 +2073,7 @@
              struct iw_point *wrq,
              char *extra)
 {
-		return -EOPNOTSUPP;;
+		return -EOPNOTSUPP;
 }
 
 int iwctl_siwmlme(struct net_device *dev,
diff --git a/drivers/staging/vt6655/wpa2.c b/drivers/staging/vt6655/wpa2.c
index 805164b..744799c 100644
--- a/drivers/staging/vt6655/wpa2.c
+++ b/drivers/staging/vt6655/wpa2.c
@@ -216,7 +216,7 @@
         m = *((unsigned short *) &(pRSN->abyRSN[4]));
 
         if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
-            pBSSNode->wAKMSSAuthCount = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));;
+            pBSSNode->wAKMSSAuthCount = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));
             j = 0;
             pbyOUI = &(pRSN->abyRSN[8+4*m]);
             for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(unsigned char)); i++) {
@@ -235,7 +235,7 @@
             pBSSNode->wAKMSSAuthCount = (unsigned short)j;
             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
 
-            n = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));;
+            n = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));
             if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
                 pBSSNode->sRSNCapObj.bRSNCapExist = true;
                 pBSSNode->sRSNCapObj.wRSNCap = *((unsigned short *) &(pRSN->abyRSN[8+4*m+4*n]));
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index e5add20..0d11147 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -963,7 +963,7 @@
             break;
         case ANT_RXB:
             pDevice->byBBRxConf &= 0xFE;
-            pDevice->byBBRxConf |= 0x02;;
+            pDevice->byBBRxConf |= 0x02;
             break;
     }
 
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 8de21aa..a49053b 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -1092,7 +1092,7 @@
         pDevice->sMgmtObj.uCurrChannel = byNewChannel;
         bResult = CARDbSetMediaChannel(pDevice, byNewChannel);
 
-        return(bResult);
+	return bResult;
     }
     pDevice->byChannelSwitchCount = byCount;
     pDevice->byNewChannel = byNewChannel;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 1f9d2963..f4fb0c6 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -1608,8 +1608,8 @@
         }
     }
 
-    pDevice->bIsRxMngWorkItemQueued = FALSE;
-    spin_unlock_irq(&pDevice->lock);
+	pDevice->bIsRxMngWorkItemQueued = FALSE;
+	spin_unlock_irq(&pDevice->lock);
 
 }
 
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 0004be8..2121205 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1883,7 +1883,7 @@
              struct iw_point *wrq,
              char *extra)
 {
-		return -EOPNOTSUPP;;
+		return -EOPNOTSUPP;
 }
 
 int iwctl_siwmlme(struct net_device *dev,
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index 0c12fd3..e8c1b35 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -192,7 +192,7 @@
     // check if already in Doze mode
     ControlvReadByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PSCTL, &byData);
     if ( (byData & PSCTL_PS) != 0 )
-        return TRUE;;
+        return TRUE;
 
     if (pMgmt->eCurrMode != WMAC_MODE_IBSS_STA) {
         // check if in TIM wake period
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index bbdc127..8f18578 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -68,8 +68,7 @@
 /*---------------------  Static Classes  ----------------------------*/
 
 /*---------------------  Static Variables  --------------------------*/
-//static int          msglevel                =MSG_LEVEL_DEBUG;
-static int          msglevel                =MSG_LEVEL_INFO;
+static int          msglevel                = MSG_LEVEL_INFO;
 
 /*---------------------  Static Functions  --------------------------*/
 
diff --git a/drivers/staging/vt6656/tkip.c b/drivers/staging/vt6656/tkip.c
index a6bd533..0715636 100644
--- a/drivers/staging/vt6656/tkip.c
+++ b/drivers/staging/vt6656/tkip.c
@@ -214,13 +214,14 @@
     /* Phase 1, step 2 */
     for (i=0; i<8; i++) {
         j = 2*(i & 1);
-        p1k[0] = (p1k[0] + tkip_sbox( (p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536 )) % 65536;
-        p1k[1] = (p1k[1] + tkip_sbox( (p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536 )) % 65536;
-        p1k[2] = (p1k[2] + tkip_sbox( (p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536 )) % 65536;
-        p1k[3] = (p1k[3] + tkip_sbox( (p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536 )) % 65536;
-        p1k[4] = (p1k[4] + tkip_sbox( (p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536 )) % 65536;
+        p1k[0] = (p1k[0] + tkip_sbox((p1k[4] ^ ((256*pbyTKey[1+j]) + pbyTKey[j])) % 65536)) % 65536;
+        p1k[1] = (p1k[1] + tkip_sbox((p1k[0] ^ ((256*pbyTKey[5+j]) + pbyTKey[4+j])) % 65536)) % 65536;
+        p1k[2] = (p1k[2] + tkip_sbox((p1k[1] ^ ((256*pbyTKey[9+j]) + pbyTKey[8+j])) % 65536)) % 65536;
+        p1k[3] = (p1k[3] + tkip_sbox((p1k[2] ^ ((256*pbyTKey[13+j]) + pbyTKey[12+j])) % 65536)) % 65536;
+        p1k[4] = (p1k[4] + tkip_sbox((p1k[3] ^ (((256*pbyTKey[1+j]) + pbyTKey[j]))) % 65536)) % 65536;
         p1k[4] = (p1k[4] + i) % 65536;
     }
+
     /* Phase 2, Step 1 */
     ppk0 = p1k[0];
     ppk1 = p1k[1];
@@ -230,19 +231,19 @@
     ppk5 = (p1k[4] + tsc2) % 65536;
 
     /* Phase2, Step 2 */
-    ppk0 = ppk0 + tkip_sbox( (ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
-    ppk1 = ppk1 + tkip_sbox( (ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
-    ppk2 = ppk2 + tkip_sbox( (ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
-    ppk3 = ppk3 + tkip_sbox( (ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
-    ppk4 = ppk4 + tkip_sbox( (ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
-    ppk5 = ppk5 + tkip_sbox( (ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
+	ppk0 = ppk0 + tkip_sbox((ppk5 ^ ((256*pbyTKey[1]) + pbyTKey[0])) % 65536);
+	ppk1 = ppk1 + tkip_sbox((ppk0 ^ ((256*pbyTKey[3]) + pbyTKey[2])) % 65536);
+	ppk2 = ppk2 + tkip_sbox((ppk1 ^ ((256*pbyTKey[5]) + pbyTKey[4])) % 65536);
+	ppk3 = ppk3 + tkip_sbox((ppk2 ^ ((256*pbyTKey[7]) + pbyTKey[6])) % 65536);
+	ppk4 = ppk4 + tkip_sbox((ppk3 ^ ((256*pbyTKey[9]) + pbyTKey[8])) % 65536);
+	ppk5 = ppk5 + tkip_sbox((ppk4 ^ ((256*pbyTKey[11]) + pbyTKey[10])) % 65536);
 
-    ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
-    ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
-    ppk2 = ppk2 + rotr1(ppk1);
-    ppk3 = ppk3 + rotr1(ppk2);
-    ppk4 = ppk4 + rotr1(ppk3);
-    ppk5 = ppk5 + rotr1(ppk4);
+	ppk0 = ppk0 + rotr1(ppk5 ^ ((256*pbyTKey[13]) + pbyTKey[12]));
+	ppk1 = ppk1 + rotr1(ppk0 ^ ((256*pbyTKey[15]) + pbyTKey[14]));
+	ppk2 = ppk2 + rotr1(ppk1);
+	ppk3 = ppk3 + rotr1(ppk2);
+	ppk4 = ppk4 + rotr1(ppk3);
+	ppk5 = ppk5 + rotr1(ppk4);
 
     /* Phase 2, Step 3 */
     pbyRC4Key[0] = (tsc2 >> 8) % 256;
diff --git a/drivers/staging/vt6656/wpa2.c b/drivers/staging/vt6656/wpa2.c
index 6d13190..d4f3f75 100644
--- a/drivers/staging/vt6656/wpa2.c
+++ b/drivers/staging/vt6656/wpa2.c
@@ -215,7 +215,7 @@
         m = *((PWORD) &(pRSN->abyRSN[4]));
 
         if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
-            pBSSNode->wAKMSSAuthCount = *((PWORD) &(pRSN->abyRSN[6+4*m]));;
+            pBSSNode->wAKMSSAuthCount = *((PWORD) &(pRSN->abyRSN[6+4*m]));
             j = 0;
             pbyOUI = &(pRSN->abyRSN[8+4*m]);
             for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(BYTE)); i++) {
@@ -234,7 +234,7 @@
             pBSSNode->wAKMSSAuthCount = (WORD)j;
             DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
 
-            n = *((PWORD) &(pRSN->abyRSN[6+4*m]));;
+            n = *((PWORD) &(pRSN->abyRSN[6+4*m]));
             if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
                 pBSSNode->sRSNCapObj.bRSNCapExist = TRUE;
                 pBSSNode->sRSNCapObj.wRSNCap = *((PWORD) &(pRSN->abyRSN[8+4*m+4*n]));
diff --git a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
index a678029..ad0c61d 100644
--- a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
+++ b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
@@ -2127,10 +2127,7 @@
  */
 void *cy_as_hal_alloc(uint32_t cnt)
 {
-	void *ret_p;
-
-	ret_p = kmalloc(cnt, GFP_ATOMIC);
-	return ret_p;
+	return kmalloc(cnt, GFP_ATOMIC);
 }
 
 /*
@@ -2150,10 +2147,7 @@
  */
 void *cy_as_hal_c_b_alloc(uint32_t cnt)
 {
-	void *ret_p;
-
-	ret_p = kmalloc(cnt, GFP_ATOMIC);
-	return ret_p;
+	return kmalloc(cnt, GFP_ATOMIC);
 }
 
 /*
diff --git a/drivers/staging/winbond/Makefile b/drivers/staging/winbond/Makefile
index 79fa227..081d48d 100644
--- a/drivers/staging/winbond/Makefile
+++ b/drivers/staging/winbond/Makefile
@@ -1,6 +1,5 @@
 w35und-y :=			\
 	mds.o			\
-	mlmetxrx.o		\
 	mto.o			\
 	phy_calibration.o	\
 	reg.o			\
diff --git a/drivers/staging/winbond/core.h b/drivers/staging/winbond/core.h
index 2b87a00..d7b3aca 100644
--- a/drivers/staging/winbond/core.h
+++ b/drivers/staging/winbond/core.h
@@ -4,7 +4,7 @@
 #include <linux/wireless.h>
 #include <linux/types.h>
 
-#include "wbhal_s.h"
+#include "wbhal.h"
 #include "mto.h"
 
 #include "mac_structures.h"
diff --git a/drivers/staging/winbond/mac_structures.h b/drivers/staging/winbond/mac_structures.h
index ed3df29..76c63c7 100644
--- a/drivers/staging/winbond/mac_structures.h
+++ b/drivers/staging/winbond/mac_structures.h
@@ -21,23 +21,11 @@
 #ifndef _MAC_Structures_H_
 #define _MAC_Structures_H_
 
-#include <linux/skbuff.h>
-
-/*=========================================================
-// Some miscellaneous definitions
-//-----*/
-#define MAX_CHANNELS                        30
 #define MAC_ADDR_LENGTH                     6
-#define MAX_WEP_KEY_SIZE                    16  /* 128 bits */
-#define	MAX_802_11_FRAGMENT_NUMBER		10 /* By spec */
 
 /* ========================================================
 // 802.11 Frame define
 //----- */
-#define MASK_PROTOCOL_VERSION_TYPE	0x0F
-#define MASK_FRAGMENT_NUMBER		0x000F
-#define SEQUENCE_NUMBER_SHIFT		4
-#define DIFFER_11_TO_3				18
 #define DOT_11_MAC_HEADER_SIZE		24
 #define DOT_11_SNAP_SIZE			6
 #define DOT_11_DURATION_OFFSET		2
@@ -47,15 +35,9 @@
 #define DOT_11_TYPE_OFFSET			30
 #define DOT_11_DATA_OFFSET          24
 #define DOT_11_DA_OFFSET			4
-#define DOT_3_TYPE_ARP				0x80F3
-#define DOT_3_TYPE_IPX				0x8137
-#define DOT_3_TYPE_OFFSET			12
 
-
-#define ETHERNET_HEADER_SIZE			14
 #define MAX_ETHERNET_PACKET_SIZE		1514
 
-
 /* -----  management : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
 #define MAC_SUBTYPE_MNGMNT_ASSOC_REQUEST    0x00
 #define MAC_SUBTYPE_MNGMNT_ASSOC_RESPONSE   0x10
@@ -69,129 +51,6 @@
 #define MAC_SUBTYPE_MNGMNT_AUTHENTICATION   0xB0
 #define MAC_SUBTYPE_MNGMNT_DEAUTHENTICATION 0xC0
 
-/* -----  control : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
-#define MAC_SUBTYPE_CONTROL_PSPOLL          0xA4
-#define MAC_SUBTYPE_CONTROL_RTS             0xB4
-#define MAC_SUBTYPE_CONTROL_CTS             0xC4
-#define MAC_SUBTYPE_CONTROL_ACK             0xD4
-#define MAC_SUBTYPE_CONTROL_CFEND           0xE4
-#define MAC_SUBTYPE_CONTROL_CFEND_CFACK     0xF4
-
-/* -----  data : Type of Bits (2, 3) and Subtype of Bits (4, 5, 6, 7) */
-#define MAC_SUBTYPE_DATA                    0x08
-#define MAC_SUBTYPE_DATA_CFACK              0x18
-#define MAC_SUBTYPE_DATA_CFPOLL             0x28
-#define MAC_SUBTYPE_DATA_CFACK_CFPOLL       0x38
-#define MAC_SUBTYPE_DATA_NULL               0x48
-#define MAC_SUBTYPE_DATA_CFACK_NULL         0x58
-#define MAC_SUBTYPE_DATA_CFPOLL_NULL        0x68
-#define MAC_SUBTYPE_DATA_CFACK_CFPOLL_NULL  0x78
-
-/* -----  Frame Type of Bits (2, 3) */
-#define MAC_TYPE_MANAGEMENT                 0x00
-#define MAC_TYPE_CONTROL                    0x04
-#define MAC_TYPE_DATA                       0x08
-
-/* ----- definitions for Management Frame Element ID (1 BYTE) */
-#define ELEMENT_ID_SSID                     0
-#define ELEMENT_ID_SUPPORTED_RATES          1
-#define ELEMENT_ID_FH_PARAMETER_SET         2
-#define ELEMENT_ID_DS_PARAMETER_SET         3
-#define ELEMENT_ID_CF_PARAMETER_SET         4
-#define ELEMENT_ID_TIM                      5
-#define ELEMENT_ID_IBSS_PARAMETER_SET       6
-/* 7~15 reserverd */
-#define ELEMENT_ID_CHALLENGE_TEXT           16
-/* 17~31 reserved for challenge text extension */
-/* 32~255 reserved */
-/*--  11G  -- */
-#define ELEMENT_ID_ERP_INFORMATION			42
-#define ELEMENT_ID_EXTENDED_SUPPORTED_RATES 50
-
-/* --  WPA  -- */
-
-#define ELEMENT_ID_RSN_WPA					221
-#ifdef _WPA2_
-#define ELEMENT_ID_RSN_WPA2				    48
-#endif /* endif WPA2 */
-
-#define WLAN_MAX_PAIRWISE_CIPHER_SUITE_COUNT    ((u16) 6)
-#define WLAN_MAX_AUTH_KEY_MGT_SUITE_LIST_COUNT  ((u16) 2)
-
-/* ===================================================================
-*  Reason Code (Table 18): indicate the reason of DisAssoc, DeAuthen
-*  length of ReasonCode is 2 Octs.
-* =================================================================== */
-#define REASON_REASERED             0
-#define REASON_UNSPECIDIED          1
-#define REASON_PREAUTH_INVALID      2
-#define DEAUTH_REASON_LEFT_BSS      3
-#define DISASS_REASON_AP_INACTIVE   4
-#define DISASS_REASON_AP_BUSY       5
-#define REASON_CLASS2_FRAME_FROM_NONAUTH_STA    6
-#define REASON_CLASS3_FRAME_FROM_NONASSO_STA    7
-#define DISASS_REASON_LEFT_BSS      8
-#define REASON_NOT_AUTH_YET         9
-/* 802.11i define */
-#define REASON_INVALID_IE						13
-#define REASON_MIC_ERROR						14
-#define REASON_4WAY_HANDSHAKE_TIMEOUT			15
-#define REASON_GROUPKEY_UPDATE_TIMEOUT			16
-#define REASON_IE_DIFF_4WAY_ASSOC				17
-#define REASON_INVALID_MULTICAST_CIPHER			18
-#define REASON_INVALID_UNICAST_CIPHER			19
-#define REASON_INVALID_AKMP						20
-#define REASON_UNSUPPORTED_RSNIE_VERSION		21
-#define REASON_INVALID_RSNIE_CAPABILITY			22
-#define REASON_802_1X_AUTH_FAIL					23
-#define	REASON_CIPHER_REJECT_PER_SEC_POLICY		14
-
-/*
-//===========================================================
-// enum_MMPDUResultCode --
-//   Status code (2 Octs) in the MMPDU's frame body. Table.19
-//
-//===========================================================
-enum enum_MMPDUResultCode
-{
-//    SUCCESS   = 0,      // Redefined
-    UNSPECIFIED_FAILURE                         = 1,
-
-    // 2 - 9 Reserved
-
-    NOT_SUPPROT_CAPABILITIES                    = 10,
-
-    //REASSOCIATION_DENIED
-    //
-    REASSOC_DENIED_UNABLE_CFM_ASSOC_EXIST       = 11,
-
-    //ASSOCIATION_DENIED_NOT_IN_STANDARD
-    //
-    ASSOC_DENIED_REASON_NOT_IN_STANDARD         = 12,
-    PEER_NOT_SUPPORT_AUTH_ALGORITHM             = 13,
-    AUTH_SEQNUM_OUT_OF_EXPECT                   = 14,
-    AUTH_REJECT_REASON_CHALLENGE_FAIL           = 15,
-    AUTH_REJECT_REASON_WAIT_TIMEOUT             = 16,
-    ASSOC_DENIED_REASON_AP_BUSY                 = 17,
-    ASSOC_DENIED_REASON_NOT_SUPPORT_BASIC_RATE  = 18
-} WB_MMPDURESULTCODE, *PWB_MMPDURESULTCODE;
-*/
-
-#define	RATE_BITMAP_1M				1
-#define	RATE_BITMAP_2M				2
-#define	RATE_BITMAP_5dot5M			5
-#define RATE_BITMAP_6M				6
-#define RATE_BITMAP_9M				9
-#define RATE_BITMAP_11M				11
-#define RATE_BITMAP_12M				12
-#define RATE_BITMAP_18M				18
-#define RATE_BITMAP_22M				22
-#define RATE_BITMAP_24M				24
-#define RATE_BITMAP_33M				17
-#define RATE_BITMAP_36M				19
-#define RATE_BITMAP_48M				25
-#define RATE_BITMAP_54M				28
-
 #define RATE_AUTO					0
 #define RATE_1M						2
 #define RATE_2M						4
@@ -209,408 +68,4 @@
 #define RATE_54M					108
 #define RATE_MAX					255
 
-/* CAPABILITY */
-#define CAPABILITY_ESS_BIT				0x0001
-#define CAPABILITY_IBSS_BIT				0x0002
-#define CAPABILITY_CF_POLL_BIT			0x0004
-#define CAPABILITY_CF_POLL_REQ_BIT		0x0008
-#define CAPABILITY_PRIVACY_BIT			0x0010
-#define CAPABILITY_SHORT_PREAMBLE_BIT	0x0020
-#define CAPABILITY_PBCC_BIT				0x0040
-#define CAPABILITY_CHAN_AGILITY_BIT		0x0080
-#define CAPABILITY_SHORT_SLOT_TIME_BIT	0x0400
-#define CAPABILITY_DSSS_OFDM_BIT		0x2000
-
-
-struct Capability_Information_Element {
-  union {
-	u16 __attribute__ ((packed)) wValue;
-    #ifdef _BIG_ENDIAN_  /* 20060926 add by anson's endian */
-    struct _Capability {
-	/* --  11G  -- */
-	u8	Reserved3:2;
-	u8	DSSS_OFDM:1;
-	u8	Reserved2:2;
-	u8	Short_Slot_Time:1;
-	u8    Reserved1:2;
-	u8    Channel_Agility:1;
-	u8    PBCC:1;
-	u8    ShortPreamble:1;
-	u8    CF_Privacy:1;
-	u8    CF_Poll_Request:1;
-	u8    CF_Pollable:1;
-	u8    IBSS:1;
-	u8    ESS:1;
-    } __attribute__ ((packed)) Capability;
-    #else
-    struct _Capability {
-	u8    ESS:1;
-	u8    IBSS:1;
-	u8    CF_Pollable:1;
-	u8    CF_Poll_Request:1;
-	u8    CF_Privacy:1;
-	u8    ShortPreamble:1;
-	u8    PBCC:1;
-	u8    Channel_Agility:1;
-	u8    Reserved1:2;
-		/* --  11G  -- */
-		u8	Short_Slot_Time:1;
-		u8	Reserved2:2;
-		u8	DSSS_OFDM:1;
-		u8	Reserved3:2;
-    } __attribute__ ((packed)) Capability;
-    #endif
-  } __attribute__ ((packed)) ;
-} __attribute__ ((packed));
-
-struct FH_Parameter_Set_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    Dwell_Time[2];
-    u8    Hop_Set;
-    u8    Hop_Pattern;
-    u8    Hop_Index;
-};
-
-struct DS_Parameter_Set_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    Current_Channel;
-};
-
-struct Supported_Rates_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    SupportedRates[8];
-} __attribute__ ((packed));
-
-struct SSID_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    SSID[32];
-} __attribute__ ((packed)) ;
-
-struct CF_Parameter_Set_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    CFP_Count;
-    u8    CFP_Period;
-    u8    CFP_MaxDuration[2];     /* in Time Units */
-    u8    CFP_DurRemaining[2];    /* in time units */
-};
-
-struct TIM_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    DTIM_Count;
-    u8    DTIM_Period;
-    u8    Bitmap_Control;
-    u8    Partial_Virtual_Bitmap[251];
-};
-
-struct IBSS_Parameter_Set_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    ATIM_Window[2];
-};
-
-struct Challenge_Text_Element {
-    u8    Element_ID;
-    u8    Length;
-    u8    Challenge_Text[253];
-};
-
-struct PHY_Parameter_Set_Element {
-/*  int     aSlotTime; */
-/*  int     aSifsTime; */
-    s32     aCCATime;
-    s32     aRxTxTurnaroundTime;
-    s32     aTxPLCPDelay;
-    s32     RxPLCPDelay;
-    s32     aRxTxSwitchTime;
-    s32     aTxRampOntime;
-    s32     aTxRampOffTime;
-    s32     aTxRFDelay;
-    s32     aRxRFDelay;
-    s32     aAirPropagationTime;
-    s32     aMACProcessingDelay;
-    s32     aPreambleLength;
-    s32     aPLCPHeaderLength;
-    s32     aMPDUDurationFactor;
-    s32     aMPDUMaxLength;
-/*  int     aCWmin; */
-/*  int     aCWmax; */
-};
-
-/* --  11G  -- */
-struct ERP_Information_Element {
-    u8	Element_ID;
-    u8	Length;
-    #ifdef _BIG_ENDIAN_ /* 20060926 add by anson's endian */
-	u8	Reserved:5;   /* 20060926 add by anson */
-	u8	Barker_Preamble_Mode:1;
-	u8	Use_Protection:1;
-       u8	NonERP_Present:1;
-    #else
-	u8	NonERP_Present:1;
-	u8	Use_Protection:1;
-	u8	Barker_Preamble_Mode:1;
-	u8	Reserved:5;
-    #endif
-};
-
-struct Extended_Supported_Rates_Element {
-    u8	Element_ID;
-    u8	Length;
-    u8	ExtendedSupportedRates[255];
-} __attribute__ ((packed));
-
-/* WPA(802.11i draft 3.0) */
-#define VERSION_WPA				1
-#ifdef _WPA2_
-#define VERSION_WPA2            1
-#endif /* end def  _WPA2_ */
-/* WPA2.0 OUI=00:50:F2, the MSB is reserved for suite type */
-#define OUI_WPA					0x00F25000
-#ifdef _WPA2_
-/* for wpa2 change to 0x00ACOF04 by Ws 26/04/04 */
-#define OUI_WPA2				0x00AC0F00
-#endif /* end def _WPA2_ */
-
-#define OUI_WPA_ADDITIONAL		0x01
-#define WLAN_MIN_RSN_WPA_LENGTH                 6 /* added by ws 09/10/04 */
-#ifdef _WPA2_
-#define WLAN_MIN_RSN_WPA2_LENGTH                2 /* Fix to 2 09/14/05 */
-#endif /* end def _WPA2_ */
-
-#define oui_wpa                  (u32)(OUI_WPA|OUI_WPA_ADDITIONAL)
-
-#define WPA_OUI_BIG    ((u32) 0x01F25000)/* added by ws 09/23/04 */
-#define WPA_OUI_LITTLE  ((u32) 0x01F25001)/* added by ws 09/23/04 */
-/* 20061108 For WPS. It's little endian. Big endian is 0x0050F204 */
-#define WPA_WPS_OUI				cpu_to_le32(0x04F25000)
-
-/* -----WPA2----- */
-#ifdef _WPA2_
-#define WPA2_OUI_BIG    ((u32)0x01AC0F00)
-#define WPA2_OUI_LITTLE ((u32)0x01AC0F01)
-#endif /* end def _WPA2_ */
-
-/* Authentication suite */
-#define OUI_AUTH_WPA_NONE           0x00 /* for WPA_NONE */
-#define OUI_AUTH_8021X				0x01
-#define OUI_AUTH_PSK				0x02
-/* Cipher suite */
-#define OUI_CIPHER_GROUP_KEY        0x00  /* added by ws 05/21/04 */
-#define OUI_CIPHER_WEP_40			0x01
-#define OUI_CIPHER_TKIP				0x02
-#define OUI_CIPHER_CCMP				0x04
-#define OUI_CIPHER_WEP_104			0x05
-
-struct suite_selector{
-	union{
-		u8	Value[4];
-		struct _SUIT_ {
-			u8	OUI[3];
-			u8	Type;
-		} SuitSelector;
-	};
-};
-
-/* --  WPA  -- */
-struct	RSN_Information_Element{
-	u8					Element_ID;
-	u8					Length;
- /* WPA version 2.0 additional field, and should be 00:50:F2:01 */
-	struct suite_selector	OuiWPAAdditional;
-	u16					Version;
-	struct suite_selector		GroupKeySuite;
-	u16					PairwiseKeySuiteCount;
-	struct suite_selector		PairwiseKeySuite[1];
-} __attribute__ ((packed));
-struct RSN_Auth_Sub_Information_Element {
-	u16				AuthKeyMngtSuiteCount;
-	struct suite_selector	AuthKeyMngtSuite[1];
-} __attribute__ ((packed));
-
-/* --  WPA2  -- */
-struct RSN_Capability_Element {
-  union {
-	u16	__attribute__ ((packed))	wValue;
-    #ifdef _BIG_ENDIAN_	 /* 20060927 add by anson's endian */
-    struct _RSN_Capability {
-	u16   __attribute__ ((packed))  Reserved2:8; /* 20051201 */
-	u16   __attribute__ ((packed))  Reserved1:2;
-	u16   __attribute__ ((packed))  GTK_Replay_Counter:2;
-	u16   __attribute__ ((packed))  PTK_Replay_Counter:2;
-	u16   __attribute__ ((packed))  No_Pairwise:1;
-	u16   __attribute__ ((packed))  Pre_Auth:1;
-    } __attribute__ ((packed))  RSN_Capability;
-    #else
-    struct _RSN_Capability {
-	u16   __attribute__ ((packed))  Pre_Auth:1;
-	u16   __attribute__ ((packed))  No_Pairwise:1;
-	u16   __attribute__ ((packed))  PTK_Replay_Counter:2;
-	u16   __attribute__ ((packed))  GTK_Replay_Counter:2;
-	u16   __attribute__ ((packed))  Reserved1:2;
-	u16   __attribute__ ((packed))  Reserved2:8; /* 20051201 */
-    } __attribute__ ((packed))  RSN_Capability;
-    #endif
-
-  } __attribute__ ((packed)) ;
-} __attribute__ ((packed)) ;
-
-#ifdef _WPA2_
-struct pmkid {
-  u8 pValue[16];
-};
-
-struct	WPA2_RSN_Information_Element {
-	u8					Element_ID;
-	u8					Length;
-	u16					Version;
-	struct suite_selector		GroupKeySuite;
-	u16					PairwiseKeySuiteCount;
-	struct suite_selector		PairwiseKeySuite[1];
-
-} __attribute__ ((packed));
-
-struct WPA2_RSN_Auth_Sub_Information_Element {
-	u16				AuthKeyMngtSuiteCount;
-	struct suite_selector	AuthKeyMngtSuite[1];
-} __attribute__ ((packed));
-
-
-struct PMKID_Information_Element {
-	u16				PMKID_Count;
-	struct pmkid pmkid[16];
-} __attribute__ ((packed));
-
-#endif /* enddef _WPA2_ */
-/*============================================================
-// MAC Frame structure (different type) and subfield structure
-//============================================================*/
-struct MAC_frame_control {
-/* a combination of the [Protocol Version, Control Type, Control Subtype]*/
-    u8    mac_frame_info;
-/* 20060927 add by anson's endian */
-    #ifdef _BIG_ENDIAN_
-    u8    order:1;
-    u8    WEP:1;
-    u8    more_data:1;
-    u8    pwr_mgt:1;
-    u8    retry:1;
-    u8    more_frag:1;
-    u8    from_ds:1;
-    u8    to_ds:1;
-    #else
-    u8    to_ds:1;
-    u8    from_ds:1;
-    u8    more_frag:1;
-    u8    retry:1;
-    u8    pwr_mgt:1;
-    u8    more_data:1;
-    u8    WEP:1;
-    u8    order:1;
-    #endif
-} __attribute__ ((packed));
-
-struct Management_Frame {
-/* 2B, ToDS,FromDS,MoreFrag,MoreData,Order=0 */
-    struct MAC_frame_control frame_control;
-    u16		duration;
-    u8		DA[MAC_ADDR_LENGTH];			/* Addr1 */
-    u8		SA[MAC_ADDR_LENGTH];			/* Addr2 */
-    u8		BSSID[MAC_ADDR_LENGTH];			/* Addr3 */
-    u16		Sequence_Control;
-    /* Management Frame Body <= 325 bytes */
-    /* FCS 4 bytes */
-} __attribute__ ((packed));
-
-/* SW-MAC don't Tx/Rx Control-Frame, HW-MAC do it. */
-struct Control_Frame {
-/* ToDS,FromDS,MoreFrag,Retry,MoreData,WEP,Order=0 */
-    struct MAC_frame_control frame_control;
-    u16		duration;
-    u8		RA[MAC_ADDR_LENGTH];
-    u8		TA[MAC_ADDR_LENGTH];
-    u16		FCS;
-} __attribute__ ((packed));
-
-struct Data_Frame {
-    struct MAC_frame_control frame_control;
-    u16		duration;
-    u8		Addr1[MAC_ADDR_LENGTH];
-    u8		Addr2[MAC_ADDR_LENGTH];
-    u8		Addr3[MAC_ADDR_LENGTH];
-    u16		Sequence_Control;
-    u8		Addr4[MAC_ADDR_LENGTH]; /* only exist when ToDS=FromDS=1 */
-    /* Data Frame Body <= 2312 */
-    /* FCS */
-} __attribute__ ((packed));
-
-struct Disassociation_Frame_Body {
-    u16    reasonCode;
-} __attribute__ ((packed));
-
-struct Association_Request_Frame_Body {
-    u16    capability_information;
-    u16    listenInterval;
-    u8     Current_AP_Address[MAC_ADDR_LENGTH];/* for reassociation only */
-    /*  SSID (2+32 bytes) */
-    /*  Supported_Rates (2+8 bytes) */
-} __attribute__ ((packed));
-
-struct Association_Response_Frame_Body {
-    u16    capability_information;
-    u16    statusCode;
-    u16    Association_ID;
-    struct Supported_Rates_Element supportedRates;
-} __attribute__ ((packed));
-
-/*struct Reassociation_Request_Frame_Body
-{
-    u16    capability_information;
-    u16    listenInterval;
-    u8     Current_AP_Address[MAC_ADDR_LENGTH];
-    // SSID (2+32 bytes)
-    // Supported_Rates (2+8 bytes)
-};*/
-/* eliminated by WS 07/22/04 comboined with associateion request frame. */
-
-struct Reassociation_Response_Frame_Body {
-    u16    capability_information;
-    u16    statusCode;
-    u16    Association_ID;
-    struct Supported_Rates_Element supportedRates;
-} __attribute__ ((packed));
-
-struct Deauthentication_Frame_Body {
-    u16    reasonCode;
-} __attribute__ ((packed));
-
-
-struct Probe_Response_Frame_Body {
-    u16    Timestamp;
-    u16    Beacon_Interval;
-    u16    Capability_Information;
-    /* SSID
-    // Supported_Rates
-    // PHY parameter Set (DS Parameters)
-    // CF parameter Set
-    // IBSS parameter Set */
-} __attribute__ ((packed));
-
-struct Authentication_Frame_Body {
-    u16    algorithmNumber;
-    u16    sequenceNumber;
-    u16    statusCode;
-	/* NB: don't include ChallengeText in this structure
-	// struct Challenge_Text_Element sChallengeTextElement;
-	// wkchen added */
-} __attribute__ ((packed));
-
-
 #endif /* _MAC_Structure_H_ */
-
-
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 9217762..9cfea94 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -1,9 +1,7 @@
 #include "mds_f.h"
-#include "mlmetxrx_f.h"
 #include "mto.h"
-#include "sysdef.h"
-#include "wbhal_f.h"
-#include "wblinux_f.h"
+#include "wbhal.h"
+#include "wb35tx_f.h"
 
 unsigned char
 Mds_initial(struct wbsoft_priv *adapter)
@@ -17,11 +15,6 @@
 	return hal_get_tx_buffer(&adapter->sHwData, &pMds->pTxBuffer);
 }
 
-void
-Mds_Destroy(struct wbsoft_priv *adapter)
-{
-}
-
 static void Mds_DurationSet(struct wbsoft_priv *adapter,  struct wb35_descriptor *pDes,  u8 *buffer)
 {
 	struct T00_descriptor *pT00;
@@ -350,9 +343,7 @@
 	ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
 
 	pDes->TxRate = ctmp1;
-	#ifdef _PE_TX_DUMP_
-	printk("Tx rate =%x\n", ctmp1);
-	#endif
+	pr_debug("Tx rate =%x\n", ctmp1);
 
 	pT01->T01_modulation_type = (ctmp1%3) ? 0 : 1;
 
@@ -404,6 +395,44 @@
 
 }
 
+static void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
+{
+	desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
+	desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
+	desc->buffer_address[desc->InternalUsed] = adapter->sMlmeFrame.pMMPDU;
+	desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
+	desc->buffer_total_size += adapter->sMlmeFrame.len;
+	desc->buffer_number++;
+	desc->Type = adapter->sMlmeFrame.DataType;
+}
+
+static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
+{
+	int i;
+
+	/* Reclaim the data buffer */
+	for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
+		if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
+			break;
+	}
+	if (adapter->sMlmeFrame.TxMMPDUInUse[i])
+		adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
+	else {
+		/* Something is wrong;
+		   PD43: add debug code here? */
+	}
+}
+
+static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
+{
+	/* Reclaim the data buffer */
+	adapter->sMlmeFrame.len = 0;
+	MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
+
+	/* Return resource */
+	adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
+}
+
 void
 Mds_Tx(struct wbsoft_priv *adapter)
 {
@@ -430,9 +459,7 @@
 	do {
 		FillIndex = pMds->TxFillIndex;
 		if (pMds->TxOwner[FillIndex]) { /* Is owned by software 0:Yes 1:No */
-#ifdef _PE_TX_DUMP_
-			printk("[Mds_Tx] Tx Owner is H/W.\n");
-#endif
+			pr_debug("[Mds_Tx] Tx Owner is H/W.\n");
 			break;
 		}
 
@@ -476,9 +503,7 @@
 
 			/* For speed up Key setting */
 			if (pTxDes->EapFix) {
-#ifdef _PE_TX_DUMP_
-				printk("35: EPA 4th frame detected. Size = %d\n", PacketSize);
-#endif
+				pr_debug("35: EPA 4th frame detected. Size = %d\n", PacketSize);
 				pHwData->IsKeyPreSet = 1;
 			}
 
@@ -492,11 +517,6 @@
 			XmitBufSize += CurrentSize;
 			XmitBufAddress += CurrentSize;
 
-#ifdef _IBSS_BEACON_SEQ_STICK_
-			if ((XmitBufAddress[DOT_11_DA_OFFSET+8] & 0xfc) != MAC_SUBTYPE_MNGMNT_PROBE_REQUEST) /* +8 for USB hdr */
-#endif
-				pMds->TxToggle = true;
-
 			/* Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data */
 			MLME_SendComplete(adapter, 0, true);
 
@@ -567,9 +587,7 @@
 					pHwData->tx_retry_count[RetryCount] += RetryCount;
 				else
 					pHwData->tx_retry_count[7] += RetryCount;
-				#ifdef _PE_STATE_DUMP_
-				printk("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
-				#endif
+				pr_debug("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
 				MTO_SetTxCount(adapter, TxRate, RetryCount);
 			}
 			pHwData->dto_tx_frag_count += (RetryCount+1);
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index 7f68dea..ce8be07 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -1,11 +1,10 @@
 #ifndef __WINBOND_MDS_F_H
 #define __WINBOND_MDS_F_H
 
-#include "wbhal_s.h"
+#include "wbhal.h"
 #include "core.h"
 
 unsigned char Mds_initial(struct wbsoft_priv *adapter);
-void Mds_Destroy(struct wbsoft_priv *adapter);
 void Mds_Tx(struct wbsoft_priv *adapter);
 void Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pt02);
 void Mds_MpduProcess(struct wbsoft_priv *adapter, struct wb35_descriptor *prxdes);
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index e2de4bd..eeedf01 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -107,10 +107,6 @@
 	u8	TxRate[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)][2]; /* [0] current tx rate, [1] fall back rate */
 	u8	TxInfo[((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01)]; /*Store information for callback function */
 
-	/* for scanning mechanism */
-	u8	TxToggle;	/* It is TRUE if there are tx activities in some time interval */
-	u8	Reserved_[3];
-
 	/* ---- for Tx Parameter */
 	u16	TxFragmentThreshold;	/* For frame body only */
 	u16	TxRTSThreshold;
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
deleted file mode 100644
index 7425a23..0000000
--- a/drivers/staging/winbond/mlmetxrx.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/* ============================================================================
-  Module Name:
-    MLMETxRx.C
-
-  Description:
-    The interface between MDS (MAC Data Service) and MLME.
-
-  Revision History:
-  --------------------------------------------------------------------------
-		200209      UN20 Jennifer Xu
-		Initial Release
-		20021108    PD43 Austin Liu
-		20030117    PD43 Austin Liu
-		Deleted MLMEReturnPacket and MLMEProcThread()
-
-  Copyright (c) 1996-2002 Winbond Electronics Corp. All Rights Reserved.
-============================================================================ */
-#include "sysdef.h"
-
-#include "mds_f.h"
-
-void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
-{
-	desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
-	desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
-	desc->buffer_address[desc->InternalUsed] = adapter->sMlmeFrame.pMMPDU;
-	desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
-	desc->buffer_total_size += adapter->sMlmeFrame.len;
-	desc->buffer_number++;
-	desc->Type = adapter->sMlmeFrame.DataType;
-}
-
-static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
-{
-	int i;
-
-	/* Reclaim the data buffer */
-	for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
-		if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
-			break;
-	}
-	if (adapter->sMlmeFrame.TxMMPDUInUse[i])
-		adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
-	else  {
-		/* Something wrong
-		 PD43 Add debug code here??? */
-	}
-}
-
-void
-MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
-{
-    /* Reclaim the data buffer */
-	adapter->sMlmeFrame.len = 0;
-	MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
-
-	/* Return resource */
-	adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
-}
-
-
-
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
deleted file mode 100644
index 012507f..0000000
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* ================================================================
-// MLMETxRx.H --
-//
-//   Functions defined in MLMETxRx.c.
-//
-// Copyright (c) 2002 Winbond Electrics Corp. All Rights Reserved.
-//================================================================ */
-#ifndef _MLMETXRX_H
-#define _MLMETXRX_H
-
-#include "core.h"
-
-void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes);
-
-void
-MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
-		  unsigned char SendOK);
-
-#ifdef _IBSS_BEACON_SEQ_STICK_
-s8 SendBCNullData(struct wbsoft_priv *adapter, u16 wIdx);
-#endif
-
-#endif
diff --git a/drivers/staging/winbond/mto.c b/drivers/staging/winbond/mto.c
index 9cd2127..c03e501 100644
--- a/drivers/staging/winbond/mto.c
+++ b/drivers/staging/winbond/mto.c
@@ -17,9 +17,10 @@
  * ============================================================================
  */
 
-#include "sysdef.h"
 #include "sme_api.h"
-#include "wbhal_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "core.h"
 
 /* Declare SQ3 to rate and fragmentation threshold table */
 /* Declare fragmentation thresholds table */
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index 2b375ba..09844db 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -10,9 +10,10 @@
  */
 
 /****************** INCLUDE FILES SECTION ***********************************/
-#include "sysdef.h"
 #include "phy_calibration.h"
-#include "wbhal_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "core.h"
 
 
 /****************** DEBUG CONSTANT AND MACRO SECTION ************************/
diff --git a/drivers/staging/winbond/phy_calibration.h b/drivers/staging/winbond/phy_calibration.h
index 3032031..84f6e84 100644
--- a/drivers/staging/winbond/phy_calibration.h
+++ b/drivers/staging/winbond/phy_calibration.h
@@ -1,7 +1,7 @@
 #ifndef __WINBOND_PHY_CALIBRATION_H
 #define __WINBOND_PHY_CALIBRATION_H
 
-#include "wbhal_f.h"
+#include "wbhal.h"
 
 #define REG_AGC_CTRL1		0x1000
 #define REG_AGC_CTRL2		0x1004
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index 990f9d4..1b38d6d 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -1,5 +1,6 @@
-#include "sysdef.h"
-#include "wbhal_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "core.h"
 
 /*
  * ====================================================
@@ -1010,9 +1011,7 @@
 	case RF_AIROHA_7230:
 		/* Start to fill RF parameters, PLL_ON should be pulled low. */
 		Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000000);
-		#ifdef _PE_STATE_DUMP_
-		printk("* PLL_ON    low\n");
-		#endif
+		pr_debug("* PLL_ON    low\n");
 		number = ARRAY_SIZE(al7230_rf_data_24);
 		Set_ChanIndep_RfData_al7230_24(pHwData, pltmp, number);
 		break;
@@ -1098,9 +1097,7 @@
 	case RF_AIROHA_7230:
 		/* RF parameters have filled completely, PLL_ON should be pulled high */
 		Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000080);
-		#ifdef _PE_STATE_DUMP_
-		printk("* PLL_ON    high\n");
-		#endif
+		pr_debug("* PLL_ON    high\n");
 
 		/* 2.4GHz */
 		ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
@@ -1115,9 +1112,7 @@
 
 		/* 5GHz */
 		Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000000);
-		#ifdef _PE_STATE_DUMP_
-		printk("* PLL_ON    low\n");
-		#endif
+		pr_debug("* PLL_ON    low\n");
 
 		number = ARRAY_SIZE(al7230_rf_data_50);
 		Set_ChanIndep_RfData_al7230_50(pHwData, pltmp, number);
@@ -1127,9 +1122,7 @@
 		msleep(5);
 
 		Wb35Reg_WriteSync(pHwData, 0x03dc, 0x00000080);
-		#ifdef _PE_STATE_DUMP_
-		printk("* PLL_ON    high\n");
-		#endif
+		pr_debug("* PLL_ON    high\n");
 
 		ltmp = (1 << 31) | (0 << 30) | (24 << 24) | 0x9ABA8F;
 		Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
@@ -1795,9 +1788,7 @@
 
 			/* Write to register. number must less and equal than 16 */
 			Wb35Reg_BurstWrite(pHwData, 0x0864, pltmp, number, NO_INCREMENT);
-			#ifdef _PE_STATE_DUMP_
-			printk("Band changed\n");
-			#endif
+			pr_debug("Band changed\n");
 		}
 
 		if (Channel.band <= BAND_TYPE_OFDM_24) { /* channel 1 ~ 14 */
@@ -2073,11 +2064,7 @@
 	 */
 
 	/* M00 bit set */
-	#ifdef _IBSS_BEACON_SEQ_STICK_
-	reg->M00_MacControl = 0; /* Solve beacon sequence number stop by software */
-	#else
 	reg->M00_MacControl = 0x80000000; /* Solve beacon sequence number stop by hardware */
-	#endif
 
 	/* M24 disable enter power save, BB RxOn and enable NAV attack */
 	reg->M24_MacControl = 0x08040042;
@@ -2336,13 +2323,6 @@
 		pHwData->TxVgaFor50[32].TxVgaValue = pTxVga[17] - stmp * 2 / 4;
 		pHwData->TxVgaFor50[31].TxVgaValue = pTxVga[17] - stmp * 3 / 4;
 	}
-
-	#ifdef _PE_STATE_DUMP_
-	printk(" TxVgaFor24 :\n");
-	DataDmp((u8 *)pHwData->TxVgaFor24, 14 , 0);
-	printk(" TxVgaFor50 :\n");
-	DataDmp((u8 *)pHwData->TxVgaFor50, 70 , 0);
-	#endif
 }
 
 void BBProcessor_RateChanging(struct hw_data *pHwData,  u8 rate)
diff --git a/drivers/staging/winbond/sysdef.h b/drivers/staging/winbond/sysdef.h
deleted file mode 100644
index d0d71f6..0000000
--- a/drivers/staging/winbond/sysdef.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*  Winbond WLAN System Configuration defines */
-
-#ifndef SYS_DEF_H
-#define SYS_DEF_H
-
-#include <linux/delay.h>
-
-#define WB_LINUX
-#define WB_LINUX_WPA_PSK
-
-#define _USE_FALLBACK_RATE_
-
-#define _WPA2_
-
-#ifndef _WPA_PSK_DEBUG
-#undef  _WPA_PSK_DEBUG
-#endif
-
-/* debug print options, mark what debug you don't need */
-
-#ifdef FULL_DEBUG
-#define _PE_STATE_DUMP_
-#define _PE_TX_DUMP_
-#define _PE_RX_DUMP_
-#define _PE_OID_DUMP_
-#define _PE_DTO_DUMP_
-#define _PE_REG_DUMP_
-#define _PE_USB_INI_DUMP_
-#endif
-
-#endif
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index 7707223..42ae6101 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -1,4 +1,3 @@
-#include "sysdef.h"
 #include "wb35reg_f.h"
 
 #include <linux/usb.h>
@@ -140,8 +139,8 @@
 
 	/* Sync IoCallDriver */
 	reg->EP0vm_state = VM_RUNNING;
-	ret = usb_control_msg(pHwData->WbUsb.udev,
-			       usb_sndctrlpipe(pHwData->WbUsb.udev, 0),
+	ret = usb_control_msg(pHwData->udev,
+			       usb_sndctrlpipe(pHwData->udev, 0),
 			       0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			       0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
 	reg->EP0vm_state = VM_STOP;
@@ -150,9 +149,7 @@
 	Wb35Reg_EP0VM_start(pHwData);
 
 	if (ret < 0) {
-#ifdef _PE_REG_DUMP_
-		printk("EP0 Write register usb message sending error\n");
-#endif
+		pr_debug("EP0 Write register usb message sending error\n");
 		pHwData->SurpriseRemove = 1;
 		return false;
 	}
@@ -305,8 +302,8 @@
 		msleep(10);
 
 	reg->EP0vm_state = VM_RUNNING;
-	ret = usb_control_msg(pHwData->WbUsb.udev,
-			       usb_rcvctrlpipe(pHwData->WbUsb.udev, 0),
+	ret = usb_control_msg(pHwData->udev,
+			       usb_rcvctrlpipe(pHwData->udev, 0),
 			       0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 			       0x0, RegisterNo, pltmp, 4, HZ * 100);
 
@@ -320,9 +317,7 @@
 	Wb35Reg_EP0VM_start(pHwData);
 
 	if (ret < 0) {
-#ifdef _PE_REG_DUMP_
-		printk("EP0 Read register usb message sending error\n");
-#endif
+		pr_debug("EP0 Read register usb message sending error\n");
 		pHwData->SurpriseRemove = 1;
 		return false;
 	}
@@ -432,8 +427,8 @@
 	if (reg_queue->DIRECT == 1) /* output */
 		pBuffer = &reg_queue->VALUE;
 
-	usb_fill_control_urb(urb, pHwData->WbUsb.udev,
-			      REG_DIRECTION(pHwData->WbUsb.udev, reg_queue),
+	usb_fill_control_urb(urb, pHwData->udev,
+			      REG_DIRECTION(pHwData->udev, reg_queue),
 			      (u8 *)dr, pBuffer, cpu_to_le16(dr->wLength),
 			      Wb35Reg_EP0VM_complete, (void *)pHwData);
 
@@ -442,9 +437,7 @@
 	ret = usb_submit_urb(urb, GFP_ATOMIC);
 
 	if (ret < 0) {
-#ifdef _PE_REG_DUMP_
-		printk("EP0 Irp sending error\n");
-#endif
+		pr_debug("EP0 Irp sending error\n");
 		goto cleanup;
 	}
 	return;
@@ -479,9 +472,7 @@
 		spin_unlock_irq(&reg->EP0VM_spin_lock);
 
 		if (reg->EP0VM_status) {
-#ifdef _PE_REG_DUMP_
-			printk("EP0 IoCompleteRoutine return error\n");
-#endif
+			pr_debug("EP0 IoCompleteRoutine return error\n");
 			reg->EP0vm_state = VM_STOP;
 			pHwData->SurpriseRemove = 1;
 		} else {
@@ -526,9 +517,7 @@
 			usb_free_urb(urb);
 			kfree(reg_queue);
 		} else {
-#ifdef _PE_REG_DUMP_
-			printk("EP0 queue release error\n");
-#endif
+			pr_debug("EP0 queue release error\n");
 		}
 		spin_lock_irq(&reg->EP0VM_spin_lock);
 
diff --git a/drivers/staging/winbond/wb35reg_f.h b/drivers/staging/winbond/wb35reg_f.h
index bf23c10..95dc980 100644
--- a/drivers/staging/winbond/wb35reg_f.h
+++ b/drivers/staging/winbond/wb35reg_f.h
@@ -1,7 +1,7 @@
 #ifndef __WINBOND_WB35REG_F_H
 #define __WINBOND_WB35REG_F_H
 
-#include "wbhal_s.h"
+#include "wbhal.h"
 
 /*
  * ====================================
diff --git a/drivers/staging/winbond/wb35reg_s.h b/drivers/staging/winbond/wb35reg_s.h
index 4eff009..eb274ff 100644
--- a/drivers/staging/winbond/wb35reg_s.h
+++ b/drivers/staging/winbond/wb35reg_s.h
@@ -5,6 +5,8 @@
 #include <linux/types.h>
 #include <asm/atomic.h>
 
+struct hw_data;
+
 /* =========================================================================
  *
  *			HAL setting function
@@ -49,11 +51,7 @@
 #define DEFAULT_CWMAX			1023	/* (M2C) CWmax. Its value is in the range 0-1023. */
 #define DEFAULT_AID			1	/* (M34) AID. Its value is in the range 1-2007. */
 
-#ifdef _USE_FALLBACK_RATE_
 #define DEFAULT_RATE_RETRY_LIMIT	2	/* (M38) as named */
-#else
-#define DEFAULT_RATE_RETRY_LIMIT	7	/* (M38) as named */
-#endif
 
 #define DEFAULT_LONG_RETRY_LIMIT	7	/* (M38) LongRetryLimit. Its value is in the range 0-15. */
 #define DEFAULT_SHORT_RETRY_LIMIT	7	/* (M38) ShortRetryLimit. Its value is in the range 0-15. */
@@ -168,4 +166,75 @@
 	u32	SQ3_filter[MAX_SQ3_FILTER_SIZE];
 	u32	SQ3_index;
 };
+
+/* =====================================================================
+ * Function declaration
+ * =====================================================================
+ */
+void hal_remove_mapping_key(struct hw_data *hw_data, u8 *mac_addr);
+void hal_remove_default_key(struct hw_data *hw_data, u32 index);
+unsigned char hal_set_mapping_key(struct hw_data *adapter, u8 *mac_addr,
+				  u8 null_key, u8 wep_on, u8 *tx_tsc,
+				  u8 *rx_tsc, u8 key_type, u8 key_len,
+				  u8 *key_data);
+unsigned char hal_set_default_key(struct hw_data *adapter, u8 index,
+				  u8 null_key, u8 wep_on, u8 *tx_tsc,
+				  u8 *rx_tsc, u8 key_type, u8 key_len,
+				  u8 *key_data);
+void hal_clear_all_default_key(struct hw_data *hw_data);
+void hal_clear_all_group_key(struct hw_data *hw_data);
+void hal_clear_all_mapping_key(struct hw_data *hw_data);
+void hal_clear_all_key(struct hw_data *hw_data);
+void hal_set_power_save_mode(struct hw_data *hw_data, unsigned char power_save,
+			     unsigned char wakeup, unsigned char dtim);
+void hal_get_power_save_mode(struct hw_data *hw_data, u8 *in_pwr_save);
+void hal_set_slot_time(struct hw_data *hw_data, u8 type);
+
+#define hal_set_atim_window(_A, _ATM)
+
+void hal_start_bss(struct hw_data *hw_data, u8 mac_op_mode);
+
+/* 0:BSS STA 1:IBSS STA */
+void hal_join_request(struct hw_data *hw_data, u8 bss_type);
+
+void hal_stop_sync_bss(struct hw_data *hw_data);
+void hal_resume_sync_bss(struct hw_data *hw_data);
+void hal_set_aid(struct hw_data *hw_data, u16 aid);
+void hal_set_bssid(struct hw_data *hw_data, u8 *bssid);
+void hal_get_bssid(struct hw_data *hw_data, u8 *bssid);
+void hal_set_listen_interval(struct hw_data *hw_data, u16 listen_interval);
+void hal_set_cap_info(struct hw_data *hw_data, u16 capability_info);
+void hal_set_ssid(struct hw_data *hw_data, u8 *ssid, u8 ssid_len);
+void hal_start_tx0(struct hw_data *hw_data);
+
+#define hal_get_cwmin(_A)	((_A)->cwmin)
+
+void hal_set_cwmax(struct hw_data *hw_data, u16 cwin_max);
+
+#define hal_get_cwmax(_A)	((_A)->cwmax)
+
+void hal_set_rsn_wpa(struct hw_data *hw_data, u32 *rsn_ie_bitmap,
+		     u32 *rsn_oui_type , unsigned char desired_auth_mode);
+void hal_set_connect_info(struct hw_data *hw_data, unsigned char bo_connect);
+u8 hal_get_est_sq3(struct hw_data *hw_data, u8 count);
+void hal_descriptor_indicate(struct hw_data *hw_data,
+			     struct wb35_descriptor *des);
+u8 hal_get_antenna_number(struct hw_data *hw_data);
+u32 hal_get_bss_pk_cnt(struct hw_data *hw_data);
+
+#define hal_get_region_from_EEPROM(_A)	((_A)->reg.EEPROMRegion)
+#define hal_get_tx_buffer(_A, _B)	Wb35Tx_get_tx_buffer(_A, _B)
+#define hal_software_set(_A)		(_A->SoftwareSet)
+#define hal_driver_init_OK(_A)		(_A->IsInitOK)
+#define hal_rssi_boundary_high(_A)	(_A->RSSI_high)
+#define hal_rssi_boundary_low(_A)	(_A->RSSI_low)
+#define hal_scan_interval(_A)		(_A->Scan_Interval)
+
+#define PHY_DEBUG(msg, args...)
+
+/* return 100ms count */
+#define hal_get_time_count(_P)		(_P->time_count / 10)
+
+#define hal_ibss_disconnect(_A)		(hal_stop_sync_bss(_A))
+
 #endif
diff --git a/drivers/staging/winbond/wb35rx.c b/drivers/staging/winbond/wb35rx.c
index 448514a..f118eeb 100644
--- a/drivers/staging/winbond/wb35rx.c
+++ b/drivers/staging/winbond/wb35rx.c
@@ -14,7 +14,6 @@
 #include <linux/slab.h>
 
 #include "core.h"
-#include "sysdef.h"
 #include "wb35rx_f.h"
 
 static void packet_came(struct ieee80211_hw *hw, char *pRxBufferAddress, int PacketSize)
@@ -109,10 +108,7 @@
 
 			/* Basic check for Rx length. Is length valid? */
 			if (PacketSize > MAX_PACKET_SIZE) {
-#ifdef _PE_RX_DUMP_
-				printk("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
-#endif
-
+				pr_debug("Serious ERROR : Rx data size too long, size =%d\n", PacketSize);
 				pWb35Rx->EP3vm_state = VM_STOP;
 				pWb35Rx->Ep3ErrorCount2++;
 				break;
@@ -174,7 +170,7 @@
 	/* The IRP is completed */
 	pWb35Rx->EP3vm_state = VM_COMPLETED;
 
-	if (pHwData->SurpriseRemove || pHwData->HwStop) /* Must be here, or RxBufferId is invalid */
+	if (pHwData->SurpriseRemove) /* Must be here, or RxBufferId is invalid */
 		goto error;
 
 	if (pWb35Rx->rx_halt)
@@ -186,9 +182,7 @@
 
 	/* The URB is completed, check the result */
 	if (pWb35Rx->EP3VM_status != 0) {
-#ifdef _PE_USB_STATE_DUMP_
-		printk("EP3 IoCompleteRoutine return error\n");
-#endif
+		pr_debug("EP3 IoCompleteRoutine return error\n");
 		pWb35Rx->EP3vm_state = VM_STOP;
 		goto error;
 	}
@@ -239,7 +233,7 @@
 	u32			RxBufferId;
 
 	/* Issuing URB */
-	if (pHwData->SurpriseRemove || pHwData->HwStop)
+	if (pHwData->SurpriseRemove)
 		goto error;
 
 	if (pWb35Rx->rx_halt)
@@ -249,9 +243,7 @@
 	RxBufferId = pWb35Rx->RxBufferId;
 	if (!pWb35Rx->RxOwner[RxBufferId]) {
 		/* It's impossible to run here. */
-#ifdef _PE_RX_DUMP_
-		printk("Rx driver fifo unavailable\n");
-#endif
+		pr_debug("Rx driver fifo unavailable\n");
 		goto error;
 	}
 
@@ -268,8 +260,8 @@
 	}
 	pRxBufferAddress = pWb35Rx->pDRx;
 
-	usb_fill_bulk_urb(urb, pHwData->WbUsb.udev,
-			  usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
+	usb_fill_bulk_urb(urb, pHwData->udev,
+			  usb_rcvbulkpipe(pHwData->udev, 3),
 			  pRxBufferAddress, MAX_USB_RX_BUFFER,
 			  Wb35Rx_Complete, hw);
 
@@ -337,9 +329,7 @@
 	/* Canceling the Irp if already sends it out. */
 	if (pWb35Rx->EP3vm_state == VM_RUNNING) {
 		usb_unlink_urb(pWb35Rx->RxUrb); /* Only use unlink, let Wb35Rx_destroy to free them */
-#ifdef _PE_RX_DUMP_
-		printk("EP3 Rx stop\n");
-#endif
+		pr_debug("EP3 Rx stop\n");
 	}
 }
 
@@ -355,8 +345,6 @@
 
 	if (pWb35Rx->RxUrb)
 		usb_free_urb(pWb35Rx->RxUrb);
-#ifdef _PE_RX_DUMP_
-	printk("Wb35Rx_destroy OK\n");
-#endif
+	pr_debug("Wb35Rx_destroy OK\n");
 }
 
diff --git a/drivers/staging/winbond/wb35rx_f.h b/drivers/staging/winbond/wb35rx_f.h
index 98acce5..1fdf65e 100644
--- a/drivers/staging/winbond/wb35rx_f.h
+++ b/drivers/staging/winbond/wb35rx_f.h
@@ -2,7 +2,7 @@
 #define __WINBOND_WB35RX_F_H
 
 #include <net/mac80211.h>
-#include "wbhal_s.h"
+#include "wbhal.h"
 
 //====================================
 // Interface function declare
diff --git a/drivers/staging/winbond/wb35tx.c b/drivers/staging/winbond/wb35tx.c
index 2a9d055..44fc3fe 100644
--- a/drivers/staging/winbond/wb35tx.c
+++ b/drivers/staging/winbond/wb35tx.c
@@ -13,7 +13,6 @@
 
 #include "wb35tx_f.h"
 #include "mds_f.h"
-#include "sysdef.h"
 
 unsigned char
 Wb35Tx_get_tx_buffer(struct hw_data * pHwData, u8 **pBuffer)
@@ -41,7 +40,7 @@
 	pWb35Tx->TxSendIndex++;
 	pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER;
 
-	if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+	if (pHwData->SurpriseRemove) // Let WbWlanHalt to handle surprise remove
 		goto error;
 
 	if (pWb35Tx->tx_halt)
@@ -74,7 +73,7 @@
 	u32		SendIndex;
 
 
-	if (pHwData->SurpriseRemove || pHwData->HwStop)
+	if (pHwData->SurpriseRemove)
 		goto cleanup;
 
 	if (pWb35Tx->tx_halt)
@@ -89,8 +88,8 @@
 	//
 	// Issuing URB
 	//
-	usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
-			  usb_sndbulkpipe(pHwData->WbUsb.udev, 4),
+	usb_fill_bulk_urb(pUrb, pHwData->udev,
+			  usb_sndbulkpipe(pHwData->udev, 4),
 			  pTxBufferAddress, pMds->TxBufferSize[ SendIndex ],
 			  Wb35Tx_complete, adapter);
 
@@ -153,16 +152,12 @@
 	// Trying to canceling the Trp of EP2
 	if (pWb35Tx->EP2vm_state == VM_RUNNING)
 		usb_unlink_urb( pWb35Tx->Tx2Urb ); // Only use unlink, let Wb35Tx_destrot to free them
-	#ifdef _PE_TX_DUMP_
-	printk("EP2 Tx stop\n");
-	#endif
+	pr_debug("EP2 Tx stop\n");
 
 	// Trying to canceling the Irp of EP4
 	if (pWb35Tx->EP4vm_state == VM_RUNNING)
 		usb_unlink_urb( pWb35Tx->Tx4Urb ); // Only use unlink, let Wb35Tx_destrot to free them
-	#ifdef _PE_TX_DUMP_
-	printk("EP4 Tx stop\n");
-	#endif
+	pr_debug("EP4 Tx stop\n");
 }
 
 //======================================================
@@ -182,9 +177,7 @@
 	if (pWb35Tx->Tx2Urb)
 		usb_free_urb( pWb35Tx->Tx2Urb );
 
-	#ifdef _PE_TX_DUMP_
-	printk("Wb35Tx_destroy OK\n");
-	#endif
+	pr_debug("Wb35Tx_destroy OK\n");
 }
 
 void Wb35Tx_CurrentTime(struct wbsoft_priv *adapter, u32 TimeCount)
@@ -222,7 +215,7 @@
 	pWb35Tx->EP2VM_status = pUrb->status;
 
 	// For Linux 2.4. Interrupt will always trigger
-	if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
+	if (pHwData->SurpriseRemove) // Let WbWlanHalt to handle surprise remove
 		goto error;
 
 	if (pWb35Tx->tx_halt)
@@ -263,7 +256,7 @@
 	u32 *	pltmp = (u32 *)pWb35Tx->EP2_buf;
 	int		retv;
 
-	if (pHwData->SurpriseRemove || pHwData->HwStop)
+	if (pHwData->SurpriseRemove)
 		goto error;
 
 	if (pWb35Tx->tx_halt)
@@ -272,16 +265,14 @@
 	//
 	// Issuing URB
 	//
-	usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
+	usb_fill_int_urb( pUrb, pHwData->udev, usb_rcvintpipe(pHwData->udev,2),
 			  pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, adapter, 32);
 
 	pWb35Tx->EP2vm_state = VM_RUNNING;
 	retv = usb_submit_urb(pUrb, GFP_ATOMIC);
 
 	if (retv < 0) {
-		#ifdef _PE_TX_DUMP_
-		printk("EP2 Tx Irp sending error\n");
-		#endif
+		pr_debug("EP2 Tx Irp sending error\n");
 		goto error;
 	}
 
diff --git a/drivers/staging/winbond/wb35tx_f.h b/drivers/staging/winbond/wb35tx_f.h
index 1d3b515..018fd35 100644
--- a/drivers/staging/winbond/wb35tx_f.h
+++ b/drivers/staging/winbond/wb35tx_f.h
@@ -2,7 +2,6 @@
 #define __WINBOND_WB35TX_F_H
 
 #include "core.h"
-#include "wbhal_f.h"
 
 /*
  * ====================================
diff --git a/drivers/staging/winbond/wbhal.h b/drivers/staging/winbond/wbhal.h
new file mode 100644
index 0000000..39e84a0
--- /dev/null
+++ b/drivers/staging/winbond/wbhal.h
@@ -0,0 +1,513 @@
+#ifndef __WINBOND_WBHAL_S_H
+#define __WINBOND_WBHAL_S_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h> /* for ETH_ALEN */
+
+#define HAL_LED_SET_MASK	0x001c
+#define HAL_LED_SET_SHIFT	2
+
+/* supported RF type */
+#define RF_MAXIM_2825		0
+#define RF_MAXIM_2827		1
+#define RF_MAXIM_2828		2
+#define RF_MAXIM_2829		3
+#define RF_MAXIM_V1		15
+#define RF_AIROHA_2230		16
+#define RF_AIROHA_7230		17
+#define RF_AIROHA_2230S		18
+#define RF_WB_242		33
+#define RF_WB_242_1		34
+#define RF_DECIDE_BY_INF	255
+
+/*
+ * ----------------------------------------------------------------
+ * The following defines connect to the upper layer.
+ *	Users must modify them to match the connection between the HAL and the upper layer.
+ * ----------------------------------------------------------------
+ */
+
+/*
+ * ==============================
+ * Common define
+ * ==============================
+ */
+/* Bit 5 */
+#define HAL_USB_MODE_BURST(_H)			(_H->SoftwareSet & 0x20)
+
+/* Scan interval */
+#define SCAN_MAX_CHNL_TIME			(50)
+
+/* For TxL2 frame type recognition */
+#define FRAME_TYPE_802_3_DATA			0
+#define FRAME_TYPE_802_11_MANAGEMENT		1
+#define FRAME_TYPE_802_11_MANAGEMENT_CHALLENGE	2
+#define FRAME_TYPE_802_11_CONTROL		3
+#define FRAME_TYPE_802_11_DATA			4
+#define FRAME_TYPE_PROMISCUOUS			5
+
+/* The following definitions are used for frame conversion ------------ */
+#define DOT_11_SEQUENCE_OFFSET			22 /* Sequence control offset */
+#define DOT_3_TYPE_OFFSET			12
+#define DOT_11_MAC_HEADER_SIZE			24
+#define DOT_11_SNAP_SIZE			6
+#define DOT_11_TYPE_OFFSET			30 /* The start offset of 802.11 Frame. Type encapsulation. */
+#define DEFAULT_SIFSTIME			10
+#define DEFAULT_FRAGMENT_THRESHOLD		2346 /* No fragment */
+#define DEFAULT_MSDU_LIFE_TIME			0xffff
+
+#define LONG_PREAMBLE_PLUS_PLCPHEADER_TIME		(144 + 48)
+#define SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME		(72 + 24)
+#define PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION	(16 + 4 + 6)
+#define Tsym						4
+
+/*  Frame Type of Bits (2, 3)----------------------------------- */
+#define MAC_TYPE_MANAGEMENT			0x00
+#define MAC_TYPE_CONTROL			0x04
+#define MAC_TYPE_DATA				0x08
+#define MASK_FRAGMENT_NUMBER			0x000F
+#define SEQUENCE_NUMBER_SHIFT			4
+
+#define  HAL_WOL_TYPE_WAKEUP_FRAME		0x01
+#define  HAL_WOL_TYPE_MAGIC_PACKET		0x02
+
+#define HAL_KEYTYPE_WEP40			0
+#define HAL_KEYTYPE_WEP104			1
+#define HAL_KEYTYPE_TKIP			2 /* 128 bit key */
+#define HAL_KEYTYPE_AES_CCMP			3 /* 128 bit key */
+
+/* For VM state */
+enum {
+	VM_STOP = 0,
+	VM_RUNNING,
+	VM_COMPLETED
+};
+
+/*
+ * ================================
+ * Normal Key table format
+ * ================================
+ */
+
+/* The order of KEY index is MAPPING_KEY_START_INDEX > GROUP_KEY_START_INDEX */
+#define MAX_KEY_TABLE			24 /* 24 entries for storing key data */
+#define GROUP_KEY_START_INDEX		4
+#define MAPPING_KEY_START_INDEX		8
+
+/*
+ * =========================================
+ * Descriptor
+ * =========================================
+ */
+#define MAX_DESCRIPTOR_BUFFER_INDEX	8 /* Must be a multiple of 2 */
+#define FLAG_ERROR_TX_MASK		0x000000bf
+#define FLAG_ERROR_RX_MASK		0x0000083f
+
+#define FLAG_BAND_RX_MASK		0x10000000 /* Bit 28 */
+
+struct R00_descriptor {
+	union {
+		u32	value;
+#ifdef _BIG_ENDIAN_
+		struct {
+			u32	R00_packet_or_buffer_status:1;
+			u32	R00_packet_in_fifo:1;
+			u32	R00_RESERVED:2;
+			u32	R00_receive_byte_count:12;
+			u32	R00_receive_time_index:16;
+		};
+#else
+		struct {
+			u32	R00_receive_time_index:16;
+			u32	R00_receive_byte_count:12;
+			u32	R00_RESERVED:2;
+			u32	R00_packet_in_fifo:1;
+			u32	R00_packet_or_buffer_status:1;
+		};
+#endif
+	};
+};
+
+struct T00_descriptor {
+	union {
+		u32	value;
+#ifdef _BIG_ENDIAN_
+		struct {
+			u32	T00_first_mpdu:1; /* for hardware use */
+			u32	T00_last_mpdu:1; /* for hardware use */
+			u32	T00_IsLastMpdu:1;/* 0:not 1:Yes for software used */
+			u32	T00_IgnoreResult:1;/* The same mechanism with T00 setting. */
+			u32	T00_RESERVED_ID:2;/* 3 bit ID reserved */
+			u32	T00_tx_packet_id:4;
+			u32	T00_RESERVED:4;
+			u32	T00_header_length:6;
+			u32	T00_frame_length:12;
+		};
+#else
+		struct {
+			u32	T00_frame_length:12;
+			u32	T00_header_length:6;
+			u32	T00_RESERVED:4;
+			u32	T00_tx_packet_id:4;
+			u32	T00_RESERVED_ID:2; /* 3 bit ID reserved */
+			u32	T00_IgnoreResult:1; /* The same mechanism with T00 setting. */
+			u32	T00_IsLastMpdu:1; /* 0:not 1:Yes for software used */
+			u32	T00_last_mpdu:1; /* for hardware use */
+			u32	T00_first_mpdu:1; /* for hardware use */
+		};
+#endif
+	};
+};
+
+struct R01_descriptor {
+	union {
+		u32	value;
+#ifdef _BIG_ENDIAN_
+		struct {
+			u32	R01_RESERVED:3;
+			u32	R01_mod_type:1;
+			u32	R01_pre_type:1;
+			u32	R01_data_rate:3;
+			u32	R01_AGC_state:8;
+			u32	R01_LNA_state:2;
+			u32	R01_decryption_method:2;
+			u32	R01_mic_error:1;
+			u32	R01_replay:1;
+			u32	R01_broadcast_frame:1;
+			u32	R01_multicast_frame:1;
+			u32	R01_directed_frame:1;
+			u32	R01_receive_frame_antenna_selection:1;
+			u32	R01_frame_receive_during_atim_window:1;
+			u32	R01_protocol_version_error:1;
+			u32	R01_authentication_frame_icv_error:1;
+			u32	R01_null_key_to_authentication_frame:1;
+			u32	R01_icv_error:1;
+			u32	R01_crc_error:1;
+		};
+#else
+		struct {
+			u32	R01_crc_error:1;
+			u32	R01_icv_error:1;
+			u32	R01_null_key_to_authentication_frame:1;
+			u32	R01_authentication_frame_icv_error:1;
+			u32	R01_protocol_version_error:1;
+			u32	R01_frame_receive_during_atim_window:1;
+			u32	R01_receive_frame_antenna_selection:1;
+			u32	R01_directed_frame:1;
+			u32	R01_multicast_frame:1;
+			u32	R01_broadcast_frame:1;
+			u32	R01_replay:1;
+			u32	R01_mic_error:1;
+			u32	R01_decryption_method:2;
+			u32	R01_LNA_state:2;
+			u32	R01_AGC_state:8;
+			u32	R01_data_rate:3;
+			u32	R01_pre_type:1;
+			u32	R01_mod_type:1;
+			u32	R01_RESERVED:3;
+		};
+#endif
+	};
+};
+
+struct T01_descriptor {
+	union {
+		u32	value;
+#ifdef _BIG_ENDIAN_
+		struct {
+			u32	T01_rts_cts_duration:16;
+			u32	T01_fall_back_rate:3;
+			u32	T01_add_rts:1;
+			u32	T01_add_cts:1;
+			u32	T01_modulation_type:1;
+			u32	T01_plcp_header_length:1;
+			u32	T01_transmit_rate:3;
+			u32	T01_wep_id:2;
+			u32	T01_add_challenge_text:1;
+			u32	T01_inhibit_crc:1;
+			u32	T01_loop_back_wep_mode:1;
+			u32	T01_retry_abort_ebable:1;
+		};
+#else
+		struct {
+			u32	T01_retry_abort_ebable:1;
+			u32	T01_loop_back_wep_mode:1;
+			u32	T01_inhibit_crc:1;
+			u32	T01_add_challenge_text:1;
+			u32	T01_wep_id:2;
+			u32	T01_transmit_rate:3;
+			u32	T01_plcp_header_length:1;
+			u32	T01_modulation_type:1;
+			u32	T01_add_cts:1;
+			u32	T01_add_rts:1;
+			u32	T01_fall_back_rate:3;
+			u32	T01_rts_cts_duration:16;
+		};
+#endif
+	};
+};
+
+struct T02_descriptor {
+	union {
+		u32	value;
+#ifdef _BIG_ENDIAN_
+		struct {
+			u32	T02_IsLastMpdu:1; /* The same mechanism with T00 setting */
+			u32	T02_IgnoreResult:1; /* The same mechanism with T00 setting. */
+			u32	T02_RESERVED_ID:2; /* The same mechanism with T00 setting */
+			u32	T02_Tx_PktID:4;
+			u32	T02_MPDU_Cnt:4;
+			u32	T02_RTS_Cnt:4;
+			u32	T02_RESERVED:7;
+			u32	T02_transmit_complete:1;
+			u32	T02_transmit_abort_due_to_TBTT:1;
+			u32	T02_effective_transmission_rate:1;
+			u32	T02_transmit_without_encryption_due_to_wep_on_false:1;
+			u32	T02_discard_due_to_null_wep_key:1;
+			u32	T02_RESERVED_1:1;
+			u32	T02_out_of_MaxTxMSDULiftTime:1;
+			u32	T02_transmit_abort:1;
+			u32	T02_transmit_fail:1;
+		};
+#else
+		struct {
+			u32	T02_transmit_fail:1;
+			u32	T02_transmit_abort:1;
+			u32	T02_out_of_MaxTxMSDULiftTime:1;
+			u32	T02_RESERVED_1:1;
+			u32	T02_discard_due_to_null_wep_key:1;
+			u32	T02_transmit_without_encryption_due_to_wep_on_false:1;
+			u32	T02_effective_transmission_rate:1;
+			u32	T02_transmit_abort_due_to_TBTT:1;
+			u32	T02_transmit_complete:1;
+			u32	T02_RESERVED:7;
+			u32	T02_RTS_Cnt:4;
+			u32	T02_MPDU_Cnt:4;
+			u32	T02_Tx_PktID:4;
+			u32	T02_RESERVED_ID:2; /* The same mechanism with T00 setting */
+			u32	T02_IgnoreResult:1; /* The same mechanism with T00 setting. */
+			u32	T02_IsLastMpdu:1; /* The same mechanism with T00 setting */
+		};
+#endif
+	};
+};
+
+struct wb35_descriptor { /* Skip length = 8 DWORD */
+	/* Descriptor ID --- this field is not cleared by the descriptor reset operation */
+	u8	Descriptor_ID;
+	/* ---------------------- The region above is not cleared by DESCRIPTOR_RESET ------ */
+	u8	RESERVED[3];
+
+	u16	FragmentThreshold;
+	u8	InternalUsed; /* Only used internally by the descriptor operations */
+	u8	Type; /* 0: 802.3 1:802.11 data frame 2:802.11 management frame */
+
+	u8	PreambleMode;/* 0: short 1:long */
+	u8	TxRate;
+	u8	FragmentCount;
+	u8	EapFix; /* For speed up key install */
+
+	/* For R00 and T00 ------------------------------ */
+	union {
+		struct R00_descriptor	R00;
+		struct T00_descriptor	T00;
+	};
+
+	/* For R01 and T01 ------------------------------ */
+	union {
+		struct R01_descriptor	R01;
+		struct T01_descriptor	T01;
+	};
+
+	/* For R02 and T02 ------------------------------ */
+	union {
+		u32		R02;
+		struct T02_descriptor	T02;
+	};
+
+	/* For R03 and T03 ------------------------------ */
+	/* For software used */
+	union {
+		u32	R03;
+		u32	T03;
+		struct {
+			u8	buffer_number;
+			u8	buffer_start_index;
+			u16	buffer_total_size;
+		};
+	};
+
+	/* For storing the buffer */
+	u16	buffer_size[MAX_DESCRIPTOR_BUFFER_INDEX];
+	void	*buffer_address[MAX_DESCRIPTOR_BUFFER_INDEX];
+};
+
+#define MAX_TXVGA_EEPROM		9	/* How many words (u16) of EEPROM are used for TxVGA */
+#define MAX_RF_PARAMETER		32
+
+struct txvga_for_50 {
+	u8	ChanNo;
+	u8	TxVgaValue;
+};
+
+/*
+ * ==============================================
+ * Device related include
+ * ==============================================
+ */
+
+#include "wb35reg_s.h"
+#include "wb35tx_s.h"
+#include "wb35rx_s.h"
+
+/* For Hal using ============================================ */
+struct hw_data {
+	/* For compatibility with 33 */
+	u32	revision;
+	u32	BB3c_cal; /* The value for Tx calibration comes from EEPROM */
+	u32	BB54_cal; /* The value for Rx calibration comes from EEPROM */
+
+	/* For surprise remove */
+	u32	SurpriseRemove; /* 0: Normal 1: Surprise remove */
+	u8	IsKeyPreSet;
+	u8	CalOneTime;
+
+	u8	VCO_trim;
+
+	u32	FragCount;
+	u32	DMAFix; /* V1_DMA_FIX: this variable can be removed if the driver wants to save memory space for V2. */
+
+	/*
+	 * ===============================================
+	 * Definition for MAC address
+	 * ===============================================
+	 */
+	u8	PermanentMacAddress[ETH_ALEN + 2]; /* The Ethernet address stored in EEPROM; + 2 for 8-byte alignment */
+	u8	CurrentMacAddress[ETH_ALEN + 2]; /* The Ethernet address currently in use; + 2 for 8-byte alignment */
+
+	/*
+	 * =========================================
+	 * Definition for 802.11
+	 * =========================================
+	 */
+	u8	*bssid_pointer; /* Used by hal_get_bssid for return value */
+	u8	bssid[8]; /* Only 6 bytes are used; 8 bytes are required for the read buffer */
+	u8	ssid[32]; /* maximum ssid length is 32 bytes */
+
+	u16	AID;
+	u8	ssid_length;
+	u8	Channel;
+
+	u16	ListenInterval;
+	u16	CapabilityInformation;
+
+	u16	BeaconPeriod;
+	u16	ProbeDelay;
+
+	u8	bss_type;/* 0: IBSS_NET or 1:ESS_NET */
+	u8	preamble;/* 0: short preamble, 1: long preamble */
+	u8	slot_time_select; /* 9 or 20 value */
+	u8	phy_type; /* Phy select */
+
+	u32	phy_para[MAX_RF_PARAMETER];
+	u32	phy_number;
+
+	u32	CurrentRadioSw; /* 0:On 1:Off */
+	u32	CurrentRadioHw; /* 0:On 1:Off */
+
+	u8	*power_save_point; /* Used by hal_get_power_save_mode for return value */
+	u8	cwmin;
+	u8	desired_power_save;
+	u8	dtim; /* Is running dtim */
+	u8	mapping_key_replace_index; /* The next index in the key table to be replaced */
+
+	u16	MaxReceiveLifeTime;
+	u16	FragmentThreshold;
+	u16	FragmentThreshold_tmp;
+	u16	cwmax;
+
+	u8	Key_slot[MAX_KEY_TABLE][8]; /* Ownership record for key slot. For Alignment */
+	u32	Key_content[MAX_KEY_TABLE][12]; /* 10DW for each entry + 2 for burst command (Off and On valid bit) */
+	u8	CurrentDefaultKeyIndex;
+	u32	CurrentDefaultKeyLength;
+
+	/*
+	 * ==================================================
+	 * Variable for each module
+	 * ==================================================
+	 */
+	struct usb_device	*udev;
+	struct wb35_reg		reg;	/* Need Wb35Reg.h */
+	struct wb35_tx		Wb35Tx; /* Need Wb35Tx.h */
+	struct wb35_rx		Wb35Rx; /* Need Wb35Rx.h */
+
+	struct timer_list	LEDTimer; /* For LED */
+
+	u32			LEDpoint; /* For LED */
+
+	u32			dto_tx_retry_count;
+	u32			dto_tx_frag_count;
+	u32			rx_ok_count[13]; /* index=0: total rx ok */
+	u32			rx_err_count[13]; /* index=0: total rx err */
+
+	/* for Tx debug */
+	u32			tx_TBTT_start_count;
+	u32			tx_ETR_count;
+	u32			tx_WepOn_false_count;
+	u32			tx_Null_key_count;
+	u32			tx_retry_count[8];
+
+	u8			PowerIndexFromEEPROM; /* For 2412MHz */
+	u8			power_index;
+	u8			IsWaitJoinComplete; /* TRUE: set join request */
+	u8			band;
+
+	u16			SoftwareSet;
+	u16			Reserved_s;
+
+	u32			IsInitOK; /* 0: Driver starting 1: Driver init OK */
+
+	/* For Phy calibration */
+	s32			iq_rsdl_gain_tx_d2;
+	s32			iq_rsdl_phase_tx_d2;
+	u32			txvga_setting_for_cal;
+
+	u8			TxVgaSettingInEEPROM[(((MAX_TXVGA_EEPROM * 2) + 3) & ~0x03)]; /* For EEPROM value */
+	u8			TxVgaFor24[16]; /* Max is 14, 2 for alignment */
+	struct txvga_for_50		TxVgaFor50[36];	/* 35 channels in 5G. 35x2 = 70 bytes; 2 for alignment */
+
+	u16			Scan_Interval;
+	u16			RESERVED6;
+
+	/* LED control */
+	u32		LED_control;
+	/*
+	 * LED_control 4 byte: Gray_Led_1[3] Gray_Led_0[2] Led[1] Led[0]
+	 * Gray_Led
+	 *		For Led gray setting
+	 * Led
+	 *		0: normal control,
+	 *			LED behavior is decided by the EEPROM setting
+	 *		1: Turn off specific LED
+	 *		2: Always on specific LED
+	 *		3: slow blinking specific LED
+	 *		4: fast blinking specific LED
+	 *		5: WPS led control is set. Led0 is Red, Led1 is Green
+	 *
+	 * Led[1] is parameter for WPS LED mode
+	 *		1:InProgress
+	 *		2: Error
+	 *		3: Session overlap
+	 *		4: Success control
+	 */
+	u32		LED_LinkOn;	/* Turn LED on control */
+	u32		LED_Scanning;	/* Let LED in scan process control */
+	u32		LED_Blinking;	/* Temp variable for shining */
+	u32		RxByteCountLast;
+	u32		TxByteCountLast;
+
+	/* For global timer */
+	u32		time_count;	/* TICK_TIME_100ms 1 = 100ms */
+};
+
+#endif
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
deleted file mode 100644
index fc78c14..0000000
--- a/drivers/staging/winbond/wbhal_f.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * =====================================================================
- * Device related include
- * =====================================================================
-*/
-#include "wb35reg_f.h"
-#include "wb35tx_f.h"
-#include "wb35rx_f.h"
-
-#include "core.h"
-
-/* =====================================================================
- * Function declaration
- * =====================================================================
- */
-void hal_remove_mapping_key(struct hw_data *hw_data, u8 *mac_addr);
-void hal_remove_default_key(struct hw_data *hw_data, u32 index);
-unsigned char hal_set_mapping_key(struct hw_data *adapter, u8 *mac_addr,
-				  u8 null_key, u8 wep_on, u8 *tx_tsc,
-				  u8 *rx_tsc, u8 key_type, u8 key_len,
-				  u8 *key_data);
-unsigned char hal_set_default_key(struct hw_data *adapter, u8 index,
-				  u8 null_key, u8 wep_on, u8 *tx_tsc,
-				  u8 *rx_tsc, u8 key_type, u8 key_len,
-				  u8 *key_data);
-void hal_clear_all_default_key(struct hw_data *hw_data);
-void hal_clear_all_group_key(struct hw_data *hw_data);
-void hal_clear_all_mapping_key(struct hw_data *hw_data);
-void hal_clear_all_key(struct hw_data *hw_data);
-void hal_set_power_save_mode(struct hw_data *hw_data, unsigned char power_save,
-			     unsigned char wakeup, unsigned char dtim);
-void hal_get_power_save_mode(struct hw_data *hw_data, u8 *in_pwr_save);
-void hal_set_slot_time(struct hw_data *hw_data, u8 type);
-
-#define hal_set_atim_window(_A, _ATM)
-
-void hal_start_bss(struct hw_data *hw_data, u8 mac_op_mode);
-
-/* 0:BSS STA 1:IBSS STA */
-void hal_join_request(struct hw_data *hw_data, u8 bss_type);
-
-void hal_stop_sync_bss(struct hw_data *hw_data);
-void hal_resume_sync_bss(struct hw_data *hw_data);
-void hal_set_aid(struct hw_data *hw_data, u16 aid);
-void hal_set_bssid(struct hw_data *hw_data, u8 *bssid);
-void hal_get_bssid(struct hw_data *hw_data, u8 *bssid);
-void hal_set_listen_interval(struct hw_data *hw_data, u16 listen_interval);
-void hal_set_cap_info(struct hw_data *hw_data, u16 capability_info);
-void hal_set_ssid(struct hw_data *hw_data, u8 *ssid, u8 ssid_len);
-void hal_start_tx0(struct hw_data *hw_data);
-
-#define hal_get_cwmin(_A)	((_A)->cwmin)
-
-void hal_set_cwmax(struct hw_data *hw_data, u16 cwin_max);
-
-#define hal_get_cwmax(_A)	((_A)->cwmax)
-
-void hal_set_rsn_wpa(struct hw_data *hw_data, u32 *rsn_ie_bitmap,
-		     u32 *rsn_oui_type , unsigned char desired_auth_mode);
-void hal_set_connect_info(struct hw_data *hw_data, unsigned char bo_connect);
-u8 hal_get_est_sq3(struct hw_data *hw_data, u8 count);
-void hal_descriptor_indicate(struct hw_data *hw_data,
-			     struct wb35_descriptor *des);
-u8 hal_get_antenna_number(struct hw_data *hw_data);
-u32 hal_get_bss_pk_cnt(struct hw_data *hw_data);
-
-#define hal_get_region_from_EEPROM(_A)	((_A)->reg.EEPROMRegion)
-#define hal_get_tx_buffer(_A, _B)	Wb35Tx_get_tx_buffer(_A, _B)
-#define hal_software_set(_A)		(_A->SoftwareSet)
-#define hal_driver_init_OK(_A)		(_A->IsInitOK)
-#define hal_rssi_boundary_high(_A)	(_A->RSSI_high)
-#define hal_rssi_boundary_low(_A)	(_A->RSSI_low)
-#define hal_scan_interval(_A)		(_A->Scan_Interval)
-
-#define PHY_DEBUG(msg, args...)
-
-/* return 100ms count */
-#define hal_get_time_count(_P)		(_P->time_count / 10)
-#define hal_detect_error(_P)		(_P->WbUsb.DetectCount)
-
-#define hal_ibss_disconnect(_A)		(hal_stop_sync_bss(_A))
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal_s.h
deleted file mode 100644
index 821a1b3..0000000
--- a/drivers/staging/winbond/wbhal_s.h
+++ /dev/null
@@ -1,525 +0,0 @@
-#ifndef __WINBOND_WBHAL_S_H
-#define __WINBOND_WBHAL_S_H
-
-#include <linux/types.h>
-#include <linux/if_ether.h> /* for ETH_ALEN */
-
-#define HAL_LED_SET_MASK	0x001c
-#define HAL_LED_SET_SHIFT	2
-
-/* supported RF type */
-#define RF_MAXIM_2825		0
-#define RF_MAXIM_2827		1
-#define RF_MAXIM_2828		2
-#define RF_MAXIM_2829		3
-#define RF_MAXIM_V1		15
-#define RF_AIROHA_2230		16
-#define RF_AIROHA_7230		17
-#define RF_AIROHA_2230S		18
-#define RF_WB_242		33
-#define RF_WB_242_1		34
-#define RF_DECIDE_BY_INF	255
-
-/*
- * ----------------------------------------------------------------
- * The follow define connect to upper layer
- *	User must modify for connection between HAL and upper layer
- * ----------------------------------------------------------------
- */
-
-/*
- * ==============================
- * Common define
- * ==============================
- */
-/* Bit 5 */
-#define HAL_USB_MODE_BURST(_H)			(_H->SoftwareSet & 0x20)
-
-/* Scan interval */
-#define SCAN_MAX_CHNL_TIME			(50)
-
-/* For TxL2 Frame typr recognise */
-#define FRAME_TYPE_802_3_DATA			0
-#define FRAME_TYPE_802_11_MANAGEMENT		1
-#define FRAME_TYPE_802_11_MANAGEMENT_CHALLENGE	2
-#define FRAME_TYPE_802_11_CONTROL		3
-#define FRAME_TYPE_802_11_DATA			4
-#define FRAME_TYPE_PROMISCUOUS			5
-
-/* The follow definition is used for convert the frame------------ */
-#define DOT_11_SEQUENCE_OFFSET			22 /* Sequence control offset */
-#define DOT_3_TYPE_OFFSET			12
-#define DOT_11_MAC_HEADER_SIZE			24
-#define DOT_11_SNAP_SIZE			6
-#define DOT_11_TYPE_OFFSET			30 /* The start offset of 802.11 Frame. Type encapsulation. */
-#define DEFAULT_SIFSTIME			10
-#define DEFAULT_FRAGMENT_THRESHOLD		2346 /* No fragment */
-#define DEFAULT_MSDU_LIFE_TIME			0xffff
-
-#define LONG_PREAMBLE_PLUS_PLCPHEADER_TIME		(144 + 48)
-#define SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME		(72 + 24)
-#define PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION	(16 + 4 + 6)
-#define Tsym						4
-
-/*  Frame Type of Bits (2, 3)----------------------------------- */
-#define MAC_TYPE_MANAGEMENT			0x00
-#define MAC_TYPE_CONTROL			0x04
-#define MAC_TYPE_DATA				0x08
-#define MASK_FRAGMENT_NUMBER			0x000F
-#define SEQUENCE_NUMBER_SHIFT			4
-
-#define  HAL_WOL_TYPE_WAKEUP_FRAME		0x01
-#define  HAL_WOL_TYPE_MAGIC_PACKET		0x02
-
-#define HAL_KEYTYPE_WEP40			0
-#define HAL_KEYTYPE_WEP104			1
-#define HAL_KEYTYPE_TKIP			2 /* 128 bit key */
-#define HAL_KEYTYPE_AES_CCMP			3 /* 128 bit key */
-
-/* For VM state */
-enum {
-	VM_STOP = 0,
-	VM_RUNNING,
-	VM_COMPLETED
-};
-
-/*
- * ================================
- * Normal Key table format
- * ================================
- */
-
-/* The order of KEY index is MAPPING_KEY_START_INDEX > GROUP_KEY_START_INDEX */
-#define MAX_KEY_TABLE			24 /* 24 entry for storing key data */
-#define GROUP_KEY_START_INDEX		4
-#define MAPPING_KEY_START_INDEX		8
-
-/*
- * =========================================
- * Descriptor
- * =========================================
- */
-#define MAX_DESCRIPTOR_BUFFER_INDEX	8 /* Have to multiple of 2 */
-#define FLAG_ERROR_TX_MASK		0x000000bf
-#define FLAG_ERROR_RX_MASK		0x0000083f
-
-#define FLAG_BAND_RX_MASK		0x10000000 /* Bit 28 */
-
-struct R00_descriptor {
-	union {
-		u32	value;
-#ifdef _BIG_ENDIAN_
-		struct {
-			u32	R00_packet_or_buffer_status:1;
-			u32	R00_packet_in_fifo:1;
-			u32	R00_RESERVED:2;
-			u32	R00_receive_byte_count:12;
-			u32	R00_receive_time_index:16;
-		};
-#else
-		struct {
-			u32	R00_receive_time_index:16;
-			u32	R00_receive_byte_count:12;
-			u32	R00_RESERVED:2;
-			u32	R00_packet_in_fifo:1;
-			u32	R00_packet_or_buffer_status:1;
-		};
-#endif
-	};
-};
-
-struct T00_descriptor {
-	union {
-		u32	value;
-#ifdef _BIG_ENDIAN_
-		struct {
-			u32	T00_first_mpdu:1; /* for hardware use */
-			u32	T00_last_mpdu:1; /* for hardware use */
-			u32	T00_IsLastMpdu:1;/* 0:not 1:Yes for software used */
-			u32	T00_IgnoreResult:1;/* The same mechanism with T00 setting. */
-			u32	T00_RESERVED_ID:2;/* 3 bit ID reserved */
-			u32	T00_tx_packet_id:4;
-			u32	T00_RESERVED:4;
-			u32	T00_header_length:6;
-			u32	T00_frame_length:12;
-		};
-#else
-		struct {
-			u32	T00_frame_length:12;
-			u32	T00_header_length:6;
-			u32	T00_RESERVED:4;
-			u32	T00_tx_packet_id:4;
-			u32	T00_RESERVED_ID:2; /* 3 bit ID reserved */
-			u32	T00_IgnoreResult:1; /* The same mechanism with T00 setting. */
-			u32	T00_IsLastMpdu:1; /* 0:not 1:Yes for software used */
-			u32	T00_last_mpdu:1; /* for hardware use */
-			u32	T00_first_mpdu:1; /* for hardware use */
-		};
-#endif
-	};
-};
-
-struct R01_descriptor {
-	union {
-		u32	value;
-#ifdef _BIG_ENDIAN_
-		struct {
-			u32	R01_RESERVED:3;
-			u32	R01_mod_type:1;
-			u32	R01_pre_type:1;
-			u32	R01_data_rate:3;
-			u32	R01_AGC_state:8;
-			u32	R01_LNA_state:2;
-			u32	R01_decryption_method:2;
-			u32	R01_mic_error:1;
-			u32	R01_replay:1;
-			u32	R01_broadcast_frame:1;
-			u32	R01_multicast_frame:1;
-			u32	R01_directed_frame:1;
-			u32	R01_receive_frame_antenna_selection:1;
-			u32	R01_frame_receive_during_atim_window:1;
-			u32	R01_protocol_version_error:1;
-			u32	R01_authentication_frame_icv_error:1;
-			u32	R01_null_key_to_authentication_frame:1;
-			u32	R01_icv_error:1;
-			u32	R01_crc_error:1;
-		};
-#else
-		struct {
-			u32	R01_crc_error:1;
-			u32	R01_icv_error:1;
-			u32	R01_null_key_to_authentication_frame:1;
-			u32	R01_authentication_frame_icv_error:1;
-			u32	R01_protocol_version_error:1;
-			u32	R01_frame_receive_during_atim_window:1;
-			u32	R01_receive_frame_antenna_selection:1;
-			u32	R01_directed_frame:1;
-			u32	R01_multicast_frame:1;
-			u32	R01_broadcast_frame:1;
-			u32	R01_replay:1;
-			u32	R01_mic_error:1;
-			u32	R01_decryption_method:2;
-			u32	R01_LNA_state:2;
-			u32	R01_AGC_state:8;
-			u32	R01_data_rate:3;
-			u32	R01_pre_type:1;
-			u32	R01_mod_type:1;
-			u32	R01_RESERVED:3;
-		};
-#endif
-	};
-};
-
-struct T01_descriptor {
-	union {
-		u32	value;
-#ifdef _BIG_ENDIAN_
-		struct {
-			u32	T01_rts_cts_duration:16;
-			u32	T01_fall_back_rate:3;
-			u32	T01_add_rts:1;
-			u32	T01_add_cts:1;
-			u32	T01_modulation_type:1;
-			u32	T01_plcp_header_length:1;
-			u32	T01_transmit_rate:3;
-			u32	T01_wep_id:2;
-			u32	T01_add_challenge_text:1;
-			u32	T01_inhibit_crc:1;
-			u32	T01_loop_back_wep_mode:1;
-			u32	T01_retry_abort_ebable:1;
-		};
-#else
-		struct {
-			u32	T01_retry_abort_ebable:1;
-			u32	T01_loop_back_wep_mode:1;
-			u32	T01_inhibit_crc:1;
-			u32	T01_add_challenge_text:1;
-			u32	T01_wep_id:2;
-			u32	T01_transmit_rate:3;
-			u32	T01_plcp_header_length:1;
-			u32	T01_modulation_type:1;
-			u32	T01_add_cts:1;
-			u32	T01_add_rts:1;
-			u32	T01_fall_back_rate:3;
-			u32	T01_rts_cts_duration:16;
-		};
-#endif
-	};
-};
-
-struct T02_descriptor {
-	union {
-		u32	value;
-#ifdef _BIG_ENDIAN_
-		struct {
-			u32	T02_IsLastMpdu:1; /* The same mechanism with T00 setting */
-			u32	T02_IgnoreResult:1; /* The same mechanism with T00 setting. */
-			u32	T02_RESERVED_ID:2; /* The same mechanism with T00 setting */
-			u32	T02_Tx_PktID:4;
-			u32	T02_MPDU_Cnt:4;
-			u32	T02_RTS_Cnt:4;
-			u32	T02_RESERVED:7;
-			u32	T02_transmit_complete:1;
-			u32	T02_transmit_abort_due_to_TBTT:1;
-			u32	T02_effective_transmission_rate:1;
-			u32	T02_transmit_without_encryption_due_to_wep_on_false:1;
-			u32	T02_discard_due_to_null_wep_key:1;
-			u32	T02_RESERVED_1:1;
-			u32	T02_out_of_MaxTxMSDULiftTime:1;
-			u32	T02_transmit_abort:1;
-			u32	T02_transmit_fail:1;
-		};
-#else
-		struct {
-			u32	T02_transmit_fail:1;
-			u32	T02_transmit_abort:1;
-			u32	T02_out_of_MaxTxMSDULiftTime:1;
-			u32	T02_RESERVED_1:1;
-			u32	T02_discard_due_to_null_wep_key:1;
-			u32	T02_transmit_without_encryption_due_to_wep_on_false:1;
-			u32	T02_effective_transmission_rate:1;
-			u32	T02_transmit_abort_due_to_TBTT:1;
-			u32	T02_transmit_complete:1;
-			u32	T02_RESERVED:7;
-			u32	T02_RTS_Cnt:4;
-			u32	T02_MPDU_Cnt:4;
-			u32	T02_Tx_PktID:4;
-			u32	T02_RESERVED_ID:2; /* The same mechanism with T00 setting */
-			u32	T02_IgnoreResult:1; /* The same mechanism with T00 setting. */
-			u32	T02_IsLastMpdu:1; /* The same mechanism with T00 setting */
-		};
-#endif
-	};
-};
-
-struct wb35_descriptor { /* Skip length = 8 DWORD */
-	/* ID for descriptor ---, The field doesn't be cleard in the operation of Descriptor definition */
-	u8	Descriptor_ID;
-	/* ----------------------The above region doesn't be cleared by DESCRIPTOR_RESET------ */
-	u8	RESERVED[3];
-
-	u16	FragmentThreshold;
-	u8	InternalUsed; /* Only can be used by operation of descriptor definition */
-	u8	Type; /* 0: 802.3 1:802.11 data frame 2:802.11 management frame */
-
-	u8	PreambleMode;/* 0: short 1:long */
-	u8	TxRate;
-	u8	FragmentCount;
-	u8	EapFix; /* For speed up key install */
-
-	/* For R00 and T00 ------------------------------ */
-	union {
-		struct R00_descriptor	R00;
-		struct T00_descriptor	T00;
-	};
-
-	/* For R01 and T01 ------------------------------ */
-	union {
-		struct R01_descriptor	R01;
-		struct T01_descriptor	T01;
-	};
-
-	/* For R02 and T02 ------------------------------ */
-	union {
-		u32		R02;
-		struct T02_descriptor	T02;
-	};
-
-	/* For R03 and T03 ------------------------------ */
-	/* For software used */
-	union {
-		u32	R03;
-		u32	T03;
-		struct {
-			u8	buffer_number;
-			u8	buffer_start_index;
-			u16	buffer_total_size;
-		};
-	};
-
-	/* For storing the buffer */
-	u16	buffer_size[MAX_DESCRIPTOR_BUFFER_INDEX];
-	void	*buffer_address[MAX_DESCRIPTOR_BUFFER_INDEX];
-};
-
-
-#define DEFAULT_NULL_PACKET_COUNT	180000	/* 180 seconds */
-
-#define MAX_TXVGA_EEPROM		9	/* How many word(u16) of EEPROM will be used for TxVGA */
-#define MAX_RF_PARAMETER		32
-
-struct txvga_for_50 {
-	u8	ChanNo;
-	u8	TxVgaValue;
-};
-
-/*
- * ==============================================
- * Device related include
- * ==============================================
- */
-
-#include "wbusb_s.h"
-#include "wb35reg_s.h"
-#include "wb35tx_s.h"
-#include "wb35rx_s.h"
-
-/* For Hal using ============================================ */
-struct hw_data {
-	/* For compatible with 33 */
-	u32	revision;
-	u32	BB3c_cal; /* The value for Tx calibration comes from EEPROM */
-	u32	BB54_cal; /* The value for Rx calibration comes from EEPROM */
-
-	/* For surprise remove */
-	u32	SurpriseRemove; /* 0: Normal 1: Surprise remove */
-	u8	IsKeyPreSet;
-	u8	CalOneTime;
-
-	u8	VCO_trim;
-
-	u32	FragCount;
-	u32	DMAFix; /* V1_DMA_FIX The variable can be removed if driver want to save mem space for V2. */
-
-	/*
-	 * ===============================================
-	 * Definition for MAC address
-	 * ===============================================
-	 */
-	u8	PermanentMacAddress[ETH_ALEN + 2]; /* The Ethernet addr that are stored in EEPROM. + 2 to 8-byte alignment */
-	u8	CurrentMacAddress[ETH_ALEN + 2]; /* The Enthernet addr that are in used. + 2 to 8-byte alignment */
-
-	/*
-	 * =========================================
-	 * Definition for 802.11
-	 * =========================================
-	 */
-	u8	*bssid_pointer; /* Used by hal_get_bssid for return value */
-	u8	bssid[8]; /* Only 6 byte will be used. 8 byte is required for read buffer */
-	u8	ssid[32]; /* maximum ssid length is 32 byte */
-
-	u16	AID;
-	u8	ssid_length;
-	u8	Channel;
-
-	u16	ListenInterval;
-	u16	CapabilityInformation;
-
-	u16	BeaconPeriod;
-	u16	ProbeDelay;
-
-	u8	bss_type;/* 0: IBSS_NET or 1:ESS_NET */
-	u8	preamble;/* 0: short preamble, 1: long preamble */
-	u8	slot_time_select; /* 9 or 20 value */
-	u8	phy_type; /* Phy select */
-
-	u32	phy_para[MAX_RF_PARAMETER];
-	u32	phy_number;
-
-	u32	CurrentRadioSw; /* 0:On 1:Off */
-	u32	CurrentRadioHw; /* 0:On 1:Off */
-
-	u8	*power_save_point; /* Used by hal_get_power_save_mode for return value */
-	u8	cwmin;
-	u8	desired_power_save;
-	u8	dtim; /* Is running dtim */
-	u8	mapping_key_replace_index; /* In Key table, the next index be replaced */
-
-	u16	MaxReceiveLifeTime;
-	u16	FragmentThreshold;
-	u16	FragmentThreshold_tmp;
-	u16	cwmax;
-
-	u8	Key_slot[MAX_KEY_TABLE][8]; /* Ownership record for key slot. For Alignment */
-	u32	Key_content[MAX_KEY_TABLE][12]; /* 10DW for each entry + 2 for burst command (Off and On valid bit) */
-	u8	CurrentDefaultKeyIndex;
-	u32	CurrentDefaultKeyLength;
-
-	/*
-	 * ==================================================
-	 * Variable for each module
-	 * ==================================================
-	 */
-	struct wb_usb		WbUsb;	/* Need WbUsb.h */
-	struct wb35_reg		reg;	/* Need Wb35Reg.h */
-	struct wb35_tx		Wb35Tx; /* Need Wb35Tx.h */
-	struct wb35_rx		Wb35Rx; /* Need Wb35Rx.h */
-
-	struct timer_list	LEDTimer; /* For LED */
-
-	u32			LEDpoint; /* For LED */
-
-	u32			dto_tx_retry_count;
-	u32			dto_tx_frag_count;
-	u32			rx_ok_count[13]; /* index=0: total rx ok */
-	u32			rx_err_count[13]; /* index=0: total rx err */
-
-	/* for Tx debug */
-	u32			tx_TBTT_start_count;
-	u32			tx_ETR_count;
-	u32			tx_WepOn_false_count;
-	u32			tx_Null_key_count;
-	u32			tx_retry_count[8];
-
-	u8			PowerIndexFromEEPROM; /* For 2412MHz */
-	u8			power_index;
-	u8			IsWaitJoinComplete; /* TRUE: set join request */
-	u8			band;
-
-	u16			SoftwareSet;
-	u16			Reserved_s;
-
-	u32			IsInitOK; /* 0: Driver starting 1: Driver init OK */
-
-	/* For Phy calibration */
-	s32			iq_rsdl_gain_tx_d2;
-	s32			iq_rsdl_phase_tx_d2;
-	u32			txvga_setting_for_cal;
-
-	u8			TxVgaSettingInEEPROM[(((MAX_TXVGA_EEPROM * 2) + 3) & ~0x03)]; /* For EEPROM value */
-	u8			TxVgaFor24[16]; /* Max is 14, 2 for alignment */
-	struct txvga_for_50		TxVgaFor50[36];	/* 35 channels in 5G. 35x2 = 70 byte. 2 for alignments */
-
-	u16			Scan_Interval;
-	u16			RESERVED6;
-
-	/* LED control */
-	u32		LED_control;
-	/*
-	 * LED_control 4 byte: Gray_Led_1[3] Gray_Led_0[2] Led[1] Led[0]
-	 * Gray_Led
-	 *		For Led gray setting
-	 * Led
-	 *		0: normal control,
-	 *			LED behavior will decide by EEPROM setting
-	 *		1: Turn off specific LED
-	 *		2: Always on specific LED
-	 *		3: slow blinking specific LED
-	 *		4: fast blinking specific LED
-	 *		5: WPS led control is set. Led0 is Red, Led1 id Green
-	 *
-	 * Led[1] is parameter for WPS LED mode
-	 *		1:InProgress
-	 *		2: Error
-	 *		3: Session overlap
-	 *		4: Success control
-	 */
-	u32		LED_LinkOn;	/* Turn LED on control */
-	u32		LED_Scanning;	/* Let LED in scan process control */
-	u32		LED_Blinking;	/* Temp variable for shining */
-	u32		RxByteCountLast;
-	u32		TxByteCountLast;
-
-	atomic_t	SurpriseRemoveCount;
-
-	/* For global timer */
-	u32		time_count;	/* TICK_TIME_100ms 1 = 100ms */
-
-	/* For error recover */
-	u32		HwStop;
-
-	/* For avoid AP disconnect */
-	u32		NullPacketCount;
-};
-
-#endif
diff --git a/drivers/staging/winbond/wblinux_f.h b/drivers/staging/winbond/wblinux_f.h
deleted file mode 100644
index 0a9d214..0000000
--- a/drivers/staging/winbond/wblinux_f.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __WBLINUX_F_H
-#define __WBLINUX_F_H
-
-#include "core.h"
-#include "mds_s.h"
-
-/*
- * ====================================================================
- * Copyright (c) 1996-2004 Winbond Electronic Corporation
- *
- * wblinux_f.h
- * ====================================================================
- */
-int wb35_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void wb35_set_multicast(struct net_device *netdev);
-struct net_device_stats *wb35_netdev_stats(struct net_device *netdev);
-#endif
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3f60cf7..2163d60 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -14,10 +14,11 @@
 
 #include "core.h"
 #include "mds_f.h"
-#include "mlmetxrx_f.h"
 #include "mto.h"
-#include "wbhal_f.h"
-#include "wblinux_f.h"
+#include "wbhal.h"
+#include "wb35reg_f.h"
+#include "wb35tx_f.h"
+#include "wb35rx_f.h"
 
 MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
 MODULE_LICENSE("GPL");
@@ -181,10 +182,7 @@
 	RFSynthesizer_SwitchingChannel(pHwData, channel); /* Switch channel */
 	pHwData->Channel = channel.ChanNo;
 	pHwData->band = channel.band;
-#ifdef _PE_STATE_DUMP_
-	printk("Set channel is %d, band =%d\n", pHwData->Channel,
-	       pHwData->band);
-#endif
+	pr_debug("Set channel is %d, band =%d\n", pHwData->Channel, pHwData->band);
 	reg->M28_MacControl &= ~0xff;	/* Clean channel information field */
 	reg->M28_MacControl |= channel.ChanNo;
 	Wb35Reg_WriteWithCallbackValue(pHwData, 0x0828, reg->M28_MacControl,
@@ -339,10 +337,8 @@
 static unsigned char hal_idle(struct hw_data *pHwData)
 {
 	struct wb35_reg *reg = &pHwData->reg;
-	struct wb_usb *pWbUsb = &pHwData->WbUsb;
 
-	if (!pHwData->SurpriseRemove
-	    && (pWbUsb->DetectCount || reg->EP0vm_state != VM_STOP))
+	if (!pHwData->SurpriseRemove && reg->EP0vm_state != VM_STOP)
 		return false;
 
 	return true;
@@ -608,15 +604,6 @@
 			}
 			break;
 		}
-
-		/* Active send null packet to avoid AP disconnect */
-		if (pHwData->LED_LinkOn) {
-			pHwData->NullPacketCount += TimeInterval;
-			if (pHwData->NullPacketCount >=
-			    DEFAULT_NULL_PACKET_COUNT) {
-				pHwData->NullPacketCount = 0;
-			}
-		}
 	}
 
 	pHwData->time_count += TimeInterval;
@@ -651,13 +638,6 @@
 
 	SoftwareSet = hal_software_set(pHwData);
 
-#ifdef Vendor2
-	/* Try to make sure the EEPROM contain */
-	SoftwareSet >>= 8;
-	if (SoftwareSet != 0x82)
-		return false;
-#endif
-
 	Wb35Rx_start(hw);
 	Wb35Tx_EP2VM_start(priv);
 
@@ -734,9 +714,7 @@
 	}
 
 	priv->sLocalPara.bAntennaNo = hal_get_antenna_number(pHwData);
-#ifdef _PE_STATE_DUMP_
-	printk("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo);
-#endif
+	pr_debug("Driver init, antenna no = %d\n", priv->sLocalPara.bAntennaNo);
 	hal_get_hw_radio_off(pHwData);
 
 	/* Waiting for HAL setting OK */
@@ -769,7 +747,6 @@
 	struct usb_host_interface *interface;
 	struct ieee80211_hw *dev;
 	struct wbsoft_priv *priv;
-	struct wb_usb *pWbUsb;
 	int nr, err;
 	u32 ltmp;
 
@@ -800,16 +777,13 @@
 
 	priv = dev->priv;
 
-	pWbUsb = &priv->sHwData.WbUsb;
-	pWbUsb->udev = udev;
+	priv->sHwData.udev = udev;
 
 	interface = intf->cur_altsetting;
 	endpoint = &interface->endpoint[0].desc;
 
-	if (endpoint[2].wMaxPacketSize == 512) {
+	if (endpoint[2].wMaxPacketSize == 512)
 		printk("[w35und] Working on USB 2.0\n");
-		pWbUsb->IsUsb20 = 1;
-	}
 
 	err = wb35_hw_init(dev);
 	if (err)
@@ -860,13 +834,9 @@
 
 static void wb35_hw_halt(struct wbsoft_priv *adapter)
 {
-	Mds_Destroy(adapter);
-
 	/* Turn off Rx and Tx hardware ability */
 	hal_stop(&adapter->sHwData);
-#ifdef _PE_USB_INI_DUMP_
-	printk("[w35und] Hal_stop O.K.\n");
-#endif
+	pr_debug("[w35und] Hal_stop O.K.\n");
 	/* Waiting Irp completed */
 	msleep(100);
 
diff --git a/drivers/staging/winbond/wbusb_s.h b/drivers/staging/winbond/wbusb_s.h
deleted file mode 100644
index 8961ae5..0000000
--- a/drivers/staging/winbond/wbusb_s.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* =========================================================
- * Copyright (c) 1996-2004 Winbond Electronic Corporation
- *
- *  Module Name:
- *    wbusb_s.h
- * =========================================================
- */
-#ifndef __WINBOND_WBUSB_S_H
-#define __WINBOND_WBUSB_S_H
-
-#include <linux/types.h>
-
-struct wb_usb {
-	u32	IsUsb20;
-	struct	usb_device *udev;
-	u32	DetectCount;
-};
-#endif
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 020b17a..28ae9dd 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -458,7 +458,7 @@
         return;
     }
 
-    dev = (struct net_device *)pci_get_drvdata( pdev );
+    dev = pci_get_drvdata( pdev );
     if( dev == NULL ) {
         DBG_ERROR( DbgInfo, "Could not retrieve net_device structure\n" );
         return;
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index 7a1337d..a459e48 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -248,7 +248,7 @@
 		} else {
 			DBG_TRACE(DbgInfo, "F/W image file found\n");
 #define DHF_ALLOC_SIZE 96000			/* just below 96K, let's hope it suffices for now and for the future */
-			cp = (char *)vmalloc(DHF_ALLOC_SIZE);
+			cp = vmalloc(DHF_ALLOC_SIZE);
 			if (cp == NULL) {
 				DBG_ERROR(DbgInfo, "error in vmalloc\n");
 			} else {
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.c b/drivers/staging/wlags49_h2/wl_sysfs.c
index e4c8804..9b833b3 100644
--- a/drivers/staging/wlags49_h2/wl_sysfs.c
+++ b/drivers/staging/wlags49_h2/wl_sysfs.c
@@ -42,7 +42,7 @@
     CFG_HERMES_TALLIES_STRCT tallies;
     ssize_t ret = -EINVAL;
 
-    read_lock(&dev_base_lock);
+    rcu_read_lock();
     if (dev_isalive(dev)) {
 	wl_lock(lp, &flags);
 
@@ -102,7 +102,7 @@
 	    }
     }
 
-    read_unlock(&dev_base_lock);
+    rcu_read_unlock();
     return ret;
 }
 
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 83879f9..146f365 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -148,7 +148,8 @@
 
 			/* tack on SNAP */
 			e_snap =
-			    (struct wlan_snap *) skb_push(skb, sizeof(struct wlan_snap));
+			    (struct wlan_snap *) skb_push(skb,
+				sizeof(struct wlan_snap));
 			e_snap->type = htons(proto);
 			if (ethconv == WLAN_ETHCONV_8021h
 			    && p80211_stt_findproto(proto)) {
@@ -161,7 +162,8 @@
 
 			/* tack on llc */
 			e_llc =
-			    (struct wlan_llc *) skb_push(skb, sizeof(struct wlan_llc));
+			    (struct wlan_llc *) skb_push(skb,
+				sizeof(struct wlan_llc));
 			e_llc->dsap = 0xAA;	/* SNAP, see IEEE 802 */
 			e_llc->ssap = 0xAA;
 			e_llc->ctl = 0x03;
@@ -297,10 +299,12 @@
 	if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
 		memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
 		memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
-	} else if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 1)) {
+	} else if ((WLAN_GET_FC_TODS(fc) == 0)
+			&& (WLAN_GET_FC_FROMDS(fc) == 1)) {
 		memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
 		memcpy(saddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
-	} else if ((WLAN_GET_FC_TODS(fc) == 1) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
+	} else if ((WLAN_GET_FC_TODS(fc) == 1)
+			&& (WLAN_GET_FC_FROMDS(fc) == 0)) {
 		memcpy(daddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
 		memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
 	} else {
@@ -349,7 +353,8 @@
 
 	e_llc = (struct wlan_llc *) (skb->data + payload_offset);
 	e_snap =
-	    (struct wlan_snap *) (skb->data + payload_offset + sizeof(struct wlan_llc));
+	    (struct wlan_snap *) (skb->data + payload_offset +
+		sizeof(struct wlan_llc));
 
 	/* Test for the various encodings */
 	if ((payload_length >= sizeof(struct wlan_ethhdr)) &&
@@ -372,9 +377,11 @@
 		/* chop off the 802.11 CRC */
 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
 
-	} else if ((payload_length >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap))
-		   && (e_llc->dsap == 0xaa) && (e_llc->ssap == 0xaa)
-		   && (e_llc->ctl == 0x03)
+	} else if ((payload_length >= sizeof(struct wlan_llc) +
+		sizeof(struct wlan_snap))
+		&& (e_llc->dsap == 0xaa)
+		&& (e_llc->ssap == 0xaa)
+		&& (e_llc->ctl == 0x03)
 		   &&
 		   (((memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) == 0)
 		     && (ethconv == WLAN_ETHCONV_8021h)
@@ -406,21 +413,25 @@
 		/* chop off the 802.11 CRC */
 		skb_trim(skb, skb->len - WLAN_CRC_LEN);
 
-	} else if ((payload_length >= sizeof(struct wlan_llc) + sizeof(struct wlan_snap))
-		   && (e_llc->dsap == 0xaa) && (e_llc->ssap == 0xaa)
-		   && (e_llc->ctl == 0x03)) {
+	} else if ((payload_length >= sizeof(struct wlan_llc) +
+		sizeof(struct wlan_snap))
+		&& (e_llc->dsap == 0xaa)
+		&& (e_llc->ssap == 0xaa)
+		&& (e_llc->ctl == 0x03)) {
 		pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
-		/* it's an 802.1h frame || (an RFC1042 && protocol is not in STT) */
-		/* build a DIXII + RFC894 */
+		/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
+		   build a DIXII + RFC894 */
 
 		/* Test for an overlength frame */
-		if ((payload_length - sizeof(struct wlan_llc) - sizeof(struct wlan_snap))
-		    > netdev->mtu) {
+		if ((payload_length - sizeof(struct wlan_llc) -
+			sizeof(struct wlan_snap))
+			> netdev->mtu) {
 			/* A bogus length ethfrm has been sent. */
 			/* Is someone trying an oflow attack? */
 			printk(KERN_ERR "DIXII frame too large (%ld > %d)\n",
-			       (long int)(payload_length - sizeof(struct wlan_llc) -
-					  sizeof(struct wlan_snap)), netdev->mtu);
+			       (long int)(payload_length -
+					sizeof(struct wlan_llc) -
+					sizeof(struct wlan_snap)), netdev->mtu);
 			return 1;
 		}
 
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index eca0391..ea493aa 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -66,12 +66,14 @@
 #define	P80211_FRMMETA_MAGIC	0x802110
 
 #define P80211SKB_FRMMETA(s) \
-	(((((struct p80211_frmmeta *)((s)->cb))->magic) == P80211_FRMMETA_MAGIC) ? \
+	(((((struct p80211_frmmeta *)((s)->cb))->magic) == \
+		P80211_FRMMETA_MAGIC) ? \
 		((struct p80211_frmmeta *)((s)->cb)) : \
 		(NULL))
 
 #define P80211SKB_RXMETA(s) \
-	(P80211SKB_FRMMETA((s)) ?  P80211SKB_FRMMETA((s))->rx : ((struct p80211_rxmeta *)(NULL)))
+	(P80211SKB_FRMMETA((s)) ?  P80211SKB_FRMMETA((s))->rx : \
+		((struct p80211_rxmeta *)(NULL)))
 
 struct p80211_rxmeta {
 	struct wlandevice *wlandev;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index b7b4a73..b0af292 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -301,7 +301,8 @@
 					if (memcmp
 					    (hdr->a1, wlandev->netdev->dev_addr,
 					     ETH_ALEN) != 0) {
-						/* but reject anything else that isn't multicast */
+						/* but reject anything else that
+						   isn't multicast */
 						if (!(hdr->a1[0] & 0x01)) {
 							dev_kfree_skb(skb);
 							continue;
@@ -770,7 +771,8 @@
 	}
 
 	/* Allocate and initialize the struct device */
-	netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d", ether_setup);
+	netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
+				ether_setup);
 	if (netdev == NULL) {
 		printk(KERN_ERR "Failed to alloc netdev.\n");
 		wlan_free_wiphy(wiphy);
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1ec3374..8588417 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -94,11 +94,11 @@
 #endif
 
 /*--- NSD Capabilities Flags ------------------------------*/
-#define P80211_NSDCAP_HARDWAREWEP           0x01	/* hardware wep engine */
-#define P80211_NSDCAP_SHORT_PREAMBLE        0x10	/* hardware supports */
-#define P80211_NSDCAP_HWFRAGMENT            0x80	/* nsd handles frag/defrag */
-#define P80211_NSDCAP_AUTOJOIN              0x100	/* nsd does autojoin */
-#define P80211_NSDCAP_NOSCAN                0x200	/* nsd can scan */
+#define P80211_NSDCAP_HARDWAREWEP           0x01  /* hardware wep engine */
+#define P80211_NSDCAP_SHORT_PREAMBLE        0x10  /* hardware supports */
+#define P80211_NSDCAP_HWFRAGMENT            0x80  /* nsd handles frag/defrag */
+#define P80211_NSDCAP_AUTOJOIN              0x100 /* nsd does autojoin */
+#define P80211_NSDCAP_NOSCAN                0x200 /* nsd can scan */
 
 /* Received frame statistics */
 typedef struct p80211_frmrx_t {
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 41a99c5..9dec859 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -141,14 +141,14 @@
 #define P80211DID_LSB_ITEM		(12)
 #define P80211DID_LSB_INDEX		(18)
 #define P80211DID_LSB_ISTABLE		(26)
-#define P80211DID_LSB_ACCESS 		(27)
+#define P80211DID_LSB_ACCESS		(27)
 
 #define P80211DID_MASK_SECTION		(0x0000003fUL)
 #define P80211DID_MASK_GROUP		(0x0000003fUL)
 #define P80211DID_MASK_ITEM		(0x0000003fUL)
 #define P80211DID_MASK_INDEX		(0x000000ffUL)
 #define P80211DID_MASK_ISTABLE		(0x00000001UL)
-#define P80211DID_MASK_ACCESS 		(0x00000003UL)
+#define P80211DID_MASK_ACCESS		(0x00000003UL)
 
 #define P80211DID_MK(a, m, l)	((((u32)(a)) & (m)) << (l))
 
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 04514a8..6675c82 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -213,8 +213,8 @@
 		u16 wordbuf[17];
 
 		result = hfa384x_drvr_setconfig16(hw,
-						  HFA384x_RID_CNFROAMINGMODE,
-						  HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
+					HFA384x_RID_CNFROAMINGMODE,
+					HFA384x_ROAMMODE_HOSTSCAN_HOSTROAM);
 		if (result) {
 			printk(KERN_ERR
 			       "setconfig(ROAMINGMODE) failed. result=%d\n",
@@ -258,8 +258,8 @@
 		}
 		/* ibss options */
 		result = hfa384x_drvr_setconfig16(hw,
-						  HFA384x_RID_CREATEIBSS,
-						  HFA384x_CREATEIBSS_JOINCREATEIBSS);
+					HFA384x_RID_CREATEIBSS,
+					HFA384x_CREATEIBSS_JOINCREATEIBSS);
 		if (result) {
 			printk(KERN_ERR "Failed to set CREATEIBSS.\n");
 			msg->resultcode.data =
@@ -416,7 +416,8 @@
 #define REQBASICRATE(N) \
 	if ((count >= N) && DOT11_RATE5_ISBASIC_GET(item->supprates[(N)-1])) { \
 		req->basicrate ## N .data = item->supprates[(N)-1]; \
-		req->basicrate ## N .status = P80211ENUM_msgitem_status_data_ok; \
+		req->basicrate ## N .status = \
+			P80211ENUM_msgitem_status_data_ok; \
 	}
 
 	REQBASICRATE(1);
@@ -431,7 +432,8 @@
 #define REQSUPPRATE(N) \
 	if (count >= N) { \
 		req->supprate ## N .data = item->supprates[(N)-1]; \
-		req->supprate ## N .status = P80211ENUM_msgitem_status_data_ok; \
+		req->supprate ## N .status = \
+			P80211ENUM_msgitem_status_data_ok; \
 	}
 
 	REQSUPPRATE(1);
@@ -1102,7 +1104,7 @@
 		result = hfa384x_drvr_disable(hw, 0);
 		if (result) {
 			pr_debug
-			    ("failed to disable port 0 after sniffing, result=%d\n",
+			("failed to disable port 0 after sniffing, result=%d\n",
 			     result);
 			goto failed;
 		}
@@ -1137,7 +1139,7 @@
 			result = hfa384x_drvr_enable(hw, 0);
 			if (result) {
 				pr_debug
-				    ("failed to enable port to presniff setting, result=%d\n",
+				("failed to enable port to presniff setting, result=%d\n",
 				     result);
 				goto failed;
 			}
@@ -1161,7 +1163,7 @@
 						  &(hw->presniff_port_type));
 				if (result) {
 					pr_debug
-					    ("failed to read porttype, result=%d\n",
+					("failed to read porttype, result=%d\n",
 					     result);
 					goto failed;
 				}
@@ -1171,7 +1173,7 @@
 						  &(hw->presniff_wepflags));
 				if (result) {
 					pr_debug
-					    ("failed to read wepflags, result=%d\n",
+					("failed to read wepflags, result=%d\n",
 					     result);
 					goto failed;
 				}
@@ -1238,8 +1240,8 @@
 
 			if (result) {
 				pr_debug
-				    ("failed to set wepflags=0x%04x, result=%d\n",
-				     word, result);
+				  ("failed to set wepflags=0x%04x, result=%d\n",
+				   word, result);
 				goto failed;
 			}
 		}
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 4f73d09..ee008e5 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -472,9 +472,11 @@
 			break;
 		}
 
-		pdev = pci_find_device(PCI_VENDOR_ID_SI, nbridge_id, pdev);
-		if (pdev)
+		pdev = pci_get_device(PCI_VENDOR_ID_SI, nbridge_id, pdev);
+		if (pdev) {
 			valid_pdev = 1;
+			pci_dev_put(pdev);
+		}
 	}
 
 	if (!valid_pdev) {
@@ -2178,8 +2180,7 @@
 
 #ifndef AGPOFF
 	if (XGIfb_queuemode == AGP_CMD_QUEUE) {
-		agp_info = vmalloc(sizeof(*agp_info));
-		memset((void *)agp_info, 0x00, sizeof(*agp_info));
+		agp_info = vzalloc(sizeof(*agp_info));
 		agp_copy_info(agp_info);
 
 		agp_backend_acquire();
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
index 3fdbb8a..b644067 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/zram/xvmalloc.c
@@ -187,7 +187,7 @@
 	slindex = get_index_for_insert(block->size);
 	flindex = slindex / BITS_PER_LONG;
 
-	block->link.prev_page = 0;
+	block->link.prev_page = NULL;
 	block->link.prev_offset = 0;
 	block->link.next_page = pool->freelist[slindex].page;
 	block->link.next_offset = pool->freelist[slindex].offset;
@@ -217,7 +217,7 @@
 
 	pool->freelist[slindex].page = block->link.next_page;
 	pool->freelist[slindex].offset = block->link.next_offset;
-	block->link.prev_page = 0;
+	block->link.prev_page = NULL;
 	block->link.prev_offset = 0;
 
 	if (!pool->freelist[slindex].page) {
@@ -232,7 +232,7 @@
 		 */
 		tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
 				pool->freelist[slindex].offset, KM_USER1);
-		tmpblock->link.prev_page = 0;
+		tmpblock->link.prev_page = NULL;
 		tmpblock->link.prev_offset = 0;
 		put_ptr_atomic(tmpblock, KM_USER1);
 	}
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index d0e9e02..5415712 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -527,7 +527,7 @@
 	}
 
 	num_pages = zram->disksize >> PAGE_SHIFT;
-	zram->table = vmalloc(num_pages * sizeof(*zram->table));
+	zram->table = vzalloc(num_pages * sizeof(*zram->table));
 	if (!zram->table) {
 		pr_err("Error allocating zram address table\n");
 		/* To prevent accessing table entries during cleanup */
@@ -535,7 +535,6 @@
 		ret = -ENOMEM;
 		goto fail;
 	}
-	memset(zram->table, 0, num_pages * sizeof(*zram->table));
 
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
 
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
new file mode 100644
index 0000000..2fac3be
--- /dev/null
+++ b/drivers/target/Kconfig
@@ -0,0 +1,32 @@
+
+menuconfig TARGET_CORE
+	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
+	depends on SCSI && BLOCK
+	select CONFIGFS_FS
+	default n
+	help
+	Say Y or M here to enable the TCM Storage Engine and ConfigFS-enabled
+	control path for target_core_mod.  This includes built-in TCM RAMDISK
+	subsystem logic for virtual LUN 0 access.
+
+if TARGET_CORE
+
+config TCM_IBLOCK
+	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+	help
+	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
+	access to Linux/Block devices using BIO.
+
+config TCM_FILEIO
+	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
+	help
+	Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
+	access to Linux/VFS struct file or struct block_device.
+
+config TCM_PSCSI
+	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+	help
+	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
+	passthrough access to Linux/SCSI devices.
+
+endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
new file mode 100644
index 0000000..5cfd708
--- /dev/null
+++ b/drivers/target/Makefile
@@ -0,0 +1,24 @@
+EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
+
+target_core_mod-y		:= target_core_configfs.o \
+				   target_core_device.o \
+				   target_core_fabric_configfs.o \
+				   target_core_fabric_lib.o \
+				   target_core_hba.o \
+				   target_core_pr.o \
+				   target_core_alua.o \
+				   target_core_scdb.o \
+				   target_core_tmr.o \
+				   target_core_tpg.o \
+				   target_core_transport.o \
+				   target_core_cdb.o \
+				   target_core_ua.o \
+				   target_core_rd.o \
+				   target_core_mib.o
+
+obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
+
+# Subsystem modules
+obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
+obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
+obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
new file mode 100644
index 0000000..2c5fcfe
--- /dev/null
+++ b/drivers/target/target_core_alua.c
@@ -0,0 +1,1991 @@
+/*******************************************************************************
+ * Filename:  target_core_alua.c
+ *
+ * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
+ *
+ * Copyright (c) 2009-2010 Rising Tide Systems
+ * Copyright (c) 2009-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_ua.h"
+
+static int core_alua_check_transition(int state, int *primary);
+static int core_alua_set_tg_pt_secondary_state(
+		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+		struct se_port *port, int explict, int offline);
+
+/*
+ * REPORT_TARGET_PORT_GROUPS
+ *
+ * See spc4r17 section 6.27
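+ *
+ * As built below, the returned parameter data is a 4-byte RETURN DATA
+ * LENGTH header followed by one 8-byte target port group descriptor per
+ * group, each trailed by a 4-byte target port descriptor (two obsolete
+ * bytes plus the 2-byte RELATIVE TARGET PORT IDENTIFIER) for every member
+ * port.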
+ */
+int core_emulate_report_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
+				    Target port group descriptor */
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		/*
+		 * PREF: Preferred target port bit, determine if this
+		 * bit should be set for port group.
+		 */
+		if (tg_pt_gp->tg_pt_gp_pref)
+			buf[off] = 0x80;
+		/*
+		 * Set the ASYMMETRIC ACCESS State
+		 */
+		buf[off++] |= (atomic_read(
+			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+		/*
+		 * Set supported ASYMMETRIC ACCESS State bits
+		 */
+		buf[off] = 0x80; /* T_SUP */
+		buf[off] |= 0x40; /* O_SUP */
+		buf[off] |= 0x8; /* U_SUP */
+		buf[off] |= 0x4; /* S_SUP */
+		buf[off] |= 0x2; /* AN_SUP */
+		buf[off++] |= 0x1; /* AO_SUP */
+		/*
+		 * TARGET PORT GROUP
+		 */
+		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+
+		off++; /* Skip over Reserved */
+		/*
+		 * STATUS CODE
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
+		/*
+		 * Vendor Specific field
+		 */
+		buf[off++] = 0x00;
+		/*
+		 * TARGET PORT COUNT
+		 */
+		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
+		rd_len += 8;
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+			port = tg_pt_gp_mem->tg_pt;
+			/*
+			 * Start Target Port descriptor format
+			 *
+			 * See spc4r17 section 6.2.7 Table 247
+			 */
+			off += 2; /* Skip over Obsolete */
+			/*
+			 * Set RELATIVE TARGET PORT IDENTIFIER
+			 */
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+			rd_len += 4;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
+	 */
+	buf[0] = ((rd_len >> 24) & 0xff);
+	buf[1] = ((rd_len >> 16) & 0xff);
+	buf[2] = ((rd_len >> 8) & 0xff);
+	buf[3] = (rd_len & 0xff);
+
+	return 0;
+}
+
+/*
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
+ *
+ * See spc4r17 section 6.35
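+ *
+ * The parameter list is walked below as 4-byte descriptors: the low nibble
+ * of byte 0 carries the requested ASYMMETRIC ACCESS STATE, while bytes 2-3
+ * carry either the TARGET PORT GROUP id (primary states) or the RELATIVE
+ * TARGET PORT IDENTIFIER (secondary states).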
+ */
+int core_emulate_set_target_port_groups(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
+	struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+	u32 len = 4; /* Skip over RESERVED area in header */
+	int alua_access_state, primary = 0, rc;
+	u16 tg_pt_id, rtpi;
+
+	if (!(l_port))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	/*
+	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
+	 * for the local tg_pt_gp.
+	 */
+	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
+	if (!(l_tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+	if (!(l_tg_pt_gp)) {
+		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
+	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	if (!(rc)) {
+		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+				" while TPGS_EXPLICT_ALUA is disabled\n");
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	while (len < cmd->data_length) {
+		alua_access_state = (ptr[0] & 0x0f);
+		/*
+		 * Check the received ALUA access state, and determine if
+		 * the state is a primary or secondary target port asymmetric
+		 * access state.
+		 */
+		rc = core_alua_check_transition(alua_access_state, &primary);
+		if (rc != 0) {
+			/*
+			 * If the SET TARGET PORT GROUPS attempts to establish
+			 * an invalid combination of target port asymmetric
+			 * access states or attempts to establish an
+			 * unsupported target port asymmetric access state,
+			 * then the command shall be terminated with CHECK
+			 * CONDITION status, with the sense key set to ILLEGAL
+			 * REQUEST, and the additional sense code set to INVALID
+			 * FIELD IN PARAMETER LIST.
+			 */
+			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+		rc = -1;
+		/*
+		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
+		 * specifies a primary target port asymmetric access state,
+		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
+		 * a primary target port group for which the primary target
+		 * port asymmetric access state shall be changed. If the
+		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
+		 * port asymmetric access state, then the TARGET PORT GROUP OR
+		 * TARGET PORT field specifies the relative target port
+		 * identifier (see 3.1.120) of the target port for which the
+		 * secondary target port asymmetric access state shall be
+		 * changed.
+		 */
+		if (primary) {
+			tg_pt_id = ((ptr[2] << 8) & 0xff00);
+			tg_pt_id |= (ptr[3] & 0xff);
+			/*
+			 * Locate the matching target port group ID from
+			 * the global tg_pt_gp list
+			 */
+			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			list_for_each_entry(tg_pt_gp,
+					&T10_ALUA(su_dev)->tg_pt_gps_list,
+					tg_pt_gp_list) {
+				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+					continue;
+
+				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
+					continue;
+
+				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_inc();
+				spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+				rc = core_alua_do_port_transition(tg_pt_gp,
+						dev, l_port, nacl,
+						alua_access_state, 1);
+
+				spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+				smp_mb__after_atomic_dec();
+				break;
+			}
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			/*
+			 * If no matching target port group ID can be located,
+			 * throw an exception with ASCQ: INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0)
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		} else {
+			/*
+			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+			 * the Target Port in question for the incoming
+			 * SET_TARGET_PORT_GROUPS op.
+			 */
+			rtpi = ((ptr[2] << 8) & 0xff00);
+			rtpi |= (ptr[3] & 0xff);
+			/*
+			 * Locate the matching relative target port identifier
+			 * for the struct se_device storage object.
+			 */
+			spin_lock(&dev->se_port_lock);
+			list_for_each_entry(port, &dev->dev_sep_list,
+							sep_list) {
+				if (port->sep_rtpi != rtpi)
+					continue;
+
+				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+				spin_unlock(&dev->se_port_lock);
+
+				rc = core_alua_set_tg_pt_secondary_state(
+						tg_pt_gp_mem, port, 1, 1);
+
+				spin_lock(&dev->se_port_lock);
+				break;
+			}
+			spin_unlock(&dev->se_port_lock);
+			/*
+			 * If no matching relative target port identifier can
+			 * be located, throw an exception with ASCQ:
+			 * INVALID_PARAMETER_LIST
+			 */
+			if (rc != 0)
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+
+		ptr += 4;
+		len += 4;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_nonoptimized(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	int nonop_delay_msecs,
+	u8 *alua_ascq)
+{
+	/*
+	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
+	 * later to determine if processing of this cmd needs to be
+	 * temporarily delayed for the Active/NonOptimized primary access state.
+	 */
+	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
+	cmd->alua_nonop_delay = nonop_delay_msecs;
+	return 0;
+}
+
+static inline int core_alua_state_standby(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
+	 * spc4r17 section 5.9.2.4.4
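+	 *
+	 * CDBs not allowed here return 1 with *alua_ascq set to
+	 * ASCQ_04H_ALUA_TG_PT_STANDBY so the caller can build the
+	 * corresponding check condition/not ready status.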
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case LOG_SELECT:
+	case LOG_SENSE:
+	case MODE_SELECT:
+	case MODE_SENSE:
+	case REPORT_LUNS:
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_unavailable(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
+	 * spc4r17 section 5.9.2.4.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case MAINTENANCE_OUT:
+		switch (cdb[1]) {
+		case MO_SET_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int core_alua_state_transition(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	/*
+	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
+	 * spc4r17 section 5.9.2.5
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case MAINTENANCE_IN:
+		switch (cdb[1]) {
+		case MI_REPORT_TARGET_PGS:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+			return 1;
+		}
+	case REQUEST_SENSE:
+	case READ_BUFFER:
+	case WRITE_BUFFER:
+		return 0;
+	default:
+		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
+ * in transport_cmd_sequencer().  This function is assigned to
+ * struct t10_alua *->state_check() in core_setup_alua()
+ */
+static int core_alua_state_check_nop(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	return 0;
+}
+
+/*
+ * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
+ * This function is assigned to struct t10_alua *->state_check() in
+ * core_setup_alua()
+ *
+ * Also, this function can return three different return codes to
+ * signal transport_generic_cmd_sequencer()
+ *
+ * return 1: Used to signal LUN not accessible, and check condition/not ready
+ * return 0: Used to signal success
+ * return -1: Used to signal failure, and invalid cdb field
+ */
+static int core_alua_state_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u8 *alua_ascq)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_port *port = lun->lun_sep;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	int out_alua_state, nonop_delay_msecs;
+
+	if (!(port))
+		return 0;
+	/*
+	 * First, check for a struct se_port specific secondary ALUA target port
+	 * access state: OFFLINE
+	 */
+	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+				" target port\n");
+		return 1;
+	}
+	/*
+	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
+	 * ALUA target port group, to obtain current ALUA access state.
+	 * Otherwise look for the underlying struct se_device association with
+	 * an ALUA logical unit group.
+	 */
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+	 * statement so the compiler knows explicitly to check this case first.
+	 * For the Optimized ALUA access state case, we want to process the
+	 * incoming fabric cmd ASAP.
+	 */
+	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+		return 0;
+
+	switch (out_alua_state) {
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return core_alua_state_nonoptimized(cmd, cdb,
+					nonop_delay_msecs, alua_ascq);
+	case ALUA_ACCESS_STATE_STANDBY:
+		return core_alua_state_standby(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return core_alua_state_transition(cmd, cdb, alua_ascq);
+	/*
+	 * OFFLINE is a secondary ALUA target port group access state, that is
+	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+	 */
+	case ALUA_ACCESS_STATE_OFFLINE:
+	default:
+		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+				out_alua_state);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Check implicit and explicit ALUA state change requests.
+ */
+static int core_alua_check_transition(int state, int *primary)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+	case ALUA_ACCESS_STATE_STANDBY:
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		/*
+		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+		 * defined as primary target port asymmetric access states.
+		 */
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_OFFLINE:
+		/*
+		 * OFFLINE state is defined as a secondary target port
+		 * asymmetric access state.
+		 */
+		*primary = 0;
+		break;
+	default:
+		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+		return -1;
+	}
+
+	return 0;
+}
+
+static char *core_alua_dump_state(int state)
+{
+	switch (state) {
+	case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+		return "Active/Optimized";
+	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_STANDBY:
+		return "Standby";
+	case ALUA_ACCESS_STATE_UNAVAILABLE:
+		return "Unavailable";
+	case ALUA_ACCESS_STATE_OFFLINE:
+		return "Offline";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+char *core_alua_dump_status(int status)
+{
+	switch (status) {
+	case ALUA_STATUS_NONE:
+		return "None";
+	case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
+		return "Altered by Explict STPG";
+	case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
+		return "Altered by Implict ALUA";
+	default:
+		return "Unknown";
+	}
+
+	return NULL;
+}
+
+/*
+ * Used by fabric modules to determine when we need to delay processing
+ * for the Active/NonOptimized paths.
+ */
+int core_alua_check_nonop_delay(
+	struct se_cmd *cmd)
+{
+	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
+		return 0;
+	if (in_interrupt())
+		return 0;
+	/*
+	 * The ALUA Active/NonOptimized access state delay can be disabled
+	 * via configfs with a value of zero.
+	 */
+	if (!(cmd->alua_nonop_delay))
+		return 0;
+	/*
+	 * struct se_cmd->alua_nonop_delay gets set by a target port group
+	 * defined interval in core_alua_state_nonoptimized()
+	 */
+	msleep_interruptible(cmd->alua_nonop_delay);
+	return 0;
+}
+EXPORT_SYMBOL(core_alua_check_nonop_delay);
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
+ *
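+ * Serializes md_buf out to the passed path with vfs_writev() under a
+ * temporary KERNEL_DS segment.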
+ */
+static int core_alua_write_tpg_metadata(
+	const char *path,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	mm_segment_t old_fs;
+	struct file *file;
+	struct iovec iov[1];
+	int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+			path);
+		return -ENODEV;
+	}
+
+	iov[0].iov_base = &md_buf[0];
+	iov[0].iov_len = md_buf_len;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -EIO;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+/*
+ * Called with tg_pt_gp->tg_pt_gp_md_mutex held
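+ *
+ * Emits key=value lines (tg_pt_gp_id, alua_access_state, alua_access_status)
+ * to /var/target/alua/tpgs_<unit_serial>/<tg_pt_gp config item name>.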
+ */
+static int core_alua_update_tpg_primary_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int primary_state,
+	unsigned char *md_buf)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_wwn *wwn = &su_dev->t10_wwn;
+	char path[ALUA_METADATA_PATH_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+
+	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+			"tg_pt_gp_id=%hu\n"
+			"alua_access_state=0x%02x\n"
+			"alua_access_status=0x%02x\n",
+			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_alua_access_status);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN,
+		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
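+/*
+ * Move a single target port group to new_state: mark it TRANSITION, honor
+ * the optional transition delay, queue ASYMMETRIC ACCESS STATE CHANGED unit
+ * attentions for every mapped LUN on each member port, optionally write the
+ * primary state metadata, then commit new_state.
+ */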
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	struct se_port *l_port,
+	struct se_node_acl *nacl,
+	unsigned char *md_buf,
+	int new_state,
+	int explict)
+{
+	struct se_dev_entry *se_deve;
+	struct se_lun_acl *lacl;
+	struct se_port *port;
+	struct t10_alua_tg_pt_gp_member *mem;
+	int old_state = 0;
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+			ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
+				ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+				ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
+				tg_pt_gp_mem_list) {
+		port = mem->tg_pt;
+		/*
+		 * After an implicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition for the initiator port associated with every I_T
+		 * nexus with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED.
+		 *
+		 * After an explicit target port asymmetric access state
+		 * change, a device server shall establish a unit attention
+		 * condition with the additional sense code set to ASYMMETRIC
+		 * ACCESS STATE CHANGED for the initiator port associated with
+		 * every I_T nexus other than the I_T nexus on which the SET
+		 * TARGET PORT GROUPS command was received.
+		 */
+		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(se_deve, &port->sep_alua_list,
+					alua_port_list) {
+			lacl = se_deve->se_lun_acl;
+			/*
+			 * se_deve->se_lun_acl pointer may be NULL for an
+			 * entry created without explicit Node+MappedLUN ACLs
+			 */
+			if (!(lacl))
+				continue;
+
+			if (explict &&
+			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
+			   (l_port != NULL) && (l_port == port))
+				continue;
+
+			core_scsi3_ua_allocate(lacl->se_lun_nacl,
+				se_deve->mapped_lun, 0x2A,
+				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+	/*
+	 * Update the ALUA metadata buf that has been allocated in
+	 * core_alua_do_port_transition().  This metadata will be written
+	 * to struct file.
+	 *
+	 * Note that there is the case where we do not want to update the
+	 * metadata when the saved metadata is being parsed in userspace
+	 * when setting the existing port access state and access status.
+	 *
+	 * Also note that the failure to write out the ALUA metadata to
+	 * struct file does NOT affect the actual ALUA transition.
+	 */
+	if (tg_pt_gp->tg_pt_gp_write_metadata) {
+		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp,
+					new_state, md_buf);
+		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+	}
+	/*
+	 * Set the current primary ALUA access state to the requested new state
+	 */
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+
+	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" from primary access state %s to %s\n", (explict) ? "explict" :
+		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+		core_alua_dump_state(new_state));
+
+	return 0;
+}
+
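+/*
+ * Entry point for a primary ALUA access state change.  For members of the
+ * default_lu_gp only the passed target port group is transitioned; for any
+ * other LU group, every associated storage object is walked and each target
+ * port group with a matching ID is transitioned as well.
+ */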
+int core_alua_do_port_transition(
+	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
+	struct se_device *l_dev,
+	struct se_port *l_port,
+	struct se_node_acl *l_nacl,
+	int new_state,
+	int explict)
+{
+	struct se_device *dev;
+	struct se_port *port;
+	struct se_subsystem_dev *su_dev;
+	struct se_node_acl *nacl;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	int primary;
+
+	if (core_alua_check_transition(new_state, &primary) != 0)
+		return -EINVAL;
+
+	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
+	if (!(md_buf)) {
+		printk(KERN_ERR "Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
+
+	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
+	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = local_lu_gp_mem->lu_gp;
+	atomic_inc(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
+	/*
+	 * For storage objects that are members of the 'default_lu_gp',
+	 * we only do transition on the passed *l_tg_pt_gp, and not
+	 * on all of the matching target port group IDs in default_lu_gp.
+	 */
+	if (!(lu_gp->lu_gp_id)) {
+		/*
+		 * core_alua_do_transition_tg_pt() will always return
+		 * success.
+		 */
+		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
+					md_buf, new_state, explict);
+		atomic_dec(&lu_gp->lu_gp_ref_cnt);
+		smp_mb__after_atomic_dec();
+		kfree(md_buf);
+		return 0;
+	}
+	/*
+	 * For all other LU groups aside from 'default_lu_gp', walk all of
+	 * the associated storage objects looking for a matching target port
+	 * group ID from the local target port group.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
+				lu_gp_mem_list) {
+
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&lu_gp->lu_gp_lock);
+
+		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		list_for_each_entry(tg_pt_gp,
+				&T10_ALUA(su_dev)->tg_pt_gps_list,
+				tg_pt_gp_list) {
+
+			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				continue;
+			/*
+			 * If the target port asymmetric access state is
+			 * changed for any target port group accessible via
+			 * a logical unit within a LU group, the target port
+			 * group asymmetric access states for the same target
+			 * port group accessible via other logical units in
+			 * that LU group will also change.
+			 */
+			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
+				continue;
+
+			if (l_tg_pt_gp == tg_pt_gp) {
+				port = l_port;
+				nacl = l_nacl;
+			} else {
+				port = NULL;
+				nacl = NULL;
+			}
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			/*
+			 * core_alua_do_transition_tg_pt() will always return
+			 * success.
+			 */
+			core_alua_do_transition_tg_pt(tg_pt_gp, port,
+					nacl, md_buf, new_state, explict);
+
+			spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			smp_mb__after_atomic_dec();
+		}
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+		" Group IDs: %hu %s transition to primary state: %s\n",
+		config_item_name(&lu_gp->lu_gp_group.cg_item),
+		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
+		core_alua_dump_state(new_state));
+
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	kfree(md_buf);
+	return 0;
+}
+
+/*
+ * Called with port->sep_tg_pt_md_mutex held
+ */
+static int core_alua_update_tpg_secondary_metadata(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	unsigned char *md_buf,
+	u32 md_buf_len)
+{
+	struct se_portal_group *se_tpg = port->sep_tpg;
+	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+	int len;
+
+	memset(path, 0, ALUA_METADATA_PATH_LEN);
+	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+
+	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
+			TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+
+	if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+			"alua_tg_pt_status=0x%02x\n",
+			atomic_read(&port->sep_tg_pt_secondary_offline),
+			port->sep_tg_pt_secondary_stat);
+
+	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
+			TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+			port->sep_lun->unpacked_lun);
+
+	return core_alua_write_tpg_metadata(path, md_buf, len);
+}
+
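+/*
+ * Set or clear the secondary (per struct se_port) OFFLINE state, apply the
+ * optional transition delay, and persist the secondary state and status
+ * metadata when sep_tg_pt_secondary_write_md has been enabled.
+ */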
+static int core_alua_set_tg_pt_secondary_state(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct se_port *port,
+	int explict,
+	int offline)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	unsigned char *md_buf;
+	u32 md_buf_len;
+	int trans_delay_msecs;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (!(tg_pt_gp)) {
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_ERR "Unable to complete secondary state"
+				" transition\n");
+		return -1;
+	}
+	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
+	/*
+	 * Set the secondary ALUA target port access state to OFFLINE
+	 * or release the previously secondary state for struct se_port
+	 */
+	if (offline)
+		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+	else
+		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+
+	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
+	port->sep_tg_pt_secondary_stat = (explict) ?
+			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
+			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+
+	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+		" to secondary access state: %s\n", (explict) ? "explict" :
+		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
+
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	/*
+	 * Do the optional transition delay after we set the secondary
+	 * ALUA access state.
+	 */
+	if (trans_delay_msecs != 0)
+		msleep_interruptible(trans_delay_msecs);
+	/*
+	 * See if we need to update the ALUA fabric port metadata for
+	 * secondary state and status
+	 */
+	if (port->sep_tg_pt_secondary_write_md) {
+		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
+		if (!(md_buf)) {
+			printk(KERN_ERR "Unable to allocate md_buf for"
+				" secondary ALUA access metadata\n");
+			return -1;
+		}
+		mutex_lock(&port->sep_tg_pt_md_mutex);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
+				md_buf, md_buf_len);
+		mutex_unlock(&port->sep_tg_pt_md_mutex);
+
+		kfree(md_buf);
+	}
+
+	return 0;
+}
+
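+/*
+ * Allocate and initialize a struct t10_alua_lu_gp.  When def_group is set,
+ * an ID is reserved immediately for the global default_lu_gp; otherwise the
+ * ID is assigned later via core_alua_set_lu_gp_id().
+ */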
+struct t10_alua_lu_gp *
+core_alua_allocate_lu_gp(const char *name, int def_group)
+{
+	struct t10_alua_lu_gp *lu_gp;
+
+	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
+	if (!(lu_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
+	spin_lock_init(&lu_gp->lu_gp_lock);
+	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
+
+	if (def_group) {
+		lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+		lu_gp->lu_gp_valid_id = 1;
+		se_global->alua_lu_gps_count++;
+	}
+
+	return lu_gp;
+}
+
+int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
+{
+	struct t10_alua_lu_gp *lu_gp_tmp;
+	u16 lu_gp_id_tmp;
+	/*
+	 * The lu_gp->lu_gp_id may only be set once.
+	 */
+	if (lu_gp->lu_gp_valid_id) {
+		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&se_global->lu_gps_lock);
+	if (se_global->alua_lu_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+				" 0x0000ffff reached\n");
+		spin_unlock(&se_global->lu_gps_lock);
+		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+		return -1;
+	}
+again:
+	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
+				se_global->alua_lu_gps_counter++;
+
+	list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
+			if (!(lu_gp_id))
+				goto again;
+
+			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+				" already exists, ignoring request\n",
+				lu_gp_id);
+			spin_unlock(&se_global->lu_gps_lock);
+			return -1;
+		}
+	}
+
+	lu_gp->lu_gp_id = lu_gp_id_tmp;
+	lu_gp->lu_gp_valid_id = 1;
+	list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
+	se_global->alua_lu_gps_count++;
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return 0;
+}
+
+static struct t10_alua_lu_gp_member *
+core_alua_allocate_lu_gp_mem(struct se_device *dev)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
+	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
+	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
+
+	lu_gp_mem->lu_gp_mem_dev = dev;
+	dev->dev_alua_lu_gp_mem = lu_gp_mem;
+
+	return lu_gp_mem;
+}
+
+void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
+{
+	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has
+	 * already been called from target_core_alua_drop_lu_gp().
+	 *
+	 * Here, we remove the *lu_gp from the global list so that
+	 * no associations can be made while we are releasing
+	 * struct t10_alua_lu_gp.
+	 */
+	spin_lock(&se_global->lu_gps_lock);
+	atomic_set(&lu_gp->lu_gp_shutdown, 1);
+	list_del(&lu_gp->lu_gp_list);
+	se_global->alua_lu_gps_count--;
+	spin_unlock(&se_global->lu_gps_lock);
+	/*
+	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
+	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
+	 * released with core_alua_put_lu_gp_from_name()
+	 */
+	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_lu_gp * from all associated
+	 * struct se_device.
+	 */
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
+				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		/*
+		 * lu_gp_mem is associated with a single
+		 * struct se_device->dev_alua_lu_gp_mem, and is released when
+		 * struct se_device is released via core_alua_free_lu_gp_mem().
+		 *
+		 * If the passed lu_gp does NOT match the default_lu_gp, assume
+		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
+		 */
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		if (lu_gp != se_global->default_lu_gp)
+			__core_alua_attach_lu_gp_mem(lu_gp_mem,
+					se_global->default_lu_gp);
+		else
+			lu_gp_mem->lu_gp = NULL;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		spin_lock(&lu_gp->lu_gp_lock);
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
+}
+
+void core_alua_free_lu_gp_mem(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem))
+		return;
+
+	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		spin_lock(&lu_gp->lu_gp_lock);
+		if (lu_gp_mem->lu_gp_assoc) {
+			list_del(&lu_gp_mem->lu_gp_mem_list);
+			lu_gp->lu_gp_members--;
+			lu_gp_mem->lu_gp_assoc = 0;
+		}
+		spin_unlock(&lu_gp->lu_gp_lock);
+		lu_gp_mem->lu_gp = NULL;
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
+}
+
+struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_item *ci;
+
+	spin_lock(&se_global->lu_gps_lock);
+	list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
+		if (!(lu_gp->lu_gp_valid_id))
+			continue;
+		ci = &lu_gp->lu_gp_group.cg_item;
+		if (!(strcmp(config_item_name(ci), name))) {
+			atomic_inc(&lu_gp->lu_gp_ref_cnt);
+			spin_unlock(&se_global->lu_gps_lock);
+			return lu_gp;
+		}
+	}
+	spin_unlock(&se_global->lu_gps_lock);
+
+	return NULL;
+}
+
+void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&se_global->lu_gps_lock);
+	atomic_dec(&lu_gp->lu_gp_ref_cnt);
+	spin_unlock(&se_global->lu_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
+ */
+void __core_alua_attach_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	lu_gp_mem->lu_gp = lu_gp;
+	lu_gp_mem->lu_gp_assoc = 1;
+	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
+	lu_gp->lu_gp_members++;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
+ */
+void __core_alua_drop_lu_gp_mem(
+	struct t10_alua_lu_gp_member *lu_gp_mem,
+	struct t10_alua_lu_gp *lu_gp)
+{
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_del(&lu_gp_mem->lu_gp_mem_list);
+	lu_gp_mem->lu_gp = NULL;
+	lu_gp_mem->lu_gp_assoc = 0;
+	lu_gp->lu_gp_members--;
+	spin_unlock(&lu_gp->lu_gp_lock);
+}
+
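+/*
+ * Allocate and initialize a struct t10_alua_tg_pt_gp in the Active/Optimized
+ * state with both implicit and explicit ALUA enabled and the default delay
+ * values.  When def_group is set, an ID is assigned immediately and the group
+ * is added to the su_dev list of target port groups.
+ */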
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+	struct se_subsystem_dev *su_dev,
+	const char *name,
+	int def_group)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
+	if (!(tg_pt_gp)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
+	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
+	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
+	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+	/*
+	 * Enable both explict and implict ALUA support by default
+	 */
+	tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+	/*
+	 * Set the default Active/NonOptimized Delay in milliseconds
+	 */
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
+
+	if (def_group) {
+		spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		tg_pt_gp->tg_pt_gp_id =
+				T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+		tg_pt_gp->tg_pt_gp_valid_id = 1;
+		T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			      &T10_ALUA(su_dev)->tg_pt_gps_list);
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	}
+
+	return tg_pt_gp;
+}
+
+int core_alua_set_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u16 tg_pt_gp_id)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
+	u16 tg_pt_gp_id_tmp;
+	/*
+	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
+	 */
+	if (tg_pt_gp->tg_pt_gp_valid_id) {
+		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+			" ignoring request\n");
+		return -1;
+	}
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
+		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+			" 0x0000ffff reached\n");
+		spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+		return -1;
+	}
+again:
+	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
+			T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+
+	list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
+			if (!(tg_pt_gp_id))
+				goto again;
+
+			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+				" exists, ignoring request\n", tg_pt_gp_id);
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			return -1;
+		}
+	}
+
+	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
+	tg_pt_gp->tg_pt_gp_valid_id = 1;
+	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
+			&T10_ALUA(su_dev)->tg_pt_gps_list);
+	T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+	return 0;
+}
+
+struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+	struct se_port *port)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
+				GFP_KERNEL);
+	if (!(tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
+
+	tg_pt_gp_mem->tg_pt = port;
+	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
+	atomic_set(&port->sep_tg_pt_gp_active, 1);
+
+	return tg_pt_gp_mem;
+}
+
+void core_alua_free_tg_pt_gp(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+	/*
+	 * Once we have reached this point, config_item_put() has already
+	 * been called from target_core_alua_drop_tg_pt_gp().
+	 *
+	 * Here we remove *tg_pt_gp from the global list so that
+	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
+	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
+	 */
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_del(&tg_pt_gp->tg_pt_gp_list);
+	T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	/*
+	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
+	 * core_alua_get_tg_pt_gp_by_name() in
+	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
+	 * to be released with core_alua_put_tg_pt_gp_from_name().
+	 */
+	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
+		cpu_relax();
+	/*
+	 * Release reference to struct t10_alua_tg_pt_gp from all associated
+	 * struct se_port.
+	 */
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
+			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		/*
+		 * tg_pt_gp_mem is associated with a single
+		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
+		 * core_alua_free_tg_pt_gp_mem().
+		 *
+		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
+		 * assume we want to re-associate a given tg_pt_gp_mem with
+		 * default_tg_pt_gp.
+		 */
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					T10_ALUA(su_dev)->default_tg_pt_gp);
+		} else
+			tg_pt_gp_mem->tg_pt_gp = NULL;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
+}
+
+void core_alua_free_tg_pt_gp_mem(struct se_port *port)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem))
+		return;
+
+	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
+		cpu_relax();
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
+			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+			tg_pt_gp->tg_pt_gp_members--;
+			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+		}
+		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+		tg_pt_gp_mem->tg_pt_gp = NULL;
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
+}
+
+static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
+	struct se_subsystem_dev *su_dev,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct config_item *ci;
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+			tg_pt_gp_list) {
+		if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			continue;
+		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		if (!(strcmp(config_item_name(ci), name))) {
+			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+			spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+			return tg_pt_gp;
+		}
+	}
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+
+	return NULL;
+}
+
+static void core_alua_put_tg_pt_gp_from_name(
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+
+	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+void __core_alua_attach_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
+	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
+			&tg_pt_gp->tg_pt_gp_mem_list);
+	tg_pt_gp->tg_pt_gp_members++;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
+/*
+ * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
+ */
+static void __core_alua_drop_tg_pt_gp_mem(
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
+	tg_pt_gp_mem->tg_pt_gp = NULL;
+	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+	tg_pt_gp->tg_pt_gp_members--;
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
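+/*
+ * configfs show handler displaying the target port group association and the
+ * current primary/secondary ALUA access state and status for a struct se_port.
+ */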
+ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+{
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct config_item *tg_pt_ci;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0;
+
+	if (alua->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem))
+		return len;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
+		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
+			" %hu\nTG Port Primary Access State: %s\nTG Port "
+			"Primary Access Status: %s\nTG Port Secondary Access"
+			" State: %s\nTG Port Secondary Access Status: %s\n",
+			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
+			core_alua_dump_state(atomic_read(
+					&tg_pt_gp->tg_pt_gp_alua_access_state)),
+			core_alua_dump_status(
+				tg_pt_gp->tg_pt_gp_alua_access_status),
+			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+			"Offline" : "None",
+			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+	}
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+	return len;
+}
+
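+/*
+ * configfs store handler for changing a port's target port group association.
+ * Writing "NULL" moves the port back to the default_tg_pt_gp; any other alias
+ * is looked up by name and the port membership is moved to that group.
+ */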
+ssize_t core_alua_store_tg_pt_gp_info(
+	struct se_port *port,
+	const char *page,
+	size_t count)
+{
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+	int move = 0;
+
+	tpg = port->sep_tpg;
+	lun = port->sep_lun;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+			" %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		return -EINVAL;
+	}
+
+	if (count > TG_PT_GROUP_NAME_BUF) {
+		printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA target port group alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
+		 * struct t10_alua_tg_pt_gp.  This reference is released with
+		 * core_alua_put_tg_pt_gp_from_name() below.
+		 */
+		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+					strstrip(buf));
+		if (!(tg_pt_gp_new))
+			return -ENODEV;
+	}
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem)) {
+		if (tg_pt_gp_new)
+			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+		printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if ((tg_pt_gp)) {
+		/*
+		 * Clearing an existing tg_pt_gp association, and replacing
+		 * with the default_tg_pt_gp.
+		 */
+		if (!(tg_pt_gp_new)) {
+			printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
+				" alua/%s, ID: %hu back to"
+				" default_tg_pt_gp\n",
+				TPG_TFO(tpg)->tpg_get_wwn(tpg),
+				TPG_TFO(tpg)->tpg_get_tag(tpg),
+				config_item_name(&lun->lun_group.cg_item),
+				config_item_name(
+					&tg_pt_gp->tg_pt_gp_group.cg_item),
+				tg_pt_gp->tg_pt_gp_id);
+
+			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+					T10_ALUA(su_dev)->default_tg_pt_gp);
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
+		 */
+		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
+		move = 1;
+	}
+	/*
+	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
+	 */
+	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
+		"Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
+		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		config_item_name(&lun->lun_group.cg_item),
+		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
+		tg_pt_gp_new->tg_pt_gp_id);
+
+	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+	return count;
+}
+
+ssize_t core_alua_show_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
+	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
+		return sprintf(page, "Implict and Explict\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
+		return sprintf(page, "Implict\n");
+	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
+		return sprintf(page, "Explict\n");
+	else
+		return sprintf(page, "None\n");
+}
+
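+/*
+ * configfs store handler for alua_access_type:
+ *   0 = None, 1 = implicit ALUA, 2 = explicit ALUA, 3 = implicit and explicit
+ */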
+ssize_t core_alua_store_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_access_type\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
+		printk(KERN_ERR "Illegal value for alua_access_type:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	if (tmp == 3)
+		tg_pt_gp->tg_pt_gp_alua_access_type =
+			TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+	else if (tmp == 2)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+	else if (tmp == 1)
+		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+	else
+		tg_pt_gp->tg_pt_gp_alua_access_type = 0;
+
+	return count;
+}
+
+ssize_t core_alua_show_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
+}
+
+ssize_t core_alua_store_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
+		printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_NONOP_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+}
+
+ssize_t core_alua_store_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+		return -EINVAL;
+	}
+	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
+		printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
+			ALUA_MAX_TRANS_DELAY_MSECS);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
+}
+
+ssize_t core_alua_store_preferred_bit(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
+{
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return sprintf(page, "%d\n",
+		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+}
+
+ssize_t core_alua_store_offline_bit(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned long tmp;
+	int ret;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
+	if (!(tg_pt_gp_mem)) {
+		printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
+			lun->lun_sep, 0, (int)tmp);
+	if (ret < 0)
+		return -EINVAL;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_status(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+}
+
+ssize_t core_alua_store_secondary_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+		return -EINVAL;
+	}
+	if ((tmp != ALUA_STATUS_NONE) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+				tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+
+	return count;
+}
+
+ssize_t core_alua_show_secondary_write_metadata(
+	struct se_lun *lun,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+			lun->lun_sep->sep_tg_pt_secondary_write_md);
+}
+
+ssize_t core_alua_store_secondary_write_metadata(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+		return -EINVAL;
+	}
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+				" %lu\n", tmp);
+		return -EINVAL;
+	}
+	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+
+	return count;
+}
+
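+/*
+ * Select the ALUA emulation mode at device setup time: SPC_ALUA_PASSTHROUGH
+ * for pSCSI backends without emulate_alua (or when force_pt is set), SPC-3
+ * emulated ALUA with default LU group membership for SPC-3 and later devices,
+ * and SPC2_ALUA_DISABLED otherwise.
+ */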
+int core_setup_alua(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_alua *alua = T10_ALUA(su_dev);
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
+	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but emulate SCSI logic themselves.
+	 */
+	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+		alua->alua_type = SPC_ALUA_PASSTHROUGH;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+			" emulation\n", TRANSPORT(dev)->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated ALUA.
+	 */
+	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
+			" device\n", TRANSPORT(dev)->name);
+		/*
+		 * Associate this struct se_device with the default ALUA
+		 * LU Group.
+		 */
+		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
+		if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
+			return -1;
+
+		alua->alua_type = SPC3_ALUA_EMULATED;
+		alua->alua_state_check = &core_alua_state_check;
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		__core_alua_attach_lu_gp_mem(lu_gp_mem,
+				se_global->default_lu_gp);
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+			" core/alua/lu_gps/default_lu_gp\n",
+			TRANSPORT(dev)->name);
+	} else {
+		alua->alua_type = SPC2_ALUA_DISABLED;
+		alua->alua_state_check = &core_alua_state_check_nop;
+		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
+			" device\n", TRANSPORT(dev)->name);
+	}
+
+	return 0;
+}
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
new file mode 100644
index 0000000..c86f97a
--- /dev/null
+++ b/drivers/target/target_core_alua.h
@@ -0,0 +1,126 @@
+#ifndef TARGET_CORE_ALUA_H
+#define TARGET_CORE_ALUA_H
+
+/*
+ * INQUIRY response data, TPGS Field
+ *
+ * from spc4r17 section 6.4.2 Table 135
+ */
+#define TPGS_NO_ALUA				0x00
+#define TPGS_IMPLICT_ALUA			0x10
+#define TPGS_EXPLICT_ALUA			0x20
+
+/*
+ * ASYMMETRIC ACCESS STATE field
+ *
+ * from spc4r17 section 6.27 Table 245
+ */
+#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED	0x0
+#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED	0x1
+#define ALUA_ACCESS_STATE_STANDBY		0x2
+#define ALUA_ACCESS_STATE_UNAVAILABLE		0x3
+#define ALUA_ACCESS_STATE_OFFLINE		0xe
+#define ALUA_ACCESS_STATE_TRANSITION		0xf
+
+/*
+ * REPORT_TARGET_PORT_GROUP STATUS CODE
+ *
+ * from spc4r17 section 6.27 Table 246
+ */
+#define ALUA_STATUS_NONE				0x00
+#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG		0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA		0x02
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_04H_ALUA_STATE_TRANSITION			0x0a
+#define ASCQ_04H_ALUA_TG_PT_STANDBY			0x0b
+#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE			0x0c
+#define ASCQ_04H_ALUA_OFFLINE				0x12
+
+/*
+ * Used as the default for Active/NonOptimized delay (in milliseconds)
+ * This can also be changed via configfs on a per target port group basis.
+ */
+#define ALUA_DEFAULT_NONOP_DELAY_MSECS			100
+#define ALUA_MAX_NONOP_DELAY_MSECS			10000 /* 10 seconds */
+/*
+ * Used for the implicit and explicit ALUA transition delay, which is disabled
+ * by default and is intended for debugging client side ALUA code.
+ */
+#define ALUA_DEFAULT_TRANS_DELAY_MSECS			0
+#define ALUA_MAX_TRANS_DELAY_MSECS			30000 /* 30 seconds */
+/*
+ * Used by core_alua_update_tpg_primary_metadata() and
+ * core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_METADATA_PATH_LEN				512
+/*
+ * Used by core_alua_update_tpg_secondary_metadata()
+ */
+#define ALUA_SECONDARY_METADATA_WWN_LEN			256
+
+extern struct kmem_cache *t10_alua_lu_gp_cache;
+extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
+extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+extern int core_emulate_report_target_port_groups(struct se_cmd *);
+extern int core_emulate_set_target_port_groups(struct se_cmd *);
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
+				struct se_device *, struct se_port *,
+				struct se_node_acl *, int, int);
+extern char *core_alua_dump_status(int);
+extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
+extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
+extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
+extern void core_alua_free_lu_gp_mem(struct se_device *);
+extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
+extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
+extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
+					struct t10_alua_lu_gp *);
+extern void core_alua_drop_lu_gp_dev(struct se_device *);
+extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
+			struct se_subsystem_dev *, const char *, int);
+extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
+extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
+					struct se_port *);
+extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
+extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
+extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
+					struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+						size_t);
+extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
+extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+						char *);
+extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
+					char *);
+extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
+					const char *, size_t);
+extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
+extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
+					size_t);
+extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
+extern ssize_t core_alua_store_secondary_status(struct se_lun *,
+					const char *, size_t);
+extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
+					char *);
+extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
+					const char *, size_t);
+extern int core_setup_alua(struct se_device *, int);
+
+#endif /* TARGET_CORE_ALUA_H */
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
new file mode 100644
index 0000000..366080b
--- /dev/null
+++ b/drivers/target/target_core_cdb.c
@@ -0,0 +1,1131 @@
+/*
+ * CDB emulation for non-READ/WRITE commands.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include "target_core_ua.h"
+
+static void
+target_fill_alua_data(struct se_port *port, unsigned char *buf)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+
+	/*
+	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
+	 */
+	buf[5]	= 0x80;
+
+	/*
+	 * Set TPGS field for explicit and/or implicit ALUA access type
+	 * and operation.
+	 *
+	 * See spc4r17 section 6.4.2 Table 135
+	 */
+	if (!port)
+		return;
+	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+	if (!tg_pt_gp_mem)
+		return;
+
+	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+	if (tg_pt_gp)
+		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
+	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+}
+
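+/*
+ * Generate the standard (EVPD=0) INQUIRY response payload, truncating the
+ * additional length for small allocation lengths and setting the SCCS and
+ * TPGS fields when SPC-3 ALUA emulation is enabled.
+ */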
+static int
+target_emulate_inquiry_std(struct se_cmd *cmd)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+
+	/*
+	 * Make sure we at least have 6 bytes of INQUIRY response
+	 * payload going back for EVPD=0
+	 */
+	if (cmd->data_length < 6) {
+		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+			" too small for EVPD=0\n", cmd->data_length);
+		return -1;
+	}
+
+	buf[0] = dev->transport->get_device_type(dev);
+	if (buf[0] == TYPE_TAPE)
+		buf[1] = 0x80;
+	buf[2] = dev->transport->get_device_rev(dev);
+
+	/*
+	 * Enable SCCS and TPGS fields for Emulated ALUA
+	 */
+	if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+		target_fill_alua_data(lun->lun_sep, buf);
+
+	if (cmd->data_length < 8) {
+		buf[4] = 1; /* Set additional length to 1 */
+		return 0;
+	}
+
+	buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
+
+	/*
+	 * Do not include vendor, product, revision info in INQUIRY
+	 * response payload for CDBs with a small allocation length.
+	 */
+	if (cmd->data_length < 36) {
+		buf[4] = 3; /* Set additional length to 3 */
+		return 0;
+	}
+
+	snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+	snprintf((unsigned char *)&buf[16], 16, "%s",
+		 &DEV_T10_WWN(dev)->model[0]);
+	snprintf((unsigned char *)&buf[32], 4, "%s",
+		 &DEV_T10_WWN(dev)->revision[0]);
+	buf[4] = 31; /* Set additional length to 31 */
+	return 0;
+}
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+	buf[1] = 0x00;
+	if (cmd->data_length < 8)
+		return 0;
+
+	buf[4] = 0x0;
+	/*
+	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
+	 * Registered Extended LUN WWN has been set via ConfigFS
+	 * during device creation/restart.
+	 */
+	if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		buf[3] = 3;
+		buf[5] = 0x80;
+		buf[6] = 0x83;
+		buf[7] = 0x86;
+	}
+
+	return 0;
+}
+
+/* unit serial number */
+static int
+target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	u16 len = 0;
+
+	buf[1] = 0x80;
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		u32 unit_serial_len;
+
+		unit_serial_len =
+			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		if (((len + 4) + unit_serial_len) > cmd->data_length) {
+			len += unit_serial_len;
+			buf[2] = ((len >> 8) & 0xff);
+			buf[3] = (len & 0xff);
+			return 0;
+		}
+		len += sprintf((unsigned char *)&buf[4], "%s",
+			&DEV_T10_WWN(dev)->unit_serial[0]);
+		len++; /* Extra Byte for NULL Terminator */
+		buf[3] = len;
+	}
+	return 0;
+}
+
+/*
+ * Device identification VPD, for a complete list of
+ * DESIGNATOR TYPEs see spc4r17 Table 459.
+ */
+static int
+target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_lun *lun = SE_LUN(cmd);
+	struct se_port *port = NULL;
+	struct se_portal_group *tpg = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	unsigned char binary, binary_new;
+	unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+	u32 prod_len;
+	u32 unit_serial_len, off = 0;
+	int i;
+	u16 len = 0, id_len;
+
+	buf[1] = 0x83;
+	off = 4;
+
+	/*
+	 * NAA IEEE Registered Extended Assigned designator format, see
+	 * spc4r17 section 7.7.3.6.5
+	 *
+	 * We depend upon a target_core_mod/ConfigFS provided
+	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
+	 * value in order to return the NAA id.
+	 */
+	if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+		goto check_t10_vend_desc;
+
+	if (off + 20 > cmd->data_length)
+		goto check_t10_vend_desc;
+
+	/* CODE SET == Binary */
+	buf[off++] = 0x1;
+
+	/* Set ASSOCIATION == addressed logical unit: 00b */
+	buf[off] = 0x00;
+
+	/* Identifier/Designator type == NAA identifier */
+	buf[off++] = 0x3;
+	off++;
+
+	/* Identifier/Designator length */
+	buf[off++] = 0x10;
+
+	/*
+	 * Start NAA IEEE Registered Extended Identifier/Designator
+	 */
+	buf[off++] = (0x6 << 4);
+
+	/*
+	 * Use OpenFabrics IEEE Company ID: 00 14 05
+	 */
+	buf[off++] = 0x01;
+	buf[off++] = 0x40;
+	buf[off] = (0x5 << 4);
+
+	/*
+	 * Return ConfigFS Unit Serial Number information for
+	 * VENDOR_SPECIFIC_IDENTIFIER and
+	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
+	 */
+	binary = transport_asciihex_to_binaryhex(
+				&DEV_T10_WWN(dev)->unit_serial[0]);
+	buf[off++] |= (binary & 0xf0) >> 4;
+	for (i = 0; i < 24; i += 2) {
+		binary_new = transport_asciihex_to_binaryhex(
+			&DEV_T10_WWN(dev)->unit_serial[i+2]);
+		buf[off] = (binary & 0x0f) << 4;
+		buf[off++] |= (binary_new & 0xf0) >> 4;
+		binary = binary_new;
+	}
+	len = 20;
+	off = (len + 4);
+
+check_t10_vend_desc:
+	/*
+	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
+	 */
+	id_len = 8; /* For Vendor field */
+	prod_len = 4; /* For VPD Header */
+	prod_len += 8; /* For Vendor field */
+	prod_len += strlen(prod);
+	prod_len++; /* For : */
+
+	if (dev->se_sub_dev->su_dev_flags &
+			SDF_EMULATED_VPD_UNIT_SERIAL) {
+		unit_serial_len =
+			strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+		unit_serial_len++; /* For NULL Terminator */
+
+		if ((len + (id_len + 4) +
+		    (prod_len + unit_serial_len)) >
+				cmd->data_length) {
+			len += (prod_len + unit_serial_len);
+			goto check_port;
+		}
+		id_len += sprintf((unsigned char *)&buf[off+12],
+				"%s:%s", prod,
+				&DEV_T10_WWN(dev)->unit_serial[0]);
+	}
+	buf[off] = 0x2; /* ASCII */
+	buf[off+1] = 0x1; /* T10 Vendor ID */
+	buf[off+2] = 0x0;
+	memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+	/* Extra Byte for NULL Terminator */
+	id_len++;
+	/* Identifier Length */
+	buf[off+3] = id_len;
+	/* Header size for Designation descriptor */
+	len += (id_len + 4);
+	off += (id_len + 4);
+	/*
+	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
+	 */
+check_port:
+	port = lun->lun_sep;
+	if (port) {
+		struct t10_alua_lu_gp *lu_gp;
+		u32 padding, scsi_name_len;
+		u16 lu_gp_id = 0;
+		u16 tg_pt_gp_id = 0;
+		u16 tpgt;
+
+		tpg = port->sep_tpg;
+		/*
+		 * Relative target port identifier, see spc4r17
+		 * section 7.7.3.7
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_tpgi;
+		}
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Relative target port identifier */
+		buf[off++] |= 0x4;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		/* Skip over Obsolete field in RTPI payload
+		 * in Table 472 */
+		off += 2;
+		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+		buf[off++] = (port->sep_rtpi & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Target port group identifier, see spc4r17
+		 * section 7.7.3.8
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_tpgi:
+		if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+				SPC3_ALUA_EMULATED)
+			goto check_scsi_name;
+
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_lu_gp;
+		}
+		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+		if (!tg_pt_gp_mem)
+			goto check_lu_gp;
+
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+		if (!(tg_pt_gp)) {
+			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+			goto check_lu_gp;
+		}
+		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == Target port group identifier */
+		buf[off++] |= 0x5;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
+		buf[off++] = (tg_pt_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * Logical Unit Group identifier, see spc4r17
+		 * section 7.7.3.8
+		 */
+check_lu_gp:
+		if (((len + 4) + 8) > cmd->data_length) {
+			len += 8;
+			goto check_scsi_name;
+		}
+		lu_gp_mem = dev->dev_alua_lu_gp_mem;
+		if (!(lu_gp_mem))
+			goto check_scsi_name;
+
+		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+		lu_gp = lu_gp_mem->lu_gp;
+		if (!(lu_gp)) {
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+			goto check_scsi_name;
+		}
+		lu_gp_id = lu_gp->lu_gp_id;
+		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+		buf[off++] |= 0x1; /* CODE SET == Binary */
+		/* DESIGNATOR TYPE == Logical Unit Group identifier */
+		buf[off++] |= 0x6;
+		off++; /* Skip over Reserved */
+		buf[off++] = 4; /* DESIGNATOR LENGTH */
+		off += 2; /* Skip over Reserved Field */
+		buf[off++] = ((lu_gp_id >> 8) & 0xff);
+		buf[off++] = (lu_gp_id & 0xff);
+		len += 8; /* Header size + Designation descriptor */
+		/*
+		 * SCSI name string designator, see spc4r17
+		 * section 7.7.3.11
+		 *
+		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
+		 * section 7.5.1 Table 362
+		 */
+check_scsi_name:
+		scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+		/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
+		scsi_name_len += 10;
+		/* Check for 4-byte padding */
+		padding = ((-scsi_name_len) & 3);
+		if (padding != 0)
+			scsi_name_len += padding;
+		/* Header size + Designation descriptor */
+		scsi_name_len += 4;
+
+		if (((len + 4) + scsi_name_len) > cmd->data_length) {
+			len += scsi_name_len;
+			goto set_len;
+		}
+		buf[off] =
+			(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+		buf[off] = 0x80; /* Set PIV=1 */
+		/* Set ASSOCIATION == target port: 01b */
+		buf[off] |= 0x10;
+		/* DESIGNATOR TYPE == SCSI name string */
+		buf[off++] |= 0x8;
+		off += 2; /* Skip over Reserved and length */
+		/*
+		 * SCSI name string identifier containing $FABRIC_MOD
+		 * dependent information.  For LIO-Target and iSCSI
+		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
+		 * UTF-8 encoding.
+		 */
+		tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
+					TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+		scsi_name_len += 1 /* Include NULL terminator */;
+		/*
+		 * The null-terminated, null-padded (see 4.4.2) SCSI
+		 * NAME STRING field contains a UTF-8 format string.
+		 * The number of bytes in the SCSI NAME STRING field
+		 * (i.e., the value in the DESIGNATOR LENGTH field)
+		 * shall be no larger than 256 and shall be a multiple
+		 * of four.
+		 */
+		if (padding)
+			scsi_name_len += padding;
+
+		buf[off-1] = scsi_name_len;
+		off += scsi_name_len;
+		/* Header size + Designation descriptor */
+		len += (scsi_name_len + 4);
+	}
+set_len:
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
+	return 0;
+}
+
+/* Extended INQUIRY Data VPD Page */
+static int
+target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+{
+	if (cmd->data_length < 60)
+		return 0;
+
+	buf[1] = 0x86;
+	buf[2] = 0x3c;
+	/* Set HEADSUP, ORDSUP, SIMPSUP */
+	buf[5] = 0x07;
+
+	/* If WriteCache emulation is enabled, set V_SUP */
+	if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+		buf[6] = 0x01;
+	return 0;
+}
+
+/* Block Limits VPD page */
+static int
+target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	int have_tp = 0;
+
+	/*
+	 * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+	 * emulate_tpu=1 or emulate_tpws=1 we expect a
+	 * different page length for Thin Provisioning.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		have_tp = 1;
+
+	if (cmd->data_length < (0x10 + 4)) {
+		printk(KERN_INFO "Received data_length: %u"
+			" too small for EVPD 0xb0\n",
+			cmd->data_length);
+		return -1;
+	}
+
+	if (have_tp && cmd->data_length < (0x3c + 4)) {
+		printk(KERN_INFO "Received data_length: %u"
+			" too small for TPE=1 EVPD 0xb0\n",
+			cmd->data_length);
+		have_tp = 0;
+	}
+
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[1] = 0xb0;
+	buf[3] = have_tp ? 0x3c : 0x10;
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
+	 */
+	put_unaligned_be16(1, &buf[6]);
+
+	/*
+	 * Set MAXIMUM TRANSFER LENGTH
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+
+	/*
+	 * Set OPTIMAL TRANSFER LENGTH
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+
+	/*
+	 * Exit now if we don't support TP or the initiator sent too
+	 * short a buffer.
+	 */
+	if (!have_tp || cmd->data_length < (0x3c + 4))
+		return 0;
+
+	/*
+	 * Set MAXIMUM UNMAP LBA COUNT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+
+	/*
+	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+			   &buf[24]);
+
+	/*
+	 * Set OPTIMAL UNMAP GRANULARITY
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+
+	/*
+	 * UNMAP GRANULARITY ALIGNMENT
+	 */
+	put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+			   &buf[32]);
+	if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+		buf[32] |= 0x80; /* Set the UGAVALID bit */
+
+	return 0;
+}
+
+/* Thin Provisioning VPD */
+static int
+target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	/*
+	 * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+	 *
+	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
+	 * zero, then the page length shall be set to 0004h.  If the DP bit
+	 * is set to one, then the page length shall be set to the value
+	 * defined in table 162.
+	 */
+	buf[0] = dev->transport->get_device_type(dev);
+	buf[1] = 0xb2;
+
+	/*
+	 * Set Hardcoded length mentioned above for DP=0
+	 */
+	put_unaligned_be16(0x0004, &buf[2]);
+
+	/*
+	 * The THRESHOLD EXPONENT field indicates the threshold set size in
+	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
+	 * 2^(threshold exponent); e.g., an exponent of 10 gives 1024 LBAs).
+	 *
+	 * Note that this is currently set to 0x00 as mkp says it will be
+	 * changing again.  We can enable this once it has settled in T10
+	 * and is actually used by Linux/SCSI ML code.
+	 */
+	buf[4] = 0x00;
+
+	/*
+	 * A TPU bit set to one indicates that the device server supports
+	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
+	 * that the device server does not support the UNMAP command.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+		buf[5] = 0x80;
+
+	/*
+	 * A TPWS bit set to one indicates that the device server supports
+	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
+	 * A TPWS bit set to zero indicates that the device server does not
+	 * support the use of the WRITE SAME (16) command to unmap LBAs.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+		buf[5] |= 0x40;
+
+	return 0;
+}
+
+static int
+target_emulate_inquiry(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned char *cdb = cmd->t_task->t_task_cdb;
+
+	if (!(cdb[1] & 0x1))
+		return target_emulate_inquiry_std(cmd);
+
+	/*
+	 * Make sure we at least have 4 bytes of INQUIRY response
+	 * payload for 0x00 going back for EVPD=1.  Note that 0x80
+	 * and 0x83 will check for enough payload data length and
+	 * jump to the set_len: label when there is not enough inquiry EVPD
+	 * payload length left for the next outgoing EVPD descriptor.
+	 */
+	if (cmd->data_length < 4) {
+		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+			" too small for EVPD=1\n", cmd->data_length);
+		return -1;
+	}
+	buf[0] = dev->transport->get_device_type(dev);
+
+	switch (cdb[2]) {
+	case 0x00:
+		return target_emulate_evpd_00(cmd, buf);
+	case 0x80:
+		return target_emulate_evpd_80(cmd, buf);
+	case 0x83:
+		return target_emulate_evpd_83(cmd, buf);
+	case 0x86:
+		return target_emulate_evpd_86(cmd, buf);
+	case 0xb0:
+		return target_emulate_evpd_b0(cmd, buf);
+	case 0xb2:
+		return target_emulate_evpd_b2(cmd, buf);
+	default:
+		printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+target_emulate_readcapacity(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	u32 blocks = dev->transport->get_blocks(dev);
+
+	buf[0] = (blocks >> 24) & 0xff;
+	buf[1] = (blocks >> 16) & 0xff;
+	buf[2] = (blocks >> 8) & 0xff;
+	buf[3] = blocks & 0xff;
+	buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+	buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+	buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+	buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+	/*
+	 * Set max 32-bit blocks (0xFFFFFFFF) to signal that the initiator
+	 * should issue SERVICE ACTION IN READ_CAPACITY_16.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+
+	return 0;
+}
+
+static int
+target_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	unsigned long long blocks = dev->transport->get_blocks(dev);
+
+	buf[0] = (blocks >> 56) & 0xff;
+	buf[1] = (blocks >> 48) & 0xff;
+	buf[2] = (blocks >> 40) & 0xff;
+	buf[3] = (blocks >> 32) & 0xff;
+	buf[4] = (blocks >> 24) & 0xff;
+	buf[5] = (blocks >> 16) & 0xff;
+	buf[6] = (blocks >> 8) & 0xff;
+	buf[7] = blocks & 0xff;
+	buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
+	buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
+	buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
+	buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+	/*
+	 * Set Thin Provisioning Enable bit following sbc3r22 in section
+	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+	 */
+	if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+		buf[14] = 0x80;
+
+	return 0;
+}
+
+static int
+target_modesense_rwrecovery(unsigned char *p)
+{
+	p[0] = 0x01;
+	p[1] = 0x0a;
+
+	return 12;
+}
+
+static int
+target_modesense_control(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x0a;
+	p[1] = 0x0a;
+	p[2] = 2;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
+	 *
+	 * 00b: The logical unit shall clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when a
+	 * command is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
+	 * status.
+	 *
+	 * 10b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall not establish a unit attention condition when
+	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
+	 * CONFLICT status.
+	 *
+	 * 11b: The logical unit shall not clear any unit attention condition
+	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
+	 * status and shall establish a unit attention condition for the
+	 * initiator port associated with the I_T nexus on which the BUSY,
+	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
+	 * Depending on the status, the additional sense code shall be set to
+	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
+	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
+	 * command, a unit attention condition shall be established only once
+	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
+	 * of the number of commands completed with one of those status codes.
+	 */
+	p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
+	       (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+	/*
+	 * From spc4r17, section 7.4.6 Control mode Page
+	 *
+	 * Task Aborted Status (TAS) bit set to zero.
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+	p[8] = 0xff;
+	p[9] = 0xff;
+	p[11] = 30;
+
+	return 12;
+}
+
+static int
+target_modesense_caching(struct se_device *dev, unsigned char *p)
+{
+	p[0] = 0x08;
+	p[1] = 0x12;
+	if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+		p[2] = 0x04; /* Write Cache Enable */
+	p[12] = 0x20; /* Disabled Read Ahead */
+
+	return 20;
+}
+
+static void
+target_modesense_write_protect(unsigned char *buf, int type)
+{
+	/*
+	 * I believe that the WP bit (bit 7) in the mode header is the same for
+	 * all device types.
+	 */
+	switch (type) {
+	case TYPE_DISK:
+	case TYPE_TAPE:
+	default:
+		buf[0] |= 0x80; /* WP bit */
+		break;
+	}
+}
+
+static void
+target_modesense_dpofua(unsigned char *buf, int type)
+{
+	switch (type) {
+	case TYPE_DISK:
+		buf[0] |= 0x10; /* DPOFUA bit */
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+target_emulate_modesense(struct se_cmd *cmd, int ten)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *rbuf = cmd->t_task->t_task_buf;
+	int type = dev->transport->get_device_type(dev);
+	int offset = (ten) ? 8 : 4;
+	int length = 0;
+	unsigned char buf[SE_MODE_PAGE_BUF];
+
+	memset(buf, 0, SE_MODE_PAGE_BUF);
+
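+	/*
+	 * Per SPC, the mode parameter header occupies the first 8 bytes of
+	 * the response for MODE SENSE(10) and 4 bytes for MODE SENSE(6)
+	 * (hence the offset above); each requested mode page is built by
+	 * the helpers below directly after that header.
+	 */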
+	switch (cdb[2] & 0x3f) {
+	case 0x01:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		break;
+	case 0x08:
+		length = target_modesense_caching(dev, &buf[offset]);
+		break;
+	case 0x0a:
+		length = target_modesense_control(dev, &buf[offset]);
+		break;
+	case 0x3f:
+		length = target_modesense_rwrecovery(&buf[offset]);
+		length += target_modesense_caching(dev, &buf[offset+length]);
+		length += target_modesense_control(dev, &buf[offset+length]);
+		break;
+	default:
+		printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+				cdb[2] & 0x3f);
+		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+	}
+	offset += length;
+
+	if (ten) {
+		offset -= 2;
+		buf[0] = (offset >> 8) & 0xff;
+		buf[1] = offset & 0xff;
+
+		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[3], type);
+
+		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[3], type);
+
+		if ((offset + 2) > cmd->data_length)
+			offset = cmd->data_length;
+
+	} else {
+		offset -= 1;
+		buf[0] = offset & 0xff;
+
+		if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+		    (cmd->se_deve &&
+		    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+			target_modesense_write_protect(&buf[2], type);
+
+		if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+			target_modesense_dpofua(&buf[2], type);
+
+		if ((offset + 1) > cmd->data_length)
+			offset = cmd->data_length;
+	}
+	memcpy(rbuf, buf, offset);
+
+	return 0;
+}
+
+static int
+target_emulate_request_sense(struct se_cmd *cmd)
+{
+	unsigned char *cdb = cmd->t_task->t_task_cdb;
+	unsigned char *buf = cmd->t_task->t_task_buf;
+	u8 ua_asc = 0, ua_ascq = 0;
+
+	if (cdb[1] & 0x01) {
+		printk(KERN_ERR "REQUEST_SENSE description emulation not"
+			" supported\n");
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+	if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+		/*
+		 * CURRENT ERROR, UNIT ATTENTION
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		/*
+		 * Make sure request data length is enough for additional
+		 * sense data.
+		 */
+		if (cmd->data_length <= 18) {
+			buf[7] = 0x00;
+			return 0;
+		}
+		/*
+		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
+		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
+		buf[7] = 0x0A;
+	} else {
+		/*
+		 * CURRENT ERROR, NO SENSE
+		 */
+		buf[0] = 0x70;
+		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
+		/*
+		 * Make sure request data length is enough for additional
+		 * sense data.
+		 */
+		if (cmd->data_length <= 18) {
+			buf[7] = 0x00;
+			return 0;
+		}
+		/*
+		 * NO ADDITIONAL SENSE INFORMATION
+		 */
+		buf[SPC_ASC_KEY_OFFSET] = 0x00;
+		buf[7] = 0x0A;
+	}
+
+	return 0;
+}
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_unmap(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
+	unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+	sector_t lba;
+	unsigned int size = cmd->data_length, range;
+	int ret, offset;
+	unsigned short dl, bd_dl;
+
+	/* First UNMAP block descriptor starts at 8 byte offset */
+	offset = 8;
+	size -= 8;
+	dl = get_unaligned_be16(&cdb[0]);
+	bd_dl = get_unaligned_be16(&cdb[2]);
+	ptr = &buf[offset];
+	printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %u"
+		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
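+	/*
+	 * Each UNMAP block descriptor in the parameter data is 16 bytes:
+	 * an 8-byte LBA, a 4-byte number of logical blocks, and 4 reserved
+	 * bytes.  Walk the descriptors until the payload is exhausted.
+	 */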
+	while (size) {
+		lba = get_unaligned_be64(&ptr[0]);
+		range = get_unaligned_be32(&ptr[8]);
+		printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+				 (unsigned long long)lba, range);
+
+		ret = dev->transport->do_discard(dev, lba, range);
+		if (ret < 0) {
+			printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+					ret);
+			return -1;
+		}
+
+		ptr += 16;
+		size -= 16;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+/*
+ * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
+ * Note this is not used for TCM/pSCSI passthrough
+ */
+static int
+target_emulate_write_same(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	sector_t lba = cmd->t_task->t_task_lba;
+	unsigned int range;
+	int ret;
+
+	range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+
+	printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
+			 (unsigned long long)lba, range);
+
+	ret = dev->transport->do_discard(dev, lba, range);
+	if (ret < 0) {
+		printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
+		return -1;
+	}
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+	return 0;
+}
+
+int
+transport_emulate_control_cdb(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned short service_action;
+	int ret = 0;
+
+	switch (cmd->t_task->t_task_cdb[0]) {
+	case INQUIRY:
+		ret = target_emulate_inquiry(cmd);
+		break;
+	case READ_CAPACITY:
+		ret = target_emulate_readcapacity(cmd);
+		break;
+	case MODE_SENSE:
+		ret = target_emulate_modesense(cmd, 0);
+		break;
+	case MODE_SENSE_10:
+		ret = target_emulate_modesense(cmd, 1);
+		break;
+	case SERVICE_ACTION_IN:
+		switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			ret = target_emulate_readcapacity_16(cmd);
+			break;
+		default:
+			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+				cmd->t_task->t_task_cdb[1] & 0x1f);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		break;
+	case REQUEST_SENSE:
+		ret = target_emulate_request_sense(cmd);
+		break;
+	case UNMAP:
+		if (!dev->transport->do_discard) {
+			printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+					dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_unmap(task);
+		break;
+	case WRITE_SAME_16:
+		if (!dev->transport->do_discard) {
+			printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+					" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_write_same(task);
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action =
+			get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+		switch (service_action) {
+		case WRITE_SAME_32:
+			if (!dev->transport->do_discard) {
+				printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+					" supported for: %s\n",
+					dev->transport->name);
+				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+			}
+			ret = target_emulate_write_same(task);
+			break;
+		default:
+			printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+					" 0x%02x\n", service_action);
+			break;
+		}
+		break;
+	case SYNCHRONIZE_CACHE:
+	case 0x91: /* SYNCHRONIZE_CACHE_16: */
+		if (!dev->transport->do_sync_cache) {
+			printk(KERN_ERR
+				"SYNCHRONIZE_CACHE emulation not supported"
+				" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		dev->transport->do_sync_cache(task);
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+		break;
+	default:
+		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
+			cmd->t_task->t_task_cdb[0], dev->transport->name);
+		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+	}
+
+	if (ret < 0)
+		return ret;
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
new file mode 100644
index 0000000..2764510
--- /dev/null
+++ b/drivers/target/target_core_configfs.c
@@ -0,0 +1,3225 @@
+/*******************************************************************************
+ * Filename:  target_core_configfs.c
+ *
+ * This file contains ConfigFS logic for the Generic Target Engine project.
+ *
+ * Copyright (c) 2008-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+#include <linux/proc_fs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_rd.h"
+
+static struct list_head g_tf_list;
+static struct mutex g_tf_lock;
+
+struct target_core_configfs_attribute {
+	struct configfs_attribute attr;
+	ssize_t (*show)(void *, char *);
+	ssize_t (*store)(void *, const char *, size_t);
+};
+
+static inline struct se_hba *
+item_to_hba(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct se_hba, hba_group);
+}
+
+/*
+ * Attributes for /sys/kernel/config/target/
+ */
+static ssize_t target_core_attr_show(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      char *page)
+{
+	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
+		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+		utsname()->sysname, utsname()->machine);
+}
+
+static struct configfs_item_operations target_core_fabric_item_ops = {
+	.show_attribute = target_core_attr_show,
+};
+
+static struct configfs_attribute target_core_item_attr_version = {
+	.ca_owner	= THIS_MODULE,
+	.ca_name	= "version",
+	.ca_mode	= S_IRUGO,
+};
+
+static struct target_fabric_configfs *target_core_get_fabric(
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(name))
+		return NULL;
+
+	mutex_lock(&g_tf_lock);
+	list_for_each_entry(tf, &g_tf_list, tf_list) {
+		if (!(strcmp(tf->tf_name, name))) {
+			atomic_inc(&tf->tf_access_cnt);
+			mutex_unlock(&g_tf_lock);
+			return tf;
+		}
+	}
+	mutex_unlock(&g_tf_lock);
+
+	return NULL;
+}
+
+/*
+ * Called from struct target_core_group_ops->make_group()
+ */
+static struct config_group *target_core_register_fabric(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+			" %s\n", group, name);
+	/*
+	 * Ensure that TCM subsystem plugins are loaded at this point so
+	 * that the RAMDISK_DR virtual LUN 0 and all other struct se_port
+	 * LUN symlinks can be used.
+	 */
+	if (transport_subsystem_check_init() < 0)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Below are some hardcoded request_module() calls to automatically
+	 * load fabric modules when the following is called:
+	 *
+	 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
+	 *
+	 * Note that this does not limit which TCM fabric module can be
+	 * registered, but simply provides auto-loading logic for mkdir(2)
+	 * system calls on known TCM fabric module names.
+	 */
+	if (!(strncmp(name, "iscsi", 5))) {
+		/*
+		 * Automatically load the LIO Target fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/iscsi
+		 */
+		ret = request_module("iscsi_target_mod");
+		if (ret < 0) {
+			printk(KERN_ERR "request_module() failed for"
+				" iscsi_target_mod.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	} else if (!(strncmp(name, "loopback", 8))) {
+		/*
+		 * Automatically load the tcm_loop fabric module when the
+		 * following is called:
+		 *
+		 * mkdir -p $CONFIGFS/target/loopback
+		 */
+		ret = request_module("tcm_loop");
+		if (ret < 0) {
+			printk(KERN_ERR "request_module() failed for"
+				" tcm_loop.ko: %d\n", ret);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	tf = target_core_get_fabric(name);
+	if (!(tf)) {
+		printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+			name);
+		return ERR_PTR(-EINVAL);
+	}
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+			" %s\n", tf->tf_name);
+	/*
+	 * On a successful target_core_get_fabric() lookup, the returned
+	 * struct target_fabric_configfs *tf will contain a usage reference.
+	 */
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+
+	tf->tf_group.default_groups = tf->tf_default_groups;
+	tf->tf_group.default_groups[0] = &tf->tf_disc_group;
+	tf->tf_group.default_groups[1] = NULL;
+
+	config_group_init_type_name(&tf->tf_group, name,
+			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
+	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
+			&TF_CIT_TMPL(tf)->tfc_discovery_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+			" %s\n", tf->tf_group.cg_item.ci_name);
+	/*
+	 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
+	 */
+	tf->tf_ops.tf_subsys = tf->tf_subsys;
+	tf->tf_fabric = &tf->tf_group.cg_item;
+	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+			" for %s\n", name);
+
+	return &tf->tf_group;
+}
+
+/*
+ * Called from struct target_core_group_ops->drop_item()
+ */
+static void target_core_deregister_fabric(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(
+		to_config_group(item), struct target_fabric_configfs, tf_group);
+	struct config_group *tf_group;
+	struct config_item *df_item;
+	int i;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+		" tf list\n", config_item_name(item));
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+			" %s\n", tf->tf_name);
+	atomic_dec(&tf->tf_access_cnt);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+			" tf->tf_fabric for %s\n", tf->tf_name);
+	tf->tf_fabric = NULL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+			" %s\n", config_item_name(item));
+
+	tf_group = &tf->tf_group;
+	for (i = 0; tf_group->default_groups[i]; i++) {
+		df_item = &tf_group->default_groups[i]->cg_item;
+		tf_group->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_core_fabric_group_ops = {
+	.make_group	= &target_core_register_fabric,
+	.drop_item	= &target_core_deregister_fabric,
+};
+
+/*
+ * All item attributes appearing in /sys/kernel/target/ appear here.
+ */
+static struct configfs_attribute *target_core_fabric_item_attrs[] = {
+	&target_core_item_attr_version,
+	NULL,
+};
+
+/*
+ * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
+ */
+static struct config_item_type target_core_fabrics_item = {
+	.ct_item_ops	= &target_core_fabric_item_ops,
+	.ct_group_ops	= &target_core_fabric_group_ops,
+	.ct_attrs	= target_core_fabric_item_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem target_core_fabrics = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "target",
+			.ci_type = &target_core_fabrics_item,
+		},
+	},
+};
+
+static struct configfs_subsystem *target_core_subsystem[] = {
+	&target_core_fabrics,
+	NULL,
+};
+
+/*##############################################################################
+// Start functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/*
+ * First function called by fabric modules to:
+ *
+ * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
+ * 2) Add struct target_fabric_configfs to g_tf_list
+ * 3) Return struct target_fabric_configfs to fabric module to be passed
+ *    into target_fabric_configfs_register().
+ */
+struct target_fabric_configfs *target_fabric_configfs_init(
+	struct module *fabric_mod,
+	const char *name)
+{
+	struct target_fabric_configfs *tf;
+
+	if (!(fabric_mod)) {
+		printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
+		return NULL;
+	}
+	if (!(name)) {
+		printk(KERN_ERR "Unable to locate passed fabric name\n");
+		return NULL;
+	}
+	if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
+		printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+			"_NAME_SIZE\n", name);
+		return NULL;
+	}
+
+	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+	if (!(tf))
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&tf->tf_list);
+	atomic_set(&tf->tf_access_cnt, 0);
+	/*
+	 * Setup the default generic struct config_item_type's (cits) in
+	 * struct target_fabric_configfs->tf_cit_tmpl
+	 */
+	tf->tf_module = fabric_mod;
+	target_fabric_setup_cits(tf);
+
+	tf->tf_subsys = target_core_subsystem[0];
+	snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
+
+	mutex_lock(&g_tf_lock);
+	list_add_tail(&tf->tf_list, &g_tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+			">>>>>>>>>>>>>>\n");
+	printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+			" %s\n", tf, tf->tf_name);
+	return tf;
+}
+EXPORT_SYMBOL(target_fabric_configfs_init);
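+
+/*
+ * A rough usage sketch for a fabric module (illustrative only; "my_fabric"
+ * and my_fabric_ops are placeholder names, not part of this file):
+ *
+ *	struct target_fabric_configfs *tf;
+ *	int ret;
+ *
+ *	tf = target_fabric_configfs_init(THIS_MODULE, "my_fabric");
+ *	if (!tf || IS_ERR(tf))
+ *		return -ENOMEM;
+ *	tf->tf_ops = my_fabric_ops;
+ *	ret = target_fabric_configfs_register(tf);
+ *	if (ret < 0)
+ *		target_fabric_configfs_free(tf);
+ */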
+
+/*
+ * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
+ */
+void target_fabric_configfs_free(
+	struct target_fabric_configfs *tf)
+{
+	mutex_lock(&g_tf_lock);
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	kfree(tf);
+}
+EXPORT_SYMBOL(target_fabric_configfs_free);
+
+/*
+ * Perform a sanity check of the passed tf->tf_ops before completing
+ * TCM fabric module registration.
+ */
+static int target_fabric_tf_ops_check(
+	struct target_fabric_configfs *tf)
+{
+	struct target_core_fabric_ops *tfo = &tf->tf_ops;
+
+	if (!(tfo->get_fabric_name)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_fabric_proto_ident)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_wwn)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_tag)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_default_depth)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_pr_transport_id)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_pr_transport_id_len)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode_cache)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_demo_mode_write_protect)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_check_prod_mode_write_protect)) {
+		printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_alloc_fabric_acl)) {
+		printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_release_fabric_acl)) {
+		printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->tpg_get_inst_index)) {
+		printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->release_cmd_to_pool)) {
+		printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->release_cmd_direct)) {
+		printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->shutdown_session)) {
+		printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->close_session)) {
+		printk(KERN_ERR "Missing tfo->close_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->stop_session)) {
+		printk(KERN_ERR "Missing tfo->stop_session()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fall_back_to_erl0)) {
+		printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->sess_logged_in)) {
+		printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->sess_get_index)) {
+		printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->write_pending)) {
+		printk(KERN_ERR "Missing tfo->write_pending()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->write_pending_status)) {
+		printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->set_default_node_attributes)) {
+		printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_task_tag)) {
+		printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_cmd_state)) {
+		printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->new_cmd_failure)) {
+		printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_data_in)) {
+		printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_status)) {
+		printk(KERN_ERR "Missing tfo->queue_status()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->queue_tm_rsp)) {
+		printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->set_fabric_sense_len)) {
+		printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->get_fabric_sense_len)) {
+		printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->is_state_remove)) {
+		printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->pack_lun)) {
+		printk(KERN_ERR "Missing tfo->pack_lun()\n");
+		return -EINVAL;
+	}
+	/*
+	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
+	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+	 * target_core_fabric_configfs.c WWN+TPG group context code.
+	 */
+	if (!(tfo->fabric_make_wwn)) {
+		printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_drop_wwn)) {
+		printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_make_tpg)) {
+		printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+		return -EINVAL;
+	}
+	if (!(tfo->fabric_drop_tpg)) {
+		printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Called second from the fabric module, with the struct target_fabric_configfs
+ * pointer returned by target_fabric_configfs_init() as its parameter.
+ *
+ * Upon a successful registration, the new fabric's struct config_item is
+ * returned.  Also, a pointer to this struct is set in the passed
+ * struct target_fabric_configfs.
+ */
+int target_fabric_configfs_register(
+	struct target_fabric_configfs *tf)
+{
+	struct config_group *su_group;
+	int ret;
+
+	if (!(tf)) {
+		printk(KERN_ERR "Unable to locate target_fabric_configfs"
+			" pointer\n");
+		return -EINVAL;
+	}
+	if (!(tf->tf_subsys)) {
+		printk(KERN_ERR "Unable to locate target struct config_subsystem"
+			" pointer\n");
+		return -EINVAL;
+	}
+	su_group = &tf->tf_subsys->su_group;
+	if (!(su_group)) {
+		printk(KERN_ERR "Unable to locate target struct config_group"
+			" pointer\n");
+		return -EINVAL;
+	}
+	ret = target_fabric_tf_ops_check(tf);
+	if (ret < 0)
+		return ret;
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+		">>>>>>>>>>\n");
+	return 0;
+}
+EXPORT_SYMBOL(target_fabric_configfs_register);
+
+void target_fabric_configfs_deregister(
+	struct target_fabric_configfs *tf)
+{
+	struct config_group *su_group;
+	struct configfs_subsystem *su;
+
+	if (!(tf)) {
+		printk(KERN_ERR "Unable to locate passed target_fabric_"
+			"configfs\n");
+		return;
+	}
+	su = tf->tf_subsys;
+	if (!(su)) {
+		printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+			" pointer\n");
+		return;
+	}
+	su_group = &tf->tf_subsys->su_group;
+	if (!(su_group)) {
+		printk(KERN_ERR "Unable to locate target struct config_group"
+			" pointer\n");
+		return;
+	}
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+			">>>>>>>>>>>>\n");
+	mutex_lock(&g_tf_lock);
+	if (atomic_read(&tf->tf_access_cnt)) {
+		mutex_unlock(&g_tf_lock);
+		printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+			tf->tf_name);
+		BUG();
+	}
+	list_del(&tf->tf_list);
+	mutex_unlock(&g_tf_lock);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+			" %s\n", tf->tf_name);
+	tf->tf_module = NULL;
+	tf->tf_subsys = NULL;
+	kfree(tf);
+
+	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+			">>>>>\n");
+	return;
+}
+EXPORT_SYMBOL(target_fabric_configfs_deregister);
+
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+
+#define DEF_DEV_ATTRIB_SHOW(_name)					\
+static ssize_t target_core_dev_show_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	char *page)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	ssize_t rb;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev)) {							\
+		spin_unlock(&se_dev->se_dev_lock); 			\
+		return -ENODEV;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return rb;							\
+}
+
+#define DEF_DEV_ATTRIB_STORE(_name)					\
+static ssize_t target_core_dev_store_attr_##_name(			\
+	struct se_dev_attrib *da,					\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct se_device *dev;						\
+	struct se_subsystem_dev *se_dev = da->da_sub_dev;			\
+	unsigned long val;						\
+	int ret;							\
+									\
+	spin_lock(&se_dev->se_dev_lock);				\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev)) {							\
+		spin_unlock(&se_dev->se_dev_lock);			\
+		return -ENODEV;						\
+	}								\
+	ret = strict_strtoul(page, 0, &val);				\
+	if (ret < 0) {							\
+		spin_unlock(&se_dev->se_dev_lock);                      \
+		printk(KERN_ERR "strict_strtoul() failed with"		\
+			" ret: %d\n", ret);				\
+		return -EINVAL;						\
+	}								\
+	ret = se_dev_set_##_name(dev, (u32)val);			\
+	spin_unlock(&se_dev->se_dev_lock);				\
+									\
+	return (!ret) ? count : -EINVAL;				\
+}
+
+#define DEF_DEV_ATTRIB(_name)						\
+DEF_DEV_ATTRIB_SHOW(_name);						\
+DEF_DEV_ATTRIB_STORE(_name);
+
+#define DEF_DEV_ATTRIB_RO(_name)					\
+DEF_DEV_ATTRIB_SHOW(_name);
+
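+/*
+ * For example, DEF_DEV_ATTRIB(block_size) below generates
+ * target_core_dev_show_attr_block_size() and
+ * target_core_dev_store_attr_block_size(); the store variant parses the
+ * user supplied value and hands it to se_dev_set_block_size() while
+ * holding se_dev->se_dev_lock.
+ */
+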
+CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
+#define SE_DEV_ATTR(_name, _mode)					\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_show_attr_##_name,			\
+		target_core_dev_store_attr_##_name);
+
+#define SE_DEV_ATTR_RO(_name);						\
+static struct target_core_dev_attrib_attribute				\
+			target_core_dev_attrib_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_show_attr_##_name);
+
+DEF_DEV_ATTRIB(emulate_dpo);
+SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_write);
+SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_fua_read);
+SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_write_cache);
+SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
+SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tas);
+SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpu);
+SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_tpws);
+SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(enforce_pr_isids);
+SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_block_size);
+SE_DEV_ATTR_RO(hw_block_size);
+
+DEF_DEV_ATTRIB(block_size);
+SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_max_sectors);
+SE_DEV_ATTR_RO(hw_max_sectors);
+
+DEF_DEV_ATTRIB(max_sectors);
+SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(optimal_sectors);
+SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB_RO(hw_queue_depth);
+SE_DEV_ATTR_RO(hw_queue_depth);
+
+DEF_DEV_ATTRIB(queue_depth);
+SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(task_timeout);
+SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_lba_count);
+SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(max_unmap_block_desc_count);
+SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity);
+SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(unmap_granularity_alignment);
+SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+
+static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
+	&target_core_dev_attrib_emulate_dpo.attr,
+	&target_core_dev_attrib_emulate_fua_write.attr,
+	&target_core_dev_attrib_emulate_fua_read.attr,
+	&target_core_dev_attrib_emulate_write_cache.attr,
+	&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
+	&target_core_dev_attrib_emulate_tas.attr,
+	&target_core_dev_attrib_emulate_tpu.attr,
+	&target_core_dev_attrib_emulate_tpws.attr,
+	&target_core_dev_attrib_enforce_pr_isids.attr,
+	&target_core_dev_attrib_hw_block_size.attr,
+	&target_core_dev_attrib_block_size.attr,
+	&target_core_dev_attrib_hw_max_sectors.attr,
+	&target_core_dev_attrib_max_sectors.attr,
+	&target_core_dev_attrib_optimal_sectors.attr,
+	&target_core_dev_attrib_hw_queue_depth.attr,
+	&target_core_dev_attrib_queue_depth.attr,
+	&target_core_dev_attrib_task_timeout.attr,
+	&target_core_dev_attrib_max_unmap_lba_count.attr,
+	&target_core_dev_attrib_max_unmap_block_desc_count.attr,
+	&target_core_dev_attrib_unmap_granularity.attr,
+	&target_core_dev_attrib_unmap_granularity_alignment.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_attrib_ops = {
+	.show_attribute		= target_core_dev_attrib_attr_show,
+	.store_attribute	= target_core_dev_attrib_attr_store,
+};
+
+static struct config_item_type target_core_dev_attrib_cit = {
+	.ct_item_ops		= &target_core_dev_attrib_ops,
+	.ct_attrs		= target_core_dev_attrib_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_attrib_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_wwn_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
+#define SE_DEV_WWN_ATTR(_name, _mode)					\
+static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
+		__CONFIGFS_EATTR(_name, _mode,				\
+		target_core_dev_wwn_show_attr_##_name,			\
+		target_core_dev_wwn_store_attr_##_name);
+
+#define SE_DEV_WWN_ATTR_RO(_name);					\
+do {									\
+	static struct target_core_dev_wwn_attribute			\
+			target_core_dev_wwn_##_name =			\
+		__CONFIGFS_EATTR_RO(_name,				\
+		target_core_dev_wwn_show_attr_##_name);			\
+} while (0);
+
+/*
+ * VPD page 0x80 Unit serial
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
+		&t10_wwn->unit_serial[0]);
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+
+	/*
+	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
+	 * from the struct scsi_device level firmware, do not allow
+	 * VPD Unit Serial to be emulated.
+	 *
+	 * Note this struct scsi_device could also be emulating VPD
+	 * information from its drivers/scsi LLD.  But for now we assume
+	 * it is doing 'the right thing' wrt a world wide unique
+	 * VPD Unit Serial Number that OS dependent multipath can depend on.
+	 */
+	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+		printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+			" Unit Serial, ignoring request\n");
+		return -EOPNOTSUPP;
+	}
+
+	if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
+		printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
+		return -EOVERFLOW;
+	}
+	/*
+	 * Check to see if any active $FABRIC_MOD exports exist.  If they
+	 * do exist, fail here as changing this information on the fly
+	 * (underneath the initiator side OS dependent multipath code)
+	 * could cause negative effects.
+	 */
+	dev = su_dev->se_dev_ptr;
+	if ((dev)) {
+		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+			printk(KERN_ERR "Unable to set VPD Unit Serial while"
+				" active %d $FABRIC_MOD exports exist\n",
+				atomic_read(&dev->dev_export_obj.obj_access_count));
+			return -EINVAL;
+		}
+	}
+	/*
+	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
+	 *
+	 * Also, strip any newline added from the userspace
+	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
+	 */
+	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
+	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
+	snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+			"%s", strstrip(buf));
+	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+			" %s\n", su_dev->t10_wwn.unit_serial);
+
+	return count;
+}
+
+SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Protocol Identifier
+ */
+static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	char *page)
+{
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
+	struct se_device *dev;
+	struct t10_vpd *vpd;
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	ssize_t len = 0;
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	spin_lock(&t10_wwn->t10_vpd_lock);
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
+		if (!(vpd->protocol_identifier_set))
+			continue;
+
+		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
+
+		if ((len + strlen(buf) > PAGE_SIZE))
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+	}
+	spin_unlock(&t10_wwn->t10_vpd_lock);
+
+	return len;
+}
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
+
+/*
+ * Generic wrapper for dumping VPD identifiers by association.
+ */
+#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
+static ssize_t target_core_dev_wwn_show_attr_##_name(			\
+	struct t10_wwn *t10_wwn,					\
+	char *page)							\
+{									\
+	struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;		\
+	struct se_device *dev;						\
+	struct t10_vpd *vpd;							\
+	unsigned char buf[VPD_TMP_BUF_SIZE];				\
+	ssize_t len = 0;						\
+									\
+	dev = se_dev->se_dev_ptr;					\
+	if (!(dev))							\
+		return -ENODEV;						\
+									\
+	spin_lock(&t10_wwn->t10_vpd_lock);				\
+	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
+		if (vpd->association != _assoc)				\
+			continue;					\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+									\
+		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
+		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
+		if ((len + strlen(buf) > PAGE_SIZE))			\
+			break;						\
+		len += sprintf(page+len, "%s", buf);			\
+	}								\
+	spin_unlock(&t10_wwn->t10_vpd_lock);				\
+									\
+	return len;							\
+}
+
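+/*
+ * Each DEF_DEV_WWN_ASSOC_SHOW(name, assoc) use below generates a show
+ * handler that walks t10_wwn->t10_vpd_list and dumps only the VPD 0x83
+ * descriptors whose ASSOCIATION field matches: 0x00 for the logical unit,
+ * 0x10 for the target port and 0x20 for the SCSI target device.
+ */
+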
+/*
+ * VPD page 0x83 Association: Logical Unit
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: Target Port
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
+
+/*
+ * VPD page 0x83 Association: SCSI Target Device
+ */
+DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
+
+static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
+	struct t10_wwn *t10_wwn,
+	const char *page,
+	size_t count)
+{
+	return -ENOSYS;
+}
+
+SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+
+static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
+	&target_core_dev_wwn_vpd_unit_serial.attr,
+	&target_core_dev_wwn_vpd_protocol_identifier.attr,
+	&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
+	&target_core_dev_wwn_vpd_assoc_target_port.attr,
+	&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_wwn_ops = {
+	.show_attribute		= target_core_dev_wwn_attr_show,
+	.store_attribute	= target_core_dev_wwn_attr_store,
+};
+
+static struct config_item_type target_core_dev_wwn_cit = {
+	.ct_item_ops		= &target_core_dev_wwn_ops,
+	.ct_attrs		= target_core_dev_wwn_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_wwn_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_pr_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+#define SE_DEV_PR_ATTR(_name, _mode)					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_dev_pr_show_attr_##_name,				\
+	target_core_dev_pr_store_attr_##_name);
+
+#define SE_DEV_PR_ATTR_RO(_name);					\
+static struct target_core_dev_pr_attribute target_core_dev_pr_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_dev_pr_show_attr_##_name);
+
+/*
+ * res_holder
+ */
+static ssize_t target_core_dev_pr_show_spc3_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+	struct t10_pr_registration *pr_reg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+		TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+		se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_spc2_res(
+	struct se_device *dev,
+	char *page,
+	ssize_t *len)
+{
+	struct se_node_acl *se_nacl;
+
+	spin_lock(&dev->dev_reservation_lock);
+	se_nacl = dev->dev_reserved_node_acl;
+	if (!(se_nacl)) {
+		*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return *len;
+	}
+	*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
+		TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+		se_nacl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return *len;
+}
+
+static ssize_t target_core_dev_pr_show_attr_res_holder(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	switch (T10_RES(su_dev)->res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC2_RESERVATIONS:
+		target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
+				page, &len);
+		break;
+	case SPC_PASSTHROUGH:
+		len += sprintf(page+len, "Passthrough\n");
+		break;
+	default:
+		len += sprintf(page+len, "Unknown\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_holder);
+
+/*
+ * res_pr_all_tgt_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	/*
+	 * See the All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
+	 * Basic PERSISTENT RESERVE OUT parameter list, page 290
+	 */
+	if (pr_reg->pr_reg_all_tg_pt)
+		len = sprintf(page, "SPC-3 Reservation: All Target"
+			" Ports registration\n");
+	else
+		len = sprintf(page, "SPC-3 Reservation: Single"
+			" Target Port registration\n");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
+
+/*
+ * res_pr_generation
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_generation);
+
+/*
+ * res_pr_holder_tg_port
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_node_acl *se_nacl;
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg;
+	struct target_core_fabric_ops *tfo;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	se_nacl = pr_reg->pr_reg_nacl;
+	se_tpg = se_nacl->se_tpg;
+	lun = pr_reg->pr_reg_tg_pt_lun;
+	tfo = TPG_TFO(se_tpg);
+
+	len += sprintf(page+len, "SPC-3 Reservation: %s"
+		" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+		tfo->tpg_get_wwn(se_tpg));
+	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
+		" Identifier Tag: %hu %s Portal Group Tag: %hu"
+		" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
+		tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
+		tfo->get_fabric_name(), lun->unpacked_lun);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
+
+/*
+ * res_pr_registered_i_pts
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct target_core_fabric_ops *tfo;
+	struct t10_pr_registration *pr_reg;
+	unsigned char buf[384];
+	char i_buf[PR_REG_ISID_ID_LEN];
+	ssize_t len = 0;
+	int reg_count = 0, prf_isid;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	len += sprintf(page+len, "SPC-3 PR Registrations:\n");
+
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+
+		memset(buf, 0, 384);
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+		prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+					PR_REG_ISID_ID_LEN);
+		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
+			tfo->get_fabric_name(),
+			pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", pr_reg->pr_res_key,
+			pr_reg->pr_res_generation);
+
+		if ((len + strlen(buf) > PAGE_SIZE))
+			break;
+
+		len += sprintf(page+len, "%s", buf);
+		reg_count++;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	if (!(reg_count))
+		len += sprintf(page+len, "None\n");
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
+
+/*
+ * res_pr_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_pr_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	struct se_device *dev;
+	struct t10_pr_registration *pr_reg;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return len;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_reg = dev->dev_pr_res_holder;
+	if (!(pr_reg)) {
+		len = sprintf(page, "No SPC-3 Reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		return len;
+	}
+	len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_pr_type);
+
+/*
+ * res_type
+ */
+static ssize_t target_core_dev_pr_show_attr_res_type(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	ssize_t len = 0;
+
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	switch (T10_RES(su_dev)->res_type) {
+	case SPC3_PERSISTENT_RESERVATIONS:
+		len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+		break;
+	case SPC2_RESERVATIONS:
+		len = sprintf(page, "SPC2_RESERVATIONS\n");
+		break;
+	case SPC_PASSTHROUGH:
+		len = sprintf(page, "SPC_PASSTHROUGH\n");
+		break;
+	default:
+		len = sprintf(page, "UNKNOWN\n");
+		break;
+	}
+
+	return len;
+}
+
+SE_DEV_PR_ATTR_RO(res_type);
+
+/*
+ * res_aptpl_active
+ */
+
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "APTPL Bit Status: %s\n",
+		(T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+}
+
+SE_DEV_PR_ATTR_RO(res_aptpl_active);
+
+/*
+ * res_aptpl_metadata
+ */
+static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	char *page)
+{
+	if (!(su_dev->se_dev_ptr))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return sprintf(page, "Ready to process PR APTPL metadata..\n");
+}
+
+enum {
+	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
+	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
+	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
+	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_initiator_fabric, "initiator_fabric=%s"},
+	{Opt_initiator_node, "initiator_node=%s"},
+	{Opt_initiator_sid, "initiator_sid=%s"},
+	{Opt_sa_res_key, "sa_res_key=%s"},
+	{Opt_res_holder, "res_holder=%d"},
+	{Opt_res_type, "res_type=%d"},
+	{Opt_res_scope, "res_scope=%d"},
+	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
+	{Opt_mapped_lun, "mapped_lun=%d"},
+	{Opt_target_fabric, "target_fabric=%s"},
+	{Opt_target_node, "target_node=%s"},
+	{Opt_tpgt, "tpgt=%d"},
+	{Opt_port_rtpi, "port_rtpi=%d"},
+	{Opt_target_lun, "target_lun=%d"},
+	{Opt_err, NULL}
+};
+
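+/*
+ * Illustrative sketch of the comma-separated key=value string parsed by
+ * the token table above (all names and values below are made up):
+ *
+ *   initiator_fabric=iSCSI,initiator_node=iqn.1993-08.org.debian:01:abcd,
+ *   initiator_sid=10230173,sa_res_key=0x1234abcd,res_holder=1,res_type=3,
+ *   mapped_lun=0,target_fabric=iSCSI,target_node=iqn.2003-01.org.example:t1,
+ *   tpgt=1,port_rtpi=1,target_lun=0
+ */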
+static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
+	struct se_subsystem_dev *su_dev,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
+	unsigned char *isid = NULL;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	unsigned long long tmp_ll;
+	u64 sa_res_key = 0;
+	u32 mapped_lun = 0, target_lun = 0;
+	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
+	u16 port_rpti = 0, tpgt = 0;
+	u8 type = 0, scope;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "Unable to process APTPL metadata while"
+			" active fabric exports exist\n");
+		return -EINVAL;
+	}
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_initiator_fabric:
+			i_fabric = match_strdup(&args[0]);
+			break;
+		case Opt_initiator_node:
+			i_port = match_strdup(&args[0]);
+			if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
+				printk(KERN_ERR "APTPL metadata initiator_node="
+					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
+					PR_APTPL_MAX_IPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_initiator_sid:
+			isid = match_strdup(&args[0]);
+			if (strlen(isid) > PR_REG_ISID_LEN) {
+				printk(KERN_ERR "APTPL metadata initiator_sid="
+					" exceeds PR_REG_ISID_LEN: %d\n",
+					PR_REG_ISID_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_sa_res_key:
+			arg_p = match_strdup(&args[0]);
+			ret = strict_strtoull(arg_p, 0, &tmp_ll);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoull() failed for"
+					" sa_res_key=\n");
+				goto out;
+			}
+			sa_res_key = (u64)tmp_ll;
+			break;
+		/*
+		 * PR APTPL Metadata for Reservation
+		 */
+		case Opt_res_holder:
+			match_int(args, &arg);
+			res_holder = arg;
+			break;
+		case Opt_res_type:
+			match_int(args, &arg);
+			type = (u8)arg;
+			break;
+		case Opt_res_scope:
+			match_int(args, &arg);
+			scope = (u8)arg;
+			break;
+		case Opt_res_all_tg_pt:
+			match_int(args, &arg);
+			all_tg_pt = (int)arg;
+			break;
+		case Opt_mapped_lun:
+			match_int(args, &arg);
+			mapped_lun = (u32)arg;
+			break;
+		/*
+		 * PR APTPL Metadata for Target Port
+		 */
+		case Opt_target_fabric:
+			t_fabric = match_strdup(&args[0]);
+			break;
+		case Opt_target_node:
+			t_port = match_strdup(&args[0]);
+			if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
+				printk(KERN_ERR "APTPL metadata target_node="
+					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
+					PR_APTPL_MAX_TPORT_LEN);
+				ret = -EINVAL;
+				break;
+			}
+			break;
+		case Opt_tpgt:
+			match_int(args, &arg);
+			tpgt = (u16)arg;
+			break;
+		case Opt_port_rtpi:
+			match_int(args, &arg);
+			port_rpti = (u16)arg;
+			break;
+		case Opt_target_lun:
+			match_int(args, &arg);
+			target_lun = (u32)arg;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!(i_port) || !(t_port) || !(sa_res_key)) {
+		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (res_holder && !(type)) {
+		printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+				" holder\n", type);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
+			res_holder, all_tg_pt, type);
+out:
+	kfree(orig);
+	return (ret == 0) ? count : ret;
+}
+
+SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+
+static struct configfs_attribute *target_core_dev_pr_attrs[] = {
+	&target_core_dev_pr_res_holder.attr,
+	&target_core_dev_pr_res_pr_all_tgt_pts.attr,
+	&target_core_dev_pr_res_pr_generation.attr,
+	&target_core_dev_pr_res_pr_holder_tg_port.attr,
+	&target_core_dev_pr_res_pr_registered_i_pts.attr,
+	&target_core_dev_pr_res_pr_type.attr,
+	&target_core_dev_pr_res_type.attr,
+	&target_core_dev_pr_res_aptpl_active.attr,
+	&target_core_dev_pr_res_aptpl_metadata.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_dev_pr_ops = {
+	.show_attribute		= target_core_dev_pr_attr_show,
+	.store_attribute	= target_core_dev_pr_attr_store,
+};
+
+static struct config_item_type target_core_dev_pr_cit = {
+	.ct_item_ops		= &target_core_dev_pr_ops,
+	.ct_attrs		= target_core_dev_pr_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/*  End functions for struct config_item_type target_core_dev_pr_cit */
+
+/*  Start functions for struct config_item_type target_core_dev_cit */
+
+static ssize_t target_core_show_dev_info(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	int bl = 0;
+	ssize_t read_bytes = 0;
+
+	if (!(se_dev->se_dev_ptr))
+		return -ENODEV;
+
+	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+	read_bytes += bl;
+	read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_info = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "info",
+		    .ca_mode = S_IRUGO },
+	.show	= target_core_show_dev_info,
+	.store	= NULL,
+};
+
+static ssize_t target_core_store_dev_control(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate struct se_subsystem_dev->se"
+				"_dev_su_ptr\n");
+		return -EINVAL;
+	}
+
+	return t->set_configfs_dev_params(hba, se_dev, page, count);
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_control = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "control",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_control,
+};
+
+static ssize_t target_core_show_dev_alias(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+}
+
+static ssize_t target_core_store_dev_alias(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_DEV_ALIAS_LEN-1)) {
+		printk(KERN_ERR "alias count: %d exceeds"
+			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
+			SE_DEV_ALIAS_LEN-1);
+		return -EINVAL;
+	}
+
+	se_dev->su_dev_flags |= SDF_USING_ALIAS;
+	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
+			"%s", page);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_alias);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alias = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alias",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_alias,
+	.store	= target_core_store_dev_alias,
+};
+
+static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+
+	if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+		return 0;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+}
+
+static ssize_t target_core_store_dev_udev_path(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	ssize_t read_bytes;
+
+	if (count > (SE_UDEV_PATH_LEN-1)) {
+		printk(KERN_ERR "udev_path count: %d exceeds"
+			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
+			SE_UDEV_PATH_LEN-1);
+		return -EINVAL;
+	}
+
+	se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+			"%s", page);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&se_dev->se_dev_group.cg_item),
+		se_dev->se_dev_udev_path);
+
+	return read_bytes;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "udev_path",
+		    .ca_mode =  S_IRUGO | S_IWUSR },
+	.show	= target_core_show_dev_udev_path,
+	.store	= target_core_store_dev_udev_path,
+};
+
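+/*
+ * Sketch of the enable flow, with an assumed configfs path layout:
+ *
+ *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
+ *
+ * The handler below only acts on a buffer containing "1" and then calls
+ * the subsystem plugin's ->create_virtdevice() to set up se_dev_ptr.
+ */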
+static ssize_t target_core_store_dev_enable(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+	struct se_device *dev;
+	struct se_hba *hba = se_dev->se_dev_hba;
+	struct se_subsystem_api *t = hba->transport;
+	char *ptr;
+
+	ptr = strstr(page, "1");
+	if (!(ptr)) {
+		printk(KERN_ERR "For dev_enable ops, only valid value"
+				" is \"1\"\n");
+		return -EINVAL;
+	}
+	if ((se_dev->se_dev_ptr)) {
+		printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+				" object\n");
+		return -EEXIST;
+	}
+
+	if (t->check_configfs_dev_params(hba, se_dev) < 0)
+		return -EINVAL;
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (!(dev) || IS_ERR(dev))
+		return -EINVAL;
+
+	se_dev->se_dev_ptr = dev;
+	printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+		" %p\n", se_dev->se_dev_ptr);
+
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_enable = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "enable",
+		    .ca_mode = S_IWUSR },
+	.show	= NULL,
+	.store	= target_core_store_dev_enable,
+};
+
+static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct config_item *lu_ci;
+	struct t10_alua_lu_gp *lu_gp;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+		return len;
+
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem)) {
+		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		lu_ci = &lu_gp->lu_gp_group.cg_item;
+		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
+			config_item_name(lu_ci), lu_gp->lu_gp_id);
+	}
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	return len;
+}
+
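+/*
+ * Sketch of how this attribute is driven from user space (the group name
+ * "some_lu_gp" is hypothetical):
+ *
+ *   echo some_lu_gp > .../core/$HBA/$DEV/alua_lu_gp    (join or move group)
+ *   echo NULL > .../core/$HBA/$DEV/alua_lu_gp          (drop association)
+ */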
+static ssize_t target_core_store_alua_lu_gp(
+	void *p,
+	const char *page,
+	size_t count)
+{
+	struct se_device *dev;
+	struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+	struct se_hba *hba = su_dev->se_dev_hba;
+	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+	int move = 0;
+
+	dev = su_dev->se_dev_ptr;
+	if (!(dev))
+		return -ENODEV;
+
+	if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
+		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		return -EINVAL;
+	}
+	if (count > LU_GROUP_NAME_BUF) {
+		printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+		return -EINVAL;
+	}
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+	memcpy(buf, page, count);
+	/*
+	 * Any ALUA logical unit alias besides "NULL" means we will be
+	 * making a new group association.
+	 */
+	if (strcmp(strstrip(buf), "NULL")) {
+		/*
+		 * core_alua_get_lu_gp_by_name() will increment reference to
+		 * struct t10_alua_lu_gp.  This reference is released with
+		 * core_alua_put_lu_gp_from_name() below.
+		 */
+		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
+		if (!(lu_gp_new))
+			return -ENODEV;
+	}
+	lu_gp_mem = dev->dev_alua_lu_gp_mem;
+	if (!(lu_gp_mem)) {
+		if (lu_gp_new)
+			core_alua_put_lu_gp_from_name(lu_gp_new);
+		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+				" pointer\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
+	lu_gp = lu_gp_mem->lu_gp;
+	if ((lu_gp)) {
+		/*
+		 * Clearing an existing lu_gp association, and replacing
+		 * with NULL
+		 */
+		if (!(lu_gp_new)) {
+			printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
+				" %hu\n",
+				config_item_name(&hba->hba_group.cg_item),
+				config_item_name(&su_dev->se_dev_group.cg_item),
+				config_item_name(&lu_gp->lu_gp_group.cg_item),
+				lu_gp->lu_gp_id);
+
+			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+			return count;
+		}
+		/*
+		 * Removing existing association of lu_gp_mem with lu_gp
+		 */
+		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
+		move = 1;
+	}
+	/*
+	 * Associate lu_gp_mem with lu_gp_new.
+	 */
+	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
+	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+		" core/alua/lu_gps/%s, ID: %hu\n",
+		(move) ? "Moving" : "Adding",
+		config_item_name(&hba->hba_group.cg_item),
+		config_item_name(&su_dev->se_dev_group.cg_item),
+		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
+		lu_gp_new->lu_gp_id);
+
+	core_alua_put_lu_gp_from_name(lu_gp_new);
+	return count;
+}
+
+static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
+	.attr	= { .ca_owner = THIS_MODULE,
+		    .ca_name = "alua_lu_gp",
+		    .ca_mode = S_IRUGO | S_IWUSR },
+	.show	= target_core_show_alua_lu_gp,
+	.store	= target_core_store_alua_lu_gp,
+};
+
+static struct configfs_attribute *lio_core_dev_attrs[] = {
+	&target_core_attr_dev_info.attr,
+	&target_core_attr_dev_control.attr,
+	&target_core_attr_dev_alias.attr,
+	&target_core_attr_dev_udev_path.attr,
+	&target_core_attr_dev_enable.attr,
+	&target_core_attr_dev_alua_lu_gp.attr,
+	NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct config_group *dev_cg;
+
+	if (!(se_dev))
+		return;
+
+	dev_cg = &se_dev->se_dev_group;
+	kfree(dev_cg->default_groups);
+}
+
+static ssize_t target_core_dev_show(struct config_item *item,
+				     struct configfs_attribute *attr,
+				     char *page)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!(tc_attr->show))
+		return -EINVAL;
+
+	return tc_attr->show((void *)se_dev, page);
+}
+
+static ssize_t target_core_dev_store(struct config_item *item,
+				      struct configfs_attribute *attr,
+				      const char *page, size_t count)
+{
+	struct se_subsystem_dev *se_dev = container_of(
+			to_config_group(item), struct se_subsystem_dev,
+			se_dev_group);
+	struct target_core_configfs_attribute *tc_attr = container_of(
+			attr, struct target_core_configfs_attribute, attr);
+
+	if (!(tc_attr->store))
+		return -EINVAL;
+
+	return tc_attr->store((void *)se_dev, page, count);
+}
+
+static struct configfs_item_operations target_core_dev_item_ops = {
+	.release		= target_core_dev_release,
+	.show_attribute		= target_core_dev_show,
+	.store_attribute	= target_core_dev_store,
+};
+
+static struct config_item_type target_core_dev_cit = {
+	.ct_item_ops		= &target_core_dev_item_ops,
+	.ct_attrs		= lio_core_dev_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_dev_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
+#define SE_DEV_ALUA_LU_ATTR(_name, _mode)				\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_lu_gp_show_attr_##_name,			\
+	target_core_alua_lu_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_LU_ATTR_RO(_name)					\
+static struct target_core_alua_lu_gp_attribute				\
+			target_core_alua_lu_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_lu_gp_show_attr_##_name);
+
+/*
+ * lu_gp_id
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	if (!(lu_gp->lu_gp_valid_id))
+		return 0;
+
+	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
+}
+
+static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
+	struct t10_alua_lu_gp *lu_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	unsigned long lu_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &lu_gp_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+			" lu_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (lu_gp_id > 0x0000ffff) {
+		printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", lu_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s to ID: %hu\n",
+		config_item_name(&alua_lu_gp_cg->cg_item),
+		lu_gp->lu_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_lu_gp_show_attr_members(
+	struct t10_alua_lu_gp *lu_gp,
+	char *page)
+{
+	struct se_device *dev;
+	struct se_hba *hba;
+	struct se_subsystem_dev *su_dev;
+	struct t10_alua_lu_gp_member *lu_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[LU_GROUP_NAME_BUF];
+
+	memset(buf, 0, LU_GROUP_NAME_BUF);
+
+	spin_lock(&lu_gp->lu_gp_lock);
+	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
+		dev = lu_gp_mem->lu_gp_mem_dev;
+		su_dev = dev->se_sub_dev;
+		hba = su_dev->se_dev_hba;
+
+		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+			config_item_name(&hba->hba_group.cg_item),
+			config_item_name(&su_dev->se_dev_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&lu_gp->lu_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_LU_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+
+static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
+	&target_core_alua_lu_gp_lu_gp_id.attr,
+	&target_core_alua_lu_gp_members.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.show_attribute		= target_core_alua_lu_gp_attr_show,
+	.store_attribute	= target_core_alua_lu_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_lu_gp_cit = {
+	.ct_item_ops		= &target_core_alua_lu_gp_ops,
+	.ct_attrs		= target_core_alua_lu_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+static struct config_group *target_core_alua_create_lu_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_lu_gp *lu_gp;
+	struct config_group *alua_lu_gp_cg = NULL;
+	struct config_item *alua_lu_gp_ci = NULL;
+
+	lu_gp = core_alua_allocate_lu_gp(name, 0);
+	if (IS_ERR(lu_gp))
+		return NULL;
+
+	alua_lu_gp_cg = &lu_gp->lu_gp_group;
+	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_lu_gp_cg, name,
+			&target_core_alua_lu_gp_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s\n",
+		config_item_name(alua_lu_gp_ci));
+
+	return alua_lu_gp_cg;
+}
+
+static void target_core_alua_drop_lu_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+		" Group: core/alua/lu_gps/%s, ID: %hu\n",
+		config_item_name(item), lu_gp->lu_gp_id);
+
+	config_item_put(item);
+	core_alua_free_lu_gp(lu_gp);
+}
+
+static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
+	.make_group		= &target_core_alua_create_lu_gp,
+	.drop_item		= &target_core_alua_drop_lu_gp,
+};
+
+static struct config_item_type target_core_alua_lu_gps_cit = {
+	.ct_item_ops		= NULL,
+	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
+#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name,			\
+	target_core_alua_tg_pt_gp_store_attr_##_name);
+
+#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name)				\
+static struct target_core_alua_tg_pt_gp_attribute			\
+			target_core_alua_tg_pt_gp_##_name =		\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_core_alua_tg_pt_gp_show_attr_##_name);
+
+/*
+ * alua_access_state
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n",
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+}
+
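+/*
+ * Sketch: user space writes a numeric ALUA access state (for example "0"
+ * for Active/Optimized) which is passed to core_alua_do_port_transition();
+ * the valid state values are defined by the ALUA support code, not here.
+ */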
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+	unsigned long tmp;
+	int new_state, ret;
+
+	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to do implicit ALUA on non-valid"
+			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access state"
+				" from %s\n", page);
+		return -EINVAL;
+	}
+	new_state = (int)tmp;
+
+	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Unable to process implicit configfs ALUA"
+			" transition while TPGS_IMPLICT_ALUA is disabled\n");
+		return -EINVAL;
+	}
+
+	ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+					NULL, NULL, new_state, 0);
+	return (!ret) ? count : -EINVAL;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_status
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%s\n",
+		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int new_status, ret;
+
+	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
+		printk(KERN_ERR "Unable to set ALUA access status on"
+			" non-valid tg_pt_gp ID: %hu\n",
+			tg_pt_gp->tg_pt_gp_valid_id);
+		return -EINVAL;
+	}
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract new ALUA access status"
+				" from %s\n", page);
+		return -EINVAL;
+	}
+	new_status = (int)tmp;
+
+	if ((new_status != ALUA_STATUS_NONE) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
+	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+		printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+				new_status);
+		return -EINVAL;
+	}
+
+	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_access_type
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_access_type(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_access_type(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_write_metadata
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	unsigned long tmp;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tmp);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+		return -EINVAL;
+	}
+
+	if ((tmp != 0) && (tmp != 1)) {
+		printk(KERN_ERR "Illegal value for alua_write_metadata:"
+			" %lu\n", tmp);
+		return -EINVAL;
+	}
+	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
+
+/*
+ * nonop_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * trans_delay_msecs
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
+
+/*
+ * preferred
+ */
+
+static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	return core_alua_show_preferred_bit(tg_pt_gp, page);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
+
+/*
+ * tg_pt_gp_id
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	if (!(tg_pt_gp->tg_pt_gp_valid_id))
+		return 0;
+
+	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
+}
+
+static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	const char *page,
+	size_t count)
+{
+	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	unsigned long tg_pt_gp_id;
+	int ret;
+
+	ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+			" tg_pt_gp_id\n", ret);
+		return -EINVAL;
+	}
+	if (tg_pt_gp_id > 0x0000ffff) {
+		printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+			" 0x0000ffff\n", tg_pt_gp_id);
+		return -EINVAL;
+	}
+
+	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
+	if (ret < 0)
+		return -EINVAL;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+		"core/alua/tg_pt_gps/%s to ID: %hu\n",
+		config_item_name(&alua_tg_pt_gp_cg->cg_item),
+		tg_pt_gp->tg_pt_gp_id);
+
+	return count;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
+
+/*
+ * members
+ */
+static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	char *page)
+{
+	struct se_port *port;
+	struct se_portal_group *tpg;
+	struct se_lun *lun;
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+	ssize_t len = 0, cur_len;
+	unsigned char buf[TG_PT_GROUP_NAME_BUF];
+
+	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+
+	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
+	list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
+			tg_pt_gp_mem_list) {
+		port = tg_pt_gp_mem->tg_pt;
+		tpg = port->sep_tpg;
+		lun = port->sep_lun;
+
+		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
+			"/%s\n", TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			config_item_name(&lun->lun_group.cg_item));
+		cur_len++; /* Extra byte for NULL terminator */
+
+		if ((cur_len + len) > PAGE_SIZE) {
+			printk(KERN_WARNING "Ran out of tg_pt_gp_show_attr"
+				"_members buffer\n");
+			break;
+		}
+		memcpy(page+len, buf, cur_len);
+		len += cur_len;
+	}
+	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+
+	return len;
+}
+
+SE_DEV_ALUA_TG_PT_ATTR_RO(members);
+
+CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
+			tg_pt_gp_group);
+
+static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
+	&target_core_alua_tg_pt_gp_alua_access_state.attr,
+	&target_core_alua_tg_pt_gp_alua_access_status.attr,
+	&target_core_alua_tg_pt_gp_alua_access_type.attr,
+	&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
+	&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
+	&target_core_alua_tg_pt_gp_preferred.attr,
+	&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
+	&target_core_alua_tg_pt_gp_members.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
+	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
+	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
+
+/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+static struct config_group *target_core_alua_create_tg_pt_gp(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua *alua = container_of(group, struct t10_alua,
+					alua_tg_pt_gps_group);
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
+	struct config_group *alua_tg_pt_gp_cg = NULL;
+	struct config_item *alua_tg_pt_gp_ci = NULL;
+
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+	if (!(tg_pt_gp))
+		return NULL;
+
+	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
+	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
+
+	config_group_init_type_name(alua_tg_pt_gp_cg, name,
+			&target_core_alua_tg_pt_gp_cit);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s\n",
+		config_item_name(alua_tg_pt_gp_ci));
+
+	return alua_tg_pt_gp_cg;
+}
+
+static void target_core_alua_drop_tg_pt_gp(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
+		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
+
+	config_item_put(item);
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
+static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
+	.make_group		= &target_core_alua_create_tg_pt_gp,
+	.drop_item		= &target_core_alua_drop_tg_pt_gp,
+};
+
+static struct config_item_type target_core_alua_tg_pt_gps_cit = {
+	.ct_group_ops		= &target_core_alua_tg_pt_gps_group_ops,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+
+/* Start functions for struct config_item_type target_core_alua_cit */
+
+/*
+ * target_core_alua_cit is a ConfigFS group that lives under
+ * /sys/kernel/config/target/core/alua.  There are default groups
+ * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
+ * target_core_alua_cit in target_core_init_configfs() below.
+ */
+static struct config_item_type target_core_alua_cit = {
+	.ct_item_ops		= NULL,
+	.ct_attrs		= NULL,
+	.ct_owner		= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_alua_cit */
+
+/* Start functions for struct config_item_type target_core_hba_cit */
+
+static struct config_group *target_core_make_subdev(
+	struct config_group *group,
+	const char *name)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_subsystem_dev *se_dev;
+	struct se_subsystem_api *t;
+	struct config_item *hba_ci = &group->cg_item;
+	struct se_hba *hba = item_to_hba(hba_ci);
+	struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
+
+	if (mutex_lock_interruptible(&hba->hba_access_mutex))
+		return NULL;
+
+	/*
+	 * Locate the struct se_subsystem_api from parent's struct se_hba.
+	 */
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!se_dev) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		goto unlock;
+	}
+	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_reservation.registration_lock);
+	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+
+	se_dev->se_dev_hba = hba;
+	dev_cg = &se_dev->se_dev_group;
+
+	dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+			GFP_KERNEL);
+	if (!(dev_cg->default_groups))
+		goto out;
+	/*
+	 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
+	 * for ->allocate_virtdevice()
+	 *
+	 * se_dev->se_dev_ptr will be set after ->create_virtdevice()
+	 * has been called successfully in the next level up in the
+	 * configfs tree for device object's struct config_group.
+	 */
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		goto out;
+	}
+	spin_lock(&se_global->g_device_lock);
+	list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
+	spin_unlock(&se_global->g_device_lock);
+
+	config_group_init_type_name(&se_dev->se_dev_group, name,
+			&target_core_dev_cit);
+	config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+			&target_core_dev_attrib_cit);
+	config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+			&target_core_dev_pr_cit);
+	config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+			&target_core_dev_wwn_cit);
+	config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+			"alua", &target_core_alua_tg_pt_gps_cit);
+	dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
+	dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
+	dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
+	dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
+	dev_cg->default_groups[4] = NULL;
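+	/*
+	 * The default groups initialized above become the per-device configfs
+	 * directories attrib/, pr/, wwn/ and alua/ under core/$HBA/$DEV/.
+	 */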
+	/*
+	 * Add core/$HBA/$DEV/alua/tg_pt_gps/default_tg_pt_gp
+	 */
+	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+	if (!(tg_pt_gp))
+		goto out;
+
+	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+	tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!(tg_pt_gp_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+				"default_groups\n");
+		goto out;
+	}
+
+	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
+			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
+	tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
+	tg_pt_gp_cg->default_groups[1] = NULL;
+	T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+
+	mutex_unlock(&hba->hba_access_mutex);
+	return &se_dev->se_dev_group;
+out:
+	if (T10_ALUA(se_dev)->default_tg_pt_gp) {
+		core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+		T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+	}
+	if (tg_pt_gp_cg)
+		kfree(tg_pt_gp_cg->default_groups);
+	if (dev_cg)
+		kfree(dev_cg->default_groups);
+	if (se_dev->se_dev_su_ptr)
+		t->free_device(se_dev->se_dev_su_ptr);
+	kfree(se_dev);
+unlock:
+	mutex_unlock(&hba->hba_access_mutex);
+	return NULL;
+}
+
+static void target_core_drop_subdev(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
+				struct se_subsystem_dev, se_dev_group);
+	struct se_hba *hba;
+	struct se_subsystem_api *t;
+	struct config_item *df_item;
+	struct config_group *dev_cg, *tg_pt_gp_cg;
+	int i, ret;
+
+	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+
+	if (mutex_lock_interruptible(&hba->hba_access_mutex))
+		goto out;
+
+	t = hba->transport;
+
+	spin_lock(&se_global->g_device_lock);
+	list_del(&se_dev->g_se_dev_list);
+	spin_unlock(&se_global->g_device_lock);
+
+	tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+	for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
+		df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
+		tg_pt_gp_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+	kfree(tg_pt_gp_cg->default_groups);
+	core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+
+	dev_cg = &se_dev->se_dev_group;
+	for (i = 0; dev_cg->default_groups[i]; i++) {
+		df_item = &dev_cg->default_groups[i]->cg_item;
+		dev_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+	/*
+	 * This pointer will be set when the storage object is enabled with:
+	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+				se_dev->se_dev_ptr);
+
+		ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
+		if (ret < 0)
+			goto hba_out;
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr..
+		 */
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+		"_dev_t: %p\n", se_dev);
+
+hba_out:
+	mutex_unlock(&hba->hba_access_mutex);
+out:
+	kfree(se_dev);
+}
+
+static struct configfs_group_operations target_core_hba_group_ops = {
+	.make_group		= target_core_make_subdev,
+	.drop_item		= target_core_drop_subdev,
+};
+
+CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
+#define SE_HBA_ATTR(_name, _mode)				\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR(_name, _mode,			\
+		target_core_hba_show_attr_##_name,		\
+		target_core_hba_store_attr_##_name);
+
+#define SE_HBA_ATTR_RO(_name)					\
+static struct target_core_hba_attribute				\
+		target_core_hba_##_name =			\
+		__CONFIGFS_EATTR_RO(_name,			\
+		target_core_hba_show_attr_##_name);
+
+static ssize_t target_core_hba_show_attr_hba_info(
+	struct se_hba *hba,
+	char *page)
+{
+	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
+			hba->hba_id, hba->transport->name,
+			TARGET_CORE_CONFIGFS_VERSION);
+}
+
+SE_HBA_ATTR_RO(hba_info);
+
+static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
+				char *page)
+{
+	int hba_mode = 0;
+
+	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
+		hba_mode = 1;
+
+	return sprintf(page, "%d\n", hba_mode);
+}
+
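+/*
+ * Sketch: writing a non-zero value asks the subsystem plugin (pSCSI in
+ * practice) to claim the whole physical HBA via ->pmode_enable_hba();
+ * this is only honoured when the plugin implements that callback and no
+ * devices are currently configured on the HBA.
+ */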
+static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
+				const char *page, size_t count)
+{
+	struct se_subsystem_api *transport = hba->transport;
+	unsigned long mode_flag;
+	int ret;
+
+	if (transport->pmode_enable_hba == NULL)
+		return -EINVAL;
+
+	ret = strict_strtoul(page, 0, &mode_flag);
+	if (ret < 0) {
+		printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+		return -EINVAL;
+	}
+
+	spin_lock(&hba->device_lock);
+	if (!(list_empty(&hba->hba_dev_list))) {
+		printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+		spin_unlock(&hba->device_lock);
+		return -EINVAL;
+	}
+	spin_unlock(&hba->device_lock);
+
+	ret = transport->pmode_enable_hba(hba, mode_flag);
+	if (ret < 0)
+		return -EINVAL;
+	if (ret > 0)
+		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+	else if (ret == 0)
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+
+	return count;
+}
+
+SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+
+static struct configfs_attribute *target_core_hba_attrs[] = {
+	&target_core_hba_hba_info.attr,
+	&target_core_hba_hba_mode.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_core_hba_item_ops = {
+	.show_attribute		= target_core_hba_attr_show,
+	.store_attribute	= target_core_hba_attr_store,
+};
+
+static struct config_item_type target_core_hba_cit = {
+	.ct_item_ops		= &target_core_hba_item_ops,
+	.ct_group_ops		= &target_core_hba_group_ops,
+	.ct_attrs		= target_core_hba_attrs,
+	.ct_owner		= THIS_MODULE,
+};
+
+static struct config_group *target_core_call_addhbatotarget(
+	struct config_group *group,
+	const char *name)
+{
+	char *se_plugin_str, *str, *str2;
+	struct se_hba *hba;
+	char buf[TARGET_CORE_NAME_MAX_LEN];
+	unsigned long plugin_dep_id = 0;
+	int ret;
+
+	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
+	if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
+		printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
+			TARGET_CORE_NAME_MAX_LEN);
+		return ERR_PTR(-ENAMETOOLONG);
+	}
+	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
+
+	str = strstr(buf, "_");
+	if (!(str)) {
+		printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+		return ERR_PTR(-EINVAL);
+	}
+	se_plugin_str = buf;
+	/*
+	 * Special case for subsystem plugins that have "_" in their names.
+	 * Namely rd_direct and rd_mcp..
+	 */
+	str2 = strstr(str+1, "_");
+	if ((str2)) {
+		*str2 = '\0'; /* Terminate for *se_plugin_str */
+		str2++; /* Skip to start of plugin dependent ID */
+		str = str2;
+	} else {
+		*str = '\0'; /* Terminate for *se_plugin_str */
+		str++; /* Skip to start of plugin dependent ID */
+	}
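+	/*
+	 * Illustrative split (IDs are arbitrary): "rd_mcp_1" yields
+	 * se_plugin_str "rd_mcp" with plugin dependent ID "1", while a
+	 * single-underscore name such as "fileio_0" (hypothetical) splits
+	 * at its only "_".
+	 */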
+
+	ret = strict_strtoul(str, 0, &plugin_dep_id);
+	if (ret < 0) {
+		printk(KERN_ERR "strict_strtoul() returned %d for"
+				" plugin_dep_id\n", ret);
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * Load up TCM subsystem plugins if they have not already been loaded.
+	 */
+	if (transport_subsystem_check_init() < 0)
+		return ERR_PTR(-EINVAL);
+
+	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
+	if (IS_ERR(hba))
+		return ERR_CAST(hba);
+
+	config_group_init_type_name(&hba->hba_group, name,
+			&target_core_hba_cit);
+
+	return &hba->hba_group;
+}
+
+static void target_core_call_delhbafromtarget(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_hba *hba = item_to_hba(item);
+
+	config_item_put(item);
+	core_delete_hba(hba);
+}
+
+static struct configfs_group_operations target_core_group_ops = {
+	.make_group	= target_core_call_addhbatotarget,
+	.drop_item	= target_core_call_delhbafromtarget,
+};
+
+static struct config_item_type target_core_cit = {
+	.ct_item_ops	= NULL,
+	.ct_group_ops	= &target_core_group_ops,
+	.ct_attrs	= NULL,
+	.ct_owner	= THIS_MODULE,
+};
+
+/* End functions for struct config_item_type target_core_hba_cit */
+
+static int target_core_init_configfs(void)
+{
+	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
+	struct config_group *lu_gp_cg = NULL;
+	struct configfs_subsystem *subsys;
+	struct proc_dir_entry *scsi_target_proc = NULL;
+	struct t10_alua_lu_gp *lu_gp;
+	int ret;
+
+	printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
+		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
+
+	subsys = target_core_subsystem[0];
+	config_group_init(&subsys->su_group);
+	mutex_init(&subsys->su_mutex);
+
+	INIT_LIST_HEAD(&g_tf_list);
+	mutex_init(&g_tf_lock);
+	init_scsi_index_table();
+	ret = init_se_global();
+	if (ret < 0)
+		return -1;
+	/*
+	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
+	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
+	 */
+	target_cg = &subsys->su_group;
+	target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!(target_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&se_global->target_core_hbagroup,
+			"core", &target_core_cit);
+	target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+	target_cg->default_groups[1] = NULL;
+	/*
+	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
+	 */
+	hba_cg = &se_global->target_core_hbagroup;
+	hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL);
+	if (!(hba_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+		goto out_global;
+	}
+	config_group_init_type_name(&se_global->alua_group,
+			"alua", &target_core_alua_cit);
+	hba_cg->default_groups[0] = &se_global->alua_group;
+	hba_cg->default_groups[1] = NULL;
+	/*
+	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
+	 * groups under /sys/kernel/config/target/core/alua/
+	 */
+	alua_cg = &se_global->alua_group;
+	alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+			GFP_KERNEL);
+	if (!(alua_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&se_global->alua_lu_gps_group,
+			"lu_gps", &target_core_alua_lu_gps_cit);
+	alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+	alua_cg->default_groups[1] = NULL;
+	/*
+	 * Add core/alua/lu_gps/default_lu_gp
+	 */
+	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+	if (IS_ERR(lu_gp))
+		goto out_global;
+
+	lu_gp_cg = &se_global->alua_lu_gps_group;
+	lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+			GFP_KERNEL);
+	if (!(lu_gp_cg->default_groups)) {
+		printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+		goto out_global;
+	}
+
+	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
+				&target_core_alua_lu_gp_cit);
+	lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
+	lu_gp_cg->default_groups[1] = NULL;
+	se_global->default_lu_gp = lu_gp;
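+	/*
+	 * The default groups set up above produce the following directory
+	 * layout once the subsystem is registered below (sketch):
+	 *
+	 *   /sys/kernel/config/target/core/
+	 *   /sys/kernel/config/target/core/alua/
+	 *   /sys/kernel/config/target/core/alua/lu_gps/
+	 *   /sys/kernel/config/target/core/alua/lu_gps/default_lu_gp/
+	 */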
+	/*
+	 * Register the target_core_mod subsystem with configfs.
+	 */
+	ret = configfs_register_subsystem(subsys);
+	if (ret < 0) {
+		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+			ret, subsys->su_group.cg_item.ci_namebuf);
+		goto out_global;
+	}
+	printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+		" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
+	/*
+	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
+	 */
+	ret = rd_module_init();
+	if (ret < 0)
+		goto out;
+
+	if (core_dev_setup_virtual_lun0() < 0)
+		goto out;
+
+	scsi_target_proc = proc_mkdir("scsi_target", 0);
+	if (!(scsi_target_proc)) {
+		printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
+		goto out;
+	}
+	ret = init_scsi_target_mib();
+	if (ret < 0)
+		goto out;
+
+	return 0;
+
+out:
+	configfs_unregister_subsystem(subsys);
+	if (scsi_target_proc)
+		remove_proc_entry("scsi_target", 0);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+out_global:
+	if (se_global->default_lu_gp) {
+		core_alua_free_lu_gp(se_global->default_lu_gp);
+		se_global->default_lu_gp = NULL;
+	}
+	if (lu_gp_cg)
+		kfree(lu_gp_cg->default_groups);
+	if (alua_cg)
+		kfree(alua_cg->default_groups);
+	if (hba_cg)
+		kfree(hba_cg->default_groups);
+	kfree(target_cg->default_groups);
+	release_se_global();
+	return -1;
+}
+
+static void target_core_exit_configfs(void)
+{
+	struct configfs_subsystem *subsys;
+	struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
+	struct config_item *item;
+	int i;
+
+	se_global->in_shutdown = 1;
+	subsys = target_core_subsystem[0];
+
+	lu_gp_cg = &se_global->alua_lu_gps_group;
+	for (i = 0; lu_gp_cg->default_groups[i]; i++) {
+		item = &lu_gp_cg->default_groups[i]->cg_item;
+		lu_gp_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(lu_gp_cg->default_groups);
+	core_alua_free_lu_gp(se_global->default_lu_gp);
+	se_global->default_lu_gp = NULL;
+
+	alua_cg = &se_global->alua_group;
+	for (i = 0; alua_cg->default_groups[i]; i++) {
+		item = &alua_cg->default_groups[i]->cg_item;
+		alua_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(alua_cg->default_groups);
+
+	hba_cg = &se_global->target_core_hbagroup;
+	for (i = 0; hba_cg->default_groups[i]; i++) {
+		item = &hba_cg->default_groups[i]->cg_item;
+		hba_cg->default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(hba_cg->default_groups);
+
+	for (i = 0; subsys->su_group.default_groups[i]; i++) {
+		item = &subsys->su_group.default_groups[i]->cg_item;
+		subsys->su_group.default_groups[i] = NULL;
+		config_item_put(item);
+	}
+	kfree(subsys->su_group.default_groups);
+
+	configfs_unregister_subsystem(subsys);
+	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+			" Infrastructure\n");
+
+	remove_scsi_target_mib();
+	remove_proc_entry("scsi_target", 0);
+	core_dev_release_virtual_lun0();
+	rd_module_exit();
+	release_se_global();
+
+	return;
+}
+
+MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(target_core_init_configfs);
+module_exit(target_core_exit_configfs);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
new file mode 100644
index 0000000..317ce58
--- /dev/null
+++ b/drivers/target/target_core_device.c
@@ -0,0 +1,1694 @@
+/*******************************************************************************
+ * Filename:  target_core_device.c (based on iscsi_target_device.c)
+ *
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+static void se_dev_start(struct se_device *dev);
+static void se_dev_stop(struct se_device *dev);
+
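+/*
+ * Map unpacked_lun to this session's struct se_dev_entry and attach the
+ * matching struct se_lun to se_cmd; write-protected access is rejected
+ * and, when no MappedLUN=0 exists, the TPG's virtual LUN 0 is used as a
+ * read-only fallback for REPORT LUNS and friends.
+ */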
+int transport_get_lun_for_cmd(
+	struct se_cmd *se_cmd,
+	unsigned char *cdb,
+	u32 unpacked_lun)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	unsigned long flags;
+	int read_only = 0;
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	deve = se_cmd->se_deve =
+			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		if (se_cmd) {
+			deve->total_cmds++;
+			deve->total_bytes += se_cmd->data_length;
+
+			if (se_cmd->data_direction == DMA_TO_DEVICE) {
+				if (deve->lun_flags &
+						TRANSPORT_LUNFLAGS_READ_ONLY) {
+					read_only = 1;
+					goto out;
+				}
+				deve->write_bytes += se_cmd->data_length;
+			} else if (se_cmd->data_direction ==
+				   DMA_FROM_DEVICE) {
+				deve->read_bytes += se_cmd->data_length;
+			}
+		}
+		deve->deve_cmds++;
+
+		se_lun = se_cmd->se_lun = deve->se_lun;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+	}
+out:
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	if (!se_lun) {
+		if (read_only) {
+			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08x\n",
+				CMD_TFO(se_cmd)->get_fabric_name(),
+				unpacked_lun);
+			return -1;
+		} else {
+			/*
+			 * Use the se_portal_group->tpg_virt_lun0 to allow for
+			 * REPORT_LUNS, et al to be returned when no active
+			 * MappedLUN=0 exists for this Initiator Port.
+			 */
+			if (unpacked_lun != 0) {
+				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+					" Access for 0x%08x\n",
+					CMD_TFO(se_cmd)->get_fabric_name(),
+					unpacked_lun);
+				return -1;
+			}
+			/*
+			 * Force WRITE PROTECT for virtual LUN 0
+			 */
+			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+			    (se_cmd->data_direction != DMA_NONE)) {
+				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				return -1;
+			}
+#if 0
+			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
+				CMD_TFO(se_cmd)->get_fabric_name());
+#endif
+			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+			se_cmd->orig_fe_lun = 0;
+			se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+		}
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+
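+	/*
+	 * Account per-device command and byte counters under stats_lock.
+	 */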
+	{
+	struct se_device *dev = se_lun->lun_se_dev;
+	spin_lock(&dev->stats_lock);
+	dev->num_cmds++;
+	if (se_cmd->data_direction == DMA_TO_DEVICE)
+		dev->write_bytes += se_cmd->data_length;
+	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+		dev->read_bytes += se_cmd->data_length;
+	spin_unlock(&dev->stats_lock);
+	}
+
+	/*
+	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list is used
+	 * for tracking state of struct se_cmds during LUN shutdown events.
+	 */
+	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
+	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
+	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
+#if 0
+	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
+		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
+#endif
+	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_cmd);
+
+int transport_get_lun_for_tmr(
+	struct se_cmd *se_cmd,
+	u32 unpacked_lun)
+{
+	struct se_device *dev = NULL;
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun = NULL;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	deve = se_cmd->se_deve =
+			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
+		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+		se_cmd->pr_res_key = deve->pr_res_key;
+		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+	}
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	if (!se_lun) {
+		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+			" Access for 0x%08x\n",
+			CMD_TFO(se_cmd)->get_fabric_name(),
+			unpacked_lun);
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+	/*
+	 * Determine if the struct se_lun is online.
+	 */
+/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
+	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
+		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		return -1;
+	}
+
+	spin_lock(&dev->se_tmr_lock);
+	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
+	spin_unlock(&dev->se_tmr_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_get_lun_for_tmr);
+
+/*
+ * This function is called from core_scsi3_emulate_pro_register_and_move()
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * when a matching rtpi is found.
+ */
+struct se_dev_entry *core_get_se_deve_from_rtpi(
+	struct se_node_acl *nacl,
+	u16 rtpi)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_port *port;
+	struct se_portal_group *tpg = nacl->se_tpg;
+	u32 i;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		lun = deve->se_lun;
+		if (!(lun)) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		port = lun->lun_sep;
+		if (!(port)) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		if (port->sep_rtpi != rtpi)
+			continue;
+
+		atomic_inc(&deve->pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		return deve;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return NULL;
+}
+
+int core_free_device_list_for_node(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	u32 i;
+
+	if (!nacl->device_list)
+		return 0;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entries device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+		lun = deve->se_lun;
+
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+		spin_lock_irq(&nacl->device_list_lock);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	kfree(nacl->device_list);
+	nacl->device_list = NULL;
+
+	return 0;
+}
+
+void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+	deve->deve_cmds--;
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return;
+}
+
+void core_update_device_list_access(
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[mapped_lun];
+	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+	} else {
+		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	return;
+}
+
+/*      core_update_device_list_for_node():
+ *
+ *
+ */
+int core_update_device_list_for_node(
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl,
+	u32 mapped_lun,
+	u32 lun_access,
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg,
+	int enable)
+{
+	struct se_port *port = lun->lun_sep;
+	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+	int trans = 0;
+	/*
+	 * If the MappedLUN entry is being disabled, the entry in
+	 * port->sep_alua_list must be removed now before clearing the
+	 * struct se_dev_entry pointers below as logic in
+	 * core_alua_do_transition_tg_pt() depends on these being present.
+	 */
+	if (!(enable)) {
+		/*
+		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
+		 * that have not been explicitly converted to MappedLUNs ->
+		 * struct se_lun_acl.
+		 */
+		if (!(deve->se_lun_acl))
+			return 0;
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_del(&deve->alua_port_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+	}
+
+	spin_lock_irq(&nacl->device_list_lock);
+	if (enable) {
+		/*
+		 * Check if the call is handling a demo mode -> explicit LUN ACL
+		 * transition.  This transition must be for the same struct se_lun
+		 * + mapped_lun that was set up in demo mode.
+		 */
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+			if (deve->se_lun_acl != NULL) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+					" already set for demo mode -> explict"
+					" LUN ACL transition\n");
+				return -1;
+			}
+			if (deve->se_lun != lun) {
+				printk(KERN_ERR "struct se_dev_entry->se_lun does"
+					" match passed struct se_lun for demo mode"
+					" -> explict LUN ACL transition\n");
+				return -1;
+			}
+			deve->se_lun_acl = lun_acl;
+			trans = 1;
+		} else {
+			deve->se_lun = lun;
+			deve->se_lun_acl = lun_acl;
+			deve->mapped_lun = mapped_lun;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+		}
+
+		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+
+		if (trans) {
+			spin_unlock_irq(&nacl->device_list_lock);
+			return 0;
+		}
+		deve->creation_time = get_jiffies_64();
+		deve->attach_count++;
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		return 0;
+	}
+	/*
+	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
+	 * PR operation to complete.
+	 */
+	spin_unlock_irq(&nacl->device_list_lock);
+	while (atomic_read(&deve->pr_ref_count) != 0)
+		cpu_relax();
+	spin_lock_irq(&nacl->device_list_lock);
+	/*
+	 * Disable struct se_dev_entry LUN ACL mapping
+	 */
+	core_scsi3_ua_release_all(deve);
+	deve->se_lun = NULL;
+	deve->se_lun_acl = NULL;
+	deve->lun_flags = 0;
+	deve->creation_time = 0;
+	deve->attach_count--;
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+	return 0;
+}
+
+/*      core_clear_lun_from_tpg():
+ *
+ *
+ */
+void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
+{
+	struct se_node_acl *nacl;
+	struct se_dev_entry *deve;
+	u32 i;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
+		spin_unlock_bh(&tpg->acl_node_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+			deve = &nacl->device_list[i];
+			if (lun != deve->se_lun)
+				continue;
+			spin_unlock_irq(&nacl->device_list_lock);
+
+			core_update_device_list_for_node(lun, NULL,
+				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
+				nacl, tpg, 0);
+
+			spin_lock_irq(&nacl->device_list_lock);
+		}
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		spin_lock_bh(&tpg->acl_node_lock);
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return;
+}
+
+static struct se_port *core_alloc_port(struct se_device *dev)
+{
+	struct se_port *port, *port_tmp;
+
+	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
+	if (!(port)) {
+		printk(KERN_ERR "Unable to allocate struct se_port\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&port->sep_alua_list);
+	INIT_LIST_HEAD(&port->sep_list);
+	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+	spin_lock_init(&port->sep_alua_lock);
+	mutex_init(&port->sep_tg_pt_md_mutex);
+
+	spin_lock(&dev->se_port_lock);
+	if (dev->dev_port_count == 0x0000ffff) {
+		printk(KERN_WARNING "Reached dev->dev_port_count =="
+				" 0x0000ffff\n");
+		spin_unlock(&dev->se_port_lock);
+		return NULL;
+	}
+again:
+	/*
+	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
+	 * Here is the table from spc4r17 section 7.7.3.8.
+	 *
+	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
+	 *
+	 * Code      Description
+	 * 0h        Reserved
+	 * 1h        Relative port 1, historically known as port A
+	 * 2h        Relative port 2, historically known as port B
+	 * 3h to FFFFh    Relative port 3 through 65 535
+	 */
+	port->sep_rtpi = dev->dev_rpti_counter++;
+	if (!(port->sep_rtpi))
+		goto again;
+
+	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
+		/*
+		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
+		 * across a 16-bit wrap.
+		 */
+		if (port->sep_rtpi == port_tmp->sep_rtpi)
+			goto again;
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return port;
+}
+
+static void core_export_port(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_port *port,
+	struct se_lun *lun)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
+
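+	/*
+	 * Bind the new port to its TPG and LUN, then publish it on the
+	 * device's port list.
+	 */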
+	spin_lock(&dev->se_port_lock);
+	spin_lock(&lun->lun_sep_lock);
+	port->sep_tpg = tpg;
+	port->sep_lun = lun;
+	lun->lun_sep = port;
+	spin_unlock(&lun->lun_sep_lock);
+
+	list_add_tail(&port->sep_list, &dev->dev_sep_list);
+	spin_unlock(&dev->se_port_lock);
+
+	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
+		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
+			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+					"_gp_member_t\n");
+			return;
+		}
+		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+			T10_ALUA(su_dev)->default_tg_pt_gp);
+		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+			" Group: alua/default_tg_pt_gp\n",
+			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+	}
+
+	dev->dev_port_count++;
+	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
+}
+
+/*
+ *	Called with struct se_device->se_port_lock spinlock held.
+ */
+static void core_release_port(struct se_device *dev, struct se_port *port)
+{
+	/*
+	 * Wait for any port reference for PR ALL_TG_PT=1 operation
+	 * to complete in __core_scsi3_alloc_registration()
+	 */
+	spin_unlock(&dev->se_port_lock);
+	if (atomic_read(&port->sep_tg_pt_ref_cnt))
+		cpu_relax();
+	spin_lock(&dev->se_port_lock);
+
+	core_alua_free_tg_pt_gp_mem(port);
+
+	list_del(&port->sep_list);
+	dev->dev_port_count--;
+	kfree(port);
+
+	return;
+}
+
+int core_dev_export(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port;
+
+	port = core_alloc_port(dev);
+	if (!(port))
+		return -1;
+
+	lun->lun_se_dev = dev;
+	se_dev_start(dev);
+
+	atomic_inc(&dev->dev_export_obj.obj_access_count);
+	core_export_port(dev, tpg, port, lun);
+	return 0;
+}
+
+void core_dev_unexport(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	struct se_port *port = lun->lun_sep;
+
+	spin_lock(&lun->lun_sep_lock);
+	if (lun->lun_se_dev == NULL) {
+		spin_unlock(&lun->lun_sep_lock);
+		return;
+	}
+	spin_unlock(&lun->lun_sep_lock);
+
+	spin_lock(&dev->se_port_lock);
+	atomic_dec(&dev->dev_export_obj.obj_access_count);
+	core_release_port(dev, port);
+	spin_unlock(&dev->se_port_lock);
+
+	se_dev_stop(dev);
+	lun->lun_se_dev = NULL;
+}
+
+int transport_core_report_lun_response(struct se_cmd *se_cmd)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *se_lun;
+	struct se_session *se_sess = SE_SESS(se_cmd);
+	struct se_task *se_task;
+	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+	u32 cdb_offset = 0, lun_count = 0, offset = 8;
+	u64 i, lun;
+
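+	/*
+	 * Grab the first struct se_task from the command's task list to
+	 * verify that the command has been mapped to at least one task.
+	 */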
+	list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+		break;
+
+	if (!(se_task)) {
+		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	/*
+	 * If no struct se_session pointer is present, this struct se_cmd is
+	 * coming via a target_core_mod PASSTHROUGH op, and not through
+	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
+	 */
+	if (!(se_sess)) {
+		lun = 0;
+		buf[offset++] = ((lun >> 56) & 0xff);
+		buf[offset++] = ((lun >> 48) & 0xff);
+		buf[offset++] = ((lun >> 40) & 0xff);
+		buf[offset++] = ((lun >> 32) & 0xff);
+		buf[offset++] = ((lun >> 24) & 0xff);
+		buf[offset++] = ((lun >> 16) & 0xff);
+		buf[offset++] = ((lun >> 8) & 0xff);
+		buf[offset++] = (lun & 0xff);
+		lun_count = 1;
+		goto done;
+	}
+
+	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &SE_NODE_ACL(se_sess)->device_list[i];
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+		se_lun = deve->se_lun;
+		/*
+		 * Keep counting so the reported LUN LIST LENGTH is correct
+		 * even after the initial allocation length has been reached.
+		 * See SPC2-R20 7.19.
+		 */
+		lun_count++;
+		if ((cdb_offset + 8) >= se_cmd->data_length)
+			continue;
+
+		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
+		buf[offset++] = ((lun >> 56) & 0xff);
+		buf[offset++] = ((lun >> 48) & 0xff);
+		buf[offset++] = ((lun >> 40) & 0xff);
+		buf[offset++] = ((lun >> 32) & 0xff);
+		buf[offset++] = ((lun >> 24) & 0xff);
+		buf[offset++] = ((lun >> 16) & 0xff);
+		buf[offset++] = ((lun >> 8) & 0xff);
+		buf[offset++] = (lun & 0xff);
+		cdb_offset += 8;
+	}
+	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+
+	/*
+	 * See SPC3 r07, page 159.
+	 */
+done:
+	lun_count *= 8;
+	buf[0] = ((lun_count >> 24) & 0xff);
+	buf[1] = ((lun_count >> 16) & 0xff);
+	buf[2] = ((lun_count >> 8) & 0xff);
+	buf[3] = (lun_count & 0xff);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	se_release_device_for_hba():
+ *
+ *
+ */
+void se_release_device_for_hba(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
+	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
+	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
+		se_dev_stop(dev);
+
+	if (dev->dev_ptr) {
+		kthread_stop(dev->process_thread);
+		if (dev->transport->free_device)
+			dev->transport->free_device(dev->dev_ptr);
+	}
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	core_scsi3_free_all_registrations(dev);
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev->dev_status_queue_obj);
+	kfree(dev->dev_queue_obj);
+	kfree(dev);
+
+	return;
+}
+
+void se_release_vpd_for_dev(struct se_device *dev)
+{
+	struct t10_vpd *vpd, *vpd_tmp;
+
+	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+	list_for_each_entry_safe(vpd, vpd_tmp,
+			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+		list_del(&vpd->vpd_list);
+		kfree(vpd);
+	}
+	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+
+	return;
+}
+
+/*
+ * Called with struct se_hba->device_lock held.
+ */
+void se_clear_dev_ports(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_port *sep, *sep_tmp;
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		spin_unlock(&dev->se_port_lock);
+		spin_unlock(&hba->device_lock);
+
+		lun = sep->sep_lun;
+		tpg = sep->sep_tpg;
+		spin_lock(&lun->lun_sep_lock);
+		if (lun->lun_se_dev == NULL) {
+			spin_unlock(&lun->lun_sep_lock);
+			continue;
+		}
+		spin_unlock(&lun->lun_sep_lock);
+
+		core_dev_del_lun(tpg, lun->unpacked_lun);
+
+		spin_lock(&hba->device_lock);
+		spin_lock(&dev->se_port_lock);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return;
+}
+
+/*	se_free_virtual_device():
+ *
+ *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
+ */
+int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
+{
+	spin_lock(&hba->device_lock);
+	se_clear_dev_ports(dev);
+	spin_unlock(&hba->device_lock);
+
+	core_alua_free_lu_gp_mem(dev);
+	se_release_device_for_hba(dev);
+
+	return 0;
+}
+
+static void se_dev_start(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
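+	/*
+	 * The first object reference transitions the device from a
+	 * (OFFLINE_)DEACTIVATED to the corresponding ACTIVATED state.
+	 */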
+	spin_lock(&hba->device_lock);
+	atomic_inc(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
+		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
+			dev->dev_status &=
+				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+}
+
+static void se_dev_stop(struct se_device *dev)
+{
+	struct se_hba *hba = dev->se_hba;
+
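+	/*
+	 * Dropping the last object reference moves the device back to the
+	 * corresponding (OFFLINE_)DEACTIVATED state; then wait for any MIB
+	 * readers to drain.
+	 */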
+	spin_lock(&hba->device_lock);
+	atomic_dec(&dev->dev_obj.obj_access_count);
+	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
+		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
+		} else if (dev->dev_status &
+			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
+			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
+			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
+		}
+	}
+	spin_unlock(&hba->device_lock);
+
+	while (atomic_read(&hba->dev_mib_access_count))
+		cpu_relax();
+}
+
+int se_dev_check_online(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
+	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
+int se_dev_check_shutdown(struct se_device *dev)
+{
+	int ret;
+
+	spin_lock_irq(&dev->dev_status_lock);
+	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
+	spin_unlock_irq(&dev->dev_status_lock);
+
+	return ret;
+}
+
+void se_dev_set_default_attribs(
+	struct se_device *dev,
+	struct se_dev_limits *dev_limits)
+{
+	struct queue_limits *limits = &dev_limits->limits;
+
+	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
+	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
+	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
+	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
+	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
+	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
+	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
+	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
+	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+	/*
+	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
+	 * iblock_create_virtdevice() from struct queue_limits values
+	 * if blk_queue_discard()==1
+	 */
+	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
+				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+	DEV_ATTRIB(dev)->unmap_granularity_alignment =
+				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+	/*
+	 * block_size is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
+	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+	/*
+	 * max_sectors is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
+	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+	/*
+	 * Set optimal_sectors from max_sectors, which can be lowered via
+	 * configfs.
+	 */
+	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+	/*
+	 * queue_depth is based on subsystem plugin dependent requirements.
+	 */
+	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
+	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+}
+
+int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
+{
+	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
+		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
+			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
+		return -1;
+	} else {
+		DEV_ATTRIB(dev)->task_timeout = task_timeout;
+		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+			dev, task_timeout);
+	}
+
+	return 0;
+}
+
+int se_dev_set_max_unmap_lba_count(
+	struct se_device *dev,
+	u32 max_unmap_lba_count)
+{
+	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
+	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
+			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+	return 0;
+}
+
+int se_dev_set_max_unmap_block_desc_count(
+	struct se_device *dev,
+	u32 max_unmap_block_desc_count)
+{
+	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
+	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
+			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity(
+	struct se_device *dev,
+	u32 unmap_granularity)
+{
+	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
+	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
+			dev, DEV_ATTRIB(dev)->unmap_granularity);
+	return 0;
+}
+
+int se_dev_set_unmap_granularity_alignment(
+	struct se_device *dev,
+	u32 unmap_granularity_alignment)
+{
+	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
+	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
+			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+	return 0;
+}
+
+int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->dpo_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_dpo = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
+			" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_fua_write = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_fua_write);
+	return 0;
+}
+
+int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_fua_read = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_fua_read);
+	return 0;
+}
+
+int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
+		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
+		return -1;
+	}
+	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
+		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_write_cache = flag;
+	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+			dev, DEV_ATTRIB(dev)->emulate_write_cache);
+	return 0;
+}
+
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1) && (flag != 2)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
+			" exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
+	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+
+	return 0;
+}
+
+int se_dev_set_emulate_tas(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	DEV_ATTRIB(dev)->emulate_tas = flag;
+	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+
+	return 0;
+}
+
+int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+		printk(KERN_ERR "Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	DEV_ATTRIB(dev)->emulate_tpu = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	/*
+	 * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_create_virtdevice().
+	 */
+	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
+		printk(KERN_ERR "Generic Block Discard not supported\n");
+		return -ENOSYS;
+	}
+
+	DEV_ATTRIB(dev)->emulate_tpws = flag;
+	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+				dev, flag);
+	return 0;
+}
+
+int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
+{
+	if ((flag != 0) && (flag != 1)) {
+		printk(KERN_ERR "Illegal value %d\n", flag);
+		return -1;
+	}
+	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
+	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+	return 0;
+}
+
+/*
+ * Note: this can only be called on an unexported SE Device Object.
+ */
+int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
+{
+	u32 orig_queue_depth = dev->queue_depth;
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+			" dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	if (!(queue_depth)) {
+		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+			"_depth\n", dev);
+		return -1;
+	}
+
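+	/*
+	 * For pSCSI passthrough the new depth may not exceed the reported
+	 * hardware queue depth; virtual backends may raise the depth above
+	 * the current setting, but never beyond the hardware maximum.
+	 */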
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+				" exceeds TCM/SE_Device TCQ: %u\n",
+				dev, queue_depth,
+				DEV_ATTRIB(dev)->hw_queue_depth);
+			return -1;
+		}
+	} else {
+		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
+			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
+				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+					" %u exceeds TCM/SE_Device MAX"
+					" TCQ: %u\n", dev, queue_depth,
+					DEV_ATTRIB(dev)->hw_queue_depth);
+				return -1;
+			}
+		}
+	}
+
+	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+	if (queue_depth > orig_queue_depth)
+		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
+	else if (queue_depth < orig_queue_depth)
+		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
+
+	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+			dev, queue_depth);
+	return 0;
+}
+
+int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
+{
+	int force = 0; /* Force setting for VDEVS */
+
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" max_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+	if (!(max_sectors)) {
+		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+			" max_sectors\n", dev);
+		return -1;
+	}
+	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
+		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MIN);
+		return -1;
+	}
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors:"
+				" %u\n", dev, max_sectors,
+				DEV_ATTRIB(dev)->hw_max_sectors);
+			 return -1;
+		}
+	} else {
+		if (!(force) && (max_sectors >
+				 DEV_ATTRIB(dev)->hw_max_sectors)) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than TCM/SE_Device max_sectors"
+				": %u, use force=1 to override.\n", dev,
+				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
+			return -1;
+		}
+		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+				" greater than DA_STATUS_MAX_SECTORS_MAX:"
+				" %u\n", dev, max_sectors,
+				DA_STATUS_MAX_SECTORS_MAX);
+			return -1;
+		}
+	}
+
+	DEV_ATTRIB(dev)->max_sectors = max_sectors;
+	printk("dev[%p]: SE Device max_sectors changed to %u\n",
+			dev, max_sectors);
+	return 0;
+}
+
+int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+			" optimal_sectors while dev_export_obj: %d count exists\n",
+			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -EINVAL;
+	}
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+				" changed for TCM/pSCSI\n", dev);
+		return -EINVAL;
+	}
+	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
+		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+			" greater than max_sectors: %u\n", dev,
+			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+		return -EINVAL;
+	}
+
+	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
+	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+			dev, optimal_sectors);
+	return 0;
+}
+
+int se_dev_set_block_size(struct se_device *dev, u32 block_size)
+{
+	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+			" while dev_export_obj: %d count exists\n", dev,
+			atomic_read(&dev->dev_export_obj.obj_access_count));
+		return -1;
+	}
+
+	if ((block_size != 512) &&
+	    (block_size != 1024) &&
+	    (block_size != 2048) &&
+	    (block_size != 4096)) {
+		printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
+			" for SE device, must be 512, 1024, 2048 or 4096\n",
+			dev, block_size);
+		return -1;
+	}
+
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+			" Physical Device, use for Linux/SCSI to change"
+			" block_size for underlying hardware\n", dev);
+		return -1;
+	}
+
+	DEV_ATTRIB(dev)->block_size = block_size;
+	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+			dev, block_size);
+	return 0;
+}
+
+struct se_lun *core_dev_add_lun(
+	struct se_portal_group *tpg,
+	struct se_hba *hba,
+	struct se_device *dev,
+	u32 lun)
+{
+	struct se_lun *lun_p;
+	u32 lun_access = 0;
+
+	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
+		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+			atomic_read(&dev->dev_access_obj.obj_access_count));
+		return NULL;
+	}
+
+	lun_p = core_tpg_pre_addlun(tpg, lun);
+	if ((IS_ERR(lun_p)) || !(lun_p))
+		return NULL;
+
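+	/*
+	 * Propagate the backend DF_READ_ONLY flag into the default LUN
+	 * access mode for this export.
+	 */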
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+
+	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
+		return NULL;
+
+	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
+		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+	/*
+	 * Update LUN maps for dynamically added initiators when
+	 * generate_node_acls is enabled.
+	 */
+	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+		struct se_node_acl *acl;
+		spin_lock_bh(&tpg->acl_node_lock);
+		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+			if (acl->dynamic_node_acl) {
+				spin_unlock_bh(&tpg->acl_node_lock);
+				core_tpg_add_node_to_devs(acl, tpg);
+				spin_lock_bh(&tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&tpg->acl_node_lock);
+	}
+
+	return lun_p;
+}
+
+/*      core_dev_del_lun():
+ *
+ *
+ */
+int core_dev_del_lun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+	int ret = 0;
+
+	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
+	if (!(lun))
+		return ret;
+
+	core_tpg_post_dellun(tpg, lun);
+
+	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
+		TPG_TFO(tpg)->get_fabric_name());
+
+	return 0;
+}
+
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+/*      core_dev_get_lun():
+ *
+ *
+ */
+static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+			"_TPG-1: %u for Target Portal Group: %hu\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return NULL;
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	u32 mapped_lun,
+	char *initiatorname,
+	int *ret)
+{
+	struct se_lun_acl *lacl;
+	struct se_node_acl *nacl;
+
+	if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
+		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
+			TPG_TFO(tpg)->get_fabric_name());
+		*ret = -EOVERFLOW;
+		return NULL;
+	}
+	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!(nacl)) {
+		*ret = -EINVAL;
+		return NULL;
+	}
+	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+	if (!(lacl)) {
+		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+		*ret = -ENOMEM;
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&lacl->lacl_list);
+	lacl->mapped_lun = mapped_lun;
+	lacl->se_lun_nacl = nacl;
+	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+
+	return lacl;
+}
+
+int core_dev_add_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl,
+	u32 unpacked_lun,
+	u32 lun_access)
+{
+	struct se_lun *lun;
+	struct se_node_acl *nacl;
+
+	lun = core_dev_get_lun(tpg, unpacked_lun);
+	if (!(lun)) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %hu, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return -EINVAL;
+	}
+
+	nacl = lacl->se_lun_nacl;
+	if (!(nacl))
+		return -EINVAL;
+
+	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
+	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
+		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+
+	lacl->se_lun = lun;
+
+	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
+			lun_access, nacl, tpg, 1) < 0)
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
+	atomic_inc(&lun->lun_acl_count);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&lun->lun_acl_lock);
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+		lacl->initiatorname);
+	/*
+	 * Check to see if there are any existing persistent reservation APTPL
+	 * pre-registrations that need to be enabled for this LUN ACL..
+	 */
+	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+	return 0;
+}
+
+/*      core_dev_del_initiator_node_lun_acl():
+ *
+ *
+ */
+int core_dev_del_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lacl)
+{
+	struct se_node_acl *nacl;
+
+	nacl = lacl->se_lun_nacl;
+	if (!(nacl))
+		return -EINVAL;
+
+	spin_lock(&lun->lun_acl_lock);
+	list_del(&lacl->lacl_list);
+	atomic_dec(&lun->lun_acl_count);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&lun->lun_acl_lock);
+
+	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
+		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+	lacl->se_lun = NULL;
+
+	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+		" InitiatorNode: %s Mapped LUN: %u\n",
+		TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+		lacl->initiatorname, lacl->mapped_lun);
+
+	return 0;
+}
+
+void core_dev_free_initiator_node_lun_acl(
+	struct se_portal_group *tpg,
+	struct se_lun_acl *lacl)
+{
+	printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg),
+		TPG_TFO(tpg)->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun);
+
+	kfree(lacl);
+}
+
+int core_dev_setup_virtual_lun0(void)
+{
+	struct se_hba *hba;
+	struct se_device *dev;
+	struct se_subsystem_dev *se_dev = NULL;
+	struct se_subsystem_api *t;
+	char buf[16];
+	int ret;
+
+	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+	if (IS_ERR(hba))
+		return PTR_ERR(hba);
+
+	se_global->g_lun0_hba = hba;
+	t = hba->transport;
+
+	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
+	if (!(se_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_subsystem_dev\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
+	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
+	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
+	spin_lock_init(&se_dev->t10_reservation.registration_lock);
+	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
+	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
+	spin_lock_init(&se_dev->se_dev_lock);
+	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+	se_dev->t10_wwn.t10_sub_dev = se_dev;
+	se_dev->t10_alua.t10_sub_dev = se_dev;
+	se_dev->se_dev_attrib.da_sub_dev = se_dev;
+	se_dev->se_dev_hba = hba;
+
+	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
+	if (!(se_dev->se_dev_su_ptr)) {
+		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+			" from allocate_virtdevice()\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	se_global->g_lun0_su_dev = se_dev;
+
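+	/*
+	 * Size the internal ramdisk backing virtual LUN 0 to 8 pages via the
+	 * configfs parameter string.
+	 */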
+	memset(buf, 0, 16);
+	sprintf(buf, "rd_pages=8");
+	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+
+	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
+	if (!(dev) || IS_ERR(dev)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	se_dev->se_dev_ptr = dev;
+	se_global->g_lun0_dev = dev;
+
+	return 0;
+out:
+	se_global->g_lun0_su_dev = NULL;
+	kfree(se_dev);
+	if (se_global->g_lun0_hba) {
+		core_delete_hba(se_global->g_lun0_hba);
+		se_global->g_lun0_hba = NULL;
+	}
+	return ret;
+}
+
+
+void core_dev_release_virtual_lun0(void)
+{
+	struct se_hba *hba = se_global->g_lun0_hba;
+	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+
+	if (!(hba))
+		return;
+
+	if (se_global->g_lun0_dev)
+		se_free_virtual_device(se_global->g_lun0_dev, hba);
+
+	kfree(su_dev);
+	core_delete_hba(hba);
+}
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
new file mode 100644
index 0000000..32b148d
--- /dev/null
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -0,0 +1,996 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_configfs.c
+ *
+ * This file contains generic fabric module configfs infrastructure for
+ * TCM v4.x code
+ *
+ * Copyright (c) 2010 Rising Tide Systems
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/unistd.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/configfs.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
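+/*
+ * Generate a target_fabric_setup_$NAME_cit() helper that wires the given
+ * item ops, group ops and attributes into the fabric's config_item_type
+ * template.
+ */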
+#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{									\
+	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;	\
+	struct config_item_type *cit = &tfc->tfc_##_name##_cit;		\
+									\
+	cit->ct_item_ops = _item_ops;					\
+	cit->ct_group_ops = _group_ops;					\
+	cit->ct_attrs = _attrs;						\
+	cit->ct_owner = tf->tf_module;					\
+	printk("Setup generic %s\n", __stringify(_name));		\
+}
+
+/* Start of tfc_tpg_mappedlun_cit */
+
+static int target_fabric_mappedlun_link(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_dev_entry *deve;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg;
+	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+	int ret = 0, lun_access;
+	/*
+	 * Ensure that the source port exists
+	 */
+	if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
+		printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+				"_tpg does not exist\n");
+		return -EINVAL;
+	}
+	se_tpg = lun->lun_sep->sep_tpg;
+
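+	/*
+	 * Walk up the configfs hierarchy on both ends of the symlink to
+	 * locate the owning TPG and WWN items for validation below.
+	 */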
+	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+	tpg_ci = &nacl_ci->ci_group->cg_item;
+	wwn_ci = &tpg_ci->ci_group->cg_item;
+	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
+	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
+	/*
+	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
+	 */
+	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
+		printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+			config_item_name(wwn_ci));
+		return -EINVAL;
+	}
+	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
+		printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+			" TPGT: %s\n", config_item_name(wwn_ci),
+			config_item_name(tpg_ci));
+		return -EINVAL;
+	}
+	/*
+	 * If this struct se_node_acl was dynamically generated with
+	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
+	 * which be will write protected (READ-ONLY) when
+	 * tpg_1/attrib/demo_mode_write_protect=1
+	 */
+	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
+	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
+	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+		lun_access = deve->lun_flags;
+	else
+		lun_access =
+			(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
+					   TRANSPORT_LUNFLAGS_READ_WRITE;
+	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+	/*
+	 * Determine the actual mapped LUN value the user wants.
+	 *
+	 * This is the value that the SCSI Initiator actually sees for
+	 * iscsi/$IQN/$TPGT/lun/lun_* on its SCSI Initiator Ports.
+	 */
+	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
+			lun->unpacked_lun, lun_access);
+
+	return (ret < 0) ? -EINVAL : 0;
+}
+
+static int target_fabric_mappedlun_unlink(
+	struct config_item *lun_acl_ci,
+	struct config_item *lun_ci)
+{
+	struct se_lun *lun;
+	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
+			struct se_lun_acl, se_lun_group);
+	struct se_node_acl *nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
+	struct se_portal_group *se_tpg;
+	/*
+	 * Determine if the underlying MappedLUN has already been released..
+	 */
+	if (!(deve->se_lun))
+		return 0;
+
+	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+	se_tpg = lun->lun_sep->sep_tpg;
+
+	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
+	return 0;
+}
+
+CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
+#define TCM_MAPPEDLUN_ATTR(_name, _mode)				\
+static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_mappedlun_show_##_name,				\
+	target_fabric_mappedlun_store_##_name);
+
+static ssize_t target_fabric_mappedlun_show_write_protect(
+	struct se_lun_acl *lacl,
+	char *page)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_dev_entry *deve;
+	ssize_t len;
+
+	spin_lock_irq(&se_nacl->device_list_lock);
+	deve = &se_nacl->device_list[lacl->mapped_lun];
+	len = sprintf(page, "%d\n",
+			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
+			1 : 0);
+	spin_unlock_irq(&se_nacl->device_list_lock);
+
+	return len;
+}
+
+static ssize_t target_fabric_mappedlun_store_write_protect(
+	struct se_lun_acl *lacl,
+	const char *page,
+	size_t count)
+{
+	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	unsigned long op;
+
+	if (strict_strtoul(page, 0, &op))
+		return -EINVAL;
+
+	if ((op != 1) && (op != 0))
+		return -EINVAL;
+
+	core_update_device_list_access(lacl->mapped_lun, (op) ?
+			TRANSPORT_LUNFLAGS_READ_ONLY :
+			TRANSPORT_LUNFLAGS_READ_WRITE,
+			lacl->se_lun_nacl);
+
+	printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+		" Mapped LUN: %u Write Protect bit to %s\n",
+		TPG_TFO(se_tpg)->get_fabric_name(),
+		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+
+	return count;
+
+}
+
+TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+
+CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+	&target_fabric_mappedlun_write_protect.attr,
+	NULL,
+};
+
+static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.show_attribute		= target_fabric_mappedlun_attr_show,
+	.store_attribute	= target_fabric_mappedlun_attr_store,
+	.allow_link		= target_fabric_mappedlun_link,
+	.drop_link		= target_fabric_mappedlun_unlink,
+};
+
+TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
+		target_fabric_mappedlun_attrs);
+
+/* End of tfc_tpg_mappedlun_cit */
+
+/* Start of tfc_tpg_nacl_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
+
+static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
+	.show_attribute		= target_fabric_nacl_attrib_attr_show,
+	.store_attribute	= target_fabric_nacl_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_attrib_cit */
+
+/* Start of tfc_tpg_nacl_auth_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
+
+static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
+	.show_attribute		= target_fabric_nacl_auth_attr_show,
+	.store_attribute	= target_fabric_nacl_auth_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_auth_cit */
+
+/* Start of tfc_tpg_nacl_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
+
+static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
+	.show_attribute		= target_fabric_nacl_param_attr_show,
+	.store_attribute	= target_fabric_nacl_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_nacl_param_cit */
+
+/* Start of tfc_tpg_nacl_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
+
+static struct config_group *target_fabric_make_mappedlun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl = container_of(group,
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_lun_acl *lacl;
+	struct config_item *acl_ci;
+	char *buf;
+	unsigned long mapped_lun;
+	int ret = 0;
+
+	acl_ci = &group->cg_item;
+	if (!(acl_ci)) {
+		printk(KERN_ERR "Unable to locatel acl_ci\n");
+		return NULL;
+	}
+
+	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for name buf\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	snprintf(buf, strlen(name) + 1, "%s", name);
+	/*
+	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
+	 */
+	if (strstr(buf, "lun_") != buf) {
+		printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+			" name: %s\n", buf, name);
+		ret = -EINVAL;
+		goto out;
+	}
+	/*
+	 * Determine the Mapped LUN value.  This is what the SCSI Initiator
+	 * Port will actually see.
+	 */
+	if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+			config_item_name(acl_ci), &ret);
+	if (!(lacl))
+		goto out;
+
+	config_group_init_type_name(&lacl->se_lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+
+	kfree(buf);
+	return &lacl->se_lun_group;
+out:
+	kfree(buf);
+	return ERR_PTR(ret);
+}
+
+static void target_fabric_drop_mappedlun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+			struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	config_item_put(item);
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.show_attribute		= target_fabric_nacl_base_attr_show,
+	.store_attribute	= target_fabric_nacl_base_attr_store,
+};
+
+static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
+	.make_group		= target_fabric_make_mappedlun,
+	.drop_item		= target_fabric_drop_mappedlun,
+};
+
+TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+		&target_fabric_nacl_base_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_base_cit */
+
+/* Start of tfc_tpg_nacl_cit */
+
+static struct config_group *target_fabric_make_nodeacl(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl;
+	struct config_group *nacl_cg;
+
+	if (!(tf->tf_ops.fabric_make_nodeacl)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+	if (IS_ERR(se_nacl))
+		return ERR_PTR(PTR_ERR(se_nacl));
+
+	nacl_cg = &se_nacl->acl_group;
+	nacl_cg->default_groups = se_nacl->acl_default_groups;
+	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
+	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
+	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
+	nacl_cg->default_groups[3] = NULL;
+
+	config_group_init_type_name(&se_nacl->acl_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+	config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+	config_group_init_type_name(&se_nacl->acl_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+
+	return &se_nacl->acl_group;
+}
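+/*
+ * Illustrative configfs layout (fabric and name tokens are examples): each
+ * NodeACL created above appears as
+ *
+ *   .../$FABRIC/$WWN/$TPGT/acls/$INITIATOR_NAME/{attrib,auth,param}/
+ *
+ * with lun_$MAPPED_LUN groups created on demand via
+ * target_fabric_make_mappedlun().
+ */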
+
+static void target_fabric_drop_nodeacl(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_acl_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct config_item *df_item;
+	struct config_group *nacl_cg;
+	int i;
+
+	nacl_cg = &se_nacl->acl_group;
+	for (i = 0; nacl_cg->default_groups[i]; i++) {
+		df_item = &nacl_cg->default_groups[i]->cg_item;
+		nacl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+}
+
+static struct configfs_group_operations target_fabric_nacl_group_ops = {
+	.make_group	= target_fabric_make_nodeacl,
+	.drop_item	= target_fabric_drop_nodeacl,
+};
+
+TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+
+/* End of tfc_tpg_nacl_cit */
+
+/* Start of tfc_tpg_np_base_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+
+static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.show_attribute		= target_fabric_np_base_attr_show,
+	.store_attribute	= target_fabric_np_base_attr_store,
+};
+
+TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_np_base_cit */
+
+/* Start of tfc_tpg_np_cit */
+
+static struct config_group *target_fabric_make_np(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np;
+
+	if (!(tf->tf_ops.fabric_make_np)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+		return ERR_PTR(-EINVAL);
+
+	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+
+	return &se_tpg_np->tpg_np_group;
+}
+
+static void target_fabric_drop_np(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(group,
+				struct se_portal_group, tpg_np_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
+static struct configfs_group_operations target_fabric_np_group_ops = {
+	.make_group	= &target_fabric_make_np,
+	.drop_item	= &target_fabric_drop_np,
+};
+
+TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
+
+/* End of tfc_tpg_np_cit */
+
+/* Start of tfc_tpg_port_cit */
+
+CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
+#define TCM_PORT_ATTR(_name, _mode)					\
+static struct target_fabric_port_attribute target_fabric_port_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	target_fabric_port_show_attr_##_name,				\
+	target_fabric_port_store_attr_##_name);
+
+#define TCM_PORT_ATTR_RO(_name)					\
+	__CONFIGFS_EATTR_RO(_name,					\
+	target_fabric_port_show_attr_##_name);
+
+/*
+ * alua_tg_pt_gp
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_offline
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_offline_bit(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_offline_bit(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_status
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_secondary_status(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_secondary_status(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
+
+/*
+ * alua_tg_pt_write_md
+ */
+static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	char *page)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_show_secondary_write_metadata(lun, page);
+}
+
+static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
+	struct se_lun *lun,
+	const char *page,
+	size_t count)
+{
+	if (!(lun))
+		return -ENODEV;
+
+	if (!(lun->lun_sep))
+		return -ENODEV;
+
+	return core_alua_store_secondary_write_metadata(lun, page, count);
+}
+
+TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
+
+
+static struct configfs_attribute *target_fabric_port_attrs[] = {
+	&target_fabric_port_alua_tg_pt_gp.attr,
+	&target_fabric_port_alua_tg_pt_offline.attr,
+	&target_fabric_port_alua_tg_pt_status.attr,
+	&target_fabric_port_alua_tg_pt_write_md.attr,
+	NULL,
+};
+
+CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
+
+static int target_fabric_port_link(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct config_item *tpg_ci;
+	struct se_device *dev;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_lun *lun_p;
+	struct se_portal_group *se_tpg;
+	struct se_subsystem_dev *se_dev = container_of(
+				to_config_group(se_dev_ci), struct se_subsystem_dev,
+				se_dev_group);
+	struct target_fabric_configfs *tf;
+	int ret;
+
+	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+	se_tpg = container_of(to_config_group(tpg_ci),
+				struct se_portal_group, tpg_group);
+	tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (lun->lun_se_dev != NULL) {
+		printk(KERN_ERR "Port Symlink already exists\n");
+		return -EEXIST;
+	}
+
+	dev = se_dev->se_dev_ptr;
+	if (!(dev)) {
+		printk(KERN_ERR "Unable to locate struct se_device pointer from"
+			" %s\n", config_item_name(se_dev_ci));
+		ret = -ENODEV;
+		goto out;
+	}
+
+	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
+				lun->unpacked_lun);
+	if ((IS_ERR(lun_p)) || !(lun_p)) {
+		printk(KERN_ERR "core_dev_add_lun() failed\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (tf->tf_ops.fabric_post_link) {
+		/*
+		 * Call the optional fabric_post_link() to allow a
+		 * fabric module to set up any additional state once
+		 * core_dev_add_lun() has been called.
+		 */
+		tf->tf_ops.fabric_post_link(se_tpg, lun);
+	}
+
+	return 0;
+out:
+	return ret;
+}
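+/*
+ * Illustrative usage (paths are examples only): the LUN <-> device symlink
+ * that triggers target_fabric_port_link() above is created with something
+ * like
+ *
+ *   ln -s /sys/kernel/config/target/core/fileio_0/my_dev \
+ *	/sys/kernel/config/target/$FABRIC/$WWN/$TPGT/lun/lun_0
+ */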
+
+static int target_fabric_port_unlink(
+	struct config_item *lun_ci,
+	struct config_item *se_dev_ci)
+{
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+				struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	if (tf->tf_ops.fabric_pre_unlink) {
+		/*
+		 * Call the optional fabric_pre_unlink() to allow a
+		 * fabric module to release any additional state before
+		 * core_dev_del_lun() is called.
+		 */
+		tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+	}
+
+	core_dev_del_lun(se_tpg, lun->unpacked_lun);
+	return 0;
+}
+
+static struct configfs_item_operations target_fabric_port_item_ops = {
+	.show_attribute		= target_fabric_port_attr_show,
+	.store_attribute	= target_fabric_port_attr_store,
+	.allow_link		= target_fabric_port_link,
+	.drop_link		= target_fabric_port_unlink,
+};
+
+TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
+
+/* End of tfc_tpg_port_cit */
+
+/* Start of tfc_tpg_lun_cit */
+
+static struct config_group *target_fabric_make_lun(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_lun *lun;
+	struct se_portal_group *se_tpg = container_of(group,
+			struct se_portal_group, tpg_lun_group);
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+	unsigned long unpacked_lun;
+
+	if (strstr(name, "lun_") != name) {
+		printk(KERN_ERR "Unable to locate \'_\" in"
+				" \"lun_$LUN_NUMBER\"\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+
+	lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
+	if (!(lun))
+		return ERR_PTR(-EINVAL);
+
+	config_group_init_type_name(&lun->lun_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+
+	return &lun->lun_group;
+}
+
+static void target_fabric_drop_lun(
+	struct config_group *group,
+	struct config_item *item)
+{
+	config_item_put(item);
+}
+
+static struct configfs_group_operations target_fabric_lun_group_ops = {
+	.make_group	= &target_fabric_make_lun,
+	.drop_item	= &target_fabric_drop_lun,
+};
+
+TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
+
+/* End of tfc_tpg_lun_cit */
+
+/* Start of tfc_tpg_attrib_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+
+static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
+	.show_attribute		= target_fabric_tpg_attrib_attr_show,
+	.store_attribute	= target_fabric_tpg_attrib_attr_store,
+};
+
+TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_attrib_cit */
+
+/* Start of tfc_tpg_param_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+
+static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
+	.show_attribute		= target_fabric_tpg_param_attr_show,
+	.store_attribute	= target_fabric_tpg_param_attr_store,
+};
+
+TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_param_cit */
+
+/* Start of tfc_tpg_base_cit */
+/*
+ * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.show_attribute		= target_fabric_tpg_attr_show,
+	.store_attribute	= target_fabric_tpg_attr_store,
+};
+
+TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+
+/* End of tfc_tpg_base_cit */
+
+/* Start of tfc_tpg_cit */
+
+static struct config_group *target_fabric_make_tpg(
+	struct config_group *group,
+	const char *name)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg;
+
+	if (!(tf->tf_ops.fabric_make_tpg)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+	if (!(se_tpg) || IS_ERR(se_tpg))
+		return ERR_PTR(-EINVAL);
+	/*
+	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
+	 */
+	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
+	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
+	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
+	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
+	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
+	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
+	se_tpg->tpg_group.default_groups[5] = NULL;
+
+	config_group_init_type_name(&se_tpg->tpg_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
+			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
+			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
+			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
+			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
+			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+
+	return &se_tpg->tpg_group;
+}
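+/*
+ * Illustrative result: a TPG created above is populated with the five
+ * default groups wired up here, i.e.
+ *
+ *   .../$FABRIC/$WWN/$TPGT/{lun,np,acls,attrib,param}/
+ */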
+
+static void target_fabric_drop_tpg(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+				struct se_portal_group, tpg_group);
+	struct config_group *tpg_cg = &se_tpg->tpg_group;
+	struct config_item *df_item;
+	int i;
+	/*
+	 * Release default groups, but do not release tpg_cg->default_groups
+	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
+	 */
+	for (i = 0; tpg_cg->default_groups[i]; i++) {
+		df_item = &tpg_cg->default_groups[i]->cg_item;
+		tpg_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
+static struct configfs_group_operations target_fabric_tpg_group_ops = {
+	.make_group	= target_fabric_make_tpg,
+	.drop_item	= target_fabric_drop_tpg,
+};
+
+TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+
+/* End of tfc_tpg_cit */
+
+/* Start of tfc_wwn_cit */
+
+static struct config_group *target_fabric_make_wwn(
+	struct config_group *group,
+	const char *name)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn;
+
+	if (!(tf->tf_ops.fabric_make_wwn)) {
+		printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+		return ERR_PTR(-ENOSYS);
+	}
+
+	wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+	if (!(wwn) || IS_ERR(wwn))
+		return ERR_PTR(-EINVAL);
+
+	wwn->wwn_tf = tf;
+	config_group_init_type_name(&wwn->wwn_group, name,
+			&TF_CIT_TMPL(tf)->tfc_tpg_cit);
+
+	return &wwn->wwn_group;
+}
+
+static void target_fabric_drop_wwn(
+	struct config_group *group,
+	struct config_item *item)
+{
+	struct target_fabric_configfs *tf = container_of(group,
+				struct target_fabric_configfs, tf_group);
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+
+	config_item_put(item);
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_group_operations target_fabric_wwn_group_ops = {
+	.make_group	= target_fabric_make_wwn,
+	.drop_item	= target_fabric_drop_wwn,
+};
+/*
+ * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
+ */
+CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
+
+static struct configfs_item_operations target_fabric_wwn_item_ops = {
+	.show_attribute		= target_fabric_wwn_attr_show,
+	.store_attribute	= target_fabric_wwn_attr_store,
+};
+
+TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+
+/* End of tfc_wwn_cit */
+
+/* Start of tfc_discovery_cit */
+
+CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
+		tf_disc_group);
+
+static struct configfs_item_operations target_fabric_discovery_item_ops = {
+	.show_attribute		= target_fabric_discovery_attr_show,
+	.store_attribute	= target_fabric_discovery_attr_store,
+};
+
+TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+
+/* End of tfc_discovery_cit */
+
+int target_fabric_setup_cits(struct target_fabric_configfs *tf)
+{
+	target_fabric_setup_discovery_cit(tf);
+	target_fabric_setup_wwn_cit(tf);
+	target_fabric_setup_tpg_cit(tf);
+	target_fabric_setup_tpg_base_cit(tf);
+	target_fabric_setup_tpg_port_cit(tf);
+	target_fabric_setup_tpg_lun_cit(tf);
+	target_fabric_setup_tpg_np_cit(tf);
+	target_fabric_setup_tpg_np_base_cit(tf);
+	target_fabric_setup_tpg_attrib_cit(tf);
+	target_fabric_setup_tpg_param_cit(tf);
+	target_fabric_setup_tpg_nacl_cit(tf);
+	target_fabric_setup_tpg_nacl_base_cit(tf);
+	target_fabric_setup_tpg_nacl_attrib_cit(tf);
+	target_fabric_setup_tpg_nacl_auth_cit(tf);
+	target_fabric_setup_tpg_nacl_param_cit(tf);
+	target_fabric_setup_tpg_mappedlun_cit(tf);
+
+	return 0;
+}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
new file mode 100644
index 0000000..2628564
--- /dev/null
+++ b/drivers/target/target_core_fabric_lib.c
@@ -0,0 +1,451 @@
+/*******************************************************************************
+ * Filename:  target_core_fabric_lib.c
+ *
+ * This file contains generic high level protocol identifier and PR
+ * handlers for TCM fabric modules
+ *
+ * Copyright (c) 2010 Rising Tide Systems, Inc.
+ * Copyright (c) 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+
+/*
+ * Handlers for Serial Attached SCSI (SAS)
+ */
+u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * Return a SAS Serial SCSI Protocol identifier for loopback operations
+	 * This is defined in section 7.5.1 Table 362 in spc4r17
+	 */
+	return 0x6;
+}
+EXPORT_SYMBOL(sas_get_fabric_proto_ident);
+
+u32 sas_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char binary, *ptr;
+	int i;
+	u32 off = 4;
+	/*
+	 * Set PROTOCOL IDENTIFIER to 6h for SAS
+	 */
+	buf[0] = 0x06;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 */
+	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
+
+	for (i = 0; i < 16; i += 2) {
+		binary = transport_asciihex_to_binaryhex(&ptr[i]);
+		buf[off++] = binary;
+	}
+	/*
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id);
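+/*
+ * Illustrative layout of the buffer filled in above, assuming the caller
+ * passes a zeroed buffer and an initiatorname such as "naa.5001405e4c12f4aa"
+ * (example value):
+ *
+ *   byte  0    : 0x06 (PROTOCOL IDENTIFIER = SAS)
+ *   bytes 4-11 : 0x50 0x01 0x40 0x5e 0x4c 0x12 0xf4 0xaa (binary NAA name)
+ *   remainder  : reserved, for a fixed 24-byte TransportID
+ */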
+
+u32 sas_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
+	 * over SAS Serial SCSI Protocol
+	 *
+	 * The SAS Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(sas_get_pr_transport_id_len);
+
+/*
+ * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
+ * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
+ */
+char *sas_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+	 * for initiator ports using SCSI over SAS Serial SCSI Protocol
+	 *
+	 * The TransportID for a SAS Initiator Port is of fixed size of
+	 * 24 bytes, and SAS does not contain an I_T nexus identifier,
+	 * so we return the **port_nexus_ptr set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Fibre Channel Protocol (FCP)
+ */
+u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	return 0x0;	/* 0 = fcp-2 per SPC4 section 7.5.1 */
+}
+EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+
+u32 fc_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	*format_code = 0;
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id_len);
+
+u32 fc_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	unsigned char binary, *ptr;
+	int i;
+	u32 off = 8;
+	/*
+	 * PROTOCOL IDENTIFIER is 0h for FCP-2
+	 *
+	 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
+	 * SCSI over Fibre Channel
+	 *
+	 * We convert the ASCII formatted N Port name into a binary
+	 * encoded TransportID.
+	 */
+	ptr = &se_nacl->initiatorname[0];
+
+	for (i = 0; i < 24; ) {
+		if (!(strncmp(&ptr[i], ":", 1))) {
+			i++;
+			continue;
+		}
+		binary = transport_asciihex_to_binaryhex(&ptr[i]);
+		buf[off++] = binary;
+		i += 2;
+	}
+	/*
+	 * The FC Transport ID is a hardcoded 24-byte length
+	 */
+	return 24;
+}
+EXPORT_SYMBOL(fc_get_pr_transport_id);
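+/*
+ * Illustrative example: for an N Port name such as "21:00:00:e0:8b:05:05:04"
+ * (example value) the loop above skips the ':' separators and packs the
+ * eight binary bytes 0x21 0x00 0x00 0xe0 0x8b 0x05 0x05 0x04 at offsets
+ * 8-15; byte 0 stays 0h (FCP-2) assuming a zeroed buffer, and the
+ * TransportID remains a fixed 24 bytes.
+ */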
+
+char *fc_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	/*
+	 * The TransportID for a FC N Port is of fixed size of
+	 * 24 bytes, and FC does not contain an I_T nexus identifier,
+	 * so we return the **port_nexus_ptr set to NULL.
+	 */
+	*port_nexus_ptr = NULL;
+	*out_tid_len = 24;
+
+	return (char *)&buf[8];
+}
+EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+
+/*
+ * Handlers for Internet Small Computer Systems Interface (iSCSI)
+ */
+
+u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	/*
+	 * This value is defined for "Internet SCSI (iSCSI)"
+	 * in spc4r17 section 7.5.1 Table 362
+	 */
+	return 0x5;
+}
+EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
+
+u32 iscsi_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	u32 off = 4, padding = 0;
+	u16 len = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
+	 */
+	buf[0] = 0x05;
+	/*
+	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+	 * ports using SCSI over iSCSI.
+	 *
+	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
+	 * shall contain the iSCSI name of an iSCSI initiator node (see
+	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
+	 * null character terminates the ISCSI NAME field without regard for
+	 * the specified length of the iSCSI TransportID or the contents of
+	 * the ADDITIONAL LENGTH field.
+	 */
+	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+	/*
+	 * Add Extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is an ISID present with the registration and
+	 * *format_code == 1, use the iSCSI Initiator port TransportID format.
+	 *
+	 * Otherwise use iSCSI Initiator device TransportID format that
+	 * does not contain the ASCII encoded iSCSI Initiator iSID value
+	 * provided by the iSCSI Initiator during the iSCSI login process.
+	 */
+	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
+		/*
+		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
+		 * format.
+		 */
+		buf[0] |= 0x40;
+		/*
+		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
+		 * ports using SCSI over iSCSI.  Table 390
+		 *
+		 * The SEPARATOR field shall contain the five ASCII
+		 * characters ",i,0x".
+		 *
+		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
+		 * field shall contain the iSCSI initiator session identifier
+		 * (see RFC 3720) in the form of ASCII characters that are the
+		 * hexadecimal digits converted from the binary iSCSI initiator
+		 * session identifier value. The first ISCSI INITIATOR SESSION
+		 * ID field byte containing an ASCII null character terminates
+		 * the ISCSI INITIATOR SESSION ID field.
+		 */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
+		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
+		buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
+		buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+		len += 5;
+		buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
+		buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
+		buf[off+len] = '\0'; off++;
+		len += 7;
+	}
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
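+	/*
+	 * Worked example of the rounding: with no ISID appended, a
+	 * 19-character iSCSI name plus its NULL terminator gives len = 20 and
+	 * padding = (-20) & 3 = 0, while a 20-character name gives len = 21
+	 * and padding = 3, rounding len up to 24.
+	 */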
+
+	buf[2] = ((len >> 8) & 0xff);
+	buf[3] = (len & 0xff);
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id);
+
+u32 iscsi_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	u32 len = 0, padding = 0;
+
+	spin_lock_irq(&se_nacl->nacl_sess_lock);
+	len = strlen(se_nacl->initiatorname);
+	/*
+	 * Add extra byte for NULL terminator
+	 */
+	len++;
+	/*
+	 * If there is ISID present with the registration, use format code:
+	 * 01b: iSCSI Initiator port TransportID format
+	 *
+	 * If there is not an active iSCSI session, use format code:
+	 * 00b: iSCSI Initiator device TransportID format
+	 */
+	if (pr_reg->isid_present_at_reg) {
+		len += 5; /* For ",i,0x" ASCII separator */
+		len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+		*format_code = 1;
+	} else
+		*format_code = 0;
+	spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	/*
+	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
+	 * in the TransportID. The additional length shall be at least 20 and
+	 * shall be a multiple of four.
+	 */
+	padding = ((-len) & 3);
+	if (padding != 0)
+		len += padding;
+	/*
+	 * Increment value for total payload + header length for
+	 * full status descriptor
+	 */
+	len += 4;
+
+	return len;
+}
+EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
+
+char *iscsi_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	char *p;
+	u32 tid_len, padding;
+	int i;
+	u16 add_len;
+	u8 format_code = (buf[0] & 0xc0);
+	/*
+	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
+	 *
+	 *       TransportID for initiator ports using SCSI over iSCSI,
+	 *       from Table 388 -- iSCSI TransportID formats.
+	 *
+	 *    00b     Initiator port is identified using the world wide unique
+	 *            SCSI device name of the iSCSI initiator
+	 *            device containing the initiator port (see table 389).
+	 *    01b     Initiator port is identified using the world wide unique
+	 *            initiator port identifier (see table 390).
+	 *    10b to 11b
+	 *            Reserved
+	 */
+	if ((format_code != 0x00) && (format_code != 0x40)) {
+		printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+			" Initiator Transport ID\n", format_code);
+		return NULL;
+	}
+	/*
+	 * If the caller wants the TransportID Length, we set that value for the
+	 * entire iSCSI TransportID now.
+	 */
+	if (out_tid_len != NULL) {
+		add_len = ((buf[2] & 0xff) << 8);
+		add_len |= (buf[3] & 0xff);
+
+		tid_len = strlen((char *)&buf[4]);
+		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
+		tid_len += 1; /* Add one byte for NULL terminator */
+		padding = ((-tid_len) & 3);
+		if (padding != 0)
+			tid_len += padding;
+
+		if ((add_len + 4) != tid_len) {
+			printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+				"does not match calculated tid_len: %u,"
+				" using tid_len instead\n", add_len+4, tid_len);
+			*out_tid_len = tid_len;
+		} else
+			*out_tid_len = (add_len + 4);
+	}
+	/*
+	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
+	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
+	 * format.
+	 */
+	if (format_code == 0x40) {
+		p = strstr((char *)&buf[4], ",i,0x");
+		if (!(p)) {
+			printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+				" for Initiator port identifier: %s\n",
+				(char *)&buf[4]);
+			return NULL;
+		}
+		*p = '\0'; /* Terminate iSCSI Name */
+		p += 5; /* Skip over ",i,0x" separator */
+
+		*port_nexus_ptr = p;
+		/*
+		 * Go ahead and do the lower case conversion of the received
+		 * 12 ASCII characters representing the ISID in the TransportID
+		 * for comparison against the running iSCSI session's ISID from
+		 * iscsi_target.c:lio_sess_get_initiator_sid()
+		 */
+		for (i = 0; i < 12; i++) {
+			if (isdigit(*p)) {
+				p++;
+				continue;
+			}
+			*p = tolower(*p);
+			p++;
+		}
+	}
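+	/*
+	 * Illustrative example: given a FORMAT CODE 01b TransportID carrying
+	 * "iqn.1994-05.com.example:init,i,0x00023d000001" (example value),
+	 * the pointer returned below names only the iSCSI initiator, while
+	 * *port_nexus_ptr points at the lower-cased "00023d000001" ISID that
+	 * follows the ",i,0x" separator.
+	 */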
+
+	return (char *)&buf[4];
+}
+EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
new file mode 100644
index 0000000..0aaca88
--- /dev/null
+++ b/drivers/target/target_core_file.c
@@ -0,0 +1,688 @@
+/*******************************************************************************
+ * Filename:  target_core_file.c
+ *
+ * This file contains the Storage Engine <-> FILEIO transport specific functions
+ *
+ * Copyright (c) 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_file.h"
+
+#if 1
+#define DEBUG_FD_CACHE(x...) printk(x)
+#else
+#define DEBUG_FD_CACHE(x...)
+#endif
+
+#if 1
+#define DEBUG_FD_FUA(x...) printk(x)
+#else
+#define DEBUG_FD_FUA(x...)
+#endif
+
+static struct se_subsystem_api fileio_template;
+
+/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int fd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct fd_host *fd_host;
+
+	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
+	if (!(fd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
+		return -1;
+	}
+
+	fd_host->fd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) fd_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
+		TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
+		hba->hba_id, fd_host->fd_host_id,
+		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void fd_detach_hba(struct se_hba *hba)
+{
+	struct fd_host *fd_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
+
+	kfree(fd_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct fd_dev *fd_dev;
+	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+
+	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
+	if (!(fd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+		return NULL;
+	}
+
+	fd_dev->fd_host = fd_host;
+
+	printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+
+	return fd_dev;
+}
+
+/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static struct se_device *fd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	char *dev_p = NULL;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct queue_limits *limits;
+	struct fd_dev *fd_dev = (struct fd_dev *) p;
+	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+	mm_segment_t old_fs;
+	struct file *file;
+	struct inode *inode = NULL;
+	int dev_flags = 0, flags;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	dev_p = getname(fd_dev->fd_dev_name);
+	set_fs(old_fs);
+
+	if (IS_ERR(dev_p)) {
+		printk(KERN_ERR "getname(%s) failed: %lu\n",
+			fd_dev->fd_dev_name, IS_ERR(dev_p));
+		goto fail;
+	}
+#if 0
+	if (di->no_create_file)
+		flags = O_RDWR | O_LARGEFILE;
+	else
+		flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#else
+	flags = O_RDWR | O_CREAT | O_LARGEFILE;
+#endif
+/*	flags |= O_DIRECT; */
+	/*
+	 * If fd_buffered_io=1 has not been set explicitly (the default),
+	 * use O_SYNC to force FILEIO writes to disk.
+	 */
+	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
+		flags |= O_SYNC;
+
+	file = filp_open(dev_p, flags, 0600);
+
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+		goto fail;
+	}
+	fd_dev->fd_file = file;
+	/*
+	 * If using a block backend with this struct file, we extract
+	 * fd_dev->fd_[block,dev]_size from struct block_device.
+	 *
+	 * Otherwise, we use the passed fd_size= from configfs
+	 */
+	inode = file->f_mapping->host;
+	if (S_ISBLK(inode->i_mode)) {
+		struct request_queue *q;
+		/*
+		 * Setup the local scope queue_limits from struct request_queue->limits
+		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+		 */
+		q = bdev_get_queue(inode->i_bdev);
+		limits = &dev_limits.limits;
+		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
+		limits->max_hw_sectors = queue_max_hw_sectors(q);
+		limits->max_sectors = queue_max_sectors(q);
+		/*
+		 * Determine the number of bytes from i_size_read() minus
+		 * one (1) logical sector from underlying struct block_device
+		 */
+		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+				       fd_dev->fd_block_size);
+
+		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+			" block_device blocks: %llu logical_block_size: %d\n",
+			fd_dev->fd_dev_size,
+			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+			fd_dev->fd_block_size);
+	} else {
+		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+				" parameter, and no backing struct"
+				" block_device\n");
+			goto fail;
+		}
+
+		limits = &dev_limits.limits;
+		limits->logical_block_size = FD_BLOCKSIZE;
+		limits->max_hw_sectors = FD_MAX_SECTORS;
+		limits->max_sectors = FD_MAX_SECTORS;
+		fd_dev->fd_block_size = FD_BLOCKSIZE;
+	}
+
+	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba, &fileio_template,
+				se_dev, dev_flags, (void *)fd_dev,
+				&dev_limits, "FILEIO", FD_VERSION);
+	if (!(dev))
+		goto fail;
+
+	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+	fd_dev->fd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
+			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+
+	putname(dev_p);
+	return dev;
+fail:
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+	putname(dev_p);
+	return NULL;
+}
+
+/*	fd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_device(void *p)
+{
+	struct fd_dev *fd_dev = (struct fd_dev *) p;
+
+	if (fd_dev->fd_file) {
+		filp_close(fd_dev->fd_file, NULL);
+		fd_dev->fd_file = NULL;
+	}
+
+	kfree(fd_dev);
+}
+
+static inline struct fd_request *FILE_REQ(struct se_task *task)
+{
+	return container_of(task, struct fd_request, fd_task);
+}
+
+
+static struct se_task *
+fd_alloc_task(struct se_cmd *cmd)
+{
+	struct fd_request *fd_req;
+
+	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
+	if (!(fd_req)) {
+		printk(KERN_ERR "Unable to allocate struct fd_request\n");
+		return NULL;
+	}
+
+	fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
+
+	return &fd_req->fd_task;
+}
+
+static int fd_do_readv(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct file *fd = req->fd_dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	int ret = 0, i;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+	if (!(iov)) {
+		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
+		return -1;
+	}
+
+	for (i = 0; i < task->task_sg_num; i++) {
+		iov[i].iov_len = sg[i].length;
+		iov[i].iov_base = sg_virt(&sg[i]);
+	}
+
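+	/*
+	 * The iovec entries built above reference kernel memory via sg_virt(),
+	 * so temporarily widen the address limit with set_fs(get_ds()) for the
+	 * vfs_readv() call and restore it immediately afterwards.
+	 */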
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+	set_fs(old_fs);
+
+	kfree(iov);
+	/*
+	 * Return zeros and GOOD status even if the READ did not return
+	 * the expected virt_size for struct file w/o a backing struct
+	 * block_device.
+	 */
+	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+		if (ret < 0 || ret != task->task_size) {
+			printk(KERN_ERR "vfs_readv() returned %d,"
+				" expecting %d for S_ISBLK\n", ret,
+				(int)task->task_size);
+			return -1;
+		}
+	} else {
+		if (ret < 0) {
+			printk(KERN_ERR "vfs_readv() returned %d for non"
+				" S_ISBLK\n", ret);
+			return -1;
+		}
+	}
+
+	return 1;
+}
+
+static int fd_do_writev(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+	struct file *fd = req->fd_dev->fd_file;
+	struct scatterlist *sg = task->task_sg;
+	struct iovec *iov;
+	mm_segment_t old_fs;
+	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+	int ret, i = 0;
+
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
+	if (!(iov)) {
+		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
+		return -1;
+	}
+
+	for (i = 0; i < task->task_sg_num; i++) {
+		iov[i].iov_len = sg[i].length;
+		iov[i].iov_base = sg_virt(&sg[i]);
+	}
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+	set_fs(old_fs);
+
+	kfree(iov);
+
+	if (ret < 0 || ret != task->task_size) {
+		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
+		return -1;
+	}
+
+	return 1;
+}
+
+static void fd_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+	loff_t start, end;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * Determine if we will be flushing the entire device.
+	 */
+	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+		start = 0;
+		end = LLONG_MAX;
+	} else {
+		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+		if (cmd->data_length)
+			end = start + cmd->data_length;
+		else
+			end = LLONG_MAX;
+	}
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int fd_emulated_write_cache(struct se_device *dev)
+{
+	return 1;
+}
+
+static int fd_emulated_dpo(struct se_device *dev)
+{
+	return 0;
+}
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int fd_emulated_fua_write(struct se_device *dev)
+{
+	return 1;
+}
+
+static int fd_emulated_fua_read(struct se_device *dev)
+{
+	return 0;
+}
+
+/*
+ * WRITE Force Unit Access (FUA) emulation on a per struct se_task
+ * LBA range basis.
+ */
+static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+	loff_t end = start + task->task_size;
+	int ret;
+
+	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+			task->task_lba, task->task_size);
+
+	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+	if (ret != 0)
+		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+}
+
+static int fd_do_task(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	/*
+	 * Call vectorized fileio functions to map struct scatterlist
+	 * physical memory addresses to struct iovec virtual memory.
+	 */
+	if (task->task_data_direction == DMA_FROM_DEVICE) {
+		ret = fd_do_readv(task);
+	} else {
+		ret = fd_do_writev(task);
+
+		if (ret > 0 &&
+		    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
+		    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+		    T_TASK(cmd)->t_tasks_fua) {
+			/*
+			 * We might need to be a bit smarter here
+			 * and return some sense data to let the initiator
+			 * know the FUA WRITE cache sync failed..?
+			 */
+			fd_emulate_write_fua(cmd, task);
+		}
+
+	}
+
+	if (ret < 0)
+		return ret;
+	if (ret) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	fd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void fd_free_task(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	kfree(req);
+}
+
+enum {
+	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_fd_dev_name, "fd_dev_name=%s"},
+	{Opt_fd_dev_size, "fd_dev_size=%s"},
+	{Opt_fd_buffered_io, "fd_buffered_id=%d"},
+	{Opt_err, NULL}
+};
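+/*
+ * Illustrative control string parsed by fd_set_configfs_dev_params() below
+ * (path and size are example values only):
+ *
+ *   fd_dev_name=/tmp/fileio_file,fd_dev_size=4194304,fd_buffered_io=1
+ */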
+
+static ssize_t fd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page, ssize_t count)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_fd_dev_name:
+			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
+					"%s", match_strdup(&args[0]));
+			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+					fd_dev->fd_dev_name);
+			fd_dev->fbd_flags |= FBDF_HAS_PATH;
+			break;
+		case Opt_fd_dev_size:
+			arg_p = match_strdup(&args[0]);
+			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+			if (ret < 0) {
+				printk(KERN_ERR "strict_strtoull() failed for"
+						" fd_dev_size=\n");
+				goto out;
+			}
+			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+					" bytes\n", fd_dev->fd_dev_size);
+			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+			break;
+		case Opt_fd_buffered_io:
+			match_int(args, &arg);
+			if (arg != 1) {
+				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+				ret = -EINVAL;
+				goto out;
+			}
+
+			printk(KERN_INFO "FILEIO: Using buffered I/O"
+				" operations for struct fd_dev\n");
+
+			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+
+	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+		printk(KERN_ERR "Missing fd_dev_name=\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t fd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
+		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
+		"Buffered" : "Synchronous");
+	return bl;
+}
+
+/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *fd_get_cdb(struct se_task *task)
+{
+	struct fd_request *req = FILE_REQ(task);
+
+	return req->fd_scsi_cdb;
+}
+
+/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static u32 fd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t fd_get_blocks(struct se_device *dev)
+{
+	struct fd_dev *fd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
+			DEV_ATTRIB(dev)->block_size);
+
+	return blocks_long;
+}
+
+static struct se_subsystem_api fileio_template = {
+	.name			= "fileio",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.attach_hba		= fd_attach_hba,
+	.detach_hba		= fd_detach_hba,
+	.allocate_virtdevice	= fd_allocate_virtdevice,
+	.create_virtdevice	= fd_create_virtdevice,
+	.free_device		= fd_free_device,
+	.dpo_emulated		= fd_emulated_dpo,
+	.fua_write_emulated	= fd_emulated_fua_write,
+	.fua_read_emulated	= fd_emulated_fua_read,
+	.write_cache_emulated	= fd_emulated_write_cache,
+	.alloc_task		= fd_alloc_task,
+	.do_task		= fd_do_task,
+	.do_sync_cache		= fd_emulate_sync_cache,
+	.free_task		= fd_free_task,
+	.check_configfs_dev_params = fd_check_configfs_dev_params,
+	.set_configfs_dev_params = fd_set_configfs_dev_params,
+	.show_configfs_dev_params = fd_show_configfs_dev_params,
+	.get_cdb		= fd_get_cdb,
+	.get_device_rev		= fd_get_device_rev,
+	.get_device_type	= fd_get_device_type,
+	.get_blocks		= fd_get_blocks,
+};
+
+static int __init fileio_module_init(void)
+{
+	return transport_subsystem_register(&fileio_template);
+}
+
+static void fileio_module_exit(void)
+{
+	transport_subsystem_release(&fileio_template);
+}
+
+MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(fileio_module_init);
+module_exit(fileio_module_exit);
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
new file mode 100644
index 0000000..ef4de2b
--- /dev/null
+++ b/drivers/target/target_core_file.h
@@ -0,0 +1,50 @@
+#ifndef TARGET_CORE_FILE_H
+#define TARGET_CORE_FILE_H
+
+#define FD_VERSION		"4.0"
+
+#define FD_MAX_DEV_NAME		256
+/* Maximum queue depth for the FILEIO HBA */
+#define FD_HBA_QUEUE_DEPTH	256
+#define FD_DEVICE_QUEUE_DEPTH	32
+#define FD_MAX_DEVICE_QUEUE_DEPTH 128
+#define FD_BLOCKSIZE		512
+#define FD_MAX_SECTORS		1024
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct fd_request {
+	struct se_task	fd_task;
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	/* FILEIO device */
+	struct fd_dev	*fd_dev;
+} ____cacheline_aligned;
+
+#define FBDF_HAS_PATH		0x01
+#define FBDF_HAS_SIZE		0x02
+#define FDBD_USE_BUFFERED_IO	0x04
+
+struct fd_dev {
+	u32		fbd_flags;
+	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
+	/* Unique FILEIO Device ID in FILEIO HBA */
+	u32		fd_dev_id;
+	/* Number of SG tables in sg_table_array */
+	u32		fd_table_count;
+	u32		fd_queue_depth;
+	u32		fd_block_size;
+	unsigned long long fd_dev_size;
+	struct file	*fd_file;
+	/* FILEIO HBA device is connected to */
+	struct fd_host *fd_host;
+} ____cacheline_aligned;
+
+struct fd_host {
+	u32		fd_host_dev_id_count;
+	/* Unique FILEIO Host ID */
+	u32		fd_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_FILE_H */
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
new file mode 100644
index 0000000..4bbe820
--- /dev/null
+++ b/drivers/target/target_core_hba.c
@@ -0,0 +1,185 @@
+/*******************************************************************************
+ * Filename:  target_core_hba.c
+ *
+ * This file contains the iSCSI HBA Transport related functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_hba.h"
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(subsystem_mutex);
+
+int transport_subsystem_register(struct se_subsystem_api *sub_api)
+{
+	struct se_subsystem_api *s;
+
+	INIT_LIST_HEAD(&sub_api->sub_api_list);
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!(strcmp(s->name, sub_api->name))) {
+			printk(KERN_ERR "%p is already registered with"
+				" duplicate name %s, unable to process"
+				" request\n", s, s->name);
+			mutex_unlock(&subsystem_mutex);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&sub_api->sub_api_list, &subsystem_list);
+	mutex_unlock(&subsystem_mutex);
+
+	printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+			" %p\n", sub_api->name, sub_api->owner);
+	return 0;
+}
+EXPORT_SYMBOL(transport_subsystem_register);
+
+void transport_subsystem_release(struct se_subsystem_api *sub_api)
+{
+	mutex_lock(&subsystem_mutex);
+	list_del(&sub_api->sub_api_list);
+	mutex_unlock(&subsystem_mutex);
+}
+EXPORT_SYMBOL(transport_subsystem_release);
+
+static struct se_subsystem_api *core_get_backend(const char *sub_name)
+{
+	struct se_subsystem_api *s;
+
+	mutex_lock(&subsystem_mutex);
+	list_for_each_entry(s, &subsystem_list, sub_api_list) {
+		if (!strcmp(s->name, sub_name))
+			goto found;
+	}
+	mutex_unlock(&subsystem_mutex);
+	return NULL;
+found:
+	if (s->owner && !try_module_get(s->owner))
+		s = NULL;
+	mutex_unlock(&subsystem_mutex);
+	return s;
+}
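+/*
+ * Note: core_get_backend() pins the backend module with try_module_get();
+ * the matching module_put() happens in core_delete_hba(), or in the
+ * core_alloc_hba() error path if attach_hba() fails.
+ */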
+
+struct se_hba *
+core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
+{
+	struct se_hba *hba;
+	int ret = 0;
+
+	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+	if (!hba) {
+		printk(KERN_ERR "Unable to allocate struct se_hba\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_LIST_HEAD(&hba->hba_dev_list);
+	spin_lock_init(&hba->device_lock);
+	spin_lock_init(&hba->hba_queue_lock);
+	mutex_init(&hba->hba_access_mutex);
+
+	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
+	hba->hba_flags |= hba_flags;
+
+	atomic_set(&hba->max_queue_depth, 0);
+	atomic_set(&hba->left_queue_depth, 0);
+
+	hba->transport = core_get_backend(plugin_name);
+	if (!hba->transport) {
+		ret = -EINVAL;
+		goto out_free_hba;
+	}
+
+	ret = hba->transport->attach_hba(hba, plugin_dep_id);
+	if (ret < 0)
+		goto out_module_put;
+
+	spin_lock(&se_global->hba_lock);
+	hba->hba_id = se_global->g_hba_id_counter++;
+	list_add_tail(&hba->hba_list, &se_global->g_hba_list);
+	spin_unlock(&se_global->hba_lock);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+			" Core\n", hba->hba_id);
+
+	return hba;
+
+out_module_put:
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+	hba->transport = NULL;
+out_free_hba:
+	kfree(hba);
+	return ERR_PTR(ret);
+}
+
+int
+core_delete_hba(struct se_hba *hba)
+{
+	struct se_device *dev, *dev_tmp;
+
+	spin_lock(&hba->device_lock);
+	list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
+
+		se_clear_dev_ports(dev);
+		spin_unlock(&hba->device_lock);
+
+		se_release_device_for_hba(dev);
+
+		spin_lock(&hba->device_lock);
+	}
+	spin_unlock(&hba->device_lock);
+
+	hba->transport->detach_hba(hba);
+
+	spin_lock(&se_global->hba_lock);
+	list_del(&hba->hba_list);
+	spin_unlock(&se_global->hba_lock);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+			" Core\n", hba->hba_id);
+
+	if (hba->transport->owner)
+		module_put(hba->transport->owner);
+
+	hba->transport = NULL;
+	kfree(hba);
+	return 0;
+}
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
new file mode 100644
index 0000000..bb0fea5
--- /dev/null
+++ b/drivers/target/target_core_hba.h
@@ -0,0 +1,7 @@
+#ifndef TARGET_CORE_HBA_H
+#define TARGET_CORE_HBA_H
+
+extern struct se_hba *core_alloc_hba(const char *, u32, u32);
+extern int core_delete_hba(struct se_hba *);
+
+#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
new file mode 100644
index 0000000..c6e0d75
--- /dev/null
+++ b/drivers/target/target_core_iblock.c
@@ -0,0 +1,808 @@
+/*******************************************************************************
+ * Filename:  target_core_iblock.c
+ *
+ * This file contains the Storage Engine  <-> Linux BlockIO transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/bio.h>
+#include <linux/genhd.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_iblock.h"
+
+#if 0
+#define DEBUG_IBLOCK(x...) printk(x)
+#else
+#define DEBUG_IBLOCK(x...)
+#endif
+
+static struct se_subsystem_api iblock_template;
+
+static void iblock_bio_done(struct bio *, int);
+
+/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct iblock_hba *ib_host;
+
+	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
+	if (!(ib_host)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct iblock_hba\n");
+		return -ENOMEM;
+	}
+
+	ib_host->iblock_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) ib_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+
+	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
+		" Target Core TCQ Depth: %d\n", hba->hba_id,
+		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+
+	return 0;
+}
+
+static void iblock_detach_hba(struct se_hba *hba)
+{
+	struct iblock_hba *ib_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
+
+	kfree(ib_host);
+	hba->hba_ptr = NULL;
+}
+
+static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct iblock_dev *ib_dev = NULL;
+	struct iblock_hba *ib_host = hba->hba_ptr;
+
+	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
+	if (!(ib_dev)) {
+		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+		return NULL;
+	}
+	ib_dev->ibd_host = ib_host;
+
+	printk(KERN_INFO  "IBLOCK: Allocated ib_dev for %s\n", name);
+
+	return ib_dev;
+}
+
+static struct se_device *iblock_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct iblock_dev *ib_dev = p;
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct block_device *bd = NULL;
+	struct request_queue *q;
+	struct queue_limits *limits;
+	u32 dev_flags = 0;
+
+	if (!(ib_dev)) {
+		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+		return NULL;
+	}
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+	/*
+	 * These settings need to be made tunable..
+	 */
+	ib_dev->ibd_bio_set = bioset_create(32, 64);
+	if (!(ib_dev->ibd_bio_set)) {
+		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+		return NULL;
+	}
+	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+	/*
+	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
+	 * has already been set before "echo 1 > $HBA/$DEV/enable" can run.
+	 */
+	printk(KERN_INFO  "IBLOCK: Claiming struct block_device: %s\n",
+			ib_dev->ibd_udev_path);
+
+	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+	if (!(bd))
+		goto failed;
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = bdev_get_queue(bd);
+	limits = &dev_limits.limits;
+	limits->logical_block_size = bdev_logical_block_size(bd);
+	limits->max_hw_sectors = queue_max_hw_sectors(q);
+	limits->max_sectors = queue_max_sectors(q);
+	dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+
+	ib_dev->ibd_major = MAJOR(bd->bd_dev);
+	ib_dev->ibd_minor = MINOR(bd->bd_dev);
+	ib_dev->ibd_bd = bd;
+
+	dev = transport_add_device_to_core_hba(hba,
+			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
+			&dev_limits, "IBLOCK", IBLOCK_VERSION);
+	if (!(dev))
+		goto failed;
+
+	ib_dev->ibd_depth = dev->queue_depth;
+
+	/*
+	 * Check if the underlying struct block_device request_queue supports
+	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+	 * in ATA and we need to set TPE=1
+	 */
+	if (blk_queue_discard(bdev_get_queue(bd))) {
+		struct request_queue *q = bdev_get_queue(bd);
+
+		DEV_ATTRIB(dev)->max_unmap_lba_count =
+				q->limits.max_discard_sectors;
+		/*
+		 * Currently hardcoded to 1 in Linux/SCSI code..
+		 */
+		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
+		DEV_ATTRIB(dev)->unmap_granularity =
+				q->limits.discard_granularity;
+		DEV_ATTRIB(dev)->unmap_granularity_alignment =
+				q->limits.discard_alignment;
+
+		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+				" disabled by default\n");
+	}
+
+	return dev;
+
+failed:
+	if (ib_dev->ibd_bio_set) {
+		bioset_free(ib_dev->ibd_bio_set);
+		ib_dev->ibd_bio_set = NULL;
+	}
+	ib_dev->ibd_bd = NULL;
+	ib_dev->ibd_major = 0;
+	ib_dev->ibd_minor = 0;
+	return NULL;
+}
+
+static void iblock_free_device(void *p)
+{
+	struct iblock_dev *ib_dev = p;
+
+	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	bioset_free(ib_dev->ibd_bio_set);
+	kfree(ib_dev);
+}
+
+static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
+{
+	return container_of(task, struct iblock_req, ib_task);
+}
+
+static struct se_task *
+iblock_alloc_task(struct se_cmd *cmd)
+{
+	struct iblock_req *ib_req;
+
+	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+	if (!(ib_req)) {
+		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+		return NULL;
+	}
+
+	ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
+	atomic_set(&ib_req->ib_bio_cnt, 0);
+	return &ib_req->ib_task;
+}
+
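+/*
+ * Rescale the READ_CAPACITY block count from the backend's logical block
+ * size into the configured se_device block size, e.g. a backend with
+ * 512-byte blocks exported with a 4096-byte block size reports one eighth
+ * as many blocks.
+ */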
+static unsigned long long iblock_emulate_read_cap_with_block_size(
+	struct se_device *dev,
+	struct block_device *bd,
+	struct request_queue *q)
+{
+	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
+					bdev_logical_block_size(bd)) - 1);
+	u32 block_size = bdev_logical_block_size(bd);
+
+	if (block_size == DEV_ATTRIB(dev)->block_size)
+		return blocks_long;
+
+	switch (block_size) {
+	case 4096:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 2048:
+			blocks_long <<= 1;
+			break;
+		case 1024:
+			blocks_long <<= 2;
+			break;
+		case 512:
+			blocks_long <<= 3;
+		default:
+			break;
+		}
+		break;
+	case 2048:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 1;
+			break;
+		case 1024:
+			blocks_long <<= 1;
+			break;
+		case 512:
+			blocks_long <<= 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 1024:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 2;
+			break;
+		case 2048:
+			blocks_long >>= 1;
+			break;
+		case 512:
+			blocks_long <<= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 512:
+		switch (DEV_ATTRIB(dev)->block_size) {
+		case 4096:
+			blocks_long >>= 3;
+			break;
+		case 2048:
+			blocks_long >>= 2;
+			break;
+		case 1024:
+			blocks_long >>= 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return blocks_long;
+}
+
+/*
+ * Emulate SYNCHRONIZE_CACHE_*
+ */
+static void iblock_emulate_sync_cache(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+	sector_t error_sector;
+	int ret;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		transport_complete_sync_cache(cmd, 1);
+
+	/*
+	 * blkdev_issue_flush() does not support specifying a range, so
+	 * we have to flush the entire cache.
+	 */
+	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
+	if (ret != 0) {
+		printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+			" error_sector: %llu\n", ret,
+			(unsigned long long)error_sector);
+	}
+
+	if (!immed)
+		transport_complete_sync_cache(cmd, ret == 0);
+}
+
+/*
+ * Tell TCM Core that we are capable of WriteCache emulation for
+ * an underlying struct se_device.
+ */
+static int iblock_emulated_write_cache(struct se_device *dev)
+{
+	return 1;
+}
+
+static int iblock_emulated_dpo(struct se_device *dev)
+{
+	return 0;
+}
+
+/*
+ * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
+ * for TYPE_DISK.
+ */
+static int iblock_emulated_fua_write(struct se_device *dev)
+{
+	return 1;
+}
+
+static int iblock_emulated_fua_read(struct se_device *dev)
+{
+	return 0;
+}
+
+static int iblock_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->task_se_cmd->se_dev;
+	struct iblock_req *req = IBLOCK_REQ(task);
+	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
+	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
+	struct bio *bio = req->ib_bio, *nbio = NULL;
+	int rw;
+
+	if (task->task_data_direction == DMA_TO_DEVICE) {
+		/*
+		 * Force data to disk if we pretend to not have a volatile
+		 * write cache, or the initiator set the Force Unit Access bit.
+		 */
+		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
+		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
+		     T_TASK(task->task_se_cmd)->t_tasks_fua))
+			rw = WRITE_FUA;
+		else
+			rw = WRITE;
+	} else {
+		rw = READ;
+	}
+
+	while (bio) {
+		nbio = bio->bi_next;
+		bio->bi_next = NULL;
+		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
+			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+
+		submit_bio(rw, bio);
+		bio = nbio;
+	}
+
+	if (q->unplug_fn)
+		q->unplug_fn(q);
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	int barrier = 0;
+
+	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+}
+
+static void iblock_free_task(struct se_task *task)
+{
+	struct iblock_req *req = IBLOCK_REQ(task);
+	struct bio *bio, *hbio = req->ib_bio;
+	/*
+	 * We only release the bio(s) here if iblock_bio_done() has not called
+	 * bio_put() -> iblock_bio_destructor().
+	 */
+	while (hbio != NULL) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_put(bio);
+	}
+
+	kfree(req);
+}
+
+enum {
+	Opt_udev_path, Opt_force, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_udev_path, "udev_path=%s"},
+	{Opt_force, "force=%d"},
+	{Opt_err, NULL}
+};
+
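+/*
+ * Parse comma-separated "udev_path=<path>,force=<N>" options written by
+ * userspace (typically via the device's configfs control attribute).
+ */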
+static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
+					       struct se_subsystem_dev *se_dev,
+					       const char *page, ssize_t count)
+{
+	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_udev_path:
+			if (ib_dev->ibd_bd) {
+				printk(KERN_ERR "Unable to set udev_path= while"
+					" ib_dev->ibd_bd exists\n");
+				ret = -EEXIST;
+				goto out;
+			}
+
+			ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
+				"%s", match_strdup(&args[0]));
+			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+					ib_dev->ibd_udev_path);
+			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
+			break;
+		case Opt_force:
+			match_int(args, &arg);
+			ib_dev->ibd_force = arg;
+			printk(KERN_INFO "IBLOCK: Set force=%d\n",
+				ib_dev->ibd_force);
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t iblock_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+
+	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t iblock_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	char buf[BDEVNAME_SIZE];
+	ssize_t bl = 0;
+
+	if (bd)
+		bl += sprintf(b + bl, "iBlock device: %s",
+				bdevname(bd, buf));
+	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
+		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
+				ibd->ibd_udev_path);
+	} else
+		bl += sprintf(b + bl, "\n");
+
+	bl += sprintf(b + bl, "        ");
+	if (bd) {
+		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
+			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+			"CLAIMED: IBLOCK" : "CLAIMED: OS");
+	} else {
+		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
+			ibd->ibd_major, ibd->ibd_minor);
+	}
+
+	return bl;
+}
+
+static void iblock_bio_destructor(struct bio *bio)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+
+	bio_free(bio, ib_dev->ibd_bio_set);
+}
+
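+/*
+ * Allocate a bio from the device's private bio_set, point it at this
+ * task's completion callback and bump the request's outstanding bio count.
+ */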
+static struct bio *iblock_get_bio(
+	struct se_task *task,
+	struct iblock_req *ib_req,
+	struct iblock_dev *ib_dev,
+	int *ret,
+	sector_t lba,
+	u32 sg_num)
+{
+	struct bio *bio;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!(bio)) {
+		printk(KERN_ERR "Unable to allocate memory for bio\n");
+		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+		return NULL;
+	}
+
+	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
+		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
+	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = (void *) task;
+	bio->bi_destructor = iblock_bio_destructor;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
+	atomic_inc(&ib_req->ib_bio_cnt);
+
+	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
+	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+			atomic_read(&ib_req->ib_bio_cnt));
+	return bio;
+}
+
+static int iblock_map_task_SG(struct se_task *task)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct se_device *dev = SE_DEV(cmd);
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+	struct iblock_req *ib_req = IBLOCK_REQ(task);
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct scatterlist *sg;
+	int ret = 0;
+	u32 i, sg_num = task->task_sg_num;
+	sector_t block_lba;
+	/*
+	 * Do starting conversion up from non 512-byte blocksize with
+	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
+	 */
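+	/*
+	 * e.g. with a 4096-byte se_device block size, SCSI LBA 10 becomes
+	 * 512-byte sector 80 (10 << 3).
+	 */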
+	if (DEV_ATTRIB(dev)->block_size == 4096)
+		block_lba = (task->task_lba << 3);
+	else if (DEV_ATTRIB(dev)->block_size == 2048)
+		block_lba = (task->task_lba << 2);
+	else if (DEV_ATTRIB(dev)->block_size == 1024)
+		block_lba = (task->task_lba << 1);
+	else if (DEV_ATTRIB(dev)->block_size == 512)
+		block_lba = task->task_lba;
+	else {
+		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
+				" %u\n", DEV_ATTRIB(dev)->block_size);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+	if (!(bio))
+		return ret;
+
+	ib_req->ib_bio = bio;
+	hbio = tbio = bio;
+	/*
+	 * Use fs/bio.c:bio_add_page() to set up the bio_vec maplist
+	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+	 */
+	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
+		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
+				sg->length, sg->offset);
+again:
+		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
+		if (ret != sg->length) {
+
+			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
+					bio->bi_sector);
+			DEBUG_IBLOCK("** task->task_size: %u\n",
+					task->task_size);
+			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+					bio->bi_max_vecs);
+			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+					bio->bi_vcnt);
+
+			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
+						block_lba, sg_num);
+			if (!(bio))
+				goto fail;
+
+			tbio = tbio->bi_next = bio;
+			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+				" list, Going to again\n", bio);
+			goto again;
+		}
+		/* Always in 512 byte units for Linux/Block */
+		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+		sg_num--;
+		DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+			" sg_num to %u\n", task, sg_num);
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
+				" to %llu\n", task, block_lba);
+		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+				" %u\n", task, bio->bi_vcnt);
+	}
+
+	return 0;
+fail:
+	while (hbio) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_put(bio);
+	}
+	return ret;
+}
+
+static unsigned char *iblock_get_cdb(struct se_task *task)
+{
+	return IBLOCK_REQ(task)->ib_scsi_cdb;
+}
+
+static u32 iblock_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 iblock_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t iblock_get_blocks(struct se_device *dev)
+{
+	struct iblock_dev *ibd = dev->dev_ptr;
+	struct block_device *bd = ibd->ibd_bd;
+	struct request_queue *q = bdev_get_queue(bd);
+
+	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_task *task = bio->bi_private;
+	struct iblock_req *ibr = IBLOCK_REQ(task);
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+	 */
+	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+		err = -EIO;
+
+	if (err != 0) {
+		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+		bio_put(bio);
+		/*
+		 * Wait to complete the task until the last bio has completed.
+		 */
+		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+			return;
+
+		ibr->ib_bio = NULL;
+		transport_complete_task(task, 0);
+		return;
+	}
+	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		task, bio, task->task_lba, bio->bi_sector, err);
+	/*
+	 * bio_put() will call iblock_bio_destructor() to release the bio back
+	 * to ibr->ib_bio_set.
+	 */
+	bio_put(bio);
+	/*
+	 * Wait to complete the task until the last bio has completed.
+	 */
+	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+		return;
+	/*
+	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
+	 */
+	ibr->ib_bio = NULL;
+	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+}
+
+static struct se_subsystem_api iblock_template = {
+	.name			= "iblock",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.map_task_SG		= iblock_map_task_SG,
+	.attach_hba		= iblock_attach_hba,
+	.detach_hba		= iblock_detach_hba,
+	.allocate_virtdevice	= iblock_allocate_virtdevice,
+	.create_virtdevice	= iblock_create_virtdevice,
+	.free_device		= iblock_free_device,
+	.dpo_emulated		= iblock_emulated_dpo,
+	.fua_write_emulated	= iblock_emulated_fua_write,
+	.fua_read_emulated	= iblock_emulated_fua_read,
+	.write_cache_emulated	= iblock_emulated_write_cache,
+	.alloc_task		= iblock_alloc_task,
+	.do_task		= iblock_do_task,
+	.do_discard		= iblock_do_discard,
+	.do_sync_cache		= iblock_emulate_sync_cache,
+	.free_task		= iblock_free_task,
+	.check_configfs_dev_params = iblock_check_configfs_dev_params,
+	.set_configfs_dev_params = iblock_set_configfs_dev_params,
+	.show_configfs_dev_params = iblock_show_configfs_dev_params,
+	.get_cdb		= iblock_get_cdb,
+	.get_device_rev		= iblock_get_device_rev,
+	.get_device_type	= iblock_get_device_type,
+	.get_blocks		= iblock_get_blocks,
+};
+
+static int __init iblock_module_init(void)
+{
+	return transport_subsystem_register(&iblock_template);
+}
+
+static void iblock_module_exit(void)
+{
+	transport_subsystem_release(&iblock_template);
+}
+
+MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iblock_module_init);
+module_exit(iblock_module_exit);
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
new file mode 100644
index 0000000..64c1f4d
--- /dev/null
+++ b/drivers/target/target_core_iblock.h
@@ -0,0 +1,40 @@
+#ifndef TARGET_CORE_IBLOCK_H
+#define TARGET_CORE_IBLOCK_H
+
+#define IBLOCK_VERSION		"4.0"
+
+#define IBLOCK_HBA_QUEUE_DEPTH	512
+#define IBLOCK_DEVICE_QUEUE_DEPTH	32
+#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH	128
+#define IBLOCK_MAX_CDBS		16
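+/* Linux block layer sectors are 512 bytes: 1 << IBLOCK_LBA_SHIFT == 512 */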
+#define IBLOCK_LBA_SHIFT	9
+
+struct iblock_req {
+	struct se_task ib_task;
+	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	atomic_t ib_bio_cnt;
+	atomic_t ib_bio_err_cnt;
+	struct bio *ib_bio;
+	struct iblock_dev *ib_dev;
+} ____cacheline_aligned;
+
+#define IBDF_HAS_UDEV_PATH		0x01
+#define IBDF_HAS_FORCE			0x02
+
+struct iblock_dev {
+	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
+	int	ibd_force;
+	int	ibd_major;
+	int	ibd_minor;
+	u32	ibd_depth;
+	u32	ibd_flags;
+	struct bio_set	*ibd_bio_set;
+	struct block_device *ibd_bd;
+	struct iblock_hba *ibd_host;
+} ____cacheline_aligned;
+
+struct iblock_hba {
+	int		iblock_host_id;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
new file mode 100644
index 0000000..d5a48aa
--- /dev/null
+++ b/drivers/target/target_core_mib.c
@@ -0,0 +1,1078 @@
+/*******************************************************************************
+ * Filename:  target_core_mib.c
+ *
+ * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_mib.h"
+
+/* SCSI mib table index */
+static struct scsi_index_table scsi_index_table;
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* SCSI Instance Table */
+#define SCSI_INST_SW_INDEX		1
+#define SCSI_TRANSPORT_INDEX		1
+
+#define NONE		"None"
+#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
+
+static inline int list_is_first(const struct list_head *list,
+				const struct list_head *head)
+{
+	return list->prev == head;
+}
+
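+/*
+ * Common seq_file iterators over the global se_subsystem_dev list
+ * (guarded by g_device_lock), shared by several of the tables below.
+ */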
+static void *locate_hba_start(
+	struct seq_file *seq,
+	loff_t *pos)
+{
+	spin_lock(&se_global->g_device_lock);
+	return seq_list_start(&se_global->g_se_dev_list, *pos);
+}
+
+static void *locate_hba_next(
+	struct seq_file *seq,
+	void *v,
+	loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_se_dev_list, pos);
+}
+
+static void locate_hba_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock(&se_global->g_device_lock);
+}
+
+/****************************************************************************
+ * SCSI MIB Tables
+ ****************************************************************************/
+
+/*
+ * SCSI Instance Table
+ */
+static void *scsi_inst_seq_start(
+	struct seq_file *seq,
+	loff_t *pos)
+{
+	spin_lock(&se_global->hba_lock);
+	return seq_list_start(&se_global->g_hba_list, *pos);
+}
+
+static void *scsi_inst_seq_next(
+	struct seq_file *seq,
+	void *v,
+	loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_hba_list, pos);
+}
+
+static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock(&se_global->hba_lock);
+}
+
+static int scsi_inst_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
+
+	if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
+		seq_puts(seq, "inst sw_indx\n");
+
+	seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
+	seq_printf(seq, "plugin: %s version: %s\n",
+			hba->transport->name, TARGET_CORE_VERSION);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_inst_seq_ops = {
+	.start	= scsi_inst_seq_start,
+	.next	= scsi_inst_seq_next,
+	.stop	= scsi_inst_seq_stop,
+	.show	= scsi_inst_seq_show
+};
+
+static int scsi_inst_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_inst_seq_ops);
+}
+
+static const struct file_operations scsi_inst_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_inst_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Device Table
+ */
+static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_dev_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	char str[28];
+	int k;
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst indx role ports\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
+		   dev->dev_index, "Target", dev->dev_port_count);
+
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+
+	/* vendor */
+	for (k = 0; k < 8; k++)
+		str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
+				DEV_T10_WWN(dev)->vendor[k] : 0x20;
+	str[k] = 0x20;
+
+	/* model */
+	for (k = 0; k < 16; k++)
+		str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
+				DEV_T10_WWN(dev)->model[k] : 0x20;
+	str[k + 9] = 0;
+
+	seq_printf(seq, "dev_alias: %s\n", str);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_dev_seq_ops = {
+	.start  = scsi_dev_seq_start,
+	.next   = scsi_dev_seq_next,
+	.stop   = scsi_dev_seq_stop,
+	.show   = scsi_dev_seq_show
+};
+
+static int scsi_dev_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_dev_seq_ops);
+}
+
+static const struct file_operations scsi_dev_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_dev_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Port Table
+ */
+static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_port_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_port_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	struct se_port *sep, *sep_tmp;
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst device indx role busy_count\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	/* FIXME: scsiPortBusyStatuses count */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
+			dev->dev_index, sep->sep_index, "Device",
+			dev->dev_index, 0);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_port_seq_ops = {
+	.start  = scsi_port_seq_start,
+	.next   = scsi_port_seq_next,
+	.stop   = scsi_port_seq_stop,
+	.show   = scsi_port_seq_show
+};
+
+static int scsi_port_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_port_seq_ops);
+}
+
+static const struct file_operations scsi_port_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_port_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Transport Table
+ */
+static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_transport_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	struct se_port *se, *se_tmp;
+	struct se_portal_group *tpg;
+	struct t10_wwn *wwn;
+	char buf[64];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst device indx dev_name\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	wwn = DEV_T10_WWN(dev);
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
+		tpg = se->sep_tpg;
+		sprintf(buf, "scsiTransport%s",
+				TPG_TFO(tpg)->get_fabric_name());
+
+		seq_printf(seq, "%u %s %u %s+%s\n",
+			hba->hba_index, /* scsiTransportIndex */
+			buf,  /* scsiTransportType */
+			(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
+			TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
+			0,
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			(strlen(wwn->unit_serial)) ?
+			/* scsiTransportDevName */
+			wwn->unit_serial : wwn->vendor);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_transport_seq_ops = {
+	.start  = scsi_transport_seq_start,
+	.next   = scsi_transport_seq_next,
+	.stop   = scsi_transport_seq_stop,
+	.show   = scsi_transport_seq_show
+};
+
+static int scsi_transport_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_transport_seq_ops);
+}
+
+static const struct file_operations scsi_transport_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_transport_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Target Device Table
+ */
+static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+
+#define LU_COUNT	1  /* for now */
+static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	int non_accessible_lus = 0;
+	char status[16];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst indx num_LUs status non_access_LUs"
+			" resets\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		strcpy(status, "activated");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		strcpy(status, "deactivated");
+		non_accessible_lus = 1;
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		strcpy(status, "shutdown");
+		non_accessible_lus = 1;
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		strcpy(status, "offline");
+		non_accessible_lus = 1;
+		break;
+	default:
+		sprintf(status, "unknown(%d)", dev->dev_status);
+		non_accessible_lus = 1;
+	}
+
+	seq_printf(seq, "%u %u %u %s %u %u\n",
+		   hba->hba_index, dev->dev_index, LU_COUNT,
+		   status, non_accessible_lus, dev->num_resets);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_tgt_dev_seq_ops = {
+	.start  = scsi_tgt_dev_seq_start,
+	.next   = scsi_tgt_dev_seq_next,
+	.stop   = scsi_tgt_dev_seq_stop,
+	.show   = scsi_tgt_dev_seq_show
+};
+
+static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_tgt_dev_seq_ops);
+}
+
+static const struct file_operations scsi_tgt_dev_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_tgt_dev_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Target Port Table
+ */
+static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	struct se_port *sep, *sep_tmp;
+	struct se_portal_group *tpg;
+	u32 rx_mbytes, tx_mbytes;
+	unsigned long long num_cmds;
+	char buf[64];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst device indx name port_index in_cmds"
+			" write_mbytes read_mbytes hs_in_cmds\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
+		tpg = sep->sep_tpg;
+		sprintf(buf, "%sPort#",
+			TPG_TFO(tpg)->get_fabric_name());
+
+		seq_printf(seq, "%u %u %u %s%d %s%s%d ",
+		     hba->hba_index,
+		     dev->dev_index,
+		     sep->sep_index,
+		     buf, sep->sep_index,
+		     TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
+		     TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+		spin_lock(&sep->sep_lun->lun_sep_lock);
+		num_cmds = sep->sep_stats.cmd_pdus;
+		rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
+		tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
+		spin_unlock(&sep->sep_lun->lun_sep_lock);
+
+		seq_printf(seq, "%llu %u %u %u\n", num_cmds,
+			rx_mbytes, tx_mbytes, 0);
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_tgt_port_seq_ops = {
+	.start  = scsi_tgt_port_seq_start,
+	.next   = scsi_tgt_port_seq_next,
+	.stop   = scsi_tgt_port_seq_stop,
+	.show   = scsi_tgt_port_seq_show
+};
+
+static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_tgt_port_seq_ops);
+}
+
+static const struct file_operations scsi_tgt_port_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_tgt_port_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Authorized Initiator Table:
+ * It contains the SCSI Initiators authorized to be attached to one of the
+ * local Target ports.
+ * Iterates through all active TPGs and extracts the info from the ACLs
+ */
+static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	spin_lock_bh(&se_global->se_tpg_lock);
+	return seq_list_start(&se_global->g_se_tpg_list, *pos);
+}
+
+static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
+					 loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
+}
+
+static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock_bh(&se_global->se_tpg_lock);
+}
+
+static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
+						se_tpg_list);
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_node_acl *se_nacl;
+	int j;
+
+	if (list_is_first(&se_tpg->se_tpg_list,
+			  &se_global->g_se_tpg_list))
+		seq_puts(seq, "inst dev port indx dev_or_port intr_name "
+			 "map_indx att_count num_cmds read_mbytes "
+			 "write_mbytes hs_num_cmds creation_time row_status\n");
+
+	if (!(se_tpg))
+		return 0;
+
+	spin_lock(&se_tpg->acl_node_lock);
+	list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
+
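+		/*
+		 * Pin the ACL so it stays valid while acl_node_lock is
+		 * dropped to walk its device list below.
+		 */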
+		atomic_inc(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&se_tpg->acl_node_lock);
+
+		spin_lock_irq(&se_nacl->device_list_lock);
+		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
+			deve = &se_nacl->device_list[j];
+			if (!(deve->lun_flags &
+					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
+			    (!deve->se_lun))
+				continue;
+			lun = deve->se_lun;
+			if (!lun->lun_se_dev)
+				continue;
+
+			seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
+					" %u %s\n",
+				/* scsiInstIndex */
+				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
+				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
+				0,
+				/* scsiDeviceIndex */
+				lun->lun_se_dev->dev_index,
+				/* scsiAuthIntrTgtPortIndex */
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
+				/* scsiAuthIntrIndex */
+				se_nacl->acl_index,
+				/* scsiAuthIntrDevOrPort */
+				1,
+				/* scsiAuthIntrName */
+				se_nacl->initiatorname[0] ?
+					se_nacl->initiatorname : NONE,
+				/* FIXME: scsiAuthIntrLunMapIndex */
+				0,
+				/* scsiAuthIntrAttachedTimes */
+				deve->attach_count,
+				/* scsiAuthIntrOutCommands */
+				deve->total_cmds,
+				/* scsiAuthIntrReadMegaBytes */
+				(u32)(deve->read_bytes >> 20),
+				/* scsiAuthIntrWrittenMegaBytes */
+				(u32)(deve->write_bytes >> 20),
+				/* FIXME: scsiAuthIntrHSOutCommands */
+				0,
+				/* scsiAuthIntrLastCreation */
+				(u32)(((u32)deve->creation_time -
+					    INITIAL_JIFFIES) * 100 / HZ),
+				/* FIXME: scsiAuthIntrRowStatus */
+				"Ready");
+		}
+		spin_unlock_irq(&se_nacl->device_list_lock);
+
+		spin_lock(&se_tpg->acl_node_lock);
+		atomic_dec(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&se_tpg->acl_node_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_auth_intr_seq_ops = {
+	.start	= scsi_auth_intr_seq_start,
+	.next	= scsi_auth_intr_seq_next,
+	.stop	= scsi_auth_intr_seq_stop,
+	.show	= scsi_auth_intr_seq_show
+};
+
+static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_auth_intr_seq_ops);
+}
+
+static const struct file_operations scsi_auth_intr_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_auth_intr_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Attached Initiator Port Table:
+ * It lists the SCSI Initiators attached to one of the local Target ports.
+ * Iterates through all active TPGs and uses the active sessions from each
+ * TPG to list the info for this table.
+ */
+static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	spin_lock_bh(&se_global->se_tpg_lock);
+	return seq_list_start(&se_global->g_se_tpg_list, *pos);
+}
+
+static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
+					 loff_t *pos)
+{
+	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
+}
+
+static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
+{
+	spin_unlock_bh(&se_global->se_tpg_lock);
+}
+
+static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
+						se_tpg_list);
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_node_acl *se_nacl;
+	struct se_session *se_sess;
+	unsigned char buf[64];
+	int j;
+
+	if (list_is_first(&se_tpg->se_tpg_list,
+			  &se_global->g_se_tpg_list))
+		seq_puts(seq, "inst dev port indx port_auth_indx port_name"
+			" port_ident\n");
+
+	if (!(se_tpg))
+		return 0;
+
+	spin_lock(&se_tpg->session_lock);
+	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+		if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
+		    (!se_sess->se_node_acl) ||
+		    (!se_sess->se_node_acl->device_list))
+			continue;
+
+		atomic_inc(&se_sess->mib_ref_count);
+		smp_mb__after_atomic_inc();
+		se_nacl = se_sess->se_node_acl;
+		atomic_inc(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&se_tpg->session_lock);
+
+		spin_lock_irq(&se_nacl->device_list_lock);
+		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
+			deve = &se_nacl->device_list[j];
+			if (!(deve->lun_flags &
+					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
+			   (!deve->se_lun))
+				continue;
+
+			lun = deve->se_lun;
+			if (!lun->lun_se_dev)
+				continue;
+
+			memset(buf, 0, 64);
+			if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
+				TPG_TFO(se_tpg)->sess_get_initiator_sid(
+					se_sess, (unsigned char *)&buf[0], 64);
+
+			seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
+				/* scsiInstIndex */
+				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
+				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
+				0,
+				/* scsiDeviceIndex */
+				lun->lun_se_dev->dev_index,
+				/* scsiPortIndex */
+				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
+				/* scsiAttIntrPortIndex */
+				(TPG_TFO(se_tpg)->sess_get_index != NULL) ?
+				TPG_TFO(se_tpg)->sess_get_index(se_sess) :
+				0,
+				/* scsiAttIntrPortAuthIntrIdx */
+				se_nacl->acl_index,
+				/* scsiAttIntrPortName */
+				se_nacl->initiatorname[0] ?
+					se_nacl->initiatorname : NONE,
+				/* scsiAttIntrPortIdentifier */
+				buf);
+		}
+		spin_unlock_irq(&se_nacl->device_list_lock);
+
+		spin_lock(&se_tpg->session_lock);
+		atomic_dec(&se_nacl->mib_ref_count);
+		smp_mb__after_atomic_dec();
+		atomic_dec(&se_sess->mib_ref_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&se_tpg->session_lock);
+
+	return 0;
+}
+
+static const struct seq_operations scsi_att_intr_port_seq_ops = {
+	.start	= scsi_att_intr_port_seq_start,
+	.next	= scsi_att_intr_port_seq_next,
+	.stop	= scsi_att_intr_port_seq_stop,
+	.show	= scsi_att_intr_port_seq_show
+};
+
+static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_att_intr_port_seq_ops);
+}
+
+static const struct file_operations scsi_att_intr_port_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_att_intr_port_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * SCSI Logical Unit Table
+ */
+static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return locate_hba_start(seq, pos);
+}
+
+static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	return locate_hba_next(seq, v, pos);
+}
+
+static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
+{
+	locate_hba_stop(seq, v);
+}
+
+#define SCSI_LU_INDEX		1
+static int scsi_lu_seq_show(struct seq_file *seq, void *v)
+{
+	struct se_hba *hba;
+	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
+						g_se_dev_list);
+	struct se_device *dev = se_dev->se_dev_ptr;
+	int j;
+	char str[28];
+
+	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
+		seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
+		" dev_type status state-bit num_cmds read_mbytes"
+		" write_mbytes resets full_stat hs_num_cmds creation_time\n");
+
+	if (!(dev))
+		return 0;
+
+	hba = dev->se_hba;
+	if (!(hba)) {
+		/* Log error ? */
+		return 0;
+	}
+
+	/* Fix LU state, if we can read it from the device */
+	seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
+			dev->dev_index, SCSI_LU_INDEX,
+			(unsigned long long)0, /* FIXME: scsiLuDefaultLun */
+			(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
+			/* scsiLuWwnName */
+			(char *)&DEV_T10_WWN(dev)->unit_serial[0] :
+			"None");
+
+	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
+	/* scsiLuVendorId */
+	for (j = 0; j < 8; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
+			DEV_T10_WWN(dev)->vendor[j] : 0x20;
+	str[8] = 0;
+	seq_printf(seq, " %s", str);
+
+	/* scsiLuProductId */
+	for (j = 0; j < 16; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
+			DEV_T10_WWN(dev)->model[j] : 0x20;
+	str[16] = 0;
+	seq_printf(seq, " %s", str);
+
+	/* scsiLuRevisionId */
+	for (j = 0; j < 4; j++)
+		str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
+			DEV_T10_WWN(dev)->revision[j] : 0x20;
+	str[4] = 0;
+	seq_printf(seq, " %s", str);
+
+	seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
+		/* scsiLuPeripheralType */
+		   TRANSPORT(dev)->get_device_type(dev),
+		   (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
+		"available" : "notavailable", /* scsiLuStatus */
+		"exposed", 	/* scsiLuState */
+		(unsigned long long)dev->num_cmds,
+		/* scsiLuReadMegaBytes */
+		(u32)(dev->read_bytes >> 20),
+		/* scsiLuWrittenMegaBytes */
+		(u32)(dev->write_bytes >> 20),
+		dev->num_resets, /* scsiLuInResets */
+		0, /* scsiLuOutTaskSetFullStatus */
+		0, /* scsiLuHSInCommands */
+		(u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
+							100 / HZ));
+
+	return 0;
+}
+
+static const struct seq_operations scsi_lu_seq_ops = {
+	.start  = scsi_lu_seq_start,
+	.next   = scsi_lu_seq_next,
+	.stop   = scsi_lu_seq_stop,
+	.show   = scsi_lu_seq_show
+};
+
+static int scsi_lu_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &scsi_lu_seq_ops);
+}
+
+static const struct file_operations scsi_lu_seq_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = scsi_lu_seq_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release,
+};
+
+/****************************************************************************/
+
+/*
+ * Remove proc fs entries
+ */
+void remove_scsi_target_mib(void)
+{
+	remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_port", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
+	remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
+	remove_proc_entry("scsi_target/mib", NULL);
+}
+
+/*
+ * Create proc fs entries for the mib tables
+ */
+int init_scsi_target_mib(void)
+{
+	struct proc_dir_entry *dir_entry;
+	struct proc_dir_entry *scsi_inst_entry;
+	struct proc_dir_entry *scsi_dev_entry;
+	struct proc_dir_entry *scsi_port_entry;
+	struct proc_dir_entry *scsi_transport_entry;
+	struct proc_dir_entry *scsi_tgt_dev_entry;
+	struct proc_dir_entry *scsi_tgt_port_entry;
+	struct proc_dir_entry *scsi_auth_intr_entry;
+	struct proc_dir_entry *scsi_att_intr_port_entry;
+	struct proc_dir_entry *scsi_lu_entry;
+
+	dir_entry = proc_mkdir("scsi_target/mib", NULL);
+	if (!(dir_entry)) {
+		printk(KERN_ERR "proc_mkdir() failed.\n");
+		return -1;
+	}
+
+	scsi_inst_entry =
+		create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
+	if (scsi_inst_entry)
+		scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
+	else
+		goto error;
+
+	scsi_dev_entry =
+		create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
+	if (scsi_dev_entry)
+		scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
+	else
+		goto error;
+
+	scsi_port_entry =
+		create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
+	if (scsi_port_entry)
+		scsi_port_entry->proc_fops = &scsi_port_seq_fops;
+	else
+		goto error;
+
+	scsi_transport_entry =
+		create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
+	if (scsi_transport_entry)
+		scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
+	else
+		goto error;
+
+	scsi_tgt_dev_entry =
+		create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
+	if (scsi_tgt_dev_entry)
+		scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
+	else
+		goto error;
+
+	scsi_tgt_port_entry =
+		create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
+	if (scsi_tgt_port_entry)
+		scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
+	else
+		goto error;
+
+	scsi_auth_intr_entry =
+		create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
+	if (scsi_auth_intr_entry)
+		scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
+	else
+		goto error;
+
+	scsi_att_intr_port_entry =
+	      create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
+	if (scsi_att_intr_port_entry)
+		scsi_att_intr_port_entry->proc_fops =
+				&scsi_att_intr_port_seq_fops;
+	else
+		goto error;
+
+	scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
+	if (scsi_lu_entry)
+		scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
+	else
+		goto error;
+
+	return 0;
+
+error:
+	printk(KERN_ERR "create_proc_entry() failed.\n");
+	remove_scsi_target_mib();
+	return -1;
+}
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables
+ */
+void init_scsi_index_table(void)
+{
+	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+	spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+		printk(KERN_ERR "Invalid index type %d\n", type);
+		return -1;
+	}
+
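+	/*
+	 * Row indexes start at 1; re-increment on wrap so an index of 0 is
+	 * never handed out.
+	 */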
+	spin_lock(&scsi_index_table.lock);
+	new_index = ++scsi_index_table.scsi_mib_index[type];
+	if (new_index == 0)
+		new_index = ++scsi_index_table.scsi_mib_index[type];
+	spin_unlock(&scsi_index_table.lock);
+
+	return new_index;
+}
+EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
new file mode 100644
index 0000000..2772046
--- /dev/null
+++ b/drivers/target/target_core_mib.h
@@ -0,0 +1,28 @@
+#ifndef TARGET_CORE_MIB_H
+#define TARGET_CORE_MIB_H
+
+typedef enum {
+	SCSI_INST_INDEX,
+	SCSI_DEVICE_INDEX,
+	SCSI_AUTH_INTR_INDEX,
+	SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+	spinlock_t	lock;
+	u32 		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
+/* SCSI Port stats */
+struct scsi_port_stats {
+	u64	cmd_pdus;
+	u64	tx_data_octets;
+	u64	rx_data_octets;
+} ____cacheline_aligned;
+
+extern int init_scsi_target_mib(void);
+extern void remove_scsi_target_mib(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
+
+#endif   /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
new file mode 100644
index 0000000..2521f75
--- /dev/null
+++ b/drivers/target/target_core_pr.c
@@ -0,0 +1,4252 @@
+/*******************************************************************************
+ * Filename:  target_core_pr.c
+ *
+ * This file contains SPC-3 compliant persistent reservations and
+ * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
+ *
+ * Copyright (c) 2009, 2010 Rising Tide Systems
+ * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+/*
+ * Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
+ */
+struct pr_transport_id_holder {
+	int dest_local_nexus;
+	struct t10_pr_registration *dest_pr_reg;
+	struct se_portal_group *dest_tpg;
+	struct se_node_acl *dest_node_acl;
+	struct se_dev_entry *dest_se_deve;
+	struct list_head dest_list;
+};
+
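+/*
+ * Write the ",i,0x<ISID>" suffix for pr_reg into buf and return 1 when an
+ * ISID was present at registration time; otherwise leave buf untouched and
+ * return 0.
+ */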
+int core_pr_dump_initiator_port(
+	struct t10_pr_registration *pr_reg,
+	char *buf,
+	u32 size)
+{
+	if (!(pr_reg->isid_present_at_reg))
+		return 0;
+
+	snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
+	return 1;
+}
+
+static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
+			struct t10_pr_registration *, int);
+
+static int core_scsi2_reservation_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	switch (cdb[0]) {
+	case INQUIRY:
+	case RELEASE:
+	case RELEASE_10:
+		return 0;
+	default:
+		return 1;
+	}
+
+	return 1;
+}
+
+static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!(sess))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -1;
+	}
+	if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static int core_scsi2_reservation_release(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+
+	if (!(sess) || !(tpg))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!dev->dev_reserved_node_acl || !sess) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+
+	if (dev->dev_reserved_node_acl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	dev->dev_reserved_node_acl = NULL;
+	dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+		dev->dev_res_bin_isid = 0;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
+		" MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return 0;
+}
+
+static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	struct se_portal_group *tpg = sess->se_tpg;
+
+	if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
+	    (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
+		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+				" ILLEGAL_REQUEST\n");
+		return PYX_TRANSPORT_ILLEGAL_REQUEST;
+	}
+	/*
+	 * This is currently the case for target_core_mod passthrough struct se_cmd
+	 * ops
+	 */
+	if (!(sess) || !(tpg))
+		return 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (dev->dev_reserved_node_acl &&
+	   (dev->dev_reserved_node_acl != sess->se_node_acl)) {
+		printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+			TPG_TFO(tpg)->get_fabric_name());
+		printk(KERN_ERR "Original reserver LUN: %u %s\n",
+			SE_LUN(cmd)->unpacked_lun,
+			dev->dev_reserved_node_acl->initiatorname);
+		printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
+			" from %s \n", SE_LUN(cmd)->unpacked_lun,
+			cmd->se_deve->mapped_lun,
+			sess->se_node_acl->initiatorname);
+		spin_unlock(&dev->dev_reservation_lock);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+	dev->dev_reserved_node_acl = sess->se_node_acl;
+	dev->dev_flags |= DF_SPC2_RESERVATIONS;
+	if (sess->sess_bin_isid != 0) {
+		dev->dev_res_bin_isid = sess->sess_bin_isid;
+		dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+	}
+	printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+		" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+		sess->se_node_acl->initiatorname);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return 0;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+					struct se_node_acl *, struct se_session *);
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+/*
+ * Setup in target_core_transport.c:transport_generic_cmd_sequencer()
+ * and called via struct se_cmd->transport_emulate_cdb() in TCM processing
+ * thread context.
+ */
+int core_scsi2_emulate_crh(struct se_cmd *cmd)
+{
+	struct se_session *se_sess = cmd->se_sess;
+	struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+	struct t10_pr_registration *pr_reg;
+	struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
+	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+	int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+	int conflict = 0;
+
+	if (!(se_sess))
+		return 0;
+
+	if (!(crh))
+		goto after_crh;
+
+	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+			se_sess);
+	if (pr_reg) {
+		/*
+		 * From spc4r17 5.7.3 Exceptions to SPC-2 RESERVE and RELEASE
+		 * behavior
+		 *
+		 * A RESERVE(6) or RESERVE(10) command shall complete with GOOD
+		 * status, but no reservation shall be established and the
+		 * persistent reservation shall not be changed, if the command
+		 * is received from a) and b) below.
+		 *
+		 * A RELEASE(6) or RELEASE(10) command shall complete with GOOD
+		 * status, but the persistent reservation shall not be released,
+		 * if the command is received from a) and b)
+		 *
+		 * a) An I_T nexus that is a persistent reservation holder; or
+		 * b) An I_T nexus that is registered if a registrants only or
+		 *    all registrants type persistent reservation is present.
+		 *
+		 * In all other cases, a RESERVE(6) command, RESERVE(10) command,
+		 * RELEASE(6) command, or RELEASE(10) command shall be processed
+		 * as defined in SPC-2.
+		 */
+		if (pr_reg->pr_res_holder) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 0;
+		}
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+		    (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+			core_scsi3_put_pr_reg(pr_reg);
+			return 0;
+		}
+		core_scsi3_put_pr_reg(pr_reg);
+		conflict = 1;
+	} else {
+		/*
+		 * Following spc2r20 5.5.1 Reservations overview:
+		 *
+		 * If a logical unit has executed a PERSISTENT RESERVE OUT
+		 * command with the REGISTER or the REGISTER AND IGNORE
+		 * EXISTING KEY service action and is still registered by any
+		 * initiator, all RESERVE commands and all RELEASE commands
+		 * regardless of initiator shall conflict and shall terminate
+		 * with a RESERVATION CONFLICT status.
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		conflict = (list_empty(&pr_tmpl->registration_list)) ? 0 : 1;
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+
+	if (conflict) {
+		printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+			" while active SPC-3 registrations exist,"
+			" returning RESERVATION_CONFLICT\n");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+after_crh:
+	if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
+		return core_scsi2_reservation_reserve(cmd);
+	else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
+		return core_scsi2_reservation_release(cmd);
+	else
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * Begin SPC-3/SPC-4 Persistent Reservations emulation support
+ *
+ * This function is called by those initiator ports who are *NOT*
+ * the active PR reservation holder when a reservation is present.
+ */
+static int core_scsi3_pr_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	struct se_dev_entry *se_deve;
+	struct se_session *se_sess = SE_SESS(cmd);
+	int other_cdb = 0, ignore_reg;
+	int registered_nexus = 0, ret = 1; /* Conflict by default */
+	int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
+	int we = 0; /* Write Exclusive */
+	int legacy = 0; /* Act like a legacy device and return
+			 * RESERVATION CONFLICT on some CDBs */
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_seq_non_holder(cmd,
+					cdb, pr_reg_type);
+
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Determine if the registration should be ignored due to
+	 * non-matching ISIDs in core_scsi3_pr_reservation_check().
+	 */
+	ignore_reg = (pr_reg_type & 0x80000000);
+	if (ignore_reg)
+		pr_reg_type &= ~0x80000000;
+
+	switch (pr_reg_type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		we = 1;
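+		/* fall through */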
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		/*
+		 * Some commands are only allowed for the persistent reservation
+		 * holder.
+		 */
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		we = 1;
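+		/* fall through */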
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		/*
+		 * Some commands are only allowed for registered I_T Nexuses.
+		 */
+		reg_only = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		we = 1;
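+		/* fall through */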
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		/*
+		 * Each registered I_T Nexus is a reservation holder.
+		 */
+		all_reg = 1;
+		if ((se_deve->def_pr_registered) && !(ignore_reg))
+			registered_nexus = 1;
+		break;
+	default:
+		return -1;
+	}
+	/*
+	 * Referenced from spc4r17 table 45 for *NON* PR holder access
+	 */
+	switch (cdb[0]) {
+	case SECURITY_PROTOCOL_IN:
+		if (registered_nexus)
+			return 0;
+		ret = (we) ? 0 : 1;
+		break;
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case READ_ATTRIBUTE:
+	case READ_BUFFER:
+	case RECEIVE_DIAGNOSTIC:
+		if (legacy) {
+			ret = 1;
+			break;
+		}
+		if (registered_nexus) {
+			ret = 0;
+			break;
+		}
+		ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+		break;
+	case PERSISTENT_RESERVE_OUT:
+		/*
+		 * This follows PERSISTENT_RESERVE_OUT service actions that
+		 * are allowed in the presence of various reservations.
+		 * See spc4r17, table 46
+		 */
+		switch (cdb[1] & 0x1f) {
+		case PRO_CLEAR:
+		case PRO_PREEMPT:
+		case PRO_PREEMPT_AND_ABORT:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		case PRO_REGISTER:
+		case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+			ret = 0;
+			break;
+		case PRO_REGISTER_AND_MOVE:
+		case PRO_RESERVE:
+			ret = 1;
+			break;
+		case PRO_RELEASE:
+			ret = (registered_nexus) ? 0 : 1;
+			break;
+		default:
+			printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+				" action: 0x%02x\n", cdb[1] & 0x1f);
+			return -1;
+		}
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
+		ret = 0;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/* Handled by CRH=1 in core_scsi2_emulate_crh() */
+		ret = 0;
+		break;
+	case TEST_UNIT_READY:
+		ret = (legacy) ? 1 : 0; /* Conflict for legacy */
+		break;
+	case MAINTENANCE_IN:
+		switch (cdb[1] & 0x1f) {
+		case MI_MANAGEMENT_PROTOCOL_IN:
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_SUPPORTED_OPERATION_CODES:
+		case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+			if (legacy) {
+				ret = 1;
+				break;
+			}
+			if (registered_nexus) {
+				ret = 0;
+				break;
+			}
+			ret = (we) ? 0 : 1; /* Allowed Write Exclusive */
+			break;
+		case MI_REPORT_ALIASES:
+		case MI_REPORT_IDENTIFYING_INFORMATION:
+		case MI_REPORT_PRIORITY:
+		case MI_REPORT_TARGET_PGS:
+		case MI_REPORT_TIMESTAMP:
+			ret = 0; /* Allowed */
+			break;
+		default:
+			printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+				(cdb[1] & 0x1f));
+			return -1;
+		}
+		break;
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case INQUIRY:
+	case LOG_SENSE:
+	case READ_MEDIA_SERIAL_NUMBER:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		ret = 0; /* Allowed CDBs */
+		break;
+	default:
+		other_cdb = 1;
+		break;
+	}
+	/*
+	 * Case where the CDB is explicitly allowed in the above switch
+	 * statement.
+	 */
+	if (!(ret) && !(other_cdb)) {
+#if 0
+		printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+			" reservation holder\n", cdb[0],
+			core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+		return ret;
+	}
+	/*
+	 * Check if write exclusive initiator ports *NOT* holding the
+	 * WRITE_EXCLUSIVE_* reservation.
+	 */
+	if ((we) && !(registered_nexus)) {
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			/*
+			 * Conflict for write exclusive
+			 */
+			printk(KERN_INFO "%s Conflict for unregistered nexus"
+				" %s CDB: 0x%02x to %s reservation\n",
+				transport_dump_cmd_direction(cmd),
+				se_sess->se_node_acl->initiatorname, cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+			return 1;
+		} else {
+			/*
+			 * Allow non WRITE CDBs for all Write Exclusive
+			 * PR TYPEs to pass for registered and
+			 * non-registered nexuses NOT holding the reservation.
+			 *
+			 * We only make noise for the unregistered nexuses,
+			 * as we expect registered non-reservation holding
+			 * nexuses to issue CDBs.
+			 */
+#if 0
+			if (!(registered_nexus)) {
+				printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+					" for %s reservation on unregistered"
+					" nexus\n", cdb[0],
+					core_scsi3_pr_dump_type(pr_reg_type));
+			}
+#endif
+			return 0;
+		}
+	} else if ((reg_only) || (all_reg)) {
+		if (registered_nexus) {
+			/*
+			 * For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
+			 * allow commands from registered nexuses.
+			 */
+#if 0
+			printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+				" reservation\n", cdb[0],
+				core_scsi3_pr_dump_type(pr_reg_type));
+#endif
+			return 0;
+		}
+	}
+	printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+		" for %s reservation\n", transport_dump_cmd_direction(cmd),
+		(registered_nexus) ? "" : "un",
+		se_sess->se_node_acl->initiatorname, cdb[0],
+		core_scsi3_pr_dump_type(pr_reg_type));
+
+	return 1; /* Conflict by default */
+}
+
+static u32 core_scsi3_pr_generation(struct se_device *dev)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	u32 prg;
+	/*
+	 * PRGeneration field shall contain the value of a 32-bit wrapping
+	 * counter maintained by the device server.
+	 *
+	 * Note that this is done regardless of Active Persist across
+	 * Target PowerLoss (APTPL)
+	 *
+	 * See spc4r17 section 6.3.12 READ_KEYS service action
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	prg = T10_RES(su_dev)->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return prg;
+}
+
+static int core_scsi3_pr_reservation_check(
+	struct se_cmd *cmd,
+	u32 *pr_reg_type)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *sess = cmd->se_sess;
+	int ret;
+
+	if (!(sess))
+		return 0;
+	/*
+	 * A legacy SPC-2 reservation is being held.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS)
+		return core_scsi2_reservation_check(cmd, pr_reg_type);
+
+	spin_lock(&dev->dev_reservation_lock);
+	if (!(dev->dev_pr_res_holder)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	*pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+	cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+	if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return -1;
+	}
+	if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return 0;
+	}
+	ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
+	       sess->sess_bin_isid) ? 0 : -1;
+	/*
+	 * Use bit in *pr_reg_type to notify ISID mismatch in
+	 * core_scsi3_pr_seq_non_holder().
+	 */
+	if (ret != 0)
+		*pr_reg_type |= 0x80000000;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+		return NULL;
+	}
+
+	pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+					GFP_ATOMIC);
+	if (!(pr_reg->pr_aptpl_buf)) {
+		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = nacl;
+	pr_reg->pr_reg_deve = deve;
+	pr_reg->pr_res_mapped_lun = deve->mapped_lun;
+	pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = aptpl;
+	pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
+	/*
+	 * If an ISID value for this SCSI Initiator Port exists,
+	 * save it to the registration now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+
+	return pr_reg;
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *);
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
+
+/*
+ * Function used for handling PR registrations for ALL_TG_PT=1 and ALL_TG_PT=0
+ * modes.
+ */
+static struct t10_pr_registration *__core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_dev_entry *deve_tmp;
+	struct se_node_acl *nacl_tmp;
+	struct se_port *port, *port_tmp;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
+	int ret;
+	/*
+	 * Create a registration for the I_T Nexus upon which the
+	 * PROUT REGISTER was received.
+	 */
+	pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!(pr_reg))
+		return NULL;
+	/*
+	 * Return pointer to pr_reg for ALL_TG_PT=0
+	 */
+	if (!(all_tg_pt))
+		return pr_reg;
+	/*
+	 * Create list of matching SCSI Initiator Port registrations
+	 * for ALL_TG_PT=1
+	 */
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
+		atomic_inc(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		spin_lock_bh(&port->sep_alua_lock);
+		list_for_each_entry(deve_tmp, &port->sep_alua_list,
+					alua_port_list) {
+			/*
+			 * This pointer will be NULL for demo mode MappedLUNs
+			 * that have not been made explicit via a ConfigFS
+			 * MappedLUN group for the SCSI Initiator Node ACL.
+			 */
+			if (!(deve_tmp->se_lun_acl))
+				continue;
+
+			nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+			/*
+			 * Skip the matching struct se_node_acl that is allocated
+			 * above..
+			 */
+			if (nacl == nacl_tmp)
+				continue;
+			/*
+			 * Only perform PR registrations for target ports on
+			 * the same fabric module as the REGISTER w/ ALL_TG_PT=1
+			 * arrived.
+			 */
+			if (tfo != nacl_tmp->se_tpg->se_tpg_tfo)
+				continue;
+			/*
+			 * Look for a matching Initiator Node ACL in ASCII format
+			 */
+			if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
+				continue;
+
+			atomic_inc(&deve_tmp->pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock_bh(&port->sep_alua_lock);
+			/*
+			 * Grab a configfs group dependency that is released
+			 * for the exception path at label out: below, or upon
+			 * completion of adding ALL_TG_PT=1 registrations in
+			 * __core_scsi3_add_registration()
+			 */
+			ret = core_scsi3_lunacl_depend_item(deve_tmp);
+			if (ret < 0) {
+				printk(KERN_ERR "core_scsi3_lunacl_depend"
+						"_item() failed\n");
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				goto out;
+			}
+			/*
+			 * Located a matching SCSI Initiator Port on a different
+			 * port, allocate the pr_reg_atp and attach it to the
+			 * pr_reg->pr_reg_atp_list that will be processed once
+			 * the original *pr_reg is processed in
+			 * __core_scsi3_add_registration()
+			 */
+			pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
+						nacl_tmp, deve_tmp, NULL,
+						sa_res_key, all_tg_pt, aptpl);
+			if (!(pr_reg_atp)) {
+				atomic_dec(&port->sep_tg_pt_ref_cnt);
+				smp_mb__after_atomic_dec();
+				atomic_dec(&deve_tmp->pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_lunacl_undepend_item(deve_tmp);
+				goto out;
+			}
+
+			list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
+				      &pr_reg->pr_reg_atp_list);
+			spin_lock_bh(&port->sep_alua_lock);
+		}
+		spin_unlock_bh(&port->sep_alua_lock);
+
+		spin_lock(&dev->se_port_lock);
+		atomic_dec(&port->sep_tg_pt_ref_cnt);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&dev->se_port_lock);
+
+	return pr_reg;
+out:
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+	}
+	kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	return NULL;
+}
+
+int core_scsi3_alloc_aptpl_registration(
+	struct t10_reservation_template *pr_tmpl,
+	u64 sa_res_key,
+	unsigned char *i_port,
+	unsigned char *isid,
+	u32 mapped_lun,
+	unsigned char *t_port,
+	u16 tpgt,
+	u32 target_lun,
+	int res_holder,
+	int all_tg_pt,
+	u8 type)
+{
+	struct t10_pr_registration *pr_reg;
+
+	if (!(i_port) || !(t_port) || !(sa_res_key)) {
+		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+		return -1;
+	}
+
+	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+		return -1;
+	}
+	pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
+	if (!(pr_reg->pr_aptpl_buf)) {
+		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&pr_reg->pr_reg_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_abort_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_aptpl_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_list);
+	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
+	atomic_set(&pr_reg->pr_res_holders, 0);
+	pr_reg->pr_reg_nacl = NULL;
+	pr_reg->pr_reg_deve = NULL;
+	pr_reg->pr_res_mapped_lun = mapped_lun;
+	pr_reg->pr_aptpl_target_lun = target_lun;
+	pr_reg->pr_res_key = sa_res_key;
+	pr_reg->pr_reg_all_tg_pt = all_tg_pt;
+	pr_reg->pr_reg_aptpl = 1;
+	pr_reg->pr_reg_tg_pt_lun = NULL;
+	pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
+	pr_reg->pr_res_type = type;
+	/*
+	 * If an ISID value had been saved in APTPL metadata for this
+	 * SCSI Initiator Port, restore it now.
+	 */
+	if (isid != NULL) {
+		pr_reg->pr_reg_bin_isid = get_unaligned_be64(isid);
+		snprintf(pr_reg->pr_reg_isid, PR_REG_ISID_LEN, "%s", isid);
+		pr_reg->isid_present_at_reg = 1;
+	}
+	/*
+	 * Copy the i_port and t_port information from caller.
+	 */
+	snprintf(pr_reg->pr_iport, PR_APTPL_MAX_IPORT_LEN, "%s", i_port);
+	snprintf(pr_reg->pr_tport, PR_APTPL_MAX_TPORT_LEN, "%s", t_port);
+	pr_reg->pr_reg_tpgt = tpgt;
+	/*
+	 * Set pr_res_holder from caller, the pr_reg who is the reservation
+	 * holder will get its pointer set in core_scsi3_aptpl_reserve() once
+	 * the Initiator Node LUN ACL from the fabric module is created for
+	 * this registration.
+	 */
+	pr_reg->pr_res_holder = res_holder;
+
+	list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
+	printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+			" metadata\n", (res_holder) ? "+reservation" : "");
+	return 0;
+}
+
+static void core_scsi3_aptpl_reserve(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_node_acl *node_acl,
+	struct t10_pr_registration *pr_reg)
+{
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	spin_lock(&dev->dev_reservation_lock);
+	dev->dev_pr_res_holder = pr_reg;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		TPG_TFO(tpg)->get_fabric_name(),
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+		TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+}
+
+static void __core_scsi3_add_registration(struct se_device *, struct se_node_acl *,
+				struct t10_pr_registration *, int, int);
+
+static int __core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 target_lun,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
+	unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+	u16 tpgt;
+
+	memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
+	memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
+	/*
+	 * Copy Initiator Port information from struct se_node_acl
+	 */
+	snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
+	snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
+			TPG_TFO(tpg)->tpg_get_wwn(tpg));
+	tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+	/*
+	 * Look for the matching registrations+reservation from those
+	 * created from APTPL metadata.  Note that multiple registrations
+	 * may exist for fabrics that use ISIDs in their SCSI Initiator Port
+	 * TransportIDs.
+	 */
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+		     (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+		    !(strcmp(pr_reg->pr_tport, t_port)) &&
+		     (pr_reg->pr_reg_tpgt == tpgt) &&
+		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+
+			pr_reg->pr_reg_nacl = nacl;
+			pr_reg->pr_reg_deve = deve;
+			pr_reg->pr_reg_tg_pt_lun = lun;
+
+			list_del(&pr_reg->pr_reg_aptpl_list);
+			spin_unlock(&pr_tmpl->aptpl_reg_lock);
+			/*
+			 * At this point all of the pointers in *pr_reg will
+			 * be set up, so go ahead and add the registration.
+			 */
+
+			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
+			/*
+			 * If this registration is the reservation holder,
+			 * make that happen now..
+			 */
+			if (pr_reg->pr_res_holder)
+				core_scsi3_aptpl_reserve(dev, tpg,
+						nacl, pr_reg);
+			/*
+			 * Reenable pr_aptpl_active to accept new metadata
+			 * updates once the SCSI device is active again..
+			 */
+			spin_lock(&pr_tmpl->aptpl_reg_lock);
+			pr_tmpl->pr_aptpl_active = 1;
+		}
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+
+	return 0;
+}
+
+int core_scsi3_check_aptpl_registration(
+	struct se_device *dev,
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	struct se_lun_acl *lun_acl)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct se_node_acl *nacl = lun_acl->se_lun_nacl;
+	struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
+
+	if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+		return 0;
+
+	return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
+				lun->unpacked_lun, nacl, deve);
+}
+
+static void __core_scsi3_dump_registration(
+	struct target_core_fabric_ops *tfo,
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type)
+{
+	struct se_portal_group *se_tpg = nacl->se_tpg;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
+		"_AND_MOVE" : (register_type == 1) ?
+		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
+		(prf_isid) ? i_buf : "");
+	printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+		 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+		tfo->tpg_get_tag(se_tpg));
+	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n",  tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		TRANSPORT(dev)->name);
+	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x  APTPL: %d\n", tfo->get_fabric_name(),
+		pr_reg->pr_res_key, pr_reg->pr_res_generation,
+		pr_reg->pr_reg_aptpl);
+}
+
+/*
+ * This function may be called with struct se_device->dev_reservation_lock
+ * held when register_move = 1
+ */
+static void __core_scsi3_add_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct t10_pr_registration *pr_reg,
+	int register_type,
+	int register_move)
+{
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+
+	/*
+	 * Increment PRgeneration counter for struct se_device upon a successful
+	 * REGISTER, see spc4r17 section 6.3.2 READ_KEYS service action
+	 *
+	 * Also, when register_move = 1 for PROUT REGISTER_AND_MOVE service
+	 * action, the struct se_device->dev_reservation_lock will already be held,
+	 * so we do not call core_scsi3_pr_generation() which grabs the lock
+	 * for the REGISTER.
+	 */
+	pr_reg->pr_res_generation = (register_move) ?
+			T10_RES(su_dev)->pr_generation++ :
+			core_scsi3_pr_generation(dev);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
+	pr_reg->pr_reg_deve->def_pr_registered = 1;
+
+	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
+	 */
+	if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+		return;
+	/*
+	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
+	 * allocated in __core_scsi3_alloc_registration()
+	 */
+	list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+			&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+		list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+
+		pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		list_add_tail(&pr_reg_tmp->pr_reg_list,
+			      &pr_tmpl->registration_list);
+		pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
+
+		__core_scsi3_dump_registration(tfo, dev,
+				pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
+				register_type);
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * Drop configfs group dependency reference from
+		 * __core_scsi3_alloc_registration()
+		 */
+		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+	}
+}
+
+static int core_scsi3_alloc_registration(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_dev_entry *deve,
+	unsigned char *isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl,
+	int register_type,
+	int register_move)
+{
+	struct t10_pr_registration *pr_reg;
+
+	pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
+			sa_res_key, all_tg_pt, aptpl);
+	if (!(pr_reg))
+		return -1;
+
+	__core_scsi3_add_registration(dev, nacl, pr_reg,
+			register_type, register_move);
+	return 0;
+}
+
+static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	unsigned char *isid)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct se_portal_group *tpg;
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+		/*
+		 * First look for a matching struct se_node_acl
+		 */
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		/*
+		 * If this registration does NOT contain a fabric provided
+		 * ISID, then we have found a match.
+		 */
+		if (!(pr_reg->isid_present_at_reg)) {
+			/*
+			 * Determine if this SCSI device server requires that
+			 * SCSI Initiator TransportID w/ ISIDs is enforced
+			 * for fabric modules (iSCSI) requiring them.
+			 */
+			if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+				if (DEV_ATTRIB(dev)->enforce_pr_isids)
+					continue;
+			}
+			atomic_inc(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&pr_tmpl->registration_lock);
+			return pr_reg;
+		}
+		/*
+		 * If the *pr_reg contains a fabric defined ISID for multi-value
+		 * SCSI Initiator Port TransportIDs, then we expect a valid
+		 * matching ISID to be provided by the local SCSI Initiator Port.
+		 */
+		if (!(isid))
+			continue;
+		if (strcmp(isid, pr_reg->pr_reg_isid))
+			continue;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
+		return pr_reg;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	return NULL;
+}
+
+static struct t10_pr_registration *core_scsi3_locate_pr_reg(
+	struct se_device *dev,
+	struct se_node_acl *nacl,
+	struct se_session *sess)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+	unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+
+	if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+		memset(&buf[0], 0, PR_REG_ISID_LEN);
+		TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+					PR_REG_ISID_LEN);
+		isid_ptr = &buf[0];
+	}
+
+	return __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr);
+}
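+
+/*
+ * core_scsi3_locate_pr_reg() returns the matching registration with its
+ * pr_res_holders reference count elevated; callers must balance it with
+ * core_scsi3_put_pr_reg().  Sketch of the expected pairing:
+ *
+ *	pr_reg = core_scsi3_locate_pr_reg(dev, nacl, sess);
+ *	if (pr_reg) {
+ *		... inspect pr_reg ...
+ *		core_scsi3_put_pr_reg(pr_reg);
+ *	}
+ */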
+
+static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
+{
+	atomic_dec(&pr_reg->pr_res_holders);
+	smp_mb__after_atomic_dec();
+}
+
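+/*
+ * Returns 1 when pr_reg held the reservation and an implicit RELEASE was
+ * performed, -1 when an ALL_TG_PT=1 unregister conflicts with an existing
+ * reservation holding a matching key from another SCSI Initiator Port, and
+ * 0 otherwise.
+ */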
+static int core_scsi3_check_implict_release(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct t10_pr_registration *pr_res_holder;
+	int ret = 0;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		return ret;
+	}
+	if (pr_res_holder == pr_reg) {
+		/*
+		 * Perform an implicit RELEASE if the registration that
+		 * is being released is holding the reservation.
+		 *
+		 * From spc4r17, section 5.7.11.1:
+		 *
+		 * e) If the I_T nexus is the persistent reservation holder
+		 *    and the persistent reservation is not an all registrants
+		 *    type, then a PERSISTENT RESERVE OUT command with REGISTER
+		 *    service action or REGISTER AND IGNORE EXISTING KEY
+		 *    service action with the SERVICE ACTION RESERVATION KEY
+		 *    field set to zero (see 5.7.11.3).
+		 */
+		__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+		ret = 1;
+		/*
+		 * For 'All Registrants' reservation types, all existing
+		 * registrations are still processed as reservation holders
+		 * in core_scsi3_pr_seq_non_holder() after the initial
+		 * reservation holder is implicitly released here.
+		 */
+	} else if (pr_reg->pr_reg_all_tg_pt &&
+		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
+			  pr_reg->pr_reg_nacl->initiatorname)) &&
+		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+			" UNREGISTER while existing reservation with matching"
+			" key 0x%016Lx is present from another SCSI Initiator"
+			" Port\n", pr_reg->pr_res_key);
+		ret = -1;
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct t10_reservation_template->registration_lock held.
+ */
+static void __core_scsi3_free_registration(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int dec_holders)
+{
+	struct target_core_fabric_ops *tfo =
+			pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	pr_reg->pr_reg_deve->def_pr_registered = 0;
+	pr_reg->pr_reg_deve->pr_res_key = 0;
+	list_del(&pr_reg->pr_reg_list);
+	/*
+	 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
+	 * so call core_scsi3_put_pr_reg() to decrement our reference.
+	 */
+	if (dec_holders)
+		core_scsi3_put_pr_reg(pr_reg);
+	/*
+	 * Wait until all references from any other I_T nexuses for this
+	 * *pr_reg have been released.  Because list_del() is called above,
+	 * the last core_scsi3_put_pr_reg(pr_reg) will drop this reference
+	 * count back to zero, after which we release *pr_reg.
+	 */
+	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
+		spin_unlock(&pr_tmpl->registration_lock);
+		printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+				tfo->get_fabric_name());
+		cpu_relax();
+		spin_lock(&pr_tmpl->registration_lock);
+	}
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+		" Node: %s%s\n", tfo->get_fabric_name(),
+		pr_reg->pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+		" Port(s)\n", tfo->get_fabric_name(),
+		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
+		TRANSPORT(dev)->name);
+	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+		pr_reg->pr_res_generation);
+
+	if (!(preempt_and_abort_list)) {
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+		return;
+	}
+	/*
+	 * For PREEMPT_AND_ABORT, the list of *pr_reg in preempt_and_abort_list
+	 * are released once the ABORT_TASK_SET has completed..
+	 */
+	list_add_tail(&pr_reg->pr_reg_abort_list, preempt_and_abort_list);
+}
+
+void core_scsi3_free_pr_reg_from_nacl(
+	struct se_device *dev,
+	struct se_node_acl *nacl)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+	/*
+	 * If the passed se_node_acl matches the reservation holder,
+	 * release the reservation.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder != NULL) &&
+	    (pr_res_holder->pr_reg_nacl == nacl))
+		__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Release any registration associated with the struct se_node_acl.
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		if (pr_reg->pr_reg_nacl != nacl)
+			continue;
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+}
+
+void core_scsi3_free_all_registrations(
+	struct se_device *dev)
+{
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder != NULL) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+				pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 0);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	spin_lock(&pr_tmpl->aptpl_reg_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
+				pr_reg_aptpl_list) {
+		list_del(&pr_reg->pr_reg_aptpl_list);
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+	spin_unlock(&pr_tmpl->aptpl_reg_lock);
+}
+
+static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
+{
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&tpg->tpg_group.cg_item);
+}
+
+static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
+{
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&tpg->tpg_group.cg_item);
+
+	atomic_dec(&tpg->tpg_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl)
+		return 0;
+
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&nacl->acl_group.cg_item);
+}
+
+static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
+{
+	struct se_portal_group *tpg = nacl->se_tpg;
+
+	if (nacl->dynamic_node_acl) {
+		atomic_dec(&nacl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&nacl->acl_group.cg_item);
+
+	atomic_dec(&nacl->acl_pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!(lun_acl))
+		return 0;
+
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+}
+
+static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
+{
+	struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
+	struct se_node_acl *nacl;
+	struct se_portal_group *tpg;
+	/*
+	 * For nacl->dynamic_node_acl=1
+	 */
+	if (!(lun_acl)) {
+		atomic_dec(&se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		return;
+	}
+	nacl = lun_acl->se_lun_nacl;
+	tpg = nacl->se_tpg;
+
+	configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+			&lun_acl->se_lun_group.cg_item);
+
+	atomic_dec(&se_deve->pr_ref_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int core_scsi3_decode_spec_i_port(
+	struct se_cmd *cmd,
+	struct se_portal_group *tpg,
+	unsigned char *l_isid,
+	u64 sa_res_key,
+	int all_tg_pt,
+	int aptpl)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_port *tmp_port;
+	struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_node_acl *dest_node_acl = NULL;
+	struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+	struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
+	struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
+	struct list_head tid_dest_list;
+	struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
+	struct target_core_fabric_ops *tmp_tf_ops;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tpdl, tid_len = 0;
+	int ret, dest_local_nexus, prf_isid;
+	u32 dest_rtpi = 0;
+
+	memset(dest_iport, 0, 64);
+	INIT_LIST_HEAD(&tid_dest_list);
+
+	local_se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Allocate a struct pr_transport_id_holder, set up its local
+	 * node ACL and se_dev_entry pointers, and add it to the
+	 * tid_dest_list list_head for the registration processing
+	 * performed in the tid_dest_list loop below.
+	 */
+	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
+	if (!(tidh_new)) {
+		printk(KERN_ERR "Unable to allocate tidh_new\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	INIT_LIST_HEAD(&tidh_new->dest_list);
+	tidh_new->dest_tpg = tpg;
+	tidh_new->dest_node_acl = se_sess->se_node_acl;
+	tidh_new->dest_se_deve = local_se_deve;
+
+	local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+				se_sess->se_node_acl, local_se_deve, l_isid,
+				sa_res_key, all_tg_pt, aptpl);
+	if (!(local_pr_reg)) {
+		kfree(tidh_new);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	tidh_new->dest_pr_reg = local_pr_reg;
+	/*
+	 * The local I_T nexus does not hold any configfs dependencies,
+	 * so we set tid_h->dest_local_nexus=1 to prevent the
+	 * configfs_undepend_item() calls in the tid_dest_list loops below.
+	 */
+	tidh_new->dest_local_nexus = 1;
+	list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+	/*
+	 * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+	 * first extract TransportID Parameter Data Length, and make sure
+	 * the value matches up to the SCSI expected data transfer length.
+	 */
+	tpdl = (buf[24] & 0xff) << 24;
+	tpdl |= (buf[25] & 0xff) << 16;
+	tpdl |= (buf[26] & 0xff) << 8;
+	tpdl |= buf[27] & 0xff;
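+	/*
+	 * The four shifts above assemble the big-endian TransportID Parameter
+	 * Data Length field at offset 24; an equivalent form would be:
+	 *
+	 *	tpdl = get_unaligned_be32(&buf[24]);
+	 */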
+
+	if ((tpdl + 28) != cmd->data_length) {
+		printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+			" does not equal CDB data_length: %u\n", tpdl,
+			cmd->data_length);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	/*
+	 * Start processing the received transport IDs using the
+	 * receiving I_T Nexus portal's fabric dependent methods to
+	 * obtain the SCSI Initiator Port/Device Identifiers.
+	 */
+	ptr = &buf[28];
+
+	while (tpdl > 0) {
+		proto_ident = (ptr[0] & 0x0f);
+		dest_tpg = NULL;
+
+		spin_lock(&dev->se_port_lock);
+		list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
+			tmp_tpg = tmp_port->sep_tpg;
+			if (!(tmp_tpg))
+				continue;
+			tmp_tf_ops = TPG_TFO(tmp_tpg);
+			if (!(tmp_tf_ops))
+				continue;
+			if (!(tmp_tf_ops->get_fabric_proto_ident) ||
+			    !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+				continue;
+			/*
+			 * Look for the matching proto_ident provided by
+			 * the received TransportID
+			 */
+			tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
+			if (tmp_proto_ident != proto_ident)
+				continue;
+			dest_rtpi = tmp_port->sep_rtpi;
+
+			i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
+					tmp_tpg, (const char *)ptr, &tid_len,
+					&iport_ptr);
+			if (!(i_str))
+				continue;
+
+			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_inc();
+			spin_unlock(&dev->se_port_lock);
+
+			ret = core_scsi3_tpg_depend_item(tmp_tpg);
+			if (ret != 0) {
+				printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+					" for tmp_tpg\n");
+				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				goto out;
+			}
+			/*
+			 * Locate the destination initiator ACL to be registered
+			 * from the decoded fabric module specific TransportID
+			 * at *i_str.
+			 */
+			spin_lock_bh(&tmp_tpg->acl_node_lock);
+			dest_node_acl = __core_tpg_get_initiator_node_acl(
+						tmp_tpg, i_str);
+			if (dest_node_acl) {
+				atomic_inc(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_inc();
+			}
+			spin_unlock_bh(&tmp_tpg->acl_node_lock);
+
+			if (!(dest_node_acl)) {
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				spin_lock(&dev->se_port_lock);
+				continue;
+			}
+
+			ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+			if (ret != 0) {
+				printk(KERN_ERR "configfs_depend_item() failed"
+					" for dest_node_acl->acl_group\n");
+				atomic_dec(&dest_node_acl->acl_pr_ref_count);
+				smp_mb__after_atomic_dec();
+				core_scsi3_tpg_undepend_item(tmp_tpg);
+				ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+				goto out;
+			}
+
+			dest_tpg = tmp_tpg;
+			printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+				" %s Port RTPI: %hu\n",
+				TPG_TFO(dest_tpg)->get_fabric_name(),
+				dest_node_acl->initiatorname, dest_rtpi);
+
+			spin_lock(&dev->se_port_lock);
+			break;
+		}
+		spin_unlock(&dev->se_port_lock);
+
+		if (!(dest_tpg)) {
+			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+					" dest_tpg\n");
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+#if 0
+		printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+			" tid_len: %d for %s + %s\n",
+			TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+			tpdl, tid_len, i_str, iport_ptr);
+#endif
+		if (tid_len > tpdl) {
+			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+				" %u for Transport ID: %s\n", tid_len, ptr);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		/*
+		 * Locate the destination struct se_dev_entry pointer for matching
+		 * RELATIVE TARGET PORT IDENTIFIER on the receiving I_T Nexus
+		 * Target Port.
+		 */
+		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
+					dest_rtpi);
+		if (!(dest_se_deve)) {
+			printk(KERN_ERR "Unable to locate %s dest_se_deve"
+				" from destination RTPI: %hu\n",
+				TPG_TFO(dest_tpg)->get_fabric_name(),
+				dest_rtpi);
+
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+
+		ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+		if (ret < 0) {
+			printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+					" failed\n");
+			atomic_dec(&dest_se_deve->pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			goto out;
+		}
+#if 0
+		printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+			" dest_se_deve mapped_lun: %u\n",
+			TPG_TFO(dest_tpg)->get_fabric_name(),
+			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
+#endif
+		/*
+		 * Skip any TransportIDs that already have a registration for
+		 * this target port.
+		 */
+		pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+		if (pr_reg_e) {
+			core_scsi3_put_pr_reg(pr_reg_e);
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ptr += tid_len;
+			tpdl -= tid_len;
+			tid_len = 0;
+			continue;
+		}
+		/*
+		 * Allocate a struct pr_transport_id_holder and setup
+		 * the dest_node_acl and dest_se_deve pointers for the
+		 * loop below.
+		 */
+		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
+				GFP_KERNEL);
+		if (!(tidh_new)) {
+			printk(KERN_ERR "Unable to allocate tidh_new\n");
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+			goto out;
+		}
+		INIT_LIST_HEAD(&tidh_new->dest_list);
+		tidh_new->dest_tpg = dest_tpg;
+		tidh_new->dest_node_acl = dest_node_acl;
+		tidh_new->dest_se_deve = dest_se_deve;
+
+		/*
+		 * Allocate, but do NOT add the registration for the
+		 * TransportID referenced SCSI Initiator port.  This
+		 * is done because of the following from spc4r17 in section
+		 * 6.14.3 wrt SPEC_I_PT:
+		 *
+		 * "If a registration fails for any initiator port (e.g., if the
+		 * logical unit does not have enough resources available to
+		 * hold the registration information), no registrations shall be
+		 * made, and the command shall be terminated with
+		 * CHECK CONDITION status."
+		 *
+		 * That means we call __core_scsi3_alloc_registration() here,
+		 * and then call __core_scsi3_add_registration() in the
+		 * 2nd loop which will never fail.
+		 */
+		dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, all_tg_pt, aptpl);
+		if (!(dest_pr_reg)) {
+			core_scsi3_lunacl_undepend_item(dest_se_deve);
+			core_scsi3_nodeacl_undepend_item(dest_node_acl);
+			core_scsi3_tpg_undepend_item(dest_tpg);
+			kfree(tidh_new);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		tidh_new->dest_pr_reg = dest_pr_reg;
+		list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+		ptr += tid_len;
+		tpdl -= tid_len;
+		tid_len = 0;
+
+	}
+	/*
+	 * Go ahead and create registrations from tid_dest_list for the
+	 * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
+	 * and dest_se_deve.
+	 *
+	 * The SA Reservation Key from the PROUT is set for the
+	 * registration, and ALL_TG_PT is also passed.  ALL_TG_PT=1
+	 * means that the TransportID Initiator port will be
+	 * registered on all of the target ports in the SCSI target device,
+	 * while ALL_TG_PT=0 means the registration will only be for the
+	 * SCSI target port the PROUT REGISTER with SPEC_I_PT=1
+	 * was received.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+
+		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+		prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
+						PR_REG_ISID_ID_LEN);
+
+		__core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+					dest_pr_reg, 0, 0);
+
+		printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+			" registered Transport ID for Node: %s%s Mapped LUN:"
+			" %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+			dest_node_acl->initiatorname, (prf_isid) ?
+			&i_buf[0] : "", dest_se_deve->mapped_lun);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+
+	return 0;
+out:
+	/*
+	 * For the failure case, release everything from tid_dest_list
+	 * including *dest_pr_reg and the configfs dependencies.
+	 */
+	list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+		dest_tpg = tidh->dest_tpg;
+		dest_node_acl = tidh->dest_node_acl;
+		dest_se_deve = tidh->dest_se_deve;
+		dest_pr_reg = tidh->dest_pr_reg;
+		dest_local_nexus = tidh->dest_local_nexus;
+
+		list_del(&tidh->dest_list);
+		kfree(tidh);
+		/*
+		 * Release any extra ALL_TG_PT=1 registrations for
+		 * the SPEC_I_PT=1 case.
+		 */
+		list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
+				&dest_pr_reg->pr_reg_atp_list,
+				pr_reg_atp_mem_list) {
+			list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
+			core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg_tmp);
+		}
+
+		kfree(dest_pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
+
+		if (dest_local_nexus)
+			continue;
+
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+		core_scsi3_tpg_undepend_item(dest_tpg);
+	}
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held
+ */
+static int __core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	struct se_lun *lun;
+	struct se_portal_group *tpg;
+	struct se_subsystem_dev *su_dev = SU_DEV(dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char tmp[512], isid_buf[32];
+	ssize_t len = 0;
+	int reg_count = 0;
+
+	memset(buf, 0, pr_aptpl_buf_len);
+	/*
+	 * Called to clear metadata once APTPL has been deactivated.
+	 */
+	if (clear_aptpl_metadata) {
+		snprintf(buf, pr_aptpl_buf_len,
+				"No Registrations or Reservations\n");
+		return 0;
+	}
+	/*
+	 * Walk the registration list..
+	 */
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+
+		tmp[0] = '\0';
+		isid_buf[0] = '\0';
+		tpg = pr_reg->pr_reg_nacl->se_tpg;
+		lun = pr_reg->pr_reg_tg_pt_lun;
+		/*
+		 * Write out any ISID value to APTPL metadata that was included
+		 * in the original registration.
+		 */
+		if (pr_reg->isid_present_at_reg)
+			snprintf(isid_buf, 32, "initiator_sid=%s\n",
+					pr_reg->pr_reg_isid);
+		/*
+		 * Include special metadata if the pr_reg matches the
+		 * reservation holder.
+		 */
+		if (dev->dev_pr_res_holder == pr_reg) {
+			snprintf(tmp, 512, "PR_REG_START: %d"
+				"\ninitiator_fabric=%s\n"
+				"initiator_node=%s\n%s"
+				"sa_res_key=%llu\n"
+				"res_holder=1\nres_type=%02x\n"
+				"res_scope=%02x\nres_all_tg_pt=%d\n"
+				"mapped_lun=%u\n", reg_count,
+				TPG_TFO(tpg)->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_res_type,
+				pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		} else {
+			snprintf(tmp, 512, "PR_REG_START: %d\n"
+				"initiator_fabric=%s\ninitiator_node=%s\n%s"
+				"sa_res_key=%llu\nres_holder=0\n"
+				"res_all_tg_pt=%d\nmapped_lun=%u\n",
+				reg_count, TPG_TFO(tpg)->get_fabric_name(),
+				pr_reg->pr_reg_nacl->initiatorname, isid_buf,
+				pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
+				pr_reg->pr_res_mapped_lun);
+		}
+
+		if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+			printk(KERN_ERR "Unable to update APTPL metadata:"
+				" buffer too small\n");
+			spin_unlock(&T10_RES(su_dev)->registration_lock);
+			return -1;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+
+		/*
+		 * Include information about the associated SCSI target port.
+		 */
+		snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
+			"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
+			" %d\n", TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_wwn(tpg),
+			TPG_TFO(tpg)->tpg_get_tag(tpg),
+			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+
+		if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
+			printk(KERN_ERR "Unable to update APTPL metadata:"
+				" buffer too small\n");
+			spin_unlock(&T10_RES(su_dev)->registration_lock);
+			return -1;
+		}
+		len += sprintf(buf+len, "%s", tmp);
+		reg_count++;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	if (!(reg_count))
+		len += sprintf(buf+len, "No Registrations or Reservations");
+
+	return 0;
+}
+
+static int core_scsi3_update_aptpl_buf(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len,
+	int clear_aptpl_metadata)
+{
+	int ret;
+
+	spin_lock(&dev->dev_reservation_lock);
+	ret = __core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	spin_unlock(&dev->dev_reservation_lock);
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->aptpl_file_mutex held
+ */
+static int __core_scsi3_write_aptpl_to_file(
+	struct se_device *dev,
+	unsigned char *buf,
+	u32 pr_aptpl_buf_len)
+{
+	struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+	struct file *file;
+	struct iovec iov[1];
+	mm_segment_t old_fs;
+	int flags = O_RDWR | O_CREAT | O_TRUNC;
+	char path[512];
+	int ret;
+
+	memset(iov, 0, sizeof(struct iovec));
+	memset(path, 0, 512);
+
+	if (strlen(&wwn->unit_serial[0]) >
+	    (512 - sizeof("/var/target/pr/aptpl_"))) {
+		printk(KERN_ERR "WWN value for struct se_device does not fit"
+			" into path buffer\n");
+		return -1;
+	}
+
+	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+	file = filp_open(path, flags, 0600);
+	if (IS_ERR(file) || !file || !file->f_dentry) {
+		printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+			" failed\n", path);
+		return -1;
+	}
+
+	iov[0].iov_base = &buf[0];
+	if (!(pr_aptpl_buf_len))
+		iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
+	else
+		iov[0].iov_len = pr_aptpl_buf_len;
+
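+	/*
+	 * vfs_writev() expects a user-space iovec, so temporarily lift the
+	 * address limit with set_fs(get_ds()) to allow writing the kernel
+	 * buffer, and restore the original segment afterwards.
+	 */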
+	old_fs = get_fs();
+	set_fs(get_ds());
+	ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
+	set_fs(old_fs);
+
+	if (ret < 0) {
+		printk("Error writing APTPL metadata file: %s\n", path);
+		filp_close(file, NULL);
+		return -1;
+	}
+	filp_close(file, NULL);
+
+	return 0;
+}
+
+static int core_scsi3_update_and_write_aptpl(
+	struct se_device *dev,
+	unsigned char *in_buf,
+	u32 in_pr_aptpl_buf_len)
+{
+	unsigned char null_buf[64], *buf;
+	u32 pr_aptpl_buf_len;
+	int ret, clear_aptpl_metadata = 0;
+	/*
+	 * Can be called with a NULL pointer from PROUT service action CLEAR
+	 */
+	if (!(in_buf)) {
+		memset(null_buf, 0, 64);
+		buf = &null_buf[0];
+		/*
+		 * This will clear the APTPL metadata to:
+		 * "No Registrations or Reservations" status
+		 */
+		pr_aptpl_buf_len = 64;
+		clear_aptpl_metadata = 1;
+	} else {
+		buf = in_buf;
+		pr_aptpl_buf_len = in_pr_aptpl_buf_len;
+	}
+
+	ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
+				clear_aptpl_metadata);
+	if (ret != 0)
+		return -1;
+	/*
+	 * __core_scsi3_write_aptpl_to_file() will call strlen()
+	 * on the passed buf to determine pr_aptpl_buf_len.
+	 */
+	ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
+	if (ret != 0)
+		return -1;
+
+	return ret;
+}
+
+static int core_scsi3_emulate_pro_register(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int all_tg_pt,
+	int spec_i_pt,
+	int ignore_key)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	/* Used for APTPL metadata w/ UNREGISTER */
+	unsigned char *pr_aptpl_buf = NULL;
+	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+	int pr_holder = 0, ret = 0, type;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+
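+	/*
+	 * isid_ptr remains NULL if the fabric does not export an initiator
+	 * SID; when available it is recorded with the registration created
+	 * by core_scsi3_alloc_registration() below.
+	 */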
+	if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+		memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
+		TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+				PR_REG_ISID_LEN);
+		isid_ptr = &isid_buf[0];
+	}
+	/*
+	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
+	 */
+	pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!(pr_reg_e)) {
+		if (res_key) {
+			printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+				" for SA REGISTER, returning CONFLICT\n");
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * Do nothing but return GOOD status.
+		 */
+		if (!(sa_res_key))
+			return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
+		if (!(spec_i_pt)) {
+			/*
+			 * Perform the Service Action REGISTER on the Initiator
+			 * Port Endpoint that the PRO was received from on the
+			 * Logical Unit of the SCSI device server.
+			 */
+			ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+					se_sess->se_node_acl, se_deve, isid_ptr,
+					sa_res_key, all_tg_pt, aptpl,
+					ignore_key, 0);
+			if (ret != 0) {
+				printk(KERN_ERR "Unable to allocate"
+					" struct t10_pr_registration\n");
+				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			}
+		} else {
+			/*
+			 * Register both the Initiator port that received
+			 * PROUT SA REGISTER + SPEC_I_PT=1 and extract SCSI
+			 * TransportID from Parameter list and loop through
+			 * fabric dependent parameter list while calling
+			 * the logic of core_scsi3_alloc_registration() for
+			 * each TransportID provided SCSI Initiator Port/Device
+			 */
+			ret = core_scsi3_decode_spec_i_port(cmd, se_tpg,
+					isid_ptr, sa_res_key, all_tg_pt, aptpl);
+			if (ret != 0)
+				return ret;
+		}
+		/*
+		 * Nothing left to do for the APTPL=0 case.
+		 */
+		if (!(aptpl)) {
+			pr_tmpl->pr_aptpl_active = 0;
+			core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+			printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+					" REGISTER\n");
+			return 0;
+		}
+		/*
+		 * Locate the newly allocated local I_T Nexus *pr_reg, and
+		 * update the APTPL metadata information using its
+		 * preallocated *pr_reg->pr_aptpl_buf.
+		 */
+		pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+				se_sess->se_node_acl, se_sess);
+
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret)) {
+			pr_tmpl->pr_aptpl_active = 1;
+			printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg);
+		return ret;
+	} else {
+		/*
+		 * Locate the existing *pr_reg via struct se_node_acl pointers
+		 */
+		pr_reg = pr_reg_e;
+		type = pr_reg->pr_res_type;
+
+		if (!(ignore_key)) {
+			if (res_key != pr_reg->pr_res_key) {
+				printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+					" res_key: 0x%016Lx does not match"
+					" existing SA REGISTER res_key:"
+					" 0x%016Lx\n", res_key,
+					pr_reg->pr_res_key);
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			}
+		}
+		if (spec_i_pt) {
+			printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+				" set while sa_res_key=0\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		}
+		/*
+		 * An existing ALL_TG_PT=1 registration being released
+		 * must also set ALL_TG_PT=1 in the incoming PROUT.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+			printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+				" registration exists, but ALL_TG_PT=1 bit not"
+				" present in received PROUT\n");
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_INVALID_CDB_FIELD;
+		}
+		/*
+		 * Allocate APTPL metadata buffer used for UNREGISTER ops
+		 */
+		if (aptpl) {
+			pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+						GFP_KERNEL);
+			if (!(pr_aptpl_buf)) {
+				printk(KERN_ERR "Unable to allocate"
+					" pr_aptpl_buf\n");
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_LU_COMM_FAILURE;
+			}
+		}
+		/*
+		 * sa_res_key=0: Unregister Reservation Key for registered
+		 * I_T Nexus. sa_res_key!=0: Change Reservation Key for
+		 * registered I_T Nexus.
+		 */
+		if (!(sa_res_key)) {
+			pr_holder = core_scsi3_check_implict_release(
+					SE_DEV(cmd), pr_reg);
+			if (pr_holder < 0) {
+				kfree(pr_aptpl_buf);
+				core_scsi3_put_pr_reg(pr_reg);
+				return PYX_TRANSPORT_RESERVATION_CONFLICT;
+			}
+
+			spin_lock(&pr_tmpl->registration_lock);
+			/*
+			 * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+			 * and matching pr_res_key.
+			 */
+			if (pr_reg->pr_reg_all_tg_pt) {
+				list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					if (!(pr_reg_p->pr_reg_all_tg_pt))
+						continue;
+
+					if (pr_reg_p->pr_res_key != res_key)
+						continue;
+
+					if (pr_reg == pr_reg_p)
+						continue;
+
+					if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+						   pr_reg_p->pr_reg_nacl->initiatorname))
+						continue;
+
+					__core_scsi3_free_registration(dev,
+							pr_reg_p, NULL, 0);
+				}
+			}
+			/*
+			 * Release the calling I_T Nexus registration now..
+			 */
+			__core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+							NULL, 1);
+			/*
+			 * From spc4r17, section 5.7.11.3 Unregistering
+			 *
+			 * If the persistent reservation is a registrants only
+			 * type, the device server shall establish a unit
+			 * attention condition for the initiator port associated
+			 * with every registered I_T nexus except for the I_T
+			 * nexus on which the PERSISTENT RESERVE OUT command was
+			 * received, with the additional sense code set to
+			 * RESERVATIONS RELEASED.
+			 */
+			if (pr_holder &&
+			   ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+			    (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
+				list_for_each_entry(pr_reg_p,
+						&pr_tmpl->registration_list,
+						pr_reg_list) {
+
+					core_scsi3_ua_allocate(
+						pr_reg_p->pr_reg_nacl,
+						pr_reg_p->pr_res_mapped_lun,
+						0x2A,
+						ASCQ_2AH_RESERVATIONS_RELEASED);
+				}
+			}
+			spin_unlock(&pr_tmpl->registration_lock);
+
+			if (!(aptpl)) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for UNREGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret)) {
+				pr_tmpl->pr_aptpl_active = 1;
+				printk("SPC-3 PR: Set APTPL Bit Activated"
+						" for UNREGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			return ret;
+		} else {
+			/*
+			 * Increment PRgeneration counter for struct se_device
+			 * upon a successful REGISTER, see spc4r17 section 6.3.2
+			 * READ_KEYS service action.
+			 */
+			pr_reg->pr_res_generation = core_scsi3_pr_generation(
+							SE_DEV(cmd));
+			pr_reg->pr_res_key = sa_res_key;
+			printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+				" Key for %s to: 0x%016Lx PRgeneration:"
+				" 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+				(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+				pr_reg->pr_reg_nacl->initiatorname,
+				pr_reg->pr_res_key, pr_reg->pr_res_generation);
+
+			if (!(aptpl)) {
+				pr_tmpl->pr_aptpl_active = 0;
+				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+				core_scsi3_put_pr_reg(pr_reg);
+				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+						" for REGISTER\n");
+				return 0;
+			}
+
+			ret = core_scsi3_update_and_write_aptpl(dev,
+					&pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret)) {
+				pr_tmpl->pr_aptpl_active = 1;
+				printk("SPC-3 PR: Set APTPL Bit Activated"
+						" for REGISTER\n");
+			}
+
+			kfree(pr_aptpl_buf);
+			core_scsi3_put_pr_reg(pr_reg);
+		}
+	}
+	return 0;
+}
+
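+/*
+ * Map a SPC-3 PR TYPE value to the human readable string used in the
+ * PR log messages in this file.
+ */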
+unsigned char *core_scsi3_pr_dump_type(int type)
+{
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+		return "Write Exclusive Access";
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+		return "Exclusive Access";
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+		return "Write Exclusive Access, Registrants Only";
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+		return "Exclusive Access, Registrants Only";
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+		return "Write Exclusive Access, All Registrants";
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		return "Exclusive Access, All Registrants";
+	default:
+		break;
+	}
+
+	return "Unknown SPC-3 PR Type";
+}
+
+static int core_scsi3_pro_reserve(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int ret, prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	se_tpg = se_sess->se_tpg;
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RESERVE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * An application client creates a persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RESERVE service action through
+	 * a registered I_T nexus with the following parameters:
+	 *    a) RESERVATION KEY set to the value of the reservation key that is
+	 * 	 registered with the logical unit for the I_T nexus; and
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.9: Reserving:
+	 *
+	 * From above:
+	 *  b) TYPE field and SCOPE field set to the persistent reservation
+	 *     being created.
+	 *
+	 * Only one persistent reservation is allowed at a time per logical unit
+	 * and that persistent reservation has a scope of LU_SCOPE.
+	 */
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * See if we have an existing PR reservation holder pointer at
+	 * struct se_device->dev_pr_res_holder in the form of a struct
+	 * t10_pr_registration *pr_res_holder.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if ((pr_res_holder)) {
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command from an I_T nexus other than a persistent reservation
+		 * holder (see 5.7.10) that attempts to create a persistent
+		 * reservation when a persistent reservation already exists for
+		 * the logical unit, then the command shall be completed with
+		 * RESERVATION CONFLICT status.
+		 */
+		if (pr_res_holder != pr_reg) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s while reservation already held by"
+				" [%s]: %s, returning RESERVATION_CONFLICT\n",
+				CMD_TFO(cmd)->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If a persistent reservation holder attempts to modify the
+		 * type or scope of an existing persistent reservation, the
+		 * command shall be completed with RESERVATION CONFLICT status.
+		 */
+		if ((pr_res_holder->pr_res_type != type) ||
+		    (pr_res_holder->pr_res_scope != scope)) {
+			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+				" [%s]: %s trying to change TYPE and/or SCOPE,"
+				" while reservation already held by [%s]: %s,"
+				" returning RESERVATION_CONFLICT\n",
+				CMD_TFO(cmd)->get_fabric_name(),
+				se_sess->se_node_acl->initiatorname,
+				TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+				pr_res_holder->pr_reg_nacl->initiatorname);
+
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * From spc4r17 Section 5.7.9: Reserving:
+		 *
+		 * If the device server receives a PERSISTENT RESERVE OUT
+		 * command with RESERVE service action where the TYPE field and
+		 * the SCOPE field contain the same values as the existing type
+		 * and scope from a persistent reservation holder, it shall not
+		 * make any change to the existing persistent reservation and
+		 * shall complete the command with GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	/*
+	 * Otherwise, our *pr_reg becomes the PR reservation holder for said
+	 * TYPE/SCOPE.  Also set the received scope and type in *pr_reg.
+	 */
+	pr_reg->pr_res_scope = scope;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_holder = 1;
+	dev->dev_pr_res_holder = pr_reg;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+			CMD_TFO(cmd)->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			(prf_isid) ? &i_buf[0] : "");
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+					" for RESERVE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_reserve(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
+		break;
+	default:
+		printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+			" 0x%02x\n", type);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return ret;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_release(
+	struct se_device *dev,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int explict)
+{
+	struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Go ahead and release the current PR reservation holder.
+	 */
+	dev->dev_pr_res_holder = NULL;
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (explict) ? "explicit" : "implicit",
+		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+		tfo->get_fabric_name(), se_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "");
+	/*
+	 * Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
+	 */
+	pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
+}
+
+static int core_scsi3_emulate_pro_release(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	int ret, all_reg = 0;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for RELEASE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * If there is no persistent reservation or in response to a persistent
+	 * reservation release request from a registered I_T nexus that is not a
+	 * persistent reservation holder (see 5.7.10), the device server shall
+	 * do the following:
+	 *
+	 *     a) Not release the persistent reservation, if any;
+	 *     b) Not remove any registrations; and
+	 *     c) Complete the command with GOOD status.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		/*
+		 * No persistent reservation, return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
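+	/*
+	 * For All Registrants type reservations every registered I_T nexus
+	 * is a reservation holder, so the pr_res_holder pointer comparison
+	 * below only applies to the non All Registrants types.
+	 */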
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+		all_reg = 1;
+
+	if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+		/*
+		 * Non 'All Registrants' PR Type cases..
+		 * Release request from a registered I_T nexus that is not a
+		 * persistent reservation holder. return GOOD status.
+		 */
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing:
+	 *
+	 * Only the persistent reservation holder (see 5.7.10) is allowed to
+	 * release a persistent reservation.
+	 *
+	 * An application client releases the persistent reservation by issuing
+	 * a PERSISTENT RESERVE OUT command with RELEASE service action through
+	 * an I_T nexus that is a persistent reservation holder with the
+	 * following parameters:
+	 *
+	 *     a) RESERVATION KEY field set to the value of the reservation key
+	 *	  that is registered with the logical unit for the I_T nexus;
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+			" does not match existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * From spc4r17 Section 5.7.11.2 Releasing and above:
+	 *
+	 * b) TYPE field and SCOPE field set to match the persistent
+	 *    reservation being released.
+	 */
+	if ((pr_res_holder->pr_res_type != type) ||
+	    (pr_res_holder->pr_res_scope != scope)) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+			" reservation from [%s]: %s with different TYPE "
+			"and/or SCOPE while reservation already held by"
+			" [%s]: %s, returning RESERVATION_CONFLICT\n",
+			CMD_TFO(cmd)->get_fabric_name(),
+			se_sess->se_node_acl->initiatorname,
+			TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+			pr_res_holder->pr_reg_nacl->initiatorname);
+
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * In response to a persistent reservation release request from the
+	 * persistent reservation holder the device server shall perform a
+	 * release by doing the following as an uninterrupted series of actions:
+	 * a) Release the persistent reservation;
+	 * b) Not remove any registration(s);
+	 * c) If the released persistent reservation is a registrants only type
+	 * or all registrants type persistent reservation,
+	 *    the device server shall establish a unit attention condition for
+	 *    the initiator port associated with every registered I_T nexus
+	 *    other than the I_T nexus on which the PERSISTENT
+	 *    RESERVE OUT command with RELEASE service action was received,
+	 *    with the additional sense code set to RESERVATIONS RELEASED; and
+	 * d) If the persistent reservation is of any other type, the device
+	 *    server shall not establish a unit attention condition.
+	 */
+	__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
+			pr_reg, 1);
+
+	spin_unlock(&dev->dev_reservation_lock);
+
+	if ((type != PR_TYPE_WRITE_EXCLUSIVE_REGONLY) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) &&
+	    (type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+	    (type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		/*
+		 * If no UNIT ATTENTION conditions will be established for
+		 * PR_TYPE_WRITE_EXCLUSIVE or PR_TYPE_EXCLUSIVE_ACCESS
+		 * go ahead and check for APTPL=1 update+write below
+		 */
+		goto write_aptpl;
+	}
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list,
+			pr_reg_list) {
+		/*
+		 * Do not establish a UNIT ATTENTION condition
+		 * for the calling I_T Nexus
+		 */
+		if (pr_reg_p == pr_reg)
+			continue;
+
+		core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+				pr_reg_p->pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+write_aptpl:
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg);
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_clear(
+	struct se_cmd *cmd,
+	u64 res_key)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	u32 pr_res_mapped_lun = 0;
+	int calling_it_nexus = 0;
+	/*
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+			se_sess->se_node_acl, se_sess);
+	if (!(pr_reg_n)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for CLEAR\n");
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * From spc4r17 section 5.7.11.6, Clearing:
+	 *
+	 * Any application client may release the persistent reservation and
+	 * remove all registrations from a device server by issuing a
+	 * PERSISTENT RESERVE OUT command with CLEAR service action through a
+	 * registered I_T nexus with the following parameter:
+	 *
+	 *	a) RESERVATION KEY field set to the value of the reservation key
+	 * 	   that is registered with the logical unit for the I_T nexus.
+	 */
+	if (res_key != pr_reg_n->pr_res_key) {
+		printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+			" res_key: 0x%016Lx does not match"
+			" existing SA REGISTER res_key:"
+			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * a) Release the persistent reservation, if any;
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder) {
+		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
+		__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			pr_res_holder, 0);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * b) Remove all registration(s) (see spc4r17 5.7.7);
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg, NULL,
+					calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every registered I_T nexus other
+		 *    than the I_T nexus on which the PERSISTENT RESERVE OUT
+		 *    command with CLEAR service action was received, with the
+		 *    additional sense code set to RESERVATIONS PREEMPTED.
+		 */
+		if (!(calling_it_nexus))
+			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
+		CMD_TFO(cmd)->get_fabric_name());
+
+	if (pr_tmpl->pr_aptpl_active) {
+		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+				" for CLEAR\n");
+	}
+
+	core_scsi3_pr_generation(dev);
+	return 0;
+}
+
+/*
+ * Called with struct se_device->dev_reservation_lock held.
+ */
+static void __core_scsi3_complete_pro_preempt(
+	struct se_device *dev,
+	struct t10_pr_registration *pr_reg,
+	struct list_head *preempt_and_abort_list,
+	int type,
+	int scope,
+	int abort)
+{
+	struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+	struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+	char i_buf[PR_REG_ISID_ID_LEN];
+	int prf_isid;
+
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Do an implicit RELEASE of the existing reservation.
+	 */
+	if (dev->dev_pr_res_holder)
+		__core_scsi3_complete_pro_release(dev, nacl,
+				dev->dev_pr_res_holder, 0);
+
+	dev->dev_pr_res_holder = pr_reg;
+	pr_reg->pr_res_holder = 1;
+	pr_reg->pr_res_type = type;
+	pr_reg->pr_res_scope = scope;
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		core_scsi3_pr_dump_type(type),
+		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+	printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
+		nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
+	/*
+	 * For PREEMPT_AND_ABORT, add the preempting reservation's
+	 * struct t10_pr_registration to the list that will be compared
+	 * against received CDBs..
+	 */
+	if (preempt_and_abort_list)
+		list_add_tail(&pr_reg->pr_reg_abort_list,
+				preempt_and_abort_list);
+}
+
+static void core_scsi3_release_preempt_and_abort(
+	struct list_head *preempt_and_abort_list,
+	struct t10_pr_registration *pr_reg_holder)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+
+		list_del(&pr_reg->pr_reg_abort_list);
+		if (pr_reg_holder == pr_reg)
+			continue;
+		if (pr_reg->pr_res_holder) {
+			printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+			continue;
+		}
+
+		pr_reg->pr_reg_deve = NULL;
+		pr_reg->pr_reg_nacl = NULL;
+		kfree(pr_reg->pr_aptpl_buf);
+		kmem_cache_free(t10_pr_reg_cache, pr_reg);
+	}
+}
+
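+/*
+ * Returns 0 when the command was received under a reservation key present
+ * on the PREEMPT_AND_ABORT list (and is therefore subject to abort), or 1
+ * when no matching key is found.
+ */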
+int core_scsi3_check_cdb_abort_and_preempt(
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *cmd)
+{
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
+				pr_reg_abort_list) {
+		if (pr_reg->pr_res_key == cmd->pr_res_key)
+			return 0;
+	}
+
+	return 1;
+}
+
+static int core_scsi3_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct list_head preempt_and_abort_list;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	u32 pr_res_mapped_lun = 0;
+	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+	int prh_type = 0, prh_scope = 0, ret;
+
+	if (!(se_sess))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg_n)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
+			(abort) ? "_AND_ABORT" : "");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	if (pr_reg_n->pr_res_key != res_key) {
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	if (scope != PR_SCOPE_LU_SCOPE) {
+		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	INIT_LIST_HEAD(&preempt_and_abort_list);
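+	/*
+	 * preempt_and_abort_list collects the registrations freed below so
+	 * that, for PREEMPT_AND_ABORT, core_tmr_lun_reset() can abort their
+	 * outstanding CDBs before core_scsi3_release_preempt_and_abort()
+	 * releases the entries.
+	 */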
+
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (pr_res_holder &&
+	   ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
+		all_reg = 1;
+
+	if (!(all_reg) && !(sa_res_key)) {
+		spin_unlock(&dev->dev_reservation_lock);
+		core_scsi3_put_pr_reg(pr_reg_n);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From spc4r17, section 5.7.11.4.4 Removing Registrations:
+	 *
+	 * If the SERVICE ACTION RESERVATION KEY field does not identify a
+	 * persistent reservation holder or there is no persistent reservation
+	 * holder (i.e., there is no persistent reservation), then the device
+	 * server shall perform a preempt by doing the following in an
+	 * uninterrupted series of actions. (See below..)
+	 */
+	if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+		/*
+		 * No existing or SA Reservation Key matching reservations..
+		 *
+		 * PROUT SA PREEMPT with All Registrant type reservations are
+		 * allowed to be processed without a matching SA Reservation Key
+		 */
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+			/*
+			 * Removing of registrations in non all registrants
+			 * type reservations without a matching SA reservation
+			 * key.
+			 *
+			 * a) Remove the registrations for all I_T nexuses
+			 *    specified by the SERVICE ACTION RESERVATION KEY
+			 *    field;
+			 * b) Ignore the contents of the SCOPE and TYPE fields;
+			 * c) Process tasks as defined in 5.7.1; and
+			 * d) Establish a unit attention condition for the
+			 *    initiator port associated with every I_T nexus
+			 *    that lost its registration other than the I_T
+			 *    nexus on which the PERSISTENT RESERVE OUT command
+			 *    was received, with the additional sense code set
+			 *    to REGISTRATIONS PREEMPTED.
+			 */
+			if (!(all_reg)) {
+				if (pr_reg->pr_res_key != sa_res_key)
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, calling_it_nexus);
+				released_regs++;
+			} else {
+				/*
+				 * Case for any existing all registrants type
+				 * reservation, follow logic in spc4r17 section
+				 * 5.7.11.4 Preempting, Table 52 and Figure 7.
+				 *
+				 * For a ZERO SA Reservation key, release
+				 * all other registrations and do an implicit
+				 * release of active persistent reservation.
+				 *
+				 * For a non-ZERO SA Reservation key, only
+				 * release the matching reservation key from
+				 * registrations.
+				 */
+				if ((sa_res_key) &&
+				     (pr_reg->pr_res_key != sa_res_key))
+					continue;
+
+				calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+				if (calling_it_nexus)
+					continue;
+
+				pr_reg_nacl = pr_reg->pr_reg_nacl;
+				pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+				__core_scsi3_free_registration(dev, pr_reg,
+					(abort) ? &preempt_and_abort_list :
+						NULL, 0);
+				released_regs++;
+			}
+			if (!(calling_it_nexus))
+				core_scsi3_ua_allocate(pr_reg_nacl,
+					pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_PREEMPTED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+		/*
+		 * If a PERSISTENT RESERVE OUT with a PREEMPT service action or
+		 * a PREEMPT AND ABORT service action sets the SERVICE ACTION
+		 * RESERVATION KEY field to a value that does not match any
+		 * registered reservation key, then the device server shall
+		 * complete the command with RESERVATION CONFLICT status.
+		 */
+		if (!(released_regs)) {
+			spin_unlock(&dev->dev_reservation_lock);
+			core_scsi3_put_pr_reg(pr_reg_n);
+			return PYX_TRANSPORT_RESERVATION_CONFLICT;
+		}
+		/*
+		 * For an existing all registrants type reservation
+		 * with a zero SA reservation key, preempt the existing
+		 * reservation with the new PR type and scope.
+		 */
+		if (pr_res_holder && all_reg && !(sa_res_key)) {
+			__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+				(abort) ? &preempt_and_abort_list : NULL,
+				type, scope, abort);
+
+			if (abort)
+				core_scsi3_release_preempt_and_abort(
+					&preempt_and_abort_list, pr_reg_n);
+		}
+		spin_unlock(&dev->dev_reservation_lock);
+
+		if (pr_tmpl->pr_aptpl_active) {
+			ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+					&pr_reg_n->pr_aptpl_buf[0],
+					pr_tmpl->pr_aptpl_buf_len);
+			if (!(ret))
+				printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+					" metadata for PREEMPT%s\n", (abort) ?
+					"_AND_ABORT" : "");
+		}
+
+		core_scsi3_put_pr_reg(pr_reg_n);
+		core_scsi3_pr_generation(SE_DEV(cmd));
+		return 0;
+	}
+	/*
+	 * The PREEMPTing SA reservation key matches that of the
+	 * existing persistent reservation, first, we check if
+	 * we are preempting our own reservation.
+	 * From spc4r17, section 5.7.11.4.3 Preempting
+	 * persistent reservations and registration handling
+	 *
+	 * If an all registrants persistent reservation is not
+	 * present, it is not an error for the persistent
+	 * reservation holder to preempt itself (i.e., a
+	 * PERSISTENT RESERVE OUT with a PREEMPT service action
+	 * or a PREEMPT AND ABORT service action with the
+	 * SERVICE ACTION RESERVATION KEY value equal to the
+	 * persistent reservation holder's reservation key that
+	 * is received from the persistent reservation holder).
+	 * In that case, the device server shall establish the
+	 * new persistent reservation and maintain the
+	 * registration.
+	 */
+	prh_type = pr_res_holder->pr_res_type;
+	prh_scope = pr_res_holder->pr_res_scope;
+	/*
+	 * If the SERVICE ACTION RESERVATION KEY field identifies a
+	 * persistent reservation holder (see 5.7.10), the device
+	 * server shall perform a preempt by doing the following as
+	 * an uninterrupted series of actions:
+	 *
+	 * a) Release the persistent reservation for the holder
+	 *    identified by the SERVICE ACTION RESERVATION KEY field;
+	 */
+	if (pr_reg_n != pr_res_holder)
+		__core_scsi3_complete_pro_release(dev,
+				pr_res_holder->pr_reg_nacl,
+				dev->dev_pr_res_holder, 0);
+	/*
+	 * b) Remove the registrations for all I_T nexuses identified
+	 *    by the SERVICE ACTION RESERVATION KEY field, except the
+	 *    I_T nexus that is being used for the PERSISTENT RESERVE
+	 *    OUT command. If an all registrants persistent reservation
+	 *    is present and the SERVICE ACTION RESERVATION KEY field
+	 *    is set to zero, then all registrations shall be removed
+	 *    except for that of the I_T nexus that is being used for
+	 *    the PERSISTENT RESERVE OUT command;
+	 */
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+		if (calling_it_nexus)
+			continue;
+
+		if (pr_reg->pr_res_key != sa_res_key)
+			continue;
+
+		pr_reg_nacl = pr_reg->pr_reg_nacl;
+		pr_res_mapped_lun = pr_reg->pr_res_mapped_lun;
+		__core_scsi3_free_registration(dev, pr_reg,
+				(abort) ? &preempt_and_abort_list : NULL,
+				calling_it_nexus);
+		/*
+		 * e) Establish a unit attention condition for the initiator
+		 *    port associated with every I_T nexus that lost its
+		 *    persistent reservation and/or registration, with the
+		 *    additional sense code set to REGISTRATIONS PREEMPTED;
+		 */
+		core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+				ASCQ_2AH_RESERVATIONS_PREEMPTED);
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * c) Establish a persistent reservation for the preempting
+	 *    I_T nexus using the contents of the SCOPE and TYPE fields;
+	 */
+	__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
+			(abort) ? &preempt_and_abort_list : NULL,
+			type, scope, abort);
+	/*
+	 * d) Process tasks as defined in 5.7.1;
+	 * e) See above..
+	 * f) If the type or scope has changed, then for every I_T nexus
+	 *    whose reservation key was not removed, except for the I_T
+	 *    nexus on which the PERSISTENT RESERVE OUT command was
+	 *    received, the device server shall establish a unit
+	 *    attention condition for the initiator port associated with
+	 *    that I_T nexus, with the additional sense code set to
+	 *    RESERVATIONS RELEASED. If the type or scope have not
+	 *    changed, then no unit attention condition(s) shall be
+	 *    established for this reason.
+	 */
+	if ((prh_type != type) || (prh_scope != scope)) {
+		spin_lock(&pr_tmpl->registration_lock);
+		list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+				&pr_tmpl->registration_list, pr_reg_list) {
+
+			calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
+			if (calling_it_nexus)
+				continue;
+
+			core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+					pr_reg->pr_res_mapped_lun, 0x2A,
+					ASCQ_2AH_RESERVATIONS_RELEASED);
+		}
+		spin_unlock(&pr_tmpl->registration_lock);
+	}
+	spin_unlock(&dev->dev_reservation_lock);
+	/*
+	 * Call LUN_RESET logic upon list of struct t10_pr_registration,
+	 * All received CDBs for the matching existing reservation and
+	 * registrations undergo ABORT_TASK logic.
+	 *
+	 * From there, core_scsi3_release_preempt_and_abort() will
+	 * release every registration in the list (which have already
+	 * been removed from the primary pr_reg list), except the
+	 * new persistent reservation holder, the calling Initiator Port.
+	 */
+	if (abort) {
+		core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, cmd);
+		core_scsi3_release_preempt_and_abort(&preempt_and_abort_list,
+						pr_reg_n);
+	}
+
+	if (pr_tmpl->pr_aptpl_active) {
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&pr_reg_n->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+				"%s\n", (abort) ? "_AND_ABORT" : "");
+	}
+
+	core_scsi3_put_pr_reg(pr_reg_n);
+	core_scsi3_pr_generation(SE_DEV(cmd));
+	return 0;
+}
+
+static int core_scsi3_emulate_pro_preempt(
+	struct se_cmd *cmd,
+	int type,
+	int scope,
+	u64 res_key,
+	u64 sa_res_key,
+	int abort)
+{
+	int ret = 0;
+
+	switch (type) {
+	case PR_TYPE_WRITE_EXCLUSIVE:
+	case PR_TYPE_EXCLUSIVE_ACCESS:
+	case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
+	case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
+	case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
+	case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
+		ret = core_scsi3_pro_preempt(cmd, type, scope,
+				res_key, sa_res_key, abort);
+		break;
+	default:
+		printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+			" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return ret;
+}
+
+
+static int core_scsi3_emulate_pro_register_and_move(
+	struct se_cmd *cmd,
+	u64 res_key,
+	u64 sa_res_key,
+	int aptpl,
+	int unreg)
+{
+	struct se_session *se_sess = SE_SESS(cmd);
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *se_deve, *dest_se_deve = NULL;
+	struct se_lun *se_lun = SE_LUN(cmd);
+	struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
+	struct se_port *se_port;
+	struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
+	struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+	struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *initiator_str;
+	char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+	u32 tid_len, tmp_tid_len;
+	int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+	unsigned short rtpi;
+	unsigned char proto_ident;
+
+	if (!(se_sess) || !(se_lun)) {
+		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	memset(dest_iport, 0, 64);
+	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+	se_tpg = se_sess->se_tpg;
+	tf_ops = TPG_TFO(se_tpg);
+	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+	/*
+	 * Follow logic from spc4r17 Section 5.7.8, Table 50 --
+	 *	Register behaviors for a REGISTER AND MOVE service action
+	 *
+	 * Locate the existing *pr_reg via struct se_node_acl pointers
+	 */
+	pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+				se_sess);
+	if (!(pr_reg)) {
+		printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+			" *pr_reg for REGISTER_AND_MOVE\n");
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * The provided reservation key must match the existing reservation key
+	 * provided during this initiator's I_T nexus registration.
+	 */
+	if (res_key != pr_reg->pr_res_key) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+			" res_key: 0x%016Lx does not match existing SA REGISTER"
+			" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+	/*
+	 * The service action reservation key needs to be non-zero.
+	 */
+	if (!(sa_res_key)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+			" sa_res_key\n");
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * Determine the Relative Target Port Identifier where the reservation
+	 * will be moved to for the TransportID containing SCSI initiator WWN
+	 * information.
+	 */
+	rtpi = (buf[18] & 0xff) << 8;
+	rtpi |= buf[19] & 0xff;
+	tid_len = (buf[20] & 0xff) << 24;
+	tid_len |= (buf[21] & 0xff) << 16;
+	tid_len |= (buf[22] & 0xff) << 8;
+	tid_len |= buf[23] & 0xff;
+
+	if ((tid_len + 24) != cmd->data_length) {
+		printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+			" does not equal CDB data_length: %u\n", tid_len,
+			cmd->data_length);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
+		if (se_port->sep_rtpi != rtpi)
+			continue;
+		dest_se_tpg = se_port->sep_tpg;
+		if (!(dest_se_tpg))
+			continue;
+		dest_tf_ops = TPG_TFO(dest_se_tpg);
+		if (!(dest_tf_ops))
+			continue;
+
+		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&dev->se_port_lock);
+
+		ret = core_scsi3_tpg_depend_item(dest_se_tpg);
+		if (ret != 0) {
+			printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+				" for dest_se_tpg\n");
+			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
+			smp_mb__after_atomic_dec();
+			core_scsi3_put_pr_reg(pr_reg);
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+		}
+
+		spin_lock(&dev->se_port_lock);
+		break;
+	}
+	spin_unlock(&dev->se_port_lock);
+
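+	/*
+	 * If a matching destination TPG was found above, it now holds both a
+	 * tpg_pr_ref_count reference and a configfs dependency; these are
+	 * dropped via core_scsi3_tpg_undepend_item() on every exit path.
+	 */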
+	if (!(dest_se_tpg) || (!dest_tf_ops)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" fabric ops from Relative Target Port Identifier:"
+			" %hu\n", rtpi);
+		core_scsi3_put_pr_reg(pr_reg);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	proto_ident = (buf[24] & 0x0f);
+#if 0
+	printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+			" 0x%02x\n", proto_ident);
+#endif
+	if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+			" proto_ident: 0x%02x does not match ident: 0x%02x"
+			" from fabric: %s\n", proto_ident,
+			dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
+			dest_tf_ops->get_fabric_name());
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+			" contain a valid tpg_parse_pr_out_transport_id"
+			" function pointer\n");
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+	initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
+			(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+	if (!(initiator_str)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+			" initiator_str from Transport ID\n");
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+		" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+		"port" : "device", initiator_str, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action specifies a TransportID that is the same as the initiator port
+	 * of the I_T nexus for the command received, then the command shall
+	 * be terminated with CHECK CONDITION status, with the sense key set to
+	 * ILLEGAL REQUEST, and the additional sense code set to INVALID FIELD
+	 * IN PARAMETER LIST.
+	 */
+	pr_reg_nacl = pr_reg->pr_reg_nacl;
+	matching_iname = (!strcmp(initiator_str,
+				  pr_reg_nacl->initiatorname)) ? 1 : 0;
+	if (!(matching_iname))
+		goto after_iport_check;
+
+	if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+			" matches: %s on received I_T Nexus\n", initiator_str,
+			pr_reg_nacl->initiatorname);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
+		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+			" matches: %s %s on received I_T Nexus\n",
+			initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
+			pr_reg->pr_reg_isid);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+after_iport_check:
+	/*
+	 * Locate the destination struct se_node_acl from the received Transport ID
+	 */
+	spin_lock_bh(&dest_se_tpg->acl_node_lock);
+	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
+				initiator_str);
+	if (dest_node_acl) {
+		atomic_inc(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_inc();
+	}
+	spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+
+	if (!(dest_node_acl)) {
+		printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+			" TransportID: %s\n", dest_tf_ops->get_fabric_name(),
+			initiator_str);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+	ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
+	if (ret != 0) {
+		printk(KERN_ERR "core_scsi3_nodeacl_depend_item() failed for"
+			" dest_node_acl\n");
+		atomic_dec(&dest_node_acl->acl_pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_node_acl = NULL;
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+#if 0
+	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname);
+#endif
+	/*
+	 * Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
+	 * PORT IDENTIFIER.
+	 */
+	dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
+	if (!(dest_se_deve)) {
+		printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+			" %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
+		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+		goto out;
+	}
+
+	ret = core_scsi3_lunacl_depend_item(dest_se_deve);
+	if (ret < 0) {
+		printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+		atomic_dec(&dest_se_deve->pr_ref_count);
+		smp_mb__after_atomic_dec();
+		dest_se_deve = NULL;
+		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+		goto out;
+	}
+#if 0
+	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+		" ACL for dest_se_deve->mapped_lun: %u\n",
+		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+		dest_se_deve->mapped_lun);
+#endif
+	/*
+	 * A persistent reservation needs to already exist in order to
+	 * successfully complete the REGISTER_AND_MOVE service action.
+	 */
+	spin_lock(&dev->dev_reservation_lock);
+	pr_res_holder = dev->dev_pr_res_holder;
+	if (!(pr_res_holder)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+			" currently held\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+		goto out;
+	}
+	/*
+	 * The I_T Nexus on which the PROUT command was received must be
+	 * the reservation holder.
+	 *
+	 * From spc4r17 section 5.7.8  Table 50 --
+	 * 	Register behaviors for a REGISTER AND MOVE service action
+	 */
+	if (pr_res_holder != pr_reg) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+			" Nexus is not reservation holder\n");
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		goto out;
+	}
+	/*
+	 * From spc4r17 section 5.7.8: registering and moving reservation
+	 *
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER AND MOVE service
+	 * action is received and the established persistent reservation is a
+	 * Write Exclusive - All Registrants type or Exclusive Access -
+	 * All Registrants type reservation, then the command shall be completed
+	 * with RESERVATION CONFLICT status.
+	 */
+	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+			" reservation for type: %s\n",
+			core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
+		spin_unlock(&dev->dev_reservation_lock);
+		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+		goto out;
+	}
+	pr_res_nacl = pr_res_holder->pr_reg_nacl;
+	/*
+	 * b) Ignore the contents of the (received) SCOPE and TYPE fields;
+	 */
+	type = pr_res_holder->pr_res_type;
+	scope = pr_res_holder->pr_res_scope;
+	/*
+	 * c) Associate the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field with the I_T nexus specified as the
+	 *    destination of the register and move, where:
+	 *    A) The I_T nexus is specified by the TransportID and the
+	 *	 RELATIVE TARGET PORT IDENTIFIER field (see 6.14.4); and
+	 *    B) Regardless of the TransportID format used, the association for
+	 *       the initiator port is based on either the initiator port name
+	 *       (see 3.1.71) on SCSI transport protocols where port names are
+	 *       required or the initiator port identifier (see 3.1.70) on SCSI
+	 *       transport protocols where port names are not required;
+	 * d) Register the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field;
+	 * e) Retain the reservation key specified in the SERVICE ACTION
+	 *    RESERVATION KEY field and associated information;
+	 *
+	 * Also, it is not an error for a REGISTER AND MOVE service action to
+	 * register an I_T nexus that is already registered with the same
+	 * reservation key or a different reservation key.
+	 */
+	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+					iport_ptr);
+	if (!(dest_pr_reg)) {
+		ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+				dest_node_acl, dest_se_deve, iport_ptr,
+				sa_res_key, 0, aptpl, 2, 1);
+		if (ret != 0) {
+			spin_unlock(&dev->dev_reservation_lock);
+			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+			goto out;
+		}
+		dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
+						iport_ptr);
+		new_reg = 1;
+	}
+	/*
+	 * f) Release the persistent reservation for the persistent reservation
+	 *    holder (i.e., the I_T nexus on which the PERSISTENT RESERVE OUT
+	 *    command was received);
+	 */
+	__core_scsi3_complete_pro_release(dev, pr_res_nacl,
+			dev->dev_pr_res_holder, 0);
+	/*
+	 * g) Move the persistent reservation to the specified I_T nexus using
+	 *    the same scope and type as the persistent reservation released in
+	 *    item f); and
+	 */
+	dev->dev_pr_res_holder = dest_pr_reg;
+	dest_pr_reg->pr_res_holder = 1;
+	dest_pr_reg->pr_res_type = type;
+	dest_pr_reg->pr_res_scope = scope;
+	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
+				PR_REG_ISID_ID_LEN);
+	/*
+	 * Increment PRGeneration for existing registrations.
+	 */
+	if (!(new_reg))
+		dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
+	spin_unlock(&dev->dev_reservation_lock);
+
+	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+		" created new reservation holder TYPE: %s on object RTPI:"
+		" %hu  PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+		core_scsi3_pr_dump_type(type), rtpi,
+		dest_pr_reg->pr_res_generation);
+	printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+		" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
+		tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
+		(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
+		dest_node_acl->initiatorname, (iport_ptr != NULL) ?
+		iport_ptr : "");
+	/*
+	 * It is now safe to release configfs group dependencies for destination
+	 * of Transport ID Initiator Device/Port Identifier
+	 */
+	core_scsi3_lunacl_undepend_item(dest_se_deve);
+	core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	/*
+	 * h) If the UNREG bit is set to one, unregister (see 5.7.11.3) the I_T
+	 * nexus on which PERSISTENT RESERVE OUT command was received.
+	 */
+	if (unreg) {
+		spin_lock(&pr_tmpl->registration_lock);
+		__core_scsi3_free_registration(dev, pr_reg, NULL, 1);
+		spin_unlock(&pr_tmpl->registration_lock);
+	} else
+		core_scsi3_put_pr_reg(pr_reg);
+
+	/*
+	 * Clear the APTPL metadata if APTPL has been disabled, otherwise
+	 * write out the updated metadata to struct file for this SCSI device.
+	 */
+	if (!(aptpl)) {
+		pr_tmpl->pr_aptpl_active = 0;
+		core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
+		printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+				" REGISTER_AND_MOVE\n");
+	} else {
+		pr_tmpl->pr_aptpl_active = 1;
+		ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+				&dest_pr_reg->pr_aptpl_buf[0],
+				pr_tmpl->pr_aptpl_buf_len);
+		if (!(ret))
+			printk("SPC-3 PR: Set APTPL Bit Activated for"
+					" REGISTER_AND_MOVE\n");
+	}
+
+	core_scsi3_put_pr_reg(dest_pr_reg);
+	return 0;
+out:
+	if (dest_se_deve)
+		core_scsi3_lunacl_undepend_item(dest_se_deve);
+	if (dest_node_acl)
+		core_scsi3_nodeacl_undepend_item(dest_node_acl);
+	core_scsi3_tpg_undepend_item(dest_se_tpg);
+	core_scsi3_put_pr_reg(pr_reg);
+	return ret;
+}
+
+static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
+	__v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
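+	/*
+	 * The reservation key is an 8-byte big-endian field: __v1 holds the
+	 * four most significant bytes and __v2 the four least significant,
+	 * so the key is reassembled as (__v1 << 32) | __v2 below.
+	 */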
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * See spc4r17 section 6.14 Table 170
+ */
+static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
+{
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u64 res_key, sa_res_key;
+	int sa, scope, type, aptpl;
+	int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
+	/*
+	 * FIXME: A NULL struct se_session pointer means this is not coming from
+	 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
+	 */
+	if (!(SE_SESS(cmd)))
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	if (cmd->data_length < 24) {
+		printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+			" length too small: %u\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
+	 */
+	sa = (cdb[1] & 0x1f);
+	scope = (cdb[2] & 0xf0);
+	type = (cdb[2] & 0x0f);
+	/*
+	 * From PERSISTENT_RESERVE_OUT parameter list (payload)
+	 */
+	res_key = core_scsi3_extract_reservation_key(&buf[0]);
+	sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
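+	/*
+	 * Per the PROUT parameter list layout, bytes 0-7 carry the
+	 * RESERVATION KEY and bytes 8-15 the SERVICE ACTION RESERVATION KEY,
+	 * both 8-byte big-endian values extracted above.
+	 */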
+	/*
+	 * REGISTER_AND_MOVE uses a different SA parameter list containing
+	 * SCSI TransportIDs.
+	 */
+	if (sa != PRO_REGISTER_AND_MOVE) {
+		spec_i_pt = (buf[20] & 0x08);
+		all_tg_pt = (buf[20] & 0x04);
+		aptpl = (buf[20] & 0x01);
+	} else {
+		aptpl = (buf[17] & 0x01);
+		unreg = (buf[17] & 0x02);
+	}
+	/*
+	 * SPEC_I_PT=1 is only valid for Service action: REGISTER
+	 */
+	if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	/*
+	 * From spc4r17 section 6.14:
+	 *
+	 * If the SPEC_I_PT bit is set to zero, the service action is not
+	 * REGISTER AND MOVE, and the parameter list length is not 24, then
+	 * the command shall be terminated with CHECK CONDITION status, with
+	 * the sense key set to ILLEGAL REQUEST, and the additional sense
+	 * code set to PARAMETER LIST LENGTH ERROR.
+	 */
+	if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+	    (cmd->data_length != 24)) {
+		printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+			" list length: %u\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+	}
+	/*
+	 * core_scsi3_emulate_pro_* function parameters
+	 * are defined by spc4r17 Table 174:
+	 * PERSISTENT_RESERVE_OUT service actions and valid parameters.
+	 */
+	switch (sa) {
+	case PRO_REGISTER:
+		return core_scsi3_emulate_pro_register(cmd,
+			res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
+	case PRO_RESERVE:
+		return core_scsi3_emulate_pro_reserve(cmd,
+			type, scope, res_key);
+	case PRO_RELEASE:
+		return core_scsi3_emulate_pro_release(cmd,
+			type, scope, res_key);
+	case PRO_CLEAR:
+		return core_scsi3_emulate_pro_clear(cmd, res_key);
+	case PRO_PREEMPT:
+		return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 0);
+	case PRO_PREEMPT_AND_ABORT:
+		return core_scsi3_emulate_pro_preempt(cmd, type, scope,
+					res_key, sa_res_key, 1);
+	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+		return core_scsi3_emulate_pro_register(cmd,
+			0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
+	case PRO_REGISTER_AND_MOVE:
+		return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
+				sa_res_key, aptpl, unreg);
+	default:
+		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	return PYX_TRANSPORT_INVALID_CDB_FIELD;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_KEYS
+ *
+ * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
+ */
+static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 add_len = 0, off = 8;
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
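+	/*
+	 * Bytes 0-3 above carry the big-endian PRGENERATION count; bytes 4-7
+	 * (ADDITIONAL LENGTH) are filled in after the registration list walk
+	 * below, once the number of returned keys is known.
+	 */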
+
+	spin_lock(&T10_RES(su_dev)->registration_lock);
+	list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+			pr_reg_list) {
+		/*
+		 * Check for overflow of the 8-byte PRI READ_KEYS payload and
+		 * next reservation key list descriptor.
+		 */
+		if ((add_len + 8) > (cmd->data_length - 8))
+			break;
+
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+
+		add_len += 8;
+	}
+	spin_unlock(&T10_RES(su_dev)->registration_lock);
+
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_RESERVATION
+ *
+ * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
+ */
+static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct t10_pr_registration *pr_reg;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u64 pr_res_key;
+	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&se_dev->dev_reservation_lock);
+	pr_reg = se_dev->dev_pr_res_holder;
+	if ((pr_reg)) {
+		/*
+		 * Set the hardcoded Additional Length
+		 */
+		buf[4] = ((add_len >> 24) & 0xff);
+		buf[5] = ((add_len >> 16) & 0xff);
+		buf[6] = ((add_len >> 8) & 0xff);
+		buf[7] = (add_len & 0xff);
+
+		if (cmd->data_length < 22) {
+			spin_unlock(&se_dev->dev_reservation_lock);
+			return 0;
+		}
+		/*
+		 * Set the Reservation key.
+		 *
+		 * From spc4r17, section 5.7.10:
+		 * A persistent reservation holder has its reservation key
+		 * returned in the parameter data from a PERSISTENT
+		 * RESERVE IN command with READ RESERVATION service action as
+		 * follows:
+		 * a) For a persistent reservation of the type Write Exclusive
+		 *    - All Registrants or Exclusive Access - All Registrants,
+		 *      the reservation key shall be set to zero; or
+		 * b) For all other persistent reservation types, the
+		 *    reservation key shall be set to the registered
+		 *    reservation key for the I_T nexus that holds the
+		 *    persistent reservation.
+		 */
+		if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+		    (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
+			pr_res_key = 0;
+		else
+			pr_res_key = pr_reg->pr_res_key;
+
+		buf[8] = ((pr_res_key >> 56) & 0xff);
+		buf[9] = ((pr_res_key >> 48) & 0xff);
+		buf[10] = ((pr_res_key >> 40) & 0xff);
+		buf[11] = ((pr_res_key >> 32) & 0xff);
+		buf[12] = ((pr_res_key >> 24) & 0xff);
+		buf[13] = ((pr_res_key >> 16) & 0xff);
+		buf[14] = ((pr_res_key >> 8) & 0xff);
+		buf[15] = (pr_res_key & 0xff);
+		/*
+		 * Set the SCOPE and TYPE
+		 */
+		buf[21] = (pr_reg->pr_res_scope & 0xf0) |
+			  (pr_reg->pr_res_type & 0x0f);
+	}
+	spin_unlock(&se_dev->dev_reservation_lock);
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action REPORT_CAPABILITIES
+ *
+ * See spc4r17 section 6.13.4 Table 165
+ */
+static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u16 add_len = 8; /* Hardcoded to 8. */
+
+	if (cmd->data_length < 6) {
+		printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+			" %u too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((add_len >> 8) & 0xff);
+	buf[1] = (add_len & 0xff);
+	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
+	buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
+	buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
+	buf[2] |= 0x01; /* PTPL_C: Persistence across Target Power Loss bit */
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Task Mask Valid bit.
+	 */
+	buf[3] |= 0x80;
+	/*
+	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+	 */
+	buf[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit
+	 */
+	if (pr_tmpl->pr_aptpl_active)
+		buf[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 167
+	 */
+	buf[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+	buf[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	buf[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	buf[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+	return 0;
+}
+
+/*
+ * PERSISTENT_RESERVE_IN Service Action READ_FULL_STATUS
+ *
+ * See spc4r17 section 6.13.5 Table 168 and 169
+ */
+static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = SE_DEV(cmd);
+	struct se_node_acl *se_nacl;
+	struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+	struct se_portal_group *se_tpg;
+	struct t10_pr_registration *pr_reg, *pr_reg_tmp;
+	struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
+	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+	u32 off = 8; /* off into first Full Status descriptor */
+	int format_code = 0;
+
+	if (cmd->data_length < 8) {
+		printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+			" too small\n", cmd->data_length);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+	buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
+	buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
+	buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
+	buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+
+	spin_lock(&pr_tmpl->registration_lock);
+	list_for_each_entry_safe(pr_reg, pr_reg_tmp,
+			&pr_tmpl->registration_list, pr_reg_list) {
+
+		se_nacl = pr_reg->pr_reg_nacl;
+		se_tpg = pr_reg->pr_reg_nacl->se_tpg;
+		add_desc_len = 0;
+
+		atomic_inc(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_inc();
+		spin_unlock(&pr_tmpl->registration_lock);
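+		/*
+		 * pr_res_holders was bumped above so this registration cannot
+		 * be released while registration_lock is dropped around the
+		 * fabric TransportID callbacks; it is decremented again once
+		 * the descriptor has been filled in.
+		 */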
+		/*
+		 * Determine expected length of $FABRIC_MOD specific
+		 * TransportID full status descriptor.
+		 */
+		exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+				se_tpg, se_nacl, pr_reg, &format_code);
+
+		if ((exp_desc_len + add_len) > cmd->data_length) {
+			printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+				" out of buffer: %d\n", cmd->data_length);
+			spin_lock(&pr_tmpl->registration_lock);
+			atomic_dec(&pr_reg->pr_res_holders);
+			smp_mb__after_atomic_dec();
+			break;
+		}
+		/*
+		 * Set RESERVATION KEY
+		 */
+		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
+		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
+		buf[off++] = (pr_reg->pr_res_key & 0xff);
+		off += 4; /* Skip Over Reserved area */
+
+		/*
+		 * Set ALL_TG_PT bit if PROUT SA REGISTER had this set.
+		 */
+		if (pr_reg->pr_reg_all_tg_pt)
+			buf[off] = 0x02;
+		/*
+		 * The struct se_lun pointer will be present for the
+		 * reservation holder for PR_HOLDER bit.
+		 *
+		 * Also, if this registration is the reservation
+		 * holder, fill in SCOPE and TYPE in the next byte.
+		 */
+		if (pr_reg->pr_res_holder) {
+			buf[off++] |= 0x01;
+			buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
+				     (pr_reg->pr_res_type & 0x0f);
+		} else
+			off += 2;
+
+		off += 4; /* Skip over reserved area */
+		/*
+		 * From spc4r17 6.3.15:
+		 *
+		 * If the ALL_TG_PT bit set to zero, the RELATIVE TARGET PORT
+		 * IDENTIFIER field contains the relative port identifier (see
+		 * 3.1.120) of the target port that is part of the I_T nexus
+		 * described by this full status descriptor. If the ALL_TG_PT
+		 * bit is set to one, the contents of the RELATIVE TARGET PORT
+		 * IDENTIFIER field are not defined by this standard.
+		 */
+		if (!(pr_reg->pr_reg_all_tg_pt)) {
+			struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+
+			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
+			buf[off++] = (port->sep_rtpi & 0xff);
+		} else
+			off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+
+		/*
+		 * Now, have the $FABRIC_MOD fill in the protocol identifier
+		 */
+		desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+				se_nacl, pr_reg, &format_code, &buf[off+4]);
+
+		spin_lock(&pr_tmpl->registration_lock);
+		atomic_dec(&pr_reg->pr_res_holders);
+		smp_mb__after_atomic_dec();
+		/*
+		 * Set the ADDITIONAL DESCRIPTOR LENGTH
+		 */
+		buf[off++] = ((desc_len >> 24) & 0xff);
+		buf[off++] = ((desc_len >> 16) & 0xff);
+		buf[off++] = ((desc_len >> 8) & 0xff);
+		buf[off++] = (desc_len & 0xff);
+		/*
+		 * 24 is the size of the full status descriptor header,
+		 * excluding the TransportID (which carries the $FABRIC_MOD
+		 * specific initiator device/port WWN information).
+		 * WWN information.
+		 *
+		 *  See spc4r17 Section 6.13.5 Table 169
+		 */
+		add_desc_len = (24 + desc_len);
+
+		off += desc_len;
+		add_len += add_desc_len;
+	}
+	spin_unlock(&pr_tmpl->registration_lock);
+	/*
+	 * Set ADDITIONAL_LENGTH
+	 */
+	buf[4] = ((add_len >> 24) & 0xff);
+	buf[5] = ((add_len >> 16) & 0xff);
+	buf[6] = ((add_len >> 8) & 0xff);
+	buf[7] = (add_len & 0xff);
+
+	return 0;
+}
+
+static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
+{
+	switch (cdb[1] & 0x1f) {
+	case PRI_READ_KEYS:
+		return core_scsi3_pri_read_keys(cmd);
+	case PRI_READ_RESERVATION:
+		return core_scsi3_pri_read_reservation(cmd);
+	case PRI_REPORT_CAPABILITIES:
+		return core_scsi3_pri_report_capabilities(cmd);
+	case PRI_READ_FULL_STATUS:
+		return core_scsi3_pri_read_full_status(cmd);
+	default:
+		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+			" action: 0x%02x\n", cdb[1] & 0x1f);
+		return PYX_TRANSPORT_INVALID_CDB_FIELD;
+	}
+
+}
+
+int core_scsi3_emulate_pr(struct se_cmd *cmd)
+{
+	unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+	struct se_device *dev = cmd->se_dev;
+	/*
+	 * Following spc2r20 5.5.1 Reservations overview:
+	 *
+	 * If a logical unit has been reserved by any RESERVE command and is
+	 * still reserved by any initiator, all PERSISTENT RESERVE IN and all
+	 * PERSISTENT RESERVE OUT commands shall conflict regardless of
+	 * initiator or service action and shall terminate with a RESERVATION
+	 * CONFLICT status.
+	 */
+	if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
+		printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+			" SPC-2 reservation is held, returning"
+			" RESERVATION_CONFLICT\n");
+		return PYX_TRANSPORT_RESERVATION_CONFLICT;
+	}
+
+	return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
+	       core_scsi3_emulate_pr_out(cmd, cdb) :
+	       core_scsi3_emulate_pr_in(cmd, cdb);
+}
+
+static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
+{
+	return 0;
+}
+
+static int core_pt_seq_non_holder(
+	struct se_cmd *cmd,
+	unsigned char *cdb,
+	u32 pr_reg_type)
+{
+	return 0;
+}
+
+int core_setup_reservations(struct se_device *dev, int force_pt)
+{
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	struct t10_reservation_template *rest = &su_dev->t10_reservation;
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, use the reservations
+	 * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
+	 * cause a problem because libata and some SATA RAID HBAs appear
+	 * under Linux/SCSI, but emulate reservations themselves.
+	 */
+	if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+		rest->res_type = SPC_PASSTHROUGH;
+		rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
+			" emulation\n", TRANSPORT(dev)->name);
+		return 0;
+	}
+	/*
+	 * If SPC-3 or above is reported by real or emulated struct se_device,
+	 * use emulated Persistent Reservations.
+	 */
+	if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+		rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
+		rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
+			" emulation\n", TRANSPORT(dev)->name);
+	} else {
+		rest->res_type = SPC2_RESERVATIONS;
+		rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
+		rest->pr_ops.t10_seq_non_holder =
+				&core_scsi2_reservation_seq_non_holder;
+		printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
+			TRANSPORT(dev)->name);
+	}
+
+	return 0;
+}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
new file mode 100644
index 0000000..5603bcf
--- /dev/null
+++ b/drivers/target/target_core_pr.h
@@ -0,0 +1,67 @@
+#ifndef TARGET_CORE_PR_H
+#define TARGET_CORE_PR_H
+/*
+ * PERSISTENT_RESERVE_OUT service action codes
+ *
+ * spc4r17 section 6.14.2 Table 171
+ */
+#define PRO_REGISTER				0x00
+#define PRO_RESERVE				0x01
+#define PRO_RELEASE				0x02
+#define PRO_CLEAR				0x03
+#define PRO_PREEMPT				0x04
+#define PRO_PREEMPT_AND_ABORT			0x05
+#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
+#define PRO_REGISTER_AND_MOVE			0x07
+/*
+ * PERSISTENT_RESERVE_IN service action codes
+ *
+ * spc4r17 section 6.13.1 Table 159
+ */
+#define PRI_READ_KEYS				0x00
+#define PRI_READ_RESERVATION			0x01
+#define PRI_REPORT_CAPABILITIES			0x02
+#define PRI_READ_FULL_STATUS			0x03
+/*
+ * PERSISTENT_RESERVE_* SCOPE field
+ *
+ * spc4r17 section 6.13.3.3 Table 163
+ */
+#define PR_SCOPE_LU_SCOPE			0x00
+/*
+ * PERSISTENT_RESERVE_* TYPE field
+ *
+ * spc4r17 section 6.13.3.4 Table 164
+ */
+#define PR_TYPE_WRITE_EXCLUSIVE			0x01
+#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
+#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
+#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
+#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
+#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08
+
+#define PR_APTPL_MAX_IPORT_LEN			256
+#define PR_APTPL_MAX_TPORT_LEN			256
+
+extern struct kmem_cache *t10_pr_reg_cache;
+
+extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
+			char *, u32);
+extern int core_scsi2_emulate_crh(struct se_cmd *);
+extern int core_scsi3_alloc_aptpl_registration(
+			struct t10_reservation_template *, u64,
+			unsigned char *, unsigned char *, u32,
+			unsigned char *, u16, u32, int, int, u8);
+extern int core_scsi3_check_aptpl_registration(struct se_device *,
+			struct se_portal_group *, struct se_lun *,
+			struct se_lun_acl *);
+extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
+					     struct se_node_acl *);
+extern void core_scsi3_free_all_registrations(struct se_device *);
+extern unsigned char *core_scsi3_pr_dump_type(int);
+extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
+						  struct se_cmd *);
+extern int core_scsi3_emulate_pr(struct se_cmd *);
+extern int core_setup_reservations(struct se_device *, int);
+
+#endif /* TARGET_CORE_PR_H */
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
new file mode 100644
index 0000000..742d246
--- /dev/null
+++ b/drivers/target/target_core_pscsi.c
@@ -0,0 +1,1470 @@
+/*******************************************************************************
+ * Filename:  target_core_pscsi.c
+ *
+ * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/genhd.h>
+#include <linux/cdrom.h>
+#include <linux/file.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_pscsi.h"
+
+#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
+
+static struct se_subsystem_api pscsi_template;
+
+static void pscsi_req_done(struct request *, int);
+
+/*	pscsi_get_sh():
+ *
+ *
+ */
+static struct Scsi_Host *pscsi_get_sh(u32 host_no)
+{
+	struct Scsi_Host *sh = NULL;
+
+	sh = scsi_host_lookup(host_no);
+	if (IS_ERR(sh)) {
+		printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
+				" %u\n", host_no);
+		return NULL;
+	}
+
+	return sh;
+}
+
+/*	pscsi_attach_hba():
+ *
+ *	pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host
+ *	from the passed SCSI Host ID.
+ */
+static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	int hba_depth;
+	struct pscsi_hba_virt *phv;
+
+	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
+	if (!(phv)) {
+		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
+		return -1;
+	}
+	phv->phv_host_id = host_id;
+	phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+	hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+	atomic_set(&hba->left_queue_depth, hba_depth);
+	atomic_set(&hba->max_queue_depth, hba_depth);
+
+	hba->hba_ptr = (void *)phv;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
+		" Target Core with TCQ Depth: %d\n", hba->hba_id,
+		atomic_read(&hba->max_queue_depth));
+
+	return 0;
+}
+
+static void pscsi_detach_hba(struct se_hba *hba)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct Scsi_Host *scsi_host = phv->phv_lld_host;
+
+	if (scsi_host) {
+		scsi_host_put(scsi_host);
+
+		printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+			" Generic Target Core\n", hba->hba_id,
+			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
+			"Unknown");
+	} else
+		printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+			" from Generic Target Core\n", hba->hba_id);
+
+	kfree(phv);
+	hba->hba_ptr = NULL;
+}
+
+static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+{
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
+	/*
+	 * Release the struct Scsi_Host
+	 */
+	if (!(mode_flag)) {
+		if (!(sh))
+			return 0;
+
+		phv->phv_lld_host = NULL;
+		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
+		atomic_set(&hba->left_queue_depth, hba_depth);
+		atomic_set(&hba->max_queue_depth, hba_depth);
+
+		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+			" %s\n", hba->hba_id, (sh->hostt->name) ?
+			(sh->hostt->name) : "Unknown");
+
+		scsi_host_put(sh);
+		return 0;
+	}
+	/*
+	 * Otherwise, locate struct Scsi_Host from the original passed
+	 * pSCSI Host ID and enable for phba mode
+	 */
+	sh = pscsi_get_sh(phv->phv_host_id);
+	if (!(sh)) {
+		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+			" phv_host_id: %d\n", phv->phv_host_id);
+		return -1;
+	}
+	/*
+	 * Usually the SCSI LLD will use the hostt->can_queue value to define
+	 * its HBA TCQ depth.  Some other drivers (like 2.6 megaraid) don't set
+	 * this at all and set sh->can_queue at runtime.
+	 */
+	hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
+		sh->hostt->can_queue : sh->can_queue;
+
+	atomic_set(&hba->left_queue_depth, hba_depth);
+	atomic_set(&hba->max_queue_depth, hba_depth);
+
+	phv->phv_lld_host = sh;
+	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
+
+	return 1;
+}
+
+static void pscsi_tape_read_blocksize(struct se_device *dev,
+		struct scsi_device *sdev)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(12, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = MODE_SENSE;
+	cdb[4] = 0x0c; /* 12 bytes */
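+	/*
+	 * The 12-byte MODE SENSE(6) response is a 4-byte mode parameter
+	 * header followed by an 8-byte block descriptor; the 3-byte BLOCK
+	 * LENGTH read below sits at bytes 9-11 of the returned data.
+	 */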
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
+			HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	/*
+	 * If MODE_SENSE reports a block size of zero, fall back to 1024.
+	 */
+	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+	if (!sdev->sector_size)
+		sdev->sector_size = 1024;
+out_free:
+	kfree(buf);
+}
+
+static void
+pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char *buf;
+
+	if (sdev->inquiry_len < INQUIRY_LEN)
+		return;
+
+	buf = sdev->inquiry;
+	if (!buf)
+		return;
+	/*
+	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+	 */
+	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
+	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
+	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+}
+
+static int
+pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
+	int ret;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return -1;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x80; /* Unit Serial Number */
+	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+	if (ret)
+		goto out_free;
+
+	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
+
+	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+
+	kfree(buf);
+	return 0;
+
+out_free:
+	kfree(buf);
+	return -1;
+}
+
+static void
+pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
+		struct t10_wwn *wwn)
+{
+	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
+	int ident_len, page_len, off = 4, ret;
+	struct t10_vpd *vpd;
+
+	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
+	if (!buf)
+		return;
+
+	memset(cdb, 0, MAX_COMMAND_SIZE);
+	cdb[0] = INQUIRY;
+	cdb[1] = 0x01; /* Query VPD */
+	cdb[2] = 0x83; /* Device Identifier */
+	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
+	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+
+	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
+			      INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
+			      NULL, HZ, 1, NULL);
+	if (ret)
+		goto out;
+
+	page_len = (buf[2] << 8) | buf[3];
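+	/*
+	 * The VPD 0x83 payload is a list of identification descriptors, each
+	 * with a 4-byte header whose last byte is the IDENTIFIER LENGTH, so
+	 * the loop below advances by (ident_len + 4) per descriptor.
+	 */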
+	while (page_len > 0) {
+		/* Grab a pointer to the Identification descriptor */
+		page_83 = &buf[off];
+		ident_len = page_83[3];
+		if (!ident_len) {
+			printk(KERN_ERR "page_83[3]: identifier"
+					" length zero!\n");
+			break;
+		}
+		printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len);
+
+		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
+		if (!vpd) {
+			printk(KERN_ERR "Unable to allocate memory for"
+					" struct t10_vpd\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&vpd->vpd_list);
+
+		transport_set_vpd_proto_id(vpd, page_83);
+		transport_set_vpd_assoc(vpd, page_83);
+
+		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+		if (transport_set_vpd_ident(vpd, page_83) < 0) {
+			off += (ident_len + 4);
+			page_len -= (ident_len + 4);
+			kfree(vpd);
+			continue;
+		}
+
+		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
+		off += (ident_len + 4);
+		page_len -= (ident_len + 4);
+	}
+
+out:
+	kfree(buf);
+}
+
+/*	pscsi_add_device_to_list():
+ *
+ *
+ */
+static struct se_device *pscsi_add_device_to_list(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	struct pscsi_dev_virt *pdv,
+	struct scsi_device *sd,
+	int dev_flags)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct request_queue *q;
+	struct queue_limits *limits;
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (!sd->queue_depth) {
+		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
+
+		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+			" queue_depth to %d\n", sd->channel, sd->id,
+				sd->lun, sd->queue_depth);
+	}
+	/*
+	 * Setup the local scope queue_limits from struct request_queue->limits
+	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+	 */
+	q = sd->request_queue;
+	limits = &dev_limits.limits;
+	limits->logical_block_size = sd->sector_size;
+	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
+				  queue_max_hw_sectors(q) : sd->host->max_sectors;
+	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
+				  queue_max_sectors(q) : sd->host->max_sectors;
+	dev_limits.hw_queue_depth = sd->queue_depth;
+	dev_limits.queue_depth = sd->queue_depth;
+	/*
+	 * Setup our standard INQUIRY info into se_dev->t10_wwn
+	 */
+	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+	/*
+	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
+	 * which has already been referenced with Linux SCSI code with
+	 * scsi_device_get() in this file's pscsi_create_virtdevice().
+	 *
+	 * The passthrough operations called by the transport_add_device_*
+	 * function below will require this pointer to be set for passthrough
+	 * ops.
+	 *
+	 * For the shutdown case in pscsi_free_device(), this struct
+	 * scsi_device  reference is released with Linux SCSI code
+	 * scsi_device_put() and the pdv->pdv_sd cleared.
+	 */
+	pdv->pdv_sd = sd;
+
+	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
+				se_dev, dev_flags, (void *)pdv,
+				&dev_limits, NULL, NULL);
+	if (!(dev)) {
+		pdv->pdv_sd = NULL;
+		return NULL;
+	}
+
+	/*
+	 * Locate VPD WWN Information used for various purposes within
+	 * the Storage Engine.
+	 */
+	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+		/*
+		 * If VPD Unit Serial returned GOOD status, try
+		 * VPD Device Identification page (0x83).
+		 */
+		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+	}
+
+	/*
+	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
+	 */
+	if (sd->type == TYPE_TAPE)
+		pscsi_tape_read_blocksize(dev, sd);
+	return dev;
+}
+
+static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	struct pscsi_dev_virt *pdv;
+
+	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
+	if (!(pdv)) {
+		printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+		return NULL;
+	}
+	pdv->pdv_se_hba = hba;
+
+	printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+	return (void *)pdv;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_disk(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	struct block_device *bd;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+	/*
+	 * Claim exclusive struct block_device access to struct scsi_device
+	 * for TYPE_DISK using supplied udev_path
+	 */
+	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
+	if (IS_ERR(bd)) {
+		printk("pSCSI: blkdev_get_by_path() failed\n");
+		scsi_device_put(sd);
+		return NULL;
+	}
+	pdv->pdv_bd = bd;
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev)) {
+		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+		scsi_device_put(sd);
+		return NULL;
+	}
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_rom(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	if (scsi_device_get(sd)) {
+		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+			sh->host_no, sd->channel, sd->id, sd->lun);
+		spin_unlock_irq(sh->host_lock);
+		return NULL;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev)) {
+		scsi_device_put(sd);
+		return NULL;
+	}
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+/*
+ * Called with struct Scsi_Host->host_lock held.
+ */
+static struct se_device *pscsi_create_type_other(
+	struct scsi_device *sd,
+	struct pscsi_dev_virt *pdv,
+	struct se_subsystem_dev *se_dev,
+	struct se_hba *hba)
+{
+	struct se_device *dev;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+	struct Scsi_Host *sh = sd->host;
+	u32 dev_flags = 0;
+
+	spin_unlock_irq(sh->host_lock);
+	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
+	if (!(dev))
+		return NULL;
+
+	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
+		sd->channel, sd->id, sd->lun);
+
+	return dev;
+}
+
+static struct se_device *pscsi_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+	struct se_device *dev;
+	struct scsi_device *sd;
+	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+	struct Scsi_Host *sh = phv->phv_lld_host;
+	int legacy_mode_enable = 0;
+
+	if (!(pdv)) {
+		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+				" parameter\n");
+		return NULL;
+	}
+	/*
+	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
+	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
+	 */
+	if (!(sh)) {
+		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+			printk(KERN_ERR "pSCSI: Unable to locate struct"
+				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
+			return NULL;
+		}
+		/*
+		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
+		 * reference, we enforce that udev_path has been set
+		 */
+		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+			printk(KERN_ERR "pSCSI: udev_path attribute has not"
+				" been set before ENABLE=1\n");
+			return NULL;
+		}
+		/*
+		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
+		 * use the original TCM hba ID to reference Linux/SCSI Host No
+		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
+		 */
+		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
+			spin_lock(&hba->device_lock);
+			if (!(list_empty(&hba->hba_dev_list))) {
+				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+					" with active devices\n");
+				spin_unlock(&hba->device_lock);
+				return NULL;
+			}
+			spin_unlock(&hba->device_lock);
+
+			if (pscsi_pmode_enable_hba(hba, 1) != 1)
+				return NULL;
+
+			legacy_mode_enable = 1;
+			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
+			sh = phv->phv_lld_host;
+		} else {
+			sh = pscsi_get_sh(pdv->pdv_host_id);
+			if (!(sh)) {
+				printk(KERN_ERR "pSCSI: Unable to locate"
+					" pdv_host_id: %d\n", pdv->pdv_host_id);
+				return NULL;
+			}
+		}
+	} else {
+		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
+			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+				" struct Scsi_Host exists\n");
+			return NULL;
+		}
+	}
+
+	spin_lock_irq(sh->host_lock);
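+	/*
+	 * Walk the host's device list for the struct scsi_device matching
+	 * the configured channel/target/LUN triple.
+	 */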
+	list_for_each_entry(sd, &sh->__devices, siblings) {
+		if ((pdv->pdv_channel_id != sd->channel) ||
+		    (pdv->pdv_target_id != sd->id) ||
+		    (pdv->pdv_lun_id != sd->lun))
+			continue;
+		/*
+		 * Functions will release the held struct scsi_host->host_lock
+		 * before calling pscsi_add_device_to_list() to register
+		 * struct scsi_device with target_core_mod.
+		 */
+		switch (sd->type) {
+		case TYPE_DISK:
+			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+			break;
+		case TYPE_ROM:
+			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+			break;
+		default:
+			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+			break;
+		}
+
+		if (!(dev)) {
+			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+				scsi_host_put(sh);
+			else if (legacy_mode_enable) {
+				pscsi_pmode_enable_hba(hba, 0);
+				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+			}
+			pdv->pdv_sd = NULL;
+			return NULL;
+		}
+		return dev;
+	}
+	spin_unlock_irq(sh->host_lock);
+
+	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
+
+	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+		scsi_host_put(sh);
+	else if (legacy_mode_enable) {
+		pscsi_pmode_enable_hba(hba, 0);
+		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
+	}
+
+	return NULL;
+}
+
+/*	pscsi_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void pscsi_free_device(void *p)
+{
+	struct pscsi_dev_virt *pdv = p;
+	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	if (sd) {
+		/*
+		 * Release exclusive pSCSI internal struct block_device claim for
+		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+		 */
+		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
+			blkdev_put(pdv->pdv_bd,
+				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+			pdv->pdv_bd = NULL;
+		}
+		/*
+		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
+		 * to struct Scsi_Host now.
+		 */
+		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
+		    (phv->phv_lld_host != NULL))
+			scsi_host_put(phv->phv_lld_host);
+
+		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
+			scsi_device_put(sd);
+
+		pdv->pdv_sd = NULL;
+	}
+
+	kfree(pdv);
+}
+
+static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
+{
+	return container_of(task, struct pscsi_plugin_task, pscsi_task);
+}
+
+
+/*	pscsi_transport_complete():
+ *
+ *
+ */
+static int pscsi_transport_complete(struct se_task *task)
+{
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	int result;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	unsigned char *cdb = &pt->pscsi_cdb[0];
+
+	result = pt->pscsi_result;
+	/*
+	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
+	 * forced.
+	 */
+	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
+	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		if (!TASK_CMD(task)->se_deve)
+			goto after_mode_sense;
+
+		if (TASK_CMD(task)->se_deve->lun_flags &
+				TRANSPORT_LUNFLAGS_READ_ONLY) {
+			unsigned char *buf = (unsigned char *)
+				T_TASK(task->task_se_cmd)->t_task_buf;
+
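+			/*
+			 * The WP bit lives in the DEVICE-SPECIFIC PARAMETER
+			 * byte of the mode parameter header: byte 3 for
+			 * MODE SENSE(10) and byte 2 for MODE SENSE(6), which
+			 * is what the two branches below set.
+			 */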
+			if (cdb[0] == MODE_SENSE_10) {
+				if (!(buf[3] & 0x80))
+					buf[3] |= 0x80;
+			} else {
+				if (!(buf[2] & 0x80))
+					buf[2] |= 0x80;
+			}
+		}
+	}
+after_mode_sense:
+
+	if (sd->type != TYPE_TAPE)
+		goto after_mode_select;
+
+	/*
+	 * Hack to correctly obtain the initiator requested blocksize for
+	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
+	 * struct scsi_device->sector_size will not contain the correct value
+	 * by default, so we go ahead and set it so
+	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
+	 * storage engine.
+	 */
+	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
+	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
+		unsigned char *buf;
+		struct scatterlist *sg = task->task_sg;
+		u16 bdl;
+		u32 blocksize;
+
+		buf = sg_virt(&sg[0]);
+		if (!(buf)) {
+			printk(KERN_ERR "Unable to get buf for scatterlist\n");
+			goto after_mode_select;
+		}
+
+		if (cdb[0] == MODE_SELECT)
+			bdl = (buf[3]);
+		else
+			bdl = (buf[6] << 8) | (buf[7]);
+
+		if (!bdl)
+			goto after_mode_select;
+
+		if (cdb[0] == MODE_SELECT)
+			blocksize = (buf[9] << 16) | (buf[10] << 8) |
+					(buf[11]);
+		else
+			blocksize = (buf[13] << 16) | (buf[14] << 8) |
+					(buf[15]);
+
+		sd->sector_size = blocksize;
+	}
+after_mode_select:
+
+	if (status_byte(result) & CHECK_CONDITION)
+		return 1;
+
+	return 0;
+}
+
+static struct se_task *
+pscsi_alloc_task(struct se_cmd *cmd)
+{
+	struct pscsi_plugin_task *pt;
+	unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
+
+	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+	if (!pt) {
+		printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+		return NULL;
+	}
+
+	/*
+	 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
+	 * allocate the extended CDB buffer for per struct se_task context
+	 * pt->pscsi_cdb now.
+	 */
+	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
+
+		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
+		if (!(pt->pscsi_cdb)) {
+			printk(KERN_ERR "pSCSI: Unable to allocate extended"
+					" pt->pscsi_cdb\n");
+			kfree(pt);
+			return NULL;
+		}
+	} else
+		pt->pscsi_cdb = &pt->__pscsi_cdb[0];
+
+	return &pt->pscsi_task;
+}
+
+static inline void pscsi_blk_init_request(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt,
+	struct request *req,
+	int bidi_read)
+{
+	/*
+	 * Defined as "scsi command" in include/linux/blkdev.h.
+	 */
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	/*
+	 * For the extra BIDI-COMMAND READ struct request we do not
+	 * need to setup the remaining structure members
+	 */
+	if (bidi_read)
+		return;
+	/*
+	 * Setup the done function pointer for struct request,
+	 * also set the end_io_data pointer to struct se_task.
+	 */
+	req->end_io = pscsi_req_done;
+	req->end_io_data = (void *)task;
+	/*
+	 * Load the referenced struct se_task's SCSI CDB into
+	 * include/linux/blkdev.h:struct request->cmd
+	 */
+	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
+	req->cmd = &pt->pscsi_cdb[0];
+	/*
+	 * Setup pointer for outgoing sense data.
+	 */
+	req->sense = (void *)&pt->pscsi_sense[0];
+	req->sense_len = 0;
+}
+
+/*
+ * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
+ */
+static int pscsi_blk_get_request(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+
+	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
+			(task->task_data_direction == DMA_TO_DEVICE),
+			GFP_KERNEL);
+	if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
+		printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+				PTR_ERR(pt->pscsi_req));
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	/*
+	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+	 * and setup rq callback, CDB and sense.
+	 */
+	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+	return 0;
+}
+
+/*      pscsi_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int pscsi_do_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	/*
+	 * Set the struct request->timeout value based on peripheral
+	 * device type from SCSI.
+	 */
+	if (pdv->pdv_sd->type == TYPE_DISK)
+		pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
+	else
+		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
+
+	pt->pscsi_req->retries = PS_RETRY;
+	/*
+	 * Queue the struct request into the struct scsi_device->request_queue.
+	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
+	 * descriptor
+	 */
+	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
+			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
+			pscsi_req_done);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+static void pscsi_free_task(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct se_cmd *cmd = task->task_se_cmd;
+
+	/*
+	 * Release the extended CDB allocation from pscsi_alloc_task()
+	 * if one exists.
+	 */
+	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
+		kfree(pt->pscsi_cdb);
+	/*
+	 * We do not release the bio(s) here associated with this task, as
+	 * this is handled by bio_put() and pscsi_bi_endio().
+	 */
+	kfree(pt);
+}
+
+enum {
+	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
+	Opt_scsi_lun_id, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_scsi_host_id, "scsi_host_id=%d"},
+	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
+	{Opt_scsi_target_id, "scsi_target_id=%d"},
+	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
+	{Opt_err, NULL}
+};
+
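+/*
+ * Illustrative (not from the original source) configfs control string
+ * accepted by pscsi_set_configfs_dev_params() below and parsed against the
+ * token table above:
+ *
+ *   "scsi_host_id=2,scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0"
+ *
+ * scsi_host_id= is rejected when the HBA is already in PHV_LLD_SCSI_HOST_NO
+ * mode.
+ */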
+static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_scsi_host_id:
+			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
+				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+					" scsi_host_id while phv_mode =="
+					" PHV_LLD_SCSI_HOST_NO\n",
+					phv->phv_host_id);
+				ret = -EINVAL;
+				goto out;
+			}
+			match_int(args, &arg);
+			pdv->pdv_host_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
+			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
+			break;
+		case Opt_scsi_channel_id:
+			match_int(args, &arg);
+			pdv->pdv_channel_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+				" ID: %d\n",  phv->phv_host_id,
+				pdv->pdv_channel_id);
+			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
+			break;
+		case Opt_scsi_target_id:
+			match_int(args, &arg);
+			pdv->pdv_target_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+				" ID: %d\n", phv->phv_host_id,
+				pdv->pdv_target_id);
+			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
+			break;
+		case Opt_scsi_lun_id:
+			match_int(args, &arg);
+			pdv->pdv_lun_id = arg;
+			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
+			pdv->pdv_flags |= PDF_HAS_LUN_ID;
+			break;
+		default:
+			break;
+		}
+	}
+
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t pscsi_check_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev)
+{
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+
+	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+			" scsi_lun_id= parameters\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
+					      struct se_subsystem_dev *se_dev,
+					      char *b)
+{
+	struct pscsi_hba_virt *phv = hba->hba_ptr;
+	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+	unsigned char host_id[16];
+	ssize_t bl;
+	int i;
+
+	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
+		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
+	else
+		snprintf(host_id, 16, "PHBA Mode");
+
+	bl = sprintf(b, "SCSI Device Bus Location:"
+		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
+		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
+		host_id);
+
+	if (sd) {
+		bl += sprintf(b + bl, "        ");
+		bl += sprintf(b + bl, "Vendor: ");
+		for (i = 0; i < 8; i++) {
+			if (ISPRINT(sd->vendor[i]))   /* printable character? */
+				bl += sprintf(b + bl, "%c", sd->vendor[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Model: ");
+		for (i = 0; i < 16; i++) {
+			if (ISPRINT(sd->model[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->model[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, " Rev: ");
+		for (i = 0; i < 4; i++) {
+			if (ISPRINT(sd->rev[i]))   /* printable character ? */
+				bl += sprintf(b + bl, "%c", sd->rev[i]);
+			else
+				bl += sprintf(b + bl, " ");
+		}
+		bl += sprintf(b + bl, "\n");
+	}
+	return bl;
+}
+
+static void pscsi_bi_endio(struct bio *bio, int error)
+{
+	bio_put(bio);
+}
+
+static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+{
+	struct bio *bio;
+	/*
+	 * Use bio_kmalloc() following the comment for bio -> struct request
+	 * in block/blk-core.c:blk_make_request()
+	 */
+	bio = bio_kmalloc(GFP_KERNEL, sg_num);
+	if (!(bio)) {
+		printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+		return NULL;
+	}
+	bio->bi_end_io = pscsi_bi_endio;
+
+	return bio;
+}
+
+#if 0
+#define DEBUG_PSCSI(x...) printk(x)
+#else
+#define DEBUG_PSCSI(x...)
+#endif
+
+static int __pscsi_map_task_SG(
+	struct se_task *task,
+	struct scatterlist *task_sg,
+	u32 task_sg_num,
+	int bidi_read)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct page *page;
+	struct scatterlist *sg;
+	u32 data_len = task->task_size, i, len, bytes, off;
+	int nr_pages = (task->task_size + task_sg[0].offset +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
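+	/*
+	 * nr_pages above counts the pages spanned by the payload, including
+	 * the offset of the first scatterlist entry, so the per-bio bio_vec
+	 * allocations below are sized correctly.
+	 */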
+	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	int rw = (task->task_data_direction == DMA_TO_DEVICE);
+
+	if (!task->task_size)
+		return 0;
+	/*
+	 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
+	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
+	 * struct scatterlist memory.  The struct se_task->task_sg[] currently needs
+	 * to be attached to struct bios for submission to Linux/SCSI using
+	 * struct request to struct scsi_device->request_queue.
+	 *
+	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
+	 * is ported to upstream SCSI passthrough functionality that accepts
+	 * struct scatterlist->page_link or struct page as a parameter.
+	 */
+	DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+
+	for_each_sg(task_sg, sg, task_sg_num, i) {
+		page = sg_page(sg);
+		off = sg->offset;
+		len = sg->length;
+
+		DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+			page, len, off);
+
+		while (len > 0 && data_len > 0) {
+			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+			bytes = min(bytes, data_len);
+
+			if (!(bio)) {
+				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+				nr_pages -= nr_vecs;
+				/*
+				 * Calls bio_kmalloc() and sets bio->bi_end_io()
+				 */
+				bio = pscsi_get_bio(pdv, nr_vecs);
+				if (!(bio))
+					goto fail;
+
+				if (rw)
+					bio->bi_rw |= REQ_WRITE;
+
+				DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+					" dir: %s nr_vecs: %d\n", bio,
+					(rw) ? "rw" : "r", nr_vecs);
+				/*
+				 * Set *hbio pointer to handle the case:
+				 * nr_pages > BIO_MAX_PAGES, where additional
+				 * bios need to be added to complete a given
+				 * struct se_task
+				 */
+				if (!hbio)
+					hbio = tbio = bio;
+				else
+					tbio = tbio->bi_next = bio;
+			}
+
+			DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+				" bio: %p page: %p len: %d off: %d\n", i, bio,
+				page, len, off);
+
+			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
+					bio, page, bytes, off);
+			if (rc != bytes)
+				goto fail;
+
+			DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+				bio->bi_vcnt, nr_vecs);
+
+			if (bio->bi_vcnt > nr_vecs) {
+				DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+					" %d i: %d bio: %p, allocating another"
+					" bio\n", bio->bi_vcnt, i, bio);
+				/*
+				 * Clear the pointer so that another bio will
+				 * be allocated with pscsi_get_bio() above, the
+				 * current bio has already been set *tbio and
+				 * bio->bi_next.
+				 */
+				bio = NULL;
+			}
+
+			page++;
+			len -= bytes;
+			data_len -= bytes;
+			off = 0;
+		}
+	}
+	/*
+	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
+	 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
+	 */
+	if (!(bidi_read)) {
+		/*
+		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
+		 * allocate the pSCSI task a struct request.
+		 */
+		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
+					hbio, GFP_KERNEL);
+		if (!(pt->pscsi_req)) {
+			printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+			goto fail;
+		}
+		/*
+		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
+		 * and setup rq callback, CDB and sense.
+		 */
+		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
+
+		return task->task_sg_num;
+	}
+	/*
+	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
+	 * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
+	 */
+	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
+					hbio, GFP_KERNEL);
+	if (!(pt->pscsi_req->next_rq)) {
+		printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+		goto fail;
+	}
+	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
+
+	return task->task_sg_num;
+fail:
+	while (hbio) {
+		bio = hbio;
+		hbio = hbio->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, 0);
+	}
+	return ret;
+}
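
A given se_task may cover more pages than a single bio can describe, so __pscsi_map_task_SG() above caps each bio at BIO_MAX_PAGES bio_vecs and strings additional bios together through bi_next before handing the whole chain to blk_make_request(). A rough userspace sketch of just that chunk-and-chain pattern (the 256-entry cap and the node type are stand-ins, not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

#define MAX_VECS 256	/* stands in for BIO_MAX_PAGES */

struct chunk {
	int nr_vecs;
	struct chunk *next;
};

/* Cover nr_pages with a head/tail-linked chain of chunks, each holding
 * at most MAX_VECS entries -- the same hbio/tbio pattern used above. */
static struct chunk *build_chain(int nr_pages)
{
	struct chunk *head = NULL, *tail = NULL;

	while (nr_pages > 0) {
		struct chunk *c = malloc(sizeof(*c));

		if (!c)
			exit(1);
		c->nr_vecs = (nr_pages > MAX_VECS) ? MAX_VECS : nr_pages;
		c->next = NULL;
		nr_pages -= c->nr_vecs;

		if (!head)
			head = tail = c;
		else
			tail = tail->next = c;
	}
	return head;
}

int main(void)
{
	struct chunk *c, *head = build_chain(600);

	while ((c = head) != NULL) {
		head = c->next;
		printf("chunk with %d vecs\n", c->nr_vecs);	/* 256, 256, 88 */
		free(c);
	}
	return 0;
}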
+
+static int pscsi_map_task_SG(struct se_task *task)
+{
+	int ret;
+
+	/*
+	 * Setup the main struct request for the task->task_sg[] payload
+	 */
+
+	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+	if (ret >= 0 && task->task_sg_bidi) {
+		/*
+		 * If present, set up the extra BIDI-COMMAND SCSI READ
+		 * struct request and payload.
+		 */
+		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
+					task->task_sg_num, 1);
+	}
+
+	if (ret < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	return 0;
+}
+
+/*	pscsi_map_task_non_SG():
+ *
+ *
+ */
+static int pscsi_map_task_non_SG(struct se_task *task)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
+	int ret = 0;
+
+	if (pscsi_blk_get_request(task) < 0)
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+	if (!task->task_size)
+		return 0;
+
+	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
+			pt->pscsi_req, T_TASK(cmd)->t_task_buf,
+			task->task_size, GFP_KERNEL);
+	if (ret < 0) {
+		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
+		return PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+	return 0;
+}
+
+static int pscsi_CDB_none(struct se_task *task)
+{
+	return pscsi_blk_get_request(task);
+}
+
+/*	pscsi_get_cdb():
+ *
+ *
+ */
+static unsigned char *pscsi_get_cdb(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return pt->pscsi_cdb;
+}
+
+/*	pscsi_get_sense_buffer():
+ *
+ *
+ */
+static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
+{
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	return (unsigned char *)&pt->pscsi_sense[0];
+}
+
+/*	pscsi_get_device_rev():
+ *
+ *
+ */
+static u32 pscsi_get_device_rev(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+}
+
+/*	pscsi_get_device_type():
+ *
+ *
+ */
+static u32 pscsi_get_device_type(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+	struct scsi_device *sd = pdv->pdv_sd;
+
+	return sd->type;
+}
+
+static sector_t pscsi_get_blocks(struct se_device *dev)
+{
+	struct pscsi_dev_virt *pdv = dev->dev_ptr;
+
+	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
+		return pdv->pdv_bd->bd_part->nr_sects;
+
+	dump_stack();
+	return 0;
+}
+
+/*	pscsi_handle_SAM_STATUS_failures():
+ *
+ *
+ */
+static inline void pscsi_process_SAM_status(
+	struct se_task *task,
+	struct pscsi_plugin_task *pt)
+{
+	task->task_scsi_status = status_byte(pt->pscsi_result);
+	if ((task->task_scsi_status)) {
+		task->task_scsi_status <<= 1;
+		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+	}
+
+	switch (host_byte(pt->pscsi_result)) {
+	case DID_OK:
+		transport_complete_task(task, (!task->task_scsi_status));
+		break;
+	default:
+		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
+			pt->pscsi_result);
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		TASK_CMD(task)->transport_error_status =
+					PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		transport_complete_task(task, 0);
+		break;
+	}
+
+	return;
+}
+
+static void pscsi_req_done(struct request *req, int uptodate)
+{
+	struct se_task *task = req->end_io_data;
+	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
+
+	pt->pscsi_result = req->errors;
+	pt->pscsi_resid = req->resid_len;
+
+	pscsi_process_SAM_status(task, pt);
+	/*
+	 * Release BIDI-READ if present
+	 */
+	if (req->next_rq != NULL)
+		__blk_put_request(req->q, req->next_rq);
+
+	__blk_put_request(req->q, req);
+	pt->pscsi_req = NULL;
+}
+
+static struct se_subsystem_api pscsi_template = {
+	.name			= "pscsi",
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
+	.cdb_none		= pscsi_CDB_none,
+	.map_task_non_SG	= pscsi_map_task_non_SG,
+	.map_task_SG		= pscsi_map_task_SG,
+	.attach_hba		= pscsi_attach_hba,
+	.detach_hba		= pscsi_detach_hba,
+	.pmode_enable_hba	= pscsi_pmode_enable_hba,
+	.allocate_virtdevice	= pscsi_allocate_virtdevice,
+	.create_virtdevice	= pscsi_create_virtdevice,
+	.free_device		= pscsi_free_device,
+	.transport_complete	= pscsi_transport_complete,
+	.alloc_task		= pscsi_alloc_task,
+	.do_task		= pscsi_do_task,
+	.free_task		= pscsi_free_task,
+	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
+	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
+	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
+	.get_cdb		= pscsi_get_cdb,
+	.get_sense_buffer	= pscsi_get_sense_buffer,
+	.get_device_rev		= pscsi_get_device_rev,
+	.get_device_type	= pscsi_get_device_type,
+	.get_blocks		= pscsi_get_blocks,
+};
+
+static int __init pscsi_module_init(void)
+{
+	return transport_subsystem_register(&pscsi_template);
+}
+
+static void pscsi_module_exit(void)
+{
+	transport_subsystem_release(&pscsi_template);
+}
+
+MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(pscsi_module_init);
+module_exit(pscsi_module_exit);
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
new file mode 100644
index 0000000..a4cd5d3
--- /dev/null
+++ b/drivers/target/target_core_pscsi.h
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_PSCSI_H
+#define TARGET_CORE_PSCSI_H
+
+#define PSCSI_VERSION		"v4.0"
+#define PSCSI_VIRTUAL_HBA_DEPTH	2048
+
+/* used in pscsi_find_alloc_len() */
+#ifndef INQUIRY_DATA_SIZE
+#define INQUIRY_DATA_SIZE	0x24
+#endif
+
+/* used in pscsi_add_device_to_list() */
+#define PSCSI_DEFAULT_QUEUEDEPTH	1
+
+#define PS_RETRY		5
+#define PS_TIMEOUT_DISK		(15*HZ)
+#define PS_TIMEOUT_OTHER	(500*HZ)
+
+#include <linux/device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_device.h>
+#include <linux/kref.h>
+#include <linux/kobject.h>
+
+struct pscsi_plugin_task {
+	struct se_task pscsi_task;
+	unsigned char *pscsi_cdb;
+	unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
+	int	pscsi_direction;
+	int	pscsi_result;
+	u32	pscsi_resid;
+	struct request *pscsi_req;
+} ____cacheline_aligned;
+
+#define PDF_HAS_CHANNEL_ID	0x01
+#define PDF_HAS_TARGET_ID	0x02
+#define PDF_HAS_LUN_ID		0x04
+#define PDF_HAS_VPD_UNIT_SERIAL 0x08
+#define PDF_HAS_VPD_DEV_IDENT	0x10
+#define PDF_HAS_VIRT_HOST_ID	0x20
+
+struct pscsi_dev_virt {
+	int	pdv_flags;
+	int	pdv_host_id;
+	int	pdv_channel_id;
+	int	pdv_target_id;
+	int	pdv_lun_id;
+	struct block_device *pdv_bd;
+	struct scsi_device *pdv_sd;
+	struct se_hba *pdv_se_hba;
+} ____cacheline_aligned;
+
+typedef enum phv_modes {
+	PHV_VIRUTAL_HOST_ID,
+	PHV_LLD_SCSI_HOST_NO
+} phv_modes_t;
+
+struct pscsi_hba_virt {
+	int			phv_host_id;
+	phv_modes_t		phv_mode;
+	struct Scsi_Host	*phv_lld_host;
+} ____cacheline_aligned;
+
+#endif   /*** TARGET_CORE_PSCSI_H ***/
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
new file mode 100644
index 0000000..979aebf
--- /dev/null
+++ b/drivers/target/target_core_rd.c
@@ -0,0 +1,1091 @@
+/*******************************************************************************
+ * Filename:  target_core_rd.c
+ *
+ * This file contains the Storage Engine <-> Ramdisk transport
+ * specific functions.
+ *
+ * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/timer.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_rd.h"
+
+static struct se_subsystem_api rd_dr_template;
+static struct se_subsystem_api rd_mcp_template;
+
+/* #define DEBUG_RAMDISK_MCP */
+/* #define DEBUG_RAMDISK_DR */
+
+/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct rd_host *rd_host;
+
+	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
+	if (!(rd_host)) {
+		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+		return -ENOMEM;
+	}
+
+	rd_host->rd_host_id = host_id;
+
+	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
+	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
+	hba->hba_ptr = (void *) rd_host;
+
+	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+		" Generic Target Core Stack %s\n", hba->hba_id,
+		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
+		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
+		RD_MAX_SECTORS);
+
+	return 0;
+}
+
+static void rd_detach_hba(struct se_hba *hba)
+{
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
+
+	kfree(rd_host);
+	hba->hba_ptr = NULL;
+}
+
+/*	rd_release_device_space():
+ *
+ *
+ */
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+	u32 i, j, page_count = 0, sg_per_table;
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+		return;
+
+	sg_table = rd_dev->sg_table_array;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg = sg_table[i].sg_table;
+		sg_per_table = sg_table[i].rd_sg_count;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = sg_page(&sg[j]);
+			if ((pg)) {
+				__free_page(pg);
+				page_count++;
+			}
+		}
+
+		kfree(sg);
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
+		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+	kfree(sg_table);
+	rd_dev->sg_table_array = NULL;
+	rd_dev->sg_table_count = 0;
+}
+
+
+/*	rd_build_device_space():
+ *
+ *
+ */
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+				sizeof(struct scatterlist));
+	struct rd_dev_sg_table *sg_table;
+	struct page *pg;
+	struct scatterlist *sg;
+
+	if (rd_dev->rd_page_count <= 0) {
+		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+			rd_dev->rd_page_count);
+		return -1;
+	}
+	total_sg_needed = rd_dev->rd_page_count;
+
+	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+
+	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
+	if (!(sg_table)) {
+		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+			" scatterlist tables\n");
+		return -1;
+	}
+
+	rd_dev->sg_table_array = sg_table;
+	rd_dev->sg_table_count = sg_tables;
+
+	while (total_sg_needed) {
+		sg_per_table = (total_sg_needed > max_sg_per_table) ?
+			max_sg_per_table : total_sg_needed;
+
+		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+				GFP_KERNEL);
+		if (!(sg)) {
+			printk(KERN_ERR "Unable to allocate scatterlist array"
+				" for struct rd_dev\n");
+			return -1;
+		}
+
+		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+
+		sg_table[i].sg_table = sg;
+		sg_table[i].rd_sg_count = sg_per_table;
+		sg_table[i].page_start_offset = page_offset;
+		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
+						- 1;
+
+		for (j = 0; j < sg_per_table; j++) {
+			pg = alloc_pages(GFP_KERNEL, 0);
+			if (!(pg)) {
+				printk(KERN_ERR "Unable to allocate scatterlist"
+					" pages for struct rd_dev_sg_table\n");
+				return -1;
+			}
+			sg_assign_page(&sg[j], pg);
+			sg[j].length = PAGE_SIZE;
+		}
+
+		page_offset += sg_per_table;
+		total_sg_needed -= sg_per_table;
+	}
+
+	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+		rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count);
+
+	return 0;
+}
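
rd_build_device_space() splits the requested page count into scatterlist tables holding at most RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist) entries each, over-allocating one table via the "+ 1". A small standalone sketch of that sizing arithmetic (the 32-byte scatterlist entry size is an assumption for illustration):

#include <stdio.h>

#define RD_MAX_ALLOCATION_SIZE	65536
#define SG_ENTRY_SIZE		32	/* assumed sizeof(struct scatterlist) */

int main(void)
{
	unsigned int total = 4096;	/* e.g. rd_pages=4096 */
	unsigned int per_table = RD_MAX_ALLOCATION_SIZE / SG_ENTRY_SIZE;
	unsigned int tables = total / per_table + 1;	/* same "+ 1" as above */
	unsigned int i, n;

	printf("%u entries per table, %u tables allocated\n", per_table, tables);
	for (i = 0; i < tables && total; i++) {
		n = (total > per_table) ? per_table : total;
		printf("table %u: %u entries\n", i, n);
		total -= n;
	}
	return 0;
}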
+
+static void *rd_allocate_virtdevice(
+	struct se_hba *hba,
+	const char *name,
+	int rd_direct)
+{
+	struct rd_dev *rd_dev;
+	struct rd_host *rd_host = hba->hba_ptr;
+
+	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
+	if (!(rd_dev)) {
+		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+		return NULL;
+	}
+
+	rd_dev->rd_host = rd_host;
+	rd_dev->rd_direct = rd_direct;
+
+	return rd_dev;
+}
+
+static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 1);
+}
+
+static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
+{
+	return rd_allocate_virtdevice(hba, name, 0);
+}
+
+/*	rd_create_virtdevice():
+ *
+ *
+ */
+static struct se_device *rd_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p,
+	int rd_direct)
+{
+	struct se_device *dev;
+	struct se_dev_limits dev_limits;
+	struct rd_dev *rd_dev = p;
+	struct rd_host *rd_host = hba->hba_ptr;
+	int dev_flags = 0;
+	char prod[16], rev[4];
+
+	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+
+	if (rd_build_device_space(rd_dev) < 0)
+		goto fail;
+
+	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
+	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
+						RD_MCP_VERSION);
+
+	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
+	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
+	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
+	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
+
+	dev = transport_add_device_to_core_hba(hba,
+			(rd_dev->rd_direct) ? &rd_dr_template :
+			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+			&dev_limits, prod, rev);
+	if (!(dev))
+		goto fail;
+
+	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
+	rd_dev->rd_queue_depth = dev->queue_depth;
+
+	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+		" %u pages in %u tables, %lu total bytes\n",
+		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
+		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
+		rd_dev->sg_table_count,
+		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
+
+	return dev;
+
+fail:
+	rd_release_device_space(rd_dev);
+	return NULL;
+}
+
+static struct se_device *rd_DIRECT_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 1);
+}
+
+static struct se_device *rd_MEMCPY_create_virtdevice(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	void *p)
+{
+	return rd_create_virtdevice(hba, se_dev, p, 0);
+}
+
+/*	rd_free_device(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_device(void *p)
+{
+	struct rd_dev *rd_dev = p;
+
+	rd_release_device_space(rd_dev);
+	kfree(rd_dev);
+}
+
+static inline struct rd_request *RD_REQ(struct se_task *task)
+{
+	return container_of(task, struct rd_request, rd_task);
+}
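
RD_REQ() recovers the enclosing struct rd_request from the embedded struct se_task with container_of(). A tiny userspace illustration of that embed-and-recover pattern (the types here are stand-ins, not the TCM structures):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_task { int unused; };

struct fake_request {
	int id;
	struct fake_task task;	/* embedded, like rd_task above */
};

int main(void)
{
	struct fake_request req = { .id = 42 };
	struct fake_task *tp = &req.task;	/* what the core hands back */
	struct fake_request *rp = container_of(tp, struct fake_request, task);

	printf("recovered id: %d\n", rp->id);	/* 42 */
	return 0;
}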
+
+static struct se_task *
+rd_alloc_task(struct se_cmd *cmd)
+{
+	struct rd_request *rd_req;
+
+	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
+	if (!rd_req) {
+		printk(KERN_ERR "Unable to allocate struct rd_request\n");
+		return NULL;
+	}
+	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
+
+	return &rd_req->rd_task;
+}
+
+/*	rd_get_sg_table():
+ *
+ *
+ */
+static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
+{
+	u32 i;
+	struct rd_dev_sg_table *sg_table;
+
+	for (i = 0; i < rd_dev->sg_table_count; i++) {
+		sg_table = &rd_dev->sg_table_array[i];
+		if ((sg_table->page_start_offset <= page) &&
+		    (sg_table->page_end_offset >= page))
+			return sg_table;
+	}
+
+	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+			page);
+
+	return NULL;
+}
+
+/*	rd_MEMCPY_read():
+ *
+ *
+ */
+static int rd_MEMCPY_read(struct rd_request *req)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = task->task_sg;
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	src_offset = rd_offset;
+
+	while (req->rd_size) {
+		if ((sg_d[i].length - dst_offset) <
+		    (sg_s[j].length - src_offset)) {
+			length = (sg_d[i].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
+				sg_s[j].length);
+			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src = sg_virt(&sg_s[j]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst_offset = 0;
+			src_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_s[j].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+				" offset: %u sg_s[%d].length: %u\n", i,
+				&sg_d[i], sg_d[i].length, sg_d[i].offset,
+				j, sg_s[j].length);
+			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+				" src_offset: %u\n", length, dst_offset,
+				src_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			dst = sg_virt(&sg_d[i]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			if (sg_d[i].length == length) {
+				i++;
+				dst_offset = 0;
+			} else
+				dst_offset = length;
+
+			src = sg_virt(&sg_s[j++]) + src_offset;
+			if (!src)
+				BUG();
+
+			src_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_write():
+ *
+ *
+ */
+static int rd_MEMCPY_write(struct rd_request *req)
+{
+	struct se_task *task = &req->rd_task;
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct scatterlist *sg_d, *sg_s;
+	void *dst, *src;
+	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
+	u32 length, page_end = 0, table_sg_end;
+	u32 rd_offset = req->rd_offset;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
+	sg_s = task->task_sg;
+#ifdef DEBUG_RAMDISK_MCP
+	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
+		req->rd_page, req->rd_offset);
+#endif
+	dst_offset = rd_offset;
+
+	while (req->rd_size) {
+		if ((sg_s[i].length - src_offset) <
+		    (sg_d[j].length - dst_offset)) {
+			length = (sg_s[i].length - src_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i++]) + src_offset;
+			if (!src)
+				BUG();
+
+			dst = sg_virt(&sg_d[j]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			src_offset = 0;
+			dst_offset = length;
+			page_end = 0;
+		} else {
+			length = (sg_d[j].length - dst_offset);
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+				" offset: %d sg_d[%d].length: %u\n", i,
+				&sg_s[i], sg_s[i].length, sg_s[i].offset,
+				j, sg_d[j].length);
+			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+				" dst_offset: %u\n", length, src_offset,
+				dst_offset);
+#endif
+			if (length > req->rd_size)
+				length = req->rd_size;
+
+			src = sg_virt(&sg_s[i]) + src_offset;
+			if (!src)
+				BUG();
+
+			if (sg_s[i].length == length) {
+				i++;
+				src_offset = 0;
+			} else
+				src_offset = length;
+
+			dst = sg_virt(&sg_d[j++]) + dst_offset;
+			if (!dst)
+				BUG();
+
+			dst_offset = 0;
+			page_end = 1;
+		}
+
+		memcpy(dst, src, length);
+
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+			" i: %u, j: %u\n", req->rd_page,
+			(req->rd_size - length), length, i, j);
+#endif
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			return 0;
+
+		if (!page_end)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_MCP
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_MCP
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_d = &table->sg_table[j = 0];
+	}
+
+	return 0;
+}
+
+/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_MEMCPY_do_task(struct se_task *task)
+{
+	struct se_device *dev = task->se_dev;
+	struct rd_request *req = RD_REQ(task);
+	unsigned long long lba;
+	int ret;
+
+	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+	lba = task->task_lba;
+	req->rd_offset = (do_div(lba,
+			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
+			   DEV_ATTRIB(dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (task->task_data_direction == DMA_FROM_DEVICE)
+		ret = rd_MEMCPY_read(req);
+	else
+		ret = rd_MEMCPY_write(req);
+
+	if (ret != 0)
+		return ret;
+
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
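
rd_MEMCPY_do_task() converts the task LBA into a ramdisk page index plus a byte offset within that page, assuming the block size divides PAGE_SIZE. A standalone sketch of the same mapping (512-byte blocks and 4 KiB pages are example values):

#include <stdio.h>

int main(void)
{
	unsigned long long lba = 4097;	/* example LBA */
	unsigned int block_size = 512, page_size = 4096;
	unsigned int blocks_per_page = page_size / block_size;	/* 8 */

	unsigned long long page = (lba * block_size) / page_size;
	unsigned int offset = (lba % blocks_per_page) * block_size;

	printf("LBA %llu -> page %llu, offset %u\n", lba, page, offset);
	/* prints: LBA 4097 -> page 512, offset 512 */
	return 0;
}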
+
+/*	rd_DIRECT_with_offset():
+ *
+ *
+ */
+static int rd_DIRECT_with_offset(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct rd_request *req = RD_REQ(task);
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct se_mem *se_mem;
+	struct scatterlist *sg_s;
+	u32 j = 0, set_offset = 1;
+	u32 get_next_table = 0, offset_length, table_sg_end;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	table_sg_end = (table->page_end_offset - req->rd_page);
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+		(task->task_data_direction == DMA_TO_DEVICE) ?
+			"Write" : "Read",
+		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
+#endif
+	while (req->rd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		if (set_offset) {
+			offset_length = sg_s[j].length - req->rd_offset;
+			if (offset_length > req->rd_size)
+				offset_length = req->rd_size;
+
+			se_mem->se_page = sg_page(&sg_s[j++]);
+			se_mem->se_off = req->rd_offset;
+			se_mem->se_len = offset_length;
+
+			set_offset = 0;
+			get_next_table = (j > table_sg_end);
+			goto check_eot;
+		}
+
+		offset_length = (req->rd_size < req->rd_offset) ?
+			req->rd_size : req->rd_offset;
+
+		se_mem->se_page = sg_page(&sg_s[j]);
+		se_mem->se_len = offset_length;
+
+		set_offset = 1;
+
+check_eot:
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
+			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
+			req->rd_page, req->rd_size, offset_length, j, se_mem,
+			se_mem->se_page, se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= offset_length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (!set_offset && !get_next_table)
+			continue;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+					req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_without_offset():
+ *
+ *
+ */
+static int rd_DIRECT_without_offset(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct rd_request *req = RD_REQ(task);
+	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev_sg_table *table;
+	struct se_mem *se_mem;
+	struct scatterlist *sg_s;
+	u32 length, j = 0;
+
+	table = rd_get_sg_table(dev, req->rd_page);
+	if (!(table))
+		return -1;
+
+	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
+		(task->task_data_direction == DMA_TO_DEVICE) ?
+			"Write" : "Read",
+		task->task_lba, req->rd_size, req->rd_page);
+#endif
+	while (req->rd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+
+		length = (req->rd_size < sg_s[j].length) ?
+			req->rd_size : sg_s[j].length;
+
+		se_mem->se_page = sg_page(&sg_s[j++]);
+		se_mem->se_len = length;
+
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
+			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
+			req->rd_size, j, se_mem, se_mem->se_page,
+			se_mem->se_off, se_mem->se_len);
+#endif
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+
+		req->rd_size -= length;
+		if (!(req->rd_size))
+			goto out;
+
+		if (++req->rd_page <= table->page_end_offset) {
+#ifdef DEBUG_RAMDISK_DR
+			printk(KERN_INFO "page: %u in same page table\n",
+				req->rd_page);
+#endif
+			continue;
+		}
+#ifdef DEBUG_RAMDISK_DR
+		printk(KERN_INFO "getting new page table for page: %u\n",
+				req->rd_page);
+#endif
+		table = rd_get_sg_table(dev, req->rd_page);
+		if (!(table))
+			return -1;
+
+		sg_s = &table->sg_table[j = 0];
+	}
+
+out:
+	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+#ifdef DEBUG_RAMDISK_DR
+	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
+			*se_mem_cnt);
+#endif
+	return 0;
+}
+
+/*	rd_DIRECT_do_se_mem_map():
+ *
+ *
+ */
+static int rd_DIRECT_do_se_mem_map(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset_in)
+{
+	struct se_cmd *cmd = task->task_se_cmd;
+	struct rd_request *req = RD_REQ(task);
+	u32 task_offset = *task_offset_in;
+	unsigned long long lba;
+	int ret;
+
+	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
+			PAGE_SIZE);
+	lba = task->task_lba;
+	req->rd_offset = (do_div(lba,
+			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
+			   DEV_ATTRIB(task->se_dev)->block_size;
+	req->rd_size = task->task_size;
+
+	if (req->rd_offset)
+		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
+				task_offset_in);
+	else
+		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
+				task_offset_in);
+
+	if (ret < 0)
+		return ret;
+
+	if (CMD_TFO(cmd)->task_sg_chaining == 0)
+		return 0;
+	/*
+	 * Currently prevent writers from multiple HW fabrics doing
+	 * pci_map_sg() to RD_DR's internal scatterlist memory.
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
+				" RAMDISK_DR with task_sg_chaining=1\n");
+		return -1;
+	}
+	/*
+	 * Special case for if task_sg_chaining is enabled, then
+	 * we setup struct se_task->task_sg[], as it will be used by
+	 * transport_do_task_sg_chain() for creating chained SGLs
+	 * across multiple struct se_task->task_sg[].
+	 */
+	if (!(transport_calc_sg_num(task,
+			list_entry(T_TASK(cmd)->t_mem_list->next,
+				   struct se_mem, se_list),
+			task_offset)))
+		return -1;
+
+	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+			list_entry(T_TASK(cmd)->t_mem_list->next,
+				   struct se_mem, se_list),
+			out_se_mem, se_mem_cnt, task_offset_in);
+}
+
+/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static int rd_DIRECT_do_task(struct se_task *task)
+{
+	/*
+	 * At this point the locally allocated RD tables have been mapped
+	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
+	 */
+	task->task_scsi_status = GOOD;
+	transport_complete_task(task, 1);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+}
+
+/*	rd_free_task(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static void rd_free_task(struct se_task *task)
+{
+	kfree(RD_REQ(task));
+}
+
+enum {
+	Opt_rd_pages, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_rd_pages, "rd_pages=%d"},
+	{Opt_err, NULL}
+};
+
+static ssize_t rd_set_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	const char *page,
+	ssize_t count)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	char *orig, *ptr, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, arg, token;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_rd_pages:
+			match_int(args, &arg);
+			rd_dev->rd_page_count = arg;
+			printk(KERN_INFO "RAMDISK: Referencing Page"
+				" Count: %u\n", rd_dev->rd_page_count);
+			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
+			break;
+		default:
+			break;
+		}
+	}
+
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
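
rd_set_configfs_dev_params() tokenises a comma-separated option string and currently understands only rd_pages=<count>. A userspace approximation using strsep()/sscanf() in place of the kernel match_token() helpers (strsep() is a glibc/BSD extension):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *opts = strdup("rd_pages=4096,unknown=1");
	char *orig = opts, *ptr;
	unsigned int rd_page_count;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		if (sscanf(ptr, "rd_pages=%u", &rd_page_count) == 1)
			printf("RAMDISK: Referencing Page Count: %u\n",
				rd_page_count);
		/* unrecognised tokens fall through, as in the code above */
	}
	free(orig);
	return 0;
}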
+
+static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+
+	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+		printk(KERN_INFO "Missing rd_pages= parameter\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static ssize_t rd_show_configfs_dev_params(
+	struct se_hba *hba,
+	struct se_subsystem_dev *se_dev,
+	char *b)
+{
+	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
+			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
+			"rd_direct" : "rd_mcp");
+	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
+			"  SG_table_count: %u\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count);
+	return bl;
+}
+
+/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
+ *
+ *
+ */
+static unsigned char *rd_get_cdb(struct se_task *task)
+{
+	struct rd_request *req = RD_REQ(task);
+
+	return req->rd_scsi_cdb;
+}
+
+static u32 rd_get_device_rev(struct se_device *dev)
+{
+	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
+}
+
+static u32 rd_get_device_type(struct se_device *dev)
+{
+	return TYPE_DISK;
+}
+
+static sector_t rd_get_blocks(struct se_device *dev)
+{
+	struct rd_dev *rd_dev = dev->dev_ptr;
+	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
+			DEV_ATTRIB(dev)->block_size) - 1;
+
+	return blocks_long;
+}
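
rd_get_blocks() reports the highest addressable LBA, i.e. total ramdisk bytes divided by the logical block size, minus one. Worked through with assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned long long page_count = 4096;	/* rd_pages=4096 */
	unsigned long long page_size = 4096, block_size = 512;
	unsigned long long last_lba = (page_count * page_size) / block_size - 1;

	printf("capacity: %llu blocks, last LBA: %llu\n",
		last_lba + 1, last_lba);	/* 32768 blocks, last LBA 32767 */
	return 0;
}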
+
+static struct se_subsystem_api rd_dr_template = {
+	.name			= "rd_dr",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
+	.create_virtdevice	= rd_DIRECT_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_DIRECT_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_cdb		= rd_get_cdb,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
+};
+
+static struct se_subsystem_api rd_mcp_template = {
+	.name			= "rd_mcp",
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
+	.attach_hba		= rd_attach_hba,
+	.detach_hba		= rd_detach_hba,
+	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
+	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
+	.free_device		= rd_free_device,
+	.alloc_task		= rd_alloc_task,
+	.do_task		= rd_MEMCPY_do_task,
+	.free_task		= rd_free_task,
+	.check_configfs_dev_params = rd_check_configfs_dev_params,
+	.set_configfs_dev_params = rd_set_configfs_dev_params,
+	.show_configfs_dev_params = rd_show_configfs_dev_params,
+	.get_cdb		= rd_get_cdb,
+	.get_device_rev		= rd_get_device_rev,
+	.get_device_type	= rd_get_device_type,
+	.get_blocks		= rd_get_blocks,
+};
+
+int __init rd_module_init(void)
+{
+	int ret;
+
+	ret = transport_subsystem_register(&rd_dr_template);
+	if (ret < 0)
+		return ret;
+
+	ret = transport_subsystem_register(&rd_mcp_template);
+	if (ret < 0) {
+		transport_subsystem_release(&rd_dr_template);
+		return ret;
+	}
+
+	return 0;
+}
+
+void rd_module_exit(void)
+{
+	transport_subsystem_release(&rd_dr_template);
+	transport_subsystem_release(&rd_mcp_template);
+}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
new file mode 100644
index 0000000..13badfb
--- /dev/null
+++ b/drivers/target/target_core_rd.h
@@ -0,0 +1,73 @@
+#ifndef TARGET_CORE_RD_H
+#define TARGET_CORE_RD_H
+
+#define RD_HBA_VERSION		"v4.0"
+#define RD_DR_VERSION		"4.0"
+#define RD_MCP_VERSION		"4.0"
+
+/* Largest piece of memory kmalloc can allocate */
+#define RD_MAX_ALLOCATION_SIZE	65536
+/* Maximum queuedepth for the Ramdisk HBA */
+#define RD_HBA_QUEUE_DEPTH	256
+#define RD_DEVICE_QUEUE_DEPTH	32
+#define RD_MAX_DEVICE_QUEUE_DEPTH 128
+#define RD_BLOCKSIZE		512
+#define RD_MAX_SECTORS		1024
+
+extern struct kmem_cache *se_mem_cache;
+
+/* Used in target_core_init_configfs() for virtual LUN 0 access */
+int __init rd_module_init(void);
+void rd_module_exit(void);
+
+#define RRF_EMULATE_CDB		0x01
+#define RRF_GOT_LBA		0x02
+
+struct rd_request {
+	struct se_task	rd_task;
+
+	/* SCSI CDB from iSCSI Command PDU */
+	unsigned char	rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
+	/* Offset from start of page */
+	u32		rd_offset;
+	/* Starting page in Ramdisk for request */
+	u32		rd_page;
+	/* Total number of pages needed for request */
+	u32		rd_page_count;
+	/* Remaining request data size in bytes */
+	u32		rd_size;
+	/* Ramdisk device */
+	struct rd_dev	*rd_dev;
+} ____cacheline_aligned;
+
+struct rd_dev_sg_table {
+	u32		page_start_offset;
+	u32		page_end_offset;
+	u32		rd_sg_count;
+	struct scatterlist *sg_table;
+} ____cacheline_aligned;
+
+#define RDF_HAS_PAGE_COUNT	0x01
+
+struct rd_dev {
+	int		rd_direct;
+	u32		rd_flags;
+	/* Unique Ramdisk Device ID in Ramdisk HBA */
+	u32		rd_dev_id;
+	/* Total page count for ramdisk device */
+	u32		rd_page_count;
+	/* Number of SG tables in sg_table_array */
+	u32		sg_table_count;
+	u32		rd_queue_depth;
+	/* Array of struct rd_dev_sg_table containing scatterlists */
+	struct rd_dev_sg_table *sg_table_array;
+	/* Ramdisk HBA device is connected to */
+	struct rd_host *rd_host;
+} ____cacheline_aligned;
+
+struct rd_host {
+	u32		rd_host_dev_id_count;
+	u32		rd_host_id;		/* Unique Ramdisk Host ID */
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_RD_H */
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
new file mode 100644
index 0000000..dc6fed0
--- /dev/null
+++ b/drivers/target/target_core_scdb.c
@@ -0,0 +1,105 @@
+/*******************************************************************************
+ * Filename:  target_core_scdb.c
+ *
+ * This file contains the generic target engine Split CDB related functions.
+ *
+ * Copyright (c) 2004-2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "target_core_scdb.h"
+
+/*	split_cdb_XX_6():
+ *
+ *      21-bit LBA w/ 8-bit SECTORS
+ */
+void split_cdb_XX_6(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	cdb[1] = (lba >> 16) & 0x1f;
+	cdb[2] = (lba >> 8) & 0xff;
+	cdb[3] = lba & 0xff;
+	cdb[4] = *sectors & 0xff;
+}
+
+/*	split_cdb_XX_10():
+ *
+ *	32-bit LBA w/ 16-bit SECTORS
+ */
+void split_cdb_XX_10(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be32(lba, &cdb[2]);
+	put_unaligned_be16(*sectors, &cdb[7]);
+}
+
+/*	split_cdb_XX_12():
+ *
+ *	32-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_12(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be32(lba, &cdb[2]);
+	put_unaligned_be32(*sectors, &cdb[6]);
+}
+
+/*	split_cdb_XX_16():
+ *
+ *	64-bit LBA w/ 32-bit SECTORS
+ */
+void split_cdb_XX_16(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be64(lba, &cdb[2]);
+	put_unaligned_be32(*sectors, &cdb[10]);
+}
+
+/*
+ *	split_cdb_XX_32():
+ *
+ * 	64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
+ */
+void split_cdb_XX_32(
+	unsigned long long lba,
+	u32 *sectors,
+	unsigned char *cdb)
+{
+	put_unaligned_be64(lba, &cdb[12]);
+	put_unaligned_be32(*sectors, &cdb[28]);
+}
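
Each split_cdb_XX_* helper drops the LBA and transfer length into the CDB at fixed, big-endian byte positions. A userspace sketch of the 10-byte layout handled by split_cdb_XX_10(), with manual shifts standing in for put_unaligned_be32()/put_unaligned_be16():

#include <stdint.h>
#include <stdio.h>

/* Big-endian placement of LBA (bytes 2-5) and transfer length (7-8),
 * matching the offsets used by split_cdb_XX_10() above. */
static void pack_cdb_10(uint32_t lba, uint16_t sectors, unsigned char *cdb)
{
	cdb[2] = lba >> 24;
	cdb[3] = lba >> 16;
	cdb[4] = lba >> 8;
	cdb[5] = lba;
	cdb[7] = sectors >> 8;
	cdb[8] = sectors;
}

int main(void)
{
	unsigned char cdb[10] = { 0x28 };	/* READ(10) opcode, rest zeroed */
	int i;

	pack_cdb_10(0x12345678, 8, cdb);
	for (i = 0; i < 10; i++)
		printf("%02x ", cdb[i]);
	printf("\n");	/* 28 00 12 34 56 78 00 00 08 00 */
	return 0;
}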
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
new file mode 100644
index 0000000..98cd1c0
--- /dev/null
+++ b/drivers/target/target_core_scdb.h
@@ -0,0 +1,10 @@
+#ifndef TARGET_CORE_SCDB_H
+#define TARGET_CORE_SCDB_H
+
+extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+
+#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
new file mode 100644
index 0000000..158cecb
--- /dev/null
+++ b/drivers/target/target_core_tmr.c
@@ -0,0 +1,404 @@
+/*******************************************************************************
+ * Filename:  target_core_tmr.c
+ *
+ * This file contains SPC-3 task management infrastructure
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+
+#define DEBUG_LUN_RESET
+#ifdef DEBUG_LUN_RESET
+#define DEBUG_LR(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_LR(x...)
+#endif
+
+struct se_tmr_req *core_tmr_alloc_req(
+	struct se_cmd *se_cmd,
+	void *fabric_tmr_ptr,
+	u8 function)
+{
+	struct se_tmr_req *tmr;
+
+	tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
+	if (!(tmr)) {
+		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	tmr->task_cmd = se_cmd;
+	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
+	tmr->function = function;
+	INIT_LIST_HEAD(&tmr->tmr_list);
+
+	return tmr;
+}
+EXPORT_SYMBOL(core_tmr_alloc_req);
+
+void core_tmr_release_req(
+	struct se_tmr_req *tmr)
+{
+	struct se_device *dev = tmr->tmr_dev;
+
+	spin_lock(&dev->se_tmr_lock);
+	list_del(&tmr->tmr_list);
+	kmem_cache_free(se_tmr_req_cache, tmr);
+	spin_unlock(&dev->se_tmr_lock);
+}
+
+static void core_tmr_handle_tas_abort(
+	struct se_node_acl *tmr_nacl,
+	struct se_cmd *cmd,
+	int tas,
+	int fe_count)
+{
+	if (!(fe_count)) {
+		transport_cmd_finish_abort(cmd, 1);
+		return;
+	}
+	/*
+	 * TASK ABORTED status (TAS) bit support
+	 */
+	if (((tmr_nacl != NULL) &&
+	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+		transport_send_task_abort(cmd);
+
+	transport_cmd_finish_abort(cmd, 0);
+}
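
core_tmr_handle_tas_abort() reduces to a small decision: with no fabric reference left the command is completed and released outright; otherwise a TASK ABORTED response is sent first when TAS is enabled or when the TMR came from the command's own node ACL, and the command is then finished without dropping that reference. A condensed restatement of the branch as a standalone predicate (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* True when a TASK ABORTED response should be sent before completing
 * the aborted command, mirroring the condition above. */
static bool should_send_task_abort(bool same_nacl, bool tas, int fe_count)
{
	if (fe_count == 0)
		return false;	/* no fabric reference: finish immediately */
	return same_nacl || tas;
}

int main(void)
{
	printf("%d %d %d\n",
		should_send_task_abort(false, false, 1),	/* 0 */
		should_send_task_abort(false, true, 1),		/* 1 */
		should_send_task_abort(true, false, 0));	/* 0 */
	return 0;
}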
+
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr, *qr_tmp;
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	struct se_queue_obj *qobj = dev->dev_queue_obj;
+	struct se_tmr_req *tmr_p, *tmr_pp;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count, state, tas;
+	/*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = DEV_ATTRIB(dev)->emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				TPG_TFO(tmr_tpg)->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		TRANSPORT(dev)->name, tas);
+	/*
+	 * Release all pending and outgoing TMRs aside from the received
+	 * LUN_RESET tmr..
+	 */
+	spin_lock(&dev->se_tmr_lock);
+	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
+		/*
+		 * Allow the received TMR to return with FUNCTION_COMPLETE.
+		 */
+		if (tmr && (tmr_p == tmr))
+			continue;
+
+		cmd = tmr_p->task_cmd;
+		if (!(cmd)) {
+			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+			continue;
+		}
+		/*
+		 * If this function was called with a valid pr_res_key
+		 * parameter (e.g. for a PROUT PREEMPT_AND_ABORT service action),
+		 * skip TMRs whose registration key does not match.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		spin_unlock(&dev->se_tmr_lock);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock(&dev->se_tmr_lock);
+			continue;
+		}
+		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock(&dev->se_tmr_lock);
+			continue;
+		}
+		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+			" Response: 0x%02x, t_state: %d\n",
+			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+			tmr_p->function, tmr_p->response, cmd->t_state);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_cmd_finish_abort_tmr(cmd);
+		spin_lock(&dev->se_tmr_lock);
+	}
+	spin_unlock(&dev->se_tmr_lock);
+	/*
+	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
+	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
+	 * for TMR LUN_RESET:
+	 *
+	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
+	 * other than the one that caused the SCSI device condition is
+	 * completed with TASK ABORTED status, if the TAS bit is set to one in
+	 * the Control mode page (see SPC-4). "No" indicates that no status is
+	 * returned for aborted commands.
+	 *
+	 * d) If the logical unit reset is caused by a particular I_T nexus
+	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
+	 * (TASK_ABORTED status) applies.
+	 *
+	 * Otherwise (e.g., if triggered by a hard reset), "no"
+	 * (no TASK_ABORTED SAM status) applies.
+	 *
+	 * Note that this seems to be independent of TAS (Task Aborted Status)
+	 * in the Control Mode Page.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
+				t_state_list) {
+		if (!(TASK_CMD(task))) {
+			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+			continue;
+		}
+		cmd = TASK_CMD(task);
+
+		if (!T_TASK(cmd)) {
+			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+				" %p ITT: 0x%08x\n", task, cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+			continue;
+		}
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		list_del(&task->t_state_list);
+		atomic_set(&task->task_state_active, 0);
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
+			"def_t_state: %d/%d cdb: 0x%02x\n",
+			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
+			CMD_TFO(cmd)->get_task_tag(cmd), 0,
+			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
+		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+			" t_task_cdbs: %d t_task_cdbs_left: %d"
+			" t_task_cdbs_sent: %d -- t_transport_active: %d"
+			" t_transport_stop: %d t_transport_sent: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
+			T_TASK(cmd)->t_task_cdbs,
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+			atomic_read(&T_TASK(cmd)->t_transport_active),
+			atomic_read(&T_TASK(cmd)->t_transport_stop),
+			atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+				" for dev: %p\n", task, dev);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+				" dev: %p\n", task, dev);
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		}
+		__transport_stop_task_timer(task, &flags);
+
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+			spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+
+		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+				" task: %p, t_fe_count: %d dev: %p\n", task,
+				fe_count, dev);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+						flags);
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	/*
+	 * Release all commands remaining in the struct se_device cmd queue.
+	 *
+	 * This follows the same logic as above for the struct se_device
+	 * struct se_task state list, where commands are returned with
+	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
+	 * reference, otherwise the struct se_cmd is released.
+	 */
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
+		cmd = (struct se_cmd *)qr->cmd;
+		if (!(cmd)) {
+			/*
+			 * Skip these for non PREEMPT_AND_ABORT usage..
+			 */
+			if (preempt_and_abort_list != NULL)
+				continue;
+
+			atomic_dec(&qobj->queue_cnt);
+			list_del(&qr->qr_list);
+			kfree(qr);
+			continue;
+		}
+		/*
+		 * For PREEMPT_AND_ABORT usage, only process commands
+		 * with a matching reservation key.
+		 */
+		if ((preempt_and_abort_list != NULL) &&
+		    (core_scsi3_check_cdb_abort_and_preempt(
+					preempt_and_abort_list, cmd) != 0))
+			continue;
+		/*
+		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+		 */
+		if (prout_cmd == cmd)
+			continue;
+
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+		atomic_dec(&qobj->queue_cnt);
+		list_del(&qr->qr_list);
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+		state = qr->state;
+		kfree(qr);
+
+		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
+			"Preempt" : "", cmd, state,
+			atomic_read(&T_TASK(cmd)->t_fe_count));
+		/*
+		 * Signal that the command has failed via cmd->se_cmd_flags,
+		 * and call TFO->new_cmd_failure() to wakeup any fabric
+		 * dependent code used to wait for unsolicited data out
+		 * allocation to complete.  The fabric module is expected
+		 * to dump any remaining unsolicited data out for the aborted
+		 * command at this point.
+		 */
+		transport_new_cmd_failure(cmd);
+
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
+				atomic_read(&T_TASK(cmd)->t_fe_count));
+		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+	/*
+	 * Clear any legacy SPC-2 reservation when called during
+	 * LOGICAL UNIT RESET
+	 */
+	if (!(preempt_and_abort_list) &&
+	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+		spin_lock(&dev->dev_reservation_lock);
+		dev->dev_reserved_node_acl = NULL;
+		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+		spin_unlock(&dev->dev_reservation_lock);
+		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+	}
+
+	spin_lock(&dev->stats_lock);
+	dev->num_resets++;
+	spin_unlock(&dev->stats_lock);
+
+	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+			(preempt_and_abort_list) ? "Preempt" : "TMR",
+			TRANSPORT(dev)->name);
+	return 0;
+}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
new file mode 100644
index 0000000..abfa81a
--- /dev/null
+++ b/drivers/target/target_core_tpg.c
@@ -0,0 +1,826 @@
+/*******************************************************************************
+ * Filename:  target_core_tpg.c
+ *
+ * This file contains generic Target Portal Group related functions.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/net.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+
+#include "target_core_hba.h"
+
+/*	core_clear_initiator_node_from_tpg():
+ *
+ *
+ */
+static void core_clear_initiator_node_from_tpg(
+	struct se_node_acl *nacl,
+	struct se_portal_group *tpg)
+{
+	int i;
+	struct se_dev_entry *deve;
+	struct se_lun *lun;
+	struct se_lun_acl *acl, *acl_tmp;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+			continue;
+
+		if (!deve->se_lun) {
+			printk(KERN_ERR "%s device entry's device pointer is"
+				" NULL, but Initiator has access.\n",
+				TPG_TFO(tpg)->get_fabric_name());
+			continue;
+		}
+
+		lun = deve->se_lun;
+		spin_unlock_irq(&nacl->device_list_lock);
+		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+		spin_lock(&lun->lun_acl_lock);
+		list_for_each_entry_safe(acl, acl_tmp,
+					&lun->lun_acl_list, lacl_list) {
+			if (!(strcmp(acl->initiatorname,
+					nacl->initiatorname)) &&
+			     (acl->mapped_lun == deve->mapped_lun))
+				break;
+		}
+
+		if (!acl) {
+			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+				" mapped_lun: %u\n", nacl->initiatorname,
+				deve->mapped_lun);
+			spin_unlock(&lun->lun_acl_lock);
+			spin_lock_irq(&nacl->device_list_lock);
+			continue;
+		}
+
+		list_del(&acl->lacl_list);
+		spin_unlock(&lun->lun_acl_lock);
+
+		spin_lock_irq(&nacl->device_list_lock);
+		kfree(acl);
+	}
+	spin_unlock_irq(&nacl->device_list_lock);
+}
+
+/*	__core_tpg_get_initiator_node_acl():
+ *
+ *	spin_lock_bh(&tpg->acl_node_lock) must be held when calling
+ */
+struct se_node_acl *__core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	const char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!(strcmp(acl->initiatorname, initiatorname)))
+			return acl;
+	}
+
+	return NULL;
+}
+
+/*	core_tpg_get_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_get_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+		if (!(strcmp(acl->initiatorname, initiatorname)) &&
+		   (!(acl->dynamic_node_acl))) {
+			spin_unlock_bh(&tpg->acl_node_lock);
+			return acl;
+		}
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return NULL;
+}
+
+/*	core_tpg_add_node_to_devs():
+ *
+ *
+ */
+void core_tpg_add_node_to_devs(
+	struct se_node_acl *acl,
+	struct se_portal_group *tpg)
+{
+	int i = 0;
+	u32 lun_access = 0;
+	struct se_lun *lun;
+	struct se_device *dev;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &tpg->tpg_lun_list[i];
+		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+
+		dev = lun->lun_se_dev;
+		/*
+		 * By default in LIO-Target $FABRIC_MOD,
+		 * demo_mode_write_protect is ON, or READ_ONLY;
+		 */
+		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+			if (dev->dev_flags & DF_READ_ONLY)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		} else {
+			/*
+			 * Allow only optical drives to issue R/W in default RO
+			 * demo mode.
+			 */
+			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+			else
+				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		}
+
+		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+			" access for LUN in Demo Mode\n",
+			TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+			"READ-WRITE" : "READ-ONLY");
+
+		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
+				lun_access, acl, tpg, 1);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+
+/*      core_set_queue_depth_for_node():
+ *
+ *
+ */
+static int core_set_queue_depth_for_node(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl)
+{
+	if (!acl->queue_depth) {
+		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
+			" defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+			acl->initiatorname);
+		acl->queue_depth = 1;
+	}
+
+	return 0;
+}
+
+/*      core_create_device_list_for_node():
+ *
+ *
+ */
+static int core_create_device_list_for_node(struct se_node_acl *nacl)
+{
+	struct se_dev_entry *deve;
+	int i;
+
+	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
+				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
+	if (!(nacl->device_list)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+			" struct se_node_acl->device_list\n");
+		return -1;
+	}
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		deve = &nacl->device_list[i];
+
+		atomic_set(&deve->ua_count, 0);
+		atomic_set(&deve->pr_ref_count, 0);
+		spin_lock_init(&deve->ua_lock);
+		INIT_LIST_HEAD(&deve->alua_port_list);
+		INIT_LIST_HEAD(&deve->ua_list);
+	}
+
+	return 0;
+}
+
+/*	core_tpg_check_initiator_node_acl()
+ *
+ *
+ */
+struct se_node_acl *core_tpg_check_initiator_node_acl(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname)
+{
+	struct se_node_acl *acl;
+
+	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if ((acl))
+		return acl;
+
+	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+		return NULL;
+
+	acl =  TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
+	if (!(acl))
+		return NULL;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	atomic_set(&acl->mib_ref_count, 0);
+	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+	acl->dynamic_node_acl = 1;
+
+	TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return NULL;
+	}
+
+	core_tpg_add_node_to_devs(acl, tpg);
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
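+
+/*
+ * Example (illustrative sketch, not called by the core itself): a fabric
+ * module would typically resolve an explicitly configured or demo-mode
+ * generated struct se_node_acl from its login path.  The my_tpg and
+ * login_name identifiers below are hypothetical fabric module symbols:
+ *
+ *	struct se_node_acl *nacl;
+ *
+ *	nacl = core_tpg_check_initiator_node_acl(&my_tpg->se_tpg, login_name);
+ *	if (!nacl)
+ *		return -EACCES;
+ */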
+
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
+		cpu_relax();
+}
+
+void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
+{
+	while (atomic_read(&nacl->mib_ref_count) != 0)
+		cpu_relax();
+}
+
+void core_tpg_clear_object_luns(struct se_portal_group *tpg)
+{
+	int i, ret;
+	struct se_lun *lun;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &tpg->tpg_lun_list[i];
+
+		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
+		    (lun->lun_se_dev == NULL))
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+EXPORT_SYMBOL(core_tpg_clear_object_luns);
+
+/*	core_tpg_add_initiator_node_acl():
+ *
+ *
+ */
+struct se_node_acl *core_tpg_add_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *se_nacl,
+	const char *initiatorname,
+	u32 queue_depth)
+{
+	struct se_node_acl *acl = NULL;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if ((acl)) {
+		if (acl->dynamic_node_acl) {
+			acl->dynamic_node_acl = 0;
+			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
+				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
+				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+			spin_unlock_bh(&tpg->acl_node_lock);
+			/*
+			 * Release the locally allocated struct se_node_acl
+			 * because core_tpg_add_initiator_node_acl() returned
+			 * a pointer to an existing demo mode node ACL.
+			 */
+			if (se_nacl)
+				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+							se_nacl);
+			goto done;
+		}
+
+		printk(KERN_ERR "ACL entry for %s Initiator"
+			" Node %s already exists for TPG %u, ignoring"
+			" request.\n",  TPG_TFO(tpg)->get_fabric_name(),
+			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return ERR_PTR(-EEXIST);
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	if (!(se_nacl)) {
+		printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
+		return ERR_PTR(-EINVAL);
+	}
+	/*
+	 * For v4.x logic the se_node_acl_s is hanging off a fabric
+	 * dependent structure allocated via
+	 * struct target_core_fabric_ops->fabric_make_nodeacl()
+	 */
+	acl = se_nacl;
+
+	INIT_LIST_HEAD(&acl->acl_list);
+	INIT_LIST_HEAD(&acl->acl_sess_list);
+	spin_lock_init(&acl->device_list_lock);
+	spin_lock_init(&acl->nacl_sess_lock);
+	atomic_set(&acl->acl_pr_ref_count, 0);
+	acl->queue_depth = queue_depth;
+	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+	acl->se_tpg = tpg;
+	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+	spin_lock_init(&acl->stats_lock);
+
+	TPG_TFO(tpg)->set_default_node_attributes(acl);
+
+	if (core_create_device_list_for_node(acl) < 0) {
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		core_free_device_list_for_node(acl, tpg);
+		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+	tpg->num_node_acls++;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+done:
+	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+
+	return acl;
+}
+EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
+
+/*	core_tpg_del_initiator_node_acl():
+ *
+ *
+ */
+int core_tpg_del_initiator_node_acl(
+	struct se_portal_group *tpg,
+	struct se_node_acl *acl,
+	int force)
+{
+	struct se_session *sess, *sess_tmp;
+	int dynamic_acl = 0;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	list_del(&acl->acl_list);
+	tpg->num_node_acls--;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	spin_lock_bh(&tpg->session_lock);
+	list_for_each_entry_safe(sess, sess_tmp,
+				&tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+			continue;
+
+		spin_unlock_bh(&tpg->session_lock);
+		/*
+		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+		 * forcefully shutdown the $FABRIC_MOD session/nexus.
+		 */
+		TPG_TFO(tpg)->close_session(sess);
+
+		spin_lock_bh(&tpg->session_lock);
+	}
+	spin_unlock_bh(&tpg->session_lock);
+
+	core_tpg_wait_for_nacl_pr_ref(acl);
+	core_tpg_wait_for_mib_ref(acl);
+	core_clear_initiator_node_from_tpg(acl, tpg);
+	core_free_device_list_for_node(acl, tpg);
+
+	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
+		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
+
+/*	core_tpg_set_initiator_node_queue_depth():
+ *
+ *
+ */
+int core_tpg_set_initiator_node_queue_depth(
+	struct se_portal_group *tpg,
+	unsigned char *initiatorname,
+	u32 queue_depth,
+	int force)
+{
+	struct se_session *sess, *init_sess = NULL;
+	struct se_node_acl *acl;
+	int dynamic_acl = 0;
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+	if (!(acl)) {
+		printk(KERN_ERR "Access Control List entry for %s Initiator"
+			" Node %s does not exist for TPG %hu, ignoring"
+			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
+			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return -ENODEV;
+	}
+	if (acl->dynamic_node_acl) {
+		acl->dynamic_node_acl = 0;
+		dynamic_acl = 1;
+	}
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	spin_lock_bh(&tpg->session_lock);
+	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
+		if (sess->se_node_acl != acl)
+			continue;
+
+		if (!force) {
+			printk(KERN_ERR "Unable to change queue depth for %s"
+				" Initiator Node: %s while session is"
+				" operational.  To forcefully change the queue"
+				" depth and force session reinstatement"
+				" use the \"force=1\" parameter.\n",
+				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+			spin_unlock_bh(&tpg->session_lock);
+
+			spin_lock_bh(&tpg->acl_node_lock);
+			if (dynamic_acl)
+				acl->dynamic_node_acl = 1;
+			spin_unlock_bh(&tpg->acl_node_lock);
+			return -EEXIST;
+		}
+		/*
+		 * Determine if the session needs to be closed by our context.
+		 */
+		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+			continue;
+
+		init_sess = sess;
+		break;
+	}
+
+	/*
+	 * User has requested to change the queue depth for an Initiator Node.
+	 * Change the value in the Node's struct se_node_acl, and call
+	 * core_set_queue_depth_for_node() to add the requested queue depth.
+	 *
+	 * Finally, call TPG_TFO(tpg)->close_session() to force session
+	 * reinstatement to occur if there is an active session for the
+	 * $FABRIC_MOD Initiator Node in question.
+	 */
+	acl->queue_depth = queue_depth;
+
+	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
+		spin_unlock_bh(&tpg->session_lock);
+		/*
+		 * Force session reinstatement if
+		 * core_set_queue_depth_for_node() failed, because we assume
+		 * the $FABRIC_MOD has already set the session reinstatement
+		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
+		 */
+		if (init_sess)
+			TPG_TFO(tpg)->close_session(init_sess);
+
+		spin_lock_bh(&tpg->acl_node_lock);
+		if (dynamic_acl)
+			acl->dynamic_node_acl = 1;
+		spin_unlock_bh(&tpg->acl_node_lock);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&tpg->session_lock);
+	/*
+	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
+	 * forcefully shutdown the $FABRIC_MOD session/nexus.
+	 */
+	if (init_sess)
+		TPG_TFO(tpg)->close_session(init_sess);
+
+	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
+		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
+		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
+		TPG_TFO(tpg)->tpg_get_tag(tpg));
+
+	spin_lock_bh(&tpg->acl_node_lock);
+	if (dynamic_acl)
+		acl->dynamic_node_acl = 1;
+	spin_unlock_bh(&tpg->acl_node_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
+
+static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	/* Set in core_dev_setup_virtual_lun0() */
+	struct se_device *dev = se_global->g_lun0_dev;
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	int ret;
+
+	lun->unpacked_lun = 0;
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	atomic_set(&lun->lun_acl_count, 0);
+	init_completion(&lun->lun_shutdown_comp);
+	INIT_LIST_HEAD(&lun->lun_acl_list);
+	INIT_LIST_HEAD(&lun->lun_cmd_list);
+	spin_lock_init(&lun->lun_acl_lock);
+	spin_lock_init(&lun->lun_cmd_lock);
+	spin_lock_init(&lun->lun_sep_lock);
+
+	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
+	if (ret < 0)
+		return -1;
+
+	return 0;
+}
+
+static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+{
+	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+
+	core_tpg_post_dellun(se_tpg, lun);
+}
+
+int core_tpg_register(
+	struct target_core_fabric_ops *tfo,
+	struct se_wwn *se_wwn,
+	struct se_portal_group *se_tpg,
+	void *tpg_fabric_ptr,
+	int se_tpg_type)
+{
+	struct se_lun *lun;
+	u32 i;
+
+	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
+				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
+	if (!(se_tpg->tpg_lun_list)) {
+		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+				"tpg_lun_list\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = &se_tpg->tpg_lun_list[i];
+		lun->unpacked_lun = i;
+		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+		atomic_set(&lun->lun_acl_count, 0);
+		init_completion(&lun->lun_shutdown_comp);
+		INIT_LIST_HEAD(&lun->lun_acl_list);
+		INIT_LIST_HEAD(&lun->lun_cmd_list);
+		spin_lock_init(&lun->lun_acl_lock);
+		spin_lock_init(&lun->lun_cmd_lock);
+		spin_lock_init(&lun->lun_sep_lock);
+	}
+
+	se_tpg->se_tpg_type = se_tpg_type;
+	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
+	se_tpg->se_tpg_tfo = tfo;
+	se_tpg->se_tpg_wwn = se_wwn;
+	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
+	INIT_LIST_HEAD(&se_tpg->acl_node_list);
+	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
+	spin_lock_init(&se_tpg->acl_node_lock);
+	spin_lock_init(&se_tpg->session_lock);
+	spin_lock_init(&se_tpg->tpg_lun_lock);
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
+		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
+			kfree(se_tpg->tpg_lun_list);
+			return -ENOMEM;
+		}
+	}
+
+	spin_lock_bh(&se_global->se_tpg_lock);
+	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
+	spin_unlock_bh(&se_global->se_tpg_lock);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
+		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_register);
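+
+/*
+ * Example (illustrative sketch): a fabric module registers its Target Portal
+ * Group with the generic target core after allocating its fabric specific
+ * context.  my_fabric_ops, wwn and my_tpg are hypothetical fabric symbols:
+ *
+ *	ret = core_tpg_register(&my_fabric_ops, wwn, &my_tpg->se_tpg,
+ *				(void *)my_tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ *	if (ret < 0)
+ *		return ERR_PTR(ret);
+ */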
+
+int core_tpg_deregister(struct se_portal_group *se_tpg)
+{
+	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+		" for endpoint: %s Portal Tag %u\n",
+		TPG_TFO(se_tpg)->get_fabric_name(),
+		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
+		"Normal" : "Discovery",
+		TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
+		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+
+	spin_lock_bh(&se_global->se_tpg_lock);
+	list_del(&se_tpg->se_tpg_list);
+	spin_unlock_bh(&se_global->se_tpg_lock);
+
+	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
+		cpu_relax();
+
+	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
+		core_tpg_release_virtual_lun0(se_tpg);
+
+	se_tpg->se_tpg_fabric_ptr = NULL;
+	kfree(se_tpg->tpg_lun_list);
+	return 0;
+}
+EXPORT_SYMBOL(core_tpg_deregister);
+
+struct se_lun *core_tpg_pre_addlun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			TPG_TFO(tpg)->get_fabric_name(),
+			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+			" on %s Target Portal Group: %u, ignoring request.\n",
+			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_addlun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun,
+	u32 lun_access,
+	void *lun_ptr)
+{
+	if (core_dev_export(lun_ptr, tpg, lun) < 0)
+		return -1;
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_access = lun_access;
+	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
+
+static void core_tpg_shutdown_lun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_clear_lun_from_tpg(lun, tpg);
+	transport_clear_lun_from_sessions(lun);
+}
+
+struct se_lun *core_tpg_pre_dellun(
+	struct se_portal_group *tpg,
+	u32 unpacked_lun,
+	int *ret)
+{
+	struct se_lun *lun;
+
+	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
+		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+			"-1: %u for Target Portal Group: %u\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TRANSPORT_MAX_LUNS_PER_TPG-1,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		return ERR_PTR(-EOVERFLOW);
+	}
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun = &tpg->tpg_lun_list[unpacked_lun];
+	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
+		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+			" Target Portal Group: %u, ignoring request.\n",
+			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+			TPG_TFO(tpg)->tpg_get_tag(tpg));
+		spin_unlock(&tpg->tpg_lun_lock);
+		return ERR_PTR(-ENODEV);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return lun;
+}
+
+int core_tpg_post_dellun(
+	struct se_portal_group *tpg,
+	struct se_lun *lun)
+{
+	core_tpg_shutdown_lun(tpg, lun);
+
+	core_dev_unexport(lun->lun_se_dev, tpg, lun);
+
+	spin_lock(&tpg->tpg_lun_lock);
+	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+	spin_unlock(&tpg->tpg_lun_lock);
+
+	return 0;
+}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
new file mode 100644
index 0000000..28b6292
--- /dev/null
+++ b/drivers/target/target_core_transport.c
@@ -0,0 +1,6134 @@
+/*******************************************************************************
+ * Filename:  target_core_transport.c
+ *
+ * This file contains the Generic Target Engine Core.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+#include <linux/in.h>
+#include <linux/cdrom.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/libsas.h> /* For TASK_ATTR_* */
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_scdb.h"
+#include "target_core_ua.h"
+
+/* #define DEBUG_CDB_HANDLER */
+#ifdef DEBUG_CDB_HANDLER
+#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CDB_H(x...)
+#endif
+
+/* #define DEBUG_CMD_MAP */
+#ifdef DEBUG_CMD_MAP
+#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CMD_M(x...)
+#endif
+
+/* #define DEBUG_MEM_ALLOC */
+#ifdef DEBUG_MEM_ALLOC
+#define DEBUG_MEM(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM(x...)
+#endif
+
+/* #define DEBUG_MEM2_ALLOC */
+#ifdef DEBUG_MEM2_ALLOC
+#define DEBUG_MEM2(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_MEM2(x...)
+#endif
+
+/* #define DEBUG_SG_CALC */
+#ifdef DEBUG_SG_CALC
+#define DEBUG_SC(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SC(x...)
+#endif
+
+/* #define DEBUG_SE_OBJ */
+#ifdef DEBUG_SE_OBJ
+#define DEBUG_SO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_SO(x...)
+#endif
+
+/* #define DEBUG_CMD_VOL */
+#ifdef DEBUG_CMD_VOL
+#define DEBUG_VOL(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_VOL(x...)
+#endif
+
+/* #define DEBUG_CMD_STOP */
+#ifdef DEBUG_CMD_STOP
+#define DEBUG_CS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CS(x...)
+#endif
+
+/* #define DEBUG_PASSTHROUGH */
+#ifdef DEBUG_PASSTHROUGH
+#define DEBUG_PT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_PT(x...)
+#endif
+
+/* #define DEBUG_TASK_STOP */
+#ifdef DEBUG_TASK_STOP
+#define DEBUG_TS(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TS(x...)
+#endif
+
+/* #define DEBUG_TRANSPORT_STOP */
+#ifdef DEBUG_TRANSPORT_STOP
+#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TRANSPORT_S(x...)
+#endif
+
+/* #define DEBUG_TASK_FAILURE */
+#ifdef DEBUG_TASK_FAILURE
+#define DEBUG_TF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TF(x...)
+#endif
+
+/* #define DEBUG_DEV_OFFLINE */
+#ifdef DEBUG_DEV_OFFLINE
+#define DEBUG_DO(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_DO(x...)
+#endif
+
+/* #define DEBUG_TASK_STATE */
+#ifdef DEBUG_TASK_STATE
+#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TSTATE(x...)
+#endif
+
+/* #define DEBUG_STATUS_THR */
+#ifdef DEBUG_STATUS_THR
+#define DEBUG_ST(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_ST(x...)
+#endif
+
+/* #define DEBUG_TASK_TIMEOUT */
+#ifdef DEBUG_TASK_TIMEOUT
+#define DEBUG_TT(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_TT(x...)
+#endif
+
+/* #define DEBUG_GENERIC_REQUEST_FAILURE */
+#ifdef DEBUG_GENERIC_REQUEST_FAILURE
+#define DEBUG_GRF(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_GRF(x...)
+#endif
+
+/* #define DEBUG_SAM_TASK_ATTRS */
+#ifdef DEBUG_SAM_TASK_ATTRS
+#define DEBUG_STA(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_STA(x...)
+#endif
+
+struct se_global *se_global;
+
+static struct kmem_cache *se_cmd_cache;
+static struct kmem_cache *se_sess_cache;
+struct kmem_cache *se_tmr_req_cache;
+struct kmem_cache *se_ua_cache;
+struct kmem_cache *se_mem_cache;
+struct kmem_cache *t10_pr_reg_cache;
+struct kmem_cache *t10_alua_lu_gp_cache;
+struct kmem_cache *t10_alua_lu_gp_mem_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_cache;
+struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+
+/* Used for transport_dev_get_map_*() */
+typedef int (*map_func_t)(struct se_task *, u32);
+
+static int transport_generic_write_pending(struct se_cmd *);
+static int transport_processing_thread(void *);
+static int __transport_execute_tasks(struct se_device *dev);
+static void transport_complete_task_attr(struct se_cmd *cmd);
+static void transport_direct_request_timeout(struct se_cmd *cmd);
+static void transport_free_dev_tasks(struct se_cmd *cmd);
+static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
+		unsigned long long starting_lba, u32 sectors,
+		enum dma_data_direction data_direction,
+		struct list_head *mem_list, int set_counts);
+static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
+		u32 dma_size);
+static int transport_generic_remove(struct se_cmd *cmd,
+		int release_to_pool, int session_reinstatement);
+static int transport_get_sectors(struct se_cmd *cmd);
+static struct list_head *transport_init_se_mem_list(void);
+static int transport_map_sg_to_mem(struct se_cmd *cmd,
+		struct list_head *se_mem_list, void *in_mem,
+		u32 *se_mem_cnt);
+static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
+		unsigned char *dst, struct list_head *se_mem_list);
+static void transport_release_fe_cmd(struct se_cmd *cmd);
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+		struct se_queue_obj *qobj);
+static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
+static void transport_stop_all_task_timers(struct se_cmd *cmd);
+
+int transport_emulate_control_cdb(struct se_task *task);
+
+int init_se_global(void)
+{
+	struct se_global *global;
+
+	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
+	if (!(global)) {
+		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
+		return -1;
+	}
+
+	INIT_LIST_HEAD(&global->g_lu_gps_list);
+	INIT_LIST_HEAD(&global->g_se_tpg_list);
+	INIT_LIST_HEAD(&global->g_hba_list);
+	INIT_LIST_HEAD(&global->g_se_dev_list);
+	spin_lock_init(&global->g_device_lock);
+	spin_lock_init(&global->hba_lock);
+	spin_lock_init(&global->se_tpg_lock);
+	spin_lock_init(&global->lu_gps_lock);
+	spin_lock_init(&global->plugin_class_lock);
+
+	se_cmd_cache = kmem_cache_create("se_cmd_cache",
+			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
+	if (!(se_cmd_cache)) {
+		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+		goto out;
+	}
+	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
+			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
+			0, NULL);
+	if (!(se_tmr_req_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+				" failed\n");
+		goto out;
+	}
+	se_sess_cache = kmem_cache_create("se_sess_cache",
+			sizeof(struct se_session), __alignof__(struct se_session),
+			0, NULL);
+	if (!(se_sess_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_session"
+				" failed\n");
+		goto out;
+	}
+	se_ua_cache = kmem_cache_create("se_ua_cache",
+			sizeof(struct se_ua), __alignof__(struct se_ua),
+			0, NULL);
+	if (!(se_ua_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+		goto out;
+	}
+	se_mem_cache = kmem_cache_create("se_mem_cache",
+			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
+	if (!(se_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+		goto out;
+	}
+	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
+			sizeof(struct t10_pr_registration),
+			__alignof__(struct t10_pr_registration), 0, NULL);
+	if (!(t10_pr_reg_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+				" failed\n");
+		goto out;
+	}
+	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
+			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
+			0, NULL);
+	if (!(t10_alua_lu_gp_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+				" failed\n");
+		goto out;
+	}
+	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
+			sizeof(struct t10_alua_lu_gp_member),
+			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
+	if (!(t10_alua_lu_gp_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+				"cache failed\n");
+		goto out;
+	}
+	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
+			sizeof(struct t10_alua_tg_pt_gp),
+			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
+	if (!(t10_alua_tg_pt_gp_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"cache failed\n");
+		goto out;
+	}
+	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
+			"t10_alua_tg_pt_gp_mem_cache",
+			sizeof(struct t10_alua_tg_pt_gp_member),
+			__alignof__(struct t10_alua_tg_pt_gp_member),
+			0, NULL);
+	if (!(t10_alua_tg_pt_gp_mem_cache)) {
+		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+				"mem_cache failed\n");
+		goto out;
+	}
+
+	se_global = global;
+
+	return 0;
+out:
+	if (se_cmd_cache)
+		kmem_cache_destroy(se_cmd_cache);
+	if (se_tmr_req_cache)
+		kmem_cache_destroy(se_tmr_req_cache);
+	if (se_sess_cache)
+		kmem_cache_destroy(se_sess_cache);
+	if (se_ua_cache)
+		kmem_cache_destroy(se_ua_cache);
+	if (se_mem_cache)
+		kmem_cache_destroy(se_mem_cache);
+	if (t10_pr_reg_cache)
+		kmem_cache_destroy(t10_pr_reg_cache);
+	if (t10_alua_lu_gp_cache)
+		kmem_cache_destroy(t10_alua_lu_gp_cache);
+	if (t10_alua_lu_gp_mem_cache)
+		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	if (t10_alua_tg_pt_gp_cache)
+		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	if (t10_alua_tg_pt_gp_mem_cache)
+		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kfree(global);
+	return -1;
+}
+
+void release_se_global(void)
+{
+	struct se_global *global;
+
+	global = se_global;
+	if (!(global))
+		return;
+
+	kmem_cache_destroy(se_cmd_cache);
+	kmem_cache_destroy(se_tmr_req_cache);
+	kmem_cache_destroy(se_sess_cache);
+	kmem_cache_destroy(se_ua_cache);
+	kmem_cache_destroy(se_mem_cache);
+	kmem_cache_destroy(t10_pr_reg_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_cache);
+	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
+	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+	kfree(global);
+
+	se_global = NULL;
+}
+
+void transport_init_queue_obj(struct se_queue_obj *qobj)
+{
+	atomic_set(&qobj->queue_cnt, 0);
+	INIT_LIST_HEAD(&qobj->qobj_list);
+	init_waitqueue_head(&qobj->thread_wq);
+	spin_lock_init(&qobj->cmd_queue_lock);
+}
+EXPORT_SYMBOL(transport_init_queue_obj);
+
+static int transport_subsystem_reqmods(void)
+{
+	int ret;
+
+	ret = request_module("target_core_iblock");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_iblock\n");
+
+	ret = request_module("target_core_file");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_file\n");
+
+	ret = request_module("target_core_pscsi");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_pscsi\n");
+
+	ret = request_module("target_core_stgt");
+	if (ret != 0)
+		printk(KERN_ERR "Unable to load target_core_stgt\n");
+
+	return 0;
+}
+
+int transport_subsystem_check_init(void)
+{
+	if (se_global->g_sub_api_initialized)
+		return 0;
+	/*
+	 * Request the loading of known TCM subsystem plugins..
+	 */
+	if (transport_subsystem_reqmods() < 0)
+		return -1;
+
+	se_global->g_sub_api_initialized = 1;
+	return 0;
+}
+
+struct se_session *transport_init_session(void)
+{
+	struct se_session *se_sess;
+
+	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+	if (!(se_sess)) {
+		printk(KERN_ERR "Unable to allocate struct se_session from"
+				" se_sess_cache\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&se_sess->sess_list);
+	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+	atomic_set(&se_sess->mib_ref_count, 0);
+
+	return se_sess;
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/*
+ * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
+ */
+void __transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	unsigned char buf[PR_REG_ISID_LEN];
+
+	se_sess->se_tpg = se_tpg;
+	se_sess->fabric_sess_ptr = fabric_sess_ptr;
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate an active
+	 * struct se_session.
+	 *
+	 * Only set for struct se_session's that will actually be moving I/O.
+	 * eg: *NOT* discovery sessions.
+	 */
+	if (se_nacl) {
+		/*
+		 * If the fabric module supports an ISID based TransportID,
+		 * save this value in binary from the fabric I_T Nexus now.
+		 */
+		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+			memset(&buf[0], 0, PR_REG_ISID_LEN);
+			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+					&buf[0], PR_REG_ISID_LEN);
+			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+		}
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		/*
+		 * The se_nacl->nacl_sess pointer will be set to the
+		 * last active I_T Nexus for each struct se_node_acl.
+		 */
+		se_nacl->nacl_sess = se_sess;
+
+		list_add_tail(&se_sess->sess_acl_list,
+			      &se_nacl->acl_sess_list);
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+}
+EXPORT_SYMBOL(__transport_register_session);
+
+void transport_register_session(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct se_session *se_sess,
+	void *fabric_sess_ptr)
+{
+	spin_lock_bh(&se_tpg->session_lock);
+	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
+	spin_unlock_bh(&se_tpg->session_lock);
+}
+EXPORT_SYMBOL(transport_register_session);
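+
+/*
+ * Example (illustrative sketch): a fabric module creates and registers an
+ * I_T Nexus using the exported session API.  my_tpg, my_sess and
+ * initiator_name are hypothetical fabric module symbols:
+ *
+ *	se_sess = transport_init_session();
+ *	if (IS_ERR(se_sess))
+ *		return PTR_ERR(se_sess);
+ *
+ *	se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ *					&my_tpg->se_tpg, initiator_name);
+ *	transport_register_session(&my_tpg->se_tpg, se_sess->se_node_acl,
+ *				   se_sess, (void *)my_sess);
+ */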
+
+void transport_deregister_session_configfs(struct se_session *se_sess)
+{
+	struct se_node_acl *se_nacl;
+
+	/*
+	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if ((se_nacl)) {
+		spin_lock_irq(&se_nacl->nacl_sess_lock);
+		list_del(&se_sess->sess_acl_list);
+		/*
+		 * If the session list is empty, then clear the pointer.
+		 * Otherwise, set the struct se_session pointer from the tail
+		 * element of the per struct se_node_acl active session list.
+		 */
+		if (list_empty(&se_nacl->acl_sess_list))
+			se_nacl->nacl_sess = NULL;
+		else {
+			se_nacl->nacl_sess = container_of(
+					se_nacl->acl_sess_list.prev,
+					struct se_session, sess_acl_list);
+		}
+		spin_unlock_irq(&se_nacl->nacl_sess_lock);
+	}
+}
+EXPORT_SYMBOL(transport_deregister_session_configfs);
+
+void transport_free_session(struct se_session *se_sess)
+{
+	kmem_cache_free(se_sess_cache, se_sess);
+}
+EXPORT_SYMBOL(transport_free_session);
+
+void transport_deregister_session(struct se_session *se_sess)
+{
+	struct se_portal_group *se_tpg = se_sess->se_tpg;
+	struct se_node_acl *se_nacl;
+
+	if (!(se_tpg)) {
+		transport_free_session(se_sess);
+		return;
+	}
+	/*
+	 * Wait for possible reference in drivers/target/target_core_mib.c:
+	 * scsi_att_intr_port_seq_show()
+	 */
+	while (atomic_read(&se_sess->mib_ref_count) != 0)
+		cpu_relax();
+
+	spin_lock_bh(&se_tpg->session_lock);
+	list_del(&se_sess->sess_list);
+	se_sess->se_tpg = NULL;
+	se_sess->fabric_sess_ptr = NULL;
+	spin_unlock_bh(&se_tpg->session_lock);
+
+	/*
+	 * Determine if we need to do extra work for this initiator node's
+	 * struct se_node_acl if it had been previously dynamically generated.
+	 */
+	se_nacl = se_sess->se_node_acl;
+	if ((se_nacl)) {
+		spin_lock_bh(&se_tpg->acl_node_lock);
+		if (se_nacl->dynamic_node_acl) {
+			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
+					se_tpg))) {
+				list_del(&se_nacl->acl_list);
+				se_tpg->num_node_acls--;
+				spin_unlock_bh(&se_tpg->acl_node_lock);
+
+				core_tpg_wait_for_nacl_pr_ref(se_nacl);
+				core_tpg_wait_for_mib_ref(se_nacl);
+				core_free_device_list_for_node(se_nacl, se_tpg);
+				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+						se_nacl);
+				spin_lock_bh(&se_tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&se_tpg->acl_node_lock);
+	}
+
+	transport_free_session(se_sess);
+
+	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+		TPG_TFO(se_tpg)->get_fabric_name());
+}
+EXPORT_SYMBOL(transport_deregister_session);
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
+{
+	struct se_device *dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	if (!T_TASK(cmd))
+		return;
+
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		dev = task->se_dev;
+		if (!(dev))
+			continue;
+
+		if (atomic_read(&task->task_active))
+			continue;
+
+		if (!(atomic_read(&task->task_state_active)))
+			continue;
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		list_del(&task->t_state_list);
+		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
+			CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		atomic_set(&task->task_state_active, 0);
+		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+	}
+}
+
+/*	transport_cmd_check_stop():
+ *
+ *	'transport_off = 1' determines if t_transport_active should be cleared.
+ *	'transport_off = 2' determines if task_dev_state should be removed.
+ *
+ *	A non-zero u8 t_state sets cmd->t_state.
+ *	Returns 1 when command is stopped, else 0.
+ */
+static int transport_cmd_check_stop(
+	struct se_cmd *cmd,
+	int transport_off,
+	u8 t_state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * Determine if IOCTL context caller is requesting the stopping of this
+	 * command for LUN shutdown purposes.
+	 */
+	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		cmd->deferred_t_state = cmd->t_state;
+		cmd->t_state = TRANSPORT_DEFERRED_CMD;
+		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&T_TASK(cmd)->transport_lun_stop_comp);
+		return 1;
+	}
+	/*
+	 * Determine if frontend context caller is requesting the stopping of
+	 * this command for frontend exceptions.
+	 */
+	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		cmd->deferred_t_state = cmd->t_state;
+		cmd->t_state = TRANSPORT_DEFERRED_CMD;
+		if (transport_off == 2)
+			transport_all_task_dev_remove_state(cmd);
+
+		/*
+		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
+		 * to FE.
+		 */
+		if (transport_off == 2)
+			cmd->se_lun = NULL;
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&T_TASK(cmd)->t_transport_stop_comp);
+		return 1;
+	}
+	if (transport_off) {
+		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+		if (transport_off == 2) {
+			transport_all_task_dev_remove_state(cmd);
+			/*
+			 * Clear struct se_cmd->se_lun before the transport_off == 2
+			 * handoff to fabric module.
+			 */
+			cmd->se_lun = NULL;
+			/*
+			 * Some fabric modules like tcm_loop can release
+			 * their internally allocated I/O reference and
+			 * struct se_cmd now.
+			 */
+			if (CMD_TFO(cmd)->check_stop_free != NULL) {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+				CMD_TFO(cmd)->check_stop_free(cmd);
+				return 1;
+			}
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		return 0;
+	} else if (t_state)
+		cmd->t_state = t_state;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return 0;
+}
+
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+	return transport_cmd_check_stop(cmd, 2, 0);
+}
+
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+	struct se_lun *lun = SE_LUN(cmd);
+	unsigned long flags;
+
+	if (!lun)
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto check_lun;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_free_dev_tasks(cmd);
+
+check_lun:
+	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
+		list_del(&cmd->se_lun_list);
+		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+#if 0
+		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+#endif
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
+}
+
+void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+{
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+	transport_lun_remove_cmd(cmd);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+	if (remove)
+		transport_generic_remove(cmd, 0, 0);
+}
+
+void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
+{
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+	if (transport_cmd_check_stop_to_fabric(cmd))
+		return;
+
+	transport_generic_remove(cmd, 0, 0);
+}
+
+static int transport_add_cmd_to_queue(
+	struct se_cmd *cmd,
+	int t_state)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct se_queue_obj *qobj = dev->dev_queue_obj;
+	struct se_queue_req *qr;
+	unsigned long flags;
+
+	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
+	if (!(qr)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" struct se_queue_req\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&qr->qr_list);
+
+	qr->cmd = (void *)cmd;
+	qr->state = t_state;
+
+	if (t_state) {
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+		cmd->t_state = t_state;
+		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	}
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	list_add_tail(&qr->qr_list, &qobj->qobj_list);
+	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	atomic_inc(&qobj->queue_cnt);
+	wake_up_interruptible(&qobj->thread_wq);
+	return 0;
+}
+
+/*
+ * Called with struct se_queue_obj->cmd_queue_lock held.
+ */
+static struct se_queue_req *
+__transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr = NULL;
+
+	if (list_empty(&qobj->qobj_list))
+		return NULL;
+
+	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+		break;
+
+	if (qr->cmd) {
+		cmd = (struct se_cmd *)qr->cmd;
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+	}
+	list_del(&qr->qr_list);
+	atomic_dec(&qobj->queue_cnt);
+
+	return qr;
+}
+
+static struct se_queue_req *
+transport_get_qr_from_queue(struct se_queue_obj *qobj)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (list_empty(&qobj->qobj_list)) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return NULL;
+	}
+
+	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
+		break;
+
+	if (qr->cmd) {
+		cmd = (struct se_cmd *)qr->cmd;
+		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+	}
+	list_del(&qr->qr_list);
+	atomic_dec(&qobj->queue_cnt);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	return qr;
+}
+
+static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
+		struct se_queue_obj *qobj)
+{
+	struct se_cmd *q_cmd;
+	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
+		q_cmd = (struct se_cmd *)qr->cmd;
+		if (q_cmd != cmd)
+			continue;
+
+		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
+		atomic_dec(&qobj->queue_cnt);
+		list_del(&qr->qr_list);
+		kfree(qr);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
+		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+	}
+}
+
+/*
+ * Completion function used by TCM subsystem plugins (such as FILEIO)
+ * for queueing up response from struct se_subsystem_api->do_task()
+ */
+void transport_complete_sync_cache(struct se_cmd *cmd, int good)
+{
+	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+				struct se_task, t_list);
+
+	if (good) {
+		cmd->scsi_status = SAM_STAT_GOOD;
+		task->task_scsi_status = GOOD;
+	} else {
+		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
+		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
+		TASK_CMD(task)->transport_error_status =
+					PYX_TRANSPORT_ILLEGAL_REQUEST;
+	}
+
+	transport_complete_task(task, good);
+}
+EXPORT_SYMBOL(transport_complete_sync_cache);
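+
+/*
+ * Example (illustrative sketch): a subsystem plugin such as FILEIO would
+ * typically complete an emulated SYNCHRONIZE_CACHE from its do_task() path
+ * with something like the following, where ret is the hypothetical plugin
+ * specific flush result:
+ *
+ *	transport_complete_sync_cache(cmd, (ret >= 0) ? 1 : 0);
+ */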
+
+/*	transport_complete_task():
+ *
+ *	Called from interrupt and non-interrupt context depending
+ *	on the transport plugin.
+ */
+void transport_complete_task(struct se_task *task, int success)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_device *dev = task->se_dev;
+	int t_state;
+	unsigned long flags;
+#if 0
+	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+			T_TASK(cmd)->t_task_cdb[0], dev);
+#endif
+	if (dev) {
+		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+		atomic_inc(&dev->depth_left);
+		atomic_inc(&SE_HBA(dev)->left_queue_depth);
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+	}
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&task->task_active, 0);
+
+	/*
+	 * See if any sense data exists, if so set the TASK_SENSE flag.
+	 * Also check for any other post completion work that needs to be
+	 * done by the plugins.
+	 */
+	if (dev && dev->transport->transport_complete) {
+		if (dev->transport->transport_complete(task) != 0) {
+			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+			task->task_sense = 1;
+			success = 1;
+		}
+	}
+
+	/*
+	 * See if we are waiting for outstanding struct se_task
+	 * to complete for an exception condition
+	 */
+	if (atomic_read(&task->task_stop)) {
+		/*
+		 * Decrement T_TASK(cmd)->t_se_count if this task had
+		 * previously thrown its timeout exception handler.
+		 */
+		if (atomic_read(&task->task_timeout)) {
+			atomic_dec(&T_TASK(cmd)->t_se_count);
+			atomic_set(&task->task_timeout, 0);
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		complete(&task->task_stop_comp);
+		return;
+	}
+	/*
+	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
+	 * left counter to determine when the struct se_cmd is ready to be queued to
+	 * the processing thread.
+	 */
+	if (atomic_read(&task->task_timeout)) {
+		if (!(atomic_dec_and_test(
+				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+				flags);
+			return;
+		}
+		t_state = TRANSPORT_COMPLETE_TIMEOUT;
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_add_cmd_to_queue(cmd, t_state);
+		return;
+	}
+	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+
+	/*
+	 * Decrement the outstanding t_task_cdbs_left count.  The last
+	 * struct se_task for the struct se_cmd queues the command to the
+	 * device processing queue, depending upon int success.
+	 */
+	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+		if (!success)
+			T_TASK(cmd)->t_tasks_failed = 1;
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	if (!success || T_TASK(cmd)->t_tasks_failed) {
+		t_state = TRANSPORT_COMPLETE_FAILURE;
+		if (!task->task_error_status) {
+			task->task_error_status =
+				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+			cmd->transport_error_status =
+				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+	} else {
+		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+		t_state = TRANSPORT_COMPLETE_OK;
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_add_cmd_to_queue(cmd, t_state);
+}
+EXPORT_SYMBOL(transport_complete_task);
+
+/*
+ * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
+ * struct se_task list is ready to be added to the active execution list
+ * of the struct se_device.
+ *
+ * Called with struct se_device->execute_task_lock held.
+ */
+static inline int transport_add_task_check_sam_attr(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	/*
+	 * No SAM Task attribute emulation enabled, add to tail of
+	 * execution queue
+	 */
+	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
+		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+		return 0;
+	}
+	/*
+	 * HEAD_OF_QUEUE attribute for received CDB, which means
+	 * the first task that is associated with a struct se_cmd goes to
+	 * head of the struct se_device->execute_task_list, and task_prev
+	 * after that for each subsequent task
+	 */
+	if (task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		list_add(&task->t_execute_list,
+				(task_prev != NULL) ?
+				&task_prev->t_execute_list :
+				&dev->execute_task_list);
+
+		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+				" in execution queue\n",
+				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+		return 1;
+	}
+	/*
+	 * ORDERED, SIMPLE or UNTAGGED attribute tasks are added to the end
+	 * of the struct se_device->execute_task_list once they have
+	 * transitioned from the Dormant to the Active state.
+	 */
+	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
+	return 0;
+}
+
+/*	__transport_add_task_to_execute_queue():
+ *
+ *	Called with struct se_device->execute_task_lock held.
+ */
+static void __transport_add_task_to_execute_queue(
+	struct se_task *task,
+	struct se_task *task_prev,
+	struct se_device *dev)
+{
+	int head_of_queue;
+
+	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
+	atomic_inc(&dev->execute_tasks);
+
+	if (atomic_read(&task->task_state_active))
+		return;
+	/*
+	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
+	 * state list as well.  Running without SAM Task Attribute emulation
+	 * will always return head_of_queue == 0 here.
+	 */
+	if (head_of_queue)
+		list_add(&task->t_state_list, (task_prev) ?
+				&task_prev->t_state_list :
+				&dev->state_task_list);
+	else
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+
+	atomic_set(&task->task_state_active, 1);
+
+	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+		task, dev);
+}
+
+static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
+{
+	struct se_device *dev;
+	struct se_task *task;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		dev = task->se_dev;
+
+		if (atomic_read(&task->task_state_active))
+			continue;
+
+		spin_lock(&dev->execute_task_lock);
+		list_add_tail(&task->t_state_list, &dev->state_task_list);
+		atomic_set(&task->task_state_active, 1);
+
+		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+			CMD_TFO(task->task_se_cmd)->get_task_tag(
+			task->task_se_cmd), task, dev);
+
+		spin_unlock(&dev->execute_task_lock);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
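+/*
+ * Add all tasks from a struct se_cmd that are not already queued to the
+ * struct se_device execution queue, honouring SAM Task Attribute
+ * HEAD_OF_QUEUE ordering when emulation is enabled.
+ */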
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_task *task, *task_prev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		if (atomic_read(&task->task_execute_queue))
+			continue;
+		/*
+		 * __transport_add_task_to_execute_queue() handles the
+		 * SAM Task Attribute emulation if enabled
+		 */
+		__transport_add_task_to_execute_queue(task, task_prev, dev);
+		atomic_set(&task->task_execute_queue, 1);
+		task_prev = task;
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	return;
+}
+
+/*	transport_get_task_from_execute_queue():
+ *
+ *	Called with dev->execute_task_lock held.
+ */
+static struct se_task *
+transport_get_task_from_execute_queue(struct se_device *dev)
+{
+	struct se_task *task;
+
+	if (list_empty(&dev->execute_task_list))
+		return NULL;
+
+	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
+		break;
+
+	list_del(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+
+	return task;
+}
+
+/*	transport_remove_task_from_execute_queue():
+ *
+ *
+ */
+static void transport_remove_task_from_execute_queue(
+	struct se_task *task,
+	struct se_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_del(&task->t_execute_list);
+	atomic_dec(&dev->execute_tasks);
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
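+/*
+ * Return a human readable string for the DMA data direction of a
+ * struct se_cmd.
+ */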
+unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
+{
+	switch (cmd->data_direction) {
+	case DMA_NONE:
+		return "NONE";
+	case DMA_FROM_DEVICE:
+		return "READ";
+	case DMA_TO_DEVICE:
+		return "WRITE";
+	case DMA_BIDIRECTIONAL:
+		return "BIDI";
+	default:
+		break;
+	}
+
+	return "UNKNOWN";
+}
+
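+/*
+ * Append the current struct se_device status, queue depth usage and
+ * block size / max_sectors attributes to the buffer at *bl.
+ */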
+void transport_dump_dev_state(
+	struct se_device *dev,
+	char *b,
+	int *bl)
+{
+	*bl += sprintf(b + *bl, "Status: ");
+	switch (dev->dev_status) {
+	case TRANSPORT_DEVICE_ACTIVATED:
+		*bl += sprintf(b + *bl, "ACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "DEACTIVATED");
+		break;
+	case TRANSPORT_DEVICE_SHUTDOWN:
+		*bl += sprintf(b + *bl, "SHUTDOWN");
+		break;
+	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
+	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
+		*bl += sprintf(b + *bl, "OFFLINE");
+		break;
+	default:
+		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
+		break;
+	}
+
+	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
+		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
+		dev->queue_depth);
+	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
+		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+	*bl += sprintf(b + *bl, "        ");
+}
+
+/*	transport_release_all_cmds():
+ *
+ *
+ */
+static void transport_release_all_cmds(struct se_device *dev)
+{
+	struct se_cmd *cmd = NULL;
+	struct se_queue_req *qr = NULL, *qr_p = NULL;
+	int bug_out = 0, t_state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
+				qr_list) {
+
+		cmd = (struct se_cmd *)qr->cmd;
+		t_state = qr->state;
+		list_del(&qr->qr_list);
+		kfree(qr);
+		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+				flags);
+
+		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+			" t_state: %u directly\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+
+		transport_release_fe_cmd(cmd);
+		bug_out = 1;
+
+		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+#if 0
+	if (bug_out)
+		BUG();
+#endif
+}
+
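+/*
+ * Decode the T10 VPD protocol identifier into a descriptive string and
+ * copy it into p_buf when supplied, otherwise print it to the kernel log.
+ */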
+void transport_dump_vpd_proto_id(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
+
+	switch (vpd->protocol_identifier) {
+	case 0x00:
+		sprintf(buf+len, "Fibre Channel\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "Parallel SCSI\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SSA\n");
+		break;
+	case 0x30:
+		sprintf(buf+len, "IEEE 1394\n");
+		break;
+	case 0x40:
+		sprintf(buf+len, "SCSI Remote Direct Memory Access"
+				" Protocol\n");
+		break;
+	case 0x50:
+		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
+		break;
+	case 0x60:
+		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
+		break;
+	case 0x70:
+		sprintf(buf+len, "Automation/Drive Interface Transport"
+				" Protocol\n");
+		break;
+	case 0x80:
+		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n",
+				vpd->protocol_identifier);
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk(KERN_INFO "%s", buf);
+}
+
+void
+transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * Check if the Protocol Identifier Valid (PIV) bit is set..
+	 *
+	 * from spc3r23.pdf section 7.5.1
+	 */
+	 if (page_83[1] & 0x80) {
+		vpd->protocol_identifier = (page_83[0] & 0xf0);
+		vpd->protocol_identifier_set = 1;
+		transport_dump_vpd_proto_id(vpd, NULL, 0);
+	}
+}
+EXPORT_SYMBOL(transport_set_vpd_proto_id);
+
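+/*
+ * Decode the T10 VPD identifier association into a descriptive string.
+ * Returns -1 for unknown association values.
+ */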
+int transport_dump_vpd_assoc(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0, len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Association: ");
+
+	switch (vpd->association) {
+	case 0x00:
+		sprintf(buf+len, "addressed logical unit\n");
+		break;
+	case 0x10:
+		sprintf(buf+len, "target port\n");
+		break;
+	case 0x20:
+		sprintf(buf+len, "SCSI target device\n");
+		break;
+	default:
+		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identification association..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 297
+	 */
+	vpd->association = (page_83[1] & 0x30);
+	return transport_dump_vpd_assoc(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_assoc);
+
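+/*
+ * Decode the T10 VPD identifier type into a descriptive string.
+ * Returns -1 for unsupported identifier types.
+ */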
+int transport_dump_vpd_ident_type(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0, len;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+	len = sprintf(buf, "T10 VPD Identifier Type: ");
+
+	switch (vpd->device_identifier_type) {
+	case 0x00:
+		sprintf(buf+len, "Vendor specific\n");
+		break;
+	case 0x01:
+		sprintf(buf+len, "T10 Vendor ID based\n");
+		break;
+	case 0x02:
+		sprintf(buf+len, "EUI-64 based\n");
+		break;
+	case 0x03:
+		sprintf(buf+len, "NAA\n");
+		break;
+	case 0x04:
+		sprintf(buf+len, "Relative target port identifier\n");
+		break;
+	case 0x08:
+		sprintf(buf+len, "SCSI name string\n");
+		break;
+	default:
+		sprintf(buf+len, "Unsupported: 0x%02x\n",
+				vpd->device_identifier_type);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	/*
+	 * The VPD identifier type..
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 298
+	 */
+	vpd->device_identifier_type = (page_83[1] & 0x0f);
+	return transport_dump_vpd_ident_type(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident_type);
+
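+/*
+ * Dump the T10 VPD device identifier based upon its code set (binary,
+ * ASCII or UTF-8).  Returns -1 for unsupported encodings.
+ */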
+int transport_dump_vpd_ident(
+	struct t10_vpd *vpd,
+	unsigned char *p_buf,
+	int p_buf_len)
+{
+	unsigned char buf[VPD_TMP_BUF_SIZE];
+	int ret = 0;
+
+	memset(buf, 0, VPD_TMP_BUF_SIZE);
+
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x02: /* ASCII */
+		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	case 0x03: /* UTF-8 */
+		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
+			&vpd->device_identifier[0]);
+		break;
+	default:
+		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
+			" 0x%02x", vpd->device_identifier_code_set);
+		ret = -1;
+		break;
+	}
+
+	if (p_buf)
+		strncpy(p_buf, buf, p_buf_len);
+	else
+		printk("%s", buf);
+
+	return ret;
+}
+
+int
+transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
+{
+	static const char hex_str[] = "0123456789abcdef";
+	int j = 0, i = 4; /* offset to start of the identifier */
+
+	/*
+	 * The VPD Code Set (encoding)
+	 *
+	 * from spc3r23.pdf Section 7.6.3.1 Table 296
+	 */
+	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
+	switch (vpd->device_identifier_code_set) {
+	case 0x01: /* Binary */
+		vpd->device_identifier[j++] =
+				hex_str[vpd->device_identifier_type];
+		while (i < (4 + page_83[3])) {
+			vpd->device_identifier[j++] =
+				hex_str[(page_83[i] & 0xf0) >> 4];
+			vpd->device_identifier[j++] =
+				hex_str[page_83[i] & 0x0f];
+			i++;
+		}
+		break;
+	case 0x02: /* ASCII */
+	case 0x03: /* UTF-8 */
+		while (i < (4 + page_83[3]))
+			vpd->device_identifier[j++] = page_83[i++];
+		break;
+	default:
+		break;
+	}
+
+	return transport_dump_vpd_ident(vpd, NULL, 0);
+}
+EXPORT_SYMBOL(transport_set_vpd_ident);
+
+static void core_setup_task_attr_emulation(struct se_device *dev)
+{
+	/*
+	 * If this device is from Target_Core_Mod/pSCSI, disable the
+	 * SAM Task Attribute emulation.
+	 *
+	 * This is currently not available in upstream Linux/SCSI Target
+	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
+	 */
+	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
+		return;
+	}
+
+	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
+	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+		" device\n", TRANSPORT(dev)->name,
+		TRANSPORT(dev)->get_device_rev(dev));
+}
+
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+	struct t10_wwn *wwn = DEV_T10_WWN(dev);
+	int i, device_type;
+	/*
+	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+	 */
+	printk("  Vendor: ");
+	for (i = 0; i < 8; i++)
+		if (wwn->vendor[i] >= 0x20)
+			printk("%c", wwn->vendor[i]);
+		else
+			printk(" ");
+
+	printk("  Model: ");
+	for (i = 0; i < 16; i++)
+		if (wwn->model[i] >= 0x20)
+			printk("%c", wwn->model[i]);
+		else
+			printk(" ");
+
+	printk("  Revision: ");
+	for (i = 0; i < 4; i++)
+		if (wwn->revision[i] >= 0x20)
+			printk("%c", wwn->revision[i]);
+		else
+			printk(" ");
+
+	printk("\n");
+
+	device_type = TRANSPORT(dev)->get_device_type(dev);
+	printk("  Type:   %s ", scsi_device_type(device_type));
+	printk("                 ANSI SCSI revision: %02x\n",
+				TRANSPORT(dev)->get_device_rev(dev));
+}
+
+struct se_device *transport_add_device_to_core_hba(
+	struct se_hba *hba,
+	struct se_subsystem_api *transport,
+	struct se_subsystem_dev *se_dev,
+	u32 device_flags,
+	void *transport_dev,
+	struct se_dev_limits *dev_limits,
+	const char *inquiry_prod,
+	const char *inquiry_rev)
+{
+	int ret = 0, force_pt;
+	struct se_device  *dev;
+
+	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
+	if (!(dev)) {
+		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+		return NULL;
+	}
+	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
+	if (!(dev->dev_queue_obj)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" dev->dev_queue_obj\n");
+		kfree(dev);
+		return NULL;
+	}
+	transport_init_queue_obj(dev->dev_queue_obj);
+
+	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
+					GFP_KERNEL);
+	if (!(dev->dev_status_queue_obj)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" dev->dev_status_queue_obj\n");
+		kfree(dev->dev_queue_obj);
+		kfree(dev);
+		return NULL;
+	}
+	transport_init_queue_obj(dev->dev_status_queue_obj);
+
+	dev->dev_flags		= device_flags;
+	dev->dev_status		|= TRANSPORT_DEVICE_DEACTIVATED;
+	dev->dev_ptr		= (void *) transport_dev;
+	dev->se_hba		= hba;
+	dev->se_sub_dev		= se_dev;
+	dev->transport		= transport;
+	atomic_set(&dev->active_cmds, 0);
+	INIT_LIST_HEAD(&dev->dev_list);
+	INIT_LIST_HEAD(&dev->dev_sep_list);
+	INIT_LIST_HEAD(&dev->dev_tmr_list);
+	INIT_LIST_HEAD(&dev->execute_task_list);
+	INIT_LIST_HEAD(&dev->delayed_cmd_list);
+	INIT_LIST_HEAD(&dev->ordered_cmd_list);
+	INIT_LIST_HEAD(&dev->state_task_list);
+	spin_lock_init(&dev->execute_task_lock);
+	spin_lock_init(&dev->delayed_cmd_lock);
+	spin_lock_init(&dev->ordered_cmd_lock);
+	spin_lock_init(&dev->state_task_lock);
+	spin_lock_init(&dev->dev_alua_lock);
+	spin_lock_init(&dev->dev_reservation_lock);
+	spin_lock_init(&dev->dev_status_lock);
+	spin_lock_init(&dev->dev_status_thr_lock);
+	spin_lock_init(&dev->se_port_lock);
+	spin_lock_init(&dev->se_tmr_lock);
+
+	dev->queue_depth	= dev_limits->queue_depth;
+	atomic_set(&dev->depth_left, dev->queue_depth);
+	atomic_set(&dev->dev_ordered_id, 0);
+
+	se_dev_set_default_attribs(dev, dev_limits);
+
+	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+	dev->creation_time = get_jiffies_64();
+	spin_lock_init(&dev->stats_lock);
+
+	spin_lock(&hba->device_lock);
+	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
+	hba->dev_count++;
+	spin_unlock(&hba->device_lock);
+	/*
+	 * Setup the SAM Task Attribute emulation for struct se_device
+	 */
+	core_setup_task_attr_emulation(dev);
+	/*
+	 * Force PR and ALUA passthrough emulation with internal object use.
+	 */
+	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
+	/*
+	 * Setup the Reservations infrastructure for struct se_device
+	 */
+	core_setup_reservations(dev, force_pt);
+	/*
+	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
+	 */
+	if (core_setup_alua(dev, force_pt) < 0)
+		goto out;
+
+	/*
+	 * Startup the struct se_device processing thread
+	 */
+	dev->process_thread = kthread_run(transport_processing_thread, dev,
+					  "LIO_%s", TRANSPORT(dev)->name);
+	if (IS_ERR(dev->process_thread)) {
+		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+			TRANSPORT(dev)->name);
+		goto out;
+	}
+
+	/*
+	 * Preload the initial INQUIRY const values if we are doing
+	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+	 * passthrough because this is being provided by the backend LLD.
+	 * This is required so that transport_get_inquiry() copies these
+	 * originals once back into DEV_T10_WWN(dev) for the virtual device
+	 * setup.
+	 */
+	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+		if (!(inquiry_prod) || !(inquiry_rev)) {
+			printk(KERN_ERR "All non TCM/pSCSI plugins require"
+				" INQUIRY consts\n");
+			goto out;
+		}
+
+		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
+		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
+		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+	}
+	scsi_dump_inquiry(dev);
+
+out:
+	if (!ret)
+		return dev;
+	kthread_stop(dev->process_thread);
+
+	spin_lock(&hba->device_lock);
+	list_del(&dev->dev_list);
+	hba->dev_count--;
+	spin_unlock(&hba->device_lock);
+
+	se_release_vpd_for_dev(dev);
+
+	kfree(dev->dev_status_queue_obj);
+	kfree(dev->dev_queue_obj);
+	kfree(dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(transport_add_device_to_core_hba);
+
+/*	transport_generic_prepare_cdb():
+ *
+ *	Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
+ *	contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
+ *	The point is that since we are mapping iSCSI LUNs to
+ *	SCSI Target IDs, having a non-zero LUN in the CDB will throw the
+ *	devices and HBAs for a loop.
+ */
+static inline void transport_generic_prepare_cdb(
+	unsigned char *cdb)
+{
+	switch (cdb[0]) {
+	case READ_10: /* SBC - RDProtect */
+	case READ_12: /* SBC - RDProtect */
+	case READ_16: /* SBC - RDProtect */
+	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+	case VERIFY: /* SBC - VRProtect */
+	case VERIFY_16: /* SBC - VRProtect */
+	case WRITE_VERIFY: /* SBC - VRProtect */
+	case WRITE_VERIFY_12: /* SBC - VRProtect */
+		break;
+	default:
+		cdb[1] &= 0x1f; /* clear logical unit number */
+		break;
+	}
+}
+
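+/*
+ * Allocate a struct se_task from the backend subsystem plugin, initialize
+ * its lists and stop completion, and add it to the command's t_task_list
+ * under t_state_lock.
+ */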
+static struct se_task *
+transport_generic_get_task(struct se_cmd *cmd,
+		enum dma_data_direction data_direction)
+{
+	struct se_task *task;
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned long flags;
+
+	task = dev->transport->alloc_task(cmd);
+	if (!task) {
+		printk(KERN_ERR "Unable to allocate struct se_task\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&task->t_list);
+	INIT_LIST_HEAD(&task->t_execute_list);
+	INIT_LIST_HEAD(&task->t_state_list);
+	init_completion(&task->task_stop_comp);
+	task->task_no = T_TASK(cmd)->t_tasks_no++;
+	task->task_se_cmd = cmd;
+	task->se_dev = dev;
+	task->task_data_direction = data_direction;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return task;
+}
+
+static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+
+void transport_device_setup_cmd(struct se_cmd *cmd)
+{
+	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
+}
+EXPORT_SYMBOL(transport_device_setup_cmd);
+
+/*
+ * Used by fabric modules containing a local struct se_cmd within their
+ * fabric dependent per I/O descriptor.
+ */
+void transport_init_se_cmd(
+	struct se_cmd *cmd,
+	struct target_core_fabric_ops *tfo,
+	struct se_session *se_sess,
+	u32 data_length,
+	int data_direction,
+	int task_attr,
+	unsigned char *sense_buffer)
+{
+	INIT_LIST_HEAD(&cmd->se_lun_list);
+	INIT_LIST_HEAD(&cmd->se_delayed_list);
+	INIT_LIST_HEAD(&cmd->se_ordered_list);
+	/*
+	 * Setup t_task pointer to t_task_backstore
+	 */
+	cmd->t_task = &cmd->t_task_backstore;
+
+	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
+	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
+	spin_lock_init(&T_TASK(cmd)->t_state_lock);
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+
+	cmd->se_tfo = tfo;
+	cmd->se_sess = se_sess;
+	cmd->data_length = data_length;
+	cmd->data_direction = data_direction;
+	cmd->sam_task_attr = task_attr;
+	cmd->sense_buffer = sense_buffer;
+}
+EXPORT_SYMBOL(transport_init_se_cmd);
+
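+/*
+ * Validate the received SAM Task Attribute and assign an ordered id when
+ * task attribute emulation is enabled for the backend device.  The ACA
+ * attribute is rejected as unsupported.
+ */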
+static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+{
+	/*
+	 * Check if SAM Task Attribute emulation is enabled for this
+	 * struct se_device storage object
+	 */
+	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 0;
+
+	if (cmd->sam_task_attr == TASK_ATTR_ACA) {
+		DEBUG_STA("SAM Task Attribute ACA"
+			" emulation is not supported\n");
+		return -1;
+	}
+	/*
+	 * Used to determine when ORDERED commands should go from
+	 * Dormant to Active status.
+	 */
+	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+	smp_mb__after_atomic_inc();
+	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+			cmd->se_ordered_id, cmd->sam_task_attr,
+			TRANSPORT(cmd->se_dev)->name);
+	return 0;
+}
+
+void transport_free_se_cmd(
+	struct se_cmd *se_cmd)
+{
+	if (se_cmd->se_tmr_req)
+		core_tmr_release_req(se_cmd->se_tmr_req);
+	/*
+	 * Check and free any extended CDB buffer that was allocated
+	 */
+	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
+		kfree(T_TASK(se_cmd)->t_task_cdb);
+}
+EXPORT_SYMBOL(transport_free_se_cmd);
+
+static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);
+
+/*	transport_generic_allocate_tasks():
+ *
+ *	Called from fabric RX Thread.
+ */
+int transport_generic_allocate_tasks(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	int ret;
+
+	transport_generic_prepare_cdb(cdb);
+
+	/*
+	 * This is needed for early exceptions.
+	 */
+	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+
+	transport_device_setup_cmd(cmd);
+	/*
+	 * Ensure that the received CDB is less than the max (252 + 8) bytes
+	 * for VARIABLE_LENGTH_CMD
+	 */
+	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
+		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+		return -1;
+	}
+	/*
+	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
+	 * allocate the additional extended CDB buffer now..  Otherwise
+	 * setup the pointer from __t_task_cdb to t_task_cdb.
+	 */
+	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
+		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+						GFP_KERNEL);
+		if (!(T_TASK(cmd)->t_task_cdb)) {
+			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
+				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+				scsi_command_size(cdb),
+				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
+			return -1;
+		}
+	} else
+		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+	/*
+	 * Copy the original CDB into T_TASK(cmd).
+	 */
+	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+	/*
+	 * Setup the received CDB based on SCSI defined opcodes and
+	 * perform unit attention, persistent reservations and ALUA
+	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
+	 * pointer is expected to be setup before we reach this point.
+	 */
+	ret = transport_generic_cmd_sequencer(cmd, cdb);
+	if (ret < 0)
+		return ret;
+	/*
+	 * Check for SAM Task Attribute Emulation
+	 */
+	if (transport_check_alloc_task_attr(cmd) < 0) {
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		return -2;
+	}
+	spin_lock(&cmd->se_lun->lun_sep_lock);
+	if (cmd->se_lun->lun_sep)
+		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
+	spin_unlock(&cmd->se_lun->lun_sep_lock);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_allocate_tasks);
+
+/*
+ * Used by fabric module frontends not defining a TFO->new_cmd_map()
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
+ */
+int transport_generic_handle_cdb(
+	struct se_cmd *cmd)
+{
+	if (!SE_LUN(cmd)) {
+		dump_stack();
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb);
+
+/*
+ * Used by fabric module frontends defining a TFO->new_cmd_map() caller
+ * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
+ * complete setup in TCM process context w/ TFO->new_cmd_map().
+ */
+int transport_generic_handle_cdb_map(
+	struct se_cmd *cmd)
+{
+	if (!SE_LUN(cmd)) {
+		dump_stack();
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_cdb_map);
+
+/*	transport_generic_handle_data():
+ *
+ *
+ */
+int transport_generic_handle_data(
+	struct se_cmd *cmd)
+{
+	/*
+	 * For the software fabric case, we assume the nexus is being
+	 * failed/shutdown when signals are pending from the kthread context
+	 * caller, so we return a failure.  For the HW target mode case running
+	 * in interrupt code, the signal_pending() check is skipped.
+	 */
+	if (!in_interrupt() && signal_pending(current))
+		return -1;
+	/*
+	 * If the received CDB has already been ABORTED by the generic
+	 * target engine, we now call transport_check_aborted_status()
+	 * to queue any delayed TASK_ABORTED status for the received CDB to the
+	 * fabric module as we are expecting no further incoming DATA OUT
+	 * sequences at this point.
+	 */
+	if (transport_check_aborted_status(cmd, 1) != 0)
+		return 0;
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_data);
+
+/*	transport_generic_handle_tmr():
+ *
+ *
+ */
+int transport_generic_handle_tmr(
+	struct se_cmd *cmd)
+{
+	/*
+	 * This is needed for early exceptions.
+	 */
+	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
+	transport_device_setup_cmd(cmd);
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
+
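+/*
+ * Stop all outstanding struct se_task for a command: tasks that have not
+ * been sent are removed from the execution queue, while active tasks are
+ * flagged with task_stop and waited upon.  Returns the number of tasks
+ * that could be neither removed nor stopped.
+ */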
+static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int ret = 0;
+
+	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+		CMD_TFO(cmd)->get_task_tag(cmd));
+
+	/*
+	 * No tasks remain in the execution queue
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+		DEBUG_TS("task_no[%d] - Processing task %p\n",
+				task->task_no, task);
+		/*
+		 * If the struct se_task has not been sent and is not active,
+		 * remove the struct se_task from the execution queue.
+		 */
+		if (!atomic_read(&task->task_sent) &&
+		    !atomic_read(&task->task_active)) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			transport_remove_task_from_execute_queue(task,
+					task->se_dev);
+
+			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+				task->task_no);
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			continue;
+		}
+
+		/*
+		 * If the struct se_task is active, sleep until it is returned
+		 * from the plugin.
+		 */
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+
+			DEBUG_TS("task_no[%d] - Waiting to complete\n",
+				task->task_no);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_TS("task_no[%d] - Stopped successfully\n",
+				task->task_no);
+
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		} else {
+			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+			ret++;
+		}
+
+		__transport_stop_task_timer(task, &flags);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return ret;
+}
+
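+/*
+ * Return one queue depth slot to both the struct se_device and its
+ * parent HBA after a request failure.
+ */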
+static void transport_failure_reset_queue_depth(struct se_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+	atomic_inc(&dev->depth_left);
+	atomic_inc(&SE_HBA(dev)->left_queue_depth);
+	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+}
+
+/*
+ * Handle SAM-esque emulation for generic transport request failures.
+ */
+static void transport_generic_request_failure(
+	struct se_cmd *cmd,
+	struct se_device *dev,
+	int complete,
+	int sc)
+{
+	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+		" CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+		T_TASK(cmd)->t_task_cdb[0]);
+	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+		" %d/%d transport_error_status: %d\n",
+		CMD_TFO(cmd)->get_cmd_state(cmd),
+		cmd->t_state, cmd->deferred_t_state,
+		cmd->transport_error_status);
+	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
+		" t_transport_active: %d t_transport_stop: %d"
+		" t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+		atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
+		atomic_read(&T_TASK(cmd)->t_transport_active),
+		atomic_read(&T_TASK(cmd)->t_transport_stop),
+		atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+	transport_stop_all_task_timers(cmd);
+
+	if (dev)
+		transport_failure_reset_queue_depth(dev);
+	/*
+	 * For SAM Task Attribute emulation for failed struct se_cmd
+	 */
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+
+	if (complete) {
+		transport_direct_request_timeout(cmd);
+		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+	}
+
+	switch (cmd->transport_error_status) {
+	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
+		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+		break;
+	case PYX_TRANSPORT_INVALID_CDB_FIELD:
+		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+		break;
+	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
+		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+		break;
+	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
+		if (!sc)
+			transport_new_cmd_failure(cmd);
+		/*
+		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
+		 * we force this session to fall back to session
+		 * recovery.
+		 */
+		CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
+		CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+
+		goto check_stop;
+	case PYX_TRANSPORT_LU_COMM_FAILURE:
+	case PYX_TRANSPORT_ILLEGAL_REQUEST:
+		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		break;
+	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
+		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+		break;
+	case PYX_TRANSPORT_WRITE_PROTECTED:
+		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+		break;
+	case PYX_TRANSPORT_RESERVATION_CONFLICT:
+		/*
+		 * No SENSE Data payload for this case, set SCSI Status
+		 * and queue the response to $FABRIC_MOD.
+		 *
+		 * Uses linux/include/scsi/scsi.h SAM status codes defs
+		 */
+		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+		/*
+		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+		 * CONFLICT STATUS.
+		 *
+		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+		 */
+		if (SE_SESS(cmd) &&
+		    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+			core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+				cmd->orig_fe_lun, 0x2C,
+				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+
+		CMD_TFO(cmd)->queue_status(cmd);
+		goto check_stop;
+	case PYX_TRANSPORT_USE_SENSE_REASON:
+		/*
+		 * struct se_cmd->scsi_sense_reason already set
+		 */
+		break;
+	default:
+		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			cmd->transport_error_status);
+		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+		break;
+	}
+
+	if (!sc)
+		transport_new_cmd_failure(cmd);
+	else
+		transport_send_check_condition_and_sense(cmd,
+			cmd->scsi_sense_reason, 0);
+check_stop:
+	transport_lun_remove_cmd(cmd);
+	if (!(transport_cmd_check_stop_to_fabric(cmd)))
+		;
+}
+
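+/*
+ * Drop the extra t_se_count references taken by timed out tasks once no
+ * per-task timeouts remain outstanding for the command.
+ */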
+static void transport_direct_request_timeout(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
+		   &T_TASK(cmd)->t_se_count);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static void transport_generic_request_timeout(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	/*
+	 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+	 * to allow last call to free memory resources.
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
+		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+
+		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_generic_remove(cmd, 0, 0);
+}
+
+static int
+transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
+{
+	unsigned char *buf;
+
+	buf = kzalloc(data_length, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate memory for buffer\n");
+		return -1;
+	}
+
+	T_TASK(cmd)->t_tasks_se_num = 0;
+	T_TASK(cmd)->t_task_buf = buf;
+
+	return 0;
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+	unsigned int __v1, __v2;
+
+	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+}
+
+/*
+ * Called from interrupt context.
+ */
+static void transport_task_timeout_handler(unsigned long data)
+{
+	struct se_task *task = (struct se_task *)data;
+	struct se_cmd *cmd = TASK_CMD(task);
+	unsigned long flags;
+
+	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (task->task_flags & TF_STOP) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	task->task_flags &= ~TF_RUNNING;
+
+	/*
+	 * Determine if transport_complete_task() has already been called.
+	 */
+	if (!(atomic_read(&task->task_active))) {
+		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+				" == 0\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+
+	atomic_inc(&T_TASK(cmd)->t_se_count);
+	atomic_inc(&T_TASK(cmd)->t_transport_timeout);
+	T_TASK(cmd)->t_tasks_failed = 1;
+
+	atomic_set(&task->task_timeout, 1);
+	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
+	task->task_scsi_status = 1;
+
+	if (atomic_read(&task->task_stop)) {
+		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+				" == 1\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		complete(&task->task_stop_comp);
+		return;
+	}
+
+	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+				" t_task_cdbs_left\n", task, cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return;
+	}
+	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+			task, cmd);
+
+	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
+}
+
+/*
+ * Called with T_TASK(cmd)->t_state_lock held.
+ */
+static void transport_start_task_timer(struct se_task *task)
+{
+	struct se_device *dev = task->se_dev;
+	int timeout;
+
+	if (task->task_flags & TF_RUNNING)
+		return;
+	/*
+	 * If the task_timeout is disabled, exit now.
+	 */
+	timeout = DEV_ATTRIB(dev)->task_timeout;
+	if (!(timeout))
+		return;
+
+	init_timer(&task->task_timer);
+	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
+	task->task_timer.data = (unsigned long) task;
+	task->task_timer.function = transport_task_timeout_handler;
+
+	task->task_flags |= TF_RUNNING;
+	add_timer(&task->task_timer);
+#if 0
+	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+		" %d\n", task->task_se_cmd, task, timeout);
+#endif
+}
+
+/*
+ * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ */
+void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
+{
+	struct se_cmd *cmd = TASK_CMD(task);
+
+	if (!(task->task_flags & TF_RUNNING))
+		return;
+
+	task->task_flags |= TF_STOP;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+
+	del_timer_sync(&task->task_timer);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+	task->task_flags &= ~TF_RUNNING;
+	task->task_flags &= ~TF_STOP;
+}
+
+static void transport_stop_all_task_timers(struct se_cmd *cmd)
+{
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list)
+		__transport_stop_task_timer(task, &flags);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
+static inline int transport_tcq_window_closed(struct se_device *dev)
+{
+	if (dev->dev_tcq_window_closed++ <
+			PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
+		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
+	} else {
+		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
+	}
+
+	wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+	return 0;
+}
+
+/*
+ * Called from Fabric Module context from transport_execute_tasks()
+ *
+ * The return of this function determines if the tasks from struct se_cmd
+ * get added to the execution queue in transport_execute_tasks(),
+ * or are added to the delayed or ordered lists here.
+ */
+static inline int transport_execute_task_attr(struct se_cmd *cmd)
+{
+	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+		return 1;
+	/*
+	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+	 * to add the passed struct se_cmd list of tasks to the front of the list.
+	 */
+	 if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+		smp_mb__after_atomic_inc();
+		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+			" 0x%02x, se_ordered_id: %u\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			cmd->se_ordered_id);
+		return 1;
+	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
+		list_add_tail(&cmd->se_ordered_list,
+				&SE_DEV(cmd)->ordered_cmd_list);
+		spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+
+		atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+		smp_mb__after_atomic_inc();
+
+		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+				" list, se_ordered_id: %u\n",
+				T_TASK(cmd)->t_task_cdb[0],
+				cmd->se_ordered_id);
+		/*
+		 * Add ORDERED command to tail of execution queue if
+		 * no other older commands exist that need to be
+		 * completed first.
+		 */
+		if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+			return 1;
+	} else {
+		/*
+		 * For SIMPLE and UNTAGGED Task Attribute commands
+		 */
+		atomic_inc(&SE_DEV(cmd)->simple_cmds);
+		smp_mb__after_atomic_inc();
+	}
+	/*
+	 * If one or more outstanding ORDERED task attributes exist, the
+	 * dormant task(s) built for the passed struct se_cmd cannot yet be
+	 * added to the execution queue to become Active for this
+	 * struct se_device.
+	 */
+	if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+		/*
+		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
+		 * will be drained upon completion of HEAD_OF_QUEUE task.
+		 */
+		spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
+		list_add_tail(&cmd->se_delayed_list,
+				&SE_DEV(cmd)->delayed_cmd_list);
+		spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+
+		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+			" delayed CMD list, se_ordered_id: %u\n",
+			T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+			cmd->se_ordered_id);
+		/*
+		 * Return zero to let transport_execute_tasks() know
+		 * not to add the delayed tasks to the execution list.
+		 */
+		return 0;
+	}
+	/*
+	 * Otherwise, no ORDERED task attributes exist..
+	 */
+	return 1;
+}
+
+/*
+ * Called from fabric module context in transport_generic_new_cmd() and
+ * transport_generic_process_write()
+ */
+static int transport_execute_tasks(struct se_cmd *cmd)
+{
+	int add_tasks;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
+		if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+			cmd->transport_error_status =
+				PYX_TRANSPORT_LU_COMM_FAILURE;
+			transport_generic_request_failure(cmd, NULL, 0, 1);
+			return 0;
+		}
+	}
+	/*
+	 * Call transport_cmd_check_stop() to see if a fabric exception
+	 * has occurred that prevents execution.
+	 */
+	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+		/*
+		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
+		 * attribute for the tasks of the received struct se_cmd CDB
+		 */
+		add_tasks = transport_execute_task_attr(cmd);
+		if (add_tasks == 0)
+			goto execute_tasks;
+		/*
+		 * This calls transport_add_tasks_from_cmd() to handle
+		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
+		 * (if enabled) in __transport_add_task_to_execute_queue() and
+		 * transport_add_task_check_sam_attr().
+		 */
+		transport_add_tasks_from_cmd(cmd);
+	}
+	/*
+	 * Kick the execution queue for the cmd associated struct se_device
+	 * storage object.
+	 */
+execute_tasks:
+	__transport_execute_tasks(SE_DEV(cmd));
+	return 0;
+}
+
+/*
+ * Called to check the struct se_device tcq depth window, and once open pull
+ * a struct se_task from struct se_device->execute_task_list and hand it off
+ * to the backend subsystem plugin.
+ *
+ * Called from transport_processing_thread()
+ */
+static int __transport_execute_tasks(struct se_device *dev)
+{
+	int error;
+	struct se_cmd *cmd = NULL;
+	struct se_task *task;
+	unsigned long flags;
+
+	/*
+	 * Check if there is enough room in the device and HBA queue to send
+	 * struct se_transport_task's to the selected transport.
+	 */
+check_depth:
+	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+	if (!(atomic_read(&dev->depth_left)) ||
+	    !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+		return transport_tcq_window_closed(dev);
+	}
+	dev->dev_tcq_window_closed = 0;
+
+	spin_lock(&dev->execute_task_lock);
+	task = transport_get_task_from_execute_queue(dev);
+	spin_unlock(&dev->execute_task_lock);
+
+	if (!task) {
+		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+		return 0;
+	}
+
+	atomic_dec(&dev->depth_left);
+	atomic_dec(&SE_HBA(dev)->left_queue_depth);
+	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+
+	cmd = TASK_CMD(task);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&task->task_active, 1);
+	atomic_set(&task->task_sent, 1);
+	atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+
+	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
+	    T_TASK(cmd)->t_task_cdbs)
+		atomic_set(&cmd->transport_sent, 1);
+
+	transport_start_task_timer(task);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
+	 * to grab REPORT_LUNS CDBs before they hit the
+	 * struct se_subsystem_api->do_task() caller below.
+	 */
+	if (cmd->transport_emulate_cdb) {
+		error = cmd->transport_emulate_cdb(cmd);
+		if (error != 0) {
+			cmd->transport_error_status = error;
+			atomic_set(&task->task_active, 0);
+			atomic_set(&cmd->transport_sent, 0);
+			transport_stop_tasks_for_cmd(cmd);
+			transport_generic_request_failure(cmd, dev, 0, 1);
+			goto check_depth;
+		}
+		/*
+		 * Handle the successful completion for transport_emulate_cdb()
+		 * here for synchronous operation.  When SCF_EMULATE_CDB_ASYNC
+		 * is set, the caller is expected to complete the task with
+		 * proper status.
+		 */
+		if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+			cmd->scsi_status = SAM_STAT_GOOD;
+			task->task_scsi_status = GOOD;
+			transport_complete_task(task, 1);
+		}
+	} else {
+		/*
+		 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
+		 * RAMDISK we use the internal transport_emulate_control_cdb() logic
+		 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
+		 * LUN emulation code.
+		 *
+		 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
+		 * call ->do_task() directly and let the underlying TCM subsystem plugin
+		 * code handle the CDB emulation.
+		 */
+		if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+		    (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+			error = transport_emulate_control_cdb(task);
+		else
+			error = TRANSPORT(dev)->do_task(task);
+
+		if (error != 0) {
+			cmd->transport_error_status = error;
+			atomic_set(&task->task_active, 0);
+			atomic_set(&cmd->transport_sent, 0);
+			transport_stop_tasks_for_cmd(cmd);
+			transport_generic_request_failure(cmd, dev, 0, 1);
+		}
+	}
+
+	goto check_depth;
+
+	return 0;
+}
+
+void transport_new_cmd_failure(struct se_cmd *se_cmd)
+{
+	unsigned long flags;
+	/*
+	 * Any unsolicited data will get dumped for a failed command inside of
+	 * the fabric plugin
+	 */
+	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
+	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+
+	CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+}
+
+static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
+
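+/*
+ * Extract the transfer length from a 6-byte CDB, using the 24-bit
+ * allocation length layout for TYPE_TAPE devices and the 8-bit sector
+ * count otherwise.
+ */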
+static inline u32 transport_get_sectors_6(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 8-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 8-bit sector value.
+	 */
+type_disk:
+	return (u32)cdb[4];
+}
+
+static inline u32 transport_get_sectors_10(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 16-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_10 is not defined in SSC, throw an exception
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -1;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 16-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * XXX_12 is not defined in SSC, throw an exception
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -1;
+		return 0;
+	}
+
+	/*
+	 * Everything else assume TYPE_DISK Sector CDB location.
+	 * Use 32-bit sector value.
+	 */
+type_disk:
+	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	if (!dev)
+		goto type_disk;
+
+	/*
+	 * Use 24-bit allocation length for TYPE_TAPE.
+	 */
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+
+type_disk:
+	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+		    (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(
+	unsigned char *cdb,
+	struct se_cmd *cmd,
+	int *ret)
+{
+	/*
+	 * Assume TYPE_DISK for non struct se_device objects.
+	 * Use 32-bit sector value.
+	 */
+	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+		    (cdb[30] << 8) + cdb[31];
+
+}
+
+static inline u32 transport_get_size(
+	u32 sectors,
+	unsigned char *cdb,
+	struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+		if (cdb[1] & 1) { /* sectors */
+			return DEV_ATTRIB(dev)->block_size * sectors;
+		} else /* bytes */
+			return sectors;
+	}
+#if 0
+	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+			" %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
+			DEV_ATTRIB(dev)->block_size * sectors,
+			TRANSPORT(dev)->name);
+#endif
+	return DEV_ATTRIB(dev)->block_size * sectors;
+}
+
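+/*
+ * Convert a two character ASCII hex string into its binary byte value.
+ */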
+unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
+{
+	unsigned char result = 0;
+	/*
+	 * MSB
+	 */
+	if ((val[0] >= 'a') && (val[0] <= 'f'))
+		result = ((val[0] - 'a' + 10) & 0xf) << 4;
+	else if ((val[0] >= 'A') && (val[0] <= 'F'))
+		result = ((val[0] - 'A' + 10) & 0xf) << 4;
+	else /* digit */
+		result = ((val[0] - '0') & 0xf) << 4;
+	/*
+	 * LSB
+	 */
+	if ((val[1] >= 'a') && (val[1] <= 'f'))
+		result |= ((val[1] - 'a' + 10) & 0xf);
+	else if ((val[1] >= 'A') && (val[1] <= 'F'))
+		result |= ((val[1] - 'A' + 10) & 0xf);
+	else /* digit */
+		result |= ((val[1] - '0') & 0xf);
+
+	return result;
+}
+EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
+
+static void transport_xor_callback(struct se_cmd *cmd)
+{
+	unsigned char *buf, *addr;
+	struct se_mem *se_mem;
+	unsigned int offset;
+	int i;
+	/*
+	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+	 *
+	 * 1) read the specified logical block(s);
+	 * 2) transfer logical blocks from the data-out buffer;
+	 * 3) XOR the logical blocks transferred from the data-out buffer with
+	 *    the logical blocks read, storing the resulting XOR data in a buffer;
+	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+	 *    blocks transferred from the data-out buffer; and
+	 * 5) transfer the resulting XOR data to the data-in buffer.
+	 */
+	buf = kmalloc(cmd->data_length, GFP_KERNEL);
+	if (!(buf)) {
+		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+		return;
+	}
+	/*
+	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+	 * into the locally allocated *buf
+	 */
+	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+	/*
+	 * Now perform the XOR against the BIDI read memory located at
+	 * T_TASK(cmd)->t_mem_bidi_list
+	 */
+
+	offset = 0;
+	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
+		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
+		if (!(addr))
+			goto out;
+
+		for (i = 0; i < se_mem->se_len; i++)
+			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+
+		offset += se_mem->se_len;
+		kunmap_atomic(addr, KM_USER0);
+	}
+out:
+	kfree(buf);
+}
+
+/*
+ * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
+ */
+static int transport_get_sense_data(struct se_cmd *cmd)
+{
+	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
+	struct se_device *dev;
+	struct se_task *task = NULL, *task_tmp;
+	unsigned long flags;
+	u32 offset = 0;
+
+	if (!SE_LUN(cmd)) {
+		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
+		return -1;
+	}
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return 0;
+	}
+
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+
+		if (!task->task_sense)
+			continue;
+
+		dev = task->se_dev;
+		if (!(dev))
+			continue;
+
+		if (!TRANSPORT(dev)->get_sense_buffer) {
+			printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+					" is NULL\n");
+			continue;
+		}
+
+		sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
+		if (!(sense_buffer)) {
+			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+				" sense buffer for task with sense\n",
+				CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+			continue;
+		}
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+		offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+
+		memcpy((void *)&buffer[offset], (void *)sense_buffer,
+				TRANSPORT_SENSE_BUFFER);
+		cmd->scsi_status = task->task_scsi_status;
+		/* Automatically padded */
+		cmd->scsi_sense_length =
+				(TRANSPORT_SENSE_BUFFER + offset);
+
+		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+				" and sense\n",
+			dev->se_hba->hba_id, TRANSPORT(dev)->name,
+				cmd->scsi_status);
+		return 0;
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return -1;
+}
+
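+/*
+ * Allocate the data buffer for a command: scatterlist memory for SG I/O
+ * CDBs, or a single contiguous kernel buffer for non-SG control CDBs.
+ */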
+static int transport_allocate_resources(struct se_cmd *cmd)
+{
+	u32 length = cmd->data_length;
+
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
+		return transport_generic_get_mem(cmd, length, PAGE_SIZE);
+	else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
+		return transport_generic_allocate_buf(cmd, length);
+	else
+		return 0;
+}
+
+static int
+transport_handle_reservation_conflict(struct se_cmd *cmd)
+{
+	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+	/*
+	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
+	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
+	 * CONFLICT STATUS.
+	 *
+	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
+	 */
+	if (SE_SESS(cmd) &&
+	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
+		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+			cmd->orig_fe_lun, 0x2C,
+			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+	return -2;
+}
+
+/*	transport_generic_cmd_sequencer():
+ *
+ *	Generic Command Sequencer that should work for most DAS transport
+ *	drivers.
+ *
+ *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ *	RX Thread.
+ *
+ *	FIXME: Need to support other SCSI OPCODES where as well.
+ */
+static int transport_generic_cmd_sequencer(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+	int ret = 0, sector_ret = 0, passthrough;
+	u32 sectors = 0, size = 0, pr_reg_type = 0;
+	u16 service_action;
+	u8 alua_ascq = 0;
+	/*
+	 * Check for an existing UNIT ATTENTION condition
+	 */
+	if (core_scsi3_ua_check(cmd, cdb) < 0) {
+		cmd->transport_wait_for_tasks =
+				&transport_nop_wait_for_tasks;
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+		return -2;
+	}
+	/*
+	 * Check status of Asymmetric Logical Unit Assignment port
+	 */
+	ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+	if (ret != 0) {
+		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+		/*
+		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'.
+		 * The ALUA additional sense code qualifier (ASCQ) is determined
+		 * by the ALUA primary or secondary access state..
+		 */
+		if (ret > 0) {
+#if 0
+			printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
+				CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+#endif
+			transport_set_sense_codes(cmd, 0x04, alua_ascq);
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+			return -2;
+		}
+		goto out_invalid_cdb_field;
+	}
+	/*
+	 * Check status for SPC-3 Persistent Reservations
+	 */
+	if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
+		if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+					cmd, cdb, pr_reg_type) != 0)
+			return transport_handle_reservation_conflict(cmd);
+		/*
+		 * This means the CDB is allowed for the SCSI Initiator port
+		 * when said port is *NOT* holding the legacy SPC-2 or
+		 * SPC-3 Persistent Reservation.
+		 */
+	}
+
+	switch (cdb[0]) {
+	case READ_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_6;
+		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_12;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case READ_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_16;
+		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_6:
+		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_6;
+		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_10:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_12:
+		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_12;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case WRITE_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_16;
+		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		break;
+	case XDWRITEREAD_10:
+		if ((cmd->data_direction != DMA_TO_DEVICE) ||
+		    !(T_TASK(cmd)->t_tasks_bidi))
+			goto out_invalid_cdb_field;
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->transport_split_cdb = &split_cdb_XX_10;
+		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Skip the remaining assignments for TCM/PSCSI passthrough
+		 */
+		if (passthrough)
+			break;
+		/*
+		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
+		 */
+		cmd->transport_complete_callback = &transport_xor_callback;
+		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		break;
+	case VARIABLE_LENGTH_CMD:
+		service_action = get_unaligned_be16(&cdb[8]);
+		/*
+		 * Determine if this is TCM/PSCSI device and we should disable
+		 * internal emulation for this CDB.
+		 */
+		passthrough = (TRANSPORT(dev)->transport_type ==
+					TRANSPORT_PLUGIN_PHBA_PDEV);
+
+		switch (service_action) {
+		case XDWRITEREAD_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			/*
+			 * Use WRITE_32 and READ_32 opcodes for the emulated
+			 * XDWRITE_READ_32 logic.
+			 */
+			cmd->transport_split_cdb = &split_cdb_XX_32;
+			T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
+
+			/*
+			 * Skip the remaining assignments for TCM/PSCSI passthrough
+			 */
+			if (passthrough)
+				break;
+
+			/*
+			 * Setup BIDI XOR callback to be run during
+			 * transport_generic_complete_ok()
+			 */
+			cmd->transport_complete_callback = &transport_xor_callback;
+			T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+			break;
+		case WRITE_SAME_32:
+			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
+			if (sector_ret)
+				goto out_unsupported_cdb;
+			size = transport_get_size(sectors, cdb, cmd);
+			T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+			/*
+			 * Skip the remaining assignments for TCM/PSCSI passthrough
+			 */
+			if (passthrough)
+				break;
+
+			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
+				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+					" bits not supported for Block Discard"
+					" Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			/*
+			 * Currently for the emulated case we only accept
+			 * tpws with the UNMAP=1 bit set.
+			 */
+			if (!(cdb[10] & 0x08)) {
+				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+					" supported for Block Discard Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			break;
+		default:
+			printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+				" 0x%04x not supported\n", service_action);
+			goto out_unsupported_cdb;
+		}
+		break;
+	case 0xa3:
+		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_IN from SCC-2 */
+			/*
+			 * Check for emulated MI_REPORT_TARGET_PGS.
+			 */
+			if (cdb[1] == MI_REPORT_TARGET_PGS) {
+				cmd->transport_emulate_cdb =
+				(T10_ALUA(su_dev)->alua_type ==
+				 SPC3_ALUA_EMULATED) ?
+				&core_emulate_report_target_port_groups :
+				NULL;
+			}
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else {
+			/* GPCMD_SEND_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case MODE_SELECT:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SELECT_10:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case MODE_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case MODE_SENSE_10:
+	case GPCMD_READ_BUFFER_CAPACITY:
+	case GPCMD_SEND_OPC:
+	case LOG_SELECT:
+	case LOG_SENSE:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_BLOCK_LIMITS:
+		size = READ_BLOCK_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case GPCMD_GET_CONFIGURATION:
+	case GPCMD_READ_FORMAT_CAPACITIES:
+	case GPCMD_READ_DISC_INFO:
+	case GPCMD_READ_TRACK_RZONE_INFO:
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case PERSISTENT_RESERVE_IN:
+	case PERSISTENT_RESERVE_OUT:
+		cmd->transport_emulate_cdb =
+			(T10_RES(su_dev)->res_type ==
+			 SPC3_PERSISTENT_RESERVATIONS) ?
+			&core_scsi3_emulate_pr : NULL;
+		size = (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case GPCMD_MECHANISM_STATUS:
+	case GPCMD_READ_DVD_STRUCTURE:
+		size = (cdb[8] << 8) + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case READ_POSITION:
+		size = READ_POSITION_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case 0xa4:
+		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+			/* MAINTENANCE_OUT from SCC-2
+			 *
+			 * Check for emulated MO_SET_TARGET_PGS.
+			 */
+			if (cdb[1] == MO_SET_TARGET_PGS) {
+				cmd->transport_emulate_cdb =
+				(T10_ALUA(su_dev)->alua_type ==
+					SPC3_ALUA_EMULATED) ?
+				&core_emulate_set_target_port_groups :
+				NULL;
+			}
+
+			size = (cdb[6] << 24) | (cdb[7] << 16) |
+			       (cdb[8] << 8) | cdb[9];
+		} else {
+			/* GPCMD_REPORT_KEY from multi media commands */
+			size = (cdb[8] << 8) + cdb[9];
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case INQUIRY:
+		size = (cdb[3] << 8) + cdb[4];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+		 * See spc4r17 section 5.3
+		 */
+		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = TASK_ATTR_HOQ;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_CAPACITY:
+		size = READ_CAP_LEN;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_MEDIA_SERIAL_NUMBER:
+	case SECURITY_PROTOCOL_IN:
+	case SECURITY_PROTOCOL_OUT:
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case SERVICE_ACTION_IN:
+	case ACCESS_CONTROL_IN:
+	case ACCESS_CONTROL_OUT:
+	case EXTENDED_COPY:
+	case READ_ATTRIBUTE:
+	case RECEIVE_COPY_RESULTS:
+	case WRITE_ATTRIBUTE:
+		size = (cdb[10] << 24) | (cdb[11] << 16) |
+		       (cdb[12] << 8) | cdb[13];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case RECEIVE_DIAGNOSTIC:
+	case SEND_DIAGNOSTIC:
+		size = (cdb[3] << 8) | cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
+#if 0
+	case GPCMD_READ_CD:
+		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		size = (2336 * sectors);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+#endif
+	case READ_TOC:
+		size = cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case REQUEST_SENSE:
+		size = cdb[4];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case READ_ELEMENT_STATUS:
+		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case WRITE_BUFFER:
+		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case RESERVE:
+	case RESERVE_10:
+		/*
+		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RESERVE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		/*
+		 * Setup the legacy emulated handler for SPC-2 and
+		 * >= SPC-3 compatible reservation handling (CRH=1).
+		 * Otherwise, we assume the underlying SCSI logic is
+		 * running in SPC_PASSTHROUGH, and wants reservations
+		 * emulation disabled.
+		 */
+		cmd->transport_emulate_cdb =
+				(T10_RES(su_dev)->res_type !=
+				 SPC_PASSTHROUGH) ?
+				&core_scsi2_emulate_crh : NULL;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case RELEASE:
+	case RELEASE_10:
+		/*
+		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
+		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
+		 */
+		if (cdb[0] == RELEASE_10)
+			size = (cdb[7] << 8) | cdb[8];
+		else
+			size = cmd->data_length;
+
+		cmd->transport_emulate_cdb =
+				(T10_RES(su_dev)->res_type !=
+				 SPC_PASSTHROUGH) ?
+				&core_scsi2_emulate_crh : NULL;
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case SYNCHRONIZE_CACHE:
+	case 0x91: /* SYNCHRONIZE_CACHE_16: */
+		/*
+		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+		 */
+		if (cdb[0] == SYNCHRONIZE_CACHE) {
+			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+			T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+			T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		}
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		size = transport_get_size(sectors, cdb, cmd);
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+
+		/*
+		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
+		 */
+		if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+			break;
+		/*
+		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
+		 * for SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
+		 */
+		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
+		/*
+		 * Check to ensure that LBA + Range does not exceed past end of
+		 * device.
+		 */
+		if (transport_get_sectors(cmd) < 0)
+			goto out_invalid_cdb_field;
+		break;
+	case UNMAP:
+		size = get_unaligned_be16(&cdb[7]);
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Determine if the received UNMAP is used for direct passthrough
+		 * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
+		 * signaling the use of internal transport_generic_unmap() emulation
+		 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
+		 * subsystem plugin backstores.
+		 */
+		if (!(passthrough))
+			cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
+
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	case WRITE_SAME_16:
+		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+		size = transport_get_size(sectors, cdb, cmd);
+		T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
+		passthrough = (TRANSPORT(dev)->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+		/*
+		 * Determine if the received WRITE_SAME_16 is used for direct
+		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
+		 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
+		 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
+		 * TCM/FILEIO subsystem plugin backstores.
+		 */
+		if (!(passthrough)) {
+			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
+				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+					" bits not supported for Block Discard"
+					" Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+			/*
+			 * Currently for the emulated case we only accept
+			 * tpws with the UNMAP=1 bit set.
+			 */
+			if (!(cdb[1] & 0x08)) {
+				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+					" supported for Block Discard Emulation\n");
+				goto out_invalid_cdb_field;
+			}
+		}
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case GPCMD_CLOSE_TRACK:
+	case ERASE:
+	case INITIALIZE_ELEMENT_STATUS:
+	case GPCMD_LOAD_UNLOAD:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case GPCMD_SET_SPEED:
+	case SPACE:
+	case START_STOP:
+	case TEST_UNIT_READY:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+	case MOVE_MEDIUM:
+		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
+		break;
+	case REPORT_LUNS:
+		cmd->transport_emulate_cdb =
+				&transport_core_report_lun_response;
+		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+		/*
+		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS.
+		 * See spc4r17 section 5.3
+		 */
+		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+			cmd->sam_task_attr = TASK_ATTR_HOQ;
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+		break;
+	default:
+		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+			" 0x%02x, sending CHECK_CONDITION.\n",
+			CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
+		goto out_unsupported_cdb;
+	}
+
+	if (size != cmd->data_length) {
+		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
+			" 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+				cmd->data_length, size, cdb[0]);
+
+		cmd->cmd_spdtl = size;
+
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			printk(KERN_ERR "Rejecting underflow/overflow"
+					" WRITE data\n");
+			goto out_invalid_cdb_field;
+		}
+		/*
+		 * Reject READ_* or WRITE_* with overflow/underflow for
+		 * type SCF_SCSI_DATA_SG_IO_CDB.
+		 */
+		if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512))  {
+			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+				" CDB on non 512-byte sector setup subsystem"
+				" plugin: %s\n", TRANSPORT(dev)->name);
+			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+			goto out_invalid_cdb_field;
+		}
+
+		if (size > cmd->data_length) {
+			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+			cmd->residual_count = (size - cmd->data_length);
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = (cmd->data_length - size);
+		}
+		cmd->data_length = size;
+	}
+
+	transport_set_supported_SAM_opcode(cmd);
+	return ret;
+
+out_unsupported_cdb:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+	return -2;
+out_invalid_cdb_field:
+	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+	return -2;
+}
+
+static inline void transport_release_tasks(struct se_cmd *);
+
+/*
+ * This function will copy a contiguous *src buffer into a destination
+ * struct scatterlist array.
+ */
+static void transport_memcpy_write_contig(
+	struct se_cmd *cmd,
+	struct scatterlist *sg_d,
+	unsigned char *src)
+{
+	u32 i = 0, length = 0, total_length = cmd->data_length;
+	void *dst;
+
+	while (total_length) {
+		length = sg_d[i].length;
+
+		if (length > total_length)
+			length = total_length;
+
+		dst = sg_virt(&sg_d[i]);
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		src += length;
+		i++;
+	}
+}
+
+/*
+ * This function will copy a struct scatterlist array *sg_s into a destination
+ * contiguous *dst buffer.
+ */
+static void transport_memcpy_read_contig(
+	struct se_cmd *cmd,
+	unsigned char *dst,
+	struct scatterlist *sg_s)
+{
+	u32 i = 0, length = 0, total_length = cmd->data_length;
+	void *src;
+
+	while (total_length) {
+		length = sg_s[i].length;
+
+		if (length > total_length)
+			length = total_length;
+
+		src = sg_virt(&sg_s[i]);
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		dst += length;
+		i++;
+	}
+}
+
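+/*
+ * This function will copy the struct se_mem list at *se_mem_list into a
+ * destination contiguous *dst buffer.
+ */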
+static void transport_memcpy_se_mem_read_contig(
+	struct se_cmd *cmd,
+	unsigned char *dst,
+	struct list_head *se_mem_list)
+{
+	struct se_mem *se_mem;
+	void *src;
+	u32 length = 0, total_length = cmd->data_length;
+
+	list_for_each_entry(se_mem, se_mem_list, se_list) {
+		length = se_mem->se_len;
+
+		if (length > total_length)
+			length = total_length;
+
+		src = page_address(se_mem->se_page) + se_mem->se_off;
+
+		memcpy(dst, src, length);
+
+		if (!(total_length -= length))
+			return;
+
+		dst += length;
+	}
+}
+
+/*
+ * Called from transport_generic_complete_ok() and
+ * transport_generic_request_failure() to determine which dormant/delayed
+ * and ordered cmds need to have their tasks added to the execution queue.
+ */
+static void transport_complete_task_attr(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_cmd *cmd_p, *cmd_tmp;
+	int new_active_tasks = 0;
+
+	if (cmd->sam_task_attr == TASK_ATTR_SIMPLE) {
+		atomic_dec(&dev->simple_cmds);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
+		atomic_dec(&dev->dev_hoq_count);
+		smp_mb__after_atomic_dec();
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
+			cmd->se_ordered_id);
+	} else if (cmd->sam_task_attr == TASK_ATTR_ORDERED) {
+		spin_lock(&dev->ordered_cmd_lock);
+		list_del(&cmd->se_ordered_list);
+		atomic_dec(&dev->dev_ordered_sync);
+		smp_mb__after_atomic_dec();
+		spin_unlock(&dev->ordered_cmd_lock);
+
+		dev->dev_cur_ordered_id++;
+		DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+	}
+	/*
+	 * Process all commands up to the last received
+	 * ORDERED task attribute which requires another blocking
+	 * boundary
+	 */
+	spin_lock(&dev->delayed_cmd_lock);
+	list_for_each_entry_safe(cmd_p, cmd_tmp,
+			&dev->delayed_cmd_list, se_delayed_list) {
+
+		list_del(&cmd_p->se_delayed_list);
+		spin_unlock(&dev->delayed_cmd_lock);
+
+		DEBUG_STA("Calling add_tasks() for"
+			" cmd_p: 0x%02x Task Attr: 0x%02x"
+			" Dormant -> Active, se_ordered_id: %u\n",
+			T_TASK(cmd_p)->t_task_cdb[0],
+			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
+
+		transport_add_tasks_from_cmd(cmd_p);
+		new_active_tasks++;
+
+		spin_lock(&dev->delayed_cmd_lock);
+		if (cmd_p->sam_task_attr == TASK_ATTR_ORDERED)
+			break;
+	}
+	spin_unlock(&dev->delayed_cmd_lock);
+	/*
+	 * If new tasks have become active, wake up the transport thread
+	 * to do the processing of the Active tasks.
+	 */
+	if (new_active_tasks != 0)
+		wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+}
+
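+/*
+ * Completion path for a successfully executed struct se_cmd: handle task
+ * attribute bookkeeping, sense data, and queue data and/or status back to
+ * the fabric module.
+ */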
+static void transport_generic_complete_ok(struct se_cmd *cmd)
+{
+	int reason = 0;
+	/*
+	 * Check if we need to move delayed/dormant tasks from cmds on the
+	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
+	 * Attribute.
+	 */
+	if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+	/*
+	 * Check if we need to retrieve a sense buffer from
+	 * the struct se_cmd in question.
+	 */
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		if (transport_get_sense_data(cmd) < 0)
+			reason = TCM_NON_EXISTENT_LUN;
+
+		/*
+		 * Only set when an struct se_task->task_scsi_status returned
+		 * a non GOOD status.
+		 */
+		if (cmd->scsi_status) {
+			transport_send_check_condition_and_sense(
+					cmd, reason, 1);
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop_to_fabric(cmd);
+			return;
+		}
+	}
+	/*
+	 * Check for a callback, used by, amongst other things,
+	 * XDWRITE_READ_10 emulation.
+	 */
+	if (cmd->transport_complete_callback)
+		cmd->transport_complete_callback(cmd);
+
+	switch (cmd->data_direction) {
+	case DMA_FROM_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (SE_LUN(cmd)->lun_sep) {
+			SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * If the TCM fabric module has pre-registered SGL
+		 * memory, perform the memcpy() from the TCM internal
+		 * contiguous buffer back to the original SGL.
+		 */
+		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+			transport_memcpy_write_contig(cmd,
+				 T_TASK(cmd)->t_task_pt_sgl,
+				 T_TASK(cmd)->t_task_buf);
+
+		CMD_TFO(cmd)->queue_data_in(cmd);
+		break;
+	case DMA_TO_DEVICE:
+		spin_lock(&cmd->se_lun->lun_sep_lock);
+		if (SE_LUN(cmd)->lun_sep) {
+			SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+				cmd->data_length;
+		}
+		spin_unlock(&cmd->se_lun->lun_sep_lock);
+		/*
+		 * Check if we need to send READ payload for BIDI-COMMAND
+		 */
+		if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+			spin_lock(&cmd->se_lun->lun_sep_lock);
+			if (SE_LUN(cmd)->lun_sep) {
+				SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+					cmd->data_length;
+			}
+			spin_unlock(&cmd->se_lun->lun_sep_lock);
+			CMD_TFO(cmd)->queue_data_in(cmd);
+			break;
+		}
+		/* Fall through for DMA_TO_DEVICE */
+	case DMA_NONE:
+		CMD_TFO(cmd)->queue_status(cmd);
+		break;
+	default:
+		break;
+	}
+
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
+}
+
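+/*
+ * Release all inactive struct se_task descriptors attached to this command,
+ * returning each one to its backend subsystem plugin via free_task().
+ */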
+static void transport_free_dev_tasks(struct se_cmd *cmd)
+{
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	list_for_each_entry_safe(task, task_tmp,
+				&T_TASK(cmd)->t_task_list, t_list) {
+		if (atomic_read(&task->task_active))
+			continue;
+
+		kfree(task->task_sg_bidi);
+		kfree(task->task_sg);
+
+		list_del(&task->t_list);
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		if (task->se_dev)
+			TRANSPORT(task->se_dev)->free_task(task);
+		else
+			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+				task->task_no);
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+}
+
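+/*
+ * Release the struct se_mem lists (and their backing pages, unless the
+ * memory is still owned by the fabric module or backend) for this command.
+ */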
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+	struct se_mem *se_mem, *se_mem_tmp;
+	int free_page = 1;
+
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+		free_page = 0;
+	if (cmd->se_dev->transport->do_se_mem_map)
+		free_page = 0;
+
+	if (T_TASK(cmd)->t_task_buf) {
+		kfree(T_TASK(cmd)->t_task_buf);
+		T_TASK(cmd)->t_task_buf = NULL;
+		return;
+	}
+
+	/*
+	 * Caller will handle releasing of struct se_mem.
+	 */
+	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
+		return;
+
+	if (!(T_TASK(cmd)->t_tasks_se_num))
+		return;
+
+	list_for_each_entry_safe(se_mem, se_mem_tmp,
+			T_TASK(cmd)->t_mem_list, se_list) {
+		/*
+		 * Only call __free_page(struct se_mem->se_page) when
+		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+		 */
+		if (free_page)
+			__free_page(se_mem->se_page);
+
+		list_del(&se_mem->se_list);
+		kmem_cache_free(se_mem_cache, se_mem);
+	}
+
+	if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
+		list_for_each_entry_safe(se_mem, se_mem_tmp,
+				T_TASK(cmd)->t_mem_bidi_list, se_list) {
+			/*
+			 * Only call __free_page(struct se_mem->se_page) when
+			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
+			 */
+			if (free_page)
+				__free_page(se_mem->se_page);
+
+			list_del(&se_mem->se_list);
+			kmem_cache_free(se_mem_cache, se_mem);
+		}
+	}
+
+	kfree(T_TASK(cmd)->t_mem_bidi_list);
+	T_TASK(cmd)->t_mem_bidi_list = NULL;
+	kfree(T_TASK(cmd)->t_mem_list);
+	T_TASK(cmd)->t_mem_list = NULL;
+	T_TASK(cmd)->t_tasks_se_num = 0;
+}
+
+static inline void transport_release_tasks(struct se_cmd *cmd)
+{
+	transport_free_dev_tasks(cmd);
+}
+
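+/*
+ * Drop the frontend and backend references held on this command.  Returns
+ * 1 while either reference count remains held, and 0 once both have been
+ * released and the command itself may be freed.
+ */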
+static inline int transport_dec_and_check(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			return 1;
+		}
+	}
+
+	if (atomic_read(&T_TASK(cmd)->t_se_count)) {
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+			return 1;
+		}
+	}
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	return 0;
+}
+
+static void transport_release_fe_cmd(struct se_cmd *cmd)
+{
+	unsigned long flags;
+
+	if (transport_dec_and_check(cmd))
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto free_pages;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_release_tasks(cmd);
+free_pages:
+	transport_free_pages(cmd);
+	transport_free_se_cmd(cmd);
+	CMD_TFO(cmd)->release_cmd_direct(cmd);
+}
+
+static int transport_generic_remove(
+	struct se_cmd *cmd,
+	int release_to_pool,
+	int session_reinstatement)
+{
+	unsigned long flags;
+
+	if (!(T_TASK(cmd)))
+		goto release_cmd;
+
+	if (transport_dec_and_check(cmd)) {
+		if (session_reinstatement) {
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			transport_all_task_dev_remove_state(cmd);
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					flags);
+		}
+		return 1;
+	}
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		goto free_pages;
+	}
+	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+	transport_all_task_dev_remove_state(cmd);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	transport_release_tasks(cmd);
+free_pages:
+	transport_free_pages(cmd);
+
+release_cmd:
+	if (release_to_pool) {
+		transport_release_cmd_to_pool(cmd);
+	} else {
+		transport_free_se_cmd(cmd);
+		CMD_TFO(cmd)->release_cmd_direct(cmd);
+	}
+
+	return 0;
+}
+
+/*
+ * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * @cmd:  Associated se_cmd descriptor
+ * @mem:  SGL style memory for TCM WRITE / READ
+ * @sg_mem_num: Number of SGL elements
+ * @mem_bidi_in: SGL style memory for TCM BIDI READ
+ * @sg_mem_bidi_num: Number of BIDI READ SGL elements
+ *
+ * Return: nonzero means the cmd was rejected for -ENOMEM or improper usage
+ * of parameters.
+ */
+int transport_generic_map_mem_to_cmd(
+	struct se_cmd *cmd,
+	struct scatterlist *mem,
+	u32 sg_mem_num,
+	struct scatterlist *mem_bidi_in,
+	u32 sg_mem_bidi_num)
+{
+	u32 se_mem_cnt_out = 0;
+	int ret;
+
+	if (!(mem) || !(sg_mem_num))
+		return 0;
+	/*
+	 * Passed *mem will contain a list_head containing preformatted
+	 * struct se_mem elements...
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
+		if ((mem_bidi_in) || (sg_mem_bidi_num)) {
+			printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
+				" with BIDI-COMMAND\n");
+			return -ENOSYS;
+		}
+
+		T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
+		T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
+		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
+		return 0;
+	}
+	/*
+	 * Otherwise, assume the caller is passing a struct scatterlist
+	 * array from include/linux/scatterlist.h
+	 */
+	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
+	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+		/*
+		 * For CDB using TCM struct se_mem linked list scatterlist memory
+		 * processed into a TCM struct se_subsystem_dev, we do the mapping
+		 * from the passed physical memory to struct se_mem->se_page here.
+		 */
+		T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+		if (!(T_TASK(cmd)->t_mem_list))
+			return -ENOMEM;
+
+		ret = transport_map_sg_to_mem(cmd,
+			T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
+		if (ret < 0)
+			return -ENOMEM;
+
+		T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
+		/*
+		 * Setup BIDI READ list of struct se_mem elements
+		 */
+		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
+			T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+			if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+				kfree(T_TASK(cmd)->t_mem_list);
+				return -ENOMEM;
+			}
+			se_mem_cnt_out = 0;
+
+			ret = transport_map_sg_to_mem(cmd,
+				T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
+				&se_mem_cnt_out);
+			if (ret < 0) {
+				kfree(T_TASK(cmd)->t_mem_list);
+				return -ENOMEM;
+			}
+
+			T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+		}
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+		if (mem_bidi_in || sg_mem_bidi_num) {
+			printk(KERN_ERR "BIDI-Commands not supported using "
+				"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
+			return -ENOSYS;
+		}
+		/*
+		 * For incoming CDBs using a contiguous buffer internally within TCM,
+		 * save the passed struct scatterlist memory.  After TCM storage object
+		 * processing has completed for this struct se_cmd, TCM core will call
+		 * transport_memcpy_[write,read]_contig() as necessary from
+		 * transport_generic_complete_ok() and transport_write_pending() in order
+		 * to copy the TCM buffer to/from the original passed *mem in SGL ->
+		 * struct scatterlist format.
+		 */
+		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
+		T_TASK(cmd)->t_task_pt_sgl = mem;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+	return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_get_sectors(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+
+	T_TASK(cmd)->t_tasks_sectors =
+		(cmd->data_length / DEV_ATTRIB(dev)->block_size);
+	if (!(T_TASK(cmd)->t_tasks_sectors))
+		T_TASK(cmd)->t_tasks_sectors = 1;
+
+	if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
+		return 0;
+
+	if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
+	     transport_dev_end_lba(dev)) {
+		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+			" transport_dev_end_lba(): %llu\n",
+			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+			transport_dev_end_lba(dev));
+		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
+		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
+	}
+
+	return 0;
+}
+
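+/*
+ * Set up the per-command task accounting: a single CDB for non
+ * SCF_SCSI_DATA_SG_IO_CDB payloads, otherwise one struct se_task per
+ * max_sectors sized chunk built from T_TASK(cmd)->t_mem_list (and
+ * t_mem_bidi_list for BIDI-COMMANDs).
+ */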
+static int transport_new_cmd_obj(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	u32 task_cdbs = 0, rc;
+
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+		task_cdbs++;
+		T_TASK(cmd)->t_task_cdbs++;
+	} else {
+		int set_counts = 1;
+
+		/*
+		 * Setup any BIDI READ tasks and memory from
+		 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
+		 * are queued first for the non pSCSI passthrough case.
+		 */
+		if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+		    (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+			rc = transport_generic_get_cdb_count(cmd,
+				T_TASK(cmd)->t_task_lba,
+				T_TASK(cmd)->t_tasks_sectors,
+				DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
+				set_counts);
+			if (!(rc)) {
+				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+				cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+				return PYX_TRANSPORT_LU_COMM_FAILURE;
+			}
+			set_counts = 0;
+		}
+		/*
+		 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
+		 * Note for BIDI transfers this will contain the WRITE payload
+		 */
+		task_cdbs = transport_generic_get_cdb_count(cmd,
+				T_TASK(cmd)->t_task_lba,
+				T_TASK(cmd)->t_tasks_sectors,
+				cmd->data_direction, T_TASK(cmd)->t_mem_list,
+				set_counts);
+		if (!(task_cdbs)) {
+			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+			cmd->scsi_sense_reason =
+					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+			return PYX_TRANSPORT_LU_COMM_FAILURE;
+		}
+		T_TASK(cmd)->t_task_cdbs += task_cdbs;
+
+#if 0
+		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
+			" %u, t_task_cdbs: %u\n", cmd->data_length,
+			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
+			T_TASK(cmd)->t_task_cdbs);
+#endif
+	}
+
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
+	atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
+	return 0;
+}
+
+static struct list_head *transport_init_se_mem_list(void)
+{
+	struct list_head *se_mem_list;
+
+	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	if (!(se_mem_list)) {
+		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(se_mem_list);
+
+	return se_mem_list;
+}
+
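+/*
+ * Allocate the internal struct se_mem list(s) for this command, backing
+ * each element with a zeroed page of at most dma_size bytes until the
+ * requested length has been covered.
+ */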
+static int
+transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
+{
+	unsigned char *buf;
+	struct se_mem *se_mem;
+
+	T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
+	if (!(T_TASK(cmd)->t_mem_list))
+		return -ENOMEM;
+
+	/*
+	 * If the device uses memory mapping this is enough.
+	 */
+	if (cmd->se_dev->transport->do_se_mem_map)
+		return 0;
+
+	/*
+	 * Setup BIDI-COMMAND READ list of struct se_mem elements
+	 */
+	if (T_TASK(cmd)->t_tasks_bidi) {
+		T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
+		if (!(T_TASK(cmd)->t_mem_bidi_list)) {
+			kfree(T_TASK(cmd)->t_mem_list);
+			return -ENOMEM;
+		}
+	}
+
+	while (length) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			goto out;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		se_mem->se_len = (length > dma_size) ? dma_size : length;
+
+/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
+		se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+		if (!(se_mem->se_page)) {
+			printk(KERN_ERR "alloc_pages() failed\n");
+			goto out;
+		}
+
+		buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
+		if (!(buf)) {
+			printk(KERN_ERR "kmap_atomic() failed\n");
+			goto out;
+		}
+		memset(buf, 0, se_mem->se_len);
+		kunmap_atomic(buf, KM_IRQ0);
+
+		list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
+		T_TASK(cmd)->t_tasks_se_num++;
+
+		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
+			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
+			se_mem->se_off);
+
+		length -= se_mem->se_len;
+	}
+
+	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
+			T_TASK(cmd)->t_tasks_se_num);
+
+	return 0;
+out:
+	return -1;
+}
+
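+/*
+ * Walk the command's struct se_mem list to determine how many scatterlist
+ * entries this struct se_task requires, then allocate task->task_sg[] (plus
+ * an extra padding entry when the fabric module requests SGL chaining).
+ */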
+extern u32 transport_calc_sg_num(
+	struct se_task *task,
+	struct se_mem *in_se_mem,
+	u32 task_offset)
+{
+	struct se_cmd *se_cmd = task->task_se_cmd;
+	struct se_device *se_dev = SE_DEV(se_cmd);
+	struct se_mem *se_mem = in_se_mem;
+	struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
+	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
+
+	while (task_size != 0) {
+		DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
+			" se_mem->se_off(%u) task_offset(%u)\n",
+			se_mem->se_page, se_mem->se_len,
+			se_mem->se_off, task_offset);
+
+		if (task_offset == 0) {
+			if (task_size >= se_mem->se_len) {
+				sg_length = se_mem->se_len;
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list)))
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+			} else {
+				sg_length = task_size;
+				task_size -= sg_length;
+				goto next;
+			}
+
+			DEBUG_SC("sg_length(%u) task_size(%u)\n",
+					sg_length, task_size);
+		} else {
+			if ((se_mem->se_len - task_offset) > task_size) {
+				sg_length = task_size;
+				task_size -= sg_length;
+				goto next;
+			} else {
+				sg_length = (se_mem->se_len - task_offset);
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list)))
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+			}
+
+			DEBUG_SC("sg_length(%u) task_size(%u)\n",
+					sg_length, task_size);
+
+			task_offset = 0;
+		}
+		task_size -= sg_length;
+next:
+		DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
+			task->task_no, task_size);
+
+		task->task_sg_num++;
+	}
+	/*
+	 * Check if the fabric module driver is requesting that all
+ * struct se_task->task_sg[] be chained together.  If so,
+	 * then allocate an extra padding SG entry for linking and
+	 * marking the end of the chained SGL.
+	 */
+	if (tfo->task_sg_chaining) {
+		task_sg_num_padded = (task->task_sg_num + 1);
+		task->task_padded_sg = 1;
+	} else
+		task_sg_num_padded = task->task_sg_num;
+
+	task->task_sg = kzalloc(task_sg_num_padded *
+			sizeof(struct scatterlist), GFP_KERNEL);
+	if (!(task->task_sg)) {
+		printk(KERN_ERR "Unable to allocate memory for"
+				" task->task_sg\n");
+		return 0;
+	}
+	sg_init_table(&task->task_sg[0], task_sg_num_padded);
+	/*
+	 * Setup task->task_sg_bidi for SCSI READ payload for
+	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
+	 */
+	if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
+	    (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
+		task->task_sg_bidi = kzalloc(task_sg_num_padded *
+				sizeof(struct scatterlist), GFP_KERNEL);
+		if (!(task->task_sg_bidi)) {
+			printk(KERN_ERR "Unable to allocate memory for"
+				" task->task_sg_bidi\n");
+			return 0;
+		}
+		sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
+	}
+	/*
+	 * For the chaining case, setup the proper end of SGL for the
+	 * initial submission struct task into struct se_subsystem_api.
+	 * This will be cleared later by transport_do_task_sg_chain()
+	 */
+	if (task->task_padded_sg) {
+		sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
+		/*
+		 * Added the 'if' check before marking the end of the
+		 * bi-directional scatterlist, which gets created only for
+		 * a bi-directional (RD + WR) request.
+		 */
+		if (task->task_sg_bidi)
+			sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
+	}
+
+	DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
+		" task_sg_num_padded(%u)\n", task->task_sg_num,
+		task_sg_num_padded);
+
+	return task->task_sg_num;
+}
+
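+/*
+ * Clamp the per-task sector count so that it neither runs past the end of
+ * the device nor exceeds the backend's max_sectors attribute.
+ */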
+static inline int transport_set_tasks_sectors_disk(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	if ((lba + sectors) > transport_dev_end_lba(dev)) {
+		task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
+
+		if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
+			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+			*max_sectors_set = 1;
+		}
+	} else {
+		if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+			*max_sectors_set = 1;
+		} else
+			task->task_sectors = sectors;
+	}
+
+	return 0;
+}
+
+static inline int transport_set_tasks_sectors_non_disk(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	if (sectors > DEV_ATTRIB(dev)->max_sectors) {
+		task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
+		*max_sectors_set = 1;
+	} else
+		task->task_sectors = sectors;
+
+	return 0;
+}
+
+static inline int transport_set_tasks_sectors(
+	struct se_task *task,
+	struct se_device *dev,
+	unsigned long long lba,
+	u32 sectors,
+	int *max_sectors_set)
+{
+	return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
+		transport_set_tasks_sectors_disk(task, dev, lba, sectors,
+				max_sectors_set) :
+		transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
+				max_sectors_set);
+}
+
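+/*
+ * Convert the passed struct scatterlist array into a list of struct se_mem
+ * elements covering cmd->data_length, returning the element count through
+ * *se_mem_cnt.
+ */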
+static int transport_map_sg_to_mem(
+	struct se_cmd *cmd,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	u32 *se_mem_cnt)
+{
+	struct se_mem *se_mem;
+	struct scatterlist *sg;
+	u32 sg_count = 1, cmd_size = cmd->data_length;
+
+	if (!in_mem) {
+		printk(KERN_ERR "No source scatterlist\n");
+		return -1;
+	}
+	sg = (struct scatterlist *)in_mem;
+
+	while (cmd_size) {
+		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
+		if (!(se_mem)) {
+			printk(KERN_ERR "Unable to allocate struct se_mem\n");
+			return -1;
+		}
+		INIT_LIST_HEAD(&se_mem->se_list);
+		DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
+			" sg_page: %p offset: %d length: %d\n", cmd_size,
+			sg_page(sg), sg->offset, sg->length);
+
+		se_mem->se_page = sg_page(sg);
+		se_mem->se_off = sg->offset;
+
+		if (cmd_size > sg->length) {
+			se_mem->se_len = sg->length;
+			sg = sg_next(sg);
+			sg_count++;
+		} else
+			se_mem->se_len = cmd_size;
+
+		cmd_size -= se_mem->se_len;
+
+		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
+				*se_mem_cnt, cmd_size);
+		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
+				se_mem->se_page, se_mem->se_off, se_mem->se_len);
+
+		list_add_tail(&se_mem->se_list, se_mem_list);
+		(*se_mem_cnt)++;
+	}
+
+	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
+		" struct se_mem\n", sg_count, *se_mem_cnt);
+
+	if (sg_count != *se_mem_cnt)
+		BUG();
+
+	return 0;
+}
+
+/*	transport_map_mem_to_sg():
+ *
+ *	Map struct se_mem elements from se_mem_list onto this struct se_task's
+ *	scatterlist array, resuming at *task_offset bytes into in_se_mem and
+ *	returning the next struct se_mem via *out_se_mem.
+ */
+int transport_map_mem_to_sg(
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset)
+{
+	struct se_cmd *se_cmd = task->task_se_cmd;
+	struct se_mem *se_mem = in_se_mem;
+	struct scatterlist *sg = (struct scatterlist *)in_mem;
+	u32 task_size = task->task_size, sg_no = 0;
+
+	if (!sg) {
+		printk(KERN_ERR "Unable to locate valid struct"
+				" scatterlist pointer\n");
+		return -1;
+	}
+
+	while (task_size != 0) {
+		/*
+		 * Setup the contiguous array of scatterlists for
+		 * this struct se_task.
+		 */
+		sg_assign_page(sg, se_mem->se_page);
+
+		if (*task_offset == 0) {
+			sg->offset = se_mem->se_off;
+
+			if (task_size >= se_mem->se_len) {
+				sg->length = se_mem->se_len;
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list))) {
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+					(*se_mem_cnt)++;
+				}
+			} else {
+				sg->length = task_size;
+				/*
+				 * Determine if we need to calculate an offset
+				 * into the struct se_mem on the next go around..
+				 */
+				task_size -= sg->length;
+				if (!(task_size))
+					*task_offset = sg->length;
+
+				goto next;
+			}
+
+		} else {
+			sg->offset = (*task_offset + se_mem->se_off);
+
+			if ((se_mem->se_len - *task_offset) > task_size) {
+				sg->length = task_size;
+				/*
+				 * Determine if we need to calculate an offset
+				 * into the struct se_mem on the next go around..
+				 */
+				task_size -= sg->length;
+				if (!(task_size))
+					*task_offset += sg->length;
+
+				goto next;
+			} else {
+				sg->length = (se_mem->se_len - *task_offset);
+
+				if (!(list_is_last(&se_mem->se_list,
+						T_TASK(se_cmd)->t_mem_list))) {
+					se_mem = list_entry(se_mem->se_list.next,
+							struct se_mem, se_list);
+					(*se_mem_cnt)++;
+				}
+			}
+
+			*task_offset = 0;
+		}
+		task_size -= sg->length;
+next:
+		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
+			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
+			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
+
+		sg_no++;
+		if (!(task_size))
+			break;
+
+		sg = sg_next(sg);
+
+		if (task_size > se_cmd->data_length)
+			BUG();
+	}
+	*out_se_mem = se_mem;
+
+	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
+		" SGs\n", task->task_no, *se_mem_cnt, sg_no);
+
+	return 0;
+}
+
+/*
+ * This function can be used by HW target mode drivers to create a linked
+ * scatterlist from all contiguously allocated struct se_task->task_sg[].
+ * This is intended to be called during the completion path by TCM Core
+ * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
+ */
+void transport_do_task_sg_chain(struct se_cmd *cmd)
+{
+	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
+	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
+	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+	struct se_task *task;
+	struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
+	u32 task_sg_num = 0, sg_count = 0;
+	int i;
+
+	if (tfo->task_sg_chaining == 0) {
+		printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
+				" %s\n", tfo->get_fabric_name());
+		dump_stack();
+		return;
+	}
+	/*
+	 * Walk the struct se_task list and setup scatterlist chains
+	 * for each contiguously allocated struct se_task->task_sg[].
+	 */
+	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+		if (!(task->task_sg) || !(task->task_padded_sg))
+			continue;
+
+		if (sg_head && sg_link) {
+			sg_head_cur = &task->task_sg[0];
+			sg_link_cur = &task->task_sg[task->task_sg_num];
+			/*
+			 * Either add chain or mark end of scatterlist
+			 */
+			if (!(list_is_last(&task->t_list,
+					&T_TASK(cmd)->t_task_list))) {
+				/*
+				 * Clear existing SGL termination bit set in
+				 * transport_calc_sg_num(), see sg_mark_end()
+				 */
+				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
+				sg_end_cur->page_link &= ~0x02;
+
+				sg_chain(sg_head, task_sg_num, sg_head_cur);
+				sg_count += (task->task_sg_num + 1);
+			} else
+				sg_count += task->task_sg_num;
+
+			sg_head = sg_head_cur;
+			sg_link = sg_link_cur;
+			task_sg_num = task->task_sg_num;
+			continue;
+		}
+		sg_head = sg_first = &task->task_sg[0];
+		sg_link = &task->task_sg[task->task_sg_num];
+		task_sg_num = task->task_sg_num;
+		/*
+		 * Check for single task..
+		 */
+		if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
+			/*
+			 * Clear existing SGL termination bit set in
+			 * transport_calc_sg_num(), see sg_mark_end()
+			 */
+			sg_end = &task->task_sg[task->task_sg_num - 1];
+			sg_end->page_link &= ~0x02;
+			sg_count += (task->task_sg_num + 1);
+		} else
+			sg_count += task->task_sg_num;
+	}
+	/*
+	 * Setup the starting pointer and total t_tasks_sg_linked_no including
+	 * padding SGs for linking and to mark the end.
+	 */
+	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
+	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+
+	DEBUG_CMD_M("Setup T_TASK(cmd)->t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", T_TASK(cmd)->t_tasks_sg_chained,
+		T_TASK(cmd)->t_tasks_sg_chained_no);
+
+	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
+			T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+
+		DEBUG_CMD_M("SG: %p page: %p length: %d offset: %d\n",
+			sg, sg_page(sg), sg->length, sg->offset);
+		if (sg_is_chain(sg))
+			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+		if (sg_is_last(sg))
+			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+	}
+
+}
+EXPORT_SYMBOL(transport_do_task_sg_chain);
+
+static int transport_do_se_mem_map(
+	struct se_device *dev,
+	struct se_task *task,
+	struct list_head *se_mem_list,
+	void *in_mem,
+	struct se_mem *in_se_mem,
+	struct se_mem **out_se_mem,
+	u32 *se_mem_cnt,
+	u32 *task_offset_in)
+{
+	u32 task_offset = *task_offset_in;
+	int ret = 0;
+	/*
+	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
+	 * has been done by the transport plugin.
+	 */
+	if (TRANSPORT(dev)->do_se_mem_map) {
+		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
+				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
+				task_offset_in);
+		if (ret == 0)
+			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
+
+		return ret;
+	}
+	/*
+	 * This is the normal path for all normal non BIDI and BIDI-COMMAND
+	 * WRITE payloads..  If we need to do BIDI READ passthrough for
+	 * TCM/pSCSI the first call to transport_do_se_mem_map ->
+	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
+	 * allocation for task->task_sg_bidi, and the subsequent call to
+	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
+	 */
+	if (!(task->task_sg_bidi)) {
+		/*
+		 * Assume default that transport plugin speaks preallocated
+		 * scatterlists.
+		 */
+		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
+			return -1;
+		/*
+		 * struct se_task->task_sg now contains the struct scatterlist array.
+		 */
+		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
+					in_se_mem, out_se_mem, se_mem_cnt,
+					task_offset_in);
+	}
+	/*
+	 * Handle the se_mem_list -> struct task->task_sg_bidi
+	 * memory map for the extra BIDI READ payload
+	 */
+	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
+				in_se_mem, out_se_mem, se_mem_cnt,
+				task_offset_in);
+}
+
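+/*
+ * Split the LBA range for this command into one or more struct se_task
+ * descriptors, each bounded by the backend's max_sectors, and map the
+ * struct se_mem list(s) into each task's scatterlist(s).
+ */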
+static u32 transport_generic_get_cdb_count(
+	struct se_cmd *cmd,
+	unsigned long long lba,
+	u32 sectors,
+	enum dma_data_direction data_direction,
+	struct list_head *mem_list,
+	int set_counts)
+{
+	unsigned char *cdb = NULL;
+	struct se_task *task;
+	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
+	struct se_device *dev = SE_DEV(cmd);
+	int max_sectors_set = 0, ret;
+	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
+
+	if (!mem_list) {
+		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
+				"_cdb_count()\n");
+		return 0;
+	}
+	/*
+	 * Using RAMDISK_DR backstores is the only case where
+	 * mem_list will ever be empty at this point.
+	 */
+	if (!(list_empty(mem_list)))
+		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+	/*
+	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
+	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
+	 */
+	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
+	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
+	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
+		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
+					struct se_mem, se_list);
+
+	while (sectors) {
+		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
+			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
+			transport_dev_end_lba(dev));
+
+		task = transport_generic_get_task(cmd, data_direction);
+		if (!(task))
+			goto out;
+
+		transport_set_tasks_sectors(task, dev, lba, sectors,
+				&max_sectors_set);
+
+		task->task_lba = lba;
+		lba += task->task_sectors;
+		sectors -= task->task_sectors;
+		task->task_size = (task->task_sectors *
+				   DEV_ATTRIB(dev)->block_size);
+
+		cdb = TRANSPORT(dev)->get_cdb(task);
+		if ((cdb)) {
+			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
+				scsi_command_size(T_TASK(cmd)->t_task_cdb));
+			cmd->transport_split_cdb(task->task_lba,
+					&task->task_sectors, cdb);
+		}
+
+		/*
+		 * Perform the SE OBJ plugin and/or Transport plugin specific
+		 * mapping for T_TASK(cmd)->t_mem_list. And setup the
+		 * task->task_sg and if necessary task->task_sg_bidi
+		 */
+		ret = transport_do_se_mem_map(dev, task, mem_list,
+				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
+				&task_offset_in);
+		if (ret < 0)
+			goto out;
+
+		se_mem = se_mem_lout;
+		/*
+		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
+		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
+		 *
+		 * Note that the first call to transport_do_se_mem_map() above will
+		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
+		 * -> transport_calc_sg_num(), and the second here will do the
+		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+		 */
+		if (task->task_sg_bidi != NULL) {
+			ret = transport_do_se_mem_map(dev, task,
+				T_TASK(cmd)->t_mem_bidi_list, NULL,
+				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
+				&task_offset_in);
+			if (ret < 0)
+				goto out;
+
+			se_mem_bidi = se_mem_bidi_lout;
+		}
+		task_cdbs++;
+
+		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
+				task_cdbs, task->task_sg_num);
+
+		if (max_sectors_set) {
+			max_sectors_set = 0;
+			continue;
+		}
+
+		if (!sectors)
+			break;
+	}
+
+	if (set_counts) {
+		atomic_inc(&T_TASK(cmd)->t_fe_count);
+		atomic_inc(&T_TASK(cmd)->t_se_count);
+	}
+
+	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
+		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
+		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+
+	return task_cdbs;
+out:
+	return 0;
+}
+
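+/*
+ * Map a control (non READ/WRITE data) CDB onto a single struct se_task and
+ * hand it to the backend's map_task_SG/map_task_non_SG/cdb_none method.
+ */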
+static int
+transport_map_control_cmd_to_task(struct se_cmd *cmd)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	unsigned char *cdb;
+	struct se_task *task;
+	int ret;
+
+	task = transport_generic_get_task(cmd, cmd->data_direction);
+	if (!task)
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+	cdb = TRANSPORT(dev)->get_cdb(task);
+	if (cdb)
+		memcpy(cdb, cmd->t_task->t_task_cdb,
+			scsi_command_size(cmd->t_task->t_task_cdb));
+
+	task->task_size = cmd->data_length;
+	task->task_sg_num =
+		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+
+	atomic_inc(&cmd->t_task->t_fe_count);
+	atomic_inc(&cmd->t_task->t_se_count);
+
+	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
+		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
+		u32 se_mem_cnt = 0, task_offset = 0;
+
+		BUG_ON(list_empty(cmd->t_task->t_mem_list));
+
+		ret = transport_do_se_mem_map(dev, task,
+				cmd->t_task->t_mem_list, NULL, se_mem,
+				&se_mem_lout, &se_mem_cnt, &task_offset);
+		if (ret < 0)
+			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+		if (dev->transport->map_task_SG)
+			return dev->transport->map_task_SG(task);
+		return 0;
+	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
+		if (dev->transport->map_task_non_SG)
+			return dev->transport->map_task_non_SG(task);
+		return 0;
+	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+		if (dev->transport->cdb_none)
+			return dev->transport->cdb_none(task);
+		return 0;
+	} else {
+		BUG();
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	}
+}
+
+/*	 transport_generic_new_cmd(): Called from transport_processing_thread()
+ *
+ *	 Allocate storage transport resources from a set of values predefined
+ *	 by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
+ *	 Any non zero return here is treated as an "out of resources" failure.
+ */
+/*
+ * Generate struct se_task(s) and/or their payloads for this CDB.
+ */
+static int transport_generic_new_cmd(struct se_cmd *cmd)
+{
+	struct se_portal_group *se_tpg;
+	struct se_task *task;
+	struct se_device *dev = SE_DEV(cmd);
+	int ret = 0;
+
+	/*
+	 * Determine if the TCM fabric module has already allocated physical
+	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
+	 * to setup beforehand the linked list of physical memory at
+	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+	 */
+	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+		ret = transport_allocate_resources(cmd);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = transport_get_sectors(cmd);
+	if (ret < 0)
+		return ret;
+
+	ret = transport_new_cmd_obj(cmd);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Determine if the calling TCM fabric module is talking to
+	 * Linux/NET via kernel sockets and needs to allocate a
+	 * struct iovec array to complete the struct se_cmd
+	 */
+	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
+	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
+		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
+		if (ret < 0)
+			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+	}
+
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+			if (atomic_read(&task->task_sent))
+				continue;
+			if (!dev->transport->map_task_SG)
+				continue;
+
+			ret = dev->transport->map_task_SG(task);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		ret = transport_map_control_cmd_to_task(cmd);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
+	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
+	 * will be added to the struct se_device execution queue after its WRITE
+	 * data has arrived. (ie: It gets handled by the transport processing
+	 * thread a second time)
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		transport_add_tasks_to_state_queue(cmd);
+		return transport_generic_write_pending(cmd);
+	}
+	/*
+	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
+	 * to the execution queue.
+	 */
+	transport_execute_tasks(cmd);
+	return 0;
+}
+
+/*	transport_generic_process_write():
+ *
+ *
+ */
+void transport_generic_process_write(struct se_cmd *cmd)
+{
+#if 0
+	/*
+	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
+	 * original EDTL
+	 */
+	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+		if (!T_TASK(cmd)->t_tasks_se_num) {
+			unsigned char *dst, *buf =
+				(unsigned char *)T_TASK(cmd)->t_task_buf;
+
+			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
+			if (!(dst)) {
+				printk(KERN_ERR "Unable to allocate memory for"
+						" WRITE underflow\n");
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				return;
+			}
+			memcpy(dst, buf, cmd->cmd_spdtl);
+
+			kfree(T_TASK(cmd)->t_task_buf);
+			T_TASK(cmd)->t_task_buf = dst;
+		} else {
+			struct scatterlist *sg =
+				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
+			struct scatterlist *orig_sg;
+
+			orig_sg = kzalloc(sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num,
+					GFP_KERNEL);
+			if (!(orig_sg)) {
+				printk(KERN_ERR "Unable to allocate memory"
+						" for WRITE underflow\n");
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				return;
+			}
+
+			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
+					sizeof(struct scatterlist) *
+					T_TASK(cmd)->t_tasks_se_num);
+
+			cmd->data_length = cmd->cmd_spdtl;
+			/*
+			 * FIXME, clear out original struct se_task and state
+			 * information.
+			 */
+			if (transport_generic_new_cmd(cmd) < 0) {
+				transport_generic_request_failure(cmd, NULL,
+					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
+				kfree(orig_sg);
+				return;
+			}
+
+			transport_memcpy_write_sg(cmd, orig_sg);
+		}
+	}
+#endif
+	transport_execute_tasks(cmd);
+}
+EXPORT_SYMBOL(transport_generic_process_write);
+
+/*	transport_generic_write_pending():
+ *
+ *
+ */
+static int transport_generic_write_pending(struct se_cmd *cmd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_WRITE_PENDING;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
+	 * from the passed Linux/SCSI struct scatterlist located at
+	 * T_TASK(se_cmd)->t_task_pt_sgl to the contiguous buffer at
+	 * T_TASK(se_cmd)->t_task_buf.
+	 */
+	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
+		transport_memcpy_read_contig(cmd,
+				T_TASK(cmd)->t_task_buf,
+				T_TASK(cmd)->t_task_pt_sgl);
+	/*
+	 * Clear the se_cmd for WRITE_PENDING status in order to set
+	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+	 * can be called from HW target mode interrupt code.  This is safe
+	 * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+	 * because the se_cmd->se_lun pointer is not being cleared.
+	 */
+	transport_cmd_check_stop(cmd, 1, 0);
+
+	/*
+	 * Call the fabric write_pending function here to let the
+	 * frontend know that WRITE buffers are ready.
+	 */
+	ret = CMD_TFO(cmd)->write_pending(cmd);
+	if (ret < 0)
+		return ret;
+
+	return PYX_TRANSPORT_WRITE_PENDING;
+}
+
+/*	transport_release_cmd_to_pool():
+ *
+ *
+ */
+void transport_release_cmd_to_pool(struct se_cmd *cmd)
+{
+	BUG_ON(!T_TASK(cmd));
+	BUG_ON(!CMD_TFO(cmd));
+
+	transport_free_se_cmd(cmd);
+	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+}
+EXPORT_SYMBOL(transport_release_cmd_to_pool);
+
+/*	transport_generic_free_cmd():
+ *
+ *	Called from processing frontend to release storage engine resources
+ */
+void transport_generic_free_cmd(
+	struct se_cmd *cmd,
+	int wait_for_tasks,
+	int release_to_pool,
+	int session_reinstatement)
+{
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
+		transport_release_cmd_to_pool(cmd);
+	else {
+		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
+
+		if (SE_LUN(cmd)) {
+#if 0
+			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+				" SE_LUN(cmd)\n", cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+			transport_lun_remove_cmd(cmd);
+		}
+
+		if (wait_for_tasks && cmd->transport_wait_for_tasks)
+			cmd->transport_wait_for_tasks(cmd, 0, 0);
+
+		transport_generic_remove(cmd, release_to_pool,
+				session_reinstatement);
+	}
+}
+EXPORT_SYMBOL(transport_generic_free_cmd);
+
+static void transport_nop_wait_for_tasks(
+	struct se_cmd *cmd,
+	int remove_cmd,
+	int session_reinstatement)
+{
+	return;
+}
+
+/*	transport_lun_wait_for_tasks():
+ *
+ *	Called from ConfigFS context to stop the passed struct se_cmd to allow
+ *	a struct se_lun to be successfully shut down.
+ */
+static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+{
+	unsigned long flags;
+	int ret;
+	/*
+	 * If the frontend has already requested this struct se_cmd to
+	 * be stopped, we can safely ignore this struct se_cmd.
+	 */
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		transport_cmd_check_stop(cmd, 1, 0);
+		return -1;
+	}
+	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+	ret = transport_stop_tasks_for_cmd(cmd);
+
+	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
+			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+	if (!ret) {
+		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+				CMD_TFO(cmd)->get_task_tag(cmd));
+		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
+		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+				CMD_TFO(cmd)->get_task_tag(cmd));
+	}
+	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+
+	return 0;
+}
+
+/* #define DEBUG_CLEAR_LUN */
+#ifdef DEBUG_CLEAR_LUN
+#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
+#else
+#define DEBUG_CLEAR_L(x...)
+#endif
+
+static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct se_cmd *cmd = NULL;
+	unsigned long lun_flags, cmd_flags;
+	/*
+	 * Do exception processing and return CHECK_CONDITION status to the
+	 * Initiator Port.
+	 */
+	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	while (!list_empty_careful(&lun->lun_cmd_list)) {
+		cmd = list_entry(lun->lun_cmd_list.next,
+			struct se_cmd, se_lun_list);
+		list_del(&cmd->se_lun_list);
+
+		if (!(T_TASK(cmd))) {
+			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
+				"[i,t]_state: %u/%u\n",
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+		/*
+		 * This will notify iscsi_target_transport.c:
+		 * transport_cmd_check_stop() that a LUN shutdown is in
+		 * progress for the iscsi_cmd_t.
+		 */
+		spin_lock(&T_TASK(cmd)->t_state_lock);
+		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+			"_lun_stop for  ITT: 0x%08x\n",
+			SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
+		spin_unlock(&T_TASK(cmd)->t_state_lock);
+
+		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+
+		if (!(SE_LUN(cmd))) {
+			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+			BUG();
+		}
+		/*
+		 * If the Storage engine still owns the iscsi_cmd_t, determine
+		 * and/or stop its context.
+		 */
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+			"_wait_for_tasks(): SUCCESS\n",
+			SE_LUN(cmd)->unpacked_lun,
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+			goto check_cond;
+		}
+		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+		transport_all_task_dev_remove_state(cmd);
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+
+		transport_free_dev_tasks(cmd);
+		/*
+		 * The Storage engine stopped this struct se_cmd before it was
+		 * sent to the fabric frontend for delivery back to the
+		 * Initiator Node.  Return this SCSI CDB back with a
+		 * CHECK_CONDITION status.
+		 */
+check_cond:
+		transport_send_check_condition_and_sense(cmd,
+				TCM_NON_EXISTENT_LUN, 0);
+		/*
+		 *  If the fabric frontend is waiting for this iscsi_cmd_t to
+		 * be released, notify the waiting thread now that LU has
+		 * finished accessing it.
+		 */
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
+			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+				" struct se_cmd: %p ITT: 0x%08x\n",
+				lun->unpacked_lun,
+				cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+
+			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+					cmd_flags);
+			transport_cmd_check_stop(cmd, 1, 0);
+			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+			continue;
+		}
+		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
+	}
+	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+}
+
+static int transport_clear_lun_thread(void *p)
+{
+	struct se_lun *lun = (struct se_lun *)p;
+
+	__transport_clear_lun_from_sessions(lun);
+	complete(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+int transport_clear_lun_from_sessions(struct se_lun *lun)
+{
+	struct task_struct *kt;
+
+	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+			"tcm_cl_%u", lun->unpacked_lun);
+	if (IS_ERR(kt)) {
+		printk(KERN_ERR "Unable to start clear_lun thread\n");
+		return -1;
+	}
+	wait_for_completion(&lun->lun_shutdown_comp);
+
+	return 0;
+}
+
+/*	transport_generic_wait_for_tasks():
+ *
+ *	Called from frontend or passthrough context to wait for storage engine
+ *	to pause and/or release frontend generated struct se_cmd.
+ */
+static void transport_generic_wait_for_tasks(
+	struct se_cmd *cmd,
+	int remove_cmd,
+	int session_reinstatement)
+{
+	unsigned long flags;
+
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
+		return;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	/*
+	 * If we are already stopped due to an external event (ie: LUN shutdown)
+	 * sleep until the connection can have the passed struct se_cmd back.
+	 * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by
+	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
+	 * has completed its operation on the struct se_cmd.
+	 */
+	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+
+		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
+			"_stop_comp); for ITT: 0x%08x\n",
+			CMD_TFO(cmd)->get_task_tag(cmd));
+		/*
+		 * There is a special case for WRITES where a FE exception +
+		 * LUN shutdown means ConfigFS context is still sleeping on
+		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
+		 * We go ahead and up transport_lun_stop_comp just to be sure
+		 * here.
+		 */
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		complete(&T_TASK(cmd)->transport_lun_stop_comp);
+		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+		transport_all_task_dev_remove_state(cmd);
+		/*
+		 * At this point, the frontend who was the originator of this
+		 * struct se_cmd, now owns the structure, which can be released through
+		 * normal means below.
+		 */
+		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
+			"stop_comp); for ITT: 0x%08x\n",
+			CMD_TFO(cmd)->get_task_tag(cmd));
+
+		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+	}
+	if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+		goto remove;
+
+	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
+		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
+		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+		cmd->deferred_t_state);
+
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+
+	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+
+	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
+		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
+		CMD_TFO(cmd)->get_task_tag(cmd));
+remove:
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	if (!remove_cmd)
+		return;
+
+	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+}
+
+static int transport_get_sense_codes(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	*asc = cmd->scsi_asc;
+	*ascq = cmd->scsi_ascq;
+
+	return 0;
+}
+
+static int transport_set_sense_codes(
+	struct se_cmd *cmd,
+	u8 asc,
+	u8 ascq)
+{
+	cmd->scsi_asc = asc;
+	cmd->scsi_ascq = ascq;
+
+	return 0;
+}
+
+int transport_send_check_condition_and_sense(
+	struct se_cmd *cmd,
+	u8 reason,
+	int from_transport)
+{
+	unsigned char *buffer = cmd->sense_buffer;
+	unsigned long flags;
+	int offset;
+	u8 asc = 0, ascq = 0;
+
+	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		return 0;
+	}
+	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+
+	if (!reason && from_transport)
+		goto after_reason;
+
+	if (!from_transport)
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+	/*
+	 * Data Segment and SenseLength of the fabric response PDU.
+	 *
+	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
+	 * from include/scsi/scsi_cmnd.h
+	 */
+	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+				TRANSPORT_SENSE_BUFFER);
+	/*
+	 * Actual SENSE DATA, see SPC-3 7.23.2  SPC_SENSE_KEY_OFFSET uses
+	 * SENSE KEY values from include/scsi/scsi.h
+	 */
+	switch (reason) {
+	case TCM_NON_EXISTENT_LUN:
+	case TCM_UNSUPPORTED_SCSI_OPCODE:
+	case TCM_SECTOR_COUNT_TOO_MANY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID COMMAND OPERATION CODE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
+		break;
+	case TCM_UNKNOWN_MODE_PAGE:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_CHECK_CONDITION_ABORT_CMD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* BUS DEVICE RESET FUNCTION OCCURRED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
+		break;
+	case TCM_INCORRECT_AMOUNT_OF_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* NOT ENOUGH UNSOLICITED DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
+		break;
+	case TCM_INVALID_CDB_FIELD:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* INVALID FIELD IN CDB */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+		break;
+	case TCM_INVALID_PARAMETER_LIST:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* INVALID FIELD IN PARAMETER LIST */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+		break;
+	case TCM_UNEXPECTED_UNSOLICITED_DATA:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* WRITE ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
+		/* UNEXPECTED_UNSOLICITED_DATA */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
+		break;
+	case TCM_SERVICE_CRC_ERROR:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* PROTOCOL SERVICE CRC ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
+		/* N/A */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
+		break;
+	case TCM_SNACK_REJECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ABORTED COMMAND */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+		/* READ ERROR */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
+		/* FAILED RETRANSMISSION REQUEST */
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
+		break;
+	case TCM_WRITE_PROTECTED:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* DATA PROTECT */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+		/* WRITE PROTECTED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+		break;
+	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* UNIT ATTENTION */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_CHECK_CONDITION_NOT_READY:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* Not Ready */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+		transport_get_sense_codes(cmd, &asc, &ascq);
+		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
+		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
+		break;
+	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+	default:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL UNIT COMMUNICATION FAILURE */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+		break;
+	}
+	/*
+	 * This code uses linux/include/scsi/scsi.h SAM status codes!
+	 */
+	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+	/*
+	 * Automatically padded, this value is encoded in the fabric's
+	 * data_length response PDU containing the SCSI defined sense data.
+	 */
+	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER + offset;
+
+after_reason:
+	CMD_TFO(cmd)->queue_status(cmd);
+	return 0;
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+	int ret = 0;
+
+	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
+		if (!(send_status) ||
+		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+			return 1;
+#if 0
+		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+			" status for CDB: 0x%02x ITT: 0x%08x\n",
+			T_TASK(cmd)->t_task_cdb[0],
+			CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+		CMD_TFO(cmd)->queue_status(cmd);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(transport_check_aborted_status);
+
+void transport_send_task_abort(struct se_cmd *cmd)
+{
+	/*
+	 * If there are still expected incoming fabric WRITEs, we wait
+	 * until they have completed before sending a TASK_ABORTED
+	 * response.  This response with TASK_ABORTED status will be
+	 * queued back to fabric module by transport_check_aborted_status().
+	 */
+	if (cmd->data_direction == DMA_TO_DEVICE) {
+		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
+			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+			smp_mb__after_atomic_inc();
+			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+			transport_new_cmd_failure(cmd);
+			return;
+		}
+	}
+	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+#if 0
+	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
+		CMD_TFO(cmd)->get_task_tag(cmd));
+#endif
+	CMD_TFO(cmd)->queue_status(cmd);
+}
+
+/*	transport_generic_do_tmr():
+ *
+ *
+ */
+int transport_generic_do_tmr(struct se_cmd *cmd)
+{
+	struct se_cmd *ref_cmd;
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_tmr_req *tmr = cmd->se_tmr_req;
+	int ret;
+
+	switch (tmr->function) {
+	case ABORT_TASK:
+		ref_cmd = tmr->ref_cmd;
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case ABORT_TASK_SET:
+	case CLEAR_ACA:
+	case CLEAR_TASK_SET:
+		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+		break;
+	case LUN_RESET:
+		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
+		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
+					 TMR_FUNCTION_REJECTED;
+		break;
+#if 0
+	case TARGET_WARM_RESET:
+		transport_generic_host_reset(dev->se_hba);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	case TARGET_COLD_RESET:
+		transport_generic_host_reset(dev->se_hba);
+		transport_generic_cold_reset(dev->se_hba);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+#endif
+	default:
+		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
+				tmr->function);
+		tmr->response = TMR_FUNCTION_REJECTED;
+		break;
+	}
+
+	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+	CMD_TFO(cmd)->queue_tm_rsp(cmd);
+
+	transport_cmd_check_stop(cmd, 2, 0);
+	return 0;
+}
+
+/*
+ *	Called with spin_lock_irq(&dev->execute_task_lock); held
+ *
+ */
+static struct se_task *
+transport_get_task_from_state_list(struct se_device *dev)
+{
+	struct se_task *task;
+
+	if (list_empty(&dev->state_task_list))
+		return NULL;
+
+	list_for_each_entry(task, &dev->state_task_list, t_state_list)
+		break;
+
+	list_del(&task->t_state_list);
+	atomic_set(&task->task_state_active, 0);
+
+	return task;
+}
+
+static void transport_processing_shutdown(struct se_device *dev)
+{
+	struct se_cmd *cmd;
+	struct se_queue_req *qr;
+	struct se_task *task;
+	u8 state;
+	unsigned long flags;
+	/*
+	 * Empty the struct se_device's struct se_task state list.
+	 */
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	while ((task = transport_get_task_from_state_list(dev))) {
+		if (!(TASK_CMD(task))) {
+			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+			continue;
+		}
+		cmd = TASK_CMD(task);
+
+		if (!T_TASK(cmd)) {
+			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
+				" %p ITT: 0x%08x\n", task, cmd,
+				CMD_TFO(cmd)->get_task_tag(cmd));
+			continue;
+		}
+		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+
+		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
+			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
+			" %d/%d cdb: 0x%02x\n", cmd, task,
+			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
+			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+			cmd->t_state, cmd->deferred_t_state,
+			T_TASK(cmd)->t_task_cdb[0]);
+		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
+			" t_transport_stop: %d t_transport_sent: %d\n",
+			CMD_TFO(cmd)->get_task_tag(cmd),
+			T_TASK(cmd)->t_task_cdbs,
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
+			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
+			atomic_read(&T_TASK(cmd)->t_transport_active),
+			atomic_read(&T_TASK(cmd)->t_transport_stop),
+			atomic_read(&T_TASK(cmd)->t_transport_sent));
+
+		if (atomic_read(&task->task_active)) {
+			atomic_set(&task->task_stop, 1);
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+				" %p\n", task, dev);
+			wait_for_completion(&task->task_stop_comp);
+			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+				task, dev);
+
+			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+
+			atomic_set(&task->task_active, 0);
+			atomic_set(&task->task_stop, 0);
+		}
+		__transport_stop_task_timer(task, &flags);
+
+		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+			spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+			DEBUG_DO("Skipping task: %p, dev: %p for"
+				" t_task_cdbs_ex_left: %d\n", task, dev,
+				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+
+		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
+			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+					" %p\n", task, dev);
+
+			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+				transport_send_check_condition_and_sense(
+					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
+					0);
+				transport_remove_cmd_from_queue(cmd,
+					SE_DEV(cmd)->dev_queue_obj);
+
+				transport_lun_remove_cmd(cmd);
+				transport_cmd_check_stop(cmd, 1, 0);
+			} else {
+				spin_unlock_irqrestore(
+					&T_TASK(cmd)->t_state_lock, flags);
+
+				transport_remove_cmd_from_queue(cmd,
+					SE_DEV(cmd)->dev_queue_obj);
+
+				transport_lun_remove_cmd(cmd);
+
+				if (transport_cmd_check_stop(cmd, 1, 0))
+					transport_generic_remove(cmd, 0, 0);
+			}
+
+			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			continue;
+		}
+		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+				task, dev);
+
+		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+			transport_send_check_condition_and_sense(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+			transport_remove_cmd_from_queue(cmd,
+				SE_DEV(cmd)->dev_queue_obj);
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop(cmd, 1, 0);
+		} else {
+			spin_unlock_irqrestore(
+				&T_TASK(cmd)->t_state_lock, flags);
+
+			transport_remove_cmd_from_queue(cmd,
+				SE_DEV(cmd)->dev_queue_obj);
+			transport_lun_remove_cmd(cmd);
+
+			if (transport_cmd_check_stop(cmd, 1, 0))
+				transport_generic_remove(cmd, 0, 0);
+		}
+
+		spin_lock_irqsave(&dev->execute_task_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	/*
+	 * Empty the struct se_device's struct se_cmd list.
+	 */
+	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
+		spin_unlock_irqrestore(
+				&dev->dev_queue_obj->cmd_queue_lock, flags);
+		cmd = (struct se_cmd *)qr->cmd;
+		state = qr->state;
+		kfree(qr);
+
+		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+				cmd, state);
+
+		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+			transport_send_check_condition_and_sense(cmd,
+				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+
+			transport_lun_remove_cmd(cmd);
+			transport_cmd_check_stop(cmd, 1, 0);
+		} else {
+			transport_lun_remove_cmd(cmd);
+			if (transport_cmd_check_stop(cmd, 1, 0))
+				transport_generic_remove(cmd, 0, 0);
+		}
+		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+}
+
+/*	transport_processing_thread():
+ *
+ *
+ */
+static int transport_processing_thread(void *param)
+{
+	int ret, t_state;
+	struct se_cmd *cmd;
+	struct se_device *dev = (struct se_device *) param;
+	struct se_queue_req *qr;
+
+	set_user_nice(current, -20);
+
+	while (!kthread_should_stop()) {
+		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
+				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+				kthread_should_stop());
+		if (ret < 0)
+			goto out;
+
+		spin_lock_irq(&dev->dev_status_lock);
+		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
+			spin_unlock_irq(&dev->dev_status_lock);
+			transport_processing_shutdown(dev);
+			continue;
+		}
+		spin_unlock_irq(&dev->dev_status_lock);
+
+get_cmd:
+		__transport_execute_tasks(dev);
+
+		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
+		if (!(qr))
+			continue;
+
+		cmd = (struct se_cmd *)qr->cmd;
+		t_state = qr->state;
+		kfree(qr);
+
+		switch (t_state) {
+		case TRANSPORT_NEW_CMD_MAP:
+			if (!(CMD_TFO(cmd)->new_cmd_map)) {
+				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+					" NULL for TRANSPORT_NEW_CMD_MAP\n");
+				BUG();
+			}
+			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+			if (ret < 0) {
+				cmd->transport_error_status = ret;
+				transport_generic_request_failure(cmd, NULL,
+						0, (cmd->data_direction !=
+						    DMA_TO_DEVICE));
+				break;
+			}
+			/* Fall through */
+		case TRANSPORT_NEW_CMD:
+			ret = transport_generic_new_cmd(cmd);
+			if (ret < 0) {
+				cmd->transport_error_status = ret;
+				transport_generic_request_failure(cmd, NULL,
+					0, (cmd->data_direction !=
+					 DMA_TO_DEVICE));
+			}
+			break;
+		case TRANSPORT_PROCESS_WRITE:
+			transport_generic_process_write(cmd);
+			break;
+		case TRANSPORT_COMPLETE_OK:
+			transport_stop_all_task_timers(cmd);
+			transport_generic_complete_ok(cmd);
+			break;
+		case TRANSPORT_REMOVE:
+			transport_generic_remove(cmd, 1, 0);
+			break;
+		case TRANSPORT_PROCESS_TMR:
+			transport_generic_do_tmr(cmd);
+			break;
+		case TRANSPORT_COMPLETE_FAILURE:
+			transport_generic_request_failure(cmd, NULL, 1, 1);
+			break;
+		case TRANSPORT_COMPLETE_TIMEOUT:
+			transport_stop_all_task_timers(cmd);
+			transport_generic_request_timeout(cmd);
+			break;
+		default:
+			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
+				" %u\n", t_state, cmd->deferred_t_state,
+				CMD_TFO(cmd)->get_task_tag(cmd),
+				CMD_TFO(cmd)->get_cmd_state(cmd),
+				SE_LUN(cmd)->unpacked_lun);
+			BUG();
+		}
+
+		goto get_cmd;
+	}
+
+out:
+	transport_release_all_cmds(dev);
+	dev->process_thread = NULL;
+	return 0;
+}
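For reference, transport_processing_thread() above follows the usual queue-driven kernel worker shape: sleep on a waitqueue until work is queued or the thread is asked to stop, then drain and dispatch. A minimal, self-contained sketch of that pattern (not from the patch; the demo_* names and request type are illustrative only):

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_req {
	struct list_head list;
	int state;
};

static LIST_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

/* Pop one queued request, or return NULL if the queue is empty */
static struct demo_req *demo_dequeue(void)
{
	struct demo_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	if (!list_empty(&demo_queue)) {
		req = list_first_entry(&demo_queue, struct demo_req, list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(&demo_lock, flags);
	return req;
}

static int demo_thread(void *unused)
{
	struct demo_req *req;

	while (!kthread_should_stop()) {
		/* Sleep until work is queued or kthread_stop() is called */
		if (wait_event_interruptible(demo_wq,
				!list_empty(&demo_queue) ||
				kthread_should_stop()))
			continue;

		/* Drain everything currently queued, then sleep again */
		while ((req = demo_dequeue()) != NULL) {
			pr_debug("demo: handling request in state %d\n",
					req->state);
			kfree(req);
		}
	}
	return 0;
}

A producer links a demo_req onto demo_queue under demo_lock and calls wake_up_interruptible(&demo_wq); the thread is started with kthread_run() and torn down with kthread_stop(), whose wakeup lets the kthread_should_stop() clause end the loop cleanly, much as the device shutdown path above does.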
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
new file mode 100644
index 0000000..a2ef346
--- /dev/null
+++ b/drivers/target/target_core_ua.c
@@ -0,0 +1,332 @@
+/*******************************************************************************
+ * Filename: target_core_ua.c
+ *
+ * This file contains logic for SPC-3 Unit Attention emulation
+ *
+ * Copyright (c) 2009,2010 Rising Tide Systems
+ * Copyright (c) 2009,2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ******************************************************************************/
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "target_core_alua.h"
+#include "target_core_hba.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+
+int core_scsi3_ua_check(
+	struct se_cmd *cmd,
+	unsigned char *cdb)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+
+	if (!(sess))
+		return 0;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return 0;
+
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count)))
+		return 0;
+	/*
+	 * From sam4r14, section 5.14 Unit attention condition:
+	 *
+	 * a) if an INQUIRY command enters the enabled command state, the
+	 *    device server shall process the INQUIRY command and shall neither
+	 *    report nor clear any unit attention condition;
+	 * b) if a REPORT LUNS command enters the enabled command state, the
+	 *    device server shall process the REPORT LUNS command and shall not
+	 *    report any unit attention condition;
+	 * e) if a REQUEST SENSE command enters the enabled command state while
+	 *    a unit attention condition exists for the SCSI initiator port
+	 *    associated with the I_T nexus on which the REQUEST SENSE command
+	 *    was received, then the device server shall process the command
+	 *    and either:
+	 */
+	switch (cdb[0]) {
+	case INQUIRY:
+	case REPORT_LUNS:
+	case REQUEST_SENSE:
+		return 0;
+	default:
+		return -1;
+	}
+
+	return -1;
+}
+
+int core_scsi3_ua_allocate(
+	struct se_node_acl *nacl,
+	u32 unpacked_lun,
+	u8 asc,
+	u8 ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_ua *ua, *ua_p, *ua_tmp;
+	/*
+	 * PASSTHROUGH OPS
+	 */
+	if (!(nacl))
+		return -1;
+
+	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+	if (!(ua)) {
+		printk(KERN_ERR "Unable to allocate struct se_ua\n");
+		return -1;
+	}
+	INIT_LIST_HEAD(&ua->ua_dev_list);
+	INIT_LIST_HEAD(&ua->ua_nacl_list);
+
+	ua->ua_nacl = nacl;
+	ua->ua_asc = asc;
+	ua->ua_ascq = ascq;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[unpacked_lun];
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * Do not report the same UNIT ATTENTION twice..
+		 */
+		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
+			spin_unlock(&deve->ua_lock);
+			spin_unlock_irq(&nacl->device_list_lock);
+			kmem_cache_free(se_ua_cache, ua);
+			return 0;
+		}
+		/*
+		 * Attach the highest priority Unit Attention to
+		 * the head of the list following sam4r14,
+		 * Section 5.14 Unit Attention Condition:
+		 *
+		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
+		 * POWER ON OCCURRED or
+		 * DEVICE INTERNAL RESET
+		 * SCSI BUS RESET OCCURRED or
+		 * MICROCODE HAS BEEN CHANGED or
+		 * protocol specific
+		 * BUS DEVICE RESET FUNCTION OCCURRED
+		 * I_T NEXUS LOSS OCCURRED
+		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
+		 * all others                                    Lowest
+		 *
+		 * Each of the ASCQ codes listed above is defined in
+		 * the 29h ASC family, see spc4r17 Table D.1
+		 */
+		if (ua_p->ua_asc == 0x29) {
+			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+						&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else if (ua_p->ua_asc == 0x2a) {
+			/*
+			 * Incoming Family 29h ASCQ codes will override
+			 * Family 2Ah ASCQ codes for Unit Attention condition.
+			 */
+			if ((asc == 0x29) || (ascq > ua_p->ua_ascq))
+				list_add(&ua->ua_nacl_list,
+					&deve->ua_list);
+			else
+				list_add_tail(&ua->ua_nacl_list,
+						&deve->ua_list);
+		} else
+			list_add_tail(&ua->ua_nacl_list,
+				&deve->ua_list);
+		spin_unlock(&deve->ua_lock);
+		spin_unlock_irq(&nacl->device_list_lock);
+
+		atomic_inc(&deve->ua_count);
+		smp_mb__after_atomic_inc();
+		return 0;
+	}
+	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+		" 0x%02x, ASCQ: 0x%02x\n",
+		TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+		asc, ascq);
+
+	atomic_inc(&deve->ua_count);
+	smp_mb__after_atomic_inc();
+	return 0;
+}
+
+void core_scsi3_ua_release_all(
+	struct se_dev_entry *deve)
+{
+	struct se_ua *ua, *ua_p;
+
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+}
+
+void core_scsi3_ua_for_check_condition(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_device *dev = SE_DEV(cmd);
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!(sess))
+		return;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count))) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
+	 * sense data for the received CDB.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		/*
+		 * For ua_intlck_ctrl code not equal to 00b, only report the
+		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
+		 * clearing it.
+		 */
+		if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			break;
+		}
+		/*
+		 * Otherwise for the default 00b, release the UNIT ATTENTION
+		 * condition.  Return the ASC/ASCQ of the highest priority UA
+		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
+		 */
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
+		TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+		"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
+		cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+}
+
+int core_scsi3_ua_clear_for_request_sense(
+	struct se_cmd *cmd,
+	u8 *asc,
+	u8 *ascq)
+{
+	struct se_dev_entry *deve;
+	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
+	struct se_ua *ua = NULL, *ua_p;
+	int head = 1;
+
+	if (!(sess))
+		return -1;
+
+	nacl = sess->se_node_acl;
+	if (!(nacl))
+		return -1;
+
+	spin_lock_irq(&nacl->device_list_lock);
+	deve = &nacl->device_list[cmd->orig_fe_lun];
+	if (!(atomic_read(&deve->ua_count))) {
+		spin_unlock_irq(&nacl->device_list_lock);
+		return -1;
+	}
+	/*
+	 * The highest priority Unit Attentions are placed at the head of the
+	 * struct se_dev_entry->ua_list.  The First (and hence highest priority)
+	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
+	 * matching struct se_lun.
+	 *
+	 * Once the returning ASC/ASCQ values are set, we go ahead and
+	 * release all of the Unit Attention conditions for the associated
+	 * struct se_lun.
+	 */
+	spin_lock(&deve->ua_lock);
+	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
+		if (head) {
+			*asc = ua->ua_asc;
+			*ascq = ua->ua_ascq;
+			head = 0;
+		}
+		list_del(&ua->ua_nacl_list);
+		kmem_cache_free(se_ua_cache, ua);
+
+		atomic_dec(&deve->ua_count);
+		smp_mb__after_atomic_dec();
+	}
+	spin_unlock(&deve->ua_lock);
+	spin_unlock_irq(&nacl->device_list_lock);
+
+	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
+		" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+		cmd->orig_fe_lun, *asc, *ascq);
+
+	return (head) ? -1 : 0;
+}
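The allocation path above is what other parts of the target core are expected to call when a reset-style event must be reported back to an initiator. A minimal caller might look like the sketch below (not from the patch; the node ACL and mapped LUN are assumed to come from the caller's context, and the ASC 0x29 family code comes from target_core_ua.h in the next file):

#include <linux/kernel.h>
#include <target/target_core_base.h>

#include "target_core_ua.h"

/* Queue a "BUS DEVICE RESET FUNCTION OCCURRED" Unit Attention for one ACL */
static void demo_queue_reset_ua(struct se_node_acl *nacl, u32 mapped_lun)
{
	if (core_scsi3_ua_allocate(nacl, mapped_lun, 0x29,
			ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED) < 0)
		printk(KERN_ERR "demo: unable to queue UNIT ATTENTION\n");
}

On a later CHECK_CONDITION, core_scsi3_ua_for_check_condition() folds the queued ASC/ASCQ into the sense data, which is exactly what transport_send_check_condition_and_sense() does for TCM_CHECK_CONDITION_UNIT_ATTENTION.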
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
new file mode 100644
index 0000000..6e6b034
--- /dev/null
+++ b/drivers/target/target_core_ua.h
@@ -0,0 +1,37 @@
+#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+/*
+ * From spc4r17, Table D.1: ASC and ASCQ Assignment
+ */
+#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED	0x00
+#define ASCQ_29H_POWER_ON_OCCURRED				0x01
+#define ASCQ_29H_SCSI_BUS_RESET_OCCURED				0x02
+#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED		0x03
+#define ASCQ_29H_DEVICE_INTERNAL_RESET				0x04
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED	0x05
+#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD		0x06
+#define ASCQ_29H_NEXUS_LOSS_OCCURRED				0x07
+
+#define ASCQ_2AH_PARAMETERS_CHANGED				0x00
+#define ASCQ_2AH_MODE_PARAMETERS_CHANGED			0x01
+#define ASCQ_2AH_LOG_PARAMETERS_CHANGED				0x02
+#define ASCQ_2AH_RESERVATIONS_PREEMPTED				0x03
+#define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
+#define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
+#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
+#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_PRIORITY_CHANGED				0x08
+
+#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09
+
+extern struct kmem_cache *se_ua_cache;
+
+extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern void core_scsi3_ua_release_all(struct se_dev_entry *);
+extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
+						u8 *, u8 *);
+
+#endif /* TARGET_CORE_UA_H */
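The ASC/ASCQ values above ultimately land at fixed byte offsets in the SPC-3 fixed-format sense data built by transport_send_check_condition_and_sense(). A sketch of that layout, assuming the conventional fixed-format offsets (response code at byte 0, sense key at byte 2, additional sense length at byte 7, ASC/ASCQ at bytes 12/13); the SPC_*_KEY_OFFSET macros the driver actually uses are defined elsewhere in the tree:

#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi.h>		/* UNIT_ATTENTION and friends */

/* Fill an 18-byte fixed format sense buffer (SPC-3, response code 0x70) */
static void demo_build_fixed_sense(unsigned char *buf, u8 key, u8 asc, u8 ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;		/* CURRENT ERROR, fixed format */
	buf[2] = key;		/* e.g. UNIT_ATTENTION */
	buf[7] = 0x0a;		/* additional sense length (18 - 8) */
	buf[12] = asc;		/* e.g. 0x29 */
	buf[13] = ascq;		/* e.g. ASCQ_29H_POWER_ON_OCCURRED */
}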
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 0d236f4..b001019 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -284,12 +284,11 @@
 
 module_param(ixjdebug, int, 0);
 
-static struct pci_device_id ixj_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
 	{ PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ }
 };
-
 MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
 
 /************************************************************************
@@ -6581,7 +6580,8 @@
 	case IXJCTL_SET_FILTER:
 		if (copy_from_user(&jf, argp, sizeof(jf))) 
 			retval = -EFAULT;
-		retval = ixj_init_filter(j, &jf);
+		else
+			retval = ixj_init_filter(j, &jf);
 		break;
 	case IXJCTL_SET_FILTER_RAW:
 		if (copy_from_user(&jfr, argp, sizeof(jfr))) 
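The ixj hunk above matters because the original code overwrote retval after a failed copy_from_user(), silently discarding -EFAULT. The shape the fix restores, shown in isolation (a sketch; the demo_* names and filter struct are illustrative, not from the driver):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_filter {
	int type;
	int freq;
};

/* Stand-in for ixj_init_filter(): validate and apply the filter */
static long demo_apply_filter(struct demo_filter *f)
{
	return f->freq > 0 ? 0 : -EINVAL;
}

static long demo_set_filter(void __user *argp)
{
	struct demo_filter f;
	long retval;

	if (copy_from_user(&f, argp, sizeof(f)))
		retval = -EFAULT;	/* must not be clobbered below */
	else
		retval = demo_apply_filter(&f);
	return retval;
}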
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687..f7a5dba 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,6 +4,7 @@
 
 menuconfig THERMAL
 	tristate "Generic Thermal sysfs driver"
+	depends on NET
 	help
 	  Generic Thermal Sysfs driver offers a generic mechanism for
 	  thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 13c72c6..7d0e63c 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -32,6 +32,8 @@
 #include <linux/thermal.h>
 #include <linux/spinlock.h>
 #include <linux/reboot.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
 
 MODULE_AUTHOR("Zhang Rui");
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
@@ -58,6 +60,22 @@
 static LIST_HEAD(thermal_cdev_list);
 static DEFINE_MUTEX(thermal_list_lock);
 
+static unsigned int thermal_event_seqnum;
+
+static struct genl_family thermal_event_genl_family = {
+	.id = GENL_ID_GENERATE,
+	.name = THERMAL_GENL_FAMILY_NAME,
+	.version = THERMAL_GENL_VERSION,
+	.maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+	.name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
+static int genetlink_init(void);
+static void genetlink_exit(void);
+
 static int get_idr(struct idr *idr, struct mutex *lock, int *id)
 {
 	int err;
@@ -823,11 +841,8 @@
  * @devdata:	device private data.
  * @ops:		standard thermal cooling devices callbacks.
  */
-struct thermal_cooling_device *thermal_cooling_device_register(char *type,
-							       void *devdata,
-							       struct
-							       thermal_cooling_device_ops
-							       *ops)
+struct thermal_cooling_device *thermal_cooling_device_register(
+     char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
 {
 	struct thermal_cooling_device *cdev;
 	struct thermal_zone_device *pos;
@@ -1048,13 +1063,9 @@
  * section 11.1.5.1 of the ACPI specification 3.0.
  */
 struct thermal_zone_device *thermal_zone_device_register(char *type,
-							 int trips,
-							 void *devdata, struct
-							 thermal_zone_device_ops
-							 *ops, int tc1, int
-							 tc2,
-							 int passive_delay,
-							 int polling_delay)
+	int trips, void *devdata,
+	const struct thermal_zone_device_ops *ops,
+	int tc1, int tc2, int passive_delay, int polling_delay)
 {
 	struct thermal_zone_device *tz;
 	struct thermal_cooling_device *pos;
@@ -1214,6 +1225,82 @@
 
 EXPORT_SYMBOL(thermal_zone_device_unregister);
 
+int generate_netlink_event(u32 orig, enum events event)
+{
+	struct sk_buff *skb;
+	struct nlattr *attr;
+	struct thermal_genl_event *thermal_event;
+	void *msg_header;
+	int size;
+	int result;
+
+	/* allocate memory */
+	size = nla_total_size(sizeof(struct thermal_genl_event)) + \
+				nla_total_size(0);
+
+	skb = genlmsg_new(size, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	/* add the genetlink message header */
+	msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++,
+				 &thermal_event_genl_family, 0,
+				 THERMAL_GENL_CMD_EVENT);
+	if (!msg_header) {
+		nlmsg_free(skb);
+		return -ENOMEM;
+	}
+
+	/* fill the data */
+	attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \
+			sizeof(struct thermal_genl_event));
+
+	if (!attr) {
+		nlmsg_free(skb);
+		return -EINVAL;
+	}
+
+	thermal_event = nla_data(attr);
+	if (!thermal_event) {
+		nlmsg_free(skb);
+		return -EINVAL;
+	}
+
+	memset(thermal_event, 0, sizeof(struct thermal_genl_event));
+
+	thermal_event->orig = orig;
+	thermal_event->event = event;
+
+	/* send multicast genetlink message */
+	result = genlmsg_end(skb, msg_header);
+	if (result < 0) {
+		nlmsg_free(skb);
+		return result;
+	}
+
+	result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
+	if (result)
+		printk(KERN_ERR "failed to send netlink event: %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(generate_netlink_event);
+
+static int genetlink_init(void)
+{
+	int result;
+
+	result = genl_register_family(&thermal_event_genl_family);
+	if (result)
+		return result;
+
+	result = genl_register_mc_group(&thermal_event_genl_family,
+					&thermal_event_mcgrp);
+	if (result)
+		genl_unregister_family(&thermal_event_genl_family);
+	return result;
+}
+
 static int __init thermal_init(void)
 {
 	int result = 0;
@@ -1225,9 +1312,15 @@
 		mutex_destroy(&thermal_idr_lock);
 		mutex_destroy(&thermal_list_lock);
 	}
+	result = genetlink_init();
 	return result;
 }
 
+static void genetlink_exit(void)
+{
+	genl_unregister_family(&thermal_event_genl_family);
+}
+
 static void __exit thermal_exit(void)
 {
 	class_unregister(&thermal_class);
@@ -1235,7 +1328,8 @@
 	idr_destroy(&thermal_cdev_idr);
 	mutex_destroy(&thermal_idr_lock);
 	mutex_destroy(&thermal_list_lock);
+	genetlink_exit();
 }
 
-subsys_initcall(thermal_init);
+fs_initcall(thermal_init);
 module_exit(thermal_exit);
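With the pieces above in place, a thermal driver can push an event to userspace through the new generic netlink multicast group. A sketch of a caller (assuming generate_netlink_event() and enum events are declared in linux/thermal.h, which is not part of this hunk; the specific event value is left to the caller):

#include <linux/kernel.h>
#include <linux/thermal.h>

/* Notify userspace listeners about an event on this thermal zone */
static void demo_notify_userspace(struct thermal_zone_device *tz,
				  enum events event)
{
	/* tz->id tells listeners which zone originated the event */
	if (generate_netlink_event(tz->id, event))
		printk(KERN_WARNING "thermal: netlink notification failed\n");
}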
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c5f8e5b..44b8412 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -19,7 +19,7 @@
  *
  * TO DO:
  *	Mostly done:	ioctls for setting modes/timing
- *	Partly done: 	hooks so you can pull off frames to non tty devs
+ *	Partly done:	hooks so you can pull off frames to non tty devs
  *	Restart DLCI 0 when it closes ?
  *	Test basic encoding
  *	Improve the tx engine
@@ -73,8 +73,10 @@
 #define T2	(2 * HZ)
 #endif
 
-/* Semi-arbitary buffer size limits. 0710 is normally run with 32-64 byte
-   limits so this is plenty */
+/*
+ * Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
+ * limits so this is plenty
+ */
 #define MAX_MRU 512
 #define MAX_MTU 512
 
@@ -184,6 +186,9 @@
 #define GSM_DATA		5
 #define GSM_FCS			6
 #define GSM_OVERRUN		7
+#define GSM_LEN0		8
+#define GSM_LEN1		9
+#define GSM_SSOF		10
 	unsigned int len;
 	unsigned int address;
 	unsigned int count;
@@ -191,6 +196,7 @@
 	int encoding;
 	u8 control;
 	u8 fcs;
+	u8 received_fcs;
 	u8 *txframe;			/* TX framing buffer */
 
 	/* Methods for the receiver side */
@@ -286,7 +292,7 @@
 #define MDM_DV			0x40
 
 #define GSM0_SOF		0xF9
-#define GSM1_SOF 		0x7E
+#define GSM1_SOF		0x7E
 #define GSM1_ESCAPE		0x7D
 #define GSM1_ESCAPE_BITS	0x20
 #define XON			0x11
@@ -429,61 +435,63 @@
 	if (!(debug & 1))
 		return;
 
-	printk(KERN_INFO "%s %d) %c: ", hdr, addr, "RC"[cr]);
+	pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);
 
 	switch (control & ~PF) {
 	case SABM:
-		printk(KERN_CONT "SABM");
+		pr_cont("SABM");
 		break;
 	case UA:
-		printk(KERN_CONT "UA");
+		pr_cont("UA");
 		break;
 	case DISC:
-		printk(KERN_CONT "DISC");
+		pr_cont("DISC");
 		break;
 	case DM:
-		printk(KERN_CONT "DM");
+		pr_cont("DM");
 		break;
 	case UI:
-		printk(KERN_CONT "UI");
+		pr_cont("UI");
 		break;
 	case UIH:
-		printk(KERN_CONT "UIH");
+		pr_cont("UIH");
 		break;
 	default:
 		if (!(control & 0x01)) {
-			printk(KERN_CONT "I N(S)%d N(R)%d",
-				(control & 0x0E) >> 1, (control & 0xE)>> 5);
+			pr_cont("I N(S)%d N(R)%d",
+				(control & 0x0E) >> 1, (control & 0xE) >> 5);
 		} else switch (control & 0x0F) {
-		case RR:
-			printk("RR(%d)", (control & 0xE0) >> 5);
-			break;
-		case RNR:
-			printk("RNR(%d)", (control & 0xE0) >> 5);
-			break;
-		case REJ:
-			printk("REJ(%d)", (control & 0xE0) >> 5);
-			break;
-		default:
-			printk(KERN_CONT "[%02X]", control);
+			case RR:
+				pr_cont("RR(%d)", (control & 0xE0) >> 5);
+				break;
+			case RNR:
+				pr_cont("RNR(%d)", (control & 0xE0) >> 5);
+				break;
+			case REJ:
+				pr_cont("REJ(%d)", (control & 0xE0) >> 5);
+				break;
+			default:
+				pr_cont("[%02X]", control);
 		}
 	}
 
 	if (control & PF)
-		printk(KERN_CONT "(P)");
+		pr_cont("(P)");
 	else
-		printk(KERN_CONT "(F)");
+		pr_cont("(F)");
 
 	if (dlen) {
 		int ct = 0;
 		while (dlen--) {
-			if (ct % 8 == 0)
-				printk(KERN_CONT "\n    ");
-			printk(KERN_CONT "%02X ", *data++);
+			if (ct % 8 == 0) {
+				pr_cont("\n");
+				pr_debug("    ");
+			}
+			pr_cont("%02X ", *data++);
 			ct++;
 		}
 	}
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 }
 
 
@@ -522,11 +530,13 @@
 {
 	int i;
 	for (i = 0; i < len; i++) {
-		if (i && (i % 16) == 0)
-			printk("\n");
-		printk("%02X ", *p++);
+		if (i && (i % 16) == 0) {
+			pr_cont("\n");
+			pr_debug("");
+		}
+		pr_cont("%02X ", *p++);
 	}
-	printk("\n");
+	pr_cont("\n");
 }
 
 /**
@@ -676,7 +686,7 @@
 		}
 
 		if (debug & 4) {
-			printk("gsm_data_kick: \n");
+			pr_debug("gsm_data_kick:\n");
 			hex_packet(gsm->txframe, len);
 		}
 
@@ -1231,7 +1241,7 @@
 }
 
 /**
- *	gsm_control_transmit 	-	send control packet
+ *	gsm_control_transmit	-	send control packet
  *	@gsm: gsm mux
  *	@ctrl: frame to send
  *
@@ -1361,7 +1371,7 @@
 {
 	del_timer(&dlci->t1);
 	if (debug & 8)
-		printk("DLCI %d goes closed.\n", dlci->addr);
+		pr_debug("DLCI %d goes closed.\n", dlci->addr);
 	dlci->state = DLCI_CLOSED;
 	if (dlci->addr != 0) {
 		struct tty_struct  *tty = tty_port_tty_get(&dlci->port);
@@ -1392,7 +1402,7 @@
 	/* This will let a tty open continue */
 	dlci->state = DLCI_OPEN;
 	if (debug & 8)
-		printk("DLCI %d goes open.\n", dlci->addr);
+		pr_debug("DLCI %d goes open.\n", dlci->addr);
 	wake_up(&dlci->gsm->event);
 }
 
@@ -1494,29 +1504,29 @@
 	unsigned int modem = 0;
 
 	if (debug & 16)
-		printk("%d bytes for tty %p\n", len, tty);
+		pr_debug("%d bytes for tty %p\n", len, tty);
 	if (tty) {
 		switch (dlci->adaption)  {
-			/* Unsupported types */
-			/* Packetised interruptible data */
-			case 4:
-				break;
-			/* Packetised uininterruptible voice/data */
-			case 3:
-				break;
-			/* Asynchronous serial with line state in each frame */
-			case 2:
-				while (gsm_read_ea(&modem, *data++) == 0) {
-					len--;
-					if (len == 0)
-						return;
-				}
-				gsm_process_modem(tty, dlci, modem);
-			/* Line state will go via DLCI 0 controls only */
-			case 1:
-			default:
-				tty_insert_flip_string(tty, data, len);
-				tty_flip_buffer_push(tty);
+		/* Unsupported types */
+		/* Packetised interruptible data */
+		case 4:
+			break;
+		/* Packetised uninterruptible voice/data */
+		case 3:
+			break;
+		/* Asynchronous serial with line state in each frame */
+		case 2:
+			while (gsm_read_ea(&modem, *data++) == 0) {
+				len--;
+				if (len == 0)
+					return;
+			}
+			gsm_process_modem(tty, dlci, modem);
+		/* Line state will go via DLCI 0 controls only */
+		case 1:
+		default:
+			tty_insert_flip_string(tty, data, len);
+			tty_flip_buffer_push(tty);
 		}
 		tty_kref_put(tty);
 	}
@@ -1625,7 +1635,6 @@
 	kfree(dlci);
 }
 
-
 /*
  *	LAPBish link layer logic
  */
@@ -1650,10 +1659,12 @@
 
 	if ((gsm->control & ~PF) == UI)
 		gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
+	/* generate final CRC with received FCS */
+	gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
 	if (gsm->fcs != GOOD_FCS) {
 		gsm->bad_fcs++;
 		if (debug & 4)
-			printk("BAD FCS %02x\n", gsm->fcs);
+			pr_debug("BAD FCS %02x\n", gsm->fcs);
 		return;
 	}
 	address = gsm->address >> 1;
@@ -1748,6 +1759,8 @@
 
 static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
 {
+	unsigned int len;
+
 	switch (gsm->state) {
 	case GSM_SEARCH:	/* SOF marker */
 		if (c == GSM0_SOF) {
@@ -1756,8 +1769,8 @@
 			gsm->len = 0;
 			gsm->fcs = INIT_FCS;
 		}
-		break;		/* Address EA */
-	case GSM_ADDRESS:
+		break;
+	case GSM_ADDRESS:	/* Address EA */
 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
 		if (gsm_read_ea(&gsm->address, c))
 			gsm->state = GSM_CONTROL;
@@ -1765,9 +1778,9 @@
 	case GSM_CONTROL:	/* Control Byte */
 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
 		gsm->control = c;
-		gsm->state = GSM_LEN;
+		gsm->state = GSM_LEN0;
 		break;
-	case GSM_LEN:		/* Length EA */
+	case GSM_LEN0:		/* Length EA */
 		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
 		if (gsm_read_ea(&gsm->len, c)) {
 			if (gsm->len > gsm->mru) {
@@ -1776,8 +1789,28 @@
 				break;
 			}
 			gsm->count = 0;
-			gsm->state = GSM_DATA;
+			if (!gsm->len)
+				gsm->state = GSM_FCS;
+			else
+				gsm->state = GSM_DATA;
+			break;
 		}
+		gsm->state = GSM_LEN1;
+		break;
+	case GSM_LEN1:
+		gsm->fcs = gsm_fcs_add(gsm->fcs, c);
+		len = c;
+		gsm->len |= len << 7;
+		if (gsm->len > gsm->mru) {
+			gsm->bad_size++;
+			gsm->state = GSM_SEARCH;
+			break;
+		}
+		gsm->count = 0;
+		if (!gsm->len)
+			gsm->state = GSM_FCS;
+		else
+			gsm->state = GSM_DATA;
 		break;
 	case GSM_DATA:		/* Data */
 		gsm->buf[gsm->count++] = c;
@@ -1785,16 +1818,25 @@
 			gsm->state = GSM_FCS;
 		break;
 	case GSM_FCS:		/* FCS follows the packet */
-		gsm->fcs = c;
+		gsm->received_fcs = c;
+		if (c == GSM0_SOF) {
+			gsm->state = GSM_SEARCH;
+			break;
+		}
 		gsm_queue(gsm);
-		/* And then back for the next frame */
-		gsm->state = GSM_SEARCH;
+		gsm->state = GSM_SSOF;
+		break;
+	case GSM_SSOF:
+		if (c == GSM0_SOF) {
+			gsm->state = GSM_SEARCH;
+			break;
+		}
 		break;
 	}
 }
 
 /**
- *	gsm0_receive	-	perform processing for non-transparency
+ *	gsm1_receive	-	perform processing for non-transparency
  *	@gsm: gsm data for this ldisc instance
  *	@c: character
  *
@@ -1856,7 +1898,7 @@
 		gsm->state = GSM_DATA;
 		break;
 	case GSM_DATA:		/* Data */
-		if (gsm->count > gsm->mru ) {	/* Allow one for the FCS */
+		if (gsm->count > gsm->mru) {	/* Allow one for the FCS */
 			gsm->state = GSM_OVERRUN;
 			gsm->bad_size++;
 		} else
@@ -2034,9 +2076,6 @@
 }
 EXPORT_SYMBOL_GPL(gsm_alloc_mux);
 
-
-
-
 /**
  *	gsmld_output		-	write to link
  *	@gsm: our mux
@@ -2054,7 +2093,7 @@
 		return -ENOSPC;
 	}
 	if (debug & 4) {
-		printk("-->%d bytes out\n", len);
+		pr_debug("-->%d bytes out\n", len);
 		hex_packet(data, len);
 	}
 	gsm->tty->ops->write(gsm->tty, data, len);
@@ -2111,7 +2150,7 @@
 	char flags;
 
 	if (debug & 4) {
-		printk("Inbytes %dd\n", count);
+		pr_debug("Inbytes %d\n", count);
 		hex_packet(cp, count);
 	}
 
@@ -2128,7 +2167,7 @@
 			gsm->error(gsm, *dp, flags);
 			break;
 		default:
-			printk(KERN_ERR "%s: unknown flag %d\n",
+			WARN_ONCE(1, "%s: unknown flag %d\n",
 			       tty_name(tty, buf), flags);
 			break;
 		}
@@ -2323,7 +2362,7 @@
 	int need_restart = 0;
 
 	/* Stuff we don't support yet - UI or I frame transport, windowing */
-	if ((c->adaption !=1 && c->adaption != 2) || c->k)
+	if ((c->adaption != 1 && c->adaption != 2) || c->k)
 		return -EOPNOTSUPP;
 	/* Check the MRU/MTU range looks sane */
 	if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
@@ -2418,7 +2457,7 @@
 			c.i = 1;
 		else
 			c.i = 2;
-		printk("Ftype %d i %d\n", gsm->ftype, c.i);
+		pr_debug("Ftype %d i %d\n", gsm->ftype, c.i);
 		c.mru = gsm->mru;
 		c.mtu = gsm->mtu;
 		c.k = 0;
@@ -2712,14 +2751,15 @@
 	/* Fill in our line protocol discipline, and register it */
 	int status = tty_register_ldisc(N_GSM0710, &tty_ldisc_packet);
 	if (status != 0) {
-		printk(KERN_ERR "n_gsm: can't register line discipline (err = %d)\n", status);
+		pr_err("n_gsm: can't register line discipline (err = %d)\n",
+								status);
 		return status;
 	}
 
 	gsm_tty_driver = alloc_tty_driver(256);
 	if (!gsm_tty_driver) {
 		tty_unregister_ldisc(N_GSM0710);
-		printk(KERN_ERR "gsm_init: tty allocation failed.\n");
+		pr_err("gsm_init: tty allocation failed.\n");
 		return -EINVAL;
 	}
 	gsm_tty_driver->owner	= THIS_MODULE;
@@ -2730,7 +2770,7 @@
 	gsm_tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
 	gsm_tty_driver->subtype	= SERIAL_TYPE_NORMAL;
 	gsm_tty_driver->flags	= TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
-							| TTY_DRIVER_HARDWARE_BREAK;
+						| TTY_DRIVER_HARDWARE_BREAK;
 	gsm_tty_driver->init_termios	= tty_std_termios;
 	/* Fixme */
 	gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
@@ -2741,10 +2781,11 @@
 	if (tty_register_driver(gsm_tty_driver)) {
 		put_tty_driver(gsm_tty_driver);
 		tty_unregister_ldisc(N_GSM0710);
-		printk(KERN_ERR "gsm_init: tty registration failed.\n");
+		pr_err("gsm_init: tty registration failed.\n");
 		return -EBUSY;
 	}
-	printk(KERN_INFO "gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start);
+	pr_debug("gsm_init: loaded as %d,%d.\n",
+			gsm_tty_driver->major, gsm_tty_driver->minor_start);
 	return 0;
 }
 
@@ -2752,10 +2793,10 @@
 {
 	int status = tty_unregister_ldisc(N_GSM0710);
 	if (status != 0)
-		printk(KERN_ERR "n_gsm: can't unregister line discipline (err = %d)\n", status);
+		pr_err("n_gsm: can't unregister line discipline (err = %d)\n",
+								status);
 	tty_unregister_driver(gsm_tty_driver);
 	put_tty_driver(gsm_tty_driver);
-	printk(KERN_INFO "gsm_init: unloaded.\n");
 }
 
 module_init(gsm_init);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 35480dd..464d09d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2627,6 +2627,11 @@
 		return put_user(tty->ldisc->ops->num, (int __user *)p);
 	case TIOCSETD:
 		return tiocsetd(tty, p);
+	case TIOCGDEV:
+	{
+		unsigned int ret = new_encode_dev(tty_devnum(real_tty));
+		return put_user(ret, (unsigned int __user *)p);
+	}
 	/*
 	 * Break handling
 	 */
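The new TIOCGDEV case returns the tty's device number encoded with new_encode_dev(); a hypothetical userspace caller could decode it with major()/minor(). Sketch only — the fallback TIOCGDEV definition is copied from asm-generic/ioctls.h and would normally come from the system headers:

/* Userspace sketch: query the device number behind an open tty. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>

#ifndef TIOCGDEV
#define TIOCGDEV _IOR('T', 0x32, unsigned int)
#endif

static int print_tty_dev(int fd)
{
	unsigned int dev;

	if (ioctl(fd, TIOCGDEV, &dev) < 0)
		return -1;
	printf("tty is %u:%u\n", major(dev), minor(dev));
	return 0;
}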
@@ -3241,9 +3246,45 @@
 postcore_initcall(tty_class_init);
 
 /* 3/2004 jmc: why do these devices exist? */
-
 static struct cdev tty_cdev, console_cdev;
 
+static ssize_t show_cons_active(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct console *cs[16];
+	int i = 0;
+	struct console *c;
+	ssize_t count = 0;
+
+	acquire_console_sem();
+	for (c = console_drivers; c; c = c->next) {
+		if (!c->device)
+			continue;
+		if (!c->write)
+			continue;
+		if ((c->flags & CON_ENABLED) == 0)
+			continue;
+		cs[i++] = c;
+		if (i >= ARRAY_SIZE(cs))
+			break;
+	}
+	while (i--)
+		count += sprintf(buf + count, "%s%d%c",
+				 cs[i]->name, cs[i]->index, i ? ' ':'\n');
+	release_console_sem();
+
+	return count;
+}
+static DEVICE_ATTR(active, S_IRUGO, show_cons_active, NULL);
+
+static struct device *consdev;
+
+void console_sysfs_notify(void)
+{
+	if (consdev)
+		sysfs_notify(&consdev->kobj, NULL, "active");
+}
+
 /*
  * Ok, now we can initialize the rest of the tty devices and can count
  * on memory allocations, interrupts etc..
@@ -3254,15 +3295,18 @@
 	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
 		panic("Couldn't register /dev/tty driver\n");
-	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL,
-			      "tty");
+	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 0), NULL, "tty");
 
 	cdev_init(&console_cdev, &console_fops);
 	if (cdev_add(&console_cdev, MKDEV(TTYAUX_MAJOR, 1), 1) ||
 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 1), 1, "/dev/console") < 0)
 		panic("Couldn't register /dev/console driver\n");
-	device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL,
+	consdev = device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 1), NULL,
 			      "console");
+	if (IS_ERR(consdev))
+		consdev = NULL;
+	else
+		device_create_file(consdev, &dev_attr_active);
 
 #ifdef CONFIG_VT
 	vty_init(&console_fops);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index a8ec48e..76407ec 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -236,6 +236,14 @@
 };
 
 /*
+ * /sys/class/tty/tty0/
+ *
+ * the attribute 'active' contains the name of the current vc
+ * console and it supports poll() to detect vc switches
+ */
+static struct device *tty0dev;
+
+/*
  * Notifier list for console events.
  */
 static ATOMIC_NOTIFIER_HEAD(vt_notifier_list);
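As the comment block above notes, the 'active' attribute supports poll(); a hypothetical watcher would read the file, sleep in poll() waiting for POLLPRI, and re-read after each sysfs_notify(). Userspace sketch only:

/* Userspace sketch: follow VT switches via /sys/class/tty/tty0/active. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void watch_active_vc(void)
{
	char name[32];
	ssize_t n;
	int fd = open("/sys/class/tty/tty0/active", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	if (fd < 0)
		return;
	for (;;) {
		lseek(fd, 0, SEEK_SET);
		n = read(fd, name, sizeof(name) - 1);
		if (n > 0) {
			name[n] = '\0';
			printf("active console: %s", name);
		}
		poll(&pfd, 1, -1);	/* woken by sysfs_notify() */
	}
}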
@@ -688,6 +696,8 @@
 			save_screen(old_vc);
 			set_origin(old_vc);
 		}
+		if (tty0dev)
+			sysfs_notify(&tty0dev->kobj, NULL, "active");
 	} else {
 		hide_cursor(vc);
 		redraw = 1;
@@ -2967,13 +2977,24 @@
 
 static struct cdev vc0_cdev;
 
+static ssize_t show_tty_active(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "tty%d\n", fg_console + 1);
+}
+static DEVICE_ATTR(active, S_IRUGO, show_tty_active, NULL);
+
 int __init vty_init(const struct file_operations *console_fops)
 {
 	cdev_init(&vc0_cdev, console_fops);
 	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
 	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
 		panic("Couldn't register /dev/tty0 driver\n");
-	device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+	tty0dev = device_create(tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
+	if (IS_ERR(tty0dev))
+		tty0dev = NULL;
+	else
+		device_create_file(tty0dev, &dev_attr_active);
 
 	vcs_init();
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 5a7c8f1..fceea5e 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -42,17 +42,13 @@
 	default y if ARCH_W90X900
 	default y if ARCH_DAVINCI_DA8XX
 	default y if ARCH_CNS3XXX
+	default y if PLAT_SPEAR
 	# PPC:
 	default y if STB03xxx
 	default y if PPC_MPC52xx
 	# MIPS:
 	default y if MIPS_ALCHEMY
 	default y if MACH_JZ4740
-	# SH:
-	default y if CPU_SUBTYPE_SH7720
-	default y if CPU_SUBTYPE_SH7721
-	default y if CPU_SUBTYPE_SH7763
-	default y if CPU_SUBTYPE_SH7786
 	# more:
 	default PCI
 
@@ -68,6 +64,9 @@
 	default y if ARCH_MXC
 	default y if ARCH_OMAP3
 	default y if ARCH_CNS3XXX
+	default y if ARCH_VT8500
+	default y if PLAT_SPEAR
+	default y if ARCH_MSM
 	default PCI
 
 # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index f383cb4..a845f8b 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1247,7 +1247,7 @@
 	mutex_unlock(&instance->poll_state_serialize);
 
 	if (is_polling)
-		cancel_rearming_delayed_work(&instance->poll_work);
+		cancel_delayed_work_sync(&instance->poll_work);
 
 	usb_kill_urb(instance->snd_urb);
 	usb_kill_urb(instance->rcv_urb);
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 4716e70..0842cfb 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -139,7 +139,8 @@
 
 	struct speedtch_params params; /* set in probe, constant afterwards */
 
-	struct delayed_work status_checker;
+	struct timer_list status_check_timer;
+	struct work_struct status_check_work;
 
 	unsigned char last_status;
 
@@ -498,7 +499,7 @@
 {
 	struct speedtch_instance_data *instance =
 		container_of(work, struct speedtch_instance_data,
-			     status_checker.work);
+			     status_check_work);
 	struct usbatm_data *usbatm = instance->usbatm;
 	struct atm_dev *atm_dev = usbatm->atm_dev;
 	unsigned char *buf = instance->scratch_buffer;
@@ -575,11 +576,11 @@
 {
 	struct speedtch_instance_data *instance = (void *)data;
 
-	schedule_delayed_work(&instance->status_checker, 0);
+	schedule_work(&instance->status_check_work);
 
 	/* The following check is racy, but the race is harmless */
 	if (instance->poll_delay < MAX_POLL_DELAY)
-		mod_timer(&instance->status_checker.timer, jiffies + msecs_to_jiffies(instance->poll_delay));
+		mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(instance->poll_delay));
 	else
 		atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
 }
@@ -595,7 +596,7 @@
 	if (int_urb) {
 		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
 		if (!ret)
-			schedule_delayed_work(&instance->status_checker, 0);
+			schedule_work(&instance->status_check_work);
 		else {
 			atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
 			mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -624,7 +625,7 @@
 	}
 
 	if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) {
-		del_timer(&instance->status_checker.timer);
+		del_timer(&instance->status_check_timer);
 		atm_info(usbatm, "DSL line goes up\n");
 	} else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) {
 		atm_info(usbatm, "DSL line goes down\n");
@@ -640,7 +641,7 @@
 
 	if ((int_urb = instance->int_urb)) {
 		ret = usb_submit_urb(int_urb, GFP_ATOMIC);
-		schedule_delayed_work(&instance->status_checker, 0);
+		schedule_work(&instance->status_check_work);
 		if (ret < 0) {
 			atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
 			goto fail;
@@ -686,7 +687,7 @@
 	}
 
 	/* Start status polling */
-	mod_timer(&instance->status_checker.timer, jiffies + msecs_to_jiffies(1000));
+	mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000));
 
 	return 0;
 }
@@ -698,7 +699,7 @@
 
 	atm_dbg(usbatm, "%s entered\n", __func__);
 
-	del_timer_sync(&instance->status_checker.timer);
+	del_timer_sync(&instance->status_check_timer);
 
 	/*
 	 * Since resubmit_timer and int_urb can schedule themselves and
@@ -717,7 +718,7 @@
 	del_timer_sync(&instance->resubmit_timer);
 	usb_free_urb(int_urb);
 
-	flush_scheduled_work();
+	flush_work_sync(&instance->status_check_work);
 }
 
 static int speedtch_pre_reset(struct usb_interface *intf)
@@ -869,10 +870,11 @@
 
 	usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
-	INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
+	INIT_WORK(&instance->status_check_work, speedtch_check_status);
+	init_timer(&instance->status_check_timer);
 
-	instance->status_checker.timer.function = speedtch_status_poll;
-	instance->status_checker.timer.data = (unsigned long)instance;
+	instance->status_check_timer.function = speedtch_status_poll;
+	instance->status_check_timer.data = (unsigned long)instance;
 	instance->last_status = 0xff;
 	instance->poll_delay = MIN_POLL_DELAY;
 
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index c0e60fb..fca6172 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -27,7 +27,6 @@
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
 #include <linux/usb/hcd.h>
-#include <linux/pm_runtime.h>
 
 #include "usb.h"
 
@@ -376,7 +375,7 @@
 		 * Just re-enable it without affecting the endpoint toggles.
 		 */
 		usb_enable_interface(udev, intf, false);
-	} else if (!error && intf->dev.power.status == DPM_ON) {
+	} else if (!error && !intf->dev.power.in_suspend) {
 		r = usb_set_interface(udev, intf->altsetting[0].
 				desc.bInterfaceNumber, 0);
 		if (r < 0)
@@ -961,7 +960,7 @@
 	}
 
 	/* Try to rebind the interface */
-	if (intf->dev.power.status == DPM_ON) {
+	if (!intf->dev.power.in_suspend) {
 		intf->needs_binding = 0;
 		rc = device_attach(&intf->dev);
 		if (rc < 0)
@@ -1108,8 +1107,7 @@
 	if (intf->condition == USB_INTERFACE_UNBOUND) {
 
 		/* Carry out a deferred switch to altsetting 0 */
-		if (intf->needs_altsetting0 &&
-				intf->dev.power.status == DPM_ON) {
+		if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
 			usb_set_interface(udev, intf->altsetting[0].
 					desc.bInterfaceNumber, 0);
 			intf->needs_altsetting0 = 0;
@@ -1262,6 +1260,7 @@
 					udev->reset_resume);
 		}
 	}
+	usb_mark_last_busy(udev);
 
  done:
 	dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
@@ -1329,7 +1328,6 @@
 			pm_runtime_disable(dev);
 			pm_runtime_set_active(dev);
 			pm_runtime_enable(dev);
-			udev->last_busy = jiffies;
 			do_unbind_rebind(udev, DO_REBIND);
 		}
 	}
@@ -1397,33 +1395,8 @@
 {
 	int	status;
 
-	udev->last_busy = jiffies;
-	status = pm_runtime_put_sync(&udev->dev);
-	dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
-			__func__, atomic_read(&udev->dev.power.usage_count),
-			status);
-}
-
-/**
- * usb_try_autosuspend_device - attempt an autosuspend of a USB device and its interfaces
- * @udev: the usb_device to autosuspend
- *
- * This routine should be called when a core subsystem thinks @udev may
- * be ready to autosuspend.
- *
- * @udev's usage counter left unchanged.  If it is 0 and all the interfaces
- * are inactive then an autosuspend will be attempted.  The attempt may
- * fail or be delayed.
- *
- * The caller must hold @udev's device lock.
- *
- * This routine can run only in process context.
- */
-void usb_try_autosuspend_device(struct usb_device *udev)
-{
-	int	status;
-
-	status = pm_runtime_idle(&udev->dev);
+	usb_mark_last_busy(udev);
+	status = pm_runtime_put_sync_autosuspend(&udev->dev);
 	dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&udev->dev.power.usage_count),
 			status);
@@ -1482,7 +1455,7 @@
 	struct usb_device	*udev = interface_to_usbdev(intf);
 	int			status;
 
-	udev->last_busy = jiffies;
+	usb_mark_last_busy(udev);
 	atomic_dec(&intf->pm_usage_cnt);
 	status = pm_runtime_put_sync(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
@@ -1509,32 +1482,11 @@
 void usb_autopm_put_interface_async(struct usb_interface *intf)
 {
 	struct usb_device	*udev = interface_to_usbdev(intf);
-	unsigned long		last_busy;
-	int			status = 0;
+	int			status;
 
-	last_busy = udev->last_busy;
-	udev->last_busy = jiffies;
+	usb_mark_last_busy(udev);
 	atomic_dec(&intf->pm_usage_cnt);
-	pm_runtime_put_noidle(&intf->dev);
-
-	if (udev->dev.power.runtime_auto) {
-		/* Optimization: Don't schedule a delayed autosuspend if
-		 * the timer is already running and the expiration time
-		 * wouldn't change.
-		 *
-		 * We have to use the interface's timer.  Attempts to
-		 * schedule a suspend for the device would fail because
-		 * the interface is still active.
-		 */
-		if (intf->dev.power.timer_expires == 0 ||
-				round_jiffies_up(last_busy) !=
-				round_jiffies_up(jiffies)) {
-			status = pm_schedule_suspend(&intf->dev,
-					jiffies_to_msecs(
-					round_jiffies_up_relative(
-						udev->autosuspend_delay)));
-		}
-	}
+	status = pm_runtime_put(&intf->dev);
 	dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
 			__func__, atomic_read(&intf->dev.power.usage_count),
 			status);
@@ -1554,7 +1506,7 @@
 {
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
-	udev->last_busy = jiffies;
+	usb_mark_last_busy(udev);
 	atomic_dec(&intf->pm_usage_cnt);
 	pm_runtime_put_noidle(&intf->dev);
 }
@@ -1612,18 +1564,9 @@
  */
 int usb_autopm_get_interface_async(struct usb_interface *intf)
 {
-	int		status = 0;
-	enum rpm_status	s;
+	int	status;
 
-	/* Don't request a resume unless the interface is already suspending
-	 * or suspended.  Doing so would force a running suspend timer to be
-	 * cancelled.
-	 */
-	pm_runtime_get_noresume(&intf->dev);
-	s = ACCESS_ONCE(intf->dev.power.runtime_status);
-	if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
-		status = pm_request_resume(&intf->dev);
-
+	status = pm_runtime_get(&intf->dev);
 	if (status < 0 && status != -EINPROGRESS)
 		pm_runtime_put_noidle(&intf->dev);
 	else
@@ -1650,7 +1593,7 @@
 {
 	struct usb_device	*udev = interface_to_usbdev(intf);
 
-	udev->last_busy = jiffies;
+	usb_mark_last_busy(udev);
 	atomic_inc(&intf->pm_usage_cnt);
 	pm_runtime_get_noresume(&intf->dev);
 }
@@ -1661,7 +1604,6 @@
 {
 	int			w, i;
 	struct usb_interface	*intf;
-	unsigned long		suspend_time, j;
 
 	/* Fail if autosuspend is disabled, or any interfaces are in use, or
 	 * any interface drivers require remote wakeup but it isn't available.
@@ -1701,87 +1643,46 @@
 		return -EOPNOTSUPP;
 	}
 	udev->do_remote_wakeup = w;
-
-	/* If everything is okay but the device hasn't been idle for long
-	 * enough, queue a delayed autosuspend request.
-	 */
-	j = ACCESS_ONCE(jiffies);
-	suspend_time = udev->last_busy + udev->autosuspend_delay;
-	if (time_before(j, suspend_time)) {
-		pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
-				round_jiffies_up_relative(suspend_time - j)));
-		return -EAGAIN;
-	}
 	return 0;
 }
 
 static int usb_runtime_suspend(struct device *dev)
 {
-	int	status = 0;
+	struct usb_device	*udev = to_usb_device(dev);
+	int			status;
 
 	/* A USB device can be suspended if it passes the various autosuspend
 	 * checks.  Runtime suspend for a USB device means suspending all the
 	 * interfaces and then the device itself.
 	 */
-	if (is_usb_device(dev)) {
-		struct usb_device	*udev = to_usb_device(dev);
+	if (autosuspend_check(udev) != 0)
+		return -EAGAIN;
 
-		if (autosuspend_check(udev) != 0)
-			return -EAGAIN;
-
-		status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
-
-		/* If an interface fails the suspend, adjust the last_busy
-		 * time so that we don't get another suspend attempt right
-		 * away.
-		 */
-		if (status) {
-			udev->last_busy = jiffies +
-					(udev->autosuspend_delay == 0 ?
-						HZ/2 : 0);
-		}
-
-		/* Prevent the parent from suspending immediately after */
-		else if (udev->parent)
-			udev->parent->last_busy = jiffies;
-	}
-
-	/* Runtime suspend for a USB interface doesn't mean anything. */
+	status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
 	return status;
 }
 
 static int usb_runtime_resume(struct device *dev)
 {
+	struct usb_device	*udev = to_usb_device(dev);
+	int			status;
+
 	/* Runtime resume for a USB device means resuming both the device
 	 * and all its interfaces.
 	 */
-	if (is_usb_device(dev)) {
-		struct usb_device	*udev = to_usb_device(dev);
-		int			status;
-
-		status = usb_resume_both(udev, PMSG_AUTO_RESUME);
-		udev->last_busy = jiffies;
-		return status;
-	}
-
-	/* Runtime resume for a USB interface doesn't mean anything. */
-	return 0;
+	status = usb_resume_both(udev, PMSG_AUTO_RESUME);
+	return status;
 }
 
 static int usb_runtime_idle(struct device *dev)
 {
+	struct usb_device	*udev = to_usb_device(dev);
+
 	/* An idle USB device can be suspended if it passes the various
-	 * autosuspend checks.  An idle interface can be suspended at
-	 * any time.
+	 * autosuspend checks.
 	 */
-	if (is_usb_device(dev)) {
-		struct usb_device	*udev = to_usb_device(dev);
-
-		if (autosuspend_check(udev) != 0)
-			return 0;
-	}
-
-	pm_runtime_suspend(dev);
+	if (autosuspend_check(udev) == 0)
+		pm_runtime_autosuspend(dev);
 	return 0;
 }
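The reworked runtime-PM callbacks above lean on the core autosuspend helpers instead of hand-rolled last_busy/delay bookkeeping. A condensed sketch of the calls involved (the example_* wrappers are hypothetical; the pm_runtime_* APIs are the ones used by this series):

#include <linux/pm_runtime.h>

/* Sketch: enable autosuspend for a device and set its idle delay. */
static void example_autosuspend_setup(struct device *dev, int delay_ms)
{
	pm_runtime_set_active(dev);
	pm_runtime_set_autosuspend_delay(dev, delay_ms);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

/* Sketch: mark recent activity and allow a delayed suspend. */
static void example_autosuspend_put(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_sync_autosuspend(dev);
}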
 
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 3799573..b55d460 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/pm_runtime.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ced846a..6a95017 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,7 +38,6 @@
 #include <asm/unaligned.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
-#include <linux/pm_runtime.h>
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 27115b4..b98efae 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -24,7 +24,6 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/freezer.h>
-#include <linux/pm_runtime.h>
 
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
@@ -1804,8 +1803,15 @@
 
 	/* Tell the runtime-PM framework the device is active */
 	pm_runtime_set_active(&udev->dev);
+	pm_runtime_get_noresume(&udev->dev);
+	pm_runtime_use_autosuspend(&udev->dev);
 	pm_runtime_enable(&udev->dev);
 
+	/* By default, forbid autosuspend for all devices.  It will be
+	 * allowed for hubs during binding.
+	 */
+	usb_disable_autosuspend(udev);
+
 	err = usb_enumerate_device(udev);	/* Read descriptors */
 	if (err < 0)
 		goto fail;
@@ -1831,6 +1837,8 @@
 	}
 
 	(void) usb_create_ep_devs(&udev->dev, &udev->ep0, udev);
+	usb_mark_last_busy(udev);
+	pm_runtime_put_sync_autosuspend(&udev->dev);
 	return err;
 
 fail:
@@ -2221,6 +2229,7 @@
 		usb_set_device_state(udev, USB_STATE_SUSPENDED);
 		msleep(10);
 	}
+	usb_mark_last_busy(hub->hdev);
 	return status;
 }
 
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index b690aa3..1b125c2 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -343,17 +343,19 @@
 {
 	struct list_head *list;
 
-	spin_lock(&dcache_lock);
-
+	spin_lock(&dentry->d_lock);
 	list_for_each(list, &dentry->d_subdirs) {
 		struct dentry *de = list_entry(list, struct dentry, d_u.d_child);
+
+		spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
 		if (usbfs_positive(de)) {
-			spin_unlock(&dcache_lock);
+			spin_unlock(&de->d_lock);
+			spin_unlock(&dentry->d_lock);
 			return 0;
 		}
+		spin_unlock(&de->d_lock);
 	}
-
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
 	return 1;
 }
 
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index d6e3e41..8324874 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1804,6 +1804,7 @@
 		INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
 		intf->minor = -1;
 		device_initialize(&intf->dev);
+		pm_runtime_no_callbacks(&intf->dev);
 		dev_set_name(&intf->dev, "%d-%s:%d.%d",
 			dev->bus->busnum, dev->devpath,
 			configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 25719da..44c5954 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -117,21 +117,6 @@
 		dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
 				udev->quirks);
 
-#ifdef	CONFIG_USB_SUSPEND
-
-	/* By default, disable autosuspend for all devices.  The hub driver
-	 * will enable it for hubs.
-	 */
-	usb_disable_autosuspend(udev);
-
-	/* Autosuspend can also be disabled if the initial autosuspend_delay
-	 * is negative.
-	 */
-	if (udev->autosuspend_delay < 0)
-		usb_autoresume_device(udev);
-
-#endif
-
 	/* For the present, all devices default to USB-PERSIST enabled */
 #if 0		/* was: #ifdef CONFIG_PM */
 	/* Hubs are automatically enabled for USB-PERSIST */
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 448f5b4..6781c36 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -233,8 +233,6 @@
 
 #ifdef	CONFIG_PM
 
-static const char power_group[] = "power";
-
 static ssize_t
 show_persist(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -278,7 +276,7 @@
 		if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
 			rc = sysfs_add_file_to_group(&dev->kobj,
 					&dev_attr_persist.attr,
-					power_group);
+					power_group_name);
 	}
 	return rc;
 }
@@ -287,7 +285,7 @@
 {
 	sysfs_remove_file_from_group(&dev->kobj,
 			&dev_attr_persist.attr,
-			power_group);
+			power_group_name);
 }
 #else
 
@@ -336,44 +334,20 @@
 static ssize_t
 show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	struct usb_device *udev = to_usb_device(dev);
-
-	return sprintf(buf, "%d\n", udev->autosuspend_delay / HZ);
+	return sprintf(buf, "%d\n", dev->power.autosuspend_delay / 1000);
 }
 
 static ssize_t
 set_autosuspend(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	struct usb_device *udev = to_usb_device(dev);
-	int value, old_delay;
-	int rc;
+	int value;
 
-	if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
-			value <= - INT_MAX/HZ)
+	if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/1000 ||
+			value <= -INT_MAX/1000)
 		return -EINVAL;
-	value *= HZ;
 
-	usb_lock_device(udev);
-	old_delay = udev->autosuspend_delay;
-	udev->autosuspend_delay = value;
-
-	if (old_delay < 0) {	/* Autosuspend wasn't allowed */
-		if (value >= 0)
-			usb_autosuspend_device(udev);
-	} else {		/* Autosuspend was allowed */
-		if (value < 0) {
-			rc = usb_autoresume_device(udev);
-			if (rc < 0) {
-				count = rc;
-				udev->autosuspend_delay = old_delay;
-			}
-		} else {
-			usb_try_autosuspend_device(udev);
-		}
-	}
-
-	usb_unlock_device(udev);
+	pm_runtime_set_autosuspend_delay(dev, value * 1000);
 	return count;
 }
 
@@ -438,44 +412,30 @@
 
 static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
 
+static struct attribute *power_attrs[] = {
+	&dev_attr_autosuspend.attr,
+	&dev_attr_level.attr,
+	&dev_attr_connected_duration.attr,
+	&dev_attr_active_duration.attr,
+	NULL,
+};
+static struct attribute_group power_attr_group = {
+	.name	= power_group_name,
+	.attrs	= power_attrs,
+};
+
 static int add_power_attributes(struct device *dev)
 {
 	int rc = 0;
 
-	if (is_usb_device(dev)) {
-		rc = sysfs_add_file_to_group(&dev->kobj,
-				&dev_attr_autosuspend.attr,
-				power_group);
-		if (rc == 0)
-			rc = sysfs_add_file_to_group(&dev->kobj,
-					&dev_attr_level.attr,
-					power_group);
-		if (rc == 0)
-			rc = sysfs_add_file_to_group(&dev->kobj,
-					&dev_attr_connected_duration.attr,
-					power_group);
-		if (rc == 0)
-			rc = sysfs_add_file_to_group(&dev->kobj,
-					&dev_attr_active_duration.attr,
-					power_group);
-	}
+	if (is_usb_device(dev))
+		rc = sysfs_merge_group(&dev->kobj, &power_attr_group);
 	return rc;
 }
 
 static void remove_power_attributes(struct device *dev)
 {
-	sysfs_remove_file_from_group(&dev->kobj,
-			&dev_attr_active_duration.attr,
-			power_group);
-	sysfs_remove_file_from_group(&dev->kobj,
-			&dev_attr_connected_duration.attr,
-			power_group);
-	sysfs_remove_file_from_group(&dev->kobj,
-			&dev_attr_level.attr,
-			power_group);
-	sysfs_remove_file_from_group(&dev->kobj,
-			&dev_attr_autosuspend.attr,
-			power_group);
+	sysfs_unmerge_group(&dev->kobj, &power_attr_group);
 }
 
 #else
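The sysfs.c hunk above collapses four sysfs_add_file_to_group()/sysfs_remove_file_from_group() pairs into one attribute group merged into the device's standard power group. A minimal sketch of the pattern with hypothetical attribute names (power_group_name is the "power" string declared in linux/pm.h):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/sysfs.h>

/* Sketch: merge extra attributes into the standard "power" group. */
static ssize_t show_example(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(example, S_IRUGO, show_example, NULL);

static struct attribute *example_power_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static struct attribute_group example_power_group = {
	.name	= power_group_name,
	.attrs	= example_power_attrs,
};

static int example_add_attrs(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &example_power_group);
}

static void example_remove_attrs(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &example_power_group);
}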
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index fdd4130..079cb57 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -445,7 +445,8 @@
 	INIT_LIST_HEAD(&dev->filelist);
 
 #ifdef	CONFIG_PM
-	dev->autosuspend_delay = usb_autosuspend_delay * HZ;
+	pm_runtime_set_autosuspend_delay(&dev->dev,
+			usb_autosuspend_delay * 1000);
 	dev->connect_time = jiffies;
 	dev->active_duration = -jiffies;
 #endif
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index cd88220..b975450 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -75,14 +75,12 @@
 #ifdef CONFIG_USB_SUSPEND
 
 extern void usb_autosuspend_device(struct usb_device *udev);
-extern void usb_try_autosuspend_device(struct usb_device *udev);
 extern int usb_autoresume_device(struct usb_device *udev);
 extern int usb_remote_wakeup(struct usb_device *dev);
 
 #else
 
 #define usb_autosuspend_device(udev)		do {} while (0)
-#define usb_try_autosuspend_device(udev)	do {} while (0)
 static inline int usb_autoresume_device(struct usb_device *udev)
 {
 	return 0;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 607d0db..1dc9739 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -338,6 +338,19 @@
 	boolean "S3C2410 udc debug messages"
 	depends on USB_GADGET_S3C2410
 
+config USB_GADGET_PXA_U2O
+	boolean "PXA9xx Processor USB2.0 controller"
+	select USB_GADGET_DUALSPEED
+	help
+	  The PXA9xx processor series includes a high speed USB 2.0 device
+	  controller that supports high speed and full speed USB peripheral
+	  operation.
+
+config USB_PXA_U2O
+	tristate
+	depends on USB_GADGET_PXA_U2O
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
 #
 # Controllers available in both integrated and discrete versions
 #
@@ -414,8 +427,8 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
-config USB_GADGET_CI13XXX
-	boolean "MIPS USB CI13xxx"
+config USB_GADGET_CI13XXX_PCI
+	boolean "MIPS USB CI13xxx PCI UDC"
 	depends on PCI
 	select USB_GADGET_DUALSPEED
 	help
@@ -426,9 +439,9 @@
 	  dynamically linked module called "ci13xxx_udc" and force all
 	  gadget drivers to also be dynamically linked.
 
-config USB_CI13XXX
+config USB_CI13XXX_PCI
 	tristate
-	depends on USB_GADGET_CI13XXX
+	depends on USB_GADGET_CI13XXX_PCI
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
@@ -495,6 +508,49 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
+config USB_GADGET_EG20T
+	boolean "Intel EG20T(Topcliff) USB Device controller"
+	depends on PCI
+	select USB_GADGET_DUALSPEED
+	help
+	  This is a USB device driver for the EG20T PCH.
+	  The EG20T PCH is the platform controller hub used in Intel's
+	  general embedded platforms. It provides a USB device interface
+	  through which a host can access system devices connected to the
+	  USB device port.
+	  This driver enables the USB device function. The device controller
+	  is a USB peripheral controller that supports both full speed and
+	  high speed USB 2.0 data transfers.
+	  This driver supports control and bulk transfer modes; it does not
+	  support interrupt or isochronous transfer modes.
+
+config USB_EG20T
+	tristate
+	depends on USB_GADGET_EG20T
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_GADGET_CI13XXX_MSM
+	boolean "MIPS USB CI13xxx for MSM"
+	depends on ARCH_MSM
+	select USB_GADGET_DUALSPEED
+	select USB_MSM_OTG_72K
+	help
+	  MSM SoCs have a ChipIdea USB controller.  This driver uses the
+	  ci13xxx_udc core.
+	  It depends on the OTG driver for PHY initialization,
+	  clock management, powering up VBUS, and power management.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "ci13xxx_msm" and force all
+	  gadget drivers to also be dynamically linked.
+
+config USB_CI13XXX_MSM
+	tristate
+	depends on USB_GADGET_CI13XXX_MSM
+	default USB_GADGET
+	select USB_GADGET_SELECTED
 
 #
 # LAST -- dummy/emulated controller
@@ -685,6 +741,19 @@
          If you say "y" here, the Ethernet gadget driver will use the EEM
          protocol rather than ECM.  If unsure, say "n".
 
+config USB_G_NCM
+	tristate "Network Control Model (NCM) support"
+	depends on NET
+	select CRC32
+	help
+	  This driver implements the USB CDC NCM subclass standard. NCM is
+	  an advanced protocol for Ethernet encapsulation that allows
+	  grouping several Ethernet frames into one USB transfer and
+	  supports different alignment possibilities.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module called "g_ncm".
+
 config USB_GADGETFS
 	tristate "Gadget Filesystem (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 5780db4..55f5e8a 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -21,9 +21,13 @@
 obj-$(CONFIG_USB_M66592)	+= m66592-udc.o
 obj-$(CONFIG_USB_R8A66597)	+= r8a66597-udc.o
 obj-$(CONFIG_USB_FSL_QE)	+= fsl_qe_udc.o
-obj-$(CONFIG_USB_CI13XXX)	+= ci13xxx_udc.o
+obj-$(CONFIG_USB_CI13XXX_PCI)	+= ci13xxx_pci.o
 obj-$(CONFIG_USB_S3C_HSOTG)	+= s3c-hsotg.o
 obj-$(CONFIG_USB_LANGWELL)	+= langwell_udc.o
+obj-$(CONFIG_USB_EG20T)		+= pch_udc.o
+obj-$(CONFIG_USB_PXA_U2O)	+= mv_udc.o
+mv_udc-y			:= mv_udc_core.o mv_udc_phy.o
+obj-$(CONFIG_USB_CI13XXX_MSM)	+= ci13xxx_msm.o
 
 #
 # USB gadget drivers
@@ -43,6 +47,7 @@
 g_dbgp-y			:= dbgp.o
 g_nokia-y			:= nokia.o
 g_webcam-y			:= webcam.o
+g_ncm-y				:= ncm.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
 obj-$(CONFIG_USB_AUDIO)		+= g_audio.o
@@ -60,3 +65,4 @@
 obj-$(CONFIG_USB_G_MULTI)	+= g_multi.o
 obj-$(CONFIG_USB_G_NOKIA)	+= g_nokia.o
 obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
+obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 9034e03..f8dd726 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -3359,7 +3359,6 @@
 	dev_set_name(&dev->gadget.dev, "gadget");
 	dev->gadget.dev.release = gadget_release;
 	dev->gadget.name = name;
-	dev->gadget.name = name;
 	dev->gadget.is_dualspeed = 1;
 
 	/* init registers, interrupts, ... */
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 717ff65..e7c65a4 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -2057,8 +2057,10 @@
 		usba_ep_cleanup_debugfs(&usba_ep[i]);
 	usba_cleanup_debugfs(udc);
 
-	if (gpio_is_valid(udc->vbus_pin))
+	if (gpio_is_valid(udc->vbus_pin)) {
+		free_irq(gpio_to_irq(udc->vbus_pin), udc);
 		gpio_free(udc->vbus_pin);
+	}
 
 	free_irq(udc->irq, udc);
 	kfree(usba_ep);
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
new file mode 100644
index 0000000..139ac94
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -0,0 +1,134 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/msm_hsusb_hw.h>
+#include <linux/usb/ulpi.h>
+
+#include "ci13xxx_udc.c"
+
+#define MSM_USB_BASE	(udc->regs)
+
+static irqreturn_t msm_udc_irq(int irq, void *data)
+{
+	return udc_irq();
+}
+
+static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event)
+{
+	struct device *dev = udc->gadget.dev.parent;
+	int val;
+
+	switch (event) {
+	case CI13XXX_CONTROLLER_RESET_EVENT:
+		dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n");
+		writel(0, USB_AHBBURST);
+		writel(0, USB_AHBMODE);
+		break;
+	case CI13XXX_CONTROLLER_STOPPED_EVENT:
+		dev_dbg(dev, "CI13XXX_CONTROLLER_STOPPED_EVENT received\n");
+		/*
+		 * Put the transceiver in non-driving mode. Otherwise host
+		 * may not detect soft-disconnection.
+		 */
+		val = otg_io_read(udc->transceiver, ULPI_FUNC_CTRL);
+		val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+		val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+		otg_io_write(udc->transceiver, val, ULPI_FUNC_CTRL);
+		break;
+	default:
+		dev_dbg(dev, "unknown ci13xxx_udc event\n");
+		break;
+	}
+}
+
+static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = {
+	.name			= "ci13xxx_msm",
+	.flags			= CI13XXX_REGS_SHARED |
+				  CI13XXX_REQUIRE_TRANSCEIVER |
+				  CI13XXX_PULLUP_ON_VBUS |
+				  CI13XXX_DISABLE_STREAMING,
+
+	.notify_event		= ci13xxx_msm_notify_event,
+};
+
+static int ci13xxx_msm_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	void __iomem *regs;
+	int irq;
+	int ret;
+
+	dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n");
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get platform resource mem\n");
+		return -ENXIO;
+	}
+
+	regs = ioremap(res->start, resource_size(res));
+	if (!regs) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	ret = udc_probe(&ci13xxx_msm_udc_driver, &pdev->dev, regs);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "udc_probe failed\n");
+		goto iounmap;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "IRQ not found\n");
+		ret = -ENXIO;
+		goto udc_remove;
+	}
+
+	ret = request_irq(irq, msm_udc_irq, IRQF_SHARED, pdev->name, pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "request_irq failed\n");
+		goto udc_remove;
+	}
+
+	pm_runtime_no_callbacks(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+
+udc_remove:
+	udc_remove();
+iounmap:
+	iounmap(regs);
+
+	return ret;
+}
+
+static struct platform_driver ci13xxx_msm_driver = {
+	.probe = ci13xxx_msm_probe,
+	.driver = { .name = "msm_hsusb", },
+};
+
+static int __init ci13xxx_msm_init(void)
+{
+	return platform_driver_register(&ci13xxx_msm_driver);
+}
+module_init(ci13xxx_msm_init);
diff --git a/drivers/usb/gadget/ci13xxx_pci.c b/drivers/usb/gadget/ci13xxx_pci.c
new file mode 100644
index 0000000..883ab5e
--- /dev/null
+++ b/drivers/usb/gadget/ci13xxx_pci.c
@@ -0,0 +1,176 @@
+/*
+ * ci13xxx_pci.c - MIPS USB IP core family device controller
+ *
+ * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ *
+ * Author: David Lopo
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ci13xxx_udc.c"
+
+/* driver name */
+#define UDC_DRIVER_NAME   "ci13xxx_pci"
+
+/******************************************************************************
+ * PCI block
+ *****************************************************************************/
+/**
+ * ci13xxx_pci_irq: interrupt handler
+ * @irq:  irq number
+ * @pdev: USB Device Controller interrupt source
+ *
+ * This function returns IRQ_HANDLED if the IRQ has been handled
+ * This is an ISR; don't trace it, use the attribute interface instead
+ */
+static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
+{
+	if (irq == 0) {
+		dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
+		return IRQ_HANDLED;
+	}
+	return udc_irq();
+}
+
+static struct ci13xxx_udc_driver ci13xxx_pci_udc_driver = {
+	.name		= UDC_DRIVER_NAME,
+};
+
+/**
+ * ci13xxx_pci_probe: PCI probe
+ * @pdev: USB device controller being probed
+ * @id:   PCI hotplug ID connecting controller to UDC framework
+ *
+ * This function returns an error code
+ * Allocates basic PCI resources for this USB device controller, and then
+ * invokes the udc_probe() method to start the UDC associated with it
+ */
+static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
+				       const struct pci_device_id *id)
+{
+	void __iomem *regs = NULL;
+	int retval = 0;
+
+	if (id == NULL)
+		return -EINVAL;
+
+	retval = pci_enable_device(pdev);
+	if (retval)
+		goto done;
+
+	if (!pdev->irq) {
+		dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
+		retval = -ENODEV;
+		goto disable_device;
+	}
+
+	retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
+	if (retval)
+		goto disable_device;
+
+	/* BAR 0 holds all the registers */
+	regs = pci_iomap(pdev, 0, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "Error mapping memory!");
+		retval = -EFAULT;
+		goto release_regions;
+	}
+	pci_set_drvdata(pdev, (__force void *)regs);
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	retval = udc_probe(&ci13xxx_pci_udc_driver, &pdev->dev, regs);
+	if (retval)
+		goto iounmap;
+
+	/* our device does not have MSI capability */
+
+	retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
+			     UDC_DRIVER_NAME, pdev);
+	if (retval)
+		goto gadget_remove;
+
+	return 0;
+
+ gadget_remove:
+	udc_remove();
+ iounmap:
+	pci_iounmap(pdev, regs);
+ release_regions:
+	pci_release_regions(pdev);
+ disable_device:
+	pci_disable_device(pdev);
+ done:
+	return retval;
+}
+
+/**
+ * ci13xxx_pci_remove: PCI remove
+ * @pdev: USB Device Controller being removed
+ *
+ * Reverses the effect of ci13xxx_pci_probe(),
+ * first invoking the udc_remove() and then releases
+ * all PCI resources allocated for this USB device controller
+ */
+static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
+{
+	free_irq(pdev->irq, pdev);
+	udc_remove();
+	pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * PCI device table
+ * PCI device structure
+ *
+ * Check "pci.h" for details
+ */
+static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
+	{ PCI_DEVICE(0x153F, 0x1004) },
+	{ PCI_DEVICE(0x153F, 0x1006) },
+	{ 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
+
+static struct pci_driver ci13xxx_pci_driver = {
+	.name         =	UDC_DRIVER_NAME,
+	.id_table     =	ci13xxx_pci_id_table,
+	.probe        =	ci13xxx_pci_probe,
+	.remove       =	__devexit_p(ci13xxx_pci_remove),
+};
+
+/**
+ * ci13xxx_pci_init: module init
+ *
+ * Driver load
+ */
+static int __init ci13xxx_pci_init(void)
+{
+	return pci_register_driver(&ci13xxx_pci_driver);
+}
+module_init(ci13xxx_pci_init);
+
+/**
+ * ci13xxx_pci_exit: module exit
+ *
+ * Driver unload
+ */
+static void __exit ci13xxx_pci_exit(void)
+{
+	pci_unregister_driver(&ci13xxx_pci_driver);
+}
+module_exit(ci13xxx_pci_exit);
+
+MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
+MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("June 2008");
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 98b36fc..31656a2 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -22,7 +22,6 @@
  * - ENDPT:  endpoint operations (Gadget API)
  * - GADGET: gadget operations (Gadget API)
  * - BUS:    bus glue code, bus abstraction layer
- * - PCI:    PCI core interface and PCI resources (interrupts, memory...)
  *
  * Compile Options
  * - CONFIG_USB_GADGET_DEBUG_FILES: enable debug facilities
@@ -60,11 +59,11 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
 
 #include "ci13xxx_udc.h"
 
@@ -75,9 +74,6 @@
 /* ctrl register bank access */
 static DEFINE_SPINLOCK(udc_lock);
 
-/* driver name */
-#define UDC_DRIVER_NAME   "ci13xxx_udc"
-
 /* control endpoint description */
 static const struct usb_endpoint_descriptor
 ctrl_endpt_desc = {
@@ -132,6 +128,9 @@
 	size_t        size;   /* bank size */
 } hw_bank;
 
+/* MSM specific */
+#define ABS_AHBBURST        (0x0090UL)
+#define ABS_AHBMODE         (0x0098UL)
 /* UDC register map */
 #define ABS_CAPLENGTH       (0x100UL)
 #define ABS_HCCPARAMS       (0x108UL)
@@ -248,13 +247,7 @@
 	return (reg & mask) >> ffs_nr(mask);
 }
 
-/**
- * hw_device_reset: resets chip (execute without interruption)
- * @base: register base address
- *
- * This function returns an error code
- */
-static int hw_device_reset(void __iomem *base)
+static int hw_device_init(void __iomem *base)
 {
 	u32 reg;
 
@@ -271,25 +264,6 @@
 	hw_bank.size += CAP_LAST;
 	hw_bank.size /= sizeof(u32);
 
-	/* should flush & stop before reset */
-	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
-	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
-
-	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
-	while (hw_cread(CAP_USBCMD, USBCMD_RST))
-		udelay(10);             /* not RTOS friendly */
-
-	/* USBMODE should be configured step by step */
-	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
-	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
-	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
-
-	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
-		pr_err("cannot enter in device mode");
-		pr_err("lpm = %i", hw_bank.lpm);
-		return -ENODEV;
-	}
-
 	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
 	if (reg == 0 || reg > ENDPT_MAX)
 		return -ENODEV;
@@ -304,6 +278,43 @@
 
 	return 0;
 }
+/**
+ * hw_device_reset: resets chip (execute without interruption)
+ * @udc: the controller to reset
+ *
+ * This function returns an error code
+ */
+static int hw_device_reset(struct ci13xxx *udc)
+{
+	/* should flush & stop before reset */
+	hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
+	hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
+
+	hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
+	while (hw_cread(CAP_USBCMD, USBCMD_RST))
+		udelay(10);             /* not RTOS friendly */
+
+	if (udc->udc_driver->notify_event)
+		udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_RESET_EVENT);
+
+	if (udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)
+		hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
+
+	/* USBMODE should be configured step by step */
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
+	hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
+	hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);  /* HW >= 2.3 */
+
+	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
+		pr_err("cannot enter in device mode");
+		pr_err("lpm = %i", hw_bank.lpm);
+		return -ENODEV;
+	}
+
+	return 0;
+}
 
 /**
  * hw_device_state: enables/disables interrupts & starts/stops device (execute
@@ -1449,7 +1460,7 @@
 	mReq->ptr->page[0]  = mReq->req.dma;
 	for (i = 1; i < 5; i++)
 		mReq->ptr->page[i] =
-			(mReq->req.dma + i * PAGE_SIZE) & ~TD_RESERVED_MASK;
+			(mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
 
 	/*
 	 *  QH configuration
@@ -1540,7 +1551,7 @@
 		list_del_init(&mReq->queue);
 		mReq->req.status = -ESHUTDOWN;
 
-		if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+		if (mReq->req.complete != NULL) {
 			spin_unlock(mEp->lock);
 			mReq->req.complete(&mEp->ep, &mReq->req);
 			spin_lock(mEp->lock);
@@ -1557,8 +1568,6 @@
  * Caller must hold lock
  */
 static int _gadget_stop_activity(struct usb_gadget *gadget)
-__releases(udc->lock)
-__acquires(udc->lock)
 {
 	struct usb_ep *ep;
 	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
@@ -1570,8 +1579,6 @@
 	if (gadget == NULL)
 		return -EINVAL;
 
-	spin_unlock(udc->lock);
-
 	/* flush all endpoints */
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
@@ -1591,8 +1598,6 @@
 		mEp->status = NULL;
 	}
 
-	spin_lock(udc->lock);
-
 	return 0;
 }
 
@@ -1621,6 +1626,7 @@
 
 	dbg_event(0xFF, "BUS RST", 0);
 
+	spin_unlock(udc->lock);
 	retval = _gadget_stop_activity(&udc->gadget);
 	if (retval)
 		goto done;
@@ -1629,10 +1635,9 @@
 	if (retval)
 		goto done;
 
-	spin_unlock(udc->lock);
 	retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
 	if (!retval) {
-		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_KERNEL);
+		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
 		if (mEp->status == NULL) {
 			usb_ep_disable(&mEp->ep);
 			retval = -ENOMEM;
@@ -1789,18 +1794,20 @@
 
 	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
 
-	if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+		struct ci13xxx_req* mReqEnq;
+
+		mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
+				  struct ci13xxx_req, queue);
+		_hardware_enqueue(mEp, mReqEnq);
+	}
+
+	if (mReq->req.complete != NULL) {
 		spin_unlock(mEp->lock);
 		mReq->req.complete(&mEp->ep, &mReq->req);
 		spin_lock(mEp->lock);
 	}
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
-		mReq = list_entry(mEp->qh[mEp->dir].queue.next,
-				  struct ci13xxx_req, queue);
-		_hardware_enqueue(mEp, mReq);
-	}
-
  done:
 	return retval;
 }
@@ -2061,7 +2068,6 @@
 {
 	struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
 	struct ci13xxx_req *mReq = NULL;
-	unsigned long flags;
 
 	trace("%p, %i", ep, gfp_flags);
 
@@ -2070,8 +2076,6 @@
 		return NULL;
 	}
 
-	spin_lock_irqsave(mEp->lock, flags);
-
 	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
 	if (mReq != NULL) {
 		INIT_LIST_HEAD(&mReq->queue);
@@ -2086,8 +2090,6 @@
 
 	dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
 
-	spin_unlock_irqrestore(mEp->lock, flags);
-
 	return (mReq == NULL) ? NULL : &mReq->req;
 }
 
@@ -2157,8 +2159,8 @@
 		goto done;
 	}
 
-	if (req->length > (4 * PAGE_SIZE)) {
-		req->length = (4 * PAGE_SIZE);
+	if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
+		req->length = (4 * CI13XXX_PAGE_SIZE);
 		retval = -EMSGSIZE;
 		warn("request length truncated");
 	}
@@ -2170,8 +2172,10 @@
 	mReq->req.actual = 0;
 	list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
 
-	retval = _hardware_enqueue(mEp, mReq);
-	if (retval == -EALREADY || retval == -EBUSY) {
+	if (list_is_singular(&mEp->qh[mEp->dir].queue))
+		retval = _hardware_enqueue(mEp, mReq);
+
+	if (retval == -EALREADY) {
 		dbg_event(_usb_addr(mEp), "QUEUE", retval);
 		retval = 0;
 	}
@@ -2209,7 +2213,7 @@
 	list_del_init(&mReq->queue);
 	req->status = -ECONNRESET;
 
-	if (!mReq->req.no_interrupt && mReq->req.complete != NULL) {
+	if (mReq->req.complete != NULL) {
 		spin_unlock(mEp->lock);
 		mReq->req.complete(&mEp->ep, &mReq->req);
 		spin_lock(mEp->lock);
@@ -2332,12 +2336,47 @@
 /******************************************************************************
  * GADGET block
  *****************************************************************************/
+static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
+	unsigned long flags;
+	int gadget_ready = 0;
+
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(udc->lock, flags);
+	udc->vbus_active = is_active;
+	if (udc->driver)
+		gadget_ready = 1;
+	spin_unlock_irqrestore(udc->lock, flags);
+
+	if (gadget_ready) {
+		if (is_active) {
+			pm_runtime_get_sync(&_gadget->dev);
+			hw_device_reset(udc);
+			hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+		} else {
+			hw_device_state(0);
+			if (udc->udc_driver->notify_event)
+				udc->udc_driver->notify_event(udc,
+				CI13XXX_CONTROLLER_STOPPED_EVENT);
+			_gadget_stop_activity(&udc->gadget);
+			pm_runtime_put_sync(&_gadget->dev);
+		}
+	}
+
+	return 0;
+}
+
 /**
  * Device operations part of the API to the USB controller hardware,
  * which don't involve endpoints (or i/o)
  * Check  "usb_gadget.h" for details
  */
-static const struct usb_gadget_ops usb_gadget_ops;
+static const struct usb_gadget_ops usb_gadget_ops = {
+	.vbus_session	= ci13xxx_vbus_session,
+};
 
 /**
  * usb_gadget_probe_driver: register a gadget driver
@@ -2358,7 +2397,6 @@
 
 	if (driver             == NULL ||
 	    bind               == NULL ||
-	    driver->unbind     == NULL ||
 	    driver->setup      == NULL ||
 	    driver->disconnect == NULL ||
 	    driver->suspend    == NULL ||
@@ -2372,13 +2410,13 @@
 	/* alloc resources */
 	udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
 				       sizeof(struct ci13xxx_qh),
-				       64, PAGE_SIZE);
+				       64, CI13XXX_PAGE_SIZE);
 	if (udc->qh_pool == NULL)
 		return -ENOMEM;
 
 	udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
 				       sizeof(struct ci13xxx_td),
-				       64, PAGE_SIZE);
+				       64, CI13XXX_PAGE_SIZE);
 	if (udc->td_pool == NULL) {
 		dma_pool_destroy(udc->qh_pool);
 		udc->qh_pool = NULL;
@@ -2390,7 +2428,6 @@
 	info("hw_ep_max = %d", hw_ep_max);
 
 	udc->driver = driver;
-	udc->gadget.ops        = NULL;
 	udc->gadget.dev.driver = NULL;
 
 	retval = 0;
@@ -2410,9 +2447,11 @@
 		/* this allocation cannot be random */
 		for (k = RX; k <= TX; k++) {
 			INIT_LIST_HEAD(&mEp->qh[k].queue);
+			spin_unlock_irqrestore(udc->lock, flags);
 			mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
 							GFP_KERNEL,
 							&mEp->qh[k].dma);
+			spin_lock_irqsave(udc->lock, flags);
 			if (mEp->qh[k].ptr == NULL)
 				retval = -ENOMEM;
 			else
@@ -2429,7 +2468,6 @@
 
 	/* bind gadget */
 	driver->driver.bus     = NULL;
-	udc->gadget.ops        = &usb_gadget_ops;
 	udc->gadget.dev.driver = &driver->driver;
 
 	spin_unlock_irqrestore(udc->lock, flags);
@@ -2437,12 +2475,24 @@
 	spin_lock_irqsave(udc->lock, flags);
 
 	if (retval) {
-		udc->gadget.ops        = NULL;
 		udc->gadget.dev.driver = NULL;
 		goto done;
 	}
 
+	pm_runtime_get_sync(&udc->gadget.dev);
+	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
+		if (udc->vbus_active) {
+			if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
+				hw_device_reset(udc);
+		} else {
+			pm_runtime_put_sync(&udc->gadget.dev);
+			goto done;
+		}
+	}
+
 	retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+	if (retval)
+		pm_runtime_put_sync(&udc->gadget.dev);
 
  done:
 	spin_unlock_irqrestore(udc->lock, flags);
@@ -2475,19 +2525,22 @@
 
 	spin_lock_irqsave(udc->lock, flags);
 
-	hw_device_state(0);
+	if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
+			udc->vbus_active) {
+		hw_device_state(0);
+		if (udc->udc_driver->notify_event)
+			udc->udc_driver->notify_event(udc,
+			CI13XXX_CONTROLLER_STOPPED_EVENT);
+		_gadget_stop_activity(&udc->gadget);
+		pm_runtime_put(&udc->gadget.dev);
+	}
 
 	/* unbind gadget */
-	if (udc->gadget.ops != NULL) {
-		_gadget_stop_activity(&udc->gadget);
+	spin_unlock_irqrestore(udc->lock, flags);
+	driver->unbind(&udc->gadget);               /* MAY SLEEP */
+	spin_lock_irqsave(udc->lock, flags);
 
-		spin_unlock_irqrestore(udc->lock, flags);
-		driver->unbind(&udc->gadget);               /* MAY SLEEP */
-		spin_lock_irqsave(udc->lock, flags);
-
-		udc->gadget.ops        = NULL;
-		udc->gadget.dev.driver = NULL;
-	}
+	udc->gadget.dev.driver = NULL;
 
 	/* free resources */
 	for (i = 0; i < hw_ep_max; i++) {
@@ -2544,6 +2597,14 @@
 	}
 
 	spin_lock(udc->lock);
+
+	if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
+		if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
+				USBMODE_CM_DEVICE) {
+			spin_unlock(udc->lock);
+			return IRQ_NONE;
+		}
+	}
 	intr = hw_test_and_clear_intr_active();
 	if (intr) {
 		isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
@@ -2602,14 +2663,16 @@
  * No interrupts active, the IRQ has not been requested yet
  * Kernel assumes 32-bit DMA operations by default, no need to dma_set_mask
  */
-static int udc_probe(struct device *dev, void __iomem *regs, const char *name)
+static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
+		void __iomem *regs)
 {
 	struct ci13xxx *udc;
 	int retval = 0;
 
 	trace("%p, %p, %p", dev, regs, name);
 
-	if (dev == NULL || regs == NULL || name == NULL)
+	if (dev == NULL || regs == NULL || driver == NULL ||
+			driver->name == NULL)
 		return -EINVAL;
 
 	udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
@@ -2617,42 +2680,77 @@
 		return -ENOMEM;
 
 	udc->lock = &udc_lock;
+	udc->regs = regs;
+	udc->udc_driver = driver;
 
-	retval = hw_device_reset(regs);
-	if (retval)
-		goto done;
-
-	udc->gadget.ops          = NULL;
+	udc->gadget.ops          = &usb_gadget_ops;
 	udc->gadget.speed        = USB_SPEED_UNKNOWN;
 	udc->gadget.is_dualspeed = 1;
 	udc->gadget.is_otg       = 0;
-	udc->gadget.name         = name;
+	udc->gadget.name         = driver->name;
 
 	INIT_LIST_HEAD(&udc->gadget.ep_list);
 	udc->gadget.ep0 = NULL;
 
 	dev_set_name(&udc->gadget.dev, "gadget");
 	udc->gadget.dev.dma_mask = dev->dma_mask;
+	udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
 	udc->gadget.dev.parent   = dev;
 	udc->gadget.dev.release  = udc_release;
 
+	retval = hw_device_init(regs);
+	if (retval < 0)
+		goto free_udc;
+
+	udc->transceiver = otg_get_transceiver();
+
+	if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
+		if (udc->transceiver == NULL) {
+			retval = -ENODEV;
+			goto free_udc;
+		}
+	}
+
+	if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
+		retval = hw_device_reset(udc);
+		if (retval)
+			goto put_transceiver;
+	}
+
 	retval = device_register(&udc->gadget.dev);
-	if (retval)
-		goto done;
+	if (retval) {
+		put_device(&udc->gadget.dev);
+		goto put_transceiver;
+	}
 
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 	retval = dbg_create_files(&udc->gadget.dev);
 #endif
-	if (retval) {
-		device_unregister(&udc->gadget.dev);
-		goto done;
+	if (retval)
+		goto unreg_device;
+
+	if (udc->transceiver) {
+		retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
+		if (retval)
+			goto remove_dbg;
 	}
+	pm_runtime_no_callbacks(&udc->gadget.dev);
+	pm_runtime_enable(&udc->gadget.dev);
 
 	_udc = udc;
 	return retval;
 
- done:
 	err("error = %i", retval);
+remove_dbg:
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	dbg_remove_files(&udc->gadget.dev);
+#endif
+unreg_device:
+	device_unregister(&udc->gadget.dev);
+put_transceiver:
+	if (udc->transceiver)
+		otg_put_transceiver(udc->transceiver);
+free_udc:
 	kfree(udc);
 	_udc = NULL;
 	return retval;
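
The reworked error path unwinds the probe steps in strict reverse order; a schematic summary of the labels (not part of the patch):

	/*
	 * udc_probe() unwind (schematic):
	 *
	 *   kzalloc()              -> free_udc:        kfree(udc)
	 *   hw_device_init()       -> (failure jumps straight to free_udc)
	 *   otg_get_transceiver()  -> put_transceiver: otg_put_transceiver()
	 *   device_register()      -> unreg_device:    device_unregister()
	 *                             (a register failure itself still needs
	 *                              put_device() before unwinding further)
	 *   dbg_create_files()     -> remove_dbg:      dbg_remove_files()
	 *   otg_set_peripheral()   -> (failure jumps to remove_dbg)
	 */
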
@@ -2672,6 +2770,10 @@
 		return;
 	}
 
+	if (udc->transceiver) {
+		otg_set_peripheral(udc->transceiver, &udc->gadget);
+		otg_put_transceiver(udc->transceiver);
+	}
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 	dbg_remove_files(&udc->gadget.dev);
 #endif
@@ -2680,156 +2782,3 @@
 	kfree(udc);
 	_udc = NULL;
 }
-
-/******************************************************************************
- * PCI block
- *****************************************************************************/
-/**
- * ci13xxx_pci_irq: interrut handler
- * @irq:  irq number
- * @pdev: USB Device Controller interrupt source
- *
- * This function returns IRQ_HANDLED if the IRQ has been handled
- * This is an ISR don't trace, use attribute interface instead
- */
-static irqreturn_t ci13xxx_pci_irq(int irq, void *pdev)
-{
-	if (irq == 0) {
-		dev_err(&((struct pci_dev *)pdev)->dev, "Invalid IRQ0 usage!");
-		return IRQ_HANDLED;
-	}
-	return udc_irq();
-}
-
-/**
- * ci13xxx_pci_probe: PCI probe
- * @pdev: USB device controller being probed
- * @id:   PCI hotplug ID connecting controller to UDC framework
- *
- * This function returns an error code
- * Allocates basic PCI resources for this USB device controller, and then
- * invokes the udc_probe() method to start the UDC associated with it
- */
-static int __devinit ci13xxx_pci_probe(struct pci_dev *pdev,
-				       const struct pci_device_id *id)
-{
-	void __iomem *regs = NULL;
-	int retval = 0;
-
-	if (id == NULL)
-		return -EINVAL;
-
-	retval = pci_enable_device(pdev);
-	if (retval)
-		goto done;
-
-	if (!pdev->irq) {
-		dev_err(&pdev->dev, "No IRQ, check BIOS/PCI setup!");
-		retval = -ENODEV;
-		goto disable_device;
-	}
-
-	retval = pci_request_regions(pdev, UDC_DRIVER_NAME);
-	if (retval)
-		goto disable_device;
-
-	/* BAR 0 holds all the registers */
-	regs = pci_iomap(pdev, 0, 0);
-	if (!regs) {
-		dev_err(&pdev->dev, "Error mapping memory!");
-		retval = -EFAULT;
-		goto release_regions;
-	}
-	pci_set_drvdata(pdev, (__force void *)regs);
-
-	pci_set_master(pdev);
-	pci_try_set_mwi(pdev);
-
-	retval = udc_probe(&pdev->dev, regs, UDC_DRIVER_NAME);
-	if (retval)
-		goto iounmap;
-
-	/* our device does not have MSI capability */
-
-	retval = request_irq(pdev->irq, ci13xxx_pci_irq, IRQF_SHARED,
-			     UDC_DRIVER_NAME, pdev);
-	if (retval)
-		goto gadget_remove;
-
-	return 0;
-
- gadget_remove:
-	udc_remove();
- iounmap:
-	pci_iounmap(pdev, regs);
- release_regions:
-	pci_release_regions(pdev);
- disable_device:
-	pci_disable_device(pdev);
- done:
-	return retval;
-}
-
-/**
- * ci13xxx_pci_remove: PCI remove
- * @pdev: USB Device Controller being removed
- *
- * Reverses the effect of ci13xxx_pci_probe(),
- * first invoking the udc_remove() and then releases
- * all PCI resources allocated for this USB device controller
- */
-static void __devexit ci13xxx_pci_remove(struct pci_dev *pdev)
-{
-	free_irq(pdev->irq, pdev);
-	udc_remove();
-	pci_iounmap(pdev, (__force void __iomem *)pci_get_drvdata(pdev));
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-}
-
-/**
- * PCI device table
- * PCI device structure
- *
- * Check "pci.h" for details
- */
-static DEFINE_PCI_DEVICE_TABLE(ci13xxx_pci_id_table) = {
-	{ PCI_DEVICE(0x153F, 0x1004) },
-	{ PCI_DEVICE(0x153F, 0x1006) },
-	{ 0, 0, 0, 0, 0, 0, 0 /* end: all zeroes */ }
-};
-MODULE_DEVICE_TABLE(pci, ci13xxx_pci_id_table);
-
-static struct pci_driver ci13xxx_pci_driver = {
-	.name         =	UDC_DRIVER_NAME,
-	.id_table     =	ci13xxx_pci_id_table,
-	.probe        =	ci13xxx_pci_probe,
-	.remove       =	__devexit_p(ci13xxx_pci_remove),
-};
-
-/**
- * ci13xxx_pci_init: module init
- *
- * Driver load
- */
-static int __init ci13xxx_pci_init(void)
-{
-	return pci_register_driver(&ci13xxx_pci_driver);
-}
-module_init(ci13xxx_pci_init);
-
-/**
- * ci13xxx_pci_exit: module exit
- *
- * Driver unload
- */
-static void __exit ci13xxx_pci_exit(void)
-{
-	pci_unregister_driver(&ci13xxx_pci_driver);
-}
-module_exit(ci13xxx_pci_exit);
-
-MODULE_AUTHOR("MIPS - David Lopo <dlopo@chipidea.mips.com>");
-MODULE_DESCRIPTION("MIPS CI13XXX USB Peripheral Controller");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("June 2008");
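
The PCI glue deleted above is expected to move into a separate bus-glue file built around the new udc_probe()/udc_remove() entry points. A minimal sketch of a hypothetical platform-bus glue follows; the struct, names and resource handling are illustrative only, IRQ wiring and error unwinding are omitted, and the glue is assumed to be built in the same translation unit as the core since udc_probe() is static:

	#include <linux/platform_device.h>
	#include <linux/io.h>

	static struct ci13xxx_udc_driver glue_udc_driver = {
		.name  = "ci13xxx_glue",
		.flags = CI13XXX_REGS_SHARED |
			 CI13XXX_REQUIRE_TRANSCEIVER |
			 CI13XXX_PULLUP_ON_VBUS |
			 CI13XXX_DISABLE_STREAMING,
	};

	static int __devinit glue_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *regs;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		regs = ioremap(res->start, resource_size(res));
		if (!regs)
			return -ENOMEM;

		/* Hand the mapped register window to the core. */
		return udc_probe(&glue_udc_driver, &pdev->dev, regs);
	}
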
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 4026e9c..f61fed0 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -19,6 +19,7 @@
 /******************************************************************************
  * DEFINE
  *****************************************************************************/
+#define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
 #define ENDPT_MAX          (16)
 #define CTRL_PAYLOAD_MAX   (64)
 #define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
@@ -97,9 +98,24 @@
 	struct dma_pool                       *td_pool;
 };
 
+struct ci13xxx;
+struct ci13xxx_udc_driver {
+	const char	*name;
+	unsigned long	 flags;
+#define CI13XXX_REGS_SHARED		BIT(0)
+#define CI13XXX_REQUIRE_TRANSCEIVER	BIT(1)
+#define CI13XXX_PULLUP_ON_VBUS		BIT(2)
+#define CI13XXX_DISABLE_STREAMING	BIT(3)
+
+#define CI13XXX_CONTROLLER_RESET_EVENT		0
+#define CI13XXX_CONTROLLER_STOPPED_EVENT	1
+	void	(*notify_event) (struct ci13xxx *udc, unsigned event);
+};
+
 /* CI13XXX UDC descriptor & global resources */
 struct ci13xxx {
 	spinlock_t		  *lock;      /* ctrl register bank access */
+	void __iomem              *regs;      /* registers address space */
 
 	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
 	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
@@ -108,6 +124,9 @@
 	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
 
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
+	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
+	int                        vbus_active; /* is VBUS active */
+	struct otg_transceiver    *transceiver; /* Transceiver struct */
 };
 
 /******************************************************************************
@@ -157,6 +176,7 @@
 #define    USBMODE_CM_DEVICE  (0x02UL <<  0)
 #define    USBMODE_CM_HOST    (0x03UL <<  0)
 #define USBMODE_SLOM          BIT(3)
+#define USBMODE_SDIS          BIT(4)
 
 /* ENDPTCTRL */
 #define ENDPTCTRL_RXS         BIT(0)
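
The new notify_event() hook gives bus glue a chance to react to controller lifecycle changes; only the reset and stopped events are defined so far. A minimal sketch of a handler (the glue-specific actions are placeholders, not part of the patch):

	static void glue_udc_notify_event(struct ci13xxx *udc, unsigned event)
	{
		switch (event) {
		case CI13XXX_CONTROLLER_RESET_EVENT:
			/* e.g. re-apply glue-specific quirks after a bus reset */
			break;
		case CI13XXX_CONTROLLER_STOPPED_EVENT:
			/* e.g. release clocks once the controller is idle */
			break;
		default:
			break;
		}
	}
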
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8572dad..f6ff845 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1126,7 +1126,7 @@
 	if (bcdDevice)
 		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
 
-	/* stirng overrides */
+	/* string overrides */
 	if (iManufacturer || !cdev->desc.iManufacturer) {
 		if (!iManufacturer && !composite->iManufacturer &&
 		    !*composite_manufacturer)
@@ -1188,6 +1188,8 @@
 		composite->suspend(cdev);
 
 	cdev->suspended = 1;
+
+	usb_gadget_vbus_draw(gadget, 2);
 }
 
 static void
@@ -1195,6 +1197,7 @@
 {
 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
 	struct usb_function		*f;
+	u8				maxpower;
 
 	/* REVISIT:  should we have config level
 	 * suspend/resume callbacks?
@@ -1207,6 +1210,11 @@
 			if (f->resume)
 				f->resume(f);
 		}
+
+		maxpower = cdev->config->bMaxPower;
+
+		usb_gadget_vbus_draw(gadget, maxpower ?
+			(2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
 	}
 
 	cdev->suspended = 0;
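
The suspend path drops the VBUS budget to a token 2 mA and the resume path restores the configured budget. bMaxPower is stored in 2 mA units, so a configuration advertising 500 mA carries bMaxPower == 250 and the code above requests 2 * 250 = 500 mA, falling back to the Kconfig default when bMaxPower is zero. An illustrative helper capturing the same unit handling (not part of the patch):

	#include <linux/usb/composite.h>

	static unsigned int config_resume_draw_mA(const struct usb_configuration *c)
	{
		/* bMaxPower is in 2 mA units; 0 means "use the build-time default". */
		return c->bMaxPower ? 2 * c->bMaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
	}
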
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 1d2a2ab..13b9f47 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1197,6 +1197,139 @@
 #define Ep_Request	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
 #define Ep_InRequest	(Ep_Request | USB_DIR_IN)
 
+
+/**
+ * handle_control_request() - handles all control transfers
+ * @dum: pointer to dummy (the_controller)
+ * @urb: the urb request to handle
+ * @setup: pointer to the setup data for a USB device control
+ *	 request
+ * @status: pointer to request handling status
+ *
+ * Return 0 - if the request was handled
+ *	  1 - if the request wasn't handled
+ *	  error code on error
+ */
+static int handle_control_request(struct dummy *dum, struct urb *urb,
+				  struct usb_ctrlrequest *setup,
+				  int *status)
+{
+	struct dummy_ep		*ep2;
+	int			ret_val = 1;
+	unsigned	w_index;
+	unsigned	w_value;
+
+	w_index = le16_to_cpu(setup->wIndex);
+	w_value = le16_to_cpu(setup->wValue);
+	switch (setup->bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		if (setup->bRequestType != Dev_Request)
+			break;
+		dum->address = w_value;
+		*status = 0;
+		dev_dbg(udc_dev(dum), "set_address = %d\n",
+				w_value);
+		ret_val = 0;
+		break;
+	case USB_REQ_SET_FEATURE:
+		if (setup->bRequestType == Dev_Request) {
+			ret_val = 0;
+			switch (w_value) {
+			case USB_DEVICE_REMOTE_WAKEUP:
+				break;
+			case USB_DEVICE_B_HNP_ENABLE:
+				dum->gadget.b_hnp_enable = 1;
+				break;
+			case USB_DEVICE_A_HNP_SUPPORT:
+				dum->gadget.a_hnp_support = 1;
+				break;
+			case USB_DEVICE_A_ALT_HNP_SUPPORT:
+				dum->gadget.a_alt_hnp_support = 1;
+				break;
+			default:
+				ret_val = -EOPNOTSUPP;
+			}
+			if (ret_val == 0) {
+				dum->devstatus |= (1 << w_value);
+				*status = 0;
+			}
+		} else if (setup->bRequestType == Ep_Request) {
+			/* endpoint halt */
+			ep2 = find_endpoint(dum, w_index);
+			if (!ep2 || ep2->ep.name == ep0name) {
+				ret_val = -EOPNOTSUPP;
+				break;
+			}
+			ep2->halted = 1;
+			ret_val = 0;
+			*status = 0;
+		}
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+		if (setup->bRequestType == Dev_Request) {
+			ret_val = 0;
+			switch (w_value) {
+			case USB_DEVICE_REMOTE_WAKEUP:
+				w_value = USB_DEVICE_REMOTE_WAKEUP;
+				break;
+			default:
+				ret_val = -EOPNOTSUPP;
+				break;
+			}
+			if (ret_val == 0) {
+				dum->devstatus &= ~(1 << w_value);
+				*status = 0;
+			}
+		} else if (setup->bRequestType == Ep_Request) {
+			/* endpoint halt */
+			ep2 = find_endpoint(dum, w_index);
+			if (!ep2) {
+				ret_val = -EOPNOTSUPP;
+				break;
+			}
+			if (!ep2->wedged)
+				ep2->halted = 0;
+			ret_val = 0;
+			*status = 0;
+		}
+		break;
+	case USB_REQ_GET_STATUS:
+		if (setup->bRequestType == Dev_InRequest
+				|| setup->bRequestType == Intf_InRequest
+				|| setup->bRequestType == Ep_InRequest) {
+			char *buf;
+			/*
+			 * device: remote wakeup, selfpowered
+			 * interface: nothing
+			 * endpoint: halt
+			 */
+			buf = (char *)urb->transfer_buffer;
+			if (urb->transfer_buffer_length > 0) {
+				if (setup->bRequestType == Ep_InRequest) {
+					ep2 = find_endpoint(dum, w_index);
+					if (!ep2) {
+						ret_val = -EOPNOTSUPP;
+						break;
+					}
+					buf[0] = ep2->halted;
+				} else if (setup->bRequestType ==
+					   Dev_InRequest) {
+					buf[0] = (u8)dum->devstatus;
+				} else
+					buf[0] = 0;
+			}
+			if (urb->transfer_buffer_length > 1)
+				buf[1] = 0;
+			urb->actual_length = min_t(u32, 2,
+				urb->transfer_buffer_length);
+			ret_val = 0;
+			*status = 0;
+		}
+		break;
+	}
+	return ret_val;
+}
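
The helper preserves the contract of the open-coded switch it replaces: 0 means the standard request was consumed here, a positive return hands the request on to the gadget driver's setup() callback, and a negative value reports an error. A fragmentary sketch of the dispatch (the real call site appears further down in this patch):

	value = handle_control_request(dum, urb, &setup, &status);
	if (value > 0) {
		/* not emulated here: let the gadget driver's setup() handle it,
		 * as the existing code below the replaced switch already does */
	}
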
+
 /* drive both sides of the transfers; looks like irq handlers to
  * both drivers except the callbacks aren't in_irq().
  */
@@ -1299,14 +1432,8 @@
 		if (ep == &dum->ep [0] && ep->setup_stage) {
 			struct usb_ctrlrequest		setup;
 			int				value = 1;
-			struct dummy_ep			*ep2;
-			unsigned			w_index;
-			unsigned			w_value;
 
 			setup = *(struct usb_ctrlrequest*) urb->setup_packet;
-			w_index = le16_to_cpu(setup.wIndex);
-			w_value = le16_to_cpu(setup.wValue);
-
 			/* paranoia, in case of stale queued data */
 			list_for_each_entry (req, &ep->queue, queue) {
 				list_del_init (&req->queue);
@@ -1328,117 +1455,9 @@
 			ep->last_io = jiffies;
 			ep->setup_stage = 0;
 			ep->halted = 0;
-			switch (setup.bRequest) {
-			case USB_REQ_SET_ADDRESS:
-				if (setup.bRequestType != Dev_Request)
-					break;
-				dum->address = w_value;
-				status = 0;
-				dev_dbg (udc_dev(dum), "set_address = %d\n",
-						w_value);
-				value = 0;
-				break;
-			case USB_REQ_SET_FEATURE:
-				if (setup.bRequestType == Dev_Request) {
-					value = 0;
-					switch (w_value) {
-					case USB_DEVICE_REMOTE_WAKEUP:
-						break;
-					case USB_DEVICE_B_HNP_ENABLE:
-						dum->gadget.b_hnp_enable = 1;
-						break;
-					case USB_DEVICE_A_HNP_SUPPORT:
-						dum->gadget.a_hnp_support = 1;
-						break;
-					case USB_DEVICE_A_ALT_HNP_SUPPORT:
-						dum->gadget.a_alt_hnp_support
-							= 1;
-						break;
-					default:
-						value = -EOPNOTSUPP;
-					}
-					if (value == 0) {
-						dum->devstatus |=
-							(1 << w_value);
-						status = 0;
-					}
 
-				} else if (setup.bRequestType == Ep_Request) {
-					// endpoint halt
-					ep2 = find_endpoint (dum, w_index);
-					if (!ep2 || ep2->ep.name == ep0name) {
-						value = -EOPNOTSUPP;
-						break;
-					}
-					ep2->halted = 1;
-					value = 0;
-					status = 0;
-				}
-				break;
-			case USB_REQ_CLEAR_FEATURE:
-				if (setup.bRequestType == Dev_Request) {
-					switch (w_value) {
-					case USB_DEVICE_REMOTE_WAKEUP:
-						dum->devstatus &= ~(1 <<
-							USB_DEVICE_REMOTE_WAKEUP);
-						value = 0;
-						status = 0;
-						break;
-					default:
-						value = -EOPNOTSUPP;
-						break;
-					}
-				} else if (setup.bRequestType == Ep_Request) {
-					// endpoint halt
-					ep2 = find_endpoint (dum, w_index);
-					if (!ep2) {
-						value = -EOPNOTSUPP;
-						break;
-					}
-					if (!ep2->wedged)
-						ep2->halted = 0;
-					value = 0;
-					status = 0;
-				}
-				break;
-			case USB_REQ_GET_STATUS:
-				if (setup.bRequestType == Dev_InRequest
-						|| setup.bRequestType
-							== Intf_InRequest
-						|| setup.bRequestType
-							== Ep_InRequest
-						) {
-					char *buf;
-
-					// device: remote wakeup, selfpowered
-					// interface: nothing
-					// endpoint: halt
-					buf = (char *)urb->transfer_buffer;
-					if (urb->transfer_buffer_length > 0) {
-						if (setup.bRequestType ==
-								Ep_InRequest) {
-	ep2 = find_endpoint (dum, w_index);
-	if (!ep2) {
-		value = -EOPNOTSUPP;
-		break;
-	}
-	buf [0] = ep2->halted;
-						} else if (setup.bRequestType ==
-								Dev_InRequest) {
-							buf [0] = (u8)
-								dum->devstatus;
-						} else
-							buf [0] = 0;
-					}
-					if (urb->transfer_buffer_length > 1)
-						buf [1] = 0;
-					urb->actual_length = min_t(u32, 2,
-						urb->transfer_buffer_length);
-					value = 0;
-					status = 0;
-				}
-				break;
-			}
+			value = handle_control_request(dum, urb, &setup,
+						       &status);
 
 			/* gadget driver handles all other requests.  block
 			 * until setup() returns; no reentrancy issues etc.
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 484c5ba..1499f9e 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1,10 +1,10 @@
 /*
- * f_fs.c -- user mode filesystem api for usb composite funtcion controllers
+ * f_fs.c -- user mode file system API for USB composite function controllers
  *
  * Copyright (C) 2010 Samsung Electronics
  * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
  *
- * Based on inode.c (GadgetFS):
+ * Based on inode.c (GadgetFS) which was:
  * Copyright (C) 2003-2004 David Brownell
  * Copyright (C) 2003 Agilent Technologies
  *
@@ -38,62 +38,56 @@
 #define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */
 
 
-/* Debuging *****************************************************************/
-
-#define ffs_printk(level, fmt, args...) printk(level "f_fs: " fmt "\n", ## args)
-
-#define FERR(...)  ffs_printk(KERN_ERR,  __VA_ARGS__)
-#define FINFO(...) ffs_printk(KERN_INFO, __VA_ARGS__)
-
-#ifdef DEBUG
-#  define FDBG(...) ffs_printk(KERN_DEBUG, __VA_ARGS__)
-#else
-#  define FDBG(...) do { } while (0)
-#endif /* DEBUG */
+/* Debugging ****************************************************************/
 
 #ifdef VERBOSE_DEBUG
-#  define FVDBG FDBG
+#  define pr_vdebug pr_debug
+#  define ffs_dump_mem(prefix, ptr, len) \
+	print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
 #else
-#  define FVDBG(...) do { } while (0)
+#  define pr_vdebug(...)                 do { } while (0)
+#  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
 #endif /* VERBOSE_DEBUG */
 
-#define ENTER()    FVDBG("%s()", __func__)
-
-#ifdef VERBOSE_DEBUG
-#  define ffs_dump_mem(prefix, ptr, len) \
-	print_hex_dump_bytes("f_fs" prefix ": ", DUMP_PREFIX_NONE, ptr, len)
-#else
-#  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
-#endif
+#define ENTER()    pr_vdebug("%s()\n", __func__)
 
 
 /* The data structure and setup file ****************************************/
 
 enum ffs_state {
-	/* Waiting for descriptors and strings. */
-	/* In this state no open(2), read(2) or write(2) on epfiles
+	/*
+	 * Waiting for descriptors and strings.
+	 *
+	 * In this state no open(2), read(2) or write(2) on epfiles
 	 * may succeed (which should not be the problem as there
-	 * should be no such files opened in the firts place). */
+	 * should be no such files opened in the first place).
+	 */
 	FFS_READ_DESCRIPTORS,
 	FFS_READ_STRINGS,
 
-	/* We've got descriptors and strings.  We are or have called
+	/*
+	 * We've got descriptors and strings.  We are or have called
 	 * functionfs_ready_callback().  functionfs_bind() may have
-	 * been called but we don't know. */
-	/* This is the only state in which operations on epfiles may
-	 * succeed. */
+	 * been called but we don't know.
+	 *
+	 * This is the only state in which operations on epfiles may
+	 * succeed.
+	 */
 	FFS_ACTIVE,
 
-	/* All endpoints have been closed.  This state is also set if
+	/*
+	 * All endpoints have been closed.  This state is also set if
 	 * we encounter an unrecoverable error.  The only
 	 * unrecoverable error is situation when after reading strings
-	 * from user space we fail to initialise EP files or
-	 * functionfs_ready_callback() returns with error (<0). */
-	/* In this state no open(2), read(2) or write(2) (both on ep0
+	 * from user space we fail to initialise epfiles or
+	 * functionfs_ready_callback() returns with error (<0).
+	 *
+	 * In this state no open(2), read(2) or write(2) (both on ep0
 	 * as well as epfile) may succeed (at this point epfiles are
 	 * unlinked and all closed so this is not a problem; ep0 is
 	 * also closed but ep0 file exists and so open(2) on ep0 must
-	 * fail). */
+	 * fail).
+	 */
 	FFS_CLOSING
 };
 
@@ -101,14 +95,18 @@
 enum ffs_setup_state {
 	/* There is no setup request pending. */
 	FFS_NO_SETUP,
-	/* User has read events and there was a setup request event
+	/*
+	 * User has read events and there was a setup request event
 	 * there.  The next read/write on ep0 will handle the
-	 * request. */
+	 * request.
+	 */
 	FFS_SETUP_PENDING,
-	/* There was event pending but before user space handled it
+	/*
+	 * There was an event pending but before user space handled it
 	 * some other event was introduced which canceled existing
 	 * setup.  If this state is set read/write on ep0 return
-	 * -EIDRM.  This state is only set when adding event. */
+	 * -EIDRM.  This state is only set when adding event.
+	 */
 	FFS_SETUP_CANCELED
 };
 
@@ -120,23 +118,29 @@
 struct ffs_data {
 	struct usb_gadget		*gadget;
 
-	/* Protect access read/write operations, only one read/write
+	/*
+	 * Protect access read/write operations, only one read/write
 	 * at a time.  As a consequence protects ep0req and company.
 	 * While setup request is being processed (queued) this is
-	 * held. */
+	 * held.
+	 */
 	struct mutex			mutex;
 
-	/* Protect access to enpoint related structures (basically
+	/*
+	 * Protect access to endpoint related structures (basically
 	 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
-	 * endpint zero. */
+	 * endpoint zero.
+	 */
 	spinlock_t			eps_lock;
 
-	/* XXX REVISIT do we need our own request? Since we are not
-	 * handling setup requests immidiatelly user space may be so
+	/*
+	 * XXX REVISIT do we need our own request? Since we are not
+	 * handling setup requests immediately user space may be so
 	 * slow that another setup will be sent to the gadget but this
 	 * time not to us but another function and then there could be
 	 * a race.  Is that the case? Or maybe we can use cdev->req
-	 * after all, maybe we just need some spinlock for that? */
+	 * after all, maybe we just need some spinlock for that?
+	 */
 	struct usb_request		*ep0req;		/* P: mutex */
 	struct completion		ep0req_completion;	/* P: mutex */
 	int				ep0req_status;		/* P: mutex */
@@ -150,7 +154,7 @@
 	enum ffs_state			state;
 
 	/*
-	 * Possible transations:
+	 * Possible transitions:
 	 * + FFS_NO_SETUP       -> FFS_SETUP_PENDING  -- P: ev.waitq.lock
 	 *               happens only in ep0 read which is P: mutex
 	 * + FFS_SETUP_PENDING  -> FFS_NO_SETUP       -- P: ev.waitq.lock
@@ -183,18 +187,21 @@
 	/* Active function */
 	struct ffs_function		*func;
 
-	/* Device name, write once when file system is mounted.
-	 * Intendet for user to read if she wants. */
+	/*
+	 * Device name, write once when file system is mounted.
+	 * Intended for user to read if she wants.
+	 */
 	const char			*dev_name;
-	/* Private data for our user (ie. gadget).  Managed by
-	 * user. */
+	/* Private data for our user (ie. gadget).  Managed by user. */
 	void				*private_data;
 
 	/* filled by __ffs_data_got_descs() */
-	/* real descriptors are 16 bytes after raw_descs (so you need
+	/*
+	 * Real descriptors are 16 bytes after raw_descs (so you need
 	 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
 	 * first full speed descriptor).  raw_descs_length and
-	 * raw_fs_descs_length do not have those 16 bytes added. */
+	 * raw_fs_descs_length do not have those 16 bytes added.
+	 */
 	const void			*raw_descs;
 	unsigned			raw_descs_length;
 	unsigned			raw_fs_descs_length;
@@ -211,18 +218,23 @@
 	const void			*raw_strings;
 	struct usb_gadget_strings	**stringtabs;
 
-	/* File system's super block, write once when file system is mounted. */
+	/*
+	 * File system's super block, write once when file system is
+	 * mounted.
+	 */
 	struct super_block		*sb;
 
-	/* File permissions, written once when fs is mounted*/
+	/* File permissions, written once when fs is mounted */
 	struct ffs_file_perms {
 		umode_t				mode;
 		uid_t				uid;
 		gid_t				gid;
 	}				file_perms;
 
-	/* The endpoint files, filled by ffs_epfiles_create(),
-	 * destroyed by ffs_epfiles_destroy(). */
+	/*
+	 * The endpoint files, filled by ffs_epfiles_create(),
+	 * destroyed by ffs_epfiles_destroy().
+	 */
 	struct ffs_epfile		*epfiles;
 };
 
@@ -236,7 +248,7 @@
 static void ffs_data_opened(struct ffs_data *ffs);
 static void ffs_data_closed(struct ffs_data *ffs);
 
-/* Called with ffs->mutex held; take over ownerrship of data. */
+/* Called with ffs->mutex held; take over ownership of data. */
 static int __must_check
 __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
 static int __must_check
@@ -267,11 +279,9 @@
 
 static void ffs_func_free(struct ffs_function *func);
 
-
 static void ffs_func_eps_disable(struct ffs_function *func);
 static int __must_check ffs_func_eps_enable(struct ffs_function *func);
 
-
 static int ffs_func_bind(struct usb_configuration *,
 			 struct usb_function *);
 static void ffs_func_unbind(struct usb_configuration *,
@@ -288,7 +298,6 @@
 static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
 
 
-
 /* The endpoints structures *************************************************/
 
 struct ffs_ep {
@@ -321,7 +330,6 @@
 	unsigned char			_pad;
 };
 
-
 static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
 
@@ -348,7 +356,6 @@
 	complete_all(&ffs->ep0req_completion);
 }
 
-
 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
 {
 	struct usb_request *req = ffs->ep0req;
@@ -380,17 +387,16 @@
 static int __ffs_ep0_stall(struct ffs_data *ffs)
 {
 	if (ffs->ev.can_stall) {
-		FVDBG("ep0 stall\n");
+		pr_vdebug("ep0 stall\n");
 		usb_ep_set_halt(ffs->gadget->ep0);
 		ffs->setup_state = FFS_NO_SETUP;
 		return -EL2HLT;
 	} else {
-		FDBG("bogus ep0 stall!\n");
+		pr_debug("bogus ep0 stall!\n");
 		return -ESRCH;
 	}
 }
 
-
 static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
 			     size_t len, loff_t *ptr)
 {
@@ -409,7 +415,6 @@
 	if (unlikely(ret < 0))
 		return ret;
 
-
 	/* Check state */
 	switch (ffs->state) {
 	case FFS_READ_DESCRIPTORS:
@@ -421,14 +426,14 @@
 		}
 
 		data = ffs_prepare_buffer(buf, len);
-		if (unlikely(IS_ERR(data))) {
+		if (IS_ERR(data)) {
 			ret = PTR_ERR(data);
 			break;
 		}
 
 		/* Handle data */
 		if (ffs->state == FFS_READ_DESCRIPTORS) {
-			FINFO("read descriptors");
+			pr_info("read descriptors\n");
 			ret = __ffs_data_got_descs(ffs, data, len);
 			if (unlikely(ret < 0))
 				break;
@@ -436,7 +441,7 @@
 			ffs->state = FFS_READ_STRINGS;
 			ret = len;
 		} else {
-			FINFO("read strings");
+			pr_info("read strings\n");
 			ret = __ffs_data_got_strings(ffs, data, len);
 			if (unlikely(ret < 0))
 				break;
@@ -461,11 +466,12 @@
 		}
 		break;
 
-
 	case FFS_ACTIVE:
 		data = NULL;
-		/* We're called from user space, we can use _irq
-		 * rather then _irqsave */
+		/*
+		 * We're called from user space, we can use _irq
+		 * rather than _irqsave
+		 */
 		spin_lock_irq(&ffs->ev.waitq.lock);
 		switch (FFS_SETUP_STATE(ffs)) {
 		case FFS_SETUP_CANCELED:
@@ -493,23 +499,25 @@
 		spin_unlock_irq(&ffs->ev.waitq.lock);
 
 		data = ffs_prepare_buffer(buf, len);
-		if (unlikely(IS_ERR(data))) {
+		if (IS_ERR(data)) {
 			ret = PTR_ERR(data);
 			break;
 		}
 
 		spin_lock_irq(&ffs->ev.waitq.lock);
 
-		/* We are guaranteed to be still in FFS_ACTIVE state
+		/*
+		 * We are guaranteed to be still in FFS_ACTIVE state
 		 * but the state of setup could have changed from
 		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
 		 * to check for that.  If that happened we copied data
-		 * from user space in vain but it's unlikely. */
-		/* For sure we are not in FFS_NO_SETUP since this is
+		 * from user space in vain but it's unlikely.
+		 *
+		 * For sure we are not in FFS_NO_SETUP since this is
 		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
 		 * transition can be performed and it's protected by
-		 * mutex. */
-
+		 * mutex.
+		 */
 		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
 			ret = -EIDRM;
 done_spin:
@@ -521,25 +529,22 @@
 		kfree(data);
 		break;
 
-
 	default:
 		ret = -EBADFD;
 		break;
 	}
 
-
 	mutex_unlock(&ffs->mutex);
 	return ret;
 }
 
-
-
 static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
 				     size_t n)
 {
-	/* We are holding ffs->ev.waitq.lock and ffs->mutex and we need
-	 * to release them. */
-
+	/*
+	 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
+	 * to release them.
+	 */
 	struct usb_functionfs_event events[n];
 	unsigned i = 0;
 
@@ -568,7 +573,6 @@
 		? -EFAULT : sizeof events;
 }
 
-
 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
 			    size_t len, loff_t *ptr)
 {
@@ -588,16 +592,16 @@
 	if (unlikely(ret < 0))
 		return ret;
 
-
 	/* Check state */
 	if (ffs->state != FFS_ACTIVE) {
 		ret = -EBADFD;
 		goto done_mutex;
 	}
 
-
-	/* We're called from user space, we can use _irq rather then
-	 * _irqsave */
+	/*
+	 * We're called from user space, we can use _irq rather than
+	 * _irqsave
+	 */
 	spin_lock_irq(&ffs->ev.waitq.lock);
 
 	switch (FFS_SETUP_STATE(ffs)) {
@@ -617,7 +621,8 @@
 			break;
 		}
 
-		if (unlikely(wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, ffs->ev.count))) {
+		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
+							ffs->ev.count)) {
 			ret = -EINTR;
 			break;
 		}
@@ -625,7 +630,6 @@
 		return __ffs_ep0_read_events(ffs, buf,
 					     min(n, (size_t)ffs->ev.count));
 
-
 	case FFS_SETUP_PENDING:
 		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
 			spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -671,8 +675,6 @@
 	return ret;
 }
 
-
-
 static int ffs_ep0_open(struct inode *inode, struct file *file)
 {
 	struct ffs_data *ffs = inode->i_private;
@@ -688,7 +690,6 @@
 	return 0;
 }
 
-
 static int ffs_ep0_release(struct inode *inode, struct file *file)
 {
 	struct ffs_data *ffs = file->private_data;
@@ -700,7 +701,6 @@
 	return 0;
 }
 
-
 static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
 {
 	struct ffs_data *ffs = file->private_data;
@@ -721,7 +721,6 @@
 	return ret;
 }
 
-
 static const struct file_operations ffs_ep0_operations = {
 	.owner =	THIS_MODULE,
 	.llseek =	no_llseek,
@@ -736,7 +735,6 @@
 
 /* "Normal" endpoints operations ********************************************/
 
-
 static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
 {
 	ENTER();
@@ -747,7 +745,6 @@
 	}
 }
 
-
 static ssize_t ffs_epfile_io(struct file *file,
 			     char __user *buf, size_t len, int read)
 {
@@ -777,8 +774,8 @@
 				goto error;
 			}
 
-			if (unlikely(wait_event_interruptible
-				     (epfile->wait, (ep = epfile->ep)))) {
+			if (wait_event_interruptible(epfile->wait,
+						     (ep = epfile->ep))) {
 				ret = -EINTR;
 				goto error;
 			}
@@ -810,12 +807,16 @@
 		if (unlikely(ret))
 			goto error;
 
-		/* We're called from user space, we can use _irq rather then
-		 * _irqsave */
+		/*
+		 * We're called from user space, we can use _irq rather than
+		 * _irqsave
+		 */
 		spin_lock_irq(&epfile->ffs->eps_lock);
 
-		/* While we were acquiring mutex endpoint got disabled
-		 * or changed? */
+		/*
+		 * While we were acquiring mutex endpoint got disabled
+		 * or changed?
+		 */
 	} while (unlikely(epfile->ep != ep));
 
 	/* Halt */
@@ -857,7 +858,6 @@
 	return ret;
 }
 
-
 static ssize_t
 ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
 		 loff_t *ptr)
@@ -903,7 +903,6 @@
 	return 0;
 }
 
-
 static long ffs_epfile_ioctl(struct file *file, unsigned code,
 			     unsigned long value)
 {
@@ -942,7 +941,6 @@
 	return ret;
 }
 
-
 static const struct file_operations ffs_epfile_operations = {
 	.owner =	THIS_MODULE,
 	.llseek =	no_llseek,
@@ -955,15 +953,13 @@
 };
 
 
-
 /* File system and super block operations ***********************************/
 
 /*
- * Mounting the filesystem creates a controller file, used first for
+ * Mounting the file system creates a controller file, used first for
  * function configuration then later for event monitoring.
  */
 
-
 static struct inode *__must_check
 ffs_sb_make_inode(struct super_block *sb, void *data,
 		  const struct file_operations *fops,
@@ -996,9 +992,7 @@
 	return inode;
 }
 
-
 /* Create "regular" file */
-
 static struct inode *ffs_sb_create_file(struct super_block *sb,
 					const char *name, void *data,
 					const struct file_operations *fops,
@@ -1027,9 +1021,7 @@
 	return inode;
 }
 
-
 /* Super block */
-
 static const struct super_operations ffs_sb_operations = {
 	.statfs =	simple_statfs,
 	.drop_inode =	generic_delete_inode,
@@ -1050,7 +1042,7 @@
 
 	ENTER();
 
-	/* Initialize data */
+	/* Initialise data */
 	ffs = ffs_data_new();
 	if (unlikely(!ffs))
 		goto enomem0;
@@ -1096,7 +1088,6 @@
 	return -ENOMEM;
 }
 
-
 static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
 {
 	ENTER();
@@ -1116,7 +1107,7 @@
 		/* Value limit */
 		eq = strchr(opts, '=');
 		if (unlikely(!eq)) {
-			FERR("'=' missing in %s", opts);
+			pr_err("'=' missing in %s\n", opts);
 			return -EINVAL;
 		}
 		*eq = 0;
@@ -1124,7 +1115,7 @@
 		/* Parse value */
 		value = simple_strtoul(eq + 1, &end, 0);
 		if (unlikely(*end != ',' && *end != 0)) {
-			FERR("%s: invalid value: %s", opts, eq + 1);
+			pr_err("%s: invalid value: %s\n", opts, eq + 1);
 			return -EINVAL;
 		}
 
@@ -1159,7 +1150,7 @@
 
 		default:
 invalid:
-			FERR("%s: invalid option", opts);
+			pr_err("%s: invalid option\n", opts);
 			return -EINVAL;
 		}
 
@@ -1172,7 +1163,6 @@
 	return 0;
 }
 
-
 /* "mount -t functionfs dev_name /dev/function" ends up here */
 
 static struct dentry *
@@ -1224,10 +1214,8 @@
 };
 
 
-
 /* Driver's main init/cleanup functions *************************************/
 
-
 static int functionfs_init(void)
 {
 	int ret;
@@ -1236,9 +1224,9 @@
 
 	ret = register_filesystem(&ffs_fs_type);
 	if (likely(!ret))
-		FINFO("file system registered");
+		pr_info("file system registered\n");
 	else
-		FERR("failed registering file system (%d)", ret);
+		pr_err("failed registering file system (%d)\n", ret);
 
 	return ret;
 }
@@ -1247,18 +1235,16 @@
 {
 	ENTER();
 
-	FINFO("unloading");
+	pr_info("unloading\n");
 	unregister_filesystem(&ffs_fs_type);
 }
 
 
-
 /* ffs_data and ffs_function construction and destruction code **************/
 
 static void ffs_data_clear(struct ffs_data *ffs);
 static void ffs_data_reset(struct ffs_data *ffs);
 
-
 static void ffs_data_get(struct ffs_data *ffs)
 {
 	ENTER();
@@ -1279,7 +1265,7 @@
 	ENTER();
 
 	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
-		FINFO("%s(): freeing", __func__);
+		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
 		BUG_ON(mutex_is_locked(&ffs->mutex) ||
 		       spin_is_locked(&ffs->ev.waitq.lock) ||
@@ -1289,8 +1275,6 @@
 	}
 }
 
-
-
 static void ffs_data_closed(struct ffs_data *ffs)
 {
 	ENTER();
@@ -1303,7 +1287,6 @@
 	ffs_data_put(ffs);
 }
 
-
 static struct ffs_data *ffs_data_new(void)
 {
 	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
@@ -1326,7 +1309,6 @@
 	return ffs;
 }
 
-
 static void ffs_data_clear(struct ffs_data *ffs)
 {
 	ENTER();
@@ -1344,7 +1326,6 @@
 	kfree(ffs->stringtabs);
 }
 
-
 static void ffs_data_reset(struct ffs_data *ffs)
 {
 	ENTER();
@@ -1407,7 +1388,6 @@
 	return 0;
 }
 
-
 static void functionfs_unbind(struct ffs_data *ffs)
 {
 	ENTER();
@@ -1420,7 +1400,6 @@
 	}
 }
 
-
 static int ffs_epfiles_create(struct ffs_data *ffs)
 {
 	struct ffs_epfile *epfile, *epfiles;
@@ -1451,7 +1430,6 @@
 	return 0;
 }
 
-
 static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
 {
 	struct ffs_epfile *epfile = epfiles;
@@ -1471,7 +1449,6 @@
 	kfree(epfiles);
 }
 
-
 static int functionfs_bind_config(struct usb_composite_dev *cdev,
 				  struct usb_configuration *c,
 				  struct ffs_data *ffs)
@@ -1491,7 +1468,6 @@
 	func->function.bind    = ffs_func_bind;
 	func->function.unbind  = ffs_func_unbind;
 	func->function.set_alt = ffs_func_set_alt;
-	/*func->function.get_alt = ffs_func_get_alt;*/
 	func->function.disable = ffs_func_disable;
 	func->function.setup   = ffs_func_setup;
 	func->function.suspend = ffs_func_suspend;
@@ -1516,14 +1492,15 @@
 	ffs_data_put(func->ffs);
 
 	kfree(func->eps);
-	/* eps and interfaces_nums are allocated in the same chunk so
+	/*
+	 * eps and interfaces_nums are allocated in the same chunk so
 	 * only one free is required.  Descriptors are also allocated
-	 * in the same chunk. */
+	 * in the same chunk.
+	 */
 
 	kfree(func);
 }
 
-
 static void ffs_func_eps_disable(struct ffs_function *func)
 {
 	struct ffs_ep *ep         = func->eps;
@@ -1581,11 +1558,12 @@
 
 /* Parsing and building descriptors and strings *****************************/
 
-
-/* This validates if data pointed by data is a valid USB descriptor as
+/*
+ * This validates if data pointed by data is a valid USB descriptor as
  * well as record how many interfaces, endpoints and strings are
- * required by given configuration.  Returns address afther the
- * descriptor or NULL if data is invalid. */
+ * required by given configuration.  Returns address after the
+ * descriptor or NULL if data is invalid.
+ */
 
 enum ffs_entity_type {
 	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
@@ -1607,14 +1585,14 @@
 
 	/* At least two bytes are required: length and type */
 	if (len < 2) {
-		FVDBG("descriptor too short");
+		pr_vdebug("descriptor too short\n");
 		return -EINVAL;
 	}
 
 	/* If we have at least as many bytes as the descriptor takes? */
 	length = _ds->bLength;
 	if (len < length) {
-		FVDBG("descriptor longer then available data");
+		pr_vdebug("descriptor longer than available data\n");
 		return -EINVAL;
 	}
 
@@ -1622,15 +1600,15 @@
 #define __entity_check_STRING(val)     (val)
 #define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
 #define __entity(type, val) do {					\
-		FVDBG("entity " #type "(%02x)", (val));			\
+		pr_vdebug("entity " #type "(%02x)\n", (val));		\
 		if (unlikely(!__entity_check_ ##type(val))) {		\
-			FVDBG("invalid entity's value");		\
+			pr_vdebug("invalid entity's value\n");		\
 			return -EINVAL;					\
 		}							\
 		ret = entity(FFS_ ##type, &val, _ds, priv);		\
 		if (unlikely(ret < 0)) {				\
-			FDBG("entity " #type "(%02x); ret = %d",	\
-			     (val), ret);				\
+			pr_debug("entity " #type "(%02x); ret = %d\n",	\
+				 (val), ret);				\
 			return ret;					\
 		}							\
 	} while (0)
@@ -1642,12 +1620,13 @@
 	case USB_DT_STRING:
 	case USB_DT_DEVICE_QUALIFIER:
 		/* function can't have any of those */
-		FVDBG("descriptor reserved for gadget: %d", _ds->bDescriptorType);
+		pr_vdebug("descriptor reserved for gadget: %d\n",
+		      _ds->bDescriptorType);
 		return -EINVAL;
 
 	case USB_DT_INTERFACE: {
 		struct usb_interface_descriptor *ds = (void *)_ds;
-		FVDBG("interface descriptor");
+		pr_vdebug("interface descriptor\n");
 		if (length != sizeof *ds)
 			goto inv_length;
 
@@ -1659,7 +1638,7 @@
 
 	case USB_DT_ENDPOINT: {
 		struct usb_endpoint_descriptor *ds = (void *)_ds;
-		FVDBG("endpoint descriptor");
+		pr_vdebug("endpoint descriptor\n");
 		if (length != USB_DT_ENDPOINT_SIZE &&
 		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
 			goto inv_length;
@@ -1674,7 +1653,7 @@
 
 	case USB_DT_INTERFACE_ASSOCIATION: {
 		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
-		FVDBG("interface association descriptor");
+		pr_vdebug("interface association descriptor\n");
 		if (length != sizeof *ds)
 			goto inv_length;
 		if (ds->iFunction)
@@ -1688,17 +1667,17 @@
 	case USB_DT_SECURITY:
 	case USB_DT_CS_RADIO_CONTROL:
 		/* TODO */
-		FVDBG("unimplemented descriptor: %d", _ds->bDescriptorType);
+		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
 		return -EINVAL;
 
 	default:
 		/* We should never be here */
-		FVDBG("unknown descriptor: %d", _ds->bDescriptorType);
+		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
 		return -EINVAL;
 
-	inv_length:
-		FVDBG("invalid length: %d (descriptor %d)",
-		      _ds->bLength, _ds->bDescriptorType);
+inv_length:
+		pr_vdebug("invalid length: %d (descriptor %d)\n",
+			  _ds->bLength, _ds->bDescriptorType);
 		return -EINVAL;
 	}
 
@@ -1711,7 +1690,6 @@
 	return length;
 }
 
-
 static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
 				     ffs_entity_callback entity, void *priv)
 {
@@ -1726,10 +1704,11 @@
 		if (num == count)
 			data = NULL;
 
-		/* Record "descriptor" entitny */
+		/* Record "descriptor" entity */
 		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
 		if (unlikely(ret < 0)) {
-			FDBG("entity DESCRIPTOR(%02lx); ret = %d", num, ret);
+			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
+				 num, ret);
 			return ret;
 		}
 
@@ -1738,7 +1717,7 @@
 
 		ret = ffs_do_desc(data, len, entity, priv);
 		if (unlikely(ret < 0)) {
-			FDBG("%s returns %d", __func__, ret);
+			pr_debug("%s returns %d\n", __func__, ret);
 			return ret;
 		}
 
@@ -1748,7 +1727,6 @@
 	}
 }
 
-
 static int __ffs_data_do_entity(enum ffs_entity_type type,
 				u8 *valuep, struct usb_descriptor_header *desc,
 				void *priv)
@@ -1762,16 +1740,20 @@
 		break;
 
 	case FFS_INTERFACE:
-		/* Interfaces are indexed from zero so if we
+		/*
+		 * Interfaces are indexed from zero so if we
 		 * encountered interface "n" then there are at least
-		 * "n+1" interfaces. */
+		 * "n+1" interfaces.
+		 */
 		if (*valuep >= ffs->interfaces_count)
 			ffs->interfaces_count = *valuep + 1;
 		break;
 
 	case FFS_STRING:
-		/* Strings are indexed from 1 (0 is magic ;) reserved
-		 * for languages list or some such) */
+		/*
+		 * Strings are indexed from 1 (0 is magic ;) reserved
+		 * for languages list or some such)
+		 */
 		if (*valuep > ffs->strings_count)
 			ffs->strings_count = *valuep;
 		break;
@@ -1786,7 +1768,6 @@
 	return 0;
 }
 
-
 static int __ffs_data_got_descs(struct ffs_data *ffs,
 				char *const _data, size_t len)
 {
@@ -1849,8 +1830,6 @@
 	return ret;
 }
 
-
-
 static int __ffs_data_got_strings(struct ffs_data *ffs,
 				  char *const _data, size_t len)
 {
@@ -1876,17 +1855,17 @@
 	if (unlikely(str_count < needed_count))
 		goto error;
 
-	/* If we don't need any strings just return and free all
-	 * memory */
+	/*
+	 * If we don't need any strings just return and free all
+	 * memory.
+	 */
 	if (!needed_count) {
 		kfree(_data);
 		return 0;
 	}
 
-	/* Allocate */
+	/* Allocate everything in one chunk so there's less maintenance. */
 	{
-		/* Allocate everything in one chunk so there's less
-		 * maintanance. */
 		struct {
 			struct usb_gadget_strings *stringtabs[lang_count + 1];
 			struct usb_gadget_strings stringtab[lang_count];
@@ -1937,13 +1916,17 @@
 			if (unlikely(length == len))
 				goto error_free;
 
-			/* user may provide more strings then we need,
-			 * if that's the case we simply ingore the
-			 * rest */
+			/*
+			 * User may provide more strings than we need,
+			 * if that's the case we simply ignore the
+			 * rest
+			 */
 			if (likely(needed)) {
-				/* s->id will be set while adding
+				/*
+				 * s->id will be set while adding
 				 * function to configuration so for
-				 * now just leave garbage here. */
+				 * now just leave garbage here.
+				 */
 				s->s = data;
 				--needed;
 				++s;
@@ -1977,8 +1960,6 @@
 }
 
 
-
-
 /* Events handling and management *******************************************/
 
 static void __ffs_event_add(struct ffs_data *ffs,
@@ -1987,29 +1968,32 @@
 	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
 	int neg = 0;
 
-	/* Abort any unhandled setup */
-	/* We do not need to worry about some cmpxchg() changing value
+	/*
+	 * Abort any unhandled setup
+	 *
+	 * We do not need to worry about some cmpxchg() changing value
 	 * of ffs->setup_state without holding the lock because when
 	 * state is FFS_SETUP_PENDING cmpxchg() in several places in
-	 * the source does nothing. */
+	 * the source does nothing.
+	 */
 	if (ffs->setup_state == FFS_SETUP_PENDING)
 		ffs->setup_state = FFS_SETUP_CANCELED;
 
 	switch (type) {
 	case FUNCTIONFS_RESUME:
 		rem_type2 = FUNCTIONFS_SUSPEND;
-		/* FALL THGOUTH */
+		/* FALL THROUGH */
 	case FUNCTIONFS_SUSPEND:
 	case FUNCTIONFS_SETUP:
 		rem_type1 = type;
-		/* discard all similar events */
+		/* Discard all similar events */
 		break;
 
 	case FUNCTIONFS_BIND:
 	case FUNCTIONFS_UNBIND:
 	case FUNCTIONFS_DISABLE:
 	case FUNCTIONFS_ENABLE:
-		/* discard everything other then power management. */
+		/* Discard everything other than power management. */
 		rem_type1 = FUNCTIONFS_SUSPEND;
 		rem_type2 = FUNCTIONFS_RESUME;
 		neg = 1;
@@ -2026,11 +2010,11 @@
 			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
 				*out++ = *ev;
 			else
-				FVDBG("purging event %d", *ev);
+				pr_vdebug("purging event %d\n", *ev);
 		ffs->ev.count = out - ffs->ev.types;
 	}
 
-	FVDBG("adding event %d", type);
+	pr_vdebug("adding event %d\n", type);
 	ffs->ev.types[ffs->ev.count++] = type;
 	wake_up_locked(&ffs->ev.waitq);
 }
@@ -2055,8 +2039,10 @@
 	struct ffs_function *func = priv;
 	struct ffs_ep *ffs_ep;
 
-	/* If hs_descriptors is not NULL then we are reading hs
-	 * descriptors now */
+	/*
+	 * If hs_descriptors is not NULL then we are reading hs
+	 * descriptors now
+	 */
 	const int isHS = func->function.hs_descriptors != NULL;
 	unsigned idx;
 
@@ -2075,9 +2061,9 @@
 	ffs_ep = func->eps + idx;
 
 	if (unlikely(ffs_ep->descs[isHS])) {
-		FVDBG("two %sspeed descriptors for EP %d",
-		      isHS ? "high" : "full",
-		      ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+		pr_vdebug("two %sspeed descriptors for EP %d\n",
+			  isHS ? "high" : "full",
+			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 		return -EINVAL;
 	}
 	ffs_ep->descs[isHS] = ds;
@@ -2091,11 +2077,11 @@
 		struct usb_request *req;
 		struct usb_ep *ep;
 
-		FVDBG("autoconfig");
+		pr_vdebug("autoconfig\n");
 		ep = usb_ep_autoconfig(func->gadget, ds);
 		if (unlikely(!ep))
 			return -ENOTSUPP;
-		ep->driver_data = func->eps + idx;;
+		ep->driver_data = func->eps + idx;
 
 		req = usb_ep_alloc_request(ep, GFP_KERNEL);
 		if (unlikely(!req))
@@ -2111,7 +2097,6 @@
 	return 0;
 }
 
-
 static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
 				   struct usb_descriptor_header *desc,
 				   void *priv)
@@ -2143,8 +2128,10 @@
 		break;
 
 	case FFS_ENDPOINT:
-		/* USB_DT_ENDPOINT are handled in
-		 * __ffs_func_bind_do_descs(). */
+		/*
+		 * USB_DT_ENDPOINT are handled in
+		 * __ffs_func_bind_do_descs().
+		 */
 		if (desc->bDescriptorType == USB_DT_ENDPOINT)
 			return 0;
 
@@ -2160,7 +2147,7 @@
 		break;
 	}
 
-	FVDBG("%02x -> %02x", *valuep, newValue);
+	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
 	*valuep = newValue;
 	return 0;
 }
@@ -2211,9 +2198,11 @@
 	func->eps             = data->eps;
 	func->interfaces_nums = data->inums;
 
-	/* Go throught all the endpoint descriptors and allocate
+	/*
+	 * Go through all the endpoint descriptors and allocate
 	 * endpoints first, so that later we can rewrite the endpoint
-	 * numbers without worying that it may be described later on. */
+	 * numbers without worrying that it may be described later on.
+	 */
 	if (likely(full)) {
 		func->function.descriptors = data->fs_descs;
 		ret = ffs_do_descs(ffs->fs_descs_count,
@@ -2234,9 +2223,11 @@
 				   __ffs_func_bind_do_descs, func);
 	}
 
-	/* Now handle interface numbers allocation and interface and
-	 * enpoint numbers rewritting.  We can do that in one go
-	 * now. */
+	/*
+	 * Now handle interface numbers allocation and interface and
+	 * endpoint numbers rewriting.  We can do that in one go
+	 * now.
+	 */
 	ret = ffs_do_descs(ffs->fs_descs_count +
 			   (high ? ffs->hs_descs_count : 0),
 			   data->raw_descs, sizeof data->raw_descs,
@@ -2274,7 +2265,6 @@
 	ffs_func_free(func);
 }
 
-
 static int ffs_func_set_alt(struct usb_function *f,
 			    unsigned interface, unsigned alt)
 {
@@ -2322,20 +2312,21 @@
 
 	ENTER();
 
-	FVDBG("creq->bRequestType = %02x", creq->bRequestType);
-	FVDBG("creq->bRequest     = %02x", creq->bRequest);
-	FVDBG("creq->wValue       = %04x", le16_to_cpu(creq->wValue));
-	FVDBG("creq->wIndex       = %04x", le16_to_cpu(creq->wIndex));
-	FVDBG("creq->wLength      = %04x", le16_to_cpu(creq->wLength));
+	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
+	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
+	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
+	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
+	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
 
-	/* Most requests directed to interface go throught here
+	/*
+	 * Most requests directed to interface go through here
 	 * (notable exceptions are set/get interface) so we need to
 	 * handle them.  All other either handled by composite or
 	 * passed to usb_configuration->setup() (if one is set).  No
 	 * matter, we will handle requests directed to endpoint here
 	 * as well (as it's straightforward) but what to do with any
-	 * other request? */
-
+	 * other request?
+	 */
 	if (ffs->state != FFS_ACTIVE)
 		return -ENODEV;
 
@@ -2378,8 +2369,7 @@
 }
 
 
-
-/* Enpoint and interface numbers reverse mapping ****************************/
+/* Endpoint and interface numbers reverse mapping ***************************/
 
 static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
 {
@@ -2410,7 +2400,6 @@
 		: mutex_lock_interruptible(mutex);
 }
 
-
 static char *ffs_prepare_buffer(const char * __user buf, size_t len)
 {
 	char *data;
@@ -2427,7 +2416,7 @@
 		return ERR_PTR(-EFAULT);
 	}
 
-	FVDBG("Buffer from user space:");
+	pr_vdebug("Buffer from user space:\n");
 	ffs_dump_mem("", data, len);
 
 	return data;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 838286b..b5dbb23 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -37,7 +37,6 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-
 /*
  * The Mass Storage Function acts as a USB Mass Storage device,
  * appearing to the host as a disk drive or as a CD-ROM drive.  In
@@ -185,7 +184,6 @@
  * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
  */
 
-
 /*
  *				Driver Design
  *
@@ -275,7 +273,6 @@
 /* #define VERBOSE_DEBUG */
 /* #define DUMP_MSGS */
 
-
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/dcache.h>
@@ -300,7 +297,6 @@
 #include "gadget_chips.h"
 
 
-
 /*------------------------------------------------------------------------*/
 
 #define FSG_DRIVER_DESC		"Mass Storage Function"
@@ -308,7 +304,6 @@
 
 static const char fsg_string_interface[] = "Mass Storage";
 
-
 #define FSG_NO_INTR_EP 1
 #define FSG_NO_DEVICE_STRINGS    1
 #define FSG_NO_OTG               1
@@ -324,25 +319,30 @@
 
 /* FSF callback functions */
 struct fsg_operations {
-	/* Callback function to call when thread exits.  If no
+	/*
+	 * Callback function to call when thread exits.  If no
 	 * callback is set or it returns value lower then zero MSF
 	 * will force eject all LUNs it operates on (including those
 	 * marked as non-removable or with prevent_medium_removal flag
-	 * set). */
+	 * set).
+	 */
 	int (*thread_exits)(struct fsg_common *common);
 
-	/* Called prior to ejection.  Negative return means error,
+	/*
+	 * Called prior to ejection.  Negative return means error,
 	 * zero means to continue with ejection, positive means not to
-	 * eject. */
+	 * eject.
+	 */
 	int (*pre_eject)(struct fsg_common *common,
 			 struct fsg_lun *lun, int num);
-	/* Called after ejection.  Negative return means error, zero
-	 * or positive is just a success. */
+	/*
+	 * Called after ejection.  Negative return means error, zero
+	 * or positive is just a success.
+	 */
 	int (*post_eject)(struct fsg_common *common,
 			  struct fsg_lun *lun, int num);
 };
 
-
 /* Data shared by all the FSG instances. */
 struct fsg_common {
 	struct usb_gadget	*gadget;
@@ -398,14 +398,15 @@
 	/* Gadget's private data. */
 	void			*private_data;
 
-	/* Vendor (8 chars), product (16 chars), release (4
-	 * hexadecimal digits) and NUL byte */
+	/*
+	 * Vendor (8 chars), product (16 chars), release (4
+	 * hexadecimal digits) and NUL byte
+	 */
 	char inquiry_string[8 + 16 + 4 + 1];
 
 	struct kref		ref;
 };
 
-
 struct fsg_config {
 	unsigned nluns;
 	struct fsg_lun_config {
@@ -431,7 +432,6 @@
 	char			can_stall;
 };
 
-
 struct fsg_dev {
 	struct usb_function	function;
 	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
@@ -449,7 +449,6 @@
 	struct usb_ep		*bulk_out;
 };
 
-
 static inline int __fsg_is_set(struct fsg_common *common,
 			       const char *func, unsigned line)
 {
@@ -462,13 +461,11 @@
 
 #define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
 
-
 static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
 {
 	return container_of(f, struct fsg_dev, function);
 }
 
-
 typedef void (*fsg_routine_t)(struct fsg_dev *);
 
 static int exception_in_progress(struct fsg_common *common)
@@ -478,7 +475,7 @@
 
 /* Make bulk-out requests be divisible by the maxpacket size */
 static void set_bulk_out_req_length(struct fsg_common *common,
-		struct fsg_buffhd *bh, unsigned int length)
+				    struct fsg_buffhd *bh, unsigned int length)
 {
 	unsigned int	rem;
 
@@ -489,6 +486,7 @@
 	bh->outreq->length = length;
 }
 
+
 /*-------------------------------------------------------------------------*/
 
 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
@@ -519,14 +517,15 @@
 		wake_up_process(common->thread_task);
 }
 
-
 static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 {
 	unsigned long		flags;
 
-	/* Do nothing if a higher-priority exception is already in progress.
+	/*
+	 * Do nothing if a higher-priority exception is already in progress.
 	 * If a lower-or-equal priority exception is in progress, preempt it
-	 * and notify the main thread by sending it a signal. */
+	 * and notify the main thread by sending it a signal.
+	 */
 	spin_lock_irqsave(&common->lock, flags);
 	if (common->state <= new_state) {
 		common->exception_req_tag = common->ep0_req_tag;
@@ -555,10 +554,10 @@
 	return rc;
 }
 
+
 /*-------------------------------------------------------------------------*/
 
-/* Bulk and interrupt endpoint completion handlers.
- * These always run in_irq. */
+/* Completion handlers. These always run in_irq. */
 
 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
 {
@@ -567,7 +566,7 @@
 
 	if (req->status || req->actual != req->length)
 		DBG(common, "%s --> %d, %u/%u\n", __func__,
-				req->status, req->actual, req->length);
+		    req->status, req->actual, req->length);
 	if (req->status == -ECONNRESET)		/* Request was cancelled */
 		usb_ep_fifo_flush(ep);
 
@@ -588,8 +587,7 @@
 	dump_msg(common, "bulk-out", req->buf, req->actual);
 	if (req->status || req->actual != bh->bulk_out_intended_length)
 		DBG(common, "%s --> %d, %u/%u\n", __func__,
-				req->status, req->actual,
-				bh->bulk_out_intended_length);
+		    req->status, req->actual, bh->bulk_out_intended_length);
 	if (req->status == -ECONNRESET)		/* Request was cancelled */
 		usb_ep_fifo_flush(ep);
 
@@ -602,13 +600,8 @@
 	spin_unlock(&common->lock);
 }
 
-
-/*-------------------------------------------------------------------------*/
-
-/* Ep0 class-specific handlers.  These always run in_irq. */
-
 static int fsg_setup(struct usb_function *f,
-		const struct usb_ctrlrequest *ctrl)
+		     const struct usb_ctrlrequest *ctrl)
 {
 	struct fsg_dev		*fsg = fsg_from_func(f);
 	struct usb_request	*req = fsg->common->ep0req;
@@ -628,8 +621,10 @@
 		if (w_index != fsg->interface_number || w_value != 0)
 			return -EDOM;
 
-		/* Raise an exception to stop the current operation
-		 * and reinitialize our state. */
+		/*
+		 * Raise an exception to stop the current operation
+		 * and reinitialize our state.
+		 */
 		DBG(fsg, "bulk reset request\n");
 		raise_exception(fsg->common, FSG_STATE_RESET);
 		return DELAYED_STATUS;
@@ -641,7 +636,7 @@
 		if (w_index != fsg->interface_number || w_value != 0)
 			return -EDOM;
 		VDBG(fsg, "get max LUN\n");
-		*(u8 *) req->buf = fsg->common->nluns - 1;
+		*(u8 *)req->buf = fsg->common->nluns - 1;
 
 		/* Respond with data/status */
 		req->length = min((u16)1, w_length);
@@ -649,8 +644,7 @@
 	}
 
 	VDBG(fsg,
-	     "unknown class-specific control req "
-	     "%02x.%02x v%04x i%04x l%u\n",
+	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
 	     ctrl->bRequestType, ctrl->bRequest,
 	     le16_to_cpu(ctrl->wValue), w_index, w_length);
 	return -EOPNOTSUPP;
@@ -661,11 +655,10 @@
 
 /* All the following routines run in process context */
 
-
 /* Use this for bulk or interrupt transfers, not ep0 */
 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
-		struct usb_request *req, int *pbusy,
-		enum fsg_buffer_state *state)
+			   struct usb_request *req, int *pbusy,
+			   enum fsg_buffer_state *state)
 {
 	int	rc;
 
@@ -683,25 +676,34 @@
 
 		/* We can't do much more than wait for a reset */
 
-		/* Note: currently the net2280 driver fails zero-length
-		 * submissions if DMA is enabled. */
-		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
-						req->length == 0))
+		/*
+		 * Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled.
+		 */
+		if (rc != -ESHUTDOWN &&
+		    !(rc == -EOPNOTSUPP && req->length == 0))
 			WARNING(fsg, "error in submission: %s --> %d\n",
-					ep->name, rc);
+				ep->name, rc);
 	}
 }
 
-#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
-	if (fsg_is_set(common))						\
-		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
-			       req, pbusy, state);			\
-	else
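+/*
+ * These two helpers replace the START_TRANSFER* macros: they start a
+ * bulk IN/OUT transfer on the current fsg, or return false when
+ * common->fsg is not set.
+ */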
+static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	if (!fsg_is_set(common))
+		return false;
+	start_transfer(common->fsg, common->fsg->bulk_in,
+		       bh->inreq, &bh->inreq_busy, &bh->state);
+	return true;
+}
 
-#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
-	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
-
-
+static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	if (!fsg_is_set(common))
+		return false;
+	start_transfer(common->fsg, common->fsg->bulk_out,
+		       bh->outreq, &bh->outreq_busy, &bh->state);
+	return true;
+}
 
 static int sleep_thread(struct fsg_common *common)
 {
@@ -739,16 +741,20 @@
 	unsigned int		partial_page;
 	ssize_t			nread;
 
-	/* Get the starting Logical Block Address and check that it's
-	 * not too big */
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big.
+	 */
 	if (common->cmnd[0] == READ_6)
 		lba = get_unaligned_be24(&common->cmnd[1]);
 	else {
 		lba = get_unaligned_be32(&common->cmnd[2]);
 
-		/* We allow DPO (Disable Page Out = don't save data in the
+		/*
+		 * We allow DPO (Disable Page Out = don't save data in the
 		 * cache) and FUA (Force Unit Access = don't read from the
-		 * cache), but we don't implement them. */
+		 * cache), but we don't implement them.
+		 */
 		if ((common->cmnd[1] & ~0x18) != 0) {
 			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
 			return -EINVAL;
@@ -766,22 +772,23 @@
 		return -EIO;		/* No default reply */
 
 	for (;;) {
-
-		/* Figure out how much we need to read:
+		/*
+		 * Figure out how much we need to read:
 		 * Try to read the remaining amount.
 		 * But don't read more than the buffer size.
 		 * And don't try to read past the end of the file.
 		 * Finally, if we're not at a page boundary, don't read past
 		 *	the next page.
 		 * If this means reading 0 then we were asked to read past
-		 *	the end of file. */
+		 *	the end of file.
+		 */
 		amount = min(amount_left, FSG_BUFLEN);
-		amount = min((loff_t) amount,
-				curlun->file_length - file_offset);
+		amount = min((loff_t)amount,
+			     curlun->file_length - file_offset);
 		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
 		if (partial_page > 0)
-			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
-					partial_page);
+			amount = min(amount, (unsigned int)PAGE_CACHE_SIZE -
+					     partial_page);
 
 		/* Wait for the next buffer to become available */
 		bh = common->next_buffhd_to_fill;
@@ -791,8 +798,10 @@
 				return rc;
 		}
 
-		/* If we were asked to read past the end of file,
-		 * end with an empty buffer. */
+		/*
+		 * If we were asked to read past the end of file,
+		 * end with an empty buffer.
+		 */
 		if (amount == 0) {
 			curlun->sense_data =
 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
@@ -806,21 +815,19 @@
 		/* Perform the read */
 		file_offset_tmp = file_offset;
 		nread = vfs_read(curlun->filp,
-				(char __user *) bh->buf,
-				amount, &file_offset_tmp);
+				 (char __user *)bh->buf,
+				 amount, &file_offset_tmp);
 		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
-				(unsigned long long) file_offset,
-				(int) nread);
+		      (unsigned long long)file_offset, (int)nread);
 		if (signal_pending(current))
 			return -EINTR;
 
 		if (nread < 0) {
-			LDBG(curlun, "error in file read: %d\n",
-					(int) nread);
+			LDBG(curlun, "error in file read: %d\n", (int)nread);
 			nread = 0;
 		} else if (nread < amount) {
 			LDBG(curlun, "partial file read: %d/%u\n",
-					(int) nread, amount);
+			     (int)nread, amount);
 			nread -= (nread & 511);	/* Round down to a block */
 		}
 		file_offset  += nread;
@@ -842,10 +849,8 @@
 
 		/* Send this buffer and go read some more */
 		bh->inreq->zero = 0;
-		START_TRANSFER_OR(common, bulk_in, bh->inreq,
-			       &bh->inreq_busy, &bh->state)
-			/* Don't know what to do if
-			 * common->fsg is NULL */
+		if (!start_in_transfer(common, bh))
+			/* Don't know what to do if common->fsg is NULL */
 			return -EIO;
 		common->next_buffhd_to_fill = bh->next;
 	}
@@ -877,17 +882,21 @@
 	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
 	spin_unlock(&curlun->filp->f_lock);
 
-	/* Get the starting Logical Block Address and check that it's
-	 * not too big */
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big
+	 */
 	if (common->cmnd[0] == WRITE_6)
 		lba = get_unaligned_be24(&common->cmnd[1]);
 	else {
 		lba = get_unaligned_be32(&common->cmnd[2]);
 
-		/* We allow DPO (Disable Page Out = don't save data in the
+		/*
+		 * We allow DPO (Disable Page Out = don't save data in the
 		 * cache) and FUA (Force Unit Access = write directly to the
 		 * medium).  We don't implement DPO; we implement FUA by
-		 * performing synchronous output. */
+		 * performing synchronous output.
+		 */
 		if (common->cmnd[1] & ~0x18) {
 			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
 			return -EINVAL;
@@ -915,7 +924,8 @@
 		bh = common->next_buffhd_to_fill;
 		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
 
-			/* Figure out how much we want to get:
+			/*
+			 * Figure out how much we want to get:
 			 * Try to get the remaining amount.
 			 * But don't get more than the buffer size.
 			 * And don't try to go past the end of the file.
@@ -923,14 +933,15 @@
 			 *	don't go past the next page.
 			 * If this means getting 0, then we were asked
 			 *	to write past the end of file.
-			 * Finally, round down to a block boundary. */
+			 * Finally, round down to a block boundary.
+			 */
 			amount = min(amount_left_to_req, FSG_BUFLEN);
-			amount = min((loff_t) amount, curlun->file_length -
-					usb_offset);
+			amount = min((loff_t)amount,
+				     curlun->file_length - usb_offset);
 			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
 			if (partial_page > 0)
 				amount = min(amount,
-	(unsigned int) PAGE_CACHE_SIZE - partial_page);
+	(unsigned int)PAGE_CACHE_SIZE - partial_page);
 
 			if (amount == 0) {
 				get_some_more = 0;
@@ -940,11 +951,13 @@
 				curlun->info_valid = 1;
 				continue;
 			}
-			amount -= (amount & 511);
+			amount -= amount & 511;
 			if (amount == 0) {
 
-				/* Why were we were asked to transfer a
-				 * partial block? */
+				/*
+				 * Why were we asked to transfer a
+				 * partial block?
+				 */
 				get_some_more = 0;
 				continue;
 			}
@@ -956,15 +969,15 @@
 			if (amount_left_to_req == 0)
 				get_some_more = 0;
 
-			/* amount is always divisible by 512, hence by
-			 * the bulk-out maxpacket size */
+			/*
+			 * amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size
+			 */
 			bh->outreq->length = amount;
 			bh->bulk_out_intended_length = amount;
 			bh->outreq->short_not_ok = 1;
-			START_TRANSFER_OR(common, bulk_out, bh->outreq,
-					  &bh->outreq_busy, &bh->state)
-				/* Don't know what to do if
-				 * common->fsg is NULL */
+			if (!start_out_transfer(common, bh))
+				/* Dunno what to do if common->fsg is NULL */
 				return -EIO;
 			common->next_buffhd_to_fill = bh->next;
 			continue;
@@ -990,30 +1003,29 @@
 			amount = bh->outreq->actual;
 			if (curlun->file_length - file_offset < amount) {
 				LERROR(curlun,
-	"write %u @ %llu beyond end %llu\n",
-	amount, (unsigned long long) file_offset,
-	(unsigned long long) curlun->file_length);
+				       "write %u @ %llu beyond end %llu\n",
+				       amount, (unsigned long long)file_offset,
+				       (unsigned long long)curlun->file_length);
 				amount = curlun->file_length - file_offset;
 			}
 
 			/* Perform the write */
 			file_offset_tmp = file_offset;
 			nwritten = vfs_write(curlun->filp,
-					(char __user *) bh->buf,
-					amount, &file_offset_tmp);
+					     (char __user *)bh->buf,
+					     amount, &file_offset_tmp);
 			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
-					(unsigned long long) file_offset,
-					(int) nwritten);
+			      (unsigned long long)file_offset, (int)nwritten);
 			if (signal_pending(current))
 				return -EINTR;		/* Interrupted! */
 
 			if (nwritten < 0) {
 				LDBG(curlun, "error in file write: %d\n",
-						(int) nwritten);
+				     (int)nwritten);
 				nwritten = 0;
 			} else if (nwritten < amount) {
 				LDBG(curlun, "partial file write: %d/%u\n",
-						(int) nwritten, amount);
+				     (int)nwritten, amount);
 				nwritten -= (nwritten & 511);
 				/* Round down to a block */
 			}
@@ -1086,16 +1098,20 @@
 	unsigned int		amount;
 	ssize_t			nread;
 
-	/* Get the starting Logical Block Address and check that it's
-	 * not too big */
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big.
+	 */
 	lba = get_unaligned_be32(&common->cmnd[2]);
 	if (lba >= curlun->num_sectors) {
 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
 		return -EINVAL;
 	}
 
-	/* We allow DPO (Disable Page Out = don't save data in the
-	 * cache) but we don't implement it. */
+	/*
+	 * We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it.
+	 */
 	if (common->cmnd[1] & ~0x10) {
 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
 		return -EINVAL;
@@ -1120,16 +1136,17 @@
 
 	/* Just try to read the requested blocks */
 	while (amount_left > 0) {
-
-		/* Figure out how much we need to read:
+		/*
+		 * Figure out how much we need to read:
 		 * Try to read the remaining amount, but not more than
 		 * the buffer size.
 		 * And don't try to read past the end of the file.
 		 * If this means reading 0 then we were asked to read
-		 * past the end of file. */
+		 * past the end of file.
+		 */
 		amount = min(amount_left, FSG_BUFLEN);
-		amount = min((loff_t) amount,
-				curlun->file_length - file_offset);
+		amount = min((loff_t)amount,
+			     curlun->file_length - file_offset);
 		if (amount == 0) {
 			curlun->sense_data =
 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
@@ -1150,13 +1167,12 @@
 			return -EINTR;
 
 		if (nread < 0) {
-			LDBG(curlun, "error in file verify: %d\n",
-					(int) nread);
+			LDBG(curlun, "error in file verify: %d\n", (int)nread);
 			nread = 0;
 		} else if (nread < amount) {
 			LDBG(curlun, "partial file verify: %d/%u\n",
-					(int) nread, amount);
-			nread -= (nread & 511);	/* Round down to a sector */
+			     (int)nread, amount);
+			nread -= nread & 511;	/* Round down to a sector */
 		}
 		if (nread == 0) {
 			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
@@ -1198,7 +1214,6 @@
 	return 36;
 }
 
-
 static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	struct fsg_lun	*curlun = common->curlun;
@@ -1252,13 +1267,12 @@
 	return 18;
 }
 
-
 static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	struct fsg_lun	*curlun = common->curlun;
 	u32		lba = get_unaligned_be32(&common->cmnd[2]);
 	int		pmi = common->cmnd[8];
-	u8		*buf = (u8 *) bh->buf;
+	u8		*buf = (u8 *)bh->buf;
 
 	/* Check the PMI and LBA fields */
 	if (pmi > 1 || (pmi == 0 && lba != 0)) {
@@ -1272,13 +1286,12 @@
 	return 8;
 }
 
-
 static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	struct fsg_lun	*curlun = common->curlun;
 	int		msf = common->cmnd[1] & 0x02;
 	u32		lba = get_unaligned_be32(&common->cmnd[2]);
-	u8		*buf = (u8 *) bh->buf;
+	u8		*buf = (u8 *)bh->buf;
 
 	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
@@ -1295,13 +1308,12 @@
 	return 8;
 }
 
-
 static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	struct fsg_lun	*curlun = common->curlun;
 	int		msf = common->cmnd[1] & 0x02;
 	int		start_track = common->cmnd[6];
-	u8		*buf = (u8 *) bh->buf;
+	u8		*buf = (u8 *)bh->buf;
 
 	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
 			start_track > 1) {
@@ -1323,7 +1335,6 @@
 	return 20;
 }
 
-
 static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	struct fsg_lun	*curlun = common->curlun;
@@ -1348,10 +1359,12 @@
 	changeable_values = (pc == 1);
 	all_pages = (page_code == 0x3f);
 
-	/* Write the mode parameter header.  Fixed values are: default
+	/*
+	 * Write the mode parameter header.  Fixed values are: default
 	 * medium type, no cache control (DPOFUA), and no block descriptors.
 	 * The only variable value is the WriteProtect bit.  We will fill in
-	 * the mode data length later. */
+	 * the mode data length later.
+	 */
 	memset(buf, 0, 8);
 	if (mscmnd == MODE_SENSE) {
 		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
@@ -1365,8 +1378,10 @@
 
 	/* No block descriptors */
 
-	/* The mode pages, in numerical order.  The only page we support
-	 * is the Caching page. */
+	/*
+	 * The mode pages, in numerical order.  The only page we support
+	 * is the Caching page.
+	 */
 	if (page_code == 0x08 || all_pages) {
 		valid_page = 1;
 		buf[0] = 0x08;		/* Page code */
@@ -1388,8 +1403,10 @@
 		buf += 12;
 	}
 
-	/* Check that a valid page was requested and the mode data length
-	 * isn't too long. */
+	/*
+	 * Check that a valid page was requested and the mode data length
+	 * isn't too long.
+	 */
 	len = buf - buf0;
 	if (!valid_page || len > limit) {
 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
@@ -1404,7 +1421,6 @@
 	return len;
 }
 
-
 static int do_start_stop(struct fsg_common *common)
 {
 	struct fsg_lun	*curlun = common->curlun;
@@ -1424,8 +1440,10 @@
 	loej  = common->cmnd[4] & 0x02;
 	start = common->cmnd[4] & 0x01;
 
-	/* Our emulation doesn't support mounting; the medium is
-	 * available for use as soon as it is loaded. */
+	/*
+	 * Our emulation doesn't support mounting; the medium is
+	 * available for use as soon as it is loaded.
+	 */
 	if (start) {
 		if (!fsg_lun_is_open(curlun)) {
 			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
@@ -1466,7 +1484,6 @@
 		: 0;
 }
 
-
 static int do_prevent_allow(struct fsg_common *common)
 {
 	struct fsg_lun	*curlun = common->curlun;
@@ -1491,7 +1508,6 @@
 	return 0;
 }
 
-
 static int do_read_format_capacities(struct fsg_common *common,
 			struct fsg_buffhd *bh)
 {
@@ -1509,7 +1525,6 @@
 	return 12;
 }
 
-
 static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
 {
 	struct fsg_lun	*curlun = common->curlun;
@@ -1591,7 +1606,7 @@
 		bh->inreq->length = nsend;
 		bh->inreq->zero = 0;
 		start_transfer(fsg, fsg->bulk_in, bh->inreq,
-				&bh->inreq_busy, &bh->state);
+			       &bh->inreq_busy, &bh->state);
 		bh = fsg->common->next_buffhd_to_fill = bh->next;
 		fsg->common->usb_amount_left -= nsend;
 		nkeep = 0;
@@ -1617,7 +1632,7 @@
 
 			/* A short packet or an error ends everything */
 			if (bh->outreq->actual != bh->outreq->length ||
-					bh->outreq->status != 0) {
+			    bh->outreq->status != 0) {
 				raise_exception(common,
 						FSG_STATE_ABORT_BULK_OUT);
 				return -EINTR;
@@ -1631,15 +1646,15 @@
 		 && common->usb_amount_left > 0) {
 			amount = min(common->usb_amount_left, FSG_BUFLEN);
 
-			/* amount is always divisible by 512, hence by
-			 * the bulk-out maxpacket size */
+			/*
+			 * amount is always divisible by 512, hence by
+			 * the bulk-out maxpacket size.
+			 */
 			bh->outreq->length = amount;
 			bh->bulk_out_intended_length = amount;
 			bh->outreq->short_not_ok = 1;
-			START_TRANSFER_OR(common, bulk_out, bh->outreq,
-					  &bh->outreq_busy, &bh->state)
-				/* Don't know what to do if
-				 * common->fsg is NULL */
+			if (!start_out_transfer(common, bh))
+				/* Dunno what to do if common->fsg is NULL */
 				return -EIO;
 			common->next_buffhd_to_fill = bh->next;
 			common->usb_amount_left -= amount;
@@ -1654,7 +1669,6 @@
 	return 0;
 }
 
-
 static int finish_reply(struct fsg_common *common)
 {
 	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
@@ -1664,10 +1678,12 @@
 	case DATA_DIR_NONE:
 		break;			/* Nothing to send */
 
-	/* If we don't know whether the host wants to read or write,
+	/*
+	 * If we don't know whether the host wants to read or write,
 	 * this must be CB or CBI with an unknown command.  We mustn't
 	 * try to send or receive any data.  So stall both bulk pipes
-	 * if we can and wait for a reset. */
+	 * if we can and wait for a reset.
+	 */
 	case DATA_DIR_UNKNOWN:
 		if (!common->can_stall) {
 			/* Nothing */
@@ -1688,18 +1704,18 @@
 		/* If there's no residue, simply send the last buffer */
 		} else if (common->residue == 0) {
 			bh->inreq->zero = 0;
-			START_TRANSFER_OR(common, bulk_in, bh->inreq,
-					  &bh->inreq_busy, &bh->state)
+			if (!start_in_transfer(common, bh))
 				return -EIO;
 			common->next_buffhd_to_fill = bh->next;
 
-		/* For Bulk-only, if we're allowed to stall then send the
+		/*
+		 * For Bulk-only, if we're allowed to stall then send the
 		 * short packet and halt the bulk-in endpoint.  If we can't
-		 * stall, pad out the remaining data with 0's. */
+		 * stall, pad out the remaining data with 0's.
+		 */
 		} else if (common->can_stall) {
 			bh->inreq->zero = 1;
-			START_TRANSFER_OR(common, bulk_in, bh->inreq,
-					  &bh->inreq_busy, &bh->state)
+			if (!start_in_transfer(common, bh))
 				/* Don't know what to do if
 				 * common->fsg is NULL */
 				rc = -EIO;
@@ -1714,8 +1730,10 @@
 		}
 		break;
 
-	/* We have processed all we want from the data the host has sent.
-	 * There may still be outstanding bulk-out requests. */
+	/*
+	 * We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests.
+	 */
 	case DATA_DIR_FROM_HOST:
 		if (common->residue == 0) {
 			/* Nothing to receive */
@@ -1725,12 +1743,14 @@
 			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
 			rc = -EINTR;
 
-		/* We haven't processed all the incoming data.  Even though
+		/*
+		 * We haven't processed all the incoming data.  Even though
 		 * we may be allowed to stall, doing so would cause a race.
 		 * The controller may already have ACK'ed all the remaining
 		 * bulk-out packets, in which case the host wouldn't see a
 		 * STALL.  Not realizing the endpoint was halted, it wouldn't
-		 * clear the halt -- leading to problems later on. */
+		 * clear the halt -- leading to problems later on.
+		 */
 #if 0
 		} else if (common->can_stall) {
 			if (fsg_is_set(common))
@@ -1740,8 +1760,10 @@
 			rc = -EINTR;
 #endif
 
-		/* We can't stall.  Read in the excess data and throw it
-		 * all away. */
+		/*
+		 * We can't stall.  Read in the excess data and throw it
+		 * all away.
+		 */
 		} else {
 			rc = throw_away_data(common);
 		}
@@ -1750,7 +1772,6 @@
 	return rc;
 }
 
-
 static int send_status(struct fsg_common *common)
 {
 	struct fsg_lun		*curlun = common->curlun;
@@ -1798,8 +1819,7 @@
 
 	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
 	bh->inreq->zero = 0;
-	START_TRANSFER_OR(common, bulk_in, bh->inreq,
-			  &bh->inreq_busy, &bh->state)
+	if (!start_in_transfer(common, bh))
 		/* Don't know what to do if common->fsg is NULL */
 		return -EIO;
 
@@ -1810,11 +1830,13 @@
 
 /*-------------------------------------------------------------------------*/
 
-/* Check whether the command is properly formed and whether its data size
- * and direction agree with the values we already have. */
+/*
+ * Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have.
+ */
 static int check_command(struct fsg_common *common, int cmnd_size,
-		enum data_direction data_dir, unsigned int mask,
-		int needs_medium, const char *name)
+			 enum data_direction data_dir, unsigned int mask,
+			 int needs_medium, const char *name)
 {
 	int			i;
 	int			lun = common->cmnd[1] >> 5;
@@ -1825,19 +1847,23 @@
 	hdlen[0] = 0;
 	if (common->data_dir != DATA_DIR_UNKNOWN)
 		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
-				common->data_size);
+			common->data_size);
 	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
 	     name, cmnd_size, dirletter[(int) data_dir],
 	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
 
-	/* We can't reply at all until we know the correct data direction
-	 * and size. */
+	/*
+	 * We can't reply at all until we know the correct data direction
+	 * and size.
+	 */
 	if (common->data_size_from_cmnd == 0)
 		data_dir = DATA_DIR_NONE;
 	if (common->data_size < common->data_size_from_cmnd) {
-		/* Host data size < Device data size is a phase error.
+		/*
+		 * Host data size < Device data size is a phase error.
 		 * Carry out the command, but only transfer as much as
-		 * we are allowed. */
+		 * we are allowed.
+		 */
 		common->data_size_from_cmnd = common->data_size;
 		common->phase_error = 1;
 	}
@@ -1845,8 +1871,7 @@
 	common->usb_amount_left = common->data_size;
 
 	/* Conflicting data directions is a phase error */
-	if (common->data_dir != data_dir
-	 && common->data_size_from_cmnd > 0) {
+	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
 		common->phase_error = 1;
 		return -EINVAL;
 	}
@@ -1854,7 +1879,8 @@
 	/* Verify the length of the command itself */
 	if (cmnd_size != common->cmnd_size) {
 
-		/* Special case workaround: There are plenty of buggy SCSI
+		/*
+		 * Special case workaround: There are plenty of buggy SCSI
 		 * implementations. Many have issues with cbw->Length
 		 * field passing a wrong command size. For those cases we
 		 * always try to work around the problem by using the length
@@ -1896,8 +1922,10 @@
 		curlun = NULL;
 		common->bad_lun_okay = 0;
 
-		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
-		 * to use unsupported LUNs; all others may not. */
+		/*
+		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not.
+		 */
 		if (common->cmnd[0] != INQUIRY &&
 		    common->cmnd[0] != REQUEST_SENSE) {
 			DBG(common, "unsupported LUN %d\n", common->lun);
@@ -1905,11 +1933,13 @@
 		}
 	}
 
-	/* If a unit attention condition exists, only INQUIRY and
-	 * REQUEST SENSE commands are allowed; anything else must fail. */
+	/*
+	 * If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail.
+	 */
 	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
-			common->cmnd[0] != INQUIRY &&
-			common->cmnd[0] != REQUEST_SENSE) {
+	    common->cmnd[0] != INQUIRY &&
+	    common->cmnd[0] != REQUEST_SENSE) {
 		curlun->sense_data = curlun->unit_attention_data;
 		curlun->unit_attention_data = SS_NO_SENSE;
 		return -EINVAL;
@@ -1935,7 +1965,6 @@
 	return 0;
 }
 
-
 static int do_scsi_command(struct fsg_common *common)
 {
 	struct fsg_buffhd	*bh;
@@ -2123,8 +2152,10 @@
 				"TEST UNIT READY");
 		break;
 
-	/* Although optional, this command is used by MS-Windows.  We
-	 * support a minimal version: BytChk must be 0. */
+	/*
+	 * Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0.
+	 */
 	case VERIFY:
 		common->data_size_from_cmnd = 0;
 		reply = check_command(common, 10, DATA_DIR_NONE,
@@ -2164,10 +2195,12 @@
 			reply = do_write(common);
 		break;
 
-	/* Some mandatory commands that we recognize but don't implement.
+	/*
+	 * Some mandatory commands that we recognize but don't implement.
 	 * They don't mean much in this setting.  It's left as an exercise
 	 * for anyone interested to implement RESERVE and RELEASE in terms
-	 * of Posix locks. */
+	 * of Posix locks.
+	 */
 	case FORMAT_UNIT:
 	case RELEASE:
 	case RESERVE:
@@ -2195,7 +2228,7 @@
 	if (reply == -EINVAL)
 		reply = 0;		/* Error reply length */
 	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
-		reply = min((u32) reply, common->data_size_from_cmnd);
+		reply = min((u32)reply, common->data_size_from_cmnd);
 		bh->inreq->length = reply;
 		bh->state = BUF_STATE_FULL;
 		common->residue -= reply;
@@ -2225,7 +2258,8 @@
 				req->actual,
 				le32_to_cpu(cbw->Signature));
 
-		/* The Bulk-only spec says we MUST stall the IN endpoint
+		/*
+		 * The Bulk-only spec says we MUST stall the IN endpoint
 		 * (6.6.1), so it's unavoidable.  It also says we must
 		 * retain this state until the next reset, but there's
 		 * no way to tell the controller driver it should ignore
@@ -2233,7 +2267,8 @@
 		 *
 		 * We aren't required to halt the OUT endpoint; instead
 		 * we can simply accept and discard any data received
-		 * until the next reset. */
+		 * until the next reset.
+		 */
 		wedge_bulk_in_endpoint(fsg);
 		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
 		return -EINVAL;
@@ -2246,8 +2281,10 @@
 				"cmdlen %u\n",
 				cbw->Lun, cbw->Flags, cbw->Length);
 
-		/* We can do anything we want here, so let's stall the
-		 * bulk pipes if we are allowed to. */
+		/*
+		 * We can do anything we want here, so let's stall the
+		 * bulk pipes if we are allowed to.
+		 */
 		if (common->can_stall) {
 			fsg_set_halt(fsg, fsg->bulk_out);
 			halt_bulk_in_endpoint(fsg);
@@ -2270,7 +2307,6 @@
 	return 0;
 }
 
-
 static int get_next_command(struct fsg_common *common)
 {
 	struct fsg_buffhd	*bh;
@@ -2287,14 +2323,15 @@
 	/* Queue a request to read a Bulk-only CBW */
 	set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
 	bh->outreq->short_not_ok = 1;
-	START_TRANSFER_OR(common, bulk_out, bh->outreq,
-			  &bh->outreq_busy, &bh->state)
+	if (!start_out_transfer(common, bh))
 		/* Don't know what to do if common->fsg is NULL */
 		return -EIO;
 
-	/* We will drain the buffer in software, which means we
+	/*
+	 * We will drain the buffer in software, which means we
 	 * can reuse it for the next filling.  No need to advance
-	 * next_buffhd_to_fill. */
+	 * next_buffhd_to_fill.
+	 */
 
 	/* Wait for the CBW to arrive */
 	while (bh->state != BUF_STATE_FULL) {
@@ -2425,7 +2462,6 @@
 
 /****************************** ALT CONFIGS ******************************/
 
-
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
@@ -2453,8 +2489,10 @@
 	struct fsg_lun		*curlun;
 	unsigned int		exception_req_tag;
 
-	/* Clear the existing signals.  Anything but SIGUSR1 is converted
-	 * into a high-priority EXIT exception. */
+	/*
+	 * Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception.
+	 */
 	for (;;) {
 		int sig =
 			dequeue_signal_lock(current, &current->blocked, &info);
@@ -2498,8 +2536,10 @@
 			usb_ep_fifo_flush(common->fsg->bulk_out);
 	}
 
-	/* Reset the I/O buffer states and pointers, the SCSI
-	 * state, and the exception.  Then invoke the handler. */
+	/*
+	 * Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler.
+	 */
 	spin_lock_irq(&common->lock);
 
 	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
@@ -2537,9 +2577,11 @@
 		break;
 
 	case FSG_STATE_RESET:
-		/* In case we were forced against our will to halt a
+		/*
+		 * In case we were forced against our will to halt a
 		 * bulk endpoint, clear the halt now.  (The SuperH UDC
-		 * requires this.) */
+		 * requires this.)
+		 */
 		if (!fsg_is_set(common))
 			break;
 		if (test_and_clear_bit(IGNORE_BULK_OUT,
@@ -2549,9 +2591,11 @@
 		if (common->ep0_req_tag == exception_req_tag)
 			ep0_queue(common);	/* Complete the status stage */
 
-		/* Technically this should go here, but it would only be
+		/*
+		 * Technically this should go here, but it would only be
 		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
-		 * CONFIG_CHANGE cases. */
+		 * CONFIG_CHANGE cases.
+		 */
 		/* for (i = 0; i < common->nluns; ++i) */
 		/*	common->luns[i].unit_attention_data = */
 		/*		SS_RESET_OCCURRED;  */
@@ -2586,8 +2630,10 @@
 {
 	struct fsg_common	*common = common_;
 
-	/* Allow the thread to be killed by a signal, but set the signal mask
-	 * to block everything but INT, TERM, KILL, and USR1. */
+	/*
+	 * Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1.
+	 */
 	allow_signal(SIGINT);
 	allow_signal(SIGTERM);
 	allow_signal(SIGKILL);
@@ -2596,9 +2642,11 @@
 	/* Allow the thread to be frozen */
 	set_freezable();
 
-	/* Arrange for userspace references to be interpreted as kernel
+	/*
+	 * Arrange for userspace references to be interpreted as kernel
 	 * pointers.  That way we can pass a kernel pointer to a routine
-	 * that expects a __user pointer and it will work okay. */
+	 * that expects a __user pointer and it will work okay.
+	 */
 	set_fs(get_ds());
 
 	/* The main loop */
@@ -2658,7 +2706,7 @@
 		up_write(&common->filesem);
 	}
 
-	/* Let the unbind and cleanup routines know the thread has exited */
+	/* Let fsg_unbind() know the thread has exited */
 	complete_and_exit(&common->thread_notifier, 0);
 }
 
@@ -2690,7 +2738,6 @@
 	kref_put(&common->ref, fsg_common_release);
 }
 
-
 static struct fsg_common *fsg_common_init(struct fsg_common *common,
 					  struct usb_composite_dev *cdev,
 					  struct fsg_config *cfg)
@@ -2736,8 +2783,10 @@
 		fsg_intf_desc.iInterface = rc;
 	}
 
-	/* Create the LUNs, open their backing files, and register the
-	 * LUN devices in sysfs. */
+	/*
+	 * Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs.
+	 */
 	curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
 	if (unlikely(!curlun)) {
 		rc = -ENOMEM;
@@ -2765,6 +2814,7 @@
 		if (rc) {
 			INFO(common, "failed to register LUN%d: %d\n", i, rc);
 			common->nluns = i;
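+			/* failed device_register() still needs put_device() */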
+			put_device(&curlun->dev);
 			goto error_release;
 		}
 
@@ -2790,7 +2840,6 @@
 	}
 	common->nluns = nluns;
 
-
 	/* Data buffers cyclic list */
 	bh = common->buffhds;
 	i = FSG_NUM_BUFFERS;
@@ -2807,7 +2856,6 @@
 	} while (--i);
 	bh->next = common->buffhds;
 
-
 	/* Prepare inquiryString */
 	if (cfg->release != 0xffff) {
 		i = cfg->release;
@@ -2821,41 +2869,35 @@
 			i = 0x0399;
 		}
 	}
-#define OR(x, y) ((x) ? (x) : (y))
 	snprintf(common->inquiry_string, sizeof common->inquiry_string,
-		 "%-8s%-16s%04x",
-		 OR(cfg->vendor_name, "Linux   "),
+		 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
 		 /* Assume product name dependent on the first LUN */
-		 OR(cfg->product_name, common->luns->cdrom
+		 cfg->product_name ?: (common->luns->cdrom
 				     ? "File-Stor Gadget"
-				     : "File-CD Gadget  "),
+				     : "File-CD Gadget"),
 		 i);
 
-
-	/* Some peripheral controllers are known not to be able to
+	/*
+	 * Some peripheral controllers are known not to be able to
 	 * halt bulk endpoints correctly.  If one of them is present,
 	 * disable stalls.
 	 */
 	common->can_stall = cfg->can_stall &&
 		!(gadget_is_at91(common->gadget));
 
-
 	spin_lock_init(&common->lock);
 	kref_init(&common->ref);
 
-
 	/* Tell the thread to start working */
 	common->thread_task =
 		kthread_create(fsg_main_thread, common,
-			       OR(cfg->thread_name, "file-storage"));
+			       cfg->thread_name ?: "file-storage");
 	if (IS_ERR(common->thread_task)) {
 		rc = PTR_ERR(common->thread_task);
 		goto error_release;
 	}
 	init_completion(&common->thread_notifier);
 	init_waitqueue_head(&common->fsg_wait);
-#undef OR
-
 
 	/* Information */
 	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
@@ -2889,18 +2931,15 @@
 
 	return common;
 
-
 error_luns:
 	common->nluns = i + 1;
 error_release:
 	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
-	/* Call fsg_common_release() directly, ref might be not
-	 * initialised */
+	/* Call fsg_common_release() directly, ref might be not initialised. */
 	fsg_common_release(&common->ref);
 	return ERR_PTR(rc);
 }
 
-
 static void fsg_common_release(struct kref *ref)
 {
 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
@@ -2909,9 +2948,6 @@
 	if (common->state != FSG_STATE_TERMINATED) {
 		raise_exception(common, FSG_STATE_EXIT);
 		wait_for_completion(&common->thread_notifier);
-
-		/* The cleanup routine waits for this completion also */
-		complete(&common->thread_notifier);
 	}
 
 	if (likely(common->luns)) {
@@ -2945,7 +2981,6 @@
 
 /*-------------------------------------------------------------------------*/
 
-
 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct fsg_dev		*fsg = fsg_from_func(f);
@@ -2965,7 +3000,6 @@
 	kfree(fsg);
 }
 
-
 static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct fsg_dev		*fsg = fsg_from_func(f);
@@ -3048,11 +3082,13 @@
 	fsg->function.disable     = fsg_disable;
 
 	fsg->common               = common;
-	/* Our caller holds a reference to common structure so we
+	/*
+	 * Our caller holds a reference to common structure so we
 	 * don't have to be worry about it being freed until we return
 	 * from this function.  So instead of incrementing counter now
 	 * and decrement in error recovery we increment it only when
-	 * call to usb_add_function() was successful. */
+	 * call to usb_add_function() was successful.
+	 */
 
 	rc = usb_add_function(c, &fsg->function);
 	if (unlikely(rc))
@@ -3063,8 +3099,7 @@
 }
 
 static inline int __deprecated __maybe_unused
-fsg_add(struct usb_composite_dev *cdev,
-	struct usb_configuration *c,
+fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c,
 	struct fsg_common *common)
 {
 	return fsg_bind_config(cdev, c, common);
@@ -3073,7 +3108,6 @@
 
 /************************* Module parameters *************************/
 
-
 struct fsg_module_parameters {
 	char		*file[FSG_MAX_LUNS];
 	int		ro[FSG_MAX_LUNS];
@@ -3087,7 +3121,6 @@
 	int		stall;	/* can_stall */
 };
 
-
 #define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc)	\
 	module_param_array_named(prefix ## name, params.name, type,	\
 				 &prefix ## params.name ## _count,	\
@@ -3115,7 +3148,6 @@
 	_FSG_MODULE_PARAM(prefix, params, stall, bool,			\
 			  "false to prevent bulk stalls")
 
-
 static void
 fsg_config_from_params(struct fsg_config *cfg,
 		       const struct fsg_module_parameters *params)
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
new file mode 100644
index 0000000..130eee6
--- /dev/null
+++ b/drivers/usb/gadget/f_ncm.c
@@ -0,0 +1,1407 @@
+/*
+ * f_ncm.c -- USB CDC Network (NCM) link function driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
+ *
+ * The driver borrows from f_ecm.c which is:
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+
+#include <linux/usb/cdc.h>
+
+#include "u_ether.h"
+
+/*
+ * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link.
+ * NCM is intended to be used with high-speed network attachments.
+ *
+ * Note that NCM requires the use of "alternate settings" for its data
+ * interface.  This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ */
+
+/* used to select the CRC or non-CRC NDP signature */
+
+#define NCM_NDP_HDR_CRC_MASK	0x01000000
+#define NCM_NDP_HDR_CRC		0x01000000
+#define NCM_NDP_HDR_NOCRC	0x00000000
+
+struct ncm_ep_descs {
+	struct usb_endpoint_descriptor	*in;
+	struct usb_endpoint_descriptor	*out;
+	struct usb_endpoint_descriptor	*notify;
+};
+
+enum ncm_notify_state {
+	NCM_NOTIFY_NONE,		/* don't notify */
+	NCM_NOTIFY_CONNECT,		/* issue CONNECT next */
+	NCM_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
+};
+
+struct f_ncm {
+	struct gether			port;
+	u8				ctrl_id, data_id;
+
+	char				ethaddr[14];
+
+	struct ncm_ep_descs		fs;
+	struct ncm_ep_descs		hs;
+
+	struct usb_ep			*notify;
+	struct usb_endpoint_descriptor	*notify_desc;
+	struct usb_request		*notify_req;
+	u8				notify_state;
+	bool				is_open;
+
+	struct ndp_parser_opts		*parser_opts;
+	bool				is_crc;
+
+	 * protects notification state, which is accessed from both the
+	 * completion callback and ethernet open/close
+	 * callback and ethernet open/close
+	 */
+	spinlock_t			lock;
+};
+
+static inline struct f_ncm *func_to_ncm(struct usb_function *f)
+{
+	return container_of(f, struct f_ncm, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ncm_bitrate(struct usb_gadget *g)
+{
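+	/*
+	 * HS: up to 13 512-byte bulk packets per microframe, 8000
+	 * microframes per second; FS: 19 64-byte bulk packets per 1 ms frame.
+	 */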
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 *  64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We cannot group frames, so use just the minimal size, which is enough
+ * to hold one max-size ethernet frame.
+ * If the host can group frames, allow it to do so; 16K is selected
+ * because it is the default used by the current Linux host driver.
+ */
+#define NTB_DEFAULT_IN_SIZE	USB_CDC_NCM_NTB_MIN_IN_SIZE
+#define NTB_OUT_SIZE		16384
+
+/*
+ * skbs smaller than that will not be aligned
+ * to NCM's dwNtbInMaxSize, to save bus bandwidth
+ */
+
+#define	MAX_TX_NONFIXED		(512 * 3)
+
+#define FORMATS_SUPPORTED	(USB_CDC_NCM_NTB16_SUPPORTED |	\
+				 USB_CDC_NCM_NTB32_SUPPORTED)
+
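+/* NTB parameters reported to the host in GET_NTB_PARAMETERS responses */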
+static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
+	.wLength = sizeof ntb_parameters,
+	.bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED),
+	.dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE),
+	.wNdpInDivisor = cpu_to_le16(4),
+	.wNdpInPayloadRemainder = cpu_to_le16(0),
+	.wNdpInAlignment = cpu_to_le16(4),
+
+	.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE),
+	.wNdpOutDivisor = cpu_to_le16(4),
+	.wNdpOutPayloadRemainder = cpu_to_le16(0),
+	.wNdpOutAlignment = cpu_to_le16(4),
+};
+
+/*
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define NCM_STATUS_BYTECOUNT		16	/* 8 byte header + data */
+
+static struct usb_interface_assoc_descriptor ncm_iad_desc __initdata = {
+	.bLength =		sizeof ncm_iad_desc,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_NCM,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction =		DYNAMIC */
+};
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ncm_control_intf __initdata = {
+	.bLength =		sizeof ncm_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_NCM,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ncm_header_desc __initdata = {
+	.bLength =		sizeof ncm_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ncm_union_desc __initdata = {
+	.bLength =		sizeof(ncm_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_desc __initdata = {
+	.bLength =		sizeof ecm_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+#define NCAPS	(USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)
+
+static struct usb_cdc_ncm_desc ncm_desc __initdata = {
+	.bLength =		sizeof ncm_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_NCM_TYPE,
+
+	.bcdNcmVersion =	cpu_to_le16(0x0100),
+	/* can process SetEthernetPacketFilter */
+	.bmNetworkCapabilities = NCAPS,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ncm_data_nop_intf __initdata = {
+	.bLength =		sizeof ncm_data_nop_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ncm_data_intf __initdata = {
+	.bLength =		sizeof ncm_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_ncm_notify_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_ncm_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_ncm_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ncm_fs_function[] __initdata = {
+	(struct usb_descriptor_header *) &ncm_iad_desc,
+	/* CDC NCM control descriptors */
+	(struct usb_descriptor_header *) &ncm_control_intf,
+	(struct usb_descriptor_header *) &ncm_header_desc,
+	(struct usb_descriptor_header *) &ncm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+	(struct usb_descriptor_header *) &ncm_desc,
+	(struct usb_descriptor_header *) &fs_ncm_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ncm_data_nop_intf,
+	(struct usb_descriptor_header *) &ncm_data_intf,
+	(struct usb_descriptor_header *) &fs_ncm_in_desc,
+	(struct usb_descriptor_header *) &fs_ncm_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_ncm_notify_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor hs_ncm_in_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_ncm_out_desc __initdata = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ncm_hs_function[] __initdata = {
+	(struct usb_descriptor_header *) &ncm_iad_desc,
+	/* CDC NCM control descriptors */
+	(struct usb_descriptor_header *) &ncm_control_intf,
+	(struct usb_descriptor_header *) &ncm_header_desc,
+	(struct usb_descriptor_header *) &ncm_union_desc,
+	(struct usb_descriptor_header *) &ecm_desc,
+	(struct usb_descriptor_header *) &ncm_desc,
+	(struct usb_descriptor_header *) &hs_ncm_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ncm_data_nop_intf,
+	(struct usb_descriptor_header *) &ncm_data_intf,
+	(struct usb_descriptor_header *) &hs_ncm_in_desc,
+	(struct usb_descriptor_header *) &hs_ncm_out_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define STRING_CTRL_IDX	0
+#define STRING_MAC_IDX	1
+#define STRING_DATA_IDX	2
+#define STRING_IAD_IDX	3
+
+static struct usb_string ncm_string_defs[] = {
+	[STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)",
+	[STRING_MAC_IDX].s = NULL /* DYNAMIC */,
+	[STRING_DATA_IDX].s = "CDC Network Data",
+	[STRING_IAD_IDX].s = "CDC NCM",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ncm_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ncm_string_defs,
+};
+
+static struct usb_gadget_strings *ncm_strings[] = {
+	&ncm_string_table,
+	NULL,
+};
+
+/*
+ * Here are the options for the NCM Datagram Pointer table (NDP) parser.
+ * The spec (ch. 3) defines two formats, NDP16 and NDP32: in NDP16 the
+ * offset and size fields are one 16-bit word wide, in NDP32 they are
+ * two 16-bit words wide.  The signatures differ as well.
+ * To keep the parser code the same, the differences are kept in this
+ * structure, and pointers are switched when the format is changed.
+ */
+
+struct ndp_parser_opts {
+	u32		nth_sign;
+	u32		ndp_sign;
+	unsigned	nth_size;
+	unsigned	ndp_size;
+	unsigned	ndplen_align;
+	/* sizes in u16 units */
+	unsigned	dgram_item_len; /* index or length */
+	unsigned	block_length;
+	unsigned	fp_index;
+	unsigned	reserved1;
+	unsigned	reserved2;
+	unsigned	next_fp_index;
+};
+
+#define INIT_NDP16_OPTS {					\
+		.nth_sign = USB_CDC_NCM_NTH16_SIGN,		\
+		.ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,	\
+		.nth_size = sizeof(struct usb_cdc_ncm_nth16),	\
+		.ndp_size = sizeof(struct usb_cdc_ncm_ndp16),	\
+		.ndplen_align = 4,				\
+		.dgram_item_len = 1,				\
+		.block_length = 1,				\
+		.fp_index = 1,					\
+		.reserved1 = 0,					\
+		.reserved2 = 0,					\
+		.next_fp_index = 1,				\
+	}
+
+#define INIT_NDP32_OPTS {					\
+		.nth_sign = USB_CDC_NCM_NTH32_SIGN,		\
+		.ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,	\
+		.nth_size = sizeof(struct usb_cdc_ncm_nth32),	\
+		.ndp_size = sizeof(struct usb_cdc_ncm_ndp32),	\
+		.ndplen_align = 8,				\
+		.dgram_item_len = 2,				\
+		.block_length = 2,				\
+		.fp_index = 2,					\
+		.reserved1 = 1,					\
+		.reserved2 = 2,					\
+		.next_fp_index = 2,				\
+	}
+
+static struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
+static struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
+
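+/*
+ * put_ncm()/get_ncm() store and fetch NDP16/NDP32 fields; "size" is the
+ * field width in 16-bit units (1 for NDP16, 2 for NDP32), matching the
+ * "sizes in u16 units" members of struct ndp_parser_opts above.
+ */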
+static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
+{
+	switch (size) {
+	case 1:
+		put_unaligned_le16((u16)val, *p);
+		break;
+	case 2:
+		put_unaligned_le32((u32)val, *p);
+		break;
+	default:
+		BUG();
+	}
+
+	*p += size;
+}
+
+static inline unsigned get_ncm(__le16 **p, unsigned size)
+{
+	unsigned tmp;
+
+	switch (size) {
+	case 1:
+		tmp = get_unaligned_le16(*p);
+		break;
+	case 2:
+		tmp = get_unaligned_le32(*p);
+		break;
+	default:
+		BUG();
+	}
+
+	*p += size;
+	return tmp;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void ncm_reset_values(struct f_ncm *ncm)
+{
+	ncm->parser_opts = &ndp16_opts;
+	ncm->is_crc = false;
+	ncm->port.cdc_filter = DEFAULT_FILTER;
+
+	/* doesn't make sense for ncm, fixed size used */
+	ncm->port.header_len = 0;
+
+	ncm->port.fixed_out_len = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+	ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE;
+}
+
+/*
+ * Context: ncm->lock held
+ */
+static void ncm_do_notify(struct f_ncm *ncm)
+{
+	struct usb_request		*req = ncm->notify_req;
+	struct usb_cdc_notification	*event;
+	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
+	__le32				*data;
+	int				status;
+
+	/* notification already in flight? */
+	if (!req)
+		return;
+
+	event = req->buf;
+	switch (ncm->notify_state) {
+	case NCM_NOTIFY_NONE:
+		return;
+
+	case NCM_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		if (ncm->is_open)
+			event->wValue = cpu_to_le16(1);
+		else
+			event->wValue = cpu_to_le16(0);
+		event->wLength = 0;
+		req->length = sizeof *event;
+
+		DBG(cdev, "notify connect %s\n",
+				ncm->is_open ? "true" : "false");
+		ncm->notify_state = NCM_NOTIFY_NONE;
+		break;
+
+	case NCM_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+		req->length = NCM_STATUS_BYTECOUNT;
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof *event;
+		data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
+		ncm->notify_state = NCM_NOTIFY_CONNECT;
+		break;
+	}
+	event->bmRequestType = 0xA1;
+	event->wIndex = cpu_to_le16(ncm->ctrl_id);
+
+	ncm->notify_req = NULL;
+	/*
+	 * With double buffering, if there is space in the FIFO the
+	 * completion callback can run right after we queue the request,
+	 * so drop the lock around usb_ep_queue().
+	 */
+	spin_unlock(&ncm->lock);
+	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
+	spin_lock(&ncm->lock);
+	if (status < 0) {
+		ncm->notify_req = req;
+		DBG(cdev, "notify --> %d\n", status);
+	}
+}
+
+/*
+ * Context: ncm->lock held
+ */
+static void ncm_notify(struct f_ncm *ncm)
+{
+	/*
+	 * NOTE: on most versions of Linux, the host side cdc-ethernet
+	 * driver won't listen for notifications until its netdevice opens.
+	 * The first notification then sits in the FIFO for a long
+	 * time, and the second one is queued.
+	 *
+	 * If ncm_notify() is called before the second (CONNECT)
+	 * notification is sent, then it will reset to send the SPEED
+	 * notification again (and again, and again), but it's not a problem
+	 */
+	ncm->notify_state = NCM_NOTIFY_SPEED;
+	ncm_do_notify(ncm);
+}
+
+static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_ncm			*ncm = req->context;
+	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
+	struct usb_cdc_notification	*event = req->buf;
+
+	spin_lock(&ncm->lock);
+	switch (req->status) {
+	case 0:
+		VDBG(cdev, "Notification %02x sent\n",
+		     event->bNotificationType);
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		ncm->notify_state = NCM_NOTIFY_NONE;
+		break;
+	default:
+		DBG(cdev, "event %02x --> %d\n",
+			event->bNotificationType, req->status);
+		break;
+	}
+	ncm->notify_req = req;
+	ncm_do_notify(ncm);
+	spin_unlock(&ncm->lock);
+}
+
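+/*
+ * ep0 OUT data stage completion: currently only SET_NTB_INPUT_SIZE uses
+ * it, so validate the 4-byte value against ntb_parameters and apply it.
+ */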
+static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* now for SET_NTB_INPUT_SIZE only */
+	unsigned		in_size;
+	struct usb_function	*f = req->context;
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = ep->driver_data;
+
+	req->context = NULL;
+	if (req->status || req->actual != req->length) {
+		DBG(cdev, "Bad control-OUT transfer\n");
+		goto invalid;
+	}
+
+	in_size = get_unaligned_le32(req->buf);
+	if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
+	    in_size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) {
+		DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size);
+		goto invalid;
+	}
+
+	ncm->port.fixed_in_len = in_size;
+	VDBG(cdev, "Set NTB INPUT SIZE %d\n", in_size);
+	return;
+
+invalid:
+	usb_ep_set_halt(ep);
+	return;
+}
+
+static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/*
+	 * composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/*
+		 * see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (w_length != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		DBG(cdev, "packet filter %02x\n", w_value);
+		/*
+		 * REVISIT locking of cdc_filter.  This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		ncm->port.cdc_filter = w_value;
+		value = 0;
+		break;
+	/*
+	 * and optionally:
+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_PARAMETERS:
+
+		if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		value = w_length > sizeof ntb_parameters ?
+			sizeof ntb_parameters : w_length;
+		memcpy(req->buf, &ntb_parameters, value);
+		VDBG(cdev, "Host asked NTB parameters\n");
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_INPUT_SIZE:
+
+		if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
+		value = 4;
+		VDBG(cdev, "Host asked INPUT SIZE, sending %d\n",
+		     ncm->port.fixed_in_len);
+		break;
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_NTB_INPUT_SIZE:
+	{
+		if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		req->complete = ncm_ep0out_complete;
+		req->length = w_length;
+		req->context = f;
+
+		value = req->length;
+		break;
+	}
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_NTB_FORMAT:
+	{
+		uint16_t format;
+
+		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001;
+		put_unaligned_le16(format, req->buf);
+		value = 2;
+		VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format);
+		break;
+	}
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_NTB_FORMAT:
+	{
+		if (w_length != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		switch (w_value) {
+		case 0x0000:
+			ncm->parser_opts = &ndp16_opts;
+			DBG(cdev, "NCM16 selected\n");
+			break;
+		case 0x0001:
+			ncm->parser_opts = &ndp32_opts;
+			DBG(cdev, "NCM32 selected\n");
+			break;
+		default:
+			goto invalid;
+		}
+		value = 0;
+		break;
+	}
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_GET_CRC_MODE:
+	{
+		uint16_t is_crc;
+
+		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		is_crc = ncm->is_crc ? 0x0001 : 0x0000;
+		put_unaligned_le16(is_crc, req->buf);
+		value = 2;
+		VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc);
+		break;
+	}
+
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+		| USB_CDC_SET_CRC_MODE:
+	{
+		int ndp_hdr_crc = 0;
+
+		if (w_length != 0 || w_index != ncm->ctrl_id)
+			goto invalid;
+		switch (w_value) {
+		case 0x0000:
+			ncm->is_crc = false;
+			ndp_hdr_crc = NCM_NDP_HDR_NOCRC;
+			DBG(cdev, "non-CRC mode selected\n");
+			break;
+		case 0x0001:
+			ncm->is_crc = true;
+			ndp_hdr_crc = NCM_NDP_HDR_CRC;
+			DBG(cdev, "CRC mode selected\n");
+			break;
+		default:
+			goto invalid;
+		}
+		ncm->parser_opts->ndp_sign &= ~NCM_NDP_HDR_CRC_MASK;
+		ncm->parser_opts->ndp_sign |= ndp_hdr_crc;
+		value = 0;
+		break;
+	}
+
+	/* and disabled in ncm descriptor: */
+	/* case USB_CDC_GET_NET_ADDRESS: */
+	/* case USB_CDC_SET_NET_ADDRESS: */
+	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
+	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "ncm req %02x.%02x response err %d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* Control interface has only altsetting 0 */
+	if (intf == ncm->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+
+		if (ncm->notify->driver_data) {
+			DBG(cdev, "reset ncm control %d\n", intf);
+			usb_ep_disable(ncm->notify);
+		} else {
+			DBG(cdev, "init ncm ctrl %d\n", intf);
+			ncm->notify_desc = ep_choose(cdev->gadget,
+					ncm->hs.notify,
+					ncm->fs.notify);
+		}
+		usb_ep_enable(ncm->notify, ncm->notify_desc);
+		ncm->notify->driver_data = ncm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ncm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (ncm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ncm\n");
+			gether_disconnect(&ncm->port);
+			ncm_reset_values(ncm);
+		}
+
+		/*
+		 * CDC Network only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device	*net;
+
+			if (!ncm->port.in) {
+				DBG(cdev, "init ncm\n");
+				ncm->port.in = ep_choose(cdev->gadget,
+							 ncm->hs.in,
+							 ncm->fs.in);
+				ncm->port.out = ep_choose(cdev->gadget,
+							  ncm->hs.out,
+							  ncm->fs.out);
+			}
+
+			/* TODO */
+			/* Enable zlps by default for NCM conformance;
+			 * override for musb_hdrc (avoids txdma overhead)
+			 */
+			ncm->port.is_zlp_ok = !(
+				gadget_is_musbhdrc(cdev->gadget)
+				);
+			ncm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ncm\n");
+			net = gether_connect(&ncm->port);
+			if (IS_ERR(net))
+				return PTR_ERR(net);
+		}
+
+		spin_lock(&ncm->lock);
+		ncm_notify(ncm);
+		spin_unlock(&ncm->lock);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/*
+ * Because the data interface supports multiple altsettings,
+ * this NCM function *MUST* implement a get_alt() method.
+ */
+static int ncm_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+
+	if (intf == ncm->ctrl_id)
+		return 0;
+	return ncm->port.in_ep->driver_data ? 1 : 0;
+}
+
+static struct sk_buff *ncm_wrap_ntb(struct gether *port,
+				    struct sk_buff *skb)
+{
+	struct f_ncm	*ncm = func_to_ncm(&port->func);
+	struct sk_buff	*skb2;
+	int		ncb_len = 0;
+	__le16		*tmp;
+	int		div = ntb_parameters.wNdpInDivisor;
+	int		rem = ntb_parameters.wNdpInPayloadRemainder;
+	int		pad;
+	int		ndp_align = ntb_parameters.wNdpInAlignment;
+	int		ndp_pad;
+	unsigned	max_size = ncm->port.fixed_in_len;
+	struct ndp_parser_opts *opts = ncm->parser_opts;
+	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+
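+	/*
+	 * Reserve room in front of the payload for the NTH, the alignment
+	 * pad before the NDP, the NDP with one datagram entry plus the
+	 * terminating zero entry, and the payload alignment pad.
+	 */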
+	ncb_len += opts->nth_size;
+	ndp_pad = ALIGN(ncb_len, ndp_align) - ncb_len;
+	ncb_len += ndp_pad;
+	ncb_len += opts->ndp_size;
+	ncb_len += 2 * 2 * opts->dgram_item_len; /* Datagram entry */
+	ncb_len += 2 * 2 * opts->dgram_item_len; /* Zero datagram entry */
+	pad = ALIGN(ncb_len, div) + rem - ncb_len;
+	ncb_len += pad;
+
+	if (ncb_len + skb->len + crc_len > max_size) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	skb2 = skb_copy_expand(skb, ncb_len,
+			       max_size - skb->len - ncb_len - crc_len,
+			       GFP_ATOMIC);
+	dev_kfree_skb_any(skb);
+	if (!skb2)
+		return NULL;
+
+	skb = skb2;
+
+	tmp = (void *) skb_push(skb, ncb_len);
+	memset(tmp, 0, ncb_len);
+
+	put_unaligned_le32(opts->nth_sign, tmp); /* dwSignature */
+	tmp += 2;
+	/* wHeaderLength */
+	put_unaligned_le16(opts->nth_size, tmp++);
+	tmp++; /* skip wSequence */
+	put_ncm(&tmp, opts->block_length, skb->len); /* (d)wBlockLength */
+	/* (d)wFpIndex */
+	/* the first pointer is right after the NTH + align */
+	put_ncm(&tmp, opts->fp_index, opts->nth_size + ndp_pad);
+
+	tmp = (void *)tmp + ndp_pad;
+
+	/* NDP */
+	put_unaligned_le32(opts->ndp_sign, tmp); /* dwSignature */
+	tmp += 2;
+	/* wLength */
+	put_unaligned_le16(ncb_len - opts->nth_size - pad, tmp++);
+
+	tmp += opts->reserved1;
+	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
+	tmp += opts->reserved2;
+
+	if (ncm->is_crc) {
+		uint32_t crc;
+
+		crc = ~crc32_le(~0,
+				skb->data + ncb_len,
+				skb->len - ncb_len);
+		put_unaligned_le32(crc, skb->data + skb->len);
+		skb_put(skb, crc_len);
+	}
+
+	/* (d)wDatagramIndex[0] */
+	put_ncm(&tmp, opts->dgram_item_len, ncb_len);
+	/* (d)wDatagramLength[0] */
+	put_ncm(&tmp, opts->dgram_item_len, skb->len - ncb_len);
+	/* (d)wDatagramIndex[1] and (d)wDatagramLength[1] already zeroed */
+
+	if (skb->len > MAX_TX_NONFIXED)
+		memset(skb_put(skb, max_size - skb->len),
+		       0, max_size - skb->len);
+
+	return skb;
+}
+
+static int ncm_unwrap_ntb(struct gether *port,
+			  struct sk_buff *skb,
+			  struct sk_buff_head *list)
+{
+	struct f_ncm	*ncm = func_to_ncm(&port->func);
+	__le16		*tmp = (void *) skb->data;
+	unsigned	index, index2;
+	unsigned	dg_len, dg_len2;
+	unsigned	ndp_len;
+	struct sk_buff	*skb2;
+	int		ret = -EINVAL;
+	unsigned	max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
+	struct ndp_parser_opts *opts = ncm->parser_opts;
+	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+	int		dgram_counter;
+
+	/* dwSignature */
+	if (get_unaligned_le32(tmp) != opts->nth_sign) {
+		INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n",
+			skb->len);
+		print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1,
+			       skb->data, 32, false);
+
+		goto err;
+	}
+	tmp += 2;
+	/* wHeaderLength */
+	if (get_unaligned_le16(tmp++) != opts->nth_size) {
+		INFO(port->func.config->cdev, "Wrong NTB header size\n");
+		goto err;
+	}
+	tmp++; /* skip wSequence */
+
+	/* (d)wBlockLength */
+	if (get_ncm(&tmp, opts->block_length) > max_size) {
+		INFO(port->func.config->cdev, "OUT size exceeded\n");
+		goto err;
+	}
+
+	index = get_ncm(&tmp, opts->fp_index);
+	/* NCM 3.2 */
+	if (((index % 4) != 0) && (index < opts->nth_size)) {
+		INFO(port->func.config->cdev, "Bad index: %x\n",
+			index);
+		goto err;
+	}
+
+	/* walk through NDP */
+	tmp = ((void *)skb->data) + index;
+	if (get_unaligned_le32(tmp) != opts->ndp_sign) {
+		INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
+		goto err;
+	}
+	tmp += 2;
+
+	ndp_len = get_unaligned_le16(tmp++);
+	/*
+	 * NCM 3.3.1
+	 * entry is 2 items
+	 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
+	 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
+	 */
+	if ((ndp_len < opts->ndp_size + 2 * 2 * (opts->dgram_item_len * 2))
+	    || (ndp_len % opts->ndplen_align != 0)) {
+		INFO(port->func.config->cdev, "Bad NDP length: %x\n", ndp_len);
+		goto err;
+	}
+	tmp += opts->reserved1;
+	tmp += opts->next_fp_index; /* skip reserved (d)wNextFpIndex */
+	tmp += opts->reserved2;
+
+	ndp_len -= opts->ndp_size;
+	index2 = get_ncm(&tmp, opts->dgram_item_len);
+	dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+	dgram_counter = 0;
+
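+	/*
+	 * Walk the datagram entries; the loop below stops at the
+	 * terminating zero entry.
+	 */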
+	do {
+		index = index2;
+		dg_len = dg_len2;
+		if (dg_len < 14 + crc_len) { /* ethernet header + crc */
+			INFO(port->func.config->cdev, "Bad dgram length: %x\n",
+			     dg_len);
+			goto err;
+		}
+		if (ncm->is_crc) {
+			uint32_t crc, crc2;
+
+			crc = get_unaligned_le32(skb->data +
+						 index + dg_len - crc_len);
+			crc2 = ~crc32_le(~0,
+					 skb->data + index,
+					 dg_len - crc_len);
+			if (crc != crc2) {
+				INFO(port->func.config->cdev, "Bad CRC\n");
+				goto err;
+			}
+		}
+
+		index2 = get_ncm(&tmp, opts->dgram_item_len);
+		dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+
+		if (index2 == 0 || dg_len2 == 0) {
+			skb2 = skb;
+		} else {
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2 == NULL)
+				goto err;
+		}
+
+		if (!skb_pull(skb2, index)) {
+			ret = -EOVERFLOW;
+			goto err;
+		}
+
+		skb_trim(skb2, dg_len - crc_len);
+		skb_queue_tail(list, skb2);
+
+		ndp_len -= 2 * (opts->dgram_item_len * 2);
+
+		dgram_counter++;
+
+		if (index2 == 0 || dg_len2 == 0)
+			break;
+	} while (ndp_len > 2 * (opts->dgram_item_len * 2)); /* zero entry */
+
+	VDBG(port->func.config->cdev,
+	     "Parsed NTB with %d frames\n", dgram_counter);
+	return 0;
+err:
+	skb_queue_purge(list);
+	dev_kfree_skb_any(skb);
+	return ret;
+}
+
+static void ncm_disable(struct usb_function *f)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "ncm deactivated\n");
+
+	if (ncm->port.in_ep->driver_data)
+		gether_disconnect(&ncm->port);
+
+	if (ncm->notify->driver_data) {
+		usb_ep_disable(ncm->notify);
+		ncm->notify->driver_data = NULL;
+		ncm->notify_desc = NULL;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ *   - disconnected/unconfigured
+ *   - configured but inactive (data alt 0)
+ *   - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting).  Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+static void ncm_open(struct gether *geth)
+{
+	struct f_ncm		*ncm = func_to_ncm(&geth->func);
+
+	DBG(ncm->port.func.config->cdev, "%s\n", __func__);
+
+	spin_lock(&ncm->lock);
+	ncm->is_open = true;
+	ncm_notify(ncm);
+	spin_unlock(&ncm->lock);
+}
+
+static void ncm_close(struct gether *geth)
+{
+	struct f_ncm		*ncm = func_to_ncm(&geth->func);
+
+	DBG(ncm->port.func.config->cdev, "%s\n", __func__);
+
+	spin_lock(&ncm->lock);
+	ncm->is_open = false;
+	ncm_notify(ncm);
+	spin_unlock(&ncm->lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int __init
+ncm_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_ncm		*ncm = func_to_ncm(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ncm->ctrl_id = status;
+	ncm_iad_desc.bFirstInterface = status;
+
+	ncm_control_intf.bInterfaceNumber = status;
+	ncm_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ncm->data_id = status;
+
+	ncm_data_nop_intf.bInterfaceNumber = status;
+	ncm_data_intf.bInterfaceNumber = status;
+	ncm_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
+	if (!ep)
+		goto fail;
+	ncm->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
+	if (!ep)
+		goto fail;
+	ncm->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
+	if (!ep)
+		goto fail;
+	ncm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!ncm->notify_req)
+		goto fail;
+	ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!ncm->notify_req->buf)
+		goto fail;
+	ncm->notify_req->context = ncm;
+	ncm->notify_req->complete = ncm_notify_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(ncm_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	ncm->fs.in = usb_find_endpoint(ncm_fs_function,
+			f->descriptors, &fs_ncm_in_desc);
+	ncm->fs.out = usb_find_endpoint(ncm_fs_function,
+			f->descriptors, &fs_ncm_out_desc);
+	ncm->fs.notify = usb_find_endpoint(ncm_fs_function,
+			f->descriptors, &fs_ncm_notify_desc);
+
+	/*
+	 * support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_ncm_in_desc.bEndpointAddress =
+				fs_ncm_in_desc.bEndpointAddress;
+		hs_ncm_out_desc.bEndpointAddress =
+				fs_ncm_out_desc.bEndpointAddress;
+		hs_ncm_notify_desc.bEndpointAddress =
+				fs_ncm_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ncm_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+
+		ncm->hs.in = usb_find_endpoint(ncm_hs_function,
+				f->hs_descriptors, &hs_ncm_in_desc);
+		ncm->hs.out = usb_find_endpoint(ncm_hs_function,
+				f->hs_descriptors, &hs_ncm_out_desc);
+		ncm->hs.notify = usb_find_endpoint(ncm_hs_function,
+				f->hs_descriptors, &hs_ncm_notify_desc);
+	}
+
+	/*
+	 * NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	ncm->port.open = ncm_open;
+	ncm->port.close = ncm_close;
+
+	DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			ncm->port.in_ep->name, ncm->port.out_ep->name,
+			ncm->notify->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (ncm->notify_req) {
+		kfree(ncm->notify_req->buf);
+		usb_ep_free_request(ncm->notify, ncm->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (ncm->notify)
+		ncm->notify->driver_data = NULL;
+	if (ncm->port.out)
+		ncm->port.out_ep->driver_data = NULL;
+	if (ncm->port.in)
+		ncm->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+ncm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ncm		*ncm = func_to_ncm(f);
+
+	DBG(c->cdev, "ncm unbind\n");
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(ncm->notify_req->buf);
+	usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+	ncm_string_defs[1].s = NULL;
+	kfree(ncm);
+}
+
+/**
+ * ncm_bind_config - add CDC Network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host
+ *	side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+int __init ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	struct f_ncm	*ncm;
+	int		status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+
+	/* maybe allocate device-global string IDs */
+	if (ncm_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_CTRL_IDX].id = status;
+		ncm_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_DATA_IDX].id = status;
+		ncm_data_nop_intf.iInterface = status;
+		ncm_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_MAC_IDX].id = status;
+		ecm_desc.iMACAddress = status;
+
+		/* IAD */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ncm_string_defs[STRING_IAD_IDX].id = status;
+		ncm_iad_desc.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	ncm = kzalloc(sizeof *ncm, GFP_KERNEL);
+	if (!ncm)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format */
+	snprintf(ncm->ethaddr, sizeof ncm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	ncm_string_defs[1].s = ncm->ethaddr;
+
+	spin_lock_init(&ncm->lock);
+	ncm_reset_values(ncm);
+	ncm->port.is_fixed = true;
+
+	ncm->port.func.name = "cdc_network";
+	ncm->port.func.strings = ncm_strings;
+	/* descriptors are per-instance copies */
+	ncm->port.func.bind = ncm_bind;
+	ncm->port.func.unbind = ncm_unbind;
+	ncm->port.func.set_alt = ncm_set_alt;
+	ncm->port.func.get_alt = ncm_get_alt;
+	ncm->port.func.setup = ncm_setup;
+	ncm->port.func.disable = ncm_disable;
+
+	ncm->port.wrap = ncm_wrap_ntb;
+	ncm->port.unwrap = ncm_unwrap_ntb;
+
+	status = usb_add_function(c, &ncm->port.func);
+	if (status) {
+		ncm_string_defs[1].s = NULL;
+		kfree(ncm);
+	}
+	return status;
+}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index d4fdf65..a6eacb5 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3392,25 +3392,28 @@
 		dev_set_name(&curlun->dev,"%s-lun%d",
 			     dev_name(&gadget->dev), i);
 
-		if ((rc = device_register(&curlun->dev)) != 0) {
+		kref_get(&fsg->ref);
+		rc = device_register(&curlun->dev);
+		if (rc) {
 			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
-			goto out;
-		}
-		if ((rc = device_create_file(&curlun->dev,
-					&dev_attr_ro)) != 0 ||
-				(rc = device_create_file(&curlun->dev,
-					&dev_attr_nofua)) != 0 ||
-				(rc = device_create_file(&curlun->dev,
-					&dev_attr_file)) != 0) {
-			device_unregister(&curlun->dev);
+			put_device(&curlun->dev);
 			goto out;
 		}
 		curlun->registered = 1;
-		kref_get(&fsg->ref);
+
+		rc = device_create_file(&curlun->dev, &dev_attr_ro);
+		if (rc)
+			goto out;
+		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
+		if (rc)
+			goto out;
+		rc = device_create_file(&curlun->dev, &dev_attr_file);
+		if (rc)
+			goto out;
 
 		if (mod_data.file[i] && *mod_data.file[i]) {
-			if ((rc = fsg_lun_open(curlun,
-					mod_data.file[i])) != 0)
+			rc = fsg_lun_open(curlun, mod_data.file[i]);
+			if (rc)
 				goto out;
 		} else if (!mod_data.removable) {
 			ERROR(fsg, "no file given for LUN%d\n", i);
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index af75e36..ebf6970 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -1,7 +1,29 @@
+/*
+ * g_ffs.c -- user mode file system API for USB composite function controllers
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#define pr_fmt(fmt) "g_ffs: " fmt
+
 #include <linux/module.h>
 #include <linux/utsname.h>
 
-
 /*
  * kbuild is not very cooperative with respect to linking separately
  * compiled library objects into one module.  So for now we won't use
@@ -43,7 +65,6 @@
 
 #include "f_fs.c"
 
-
 #define DRIVER_NAME	"g_ffs"
 #define DRIVER_DESC	"USB Function Filesystem"
 #define DRIVER_VERSION	"24 Aug 2004"
@@ -73,8 +94,6 @@
 module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte,   0644);
 MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
 
-
-
 static const struct usb_descriptor_header *gfs_otg_desc[] = {
 	(const struct usb_descriptor_header *)
 	&(const struct usb_otg_descriptor) {
@@ -91,8 +110,7 @@
 	NULL
 };
 
-/* string IDs are assigned dynamically */
-
+/* String IDs are assigned dynamically */
 static struct usb_string gfs_strings[] = {
 #ifdef CONFIG_USB_FUNCTIONFS_RNDIS
 	{ .s = "FunctionFS + RNDIS" },
@@ -114,8 +132,6 @@
 	NULL,
 };
 
-
-
 struct gfs_configuration {
 	struct usb_configuration c;
 	int (*eth)(struct usb_configuration *c, u8 *ethaddr);
@@ -138,7 +154,6 @@
 #endif
 };
 
-
 static int gfs_bind(struct usb_composite_dev *cdev);
 static int gfs_unbind(struct usb_composite_dev *cdev);
 static int gfs_do_config(struct usb_configuration *c);
@@ -151,11 +166,9 @@
 	.iProduct	= DRIVER_DESC,
 };
 
-
 static struct ffs_data *gfs_ffs_data;
 static unsigned long gfs_registered;
 
-
 static int  gfs_init(void)
 {
 	ENTER();
@@ -175,7 +188,6 @@
 }
 module_exit(gfs_exit);
 
-
 static int functionfs_ready_callback(struct ffs_data *ffs)
 {
 	int ret;
@@ -200,14 +212,11 @@
 		usb_composite_unregister(&gfs_driver);
 }
 
-
 static int functionfs_check_dev_callback(const char *dev_name)
 {
 	return 0;
 }
 
-
-
 static int gfs_bind(struct usb_composite_dev *cdev)
 {
 	int ret, i;
@@ -274,7 +283,6 @@
 	return 0;
 }
 
-
 static int gfs_do_config(struct usb_configuration *c)
 {
 	struct gfs_configuration *gc =
@@ -315,7 +323,6 @@
 	return 0;
 }
 
-
 #ifdef CONFIG_USB_FUNCTIONFS_ETH
 
 static int eth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index e511fec..5c2720d 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -96,7 +96,7 @@
 
 /* Mentor high speed "dual role" controller, in peripheral role */
 #ifdef CONFIG_USB_GADGET_MUSB_HDRC
-#define gadget_is_musbhdrc(g)	!strcmp("musb_hdrc", (g)->name)
+#define gadget_is_musbhdrc(g)	!strcmp("musb-hdrc", (g)->name)
 #else
 #define gadget_is_musbhdrc(g)	0
 #endif
@@ -120,10 +120,10 @@
 #define gadget_is_fsl_qe(g)	0
 #endif
 
-#ifdef CONFIG_USB_GADGET_CI13XXX
-#define gadget_is_ci13xxx(g)	(!strcmp("ci13xxx_udc", (g)->name))
+#ifdef CONFIG_USB_GADGET_CI13XXX_PCI
+#define gadget_is_ci13xxx_pci(g)	(!strcmp("ci13xxx_pci", (g)->name))
 #else
-#define gadget_is_ci13xxx(g)	0
+#define gadget_is_ci13xxx_pci(g)	0
 #endif
 
 // CONFIG_USB_GADGET_SX2
@@ -142,6 +142,17 @@
 #define gadget_is_s3c_hsotg(g)    0
 #endif
 
+#ifdef CONFIG_USB_GADGET_EG20T
+#define	gadget_is_pch(g)	(!strcmp("pch_udc", (g)->name))
+#else
+#define	gadget_is_pch(g)	0
+#endif
+
+#ifdef CONFIG_USB_GADGET_CI13XXX_MSM
+#define gadget_is_ci13xxx_msm(g)	(!strcmp("ci13xxx_msm", (g)->name))
+#else
+#define gadget_is_ci13xxx_msm(g)	0
+#endif
 
 /**
  * usb_gadget_controller_number - support bcdDevice id convention
@@ -192,7 +203,7 @@
 		return 0x21;
 	else if (gadget_is_fsl_qe(gadget))
 		return 0x22;
-	else if (gadget_is_ci13xxx(gadget))
+	else if (gadget_is_ci13xxx_pci(gadget))
 		return 0x23;
 	else if (gadget_is_langwell(gadget))
 		return 0x24;
@@ -200,6 +211,10 @@
 		return 0x25;
 	else if (gadget_is_s3c_hsotg(gadget))
 		return 0x26;
+	else if (gadget_is_pch(gadget))
+		return 0x27;
+	else if (gadget_is_ci13xxx_msm(gadget))
+		return 0x28;
 	return -ENOENT;
 }
 
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index ed02664..5408186 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1191,13 +1191,17 @@
 	return IRQ_HANDLED;
 }
 
+#ifndef MX1_INT_USBD0
+#define MX1_INT_USBD0 MX1_USBD_INT0
+#endif
+
 static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
 {
 	struct imx_udc_struct *imx_usb = dev;
-	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - USBD_INT0];
+	struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - MX1_INT_USBD0];
 	int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
 
-	dump_ep_intr(__func__, irq - USBD_INT0, intr, imx_usb->dev);
+	dump_ep_intr(__func__, irq - MX1_INT_USBD0, intr, imx_usb->dev);
 
 	if (!imx_usb->driver) {
 		__raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
@@ -1316,7 +1320,7 @@
 };
 
 /*******************************************************************************
- * USB gadged driver functions
+ * USB gadget driver functions
  *******************************************************************************
  */
 int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
diff --git a/drivers/usb/gadget/imx_udc.h b/drivers/usb/gadget/imx_udc.h
index b48ad59..7136c24 100644
--- a/drivers/usb/gadget/imx_udc.h
+++ b/drivers/usb/gadget/imx_udc.h
@@ -23,9 +23,6 @@
 /* Helper macros */
 #define EP_NO(ep)	((ep->bEndpointAddress) & ~USB_DIR_IN) /* IN:1, OUT:0 */
 #define EP_DIR(ep)	((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0)
-#define irq_to_ep(irq)	(((irq) >= USBD_INT0) || ((irq) <= USBD_INT6) \
-		? ((irq) - USBD_INT0) : (USBD_INT6)) /*should not happen*/
-#define ep_to_irq(ep)	(EP_NO((ep)) + USBD_INT0)
 #define IMX_USB_NB_EP	6
 
 /* Driver structures */
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index b8ec954..1eca8b4 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -2225,6 +2225,7 @@
 	u16	wValue = le16_to_cpu(setup->wValue);
 	u16	wIndex = le16_to_cpu(setup->wIndex);
 	u16	wLength = le16_to_cpu(setup->wLength);
+	u32	portsc1;
 
 	dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
 
@@ -2313,6 +2314,28 @@
 					dev->dev_status &= ~(1 << wValue);
 				}
 				break;
+			case USB_DEVICE_TEST_MODE:
+				dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
+				if ((wIndex & 0xff) ||
+					(dev->gadget.speed != USB_SPEED_HIGH))
+					ep0_stall(dev);
+
+				switch (wIndex >> 8) {
+				case TEST_J:
+				case TEST_K:
+				case TEST_SE0_NAK:
+				case TEST_PACKET:
+				case TEST_FORCE_EN:
+					if (prime_status_phase(dev, EP_DIR_IN))
+						ep0_stall(dev);
+					portsc1 = readl(&dev->op_regs->portsc1);
+					portsc1 |= (wIndex & 0xf00) << 8;
+					writel(portsc1, &dev->op_regs->portsc1);
+					goto end;
+				default:
+					rc = -EOPNOTSUPP;
+				}
+				break;
 			default:
 				rc = -EOPNOTSUPP;
 				break;
@@ -3063,7 +3086,7 @@
 
 	kfree(dev->ep);
 
-	/* diable IRQ handler */
+	/* disable IRQ handler */
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
 
@@ -3383,7 +3406,7 @@
 	/* disable interrupt and set controller to stop state */
 	langwell_udc_stop(dev);
 
-	/* diable IRQ handler */
+	/* disable IRQ handler */
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
 	dev->got_irq = 0;
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 0769179..0182242 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -102,7 +102,7 @@
 };
 FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
 
-static unsigned long msg_registered = 0;
+static unsigned long msg_registered;
 static void msg_cleanup(void);
 
 static int msg_thread_exits(struct fsg_common *common)
diff --git a/drivers/usb/gadget/mv_udc.h b/drivers/usb/gadget/mv_udc.h
new file mode 100644
index 0000000..65f1f7c
--- /dev/null
+++ b/drivers/usb/gadget/mv_udc.h
@@ -0,0 +1,294 @@
+
+#ifndef __MV_UDC_H
+#define __MV_UDC_H
+
+#define VUSBHS_MAX_PORTS	8
+
+#define DQH_ALIGNMENT		2048
+#define DTD_ALIGNMENT		64
+#define DMA_BOUNDARY		4096
+
+#define EP_DIR_IN	1
+#define EP_DIR_OUT	0
+
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define EP0_MAX_PKT_SIZE	64
+/* ep0 transfer state */
+#define WAIT_FOR_SETUP		0
+#define DATA_STATE_XMIT		1
+#define DATA_STATE_NEED_ZLP	2
+#define WAIT_FOR_OUT_STATUS	3
+#define DATA_STATE_RECV		4
+
+#define CAPLENGTH_MASK		(0xff)
+#define DCCPARAMS_DEN_MASK	(0x1f)
+
+#define HCSPARAMS_PPC		(0x10)
+
+/* Frame Index Register Bit Masks */
+#define USB_FRINDEX_MASKS	0x3fff
+
+/* Command Register Bit Masks */
+#define USBCMD_RUN_STOP				(0x00000001)
+#define USBCMD_CTRL_RESET			(0x00000002)
+#define USBCMD_SETUP_TRIPWIRE_SET		(0x00002000)
+#define USBCMD_SETUP_TRIPWIRE_CLEAR		(~USBCMD_SETUP_TRIPWIRE_SET)
+
+#define USBCMD_ATDTW_TRIPWIRE_SET		(0x00004000)
+#define USBCMD_ATDTW_TRIPWIRE_CLEAR		(~USBCMD_ATDTW_TRIPWIRE_SET)
+
+/* bit 15,3,2 are for frame list size */
+#define USBCMD_FRAME_SIZE_1024			(0x00000000) /* 000 */
+#define USBCMD_FRAME_SIZE_512			(0x00000004) /* 001 */
+#define USBCMD_FRAME_SIZE_256			(0x00000008) /* 010 */
+#define USBCMD_FRAME_SIZE_128			(0x0000000C) /* 011 */
+#define USBCMD_FRAME_SIZE_64			(0x00008000) /* 100 */
+#define USBCMD_FRAME_SIZE_32			(0x00008004) /* 101 */
+#define USBCMD_FRAME_SIZE_16			(0x00008008) /* 110 */
+#define USBCMD_FRAME_SIZE_8			(0x0000800C) /* 111 */
+
+#define EPCTRL_TX_ALL_MASK			(0xFFFF0000)
+#define EPCTRL_RX_ALL_MASK			(0x0000FFFF)
+
+#define EPCTRL_TX_DATA_TOGGLE_RST		(0x00400000)
+#define EPCTRL_TX_EP_STALL			(0x00010000)
+#define EPCTRL_RX_EP_STALL			(0x00000001)
+#define EPCTRL_RX_DATA_TOGGLE_RST		(0x00000040)
+#define EPCTRL_RX_ENABLE			(0x00000080)
+#define EPCTRL_TX_ENABLE			(0x00800000)
+#define EPCTRL_CONTROL				(0x00000000)
+#define EPCTRL_ISOCHRONOUS			(0x00040000)
+#define EPCTRL_BULK				(0x00080000)
+#define EPCTRL_INT				(0x000C0000)
+#define EPCTRL_TX_TYPE				(0x000C0000)
+#define EPCTRL_RX_TYPE				(0x0000000C)
+#define EPCTRL_DATA_TOGGLE_INHIBIT		(0x00000020)
+#define EPCTRL_TX_EP_TYPE_SHIFT			(18)
+#define EPCTRL_RX_EP_TYPE_SHIFT			(2)
+
+#define EPCOMPLETE_MAX_ENDPOINTS		(16)
+
+/* endpoint list address bit masks */
+#define USB_EP_LIST_ADDRESS_MASK              0xfffff800
+
+#define PORTSCX_W1C_BITS			0x2a
+#define PORTSCX_PORT_RESET			0x00000100
+#define PORTSCX_PORT_POWER			0x00001000
+#define PORTSCX_FORCE_FULL_SPEED_CONNECT	0x01000000
+#define PORTSCX_PAR_XCVR_SELECT			0xC0000000
+#define PORTSCX_PORT_FORCE_RESUME		0x00000040
+#define PORTSCX_PORT_SUSPEND			0x00000080
+#define PORTSCX_PORT_SPEED_FULL			0x00000000
+#define PORTSCX_PORT_SPEED_LOW			0x04000000
+#define PORTSCX_PORT_SPEED_HIGH			0x08000000
+#define PORTSCX_PORT_SPEED_MASK			0x0C000000
+
+/* USB MODE Register Bit Masks */
+#define USBMODE_CTRL_MODE_IDLE			0x00000000
+#define USBMODE_CTRL_MODE_DEVICE		0x00000002
+#define USBMODE_CTRL_MODE_HOST			0x00000003
+#define USBMODE_CTRL_MODE_RSV			0x00000001
+#define USBMODE_SETUP_LOCK_OFF			0x00000008
+#define USBMODE_STREAM_DISABLE			0x00000010
+
+/* USB STS Register Bit Masks */
+#define USBSTS_INT			0x00000001
+#define USBSTS_ERR			0x00000002
+#define USBSTS_PORT_CHANGE		0x00000004
+#define USBSTS_FRM_LST_ROLL		0x00000008
+#define USBSTS_SYS_ERR			0x00000010
+#define USBSTS_IAA			0x00000020
+#define USBSTS_RESET			0x00000040
+#define USBSTS_SOF			0x00000080
+#define USBSTS_SUSPEND			0x00000100
+#define USBSTS_HC_HALTED		0x00001000
+#define USBSTS_RCL			0x00002000
+#define USBSTS_PERIODIC_SCHEDULE	0x00004000
+#define USBSTS_ASYNC_SCHEDULE		0x00008000
+
+
+/* Interrupt Enable Register Bit Masks */
+#define USBINTR_INT_EN                          (0x00000001)
+#define USBINTR_ERR_INT_EN                      (0x00000002)
+#define USBINTR_PORT_CHANGE_DETECT_EN           (0x00000004)
+
+#define USBINTR_ASYNC_ADV_AAE                   (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_ENABLE            (0x00000020)
+#define USBINTR_ASYNC_ADV_AAE_DISABLE           (0xFFFFFFDF)
+
+#define USBINTR_RESET_EN                        (0x00000040)
+#define USBINTR_SOF_UFRAME_EN                   (0x00000080)
+#define USBINTR_DEVICE_SUSPEND                  (0x00000100)
+
+#define USB_DEVICE_ADDRESS_MASK			(0xfe000000)
+#define USB_DEVICE_ADDRESS_BIT_SHIFT		(25)
+
+struct mv_cap_regs {
+	u32	caplength_hciversion;
+	u32	hcsparams;	/* HC structural parameters */
+	u32	hccparams;	/* HC Capability Parameters*/
+	u32	reserved[5];
+	u32	dciversion;	/* DC version number and reserved 16 bits */
+	u32	dccparams;	/* DC Capability Parameters */
+};
+
+struct mv_op_regs {
+	u32	usbcmd;		/* Command register */
+	u32	usbsts;		/* Status register */
+	u32	usbintr;	/* Interrupt enable */
+	u32	frindex;	/* Frame index */
+	u32	reserved1[1];
+	u32	deviceaddr;	/* Device Address */
+	u32	eplistaddr;	/* Endpoint List Address */
+	u32	ttctrl;		/* HOST TT status and control */
+	u32	burstsize;	/* Programmable Burst Size */
+	u32	txfilltuning;	/* Host Transmit Pre-Buffer Packet Tuning */
+	u32	reserved[4];
+	u32	epnak;		/* Endpoint NAK */
+	u32	epnaken;	/* Endpoint NAK Enable */
+	u32	configflag;	/* Configured Flag register */
+	u32	portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
+	u32	otgsc;
+	u32	usbmode;	/* USB Host/Device mode */
+	u32	epsetupstat;	/* Endpoint Setup Status */
+	u32	epprime;	/* Endpoint Initialize */
+	u32	epflush;	/* Endpoint De-initialize */
+	u32	epstatus;	/* Endpoint Status */
+	u32	epcomplete;	/* Endpoint Interrupt On Complete */
+	u32	epctrlx[16];	/* Endpoint Control, where x = 0.. 15 */
+	u32	mcr;		/* Mux Control */
+	u32	isr;		/* Interrupt Status */
+	u32	ier;		/* Interrupt Enable */
+};
+
+struct mv_udc {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	spinlock_t			lock;
+	struct completion		*done;
+	struct platform_device		*dev;
+	int				irq;
+
+	struct mv_cap_regs __iomem	*cap_regs;
+	struct mv_op_regs __iomem	*op_regs;
+	unsigned int			phy_regs;
+	unsigned int			max_eps;
+	struct mv_dqh			*ep_dqh;
+	size_t				ep_dqh_size;
+	dma_addr_t			ep_dqh_dma;
+
+	struct dma_pool			*dtd_pool;
+	struct mv_ep			*eps;
+
+	struct mv_dtd			*dtd_head;
+	struct mv_dtd			*dtd_tail;
+	unsigned int			dtd_entries;
+
+	struct mv_req			*status_req;
+	struct usb_ctrlrequest		local_setup_buff;
+
+	unsigned int		resume_state;	/* USB state to resume */
+	unsigned int		usb_state;	/* USB current state */
+	unsigned int		ep0_state;	/* Endpoint zero state */
+	unsigned int		ep0_dir;
+
+	unsigned int		dev_addr;
+
+	int			errors;
+	unsigned		softconnect:1,
+				vbus_active:1,
+				remote_wakeup:1,
+				softconnected:1,
+				force_fs:1;
+	struct clk		*clk;
+};
+
+/* endpoint data structure */
+struct mv_ep {
+	struct usb_ep		ep;
+	struct mv_udc		*udc;
+	struct list_head	queue;
+	struct mv_dqh		*dqh;
+	const struct usb_endpoint_descriptor	*desc;
+	u32			direction;
+	char			name[14];
+	unsigned		stopped:1,
+				wedge:1,
+				ep_type:2,
+				ep_num:8;
+};
+
+/* request data structure */
+struct mv_req {
+	struct usb_request	req;
+	struct mv_dtd		*dtd, *head, *tail;
+	struct mv_ep		*ep;
+	struct list_head	queue;
+	unsigned		dtd_count;
+	unsigned		mapped:1;
+};
+
+#define EP_QUEUE_HEAD_MULT_POS			30
+#define EP_QUEUE_HEAD_ZLT_SEL			0x20000000
+#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS		16
+#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info)	(((ep_info)>>16)&0x07ff)
+#define EP_QUEUE_HEAD_IOS			0x00008000
+#define EP_QUEUE_HEAD_NEXT_TERMINATE		0x00000001
+#define EP_QUEUE_HEAD_IOC			0x00008000
+#define EP_QUEUE_HEAD_MULTO			0x00000C00
+#define EP_QUEUE_HEAD_STATUS_HALT		0x00000040
+#define EP_QUEUE_HEAD_STATUS_ACTIVE		0x00000080
+#define EP_QUEUE_CURRENT_OFFSET_MASK		0x00000FFF
+#define EP_QUEUE_HEAD_NEXT_POINTER_MASK		0xFFFFFFE0
+#define EP_QUEUE_FRINDEX_MASK			0x000007FF
+#define EP_MAX_LENGTH_TRANSFER			0x4000
+
+struct mv_dqh {
+	/* Bits 16..26 Bit 15 is Interrupt On Setup */
+	u32	max_packet_length;
+	u32	curr_dtd_ptr;		/* Current dTD Pointer */
+	u32	next_dtd_ptr;		/* Next dTD Pointer */
+	/* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */
+	u32	size_ioc_int_sts;
+	u32	buff_ptr0;		/* Buffer pointer Page 0 (12-31) */
+	u32	buff_ptr1;		/* Buffer pointer Page 1 (12-31) */
+	u32	buff_ptr2;		/* Buffer pointer Page 2 (12-31) */
+	u32	buff_ptr3;		/* Buffer pointer Page 3 (12-31) */
+	u32	buff_ptr4;		/* Buffer pointer Page 4 (12-31) */
+	u32	reserved1;
+	/* 8 bytes of setup data that follows the Setup PID */
+	u8	setup_buffer[8];
+	u32	reserved2[4];
+};
+
+
+#define DTD_NEXT_TERMINATE		(0x00000001)
+#define DTD_IOC				(0x00008000)
+#define DTD_STATUS_ACTIVE		(0x00000080)
+#define DTD_STATUS_HALTED		(0x00000040)
+#define DTD_STATUS_DATA_BUFF_ERR	(0x00000020)
+#define DTD_STATUS_TRANSACTION_ERR	(0x00000008)
+#define DTD_RESERVED_FIELDS		(0x00007F00)
+#define DTD_ERROR_MASK			(0x68)
+#define DTD_ADDR_MASK			(0xFFFFFFE0)
+#define DTD_PACKET_SIZE			0x7FFF0000
+#define DTD_LENGTH_BIT_POS		(16)
+
+struct mv_dtd {
+	u32	dtd_next;
+	u32	size_ioc_sts;
+	u32	buff_ptr0;		/* Buffer pointer Page 0 */
+	u32	buff_ptr1;		/* Buffer pointer Page 1 */
+	u32	buff_ptr2;		/* Buffer pointer Page 2 */
+	u32	buff_ptr3;		/* Buffer pointer Page 3 */
+	u32	buff_ptr4;		/* Buffer pointer Page 4 */
+	u32	scratch_ptr;
+	/* 32 bytes */
+	dma_addr_t td_dma;		/* dma address for this td */
+	struct mv_dtd *next_dtd_virt;
+};
+
+extern int mv_udc_phy_init(unsigned int base);
+
+#endif
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
new file mode 100644
index 0000000..d5468a7
--- /dev/null
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -0,0 +1,2149 @@
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "mv_udc.h"
+
+#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
+#define DRIVER_VERSION		"8 Nov 2010"
+
+#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
+				((ep)->udc->ep0_dir) : ((ep)->direction))
+
+/* timeout value -- usec */
+#define RESET_TIMEOUT		10000
+#define FLUSH_TIMEOUT		10000
+#define EPSTATUS_TIMEOUT	10000
+#define PRIME_TIMEOUT		10000
+#define READSAFE_TIMEOUT	1000
+#define DTD_TIMEOUT		1000
+
+#define LOOPS_USEC_SHIFT	4
+#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
+#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
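+
+/*
+ * The timeouts above are in microseconds; LOOPS() converts them into the
+ * number of busy-wait iterations, each of which udelay()s for LOOPS_USEC.
+ */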
+
+static const char driver_name[] = "mv_udc";
+static const char driver_desc[] = DRIVER_DESC;
+
+/* controller device global variable */
+static struct mv_udc	*the_controller;
+int mv_usb_otgsc;
+
+static void nuke(struct mv_ep *ep, int status);
+
+/* for endpoint 0 operations */
+static const struct usb_endpoint_descriptor mv_ep0_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	0,
+	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
+};
+
+static void ep0_reset(struct mv_udc *udc)
+{
+	struct mv_ep *ep;
+	u32 epctrlx;
+	int i = 0;
+
+	/* ep0 in and out */
+	for (i = 0; i < 2; i++) {
+		ep = &udc->eps[i];
+		ep->udc = udc;
+
+		/* ep0 dQH */
+		ep->dqh = &udc->ep_dqh[i];
+
+		/* configure ep0 endpoint capabilities in dQH */
+		ep->dqh->max_packet_length =
+			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+			| EP_QUEUE_HEAD_IOS;
+
+		epctrlx = readl(&udc->op_regs->epctrlx[0]);
+		if (i) {	/* TX */
+			epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
+				| (USB_ENDPOINT_XFER_CONTROL
+					<< EPCTRL_TX_EP_TYPE_SHIFT);
+
+		} else {	/* RX */
+			epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
+				| (USB_ENDPOINT_XFER_CONTROL
+					<< EPCTRL_RX_EP_TYPE_SHIFT);
+		}
+
+		writel(epctrlx, &udc->op_regs->epctrlx[0]);
+	}
+}
+
+/* protocol ep0 stall, will automatically be cleared on new transaction */
+static void ep0_stall(struct mv_udc *udc)
+{
+	u32	epctrlx;
+
+	/* set TX and RX to stall */
+	epctrlx = readl(&udc->op_regs->epctrlx[0]);
+	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
+	writel(epctrlx, &udc->op_regs->epctrlx[0]);
+
+	/* update ep0 state */
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = EP_DIR_OUT;
+}
+
+static int process_ep_req(struct mv_udc *udc, int index,
+	struct mv_req *curr_req)
+{
+	struct mv_dtd	*curr_dtd;
+	struct mv_dqh	*curr_dqh;
+	int td_complete, actual, remaining_length = 0;
+	int i, direction;
+	int retval = 0;
+	u32 errors;
+
+	curr_dqh = &udc->ep_dqh[index];
+	direction = index % 2;
+
+	curr_dtd = curr_req->head;
+	td_complete = 0;
+	actual = curr_req->req.length;
+
+	for (i = 0; i < curr_req->dtd_count; i++) {
+		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
+			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
+				udc->eps[index].name);
+			return 1;
+		}
+
+		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
+		if (!errors) {
+			remaining_length +=
+				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
+					>> DTD_LENGTH_BIT_POS;
+			actual -= remaining_length;
+		} else {
+			dev_info(&udc->dev->dev,
+				"complete_tr error: ep=%d %s: error = 0x%x\n",
+				index >> 1, direction ? "SEND" : "RECV",
+				errors);
+			if (errors & DTD_STATUS_HALTED) {
+				/* Clear the errors and Halt condition */
+				curr_dqh->size_ioc_int_sts &= ~errors;
+				retval = -EPIPE;
+			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+				retval = -EPROTO;
+			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+				retval = -EILSEQ;
+			}
+		}
+		if (i != curr_req->dtd_count - 1)
+			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
+	}
+	if (retval)
+		return retval;
+
+	curr_req->req.actual = actual;
+
+	return 0;
+}
+
+/*
+ * done() - retire a request; caller must have blocked irqs
+ * @status : request status to be set; only takes effect while
+ * the request is still in progress.
+ */
+static void done(struct mv_ep *ep, struct mv_req *req, int status)
+{
+	struct mv_udc *udc = NULL;
+	unsigned char stopped = ep->stopped;
+	struct mv_dtd *curr_td, *next_td;
+	int j;
+
+	udc = (struct mv_udc *)ep->udc;
+	/* Remove the req from ep->queue */
+	list_del_init(&req->queue);
+
+	/* req.status should be set as -EINPROGRESS in ep_queue() */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	/* Free dtd for the request */
+	next_td = req->head;
+	for (j = 0; j < req->dtd_count; j++) {
+		curr_td = next_td;
+		if (j != req->dtd_count - 1)
+			next_td = curr_td->next_dtd_virt;
+		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
+	}
+
+	if (req->mapped) {
+		dma_unmap_single(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+	if (status && (status != -ESHUTDOWN))
+		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u\n",
+			ep->ep.name, &req->req, status,
+			req->req.actual, req->req.length);
+
+	ep->stopped = 1;
+
+	spin_unlock(&ep->udc->lock);
+	/*
+	 * complete() is from gadget layer,
+	 * eg fsg->bulk_in_complete()
+	 */
+	if (req->req.complete)
+		req->req.complete(&ep->ep, &req->req);
+
+	spin_lock(&ep->udc->lock);
+	ep->stopped = stopped;
+}
+
+static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
+{
+	u32 tmp, epstatus, bit_pos, direction;
+	struct mv_udc *udc;
+	struct mv_dqh *dqh;
+	unsigned int loops;
+	int readsafe, retval = 0;
+
+	udc = ep->udc;
+	direction = ep_dir(ep);
+	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
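+	/* ENDPTPRIME/ENDPTSTATUS use bits 0..15 for OUT endpoints, 16..31 for IN */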
+	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+	/* check if the pipe is empty */
+	if (!(list_empty(&ep->queue))) {
+		struct mv_req *lastreq;
+		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
+		lastreq->tail->dtd_next =
+			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+		if (readl(&udc->op_regs->epprime) & bit_pos) {
+			loops = LOOPS(PRIME_TIMEOUT);
+			while (readl(&udc->op_regs->epprime) & bit_pos) {
+				if (loops == 0) {
+					retval = -ETIME;
+					goto done;
+				}
+				udelay(LOOPS_USEC);
+				loops--;
+			}
+			if (readl(&udc->op_regs->epstatus) & bit_pos)
+				goto done;
+		}
+		readsafe = 0;
+		loops = LOOPS(READSAFE_TIMEOUT);
+		while (readsafe == 0) {
+			if (loops == 0) {
+				retval = -ETIME;
+				goto done;
+			}
+			/* start with setting the semaphores */
+			tmp = readl(&udc->op_regs->usbcmd);
+			tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
+			writel(tmp, &udc->op_regs->usbcmd);
+
+			/* read the endpoint status */
+			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
+
+			/*
+			 * Reread the ATDTW semaphore bit to check if it is
+			 * cleared. When the hardware sees a hazard it clears
+			 * the bit; otherwise it stays set and we can proceed
+			 * with priming the endpoint if it is not already
+			 * primed.
+			 */
+			if (readl(&udc->op_regs->usbcmd)
+				& USBCMD_ATDTW_TRIPWIRE_SET) {
+				readsafe = 1;
+			}
+			loops--;
+			udelay(LOOPS_USEC);
+		}
+
+		/* Clear the semaphore */
+		tmp = readl(&udc->op_regs->usbcmd);
+		tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+		writel(tmp, &udc->op_regs->usbcmd);
+
+		/* If endpoint is not active, we activate it now. */
+		if (!epstatus) {
+			if (direction == EP_DIR_IN) {
+				struct mv_dtd *curr_dtd = dma_to_virt(
+					&udc->dev->dev, dqh->curr_dtd_ptr);
+
+				loops = LOOPS(DTD_TIMEOUT);
+				while (curr_dtd->size_ioc_sts
+					& DTD_STATUS_ACTIVE) {
+					if (loops == 0) {
+						retval = -ETIME;
+						goto done;
+					}
+					loops--;
+					udelay(LOOPS_USEC);
+				}
+			}
+			/* No other transfers on the queue */
+
+			/* Write dQH next pointer and terminate bit to 0 */
+			dqh->next_dtd_ptr = req->head->td_dma
+				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+			dqh->size_ioc_int_sts = 0;
+
+			/*
+			 * Ensure that updates to the QH will
+			 * occur before priming.
+			 */
+			wmb();
+
+			/* Prime the Endpoint */
+			writel(bit_pos, &udc->op_regs->epprime);
+		}
+	} else {
+		/* Write dQH next pointer and terminate bit to 0 */
+		dqh->next_dtd_ptr = req->head->td_dma
+			& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+		dqh->size_ioc_int_sts = 0;
+
+		/* Ensure that updates to the QH will occur before priming. */
+		wmb();
+
+		/* Prime the Endpoint */
+		writel(bit_pos, &udc->op_regs->epprime);
+
+		if (direction == EP_DIR_IN) {
+			/* FIXME add status check after prime the IN ep */
+			int prime_again;
+			u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
+
+			loops = LOOPS(DTD_TIMEOUT);
+			prime_again = 0;
+			while ((curr_dtd_ptr != req->head->td_dma)) {
+				curr_dtd_ptr = dqh->curr_dtd_ptr;
+				if (loops == 0) {
+					dev_err(&udc->dev->dev,
+						"failed to prime %s\n",
+						ep->name);
+					retval = -ETIME;
+					goto done;
+				}
+				loops--;
+				udelay(LOOPS_USEC);
+
+				if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
+					if (prime_again)
+						goto done;
+					dev_info(&udc->dev->dev,
+						"prime again\n");
+					writel(bit_pos,
+						&udc->op_regs->epprime);
+					prime_again = 1;
+				}
+			}
+		}
+	}
+done:
+	return retval;
+}
+
+static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
+		dma_addr_t *dma, int *is_last)
+{
+	u32 temp;
+	struct mv_dtd *dtd;
+	struct mv_udc *udc;
+
+	/* how big will this transfer be? */
+	*length = min(req->req.length - req->req.actual,
+			(unsigned)EP_MAX_LENGTH_TRANSFER);
+
+	udc = req->ep->udc;
+
+	/*
+	 * Be careful that no _GFP_HIGHMEM is set,
+	 * Be careful that no __GFP_HIGHMEM is set,
+	 * or we cannot use dma_to_virt.
+	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
+	if (dtd == NULL)
+		return dtd;
+
+	dtd->td_dma = *dma;
+	/* initialize buffer page pointers */
+	temp = (u32)(req->req.dma + req->req.actual);
+	dtd->buff_ptr0 = cpu_to_le32(temp);
+	temp &= ~0xFFF;
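+	/* the remaining page pointers cover the following 4 KiB pages */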
+	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
+	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
+	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
+	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
+
+	req->req.actual += *length;
+
+	/* zlp is needed if req->req.zero is set */
+	if (req->req.zero) {
+		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+			*is_last = 1;
+		else
+			*is_last = 0;
+	} else if (req->req.length == req->req.actual)
+		*is_last = 1;
+	else
+		*is_last = 0;
+
+	/* Fill in the transfer size; set active bit */
+	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+
+	/* Enable interrupt for the last dtd of a request */
+	if (*is_last && !req->req.no_interrupt)
+		temp |= DTD_IOC;
+
+	dtd->size_ioc_sts = temp;
+
+	mb();
+
+	return dtd;
+}
+
+/* generate dTD linked list for a request */
+static int req_to_dtd(struct mv_req *req)
+{
+	unsigned count;
+	int is_last, is_first = 1;
+	struct mv_dtd *dtd, *last_dtd = NULL;
+	struct mv_udc *udc;
+	dma_addr_t dma;
+
+	udc = req->ep->udc;
+
+	do {
+		dtd = build_dtd(req, &count, &dma, &is_last);
+		if (dtd == NULL)
+			return -ENOMEM;
+
+		if (is_first) {
+			is_first = 0;
+			req->head = dtd;
+		} else {
+			last_dtd->dtd_next = dma;
+			last_dtd->next_dtd_virt = dtd;
+		}
+		last_dtd = dtd;
+		req->dtd_count++;
+	} while (!is_last);
+
+	/* set terminate bit to 1 for the last dTD */
+	dtd->dtd_next = DTD_NEXT_TERMINATE;
+
+	req->tail = dtd;
+
+	return 0;
+}
+
+static int mv_ep_enable(struct usb_ep *_ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct mv_udc *udc;
+	struct mv_ep *ep;
+	struct mv_dqh *dqh;
+	u16 max = 0;
+	u32 bit_pos, epctrlx, direction;
+	unsigned char zlt = 0, ios = 0, mult = 0;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	udc = ep->udc;
+
+	if (!_ep || !desc || ep->desc
+			|| desc->bDescriptorType != USB_DT_ENDPOINT)
+		return -EINVAL;
+
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	direction = ep_dir(ep);
+	max = le16_to_cpu(desc->wMaxPacketSize);
+
+	/*
+	 * disable HW zero length termination select
+	 * driver handles zero length packet through req->req.zero
+	 */
+	zlt = 1;
+
+	/* Get the endpoint queue head address */
+	dqh = (struct mv_dqh *)ep->dqh;
+
+	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+
+	/* Check if the Endpoint is Primed */
+	if ((readl(&udc->op_regs->epprime) & bit_pos)
+		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
+		dev_info(&udc->dev->dev,
+			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
+			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
+			(unsigned)readl(&udc->op_regs->epprime),
+			(unsigned)readl(&udc->op_regs->epstatus),
+			(unsigned)bit_pos);
+		goto en_done;
+	}
+	/* Set the max packet length, interrupt on Setup and Mult fields */
+	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+	case USB_ENDPOINT_XFER_BULK:
+		zlt = 1;
+		mult = 0;
+		break;
+	case USB_ENDPOINT_XFER_CONTROL:
+		ios = 1;	/* fall through */
+	case USB_ENDPOINT_XFER_INT:
+		mult = 0;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		/* Calculate transactions needed for high bandwidth iso */
+		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+		max = max & 0x7ff;	/* bits 0~10 */
+		/* 3 transactions at most */
+		if (mult > 3)
+			goto en_done;
+		break;
+	default:
+		goto en_done;
+	}
+	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+		| (mult << EP_QUEUE_HEAD_MULT_POS)
+		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
+		| (ios ? EP_QUEUE_HEAD_IOS : 0);
+	dqh->next_dtd_ptr = 1;
+	dqh->size_ioc_int_sts = 0;
+
+	ep->ep.maxpacket = max;
+	ep->desc = desc;
+	ep->stopped = 0;
+
+	/* Enable the endpoint for Rx or Tx and set the endpoint type */
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if (direction == EP_DIR_IN) {
+		epctrlx &= ~EPCTRL_TX_ALL_MASK;
+		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
+			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				<< EPCTRL_TX_EP_TYPE_SHIFT);
+	} else {
+		epctrlx &= ~EPCTRL_RX_ALL_MASK;
+		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
+			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				<< EPCTRL_RX_EP_TYPE_SHIFT);
+	}
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+	/*
+	 * Implement Guideline (GL# USB-7) The unused endpoint type must
+	 * be programmed to bulk.
+	 */
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
+		epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				<< EPCTRL_RX_EP_TYPE_SHIFT);
+		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+	}
+
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
+		epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				<< EPCTRL_TX_EP_TYPE_SHIFT);
+		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+	}
+
+	return 0;
+en_done:
+	return -EINVAL;
+}
+
+static int  mv_ep_disable(struct usb_ep *_ep)
+{
+	struct mv_udc *udc;
+	struct mv_ep *ep;
+	struct mv_dqh *dqh;
+	u32 bit_pos, epctrlx, direction;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	if ((_ep == NULL) || !ep->desc)
+		return -EINVAL;
+
+	udc = ep->udc;
+
+	/* Get the endpoint queue head address */
+	dqh = ep->dqh;
+
+	direction = ep_dir(ep);
+	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+
+	/* Reset the max packet length and the interrupt on Setup */
+	dqh->max_packet_length = 0;
+
+	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	epctrlx &= ~((direction == EP_DIR_IN)
+			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
+			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+	/* nuke all pending requests (does flush) */
+	nuke(ep, -ESHUTDOWN);
+
+	ep->desc = NULL;
+	ep->stopped = 1;
+	return 0;
+}
+
+static struct usb_request *
+mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+	struct mv_req *req = NULL;
+
+	req = kzalloc(sizeof *req, gfp_flags);
+	if (!req)
+		return NULL;
+
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+
+	return &req->req;
+}
+
+static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_req *req = NULL;
+
+	req = container_of(_req, struct mv_req, req);
+
+	if (_req)
+		kfree(req);
+}
+
+static void mv_ep_fifo_flush(struct usb_ep *_ep)
+{
+	struct mv_udc *udc;
+	u32 bit_pos, direction;
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	unsigned int loops;
+
+	udc = ep->udc;
+	direction = ep_dir(ep);
+	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+	/*
+	 * Flushing will halt the pipe
+	 * Write 1 to the Flush register
+	 */
+	writel(bit_pos, &udc->op_regs->epflush);
+
+	/* Wait until flushing completed */
+	loops = LOOPS(FLUSH_TIMEOUT);
+	while (readl(&udc->op_regs->epflush) & bit_pos) {
+		/*
+		 * ENDPTFLUSH bit should be cleared to indicate this
+		 * operation is complete
+		 */
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
+				(unsigned)readl(&udc->op_regs->epflush),
+				(unsigned)bit_pos);
+			return;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+	loops = LOOPS(EPSTATUS_TIMEOUT);
+	while (readl(&udc->op_regs->epstatus) & bit_pos) {
+		unsigned int inter_loops;
+
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+				(unsigned)readl(&udc->op_regs->epstatus),
+				(unsigned)bit_pos);
+			return;
+		}
+		/* Write 1 to the Flush register */
+		writel(bit_pos, &udc->op_regs->epflush);
+
+		/* Wait until flushing completed */
+		inter_loops = LOOPS(FLUSH_TIMEOUT);
+		while (readl(&udc->op_regs->epflush) & bit_pos) {
+			/*
+			 * ENDPTFLUSH bit should be cleared to indicate this
+			 * operation is complete
+			 */
+			if (inter_loops == 0) {
+				dev_err(&udc->dev->dev,
+					"TIMEOUT for ENDPTFLUSH=0x%x, "
+					"bit_pos=0x%x\n",
+					(unsigned)readl(&udc->op_regs->epflush),
+					(unsigned)bit_pos);
+				return;
+			}
+			inter_loops--;
+			udelay(LOOPS_USEC);
+		}
+		loops--;
+	}
+}
+
+/* queues (submits) an I/O request to an endpoint */
+static int
+mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req = container_of(_req, struct mv_req, req);
+	struct mv_udc *udc = ep->udc;
+	unsigned long flags;
+
+	/* catch various bogus parameters */
+	if (!_req || !req->req.complete || !req->req.buf
+			|| !list_empty(&req->queue)) {
+		dev_err(&udc->dev->dev, "%s, bad params", __func__);
+		return -EINVAL;
+	}
+	if (unlikely(!_ep || !ep->desc)) {
+		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
+		return -EINVAL;
+	}
+	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		if (req->req.length > ep->ep.maxpacket)
+			return -EMSGSIZE;
+	}
+
+	udc = ep->udc;
+	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+		return -ESHUTDOWN;
+
+	req->ep = ep;
+
+	/* map virtual address to hardware */
+	if (req->req.dma == DMA_ADDR_INVALID) {
+		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+					req->req.buf,
+					req->req.length, ep_dir(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 1;
+	} else {
+		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
+					req->req.dma, req->req.length,
+					ep_dir(ep)
+						? DMA_TO_DEVICE
+						: DMA_FROM_DEVICE);
+		req->mapped = 0;
+	}
+
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->dtd_count = 0;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* build dtds and push them to device queue */
+	if (!req_to_dtd(req)) {
+		int retval;
+		retval = queue_dtd(ep, req);
+		if (retval) {
+			spin_unlock_irqrestore(&udc->lock, flags);
+			return retval;
+		}
+	} else {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -ENOMEM;
+	}
+
+	/* Update ep0 state */
+	if (ep->ep_num == 0)
+		udc->ep0_state = DATA_STATE_XMIT;
+
+	/* irq handler advances the queue */
+	list_add_tail(&req->queue, &ep->queue);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+/* dequeues (cancels, unlinks) an I/O request from an endpoint */
+static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+	struct mv_req *req;
+	struct mv_udc *udc = ep->udc;
+	unsigned long flags;
+	int stopped, ret = 0;
+	u32 epctrlx;
+
+	if (!_ep || !_req)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	stopped = ep->stopped;
+
+	/* Stop the ep before we deal with the queue */
+	ep->stopped = 1;
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if (ep_dir(ep) == EP_DIR_IN)
+		epctrlx &= ~EPCTRL_TX_ENABLE;
+	else
+		epctrlx &= ~EPCTRL_RX_ENABLE;
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+
+	/* make sure it's actually queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == _req)
+			break;
+	}
+	if (&req->req != _req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* The request is in progress, or completed but not dequeued */
+	if (ep->queue.next == &req->queue) {
+		_req->status = -ECONNRESET;
+		mv_ep_fifo_flush(_ep);	/* flush current transfer */
+
+		/* The request isn't the last request in this ep queue */
+		if (req->queue.next != &ep->queue) {
+			struct mv_dqh *qh;
+			struct mv_req *next_req;
+
+			qh = ep->dqh;
+			next_req = list_entry(req->queue.next, struct mv_req,
+					queue);
+
+			/* Point the QH to the first TD of next request */
+			writel((u32) next_req->head, &qh->curr_dtd_ptr);
+		} else {
+			struct mv_dqh *qh;
+
+			qh = ep->dqh;
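+			/*
+			 * Bit 0 of next_dtd_ptr is the Terminate bit; setting
+			 * it tells the controller there are no more dTDs to
+			 * fetch for this endpoint.
+			 */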
+			qh->next_dtd_ptr = 1;
+			qh->size_ioc_int_sts = 0;
+		}
+	} else {
+		/* The request hasn't been processed, patch up the TD chain */
+		struct mv_req *prev_req;
+
+		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
+		writel(readl(&req->tail->dtd_next),
+				&prev_req->tail->dtd_next);
+	}
+
+	done(ep, req, -ECONNRESET);
+
+	/* Enable EP */
+out:
+	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+	if (ep_dir(ep) == EP_DIR_IN)
+		epctrlx |= EPCTRL_TX_ENABLE;
+	else
+		epctrlx |= EPCTRL_RX_ENABLE;
+	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+	ep->stopped = stopped;
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+	return ret;
+}
+
+static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
+{
+	u32 epctrlx;
+
+	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+
+	if (stall) {
+		if (direction == EP_DIR_IN)
+			epctrlx |= EPCTRL_TX_EP_STALL;
+		else
+			epctrlx |= EPCTRL_RX_EP_STALL;
+	} else {
+		if (direction == EP_DIR_IN) {
+			epctrlx &= ~EPCTRL_TX_EP_STALL;
+			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
+		} else {
+			epctrlx &= ~EPCTRL_RX_EP_STALL;
+			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
+		}
+	}
+	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
+}
+
+static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
+{
+	u32 epctrlx;
+
+	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+
+	if (direction == EP_DIR_OUT)
+		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
+	else
+		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
+}
+
+static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+{
+	struct mv_ep *ep;
+	unsigned long flags = 0;
+	int status = 0;
+	struct mv_udc *udc;
+
+	ep = container_of(_ep, struct mv_ep, ep);
+	udc = ep->udc;
+	if (!_ep || !ep->desc) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+		status = -EOPNOTSUPP;
+		goto out;
+	}
+
+	/*
+	 * An attempt to halt an IN endpoint will fail if any transfer
+	 * requests are still queued.
+	 */
+	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
+		status = -EAGAIN;
+		goto out;
+	}
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
+	if (halt && wedge)
+		ep->wedge = 1;
+	else if (!halt)
+		ep->wedge = 0;
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	if (ep->ep_num == 0) {
+		udc->ep0_state = WAIT_FOR_SETUP;
+		udc->ep0_dir = EP_DIR_OUT;
+	}
+out:
+	return status;
+}
+
+static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+	return mv_ep_set_halt_wedge(_ep, halt, 0);
+}
+
+static int mv_ep_set_wedge(struct usb_ep *_ep)
+{
+	return mv_ep_set_halt_wedge(_ep, 1, 1);
+}
+
+static struct usb_ep_ops mv_ep_ops = {
+	.enable		= mv_ep_enable,
+	.disable	= mv_ep_disable,
+
+	.alloc_request	= mv_alloc_request,
+	.free_request	= mv_free_request,
+
+	.queue		= mv_ep_queue,
+	.dequeue	= mv_ep_dequeue,
+
+	.set_wedge	= mv_ep_set_wedge,
+	.set_halt	= mv_ep_set_halt,
+	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
+};
+
+static void udc_stop(struct mv_udc *udc)
+{
+	u32 tmp;
+
+	/* Disable interrupts */
+	tmp = readl(&udc->op_regs->usbintr);
+	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
+		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
+	writel(tmp, &udc->op_regs->usbintr);
+
+	/* Clear the Run/Stop bit in the command register to stop the UDC */
+	tmp = readl(&udc->op_regs->usbcmd);
+	tmp &= ~USBCMD_RUN_STOP;
+	writel(tmp, &udc->op_regs->usbcmd);
+}
+
+static void udc_start(struct mv_udc *udc)
+{
+	u32 usbintr;
+
+	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
+		| USBINTR_PORT_CHANGE_DETECT_EN
+		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
+	/* Enable interrupts */
+	writel(usbintr, &udc->op_regs->usbintr);
+
+	/* Set the Run bit in the command register */
+	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
+}
+
+static int udc_reset(struct mv_udc *udc)
+{
+	unsigned int loops;
+	u32 tmp, portsc;
+
+	/* Stop the controller */
+	tmp = readl(&udc->op_regs->usbcmd);
+	tmp &= ~USBCMD_RUN_STOP;
+	writel(tmp, &udc->op_regs->usbcmd);
+
+	/* Reset the controller to get default values */
+	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
+
+	/* wait for reset to complete */
+	loops = LOOPS(RESET_TIMEOUT);
+	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"Wait for RESET completed TIMEOUT\n");
+			return -ETIMEDOUT;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+
+	/* set controller to device mode */
+	tmp = readl(&udc->op_regs->usbmode);
+	tmp |= USBMODE_CTRL_MODE_DEVICE;
+
+	/* turn setup lockout off, require setup tripwire in usbcmd */
+	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
+
+	writel(tmp, &udc->op_regs->usbmode);
+
+	writel(0x0, &udc->op_regs->epsetupstat);
+
+	/* Configure the Endpoint List Address */
+	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
+		&udc->op_regs->eplistaddr);
+
+	portsc = readl(&udc->op_regs->portsc[0]);
+	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
+		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
+
+	if (udc->force_fs)
+		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
+	else
+		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
+
+	writel(portsc, &udc->op_regs->portsc[0]);
+
+	tmp = readl(&udc->op_regs->epctrlx[0]);
+	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
+	writel(tmp, &udc->op_regs->epctrlx[0]);
+
+	return 0;
+}
+
+static int mv_udc_get_frame(struct usb_gadget *gadget)
+{
+	struct mv_udc *udc;
+	u16	retval;
+
+	if (!gadget)
+		return -ENODEV;
+
+	udc = container_of(gadget, struct mv_udc, gadget);
+
+	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+
+	return retval;
+}
+
+/* Tries to wake up the host connected to this gadget */
+static int mv_udc_wakeup(struct usb_gadget *gadget)
+{
+	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
+	u32 portsc;
+
+	/* Remote wakeup feature not enabled by host */
+	if (!udc->remote_wakeup)
+		return -ENOTSUPP;
+
+	portsc = readl(&udc->op_regs->portsc);
+	/* not suspended? */
+	if (!(portsc & PORTSCX_PORT_SUSPEND))
+		return 0;
+	/* trigger force resume */
+	portsc |= PORTSCX_PORT_FORCE_RESUME;
+	writel(portsc, &udc->op_regs->portsc[0]);
+	return 0;
+}
+
+static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct mv_udc *udc;
+	unsigned long flags;
+
+	udc = container_of(gadget, struct mv_udc, gadget);
+	spin_lock_irqsave(&udc->lock, flags);
+
+	udc->softconnect = (is_on != 0);
+	if (udc->driver && udc->softconnect)
+		udc_start(udc);
+	else
+		udc_stop(udc);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+	return 0;
+}
+
+/* device controller usb_gadget_ops structure */
+static const struct usb_gadget_ops mv_ops = {
+
+	/* returns the current frame number */
+	.get_frame	= mv_udc_get_frame,
+
+	/* tries to wake up the host connected to this gadget */
+	.wakeup		= mv_udc_wakeup,
+
+	/* D+ pullup, software-controlled connect/disconnect to USB host */
+	.pullup		= mv_udc_pullup,
+};
+
+static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
+{
+	dev_info(&udc->dev->dev, "Test mode is not supported yet\n");
+}
+
+static int eps_init(struct mv_udc *udc)
+{
+	struct mv_ep	*ep;
+	char name[14];
+	int i;
+
+	/* initialize ep0 */
+	ep = &udc->eps[0];
+	ep->udc = udc;
+	strncpy(ep->name, "ep0", sizeof(ep->name));
+	ep->ep.name = ep->name;
+	ep->ep.ops = &mv_ep_ops;
+	ep->wedge = 0;
+	ep->stopped = 0;
+	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
+	ep->ep_num = 0;
+	ep->desc = &mv_ep0_desc;
+	INIT_LIST_HEAD(&ep->queue);
+
+	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+
+	/* initialize other endpoints */
+	for (i = 2; i < udc->max_eps * 2; i++) {
+		ep = &udc->eps[i];
+		if (i % 2) {
+			snprintf(name, sizeof(name), "ep%din", i / 2);
+			ep->direction = EP_DIR_IN;
+		} else {
+			snprintf(name, sizeof(name), "ep%dout", i / 2);
+			ep->direction = EP_DIR_OUT;
+		}
+		ep->udc = udc;
+		strncpy(ep->name, name, sizeof(ep->name));
+		ep->ep.name = ep->name;
+
+		ep->ep.ops = &mv_ep_ops;
+		ep->stopped = 0;
+		ep->ep.maxpacket = (unsigned short) ~0;
+		ep->ep_num = i / 2;
+
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+
+		ep->dqh = &udc->ep_dqh[i];
+	}
+
+	return 0;
+}
+
+/* delete all endpoint requests, called with spinlock held */
+static void nuke(struct mv_ep *ep, int status)
+{
+	/* called with spinlock held */
+	ep->stopped = 1;
+
+	/* endpoint fifo flush */
+	mv_ep_fifo_flush(&ep->ep);
+
+	while (!list_empty(&ep->queue)) {
+		struct mv_req *req = NULL;
+		req = list_entry(ep->queue.next, struct mv_req, queue);
+		done(ep, req, status);
+	}
+}
+
+/* stop all USB activities */
+static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
+{
+	struct mv_ep	*ep;
+
+	nuke(&udc->eps[0], -ESHUTDOWN);
+
+	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+		nuke(ep, -ESHUTDOWN);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (driver) {
+		spin_unlock(&udc->lock);
+		driver->disconnect(&udc->gadget);
+		spin_lock(&udc->lock);
+	}
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+		int (*bind)(struct usb_gadget *))
+{
+	struct mv_udc *udc = the_controller;
+	int retval = 0;
+	unsigned long flags;
+
+	if (!udc)
+		return -ENODEV;
+
+	if (udc->driver)
+		return -EBUSY;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* hook up the driver ... */
+	driver->driver.bus = NULL;
+	udc->driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+
+	udc->usb_state = USB_STATE_ATTACHED;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->ep0_dir = USB_DIR_OUT;
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	retval = bind(&udc->gadget);
+	if (retval) {
+		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
+				driver->driver.name, retval);
+		udc->driver = NULL;
+		udc->gadget.dev.driver = NULL;
+		return retval;
+	}
+	udc_reset(udc);
+	ep0_reset(udc);
+	udc_start(udc);
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct mv_udc *udc = the_controller;
+	unsigned long flags;
+
+	if (!udc)
+		return -ENODEV;
+
+	udc_stop(udc);
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	/* stop all usb activities */
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	stop_activity(udc, driver);
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	/* unbind gadget driver */
+	driver->unbind(&udc->gadget);
+	udc->gadget.dev.driver = NULL;
+	udc->driver = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
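+/*
+ * Queue a response on ep0: a two-byte status value (e.g. for GET_STATUS)
+ * or, when @empty is true, a zero-length packet for the status stage.
+ */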
+static int
+udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
+{
+	int retval = 0;
+	struct mv_req *req;
+	struct mv_ep *ep;
+
+	ep = &udc->eps[0];
+	udc->ep0_dir = direction;
+
+	req = udc->status_req;
+
+	/* fill in the request structure */
+	if (empty == false) {
+		*((u16 *) req->req.buf) = cpu_to_le16(status);
+		req->req.length = 2;
+	} else
+		req->req.length = 0;
+
+	req->ep = ep;
+	req->req.status = -EINPROGRESS;
+	req->req.actual = 0;
+	req->req.complete = NULL;
+	req->dtd_count = 0;
+
+	/* prime the data phase */
+	if (!req_to_dtd(req)) {
+		retval = queue_dtd(ep, req);
+	} else {	/* no memory for dTDs */
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	if (retval) {
+		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
+		goto out;
+	}
+
+	list_add_tail(&req->queue, &ep->queue);
+
+	return 0;
+out:
+	return retval;
+}
+
+static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+	udc->dev_addr = (u8)setup->wValue;
+
+	/* update usb state */
+	udc->usb_state = USB_STATE_ADDRESS;
+
+	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+		ep0_stall(udc);
+}
+
+static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
+	struct usb_ctrlrequest *setup)
+{
+	u16 status = 0;
+	int retval;
+
+	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+		!= (USB_DIR_IN | USB_TYPE_STANDARD))
+		return;
+
+	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+		status = 1 << USB_DEVICE_SELF_POWERED;
+		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+	} else if ((setup->bRequestType & USB_RECIP_MASK)
+			== USB_RECIP_INTERFACE) {
+		/* get interface status */
+		status = 0;
+	} else if ((setup->bRequestType & USB_RECIP_MASK)
+			== USB_RECIP_ENDPOINT) {
+		u8 ep_num, direction;
+
+		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+				? EP_DIR_IN : EP_DIR_OUT;
+		status = ep_is_stall(udc, ep_num, direction)
+				<< USB_ENDPOINT_HALT;
+	}
+
+	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
+	if (retval)
+		ep0_stall(udc);
+}
+
+static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+	u8 ep_num;
+	u8 direction;
+	struct mv_ep *ep;
+
+	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+		switch (setup->wValue) {
+		case USB_DEVICE_REMOTE_WAKEUP:
+			udc->remote_wakeup = 0;
+			break;
+		case USB_DEVICE_TEST_MODE:
+			mv_udc_testmode(udc, 0, false);
+			break;
+		default:
+			goto out;
+		}
+	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+		switch (setup->wValue) {
+		case USB_ENDPOINT_HALT:
+			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+				? EP_DIR_IN : EP_DIR_OUT;
+			if (setup->wValue != 0 || setup->wLength != 0
+				|| ep_num > udc->max_eps)
+				goto out;
+			ep = &udc->eps[ep_num * 2 + direction];
+			if (ep->wedge == 1)
+				break;
+			spin_unlock(&udc->lock);
+			ep_set_stall(udc, ep_num, direction, 0);
+			spin_lock(&udc->lock);
+			break;
+		default:
+			goto out;
+		}
+	} else
+		goto out;
+
+	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+		ep0_stall(udc);
+	else
+		udc->ep0_state = DATA_STATE_XMIT;
+out:
+	return;
+}
+
+static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+{
+	u8 ep_num;
+	u8 direction;
+
+	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+		switch (setup->wValue) {
+		case USB_DEVICE_REMOTE_WAKEUP:
+			udc->remote_wakeup = 1;
+			break;
+		case USB_DEVICE_TEST_MODE:
+			if (setup->wIndex & 0xFF
+				&& udc->gadget.speed != USB_SPEED_HIGH)
+				goto out;
+			if (udc->usb_state == USB_STATE_CONFIGURED
+				|| udc->usb_state == USB_STATE_ADDRESS
+				|| udc->usb_state == USB_STATE_DEFAULT)
+				mv_udc_testmode(udc,
+					setup->wIndex & 0xFF00, true);
+			else
+				goto out;
+			break;
+		default:
+			goto out;
+		}
+	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+		switch (setup->wValue) {
+		case USB_ENDPOINT_HALT:
+			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+				? EP_DIR_IN : EP_DIR_OUT;
+			if (setup->wValue != 0 || setup->wLength != 0
+				|| ep_num > udc->max_eps)
+				goto out;
+			spin_unlock(&udc->lock);
+			ep_set_stall(udc, ep_num, direction, 1);
+			spin_lock(&udc->lock);
+			break;
+		default:
+			goto out;
+		}
+	} else
+		goto out;
+
+	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+		ep0_stall(udc);
+out:
+	return;
+}
+
+static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
+	struct usb_ctrlrequest *setup)
+{
+	bool delegate = false;
+
+	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
+
+	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+			setup->bRequestType, setup->bRequest,
+			setup->wValue, setup->wIndex, setup->wLength);
+	/* We process some standard setup requests here */
+	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+		switch (setup->bRequest) {
+		case USB_REQ_GET_STATUS:
+			ch9getstatus(udc, ep_num, setup);
+			break;
+
+		case USB_REQ_SET_ADDRESS:
+			ch9setaddress(udc, setup);
+			break;
+
+		case USB_REQ_CLEAR_FEATURE:
+			ch9clearfeature(udc, setup);
+			break;
+
+		case USB_REQ_SET_FEATURE:
+			ch9setfeature(udc, setup);
+			break;
+
+		default:
+			delegate = true;
+		}
+	} else
+		delegate = true;
+
+	/* delegate the remaining requests to the gadget driver */
+	if (delegate) {
+		/* USB requests handled by gadget */
+		if (setup->wLength) {
+			/* DATA phase from gadget, STATUS phase from udc */
+			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+					?  EP_DIR_IN : EP_DIR_OUT;
+			spin_unlock(&udc->lock);
+			if (udc->driver->setup(&udc->gadget,
+				&udc->local_setup_buff) < 0)
+				ep0_stall(udc);
+			spin_lock(&udc->lock);
+			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+					?  DATA_STATE_XMIT : DATA_STATE_RECV;
+		} else {
+			/* no DATA phase, IN STATUS phase from gadget */
+			udc->ep0_dir = EP_DIR_IN;
+			spin_unlock(&udc->lock);
+			if (udc->driver->setup(&udc->gadget,
+				&udc->local_setup_buff) < 0)
+				ep0_stall(udc);
+			spin_lock(&udc->lock);
+			udc->ep0_state = WAIT_FOR_OUT_STATUS;
+		}
+	}
+}
+
+/* complete DATA or STATUS phase of ep0; prime the status phase if needed */
+static void ep0_req_complete(struct mv_udc *udc,
+	struct mv_ep *ep0, struct mv_req *req)
+{
+	u32 new_addr;
+
+	if (udc->usb_state == USB_STATE_ADDRESS) {
+		/* set the new address */
+		new_addr = (u32)udc->dev_addr;
+		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
+			&udc->op_regs->deviceaddr);
+	}
+
+	done(ep0, req, 0);
+
+	switch (udc->ep0_state) {
+	case DATA_STATE_XMIT:
+		/* receive status phase */
+		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
+			ep0_stall(udc);
+		break;
+	case DATA_STATE_RECV:
+		/* send status phase */
+		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+			ep0_stall(udc);
+		break;
+	case WAIT_FOR_OUT_STATUS:
+		udc->ep0_state = WAIT_FOR_SETUP;
+		break;
+	case WAIT_FOR_SETUP:
+		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
+		break;
+	default:
+		ep0_stall(udc);
+		break;
+	}
+}
+
+static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
+{
+	u32 temp;
+	struct mv_dqh *dqh;
+
+	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
+
+	/* Clear bit in ENDPTSETUPSTAT */
+	temp = readl(&udc->op_regs->epsetupstat);
+	writel(temp | (1 << ep_num), &udc->op_regs->epsetupstat);
+
+	/*
+	 * A new setup packet can overwrite the buffer while we copy it.
+	 * Set the setup tripwire before the copy and repeat until the
+	 * tripwire is still set afterwards, which means the copy was not
+	 * interrupted by another incoming setup packet.
+	 */
+	do {
+		/* Set Setup Tripwire */
+		temp = readl(&udc->op_regs->usbcmd);
+		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+
+		/* Copy the setup packet to local buffer */
+		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
+	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
+
+	/* Clear Setup Tripwire */
+	temp = readl(&udc->op_regs->usbcmd);
+	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+}
+
+static void irq_process_tr_complete(struct mv_udc *udc)
+{
+	u32 tmp, bit_pos;
+	int i, ep_num = 0, direction = 0;
+	struct mv_ep	*curr_ep;
+	struct mv_req *curr_req, *temp_req;
+	int status;
+
+	/*
+	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
+	 * because the setup packets are to be read ASAP
+	 */
+
+	/* Process all Setup packet received interrupts */
+	tmp = readl(&udc->op_regs->epsetupstat);
+
+	if (tmp) {
+		for (i = 0; i < udc->max_eps; i++) {
+			if (tmp & (1 << i)) {
+				get_setup_data(udc, i,
+					(u8 *)(&udc->local_setup_buff));
+				handle_setup_packet(udc, i,
+					&udc->local_setup_buff);
+			}
+		}
+	}
+
+	/* Don't clear the endpoint setup status register here.
+	 * It is cleared as a setup packet is read out of the buffer
+	 */
+
+	/* Process non-setup transaction complete interrupts */
+	tmp = readl(&udc->op_regs->epcomplete);
+
+	if (!tmp)
+		return;
+
+	writel(tmp, &udc->op_regs->epcomplete);
+
+	for (i = 0; i < udc->max_eps * 2; i++) {
+		ep_num = i >> 1;
+		direction = i % 2;
+
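+		/* RX (OUT) completions sit in bits 0-15, TX (IN) in bits 16-31 */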
+		bit_pos = 1 << (ep_num + 16 * direction);
+
+		if (!(bit_pos & tmp))
+			continue;
+
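+		/* ep0 IN and OUT share the single mv_ep at eps[0] */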
+		if (i == 1)
+			curr_ep = &udc->eps[0];
+		else
+			curr_ep = &udc->eps[i];
+		/* process the request queue until the first incomplete request */
+		list_for_each_entry_safe(curr_req, temp_req,
+			&curr_ep->queue, queue) {
+			status = process_ep_req(udc, i, curr_req);
+			if (status)
+				break;
+
+			/* write back status to req */
+			curr_req->req.status = status;
+
+			/* ep0 request completion */
+			if (ep_num == 0) {
+				ep0_req_complete(udc, curr_ep, curr_req);
+				break;
+			} else {
+				done(curr_ep, curr_req, status);
+			}
+		}
+	}
+}
+
+static void irq_process_reset(struct mv_udc *udc)
+{
+	u32 tmp;
+	unsigned int loops;
+
+	udc->ep0_dir = EP_DIR_OUT;
+	udc->ep0_state = WAIT_FOR_SETUP;
+	udc->remote_wakeup = 0;		/* default to 0 on reset */
+
+	/* The device address occupies bits 25-31; clear it on bus reset */
+	tmp = readl(&udc->op_regs->deviceaddr);
+	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
+	writel(tmp, &udc->op_regs->deviceaddr);
+
+	/* Clear all the setup token semaphores */
+	tmp = readl(&udc->op_regs->epsetupstat);
+	writel(tmp, &udc->op_regs->epsetupstat);
+
+	/* Clear all the endpoint complete status bits */
+	tmp = readl(&udc->op_regs->epcomplete);
+	writel(tmp, &udc->op_regs->epcomplete);
+
+	/* wait until all endptprime bits cleared */
+	loops = LOOPS(PRIME_TIMEOUT);
+	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
+		if (loops == 0) {
+			dev_err(&udc->dev->dev,
+				"Timeout for ENDPTPRIME = 0x%x\n",
+				readl(&udc->op_regs->epprime));
+			break;
+		}
+		loops--;
+		udelay(LOOPS_USEC);
+	}
+
+	/* Write 1s to the Flush register */
+	writel((u32)~0, &udc->op_regs->epflush);
+
+	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
+		dev_info(&udc->dev->dev, "usb bus reset\n");
+		udc->usb_state = USB_STATE_DEFAULT;
+		/* reset all the queues, stop all USB activities */
+		stop_activity(udc, udc->driver);
+	} else {
+		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
+			readl(&udc->op_regs->portsc));
+
+		/*
+		 * re-initialize
+		 * controller reset
+		 */
+		udc_reset(udc);
+
+		/* reset all the queues, stop all USB activities */
+		stop_activity(udc, udc->driver);
+
+		/* reset ep0 dQH and endptctrl */
+		ep0_reset(udc);
+
+		/* enable interrupt and set controller to run state */
+		udc_start(udc);
+
+		udc->usb_state = USB_STATE_ATTACHED;
+	}
+}
+
+static void handle_bus_resume(struct mv_udc *udc)
+{
+	udc->usb_state = udc->resume_state;
+	udc->resume_state = 0;
+
+	/* report resume to the driver */
+	if (udc->driver) {
+		if (udc->driver->resume) {
+			spin_unlock(&udc->lock);
+			udc->driver->resume(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+	}
+}
+
+static void irq_process_suspend(struct mv_udc *udc)
+{
+	udc->resume_state = udc->usb_state;
+	udc->usb_state = USB_STATE_SUSPENDED;
+
+	if (udc->driver->suspend) {
+		spin_unlock(&udc->lock);
+		udc->driver->suspend(&udc->gadget);
+		spin_lock(&udc->lock);
+	}
+}
+
+static void irq_process_port_change(struct mv_udc *udc)
+{
+	u32 portsc;
+
+	portsc = readl(&udc->op_regs->portsc[0]);
+	if (!(portsc & PORTSCX_PORT_RESET)) {
+		/* Get the speed */
+		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
+		switch (speed) {
+		case PORTSCX_PORT_SPEED_HIGH:
+			udc->gadget.speed = USB_SPEED_HIGH;
+			break;
+		case PORTSCX_PORT_SPEED_FULL:
+			udc->gadget.speed = USB_SPEED_FULL;
+			break;
+		case PORTSCX_PORT_SPEED_LOW:
+			udc->gadget.speed = USB_SPEED_LOW;
+			break;
+		default:
+			udc->gadget.speed = USB_SPEED_UNKNOWN;
+			break;
+		}
+	}
+
+	if (portsc & PORTSCX_PORT_SUSPEND) {
+		udc->resume_state = udc->usb_state;
+		udc->usb_state = USB_STATE_SUSPENDED;
+		if (udc->driver->suspend) {
+			spin_unlock(&udc->lock);
+			udc->driver->suspend(&udc->gadget);
+			spin_lock(&udc->lock);
+		}
+	}
+
+	if (!(portsc & PORTSCX_PORT_SUSPEND)
+		&& udc->usb_state == USB_STATE_SUSPENDED) {
+		handle_bus_resume(udc);
+	}
+
+	if (!udc->resume_state)
+		udc->usb_state = USB_STATE_DEFAULT;
+}
+
+static void irq_process_error(struct mv_udc *udc)
+{
+	/* Increment the error count */
+	udc->errors++;
+}
+
+static irqreturn_t mv_udc_irq(int irq, void *dev)
+{
+	struct mv_udc *udc = (struct mv_udc *)dev;
+	u32 status, intr;
+
+	spin_lock(&udc->lock);
+
+	status = readl(&udc->op_regs->usbsts);
+	intr = readl(&udc->op_regs->usbintr);
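+	/* only service interrupt sources that are currently enabled */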
+	status &= intr;
+
+	if (status == 0) {
+		spin_unlock(&udc->lock);
+		return IRQ_NONE;
+	}
+
+	/* Clear all the interrupts that occurred */
+	writel(status, &udc->op_regs->usbsts);
+
+	if (status & USBSTS_ERR)
+		irq_process_error(udc);
+
+	if (status & USBSTS_RESET)
+		irq_process_reset(udc);
+
+	if (status & USBSTS_PORT_CHANGE)
+		irq_process_port_change(udc);
+
+	if (status & USBSTS_INT)
+		irq_process_tr_complete(udc);
+
+	if (status & USBSTS_SUSPEND)
+		irq_process_suspend(udc);
+
+	spin_unlock(&udc->lock);
+
+	return IRQ_HANDLED;
+}
+
+/* release device structure */
+static void gadget_release(struct device *_dev)
+{
+	struct mv_udc *udc = the_controller;
+
+	complete(udc->done);
+	kfree(udc);
+}
+
+static int mv_udc_remove(struct platform_device *dev)
+{
+	struct mv_udc *udc = the_controller;
+
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	udc->done = &done;
+
+	/* free memory allocated in probe */
+	if (udc->dtd_pool)
+		dma_pool_destroy(udc->dtd_pool);
+
+	if (udc->ep_dqh)
+		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
+			udc->ep_dqh, udc->ep_dqh_dma);
+
+	kfree(udc->eps);
+
+	if (udc->irq)
+		free_irq(udc->irq, &dev->dev);
+
+	if (udc->cap_regs)
+		iounmap(udc->cap_regs);
+	udc->cap_regs = NULL;
+
+	if (udc->phy_regs)
+		iounmap((void *)udc->phy_regs);
+	udc->phy_regs = 0;
+
+	if (udc->status_req) {
+		kfree(udc->status_req->req.buf);
+		kfree(udc->status_req);
+	}
+
+	device_unregister(&udc->gadget.dev);
+
+	/* free dev, wait for the release() finished */
+	wait_for_completion(&done);
+
+	the_controller = NULL;
+
+	return 0;
+}
+
+int mv_udc_probe(struct platform_device *dev)
+{
+	struct mv_udc *udc;
+	int retval = 0;
+	struct resource *r;
+	size_t size;
+
+	udc = kzalloc(sizeof *udc, GFP_KERNEL);
+	if (udc == NULL) {
+		dev_err(&dev->dev, "failed to allocate memory for udc\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	spin_lock_init(&udc->lock);
+
+	udc->dev = dev;
+
+	udc->clk = clk_get(&dev->dev, "U2OCLK");
+	if (IS_ERR(udc->clk)) {
+		retval = PTR_ERR(udc->clk);
+		goto error;
+	}
+
+	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "u2o");
+	if (r == NULL) {
+		dev_err(&dev->dev, "no I/O memory resource defined\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	udc->cap_regs = (struct mv_cap_regs __iomem *)
+		ioremap(r->start, resource_size(r));
+	if (udc->cap_regs == NULL) {
+		dev_err(&dev->dev, "failed to map I/O memory\n");
+		retval = -EBUSY;
+		goto error;
+	}
+
+	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "u2ophy");
+	if (r == NULL) {
+		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
+	if (udc->phy_regs == 0) {
+		dev_err(&dev->dev, "failed to map phy I/O memory\n");
+		retval = -EBUSY;
+		goto error;
+	}
+
+	/* we will access controller registers, so enable the clock */
+	clk_enable(udc->clk);
+	retval = mv_udc_phy_init(udc->phy_regs);
+	if (retval) {
+		dev_err(&dev->dev, "phy initialization error %d\n", retval);
+		goto error;
+	}
+
+	udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
+		+ (readl(&udc->cap_regs->caplength_hciversion)
+			& CAPLENGTH_MASK));
+	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
+
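+	/*
+	 * Allocate the endpoint queue heads: two dQHs per endpoint (RX and
+	 * TX), rounded up to the controller's dQH alignment requirement.
+	 */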
+	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
+	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
+	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
+					&udc->ep_dqh_dma, GFP_KERNEL);
+
+	if (udc->ep_dqh == NULL) {
+		dev_err(&dev->dev, "allocate dQH memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	udc->ep_dqh_size = size;
+
+	/* create dTD dma_pool resource */
+	udc->dtd_pool = dma_pool_create("mv_dtd",
+			&dev->dev,
+			sizeof(struct mv_dtd),
+			DTD_ALIGNMENT,
+			DMA_BOUNDARY);
+
+	if (!udc->dtd_pool) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	size = udc->max_eps * sizeof(struct mv_ep) * 2;
+	udc->eps = kzalloc(size, GFP_KERNEL);
+	if (udc->eps == NULL) {
+		dev_err(&dev->dev, "allocate ep memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+
+	/* initialize ep0 status request structure */
+	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
+	if (!udc->status_req) {
+		dev_err(&dev->dev, "allocate status_req memory failed\n");
+		retval = -ENOMEM;
+		goto error;
+	}
+	INIT_LIST_HEAD(&udc->status_req->queue);
+
+	/* allocate a small amount of memory to get valid address */
+	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
+	udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
+
+	udc->resume_state = USB_STATE_NOTATTACHED;
+	udc->usb_state = USB_STATE_POWERED;
+	udc->ep0_dir = EP_DIR_OUT;
+	udc->remote_wakeup = 0;
+
+	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
+	if (r == NULL) {
+		dev_err(&dev->dev, "no IRQ resource defined\n");
+		retval = -ENODEV;
+		goto error;
+	}
+	udc->irq = r->start;
+	if (request_irq(udc->irq, mv_udc_irq,
+		IRQF_DISABLED | IRQF_SHARED, driver_name, udc)) {
+		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
+			udc->irq);
+		retval = -ENODEV;
+		goto error;
+	}
+
+	/* initialize gadget structure */
+	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
+	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
+	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
+	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
+	udc->gadget.is_dualspeed = 1;		/* support dual speed */
+
+	/* the "gadget" abstracts/virtualizes the controller */
+	dev_set_name(&udc->gadget.dev, "gadget");
+	udc->gadget.dev.parent = &dev->dev;
+	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
+	udc->gadget.dev.release = gadget_release;
+	udc->gadget.name = driver_name;		/* gadget name */
+
+	retval = device_register(&udc->gadget.dev);
+	if (retval)
+		goto error;
+
+	eps_init(udc);
+
+	the_controller = udc;
+
+	goto out;
+error:
+	if (udc)
+		mv_udc_remove(udc->dev);
+out:
+	return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_udc_suspend(struct device *_dev)
+{
+	struct mv_udc *udc = the_controller;
+
+	udc_stop(udc);
+
+	return 0;
+}
+
+static int mv_udc_resume(struct device *_dev)
+{
+	struct mv_udc *udc = the_controller;
+	int retval;
+
+	retval = mv_udc_phy_init(udc->phy_regs);
+	if (retval) {
+		dev_err(_dev, "phy initialization error %d\n", retval);
+		return retval;
+	}
+	udc_reset(udc);
+	ep0_reset(udc);
+	udc_start(udc);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mv_udc_pm_ops = {
+	.suspend	= mv_udc_suspend,
+	.resume		= mv_udc_resume,
+};
+#endif
+
+static struct platform_driver udc_driver = {
+	.probe		= mv_udc_probe,
+	.remove		= __exit_p(mv_udc_remove),
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "pxa-u2o",
+#ifdef CONFIG_PM
+		.pm	= &mv_udc_pm_ops,
+#endif
+	},
+};
+
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+
+static int __init init(void)
+{
+	return platform_driver_register(&udc_driver);
+}
+module_init(init);
+
+
+static void __exit cleanup(void)
+{
+	platform_driver_unregister(&udc_driver);
+}
+module_exit(cleanup);
+
diff --git a/drivers/usb/gadget/mv_udc_phy.c b/drivers/usb/gadget/mv_udc_phy.c
new file mode 100644
index 0000000..d4dea97
--- /dev/null
+++ b/drivers/usb/gadget/mv_udc_phy.c
@@ -0,0 +1,214 @@
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include <mach/cputype.h>
+
+#ifdef CONFIG_ARCH_MMP
+
+#define UTMI_REVISION		0x0
+#define UTMI_CTRL		0x4
+#define UTMI_PLL		0x8
+#define UTMI_TX			0xc
+#define UTMI_RX			0x10
+#define UTMI_IVREF		0x14
+#define UTMI_T0			0x18
+#define UTMI_T1			0x1c
+#define UTMI_T2			0x20
+#define UTMI_T3			0x24
+#define UTMI_T4			0x28
+#define UTMI_T5			0x2c
+#define UTMI_RESERVE		0x30
+#define UTMI_USB_INT		0x34
+#define UTMI_DBG_CTL		0x38
+#define UTMI_OTG_ADDON		0x3c
+
+/* For UTMICTRL Register */
+#define UTMI_CTRL_USB_CLK_EN			(1 << 31)
+/* pxa168 */
+#define UTMI_CTRL_SUSPEND_SET1			(1 << 30)
+#define UTMI_CTRL_SUSPEND_SET2			(1 << 29)
+#define UTMI_CTRL_RXBUF_PDWN			(1 << 24)
+#define UTMI_CTRL_TXBUF_PDWN			(1 << 11)
+
+#define UTMI_CTRL_INPKT_DELAY_SHIFT		30
+#define UTMI_CTRL_INPKT_DELAY_SOF_SHIFT		28
+#define UTMI_CTRL_PU_REF_SHIFT			20
+#define UTMI_CTRL_ARC_PULLDN_SHIFT		12
+#define UTMI_CTRL_PLL_PWR_UP_SHIFT		1
+#define UTMI_CTRL_PWR_UP_SHIFT			0
+/* For UTMI_PLL Register */
+#define UTMI_PLL_CLK_BLK_EN_SHIFT		24
+#define UTMI_PLL_FBDIV_SHIFT			4
+#define UTMI_PLL_REFDIV_SHIFT			0
+#define UTMI_PLL_FBDIV_MASK			0x00000FF0
+#define UTMI_PLL_REFDIV_MASK			0x0000000F
+#define UTMI_PLL_ICP_MASK			0x00007000
+#define UTMI_PLL_KVCO_MASK			0x00031000
+#define UTMI_PLL_PLLCALI12_SHIFT		29
+#define UTMI_PLL_PLLCALI12_MASK			(0x3 << 29)
+#define UTMI_PLL_PLLVDD18_SHIFT			27
+#define UTMI_PLL_PLLVDD18_MASK			(0x3 << 27)
+#define UTMI_PLL_PLLVDD12_SHIFT			25
+#define UTMI_PLL_PLLVDD12_MASK			(0x3 << 25)
+#define UTMI_PLL_KVCO_SHIFT			15
+#define UTMI_PLL_ICP_SHIFT			12
+/* For UTMI_TX Register */
+#define UTMI_TX_REG_EXT_FS_RCAL_SHIFT		27
+#define UTMI_TX_REG_EXT_FS_RCAL_MASK		(0xf << 27)
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_MASK		26
+#define UTMI_TX_REG_EXT_FS_RCAL_EN		(0x1 << 26)
+#define UTMI_TX_LOW_VDD_EN_SHIFT		11
+#define UTMI_TX_IMPCAL_VTH_SHIFT		14
+#define UTMI_TX_IMPCAL_VTH_MASK			(0x7 << 14)
+#define UTMI_TX_CK60_PHSEL_SHIFT		17
+#define UTMI_TX_CK60_PHSEL_MASK			(0xf << 17)
+#define UTMI_TX_TXVDD12_SHIFT                   22
+#define UTMI_TX_TXVDD12_MASK			(0x3 << 22)
+#define UTMI_TX_AMP_SHIFT			0
+#define UTMI_TX_AMP_MASK			(0x7 << 0)
+/* For UTMI_RX Register */
+#define UTMI_RX_SQ_THRESH_SHIFT			4
+#define UTMI_RX_SQ_THRESH_MASK			(0xf << 4)
+#define UTMI_REG_SQ_LENGTH_SHIFT		15
+#define UTMI_REG_SQ_LENGTH_MASK			(0x3 << 15)
+
+#define REG_RCAL_START				0x00001000
+#define VCOCAL_START				0x00200000
+#define KVCO_EXT				0x00400000
+#define PLL_READY				0x00800000
+#define CLK_BLK_EN				0x01000000
+#endif
+
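+/*
+ * Simple PHY register accessors.  Each write is followed by a read back
+ * of the same register so the write is posted before we continue.
+ */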
+static unsigned int u2o_read(unsigned int base, unsigned int offset)
+{
+	return readl(base + offset);
+}
+
+static void u2o_set(unsigned int base, unsigned int offset, unsigned int value)
+{
+	unsigned int reg;
+
+	reg = readl(base + offset);
+	reg |= value;
+	writel(reg, base + offset);
+	readl(base + offset);
+}
+
+static void u2o_clear(unsigned int base, unsigned int offset,
+	unsigned int value)
+{
+	unsigned int reg;
+
+	reg = readl(base + offset);
+	reg &= ~value;
+	writel(reg, base + offset);
+	readl(base + offset);
+}
+
+static void u2o_write(unsigned int base, unsigned int offset,
+	unsigned int value)
+{
+	writel(value, base + offset);
+	readl(base + offset);
+}
+
+#ifdef CONFIG_ARCH_MMP
+int mv_udc_phy_init(unsigned int base)
+{
+	unsigned long timeout;
+
+	/* Initialize the USB PHY power */
+	if (cpu_is_pxa910()) {
+		u2o_set(base, UTMI_CTRL, (1 << UTMI_CTRL_INPKT_DELAY_SOF_SHIFT)
+			| (1 << UTMI_CTRL_PU_REF_SHIFT));
+	}
+
+	u2o_set(base, UTMI_CTRL, 1 << UTMI_CTRL_PLL_PWR_UP_SHIFT);
+	u2o_set(base, UTMI_CTRL, 1 << UTMI_CTRL_PWR_UP_SHIFT);
+
+	/* UTMI_PLL settings */
+	u2o_clear(base, UTMI_PLL, UTMI_PLL_PLLVDD18_MASK
+		| UTMI_PLL_PLLVDD12_MASK | UTMI_PLL_PLLCALI12_MASK
+		| UTMI_PLL_FBDIV_MASK | UTMI_PLL_REFDIV_MASK
+		| UTMI_PLL_ICP_MASK | UTMI_PLL_KVCO_MASK);
+
+	u2o_set(base, UTMI_PLL, (0xee << UTMI_PLL_FBDIV_SHIFT)
+		| (0xb << UTMI_PLL_REFDIV_SHIFT)
+		| (3 << UTMI_PLL_PLLVDD18_SHIFT)
+		| (3 << UTMI_PLL_PLLVDD12_SHIFT)
+		| (3 << UTMI_PLL_PLLCALI12_SHIFT)
+		| (1 << UTMI_PLL_ICP_SHIFT) | (3 << UTMI_PLL_KVCO_SHIFT));
+
+	/* UTMI_TX */
+	u2o_clear(base, UTMI_TX, UTMI_TX_REG_EXT_FS_RCAL_EN_MASK
+		| UTMI_TX_TXVDD12_MASK
+		| UTMI_TX_CK60_PHSEL_MASK | UTMI_TX_IMPCAL_VTH_MASK
+		| UTMI_TX_REG_EXT_FS_RCAL_MASK | UTMI_TX_AMP_MASK);
+	u2o_set(base, UTMI_TX, (3 << UTMI_TX_TXVDD12_SHIFT)
+		| (4 << UTMI_TX_CK60_PHSEL_SHIFT)
+		| (4 << UTMI_TX_IMPCAL_VTH_SHIFT)
+		| (8 << UTMI_TX_REG_EXT_FS_RCAL_SHIFT)
+		| (3 << UTMI_TX_AMP_SHIFT));
+
+	/* UTMI_RX */
+	u2o_clear(base, UTMI_RX, UTMI_RX_SQ_THRESH_MASK
+		| UTMI_REG_SQ_LENGTH_MASK);
+	u2o_set(base, UTMI_RX, (0x7 << UTMI_RX_SQ_THRESH_SHIFT)
+		| (2 << UTMI_REG_SQ_LENGTH_SHIFT));
+
+	/* UTMI_IVREF */
+	if (cpu_is_pxa168())
+		/*
+		 * fixing Microsoft Altair board interface with NEC hub issue -
+		 * Set UTMI_IVREF from 0x4a3 to 0x4bf
+		 */
+		u2o_write(base, UTMI_IVREF, 0x4bf);
+
+	/* calibrate */
+	timeout = jiffies + 100;
+	while ((u2o_read(base, UTMI_PLL) & PLL_READY) == 0) {
+		if (time_after(jiffies, timeout))
+			return -ETIME;
+		cpu_relax();
+	}
+
+	/* toggle VCOCAL_START bit of UTMI_PLL */
+	udelay(200);
+	u2o_set(base, UTMI_PLL, VCOCAL_START);
+	udelay(40);
+	u2o_clear(base, UTMI_PLL, VCOCAL_START);
+
+	/* toggle REG_RCAL_START bit of UTMI_TX */
+	udelay(200);
+	u2o_set(base, UTMI_TX, REG_RCAL_START);
+	udelay(40);
+	u2o_clear(base, UTMI_TX, REG_RCAL_START);
+	udelay(200);
+
+	/* make sure phy is ready */
+	timeout = jiffies + 100;
+	while ((u2o_read(base, UTMI_PLL) & PLL_READY) == 0) {
+		if (time_after(jiffies, timeout))
+			return -ETIME;
+		cpu_relax();
+	}
+
+	if (cpu_is_pxa168()) {
+		u2o_set(base, UTMI_RESERVE, 1 << 5);
+		/* Turn on UTMI PHY OTG extension */
+		u2o_write(base, UTMI_OTG_ADDON, 1);
+	}
+	return 0;
+}
+#else
+int mv_udc_phy_init(unsigned int base)
+{
+	return 0;
+}
+#endif
diff --git a/drivers/usb/gadget/ncm.c b/drivers/usb/gadget/ncm.c
new file mode 100644
index 0000000..99c179a
--- /dev/null
+++ b/drivers/usb/gadget/ncm.c
@@ -0,0 +1,248 @@
+/*
+ * ncm.c -- NCM gadget driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com>
+ *
+ * The driver borrows from ether.c which is:
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+
+#include "u_ether.h"
+
+#define DRIVER_DESC		"NCM Gadget"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module.  So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "f_ncm.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ * It's for devices with only CDC Ethernet configurations.
+ */
+#define CDC_VENDOR_NUM		0x0525	/* NetChip */
+#define CDC_PRODUCT_NUM		0xa4a1	/* Linux-USB Ethernet Gadget */
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+	.bLength =		sizeof device_desc,
+	.bDescriptorType =	USB_DT_DEVICE,
+
+	.bcdUSB =		cpu_to_le16 (0x0200),
+
+	.bDeviceClass =		USB_CLASS_COMM,
+	.bDeviceSubClass =	0,
+	.bDeviceProtocol =	0,
+	/* .bMaxPacketSize0 = f(hardware) */
+
+	/* Vendor and product id defaults change according to what configs
+	 * we support.  (As does bNumConfigurations.)  These values can
+	 * also be overridden by module parameters.
+	 */
+	.idVendor =		cpu_to_le16 (CDC_VENDOR_NUM),
+	.idProduct =		cpu_to_le16 (CDC_PRODUCT_NUM),
+	/* .bcdDevice = f(hardware) */
+	/* .iManufacturer = DYNAMIC */
+	/* .iProduct = DYNAMIC */
+	/* NO SERIAL NUMBER */
+	.bNumConfigurations =	1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+	.bLength =		sizeof otg_descriptor,
+	.bDescriptorType =	USB_DT_OTG,
+
+	/* REVISIT SRP-only hardware is possible, although
+	 * it would not be called "OTG" ...
+	 */
+	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+	(struct usb_descriptor_header *) &otg_descriptor,
+	NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX		0
+#define STRING_PRODUCT_IDX		1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+	[STRING_MANUFACTURER_IDX].s = manufacturer,
+	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+	.language	= 0x0409,	/* en-us */
+	.strings	= strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+	&stringtab_dev,
+	NULL,
+};
+
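+/* host-side MAC address, filled in by gether_setup() */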
+static u8 hostaddr[ETH_ALEN];
+
+/*-------------------------------------------------------------------------*/
+
+static int __init ncm_do_config(struct usb_configuration *c)
+{
+	/* FIXME alloc iConfiguration string, set it in c->strings */
+
+	if (gadget_is_otg(c->cdev->gadget)) {
+		c->descriptors = otg_desc;
+		c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+	}
+
+	return ncm_bind_config(c, hostaddr);
+}
+
+static struct usb_configuration ncm_config_driver = {
+	/* .label = f(hardware) */
+	.label			= "CDC Ethernet (NCM)",
+	.bConfigurationValue	= 1,
+	/* .iConfiguration = DYNAMIC */
+	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init gncm_bind(struct usb_composite_dev *cdev)
+{
+	int			gcnum;
+	struct usb_gadget	*gadget = cdev->gadget;
+	int			status;
+
+	/* set up network link layer */
+	status = gether_setup(cdev->gadget, hostaddr);
+	if (status < 0)
+		return status;
+
+	gcnum = usb_gadget_controller_number(gadget);
+	if (gcnum >= 0)
+		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+	else {
+		/* We assume that can_support_ecm() tells the truth;
+		 * but if the controller isn't recognized at all then
+		 * that assumption is a bit more likely to be wrong.
+		 */
+		dev_warn(&gadget->dev,
+			 "controller '%s' not recognized; trying %s\n",
+			 gadget->name,
+			 ncm_config_driver.label);
+		device_desc.bcdDevice =
+			cpu_to_le16(0x0300 | 0x0099);
+	}
+
+
+	/* Allocate string descriptor numbers ... note that string
+	 * contents can be overridden by the composite_dev glue.
+	 */
+
+	/* device descriptor strings: manufacturer, product */
+	snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+		init_utsname()->sysname, init_utsname()->release,
+		gadget->name);
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_MANUFACTURER_IDX].id = status;
+	device_desc.iManufacturer = status;
+
+	status = usb_string_id(cdev);
+	if (status < 0)
+		goto fail;
+	strings_dev[STRING_PRODUCT_IDX].id = status;
+	device_desc.iProduct = status;
+
+	status = usb_add_config(cdev, &ncm_config_driver,
+				ncm_do_config);
+	if (status < 0)
+		goto fail;
+
+	dev_info(&gadget->dev, "%s\n", DRIVER_DESC);
+
+	return 0;
+
+fail:
+	gether_cleanup();
+	return status;
+}
+
+static int __exit gncm_unbind(struct usb_composite_dev *cdev)
+{
+	gether_cleanup();
+	return 0;
+}
+
+static struct usb_composite_driver ncm_driver = {
+	.name		= "g_ncm",
+	.dev		= &device_desc,
+	.strings	= dev_strings,
+	.unbind		= __exit_p(gncm_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Yauheni Kaliuta");
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+	return usb_composite_probe(&ncm_driver, gncm_bind);
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+	usb_composite_unregister(&ncm_driver);
+}
+module_exit(cleanup);
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
new file mode 100644
index 0000000..0c8dd81
--- /dev/null
+++ b/drivers/usb/gadget/pch_udc.c
@@ -0,0 +1,2947 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/* Address offset of Registers */
+#define UDC_EP_REG_SHIFT	0x20	/* Offset to next EP */
+
+#define UDC_EPCTL_ADDR		0x00	/* Endpoint control */
+#define UDC_EPSTS_ADDR		0x04	/* Endpoint status */
+#define UDC_BUFIN_FRAMENUM_ADDR	0x08	/* buffer size in / frame number out */
+#define UDC_BUFOUT_MAXPKT_ADDR	0x0C	/* buffer size out / maxpkt in */
+#define UDC_SUBPTR_ADDR		0x10	/* setup buffer pointer */
+#define UDC_DESPTR_ADDR		0x14	/* Data descriptor pointer */
+#define UDC_CONFIRM_ADDR	0x18	/* Write/Read confirmation */
+
+#define UDC_DEVCFG_ADDR		0x400	/* Device configuration */
+#define UDC_DEVCTL_ADDR		0x404	/* Device control */
+#define UDC_DEVSTS_ADDR		0x408	/* Device status */
+#define UDC_DEVIRQSTS_ADDR	0x40C	/* Device irq status */
+#define UDC_DEVIRQMSK_ADDR	0x410	/* Device irq mask */
+#define UDC_EPIRQSTS_ADDR	0x414	/* Endpoint irq status */
+#define UDC_EPIRQMSK_ADDR	0x418	/* Endpoint irq mask */
+#define UDC_DEVLPM_ADDR		0x41C	/* LPM control / status */
+#define UDC_CSR_BUSY_ADDR	0x4f0	/* UDC_CSR_BUSY Status register */
+#define UDC_SRST_ADDR		0x4fc	/* SOFT RESET register */
+#define UDC_CSR_ADDR		0x500	/* USB_DEVICE endpoint register */
+
+/* Endpoint control register */
+/* Bit position */
+#define UDC_EPCTL_MRXFLUSH		(1 << 12)
+#define UDC_EPCTL_RRDY			(1 << 9)
+#define UDC_EPCTL_CNAK			(1 << 8)
+#define UDC_EPCTL_SNAK			(1 << 7)
+#define UDC_EPCTL_NAK			(1 << 6)
+#define UDC_EPCTL_P			(1 << 3)
+#define UDC_EPCTL_F			(1 << 1)
+#define UDC_EPCTL_S			(1 << 0)
+#define UDC_EPCTL_ET_SHIFT		4
+/* Mask pattern */
+#define UDC_EPCTL_ET_MASK		0x00000030
+/* Value for ET field */
+#define UDC_EPCTL_ET_CONTROL		0
+#define UDC_EPCTL_ET_ISO		1
+#define UDC_EPCTL_ET_BULK		2
+#define UDC_EPCTL_ET_INTERRUPT		3
+
+/* Endpoint status register */
+/* Bit position */
+#define UDC_EPSTS_XFERDONE		(1 << 27)
+#define UDC_EPSTS_RSS			(1 << 26)
+#define UDC_EPSTS_RCS			(1 << 25)
+#define UDC_EPSTS_TXEMPTY		(1 << 24)
+#define UDC_EPSTS_TDC			(1 << 10)
+#define UDC_EPSTS_HE			(1 << 9)
+#define UDC_EPSTS_MRXFIFO_EMP		(1 << 8)
+#define UDC_EPSTS_BNA			(1 << 7)
+#define UDC_EPSTS_IN			(1 << 6)
+#define UDC_EPSTS_OUT_SHIFT		4
+/* Mask pattern */
+#define UDC_EPSTS_OUT_MASK		0x00000030
+#define UDC_EPSTS_ALL_CLR_MASK		0x1F0006F0
+/* Value for OUT field */
+#define UDC_EPSTS_OUT_SETUP		2
+#define UDC_EPSTS_OUT_DATA		1
+
+/* Device configuration register */
+/* Bit position */
+#define UDC_DEVCFG_CSR_PRG		(1 << 17)
+#define UDC_DEVCFG_SP			(1 << 3)
+/* SPD Value */
+#define UDC_DEVCFG_SPD_HS		0x0
+#define UDC_DEVCFG_SPD_FS		0x1
+#define UDC_DEVCFG_SPD_LS		0x2
+
+/* Device control register */
+/* Bit position */
+#define UDC_DEVCTL_THLEN_SHIFT		24
+#define UDC_DEVCTL_BRLEN_SHIFT		16
+#define UDC_DEVCTL_CSR_DONE		(1 << 13)
+#define UDC_DEVCTL_SD			(1 << 10)
+#define UDC_DEVCTL_MODE			(1 << 9)
+#define UDC_DEVCTL_BREN			(1 << 8)
+#define UDC_DEVCTL_THE			(1 << 7)
+#define UDC_DEVCTL_DU			(1 << 4)
+#define UDC_DEVCTL_TDE			(1 << 3)
+#define UDC_DEVCTL_RDE			(1 << 2)
+#define UDC_DEVCTL_RES			(1 << 0)
+
+/* Device status register */
+/* Bit position */
+#define UDC_DEVSTS_TS_SHIFT		18
+#define UDC_DEVSTS_ENUM_SPEED_SHIFT	13
+#define UDC_DEVSTS_ALT_SHIFT		8
+#define UDC_DEVSTS_INTF_SHIFT		4
+#define UDC_DEVSTS_CFG_SHIFT		0
+/* Mask pattern */
+#define UDC_DEVSTS_TS_MASK		0xfffc0000
+#define UDC_DEVSTS_ENUM_SPEED_MASK	0x00006000
+#define UDC_DEVSTS_ALT_MASK		0x00000f00
+#define UDC_DEVSTS_INTF_MASK		0x000000f0
+#define UDC_DEVSTS_CFG_MASK		0x0000000f
+/* value for maximum speed for SPEED field */
+#define UDC_DEVSTS_ENUM_SPEED_FULL	1
+#define UDC_DEVSTS_ENUM_SPEED_HIGH	0
+#define UDC_DEVSTS_ENUM_SPEED_LOW	2
+#define UDC_DEVSTS_ENUM_SPEED_FULLX	3
+
+/* Device irq register */
+/* Bit position */
+#define UDC_DEVINT_RWKP			(1 << 7)
+#define UDC_DEVINT_ENUM			(1 << 6)
+#define UDC_DEVINT_SOF			(1 << 5)
+#define UDC_DEVINT_US			(1 << 4)
+#define UDC_DEVINT_UR			(1 << 3)
+#define UDC_DEVINT_ES			(1 << 2)
+#define UDC_DEVINT_SI			(1 << 1)
+#define UDC_DEVINT_SC			(1 << 0)
+/* Mask pattern */
+#define UDC_DEVINT_MSK			0x7f
+
+/* Endpoint irq register */
+/* Bit position */
+#define UDC_EPINT_IN_SHIFT		0
+#define UDC_EPINT_OUT_SHIFT		16
+#define UDC_EPINT_IN_EP0		(1 << 0)
+#define UDC_EPINT_OUT_EP0		(1 << 16)
+/* Mask pattern */
+#define UDC_EPINT_MSK_DISABLE_ALL	0xffffffff
+
+/* UDC_CSR_BUSY Status register */
+/* Bit position */
+#define UDC_CSR_BUSY			(1 << 0)
+
+/* SOFT RESET register */
+/* Bit position */
+#define UDC_PSRST			(1 << 1)
+#define UDC_SRST			(1 << 0)
+
+/* USB_DEVICE endpoint register */
+/* Bit position */
+#define UDC_CSR_NE_NUM_SHIFT		0
+#define UDC_CSR_NE_DIR_SHIFT		4
+#define UDC_CSR_NE_TYPE_SHIFT		5
+#define UDC_CSR_NE_CFG_SHIFT		7
+#define UDC_CSR_NE_INTF_SHIFT		11
+#define UDC_CSR_NE_ALT_SHIFT		15
+#define UDC_CSR_NE_MAX_PKT_SHIFT	19
+/* Mask pattern */
+#define UDC_CSR_NE_NUM_MASK		0x0000000f
+#define UDC_CSR_NE_DIR_MASK		0x00000010
+#define UDC_CSR_NE_TYPE_MASK		0x00000060
+#define UDC_CSR_NE_CFG_MASK		0x00000780
+#define UDC_CSR_NE_INTF_MASK		0x00007800
+#define UDC_CSR_NE_ALT_MASK		0x00078000
+#define UDC_CSR_NE_MAX_PKT_MASK		0x3ff80000
+
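+/*
+ * Each endpoint has one UDC_CSR register describing its number, direction,
+ * type, configuration, interface, alternate setting and max packet size.
+ */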
+#define PCH_UDC_CSR(ep)	(UDC_CSR_ADDR + (ep) * 4)
+#define PCH_UDC_EPINT(in, num)\
+		(1 << ((num) + ((in) ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
+
+/*
+ * Index of endpoint in the ep[] array: IN endpoints sit at even indices,
+ * OUT endpoints at odd indices.
+ */
+#define UDC_EP0IN_IDX		0
+#define UDC_EP0OUT_IDX		1
+#define UDC_EPIN_IDX(ep)	((ep) * 2)
+#define UDC_EPOUT_IDX(ep)	((ep) * 2 + 1)
+#define PCH_UDC_EP0		0
+#define PCH_UDC_EP1		1
+#define PCH_UDC_EP2		2
+#define PCH_UDC_EP3		3
+
+/* Number of endpoints */
+#define PCH_UDC_EP_NUM		32	/* Total number of EPs (16 IN,16 OUT) */
+#define PCH_UDC_USED_EP_NUM	4	/* Number of EPs actually used */
+/* Length Value */
+#define PCH_UDC_BRLEN		0x0F	/* Burst length */
+#define PCH_UDC_THLEN		0x1F	/* Threshold length */
+/* Value of EP Buffer Size */
+#define UDC_EP0IN_BUFF_SIZE	64
+#define UDC_EPIN_BUFF_SIZE	512
+#define UDC_EP0OUT_BUFF_SIZE	64
+#define UDC_EPOUT_BUFF_SIZE	512
+/* Value of EP maximum packet size */
+#define UDC_EP0IN_MAX_PKT_SIZE	64
+#define UDC_EP0OUT_MAX_PKT_SIZE	64
+#define UDC_BULK_MAX_PKT_SIZE	512
+
+/* DMA */
+#define DMA_DIR_RX		1	/* DMA for data receive */
+#define DMA_DIR_TX		2	/* DMA for data transmit */
+#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
+#define UDC_DMA_MAXPACKET	65536	/* maximum packet size for DMA */
+
+/**
+ * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
+ *				  for data
+ * @status:		Status quadlet
+ * @reserved:		Reserved
+ * @dataptr:		Buffer descriptor
+ * @next:		Next descriptor
+ */
+struct pch_udc_data_dma_desc {
+	u32 status;
+	u32 reserved;
+	u32 dataptr;
+	u32 next;
+};
+
+/**
+ * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
+ *				 for control data
+ * @status:	Status
+ * @reserved:	Reserved
+ * @request:	Setup request packet
+ */
+struct pch_udc_stp_dma_desc {
+	u32 status;
+	u32 reserved;
+	struct usb_ctrlrequest request;
+} __attribute__((packed));
+
+/* DMA status definitions */
+/* Buffer status */
+#define PCH_UDC_BUFF_STS	0xC0000000
+#define PCH_UDC_BS_HST_RDY	0x00000000
+#define PCH_UDC_BS_DMA_BSY	0x40000000
+#define PCH_UDC_BS_DMA_DONE	0x80000000
+#define PCH_UDC_BS_HST_BSY	0xC0000000
+/*  Rx/Tx Status */
+#define PCH_UDC_RXTX_STS	0x30000000
+#define PCH_UDC_RTS_SUCC	0x00000000
+#define PCH_UDC_RTS_DESERR	0x10000000
+#define PCH_UDC_RTS_BUFERR	0x30000000
+/* Last Descriptor Indication */
+#define PCH_UDC_DMA_LAST	0x08000000
+/* Number of Rx/Tx Bytes Mask */
+#define PCH_UDC_RXTX_BYTES	0x0000ffff
+
+/**
+ * struct pch_udc_cfg_data - Structure to hold current configuration
+ *			     and interface information
+ * @cur_cfg:	current configuration in use
+ * @cur_intf:	current interface in use
+ * @cur_alt:	current alt interface in use
+ */
+struct pch_udc_cfg_data {
+	u16 cur_cfg;
+	u16 cur_intf;
+	u16 cur_alt;
+};
+
+/**
+ * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
+ * @ep:			embedded ep request
+ * @td_stp_phys:	for setup request
+ * @td_data_phys:	for data request
+ * @td_stp:		for setup request
+ * @td_data:		for data request
+ * @dev:		reference to device struct
+ * @offset_addr:	offset address of ep register
+ * @desc:		for this ep
+ * @queue:		queue for requests
+ * @num:		endpoint number
+ * @in:			endpoint is IN
+ * @halted:		endpoint halted?
+ * @epsts:		Endpoint status
+ */
+struct pch_udc_ep {
+	struct usb_ep			ep;
+	dma_addr_t			td_stp_phys;
+	dma_addr_t			td_data_phys;
+	struct pch_udc_stp_dma_desc	*td_stp;
+	struct pch_udc_data_dma_desc	*td_data;
+	struct pch_udc_dev		*dev;
+	unsigned long			offset_addr;
+	const struct usb_endpoint_descriptor	*desc;
+	struct list_head		queue;
+	unsigned			num:5,
+					in:1,
+					halted:1;
+	unsigned long			epsts;
+};
+
+/**
+ * struct pch_udc_dev - Structure holding complete information
+ *			of the PCH USB device
+ * @gadget:		gadget driver data
+ * @driver:		reference to gadget driver bound
+ * @pdev:		reference to the PCI device
+ * @ep:			array of endpoints
+ * @lock:		protects all state
+ * @active:		enabled the PCI device
+ * @stall:		stall requested
+ * @prot_stall:		protocol stall requested
+ * @irq_registered:	irq registered with system
+ * @mem_region:		device memory mapped
+ * @registered:		driver registered with system
+ * @suspended:		driver in suspended state
+ * @connected:		gadget driver associated
+ * @set_cfg_not_acked:	pending acknowledgement for setup
+ * @waiting_zlp_ack:	pending acknowledgement for ZLP
+ * @data_requests:	DMA pool for data requests
+ * @stp_requests:	DMA pool for setup requests
+ * @dma_addr:		DMA address of the ep0 OUT receive buffer
+ * @ep0out_buf:		Buffer for DMA
+ * @setup_data:		Received setup data
+ * @phys_addr:		physical address of the device memory
+ * @base_addr:		virtual address of the mapped device memory
+ * @irq:		IRQ line for the device
+ * @cfg_data:		current cfg, intf, and alt in use
+ */
+struct pch_udc_dev {
+	struct usb_gadget		gadget;
+	struct usb_gadget_driver	*driver;
+	struct pci_dev			*pdev;
+	struct pch_udc_ep		ep[PCH_UDC_EP_NUM];
+	spinlock_t			lock; /* protects all state */
+	unsigned	active:1,
+			stall:1,
+			prot_stall:1,
+			irq_registered:1,
+			mem_region:1,
+			registered:1,
+			suspended:1,
+			connected:1,
+			set_cfg_not_acked:1,
+			waiting_zlp_ack:1;
+	struct pci_pool		*data_requests;
+	struct pci_pool		*stp_requests;
+	dma_addr_t			dma_addr;
+	unsigned long			ep0out_buf[64];
+	struct usb_ctrlrequest		setup_data;
+	unsigned long			phys_addr;
+	void __iomem			*base_addr;
+	unsigned			irq;
+	struct pch_udc_cfg_data	cfg_data;
+};
+
+#define PCH_UDC_PCI_BAR			1
+#define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
+
+static const char	ep0_string[] = "ep0in";
+static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
+struct pch_udc_dev *pch_udc;		/* pointer to device object */
+
+static int speed_fs;
+module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
+MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
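+
+/*
+ * Usage sketch (assuming the driver is built as a module named pch_udc):
+ * full-speed-only operation can be requested at load time, e.g.
+ *	modprobe pch_udc speed_fs=1
+ * or via "pch_udc.speed_fs=1" on the kernel command line when built in.
+ */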
+
+/**
+ * struct pch_udc_request - Structure holding a PCH USB device request packet
+ * @req:		embedded ep request
+ * @td_data_phys:	phys. address
+ * @td_data:		first dma desc. of chain
+ * @td_data_last:	last dma desc. of chain
+ * @queue:		associated queue
+ * @dma_going:		DMA in progress for request
+ * @dma_mapped:		DMA memory mapped for request
+ * @dma_done:		DMA completed for request
+ * @chain_len:		chain length
+ */
+struct pch_udc_request {
+	struct usb_request		req;
+	dma_addr_t			td_data_phys;
+	struct pch_udc_data_dma_desc	*td_data;
+	struct pch_udc_data_dma_desc	*td_data_last;
+	struct list_head		queue;
+	unsigned			dma_going:1,
+					dma_mapped:1,
+					dma_done:1;
+	unsigned			chain_len;
+};
+
+static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
+{
+	return ioread32(dev->base_addr + reg);
+}
+
+static inline void pch_udc_writel(struct pch_udc_dev *dev,
+				    unsigned long val, unsigned long reg)
+{
+	iowrite32(val, dev->base_addr + reg);
+}
+
+static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
+}
+
+static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
+}
+
+static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
+{
+	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
+}
+
+static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
+				    unsigned long val, unsigned long reg)
+{
+	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
+}
+
+static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
+}
+
+static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
+				     unsigned long reg,
+				     unsigned long bitmask)
+{
+	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
+}
+
+/**
+ * pch_udc_csr_busy() - Wait till idle.
+ * @dev:	Reference to pch_udc_dev structure
+ */
+static void pch_udc_csr_busy(struct pch_udc_dev *dev)
+{
+	unsigned int count = 200;
+
+	/* Wait till idle */
+	while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
+		&& --count)
+		cpu_relax();
+	if (!count)
+		dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
+}
+
+/**
+ * pch_udc_write_csr() - Write the command and status registers.
+ * @dev:	Reference to pch_udc_dev structure
+ * @val:	value to be written to CSR register
+ * @ep:		index of the endpoint CSR register to be written
+ */
+static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
+			       unsigned int ep)
+{
+	unsigned long reg = PCH_UDC_CSR(ep);
+
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+	pch_udc_writel(dev, val, reg);
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+}
+
+/**
+ * pch_udc_read_csr() - Read the command and status registers.
+ * @dev:	Reference to pch_udc_dev structure
+ * @ep:		index of the endpoint CSR register to be read
+ *
+ * Return codes:	content of CSR register
+ */
+static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
+{
+	unsigned long reg = PCH_UDC_CSR(ep);
+
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+	pch_udc_readl(dev, reg);	/* Dummy read */
+	pch_udc_csr_busy(dev);		/* Wait till idle */
+	return pch_udc_readl(dev, reg);
+}
+
+/**
+ * pch_udc_rmt_wakeup() - Initiate a remote wakeup
+ * @dev:	Reference to pch_udc_dev structure
+ */
+static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+	mdelay(1);
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_get_frame() - Get the current frame from device status register
+ * @dev:	Reference to pch_udc_dev structure
+ * Return:	the current frame number
+ */
+static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
+{
+	u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
+	return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
+}
+
+/**
+ * pch_udc_clear_selfpowered() - Clear the self power control
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
+}
+
+/**
+ * pch_udc_set_selfpowered() - Set the self power control
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
+}
+
+/**
+ * pch_udc_set_disconnect() - Set the disconnect status.
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+}
+
+/**
+ * pch_udc_clear_disconnect() - Clear the disconnect status.
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
+{
+	/* Clear the disconnect */
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
+	mdelay(1);
+	/* Resume USB signalling */
+	pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
+}
+
+/**
+ * pch_udc_vbus_session() - Set or clear the disconnect status.
+ * @dev:	Reference to pch_udc_regs structure
+ * @is_active:	Parameter specifying the action
+ *		  0:   indicating VBUS power is ending
+ *		  !0:  indicating VBUS power is starting
+ */
+static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
+					  int is_active)
+{
+	if (is_active)
+		pch_udc_clear_disconnect(dev);
+	else
+		pch_udc_set_disconnect(dev);
+}
+
+/**
+ * pch_udc_ep_set_stall() - Set the stall of endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
+{
+	if (ep->in) {
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+	} else {
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+	}
+}
+
+/**
+ * pch_udc_ep_clear_stall() - Clear the stall of endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
+{
+	/* Clear the stall */
+	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
+	/* Clear NAK by writing CNAK */
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
+}
+
+/**
+ * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @type:	Type of endpoint
+ */
+static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
+					u8 type)
+{
+	pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
+				UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_bufsz() - Set the buffer size for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @buf_size:	The buffer size
+ * @ep_in:	Direction of the endpoint (nonzero for IN)
+ */
+static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
+						 u32 buf_size, u32 ep_in)
+{
+	u32 data;
+	if (ep_in) {
+		data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
+		data = (data & 0xffff0000) | (buf_size & 0xffff);
+		pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
+	} else {
+		data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
+		data = (buf_size << 16) | (data & 0xffff);
+		pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
+	}
+}
+
+/**
+ * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @pkt_size:	The packet size
+ */
+static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
+{
+	u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
+	data = (data & 0xffff0000) | (pkt_size & 0xffff);
+	pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @addr:	DMA address of the SETUP buffer
+ */
+static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
+{
+	pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @addr:	DMA address of the data descriptor
+ */
+static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
+{
+	pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
+}
+
+/**
+ * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
+}
+
+/**
+ * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
+}
+
+/**
+ * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
+ *			register depending on the direction specified
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @dir:	whether Tx or Rx
+ *		  DMA_DIR_RX: Receive
+ *		  DMA_DIR_TX: Transmit
+ */
+static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
+{
+	if (dir == DMA_DIR_RX)
+		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
+	else if (dir == DMA_DIR_TX)
+		pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
+}
+
+/**
+ * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
+ *				 register depending on the direction specified
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @dir:	Whether Tx or Rx
+ *		  DMA_DIR_RX: Receive
+ *		  DMA_DIR_TX: Transmit
+ */
+static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
+{
+	if (dir == DMA_DIR_RX)
+		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
+	else if (dir == DMA_DIR_TX)
+		pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
+}
+
+/**
+ * pch_udc_set_csr_done() - Set the device control register
+ *				CSR done field (bit 13)
+ * @dev:	reference to structure of type pch_udc_regs
+ */
+static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
+{
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
+}
+
+/**
+ * pch_udc_disable_interrupts() - Disables the specified interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to disable interrupts
+ */
+static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
+					    u32 mask)
+{
+	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_enable_interrupts() - Enable the specified interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to enable interrupts
+ */
+static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
+					   u32 mask)
+{
+	pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to disable interrupts
+ */
+static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
+						u32 mask)
+{
+	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @mask:	Mask to enable interrupts
+ */
+static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
+					      u32 mask)
+{
+	pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
+}
+
+/**
+ * pch_udc_read_device_interrupts() - Read the device interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * Return:	The device interrupts
+ */
+static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
+{
+	return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_write_device_interrupts() - Write device interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @val:	The value to be written to interrupt register
+ */
+static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
+						     u32 val)
+{
+	pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * Return:	The endpoint interrupts
+ */
+static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
+{
+	return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
+ * @dev:	Reference to structure of type pch_udc_regs
+ * @val:	The value to be written to interrupt register
+ */
+static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
+					     u32 val)
+{
+	pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_device_status() - Read the device status
+ * @dev:	Reference to structure of type pch_udc_regs
+ * Return:	The device status
+ */
+static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
+{
+	return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_control() - Read the endpoint control
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * Return:	The endpoint control register value
+ */
+static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
+{
+	return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_clear_ep_control() - Clear the endpoint control register
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
+}
+
+/**
+ * pch_udc_read_ep_status() - Read the endpoint status
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * Return:	The endpoint status
+ */
+static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
+{
+	return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
+}
+
+/**
+ * pch_udc_clear_ep_status() - Clear the endpoint status
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ * @stat:	Endpoint status
+ */
+static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
+					 u32 stat)
+{
+	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
+}
+
+/**
+ * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
+ *				of the endpoint control register
+ * @ep:		Reference to structure of type pch_udc_ep_regs
+ */
+static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
+{
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
+}
+
+/**
+ * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
+ *				of the endpoint control register
+ * @ep:		reference to structure of type pch_udc_ep_regs
+ */
+static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
+{
+	unsigned int loopcnt = 0;
+	struct pch_udc_dev *dev = ep->dev;
+
+	if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
+		return;
+	if (!ep->in) {
+		loopcnt = 10000;
+		while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
+			--loopcnt)
+			udelay(5);
+		if (!loopcnt)
+			dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
+				__func__);
+	}
+	loopcnt = 10000;
+	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
+		udelay(5);
+	}
+	if (!loopcnt)
+		dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
+			__func__, ep->num, (ep->in ? "in" : "out"));
+}
+
+/**
+ * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
+ * @ep:	reference to structure of type pch_udc_ep_regs
+ * @dir:	direction of endpoint
+ *		  0:  endpoint is OUT
+ *		  !0: endpoint is IN
+ */
+static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
+{
+	unsigned int loopcnt = 0;
+	struct pch_udc_dev *dev = ep->dev;
+
+	if (dir) {	/* IN ep */
+		pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
+		return;
+	}
+
+	if (pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP)
+		return;
+	pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
+	/* Wait for RxFIFO Empty */
+	loopcnt = 10000;
+	while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
+		--loopcnt)
+		udelay(5);
+	if (!loopcnt)
+		dev_err(&dev->pdev->dev, "RxFIFO not Empty\n");
+	pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_MRXFLUSH);
+}
+
+/**
+ * pch_udc_ep_enable() - This API enables the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep
+ * @cfg:	Reference to the current configuration data
+ * @desc:	endpoint descriptor
+ */
+static void pch_udc_ep_enable(struct pch_udc_ep *ep,
+			       struct pch_udc_cfg_data *cfg,
+			       const struct usb_endpoint_descriptor *desc)
+{
+	u32 val = 0;
+	u32 buff_size = 0;
+
+	pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
+	if (ep->in)
+		buff_size = UDC_EPIN_BUFF_SIZE;
+	else
+		buff_size = UDC_EPOUT_BUFF_SIZE;
+	pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
+	pch_udc_ep_set_maxpkt(ep, le16_to_cpu(desc->wMaxPacketSize));
+	pch_udc_ep_set_nak(ep);
+	pch_udc_ep_fifo_flush(ep, ep->in);
+	/* Configure the endpoint */
+	val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
+	      ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
+		UDC_CSR_NE_TYPE_SHIFT) |
+	      (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
+	      (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
+	      (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
+	      le16_to_cpu(desc->wMaxPacketSize) << UDC_CSR_NE_MAX_PKT_SHIFT;
+
+	if (ep->in)
+		pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
+	else
+		pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
+}
+
+/**
+ * pch_udc_ep_disable() - This API disables the endpoint
+ * @ep:		Reference to structure of type pch_udc_ep
+ */
+static void pch_udc_ep_disable(struct pch_udc_ep *ep)
+{
+	if (ep->in) {
+		/* flush the fifo */
+		pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
+		/* set NAK */
+		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
+		pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
+	} else {
+		/* set NAK */
+		pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
+	}
+	/* reset desc pointer */
+	pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
+}
+
+/**
+ * pch_udc_wait_ep_stall() - Wait until the endpoint stall bit is cleared.
+ * @ep:		Reference to structure of type pch_udc_ep
+ */
+static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
+{
+	unsigned int count = 10000;
+
+	/* Wait till idle */
+	while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
+		udelay(5);
+	if (!count)
+		dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
+}
+
+/**
+ * pch_udc_init() - This API initializes usb device controller
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static void pch_udc_init(struct pch_udc_dev *dev)
+{
+	if (NULL == dev) {
+		pr_err("%s: Invalid address\n", __func__);
+		return;
+	}
+	/* Soft Reset and Reset PHY */
+	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+	pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
+	mdelay(1);
+	pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
+	pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
+	mdelay(1);
+	/* mask and clear all device interrupts */
+	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
+	pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
+
+	/* mask and clear all ep interrupts */
+	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+	pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+
+	/* enable dynamic CSR programming, self powered and device speed */
+	if (speed_fs)
+		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
+				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
+	else /* default high speed */
+		pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
+				UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
+	pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
+			(PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
+			(PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
+			UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
+			UDC_DEVCTL_THE);
+}
+
+/**
+ * pch_udc_exit() - This API exits the usb device controller
+ * @dev:	Reference to pch_udc_regs structure
+ */
+static void pch_udc_exit(struct pch_udc_dev *dev)
+{
+	/* mask all device interrupts */
+	pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
+	/* mask all ep interrupts */
+	pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
+	/* put device in disconnected state */
+	pch_udc_set_disconnect(dev);
+}
+
+/**
+ * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
+ * @gadget:	Reference to the gadget driver
+ *
+ * Return codes:
+ *	frame number:	Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	return pch_udc_get_frame(dev);
+}
+
+/**
+ * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
+ * @gadget:	Reference to the gadget driver
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
+{
+	struct pch_udc_dev	*dev;
+	unsigned long		flags;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	spin_lock_irqsave(&dev->lock, flags);
+	pch_udc_rmt_wakeup(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
+ *				is self powered or not
+ * @gadget:	Reference to the gadget driver
+ * @value:	Specifies self powered or not
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	if (value)
+		pch_udc_set_selfpowered(dev);
+	else
+		pch_udc_clear_selfpowered(dev);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_pullup() - This API is invoked to make the device
+ *				visible/invisible to the host
+ * @gadget:	Reference to the gadget driver
+ * @is_on:	Specifies whether the pull up is made active or inactive
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	pch_udc_vbus_session(dev, is_on);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
+ *				transceiver (or GPIO) that
+ *				detects a VBUS power session starting/ending
+ * @gadget:	Reference to the gadget driver
+ * @is_active:	specifies whether the session is starting or ending
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	If the gadget passed is NULL
+ */
+static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct pch_udc_dev	*dev;
+
+	if (!gadget)
+		return -EINVAL;
+	dev = container_of(gadget, struct pch_udc_dev, gadget);
+	pch_udc_vbus_session(dev, is_active);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
+ *				SET_CONFIGURATION calls to
+ *				specify how much power the device can consume
+ * @gadget:	Reference to the gadget driver
+ * @mA:		specifies the current limit in mA
+ *
+ * Return codes:
+ *	-EOPNOTSUPP:	This operation is not supported
+ */
+static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
+{
+	return -EOPNOTSUPP;
+}
+
+static const struct usb_gadget_ops pch_udc_ops = {
+	.get_frame = pch_udc_pcd_get_frame,
+	.wakeup = pch_udc_pcd_wakeup,
+	.set_selfpowered = pch_udc_pcd_selfpowered,
+	.pullup = pch_udc_pcd_pullup,
+	.vbus_session = pch_udc_pcd_vbus_session,
+	.vbus_draw = pch_udc_pcd_vbus_draw,
+};
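+
+/*
+ * These ops are invoked by the gadget core rather than called directly.
+ * A rough sketch of one such path (simplified, for orientation only):
+ *
+ *	usb_gadget_connect(gadget)
+ *		-> gadget->ops->pullup(gadget, 1)
+ *		-> pch_udc_pcd_pullup() -> pch_udc_clear_disconnect()
+ */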
+
+/**
+ * complete_req() - This API is invoked from the driver when processing
+ *			of a request is complete
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request structure
+ * @status:	Indicates the success/failure of completion
+ */
+static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
+								 int status)
+{
+	struct pch_udc_dev	*dev;
+	unsigned halted = ep->halted;
+
+	list_del_init(&req->queue);
+
+	/* set new status if pending */
+	if (req->req.status == -EINPROGRESS)
+		req->req.status = status;
+	else
+		status = req->req.status;
+
+	dev = ep->dev;
+	if (req->dma_mapped) {
+		if (ep->in)
+			pci_unmap_single(dev->pdev, req->req.dma,
+					 req->req.length, PCI_DMA_TODEVICE);
+		else
+			pci_unmap_single(dev->pdev, req->req.dma,
+					 req->req.length, PCI_DMA_FROMDEVICE);
+		req->dma_mapped = 0;
+		req->req.dma = DMA_ADDR_INVALID;
+	}
+	ep->halted = 1;
+	spin_unlock(&dev->lock);
+	if (!ep->in)
+		pch_udc_ep_clear_rrdy(ep);
+	req->req.complete(&ep->ep, &req->req);
+	spin_lock(&dev->lock);
+	ep->halted = halted;
+}
+
+/**
+ * empty_req_queue() - This API empties the request queue of an endpoint
+ * @ep:		Reference to the endpoint structure
+ */
+static void empty_req_queue(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request	*req;
+
+	ep->halted = 1;
+	while (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+		complete_req(ep, req, -ESHUTDOWN);	/* Remove from list */
+	}
+}
+
+/**
+ * pch_udc_free_dma_chain() - This function frees the DMA chain created
+ *				for the request
+ * @dev:	Reference to the driver structure
+ * @req:	Reference to the request whose DMA chain is to be freed
+ */
+static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
+				   struct pch_udc_request *req)
+{
+	struct pch_udc_data_dma_desc *td = req->td_data;
+	unsigned i = req->chain_len;
+
+	for (; i > 1; --i) {
+		dma_addr_t addr = (dma_addr_t)td->next;
+		/* do not free first desc., will be done by free for request */
+		td = phys_to_virt(addr);
+		pci_pool_free(dev->data_requests, td, addr);
+	}
+}
+
+/**
+ * pch_udc_create_dma_chain() - This function creates or reinitializes
+ *				a DMA chain
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request
+ * @buf_len:	The buffer length
+ * @gfp_flags:	Flags to be used while mapping the data buffer
+ *
+ * Return codes:
+ *	0:		success,
+ *	-ENOMEM:	pci_pool_alloc invocation fails
+ */
+static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
+				    struct pch_udc_request *req,
+				    unsigned long buf_len,
+				    gfp_t gfp_flags)
+{
+	struct pch_udc_data_dma_desc *td = req->td_data, *last;
+	unsigned long bytes = req->req.length, i = 0;
+	dma_addr_t dma_addr;
+	unsigned len = 1;
+
+	if (req->chain_len > 1)
+		pch_udc_free_dma_chain(ep->dev, req);
+
+	for (; ; bytes -= buf_len, ++len) {
+		if (ep->in)
+			td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
+		else
+			td->status = PCH_UDC_BS_HST_BSY;
+
+		if (bytes <= buf_len)
+			break;
+
+		last = td;
+		td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
+				    &dma_addr);
+		if (!td)
+			goto nomem;
+
+		i += buf_len;
+		td->dataptr = req->req.dma + i;
+		last->next = dma_addr;
+	}
+
+	req->td_data_last = td;
+	td->status |= PCH_UDC_DMA_LAST;
+	td->next = req->td_data_phys;
+	req->chain_len = len;
+	return 0;
+
+nomem:
+	if (len > 1) {
+		req->chain_len = len;
+		pch_udc_free_dma_chain(ep->dev, req);
+	}
+	req->chain_len = 1;
+	return -ENOMEM;
+}
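+
+/*
+ * Sketch of the chain built above for a request larger than one buffer
+ * (buf_len == ep->ep.maxpacket): each descriptor covers up to buf_len
+ * bytes of req->req.dma, the last descriptor carries PCH_UDC_DMA_LAST
+ * and its ->next wraps back to req->td_data_phys:
+ *
+ *	td_data -> desc1 -> desc2 -> ... -> descN (PCH_UDC_DMA_LAST)
+ *	   ^__________________________________________|
+ */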
+
+/**
+ * prepare_dma() - This function creates and initializes the DMA chain
+ *			for the request
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request
+ * @gfp:	Flag to be used while mapping the data buffer
+ *
+ * Return codes:
+ *	0:		Success
+ *	Other than 0:	linux error number on failure
+ */
+static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
+			  gfp_t gfp)
+{
+	int	retval;
+
+	req->td_data->dataptr = req->req.dma;
+	req->td_data->status |= PCH_UDC_DMA_LAST;
+	/* Allocate and create a DMA chain */
+	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
+	if (retval) {
+		pr_err("%s: could not create DMA chain: %d\n",
+		       __func__, retval);
+		return retval;
+	}
+	if (!ep->in)
+		return 0;
+	if (req->req.length <= ep->ep.maxpacket)
+		req->td_data->status = PCH_UDC_DMA_LAST | PCH_UDC_BS_HST_BSY |
+				       req->req.length;
+	/* if bytes < max packet then tx bytes must
+	 * be written in packet per buffer mode
+	 */
+	if ((req->req.length < ep->ep.maxpacket) || !ep->num)
+		req->td_data->status = (req->td_data->status &
+					~PCH_UDC_RXTX_BYTES) | req->req.length;
+	req->td_data->status = (req->td_data->status &
+				~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_BSY;
+	return 0;
+}
+
+/**
+ * process_zlp() - This function processes zero length packets
+ *			from the gadget driver
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request
+ */
+static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
+{
+	struct pch_udc_dev	*dev = ep->dev;
+
+	/* IN zlp's are handled by hardware */
+	complete_req(ep, req, 0);
+
+	/* if set_config or set_intf is waiting for ack by zlp
+	 * then set CSR_DONE
+	 */
+	if (dev->set_cfg_not_acked) {
+		pch_udc_set_csr_done(dev);
+		dev->set_cfg_not_acked = 0;
+	}
+	/* setup command is ACK'ed now by zlp */
+	if (!dev->stall && dev->waiting_zlp_ack) {
+		pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
+		dev->waiting_zlp_ack = 0;
+	}
+}
+
+/**
+ * pch_udc_start_rxrequest() - This function starts a receive request.
+ * @ep:		Reference to the endpoint structure
+ * @req:	Reference to the request structure
+ */
+static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
+					 struct pch_udc_request *req)
+{
+	struct pch_udc_data_dma_desc *td_data;
+
+	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+	td_data = req->td_data;
+	ep->td_data = req->td_data;
+	/* Set the status bits for all descriptors */
+	while (1) {
+		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
+				    PCH_UDC_BS_HST_RDY;
+		if ((td_data->status & PCH_UDC_DMA_LAST) ==  PCH_UDC_DMA_LAST)
+			break;
+		td_data = phys_to_virt(td_data->next);
+	}
+	/* Write the descriptor pointer */
+	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
+	req->dma_going = 1;
+	pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
+	pch_udc_set_dma(ep->dev, DMA_DIR_RX);
+	pch_udc_ep_clear_nak(ep);
+	pch_udc_ep_set_rrdy(ep);
+}
+
+/**
+ * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
+ *				from gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @desc:	Reference to the USB endpoint descriptor structure
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	if invalid arguments are passed
+ *	-ESHUTDOWN:	if the gadget driver is not bound or the device
+ *			is not connected
+ */
+static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
+				    const struct usb_endpoint_descriptor *desc)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long		iflags;
+
+	if (!usbep || (usbep->name == ep0_string) || !desc ||
+	    (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
+		return -EINVAL;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&dev->lock, iflags);
+	ep->desc = desc;
+	ep->halted = 0;
+	pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
+	ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+	spin_unlock_irqrestore(&dev->lock, iflags);
+	return 0;
+}
+
+/**
+ * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
+ *				from gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ *
+ * Return codes:
+ *	0:		Success
+ *	-EINVAL:	if invalid arguments are passed
+ */
+static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long	iflags;
+
+	if (!usbep)
+		return -EINVAL;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if ((usbep->name == ep0_string) || !ep->desc)
+		return -EINVAL;
+
+	spin_lock_irqsave(&ep->dev->lock, iflags);
+	empty_req_queue(ep);
+	ep->halted = 1;
+	pch_udc_ep_disable(ep);
+	pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+	ep->desc = NULL;
+	INIT_LIST_HEAD(&ep->queue);
+	spin_unlock_irqrestore(&ep->dev->lock, iflags);
+	return 0;
+}
+
+/**
+ * pch_udc_alloc_request() - This function allocates request structure.
+ *				It is called by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @gfp:	Flag to be used while allocating memory
+ *
+ * Return codes:
+ *	NULL:			Failure
+ *	Allocated address:	Success
+ */
+static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
+						  gfp_t gfp)
+{
+	struct pch_udc_request		*req;
+	struct pch_udc_ep		*ep;
+	struct pch_udc_data_dma_desc	*dma_desc;
+	struct pch_udc_dev		*dev;
+
+	if (!usbep)
+		return NULL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	req = kzalloc(sizeof *req, gfp);
+	if (!req)
+		return NULL;
+	req->req.dma = DMA_ADDR_INVALID;
+	INIT_LIST_HEAD(&req->queue);
+	if (!ep->dev->dma_addr)
+		return &req->req;
+	/* ep0 in requests are allocated from data pool here */
+	dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
+				  &req->td_data_phys);
+	if (NULL == dma_desc) {
+		kfree(req);
+		return NULL;
+	}
+	/* prevent from using desc. - set HOST BUSY */
+	dma_desc->status |= PCH_UDC_BS_HST_BSY;
+	dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
+	req->td_data = dma_desc;
+	req->td_data_last = dma_desc;
+	req->chain_len = 1;
+	return &req->req;
+}
+
+/**
+ * pch_udc_free_request() - This function frees request structure.
+ *				It is called by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @usbreq:	Reference to the USB request
+ */
+static void pch_udc_free_request(struct usb_ep *usbep,
+				  struct usb_request *usbreq)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_request	*req;
+	struct pch_udc_dev	*dev;
+
+	if (!usbep || !usbreq)
+		return;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	req = container_of(usbreq, struct pch_udc_request, req);
+	dev = ep->dev;
+	if (!list_empty(&req->queue))
+		dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
+			__func__, usbep->name, req);
+	if (req->td_data != NULL) {
+		if (req->chain_len > 1)
+			pch_udc_free_dma_chain(ep->dev, req);
+		pci_pool_free(ep->dev->data_requests, req->td_data,
+			      req->td_data_phys);
+	}
+	kfree(req);
+}
+
+/**
+ * pch_udc_pcd_queue() - This function queues a request packet. It is called
+ *			by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @usbreq:	Reference to the USB request
+ * @gfp:	Flag to be used while mapping the data buffer
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
+								 gfp_t gfp)
+{
+	int retval = 0;
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	struct pch_udc_request	*req;
+	unsigned long	iflags;
+
+	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
+		return -EINVAL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && ep->num)
+		return -EINVAL;
+	req = container_of(usbreq, struct pch_udc_request, req);
+	if (!list_empty(&req->queue))
+		return -EINVAL;
+	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&ep->dev->lock, iflags);
+	/* map the buffer for dma */
+	if (usbreq->length &&
+	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
+		if (ep->in)
+			usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
+					usbreq->length, PCI_DMA_TODEVICE);
+		else
+			usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
+					usbreq->length, PCI_DMA_FROMDEVICE);
+		req->dma_mapped = 1;
+	}
+	if (usbreq->length > 0) {
+		retval = prepare_dma(ep, req, gfp);
+		if (retval)
+			goto probe_end;
+	}
+	usbreq->actual = 0;
+	usbreq->status = -EINPROGRESS;
+	req->dma_done = 0;
+	if (list_empty(&ep->queue) && !ep->halted) {
+		/* no pending transfer, so start this req */
+		if (!usbreq->length) {
+			process_zlp(ep, req);
+			retval = 0;
+			goto probe_end;
+		}
+		if (!ep->in) {
+			pch_udc_start_rxrequest(ep, req);
+		} else {
+			/*
+			* For IN trfr the descriptors will be programmed and
+			* P bit will be set when
+			* we get an IN token
+			*/
+			pch_udc_wait_ep_stall(ep);
+			pch_udc_ep_clear_nak(ep);
+			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
+			pch_udc_set_dma(dev, DMA_DIR_TX);
+		}
+	}
+	/* Now add this request to the ep's pending requests */
+	if (req != NULL)
+		list_add_tail(&req->queue, &ep->queue);
+
+probe_end:
+	spin_unlock_irqrestore(&dev->lock, iflags);
+	return retval;
+}
+
+/**
+ * pch_udc_pcd_dequeue() - This function de-queues a request packet.
+ *				It is called by gadget driver
+ * @usbep:	Reference to the USB endpoint structure
+ * @usbreq:	Reference to the USB request
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
+				struct usb_request *usbreq)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_request	*req;
+	struct pch_udc_dev	*dev;
+	unsigned long		flags;
+	int ret = -EINVAL;
+
+	if (!usbep || !usbreq)
+		return ret;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && ep->num)
+		return ret;
+	req = container_of(usbreq, struct pch_udc_request, req);
+	spin_lock_irqsave(&ep->dev->lock, flags);
+	/* make sure it's still queued on this endpoint */
+	list_for_each_entry(req, &ep->queue, queue) {
+		if (&req->req == usbreq) {
+			pch_udc_ep_set_nak(ep);
+			if (!list_empty(&req->queue))
+				complete_req(ep, req, -ECONNRESET);
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ep->dev->lock, flags);
+	return ret;
+}
+
+/**
+ * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
+ *			    feature
+ * @usbep:	Reference to the USB endpoint structure
+ * @halt:	Specifies whether to set or clear the feature
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long iflags;
+	int ret;
+
+	if (!usbep)
+		return -EINVAL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && !ep->num)
+		return -EINVAL;
+	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&udc_stall_spinlock, iflags);
+	if (list_empty(&ep->queue)) {
+		if (halt) {
+			if (ep->num == PCH_UDC_EP0)
+				ep->dev->stall = 1;
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						     PCH_UDC_EPINT(ep->in,
+								   ep->num));
+		} else {
+			pch_udc_ep_clear_stall(ep);
+		}
+		ret = 0;
+	} else {
+		ret = -EAGAIN;
+	}
+	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+	return ret;
+}
+
+/**
+ * pch_udc_pcd_set_wedge() - This function sets the endpoint halt feature
+ *				and ignores clear requests from the host
+ * @usbep:	Reference to the USB endpoint structure
+ *
+ * Return codes:
+ *	0:			Success
+ *	linux error number:	Failure
+ */
+static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_dev	*dev;
+	unsigned long iflags;
+	int ret;
+
+	if (!usbep)
+		return -EINVAL;
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	dev = ep->dev;
+	if (!ep->desc && !ep->num)
+		return -EINVAL;
+	if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
+		return -ESHUTDOWN;
+	spin_lock_irqsave(&udc_stall_spinlock, iflags);
+	if (!list_empty(&ep->queue)) {
+		ret = -EAGAIN;
+	} else {
+		if (ep->num == PCH_UDC_EP0)
+			ep->dev->stall = 1;
+		pch_udc_ep_set_stall(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+		ep->dev->prot_stall = 1;
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
+	return ret;
+}
+
+/**
+ * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the
+ *				specified endpoint
+ * @usbep:	Reference to the USB endpoint structure
+ */
+static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
+{
+	struct pch_udc_ep  *ep;
+
+	if (!usbep)
+		return;
+
+	ep = container_of(usbep, struct pch_udc_ep, ep);
+	if (ep->desc || !ep->num)
+		pch_udc_ep_fifo_flush(ep, ep->in);
+}
+
+static const struct usb_ep_ops pch_udc_ep_ops = {
+	.enable		= pch_udc_pcd_ep_enable,
+	.disable	= pch_udc_pcd_ep_disable,
+	.alloc_request	= pch_udc_alloc_request,
+	.free_request	= pch_udc_free_request,
+	.queue		= pch_udc_pcd_queue,
+	.dequeue	= pch_udc_pcd_dequeue,
+	.set_halt	= pch_udc_pcd_set_halt,
+	.set_wedge	= pch_udc_pcd_set_wedge,
+	.fifo_status	= NULL,
+	.fifo_flush	= pch_udc_pcd_fifo_flush,
+};
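+
+/*
+ * Rough sketch of how a gadget driver reaches these ops through the
+ * standard wrappers (simplified, for orientation only):
+ *
+ *	usb_ep_enable(ep, desc)    -> ep->ops->enable() -> pch_udc_pcd_ep_enable()
+ *	usb_ep_queue(ep, req, gfp) -> ep->ops->queue()  -> pch_udc_pcd_queue()
+ */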
+
+/**
+ * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
+ * @td_stp:	Reference to the SETUP buffer structure
+ */
+static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
+{
+	static u32	pky_marker;
+
+	if (!td_stp)
+		return;
+	td_stp->reserved = ++pky_marker;
+	memset(&td_stp->request, 0xFF, sizeof td_stp->request);
+	td_stp->status = PCH_UDC_BS_HST_RDY;
+}
+
+/**
+ * pch_udc_start_next_txrequest() - This function starts
+ *					the next transmit request
+ * @ep:	Reference to the endpoint structure
+ */
+static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request *req;
+	struct pch_udc_data_dma_desc *td_data;
+
+	if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
+		return;
+
+	if (list_empty(&ep->queue))
+		return;
+
+	/* next request */
+	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+	if (req->dma_going)
+		return;
+	if (!req->td_data)
+		return;
+	pch_udc_wait_ep_stall(ep);
+	req->dma_going = 1;
+	pch_udc_ep_set_ddptr(ep, 0);
+	td_data = req->td_data;
+	while (1) {
+		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
+				   PCH_UDC_BS_HST_RDY;
+		if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
+			break;
+		td_data = phys_to_virt(td_data->next);
+	}
+	pch_udc_ep_set_ddptr(ep, req->td_data_phys);
+	pch_udc_set_dma(ep->dev, DMA_DIR_TX);
+	pch_udc_ep_set_pd(ep);
+	pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+	pch_udc_ep_clear_nak(ep);
+}
+
+/**
+ * pch_udc_complete_transfer() - This function completes a transfer
+ * @ep:		Reference to the endpoint structure
+ */
+static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request *req;
+	struct pch_udc_dev *dev = ep->dev;
+
+	if (list_empty(&ep->queue))
+		return;
+	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+	    PCH_UDC_BS_DMA_DONE)
+		return;
+	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
+	     PCH_UDC_RTS_SUCC) {
+		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
+			"epstatus=0x%08x\n",
+		       (req->td_data_last->status & PCH_UDC_RXTX_STS),
+		       (int)(ep->epsts));
+		return;
+	}
+
+	req->req.actual = req->req.length;
+	req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
+	req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
+	complete_req(ep, req, 0);
+	req->dma_going = 0;
+	if (!list_empty(&ep->queue)) {
+		pch_udc_wait_ep_stall(ep);
+		pch_udc_ep_clear_nak(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+	} else {
+		pch_udc_disable_ep_interrupts(ep->dev,
+					      PCH_UDC_EPINT(ep->in, ep->num));
+	}
+}
+
+/**
+ * pch_udc_complete_receiver() - This function completes a receive transfer
+ * @ep:		Reference to the endpoint structure
+ */
+static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
+{
+	struct pch_udc_request *req;
+	struct pch_udc_dev *dev = ep->dev;
+	unsigned int count;
+
+	if (list_empty(&ep->queue))
+		return;
+
+	/* next request */
+	req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+	if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+	    PCH_UDC_BS_DMA_DONE)
+		return;
+	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
+	    PCH_UDC_RTS_SUCC) {
+		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
+			"epstatus=0x%08x\n",
+			(req->td_data_last->status & PCH_UDC_RXTX_STS),
+			(int)(ep->epsts));
+		return;
+	}
+	count = req->td_data_last->status & PCH_UDC_RXTX_BYTES;
+
+	/* on 64k packets the RXBYTES field is zero */
+	if (!count && (req->req.length == UDC_DMA_MAXPACKET))
+		count = UDC_DMA_MAXPACKET;
+	req->td_data->status |= PCH_UDC_DMA_LAST;
+	req->td_data_last->status |= PCH_UDC_BS_HST_BSY;
+
+	req->dma_going = 0;
+	req->req.actual = count;
+	complete_req(ep, req, 0);
+	/* If there are new/failed requests, try them now */
+	if (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+		pch_udc_start_rxrequest(ep, req);
+	}
+}
+
+/**
+ * pch_udc_svc_data_in() - This function processes endpoint interrupts
+ *				for IN endpoints
+ * @dev:	Reference to the device structure
+ * @ep_num:	Endpoint that generated the interrupt
+ */
+static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
+{
+	u32	epsts;
+	struct pch_udc_ep	*ep;
+
+	ep = &dev->ep[2*ep_num];
+	epsts = ep->epsts;
+	ep->epsts = 0;
+
+	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA  | UDC_EPSTS_HE |
+		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
+		       UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
+		return;
+	if ((epsts & UDC_EPSTS_BNA))
+		return;
+	if (epsts & UDC_EPSTS_HE)
+		return;
+	if (epsts & UDC_EPSTS_RSS) {
+		pch_udc_ep_set_stall(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+	}
+	if (epsts & UDC_EPSTS_RCS) {
+		if (!dev->prot_stall) {
+			pch_udc_ep_clear_stall(ep);
+		} else {
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+		}
+	}
+	if (epsts & UDC_EPSTS_TDC)
+		pch_udc_complete_transfer(ep);
+	/* On IN interrupt, provide data if we have any */
+	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
+	    !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
+		pch_udc_start_next_txrequest(ep);
+}
+
+/**
+ * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
+ * @dev:	Reference to the device structure
+ * @ep_num:	Endpoint that generated the interrupt
+ */
+static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
+{
+	u32			epsts;
+	struct pch_udc_ep		*ep;
+	struct pch_udc_request		*req = NULL;
+
+	ep = &dev->ep[2*ep_num + 1];
+	epsts = ep->epsts;
+	ep->epsts = 0;
+
+	if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
+		/* next request */
+		req = list_entry(ep->queue.next, struct pch_udc_request,
+				 queue);
+		if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
+		     PCH_UDC_BS_DMA_DONE) {
+			if (!req->dma_going)
+				pch_udc_start_rxrequest(ep, req);
+			return;
+		}
+	}
+	if (epsts & UDC_EPSTS_HE)
+		return;
+	if (epsts & UDC_EPSTS_RSS) {
+		pch_udc_ep_set_stall(ep);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+	}
+	if (epsts & UDC_EPSTS_RCS) {
+		if (!dev->prot_stall) {
+			pch_udc_ep_clear_stall(ep);
+		} else {
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+		}
+	}
+	if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+	    UDC_EPSTS_OUT_DATA) {
+		if (ep->dev->prot_stall == 1) {
+			pch_udc_ep_set_stall(ep);
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+		} else {
+			pch_udc_complete_receiver(ep);
+		}
+	}
+	if (list_empty(&ep->queue))
+		pch_udc_set_dma(dev, DMA_DIR_RX);
+}
+
+/**
+ * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
+{
+	u32	epsts;
+	struct pch_udc_ep	*ep;
+
+	ep = &dev->ep[UDC_EP0IN_IDX];
+	epsts = ep->epsts;
+	ep->epsts = 0;
+
+	if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
+		       UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
+		       UDC_EPSTS_XFERDONE)))
+		return;
+	if ((epsts & UDC_EPSTS_BNA))
+		return;
+	if (epsts & UDC_EPSTS_HE)
+		return;
+	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
+		pch_udc_complete_transfer(ep);
+	/* On IN interrupt, provide data if we have any */
+	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
+	     !(epsts & UDC_EPSTS_TXEMPTY))
+		pch_udc_start_next_txrequest(ep);
+}
+
+/**
+ * pch_udc_svc_control_out() - Routine that handles Control
+ *					OUT endpoint interrupts
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
+{
+	u32	stat;
+	int setup_supported;
+	struct pch_udc_ep	*ep;
+
+	ep = &dev->ep[UDC_EP0OUT_IDX];
+	stat = ep->epsts;
+	ep->epsts = 0;
+
+	/* If setup data */
+	if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+	    UDC_EPSTS_OUT_SETUP) {
+		dev->stall = 0;
+		dev->ep[UDC_EP0IN_IDX].halted = 0;
+		dev->ep[UDC_EP0OUT_IDX].halted = 0;
+		/* In data not ready */
+		pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
+		dev->setup_data = ep->td_stp->request;
+		pch_udc_init_setup_buff(ep->td_stp);
+		pch_udc_clear_dma(dev, DMA_DIR_TX);
+		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
+				      dev->ep[UDC_EP0IN_IDX].in);
+		if ((dev->setup_data.bRequestType & USB_DIR_IN))
+			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+		else /* OUT */
+			dev->gadget.ep0 = &ep->ep;
+		spin_unlock(&dev->lock);
+		/* If Mass storage Reset */
+		if ((dev->setup_data.bRequestType == 0x21) &&
+		    (dev->setup_data.bRequest == 0xFF))
+			dev->prot_stall = 0;
+		/* call gadget with setup data received */
+		setup_supported = dev->driver->setup(&dev->gadget,
+						     &dev->setup_data);
+		spin_lock(&dev->lock);
+		/* ep0 in returns data on IN phase */
+		if (setup_supported >= 0 && setup_supported <
+					    UDC_EP0IN_MAX_PKT_SIZE) {
+			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
+			/* Gadget would have queued a request when
+			 * we called the setup */
+			pch_udc_set_dma(dev, DMA_DIR_RX);
+			pch_udc_ep_clear_nak(ep);
+		} else if (setup_supported < 0) {
+			/* if unsupported request, then stall */
+			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
+			pch_udc_enable_ep_interrupts(ep->dev,
+						PCH_UDC_EPINT(ep->in, ep->num));
+			dev->stall = 0;
+			pch_udc_set_dma(dev, DMA_DIR_RX);
+		} else {
+			dev->waiting_zlp_ack = 1;
+		}
+	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
+		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
+		if (list_empty(&ep->queue)) {
+			dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
+			ep->td_data->status = (ep->td_data->status &
+					       ~PCH_UDC_BUFF_STS) |
+					       PCH_UDC_BS_HST_RDY;
+			pch_udc_set_dma(dev, DMA_DIR_RX);
+		} else {
+			/* control write */
+			/* the next function will pick up and clear the status */
+			ep->epsts = stat;
+
+			pch_udc_svc_data_out(dev, 0);
+			/* re-program desc. pointer for possible ZLPs */
+			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+			pch_udc_set_dma(dev, DMA_DIR_RX);
+		}
+	}
+	pch_udc_ep_set_rrdy(ep);
+}
+
+
+/**
+ * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
+ *				and clears NAK status
+ * @dev:	Reference to the device structure
+ * @ep_num:	End point number
+ */
+static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
+{
+	struct pch_udc_ep	*ep;
+	struct pch_udc_request *req;
+
+	ep = &dev->ep[2*ep_num];
+	if (!list_empty(&ep->queue)) {
+		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
+		pch_udc_enable_ep_interrupts(ep->dev,
+					     PCH_UDC_EPINT(ep->in, ep->num));
+		pch_udc_ep_clear_nak(ep);
+	}
+}
+
+/**
+ * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
+ * @dev:	Reference to the device structure
+ * @ep_intr:	Status of endpoint interrupt
+ */
+static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
+{
+	int i;
+	struct pch_udc_ep	*ep;
+
+	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
+		/* IN */
+		if (ep_intr & (0x1 << i)) {
+			ep = &dev->ep[2*i];
+			ep->epsts = pch_udc_read_ep_status(ep);
+			pch_udc_clear_ep_status(ep, ep->epsts);
+		}
+		/* OUT */
+		if (ep_intr & (0x10000 << i)) {
+			ep = &dev->ep[2*i+1];
+			ep->epsts = pch_udc_read_ep_status(ep);
+			pch_udc_clear_ep_status(ep, ep->epsts);
+		}
+	}
+}
+
+/**
+ * pch_udc_activate_control_ep() - This function enables the control endpoints
+ *					for traffic after a reset
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
+{
+	struct pch_udc_ep	*ep;
+	u32 val;
+
+	/* Setup the IN endpoint */
+	ep = &dev->ep[UDC_EP0IN_IDX];
+	pch_udc_clear_ep_control(ep);
+	pch_udc_ep_fifo_flush(ep, ep->in);
+	pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
+	pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
+	/* Initialize the IN EP Descriptor */
+	ep->td_data      = NULL;
+	ep->td_stp       = NULL;
+	ep->td_data_phys = 0;
+	ep->td_stp_phys  = 0;
+
+	/* Setup the OUT endpoint */
+	ep = &dev->ep[UDC_EP0OUT_IDX];
+	pch_udc_clear_ep_control(ep);
+	pch_udc_ep_fifo_flush(ep, ep->in);
+	pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
+	pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
+	val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
+	pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
+
+	/* Initialize the SETUP buffer */
+	pch_udc_init_setup_buff(ep->td_stp);
+	/* Write the pointer address of dma descriptor */
+	pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
+	/* Write the pointer address of Setup descriptor */
+	pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+
+	/* Initialize the dma descriptor */
+	ep->td_data->status  = PCH_UDC_DMA_LAST;
+	ep->td_data->dataptr = dev->dma_addr;
+	ep->td_data->next    = ep->td_data_phys;
+
+	pch_udc_ep_clear_nak(ep);
+}
+
+
+/**
+ * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
+{
+	struct pch_udc_ep	*ep;
+	int i;
+
+	pch_udc_clear_dma(dev, DMA_DIR_TX);
+	pch_udc_clear_dma(dev, DMA_DIR_RX);
+	/* Mask all endpoint interrupts */
+	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+	/* clear all endpoint interrupts */
+	pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
+		ep = &dev->ep[i];
+		pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
+		pch_udc_clear_ep_control(ep);
+		pch_udc_ep_set_ddptr(ep, 0);
+		pch_udc_write_csr(ep->dev, 0x00, i);
+	}
+	dev->stall = 0;
+	dev->prot_stall = 0;
+	dev->waiting_zlp_ack = 0;
+	dev->set_cfg_not_acked = 0;
+
+	/* disable ep to empty req queue. Skip the control EP's */
+	for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
+		ep = &dev->ep[i];
+		pch_udc_ep_set_nak(ep);
+		pch_udc_ep_fifo_flush(ep, ep->in);
+		/* Complete request queue */
+		empty_req_queue(ep);
+	}
+	if (dev->driver && dev->driver->disconnect)
+		dev->driver->disconnect(&dev->gadget);
+}
+
+/**
+ * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
+ *				done interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
+{
+	u32 dev_stat, dev_speed;
+	u32 speed = USB_SPEED_FULL;
+
+	dev_stat = pch_udc_read_device_status(dev);
+	dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
+						 UDC_DEVSTS_ENUM_SPEED_SHIFT;
+	switch (dev_speed) {
+	case UDC_DEVSTS_ENUM_SPEED_HIGH:
+		speed = USB_SPEED_HIGH;
+		break;
+	case  UDC_DEVSTS_ENUM_SPEED_FULL:
+		speed = USB_SPEED_FULL;
+		break;
+	case  UDC_DEVSTS_ENUM_SPEED_LOW:
+		speed = USB_SPEED_LOW;
+		break;
+	default:
+		BUG();
+	}
+	dev->gadget.speed = speed;
+	pch_udc_activate_control_ep(dev);
+	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
+	pch_udc_set_dma(dev, DMA_DIR_TX);
+	pch_udc_set_dma(dev, DMA_DIR_RX);
+	pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
+}
+
+/**
+ * pch_udc_svc_intf_interrupt() - This function handles a set interface
+ *				  interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
+{
+	u32 reg, dev_stat = 0;
+	int i, ret;
+
+	dev_stat = pch_udc_read_device_status(dev);
+	dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
+							 UDC_DEVSTS_INTF_SHIFT;
+	dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
+							 UDC_DEVSTS_ALT_SHIFT;
+	dev->set_cfg_not_acked = 1;
+	/* Construct the usb request for gadget driver and inform it */
+	memset(&dev->setup_data, 0, sizeof dev->setup_data);
+	dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
+	dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
+	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
+	dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
+	/* program the Endpoint Cfg registers */
+	/* Only one end point cfg register */
+	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
+	reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
+	      (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
+	reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
+	      (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
+	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
+	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
+		/* clear stall bits */
+		pch_udc_ep_clear_stall(&(dev->ep[i]));
+		dev->ep[i].halted = 0;
+	}
+	dev->stall = 0;
+	spin_unlock(&dev->lock);
+	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+	spin_lock(&dev->lock);
+}
+
+/**
+ * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
+ *				interrupt
+ * @dev:	Reference to driver structure
+ */
+static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
+{
+	int i, ret;
+	u32 reg, dev_stat = 0;
+
+	dev_stat = pch_udc_read_device_status(dev);
+	dev->set_cfg_not_acked = 1;
+	dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
+				UDC_DEVSTS_CFG_SHIFT;
+	/* make usb request for gadget driver */
+	memset(&dev->setup_data, 0, sizeof dev->setup_data);
+	dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
+	dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
+	/* program the NE registers */
+	/* Only one end point cfg register */
+	reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
+	reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
+	      (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
+	pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
+	for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
+		/* clear stall bits */
+		pch_udc_ep_clear_stall(&(dev->ep[i]));
+		dev->ep[i].halted = 0;
+	}
+	dev->stall = 0;
+
+	/* call gadget zero with setup data received */
+	spin_unlock(&dev->lock);
+	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+	spin_lock(&dev->lock);
+}
+
+/**
+ * pch_udc_dev_isr() - This function services device interrupts
+ *			by invoking appropriate routines.
+ * @dev:	Reference to the device structure
+ * @dev_intr:	The Device interrupt status.
+ */
+static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
+{
+	/* USB Reset Interrupt */
+	if (dev_intr & UDC_DEVINT_UR)
+		pch_udc_svc_ur_interrupt(dev);
+	/* Enumeration Done Interrupt */
+	if (dev_intr & UDC_DEVINT_ENUM)
+		pch_udc_svc_enum_interrupt(dev);
+	/* Set Interface Interrupt */
+	if (dev_intr & UDC_DEVINT_SI)
+		pch_udc_svc_intf_interrupt(dev);
+	/* Set Config Interrupt */
+	if (dev_intr & UDC_DEVINT_SC)
+		pch_udc_svc_cfg_interrupt(dev);
+	/* USB Suspend interrupt */
+	if (dev_intr & UDC_DEVINT_US)
+		dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
+	/* Clear the SOF interrupt, if enabled */
+	if (dev_intr & UDC_DEVINT_SOF)
+		dev_dbg(&dev->pdev->dev, "SOF\n");
+	/* ES interrupt, IDLE > 3ms on the USB */
+	if (dev_intr & UDC_DEVINT_ES)
+		dev_dbg(&dev->pdev->dev, "ES\n");
+	/* RWKP interrupt */
+	if (dev_intr & UDC_DEVINT_RWKP)
+		dev_dbg(&dev->pdev->dev, "RWKP\n");
+}
+
+/**
+ * pch_udc_isr() - This function handles interrupts from the PCH USB Device
+ * @irq:	Interrupt request number
+ * @pdev:	Reference to the device structure
+ */
+static irqreturn_t pch_udc_isr(int irq, void *pdev)
+{
+	struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
+	u32 dev_intr, ep_intr;
+	int i;
+
+	dev_intr = pch_udc_read_device_interrupts(dev);
+	ep_intr = pch_udc_read_ep_interrupts(dev);
+
+	if (dev_intr)
+		/* Clear device interrupts */
+		pch_udc_write_device_interrupts(dev, dev_intr);
+	if (ep_intr)
+		/* Clear ep interrupts */
+		pch_udc_write_ep_interrupts(dev, ep_intr);
+	if (!dev_intr && !ep_intr)
+		return IRQ_NONE;
+	spin_lock(&dev->lock);
+	if (dev_intr)
+		pch_udc_dev_isr(dev, dev_intr);
+	if (ep_intr) {
+		pch_udc_read_all_epstatus(dev, ep_intr);
+		/* Process Control In interrupts, if present */
+		if (ep_intr & UDC_EPINT_IN_EP0) {
+			pch_udc_svc_control_in(dev);
+			pch_udc_postsvc_epinters(dev, 0);
+		}
+		/* Process Control Out interrupts, if present */
+		if (ep_intr & UDC_EPINT_OUT_EP0)
+			pch_udc_svc_control_out(dev);
+		/* Process data in end point interrupts */
+		for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
+			if (ep_intr & (1 <<  i)) {
+				pch_udc_svc_data_in(dev, i);
+				pch_udc_postsvc_epinters(dev, i);
+			}
+		}
+		/* Process data out end point interrupts */
+		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
+						 PCH_UDC_USED_EP_NUM); i++)
+			if (ep_intr & (1 <<  i))
+				pch_udc_svc_data_out(dev, i -
+							 UDC_EPINT_OUT_SHIFT);
+	}
+	spin_unlock(&dev->lock);
+	return IRQ_HANDLED;
+}
+
+/**
+ * pch_udc_setup_ep0() - This function enables control endpoint for traffic
+ * @dev:	Reference to the device structure
+ */
+static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
+{
+	/* enable ep0 interrupts */
+	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
+						UDC_EPINT_OUT_EP0);
+	/* enable device interrupts */
+	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
+				       UDC_DEVINT_ES | UDC_DEVINT_ENUM |
+				       UDC_DEVINT_SI | UDC_DEVINT_SC);
+}
+
+/**
+ * gadget_release() - Free the gadget driver private data
+ * @pdev:	Reference to the gadget device structure
+ */
+static void gadget_release(struct device *pdev)
+{
+	struct pch_udc_dev *dev = dev_get_drvdata(pdev);
+
+	kfree(dev);
+}
+
+/**
+ * pch_udc_pcd_reinit() - This API initializes the endpoint structures
+ * @dev:	Reference to the driver structure
+ */
+static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
+{
+	const char *const ep_string[] = {
+		ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
+		"ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
+		"ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
+		"ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
+		"ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
+		"ep15in", "ep15out",
+	};
+	int i;
+
+	dev->gadget.speed = USB_SPEED_UNKNOWN;
+	INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+	/* Initialize the endpoints structures */
+	memset(dev->ep, 0, sizeof dev->ep);
+	for (i = 0; i < PCH_UDC_EP_NUM; i++) {
+		struct pch_udc_ep *ep = &dev->ep[i];
+		ep->dev = dev;
+		ep->halted = 1;
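+		/* endpoints alternate IN/OUT: even indices are IN, odd are OUT */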
+		ep->num = i / 2;
+		ep->in = ~i & 1;
+		ep->ep.name = ep_string[i];
+		ep->ep.ops = &pch_udc_ep_ops;
+		if (ep->in)
+			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
+		else
+			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
+					  UDC_EP_REG_SHIFT;
+		/* need to set ep->ep.maxpacket and set Default Configuration? */
+		ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
+		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+		INIT_LIST_HEAD(&ep->queue);
+	}
+	dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
+	dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
+
+	dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
+				  PCI_DMA_FROMDEVICE);
+
+	/* remove ep0 in and out from the list; they have their own pointers */
+	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
+	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
+
+	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
+	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+}
+
+/**
+ * pch_udc_pcd_init() - This API initializes the driver structure
+ * @dev:	Reference to the driver structure
+ *
+ * Return codes:
+ *	0: Success
+ */
+static int pch_udc_pcd_init(struct pch_udc_dev *dev)
+{
+	pch_udc_init(dev);
+	pch_udc_pcd_reinit(dev);
+	return 0;
+}
+
+/**
+ * init_dma_pools() - create dma pools during initialization
+ * @dev:	Reference to the driver structure
+ */
+static int init_dma_pools(struct pch_udc_dev *dev)
+{
+	struct pch_udc_stp_dma_desc	*td_stp;
+	struct pch_udc_data_dma_desc	*td_data;
+
+	/* DMA setup */
+	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
+		sizeof(struct pch_udc_data_dma_desc), 0, 0);
+	if (!dev->data_requests) {
+		dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* dma desc for setup data */
+	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
+		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
+	if (!dev->stp_requests) {
+		dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
+			__func__);
+		return -ENOMEM;
+	}
+	/* setup */
+	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
+				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
+	if (!td_stp) {
+		dev_err(&dev->pdev->dev,
+			"%s: can't allocate setup dma descriptor\n", __func__);
+		return -ENOMEM;
+	}
+	dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
+
+	/* data: 0 packets !? */
+	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
+				&dev->ep[UDC_EP0OUT_IDX].td_data_phys);
+	if (!td_data) {
+		dev_err(&dev->pdev->dev,
+			"%s: can't allocate data dma descriptor\n", __func__);
+		return -ENOMEM;
+	}
+	dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
+	dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
+	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
+	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
+	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+	return 0;
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+	int (*bind)(struct usb_gadget *))
+{
+	struct pch_udc_dev	*dev = pch_udc;
+	int			retval;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!driver || (driver->speed == USB_SPEED_UNKNOWN) || !bind ||
+	    !driver->setup || !driver->unbind || !driver->disconnect) {
+		dev_err(&dev->pdev->dev,
+			"%s: invalid driver parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev->driver) {
+		dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
+		return -EBUSY;
+	}
+	driver->driver.bus = NULL;
+	dev->driver = driver;
+	dev->gadget.dev.driver = &driver->driver;
+
+	/* Invoke the bind routine of the gadget driver */
+	retval = bind(&dev->gadget);
+
+	if (retval) {
+		dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
+		       __func__, driver->driver.name, retval);
+		dev->driver = NULL;
+		dev->gadget.dev.driver = NULL;
+		return retval;
+	}
+	/* get ready for ep0 traffic */
+	pch_udc_setup_ep0(dev);
+
+	/* clear SD */
+	pch_udc_clear_disconnect(dev);
+
+	dev->connected = 1;
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+	struct pch_udc_dev	*dev = pch_udc;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!driver || (driver != dev->driver)) {
+		dev_err(&dev->pdev->dev,
+			"%s: invalid driver parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+
+	/* Assumes that there are no pending requests with this driver */
+	driver->unbind(&dev->gadget);
+	dev->gadget.dev.driver = NULL;
+	dev->driver = NULL;
+	dev->connected = 0;
+
+	/* set SD */
+	pch_udc_set_disconnect(dev);
+	return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+static void pch_udc_shutdown(struct pci_dev *pdev)
+{
+	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+
+	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+	/* disable the pullup so the host will think we're gone */
+	pch_udc_set_disconnect(dev);
+}
+
+static void pch_udc_remove(struct pci_dev *pdev)
+{
+	struct pch_udc_dev	*dev = pci_get_drvdata(pdev);
+
+	/* gadget driver must not be registered */
+	if (dev->driver)
+		dev_err(&pdev->dev,
+			"%s: gadget driver still bound!!!\n", __func__);
+	/* dma pool cleanup */
+	if (dev->data_requests)
+		pci_pool_destroy(dev->data_requests);
+
+	if (dev->stp_requests) {
+		/* cleanup DMA desc's for ep0in */
+		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
+			pci_pool_free(dev->stp_requests,
+				dev->ep[UDC_EP0OUT_IDX].td_stp,
+				dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
+		}
+		if (dev->ep[UDC_EP0OUT_IDX].td_data) {
+			pci_pool_free(dev->stp_requests,
+				dev->ep[UDC_EP0OUT_IDX].td_data,
+				dev->ep[UDC_EP0OUT_IDX].td_data_phys);
+		}
+		pci_pool_destroy(dev->stp_requests);
+	}
+
+	pch_udc_exit(dev);
+
+	if (dev->irq_registered)
+		free_irq(pdev->irq, dev);
+	if (dev->base_addr)
+		iounmap(dev->base_addr);
+	if (dev->mem_region)
+		release_mem_region(dev->phys_addr,
+				   pci_resource_len(pdev, PCH_UDC_PCI_BAR));
+	if (dev->active)
+		pci_disable_device(pdev);
+	if (dev->registered)
+		device_unregister(&dev->gadget.dev);
+	kfree(dev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+
+	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
+	pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
+
+	pci_disable_device(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+
+	if (pci_save_state(pdev)) {
+		dev_err(&pdev->dev,
+			"%s: could not save PCI config state\n", __func__);
+		return -ENOMEM;
+	}
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int pch_udc_resume(struct pci_dev *pdev)
+{
+	int ret;
+
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_restore_state(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
+		return ret;
+	}
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
+		return ret;
+	}
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	return 0;
+}
+#else
+#define pch_udc_suspend	NULL
+#define pch_udc_resume	NULL
+#endif /* CONFIG_PM */
+
+static int pch_udc_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *id)
+{
+	unsigned long		resource;
+	unsigned long		len;
+	int			retval;
+	struct pch_udc_dev	*dev;
+
+	/* one udc only */
+	if (pch_udc) {
+		pr_err("%s: already probed\n", __func__);
+		return -EBUSY;
+	}
+	/* init */
+	dev = kzalloc(sizeof *dev, GFP_KERNEL);
+	if (!dev) {
+		pr_err("%s: no memory for device structure\n", __func__);
+		return -ENOMEM;
+	}
+	/* pci setup */
+	if (pci_enable_device(pdev) < 0) {
+		kfree(dev);
+		pr_err("%s: pci_enable_device failed\n", __func__);
+		return -ENODEV;
+	}
+	dev->active = 1;
+	pci_set_drvdata(pdev, dev);
+
+	/* PCI resource allocation */
+	resource = pci_resource_start(pdev, PCH_UDC_PCI_BAR);
+	len = pci_resource_len(pdev, PCH_UDC_PCI_BAR);
+
+	if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
+		dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
+		retval = -EBUSY;
+		goto finished;
+	}
+	dev->phys_addr = resource;
+	dev->mem_region = 1;
+
+	dev->base_addr = ioremap_nocache(resource, len);
+	if (!dev->base_addr) {
+		pr_err("%s: device memory cannot be mapped\n", __func__);
+		retval = -ENOMEM;
+		goto finished;
+	}
+	if (!pdev->irq) {
+		dev_err(&pdev->dev, "%s: irq not set\n", __func__);
+		retval = -ENODEV;
+		goto finished;
+	}
+	pch_udc = dev;
+	/* initialize the hardware */
+	if (pch_udc_pcd_init(dev))
+		goto finished;
+	if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
+			dev)) {
+		dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
+			pdev->irq);
+		retval = -ENODEV;
+		goto finished;
+	}
+	dev->irq = pdev->irq;
+	dev->irq_registered = 1;
+
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
+	/* device struct setup */
+	spin_lock_init(&dev->lock);
+	dev->pdev = pdev;
+	dev->gadget.ops = &pch_udc_ops;
+
+	retval = init_dma_pools(dev);
+	if (retval)
+		goto finished;
+
+	dev_set_name(&dev->gadget.dev, "gadget");
+	dev->gadget.dev.parent = &pdev->dev;
+	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
+	dev->gadget.dev.release = gadget_release;
+	dev->gadget.name = KBUILD_MODNAME;
+	dev->gadget.is_dualspeed = 1;
+
+	retval = device_register(&dev->gadget.dev);
+	if (retval)
+		goto finished;
+	dev->registered = 1;
+
+	/* Put the device in disconnected state till a driver is bound */
+	pch_udc_set_disconnect(dev);
+	return 0;
+
+finished:
+	pch_udc_remove(pdev);
+	return retval;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
+		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask = 0xffffffff,
+	},
+	{ 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
+
+
+static struct pci_driver pch_udc_driver = {
+	.name =	KBUILD_MODNAME,
+	.id_table =	pch_udc_pcidev_id,
+	.probe =	pch_udc_probe,
+	.remove =	pch_udc_remove,
+	.suspend =	pch_udc_suspend,
+	.resume =	pch_udc_resume,
+	.shutdown =	pch_udc_shutdown,
+};
+
+static int __init pch_udc_pci_init(void)
+{
+	return pci_register_driver(&pch_udc_driver);
+}
+module_init(pch_udc_pci_init);
+
+static void __exit pch_udc_pci_exit(void)
+{
+	pci_unregister_driver(&pch_udc_driver);
+}
+module_exit(pch_udc_pci_exit);
+
+MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
+MODULE_LICENSE("GPL");
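For context, the sketch below shows how a gadget driver might bind against this UDC through the usb_gadget_probe_driver()/usb_gadget_unregister_driver() pair added above. It is a minimal, hypothetical example: the my_* names are illustrative stubs, and a real gadget would implement setup and bind properly rather than stubbing them out.

#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* stub callbacks -- pch_udc refuses to bind if setup/unbind/disconnect is NULL */
static int my_setup(struct usb_gadget *g, const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;
}
static void my_unbind(struct usb_gadget *g) { }
static void my_disconnect(struct usb_gadget *g) { }

static struct usb_gadget_driver my_driver = {
	.speed		= USB_SPEED_HIGH,
	.setup		= my_setup,
	.unbind		= my_unbind,
	.disconnect	= my_disconnect,
	.driver		= { .name = "my_gadget" },
};

static int my_bind(struct usb_gadget *gadget)
{
	/* claim endpoints from gadget->ep_list, allocate requests, ... */
	return 0;
}

static int __init my_init(void)
{
	/* fails with -EBUSY if another gadget driver is already bound */
	return usb_gadget_probe_driver(&my_driver, my_bind);
}
module_init(my_init);

static void __exit my_exit(void)
{
	usb_gadget_unregister_driver(&my_driver);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");
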
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 3b513ba..b015561 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -543,7 +543,7 @@
 	ro = curlun->initially_ro;
 	if (!ro) {
 		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
-		if (-EROFS == PTR_ERR(filp))
+		if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
 			ro = 1;
 	}
 	if (ro)
@@ -558,10 +558,7 @@
 
 	if (filp->f_path.dentry)
 		inode = filp->f_path.dentry->d_inode;
-	if (inode && S_ISBLK(inode->i_mode)) {
-		if (bdev_read_only(inode->i_bdev))
-			ro = 1;
-	} else if (!inode || !S_ISREG(inode->i_mode)) {
+	if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
 		LINFO(curlun, "invalid file type: %s\n", filename);
 		goto out;
 	}
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
index 7a86d2c..59ffe1e 100644
--- a/drivers/usb/gadget/u_audio.c
+++ b/drivers/usb/gadget/u_audio.c
@@ -255,6 +255,7 @@
 		ERROR(card, "No such PCM capture device: %s\n", fn_cap);
 		snd->substream = NULL;
 		snd->card = NULL;
+		snd->filp = NULL;
 	} else {
 		pcm_file = snd->filp->private_data;
 		snd->substream = pcm_file->substream;
@@ -273,17 +274,17 @@
 
 	/* Close control device */
 	snd = &gau->control;
-	if (!IS_ERR(snd->filp))
+	if (snd->filp)
 		filp_close(snd->filp, current->files);
 
 	/* Close PCM playback device and setup substream */
 	snd = &gau->playback;
-	if (!IS_ERR(snd->filp))
+	if (snd->filp)
 		filp_close(snd->filp, current->files);
 
 	/* Close PCM capture device and setup substream */
 	snd = &gau->capture;
-	if (!IS_ERR(snd->filp))
+	if (snd->filp)
 		filp_close(snd->filp, current->files);
 
 	return 0;
@@ -304,8 +305,7 @@
 	ret = gaudio_open_snd_dev(card);
 	if (ret)
 		ERROR(card, "we need at least one control device\n");
-
-	if (!the_card)
+	else if (!the_card)
 		the_card = card;
 
 	return ret;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index fbe86ca..1eda968 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -240,6 +240,9 @@
 	size += out->maxpacket - 1;
 	size -= size % out->maxpacket;
 
+	if (dev->port_usb->is_fixed)
+		size = max(size, dev->port_usb->fixed_out_len);
+
 	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
 	if (skb == NULL) {
 		DBG(dev, "no rx skb\n");
@@ -578,12 +581,19 @@
 	req->context = skb;
 	req->complete = tx_complete;
 
+	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
+	if (dev->port_usb->is_fixed &&
+	    length == dev->port_usb->fixed_in_len &&
+	    (length % in->maxpacket) == 0)
+		req->zero = 0;
+	else
+		req->zero = 1;
+
 	/* use zlp framing on tx for strict CDC-Ether conformance,
 	 * though any robust network rx path ignores extra padding.
 	 * and some hardware doesn't like to write zlps.
 	 */
-	req->zero = 1;
-	if (!dev->zlp && (length % in->maxpacket) == 0)
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
 		length++;
 
 	req->length = length;
@@ -829,11 +839,9 @@
 		return;
 
 	unregister_netdev(the_dev->net);
+	flush_work_sync(&the_dev->work);
 	free_netdev(the_dev->net);
 
-	/* assuming we used keventd, it must quiesce too */
-	flush_scheduled_work();
-
 	the_dev = NULL;
 }
 
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 3c8c0c9..b56e1e7 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -62,6 +62,10 @@
 
 	/* hooks for added framing, as needed for RNDIS and EEM. */
 	u32				header_len;
+	/* NCM requires fixed size bundles */
+	bool				is_fixed;
+	u32				fixed_out_len;
+	u32				fixed_in_len;
 	struct sk_buff			*(*wrap)(struct gether *port,
 						struct sk_buff *skb);
 	int				(*unwrap)(struct gether *port,
@@ -103,6 +107,7 @@
 /* each configuration may bind one instance of an ethernet link */
 int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 int eem_bind_config(struct usb_configuration *c);
 
 #ifdef USB_ETH_RNDIS
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index f8970d1..24046c0 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -133,6 +133,25 @@
 	---help---
 	  Variation of ARC USB block used in some Freescale chips.
 
+config USB_EHCI_HCD_OMAP
+	bool "EHCI support for OMAP3 and later chips"
+	depends on USB_EHCI_HCD && ARCH_OMAP
+	default y
+	---help---
+	  Enables support for the on-chip EHCI controller on
+	  OMAP3 and later chips.
+
+config USB_EHCI_MSM
+	bool "Support for MSM on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && ARCH_MSM
+	select USB_EHCI_ROOT_HUB_TT
+	select USB_MSM_OTG_72K
+	---help---
+	  Enables support for the USB Host controller present on
+	  Qualcomm chipsets. The root hub has a built-in TT.
+	  This driver depends on the OTG driver for PHY initialization,
+	  clock management, powering up VBUS, and power management.
+
 config USB_EHCI_HCD_PPC_OF
 	bool "EHCI support for PPC USB controller on OF platform bus"
 	depends on USB_EHCI_HCD && PPC_OF
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 51bd0ed..d6a69d5 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -99,6 +99,7 @@
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/* scheduling support */
 	.get_frame_number	= ehci_get_frame,
@@ -110,6 +111,8 @@
 	.bus_resume		= ehci_bus_resume,
 	.relinquish_port	= ehci_relinquish_port,
 	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
 };
 
 static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 6e25996..3be238a 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -879,7 +879,7 @@
 	int ret = 0;
 
 	if (!buf->output_buf)
-		buf->output_buf = (char *)vmalloc(buf->alloc_size);
+		buf->output_buf = vmalloc(buf->alloc_size);
 
 	if (!buf->output_buf) {
 		ret = -ENOMEM;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d0c8f7c..6fee3cd 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -114,6 +114,9 @@
 
 #define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
 
+/* for ASPM quirk of ISOC on AMD SB800 */
+static struct pci_dev *amd_nb_dev;
+
 /*-------------------------------------------------------------------------*/
 
 #include "ehci.h"
@@ -529,6 +532,11 @@
 	spin_unlock_irq (&ehci->lock);
 	ehci_mem_cleanup (ehci);
 
+	if (amd_nb_dev) {
+		pci_dev_put(amd_nb_dev);
+		amd_nb_dev = NULL;
+	}
+
 #ifdef	EHCI_STATS
 	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
 		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
@@ -1166,12 +1174,17 @@
 #define PLATFORM_DRIVER		ehci_mxc_driver
 #endif
 
+#ifdef CONFIG_CPU_SUBTYPE_SH7786
+#include "ehci-sh.c"
+#define PLATFORM_DRIVER		ehci_hcd_sh_driver
+#endif
+
 #ifdef CONFIG_SOC_AU1200
 #include "ehci-au1xxx.c"
 #define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
 #endif
 
-#ifdef CONFIG_ARCH_OMAP3
+#ifdef CONFIG_USB_EHCI_HCD_OMAP
 #include "ehci-omap.c"
 #define        PLATFORM_DRIVER         ehci_hcd_omap_driver
 #endif
@@ -1221,6 +1234,21 @@
 #define PLATFORM_DRIVER		cns3xxx_ehci_driver
 #endif
 
+#ifdef CONFIG_ARCH_VT8500
+#include "ehci-vt8500.c"
+#define	PLATFORM_DRIVER		vt8500_ehci_driver
+#endif
+
+#ifdef CONFIG_PLAT_SPEAR
+#include "ehci-spear.c"
+#define PLATFORM_DRIVER		spear_ehci_hcd_driver
+#endif
+
+#ifdef CONFIG_USB_EHCI_MSM
+#include "ehci-msm.c"
+#define PLATFORM_DRIVER		ehci_msm_driver
+#endif
+
 #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
     !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
     !defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
new file mode 100644
index 0000000..413f4de
--- /dev/null
+++ b/drivers/usb/host/ehci-msm.c
@@ -0,0 +1,345 @@
+/* ehci-msm.c - HSUSB Host Controller Driver Implementation
+ *
+ * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+ *
+ * Partly derived from ehci-fsl.c and ehci-hcd.c
+ * Copyright (c) 2000-2004 by David Brownell
+ * Copyright (c) 2005 MontaVista Software
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/usb/otg.h>
+#include <linux/usb/msm_hsusb_hw.h>
+
+#define MSM_USB_BASE (hcd->regs)
+
+static struct otg_transceiver *otg;
+
+/*
+ * ehci_run() in drivers/usb/host/ehci-hcd.c resets the controller, so the
+ * configuration done in ehci_msm_reset() vanishes once the controller is
+ * reset. Resetting the controller again in ehci_run() is unnecessary as
+ * long as the HCD has already reset it before calling ehci_run(); most
+ * HCDs do, but some do not. This function is therefore the same as
+ * ehci_run(), except that it does not reset the controller.
+ */
+static int ehci_msm_run(struct usb_hcd *hcd)
+{
+	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
+	u32			temp;
+	u32			hcc_params;
+
+	hcd->uses_new_polling = 1;
+
+	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
+
+	/*
+	 * hcc_params controls whether ehci->regs->segment must (!!!)
+	 * be used; it constrains QH/ITD/SITD and QTD locations.
+	 * pci_pool consistent memory always uses segment zero.
+	 * streaming mappings for I/O buffers, like pci_map_single(),
+	 * can return segments above 4GB, if the device allows.
+	 *
+	 * NOTE:  the dma mask is visible through dma_supported(), so
+	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
+	 * host side drivers though.
+	 */
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+	if (HCC_64BIT_ADDR(hcc_params))
+		ehci_writel(ehci, 0, &ehci->regs->segment);
+
+	/*
+	 * Philips, Intel, and maybe others need CMD_RUN before the
+	 * root hub will detect new devices (why?); NEC doesn't
+	 */
+	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+	ehci->command |= CMD_RUN;
+	ehci_writel(ehci, ehci->command, &ehci->regs->command);
+	dbg_cmd(ehci, "init", ehci->command);
+
+	/*
+	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+	 * are explicitly handed to companion controller(s), so no TT is
+	 * involved with the root hub.  (Except where one is integrated,
+	 * and there's no companion controller unless maybe for USB OTG.)
+	 *
+	 * Turning on the CF flag will transfer ownership of all ports
+	 * from the companions to the EHCI controller.  If any of the
+	 * companions are in the middle of a port reset at the time, it
+	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
+	 * guarantees that no resets are in progress.  After we set CF,
+	 * a short delay lets the hardware catch up; new resets shouldn't
+	 * be started before the port switching actions could complete.
+	 */
+	down_write(&ehci_cf_port_reset_rwsem);
+	hcd->state = HC_STATE_RUNNING;
+	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
+	usleep_range(5000, 5500);
+	up_write(&ehci_cf_port_reset_rwsem);
+	ehci->last_periodic_enable = ktime_get_real();
+
+	temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
+	ehci_info(ehci,
+		"USB %x.%x started, EHCI %x.%02x%s\n",
+		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
+		temp >> 8, temp & 0xff,
+		ignore_oc ? ", overcurrent ignored" : "");
+
+	ehci_writel(ehci, INTR_MASK,
+		    &ehci->regs->intr_enable); /* Turn On Interrupts */
+
+	/* GRR this is run-once init(), being done every time the HC starts.
+	 * So long as they're part of class devices, we can't do it in init()
+	 * since the class device isn't created that early.
+	 */
+	create_debug_files(ehci);
+	create_companion_file(ehci);
+
+	return 0;
+}
+
+static int ehci_msm_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval;
+
+	ehci->caps = USB_CAPLENGTH;
+	ehci->regs = USB_CAPLENGTH +
+		HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* cache the data to minimize the chip reads */
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	hcd->has_tt = 1;
+	ehci->sbrn = HCD_USB2;
+
+	/* data structure init */
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	retval = ehci_reset(ehci);
+	if (retval)
+		return retval;
+
+	/* bursts of unspecified length. */
+	writel(0, USB_AHBBURST);
+	/* Use the AHB transactor */
+	writel(0, USB_AHBMODE);
+	/* Disable streaming mode and select host mode */
+	writel(0x13, USB_USBMODE);
+
+	ehci_port_power(ehci, 1);
+	return 0;
+}
+
+static struct hc_driver msm_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "Qualcomm On-Chip EHCI Host Controller",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_USB2 | HCD_MEMORY,
+
+	.reset			= ehci_msm_reset,
+	.start			= ehci_msm_run,
+
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	/*
+	 * PM support
+	 */
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+};
+
+static int ehci_msm_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct resource *res;
+	int ret;
+
+	dev_dbg(&pdev->dev, "ehci_msm proble\n");
+
+	hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		dev_err(&pdev->dev, "Unable to create HCD\n");
+		return  -ENOMEM;
+	}
+
+	hcd->irq = platform_get_irq(pdev, 0);
+	if (hcd->irq < 0) {
+		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
+		ret = hcd->irq;
+		goto put_hcd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get memory resource\n");
+		ret = -ENODEV;
+		goto put_hcd;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto put_hcd;
+	}
+
+	/*
+	 * OTG driver takes care of PHY initialization, clock management,
+	 * powering up VBUS, mapping of registers address space and power
+	 * management.
+	 */
+	otg = otg_get_transceiver();
+	if (!otg) {
+		dev_err(&pdev->dev, "unable to find transceiver\n");
+		ret = -ENODEV;
+		goto unmap;
+	}
+
+	ret = otg_set_host(otg, &hcd->self);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "unable to register with transceiver\n");
+		goto put_transceiver;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+	/*
+	 * OTG device parent of HCD takes care of putting
+	 * hardware into low power mode.
+	 */
+	pm_runtime_no_callbacks(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+
+put_transceiver:
+	otg_put_transceiver(otg);
+unmap:
+	iounmap(hcd->regs);
+put_hcd:
+	usb_put_hcd(hcd);
+
+	return ret;
+}
+
+static int __devexit ehci_msm_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	device_init_wakeup(&pdev->dev, 0);
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	otg_set_host(otg, NULL);
+	otg_put_transceiver(otg);
+
+	usb_put_hcd(hcd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ehci_msm_pm_suspend(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	bool wakeup = device_may_wakeup(dev);
+
+	dev_dbg(dev, "ehci-msm PM suspend\n");
+
+	/*
+	 * The EHCI helper performs the same check before manipulating the
+	 * port wakeup flags. Repeating the check here avoids bringing the
+	 * hardware out of low power mode when there is no need to adjust
+	 * the port wakeup flags.
+	 */
+	if (hcd->self.root_hub->do_remote_wakeup && !wakeup) {
+		pm_runtime_resume(dev);
+		ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd),
+				wakeup);
+	}
+
+	return 0;
+}
+
+static int ehci_msm_pm_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "ehci-msm PM resume\n");
+	ehci_prepare_ports_for_controller_resume(hcd_to_ehci(hcd));
+
+	return 0;
+}
+#else
+#define ehci_msm_pm_suspend	NULL
+#define ehci_msm_pm_resume	NULL
+#endif
+
+static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
+	.suspend         = ehci_msm_pm_suspend,
+	.resume          = ehci_msm_pm_resume,
+};
+
+static struct platform_driver ehci_msm_driver = {
+	.probe	= ehci_msm_probe,
+	.remove	= __devexit_p(ehci_msm_remove),
+	.driver = {
+		   .name = "msm_hsusb_host",
+		   .pm = &ehci_msm_dev_pm_ops,
+	},
+};
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a22d2df..fa59b26 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,14 +36,8 @@
 static int ehci_mxc_setup(struct usb_hcd *hcd)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-	struct device *dev = hcd->self.controller;
-	struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
 	int retval;
 
-	/* EHCI registers start at offset 0x100 */
-	ehci->caps = hcd->regs + 0x100;
-	ehci->regs = hcd->regs + 0x100 +
-	    HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
 	dbg_hcs_params(ehci, "reset");
 	dbg_hcc_params(ehci, "reset");
 
@@ -65,12 +59,6 @@
 
 	ehci_reset(ehci);
 
-	/* set up the PORTSCx register */
-	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
-
-	/* is this really needed? */
-	msleep(10);
-
 	ehci_port_power(ehci, 0);
 	return 0;
 }
@@ -100,6 +88,7 @@
 	.urb_enqueue = ehci_urb_enqueue,
 	.urb_dequeue = ehci_urb_dequeue,
 	.endpoint_disable = ehci_endpoint_disable,
+	.endpoint_reset = ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
@@ -115,6 +104,8 @@
 	.bus_resume = ehci_bus_resume,
 	.relinquish_port = ehci_relinquish_port,
 	.port_handed_over = ehci_port_handed_over,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
 };
 
 static int ehci_mxc_drv_probe(struct platform_device *pdev)
@@ -125,6 +116,7 @@
 	int irq, ret;
 	struct ehci_mxc_priv *priv;
 	struct device *dev = &pdev->dev;
+	struct ehci_hcd *ehci;
 
 	dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
 
@@ -212,6 +204,19 @@
 	if (ret < 0)
 		goto err_init;
 
+	ehci = hcd_to_ehci(hcd);
+
+	/* EHCI registers start at offset 0x100 */
+	ehci->caps = hcd->regs + 0x100;
+	ehci->regs = hcd->regs + 0x100 +
+	    HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+	/* set up the PORTSCx register */
+	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
+
+	/* is this really needed? */
+	msleep(10);
+
 	/* Initialize the transceiver */
 	if (pdata->otg) {
 		pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 116ae28..680f2ef 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -1,11 +1,12 @@
 /*
- * ehci-omap.c - driver for USBHOST on OMAP 34xx processor
+ * ehci-omap.c - driver for USBHOST on OMAP3/4 processors
  *
- * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller
- * Tested on OMAP3430 ES2.0 SDP
+ * Bus Glue for the EHCI controllers in OMAP3/4
+ * Tested on several OMAP3 boards, and OMAP4 Pandaboard
  *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Copyright (C) 2007-2010 Texas Instruments, Inc.
  *	Author: Vikram Pandita <vikram.pandita@ti.com>
+ *	Author: Anand Gadiyar <gadiyar@ti.com>
  *
  * Copyright (C) 2009 Nokia Corporation
  *	Contact: Felipe Balbi <felipe.balbi@nokia.com>
@@ -26,11 +27,14 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  *
- * TODO (last updated Feb 12, 2010):
+ * TODO (last updated Nov 21, 2010):
  *	- add kernel-doc
  *	- enable AUTOIDLE
  *	- add suspend/resume
  *	- move workarounds to board-files
+ *	- factor out code common to OHCI
+ *	- add HSIC and TLL support
+ *	- convert to use hwmod and runtime PM
  */
 
 #include <linux/platform_device.h>
@@ -86,9 +90,9 @@
 #define	OMAP_TLL_ULPI_SCRATCH_REGISTER(num)		(0x816 + 0x100 * num)
 
 #define OMAP_TLL_CHANNEL_COUNT				3
-#define OMAP_TLL_CHANNEL_1_EN_MASK			(1 << 1)
-#define OMAP_TLL_CHANNEL_2_EN_MASK			(1 << 2)
-#define OMAP_TLL_CHANNEL_3_EN_MASK			(1 << 4)
+#define OMAP_TLL_CHANNEL_1_EN_MASK			(1 << 0)
+#define OMAP_TLL_CHANNEL_2_EN_MASK			(1 << 1)
+#define OMAP_TLL_CHANNEL_3_EN_MASK			(1 << 2)
 
 /* UHH Register Set */
 #define	OMAP_UHH_REVISION				(0x00)
@@ -114,6 +118,23 @@
 #define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS		(1 << 9)
 #define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS		(1 << 10)
 
+/* OMAP4-specific defines */
+#define OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR		(3 << 2)
+#define OMAP4_UHH_SYSCONFIG_NOIDLE			(1 << 2)
+
+#define OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR		(3 << 4)
+#define OMAP4_UHH_SYSCONFIG_NOSTDBY			(1 << 4)
+#define OMAP4_UHH_SYSCONFIG_SOFTRESET			(1 << 0)
+
+#define OMAP4_P1_MODE_CLEAR				(3 << 16)
+#define OMAP4_P1_MODE_TLL				(1 << 16)
+#define OMAP4_P1_MODE_HSIC				(3 << 16)
+#define OMAP4_P2_MODE_CLEAR				(3 << 18)
+#define OMAP4_P2_MODE_TLL				(1 << 18)
+#define OMAP4_P2_MODE_HSIC				(3 << 18)
+
+#define OMAP_REV2_TLL_CHANNEL_COUNT			2
+
 #define	OMAP_UHH_DEBUG_CSR				(0x44)
 
 /* EHCI Register Set */
@@ -127,6 +148,17 @@
 #define	EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT		8
 #define	EHCI_INSNREG05_ULPI_WRDATA_SHIFT		0
 
+/* Values of UHH_REVISION - Note: these are not given in the TRM */
+#define OMAP_EHCI_REV1	0x00000010	/* OMAP3 */
+#define OMAP_EHCI_REV2	0x50700100	/* OMAP4 */
+
+#define is_omap_ehci_rev1(x)	(x->omap_ehci_rev == OMAP_EHCI_REV1)
+#define is_omap_ehci_rev2(x)	(x->omap_ehci_rev == OMAP_EHCI_REV2)
+
+#define is_ehci_phy_mode(x)	(x == EHCI_HCD_OMAP_MODE_PHY)
+#define is_ehci_tll_mode(x)	(x == EHCI_HCD_OMAP_MODE_TLL)
+#define is_ehci_hsic_mode(x)	(x == EHCI_HCD_OMAP_MODE_HSIC)
+
 /*-------------------------------------------------------------------------*/
 
 static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
@@ -156,10 +188,14 @@
 	struct device		*dev;
 
 	struct clk		*usbhost_ick;
-	struct clk		*usbhost2_120m_fck;
-	struct clk		*usbhost1_48m_fck;
+	struct clk		*usbhost_hs_fck;
+	struct clk		*usbhost_fs_fck;
 	struct clk		*usbtll_fck;
 	struct clk		*usbtll_ick;
+	struct clk		*xclk60mhsp1_ck;
+	struct clk		*xclk60mhsp2_ck;
+	struct clk		*utmi_p1_fck;
+	struct clk		*utmi_p2_fck;
 
 	/* FIXME the following two workarounds are
 	 * board specific not silicon-specific so these
@@ -176,6 +212,9 @@
 	/* phy reset workaround */
 	int			phy_reset;
 
+	/* IP revision */
+	u32			omap_ehci_rev;
+
 	/* desired phy_mode: TLL, PHY */
 	enum ehci_hcd_omap_mode	port_mode[OMAP3_HS_USB_PORTS];
 
@@ -191,13 +230,14 @@
 
 /*-------------------------------------------------------------------------*/
 
-static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
+static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask,
+				u8 tll_channel_count)
 {
 	unsigned reg;
 	int i;
 
 	/* Program the 3 TLL channels upfront */
-	for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+	for (i = 0; i < tll_channel_count; i++) {
 		reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
 
 		/* Disable AutoIdle, BitStuffing and use SDR Mode */
@@ -217,7 +257,7 @@
 	ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
 
 	/* Enable channels now */
-	for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+	for (i = 0; i < tll_channel_count; i++) {
 		reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
 
 		/* Enable only the reg that is needed */
@@ -286,19 +326,19 @@
 	}
 	clk_enable(omap->usbhost_ick);
 
-	omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
-	if (IS_ERR(omap->usbhost2_120m_fck)) {
-		ret = PTR_ERR(omap->usbhost2_120m_fck);
+	omap->usbhost_hs_fck = clk_get(omap->dev, "hs_fck");
+	if (IS_ERR(omap->usbhost_hs_fck)) {
+		ret = PTR_ERR(omap->usbhost_hs_fck);
 		goto err_host_120m_fck;
 	}
-	clk_enable(omap->usbhost2_120m_fck);
+	clk_enable(omap->usbhost_hs_fck);
 
-	omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
-	if (IS_ERR(omap->usbhost1_48m_fck)) {
-		ret = PTR_ERR(omap->usbhost1_48m_fck);
+	omap->usbhost_fs_fck = clk_get(omap->dev, "fs_fck");
+	if (IS_ERR(omap->usbhost_fs_fck)) {
+		ret = PTR_ERR(omap->usbhost_fs_fck);
 		goto err_host_48m_fck;
 	}
-	clk_enable(omap->usbhost1_48m_fck);
+	clk_enable(omap->usbhost_fs_fck);
 
 	if (omap->phy_reset) {
 		/* Refer: ISSUE1 */
@@ -333,6 +373,80 @@
 	}
 	clk_enable(omap->usbtll_ick);
 
+	omap->omap_ehci_rev = ehci_omap_readl(omap->uhh_base,
+						OMAP_UHH_REVISION);
+	dev_dbg(omap->dev, "OMAP UHH_REVISION 0x%x\n",
+					omap->omap_ehci_rev);
+
+	/*
+	 * Enable per-port clocks as needed (newer controllers only).
+	 * - External ULPI clock for PHY mode
+	 * - Internal clocks for TLL and HSIC modes (TODO)
+	 */
+	if (is_omap_ehci_rev2(omap)) {
+		switch (omap->port_mode[0]) {
+		case EHCI_HCD_OMAP_MODE_PHY:
+			omap->xclk60mhsp1_ck = clk_get(omap->dev,
+							"xclk60mhsp1_ck");
+			if (IS_ERR(omap->xclk60mhsp1_ck)) {
+				ret = PTR_ERR(omap->xclk60mhsp1_ck);
+				dev_err(omap->dev,
+					"Unable to get Port1 ULPI clock\n");
+			}
+
+			omap->utmi_p1_fck = clk_get(omap->dev,
+							"utmi_p1_gfclk");
+			if (IS_ERR(omap->utmi_p1_fck)) {
+				ret = PTR_ERR(omap->utmi_p1_fck);
+				dev_err(omap->dev,
+					"Unable to get utmi_p1_fck\n");
+			}
+
+			ret = clk_set_parent(omap->utmi_p1_fck,
+						omap->xclk60mhsp1_ck);
+			if (ret != 0) {
+				dev_err(omap->dev,
+					"Unable to set P1 f-clock\n");
+			}
+			break;
+		case EHCI_HCD_OMAP_MODE_TLL:
+			/* TODO */
+		default:
+			break;
+		}
+		switch (omap->port_mode[1]) {
+		case EHCI_HCD_OMAP_MODE_PHY:
+			omap->xclk60mhsp2_ck = clk_get(omap->dev,
+							"xclk60mhsp2_ck");
+			if (IS_ERR(omap->xclk60mhsp2_ck)) {
+				ret = PTR_ERR(omap->xclk60mhsp2_ck);
+				dev_err(omap->dev,
+					"Unable to get Port2 ULPI clock\n");
+			}
+
+			omap->utmi_p2_fck = clk_get(omap->dev,
+							"utmi_p2_gfclk");
+			if (IS_ERR(omap->utmi_p2_fck)) {
+				ret = PTR_ERR(omap->utmi_p2_fck);
+				dev_err(omap->dev,
+					"Unable to get utmi_p2_fck\n");
+			}
+
+			ret = clk_set_parent(omap->utmi_p2_fck,
+						omap->xclk60mhsp2_ck);
+			if (ret != 0) {
+				dev_err(omap->dev,
+					"Unable to set P2 f-clock\n");
+			}
+			break;
+		case EHCI_HCD_OMAP_MODE_TLL:
+			/* TODO */
+		default:
+			break;
+		}
+	}
+
+
 	/* perform TLL soft reset, and wait until reset is complete */
 	ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
 			OMAP_USBTLL_SYSCONFIG_SOFTRESET);
@@ -360,12 +474,20 @@
 
 	/* Put UHH in NoIdle/NoStandby mode */
 	reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
-	reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
-			| OMAP_UHH_SYSCONFIG_SIDLEMODE
-			| OMAP_UHH_SYSCONFIG_CACTIVITY
-			| OMAP_UHH_SYSCONFIG_MIDLEMODE);
-	reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+	if (is_omap_ehci_rev1(omap)) {
+		reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+				| OMAP_UHH_SYSCONFIG_SIDLEMODE
+				| OMAP_UHH_SYSCONFIG_CACTIVITY
+				| OMAP_UHH_SYSCONFIG_MIDLEMODE);
+		reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
 
+
+	} else if (is_omap_ehci_rev2(omap)) {
+		reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+		reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+		reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+		reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+	}
 	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
 
 	reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
@@ -376,40 +498,56 @@
 			| OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
 	reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
 
-	if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
-		reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
-	if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
-		reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
-	if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
-		reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+	if (is_omap_ehci_rev1(omap)) {
+		if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+			reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+		if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+			reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+		if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+			reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
 
-	/* Bypass the TLL module for PHY mode operation */
-	if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
-		dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
-		if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
-			(omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
-				(omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
-			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
-		else
-			reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
-	} else {
-		dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
-		if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
-			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
-		else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
-			reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+		/* Bypass the TLL module for PHY mode operation */
+		if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
+			dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
+			if (is_ehci_phy_mode(omap->port_mode[0]) ||
+				is_ehci_phy_mode(omap->port_mode[1]) ||
+					is_ehci_phy_mode(omap->port_mode[2]))
+				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+			else
+				reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+		} else {
+			dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
+			if (is_ehci_phy_mode(omap->port_mode[0]))
+				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+			else if (is_ehci_tll_mode(omap->port_mode[0]))
+				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
 
-		if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
-			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
-		else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
-			reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+			if (is_ehci_phy_mode(omap->port_mode[1]))
+				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+			else if (is_ehci_tll_mode(omap->port_mode[1]))
+				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
 
-		if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
-			reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
-		else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
-			reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+			if (is_ehci_phy_mode(omap->port_mode[2]))
+				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+			else if (is_ehci_tll_mode(omap->port_mode[2]))
+				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+		}
+	} else if (is_omap_ehci_rev2(omap)) {
+		/* Clear port mode fields for PHY mode */
+		reg &= ~OMAP4_P1_MODE_CLEAR;
+		reg &= ~OMAP4_P2_MODE_CLEAR;
 
+		if (is_ehci_tll_mode(omap->port_mode[0]))
+			reg |= OMAP4_P1_MODE_TLL;
+		else if (is_ehci_hsic_mode(omap->port_mode[0]))
+			reg |= OMAP4_P1_MODE_HSIC;
+
+		if (is_ehci_tll_mode(omap->port_mode[1]))
+			reg |= OMAP4_P2_MODE_TLL;
+		else if (is_ehci_hsic_mode(omap->port_mode[1]))
+			reg |= OMAP4_P2_MODE_HSIC;
 	}
+
 	ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
 	dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
 
@@ -438,7 +576,7 @@
 			tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
 
 		/* Enable UTMI mode for required TLL channels */
-		omap_usb_utmi_init(omap, tll_ch_mask);
+		omap_usb_utmi_init(omap, tll_ch_mask, OMAP_TLL_CHANNEL_COUNT);
 	}
 
 	if (omap->phy_reset) {
@@ -464,6 +602,14 @@
 	return 0;
 
 err_sys_status:
+	clk_disable(omap->utmi_p2_fck);
+	clk_put(omap->utmi_p2_fck);
+	clk_disable(omap->xclk60mhsp2_ck);
+	clk_put(omap->xclk60mhsp2_ck);
+	clk_disable(omap->utmi_p1_fck);
+	clk_put(omap->utmi_p1_fck);
+	clk_disable(omap->xclk60mhsp1_ck);
+	clk_put(omap->xclk60mhsp1_ck);
 	clk_disable(omap->usbtll_ick);
 	clk_put(omap->usbtll_ick);
 
@@ -472,8 +618,8 @@
 	clk_put(omap->usbtll_fck);
 
 err_tll_fck:
-	clk_disable(omap->usbhost1_48m_fck);
-	clk_put(omap->usbhost1_48m_fck);
+	clk_disable(omap->usbhost_fs_fck);
+	clk_put(omap->usbhost_fs_fck);
 
 	if (omap->phy_reset) {
 		if (gpio_is_valid(omap->reset_gpio_port[0]))
@@ -484,8 +630,8 @@
 	}
 
 err_host_48m_fck:
-	clk_disable(omap->usbhost2_120m_fck);
-	clk_put(omap->usbhost2_120m_fck);
+	clk_disable(omap->usbhost_hs_fck);
+	clk_put(omap->usbhost_hs_fck);
 
 err_host_120m_fck:
 	clk_disable(omap->usbhost_ick);
@@ -503,6 +649,8 @@
 
 	/* Reset OMAP modules for insmod/rmmod to work */
 	ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+			is_omap_ehci_rev2(omap) ?
+			OMAP4_UHH_SYSCONFIG_SOFTRESET :
 			OMAP_UHH_SYSCONFIG_SOFTRESET);
 	while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
 				& (1 << 0))) {
@@ -550,16 +698,16 @@
 		omap->usbhost_ick = NULL;
 	}
 
-	if (omap->usbhost1_48m_fck != NULL) {
-		clk_disable(omap->usbhost1_48m_fck);
-		clk_put(omap->usbhost1_48m_fck);
-		omap->usbhost1_48m_fck = NULL;
+	if (omap->usbhost_fs_fck != NULL) {
+		clk_disable(omap->usbhost_fs_fck);
+		clk_put(omap->usbhost_fs_fck);
+		omap->usbhost_fs_fck = NULL;
 	}
 
-	if (omap->usbhost2_120m_fck != NULL) {
-		clk_disable(omap->usbhost2_120m_fck);
-		clk_put(omap->usbhost2_120m_fck);
-		omap->usbhost2_120m_fck = NULL;
+	if (omap->usbhost_hs_fck != NULL) {
+		clk_disable(omap->usbhost_hs_fck);
+		clk_put(omap->usbhost_hs_fck);
+		omap->usbhost_hs_fck = NULL;
 	}
 
 	if (omap->usbtll_ick != NULL) {
@@ -568,6 +716,32 @@
 		omap->usbtll_ick = NULL;
 	}
 
+	if (is_omap_ehci_rev2(omap)) {
+		if (omap->xclk60mhsp1_ck != NULL) {
+			clk_disable(omap->xclk60mhsp1_ck);
+			clk_put(omap->xclk60mhsp1_ck);
+			omap->xclk60mhsp1_ck = NULL;
+		}
+
+		if (omap->utmi_p1_fck != NULL) {
+			clk_disable(omap->utmi_p1_fck);
+			clk_put(omap->utmi_p1_fck);
+			omap->utmi_p1_fck = NULL;
+		}
+
+		if (omap->xclk60mhsp2_ck != NULL) {
+			clk_disable(omap->xclk60mhsp2_ck);
+			clk_put(omap->xclk60mhsp2_ck);
+			omap->xclk60mhsp2_ck = NULL;
+		}
+
+		if (omap->utmi_p2_fck != NULL) {
+			clk_disable(omap->utmi_p2_fck);
+			clk_put(omap->utmi_p2_fck);
+			omap->utmi_p2_fck = NULL;
+		}
+	}
+
 	if (omap->phy_reset) {
 		if (gpio_is_valid(omap->reset_gpio_port[0]))
 			gpio_free(omap->reset_gpio_port[0]);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 655f3c9..76179c3 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -22,6 +22,9 @@
 #error "This file is PCI bus glue.  CONFIG_PCI must be defined."
 #endif
 
+/* defined here to avoid adding to pci_ids.h for single instance use */
+#define PCI_DEVICE_ID_INTEL_CE4100_USB	0x2e70
+
 /*-------------------------------------------------------------------------*/
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -41,6 +44,35 @@
 	return 0;
 }
 
+static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
+{
+	struct pci_dev *amd_smbus_dev;
+	u8 rev = 0;
+
+	amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+	if (!amd_smbus_dev)
+		return 0;
+
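+	/* the L1 fix only applies to SB800, i.e. SMBus revision 0x40 or later */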
+	pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+	if (rev < 0x40) {
+		pci_dev_put(amd_smbus_dev);
+		amd_smbus_dev = NULL;
+		return 0;
+	}
+
+	if (!amd_nb_dev)
+		amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+	if (!amd_nb_dev)
+		ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
+
+	ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
+
+	pci_dev_put(amd_smbus_dev);
+	amd_smbus_dev = NULL;
+
+	return 1;
+}
+
 /* called during probe() after chip reset completes */
 static int ehci_pci_setup(struct usb_hcd *hcd)
 {
@@ -99,6 +131,9 @@
 	/* cache this readonly data; minimize chip reads */
 	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
+	if (ehci_quirk_amd_SB800(ehci))
+		ehci->amd_l1_fix = 1;
+
 	retval = ehci_halt(ehci);
 	if (retval)
 		return retval;
@@ -137,6 +172,10 @@
 			ehci_info(ehci, "disable lpm for langwell/penwell\n");
 			ehci->has_lpm = 0;
 		}
+		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) {
+			hcd->has_tt = 1;
+			tdi_reset(ehci);
+		}
 		break;
 	case PCI_VENDOR_ID_TDI:
 		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index d9f78eb..aa46f57 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1590,6 +1590,63 @@
 	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
 }
 
+#define AB_REG_BAR_LOW 0xe0
+#define AB_REG_BAR_HIGH 0xe1
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define NB_PIF0_PWRDOWN_0 0x01100012
+#define NB_PIF0_PWRDOWN_1 0x01100013
+
+static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable)
+{
+	u32 addr, addr_low, addr_high, val;
+
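+	/* 0xcd6/0xcd7 are the SB800 PM index/data ports; fetch the AB register base */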
+	outb_p(AB_REG_BAR_LOW, 0xcd6);
+	addr_low = inb_p(0xcd7);
+	outb_p(AB_REG_BAR_HIGH, 0xcd6);
+	addr_high = inb_p(0xcd7);
+	addr = addr_high << 8 | addr_low;
+	outl_p(0x30, AB_INDX(addr));
+	outl_p(0x40, AB_DATA(addr));
+	outl_p(0x34, AB_INDX(addr));
+	val = inl_p(AB_DATA(addr));
+
+	if (disable) {
+		val &= ~0x8;
+		val |= (1 << 4) | (1 << 9);
+	} else {
+		val |= 0x8;
+		val &= ~((1 << 4) | (1 << 9));
+	}
+	outl_p(val, AB_DATA(addr));
+
+	if (amd_nb_dev) {
+		addr = NB_PIF0_PWRDOWN_0;
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
+		if (disable)
+			val &= ~(0x3f << 7);
+		else
+			val |= 0x3f << 7;
+
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
+
+		addr = NB_PIF0_PWRDOWN_1;
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr);
+		pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val);
+		if (disable)
+			val &= ~(0x3f << 7);
+		else
+			val |= 0x3f << 7;
+
+		pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val);
+	}
+
+	return;
+}
+
 /* fit urb's itds into the selected schedule slot; activate as needed */
 static int
 itd_link_urb (
@@ -1616,6 +1673,12 @@
 			urb->interval,
 			next_uframe >> 3, next_uframe & 0x7);
 	}
+
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 1);
+	}
+
 	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
 
 	/* fill iTDs uframe by uframe */
@@ -1740,6 +1803,11 @@
 	(void) disable_periodic(ehci);
 	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
 
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 0);
+	}
+
 	if (unlikely(list_is_singular(&stream->td_list))) {
 		ehci_to_hcd(ehci)->self.bandwidth_allocated
 				-= stream->bandwidth;
@@ -2025,6 +2093,12 @@
 			(next_uframe >> 3) & (ehci->periodic_size - 1),
 			stream->interval, hc32_to_cpu(ehci, stream->splits));
 	}
+
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 1);
+	}
+
 	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
 
 	/* fill sITDs frame by frame */
@@ -2125,6 +2199,11 @@
 	(void) disable_periodic(ehci);
 	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
 
+	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
+		if (ehci->amd_l1_fix == 1)
+			ehci_quirk_amd_L1(ehci, 0);
+	}
+
 	if (list_is_singular(&stream->td_list)) {
 		ehci_to_hcd(ehci)->self.bandwidth_allocated
 				-= stream->bandwidth;
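The four ehci-sched.c hunks above repeat one rule: ASPM L1 is disabled when the first isochronous stream is linked (the counter is still zero and about to be incremented) and re-enabled when the last one is unlinked (the counter has just dropped back to zero). A hypothetical helper, not part of this patch, that captures the shared logic:

static inline void ehci_amd_l1_isoc_update(struct ehci_hcd *ehci, int disable)
{
	/* act only on the first linked / last unlinked isochronous stream */
	if (ehci->amd_l1_fix &&
	    ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0)
		ehci_quirk_amd_L1(ehci, disable);
}

Callers would pass disable=1 from the itd/sitd link paths and disable=0 from the corresponding completion paths.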
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
new file mode 100644
index 0000000..595f70f
--- /dev/null
+++ b/drivers/usb/host/ehci-sh.c
@@ -0,0 +1,243 @@
+/*
+ * SuperH EHCI host controller driver
+ *
+ * Copyright (C) 2010  Paul Mundt
+ *
+ * Based on ohci-sh.c and ehci-atmel.c.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+struct ehci_sh_priv {
+	struct clk *iclk, *fclk;
+	struct usb_hcd *hcd;
+};
+
+static int ehci_sh_reset(struct usb_hcd *hcd)
+{
+	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
+	int ret;
+
+	ehci->caps = hcd->regs;
+	ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
+		&ehci->caps->hc_capbase));
+
+	dbg_hcs_params(ehci, "reset");
+	dbg_hcc_params(ehci, "reset");
+
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+	ret = ehci_halt(ehci);
+	if (unlikely(ret))
+		return ret;
+
+	ret = ehci_init(hcd);
+	if (unlikely(ret))
+		return ret;
+
+	ehci->sbrn = 0x20;
+
+	ehci_reset(ehci);
+	ehci_port_power(ehci, 0);
+
+	return ret;
+}
+
+static const struct hc_driver ehci_sh_hc_driver = {
+	.description			= hcd_name,
+	.product_desc			= "SuperH EHCI",
+	.hcd_priv_size			= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq				= ehci_irq,
+	.flags				= HCD_USB2 | HCD_MEMORY,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset				= ehci_sh_reset,
+	.start				= ehci_run,
+	.stop				= ehci_stop,
+	.shutdown			= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue			= ehci_urb_enqueue,
+	.urb_dequeue			= ehci_urb_dequeue,
+	.endpoint_disable		= ehci_endpoint_disable,
+	.endpoint_reset			= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number		= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data		= ehci_hub_status_data,
+	.hub_control			= ehci_hub_control,
+
+#ifdef CONFIG_PM
+	.bus_suspend			= ehci_bus_suspend,
+	.bus_resume			= ehci_bus_resume,
+#endif
+
+	.relinquish_port		= ehci_relinquish_port,
+	.port_handed_over		= ehci_port_handed_over,
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_sh_probe(struct platform_device *pdev)
+{
+	const struct hc_driver *driver = &ehci_sh_hc_driver;
+	struct resource *res;
+	struct ehci_sh_priv *priv;
+	struct usb_hcd *hcd;
+	int irq, ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Found HC with no register addr. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		ret = -ENODEV;
+		goto fail_create_hcd;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev,
+			"Found HC with no IRQ. Check %s setup!\n",
+			dev_name(&pdev->dev));
+		ret = -ENODEV;
+		goto fail_create_hcd;
+	}
+
+	/* initialize hcd */
+	hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
+			     dev_name(&pdev->dev));
+	if (!hcd) {
+		ret = -ENOMEM;
+		goto fail_create_hcd;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+				driver->description)) {
+		dev_dbg(&pdev->dev, "controller already in use\n");
+		ret = -EBUSY;
+		goto fail_request_resource;
+	}
+
+	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+	if (hcd->regs == NULL) {
+		dev_dbg(&pdev->dev, "error mapping memory\n");
+		ret = -ENXIO;
+		goto fail_ioremap;
+	}
+
+	priv = kmalloc(sizeof(struct ehci_sh_priv), GFP_KERNEL);
+	if (!priv) {
+		dev_dbg(&pdev->dev, "error allocating priv data\n");
+		ret = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	/* These are optional, we don't care if they fail */
+	priv->fclk = clk_get(&pdev->dev, "usb_fck");
+	if (IS_ERR(priv->fclk))
+		priv->fclk = NULL;
+
+	priv->iclk = clk_get(&pdev->dev, "usb_ick");
+	if (IS_ERR(priv->iclk))
+		priv->iclk = NULL;
+
+	clk_enable(priv->fclk);
+	clk_enable(priv->iclk);
+
+	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to add hcd");
+		goto fail_add_hcd;
+	}
+
+	priv->hcd = hcd;
+	platform_set_drvdata(pdev, priv);
+
+	return ret;
+
+fail_add_hcd:
+	clk_disable(priv->iclk);
+	clk_disable(priv->fclk);
+
+	clk_put(priv->iclk);
+	clk_put(priv->fclk);
+
+	kfree(priv);
+fail_alloc:
+	iounmap(hcd->regs);
+fail_ioremap:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
+
+	return ret;
+}
+
+static int __exit ehci_hcd_sh_remove(struct platform_device *pdev)
+{
+	struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = priv->hcd;
+
+	usb_remove_hcd(hcd);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable(priv->fclk);
+	clk_disable(priv->iclk);
+
+	clk_put(priv->fclk);
+	clk_put(priv->iclk);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static void ehci_hcd_sh_shutdown(struct platform_device *pdev)
+{
+	struct ehci_sh_priv *priv = platform_get_drvdata(pdev);
+	struct usb_hcd *hcd = priv->hcd;
+
+	if (hcd->driver->shutdown)
+		hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_hcd_sh_driver = {
+	.probe		= ehci_hcd_sh_probe,
+	.remove		= __exit_p(ehci_hcd_sh_remove),
+	.shutdown	= ehci_hcd_sh_shutdown,
+	.driver		= {
+		.name	= "sh_ehci",
+		.owner	= THIS_MODULE,
+	},
+};
+
+MODULE_ALIAS("platform:sh_ehci");
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
new file mode 100644
index 0000000..75c0087
--- /dev/null
+++ b/drivers/usb/host/ehci-spear.c
@@ -0,0 +1,212 @@
+/*
+* Driver for EHCI HCD on SPEAr SoC
+*
+* Copyright (C) 2010 ST Micro Electronics,
+* Deepak Sikri <deepak.sikri@st.com>
+*
+* Based on various ehci-*.c drivers
+*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file COPYING in the main directory of this archive for
+* more details.
+*/
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+struct spear_ehci {
+	struct ehci_hcd ehci;
+	struct clk *clk;
+};
+
+#define to_spear_ehci(hcd)	(struct spear_ehci *)hcd_to_ehci(hcd)
+
+static void spear_start_ehci(struct spear_ehci *ehci)
+{
+	clk_enable(ehci->clk);
+}
+
+static void spear_stop_ehci(struct spear_ehci *ehci)
+{
+	clk_disable(ehci->clk);
+}
+
+static int ehci_spear_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int retval = 0;
+
+	/* registers start at offset 0x0 */
+	ehci->caps = hcd->regs;
+	ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
+				&ehci->caps->hc_capbase));
+	/* cache this readonly data; minimize chip reads */
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+	retval = ehci_halt(ehci);
+	if (retval)
+		return retval;
+
+	retval = ehci_init(hcd);
+	if (retval)
+		return retval;
+
+	ehci_reset(ehci);
+	ehci_port_power(ehci, 0);
+
+	return retval;
+}
+
+static const struct hc_driver ehci_spear_hc_driver = {
+	.description			= hcd_name,
+	.product_desc			= "SPEAr EHCI",
+	.hcd_priv_size			= sizeof(struct spear_ehci),
+
+	/* generic hardware linkage */
+	.irq				= ehci_irq,
+	.flags				= HCD_MEMORY | HCD_USB2,
+
+	/* basic lifecycle operations */
+	.reset				= ehci_spear_setup,
+	.start				= ehci_run,
+	.stop				= ehci_stop,
+	.shutdown			= ehci_shutdown,
+
+	/* managing i/o requests and associated device resources */
+	.urb_enqueue			= ehci_urb_enqueue,
+	.urb_dequeue			= ehci_urb_dequeue,
+	.endpoint_disable		= ehci_endpoint_disable,
+	.endpoint_reset			= ehci_endpoint_reset,
+
+	/* scheduling support */
+	.get_frame_number		= ehci_get_frame,
+
+	/* root hub support */
+	.hub_status_data		= ehci_hub_status_data,
+	.hub_control			= ehci_hub_control,
+	.bus_suspend			= ehci_bus_suspend,
+	.bus_resume			= ehci_bus_resume,
+	.relinquish_port		= ehci_relinquish_port,
+	.port_handed_over		= ehci_port_handed_over,
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct spear_ehci *ehci;
+	struct resource *res;
+	struct clk *usbh_clk;
+	const struct hc_driver *driver = &ehci_spear_hc_driver;
+	int *pdata = pdev->dev.platform_data;
+	int irq, retval;
+	char clk_name[20] = "usbh_clk";
+
+	if (pdata == NULL)
+		return -EFAULT;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto fail_irq_get;
+	}
+
+	if (*pdata >= 0)
+		sprintf(clk_name, "usbh.%01d_clk", *pdata);
+
+	usbh_clk = clk_get(NULL, clk_name);
+	if (IS_ERR(usbh_clk)) {
+		dev_err(&pdev->dev, "Error getting interface clock\n");
+		retval = PTR_ERR(usbh_clk);
+		goto fail_get_usbh_clk;
+	}
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto fail_create_hcd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		retval = -ENODEV;
+		goto fail_request_resource;
+	}
+
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+				driver->description)) {
+		retval = -EBUSY;
+		goto fail_request_resource;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (hcd->regs == NULL) {
+		dev_dbg(&pdev->dev, "error mapping memory\n");
+		retval = -ENOMEM;
+		goto fail_ioremap;
+	}
+
+	ehci = (struct spear_ehci *)hcd_to_ehci(hcd);
+	ehci->clk = usbh_clk;
+
+	spear_start_ehci(ehci);
+	retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+	if (retval)
+		goto fail_add_hcd;
+
+	return retval;
+
+fail_add_hcd:
+	spear_stop_ehci(ehci);
+	iounmap(hcd->regs);
+fail_ioremap:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	clk_put(usbh_clk);
+fail_get_usbh_clk:
+fail_irq_get:
+	dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+	return retval;
+}
+
+static int spear_ehci_hcd_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct spear_ehci *ehci_p = to_spear_ehci(hcd);
+
+	if (!hcd)
+		return 0;
+	if (in_interrupt())
+		BUG();
+	usb_remove_hcd(hcd);
+
+	if (ehci_p->clk)
+		spear_stop_ehci(ehci_p);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+
+	if (ehci_p->clk)
+		clk_put(ehci_p->clk);
+
+	return 0;
+}
+
+static struct platform_driver spear_ehci_hcd_driver = {
+	.probe		= spear_ehci_hcd_drv_probe,
+	.remove		= spear_ehci_hcd_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver		= {
+		.name = "spear-ehci",
+		.bus = &platform_bus_type
+	}
+};
+
+MODULE_ALIAS("platform:spear-ehci");
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
new file mode 100644
index 0000000..2016806
--- /dev/null
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -0,0 +1,172 @@
+/*
+ * drivers/usb/host/ehci-vt8500.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * Based on ehci-au1xxx.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+
+static int ehci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	int rc = 0;
+
+	if (!udev->parent) /* udev is root hub itself, impossible */
+		rc = -1;
+	/* we only support lpm device connected to root hub yet */
+	if (ehci->has_lpm && !udev->parent->parent) {
+		rc = ehci_lpm_set_da(ehci, udev->devnum, udev->portnum);
+		if (!rc)
+			rc = ehci_lpm_check(ehci, udev->portnum);
+	}
+	return rc;
+}
+
+static const struct hc_driver vt8500_ehci_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "VT8500 EHCI",
+	.hcd_priv_size		= sizeof(struct ehci_hcd),
+
+	/*
+	 * generic hardware linkage
+	 */
+	.irq			= ehci_irq,
+	.flags			= HCD_MEMORY | HCD_USB2,
+
+	/*
+	 * basic lifecycle operations
+	 */
+	.reset			= ehci_init,
+	.start			= ehci_run,
+	.stop			= ehci_stop,
+	.shutdown		= ehci_shutdown,
+
+	/*
+	 * managing i/o requests and associated device resources
+	 */
+	.urb_enqueue		= ehci_urb_enqueue,
+	.urb_dequeue		= ehci_urb_dequeue,
+	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
+
+	/*
+	 * scheduling support
+	 */
+	.get_frame_number	= ehci_get_frame,
+
+	/*
+	 * root hub support
+	 */
+	.hub_status_data	= ehci_hub_status_data,
+	.hub_control		= ehci_hub_control,
+	.bus_suspend		= ehci_bus_suspend,
+	.bus_resume		= ehci_bus_resume,
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
+
+	/*
+	 * call back when device connected and addressed
+	 */
+	.update_device =	ehci_update_device,
+
+	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+};
+
+static int vt8500_ehci_drv_probe(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd;
+	struct ehci_hcd *ehci;
+	struct resource *res;
+	int ret;
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	if (pdev->resource[1].flags != IORESOURCE_IRQ) {
+		pr_debug("resource[1] is not IORESOURCE_IRQ");
+		return -ENOMEM;
+	}
+	hcd = usb_create_hcd(&vt8500_ehci_hc_driver, &pdev->dev, "VT8500");
+	if (!hcd)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hcd->rsrc_start = res->start;
+	hcd->rsrc_len = resource_size(res);
+
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		pr_debug("request_mem_region failed");
+		ret = -EBUSY;
+		goto err1;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		pr_debug("ioremap failed");
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	ehci = hcd_to_ehci(hcd);
+	ehci->caps = hcd->regs;
+	ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase));
+
+	dbg_hcs_params(ehci, "reset");
+	dbg_hcc_params(ehci, "reset");
+
+	/* cache this readonly data; minimize chip reads */
+	ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+	ehci_port_power(ehci, 1);
+
+	ret = usb_add_hcd(hcd, pdev->resource[1].start,
+			  IRQF_DISABLED | IRQF_SHARED);
+	if (ret == 0) {
+		platform_set_drvdata(pdev, hcd);
+		return ret;
+	}
+
+	iounmap(hcd->regs);
+err2:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err1:
+	usb_put_hcd(hcd);
+	return ret;
+}
+
+static int vt8500_ehci_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	usb_remove_hcd(hcd);
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver vt8500_ehci_driver = {
+	.probe		= vt8500_ehci_drv_probe,
+	.remove		= vt8500_ehci_drv_remove,
+	.shutdown	= usb_hcd_platform_shutdown,
+	.driver = {
+		.name	= "vt8500-ehci",
+		.owner	= THIS_MODULE,
+	}
+};
+
+MODULE_ALIAS("platform:vt8500-ehci");
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index cfa21ea..6bc3580 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -130,6 +130,7 @@
 	.urb_enqueue = ehci_urb_enqueue,
 	.urb_dequeue = ehci_urb_dequeue,
 	.endpoint_disable = ehci_endpoint_disable,
+	.endpoint_reset = ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
@@ -147,6 +148,8 @@
 #endif
 	.relinquish_port	= ehci_relinquish_port,
 	.port_handed_over	= ehci_port_handed_over,
+
+	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
 };
 
 static int __devinit ehci_w90x900_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 6c8076a..e8f4f36 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -117,6 +117,7 @@
 	.urb_enqueue		= ehci_urb_enqueue,
 	.urb_dequeue		= ehci_urb_dequeue,
 	.endpoint_disable	= ehci_endpoint_disable,
+	.endpoint_reset		= ehci_endpoint_reset,
 
 	/*
 	 * scheduling support
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index ba8eab3..799ac16 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -131,6 +131,7 @@
 	unsigned		has_amcc_usb23:1;
 	unsigned		need_io_watchdog:1;
 	unsigned		broken_periodic:1;
+	unsigned		amd_l1_fix:1;
 	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
 	unsigned		use_dummy_qh:1;	/* AMD Frame List table quirk*/
 
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 20092a2..12fd184 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -98,13 +98,13 @@
 	usb->intr_nesting_cnt--;
 }
 
-/* diable the usb interrupt */
+/* disable the usb interrupt */
 void fhci_usb_disable_interrupt(struct fhci_usb *usb)
 {
 	struct fhci_hcd *fhci = usb->fhci;
 
 	if (usb->intr_nesting_cnt == 0) {
-		/* diable the timer interrupt */
+		/* disable the timer interrupt */
 		disable_irq_nosync(fhci->timer->irq);
 
 		/* disable the usb interrupt */
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 7be548c..38fe058 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -271,8 +271,8 @@
 
 /*
  * Collect the submitted frames and inform the application about them
- * It is also prepearing the TDs for new frames. If the Tx interrupts
- * are diabled, the application should call that routine to get
+ * It is also preparing the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
  * confirmation about the submitted frames. Otherwise, the routine is
  * called frome the interrupt service routine during the Tx interrupt.
  * In that case the application is informed by calling the application
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index e49b75a..f90d003 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1658,7 +1658,7 @@
 
 	spin_lock_irqsave(&imx21->lock, flags);
 
-	/* Reset the Host controler modules */
+	/* Reset the Host controller modules */
 	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
 		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
 		imx21->regs + USBOTG_RST_CTRL);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 5cb6731..759a12f 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -901,7 +901,8 @@
 
 	ohci_dump (ohci, 1);
 
-	flush_scheduled_work();
+	if (quirk_nec(ohci))
+		flush_work_sync(&ohci->nec_work);
 
 	ohci_usb_reset (ohci);
 	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
@@ -1081,6 +1082,11 @@
 #define OF_PLATFORM_DRIVER	ohci_hcd_ppc_of_driver
 #endif
 
+#ifdef CONFIG_PLAT_SPEAR
+#include "ohci-spear.c"
+#define PLATFORM_DRIVER		spear_ohci_hcd_driver
+#endif
+
 #ifdef CONFIG_PPC_PS3
 #include "ohci-ps3.c"
 #define PS3_SYSTEM_BUS_DRIVER	ps3_ohci_driver
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 2cc8a50..a37d599 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -648,7 +648,7 @@
 
 	ret = omap3_start_ohci(omap, hcd);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		dev_dbg(&pdev->dev, "failed to start ohci\n");
 		goto err_start;
 	}
 
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index 0b35d22..f47867f 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -109,7 +109,7 @@
 	hcd->regs = (void __iomem *)res->start;
 	hcd->rsrc_start = res->start;
 	hcd->rsrc_len = resource_size(res);
-	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
 	if (ret != 0) {
 		err("Failed to add hcd");
 		usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
new file mode 100644
index 0000000..4fd4bea
--- /dev/null
+++ b/drivers/usb/host/ohci-spear.c
@@ -0,0 +1,240 @@
+/*
+* OHCI HCD (Host Controller Driver) for USB.
+*
+* Copyright (C) 2010 ST Microelectronics.
+* Deepak Sikri <deepak.sikri@st.com>
+*
+* Based on various ohci-*.c drivers
+*
+* This file is licensed under the terms of the GNU General Public
+* License version 2. This program is licensed "as is" without any
+* warranty of any kind, whether express or implied.
+*/
+
+#include <linux/signal.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+struct spear_ohci {
+	struct ohci_hcd ohci;
+	struct clk *clk;
+};
+
+#define to_spear_ohci(hcd)	(struct spear_ohci *)hcd_to_ohci(hcd)
+
+static void spear_start_ohci(struct spear_ohci *ohci)
+{
+	clk_enable(ohci->clk);
+}
+
+static void spear_stop_ohci(struct spear_ohci *ohci)
+{
+	clk_disable(ohci->clk);
+}
+
+static int __devinit ohci_spear_start(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	int ret;
+
+	ret = ohci_init(ohci);
+	if (ret < 0)
+		return ret;
+	ohci->regs = hcd->regs;
+
+	ret = ohci_run(ohci);
+	if (ret < 0) {
+		dev_err(hcd->self.controller, "can't start\n");
+		ohci_stop(hcd);
+		return ret;
+	}
+
+	create_debug_files(ohci);
+
+#ifdef DEBUG
+	ohci_dump(ohci, 1);
+#endif
+	return 0;
+}
+
+static const struct hc_driver ohci_spear_hc_driver = {
+	.description		= hcd_name,
+	.product_desc		= "SPEAr OHCI",
+	.hcd_priv_size		= sizeof(struct spear_ohci),
+
+	/* generic hardware linkage */
+	.irq			= ohci_irq,
+	.flags			= HCD_USB11 | HCD_MEMORY,
+
+	/* basic lifecycle operations */
+	.start			= ohci_spear_start,
+	.stop			= ohci_stop,
+	.shutdown		= ohci_shutdown,
+#ifdef	CONFIG_PM
+	.bus_suspend		= ohci_bus_suspend,
+	.bus_resume		= ohci_bus_resume,
+#endif
+
+	/* managing i/o requests and associated device resources */
+	.urb_enqueue		= ohci_urb_enqueue,
+	.urb_dequeue		= ohci_urb_dequeue,
+	.endpoint_disable	= ohci_endpoint_disable,
+
+	/* scheduling support */
+	.get_frame_number	= ohci_get_frame,
+
+	/* root hub support */
+	.hub_status_data	= ohci_hub_status_data,
+	.hub_control		= ohci_hub_control,
+
+	.start_port_reset	= ohci_start_port_reset,
+};
+
+static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
+{
+	const struct hc_driver *driver = &ohci_spear_hc_driver;
+	struct usb_hcd *hcd = NULL;
+	struct clk *usbh_clk;
+	struct spear_ohci *ohci_p;
+	struct resource *res;
+	int retval, irq;
+	int *pdata = pdev->dev.platform_data;
+	char clk_name[20] = "usbh_clk";
+
+	if (pdata == NULL)
+		return -EFAULT;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		retval = irq;
+		goto fail_irq_get;
+	}
+
+	if (*pdata >= 0)
+		sprintf(clk_name, "usbh.%01d_clk", *pdata);
+
+	usbh_clk = clk_get(NULL, clk_name);
+	if (IS_ERR(usbh_clk)) {
+		dev_err(&pdev->dev, "Error getting interface clock\n");
+		retval = PTR_ERR(usbh_clk);
+		goto fail_get_usbh_clk;
+	}
+
+	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	if (!hcd) {
+		retval = -ENOMEM;
+		goto fail_create_hcd;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		retval = -ENODEV;
+		goto fail_request_resource;
+	}
+
+	hcd->rsrc_start = pdev->resource[0].start;
+	hcd->rsrc_len = resource_size(res);
+	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+		dev_dbg(&pdev->dev, "request_mem_region failed\n");
+		retval = -EBUSY;
+		goto fail_request_resource;
+	}
+
+	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+	if (!hcd->regs) {
+		dev_dbg(&pdev->dev, "ioremap failed\n");
+		retval = -ENOMEM;
+		goto fail_ioremap;
+	}
+
+	ohci_p = (struct spear_ohci *)hcd_to_ohci(hcd);
+	ohci_p->clk = usbh_clk;
+	spear_start_ohci(ohci_p);
+	ohci_hcd_init(hcd_to_ohci(hcd));
+
+	retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), IRQF_DISABLED);
+	if (retval == 0)
+		return retval;
+
+	spear_stop_ohci(ohci_p);
+	iounmap(hcd->regs);
+fail_ioremap:
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+	usb_put_hcd(hcd);
+fail_create_hcd:
+	clk_put(usbh_clk);
+fail_get_usbh_clk:
+fail_irq_get:
+	dev_err(&pdev->dev, "init fail, %d\n", retval);
+
+	return retval;
+}
+
+static int spear_ohci_hcd_drv_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct spear_ohci *ohci_p = to_spear_ohci(hcd);
+
+	usb_remove_hcd(hcd);
+	if (ohci_p->clk)
+		spear_stop_ohci(ohci_p);
+
+	iounmap(hcd->regs);
+	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+	usb_put_hcd(hcd);
+
+	if (ohci_p->clk)
+		clk_put(ohci_p->clk);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#if defined(CONFIG_PM)
+static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
+		pm_message_t message)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	struct spear_ohci *ohci_p = to_spear_ohci(hcd);
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	spear_stop_ohci(ohci_p);
+	ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
+	return 0;
+}
+
+static int spear_ohci_hcd_drv_resume(struct platform_device *dev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+	struct spear_ohci *ohci_p = to_spear_ohci(hcd);
+
+	if (time_before(jiffies, ohci->next_statechange))
+		msleep(5);
+	ohci->next_statechange = jiffies;
+
+	spear_start_ohci(ohci_p);
+	ohci_finish_controller_resume(hcd);
+	return 0;
+}
+#endif
+
+/* Driver definition to register with the platform bus */
+static struct platform_driver spear_ohci_hcd_driver = {
+	.probe =	spear_ohci_hcd_drv_probe,
+	.remove =	spear_ohci_hcd_drv_remove,
+#ifdef CONFIG_PM
+	.suspend =	spear_ohci_hcd_drv_suspend,
+	.resume =	spear_ohci_hcd_drv_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "spear-ohci",
+	},
+};
+
+MODULE_ALIAS("platform:spear-ohci");
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 32149be..e0cb12b 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3094,7 +3094,7 @@
 
 	/* Some boards (mostly VIA?) report bogus overcurrent indications,
 	 * causing massive log spam unless we completely ignore them.  It
-	 * may be relevant that VIA VT8235 controlers, where PORT_POWER is
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
 	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
 	 * PORT_POWER; that's surprising, but maybe within-spec.
 	 */
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index f52d04d..cee8678 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -569,7 +569,7 @@
  */
 static void uhci_shutdown(struct pci_dev *pdev)
 {
-	struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);
+	struct usb_hcd *hcd = pci_get_drvdata(pdev);
 
 	uhci_hc_died(hcd_to_uhci(hcd));
 }
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 2090b45..af77abb 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -29,7 +29,7 @@
 {
 	if (uhci->is_stopped)
 		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
-	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 
+	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
 }
 
 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
@@ -195,7 +195,9 @@
 		} else {
 			struct uhci_td *ntd;
 
-			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
+			ntd = list_entry(td->fl_list.next,
+					 struct uhci_td,
+					 fl_list);
 			uhci->frame[td->frame] = LINK_TO_TD(ntd);
 			uhci->frame_cpu[td->frame] = ntd;
 		}
@@ -728,7 +730,7 @@
 
 	urbp->urb = urb;
 	urb->hcpriv = urbp;
-	
+
 	INIT_LIST_HEAD(&urbp->node);
 	INIT_LIST_HEAD(&urbp->td_list);
 
@@ -846,7 +848,7 @@
 
 		/* Alternate Data0/1 (start with Data1) */
 		destination ^= TD_TOKEN_TOGGLE;
-	
+
 		uhci_add_td_to_urbp(td, urbp);
 		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
 				data);
@@ -857,7 +859,7 @@
 	}
 
 	/*
-	 * Build the final TD for control status 
+	 * Build the final TD for control status
 	 */
 	td = uhci_alloc_td(uhci);
 	if (!td)
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 72b6892..9546f6c 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -356,7 +356,7 @@
 module_exit(whci_hc_driver_exit);
 
 /* PCI device ID's that we handle (so it gets loaded) */
-static struct pci_device_id whci_hcd_id_table[] = {
+static struct pci_device_id __used whci_hcd_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
 	{ /* empty last entry */ }
 };
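The __used annotation matters here because the table's only consumer is the MODULE_DEVICE_TABLE() line that accompanies it; when the driver is built in, that macro expands to nothing, leaving a static table with no references that the compiler would warn about (and could discard). For reference, the pattern looks like this:

static struct pci_device_id __used whci_hcd_id_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
	{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);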
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 44f8b92..a6afd15 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -717,7 +717,7 @@
 		goto exit;
 	}
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
 	if (dev == NULL) {
 		dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c9078e4..e573e47 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -769,7 +769,7 @@
 	int i;
 	int retval = -ENOMEM;
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
 	if (dev == NULL) {
 		dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index edffef6..eefb827 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -642,7 +642,7 @@
 	int i;
 	int retval = -ENOMEM;
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index c96f51d..1732d9b 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -1,5 +1,5 @@
 /*
- * USB LED driver - 1.1
+ * USB LED driver
  *
  * Copyright (C) 2004 Greg Kroah-Hartman (greg@kroah.com)
  *
@@ -20,12 +20,17 @@
 #define DRIVER_AUTHOR "Greg Kroah-Hartman, greg@kroah.com"
 #define DRIVER_DESC "USB LED Driver"
 
-#define VENDOR_ID	0x0fc5
-#define PRODUCT_ID	0x1223
+enum led_type {
+	DELCOM_VISUAL_SIGNAL_INDICATOR,
+	DREAM_CHEEKY_WEBMAIL_NOTIFIER,
+};
 
 /* table of devices that work with this driver */
 static const struct usb_device_id id_table[] = {
-	{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
+	{ USB_DEVICE(0x0fc5, 0x1223),
+			.driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR },
+	{ USB_DEVICE(0x1d34, 0x0004),
+			.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
 	{ },
 };
 MODULE_DEVICE_TABLE (usb, id_table);
@@ -35,15 +40,12 @@
 	unsigned char		blue;
 	unsigned char		red;
 	unsigned char		green;
+	enum led_type		type;
 };
 
-#define BLUE	0x04
-#define RED	0x02
-#define GREEN	0x01
 static void change_color(struct usb_led *led)
 {
 	int retval;
-	unsigned char color = 0x07;
 	unsigned char *buffer;
 
 	buffer = kmalloc(8, GFP_KERNEL);
@@ -52,25 +54,59 @@
 		return;
 	}
 
-	if (led->blue)
-		color &= ~(BLUE);
-	if (led->red)
-		color &= ~(RED);
-	if (led->green)
-		color &= ~(GREEN);
-	dev_dbg(&led->udev->dev,
-		"blue = %d, red = %d, green = %d, color = %.2x\n",
-		led->blue, led->red, led->green, color);
+	switch (led->type) {
+	case DELCOM_VISUAL_SIGNAL_INDICATOR: {
+		unsigned char color = 0x07;
 
-	retval = usb_control_msg(led->udev,
-				usb_sndctrlpipe(led->udev, 0),
-				0x12,
-				0xc8,
-				(0x02 * 0x100) + 0x0a,
-				(0x00 * 0x100) + color,
-				buffer,	
-				8,
-				2000);
+		if (led->blue)
+			color &= ~0x04;
+		if (led->red)
+			color &= ~0x02;
+		if (led->green)
+			color &= ~0x01;
+		dev_dbg(&led->udev->dev,
+			"blue = %d, red = %d, green = %d, color = %.2x\n",
+			led->blue, led->red, led->green, color);
+
+		retval = usb_control_msg(led->udev,
+					usb_sndctrlpipe(led->udev, 0),
+					0x12,
+					0xc8,
+					(0x02 * 0x100) + 0x0a,
+					(0x00 * 0x100) + color,
+					buffer,
+					8,
+					2000);
+		break;
+	}
+
+	case DREAM_CHEEKY_WEBMAIL_NOTIFIER:
+		dev_dbg(&led->udev->dev,
+			"red = %d, green = %d, blue = %d\n",
+			led->red, led->green, led->blue);
+
+		buffer[0] = led->red;
+		buffer[1] = led->green;
+		buffer[2] = led->blue;
+		buffer[3] = buffer[4] = buffer[5] = 0;
+		buffer[6] = 0x1a;
+		buffer[7] = 0x05;
+
+		retval = usb_control_msg(led->udev,
+					usb_sndctrlpipe(led->udev, 0),
+					0x09,
+					0x21,
+					0x200,
+					0,
+					buffer,
+					8,
+					2000);
+		break;
+
+	default:
+		dev_err(&led->udev->dev, "unknown device type %d\n", led->type);
+	}
+
 	if (retval)
 		dev_dbg(&led->udev->dev, "retval = %d\n", retval);
 	kfree(buffer);
@@ -107,11 +143,12 @@
 
 	dev = kzalloc(sizeof(struct usb_led), GFP_KERNEL);
 	if (dev == NULL) {
-		dev_err(&interface->dev, "Out of memory\n");
+		dev_err(&interface->dev, "out of memory\n");
 		goto error_mem;
 	}
 
 	dev->udev = usb_get_dev(udev);
+	dev->type = id->driver_info;
 
 	usb_set_intfdata (interface, dev);
 
@@ -125,6 +162,31 @@
 	if (retval)
 		goto error;
 
+	if (dev->type == DREAM_CHEEKY_WEBMAIL_NOTIFIER) {
+		unsigned char *enable;
+
+		enable = kmemdup("\x1f\x02\0\x5f\0\0\x1a\x03", 8, GFP_KERNEL);
+		if (!enable) {
+			dev_err(&interface->dev, "out of memory\n");
+			retval = -ENOMEM;
+			goto error;
+		}
+
+		retval = usb_control_msg(udev,
+					usb_sndctrlpipe(udev, 0),
+					0x09,
+					0x21,
+					0x200,
+					0,
+					enable,
+					8,
+					2000);
+
+		kfree(enable);
+		if (retval != 8)
+			goto error;
+	}
+
 	dev_info(&interface->dev, "USB LED device now attached\n");
 	return 0;
 
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index c436e1e..a09dbd2 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -436,6 +436,28 @@
 	return length;
 }
 
+/*
+ * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
+ * be used to determine the length of the whole contiguous buffer.
+ */
+static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
+    struct urb *urb, unsigned int ndesc)
+{
+	struct usb_iso_packet_descriptor *fp;
+	unsigned int length;
+
+	length = 0;
+	fp = urb->iso_frame_desc;
+	while (ndesc-- != 0) {
+		if (fp->actual_length != 0) {
+			if (fp->offset + fp->actual_length > length)
+				length = fp->offset + fp->actual_length;
+		}
+		fp++;
+	}
+	return length;
+}
+
 static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
     unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
 {
@@ -478,6 +500,10 @@
 	/*
 	 * Find the maximum allowable length, then allocate space.
 	 */
+	urb_length = (ev_type == 'S') ?
+	    urb->transfer_buffer_length : urb->actual_length;
+	length = urb_length;
+
 	if (usb_endpoint_xfer_isoc(epd)) {
 		if (urb->number_of_packets < 0) {
 			ndesc = 0;
@@ -486,14 +512,16 @@
 		} else {
 			ndesc = urb->number_of_packets;
 		}
+		if (ev_type == 'C' && usb_urb_dir_in(urb))
+			length = mon_bin_collate_isodesc(rp, urb, ndesc);
 	} else {
 		ndesc = 0;
 	}
 	lendesc = ndesc*sizeof(struct mon_bin_isodesc);
 
-	urb_length = (ev_type == 'S') ?
-	    urb->transfer_buffer_length : urb->actual_length;
-	length = urb_length;
+	/* not an issue unless there's a subtle bug in a HCD somewhere */
+	if (length >= urb->transfer_buffer_length)
+		length = urb->transfer_buffer_length;
 
 	if (length >= rp->b_size/5)
 		length = rp->b_size/5;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 341a37a..4cbb7e4 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -12,6 +12,7 @@
 	depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
 	select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
 	select TWL4030_USB if MACH_OMAP_3430SDP
+	select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
 	select USB_OTG_UTILS
 	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	help
@@ -30,57 +31,41 @@
 	  If you do not know what this is, please say N.
 
 	  To compile this driver as a module, choose M here; the
-	  module will be called "musb_hdrc".
+	  module will be called "musb-hdrc".
 
-config USB_MUSB_SOC
-	boolean
+choice
+	prompt "Platform Glue Layer"
 	depends on USB_MUSB_HDRC
-	default y if ARCH_DAVINCI
-	default y if ARCH_OMAP2430
-	default y if ARCH_OMAP3
-	default y if ARCH_OMAP4
-	default y if (BF54x && !BF544)
-	default y if (BF52x && !BF522 && !BF523)
 
-comment "DaVinci 35x and 644x USB support"
-	depends on USB_MUSB_HDRC && ARCH_DAVINCI_DMx
+config USB_MUSB_DAVINCI
+	bool "DaVinci"
+	depends on ARCH_DAVINCI_DMx
 
-comment "DA8xx/OMAP-L1x USB support"
-	depends on USB_MUSB_HDRC && ARCH_DAVINCI_DA8XX
+config USB_MUSB_DA8XX
+	bool "DA8xx/OMAP-L1x"
+	depends on ARCH_DAVINCI_DA8XX
 
-comment "OMAP 243x high speed USB support"
-	depends on USB_MUSB_HDRC && ARCH_OMAP2430
+config USB_MUSB_TUSB6010
+	bool "TUSB6010"
+	depends on ARCH_OMAP
 
-comment "OMAP 343x high speed USB support"
-	depends on USB_MUSB_HDRC && ARCH_OMAP3
-
-comment "OMAP 44xx high speed USB support"
-	depends on USB_MUSB_HDRC && ARCH_OMAP4
-
-comment "Blackfin high speed USB Support"
-	depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+config USB_MUSB_OMAP2PLUS
+	bool "OMAP2430 and onwards"
+	depends on ARCH_OMAP2PLUS
 
 config USB_MUSB_AM35X
-	bool
-	depends on USB_MUSB_HDRC && !ARCH_OMAP2430 && !ARCH_OMAP4
-	select NOP_USB_XCEIV
-	default MACH_OMAP3517EVM
-	help
-	  Select this option if your platform is based on AM35x. As
-	  AM35x has an updated MUSB with CPPI4.1 DMA so this config
-	  is introduced to differentiate musb ip between OMAP3x and
-	  AM35x platforms.
+	bool "AM35x"
+	depends on ARCH_OMAP
 
-config USB_TUSB6010
-	boolean "TUSB 6010 support"
-	depends on USB_MUSB_HDRC && !USB_MUSB_SOC
-	select NOP_USB_XCEIV
-	default y
-	help
-	  The TUSB 6010 chip, from Texas Instruments, connects a discrete
-	  HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
-	  (a high speed serial link).  It can use system-specific external
-	  DMA controllers.
+config USB_MUSB_BLACKFIN
+	bool "Blackfin"
+	depends on (BF54x && !BF544) || (BF52x && !BF522 && !BF523)
+
+config USB_MUSB_UX500
+	bool "U8500 and U5500"
+	depends on (ARCH_U8500 && AB8500_USB) || (ARCH_U5500)
+
+endchoice
 
 choice
 	prompt "Driver Mode"
@@ -158,7 +143,7 @@
 config MUSB_PIO_ONLY
 	bool 'Disable DMA (always use PIO)'
 	depends on USB_MUSB_HDRC
-	default USB_TUSB6010 || ARCH_DAVINCI_DA8XX || USB_MUSB_AM35X
+	default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
 	help
 	  All data is copied between memory and FIFO by the CPU.
 	  DMA controllers are ignored.
@@ -171,21 +156,21 @@
 config USB_INVENTRA_DMA
 	bool
 	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
-	default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN || ARCH_OMAP4
+	default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
 	help
 	  Enable DMA transfers using Mentor's engine.
 
 config USB_TI_CPPI_DMA
 	bool
 	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
-	default ARCH_DAVINCI
+	default USB_MUSB_DAVINCI
 	help
 	  Enable DMA transfers when TI CPPI DMA is available.
 
 config USB_TUSB_OMAP_DMA
 	bool
 	depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
-	depends on USB_TUSB6010
+	depends on USB_MUSB_TUSB6010
 	depends on ARCH_OMAP
 	default y
 	help
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index ce164e8..74df528 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -8,22 +8,19 @@
 
 musb_hdrc-y := musb_core.o
 
-musb_hdrc-$(CONFIG_ARCH_DAVINCI_DMx)		+= davinci.o
-musb_hdrc-$(CONFIG_ARCH_DAVINCI_DA8XX)		+= da8xx.o
-musb_hdrc-$(CONFIG_USB_TUSB6010)		+= tusb6010.o
-musb_hdrc-$(CONFIG_ARCH_OMAP2430)		+= omap2430.o
-ifeq ($(CONFIG_USB_MUSB_AM35X),y)
-	musb_hdrc-$(CONFIG_ARCH_OMAP3430)	+= am35x.o
-else
-	musb_hdrc-$(CONFIG_ARCH_OMAP3430)	+= omap2430.o
-endif
-musb_hdrc-$(CONFIG_ARCH_OMAP4)			+= omap2430.o
-musb_hdrc-$(CONFIG_BF54x)			+= blackfin.o
-musb_hdrc-$(CONFIG_BF52x)			+= blackfin.o
 musb_hdrc-$(CONFIG_USB_GADGET_MUSB_HDRC)	+= musb_gadget_ep0.o musb_gadget.o
 musb_hdrc-$(CONFIG_USB_MUSB_HDRC_HCD)		+= musb_virthub.o musb_host.o
 musb_hdrc-$(CONFIG_DEBUG_FS)			+= musb_debugfs.o
 
+# Hardware Glue Layer
+obj-$(CONFIG_USB_MUSB_OMAP2PLUS)		+= omap2430.o
+obj-$(CONFIG_USB_MUSB_AM35X)			+= am35x.o
+obj-$(CONFIG_USB_MUSB_TUSB6010)			+= tusb6010.o
+obj-$(CONFIG_USB_MUSB_DAVINCI)			+= davinci.o
+obj-$(CONFIG_USB_MUSB_DA8XX)			+= da8xx.o
+obj-$(CONFIG_USB_MUSB_BLACKFIN)			+= blackfin.o
+obj-$(CONFIG_USB_MUSB_UX500)			+= ux500.o
+
 # the kconfig must guarantee that only one of the
 # possible I/O schemes will be enabled at a time ...
 # PIO only, or DMA (several potential schemes).
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index b0aabf3..d5a3da3 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -29,8 +29,9 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
-#include <plat/control.h>
 #include <plat/usb.h>
 
 #include "musb_core.h"
@@ -80,51 +81,18 @@
 
 #define USB_MENTOR_CORE_OFFSET	0x400
 
-static inline void phy_on(void)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(100);
-	u32 devconf2;
-
-	/*
-	 * Start the on-chip PHY and its PLL.
-	 */
-	devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
-
-	devconf2 &= ~(CONF2_RESET | CONF2_PHYPWRDN | CONF2_OTGPWRDN);
-	devconf2 |= CONF2_PHY_PLLON;
-
-	omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
-
-	DBG(1, "Waiting for PHY clock good...\n");
-	while (!(omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2)
-			& CONF2_PHYCLKGD)) {
-		cpu_relax();
-
-		if (time_after(jiffies, timeout)) {
-			DBG(1, "musb PHY clock good timed out\n");
-			break;
-		}
-	}
-}
-
-static inline void phy_off(void)
-{
-	u32 devconf2;
-
-	/*
-	 * Power down the on-chip PHY.
-	 */
-	devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
-
-	devconf2 &= ~CONF2_PHY_PLLON;
-	devconf2 |=  CONF2_PHYPWRDN | CONF2_OTGPWRDN;
-	omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
-}
+struct am35x_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+	struct clk		*phy_clk;
+	struct clk		*clk;
+};
+#define glue_to_musb(g)		platform_get_drvdata(g->musb)
 
 /*
- * musb_platform_enable - enable interrupts
+ * am35x_musb_enable - enable interrupts
  */
-void musb_platform_enable(struct musb *musb)
+static void am35x_musb_enable(struct musb *musb)
 {
 	void __iomem *reg_base = musb->ctrl_base;
 	u32 epmask;
@@ -143,9 +111,9 @@
 }
 
 /*
- * musb_platform_disable - disable HDRC and flush interrupts
+ * am35x_musb_disable - disable HDRC and flush interrupts
  */
-void musb_platform_disable(struct musb *musb)
+static void am35x_musb_disable(struct musb *musb)
 {
 	void __iomem *reg_base = musb->ctrl_base;
 
@@ -162,7 +130,7 @@
 #define portstate(stmt)
 #endif
 
-static void am35x_set_vbus(struct musb *musb, int is_on)
+static void am35x_musb_set_vbus(struct musb *musb, int is_on)
 {
 	WARN_ON(is_on && is_peripheral_active(musb));
 }
@@ -221,7 +189,7 @@
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
 {
 	static unsigned long last_timer;
 
@@ -251,13 +219,16 @@
 	mod_timer(&otg_workaround, timeout);
 }
 
-static irqreturn_t am35x_interrupt(int irq, void *hci)
+static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
 {
 	struct musb  *musb = hci;
 	void __iomem *reg_base = musb->ctrl_base;
+	struct device *dev = musb->controller;
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
 	unsigned long flags;
 	irqreturn_t ret = IRQ_NONE;
-	u32 epintr, usbintr, lvl_intr;
+	u32 epintr, usbintr;
 
 	spin_lock_irqsave(&musb->lock, flags);
 
@@ -346,9 +317,8 @@
 	/* EOI needs to be written for the IRQ to be re-asserted. */
 	if (ret == IRQ_HANDLED || epintr || usbintr) {
 		/* clear level interrupt */
-		lvl_intr = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
-		lvl_intr |= AM35XX_USBOTGSS_INT_CLR;
-		omap_ctrl_writel(lvl_intr, AM35XX_CONTROL_LVL_INTR_CLEAR);
+		if (data->clear_irq)
+			data->clear_irq();
 		/* write EOI */
 		musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
 	}
@@ -362,137 +332,85 @@
 	return ret;
 }
 
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
 {
-	u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
+	struct device *dev = musb->controller;
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
+	int     retval = 0;
 
-	devconf2 &= ~CONF2_OTGMODE;
-	switch (musb_mode) {
-#ifdef	CONFIG_USB_MUSB_HDRC_HCD
-	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
-		devconf2 |= CONF2_FORCE_HOST;
-		break;
-#endif
-#ifdef	CONFIG_USB_GADGET_MUSB_HDRC
-	case MUSB_PERIPHERAL:	/* Force VBUS valid, ID = 1 */
-		devconf2 |= CONF2_FORCE_DEVICE;
-		break;
-#endif
-#ifdef	CONFIG_USB_MUSB_OTG
-	case MUSB_OTG:		/* Don't override the VBUS/ID comparators */
-		devconf2 |= CONF2_NO_OVERRIDE;
-		break;
-#endif
-	default:
-		DBG(2, "Trying to set unsupported mode %u\n", musb_mode);
-	}
+	if (data->set_mode)
+		data->set_mode(musb_mode);
+	else
+		retval = -EIO;
 
-	omap_ctrl_writel(devconf2, AM35XX_CONTROL_DEVCONF2);
-	return 0;
+	return retval;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static int am35x_musb_init(struct musb *musb)
 {
+	struct device *dev = musb->controller;
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
 	void __iomem *reg_base = musb->ctrl_base;
-	u32 rev, lvl_intr, sw_reset;
-	int status;
+	u32 rev;
 
 	musb->mregs += USB_MENTOR_CORE_OFFSET;
 
-	clk_enable(musb->clock);
-	DBG(2, "musb->clock=%lud\n", clk_get_rate(musb->clock));
-
-	musb->phy_clock = clk_get(musb->controller, "fck");
-	if (IS_ERR(musb->phy_clock)) {
-		status = PTR_ERR(musb->phy_clock);
-		goto exit0;
-	}
-	clk_enable(musb->phy_clock);
-	DBG(2, "musb->phy_clock=%lud\n", clk_get_rate(musb->phy_clock));
-
 	/* Returns zero if e.g. not clocked */
 	rev = musb_readl(reg_base, USB_REVISION_REG);
-	if (!rev) {
-		status = -ENODEV;
-		goto exit1;
-	}
+	if (!rev)
+		return -ENODEV;
 
 	usb_nop_xceiv_register();
 	musb->xceiv = otg_get_transceiver();
-	if (!musb->xceiv) {
-		status = -ENODEV;
-		goto exit1;
-	}
+	if (!musb->xceiv)
+		return -ENODEV;
 
 	if (is_host_enabled(musb))
 		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
 
-	musb->board_set_vbus = am35x_set_vbus;
-
-	/* Global reset */
-	sw_reset = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
-
-	sw_reset |= AM35XX_USBOTGSS_SW_RST;
-	omap_ctrl_writel(sw_reset, AM35XX_CONTROL_IP_SW_RESET);
-
-	sw_reset &= ~AM35XX_USBOTGSS_SW_RST;
-	omap_ctrl_writel(sw_reset, AM35XX_CONTROL_IP_SW_RESET);
+	/* Reset the musb */
+	if (data->reset)
+		data->reset();
 
 	/* Reset the controller */
 	musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);
 
 	/* Start the on-chip PHY and its PLL. */
-	phy_on();
+	if (data->set_phy_power)
+		data->set_phy_power(1);
 
 	msleep(5);
 
-	musb->isr = am35x_interrupt;
+	musb->isr = am35x_musb_interrupt;
 
 	/* clear level interrupt */
-	lvl_intr = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
-	lvl_intr |= AM35XX_USBOTGSS_INT_CLR;
-	omap_ctrl_writel(lvl_intr, AM35XX_CONTROL_LVL_INTR_CLEAR);
+	if (data->clear_irq)
+		data->clear_irq();
+
 	return 0;
-exit1:
-	clk_disable(musb->phy_clock);
-	clk_put(musb->phy_clock);
-exit0:
-	clk_disable(musb->clock);
-	return status;
 }
 
-int musb_platform_exit(struct musb *musb)
+static int am35x_musb_exit(struct musb *musb)
 {
+	struct device *dev = musb->controller;
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
+
 	if (is_host_enabled(musb))
 		del_timer_sync(&otg_workaround);
 
-	phy_off();
+	/* Shutdown the on-chip PHY and its PLL. */
+	if (data->set_phy_power)
+		data->set_phy_power(0);
 
 	otg_put_transceiver(musb->xceiv);
 	usb_nop_xceiv_unregister();
 
-	clk_disable(musb->clock);
-
-	clk_disable(musb->phy_clock);
-	clk_put(musb->phy_clock);
-
 	return 0;
 }
 
-#ifdef CONFIG_PM
-void musb_platform_save_context(struct musb *musb,
-	struct musb_context_registers *musb_context)
-{
-	phy_off();
-}
-
-void musb_platform_restore_context(struct musb *musb,
-	struct musb_context_registers *musb_context)
-{
-	phy_on();
-}
-#endif
-
 /* AM35x supports only 32bit read operation */
 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
 {
@@ -522,3 +440,215 @@
 		memcpy(dst, &val, len);
 	}
 }
+
+static const struct musb_platform_ops am35x_ops = {
+	.init		= am35x_musb_init,
+	.exit		= am35x_musb_exit,
+
+	.enable		= am35x_musb_enable,
+	.disable	= am35x_musb_disable,
+
+	.set_mode	= am35x_musb_set_mode,
+	.try_idle	= am35x_musb_try_idle,
+
+	.set_vbus	= am35x_musb_set_vbus,
+};
+
+static u64 am35x_dmamask = DMA_BIT_MASK(32);
+
+static int __init am35x_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct am35x_glue		*glue;
+
+	struct clk			*phy_clk;
+	struct clk			*clk;
+
+	int				ret = -ENOMEM;
+
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	phy_clk = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(phy_clk)) {
+		dev_err(&pdev->dev, "failed to get PHY clock\n");
+		ret = PTR_ERR(phy_clk);
+		goto err2;
+	}
+
+	clk = clk_get(&pdev->dev, "ick");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		ret = PTR_ERR(clk);
+		goto err3;
+	}
+
+	ret = clk_enable(phy_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable PHY clock\n");
+		goto err4;
+	}
+
+	ret = clk_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		goto err5;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+	musb->dev.dma_mask		= &am35x_dmamask;
+	musb->dev.coherent_dma_mask	= am35x_dmamask;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+	glue->phy_clk			= phy_clk;
+	glue->clk			= clk;
+
+	pdata->platform_ops		= &am35x_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err6;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err6;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err6;
+	}
+
+	return 0;
+
+err6:
+	clk_disable(clk);
+
+err5:
+	clk_disable(phy_clk);
+
+err4:
+	clk_put(clk);
+
+err3:
+	clk_put(phy_clk);
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit am35x_remove(struct platform_device *pdev)
+{
+	struct am35x_glue	*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	clk_disable(glue->clk);
+	clk_disable(glue->phy_clk);
+	clk_put(glue->clk);
+	clk_put(glue->phy_clk);
+	kfree(glue);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int am35x_suspend(struct device *dev)
+{
+	struct am35x_glue	*glue = dev_get_drvdata(dev);
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
+
+	/* Shutdown the on-chip PHY and its PLL. */
+	if (data->set_phy_power)
+		data->set_phy_power(0);
+
+	clk_disable(glue->phy_clk);
+	clk_disable(glue->clk);
+
+	return 0;
+}
+
+static int am35x_resume(struct device *dev)
+{
+	struct am35x_glue	*glue = dev_get_drvdata(dev);
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
+	int			ret;
+
+	/* Start the on-chip PHY and its PLL. */
+	if (data->set_phy_power)
+		data->set_phy_power(1);
+
+	ret = clk_enable(glue->phy_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable PHY clock\n");
+		return ret;
+	}
+
+	ret = clk_enable(glue->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct dev_pm_ops am35x_pm_ops = {
+	.suspend	= am35x_suspend,
+	.resume		= am35x_resume,
+};
+
+#define DEV_PM_OPS	&am35x_pm_ops
+#else
+#define DEV_PM_OPS	NULL
+#endif
+
+static struct platform_driver am35x_driver = {
+	.remove		= __exit_p(am35x_remove),
+	.driver		= {
+		.name	= "musb-am35x",
+		.pm	= DEV_PM_OPS,
+	},
+};
+
+MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
+MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init am35x_init(void)
+{
+	return platform_driver_probe(&am35x_driver, am35x_probe);
+}
+subsys_initcall(am35x_init);
+
+static void __exit am35x_exit(void)
+{
+	platform_driver_unregister(&am35x_driver);
+}
+module_exit(am35x_exit);
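The rewritten glue no longer touches the OMAP control module registers itself; it expects omap_musb_board_data to carry set_phy_power, clear_irq, set_mode and reset callbacks, presumably implemented on the mach-omap2 side elsewhere in this series. A sketch of what that board side might look like, with the register-level bodies left as comments because those details were deliberately moved out of this file:

/* hypothetical board-support fragment */
static void am35xx_musb_phy_power(u8 on)
{
	/* toggle CONF2_PHY_PLLON / CONF2_PHYPWRDN in DEVCONF2, as the
	 * removed phy_on()/phy_off() helpers used to do */
}

static void am35xx_musb_clear_irq(void)
{
	/* write AM35XX_USBOTGSS_INT_CLR to the level-interrupt clear register */
}

static void am35xx_musb_set_mode(u8 musb_mode)
{
	/* program CONF2_FORCE_HOST / CONF2_FORCE_DEVICE / CONF2_NO_OVERRIDE */
}

static void am35xx_musb_reset(void)
{
	/* pulse AM35XX_USBOTGSS_SW_RST in the IP software reset register */
}

static struct omap_musb_board_data musb_board_data = {
	.set_phy_power	= am35xx_musb_phy_power,
	.clear_irq	= am35xx_musb_clear_irq,
	.set_mode	= am35xx_musb_set_mode,
	.reset		= am35xx_musb_reset,
};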
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fcb5206..eeba228 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -15,12 +15,20 @@
 #include <linux/list.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/cacheflush.h>
 
 #include "musb_core.h"
 #include "blackfin.h"
 
+struct bfin_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+};
+#define glue_to_musb(g)		platform_get_drvdata(g->musb)
+
 /*
  * Load an endpoint's FIFO
  */
@@ -278,7 +286,7 @@
 	DBG(4, "state is %s\n", otg_state_string(musb));
 }
 
-void musb_platform_enable(struct musb *musb)
+static void bfin_musb_enable(struct musb *musb)
 {
 	if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
 		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
@@ -286,11 +294,11 @@
 	}
 }
 
-void musb_platform_disable(struct musb *musb)
+static void bfin_musb_disable(struct musb *musb)
 {
 }
 
-static void bfin_set_vbus(struct musb *musb, int is_on)
+static void bfin_musb_set_vbus(struct musb *musb, int is_on)
 {
 	int value = musb->config->gpio_vrsel_active;
 	if (!is_on)
@@ -303,28 +311,28 @@
 		musb_readb(musb->mregs, MUSB_DEVCTL));
 }
 
-static int bfin_set_power(struct otg_transceiver *x, unsigned mA)
+static int bfin_musb_set_power(struct otg_transceiver *x, unsigned mA)
 {
 	return 0;
 }
 
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout)
 {
 	if (!is_otg_enabled(musb) && is_host_enabled(musb))
 		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
 }
 
-int musb_platform_get_vbus_status(struct musb *musb)
+static int bfin_musb_vbus_status(struct musb *musb)
 {
 	return 0;
 }
 
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
 {
 	return -EIO;
 }
 
-static void musb_platform_reg_init(struct musb *musb)
+static void bfin_musb_reg_init(struct musb *musb)
 {
 	if (ANOMALY_05000346) {
 		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
@@ -362,7 +370,7 @@
 	SSYNC();
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static int bfin_musb_init(struct musb *musb)
 {
 
 	/*
@@ -386,25 +394,124 @@
 		return -ENODEV;
 	}
 
-	musb_platform_reg_init(musb);
+	bfin_musb_reg_init(musb);
 
 	if (is_host_enabled(musb)) {
-		musb->board_set_vbus = bfin_set_vbus;
 		setup_timer(&musb_conn_timer,
 			musb_conn_timer_handler, (unsigned long) musb);
 	}
 	if (is_peripheral_enabled(musb))
-		musb->xceiv->set_power = bfin_set_power;
+		musb->xceiv->set_power = bfin_musb_set_power;
 
 	musb->isr = blackfin_interrupt;
 
 	return 0;
 }
 
-#ifdef CONFIG_PM
-void musb_platform_save_context(struct musb *musb,
-			struct musb_context_registers *musb_context)
+static int bfin_musb_exit(struct musb *musb)
 {
+	gpio_free(musb->config->gpio_vrsel);
+
+	otg_put_transceiver(musb->xceiv);
+	usb_nop_xceiv_unregister();
+	return 0;
+}
+
+static const struct musb_platform_ops bfin_ops = {
+	.init		= bfin_musb_init,
+	.exit		= bfin_musb_exit,
+
+	.enable		= bfin_musb_enable,
+	.disable	= bfin_musb_disable,
+
+	.set_mode	= bfin_musb_set_mode,
+	.try_idle	= bfin_musb_try_idle,
+
+	.vbus_status	= bfin_musb_vbus_status,
+	.set_vbus	= bfin_musb_set_vbus,
+};
+
+static u64 bfin_dmamask = DMA_BIT_MASK(32);
+
+static int __init bfin_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct bfin_glue		*glue;
+
+	int				ret = -ENOMEM;
+
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+	musb->dev.dma_mask		= &bfin_dmamask;
+	musb->dev.coherent_dma_mask	= bfin_dmamask;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+
+	pdata->platform_ops		= &bfin_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err2;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err2;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err2;
+	}
+
+	return 0;
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit bfin_remove(struct platform_device *pdev)
+{
+	struct bfin_glue		*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	kfree(glue);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_suspend(struct device *dev)
+{
+	struct bfin_glue	*glue = dev_get_drvdata(dev);
+	struct musb		*musb = glue_to_musb(glue);
+
 	if (is_host_active(musb))
 		/*
 		 * During hibernate gpio_vrsel will change from high to low
@@ -413,20 +520,50 @@
 		 * wakeup event.
 		 */
 		gpio_set_value(musb->config->gpio_vrsel, 0);
-}
 
-void musb_platform_restore_context(struct musb *musb,
-			struct musb_context_registers *musb_context)
-{
-	musb_platform_reg_init(musb);
-}
-#endif
-
-int musb_platform_exit(struct musb *musb)
-{
-	gpio_free(musb->config->gpio_vrsel);
-
-	otg_put_transceiver(musb->xceiv);
-	usb_nop_xceiv_unregister();
 	return 0;
 }
+
+static int bfin_resume(struct device *dev)
+{
+	struct bfin_glue	*glue = dev_get_drvdata(dev);
+	struct musb		*musb = glue_to_musb(glue);
+
+	bfin_musb_reg_init(musb);
+
+	return 0;
+}
+
+static struct dev_pm_ops bfin_pm_ops = {
+	.suspend	= bfin_suspend,
+	.resume		= bfin_resume,
+};
+
+#define DEV_PM_OPS	&bfin_pm_ops
+#else
+#define DEV_PM_OPS	NULL
+#endif
+
+static struct platform_driver bfin_driver = {
+	.remove		= __exit_p(bfin_remove),
+	.driver		= {
+		.name	= "musb-bfin",
+		.pm	= DEV_PM_OPS,
+	},
+};
+
+MODULE_DESCRIPTION("Blackfin MUSB Glue Layer");
+MODULE_AUTHOR("Bryan Wy <cooloney@kernel.org>");
+MODULE_LICENSE("GPL v2");
+
+static int __init bfin_init(void)
+{
+	return platform_driver_probe(&bfin_driver, bfin_probe);
+}
+subsys_initcall(bfin_init);
+
+static void __exit bfin_exit(void)
+{
+	platform_driver_unregister(&bfin_driver);
+}
+module_exit(bfin_exit);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index f5a65ff..de55a3c 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1308,7 +1308,7 @@
 	struct cppi		*controller;
 	struct device		*dev = musb->controller;
 	struct platform_device	*pdev = to_platform_device(dev);
-	int			irq = platform_get_irq(pdev, 1);
+	int			irq = platform_get_irq_byname(pdev, "dma");
 
 	controller = kzalloc(sizeof *controller, GFP_KERNEL);
 	if (!controller)
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 84427be..69a0da3 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -29,6 +29,8 @@
 #include <linux/init.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include <mach/da8xx.h>
 #include <mach/usb.h>
@@ -78,6 +80,12 @@
 
 #define CFGCHIP2	IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG)
 
+struct da8xx_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+	struct clk		*clk;
+};
+
 /*
  * REVISIT (PM): we should be able to keep the PHY in low power mode most
  * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0
@@ -131,9 +139,9 @@
  */
 
 /**
- * musb_platform_enable - enable interrupts
+ * da8xx_musb_enable - enable interrupts
  */
-void musb_platform_enable(struct musb *musb)
+static void da8xx_musb_enable(struct musb *musb)
 {
 	void __iomem *reg_base = musb->ctrl_base;
 	u32 mask;
@@ -151,9 +159,9 @@
 }
 
 /**
- * musb_platform_disable - disable HDRC and flush interrupts
+ * da8xx_musb_disable - disable HDRC and flush interrupts
  */
-void musb_platform_disable(struct musb *musb)
+static void da8xx_musb_disable(struct musb *musb)
 {
 	void __iomem *reg_base = musb->ctrl_base;
 
@@ -170,7 +178,7 @@
 #define portstate(stmt)
 #endif
 
-static void da8xx_set_vbus(struct musb *musb, int is_on)
+static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
 {
 	WARN_ON(is_on && is_peripheral_active(musb));
 }
@@ -252,7 +260,7 @@
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
 {
 	static unsigned long last_timer;
 
@@ -282,7 +290,7 @@
 	mod_timer(&otg_workaround, timeout);
 }
 
-static irqreturn_t da8xx_interrupt(int irq, void *hci)
+static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
 {
 	struct musb		*musb = hci;
 	void __iomem		*reg_base = musb->ctrl_base;
@@ -380,7 +388,7 @@
 	return ret;
 }
 
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
 {
 	u32 cfgchip2 = __raw_readl(CFGCHIP2);
 
@@ -409,15 +417,13 @@
 	return 0;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static int da8xx_musb_init(struct musb *musb)
 {
 	void __iomem *reg_base = musb->ctrl_base;
 	u32 rev;
 
 	musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
 
-	clk_enable(musb->clock);
-
 	/* Returns zero if e.g. not clocked */
 	rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
 	if (!rev)
@@ -431,8 +437,6 @@
 	if (is_host_enabled(musb))
 		setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
 
-	musb->board_set_vbus = da8xx_set_vbus;
-
 	/* Reset the controller */
 	musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
 
@@ -446,14 +450,13 @@
 		 rev, __raw_readl(CFGCHIP2),
 		 musb_readb(reg_base, DA8XX_USB_CTRL_REG));
 
-	musb->isr = da8xx_interrupt;
+	musb->isr = da8xx_musb_interrupt;
 	return 0;
 fail:
-	clk_disable(musb->clock);
 	return -ENODEV;
 }
 
-int musb_platform_exit(struct musb *musb)
+static int da8xx_musb_exit(struct musb *musb)
 {
 	if (is_host_enabled(musb))
 		del_timer_sync(&otg_workaround);
@@ -463,7 +466,140 @@
 	otg_put_transceiver(musb->xceiv);
 	usb_nop_xceiv_unregister();
 
-	clk_disable(musb->clock);
+	return 0;
+}
+
+static const struct musb_platform_ops da8xx_ops = {
+	.init		= da8xx_musb_init,
+	.exit		= da8xx_musb_exit,
+
+	.enable		= da8xx_musb_enable,
+	.disable	= da8xx_musb_disable,
+
+	.set_mode	= da8xx_musb_set_mode,
+	.try_idle	= da8xx_musb_try_idle,
+
+	.set_vbus	= da8xx_musb_set_vbus,
+};
+
+static u64 da8xx_dmamask = DMA_BIT_MASK(32);
+
+static int __init da8xx_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct da8xx_glue		*glue;
+
+	struct clk			*clk;
+
+	int				ret = -ENOMEM;
+
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	clk = clk_get(&pdev->dev, "usb20");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		ret = PTR_ERR(clk);
+		goto err2;
+	}
+
+	ret = clk_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		goto err3;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+	musb->dev.dma_mask		= &da8xx_dmamask;
+	musb->dev.coherent_dma_mask	= da8xx_dmamask;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+	glue->clk			= clk;
+
+	pdata->platform_ops		= &da8xx_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err4;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err4;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err4;
+	}
+
+	return 0;
+
+err4:
+	clk_disable(clk);
+
+err3:
+	clk_put(clk);
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit da8xx_remove(struct platform_device *pdev)
+{
+	struct da8xx_glue		*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	clk_disable(glue->clk);
+	clk_put(glue->clk);
+	kfree(glue);
 
 	return 0;
 }
+
+static struct platform_driver da8xx_driver = {
+	.remove		= __exit_p(da8xx_remove),
+	.driver		= {
+		.name	= "musb-da8xx",
+	},
+};
+
+MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer");
+MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init da8xx_init(void)
+{
+	return platform_driver_probe(&da8xx_driver, da8xx_probe);
+}
+subsys_initcall(da8xx_init);
+
+static void __exit da8xx_exit(void)
+{
+	platform_driver_unregister(&da8xx_driver);
+}
+module_exit(da8xx_exit);
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 6e67629..e6de097 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -30,6 +30,8 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include <mach/hardware.h>
 #include <mach/memory.h>
@@ -51,6 +53,12 @@
 #define USB_PHY_CTRL	IO_ADDRESS(USBPHY_CTL_PADDR)
 #define DM355_DEEPSLEEP	IO_ADDRESS(DM355_DEEPSLEEP_PADDR)
 
+struct davinci_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+	struct clk		*clk;
+};
+
 /* REVISIT (PM) we should be able to keep the PHY in low power mode most
  * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
  * and, when in host mode, autosuspending idle root ports... PHYPLLON
@@ -83,7 +91,7 @@
 
 static int dma_off = 1;
 
-void musb_platform_enable(struct musb *musb)
+static void davinci_musb_enable(struct musb *musb)
 {
 	u32	tmp, old, val;
 
@@ -116,7 +124,7 @@
 /*
  * Disable the HDRC and flush interrupts
  */
-void musb_platform_disable(struct musb *musb)
+static void davinci_musb_disable(struct musb *musb)
 {
 	/* because we don't set CTRLR.UINT, "important" to:
 	 *  - not read/write INTRUSB/INTRUSBE
@@ -167,7 +175,7 @@
 
 #endif	/* EVM */
 
-static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
 {
 #ifdef CONFIG_MACH_DAVINCI_EVM
 	if (is_on)
@@ -190,10 +198,10 @@
 #endif
 }
 
-static void davinci_set_vbus(struct musb *musb, int is_on)
+static void davinci_musb_set_vbus(struct musb *musb, int is_on)
 {
 	WARN_ON(is_on && is_peripheral_active(musb));
-	davinci_source_power(musb, is_on, 0);
+	davinci_musb_source_power(musb, is_on, 0);
 }
 
 
@@ -259,7 +267,7 @@
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
-static irqreturn_t davinci_interrupt(int irq, void *__hci)
+static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
 {
 	unsigned long	flags;
 	irqreturn_t	retval = IRQ_NONE;
@@ -345,7 +353,7 @@
 		/* NOTE:  this must complete poweron within 100 msec
 		 * (OTG_TIME_A_WAIT_VRISE) but we don't check for that.
 		 */
-		davinci_source_power(musb, drvvbus, 0);
+		davinci_musb_source_power(musb, drvvbus, 0);
 		DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
 				drvvbus ? "on" : "off",
 				otg_state_string(musb),
@@ -370,13 +378,13 @@
 	return retval;
 }
 
-int musb_platform_set_mode(struct musb *musb, u8 mode)
+static int davinci_musb_set_mode(struct musb *musb, u8 mode)
 {
 	/* EVM can't do this (right?) */
 	return -EIO;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static int davinci_musb_init(struct musb *musb)
 {
 	void __iomem	*tibase = musb->ctrl_base;
 	u32		revision;
@@ -388,8 +396,6 @@
 
 	musb->mregs += DAVINCI_BASE_OFFSET;
 
-	clk_enable(musb->clock);
-
 	/* returns zero if e.g. not clocked */
 	revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
 	if (revision == 0)
@@ -398,8 +404,7 @@
 	if (is_host_enabled(musb))
 		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
 
-	musb->board_set_vbus = davinci_set_vbus;
-	davinci_source_power(musb, 0, 1);
+	davinci_musb_source_power(musb, 0, 1);
 
 	/* dm355 EVM swaps D+/D- for signal integrity, and
 	 * is clocked from the main 24 MHz crystal.
@@ -440,18 +445,16 @@
 		revision, __raw_readl(USB_PHY_CTRL),
 		musb_readb(tibase, DAVINCI_USB_CTRL_REG));
 
-	musb->isr = davinci_interrupt;
+	musb->isr = davinci_musb_interrupt;
 	return 0;
 
 fail:
-	clk_disable(musb->clock);
-
 	otg_put_transceiver(musb->xceiv);
 	usb_nop_xceiv_unregister();
 	return -ENODEV;
 }
 
-int musb_platform_exit(struct musb *musb)
+static int davinci_musb_exit(struct musb *musb)
 {
 	if (is_host_enabled(musb))
 		del_timer_sync(&otg_workaround);
@@ -465,7 +468,7 @@
 		__raw_writel(deepsleep, DM355_DEEPSLEEP);
 	}
 
-	davinci_source_power(musb, 0 /*off*/, 1);
+	davinci_musb_source_power(musb, 0 /*off*/, 1);
 
 	/* delay, to avoid problems with module reload */
 	if (is_host_enabled(musb) && musb->xceiv->default_a) {
@@ -495,10 +498,141 @@
 
 	phy_off();
 
-	clk_disable(musb->clock);
-
 	otg_put_transceiver(musb->xceiv);
 	usb_nop_xceiv_unregister();
 
 	return 0;
 }
+
+static const struct musb_platform_ops davinci_ops = {
+	.init		= davinci_musb_init,
+	.exit		= davinci_musb_exit,
+
+	.enable		= davinci_musb_enable,
+	.disable	= davinci_musb_disable,
+
+	.set_mode	= davinci_musb_set_mode,
+
+	.set_vbus	= davinci_musb_set_vbus,
+};
+
+static u64 davinci_dmamask = DMA_BIT_MASK(32);
+
+static int __init davinci_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct davinci_glue		*glue;
+	struct clk			*clk;
+
+	int				ret = -ENOMEM;
+
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	clk = clk_get(&pdev->dev, "usb");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		ret = PTR_ERR(clk);
+		goto err2;
+	}
+
+	ret = clk_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		goto err3;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+	musb->dev.dma_mask		= &davinci_dmamask;
+	musb->dev.coherent_dma_mask	= davinci_dmamask;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+	glue->clk			= clk;
+
+	pdata->platform_ops		= &davinci_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err4;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err4;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err4;
+	}
+
+	return 0;
+
+err4:
+	clk_disable(clk);
+
+err3:
+	clk_put(clk);
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit davinci_remove(struct platform_device *pdev)
+{
+	struct davinci_glue		*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	clk_disable(glue->clk);
+	clk_put(glue->clk);
+	kfree(glue);
+
+	return 0;
+}
+
+static struct platform_driver davinci_driver = {
+	.remove		= __exit_p(davinci_remove),
+	.driver		= {
+		.name	= "musb-davinci",
+	},
+};
+
+MODULE_DESCRIPTION("DaVinci MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init davinci_init(void)
+{
+	return platform_driver_probe(&davinci_driver, davinci_probe);
+}
+subsys_initcall(davinci_init);
+
+static void __exit davinci_exit(void)
+{
+	platform_driver_unregister(&davinci_driver);
+}
+module_exit(davinci_exit);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 99beebc..07cf394 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -99,19 +99,8 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#ifdef	CONFIG_ARM
-#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <asm/mach-types.h>
-#endif
-
 #include "musb_core.h"
 
-
-#ifdef CONFIG_ARCH_DAVINCI
-#include "davinci.h"
-#endif
-
 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
 
 
@@ -126,7 +115,7 @@
 
 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
 
-#define MUSB_DRIVER_NAME "musb_hdrc"
+#define MUSB_DRIVER_NAME "musb-hdrc"
 const char musb_driver_name[] = MUSB_DRIVER_NAME;
 
 MODULE_DESCRIPTION(DRIVER_INFO);
@@ -230,7 +219,7 @@
 
 /*-------------------------------------------------------------------------*/
 
-#if !defined(CONFIG_USB_TUSB6010) && !defined(CONFIG_BLACKFIN)
+#if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN)
 
 /*
  * Load an endpoint's FIFO
@@ -390,7 +379,7 @@
 	case OTG_STATE_A_SUSPEND:
 	case OTG_STATE_A_WAIT_BCON:
 		DBG(1, "HNP: %s timeout\n", otg_state_string(musb));
-		musb_set_vbus(musb, 0);
+		musb_platform_set_vbus(musb, 0);
 		musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
 		break;
 	default:
@@ -571,7 +560,7 @@
 		musb->ep0_stage = MUSB_EP0_START;
 		musb->xceiv->state = OTG_STATE_A_IDLE;
 		MUSB_HST_MODE(musb);
-		musb_set_vbus(musb, 1);
+		musb_platform_set_vbus(musb, 1);
 
 		handled = IRQ_HANDLED;
 	}
@@ -642,7 +631,7 @@
 
 		/* go through A_WAIT_VFALL then start a new session */
 		if (!ignore)
-			musb_set_vbus(musb, 0);
+			musb_platform_set_vbus(musb, 0);
 		handled = IRQ_HANDLED;
 	}
 
@@ -1049,8 +1038,6 @@
 	spin_lock_irqsave(&musb->lock, flags);
 	musb_platform_disable(musb);
 	musb_generic_disable(musb);
-	if (musb->clock)
-		clk_put(musb->clock);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
 	if (!is_otg_enabled(musb) && is_host_enabled(musb))
@@ -1074,10 +1061,11 @@
  * We don't currently use dynamic fifo setup capability to do anything
  * more than selecting one of a bunch of predefined configurations.
  */
-#if defined(CONFIG_USB_TUSB6010) || \
-	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
-	|| defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
+	|| defined(CONFIG_USB_MUSB_AM35X)
 static ushort __initdata fifo_mode = 4;
+#elif defined(CONFIG_USB_MUSB_UX500)
+static ushort __initdata fifo_mode = 5;
 #else
 static ushort __initdata fifo_mode = 2;
 #endif
@@ -1501,7 +1489,7 @@
 		struct musb_hw_ep	*hw_ep = musb->endpoints + i;
 
 		hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
-#ifdef CONFIG_USB_TUSB6010
+#ifdef CONFIG_USB_MUSB_TUSB6010
 		hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
 		hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
 		hw_ep->fifo_sync_va =
@@ -1548,7 +1536,8 @@
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \
-	defined(CONFIG_ARCH_OMAP4)
+	defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \
+	defined(CONFIG_ARCH_U5500)
 
 static irqreturn_t generic_interrupt(int irq, void *__hci)
 {
@@ -1904,6 +1893,7 @@
 	}
 
 	musb->controller = dev;
+
 	return musb;
 }
 
@@ -2000,30 +1990,14 @@
 	spin_lock_init(&musb->lock);
 	musb->board_mode = plat->mode;
 	musb->board_set_power = plat->set_power;
-	musb->set_clock = plat->set_clock;
 	musb->min_power = plat->min_power;
-
-	/* Clock usage is chip-specific ... functional clock (DaVinci,
-	 * OMAP2430), or PHY ref (some TUSB6010 boards).  All this core
-	 * code does is make sure a clock handle is available; platform
-	 * code manages it during start/stop and suspend/resume.
-	 */
-	if (plat->clock) {
-		musb->clock = clk_get(dev, plat->clock);
-		if (IS_ERR(musb->clock)) {
-			status = PTR_ERR(musb->clock);
-			musb->clock = NULL;
-			goto fail1;
-		}
-	}
+	musb->ops = plat->platform_ops;
 
 	/* The musb_platform_init() call:
 	 *   - adjusts musb->mregs and musb->isr if needed,
 	 *   - may initialize an integrated tranceiver
 	 *   - initializes musb->xceiv, usually by otg_get_transceiver()
-	 *   - activates clocks.
 	 *   - stops powering VBUS
-	 *   - assigns musb->board_set_vbus if host mode is enabled
 	 *
 	 * There are various transciever configurations.  Blackfin,
 	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
@@ -2031,9 +2005,9 @@
 	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
 	 */
 	musb->isr = generic_interrupt;
-	status = musb_platform_init(musb, plat->board_data);
+	status = musb_platform_init(musb);
 	if (status < 0)
-		goto fail2;
+		goto fail1;
 
 	if (!musb->isr) {
 		status = -ENODEV;
@@ -2186,10 +2160,6 @@
 		device_init_wakeup(dev, 0);
 	musb_platform_exit(musb);
 
-fail2:
-	if (musb->clock)
-		clk_put(musb->clock);
-
 fail1:
 	dev_err(musb->controller,
 		"musb_init_controller failed with status %d\n", status);
@@ -2215,7 +2185,7 @@
 static int __init musb_probe(struct platform_device *pdev)
 {
 	struct device	*dev = &pdev->dev;
-	int		irq = platform_get_irq(pdev, 0);
+	int		irq = platform_get_irq_byname(pdev, "mc");
 	int		status;
 	struct resource	*iomem;
 	void __iomem	*base;
@@ -2265,144 +2235,138 @@
 
 #ifdef	CONFIG_PM
 
-static struct musb_context_registers musb_context;
-
-void musb_save_context(struct musb *musb)
+static void musb_save_context(struct musb *musb)
 {
 	int i;
 	void __iomem *musb_base = musb->mregs;
 	void __iomem *epio;
 
 	if (is_host_enabled(musb)) {
-		musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
-		musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
-		musb_context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
+		musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
+		musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+		musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
 	}
-	musb_context.power = musb_readb(musb_base, MUSB_POWER);
-	musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
-	musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
-	musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
-	musb_context.index = musb_readb(musb_base, MUSB_INDEX);
-	musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
+	musb->context.power = musb_readb(musb_base, MUSB_POWER);
+	musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+	musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
+	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
 
 	for (i = 0; i < musb->config->num_eps; ++i) {
 		epio = musb->endpoints[i].regs;
-		musb_context.index_regs[i].txmaxp =
+		musb->context.index_regs[i].txmaxp =
 			musb_readw(epio, MUSB_TXMAXP);
-		musb_context.index_regs[i].txcsr =
+		musb->context.index_regs[i].txcsr =
 			musb_readw(epio, MUSB_TXCSR);
-		musb_context.index_regs[i].rxmaxp =
+		musb->context.index_regs[i].rxmaxp =
 			musb_readw(epio, MUSB_RXMAXP);
-		musb_context.index_regs[i].rxcsr =
+		musb->context.index_regs[i].rxcsr =
 			musb_readw(epio, MUSB_RXCSR);
 
 		if (musb->dyn_fifo) {
-			musb_context.index_regs[i].txfifoadd =
+			musb->context.index_regs[i].txfifoadd =
 					musb_read_txfifoadd(musb_base);
-			musb_context.index_regs[i].rxfifoadd =
+			musb->context.index_regs[i].rxfifoadd =
 					musb_read_rxfifoadd(musb_base);
-			musb_context.index_regs[i].txfifosz =
+			musb->context.index_regs[i].txfifosz =
 					musb_read_txfifosz(musb_base);
-			musb_context.index_regs[i].rxfifosz =
+			musb->context.index_regs[i].rxfifosz =
 					musb_read_rxfifosz(musb_base);
 		}
 		if (is_host_enabled(musb)) {
-			musb_context.index_regs[i].txtype =
+			musb->context.index_regs[i].txtype =
 				musb_readb(epio, MUSB_TXTYPE);
-			musb_context.index_regs[i].txinterval =
+			musb->context.index_regs[i].txinterval =
 				musb_readb(epio, MUSB_TXINTERVAL);
-			musb_context.index_regs[i].rxtype =
+			musb->context.index_regs[i].rxtype =
 				musb_readb(epio, MUSB_RXTYPE);
-			musb_context.index_regs[i].rxinterval =
+			musb->context.index_regs[i].rxinterval =
 				musb_readb(epio, MUSB_RXINTERVAL);
 
-			musb_context.index_regs[i].txfunaddr =
+			musb->context.index_regs[i].txfunaddr =
 				musb_read_txfunaddr(musb_base, i);
-			musb_context.index_regs[i].txhubaddr =
+			musb->context.index_regs[i].txhubaddr =
 				musb_read_txhubaddr(musb_base, i);
-			musb_context.index_regs[i].txhubport =
+			musb->context.index_regs[i].txhubport =
 				musb_read_txhubport(musb_base, i);
 
-			musb_context.index_regs[i].rxfunaddr =
+			musb->context.index_regs[i].rxfunaddr =
 				musb_read_rxfunaddr(musb_base, i);
-			musb_context.index_regs[i].rxhubaddr =
+			musb->context.index_regs[i].rxhubaddr =
 				musb_read_rxhubaddr(musb_base, i);
-			musb_context.index_regs[i].rxhubport =
+			musb->context.index_regs[i].rxhubport =
 				musb_read_rxhubport(musb_base, i);
 		}
 	}
-
-	musb_platform_save_context(musb, &musb_context);
 }
 
-void musb_restore_context(struct musb *musb)
+static void musb_restore_context(struct musb *musb)
 {
 	int i;
 	void __iomem *musb_base = musb->mregs;
 	void __iomem *ep_target_regs;
 	void __iomem *epio;
 
-	musb_platform_restore_context(musb, &musb_context);
-
 	if (is_host_enabled(musb)) {
-		musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
-		musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
-		musb_write_ulpi_buscontrol(musb->mregs, musb_context.busctl);
+		musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
+		musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
+		musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
 	}
-	musb_writeb(musb_base, MUSB_POWER, musb_context.power);
-	musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
-	musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe);
-	musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
-	musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
+	musb_writeb(musb_base, MUSB_POWER, musb->context.power);
+	musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe);
+	musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe);
+	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
+	musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
 
 	for (i = 0; i < musb->config->num_eps; ++i) {
 		epio = musb->endpoints[i].regs;
 		musb_writew(epio, MUSB_TXMAXP,
-			musb_context.index_regs[i].txmaxp);
+			musb->context.index_regs[i].txmaxp);
 		musb_writew(epio, MUSB_TXCSR,
-			musb_context.index_regs[i].txcsr);
+			musb->context.index_regs[i].txcsr);
 		musb_writew(epio, MUSB_RXMAXP,
-			musb_context.index_regs[i].rxmaxp);
+			musb->context.index_regs[i].rxmaxp);
 		musb_writew(epio, MUSB_RXCSR,
-			musb_context.index_regs[i].rxcsr);
+			musb->context.index_regs[i].rxcsr);
 
 		if (musb->dyn_fifo) {
 			musb_write_txfifosz(musb_base,
-				musb_context.index_regs[i].txfifosz);
+				musb->context.index_regs[i].txfifosz);
 			musb_write_rxfifosz(musb_base,
-				musb_context.index_regs[i].rxfifosz);
+				musb->context.index_regs[i].rxfifosz);
 			musb_write_txfifoadd(musb_base,
-				musb_context.index_regs[i].txfifoadd);
+				musb->context.index_regs[i].txfifoadd);
 			musb_write_rxfifoadd(musb_base,
-				musb_context.index_regs[i].rxfifoadd);
+				musb->context.index_regs[i].rxfifoadd);
 		}
 
 		if (is_host_enabled(musb)) {
 			musb_writeb(epio, MUSB_TXTYPE,
-				musb_context.index_regs[i].txtype);
+				musb->context.index_regs[i].txtype);
 			musb_writeb(epio, MUSB_TXINTERVAL,
-				musb_context.index_regs[i].txinterval);
+				musb->context.index_regs[i].txinterval);
 			musb_writeb(epio, MUSB_RXTYPE,
-				musb_context.index_regs[i].rxtype);
+				musb->context.index_regs[i].rxtype);
 			musb_writeb(epio, MUSB_RXINTERVAL,
 
-			musb_context.index_regs[i].rxinterval);
+			musb->context.index_regs[i].rxinterval);
 			musb_write_txfunaddr(musb_base, i,
-				musb_context.index_regs[i].txfunaddr);
+				musb->context.index_regs[i].txfunaddr);
 			musb_write_txhubaddr(musb_base, i,
-				musb_context.index_regs[i].txhubaddr);
+				musb->context.index_regs[i].txhubaddr);
 			musb_write_txhubport(musb_base, i,
-				musb_context.index_regs[i].txhubport);
+				musb->context.index_regs[i].txhubport);
 
 			ep_target_regs =
 				musb_read_target_reg_base(i, musb_base);
 
 			musb_write_rxfunaddr(ep_target_regs,
-				musb_context.index_regs[i].rxfunaddr);
+				musb->context.index_regs[i].rxfunaddr);
 			musb_write_rxhubaddr(ep_target_regs,
-				musb_context.index_regs[i].rxhubaddr);
+				musb->context.index_regs[i].rxhubaddr);
 			musb_write_rxhubport(ep_target_regs,
-				musb_context.index_regs[i].rxhubport);
+				musb->context.index_regs[i].rxhubport);
 		}
 	}
 }
@@ -2427,12 +2391,6 @@
 
 	musb_save_context(musb);
 
-	if (musb->clock) {
-		if (musb->set_clock)
-			musb->set_clock(musb->clock, 0);
-		else
-			clk_disable(musb->clock);
-	}
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return 0;
 }
@@ -2442,13 +2400,6 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
-	if (musb->clock) {
-		if (musb->set_clock)
-			musb->set_clock(musb->clock, 1);
-		else
-			clk_enable(musb->clock);
-	}
-
 	musb_restore_context(musb);
 
 	/* for static cmos like DaVinci, register values were preserved
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index febaabc..d0c236f 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -222,7 +222,7 @@
 #endif
 
 /* TUSB mapping: "flat" plus ep0 special cases */
-#if	defined(CONFIG_USB_TUSB6010)
+#if	defined(CONFIG_USB_MUSB_TUSB6010)
 #define musb_ep_select(_mbase, _epnum) \
 	musb_writeb((_mbase), MUSB_INDEX, (_epnum))
 #define	MUSB_EP_OFFSET			MUSB_TUSB_OFFSET
@@ -253,6 +253,29 @@
 
 /******************************** TYPES *************************************/
 
+/**
+ * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
+ * @init:	turns on clocks, sets up platform-specific registers, etc
+ * @exit:	undoes @init
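+ * @enable:	enables controller interrupts
+ * @disable:	disables the controller and flushes interrupts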
+ * @set_mode:	forcefully changes operating mode
+ * @try_idle:	tries to idle the IP
+ * @vbus_status: returns vbus status if possible
+ * @set_vbus:	forces vbus status
+ */
+struct musb_platform_ops {
+	int	(*init)(struct musb *musb);
+	int	(*exit)(struct musb *musb);
+
+	void	(*enable)(struct musb *musb);
+	void	(*disable)(struct musb *musb);
+
+	int	(*set_mode)(struct musb *musb, u8 mode);
+	void	(*try_idle)(struct musb *musb, unsigned long timeout);
+
+	int	(*vbus_status)(struct musb *musb);
+	void	(*set_vbus)(struct musb *musb, int on);
+};
+
 /*
  * struct musb_hw_ep - endpoint hardware (bidirectional)
  *
@@ -263,7 +286,7 @@
 	void __iomem		*fifo;
 	void __iomem		*regs;
 
-#ifdef CONFIG_USB_TUSB6010
+#ifdef CONFIG_USB_MUSB_TUSB6010
 	void __iomem		*conf;
 #endif
 
@@ -280,7 +303,7 @@
 	struct dma_channel	*tx_channel;
 	struct dma_channel	*rx_channel;
 
-#ifdef CONFIG_USB_TUSB6010
+#ifdef CONFIG_USB_MUSB_TUSB6010
 	/* TUSB has "asynchronous" and "synchronous" dma modes */
 	dma_addr_t		fifo_async;
 	dma_addr_t		fifo_sync;
@@ -323,14 +346,43 @@
 #endif
 }
 
+struct musb_csr_regs {
+	/* FIFO registers */
+	u16 txmaxp, txcsr, rxmaxp, rxcsr;
+	u16 rxfifoadd, txfifoadd;
+	u8 txtype, txinterval, rxtype, rxinterval;
+	u8 rxfifosz, txfifosz;
+	u8 txfunaddr, txhubaddr, txhubport;
+	u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musb_context_registers {
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
+    defined(CONFIG_ARCH_OMAP4)
+	u32 otg_sysconfig, otg_forcestandby;
+#endif
+	u8 power;
+	u16 intrtxe, intrrxe;
+	u8 intrusbe;
+	u16 frame;
+	u8 index, testmode;
+
+	u8 devctl, busctl, misc;
+
+	struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+};
+
 /*
  * struct musb - Driver instance data.
  */
 struct musb {
 	/* device lock */
 	spinlock_t		lock;
-	struct clk		*clock;
-	struct clk		*phy_clock;
+
+	const struct musb_platform_ops *ops;
+	struct musb_context_registers context;
+
 	irqreturn_t		(*isr)(int, void *);
 	struct work_struct	irq_work;
 	u16			hwvers;
@@ -359,11 +411,7 @@
 
 	struct timer_list	otg_timer;
 #endif
-
-	/* called with IRQs blocked; ON/nonzero implies starting a session,
-	 * and waiting at least a_wait_vrise_tmout.
-	 */
-	void			(*board_set_vbus)(struct musb *, int is_on);
+	struct notifier_block	nb;
 
 	struct dma_controller	*dma_controller;
 
@@ -371,7 +419,7 @@
 	void __iomem		*ctrl_base;
 	void __iomem		*mregs;
 
-#ifdef CONFIG_USB_TUSB6010
+#ifdef CONFIG_USB_MUSB_TUSB6010
 	dma_addr_t		async;
 	dma_addr_t		sync;
 	void __iomem		*sync_va;
@@ -398,8 +446,6 @@
 	u8 board_mode;		/* enum musb_mode */
 	int			(*board_set_power)(int state);
 
-	int			(*set_clock)(struct clk *clk, int is_active);
-
 	u8			min_power;	/* vbus for periph, in mA/2 */
 
 	bool			is_host;
@@ -458,52 +504,6 @@
 #endif
 };
 
-#ifdef CONFIG_PM
-struct musb_csr_regs {
-	/* FIFO registers */
-	u16 txmaxp, txcsr, rxmaxp, rxcsr;
-	u16 rxfifoadd, txfifoadd;
-	u8 txtype, txinterval, rxtype, rxinterval;
-	u8 rxfifosz, txfifosz;
-	u8 txfunaddr, txhubaddr, txhubport;
-	u8 rxfunaddr, rxhubaddr, rxhubport;
-};
-
-struct musb_context_registers {
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
-    defined(CONFIG_ARCH_OMAP4)
-	u32 otg_sysconfig, otg_forcestandby;
-#endif
-	u8 power;
-	u16 intrtxe, intrrxe;
-	u8 intrusbe;
-	u16 frame;
-	u8 index, testmode;
-
-	u8 devctl, busctl, misc;
-
-	struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
-};
-
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
-    defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
-extern void musb_platform_save_context(struct musb *musb,
-		struct musb_context_registers *musb_context);
-extern void musb_platform_restore_context(struct musb *musb,
-		struct musb_context_registers *musb_context);
-#else
-#define musb_platform_save_context(m, x)	do {} while (0)
-#define musb_platform_restore_context(m, x)	do {} while (0)
-#endif
-
-#endif
-
-static inline void musb_set_vbus(struct musb *musb, int is_on)
-{
-	musb->board_set_vbus(musb, is_on);
-}
-
 #ifdef CONFIG_USB_GADGET_MUSB_HDRC
 static inline struct musb *gadget_to_musb(struct usb_gadget *g)
 {
@@ -592,29 +592,63 @@
 
 extern irqreturn_t musb_interrupt(struct musb *);
 
-extern void musb_platform_enable(struct musb *musb);
-extern void musb_platform_disable(struct musb *musb);
-
 extern void musb_hnp_stop(struct musb *musb);
 
-extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
+{
+	if (musb->ops->set_vbus)
+		musb->ops->set_vbus(musb, is_on);
+}
 
-#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
-	defined(CONFIG_ARCH_DAVINCI_DA8XX) || \
-	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
-	defined(CONFIG_ARCH_OMAP4)
-extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
-#else
-#define musb_platform_try_idle(x, y)		do {} while (0)
-#endif
+static inline void musb_platform_enable(struct musb *musb)
+{
+	if (musb->ops->enable)
+		musb->ops->enable(musb);
+}
 
-#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN)
-extern int musb_platform_get_vbus_status(struct musb *musb);
-#else
-#define musb_platform_get_vbus_status(x)	0
-#endif
+static inline void musb_platform_disable(struct musb *musb)
+{
+	if (musb->ops->disable)
+		musb->ops->disable(musb);
+}
 
-extern int __init musb_platform_init(struct musb *musb, void *board_data);
-extern int musb_platform_exit(struct musb *musb);
+static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
+{
+	if (!musb->ops->set_mode)
+		return 0;
+
+	return musb->ops->set_mode(musb, mode);
+}
+
+static inline void musb_platform_try_idle(struct musb *musb,
+		unsigned long timeout)
+{
+	if (musb->ops->try_idle)
+		musb->ops->try_idle(musb, timeout);
+}
+
+static inline int musb_platform_get_vbus_status(struct musb *musb)
+{
+	if (!musb->ops->vbus_status)
+		return 0;
+
+	return musb->ops->vbus_status(musb);
+}
+
+static inline int musb_platform_init(struct musb *musb)
+{
+	if (!musb->ops->init)
+		return -EINVAL;
+
+	return musb->ops->init(musb);
+}
+
+static inline int musb_platform_exit(struct musb *musb)
+{
+	if (!musb->ops->exit)
+		return -EINVAL;
+
+	return musb->ops->exit(musb);
+}
 
 #endif	/* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 9e8639d..b0176e4 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -36,7 +36,6 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
-#include <linux/kobject.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/debugfs.h>
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9d6ade8..ed58c6c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1136,13 +1136,16 @@
 	struct musb_request	*request = NULL;
 
 	request = kzalloc(sizeof *request, gfp_flags);
-	if (request) {
-		INIT_LIST_HEAD(&request->request.list);
-		request->request.dma = DMA_ADDR_INVALID;
-		request->epnum = musb_ep->current_epnum;
-		request->ep = musb_ep;
+	if (!request) {
+		DBG(4, "not enough memory\n");
+		return NULL;
 	}
 
+	INIT_LIST_HEAD(&request->request.list);
+	request->request.dma = DMA_ADDR_INVALID;
+	request->epnum = musb_ep->current_epnum;
+	request->ep = musb_ep;
+
 	return &request->request;
 }
 
@@ -1681,7 +1684,7 @@
 	struct musb_hw_ep	*hw_ep;
 	unsigned		count = 0;
 
-	/* intialize endpoint list just once */
+	/* initialize endpoint list just once */
 	INIT_LIST_HEAD(&(musb->g.ep_list));
 
 	for (epnum = 0, hw_ep = musb->endpoints;
@@ -1762,7 +1765,7 @@
  *
  * -EINVAL something went wrong (not driver)
  * -EBUSY another gadget is already using the controller
- * -ENOMEM no memeory to perform the operation
+ * -ENOMEM no memory to perform the operation
  *
  * @param driver the gadget driver
  * @param bind the driver's bind function
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index b06e9ef..03c6ccd 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -74,7 +74,7 @@
 	{ __raw_writel(data, addr + offset); }
 
 
-#ifdef CONFIG_USB_TUSB6010
+#ifdef CONFIG_USB_MUSB_TUSB6010
 
 /*
  * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
@@ -114,7 +114,7 @@
 static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
 	{ __raw_writeb(data, addr + offset); }
 
-#endif	/* CONFIG_USB_TUSB6010 */
+#endif	/* CONFIG_USB_MUSB_TUSB6010 */
 
 #else
 
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 5a727c5..8241070 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -234,7 +234,7 @@
 #define MUSB_TESTMODE		0x0F	/* 8 bit */
 
 /* Get offset for a given FIFO from musb->mregs */
-#ifdef	CONFIG_USB_TUSB6010
+#ifdef	CONFIG_USB_MUSB_TUSB6010
 #define MUSB_FIFO_OFFSET(epnum)	(0x200 + ((epnum) * 0x20))
 #else
 #define MUSB_FIFO_OFFSET(epnum)	(0x20 + ((epnum) * 4))
@@ -295,7 +295,7 @@
 #define MUSB_FLAT_OFFSET(_epnum, _offset)	\
 	(0x100 + (0x10*(_epnum)) + (_offset))
 
-#ifdef CONFIG_USB_TUSB6010
+#ifdef CONFIG_USB_MUSB_TUSB6010
 /* TUSB6010 EP0 configuration register is special */
 #define MUSB_TUSB_OFFSET(_epnum, _offset)	\
 	(0x10 + _offset)
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 43233c3..b46d187 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -276,7 +276,7 @@
 			break;
 		case USB_PORT_FEAT_POWER:
 			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
-				musb_set_vbus(musb, 0);
+				musb_platform_set_vbus(musb, 0);
 			break;
 		case USB_PORT_FEAT_C_CONNECTION:
 		case USB_PORT_FEAT_C_ENABLE:
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 563114d..0144a2d 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -377,7 +377,7 @@
 	struct musb_dma_controller *controller;
 	struct device *dev = musb->controller;
 	struct platform_device *pdev = to_platform_device(dev);
-	int irq = platform_get_irq(pdev, 1);
+	int irq = platform_get_irq_byname(pdev, "dma");
 
 	if (irq == 0) {
 		dev_err(dev, "No DMA interrupt line!\n");
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ed618bd..a3f1233 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -31,10 +31,18 @@
 #include <linux/list.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include "musb_core.h"
 #include "omap2430.h"
 
+struct omap2430_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+	struct clk		*clk;
+};
+#define glue_to_musb(g)		platform_get_drvdata(g->musb)
 
 static struct timer_list musb_idle_timer;
 
@@ -49,12 +57,8 @@
 
 	spin_lock_irqsave(&musb->lock, flags);
 
-	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
 	switch (musb->xceiv->state) {
 	case OTG_STATE_A_WAIT_BCON:
-		devctl &= ~MUSB_DEVCTL_SESSION;
-		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
 
 		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 		if (devctl & MUSB_DEVCTL_BDEVICE) {
@@ -98,7 +102,7 @@
 }
 
 
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
 {
 	unsigned long		default_timeout = jiffies + msecs_to_jiffies(3);
 	static unsigned long	last_timer;
@@ -131,15 +135,11 @@
 	mod_timer(&musb_idle_timer, timeout);
 }
 
-void musb_platform_enable(struct musb *musb)
-{
-}
-void musb_platform_disable(struct musb *musb)
-{
-}
-static void omap_set_vbus(struct musb *musb, int is_on)
+static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
 {
 	u8		devctl;
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	int ret = 1;
 	/* HDRC controls CPEN, but beware current surges during device
 	 * connect.  They can trigger transient overcurrent conditions
 	 * that must be ignored.
@@ -148,12 +148,35 @@
 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
 	if (is_on) {
-		musb->is_active = 1;
-		musb->xceiv->default_a = 1;
-		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
-		devctl |= MUSB_DEVCTL_SESSION;
+		if (musb->xceiv->state == OTG_STATE_A_IDLE) {
+			/* start the session */
+			devctl |= MUSB_DEVCTL_SESSION;
+			musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+			/*
+			 * Wait for the musb to set as A device to enable the
+			 * VBUS
+			 */
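+			/* DEVCTL bit 7 (B-Device) must clear first */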
+			while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
 
-		MUSB_HST_MODE(musb);
+				cpu_relax();
+
+				if (time_after(jiffies, timeout)) {
+					dev_err(musb->controller,
+					"configured as A device timeout");
+					ret = -EINVAL;
+					break;
+				}
+			}
+
+			if (ret && musb->xceiv->set_vbus)
+				otg_set_vbus(musb->xceiv, 1);
+		} else {
+			musb->is_active = 1;
+			musb->xceiv->default_a = 1;
+			musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
+			devctl |= MUSB_DEVCTL_SESSION;
+			MUSB_HST_MODE(musb);
+		}
 	} else {
 		musb->is_active = 0;
 
@@ -175,9 +198,7 @@
 		musb_readb(musb->mregs, MUSB_DEVCTL));
 }
 
-static int musb_platform_resume(struct musb *musb);
-
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
 {
 	u8	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
@@ -187,10 +208,94 @@
 	return 0;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static inline void omap2430_low_level_exit(struct musb *musb)
 {
 	u32 l;
-	struct omap_musb_board_data *data = board_data;
+
+	/* in any role */
+	l = musb_readl(musb->mregs, OTG_FORCESTDBY);
+	l |= ENABLEFORCE;	/* enable MSTANDBY */
+	musb_writel(musb->mregs, OTG_FORCESTDBY, l);
+
+	l = musb_readl(musb->mregs, OTG_SYSCONFIG);
+	l |= ENABLEWAKEUP;	/* enable wakeup */
+	musb_writel(musb->mregs, OTG_SYSCONFIG, l);
+}
+
+static inline void omap2430_low_level_init(struct musb *musb)
+{
+	u32 l;
+
+	l = musb_readl(musb->mregs, OTG_SYSCONFIG);
+	l &= ~ENABLEWAKEUP;	/* disable wakeup */
+	musb_writel(musb->mregs, OTG_SYSCONFIG, l);
+
+	l = musb_readl(musb->mregs, OTG_FORCESTDBY);
+	l &= ~ENABLEFORCE;	/* disable MSTANDBY */
+	musb_writel(musb->mregs, OTG_FORCESTDBY, l);
+}
+
+/* blocking notifier support */
+static int musb_otg_notifications(struct notifier_block *nb,
+		unsigned long event, void *unused)
+{
+	struct musb	*musb = container_of(nb, struct musb, nb);
+	struct device *dev = musb->controller;
+	struct musb_hdrc_platform_data *pdata = dev->platform_data;
+	struct omap_musb_board_data *data = pdata->board_data;
+
+	switch (event) {
+	case USB_EVENT_ID:
+		DBG(4, "ID GND\n");
+
+		if (is_otg_enabled(musb)) {
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+			if (musb->gadget_driver) {
+				otg_init(musb->xceiv);
+
+				if (data->interface_type ==
+						MUSB_INTERFACE_UTMI)
+					omap2430_musb_set_vbus(musb, 1);
+
+			}
+#endif
+		} else {
+			otg_init(musb->xceiv);
+			if (data->interface_type ==
+					MUSB_INTERFACE_UTMI)
+				omap2430_musb_set_vbus(musb, 1);
+		}
+		break;
+
+	case USB_EVENT_VBUS:
+		DBG(4, "VBUS Connect\n");
+
+		otg_init(musb->xceiv);
+		break;
+
+	case USB_EVENT_NONE:
+		DBG(4, "VBUS Disconnect\n");
+
+		if (data->interface_type == MUSB_INTERFACE_UTMI) {
+			if (musb->xceiv->set_vbus)
+				otg_set_vbus(musb->xceiv, 0);
+		}
+		otg_shutdown(musb->xceiv);
+		break;
+	default:
+		DBG(4, "ID float\n");
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int omap2430_musb_init(struct musb *musb)
+{
+	u32 l, status = 0;
+	struct device *dev = musb->controller;
+	struct musb_hdrc_platform_data *plat = dev->platform_data;
+	struct omap_musb_board_data *data = plat->board_data;
 
 	/* We require some kind of external transceiver, hooked
 	 * up through ULPI.  TWL4030-family PMICs include one,
@@ -202,7 +307,7 @@
 		return -ENODEV;
 	}
 
-	musb_platform_resume(musb);
+	omap2430_low_level_init(musb);
 
 	l = musb_readl(musb->mregs, OTG_SYSCONFIG);
 	l &= ~ENABLEWAKEUP;	/* disable wakeup */
@@ -239,87 +344,214 @@
 			musb_readl(musb->mregs, OTG_INTERFSEL),
 			musb_readl(musb->mregs, OTG_SIMENABLE));
 
-	if (is_host_enabled(musb))
-		musb->board_set_vbus = omap_set_vbus;
+	musb->nb.notifier_call = musb_otg_notifications;
+	status = otg_register_notifier(musb->xceiv, &musb->nb);
+
+	if (status)
+		DBG(1, "notification register failed\n");
+
+	/* check whether cable is already connected */
+	if (musb->xceiv->state == OTG_STATE_B_IDLE)
+		musb_otg_notifications(&musb->nb, 1,
+					musb->xceiv->gadget);
 
 	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
 
 	return 0;
 }
 
-#ifdef CONFIG_PM
-void musb_platform_save_context(struct musb *musb,
-		struct musb_context_registers *musb_context)
+static int omap2430_musb_exit(struct musb *musb)
 {
-	musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
-	musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
-}
 
-void musb_platform_restore_context(struct musb *musb,
-		struct musb_context_registers *musb_context)
-{
-	musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig);
-	musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
-}
-#endif
-
-static int musb_platform_suspend(struct musb *musb)
-{
-	u32 l;
-
-	if (!musb->clock)
-		return 0;
-
-	/* in any role */
-	l = musb_readl(musb->mregs, OTG_FORCESTDBY);
-	l |= ENABLEFORCE;	/* enable MSTANDBY */
-	musb_writel(musb->mregs, OTG_FORCESTDBY, l);
-
-	l = musb_readl(musb->mregs, OTG_SYSCONFIG);
-	l |= ENABLEWAKEUP;	/* enable wakeup */
-	musb_writel(musb->mregs, OTG_SYSCONFIG, l);
-
-	otg_set_suspend(musb->xceiv, 1);
-
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 0);
-	else
-		clk_disable(musb->clock);
+	omap2430_low_level_exit(musb);
+	otg_put_transceiver(musb->xceiv);
 
 	return 0;
 }
 
-static int musb_platform_resume(struct musb *musb)
+static const struct musb_platform_ops omap2430_ops = {
+	.init		= omap2430_musb_init,
+	.exit		= omap2430_musb_exit,
+
+	.set_mode	= omap2430_musb_set_mode,
+	.try_idle	= omap2430_musb_try_idle,
+
+	.set_vbus	= omap2430_musb_set_vbus,
+};
+
+static u64 omap2430_dmamask = DMA_BIT_MASK(32);
+
+static int __init omap2430_probe(struct platform_device *pdev)
 {
-	u32 l;
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct omap2430_glue		*glue;
+	struct clk			*clk;
 
-	if (!musb->clock)
-		return 0;
+	int				ret = -ENOMEM;
 
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	clk = clk_get(&pdev->dev, "ick");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		ret = PTR_ERR(clk);
+		goto err2;
+	}
+
+	ret = clk_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		goto err3;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+	musb->dev.dma_mask		= &omap2430_dmamask;
+	musb->dev.coherent_dma_mask	= omap2430_dmamask;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+	glue->clk			= clk;
+
+	pdata->platform_ops		= &omap2430_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err4;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err4;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err4;
+	}
+
+	return 0;
+
+err4:
+	clk_disable(clk);
+
+err3:
+	clk_put(clk);
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit omap2430_remove(struct platform_device *pdev)
+{
+	struct omap2430_glue		*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	clk_disable(glue->clk);
+	clk_put(glue->clk);
+	kfree(glue);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static void omap2430_save_context(struct musb *musb)
+{
+	musb->context.otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
+	musb->context.otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
+}
+
+static void omap2430_restore_context(struct musb *musb)
+{
+	musb_writel(musb->mregs, OTG_SYSCONFIG, musb->context.otg_sysconfig);
+	musb_writel(musb->mregs, OTG_FORCESTDBY, musb->context.otg_forcestandby);
+}
+
+static int omap2430_suspend(struct device *dev)
+{
+	struct omap2430_glue		*glue = dev_get_drvdata(dev);
+	struct musb			*musb = glue_to_musb(glue);
+
+	omap2430_low_level_exit(musb);
+	otg_set_suspend(musb->xceiv, 1);
+	omap2430_save_context(musb);
+	clk_disable(glue->clk);
+
+	return 0;
+}
+
+static int omap2430_resume(struct device *dev)
+{
+	struct omap2430_glue		*glue = dev_get_drvdata(dev);
+	struct musb			*musb = glue_to_musb(glue);
+	int				ret;
+
+	ret = clk_enable(glue->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clock\n");
+		return ret;
+	}
+
+	omap2430_low_level_init(musb);
+	omap2430_restore_context(musb);
 	otg_set_suspend(musb->xceiv, 0);
 
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 1);
-	else
-		clk_enable(musb->clock);
-
-	l = musb_readl(musb->mregs, OTG_SYSCONFIG);
-	l &= ~ENABLEWAKEUP;	/* disable wakeup */
-	musb_writel(musb->mregs, OTG_SYSCONFIG, l);
-
-	l = musb_readl(musb->mregs, OTG_FORCESTDBY);
-	l &= ~ENABLEFORCE;	/* disable MSTANDBY */
-	musb_writel(musb->mregs, OTG_FORCESTDBY, l);
-
 	return 0;
 }
 
+static struct dev_pm_ops omap2430_pm_ops = {
+	.suspend	= omap2430_suspend,
+	.resume		= omap2430_resume,
+};
 
-int musb_platform_exit(struct musb *musb)
+#define DEV_PM_OPS	(&omap2430_pm_ops)
+#else
+#define DEV_PM_OPS	NULL
+#endif
+
+static struct platform_driver omap2430_driver = {
+	.remove		= __exit_p(omap2430_remove),
+	.driver		= {
+		.name	= "musb-omap2430",
+		.pm	= DEV_PM_OPS,
+	},
+};
+
+MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init omap2430_init(void)
 {
-
-	musb_platform_suspend(musb);
-
-	otg_put_transceiver(musb->xceiv);
-	return 0;
+	return platform_driver_probe(&omap2430_driver, omap2430_probe);
 }
+subsys_initcall(omap2430_init);
+
+static void __exit omap2430_exit(void)
+{
+	platform_driver_unregister(&omap2430_driver);
+}
+module_exit(omap2430_exit);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index bde40ef..2ba3b07 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -21,10 +21,16 @@
 #include <linux/usb.h>
 #include <linux/irq.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 
 #include "musb_core.h"
 
-static void tusb_source_power(struct musb *musb, int is_on);
+struct tusb6010_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+};
+
+static void tusb_musb_set_vbus(struct musb *musb, int is_on);
 
 #define TUSB_REV_MAJOR(reg_val)		((reg_val >> 4) & 0xf)
 #define TUSB_REV_MINOR(reg_val)		(reg_val & 0xf)
@@ -50,7 +56,7 @@
 	return rev;
 }
 
-static int __init tusb_print_revision(struct musb *musb)
+static int tusb_print_revision(struct musb *musb)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	u8		rev;
@@ -275,17 +281,6 @@
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		reg;
 
-	/*
-	 * Keep clock active when enabled. Note that this is not tied to
-	 * drawing VBUS, as with OTG mA can be less than musb->min_power.
-	 */
-	if (musb->set_clock) {
-		if (mA)
-			musb->set_clock(musb->clock, 1);
-		else
-			musb->set_clock(musb->clock, 0);
-	}
-
 	/* tps65030 seems to consume max 100mA, with maybe 60mA available
 	 * (measured on one board) for things other than tps and tusb.
 	 *
@@ -348,7 +343,7 @@
  * USB link is not suspended ... and tells us the relevant wakeup
  * events.  SW_EN for voltage is handled separately.
  */
-void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		reg;
@@ -385,7 +380,7 @@
 /*
  * Updates cable VBUS status. Caller must take care of locking.
  */
-int musb_platform_get_vbus_status(struct musb *musb)
+static int tusb_musb_vbus_status(struct musb *musb)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		otg_stat, prcm_mngmt;
@@ -431,7 +426,7 @@
 		}
 		/* FALLTHROUGH */
 	case OTG_STATE_A_IDLE:
-		tusb_source_power(musb, 0);
+		tusb_musb_set_vbus(musb, 0);
 	default:
 		break;
 	}
@@ -475,7 +470,7 @@
  * we don't want to treat that full speed J as a wakeup event.
  * ... peripherals must draw only suspend current after 10 msec.
  */
-void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
 {
 	unsigned long		default_timeout = jiffies + msecs_to_jiffies(3);
 	static unsigned long	last_timer;
@@ -515,7 +510,7 @@
 				| TUSB_DEV_OTG_TIMER_ENABLE) \
 		: 0)
 
-static void tusb_source_power(struct musb *musb, int is_on)
+static void tusb_musb_set_vbus(struct musb *musb, int is_on)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		conf, prcm, timer;
@@ -531,8 +526,6 @@
 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
 	if (is_on) {
-		if (musb->set_clock)
-			musb->set_clock(musb->clock, 1);
 		timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
 		musb->xceiv->default_a = 1;
 		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
@@ -571,8 +564,6 @@
 
 		devctl &= ~MUSB_DEVCTL_SESSION;
 		conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
-		if (musb->set_clock)
-			musb->set_clock(musb->clock, 0);
 	}
 	prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
 
@@ -599,7 +590,7 @@
  * and peripheral modes in non-OTG configurations by reconfiguring hardware
  * and then setting musb->board_mode. For now, only support OTG mode.
  */
-int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	u32		otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
@@ -677,7 +668,7 @@
 			default_a = is_host_enabled(musb);
 		DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
 		musb->xceiv->default_a = default_a;
-		tusb_source_power(musb, default_a);
+		tusb_musb_set_vbus(musb, default_a);
 
 		/* Don't allow idling immediately */
 		if (default_a)
@@ -722,7 +713,7 @@
 			switch (musb->xceiv->state) {
 			case OTG_STATE_A_IDLE:
 				DBG(2, "Got SRP, turning on VBUS\n");
-				musb_set_vbus(musb, 1);
+				musb_platform_set_vbus(musb, 1);
 
 				/* CONNECT can wake if a_wait_bcon is set */
 				if (musb->a_wait_bcon != 0)
@@ -748,11 +739,11 @@
 				 */
 				if (musb->vbuserr_retry) {
 					musb->vbuserr_retry--;
-					tusb_source_power(musb, 1);
+					tusb_musb_set_vbus(musb, 1);
 				} else {
 					musb->vbuserr_retry
 						= VBUSERR_RETRY_COUNT;
-					tusb_source_power(musb, 0);
+					tusb_musb_set_vbus(musb, 0);
 				}
 				break;
 			default:
@@ -786,7 +777,7 @@
 			} else {
 				/* REVISIT report overcurrent to hub? */
 				ERR("vbus too slow, devctl %02x\n", devctl);
-				tusb_source_power(musb, 0);
+				tusb_musb_set_vbus(musb, 0);
 			}
 			break;
 		case OTG_STATE_A_WAIT_BCON:
@@ -807,7 +798,7 @@
 	return idle_timeout;
 }
 
-static irqreturn_t tusb_interrupt(int irq, void *__hci)
+static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
 {
 	struct musb	*musb = __hci;
 	void __iomem	*tbase = musb->ctrl_base;
@@ -911,7 +902,7 @@
 	musb_writel(tbase, TUSB_INT_SRC_CLEAR,
 		int_src & ~TUSB_INT_MASK_RESERVED_BITS);
 
-	musb_platform_try_idle(musb, idle_timeout);
+	tusb_musb_try_idle(musb, idle_timeout);
 
 	musb_writel(tbase, TUSB_INT_MASK, int_mask);
 	spin_unlock_irqrestore(&musb->lock, flags);
@@ -926,7 +917,7 @@
  * REVISIT:
  * - Check what is unnecessary in MGC_HdrcStart()
  */
-void musb_platform_enable(struct musb *musb)
+static void tusb_musb_enable(struct musb *musb)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 
@@ -970,7 +961,7 @@
 /*
  * Disables TUSB6010. Caller must take care of locking.
  */
-void musb_platform_disable(struct musb *musb)
+static void tusb_musb_disable(struct musb *musb)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 
@@ -995,7 +986,7 @@
  * Sets up TUSB6010 CPU interface specific signals and registers
  * Note: Settings optimized for OMAP24xx
  */
-static void __init tusb_setup_cpu_interface(struct musb *musb)
+static void tusb_setup_cpu_interface(struct musb *musb)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 
@@ -1022,7 +1013,7 @@
 	musb_writel(tbase, TUSB_WAIT_COUNT, 1);
 }
 
-static int __init tusb_start(struct musb *musb)
+static int tusb_musb_start(struct musb *musb)
 {
 	void __iomem	*tbase = musb->ctrl_base;
 	int		ret = 0;
@@ -1091,7 +1082,7 @@
 	return -ENODEV;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static int tusb_musb_init(struct musb *musb)
 {
 	struct platform_device	*pdev;
 	struct resource		*mem;
@@ -1131,16 +1122,14 @@
 	 */
 	musb->mregs += TUSB_BASE_OFFSET;
 
-	ret = tusb_start(musb);
+	ret = tusb_musb_start(musb);
 	if (ret) {
 		printk(KERN_ERR "Could not start tusb6010 (%d)\n",
 				ret);
 		goto done;
 	}
-	musb->isr = tusb_interrupt;
+	musb->isr = tusb_musb_interrupt;
 
-	if (is_host_enabled(musb))
-		musb->board_set_vbus = tusb_source_power;
 	if (is_peripheral_enabled(musb)) {
 		musb->xceiv->set_power = tusb_draw_power;
 		the_musb = musb;
@@ -1159,7 +1148,7 @@
 	return ret;
 }
 
-int musb_platform_exit(struct musb *musb)
+static int tusb_musb_exit(struct musb *musb)
 {
 	del_timer_sync(&musb_idle_timer);
 	the_musb = NULL;
@@ -1173,3 +1162,115 @@
 	usb_nop_xceiv_unregister();
 	return 0;
 }
+
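+/*
+ * Platform hooks handed to the musb core through pdata->platform_ops in
+ * tusb_probe() below; they replace the old global musb_platform_*()
+ * entry points.
+ */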
+static const struct musb_platform_ops tusb_ops = {
+	.init		= tusb_musb_init,
+	.exit		= tusb_musb_exit,
+
+	.enable		= tusb_musb_enable,
+	.disable	= tusb_musb_disable,
+
+	.set_mode	= tusb_musb_set_mode,
+	.try_idle	= tusb_musb_try_idle,
+
+	.vbus_status	= tusb_musb_vbus_status,
+	.set_vbus	= tusb_musb_set_vbus,
+};
+
+static u64 tusb_dmamask = DMA_BIT_MASK(32);
+
+static int __init tusb_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct tusb6010_glue		*glue;
+
+	int				ret = -ENOMEM;
+
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+	musb->dev.dma_mask		= &tusb_dmamask;
+	musb->dev.coherent_dma_mask	= tusb_dmamask;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+
+	pdata->platform_ops		= &tusb_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err2;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err2;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err1;
+	}
+
+	return 0;
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit tusb_remove(struct platform_device *pdev)
+{
+	struct tusb6010_glue		*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	kfree(glue);
+
+	return 0;
+}
+
+static struct platform_driver tusb_driver = {
+	.remove		= __exit_p(tusb_remove),
+	.driver		= {
+		.name	= "musb-tusb",
+	},
+};
+
+MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init tusb_init(void)
+{
+	return platform_driver_probe(&tusb_driver, tusb_probe);
+}
+subsys_initcall(tusb_init);
+
+static void __exit tusb_exit(void)
+{
+	platform_driver_unregister(&tusb_driver);
+}
+module_exit(tusb_exit);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
new file mode 100644
index 0000000..d6384e4
--- /dev/null
+++ b/drivers/usb/musb/ux500.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson AB
+ * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ *
+ * Based on omap2430.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+struct ux500_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+	struct clk		*clk;
+};
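+/* The struct musb is stored as drvdata of the child "musb-hdrc" device */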
+#define glue_to_musb(g)	platform_get_drvdata(g->musb)
+
+static int ux500_musb_init(struct musb *musb)
+{
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv) {
+		pr_err("HS USB OTG: no transceiver configured\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int ux500_musb_exit(struct musb *musb)
+{
+	otg_put_transceiver(musb->xceiv);
+
+	return 0;
+}
+
+static const struct musb_platform_ops ux500_ops = {
+	.init		= ux500_musb_init,
+	.exit		= ux500_musb_exit,
+};
+
+static int __init ux500_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
+	struct platform_device		*musb;
+	struct ux500_glue		*glue;
+	struct clk			*clk;
+
+	int				ret = -ENOMEM;
+
+	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	if (!glue) {
+		dev_err(&pdev->dev, "failed to allocate glue context\n");
+		goto err0;
+	}
+
+	musb = platform_device_alloc("musb-hdrc", -1);
+	if (!musb) {
+		dev_err(&pdev->dev, "failed to allocate musb device\n");
+		goto err1;
+	}
+
+	clk = clk_get(&pdev->dev, "usb");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		ret = PTR_ERR(clk);
+		goto err2;
+	}
+
+	ret = clk_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		goto err3;
+	}
+
+	musb->dev.parent		= &pdev->dev;
+
+	glue->dev			= &pdev->dev;
+	glue->musb			= musb;
+	glue->clk			= clk;
+
+	pdata->platform_ops		= &ux500_ops;
+
+	platform_set_drvdata(pdev, glue);
+
+	ret = platform_device_add_resources(musb, pdev->resource,
+			pdev->num_resources);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add resources\n");
+		goto err4;
+	}
+
+	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform_data\n");
+		goto err4;
+	}
+
+	ret = platform_device_add(musb);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register musb device\n");
+		goto err4;
+	}
+
+	return 0;
+
+err4:
+	clk_disable(clk);
+
+err3:
+	clk_put(clk);
+
+err2:
+	platform_device_put(musb);
+
+err1:
+	kfree(glue);
+
+err0:
+	return ret;
+}
+
+static int __exit ux500_remove(struct platform_device *pdev)
+{
+	struct ux500_glue	*glue = platform_get_drvdata(pdev);
+
+	platform_device_del(glue->musb);
+	platform_device_put(glue->musb);
+	clk_disable(glue->clk);
+	clk_put(glue->clk);
+	kfree(glue);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ux500_suspend(struct device *dev)
+{
+	struct ux500_glue	*glue = dev_get_drvdata(dev);
+	struct musb		*musb = glue_to_musb(glue);
+
+	otg_set_suspend(musb->xceiv, 1);
+	clk_disable(glue->clk);
+
+	return 0;
+}
+
+static int ux500_resume(struct device *dev)
+{
+	struct ux500_glue	*glue = dev_get_drvdata(dev);
+	struct musb		*musb = glue_to_musb(glue);
+	int			ret;
+
+	ret = clk_enable(glue->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clock\n");
+		return ret;
+	}
+
+	otg_set_suspend(musb->xceiv, 0);
+
+	return 0;
+}
+
+static const struct dev_pm_ops ux500_pm_ops = {
+	.suspend	= ux500_suspend,
+	.resume		= ux500_resume,
+};
+
+#define DEV_PM_OPS	(&ux500_pm_ops)
+#else
+#define DEV_PM_OPS	NULL
+#endif
+
+static struct platform_driver ux500_driver = {
+	.remove		= __exit_p(ux500_remove),
+	.driver		= {
+		.name	= "musb-ux500",
+		.pm	= DEV_PM_OPS,
+	},
+};
+
+MODULE_DESCRIPTION("UX500 MUSB Glue Layer");
+MODULE_AUTHOR("Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>");
+MODULE_LICENSE("GPL v2");
+
+static int __init ux500_init(void)
+{
+	return platform_driver_probe(&ux500_driver, ux500_probe);
+}
+subsys_initcall(ux500_init);
+
+static void __exit ux500_exit(void)
+{
+	platform_driver_unregister(&ux500_driver);
+}
+module_exit(ux500_exit);
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 5ce0752..9fb875d 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -59,6 +59,18 @@
 	  This transceiver supports high and full speed devices plus,
 	  in host mode, low speed.
 
+config TWL6030_USB
+	tristate "TWL6030 USB Transceiver Driver"
+	depends on TWL4030_CORE
+	select USB_OTG_UTILS
+	help
+	  Enable this to support the USB OTG transceiver on TWL6030
+	  family chips. The TWL6030 transceiver provides VBUS, ID GND and
+	  OTG SRP event detection; all other transceiver functionality is
+	  handled by the UTMI PHY embedded in the OMAP4430. The internal PHY
+	  configuration APIs are hooked up to this driver through the
+	  platform_data structure, and they are defined in the mach-omap2
+	  layer.
+
 config NOP_USB_XCEIV
 	tristate "NOP USB Transceiver Driver"
 	select USB_OTG_UTILS
@@ -81,4 +93,24 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called langwell_otg.
 
+config USB_MSM_OTG_72K
+	tristate "OTG support for Qualcomm on-chip USB controller"
+	depends on (USB || USB_GADGET) && ARCH_MSM
+	select USB_OTG_UTILS
+	help
+	  Enable this to support the USB OTG transceiver on MSM chips. It
+	  handles PHY initialization, clock management, power management and
+	  the workarounds required after resetting the hardware. This driver
+	  is required even for peripheral-only or host-only mode
+	  configurations.
+
+config AB8500_USB
+        tristate "AB8500 USB Transceiver Driver"
+        depends on AB8500_CORE
+        select USB_OTG_UTILS
+        help
+          Enable this to support the USB OTG transceiver in AB8500 chip.
+          This transceiver supports high and full speed devices plus,
+          in host mode, low speed.
+
 endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 66f1b83..a520e71 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -12,6 +12,9 @@
 obj-$(CONFIG_USB_GPIO_VBUS)	+= gpio_vbus.o
 obj-$(CONFIG_ISP1301_OMAP)	+= isp1301_omap.o
 obj-$(CONFIG_TWL4030_USB)	+= twl4030-usb.o
+obj-$(CONFIG_TWL6030_USB)	+= twl6030-usb.o
 obj-$(CONFIG_USB_LANGWELL_OTG)	+= langwell_otg.o
 obj-$(CONFIG_NOP_USB_XCEIV)	+= nop-usb-xceiv.o
 obj-$(CONFIG_USB_ULPI)		+= ulpi.o
+obj-$(CONFIG_USB_MSM_OTG_72K)	+= msm72k_otg.o
+obj-$(CONFIG_AB8500_USB)	+= ab8500-usb.o
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
new file mode 100644
index 0000000..d14736b
--- /dev/null
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -0,0 +1,585 @@
+/*
+ * drivers/usb/otg/ab8500-usb.c
+ *
+ * USB transceiver driver for AB8500 chip
+ *
+ * Copyright (C) 2010 ST-Ericsson AB
+ * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb/otg.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/ab8500.h>
+
+#define AB8500_MAIN_WD_CTRL_REG 0x01
+#define AB8500_USB_LINE_STAT_REG 0x80
+#define AB8500_USB_PHY_CTRL_REG 0x8A
+
+#define AB8500_BIT_OTG_STAT_ID (1 << 0)
+#define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0)
+#define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1)
+#define AB8500_BIT_WD_CTRL_ENABLE (1 << 0)
+#define AB8500_BIT_WD_CTRL_KICK (1 << 1)
+
+#define AB8500_V1x_LINK_STAT_WAIT (HZ/10)
+#define AB8500_WD_KICK_DELAY_US 100 /* usec */
+#define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */
+#define AB8500_WD_V10_DISABLE_DELAY_MS 100 /* ms */
+
+/* USB line status register */
+enum ab8500_usb_link_status {
+	USB_LINK_NOT_CONFIGURED = 0,
+	USB_LINK_STD_HOST_NC,
+	USB_LINK_STD_HOST_C_NS,
+	USB_LINK_STD_HOST_C_S,
+	USB_LINK_HOST_CHG_NM,
+	USB_LINK_HOST_CHG_HS,
+	USB_LINK_HOST_CHG_HS_CHIRP,
+	USB_LINK_DEDICATED_CHG,
+	USB_LINK_ACA_RID_A,
+	USB_LINK_ACA_RID_B,
+	USB_LINK_ACA_RID_C_NM,
+	USB_LINK_ACA_RID_C_HS,
+	USB_LINK_ACA_RID_C_HS_CHIRP,
+	USB_LINK_HM_IDGND,
+	USB_LINK_RESERVED,
+	USB_LINK_NOT_VALID_LINK
+};
+
+struct ab8500_usb {
+	struct otg_transceiver otg;
+	struct device *dev;
+	int irq_num_id_rise;
+	int irq_num_id_fall;
+	int irq_num_vbus_rise;
+	int irq_num_vbus_fall;
+	int irq_num_link_status;
+	unsigned vbus_draw;
+	struct delayed_work dwork;
+	struct work_struct phy_dis_work;
+	unsigned long link_status_wait;
+	int rev;
+};
+
+static inline struct ab8500_usb *xceiv_to_ab(struct otg_transceiver *x)
+{
+	return container_of(x, struct ab8500_usb, otg);
+}
+
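+/*
+ * The main watchdog must be enabled, kicked and then disabled again
+ * after the PHY has been enabled; see the "Needed to enable the phy"
+ * note at the call site in ab8500_usb_phy_ctrl().
+ */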
+static void ab8500_usb_wd_workaround(struct ab8500_usb *ab)
+{
+	abx500_set_register_interruptible(ab->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_MAIN_WD_CTRL_REG,
+		AB8500_BIT_WD_CTRL_ENABLE);
+
+	udelay(AB8500_WD_KICK_DELAY_US);
+
+	abx500_set_register_interruptible(ab->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_MAIN_WD_CTRL_REG,
+		(AB8500_BIT_WD_CTRL_ENABLE
+		| AB8500_BIT_WD_CTRL_KICK));
+
+	if (ab->rev > 0x10) /* v1.1 v2.0 */
+		udelay(AB8500_WD_V11_DISABLE_DELAY_US);
+	else /* v1.0 */
+		msleep(AB8500_WD_V10_DISABLE_DELAY_MS);
+
+	abx500_set_register_interruptible(ab->dev,
+		AB8500_SYS_CTRL2_BLOCK,
+		AB8500_MAIN_WD_CTRL_REG,
+		0);
+}
+
+static void ab8500_usb_phy_ctrl(struct ab8500_usb *ab, bool sel_host,
+					bool enable)
+{
+	u8 ctrl_reg;
+	abx500_get_register_interruptible(ab->dev,
+				AB8500_USB,
+				AB8500_USB_PHY_CTRL_REG,
+				&ctrl_reg);
+	if (sel_host) {
+		if (enable)
+			ctrl_reg |= AB8500_BIT_PHY_CTRL_HOST_EN;
+		else
+			ctrl_reg &= ~AB8500_BIT_PHY_CTRL_HOST_EN;
+	} else {
+		if (enable)
+			ctrl_reg |= AB8500_BIT_PHY_CTRL_DEVICE_EN;
+		else
+			ctrl_reg &= ~AB8500_BIT_PHY_CTRL_DEVICE_EN;
+	}
+
+	abx500_set_register_interruptible(ab->dev,
+				AB8500_USB,
+				AB8500_USB_PHY_CTRL_REG,
+				ctrl_reg);
+
+	/* Needed to enable the phy. */
+	if (enable)
+		ab8500_usb_wd_workaround(ab);
+}
+
+#define ab8500_usb_host_phy_en(ab)	ab8500_usb_phy_ctrl(ab, true, true)
+#define ab8500_usb_host_phy_dis(ab)	ab8500_usb_phy_ctrl(ab, true, false)
+#define ab8500_usb_peri_phy_en(ab)	ab8500_usb_phy_ctrl(ab, false, true)
+#define ab8500_usb_peri_phy_dis(ab)	ab8500_usb_phy_ctrl(ab, false, false)
+
+static int ab8500_usb_link_status_update(struct ab8500_usb *ab)
+{
+	u8 reg;
+	enum ab8500_usb_link_status lsts;
+	void *v = NULL;
+	enum usb_xceiv_events event;
+
+	abx500_get_register_interruptible(ab->dev,
+			AB8500_USB,
+			AB8500_USB_LINE_STAT_REG,
+			&reg);
+
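+	/* The link status is reported in bits [6:3] of the line status register */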
+	lsts = (reg >> 3) & 0x0F;
+
+	switch (lsts) {
+	case USB_LINK_NOT_CONFIGURED:
+	case USB_LINK_RESERVED:
+	case USB_LINK_NOT_VALID_LINK:
+		/* TODO: Disable regulators. */
+		ab8500_usb_host_phy_dis(ab);
+		ab8500_usb_peri_phy_dis(ab);
+		ab->otg.state = OTG_STATE_B_IDLE;
+		ab->otg.default_a = false;
+		ab->vbus_draw = 0;
+		event = USB_EVENT_NONE;
+		break;
+
+	case USB_LINK_STD_HOST_NC:
+	case USB_LINK_STD_HOST_C_NS:
+	case USB_LINK_STD_HOST_C_S:
+	case USB_LINK_HOST_CHG_NM:
+	case USB_LINK_HOST_CHG_HS:
+	case USB_LINK_HOST_CHG_HS_CHIRP:
+		if (ab->otg.gadget) {
+			/* TODO: Enable regulators. */
+			ab8500_usb_peri_phy_en(ab);
+			v = ab->otg.gadget;
+		}
+		event = USB_EVENT_VBUS;
+		break;
+
+	case USB_LINK_HM_IDGND:
+		if (ab->otg.host) {
+			/* TODO: Enable regulators. */
+			ab8500_usb_host_phy_en(ab);
+			v = ab->otg.host;
+		}
+		ab->otg.state = OTG_STATE_A_IDLE;
+		ab->otg.default_a = true;
+		event = USB_EVENT_ID;
+		break;
+
+	case USB_LINK_ACA_RID_A:
+	case USB_LINK_ACA_RID_B:
+		/* TODO */
+	case USB_LINK_ACA_RID_C_NM:
+	case USB_LINK_ACA_RID_C_HS:
+	case USB_LINK_ACA_RID_C_HS_CHIRP:
+	case USB_LINK_DEDICATED_CHG:
+		/* TODO: vbus_draw */
+		event = USB_EVENT_CHARGER;
+		break;
+	}
+
+	blocking_notifier_call_chain(&ab->otg.notifier, event, v);
+
+	return 0;
+}
+
+static void ab8500_usb_delayed_work(struct work_struct *work)
+{
+	struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
+						dwork.work);
+
+	ab8500_usb_link_status_update(ab);
+}
+
+static irqreturn_t ab8500_usb_v1x_common_irq(int irq, void *data)
+{
+	struct ab8500_usb *ab = (struct ab8500_usb *) data;
+
+	/* Wait for link status to become stable. */
+	schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ab8500_usb_v1x_vbus_fall_irq(int irq, void *data)
+{
+	struct ab8500_usb *ab = (struct ab8500_usb *) data;
+
+	/* Link status will not be updated till phy is disabled. */
+	ab8500_usb_peri_phy_dis(ab);
+
+	/* Wait for link status to become stable. */
+	schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ab8500_usb_v20_irq(int irq, void *data)
+{
+	struct ab8500_usb *ab = (struct ab8500_usb *) data;
+
+	ab8500_usb_link_status_update(ab);
+
+	return IRQ_HANDLED;
+}
+
+static void ab8500_usb_phy_disable_work(struct work_struct *work)
+{
+	struct ab8500_usb *ab = container_of(work, struct ab8500_usb,
+						phy_dis_work);
+
+	if (!ab->otg.host)
+		ab8500_usb_host_phy_dis(ab);
+
+	if (!ab->otg.gadget)
+		ab8500_usb_peri_phy_dis(ab);
+}
+
+static int ab8500_usb_set_power(struct otg_transceiver *otg, unsigned mA)
+{
+	struct ab8500_usb *ab;
+
+	if (!otg)
+		return -ENODEV;
+
+	ab = xceiv_to_ab(otg);
+
+	ab->vbus_draw = mA;
+
+	if (mA)
+		blocking_notifier_call_chain(&ab->otg.notifier,
+				USB_EVENT_ENUMERATED, ab->otg.gadget);
+	return 0;
+}
+
+/* TODO: Implement some way for charging or other drivers to read
+ * ab->vbus_draw.
+ */
+
+static int ab8500_usb_set_suspend(struct otg_transceiver *x, int suspend)
+{
+	/* TODO */
+	return 0;
+}
+
+static int ab8500_usb_set_peripheral(struct otg_transceiver *otg,
+		struct usb_gadget *gadget)
+{
+	struct ab8500_usb *ab;
+
+	if (!otg)
+		return -ENODEV;
+
+	ab = xceiv_to_ab(otg);
+
+	/* Some drivers call this function in atomic context.
+	 * Do not update ab8500 registers directly till this
+	 * is fixed.
+	 */
+
+	if (!gadget) {
+		/* TODO: Disable regulators. */
+		ab->otg.gadget = NULL;
+		schedule_work(&ab->phy_dis_work);
+	} else {
+		ab->otg.gadget = gadget;
+		ab->otg.state = OTG_STATE_B_IDLE;
+
+		/* Phy will not be enabled if cable is already
+		 * plugged-in. Schedule to enable phy.
+		 * Use same delay to avoid any race condition.
+		 */
+		schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+	}
+
+	return 0;
+}
+
+static int ab8500_usb_set_host(struct otg_transceiver *otg,
+					struct usb_bus *host)
+{
+	struct ab8500_usb *ab;
+
+	if (!otg)
+		return -ENODEV;
+
+	ab = xceiv_to_ab(otg);
+
+	/* Some drivers call this function in atomic context.
+	 * Do not update ab8500 registers directly till this
+	 * is fixed.
+	 */
+
+	if (!host) {
+		/* TODO: Disable regulators. */
+		ab->otg.host = NULL;
+		schedule_work(&ab->phy_dis_work);
+	} else {
+		ab->otg.host = host;
+		/* Phy will not be enabled if cable is already
+		 * plugged-in. Schedule to enable phy.
+		 * Use same delay to avoid any race condition.
+		 */
+		schedule_delayed_work(&ab->dwork, ab->link_status_wait);
+	}
+
+	return 0;
+}
+
+static void ab8500_usb_irq_free(struct ab8500_usb *ab)
+{
+	if (ab->rev < 0x20) {
+		free_irq(ab->irq_num_id_rise, ab);
+		free_irq(ab->irq_num_id_fall, ab);
+		free_irq(ab->irq_num_vbus_rise, ab);
+		free_irq(ab->irq_num_vbus_fall, ab);
+	} else {
+		free_irq(ab->irq_num_link_status, ab);
+	}
+}
+
+static int ab8500_usb_v1x_res_setup(struct platform_device *pdev,
+				struct ab8500_usb *ab)
+{
+	int err;
+
+	ab->irq_num_id_rise = platform_get_irq_byname(pdev, "ID_WAKEUP_R");
+	if (ab->irq_num_id_rise < 0) {
+		dev_err(&pdev->dev, "ID rise irq not found\n");
+		return ab->irq_num_id_rise;
+	}
+	err = request_threaded_irq(ab->irq_num_id_rise, NULL,
+		ab8500_usb_v1x_common_irq,
+		IRQF_NO_SUSPEND | IRQF_SHARED,
+		"usb-id-rise", ab);
+	if (err < 0) {
+		dev_err(ab->dev, "request_irq failed for ID rise irq\n");
+		goto fail0;
+	}
+
+	ab->irq_num_id_fall = platform_get_irq_byname(pdev, "ID_WAKEUP_F");
+	if (ab->irq_num_id_fall < 0) {
+		dev_err(&pdev->dev, "ID fall irq not found\n");
+		return ab->irq_num_id_fall;
+	}
+	err = request_threaded_irq(ab->irq_num_id_fall, NULL,
+		ab8500_usb_v1x_common_irq,
+		IRQF_NO_SUSPEND | IRQF_SHARED,
+		"usb-id-fall", ab);
+	if (err < 0) {
+		dev_err(ab->dev, "request_irq failed for ID fall irq\n");
+		goto fail1;
+	}
+
+	ab->irq_num_vbus_rise = platform_get_irq_byname(pdev, "VBUS_DET_R");
+	if (ab->irq_num_vbus_rise < 0) {
+		dev_err(&pdev->dev, "VBUS rise irq not found\n");
+		return ab->irq_num_vbus_rise;
+	}
+	err = request_threaded_irq(ab->irq_num_vbus_rise, NULL,
+		ab8500_usb_v1x_common_irq,
+		IRQF_NO_SUSPEND | IRQF_SHARED,
+		"usb-vbus-rise", ab);
+	if (err < 0) {
+		dev_err(ab->dev, "request_irq failed for Vbus rise irq\n");
+		goto fail2;
+	}
+
+	ab->irq_num_vbus_fall = platform_get_irq_byname(pdev, "VBUS_DET_F");
+	if (ab->irq_num_vbus_fall < 0) {
+		dev_err(&pdev->dev, "VBUS fall irq not found\n");
+		return ab->irq_num_vbus_fall;
+	}
+	err = request_threaded_irq(ab->irq_num_vbus_fall, NULL,
+		ab8500_usb_v1x_vbus_fall_irq,
+		IRQF_NO_SUSPEND | IRQF_SHARED,
+		"usb-vbus-fall", ab);
+	if (err < 0) {
+		dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
+		goto fail3;
+	}
+
+	return 0;
+fail3:
+	free_irq(ab->irq_num_vbus_rise, ab);
+fail2:
+	free_irq(ab->irq_num_id_fall, ab);
+fail1:
+	free_irq(ab->irq_num_id_rise, ab);
+fail0:
+	return err;
+}
+
+static int ab8500_usb_v2_res_setup(struct platform_device *pdev,
+				struct ab8500_usb *ab)
+{
+	int err;
+
+	ab->irq_num_link_status = platform_get_irq_byname(pdev,
+						"USB_LINK_STATUS");
+	if (ab->irq_num_link_status < 0) {
+		dev_err(&pdev->dev, "Link status irq not found\n");
+		return ab->irq_num_link_status;
+	}
+
+	err = request_threaded_irq(ab->irq_num_link_status, NULL,
+		ab8500_usb_v20_irq,
+		IRQF_NO_SUSPEND | IRQF_SHARED,
+		"usb-link-status", ab);
+	if (err < 0) {
+		dev_err(ab->dev,
+			"request_irq failed for link status irq\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static int __devinit ab8500_usb_probe(struct platform_device *pdev)
+{
+	struct ab8500_usb	*ab;
+	int err;
+	int rev;
+
+	rev = abx500_get_chip_id(&pdev->dev);
+	if (rev < 0) {
+		dev_err(&pdev->dev, "Chip id read failed\n");
+		return rev;
+	} else if (rev < 0x10) {
+		dev_err(&pdev->dev, "Unsupported AB8500 chip\n");
+		return -ENODEV;
+	}
+
+	ab = kzalloc(sizeof *ab, GFP_KERNEL);
+	if (!ab)
+		return -ENOMEM;
+
+	ab->dev			= &pdev->dev;
+	ab->rev			= rev;
+	ab->otg.dev		= ab->dev;
+	ab->otg.label		= "ab8500";
+	ab->otg.state		= OTG_STATE_UNDEFINED;
+	ab->otg.set_host	= ab8500_usb_set_host;
+	ab->otg.set_peripheral	= ab8500_usb_set_peripheral;
+	ab->otg.set_suspend	= ab8500_usb_set_suspend;
+	ab->otg.set_power	= ab8500_usb_set_power;
+
+	platform_set_drvdata(pdev, ab);
+
+	BLOCKING_INIT_NOTIFIER_HEAD(&ab->otg.notifier);
+
+	/* v1: Wait for link status to become stable.
+	 * all: Handle updates from set_host and set_peripheral, as those
+	 *      may be called in atomic context.
+	 */
+	INIT_DELAYED_WORK(&ab->dwork, ab8500_usb_delayed_work);
+
+	/* all: Disable phy when called from set_host and set_peripheral */
+	INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work);
+
+	if (ab->rev < 0x20) {
+		err = ab8500_usb_v1x_res_setup(pdev, ab);
+		ab->link_status_wait = AB8500_V1x_LINK_STAT_WAIT;
+	} else {
+		err = ab8500_usb_v2_res_setup(pdev, ab);
+	}
+
+	if (err < 0)
+		goto fail0;
+
+	err = otg_set_transceiver(&ab->otg);
+	if (err) {
+		dev_err(&pdev->dev, "Can't register transceiver\n");
+		goto fail1;
+	}
+
+	dev_info(&pdev->dev, "AB8500 usb driver initialized\n");
+
+	return 0;
+fail1:
+	ab8500_usb_irq_free(ab);
+fail0:
+	kfree(ab);
+	return err;
+}
+
+static int __devexit ab8500_usb_remove(struct platform_device *pdev)
+{
+	struct ab8500_usb *ab = platform_get_drvdata(pdev);
+
+	ab8500_usb_irq_free(ab);
+
+	cancel_delayed_work_sync(&ab->dwork);
+
+	cancel_work_sync(&ab->phy_dis_work);
+
+	otg_set_transceiver(NULL);
+
+	ab8500_usb_host_phy_dis(ab);
+	ab8500_usb_peri_phy_dis(ab);
+
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(ab);
+
+	return 0;
+}
+
+static struct platform_driver ab8500_usb_driver = {
+	.probe		= ab8500_usb_probe,
+	.remove		= __devexit_p(ab8500_usb_remove),
+	.driver		= {
+		.name	= "ab8500-usb",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init ab8500_usb_init(void)
+{
+	return platform_driver_register(&ab8500_usb_driver);
+}
+subsys_initcall(ab8500_usb_init);
+
+static void __exit ab8500_usb_exit(void)
+{
+	platform_driver_unregister(&ab8500_usb_driver);
+}
+module_exit(ab8500_usb_exit);
+
+MODULE_ALIAS("platform:ab8500_usb");
+MODULE_AUTHOR("ST-Ericsson AB");
+MODULE_DESCRIPTION("AB8500 usb transceiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 4569694..e00fa1b 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1247,7 +1247,7 @@
 	isp->timer.data = 0;
 	set_bit(WORK_STOP, &isp->todo);
 	del_timer_sync(&isp->timer);
-	flush_scheduled_work();
+	flush_work_sync(&isp->work);
 
 	put_device(&i2c->dev);
 	the_transceiver = NULL;
diff --git a/drivers/usb/otg/msm72k_otg.c b/drivers/usb/otg/msm72k_otg.c
new file mode 100644
index 0000000..1cd52ed
--- /dev/null
+++ b/drivers/usb/otg/msm72k_otg.c
@@ -0,0 +1,1125 @@
+/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/usb/msm_hsusb_hw.h>
+
+#include <mach/clk.h>
+
+#define MSM_USB_BASE	(motg->regs)
+#define DRIVER_NAME	"msm_otg"
+
+#define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
+static int ulpi_read(struct otg_transceiver *otg, u32 reg)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+	int cnt = 0;
+
+	/* initiate read operation */
+	writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while (cnt < ULPI_IO_TIMEOUT_USEC) {
+		if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
+			break;
+		udelay(1);
+		cnt++;
+	}
+
+	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
+		dev_err(otg->dev, "ulpi_read: timeout %08x\n",
+			readl(USB_ULPI_VIEWPORT));
+		return -ETIMEDOUT;
+	}
+	return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
+}
+
+static int ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+	int cnt = 0;
+
+	/* initiate write operation */
+	writel(ULPI_RUN | ULPI_WRITE |
+	       ULPI_ADDR(reg) | ULPI_DATA(val),
+	       USB_ULPI_VIEWPORT);
+
+	/* wait for completion */
+	while (cnt < ULPI_IO_TIMEOUT_USEC) {
+		if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
+			break;
+		udelay(1);
+		cnt++;
+	}
+
+	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
+		dev_err(otg->dev, "ulpi_write: timeout\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
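+/*
+ * Expose the viewport accessors to the rest of the OTG stack.  Illustrative
+ * usage (not part of this driver), assuming the generic otg_io_read()
+ * wrapper from <linux/usb/otg.h> and the register offsets from
+ * <linux/usb/ulpi.h>:
+ *
+ *	int lo = otg_io_read(otg, ULPI_VENDOR_ID_LOW);
+ *	int hi = otg_io_read(otg, ULPI_VENDOR_ID_HIGH);
+ *
+ *	if (lo >= 0 && hi >= 0)
+ *		vendor_id = (hi << 8) | lo;
+ */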
+static struct otg_io_access_ops msm_otg_io_ops = {
+	.read = ulpi_read,
+	.write = ulpi_write,
+};
+
+static void ulpi_init(struct msm_otg *motg)
+{
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	int *seq = pdata->phy_init_seq;
+
+	if (!seq)
+		return;
+
+	while (seq[0] >= 0) {
+		dev_vdbg(motg->otg.dev, "ulpi: write 0x%02x to 0x%02x\n",
+				seq[0], seq[1]);
+		ulpi_write(&motg->otg, seq[0], seq[1]);
+		seq += 2;
+	}
+}
+
+static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
+{
+	int ret;
+
+	if (assert) {
+		ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+		if (ret)
+			dev_err(motg->otg.dev, "usb hs_clk assert failed\n");
+	} else {
+		ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+		if (ret)
+			dev_err(motg->otg.dev, "usb hs_clk deassert failed\n");
+	}
+	return ret;
+}
+
+static int msm_otg_phy_clk_reset(struct msm_otg *motg)
+{
+	int ret;
+
+	ret = clk_reset(motg->phy_reset_clk, CLK_RESET_ASSERT);
+	if (ret) {
+		dev_err(motg->otg.dev, "usb phy clk assert failed\n");
+		return ret;
+	}
+	usleep_range(10000, 12000);
+	ret = clk_reset(motg->phy_reset_clk, CLK_RESET_DEASSERT);
+	if (ret)
+		dev_err(motg->otg.dev, "usb phy clk deassert failed\n");
+	return ret;
+}
+
+static int msm_otg_phy_reset(struct msm_otg *motg)
+{
+	u32 val;
+	int ret;
+	int retries;
+
+	ret = msm_otg_link_clk_reset(motg, 1);
+	if (ret)
+		return ret;
+	ret = msm_otg_phy_clk_reset(motg);
+	if (ret)
+		return ret;
+	ret = msm_otg_link_clk_reset(motg, 0);
+	if (ret)
+		return ret;
+
+	val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
+	writel(val | PORTSC_PTS_ULPI, USB_PORTSC);
+
+	for (retries = 3; retries > 0; retries--) {
+		ret = ulpi_write(&motg->otg, ULPI_FUNC_CTRL_SUSPENDM,
+				ULPI_CLR(ULPI_FUNC_CTRL));
+		if (!ret)
+			break;
+		ret = msm_otg_phy_clk_reset(motg);
+		if (ret)
+			return ret;
+	}
+	if (!retries)
+		return -ETIMEDOUT;
+
+	/* This reset calibrates the phy, if the above write succeeded */
+	ret = msm_otg_phy_clk_reset(motg);
+	if (ret)
+		return ret;
+
+	for (retries = 3; retries > 0; retries--) {
+		ret = ulpi_read(&motg->otg, ULPI_DEBUG);
+		if (ret != -ETIMEDOUT)
+			break;
+		ret = msm_otg_phy_clk_reset(motg);
+		if (ret)
+			return ret;
+	}
+	if (!retries)
+		return -ETIMEDOUT;
+
+	dev_info(motg->otg.dev, "phy_reset: success\n");
+	return 0;
+}
+
+#define LINK_RESET_TIMEOUT_USEC		(250 * 1000)
+static int msm_otg_reset(struct otg_transceiver *otg)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	int cnt = 0;
+	int ret;
+	u32 val = 0;
+	u32 ulpi_val = 0;
+
+	ret = msm_otg_phy_reset(motg);
+	if (ret) {
+		dev_err(otg->dev, "phy_reset failed\n");
+		return ret;
+	}
+
+	ulpi_init(motg);
+
+	writel(USBCMD_RESET, USB_USBCMD);
+	while (cnt < LINK_RESET_TIMEOUT_USEC) {
+		if (!(readl(USB_USBCMD) & USBCMD_RESET))
+			break;
+		udelay(1);
+		cnt++;
+	}
+	if (cnt >= LINK_RESET_TIMEOUT_USEC)
+		return -ETIMEDOUT;
+
+	/* select ULPI phy */
+	writel(0x80000000, USB_PORTSC);
+
+	msleep(100);
+
+	writel(0x0, USB_AHBBURST);
+	writel(0x00, USB_AHBMODE);
+
+	if (pdata->otg_control == OTG_PHY_CONTROL) {
+		val = readl(USB_OTGSC);
+		if (pdata->mode == USB_OTG) {
+			ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
+			val |= OTGSC_IDIE | OTGSC_BSVIE;
+		} else if (pdata->mode == USB_PERIPHERAL) {
+			ulpi_val = ULPI_INT_SESS_VALID;
+			val |= OTGSC_BSVIE;
+		}
+		writel(val, USB_OTGSC);
+		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_RISE);
+		ulpi_write(otg, ulpi_val, ULPI_USB_INT_EN_FALL);
+	}
+
+	return 0;
+}
+
+#define PHY_SUSPEND_TIMEOUT_USEC	(500 * 1000)
+static int msm_otg_suspend(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+	struct usb_bus *bus = otg->host;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	int cnt = 0;
+
+	if (atomic_read(&motg->in_lpm))
+		return 0;
+
+	disable_irq(motg->irq);
+	/*
+	 * Interrupt Latch Register auto-clear feature is not present
+	 * in all PHY versions. The latch register is clear-on-read, so
+	 * read it here to clear it and avoid a spurious wakeup from
+	 * low power mode (LPM).
+	 */
+	ulpi_read(otg, 0x14);
+
+	/*
+	 * PHY comparators are disabled when PHY enters into low power
+	 * mode (LPM). Keep PHY comparators ON in LPM only when we expect
+	 * VBUS/Id notifications from USB PHY. Otherwise turn off USB
+	 * PHY comparators. This saves a significant amount of power.
+	 */
+	if (pdata->otg_control == OTG_PHY_CONTROL)
+		ulpi_write(otg, 0x01, 0x30);
+
+	/*
+	 * PLL is not turned off when PHY enters into low power mode (LPM).
+	 * Disable PLL for maximum power savings.
+	 */
+	ulpi_write(otg, 0x08, 0x09);
+
+	/*
+	 * PHY may take some time or even fail to enter into low power
+	 * mode (LPM). Hence poll for up to 500 msec and reset the PHY and
+	 * link on failure.
+	 */
+	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+	while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
+		if (readl(USB_PORTSC) & PORTSC_PHCD)
+			break;
+		udelay(1);
+		cnt++;
+	}
+
+	if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
+		dev_err(otg->dev, "Unable to suspend PHY\n");
+		msm_otg_reset(otg);
+		enable_irq(motg->irq);
+		return -ETIMEDOUT;
+	}
+
+	/*
+	 * The PHY can generate interrupts asynchronously in low power mode
+	 * (LPM). These interrupts are level triggered, so the USB IRQ line
+	 * must stay disabled until the async interrupt enable bit is cleared
+	 * in the USBCMD register. Assert STP (ULPI interface STOP signal) to
+	 * block data communication from PHY.
+	 */
+	writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
+
+	clk_disable(motg->pclk);
+	clk_disable(motg->clk);
+	if (motg->core_clk)
+		clk_disable(motg->core_clk);
+
+	if (device_may_wakeup(otg->dev))
+		enable_irq_wake(motg->irq);
+	if (bus)
+		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
+
+	atomic_set(&motg->in_lpm, 1);
+	enable_irq(motg->irq);
+
+	dev_info(otg->dev, "USB in low power mode\n");
+
+	return 0;
+}
+
+#define PHY_RESUME_TIMEOUT_USEC	(100 * 1000)
+static int msm_otg_resume(struct msm_otg *motg)
+{
+	struct otg_transceiver *otg = &motg->otg;
+	struct usb_bus *bus = otg->host;
+	int cnt = 0;
+	unsigned temp;
+
+	if (!atomic_read(&motg->in_lpm))
+		return 0;
+
+	clk_enable(motg->pclk);
+	clk_enable(motg->clk);
+	if (motg->core_clk)
+		clk_enable(motg->core_clk);
+
+	temp = readl(USB_USBCMD);
+	temp &= ~ASYNC_INTR_CTRL;
+	temp &= ~ULPI_STP_CTRL;
+	writel(temp, USB_USBCMD);
+
+	/*
+	 * PHY comes out of low power mode (LPM) in case of wakeup
+	 * from asynchronous interrupt.
+	 */
+	if (!(readl(USB_PORTSC) & PORTSC_PHCD))
+		goto skip_phy_resume;
+
+	writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
+	while (cnt < PHY_RESUME_TIMEOUT_USEC) {
+		if (!(readl(USB_PORTSC) & PORTSC_PHCD))
+			break;
+		udelay(1);
+		cnt++;
+	}
+
+	if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
+		/*
+		 * This is a fatal error. Reset the link and
+		 * PHY. USB state can not be restored. Re-insertion
+		 * of USB cable is the only way to get USB working.
+		 */
+		dev_err(otg->dev, "Unable to resume USB."
+				"Re-plugin the cable\n");
+		msm_otg_reset(otg);
+	}
+
+skip_phy_resume:
+	if (device_may_wakeup(otg->dev))
+		disable_irq_wake(motg->irq);
+	if (bus)
+		set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
+
+	if (motg->async_int) {
+		motg->async_int = 0;
+		pm_runtime_put(otg->dev);
+		enable_irq(motg->irq);
+	}
+
+	atomic_set(&motg->in_lpm, 0);
+
+	dev_info(otg->dev, "USB exited from low power mode\n");
+
+	return 0;
+}
+
+static void msm_otg_start_host(struct otg_transceiver *otg, int on)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	struct usb_hcd *hcd;
+
+	if (!otg->host)
+		return;
+
+	hcd = bus_to_hcd(otg->host);
+
+	if (on) {
+		dev_dbg(otg->dev, "host on\n");
+
+		if (pdata->vbus_power)
+			pdata->vbus_power(1);
+		/*
+		 * Some boards have a switch controlled by gpio
+		 * to enable/disable internal HUB. Enable internal
+		 * HUB before kicking the host.
+		 */
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(OTG_STATE_A_HOST);
+#ifdef CONFIG_USB
+		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+#endif
+	} else {
+		dev_dbg(otg->dev, "host off\n");
+
+#ifdef CONFIG_USB
+		usb_remove_hcd(hcd);
+#endif
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(OTG_STATE_UNDEFINED);
+		if (pdata->vbus_power)
+			pdata->vbus_power(0);
+	}
+}
+
+static int msm_otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+	struct usb_hcd *hcd;
+
+	/*
+	 * Fail host registration if this board can support
+	 * only peripheral configuration.
+	 */
+	if (motg->pdata->mode == USB_PERIPHERAL) {
+		dev_info(otg->dev, "Host mode is not supported\n");
+		return -ENODEV;
+	}
+
+	if (!host) {
+		if (otg->state == OTG_STATE_A_HOST) {
+			pm_runtime_get_sync(otg->dev);
+			msm_otg_start_host(otg, 0);
+			otg->host = NULL;
+			otg->state = OTG_STATE_UNDEFINED;
+			schedule_work(&motg->sm_work);
+		} else {
+			otg->host = NULL;
+		}
+
+		return 0;
+	}
+
+	hcd = bus_to_hcd(host);
+	hcd->power_budget = motg->pdata->power_budget;
+
+	otg->host = host;
+	dev_dbg(otg->dev, "host driver registered w/ tranceiver\n");
+
+	/*
+	 * Kick the state machine work, if peripheral is not supported
+	 * or peripheral is already registered with us.
+	 */
+	if (motg->pdata->mode == USB_HOST || otg->gadget) {
+		pm_runtime_get_sync(otg->dev);
+		schedule_work(&motg->sm_work);
+	}
+
+	return 0;
+}
+
+static void msm_otg_start_peripheral(struct otg_transceiver *otg, int on)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	if (!otg->gadget)
+		return;
+
+	if (on) {
+		dev_dbg(otg->dev, "gadget on\n");
+		/*
+		 * Some boards have a switch controlled by gpio
+		 * to enable/disable internal HUB. Disable internal
+		 * HUB before kicking the gadget.
+		 */
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);
+		usb_gadget_vbus_connect(otg->gadget);
+	} else {
+		dev_dbg(otg->dev, "gadget off\n");
+		usb_gadget_vbus_disconnect(otg->gadget);
+		if (pdata->setup_gpio)
+			pdata->setup_gpio(OTG_STATE_UNDEFINED);
+	}
+
+}
+
+static int msm_otg_set_peripheral(struct otg_transceiver *otg,
+			struct usb_gadget *gadget)
+{
+	struct msm_otg *motg = container_of(otg, struct msm_otg, otg);
+
+	/*
+	 * Fail peripheral registration if this board can support
+	 * only host configuration.
+	 */
+	if (motg->pdata->mode == USB_HOST) {
+		dev_info(otg->dev, "Peripheral mode is not supported\n");
+		return -ENODEV;
+	}
+
+	if (!gadget) {
+		if (otg->state == OTG_STATE_B_PERIPHERAL) {
+			pm_runtime_get_sync(otg->dev);
+			msm_otg_start_peripheral(otg, 0);
+			otg->gadget = NULL;
+			otg->state = OTG_STATE_UNDEFINED;
+			schedule_work(&motg->sm_work);
+		} else {
+			otg->gadget = NULL;
+		}
+
+		return 0;
+	}
+	otg->gadget = gadget;
+	dev_dbg(otg->dev, "peripheral driver registered w/ tranceiver\n");
+
+	/*
+	 * Kick the state machine work, if host is not supported
+	 * or host is already registered with us.
+	 */
+	if (motg->pdata->mode == USB_PERIPHERAL || otg->host) {
+		pm_runtime_get_sync(otg->dev);
+		schedule_work(&motg->sm_work);
+	}
+
+	return 0;
+}
+
+/*
+ * We support OTG, Peripheral only and Host only configurations. In case
+ * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
+ * via Id pin status or user request (debugfs). Id/BSV interrupts are not
+ * enabled when switch is controlled by user and default mode is supplied
+ * by board file, which can be changed by userspace later.
+ */
+static void msm_otg_init_sm(struct msm_otg *motg)
+{
+	struct msm_otg_platform_data *pdata = motg->pdata;
+	u32 otgsc = readl(USB_OTGSC);
+
+	switch (pdata->mode) {
+	case USB_OTG:
+		if (pdata->otg_control == OTG_PHY_CONTROL) {
+			if (otgsc & OTGSC_ID)
+				set_bit(ID, &motg->inputs);
+			else
+				clear_bit(ID, &motg->inputs);
+
+			if (otgsc & OTGSC_BSV)
+				set_bit(B_SESS_VLD, &motg->inputs);
+			else
+				clear_bit(B_SESS_VLD, &motg->inputs);
+		} else if (pdata->otg_control == OTG_USER_CONTROL) {
+			if (pdata->default_mode == USB_HOST) {
+				clear_bit(ID, &motg->inputs);
+			} else if (pdata->default_mode == USB_PERIPHERAL) {
+				set_bit(ID, &motg->inputs);
+				set_bit(B_SESS_VLD, &motg->inputs);
+			} else {
+				set_bit(ID, &motg->inputs);
+				clear_bit(B_SESS_VLD, &motg->inputs);
+			}
+		}
+		break;
+	case USB_HOST:
+		clear_bit(ID, &motg->inputs);
+		break;
+	case USB_PERIPHERAL:
+		set_bit(ID, &motg->inputs);
+		if (otgsc & OTGSC_BSV)
+			set_bit(B_SESS_VLD, &motg->inputs);
+		else
+			clear_bit(B_SESS_VLD, &motg->inputs);
+		break;
+	default:
+		break;
+	}
+}
+
+static void msm_otg_sm_work(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
+	struct otg_transceiver *otg = &motg->otg;
+
+	switch (otg->state) {
+	case OTG_STATE_UNDEFINED:
+		dev_dbg(otg->dev, "OTG_STATE_UNDEFINED state\n");
+		msm_otg_reset(otg);
+		msm_otg_init_sm(motg);
+		otg->state = OTG_STATE_B_IDLE;
+		/* FALL THROUGH */
+	case OTG_STATE_B_IDLE:
+		dev_dbg(otg->dev, "OTG_STATE_B_IDLE state\n");
+		if (!test_bit(ID, &motg->inputs) && otg->host) {
+			/* disable BSV bit */
+			writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
+			msm_otg_start_host(otg, 1);
+			otg->state = OTG_STATE_A_HOST;
+		} else if (test_bit(B_SESS_VLD, &motg->inputs) && otg->gadget) {
+			msm_otg_start_peripheral(otg, 1);
+			otg->state = OTG_STATE_B_PERIPHERAL;
+		}
+		pm_runtime_put_sync(otg->dev);
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		dev_dbg(otg->dev, "OTG_STATE_B_PERIPHERAL state\n");
+		if (!test_bit(B_SESS_VLD, &motg->inputs) ||
+				!test_bit(ID, &motg->inputs)) {
+			msm_otg_start_peripheral(otg, 0);
+			otg->state = OTG_STATE_B_IDLE;
+			msm_otg_reset(otg);
+			schedule_work(w);
+		}
+		break;
+	case OTG_STATE_A_HOST:
+		dev_dbg(otg->dev, "OTG_STATE_A_HOST state\n");
+		if (test_bit(ID, &motg->inputs)) {
+			msm_otg_start_host(otg, 0);
+			otg->state = OTG_STATE_B_IDLE;
+			msm_otg_reset(otg);
+			schedule_work(w);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+static irqreturn_t msm_otg_irq(int irq, void *data)
+{
+	struct msm_otg *motg = data;
+	struct otg_transceiver *otg = &motg->otg;
+	u32 otgsc = 0;
+
+	if (atomic_read(&motg->in_lpm)) {
+		disable_irq_nosync(irq);
+		motg->async_int = 1;
+		pm_runtime_get(otg->dev);
+		return IRQ_HANDLED;
+	}
+
+	otgsc = readl(USB_OTGSC);
+	if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
+		return IRQ_NONE;
+
+	if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
+		if (otgsc & OTGSC_ID)
+			set_bit(ID, &motg->inputs);
+		else
+			clear_bit(ID, &motg->inputs);
+		dev_dbg(otg->dev, "ID set/clear\n");
+		pm_runtime_get_noresume(otg->dev);
+	} else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
+		if (otgsc & OTGSC_BSV)
+			set_bit(B_SESS_VLD, &motg->inputs);
+		else
+			clear_bit(B_SESS_VLD, &motg->inputs);
+		dev_dbg(otg->dev, "BSV set/clear\n");
+		pm_runtime_get_noresume(otg->dev);
+	}
+
+	writel(otgsc, USB_OTGSC);
+	schedule_work(&motg->sm_work);
+	return IRQ_HANDLED;
+}
+
+static int msm_otg_mode_show(struct seq_file *s, void *unused)
+{
+	struct msm_otg *motg = s->private;
+	struct otg_transceiver *otg = &motg->otg;
+
+	switch (otg->state) {
+	case OTG_STATE_A_HOST:
+		seq_printf(s, "host\n");
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		seq_printf(s, "peripheral\n");
+		break;
+	default:
+		seq_printf(s, "none\n");
+		break;
+	}
+
+	return 0;
+}
+
+static int msm_otg_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_otg_mode_show, inode->i_private);
+}
+
+static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
+				size_t count, loff_t *ppos)
+{
+	struct msm_otg *motg = file->private_data;
+	char buf[16];
+	struct otg_transceiver *otg = &motg->otg;
+	int status = count;
+	enum usb_mode_type req_mode;
+
+	memset(buf, 0x00, sizeof(buf));
+
+	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
+		status = -EFAULT;
+		goto out;
+	}
+
+	if (!strncmp(buf, "host", 4)) {
+		req_mode = USB_HOST;
+	} else if (!strncmp(buf, "peripheral", 10)) {
+		req_mode = USB_PERIPHERAL;
+	} else if (!strncmp(buf, "none", 4)) {
+		req_mode = USB_NONE;
+	} else {
+		status = -EINVAL;
+		goto out;
+	}
+
+	switch (req_mode) {
+	case USB_NONE:
+		switch (otg->state) {
+		case OTG_STATE_A_HOST:
+		case OTG_STATE_B_PERIPHERAL:
+			set_bit(ID, &motg->inputs);
+			clear_bit(B_SESS_VLD, &motg->inputs);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	case USB_PERIPHERAL:
+		switch (otg->state) {
+		case OTG_STATE_B_IDLE:
+		case OTG_STATE_A_HOST:
+			set_bit(ID, &motg->inputs);
+			set_bit(B_SESS_VLD, &motg->inputs);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	case USB_HOST:
+		switch (otg->state) {
+		case OTG_STATE_B_IDLE:
+		case OTG_STATE_B_PERIPHERAL:
+			clear_bit(ID, &motg->inputs);
+			break;
+		default:
+			goto out;
+		}
+		break;
+	default:
+		goto out;
+	}
+
+	pm_runtime_get_sync(otg->dev);
+	schedule_work(&motg->sm_work);
+out:
+	return status;
+}
+
+const struct file_operations msm_otg_mode_fops = {
+	.open = msm_otg_mode_open,
+	.read = seq_read,
+	.write = msm_otg_mode_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *msm_otg_dbg_root;
+static struct dentry *msm_otg_dbg_mode;
+
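+/*
+ * Example (not part of this patch): with debugfs mounted at its usual
+ * /sys/kernel/debug location, the current mode is reported by reading
+ * msm_otg/mode, and writing "host", "peripheral" or "none" to it (e.g.
+ * "echo host > /sys/kernel/debug/msm_otg/mode") requests a mode switch,
+ * as handled by msm_otg_mode_write() above.
+ */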
+static int msm_otg_debugfs_init(struct msm_otg *motg)
+{
+	msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
+
+	if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
+		return -ENODEV;
+
+	msm_otg_dbg_mode = debugfs_create_file("mode", S_IRUGO | S_IWUSR,
+				msm_otg_dbg_root, motg, &msm_otg_mode_fops);
+	if (!msm_otg_dbg_mode) {
+		debugfs_remove(msm_otg_dbg_root);
+		msm_otg_dbg_root = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void msm_otg_debugfs_cleanup(void)
+{
+	debugfs_remove(msm_otg_dbg_mode);
+	debugfs_remove(msm_otg_dbg_root);
+}
+
+static int __init msm_otg_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	struct msm_otg *motg;
+	struct otg_transceiver *otg;
+
+	dev_info(&pdev->dev, "msm_otg probe\n");
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "No platform data given. Bailing out\n");
+		return -ENODEV;
+	}
+
+	motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
+	if (!motg) {
+		dev_err(&pdev->dev, "unable to allocate msm_otg\n");
+		return -ENOMEM;
+	}
+
+	motg->pdata = pdev->dev.platform_data;
+	otg = &motg->otg;
+	otg->dev = &pdev->dev;
+
+	motg->phy_reset_clk = clk_get(&pdev->dev, "usb_phy_clk");
+	if (IS_ERR(motg->phy_reset_clk)) {
+		dev_err(&pdev->dev, "failed to get usb_phy_clk\n");
+		ret = PTR_ERR(motg->phy_reset_clk);
+		goto free_motg;
+	}
+
+	motg->clk = clk_get(&pdev->dev, "usb_hs_clk");
+	if (IS_ERR(motg->clk)) {
+		dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
+		ret = PTR_ERR(motg->clk);
+		goto put_phy_reset_clk;
+	}
+
+	motg->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
+	if (IS_ERR(motg->pclk)) {
+		dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
+		ret = PTR_ERR(motg->pclk);
+		goto put_clk;
+	}
+
+	/*
+	 * USB core clock is not present on all MSM chips. This
+	 * clock is introduced to remove the dependency on AXI
+	 * bus frequency.
+	 */
+	motg->core_clk = clk_get(&pdev->dev, "usb_hs_core_clk");
+	if (IS_ERR(motg->core_clk))
+		motg->core_clk = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get platform resource mem\n");
+		ret = -ENODEV;
+		goto put_core_clk;
+	}
+
+	motg->regs = ioremap(res->start, resource_size(res));
+	if (!motg->regs) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto put_core_clk;
+	}
+	dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
+
+	motg->irq = platform_get_irq(pdev, 0);
+	if (!motg->irq) {
+		dev_err(&pdev->dev, "platform_get_irq failed\n");
+		ret = -ENODEV;
+		goto free_regs;
+	}
+
+	clk_enable(motg->clk);
+	clk_enable(motg->pclk);
+	if (motg->core_clk)
+		clk_enable(motg->core_clk);
+
+	writel(0, USB_USBINTR);
+	writel(0, USB_OTGSC);
+
+	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+	ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
+					"msm_otg", motg);
+	if (ret) {
+		dev_err(&pdev->dev, "request irq failed\n");
+		goto disable_clks;
+	}
+
+	otg->init = msm_otg_reset;
+	otg->set_host = msm_otg_set_host;
+	otg->set_peripheral = msm_otg_set_peripheral;
+
+	otg->io_ops = &msm_otg_io_ops;
+
+	ret = otg_set_transceiver(&motg->otg);
+	if (ret) {
+		dev_err(&pdev->dev, "otg_set_transceiver failed\n");
+		goto free_irq;
+	}
+
+	platform_set_drvdata(pdev, motg);
+	device_init_wakeup(&pdev->dev, 1);
+
+	if (motg->pdata->mode == USB_OTG &&
+			motg->pdata->otg_control == OTG_USER_CONTROL) {
+		ret = msm_otg_debugfs_init(motg);
+		if (ret)
+			dev_dbg(&pdev->dev, "mode debugfs file is"
+					"not available\n");
+	}
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+free_irq:
+	free_irq(motg->irq, motg);
+disable_clks:
+	clk_disable(motg->pclk);
+	clk_disable(motg->clk);
+free_regs:
+	iounmap(motg->regs);
+put_core_clk:
+	if (motg->core_clk)
+		clk_put(motg->core_clk);
+	clk_put(motg->pclk);
+put_clk:
+	clk_put(motg->clk);
+put_phy_reset_clk:
+	clk_put(motg->phy_reset_clk);
+free_motg:
+	kfree(motg);
+	return ret;
+}
+
+static int __devexit msm_otg_remove(struct platform_device *pdev)
+{
+	struct msm_otg *motg = platform_get_drvdata(pdev);
+	struct otg_transceiver *otg = &motg->otg;
+	int cnt = 0;
+
+	if (otg->host || otg->gadget)
+		return -EBUSY;
+
+	msm_otg_debugfs_cleanup();
+	cancel_work_sync(&motg->sm_work);
+
+	msm_otg_resume(motg);
+
+	device_init_wakeup(&pdev->dev, 0);
+	pm_runtime_disable(&pdev->dev);
+
+	otg_set_transceiver(NULL);
+	free_irq(motg->irq, motg);
+
+	/*
+	 * Put PHY in low power mode.
+	 */
+	ulpi_read(otg, 0x14);
+	ulpi_write(otg, 0x08, 0x09);
+
+	writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
+	while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
+		if (readl(USB_PORTSC) & PORTSC_PHCD)
+			break;
+		udelay(1);
+		cnt++;
+	}
+	if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
+		dev_err(otg->dev, "Unable to suspend PHY\n");
+
+	clk_disable(motg->pclk);
+	clk_disable(motg->clk);
+	if (motg->core_clk)
+		clk_disable(motg->core_clk);
+
+	iounmap(motg->regs);
+	pm_runtime_set_suspended(&pdev->dev);
+
+	clk_put(motg->phy_reset_clk);
+	clk_put(motg->pclk);
+	clk_put(motg->clk);
+	if (motg->core_clk)
+		clk_put(motg->core_clk);
+
+	kfree(motg);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int msm_otg_runtime_idle(struct device *dev)
+{
+	struct msm_otg *motg = dev_get_drvdata(dev);
+	struct otg_transceiver *otg = &motg->otg;
+
+	dev_dbg(dev, "OTG runtime idle\n");
+
+	/*
+	 * It is observed some times that a spurious interrupt
+	 * comes when PHY is put into LPM immediately after PHY reset.
+	 * This 1 sec delay also prevents entering into LPM immediately
+	 * after asynchronous interrupt.
+	 */
+	if (otg->state != OTG_STATE_UNDEFINED)
+		pm_schedule_suspend(dev, 1000);
+
+	return -EAGAIN;
+}
+
+static int msm_otg_runtime_suspend(struct device *dev)
+{
+	struct msm_otg *motg = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "OTG runtime suspend\n");
+	return msm_otg_suspend(motg);
+}
+
+static int msm_otg_runtime_resume(struct device *dev)
+{
+	struct msm_otg *motg = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "OTG runtime resume\n");
+	return msm_otg_resume(motg);
+}
+#else
+#define msm_otg_runtime_idle	NULL
+#define msm_otg_runtime_suspend	NULL
+#define msm_otg_runtime_resume	NULL
+#endif
+
+#ifdef CONFIG_PM
+static int msm_otg_pm_suspend(struct device *dev)
+{
+	struct msm_otg *motg = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "OTG PM suspend\n");
+	return msm_otg_suspend(motg);
+}
+
+static int msm_otg_pm_resume(struct device *dev)
+{
+	struct msm_otg *motg = dev_get_drvdata(dev);
+	int ret;
+
+	dev_dbg(dev, "OTG PM resume\n");
+
+	ret = msm_otg_resume(motg);
+	if (ret)
+		return ret;
+
+	/*
+	 * Runtime PM Documentation recommends bringing the
+	 * device to full powered state upon resume.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+}
+#else
+#define msm_otg_pm_suspend	NULL
+#define msm_otg_pm_resume	NULL
+#endif
+
+static const struct dev_pm_ops msm_otg_dev_pm_ops = {
+	.runtime_suspend = msm_otg_runtime_suspend,
+	.runtime_resume  = msm_otg_runtime_resume,
+	.runtime_idle    = msm_otg_runtime_idle,
+	.suspend         = msm_otg_pm_suspend,
+	.resume          = msm_otg_pm_resume,
+};
+
+static struct platform_driver msm_otg_driver = {
+	.remove = __devexit_p(msm_otg_remove),
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_otg_dev_pm_ops,
+	},
+};
+
+static int __init msm_otg_init(void)
+{
+	return platform_driver_probe(&msm_otg_driver, msm_otg_probe);
+}
+
+static void __exit msm_otg_exit(void)
+{
+	platform_driver_unregister(&msm_otg_driver);
+}
+
+module_init(msm_otg_init);
+module_exit(msm_otg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index d335f48..6ca505f 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -678,7 +678,8 @@
 	/* disable complete OTG block */
 	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
 
-	twl4030_phy_power(twl, 0);
+	if (!twl->asleep)
+		twl4030_phy_power(twl, 0);
 	regulator_put(twl->usb1v5);
 	regulator_put(twl->usb1v8);
 	regulator_put(twl->usb3v1);
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
new file mode 100644
index 0000000..28f7701
--- /dev/null
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -0,0 +1,493 @@
+/*
+ * twl6030_usb - TWL6030 USB transceiver, talking to OMAP OTG driver.
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Hema HK <hemahk@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/usb/otg.h>
+#include <linux/i2c/twl.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+
+/* usb register definitions */
+#define USB_VENDOR_ID_LSB		0x00
+#define USB_VENDOR_ID_MSB		0x01
+#define USB_PRODUCT_ID_LSB		0x02
+#define USB_PRODUCT_ID_MSB		0x03
+#define USB_VBUS_CTRL_SET		0x04
+#define USB_VBUS_CTRL_CLR		0x05
+#define USB_ID_CTRL_SET			0x06
+#define USB_ID_CTRL_CLR			0x07
+#define USB_VBUS_INT_SRC		0x08
+#define USB_VBUS_INT_LATCH_SET		0x09
+#define USB_VBUS_INT_LATCH_CLR		0x0A
+#define USB_VBUS_INT_EN_LO_SET		0x0B
+#define USB_VBUS_INT_EN_LO_CLR		0x0C
+#define USB_VBUS_INT_EN_HI_SET		0x0D
+#define USB_VBUS_INT_EN_HI_CLR		0x0E
+#define USB_ID_INT_SRC			0x0F
+#define USB_ID_INT_LATCH_SET		0x10
+#define USB_ID_INT_LATCH_CLR		0x11
+
+#define USB_ID_INT_EN_LO_SET		0x12
+#define USB_ID_INT_EN_LO_CLR		0x13
+#define USB_ID_INT_EN_HI_SET		0x14
+#define USB_ID_INT_EN_HI_CLR		0x15
+#define USB_OTG_ADP_CTRL		0x16
+#define USB_OTG_ADP_HIGH		0x17
+#define USB_OTG_ADP_LOW			0x18
+#define USB_OTG_ADP_RISE		0x19
+#define USB_OTG_REVISION		0x1A
+
+/* to be moved to LDO */
+#define TWL6030_MISC2			0xE5
+#define TWL6030_CFG_LDO_PD2		0xF5
+#define TWL6030_BACKUP_REG		0xFA
+
+/* In module TWL6030_MODULE_PM_MASTER */
+#define STS_HW_CONDITIONS		0x21
+#define STS_USB_ID			BIT(2)
+
+/* In module TWL6030_MODULE_PM_RECEIVER */
+#define VUSB_CFG_TRANS			0x71
+#define VUSB_CFG_STATE			0x72
+#define VUSB_CFG_VOLTAGE		0x73
+
+/* in module TWL6030_MODULE_MAIN_CHARGE */
+
+#define CHARGERUSB_CTRL1		0x8
+
+#define CONTROLLER_STAT1		0x03
+#define	VBUS_DET			BIT(2)
+
+struct twl6030_usb {
+	struct otg_transceiver	otg;
+	struct device		*dev;
+
+	/* for vbus reporting with irqs disabled */
+	spinlock_t		lock;
+
+	struct regulator		*usb3v3;
+
+	int			irq1;
+	int			irq2;
+	u8			linkstat;
+	u8			asleep;
+	bool			irq_enabled;
+};
+
+#define xceiv_to_twl(x)		container_of((x), struct twl6030_usb, otg)
+
+/*-------------------------------------------------------------------------*/
+
+static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
+						u8 data, u8 address)
+{
+	int ret = 0;
+
+	ret = twl_i2c_write_u8(module, data, address);
+	if (ret < 0)
+		dev_err(twl->dev,
+			"Write[0x%x] Error %d\n", address, ret);
+	return ret;
+}
+
+static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
+{
+	u8 data, ret = 0;
+
+	ret = twl_i2c_read_u8(module, &data, address);
+	if (ret >= 0)
+		ret = data;
+	else
+		dev_err(twl->dev,
+			"readb[0x%x,0x%x] Error %d\n",
+					module, address, ret);
+	return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+static int twl6030_set_phy_clk(struct otg_transceiver *x, int on)
+{
+	struct twl6030_usb *twl;
+	struct device *dev;
+	struct twl4030_usb_data *pdata;
+
+	twl = xceiv_to_twl(x);
+	dev  = twl->dev;
+	pdata = dev->platform_data;
+
+	pdata->phy_set_clock(twl->dev, on);
+
+	return 0;
+}
+
+static int twl6030_phy_init(struct otg_transceiver *x)
+{
+	u8 hw_state;
+	struct twl6030_usb *twl;
+	struct device *dev;
+	struct twl4030_usb_data *pdata;
+
+	twl = xceiv_to_twl(x);
+	dev  = twl->dev;
+	pdata = dev->platform_data;
+
+	regulator_enable(twl->usb3v3);
+
+	hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
+
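+	/* Power up the PHY, reporting the ID pin state to the platform phy_power() hook */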
+	if (hw_state & STS_USB_ID)
+		pdata->phy_power(twl->dev, 1, 1);
+	else
+		pdata->phy_power(twl->dev, 0, 1);
+
+	return 0;
+}
+
+static void twl6030_phy_shutdown(struct otg_transceiver *x)
+{
+	struct twl6030_usb *twl;
+	struct device *dev;
+	struct twl4030_usb_data *pdata;
+
+	twl = xceiv_to_twl(x);
+	dev  = twl->dev;
+	pdata = dev->platform_data;
+	pdata->phy_power(twl->dev, 0, 0);
+	regulator_disable(twl->usb3v3);
+}
+
+static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
+{
+	/* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
+	twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x1, TWL6030_BACKUP_REG);
+
+	/* Program CFG_LDO_PD2 register and set VUSB bit */
+	twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x1, TWL6030_CFG_LDO_PD2);
+
+	/* Program MISC2 register and set bit VUSB_IN_VBAT */
+	twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
+
+	twl->usb3v3 = regulator_get(twl->dev, "vusb");
+	if (IS_ERR(twl->usb3v3))
+		return -ENODEV;
+
+	regulator_enable(twl->usb3v3);
+
+	/* Program the VUSB_CFG_TRANS for ACTIVE state. */
+	twl6030_writeb(twl, TWL_MODULE_PM_RECEIVER, 0x3F,
+						VUSB_CFG_TRANS);
+
+	/* Program the VUSB_CFG_STATE register to ON on all groups. */
+	twl6030_writeb(twl, TWL_MODULE_PM_RECEIVER, 0xE1,
+						VUSB_CFG_STATE);
+
+	/* Program the USB_VBUS_CTRL_SET and set VBUS_ACT_COMP bit */
+	twl6030_writeb(twl, TWL_MODULE_USB, 0x4, USB_VBUS_CTRL_SET);
+
+	/*
+	 * Program the USB_ID_CTRL_SET register to enable GND drive
+	 * and the ID comparators
+	 */
+	twl6030_writeb(twl, TWL_MODULE_USB, 0x14, USB_ID_CTRL_SET);
+
+	return 0;
+}
+
+static ssize_t twl6030_usb_vbus_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct twl6030_usb *twl = dev_get_drvdata(dev);
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&twl->lock, flags);
+
+	switch (twl->linkstat) {
+	case USB_EVENT_VBUS:
+		ret = snprintf(buf, PAGE_SIZE, "vbus\n");
+		break;
+	case USB_EVENT_ID:
+		ret = snprintf(buf, PAGE_SIZE, "id\n");
+		break;
+	case USB_EVENT_NONE:
+		ret = snprintf(buf, PAGE_SIZE, "none\n");
+		break;
+	default:
+		ret = snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
+	}
+	spin_unlock_irqrestore(&twl->lock, flags);
+
+	return ret;
+}
+static DEVICE_ATTR(vbus, 0444, twl6030_usb_vbus_show, NULL);
+
+static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
+{
+	struct twl6030_usb *twl = _twl;
+	int status;
+	u8 vbus_state, hw_state;
+
+	hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
+
+	vbus_state = twl6030_readb(twl, TWL_MODULE_MAIN_CHARGE,
+						CONTROLLER_STAT1);
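+	/* With the ID pin ungrounded, VBUS presence decides between a VBUS event and none */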
+	if (!(hw_state & STS_USB_ID)) {
+		if (vbus_state & VBUS_DET) {
+			status = USB_EVENT_VBUS;
+			twl->otg.default_a = false;
+			twl->otg.state = OTG_STATE_B_IDLE;
+		} else {
+			status = USB_EVENT_NONE;
+		}
+		if (status >= 0) {
+			twl->linkstat = status;
+			blocking_notifier_call_chain(&twl->otg.notifier,
+						status, twl->otg.gadget);
+		}
+	}
+	sysfs_notify(&twl->dev->kobj, NULL, "vbus");
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
+{
+	struct twl6030_usb *twl = _twl;
+	int status = USB_EVENT_NONE;
+	u8 hw_state;
+
+	hw_state = twl6030_readb(twl, TWL6030_MODULE_ID0, STS_HW_CONDITIONS);
+
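+	/* ID pin grounded: rearm the ID interrupt for the other edge and report an ID (host) event */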
+	if (hw_state & STS_USB_ID) {
+
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x10,
+						USB_ID_INT_EN_HI_SET);
+		status = USB_EVENT_ID;
+		twl->otg.default_a = true;
+		twl->otg.state = OTG_STATE_A_IDLE;
+		blocking_notifier_call_chain(&twl->otg.notifier, status,
+							twl->otg.gadget);
+	} else {
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x10,
+						USB_ID_INT_EN_HI_CLR);
+		twl6030_writeb(twl, TWL_MODULE_USB, 0x1,
+						USB_ID_INT_EN_HI_SET);
+	}
+	twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
+	twl->linkstat = status;
+
+	return IRQ_HANDLED;
+}
+
+static int twl6030_set_peripheral(struct otg_transceiver *x,
+		struct usb_gadget *gadget)
+{
+	struct twl6030_usb *twl;
+
+	if (!x)
+		return -ENODEV;
+
+	twl = xceiv_to_twl(x);
+	twl->otg.gadget = gadget;
+	if (!gadget)
+		twl->otg.state = OTG_STATE_UNDEFINED;
+
+	return 0;
+}
+
+static int twl6030_enable_irq(struct otg_transceiver *x)
+{
+	struct twl6030_usb *twl = xceiv_to_twl(x);
+
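+	/* Unmask ID and charger/VBUS interrupts, then run both handlers once to latch the current state */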
+	twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
+	twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
+	twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
+
+	twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
+				REG_INT_MSK_LINE_C);
+	twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
+				REG_INT_MSK_STS_C);
+	twl6030_usb_irq(twl->irq2, twl);
+	twl6030_usbotg_irq(twl->irq1, twl);
+
+	return 0;
+}
+
+static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
+{
+	struct twl6030_usb *twl = xceiv_to_twl(x);
+
+	/*
+	 * Start driving VBUS. Set OPA_MODE bit in CHARGERUSB_CTRL1
+	 * register. This enables boost mode.
+	 */
+	if (enabled)
+		twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE, 0x40,
+						CHARGERUSB_CTRL1);
+	else
+		twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE, 0x00,
+						CHARGERUSB_CTRL1);
+	return 0;
+}
+
+static int twl6030_set_host(struct otg_transceiver *x, struct usb_bus *host)
+{
+	struct twl6030_usb *twl;
+
+	if (!x)
+		return -ENODEV;
+
+	twl = xceiv_to_twl(x);
+	twl->otg.host = host;
+	if (!host)
+		twl->otg.state = OTG_STATE_UNDEFINED;
+	return 0;
+}
+
+static int __devinit twl6030_usb_probe(struct platform_device *pdev)
+{
+	struct twl6030_usb	*twl;
+	int			status, err;
+	struct twl4030_usb_data *pdata;
+	struct device *dev = &pdev->dev;
+	pdata = dev->platform_data;
+
+	twl = kzalloc(sizeof *twl, GFP_KERNEL);
+	if (!twl)
+		return -ENOMEM;
+
+	twl->dev		= &pdev->dev;
+	twl->irq1		= platform_get_irq(pdev, 0);
+	twl->irq2		= platform_get_irq(pdev, 1);
+	twl->otg.dev		= twl->dev;
+	twl->otg.label		= "twl6030";
+	twl->otg.set_host	= twl6030_set_host;
+	twl->otg.set_peripheral	= twl6030_set_peripheral;
+	twl->otg.set_vbus	= twl6030_set_vbus;
+	twl->otg.init		= twl6030_phy_init;
+	twl->otg.shutdown	= twl6030_phy_shutdown;
+
+	/* init spinlock for vbus reporting with irqs disabled */
+	spin_lock_init(&twl->lock);
+
+	err = twl6030_usb_ldo_init(twl);
+	if (err) {
+		dev_err(&pdev->dev, "ldo init failed\n");
+		kfree(twl);
+		return err;
+	}
+	otg_set_transceiver(&twl->otg);
+
+	platform_set_drvdata(pdev, twl);
+	if (device_create_file(&pdev->dev, &dev_attr_vbus))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+
+	BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+
+	twl->irq_enabled = true;
+	status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
+			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+			"twl6030_usb", twl);
+	if (status < 0) {
+		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
+			twl->irq1, status);
+		device_remove_file(twl->dev, &dev_attr_vbus);
+		kfree(twl);
+		return status;
+	}
+
+	status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
+			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+			"twl6030_usb", twl);
+	if (status < 0) {
+		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
+			twl->irq2, status);
+		free_irq(twl->irq1, twl);
+		device_remove_file(twl->dev, &dev_attr_vbus);
+		kfree(twl);
+		return status;
+	}
+
+	pdata->phy_init(dev);
+	twl6030_enable_irq(&twl->otg);
+	dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
+
+	return 0;
+}
+
+static int __exit twl6030_usb_remove(struct platform_device *pdev)
+{
+	struct twl6030_usb *twl = platform_get_drvdata(pdev);
+
+	struct twl4030_usb_data *pdata;
+	struct device *dev = &pdev->dev;
+	pdata = dev->platform_data;
+
+	twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
+		REG_INT_MSK_LINE_C);
+	twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
+			REG_INT_MSK_STS_C);
+	free_irq(twl->irq1, twl);
+	free_irq(twl->irq2, twl);
+	regulator_put(twl->usb3v3);
+	pdata->phy_exit(twl->dev);
+	device_remove_file(twl->dev, &dev_attr_vbus);
+	kfree(twl);
+
+	return 0;
+}
+
+static struct platform_driver twl6030_usb_driver = {
+	.probe		= twl6030_usb_probe,
+	.remove		= __exit_p(twl6030_usb_remove),
+	.driver		= {
+		.name	= "twl6030_usb",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init twl6030_usb_init(void)
+{
+	return platform_driver_register(&twl6030_usb_driver);
+}
+subsys_initcall(twl6030_usb_init);
+
+static void __exit twl6030_usb_exit(void)
+{
+	platform_driver_unregister(&twl6030_usb_driver);
+}
+module_exit(twl6030_usb_exit);
+
+MODULE_ALIAS("platform:twl6030_usb");
+MODULE_AUTHOR("Hema HK <hemahk@ti.com>");
+MODULE_DESCRIPTION("TWL6030 USB transceiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2dec500..a2668d0 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -75,6 +75,7 @@
 	unsigned long last_dtr_rts;	/* saved modem control outputs */
 	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
 	char prev_status, diff_status;        /* Used for TIOCMIWAIT */
+	char transmit_empty;	/* If transmitter is empty or not */
 	struct usb_serial_port *port;
 	__u16 interface;	/* FT2232C, FT2232H or FT4232H port interface
 				   (0 for FT232/245) */
@@ -1323,6 +1324,23 @@
 	return 0;
 }
 
+static int get_lsr_info(struct usb_serial_port *port,
+			struct serial_struct __user *retinfo)
+{
+	struct ftdi_private *priv = usb_get_serial_port_data(port);
+	unsigned int result = 0;
+
+	if (!retinfo)
+		return -EFAULT;
+
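+	/* transmit_empty is cached by the read path from the FTDI status bytes */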
+	if (priv->transmit_empty)
+		result = TIOCSER_TEMT;
+
+	if (copy_to_user(retinfo, &result, sizeof(unsigned int)))
+		return -EFAULT;
+	return 0;
+}
+
 
 /* Determine type of FTDI chip based on USB config and descriptor. */
 static void ftdi_determine_type(struct usb_serial_port *port)
@@ -1872,6 +1890,12 @@
 			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
 	}
 
+	/* save if the transmitter is empty or not */
+	if (packet[1] & FTDI_RS_TEMT)
+		priv->transmit_empty = 1;
+	else
+		priv->transmit_empty = 0;
+
 	len -= 2;
 	if (!len)
 		return 0;	/* status only */
@@ -2235,6 +2259,9 @@
 			}
 		}
 		return 0;
+	case TIOCSERGETLSR:
+		return get_lsr_info(port, (struct serial_struct __user *)arg);
 	default:
 		break;
 	}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ef2977d..7487782 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -615,7 +615,6 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
@@ -989,6 +988,7 @@
 	.set_termios       = usb_wwan_set_termios,
 	.tiocmget          = usb_wwan_tiocmget,
 	.tiocmset          = usb_wwan_tiocmset,
+	.ioctl             = usb_wwan_ioctl,
 	.attach            = usb_wwan_startup,
 	.disconnect        = usb_wwan_disconnect,
 	.release           = usb_wwan_release,
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index e199b0f..5be866b 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -613,9 +613,8 @@
 	dbg("%s(): after buf_clear()", __func__);
 
 	/* cancel scheduled setup */
-	cancel_delayed_work(&priv->delayed_setup_work);
-	cancel_delayed_work(&priv->delayed_write_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&priv->delayed_setup_work);
+	cancel_delayed_work_sync(&priv->delayed_write_work);
 
 	/* shutdown our urbs */
 	dbg("%s(): shutting down urbs", __func__);
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index f5312dd333..8359ec7 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -79,7 +79,6 @@
 	u8 shadowLSR;
 	u8 shadowMSR;
 	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
-	unsigned short max_packet_size;
 	struct async_icount icount;
 };
 
@@ -464,36 +463,6 @@
 	return -ENOIOCTLCMD;
 }
 
-static void ssu100_set_max_packet_size(struct usb_serial_port *port)
-{
-	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
-	struct usb_serial *serial = port->serial;
-	struct usb_device *udev = serial->dev;
-
-	struct usb_interface *interface = serial->interface;
-	struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
-
-	unsigned num_endpoints;
-	int i;
-	unsigned long flags;
-
-	num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
-	dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
-
-	for (i = 0; i < num_endpoints; i++) {
-		dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
-			interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
-		ep_desc = &interface->cur_altsetting->endpoint[i].desc;
-	}
-
-	/* set max packet size based on descriptor */
-	spin_lock_irqsave(&priv->status_lock, flags);
-	priv->max_packet_size = ep_desc->wMaxPacketSize;
-	spin_unlock_irqrestore(&priv->status_lock, flags);
-
-	dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
-}
-
 static int ssu100_attach(struct usb_serial *serial)
 {
 	struct ssu100_port_private *priv;
@@ -511,7 +480,6 @@
 	spin_lock_init(&priv->status_lock);
 	init_waitqueue_head(&priv->delta_msr_wait);
 	usb_set_serial_port_data(port, priv);
-	ssu100_set_max_packet_size(port);
 
 	return ssu100_initdevice(serial->dev);
 }
@@ -641,13 +609,14 @@
 
 }
 
-static int ssu100_process_packet(struct tty_struct *tty,
-				 struct usb_serial_port *port,
-				 struct ssu100_port_private *priv,
-				 char *packet, int len)
+static int ssu100_process_packet(struct urb *urb,
+				 struct tty_struct *tty)
 {
-	int i;
+	struct usb_serial_port *port = urb->context;
+	char *packet = (char *)urb->transfer_buffer;
 	char flag = TTY_NORMAL;
+	u32 len = urb->actual_length;
+	int i;
 	char *ch;
 
 	dbg("%s - port %d", __func__, port->number);
@@ -685,12 +654,8 @@
 static void ssu100_process_read_urb(struct urb *urb)
 {
 	struct usb_serial_port *port = urb->context;
-	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
-	char *data = (char *)urb->transfer_buffer;
 	struct tty_struct *tty;
-	int count = 0;
-	int i;
-	int len;
+	int count;
 
 	dbg("%s", __func__);
 
@@ -698,10 +663,7 @@
 	if (!tty)
 		return;
 
-	for (i = 0; i < urb->actual_length; i += priv->max_packet_size) {
-		len = min_t(int, urb->actual_length - i, priv->max_packet_size);
-		count += ssu100_process_packet(tty, port, priv, &data[i], len);
-	}
+	count = ssu100_process_packet(urb, tty);
 
 	if (count)
 		tty_flip_buffer_push(tty);
@@ -717,8 +679,6 @@
 	.id_table	     = id_table,
 	.usb_driver	     = &ssu100_driver,
 	.num_ports	     = 1,
-	.bulk_in_size        = 256,
-	.bulk_out_size       = 256,
 	.open		     = ssu100_open,
 	.close		     = ssu100_close,
 	.attach              = ssu100_attach,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 2be298a..3ab77c5 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -18,6 +18,8 @@
 extern int usb_wwan_tiocmget(struct tty_struct *tty, struct file *file);
 extern int usb_wwan_tiocmset(struct tty_struct *tty, struct file *file,
 			     unsigned int set, unsigned int clear);
+extern int usb_wwan_ioctl(struct tty_struct *tty, struct file *file,
+			  unsigned int cmd, unsigned long arg);
 extern int usb_wwan_send_setup(struct usb_serial_port *port);
 extern int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
 			  const unsigned char *buf, int count);
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index fbc9467..b004b2a 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -31,8 +31,10 @@
 #include <linux/tty_flip.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/uaccess.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/serial.h>
 #include "usb-wwan.h"
 
 static int debug;
@@ -123,6 +125,83 @@
 }
 EXPORT_SYMBOL(usb_wwan_tiocmset);
 
+static int get_serial_info(struct usb_serial_port *port,
+			   struct serial_struct __user *retinfo)
+{
+	struct serial_struct tmp;
+
+	if (!retinfo)
+		return -EFAULT;
+
+	memset(&tmp, 0, sizeof(tmp));
+	tmp.line            = port->serial->minor;
+	tmp.port            = port->number;
+	tmp.baud_base       = tty_get_baud_rate(port->port.tty);
+	tmp.close_delay	    = port->port.close_delay / 10;
+	tmp.closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+				 ASYNC_CLOSING_WAIT_NONE :
+				 port->port.closing_wait / 10;
+
+	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+		return -EFAULT;
+	return 0;
+}
+
+static int set_serial_info(struct usb_serial_port *port,
+			   struct serial_struct __user *newinfo)
+{
+	struct serial_struct new_serial;
+	unsigned int closing_wait, close_delay;
+	int retval = 0;
+
+	if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
+		return -EFAULT;
+
+	close_delay = new_serial.close_delay * 10;
+	closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+			ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+
+	mutex_lock(&port->port.mutex);
+
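+	/* Changing the close timing parameters requires CAP_SYS_ADMIN */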
+	if (!capable(CAP_SYS_ADMIN)) {
+		if ((close_delay != port->port.close_delay) ||
+		    (closing_wait != port->port.closing_wait))
+			retval = -EPERM;
+		else
+			retval = -EOPNOTSUPP;
+	} else {
+		port->port.close_delay  = close_delay;
+		port->port.closing_wait = closing_wait;
+	}
+
+	mutex_unlock(&port->port.mutex);
+	return retval;
+}
+
+int usb_wwan_ioctl(struct tty_struct *tty, struct file *file,
+		   unsigned int cmd, unsigned long arg)
+{
+	struct usb_serial_port *port = tty->driver_data;
+
+	dbg("%s cmd 0x%04x", __func__, cmd);
+
+	switch (cmd) {
+	case TIOCGSERIAL:
+		return get_serial_info(port,
+				       (struct serial_struct __user *) arg);
+	case TIOCSSERIAL:
+		return set_serial_info(port,
+				       (struct serial_struct __user *) arg);
+	default:
+		break;
+	}
+
+	dbg("%s arg not supported", __func__);
+
+	return -ENOIOCTLCMD;
+}
+EXPORT_SYMBOL(usb_wwan_ioctl);
+
 /* Write */
 int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
 		   const unsigned char *buf, int count)
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 339fac3..23f0dd9 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -49,14 +49,17 @@
 	__u8 cdb[16];	/* XXX: Overflow-checking tools may misunderstand */
 };
 
+/*
+ * Also used for the Read Ready and Write Ready IUs since they have the
+ * same first four bytes
+ */
 struct sense_iu {
 	__u8 iu_id;
 	__u8 rsvd1;
 	__be16 tag;
 	__be16 status_qual;
 	__u8 status;
-	__u8 service_response;
-	__u8 rsvd8[6];
+	__u8 rsvd7[7];
 	__be16 len;
 	__u8 sense[SCSI_SENSE_BUFFERSIZE];
 };
@@ -97,8 +100,8 @@
 };
 
 enum {
-	ALLOC_SENSE_URB		= (1 << 0),
-	SUBMIT_SENSE_URB	= (1 << 1),
+	ALLOC_STATUS_URB	= (1 << 0),
+	SUBMIT_STATUS_URB	= (1 << 1),
 	ALLOC_DATA_IN_URB	= (1 << 2),
 	SUBMIT_DATA_IN_URB	= (1 << 3),
 	ALLOC_DATA_OUT_URB	= (1 << 4),
@@ -112,7 +115,7 @@
 	unsigned int state;
 	unsigned int stream;
 	struct urb *cmd_urb;
-	struct urb *sense_urb;
+	struct urb *status_urb;
 	struct urb *data_in_urb;
 	struct urb *data_out_urb;
 	struct list_head list;
@@ -138,7 +141,7 @@
 		struct scsi_pointer *scp = (void *)cmdinfo;
 		struct scsi_cmnd *cmnd = container_of(scp,
 							struct scsi_cmnd, SCp);
-		uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_KERNEL);
+		uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_NOIO);
 	}
 }
 
@@ -204,7 +207,7 @@
 	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
 	int err;
 
-	cmdinfo->state = direction | SUBMIT_SENSE_URB;
+	cmdinfo->state = direction | SUBMIT_STATUS_URB;
 	err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
 	if (err) {
 		spin_lock(&uas_work_lock);
@@ -294,7 +297,7 @@
 	if (!urb)
 		goto out;
 
-	iu = kmalloc(sizeof(*iu), gfp);
+	iu = kzalloc(sizeof(*iu), gfp);
 	if (!iu)
 		goto free;
 
@@ -325,7 +328,7 @@
 	if (len < 0)
 		len = 0;
 	len = ALIGN(len, 4);
-	iu = kmalloc(sizeof(*iu) + len, gfp);
+	iu = kzalloc(sizeof(*iu) + len, gfp);
 	if (!iu)
 		goto free;
 
@@ -357,21 +360,21 @@
 {
 	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
 
-	if (cmdinfo->state & ALLOC_SENSE_URB) {
-		cmdinfo->sense_urb = uas_alloc_sense_urb(devinfo, gfp, cmnd,
-							cmdinfo->stream);
-		if (!cmdinfo->sense_urb)
+	if (cmdinfo->state & ALLOC_STATUS_URB) {
+		cmdinfo->status_urb = uas_alloc_sense_urb(devinfo, gfp, cmnd,
+							  cmdinfo->stream);
+		if (!cmdinfo->status_urb)
 			return SCSI_MLQUEUE_DEVICE_BUSY;
-		cmdinfo->state &= ~ALLOC_SENSE_URB;
+		cmdinfo->state &= ~ALLOC_STATUS_URB;
 	}
 
-	if (cmdinfo->state & SUBMIT_SENSE_URB) {
-		if (usb_submit_urb(cmdinfo->sense_urb, gfp)) {
+	if (cmdinfo->state & SUBMIT_STATUS_URB) {
+		if (usb_submit_urb(cmdinfo->status_urb, gfp)) {
 			scmd_printk(KERN_INFO, cmnd,
 					"sense urb submission failure\n");
 			return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
-		cmdinfo->state &= ~SUBMIT_SENSE_URB;
+		cmdinfo->state &= ~SUBMIT_STATUS_URB;
 	}
 
 	if (cmdinfo->state & ALLOC_DATA_IN_URB) {
@@ -440,7 +443,7 @@
 
 	BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
 
-	if (!cmdinfo->sense_urb && sdev->current_cmnd)
+	if (!cmdinfo->status_urb && sdev->current_cmnd)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 
 	if (blk_rq_tagged(cmnd->request)) {
@@ -452,7 +455,7 @@
 
 	cmnd->scsi_done = done;
 
-	cmdinfo->state = ALLOC_SENSE_URB | SUBMIT_SENSE_URB |
+	cmdinfo->state = ALLOC_STATUS_URB | SUBMIT_STATUS_URB |
 			ALLOC_CMD_URB | SUBMIT_CMD_URB;
 
 	switch (cmnd->sc_data_direction) {
@@ -475,8 +478,8 @@
 	err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC);
 	if (err) {
 		/* If we did nothing, give up now */
-		if (cmdinfo->state & SUBMIT_SENSE_URB) {
-			usb_free_urb(cmdinfo->sense_urb);
+		if (cmdinfo->state & SUBMIT_STATUS_URB) {
+			usb_free_urb(cmdinfo->status_urb);
 			return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
 		spin_lock(&uas_work_lock);
@@ -578,6 +581,34 @@
 };
 MODULE_DEVICE_TABLE(usb, uas_usb_ids);
 
+static int uas_is_interface(struct usb_host_interface *intf)
+{
+	return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE &&
+		intf->desc.bInterfaceSubClass == USB_SC_SCSI &&
+		intf->desc.bInterfaceProtocol == USB_PR_UAS);
+}
+
+static int uas_switch_interface(struct usb_device *udev,
+						struct usb_interface *intf)
+{
+	int i;
+
+	if (uas_is_interface(intf->cur_altsetting))
+		return 0;
+
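+	/* Look for an altsetting that implements the UAS protocol and switch to it */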
+	for (i = 0; i < intf->num_altsetting; i++) {
+		struct usb_host_interface *alt = &intf->altsetting[i];
+		if (alt == intf->cur_altsetting)
+			continue;
+		if (uas_is_interface(alt))
+			return usb_set_interface(udev,
+						alt->desc.bInterfaceNumber,
+						alt->desc.bAlternateSetting);
+	}
+
+	return -ENODEV;
+}
+
 static void uas_configure_endpoints(struct uas_dev_info *devinfo)
 {
 	struct usb_host_endpoint *eps[4] = { };
@@ -651,13 +682,8 @@
 	struct uas_dev_info *devinfo;
 	struct usb_device *udev = interface_to_usbdev(intf);
 
-	if (id->bInterfaceProtocol == 0x50) {
-		int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-/* XXX: Shouldn't assume that 1 is the alternative we want */
-		int ret = usb_set_interface(udev, ifnum, 1);
-		if (ret)
-			return -ENODEV;
-	}
+	if (uas_switch_interface(udev, intf))
+		return -ENODEV;
 
 	devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
 	if (!devinfo)
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7b1d81..8cb9d80 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -49,7 +49,7 @@
  *
  *  USB Stack port number    4 (1 based)
  *  WUSB code port index     3 (0 based)
- *  USB Addresss             5 (2 based -- 0 is for default, 1 for root hub)
+ *  USB Address              5 (2 based -- 0 is for default, 1 for root hub)
  *
  *  Now, because we don't use the concept as default address exactly
  *  like the (wired) USB code does, we need to kind of skip it. So we
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
index f2eb4d8..d5de5e1 100644
--- a/drivers/uwb/i1480/i1480-est.c
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -91,7 +91,7 @@
  *
  * [so we are loaded when this kind device is connected]
  */
-static struct usb_device_id i1480_est_id_table[] = {
+static struct usb_device_id __used i1480_est_id_table[] = {
 	{ USB_DEVICE(0x8086, 0xdf3b), },
 	{ USB_DEVICE(0x8086, 0x0c3b), },
 	{ },
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
index 43ea998..ccd2184 100644
--- a/drivers/uwb/umc-dev.c
+++ b/drivers/uwb/umc-dev.c
@@ -54,11 +54,8 @@
 
 	err = request_resource(umc->resource.parent, &umc->resource);
 	if (err < 0) {
-		dev_err(&umc->dev, "can't allocate resource range "
-			"%016Lx to %016Lx: %d\n",
-			(unsigned long long)umc->resource.start,
-			(unsigned long long)umc->resource.end,
-			err);
+		dev_err(&umc->dev, "can't allocate resource range %pR: %d\n",
+			&umc->resource, err);
 		goto error_request_resource;
 	}
 
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
index 7349558..70a004a 100644
--- a/drivers/uwb/whc-rc.c
+++ b/drivers/uwb/whc-rc.c
@@ -449,7 +449,7 @@
 }
 
 /* PCI device ID's that we handle [so it gets loaded] */
-static struct pci_device_id whcrc_id_table[] = {
+static struct pci_device_id __used whcrc_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
 	{ /* empty last entry */ }
 };
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38244f5..ade0568 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -97,22 +97,26 @@
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+				unsigned seq)
+{
+	int left;
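+	/* Signed difference copes with sequence counter wrap; done_seq is read under work_lock */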
+	spin_lock_irq(&dev->work_lock);
+	left = seq - work->done_seq;
+	spin_unlock_irq(&dev->work_lock);
+	return left <= 0;
+}
+
 static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
-	int left;
 	int flushing;
 
 	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
 	spin_unlock_irq(&dev->work_lock);
-	wait_event(work->done, ({
-		   spin_lock_irq(&dev->work_lock);
-		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&dev->work_lock);
-		   left;
-	}));
+	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
 	spin_unlock_irq(&dev->work_lock);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 55dc6fb..d916ac0 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -11,6 +11,13 @@
 config HAVE_FB_IMX
 	bool
 
+config SH_MIPI_DSI
+	tristate
+	depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
+
+config SH_LCD_MIPI_DSI
+	bool
+
 source "drivers/char/agp/Kconfig"
 
 source "drivers/gpu/vga/Kconfig"
@@ -414,7 +421,7 @@
 	  Y here.
 
 config FB_IMX
-	tristate "Motorola i.MX LCD support"
+	tristate "Freescale i.MX LCD support"
 	depends on FB && (HAVE_FB_IMX || ARCH_MX1 || ARCH_MX2)
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
@@ -1273,7 +1280,7 @@
 	  module will be called matroxfb.
 
 	  You can pass several parameters to the driver at boot time or at
-	  module load time. The parameters look like "video=matrox:XXX", and
+	  module load time. The parameters look like "video=matroxfb:XXX", and
 	  are described in <file:Documentation/fb/matroxfb.txt>.
 
 config FB_MATROX_MILLENIUM
@@ -1990,13 +1997,6 @@
 
 	  If unsure, say N.
 
-config SH_MIPI_DSI
-	tristate
-	depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
-
-config SH_LCD_MIPI_DSI
-	bool
-
 config FB_SH_MOBILE_LCDC
 	tristate "SuperH Mobile LCDC framebuffer support"
 	depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8dce251..bac16345 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -111,7 +111,7 @@
 	return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
 }
 
-static struct backlight_ops atmel_lcdc_bl_ops = {
+static const struct backlight_ops atmel_lcdc_bl_ops = {
 	.update_status = atmel_bl_update_status,
 	.get_brightness = atmel_bl_get_brightness,
 };
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 34a0851..dd9de2e 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1786,7 +1786,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops aty128_bl_data = {
+static const struct backlight_ops aty128_bl_data = {
 	.get_brightness	= aty128_bl_get_brightness,
 	.update_status	= aty128_bl_update_status,
 };
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 5a3ce3a..767ab4f 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2221,7 +2221,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops aty_bl_data = {
+static const struct backlight_ops aty_bl_data = {
 	.get_brightness = aty_bl_get_brightness,
 	.update_status	= aty_bl_update_status,
 };
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 256966e..9b811dd 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -128,7 +128,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops radeon_bl_data = {
+static const struct backlight_ops radeon_bl_data = {
 	.get_brightness = radeon_bl_get_brightness,
 	.update_status	= radeon_bl_update_status,
 };
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 38ffc3f..c789c46 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -155,7 +155,7 @@
 	return -EINVAL;
 }
 
-static struct backlight_ops pm860x_backlight_ops = {
+static const struct backlight_ops pm860x_backlight_ops = {
 	.options	= BL_CORE_SUSPENDRESUME,
 	.update_status	= pm860x_backlight_update_status,
 	.get_brightness	= pm860x_backlight_get_brightness,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index c67801e..98ad3e5 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -25,7 +25,7 @@
 struct l4f00242t03_priv {
 	struct spi_device	*spi;
 	struct lcd_device	*ld;
-	int lcd_on:1;
+	int lcd_state;
 	struct regulator *io_reg;
 	struct regulator *core_reg;
 };
@@ -62,11 +62,36 @@
 		regulator_enable(priv->core_reg);
 	}
 
+	l4f00242t03_reset(pdata->reset_gpio);
+
 	gpio_set_value(pdata->data_enable_gpio, 1);
 	msleep(60);
 	spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
 }
 
+static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
+{
+	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+	dev_dbg(&spi->dev, "Powering down LCD\n");
+
+	gpio_set_value(pdata->data_enable_gpio, 0);
+
+	if (priv->io_reg)
+		regulator_disable(priv->io_reg);
+
+	if (priv->core_reg)
+		regulator_disable(priv->core_reg);
+}
+
+static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
+{
+	struct l4f00242t03_priv *priv = lcd_get_data(ld);
+
+	return priv->lcd_state;
+}
+
 static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
 {
 	struct l4f00242t03_priv *priv = lcd_get_data(ld);
@@ -79,35 +104,54 @@
 	const u16 disoff = 0x28;
 
 	if (power <= FB_BLANK_NORMAL) {
-		if (priv->lcd_on)
-			return 0;
+		if (priv->lcd_state <= FB_BLANK_NORMAL) {
+			/* Do nothing, the LCD is running */
+		} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+			dev_dbg(&spi->dev, "Resuming LCD\n");
 
-		dev_dbg(&spi->dev, "turning on LCD\n");
+			spi_write(spi, (const u8 *)&slpout, sizeof(u16));
+			msleep(60);
+			spi_write(spi, (const u8 *)&dison, sizeof(u16));
+		} else {
+			/* priv->lcd_state == FB_BLANK_POWERDOWN */
+			l4f00242t03_lcd_init(spi);
+			priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+			l4f00242t03_lcd_power_set(priv->ld, power);
+		}
+	} else if (power < FB_BLANK_POWERDOWN) {
+		if (priv->lcd_state <= FB_BLANK_NORMAL) {
+			/* Send the display in standby */
+			dev_dbg(&spi->dev, "Standby the LCD\n");
 
-		spi_write(spi, (const u8 *)&slpout, sizeof(u16));
-		msleep(60);
-		spi_write(spi, (const u8 *)&dison, sizeof(u16));
-
-		priv->lcd_on = 1;
+			spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+			msleep(60);
+			spi_write(spi, (const u8 *)&slpin, sizeof(u16));
+		} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+			/* Do nothing, the LCD is already in standby */
+		} else {
+			/* priv->lcd_state == FB_BLANK_POWERDOWN */
+			l4f00242t03_lcd_init(spi);
+			priv->lcd_state = FB_BLANK_UNBLANK;
+			l4f00242t03_lcd_power_set(ld, power);
+		}
 	} else {
-		if (!priv->lcd_on)
-			return 0;
-
-		dev_dbg(&spi->dev, "turning off LCD\n");
-
-		spi_write(spi, (const u8 *)&disoff, sizeof(u16));
-		msleep(60);
-		spi_write(spi, (const u8 *)&slpin, sizeof(u16));
-
-		priv->lcd_on = 0;
+		/* power == FB_BLANK_POWERDOWN */
+		if (priv->lcd_state != FB_BLANK_POWERDOWN) {
+			/* Clear the screen before shutting down */
+			spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+			msleep(60);
+			l4f00242t03_lcd_powerdown(spi);
+		}
 	}
 
+	priv->lcd_state = power;
+
 	return 0;
 }
 
 static struct lcd_ops l4f_ops = {
 	.set_power	= l4f00242t03_lcd_power_set,
-	.get_power	= NULL,
+	.get_power	= l4f00242t03_lcd_power_get,
 };
 
 static int __devinit l4f00242t03_probe(struct spi_device *spi)
@@ -185,9 +229,9 @@
 	}
 
 	/* Init the LCD */
-	l4f00242t03_reset(pdata->reset_gpio);
 	l4f00242t03_lcd_init(spi);
-	l4f00242t03_lcd_power_set(priv->ld, 1);
+	priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK);
 
 	dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
 
@@ -214,9 +258,11 @@
 	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
 	struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
 
-	l4f00242t03_lcd_power_set(priv->ld, 0);
+	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(priv->ld);
 
+	dev_set_drvdata(&spi->dev, NULL);
+
 	gpio_free(pdata->data_enable_gpio);
 	gpio_free(pdata->reset_gpio);
 
@@ -230,6 +276,15 @@
 	return 0;
 }
 
+static void l4f00242t03_shutdown(struct spi_device *spi)
+{
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+	if (priv)
+		l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
+
+}
+
 static struct spi_driver l4f00242t03_driver = {
 	.driver = {
 		.name	= "l4f00242t03",
@@ -237,6 +292,7 @@
 	},
 	.probe		= l4f00242t03_probe,
 	.remove		= __devexit_p(l4f00242t03_remove),
+	.shutdown	= l4f00242t03_shutdown,
 };
 
 static __init int l4f00242t03_init(void)
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index b2b2c7b..209acc1 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -92,7 +92,7 @@
 	return ret;
 }
 
-static struct backlight_ops max8925_backlight_ops = {
+static const struct backlight_ops max8925_backlight_ops = {
 	.options	= BL_CORE_SUSPENDRESUME,
 	.update_status	= max8925_backlight_update_status,
 	.get_brightness	= max8925_backlight_get_brightness,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 915448e..c97491b 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -375,7 +375,8 @@
 	u16 saved1, saved2;
 	volatile u16 *p;
 
-	if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) {
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB ||
+	    screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
 	      no_vga:
 #ifdef CONFIG_DUMMY_CONSOLE
 		conswitchp = &dummy_con;
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 6b93ef9..8040001 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -75,7 +75,7 @@
 		return 0;
 
 	/* Kill off the delayed work */
-	cancel_rearming_delayed_work(&info->deferred_work);
+	cancel_delayed_work_sync(&info->deferred_work);
 
 	/* Run it immediately */
 	return schedule_delayed_work(&info->deferred_work, 0);
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 1ab2c25..69bd4a5 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -974,6 +974,6 @@
 module_init(imxfb_init);
 module_exit(imxfb_cleanup);
 
-MODULE_DESCRIPTION("Motorola i.MX framebuffer driver");
+MODULE_DESCRIPTION("Freescale i.MX framebuffer driver");
 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 052dd9f..a082deb 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1247,46 +1247,46 @@
 };
 
 /* initialized by setup, see explanation at end of file (search for MODULE_PARM_DESC) */
-static unsigned int mem;		/* "matrox:mem:xxxxxM" */
+static unsigned int mem;		/* "matroxfb:mem:xxxxxM" */
 static int option_precise_width = 1;	/* cannot be changed, option_precise_width==0 must imply noaccel */
-static int inv24;			/* "matrox:inv24" */
-static int cross4MB = -1;		/* "matrox:cross4MB" */
-static int disabled;			/* "matrox:disabled" */
-static int noaccel;			/* "matrox:noaccel" */
-static int nopan;			/* "matrox:nopan" */
-static int no_pci_retry;		/* "matrox:nopciretry" */
-static int novga;			/* "matrox:novga" */
-static int nobios;			/* "matrox:nobios" */
-static int noinit = 1;			/* "matrox:init" */
-static int inverse;			/* "matrox:inverse" */
-static int sgram;			/* "matrox:sgram" */
+static int inv24;			/* "matroxfb:inv24" */
+static int cross4MB = -1;		/* "matroxfb:cross4MB" */
+static int disabled;			/* "matroxfb:disabled" */
+static int noaccel;			/* "matroxfb:noaccel" */
+static int nopan;			/* "matroxfb:nopan" */
+static int no_pci_retry;		/* "matroxfb:nopciretry" */
+static int novga;			/* "matroxfb:novga" */
+static int nobios;			/* "matroxfb:nobios" */
+static int noinit = 1;			/* "matroxfb:init" */
+static int inverse;			/* "matroxfb:inverse" */
+static int sgram;			/* "matroxfb:sgram" */
 #ifdef CONFIG_MTRR
-static int mtrr = 1;			/* "matrox:nomtrr" */
+static int mtrr = 1;			/* "matroxfb:nomtrr" */
 #endif
-static int grayscale;			/* "matrox:grayscale" */
-static int dev = -1;			/* "matrox:dev:xxxxx" */
-static unsigned int vesa = ~0;		/* "matrox:vesa:xxxxx" */
-static int depth = -1;			/* "matrox:depth:xxxxx" */
-static unsigned int xres;		/* "matrox:xres:xxxxx" */
-static unsigned int yres;		/* "matrox:yres:xxxxx" */
-static unsigned int upper = ~0;		/* "matrox:upper:xxxxx" */
-static unsigned int lower = ~0;		/* "matrox:lower:xxxxx" */
-static unsigned int vslen;		/* "matrox:vslen:xxxxx" */
-static unsigned int left = ~0;		/* "matrox:left:xxxxx" */
-static unsigned int right = ~0;		/* "matrox:right:xxxxx" */
-static unsigned int hslen;		/* "matrox:hslen:xxxxx" */
-static unsigned int pixclock;		/* "matrox:pixclock:xxxxx" */
-static int sync = -1;			/* "matrox:sync:xxxxx" */
-static unsigned int fv;			/* "matrox:fv:xxxxx" */
-static unsigned int fh;			/* "matrox:fh:xxxxxk" */
-static unsigned int maxclk;		/* "matrox:maxclk:xxxxM" */
-static int dfp;				/* "matrox:dfp */
-static int dfp_type = -1;		/* "matrox:dfp:xxx */
-static int memtype = -1;		/* "matrox:memtype:xxx" */
-static char outputs[8];			/* "matrox:outputs:xxx" */
+static int grayscale;			/* "matroxfb:grayscale" */
+static int dev = -1;			/* "matroxfb:dev:xxxxx" */
+static unsigned int vesa = ~0;		/* "matroxfb:vesa:xxxxx" */
+static int depth = -1;			/* "matroxfb:depth:xxxxx" */
+static unsigned int xres;		/* "matroxfb:xres:xxxxx" */
+static unsigned int yres;		/* "matroxfb:yres:xxxxx" */
+static unsigned int upper = ~0;		/* "matroxfb:upper:xxxxx" */
+static unsigned int lower = ~0;		/* "matroxfb:lower:xxxxx" */
+static unsigned int vslen;		/* "matroxfb:vslen:xxxxx" */
+static unsigned int left = ~0;		/* "matroxfb:left:xxxxx" */
+static unsigned int right = ~0;		/* "matroxfb:right:xxxxx" */
+static unsigned int hslen;		/* "matroxfb:hslen:xxxxx" */
+static unsigned int pixclock;		/* "matroxfb:pixclock:xxxxx" */
+static int sync = -1;			/* "matroxfb:sync:xxxxx" */
+static unsigned int fv;			/* "matroxfb:fv:xxxxx" */
+static unsigned int fh;			/* "matroxfb:fh:xxxxxk" */
+static unsigned int maxclk;		/* "matroxfb:maxclk:xxxxM" */
+static int dfp;				/* "matroxfb:dfp */
+static int dfp_type = -1;		/* "matroxfb:dfp:xxx */
+static int memtype = -1;		/* "matroxfb:memtype:xxx" */
+static char outputs[8];			/* "matroxfb:outputs:xxx" */
 
 #ifndef MODULE
-static char videomode[64];		/* "matrox:mode:xxxxx" or "matrox:xxxxx" */
+static char videomode[64];		/* "matroxfb:mode:xxxxx" or "matroxfb:xxxxx" */
 #endif
 
 static int matroxfb_getmemory(struct matrox_fb_info *minfo,
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index d2bb365..48c3ea8 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -32,300 +32,320 @@
 const char *fb_mode_option;
 EXPORT_SYMBOL_GPL(fb_mode_option);
 
-    /*
-     *  Standard video mode definitions (taken from XFree86)
-     */
+/*
+ *  Standard video mode definitions (taken from XFree86)
+ */
 
 static const struct fb_videomode modedb[] = {
-    {
+
 	/* 640x400 @ 70 Hz, 31.5 kHz hsync */
-	NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 60 Hz, 31.5 kHz hsync */
-	NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 800x600 @ 56 Hz, 35.15 kHz hsync */
-	NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
-	NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
-	0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 640x400 @ 85 Hz, 37.86 kHz hsync */
-	NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
-	FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
+		FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 72 Hz, 36.5 kHz hsync */
-	NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 75 Hz, 37.50 kHz hsync */
-	NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 800x600 @ 60 Hz, 37.8 kHz hsync */
-	NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 85 Hz, 43.27 kHz hsync */
-	NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
-	NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
-	0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 800x600 @ 72 Hz, 48.0 kHz hsync */
-	NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 60 Hz, 48.4 kHz hsync */
-	NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 640x480 @ 100 Hz, 53.01 kHz hsync */
-	NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 60 Hz, 53.5 kHz hsync */
-	NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 800x600 @ 85 Hz, 55.84 kHz hsync */
-	NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 70 Hz, 56.5 kHz hsync */
-	NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
-	NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
-	0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 800x600 @ 100 Hz, 64.02 kHz hsync */
-	NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 76 Hz, 62.5 kHz hsync */
-	NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 70 Hz, 62.4 kHz hsync */
-	NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
-	NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1400x1050 @ 60Hz, 63.9 kHz hsync */
-	NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
-	0, FB_VMODE_NONINTERLACED   	
-    }, {
+	{ NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
-	NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
-        NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 85 Hz, 70.24 kHz hsync */
-	NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 78 Hz, 70.8 kHz hsync */
-	NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
-	NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 60Hz, 75.00 kHz hsync */
-	NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 84 Hz, 76.0 kHz hsync */
-	NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
-	NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1024x768 @ 100Hz, 80.21 kHz hsync */
-	NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
-	NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
-	NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x864 @ 100 Hz, 89.62 kHz hsync */
-	NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
-	NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
-	NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
-	NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
-	NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
-	NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1800x1440 @ 64Hz, 96.15 kHz hsync  */
-	NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1800x1440 @ 70Hz, 104.52 kHz hsync  */
-	NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 512x384 @ 78 Hz, 31.50 kHz hsync */
-	NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 512x384 @ 85 Hz, 34.38 kHz hsync */
-	NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
-	NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
-	NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 320x240 @ 72 Hz, 36.5 kHz hsync */
-	NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
-	NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 400x300 @ 60 Hz, 37.8 kHz hsync */
-	NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 400x300 @ 72 Hz, 48.0 kHz hsync */
-	NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
-	NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 60 Hz, 37.8 kHz hsync */
-	NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 63 Hz, 39.6 kHz hsync */
-	NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 480x300 @ 72 Hz, 48.0 kHz hsync */
-	NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
-	0, FB_VMODE_DOUBLE
-    }, {
+	{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
+		FB_VMODE_DOUBLE },
+
 	/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
-	NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
-	FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
-	FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
-	NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
-	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-    }, {
+	{ NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
-	NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
-	0, FB_VMODE_NONINTERLACED
-   }, {
+	{ NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, 0,
+		FB_VMODE_NONINTERLACED },
+
 	/* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
-	NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
-	0, FB_VMODE_NONINTERLACED
-    }, {
-       /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
-       NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
-       0, FB_VMODE_INTERLACED
-    }, {
-       /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
-       NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
-       0, FB_VMODE_INTERLACED
-    }, {
+	{ NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 0,
+		FB_VMODE_NONINTERLACED },
+
+	/* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+	{ NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, 0,
+		FB_VMODE_INTERLACED },
+
+	/* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
+	{ NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, 0,
+		FB_VMODE_INTERLACED },
+
 	/* 864x480 @ 60 Hz, 35.15 kHz hsync */
-	NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0,
-	0, FB_VMODE_NONINTERLACED
-    },
+	{ NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0, 0,
+		FB_VMODE_NONINTERLACED },
 };
 
 #ifdef CONFIG_FB_MODE_HELPERS
 const struct fb_videomode cea_modes[64] = {
 	/* #1: 640x480p@59.94/60Hz */
 	[1] = {
-		NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #3: 720x480p@59.94/60Hz */
 	[3] = {
-		NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #5: 1920x1080i@59.94/60Hz */
 	[5] = {
 		NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_INTERLACED, 0,
 	},
 	/* #7: 720(1440)x480iH@59.94/60Hz */
 	[7] = {
-		NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0, FB_VMODE_INTERLACED, 0,
+		NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+		FB_VMODE_INTERLACED, 0,
 	},
 	/* #9: 720(1440)x240pH@59.94/60Hz */
 	[9] = {
-		NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #18: 720x576pH@50Hz */
 	[18] = {
-		NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #19: 1280x720p@50Hz */
 	[19] = {
 		NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #20: 1920x1080i@50Hz */
 	[20] = {
 		NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_INTERLACED, 0,
 	},
 	/* #32: 1920x1080p@23.98/24Hz */
 	[32] = {
 		NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, 0,
+		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 	/* #35: (2880)x480p4x@59.94/60Hz */
 	[35] = {
-		NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0, FB_VMODE_NONINTERLACED, 0,
+		NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
+		FB_VMODE_NONINTERLACED, 0,
 	},
 };
 
@@ -340,10 +360,10 @@
 	{ NULL, 85, 721, 400, 28169, 108, 36, 42, 01, 72, 3,
 	  FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 3 640x480-60 VESA */
-	{ NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2, 
+	{ NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2,
 	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 4 640x480-72 VESA */
-	{ NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2, 
+	{ NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2,
 	  0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 5 640x480-75 VESA */
 	{ NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3,
@@ -426,7 +446,7 @@
 	  FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	  FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 26 1600x1200-75 VESA */
-	{ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, 
+	{ NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
 	  FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	  FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 	/* 27 1600x1200-85 VESA */
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 81687ed..62498bd 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -15,6 +15,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -597,9 +598,9 @@
 	}
 
 	fbi->clk = clk_get(&pdev->dev, NULL);
-	if (!fbi->clk || IS_ERR(fbi->clk)) {
+	if (IS_ERR(fbi->clk)) {
 		printk(KERN_ERR "nuc900-lcd:failed to get lcd clock source\n");
-		ret = -ENOENT;
+		ret = PTR_ERR(fbi->clk);
 		goto release_irq;
 	}
 
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 2fb552a..6aac6d1 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -87,7 +87,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops nvidia_bl_ops = {
+static const struct backlight_ops nvidia_bl_ops = {
 	.get_brightness = nvidia_bl_get_brightness,
 	.update_status	= nvidia_bl_update_status,
 };
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index 64dcc74..90e3bdd 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -396,7 +396,7 @@
 static void mipid_esd_stop_check(struct mipid_device *md)
 {
 	if (md->esd_check != NULL)
-		cancel_rearming_delayed_workqueue(md->esd_wq, &md->esd_work);
+		cancel_delayed_work_sync(&md->esd_work);
 }
 
 static void mipid_esd_work(struct work_struct *work)
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 12327bb..940cab3 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -1,11 +1,13 @@
 menu "OMAP2/3 Display Device Drivers"
         depends on OMAP2_DSS
 
-config PANEL_GENERIC
-        tristate "Generic Panel"
+config PANEL_GENERIC_DPI
+        tristate "Generic DPI Panel"
         help
-	  Generic panel driver.
-	  Used for DVI output for Beagle and OMAP3 SDP.
+	  Generic DPI panel driver.
+	  Supports DVI output for Beagle and OMAP3 SDP.
+	  Supports LCD panels used in TI's SDP3430 and EVM boards,
+	  OMAP3517 EVM boards and CM-T35.
 
 config PANEL_SHARP_LS037V7DW01
         tristate "Sharp LS037V7DW01 LCD Panel"
@@ -14,11 +16,12 @@
         help
           LCD Panel used in TI's SDP3430 and EVM boards
 
-config PANEL_SHARP_LQ043T1DG01
-        tristate "Sharp LQ043T1DG01 LCD Panel"
-        depends on OMAP2_DSS
-        help
-          LCD Panel used in TI's OMAP3517 EVM boards
+config PANEL_NEC_NL8048HL11_01B
+	tristate "NEC NL8048HL11-01B Panel"
+	depends on OMAP2_DSS
+	help
+		The NEC NL8048HL11-01B panel is a TFT LCD
+		used in the Zoom2/3/3630 SDP boards.
 
 config PANEL_TAAL
         tristate "Taal DSI Panel"
@@ -26,12 +29,6 @@
         help
           Taal DSI command mode panel from TPO.
 
-config PANEL_TOPPOLY_TDO35S
-        tristate "Toppoly TDO35S LCD Panel support"
-        depends on OMAP2_DSS
-        help
-          LCD Panel used in CM-T35
-
 config PANEL_TPO_TD043MTEA1
         tristate "TPO TD043MTEA1 LCD Panel"
         depends on OMAP2_DSS && SPI
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index aa38609..861f025 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,8 +1,7 @@
-obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
+obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
 obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
+obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
 
 obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
-obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
 obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
 obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
new file mode 100644
index 0000000..07eb30e
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -0,0 +1,365 @@
+/*
+ * Generic DPI Panels support
+ *
+ * Copyright (C) 2010 Canonical Ltd.
+ * Author: Bryan Wu <bryan.wu@canonical.com>
+ *
+ * LCD panel driver for Sharp LQ043T1DG01
+ *
+ * Copyright (C) 2009 Texas Instruments Inc
+ * Author: Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * LCD panel driver for Toppoly TDO35S
+ *
+ * Copyright (C) 2009 CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <plat/panel-generic-dpi.h>
+
+struct panel_config {
+	struct omap_video_timings timings;
+
+	int acbi;	/* ac-bias pin transitions per interrupt */
+	/* Unit: line clocks */
+	int acb;	/* ac-bias pin frequency */
+
+	enum omap_panel_config config;
+
+	int power_on_delay;
+	int power_off_delay;
+
+	/*
+	 * Used to match the device to a panel configuration
+	 * when using the generic panel driver
+	 */
+	const char *name;
+};
+
+/* Panel configurations */
+static struct panel_config generic_dpi_panels[] = {
+	/* Generic Panel */
+	{
+		{
+			.x_res		= 640,
+			.y_res		= 480,
+
+			.pixel_clock	= 23500,
+
+			.hfp		= 48,
+			.hsw		= 32,
+			.hbp		= 80,
+
+			.vfp		= 3,
+			.vsw		= 4,
+			.vbp		= 7,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x0,
+		.config			= OMAP_DSS_LCD_TFT,
+		.power_on_delay		= 0,
+		.power_off_delay	= 0,
+		.name			= "generic",
+	},
+
+	/* Sharp LQ043T1DG01 */
+	{
+		{
+			.x_res		= 480,
+			.y_res		= 272,
+
+			.pixel_clock	= 9000,
+
+			.hsw		= 42,
+			.hfp		= 3,
+			.hbp		= 2,
+
+			.vsw		= 11,
+			.vfp		= 3,
+			.vbp		= 2,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x0,
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
+		.power_on_delay		= 50,
+		.power_off_delay	= 100,
+		.name			= "sharp_lq",
+	},
+
+	/* Sharp LS037V7DW01 */
+	{
+		{
+			.x_res		= 480,
+			.y_res		= 640,
+
+			.pixel_clock	= 19200,
+
+			.hsw		= 2,
+			.hfp		= 1,
+			.hbp		= 28,
+
+			.vsw		= 1,
+			.vfp		= 1,
+			.vbp		= 1,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x28,
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+						OMAP_DSS_LCD_IHS,
+		.power_on_delay		= 50,
+		.power_off_delay	= 100,
+		.name			= "sharp_ls",
+	},
+
+	/* Toppoly TDO35S */
+	{
+		{
+			.x_res		= 480,
+			.y_res		= 640,
+
+			.pixel_clock	= 26000,
+
+			.hfp		= 104,
+			.hsw		= 8,
+			.hbp		= 8,
+
+			.vfp		= 4,
+			.vsw		= 2,
+			.vbp		= 2,
+		},
+		.acbi			= 0x0,
+		.acb			= 0x0,
+		.config			= OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+					OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC |
+					OMAP_DSS_LCD_ONOFF,
+		.power_on_delay		= 0,
+		.power_off_delay	= 0,
+		.name			= "toppoly_tdo35s",
+	},
+};
+
+struct panel_drv_data {
+
+	struct omap_dss_device *dssdev;
+
+	struct panel_config *panel_config;
+};
+
+static inline struct panel_generic_dpi_data
+*get_panel_data(const struct omap_dss_device *dssdev)
+{
+	return (struct panel_generic_dpi_data *) dssdev->data;
+}
+
+static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
+{
+	int r;
+	struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+	struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+	struct panel_config *panel_config = drv_data->panel_config;
+
+	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+		return 0;
+
+	r = omapdss_dpi_display_enable(dssdev);
+	if (r)
+		goto err0;
+
+	/* wait a couple of vsyncs before enabling the LCD */
+	if (panel_config->power_on_delay)
+		msleep(panel_config->power_on_delay);
+
+	if (panel_data->platform_enable) {
+		r = panel_data->platform_enable(dssdev);
+		if (r)
+			goto err1;
+	}
+
+	return 0;
+err1:
+	omapdss_dpi_display_disable(dssdev);
+err0:
+	return r;
+}
+
+static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
+{
+	struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+	struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+	struct panel_config *panel_config = drv_data->panel_config;
+
+	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+		return;
+
+	if (panel_data->platform_disable)
+		panel_data->platform_disable(dssdev);
+
+	/* wait a couple of vsyncs after disabling the LCD */
+	if (panel_config->power_off_delay)
+		msleep(panel_config->power_off_delay);
+
+	omapdss_dpi_display_disable(dssdev);
+}
+
+static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
+{
+	struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
+	struct panel_config *panel_config = NULL;
+	struct panel_drv_data *drv_data = NULL;
+	int i;
+
+	dev_dbg(&dssdev->dev, "probe\n");
+
+	if (!panel_data || !panel_data->name)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(generic_dpi_panels); i++) {
+		if (strcmp(panel_data->name, generic_dpi_panels[i].name) == 0) {
+			panel_config = &generic_dpi_panels[i];
+			break;
+		}
+	}
+
+	if (!panel_config)
+		return -EINVAL;
+
+	dssdev->panel.config = panel_config->config;
+	dssdev->panel.timings = panel_config->timings;
+	dssdev->panel.acb = panel_config->acb;
+	dssdev->panel.acbi = panel_config->acbi;
+
+	drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	drv_data->dssdev = dssdev;
+	drv_data->panel_config = panel_config;
+
+	dev_set_drvdata(&dssdev->dev, drv_data);
+
+	return 0;
+}
+
+static void generic_dpi_panel_remove(struct omap_dss_device *dssdev)
+{
+	struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev);
+
+	dev_dbg(&dssdev->dev, "remove\n");
+
+	kfree(drv_data);
+
+	dev_set_drvdata(&dssdev->dev, NULL);
+}
+
+static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	r = generic_dpi_panel_power_on(dssdev);
+	if (r)
+		return r;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+}
+
+static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
+{
+	generic_dpi_panel_power_off(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int generic_dpi_panel_suspend(struct omap_dss_device *dssdev)
+{
+	generic_dpi_panel_power_off(dssdev);
+
+	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+	return 0;
+}
+
+static int generic_dpi_panel_resume(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+
+	r = generic_dpi_panel_power_on(dssdev);
+	if (r)
+		return r;
+
+	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+	return 0;
+}
+
+static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	dpi_set_timings(dssdev, timings);
+}
+
+static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	*timings = dssdev->panel.timings;
+}
+
+static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
+		struct omap_video_timings *timings)
+{
+	return dpi_check_timings(dssdev, timings);
+}
+
+static struct omap_dss_driver dpi_driver = {
+	.probe		= generic_dpi_panel_probe,
+	.remove		= generic_dpi_panel_remove,
+
+	.enable		= generic_dpi_panel_enable,
+	.disable	= generic_dpi_panel_disable,
+	.suspend	= generic_dpi_panel_suspend,
+	.resume		= generic_dpi_panel_resume,
+
+	.set_timings	= generic_dpi_panel_set_timings,
+	.get_timings	= generic_dpi_panel_get_timings,
+	.check_timings	= generic_dpi_panel_check_timings,
+
+	.driver         = {
+		.name   = "generic_dpi_panel",
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init generic_dpi_panel_drv_init(void)
+{
+	return omap_dss_register_driver(&dpi_driver);
+}
+
+static void __exit generic_dpi_panel_drv_exit(void)
+{
+	omap_dss_unregister_driver(&dpi_driver);
+}
+
+module_init(generic_dpi_panel_drv_init);
+module_exit(generic_dpi_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
deleted file mode 100644
index 395a68d..0000000
--- a/drivers/video/omap2/displays/panel-generic.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Generic panel support
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings generic_panel_timings = {
-	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
-	.x_res		= 640,
-	.y_res		= 480,
-	.pixel_clock	= 23500,
-	.hfp		= 48,
-	.hsw		= 32,
-	.hbp		= 80,
-	.vfp		= 3,
-	.vsw		= 4,
-	.vbp		= 7,
-};
-
-static int generic_panel_power_on(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-		return 0;
-
-	r = omapdss_dpi_display_enable(dssdev);
-	if (r)
-		goto err0;
-
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err1;
-	}
-
-	return 0;
-err1:
-	omapdss_dpi_display_disable(dssdev);
-err0:
-	return r;
-}
-
-static void generic_panel_power_off(struct omap_dss_device *dssdev)
-{
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return;
-
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
-	omapdss_dpi_display_disable(dssdev);
-}
-
-static int generic_panel_probe(struct omap_dss_device *dssdev)
-{
-	dssdev->panel.config = OMAP_DSS_LCD_TFT;
-	dssdev->panel.timings = generic_panel_timings;
-
-	return 0;
-}
-
-static void generic_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int generic_panel_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = generic_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void generic_panel_disable(struct omap_dss_device *dssdev)
-{
-	generic_panel_power_off(dssdev);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int generic_panel_suspend(struct omap_dss_device *dssdev)
-{
-	generic_panel_power_off(dssdev);
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-	return 0;
-}
-
-static int generic_panel_resume(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = generic_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void generic_panel_set_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	dpi_set_timings(dssdev, timings);
-}
-
-static void generic_panel_get_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	*timings = dssdev->panel.timings;
-}
-
-static int generic_panel_check_timings(struct omap_dss_device *dssdev,
-		struct omap_video_timings *timings)
-{
-	return dpi_check_timings(dssdev, timings);
-}
-
-static struct omap_dss_driver generic_driver = {
-	.probe		= generic_panel_probe,
-	.remove		= generic_panel_remove,
-
-	.enable		= generic_panel_enable,
-	.disable	= generic_panel_disable,
-	.suspend	= generic_panel_suspend,
-	.resume		= generic_panel_resume,
-
-	.set_timings	= generic_panel_set_timings,
-	.get_timings	= generic_panel_get_timings,
-	.check_timings	= generic_panel_check_timings,
-
-	.driver         = {
-		.name   = "generic_panel",
-		.owner  = THIS_MODULE,
-	},
-};
-
-static int __init generic_panel_drv_init(void)
-{
-	return omap_dss_register_driver(&generic_driver);
-}
-
-static void __exit generic_panel_drv_exit(void)
-{
-	omap_dss_unregister_driver(&generic_driver);
-}
-
-module_init(generic_panel_drv_init);
-module_exit(generic_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
new file mode 100644
index 0000000..925e0fa
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -0,0 +1,325 @@
+/*
+ * Support for NEC-nl8048hl11-01b panel driver
+ *
+ * Copyright (C) 2010 Texas Instruments Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+
+#include <plat/display.h>
+
+#define LCD_XRES		800
+#define LCD_YRES		480
+/*
+ * NEC PIX Clock Ratings
+ * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
+ */
+#define LCD_PIXEL_CLOCK		23800
+
+struct nec_8048_data {
+	struct backlight_device *bl;
+};
+
+static const struct {
+	unsigned char addr;
+	unsigned char dat;
+} nec_8048_init_seq[] = {
+	{ 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
+	{ 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
+	{ 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 },	{ 24, 0x25 },
+	{ 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
+	{ 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F },	{ 38, 0x0F },
+	{ 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 },	{ 43, 0x0F },
+	{ 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F },	{ 48, 0x0F },
+	{ 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
+	{ 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 },	{ 86, 0x14 },
+	{ 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 },	{ 93, 0x0C },
+	{ 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
+	{ 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
+	{ 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
+	{ 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
+	{ 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
+	{ 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
+	{ 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
+	{ 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
+};
+
+/*
+ * The NEC NL8048HL11-01B manual
+ * defines HFP, HSW, HBP, VFP, VSW, VBP as shown below
+ */
+
+static struct omap_video_timings nec_8048_panel_timings = {
+	/* 800 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
+	.x_res		= LCD_XRES,
+	.y_res		= LCD_YRES,
+	.pixel_clock	= LCD_PIXEL_CLOCK,
+	.hfp		= 6,
+	.hsw		= 1,
+	.hbp		= 4,
+	.vfp		= 3,
+	.vsw		= 1,
+	.vbp		= 4,
+};
+
+static int nec_8048_bl_update_status(struct backlight_device *bl)
+{
+	struct omap_dss_device *dssdev = dev_get_drvdata(&bl->dev);
+	int level;
+
+	if (!dssdev->set_backlight)
+		return -EINVAL;
+
+	if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+			bl->props.power == FB_BLANK_UNBLANK)
+		level = bl->props.brightness;
+	else
+		level = 0;
+
+	return dssdev->set_backlight(dssdev, level);
+}
+
+static int nec_8048_bl_get_brightness(struct backlight_device *bl)
+{
+	if (bl->props.fb_blank == FB_BLANK_UNBLANK &&
+			bl->props.power == FB_BLANK_UNBLANK)
+		return bl->props.brightness;
+
+	return 0;
+}
+
+static const struct backlight_ops nec_8048_bl_ops = {
+	.get_brightness	= nec_8048_bl_get_brightness,
+	.update_status	= nec_8048_bl_update_status,
+};
+
+static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
+{
+	struct backlight_device *bl;
+	struct nec_8048_data *necd;
+	struct backlight_properties props;
+	int r;
+
+	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+				OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF |
+				OMAP_DSS_LCD_ONOFF;
+	dssdev->panel.timings = nec_8048_panel_timings;
+
+	necd = kzalloc(sizeof(*necd), GFP_KERNEL);
+	if (!necd)
+		return -ENOMEM;
+
+	dev_set_drvdata(&dssdev->dev, necd);
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 255;
+
+	bl = backlight_device_register("nec-8048", &dssdev->dev, dssdev,
+			&nec_8048_bl_ops, &props);
+	if (IS_ERR(bl)) {
+		r = PTR_ERR(bl);
+		kfree(necd);
+		return r;
+	}
+	necd->bl = bl;
+
+	bl->props.fb_blank = FB_BLANK_UNBLANK;
+	bl->props.power = FB_BLANK_UNBLANK;
+	bl->props.max_brightness = dssdev->max_backlight_level;
+	bl->props.brightness = dssdev->max_backlight_level;
+
+	r = nec_8048_bl_update_status(bl);
+	if (r < 0)
+		dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
+	return 0;
+}
+
+static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
+{
+	struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = necd->bl;
+
+	bl->props.power = FB_BLANK_POWERDOWN;
+	nec_8048_bl_update_status(bl);
+	backlight_device_unregister(bl);
+
+	kfree(necd);
+}
+
+static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+{
+	int r = 0;
+	struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = necd->bl;
+
+	if (dssdev->platform_enable) {
+		r = dssdev->platform_enable(dssdev);
+		if (r)
+			return r;
+	}
+
+	r = nec_8048_bl_update_status(bl);
+	if (r < 0)
+		dev_err(&dssdev->dev, "failed to set lcd brightness\n");
+
+	r = omapdss_dpi_display_enable(dssdev);
+
+	return r;
+}
+
+static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+{
+	struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
+	struct backlight_device *bl = necd->bl;
+
+	omapdss_dpi_display_disable(dssdev);
+
+	bl->props.brightness = 0;
+	nec_8048_bl_update_status(bl);
+
+	if (dssdev->platform_disable)
+		dssdev->platform_disable(dssdev);
+}
+
+static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
+{
+	nec_8048_panel_disable(dssdev);
+	return 0;
+}
+
+static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
+{
+	return nec_8048_panel_enable(dssdev);
+}
+
+static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
+{
+	return 16;
+}
+
+static struct omap_dss_driver nec_8048_driver = {
+	.probe			= nec_8048_panel_probe,
+	.remove			= nec_8048_panel_remove,
+	.enable			= nec_8048_panel_enable,
+	.disable		= nec_8048_panel_disable,
+	.suspend		= nec_8048_panel_suspend,
+	.resume			= nec_8048_panel_resume,
+	.get_recommended_bpp	= nec_8048_recommended_bpp,
+
+	.driver		= {
+		.name		= "NEC_8048_panel",
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
+			unsigned char reg_data)
+{
+	int ret = 0;
+	unsigned int cmd = 0, data = 0;
+
+	cmd = 0x0000 | reg_addr; /* register address write */
+	data = 0x0100 | reg_data; /* register data write */
+	data = (cmd << 16) | data;
+
+	ret = spi_write(spi, (unsigned char *)&data, 4);
+	if (ret)
+		pr_err("error in spi_write %x\n", data);
+
+	return ret;
+}
+
+static int init_nec_8048_wvga_lcd(struct spi_device *spi)
+{
+	unsigned int i;
+	/* Initialization Sequence */
+	/* nec_8048_spi_send(spi, REG, VAL) */
+	for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
+		nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+				nec_8048_init_seq[i].dat);
+	udelay(20);
+	nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
+				nec_8048_init_seq[i].dat);
+	return 0;
+}
+
+static int nec_8048_spi_probe(struct spi_device *spi)
+{
+	spi->mode = SPI_MODE_0;
+	spi->bits_per_word = 32;
+	spi_setup(spi);
+
+	init_nec_8048_wvga_lcd(spi);
+
+	return omap_dss_register_driver(&nec_8048_driver);
+}
+
+static int nec_8048_spi_remove(struct spi_device *spi)
+{
+	omap_dss_unregister_driver(&nec_8048_driver);
+
+	return 0;
+}
+
+static int nec_8048_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+	nec_8048_spi_send(spi, 2, 0x01);
+	mdelay(40);
+
+	return 0;
+}
+
+static int nec_8048_spi_resume(struct spi_device *spi)
+{
+	/* reinitialize the panel */
+	spi_setup(spi);
+	nec_8048_spi_send(spi, 2, 0x00);
+	init_nec_8048_wvga_lcd(spi);
+
+	return 0;
+}
+
+static struct spi_driver nec_8048_spi_driver = {
+	.probe		= nec_8048_spi_probe,
+	.remove		= __devexit_p(nec_8048_spi_remove),
+	.suspend	= nec_8048_spi_suspend,
+	.resume		= nec_8048_spi_resume,
+	.driver		= {
+		.name	= "nec_8048_spi",
+		.bus	= &spi_bus_type,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init nec_8048_lcd_init(void)
+{
+	return spi_register_driver(&nec_8048_spi_driver);
+}
+
+static void __exit nec_8048_lcd_exit(void)
+{
+	return spi_unregister_driver(&nec_8048_spi_driver);
+}
+
+module_init(nec_8048_lcd_init);
+module_exit(nec_8048_lcd_exit);
+MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
+MODULE_DESCRIPTION("NEC-nl8048hl11-01b Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
deleted file mode 100644
index 0c6896c..0000000
--- a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * LCD panel driver for Sharp LQ043T1DG01
- *
- * Copyright (C) 2009 Texas Instruments Inc
- * Author: Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings sharp_lq_timings = {
-	.x_res = 480,
-	.y_res = 272,
-
-	.pixel_clock	= 9000,
-
-	.hsw		= 42,
-	.hfp		= 3,
-	.hbp		= 2,
-
-	.vsw		= 11,
-	.vfp		= 3,
-	.vbp		= 2,
-};
-
-static int sharp_lq_panel_power_on(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-		return 0;
-
-	r = omapdss_dpi_display_enable(dssdev);
-	if (r)
-		goto err0;
-
-	/* wait couple of vsyncs until enabling the LCD */
-	msleep(50);
-
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err1;
-	}
-
-	return 0;
-err1:
-	omapdss_dpi_display_disable(dssdev);
-err0:
-	return r;
-}
-
-static void sharp_lq_panel_power_off(struct omap_dss_device *dssdev)
-{
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return;
-
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
-	/* wait at least 5 vsyncs after disabling the LCD */
-	msleep(100);
-
-	omapdss_dpi_display_disable(dssdev);
-}
-
-static int sharp_lq_panel_probe(struct omap_dss_device *dssdev)
-{
-
-	dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
-		OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO;
-	dssdev->panel.acb = 0x0;
-	dssdev->panel.timings = sharp_lq_timings;
-
-	return 0;
-}
-
-static void sharp_lq_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int sharp_lq_panel_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = sharp_lq_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void sharp_lq_panel_disable(struct omap_dss_device *dssdev)
-{
-	sharp_lq_panel_power_off(dssdev);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int sharp_lq_panel_suspend(struct omap_dss_device *dssdev)
-{
-	sharp_lq_panel_power_off(dssdev);
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-	return 0;
-}
-
-static int sharp_lq_panel_resume(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = sharp_lq_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static struct omap_dss_driver sharp_lq_driver = {
-	.probe		= sharp_lq_panel_probe,
-	.remove		= sharp_lq_panel_remove,
-
-	.enable		= sharp_lq_panel_enable,
-	.disable	= sharp_lq_panel_disable,
-	.suspend	= sharp_lq_panel_suspend,
-	.resume		= sharp_lq_panel_resume,
-
-	.driver         = {
-		.name   = "sharp_lq_panel",
-		.owner  = THIS_MODULE,
-	},
-};
-
-static int __init sharp_lq_panel_drv_init(void)
-{
-	return omap_dss_register_driver(&sharp_lq_driver);
-}
-
-static void __exit sharp_lq_panel_drv_exit(void)
-{
-	omap_dss_unregister_driver(&sharp_lq_driver);
-}
-
-module_init(sharp_lq_panel_drv_init);
-module_exit(sharp_lq_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index e1c765d..61026f9 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -465,7 +465,7 @@
 	return 0;
 }
 
-static struct backlight_ops taal_bl_ops = {
+static const struct backlight_ops taal_bl_ops = {
 	.get_brightness = taal_bl_get_intensity,
 	.update_status  = taal_bl_update_status,
 };
diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
deleted file mode 100644
index 526e906..0000000
--- a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * LCD panel driver for Toppoly TDO35S
- *
- * Copyright (C) 2009 CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- *
- * Based on generic panel support
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-
-#include <plat/display.h>
-
-static struct omap_video_timings toppoly_tdo_panel_timings = {
-	/* 640 x 480 @ 60 Hz  Reduced blanking VESA CVT 0.31M3-R */
-	.x_res		= 480,
-	.y_res		= 640,
-
-	.pixel_clock	= 26000,
-
-	.hfp		= 104,
-	.hsw		= 8,
-	.hbp		= 8,
-
-	.vfp		= 4,
-	.vsw		= 2,
-	.vbp		= 2,
-};
-
-static int toppoly_tdo_panel_power_on(struct omap_dss_device *dssdev)
-{
-	int r;
-
-	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
-		return 0;
-
-	r = omapdss_dpi_display_enable(dssdev);
-	if (r)
-		goto err0;
-
-	if (dssdev->platform_enable) {
-		r = dssdev->platform_enable(dssdev);
-		if (r)
-			goto err1;
-	}
-
-	return 0;
-err1:
-	omapdss_dpi_display_disable(dssdev);
-err0:
-	return r;
-}
-
-static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev)
-{
-	if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
-		return;
-
-	if (dssdev->platform_disable)
-		dssdev->platform_disable(dssdev);
-
-	omapdss_dpi_display_disable(dssdev);
-}
-
-static int toppoly_tdo_panel_probe(struct omap_dss_device *dssdev)
-{
-	dssdev->panel.config = OMAP_DSS_LCD_TFT |
-			       OMAP_DSS_LCD_IVS |
-			       OMAP_DSS_LCD_IHS |
-			       OMAP_DSS_LCD_IPC |
-			       OMAP_DSS_LCD_ONOFF;
-
-	dssdev->panel.timings = toppoly_tdo_panel_timings;
-
-	return 0;
-}
-
-static void toppoly_tdo_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int toppoly_tdo_panel_enable(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = toppoly_tdo_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static void toppoly_tdo_panel_disable(struct omap_dss_device *dssdev)
-{
-	toppoly_tdo_panel_power_off(dssdev);
-
-	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int toppoly_tdo_panel_suspend(struct omap_dss_device *dssdev)
-{
-	toppoly_tdo_panel_power_off(dssdev);
-	dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-	return 0;
-}
-
-static int toppoly_tdo_panel_resume(struct omap_dss_device *dssdev)
-{
-	int r = 0;
-
-	r = toppoly_tdo_panel_power_on(dssdev);
-	if (r)
-		return r;
-
-	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-	return 0;
-}
-
-static struct omap_dss_driver generic_driver = {
-	.probe		= toppoly_tdo_panel_probe,
-	.remove		= toppoly_tdo_panel_remove,
-
-	.enable		= toppoly_tdo_panel_enable,
-	.disable	= toppoly_tdo_panel_disable,
-	.suspend	= toppoly_tdo_panel_suspend,
-	.resume		= toppoly_tdo_panel_resume,
-
-	.driver         = {
-		.name   = "toppoly_tdo35s_panel",
-		.owner  = THIS_MODULE,
-	},
-};
-
-static int __init toppoly_tdo_panel_drv_init(void)
-{
-	return omap_dss_register_driver(&generic_driver);
-}
-
-static void __exit toppoly_tdo_panel_drv_exit(void)
-{
-	omap_dss_unregister_driver(&generic_driver);
-}
-
-module_init(toppoly_tdo_panel_drv_init);
-module_exit(toppoly_tdo_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index fa40fa5..9f8c69f 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -44,34 +44,40 @@
 /* DISPC */
 #define DISPC_BASE			0x48050400
 
-#define DISPC_SZ_REGS			SZ_1K
+#define DISPC_SZ_REGS			SZ_4K
 
 struct dispc_reg { u16 idx; };
 
 #define DISPC_REG(idx)			((const struct dispc_reg) { idx })
 
-/* DISPC common */
+/*
+ * DISPC common registers and
+ * DISPC channel registers, ch = 0 for LCD, ch = 1 for
+ * DIGIT, and ch = 2 for LCD2
+ */
 #define DISPC_REVISION			DISPC_REG(0x0000)
 #define DISPC_SYSCONFIG			DISPC_REG(0x0010)
 #define DISPC_SYSSTATUS			DISPC_REG(0x0014)
 #define DISPC_IRQSTATUS			DISPC_REG(0x0018)
 #define DISPC_IRQENABLE			DISPC_REG(0x001C)
 #define DISPC_CONTROL			DISPC_REG(0x0040)
+#define DISPC_CONTROL2			DISPC_REG(0x0238)
 #define DISPC_CONFIG			DISPC_REG(0x0044)
+#define DISPC_CONFIG2			DISPC_REG(0x0620)
 #define DISPC_CAPABLE			DISPC_REG(0x0048)
-#define DISPC_DEFAULT_COLOR0		DISPC_REG(0x004C)
-#define DISPC_DEFAULT_COLOR1		DISPC_REG(0x0050)
-#define DISPC_TRANS_COLOR0		DISPC_REG(0x0054)
-#define DISPC_TRANS_COLOR1		DISPC_REG(0x0058)
+#define DISPC_DEFAULT_COLOR(ch)		DISPC_REG(ch == 0 ? 0x004C : \
+					(ch == 1 ? 0x0050 : 0x03AC))
+#define DISPC_TRANS_COLOR(ch)		DISPC_REG(ch == 0 ? 0x0054 : \
+					(ch == 1 ? 0x0058 : 0x03B0))
 #define DISPC_LINE_STATUS		DISPC_REG(0x005C)
 #define DISPC_LINE_NUMBER		DISPC_REG(0x0060)
-#define DISPC_TIMING_H			DISPC_REG(0x0064)
-#define DISPC_TIMING_V			DISPC_REG(0x0068)
-#define DISPC_POL_FREQ			DISPC_REG(0x006C)
-#define DISPC_DIVISOR			DISPC_REG(0x0070)
+#define DISPC_TIMING_H(ch)		DISPC_REG(ch != 2 ? 0x0064 : 0x0400)
+#define DISPC_TIMING_V(ch)		DISPC_REG(ch != 2 ? 0x0068 : 0x0404)
+#define DISPC_POL_FREQ(ch)		DISPC_REG(ch != 2 ? 0x006C : 0x0408)
+#define DISPC_DIVISOR(ch)		DISPC_REG(ch != 2 ? 0x0070 : 0x040C)
 #define DISPC_GLOBAL_ALPHA		DISPC_REG(0x0074)
 #define DISPC_SIZE_DIG			DISPC_REG(0x0078)
-#define DISPC_SIZE_LCD			DISPC_REG(0x007C)
+#define DISPC_SIZE_LCD(ch)		DISPC_REG(ch != 2 ? 0x007C : 0x03CC)
 
 /* DISPC GFX plane */
 #define DISPC_GFX_BA0			DISPC_REG(0x0080)
@@ -86,13 +92,12 @@
 #define DISPC_GFX_WINDOW_SKIP		DISPC_REG(0x00B4)
 #define DISPC_GFX_TABLE_BA		DISPC_REG(0x00B8)
 
-#define DISPC_DATA_CYCLE1		DISPC_REG(0x01D4)
-#define DISPC_DATA_CYCLE2		DISPC_REG(0x01D8)
-#define DISPC_DATA_CYCLE3		DISPC_REG(0x01DC)
-
-#define DISPC_CPR_COEF_R		DISPC_REG(0x0220)
-#define DISPC_CPR_COEF_G		DISPC_REG(0x0224)
-#define DISPC_CPR_COEF_B		DISPC_REG(0x0228)
+#define DISPC_DATA_CYCLE1(ch)		DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0)
+#define DISPC_DATA_CYCLE2(ch)		DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4)
+#define DISPC_DATA_CYCLE3(ch)		DISPC_REG(ch != 2 ? 0x01DC : 0x03C8)
+#define DISPC_CPR_COEF_R(ch)		DISPC_REG(ch != 2 ? 0x0220 : 0x03BC)
+#define DISPC_CPR_COEF_G(ch)		DISPC_REG(ch != 2 ? 0x0224 : 0x03B8)
+#define DISPC_CPR_COEF_B(ch)		DISPC_REG(ch != 2 ? 0x0228 : 0x03B4)
 
 #define DISPC_GFX_PRELOAD		DISPC_REG(0x022C)
 
@@ -217,18 +222,29 @@
 	SR(IRQENABLE);
 	SR(CONTROL);
 	SR(CONFIG);
-	SR(DEFAULT_COLOR0);
-	SR(DEFAULT_COLOR1);
-	SR(TRANS_COLOR0);
-	SR(TRANS_COLOR1);
+	SR(DEFAULT_COLOR(0));
+	SR(DEFAULT_COLOR(1));
+	SR(TRANS_COLOR(0));
+	SR(TRANS_COLOR(1));
 	SR(LINE_NUMBER);
-	SR(TIMING_H);
-	SR(TIMING_V);
-	SR(POL_FREQ);
-	SR(DIVISOR);
+	SR(TIMING_H(0));
+	SR(TIMING_V(0));
+	SR(POL_FREQ(0));
+	SR(DIVISOR(0));
 	SR(GLOBAL_ALPHA);
 	SR(SIZE_DIG);
-	SR(SIZE_LCD);
+	SR(SIZE_LCD(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		SR(CONTROL2);
+		SR(DEFAULT_COLOR(2));
+		SR(TRANS_COLOR(2));
+		SR(SIZE_LCD(2));
+		SR(TIMING_H(2));
+		SR(TIMING_V(2));
+		SR(POL_FREQ(2));
+		SR(DIVISOR(2));
+		SR(CONFIG2);
+	}
 
 	SR(GFX_BA0);
 	SR(GFX_BA1);
@@ -241,13 +257,22 @@
 	SR(GFX_WINDOW_SKIP);
 	SR(GFX_TABLE_BA);
 
-	SR(DATA_CYCLE1);
-	SR(DATA_CYCLE2);
-	SR(DATA_CYCLE3);
+	SR(DATA_CYCLE1(0));
+	SR(DATA_CYCLE2(0));
+	SR(DATA_CYCLE3(0));
 
-	SR(CPR_COEF_R);
-	SR(CPR_COEF_G);
-	SR(CPR_COEF_B);
+	SR(CPR_COEF_R(0));
+	SR(CPR_COEF_G(0));
+	SR(CPR_COEF_B(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		SR(CPR_COEF_B(2));
+		SR(CPR_COEF_G(2));
+		SR(CPR_COEF_R(2));
+
+		SR(DATA_CYCLE1(2));
+		SR(DATA_CYCLE2(2));
+		SR(DATA_CYCLE3(2));
+	}
 
 	SR(GFX_PRELOAD);
 
@@ -356,18 +381,28 @@
 	/*RR(IRQENABLE);*/
 	/*RR(CONTROL);*/
 	RR(CONFIG);
-	RR(DEFAULT_COLOR0);
-	RR(DEFAULT_COLOR1);
-	RR(TRANS_COLOR0);
-	RR(TRANS_COLOR1);
+	RR(DEFAULT_COLOR(0));
+	RR(DEFAULT_COLOR(1));
+	RR(TRANS_COLOR(0));
+	RR(TRANS_COLOR(1));
 	RR(LINE_NUMBER);
-	RR(TIMING_H);
-	RR(TIMING_V);
-	RR(POL_FREQ);
-	RR(DIVISOR);
+	RR(TIMING_H(0));
+	RR(TIMING_V(0));
+	RR(POL_FREQ(0));
+	RR(DIVISOR(0));
 	RR(GLOBAL_ALPHA);
 	RR(SIZE_DIG);
-	RR(SIZE_LCD);
+	RR(SIZE_LCD(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		RR(DEFAULT_COLOR(2));
+		RR(TRANS_COLOR(2));
+		RR(SIZE_LCD(2));
+		RR(TIMING_H(2));
+		RR(TIMING_V(2));
+		RR(POL_FREQ(2));
+		RR(DIVISOR(2));
+		RR(CONFIG2);
+	}
 
 	RR(GFX_BA0);
 	RR(GFX_BA1);
@@ -380,13 +415,22 @@
 	RR(GFX_WINDOW_SKIP);
 	RR(GFX_TABLE_BA);
 
-	RR(DATA_CYCLE1);
-	RR(DATA_CYCLE2);
-	RR(DATA_CYCLE3);
+	RR(DATA_CYCLE1(0));
+	RR(DATA_CYCLE2(0));
+	RR(DATA_CYCLE3(0));
 
-	RR(CPR_COEF_R);
-	RR(CPR_COEF_G);
-	RR(CPR_COEF_B);
+	RR(CPR_COEF_R(0));
+	RR(CPR_COEF_G(0));
+	RR(CPR_COEF_B(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		RR(DATA_CYCLE1(2));
+		RR(DATA_CYCLE2(2));
+		RR(DATA_CYCLE3(2));
+
+		RR(CPR_COEF_B(2));
+		RR(CPR_COEF_G(2));
+		RR(CPR_COEF_R(2));
+	}
 
 	RR(GFX_PRELOAD);
 
@@ -490,7 +534,8 @@
 
 	/* enable last, because LCD & DIGIT enable are here */
 	RR(CONTROL);
-
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		RR(CONTROL2);
 	/* clear spurious SYNC_LOST_DIGIT interrupts */
 	dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
 
@@ -516,42 +561,63 @@
 {
 	int bit;
 
-	if (channel == OMAP_DSS_CHANNEL_LCD)
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
 		bit = 5; /* GOLCD */
 	else
 		bit = 6; /* GODIGIT */
 
-	return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		return REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+	else
+		return REG_GET(DISPC_CONTROL, bit, bit) == 1;
 }
 
 void dispc_go(enum omap_channel channel)
 {
 	int bit;
+	bool enable_bit, go_bit;
 
 	enable_clocks(1);
 
-	if (channel == OMAP_DSS_CHANNEL_LCD)
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
 		bit = 0; /* LCDENABLE */
 	else
 		bit = 1; /* DIGITALENABLE */
 
 	/* if the channel is not enabled, we don't need GO */
-	if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+	else
+		enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+
+	if (!enable_bit)
 		goto end;
 
-	if (channel == OMAP_DSS_CHANNEL_LCD)
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
 		bit = 5; /* GOLCD */
 	else
 		bit = 6; /* GODIGIT */
 
-	if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
+	else
+		go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+
+	if (go_bit) {
 		DSSERR("GO bit not down for channel %d\n", channel);
 		goto end;
 	}
 
-	DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
+	DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
+		(channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT"));
 
-	REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
 end:
 	enable_clocks(0);
 }
@@ -773,13 +839,26 @@
 	dispc_write_reg(vsi_reg[plane-1], val);
 }
 
+static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable)
+{
+	if (!dss_has_feature(FEAT_PRE_MULT_ALPHA))
+		return;
+
+	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+		plane == OMAP_DSS_VIDEO1)
+		return;
+
+	REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 28, 28);
+}
+
 static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
 {
 	if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
 		return;
 
-	BUG_ON(!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
-			plane == OMAP_DSS_VIDEO1);
+	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+		plane == OMAP_DSS_VIDEO1)
+		return;
 
 	if (plane == OMAP_DSS_GFX)
 		REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
@@ -851,6 +930,7 @@
 {
 	int shift;
 	u32 val;
+	int chan = 0, chan2 = 0;
 
 	switch (plane) {
 	case OMAP_DSS_GFX:
@@ -866,7 +946,29 @@
 	}
 
 	val = dispc_read_reg(dispc_reg_att[plane]);
-	val = FLD_MOD(val, channel, shift, shift);
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		switch (channel) {
+		case OMAP_DSS_CHANNEL_LCD:
+			chan = 0;
+			chan2 = 0;
+			break;
+		case OMAP_DSS_CHANNEL_DIGIT:
+			chan = 1;
+			chan2 = 0;
+			break;
+		case OMAP_DSS_CHANNEL_LCD2:
+			chan = 0;
+			chan2 = 1;
+			break;
+		default:
+			BUG();
+		}
+
+		val = FLD_MOD(val, chan, shift, shift);
+		val = FLD_MOD(val, chan2, 31, 30);
+	} else {
+		val = FLD_MOD(val, channel, shift, shift);
+	}
 	dispc_write_reg(dispc_reg_att[plane], val);
 }
 
@@ -923,13 +1025,13 @@
 	enable_clocks(0);
 }
 
-void dispc_set_lcd_size(u16 width, u16 height)
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
 {
 	u32 val;
 	BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
 	val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
 	enable_clocks(1);
-	dispc_write_reg(DISPC_SIZE_LCD, val);
+	dispc_write_reg(DISPC_SIZE_LCD(channel), val);
 	enable_clocks(0);
 }
 
@@ -1426,12 +1528,13 @@
 	}
 }
 
-static unsigned long calc_fclk_five_taps(u16 width, u16 height,
-		u16 out_width, u16 out_height, enum omap_color_mode color_mode)
+static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+		u16 height, u16 out_width, u16 out_height,
+		enum omap_color_mode color_mode)
 {
 	u32 fclk = 0;
 	/* FIXME venc pclk? */
-	u64 tmp, pclk = dispc_pclk_rate();
+	u64 tmp, pclk = dispc_pclk_rate(channel);
 
 	if (height > out_height) {
 		/* FIXME get real display PPL */
@@ -1463,8 +1566,8 @@
 	return fclk;
 }
 
-static unsigned long calc_fclk(u16 width, u16 height,
-		u16 out_width, u16 out_height)
+static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+		u16 height, u16 out_width, u16 out_height)
 {
 	unsigned int hf, vf;
 
@@ -1488,7 +1591,7 @@
 		vf = 1;
 
 	/* FIXME venc pclk? */
-	return dispc_pclk_rate() * vf * hf;
+	return dispc_pclk_rate(channel) * vf * hf;
 }
 
 void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
@@ -1507,7 +1610,8 @@
 		bool ilace,
 		enum omap_dss_rotation_type rotation_type,
 		u8 rotation, int mirror,
-		u8 global_alpha)
+		u8 global_alpha, u8 pre_mult_alpha,
+		enum omap_channel channel)
 {
 	const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
 	bool five_taps = 0;
@@ -1536,29 +1640,12 @@
 				height, pos_y, out_height);
 	}
 
+	if (!dss_feat_color_mode_supported(plane, color_mode))
+		return -EINVAL;
+
 	if (plane == OMAP_DSS_GFX) {
 		if (width != out_width || height != out_height)
 			return -EINVAL;
-
-		switch (color_mode) {
-		case OMAP_DSS_COLOR_ARGB16:
-		case OMAP_DSS_COLOR_ARGB32:
-		case OMAP_DSS_COLOR_RGBA32:
-			if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
-				return -EINVAL;
-		case OMAP_DSS_COLOR_RGBX32:
-			if (cpu_is_omap24xx())
-				return -EINVAL;
-			/* fall through */
-		case OMAP_DSS_COLOR_RGB12U:
-		case OMAP_DSS_COLOR_RGB16:
-		case OMAP_DSS_COLOR_RGB24P:
-		case OMAP_DSS_COLOR_RGB24U:
-			break;
-
-		default:
-			return -EINVAL;
-		}
 	} else {
 		/* video plane */
 
@@ -1572,42 +1659,16 @@
 		   out_height > height * 8)
 			return -EINVAL;
 
-		switch (color_mode) {
-		case OMAP_DSS_COLOR_RGBX32:
-		case OMAP_DSS_COLOR_RGB12U:
-			if (cpu_is_omap24xx())
-				return -EINVAL;
-			/* fall through */
-		case OMAP_DSS_COLOR_RGB16:
-		case OMAP_DSS_COLOR_RGB24P:
-		case OMAP_DSS_COLOR_RGB24U:
-			break;
-
-		case OMAP_DSS_COLOR_ARGB16:
-		case OMAP_DSS_COLOR_ARGB32:
-		case OMAP_DSS_COLOR_RGBA32:
-			if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
-				return -EINVAL;
-			if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
-					plane == OMAP_DSS_VIDEO1)
-				return -EINVAL;
-			break;
-
-		case OMAP_DSS_COLOR_YUV2:
-		case OMAP_DSS_COLOR_UYVY:
+		if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+			color_mode == OMAP_DSS_COLOR_UYVY)
 			cconv = 1;
-			break;
-
-		default:
-			return -EINVAL;
-		}
 
 		/* Must use 5-tap filter? */
 		five_taps = height > out_height * 2;
 
 		if (!five_taps) {
-			fclk = calc_fclk(width, height,
-					out_width, out_height);
+			fclk = calc_fclk(channel, width, height, out_width,
+					out_height);
 
 			/* Try 5-tap filter if 3-tap fclk is too high */
 			if (cpu_is_omap34xx() && height > out_height &&
@@ -1621,7 +1682,7 @@
 		}
 
 		if (five_taps)
-			fclk = calc_fclk_five_taps(width, height,
+			fclk = calc_fclk_five_taps(channel, width, height,
 					out_width, out_height, color_mode);
 
 		DSSDBG("required fclk rate = %lu Hz\n", fclk);
@@ -1693,8 +1754,8 @@
 
 	_dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
 
-	if (plane != OMAP_DSS_VIDEO1)
-		_dispc_setup_global_alpha(plane, global_alpha);
+	_dispc_set_pre_mult_alpha(plane, pre_mult_alpha);
+	_dispc_setup_global_alpha(plane, global_alpha);
 
 	return 0;
 }
@@ -1710,36 +1771,44 @@
 	complete(compl);
 }
 
-static void _enable_lcd_out(bool enable)
+static void _enable_lcd_out(enum omap_channel channel, bool enable)
 {
-	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
 }
 
-static void dispc_enable_lcd_out(bool enable)
+static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
 {
 	struct completion frame_done_completion;
 	bool is_on;
 	int r;
+	u32 irq;
 
 	enable_clocks(1);
 
 	/* When we disable LCD output, we need to wait until frame is done.
 	 * Otherwise the DSS is still working, and turning off the clocks
 	 * prevents DSS from going to OFF mode */
-	is_on = REG_GET(DISPC_CONTROL, 0, 0);
+	is_on = channel == OMAP_DSS_CHANNEL_LCD2 ?
+			REG_GET(DISPC_CONTROL2, 0, 0) :
+			REG_GET(DISPC_CONTROL, 0, 0);
+
+	irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 :
+			DISPC_IRQ_FRAMEDONE;
 
 	if (!enable && is_on) {
 		init_completion(&frame_done_completion);
 
 		r = omap_dispc_register_isr(dispc_disable_isr,
-				&frame_done_completion,
-				DISPC_IRQ_FRAMEDONE);
+				&frame_done_completion, irq);
 
 		if (r)
 			DSSERR("failed to register FRAMEDONE isr\n");
 	}
 
-	_enable_lcd_out(enable);
+	_enable_lcd_out(channel, enable);
 
 	if (!enable && is_on) {
 		if (!wait_for_completion_timeout(&frame_done_completion,
@@ -1747,8 +1816,7 @@
 			DSSERR("timeout waiting for FRAME DONE\n");
 
 		r = omap_dispc_unregister_isr(dispc_disable_isr,
-				&frame_done_completion,
-				DISPC_IRQ_FRAMEDONE);
+				&frame_done_completion, irq);
 
 		if (r)
 			DSSERR("failed to unregister FRAMEDONE isr\n");
@@ -1818,6 +1886,8 @@
 		unsigned long flags;
 		spin_lock_irqsave(&dispc.irq_lock, flags);
 		dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+		if (dss_has_feature(FEAT_MGR_LCD2))
+			dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
 		dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
 		_omap_dispc_set_irqs();
 		spin_unlock_irqrestore(&dispc.irq_lock, flags);
@@ -1832,14 +1902,17 @@
 		return !!REG_GET(DISPC_CONTROL, 0, 0);
 	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
 		return !!REG_GET(DISPC_CONTROL, 1, 1);
+	else if (channel == OMAP_DSS_CHANNEL_LCD2)
+		return !!REG_GET(DISPC_CONTROL2, 0, 0);
 	else
 		BUG();
 }
 
 void dispc_enable_channel(enum omap_channel channel, bool enable)
 {
-	if (channel == OMAP_DSS_CHANNEL_LCD)
-		dispc_enable_lcd_out(enable);
+	if (channel == OMAP_DSS_CHANNEL_LCD ||
+			channel == OMAP_DSS_CHANNEL_LCD2)
+		dispc_enable_lcd_out(channel, enable);
 	else if (channel == OMAP_DSS_CHANNEL_DIGIT)
 		dispc_enable_digit_out(enable);
 	else
@@ -1848,6 +1921,9 @@
 
 void dispc_lcd_enable_signal_polarity(bool act_high)
 {
+	if (!dss_has_feature(FEAT_LCDENABLEPOL))
+		return;
+
 	enable_clocks(1);
 	REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
 	enable_clocks(0);
@@ -1855,6 +1931,9 @@
 
 void dispc_lcd_enable_signal(bool enable)
 {
+	if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
+		return;
+
 	enable_clocks(1);
 	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
 	enable_clocks(0);
@@ -1862,20 +1941,27 @@
 
 void dispc_pck_free_enable(bool enable)
 {
+	if (!dss_has_feature(FEAT_PCKFREEENABLE))
+		return;
+
 	enable_clocks(1);
 	REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
 	enable_clocks(0);
 }
 
-void dispc_enable_fifohandcheck(bool enable)
+void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
 {
 	enable_clocks(1);
-	REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
+	else
+		REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
 	enable_clocks(0);
 }
 
 
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+void dispc_set_lcd_display_type(enum omap_channel channel,
+		enum omap_lcd_display_type type)
 {
 	int mode;
 
@@ -1894,7 +1980,10 @@
 	}
 
 	enable_clocks(1);
-	REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
 	enable_clocks(0);
 }
 
@@ -1908,25 +1997,21 @@
 
 void dispc_set_default_color(enum omap_channel channel, u32 color)
 {
-	const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
-				DISPC_DEFAULT_COLOR1 };
-
 	enable_clocks(1);
-	dispc_write_reg(def_reg[channel], color);
+	dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
 	enable_clocks(0);
 }
 
 u32 dispc_get_default_color(enum omap_channel channel)
 {
-	const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
-				DISPC_DEFAULT_COLOR1 };
 	u32 l;
 
 	BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
-	       channel != OMAP_DSS_CHANNEL_LCD);
+		channel != OMAP_DSS_CHANNEL_LCD &&
+		channel != OMAP_DSS_CHANNEL_LCD2);
 
 	enable_clocks(1);
-	l = dispc_read_reg(def_reg[channel]);
+	l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
 	enable_clocks(0);
 
 	return l;
@@ -1936,16 +2021,15 @@
 		enum omap_dss_trans_key_type type,
 		u32 trans_key)
 {
-	const struct dispc_reg tr_reg[] = {
-		DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
-
 	enable_clocks(1);
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
-	else /* OMAP_DSS_CHANNEL_DIGIT */
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
+	else /* OMAP_DSS_CHANNEL_LCD2 */
+		REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
 
-	dispc_write_reg(tr_reg[ch], trans_key);
+	dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
 	enable_clocks(0);
 }
 
@@ -1953,21 +2037,20 @@
 		enum omap_dss_trans_key_type *type,
 		u32 *trans_key)
 {
-	const struct dispc_reg tr_reg[] = {
-		DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
-
 	enable_clocks(1);
 	if (type) {
 		if (ch == OMAP_DSS_CHANNEL_LCD)
 			*type = REG_GET(DISPC_CONFIG, 11, 11);
 		else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 			*type = REG_GET(DISPC_CONFIG, 13, 13);
+		else if (ch == OMAP_DSS_CHANNEL_LCD2)
+			*type = REG_GET(DISPC_CONFIG2, 11, 11);
 		else
 			BUG();
 	}
 
 	if (trans_key)
-		*trans_key = dispc_read_reg(tr_reg[ch]);
+		*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
 	enable_clocks(0);
 }
 
@@ -1976,8 +2059,10 @@
 	enable_clocks(1);
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
-	else /* OMAP_DSS_CHANNEL_DIGIT */
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
+	else /* OMAP_DSS_CHANNEL_LCD2 */
+		REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
 	enable_clocks(0);
 }
 void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
@@ -1988,8 +2073,10 @@
 	enable_clocks(1);
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
-	else /* OMAP_DSS_CHANNEL_DIGIT */
+	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+	else /* OMAP_DSS_CHANNEL_LCD2 */
+		REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
 	enable_clocks(0);
 }
 bool dispc_alpha_blending_enabled(enum omap_channel ch)
@@ -2003,13 +2090,14 @@
 	if (ch == OMAP_DSS_CHANNEL_LCD)
 		enabled = REG_GET(DISPC_CONFIG, 18, 18);
 	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
-		enabled = REG_GET(DISPC_CONFIG, 18, 18);
+		enabled = REG_GET(DISPC_CONFIG, 19, 19);
+	else if (ch == OMAP_DSS_CHANNEL_LCD2)
+		enabled = REG_GET(DISPC_CONFIG2, 18, 18);
 	else
 		BUG();
 	enable_clocks(0);
 
 	return enabled;
-
 }
 
 
@@ -2022,6 +2110,8 @@
 		enabled = REG_GET(DISPC_CONFIG, 10, 10);
 	else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 		enabled = REG_GET(DISPC_CONFIG, 12, 12);
+	else if (ch == OMAP_DSS_CHANNEL_LCD2)
+		enabled = REG_GET(DISPC_CONFIG2, 10, 10);
 	else
 		BUG();
 	enable_clocks(0);
@@ -2030,7 +2120,7 @@
 }
 
 
-void dispc_set_tft_data_lines(u8 data_lines)
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
 {
 	int code;
 
@@ -2053,11 +2143,15 @@
 	}
 
 	enable_clocks(1);
-	REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+	if (channel == OMAP_DSS_CHANNEL_LCD2)
+		REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
+	else
+		REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
 	enable_clocks(0);
 }
 
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+		enum omap_parallel_interface_mode mode)
 {
 	u32 l;
 	int stallmode;
@@ -2087,13 +2181,17 @@
 
 	enable_clocks(1);
 
-	l = dispc_read_reg(DISPC_CONTROL);
-
-	l = FLD_MOD(l, stallmode, 11, 11);
-	l = FLD_MOD(l, gpout0, 15, 15);
-	l = FLD_MOD(l, gpout1, 16, 16);
-
-	dispc_write_reg(DISPC_CONTROL, l);
+	if (channel == OMAP_DSS_CHANNEL_LCD2) {
+		l = dispc_read_reg(DISPC_CONTROL2);
+		l = FLD_MOD(l, stallmode, 11, 11);
+		dispc_write_reg(DISPC_CONTROL2, l);
+	} else {
+		l = dispc_read_reg(DISPC_CONTROL);
+		l = FLD_MOD(l, stallmode, 11, 11);
+		l = FLD_MOD(l, gpout0, 15, 15);
+		l = FLD_MOD(l, gpout1, 16, 16);
+		dispc_write_reg(DISPC_CONTROL, l);
+	}
 
 	enable_clocks(0);
 }
@@ -2129,8 +2227,8 @@
 			timings->vfp, timings->vbp);
 }
 
-static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
-				   int vsw, int vfp, int vbp)
+static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
+		int hfp, int hbp, int vsw, int vfp, int vbp)
 {
 	u32 timing_h, timing_v;
 
@@ -2149,13 +2247,14 @@
 	}
 
 	enable_clocks(1);
-	dispc_write_reg(DISPC_TIMING_H, timing_h);
-	dispc_write_reg(DISPC_TIMING_V, timing_v);
+	dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
+	dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
 	enable_clocks(0);
 }
 
 /* change name to mode? */
-void dispc_set_lcd_timings(struct omap_video_timings *timings)
+void dispc_set_lcd_timings(enum omap_channel channel,
+		struct omap_video_timings *timings)
 {
 	unsigned xtot, ytot;
 	unsigned long ht, vt;
@@ -2165,10 +2264,11 @@
 				timings->vfp, timings->vbp))
 		BUG();
 
-	_dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
-			timings->vsw, timings->vfp, timings->vbp);
+	_dispc_set_lcd_timings(channel, timings->hsw, timings->hfp,
+			timings->hbp, timings->vsw, timings->vfp,
+			timings->vbp);
 
-	dispc_set_lcd_size(timings->x_res, timings->y_res);
+	dispc_set_lcd_size(channel, timings->x_res, timings->y_res);
 
 	xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
 	ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
@@ -2176,7 +2276,8 @@
 	ht = (timings->pixel_clock * 1000) / xtot;
 	vt = (timings->pixel_clock * 1000) / xtot / ytot;
 
-	DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
+	DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
+			timings->y_res);
 	DSSDBG("pck %u\n", timings->pixel_clock);
 	DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
 			timings->hsw, timings->hfp, timings->hbp,
@@ -2185,21 +2286,23 @@
 	DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
 }
 
-static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
+static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
+		u16 pck_div)
 {
 	BUG_ON(lck_div < 1);
 	BUG_ON(pck_div < 2);
 
 	enable_clocks(1);
-	dispc_write_reg(DISPC_DIVISOR,
+	dispc_write_reg(DISPC_DIVISOR(channel),
 			FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
 	enable_clocks(0);
 }
 
-static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
+static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
+		int *pck_div)
 {
 	u32 l;
-	l = dispc_read_reg(DISPC_DIVISOR);
+	l = dispc_read_reg(DISPC_DIVISOR(channel));
 	*lck_div = FLD_GET(l, 23, 16);
 	*pck_div = FLD_GET(l, 7, 0);
 }
@@ -2219,13 +2322,13 @@
 	return r;
 }
 
-unsigned long dispc_lclk_rate(void)
+unsigned long dispc_lclk_rate(enum omap_channel channel)
 {
 	int lcd;
 	unsigned long r;
 	u32 l;
 
-	l = dispc_read_reg(DISPC_DIVISOR);
+	l = dispc_read_reg(DISPC_DIVISOR(channel));
 
 	lcd = FLD_GET(l, 23, 16);
 
@@ -2234,13 +2337,13 @@
 	return r / lcd;
 }
 
-unsigned long dispc_pclk_rate(void)
+unsigned long dispc_pclk_rate(enum omap_channel channel)
 {
 	int lcd, pcd;
 	unsigned long r;
 	u32 l;
 
-	l = dispc_read_reg(DISPC_DIVISOR);
+	l = dispc_read_reg(DISPC_DIVISOR(channel));
 
 	lcd = FLD_GET(l, 23, 16);
 	pcd = FLD_GET(l, 7, 0);
@@ -2256,8 +2359,6 @@
 
 	enable_clocks(1);
 
-	dispc_get_lcd_divisor(&lcd, &pcd);
-
 	seq_printf(s, "- DISPC -\n");
 
 	seq_printf(s, "dispc fclk source = %s\n",
@@ -2265,9 +2366,25 @@
 			"dss1_alwon_fclk" : "dsi1_pll_fclk");
 
 	seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
-	seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
-	seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
 
+	seq_printf(s, "- LCD1 -\n");
+
+	dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
+
+	seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+			dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
+	seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+			dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		seq_printf(s, "- LCD2 -\n");
+
+		dispc_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
+
+		seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+				dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
+		seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+				dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
+	}
 	enable_clocks(0);
 }
 
@@ -2309,6 +2426,12 @@
 	PIS(SYNC_LOST);
 	PIS(SYNC_LOST_DIGIT);
 	PIS(WAKEUP);
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		PIS(FRAMEDONE2);
+		PIS(VSYNC2);
+		PIS(ACBIAS_COUNT_STAT2);
+		PIS(SYNC_LOST2);
+	}
 #undef PIS
 }
 #endif
@@ -2327,19 +2450,30 @@
 	DUMPREG(DISPC_CONTROL);
 	DUMPREG(DISPC_CONFIG);
 	DUMPREG(DISPC_CAPABLE);
-	DUMPREG(DISPC_DEFAULT_COLOR0);
-	DUMPREG(DISPC_DEFAULT_COLOR1);
-	DUMPREG(DISPC_TRANS_COLOR0);
-	DUMPREG(DISPC_TRANS_COLOR1);
+	DUMPREG(DISPC_DEFAULT_COLOR(0));
+	DUMPREG(DISPC_DEFAULT_COLOR(1));
+	DUMPREG(DISPC_TRANS_COLOR(0));
+	DUMPREG(DISPC_TRANS_COLOR(1));
 	DUMPREG(DISPC_LINE_STATUS);
 	DUMPREG(DISPC_LINE_NUMBER);
-	DUMPREG(DISPC_TIMING_H);
-	DUMPREG(DISPC_TIMING_V);
-	DUMPREG(DISPC_POL_FREQ);
-	DUMPREG(DISPC_DIVISOR);
+	DUMPREG(DISPC_TIMING_H(0));
+	DUMPREG(DISPC_TIMING_V(0));
+	DUMPREG(DISPC_POL_FREQ(0));
+	DUMPREG(DISPC_DIVISOR(0));
 	DUMPREG(DISPC_GLOBAL_ALPHA);
 	DUMPREG(DISPC_SIZE_DIG);
-	DUMPREG(DISPC_SIZE_LCD);
+	DUMPREG(DISPC_SIZE_LCD(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		DUMPREG(DISPC_CONTROL2);
+		DUMPREG(DISPC_CONFIG2);
+		DUMPREG(DISPC_DEFAULT_COLOR(2));
+		DUMPREG(DISPC_TRANS_COLOR(2));
+		DUMPREG(DISPC_TIMING_H(2));
+		DUMPREG(DISPC_TIMING_V(2));
+		DUMPREG(DISPC_POL_FREQ(2));
+		DUMPREG(DISPC_DIVISOR(2));
+		DUMPREG(DISPC_SIZE_LCD(2));
+	}
 
 	DUMPREG(DISPC_GFX_BA0);
 	DUMPREG(DISPC_GFX_BA1);
@@ -2353,13 +2487,22 @@
 	DUMPREG(DISPC_GFX_WINDOW_SKIP);
 	DUMPREG(DISPC_GFX_TABLE_BA);
 
-	DUMPREG(DISPC_DATA_CYCLE1);
-	DUMPREG(DISPC_DATA_CYCLE2);
-	DUMPREG(DISPC_DATA_CYCLE3);
+	DUMPREG(DISPC_DATA_CYCLE1(0));
+	DUMPREG(DISPC_DATA_CYCLE2(0));
+	DUMPREG(DISPC_DATA_CYCLE3(0));
 
-	DUMPREG(DISPC_CPR_COEF_R);
-	DUMPREG(DISPC_CPR_COEF_G);
-	DUMPREG(DISPC_CPR_COEF_B);
+	DUMPREG(DISPC_CPR_COEF_R(0));
+	DUMPREG(DISPC_CPR_COEF_G(0));
+	DUMPREG(DISPC_CPR_COEF_B(0));
+	if (dss_has_feature(FEAT_MGR_LCD2)) {
+		DUMPREG(DISPC_DATA_CYCLE1(2));
+		DUMPREG(DISPC_DATA_CYCLE2(2));
+		DUMPREG(DISPC_DATA_CYCLE3(2));
+
+		DUMPREG(DISPC_CPR_COEF_R(2));
+		DUMPREG(DISPC_CPR_COEF_G(2));
+		DUMPREG(DISPC_CPR_COEF_B(2));
+	}
 
 	DUMPREG(DISPC_GFX_PRELOAD);
 
@@ -2458,8 +2601,8 @@
 #undef DUMPREG
 }
 
-static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
-				bool ihs, bool ivs, u8 acbi, u8 acb)
+static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
+		bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, u8 acb)
 {
 	u32 l = 0;
 
@@ -2476,13 +2619,14 @@
 	l |= FLD_VAL(acb, 7, 0);
 
 	enable_clocks(1);
-	dispc_write_reg(DISPC_POL_FREQ, l);
+	dispc_write_reg(DISPC_POL_FREQ(channel), l);
 	enable_clocks(0);
 }
 
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
+void dispc_set_pol_freq(enum omap_channel channel,
+		enum omap_panel_config config, u8 acbi, u8 acb)
 {
-	_dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
+	_dispc_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
 			(config & OMAP_DSS_LCD_RF) != 0,
 			(config & OMAP_DSS_LCD_IEO) != 0,
 			(config & OMAP_DSS_LCD_IPC) != 0,
@@ -2551,24 +2695,26 @@
 	return 0;
 }
 
-int dispc_set_clock_div(struct dispc_clock_info *cinfo)
+int dispc_set_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo)
 {
 	DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
 	DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
 
-	dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
+	dispc_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
 
 	return 0;
 }
 
-int dispc_get_clock_div(struct dispc_clock_info *cinfo)
+int dispc_get_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo)
 {
 	unsigned long fck;
 
 	fck = dispc_fclk_rate();
 
-	cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
-	cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
+	cinfo->lck_div = REG_GET(DISPC_DIVISOR(channel), 23, 16);
+	cinfo->pck_div = REG_GET(DISPC_DIVISOR(channel), 7, 0);
 
 	cinfo->lck = fck / cinfo->lck_div;
 	cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -2708,6 +2854,8 @@
 	PIS(VID2_FIFO_UNDERFLOW);
 	PIS(SYNC_LOST);
 	PIS(SYNC_LOST_DIGIT);
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		PIS(SYNC_LOST2);
 #undef PIS
 
 	printk("\n");
@@ -2926,6 +3074,45 @@
 		}
 	}
 
+	if (errors & DISPC_IRQ_SYNC_LOST2) {
+		struct omap_overlay_manager *manager = NULL;
+		bool enable = false;
+
+		DSSERR("SYNC_LOST for LCD2, disabling LCD2\n");
+
+		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+			struct omap_overlay_manager *mgr;
+			mgr = omap_dss_get_overlay_manager(i);
+
+			if (mgr->id == OMAP_DSS_CHANNEL_LCD2) {
+				manager = mgr;
+				enable = mgr->device->state ==
+						OMAP_DSS_DISPLAY_ACTIVE;
+				mgr->device->driver->disable(mgr->device);
+				break;
+			}
+		}
+
+		if (manager) {
+			struct omap_dss_device *dssdev = manager->device;
+			for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+				struct omap_overlay *ovl;
+				ovl = omap_dss_get_overlay(i);
+
+				if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+					continue;
+
+				if (ovl->id != 0 && ovl->manager == manager)
+					dispc_enable_plane(ovl->id, 0);
+			}
+
+			dispc_go(manager->id);
+			mdelay(50);
+			if (enable)
+				dssdev->driver->enable(dssdev);
+		}
+	}
+
 	if (errors & DISPC_IRQ_OCP_ERR) {
 		DSSERR("OCP_ERR\n");
 		for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
@@ -3033,6 +3220,8 @@
 	memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
 
 	dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
 
 	/* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
 	 * so clear it */
@@ -3065,7 +3254,8 @@
 	dispc_write_reg(DISPC_SYSCONFIG, l);
 
 	/* FUNCGATED */
-	REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+	if (dss_has_feature(FEAT_FUNCGATED))
+		REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
 
 	/* L3 firewall setting: enable access to OCM RAM */
 	/* XXX this should be somewhere in plat-omap */
@@ -3139,17 +3329,18 @@
 		       enum omap_color_mode color_mode,
 		       bool ilace,
 		       enum omap_dss_rotation_type rotation_type,
-		       u8 rotation, bool mirror, u8 global_alpha)
+		       u8 rotation, bool mirror, u8 global_alpha,
+		       u8 pre_mult_alpha, enum omap_channel channel)
 {
 	int r = 0;
 
 	DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
-	       "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n",
+	       "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
 	       plane, paddr, screen_width, pos_x, pos_y,
 	       width, height,
 	       out_width, out_height,
 	       ilace, color_mode,
-	       rotation, mirror);
+	       rotation, mirror, channel);
 
 	enable_clocks(1);
 
@@ -3161,7 +3352,8 @@
 			   color_mode, ilace,
 			   rotation_type,
 			   rotation, mirror,
-			   global_alpha);
+			   global_alpha,
+			   pre_mult_alpha, channel);
 
 	enable_clocks(0);
 
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 960e977..75fb0a5 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -40,8 +40,9 @@
 } dpi;
 
 #ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
-		unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
+		unsigned long pck_req, unsigned long *fck, int *lck_div,
+		int *pck_div)
 {
 	struct dsi_clock_info dsi_cinfo;
 	struct dispc_clock_info dispc_cinfo;
@@ -58,7 +59,7 @@
 
 	dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r)
 		return r;
 
@@ -69,8 +70,9 @@
 	return 0;
 }
 #else
-static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
-		unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
+		unsigned long pck_req, unsigned long *fck, int *lck_div,
+		int *pck_div)
 {
 	struct dss_clock_info dss_cinfo;
 	struct dispc_clock_info dispc_cinfo;
@@ -84,7 +86,7 @@
 	if (r)
 		return r;
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r)
 		return r;
 
@@ -107,17 +109,17 @@
 
 	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
 
-	dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
-			dssdev->panel.acb);
+	dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+			dssdev->panel.acbi, dssdev->panel.acb);
 
 	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
 
 #ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
-	r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
-			&fck, &lck_div, &pck_div);
+	r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck,
+			&lck_div, &pck_div);
 #else
-	r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
-			&fck, &lck_div, &pck_div);
+	r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck,
+			&lck_div, &pck_div);
 #endif
 	if (r)
 		goto err0;
@@ -132,7 +134,7 @@
 		t->pixel_clock = pck;
 	}
 
-	dispc_set_lcd_timings(t);
+	dispc_set_lcd_timings(dssdev->manager->id, t);
 
 err0:
 	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -145,10 +147,12 @@
 
 	is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
 
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
-	dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
-			OMAP_DSS_LCD_DISPLAY_STN);
-	dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_BYPASS);
+	dispc_set_lcd_display_type(dssdev->manager->id, is_tft ?
+			OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
+	dispc_set_tft_data_lines(dssdev->manager->id,
+			dssdev->phy.dpi.data_lines);
 
 	return 0;
 }
@@ -234,7 +238,7 @@
 	dssdev->panel.timings = *timings;
 	if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
 		dpi_set_mode(dssdev);
-		dispc_go(OMAP_DSS_CHANNEL_LCD);
+		dispc_go(dssdev->manager->id);
 	}
 }
 EXPORT_SYMBOL(dpi_set_timings);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index aa4f7a5..ddf3a05 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -792,7 +792,8 @@
 }
 
 /* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
+static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+		struct dsi_clock_info *cinfo)
 {
 	if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
 		return -EINVAL;
@@ -812,7 +813,7 @@
 		 * with DSS2_FCK source also */
 		cinfo->highfreq = 0;
 	} else {
-		cinfo->clkin = dispc_pclk_rate();
+		cinfo->clkin = dispc_pclk_rate(dssdev->manager->id);
 
 		if (cinfo->clkin < 32000000)
 			cinfo->highfreq = 0;
@@ -1206,8 +1207,8 @@
 
 	seq_printf(s,	"VP_CLK\t\t%lu\n"
 			"VP_PCLK\t\t%lu\n",
-			dispc_lclk_rate(),
-			dispc_pclk_rate());
+			dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD),
+			dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD));
 
 	enable_clocks(0);
 }
@@ -2888,7 +2889,7 @@
 	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
 		dss_setup_partial_planes(dssdev, x, y, w, h,
 				enlarge_update_area);
-		dispc_set_lcd_size(*w, *h);
+		dispc_set_lcd_size(dssdev->manager->id, *w, *h);
 	}
 
 	return 0;
@@ -2947,12 +2948,14 @@
 		return r;
 	}
 
-	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+	dispc_set_lcd_display_type(dssdev->manager->id,
+			OMAP_DSS_LCD_DISPLAY_TFT);
 
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
-	dispc_enable_fifohandcheck(1);
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_DSI);
+	dispc_enable_fifohandcheck(dssdev->manager->id, 1);
 
-	dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+	dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
 
 	{
 		struct omap_video_timings timings = {
@@ -2964,7 +2967,7 @@
 			.vbp		= 0,
 		};
 
-		dispc_set_lcd_timings(&timings);
+		dispc_set_lcd_timings(dssdev->manager->id, &timings);
 	}
 
 	return 0;
@@ -2987,7 +2990,7 @@
 	cinfo.regm  = dssdev->phy.dsi.div.regm;
 	cinfo.regm3 = dssdev->phy.dsi.div.regm3;
 	cinfo.regm4 = dssdev->phy.dsi.div.regm4;
-	r = dsi_calc_clock_rates(&cinfo);
+	r = dsi_calc_clock_rates(dssdev, &cinfo);
 	if (r) {
 		DSSERR("Failed to calc dsi clocks\n");
 		return r;
@@ -3019,7 +3022,7 @@
 		return r;
 	}
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r) {
 		DSSERR("Failed to set dispc clocks\n");
 		return r;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 5c7940d..b394951 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -333,9 +333,9 @@
 void dispc_lcd_enable_signal_polarity(bool act_high);
 void dispc_lcd_enable_signal(bool enable);
 void dispc_pck_free_enable(bool enable);
-void dispc_enable_fifohandcheck(bool enable);
+void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
 
-void dispc_set_lcd_size(u16 width, u16 height);
+void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
 void dispc_set_digit_size(u16 width, u16 height);
 u32 dispc_get_plane_fifo_size(enum omap_plane plane);
 void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
@@ -359,7 +359,8 @@
 		      bool ilace,
 		      enum omap_dss_rotation_type rotation_type,
 		      u8 rotation, bool mirror,
-		      u8 global_alpha);
+		      u8 global_alpha, u8 pre_mult_alpha,
+		      enum omap_channel channel);
 
 bool dispc_go_busy(enum omap_channel channel);
 void dispc_go(enum omap_channel channel);
@@ -368,9 +369,11 @@
 int dispc_enable_plane(enum omap_plane plane, bool enable);
 void dispc_enable_replication(enum omap_plane plane, bool enable);
 
-void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
-void dispc_set_tft_data_lines(u8 data_lines);
-void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
+void dispc_set_parallel_interface_mode(enum omap_channel channel,
+		enum omap_parallel_interface_mode mode);
+void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
+void dispc_set_lcd_display_type(enum omap_channel channel,
+		enum omap_lcd_display_type type);
 void dispc_set_loadmode(enum omap_dss_load_mode mode);
 
 void dispc_set_default_color(enum omap_channel channel, u32 color);
@@ -387,17 +390,21 @@
 bool dispc_alpha_blending_enabled(enum omap_channel ch);
 
 bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
-void dispc_set_lcd_timings(struct omap_video_timings *timings);
+void dispc_set_lcd_timings(enum omap_channel channel,
+		struct omap_video_timings *timings);
 unsigned long dispc_fclk_rate(void);
-unsigned long dispc_lclk_rate(void);
-unsigned long dispc_pclk_rate(void);
-void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
+unsigned long dispc_lclk_rate(enum omap_channel channel);
+unsigned long dispc_pclk_rate(enum omap_channel channel);
+void dispc_set_pol_freq(enum omap_channel channel,
+		enum omap_panel_config config, u8 acbi, u8 acb);
 void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
 		struct dispc_clock_info *cinfo);
 int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
 		struct dispc_clock_info *cinfo);
-int dispc_set_clock_div(struct dispc_clock_info *cinfo);
-int dispc_get_clock_div(struct dispc_clock_info *cinfo);
+int dispc_set_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo);
+int dispc_get_clock_div(enum omap_channel channel,
+		struct dispc_clock_info *cinfo);
 
 
 /* VENC */
@@ -424,8 +431,8 @@
 
 int rfbi_configure(int rfbi_module, int bpp, int lines);
 void rfbi_enable_rfbi(bool enable);
-void rfbi_transfer_area(u16 width, u16 height,
-			     void (callback)(void *data), void *data);
+void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+		u16 height, void (callback)(void *data), void *data);
 void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
 unsigned long rfbi_get_max_tx_rate(void);
 int rfbi_init_display(struct omap_dss_device *display);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 867f68d..cf3ef69 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -82,6 +82,18 @@
 	OMAP_DISPLAY_TYPE_VENC,
 };
 
+static const enum omap_display_type omap4_dss_supported_displays[] = {
+	/* OMAP_DSS_CHANNEL_LCD */
+	OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
+
+	/* OMAP_DSS_CHANNEL_DIGIT */
+	OMAP_DISPLAY_TYPE_VENC,
+
+	/* OMAP_DSS_CHANNEL_LCD2 */
+	OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+	OMAP_DISPLAY_TYPE_DSI,
+};
+
 static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
 	/* OMAP_DSS_GFX */
 	OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
@@ -127,6 +139,10 @@
 	.reg_fields = omap2_dss_reg_fields,
 	.num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
 
+	.has_feature	=
+		FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL |
+		FEAT_PCKFREEENABLE | FEAT_FUNCGATED,
+
 	.num_mgrs = 2,
 	.num_ovls = 3,
 	.supported_displays = omap2_dss_supported_displays,
@@ -134,11 +150,14 @@
 };
 
 /* OMAP3 DSS Features */
-static struct omap_dss_features omap3_dss_features = {
+static struct omap_dss_features omap3430_dss_features = {
 	.reg_fields = omap3_dss_reg_fields,
 	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
 
-	.has_feature	= FEAT_GLOBAL_ALPHA,
+	.has_feature	=
+		FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+		FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
+		FEAT_FUNCGATED,
 
 	.num_mgrs = 2,
 	.num_ovls = 3,
@@ -146,6 +165,36 @@
 	.supported_color_modes = omap3_dss_supported_color_modes,
 };
 
+static struct omap_dss_features omap3630_dss_features = {
+	.reg_fields = omap3_dss_reg_fields,
+	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+	.has_feature    =
+		FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL |
+		FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
+		FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED,
+
+	.num_mgrs = 2,
+	.num_ovls = 3,
+	.supported_displays = omap3_dss_supported_displays,
+	.supported_color_modes = omap3_dss_supported_color_modes,
+};
+
+/* OMAP4 DSS Features */
+static struct omap_dss_features omap4_dss_features = {
+	.reg_fields = omap3_dss_reg_fields,
+	.num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
+
+	.has_feature	=
+		FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA |
+		FEAT_MGR_LCD2,
+
+	.num_mgrs = 3,
+	.num_ovls = 3,
+	.supported_displays = omap4_dss_supported_displays,
+	.supported_color_modes = omap3_dss_supported_color_modes,
+};
+
 /* Functions returning values related to a DSS feature */
 int dss_feat_get_num_mgrs(void)
 {
@@ -167,6 +216,13 @@
 	return omap_current_dss_features->supported_color_modes[plane];
 }
 
+bool dss_feat_color_mode_supported(enum omap_plane plane,
+		enum omap_color_mode color_mode)
+{
+	return omap_current_dss_features->supported_color_modes[plane] &
+			color_mode;
+}
+
 /* DSS has_feature check */
 bool dss_has_feature(enum dss_feat_id id)
 {
@@ -186,6 +242,10 @@
 {
 	if (cpu_is_omap24xx())
 		omap_current_dss_features = &omap2_dss_features;
+	else if (cpu_is_omap3630())
+		omap_current_dss_features = &omap3630_dss_features;
+	else if (cpu_is_omap34xx())
+		omap_current_dss_features = &omap3430_dss_features;
 	else
-		omap_current_dss_features = &omap3_dss_features;
+		omap_current_dss_features = &omap4_dss_features;
 }
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index cb231ea..b9c70be 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -20,13 +20,19 @@
 #ifndef __OMAP2_DSS_FEATURES_H
 #define __OMAP2_DSS_FEATURES_H
 
-#define MAX_DSS_MANAGERS	2
+#define MAX_DSS_MANAGERS	3
 #define MAX_DSS_OVERLAYS	3
 
 /* DSS has feature id */
 enum dss_feat_id {
 	FEAT_GLOBAL_ALPHA	= 1 << 0,
 	FEAT_GLOBAL_ALPHA_VID1	= 1 << 1,
+	FEAT_PRE_MULT_ALPHA	= 1 << 2,
+	FEAT_LCDENABLEPOL	= 1 << 3,
+	FEAT_LCDENABLESIGNAL	= 1 << 4,
+	FEAT_PCKFREEENABLE	= 1 << 5,
+	FEAT_FUNCGATED		= 1 << 6,
+	FEAT_MGR_LCD2		= 1 << 7,
 };
 
 /* DSS register field id */
@@ -43,6 +49,8 @@
 int dss_feat_get_num_ovls(void);
 enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
 enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+bool dss_feat_color_mode_supported(enum omap_plane plane,
+		enum omap_color_mode color_mode);
 
 bool dss_has_feature(enum dss_feat_id id);
 void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 545e9b9..172d4e6 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -406,6 +406,7 @@
 	u16 out_width;	/* if 0, out_width == width */
 	u16 out_height;	/* if 0, out_height == height */
 	u8 global_alpha;
+	u8 pre_mult_alpha;
 
 	enum omap_channel channel;
 	bool replication;
@@ -512,11 +513,14 @@
 	unsigned long timeout = msecs_to_jiffies(500);
 	u32 irq;
 
-	if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC)
+	if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
 		irq = DISPC_IRQ_EVSYNC_ODD;
-	else
-		irq = DISPC_IRQ_VSYNC;
-
+	} else {
+		if (mgr->id == OMAP_DSS_CHANNEL_LCD)
+			irq = DISPC_IRQ_VSYNC;
+		else
+			irq = DISPC_IRQ_VSYNC2;
+	}
 	return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
 }
 
@@ -524,7 +528,6 @@
 {
 	unsigned long timeout = msecs_to_jiffies(500);
 	struct manager_cache_data *mc;
-	enum omap_channel channel;
 	u32 irq;
 	int r;
 	int i;
@@ -535,7 +538,6 @@
 
 	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
 		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
-		channel = OMAP_DSS_CHANNEL_DIGIT;
 	} else {
 		if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
 			enum omap_dss_update_mode mode;
@@ -543,11 +545,14 @@
 			if (mode != OMAP_DSS_UPDATE_AUTO)
 				return 0;
 
-			irq = DISPC_IRQ_FRAMEDONE;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_FRAMEDONE
+				: DISPC_IRQ_FRAMEDONE2;
 		} else {
-			irq = DISPC_IRQ_VSYNC;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_VSYNC
+				: DISPC_IRQ_VSYNC2;
 		}
-		channel = OMAP_DSS_CHANNEL_LCD;
 	}
 
 	mc = &dss_cache.manager_cache[mgr->id];
@@ -594,7 +599,6 @@
 int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
 {
 	unsigned long timeout = msecs_to_jiffies(500);
-	enum omap_channel channel;
 	struct overlay_cache_data *oc;
 	struct omap_dss_device *dssdev;
 	u32 irq;
@@ -611,7 +615,6 @@
 
 	if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
 		irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
-		channel = OMAP_DSS_CHANNEL_DIGIT;
 	} else {
 		if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
 			enum omap_dss_update_mode mode;
@@ -619,11 +622,14 @@
 			if (mode != OMAP_DSS_UPDATE_AUTO)
 				return 0;
 
-			irq = DISPC_IRQ_FRAMEDONE;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_FRAMEDONE
+				: DISPC_IRQ_FRAMEDONE2;
 		} else {
-			irq = DISPC_IRQ_VSYNC;
+			irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+				DISPC_IRQ_VSYNC
+				: DISPC_IRQ_VSYNC2;
 		}
-		channel = OMAP_DSS_CHANNEL_LCD;
 	}
 
 	oc = &dss_cache.overlay_cache[ovl->id];
@@ -842,7 +848,9 @@
 			c->rotation_type,
 			c->rotation,
 			c->mirror,
-			c->global_alpha);
+			c->global_alpha,
+			c->pre_mult_alpha,
+			c->channel);
 
 	if (r) {
 		/* this shouldn't happen */
@@ -894,10 +902,10 @@
 	r = 0;
 	busy = false;
 
-	mgr_busy[0] = dispc_go_busy(0);
-	mgr_busy[1] = dispc_go_busy(1);
-	mgr_go[0] = false;
-	mgr_go[1] = false;
+	for (i = 0; i < num_mgrs; i++) {
+		mgr_busy[i] = dispc_go_busy(i);
+		mgr_go[i] = false;
+	}
 
 	/* Commit overlay settings */
 	for (i = 0; i < num_ovls; ++i) {
@@ -1156,9 +1164,10 @@
 	const int num_mgrs = dss_feat_get_num_mgrs();
 	int i, r;
 	bool mgr_busy[MAX_DSS_MANAGERS];
+	u32 irq_mask;
 
-	mgr_busy[0] = dispc_go_busy(0);
-	mgr_busy[1] = dispc_go_busy(1);
+	for (i = 0; i < num_mgrs; i++)
+		mgr_busy[i] = dispc_go_busy(i);
 
 	spin_lock(&dss_cache.lock);
 
@@ -1179,8 +1188,8 @@
 		goto end;
 
 	/* re-read busy flags */
-	mgr_busy[0] = dispc_go_busy(0);
-	mgr_busy[1] = dispc_go_busy(1);
+	for (i = 0; i < num_mgrs; i++)
+		mgr_busy[i] = dispc_go_busy(i);
 
 	/* keep running as long as there are busy managers, so that
 	 * we can collect overlay-applied information */
@@ -1189,9 +1198,12 @@
 			goto end;
 	}
 
-	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
-			DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
-			DISPC_IRQ_EVSYNC_EVEN);
+	irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+			DISPC_IRQ_EVSYNC_EVEN;
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		irq_mask |= DISPC_IRQ_VSYNC2;
+
+	omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
 	dss_cache.irq_enabled = false;
 
 end:
@@ -1265,6 +1277,7 @@
 		oc->out_width = ovl->info.out_width;
 		oc->out_height = ovl->info.out_height;
 		oc->global_alpha = ovl->info.global_alpha;
+		oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
 
 		oc->replication =
 			dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1383,9 +1396,14 @@
 	r = 0;
 	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
 	if (!dss_cache.irq_enabled) {
-		r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
-				DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
-				DISPC_IRQ_EVSYNC_EVEN);
+		u32 mask;
+
+		mask = DISPC_IRQ_VSYNC	| DISPC_IRQ_EVSYNC_ODD |
+			DISPC_IRQ_EVSYNC_EVEN;
+		if (dss_has_feature(FEAT_MGR_LCD2))
+			mask |= DISPC_IRQ_VSYNC2;
+
+		r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
 		dss_cache.irq_enabled = true;
 	}
 	configure_dispc();
@@ -1477,6 +1495,10 @@
 			mgr->name = "tv";
 			mgr->id = OMAP_DSS_CHANNEL_DIGIT;
 			break;
+		case 2:
+			mgr->name = "lcd2";
+			mgr->id = OMAP_DSS_CHANNEL_LCD2;
+			break;
 		}
 
 		mgr->set_device = &omap_dss_set_device;
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 75642c2..456efef 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -257,6 +257,43 @@
 	return size;
 }
 
+static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			ovl->info.pre_mult_alpha);
+}
+
+static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
+		const char *buf, size_t size)
+{
+	int r;
+	struct omap_overlay_info info;
+
+	ovl->get_overlay_info(ovl, &info);
+
+	/* only GFX and Video2 planes support pre-multiplied alpha;
+	 * set it to zero for the Video1 plane
+	 */
+	if (!dss_has_feature(FEAT_GLOBAL_ALPHA_VID1) &&
+		ovl->id == OMAP_DSS_VIDEO1)
+		info.pre_mult_alpha = 0;
+	else
+		info.pre_mult_alpha = simple_strtoul(buf, NULL, 10);
+
+	r = ovl->set_overlay_info(ovl, &info);
+	if (r)
+		return r;
+
+	if (ovl->manager) {
+		r = ovl->manager->apply(ovl->manager);
+		if (r)
+			return r;
+	}
+
+	return size;
+}
+
 struct overlay_attribute {
 	struct attribute attr;
 	ssize_t (*show)(struct omap_overlay *, char *);
@@ -280,6 +317,9 @@
 		overlay_enabled_show, overlay_enabled_store);
 static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
 		overlay_global_alpha_show, overlay_global_alpha_store);
+static OVERLAY_ATTR(pre_mult_alpha, S_IRUGO|S_IWUSR,
+		overlay_pre_mult_alpha_show,
+		overlay_pre_mult_alpha_store);
 
 static struct attribute *overlay_sysfs_attrs[] = {
 	&overlay_attr_name.attr,
@@ -290,6 +330,7 @@
 	&overlay_attr_output_size.attr,
 	&overlay_attr_enabled.attr,
 	&overlay_attr_global_alpha.attr,
+	&overlay_attr_pre_mult_alpha.attr,
 	NULL
 };
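/*
 * Purely illustrative, not part of the patch: with overlay_attr_pre_mult_alpha
 * registered above, userspace can toggle pre-multiplied alpha per overlay
 * through sysfs.  The sysfs path below is an assumption based on the usual
 * omapdss layout and may differ on a given board; the written value is parsed
 * with simple_strtoul() in overlay_pre_mult_alpha_store().
 */
#include <stdio.h>

static int set_pre_mult_alpha(int overlay, int enable)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/platform/omapdss/overlay%d/pre_mult_alpha",
		 overlay);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", enable);
	fclose(f);
	return 0;
}

int main(void)
{
	/* enable pre-multiplied alpha on the Video2 overlay (overlay2) */
	return set_pre_mult_alpha(2, 1) ? 1 : 0;
}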
 
@@ -623,12 +664,22 @@
 	int i;
 	struct omap_overlay_manager *lcd_mgr;
 	struct omap_overlay_manager *tv_mgr;
+	struct omap_overlay_manager *lcd2_mgr = NULL;
 	struct omap_overlay_manager *mgr = NULL;
 
 	lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
 	tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
+	if (dss_has_feature(FEAT_MGR_LCD2))
+		lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2);
 
-	if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+	if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+		if (!lcd2_mgr->device || force) {
+			if (lcd2_mgr->device)
+				lcd2_mgr->unset_device(lcd2_mgr);
+			lcd2_mgr->set_device(lcd2_mgr, dssdev);
+			mgr = lcd2_mgr;
+		}
+	} else if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
 		if (!lcd_mgr->device || force) {
 			if (lcd_mgr->device)
 				lcd_mgr->unset_device(lcd_mgr);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index bbe6246..10a2ffe 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -301,8 +301,8 @@
 }
 EXPORT_SYMBOL(omap_rfbi_write_pixels);
 
-void rfbi_transfer_area(u16 width, u16 height,
-			     void (callback)(void *data), void *data)
+void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+		u16 height, void (*callback)(void *data), void *data)
 {
 	u32 l;
 
@@ -311,9 +311,9 @@
 
 	DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
 
-	dispc_set_lcd_size(width, height);
+	dispc_set_lcd_size(dssdev->manager->id, width, height);
 
-	dispc_enable_channel(OMAP_DSS_CHANNEL_LCD, true);
+	dispc_enable_channel(dssdev->manager->id, true);
 
 	rfbi.framedone_callback = callback;
 	rfbi.framedone_callback_data = data;
@@ -887,7 +887,7 @@
 
 	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
 		dss_setup_partial_planes(dssdev, x, y, w, h, true);
-		dispc_set_lcd_size(*w, *h);
+		dispc_set_lcd_size(dssdev->manager->id, *w, *h);
 	}
 
 	return 0;
@@ -899,7 +899,7 @@
 		void (*callback)(void *), void *data)
 {
 	if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
-		rfbi_transfer_area(w, h, callback, data);
+		rfbi_transfer_area(dssdev, w, h, callback, data);
 	} else {
 		struct omap_overlay *ovl;
 		void __iomem *addr;
@@ -1018,11 +1018,13 @@
 		goto err1;
 	}
 
-	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+	dispc_set_lcd_display_type(dssdev->manager->id,
+			OMAP_DSS_LCD_DISPLAY_TFT);
 
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_RFBI);
 
-	dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+	dispc_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
 
 	rfbi_configure(dssdev->phy.rfbi.channel,
 			       dssdev->ctrl.pixel_size,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index ee07a3c..b64adf7 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -35,12 +35,16 @@
 	struct regulator *vdds_sdi_reg;
 } sdi;
 
-static void sdi_basic_init(void)
-{
-	dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+static void sdi_basic_init(struct omap_dss_device *dssdev)
 
-	dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
-	dispc_set_tft_data_lines(24);
+{
+	dispc_set_parallel_interface_mode(dssdev->manager->id,
+			OMAP_DSS_PARALLELMODE_BYPASS);
+
+	dispc_set_lcd_display_type(dssdev->manager->id,
+			OMAP_DSS_LCD_DISPLAY_TFT);
+
+	dispc_set_tft_data_lines(dssdev->manager->id, 24);
 	dispc_lcd_enable_signal_polarity(1);
 }
 
@@ -68,20 +72,20 @@
 	if (!sdi.skip_init)
 		dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
 
-	sdi_basic_init();
+	sdi_basic_init(dssdev);
 
 	/* 15.5.9.1.2 */
 	dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
 
-	dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
-			dssdev->panel.acb);
+	dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
+			dssdev->panel.acbi, dssdev->panel.acb);
 
 	if (!sdi.skip_init) {
 		r = dss_calc_clock_div(1, t->pixel_clock * 1000,
 				&dss_cinfo, &dispc_cinfo);
 	} else {
 		r = dss_get_clock_div(&dss_cinfo);
-		r = dispc_get_clock_div(&dispc_cinfo);
+		r = dispc_get_clock_div(dssdev->manager->id, &dispc_cinfo);
 	}
 
 	if (r)
@@ -102,13 +106,13 @@
 	}
 
 
-	dispc_set_lcd_timings(t);
+	dispc_set_lcd_timings(dssdev->manager->id, t);
 
 	r = dss_set_clock_div(&dss_cinfo);
 	if (r)
 		goto err2;
 
-	r = dispc_set_clock_div(&dispc_cinfo);
+	r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
 	if (r)
 		goto err2;
 
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 6a704f1..4fdab8e 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -2132,8 +2132,9 @@
 	char *str, *options, *this_opt;
 	int r = 0;
 
-	str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL);
-	strcpy(str, def_mode);
+	str = kstrdup(def_mode, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
 	options = str;
 
 	while (!r && (this_opt = strsep(&options, ",")) != NULL) {
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 618f36b..da38818 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -331,7 +331,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops riva_bl_ops = {
+static const struct backlight_ops riva_bl_ops = {
 	.get_brightness = riva_bl_get_brightness,
 	.update_status	= riva_bl_update_status,
 };
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 46b4309..61c819e 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -13,6 +13,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -918,9 +919,9 @@
 	}
 
 	info->clk = clk_get(NULL, "lcd");
-	if (!info->clk || IS_ERR(info->clk)) {
+	if (IS_ERR(info->clk)) {
 		printk(KERN_ERR "failed to get lcd clock source\n");
-		ret = -ENOENT;
+		ret = PTR_ERR(info->clk);
 		goto release_irq;
 	}
 
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 8c59cc8..74d9f54 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
@@ -221,6 +222,7 @@
 	struct delayed_work edid_work;
 	struct fb_var_screeninfo var;
 	struct fb_monspecs monspec;
+	struct notifier_block notifier;
 };
 
 static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
@@ -737,7 +739,7 @@
 	struct fb_modelist *modelist = NULL;
 	unsigned int f_width = 0, f_height = 0, f_refresh = 0;
 	unsigned long found_rate_error = ULONG_MAX; /* silly compiler... */
-	bool exact_match = false;
+	bool scanning = false, preferred_bad = false;
 	u8 edid[128];
 	char *forced;
 	int i;
@@ -800,6 +802,9 @@
 		if (i < 2) {
 			f_width = 0;
 			f_height = 0;
+		} else {
+			/* The user wants us to use the EDID data */
+			scanning = true;
 		}
 		dev_dbg(hdmi->dev, "Forced mode %ux%u@%uHz\n",
 			f_width, f_height, f_refresh);
@@ -807,37 +812,56 @@
 
 	/* Walk monitor modes to find the best or the exact match */
 	for (i = 0, mode = hdmi->monspec.modedb;
-	     f_width && f_height && i < hdmi->monspec.modedb_len && !exact_match;
+	     i < hdmi->monspec.modedb_len && scanning;
 	     i++, mode++) {
 		unsigned long rate_error;
 
-		/* No interest in unmatching modes */
-		if (f_width != mode->xres || f_height != mode->yres)
+		if (!f_width && !f_height) {
+			/*
+			 * A parameter string "video=sh_mobile_lcdc:0x0" means
+			 * use the preferred EDID mode. If it is rejected by
+			 * .fb_check_var(), keep looking, until an acceptable
+			 * one is found.
+			 */
+			if ((mode->flag & FB_MODE_IS_FIRST) || preferred_bad)
+				scanning = false;
+			else
+				continue;
+		} else if (f_width != mode->xres || f_height != mode->yres) {
+			/* No interest in unmatching modes */
 			continue;
+		}
 
 		rate_error = sh_hdmi_rate_error(hdmi, mode, hdmi_rate, parent_rate);
 
-		if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
-			/*
-			 * Exact match if either the refresh rate matches or it
-			 * hasn't been specified and we've found a mode, for
-			 * which we can configure the clock precisely
-			 */
-			exact_match = true;
-		else if (found && found_rate_error <= rate_error)
-			/*
-			 * We otherwise search for the closest matching clock
-			 * rate - either if no refresh rate has been specified
-			 * or we cannot find an exactly matching one
-			 */
-			continue;
+		if (scanning) {
+			if (f_refresh == mode->refresh || (!f_refresh && !rate_error))
+				/*
+				 * Exact match if either the refresh rate
+				 * matches or it hasn't been specified and we've
+				 * found a mode, for which we can configure the
+				 * clock precisely
+				 */
+				scanning = false;
+			else if (found && found_rate_error <= rate_error)
+				/*
+				 * We otherwise search for the closest matching
+				 * clock rate - either if no refresh rate has
+				 * been specified or we cannot find an exactly
+				 * matching one
+				 */
+				continue;
+		}
 
 		/* Check if supported: sufficient fb memory, supported clock-rate */
 		fb_videomode_to_var(var, mode);
 
+		var->bits_per_pixel = info->var.bits_per_pixel;
+
 		if (info && info->fbops->fb_check_var &&
 		    info->fbops->fb_check_var(var, info)) {
-			exact_match = false;
+			scanning = true;
+			preferred_bad = true;
 			continue;
 		}
 
@@ -855,9 +879,9 @@
 	 * driver, and passing ->info with HDMI platform data.
 	 */
 	if (info && !found) {
-		modelist = hdmi->info->modelist.next &&
-			!list_empty(&hdmi->info->modelist) ?
-			list_entry(hdmi->info->modelist.next,
+		modelist = info->modelist.next &&
+			!list_empty(&info->modelist) ?
+			list_entry(info->modelist.next,
 				   struct fb_modelist, list) :
 			NULL;
 
@@ -1100,6 +1124,7 @@
 	mutex_lock(&hdmi->mutex);
 
 	if (hdmi->hp_state == HDMI_HOTPLUG_CONNECTED) {
+		struct fb_info *info = hdmi->info;
 		unsigned long parent_rate = 0, hdmi_rate;
 
 		/* A device has been plugged in */
@@ -1121,22 +1146,21 @@
 		/* Switched to another (d) power-save mode */
 		msleep(10);
 
-		if (!hdmi->info)
+		if (!info)
 			goto out;
 
-		ch = hdmi->info->par;
+		ch = info->par;
 
 		acquire_console_sem();
 
 		/* HDMI plug in */
 		if (!sh_hdmi_must_reconfigure(hdmi) &&
-		    hdmi->info->state == FBINFO_STATE_RUNNING) {
+		    info->state == FBINFO_STATE_RUNNING) {
 			/*
 			 * First activation with the default monitor - just turn
 			 * on, if we run a resume here, the logo disappears
 			 */
-			if (lock_fb_info(hdmi->info)) {
-				struct fb_info *info = hdmi->info;
+			if (lock_fb_info(info)) {
 				info->var.width = hdmi->var.width;
 				info->var.height = hdmi->var.height;
 				sh_hdmi_display_on(hdmi, info);
@@ -1144,7 +1168,7 @@
 			}
 		} else {
 			/* New monitor or have to wake up */
-			fb_set_suspend(hdmi->info, 0);
+			fb_set_suspend(info, 0);
 		}
 
 		release_console_sem();
@@ -1175,13 +1199,6 @@
 }
 
 static int sh_hdmi_notify(struct notifier_block *nb,
-			  unsigned long action, void *data);
-
-static struct notifier_block sh_hdmi_notifier = {
-	.notifier_call = sh_hdmi_notify,
-};
-
-static int sh_hdmi_notify(struct notifier_block *nb,
 			  unsigned long action, void *data)
 {
 	struct fb_event *event = data;
@@ -1190,7 +1207,7 @@
 	struct sh_mobile_lcdc_board_cfg	*board_cfg = &ch->cfg.board_cfg;
 	struct sh_hdmi *hdmi = board_cfg->board_data;
 
-	if (nb != &sh_hdmi_notifier || !hdmi || hdmi->info != info)
+	if (!hdmi || nb != &hdmi->notifier || hdmi->info != info)
 		return NOTIFY_DONE;
 
 	switch(action) {
@@ -1209,11 +1226,11 @@
 		 * temporarily, synchronise with the work queue and re-acquire
 		 * the info->lock.
 		 */
-		unlock_fb_info(hdmi->info);
+		unlock_fb_info(info);
 		mutex_lock(&hdmi->mutex);
 		hdmi->info = NULL;
 		mutex_unlock(&hdmi->mutex);
-		lock_fb_info(hdmi->info);
+		lock_fb_info(info);
 		return NOTIFY_OK;
 	}
 	return NOTIFY_DONE;
@@ -1311,6 +1328,9 @@
 		goto ecodec;
 	}
 
+	hdmi->notifier.notifier_call = sh_hdmi_notify;
+	fb_register_client(&hdmi->notifier);
+
 	return 0;
 
 ecodec:
@@ -1341,6 +1361,8 @@
 
 	snd_soc_unregister_codec(&pdev->dev);
 
+	fb_unregister_client(&hdmi->notifier);
+
 	board_cfg->display_on = NULL;
 	board_cfg->display_off = NULL;
 	board_cfg->board_data = NULL;
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index dee64c3..2ab7041 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -536,7 +536,7 @@
 	fbiinit2 = sst_read(FBIINIT2);
 	fbiinit3 = sst_read(FBIINIT3);
 
-	/* everything is reset. we enable fbiinit2/3 remap : dac acces ok */
+	/* everything is reset. we enable fbiinit2/3 remap : dac access ok */
 	pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
 	                       PCI_EN_INIT_WR | PCI_REMAP_DAC );
 
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 020589a..2c8364e 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1128,14 +1128,13 @@
 		 * But with imperfect damage info we may send pixels over USB
 		 * that were, in fact, unchanged - wasting limited USB bandwidth
 		 */
-		new_back = vmalloc(new_len);
+		new_back = vzalloc(new_len);
 		if (!new_back)
-			pr_info("No shadow/backing buffer allcoated\n");
+			pr_info("No shadow/backing buffer allocated\n");
 		else {
 			if (dev->backing_buffer)
 				vfree(dev->backing_buffer);
 			dev->backing_buffer = new_back;
-			memset(dev->backing_buffer, 0, new_len);
 		}
 	}
 
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 7617f12..0e120d6 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -215,6 +215,33 @@
 	return 0;
 }
 
+/*
+ * vt8500lcd_blank():
+ *	Blank the display by setting all palette values to zero.  Note that
+ *	True Color modes do not really use the palette, so this will not
+ *	blank the display in all modes.
+ */
+static int vt8500lcd_blank(int blank, struct fb_info *info)
+{
+	int i;
+
+	switch (blank) {
+	case FB_BLANK_POWERDOWN:
+	case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_NORMAL:
+		if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+		    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+			for (i = 0; i < 256; i++)
+				vt8500lcd_setcolreg(i, 0, 0, 0, 0, info);
+	case FB_BLANK_UNBLANK:
+		if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR ||
+		    info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
+			fb_set_cmap(&info->cmap, info);
+	}
+	return 0;
+}
+
 static struct fb_ops vt8500lcd_ops = {
 	.owner		= THIS_MODULE,
 	.fb_set_par	= vt8500lcd_set_par,
@@ -225,6 +252,7 @@
 	.fb_sync	= wmt_ge_sync,
 	.fb_ioctl	= vt8500lcd_ioctl,
 	.fb_pan_display	= vt8500lcd_pan_display,
+	.fb_blank	= vt8500lcd_blank,
 };
 
 static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 428d273..3e6934d 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -492,7 +492,7 @@
 		return;
 
 	acquire_console_sem();
-	for (c = console_drivers; c; c = c->next) {
+	for_each_console(c) {
 		if (!strcmp(c->name, "tty") && c->index == 0)
 			break;
 	}
@@ -562,26 +562,24 @@
 static int xenfb_connect_backend(struct xenbus_device *dev,
 				 struct xenfb_info *info)
 {
-	int ret, evtchn;
+	int ret, evtchn, irq;
 	struct xenbus_transaction xbt;
 
 	ret = xenbus_alloc_evtchn(dev, &evtchn);
 	if (ret)
 		return ret;
-	ret = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
+	irq = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
 					0, dev->devicetype, info);
-	if (ret < 0) {
+	if (irq < 0) {
 		xenbus_free_evtchn(dev, evtchn);
 		xenbus_dev_fatal(dev, ret, "bind_evtchn_to_irqhandler");
-		return ret;
+		return irq;
 	}
-	info->irq = ret;
-
  again:
 	ret = xenbus_transaction_start(&xbt);
 	if (ret) {
 		xenbus_dev_fatal(dev, ret, "starting transaction");
-		return ret;
+		goto unbind_irq;
 	}
 	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
 			    virt_to_mfn(info->page));
@@ -603,20 +601,25 @@
 		if (ret == -EAGAIN)
 			goto again;
 		xenbus_dev_fatal(dev, ret, "completing transaction");
-		return ret;
+		goto unbind_irq;
 	}
 
 	xenbus_switch_state(dev, XenbusStateInitialised);
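+	/* Publish info->irq only after the transaction has succeeded; on
+	 * failure the unbind_irq path below releases the handler instead. */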
+	info->irq = irq;
 	return 0;
 
  error_xenbus:
 	xenbus_transaction_end(xbt, 1);
 	xenbus_dev_fatal(dev, ret, "writing xenstore");
+ unbind_irq:
+	unbind_from_irqhandler(irq, info);
 	return ret;
 }
 
 static void xenfb_disconnect_backend(struct xenfb_info *info)
 {
+	/* Prevent xenfb refresh */
+	info->update_wanted = 0;
 	if (info->irq >= 0)
 		unbind_from_irqhandler(info->irq, info);
 	info->irq = -1;
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 1f51366..f0c9096 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,17 @@
 	  Say Y here if you want to connect 1-wire
 	  simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
 
+config W1_SLAVE_DS2423
+	tristate "Counter 1-wire device (DS2423)"
+	select CRC16
+	help
+	  If you enable this you can read the counter values available
+	  in the DS2423 chipset from the w1_slave file under the
+	  sysfs filesystem.
+
+	  Say Y here if you want to use a 1-wire
+	  counter family device (DS2423).
+
 config W1_SLAVE_DS2431
 	tristate "1kb EEPROM family support (DS2431)"
 	help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index f1f51f1..3c76350 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_W1_SLAVE_THERM)	+= w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)	+= w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2423)	+= w1_ds2423.o
 obj-$(CONFIG_W1_SLAVE_DS2431)	+= w1_ds2431.o
 obj-$(CONFIG_W1_SLAVE_DS2433)	+= w1_ds2433.o
 obj-$(CONFIG_W1_SLAVE_DS2760)	+= w1_ds2760.o
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
new file mode 100644
index 0000000..7a7dbe5
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -0,0 +1,166 @@
+/*
+ *	w1_ds2423.c
+ *
+ * Copyright (c) 2010 Mika Laitio <lamikr@pilppa.org>
+ *
+ * This driver will read and write the value of 4 counters to w1_slave file in
+ * sys filesystem.
+ * Inspired by the w1_therm and w1_ds2431 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/crc16.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+#define CRC16_VALID	0xb001
+#define CRC16_INIT	0
+
+#define COUNTER_COUNT 4
+#define READ_BYTE_COUNT 42
+
+static ssize_t w1_counter_read(struct device *device,
+	struct device_attribute *attr, char *buf);
+
+static struct device_attribute w1_counter_attr =
+	__ATTR(w1_slave, S_IRUGO, w1_counter_read, NULL);
+
+static ssize_t w1_counter_read(struct device *device,
+	struct device_attribute *attr, char *out_buf)
+{
+	struct w1_slave *sl = dev_to_w1_slave(device);
+	struct w1_master *dev = sl->master;
+	u8 rbuf[COUNTER_COUNT * READ_BYTE_COUNT];
+	u8 wrbuf[3];
+	int rom_addr;
+	int read_byte_count;
+	int result;
+	ssize_t c;
+	int ii;
+	int p;
+	int crc;
+
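+	/*
+	 * 0xA5 is the DS2423 "read memory + counter" command; rom_addr
+	 * points at the last byte of memory page 12, so each page read in
+	 * the loop below returns the 42-byte pattern described there.
+	 */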
+	c		= PAGE_SIZE;
+	rom_addr	= (12 << 5) + 31;
+	wrbuf[0]	= 0xA5;
+	wrbuf[1]	= rom_addr & 0xFF;
+	wrbuf[2]	= rom_addr >> 8;
+	mutex_lock(&dev->mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_block(dev, wrbuf, 3);
+		read_byte_count = 0;
+		for (p = 0; p < 4; p++) {
+			/*
+			 * 1 byte for first bytes in ram page read
+			 * 4 bytes for counter
+			 * 4 bytes for zero bits
+			 * 2 bytes for crc
+			 * 31 remaining bytes from the ram page
+			 */
+			read_byte_count += w1_read_block(dev,
+				rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT);
+			for (ii = 0; ii < READ_BYTE_COUNT; ++ii)
+				c -= snprintf(out_buf + PAGE_SIZE - c,
+					c, "%02x ",
+					rbuf[(p * READ_BYTE_COUNT) + ii]);
+			if (read_byte_count != (p + 1) * READ_BYTE_COUNT) {
+				dev_warn(device,
+					"w1_counter_read() returned %u bytes "
+					"instead of %d bytes wanted.\n",
+					read_byte_count,
+					READ_BYTE_COUNT);
+				c -= snprintf(out_buf + PAGE_SIZE - c,
+					c, "crc=NO\n");
+			} else {
+				if (p == 0) {
+					crc = crc16(CRC16_INIT, wrbuf, 3);
+					crc = crc16(crc, rbuf, 11);
+				} else {
+					/*
+					 * DS2423 calculates crc from all bytes
+					 * read after the previous crc bytes.
+					 */
+					crc = crc16(CRC16_INIT,
+						(rbuf + 11) +
+						((p - 1) * READ_BYTE_COUNT),
+						READ_BYTE_COUNT);
+				}
+				if (crc == CRC16_VALID) {
+					result = 0;
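+					/* bytes 1..4 of each 42-byte chunk
+					 * hold the counter, LSB first */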
+					for (ii = 4; ii > 0; ii--) {
+						result <<= 8;
+						result |= rbuf[(p *
+							READ_BYTE_COUNT) + ii];
+					}
+					c -= snprintf(out_buf + PAGE_SIZE - c,
+						c, "crc=YES c=%d\n", result);
+				} else {
+					c -= snprintf(out_buf + PAGE_SIZE - c,
+						c, "crc=NO\n");
+				}
+			}
+		}
+	} else {
+		c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error");
+	}
+	mutex_unlock(&dev->mutex);
+	return PAGE_SIZE - c;
+}
+
+static int w1_f1d_add_slave(struct w1_slave *sl)
+{
+	return device_create_file(&sl->dev, &w1_counter_attr);
+}
+
+static void w1_f1d_remove_slave(struct w1_slave *sl)
+{
+	device_remove_file(&sl->dev, &w1_counter_attr);
+}
+
+static struct w1_family_ops w1_f1d_fops = {
+	.add_slave      = w1_f1d_add_slave,
+	.remove_slave   = w1_f1d_remove_slave,
+};
+
+static struct w1_family w1_family_1d = {
+	.fid = W1_COUNTER_DS2423,
+	.fops = &w1_f1d_fops,
+};
+
+static int __init w1_f1d_init(void)
+{
+	return w1_register_family(&w1_family_1d);
+}
+
+static void __exit w1_f1d_exit(void)
+{
+	w1_unregister_family(&w1_family_1d);
+}
+
+module_init(w1_f1d_init);
+module_exit(w1_f1d_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
+MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 3ca1b92..f3b636d 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -30,6 +30,7 @@
 #define W1_FAMILY_SMEM_01	0x01
 #define W1_FAMILY_SMEM_81	0x81
 #define W1_THERM_DS18S20 	0x10
+#define W1_COUNTER_DS2423	0x1D
 #define W1_THERM_DS1822  	0x22
 #define W1_EEPROM_DS2433  	0x23
 #define W1_THERM_DS18B20 	0x28
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a5ad77e..2e2400e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -409,15 +409,26 @@
 	  Most people will say N.
 
 config F71808E_WDT
-	tristate "Fintek F71808E, F71882FG and F71889FG Watchdog"
+	tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
 	depends on X86 && EXPERIMENTAL
 	help
 	  This is the driver for the hardware watchdog on the Fintek
-	  F71808E, F71882FG and F71889FG Super I/O controllers.
+	  F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
 
 	  You can compile this driver directly into the kernel, or use
 	  it as a module.  The module will be called f71808e_wdt.
 
+config SP5100_TCO
+	tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
+	depends on X86 && PCI
+	---help---
+	  Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
+	  (Total Cost of Ownership) timer is a watchdog timer that will reboot
+	  the machine after its expiration. The expiration time can be
+	  configured with the "heartbeat" parameter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sp5100_tco.
 
 config GEODE_WDT
 	tristate "AMD Geode CS5535/CS5536 Watchdog"
@@ -631,6 +642,24 @@
 
 	  Most people will say N.
 
+config NV_TCO
+	tristate "nVidia TCO Timer/Watchdog"
+	depends on X86 && PCI
+	---help---
+	  Hardware driver for the TCO timer built into the nVidia Hub family
+	  (such as the MCP51).  The TCO (Total Cost of Ownership) timer is a
+	  watchdog timer that will reboot the machine after its second
+	  expiration. The expiration time can be configured with the
+	  "heartbeat" parameter.
+
+	  On some motherboards the driver may fail to reset the chipset's
+	  NO_REBOOT flag which prevents the watchdog from rebooting the
+	  machine. If this is the case you will get a kernel message like
+	  "failed to reset NO_REBOOT flag, reboot disabled by hardware".
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nv_tco.
+
 config RDC321X_WDT
 	tristate "RDC R-321x SoC watchdog"
 	depends on X86_RDC321X
@@ -722,14 +751,15 @@
 	  Most people will say N.
 
 config W83627HF_WDT
-	tristate "W83627HF Watchdog Timer"
+	tristate "W83627HF/W83627DHG Watchdog Timer"
 	depends on X86
 	---help---
 	  This is the driver for the hardware watchdog on the W83627HF chipset
 	  as used in Advantech PC-9578 and Tyan S2721-533 motherboards
-	  (and likely others).  This watchdog simply watches your kernel to
-	  make sure it doesn't freeze, and if it does, it reboots your computer
-	  after a certain amount of time.
+	  (and likely others). The driver also supports the W83627DHG chip.
+	  This watchdog simply watches your kernel to make sure it doesn't
+	  freeze, and if it does, it reboots your computer after a certain
+	  amount of time.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called w83627hf_wdt.
@@ -832,10 +862,22 @@
 
 # M68K Architecture
 
-# M68KNOMMU Architecture
+config M548x_WATCHDOG
+	tristate "MCF548x watchdog support"
+	depends on M548x
+	help
+	  Watchdog driver for the built-in watchdog timer on ColdFire
+	  MCF548x processors.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called m548x_wdt.
 
 # MIPS Architecture
 
+config ATH79_WDT
+	tristate "Atheros AR71XX/AR724X/AR913X hardware watchdog"
+	depends on ATH79
+	help
+	  Hardware driver for the built-in watchdog timer on the Atheros
+	  AR71XX/AR724X/AR913X SoCs.
+
 config BCM47XX_WDT
 	tristate "Broadcom BCM47xx Watchdog Timer"
 	depends on BCM47XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 4b0ef38..dd77665 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -68,6 +68,7 @@
 obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
 obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
 obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
+obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
 obj-$(CONFIG_GEODE_WDT) += geodewdt.o
 obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
 obj-$(CONFIG_SBC_FITPC2_WATCHDOG) += sbc_fitpc2_wdt.o
@@ -86,6 +87,7 @@
 obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
 obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
 obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
+obj-$(CONFIG_NV_TCO) += nv_tco.o
 obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o
 obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
 obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
@@ -104,10 +106,10 @@
 # M32R Architecture
 
 # M68K Architecture
-
-# M68KNOMMU Architecture
+obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o
 
 # MIPS Architecture
+obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
 obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
 obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
 obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 1e9caea..fa4d360 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@
  *	want to register another driver on the same PCI id.
  */
 
-static struct pci_device_id ali_pci_tbl[] = {
+static struct pci_device_id ali_pci_tbl[] __used = {
 	{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
 	{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
 	{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index d8d4da9..4b7a2b4 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@
 module_init(alim7101_wdt_init);
 module_exit(alim7101_wdt_unload);
 
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata = {
+static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
 	{ }
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
new file mode 100644
index 0000000..725c84b
--- /dev/null
+++ b/drivers/watchdog/ath79_wdt.c
@@ -0,0 +1,305 @@
+/*
+ * Atheros AR71XX/AR724X/AR913X built-in hardware watchdog timer.
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This driver was based on: drivers/watchdog/ixp4xx_wdt.c
+ *	Author: Deepak Saxena <dsaxena@plexity.net>
+ *	Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * which again was based on sa1100 driver,
+ *	Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+#define DRIVER_NAME	"ath79-wdt"
+
+#define WDT_TIMEOUT	15	/* seconds */
+
+#define WDOG_CTRL_LAST_RESET	BIT(31)
+#define WDOG_CTRL_ACTION_MASK	3
+#define WDOG_CTRL_ACTION_NONE	0	/* no action */
+#define WDOG_CTRL_ACTION_GPI	1	/* general purpose interrupt */
+#define WDOG_CTRL_ACTION_NMI	2	/* NMI */
+#define WDOG_CTRL_ACTION_FCR	3	/* full chip reset */
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+			   "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+			  "(default=" __MODULE_STRING(WDT_TIMEOUT) "s)");
+
+static unsigned long wdt_flags;
+
+#define WDT_FLAGS_BUSY		0
+#define WDT_FLAGS_EXPECT_CLOSE	1
+
+static struct clk *wdt_clk;
+static unsigned long wdt_freq;
+static int boot_status;
+static int max_timeout;
+
+static inline void ath79_wdt_keepalive(void)
+{
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+}
+
+static inline void ath79_wdt_enable(void)
+{
+	ath79_wdt_keepalive();
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+}
+
+static inline void ath79_wdt_disable(void)
+{
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+}
+
+static int ath79_wdt_set_timeout(int val)
+{
+	if (val < 1 || val > max_timeout)
+		return -EINVAL;
+
+	timeout = val;
+	ath79_wdt_keepalive();
+
+	return 0;
+}
+
+static int ath79_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_FLAGS_BUSY, &wdt_flags))
+		return -EBUSY;
+
+	clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+	ath79_wdt_enable();
+
+	return nonseekable_open(inode, file);
+}
+
+static int ath79_wdt_release(struct inode *inode, struct file *file)
+{
+	if (test_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags))
+		ath79_wdt_disable();
+	else {
+		pr_crit(DRIVER_NAME ": device closed unexpectedly, "
+			"watchdog timer will not stop!\n");
+		ath79_wdt_keepalive();
+	}
+
+	clear_bit(WDT_FLAGS_BUSY, &wdt_flags);
+	clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+	return 0;
+}
+
+static ssize_t ath79_wdt_write(struct file *file, const char *data,
+				size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+
+				if (c == 'V')
+					set_bit(WDT_FLAGS_EXPECT_CLOSE,
+						&wdt_flags);
+			}
+		}
+
+		ath79_wdt_keepalive();
+	}
+
+	return len;
+}
+
+static const struct watchdog_info ath79_wdt_info = {
+	.options		= WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+				  WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+	.firmware_version	= 0,
+	.identity		= "ATH79 watchdog",
+};
+
+static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int err;
+	int t;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		err = copy_to_user(argp, &ath79_wdt_info,
+				   sizeof(ath79_wdt_info)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		err = put_user(0, p);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		err = put_user(boot_status, p);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		ath79_wdt_keepalive();
+		err = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		err = get_user(t, p);
+		if (err)
+			break;
+
+		err = ath79_wdt_set_timeout(t);
+		if (err)
+			break;
+
+		/* fallthrough */
+	case WDIOC_GETTIMEOUT:
+		err = put_user(timeout, p);
+		break;
+
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	return err;
+}
+
+static const struct file_operations ath79_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= ath79_wdt_write,
+	.unlocked_ioctl	= ath79_wdt_ioctl,
+	.open		= ath79_wdt_open,
+	.release	= ath79_wdt_release,
+};
+
+static struct miscdevice ath79_wdt_miscdev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &ath79_wdt_fops,
+};
+
+static int __devinit ath79_wdt_probe(struct platform_device *pdev)
+{
+	u32 ctrl;
+	int err;
+
+	wdt_clk = clk_get(&pdev->dev, "wdt");
+	if (IS_ERR(wdt_clk))
+		return PTR_ERR(wdt_clk);
+
+	err = clk_enable(wdt_clk);
+	if (err)
+		goto err_clk_put;
+
+	wdt_freq = clk_get_rate(wdt_clk);
+	if (!wdt_freq) {
+		err = -EINVAL;
+		goto err_clk_disable;
+	}
+
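+	/*
+	 * ath79_wdt_keepalive() writes wdt_freq * timeout into the watchdog
+	 * count register, so the largest count of 0xffffffff bounds the
+	 * usable timeout in seconds.
+	 */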
+	max_timeout = (0xfffffffful / wdt_freq);
+	if (timeout < 1 || timeout > max_timeout) {
+		timeout = max_timeout;
+		dev_info(&pdev->dev,
+			"timeout value must be 0 < timeout < %d, using %d\n",
+			max_timeout, timeout);
+	}
+
+	ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+	boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
+
+	err = misc_register(&ath79_wdt_miscdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"unable to register misc device, err=%d\n", err);
+		goto err_clk_disable;
+	}
+
+	return 0;
+
+err_clk_disable:
+	clk_disable(wdt_clk);
+err_clk_put:
+	clk_put(wdt_clk);
+	return err;
+}
+
+static int __devexit ath79_wdt_remove(struct platform_device *pdev)
+{
+	misc_deregister(&ath79_wdt_miscdev);
+	clk_disable(wdt_clk);
+	clk_put(wdt_clk);
+	return 0;
+}
+
+static void ath79_wdt_shutdown(struct platform_device *pdev)
+{
+	ath79_wdt_disable();
+}
+
+static struct platform_driver ath79_wdt_driver = {
+	.remove		= __devexit_p(ath79_wdt_remove),
+	.shutdown	= ath79_wdt_shutdown,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init ath79_wdt_init(void)
+{
+	return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe);
+}
+module_init(ath79_wdt_init);
+
+static void __exit ath79_wdt_exit(void)
+{
+	platform_driver_unregister(&ath79_wdt_driver);
+}
+module_exit(ath79_wdt_exit);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
+MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index d11ffb0..7e7ec9c 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -85,6 +85,22 @@
 	return 0;
 }
 
+static void __booke_wdt_set(void *data)
+{
+	u32 val;
+
+	val = mfspr(SPRN_TCR);
+	val &= ~WDTP_MASK;
+	val |= WDTP(booke_wdt_period);
+
+	mtspr(SPRN_TCR, val);
+}
+
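+/*
+ * TCR is a per-CPU register, so the new period is written on each
+ * online CPU via on_each_cpu().
+ */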
+static void booke_wdt_set(void)
+{
+	on_each_cpu(__booke_wdt_set, NULL, 0);
+}
+
 static void __booke_wdt_ping(void *data)
 {
 	mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
@@ -181,8 +197,7 @@
 #else
 		booke_wdt_period = tmp;
 #endif
-		mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
-						WDTP(booke_wdt_period));
+		booke_wdt_set();
 		return 0;
 	case WDIOC_GETTIMEOUT:
 		return put_user(booke_wdt_period, p);
@@ -193,8 +208,15 @@
 	return 0;
 }
 
+/* wdt_is_active stores whether or not the /dev/watchdog device is open */
+static unsigned long wdt_is_active;
+
 static int booke_wdt_open(struct inode *inode, struct file *file)
 {
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &wdt_is_active))
+		return -EBUSY;
+
 	spin_lock(&booke_wdt_lock);
 	if (booke_wdt_enabled == 0) {
 		booke_wdt_enabled = 1;
@@ -210,8 +232,17 @@
 
 static int booke_wdt_release(struct inode *inode, struct file *file)
 {
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+	/* Normally, the watchdog is disabled when /dev/watchdog is closed, but
+	 * if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
+	 * watchdog should remain enabled.  So we disable it only if
+	 * CONFIG_WATCHDOG_NOWAYOUT is not defined.
+	 */
 	on_each_cpu(__booke_wdt_disable, NULL, 0);
 	booke_wdt_enabled = 0;
+#endif
+
+	clear_bit(0, &wdt_is_active);
 
 	return 0;
 }
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 65e5796..d4d8d1f 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -42,18 +42,21 @@
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
 #define SIO_REG_DEVREV		0x22	/* Device revision */
 #define SIO_REG_MANID		0x23	/* Fintek ID (2 bytes) */
+#define SIO_REG_ROM_ADDR_SEL	0x27	/* ROM address select */
+#define SIO_REG_MFUNCT1		0x29	/* Multi function select 1 */
+#define SIO_REG_MFUNCT2		0x2a	/* Multi function select 2 */
+#define SIO_REG_MFUNCT3		0x2b	/* Multi function select 3 */
 #define SIO_REG_ENABLE		0x30	/* Logical device enable */
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID		0x1934	/* Manufacturers ID */
-#define SIO_F71808_ID		0x0901  /* Chipset ID */
-#define SIO_F71858_ID		0x0507  /* Chipset ID */
+#define SIO_F71808_ID		0x0901	/* Chipset ID */
+#define SIO_F71858_ID		0x0507	/* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
+#define SIO_F71869_ID		0x0814	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F71889_ID		0x0723	/* Chipset ID */
 
-#define	F71882FG_REG_START		0x01
-
 #define F71808FG_REG_WDO_CONF		0xf0
 #define F71808FG_REG_WDT_CONF		0xf5
 #define F71808FG_REG_WD_TIME		0xf6
@@ -70,13 +73,15 @@
 #define WATCHDOG_MAX_TIMEOUT	(60 * 255)
 #define WATCHDOG_PULSE_WIDTH	125	/* 125 ms, default pulse width for
 					   watchdog signal */
+#define WATCHDOG_F71862FG_PIN	63	/* default watchdog reset output
+					   pin number 63 */
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
 static const int max_timeout = WATCHDOG_MAX_TIMEOUT;
-static int timeout = 60;	/* default timeout in seconds */
+static int timeout = WATCHDOG_TIMEOUT;	/* default timeout in seconds */
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout,
 	"Watchdog timeout in seconds. 1<= timeout <="
@@ -89,6 +94,12 @@
 	"Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
 			" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
 
+static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
+module_param(f71862fg_pin, uint, 0);
+MODULE_PARM_DESC(f71862fg_pin,
+	"Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
+			" (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
+
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0444);
 MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
@@ -98,12 +109,13 @@
 MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
 	" given initial timeout. Zero (default) disables this feature.");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
 
 static const char *f71808e_names[] = {
 	"f71808fg",
 	"f71858fg",
 	"f71862fg",
+	"f71869",
 	"f71882fg",
 	"f71889fg",
 };
@@ -282,6 +294,28 @@
 	return err;
 }
 
+static int f71862fg_pin_configure(unsigned short ioaddr)
+{
+	/* When ioaddr is non-zero the calling function has to take care of
+	   mutex handling and superio preparation! */
+
+	if (f71862fg_pin == 63) {
+		if (ioaddr) {
+			/* SPI must be disabled first to use this pin! */
+			superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6);
+			superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4);
+		}
+	} else if (f71862fg_pin == 56) {
+		if (ioaddr)
+			superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1);
+	} else {
+		printk(KERN_ERR DRVNAME ": Invalid argument f71862fg_pin=%d\n",
+				f71862fg_pin);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int watchdog_start(void)
 {
 	/* Make sure we don't die as soon as the watchdog is enabled below */
@@ -299,19 +333,30 @@
 	switch (watchdog.type) {
 	case f71808fg:
 		/* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */
-		superio_clear_bit(watchdog.sioaddr, 0x2a, 3);
-		superio_clear_bit(watchdog.sioaddr, 0x2b, 3);
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT2, 3);
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 3);
+		break;
+
+	case f71862fg:
+		err = f71862fg_pin_configure(watchdog.sioaddr);
+		if (err)
+			goto exit_superio;
+		break;
+
+	case f71869:
+		/* GPIO14 --> WDTRST# */
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4);
 		break;
 
 	case f71882fg:
 		/* Set pin 56 to WDTRST# */
-		superio_set_bit(watchdog.sioaddr, 0x29, 1);
+		superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1);
 		break;
 
 	case f71889fg:
 		/* set pin 40 to WDTRST# */
-		superio_outb(watchdog.sioaddr, 0x2b,
-				superio_inb(watchdog.sioaddr, 0x2b) & 0xcf);
+		superio_outb(watchdog.sioaddr, SIO_REG_MFUNCT3,
+			superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
 		break;
 
 	default:
@@ -711,16 +756,19 @@
 	case SIO_F71808_ID:
 		watchdog.type = f71808fg;
 		break;
+	case SIO_F71862_ID:
+		watchdog.type = f71862fg;
+		err = f71862fg_pin_configure(0); /* validate module parameter */
+		break;
+	case SIO_F71869_ID:
+		watchdog.type = f71869;
+		break;
 	case SIO_F71882_ID:
 		watchdog.type = f71882fg;
 		break;
 	case SIO_F71889_ID:
 		watchdog.type = f71889fg;
 		break;
-	case SIO_F71862_ID:
-		/* These have a watchdog, though it isn't implemented (yet). */
-		err = -ENOSYS;
-		goto exit;
 	case SIO_F71858_ID:
 		/* Confirmed (by datasheet) not to have a watchdog. */
 		err = -ENODEV;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5b..24b966d 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@
 	unsigned long rom_pl;
 	static int die_nmi_called;
 
-	if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+	if (ulReason != DIE_NMIUNKNOWN)
 		goto out;
 
 	if (!hpwdt_nmi_decoding)
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index b8838d2..2c6c2b4 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
 /*
  *	intel TCO Watchdog Driver
  *
- *	(c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
+ *	(c) Copyright 2006-2010 Wim Van Sebroeck <wim@iguana.be>.
  *
  *	This program is free software; you can redistribute it and/or
  *	modify it under the terms of the GNU General Public License
@@ -26,13 +26,15 @@
  *	document number 301473-002, 301474-026: 82801F (ICH6)
  *	document number 313082-001, 313075-006: 631xESB, 632xESB
  *	document number 307013-003, 307014-024: 82801G (ICH7)
+ *	document number 322896-001, 322897-001: NM10
  *	document number 313056-003, 313057-017: 82801H (ICH8)
  *	document number 316972-004, 316973-012: 82801I (ICH9)
  *	document number 319973-002, 319974-002: 82801J (ICH10)
  *	document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
  *	document number 320066-003, 320257-008: EP80597 (IICH)
- *	document number TBD                   : Cougar Point (CPT)
+ *	document number 324645-001, 324646-001: Cougar Point (CPT)
  *	document number TBD                   : Patsburg (PBG)
+ *	document number TBD                   : DH89xxCC
  */
 
 /*
@@ -85,6 +87,7 @@
 	TCO_ICH7DH,	/* ICH7DH */
 	TCO_ICH7M,	/* ICH7-M & ICH7-U */
 	TCO_ICH7MDH,	/* ICH7-M DH */
+	TCO_NM10,	/* NM10 */
 	TCO_ICH8,	/* ICH8 & ICH8R */
 	TCO_ICH8DH,	/* ICH8DH */
 	TCO_ICH8DO,	/* ICH8DO */
@@ -149,6 +152,7 @@
 	TCO_CPT31,	/* Cougar Point */
 	TCO_PBG1,	/* Patsburg */
 	TCO_PBG2,	/* Patsburg */
+	TCO_DH89XXCC,	/* DH89xxCC */
 };
 
 static struct {
@@ -174,6 +178,7 @@
 	{"ICH7DH", 2},
 	{"ICH7-M or ICH7-U", 2},
 	{"ICH7-M DH", 2},
+	{"NM10", 2},
 	{"ICH8 or ICH8R", 2},
 	{"ICH8DH", 2},
 	{"ICH8DO", 2},
@@ -238,6 +243,7 @@
 	{"Cougar Point", 2},
 	{"Patsburg", 2},
 	{"Patsburg", 2},
+	{"DH89xxCC", 2},
 	{NULL, 0}
 };
 
@@ -291,6 +297,7 @@
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30,		TCO_ICH7DH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1,		TCO_ICH7M)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31,		TCO_ICH7MDH)},
+	{ ITCO_PCI_DEVICE(0x27bc,				TCO_NM10)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0,		TCO_ICH8)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2,		TCO_ICH8DH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3,		TCO_ICH8DO)},
@@ -355,6 +362,7 @@
 	{ ITCO_PCI_DEVICE(0x1c5f,				TCO_CPT31)},
 	{ ITCO_PCI_DEVICE(0x1d40,				TCO_PBG1)},
 	{ ITCO_PCI_DEVICE(0x1d41,				TCO_PBG2)},
+	{ ITCO_PCI_DEVICE(0x2310,				TCO_DH89XXCC)},
 	{ 0, },			/* End of list */
 };
 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 2852bb2..8114719 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -21,7 +21,7 @@
 #include <linux/watchdog.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
-#include <mach/timex.h>
+#include <mach/hardware.h>
 #include <mach/regs-timer.h>
 
 #define WDT_DEFAULT_TIME	5	/* seconds */
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m548x_wdt.c
new file mode 100644
index 0000000..cabbcfe
--- /dev/null
+++ b/drivers/watchdog/m548x_wdt.c
@@ -0,0 +1,227 @@
+/*
+ * drivers/watchdog/m548x_wdt.c
+ *
+ * Watchdog driver for ColdFire MCF548x processors
+ * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
+ *
+ * Adapted from the IXP4xx watchdog driver, which carries these notices:
+ *
+ *  Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ *  Copyright 2004 (c) MontaVista, Software, Inc.
+ *  Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+#include <asm/coldfire.h>
+#include <asm/m548xsim.h>
+#include <asm/m548xgpt.h>
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int heartbeat = 30;	/* (secs) Default is 0.5 minute */
+static unsigned long wdt_status;
+
+#define	WDT_IN_USE		0
+#define	WDT_OK_TO_CLOSE		1
+
+static void wdt_enable(void)
+{
+	unsigned int gms0;
+
+	/* preserve GPIO usage, if any */
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	if (gms0 & MCF_GPT_GMS_TMS_GPIO)
+		gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK
+							| MCF_GPT_GMS_OD);
+	else
+		gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
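+	/*
+	 * A prescaler of heartbeat * (MCF_BUSCLK / 0xffff) with a count of
+	 * 0xffff makes the timer expire after roughly "heartbeat" seconds.
+	 */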
+	__raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
+			MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0);
+	gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_disable(void)
+{
+	unsigned int gms0;
+
+	/* disable watchdog */
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_keepalive(void)
+{
+	unsigned int gms0;
+
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 |= MCF_GPT_GMS_OCPW(0xA5);
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static int m548x_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+		return -EBUSY;
+
+	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+	wdt_enable();
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t m548x_wdt_write(struct file *file, const char *data,
+						size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+			}
+		}
+		wdt_keepalive();
+	}
+	return len;
+}
+
+static const struct watchdog_info ident = {
+	.options	= WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
+				WDIOF_KEEPALIVEPING,
+	.identity	= "Coldfire M548x Watchdog",
+};
+
+static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
+							 unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int time;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		ret = copy_to_user((struct watchdog_info *)arg, &ident,
+				   sizeof(ident)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		ret = put_user(0, (int *)arg);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		ret = put_user(0, (int *)arg);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		wdt_keepalive();
+		ret = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		ret = get_user(time, (int *)arg);
+		if (ret)
+			break;
+
+		if (time <= 0 || time > 30) {
+			ret = -EINVAL;
+			break;
+		}
+
+		heartbeat = time;
+		wdt_enable();
+		/* Fall through */
+
+	case WDIOC_GETTIMEOUT:
+		ret = put_user(heartbeat, (int *)arg);
+		break;
+	}
+	return ret;
+}
+
+static int m548x_wdt_release(struct inode *inode, struct file *file)
+{
+	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
+		wdt_disable();
+	else {
+		printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
+					"timer will not stop\n");
+		wdt_keepalive();
+	}
+	clear_bit(WDT_IN_USE, &wdt_status);
+	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+	return 0;
+}
+
+
+static const struct file_operations m548x_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= m548x_wdt_write,
+	.unlocked_ioctl	= m548x_wdt_ioctl,
+	.open		= m548x_wdt_open,
+	.release	= m548x_wdt_release,
+};
+
+static struct miscdevice m548x_wdt_miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= "watchdog",
+	.fops		= &m548x_wdt_fops,
+};
+
+static int __init m548x_wdt_init(void)
+{
+	if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
+						"Coldfire M548x Watchdog")) {
+		printk(KERN_WARNING
+				"Coldfire M548x Watchdog : I/O region busy\n");
+		return -EBUSY;
+	}
+	printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
+
+	return misc_register(&m548x_wdt_miscdev);
+}
+
+static void __exit m548x_wdt_exit(void)
+{
+	misc_deregister(&m548x_wdt_miscdev);
+	release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+}
+
+module_init(m548x_wdt_init);
+module_exit(m548x_wdt_exit);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_DESCRIPTION("Coldfire M548x Watchdog");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
new file mode 100644
index 0000000..1a50aa7
--- /dev/null
+++ b/drivers/watchdog/nv_tco.c
@@ -0,0 +1,512 @@
+/*
+ *	nv_tco 0.01:	TCO timer driver for NV chipsets
+ *
+ *	(c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ *	Based off i8xx_tco.c:
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	TCO timer driver for NV chipsets
+ *	based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "nv_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "NV_TCO"
+#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static unsigned int tcobase;
+static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *nv_tco_platform_device;
+
+/* module parameters */
+#define WATCHDOG_HEARTBEAT 30	/* 30 sec default heartbeat (2<heartbeat<39) */
+static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, "
+			    "default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static inline unsigned char seconds_to_ticks(int seconds)
+{
+	/* the internal timer is stored as ticks which decrement
+	 * every 0.6 seconds */
+	return (seconds * 10) / 6;
+}
+
+static void tco_timer_start(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inl(TCO_CNT(tcobase));
+	val &= ~TCO_CNT_TCOHALT;
+	outl(val, TCO_CNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inl(TCO_CNT(tcobase));
+	val |= TCO_CNT_TCOHALT;
+	outl(val, TCO_CNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	outb(0x01, TCO_RLD(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+	int ret = 0;
+	unsigned char tmrval;
+	unsigned long flags;
+	u8 val;
+
+	/*
+	 * note seconds_to_ticks(t) > t, so if t > 0x3f, so is
+	 * tmrval=seconds_to_ticks(t).  Check that the count in seconds isn't
+	 * out of range on its own (to avoid overflow in tmrval).
+	 */
+	if (t < 0 || t > 0x3f)
+		return -EINVAL;
+	tmrval = seconds_to_ticks(t);
+
+	/* "Values of 0h-3h are ignored and should not be attempted" */
+	if (tmrval > 0x3f || tmrval < 0x04)
+		return -EINVAL;
+
+	/* Write new heartbeat to watchdog */
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inb(TCO_TMR(tcobase));
+	val &= 0xc0;
+	val |= tmrval;
+	outb(val, TCO_TMR(tcobase));
+	val = inb(TCO_TMR(tcobase));
+
+	if ((val & 0x3f) != tmrval)
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&tco_lock, flags);
+
+	if (ret)
+		return ret;
+
+	heartbeat = t;
+	return 0;
+}
+
+/*
+ *	/dev/watchdog handling
+ */
+
+static int nv_tco_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &timer_alive))
+		return -EBUSY;
+
+	/* Reload and activate timer */
+	tco_timer_keepalive();
+	tco_timer_start();
+	return nonseekable_open(inode, file);
+}
+
+static int nv_tco_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer */
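+	/* tco_expect_close is 42 only if nv_tco_write() saw the magic 'V' */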
+	if (tco_expect_close == 42) {
+		tco_timer_stop();
+	} else {
+		printk(KERN_CRIT PFX "Unexpected close, not stopping "
+		       "watchdog!\n");
+		tco_timer_keepalive();
+	}
+	clear_bit(0, &timer_alive);
+	tco_expect_close = 0;
+	return 0;
+}
+
+static ssize_t nv_tco_write(struct file *file, const char __user *data,
+			    size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/*
+			 * note: just in case someone wrote the magic character
+			 * five months ago...
+			 */
+			tco_expect_close = 0;
+
+			/*
+			 * scan to see whether or not we got the magic
+			 * character
+			 */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					tco_expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		tco_timer_keepalive();
+	}
+	return len;
+}
+
+static long nv_tco_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg)
+{
+	int new_options, retval = -EINVAL;
+	int new_heartbeat;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	static const struct watchdog_info ident = {
+		.options =		WDIOF_SETTIMEOUT |
+					WDIOF_KEEPALIVEPING |
+					WDIOF_MAGICCLOSE,
+		.firmware_version =	0,
+		.identity =		TCO_MODULE_NAME,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(new_options, p))
+			return -EFAULT;
+		if (new_options & WDIOS_DISABLECARD) {
+			tco_timer_stop();
+			retval = 0;
+		}
+		if (new_options & WDIOS_ENABLECARD) {
+			tco_timer_keepalive();
+			tco_timer_start();
+			retval = 0;
+		}
+		return retval;
+	case WDIOC_KEEPALIVE:
+		tco_timer_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (tco_timer_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		tco_timer_keepalive();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ *	Kernel Interfaces
+ */
+
+static const struct file_operations nv_tco_fops = {
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.write =		nv_tco_write,
+	.unlocked_ioctl =	nv_tco_ioctl,
+	.open =			nv_tco_open,
+	.release =		nv_tco_release,
+};
+
+static struct miscdevice nv_tco_miscdev = {
+	.minor =	WATCHDOG_MINOR,
+	.name =		"watchdog",
+	.fops =		&nv_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id tco_pci_tbl[] = {
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
+
+/*
+ *	Init & exit routines
+ */
+
+static unsigned char __init nv_tco_getdevice(void)
+{
+	struct pci_dev *dev = NULL;
+	u32 val;
+
+	/* Find the PCI device */
+	for_each_pci_dev(dev) {
+		if (pci_match_id(tco_pci_tbl, dev) != NULL) {
+			tco_pci = dev;
+			break;
+		}
+	}
+
+	if (!tco_pci)
+		return 0;
+
+	/* Find the base io port */
+	pci_read_config_dword(tco_pci, 0x64, &val);
+	val &= 0xffff;
+	if (val == 0x0001 || val == 0x0000) {
+		/* Something is wrong here, the BAR isn't set up */
+		printk(KERN_ERR PFX "failed to get tcobase address\n");
+		return 0;
+	}
+	val &= 0xff00;
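+	/* The TCO register block sits at offset 0x40 within this I/O range;
+	 * see the MCP51_SMI_EN() comment in nv_tco.h. */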
+	tcobase = val + 0x40;
+
+	if (!request_region(tcobase, 0x10, "NV TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+		       tcobase);
+		return 0;
+	}
+
+	/* Set a reasonable heartbeat before we stop the timer */
+	tco_timer_set_heartbeat(30);
+
+	/*
+	 * Stop the TCO before we change anything so we don't race with
+	 * a zeroed timer.
+	 */
+	tco_timer_keepalive();
+	tco_timer_stop();
+
+	/* Disable SMI caused by TCO */
+	if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+		       MCP51_SMI_EN(tcobase));
+		goto out;
+	}
+	val = inl(MCP51_SMI_EN(tcobase));
+	val &= ~MCP51_SMI_EN_TCO;
+	outl(val, MCP51_SMI_EN(tcobase));
+	val = inl(MCP51_SMI_EN(tcobase));
+	release_region(MCP51_SMI_EN(tcobase), 4);
+	if (val & MCP51_SMI_EN_TCO) {
+		printk(KERN_ERR PFX "Could not disable SMI caused by TCO\n");
+		goto out;
+	}
+
+	/* Check chipset's NO_REBOOT bit */
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
+		printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot "
+		       "disabled by hardware\n");
+		goto out;
+	}
+
+	return 1;
+out:
+	release_region(tcobase, 0x10);
+	return 0;
+}
+
+static int __devinit nv_tco_init(struct platform_device *dev)
+{
+	int ret;
+
+	/* Check whether or not the hardware watchdog is there */
+	if (!nv_tco_getdevice())
+		return -ENODEV;
+
+	/* Check to see if last reboot was due to watchdog timeout */
+	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+	       inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");
+
+	/* Clear out the old status */
+	outl(TCO_STS_RESET, TCO_STS(tcobase));
+
+	/*
+	 * Check that the heartbeat value is within its range.
+	 * If not, reset to the default.
+	 */
+	if (tco_timer_set_heartbeat(heartbeat)) {
+		heartbeat = WATCHDOG_HEARTBEAT;
+		tco_timer_set_heartbeat(heartbeat);
+		printk(KERN_INFO PFX "heartbeat value must be 2<heartbeat<39, "
+		       "using %d\n", heartbeat);
+	}
+
+	ret = misc_register(&nv_tco_miscdev);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor=%d "
+		       "(err=%d)\n", WATCHDOG_MINOR, ret);
+		goto unreg_region;
+	}
+
+	clear_bit(0, &timer_alive);
+
+	tco_timer_stop();
+
+	printk(KERN_INFO PFX "initialized (0x%04x). heartbeat=%d sec "
+	       "(nowayout=%d)\n", tcobase, heartbeat, nowayout);
+
+	return 0;
+
+unreg_region:
+	release_region(tcobase, 0x10);
+	return ret;
+}
+
+static void __devexit nv_tco_cleanup(void)
+{
+	u32 val;
+
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		tco_timer_stop();
+
+	/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
+		printk(KERN_CRIT PFX "Couldn't unset REBOOT bit.  Machine may "
+		       "soon reset\n");
+	}
+
+	/* Deregister */
+	misc_deregister(&nv_tco_miscdev);
+	release_region(tcobase, 0x10);
+}
+
+static int __devexit nv_tco_remove(struct platform_device *dev)
+{
+	if (tcobase)
+		nv_tco_cleanup();
+
+	return 0;
+}
+
+static void nv_tco_shutdown(struct platform_device *dev)
+{
+	tco_timer_stop();
+}
+
+static struct platform_driver nv_tco_driver = {
+	.probe		= nv_tco_init,
+	.remove		= __devexit_p(nv_tco_remove),
+	.shutdown	= nv_tco_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= TCO_MODULE_NAME,
+	},
+};
+
+static int __init nv_tco_init_module(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "NV TCO WatchDog Timer Driver v%s\n",
+	       TCO_VERSION);
+
+	err = platform_driver_register(&nv_tco_driver);
+	if (err)
+		return err;
+
+	nv_tco_platform_device = platform_device_register_simple(
+					TCO_MODULE_NAME, -1, NULL, 0);
+	if (IS_ERR(nv_tco_platform_device)) {
+		err = PTR_ERR(nv_tco_platform_device);
+		goto unreg_platform_driver;
+	}
+
+	return 0;
+
+unreg_platform_driver:
+	platform_driver_unregister(&nv_tco_driver);
+	return err;
+}
+
+static void __exit nv_tco_cleanup_module(void)
+{
+	platform_device_unregister(nv_tco_platform_device);
+	platform_driver_unregister(&nv_tco_driver);
+	printk(KERN_INFO PFX "NV TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(nv_tco_init_module);
+module_exit(nv_tco_cleanup_module);
+
+MODULE_AUTHOR("Mike Waychison");
+MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/nv_tco.h b/drivers/watchdog/nv_tco.h
new file mode 100644
index 0000000..c2d1d04
--- /dev/null
+++ b/drivers/watchdog/nv_tco.h
@@ -0,0 +1,64 @@
+/*
+ *	nv_tco:	TCO timer driver for nVidia chipsets.
+ *
+ *	(c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ *	Supported Chipsets:
+ *		- MCP51/MCP55
+ *
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Neither kernel concepts nor Nils Faerber admit liability nor provide
+ *	warranty for any of this software. This material is provided
+ *	"AS-IS" and at no charge.
+ *
+ *	(c) Copyright 2000	kernel concepts <nils@kernelconcepts.de>
+ *				developed for
+ *                              Jentro AG, Haar/Munich (Germany)
+ *
+ *	TCO timer driver for NV chipsets
+ *	based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ * Some address definitions for the TCO
+ */
+
+#define TCO_RLD(base)	((base) + 0x00)	/* TCO Timer Reload and Current Value */
+#define TCO_TMR(base)	((base) + 0x01)	/* TCO Timer Initial Value	*/
+
+#define TCO_STS(base)	((base) + 0x04)	/* TCO Status Register		*/
+/*
+ * TCO Boot Status bit: set on TCO reset, reset by software or standby
+ * power-good (survives reboots); unfortunately this bit is never
+ * set.
+ */
+#  define TCO_STS_BOOT_STS	(1 << 9)
+/*
+ * first and 2nd timeout status bits, these also survive a warm boot,
+ * and they work, so we use them.
+ */
+#  define TCO_STS_TCO_INT_STS	(1 << 1)
+#  define TCO_STS_TCO2TO_STS	(1 << 10)
+#  define TCO_STS_RESET		(TCO_STS_BOOT_STS | TCO_STS_TCO2TO_STS | \
+				 TCO_STS_TCO_INT_STS)
+
+#define TCO_CNT(base)	((base) + 0x08)	/* TCO Control Register	*/
+#  define TCO_CNT_TCOHALT	(1 << 12)
+
+#define MCP51_SMBUS_SETUP_B 0xe8
+#  define MCP51_SMBUS_SETUP_B_TCO_REBOOT (1 << 25)
+
+/*
+ * The SMI_EN register is at the base io address + 0x04,
+ * while TCOBASE is + 0x40.
+ */
+#define MCP51_SMI_EN(base)	((base) - 0x40 + 0x04)
+#  define MCP51_SMI_EN_TCO	((1 << 4) | (1 << 5))
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
new file mode 100644
index 0000000..8083728
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.c
@@ -0,0 +1,480 @@
+/*
+ *	sp5100_tco :	TCO timer driver for sp5100 chipsets
+ *
+ *	(c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ *	Based on i8xx_tco.c:
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide"
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "sp5100_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "SP5100 TCO timer"
+#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static void __iomem *tcobase;
+static unsigned int pm_iobase;
+static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *sp5100_tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *sp5100_tco_platform_device;
+
+/* module parameters */
+
+#define WATCHDOG_HEARTBEAT 60	/* 60 sec default heartbeat. */
+static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+		 __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static void tco_timer_start(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val |= SP5100_WDT_START_STOP_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_WDT_START_STOP_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val |= SP5100_WDT_TRIGGER_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+	unsigned long flags;
+
+	if (t < 0 || t > 0xffff)
+		return -EINVAL;
+
+	/* Write new heartbeat to watchdog */
+	spin_lock_irqsave(&tco_lock, flags);
+	writel(t, SP5100_WDT_COUNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+
+	heartbeat = t;
+	return 0;
+}
+
+/*
+ *	/dev/watchdog handling
+ */
+
+static int sp5100_tco_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &timer_alive))
+		return -EBUSY;
+
+	/* Reload and activate timer */
+	tco_timer_start();
+	tco_timer_keepalive();
+	return nonseekable_open(inode, file);
+}
+
+static int sp5100_tco_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer. */
+	if (tco_expect_close == 42) {
+		tco_timer_stop();
+	} else {
+		printk(KERN_CRIT PFX
+			"Unexpected close, not stopping watchdog!\n");
+		tco_timer_keepalive();
+	}
+	clear_bit(0, &timer_alive);
+	tco_expect_close = 0;
+	return 0;
+}
+
+static ssize_t sp5100_tco_write(struct file *file, const char __user *data,
+				size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/* note: just in case someone wrote the magic character
+			 * five months ago... */
+			tco_expect_close = 0;
+
+			/* scan to see whether or not we got the magic character
+			 */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					tco_expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		tco_timer_keepalive();
+	}
+	return len;
+}
+
+static long sp5100_tco_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	int new_options, retval = -EINVAL;
+	int new_heartbeat;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	static const struct watchdog_info ident = {
+		.options =		WDIOF_SETTIMEOUT |
+					WDIOF_KEEPALIVEPING |
+					WDIOF_MAGICCLOSE,
+		.firmware_version =	0,
+		.identity =		TCO_MODULE_NAME,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident,
+			sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(new_options, p))
+			return -EFAULT;
+		if (new_options & WDIOS_DISABLECARD) {
+			tco_timer_stop();
+			retval = 0;
+		}
+		if (new_options & WDIOS_ENABLECARD) {
+			tco_timer_start();
+			tco_timer_keepalive();
+			retval = 0;
+		}
+		return retval;
+	case WDIOC_KEEPALIVE:
+		tco_timer_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (tco_timer_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		tco_timer_keepalive();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static const struct file_operations sp5100_tco_fops = {
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.write =		sp5100_tco_write,
+	.unlocked_ioctl =	sp5100_tco_ioctl,
+	.open =			sp5100_tco_open,
+	.release =		sp5100_tco_release,
+};
+
+static struct miscdevice sp5100_tco_miscdev = {
+	.minor =	WATCHDOG_MINOR,
+	.name =		"watchdog",
+	.fops =		&sp5100_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id sp5100_tco_pci_tbl[] = {
+	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
+	  PCI_ANY_ID, },
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+
+/*
+ * Init & exit routines
+ */
+
+static unsigned char __devinit sp5100_tco_setupdevice(void)
+{
+	struct pci_dev *dev = NULL;
+	u32 val;
+
+	/* Match the PCI device */
+	for_each_pci_dev(dev) {
+		if (pci_match_id(sp5100_tco_pci_tbl, dev) != NULL) {
+			sp5100_tco_pci = dev;
+			break;
+		}
+	}
+
+	if (!sp5100_tco_pci)
+		return 0;
+
+	/* Request the IO ports used by this driver */
+	pm_iobase = SP5100_IO_PM_INDEX_REG;
+	if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+			pm_iobase);
+		goto exit;
+	}
+
+	/* Find the watchdog base address. */
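+	/* The 32-bit MMIO base is assembled MSB-first from the PM index/data
+	 * registers BASE3..BASE0 (offsets 0x6F..0x6C, see sp5100_tco.h). */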
+	outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
+	val = inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
+	val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
+	val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
+	/* Low three bits of BASE0 are reserved. */
+	val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
+
+	tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
+	if (!tcobase) {
+		printk(KERN_ERR PFX "failed to get tcobase address\n");
+		goto unreg_region;
+	}
+
+	/* Enable watchdog decode bit */
+	pci_read_config_dword(sp5100_tco_pci,
+			      SP5100_PCI_WATCHDOG_MISC_REG,
+			      &val);
+
+	val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+	pci_write_config_dword(sp5100_tco_pci,
+			       SP5100_PCI_WATCHDOG_MISC_REG,
+			       val);
+
+	/* Enable Watchdog timer and set the resolution to 1 sec. */
+	outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+	val = inb(SP5100_IO_PM_DATA_REG);
+	val |= SP5100_PM_WATCHDOG_SECOND_RES;
+	val &= ~SP5100_PM_WATCHDOG_DISABLE;
+	outb(val, SP5100_IO_PM_DATA_REG);
+
+	/* Make sure the watchdog action is set to reset the system. */
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+
+	/* Set a reasonable heartbeat before we stop the timer */
+	tco_timer_set_heartbeat(heartbeat);
+
+	/*
+	 * Stop the TCO before we change anything so we don't race with
+	 * a zeroed timer.
+	 */
+	tco_timer_stop();
+
+	/* Done */
+	return 1;
+
+	iounmap(tcobase);
+unreg_region:
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+exit:
+	return 0;
+}
+
+static int __devinit sp5100_tco_init(struct platform_device *dev)
+{
+	int ret;
+	u32 val;
+
+	/* Check whether or not the hardware watchdog is there. If found, then
+	 * set it up.
+	 */
+	if (!sp5100_tco_setupdevice())
+		return -ENODEV;
+
+	/* Check to see if last reboot was due to watchdog timeout */
+	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+	       readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
+		      "" : "not ");
+
+	/* Clear out the old status */
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_PM_WATCHDOG_FIRED;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+
+	/*
+	 * Check that the heartbeat value is within its valid range.
+	 * If not, reset to the default.
+	 */
+	if (tco_timer_set_heartbeat(heartbeat)) {
+		heartbeat = WATCHDOG_HEARTBEAT;
+		tco_timer_set_heartbeat(heartbeat);
+	}
+
+	ret = misc_register(&sp5100_tco_miscdev);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor="
+		       "%d (err=%d)\n",
+		       WATCHDOG_MINOR, ret);
+		goto exit;
+	}
+
+	clear_bit(0, &timer_alive);
+
+	printk(KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec"
+		" (nowayout=%d)\n",
+		tcobase, heartbeat, nowayout);
+
+	return 0;
+
+exit:
+	iounmap(tcobase);
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+	return ret;
+}
+
+static void __devexit sp5100_tco_cleanup(void)
+{
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		tco_timer_stop();
+
+	/* Deregister */
+	misc_deregister(&sp5100_tco_miscdev);
+	iounmap(tcobase);
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+}
+
+static int __devexit sp5100_tco_remove(struct platform_device *dev)
+{
+	if (tcobase)
+		sp5100_tco_cleanup();
+	return 0;
+}
+
+static void sp5100_tco_shutdown(struct platform_device *dev)
+{
+	tco_timer_stop();
+}
+
+static struct platform_driver sp5100_tco_driver = {
+	.probe		= sp5100_tco_init,
+	.remove		= __devexit_p(sp5100_tco_remove),
+	.shutdown	= sp5100_tco_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= TCO_MODULE_NAME,
+	},
+};
+
+static int __init sp5100_tco_init_module(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "SP5100 TCO WatchDog Timer Driver v%s\n",
+	       TCO_VERSION);
+
+	err = platform_driver_register(&sp5100_tco_driver);
+	if (err)
+		return err;
+
+	sp5100_tco_platform_device = platform_device_register_simple(
+					TCO_MODULE_NAME, -1, NULL, 0);
+	if (IS_ERR(sp5100_tco_platform_device)) {
+		err = PTR_ERR(sp5100_tco_platform_device);
+		goto unreg_platform_driver;
+	}
+
+	return 0;
+
+unreg_platform_driver:
+	platform_driver_unregister(&sp5100_tco_driver);
+	return err;
+}
+
+static void __exit sp5100_tco_cleanup_module(void)
+{
+	platform_device_unregister(sp5100_tco_platform_device);
+	platform_driver_unregister(&sp5100_tco_driver);
+	printk(KERN_INFO PFX "SP5100 TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(sp5100_tco_init_module);
+module_exit(sp5100_tco_cleanup_module);
+
+MODULE_AUTHOR("Priyanka Gupta");
+MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
new file mode 100644
index 0000000..a5a16cc
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.h
@@ -0,0 +1,41 @@
+/*
+ *	sp5100_tco:	TCO timer driver for sp5100 chipsets.
+ *
+ *	(c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ *	TCO timer driver for sp5100 chipsets
+ */
+
+/*
+ * Some address definitions for the Watchdog
+ */
+
+#define SP5100_WDT_MEM_MAP_SIZE		0x08
+#define SP5100_WDT_CONTROL(base)	((base) + 0x00) /* Watchdog Control */
+#define SP5100_WDT_COUNT(base)		((base) + 0x04) /* Watchdog Count */
+
+#define SP5100_WDT_START_STOP_BIT	1
+#define SP5100_WDT_TRIGGER_BIT		(1 << 7)
+
+#define SP5100_PCI_WATCHDOG_MISC_REG	0x41
+#define SP5100_PCI_WATCHDOG_DECODE_EN	(1 << 3)
+
+#define SP5100_PM_IOPORTS_SIZE		0x02
+
+/* These two I/O register addresses are hardcoded; there does not seem to be
+ * a way to discover them from the hardware.
+ */
+#define SP5100_IO_PM_INDEX_REG		0xCD6
+#define SP5100_IO_PM_DATA_REG		0xCD7
+
+#define SP5100_PM_WATCHDOG_CONTROL	0x69
+#define SP5100_PM_WATCHDOG_BASE0	0x6C
+#define SP5100_PM_WATCHDOG_BASE1	0x6D
+#define SP5100_PM_WATCHDOG_BASE2	0x6E
+#define SP5100_PM_WATCHDOG_BASE3	0x6F
+
+#define SP5100_PM_WATCHDOG_FIRED	(1 << 1)
+#define SP5100_PM_WATCHDOG_ACTION_RESET	(1 << 2)
+
+#define SP5100_PM_WATCHDOG_DISABLE	1
+#define SP5100_PM_WATCHDOG_SECOND_RES	(3 << 1)
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 0f5288d..e5c91d4 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -42,7 +42,7 @@
 
 #include <asm/system.h>
 
-#define WATCHDOG_NAME "w83627hf/thf/hg WDT"
+#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
 #define PFX WATCHDOG_NAME ": "
 #define WATCHDOG_TIMEOUT 60		/* 60 sec default timeout */
 
@@ -89,7 +89,7 @@
 		c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
 		outb_p(0x2b, WDT_EFER);
 		outb_p(c, WDT_EFDR);	/* set GPIO3 to WDT0 */
-	} else if (c == 0x88) {	/* W83627EHF */
+	} else if (c == 0x88 || c == 0xa0) {	/* W83627EHF / W83627DHG */
 		outb_p(0x2d, WDT_EFER); /* select GPIO5 */
 		c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
 		outb_p(0x2d, WDT_EFER);
@@ -129,6 +129,8 @@
 	t = inb_p(WDT_EFDR);      /* read CRF5 */
 	t &= ~0x0C;               /* set second mode & disable keyboard
 				    turning off watchdog */
+	t |= 0x02;		  /* enable the WDTO# output low pulse
+				    to the KBRST# pin (PIN60) */
 	outb_p(t, WDT_EFDR);    /* Write back to CRF5 */
 
 	outb_p(0xF7, WDT_EFER); /* Select CRF7 */
@@ -321,7 +323,7 @@
 {
 	int ret;
 
-	printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG Super I/O chip initialising.\n");
+	printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising.\n");
 
 	if (wdt_set_heartbeat(timeout)) {
 		wdt_set_heartbeat(WATCHDOG_TIMEOUT);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 6e6180c..07bec09 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -29,6 +29,14 @@
 	  firing.
 	  If in doubt, say yes.
 
+config XEN_BACKEND
+	bool "Backend driver support"
+	depends on XEN_DOM0
+	default y
+	help
+	  Support for backend device drivers that provide I/O services
+	  to other virtual machines.
+
 config XENFS
 	tristate "Xen filesystem"
 	default y
@@ -62,9 +70,19 @@
 	 virtual environment, /sys/hypervisor will still be present,
 	 but will have no xen contents.
 
+config XEN_XENBUS_FRONTEND
+	tristate
+
+config XEN_GNTDEV
+	tristate "userspace grant access device driver"
+	depends on XEN
+	select MMU_NOTIFIER
+	help
+	  Allows userspace processes to use grants.
+
 config XEN_PLATFORM_PCI
 	tristate "xen platform pci device driver"
-	depends on XEN_PVHVM
+	depends on XEN_PVHVM && PCI
 	default m
 	help
 	  Driver for the Xen PCI Platform device: it is responsible for
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 533a199..5088cc2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -9,11 +9,14 @@
 obj-$(CONFIG_XEN_XENCOMM)	+= xencomm.o
 obj-$(CONFIG_XEN_BALLOON)	+= balloon.o
 obj-$(CONFIG_XEN_DEV_EVTCHN)	+= xen-evtchn.o
+obj-$(CONFIG_XEN_GNTDEV)	+= xen-gntdev.o
 obj-$(CONFIG_XENFS)		+= xenfs/
 obj-$(CONFIG_XEN_SYS_HYPERVISOR)	+= sys-hypervisor.o
-obj-$(CONFIG_XEN_PLATFORM_PCI)	+= platform-pci.o
+obj-$(CONFIG_XEN_PLATFORM_PCI)	+= xen-platform-pci.o
 obj-$(CONFIG_SWIOTLB_XEN)	+= swiotlb-xen.o
 obj-$(CONFIG_XEN_DOM0)		+= pci.o
 
 xen-evtchn-y			:= evtchn.o
+xen-gntdev-y				:= gntdev.o
 
+xen-platform-pci-y		:= platform-pci.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 31af0ac..7468147 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -170,6 +170,9 @@
 
 static unsigned int evtchn_from_irq(unsigned irq)
 {
+	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+		return 0;
+
 	return info_for_irq(irq)->evtchn;
 }
 
@@ -355,7 +358,7 @@
 		struct evtchn_unmask unmask = { .port = port };
 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
 	} else {
-		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
 		sync_clear_bit(port, &s->evtchn_mask[0]);
 
@@ -405,15 +408,21 @@
 {
 	struct irq_data *data;
 	int irq, res;
-	int start = get_nr_hw_irqs();
+	int bottom = get_nr_hw_irqs();
+	int top = nr_irqs-1;
 
-	if (start == nr_irqs)
+	if (bottom == nr_irqs)
 		goto no_irqs;
 
-	/* nr_irqs is a magic value. Must not use it.*/
-	for (irq = nr_irqs-1; irq > start; irq--) {
+	/* This loop starts from the top of IRQ space and goes down.
+	 * We need this because a PCI device in a Xen PV guest has no
+	 * IO-APIC mapped in (though the backend might have one). To avoid
+	 * colliding with physical IRQs, Xen event channels are allocated
+	 * from the top of the IRQ space downwards.
+	 */
+	for (irq = top; irq > bottom; irq--) {
 		data = irq_get_irq_data(irq);
-		/* only 0->15 have init'd desc; handle irq > 16 */
+		/* only legacy IRQs (0-15) have init'd descs; higher ones may have none yet */
 		if (!data)
 			break;
 		if (data->chip == &no_irq_chip)
@@ -424,7 +433,7 @@
 			return irq;
 	}
 
-	if (irq == start)
+	if (irq == bottom)
 		goto no_irqs;
 
 	res = irq_alloc_desc_at(irq, -1);
@@ -1101,7 +1110,7 @@
 {
 	int cpu = get_cpu();
 	struct shared_info *s = HYPERVISOR_shared_info;
-	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
  	unsigned count;
 
 	do {
@@ -1109,7 +1118,7 @@
 
 		vcpu_info->evtchn_upcall_pending = 0;
 
-		if (__get_cpu_var(xed_nesting_count)++)
+		if (__this_cpu_inc_return(xed_nesting_count) - 1)
 			goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -1141,8 +1150,8 @@
 
 		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(xed_nesting_count);
-		__get_cpu_var(xed_nesting_count) = 0;
+		count = __this_cpu_read(xed_nesting_count);
+		__this_cpu_write(xed_nesting_count, 0);
 	} while (count != 1 || vcpu_info->evtchn_upcall_pending);
 
 out:
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
new file mode 100644
index 0000000..1e31cdc
--- /dev/null
+++ b/drivers/xen/gntdev.c
@@ -0,0 +1,665 @@
+/******************************************************************************
+ * gntdev.c
+ *
+ * Device for accessing (in user-space) pages that have been granted by other
+ * domains.
+ *
+ * Copyright (c) 2006-2007, D G Murray.
+ *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mmu_notifier.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include <xen/xen.h>
+#include <xen/grant_table.h>
+#include <xen/gntdev.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
+	      "Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_DESCRIPTION("User-space granted page access driver");
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
+		"once by a gntdev instance");
+
+struct gntdev_priv {
+	struct list_head maps;
+	uint32_t used;
+	uint32_t limit;
+	/* lock protects maps from concurrent changes */
+	spinlock_t lock;
+	struct mm_struct *mm;
+	struct mmu_notifier mn;
+};
+
+struct grant_map {
+	struct list_head next;
+	struct gntdev_priv *priv;
+	struct vm_area_struct *vma;
+	int index;
+	int count;
+	int flags;
+	int is_mapped;
+	struct ioctl_gntdev_grant_ref *grants;
+	struct gnttab_map_grant_ref   *map_ops;
+	struct gnttab_unmap_grant_ref *unmap_ops;
+	struct page **pages;
+};
+
+/* ------------------------------------------------------------------ */
+
+static void gntdev_print_maps(struct gntdev_priv *priv,
+			      char *text, int text_index)
+{
+#ifdef DEBUG
+	struct grant_map *map;
+
+	pr_debug("maps list (priv %p, usage %d/%d)\n",
+	       priv, priv->used, priv->limit);
+
+	list_for_each_entry(map, &priv->maps, next)
+		pr_debug("  index %2d, count %2d %s\n",
+		       map->index, map->count,
+		       map->index == text_index && text ? text : "");
+#endif
+}
+
+static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+{
+	struct grant_map *add;
+	int i;
+
+	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
+	if (NULL == add)
+		return NULL;
+
+	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
+	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
+	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
+	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
+	if (NULL == add->grants    ||
+	    NULL == add->map_ops   ||
+	    NULL == add->unmap_ops ||
+	    NULL == add->pages)
+		goto err;
+
+	for (i = 0; i < count; i++) {
+		add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+		if (add->pages[i] == NULL)
+			goto err;
+	}
+
+	add->index = 0;
+	add->count = count;
+	add->priv  = priv;
+
+	if (add->count + priv->used > priv->limit)
+		goto err;
+
+	return add;
+
+err:
+	if (add->pages)
+		for (i = 0; i < count; i++) {
+			if (add->pages[i])
+				__free_page(add->pages[i]);
+		}
+	kfree(add->pages);
+	kfree(add->grants);
+	kfree(add->map_ops);
+	kfree(add->unmap_ops);
+	kfree(add);
+	return NULL;
+}
+
+static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
+{
+	struct grant_map *map;
+
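+	/* The maps list is kept sorted by index; take the first index range
+	 * that does not collide with an existing map, else append at the end. */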
+	list_for_each_entry(map, &priv->maps, next) {
+		if (add->index + add->count < map->index) {
+			list_add_tail(&add->next, &map->next);
+			goto done;
+		}
+		add->index = map->index + map->count;
+	}
+	list_add_tail(&add->next, &priv->maps);
+
+done:
+	priv->used += add->count;
+	gntdev_print_maps(priv, "[new]", add->index);
+}
+
+static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
+		int index, int count)
+{
+	struct grant_map *map;
+
+	list_for_each_entry(map, &priv->maps, next) {
+		if (map->index != index)
+			continue;
+		if (map->count != count)
+			continue;
+		return map;
+	}
+	return NULL;
+}
+
+static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
+					       unsigned long vaddr)
+{
+	struct grant_map *map;
+
+	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		if (vaddr < map->vma->vm_start)
+			continue;
+		if (vaddr >= map->vma->vm_end)
+			continue;
+		return map;
+	}
+	return NULL;
+}
+
+static int gntdev_del_map(struct grant_map *map)
+{
+	int i;
+
+	if (map->vma)
+		return -EBUSY;
+	for (i = 0; i < map->count; i++)
+		if (map->unmap_ops[i].handle)
+			return -EBUSY;
+
+	map->priv->used -= map->count;
+	list_del(&map->next);
+	return 0;
+}
+
+static void gntdev_free_map(struct grant_map *map)
+{
+	int i;
+
+	if (!map)
+		return;
+
+	if (map->pages)
+		for (i = 0; i < map->count; i++) {
+			if (map->pages[i])
+				__free_page(map->pages[i]);
+		}
+	kfree(map->pages);
+	kfree(map->grants);
+	kfree(map->map_ops);
+	kfree(map->unmap_ops);
+	kfree(map);
+}
+
+/* ------------------------------------------------------------------ */
+
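+/*
+ * Callback for apply_to_page_range(): for each PTE covering the VMA,
+ * record the PTE's machine address and prepare the matching grant map
+ * and unmap operations.
+ */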
+static int find_grant_ptes(pte_t *pte, pgtable_t token,
+		unsigned long addr, void *data)
+{
+	struct grant_map *map = data;
+	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+	u64 pte_maddr;
+
+	BUG_ON(pgnr >= map->count);
+	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
+
+	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
+			  GNTMAP_contains_pte | map->flags,
+			  map->grants[pgnr].ref,
+			  map->grants[pgnr].domid);
+	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
+			    GNTMAP_contains_pte | map->flags,
+			    0 /* handle */);
+	return 0;
+}
+
+static int map_grant_pages(struct grant_map *map)
+{
+	int i, err = 0;
+
+	pr_debug("map %d+%d\n", map->index, map->count);
+	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
+	if (err)
+		return err;
+
+	for (i = 0; i < map->count; i++) {
+		if (map->map_ops[i].status)
+			err = -EINVAL;
+		map->unmap_ops[i].handle = map->map_ops[i].handle;
+	}
+	return err;
+}
+
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+	int i, err = 0;
+
+	pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+	err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+	if (err)
+		return err;
+
+	for (i = 0; i < pages; i++) {
+		if (map->unmap_ops[offset+i].status)
+			err = -EINVAL;
+		map->unmap_ops[offset+i].handle = 0;
+	}
+	return err;
+}
+
+/* ------------------------------------------------------------------ */
+
+static void gntdev_vma_close(struct vm_area_struct *vma)
+{
+	struct grant_map *map = vma->vm_private_data;
+
+	pr_debug("close %p\n", vma);
+	map->is_mapped = 0;
+	map->vma = NULL;
+	vma->vm_private_data = NULL;
+}
+
+static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
+			vmf->virtual_address, vmf->pgoff);
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct gntdev_vmops = {
+	.close = gntdev_vma_close,
+	.fault = gntdev_vma_fault,
+};
+
+/* ------------------------------------------------------------------ */
+
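+/*
+ * MMU notifier hook: the kernel is about to invalidate [start, end) in
+ * this mm, so unmap any granted pages whose VMA overlaps that range
+ * before the PTEs disappear.
+ */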
+static void mn_invl_range_start(struct mmu_notifier *mn,
+				struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+	struct grant_map *map;
+	unsigned long mstart, mend;
+	int err;
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		if (!map->is_mapped)
+			continue;
+		if (map->vma->vm_start >= end)
+			continue;
+		if (map->vma->vm_end <= start)
+			continue;
+		mstart = max(start, map->vma->vm_start);
+		mend   = min(end,   map->vma->vm_end);
+		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+				map->index, map->count,
+				map->vma->vm_start, map->vma->vm_end,
+				start, end, mstart, mend);
+		err = unmap_grant_pages(map,
+					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
+					(mend - mstart) >> PAGE_SHIFT);
+		WARN_ON(err);
+	}
+	spin_unlock(&priv->lock);
+}
+
+static void mn_invl_page(struct mmu_notifier *mn,
+			 struct mm_struct *mm,
+			 unsigned long address)
+{
+	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
+}
+
+static void mn_release(struct mmu_notifier *mn,
+		       struct mm_struct *mm)
+{
+	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+	struct grant_map *map;
+	int err;
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		pr_debug("map %d+%d (%lx %lx)\n",
+				map->index, map->count,
+				map->vma->vm_start, map->vma->vm_end);
+		err = unmap_grant_pages(map, /* offset */ 0, map->count);
+		WARN_ON(err);
+	}
+	spin_unlock(&priv->lock);
+}
+
+struct mmu_notifier_ops gntdev_mmu_ops = {
+	.release                = mn_release,
+	.invalidate_page        = mn_invl_page,
+	.invalidate_range_start = mn_invl_range_start,
+};
+
+/* ------------------------------------------------------------------ */
+
+static int gntdev_open(struct inode *inode, struct file *flip)
+{
+	struct gntdev_priv *priv;
+	int ret = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->maps);
+	spin_lock_init(&priv->lock);
+	priv->limit = limit;
+
+	priv->mm = get_task_mm(current);
+	if (!priv->mm) {
+		kfree(priv);
+		return -ENOMEM;
+	}
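+	/* Register an MMU notifier so grant mappings are torn down when the
+	 * owning mm unmaps the range or exits. */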
+	priv->mn.ops = &gntdev_mmu_ops;
+	ret = mmu_notifier_register(&priv->mn, priv->mm);
+	mmput(priv->mm);
+
+	if (ret) {
+		kfree(priv);
+		return ret;
+	}
+
+	flip->private_data = priv;
+	pr_debug("priv %p\n", priv);
+
+	return 0;
+}
+
+static int gntdev_release(struct inode *inode, struct file *flip)
+{
+	struct gntdev_priv *priv = flip->private_data;
+	struct grant_map *map;
+	int err;
+
+	pr_debug("priv %p\n", priv);
+
+	spin_lock(&priv->lock);
+	while (!list_empty(&priv->maps)) {
+		map = list_entry(priv->maps.next, struct grant_map, next);
+		err = gntdev_del_map(map);
+		if (WARN_ON(err))
+			gntdev_free_map(map);
+
+	}
+	spin_unlock(&priv->lock);
+
+	mmu_notifier_unregister(&priv->mn, priv->mm);
+	kfree(priv);
+	return 0;
+}
+
+static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
+				       struct ioctl_gntdev_map_grant_ref __user *u)
+{
+	struct ioctl_gntdev_map_grant_ref op;
+	struct grant_map *map;
+	int err;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, add %d\n", priv, op.count);
+	if (unlikely(op.count <= 0))
+		return -EINVAL;
+	if (unlikely(op.count > priv->limit))
+		return -EINVAL;
+
+	err = -ENOMEM;
+	map = gntdev_alloc_map(priv, op.count);
+	if (!map)
+		return err;
+	if (copy_from_user(map->grants, &u->refs,
+			   sizeof(map->grants[0]) * op.count) != 0) {
+		gntdev_free_map(map);
+		return -EFAULT;
+	}
+
+	spin_lock(&priv->lock);
+	gntdev_add_map(priv, map);
+	op.index = map->index << PAGE_SHIFT;
+	spin_unlock(&priv->lock);
+
+	if (copy_to_user(u, &op, sizeof(op)) != 0) {
+		spin_lock(&priv->lock);
+		gntdev_del_map(map);
+		spin_unlock(&priv->lock);
+		gntdev_free_map(map);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
+					 struct ioctl_gntdev_unmap_grant_ref __user *u)
+{
+	struct ioctl_gntdev_unmap_grant_ref op;
+	struct grant_map *map;
+	int err = -ENOENT;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
+
+	spin_lock(&priv->lock);
+	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
+	if (map)
+		err = gntdev_del_map(map);
+	spin_unlock(&priv->lock);
+	if (!err)
+		gntdev_free_map(map);
+	return err;
+}
+
+static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
+					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
+{
+	struct ioctl_gntdev_get_offset_for_vaddr op;
+	struct grant_map *map;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
+
+	spin_lock(&priv->lock);
+	map = gntdev_find_map_vaddr(priv, op.vaddr);
+	if (map == NULL ||
+	    map->vma->vm_start != op.vaddr) {
+		spin_unlock(&priv->lock);
+		return -EINVAL;
+	}
+	op.offset = map->index << PAGE_SHIFT;
+	op.count = map->count;
+	spin_unlock(&priv->lock);
+
+	if (copy_to_user(u, &op, sizeof(op)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
+					struct ioctl_gntdev_set_max_grants __user *u)
+{
+	struct ioctl_gntdev_set_max_grants op;
+
+	if (copy_from_user(&op, u, sizeof(op)) != 0)
+		return -EFAULT;
+	pr_debug("priv %p, limit %d\n", priv, op.count);
+	if (op.count > limit)
+		return -E2BIG;
+
+	spin_lock(&priv->lock);
+	priv->limit = op.count;
+	spin_unlock(&priv->lock);
+	return 0;
+}
+
+static long gntdev_ioctl(struct file *flip,
+			 unsigned int cmd, unsigned long arg)
+{
+	struct gntdev_priv *priv = flip->private_data;
+	void __user *ptr = (void __user *)arg;
+
+	switch (cmd) {
+	case IOCTL_GNTDEV_MAP_GRANT_REF:
+		return gntdev_ioctl_map_grant_ref(priv, ptr);
+
+	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
+		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
+
+	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
+		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
+
+	case IOCTL_GNTDEV_SET_MAX_GRANTS:
+		return gntdev_ioctl_set_max_grants(priv, ptr);
+
+	default:
+		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
+		return -ENOIOCTLCMD;
+	}
+
+	return 0;
+}
+
+static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+{
+	struct gntdev_priv *priv = flip->private_data;
+	int index = vma->vm_pgoff;
+	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	struct grant_map *map;
+	int err = -EINVAL;
+
+	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
+		return -EINVAL;
+
+	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
+			index, count, vma->vm_start, vma->vm_pgoff);
+
+	spin_lock(&priv->lock);
+	map = gntdev_find_map_index(priv, index, count);
+	if (!map)
+		goto unlock_out;
+	if (map->vma)
+		goto unlock_out;
+	if (priv->mm != vma->vm_mm) {
+		printk(KERN_WARNING "Huh? Other mm?\n");
+		goto unlock_out;
+	}
+
+	vma->vm_ops = &gntdev_vmops;
+
+	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
+
+	vma->vm_private_data = map;
+	map->vma = vma;
+
+	map->flags = GNTMAP_host_map | GNTMAP_application_map;
+	if (!(vma->vm_flags & VM_WRITE))
+		map->flags |= GNTMAP_readonly;
+
+	spin_unlock(&priv->lock);
+
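+	/* Walk the VMA's page tables, filling map_ops with the machine
+	 * address of each PTE, then ask the hypervisor to map the grants. */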
+	err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+				  vma->vm_end - vma->vm_start,
+				  find_grant_ptes, map);
+	if (err) {
+		printk(KERN_WARNING "find_grant_ptes() failure.\n");
+		return err;
+	}
+
+	err = map_grant_pages(map);
+	if (err) {
+		printk(KERN_WARNING "map_grant_pages() failure.\n");
+		return err;
+	}
+
+	map->is_mapped = 1;
+
+	return 0;
+
+unlock_out:
+	spin_unlock(&priv->lock);
+	return err;
+}
+
+static const struct file_operations gntdev_fops = {
+	.owner = THIS_MODULE,
+	.open = gntdev_open,
+	.release = gntdev_release,
+	.mmap = gntdev_mmap,
+	.unlocked_ioctl = gntdev_ioctl,
+};
+
+static struct miscdevice gntdev_miscdev = {
+	.minor        = MISC_DYNAMIC_MINOR,
+	.name         = "xen/gntdev",
+	.fops         = &gntdev_fops,
+};
+
+/* ------------------------------------------------------------------ */
+
+static int __init gntdev_init(void)
+{
+	int err;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	err = misc_register(&gntdev_miscdev);
+	if (err != 0) {
+		printk(KERN_ERR "Could not register gntdev device\n");
+		return err;
+	}
+	return 0;
+}
+
+static void __exit gntdev_exit(void)
+{
+	misc_deregister(&gntdev_miscdev);
+}
+
+module_init(gntdev_init);
+module_exit(gntdev_exit);
+
+/* ------------------------------------------------------------------ */
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 6c45318..9ef54eb 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -447,6 +447,52 @@
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count)
+{
+	int i, ret;
+	pte_t *pte;
+	unsigned long mfn;
+
+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		/* m2p override only supported for GNTMAP_contains_pte mappings */
+		if (!(map_ops[i].flags & GNTMAP_contains_pte))
+			continue;
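+		/* Recover the kernel virtual address of the PTE from the
+		 * machine address passed in host_addr. */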
+		pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+				(map_ops[i].host_addr & ~PAGE_MASK));
+		mfn = pte_mfn(*pte);
+		ret = m2p_add_override(mfn, pages[i]);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_map_refs);
+
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+		struct page **pages, unsigned int count)
+{
+	int i, ret;
+
+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < count; i++) {
+		ret = m2p_remove_override(pages[i]);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+
 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
 {
 	struct gnttab_setup_table setup;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index c01b5dd..afbe041 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -105,7 +105,7 @@
 				       const struct pci_device_id *ent)
 {
 	int i, ret;
-	long ioaddr, iolen;
+	long ioaddr;
 	long mmio_addr, mmio_len;
 	unsigned int max_nr_gframes;
 
@@ -114,7 +114,6 @@
 		return i;
 
 	ioaddr = pci_resource_start(pdev, 0);
-	iolen = pci_resource_len(pdev, 0);
 
 	mmio_addr = pci_resource_start(pdev, 1);
 	mmio_len = pci_resource_len(pdev, 1);
@@ -125,19 +124,13 @@
 		goto pci_out;
 	}
 
-	if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) {
-		dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n",
-		       mmio_addr, mmio_len);
-		ret = -EBUSY;
+	ret = pci_request_region(pdev, 1, DRV_NAME);
+	if (ret < 0)
 		goto pci_out;
-	}
 
-	if (request_region(ioaddr, iolen, DRV_NAME) == NULL) {
-		dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n",
-		       iolen, ioaddr);
-		ret = -EBUSY;
+	ret = pci_request_region(pdev, 0, DRV_NAME);
+	if (ret < 0)
 		goto mem_out;
-	}
 
 	platform_mmio = mmio_addr;
 	platform_mmiolen = mmio_len;
@@ -169,9 +162,9 @@
 	return 0;
 
 out:
-	release_region(ioaddr, iolen);
+	pci_release_region(pdev, 0);
 mem_out:
-	release_mem_region(mmio_addr, mmio_len);
+	pci_release_region(pdev, 1);
 pci_out:
 	pci_disable_device(pdev);
 	return ret;
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 5571f5b..8dca685 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -5,3 +5,8 @@
 xenbus-objs += xenbus_comms.o
 xenbus-objs += xenbus_xs.o
 xenbus-objs += xenbus_probe.o
+
+xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
+xenbus-objs += $(xenbus-be-objs-y)
+
+obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index deb9c4b..baa65e7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -56,7 +56,6 @@
 #include <xen/events.h>
 #include <xen/page.h>
 
-#include <xen/platform_pci.h>
 #include <xen/hvm.h>
 
 #include "xenbus_comms.h"
@@ -73,15 +72,6 @@
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
-static void wait_for_devices(struct xenbus_driver *xendrv);
-
-static int xenbus_probe_frontend(const char *type, const char *name);
-
-static void xenbus_dev_shutdown(struct device *_dev);
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
-static int xenbus_dev_resume(struct device *dev);
-
 /* If something in array of ids matches this device, return it. */
 static const struct xenbus_device_id *
 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -102,34 +92,7 @@
 
 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
 }
-
-static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
-{
-	struct xenbus_device *dev = to_xenbus_device(_dev);
-
-	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
-		return -ENOMEM;
-
-	return 0;
-}
-
-/* device/<type>/<id> => <type>-<id> */
-static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
-{
-	nodename = strchr(nodename, '/');
-	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
-		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-		return -EINVAL;
-	}
-
-	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
-	if (!strchr(bus_id, '/')) {
-		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-		return -EINVAL;
-	}
-	*strchr(bus_id, '/') = '-';
-	return 0;
-}
+EXPORT_SYMBOL_GPL(xenbus_match);
 
 
 static void free_otherend_details(struct xenbus_device *dev)
@@ -149,7 +112,30 @@
 }
 
 
-int read_otherend_details(struct xenbus_device *xendev,
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+	free_otherend_watch(dev);
+	free_otherend_details(dev);
+
+	return drv->read_otherend_details(dev);
+}
+
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+	struct xen_bus_type *bus =
+		container_of(dev->dev.bus, struct xen_bus_type, bus);
+
+	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+				    bus->otherend_changed,
+				    "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_read_otherend_details(struct xenbus_device *xendev,
 				 char *id_node, char *path_node)
 {
 	int err = xenbus_gather(XBT_NIL, xendev->nodename,
@@ -174,39 +160,11 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
 
-
-static int read_backend_details(struct xenbus_device *xendev)
-{
-	return read_otherend_details(xendev, "backend-id", "backend");
-}
-
-static struct device_attribute xenbus_dev_attrs[] = {
-	__ATTR_NULL
-};
-
-/* Bus type for frontend drivers. */
-static struct xen_bus_type xenbus_frontend = {
-	.root = "device",
-	.levels = 2, 		/* device/type/<id> */
-	.get_bus_id = frontend_bus_id,
-	.probe = xenbus_probe_frontend,
-	.bus = {
-		.name      = "xen",
-		.match     = xenbus_match,
-		.uevent    = xenbus_uevent,
-		.probe     = xenbus_dev_probe,
-		.remove    = xenbus_dev_remove,
-		.shutdown  = xenbus_dev_shutdown,
-		.dev_attrs = xenbus_dev_attrs,
-
-		.suspend   = xenbus_dev_suspend,
-		.resume    = xenbus_dev_resume,
-	},
-};
-
-static void otherend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
+void xenbus_otherend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len,
+			     int ignore_on_shutdown)
 {
 	struct xenbus_device *dev =
 		container_of(watch, struct xenbus_device, otherend_watch);
@@ -234,11 +192,7 @@
 	 * work that can fail e.g., when the rootfs is gone.
 	 */
 	if (system_state > SYSTEM_RUNNING) {
-		struct xen_bus_type *bus = bus;
-		bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
-		/* If we're frontend, drive the state machine to Closed. */
-		/* This should cause the backend to release our resources. */
-		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+		if (ignore_on_shutdown && (state == XenbusStateClosing))
 			xenbus_frontend_closed(dev);
 		return;
 	}
@@ -246,25 +200,7 @@
 	if (drv->otherend_changed)
 		drv->otherend_changed(dev, state);
 }
-
-
-static int talk_to_otherend(struct xenbus_device *dev)
-{
-	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-
-	free_otherend_watch(dev);
-	free_otherend_details(dev);
-
-	return drv->read_otherend_details(dev);
-}
-
-
-static int watch_otherend(struct xenbus_device *dev)
-{
-	return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
-				    "%s/%s", dev->otherend, "state");
-}
-
+EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
 
 int xenbus_dev_probe(struct device *_dev)
 {
@@ -308,8 +244,9 @@
 fail:
 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
 	xenbus_switch_state(dev, XenbusStateClosed);
-	return -ENODEV;
+	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_probe);
 
 int xenbus_dev_remove(struct device *_dev)
 {
@@ -327,8 +264,9 @@
 	xenbus_switch_state(dev, XenbusStateClosed);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_remove);
 
-static void xenbus_dev_shutdown(struct device *_dev)
+void xenbus_dev_shutdown(struct device *_dev)
 {
 	struct xenbus_device *dev = to_xenbus_device(_dev);
 	unsigned long timeout = 5*HZ;
@@ -349,6 +287,7 @@
  out:
 	put_device(&dev->dev);
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
 
 int xenbus_register_driver_common(struct xenbus_driver *drv,
 				  struct xen_bus_type *bus,
@@ -362,25 +301,7 @@
 
 	return driver_register(&drv->driver);
 }
-
-int __xenbus_register_frontend(struct xenbus_driver *drv,
-			       struct module *owner, const char *mod_name)
-{
-	int ret;
-
-	drv->read_otherend_details = read_backend_details;
-
-	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
-					    owner, mod_name);
-	if (ret)
-		return ret;
-
-	/* If this driver is loaded as a module wait for devices to attach. */
-	wait_for_devices(drv);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
 
 void xenbus_unregister_driver(struct xenbus_driver *drv)
 {
@@ -551,24 +472,7 @@
 	kfree(xendev);
 	return err;
 }
-
-/* device/<typename>/<name> */
-static int xenbus_probe_frontend(const char *type, const char *name)
-{
-	char *nodename;
-	int err;
-
-	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
-			     xenbus_frontend.root, type, name);
-	if (!nodename)
-		return -ENOMEM;
-
-	DPRINTK("%s", nodename);
-
-	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-	kfree(nodename);
-	return err;
-}
+EXPORT_SYMBOL_GPL(xenbus_probe_node);
 
 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
 {
@@ -582,10 +486,11 @@
 		return PTR_ERR(dir);
 
 	for (i = 0; i < dir_n; i++) {
-		err = bus->probe(type, dir[i]);
+		err = bus->probe(bus, type, dir[i]);
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
@@ -605,9 +510,11 @@
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_probe_devices);
 
 static unsigned int char_count(const char *str, char c)
 {
@@ -670,32 +577,18 @@
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
 
-static void frontend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
-{
-	DPRINTK("");
-
-	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-}
-
-/* We watch for devices appearing and vanishing. */
-static struct xenbus_watch fe_watch = {
-	.node = "device",
-	.callback = frontend_changed,
-};
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev, pm_message_t state)
 {
 	int err = 0;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
 
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 
 	if (dev->driver == NULL)
 		return 0;
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
 	if (drv->suspend)
 		err = drv->suspend(xdev, state);
 	if (err)
@@ -703,21 +596,20 @@
 		       "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
 
-static int xenbus_dev_resume(struct device *dev)
+int xenbus_dev_resume(struct device *dev)
 {
 	int err;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
 
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 
 	if (dev->driver == NULL)
 		return 0;
-
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
-
 	err = talk_to_otherend(xdev);
 	if (err) {
 		printk(KERN_WARNING
@@ -748,6 +640,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_resume);
 
 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
 int xenstored_ready = 0;
@@ -776,11 +669,6 @@
 {
 	xenstored_ready = 1;
 
-	/* Enumerate devices in xenstore and watch for changes. */
-	xenbus_probe_devices(&xenbus_frontend);
-	register_xenbus_watch(&fe_watch);
-	xenbus_backend_probe_and_watch();
-
 	/* Notify others that xenstore is up */
 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
@@ -809,16 +697,7 @@
 
 	err = -ENODEV;
 	if (!xen_domain())
-		goto out_error;
-
-	/* Register ourselves with the kernel bus subsystem */
-	err = bus_register(&xenbus_frontend.bus);
-	if (err)
-		goto out_error;
-
-	err = xenbus_backend_bus_register();
-	if (err)
-		goto out_unreg_front;
+		return err;
 
 	/*
 	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -874,7 +753,7 @@
 	if (err) {
 		printk(KERN_WARNING
 		       "XENBUS: Error initializing xenstore comms: %i\n", err);
-		goto out_unreg_back;
+		goto out_error;
 	}
 
 #ifdef CONFIG_XEN_COMPAT_XENFS
@@ -887,133 +766,13 @@
 
 	return 0;
 
-  out_unreg_back:
-	xenbus_backend_bus_unregister();
-
-  out_unreg_front:
-	bus_unregister(&xenbus_frontend.bus);
-
   out_error:
 	if (page != 0)
 		free_page(page);
+
 	return err;
 }
 
 postcore_initcall(xenbus_init);
 
 MODULE_LICENSE("GPL");
-
-static int is_device_connecting(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-	struct xenbus_driver *xendrv;
-
-	/*
-	 * A device with no driver will never connect. We care only about
-	 * devices which should currently be in the process of connecting.
-	 */
-	if (!dev->driver)
-		return 0;
-
-	/* Is this search limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	xendrv = to_xenbus_driver(dev->driver);
-	return (xendev->state < XenbusStateConnected ||
-		(xendev->state == XenbusStateConnected &&
-		 xendrv->is_ready && !xendrv->is_ready(xendev)));
-}
-
-static int exists_connecting_device(struct device_driver *drv)
-{
-	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-				is_device_connecting);
-}
-
-static int print_device_status(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-
-	/* Is this operation limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	if (!dev->driver) {
-		/* Information only: is this too noisy? */
-		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
-		       xendev->nodename);
-	} else if (xendev->state < XenbusStateConnected) {
-		enum xenbus_state rstate = XenbusStateUnknown;
-		if (xendev->otherend)
-			rstate = xenbus_read_driver_state(xendev->otherend);
-		printk(KERN_WARNING "XENBUS: Timeout connecting "
-		       "to device: %s (local state %d, remote state %d)\n",
-		       xendev->nodename, xendev->state, rstate);
-	}
-
-	return 0;
-}
-
-/* We only wait for device setup after most initcalls have run. */
-static int ready_to_wait_for_devices;
-
-/*
- * On a 5-minute timeout, wait for all devices currently configured.  We need
- * to do this to guarantee that the filesystems and / or network devices
- * needed for boot are available, before we can allow the boot to proceed.
- *
- * This needs to be on a late_initcall, to happen after the frontend device
- * drivers have been initialised, but before the root fs is mounted.
- *
- * A possible improvement here would be to have the tools add a per-device
- * flag to the store entry, indicating whether it is needed at boot time.
- * This would allow people who knew what they were doing to accelerate their
- * boot slightly, but of course needs tools or manual intervention to set up
- * those flags correctly.
- */
-static void wait_for_devices(struct xenbus_driver *xendrv)
-{
-	unsigned long start = jiffies;
-	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
-	unsigned int seconds_waited = 0;
-
-	if (!ready_to_wait_for_devices || !xen_domain())
-		return;
-
-	while (exists_connecting_device(drv)) {
-		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-			if (!seconds_waited)
-				printk(KERN_WARNING "XENBUS: Waiting for "
-				       "devices to initialise: ");
-			seconds_waited += 5;
-			printk("%us...", 300 - seconds_waited);
-			if (seconds_waited == 300)
-				break;
-		}
-
-		schedule_timeout_interruptible(HZ/10);
-	}
-
-	if (seconds_waited)
-		printk("\n");
-
-	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-			 print_device_status);
-}
-
-#ifndef MODULE
-static int __init boot_wait_for_devices(void)
-{
-	if (xen_hvm_domain() && !xen_platform_pci_unplug)
-		return -ENODEV;
-
-	ready_to_wait_for_devices = 1;
-	wait_for_devices(NULL);
-	return 0;
-}
-
-late_initcall(boot_wait_for_devices);
-#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 6c5e318..2466581 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,26 +36,15 @@
 
 #define XEN_BUS_ID_SIZE			20
 
-#ifdef CONFIG_XEN_BACKEND
-extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-extern void xenbus_backend_probe_and_watch(void);
-extern int xenbus_backend_bus_register(void);
-extern void xenbus_backend_bus_unregister(void);
-#else
-static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_probe_and_watch(void) {}
-static inline int xenbus_backend_bus_register(void) { return 0; }
-static inline void xenbus_backend_bus_unregister(void) {}
-#endif
-
 struct xen_bus_type
 {
 	char *root;
 	unsigned int levels;
 	int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
-	int (*probe)(const char *type, const char *dir);
+	int (*probe)(struct xen_bus_type *bus, const char *type,
+		     const char *dir);
+	void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
+				 unsigned int len);
 	struct bus_type bus;
 };
 
@@ -73,4 +62,16 @@
 
 extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
+extern void xenbus_dev_shutdown(struct device *_dev);
+
+extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_resume(struct device *dev);
+
+extern void xenbus_otherend_changed(struct xenbus_watch *watch,
+				    const char **vec, unsigned int len,
+				    int ignore_on_shutdown);
+
+extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
+					char *id_node, char *path_node);
+
 #endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
new file mode 100644
index 0000000..6cf467b
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have (backend half).
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ * Copyright (C) 2007 Solarflare Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
+static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+	int domid, err;
+	const char *devid, *type, *frontend;
+	unsigned int typelen;
+
+	type = strchr(nodename, '/');
+	if (!type)
+		return -EINVAL;
+	type++;
+	typelen = strcspn(type, "/");
+	if (!typelen || type[typelen] != '/')
+		return -EINVAL;
+
+	devid = strrchr(nodename, '/') + 1;
+
+	err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
+			    "frontend", NULL, &frontend,
+			    NULL);
+	if (err)
+		return err;
+	if (strlen(frontend) == 0)
+		err = -ERANGE;
+	if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
+		err = -ENOENT;
+	kfree(frontend);
+
+	if (err)
+		return err;
+
+	if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
+		     typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
+		return -ENOSPC;
+	return 0;
+}
+
+static int xenbus_uevent_backend(struct device *dev,
+				 struct kobj_uevent_env *env)
+{
+	struct xenbus_device *xdev;
+	struct xenbus_driver *drv;
+	struct xen_bus_type *bus;
+
+	DPRINTK("");
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	xdev = to_xenbus_device(dev);
+	if (xdev == NULL)
+		return -ENODEV;
+	bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
+
+	/* stuff we want to pass to /sbin/hotplug */
+	if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
+		return -ENOMEM;
+
+	if (dev->driver) {
+		drv = to_xenbus_driver(dev->driver);
+		if (drv && drv->uevent)
+			return drv->uevent(xdev, env);
+	}
+
+	return 0;
+}
+
+/* backend/<typename>/<frontend-uuid>/<name> */
+static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
+				     const char *dir,
+				     const char *type,
+				     const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s\n", nodename);
+
+	err = xenbus_probe_node(bus, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+/* backend/<typename>/<frontend-domid> */
+static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
+				const char *domid)
+{
+	char *nodename;
+	int err = 0;
+	char **dir;
+	unsigned int i, dir_n = 0;
+
+	DPRINTK("");
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
+	if (!nodename)
+		return -ENOMEM;
+
+	dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
+	if (IS_ERR(dir)) {
+		kfree(nodename);
+		return PTR_ERR(dir);
+	}
+
+	for (i = 0; i < dir_n; i++) {
+		err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
+		if (err)
+			break;
+	}
+	kfree(dir);
+	kfree(nodename);
+	return err;
+}
+
+static void frontend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	xenbus_otherend_changed(watch, vec, len, 0);
+}
+
+static struct device_attribute xenbus_backend_dev_attrs[] = {
+	__ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_backend = {
+	.root = "backend",
+	.levels = 3,		/* backend/type/<frontend>/<id> */
+	.get_bus_id = backend_bus_id,
+	.probe = xenbus_probe_backend,
+	.otherend_changed = frontend_changed,
+	.bus = {
+		.name		= "xen-backend",
+		.match		= xenbus_match,
+		.uevent		= xenbus_uevent_backend,
+		.probe		= xenbus_dev_probe,
+		.remove		= xenbus_dev_remove,
+		.shutdown	= xenbus_dev_shutdown,
+		.dev_attrs	= xenbus_backend_dev_attrs,
+	},
+};
+
+static void backend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
+}
+
+static struct xenbus_watch be_watch = {
+	.node = "backend",
+	.callback = backend_changed,
+};
+
+static int read_frontend_details(struct xenbus_device *xendev)
+{
+	return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
+}
+
+int xenbus_dev_is_online(struct xenbus_device *dev)
+{
+	int rc, val;
+
+	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
+	if (rc != 1)
+		val = 0; /* no online node present */
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
+
+int __xenbus_register_backend(struct xenbus_driver *drv,
+			      struct module *owner, const char *mod_name)
+{
+	drv->read_otherend_details = read_frontend_details;
+
+	return xenbus_register_driver_common(drv, &xenbus_backend,
+					     owner, mod_name);
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+
+static int backend_probe_and_watch(struct notifier_block *notifier,
+				   unsigned long event,
+				   void *data)
+{
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_backend);
+	register_xenbus_watch(&be_watch);
+
+	return NOTIFY_DONE;
+}
+
+static int __init xenbus_probe_backend_init(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = backend_probe_and_watch
+	};
+	int err;
+
+	DPRINTK("");
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_backend.bus);
+	if (err)
+		return err;
+
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+subsys_initcall(xenbus_probe_backend_init);
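For orientation, a backend driver registers against this xen-backend bus through __xenbus_register_backend(), which the file above exports (in-tree drivers typically go through a xenbus_register_backend() wrapper that supplies THIS_MODULE and KBUILD_MODNAME). A minimal, purely illustrative skeleton might look like the following; the "mydevback" driver, its "mydev" device type, and the empty callback bodies are hypothetical and not part of this patch:

/*
 * Illustrative only: "mydevback", its "mydev" device type and the empty
 * callbacks are made up.  Assumes <linux/module.h> and <xen/xenbus.h>.
 */
static int mydevback_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	/* allocate per-device state; dev->otherend is expected to point
	 * at the frontend, since read_frontend_details() runs in the
	 * common probe path before ->probe() is called */
	return 0;
}

static int mydevback_remove(struct xenbus_device *dev)
{
	/* free per-device state */
	return 0;
}

static void mydevback_frontend_changed(struct xenbus_device *dev,
				       enum xenbus_state frontend_state)
{
	/* react to frontend state transitions; xenbus_dev_is_online(dev)
	 * reports whether the toolstack still wants the device around */
}

static const struct xenbus_device_id mydevback_ids[] = {
	{ "mydev" },
	{ "" }
};

static struct xenbus_driver mydevback_driver = {
	.name			= "mydevback",
	.ids			= mydevback_ids,
	.probe			= mydevback_probe,
	.remove			= mydevback_remove,
	.otherend_changed	= mydevback_frontend_changed,
};

static int __init mydevback_init(void)
{
	return __xenbus_register_backend(&mydevback_driver,
					 THIS_MODULE, KBUILD_MODNAME);
}
module_init(mydevback_init);

Because the driver sets only the xenbus-level callbacks, the bus code above supplies the device-model glue (match, uevent, probe, remove, shutdown) for it.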
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
new file mode 100644
index 0000000..5bcc2d6
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -0,0 +1,294 @@
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/platform_pci.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+	nodename = strchr(nodename, '/');
+	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
+		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+		return -EINVAL;
+	}
+
+	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+	if (!strchr(bus_id, '/')) {
+		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+		return -EINVAL;
+	}
+	*strchr(bus_id, '/') = '-';
+	return 0;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
+				 const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s", nodename);
+
+	err = xenbus_probe_node(bus, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+static int xenbus_uevent_frontend(struct device *_dev,
+				  struct kobj_uevent_env *env)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+
+	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
+		return -ENOMEM;
+
+	return 0;
+}
+
+
+static void backend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	xenbus_otherend_changed(watch, vec, len, 1);
+}
+
+static struct device_attribute xenbus_frontend_dev_attrs[] = {
+	__ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_frontend = {
+	.root = "device",
+	.levels = 2,		/* device/type/<id> */
+	.get_bus_id = frontend_bus_id,
+	.probe = xenbus_probe_frontend,
+	.otherend_changed = backend_changed,
+	.bus = {
+		.name		= "xen",
+		.match		= xenbus_match,
+		.uevent		= xenbus_uevent_frontend,
+		.probe		= xenbus_dev_probe,
+		.remove		= xenbus_dev_remove,
+		.shutdown	= xenbus_dev_shutdown,
+		.dev_attrs	= xenbus_frontend_dev_attrs,
+
+		.suspend	= xenbus_dev_suspend,
+		.resume		= xenbus_dev_resume,
+	},
+};
+
+static void frontend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+	.node = "device",
+	.callback = frontend_changed,
+};
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+	return xenbus_read_otherend_details(xendev, "backend-id", "backend");
+}
+
+static int is_device_connecting(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+	struct xenbus_driver *xendrv;
+
+	/*
+	 * A device with no driver will never connect. We care only about
+	 * devices which should currently be in the process of connecting.
+	 */
+	if (!dev->driver)
+		return 0;
+
+	/* Is this search limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	xendrv = to_xenbus_driver(dev->driver);
+	return (xendev->state < XenbusStateConnected ||
+		(xendev->state == XenbusStateConnected &&
+		 xendrv->is_ready && !xendrv->is_ready(xendev)));
+}
+
+static int exists_connecting_device(struct device_driver *drv)
+{
+	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+				is_device_connecting);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+
+	/* Is this operation limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	if (!dev->driver) {
+		/* Information only: is this too noisy? */
+		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+		       xendev->nodename);
+	} else if (xendev->state < XenbusStateConnected) {
+		enum xenbus_state rstate = XenbusStateUnknown;
+		if (xendev->otherend)
+			rstate = xenbus_read_driver_state(xendev->otherend);
+		printk(KERN_WARNING "XENBUS: Timeout connecting "
+		       "to device: %s (local state %d, remote state %d)\n",
+		       xendev->nodename, xendev->state, rstate);
+	}
+
+	return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * Wait, with a five-minute timeout, for all devices currently configured.  We
+ * need to do this to guarantee that the filesystems and/or network devices
+ * needed for boot are available before we allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+	unsigned long start = jiffies;
+	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+	unsigned int seconds_waited = 0;
+
+	if (!ready_to_wait_for_devices || !xen_domain())
+		return;
+
+	while (exists_connecting_device(drv)) {
+		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
+			if (!seconds_waited)
+				printk(KERN_WARNING "XENBUS: Waiting for "
+				       "devices to initialise: ");
+			seconds_waited += 5;
+			printk("%us...", 300 - seconds_waited);
+			if (seconds_waited == 300)
+				break;
+		}
+
+		schedule_timeout_interruptible(HZ/10);
+	}
+
+	if (seconds_waited)
+		printk("\n");
+
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+			 print_device_status);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+			       struct module *owner, const char *mod_name)
+{
+	int ret;
+
+	drv->read_otherend_details = read_backend_details;
+
+	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+					    owner, mod_name);
+	if (ret)
+		return ret;
+
+	/* If this driver is loaded as a module, wait for devices to attach. */
+	wait_for_devices(drv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+
+static int frontend_probe_and_watch(struct notifier_block *notifier,
+				   unsigned long event,
+				   void *data)
+{
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_frontend);
+	register_xenbus_watch(&fe_watch);
+
+	return NOTIFY_DONE;
+}
+
+
+static int __init xenbus_probe_frontend_init(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = frontend_probe_and_watch
+	};
+	int err;
+
+	DPRINTK("");
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_frontend.bus);
+	if (err)
+		return err;
+
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+subsys_initcall(xenbus_probe_frontend_init);
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
+		return -ENODEV;
+
+	ready_to_wait_for_devices = 1;
+	wait_for_devices(NULL);
+	return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
+
+MODULE_LICENSE("GPL");
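The comment above wait_for_devices() floats the idea of a per-device flag in xenstore that would tell the kernel whether a device is needed at boot. No such flag exists today; purely as a sketch, the check could use xenbus_scanf() the same way xenbus_dev_is_online() does, with a made-up "needed-for-boot" node name:

/*
 * Hypothetical helper: the "needed-for-boot" node is invented for this
 * sketch and is not written by any toolstack.
 */
static int xenbus_dev_needed_for_boot(struct xenbus_device *xendev)
{
	int needed;

	if (xenbus_scanf(XBT_NIL, xendev->nodename,
			 "needed-for-boot", "%d", &needed) != 1)
		return 1;	/* no hint from the tools: assume needed */

	return needed;
}

is_device_connecting() could then return 0 early for devices where this reports 0, shortening the boot-time wait as the comment suggests.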
diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
index ba0cf0b..cf38e15 100644
--- a/firmware/ihex2fw.c
+++ b/firmware/ihex2fw.c
@@ -124,8 +124,7 @@
 	if (process_ihex(data, st.st_size))
 		return 1;
 
-	output_records(outfd);
-	return 0;
+	return output_records(outfd);
 }
 
 static int process_ihex(uint8_t *data, ssize_t size)
@@ -269,11 +268,13 @@
 
 		p->addr = htonl(p->addr);
 		p->len = htons(p->len);
-		write(outfd, &p->addr, writelen);
+		if (write(outfd, &p->addr, writelen) != writelen)
+			return 1;
 		p = p->next;
 	}
 	/* EOF record is zero length, since we don't bother to represent
 	   the type field in the binary version */
-	write(outfd, zeroes, 6);
+	if (write(outfd, zeroes, 6) != 6)
+		return 1;
 	return 0;
 }
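The two hunks above turn previously ignored write() results into hard failures, which is enough for ihex2fw's small records. In general, write() may also succeed partially; a common user-space pattern (not part of this patch, plain POSIX) is to loop until the buffer is drained:

#include <errno.h>
#include <unistd.h>

/* Write exactly len bytes or fail, retrying short writes and EINTR. */
static int write_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;

	while (len) {
		ssize_t n = write(fd, p, len);

		if (n < 0) {
			if (errno == EINTR)
				continue;
			return 1;	/* matches ihex2fw's error convention */
		}
		p += n;
		len -= (size_t)n;
	}
	return 0;
}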
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 7e05114..814ac4e 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -9,6 +9,8 @@
 
 	  If unsure, say N.
 
+if 9P_FS
+
 config 9P_FSCACHE
 	bool "Enable 9P client caching support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -20,7 +22,6 @@
 
 config 9P_FS_POSIX_ACL
 	bool "9P POSIX Access Control Lists"
-	depends on 9P_FS
 	select FS_POSIX_ACL
 	help
 	  POSIX Access Control Lists (ACLs) support permissions for users and
@@ -30,3 +31,5 @@
 	  Linux website <http://acl.bestbits.at/>.
 
 	  If you don't know what Access Control Lists are, say N
+
+endif
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index f8ba37e..ab8c127 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -3,6 +3,7 @@
 9p-objs := \
 	vfs_super.o \
 	vfs_inode.o \
+	vfs_inode_dotl.o \
 	vfs_addr.o \
 	vfs_file.o \
 	vfs_dir.o \
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 12d6023..02a2cf6 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -28,7 +28,7 @@
 {
 	ssize_t size;
 	void *value = NULL;
-	struct posix_acl *acl = NULL;;
+	struct posix_acl *acl = NULL;
 
 	size = v9fs_fid_xattr_get(fid, name, NULL, 0);
 	if (size > 0) {
@@ -91,11 +91,14 @@
 	return acl;
 }
 
-int v9fs_check_acl(struct inode *inode, int mask)
+int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
 	struct posix_acl *acl;
 	struct v9fs_session_info *v9ses;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	v9ses = v9fs_inode2v9ses(inode);
 	if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) {
 		/*
@@ -362,7 +365,7 @@
 	case ACL_TYPE_DEFAULT:
 		name = POSIX_ACL_XATTR_DEFAULT;
 		if (!S_ISDIR(inode->i_mode)) {
-			retval = -EINVAL;
+			retval = acl ? -EINVAL : 0;
 			goto err_out;
 		}
 		break;
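The IPERM_FLAG_RCU check added to v9fs_check_acl() above follows the general contract for permission/ACL callbacks under RCU path walk: if the check cannot be completed without sleeping or taking locks, return -ECHILD so the VFS drops out of RCU walk and retries in ref-walk mode. A minimal sketch of that pattern (illustrative only; assumes <linux/fs.h> and <linux/errno.h>):

static int example_check_acl(struct inode *inode, int mask, unsigned int flags)
{
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;		/* slow path below may block */

	/* ref-walk path: free to take locks, issue I/O, consult ACLs... */
	return 0;
}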
diff --git a/fs/9p/acl.h b/fs/9p/acl.h
index 59e18c2..7ef3ac9 100644
--- a/fs/9p/acl.h
+++ b/fs/9p/acl.h
@@ -16,7 +16,7 @@
 
 #ifdef CONFIG_9P_FS_POSIX_ACL
 extern int v9fs_get_acl(struct inode *, struct p9_fid *);
-extern int v9fs_check_acl(struct inode *inode, int mask);
+extern int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags);
 extern int v9fs_acl_chmod(struct dentry *);
 extern int v9fs_set_create_acl(struct dentry *,
 			       struct posix_acl *, struct posix_acl *);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index cb63968..c4b5d88 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -113,9 +113,27 @@
 
 struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
 									char *);
-void v9fs_session_close(struct v9fs_session_info *v9ses);
-void v9fs_session_cancel(struct v9fs_session_info *v9ses);
-void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_close(struct v9fs_session_info *v9ses);
+extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+			struct nameidata *nameidata);
+extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry);
+extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
+			void *p);
+extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
+			struct p9_fid *fid,
+			struct super_block *sb);
+
+extern const struct inode_operations v9fs_dir_inode_operations_dotl;
+extern const struct inode_operations v9fs_file_inode_operations_dotl;
+extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
+			struct p9_fid *fid,
+			struct super_block *sb);
 
 /* other default globals */
 #define V9FS_PORT	564
@@ -138,3 +156,21 @@
 {
 	return v9ses->flags & V9FS_PROTO_2000L;
 }
+
+/**
+ * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * issuing an attribute request
+ * @v9ses: session information
+ * @fid: fid to issue attribute request for
+ * @sb: superblock on which to create inode
+ *
+ */
+static inline struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+				struct super_block *sb)
+{
+	if (v9fs_proto_dotl(v9ses))
+		return v9fs_inode_dotl(v9ses, fid, sb);
+	else
+		return v9fs_inode(v9ses, fid, sb);
+}
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index bab0eac..b789f8e 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -59,7 +59,6 @@
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
-void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index cbf4e50..233b7d4 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -51,7 +51,7 @@
  *
  */
 
-static int v9fs_dentry_delete(struct dentry *dentry)
+static int v9fs_dentry_delete(const struct dentry *dentry)
 {
 	P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
 									dentry);
@@ -68,7 +68,7 @@
  *
  */
 
-static int v9fs_cached_dentry_delete(struct dentry *dentry)
+static int v9fs_cached_dentry_delete(const struct dentry *dentry)
 {
 	struct inode *inode = dentry->d_inode;
 	P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
@@ -86,7 +86,7 @@
  *
  */
 
-void v9fs_dentry_release(struct dentry *dentry)
+static void v9fs_dentry_release(struct dentry *dentry)
 {
 	struct v9fs_dentry *dent;
 	struct p9_fid *temp, *current_fid;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 34bf71b..b76a40b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -49,15 +49,8 @@
 
 static const struct inode_operations v9fs_dir_inode_operations;
 static const struct inode_operations v9fs_dir_inode_operations_dotu;
-static const struct inode_operations v9fs_dir_inode_operations_dotl;
 static const struct inode_operations v9fs_file_inode_operations;
-static const struct inode_operations v9fs_file_inode_operations_dotl;
 static const struct inode_operations v9fs_symlink_inode_operations;
-static const struct inode_operations v9fs_symlink_inode_operations_dotl;
-
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		    dev_t rdev);
 
 /**
  * unixmode2p9mode - convert unix mode bits to plan 9
@@ -237,48 +230,20 @@
  *
  */
 
-void v9fs_destroy_inode(struct inode *inode)
+static void v9fs_i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode));
 }
+
+void v9fs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, v9fs_i_callback);
+}
 #endif
 
 /**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
- * new file system object. This checks the S_ISGID to determine the owning
- * group of the new file system object.
- */
-
-static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
-{
-	BUG_ON(dir_inode == NULL);
-
-	if (dir_inode->i_mode & S_ISGID) {
-		/* set_gid bit is set.*/
-		return dir_inode->i_gid;
-	}
-	return current_fsgid();
-}
-
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
-	struct dentry *dentry;
-
-	spin_lock(&dcache_lock);
-	/* Directory should have only one entry. */
-	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
-	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-	spin_unlock(&dcache_lock);
-	return dentry;
-}
-
-/**
  * v9fs_get_inode - helper function to setup an inode
  * @sb: superblock
  * @mode: mode to setup inode with
@@ -447,7 +412,7 @@
 #endif
 }
 
-static struct inode *
+struct inode *
 v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
 	struct super_block *sb)
 {
@@ -482,60 +447,6 @@
 	return ERR_PTR(err);
 }
 
-static struct inode *
-v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-	struct super_block *sb)
-{
-	struct inode *ret = NULL;
-	int err;
-	struct p9_stat_dotl *st;
-
-	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
-	if (IS_ERR(st))
-		return ERR_CAST(st);
-
-	ret = v9fs_get_inode(sb, st->st_mode);
-	if (IS_ERR(ret)) {
-		err = PTR_ERR(ret);
-		goto error;
-	}
-
-	v9fs_stat2inode_dotl(st, ret);
-	ret->i_ino = v9fs_qid2ino(&st->qid);
-#ifdef CONFIG_9P_FSCACHE
-	v9fs_vcookie_set_qid(ret, &st->qid);
-	v9fs_cache_inode_get_cookie(ret);
-#endif
-	err = v9fs_get_acl(ret, fid);
-	if (err) {
-		iput(ret);
-		goto error;
-	}
-	kfree(st);
-	return ret;
-error:
-	kfree(st);
-	return ERR_PTR(err);
-}
-
-/**
- * v9fs_inode_from_fid - Helper routine to populate an inode by
- * issuing a attribute request
- * @v9ses: session information
- * @fid: fid to issue attribute request for
- * @sb: superblock on which to create inode
- *
- */
-static inline struct inode *
-v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-			struct super_block *sb)
-{
-	if (v9fs_proto_dotl(v9ses))
-		return v9fs_inode_dotl(v9ses, fid, sb);
-	else
-		return v9fs_inode(v9ses, fid, sb);
-}
-
 /**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
@@ -626,12 +537,6 @@
 		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
 		goto error;
 	}
-
-	if (v9ses->cache)
-		dentry->d_op = &v9fs_cached_dentry_operations;
-	else
-		dentry->d_op = &v9fs_dentry_operations;
-
 	d_instantiate(dentry, inode);
 	err = v9fs_fid_add(dentry, fid);
 	if (err < 0)
@@ -650,144 +555,6 @@
 }
 
 /**
- * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
- * @dir: directory inode that is being created
- * @dentry:  dentry that is being deleted
- * @mode: create permissions
- * @nd: path information
- *
- */
-
-static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		struct nameidata *nd)
-{
-	int err = 0;
-	char *name = NULL;
-	gid_t gid;
-	int flags;
-	mode_t mode;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL;
-	struct p9_fid *dfid, *ofid;
-	struct file *filp;
-	struct p9_qid qid;
-	struct inode *inode;
-	struct posix_acl *pacl = NULL, *dacl = NULL;
-
-	v9ses = v9fs_inode2v9ses(dir);
-	if (nd && nd->flags & LOOKUP_OPEN)
-		flags = nd->intent.open.flags - 1;
-	else {
-		/*
-		 * create call without LOOKUP_OPEN is due
-		 * to mknod of regular files. So use mknod
-		 * operation.
-		 */
-		return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
-	}
-
-	name = (char *) dentry->d_name.name;
-	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
-			"mode:0x%x\n", name, flags, omode);
-
-	dfid = v9fs_fid_lookup(dentry->d_parent);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		return err;
-	}
-
-	/* clone a fid to use for creation */
-	ofid = p9_client_walk(dfid, 0, NULL, 1);
-	if (IS_ERR(ofid)) {
-		err = PTR_ERR(ofid);
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
-		return err;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in creat %d\n", err);
-		goto error;
-	}
-	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-				"p9_client_open_dotl failed in creat %d\n",
-				err);
-		goto error;
-	}
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE ||
-	    (nd && nd->flags & LOOKUP_OPEN)) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		dentry->d_op = &v9fs_cached_dentry_operations;
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		/* The fid would get clunked via a dput */
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate
-		 * inode with stat. We need to get an inode
-		 * so that we can set the acl with dentry
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		dentry->d_op = &v9fs_dentry_operations;
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-
-	/* if we are opening a file, assign the open fid to the file */
-	if (nd && nd->flags & LOOKUP_OPEN) {
-		filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
-		if (IS_ERR(filp)) {
-			p9_client_clunk(ofid);
-			return PTR_ERR(filp);
-		}
-		filp->private_data = ofid;
-	} else
-		p9_client_clunk(ofid);
-
-	return 0;
-
-error:
-	if (ofid)
-		p9_client_clunk(ofid);
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
-/**
  * v9fs_vfs_create - VFS hook to create files
  * @dir: directory inode that is being created
  * @dentry:  dentry that is being deleted
@@ -877,107 +644,6 @@
 	return err;
 }
 
-
-/**
- * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
- * @dir:  inode that is being unlinked
- * @dentry: dentry that is being unlinked
- * @mode: mode for new directory
- *
- */
-
-static int v9fs_vfs_mkdir_dotl(struct inode *dir,
-			       struct dentry *dentry, int omode)
-{
-	int err;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL, *dfid = NULL;
-	gid_t gid;
-	char *name;
-	mode_t mode;
-	struct inode *inode;
-	struct p9_qid qid;
-	struct dentry *dir_dentry;
-	struct posix_acl *dacl = NULL, *pacl = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
-	err = 0;
-	v9ses = v9fs_inode2v9ses(dir);
-
-	omode |= S_IFDIR;
-	if (dir->i_mode & S_ISGID)
-		omode |= S_ISGID;
-
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		dfid = NULL;
-		goto error;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in mkdir %d\n", err);
-		goto error;
-	}
-	name = (char *) dentry->d_name.name;
-	err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
-	if (err < 0)
-		goto error;
-
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		dentry->d_op = &v9fs_cached_dentry_operations;
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate
-		 * inode with stat. We need to get an inode
-		 * so that we can set the acl with dentry
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		dentry->d_op = &v9fs_dentry_operations;
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-
-error:
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
 /**
  * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
  * @dir:  inode that is being walked from
@@ -986,7 +652,7 @@
  *
  */
 
-static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 				      struct nameidata *nameidata)
 {
 	struct super_block *sb;
@@ -1033,11 +699,6 @@
 		goto error_iput;
 
 inst_out:
-	if (v9ses->cache)
-		dentry->d_op = &v9fs_cached_dentry_operations;
-	else
-		dentry->d_op = &v9fs_dentry_operations;
-
 	d_add(dentry, inode);
 	return NULL;
 
@@ -1056,7 +717,7 @@
  *
  */
 
-static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
+int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 0);
 }
@@ -1068,7 +729,7 @@
  *
  */
 
-static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
+int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 1);
 }
@@ -1082,7 +743,7 @@
  *
  */
 
-static int
+int
 v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		struct inode *new_dir, struct dentry *new_dentry)
 {
@@ -1189,42 +850,6 @@
 	return 0;
 }
 
-static int
-v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
-		 struct kstat *stat)
-{
-	int err;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid;
-	struct p9_stat_dotl *st;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
-	err = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		return simple_getattr(mnt, dentry, stat);
-
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	/* Ask for all the fields in stat structure. Server will return
-	 * whatever it supports
-	 */
-
-	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
-	if (IS_ERR(st))
-		return PTR_ERR(st);
-
-	v9fs_stat2inode_dotl(st, dentry->d_inode);
-	generic_fillattr(dentry->d_inode, stat);
-	/* Change block size to what the server returned */
-	stat->blksize = st->st_blksize;
-
-	kfree(st);
-	return 0;
-}
-
 /**
  * v9fs_vfs_setattr - set file metadata
  * @dentry: file whose metadata to set
@@ -1284,64 +909,6 @@
 }
 
 /**
- * v9fs_vfs_setattr_dotl - set file metadata
- * @dentry: file whose metadata to set
- * @iattr: metadata assignment structure
- *
- */
-
-int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
-{
-	int retval;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid;
-	struct p9_iattr_dotl p9attr;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "\n");
-
-	retval = inode_change_ok(dentry->d_inode, iattr);
-	if (retval)
-		return retval;
-
-	p9attr.valid = iattr->ia_valid;
-	p9attr.mode = iattr->ia_mode;
-	p9attr.uid = iattr->ia_uid;
-	p9attr.gid = iattr->ia_gid;
-	p9attr.size = iattr->ia_size;
-	p9attr.atime_sec = iattr->ia_atime.tv_sec;
-	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
-	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
-	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
-
-	retval = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	retval = p9_client_setattr(fid, &p9attr);
-	if (retval < 0)
-		return retval;
-
-	if ((iattr->ia_valid & ATTR_SIZE) &&
-	    iattr->ia_size != i_size_read(dentry->d_inode)) {
-		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
-		if (retval)
-			return retval;
-	}
-
-	setattr_copy(dentry->d_inode, iattr);
-	mark_inode_dirty(dentry->d_inode);
-	if (iattr->ia_valid & ATTR_MODE) {
-		/* We also want to update ACL when we update mode bits */
-		retval = v9fs_acl_chmod(dentry);
-		if (retval < 0)
-			return retval;
-	}
-	return 0;
-}
-
-/**
  * v9fs_stat2inode - populate an inode structure with mistat info
  * @stat: Plan 9 metadata (mistat) structure
  * @inode: inode to populate
@@ -1419,77 +986,6 @@
 }
 
 /**
- * v9fs_stat2inode_dotl - populate an inode structure with stat info
- * @stat: stat structure
- * @inode: inode to populate
- * @sb: superblock of filesystem
- *
- */
-
-void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
-{
-
-	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
-		inode->i_atime.tv_sec = stat->st_atime_sec;
-		inode->i_atime.tv_nsec = stat->st_atime_nsec;
-		inode->i_mtime.tv_sec = stat->st_mtime_sec;
-		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
-		inode->i_ctime.tv_sec = stat->st_ctime_sec;
-		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
-		inode->i_uid = stat->st_uid;
-		inode->i_gid = stat->st_gid;
-		inode->i_nlink = stat->st_nlink;
-		inode->i_mode = stat->st_mode;
-		inode->i_rdev = new_decode_dev(stat->st_rdev);
-
-		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-			init_special_inode(inode, inode->i_mode, inode->i_rdev);
-
-		i_size_write(inode, stat->st_size);
-		inode->i_blocks = stat->st_blocks;
-	} else {
-		if (stat->st_result_mask & P9_STATS_ATIME) {
-			inode->i_atime.tv_sec = stat->st_atime_sec;
-			inode->i_atime.tv_nsec = stat->st_atime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_MTIME) {
-			inode->i_mtime.tv_sec = stat->st_mtime_sec;
-			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_CTIME) {
-			inode->i_ctime.tv_sec = stat->st_ctime_sec;
-			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_UID)
-			inode->i_uid = stat->st_uid;
-		if (stat->st_result_mask & P9_STATS_GID)
-			inode->i_gid = stat->st_gid;
-		if (stat->st_result_mask & P9_STATS_NLINK)
-			inode->i_nlink = stat->st_nlink;
-		if (stat->st_result_mask & P9_STATS_MODE) {
-			inode->i_mode = stat->st_mode;
-			if ((S_ISBLK(inode->i_mode)) ||
-						(S_ISCHR(inode->i_mode)))
-				init_special_inode(inode, inode->i_mode,
-								inode->i_rdev);
-		}
-		if (stat->st_result_mask & P9_STATS_RDEV)
-			inode->i_rdev = new_decode_dev(stat->st_rdev);
-		if (stat->st_result_mask & P9_STATS_SIZE)
-			i_size_write(inode, stat->st_size);
-		if (stat->st_result_mask & P9_STATS_BLOCKS)
-			inode->i_blocks = stat->st_blocks;
-	}
-	if (stat->st_result_mask & P9_STATS_GEN)
-			inode->i_generation = stat->st_gen;
-
-	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
-	 * because the inode structure does not have fields for them.
-	 */
-}
-
-/**
  * v9fs_qid2ino - convert qid into inode number
  * @qid: qid to hash
  *
@@ -1595,7 +1091,7 @@
  *
  */
 
-static void
+void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
 	char *s = nd_get_link(nd);
@@ -1639,94 +1135,6 @@
 }
 
 /**
- * v9fs_vfs_symlink_dotl - helper function to create symlinks
- * @dir: directory inode containing symlink
- * @dentry: dentry for symlink
- * @symname: symlink data
- *
- * See Also: 9P2000.L RFC for more information
- *
- */
-
-static int
-v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
-		const char *symname)
-{
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *dfid;
-	struct p9_fid *fid = NULL;
-	struct inode *inode;
-	struct p9_qid qid;
-	char *name;
-	int err;
-	gid_t gid;
-
-	name = (char *) dentry->d_name.name;
-	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
-			dir->i_ino, name, symname);
-	v9ses = v9fs_inode2v9ses(dir);
-
-	dfid = v9fs_fid_lookup(dentry->d_parent);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		return err;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-
-	/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
-	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
-
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
-		goto error;
-	}
-
-	if (v9ses->cache) {
-		/* Now walk from the parent so we can get an unopened fid. */
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-					err);
-			fid = NULL;
-			goto error;
-		}
-
-		/* instantiate inode and assign the unopened fid to dentry */
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-					err);
-			goto error;
-		}
-		dentry->d_op = &v9fs_cached_dentry_operations;
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/* Not in cached mode. No need to populate inode with stat */
-		inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		dentry->d_op = &v9fs_dentry_operations;
-		d_instantiate(dentry, inode);
-	}
-
-error:
-	if (fid)
-		p9_client_clunk(fid);
-
-	return err;
-}
-
-/**
  * v9fs_vfs_symlink - helper function to create symlinks
  * @dir: directory inode containing symlink
  * @dentry: dentry for symlink
@@ -1785,77 +1193,6 @@
 }
 
 /**
- * v9fs_vfs_link_dotl - create a hardlink for dotl
- * @old_dentry: dentry for file to link to
- * @dir: inode destination for new link
- * @dentry: dentry for link
- *
- */
-
-static int
-v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
-		struct dentry *dentry)
-{
-	int err;
-	struct p9_fid *dfid, *oldfid;
-	char *name;
-	struct v9fs_session_info *v9ses;
-	struct dentry *dir_dentry;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
-			dir->i_ino, old_dentry->d_name.name,
-			dentry->d_name.name);
-
-	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid))
-		return PTR_ERR(dfid);
-
-	oldfid = v9fs_fid_lookup(old_dentry);
-	if (IS_ERR(oldfid))
-		return PTR_ERR(oldfid);
-
-	name = (char *) dentry->d_name.name;
-
-	err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
-
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
-		return err;
-	}
-
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		/* Get the latest stat info from server. */
-		struct p9_fid *fid;
-		struct p9_stat_dotl *st;
-
-		fid = v9fs_fid_lookup(old_dentry);
-		if (IS_ERR(fid))
-			return PTR_ERR(fid);
-
-		st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
-		if (IS_ERR(st))
-			return PTR_ERR(st);
-
-		v9fs_stat2inode_dotl(st, old_dentry->d_inode);
-
-		kfree(st);
-	} else {
-		/* Caching disabled. No need to get upto date stat info.
-		 * This dentry will be released immediately. So, just hold the
-		 * inode
-		 */
-		ihold(old_dentry->d_inode);
-	}
-
-	dentry->d_op = old_dentry->d_op;
-	d_instantiate(dentry, old_dentry->d_inode);
-
-	return err;
-}
-
-/**
  * v9fs_vfs_mknod - create a special file
  * @dir: inode destination for new link
  * @dentry: dentry for file
@@ -1900,160 +1237,6 @@
 	return retval;
 }
 
-/**
- * v9fs_vfs_mknod_dotl - create a special file
- * @dir: inode destination for new link
- * @dentry: dentry for file
- * @mode: mode for creation
- * @rdev: device associated with special file
- *
- */
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		dev_t rdev)
-{
-	int err;
-	char *name;
-	mode_t mode;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL, *dfid = NULL;
-	struct inode *inode;
-	gid_t gid;
-	struct p9_qid qid;
-	struct dentry *dir_dentry;
-	struct posix_acl *dacl = NULL, *pacl = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS,
-		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
-		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
-
-	if (!new_valid_dev(rdev))
-		return -EINVAL;
-
-	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		dfid = NULL;
-		goto error;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in mknod %d\n", err);
-		goto error;
-	}
-	name = (char *) dentry->d_name.name;
-
-	err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
-	if (err < 0)
-		goto error;
-
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		dentry->d_op = &v9fs_cached_dentry_operations;
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate inode with stat.
-		 * socket syscall returns a fd, so we need instantiate
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		dentry->d_op = &v9fs_dentry_operations;
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-error:
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
-static int
-v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen)
-{
-	int retval;
-	struct p9_fid *fid;
-	char *target = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
-	retval = -EPERM;
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	retval = p9_client_readlink(fid, &target);
-	if (retval < 0)
-		return retval;
-
-	strncpy(buffer, target, buflen);
-	P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer);
-
-	retval = strnlen(buffer, buflen);
-	return retval;
-}
-
-/**
- * v9fs_vfs_follow_link_dotl - follow a symlink path
- * @dentry: dentry for symlink
- * @nd: nameidata
- *
- */
-
-static void *
-v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
-{
-	int len = 0;
-	char *link = __getname();
-
-	P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
-
-	if (!link)
-		link = ERR_PTR(-ENOMEM);
-	else {
-		len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX);
-		if (len < 0) {
-			__putname(link);
-			link = ERR_PTR(len);
-		} else
-			link[min(len, PATH_MAX-1)] = 0;
-	}
-	nd_set_link(nd, link);
-
-	return NULL;
-}
-
 static const struct inode_operations v9fs_dir_inode_operations_dotu = {
 	.create = v9fs_vfs_create,
 	.lookup = v9fs_vfs_lookup,
@@ -2068,25 +1251,6 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_dir_inode_operations_dotl = {
-	.create = v9fs_vfs_create_dotl,
-	.lookup = v9fs_vfs_lookup,
-	.link = v9fs_vfs_link_dotl,
-	.symlink = v9fs_vfs_symlink_dotl,
-	.unlink = v9fs_vfs_unlink,
-	.mkdir = v9fs_vfs_mkdir_dotl,
-	.rmdir = v9fs_vfs_rmdir,
-	.mknod = v9fs_vfs_mknod_dotl,
-	.rename = v9fs_vfs_rename,
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-	.check_acl = v9fs_check_acl,
-};
-
 static const struct inode_operations v9fs_dir_inode_operations = {
 	.create = v9fs_vfs_create,
 	.lookup = v9fs_vfs_lookup,
@@ -2104,16 +1268,6 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_file_inode_operations_dotl = {
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-	.check_acl = v9fs_check_acl,
-};
-
 static const struct inode_operations v9fs_symlink_inode_operations = {
 	.readlink = generic_readlink,
 	.follow_link = v9fs_vfs_follow_link,
@@ -2122,14 +1276,3 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
-	.readlink = v9fs_vfs_readlink_dotl,
-	.follow_link = v9fs_vfs_follow_link_dotl,
-	.put_link = v9fs_vfs_put_link,
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-};
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
new file mode 100644
index 0000000..fe3ffa9
--- /dev/null
+++ b/fs/9p/vfs_inode_dotl.c
@@ -0,0 +1,824 @@
+/*
+ *  linux/fs/9p/vfs_inode_dotl.c
+ *
+ * This file contains vfs inode ops for the 9P2000.L protocol.
+ *
+ *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/inet.h>
+#include <linux/namei.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+
+#include "v9fs.h"
+#include "v9fs_vfs.h"
+#include "fid.h"
+#include "cache.h"
+#include "xattr.h"
+#include "acl.h"
+
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		    dev_t rdev);
+
+/**
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * new file system object. This checks the S_ISGID bit to determine the owning
+ * group of the new file system object.
+ */
+
+static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
+{
+	BUG_ON(dir_inode == NULL);
+
+	if (dir_inode->i_mode & S_ISGID) {
+		/* set_gid bit is set.*/
+		return dir_inode->i_gid;
+	}
+	return current_fsgid();
+}
+
+/**
+ * v9fs_dentry_from_dir_inode - helper function to get the dentry from
+ * dir inode.
+ *
+ */
+
+static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
+{
+	struct dentry *dentry;
+
+	spin_lock(&inode->i_lock);
+	/* Directory should have only one entry. */
+	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
+	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+	spin_unlock(&inode->i_lock);
+	return dentry;
+}
+
+struct inode *
+v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+	struct super_block *sb)
+{
+	struct inode *ret = NULL;
+	int err;
+	struct p9_stat_dotl *st;
+
+	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+	if (IS_ERR(st))
+		return ERR_CAST(st);
+
+	ret = v9fs_get_inode(sb, st->st_mode);
+	if (IS_ERR(ret)) {
+		err = PTR_ERR(ret);
+		goto error;
+	}
+
+	v9fs_stat2inode_dotl(st, ret);
+	ret->i_ino = v9fs_qid2ino(&st->qid);
+#ifdef CONFIG_9P_FSCACHE
+	v9fs_vcookie_set_qid(ret, &st->qid);
+	v9fs_cache_inode_get_cookie(ret);
+#endif
+	err = v9fs_get_acl(ret, fid);
+	if (err) {
+		iput(ret);
+		goto error;
+	}
+	kfree(st);
+	return ret;
+error:
+	kfree(st);
+	return ERR_PTR(err);
+}
+
+/**
+ * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @dir: directory inode in which the file is being created
+ * @dentry: dentry of the file that is being created
+ * @mode: create permissions
+ * @nd: path information
+ *
+ */
+
+static int
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		struct nameidata *nd)
+{
+	int err = 0;
+	char *name = NULL;
+	gid_t gid;
+	int flags;
+	mode_t mode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL;
+	struct p9_fid *dfid, *ofid;
+	struct file *filp;
+	struct p9_qid qid;
+	struct inode *inode;
+	struct posix_acl *pacl = NULL, *dacl = NULL;
+
+	v9ses = v9fs_inode2v9ses(dir);
+	if (nd && nd->flags & LOOKUP_OPEN)
+		flags = nd->intent.open.flags - 1;
+	else {
+		/*
+		 * A create call without LOOKUP_OPEN comes from
+		 * mknod of regular files, so use the mknod
+		 * operation instead.
+		 */
+		return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+	}
+
+	name = (char *) dentry->d_name.name;
+	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
+			"mode:0x%x\n", name, flags, omode);
+
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return err;
+	}
+
+	/* clone a fid to use for creation */
+	ofid = p9_client_walk(dfid, 0, NULL, 1);
+	if (IS_ERR(ofid)) {
+		err = PTR_ERR(ofid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		return err;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in creat %d\n", err);
+		goto error;
+	}
+	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+				"p9_client_open_dotl failed in creat %d\n",
+				err);
+		goto error;
+	}
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	fid = p9_client_walk(dfid, 1, &name, 1);
+	if (IS_ERR(fid)) {
+		err = PTR_ERR(fid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		fid = NULL;
+		goto error;
+	}
+	inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+		goto error;
+	}
+	d_instantiate(dentry, inode);
+	err = v9fs_fid_add(dentry, fid);
+	if (err < 0)
+		goto error;
+
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+
+	/* Since we are opening a file, assign the open fid to the file */
+	filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
+	if (IS_ERR(filp)) {
+		p9_client_clunk(ofid);
+		return PTR_ERR(filp);
+	}
+	filp->private_data = ofid;
+	return 0;
+
+error:
+	if (ofid)
+		p9_client_clunk(ofid);
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+/**
+ * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @dir:  inode of the parent directory
+ * @dentry: dentry of the directory that is being created
+ * @mode: mode for new directory
+ *
+ */
+
+static int v9fs_vfs_mkdir_dotl(struct inode *dir,
+			       struct dentry *dentry, int omode)
+{
+	int err;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL, *dfid = NULL;
+	gid_t gid;
+	char *name;
+	mode_t mode;
+	struct inode *inode;
+	struct p9_qid qid;
+	struct dentry *dir_dentry;
+	struct posix_acl *dacl = NULL, *pacl = NULL;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+	err = 0;
+	v9ses = v9fs_inode2v9ses(dir);
+
+	omode |= S_IFDIR;
+	if (dir->i_mode & S_ISGID)
+		omode |= S_ISGID;
+
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		dfid = NULL;
+		goto error;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in mkdir %d\n", err);
+		goto error;
+	}
+	name = (char *) dentry->d_name.name;
+	err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
+	if (err < 0)
+		goto error;
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+				err);
+			fid = NULL;
+			goto error;
+		}
+
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+				err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/*
+		 * Not in cached mode. No need to populate
+		 * inode with stat. We need to get an inode
+		 * so that we can set the acl with dentry
+		 */
+		inode = v9fs_get_inode(dir->i_sb, mode);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+
+error:
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+static int
+v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
+		 struct kstat *stat)
+{
+	int err;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	struct p9_stat_dotl *st;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+	err = -EPERM;
+	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+		return simple_getattr(mnt, dentry, stat);
+
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	/* Ask for all the fields in the stat structure. The server will
+	 * return whatever it supports.
+	 */
+
+	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+	if (IS_ERR(st))
+		return PTR_ERR(st);
+
+	v9fs_stat2inode_dotl(st, dentry->d_inode);
+	generic_fillattr(dentry->d_inode, stat);
+	/* Change block size to what the server returned */
+	stat->blksize = st->st_blksize;
+
+	kfree(st);
+	return 0;
+}
+
+/**
+ * v9fs_vfs_setattr_dotl - set file metadata
+ * @dentry: file whose metadata to set
+ * @iattr: metadata assignment structure
+ *
+ */
+
+int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+{
+	int retval;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	struct p9_iattr_dotl p9attr;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "\n");
+
+	retval = inode_change_ok(dentry->d_inode, iattr);
+	if (retval)
+		return retval;
+
+	p9attr.valid = iattr->ia_valid;
+	p9attr.mode = iattr->ia_mode;
+	p9attr.uid = iattr->ia_uid;
+	p9attr.gid = iattr->ia_gid;
+	p9attr.size = iattr->ia_size;
+	p9attr.atime_sec = iattr->ia_atime.tv_sec;
+	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+
+	retval = -EPERM;
+	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	retval = p9_client_setattr(fid, &p9attr);
+	if (retval < 0)
+		return retval;
+
+	if ((iattr->ia_valid & ATTR_SIZE) &&
+	    iattr->ia_size != i_size_read(dentry->d_inode)) {
+		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
+		if (retval)
+			return retval;
+	}
+
+	setattr_copy(dentry->d_inode, iattr);
+	mark_inode_dirty(dentry->d_inode);
+	if (iattr->ia_valid & ATTR_MODE) {
+		/* We also want to update ACL when we update mode bits */
+		retval = v9fs_acl_chmod(dentry);
+		if (retval < 0)
+			return retval;
+	}
+	return 0;
+}
+
+/**
+ * v9fs_stat2inode_dotl - populate an inode structure with stat info
+ * @stat: stat structure returned by the server
+ * @inode: inode to populate
+ *
+ */
+
+void
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+{
+
+	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
+		inode->i_atime.tv_sec = stat->st_atime_sec;
+		inode->i_atime.tv_nsec = stat->st_atime_nsec;
+		inode->i_mtime.tv_sec = stat->st_mtime_sec;
+		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+		inode->i_ctime.tv_sec = stat->st_ctime_sec;
+		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+		inode->i_uid = stat->st_uid;
+		inode->i_gid = stat->st_gid;
+		inode->i_nlink = stat->st_nlink;
+		inode->i_mode = stat->st_mode;
+		inode->i_rdev = new_decode_dev(stat->st_rdev);
+
+		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
+			init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+		i_size_write(inode, stat->st_size);
+		inode->i_blocks = stat->st_blocks;
+	} else {
+		if (stat->st_result_mask & P9_STATS_ATIME) {
+			inode->i_atime.tv_sec = stat->st_atime_sec;
+			inode->i_atime.tv_nsec = stat->st_atime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_MTIME) {
+			inode->i_mtime.tv_sec = stat->st_mtime_sec;
+			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_CTIME) {
+			inode->i_ctime.tv_sec = stat->st_ctime_sec;
+			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_UID)
+			inode->i_uid = stat->st_uid;
+		if (stat->st_result_mask & P9_STATS_GID)
+			inode->i_gid = stat->st_gid;
+		if (stat->st_result_mask & P9_STATS_NLINK)
+			inode->i_nlink = stat->st_nlink;
+		if (stat->st_result_mask & P9_STATS_MODE) {
+			inode->i_mode = stat->st_mode;
+			if ((S_ISBLK(inode->i_mode)) ||
+						(S_ISCHR(inode->i_mode)))
+				init_special_inode(inode, inode->i_mode,
+								inode->i_rdev);
+		}
+		if (stat->st_result_mask & P9_STATS_RDEV)
+			inode->i_rdev = new_decode_dev(stat->st_rdev);
+		if (stat->st_result_mask & P9_STATS_SIZE)
+			i_size_write(inode, stat->st_size);
+		if (stat->st_result_mask & P9_STATS_BLOCKS)
+			inode->i_blocks = stat->st_blocks;
+	}
+	if (stat->st_result_mask & P9_STATS_GEN)
+		inode->i_generation = stat->st_gen;
+
+	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
+	 * because the inode structure does not have fields for them.
+	 */
+}
+
+static int
+v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
+		const char *symname)
+{
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *dfid;
+	struct p9_fid *fid = NULL;
+	struct inode *inode;
+	struct p9_qid qid;
+	char *name;
+	int err;
+	gid_t gid;
+
+	name = (char *) dentry->d_name.name;
+	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
+			dir->i_ino, name, symname);
+	v9ses = v9fs_inode2v9ses(dir);
+
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return err;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+
+	/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
+	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
+
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+		goto error;
+	}
+
+	if (v9ses->cache) {
+		/* Now walk from the parent so we can get an unopened fid. */
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+					err);
+			fid = NULL;
+			goto error;
+		}
+
+		/* instantiate inode and assign the unopened fid to dentry */
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+					err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/* Not in cached mode. No need to populate inode with stat */
+		inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+
+error:
+	if (fid)
+		p9_client_clunk(fid);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_link_dotl - create a hardlink for dotl
+ * @old_dentry: dentry for file to link to
+ * @dir: inode destination for new link
+ * @dentry: dentry for link
+ *
+ */
+
+static int
+v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
+		struct dentry *dentry)
+{
+	int err;
+	struct p9_fid *dfid, *oldfid;
+	char *name;
+	struct v9fs_session_info *v9ses;
+	struct dentry *dir_dentry;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+			dir->i_ino, old_dentry->d_name.name,
+			dentry->d_name.name);
+
+	v9ses = v9fs_inode2v9ses(dir);
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid))
+		return PTR_ERR(dfid);
+
+	oldfid = v9fs_fid_lookup(old_dentry);
+	if (IS_ERR(oldfid))
+		return PTR_ERR(oldfid);
+
+	name = (char *) dentry->d_name.name;
+
+	err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
+
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+		return err;
+	}
+
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		/* Get the latest stat info from server. */
+		struct p9_fid *fid;
+		struct p9_stat_dotl *st;
+
+		fid = v9fs_fid_lookup(old_dentry);
+		if (IS_ERR(fid))
+			return PTR_ERR(fid);
+
+		st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+		if (IS_ERR(st))
+			return PTR_ERR(st);
+
+		v9fs_stat2inode_dotl(st, old_dentry->d_inode);
+
+		kfree(st);
+	} else {
+		/* Caching disabled. No need to get up-to-date stat info.
+		 * This dentry will be released immediately. So, just hold
+		 * the inode.
+		 */
+		ihold(old_dentry->d_inode);
+	}
+	d_instantiate(dentry, old_dentry->d_inode);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_mknod_dotl - create a special file
+ * @dir: inode destination for new link
+ * @dentry: dentry for file
+ * @omode: mode for creation
+ * @rdev: device associated with special file
+ *
+ */
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		dev_t rdev)
+{
+	int err;
+	char *name;
+	mode_t mode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL, *dfid = NULL;
+	struct inode *inode;
+	gid_t gid;
+	struct p9_qid qid;
+	struct dentry *dir_dentry;
+	struct posix_acl *dacl = NULL, *pacl = NULL;
+
+	P9_DPRINTK(P9_DEBUG_VFS,
+		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	v9ses = v9fs_inode2v9ses(dir);
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		dfid = NULL;
+		goto error;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in mknod %d\n", err);
+		goto error;
+	}
+	name = (char *) dentry->d_name.name;
+
+	err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
+	if (err < 0)
+		goto error;
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+				err);
+			fid = NULL;
+			goto error;
+		}
+
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+				err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/*
+		 * Not in cached mode. No need to populate inode with stat.
+		 * The socket syscall returns an fd, so we still need to
+		 * instantiate the dentry.
+		 */
+		inode = v9fs_get_inode(dir->i_sb, mode);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+error:
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+/**
+ * v9fs_vfs_follow_link_dotl - follow a symlink path
+ * @dentry: dentry for symlink
+ * @nd: nameidata
+ *
+ */
+
+static void *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
+{
+	int retval;
+	struct p9_fid *fid;
+	char *link = __getname();
+	char *target;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+
+	if (!link) {
+		link = ERR_PTR(-ENOMEM);
+		goto ndset;
+	}
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid)) {
+		__putname(link);
+		link = ERR_PTR(PTR_ERR(fid));
+		goto ndset;
+	}
+	retval = p9_client_readlink(fid, &target);
+	if (!retval) {
+		strcpy(link, target);
+		kfree(target);
+		goto ndset;
+	}
+	__putname(link);
+	link = ERR_PTR(retval);
+ndset:
+	nd_set_link(nd, link);
+	return NULL;
+}
+
+const struct inode_operations v9fs_dir_inode_operations_dotl = {
+	.create = v9fs_vfs_create_dotl,
+	.lookup = v9fs_vfs_lookup,
+	.link = v9fs_vfs_link_dotl,
+	.symlink = v9fs_vfs_symlink_dotl,
+	.unlink = v9fs_vfs_unlink,
+	.mkdir = v9fs_vfs_mkdir_dotl,
+	.rmdir = v9fs_vfs_rmdir,
+	.mknod = v9fs_vfs_mknod_dotl,
+	.rename = v9fs_vfs_rename,
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+	.check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_file_inode_operations_dotl = {
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+	.check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_symlink_inode_operations_dotl = {
+	.readlink = generic_readlink,
+	.follow_link = v9fs_vfs_follow_link_dotl,
+	.put_link = v9fs_vfs_put_link,
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+};
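The dotl getattr path above (v9fs_stat2inode_dotl) copies only the attributes the server marked valid in st_result_mask, with a fast path when the whole P9_STATS_BASIC set is present. A self-contained sketch of that mask-gated copy, using made-up MASK_*/demo_* names rather than the real 9P constants:

#include <stdint.h>

#define MASK_UID	(1u << 0)	/* illustrative flags, not the P9_STATS_* values */
#define MASK_SIZE	(1u << 1)
#define MASK_BASIC	(MASK_UID | MASK_SIZE)

struct demo_stat  { uint32_t mask; uint32_t uid; uint64_t size; };
struct demo_inode { uint32_t uid;  uint64_t size; };

/* Copy only the attributes the server reported as valid. */
static void demo_stat2inode(const struct demo_stat *st, struct demo_inode *in)
{
	if ((st->mask & MASK_BASIC) == MASK_BASIC) {
		/* fast path: the full basic set is present */
		in->uid  = st->uid;
		in->size = st->size;
		return;
	}
	if (st->mask & MASK_UID)
		in->uid = st->uid;
	if (st->mask & MASK_SIZE)
		in->size = st->size;
}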
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index c55c614..dbaabe3 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -141,6 +141,11 @@
 	}
 	v9fs_fill_super(sb, v9ses, flags, data);
 
+	if (v9ses->cache)
+		sb->s_d_op = &v9fs_cached_dentry_operations;
+	else
+		sb->s_d_op = &v9fs_dentry_operations;
+
 	inode = v9fs_get_inode(sb, S_IFDIR | mode);
 	if (IS_ERR(inode)) {
 		retval = PTR_ERR(inode);
@@ -217,9 +222,6 @@
 
 	P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
 
-	if (s->s_root)
-		v9fs_dentry_release(s->s_root);	/* clunk root */
-
 	kill_anon_super(s);
 
 	v9fs_session_cancel(v9ses);
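The superblock hunk above introduces sb->s_d_op: instead of tagging each dentry with dentry_operations in ->lookup(), the filesystem picks a default once at mount time and the dcache applies it to every dentry allocated for that superblock. A rough illustration of the idea with simplified stand-in types (demo_* is not the real VFS API):

/* Simplified stand-ins for struct super_block / struct dentry. */
struct demo_dops   { int (*d_revalidate)(void *dentry); };
struct demo_sb     { const struct demo_dops *s_d_op; };
struct demo_dentry { const struct demo_dops *d_op; struct demo_sb *d_sb; };

/* Mount time: choose the default dentry_operations once. */
static void demo_fill_super(struct demo_sb *sb, int cached,
			    const struct demo_dops *cached_ops,
			    const struct demo_dops *plain_ops)
{
	sb->s_d_op = cached ? cached_ops : plain_ops;
}

/* What dentry allocation now does for us: inherit the superblock default. */
static void demo_d_alloc(struct demo_dentry *dentry, struct demo_sb *sb)
{
	dentry->d_sb = sb;
	dentry->d_op = sb->s_d_op;	/* no per-lookup d_op assignment needed */
}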
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 43ec7df..d288773 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -133,7 +133,7 @@
 			"p9_client_xattrcreate failed %d\n", retval);
 		goto error;
 	}
-	msize = fid->clnt->msize;;
+	msize = fid->clnt->msize;
 	while (value_len) {
 		if (value_len > (msize - P9_IOHDRSZ))
 			write_count = msize - P9_IOHDRSZ;
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index f4287e4..3b4a764 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -201,7 +201,8 @@
 };
 
 static int
-adfs_hash(struct dentry *parent, struct qstr *qstr)
+adfs_hash(const struct dentry *parent, const struct inode *inode,
+		struct qstr *qstr)
 {
 	const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen;
 	const unsigned char *name;
@@ -237,17 +238,19 @@
  * requirements of the underlying filesystem.
  */
 static int
-adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name)
+adfs_compare(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
 	int i;
 
-	if (entry->len != name->len)
+	if (len != name->len)
 		return 1;
 
 	for (i = 0; i < name->len; i++) {
 		char a, b;
 
-		a = entry->name[i];
+		a = str[i];
 		b = name->name[i];
 
 		if (a >= 'A' && a <= 'Z')
@@ -273,7 +276,6 @@
 	struct object_info obj;
 	int error;
 
-	dentry->d_op = &adfs_dentry_operations;	
 	lock_kernel();
 	error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj);
 	if (error == 0) {
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 959dbff..2d79540 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -240,9 +240,16 @@
 	return &ei->vfs_inode;
 }
 
+static void adfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
+}
+
 static void adfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
+	call_rcu(&inode->i_rcu, adfs_i_callback);
 }
 
 static void init_once(void *foo)
@@ -466,6 +473,7 @@
 		asb->s_namelen = ADFS_F_NAME_LEN;
 	}
 
+	sb->s_d_op = &adfs_dentry_operations;
 	root = adfs_iget(sb, &root_obj);
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
@@ -476,8 +484,7 @@
 		kfree(asb->s_map);
 		adfs_error(sb, "get root inode failed\n");
 		goto error;
-	} else
-		sb->s_root->d_op = &adfs_dentry_operations;
+	}
 	unlock_kernel();
 	return 0;
 
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a8cbdeb..0e95f73 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -201,6 +201,7 @@
 extern const struct address_space_operations	 affs_aops_ofs;
 
 extern const struct dentry_operations	 affs_dentry_operations;
+extern const struct dentry_operations	 affs_intl_dentry_operations;
 
 static inline void
 affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 7d0f0a3..3a4557e 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -128,7 +128,7 @@
 	void *data = dentry->d_fsdata;
 	struct list_head *head, *next;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
 	head = &inode->i_dentry;
 	next = head->next;
 	while (next != head) {
@@ -139,7 +139,7 @@
 		}
 		next = next->next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 }
 
 
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 914d1c0..e3e9efc 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -13,18 +13,26 @@
 typedef int (*toupper_t)(int);
 
 static int	 affs_toupper(int ch);
-static int	 affs_hash_dentry(struct dentry *, struct qstr *);
-static int       affs_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+static int	 affs_hash_dentry(const struct dentry *,
+		const struct inode *, struct qstr *);
+static int       affs_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
 static int	 affs_intl_toupper(int ch);
-static int	 affs_intl_hash_dentry(struct dentry *, struct qstr *);
-static int       affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+static int	 affs_intl_hash_dentry(const struct dentry *,
+		const struct inode *, struct qstr *);
+static int       affs_intl_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
 
 const struct dentry_operations affs_dentry_operations = {
 	.d_hash		= affs_hash_dentry,
 	.d_compare	= affs_compare_dentry,
 };
 
-static const struct dentry_operations affs_intl_dentry_operations = {
+const struct dentry_operations affs_intl_dentry_operations = {
 	.d_hash		= affs_intl_hash_dentry,
 	.d_compare	= affs_intl_compare_dentry,
 };
@@ -58,13 +66,13 @@
  * Note: the dentry argument is the parent dentry.
  */
 static inline int
-__affs_hash_dentry(struct dentry *dentry, struct qstr *qstr, toupper_t toupper)
+__affs_hash_dentry(struct qstr *qstr, toupper_t toupper)
 {
 	const u8 *name = qstr->name;
 	unsigned long hash;
 	int i;
 
-	i = affs_check_name(qstr->name,qstr->len);
+	i = affs_check_name(qstr->name, qstr->len);
 	if (i)
 		return i;
 
@@ -78,39 +86,41 @@
 }
 
 static int
-affs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
+affs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
-	return __affs_hash_dentry(dentry, qstr, affs_toupper);
+	return __affs_hash_dentry(qstr, affs_toupper);
 }
 static int
-affs_intl_hash_dentry(struct dentry *dentry, struct qstr *qstr)
+affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
-	return __affs_hash_dentry(dentry, qstr, affs_intl_toupper);
+	return __affs_hash_dentry(qstr, affs_intl_toupper);
 }
 
-static inline int
-__affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b, toupper_t toupper)
+static inline int __affs_compare_dentry(unsigned int len,
+		const char *str, const struct qstr *name, toupper_t toupper)
 {
-	const u8 *aname = a->name;
-	const u8 *bname = b->name;
-	int len;
+	const u8 *aname = str;
+	const u8 *bname = name->name;
 
-	/* 'a' is the qstr of an already existing dentry, so the name
-	 * must be valid. 'b' must be validated first.
+	/*
+	 * 'str' is the name of an already existing dentry, so the name
+	 * must be valid. 'name' must be validated first.
 	 */
 
-	if (affs_check_name(b->name,b->len))
+	if (affs_check_name(name->name, name->len))
 		return 1;
 
-	/* If the names are longer than the allowed 30 chars,
+	/*
+	 * If the names are longer than the allowed 30 chars,
 	 * the excess is ignored, so their length may differ.
 	 */
-	len = a->len;
 	if (len >= 30) {
-		if (b->len < 30)
+		if (name->len < 30)
 			return 1;
 		len = 30;
-	} else if (len != b->len)
+	} else if (len != name->len)
 		return 1;
 
 	for (; len > 0; len--)
@@ -121,14 +131,18 @@
 }
 
 static int
-affs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+affs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	return __affs_compare_dentry(dentry, a, b, affs_toupper);
+	return __affs_compare_dentry(len, str, name, affs_toupper);
 }
 static int
-affs_intl_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+affs_intl_compare_dentry(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	return __affs_compare_dentry(dentry, a, b, affs_intl_toupper);
+	return __affs_compare_dentry(len, str, name, affs_intl_toupper);
 }
 
 /*
@@ -226,7 +240,6 @@
 		if (IS_ERR(inode))
 			return ERR_CAST(inode);
 	}
-	dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
 	d_add(dentry, inode);
 	return NULL;
 }
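The adfs and affs conversions above move to the 2.6.38 dcache callback prototypes: ->d_hash() and ->d_compare() now take const dentry/inode pointers plus an explicit (len, str) pair for the candidate name, so they can be called without pinning the dentry. A minimal case-insensitive ->d_compare() in that style might look like the sketch below (demo_* names are placeholders; the affs-specific 30-character cap is omitted):

#include <linux/dcache.h>
#include <linux/ctype.h>

/*
 * Case-insensitive ->d_compare() using the prototype shown above.
 * Returns 0 for "same name", non-zero otherwise.
 */
static int demo_compare(const struct dentry *parent,
			const struct inode *pinode,
			const struct dentry *dentry,
			const struct inode *inode,
			unsigned int len, const char *str,
			const struct qstr *name)
{
	unsigned int i;

	if (len != name->len)
		return 1;
	for (i = 0; i < len; i++)
		if (tolower(str[i]) != tolower(name->name[i]))
			return 1;
	return 0;
}

static const struct dentry_operations demo_dentry_ops = {
	.d_compare	= demo_compare,
};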
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 0cf7f43..b31507d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -95,9 +95,16 @@
 	return &i->vfs_inode;
 }
 
+static void affs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
+}
+
 static void affs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
+	call_rcu(&inode->i_rcu, affs_i_callback);
 }
 
 static void init_once(void *foo)
@@ -470,12 +477,16 @@
 		goto out_error_noinode;
 	}
 
+	if (AFFS_SB(sb)->s_flags & SF_INTL)
+		sb->s_d_op = &affs_intl_dentry_operations;
+	else
+		sb->s_d_op = &affs_dentry_operations;
+
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root) {
 		printk(KERN_ERR "AFFS: Get root inode failed\n");
 		goto out_error;
 	}
-	sb->s_root->d_op = &affs_dentry_operations;
 
 	pr_debug("AFFS: s_flags=%lX\n",sb->s_flags);
 	return 0;
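The adfs and affs super.c hunks above (and the afs, bfs, befs and block_dev ones later in this patch) all convert ->destroy_inode() to the same shape: defer the kmem_cache_free() to an RCU callback, so RCU-walk lookups that may still be dereferencing the inode stay safe. A generic sketch of the pattern, with foo_* standing in for a real filesystem:

#include <linux/fs.h>
#include <linux/slab.h>

static struct kmem_cache *foo_inode_cachep;	/* created in the fs's init code */

struct foo_inode_info {
	/* fs-private fields would live here */
	struct inode vfs_inode;
};

static inline struct foo_inode_info *FOO_I(struct inode *inode)
{
	return container_of(inode, struct foo_inode_info, vfs_inode);
}

static void foo_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	INIT_LIST_HEAD(&inode->i_dentry);	/* re-initialise i_dentry before the free, mirroring the hunks above */
	kmem_cache_free(foo_inode_cachep, FOO_I(inode));
}

static void foo_destroy_inode(struct inode *inode)
{
	/* defer the real free until after an RCU grace period */
	call_rcu(&inode->i_rcu, foo_i_callback);
}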
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index a3bcec7..1c8c6cc 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -289,7 +289,7 @@
 	call->server = server;
 
 	INIT_WORK(&call->work, SRXAFSCB_CallBack);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -336,7 +336,7 @@
 	call->server = server;
 
 	INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -367,7 +367,7 @@
 	call->server = server;
 
 	INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -400,7 +400,7 @@
 	call->state = AFS_CALL_REPLYING;
 
 	INIT_WORK(&call->work, SRXAFSCB_Probe);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -496,7 +496,7 @@
 	call->state = AFS_CALL_REPLYING;
 
 	INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
 
@@ -580,6 +580,6 @@
 	call->state = AFS_CALL_REPLYING;
 
 	INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself);
-	schedule_work(&call->work);
+	queue_work(afs_wq, &call->work);
 	return 0;
 }
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 5439e1b..e6a4ab9 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/ctype.h>
 #include <linux/sched.h>
@@ -23,7 +24,7 @@
 static int afs_dir_open(struct inode *inode, struct file *file);
 static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
 static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
-static int afs_d_delete(struct dentry *dentry);
+static int afs_d_delete(const struct dentry *dentry);
 static void afs_d_release(struct dentry *dentry);
 static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
 				  loff_t fpos, u64 ino, unsigned dtype);
@@ -61,7 +62,7 @@
 	.setattr	= afs_setattr,
 };
 
-static const struct dentry_operations afs_fs_dentry_operations = {
+const struct dentry_operations afs_fs_dentry_operations = {
 	.d_revalidate	= afs_d_revalidate,
 	.d_delete	= afs_d_delete,
 	.d_release	= afs_d_release,
@@ -581,8 +582,6 @@
 	}
 
 success:
-	dentry->d_op = &afs_fs_dentry_operations;
-
 	d_add(dentry, inode);
 	_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }",
 	       fid.vnode,
@@ -607,6 +606,9 @@
 	void *dir_version;
 	int ret;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
 	vnode = AFS_FS_I(dentry->d_inode);
 
 	if (dentry->d_inode)
@@ -730,7 +732,7 @@
  * - called from dput() when d_count is going to 0.
  * - return 1 to request dentry be unhashed, 0 otherwise
  */
-static int afs_d_delete(struct dentry *dentry)
+static int afs_d_delete(const struct dentry *dentry)
 {
 	_enter("%s", dentry->d_name.name);
 
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index cca8eef..58c633b 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -486,6 +486,7 @@
  * dir.c
  */
 extern const struct inode_operations afs_dir_inode_operations;
+extern const struct dentry_operations afs_fs_dentry_operations;
 extern const struct file_operations afs_dir_file_operations;
 
 /*
@@ -576,6 +577,7 @@
 /*
  * main.c
  */
+extern struct workqueue_struct *afs_wq;
 extern struct afs_uuid afs_uuid;
 
 /*
@@ -624,7 +626,7 @@
 extern void afs_cache_permit(struct afs_vnode *, struct key *, long);
 extern void afs_zap_permits(struct rcu_head *);
 extern struct key *afs_request_key(struct afs_cell *);
-extern int afs_permission(struct inode *, int);
+extern int afs_permission(struct inode *, int, unsigned int);
 
 /*
  * server.c
diff --git a/fs/afs/main.c b/fs/afs/main.c
index cfd1cbe..42dd2e4 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -30,6 +30,7 @@
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
 struct afs_uuid afs_uuid;
+struct workqueue_struct *afs_wq;
 
 /*
  * get a client UUID
@@ -87,10 +88,16 @@
 	if (ret < 0)
 		return ret;
 
+	/* create workqueue */
+	ret = -ENOMEM;
+	afs_wq = alloc_workqueue("afs", 0, 0);
+	if (!afs_wq)
+		return ret;
+
 	/* register the /proc stuff */
 	ret = afs_proc_init();
 	if (ret < 0)
-		return ret;
+		goto error_proc;
 
 #ifdef CONFIG_AFS_FSCACHE
 	/* we want to be able to cache */
@@ -140,6 +147,8 @@
 error_cache:
 #endif
 	afs_proc_cleanup();
+error_proc:
+	destroy_workqueue(afs_wq);
 	rcu_barrier();
 	printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
 	return ret;
@@ -163,7 +172,7 @@
 	afs_purge_servers();
 	afs_callback_update_kill();
 	afs_vlocation_purge();
-	flush_scheduled_work();
+	destroy_workqueue(afs_wq);
 	afs_cell_purge();
 #ifdef CONFIG_AFS_FSCACHE
 	fscache_unregister_netfs(&afs_cache_netfs);
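The afs hunks replace the shared system workqueue (schedule_work()/flush_scheduled_work()) with a private afs_wq created in module init and destroyed on exit, so the filesystem controls exactly what gets flushed and when. The lifecycle reduces to the sketch below (demo_* names are illustrative):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;		/* INIT_WORK()ed elsewhere */

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void demo_kick(void)
{
	/* instead of schedule_work(&demo_work) on the shared queue */
	queue_work(demo_wq, &demo_work);
}

static void __exit demo_exit(void)
{
	/* flushes anything still queued, then frees the workqueue */
	destroy_workqueue(demo_wq);
}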
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 6153417..e83c033 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -268,8 +268,8 @@
 		path_put(&nd->path);
 		nd->path.mnt = newmnt;
 		nd->path.dentry = dget(newmnt->mnt_root);
-		schedule_delayed_work(&afs_mntpt_expiry_timer,
-				      afs_mntpt_expiry_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
+				   afs_mntpt_expiry_timeout * HZ);
 		break;
 	case -EBUSY:
 		/* someone else made a mount here whilst we were busy */
@@ -295,8 +295,8 @@
 
 	if (!list_empty(&afs_vfsmounts)) {
 		mark_mounts_for_expiry(&afs_vfsmounts);
-		schedule_delayed_work(&afs_mntpt_expiry_timer,
-				      afs_mntpt_expiry_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer,
+				   afs_mntpt_expiry_timeout * HZ);
 	}
 
 	_leave("");
@@ -310,6 +310,5 @@
 	_enter("");
 
 	ASSERT(list_empty(&afs_vfsmounts));
-	cancel_delayed_work(&afs_mntpt_expiry_timer);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&afs_mntpt_expiry_timer);
 }
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 654d8fd..e45a323 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -410,7 +410,7 @@
 	if (!call) {
 		/* its an incoming call for our callback service */
 		skb_queue_tail(&afs_incoming_calls, skb);
-		schedule_work(&afs_collect_incoming_call_work);
+		queue_work(afs_wq, &afs_collect_incoming_call_work);
 	} else {
 		/* route the messages directly to the appropriate call */
 		skb_queue_tail(&call->rx_queue, skb);
diff --git a/fs/afs/security.c b/fs/afs/security.c
index bb4ed14..f44b9d3 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -285,13 +285,16 @@
  * - AFS ACLs are attached to directories only, and a file is controlled by its
  *   parent directory's ACL
  */
-int afs_permission(struct inode *inode, int mask)
+int afs_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	struct afs_vnode *vnode = AFS_FS_I(inode);
 	afs_access_t uninitialized_var(access);
 	struct key *key;
 	int ret;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	_enter("{{%x:%u},%lx},%x,",
 	       vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
 
@@ -347,7 +350,7 @@
 	}
 
 	key_put(key);
-	ret = generic_permission(inode, mask, NULL);
+	ret = generic_permission(inode, mask, flags, NULL);
 	_leave(" = %d", ret);
 	return ret;
 
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 9fdc7fe..d59b751 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -238,8 +238,8 @@
 	if (atomic_read(&server->usage) == 0) {
 		list_move_tail(&server->grave, &afs_server_graveyard);
 		server->time_of_death = get_seconds();
-		schedule_delayed_work(&afs_server_reaper,
-				      afs_server_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_server_reaper,
+				   afs_server_timeout * HZ);
 	}
 	spin_unlock(&afs_server_graveyard_lock);
 	_leave(" [dead]");
@@ -285,10 +285,11 @@
 		expiry = server->time_of_death + afs_server_timeout;
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
-			if (!schedule_delayed_work(&afs_server_reaper, delay)) {
+			if (!queue_delayed_work(afs_wq, &afs_server_reaper,
+						delay)) {
 				cancel_delayed_work(&afs_server_reaper);
-				schedule_delayed_work(&afs_server_reaper,
-						      delay);
+				queue_delayed_work(afs_wq, &afs_server_reaper,
+						   delay);
 			}
 			break;
 		}
@@ -323,5 +324,5 @@
 {
 	afs_server_timeout = 0;
 	cancel_delayed_work(&afs_server_reaper);
-	schedule_delayed_work(&afs_server_reaper, 0);
+	queue_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 27201cf..fb240e8 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -336,6 +336,7 @@
 	if (!root)
 		goto error;
 
+	sb->s_d_op = &afs_fs_dentry_operations;
 	sb->s_root = root;
 
 	_leave(" = 0");
@@ -498,6 +499,14 @@
 	return &vnode->vfs_inode;
 }
 
+static void afs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	struct afs_vnode *vnode = AFS_FS_I(inode);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(afs_inode_cachep, vnode);
+}
+
 /*
  * destroy an AFS inode struct
  */
@@ -511,7 +520,7 @@
 
 	ASSERTCMP(vnode->server, ==, NULL);
 
-	kmem_cache_free(afs_inode_cachep, vnode);
+	call_rcu(&inode->i_rcu, afs_i_callback);
 	atomic_dec(&afs_count_active_inodes);
 }
 
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 9ac260d..431984d 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -507,8 +507,8 @@
 		_debug("buried");
 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
 		vl->time_of_death = get_seconds();
-		schedule_delayed_work(&afs_vlocation_reap,
-				      afs_vlocation_timeout * HZ);
+		queue_delayed_work(afs_wq, &afs_vlocation_reap,
+				   afs_vlocation_timeout * HZ);
 
 		/* suspend updates on this record */
 		if (!list_empty(&vl->update)) {
@@ -561,11 +561,11 @@
 		if (expiry > now) {
 			delay = (expiry - now) * HZ;
 			_debug("delay %lu", delay);
-			if (!schedule_delayed_work(&afs_vlocation_reap,
-						   delay)) {
+			if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
+						delay)) {
 				cancel_delayed_work(&afs_vlocation_reap);
-				schedule_delayed_work(&afs_vlocation_reap,
-						      delay);
+				queue_delayed_work(afs_wq, &afs_vlocation_reap,
+						   delay);
 			}
 			break;
 		}
@@ -620,7 +620,7 @@
 	destroy_workqueue(afs_vlocation_update_worker);
 
 	cancel_delayed_work(&afs_vlocation_reap);
-	schedule_delayed_work(&afs_vlocation_reap, 0);
+	queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 
 /*
diff --git a/fs/aio.c b/fs/aio.c
index 8c8f6c5..5e00f15 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -798,29 +798,12 @@
 	queue_delayed_work(aio_wq, &ctx->wq, timeout);
 }
 
-
 /*
- * aio_run_iocbs:
- * 	Process all pending retries queued on the ioctx
- * 	run list.
- * Assumes it is operating within the aio issuer's mm
- * context.
- */
-static inline void aio_run_iocbs(struct kioctx *ctx)
-{
-	int requeue;
-
-	spin_lock_irq(&ctx->ctx_lock);
-
-	requeue = __aio_run_iocbs(ctx);
-	spin_unlock_irq(&ctx->ctx_lock);
-	if (requeue)
-		aio_queue_work(ctx);
-}
-
-/*
- * just like aio_run_iocbs, but keeps running them until
- * the list stays empty
+ * aio_run_all_iocbs:
+ *	Process all pending retries queued on the ioctx
+ *	run list, and keep running them until the list
+ *	stays empty.
+ * Assumes it is operating within the aio issuer's mm context.
  */
 static inline void aio_run_all_iocbs(struct kioctx *ctx)
 {
@@ -1839,7 +1822,7 @@
 	long ret = -EINVAL;
 
 	if (likely(ioctx)) {
-		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
+		if (likely(min_nr <= nr && min_nr >= 0))
 			ret = read_events(ioctx, min_nr, nr, events, timeout);
 		put_ioctx(ioctx);
 	}
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 57ce55b..cbe57f3 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -26,12 +26,6 @@
 static struct inode *anon_inode_inode;
 static const struct file_operations anon_inode_fops;
 
-static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
-				int flags, const char *dev_name, void *data)
-{
-	return mount_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC);
-}
-
 /*
  * anon_inodefs_dname() is called from d_path().
  */
@@ -41,14 +35,22 @@
 				dentry->d_name.name);
 }
 
+static const struct dentry_operations anon_inodefs_dentry_operations = {
+	.d_dname	= anon_inodefs_dname,
+};
+
+static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "anon_inode:", NULL,
+			&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
+}
+
 static struct file_system_type anon_inode_fs_type = {
 	.name		= "anon_inodefs",
 	.mount		= anon_inodefs_mount,
 	.kill_sb	= kill_anon_super,
 };
-static const struct dentry_operations anon_inodefs_dentry_operations = {
-	.d_dname	= anon_inodefs_dname,
-};
 
 /*
  * nop .set_page_dirty method so that people can use .page_mkwrite on
@@ -64,9 +66,9 @@
 };
 
 /**
- * anon_inode_getfd - creates a new file instance by hooking it up to an
- *                    anonymous inode, and a dentry that describe the "class"
- *                    of the file
+ * anon_inode_getfile - creates a new file instance by hooking it up to an
+ *                      anonymous inode, and a dentry that describe the "class"
+ *                      of the file
  *
  * @name:    [in]    name of the "class" of the new file
  * @fops:    [in]    file operations for the new file
@@ -102,7 +104,7 @@
 	this.name = name;
 	this.len = strlen(name);
 	this.hash = 0;
-	path.dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this);
+	path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
 	if (!path.dentry)
 		goto err_module;
 
@@ -113,7 +115,6 @@
 	 */
 	ihold(anon_inode_inode);
 
-	path.dentry->d_op = &anon_inodefs_dentry_operations;
 	d_instantiate(path.dentry, anon_inode_inode);
 
 	error = -ENFILE;
@@ -232,7 +233,7 @@
 	return 0;
 
 err_mntput:
-	mntput(anon_inode_mnt);
+	mntput_long(anon_inode_mnt);
 err_unregister_filesystem:
 	unregister_filesystem(&anon_inode_fs_type);
 err_exit:
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 3d283ab..0fffe1c 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -16,6 +16,7 @@
 #include <linux/auto_fs4.h>
 #include <linux/auto_dev-ioctl.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 #include <linux/list.h>
 
 /* This is the range of ioctl() numbers we claim as ours */
@@ -60,6 +61,8 @@
 		current->pid, __func__, ##args);	\
 } while (0)
 
+extern spinlock_t autofs4_lock;
+
 /* Unified info structure.  This is pointed to by both the dentry and
    inode structures.  Each file in the filesystem has an instance of this
    structure.  It holds a reference to the dentry, so dentries are never
@@ -254,17 +257,15 @@
 	return dentry->d_inode && !d_unhashed(dentry);
 }
 
-static inline int __simple_empty(struct dentry *dentry)
+static inline void __autofs4_add_expiring(struct dentry *dentry)
 {
-	struct dentry *child;
-	int ret = 0;
-
-	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
-		if (simple_positive(child))
-			goto out;
-	ret = 1;
-out:
-	return ret;
+	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+	if (ino) {
+		if (list_empty(&ino->expiring))
+			list_add(&ino->expiring, &sbi->expiring_list);
+	}
+	return;
 }
 
 static inline void autofs4_add_expiring(struct dentry *dentry)
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index a796c94..cc1d013 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -91,24 +91,64 @@
 }
 
 /*
- * Calculate next entry in top down tree traversal.
- * From next_mnt in namespace.c - elegant.
+ * Calculate and dget next entry in top down tree traversal.
  */
-static struct dentry *next_dentry(struct dentry *p, struct dentry *root)
+static struct dentry *get_next_positive_dentry(struct dentry *prev,
+						struct dentry *root)
 {
-	struct list_head *next = p->d_subdirs.next;
+	struct list_head *next;
+	struct dentry *p, *ret;
 
+	if (prev == NULL)
+		return dget(root);
+
+	spin_lock(&autofs4_lock);
+relock:
+	p = prev;
+	spin_lock(&p->d_lock);
+again:
+	next = p->d_subdirs.next;
 	if (next == &p->d_subdirs) {
 		while (1) {
-			if (p == root)
+			struct dentry *parent;
+
+			if (p == root) {
+				spin_unlock(&p->d_lock);
+				spin_unlock(&autofs4_lock);
+				dput(prev);
 				return NULL;
+			}
+
+			parent = p->d_parent;
+			if (!spin_trylock(&parent->d_lock)) {
+				spin_unlock(&p->d_lock);
+				cpu_relax();
+				goto relock;
+			}
+			spin_unlock(&p->d_lock);
 			next = p->d_u.d_child.next;
-			if (next != &p->d_parent->d_subdirs)
+			p = parent;
+			if (next != &parent->d_subdirs)
 				break;
-			p = p->d_parent;
 		}
 	}
-	return list_entry(next, struct dentry, d_u.d_child);
+	ret = list_entry(next, struct dentry, d_u.d_child);
+
+	spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
+	/* Negative dentry - try next */
+	if (!simple_positive(ret)) {
+		spin_unlock(&ret->d_lock);
+		p = ret;
+		goto again;
+	}
+	dget_dlock(ret);
+	spin_unlock(&ret->d_lock);
+	spin_unlock(&p->d_lock);
+	spin_unlock(&autofs4_lock);
+
+	dput(prev);
+
+	return ret;
 }
 
 /*
@@ -158,18 +198,11 @@
 	if (!simple_positive(top))
 		return 1;
 
-	spin_lock(&dcache_lock);
-	for (p = top; p; p = next_dentry(p, top)) {
-		/* Negative dentry - give up */
-		if (!simple_positive(p))
-			continue;
-
+	p = NULL;
+	while ((p = get_next_positive_dentry(p, top))) {
 		DPRINTK("dentry %p %.*s",
 			p, (int) p->d_name.len, p->d_name.name);
 
-		p = dget(p);
-		spin_unlock(&dcache_lock);
-
 		/*
 		 * Is someone visiting anywhere in the subtree ?
 		 * If there's no mount we need to check the usage
@@ -198,16 +231,13 @@
 			else
 				ino_count++;
 
-			if (atomic_read(&p->d_count) > ino_count) {
+			if (p->d_count > ino_count) {
 				top_ino->last_used = jiffies;
 				dput(p);
 				return 1;
 			}
 		}
-		dput(p);
-		spin_lock(&dcache_lock);
 	}
-	spin_unlock(&dcache_lock);
 
 	/* Timeout of a tree mount is ultimately determined by its top dentry */
 	if (!autofs4_can_expire(top, timeout, do_now))
@@ -226,32 +256,21 @@
 	DPRINTK("parent %p %.*s",
 		parent, (int)parent->d_name.len, parent->d_name.name);
 
-	spin_lock(&dcache_lock);
-	for (p = parent; p; p = next_dentry(p, parent)) {
-		/* Negative dentry - give up */
-		if (!simple_positive(p))
-			continue;
-
+	p = NULL;
+	while ((p = get_next_positive_dentry(p, parent))) {
 		DPRINTK("dentry %p %.*s",
 			p, (int) p->d_name.len, p->d_name.name);
 
-		p = dget(p);
-		spin_unlock(&dcache_lock);
-
 		if (d_mountpoint(p)) {
 			/* Can we umount this guy */
 			if (autofs4_mount_busy(mnt, p))
-				goto cont;
+				continue;
 
 			/* Can we expire this guy */
 			if (autofs4_can_expire(p, timeout, do_now))
 				return p;
 		}
-cont:
-		dput(p);
-		spin_lock(&dcache_lock);
 	}
-	spin_unlock(&dcache_lock);
 	return NULL;
 }
 
@@ -276,7 +295,9 @@
 		struct autofs_info *ino = autofs4_dentry_ino(root);
 		if (d_mountpoint(root)) {
 			ino->flags |= AUTOFS_INF_MOUNTPOINT;
-			root->d_mounted--;
+			spin_lock(&root->d_lock);
+			root->d_flags &= ~DCACHE_MOUNTED;
+			spin_unlock(&root->d_lock);
 		}
 		ino->flags |= AUTOFS_INF_EXPIRING;
 		init_completion(&ino->expire_complete);
@@ -302,8 +323,8 @@
 {
 	unsigned long timeout;
 	struct dentry *root = sb->s_root;
+	struct dentry *dentry;
 	struct dentry *expired = NULL;
-	struct list_head *next;
 	int do_now = how & AUTOFS_EXP_IMMEDIATE;
 	int exp_leaves = how & AUTOFS_EXP_LEAVES;
 	struct autofs_info *ino;
@@ -315,23 +336,8 @@
 	now = jiffies;
 	timeout = sbi->exp_timeout;
 
-	spin_lock(&dcache_lock);
-	next = root->d_subdirs.next;
-
-	/* On exit from the loop expire is set to a dgot dentry
-	 * to expire or it's NULL */
-	while ( next != &root->d_subdirs ) {
-		struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
-
-		/* Negative dentry - give up */
-		if (!simple_positive(dentry)) {
-			next = next->next;
-			continue;
-		}
-
-		dentry = dget(dentry);
-		spin_unlock(&dcache_lock);
-
+	dentry = NULL;
+	while ((dentry = get_next_positive_dentry(dentry, root))) {
 		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(dentry);
 
@@ -347,7 +353,7 @@
 
 			/* Path walk currently on this dentry? */
 			ino_count = atomic_read(&ino->count) + 2;
-			if (atomic_read(&dentry->d_count) > ino_count)
+			if (dentry->d_count > ino_count)
 				goto next;
 
 			/* Can we umount this guy */
@@ -369,7 +375,7 @@
 		if (!exp_leaves) {
 			/* Path walk currently on this dentry? */
 			ino_count = atomic_read(&ino->count) + 1;
-			if (atomic_read(&dentry->d_count) > ino_count)
+			if (dentry->d_count > ino_count)
 				goto next;
 
 			if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
@@ -383,7 +389,7 @@
 		} else {
 			/* Path walk currently on this dentry? */
 			ino_count = atomic_read(&ino->count) + 1;
-			if (atomic_read(&dentry->d_count) > ino_count)
+			if (dentry->d_count > ino_count)
 				goto next;
 
 			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
@@ -394,11 +400,7 @@
 		}
 next:
 		spin_unlock(&sbi->fs_lock);
-		dput(dentry);
-		spin_lock(&dcache_lock);
-		next = next->next;
 	}
-	spin_unlock(&dcache_lock);
 	return NULL;
 
 found:
@@ -408,9 +410,13 @@
 	ino->flags |= AUTOFS_INF_EXPIRING;
 	init_completion(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
+	spin_lock(&expired->d_parent->d_lock);
+	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
 	list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&expired->d_lock);
+	spin_unlock(&expired->d_parent->d_lock);
+	spin_unlock(&autofs4_lock);
 	return expired;
 }
 
@@ -499,7 +505,14 @@
 
 		spin_lock(&sbi->fs_lock);
 		if (ino->flags & AUTOFS_INF_MOUNTPOINT) {
-			sb->s_root->d_mounted++;
+			spin_lock(&sb->s_root->d_lock);
+			/*
+			 * If we haven't been expired away, then reset
+			 * mounted status.
+			 */
+			if (mnt->mnt_parent != mnt)
+				sb->s_root->d_flags |= DCACHE_MOUNTED;
+			spin_unlock(&sb->s_root->d_lock);
 			ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
 		}
 		ino->flags &= ~AUTOFS_INF_EXPIRING;
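get_next_positive_dentry() above walks d_subdirs with per-dentry d_lock (plus the filesystem-local autofs4_lock) now that dcache_lock is gone. The step that needs care is moving from a child up to its parent: lock order is parent before child, so going upward the code trylocks the parent and, on contention, drops everything and retries. A stripped-down sketch of just that step (demo_* is a placeholder; callers must not pass the tree root, whose d_parent is itself):

#include <linux/dcache.h>
#include <linux/spinlock.h>

/*
 * Move from a child dentry to its parent under per-dentry locks.
 * Returns the parent with its d_lock held; the child's lock is dropped.
 */
static struct dentry *demo_lock_parent(struct dentry *p)
{
	struct dentry *parent;

again:
	spin_lock(&p->d_lock);
	parent = p->d_parent;
	if (!spin_trylock(&parent->d_lock)) {
		/* wrong lock order for a blocking lock: back off and retry */
		spin_unlock(&p->d_lock);
		cpu_relax();
		goto again;
	}
	spin_unlock(&p->d_lock);
	return parent;
}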
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index ac87e49..a7bdb9d 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -309,7 +309,7 @@
 		goto fail_iput;
 	pipe = NULL;
 
-	root->d_op = &autofs4_sb_dentry_operations;
+	d_set_d_op(root, &autofs4_sb_dentry_operations);
 	root->d_fsdata = ino;
 
 	/* Can this call block? */
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index d34896c..651e4ef 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -23,6 +23,8 @@
 
 #include "autofs_i.h"
 
+DEFINE_SPINLOCK(autofs4_lock);
+
 static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
 static int autofs4_dir_unlink(struct inode *,struct dentry *);
 static int autofs4_dir_rmdir(struct inode *,struct dentry *);
@@ -142,12 +144,15 @@
 	 * autofs file system so just let the libfs routines handle
 	 * it.
 	 */
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
+	spin_lock(&dentry->d_lock);
 	if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&autofs4_lock);
 		return -ENOENT;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&autofs4_lock);
 
 out:
 	return dcache_dir_open(inode, file);
@@ -252,9 +257,11 @@
 	/* We trigger a mount for almost all flags */
 	lookup_type = autofs4_need_mount(nd->flags);
 	spin_lock(&sbi->fs_lock);
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
+	spin_lock(&dentry->d_lock);
 	if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&autofs4_lock);
 		spin_unlock(&sbi->fs_lock);
 		goto follow;
 	}
@@ -266,7 +273,8 @@
 	 */
 	if (ino->flags & AUTOFS_INF_PENDING ||
 	    (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&autofs4_lock);
 		spin_unlock(&sbi->fs_lock);
 
 		status = try_to_fill_dentry(dentry, nd->flags);
@@ -275,7 +283,8 @@
 
 		goto follow;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&autofs4_lock);
 	spin_unlock(&sbi->fs_lock);
 follow:
 	/*
@@ -306,12 +315,19 @@
  */
 static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct inode *dir = dentry->d_parent->d_inode;
-	struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
-	int oz_mode = autofs4_oz_mode(sbi);
+	struct inode *dir;
+	struct autofs_sb_info *sbi;
+	int oz_mode;
 	int flags = nd ? nd->flags : 0;
 	int status = 1;
 
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	dir = dentry->d_parent->d_inode;
+	sbi = autofs4_sbi(dir->i_sb);
+	oz_mode = autofs4_oz_mode(sbi);
+
 	/* Pending dentry */
 	spin_lock(&sbi->fs_lock);
 	if (autofs4_ispending(dentry)) {
@@ -346,12 +362,14 @@
 		return 0;
 
 	/* Check for a non-mountpoint directory with no contents */
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
+	spin_lock(&dentry->d_lock);
 	if (S_ISDIR(dentry->d_inode->i_mode) &&
 	    !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
 		DPRINTK("dentry=%p %.*s, emptydir",
 			 dentry, dentry->d_name.len, dentry->d_name.name);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&autofs4_lock);
 
 		/* The daemon never causes a mount to trigger */
 		if (oz_mode)
@@ -367,7 +385,8 @@
 
 		return status;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&autofs4_lock);
 
 	return 1;
 }
@@ -422,7 +441,7 @@
 	const unsigned char *str = name->name;
 	struct list_head *p, *head;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
 	spin_lock(&sbi->lookup_lock);
 	head = &sbi->active_list;
 	list_for_each(p, head) {
@@ -436,7 +455,7 @@
 		spin_lock(&active->d_lock);
 
 		/* Already gone? */
-		if (atomic_read(&active->d_count) == 0)
+		if (active->d_count == 0)
 			goto next;
 
 		qstr = &active->d_name;
@@ -452,17 +471,17 @@
 			goto next;
 
 		if (d_unhashed(active)) {
-			dget(active);
+			dget_dlock(active);
 			spin_unlock(&active->d_lock);
 			spin_unlock(&sbi->lookup_lock);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&autofs4_lock);
 			return active;
 		}
 next:
 		spin_unlock(&active->d_lock);
 	}
 	spin_unlock(&sbi->lookup_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&autofs4_lock);
 
 	return NULL;
 }
@@ -477,7 +496,7 @@
 	const unsigned char *str = name->name;
 	struct list_head *p, *head;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
 	spin_lock(&sbi->lookup_lock);
 	head = &sbi->expiring_list;
 	list_for_each(p, head) {
@@ -507,17 +526,17 @@
 			goto next;
 
 		if (d_unhashed(expiring)) {
-			dget(expiring);
+			dget_dlock(expiring);
 			spin_unlock(&expiring->d_lock);
 			spin_unlock(&sbi->lookup_lock);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&autofs4_lock);
 			return expiring;
 		}
 next:
 		spin_unlock(&expiring->d_lock);
 	}
 	spin_unlock(&sbi->lookup_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&autofs4_lock);
 
 	return NULL;
 }
@@ -559,7 +578,7 @@
 		 * we check for the hashed dentry and return the newly
 		 * hashed dentry.
 		 */
-		dentry->d_op = &autofs4_root_dentry_operations;
+		d_set_d_op(dentry, &autofs4_root_dentry_operations);
 
 		/*
 		 * And we need to ensure that the same dentry is used for
@@ -698,9 +717,9 @@
 	d_add(dentry, inode);
 
 	if (dir == dir->i_sb->s_root->d_inode)
-		dentry->d_op = &autofs4_root_dentry_operations;
+		d_set_d_op(dentry, &autofs4_root_dentry_operations);
 	else
-		dentry->d_op = &autofs4_dentry_operations;
+		d_set_d_op(dentry, &autofs4_dentry_operations);
 
 	dentry->d_fsdata = ino;
 	ino->dentry = dget(dentry);
@@ -753,12 +772,12 @@
 
 	dir->i_mtime = CURRENT_TIME;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
 	autofs4_add_expiring(dentry);
 	spin_lock(&dentry->d_lock);
 	__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&autofs4_lock);
 
 	return 0;
 }
@@ -775,16 +794,20 @@
 	if (!autofs4_oz_mode(sbi))
 		return -EACCES;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&autofs4_lock);
+	spin_lock(&sbi->lookup_lock);
+	spin_lock(&dentry->d_lock);
 	if (!list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&sbi->lookup_lock);
+		spin_unlock(&autofs4_lock);
 		return -ENOTEMPTY;
 	}
-	autofs4_add_expiring(dentry);
-	spin_lock(&dentry->d_lock);
+	__autofs4_add_expiring(dentry);
+	spin_unlock(&sbi->lookup_lock);
 	__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&autofs4_lock);
 
 	if (atomic_dec_and_test(&ino->count)) {
 		p_ino = autofs4_dentry_ino(dentry->d_parent);
@@ -829,9 +852,9 @@
 	d_add(dentry, inode);
 
 	if (dir == dir->i_sb->s_root->d_inode)
-		dentry->d_op = &autofs4_root_dentry_operations;
+		d_set_d_op(dentry, &autofs4_root_dentry_operations);
 	else
-		dentry->d_op = &autofs4_dentry_operations;
+		d_set_d_op(dentry, &autofs4_dentry_operations);
 
 	dentry->d_fsdata = ino;
 	ino->dentry = dget(dentry);
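The revalidate hooks converted above (afs_d_revalidate, autofs4_revalidate) and the new three-argument ->permission() all begin by bailing out of RCU-walk: returning -ECHILD (or checking IPERM_FLAG_RCU) tells the VFS to drop back to ref-walk and call again with proper references held. A minimal ->d_revalidate() skeleton following that convention (demo_* names are placeholders):

#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/errno.h>

/* A filesystem that cannot (yet) revalidate under rcu-walk. */
static int demo_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	if (nd && (nd->flags & LOOKUP_RCU))
		return -ECHILD;		/* ask the VFS to retry in ref-walk mode */

	/* ... normal, possibly-sleeping revalidation would go here ... */
	return 1;			/* dentry is still valid */
}

static const struct dentry_operations demo_dops = {
	.d_revalidate	= demo_d_revalidate,
};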
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 2341375..c5f8459 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -186,16 +186,26 @@
 {
 	struct dentry *root = sbi->sb->s_root;
 	struct dentry *tmp;
-	char *buf = *name;
+	char *buf;
 	char *p;
-	int len = 0;
+	int len;
+	unsigned seq;
 
-	spin_lock(&dcache_lock);
+rename_retry:
+	buf = *name;
+	len = 0;
+
+	seq = read_seqbegin(&rename_lock);
+	rcu_read_lock();
+	spin_lock(&autofs4_lock);
 	for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
 		len += tmp->d_name.len + 1;
 
 	if (!len || --len > NAME_MAX) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&autofs4_lock);
+		rcu_read_unlock();
+		if (read_seqretry(&rename_lock, seq))
+			goto rename_retry;
 		return 0;
 	}
 
@@ -208,7 +218,10 @@
 		p -= tmp->d_name.len;
 		strncpy(p, tmp->d_name.name, tmp->d_name.len);
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&autofs4_lock);
+	rcu_read_unlock();
+	if (read_seqretry(&rename_lock, seq))
+		goto rename_retry;
 
 	return len;
 }
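autofs4_getpath() above now measures and copies the path under rcu_read_lock() and the global rename_lock sequence counter, restarting from scratch if a rename slipped in between the two passes. The read side is the standard seqlock retry loop, sketched here with demo_* stand-ins:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_rename_lock);

/* Two values a writer updates together under write_seqlock(). */
static unsigned long demo_a, demo_b;

static unsigned long demo_read_pair(void)
{
	unsigned long a, b;
	unsigned int seq;

	do {
		seq = read_seqbegin(&demo_rename_lock);
		a = demo_a;			/* may race with a writer ... */
		b = demo_b;
	} while (read_seqretry(&demo_rename_lock, seq));	/* ... so retry */

	return a + b;
}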
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index f024d8a..9ad2369 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -229,8 +229,11 @@
 	return -EIO;
 }
 
-static int bad_inode_permission(struct inode *inode, int mask)
+static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
 {
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	return -EIO;
 }
 
diff --git a/fs/befs/endian.h b/fs/befs/endian.h
index 6cb84d8..2722387 100644
--- a/fs/befs/endian.h
+++ b/fs/befs/endian.h
@@ -102,22 +102,22 @@
 }
 
 static inline befs_data_stream
-fsds_to_cpu(const struct super_block *sb, befs_disk_data_stream n)
+fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n)
 {
 	befs_data_stream data;
 	int i;
 
 	for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i)
-		data.direct[i] = fsrun_to_cpu(sb, n.direct[i]);
+		data.direct[i] = fsrun_to_cpu(sb, n->direct[i]);
 
-	data.max_direct_range = fs64_to_cpu(sb, n.max_direct_range);
-	data.indirect = fsrun_to_cpu(sb, n.indirect);
-	data.max_indirect_range = fs64_to_cpu(sb, n.max_indirect_range);
-	data.double_indirect = fsrun_to_cpu(sb, n.double_indirect);
+	data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range);
+	data.indirect = fsrun_to_cpu(sb, n->indirect);
+	data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range);
+	data.double_indirect = fsrun_to_cpu(sb, n->double_indirect);
 	data.max_double_indirect_range = fs64_to_cpu(sb,
-						     n.
+						     n->
 						     max_double_indirect_range);
-	data.size = fs64_to_cpu(sb, n.size);
+	data.size = fs64_to_cpu(sb, n->size);
 
 	return data;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index aa4e7c7..b1d0c79 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -284,12 +284,18 @@
         return &bi->vfs_inode;
 }
 
-static void
-befs_destroy_inode(struct inode *inode)
+static void befs_i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
         kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
+static void befs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, befs_i_callback);
+}
+
 static void init_once(void *foo)
 {
         struct befs_inode_info *bi = (struct befs_inode_info *) foo;
@@ -384,7 +390,7 @@
 		int num_blks;
 
 		befs_ino->i_data.ds =
-		    fsds_to_cpu(sb, raw_inode->data.datastream);
+		    fsds_to_cpu(sb, &raw_inode->data.datastream);
 
 		num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds);
 		inode->i_blocks =
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 76db6d7..a8e37f8 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -248,9 +248,16 @@
 	return &bi->vfs_inode;
 }
 
+static void bfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
+}
+
 static void bfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
+	call_rcu(&inode->i_rcu, bfs_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6884e19..d5b640b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -66,12 +66,11 @@
 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
 
 static struct linux_binfmt elf_format = {
-		.module		= THIS_MODULE,
-		.load_binary	= load_elf_binary,
-		.load_shlib	= load_elf_library,
-		.core_dump	= elf_core_dump,
-		.min_coredump	= ELF_EXEC_PAGESIZE,
-		.hasvdso	= 1
+	.module		= THIS_MODULE,
+	.load_binary	= load_elf_binary,
+	.load_shlib	= load_elf_library,
+	.core_dump	= elf_core_dump,
+	.min_coredump	= ELF_EXEC_PAGESIZE,
 };
 
 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
@@ -316,8 +315,6 @@
 	return 0;
 }
 
-#ifndef elf_map
-
 static unsigned long elf_map(struct file *filep, unsigned long addr,
 		struct elf_phdr *eppnt, int prot, int type,
 		unsigned long total_size)
@@ -354,8 +351,6 @@
 	return(map_addr);
 }
 
-#endif /* !elf_map */
-
 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 {
 	int i, first_idx = -1, last_idx = -1;
@@ -421,7 +416,7 @@
 		goto out;
 
 	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
-			     (char *)elf_phdata,size);
+			     (char *)elf_phdata, size);
 	error = -EIO;
 	if (retval != size) {
 		if (retval < 0)
@@ -601,7 +596,7 @@
 		goto out;
 	if (!elf_check_arch(&loc->elf_ex))
 		goto out;
-	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
+	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
 		goto out;
 
 	/* Now read in all of the header information */
@@ -761,8 +756,8 @@
 			/* There was a PT_LOAD segment with p_memsz > p_filesz
 			   before this one. Map anonymous pages, if needed,
 			   and clear the area.  */
-			retval = set_brk (elf_bss + load_bias,
-					  elf_brk + load_bias);
+			retval = set_brk(elf_bss + load_bias,
+					 elf_brk + load_bias);
 			if (retval) {
 				send_sig(SIGKILL, current, 0);
 				goto out_free_dentry;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 4d0ff5e..e49cce2 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -782,7 +782,12 @@
 {
 	unsigned int i;
 
-	kintegrityd_wq = create_workqueue("kintegrityd");
+	/*
+	 * kintegrityd won't block much but may burn a lot of CPU cycles.
+	 * Make it highpri CPU intensive wq with max concurrency of 1.
+	 */
+	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
+					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
 	if (!kintegrityd_wq)
 		panic("Failed to create kintegrityd\n");
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4230252..333a7bb 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -409,13 +409,20 @@
 	return &ei->vfs_inode;
 }
 
-static void bdev_destroy_inode(struct inode *inode)
+static void bdev_i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct bdev_inode *bdi = BDEV_I(inode);
 
+	INIT_LIST_HEAD(&inode->i_dentry);
 	kmem_cache_free(bdev_cachep, bdi);
 }
 
+static void bdev_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, bdev_i_callback);
+}
+
 static void init_once(void *foo)
 {
 	struct bdev_inode *ei = (struct bdev_inode *) foo;
@@ -426,7 +433,7 @@
 	INIT_LIST_HEAD(&bdev->bd_inodes);
 	INIT_LIST_HEAD(&bdev->bd_list);
 #ifdef CONFIG_SYSFS
-	INIT_LIST_HEAD(&bdev->bd_holder_list);
+	INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
 	inode_init_once(&ei->vfs_inode);
 	/* Initialize mutex for freeze. */
@@ -466,7 +473,7 @@
 static struct dentry *bd_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+	return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576);
 }
 
 static struct file_system_type bd_type = {
@@ -662,7 +669,7 @@
 	else if (bdev->bd_contains == bdev)
 		return true;  	 /* is a whole device which isn't held */
 
-	else if (whole->bd_holder == bd_claim)
+	else if (whole->bd_holder == bd_may_claim)
 		return true; 	 /* is a partition of a device that is being partitioned */
 	else if (whole->bd_holder != NULL)
 		return false;	 /* is a partition of a held device */
@@ -774,440 +781,143 @@
 	}
 }
 
-/* releases bdev_lock */
-static void __bd_abort_claiming(struct block_device *whole, void *holder)
-{
-	BUG_ON(whole->bd_claiming != holder);
-	whole->bd_claiming = NULL;
-	wake_up_bit(&whole->bd_claiming, 0);
-
-	spin_unlock(&bdev_lock);
-	bdput(whole);
-}
-
-/**
- * bd_abort_claiming - abort claiming a block device
- * @whole: whole block device returned by bd_start_claiming()
- * @holder: holder trying to claim @bdev
- *
- * Abort a claiming block started by bd_start_claiming().  Note that
- * @whole is not the block device to be claimed but the whole device
- * returned by bd_start_claiming().
- *
- * CONTEXT:
- * Grabs and releases bdev_lock.
- */
-static void bd_abort_claiming(struct block_device *whole, void *holder)
-{
-	spin_lock(&bdev_lock);
-	__bd_abort_claiming(whole, holder);		/* releases bdev_lock */
-}
-
-/* increment holders when we have a legitimate claim. requires bdev_lock */
-static void __bd_claim(struct block_device *bdev, struct block_device *whole,
-					void *holder)
-{
-	/* note that for a whole device bd_holders
-	 * will be incremented twice, and bd_holder will
-	 * be set to bd_claim before being set to holder
-	 */
-	whole->bd_holders++;
-	whole->bd_holder = bd_claim;
-	bdev->bd_holders++;
-	bdev->bd_holder = holder;
-}
-
-/**
- * bd_finish_claiming - finish claiming a block device
- * @bdev: block device of interest (passed to bd_start_claiming())
- * @whole: whole block device returned by bd_start_claiming()
- * @holder: holder trying to claim @bdev
- *
- * Finish a claiming block started by bd_start_claiming().
- *
- * CONTEXT:
- * Grabs and releases bdev_lock.
- */
-static void bd_finish_claiming(struct block_device *bdev,
-				struct block_device *whole, void *holder)
-{
-	spin_lock(&bdev_lock);
-	BUG_ON(!bd_may_claim(bdev, whole, holder));
-	__bd_claim(bdev, whole, holder);
-	__bd_abort_claiming(whole, holder); /* not actually an abort */
-}
-
-/**
- * bd_claim - claim a block device
- * @bdev: block device to claim
- * @holder: holder trying to claim @bdev
- *
- * Try to claim @bdev which must have been opened successfully.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 if successful, -EBUSY if @bdev is already claimed.
- */
-int bd_claim(struct block_device *bdev, void *holder)
-{
-	struct block_device *whole = bdev->bd_contains;
-	int res;
-
-	might_sleep();
-
-	spin_lock(&bdev_lock);
-	res = bd_prepare_to_claim(bdev, whole, holder);
-	if (res == 0)
-		__bd_claim(bdev, whole, holder);
-	spin_unlock(&bdev_lock);
-
-	return res;
-}
-EXPORT_SYMBOL(bd_claim);
-
-void bd_release(struct block_device *bdev)
-{
-	spin_lock(&bdev_lock);
-	if (!--bdev->bd_contains->bd_holders)
-		bdev->bd_contains->bd_holder = NULL;
-	if (!--bdev->bd_holders)
-		bdev->bd_holder = NULL;
-	spin_unlock(&bdev_lock);
-}
-
-EXPORT_SYMBOL(bd_release);
-
 #ifdef CONFIG_SYSFS
-/*
- * Functions for bd_claim_by_kobject / bd_release_from_kobject
- *
- *     If a kobject is passed to bd_claim_by_kobject()
- *     and the kobject has a parent directory,
- *     following symlinks are created:
- *        o from the kobject to the claimed bdev
- *        o from "holders" directory of the bdev to the parent of the kobject
- *     bd_release_from_kobject() removes these symlinks.
- *
- *     Example:
- *        If /dev/dm-0 maps to /dev/sda, kobject corresponding to
- *        /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
- *           /sys/block/dm-0/slaves/sda --> /sys/block/sda
- *           /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
- */
+struct bd_holder_disk {
+	struct list_head	list;
+	struct gendisk		*disk;
+	int			refcnt;
+};
+
+static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
+						  struct gendisk *disk)
+{
+	struct bd_holder_disk *holder;
+
+	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
+		if (holder->disk == disk)
+			return holder;
+	return NULL;
+}
 
 static int add_symlink(struct kobject *from, struct kobject *to)
 {
-	if (!from || !to)
-		return 0;
 	return sysfs_create_link(from, to, kobject_name(to));
 }
 
 static void del_symlink(struct kobject *from, struct kobject *to)
 {
-	if (!from || !to)
-		return;
 	sysfs_remove_link(from, kobject_name(to));
 }
 
-/*
- * 'struct bd_holder' contains pointers to kobjects symlinked by
- * bd_claim_by_kobject.
- * It's connected to bd_holder_list which is protected by bdev->bd_sem.
- */
-struct bd_holder {
-	struct list_head list;	/* chain of holders of the bdev */
-	int count;		/* references from the holder */
-	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
-	struct kobject *hdev;	/* e.g. "/block/dm-0" */
-	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
-	struct kobject *sdev;	/* e.g. "/block/sda" */
-};
-
-/*
- * Get references of related kobjects at once.
- * Returns 1 on success. 0 on failure.
- *
- * Should call bd_holder_release_dirs() after successful use.
- */
-static int bd_holder_grab_dirs(struct block_device *bdev,
-			struct bd_holder *bo)
-{
-	if (!bdev || !bo)
-		return 0;
-
-	bo->sdir = kobject_get(bo->sdir);
-	if (!bo->sdir)
-		return 0;
-
-	bo->hdev = kobject_get(bo->sdir->parent);
-	if (!bo->hdev)
-		goto fail_put_sdir;
-
-	bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
-	if (!bo->sdev)
-		goto fail_put_hdev;
-
-	bo->hdir = kobject_get(bdev->bd_part->holder_dir);
-	if (!bo->hdir)
-		goto fail_put_sdev;
-
-	return 1;
-
-fail_put_sdev:
-	kobject_put(bo->sdev);
-fail_put_hdev:
-	kobject_put(bo->hdev);
-fail_put_sdir:
-	kobject_put(bo->sdir);
-
-	return 0;
-}
-
-/* Put references of related kobjects at once. */
-static void bd_holder_release_dirs(struct bd_holder *bo)
-{
-	kobject_put(bo->hdir);
-	kobject_put(bo->sdev);
-	kobject_put(bo->hdev);
-	kobject_put(bo->sdir);
-}
-
-static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
-{
-	struct bd_holder *bo;
-
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return NULL;
-
-	bo->count = 1;
-	bo->sdir = kobj;
-
-	return bo;
-}
-
-static void free_bd_holder(struct bd_holder *bo)
-{
-	kfree(bo);
-}
-
 /**
- * find_bd_holder - find matching struct bd_holder from the block device
+ * bd_link_disk_holder - create symlinks between holding disk and slave bdev
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
  *
- * @bdev:	struct block device to be searched
- * @bo:		target struct bd_holder
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
  *
- * Returns matching entry with @bo in @bdev->bd_holder_list.
- * If found, increment the reference count and return the pointer.
- * If not found, returns NULL.
+ * This function creates the following sysfs symlinks.
+ *
+ * - from "slaves" directory of the holder @disk to the claimed @bdev
+ * - from "holders" directory of the @bdev to the holder @disk
+ *
+ * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
+ * passed to bd_link_disk_holder(), then:
+ *
+ *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
+ *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
+ *
+ * The caller must have claimed @bdev before calling this function and
+ * ensure that both @bdev and @disk are valid during the creation and
+ * lifetime of these symlinks.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
-static struct bd_holder *find_bd_holder(struct block_device *bdev,
-					struct bd_holder *bo)
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 {
-	struct bd_holder *tmp;
-
-	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
-		if (tmp->sdir == bo->sdir) {
-			tmp->count++;
-			return tmp;
-		}
-
-	return NULL;
-}
-
-/**
- * add_bd_holder - create sysfs symlinks for bd_claim() relationship
- *
- * @bdev:	block device to be bd_claimed
- * @bo:		preallocated and initialized by alloc_bd_holder()
- *
- * Add @bo to @bdev->bd_holder_list, create symlinks.
- *
- * Returns 0 if symlinks are created.
- * Returns -ve if something fails.
- */
-static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
-{
-	int err;
-
-	if (!bo)
-		return -EINVAL;
-
-	if (!bd_holder_grab_dirs(bdev, bo))
-		return -EBUSY;
-
-	err = add_symlink(bo->sdir, bo->sdev);
-	if (err)
-		return err;
-
-	err = add_symlink(bo->hdir, bo->hdev);
-	if (err) {
-		del_symlink(bo->sdir, bo->sdev);
-		return err;
-	}
-
-	list_add_tail(&bo->list, &bdev->bd_holder_list);
-	return 0;
-}
-
-/**
- * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
- *
- * @bdev:	block device to be bd_claimed
- * @kobj:	holder's kobject
- *
- * If there is matching entry with @kobj in @bdev->bd_holder_list
- * and no other bd_claim() from the same kobject,
- * remove the struct bd_holder from the list, delete symlinks for it.
- *
- * Returns a pointer to the struct bd_holder when it's removed from the list
- * and ready to be freed.
- * Returns NULL if matching claim isn't found or there is other bd_claim()
- * by the same kobject.
- */
-static struct bd_holder *del_bd_holder(struct block_device *bdev,
-					struct kobject *kobj)
-{
-	struct bd_holder *bo;
-
-	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
-		if (bo->sdir == kobj) {
-			bo->count--;
-			BUG_ON(bo->count < 0);
-			if (!bo->count) {
-				list_del(&bo->list);
-				del_symlink(bo->sdir, bo->sdev);
-				del_symlink(bo->hdir, bo->hdev);
-				bd_holder_release_dirs(bo);
-				return bo;
-			}
-			break;
-		}
-	}
-
-	return NULL;
-}
-
-/**
- * bd_claim_by_kobject - bd_claim() with additional kobject signature
- *
- * @bdev:	block device to be claimed
- * @holder:	holder's signature
- * @kobj:	holder's kobject
- *
- * Do bd_claim() and if it succeeds, create sysfs symlinks between
- * the bdev and the holder's kobject.
- * Use bd_release_from_kobject() when relesing the claimed bdev.
- *
- * Returns 0 on success. (same as bd_claim())
- * Returns errno on failure.
- */
-static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
-				struct kobject *kobj)
-{
-	int err;
-	struct bd_holder *bo, *found;
-
-	if (!kobj)
-		return -EINVAL;
-
-	bo = alloc_bd_holder(kobj);
-	if (!bo)
-		return -ENOMEM;
+	struct bd_holder_disk *holder;
+	int ret = 0;
 
 	mutex_lock(&bdev->bd_mutex);
 
-	err = bd_claim(bdev, holder);
-	if (err)
-		goto fail;
+	WARN_ON_ONCE(!bdev->bd_holder);
 
-	found = find_bd_holder(bdev, bo);
-	if (found)
-		goto fail;
+	/* FIXME: remove the following once add_disk() handles errors */
+	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
+		goto out_unlock;
 
-	err = add_bd_holder(bdev, bo);
-	if (err)
-		bd_release(bdev);
-	else
-		bo = NULL;
-fail:
+	holder = bd_find_holder_disk(bdev, disk);
+	if (holder) {
+		holder->refcnt++;
+		goto out_unlock;
+	}
+
+	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
+	if (!holder) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	INIT_LIST_HEAD(&holder->list);
+	holder->disk = disk;
+	holder->refcnt = 1;
+
+	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+	if (ret)
+		goto out_free;
+
+	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
+	if (ret)
+		goto out_del;
+
+	list_add(&holder->list, &bdev->bd_holder_disks);
+	goto out_unlock;
+
+out_del:
+	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+out_free:
+	kfree(holder);
+out_unlock:
 	mutex_unlock(&bdev->bd_mutex);
-	free_bd_holder(bo);
-	return err;
+	return ret;
 }
+EXPORT_SYMBOL_GPL(bd_link_disk_holder);
 
 /**
- * bd_release_from_kobject - bd_release() with additional kobject signature
+ * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
  *
- * @bdev:	block device to be released
- * @kobj:	holder's kobject
+ * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
  *
- * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
+ * CONTEXT:
+ * Might sleep.
  */
-static void bd_release_from_kobject(struct block_device *bdev,
-					struct kobject *kobj)
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
 {
-	if (!kobj)
-		return;
+	struct bd_holder_disk *holder;
 
 	mutex_lock(&bdev->bd_mutex);
-	bd_release(bdev);
-	free_bd_holder(del_bd_holder(bdev, kobj));
+
+	holder = bd_find_holder_disk(bdev, disk);
+
+	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
+		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+		del_symlink(bdev->bd_part->holder_dir,
+			    &disk_to_dev(disk)->kobj);
+		list_del_init(&holder->list);
+		kfree(holder);
+	}
+
 	mutex_unlock(&bdev->bd_mutex);
 }
-
-/**
- * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
- *
- * @bdev:	block device to be claimed
- * @holder:	holder's signature
- * @disk:	holder's gendisk
- *
- * Call bd_claim_by_kobject() with getting @disk->slave_dir.
- */
-int bd_claim_by_disk(struct block_device *bdev, void *holder,
-			struct gendisk *disk)
-{
-	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
-}
-EXPORT_SYMBOL_GPL(bd_claim_by_disk);
-
-/**
- * bd_release_from_disk - wrapper function for bd_release_from_kobject()
- *
- * @bdev:	block device to be claimed
- * @disk:	holder's gendisk
- *
- * Call bd_release_from_kobject() and put @disk->slave_dir.
- */
-void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
-{
-	bd_release_from_kobject(bdev, disk->slave_dir);
-	kobject_put(disk->slave_dir);
-}
-EXPORT_SYMBOL_GPL(bd_release_from_disk);
+EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
 #endif
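
As a rough sketch of how a stacking driver might pair the new holder-symlink
helpers with an exclusive claim (the function and variable names below are
invented for illustration, not taken from this change):

	static int my_add_component(struct gendisk *my_disk, const char *path)
	{
		struct block_device *bdev;
		int ret;

		/* claim the component exclusively, using the disk as holder */
		bdev = blkdev_get_by_path(path,
					  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
					  my_disk);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		/* create the slaves/ and holders/ symlinks */
		ret = bd_link_disk_holder(bdev, my_disk);
		if (ret)
			blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		return ret;
	}

Teardown would call bd_unlink_disk_holder(bdev, my_disk) before the matching
blkdev_put().
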
 
-/*
- * Tries to open block device by device number.  Use it ONLY if you
- * really do not have anything better - i.e. when you are behind a
- * truly sucky interface and all you are given is a device number.  _Never_
- * to be used for internal purposes.  If you ever need it - reconsider
- * your API.
- */
-struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
-{
-	struct block_device *bdev = bdget(dev);
-	int err = -ENOMEM;
-	if (bdev)
-		err = blkdev_get(bdev, mode);
-	return err ? ERR_PTR(err) : bdev;
-}
-
-EXPORT_SYMBOL(open_by_devnum);
-
 /**
  * flush_disk - invalidates all buffer-cache entries on a disk
  *
@@ -1302,10 +1012,11 @@
 {
 	struct gendisk *disk = bdev->bd_disk;
 	const struct block_device_operations *bdops = disk->fops;
+	unsigned int events;
 
-	if (!bdops->media_changed)
-		return 0;
-	if (!bdops->media_changed(bdev->bd_disk))
+	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
+				   DISK_EVENT_EJECT_REQUEST);
+	if (!(events & DISK_EVENT_MEDIA_CHANGE))
 		return 0;
 
 	flush_disk(bdev);
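
The media-change check above now consults the disk event mask instead of a
->media_changed() method.  A minimal sketch of the assumed semantics (the
helper name is invented): a caller interested only in pending eject requests
clears and tests just that bit.

	static bool my_eject_requested(struct gendisk *disk)
	{
		/* clears the selected pending events and returns them */
		return disk_clear_events(disk, DISK_EVENT_EJECT_REQUEST) &
			DISK_EVENT_EJECT_REQUEST;
	}
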
@@ -1468,17 +1179,171 @@
 	return ret;
 }
 
-int blkdev_get(struct block_device *bdev, fmode_t mode)
+/**
+ * blkdev_get - open a block device
+ * @bdev: block_device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is
+ * opened with exclusive access.  Specifying %FMODE_EXCL with %NULL
+ * @holder is invalid.  Exclusive opens may nest for the same @holder.
+ *
+ * On success, the reference count of @bdev is unchanged.  On failure,
+ * @bdev is put.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 {
-	return __blkdev_get(bdev, mode, 0);
+	struct block_device *whole = NULL;
+	int res;
+
+	WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
+
+	if ((mode & FMODE_EXCL) && holder) {
+		whole = bd_start_claiming(bdev, holder);
+		if (IS_ERR(whole)) {
+			bdput(bdev);
+			return PTR_ERR(whole);
+		}
+	}
+
+	res = __blkdev_get(bdev, mode, 0);
+
+	/* __blkdev_get() may alter read only status, check it afterwards */
+	if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+		__blkdev_put(bdev, mode, 0);
+		res = -EACCES;
+	}
+
+	if (whole) {
+		/* finish claiming */
+		mutex_lock(&bdev->bd_mutex);
+		spin_lock(&bdev_lock);
+
+		if (!res) {
+			BUG_ON(!bd_may_claim(bdev, whole, holder));
+			/*
+			 * Note that for a whole device bd_holders
+			 * will be incremented twice, and bd_holder
+			 * will be set to bd_may_claim before being
+			 * set to holder
+			 */
+			whole->bd_holders++;
+			whole->bd_holder = bd_may_claim;
+			bdev->bd_holders++;
+			bdev->bd_holder = holder;
+		}
+
+		/* tell others that we're done */
+		BUG_ON(whole->bd_claiming != holder);
+		whole->bd_claiming = NULL;
+		wake_up_bit(&whole->bd_claiming, 0);
+
+		spin_unlock(&bdev_lock);
+
+		/*
+		 * Block event polling for write claims.  Any write
+		 * holder makes the write_holder state stick until all
+		 * are released.  This is good enough and tracking
+		 * individual writeable references is too fragile given
+		 * the way @mode is used in blkdev_get/put().
+		 */
+		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+			bdev->bd_write_holder = true;
+			disk_block_events(bdev->bd_disk);
+		}
+
+		mutex_unlock(&bdev->bd_mutex);
+		bdput(whole);
+	}
+
+	return res;
 }
 EXPORT_SYMBOL(blkdev_get);
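
A minimal usage sketch for the new signature (function and cookie names are
illustrative only): the holder is an opaque, non-NULL cookie, and the claim
is dropped by passing the same FMODE_EXCL mode to blkdev_put().  Note that
blkdev_get() drops the reference to @bdev itself on failure.

	static int my_use_bdev(struct block_device *bdev, void *my_cookie)
	{
		int err;

		err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				 my_cookie);
		if (err)
			return err;	/* bdev reference already dropped */

		/* ... exclusive user of bdev ... */

		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		return 0;
	}
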
 
+/**
+ * blkdev_get_by_path - open a block device by name
+ * @path: path to the block device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open the block device described by the device file at @path.  @mode
+ * and @holder are identical to blkdev_get().
+ *
+ * On success, the returned block_device has reference count of one.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to block_device on success, ERR_PTR(-errno) on failure.
+ */
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					void *holder)
+{
+	struct block_device *bdev;
+	int err;
+
+	bdev = lookup_bdev(path);
+	if (IS_ERR(bdev))
+		return bdev;
+
+	err = blkdev_get(bdev, mode, holder);
+	if (err)
+		return ERR_PTR(err);
+
+	return bdev;
+}
+EXPORT_SYMBOL(blkdev_get_by_path);
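
For reference, the conversion used by the filesystem hunks later in this
patch maps the old open_bdev_exclusive()/close_bdev_exclusive() pair onto
blkdev_get_by_path() with FMODE_EXCL and a matching blkdev_put().  A sketch
with invented names:

	static struct block_device *my_open_dev(const char *path, void *my_holder)
	{
		/* was: open_bdev_exclusive(path, FMODE_READ, my_holder) */
		return blkdev_get_by_path(path, FMODE_READ | FMODE_EXCL,
					  my_holder);
	}

	static void my_close_dev(struct block_device *bdev)
	{
		/* was: close_bdev_exclusive(bdev, FMODE_READ) */
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
	}
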
+
+/**
+ * blkdev_get_by_dev - open a block device by device number
+ * @dev: device number of block device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open the block device described by device number @dev.  @mode and
+ * @holder are identical to blkdev_get().
+ *
+ * Use it ONLY if you really do not have anything better - i.e. when
+ * you are behind a truly sucky interface and all you are given is a
+ * device number.  _Never_ to be used for internal purposes.  If you
+ * ever need it - reconsider your API.
+ *
+ * On success, the returned block_device has reference count of one.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to block_device on success, ERR_PTR(-errno) on failure.
+ */
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
+{
+	struct block_device *bdev;
+	int err;
+
+	bdev = bdget(dev);
+	if (!bdev)
+		return ERR_PTR(-ENOMEM);
+
+	err = blkdev_get(bdev, mode, holder);
+	if (err)
+		return ERR_PTR(err);
+
+	return bdev;
+}
+EXPORT_SYMBOL(blkdev_get_by_dev);
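
Callers of the removed open_by_devnum() can be converted along these lines
(the wrapper name is made up); non-exclusive users simply pass a NULL holder
and no FMODE_EXCL:

	static struct block_device *my_open_by_devnum(dev_t dev, fmode_t mode)
	{
		/* was: open_by_devnum(dev, mode) */
		return blkdev_get_by_dev(dev, mode, NULL);
	}
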
+
 static int blkdev_open(struct inode * inode, struct file * filp)
 {
-	struct block_device *whole = NULL;
 	struct block_device *bdev;
-	int res;
 
 	/*
 	 * Preserve backwards compatibility and allow large file access
@@ -1499,26 +1364,9 @@
 	if (bdev == NULL)
 		return -ENOMEM;
 
-	if (filp->f_mode & FMODE_EXCL) {
-		whole = bd_start_claiming(bdev, filp);
-		if (IS_ERR(whole)) {
-			bdput(bdev);
-			return PTR_ERR(whole);
-		}
-	}
-
 	filp->f_mapping = bdev->bd_inode->i_mapping;
 
-	res = blkdev_get(bdev, filp->f_mode);
-
-	if (whole) {
-		if (res == 0)
-			bd_finish_claiming(bdev, whole, filp);
-		else
-			bd_abort_claiming(whole, filp);
-	}
-
-	return res;
+	return blkdev_get(bdev, filp->f_mode, filp);
 }
 
 static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
@@ -1532,6 +1380,7 @@
 		bdev->bd_part_count--;
 
 	if (!--bdev->bd_openers) {
+		WARN_ON_ONCE(bdev->bd_holders);
 		sync_blockdev(bdev);
 		kill_bdev(bdev);
 	}
@@ -1562,6 +1411,44 @@
 
 int blkdev_put(struct block_device *bdev, fmode_t mode)
 {
+	if (mode & FMODE_EXCL) {
+		bool bdev_free;
+
+		/*
+		 * Release a claim on the device.  The holder fields
+		 * are protected with bdev_lock.  bd_mutex is to
+		 * synchronize disk_holder unlinking.
+		 */
+		mutex_lock(&bdev->bd_mutex);
+		spin_lock(&bdev_lock);
+
+		WARN_ON_ONCE(--bdev->bd_holders < 0);
+		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
+
+		/* bd_contains might point to self, check in a separate step */
+		if ((bdev_free = !bdev->bd_holders))
+			bdev->bd_holder = NULL;
+		if (!bdev->bd_contains->bd_holders)
+			bdev->bd_contains->bd_holder = NULL;
+
+		spin_unlock(&bdev_lock);
+
+		/*
+		 * If this was the last claim, remove holder link and
+		 * unblock event polling if it was a write holder.
+		 */
+		if (bdev_free) {
+			if (bdev->bd_write_holder) {
+				disk_unblock_events(bdev->bd_disk);
+				bdev->bd_write_holder = false;
+			} else
+				disk_check_events(bdev->bd_disk);
+		}
+
+		mutex_unlock(&bdev->bd_mutex);
+	} else
+		disk_check_events(bdev->bd_disk);
+
 	return __blkdev_put(bdev, mode, 0);
 }
 EXPORT_SYMBOL(blkdev_put);
@@ -1569,8 +1456,7 @@
 static int blkdev_close(struct inode * inode, struct file * filp)
 {
 	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
-	if (bdev->bd_holder == filp)
-		bd_release(bdev);
+
 	return blkdev_put(bdev, filp->f_mode);
 }
 
@@ -1715,67 +1601,6 @@
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-/**
- * open_bdev_exclusive  -  open a block device by name and set it up for use
- *
- * @path:	special file representing the block device
- * @mode:	FMODE_... combination to pass be used
- * @holder:	owner for exclusion
- *
- * Open the blockdevice described by the special file at @path, claim it
- * for the @holder.
- */
-struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
-{
-	struct block_device *bdev, *whole;
-	int error;
-
-	bdev = lookup_bdev(path);
-	if (IS_ERR(bdev))
-		return bdev;
-
-	whole = bd_start_claiming(bdev, holder);
-	if (IS_ERR(whole)) {
-		bdput(bdev);
-		return whole;
-	}
-
-	error = blkdev_get(bdev, mode);
-	if (error)
-		goto out_abort_claiming;
-
-	error = -EACCES;
-	if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
-		goto out_blkdev_put;
-
-	bd_finish_claiming(bdev, whole, holder);
-	return bdev;
-
-out_blkdev_put:
-	blkdev_put(bdev, mode);
-out_abort_claiming:
-	bd_abort_claiming(whole, holder);
-	return ERR_PTR(error);
-}
-
-EXPORT_SYMBOL(open_bdev_exclusive);
-
-/**
- * close_bdev_exclusive  -  close a blockdevice opened by open_bdev_exclusive()
- *
- * @bdev:	blockdevice to close
- * @mode:	mode, must match that used to open.
- *
- * This is the counterpart to open_bdev_exclusive().
- */
-void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
-{
-	bd_release(bdev);
-	blkdev_put(bdev, mode);
-}
-
-EXPORT_SYMBOL(close_bdev_exclusive);
-
 int __invalidate_device(struct block_device *bdev)
 {
 	struct super_block *sb = get_super(bdev);
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 2222d16..6ae2c8c 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -185,18 +185,23 @@
 	return ret;
 }
 
-int btrfs_check_acl(struct inode *inode, int mask)
+int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct posix_acl *acl;
 	int error = -EAGAIN;
 
-	acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
+	if (flags & IPERM_FLAG_RCU) {
+		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+			error = -ECHILD;
 
-	if (IS_ERR(acl))
-		return PTR_ERR(acl);
-	if (acl) {
-		error = posix_acl_permission(inode, acl, mask);
-		posix_acl_release(acl);
+	} else {
+		struct posix_acl *acl;
+		acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
+		if (IS_ERR(acl))
+			return PTR_ERR(acl);
+		if (acl) {
+			error = posix_acl_permission(inode, acl, mask);
+			posix_acl_release(acl);
+		}
 	}
 
 	return error;
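
The general shape of an RCU-aware permission/ACL hook after this change, as
a hedged sketch for a hypothetical filesystem "myfs": under IPERM_FLAG_RCU
the hook may not block or take references, so it either answers from cached
state or returns -ECHILD to make the VFS retry in ref-walk mode.

	static int myfs_check_acl(struct inode *inode, int mask,
				  unsigned int flags)
	{
		if (flags & IPERM_FLAG_RCU)
			return -ECHILD;		/* retry with references held */

		/* myfs_check_acl_slow() is hypothetical: the blocking path */
		return myfs_check_acl_slow(inode, mask);
	}
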
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index af52f6d..a142d20 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2544,7 +2544,7 @@
 
 /* acl.c */
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-int btrfs_check_acl(struct inode *inode, int mask);
+int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags);
 #else
 #define btrfs_check_acl NULL
 #endif
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 659f532..9786963 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -65,7 +65,6 @@
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
 	struct btrfs_root *root;
-	struct dentry *dentry;
 	struct inode *inode;
 	struct btrfs_key key;
 	int index;
@@ -108,10 +107,7 @@
 		return ERR_PTR(-ESTALE);
 	}
 
-	dentry = d_obtain_alias(inode);
-	if (!IS_ERR(dentry))
-		dentry->d_op = &btrfs_dentry_operations;
-	return dentry;
+	return d_obtain_alias(inode);
 fail:
 	srcu_read_unlock(&fs_info->subvol_srcu, index);
 	return ERR_PTR(err);
@@ -166,7 +162,6 @@
 static struct dentry *btrfs_get_parent(struct dentry *child)
 {
 	struct inode *dir = child->d_inode;
-	struct dentry *dentry;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
@@ -223,10 +218,7 @@
 
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
-	if (!IS_ERR(dentry))
-		dentry->d_op = &btrfs_dentry_operations;
-	return dentry;
+	return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
 fail:
 	btrfs_free_path(path);
 	return ERR_PTR(ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 72f31ec..a3798a3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4084,8 +4084,6 @@
 	int index;
 	int ret;
 
-	dentry->d_op = &btrfs_dentry_operations;
-
 	if (dentry->d_name.len > BTRFS_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
@@ -4127,7 +4125,7 @@
 	return inode;
 }
 
-static int btrfs_dentry_delete(struct dentry *dentry)
+static int btrfs_dentry_delete(const struct dentry *dentry)
 {
 	struct btrfs_root *root;
 
@@ -6495,6 +6493,13 @@
 	return inode;
 }
 
+static void btrfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+}
+
 void btrfs_destroy_inode(struct inode *inode)
 {
 	struct btrfs_ordered_extent *ordered;
@@ -6564,7 +6569,7 @@
 	inode_tree_del(inode);
 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
-	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+	call_rcu(&inode->i_rcu, btrfs_i_callback);
 }
 
 int btrfs_drop_inode(struct inode *inode)
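
The inode-freeing conversion follows a common pattern for RCU path walk; a
sketch for a hypothetical "myfs" (cache and helper names invented):
->destroy_inode() defers the kmem_cache_free() to an RCU callback so that
lock-free dentry walkers never dereference a freed inode.

	static void myfs_i_callback(struct rcu_head *head)
	{
		struct inode *inode = container_of(head, struct inode, i_rcu);

		INIT_LIST_HEAD(&inode->i_dentry);
		kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
	}

	static void myfs_destroy_inode(struct inode *inode)
	{
		call_rcu(&inode->i_rcu, myfs_i_callback);
	}
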
@@ -7110,6 +7115,10 @@
 	alloc_start = offset & ~mask;
 	alloc_end =  (offset + len + mask) & ~mask;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+		return -EOPNOTSUPP;
+
 	/*
 	 * wait for ordered IO before we have any locks.  We'll loop again
 	 * below with the locks held.
@@ -7204,11 +7213,11 @@
 	return __set_page_dirty_nobuffers(page);
 }
 
-static int btrfs_permission(struct inode *inode, int mask)
+static int btrfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
 		return -EACCES;
-	return generic_permission(inode, mask, btrfs_check_acl);
+	return generic_permission(inode, mask, flags, btrfs_check_acl);
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 883c6fa..22acdaa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -460,6 +460,7 @@
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_magic = BTRFS_SUPER_MAGIC;
 	sb->s_op = &btrfs_super_ops;
+	sb->s_d_op = &btrfs_dentry_operations;
 	sb->s_export_op = &btrfs_export_ops;
 	sb->s_xattr = btrfs_xattr_handlers;
 	sb->s_time_gran = 1;
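
Setting sb->s_d_op once at mount time is what allows the per-dentry
"dentry->d_op = ..." assignments in ->lookup() to be dropped elsewhere in
this patch; a minimal sketch for a hypothetical "myfs":

	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
	{
		/* every dentry allocated under this sb gets these ops */
		sb->s_d_op = &myfs_dentry_operations;
		/* ... remaining superblock setup elided ... */
		return 0;
	}
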
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6b98845..1718e1a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -493,7 +493,7 @@
 			continue;
 
 		if (device->bdev) {
-			close_bdev_exclusive(device->bdev, device->mode);
+			blkdev_put(device->bdev, device->mode);
 			device->bdev = NULL;
 			fs_devices->open_devices--;
 		}
@@ -527,7 +527,7 @@
 
 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 		if (device->bdev) {
-			close_bdev_exclusive(device->bdev, device->mode);
+			blkdev_put(device->bdev, device->mode);
 			fs_devices->open_devices--;
 		}
 		if (device->writeable) {
@@ -584,13 +584,15 @@
 	int seeding = 1;
 	int ret = 0;
 
+	flags |= FMODE_EXCL;
+
 	list_for_each_entry(device, head, dev_list) {
 		if (device->bdev)
 			continue;
 		if (!device->name)
 			continue;
 
-		bdev = open_bdev_exclusive(device->name, flags, holder);
+		bdev = blkdev_get_by_path(device->name, flags, holder);
 		if (IS_ERR(bdev)) {
 			printk(KERN_INFO "open %s failed\n", device->name);
 			goto error;
@@ -642,7 +644,7 @@
 error_brelse:
 		brelse(bh);
 error_close:
-		close_bdev_exclusive(bdev, FMODE_READ);
+		blkdev_put(bdev, flags);
 error:
 		continue;
 	}
@@ -688,7 +690,8 @@
 
 	mutex_lock(&uuid_mutex);
 
-	bdev = open_bdev_exclusive(path, flags, holder);
+	flags |= FMODE_EXCL;
+	bdev = blkdev_get_by_path(path, flags, holder);
 
 	if (IS_ERR(bdev)) {
 		ret = PTR_ERR(bdev);
@@ -720,7 +723,7 @@
 
 	brelse(bh);
 error_close:
-	close_bdev_exclusive(bdev, flags);
+	blkdev_put(bdev, flags);
 error:
 	mutex_unlock(&uuid_mutex);
 	return ret;
@@ -1183,8 +1186,8 @@
 			goto out;
 		}
 	} else {
-		bdev = open_bdev_exclusive(device_path, FMODE_READ,
-				      root->fs_info->bdev_holder);
+		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
+					  root->fs_info->bdev_holder);
 		if (IS_ERR(bdev)) {
 			ret = PTR_ERR(bdev);
 			goto out;
@@ -1251,7 +1254,7 @@
 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
 	if (device->bdev) {
-		close_bdev_exclusive(device->bdev, device->mode);
+		blkdev_put(device->bdev, device->mode);
 		device->bdev = NULL;
 		device->fs_devices->open_devices--;
 	}
@@ -1294,7 +1297,7 @@
 	brelse(bh);
 error_close:
 	if (bdev)
-		close_bdev_exclusive(bdev, FMODE_READ);
+		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 out:
 	mutex_unlock(&root->fs_info->volume_mutex);
 	mutex_unlock(&uuid_mutex);
@@ -1446,7 +1449,8 @@
 	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
 		return -EINVAL;
 
-	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
+	bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+				  root->fs_info->bdev_holder);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
@@ -1572,7 +1576,7 @@
 	mutex_unlock(&root->fs_info->volume_mutex);
 	return ret;
 error:
-	close_bdev_exclusive(bdev, 0);
+	blkdev_put(bdev, FMODE_EXCL);
 	if (seeding_dev) {
 		mutex_unlock(&uuid_mutex);
 		up_write(&sb->s_umount);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2740db4..1be7810 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -50,7 +50,7 @@
 
 	struct block_device *bdev;
 
-	/* the mode sent to open_bdev_exclusive */
+	/* the mode sent to blkdev_get */
 	fmode_t mode;
 
 	char *name;
diff --git a/fs/buffer.c b/fs/buffer.c
index 5930e38..2219a76 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1270,12 +1270,10 @@
 static void bh_lru_install(struct buffer_head *bh)
 {
 	struct buffer_head *evictee = NULL;
-	struct bh_lru *lru;
 
 	check_irqs_on();
 	bh_lru_lock();
-	lru = &__get_cpu_var(bh_lrus);
-	if (lru->bhs[0] != bh) {
+	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
 		struct buffer_head *bhs[BH_LRU_SIZE];
 		int in;
 		int out = 0;
@@ -1283,7 +1281,8 @@
 		get_bh(bh);
 		bhs[out++] = bh;
 		for (in = 0; in < BH_LRU_SIZE; in++) {
-			struct buffer_head *bh2 = lru->bhs[in];
+			struct buffer_head *bh2 =
+				__this_cpu_read(bh_lrus.bhs[in]);
 
 			if (bh2 == bh) {
 				__brelse(bh2);
@@ -1298,7 +1297,7 @@
 		}
 		while (out < BH_LRU_SIZE)
 			bhs[out++] = NULL;
-		memcpy(lru->bhs, bhs, sizeof(bhs));
+		memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
 	}
 	bh_lru_unlock();
 
@@ -1313,23 +1312,22 @@
 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
 {
 	struct buffer_head *ret = NULL;
-	struct bh_lru *lru;
 	unsigned int i;
 
 	check_irqs_on();
 	bh_lru_lock();
-	lru = &__get_cpu_var(bh_lrus);
 	for (i = 0; i < BH_LRU_SIZE; i++) {
-		struct buffer_head *bh = lru->bhs[i];
+		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
 
 		if (bh && bh->b_bdev == bdev &&
 				bh->b_blocknr == block && bh->b_size == size) {
 			if (i) {
 				while (i) {
-					lru->bhs[i] = lru->bhs[i - 1];
+					__this_cpu_write(bh_lrus.bhs[i],
+						__this_cpu_read(bh_lrus.bhs[i - 1]));
 					i--;
 				}
-				lru->bhs[0] = bh;
+				__this_cpu_write(bh_lrus.bhs[0], bh);
 			}
 			get_bh(bh);
 			ret = bh;
@@ -3203,22 +3201,23 @@
 	int i;
 	int tot = 0;
 
-	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
+	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
 		return;
-	__get_cpu_var(bh_accounting).ratelimit = 0;
+	__this_cpu_write(bh_accounting.ratelimit, 0);
 	for_each_online_cpu(i)
 		tot += per_cpu(bh_accounting, i).nr;
 	buffer_heads_over_limit = (tot > max_buffer_heads);
 }
-	
+
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
-		get_cpu_var(bh_accounting).nr++;
+		preempt_disable();
+		__this_cpu_inc(bh_accounting.nr);
 		recalc_bh_state();
-		put_cpu_var(bh_accounting);
+		preempt_enable();
 	}
 	return ret;
 }
@@ -3228,9 +3227,10 @@
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
 	kmem_cache_free(bh_cachep, bh);
-	get_cpu_var(bh_accounting).nr--;
+	preempt_disable();
+	__this_cpu_dec(bh_accounting.nr);
 	recalc_bh_state();
-	put_cpu_var(bh_accounting);
+	preempt_enable();
 }
 EXPORT_SYMBOL(free_buffer_head);
 
@@ -3243,9 +3243,8 @@
 		brelse(b->bhs[i]);
 		b->bhs[i] = NULL;
 	}
-	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
 	per_cpu(bh_accounting, cpu).nr = 0;
-	put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
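
The buffer.c changes above replace get_cpu_var()/put_cpu_var() pairs with
this_cpu operations, which fold the preemption handling into a single step.
A tiny illustrative counter (the variable and function are made up):

	static DEFINE_PER_CPU(unsigned long, my_counter);

	static void my_count_event(void)
	{
		/* was: get_cpu_var(my_counter)++; put_cpu_var(my_counter); */
		this_cpu_inc(my_counter);
	}
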
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 9e6c4f2..bd35212 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -2,31 +2,10 @@
 # Makefile for CEPH filesystem.
 #
 
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_FS) += ceph.o
 
-ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
+ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
 	export.o caps.o snap.o xattr.o \
 	mds_client.o mdsmap.o strings.o ceph_frag.o \
 	debugfs.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules
-
-modules_install:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install
-
-clean:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 7ae1b3d..08f65fa 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -60,10 +60,13 @@
 	for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
 		req = rb_entry(rp, struct ceph_mds_request, r_node);
 
-		if (req->r_request)
-			seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds);
-		else
+		if (req->r_request && req->r_session)
+			seq_printf(s, "%lld\tmds%d\t", req->r_tid,
+				   req->r_session->s_mds);
+		else if (!req->r_request)
 			seq_printf(s, "%lld\t(no request)\t", req->r_tid);
+		else
+			seq_printf(s, "%lld\t(no session)\t", req->r_tid);
 
 		seq_printf(s, "%s", ceph_mds_op_name(req->r_op));
 
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d902948..0bc68de 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -42,11 +42,11 @@
 
 	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
 	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
-		dentry->d_op = &ceph_dentry_ops;
+		d_set_d_op(dentry, &ceph_dentry_ops);
 	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
-		dentry->d_op = &ceph_snapdir_dentry_ops;
+		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
 	else
-		dentry->d_op = &ceph_snap_dentry_ops;
+		d_set_d_op(dentry, &ceph_snap_dentry_ops);
 
 	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
 	if (!di)
@@ -112,7 +112,7 @@
 	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
 	     last);
 
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 
 	/* start at beginning? */
 	if (filp->f_pos == 2 || last == NULL ||
@@ -136,6 +136,7 @@
 			fi->at_end = 1;
 			goto out_unlock;
 		}
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		if (!d_unhashed(dentry) && dentry->d_inode &&
 		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
 		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
@@ -145,13 +146,15 @@
 		     dentry->d_name.len, dentry->d_name.name, di->offset,
 		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
 		     !dentry->d_inode ? " null" : "");
+		spin_unlock(&dentry->d_lock);
 		p = p->prev;
 		dentry = list_entry(p, struct dentry, d_u.d_child);
 		di = ceph_dentry(dentry);
 	}
 
-	atomic_inc(&dentry->d_count);
-	spin_unlock(&dcache_lock);
+	dget_dlock(dentry);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&parent->d_lock);
 
 	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
 	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -177,19 +180,19 @@
 
 	filp->f_pos++;
 
-	/* make sure a dentry wasn't dropped while we didn't have dcache_lock */
+	/* make sure a dentry wasn't dropped while we didn't have parent lock */
 	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
 		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
 		err = -EAGAIN;
 		goto out;
 	}
 
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	p = p->prev;	/* advance to next dentry */
 	goto more;
 
 out_unlock:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 out:
 	if (last)
 		dput(last);
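
These hunks replace the global dcache_lock with per-dentry locking: the
parent's d_lock protects its d_subdirs list, and a child's d_lock nests
inside it.  A bare sketch of the idiom (the function name is invented):

	static void my_walk_child(struct dentry *parent, struct dentry *dentry)
	{
		spin_lock(&parent->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* parent->d_subdirs and dentry->d_u.d_child are stable here */
		spin_unlock(&dentry->d_lock);
		spin_unlock(&parent->d_lock);
	}
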
@@ -987,7 +990,12 @@
  */
 static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct inode *dir = dentry->d_parent->d_inode;
+	struct inode *dir;
+
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	dir = dentry->d_parent->d_inode;
 
 	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
 	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
@@ -1216,6 +1224,26 @@
 	}
 }
 
+/*
+ * Return name hash for a given dentry.  This is dependent on
+ * the parent directory's hash function.
+ */
+unsigned ceph_dentry_hash(struct dentry *dn)
+{
+	struct inode *dir = dn->d_parent->d_inode;
+	struct ceph_inode_info *dci = ceph_inode(dir);
+
+	switch (dci->i_dir_layout.dl_dir_hash) {
+	case 0:	/* for backward compat */
+	case CEPH_STR_HASH_LINUX:
+		return dn->d_name.hash;
+
+	default:
+		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+				     dn->d_name.name, dn->d_name.len);
+	}
+}
+
 const struct file_operations ceph_dir_fops = {
 	.read = ceph_read_dir,
 	.readdir = ceph_readdir,
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 2297d94..e410561 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -59,7 +59,7 @@
 		dout("encode_fh %p connectable\n", dentry);
 		cfh->ino = ceph_ino(dentry->d_inode);
 		cfh->parent_ino = ceph_ino(parent->d_inode);
-		cfh->parent_name_hash = parent->d_name.hash;
+		cfh->parent_name_hash = ceph_dentry_hash(parent);
 		*max_len = connected_handle_length;
 		type = 2;
 	} else if (*max_len >= handle_length) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index bf12865..e835eff 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@
 	ci->i_release_count = 0;
 	ci->i_symlink = NULL;
 
+	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
+
 	ci->i_fragtree = RB_ROOT;
 	mutex_init(&ci->i_fragtree_mutex);
 
@@ -368,6 +370,15 @@
 	return &ci->vfs_inode;
 }
 
+static void ceph_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	struct ceph_inode_info *ci = ceph_inode(inode);
+
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ceph_inode_cachep, ci);
+}
+
 void ceph_destroy_inode(struct inode *inode)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
@@ -407,7 +418,7 @@
 	if (ci->i_xattrs.prealloc_blob)
 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
 
-	kmem_cache_free(ceph_inode_cachep, ci);
+	call_rcu(&inode->i_rcu, ceph_i_callback);
 }
 
 
@@ -680,6 +691,8 @@
 		inode->i_op = &ceph_dir_iops;
 		inode->i_fop = &ceph_dir_fops;
 
+		ci->i_dir_layout = iinfo->dir_layout;
+
 		ci->i_files = le64_to_cpu(info->files);
 		ci->i_subdirs = le64_to_cpu(info->subdirs);
 		ci->i_rbytes = le64_to_cpu(info->rbytes);
@@ -841,13 +854,13 @@
 	di->offset = ceph_inode(inode)->i_max_offset++;
 	spin_unlock(&inode->i_lock);
 
-	spin_lock(&dcache_lock);
-	spin_lock(&dn->d_lock);
+	spin_lock(&dir->d_lock);
+	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
 	list_move(&dn->d_u.d_child, &dir->d_subdirs);
 	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
 	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
 	spin_unlock(&dn->d_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dir->d_lock);
 }
 
 /*
@@ -879,8 +892,8 @@
 	} else if (realdn) {
 		dout("dn %p (%d) spliced with %p (%d) "
 		     "inode %p ino %llx.%llx\n",
-		     dn, atomic_read(&dn->d_count),
-		     realdn, atomic_read(&realdn->d_count),
+		     dn, dn->d_count,
+		     realdn, realdn->d_count,
 		     realdn->d_inode, ceph_vinop(realdn->d_inode));
 		dput(dn);
 		dn = realdn;
@@ -1231,11 +1244,11 @@
 			goto retry_lookup;
 		} else {
 			/* reorder parent's d_subdirs */
-			spin_lock(&dcache_lock);
-			spin_lock(&dn->d_lock);
+			spin_lock(&parent->d_lock);
+			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
 			list_move(&dn->d_u.d_child, &parent->d_subdirs);
 			spin_unlock(&dn->d_lock);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&parent->d_lock);
 		}
 
 		di = dn->d_fsdata;
@@ -1772,12 +1785,17 @@
  * Check inode permissions.  We verify we have a valid value for
  * the AUTH cap, then call the generic handler.
  */
-int ceph_permission(struct inode *inode, int mask)
+int ceph_permission(struct inode *inode, int mask, unsigned int flags)
 {
-	int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
+	int err;
+
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
 
 	if (!err)
-		err = generic_permission(inode, mask, NULL);
+		err = generic_permission(inode, mask, flags, NULL);
 	return err;
 }
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 38800ea..1e30d19 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -60,7 +60,8 @@
  * parse individual inode info
  */
 static int parse_reply_info_in(void **p, void *end,
-			       struct ceph_mds_reply_info_in *info)
+			       struct ceph_mds_reply_info_in *info,
+			       int features)
 {
 	int err = -EIO;
 
@@ -74,6 +75,12 @@
 	info->symlink = *p;
 	*p += info->symlink_len;
 
+	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
+		ceph_decode_copy_safe(p, end, &info->dir_layout,
+				      sizeof(info->dir_layout), bad);
+	else
+		memset(&info->dir_layout, 0, sizeof(info->dir_layout));
+
 	ceph_decode_32_safe(p, end, info->xattr_len, bad);
 	ceph_decode_need(p, end, info->xattr_len, bad);
 	info->xattr_data = *p;
@@ -88,12 +95,13 @@
  * target inode.
  */
 static int parse_reply_info_trace(void **p, void *end,
-				  struct ceph_mds_reply_info_parsed *info)
+				  struct ceph_mds_reply_info_parsed *info,
+				  int features)
 {
 	int err;
 
 	if (info->head->is_dentry) {
-		err = parse_reply_info_in(p, end, &info->diri);
+		err = parse_reply_info_in(p, end, &info->diri, features);
 		if (err < 0)
 			goto out_bad;
 
@@ -114,7 +122,7 @@
 	}
 
 	if (info->head->is_target) {
-		err = parse_reply_info_in(p, end, &info->targeti);
+		err = parse_reply_info_in(p, end, &info->targeti, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -134,7 +142,8 @@
  * parse readdir results
  */
 static int parse_reply_info_dir(void **p, void *end,
-				struct ceph_mds_reply_info_parsed *info)
+				struct ceph_mds_reply_info_parsed *info,
+				int features)
 {
 	u32 num, i = 0;
 	int err;
@@ -182,7 +191,7 @@
 		*p += sizeof(struct ceph_mds_reply_lease);
 
 		/* inode */
-		err = parse_reply_info_in(p, end, &info->dir_in[i]);
+		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
 		if (err < 0)
 			goto out_bad;
 		i++;
@@ -205,7 +214,8 @@
  * parse fcntl F_GETLK results
  */
 static int parse_reply_info_filelock(void **p, void *end,
-                struct ceph_mds_reply_info_parsed *info)
+				     struct ceph_mds_reply_info_parsed *info,
+				     int features)
 {
 	if (*p + sizeof(*info->filelock_reply) > end)
 		goto bad;
@@ -225,19 +235,21 @@
  * parse extra results
  */
 static int parse_reply_info_extra(void **p, void *end,
-                struct ceph_mds_reply_info_parsed *info)
+				  struct ceph_mds_reply_info_parsed *info,
+				  int features)
 {
 	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
-		return parse_reply_info_filelock(p, end, info);
+		return parse_reply_info_filelock(p, end, info, features);
 	else
-		return parse_reply_info_dir(p, end, info);
+		return parse_reply_info_dir(p, end, info, features);
 }
 
 /*
  * parse entire mds reply
  */
 static int parse_reply_info(struct ceph_msg *msg,
-			    struct ceph_mds_reply_info_parsed *info)
+			    struct ceph_mds_reply_info_parsed *info,
+			    int features)
 {
 	void *p, *end;
 	u32 len;
@@ -250,7 +262,7 @@
 	/* trace */
 	ceph_decode_32_safe(&p, end, len, bad);
 	if (len > 0) {
-		err = parse_reply_info_trace(&p, p+len, info);
+		err = parse_reply_info_trace(&p, p+len, info, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -258,7 +270,7 @@
 	/* extra */
 	ceph_decode_32_safe(&p, end, len, bad);
 	if (len > 0) {
-		err = parse_reply_info_extra(&p, p+len, info);
+		err = parse_reply_info_extra(&p, p+len, info, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -654,7 +666,7 @@
 		} else {
 			/* dir + name */
 			inode = dir;
-			hash = req->r_dentry->d_name.hash;
+			hash = ceph_dentry_hash(req->r_dentry);
 			is_hash = true;
 		}
 	}
@@ -1486,7 +1498,7 @@
 	*base = ceph_ino(temp->d_inode);
 	*plen = len;
 	dout("build_path on %p %d built %llx '%.*s'\n",
-	     dentry, atomic_read(&dentry->d_count), *base, len, path);
+	     dentry, dentry->d_count, *base, len, path);
 	return path;
 }
 
@@ -1693,7 +1705,6 @@
 	struct ceph_msg *msg;
 	int flags = 0;
 
-	req->r_mds = mds;
 	req->r_attempts++;
 	if (req->r_inode) {
 		struct ceph_cap *cap =
@@ -1780,6 +1791,8 @@
 		goto finish;
 	}
 
+	put_request_session(req);
+
 	mds = __choose_mds(mdsc, req);
 	if (mds < 0 ||
 	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
@@ -1797,6 +1810,8 @@
 			goto finish;
 		}
 	}
+	req->r_session = get_session(session);
+
 	dout("do_request mds%d session %p state %s\n", mds, session,
 	     session_state_name(session->s_state));
 	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1809,7 +1824,6 @@
 	}
 
 	/* send request */
-	req->r_session = get_session(session);
 	req->r_resend_mds = -1;   /* forget any previous mds hint */
 
 	if (req->r_request_started == 0)   /* note request start time */
@@ -1863,7 +1877,6 @@
 		if (req->r_session &&
 		    req->r_session->s_mds == mds) {
 			dout(" kicking tid %llu\n", req->r_tid);
-			put_request_session(req);
 			__do_request(mdsc, req);
 		}
 	}
@@ -2056,8 +2069,11 @@
 			goto out;
 		} else  {
 			struct ceph_inode_info *ci = ceph_inode(req->r_inode);
-			struct ceph_cap *cap =
-				ceph_get_cap_for_mds(ci, req->r_mds);;
+			struct ceph_cap *cap = NULL;
+
+			if (req->r_session)
+				cap = ceph_get_cap_for_mds(ci,
+						   req->r_session->s_mds);
 
 			dout("already using auth");
 			if ((!cap || cap != ci->i_auth_cap) ||
@@ -2101,7 +2117,7 @@
 
 	dout("handle_reply tid %lld result %d\n", tid, result);
 	rinfo = &req->r_reply_info;
-	err = parse_reply_info(msg, rinfo);
+	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
 	mutex_unlock(&mdsc->mutex);
 
 	mutex_lock(&session->s_mutex);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index aabe563..4e3a9cc 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -35,6 +35,7 @@
  */
 struct ceph_mds_reply_info_in {
 	struct ceph_mds_reply_inode *in;
+	struct ceph_dir_layout dir_layout;
 	u32 symlink_len;
 	char *symlink;
 	u32 xattr_len;
@@ -165,7 +166,6 @@
 	struct ceph_mds_client *r_mdsc;
 
 	int r_op;                    /* mds op code */
-	int r_mds;
 
 	/* operation on what? */
 	struct inode *r_inode;              /* arg1 */
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 08b460a..bf6f0f3 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -428,7 +428,8 @@
 		goto fail;
 	}
 	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
-	fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
+	fsc->client->supported_features |= CEPH_FEATURE_FLOCK |
+		CEPH_FEATURE_DIRLAYOUTHASH;
 	fsc->client->monc.want_mdsmap = 1;
 
 	fsc->mount_options = fsopt;
@@ -443,13 +444,17 @@
 		goto fail_client;
 
 	err = -ENOMEM;
-	fsc->wb_wq = create_workqueue("ceph-writeback");
+	/*
+	 * The number of concurrent work items can be high but they don't
+	 * need to be processed in parallel, so limit concurrency.
+	 */
+	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
 	if (fsc->wb_wq == NULL)
 		goto fail_bdi;
-	fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
+	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
 	if (fsc->pg_inv_wq == NULL)
 		goto fail_wb_wq;
-	fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
+	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
 	if (fsc->trunc_wq == NULL)
 		goto fail_pg_inv_wq;
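
The workqueue conversion above assumes one in-flight work item at a time is
enough.  As an illustrative pattern (names invented), a dedicated kthread
workqueue becomes a concurrency-managed one with max_active = 1:

	static struct workqueue_struct *my_wq;

	static int my_init_wq(void)
	{
		/* was: my_wq = create_singlethread_workqueue("my-wq"); */
		my_wq = alloc_workqueue("my-wq", 0, 1);
		return my_wq ? 0 : -ENOMEM;
	}
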
 
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 7f01728..20b907d 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -239,6 +239,7 @@
 	unsigned i_ceph_flags;
 	unsigned long i_release_count;
 
+	struct ceph_dir_layout i_dir_layout;
 	struct ceph_file_layout i_layout;
 	char *i_symlink;
 
@@ -665,7 +666,7 @@
 extern void ceph_queue_writeback(struct inode *inode);
 
 extern int ceph_do_getattr(struct inode *inode, int mask);
-extern int ceph_permission(struct inode *inode, int mask);
+extern int ceph_permission(struct inode *inode, int mask, unsigned int flags);
 extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
 extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
 			struct kstat *stat);
@@ -768,6 +769,7 @@
 extern void ceph_dentry_lru_touch(struct dentry *dn);
 extern void ceph_dentry_lru_del(struct dentry *dn);
 extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
+extern unsigned ceph_dentry_hash(struct dentry *dn);
 
 /*
  * our d_ops vary depending on whether the inode is live,
diff --git a/fs/char_dev.c b/fs/char_dev.c
index e5b9df9..dca9e5e 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -59,7 +59,7 @@
 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
 
 /* index in the above */
-static inline int major_to_index(int major)
+static inline int major_to_index(unsigned major)
 {
 	return major % CHRDEV_MAJOR_HASH_SIZE;
 }
@@ -417,18 +417,6 @@
 	return ret;
 }
 
-int cdev_index(struct inode *inode)
-{
-	int idx;
-	struct kobject *kobj;
-
-	kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
-	if (!kobj)
-		return -1;
-	kobject_put(kobj);
-	return idx;
-}
-
 void cd_forget(struct inode *inode)
 {
 	spin_lock(&cdev_lock);
@@ -582,7 +570,6 @@
 EXPORT_SYMBOL(cdev_alloc);
 EXPORT_SYMBOL(cdev_del);
 EXPORT_SYMBOL(cdev_add);
-EXPORT_SYMBOL(cdev_index);
 EXPORT_SYMBOL(__register_chrdev);
 EXPORT_SYMBOL(__unregister_chrdev);
 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 224d7bb..e654dfd 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -64,7 +64,9 @@
 				   void *buffer, uint16_t maxbuf)
 {
 	const struct TCP_Server_Info *server = cookie_netfs_data;
-	const struct sockaddr *sa = (struct sockaddr *) &server->addr.sockAddr;
+	const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
+	const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
+	const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
 	struct cifs_server_key *key = buffer;
 	uint16_t key_len = sizeof(struct cifs_server_key);
 
@@ -76,16 +78,16 @@
 	 */
 	switch (sa->sa_family) {
 	case AF_INET:
-		key->family = server->addr.sockAddr.sin_family;
-		key->port = server->addr.sockAddr.sin_port;
-		key->addr[0].ipv4_addr = server->addr.sockAddr.sin_addr;
+		key->family = sa->sa_family;
+		key->port = addr->sin_port;
+		key->addr[0].ipv4_addr = addr->sin_addr;
 		key_len += sizeof(key->addr[0].ipv4_addr);
 		break;
 
 	case AF_INET6:
-		key->family = server->addr.sockAddr6.sin6_family;
-		key->port = server->addr.sockAddr6.sin6_port;
-		key->addr[0].ipv6_addr = server->addr.sockAddr6.sin6_addr;
+		key->family = sa->sa_family;
+		key->port = addr6->sin6_port;
+		key->addr[0].ipv6_addr = addr6->sin6_addr;
 		key_len += sizeof(key->addr[0].ipv6_addr);
 		break;
 
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 103ab8b..ede9830 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -119,29 +119,27 @@
 		    "Display Internal CIFS Data Structures for Debugging\n"
 		    "---------------------------------------------------\n");
 	seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
-	seq_printf(m, "Features: ");
+	seq_printf(m, "Features:");
 #ifdef CONFIG_CIFS_DFS_UPCALL
-	seq_printf(m, "dfs");
-	seq_putc(m, ' ');
+	seq_printf(m, " dfs");
 #endif
 #ifdef CONFIG_CIFS_FSCACHE
-	seq_printf(m, "fscache");
-	seq_putc(m, ' ');
+	seq_printf(m, " fscache");
 #endif
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-	seq_printf(m, "lanman");
-	seq_putc(m, ' ');
+	seq_printf(m, " lanman");
 #endif
 #ifdef CONFIG_CIFS_POSIX
-	seq_printf(m, "posix");
-	seq_putc(m, ' ');
+	seq_printf(m, " posix");
 #endif
 #ifdef CONFIG_CIFS_UPCALL
-	seq_printf(m, "spnego");
-	seq_putc(m, ' ');
+	seq_printf(m, " spnego");
 #endif
 #ifdef CONFIG_CIFS_XATTR
-	seq_printf(m, "xattr");
+	seq_printf(m, " xattr");
+#endif
+#ifdef CONFIG_CIFS_ACL
+	seq_printf(m, " acl");
 #endif
 	seq_putc(m, '\n');
 	seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 8704490..4dfba82 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -98,6 +98,8 @@
 cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
 {
 	struct TCP_Server_Info *server = sesInfo->server;
+	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
+	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
 	char *description, *dp;
 	size_t desc_len;
 	struct key *spnego_key;
@@ -127,10 +129,10 @@
 	dp = description + strlen(description);
 
 	/* add the server address */
-	if (server->addr.sockAddr.sin_family == AF_INET)
-		sprintf(dp, "ip4=%pI4", &server->addr.sockAddr.sin_addr);
-	else if (server->addr.sockAddr.sin_family == AF_INET6)
-		sprintf(dp, "ip6=%pI6", &server->addr.sockAddr6.sin6_addr);
+	if (server->dstaddr.ss_family == AF_INET)
+		sprintf(dp, "ip4=%pI4", &sa->sin_addr);
+	else if (server->dstaddr.ss_family == AF_INET6)
+		sprintf(dp, "ip6=%pI6", &sa6->sin6_addr);
 	else
 		goto out;
 
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index f856732..66f3d50 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -72,6 +72,7 @@
 	return 0;
 }
 
+/* must be called with server->srv_mutex held */
 int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
 		  __u32 *pexpected_response_sequence_number)
 {
@@ -84,14 +85,12 @@
 	if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
 		return rc;
 
-	spin_lock(&GlobalMid_Lock);
 	cifs_pdu->Signature.Sequence.SequenceNumber =
 			cpu_to_le32(server->sequence_number);
 	cifs_pdu->Signature.Sequence.Reserved = 0;
 
 	*pexpected_response_sequence_number = server->sequence_number++;
 	server->sequence_number++;
-	spin_unlock(&GlobalMid_Lock);
 
 	rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
 	if (rc)
@@ -149,6 +148,7 @@
 	return rc;
 }
 
+/* must be called with server->srv_mutex held */
 int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 		   __u32 *pexpected_response_sequence_number)
 {
@@ -162,14 +162,12 @@
 	if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
 		return rc;
 
-	spin_lock(&GlobalMid_Lock);
 	cifs_pdu->Signature.Sequence.SequenceNumber =
 				cpu_to_le32(server->sequence_number);
 	cifs_pdu->Signature.Sequence.Reserved = 0;
 
 	*pexpected_response_sequence_number = server->sequence_number++;
 	server->sequence_number++;
-	spin_unlock(&GlobalMid_Lock);
 
 	rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
 	if (rc)
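
With the spinlock gone, signing and the sequence-number increment are only
consistent if the caller holds server->srv_mutex around both signing and
sending.  A hedged sketch (my_transport_send() is hypothetical):

	static int my_send_signed(struct TCP_Server_Info *server,
				  struct smb_hdr *buf, __u32 *seq)
	{
		int rc;

		mutex_lock(&server->srv_mutex);
		rc = cifs_sign_smb(buf, server, seq);
		if (!rc)
			rc = my_transport_send(server, buf);	/* hypothetical */
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
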
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3936aa7..d9f652a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -174,6 +174,12 @@
 		goto out_no_root;
 	}
 
+	/* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+	if (cifs_sb_master_tcon(cifs_sb)->nocase)
+		sb->s_d_op = &cifs_ci_dentry_ops;
+	else
+		sb->s_d_op = &cifs_dentry_ops;
+
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 		cFYI(1, "export ops supported");
@@ -283,10 +289,13 @@
 	return 0;
 }
 
-static int cifs_permission(struct inode *inode, int mask)
+static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	struct cifs_sb_info *cifs_sb;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	cifs_sb = CIFS_SB(inode->i_sb);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -298,7 +307,7 @@
 		on the client (above and beyond ACL on servers) for
 		servers which do not support setting and viewing mode bits,
 		so allowing client to check permissions is useful */
-		return generic_permission(inode, mask, NULL);
+		return generic_permission(inode, mask, flags, NULL);
 }
 
 static struct kmem_cache *cifs_inode_cachep;
@@ -326,6 +335,8 @@
 	cifs_inode->invalid_mapping = false;
 	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
 	cifs_inode->server_eof = 0;
+	cifs_inode->uniqueid = 0;
+	cifs_inode->createtime = 0;
 
 	/* Can not set i_flags here - they get immediately overwritten
 	   to zero by the VFS */
@@ -334,10 +345,17 @@
 	return &cifs_inode->vfs_inode;
 }
 
+static void cifs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
+}
+
 static void
 cifs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
+	call_rcu(&inode->i_rcu, cifs_i_callback);
 }
 
 static void
@@ -351,18 +369,19 @@
 static void
 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
 {
+	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
+	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
+
 	seq_printf(s, ",addr=");
 
-	switch (server->addr.sockAddr.sin_family) {
+	switch (server->dstaddr.ss_family) {
 	case AF_INET:
-		seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr);
+		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
 		break;
 	case AF_INET6:
-		seq_printf(s, "%pI6",
-			   &server->addr.sockAddr6.sin6_addr.s6_addr);
-		if (server->addr.sockAddr6.sin6_scope_id)
-			seq_printf(s, "%%%u",
-				   server->addr.sockAddr6.sin6_scope_id);
+		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
+		if (sa6->sin6_scope_id)
+			seq_printf(s, "%%%u", sa6->sin6_scope_id);
 		break;
 	default:
 		seq_printf(s, "(unknown)");
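
With the old sockAddr/sockAddr6 union gone (see the cifsglob.h hunk below), cifs_show_address() dispatches on dstaddr.ss_family and casts the single sockaddr_storage to the right type. A small userspace analogue of that pattern, with inet_ntop() standing in for the kernel's %pI4/%pI6 printk formats:

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* print an address held in a sockaddr_storage, dispatching on ss_family */
static void show_address(const struct sockaddr_storage *ss)
{
	char buf[INET6_ADDRSTRLEN];

	switch (ss->ss_family) {
	case AF_INET: {
		const struct sockaddr_in *sa = (const struct sockaddr_in *)ss;
		printf("addr=%s\n", inet_ntop(AF_INET, &sa->sin_addr, buf, sizeof(buf)));
		break;
	}
	case AF_INET6: {
		const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)ss;
		printf("addr=%s", inet_ntop(AF_INET6, &sa6->sin6_addr, buf, sizeof(buf)));
		if (sa6->sin6_scope_id)
			printf("%%%u", sa6->sin6_scope_id);
		printf("\n");
		break;
	}
	default:
		printf("addr=(unknown)\n");
	}
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *sa = (struct sockaddr_in *)&ss;

	memset(&ss, 0, sizeof(ss));
	sa->sin_family = AF_INET;
	inet_pton(AF_INET, "192.168.1.10", &sa->sin_addr);
	show_address(&ss);
	return 0;
}
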
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 7136c0c..606ca8b 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -163,10 +163,7 @@
 	char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
 	char *hostname; /* hostname portion of UNC string */
 	struct socket *ssocket;
-	union {
-		struct sockaddr_in sockAddr;
-		struct sockaddr_in6 sockAddr6;
-	} addr;
+	struct sockaddr_storage dstaddr;
 	struct sockaddr_storage srcaddr; /* locally bind to this IP */
 	wait_queue_head_t response_q;
 	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
@@ -210,7 +207,7 @@
 	char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
 	/* 16th byte of RFC1001 workstation name is always null */
 	char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
-	__u32 sequence_number; /* needed for CIFS PDU signature */
+	__u32 sequence_number; /* for signing, protected by srv_mutex */
 	struct session_key session_key;
 	unsigned long lstrp; /* when we got last response from this server */
 	u16 dialect; /* dialect index that server chose */
@@ -456,6 +453,7 @@
 	bool invalid_mapping:1;		/* pagecache is invalid */
 	u64  server_eof;		/* current file size on server */
 	u64  uniqueid;			/* server inode number */
+	u64  createtime;		/* creation time on server */
 #ifdef CONFIG_CIFS_FSCACHE
 	struct fscache_cookie *fscache;
 #endif
@@ -576,6 +574,7 @@
 	u64		cf_uniqueid;
 	u64		cf_eof;
 	u64		cf_bytes;
+	u64		cf_createtime;
 	uid_t		cf_uid;
 	gid_t		cf_gid;
 	umode_t		cf_mode;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 67acfb3..2f6795e 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -401,15 +401,12 @@
 	else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) {
 		cFYI(1, "Kerberos only mechanism, enable extended security");
 		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
-	}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-	else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
+	} else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP)
 		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
 	else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) {
 		cFYI(1, "NTLMSSP only mechanism, enable extended security");
 		pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
 	}
-#endif
 
 	count = 0;
 	for (i = 0; i < CIFS_NUM_PROT; i++) {
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index cc1a860..a65d311 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -64,8 +64,8 @@
 	char *UNC;
 	char *UNCip;
 	char *iocharset;  /* local code page for mapping to and from Unicode */
-	char source_rfc1001_name[16]; /* netbios name of client */
-	char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */
+	char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
+	char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
 	uid_t cred_uid;
 	uid_t linux_uid;
 	gid_t linux_gid;
@@ -115,8 +115,8 @@
 #define TLINK_ERROR_EXPIRE	(1 * HZ)
 #define TLINK_IDLE_EXPIRE	(600 * HZ)
 
-static int ipv4_connect(struct TCP_Server_Info *server);
-static int ipv6_connect(struct TCP_Server_Info *server);
+static int ip_connect(struct TCP_Server_Info *server);
+static int generic_ip_connect(struct TCP_Server_Info *server);
 static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
@@ -200,10 +200,9 @@
 	while ((server->tcpStatus != CifsExiting) &&
 	       (server->tcpStatus != CifsGood)) {
 		try_to_freeze();
-		if (server->addr.sockAddr6.sin6_family == AF_INET6)
-			rc = ipv6_connect(server);
-		else
-			rc = ipv4_connect(server);
+
+		/* we should try only the port we connected to before */
+		rc = generic_ip_connect(server);
 		if (rc) {
 			cFYI(1, "reconnect error %d", rc);
 			msleep(3000);
@@ -477,7 +476,7 @@
 			 * initialize frame)
 			 */
 			cifs_set_port((struct sockaddr *)
-					&server->addr.sockAddr, CIFS_PORT);
+					&server->dstaddr, CIFS_PORT);
 			cifs_reconnect(server);
 			csocket = server->ssocket;
 			wake_up(&server->response_q);
@@ -817,11 +816,11 @@
 	 * informational, only used for servers that do not support
 	 * port 445 and it can be overridden at mount time
 	 */
-	memset(vol->source_rfc1001_name, 0x20, 15);
-	for (i = 0; i < strnlen(nodename, 15); i++)
+	memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
+	for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
 		vol->source_rfc1001_name[i] = toupper(nodename[i]);
 
-	vol->source_rfc1001_name[15] = 0;
+	vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
 	/* null target name indicates to use *SMBSERVR default called name
 	   if we end up sending RFC1001 session initialize */
 	vol->target_rfc1001_name[0] = 0;
@@ -985,13 +984,11 @@
 				return 1;
 			} else if (strnicmp(value, "krb5", 4) == 0) {
 				vol->secFlg |= CIFSSEC_MAY_KRB5;
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 			} else if (strnicmp(value, "ntlmsspi", 8) == 0) {
 				vol->secFlg |= CIFSSEC_MAY_NTLMSSP |
 					CIFSSEC_MUST_SIGN;
 			} else if (strnicmp(value, "ntlmssp", 7) == 0) {
 				vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
-#endif
 			} else if (strnicmp(value, "ntlmv2i", 7) == 0) {
 				vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
 					CIFSSEC_MUST_SIGN;
@@ -1168,22 +1165,22 @@
 			if (!value || !*value || (*value == ' ')) {
 				cFYI(1, "invalid (empty) netbiosname");
 			} else {
-				memset(vol->source_rfc1001_name, 0x20, 15);
-				for (i = 0; i < 15; i++) {
-				/* BB are there cases in which a comma can be
-				valid in this workstation netbios name (and need
-				special handling)? */
-
-				/* We do not uppercase netbiosname for user */
+				memset(vol->source_rfc1001_name, 0x20,
+					RFC1001_NAME_LEN);
+				/*
+				 * FIXME: are there cases in which a comma can
+				 * be valid in workstation netbios name (and
+				 * need special handling)?
+				 */
+				for (i = 0; i < RFC1001_NAME_LEN; i++) {
+					/* don't ucase netbiosname for user */
 					if (value[i] == 0)
 						break;
-					else
-						vol->source_rfc1001_name[i] =
-								value[i];
+					vol->source_rfc1001_name[i] = value[i];
 				}
 				/* The string has 16th byte zero still from
 				set at top of the function  */
-				if ((i == 15) && (value[i] != 0))
+				if (i == RFC1001_NAME_LEN && value[i] != 0)
 					printk(KERN_WARNING "CIFS: netbiosname"
 						" longer than 15 truncated.\n");
 			}
@@ -1193,7 +1190,8 @@
 				cFYI(1, "empty server netbiosname specified");
 			} else {
 				/* last byte, type, is 0x20 for servr type */
-				memset(vol->target_rfc1001_name, 0x20, 16);
+				memset(vol->target_rfc1001_name, 0x20,
+					RFC1001_NAME_LEN_WITH_NULL);
 
 				for (i = 0; i < 15; i++) {
 				/* BB are there cases in which a comma can be
@@ -1210,7 +1208,7 @@
 				}
 				/* The string has 16th byte zero still from
 				   set at top of the function  */
-				if ((i == 15) && (value[i] != 0))
+				if (i == RFC1001_NAME_LEN && value[i] != 0)
 					printk(KERN_WARNING "CIFS: server net"
 					"biosname longer than 15 truncated.\n");
 			}
@@ -1341,10 +1339,8 @@
 			vol->no_psx_acl = 0;
 		} else if (strnicmp(data, "noacl", 5) == 0) {
 			vol->no_psx_acl = 1;
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 		} else if (strnicmp(data, "locallease", 6) == 0) {
 			vol->local_lease = 1;
-#endif
 		} else if (strnicmp(data, "sign", 4) == 0) {
 			vol->secFlg |= CIFSSEC_MUST_SIGN;
 		} else if (strnicmp(data, "seal", 4) == 0) {
@@ -1454,35 +1450,71 @@
 	}
 }
 
+/*
+ * If no port is specified in the addr structure, try to match port 445
+ * first and, if that fails, port 139. This must only be called when the
+ * address families of server and addr are equal.
+ */
+static bool
+match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
+{
+	unsigned short int port, *sport;
+
+	switch (addr->sa_family) {
+	case AF_INET:
+		sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
+		port = ((struct sockaddr_in *) addr)->sin_port;
+		break;
+	case AF_INET6:
+		sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
+		port = ((struct sockaddr_in6 *) addr)->sin6_port;
+		break;
+	default:
+		WARN_ON(1);
+		return false;
+	}
+
+	if (!port) {
+		port = htons(CIFS_PORT);
+		if (port == *sport)
+			return true;
+
+		port = htons(RFC1001_PORT);
+	}
+
+	return port == *sport;
+}
 
 static bool
 match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
 	      struct sockaddr *srcaddr)
 {
-	struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
-	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
-
 	switch (addr->sa_family) {
-	case AF_INET:
-		if (addr4->sin_addr.s_addr !=
-		    server->addr.sockAddr.sin_addr.s_addr)
-			return false;
-		if (addr4->sin_port &&
-		    addr4->sin_port != server->addr.sockAddr.sin_port)
+	case AF_INET: {
+		struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
+		struct sockaddr_in *srv_addr4 =
+					(struct sockaddr_in *)&server->dstaddr;
+
+		if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
 			return false;
 		break;
-	case AF_INET6:
+	}
+	case AF_INET6: {
+		struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
+		struct sockaddr_in6 *srv_addr6 =
+					(struct sockaddr_in6 *)&server->dstaddr;
+
 		if (!ipv6_addr_equal(&addr6->sin6_addr,
-				     &server->addr.sockAddr6.sin6_addr))
+				     &srv_addr6->sin6_addr))
 			return false;
-		if (addr6->sin6_scope_id !=
-		    server->addr.sockAddr6.sin6_scope_id)
-			return false;
-		if (addr6->sin6_port &&
-		    addr6->sin6_port != server->addr.sockAddr6.sin6_port)
+		if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
 			return false;
 		break;
 	}
+	default:
+		WARN_ON(1);
+		return false; /* don't expect to be here */
+	}
 
 	if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
 		return false;
@@ -1549,6 +1581,9 @@
 				   (struct sockaddr *)&vol->srcaddr))
 			continue;
 
+		if (!match_port(server, addr))
+			continue;
+
 		if (!match_security(server, vol))
 			continue;
 
@@ -1681,14 +1716,13 @@
 		cFYI(1, "attempting ipv6 connect");
 		/* BB should we allow ipv6 on port 139? */
 		/* other OS never observed in Wild doing 139 with v6 */
-		memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
-			sizeof(struct sockaddr_in6));
-		rc = ipv6_connect(tcp_ses);
-	} else {
-		memcpy(&tcp_ses->addr.sockAddr, sin_server,
-			sizeof(struct sockaddr_in));
-		rc = ipv4_connect(tcp_ses);
-	}
+		memcpy(&tcp_ses->dstaddr, sin_server6,
+		       sizeof(struct sockaddr_in6));
+	} else
+		memcpy(&tcp_ses->dstaddr, sin_server,
+		       sizeof(struct sockaddr_in));
+
+	rc = ip_connect(tcp_ses);
 	if (rc < 0) {
 		cERROR(1, "Error connecting to socket. Aborting operation");
 		goto out_err_crypto_release;
@@ -1793,6 +1827,8 @@
 {
 	int rc = -ENOMEM, xid;
 	struct cifsSesInfo *ses;
+	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
 
 	xid = GetXid();
 
@@ -1836,12 +1872,10 @@
 
 	/* new SMB session uses our server ref */
 	ses->server = server;
-	if (server->addr.sockAddr6.sin6_family == AF_INET6)
-		sprintf(ses->serverName, "%pI6",
-			&server->addr.sockAddr6.sin6_addr);
+	if (server->dstaddr.ss_family == AF_INET6)
+		sprintf(ses->serverName, "%pI6", &addr6->sin6_addr);
 	else
-		sprintf(ses->serverName, "%pI4",
-			&server->addr.sockAddr.sin_addr.s_addr);
+		sprintf(ses->serverName, "%pI4", &addr->sin_addr);
 
 	if (volume_info->username)
 		strncpy(ses->userName, volume_info->username,
@@ -2136,19 +2170,106 @@
 }
 
 static int
-ipv4_connect(struct TCP_Server_Info *server)
+ip_rfc1001_connect(struct TCP_Server_Info *server)
 {
 	int rc = 0;
-	int val;
-	bool connected = false;
-	__be16 orig_port = 0;
+	/*
+	 * some servers require RFC1001 sessinit before sending
+	 * negprot - BB check reconnection in case where second
+	 * sessinit is sent but no second negprot
+	 */
+	struct rfc1002_session_packet *ses_init_buf;
+	struct smb_hdr *smb_buf;
+	ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
+			       GFP_KERNEL);
+	if (ses_init_buf) {
+		ses_init_buf->trailer.session_req.called_len = 32;
+
+		if (server->server_RFC1001_name &&
+		    server->server_RFC1001_name[0] != 0)
+			rfc1002mangle(ses_init_buf->trailer.
+				      session_req.called_name,
+				      server->server_RFC1001_name,
+				      RFC1001_NAME_LEN_WITH_NULL);
+		else
+			rfc1002mangle(ses_init_buf->trailer.
+				      session_req.called_name,
+				      DEFAULT_CIFS_CALLED_NAME,
+				      RFC1001_NAME_LEN_WITH_NULL);
+
+		ses_init_buf->trailer.session_req.calling_len = 32;
+
+		/*
+		 * calling name ends in null (byte 16) from old smb
+		 * convention.
+		 */
+		if (server->workstation_RFC1001_name &&
+		    server->workstation_RFC1001_name[0] != 0)
+			rfc1002mangle(ses_init_buf->trailer.
+				      session_req.calling_name,
+				      server->workstation_RFC1001_name,
+				      RFC1001_NAME_LEN_WITH_NULL);
+		else
+			rfc1002mangle(ses_init_buf->trailer.
+				      session_req.calling_name,
+				      "LINUX_CIFS_CLNT",
+				      RFC1001_NAME_LEN_WITH_NULL);
+
+		ses_init_buf->trailer.session_req.scope1 = 0;
+		ses_init_buf->trailer.session_req.scope2 = 0;
+		smb_buf = (struct smb_hdr *)ses_init_buf;
+
+		/* sizeof RFC1002_SESSION_REQUEST with no scope */
+		smb_buf->smb_buf_length = 0x81000044;
+		rc = smb_send(server, smb_buf, 0x44);
+		kfree(ses_init_buf);
+		/*
+		 * The RFC1001 layer in at least one server requires
+		 * a very short break before negprot, presumably
+		 * because it does not expect negprot to follow so
+		 * fast. This is a simple solution that works without
+		 * complicating the code and causes no significant
+		 * slowing down on mount for everyone else.
+		 */
+		usleep_range(1000, 2000);
+	}
+	/*
+	 * else the negprot may still work without this
+	 * even though malloc failed
+	 */
+
+	return rc;
+}
+
+static int
+generic_ip_connect(struct TCP_Server_Info *server)
+{
+	int rc = 0;
+	unsigned short int sport;
+	int slen, sfamily;
 	struct socket *socket = server->ssocket;
+	struct sockaddr *saddr;
+
+	saddr = (struct sockaddr *) &server->dstaddr;
+
+	if (server->dstaddr.ss_family == AF_INET6) {
+		sport = ((struct sockaddr_in6 *) saddr)->sin6_port;
+		slen = sizeof(struct sockaddr_in6);
+		sfamily = AF_INET6;
+	} else {
+		sport = ((struct sockaddr_in *) saddr)->sin_port;
+		slen = sizeof(struct sockaddr_in);
+		sfamily = AF_INET;
+	}
 
 	if (socket == NULL) {
-		rc = sock_create_kern(PF_INET, SOCK_STREAM,
+		rc = sock_create_kern(sfamily, SOCK_STREAM,
 				      IPPROTO_TCP, &socket);
 		if (rc < 0) {
 			cERROR(1, "Error %d creating socket", rc);
+			server->ssocket = NULL;
 			return rc;
 		}
 
@@ -2156,218 +2277,19 @@
 		cFYI(1, "Socket created");
 		server->ssocket = socket;
 		socket->sk->sk_allocation = GFP_NOFS;
-		cifs_reclassify_socket4(socket);
+		if (sfamily == AF_INET6)
+			cifs_reclassify_socket6(socket);
+		else
+			cifs_reclassify_socket4(socket);
 	}
 
 	rc = bind_socket(server);
 	if (rc < 0)
 		return rc;
 
-	/* user overrode default port */
-	if (server->addr.sockAddr.sin_port) {
-		rc = socket->ops->connect(socket, (struct sockaddr *)
-					  &server->addr.sockAddr,
-					  sizeof(struct sockaddr_in), 0);
-		if (rc >= 0)
-			connected = true;
-	}
-
-	if (!connected) {
-		/* save original port so we can retry user specified port
-			later if fall back ports fail this time  */
-		orig_port = server->addr.sockAddr.sin_port;
-
-		/* do not retry on the same port we just failed on */
-		if (server->addr.sockAddr.sin_port != htons(CIFS_PORT)) {
-			server->addr.sockAddr.sin_port = htons(CIFS_PORT);
-			rc = socket->ops->connect(socket,
-						(struct sockaddr *)
-						&server->addr.sockAddr,
-						sizeof(struct sockaddr_in), 0);
-			if (rc >= 0)
-				connected = true;
-		}
-	}
-	if (!connected) {
-		server->addr.sockAddr.sin_port = htons(RFC1001_PORT);
-		rc = socket->ops->connect(socket, (struct sockaddr *)
-					      &server->addr.sockAddr,
-					      sizeof(struct sockaddr_in), 0);
-		if (rc >= 0)
-			connected = true;
-	}
-
-	/* give up here - unless we want to retry on different
-		protocol families some day */
-	if (!connected) {
-		if (orig_port)
-			server->addr.sockAddr.sin_port = orig_port;
-		cFYI(1, "Error %d connecting to server via ipv4", rc);
-		sock_release(socket);
-		server->ssocket = NULL;
-		return rc;
-	}
-
-
-	/*
-	 * Eventually check for other socket options to change from
-	 *  the default. sock_setsockopt not used because it expects
-	 *  user space buffer
-	 */
-	socket->sk->sk_rcvtimeo = 7 * HZ;
-	socket->sk->sk_sndtimeo = 5 * HZ;
-
-	/* make the bufsizes depend on wsize/rsize and max requests */
-	if (server->noautotune) {
-		if (socket->sk->sk_sndbuf < (200 * 1024))
-			socket->sk->sk_sndbuf = 200 * 1024;
-		if (socket->sk->sk_rcvbuf < (140 * 1024))
-			socket->sk->sk_rcvbuf = 140 * 1024;
-	}
-
-	if (server->tcp_nodelay) {
-		val = 1;
-		rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
-				(char *)&val, sizeof(val));
-		if (rc)
-			cFYI(1, "set TCP_NODELAY socket option error %d", rc);
-	}
-
-	 cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
-		 socket->sk->sk_sndbuf,
-		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
-
-	/* send RFC1001 sessinit */
-	if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) {
-		/* some servers require RFC1001 sessinit before sending
-		negprot - BB check reconnection in case where second
-		sessinit is sent but no second negprot */
-		struct rfc1002_session_packet *ses_init_buf;
-		struct smb_hdr *smb_buf;
-		ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
-				       GFP_KERNEL);
-		if (ses_init_buf) {
-			ses_init_buf->trailer.session_req.called_len = 32;
-			if (server->server_RFC1001_name &&
-			    server->server_RFC1001_name[0] != 0)
-				rfc1002mangle(ses_init_buf->trailer.
-						session_req.called_name,
-					      server->server_RFC1001_name,
-					      RFC1001_NAME_LEN_WITH_NULL);
-			else
-				rfc1002mangle(ses_init_buf->trailer.
-						session_req.called_name,
-					      DEFAULT_CIFS_CALLED_NAME,
-					      RFC1001_NAME_LEN_WITH_NULL);
-
-			ses_init_buf->trailer.session_req.calling_len = 32;
-
-			/* calling name ends in null (byte 16) from old smb
-			convention. */
-			if (server->workstation_RFC1001_name &&
-			    server->workstation_RFC1001_name[0] != 0)
-				rfc1002mangle(ses_init_buf->trailer.
-						session_req.calling_name,
-					      server->workstation_RFC1001_name,
-					      RFC1001_NAME_LEN_WITH_NULL);
-			else
-				rfc1002mangle(ses_init_buf->trailer.
-						session_req.calling_name,
-					      "LINUX_CIFS_CLNT",
-					      RFC1001_NAME_LEN_WITH_NULL);
-
-			ses_init_buf->trailer.session_req.scope1 = 0;
-			ses_init_buf->trailer.session_req.scope2 = 0;
-			smb_buf = (struct smb_hdr *)ses_init_buf;
-			/* sizeof RFC1002_SESSION_REQUEST with no scope */
-			smb_buf->smb_buf_length = 0x81000044;
-			rc = smb_send(server, smb_buf, 0x44);
-			kfree(ses_init_buf);
-			msleep(1); /* RFC1001 layer in at least one server
-				      requires very short break before negprot
-				      presumably because not expecting negprot
-				      to follow so fast.  This is a simple
-				      solution that works without
-				      complicating the code and causes no
-				      significant slowing down on mount
-				      for everyone else */
-		}
-		/* else the negprot may still work without this
-		even though malloc failed */
-
-	}
-
-	return rc;
-}
-
-static int
-ipv6_connect(struct TCP_Server_Info *server)
-{
-	int rc = 0;
-	int val;
-	bool connected = false;
-	__be16 orig_port = 0;
-	struct socket *socket = server->ssocket;
-
-	if (socket == NULL) {
-		rc = sock_create_kern(PF_INET6, SOCK_STREAM,
-				      IPPROTO_TCP, &socket);
-		if (rc < 0) {
-			cERROR(1, "Error %d creating ipv6 socket", rc);
-			socket = NULL;
-			return rc;
-		}
-
-		/* BB other socket options to set KEEPALIVE, NODELAY? */
-		cFYI(1, "ipv6 Socket created");
-		server->ssocket = socket;
-		socket->sk->sk_allocation = GFP_NOFS;
-		cifs_reclassify_socket6(socket);
-	}
-
-	rc = bind_socket(server);
-	if (rc < 0)
-		return rc;
-
-	/* user overrode default port */
-	if (server->addr.sockAddr6.sin6_port) {
-		rc = socket->ops->connect(socket,
-				(struct sockaddr *) &server->addr.sockAddr6,
-				sizeof(struct sockaddr_in6), 0);
-		if (rc >= 0)
-			connected = true;
-	}
-
-	if (!connected) {
-		/* save original port so we can retry user specified port
-			later if fall back ports fail this time  */
-
-		orig_port = server->addr.sockAddr6.sin6_port;
-		/* do not retry on the same port we just failed on */
-		if (server->addr.sockAddr6.sin6_port != htons(CIFS_PORT)) {
-			server->addr.sockAddr6.sin6_port = htons(CIFS_PORT);
-			rc = socket->ops->connect(socket, (struct sockaddr *)
-					&server->addr.sockAddr6,
-					sizeof(struct sockaddr_in6), 0);
-			if (rc >= 0)
-				connected = true;
-		}
-	}
-	if (!connected) {
-		server->addr.sockAddr6.sin6_port = htons(RFC1001_PORT);
-		rc = socket->ops->connect(socket, (struct sockaddr *)
-				&server->addr.sockAddr6,
-				sizeof(struct sockaddr_in6), 0);
-		if (rc >= 0)
-			connected = true;
-	}
-
-	/* give up here - unless we want to retry on different
-		protocol families some day */
-	if (!connected) {
-		if (orig_port)
-			server->addr.sockAddr6.sin6_port = orig_port;
-		cFYI(1, "Error %d connecting to server via ipv6", rc);
+	rc = socket->ops->connect(socket, saddr, slen, 0);
+	if (rc < 0) {
+		cFYI(1, "Error %d connecting to server", rc);
 		sock_release(socket);
 		server->ssocket = NULL;
 		return rc;
@@ -2381,19 +2303,61 @@
 	socket->sk->sk_rcvtimeo = 7 * HZ;
 	socket->sk->sk_sndtimeo = 5 * HZ;
 
+	/* make the bufsizes depend on wsize/rsize and max requests */
+	if (server->noautotune) {
+		if (socket->sk->sk_sndbuf < (200 * 1024))
+			socket->sk->sk_sndbuf = 200 * 1024;
+		if (socket->sk->sk_rcvbuf < (140 * 1024))
+			socket->sk->sk_rcvbuf = 140 * 1024;
+	}
+
 	if (server->tcp_nodelay) {
-		val = 1;
+		int val = 1;
 		rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
 				(char *)&val, sizeof(val));
 		if (rc)
 			cFYI(1, "set TCP_NODELAY socket option error %d", rc);
 	}
 
-	server->ssocket = socket;
+	 cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
+		 socket->sk->sk_sndbuf,
+		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
+
+	if (sport == htons(RFC1001_PORT))
+		rc = ip_rfc1001_connect(server);
 
 	return rc;
 }
 
+static int
+ip_connect(struct TCP_Server_Info *server)
+{
+	unsigned short int *sport;
+	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+
+	if (server->dstaddr.ss_family == AF_INET6)
+		sport = &addr6->sin6_port;
+	else
+		sport = &addr->sin_port;
+
+	if (*sport == 0) {
+		int rc;
+
+		/* try with 445 port at first */
+		*sport = htons(CIFS_PORT);
+
+		rc = generic_ip_connect(server);
+		if (rc >= 0)
+			return rc;
+
+		/* if it failed, try with 139 port */
+		*sport = htons(RFC1001_PORT);
+	}
+
+	return generic_ip_connect(server);
+}
+
 void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
 			  struct super_block *sb, struct smb_vol *vol_info)
 {
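
ip_connect() above replaces the separate ipv4_connect()/ipv6_connect() paths: if the mount gave no port it tries 445 first and falls back to 139, while generic_ip_connect() simply reuses whatever port is already in dstaddr on reconnect. A hedged, IPv4-only userspace sketch of that fallback; connect_once() is a placeholder for generic_ip_connect(), and the port constants mirror CIFS_PORT and RFC1001_PORT:

#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

#define CIFS_PORT	445
#define RFC1001_PORT	139

/* placeholder for generic_ip_connect(): pretend only port 139 answers */
static int connect_once(struct sockaddr_storage *dst)
{
	unsigned short port = ntohs(((struct sockaddr_in *)dst)->sin_port);

	printf("trying port %u\n", port);
	return port == RFC1001_PORT ? 0 : -1;
}

/* mirror of the ip_connect() logic: no port given, so try 445 then 139 */
static int ip_connect(struct sockaddr_storage *dst)
{
	unsigned short *sport = &((struct sockaddr_in *)dst)->sin_port;

	if (*sport == 0) {
		*sport = htons(CIFS_PORT);
		if (connect_once(dst) >= 0)
			return 0;
		*sport = htons(RFC1001_PORT);
	}
	return connect_once(dst);
}

int main(void)
{
	struct sockaddr_storage dst;

	memset(&dst, 0, sizeof(dst));
	((struct sockaddr_in *)&dst)->sin_family = AF_INET;
	return ip_connect(&dst);
}
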
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3840edd..1e95dd6 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -130,17 +130,6 @@
 	return full_path;
 }
 
-static void setup_cifs_dentry(struct cifsTconInfo *tcon,
-			      struct dentry *direntry,
-			      struct inode *newinode)
-{
-	if (tcon->nocase)
-		direntry->d_op = &cifs_ci_dentry_ops;
-	else
-		direntry->d_op = &cifs_dentry_ops;
-	d_instantiate(direntry, newinode);
-}
-
 /* Inode operations in similar order to how they appear in Linux file fs.h */
 
 int
@@ -293,10 +282,8 @@
 			args.uid = NO_CHANGE_64;
 			args.gid = NO_CHANGE_64;
 		}
-		CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
-					cifs_sb->local_nls,
-					cifs_sb->mnt_cifs_flags &
-						CIFS_MOUNT_MAP_SPECIAL_CHR);
+		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle,
+					current->tgid);
 	} else {
 		/* BB implement mode setting via Windows security
 		   descriptors e.g. */
@@ -329,7 +316,7 @@
 
 cifs_create_set_dentry:
 	if (rc == 0)
-		setup_cifs_dentry(tcon, direntry, newinode);
+		d_instantiate(direntry, newinode);
 	else
 		cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
 
@@ -420,10 +407,6 @@
 
 		rc = cifs_get_inode_info_unix(&newinode, full_path,
 						inode->i_sb, xid);
-		if (pTcon->nocase)
-			direntry->d_op = &cifs_ci_dentry_ops;
-		else
-			direntry->d_op = &cifs_dentry_ops;
 
 		if (rc == 0)
 			d_instantiate(direntry, newinode);
@@ -603,10 +586,6 @@
 				parent_dir_inode->i_sb, xid, NULL);
 
 	if ((rc == 0) && (newInode != NULL)) {
-		if (pTcon->nocase)
-			direntry->d_op = &cifs_ci_dentry_ops;
-		else
-			direntry->d_op = &cifs_dentry_ops;
 		d_add(direntry, newInode);
 		if (posix_open) {
 			filp = lookup_instantiate_filp(nd, direntry,
@@ -633,10 +612,6 @@
 	} else if (rc == -ENOENT) {
 		rc = 0;
 		direntry->d_time = jiffies;
-		if (pTcon->nocase)
-			direntry->d_op = &cifs_ci_dentry_ops;
-		else
-			direntry->d_op = &cifs_dentry_ops;
 		d_add(direntry, NULL);
 	/*	if it was once a directory (but how can we tell?) we could do
 		shrink_dcache_parent(direntry); */
@@ -656,22 +631,37 @@
 static int
 cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
 {
-	int isValid = 1;
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
 
 	if (direntry->d_inode) {
 		if (cifs_revalidate_dentry(direntry))
 			return 0;
-	} else {
-		cFYI(1, "neg dentry 0x%p name = %s",
-			 direntry, direntry->d_name.name);
-		if (time_after(jiffies, direntry->d_time + HZ) ||
-			!lookupCacheEnabled) {
-			d_drop(direntry);
-			isValid = 0;
-		}
+		else
+			return 1;
 	}
 
-	return isValid;
+	/*
+	 * This may be nfsd (or something); either way, we cannot see the
+	 * intent of this lookup. Since it could be for creation, drop it.
+	 */
+	if (!nd)
+		return 0;
+
+	/*
+	 * Drop the negative dentry so that the case-sensitive name
+	 * specified by the user is used if this is for creation.
+	 */
+	if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) {
+		if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+			return 0;
+	}
+
+	if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled)
+		return 0;
+
+	return 1;
 }
 
 /* static int cifs_d_delete(struct dentry *direntry)
@@ -688,9 +678,10 @@
 /* d_delete:       cifs_d_delete,      */ /* not needed except for debugging */
 };
 
-static int cifs_ci_hash(struct dentry *dentry, struct qstr *q)
+static int cifs_ci_hash(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *q)
 {
-	struct nls_table *codepage = CIFS_SB(dentry->d_inode->i_sb)->local_nls;
+	struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
 	unsigned long hash;
 	int i;
 
@@ -703,21 +694,16 @@
 	return 0;
 }
 
-static int cifs_ci_compare(struct dentry *dentry, struct qstr *a,
-			   struct qstr *b)
+static int cifs_ci_compare(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	struct nls_table *codepage = CIFS_SB(dentry->d_inode->i_sb)->local_nls;
+	struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls;
 
-	if ((a->len == b->len) &&
-	    (nls_strnicmp(codepage, a->name, b->name, a->len) == 0)) {
-		/*
-		 * To preserve case, don't let an existing negative dentry's
-		 * case take precedence.  If a is not a negative dentry, this
-		 * should have no side effects
-		 */
-		memcpy((void *)a->name, b->name, a->len);
+	if ((name->len == len) &&
+	    (nls_strnicmp(codepage, name->name, str, len) == 0))
 		return 0;
-	}
 	return 1;
 }
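
Since sb->s_d_op is now set once at mount time (see the cifsfs.c hunk earlier in this diff), the per-call d_op assignments removed above are no longer needed, and the hash/compare callbacks take the new const-qualified prototypes. A userspace sketch of the case-insensitive hash-and-compare idea, using tolower() and strncasecmp() in place of the per-mount NLS codepage tables (the hash formula is illustrative, not the kernel's):

#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* case-insensitive name hash: equal names that differ only in case
 * must hash to the same bucket, or d_compare is never even reached */
static unsigned long ci_hash(const char *name, size_t len)
{
	unsigned long hash = 0;
	size_t i;

	for (i = 0; i < len; i++)
		hash = (hash + (unsigned long)tolower((unsigned char)name[i])) * 11;
	return hash;
}

/* 0 means "same name", mirroring the d_compare convention */
static int ci_compare(const char *a, size_t alen, const char *b, size_t blen)
{
	if (alen != blen)
		return 1;
	return strncasecmp(a, b, alen) ? 1 : 0;
}

int main(void)
{
	const char *x = "ReadMe.TXT", *y = "readme.txt";

	printf("hash(%s)=%lu hash(%s)=%lu\n",
	       x, ci_hash(x, strlen(x)), y, ci_hash(y, strlen(y)));
	printf("compare -> %d\n", ci_compare(x, strlen(x), y, strlen(y)));
	return 0;
}
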
 
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5a28660..d843631 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -104,53 +104,6 @@
 		return FILE_OPEN;
 }
 
-static inline int cifs_open_inode_helper(struct inode *inode,
-	struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
-	char *full_path, int xid)
-{
-	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
-	struct timespec temp;
-	int rc;
-
-	if (pCifsInode->clientCanCacheRead) {
-		/* we have the inode open somewhere else
-		   no need to discard cache data */
-		goto client_can_cache;
-	}
-
-	/* BB need same check in cifs_create too? */
-	/* if not oplocked, invalidate inode pages if mtime or file
-	   size changed */
-	temp = cifs_NTtimeToUnix(buf->LastWriteTime);
-	if (timespec_equal(&inode->i_mtime, &temp) &&
-			   (inode->i_size ==
-			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
-		cFYI(1, "inode unchanged on server");
-	} else {
-		if (inode->i_mapping) {
-			/* BB no need to lock inode until after invalidate
-			since namei code should already have it locked? */
-			rc = filemap_write_and_wait(inode->i_mapping);
-			mapping_set_error(inode->i_mapping, rc);
-		}
-		cFYI(1, "invalidating remote inode since open detected it "
-			 "changed");
-		invalidate_remote_inode(inode);
-	}
-
-client_can_cache:
-	if (pTcon->unix_ext)
-		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
-					      xid);
-	else
-		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
-					 xid, NULL);
-
-	cifs_set_oplock_level(pCifsInode, oplock);
-
-	return rc;
-}
-
 int cifs_posix_open(char *full_path, struct inode **pinode,
 			struct super_block *sb, int mode, unsigned int f_flags,
 			__u32 *poplock, __u16 *pnetfid, int xid)
@@ -213,6 +166,76 @@
 	return rc;
 }
 
+static int
+cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+	     struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
+	     __u16 *pnetfid, int xid)
+{
+	int rc;
+	int desiredAccess;
+	int disposition;
+	FILE_ALL_INFO *buf;
+
+	desiredAccess = cifs_convert_flags(f_flags);
+
+/*********************************************************************
+ *  open flag mapping table:
+ *
+ *	POSIX Flag            CIFS Disposition
+ *	----------            ----------------
+ *	O_CREAT               FILE_OPEN_IF
+ *	O_CREAT | O_EXCL      FILE_CREATE
+ *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
+ *	O_TRUNC               FILE_OVERWRITE
+ *	none of the above     FILE_OPEN
+ *
+ *	Note that there is not a direct match between disposition
+ *	FILE_SUPERSEDE (ie create whether or not file exists although
+ *	O_CREAT | O_TRUNC is similar but truncates the existing
+ *	file rather than creating a new file as FILE_SUPERSEDE does
+ *	(which uses the attributes / metadata passed in on open call)
+ *?
+ *?  O_SYNC is a reasonable match to CIFS writethrough flag
+ *?  and the read write flags match reasonably.  O_LARGEFILE
+ *?  is irrelevant because largefile support is always used
+ *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
+ *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
+ *********************************************************************/
+
+	disposition = cifs_get_disposition(f_flags);
+
+	/* BB pass O_SYNC flag through on file attributes .. BB */
+
+	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (tcon->ses->capabilities & CAP_NT_SMBS)
+		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
+			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
+			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
+				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	else
+		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
+			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
+			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
+				& CIFS_MOUNT_MAP_SPECIAL_CHR);
+
+	if (rc)
+		goto out;
+
+	if (tcon->unix_ext)
+		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
+					      xid);
+	else
+		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
+					 xid, pnetfid);
+
+out:
+	kfree(buf);
+	return rc;
+}
+
 struct cifsFileInfo *
 cifs_new_fileinfo(__u16 fileHandle, struct file *file,
 		  struct tcon_link *tlink, __u32 oplock)
@@ -317,10 +340,8 @@
 	struct cifsFileInfo *pCifsFile = NULL;
 	struct cifsInodeInfo *pCifsInode;
 	char *full_path = NULL;
-	int desiredAccess;
-	int disposition;
+	bool posix_open_ok = false;
 	__u16 netfid;
-	FILE_ALL_INFO *buf = NULL;
 
 	xid = GetXid();
 
@@ -358,17 +379,7 @@
 				file->f_flags, &oplock, &netfid, xid);
 		if (rc == 0) {
 			cFYI(1, "posix open succeeded");
-
-			pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
-						      oplock);
-			if (pCifsFile == NULL) {
-				CIFSSMBClose(xid, tcon, netfid);
-				rc = -ENOMEM;
-			}
-
-			cifs_fscache_set_inode_cookie(inode, file);
-
-			goto out;
+			posix_open_ok = true;
 		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
 			if (tcon->ses->serverNOS)
 				cERROR(1, "server %s of type %s returned"
@@ -385,103 +396,39 @@
 		   or DFS errors */
 	}
 
-	desiredAccess = cifs_convert_flags(file->f_flags);
-
-/*********************************************************************
- *  open flag mapping table:
- *
- *	POSIX Flag            CIFS Disposition
- *	----------            ----------------
- *	O_CREAT               FILE_OPEN_IF
- *	O_CREAT | O_EXCL      FILE_CREATE
- *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
- *	O_TRUNC               FILE_OVERWRITE
- *	none of the above     FILE_OPEN
- *
- *	Note that there is not a direct match between disposition
- *	FILE_SUPERSEDE (ie create whether or not file exists although
- *	O_CREAT | O_TRUNC is similar but truncates the existing
- *	file rather than creating a new file as FILE_SUPERSEDE does
- *	(which uses the attributes / metadata passed in on open call)
- *?
- *?  O_SYNC is a reasonable match to CIFS writethrough flag
- *?  and the read write flags match reasonably.  O_LARGEFILE
- *?  is irrelevant because largefile support is always used
- *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
- *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
- *********************************************************************/
-
-	disposition = cifs_get_disposition(file->f_flags);
-
-	/* BB pass O_SYNC flag through on file attributes .. BB */
-
-	/* Also refresh inode by passing in file_info buf returned by SMBOpen
-	   and calling get_inode_info with returned buf (at least helps
-	   non-Unix server case) */
-
-	/* BB we can not do this if this is the second open of a file
-	   and the first handle has writebehind data, we might be
-	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
-	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
-	if (!buf) {
-		rc = -ENOMEM;
-		goto out;
+	if (!posix_open_ok) {
+		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
+				  file->f_flags, &oplock, &netfid, xid);
+		if (rc)
+			goto out;
 	}
 
-	if (tcon->ses->capabilities & CAP_NT_SMBS)
-		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
-			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
-			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
-				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
-	else
-		rc = -EIO; /* no NT SMB support fall into legacy open below */
-
-	if (rc == -EIO) {
-		/* Old server, try legacy style OpenX */
-		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
-			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
-			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
-				& CIFS_MOUNT_MAP_SPECIAL_CHR);
-	}
-	if (rc) {
-		cFYI(1, "cifs_open returned 0x%x", rc);
-		goto out;
-	}
-
-	rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
-	if (rc != 0)
-		goto out;
-
 	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
 	if (pCifsFile == NULL) {
+		CIFSSMBClose(xid, tcon, netfid);
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	cifs_fscache_set_inode_cookie(inode, file);
 
-	if (oplock & CIFS_CREATE_ACTION) {
+	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
 		/* time to set mode which we can not set earlier due to
 		   problems creating new read-only files */
-		if (tcon->unix_ext) {
-			struct cifs_unix_set_info_args args = {
-				.mode	= inode->i_mode,
-				.uid	= NO_CHANGE_64,
-				.gid	= NO_CHANGE_64,
-				.ctime	= NO_CHANGE_64,
-				.atime	= NO_CHANGE_64,
-				.mtime	= NO_CHANGE_64,
-				.device	= 0,
-			};
-			CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
-					       cifs_sb->local_nls,
-					       cifs_sb->mnt_cifs_flags &
-						CIFS_MOUNT_MAP_SPECIAL_CHR);
-		}
+		struct cifs_unix_set_info_args args = {
+			.mode	= inode->i_mode,
+			.uid	= NO_CHANGE_64,
+			.gid	= NO_CHANGE_64,
+			.ctime	= NO_CHANGE_64,
+			.atime	= NO_CHANGE_64,
+			.mtime	= NO_CHANGE_64,
+			.device	= 0,
+		};
+		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
+					pCifsFile->pid);
 	}
 
 out:
-	kfree(buf);
 	kfree(full_path);
 	FreeXid(xid);
 	cifs_put_tlink(tlink);
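
cifs_nt_open() above carries over the POSIX-flag to CIFS-disposition table spelled out in its comment block. A standalone C rendering of that table; the FILE_* names are written here as an illustrative enum, not the wire constants from the CIFS headers:

#include <fcntl.h>
#include <stdio.h>

/* illustrative names only; the real constants live in the CIFS headers */
enum cifs_disposition {
	FILE_OPEN,		/* none of the flags below */
	FILE_OPEN_IF,		/* O_CREAT */
	FILE_CREATE,		/* O_CREAT | O_EXCL */
	FILE_OVERWRITE_IF,	/* O_CREAT | O_TRUNC */
	FILE_OVERWRITE,		/* O_TRUNC */
};

/* same mapping as the table documented in cifs_nt_open() */
static enum cifs_disposition get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	if (flags & O_CREAT)
		return FILE_OPEN_IF;
	if (flags & O_TRUNC)
		return FILE_OVERWRITE;
	return FILE_OPEN;
}

int main(void)
{
	printf("O_CREAT|O_EXCL  -> %d\n", get_disposition(O_CREAT | O_EXCL));
	printf("O_CREAT|O_TRUNC -> %d\n", get_disposition(O_CREAT | O_TRUNC));
	printf("O_TRUNC         -> %d\n", get_disposition(O_TRUNC));
	printf("plain open      -> %d\n", get_disposition(0));
	return 0;
}
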
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 589f3e3..b06b606 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -518,6 +518,7 @@
 
 	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
 	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
 
 	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
 		fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
@@ -779,6 +780,10 @@
 	if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
 		return 0;
 
+	/* use createtime like an i_generation field */
+	if (CIFS_I(inode)->createtime != fattr->cf_createtime)
+		return 0;
+
 	/* don't match inode of different type */
 	if ((inode->i_mode & S_IFMT) != (fattr->cf_mode & S_IFMT))
 		return 0;
@@ -796,6 +801,7 @@
 	struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
 
 	CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
+	CIFS_I(inode)->createtime = fattr->cf_createtime;
 	return 0;
 }
 
@@ -809,14 +815,14 @@
 {
 	struct dentry *dentry;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
 			return true;
 		}
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 	return false;
 }
 
@@ -1318,10 +1324,6 @@
 /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
 	to set uid/gid */
 			inc_nlink(inode);
-			if (pTcon->nocase)
-				direntry->d_op = &cifs_ci_dentry_ops;
-			else
-				direntry->d_op = &cifs_dentry_ops;
 
 			cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
 			cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1362,10 +1364,6 @@
 			rc = cifs_get_inode_info(&newinode, full_path, NULL,
 						 inode->i_sb, xid, NULL);
 
-		if (pTcon->nocase)
-			direntry->d_op = &cifs_ci_dentry_ops;
-		else
-			direntry->d_op = &cifs_dentry_ops;
 		d_instantiate(direntry, newinode);
 		 /* setting nlink not necessary except in cases where we
 		  * failed to get it from the server or was set bogus */
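
The inode.c hunks treat the server-reported creation time like an i_generation field: a cached inode with a matching uniqueid but a different createtime is considered a different file, which catches server-side reuse of inode numbers. A tiny standalone illustration of that check (the values are made up):

#include <stdint.h>
#include <stdio.h>

struct cached_inode {
	uint64_t uniqueid;
	uint64_t createtime;
};

/* mirrors the extra comparison added in the inode.c hunk above */
static int same_file(const struct cached_inode *c,
		     uint64_t uniqueid, uint64_t createtime)
{
	return c->uniqueid == uniqueid && c->createtime == createtime;
}

int main(void)
{
	struct cached_inode c = { .uniqueid = 42, .createtime = 131234567890ULL };

	printf("same inode number, same createtime: %d\n",
	       same_file(&c, 42, 131234567890ULL));
	printf("same inode number, new createtime:  %d\n",
	       same_file(&c, 42, 131299999999ULL));
	return 0;
}
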
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 85cdbf8..306769d 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -524,10 +524,6 @@
 			cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d",
 			      rc);
 		} else {
-			if (pTcon->nocase)
-				direntry->d_op = &cifs_ci_dentry_ops;
-			else
-				direntry->d_op = &cifs_dentry_ops;
 			d_instantiate(direntry, newinode);
 		}
 	}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a73eb9f..7f25cc3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -79,7 +79,7 @@
 	cFYI(1, "For %s", name->name);
 
 	if (parent->d_op && parent->d_op->d_hash)
-		parent->d_op->d_hash(parent, name);
+		parent->d_op->d_hash(parent, parent->d_inode, name);
 	else
 		name->hash = full_name_hash(name->name, name->len);
 
@@ -102,11 +102,6 @@
 		return NULL;
 	}
 
-	if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase)
-		dentry->d_op = &cifs_ci_dentry_ops;
-	else
-		dentry->d_op = &cifs_dentry_ops;
-
 	alias = d_materialise_unique(dentry, inode);
 	if (alias != NULL) {
 		dput(dentry);
@@ -160,6 +155,7 @@
 	fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
 	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
 	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
+	fattr->cf_createtime = le64_to_cpu(info->CreationTime);
 	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
 	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
 	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7b01d3f..eb74648 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -420,7 +420,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_CIFS_EXPERIMENTAL
 /* BB Move to ntlmssp.c eventually */
 
 /* We do not malloc the blob, it is passed in pbuffer, because
@@ -431,13 +430,14 @@
 	NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
 	__u32 flags;
 
+	memset(pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
 	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
 	sec_blob->MessageType = NtLmNegotiate;
 
 	/* BB is NTLMV2 session security format easier to use here? */
 	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
 		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-		NTLMSSP_NEGOTIATE_NTLM;
+		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
 	if (ses->server->secMode &
 			(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
 		flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -446,7 +446,7 @@
 				NTLMSSP_NEGOTIATE_EXTENDED_SEC;
 	}
 
-	sec_blob->NegotiateFlags |= cpu_to_le32(flags);
+	sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
 	sec_blob->WorkstationName.BufferOffset = 0;
 	sec_blob->WorkstationName.Length = 0;
@@ -477,7 +477,7 @@
 	flags = NTLMSSP_NEGOTIATE_56 |
 		NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
 		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-		NTLMSSP_NEGOTIATE_NTLM;
+		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
 	if (ses->server->secMode &
 	   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 		flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -485,7 +485,7 @@
 		flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
 
 	tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
-	sec_blob->NegotiateFlags |= cpu_to_le32(flags);
+	sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
 	sec_blob->LmChallengeResponse.BufferOffset =
 				cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE));
@@ -544,8 +544,9 @@
 	sec_blob->WorkstationName.MaximumLength = 0;
 	tmp += 2;
 
-	if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
-			!calc_seckey(ses)) {
+	if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
+		(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
+			&& !calc_seckey(ses)) {
 		memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
 		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
 		sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
@@ -563,17 +564,6 @@
 	return rc;
 }
 
-
-static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
-				 struct cifsSesInfo *ses)
-{
-	build_ntlmssp_negotiate_blob(&pSMB->req.SecurityBlob[0], ses);
-	pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
-
-	return;
-}
-#endif
-
 int
 CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
 	       const struct nls_table *nls_cp)
@@ -814,71 +804,70 @@
 		rc = -ENOSYS;
 		goto ssetup_exit;
 #endif /* CONFIG_CIFS_UPCALL */
-	} else {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-		if (type == RawNTLMSSP) {
-			if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
-				cERROR(1, "NTLMSSP requires Unicode support");
-				rc = -ENOSYS;
-				goto ssetup_exit;
-			}
-
-			cFYI(1, "ntlmssp session setup phase %d", phase);
-			pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
-			capabilities |= CAP_EXTENDED_SECURITY;
-			pSMB->req.Capabilities |= cpu_to_le32(capabilities);
-			if (phase == NtLmNegotiate) {
-				setup_ntlmssp_neg_req(pSMB, ses);
-				iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
-				iov[1].iov_base = &pSMB->req.SecurityBlob[0];
-			} else if (phase == NtLmAuthenticate) {
-				/* 5 is an empirical value, large enought to
-				 * hold authenticate message, max 10 of
-				 * av paris, doamin,user,workstation mames,
-				 * flags etc..
-				 */
-				ntlmsspblob = kmalloc(
-					5*sizeof(struct _AUTHENTICATE_MESSAGE),
-					GFP_KERNEL);
-				if (!ntlmsspblob) {
-					cERROR(1, "Can't allocate NTLMSSP");
-					rc = -ENOMEM;
-					goto ssetup_exit;
-				}
-
-				rc = build_ntlmssp_auth_blob(ntlmsspblob,
-							&blob_len, ses, nls_cp);
-				if (rc)
-					goto ssetup_exit;
-				iov[1].iov_len = blob_len;
-				iov[1].iov_base = ntlmsspblob;
-				pSMB->req.SecurityBlobLength =
-					cpu_to_le16(blob_len);
-				/* Make sure that we tell the server that we
-				   are using the uid that it just gave us back
-				   on the response (challenge) */
-				smb_buf->Uid = ses->Suid;
-			} else {
-				cERROR(1, "invalid phase %d", phase);
-				rc = -ENOSYS;
-				goto ssetup_exit;
-			}
-			/* unicode strings must be word aligned */
-			if ((iov[0].iov_len + iov[1].iov_len) % 2) {
-				*bcc_ptr = 0;
-				bcc_ptr++;
-			}
-			unicode_oslm_strings(&bcc_ptr, nls_cp);
-		} else {
-			cERROR(1, "secType %d not supported!", type);
+	} else if (type == RawNTLMSSP) {
+		if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
+			cERROR(1, "NTLMSSP requires Unicode support");
 			rc = -ENOSYS;
 			goto ssetup_exit;
 		}
-#else
+
+		cFYI(1, "ntlmssp session setup phase %d", phase);
+		pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+		capabilities |= CAP_EXTENDED_SECURITY;
+		pSMB->req.Capabilities |= cpu_to_le32(capabilities);
+		switch (phase) {
+		case NtLmNegotiate:
+			build_ntlmssp_negotiate_blob(
+				pSMB->req.SecurityBlob, ses);
+			iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
+			iov[1].iov_base = pSMB->req.SecurityBlob;
+			pSMB->req.SecurityBlobLength =
+				cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
+			break;
+		case NtLmAuthenticate:
+			/*
+			 * 5 is an empirical value, large enough to hold
+			 * authenticate message plus max 10 of av pairs,
+			 * domain, user, workstation names, flags, etc.
+			 */
+			ntlmsspblob = kzalloc(
+				5*sizeof(struct _AUTHENTICATE_MESSAGE),
+				GFP_KERNEL);
+			if (!ntlmsspblob) {
+				cERROR(1, "Can't allocate NTLMSSP blob");
+				rc = -ENOMEM;
+				goto ssetup_exit;
+			}
+
+			rc = build_ntlmssp_auth_blob(ntlmsspblob,
+						&blob_len, ses, nls_cp);
+			if (rc)
+				goto ssetup_exit;
+			iov[1].iov_len = blob_len;
+			iov[1].iov_base = ntlmsspblob;
+			pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
+			/*
+			 * Make sure that we tell the server that we are using
+			 * the uid that it just gave us back on the response
+			 * (challenge)
+			 */
+			smb_buf->Uid = ses->Suid;
+			break;
+		default:
+			cERROR(1, "invalid phase %d", phase);
+			rc = -ENOSYS;
+			goto ssetup_exit;
+		}
+		/* unicode strings must be word aligned */
+		if ((iov[0].iov_len + iov[1].iov_len) % 2) {
+			*bcc_ptr = 0;
+			bcc_ptr++;
+		}
+		unicode_oslm_strings(&bcc_ptr, nls_cp);
+	} else {
 		cERROR(1, "secType %d not supported!", type);
 		rc = -ENOSYS;
 		goto ssetup_exit;
-#endif
 	}
 
 	iov[2].iov_base = str_area;
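
Two small fixes in the sess.c hunks above: the negotiate blob is memset to zero before it is filled in, and NegotiateFlags is assigned rather than OR-ed, so uninitialized memory can no longer leak into the flags field. A minimal standalone sketch of why that combination matters; the FLAG_* values are placeholders, not the NTLMSSP_* constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FLAG_UNICODE	0x00000001u	/* placeholder flag values */
#define FLAG_SIGN	0x00000010u

struct blob {
	uint32_t negotiate_flags;
	/* ...other fields... */
};

static void build_blob(struct blob *b, int want_signing)
{
	uint32_t flags = FLAG_UNICODE;

	/* zero the whole blob first, as the sess.c fix now does */
	memset(b, 0, sizeof(*b));

	if (want_signing)
		flags |= FLAG_SIGN;

	/* plain assignment: OR-ing into uninitialized memory would keep
	 * whatever garbage happened to be there before */
	b->negotiate_flags = flags;
}

int main(void)
{
	struct blob b;

	build_blob(&b, 1);
	printf("flags = 0x%08x\n", b.negotiate_flags);
	return 0;
}
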
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index e0588cd..59ca81b 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -119,7 +119,7 @@
 	if (ssocket == NULL)
 		return -ENOTSOCK; /* BB eventually add reconnect code here */
 
-	smb_msg.msg_name = (struct sockaddr *) &server->addr.sockAddr;
+	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
 	smb_msg.msg_namelen = sizeof(struct sockaddr);
 	smb_msg.msg_control = NULL;
 	smb_msg.msg_controllen = 0;
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 9060f08..6901578 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -20,10 +20,9 @@
 #include <linux/spinlock.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 static atomic_t permission_epoch = ATOMIC_INIT(0);
 
@@ -93,7 +92,7 @@
 	struct list_head *child;
 	struct dentry *de;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	list_for_each(child, &parent->d_subdirs)
 	{
 		de = list_entry(child, struct dentry, d_u.d_child);
@@ -102,7 +101,7 @@
 			continue;
 		coda_flag_inode(de->d_inode, flag);
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 	return; 
 }
 
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 6022405..6475877 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -7,9 +7,8 @@
 #include <linux/time.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
+#include "coda_linux.h"
 
 static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2)
 {
diff --git a/include/linux/coda_cache.h b/fs/coda/coda_cache.h
similarity index 100%
rename from include/linux/coda_cache.h
rename to fs/coda/coda_cache.h
diff --git a/include/linux/coda_fs_i.h b/fs/coda/coda_fs_i.h
similarity index 100%
rename from include/linux/coda_fs_i.h
rename to fs/coda/coda_fs_i.h
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index bf4a3fd..2bdbcc1 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -17,9 +17,8 @@
 #include <linux/string.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
+#include "coda_linux.h"
 
 /* initialize the debugging variables */
 int coda_fake_statfs;
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h
new file mode 100644
index 0000000..9b0c532
--- /dev/null
+++ b/fs/coda/coda_linux.h
@@ -0,0 +1,101 @@
+/* 
+ * Coda File System, Linux Kernel module
+ * 
+ * Original version, adapted from cfs_mach.c, (C) Carnegie Mellon University
+ * Linux modifications (C) 1996, Peter J. Braam
+ * Rewritten for Linux 2.1 (C) 1997 Carnegie Mellon University
+ *
+ * Carnegie Mellon University encourages users of this software to
+ * contribute improvements to the Coda project.
+ */
+
+#ifndef _LINUX_CODA_FS
+#define _LINUX_CODA_FS
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/wait.h>		
+#include <linux/types.h>
+#include <linux/fs.h>
+#include "coda_fs_i.h"
+
+/* operations */
+extern const struct inode_operations coda_dir_inode_operations;
+extern const struct inode_operations coda_file_inode_operations;
+extern const struct inode_operations coda_ioctl_inode_operations;
+
+extern const struct dentry_operations coda_dentry_operations;
+
+extern const struct address_space_operations coda_file_aops;
+extern const struct address_space_operations coda_symlink_aops;
+
+extern const struct file_operations coda_dir_operations;
+extern const struct file_operations coda_file_operations;
+extern const struct file_operations coda_ioctl_operations;
+
+/* operations shared over more than one file */
+int coda_open(struct inode *i, struct file *f);
+int coda_release(struct inode *i, struct file *f);
+int coda_permission(struct inode *inode, int mask, unsigned int flags);
+int coda_revalidate_inode(struct dentry *);
+int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+int coda_setattr(struct dentry *, struct iattr *);
+
+/* this file: helpers */
+char *coda_f2s(struct CodaFid *f);
+int coda_isroot(struct inode *i);
+int coda_iscontrol(const char *name, size_t length);
+
+void coda_vattr_to_iattr(struct inode *, struct coda_vattr *);
+void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *);
+unsigned short coda_flags_to_cflags(unsigned short);
+
+/* sysctl.h */
+void coda_sysctl_init(void);
+void coda_sysctl_clean(void);
+
+#define CODA_ALLOC(ptr, cast, size) do { \
+    if (size < PAGE_SIZE) \
+        ptr = kmalloc((unsigned long) size, GFP_KERNEL); \
+    else \
+        ptr = (cast)vmalloc((unsigned long) size); \
+    if (!ptr) \
+        printk("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \
+    else memset( ptr, 0, size ); \
+} while (0)
+
+
+#define CODA_FREE(ptr,size) \
+    do { if (size < PAGE_SIZE) kfree((ptr)); else vfree((ptr)); } while (0)
+
+/* inode to cnode access functions */
+
+static inline struct coda_inode_info *ITOC(struct inode *inode)
+{
+	return list_entry(inode, struct coda_inode_info, vfs_inode);
+}
+
+static __inline__ struct CodaFid *coda_i2f(struct inode *inode)
+{
+	return &(ITOC(inode)->c_fid);
+}
+
+static __inline__ char *coda_i2s(struct inode *inode)
+{
+	return coda_f2s(&(ITOC(inode)->c_fid));
+}
+
+/* this will not zap the inode away */
+static __inline__ void coda_flag_inode(struct inode *inode, int flag)
+{
+	struct coda_inode_info *cii = ITOC(inode);
+
+	spin_lock(&cii->c_lock);
+	cii->c_flags |= flag;
+	spin_unlock(&cii->c_lock);
+}		
+
+#endif
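
The CODA_ALLOC/CODA_FREE pair above picks kmalloc() or vmalloc() based on the request size and hands back zeroed memory. As a rough usage sketch, modelled on how the upcall path allocates its request buffers (kernel-side fragment only; it assumes this header and linux/errno.h are in scope, and coda_example_upcall() is a made-up name):

static int coda_example_upcall(void)
{
	union inputArgs *inp;
	int insize = sizeof(union inputArgs);

	CODA_ALLOC(inp, union inputArgs *, insize);
	if (!inp)
		return -ENOMEM;

	/* ... fill in the request and pass it to Venus ... */

	CODA_FREE(inp, insize);
	return 0;
}
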
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 5d8b355..2b8dae4 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -18,14 +18,14 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/spinlock.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
@@ -47,7 +47,7 @@
 
 /* dentry ops */
 static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
-static int coda_dentry_delete(struct dentry *);
+static int coda_dentry_delete(const struct dentry *);
 
 /* support routines */
 static int coda_venus_readdir(struct file *coda_file, void *buf,
@@ -60,7 +60,7 @@
 }
 #define CODA_EIO_ERROR ((void *) (coda_return_EIO))
 
-static const struct dentry_operations coda_dentry_operations =
+const struct dentry_operations coda_dentry_operations =
 {
 	.d_revalidate	= coda_dentry_revalidate,
 	.d_delete	= coda_dentry_delete,
@@ -125,8 +125,6 @@
 		return ERR_PTR(error);
 
 exit:
-	entry->d_op = &coda_dentry_operations;
-
 	if (inode && (type & CODA_NOCACHE))
 		coda_flag_inode(inode, C_VATTR | C_PURGE);
 
@@ -134,10 +132,13 @@
 }
 
 
-int coda_permission(struct inode *inode, int mask)
+int coda_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	int error;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
  
 	if (!mask)
@@ -541,9 +542,13 @@
 /* called when a cache lookup succeeds */
 static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
 {
-	struct inode *inode = de->d_inode;
+	struct inode *inode;
 	struct coda_inode_info *cii;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = de->d_inode;
 	if (!inode || coda_isroot(inode))
 		goto out;
 	if (is_bad_inode(inode))
@@ -559,7 +564,7 @@
 	if (cii->c_flags & C_FLUSH) 
 		coda_flag_inode_children(inode, C_FLUSH);
 
-	if (atomic_read(&de->d_count) > 1)
+	if (de->d_count > 1)
 		/* pretend it's valid, but don't change the flags */
 		goto out;
 
@@ -577,7 +582,7 @@
  * This is the callback from dput() when d_count is going to 0.
  * We use this to unhash dentries with bad inodes.
  */
-static int coda_dentry_delete(struct dentry * dentry)
+static int coda_dentry_delete(const struct dentry * dentry)
 {
 	int flags;
 
diff --git a/fs/coda/file.c b/fs/coda/file.c
index c8b50ba..0433057 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -21,10 +21,9 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
 #include "coda_int.h"
 
 static ssize_t
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 5ea57c8..871b277 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -28,10 +28,9 @@
 #include <linux/vmalloc.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
@@ -45,7 +44,7 @@
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 	struct coda_inode_info *ei;
-	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -56,9 +55,16 @@
 	return &ei->vfs_inode;
 }
 
+static void coda_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(coda_inode_cachep, ITOC(inode));
+}
+
 static void coda_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(coda_inode_cachep, ITOC(inode));
+	call_rcu(&inode->i_rcu, coda_i_callback);
 }
 
 static void init_once(void *foo)
@@ -186,6 +192,7 @@
 	sb->s_blocksize_bits = 12;
 	sb->s_magic = CODA_SUPER_MAGIC;
 	sb->s_op = &coda_super_operations;
+	sb->s_d_op = &coda_dentry_operations;
 	sb->s_bdi = &vc->bdi;
 
 	/* get root fid from Venus: this needs the root inode */
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 2fd89b5..6cbb3af 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -19,12 +19,12 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
+
 /* pioctl ops */
-static int coda_ioctl_permission(struct inode *inode, int mask);
+static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags);
 static long coda_pioctl(struct file *filp, unsigned int cmd,
 			unsigned long user_data);
 
@@ -41,8 +41,10 @@
 };
 
 /* the coda pioctl inode ops */
-static int coda_ioctl_permission(struct inode *inode, int mask)
+static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
 {
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
 	return (mask & MAY_EXEC) ? -EACCES : 0;
 }
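
The -ECHILD returns added above follow the rcu-walk convention introduced by this series: when an operation is called in the lockless fast path (IPERM_FLAG_RCU / LOOKUP_RCU) and would need to block or take locks, it simply declines, and the VFS repeats the call in ref-walk mode. A minimal userspace analogue of that decline-and-retry shape (all names here are hypothetical stand-ins, not part of the patch):

#include <errno.h>
#include <stdio.h>

#define WALK_LOCKLESS 0x1   /* stand-in for IPERM_FLAG_RCU / LOOKUP_RCU */
#define WANT_EXEC     0x2   /* stand-in for MAY_EXEC */

/* The lockless fast path refuses anything it cannot decide without
 * blocking; the caller then repeats the call in the slow, locked mode. */
static int check_permission(int mask, unsigned int flags)
{
	if (flags & WALK_LOCKLESS)
		return -ECHILD;          /* cannot sleep or take locks here */
	/* slow path: free to block, take locks, ask the server, ... */
	return (mask & WANT_EXEC) ? -EACCES : 0;
}

int main(void)
{
	int ret = check_permission(WANT_EXEC, WALK_LOCKLESS);
	if (ret == -ECHILD)              /* fall back to ref-walk */
		ret = check_permission(WANT_EXEC, 0);
	printf("permission -> %d (%s)\n", ret, ret ? "denied" : "granted");
	return 0;
}
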
 
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 62647a8..8f616e0 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -43,10 +43,10 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
+
 #include "coda_int.h"
 
 /* statistics */
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index af78f00..ab94ef6 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -16,9 +16,9 @@
 #include <linux/pagemap.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
+
+#include "coda_linux.h"
 
 static int coda_symlink_filler(struct file *file, struct page *page)
 {
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index c3563ca..9727e0c 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -33,10 +33,9 @@
 #include <linux/vfs.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 175736c..61abb63 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -836,6 +836,7 @@
 COMPATIBLE_IOCTL(TCSETSF)
 COMPATIBLE_IOCTL(TIOCLINUX)
 COMPATIBLE_IOCTL(TIOCSBRK)
+COMPATIBLE_IOCTL(TIOCGDEV)
 COMPATIBLE_IOCTL(TIOCCBRK)
 COMPATIBLE_IOCTL(TIOCGSID)
 COMPATIBLE_IOCTL(TIOCGICOUNT)
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index da6061a..82bda8f 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -90,6 +90,7 @@
 extern const struct file_operations bin_fops;
 extern const struct inode_operations configfs_dir_inode_operations;
 extern const struct inode_operations configfs_symlink_inode_operations;
+extern const struct dentry_operations configfs_dentry_ops;
 
 extern int configfs_symlink(struct inode *dir, struct dentry *dentry,
 			    const char *symname);
@@ -120,7 +121,7 @@
 {
 	struct config_item * item = NULL;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	if (!d_unhashed(dentry)) {
 		struct configfs_dirent * sd = dentry->d_fsdata;
 		if (sd->s_type & CONFIGFS_ITEM_LINK) {
@@ -129,7 +130,7 @@
 		} else
 			item = config_item_get(sd->s_element);
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
 
 	return item;
 }
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 0b502f8..90ff3cb 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -67,12 +67,12 @@
  * We _must_ delete our dentries on last dput, as the chain-to-parent
  * behavior is required to clear the parents of default_groups.
  */
-static int configfs_d_delete(struct dentry *dentry)
+static int configfs_d_delete(const struct dentry *dentry)
 {
 	return 1;
 }
 
-static const struct dentry_operations configfs_dentry_ops = {
+const struct dentry_operations configfs_dentry_ops = {
 	.d_iput		= configfs_d_iput,
 	/* simple_delete_dentry() isn't exported */
 	.d_delete	= configfs_d_delete,
@@ -232,10 +232,8 @@
 
 	sd->s_mode = mode;
 	sd->s_dentry = dentry;
-	if (dentry) {
+	if (dentry)
 		dentry->d_fsdata = configfs_get(sd);
-		dentry->d_op = &configfs_dentry_ops;
-	}
 
 	return 0;
 }
@@ -278,7 +276,6 @@
 		error = configfs_create(d, mode, init_dir);
 		if (!error) {
 			inc_nlink(p->d_inode);
-			(d)->d_op = &configfs_dentry_ops;
 		} else {
 			struct configfs_dirent *sd = d->d_fsdata;
 			if (sd) {
@@ -371,9 +368,7 @@
 				   CONFIGFS_ITEM_LINK);
 	if (!err) {
 		err = configfs_create(dentry, mode, init_symlink);
-		if (!err)
-			dentry->d_op = &configfs_dentry_ops;
-		else {
+		if (err) {
 			struct configfs_dirent *sd = dentry->d_fsdata;
 			if (sd) {
 				spin_lock(&configfs_dirent_lock);
@@ -399,8 +394,7 @@
 	if (d->d_inode)
 		simple_rmdir(parent->d_inode,d);
 
-	pr_debug(" o %s removing done (%d)\n",d->d_name.name,
-		 atomic_read(&d->d_count));
+	pr_debug(" o %s removing done (%d)\n",d->d_name.name, d->d_count);
 
 	dput(parent);
 }
@@ -448,7 +442,6 @@
 		return error;
 	}
 
-	dentry->d_op = &configfs_dentry_ops;
 	d_rehash(dentry);
 
 	return 0;
@@ -493,7 +486,10 @@
 		 * If it doesn't exist and it isn't a NOT_PINNED item,
 		 * it must be negative.
 		 */
-		return simple_lookup(dir, dentry, nd);
+		if (dentry->d_name.len > NAME_MAX)
+			return ERR_PTR(-ENAMETOOLONG);
+		d_add(dentry, NULL);
+		return NULL;
 	}
 
 out:
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 253476d..c83f476 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -250,18 +250,14 @@
 	struct dentry * dentry = sd->s_dentry;
 
 	if (dentry) {
-		spin_lock(&dcache_lock);
 		spin_lock(&dentry->d_lock);
 		if (!(d_unhashed(dentry) && dentry->d_inode)) {
-			dget_locked(dentry);
+			dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
 			simple_unlink(parent->d_inode, dentry);
-		} else {
+		} else
 			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
-		}
 	}
 }
 
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 7d3607f..ecc6217 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -101,6 +101,7 @@
 	configfs_root_group.cg_item.ci_dentry = root;
 	root->d_fsdata = &configfs_root;
 	sb->s_root = root;
+	sb->s_d_op = &configfs_dentry_ops; /* the rest get that */
 	return 0;
 }
 
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 32fd5fe..e141939 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -34,57 +34,81 @@
 static DEFINE_MUTEX(read_mutex);
 
 
-/* These two macros may change in future, to provide better st_ino
-   semantics. */
-#define CRAMINO(x)	(((x)->offset && (x)->size)?(x)->offset<<2:1)
+/* These macros may change in future, to provide better st_ino semantics. */
 #define OFFSET(x)	((x)->i_ino)
 
-static void setup_inode(struct inode *inode, struct cramfs_inode * cramfs_inode)
+static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
 {
+	if (!cino->offset)
+		return offset + 1;
+	if (!cino->size)
+		return offset + 1;
+
+	/*
+	 * The file mode test fixes buggy mkcramfs implementations where
+	 * cramfs_inode->offset is set to a non-zero value for entries
+	 * which did not contain data, like device nodes and fifos.
+	 */
+	switch (cino->mode & S_IFMT) {
+	case S_IFREG:
+	case S_IFDIR:
+	case S_IFLNK:
+		return cino->offset << 2;
+	default:
+		break;
+	}
+	return offset + 1;
+}
+
+static struct inode *get_cramfs_inode(struct super_block *sb,
+	struct cramfs_inode *cramfs_inode, unsigned int offset)
+{
+	struct inode *inode;
 	static struct timespec zerotime;
+
+	inode = iget_locked(sb, cramino(cramfs_inode, offset));
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	switch (cramfs_inode->mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_fop = &generic_ro_fops;
+		inode->i_data.a_ops = &cramfs_aops;
+		break;
+	case S_IFDIR:
+		inode->i_op = &cramfs_dir_inode_operations;
+		inode->i_fop = &cramfs_directory_operations;
+		break;
+	case S_IFLNK:
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_data.a_ops = &cramfs_aops;
+		break;
+	default:
+		init_special_inode(inode, cramfs_inode->mode,
+				old_decode_dev(cramfs_inode->size));
+	}
+
 	inode->i_mode = cramfs_inode->mode;
 	inode->i_uid = cramfs_inode->uid;
-	inode->i_size = cramfs_inode->size;
-	inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
 	inode->i_gid = cramfs_inode->gid;
+
+	/* if the lower 2 bits are zero, the inode contains data */
+	if (!(inode->i_ino & 3)) {
+		inode->i_size = cramfs_inode->size;
+		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+	}
+
 	/* Struct copy intentional */
 	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
 	/* inode->i_nlink is left 1 - arguably wrong for directories,
 	   but it's the best we can do without reading the directory
 	   contents.  1 yields the right result in GNU find, even
 	   without -noleaf option. */
-	if (S_ISREG(inode->i_mode)) {
-		inode->i_fop = &generic_ro_fops;
-		inode->i_data.a_ops = &cramfs_aops;
-	} else if (S_ISDIR(inode->i_mode)) {
-		inode->i_op = &cramfs_dir_inode_operations;
-		inode->i_fop = &cramfs_directory_operations;
-	} else if (S_ISLNK(inode->i_mode)) {
-		inode->i_op = &page_symlink_inode_operations;
-		inode->i_data.a_ops = &cramfs_aops;
-	} else {
-		init_special_inode(inode, inode->i_mode,
-			old_decode_dev(cramfs_inode->size));
-	}
-}
 
-static struct inode *get_cramfs_inode(struct super_block *sb,
-				struct cramfs_inode * cramfs_inode)
-{
-	struct inode *inode;
-	if (CRAMINO(cramfs_inode) == 1) {
-		inode = new_inode(sb);
-		if (inode) {
-			inode->i_ino = 1;
-			setup_inode(inode, cramfs_inode);
-		}
-	} else {
-		inode = iget_locked(sb, CRAMINO(cramfs_inode));
-		if (inode && (inode->i_state & I_NEW)) {
-			setup_inode(inode, cramfs_inode);
-			unlock_new_inode(inode);
-		}
-	}
+	unlock_new_inode(inode);
+
 	return inode;
 }
 
@@ -265,6 +289,9 @@
 		printk(KERN_ERR "cramfs: root is not a directory\n");
 		goto out;
 	}
+	/* correct strange, hard-coded permissions of mkcramfs */
+	super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+
 	root_offset = super.root.offset << 2;
 	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
 		sbi->size=super.size;
@@ -289,7 +316,7 @@
 
 	/* Set it all up.. */
 	sb->s_op = &cramfs_ops;
-	root = get_cramfs_inode(sb, &super.root);
+	root = get_cramfs_inode(sb, &super.root, 0);
 	if (!root)
 		goto out;
 	sb->s_root = d_alloc_root(root);
@@ -365,7 +392,7 @@
 		 */
 		namelen = de->namelen << 2;
 		memcpy(buf, name, namelen);
-		ino = CRAMINO(de);
+		ino = cramino(de, OFFSET(inode) + offset);
 		mode = de->mode;
 		mutex_unlock(&read_mutex);
 		nextoffset = offset + sizeof(*de) + namelen;
@@ -404,8 +431,9 @@
 		struct cramfs_inode *de;
 		char *name;
 		int namelen, retval;
+		int dir_off = OFFSET(dir) + offset;
 
-		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
+		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
 		name = (char *)(de+1);
 
 		/* Try to take advantage of sorted directories */
@@ -436,7 +464,7 @@
 		if (!retval) {
 			struct cramfs_inode entry = *de;
 			mutex_unlock(&read_mutex);
-			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
+			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
 			return NULL;
 		}
 		/* else (retval < 0) */
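
The new cramino() helper encodes whether an entry carries data in the low bits of its inode number: data-bearing regular files, directories, and symlinks use offset << 2 (low two bits clear), while everything else gets the directory offset plus one, which is what the "!(inode->i_ino & 3)" test in get_cramfs_inode() relies on. A standalone sketch of just that numbering rule, simplified and with invented field names (not the patch code):

#include <stdio.h>

/* simplified stand-ins for the on-disk fields consulted by cramino() */
struct cino { unsigned int offset, size, mode; };

#define MODE_REG 1
#define MODE_DEV 2

static unsigned long ino_for(const struct cino *c, unsigned int dir_off)
{
	/* entries without data (or with the buggy mkcramfs offset) share the
	 * "offset + 1" namespace, whose low two bits are never both zero */
	if (!c->offset || !c->size || c->mode == MODE_DEV)
		return dir_off + 1;
	return (unsigned long)c->offset << 2;   /* low two bits are zero */
}

int main(void)
{
	struct cino file = { .offset = 0x40, .size = 100, .mode = MODE_REG };
	struct cino dev  = { .offset = 0x33, .size = 7,   .mode = MODE_DEV };
	unsigned long i1 = ino_for(&file, 12), i2 = ino_for(&dev, 12);

	printf("file ino=%lu carries data: %d\n", i1, !(i1 & 3));
	printf("dev  ino=%lu carries data: %d\n", i2, !(i2 & 3));
	return 0;
}
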
diff --git a/fs/dcache.c b/fs/dcache.c
index 23702a9..274a222 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -33,20 +33,58 @@
 #include <linux/bootmem.h>
 #include <linux/fs_struct.h>
 #include <linux/hardirq.h>
+#include <linux/bit_spinlock.h>
+#include <linux/rculist_bl.h>
 #include "internal.h"
 
+/*
+ * Usage:
+ * dcache->d_inode->i_lock protects:
+ *   - i_dentry, d_alias, d_inode of aliases
+ * dcache_hash_bucket lock protects:
+ *   - the dcache hash table
+ * s_anon bl list spinlock protects:
+ *   - the s_anon list (see __d_drop)
+ * dcache_lru_lock protects:
+ *   - the dcache lru lists and counters
+ * d_lock protects:
+ *   - d_flags
+ *   - d_name
+ *   - d_lru
+ *   - d_count
+ *   - d_unhashed()
+ *   - d_parent and d_subdirs
+ *   - children's d_child and d_parent
+ *   - d_alias, d_inode
+ *
+ * Ordering:
+ * dentry->d_inode->i_lock
+ *   dentry->d_lock
+ *     dcache_lru_lock
+ *     dcache_hash_bucket lock
+ *     s_anon lock
+ *
+ * If there is an ancestor relationship:
+ * dentry->d_parent->...->d_parent->d_lock
+ *   ...
+ *     dentry->d_parent->d_lock
+ *       dentry->d_lock
+ *
+ * If no ancestor relationship:
+ * if (dentry1 < dentry2)
+ *   dentry1->d_lock
+ *     dentry2->d_lock
+ */
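
The last rule above (lock the lower-addressed dentry first when there is no ancestor relationship) is the classic way to take two peer locks without deadlocking. A small pthread sketch of the same ordering rule outside any kernel context (hypothetical helpers, assuming plain mutexes):

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int value;
};

/* Lock two unrelated objects in a global order (by address) so that two
 * threads locking the same pair in opposite argument order cannot deadlock. */
static void lock_pair(struct obj *a, struct obj *b)
{
	if (a > b) { struct obj *t = a; a = b; b = t; }
	pthread_mutex_lock(&a->lock);
	if (a != b)
		pthread_mutex_lock(&b->lock);
}

static void unlock_pair(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct obj y = { PTHREAD_MUTEX_INITIALIZER, 2 };

	lock_pair(&y, &x);           /* callers may pass the pair either way */
	x.value += y.value;
	unlock_pair(&x, &y);
	printf("x=%d\n", x.value);
	return 0;
}
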
 int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
- __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
-EXPORT_SYMBOL(dcache_lock);
+EXPORT_SYMBOL(rename_lock);
 
 static struct kmem_cache *dentry_cache __read_mostly;
 
-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
-
 /*
  * This is the single most critical data structure when it comes
  * to the dcache: the hashtable for lookups. Somebody should try
@@ -60,22 +98,51 @@
 
 static unsigned int d_hash_mask __read_mostly;
 static unsigned int d_hash_shift __read_mostly;
-static struct hlist_head *dentry_hashtable __read_mostly;
+
+struct dcache_hash_bucket {
+	struct hlist_bl_head head;
+};
+static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+
+static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+					unsigned long hash)
+{
+	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
+	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
+	return dentry_hashtable + (hash & D_HASHMASK);
+}
+
+static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
+{
+	bit_spin_lock(0, (unsigned long *)&b->head.first);
+}
+
+static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
+{
+	__bit_spin_unlock(0, (unsigned long *)&b->head.first);
+}
 
 /* Statistics gathering. */
 struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
 };
 
-static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
-static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(unsigned int, nr_dentry);
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+static int get_nr_dentry(void)
+{
+	int i;
+	int sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_dentry, i);
+	return sum < 0 ? 0 : sum;
+}
+
 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 		   size_t *lenp, loff_t *ppos)
 {
-	dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
-	dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	dentry_stat.nr_dentry = get_nr_dentry();
 	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
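
nr_dentry is now a plain per-CPU counter: each CPU bumps only its own slot with no shared cacheline, and the rare reader (the /proc sysctl handler above) sums every slot and clamps at zero, since individual slots can go negative when an object allocated on one CPU is freed on another. A rough userspace equivalent of that scheme, with an array of slots standing in for per-CPU variables (illustrative only):

#include <stdio.h>

#define NR_CPUS 4

/* one counter slot per CPU; writers touch only their own slot */
static long nr_objs[NR_CPUS];

static void cpu_inc(int cpu) { nr_objs[cpu]++; }
static void cpu_dec(int cpu) { nr_objs[cpu]--; }

/* readers pay the cost: sum every slot, clamping at zero because a slot
 * can be driven negative by frees that happen on a different CPU */
static long get_nr_objs(void)
{
	long sum = 0;
	for (int i = 0; i < NR_CPUS; i++)
		sum += nr_objs[i];
	return sum < 0 ? 0 : sum;
}

int main(void)
{
	cpu_inc(0); cpu_inc(0); cpu_inc(2);
	cpu_dec(3);                       /* freed on a different "CPU" */
	printf("approximate total: %ld\n", get_nr_objs());
	return 0;
}
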
@@ -91,35 +158,50 @@
 }
 
 /*
- * no dcache_lock, please.
+ * no locks, please.
  */
 static void d_free(struct dentry *dentry)
 {
-	percpu_counter_dec(&nr_dentry);
+	BUG_ON(dentry->d_count);
+	this_cpu_dec(nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
 
 	/* if dentry was never inserted into hash, immediate free is OK */
-	if (hlist_unhashed(&dentry->d_hash))
+	if (hlist_bl_unhashed(&dentry->d_hash))
 		__d_free(&dentry->d_u.d_rcu);
 	else
 		call_rcu(&dentry->d_u.d_rcu, __d_free);
 }
 
+/**
+ * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * After this call, in-progress rcu-walk path lookup will fail. This
+ * should be called after unhashing, and after changing d_inode (if
+ * the dentry has not already been unhashed).
+ */
+static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
+{
+	assert_spin_locked(&dentry->d_lock);
+	/* Go through a barrier */
+	write_seqcount_barrier(&dentry->d_seq);
+}
+
 /*
  * Release the dentry's inode, using the filesystem
- * d_iput() operation if defined.
+ * d_iput() operation if defined. Dentry has no refcount
+ * and is unhashed.
  */
 static void dentry_iput(struct dentry * dentry)
 	__releases(dentry->d_lock)
-	__releases(dcache_lock)
+	__releases(dentry->d_inode->i_lock)
 {
 	struct inode *inode = dentry->d_inode;
 	if (inode) {
 		dentry->d_inode = NULL;
 		list_del_init(&dentry->d_alias);
 		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
 		if (!inode->i_nlink)
 			fsnotify_inoderemove(inode);
 		if (dentry->d_op && dentry->d_op->d_iput)
@@ -128,40 +210,72 @@
 			iput(inode);
 	} else {
 		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
 	}
 }
 
 /*
- * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
+ * Release the dentry's inode, using the filesystem
+ * d_iput() operation if defined. dentry remains in-use.
+ */
+static void dentry_unlink_inode(struct dentry * dentry)
+	__releases(dentry->d_lock)
+	__releases(dentry->d_inode->i_lock)
+{
+	struct inode *inode = dentry->d_inode;
+	dentry->d_inode = NULL;
+	list_del_init(&dentry->d_alias);
+	dentry_rcuwalk_barrier(dentry);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&inode->i_lock);
+	if (!inode->i_nlink)
+		fsnotify_inoderemove(inode);
+	if (dentry->d_op && dentry->d_op->d_iput)
+		dentry->d_op->d_iput(dentry, inode);
+	else
+		iput(inode);
+}
+
+/*
+ * dentry_lru_(add|del|move_tail) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
 	if (list_empty(&dentry->d_lru)) {
+		spin_lock(&dcache_lru_lock);
 		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
-		percpu_counter_inc(&nr_dentry_unused);
+		dentry_stat.nr_unused++;
+		spin_unlock(&dcache_lru_lock);
 	}
 }
 
+static void __dentry_lru_del(struct dentry *dentry)
+{
+	list_del_init(&dentry->d_lru);
+	dentry->d_sb->s_nr_dentry_unused--;
+	dentry_stat.nr_unused--;
+}
+
 static void dentry_lru_del(struct dentry *dentry)
 {
 	if (!list_empty(&dentry->d_lru)) {
-		list_del_init(&dentry->d_lru);
-		dentry->d_sb->s_nr_dentry_unused--;
-		percpu_counter_dec(&nr_dentry_unused);
+		spin_lock(&dcache_lru_lock);
+		__dentry_lru_del(dentry);
+		spin_unlock(&dcache_lru_lock);
 	}
 }
 
 static void dentry_lru_move_tail(struct dentry *dentry)
 {
+	spin_lock(&dcache_lru_lock);
 	if (list_empty(&dentry->d_lru)) {
 		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
-		percpu_counter_inc(&nr_dentry_unused);
+		dentry_stat.nr_unused++;
 	} else {
 		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 	}
+	spin_unlock(&dcache_lru_lock);
 }
 
 /**
@@ -171,22 +285,115 @@
  * The dentry must already be unhashed and removed from the LRU.
  *
  * If this is the root of the dentry tree, return NULL.
+ *
+ * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
+ * d_kill.
  */
-static struct dentry *d_kill(struct dentry *dentry)
+static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
 	__releases(dentry->d_lock)
-	__releases(dcache_lock)
+	__releases(parent->d_lock)
+	__releases(dentry->d_inode->i_lock)
 {
+	dentry->d_parent = NULL;
+	list_del(&dentry->d_u.d_child);
+	if (parent)
+		spin_unlock(&parent->d_lock);
+	dentry_iput(dentry);
+	/*
+	 * dentry_iput drops the locks, at which point nobody (except
+	 * transient RCU lookups) can reach this dentry.
+	 */
+	d_free(dentry);
+	return parent;
+}
+
+/**
+ * d_drop - drop a dentry
+ * @dentry: dentry to drop
+ *
+ * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
+ * be found through a VFS lookup any more. Note that this is different from
+ * deleting the dentry - d_delete will try to mark the dentry negative if
+ * possible, giving a successful _negative_ lookup, while d_drop will
+ * just make the cache lookup fail.
+ *
+ * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
+ * reason (NFS timeouts or autofs deletes).
+ *
+ * __d_drop requires dentry->d_lock.
+ */
+void __d_drop(struct dentry *dentry)
+{
+	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
+		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
+			bit_spin_lock(0,
+				(unsigned long *)&dentry->d_sb->s_anon.first);
+			dentry->d_flags |= DCACHE_UNHASHED;
+			hlist_bl_del_init(&dentry->d_hash);
+			__bit_spin_unlock(0,
+				(unsigned long *)&dentry->d_sb->s_anon.first);
+		} else {
+			struct dcache_hash_bucket *b;
+			b = d_hash(dentry->d_parent, dentry->d_name.hash);
+			spin_lock_bucket(b);
+			/*
+			 * We may not actually need to put DCACHE_UNHASHED
+			 * manipulations under the hash lock, but follow
+			 * the principle of least surprise.
+			 */
+			dentry->d_flags |= DCACHE_UNHASHED;
+			hlist_bl_del_rcu(&dentry->d_hash);
+			spin_unlock_bucket(b);
+			dentry_rcuwalk_barrier(dentry);
+		}
+	}
+}
+EXPORT_SYMBOL(__d_drop);
+
+void d_drop(struct dentry *dentry)
+{
+	spin_lock(&dentry->d_lock);
+	__d_drop(dentry);
+	spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_drop);
+
+/*
+ * Finish off a dentry we've decided to kill.
+ * dentry->d_lock must be held, returns with it unlocked.
+ * If ref is non-zero, then decrement the refcount too.
+ * Returns dentry requiring refcount drop, or NULL if we're done.
+ */
+static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
+	__releases(dentry->d_lock)
+{
+	struct inode *inode;
 	struct dentry *parent;
 
-	list_del(&dentry->d_u.d_child);
-	/*drops the locks, at that point nobody can reach this dentry */
-	dentry_iput(dentry);
+	inode = dentry->d_inode;
+	if (inode && !spin_trylock(&inode->i_lock)) {
+relock:
+		spin_unlock(&dentry->d_lock);
+		cpu_relax();
+		return dentry; /* try again with same dentry */
+	}
 	if (IS_ROOT(dentry))
 		parent = NULL;
 	else
 		parent = dentry->d_parent;
-	d_free(dentry);
-	return parent;
+	if (parent && !spin_trylock(&parent->d_lock)) {
+		if (inode)
+			spin_unlock(&inode->i_lock);
+		goto relock;
+	}
+
+	if (ref)
+		dentry->d_count--;
+	/* if dentry was on the d_lru list delete it from there */
+	dentry_lru_del(dentry);
+	/* if it was on the hash then remove it */
+	__d_drop(dentry);
+	return d_kill(dentry, parent);
 }
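
dentry_kill() enters holding only d_lock, yet it also needs the inode's i_lock and the parent's d_lock, both of which rank earlier in the ordering documented above. Rather than dropping and re-acquiring in order, it trylocks the earlier-ranked locks and, on failure, backs off completely and lets the caller retry (the "return dentry" / "goto repeat" dance in dput). A compact pthread sketch of that backoff pattern, with invented lock names:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t earlier = PTHREAD_MUTEX_INITIALIZER; /* ranks before "held" */
static pthread_mutex_t held    = PTHREAD_MUTEX_INITIALIZER;

/* We arrive wanting "held" first but also need "earlier", which the lock
 * order says must be taken before it.  Blocking on it could deadlock, so
 * trylock; on failure drop everything, breathe, and start over. */
static void lock_out_of_order(void)
{
	for (;;) {
		pthread_mutex_lock(&held);
		if (pthread_mutex_trylock(&earlier) == 0)
			return;                  /* both held, ordering never violated by blocking */
		pthread_mutex_unlock(&held);     /* back off completely ... */
		sched_yield();                   /* ... the userspace cpu_relax() */
	}
}

int main(void)
{
	lock_out_of_order();
	puts("holding both locks");
	pthread_mutex_unlock(&earlier);
	pthread_mutex_unlock(&held);
	return 0;
}
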
 
 /* 
@@ -214,34 +421,26 @@
  * call the dentry unlink method as well as removing it from the queues and
  * releasing its resources. If the parent dentries were scheduled for release
  * they too may now get deleted.
- *
- * no dcache lock, please.
  */
-
 void dput(struct dentry *dentry)
 {
 	if (!dentry)
 		return;
 
 repeat:
-	if (atomic_read(&dentry->d_count) == 1)
+	if (dentry->d_count == 1)
 		might_sleep();
-	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
-		return;
-
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count)) {
+	BUG_ON(!dentry->d_count);
+	if (dentry->d_count > 1) {
+		dentry->d_count--;
 		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
 		return;
 	}
 
-	/*
-	 * AV: ->d_delete() is _NOT_ allowed to block now.
-	 */
-	if (dentry->d_op && dentry->d_op->d_delete) {
+	if (dentry->d_flags & DCACHE_OP_DELETE) {
 		if (dentry->d_op->d_delete(dentry))
-			goto unhash_it;
+			goto kill_it;
 	}
 
 	/* Unreachable? Get rid of it */
@@ -252,16 +451,12 @@
 	dentry->d_flags |= DCACHE_REFERENCED;
 	dentry_lru_add(dentry);
 
- 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
+	dentry->d_count--;
+	spin_unlock(&dentry->d_lock);
 	return;
 
-unhash_it:
-	__d_drop(dentry);
 kill_it:
-	/* if dentry was on the d_lru list delete it from there */
-	dentry_lru_del(dentry);
-	dentry = d_kill(dentry);
+	dentry = dentry_kill(dentry, 1);
 	if (dentry)
 		goto repeat;
 }
@@ -284,9 +479,9 @@
 	/*
 	 * If it's already been dropped, return OK.
 	 */
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	if (d_unhashed(dentry)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
 		return 0;
 	}
 	/*
@@ -294,9 +489,9 @@
 	 * to get rid of unused child entries.
 	 */
 	if (!list_empty(&dentry->d_subdirs)) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&dentry->d_lock);
 		shrink_dcache_parent(dentry);
-		spin_lock(&dcache_lock);
+		spin_lock(&dentry->d_lock);
 	}
 
 	/*
@@ -309,35 +504,61 @@
 	 * we might still populate it if it was a
 	 * working directory or similar).
 	 */
-	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) > 1) {
+	if (dentry->d_count > 1) {
 		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
 			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
 			return -EBUSY;
 		}
 	}
 
 	__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
 	return 0;
 }
 EXPORT_SYMBOL(d_invalidate);
 
-/* This should be called _only_ with dcache_lock held */
-static inline struct dentry * __dget_locked(struct dentry *dentry)
+/* This must be called with d_lock held */
+static inline void __dget_dlock(struct dentry *dentry)
 {
-	atomic_inc(&dentry->d_count);
-	dentry_lru_del(dentry);
-	return dentry;
+	dentry->d_count++;
 }
 
-struct dentry * dget_locked(struct dentry *dentry)
+static inline void __dget(struct dentry *dentry)
 {
-	return __dget_locked(dentry);
+	spin_lock(&dentry->d_lock);
+	__dget_dlock(dentry);
+	spin_unlock(&dentry->d_lock);
 }
-EXPORT_SYMBOL(dget_locked);
+
+struct dentry *dget_parent(struct dentry *dentry)
+{
+	struct dentry *ret;
+
+repeat:
+	/*
+	 * Don't need rcu_dereference because we re-check it was correct under
+	 * the lock.
+	 */
+	rcu_read_lock();
+	ret = dentry->d_parent;
+	if (!ret) {
+		rcu_read_unlock();
+		goto out;
+	}
+	spin_lock(&ret->d_lock);
+	if (unlikely(ret != dentry->d_parent)) {
+		spin_unlock(&ret->d_lock);
+		rcu_read_unlock();
+		goto repeat;
+	}
+	rcu_read_unlock();
+	BUG_ON(!ret->d_count);
+	ret->d_count++;
+	spin_unlock(&ret->d_lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(dget_parent);
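
dget_parent() shows the lockless-read-then-verify idiom used throughout this series: peek at d_parent under rcu_read_lock, lock that candidate, then re-check that it is still the parent; if it is not, drop the lock and start over. A minimal pthread rendition of the same recheck loop (simplified: no RCU, no memory barriers, hypothetical names — the kernel version needs both):

#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;
	struct node *parent;   /* may be re-pointed by concurrent "renames" */
	int refcount;          /* protected by lock */
};

/* Take a reference on n's current parent without holding n's lock:
 * read the pointer optimistically, lock the candidate, then confirm
 * it is still the parent before trusting it. */
static struct node *grab_parent(struct node *n)
{
	struct node *p;

	for (;;) {
		p = n->parent;                   /* optimistic, unlocked read */
		if (!p)
			return NULL;
		pthread_mutex_lock(&p->lock);
		if (p == n->parent)              /* still the parent? */
			break;
		pthread_mutex_unlock(&p->lock);  /* raced with a move; retry */
	}
	p->refcount++;
	pthread_mutex_unlock(&p->lock);
	return p;
}

int main(void)
{
	struct node root  = { PTHREAD_MUTEX_INITIALIZER, NULL, 1 };
	struct node child = { PTHREAD_MUTEX_INITIALIZER, &root, 1 };
	struct node *p = grab_parent(&child);

	printf("parent refcount now %d\n", p ? p->refcount : -1);
	return 0;
}
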
 
 /**
  * d_find_alias - grab a hashed alias of inode
@@ -355,42 +576,51 @@
  * any other hashed alias over that one unless @want_discon is set,
  * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
  */
-
-static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
+static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
 {
-	struct list_head *head, *next, *tmp;
-	struct dentry *alias, *discon_alias=NULL;
+	struct dentry *alias, *discon_alias;
 
-	head = &inode->i_dentry;
-	next = inode->i_dentry.next;
-	while (next != head) {
-		tmp = next;
-		next = tmp->next;
-		prefetch(next);
-		alias = list_entry(tmp, struct dentry, d_alias);
+again:
+	discon_alias = NULL;
+	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+		spin_lock(&alias->d_lock);
  		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (IS_ROOT(alias) &&
-			    (alias->d_flags & DCACHE_DISCONNECTED))
+			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 				discon_alias = alias;
-			else if (!want_discon) {
-				__dget_locked(alias);
+			} else if (!want_discon) {
+				__dget_dlock(alias);
+				spin_unlock(&alias->d_lock);
 				return alias;
 			}
 		}
+		spin_unlock(&alias->d_lock);
 	}
-	if (discon_alias)
-		__dget_locked(discon_alias);
-	return discon_alias;
+	if (discon_alias) {
+		alias = discon_alias;
+		spin_lock(&alias->d_lock);
+		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+			if (IS_ROOT(alias) &&
+			    (alias->d_flags & DCACHE_DISCONNECTED)) {
+				__dget_dlock(alias);
+				spin_unlock(&alias->d_lock);
+				return alias;
+			}
+		}
+		spin_unlock(&alias->d_lock);
+		goto again;
+	}
+	return NULL;
 }
 
-struct dentry * d_find_alias(struct inode *inode)
+struct dentry *d_find_alias(struct inode *inode)
 {
 	struct dentry *de = NULL;
 
 	if (!list_empty(&inode->i_dentry)) {
-		spin_lock(&dcache_lock);
+		spin_lock(&inode->i_lock);
 		de = __d_find_alias(inode, 0);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
 	}
 	return de;
 }
@@ -404,54 +634,61 @@
 {
 	struct dentry *dentry;
 restart:
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
-		if (!atomic_read(&dentry->d_count)) {
-			__dget_locked(dentry);
+		if (!dentry->d_count) {
+			__dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
 			dput(dentry);
 			goto restart;
 		}
 		spin_unlock(&dentry->d_lock);
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL(d_prune_aliases);
 
 /*
- * Throw away a dentry - free the inode, dput the parent.  This requires that
- * the LRU list has already been removed.
+ * Try to throw away a dentry - free the inode, dput the parent.
+ * Requires dentry->d_lock is held, and dentry->d_count == 0.
+ * Releases dentry->d_lock.
  *
- * Try to prune ancestors as well.  This is necessary to prevent
- * quadratic behavior of shrink_dcache_parent(), but is also expected
- * to be beneficial in reducing dentry cache fragmentation.
+ * This may fail if locks cannot be acquired; no problem, just try again.
  */
-static void prune_one_dentry(struct dentry * dentry)
+static void try_prune_one_dentry(struct dentry *dentry)
 	__releases(dentry->d_lock)
-	__releases(dcache_lock)
-	__acquires(dcache_lock)
 {
-	__d_drop(dentry);
-	dentry = d_kill(dentry);
+	struct dentry *parent;
 
+	parent = dentry_kill(dentry, 0);
 	/*
-	 * Prune ancestors.  Locking is simpler than in dput(),
-	 * because dcache_lock needs to be taken anyway.
+	 * If dentry_kill returns NULL, we have nothing more to do.
+	 * If it returns the same dentry, the trylocks failed. In either
+	 * case, just loop again.
+	 *
+	 * Otherwise, we need to prune ancestors too. This is necessary
+	 * to prevent quadratic behavior of shrink_dcache_parent(), but
+	 * is also expected to be beneficial in reducing dentry cache
+	 * fragmentation.
 	 */
-	spin_lock(&dcache_lock);
-	while (dentry) {
-		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
-			return;
+	if (!parent)
+		return;
+	if (parent == dentry)
+		return;
 
-		if (dentry->d_op && dentry->d_op->d_delete)
-			dentry->d_op->d_delete(dentry);
-		dentry_lru_del(dentry);
-		__d_drop(dentry);
-		dentry = d_kill(dentry);
-		spin_lock(&dcache_lock);
+	/* Prune ancestors. */
+	dentry = parent;
+	while (dentry) {
+		spin_lock(&dentry->d_lock);
+		if (dentry->d_count > 1) {
+			dentry->d_count--;
+			spin_unlock(&dentry->d_lock);
+			return;
+		}
+		dentry = dentry_kill(dentry, 1);
 	}
 }
 
@@ -459,24 +696,35 @@
 {
 	struct dentry *dentry;
 
-	while (!list_empty(list)) {
-		dentry = list_entry(list->prev, struct dentry, d_lru);
-		dentry_lru_del(dentry);
+	rcu_read_lock();
+	for (;;) {
+		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
+		if (&dentry->d_lru == list)
+			break; /* empty */
+		spin_lock(&dentry->d_lock);
+		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
+			spin_unlock(&dentry->d_lock);
+			continue;
+		}
 
 		/*
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup.  Do not free
 		 * it - just keep it off the LRU list.
 		 */
-		spin_lock(&dentry->d_lock);
-		if (atomic_read(&dentry->d_count)) {
+		if (dentry->d_count) {
+			dentry_lru_del(dentry);
 			spin_unlock(&dentry->d_lock);
 			continue;
 		}
-		prune_one_dentry(dentry);
-		/* dentry->d_lock was dropped in prune_one_dentry() */
-		cond_resched_lock(&dcache_lock);
+
+		rcu_read_unlock();
+
+		try_prune_one_dentry(dentry);
+
+		rcu_read_lock();
 	}
+	rcu_read_unlock();
 }
 
 /**
@@ -495,42 +743,44 @@
 	LIST_HEAD(tmp);
 	int cnt = *count;
 
-	spin_lock(&dcache_lock);
+relock:
+	spin_lock(&dcache_lru_lock);
 	while (!list_empty(&sb->s_dentry_lru)) {
 		dentry = list_entry(sb->s_dentry_lru.prev,
 				struct dentry, d_lru);
 		BUG_ON(dentry->d_sb != sb);
 
+		if (!spin_trylock(&dentry->d_lock)) {
+			spin_unlock(&dcache_lru_lock);
+			cpu_relax();
+			goto relock;
+		}
+
 		/*
 		 * If we are honouring the DCACHE_REFERENCED flag and the
 		 * dentry has this flag set, don't free it.  Clear the flag
 		 * and put it back on the LRU.
 		 */
-		if (flags & DCACHE_REFERENCED) {
-			spin_lock(&dentry->d_lock);
-			if (dentry->d_flags & DCACHE_REFERENCED) {
-				dentry->d_flags &= ~DCACHE_REFERENCED;
-				list_move(&dentry->d_lru, &referenced);
-				spin_unlock(&dentry->d_lock);
-				cond_resched_lock(&dcache_lock);
-				continue;
-			}
+		if (flags & DCACHE_REFERENCED &&
+				dentry->d_flags & DCACHE_REFERENCED) {
+			dentry->d_flags &= ~DCACHE_REFERENCED;
+			list_move(&dentry->d_lru, &referenced);
 			spin_unlock(&dentry->d_lock);
+		} else {
+			list_move_tail(&dentry->d_lru, &tmp);
+			spin_unlock(&dentry->d_lock);
+			if (!--cnt)
+				break;
 		}
-
-		list_move_tail(&dentry->d_lru, &tmp);
-		if (!--cnt)
-			break;
-		cond_resched_lock(&dcache_lock);
+		cond_resched_lock(&dcache_lru_lock);
 	}
-
-	*count = cnt;
-	shrink_dentry_list(&tmp);
-
 	if (!list_empty(&referenced))
 		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dcache_lru_lock);
 
+	shrink_dentry_list(&tmp);
+
+	*count = cnt;
 }
 
 /**
@@ -546,13 +796,12 @@
 {
 	struct super_block *sb, *p = NULL;
 	int w_count;
-	int unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	int unused = dentry_stat.nr_unused;
 	int prune_ratio;
 	int pruned;
 
 	if (unused == 0 || count == 0)
 		return;
-	spin_lock(&dcache_lock);
 	if (count >= unused)
 		prune_ratio = 1;
 	else
@@ -589,11 +838,9 @@
 		if (down_read_trylock(&sb->s_umount)) {
 			if ((sb->s_root != NULL) &&
 			    (!list_empty(&sb->s_dentry_lru))) {
-				spin_unlock(&dcache_lock);
 				__shrink_dcache_sb(sb, &w_count,
 						DCACHE_REFERENCED);
 				pruned -= w_count;
-				spin_lock(&dcache_lock);
 			}
 			up_read(&sb->s_umount);
 		}
@@ -609,7 +856,6 @@
 	if (p)
 		__put_super(p);
 	spin_unlock(&sb_lock);
-	spin_unlock(&dcache_lock);
 }
 
 /**
@@ -623,12 +869,14 @@
 {
 	LIST_HEAD(tmp);
 
-	spin_lock(&dcache_lock);
+	spin_lock(&dcache_lru_lock);
 	while (!list_empty(&sb->s_dentry_lru)) {
 		list_splice_init(&sb->s_dentry_lru, &tmp);
+		spin_unlock(&dcache_lru_lock);
 		shrink_dentry_list(&tmp);
+		spin_lock(&dcache_lru_lock);
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dcache_lru_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
@@ -645,10 +893,10 @@
 	BUG_ON(!IS_ROOT(dentry));
 
 	/* detach this root from the system */
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	dentry_lru_del(dentry);
 	__d_drop(dentry);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
 
 	for (;;) {
 		/* descend to the first leaf in the current subtree */
@@ -657,14 +905,16 @@
 
 			/* this is a branch with children - detach all of them
 			 * from the system in one go */
-			spin_lock(&dcache_lock);
+			spin_lock(&dentry->d_lock);
 			list_for_each_entry(loop, &dentry->d_subdirs,
 					    d_u.d_child) {
+				spin_lock_nested(&loop->d_lock,
+						DENTRY_D_LOCK_NESTED);
 				dentry_lru_del(loop);
 				__d_drop(loop);
-				cond_resched_lock(&dcache_lock);
+				spin_unlock(&loop->d_lock);
 			}
-			spin_unlock(&dcache_lock);
+			spin_unlock(&dentry->d_lock);
 
 			/* move to the first child */
 			dentry = list_entry(dentry->d_subdirs.next,
@@ -676,7 +926,7 @@
 		do {
 			struct inode *inode;
 
-			if (atomic_read(&dentry->d_count) != 0) {
+			if (dentry->d_count != 0) {
 				printk(KERN_ERR
 				       "BUG: Dentry %p{i=%lx,n=%s}"
 				       " still in use (%d)"
@@ -685,20 +935,23 @@
 				       dentry->d_inode ?
 				       dentry->d_inode->i_ino : 0UL,
 				       dentry->d_name.name,
-				       atomic_read(&dentry->d_count),
+				       dentry->d_count,
 				       dentry->d_sb->s_type->name,
 				       dentry->d_sb->s_id);
 				BUG();
 			}
 
-			if (IS_ROOT(dentry))
+			if (IS_ROOT(dentry)) {
 				parent = NULL;
-			else {
+				list_del(&dentry->d_u.d_child);
+			} else {
 				parent = dentry->d_parent;
-				atomic_dec(&parent->d_count);
+				spin_lock(&parent->d_lock);
+				parent->d_count--;
+				list_del(&dentry->d_u.d_child);
+				spin_unlock(&parent->d_lock);
 			}
 
-			list_del(&dentry->d_u.d_child);
 			detached++;
 
 			inode = dentry->d_inode;
@@ -728,8 +981,7 @@
 
 /*
  * destroy the dentries attached to a superblock on unmounting
- * - we don't need to use dentry->d_lock, and only need dcache_lock when
- *   removing the dentry from the system lists and hashes because:
+ * - we don't need to use dentry->d_lock because:
  *   - the superblock is detached from all mountings and open files, so the
  *     dentry trees will not be rearranged by the VFS
  *   - s_umount is write-locked, so the memory pressure shrinker will ignore
@@ -746,11 +998,13 @@
 
 	dentry = sb->s_root;
 	sb->s_root = NULL;
-	atomic_dec(&dentry->d_count);
+	spin_lock(&dentry->d_lock);
+	dentry->d_count--;
+	spin_unlock(&dentry->d_lock);
 	shrink_dcache_for_umount_subtree(dentry);
 
-	while (!hlist_empty(&sb->s_anon)) {
-		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
+	while (!hlist_bl_empty(&sb->s_anon)) {
+		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
 		shrink_dcache_for_umount_subtree(dentry);
 	}
 }
@@ -768,15 +1022,20 @@
  * Return true if the parent or its subdirectories contain
  * a mount point
  */
- 
 int have_submounts(struct dentry *parent)
 {
-	struct dentry *this_parent = parent;
+	struct dentry *this_parent;
 	struct list_head *next;
+	unsigned seq;
+	int locked = 0;
 
-	spin_lock(&dcache_lock);
+	seq = read_seqbegin(&rename_lock);
+again:
+	this_parent = parent;
+
 	if (d_mountpoint(parent))
 		goto positive;
+	spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -784,27 +1043,65 @@
 		struct list_head *tmp = next;
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
+
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 		/* Have we found a mount point ? */
-		if (d_mountpoint(dentry))
+		if (d_mountpoint(dentry)) {
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&this_parent->d_lock);
 			goto positive;
+		}
 		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
+		spin_unlock(&dentry->d_lock);
 	}
 	/*
 	 * All done at this level ... ascend and resume the search.
 	 */
 	if (this_parent != parent) {
-		next = this_parent->d_u.d_child.next;
-		this_parent = this_parent->d_parent;
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		/* might go back up the wrong parent if we have had a rename
+		 * or deletion */
+		if (this_parent != child->d_parent ||
+			 (!locked && read_seqretry(&rename_lock, seq))) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
+			goto rename_retry;
+		}
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
 		goto resume;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&this_parent->d_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
 	return 0; /* No mount points found in tree */
 positive:
-	spin_unlock(&dcache_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
 	return 1;
+
+rename_retry:
+	locked = 1;
+	write_seqlock(&rename_lock);
+	goto again;
 }
 EXPORT_SYMBOL(have_submounts);
 
@@ -824,11 +1121,16 @@
  */
 static int select_parent(struct dentry * parent)
 {
-	struct dentry *this_parent = parent;
+	struct dentry *this_parent;
 	struct list_head *next;
+	unsigned seq;
 	int found = 0;
+	int locked = 0;
 
-	spin_lock(&dcache_lock);
+	seq = read_seqbegin(&rename_lock);
+again:
+	this_parent = parent;
+	spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -837,11 +1139,13 @@
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
 		/* 
 		 * move only zero ref count dentries to the end 
 		 * of the unused list for prune_dcache
 		 */
-		if (!atomic_read(&dentry->d_count)) {
+		if (!dentry->d_count) {
 			dentry_lru_move_tail(dentry);
 			found++;
 		} else {
@@ -853,28 +1157,63 @@
 		 * ensures forward progress). We'll be coming back to find
 		 * the rest.
 		 */
-		if (found && need_resched())
+		if (found && need_resched()) {
+			spin_unlock(&dentry->d_lock);
 			goto out;
+		}
 
 		/*
 		 * Descend a level if the d_subdirs list is non-empty.
 		 */
 		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
+
+		spin_unlock(&dentry->d_lock);
 	}
 	/*
 	 * All done at this level ... ascend and resume the search.
 	 */
 	if (this_parent != parent) {
-		next = this_parent->d_u.d_child.next;
-		this_parent = this_parent->d_parent;
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		/* might go back up the wrong parent if we have had a rename
+		 * or deletion */
+		if (this_parent != child->d_parent ||
+			(!locked && read_seqretry(&rename_lock, seq))) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
+			goto rename_retry;
+		}
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
 		goto resume;
 	}
 out:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&this_parent->d_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
 	return found;
+
+rename_retry:
+	if (found)
+		return found;
+	locked = 1;
+	write_seqlock(&rename_lock);
+	goto again;
 }
 
 /**
@@ -908,16 +1247,13 @@
  */
 static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
-	int nr_unused;
-
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 		prune_dcache(nr);
 	}
 
-	nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
-	return (nr_unused / 100) * sysctl_vfs_cache_pressure;
+	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dcache_shrinker = {
@@ -960,38 +1296,54 @@
 	memcpy(dname, name->name, name->len);
 	dname[name->len] = 0;
 
-	atomic_set(&dentry->d_count, 1);
+	dentry->d_count = 1;
 	dentry->d_flags = DCACHE_UNHASHED;
 	spin_lock_init(&dentry->d_lock);
+	seqcount_init(&dentry->d_seq);
 	dentry->d_inode = NULL;
 	dentry->d_parent = NULL;
 	dentry->d_sb = NULL;
 	dentry->d_op = NULL;
 	dentry->d_fsdata = NULL;
-	dentry->d_mounted = 0;
-	INIT_HLIST_NODE(&dentry->d_hash);
+	INIT_HLIST_BL_NODE(&dentry->d_hash);
 	INIT_LIST_HEAD(&dentry->d_lru);
 	INIT_LIST_HEAD(&dentry->d_subdirs);
 	INIT_LIST_HEAD(&dentry->d_alias);
+	INIT_LIST_HEAD(&dentry->d_u.d_child);
 
 	if (parent) {
-		dentry->d_parent = dget(parent);
+		spin_lock(&parent->d_lock);
+		/*
+		 * don't need child lock because it is not subject
+		 * to concurrency here
+		 */
+		__dget_dlock(parent);
+		dentry->d_parent = parent;
 		dentry->d_sb = parent->d_sb;
-	} else {
-		INIT_LIST_HEAD(&dentry->d_u.d_child);
+		d_set_d_op(dentry, dentry->d_sb->s_d_op);
+		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
+		spin_unlock(&parent->d_lock);
 	}
 
-	spin_lock(&dcache_lock);
-	if (parent)
-		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-	spin_unlock(&dcache_lock);
-
-	percpu_counter_inc(&nr_dentry);
+	this_cpu_inc(nr_dentry);
 
 	return dentry;
 }
 EXPORT_SYMBOL(d_alloc);
 
+struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
+{
+	struct dentry *dentry = d_alloc(NULL, name);
+	if (dentry) {
+		dentry->d_sb = sb;
+		d_set_d_op(dentry, dentry->d_sb->s_d_op);
+		dentry->d_parent = dentry;
+		dentry->d_flags |= DCACHE_DISCONNECTED;
+	}
+	return dentry;
+}
+EXPORT_SYMBOL(d_alloc_pseudo);
+
 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
 {
 	struct qstr q;
@@ -1003,12 +1355,36 @@
 }
 EXPORT_SYMBOL(d_alloc_name);
 
-/* the caller must hold dcache_lock */
+void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+{
+	WARN_ON_ONCE(dentry->d_op);
+	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
+				DCACHE_OP_COMPARE	|
+				DCACHE_OP_REVALIDATE	|
+				DCACHE_OP_DELETE ));
+	dentry->d_op = op;
+	if (!op)
+		return;
+	if (op->d_hash)
+		dentry->d_flags |= DCACHE_OP_HASH;
+	if (op->d_compare)
+		dentry->d_flags |= DCACHE_OP_COMPARE;
+	if (op->d_revalidate)
+		dentry->d_flags |= DCACHE_OP_REVALIDATE;
+	if (op->d_delete)
+		dentry->d_flags |= DCACHE_OP_DELETE;
+
+}
+EXPORT_SYMBOL(d_set_d_op);
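
d_set_d_op() caches which optional methods exist as DCACHE_OP_* bits so the hot lookup paths test one word they already have in hand (d_flags) instead of chasing d_op and then a method pointer. The same trick in miniature, with invented names (generic C, not the patch code):

#include <stdio.h>

struct ops {
	int (*compare)(const char *, const char *);
	int (*revalidate)(void *);
};

#define HAS_COMPARE    0x1
#define HAS_REVALIDATE 0x2

struct node {
	unsigned int flags;        /* presence bits, checked on hot paths */
	const struct ops *ops;     /* dereferenced only when a bit is set */
};

static void set_ops(struct node *n, const struct ops *ops)
{
	n->ops = ops;
	n->flags = 0;
	if (ops && ops->compare)
		n->flags |= HAS_COMPARE;
	if (ops && ops->revalidate)
		n->flags |= HAS_REVALIDATE;
}

static int names_match(const struct node *n, const char *a, const char *b)
{
	if (n->flags & HAS_COMPARE)            /* one cheap test on the hot path */
		return n->ops->compare(a, b) == 0;
	return a == b;                         /* default: trivial comparison */
}

int main(void)
{
	struct node n;
	const char *s = "alpha";

	set_ops(&n, NULL);                     /* no methods: bits stay clear */
	printf("match without ops: %d\n", names_match(&n, s, s));
	return 0;
}
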
+
 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 {
+	spin_lock(&dentry->d_lock);
 	if (inode)
 		list_add(&dentry->d_alias, &inode->i_dentry);
 	dentry->d_inode = inode;
+	dentry_rcuwalk_barrier(dentry);
+	spin_unlock(&dentry->d_lock);
 	fsnotify_d_instantiate(dentry, inode);
 }
 
@@ -1030,9 +1406,11 @@
 void d_instantiate(struct dentry *entry, struct inode * inode)
 {
 	BUG_ON(!list_empty(&entry->d_alias));
-	spin_lock(&dcache_lock);
+	if (inode)
+		spin_lock(&inode->i_lock);
 	__d_instantiate(entry, inode);
-	spin_unlock(&dcache_lock);
+	if (inode)
+		spin_unlock(&inode->i_lock);
 	security_d_instantiate(entry, inode);
 }
 EXPORT_SYMBOL(d_instantiate);
@@ -1069,15 +1447,18 @@
 	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
 		struct qstr *qstr = &alias->d_name;
 
+		/*
+		 * Don't need alias->d_lock here, because aliases with
+		 * d_parent == entry->d_parent are not subject to name or
+		 * parent changes, because the parent inode i_mutex is held.
+		 */
 		if (qstr->hash != hash)
 			continue;
 		if (alias->d_parent != entry->d_parent)
 			continue;
-		if (qstr->len != len)
+		if (dentry_cmp(qstr->name, qstr->len, name, len))
 			continue;
-		if (memcmp(qstr->name, name, len))
-			continue;
-		dget_locked(alias);
+		__dget(alias);
 		return alias;
 	}
 
@@ -1091,9 +1472,11 @@
 
 	BUG_ON(!list_empty(&entry->d_alias));
 
-	spin_lock(&dcache_lock);
+	if (inode)
+		spin_lock(&inode->i_lock);
 	result = __d_instantiate_unique(entry, inode);
-	spin_unlock(&dcache_lock);
+	if (inode)
+		spin_unlock(&inode->i_lock);
 
 	if (!result) {
 		security_d_instantiate(entry, inode);
@@ -1126,6 +1509,7 @@
 		res = d_alloc(NULL, &name);
 		if (res) {
 			res->d_sb = root_inode->i_sb;
+			d_set_d_op(res, res->d_sb->s_d_op);
 			res->d_parent = res;
 			d_instantiate(res, root_inode);
 		}
@@ -1134,14 +1518,6 @@
 }
 EXPORT_SYMBOL(d_alloc_root);
 
-static inline struct hlist_head *d_hash(struct dentry *parent,
-					unsigned long hash)
-{
-	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
-	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
-	return dentry_hashtable + (hash & D_HASHMASK);
-}
-
 /**
  * d_obtain_alias - find or allocate a dentry for a given inode
  * @inode: inode to allocate the dentry for
@@ -1182,10 +1558,11 @@
 	}
 	tmp->d_parent = tmp; /* make sure dput doesn't croak */
 
-	spin_lock(&dcache_lock);
+
+	spin_lock(&inode->i_lock);
 	res = __d_find_alias(inode, 0);
 	if (res) {
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
 		dput(tmp);
 		goto out_iput;
 	}
@@ -1193,14 +1570,17 @@
 	/* attach a disconnected dentry */
 	spin_lock(&tmp->d_lock);
 	tmp->d_sb = inode->i_sb;
+	d_set_d_op(tmp, tmp->d_sb->s_d_op);
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
-	tmp->d_flags &= ~DCACHE_UNHASHED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
-	hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
+	bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+	tmp->d_flags &= ~DCACHE_UNHASHED;
+	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
+	__bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
 	spin_unlock(&tmp->d_lock);
+	spin_unlock(&inode->i_lock);
 
-	spin_unlock(&dcache_lock);
 	return tmp;
 
  out_iput:
@@ -1230,18 +1610,18 @@
 	struct dentry *new = NULL;
 
 	if (inode && S_ISDIR(inode->i_mode)) {
-		spin_lock(&dcache_lock);
+		spin_lock(&inode->i_lock);
 		new = __d_find_alias(inode, 1);
 		if (new) {
 			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
 			security_d_instantiate(new, inode);
 			d_move(new, dentry);
 			iput(inode);
 		} else {
-			/* already taking dcache_lock, so d_add() by hand */
+			/* already taking inode->i_lock, so d_add() by hand */
 			__d_instantiate(dentry, inode);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&inode->i_lock);
 			security_d_instantiate(dentry, inode);
 			d_rehash(dentry);
 		}
@@ -1314,10 +1694,10 @@
 	 * Negative dentry: instantiate it unless the inode is a directory and
 	 * already has a dentry.
 	 */
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
 	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
 		__d_instantiate(found, inode);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&inode->i_lock);
 		security_d_instantiate(found, inode);
 		return found;
 	}
@@ -1327,8 +1707,8 @@
 	 * reference to it, move it in place and use it.
 	 */
 	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-	dget_locked(new);
-	spin_unlock(&dcache_lock);
+	__dget(new);
+	spin_unlock(&inode->i_lock);
 	security_d_instantiate(found, inode);
 	d_move(new, found);
 	iput(inode);
@@ -1342,6 +1722,112 @@
 EXPORT_SYMBOL(d_add_ci);
 
 /**
+ * __d_lookup_rcu - search for a dentry (racy, store-free)
+ * @parent: parent dentry
+ * @name: qstr of name we wish to find
+ * @seq: returns d_seq value at the point where the dentry was found
+ * @inode: returns dentry->d_inode when the inode was found valid.
+ * Returns: dentry, or NULL
+ *
+ * __d_lookup_rcu is the dcache lookup function for rcu-walk name
+ * resolution (store-free path walking) design described in
+ * Documentation/filesystems/path-lookup.txt.
+ *
+ * This is not to be used outside core vfs.
+ *
+ * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
+ * held, and rcu_read_lock held. The returned dentry must not be used or
+ * stored without taking d_lock and checking the d_seq sequence count
+ * against @seq returned here.
+ *
+ * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
+ * function.
+ *
+ * Alternatively, __d_lookup_rcu may be called again to look up the child of
+ * the returned dentry, so long as its parent's seqlock is checked after the
+ * child is looked up. Thus, an interlocking stepping of sequence lock checks
+ * is formed, giving integrity down the path walk.
+ */
+struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
+				unsigned *seq, struct inode **inode)
+{
+	unsigned int len = name->len;
+	unsigned int hash = name->hash;
+	const unsigned char *str = name->name;
+	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_node *node;
+	struct dentry *dentry;
+
+	/*
+	 * Note: There is significant duplication with __d_lookup which is
+	 * required to prevent single threaded performance regressions
+	 * especially on architectures where smp_rmb (in seqcounts) are costly.
+	 * Keep the two functions in sync.
+	 */
+
+	/*
+	 * The hash list is protected using RCU.
+	 *
+	 * Carefully use d_seq when comparing a candidate dentry, to avoid
+	 * races with d_move().
+	 *
+	 * It is possible that concurrent renames can mess up our list
+	 * walk here and result in missing our dentry, resulting in the
+	 * false-negative result. d_lookup() protects against concurrent
+	 * renames using rename_lock seqlock.
+	 *
+	 * See Documentation/vfs/dcache-locking.txt for more details.
+	 */
+	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+		struct inode *i;
+		const char *tname;
+		int tlen;
+
+		if (dentry->d_name.hash != hash)
+			continue;
+
+seqretry:
+		*seq = read_seqcount_begin(&dentry->d_seq);
+		if (dentry->d_parent != parent)
+			continue;
+		if (d_unhashed(dentry))
+			continue;
+		tlen = dentry->d_name.len;
+		tname = dentry->d_name.name;
+		i = dentry->d_inode;
+		prefetch(tname);
+		if (i)
+			prefetch(i);
+		/*
+		 * This seqcount check is required to ensure name and
+		 * len are loaded atomically, so as not to walk off the
+		 * edge of memory when walking. If we could load this
+		 * atomically some other way, we could drop this check.
+		 */
+		if (read_seqcount_retry(&dentry->d_seq, *seq))
+			goto seqretry;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			if (parent->d_op->d_compare(parent, *inode,
+						dentry, i,
+						tlen, tname, name))
+				continue;
+		} else {
+			if (dentry_cmp(tname, tlen, str, len))
+				continue;
+		}
+		/*
+		 * No extra seqcount check is required after the name
+		 * compare. The caller must perform a seqcount check in
+		 * order to do anything useful with the returned dentry
+		 * anyway.
+		 */
+		*inode = i;
+		return dentry;
+	}
+	return NULL;
+}
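
The load / re-check-d_seq loop above is how rcu-walk reads a dentry's name and parent with no lock at all: snapshot the sequence count, read the fields, and retry if the count moved (or is odd, meaning a writer is in the middle of an update). A self-contained C11 sketch of that reader/writer protocol, using atomics in place of the kernel seqcount primitives (illustrative only; the real seqcount has different helpers and lockdep hooks):

#include <stdatomic.h>
#include <stdio.h>

/* A toy seqcount protecting a two-field record, mirroring how rcu-walk
 * validates d_name/d_parent against d_seq. */
static _Atomic unsigned int seq;                 /* even = stable, odd = write in progress */
static _Atomic int point_x, point_y;

static void write_point(int x, int y)            /* writers are serialized elsewhere */
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&point_x, x, memory_order_relaxed);
	atomic_store_explicit(&point_y, y, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed);
}

static void read_point(int *x, int *y)           /* lockless reader */
{
	for (;;) {
		unsigned int s = atomic_load_explicit(&seq, memory_order_acquire);
		if (s & 1)
			continue;                /* writer active, try again */
		*x = atomic_load_explicit(&point_x, memory_order_relaxed);
		*y = atomic_load_explicit(&point_y, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(&seq, memory_order_relaxed) == s)
			return;                  /* counter unchanged: snapshot is consistent */
	}
}

int main(void)
{
	int x, y;
	write_point(3, 4);
	read_point(&x, &y);
	printf("(%d,%d)\n", x, y);
	return 0;
}
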
+
+/**
  * d_lookup - search for a dentry
  * @parent: parent dentry
  * @name: qstr of name we wish to find
@@ -1352,10 +1838,10 @@
  * dentry is returned. The caller must use dput to free the entry when it has
  * finished using it. %NULL is returned if the dentry does not exist.
  */
-struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
+struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
 {
-	struct dentry * dentry = NULL;
-	unsigned long seq;
+	struct dentry *dentry;
+	unsigned seq;
 
         do {
                 seq = read_seqbegin(&rename_lock);
@@ -1367,7 +1853,7 @@
 }
 EXPORT_SYMBOL(d_lookup);
 
-/*
+/**
  * __d_lookup - search for a dentry (racy)
  * @parent: parent dentry
  * @name: qstr of name we wish to find
@@ -1382,17 +1868,24 @@
  *
  * __d_lookup callers must be commented.
  */
-struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
+struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
 {
 	unsigned int len = name->len;
 	unsigned int hash = name->hash;
 	const unsigned char *str = name->name;
-	struct hlist_head *head = d_hash(parent,hash);
+	struct dcache_hash_bucket *b = d_hash(parent, hash);
+	struct hlist_bl_node *node;
 	struct dentry *found = NULL;
-	struct hlist_node *node;
 	struct dentry *dentry;
 
 	/*
+	 * Note: There is significant duplication with __d_lookup_rcu which is
+	 * required to prevent single threaded performance regressions
+	 * especially on architectures where smp_rmb (in seqcounts) are costly.
+	 * Keep the two functions in sync.
+	 */
+
+	/*
 	 * The hash list is protected using RCU.
 	 *
 	 * Take d_lock when comparing a candidate dentry, to avoid races
@@ -1407,25 +1900,16 @@
 	 */
 	rcu_read_lock();
 	
-	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
-		struct qstr *qstr;
+	hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+		const char *tname;
+		int tlen;
 
 		if (dentry->d_name.hash != hash)
 			continue;
-		if (dentry->d_parent != parent)
-			continue;
 
 		spin_lock(&dentry->d_lock);
-
-		/*
-		 * Recheck the dentry after taking the lock - d_move may have
-		 * changed things. Don't bother checking the hash because
-		 * we're about to compare the whole name anyway.
-		 */
 		if (dentry->d_parent != parent)
 			goto next;
-
-		/* non-existing due to RCU? */
 		if (d_unhashed(dentry))
 			goto next;
 
@@ -1433,18 +1917,19 @@
 		 * It is safe to compare names since d_move() cannot
 		 * change the qstr (protected by d_lock).
 		 */
-		qstr = &dentry->d_name;
-		if (parent->d_op && parent->d_op->d_compare) {
-			if (parent->d_op->d_compare(parent, qstr, name))
+		tlen = dentry->d_name.len;
+		tname = dentry->d_name.name;
+		if (parent->d_flags & DCACHE_OP_COMPARE) {
+			if (parent->d_op->d_compare(parent, parent->d_inode,
+						dentry, dentry->d_inode,
+						tlen, tname, name))
 				goto next;
 		} else {
-			if (qstr->len != len)
-				goto next;
-			if (memcmp(qstr->name, str, len))
+			if (dentry_cmp(tname, tlen, str, len))
 				goto next;
 		}
 
-		atomic_inc(&dentry->d_count);
+		dentry->d_count++;
 		found = dentry;
 		spin_unlock(&dentry->d_lock);
 		break;
@@ -1473,8 +1958,8 @@
 	 * routine may choose to leave the hash value unchanged.
 	 */
 	name->hash = full_name_hash(name->name, name->len);
-	if (dir->d_op && dir->d_op->d_hash) {
-		if (dir->d_op->d_hash(dir, name) < 0)
+	if (dir->d_flags & DCACHE_OP_HASH) {
+		if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
 			goto out;
 	}
 	dentry = d_lookup(dir, name);
@@ -1483,34 +1968,32 @@
 }
 
 /**
- * d_validate - verify dentry provided from insecure source
+ * d_validate - verify dentry provided from insecure source (deprecated)
  * @dentry: The dentry alleged to be valid child of @dparent
- * @dparent: The parent dentry (known to be valid)
+ * @dparent: The parent dentry (known to be valid)
  *
  * An insecure source has sent us a dentry, here we verify it and dget() it.
  * This is used by ncpfs in its readdir implementation.
  * Zero is returned if the dentry is invalid.
+ *
+ * This function is slow for big directories and is deprecated; do not use it.
  */
-int d_validate(struct dentry *dentry, struct dentry *parent)
+int d_validate(struct dentry *dentry, struct dentry *dparent)
 {
-	struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
-	struct hlist_node *node;
-	struct dentry *d;
+	struct dentry *child;
 
-	/* Check whether the ptr might be valid at all.. */
-	if (!kmem_ptr_validate(dentry_cache, dentry))
-		return 0;
-	if (dentry->d_parent != parent)
-		return 0;
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(d, node, head, d_hash) {
-		if (d == dentry) {
-			dget(dentry);
+	spin_lock(&dparent->d_lock);
+	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
+		if (dentry == child) {
+			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+			__dget_dlock(dentry);
+			spin_unlock(&dentry->d_lock);
+			spin_unlock(&dparent->d_lock);
 			return 1;
 		}
 	}
-	rcu_read_unlock();
+	spin_unlock(&dparent->d_lock);
+
 	return 0;
 }
 EXPORT_SYMBOL(d_validate);
@@ -1538,16 +2021,23 @@
  
 void d_delete(struct dentry * dentry)
 {
+	struct inode *inode;
 	int isdir = 0;
 	/*
 	 * Are we the only user?
 	 */
-	spin_lock(&dcache_lock);
+again:
 	spin_lock(&dentry->d_lock);
-	isdir = S_ISDIR(dentry->d_inode->i_mode);
-	if (atomic_read(&dentry->d_count) == 1) {
+	inode = dentry->d_inode;
+	isdir = S_ISDIR(inode->i_mode);
+	if (dentry->d_count == 1) {
+		if (inode && !spin_trylock(&inode->i_lock)) {
+			spin_unlock(&dentry->d_lock);
+			cpu_relax();
+			goto again;
+		}
 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-		dentry_iput(dentry);
+		dentry_unlink_inode(dentry);
 		fsnotify_nameremove(dentry, isdir);
 		return;
 	}
@@ -1556,17 +2046,18 @@
 		__d_drop(dentry);
 
 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
 
 	fsnotify_nameremove(dentry, isdir);
 }
 EXPORT_SYMBOL(d_delete);
 
-static void __d_rehash(struct dentry * entry, struct hlist_head *list)
+static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
 {
-
+	BUG_ON(!d_unhashed(entry));
+	spin_lock_bucket(b);
  	entry->d_flags &= ~DCACHE_UNHASHED;
- 	hlist_add_head_rcu(&entry->d_hash, list);
+	hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
+	spin_unlock_bucket(b);
 }
 
 static void _d_rehash(struct dentry * entry)
@@ -1583,25 +2074,39 @@
  
 void d_rehash(struct dentry * entry)
 {
-	spin_lock(&dcache_lock);
 	spin_lock(&entry->d_lock);
 	_d_rehash(entry);
 	spin_unlock(&entry->d_lock);
-	spin_unlock(&dcache_lock);
 }
 EXPORT_SYMBOL(d_rehash);
 
-/*
- * When switching names, the actual string doesn't strictly have to
- * be preserved in the target - because we're dropping the target
- * anyway. As such, we can just do a simple memcpy() to copy over
- * the new name before we switch.
+/**
+ * dentry_update_name_case - update case insensitive dentry with a new name
+ * @dentry: dentry to be updated
+ * @name: new name
  *
- * Note that we have to be a lot more careful about getting the hash
- * switched - we have to switch the hash value properly even if it
- * then no longer matches the actual (corrupted) string of the target.
- * The hash value has to match the hash queue that the dentry is on..
+ * Update a case insensitive dentry with new case of name.
+ *
+ * dentry must have been returned by d_lookup with name @name. Old and new
+ * name lengths must match (ie. no d_compare which allows mismatched name
+ * lengths).
+ *
+ * Parent inode i_mutex must be held over d_lookup and into this call (to
+ * keep renames and concurrent inserts, and readdir(2) away).
  */
+void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
+{
+	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
+
+	spin_lock(&dentry->d_lock);
+	write_seqcount_begin(&dentry->d_seq);
+	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
+	write_seqcount_end(&dentry->d_seq);
+	spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(dentry_update_name_case);
+
 static void switch_names(struct dentry *dentry, struct dentry *target)
 {
 	if (dname_external(target)) {
@@ -1643,54 +2148,84 @@
 	swap(dentry->d_name.len, target->d_name.len);
 }
 
+static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
+{
+	/*
+	 * XXXX: do we really need to take target->d_lock?
+	 */
+	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
+		spin_lock(&target->d_parent->d_lock);
+	else {
+		if (d_ancestor(dentry->d_parent, target->d_parent)) {
+			spin_lock(&dentry->d_parent->d_lock);
+			spin_lock_nested(&target->d_parent->d_lock,
+						DENTRY_D_LOCK_NESTED);
+		} else {
+			spin_lock(&target->d_parent->d_lock);
+			spin_lock_nested(&dentry->d_parent->d_lock,
+						DENTRY_D_LOCK_NESTED);
+		}
+	}
+	if (target < dentry) {
+		spin_lock_nested(&target->d_lock, 2);
+		spin_lock_nested(&dentry->d_lock, 3);
+	} else {
+		spin_lock_nested(&dentry->d_lock, 2);
+		spin_lock_nested(&target->d_lock, 3);
+	}
+}
+
+static void dentry_unlock_parents_for_move(struct dentry *dentry,
+					struct dentry *target)
+{
+	if (target->d_parent != dentry->d_parent)
+		spin_unlock(&dentry->d_parent->d_lock);
+	if (target->d_parent != target)
+		spin_unlock(&target->d_parent->d_lock);
+}
+
 /*
- * We cannibalize "target" when moving dentry on top of it,
- * because it's going to be thrown away anyway. We could be more
- * polite about it, though.
+ * When switching names, the actual string doesn't strictly have to
+ * be preserved in the target - because we're dropping the target
+ * anyway. As such, we can just do a simple memcpy() to copy over
+ * the new name before we switch.
  *
- * This forceful removal will result in ugly /proc output if
- * somebody holds a file open that got deleted due to a rename.
- * We could be nicer about the deleted file, and let it show
- * up under the name it had before it was deleted rather than
- * under the original name of the file that was moved on top of it.
+ * Note that we have to be a lot more careful about getting the hash
+ * switched - we have to switch the hash value properly even if it
+ * then no longer matches the actual (corrupted) string of the target.
+ * The hash value has to match the hash queue that the dentry is on..
  */
- 
 /*
- * d_move_locked - move a dentry
+ * d_move - move a dentry
  * @dentry: entry to move
  * @target: new dentry
  *
  * Update the dcache to reflect the move of a file name. Negative
  * dcache entries should not be moved in this way.
  */
-static void d_move_locked(struct dentry * dentry, struct dentry * target)
+void d_move(struct dentry * dentry, struct dentry * target)
 {
-	struct hlist_head *list;
-
 	if (!dentry->d_inode)
 		printk(KERN_WARNING "VFS: moving negative dcache entry\n");
 
+	BUG_ON(d_ancestor(dentry, target));
+	BUG_ON(d_ancestor(target, dentry));
+
 	write_seqlock(&rename_lock);
+
+	dentry_lock_for_move(dentry, target);
+
+	write_seqcount_begin(&dentry->d_seq);
+	write_seqcount_begin(&target->d_seq);
+
+	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
+
 	/*
-	 * XXXX: do we really need to take target->d_lock?
+	 * Move the dentry to the target hash queue. Don't bother checking
+	 * for the same hash queue because of how unlikely it is.
 	 */
-	if (target < dentry) {
-		spin_lock(&target->d_lock);
-		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-	} else {
-		spin_lock(&dentry->d_lock);
-		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
-	}
-
-	/* Move the dentry to the target hash queue, if on different bucket */
-	if (d_unhashed(dentry))
-		goto already_unhashed;
-
-	hlist_del_rcu(&dentry->d_hash);
-
-already_unhashed:
-	list = d_hash(target->d_parent, target->d_name.hash);
-	__d_rehash(dentry, list);
+	__d_drop(dentry);
+	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
 
 	/* Unhash the target: dput() will then get rid of it */
 	__d_drop(target);
@@ -1715,27 +2250,16 @@
 	}
 
 	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+
+	write_seqcount_end(&target->d_seq);
+	write_seqcount_end(&dentry->d_seq);
+
+	dentry_unlock_parents_for_move(dentry, target);
 	spin_unlock(&target->d_lock);
 	fsnotify_d_move(dentry);
 	spin_unlock(&dentry->d_lock);
 	write_sequnlock(&rename_lock);
 }
-
-/**
- * d_move - move a dentry
- * @dentry: entry to move
- * @target: new dentry
- *
- * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
- */
-
-void d_move(struct dentry * dentry, struct dentry * target)
-{
-	spin_lock(&dcache_lock);
-	d_move_locked(dentry, target);
-	spin_unlock(&dcache_lock);
-}
 EXPORT_SYMBOL(d_move);
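/*
 * A minimal sketch of the lock-ordering rule that dentry_lock_for_move()
 * above relies on: parents are locked before children (d_ancestor()
 * decides which side holds the ancestor), and otherwise-unrelated locks
 * are taken in address order so that two concurrent movers cannot
 * deadlock. example_lock_pair() is a hypothetical helper showing only
 * the address-ordering half of that rule, not the kernel's code.
 */
#include <linux/spinlock.h>

static void example_lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);
		return;
	}
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}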
 
 /**
@@ -1761,13 +2285,13 @@
  * This helper attempts to cope with remotely renamed directories
  *
  * It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the dcache_lock
+ * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
  *
  * Note: If ever the locking in lock_rename() changes, then please
  * remember to update this too...
  */
-static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
-	__releases(dcache_lock)
+static struct dentry *__d_unalias(struct inode *inode,
+		struct dentry *dentry, struct dentry *alias)
 {
 	struct mutex *m1 = NULL, *m2 = NULL;
 	struct dentry *ret;
@@ -1790,10 +2314,10 @@
 		goto out_err;
 	m2 = &alias->d_parent->d_inode->i_mutex;
 out_unalias:
-	d_move_locked(alias, dentry);
+	d_move(alias, dentry);
 	ret = alias;
 out_err:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 	if (m2)
 		mutex_unlock(m2);
 	if (m1)
@@ -1804,17 +2328,23 @@
 /*
  * Prepare an anonymous dentry for life in the superblock's dentry tree as a
  * named dentry in place of the dentry to be replaced.
+ * returns with anon->d_lock held!
  */
 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
 {
 	struct dentry *dparent, *aparent;
 
-	switch_names(dentry, anon);
-	swap(dentry->d_name.hash, anon->d_name.hash);
+	dentry_lock_for_move(anon, dentry);
+
+	write_seqcount_begin(&dentry->d_seq);
+	write_seqcount_begin(&anon->d_seq);
 
 	dparent = dentry->d_parent;
 	aparent = anon->d_parent;
 
+	switch_names(dentry, anon);
+	swap(dentry->d_name.hash, anon->d_name.hash);
+
 	dentry->d_parent = (aparent == anon) ? dentry : aparent;
 	list_del(&dentry->d_u.d_child);
 	if (!IS_ROOT(dentry))
@@ -1829,6 +2359,13 @@
 	else
 		INIT_LIST_HEAD(&anon->d_u.d_child);
 
+	write_seqcount_end(&dentry->d_seq);
+	write_seqcount_end(&anon->d_seq);
+
+	dentry_unlock_parents_for_move(anon, dentry);
+	spin_unlock(&dentry->d_lock);
+
+	/* anon->d_lock still locked, returns locked */
 	anon->d_flags &= ~DCACHE_DISCONNECTED;
 }
 
@@ -1846,14 +2383,15 @@
 
 	BUG_ON(!d_unhashed(dentry));
 
-	spin_lock(&dcache_lock);
-
 	if (!inode) {
 		actual = dentry;
 		__d_instantiate(dentry, NULL);
-		goto found_lock;
+		d_rehash(actual);
+		goto out_nolock;
 	}
 
+	spin_lock(&inode->i_lock);
+
 	if (S_ISDIR(inode->i_mode)) {
 		struct dentry *alias;
 
@@ -1864,13 +2402,12 @@
 			/* Is this an anonymous mountpoint that we could splice
 			 * into our tree? */
 			if (IS_ROOT(alias)) {
-				spin_lock(&alias->d_lock);
 				__d_materialise_dentry(dentry, alias);
 				__d_drop(alias);
 				goto found;
 			}
 			/* Nope, but we must(!) avoid directory aliasing */
-			actual = __d_unalias(dentry, alias);
+			actual = __d_unalias(inode, dentry, alias);
 			if (IS_ERR(actual))
 				dput(alias);
 			goto out_nolock;
@@ -1881,15 +2418,14 @@
 	actual = __d_instantiate_unique(dentry, inode);
 	if (!actual)
 		actual = dentry;
-	else if (unlikely(!d_unhashed(actual)))
-		goto shouldnt_be_hashed;
+	else
+		BUG_ON(!d_unhashed(actual));
 
-found_lock:
 	spin_lock(&actual->d_lock);
 found:
 	_d_rehash(actual);
 	spin_unlock(&actual->d_lock);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 out_nolock:
 	if (actual == dentry) {
 		security_d_instantiate(dentry, inode);
@@ -1898,10 +2434,6 @@
 
 	iput(inode);
 	return actual;
-
-shouldnt_be_hashed:
-	spin_unlock(&dcache_lock);
-	BUG();
 }
 EXPORT_SYMBOL_GPL(d_materialise_unique);
 
@@ -1921,14 +2453,13 @@
 }
 
 /**
- * Prepend path string to a buffer
- *
+ * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
  * @root: root vfsmnt/dentry (may be modified by this function)
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
- * Caller holds the dcache_lock.
+ * Caller holds the rename_lock.
  *
  * If path is not reachable from the supplied root, then the value of
  * root is changed (without modifying refcounts).
@@ -1956,7 +2487,9 @@
 		}
 		parent = dentry->d_parent;
 		prefetch(parent);
+		spin_lock(&dentry->d_lock);
 		error = prepend_name(buffer, buflen, &dentry->d_name);
+		spin_unlock(&dentry->d_lock);
 		if (!error)
 			error = prepend(buffer, buflen, "/", 1);
 		if (error)
@@ -2012,9 +2545,9 @@
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	error = prepend_path(path, root, &res, &buflen);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 
 	if (error)
 		return ERR_PTR(error);
@@ -2076,12 +2609,12 @@
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
 	get_fs_root(current->fs, &root);
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	tmp = root;
 	error = path_with_deleted(path, &tmp, &res, &buflen);
 	if (error)
 		res = ERR_PTR(error);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 	path_put(&root);
 	return res;
 }
@@ -2107,12 +2640,12 @@
 		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
 	get_fs_root(current->fs, &root);
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	tmp = root;
 	error = path_with_deleted(path, &tmp, &res, &buflen);
 	if (!error && !path_equal(&tmp, &root))
 		error = prepend_unreachable(&res, &buflen);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 	path_put(&root);
 	if (error)
 		res =  ERR_PTR(error);
@@ -2144,7 +2677,7 @@
 /*
  * Write full pathname from the root of the filesystem into the buffer.
  */
-char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
+static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
 {
 	char *end = buf + buflen;
 	char *retval;
@@ -2158,10 +2691,13 @@
 
 	while (!IS_ROOT(dentry)) {
 		struct dentry *parent = dentry->d_parent;
+		int error;
 
 		prefetch(parent);
-		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
-		    (prepend(&end, &buflen, "/", 1) != 0))
+		spin_lock(&dentry->d_lock);
+		error = prepend_name(&end, &buflen, &dentry->d_name);
+		spin_unlock(&dentry->d_lock);
+		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
 			goto Elong;
 
 		retval = end;
@@ -2171,14 +2707,25 @@
 Elong:
 	return ERR_PTR(-ENAMETOOLONG);
 }
-EXPORT_SYMBOL(__dentry_path);
+
+char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
+{
+	char *retval;
+
+	write_seqlock(&rename_lock);
+	retval = __dentry_path(dentry, buf, buflen);
+	write_sequnlock(&rename_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(dentry_path_raw);
 
 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
 {
 	char *p = NULL;
 	char *retval;
 
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	if (d_unlinked(dentry)) {
 		p = buf + buflen;
 		if (prepend(&p, &buflen, "//deleted", 10) != 0)
@@ -2186,12 +2733,11 @@
 		buflen++;
 	}
 	retval = __dentry_path(dentry, buf, buflen);
-	spin_unlock(&dcache_lock);
+	write_sequnlock(&rename_lock);
 	if (!IS_ERR(retval) && p)
 		*p = '/';	/* restore '/' overridden with '\0' */
 	return retval;
 Elong:
-	spin_unlock(&dcache_lock);
 	return ERR_PTR(-ENAMETOOLONG);
 }
 
@@ -2225,7 +2771,7 @@
 	get_fs_root_and_pwd(current->fs, &root, &pwd);
 
 	error = -ENOENT;
-	spin_lock(&dcache_lock);
+	write_seqlock(&rename_lock);
 	if (!d_unlinked(pwd.dentry)) {
 		unsigned long len;
 		struct path tmp = root;
@@ -2234,7 +2780,7 @@
 
 		prepend(&cwd, &buflen, "\0", 1);
 		error = prepend_path(&pwd, &tmp, &cwd, &buflen);
-		spin_unlock(&dcache_lock);
+		write_sequnlock(&rename_lock);
 
 		if (error)
 			goto out;
@@ -2253,8 +2799,9 @@
 			if (copy_to_user(buf, cwd, len))
 				error = -EFAULT;
 		}
-	} else
-		spin_unlock(&dcache_lock);
+	} else {
+		write_sequnlock(&rename_lock);
+	}
 
 out:
 	path_put(&pwd);
@@ -2282,25 +2829,25 @@
 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
 {
 	int result;
-	unsigned long seq;
+	unsigned seq;
 
 	if (new_dentry == old_dentry)
 		return 1;
 
-	/*
-	 * Need rcu_readlock to protect against the d_parent trashing
-	 * due to d_move
-	 */
-	rcu_read_lock();
 	do {
 		/* for restarting inner loop in case of seq retry */
 		seq = read_seqbegin(&rename_lock);
+		/*
+		 * Need rcu_read_lock to protect against the d_parent trashing
+		 * due to d_move
+		 */
+		rcu_read_lock();
 		if (d_ancestor(old_dentry, new_dentry))
 			result = 1;
 		else
 			result = 0;
+		rcu_read_unlock();
 	} while (read_seqretry(&rename_lock, seq));
-	rcu_read_unlock();
 
 	return result;
 }
@@ -2332,10 +2879,15 @@
 
 void d_genocide(struct dentry *root)
 {
-	struct dentry *this_parent = root;
+	struct dentry *this_parent;
 	struct list_head *next;
+	unsigned seq;
+	int locked = 0;
 
-	spin_lock(&dcache_lock);
+	seq = read_seqbegin(&rename_lock);
+again:
+	this_parent = root;
+	spin_lock(&this_parent->d_lock);
 repeat:
 	next = this_parent->d_subdirs.next;
 resume:
@@ -2343,21 +2895,62 @@
 		struct list_head *tmp = next;
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
-		if (d_unhashed(dentry)||!dentry->d_inode)
+
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+		if (d_unhashed(dentry) || !dentry->d_inode) {
+			spin_unlock(&dentry->d_lock);
 			continue;
+		}
 		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
 			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
 			goto repeat;
 		}
-		atomic_dec(&dentry->d_count);
+		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+			dentry->d_flags |= DCACHE_GENOCIDE;
+			dentry->d_count--;
+		}
+		spin_unlock(&dentry->d_lock);
 	}
 	if (this_parent != root) {
-		next = this_parent->d_u.d_child.next;
-		atomic_dec(&this_parent->d_count);
-		this_parent = this_parent->d_parent;
+		struct dentry *tmp;
+		struct dentry *child;
+
+		tmp = this_parent->d_parent;
+		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
+			this_parent->d_flags |= DCACHE_GENOCIDE;
+			this_parent->d_count--;
+		}
+		rcu_read_lock();
+		spin_unlock(&this_parent->d_lock);
+		child = this_parent;
+		this_parent = tmp;
+		spin_lock(&this_parent->d_lock);
+		/* might go back up the wrong parent if we have had a rename
+		 * or deletion */
+		if (this_parent != child->d_parent ||
+			 (!locked && read_seqretry(&rename_lock, seq))) {
+			spin_unlock(&this_parent->d_lock);
+			rcu_read_unlock();
+			goto rename_retry;
+		}
+		rcu_read_unlock();
+		next = child->d_u.d_child.next;
 		goto resume;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&this_parent->d_lock);
+	if (!locked && read_seqretry(&rename_lock, seq))
+		goto rename_retry;
+	if (locked)
+		write_sequnlock(&rename_lock);
+	return;
+
+rename_retry:
+	locked = 1;
+	write_seqlock(&rename_lock);
+	goto again;
 }
 
 /**
@@ -2411,7 +3004,7 @@
 
 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct hlist_head),
+					sizeof(struct dcache_hash_bucket),
 					dhash_entries,
 					13,
 					HASH_EARLY,
@@ -2420,16 +3013,13 @@
 					0);
 
 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
 }
 
 static void __init dcache_init(void)
 {
 	int loop;
 
-	percpu_counter_init(&nr_dentry, 0);
-	percpu_counter_init(&nr_dentry_unused, 0);
-
 	/* 
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
@@ -2446,7 +3036,7 @@
 
 	dentry_hashtable =
 		alloc_large_system_hash("Dentry cache",
-					sizeof(struct hlist_head),
+					sizeof(struct dcache_hash_bucket),
 					dhash_entries,
 					13,
 					0,
@@ -2455,7 +3045,7 @@
 					0);
 
 	for (loop = 0; loop < (1 << d_hash_shift); loop++)
-		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
+		INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
 }
 
 /* SLAB cache for __getname() consumers */
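/*
 * A minimal usage sketch for the dentry_path_raw() export added above:
 * format a dentry's filesystem-relative path into a scratch buffer; the
 * function takes rename_lock internally, so no extra locking is needed
 * here. example_log_dentry_path() is hypothetical, and the use of
 * __getname()/__putname() as a PATH_MAX scratch buffer is an assumption
 * made for illustration, not something this patch requires.
 */
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/limits.h>

static void example_log_dentry_path(struct dentry *dentry)
{
	char *buf = __getname();	/* PATH_MAX-sized scratch buffer */
	char *p;

	if (!buf)
		return;
	p = dentry_path_raw(dentry, buf, PATH_MAX);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "dentry path: %s\n", p);
	__putname(buf);
}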
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 37a34c2..9c64ae9 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -63,6 +63,9 @@
 #define NEEDED_RMEM (4*1024*1024)
 #define CONN_HASH_SIZE 32
 
+/* Number of messages to send before rescheduling */
+#define MAX_SEND_MSG_COUNT 25
+
 struct cbuf {
 	unsigned int base;
 	unsigned int len;
@@ -108,6 +111,7 @@
 #define CF_INIT_PENDING 4
 #define CF_IS_OTHERCON 5
 #define CF_CLOSE 6
+#define CF_APP_LIMITED 7
 	struct list_head writequeue;  /* List of outgoing writequeue_entries */
 	spinlock_t writequeue_lock;
 	int (*rx_action) (struct connection *);	/* What to do when active */
@@ -295,7 +299,17 @@
 {
 	struct connection *con = sock2con(sk);
 
-	if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+	if (!con)
+		return;
+
+	clear_bit(SOCK_NOSPACE, &con->sock->flags);
+
+	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
+		con->sock->sk->sk_write_pending--;
+		clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags);
+	}
+
+	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
 		queue_work(send_workqueue, &con->swork);
 }
 
@@ -915,6 +929,7 @@
 	struct sockaddr_storage saddr, src_addr;
 	int addr_len;
 	struct socket *sock = NULL;
+	int one = 1;
 
 	if (con->nodeid == 0) {
 		log_print("attempt to connect sock 0 foiled");
@@ -960,6 +975,11 @@
 	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
 
 	log_print("connecting to %d", con->nodeid);
+
+	/* Turn off Nagle's algorithm */
+	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
+			  sizeof(one));
+
 	result =
 		sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
 				   O_NONBLOCK);
@@ -1011,6 +1031,10 @@
 		goto create_out;
 	}
 
+	/* Turn off Nagle's algorithm */
+	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
+			  sizeof(one));
+
 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
 				   (char *)&one, sizeof(one));
 
@@ -1297,6 +1321,7 @@
 	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
 	struct writequeue_entry *e;
 	int len, offset;
+	int count = 0;
 
 	mutex_lock(&con->sock_mutex);
 	if (con->sock == NULL)
@@ -1319,14 +1344,27 @@
 			ret = kernel_sendpage(con->sock, e->page, offset, len,
 					      msg_flags);
 			if (ret == -EAGAIN || ret == 0) {
+				if (ret == -EAGAIN &&
+				    test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) &&
+				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
+					/* Notify TCP that we're limited by the
+					 * application window size.
+					 */
+					set_bit(SOCK_NOSPACE, &con->sock->flags);
+					con->sock->sk->sk_write_pending++;
+				}
 				cond_resched();
 				goto out;
 			}
 			if (ret <= 0)
 				goto send_error;
 		}
-			/* Don't starve people filling buffers */
+
+		/* Don't starve people filling buffers */
+		if (++count >= MAX_SEND_MSG_COUNT) {
 			cond_resched();
+			count = 0;
+		}
 
 		spin_lock(&con->writequeue_lock);
 		e->offset += ret;
@@ -1430,20 +1468,19 @@
 
 static int work_start(void)
 {
-	int error;
-	recv_workqueue = create_workqueue("dlm_recv");
-	error = IS_ERR(recv_workqueue);
-	if (error) {
-		log_print("can't start dlm_recv %d", error);
-		return error;
+	recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM |
+					 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+	if (!recv_workqueue) {
+		log_print("can't start dlm_recv");
+		return -ENOMEM;
 	}
 
-	send_workqueue = create_singlethread_workqueue("dlm_send");
-	error = IS_ERR(send_workqueue);
-	if (error) {
-		log_print("can't start dlm_send %d", error);
+	send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM |
+					 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+	if (!send_workqueue) {
+		log_print("can't start dlm_send");
 		destroy_workqueue(recv_workqueue);
-		return error;
+		return -ENOMEM;
 	}
 
 	return 0;
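/*
 * A minimal sketch of the send-batching pattern introduced above: process
 * at most MAX_SEND_MSG_COUNT messages before calling cond_resched(), so a
 * connection with a deep write queue cannot monopolise the send worker.
 * struct example_msg, example_send_all() and the send_one callback are
 * hypothetical stand-ins, not dlm code.
 */
#include <linux/list.h>
#include <linux/sched.h>

#define EXAMPLE_MAX_SEND_MSG_COUNT 25

struct example_msg {
	struct list_head list;
	/* payload omitted */
};

static void example_send_all(struct list_head *queue,
			     void (*send_one)(struct example_msg *msg))
{
	struct example_msg *msg, *tmp;
	int count = 0;

	list_for_each_entry_safe(msg, tmp, queue, list) {
		send_one(msg);
		list_del(&msg->list);
		if (++count >= EXAMPLE_MAX_SEND_MSG_COUNT) {
			cond_resched();		/* let other work run */
			count = 0;
		}
	}
}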
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 906e803..6fc4f31 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -44,12 +44,17 @@
  */
 static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
-	struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+	struct dentry *lower_dentry;
+	struct vfsmount *lower_mnt;
 	struct dentry *dentry_save;
 	struct vfsmount *vfsmount_save;
 	int rc = 1;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	lower_dentry = ecryptfs_dentry_to_lower(dentry);
+	lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
 	if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
 		goto out;
 	dentry_save = nd->path.dentry;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 9d1a22d..64ff023 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -260,7 +260,7 @@
 				   ecryptfs_dentry->d_parent));
 	lower_inode = lower_dentry->d_inode;
 	fsstack_copy_attr_atime(ecryptfs_dir_inode, lower_dir_dentry->d_inode);
-	BUG_ON(!atomic_read(&lower_dentry->d_count));
+	BUG_ON(!lower_dentry->d_count);
 	ecryptfs_set_dentry_private(ecryptfs_dentry,
 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
 						     GFP_KERNEL));
@@ -441,7 +441,6 @@
 	struct qstr lower_name;
 	int rc = 0;
 
-	ecryptfs_dentry->d_op = &ecryptfs_dops;
 	if ((ecryptfs_dentry->d_name.len == 1
 	     && !strcmp(ecryptfs_dentry->d_name.name, "."))
 	    || (ecryptfs_dentry->d_name.len == 2
@@ -454,7 +453,7 @@
 	lower_name.hash = ecryptfs_dentry->d_name.hash;
 	if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
 		rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
-						    &lower_name);
+				lower_dir_dentry->d_inode, &lower_name);
 		if (rc < 0)
 			goto out_d_drop;
 	}
@@ -489,7 +488,7 @@
 	lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
 	if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
 		rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
-						    &lower_name);
+				lower_dir_dentry->d_inode, &lower_name);
 		if (rc < 0)
 			goto out_d_drop;
 	}
@@ -980,8 +979,10 @@
 }
 
 static int
-ecryptfs_permission(struct inode *inode, int mask)
+ecryptfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
 	return inode_permission(ecryptfs_inode_to_lower(inode), mask);
 }
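/*
 * A minimal sketch of the rcu-walk contract the ecryptfs hunks above
 * follow: a ->d_revalidate() or ->permission() implementation that may
 * sleep must return -ECHILD when called in rcu-walk mode (LOOKUP_RCU /
 * IPERM_FLAG_RCU) so the VFS retries the lookup in ref-walk mode.
 * example_fs_permission() and example_do_check() are hypothetical.
 */
#include <linux/fs.h>
#include <linux/errno.h>

static int example_do_check(struct inode *inode, int mask)
{
	/* Hypothetical slow path: may take mutexes, read ACLs from disk. */
	return 0;
}

static int example_fs_permission(struct inode *inode, int mask,
				 unsigned int flags)
{
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;	/* cannot sleep here; fall back to ref-walk */

	return example_do_check(inode, mask);
}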
 
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index a9dbd62..d3b28ab 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -141,25 +141,12 @@
 	return rc;
 }
 
-/**
- * ecryptfs_interpose
- * @lower_dentry: Existing dentry in the lower filesystem
- * @dentry: ecryptfs' dentry
- * @sb: ecryptfs's super_block
- * @flags: flags to govern behavior of interpose procedure
- *
- * Interposes upper and lower dentries.
- *
- * Returns zero on success; non-zero otherwise
- */
-int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
-		       struct super_block *sb, u32 flags)
+static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+		       struct super_block *sb)
 {
-	struct inode *lower_inode;
 	struct inode *inode;
 	int rc = 0;
 
-	lower_inode = lower_dentry->d_inode;
 	if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) {
 		rc = -EXDEV;
 		goto out;
@@ -189,17 +176,38 @@
 	if (special_file(lower_inode->i_mode))
 		init_special_inode(inode, lower_inode->i_mode,
 				   lower_inode->i_rdev);
-	dentry->d_op = &ecryptfs_dops;
 	fsstack_copy_attr_all(inode, lower_inode);
 	/* This size will be overwritten for real files w/ headers and
 	 * other metadata */
 	fsstack_copy_inode_size(inode, lower_inode);
+	return inode;
+out:
+	return ERR_PTR(rc);
+}
+
+/**
+ * ecryptfs_interpose
+ * @lower_dentry: Existing dentry in the lower filesystem
+ * @dentry: ecryptfs' dentry
+ * @sb: ecryptfs's super_block
+ * @flags: flags to govern behavior of interpose procedure
+ *
+ * Interposes upper and lower dentries.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
+		       struct super_block *sb, u32 flags)
+{
+	struct inode *lower_inode = lower_dentry->d_inode;
+	struct inode *inode = ecryptfs_get_inode(lower_inode, sb);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
 	if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD)
 		d_add(dentry, inode);
 	else
 		d_instantiate(dentry, inode);
-out:
-	return rc;
+	return 0;
 }
 
 enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
@@ -492,59 +500,11 @@
 static struct file_system_type ecryptfs_fs_type;
 
 /**
- * ecryptfs_read_super
- * @sb: The ecryptfs super block
- * @dev_name: The path to mount over
- *
- * Read the super block of the lower filesystem, and use
- * ecryptfs_interpose to create our initial inode and super block
- * struct.
- */
-static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
-{
-	struct path path;
-	int rc;
-
-	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
-	if (rc) {
-		ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
-		goto out;
-	}
-	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
-		rc = -EINVAL;
-		printk(KERN_ERR "Mount on filesystem of type "
-			"eCryptfs explicitly disallowed due to "
-			"known incompatibilities\n");
-		goto out_free;
-	}
-	ecryptfs_set_superblock_lower(sb, path.dentry->d_sb);
-	sb->s_maxbytes = path.dentry->d_sb->s_maxbytes;
-	sb->s_blocksize = path.dentry->d_sb->s_blocksize;
-	ecryptfs_set_dentry_lower(sb->s_root, path.dentry);
-	ecryptfs_set_dentry_lower_mnt(sb->s_root, path.mnt);
-	rc = ecryptfs_interpose(path.dentry, sb->s_root, sb, 0);
-	if (rc)
-		goto out_free;
-	rc = 0;
-	goto out;
-out_free:
-	path_put(&path);
-out:
-	return rc;
-}
-
-/**
  * ecryptfs_get_sb
  * @fs_type
  * @flags
  * @dev_name: The path to mount over
  * @raw_data: The options passed into the kernel
- *
- * The whole ecryptfs_get_sb process is broken into 3 functions:
- * ecryptfs_parse_options(): handle options passed to ecryptfs, if any
- * ecryptfs_read_super(): this accesses the lower filesystem and uses
- *                        ecryptfs_interpose to perform most of the linking
- * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c)
  */
 static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
 			const char *dev_name, void *raw_data)
@@ -553,6 +513,8 @@
 	struct ecryptfs_sb_info *sbi;
 	struct ecryptfs_dentry_info *root_info;
 	const char *err = "Getting sb failed";
+	struct inode *inode;
+	struct path path;
 	int rc;
 
 	sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
@@ -575,10 +537,8 @@
 
 	s->s_flags = flags;
 	rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
-	if (rc) {
-		deactivate_locked_super(s);
-		goto out;
-	}
+	if (rc)
+		goto out1;
 
 	ecryptfs_set_superblock_private(s, sbi);
 	s->s_bdi = &sbi->bdi;
@@ -586,34 +546,54 @@
 	/* ->kill_sb() will take care of sbi after that point */
 	sbi = NULL;
 	s->s_op = &ecryptfs_sops;
+	s->s_d_op = &ecryptfs_dops;
+
+	err = "Reading sb failed";
+	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+	if (rc) {
+		ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
+		goto out1;
+	}
+	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
+		rc = -EINVAL;
+		printk(KERN_ERR "Mount on filesystem of type "
+			"eCryptfs explicitly disallowed due to "
+			"known incompatibilities\n");
+		goto out_free;
+	}
+	ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
+	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+	s->s_blocksize = path.dentry->d_sb->s_blocksize;
+
+	inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+	rc = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_free;
+
+	s->s_root = d_alloc_root(inode);
+	if (!s->s_root) {
+		iput(inode);
+		rc = -ENOMEM;
+		goto out_free;
+	}
 
 	rc = -ENOMEM;
-	s->s_root = d_alloc(NULL, &(const struct qstr) {
-			     .hash = 0,.name = "/",.len = 1});
-	if (!s->s_root) {
-		deactivate_locked_super(s);
-		goto out;
-	}
-	s->s_root->d_op = &ecryptfs_dops;
-	s->s_root->d_sb = s;
-	s->s_root->d_parent = s->s_root;
-
 	root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
-	if (!root_info) {
-		deactivate_locked_super(s);
-		goto out;
-	}
+	if (!root_info)
+		goto out_free;
+
 	/* ->kill_sb() will take care of root_info */
 	ecryptfs_set_dentry_private(s->s_root, root_info);
+	ecryptfs_set_dentry_lower(s->s_root, path.dentry);
+	ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+
 	s->s_flags |= MS_ACTIVE;
-	rc = ecryptfs_read_super(s, dev_name);
-	if (rc) {
-		deactivate_locked_super(s);
-		err = "Reading sb failed";
-		goto out;
-	}
 	return dget(s->s_root);
 
+out_free:
+	path_put(&path);
+out1:
+	deactivate_locked_super(s);
 out:
 	if (sbi) {
 		ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 2720178..3042fe1 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -62,6 +62,16 @@
 	return inode;
 }
 
+static void ecryptfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	struct ecryptfs_inode_info *inode_info;
+	inode_info = ecryptfs_inode_to_private(inode);
+
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
+}
+
 /**
  * ecryptfs_destroy_inode
  * @inode: The ecryptfs inode
@@ -88,7 +98,7 @@
 		}
 	}
 	ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
-	kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
+	call_rcu(&inode->i_rcu, ecryptfs_i_callback);
 }
 
 /**
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 5073a07..0f31acb 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -65,9 +65,16 @@
 	return &ei->vfs_inode;
 }
 
+static void efs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
+}
+
 static void efs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
+	call_rcu(&inode->i_rcu, efs_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8cf0724..cc8a9b7 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -217,7 +217,7 @@
  * Configuration options available inside /proc/sys/fs/epoll/
  */
 /* Maximum number of epoll watched descriptors, per user */
-static int max_user_watches __read_mostly;
+static long max_user_watches __read_mostly;
 
 /*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
@@ -240,16 +240,18 @@
 
 #include <linux/sysctl.h>
 
-static int zero;
+static long zero;
+static long long_max = LONG_MAX;
 
 ctl_table epoll_table[] = {
 	{
 		.procname	= "max_user_watches",
 		.data		= &max_user_watches,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(max_user_watches),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &zero,
+		.extra2		= &long_max,
 	},
 	{ }
 };
@@ -561,7 +563,7 @@
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
-	atomic_dec(&ep->user->epoll_watches);
+	atomic_long_dec(&ep->user->epoll_watches);
 
 	return 0;
 }
@@ -898,11 +900,12 @@
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
+	long user_watches;
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
-		     max_user_watches))
+	user_watches = atomic_long_read(&ep->user->epoll_watches);
+	if (unlikely(user_watches >= max_user_watches))
 		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		return -ENOMEM;
@@ -966,7 +969,7 @@
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	atomic_inc(&ep->user->epoll_watches);
+	atomic_long_inc(&ep->user->epoll_watches);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1426,6 +1429,7 @@
 	 */
 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 		EP_ITEM_COST;
+	BUG_ON(max_user_watches < 0);
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_nested_calls_init(&poll_safewake_ncalls);
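/*
 * A small userspace sketch of why max_user_watches moves from int to long
 * above: roughly 4% of low memory (the /25 in the init code) is reserved
 * for epoll watches, and with an assumed per-watch cost of about 128 bytes
 * (the real EP_ITEM_COST depends on struct sizes) the computed limit
 * exceeds INT_MAX on 64-bit machines with several TiB of RAM, so the
 * sysctl must store and round-trip a long via proc_doulongvec_minmax.
 * The numbers below are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint64_t lowmem_bytes = 16ULL << 40;	/* hypothetical 16 TiB box */
	uint64_t per_watch = 128;		/* assumed EP_ITEM_COST */
	uint64_t watches = (lowmem_bytes / 25) / per_watch;

	printf("max_user_watches ~ %llu (INT_MAX = %d)\n",
	       (unsigned long long)watches, INT_MAX);
	return 0;
}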
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 79c3ae6..8c6c466 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -150,12 +150,19 @@
 	return &oi->vfs_inode;
 }
 
+static void exofs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
+}
+
 /*
  * Remove an inode from the cache
  */
 static void exofs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(exofs_inode_cachep, exofs_i(inode));
+	call_rcu(&inode->i_rcu, exofs_i_callback);
 }
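/*
 * A minimal sketch of the RCU-delayed inode freeing pattern applied above
 * to ecryptfs, efs and exofs (and to ext2 further below): with rcu-walk,
 * path lookup may still inspect an inode after ->destroy_inode() runs, so
 * the per-filesystem inode is freed from an RCU callback rather than
 * synchronously. The examplefs_* names and cache are hypothetical.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct examplefs_inode_info {
	struct inode vfs_inode;
	/* fs-private fields omitted */
};

static struct kmem_cache *examplefs_inode_cachep;	/* assumed to exist */

static void examplefs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct examplefs_inode_info *ei =
		container_of(inode, struct examplefs_inode_info, vfs_inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(examplefs_inode_cachep, ei);
}

static void examplefs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, examplefs_i_callback);
}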
 
 /*
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 51b3040..4b68257 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -43,24 +43,26 @@
 		void *context)
 {
 	struct dentry *dentry, *toput = NULL;
+	struct inode *inode;
 
 	if (acceptable(context, result))
 		return result;
 
-	spin_lock(&dcache_lock);
-	list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
-		dget_locked(dentry);
-		spin_unlock(&dcache_lock);
+	inode = result->d_inode;
+	spin_lock(&inode->i_lock);
+	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+		dget(dentry);
+		spin_unlock(&inode->i_lock);
 		if (toput)
 			dput(toput);
 		if (dentry != result && acceptable(context, dentry)) {
 			dput(result);
 			return dentry;
 		}
-		spin_lock(&dcache_lock);
+		spin_lock(&inode->i_lock);
 		toput = dentry;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 
 	if (toput)
 		dput(toput);
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 2bcc043..7b41805 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -232,10 +232,17 @@
 }
 
 int
-ext2_check_acl(struct inode *inode, int mask)
+ext2_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
+	struct posix_acl *acl;
 
+	if (flags & IPERM_FLAG_RCU) {
+		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+			return -ECHILD;
+		return -EAGAIN;
+	}
+
+	acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl) {
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index 3ff6cbb..c939b7b 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -54,7 +54,7 @@
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
 
 /* acl.c */
-extern int ext2_check_acl (struct inode *, int);
+extern int ext2_check_acl (struct inode *, int, unsigned int);
 extern int ext2_acl_chmod (struct inode *);
 extern int ext2_init_acl (struct inode *, struct inode *);
 
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2709b34..47cda41 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -28,21 +28,30 @@
 
 typedef struct ext2_dir_entry_2 ext2_dirent;
 
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
 static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
+#endif
 	return len;
 }
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
 		BUG_ON(len > (1 << 16));
+#endif
 	return cpu_to_le16(len);
 }
 
@@ -129,15 +138,15 @@
 		p = (ext2_dirent *)(kaddr + offs);
 		rec_len = ext2_rec_len_from_disk(p->rec_len);
 
-		if (rec_len < EXT2_DIR_REC_LEN(1))
+		if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
 			goto Eshort;
-		if (rec_len & 3)
+		if (unlikely(rec_len & 3))
 			goto Ealign;
-		if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
+		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
 			goto Enamelen;
-		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+		if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
 			goto Espan;
-		if (le32_to_cpu(p->inode) > max_inumber)
+		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
 			goto Einumber;
 	}
 	if (offs != limit)
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index f8aecd2..2e1d834 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -67,7 +67,7 @@
 	inode = NULL;
 	if (ino) {
 		inode = ext2_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				ext2_error(dir->i_sb, __func__,
 						"deleted inode referenced: %lu",
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d89e0b6..7731695 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -43,9 +43,10 @@
 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
 static int ext2_sync_fs(struct super_block *sb, int wait);
 
-void ext2_error (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext2_error(struct super_block *sb, const char *function,
+		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	struct ext2_super_block *es = sbi->s_es;
@@ -59,9 +60,13 @@
 	}
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_PANIC))
@@ -76,12 +81,16 @@
 void ext2_msg(struct super_block *sb, const char *prefix,
 		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
 	va_end(args);
 }
 
@@ -161,9 +170,16 @@
 	return &ei->vfs_inode;
 }
 
+static void ext2_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
+}
+
 static void ext2_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
+	call_rcu(&inode->i_rcu, ext2_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index f84700b..c2e4dce 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -199,14 +199,6 @@
 			goto found;
 		entry = next;
 	}
-	/* Check the remaining name entries */
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext2_xattr_entry *next =
-			EXT2_XATTR_NEXT(entry);
-		if ((char *)next >= end)
-			goto bad_block;
-		entry = next;
-	}
 	if (ext2_xattr_cache_insert(bh))
 		ea_idebug(inode, "cache insert failed");
 	error = -ENODATA;
@@ -355,7 +347,7 @@
 /*
  * ext2_xattr_set()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 8a11fe2..e4fa49e 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -240,10 +240,17 @@
 }
 
 int
-ext3_check_acl(struct inode *inode, int mask)
+ext3_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
+	struct posix_acl *acl;
 
+	if (flags & IPERM_FLAG_RCU) {
+		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+			return -ECHILD;
+		return -EAGAIN;
+	}
+
+	acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl) {
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
index 5973346..5faf804 100644
--- a/fs/ext3/acl.h
+++ b/fs/ext3/acl.h
@@ -54,7 +54,7 @@
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
 
 /* acl.c */
-extern int ext3_check_acl (struct inode *, int);
+extern int ext3_check_acl (struct inode *, int, unsigned int);
 extern int ext3_acl_chmod (struct inode *);
 extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
 
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b3db226..045995c 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -20,6 +20,7 @@
 #include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
+#include <linux/blkdev.h>
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
@@ -39,6 +40,21 @@
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/*
+ * Calculate the block group number and offset, given a block number
+ */
+static void ext3_get_group_no_and_offset(struct super_block *sb,
+	ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
+{
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+
+	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
+	if (offsetp)
+		*offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
+	if (blockgrpp)
+		*blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
+}
+
 /**
  * ext3_get_group_desc() -- load group descriptor from disk
  * @sb:			super block
@@ -1885,3 +1901,253 @@
 	return ext3_bg_num_gdb_meta(sb,group);
 
 }
+
+/**
+ * ext3_trim_all_free -- trim all free space in an allocation group
+ * @sb:			super block for the file system
+ * @group:		allocation group to trim
+ * @start:		first group block to examine
+ * @max:		last group block to examine
+ * @minblocks:		minimum extent block count
+ *
+ * ext3_trim_all_free walks through the group's block bitmap searching for
+ * free blocks. When a free block is found, it tries to allocate that block
+ * and the consecutive free blocks after it to get the biggest free extent
+ * possible, until it reaches a used block. It then issues a TRIM command on
+ * this extent and frees the extent in the block bitmap. This is repeated
+ * until the whole group has been scanned.
+ */
+ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
+				ext3_grpblk_t start, ext3_grpblk_t max,
+				ext3_grpblk_t minblocks)
+{
+	handle_t *handle;
+	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
+	ext3_fsblk_t discard_block;
+	struct ext3_sb_info *sbi;
+	struct buffer_head *gdp_bh, *bitmap_bh = NULL;
+	struct ext3_group_desc *gdp;
+	int err = 0, ret = 0;
+
+	/*
+	 * We will update one block bitmap, and one group descriptor
+	 */
+	handle = ext3_journal_start_sb(sb, 2);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	bitmap_bh = read_block_bitmap(sb, group);
+	if (!bitmap_bh) {
+		err = -EIO;
+		goto err_out;
+	}
+
+	BUFFER_TRACE(bitmap_bh, "getting undo access");
+	err = ext3_journal_get_undo_access(handle, bitmap_bh);
+	if (err)
+		goto err_out;
+
+	gdp = ext3_get_group_desc(sb, group, &gdp_bh);
+	if (!gdp) {
+		err = -EIO;
+		goto err_out;
+	}
+
+	BUFFER_TRACE(gdp_bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, gdp_bh);
+	if (err)
+		goto err_out;
+
+	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+	sbi = EXT3_SB(sb);
+
+	 /* Walk through the whole group */
+	while (start < max) {
+		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
+		if (start < 0)
+			break;
+		next = start;
+
+		/*
+		 * Allocate contiguous free extents by setting bits in the
+		 * block bitmap
+		 */
+		while (next < max
+			&& claim_block(sb_bgl_lock(sbi, group),
+					next, bitmap_bh)) {
+			next++;
+		}
+
+		 /* We did not claim any blocks */
+		if (next == start)
+			continue;
+
+		discard_block = (ext3_fsblk_t)start +
+				ext3_group_first_block_no(sb, group);
+
+		/* Update counters */
+		spin_lock(sb_bgl_lock(sbi, group));
+		le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
+		spin_unlock(sb_bgl_lock(sbi, group));
+		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+
+		/* Do not issue a TRIM on extents smaller than minblocks */
+		if ((next - start) < minblocks)
+			goto free_extent;
+
+		 /* Send the TRIM command down to the device */
+		err = sb_issue_discard(sb, discard_block, next - start,
+				       GFP_NOFS, 0);
+		count += (next - start);
+free_extent:
+		freed = 0;
+
+		/*
+		 * Clear bits in the bitmap
+		 */
+		for (bit = start; bit < next; bit++) {
+			BUFFER_TRACE(bitmap_bh, "clear bit");
+			if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
+						bit, bitmap_bh->b_data)) {
+				ext3_error(sb, __func__,
+					"bit already cleared for block "E3FSBLK,
+					 (unsigned long)bit);
+				BUFFER_TRACE(bitmap_bh, "bit already cleared");
+			} else {
+				freed++;
+			}
+		}
+
+		/* Update counters */
+		spin_lock(sb_bgl_lock(sbi, group));
+		le16_add_cpu(&gdp->bg_free_blocks_count, freed);
+		spin_unlock(sb_bgl_lock(sbi, group));
+		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+
+		start = next;
+		if (err < 0) {
+			if (err != -EOPNOTSUPP)
+				ext3_warning(sb, __func__, "Discard command "
+					     "returned error %d\n", err);
+			break;
+		}
+
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+
+		/* No more suitable extents */
+		if ((free_blocks - count) < minblocks)
+			break;
+	}
+
+	/* We dirtied the bitmap block */
+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+	ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
+	if (!err)
+		err = ret;
+
+	/* And the group descriptor block */
+	BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
+	ret = ext3_journal_dirty_metadata(handle, gdp_bh);
+	if (!err)
+		err = ret;
+
+	ext3_debug("trimmed %d blocks in the group %d\n",
+		count, group);
+
+err_out:
+	if (err)
+		count = err;
+	ext3_journal_stop(handle);
+	brelse(bitmap_bh);
+
+	return count;
+}
+
+/**
+ * ext3_trim_fs() -- trim ioctl handler
+ * @sb:			superblock for the filesystem
+ * @range:		fstrim_range describing the first byte to trim, the
+ *			number of bytes to trim and the minimum extent
+ *			length, all in bytes
+ *
+ * ext3_trim_fs goes through all allocation groups containing bytes from
+ * range->start to range->start+range->len. For each such group,
+ * ext3_trim_all_free is invoked to trim all free space.
+ */
+int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+	ext3_grpblk_t last_block, first_block, free_blocks;
+	unsigned long first_group, last_group;
+	unsigned long group, ngroups;
+	struct ext3_group_desc *gdp;
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+	uint64_t start, len, minlen, trimmed;
+	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
+	int ret = 0;
+
+	start = range->start >> sb->s_blocksize_bits;
+	len = range->len >> sb->s_blocksize_bits;
+	minlen = range->minlen >> sb->s_blocksize_bits;
+	trimmed = 0;
+
+	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
+		return -EINVAL;
+	if (start >= max_blks)
+		goto out;
+	if (start < le32_to_cpu(es->s_first_data_block)) {
+		len -= le32_to_cpu(es->s_first_data_block) - start;
+		start = le32_to_cpu(es->s_first_data_block);
+	}
+	if (start + len > max_blks)
+		len = max_blks - start;
+
+	ngroups = EXT3_SB(sb)->s_groups_count;
+	smp_rmb();
+
+	/* Determine first and last group to examine based on start and len */
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
+				     &first_group, &first_block);
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
+				     &last_group, &last_block);
+	last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
+	last_block = EXT3_BLOCKS_PER_GROUP(sb);
+
+	if (first_group > last_group)
+		return -EINVAL;
+
+	for (group = first_group; group <= last_group; group++) {
+		gdp = ext3_get_group_desc(sb, group, NULL);
+		if (!gdp)
+			break;
+
+		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+		if (free_blocks < minlen)
+			continue;
+
+		if (len >= EXT3_BLOCKS_PER_GROUP(sb))
+			len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
+		else
+			last_block = first_block + len;
+
+		ret = ext3_trim_all_free(sb, group, first_block,
+					last_block, minlen);
+		if (ret < 0)
+			break;
+
+		trimmed += ret;
+		first_block = 0;
+	}
+
+	if (ret >= 0)
+		ret = 0;
+
+out:
+	range->len = trimmed * sb->s_blocksize;
+
+	return ret;
+}
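/*
 * A worked example of the block-to-group arithmetic used by
 * ext3_get_group_no_and_offset() above: assuming 32768 blocks per group
 * and s_first_data_block == 1, block 100000 lands in group 3 at offset
 * 1695. The standalone helper below is hypothetical and exists only to
 * show the arithmetic.
 */
static void example_group_and_offset(unsigned long blocknr,
				     unsigned long first_data_block,
				     unsigned long blocks_per_group,
				     unsigned long *group,
				     unsigned long *offset)
{
	blocknr -= first_data_block;		/* 100000 - 1 = 99999 */
	*group  = blocknr / blocks_per_group;	/* 99999 / 32768 = 3 */
	*offset = blocknr % blocks_per_group;	/* 99999 % 32768 = 1695 */
}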
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index e2e72c3..34f0a07 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -69,25 +69,26 @@
 	const char * error_msg = NULL;
 	const int rlen = ext3_rec_len_from_disk(de->rec_len);
 
-	if (rlen < EXT3_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT3_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
 
-	if (error_msg != NULL)
+	if (unlikely(error_msg != NULL))
 		ext3_error (dir->i_sb, function,
 			"bad entry in directory #%lu: %s - "
 			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
 			dir->i_ino, error_msg, offset,
 			(unsigned long) le32_to_cpu(de->inode),
 			rlen, de->name_len);
+
 	return error_msg == NULL ? 1 : 0;
 }
 
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a958061..ae94f6d 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2145,13 +2145,15 @@
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			ext3_journal_dirty_metadata(handle, bh);
+			if (ext3_journal_dirty_metadata(handle, bh))
+				return;
 		}
 		ext3_mark_inode_dirty(handle, inode);
 		truncate_restart_transaction(handle, inode);
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
-			ext3_journal_get_write_access(handle, bh);
+			if (ext3_journal_get_write_access(handle, bh))
+				return;
 		}
 	}
 
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 8897481..fc080dd 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -276,7 +276,29 @@
 		mnt_drop_write(filp->f_path.mnt);
 		return err;
 	}
+	case FITRIM: {
 
+		struct super_block *sb = inode->i_sb;
+		struct fstrim_range range;
+		int ret = 0;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&range, (struct fstrim_range *)arg,
+				   sizeof(range)))
+			return -EFAULT;
+
+		ret = ext3_trim_fs(sb, &range);
+		if (ret < 0)
+			return ret;
+
+		if (copy_to_user((struct fstrim_range *)arg, &range,
+				 sizeof(range)))
+			return -EFAULT;
+
+		return 0;
+	}
 
 	default:
 		return -ENOTTY;
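/*
 * A small userspace sketch exercising the FITRIM handler added above.
 * FITRIM and struct fstrim_range come from <linux/fs.h>; the 64 KiB
 * minimum extent length is an arbitrary example value and error handling
 * is kept minimal.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.len = UINT64_MAX;		/* trim the whole filesystem */
	range.minlen = 64 * 1024;	/* skip extents smaller than 64 KiB */
	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else
		printf("trimmed %llu bytes\n",
		       (unsigned long long)range.len);
	close(fd);
	return 0;
}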
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index bce9dce..b27ba71 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -858,6 +858,7 @@
 	struct buffer_head * bh_use[NAMEI_RA_SIZE];
 	struct buffer_head * bh, *ret = NULL;
 	unsigned long start, block, b;
+	const u8 *name = entry->name;
 	int ra_max = 0;		/* Number of bh's in the readahead
 				   buffer, bh_use[] */
 	int ra_ptr = 0;		/* Current index into readahead
@@ -871,6 +872,16 @@
 	namelen = entry->len;
 	if (namelen > EXT3_NAME_LEN)
 		return NULL;
+	if ((namelen <= 2) && (name[0] == '.') &&
+	    (name[1] == '.' || name[1] == 0)) {
+		/*
+		 * "." or ".." will only be in the first block
+		 * NFS may look up ".."; "." should be handled by the VFS
+		 */
+		block = start = 0;
+		nblocks = 1;
+		goto restart;
+	}
 	if (is_dx(dir)) {
 		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
 		/*
@@ -961,55 +972,35 @@
 			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
 			int *err)
 {
-	struct super_block * sb;
+	struct super_block *sb = dir->i_sb;
 	struct dx_hash_info	hinfo;
-	u32 hash;
 	struct dx_frame frames[2], *frame;
-	struct ext3_dir_entry_2 *de, *top;
 	struct buffer_head *bh;
 	unsigned long block;
 	int retval;
-	int namelen = entry->len;
-	const u8 *name = entry->name;
 
-	sb = dir->i_sb;
-	/* NFS may look up ".." - look at dx_root directory block */
-	if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
-		if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
-			return NULL;
-	} else {
-		frame = frames;
-		frame->bh = NULL;			/* for dx_release() */
-		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
-		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
-	}
-	hash = hinfo.hash;
+	if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
+		return NULL;
 	do {
 		block = dx_get_block(frame->at);
 		if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
 			goto errout;
-		de = (struct ext3_dir_entry_2 *) bh->b_data;
-		top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
-				       EXT3_DIR_REC_LEN(0));
-		for (; de < top; de = ext3_next_entry(de)) {
-			int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
-				  + ((char *) de - bh->b_data);
 
-			if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
-				brelse(bh);
-				*err = ERR_BAD_DX_DIR;
-				goto errout;
-			}
-
-			if (ext3_match(namelen, name, de)) {
-				*res_dir = de;
-				dx_release(frames);
-				return bh;
-			}
+		retval = search_dirblock(bh, dir, entry,
+					 block << EXT3_BLOCK_SIZE_BITS(sb),
+					 res_dir);
+		if (retval == 1) {
+			dx_release(frames);
+			return bh;
 		}
-		brelse (bh);
+		brelse(bh);
+		if (retval == -1) {
+			*err = ERR_BAD_DX_DIR;
+			goto errout;
+		}
+
 		/* Check to see if we should continue to search */
-		retval = ext3_htree_next_block(dir, hash, frame,
+		retval = ext3_htree_next_block(dir, hinfo.hash, frame,
 					       frames, NULL);
 		if (retval < 0) {
 			ext3_warning(sb, __func__,
@@ -1047,7 +1038,7 @@
 			return ERR_PTR(-EIO);
 		}
 		inode = ext3_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				ext3_error(dir->i_sb, __func__,
 						"deleted inode referenced: %lu",
@@ -1607,7 +1598,9 @@
 			if (err)
 				goto journal_error;
 		}
-		ext3_journal_dirty_metadata(handle, frames[0].bh);
+		err = ext3_journal_dirty_metadata(handle, frames[0].bh);
+		if (err)
+			goto journal_error;
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1644,8 +1637,13 @@
 		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
+			int err;
+
 			BUFFER_TRACE(bh, "get_write_access");
-			ext3_journal_get_write_access(handle, bh);
+			err = ext3_journal_get_write_access(handle, bh);
+			if (err)
+				goto journal_error;
+
 			if (pde)
 				pde->rec_len = ext3_rec_len_to_disk(
 					ext3_rec_len_from_disk(pde->rec_len) +
@@ -1654,7 +1652,12 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			ext3_journal_dirty_metadata(handle, bh);
+			err = ext3_journal_dirty_metadata(handle, bh);
+			if (err) {
+journal_error:
+				ext3_std_error(dir->i_sb, err);
+				return err;
+			}
 			return 0;
 		}
 		i += ext3_rec_len_from_disk(de->rec_len);
@@ -1762,7 +1765,7 @@
 {
 	handle_t *handle;
 	struct inode * inode;
-	struct buffer_head * dir_block;
+	struct buffer_head * dir_block = NULL;
 	struct ext3_dir_entry_2 * de;
 	int err, retries = 0;
 
@@ -1790,15 +1793,14 @@
 	inode->i_fop = &ext3_dir_operations;
 	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
 	dir_block = ext3_bread (handle, inode, 0, 1, &err);
-	if (!dir_block) {
-		drop_nlink(inode); /* is this nlink == 0? */
-		unlock_new_inode(inode);
-		ext3_mark_inode_dirty(handle, inode);
-		iput (inode);
-		goto out_stop;
-	}
+	if (!dir_block)
+		goto out_clear_inode;
+
 	BUFFER_TRACE(dir_block, "get_write_access");
-	ext3_journal_get_write_access(handle, dir_block);
+	err = ext3_journal_get_write_access(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
+
 	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
@@ -1814,11 +1816,16 @@
 	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
 	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
-	ext3_journal_dirty_metadata(handle, dir_block);
-	brelse (dir_block);
-	ext3_mark_inode_dirty(handle, inode);
-	err = ext3_add_entry (handle, dentry, inode);
+	err = ext3_journal_dirty_metadata(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
+
+	err = ext3_mark_inode_dirty(handle, inode);
+	if (!err)
+		err = ext3_add_entry (handle, dentry, inode);
+
 	if (err) {
+out_clear_inode:
 		inode->i_nlink = 0;
 		unlock_new_inode(inode);
 		ext3_mark_inode_dirty(handle, inode);
@@ -1827,10 +1834,14 @@
 	}
 	inc_nlink(dir);
 	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
+	err = ext3_mark_inode_dirty(handle, dir);
+	if (err)
+		goto out_clear_inode;
+
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
 out_stop:
+	brelse(dir_block);
 	ext3_journal_stop(handle);
 	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
 		goto retry;
@@ -2353,7 +2364,9 @@
 			goto end_rename;
 	} else {
 		BUFFER_TRACE(new_bh, "get write access");
-		ext3_journal_get_write_access(handle, new_bh);
+		retval = ext3_journal_get_write_access(handle, new_bh);
+		if (retval)
+			goto journal_error;
 		new_de->inode = cpu_to_le32(old_inode->i_ino);
 		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
 					      EXT3_FEATURE_INCOMPAT_FILETYPE))
@@ -2362,7 +2375,9 @@
 		new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
 		ext3_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
-		ext3_journal_dirty_metadata(handle, new_bh);
+		retval = ext3_journal_dirty_metadata(handle, new_bh);
+		if (retval)
+			goto journal_error;
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2411,10 +2426,17 @@
 	ext3_update_dx_flag(old_dir);
 	if (dir_bh) {
 		BUFFER_TRACE(dir_bh, "get_write_access");
-		ext3_journal_get_write_access(handle, dir_bh);
+		retval = ext3_journal_get_write_access(handle, dir_bh);
+		if (retval)
+			goto journal_error;
 		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
-		ext3_journal_dirty_metadata(handle, dir_bh);
+		retval = ext3_journal_dirty_metadata(handle, dir_bh);
+		if (retval) {
+journal_error:
+			ext3_std_error(new_dir->i_sb, retval);
+			goto end_rename;
+		}
 		drop_nlink(old_dir);
 		if (new_inode) {
 			drop_nlink(new_inode);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index e746d30..108b142 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -249,7 +249,11 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext3_journal_dirty_metadata(handle, gdb);
+		err = ext3_journal_dirty_metadata(handle, gdb);
+		if (err) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext3_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -269,7 +273,11 @@
 			err = PTR_ERR(gdb);
 			goto exit_bh;
 		}
-		ext3_journal_dirty_metadata(handle, gdb);
+		err = ext3_journal_dirty_metadata(handle, gdb);
+		if (err) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext3_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -295,7 +303,11 @@
 			err = PTR_ERR(it);
 			goto exit_bh;
 		}
-		ext3_journal_dirty_metadata(handle, it);
+		err = ext3_journal_dirty_metadata(handle, it);
+		if (err) {
+			brelse(it);
+			goto exit_bh;
+		}
 		brelse(it);
 		ext3_set_bit(bit, bh->b_data);
 	}
@@ -306,7 +318,9 @@
 
 	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
 			bh->b_data);
-	ext3_journal_dirty_metadata(handle, bh);
+	err = ext3_journal_dirty_metadata(handle, bh);
+	if (err)
+		goto exit_bh;
 	brelse(bh);
 
 	/* Mark unused entries in inode bitmap used */
@@ -319,7 +333,7 @@
 
 	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
 			bh->b_data);
-	ext3_journal_dirty_metadata(handle, bh);
+	err = ext3_journal_dirty_metadata(handle, bh);
 exit_bh:
 	brelse(bh);
 
@@ -503,12 +517,19 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
-	ext3_journal_dirty_metadata(handle, dind);
+	err = ext3_journal_dirty_metadata(handle, dind);
+	if (err)
+		goto exit_group_desc;
 	brelse(dind);
+	dind = NULL;
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
-	ext3_mark_iloc_dirty(handle, inode, &iloc);
+	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+	if (err)
+		goto exit_group_desc;
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext3_journal_dirty_metadata(handle, *primary);
+	err = ext3_journal_dirty_metadata(handle, *primary);
+	if (err)
+		goto exit_group_desc;
 
 	o_group_desc = EXT3_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -519,10 +540,14 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	if (err)
+		goto exit_inode;
 
 	return 0;
 
+exit_group_desc:
+	kfree(n_group_desc);
 exit_inode:
 	//ext3_journal_release_buffer(handle, iloc.bh);
 	brelse(iloc.bh);
@@ -706,16 +731,20 @@
 		}
 		ext3_debug("update metadata backup %#04lx\n",
 			  (unsigned long)bh->b_blocknr);
-		if ((err = ext3_journal_get_write_access(handle, bh)))
+		if ((err = ext3_journal_get_write_access(handle, bh))) {
+			brelse(bh);
 			break;
+		}
 		lock_buffer(bh);
 		memcpy(bh->b_data, data, size);
 		if (rest)
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext3_journal_dirty_metadata(handle, bh);
+		err = ext3_journal_dirty_metadata(handle, bh);
 		brelse(bh);
+		if (err)
+			break;
 	}
 	if ((err2 = ext3_journal_stop(handle)) && !err)
 		err = err2;
@@ -922,7 +951,9 @@
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
 
-	ext3_journal_dirty_metadata(handle, primary);
+	err = ext3_journal_dirty_metadata(handle, primary);
+	if (err)
+		goto exit_journal;
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
@@ -934,7 +965,7 @@
 	percpu_counter_add(&sbi->s_freeinodes_counter,
 			   EXT3_INODES_PER_GROUP(sb));
 
-	ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
 
 exit_journal:
 	mutex_unlock(&sbi->s_resize_lock);
@@ -1064,8 +1095,14 @@
 		goto exit_put;
 	}
 	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
-	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
+	if (err) {
+		ext3_warning(sb, __func__,
+			     "error %d on journal dirty metadata", err);
+		ext3_journal_stop(handle);
+		goto exit_put;
+	}
 	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
 		   o_blocks_count, o_blocks_count + add);
 	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index acf8695..7aa767d 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -143,12 +143,16 @@
 void ext3_msg(struct super_block *sb, const char *prefix,
 		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT3-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
 	va_end(args);
 }
 
@@ -195,15 +199,20 @@
 			sb->s_id);
 }
 
-void ext3_error (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext3_error(struct super_block *sb, const char *function,
+		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	ext3_handle_error(sb);
@@ -274,15 +283,20 @@
  * case we take the easy way out and panic immediately.
  */
 
-void ext3_abort (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext3_abort(struct super_block *sb, const char *function,
+		 const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_PANIC))
@@ -300,16 +314,20 @@
 		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
 }
 
-void ext3_warning (struct super_block * sb, const char * function,
-		   const char * fmt, ...)
+void ext3_warning(struct super_block *sb, const char *function,
+		  const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ",
-	       sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 }
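
All four ext3 message helpers above are converted from printk()+vprintk()+printk() triples to a single printk() using struct va_format and the %pV extension, so the prefix, device name and caller-supplied format are emitted in one call and cannot be interleaved with messages from other CPUs. A minimal sketch of the same pattern (my_warn() and the "myfs" prefix are illustrative names):

	#include <linux/kernel.h>

	static void my_warn(const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* a single printk call, so the line cannot be split by other CPUs */
		printk(KERN_WARNING "myfs: %pV\n", &vaf);
		va_end(args);
	}
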
 
@@ -346,7 +364,7 @@
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
 	if (IS_ERR(bdev))
 		goto fail;
 	return bdev;
@@ -363,8 +381,7 @@
  */
 static int ext3_blkdev_put(struct block_device *bdev)
 {
-	bd_release(bdev);
-	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 static int ext3_blkdev_remove(struct ext3_sb_info *sbi)
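
These two hunks follow the block-layer API change in this merge window: open_by_devnum() plus bd_claim()/bd_release() is replaced by blkdev_get_by_dev() with FMODE_EXCL, which takes the exclusive claim on behalf of the holder pointer (the super block here), paired with a blkdev_put() using the same mode. A sketch of the new pairing with illustrative wrapper names (these are not real ext3 functions):

	#include <linux/fs.h>

	static struct block_device *my_get_journal_bdev(dev_t dev, void *holder)
	{
		/* FMODE_EXCL ties the exclusive claim to 'holder'; ERR_PTR() on failure */
		return blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
					 holder);
	}

	static void my_put_journal_bdev(struct block_device *bdev)
	{
		/* release with the same mode; no separate bd_release() is needed */
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}
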
@@ -479,6 +496,13 @@
 	return &ei->vfs_inode;
 }
 
+static void ext3_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
+}
+
 static void ext3_destroy_inode(struct inode *inode)
 {
 	if (!list_empty(&(EXT3_I(inode)->i_orphan))) {
@@ -489,7 +513,7 @@
 				false);
 		dump_stack();
 	}
-	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
+	call_rcu(&inode->i_rcu, ext3_i_callback);
 }
 
 static void init_once(void *foo)
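
ext3_destroy_inode() now frees the inode through call_rcu() and the ext3_i_callback() added above, so the memory backing the inode cannot be reused while an RCU reader may still be inspecting it; this is what allows the lockless path-walking code elsewhere in this merge window to peek at inodes without taking a reference. A generic sketch of the deferred-free pattern (struct foo and foo_cachep are illustrative, not ext3 names):

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static struct kmem_cache *foo_cachep;

	static void foo_free_rcu(struct rcu_head *head)
	{
		struct foo *f = container_of(head, struct foo, rcu);

		kmem_cache_free(foo_cachep, f);
	}

	static void foo_release(struct foo *f)
	{
		/* readers inside rcu_read_lock() may still be looking at f */
		call_rcu(&f->rcu, foo_free_rcu);
	}
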
@@ -1841,13 +1865,15 @@
 		goto failed_mount;
 	}
 
-	if (generic_check_addressable(sb->s_blocksize_bits,
-				      le32_to_cpu(es->s_blocks_count))) {
+	err = generic_check_addressable(sb->s_blocksize_bits,
+					le32_to_cpu(es->s_blocks_count));
+	if (err) {
 		ext3_msg(sb, KERN_ERR,
 			"error: filesystem is too large to mount safely");
 		if (sizeof(sector_t) < 8)
 			ext3_msg(sb, KERN_ERR,
 				"error: CONFIG_LBDAF not enabled");
+		ret = err;
 		goto failed_mount;
 	}
 
@@ -2135,13 +2161,6 @@
 	if (bdev == NULL)
 		return NULL;
 
-	if (bd_claim(bdev, sb)) {
-		ext3_msg(sb, KERN_ERR,
-			"error: failed to claim external journal device");
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
-		return NULL;
-	}
-
 	blocksize = sb->s_blocksize;
 	hblock = bdev_logical_block_size(bdev);
 	if (blocksize < hblock) {
@@ -2290,7 +2309,7 @@
 	EXT3_SB(sb)->s_journal = journal;
 	ext3_clear_journal_err(sb, es);
 
-	if (journal_devnum &&
+	if (!really_read_only && journal_devnum &&
 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
 		es->s_journal_dev = cpu_to_le32(journal_devnum);
 
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e69dc6d..32e6cc2 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -925,7 +925,7 @@
 /*
  * ext3_xattr_set_handle()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 5e2ed45..e0270d1 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -238,10 +238,17 @@
 }
 
 int
-ext4_check_acl(struct inode *inode, int mask)
+ext4_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct posix_acl *acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
+	struct posix_acl *acl;
 
+	if (flags & IPERM_FLAG_RCU) {
+		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+			return -ECHILD;
+		return -EAGAIN;
+	}
+
+	acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl) {
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 9d843d5..dec8211 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -54,7 +54,7 @@
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 
 /* acl.c */
-extern int ext4_check_acl(struct inode *, int);
+extern int ext4_check_acl(struct inode *, int, unsigned int);
 extern int ext4_acl_chmod(struct inode *);
 extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
 
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 14c3af2..adf96b8 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -592,7 +592,8 @@
 	 * Account for the allocated meta blocks.  We will never
 	 * fail EDQUOT for metadata, but we do account for it.
 	 */
-	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
+	if (!(*errp) &&
+	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ece76fb..164c560 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -60,9 +60,13 @@
 	return (ext4_filetype_table[filetype]);
 }
 
-
+/*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+ * Note: this is the opposite of what ext2 and ext3 historically returned...
+ */
 int __ext4_check_dir_entry(const char *function, unsigned int line,
-			   struct inode *dir,
+			   struct inode *dir, struct file *filp,
 			   struct ext4_dir_entry_2 *de,
 			   struct buffer_head *bh,
 			   unsigned int offset)
@@ -71,26 +75,37 @@
 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
 						dir->i_sb->s_blocksize);
 
-	if (rlen < EXT4_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely(((char *) de - bh->b_data) + rlen >
+			  dir->i_sb->s_blocksize))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
+	else
+		return 0;
 
-	if (error_msg != NULL)
-		ext4_error_inode(dir, function, line, bh->b_blocknr,
-			"bad entry in directory: %s - "
-			"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-			error_msg, (unsigned) (offset%bh->b_size), offset,
-			le32_to_cpu(de->inode),
-			rlen, de->name_len);
-	return error_msg == NULL ? 1 : 0;
+	if (filp)
+		ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+	else
+		ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+
+	return 1;
 }
 
 static int ext4_readdir(struct file *filp,
@@ -152,8 +167,9 @@
 		 */
 		if (!bh) {
 			if (!dir_has_error) {
-				EXT4_ERROR_INODE(inode, "directory "
-					   "contains a hole at offset %Lu",
+				EXT4_ERROR_FILE(filp, 0,
+						"directory contains a "
+						"hole at offset %llu",
 					   (unsigned long long) filp->f_pos);
 				dir_has_error = 1;
 			}
@@ -194,8 +210,8 @@
 		while (!error && filp->f_pos < inode->i_size
 		       && offset < sb->s_blocksize) {
 			de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext4_check_dir_entry(inode, de,
-						  bh, offset)) {
+			if (ext4_check_dir_entry(inode, filp, de,
+						 bh, offset)) {
 				/*
 				 * On error, skip the f_pos to the next block
 				 */
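
Note the flipped convention: __ext4_check_dir_entry() now returns 0 for a good entry and 1 for a corrupt one (the opposite of the historic ext2/ext3 return value), and the ext4_check_dir_entry() macro wraps the call in unlikely(), so the later namei.c hunks invert every call site. A condensed sketch of the new convention in a made-up scanner (example_scan_block() is not a real ext4 function):

	/* assumes it sits in fs/ext4 so the private "ext4.h" header is in scope */
	#include "ext4.h"

	static int example_scan_block(struct inode *dir, struct buffer_head *bh)
	{
		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)bh->b_data;
		unsigned int offset = 0;

		while (offset < dir->i_sb->s_blocksize) {
			/* nonzero now means "corrupt entry"; the checker already logged it */
			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
				return -EIO;
			offset += ext4_rec_len_from_disk(de->rec_len,
							 dir->i_sb->s_blocksize);
			de = (struct ext4_dir_entry_2 *)(bh->b_data + offset);
		}
		return 0;
	}
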
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 94ce3d7..1de65f5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -62,8 +62,8 @@
 #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...)			\
 	ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
 
-#define EXT4_ERROR_FILE(file, fmt, a...)	\
-	ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
+#define EXT4_ERROR_FILE(file, block, fmt, a...)				\
+	ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
 
 /* data type for block offset of block group */
 typedef int ext4_grpblk_t;
@@ -561,23 +561,7 @@
 #define EXT4_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
 #endif
 
-
-/*
- *  Mount options
- */
-struct ext4_mount_options {
-	unsigned long s_mount_opt;
-	uid_t s_resuid;
-	gid_t s_resgid;
-	unsigned long s_commit_interval;
-	u32 s_min_batch_time, s_max_batch_time;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[MAXQUOTAS];
-#endif
-};
-
-/* Max physical block we can addres w/o extents */
+/* Max physical block we can address w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
 
 /*
@@ -709,6 +693,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra))     \
 		ext4_decode_extra_time(&(inode)->xtime,			       \
 				       raw_inode->xtime ## _extra);	       \
+	else								       \
+		(inode)->xtime.tv_nsec = 0;				       \
 } while (0)
 
 #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)			       \
@@ -719,6 +705,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	       \
 		ext4_decode_extra_time(&(einode)->xtime,		       \
 				       raw_inode->xtime ## _extra);	       \
+	else								       \
+		(einode)->xtime.tv_nsec = 0;				       \
 } while (0)
 
 #define i_disk_version osd1.linux1.l_i_version
@@ -750,12 +738,13 @@
 
 /*
  * storage for cached extent
+ * If ec_len == 0, then the cache is invalid.
+ * If ec_start == 0, then the cache represents a gap (null mapping)
  */
 struct ext4_ext_cache {
 	ext4_fsblk_t	ec_start;
 	ext4_lblk_t	ec_block;
 	__u32		ec_len; /* must be 32bit to return holes */
-	__u32		ec_type;
 };
 
 /*
@@ -774,10 +763,12 @@
 	 * near to their parent directory's inode.
 	 */
 	ext4_group_t	i_block_group;
+	ext4_lblk_t	i_dir_start_lookup;
+#if (BITS_PER_LONG < 64)
 	unsigned long	i_state_flags;		/* Dynamic state flags */
+#endif
 	unsigned long	i_flags;
 
-	ext4_lblk_t		i_dir_start_lookup;
 #ifdef CONFIG_EXT4_FS_XATTR
 	/*
 	 * Extended attributes can be read independently of the main file
@@ -820,7 +811,7 @@
 	 */
 	struct rw_semaphore i_data_sem;
 	struct inode vfs_inode;
-	struct jbd2_inode jinode;
+	struct jbd2_inode *jinode;
 
 	struct ext4_ext_cache i_cached_extent;
 	/*
@@ -840,14 +831,12 @@
 	unsigned int i_reserved_data_blocks;
 	unsigned int i_reserved_meta_blocks;
 	unsigned int i_allocated_meta_blocks;
-	unsigned short i_delalloc_reserved_flag;
-	sector_t i_da_metadata_calc_last_lblock;
+	ext4_lblk_t i_da_metadata_calc_last_lblock;
 	int i_da_metadata_calc_len;
 
 	/* on-disk additional length */
 	__u16 i_extra_isize;
 
-	spinlock_t i_block_reservation_lock;
 #ifdef CONFIG_QUOTA
 	/* quota space reservation, managed internally by quota code */
 	qsize_t i_reserved_quota;
@@ -856,9 +845,11 @@
 	/* completed IOs that might need unwritten extents handling */
 	struct list_head i_completed_io_list;
 	spinlock_t i_completed_io_lock;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
-	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
+
+	spinlock_t i_block_reservation_lock;
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -917,11 +908,20 @@
 #define EXT4_MOUNT_DISCARD		0x40000000 /* Issue DISCARD requests */
 #define EXT4_MOUNT_INIT_INODE_TABLE	0x80000000 /* Initialize uninitialized itables */
 
-#define clear_opt(o, opt)		o &= ~EXT4_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT4_MOUNT_##opt
+#define clear_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt &= \
+						~EXT4_MOUNT_##opt
+#define set_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt |= \
+						EXT4_MOUNT_##opt
 #define test_opt(sb, opt)		(EXT4_SB(sb)->s_mount_opt & \
 					 EXT4_MOUNT_##opt)
 
+#define clear_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 &= \
+						~EXT4_MOUNT2_##opt
+#define set_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 |= \
+						EXT4_MOUNT2_##opt
+#define test_opt2(sb, opt)		(EXT4_SB(sb)->s_mount_opt2 & \
+					 EXT4_MOUNT2_##opt)
+
 #define ext4_set_bit			ext2_set_bit
 #define ext4_set_bit_atomic		ext2_set_bit_atomic
 #define ext4_clear_bit			ext2_clear_bit
@@ -1087,6 +1087,7 @@
 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
 	struct buffer_head **s_group_desc;
 	unsigned int s_mount_opt;
+	unsigned int s_mount_opt2;
 	unsigned int s_mount_flags;
 	ext4_fsblk_t s_sb_block;
 	uid_t s_resuid;
@@ -1237,24 +1238,39 @@
 	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
 	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
 	EXT4_STATE_NEWENTRY,		/* File just added to dir */
+	EXT4_STATE_DELALLOC_RESERVED,	/* blks already reserved for delalloc */
 };
 
-#define EXT4_INODE_BIT_FNS(name, field)					\
+#define EXT4_INODE_BIT_FNS(name, field, offset)				\
 static inline int ext4_test_inode_##name(struct inode *inode, int bit)	\
 {									\
-	return test_bit(bit, &EXT4_I(inode)->i_##field);		\
+	return test_bit(bit + (offset), &EXT4_I(inode)->i_##field);	\
 }									\
 static inline void ext4_set_inode_##name(struct inode *inode, int bit)	\
 {									\
-	set_bit(bit, &EXT4_I(inode)->i_##field);			\
+	set_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }									\
 static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
 {									\
-	clear_bit(bit, &EXT4_I(inode)->i_##field);			\
+	clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }
 
-EXT4_INODE_BIT_FNS(flag, flags)
-EXT4_INODE_BIT_FNS(state, state_flags)
+EXT4_INODE_BIT_FNS(flag, flags, 0)
+#if (BITS_PER_LONG < 64)
+EXT4_INODE_BIT_FNS(state, state_flags, 0)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	(ei)->i_state_flags = 0;
+}
+#else
+EXT4_INODE_BIT_FNS(state, flags, 32)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	/* We depend on the fact that callers will set i_flags */
+}
+#endif
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block.  This will allow us to call the feature-test
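
On 64-bit kernels the dynamic EXT4_STATE_* bits no longer get a word of their own: they live in the upper 32 bits of the same unsigned long that holds the persistent i_flags, which is why the generated helpers now take a bit offset and why ext4_clear_state_flags() becomes a no-op there. A standalone sketch of the packing idea (flags_word and STATE_SHIFT are illustrative names, not ext4's):

	#include <assert.h>
	#include <limits.h>

	#define STATE_SHIFT	32		/* only meaningful when long is 64 bits */

	static unsigned long flags_word;	/* low half: flags, high half: state */

	static void set_flag(int bit)   { flags_word |= 1UL << bit; }
	static void set_state(int bit)  { flags_word |= 1UL << (bit + STATE_SHIFT); }
	static int  test_state(int bit) { return !!(flags_word & (1UL << (bit + STATE_SHIFT))); }

	int main(void)
	{
		assert(sizeof(long) * CHAR_BIT == 64);	/* the trick needs a 64-bit long */
		set_flag(3);
		set_state(3);
		assert(test_state(3));	/* the two bit-3's land in different halves */
		return 0;
	}
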
@@ -1642,10 +1658,12 @@
 
 /* dir.c */
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+				  struct file *,
 				  struct ext4_dir_entry_2 *,
 				  struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, de, bh, offset) \
-	__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
+#define ext4_check_dir_entry(dir, filp, de, bh, offset)			\
+	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
+					(de), (bh), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 				    __u32 minor_hash,
 				    struct ext4_dir_entry_2 *dirent);
@@ -1653,6 +1671,7 @@
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, int);
+extern int ext4_flush_completed_IO(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1752,8 +1771,8 @@
 			     ext4_fsblk_t, const char *, ...)
 	__attribute__ ((format (printf, 5, 6)));
 extern void ext4_error_file(struct file *, const char *, unsigned int,
-			    const char *, ...)
-	__attribute__ ((format (printf, 4, 5)));
+			    ext4_fsblk_t, const char *, ...)
+	__attribute__ ((format (printf, 5, 6)));
 extern void __ext4_std_error(struct super_block *, const char *,
 			     unsigned int, int);
 extern void __ext4_abort(struct super_block *, const char *, unsigned int,
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 28ce70f..2e29abb 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -119,10 +119,6 @@
  * structure for external API
  */
 
-#define EXT4_EXT_CACHE_NO	0
-#define EXT4_EXT_CACHE_GAP	1
-#define EXT4_EXT_CACHE_EXTENT	2
-
 /*
  * to be called by ext4_ext_walk_space()
  * negative retcode - error
@@ -197,7 +193,7 @@
 static inline void
 ext4_ext_invalidate_cache(struct inode *inode)
 {
-	EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+	EXT4_I(inode)->i_cached_extent.ec_len = 0;
 }
 
 static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
@@ -278,7 +274,7 @@
 }
 
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-					 sector_t lblocks);
+					 ext4_lblk_t lblocks);
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
 						   int num,
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b0bd792..d8b992e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -253,7 +253,7 @@
 static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
 {
 	if (ext4_handle_valid(handle))
-		return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+		return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
 	return 0;
 }
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0554c48..c4068f6 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -117,11 +117,33 @@
 		struct ext4_extent *ex;
 		depth = path->p_depth;
 
-		/* try to predict block placement */
+		/*
+		 * Try to predict block placement assuming that we are
+		 * filling in a file which will eventually be
+		 * non-sparse --- i.e., in the case of libbfd writing
+		 * an ELF object's sections out-of-order but in a way
+		 * that eventually results in a contiguous object or
+		 * executable file, or some database extending a table
+		 * space file.  However, this is actually somewhat
+		 * non-ideal if we are writing a sparse file such as
+		 * qemu or KVM writing a raw image file that is going
+		 * to stay fairly sparse, since it will end up
+		 * fragmenting the file system's free space.  Maybe we
+		 * should have some heuristics or some way to allow
+		 * userspace to pass a hint to the file system,
+		 * especially if the latter case turns out to be
+		 * common.
+		 */
 		ex = path[depth].p_ext;
-		if (ex)
-			return (ext4_ext_pblock(ex) +
-				(block - le32_to_cpu(ex->ee_block)));
+		if (ex) {
+			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
+			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
+
+			if (block > ext_block)
+				return ext_pblk + (block - ext_block);
+			else
+				return ext_pblk - (ext_block - block);
+		}
 
 		/* it looks like index is empty;
 		 * try to find starting block from index itself */
@@ -244,7 +266,7 @@
  * to allocate @blocks
  * Worst case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
+int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	int idxs, num = 0;
@@ -1872,12 +1894,10 @@
 			cbex.ec_block = start;
 			cbex.ec_len = end - start;
 			cbex.ec_start = 0;
-			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext4_ext_pblock(ex);
-			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
 		if (unlikely(cbex.ec_len == 0)) {
@@ -1917,13 +1937,12 @@
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-			__u32 len, ext4_fsblk_t start, int type)
+			__u32 len, ext4_fsblk_t start)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	cex = &EXT4_I(inode)->i_cached_extent;
-	cex->ec_type = type;
 	cex->ec_block = block;
 	cex->ec_len = len;
 	cex->ec_start = start;
@@ -1976,15 +1995,18 @@
 	}
 
 	ext_debug(" -> %u:%lu\n", lblock, len);
-	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+	ext4_ext_put_in_cache(inode, lblock, len, 0);
 }
 
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
 static int
 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 			struct ext4_extent *ex)
 {
 	struct ext4_ext_cache *cex;
-	int ret = EXT4_EXT_CACHE_NO;
+	int ret = 0;
 
 	/*
 	 * We borrow i_block_reservation_lock to protect i_cached_extent
@@ -1993,11 +2015,9 @@
 	cex = &EXT4_I(inode)->i_cached_extent;
 
 	/* has cache valid data? */
-	if (cex->ec_type == EXT4_EXT_CACHE_NO)
+	if (cex->ec_len == 0)
 		goto errout;
 
-	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
-			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (in_range(block, cex->ec_block, cex->ec_len)) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2005,7 +2025,7 @@
 		ext_debug("%u cached by %u:%u:%llu\n",
 				block,
 				cex->ec_block, cex->ec_len, cex->ec_start);
-		ret = cex->ec_type;
+		ret = 1;
 	}
 errout:
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
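
With ec_type gone, the single-extent cache is encoded entirely in the remaining fields: ec_len == 0 means nothing is cached, a nonzero length with ec_start == 0 means a cached hole, and anything else is a cached mapped extent, which is the same distinction the ee_start_lo/ee_start_hi test makes in the hunk further down. A standalone sketch of that encoding (struct and helper names are illustrative):

	#include <stdio.h>

	struct ext_cache {
		unsigned long long ec_start;	/* physical start; 0 encodes a gap */
		unsigned int	   ec_block;	/* logical start */
		unsigned int	   ec_len;	/* 0 encodes "cache invalid" */
	};

	static const char *cache_state(const struct ext_cache *c)
	{
		if (c->ec_len == 0)
			return "invalid";
		return c->ec_start ? "mapped extent" : "gap";
	}

	int main(void)
	{
		struct ext_cache hole = { .ec_start = 0, .ec_block = 10, .ec_len = 4 };
		struct ext_cache none = { 0 };

		printf("%s, %s\n", cache_state(&hole), cache_state(&none));
		return 0;
	}
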
@@ -2825,14 +2845,14 @@
  * to an uninitialized extent.
  *
  * Writing to an uninitialized extent may result in splitting the uninitialized
- * extent into multiple /intialized unintialized extents (up to three)
+ * extent into multiple initialized/uninitialized extents (up to three)
  * There are three possibilities:
  *   a> There is no split required: Entire extent should be uninitialized
  *   b> Splits in two extents: Write is happening at either end of the extent
  *   c> Splits in three extents: Someone is writing in the middle of the extent
  *
  * One or more index blocks may be needed if the extent tree grows after
- * the unintialized extent split. To prevent ENOSPC occur at the IO
+ * the uninitialized extent split. To prevent ENOSPC occur at the IO
  * complete, we need to split the uninitialized extent before DIO submit
  * the IO. The uninitialized extent called at this time will be split
  * into three uninitialized extent(at most). After IO complete, the part
@@ -3082,7 +3102,7 @@
  * Handle EOFBLOCKS_FL flag, clearing it if necessary
  */
 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
-			      struct ext4_map_blocks *map,
+			      ext4_lblk_t lblk,
 			      struct ext4_ext_path *path,
 			      unsigned int len)
 {
@@ -3112,7 +3132,7 @@
 	 * this turns out to be false, we can bail out from this
 	 * function immediately.
 	 */
-	if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
 	    ext4_ext_get_actual_len(last_ex))
 		return 0;
 	/*
@@ -3168,8 +3188,8 @@
 							path);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
-			err = check_eofblocks_fl(handle, inode, map, path,
-						 map->m_len);
+			err = check_eofblocks_fl(handle, inode, map->m_lblk,
+						 path, map->m_len);
 		} else
 			err = ret;
 		goto out2;
@@ -3199,7 +3219,8 @@
 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
 	if (ret >= 0) {
 		ext4_update_inode_fsync_trans(handle, inode, 1);
-		err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
+					 map->m_len);
 		if (err < 0)
 			goto out2;
 	}
@@ -3276,7 +3297,7 @@
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3285,9 +3306,8 @@
 		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
-	if (cache_type) {
-		if (cache_type == EXT4_EXT_CACHE_GAP) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
 				 * block isn't allocated yet and
@@ -3296,7 +3316,7 @@
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+		} else {
 			/* block is already allocated */
 			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
@@ -3305,8 +3325,6 @@
 			allocated = ext4_ext_get_actual_len(&newex) -
 				(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
-		} else {
-			BUG();
 		}
 	}
 
@@ -3357,8 +3375,7 @@
 			/* Do not put uninitialized extent in the cache */
 			if (!ext4_ext_is_uninitialized(ex)) {
 				ext4_ext_put_in_cache(inode, ee_block,
-							ee_len, ee_start,
-							EXT4_EXT_CACHE_EXTENT);
+							ee_len, ee_start);
 				goto out;
 			}
 			ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3456,7 +3473,7 @@
 			map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
-	err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
 	if (err)
 		goto out2;
 
@@ -3490,8 +3507,7 @@
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
-						EXT4_EXT_CACHE_EXTENT);
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3519,6 +3535,12 @@
 	int err = 0;
 
 	/*
+	 * finish any pending end_io work so we won't run the risk of
+	 * converting any truncated blocks to initialized later
+	 */
+	ext4_flush_completed_IO(inode);
+
+	/*
 	 * probably first extent we're gonna free will be last in block
 	 */
 	err = ext4_writepage_trans_blocks(inode);
@@ -3622,6 +3644,10 @@
 	struct ext4_map_blocks map;
 	unsigned int credits, blkbits = inode->i_blkbits;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+		return -EOPNOTSUPP;
+
 	/*
 	 * currently supporting (pre)allocate mode for extent-based
 	 * files _only_
@@ -3767,7 +3793,7 @@
 
 	logical =  (__u64)newex->ec_block << blksize_bits;
 
-	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+	if (newex->ec_start == 0) {
 		pgoff_t offset;
 		struct page *page;
 		struct buffer_head *bh = NULL;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5a5c55d..bb003dc 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -104,6 +104,7 @@
 {
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
@@ -127,6 +128,27 @@
 			ext4_mark_super_dirty(sb);
 		}
 	}
+	/*
+	 * Set up the jbd2_inode if we are opening the inode for
+	 * writing and the journal is present
+	 */
+	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
+		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
+
+		spin_lock(&inode->i_lock);
+		if (!ei->jinode) {
+			if (!jinode) {
+				spin_unlock(&inode->i_lock);
+				return -ENOMEM;
+			}
+			ei->jinode = jinode;
+			jbd2_journal_init_jbd_inode(ei->jinode, inode);
+			jinode = NULL;
+		}
+		spin_unlock(&inode->i_lock);
+		if (unlikely(jinode != NULL))
+			jbd2_free_inode(jinode);
+	}
 	return dquot_file_open(inode, filp);
 }
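
The open path above allocates the jbd2_inode speculatively with GFP_KERNEL before taking i_lock (the allocation may sleep, so it cannot be done under the spinlock), publishes it only if no other opener got there first, and frees the spare copy if it lost the race; later hunks can then treat a NULL ei->jinode as "this inode was never opened for writing". A generic sketch of that allocate-outside-the-lock pattern (struct obj and struct widget are illustrative types):

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct widget { int dummy; };

	struct obj {
		spinlock_t lock;
		struct widget *w;	/* attached lazily on first use */
	};

	static int widget_attach(struct obj *obj)
	{
		struct widget *w = kmalloc(sizeof(*w), GFP_KERNEL);	/* may sleep */

		spin_lock(&obj->lock);
		if (!obj->w) {
			if (!w) {
				spin_unlock(&obj->lock);
				return -ENOMEM;
			}
			obj->w = w;
			w = NULL;	/* ownership handed to obj */
		}
		spin_unlock(&obj->lock);
		kfree(w);	/* non-NULL only if we lost the race; kfree(NULL) is a no-op */
		return 0;
	}
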
 
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index c1a7bc9..7829b28 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -75,7 +75,7 @@
  * to written.
  * The function returns the number of pending IOs on success.
  */
-static int flush_completed_IO(struct inode *inode)
+extern int ext4_flush_completed_IO(struct inode *inode)
 {
 	ext4_io_end_t *io;
 	struct ext4_inode_info *ei = EXT4_I(inode);
@@ -169,7 +169,7 @@
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;
 
-	ret = flush_completed_IO(inode);
+	ret = ext4_flush_completed_IO(inode);
 	if (ret < 0)
 		return ret;
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1ce240a..eb9097a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1027,7 +1027,7 @@
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e659597..9f7f9e4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -39,7 +39,9 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -54,10 +56,17 @@
 					      loff_t new_size)
 {
 	trace_ext4_begin_ordered_truncate(inode, new_size);
-	return jbd2_journal_begin_ordered_truncate(
-					EXT4_SB(inode->i_sb)->s_journal,
-					&EXT4_I(inode)->jinode,
-					new_size);
+	/*
+	 * If jinode is zero, then we never opened the file for
+	 * writing, so there's no need to call
+	 * jbd2_journal_begin_ordered_truncate() since there's no
+	 * outstanding writes we need to flush.
+	 */
+	if (!EXT4_I(inode)->jinode)
+		return 0;
+	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
+						   EXT4_I(inode)->jinode,
+						   new_size);
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@ -552,7 +561,7 @@
 }
 
 /**
- *	ext4_blks_to_allocate: Look up the block map and count the number
+ *	ext4_blks_to_allocate - Look up the block map and count the number
  *	of direct blocks need to be allocated for the given branch.
  *
  *	@branch: chain of indirect blocks
@@ -591,13 +600,19 @@
 
 /**
  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
+ *	@handle: handle for this transaction
+ *	@inode: inode which needs allocated blocks
+ *	@iblock: the logical block to start allocated at
+ *	@goal: preferred physical block of allocation
  *	@indirect_blks: the number of blocks need to allocate for indirect
  *			blocks
- *
+ *	@blks: number of desired blocks
  *	@new_blocks: on return it will store the new block numbers for
  *	the indirect blocks(if needed) and the first direct block,
- *	@blks:	on return it will store the total number of allocated
- *		direct blocks
+ *	@err: on return it will store the error code
+ *
+ *	This function will return the number of blocks allocated as
+ *	requested by the passed-in parameters.
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 			     ext4_lblk_t iblock, ext4_fsblk_t goal,
@@ -711,9 +726,11 @@
 
 /**
  *	ext4_alloc_branch - allocate and set up a chain of blocks.
+ *	@handle: handle for this transaction
  *	@inode: owner
  *	@indirect_blks: number of allocated indirect blocks
  *	@blks: number of allocated direct blocks
+ *	@goal: preferred place for allocation
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
@@ -826,6 +843,7 @@
 
 /**
  * ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
  * @inode: owner
  * @block: (logical) number of block we are adding
  * @chain: chain of indirect blocks (with a missing link - see
@@ -1081,7 +1099,7 @@
  * Calculate the number of metadata blocks need to reserve
  * to allocate a block located at @lblock
  */
-static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
+static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		return ext4_ext_calc_metadata_amount(inode, lblock);
@@ -1320,7 +1338,7 @@
 	 * avoid double accounting
 	 */
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
+		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 	/*
 	 * We need to check for EXT4 here because migrate
 	 * could have changed the inode type in between
@@ -1350,7 +1368,7 @@
 			ext4_da_update_reserve_space(inode, retval, 1);
 	}
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
 	up_write((&EXT4_I(inode)->i_data_sem));
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
@@ -1878,7 +1896,7 @@
 /*
  * Reserve a single block located at lblock
  */
-static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
+static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 {
 	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2239,7 +2257,7 @@
 	 * affects functions in many different parts of the allocation
 	 * call path.  This flag exists primarily because we don't
 	 * want to change *many* call functions, so ext4_map_blocks()
-	 * will set the magic i_delalloc_reserved_flag once the
+	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
 	 * inode's allocation semaphore is taken.
 	 *
 	 * If the blocks in questions were delalloc blocks, set
@@ -3362,7 +3380,7 @@
 	 * doing I/O at all.
 	 *
 	 * We could call write_cache_pages(), and then redirty all of
-	 * the pages by calling redirty_page_for_writeback() but that
+	 * the pages by calling redirty_page_for_writepage() but that
 	 * would be ugly in the extreme.  So instead we would need to
 	 * replicate parts of the code in the above functions,
 	 * simplifying them because we wouldn't actually intend to
@@ -3720,8 +3738,7 @@
 retry:
 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
 	if (!io_end) {
-		if (printk_ratelimit())
-			printk(KERN_WARNING "%s: allocation fail\n", __func__);
+		pr_warn_ratelimited("%s: allocation fail\n", __func__);
 		schedule();
 		goto retry;
 	}
@@ -3745,9 +3762,9 @@
  * preallocated extents, and those write extend the file, no need to
  * fall back to buffered IO.
  *
- * For holes, we fallocate those blocks, mark them as unintialized
+ * For holes, we fallocate those blocks, mark them as uninitialized
  * If those blocks were preallocated, we make sure they are split, but
- * still keep the range to write as unintialized.
+ * still keep the range to write as uninitialized.
  *
  * The unwritten extents will be converted to written when DIO is completed.
  * For async direct IO, since the IO may still be pending when we return, we
@@ -4045,7 +4062,7 @@
 	if (ext4_should_journal_data(inode)) {
 		err = ext4_handle_dirty_metadata(handle, inode, bh);
 	} else {
-		if (ext4_should_order_data(inode))
+		if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
 			err = ext4_jbd2_file_inode(handle, inode);
 		mark_buffer_dirty(bh);
 	}
@@ -4169,6 +4186,7 @@
 {
 	__le32 *p;
 	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+	int	err;
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
 		flags |= EXT4_FREE_BLOCKS_METADATA;
@@ -4184,11 +4202,23 @@
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, inode, bh);
+			err = ext4_handle_dirty_metadata(handle, inode, bh);
+			if (unlikely(err)) {
+				ext4_std_error(inode->i_sb, err);
+				return 1;
+			}
 		}
-		ext4_mark_inode_dirty(handle, inode);
-		ext4_truncate_restart_trans(handle, inode,
-					    blocks_for_truncate(inode));
+		err = ext4_mark_inode_dirty(handle, inode);
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
+		err = ext4_truncate_restart_trans(handle, inode,
+						  blocks_for_truncate(inode));
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
 			ext4_journal_get_write_access(handle, bh);
@@ -4349,6 +4379,7 @@
 					(__le32 *) bh->b_data,
 					(__le32 *) bh->b_data + addr_per_block,
 					depth);
+			brelse(bh);
 
 			/*
 			 * Everything below this this pointer has been
@@ -4859,7 +4890,7 @@
 	}
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
 	ei->i_dir_start_lookup = 0;
 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
 	/* We now have enough fields to check if the inode was active or not.
@@ -5118,7 +5149,7 @@
 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
 		goto out_brelse;
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 	    cpu_to_le32(EXT4_OS_HURD))
 		raw_inode->i_file_acl_high =
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5b4d4e3..851f49b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2608,18 +2608,12 @@
 static inline int ext4_issue_discard(struct super_block *sb,
 		ext4_group_t block_group, ext4_grpblk_t block, int count)
 {
-	int ret;
 	ext4_fsblk_t discard_block;
 
 	discard_block = block + ext4_group_first_block_no(sb, block_group);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
-	if (ret == -EOPNOTSUPP) {
-		ext4_warning(sb, "discard not supported, disabling");
-		clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
-	}
-	return ret;
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }
 
 /*
@@ -2631,7 +2625,7 @@
 	struct super_block *sb = journal->j_private;
 	struct ext4_buddy e4b;
 	struct ext4_group_info *db;
-	int err, count = 0, count2 = 0;
+	int err, ret, count = 0, count2 = 0;
 	struct ext4_free_data *entry;
 	struct list_head *l, *ltmp;
 
@@ -2641,9 +2635,15 @@
 		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
 			 entry->count, entry->group, entry);
 
-		if (test_opt(sb, DISCARD))
-			ext4_issue_discard(sb, entry->group,
+		if (test_opt(sb, DISCARD)) {
+			ret = ext4_issue_discard(sb, entry->group,
 					entry->start_blk, entry->count);
+			if (unlikely(ret == -EOPNOTSUPP)) {
+				ext4_warning(sb, "discard not supported, "
+						 "disabling");
+				clear_opt(sb, DISCARD);
+			}
+		}
 
 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
 		/* we expect to find existing buddy because it's pinned */
@@ -3881,19 +3881,6 @@
 	}
 }
 
-/*
- * finds all preallocated spaces and return blocks being freed to them
- * if preallocated space becomes full (no block is used from the space)
- * then the function frees space in buddy
- * XXX: at the moment, truncate (which is the only way to free blocks)
- * discards all preallocations
- */
-static void ext4_mb_return_to_preallocation(struct inode *inode,
-					struct ext4_buddy *e4b,
-					sector_t block, int count)
-{
-	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
-}
 #ifdef CONFIG_EXT4_DEBUG
 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 {
@@ -4283,7 +4270,7 @@
 	 * EDQUOT check, as blocks and quotas have been already
 	 * reserved when data being copied into pagecache.
 	 */
-	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
 	else {
 		/* Without delayed allocation we need to verify
@@ -4380,7 +4367,8 @@
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, inquota - ar->len);
 	if (!ar->len) {
-		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+		if (!ext4_test_inode_state(ar->inode,
+					   EXT4_STATE_DELALLOC_RESERVED))
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
 						reserv_blks);
@@ -4626,7 +4614,11 @@
 		 * blocks being freed are metadata. these blocks shouldn't
 		 * be used until this transaction is committed
 		 */
-		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		if (!new_entry) {
+			err = -ENOMEM;
+			goto error_return;
+		}
 		new_entry->start_blk = bit;
 		new_entry->group  = block_group;
 		new_entry->count = count;
@@ -4643,7 +4635,6 @@
 		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
-		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
 	}
 
 	ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4718,8 +4709,6 @@
 	ext4_unlock_group(sb, group);
 
 	ret = ext4_issue_discard(sb, group, start, count);
-	if (ret)
-		ext4_std_error(sb, ret);
 
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
@@ -4819,6 +4808,8 @@
 	ext4_group_t group, ngroups = ext4_get_groups_count(sb);
 	ext4_grpblk_t cnt = 0, first_block, last_block;
 	uint64_t start, len, minlen, trimmed;
+	ext4_fsblk_t first_data_blk =
+			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 	int ret = 0;
 
 	start = range->start >> sb->s_blocksize_bits;
@@ -4828,6 +4819,10 @@
 
 	if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
 		return -EINVAL;
+	if (start < first_data_blk) {
+		len -= first_data_blk - start;
+		start = first_data_blk;
+	}
 
 	/* Determine first and last group to examine based on start and len */
 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
@@ -4851,7 +4846,7 @@
 		if (len >= EXT4_BLOCKS_PER_GROUP(sb))
 			len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
 		else
-			last_block = len;
+			last_block = first_block + len;
 
 		if (e4b.bd_info->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, &e4b, first_block,
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 25f3a97..b0a126f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -496,7 +496,7 @@
 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
 	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-				   S_IFREG, 0, goal);
+				   S_IFREG, NULL, goal);
 	if (IS_ERR(tmp_inode)) {
 		retval = -ENOMEM;
 		ext4_journal_stop(handle);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index dc40e75..5485390 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -581,9 +581,9 @@
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
-		if (!ext4_check_dir_entry(dir, de, bh,
-					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+					 + ((char *)de - bh->b_data))) {
 			/* On error, skip the f_pos to the next block. */
 			dir_file->f_pos = (dir_file->f_pos |
 					(dir->i_sb->s_blocksize - 1)) + 1;
@@ -820,7 +820,7 @@
 		if ((char *) de + namelen <= dlimit &&
 		    ext4_match (namelen, name, de)) {
 			/* found a match - just to be sure, do a full check */
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -1;
 			*res_dir = de;
 			return 1;
@@ -1036,7 +1036,7 @@
 			return ERR_PTR(-EIO);
 		}
 		inode = ext4_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				EXT4_ERROR_INODE(dir,
 						 "deleted inode referenced: %u",
@@ -1269,7 +1269,7 @@
 		de = (struct ext4_dir_entry_2 *)bh->b_data;
 		top = bh->b_data + blocksize - reclen;
 		while ((char *) de <= top) {
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -EIO;
 			if (ext4_match(namelen, name, de))
 				return -EEXIST;
@@ -1602,7 +1602,11 @@
 			if (err)
 				goto journal_error;
 		}
-		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+		err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+		if (err) {
+			ext4_std_error(inode->i_sb, err);
+			goto cleanup;
+		}
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1630,17 +1634,21 @@
 {
 	struct ext4_dir_entry_2 *de, *pde;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
-	int i;
+	int i, err;
 
 	i = 0;
 	pde = NULL;
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	while (i < bh->b_size) {
-		if (!ext4_check_dir_entry(dir, de, bh, i))
+		if (ext4_check_dir_entry(dir, NULL, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
 			BUFFER_TRACE(bh, "get_write_access");
-			ext4_journal_get_write_access(handle, bh);
+			err = ext4_journal_get_write_access(handle, bh);
+			if (unlikely(err)) {
+				ext4_std_error(dir->i_sb, err);
+				return err;
+			}
 			if (pde)
 				pde->rec_len = ext4_rec_len_to_disk(
 					ext4_rec_len_from_disk(pde->rec_len,
@@ -1652,7 +1660,11 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, dir, bh);
+			err = ext4_handle_dirty_metadata(handle, dir, bh);
+			if (unlikely(err)) {
+				ext4_std_error(dir->i_sb, err);
+				return err;
+			}
 			return 0;
 		}
 		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
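
The other recurring change in these namei.c (and later resize.c) hunks is return-value checking for journalling helpers that used to be called fire-and-forget: get write access, modify the buffer, mark it dirty, and report any failure through ext4_std_error() before bailing out. A condensed sketch of that pattern (modify_block() is illustrative only):

	static int modify_block(handle_t *handle, struct inode *inode,
				struct buffer_head *bh)
	{
		int err;

		err = ext4_journal_get_write_access(handle, bh);
		if (unlikely(err)) {
			ext4_std_error(inode->i_sb, err);
			return err;
		}
		/* ... change bh->b_data here ... */
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			ext4_std_error(inode->i_sb, err);
		return err;
	}
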
@@ -1789,7 +1801,7 @@
 {
 	handle_t *handle;
 	struct inode *inode;
-	struct buffer_head *dir_block;
+	struct buffer_head *dir_block = NULL;
 	struct ext4_dir_entry_2 *de;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
 	int err, retries = 0;
@@ -1822,7 +1834,9 @@
 	if (!dir_block)
 		goto out_clear_inode;
 	BUFFER_TRACE(dir_block, "get_write_access");
-	ext4_journal_get_write_access(handle, dir_block);
+	err = ext4_journal_get_write_access(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
@@ -1839,10 +1853,12 @@
 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
 	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-	ext4_handle_dirty_metadata(handle, dir, dir_block);
-	brelse(dir_block);
-	ext4_mark_inode_dirty(handle, inode);
-	err = ext4_add_entry(handle, dentry, inode);
+	err = ext4_handle_dirty_metadata(handle, dir, dir_block);
+	if (err)
+		goto out_clear_inode;
+	err = ext4_mark_inode_dirty(handle, inode);
+	if (!err)
+		err = ext4_add_entry(handle, dentry, inode);
 	if (err) {
 out_clear_inode:
 		clear_nlink(inode);
@@ -1853,10 +1869,13 @@
 	}
 	ext4_inc_count(handle, dir);
 	ext4_update_dx_flag(dir);
-	ext4_mark_inode_dirty(handle, dir);
+	err = ext4_mark_inode_dirty(handle, dir);
+	if (err)
+		goto out_clear_inode;
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
 out_stop:
+	brelse(dir_block);
 	ext4_journal_stop(handle);
 	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
 		goto retry;
@@ -1919,7 +1938,7 @@
 			}
 			de = (struct ext4_dir_entry_2 *) bh->b_data;
 		}
-		if (!ext4_check_dir_entry(inode, de, bh, offset)) {
+		if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
 			de = (struct ext4_dir_entry_2 *)(bh->b_data +
 							 sb->s_blocksize);
 			offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2407,7 +2426,11 @@
 					ext4_current_time(new_dir);
 		ext4_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-		ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		if (unlikely(retval)) {
+			ext4_std_error(new_dir->i_sb, retval);
+			goto end_rename;
+		}
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2459,7 +2482,11 @@
 		PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
 						cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-		ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+		retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+		if (retval) {
+			ext4_std_error(old_dir->i_sb, retval);
+			goto end_rename;
+		}
 		ext4_dec_count(handle, old_dir);
 		if (new_inode) {
 			/* checked empty_dir above, can't have another parent,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index beacce1..7270dcf 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -44,7 +44,7 @@
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
 	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
-	if (io_page_cachep == NULL) {
+	if (io_end_cachep == NULL) {
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
@@ -158,11 +158,8 @@
 
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 {
-	ext4_io_end_t *io = NULL;
-
-	io = kmem_cache_alloc(io_end_cachep, flags);
+	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
 	if (io) {
-		memset(io, 0, sizeof(*io));
 		atomic_inc(&EXT4_I(inode)->i_ioend_count);
 		io->inode = inode;
 		INIT_WORK(&io->work, ext4_end_io_work);
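
kmem_cache_zalloc() is the cache-backed analogue of kzalloc(): it returns memory that is already cleared, so the separate memset() goes away. The two spellings are equivalent:

	ext4_io_end_t *io;

	io = kmem_cache_alloc(io_end_cachep, flags);	/* old: may contain garbage */
	if (io)
		memset(io, 0, sizeof(*io));

	io = kmem_cache_zalloc(io_end_cachep, flags);	/* new: zeroed on return */
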
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 981c847..3ecc6e4 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -220,7 +220,11 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext4_handle_dirty_metadata(handle, NULL, gdb);
+		err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+		if (unlikely(err)) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext4_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -258,7 +262,11 @@
 
 	ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
 			     bh->b_data);
-	ext4_handle_dirty_metadata(handle, NULL, bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bh);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_bh;
+	}
 	brelse(bh);
 	/* Mark unused entries in inode bitmap used */
 	ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
@@ -270,7 +278,9 @@
 
 	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
 			     bh->b_data);
-	ext4_handle_dirty_metadata(handle, NULL, bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bh);
+	if (unlikely(err))
+		ext4_std_error(sb, err);
 exit_bh:
 	brelse(bh);
 
@@ -422,17 +432,21 @@
 		goto exit_dind;
 	}
 
-	if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
+	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+	if (unlikely(err))
 		goto exit_dind;
 
-	if ((err = ext4_journal_get_write_access(handle, *primary)))
+	err = ext4_journal_get_write_access(handle, *primary);
+	if (unlikely(err))
 		goto exit_sbh;
 
-	if ((err = ext4_journal_get_write_access(handle, dind)))
-		goto exit_primary;
+	err = ext4_journal_get_write_access(handle, dind);
+	if (unlikely(err))
+		ext4_std_error(sb, err);
 
 	/* ext4_reserve_inode_write() gets a reference on the iloc */
-	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
+	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (unlikely(err))
 		goto exit_dindj;
 
 	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
@@ -454,12 +468,20 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
-	ext4_handle_dirty_metadata(handle, NULL, dind);
-	brelse(dind);
+	err = ext4_handle_dirty_metadata(handle, NULL, dind);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_inode;
+	}
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
 	ext4_mark_iloc_dirty(handle, inode, &iloc);
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext4_handle_dirty_metadata(handle, NULL, *primary);
+	err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_inode;
+	}
+	brelse(dind);
 
 	o_group_desc = EXT4_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -470,19 +492,19 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	if (err)
+		ext4_std_error(sb, err);
 
-	return 0;
+	return err;
 
 exit_inode:
 	/* ext4_journal_release_buffer(handle, iloc.bh); */
 	brelse(iloc.bh);
 exit_dindj:
 	/* ext4_journal_release_buffer(handle, dind); */
-exit_primary:
-	/* ext4_journal_release_buffer(handle, *primary); */
 exit_sbh:
-	/* ext4_journal_release_buffer(handle, *primary); */
+	/* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
 exit_dind:
 	brelse(dind);
 exit_bh:
@@ -665,7 +687,9 @@
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext4_handle_dirty_metadata(handle, NULL, bh);
+		err = ext4_handle_dirty_metadata(handle, NULL, bh);
+		if (unlikely(err))
+			ext4_std_error(sb, err);
 		brelse(bh);
 	}
 	if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -883,7 +907,11 @@
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
 
-	ext4_handle_dirty_metadata(handle, NULL, primary);
+	err = ext4_handle_dirty_metadata(handle, NULL, primary);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_journal;
+	}
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index fb15c9c..cb10a06 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -388,13 +388,14 @@
 void __ext4_error(struct super_block *sb, const char *function,
 		  unsigned int line, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
-	       sb->s_id, function, line, current->comm);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+	       sb->s_id, function, line, current->comm, &vaf);
 	va_end(args);
 
 	ext4_handle_error(sb);
@@ -405,28 +406,31 @@
 		      const char *fmt, ...)
 {
 	va_list args;
+	struct va_format vaf;
 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	es->s_last_error_block = cpu_to_le64(block);
 	save_error_info(inode->i_sb, function, line);
 	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
 	       inode->i_sb->s_id, function, line, inode->i_ino);
 	if (block)
-		printk("block %llu: ", block);
-	printk("comm %s: ", current->comm);
-	vprintk(fmt, args);
-	printk("\n");
+		printk(KERN_CONT "block %llu: ", block);
+	printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
 }
 
 void ext4_error_file(struct file *file, const char *function,
-		     unsigned int line, const char *fmt, ...)
+		     unsigned int line, ext4_fsblk_t block,
+		     const char *fmt, ...)
 {
 	va_list args;
+	struct va_format vaf;
 	struct ext4_super_block *es;
 	struct inode *inode = file->f_dentry->d_inode;
 	char pathname[80], *path;
@@ -434,17 +438,18 @@
 	es = EXT4_SB(inode->i_sb)->s_es;
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	save_error_info(inode->i_sb, function, line);
-	va_start(args, fmt);
 	path = d_path(&(file->f_path), pathname, sizeof(pathname));
-	if (!path)
+	if (IS_ERR(path))
 		path = "(unknown)";
 	printk(KERN_CRIT
-	       "EXT4-fs error (device %s): %s:%d: inode #%lu "
-	       "(comm %s path %s): ",
-	       inode->i_sb->s_id, function, line, inode->i_ino,
-	       current->comm, path);
-	vprintk(fmt, args);
-	printk("\n");
+	       "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
+	       inode->i_sb->s_id, function, line, inode->i_ino);
+	if (block)
+		printk(KERN_CONT "block %llu: ", block);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
@@ -543,28 +548,29 @@
 		panic("EXT4-fs panic from previous error\n");
 }
 
-void ext4_msg (struct super_block * sb, const char *prefix,
-		   const char *fmt, ...)
+void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
 	va_end(args);
 }
 
 void __ext4_warning(struct super_block *sb, const char *function,
 		    unsigned int line, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
-	       sb->s_id, function, line);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
+	       sb->s_id, function, line, &vaf);
 	va_end(args);
 }
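
All of these message helpers are converted to the %pV printk extension: the caller's format string and va_list are wrapped in a struct va_format and expanded inside one printk() call, so the whole line is emitted atomically instead of being assembled from several printk()/vprintk() pieces. A self-contained sketch of the idiom (my_warn() is a made-up example, not an ext4 function):

	void my_warn(struct super_block *sb, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* %pV expands fmt/args in place, inside this single printk() */
		printk(KERN_WARNING "myfs (%s): %pV\n", sb->s_id, &vaf);
		va_end(args);
	}
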
 
@@ -575,21 +581,25 @@
 __releases(bitlock)
 __acquires(bitlock)
 {
+	struct va_format vaf;
 	va_list args;
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
 	es->s_last_error_ino = cpu_to_le32(ino);
 	es->s_last_error_block = cpu_to_le64(block);
 	__save_error_info(sb, function, line);
+
 	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
 	       sb->s_id, function, line, grp);
 	if (ino)
-		printk("inode %lu: ", ino);
+		printk(KERN_CONT "inode %lu: ", ino);
 	if (block)
-		printk("block %llu:", (unsigned long long) block);
-	vprintk(fmt, args);
-	printk("\n");
+		printk(KERN_CONT "block %llu:", (unsigned long long) block);
+	printk(KERN_CONT "%pV\n", &vaf);
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_CONT)) {
@@ -647,7 +657,7 @@
 	struct block_device *bdev;
 	char b[BDEVNAME_SIZE];
 
-	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
 	if (IS_ERR(bdev))
 		goto fail;
 	return bdev;
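
open_by_devnum() plus a separate bd_claim()/bd_release() pair is replaced by blkdev_get_by_dev() with FMODE_EXCL and a holder (the superblock here); the matching blkdev_put() must pass FMODE_EXCL as well to drop the exclusive claim. A sketch of the open/close pairing with illustrative function names:

	static struct block_device *open_journal_dev(dev_t dev, struct super_block *sb)
	{
		struct block_device *bdev;

		bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
		if (IS_ERR(bdev))
			return NULL;	/* caller reports the failure */
		return bdev;
	}

	static void close_journal_dev(struct block_device *bdev)
	{
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}
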
@@ -663,8 +673,7 @@
  */
 static int ext4_blkdev_put(struct block_device *bdev)
 {
-	bd_release(bdev);
-	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
@@ -808,21 +817,15 @@
 	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
 	INIT_LIST_HEAD(&ei->i_prealloc_list);
 	spin_lock_init(&ei->i_prealloc_lock);
-	/*
-	 * Note:  We can be called before EXT4_SB(sb)->s_journal is set,
-	 * therefore it can be null here.  Don't check it, just initialize
-	 * jinode.
-	 */
-	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
 	ei->i_reserved_data_blocks = 0;
 	ei->i_reserved_meta_blocks = 0;
 	ei->i_allocated_meta_blocks = 0;
 	ei->i_da_metadata_calc_len = 0;
-	ei->i_delalloc_reserved_flag = 0;
 	spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
 	ei->i_reserved_quota = 0;
 #endif
+	ei->jinode = NULL;
 	INIT_LIST_HEAD(&ei->i_completed_io_list);
 	spin_lock_init(&ei->i_completed_io_lock);
 	ei->cur_aio_dio = NULL;
@@ -841,6 +844,13 @@
 	return drop;
 }
 
+static void ext4_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
+}
+
 static void ext4_destroy_inode(struct inode *inode)
 {
 	ext4_ioend_wait(inode);
@@ -853,7 +863,7 @@
 				true);
 		dump_stack();
 	}
-	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
+	call_rcu(&inode->i_rcu, ext4_i_callback);
 }
 
 static void init_once(void *foo)
@@ -891,9 +901,12 @@
 	end_writeback(inode);
 	dquot_drop(inode);
 	ext4_discard_preallocations(inode);
-	if (EXT4_JOURNAL(inode))
-		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
-				       &EXT4_I(inode)->jinode);
+	if (EXT4_I(inode)->jinode) {
+		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
+					       EXT4_I(inode)->jinode);
+		jbd2_free_inode(EXT4_I(inode)->jinode);
+		EXT4_I(inode)->jinode = NULL;
+	}
 }
 
 static inline void ext4_show_quota_options(struct seq_file *seq,
@@ -1386,7 +1399,7 @@
 		sbi->s_qf_names[qtype] = NULL;
 		return 0;
 	}
-	set_opt(sbi->s_mount_opt, QUOTA);
+	set_opt(sb, QUOTA);
 	return 1;
 }
 
@@ -1441,21 +1454,21 @@
 		switch (token) {
 		case Opt_bsd_df:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			clear_opt(sbi->s_mount_opt, MINIX_DF);
+			clear_opt(sb, MINIX_DF);
 			break;
 		case Opt_minix_df:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			set_opt(sbi->s_mount_opt, MINIX_DF);
+			set_opt(sb, MINIX_DF);
 
 			break;
 		case Opt_grpid:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			set_opt(sbi->s_mount_opt, GRPID);
+			set_opt(sb, GRPID);
 
 			break;
 		case Opt_nogrpid:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			clear_opt(sbi->s_mount_opt, GRPID);
+			clear_opt(sb, GRPID);
 
 			break;
 		case Opt_resuid:
@@ -1473,38 +1486,38 @@
 			/* *sb_block = match_int(&args[0]); */
 			break;
 		case Opt_err_panic:
-			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt(sbi->s_mount_opt, ERRORS_RO);
-			set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+			clear_opt(sb, ERRORS_CONT);
+			clear_opt(sb, ERRORS_RO);
+			set_opt(sb, ERRORS_PANIC);
 			break;
 		case Opt_err_ro:
-			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt(sbi->s_mount_opt, ERRORS_RO);
+			clear_opt(sb, ERRORS_CONT);
+			clear_opt(sb, ERRORS_PANIC);
+			set_opt(sb, ERRORS_RO);
 			break;
 		case Opt_err_cont:
-			clear_opt(sbi->s_mount_opt, ERRORS_RO);
-			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt(sbi->s_mount_opt, ERRORS_CONT);
+			clear_opt(sb, ERRORS_RO);
+			clear_opt(sb, ERRORS_PANIC);
+			set_opt(sb, ERRORS_CONT);
 			break;
 		case Opt_nouid32:
-			set_opt(sbi->s_mount_opt, NO_UID32);
+			set_opt(sb, NO_UID32);
 			break;
 		case Opt_debug:
-			set_opt(sbi->s_mount_opt, DEBUG);
+			set_opt(sb, DEBUG);
 			break;
 		case Opt_oldalloc:
-			set_opt(sbi->s_mount_opt, OLDALLOC);
+			set_opt(sb, OLDALLOC);
 			break;
 		case Opt_orlov:
-			clear_opt(sbi->s_mount_opt, OLDALLOC);
+			clear_opt(sb, OLDALLOC);
 			break;
 #ifdef CONFIG_EXT4_FS_XATTR
 		case Opt_user_xattr:
-			set_opt(sbi->s_mount_opt, XATTR_USER);
+			set_opt(sb, XATTR_USER);
 			break;
 		case Opt_nouser_xattr:
-			clear_opt(sbi->s_mount_opt, XATTR_USER);
+			clear_opt(sb, XATTR_USER);
 			break;
 #else
 		case Opt_user_xattr:
@@ -1514,10 +1527,10 @@
 #endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 		case Opt_acl:
-			set_opt(sbi->s_mount_opt, POSIX_ACL);
+			set_opt(sb, POSIX_ACL);
 			break;
 		case Opt_noacl:
-			clear_opt(sbi->s_mount_opt, POSIX_ACL);
+			clear_opt(sb, POSIX_ACL);
 			break;
 #else
 		case Opt_acl:
@@ -1536,7 +1549,7 @@
 					 "Cannot specify journal on remount");
 				return 0;
 			}
-			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
+			set_opt(sb, UPDATE_JOURNAL);
 			break;
 		case Opt_journal_dev:
 			if (is_remount) {
@@ -1549,14 +1562,14 @@
 			*journal_devnum = option;
 			break;
 		case Opt_journal_checksum:
-			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+			set_opt(sb, JOURNAL_CHECKSUM);
 			break;
 		case Opt_journal_async_commit:
-			set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
-			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+			set_opt(sb, JOURNAL_ASYNC_COMMIT);
+			set_opt(sb, JOURNAL_CHECKSUM);
 			break;
 		case Opt_noload:
-			set_opt(sbi->s_mount_opt, NOLOAD);
+			set_opt(sb, NOLOAD);
 			break;
 		case Opt_commit:
 			if (match_int(&args[0], &option))
@@ -1599,15 +1612,15 @@
 					return 0;
 				}
 			} else {
-				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
+				clear_opt(sb, DATA_FLAGS);
 				sbi->s_mount_opt |= data_opt;
 			}
 			break;
 		case Opt_data_err_abort:
-			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			set_opt(sb, DATA_ERR_ABORT);
 			break;
 		case Opt_data_err_ignore:
-			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			clear_opt(sb, DATA_ERR_ABORT);
 			break;
 #ifdef CONFIG_QUOTA
 		case Opt_usrjquota:
@@ -1647,12 +1660,12 @@
 			break;
 		case Opt_quota:
 		case Opt_usrquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, USRQUOTA);
+			set_opt(sb, QUOTA);
+			set_opt(sb, USRQUOTA);
 			break;
 		case Opt_grpquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, GRPQUOTA);
+			set_opt(sb, QUOTA);
+			set_opt(sb, GRPQUOTA);
 			break;
 		case Opt_noquota:
 			if (sb_any_quota_loaded(sb)) {
@@ -1660,9 +1673,9 @@
 					"options when quota turned on");
 				return 0;
 			}
-			clear_opt(sbi->s_mount_opt, QUOTA);
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
+			clear_opt(sb, QUOTA);
+			clear_opt(sb, USRQUOTA);
+			clear_opt(sb, GRPQUOTA);
 			break;
 #else
 		case Opt_quota:
@@ -1688,7 +1701,7 @@
 			sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
 			break;
 		case Opt_nobarrier:
-			clear_opt(sbi->s_mount_opt, BARRIER);
+			clear_opt(sb, BARRIER);
 			break;
 		case Opt_barrier:
 			if (args[0].from) {
@@ -1697,9 +1710,9 @@
 			} else
 				option = 1;	/* No argument, default to 1 */
 			if (option)
-				set_opt(sbi->s_mount_opt, BARRIER);
+				set_opt(sb, BARRIER);
 			else
-				clear_opt(sbi->s_mount_opt, BARRIER);
+				clear_opt(sb, BARRIER);
 			break;
 		case Opt_ignore:
 			break;
@@ -1723,17 +1736,17 @@
 				 "Ignoring deprecated bh option");
 			break;
 		case Opt_i_version:
-			set_opt(sbi->s_mount_opt, I_VERSION);
+			set_opt(sb, I_VERSION);
 			sb->s_flags |= MS_I_VERSION;
 			break;
 		case Opt_nodelalloc:
-			clear_opt(sbi->s_mount_opt, DELALLOC);
+			clear_opt(sb, DELALLOC);
 			break;
 		case Opt_mblk_io_submit:
-			set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+			set_opt(sb, MBLK_IO_SUBMIT);
 			break;
 		case Opt_nomblk_io_submit:
-			clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+			clear_opt(sb, MBLK_IO_SUBMIT);
 			break;
 		case Opt_stripe:
 			if (match_int(&args[0], &option))
@@ -1743,13 +1756,13 @@
 			sbi->s_stripe = option;
 			break;
 		case Opt_delalloc:
-			set_opt(sbi->s_mount_opt, DELALLOC);
+			set_opt(sb, DELALLOC);
 			break;
 		case Opt_block_validity:
-			set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+			set_opt(sb, BLOCK_VALIDITY);
 			break;
 		case Opt_noblock_validity:
-			clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+			clear_opt(sb, BLOCK_VALIDITY);
 			break;
 		case Opt_inode_readahead_blks:
 			if (match_int(&args[0], &option))
@@ -1773,7 +1786,7 @@
 							    option);
 			break;
 		case Opt_noauto_da_alloc:
-			set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+			set_opt(sb, NO_AUTO_DA_ALLOC);
 			break;
 		case Opt_auto_da_alloc:
 			if (args[0].from) {
@@ -1782,24 +1795,24 @@
 			} else
 				option = 1;	/* No argument, default to 1 */
 			if (option)
-				clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
+				clear_opt(sb, NO_AUTO_DA_ALLOC);
 			else
-				set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+				set_opt(sb, NO_AUTO_DA_ALLOC);
 			break;
 		case Opt_discard:
-			set_opt(sbi->s_mount_opt, DISCARD);
+			set_opt(sb, DISCARD);
 			break;
 		case Opt_nodiscard:
-			clear_opt(sbi->s_mount_opt, DISCARD);
+			clear_opt(sb, DISCARD);
 			break;
 		case Opt_dioread_nolock:
-			set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			set_opt(sb, DIOREAD_NOLOCK);
 			break;
 		case Opt_dioread_lock:
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 			break;
 		case Opt_init_inode_table:
-			set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+			set_opt(sb, INIT_INODE_TABLE);
 			if (args[0].from) {
 				if (match_int(&args[0], &option))
 					return 0;
@@ -1810,7 +1823,7 @@
 			sbi->s_li_wait_mult = option;
 			break;
 		case Opt_noinit_inode_table:
-			clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+			clear_opt(sb, INIT_INODE_TABLE);
 			break;
 		default:
 			ext4_msg(sb, KERN_ERR,
@@ -1822,10 +1835,10 @@
 #ifdef CONFIG_QUOTA
 	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
+			clear_opt(sb, USRQUOTA);
 
 		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
+			clear_opt(sb, GRPQUOTA);
 
 		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
 			ext4_msg(sb, KERN_ERR, "old and new quota "
@@ -1895,12 +1908,12 @@
 	ext4_commit_super(sb, 1);
 	if (test_opt(sb, DEBUG))
 		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
-				"bpg=%lu, ipg=%lu, mo=%04x]\n",
+				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
 			sb->s_blocksize,
 			sbi->s_groups_count,
 			EXT4_BLOCKS_PER_GROUP(sb),
 			EXT4_INODES_PER_GROUP(sb),
-			sbi->s_mount_opt);
+			sbi->s_mount_opt, sbi->s_mount_opt2);
 
 	return res;
 }
@@ -1930,14 +1943,13 @@
 	size = flex_group_count * sizeof(struct flex_groups);
 	sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
-		sbi->s_flex_groups = vmalloc(size);
-		if (sbi->s_flex_groups)
-			memset(sbi->s_flex_groups, 0, size);
-	}
-	if (sbi->s_flex_groups == NULL) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for "
-				"%u flex groups", flex_group_count);
-		goto failed;
+		sbi->s_flex_groups = vzalloc(size);
+		if (sbi->s_flex_groups == NULL) {
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %u flex groups",
+				 flex_group_count);
+			goto failed;
+		}
 	}
 
 	for (i = 0; i < sbi->s_groups_count; i++) {
@@ -2916,7 +2928,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_li_request *elr;
 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
-	int ret;
+	int ret = 0;
 
 	if (sbi->s_li_request != NULL)
 		return 0;
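
The flex-group table allocation just above drops the open-coded vmalloc()+memset() fallback in favour of vzalloc(). A hedged sketch of the allocate/free pairing such a fallback implies (the free side is not part of this hunk; is_vmalloc_addr() is one way to pick the right free routine):

	static void *alloc_table(size_t size)
	{
		void *p = kzalloc(size, GFP_KERNEL);

		if (p == NULL)
			p = vzalloc(size);	/* zeroed, virtually contiguous */
		return p;
	}

	static void free_table(void *p)
	{
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}
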
@@ -3071,41 +3083,41 @@
 
 	/* Set defaults before we parse the mount options */
 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-	set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+	set_opt(sb, INIT_INODE_TABLE);
 	if (def_mount_opts & EXT4_DEFM_DEBUG)
-		set_opt(sbi->s_mount_opt, DEBUG);
+		set_opt(sb, DEBUG);
 	if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
 		ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
 			"2.6.38");
-		set_opt(sbi->s_mount_opt, GRPID);
+		set_opt(sb, GRPID);
 	}
 	if (def_mount_opts & EXT4_DEFM_UID16)
-		set_opt(sbi->s_mount_opt, NO_UID32);
+		set_opt(sb, NO_UID32);
 #ifdef CONFIG_EXT4_FS_XATTR
 	if (def_mount_opts & EXT4_DEFM_XATTR_USER)
-		set_opt(sbi->s_mount_opt, XATTR_USER);
+		set_opt(sb, XATTR_USER);
 #endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	if (def_mount_opts & EXT4_DEFM_ACL)
-		set_opt(sbi->s_mount_opt, POSIX_ACL);
+		set_opt(sb, POSIX_ACL);
 #endif
 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
-		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+		set_opt(sb, JOURNAL_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
-		set_opt(sbi->s_mount_opt, ORDERED_DATA);
+		set_opt(sb, ORDERED_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		set_opt(sb, WRITEBACK_DATA);
 
 	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
-		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+		set_opt(sb, ERRORS_PANIC);
 	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
-		set_opt(sbi->s_mount_opt, ERRORS_CONT);
+		set_opt(sb, ERRORS_CONT);
 	else
-		set_opt(sbi->s_mount_opt, ERRORS_RO);
+		set_opt(sb, ERRORS_RO);
 	if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
-		set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+		set_opt(sb, BLOCK_VALIDITY);
 	if (def_mount_opts & EXT4_DEFM_DISCARD)
-		set_opt(sbi->s_mount_opt, DISCARD);
+		set_opt(sb, DISCARD);
 
 	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
 	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
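
Throughout super.c the mount-option helpers now take the superblock rather than the raw sbi->s_mount_opt word. The macro bodies are not part of this diff; a plausible sketch of what the sb-based forms expand to, inferred from the call sites (an assumption, not quoted from the patch):

	#define set_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_##opt)
	#define clear_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt &= ~EXT4_MOUNT_##opt)
	#define test_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_##opt)
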
@@ -3114,7 +3126,7 @@
 	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
 
 	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
-		set_opt(sbi->s_mount_opt, BARRIER);
+		set_opt(sb, BARRIER);
 
 	/*
 	 * enable delayed allocation by default
@@ -3122,7 +3134,7 @@
 	 */
 	if (!IS_EXT3_SB(sb) &&
 	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
-		set_opt(sbi->s_mount_opt, DELALLOC);
+		set_opt(sb, DELALLOC);
 
 	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
 			   &journal_devnum, &journal_ioprio, NULL, 0)) {
@@ -3425,8 +3437,8 @@
 		       "suppressed and not mounted read-only");
 		goto failed_mount_wq;
 	} else {
-		clear_opt(sbi->s_mount_opt, DATA_FLAGS);
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		clear_opt(sb, DATA_FLAGS);
+		set_opt(sb, WRITEBACK_DATA);
 		sbi->s_journal = NULL;
 		needs_recovery = 0;
 		goto no_journal;
@@ -3464,9 +3476,9 @@
 		 */
 		if (jbd2_journal_check_available_features
 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
-			set_opt(sbi->s_mount_opt, ORDERED_DATA);
+			set_opt(sb, ORDERED_DATA);
 		else
-			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+			set_opt(sb, JOURNAL_DATA);
 		break;
 
 	case EXT4_MOUNT_ORDERED_DATA:
@@ -3556,18 +3568,18 @@
 	    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
 		ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
 			 "requested data journaling mode");
-		clear_opt(sbi->s_mount_opt, DELALLOC);
+		clear_opt(sb, DELALLOC);
 	}
 	if (test_opt(sb, DIOREAD_NOLOCK)) {
 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 				"option - requested data journaling mode");
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 		}
 		if (sb->s_blocksize < PAGE_SIZE) {
 			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 				"option - block size is too small");
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 		}
 	}
 
@@ -3765,13 +3777,6 @@
 	if (bdev == NULL)
 		return NULL;
 
-	if (bd_claim(bdev, sb)) {
-		ext4_msg(sb, KERN_ERR,
-			"failed to claim external journal device");
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
-		return NULL;
-	}
-
 	blocksize = sb->s_blocksize;
 	hblock = bdev_logical_block_size(bdev);
 	if (blocksize < hblock) {
@@ -4166,6 +4171,22 @@
 	return 0;
 }
 
+/*
+ * Structure to save mount options for ext4_remount's benefit
+ */
+struct ext4_mount_options {
+	unsigned long s_mount_opt;
+	unsigned long s_mount_opt2;
+	uid_t s_resuid;
+	gid_t s_resgid;
+	unsigned long s_commit_interval;
+	u32 s_min_batch_time, s_max_batch_time;
+#ifdef CONFIG_QUOTA
+	int s_jquota_fmt;
+	char *s_qf_names[MAXQUOTAS];
+#endif
+};
+
 static int ext4_remount(struct super_block *sb, int *flags, char *data)
 {
 	struct ext4_super_block *es;
@@ -4186,6 +4207,7 @@
 	lock_super(sb);
 	old_sb_flags = sb->s_flags;
 	old_opts.s_mount_opt = sbi->s_mount_opt;
+	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
 	old_opts.s_resuid = sbi->s_resuid;
 	old_opts.s_resgid = sbi->s_resgid;
 	old_opts.s_commit_interval = sbi->s_commit_interval;
@@ -4339,6 +4361,7 @@
 restore_opts:
 	sb->s_flags = old_sb_flags;
 	sbi->s_mount_opt = old_opts.s_mount_opt;
+	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
 	sbi->s_resuid = old_opts.s_resuid;
 	sbi->s_resgid = old_opts.s_resgid;
 	sbi->s_commit_interval = old_opts.s_commit_interval;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fa4b899..fc32176 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -427,23 +427,23 @@
 static int
 ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
-	int i_error, b_error;
+	int ret, ret2;
 
 	down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
-	i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
-	if (i_error < 0) {
-		b_error = 0;
-	} else {
-		if (buffer) {
-			buffer += i_error;
-			buffer_size -= i_error;
-		}
-		b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
-		if (b_error < 0)
-			i_error = 0;
+	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
+	if (ret < 0)
+		goto errout;
+	if (buffer) {
+		buffer += ret;
+		buffer_size -= ret;
 	}
+	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
+	if (ret < 0)
+		goto errout;
+	ret += ret2;
+errout:
 	up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
-	return i_error + b_error;
+	return ret;
 }
 
 /*
@@ -947,7 +947,7 @@
 /*
  * ext4_xattr_set_handle()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index d75a77f..f504089 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -319,7 +319,8 @@
 			struct msdos_dir_entry *de, loff_t i_pos);
 extern int fat_sync_inode(struct inode *inode);
 extern int fat_fill_super(struct super_block *sb, void *data, int silent,
-			const struct inode_operations *fs_dir_inode_ops, int isvfat);
+			const struct inode_operations *fs_dir_inode_ops,
+			int isvfat, void (*setup)(struct super_block *));
 
 extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
 		            struct inode *i2);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index ad6998a..86753fe 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -514,9 +514,16 @@
 	return &ei->vfs_inode;
 }
 
+static void fat_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
+}
+
 static void fat_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
+	call_rcu(&inode->i_rcu, fat_i_callback);
 }
 
 static void init_once(void *foo)
@@ -696,7 +703,6 @@
 		struct fid *fid, int fh_len, int fh_type)
 {
 	struct inode *inode = NULL;
-	struct dentry *result;
 	u32 *fh = fid->raw;
 
 	if (fh_len < 5 || fh_type != 3)
@@ -741,10 +747,7 @@
 	 * the fat_iget lookup again.  If that fails, then we are totally out
 	 * of luck.  But all that is for another day
 	 */
-	result = d_obtain_alias(inode);
-	if (!IS_ERR(result))
-		result->d_op = sb->s_root->d_op;
-	return result;
+	return d_obtain_alias(inode);
 }
 
 static int
@@ -792,8 +795,6 @@
 	brelse(bh);
 
 	parent = d_obtain_alias(inode);
-	if (!IS_ERR(parent))
-		parent->d_op = sb->s_root->d_op;
 out:
 	unlock_super(sb);
 
@@ -1237,7 +1238,8 @@
  * Read the super block of an MS-DOS FS.
  */
 int fat_fill_super(struct super_block *sb, void *data, int silent,
-		   const struct inode_operations *fs_dir_inode_ops, int isvfat)
+		   const struct inode_operations *fs_dir_inode_ops, int isvfat,
+		   void (*setup)(struct super_block *))
 {
 	struct inode *root_inode = NULL, *fat_inode = NULL;
 	struct buffer_head *bh;
@@ -1273,6 +1275,8 @@
 	if (error)
 		goto out_fail;
 
+	setup(sb); /* flavour-specific stuff that needs options */
+
 	error = -EIO;
 	sb_min_blocksize(sb, 512);
 	bh = sb_bread(sb, 0);
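
fat_fill_super() grows a setup() callback so that flavour-specific initialisation which depends on the parsed mount options, in particular installing the per-superblock dentry operations via sb->s_d_op, happens inside the common helper rather than in each caller afterwards. A sketch of such a hook; example_setup() and example_dentry_ops are illustrative, the real msdos and vfat hooks appear further below:

	static void example_setup(struct super_block *sb)
	{
		sb->s_d_op = &example_dentry_ops;	/* inherited by every dentry */
		sb->s_flags |= MS_NOATIME;
	}

	static int example_fill_super(struct super_block *sb, void *data, int silent)
	{
		return fat_fill_super(sb, data, silent, &example_dir_inode_operations,
				      0, example_setup);
	}
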
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 3345aab..7114990 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -148,7 +148,8 @@
  * that the existing dentry can be used. The msdos fs routines will
  * return ENOENT or EINVAL as appropriate.
  */
-static int msdos_hash(struct dentry *dentry, struct qstr *qstr)
+static int msdos_hash(const struct dentry *dentry, const struct inode *inode,
+	       struct qstr *qstr)
 {
 	struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
 	unsigned char msdos_name[MSDOS_NAME];
@@ -164,16 +165,18 @@
  * Compare two msdos names. If either of the names are invalid,
  * we fall back to doing the standard name comparison.
  */
-static int msdos_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
+static int msdos_cmp(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
+	struct fat_mount_options *options = &MSDOS_SB(parent->d_sb)->options;
 	unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME];
 	int error;
 
-	error = msdos_format_name(a->name, a->len, a_msdos_name, options);
+	error = msdos_format_name(name->name, name->len, a_msdos_name, options);
 	if (error)
 		goto old_compare;
-	error = msdos_format_name(b->name, b->len, b_msdos_name, options);
+	error = msdos_format_name(str, len, b_msdos_name, options);
 	if (error)
 		goto old_compare;
 	error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME);
@@ -182,8 +185,8 @@
 
 old_compare:
 	error = 1;
-	if (a->len == b->len)
-		error = memcmp(a->name, b->name, a->len);
+	if (name->len == len)
+		error = memcmp(name->name, str, len);
 	goto out;
 }
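
The ->d_hash and ->d_compare conversions above follow the 2.6.38-era dentry_operations prototypes: the dentry and inode pointers are const (they may be examined locklessly during RCU-walk), and the candidate name arrives as an explicit (len, str) pair alongside the stored qstr. A minimal sketch of a compare callback in that shape (example_cmp is illustrative; return 0 for a match, nonzero otherwise):

	static int example_cmp(const struct dentry *parent, const struct inode *pinode,
			       const struct dentry *dentry, const struct inode *inode,
			       unsigned int len, const char *str,
			       const struct qstr *name)
	{
		if (len != name->len)
			return 1;
		return memcmp(str, name->name, len) ? 1 : 0;
	}
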
 
@@ -224,11 +227,7 @@
 	}
 out:
 	unlock_super(sb);
-	dentry->d_op = &msdos_dentry_operations;
-	dentry = d_splice_alias(inode, dentry);
-	if (dentry)
-		dentry->d_op = &msdos_dentry_operations;
-	return dentry;
+	return d_splice_alias(inode, dentry);
 
 error:
 	unlock_super(sb);
@@ -658,21 +657,16 @@
 	.getattr	= fat_getattr,
 };
 
+static void setup(struct super_block *sb)
+{
+	sb->s_d_op = &msdos_dentry_operations;
+	sb->s_flags |= MS_NOATIME;
+}
+
 static int msdos_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int res;
-
-	lock_super(sb);
-	res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0);
-	if (res) {
-		unlock_super(sb);
-		return res;
-	}
-
-	sb->s_flags |= MS_NOATIME;
-	sb->s_root->d_op = &msdos_dentry_operations;
-	unlock_super(sb);
-	return 0;
+	return fat_fill_super(sb, data, silent, &msdos_dir_inode_operations,
+			     0, setup);
 }
 
 static struct dentry *msdos_mount(struct file_system_type *fs_type,
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index b936703..f88f752 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,6 +43,9 @@
 
 static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
 	/* This is not negative dentry. Always valid. */
 	if (dentry->d_inode)
 		return 1;
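
Both vfat revalidate routines (and fuse_dentry_revalidate further down) gain the same guard: when called during RCU-walk, with LOOKUP_RCU set in nd->flags, they return -ECHILD, which tells the VFS to drop back to ref-walk and call ->d_revalidate again in a context where sleeping and taking references are allowed. The bare-bones shape of such a callback (example_revalidate is illustrative):

	static int example_revalidate(struct dentry *dentry, struct nameidata *nd)
	{
		if (nd->flags & LOOKUP_RCU)
			return -ECHILD;		/* retry in ref-walk mode */

		/* normal validation, may sleep */
		return 1;
	}
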
@@ -51,6 +54,9 @@
 
 static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
 {
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
 	/*
 	 * This is not negative dentry. Always valid.
 	 *
@@ -85,22 +91,26 @@
 }
 
 /* returns the length of a struct qstr, ignoring trailing dots */
-static unsigned int vfat_striptail_len(struct qstr *qstr)
+static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
 {
-	unsigned int len = qstr->len;
-
-	while (len && qstr->name[len - 1] == '.')
+	while (len && name[len - 1] == '.')
 		len--;
 	return len;
 }
 
+static unsigned int vfat_striptail_len(const struct qstr *qstr)
+{
+	return __vfat_striptail_len(qstr->len, qstr->name);
+}
+
 /*
  * Compute the hash for the vfat name corresponding to the dentry.
  * Note: if the name is invalid, we leave the hash code unchanged so
  * that the existing dentry can be used. The vfat fs routines will
  * return ENOENT or EINVAL as appropriate.
  */
-static int vfat_hash(struct dentry *dentry, struct qstr *qstr)
+static int vfat_hash(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr));
 	return 0;
@@ -112,9 +122,10 @@
  * that the existing dentry can be used. The vfat fs routines will
  * return ENOENT or EINVAL as appropriate.
  */
-static int vfat_hashi(struct dentry *dentry, struct qstr *qstr)
+static int vfat_hashi(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
-	struct nls_table *t = MSDOS_SB(dentry->d_inode->i_sb)->nls_io;
+	struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
 	const unsigned char *name;
 	unsigned int len;
 	unsigned long hash;
@@ -133,16 +144,18 @@
 /*
  * Case insensitive compare of two vfat names.
  */
-static int vfat_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b)
+static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	struct nls_table *t = MSDOS_SB(dentry->d_inode->i_sb)->nls_io;
+	struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
 	unsigned int alen, blen;
 
 	/* A filename cannot end in '.' or we treat it like it has none */
-	alen = vfat_striptail_len(a);
-	blen = vfat_striptail_len(b);
+	alen = vfat_striptail_len(name);
+	blen = __vfat_striptail_len(len, str);
 	if (alen == blen) {
-		if (nls_strnicmp(t, a->name, b->name, alen) == 0)
+		if (nls_strnicmp(t, name->name, str, alen) == 0)
 			return 0;
 	}
 	return 1;
@@ -151,15 +164,17 @@
 /*
  * Case sensitive compare of two vfat names.
  */
-static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
+static int vfat_cmp(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
 	unsigned int alen, blen;
 
 	/* A filename cannot end in '.' or we treat it like it has none */
-	alen = vfat_striptail_len(a);
-	blen = vfat_striptail_len(b);
+	alen = vfat_striptail_len(name);
+	blen = __vfat_striptail_len(len, str);
 	if (alen == blen) {
-		if (strncmp(a->name, b->name, alen) == 0)
+		if (strncmp(name->name, str, alen) == 0)
 			return 0;
 	}
 	return 1;
@@ -757,13 +772,10 @@
 
 out:
 	unlock_super(sb);
-	dentry->d_op = sb->s_root->d_op;
 	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	dentry = d_splice_alias(inode, dentry);
-	if (dentry) {
-		dentry->d_op = sb->s_root->d_op;
+	if (dentry)
 		dentry->d_time = dentry->d_parent->d_inode->i_version;
-	}
 	return dentry;
 
 error:
@@ -1051,24 +1063,18 @@
 	.getattr	= fat_getattr,
 };
 
+static void setup(struct super_block *sb)
+{
+	if (MSDOS_SB(sb)->options.name_check != 's')
+		sb->s_d_op = &vfat_ci_dentry_ops;
+	else
+		sb->s_d_op = &vfat_dentry_ops;
+}
+
 static int vfat_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int res;
-
-	lock_super(sb);
-	res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1);
-	if (res) {
-		unlock_super(sb);
-		return res;
-	}
-
-	if (MSDOS_SB(sb)->options.name_check != 's')
-		sb->s_root->d_op = &vfat_ci_dentry_ops;
-	else
-		sb->s_root->d_op = &vfat_dentry_ops;
-
-	unlock_super(sb);
-	return 0;
+	return fat_fill_super(sb, data, silent, &vfat_dir_inode_operations,
+			     1, setup);
 }
 
 static struct dentry *vfat_mount(struct file_system_type *fs_type,
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 68ba492..751d6b2 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -115,6 +115,9 @@
 		tmp = &(*tmp)->next;
 	}
 	write_unlock(&file_systems_lock);
+
+	synchronize_rcu();
+
 	return -EINVAL;
 }
 
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 8c04eac..2ba6719 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -337,6 +337,13 @@
 	return ip;
 }
 
+static void vxfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(vxfs_inode_cachep, inode->i_private);
+}
+
 /**
  * vxfs_evict_inode - remove inode from main memory
  * @ip:		inode to discard.
@@ -350,5 +357,5 @@
 {
 	truncate_inode_pages(&ip->i_data, 0);
 	end_writeback(ip);
-	kmem_cache_free(vxfs_inode_cachep, ip->i_private);
+	call_rcu(&ip->i_rcu, vxfs_i_callback);
 }
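
ext4, fat and freevxfs all switch to the same RCU-deferred inode freeing: instead of returning the inode to its kmem_cache immediately, ->destroy_inode/->evict_inode queue a callback with call_rcu() on the new inode->i_rcu head, so lockless path walkers that may still hold a pointer to the inode keep seeing valid memory until a grace period has passed. The INIT_LIST_HEAD(&inode->i_dentry) re-init appears to be needed because i_rcu shares storage with i_dentry. A sketch of the pattern for a filesystem with its own inode cache (all names illustrative):

	static void example_i_callback(struct rcu_head *head)
	{
		struct inode *inode = container_of(head, struct inode, i_rcu);

		INIT_LIST_HEAD(&inode->i_dentry);
		kmem_cache_free(example_inode_cachep, EXAMPLE_I(inode));
	}

	static void example_destroy_inode(struct inode *inode)
	{
		call_rcu(&inode->i_rcu, example_i_callback);
	}
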
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3d06ccc..59c6e49 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -84,13 +84,9 @@
 	return list_entry(head, struct inode, i_wb_list);
 }
 
-static void bdi_queue_work(struct backing_dev_info *bdi,
-		struct wb_writeback_work *work)
+/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
+static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
 {
-	trace_writeback_queue(bdi, work);
-
-	spin_lock_bh(&bdi->wb_lock);
-	list_add_tail(&work->list, &bdi->work_list);
 	if (bdi->wb.task) {
 		wake_up_process(bdi->wb.task);
 	} else {
@@ -98,15 +94,26 @@
 		 * The bdi thread isn't there, wake up the forker thread which
 		 * will create and run it.
 		 */
-		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
 	}
+}
+
+static void bdi_queue_work(struct backing_dev_info *bdi,
+			   struct wb_writeback_work *work)
+{
+	trace_writeback_queue(bdi, work);
+
+	spin_lock_bh(&bdi->wb_lock);
+	list_add_tail(&work->list, &bdi->work_list);
+	if (!bdi->wb.task)
+		trace_writeback_nothread(bdi, work);
+	bdi_wakeup_flusher(bdi);
 	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		bool range_cyclic, bool for_background)
+		      bool range_cyclic)
 {
 	struct wb_writeback_work *work;
 
@@ -126,7 +133,6 @@
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
-	work->for_background = for_background;
 
 	bdi_queue_work(bdi, work);
 }
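
bdi_queue_work() and the reworked bdi_start_background_writeback() below now share bdi_wakeup_flusher(), which expects bdi->wb_lock to be held and wakes the per-bdi flusher thread, or the forker thread of default_backing_dev_info when the bdi thread has not been spawned yet. A hedged sketch of the calling pattern (kick_flusher is a made-up wrapper, not part of the patch):

	static void kick_flusher(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
	{
		spin_lock_bh(&bdi->wb_lock);
		if (work)			/* background wakeups queue nothing */
			list_add_tail(&work->list, &bdi->work_list);
		bdi_wakeup_flusher(bdi);	/* needs wb_lock, see above */
		spin_unlock_bh(&bdi->wb_lock);
	}
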
@@ -144,7 +150,7 @@
  */
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
-	__bdi_start_writeback(bdi, nr_pages, true, false);
+	__bdi_start_writeback(bdi, nr_pages, true);
 }
 
 /**
@@ -152,13 +158,21 @@
  * @bdi: the backing device to write from
  *
  * Description:
- *   This does WB_SYNC_NONE background writeback. The IO is only
- *   started when this function returns, we make no guarentees on
- *   completion. Caller need not hold sb s_umount semaphore.
+ *   This makes sure WB_SYNC_NONE background writeback happens. When
+ *   this function returns, it is only guaranteed that for the given BDI
+ *   some IO is happening if we are over background dirty threshold.
+ *   Caller need not hold sb s_umount semaphore.
  */
 void bdi_start_background_writeback(struct backing_dev_info *bdi)
 {
-	__bdi_start_writeback(bdi, LONG_MAX, true, true);
+	/*
+	 * We just wake up the flusher thread. It will perform background
+	 * writeback as soon as there is no other work to do.
+	 */
+	trace_writeback_wake_background(bdi);
+	spin_lock_bh(&bdi->wb_lock);
+	bdi_wakeup_flusher(bdi);
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 /*
@@ -616,6 +630,7 @@
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
+	long write_chunk;
 	struct inode *inode;
 
 	if (wbc.for_kupdate) {
@@ -628,6 +643,24 @@
 		wbc.range_end = LLONG_MAX;
 	}
 
+	/*
+	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
+	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
+	 * here avoids calling into writeback_inodes_wb() more than once.
+	 *
+	 * The intended call sequence for WB_SYNC_ALL writeback is:
+	 *
+	 *      wb_writeback()
+	 *          __writeback_inodes_sb()     <== called only once
+	 *              write_cache_pages()     <== called once for each inode
+	 *                   (quickly) tag currently dirty pages
+	 *                   (maybe slowly) sync all tagged pages
+	 */
+	if (wbc.sync_mode == WB_SYNC_NONE)
+		write_chunk = MAX_WRITEBACK_PAGES;
+	else
+		write_chunk = LONG_MAX;
+
 	wbc.wb_start = jiffies; /* livelock avoidance */
 	for (;;) {
 		/*
@@ -637,6 +670,16 @@
 			break;
 
 		/*
+		 * Background writeout and kupdate-style writeback may
+		 * run forever. Stop them if there is other work to do
+		 * so that e.g. sync can proceed. They'll be restarted
+		 * after the other works are all done.
+		 */
+		if ((work->for_background || work->for_kupdate) &&
+		    !list_empty(&wb->bdi->work_list))
+			break;
+
+		/*
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
@@ -644,7 +687,7 @@
 			break;
 
 		wbc.more_io = 0;
-		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
+		wbc.nr_to_write = write_chunk;
 		wbc.pages_skipped = 0;
 
 		trace_wbc_writeback_start(&wbc, wb->bdi);
@@ -654,8 +697,8 @@
 			writeback_inodes_wb(wb, &wbc);
 		trace_wbc_writeback_written(&wbc, wb->bdi);
 
-		work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
-		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+		work->nr_pages -= write_chunk - wbc.nr_to_write;
+		wrote += write_chunk - wbc.nr_to_write;
 
 		/*
 		 * If we consumed everything, see if we have more
@@ -670,7 +713,7 @@
 		/*
 		 * Did we write something? Try for more
 		 */
-		if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
+		if (wbc.nr_to_write < write_chunk)
 			continue;
 		/*
 		 * Nothing written. Wait for some inode to
@@ -718,6 +761,23 @@
 		get_nr_dirty_inodes();
 }
 
+static long wb_check_background_flush(struct bdi_writeback *wb)
+{
+	if (over_bground_thresh()) {
+
+		struct wb_writeback_work work = {
+			.nr_pages	= LONG_MAX,
+			.sync_mode	= WB_SYNC_NONE,
+			.for_background	= 1,
+			.range_cyclic	= 1,
+		};
+
+		return wb_writeback(wb, &work);
+	}
+
+	return 0;
+}
+
 static long wb_check_old_data_flush(struct bdi_writeback *wb)
 {
 	unsigned long expired;
@@ -787,6 +847,7 @@
 	 * Check for periodic writeback, kupdated() style
 	 */
 	wrote += wb_check_old_data_flush(wb);
+	wrote += wb_check_background_flush(wb);
 	clear_bit(BDI_writeback_running, &wb->bdi->state);
 
 	return wrote;
@@ -873,7 +934,7 @@
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false, false);
+		__bdi_start_writeback(bdi, nr_pages, false);
 	}
 	rcu_read_unlock();
 }
@@ -1164,7 +1225,7 @@
  * @sb: the superblock
  *
  * This function writes and waits on any dirty inode belonging to this
- * super_block. The number of pages synced is returned.
+ * super_block.
  */
 void sync_inodes_sb(struct super_block *sb)
 {
@@ -1242,11 +1303,11 @@
 EXPORT_SYMBOL(sync_inode);
 
 /**
- * sync_inode - write an inode to disk
+ * sync_inode_metadata - write an inode to disk
  * @inode: the inode to sync
  * @wait: wait for I/O to complete.
  *
- * Write an inode to disk and adjust it's dirty state after completion.
+ * Write an inode to disk and adjust its dirty state after completion.
  *
  * Note: only writes the actual inode, no associated data or other metadata.
  */
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index ed45a9c..68ca487 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -14,12 +14,14 @@
 	struct path old_root;
 
 	spin_lock(&fs->lock);
+	write_seqcount_begin(&fs->seq);
 	old_root = fs->root;
 	fs->root = *path;
-	path_get(path);
+	path_get_long(path);
+	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 	if (old_root.dentry)
-		path_put(&old_root);
+		path_put_long(&old_root);
 }
 
 /*
@@ -31,13 +33,15 @@
 	struct path old_pwd;
 
 	spin_lock(&fs->lock);
+	write_seqcount_begin(&fs->seq);
 	old_pwd = fs->pwd;
 	fs->pwd = *path;
-	path_get(path);
+	path_get_long(path);
+	write_seqcount_end(&fs->seq);
 	spin_unlock(&fs->lock);
 
 	if (old_pwd.dentry)
-		path_put(&old_pwd);
+		path_put_long(&old_pwd);
 }
 
 void chroot_fs_refs(struct path *old_root, struct path *new_root)
@@ -52,31 +56,33 @@
 		fs = p->fs;
 		if (fs) {
 			spin_lock(&fs->lock);
+			write_seqcount_begin(&fs->seq);
 			if (fs->root.dentry == old_root->dentry
 			    && fs->root.mnt == old_root->mnt) {
-				path_get(new_root);
+				path_get_long(new_root);
 				fs->root = *new_root;
 				count++;
 			}
 			if (fs->pwd.dentry == old_root->dentry
 			    && fs->pwd.mnt == old_root->mnt) {
-				path_get(new_root);
+				path_get_long(new_root);
 				fs->pwd = *new_root;
 				count++;
 			}
+			write_seqcount_end(&fs->seq);
 			spin_unlock(&fs->lock);
 		}
 		task_unlock(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 	while (count--)
-		path_put(old_root);
+		path_put_long(old_root);
 }
 
 void free_fs_struct(struct fs_struct *fs)
 {
-	path_put(&fs->root);
-	path_put(&fs->pwd);
+	path_put_long(&fs->root);
+	path_put_long(&fs->pwd);
 	kmem_cache_free(fs_cachep, fs);
 }
 
@@ -88,8 +94,10 @@
 		int kill;
 		task_lock(tsk);
 		spin_lock(&fs->lock);
+		write_seqcount_begin(&fs->seq);
 		tsk->fs = NULL;
 		kill = !--fs->users;
+		write_seqcount_end(&fs->seq);
 		spin_unlock(&fs->lock);
 		task_unlock(tsk);
 		if (kill)
@@ -105,8 +113,15 @@
 		fs->users = 1;
 		fs->in_exec = 0;
 		spin_lock_init(&fs->lock);
+		seqcount_init(&fs->seq);
 		fs->umask = old->umask;
-		get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
+
+		spin_lock(&old->lock);
+		fs->root = old->root;
+		path_get_long(&fs->root);
+		fs->pwd = old->pwd;
+		path_get_long(&fs->pwd);
+		spin_unlock(&old->lock);
 	}
 	return fs;
 }
@@ -144,6 +159,7 @@
 struct fs_struct init_fs = {
 	.users		= 1,
 	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
+	.seq		= SEQCNT_ZERO,
 	.umask		= 0022,
 };
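
fs->root and fs->pwd updates are now bracketed by write_seqcount_begin()/write_seqcount_end() on the new fs->seq (still under fs->lock), so a lockless reader can sample both paths and detect a concurrent change. A hedged sketch of the reader side this enables; the real lockless users live in the RCU path-walk code, and a plain copy like this is only safe under that code's additional RCU and refcount rules:

	static void sample_root(struct fs_struct *fs, struct path *root)
	{
		unsigned seq;

		do {
			seq = read_seqcount_begin(&fs->seq);
			*root = fs->root;
		} while (read_seqcount_retry(&fs->seq, seq));
	}
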
 
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index b9f34ea..48a18f1 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -101,7 +101,7 @@
 		object->n_ops++;
 		object->n_exclusive++;	/* reads and writes must wait */
 
-		if (object->n_ops > 0) {
+		if (object->n_ops > 1) {
 			atomic_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
 			fscache_stat(&fscache_n_op_pend);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 6e07696..cf8d28d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -251,6 +251,20 @@
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
+void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+		       u64 nodeid, u64 nlookup)
+{
+	forget->forget_one.nodeid = nodeid;
+	forget->forget_one.nlookup = nlookup;
+
+	spin_lock(&fc->lock);
+	fc->forget_list_tail->next = forget;
+	fc->forget_list_tail = forget;
+	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	spin_unlock(&fc->lock);
+}
+
 static void flush_bg_queue(struct fuse_conn *fc)
 {
 	while (fc->active_background < fc->max_background &&
@@ -438,12 +452,6 @@
 	}
 }
 
-void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->isreply = 0;
-	fuse_request_send_nowait(fc, req);
-}
-
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
@@ -896,9 +904,15 @@
 	return err;
 }
 
+static int forget_pending(struct fuse_conn *fc)
+{
+	return fc->forget_list_head.next != NULL;
+}
+
 static int request_pending(struct fuse_conn *fc)
 {
-	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
+	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
+		forget_pending(fc);
 }
 
 /* Wait until a request is available on the pending list */
@@ -960,6 +974,120 @@
 	return err ? err : reqsize;
 }
 
+static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+					       unsigned max,
+					       unsigned *countp)
+{
+	struct fuse_forget_link *head = fc->forget_list_head.next;
+	struct fuse_forget_link **newhead = &head;
+	unsigned count;
+
+	for (count = 0; *newhead != NULL && count < max; count++)
+		newhead = &(*newhead)->next;
+
+	fc->forget_list_head.next = *newhead;
+	*newhead = NULL;
+	if (fc->forget_list_head.next == NULL)
+		fc->forget_list_tail = &fc->forget_list_head;
+
+	if (countp != NULL)
+		*countp = count;
+
+	return head;
+}
+
+static int fuse_read_single_forget(struct fuse_conn *fc,
+				   struct fuse_copy_state *cs,
+				   size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+	struct fuse_forget_in arg = {
+		.nlookup = forget->forget_one.nlookup,
+	};
+	struct fuse_in_header ih = {
+		.opcode = FUSE_FORGET,
+		.nodeid = forget->forget_one.nodeid,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	spin_unlock(&fc->lock);
+	kfree(forget);
+	if (nbytes < ih.len)
+		return -EINVAL;
+
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
+
+static int fuse_read_batch_forget(struct fuse_conn *fc,
+				   struct fuse_copy_state *cs, size_t nbytes)
+__releases(fc->lock)
+{
+	int err;
+	unsigned max_forgets;
+	unsigned count;
+	struct fuse_forget_link *head;
+	struct fuse_batch_forget_in arg = { .count = 0 };
+	struct fuse_in_header ih = {
+		.opcode = FUSE_BATCH_FORGET,
+		.unique = fuse_get_unique(fc),
+		.len = sizeof(ih) + sizeof(arg),
+	};
+
+	if (nbytes < ih.len) {
+		spin_unlock(&fc->lock);
+		return -EINVAL;
+	}
+
+	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
+	head = dequeue_forget(fc, max_forgets, &count);
+	spin_unlock(&fc->lock);
+
+	arg.count = count;
+	ih.len += count * sizeof(struct fuse_forget_one);
+	err = fuse_copy_one(cs, &ih, sizeof(ih));
+	if (!err)
+		err = fuse_copy_one(cs, &arg, sizeof(arg));
+
+	while (head) {
+		struct fuse_forget_link *forget = head;
+
+		if (!err) {
+			err = fuse_copy_one(cs, &forget->forget_one,
+					    sizeof(forget->forget_one));
+		}
+		head = forget->next;
+		kfree(forget);
+	}
+
+	fuse_copy_finish(cs);
+
+	if (err)
+		return err;
+
+	return ih.len;
+}
+
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+			    size_t nbytes)
+__releases(fc->lock)
+{
+	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
+		return fuse_read_single_forget(fc, cs, nbytes);
+	else
+		return fuse_read_batch_forget(fc, cs, nbytes);
+}
+
 /*
  * Read a single request into the userspace filesystem's buffer.  This
  * function waits until a request is available, then removes it from
@@ -998,6 +1126,14 @@
 		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
+	if (forget_pending(fc)) {
+		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
+			return fuse_read_forget(fc, cs, nbytes);
+
+		if (fc->forget_batch <= -8)
+			fc->forget_batch = 16;
+	}
+
 	req = list_entry(fc->pending.next, struct fuse_req, list);
 	req->state = FUSE_REQ_READING;
 	list_move(&req->list, &fc->io);
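
FORGET messages no longer travel as ordinary requests: fuse_queue_forget() appends them to a singly linked list with a dummy head (fc->forget_list_head) and a tail pointer for O(1) queueing, and dequeue_forget() splices off up to max entries so fuse_read_forget() can emit either a single FUSE_FORGET or, for protocol minor >= 16 with several forgets pending, one FUSE_BATCH_FORGET. A stand-alone sketch of that queue shape (simplified, no locking, payload omitted):

	struct forget_link {
		struct forget_link *next;
		/* nodeid / nlookup payload omitted */
	};

	struct forget_queue {
		struct forget_link head;	/* dummy node, never handed out */
		struct forget_link *tail;	/* == &head when the queue is empty */
	};

	static void queue_forget(struct forget_queue *q, struct forget_link *f)
	{
		f->next = NULL;
		q->tail->next = f;
		q->tail = f;
	}

	static struct forget_link *dequeue_forgets(struct forget_queue *q,
						   unsigned max, unsigned *countp)
	{
		struct forget_link *batch = q->head.next;
		struct forget_link **newhead = &batch;
		unsigned count;

		for (count = 0; *newhead != NULL && count < max; count++)
			newhead = &(*newhead)->next;

		q->head.next = *newhead;	/* remainder stays queued */
		*newhead = NULL;		/* detach the batch */
		if (q->head.next == NULL)
			q->tail = &q->head;
		*countp = count;
		return batch;
	}
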
@@ -1090,7 +1226,7 @@
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
@@ -1626,7 +1762,7 @@
 	if (!fc)
 		return -EPERM;
 
-	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
+	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
@@ -1770,6 +1906,8 @@
 	flush_bg_queue(fc);
 	end_requests(fc, &fc->pending);
 	end_requests(fc, &fc->processing);
+	while (forget_pending(fc))
+		kfree(dequeue_forget(fc, 1, NULL));
 }
 
 /*
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c9627c9..bfed844 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -10,9 +10,9 @@
 
 #include <linux/pagemap.h>
 #include <linux/file.h>
-#include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/namei.h>
+#include <linux/slab.h>
 
 #if BITS_PER_LONG >= 64
 static inline void fuse_dentry_settime(struct dentry *entry, u64 time)
@@ -156,8 +156,12 @@
  */
 static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
 {
-	struct inode *inode = entry->d_inode;
+	struct inode *inode;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = entry->d_inode;
 	if (inode && is_bad_inode(inode))
 		return 0;
 	else if (fuse_dentry_time(entry) < get_jiffies_64()) {
@@ -165,7 +169,7 @@
 		struct fuse_entry_out outarg;
 		struct fuse_conn *fc;
 		struct fuse_req *req;
-		struct fuse_req *forget_req;
+		struct fuse_forget_link *forget;
 		struct dentry *parent;
 		u64 attr_version;
 
@@ -178,8 +182,8 @@
 		if (IS_ERR(req))
 			return 0;
 
-		forget_req = fuse_get_req(fc);
-		if (IS_ERR(forget_req)) {
+		forget = fuse_alloc_forget();
+		if (!forget) {
 			fuse_put_request(fc, req);
 			return 0;
 		}
@@ -199,15 +203,14 @@
 		if (!err) {
 			struct fuse_inode *fi = get_fuse_inode(inode);
 			if (outarg.nodeid != get_node_id(inode)) {
-				fuse_send_forget(fc, forget_req,
-						 outarg.nodeid, 1);
+				fuse_queue_forget(fc, forget, outarg.nodeid, 1);
 				return 0;
 			}
 			spin_lock(&fc->lock);
 			fi->nlookup++;
 			spin_unlock(&fc->lock);
 		}
-		fuse_put_request(fc, forget_req);
+		kfree(forget);
 		if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
 			return 0;
 
@@ -259,7 +262,7 @@
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 	struct fuse_req *req;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 	u64 attr_version;
 	int err;
 
@@ -273,9 +276,9 @@
 	if (IS_ERR(req))
 		goto out;
 
-	forget_req = fuse_get_req(fc);
-	err = PTR_ERR(forget_req);
-	if (IS_ERR(forget_req)) {
+	forget = fuse_alloc_forget();
+	err = -ENOMEM;
+	if (!forget) {
 		fuse_put_request(fc, req);
 		goto out;
 	}
@@ -301,13 +304,13 @@
 			   attr_version);
 	err = -ENOMEM;
 	if (!*inode) {
-		fuse_send_forget(fc, forget_req, outarg->nodeid, 1);
+		fuse_queue_forget(fc, forget, outarg->nodeid, 1);
 		goto out;
 	}
 	err = 0;
 
  out_put_forget:
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
  out:
 	return err;
 }
@@ -347,7 +350,6 @@
 	}
 
 	entry = newent ? newent : entry;
-	entry->d_op = &fuse_dentry_operations;
 	if (outarg_valid)
 		fuse_change_entry_timeout(entry, &outarg);
 	else
@@ -374,7 +376,7 @@
 	struct inode *inode;
 	struct fuse_conn *fc = get_fuse_conn(dir);
 	struct fuse_req *req;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 	struct fuse_create_in inarg;
 	struct fuse_open_out outopen;
 	struct fuse_entry_out outentry;
@@ -388,9 +390,9 @@
 	if (flags & O_DIRECT)
 		return -EINVAL;
 
-	forget_req = fuse_get_req(fc);
-	if (IS_ERR(forget_req))
-		return PTR_ERR(forget_req);
+	forget = fuse_alloc_forget();
+	if (!forget)
+		return -ENOMEM;
 
 	req = fuse_get_req(fc);
 	err = PTR_ERR(req);
@@ -448,10 +450,10 @@
 	if (!inode) {
 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
 		fuse_sync_release(ff, flags);
-		fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
+		fuse_queue_forget(fc, forget, outentry.nodeid, 1);
 		return -ENOMEM;
 	}
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	d_instantiate(entry, inode);
 	fuse_change_entry_timeout(entry, &outentry);
 	fuse_invalidate_attr(dir);
@@ -469,7 +471,7 @@
  out_put_request:
 	fuse_put_request(fc, req);
  out_put_forget_req:
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	return err;
 }
 
@@ -483,12 +485,12 @@
 	struct fuse_entry_out outarg;
 	struct inode *inode;
 	int err;
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 
-	forget_req = fuse_get_req(fc);
-	if (IS_ERR(forget_req)) {
+	forget = fuse_alloc_forget();
+	if (!forget) {
 		fuse_put_request(fc, req);
-		return PTR_ERR(forget_req);
+		return -ENOMEM;
 	}
 
 	memset(&outarg, 0, sizeof(outarg));
@@ -515,10 +517,10 @@
 	inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
 			  &outarg.attr, entry_attr_timeout(&outarg), 0);
 	if (!inode) {
-		fuse_send_forget(fc, forget_req, outarg.nodeid, 1);
+		fuse_queue_forget(fc, forget, outarg.nodeid, 1);
 		return -ENOMEM;
 	}
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 
 	if (S_ISDIR(inode->i_mode)) {
 		struct dentry *alias;
@@ -541,7 +543,7 @@
 	return 0;
 
  out_put_forget_req:
-	fuse_put_request(fc, forget_req);
+	kfree(forget);
 	return err;
 }
 
@@ -981,12 +983,15 @@
  * access request is sent.  Execute permission is still checked
  * locally based on file mode.
  */
-static int fuse_permission(struct inode *inode, int mask)
+static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	bool refreshed = false;
 	int err = 0;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	if (!fuse_allow_task(fc, current))
 		return -EACCES;
 
@@ -1001,7 +1006,7 @@
 	}
 
 	if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
-		err = generic_permission(inode, mask, NULL);
+		err = generic_permission(inode, mask, flags, NULL);
 
 		/* If permission is denied, try to refresh file
 		   attributes.  This is also needed, because the root
@@ -1009,7 +1014,8 @@
 		if (err == -EACCES && !refreshed) {
 			err = fuse_do_getattr(inode, NULL, NULL);
 			if (!err)
-				err = generic_permission(inode, mask, NULL);
+				err = generic_permission(inode, mask,
+							flags, NULL);
 		}
 
 		/* Note: the opposite of the above test does not
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 8b984a2..95da1bc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1634,9 +1634,9 @@
  * and 64bit.  Fortunately we can determine which structure the server
  * used from the size of the reply.
  */
-static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
-				 size_t transferred, unsigned count,
-				 bool is_compat)
+static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
+				     size_t transferred, unsigned count,
+				     bool is_compat)
 {
 #ifdef CONFIG_COMPAT
 	if (count * sizeof(struct compat_iovec) == transferred) {
@@ -1680,6 +1680,42 @@
 	return 0;
 }
 
+static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
+				 void *src, size_t transferred, unsigned count,
+				 bool is_compat)
+{
+	unsigned i;
+	struct fuse_ioctl_iovec *fiov = src;
+
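+	/* Pre-7.16 servers reply with plain (compat_)iovec arrays */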
+	if (fc->minor < 16) {
+		return fuse_copy_ioctl_iovec_old(dst, src, transferred,
+						 count, is_compat);
+	}
+
+	if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
+		return -EIO;
+
+	for (i = 0; i < count; i++) {
+		/* Reject base/len values that do not fit in the kernel's unsigned long */
+		if (fiov[i].base != (unsigned long) fiov[i].base ||
+		    fiov[i].len != (unsigned long) fiov[i].len)
+			return -EIO;
+
+		dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
+		dst[i].iov_len = (size_t) fiov[i].len;
+
+#ifdef CONFIG_COMPAT
+		if (is_compat &&
+		    (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
+		     (compat_size_t) dst[i].iov_len != fiov[i].len))
+			return -EIO;
+#endif
+	}
+
+	return 0;
+}
+
 /*
  * For ioctls, there is no generic way to determine how much memory
  * needs to be read and/or written.  Furthermore, ioctls are allowed
@@ -1740,18 +1776,25 @@
 	struct fuse_ioctl_out outarg;
 	struct fuse_req *req = NULL;
 	struct page **pages = NULL;
-	struct page *iov_page = NULL;
+	struct iovec *iov_page = NULL;
 	struct iovec *in_iov = NULL, *out_iov = NULL;
 	unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
 	size_t in_size, out_size, transferred;
 	int err;
 
+#if BITS_PER_LONG == 32
+	inarg.flags |= FUSE_IOCTL_32BIT;
+#else
+	if (flags & FUSE_IOCTL_COMPAT)
+		inarg.flags |= FUSE_IOCTL_32BIT;
+#endif
+
 	/* assume all the iovs returned by the client always fit in a page */
-	BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
+	BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
 
 	err = -ENOMEM;
 	pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
-	iov_page = alloc_page(GFP_KERNEL);
+	iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
 	if (!pages || !iov_page)
 		goto out;
 
@@ -1760,7 +1803,7 @@
 	 * RETRY from server is not allowed.
 	 */
 	if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
-		struct iovec *iov = page_address(iov_page);
+		struct iovec *iov = iov_page;
 
 		iov->iov_base = (void __user *)arg;
 		iov->iov_len = _IOC_SIZE(cmd);
@@ -1841,7 +1884,7 @@
 
 	/* did it ask for retry? */
 	if (outarg.flags & FUSE_IOCTL_RETRY) {
-		char *vaddr;
+		void *vaddr;
 
 		/* no retry if in restricted mode */
 		err = -EIO;
@@ -1862,14 +1905,14 @@
 			goto out;
 
 		vaddr = kmap_atomic(pages[0], KM_USER0);
-		err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
+		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
 					    transferred, in_iovs + out_iovs,
 					    (flags & FUSE_IOCTL_COMPAT) != 0);
 		kunmap_atomic(vaddr, KM_USER0);
 		if (err)
 			goto out;
 
-		in_iov = page_address(iov_page);
+		in_iov = iov_page;
 		out_iov = in_iov + in_iovs;
 
 		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
@@ -1891,8 +1934,7 @@
  out:
 	if (req)
 		fuse_put_request(fc, req);
-	if (iov_page)
-		__free_page(iov_page);
+	free_page((unsigned long) iov_page);
 	while (num_pages)
 		__free_page(pages[--num_pages]);
 	kfree(pages);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 57d4a3a..ae5744a 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -53,6 +53,12 @@
 extern unsigned max_user_bgreq;
 extern unsigned max_user_congthresh;
 
+/* One forget request */
+struct fuse_forget_link {
+	struct fuse_forget_one forget_one;
+	struct fuse_forget_link *next;
+};
+
 /** FUSE inode */
 struct fuse_inode {
 	/** Inode data */
@@ -66,7 +72,7 @@
 	u64 nlookup;
 
 	/** The request used for sending the FORGET message */
-	struct fuse_req *forget_req;
+	struct fuse_forget_link *forget;
 
 	/** Time in jiffies until the file attributes are valid */
 	u64 i_time;
@@ -255,7 +261,6 @@
 
 	/** Data for asynchronous requests */
 	union {
-		struct fuse_forget_in forget_in;
 		struct {
 			struct fuse_release_in in;
 			struct path path;
@@ -369,6 +374,13 @@
 	/** Pending interrupts */
 	struct list_head interrupts;
 
+	/** Queue of pending forgets */
+	struct fuse_forget_link forget_list_head;
+	struct fuse_forget_link *forget_list_tail;
+
+	/** Batching of FORGET requests (positive indicates FORGET batch) */
+	int forget_batch;
+
 	/** Flag indicating if connection is blocked.  This will be
 	    the case before the INIT reply is received, and if there
 	    are too many outstanding background requests */
@@ -543,8 +555,10 @@
 /**
  * Send FORGET command
  */
-void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
-		      u64 nodeid, u64 nlookup);
+void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+		       u64 nodeid, u64 nlookup);
+
+struct fuse_forget_link *fuse_alloc_forget(void);
 
 /**
  * Initialize READ or READDIR request
@@ -656,11 +670,6 @@
 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
 
 /**
- * Send a request with no reply
- */
-void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
-
-/**
  * Send a request in the background
  */
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cfce3ad..9e3f68c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -71,6 +71,11 @@
 	unsigned blksize;
 };
 
+struct fuse_forget_link *fuse_alloc_forget()
+{
+	return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL);
+}
+
 static struct inode *fuse_alloc_inode(struct super_block *sb)
 {
 	struct inode *inode;
@@ -90,8 +95,8 @@
 	INIT_LIST_HEAD(&fi->queued_writes);
 	INIT_LIST_HEAD(&fi->writepages);
 	init_waitqueue_head(&fi->page_waitq);
-	fi->forget_req = fuse_request_alloc();
-	if (!fi->forget_req) {
+	fi->forget = fuse_alloc_forget();
+	if (!fi->forget) {
 		kmem_cache_free(fuse_inode_cachep, inode);
 		return NULL;
 	}
@@ -99,27 +104,20 @@
 	return inode;
 }
 
+static void fuse_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(fuse_inode_cachep, inode);
+}
+
 static void fuse_destroy_inode(struct inode *inode)
 {
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	BUG_ON(!list_empty(&fi->write_files));
 	BUG_ON(!list_empty(&fi->queued_writes));
-	if (fi->forget_req)
-		fuse_request_free(fi->forget_req);
-	kmem_cache_free(fuse_inode_cachep, inode);
-}
-
-void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
-		      u64 nodeid, u64 nlookup)
-{
-	struct fuse_forget_in *inarg = &req->misc.forget_in;
-	inarg->nlookup = nlookup;
-	req->in.h.opcode = FUSE_FORGET;
-	req->in.h.nodeid = nodeid;
-	req->in.numargs = 1;
-	req->in.args[0].size = sizeof(struct fuse_forget_in);
-	req->in.args[0].value = inarg;
-	fuse_request_send_noreply(fc, req);
+	kfree(fi->forget);
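+	/* Free after an RCU grace period; rcu-walk may still use the inode */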
+	call_rcu(&inode->i_rcu, fuse_i_callback);
 }
 
 static void fuse_evict_inode(struct inode *inode)
@@ -129,8 +127,8 @@
 	if (inode->i_sb->s_flags & MS_ACTIVE) {
 		struct fuse_conn *fc = get_fuse_conn(inode);
 		struct fuse_inode *fi = get_fuse_inode(inode);
-		fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup);
-		fi->forget_req = NULL;
+		fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
+		fi->forget = NULL;
 	}
 }
 
@@ -534,6 +532,7 @@
 	INIT_LIST_HEAD(&fc->interrupts);
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
+	fc->forget_list_tail = &fc->forget_list_head;
 	atomic_set(&fc->num_waiting, 0);
 	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
 	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
@@ -618,10 +617,8 @@
 		goto out_iput;
 
 	entry = d_obtain_alias(inode);
-	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) {
-		entry->d_op = &fuse_dentry_operations;
+	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
 		fuse_invalidate_entry_cache(entry);
-	}
 
 	return entry;
 
@@ -720,10 +717,8 @@
 	}
 
 	parent = d_obtain_alias(inode);
-	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) {
-		parent->d_op = &fuse_dentry_operations;
+	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
 		fuse_invalidate_entry_cache(parent);
-	}
 
 	return parent;
 }
@@ -990,6 +985,8 @@
 		iput(root);
 		goto err_put_conn;
 	}
+	/* only now - we want root dentry with NULL ->d_op */
+	sb->s_d_op = &fuse_dentry_operations;
 
 	init_req = fuse_request_alloc();
 	if (!init_req)
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 6bc9e3a..06c48a8 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -190,14 +190,20 @@
 }
 
 int
-generic_check_acl(struct inode *inode, int mask)
+generic_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct posix_acl *acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
+	if (flags & IPERM_FLAG_RCU) {
+		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+			return -ECHILD;
+	} else {
+		struct posix_acl *acl;
 
-	if (acl) {
-		int error = posix_acl_permission(inode, acl, mask);
-		posix_acl_release(acl);
-		return error;
+		acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
+		if (acl) {
+			int error = posix_acl_permission(inode, acl, mask);
+			posix_acl_release(acl);
+			return error;
+		}
 	}
 	return -EAGAIN;
 }
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 48171f4..7118f1a 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -75,11 +75,14 @@
  * Returns: errno
  */
 
-int gfs2_check_acl(struct inode *inode, int mask)
+int gfs2_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
 	struct posix_acl *acl;
 	int error;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index b522b0c..a93907c 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -16,7 +16,7 @@
 #define GFS2_POSIX_ACL_DEFAULT		"posix_acl_default"
 #define GFS2_ACL_MAX_ENTRIES		25
 
-extern int gfs2_check_acl(struct inode *inode, int mask);
+extern int gfs2_check_acl(struct inode *inode, int mask, unsigned int);
 extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
 extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
 extern const struct xattr_handler gfs2_xattr_system_handler;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 6798755..4a45633 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -11,6 +11,7 @@
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/gfs2_ondisk.h>
+#include <linux/namei.h>
 #include <linux/crc32.h>
 
 #include "gfs2.h"
@@ -34,15 +35,23 @@
 
 static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct dentry *parent = dget_parent(dentry);
-	struct gfs2_sbd *sdp = GFS2_SB(parent->d_inode);
-	struct gfs2_inode *dip = GFS2_I(parent->d_inode);
-	struct inode *inode = dentry->d_inode;
+	struct dentry *parent;
+	struct gfs2_sbd *sdp;
+	struct gfs2_inode *dip;
+	struct inode *inode;
 	struct gfs2_holder d_gh;
 	struct gfs2_inode *ip = NULL;
 	int error;
 	int had_lock = 0;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	parent = dget_parent(dentry);
+	sdp = GFS2_SB(parent->d_inode);
+	dip = GFS2_I(parent->d_inode);
+	inode = dentry->d_inode;
+
 	if (inode) {
 		if (is_bad_inode(inode))
 			goto invalid;
@@ -100,13 +109,14 @@
 	return 0;
 }
 
-static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
+static int gfs2_dhash(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *str)
 {
 	str->hash = gfs2_disk_hash(str->name, str->len);
 	return 0;
 }
 
-static int gfs2_dentry_delete(struct dentry *dentry)
+static int gfs2_dentry_delete(const struct dentry *dentry)
 {
 	struct gfs2_inode *ginode;
 
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 5ab3839..9023db8 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -126,12 +126,7 @@
 
 static struct dentry *gfs2_get_parent(struct dentry *child)
 {
-	struct dentry *dentry;
-
-	dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
-	if (!IS_ERR(dentry))
-		dentry->d_op = &gfs2_dops;
-	return dentry;
+	return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
 }
 
 static struct dentry *gfs2_get_dentry(struct super_block *sb,
@@ -139,7 +134,6 @@
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct inode *inode;
-	struct dentry *dentry;
 
 	inode = gfs2_ilookup(sb, inum->no_addr);
 	if (inode) {
@@ -156,10 +150,7 @@
 		return ERR_CAST(inode);
 
 out_inode:
-	dentry = d_obtain_alias(inode);
-	if (!IS_ERR(dentry))
-		dentry->d_op = &gfs2_dops;
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index aa99647..fca6689 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -241,7 +241,7 @@
 	    !capable(CAP_LINUX_IMMUTABLE))
 		goto out;
 	if (!IS_IMMUTABLE(inode)) {
-		error = gfs2_permission(inode, MAY_WRITE);
+		error = gfs2_permission(inode, MAY_WRITE, 0);
 		if (error)
 			goto out;
 	}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8d3d2b4..a79790c 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -11,6 +11,7 @@
 #define __INCORE_DOT_H__
 
 #include <linux/fs.h>
+#include <linux/kobject.h>
 #include <linux/workqueue.h>
 #include <linux/dlm.h>
 #include <linux/buffer_head.h>
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 14e682d..2232b3c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -509,7 +509,7 @@
 	}
 
 	if (!is_root) {
-		error = gfs2_permission(dir, MAY_EXEC);
+		error = gfs2_permission(dir, MAY_EXEC, 0);
 		if (error)
 			goto out;
 	}
@@ -539,7 +539,7 @@
 {
 	int error;
 
-	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
+	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0);
 	if (error)
 		return error;
 
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index d8499fa..732a183 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -113,7 +113,7 @@
 extern struct inode *gfs2_createi(struct gfs2_holder *ghs,
 				  const struct qstr *name,
 				  unsigned int mode, dev_t dev);
-extern int gfs2_permission(struct inode *inode, int mask);
+extern int gfs2_permission(struct inode *inode, int mask, unsigned int flags);
 extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
 extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3eb1393..777927c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -440,7 +440,6 @@
 		iput(inode);
 		return -ENOMEM;
 	}
-	dentry->d_op = &gfs2_dops;
 	*dptr = dentry;
 	return 0;
 }
@@ -1106,6 +1105,7 @@
 
 	sb->s_magic = GFS2_MAGIC;
 	sb->s_op = &gfs2_super_ops;
+	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
 	sb->s_xattr = gfs2_xattr_handlers;
 	sb->s_qcop = &gfs2_quotactl_ops;
@@ -1268,7 +1268,7 @@
 {
 	struct block_device *bdev;
 	struct super_block *s;
-	fmode_t mode = FMODE_READ;
+	fmode_t mode = FMODE_READ | FMODE_EXCL;
 	int error;
 	struct gfs2_args args;
 	struct gfs2_sbd *sdp;
@@ -1276,7 +1276,7 @@
 	if (!(flags & MS_RDONLY))
 		mode |= FMODE_WRITE;
 
-	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
 
@@ -1298,7 +1298,7 @@
 		goto error_bdev;
 
 	if (s->s_root)
-		close_bdev_exclusive(bdev, mode);
+		blkdev_put(bdev, mode);
 
 	memset(&args, 0, sizeof(args));
 	args.ar_quota = GFS2_QUOTA_DEFAULT;
@@ -1342,7 +1342,7 @@
 	deactivate_locked_super(s);
 	return ERR_PTR(error);
 error_bdev:
-	close_bdev_exclusive(bdev, mode);
+	blkdev_put(bdev, mode);
 	return ERR_PTR(error);
 }
 
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 1db6b73..040b5a2 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -106,8 +106,6 @@
 {
 	struct inode *inode = NULL;
 
-	dentry->d_op = &gfs2_dops;
-
 	inode = gfs2_lookupi(dir, &dentry->d_name, 0);
 	if (inode && IS_ERR(inode))
 		return ERR_CAST(inode);
@@ -166,7 +164,7 @@
 	if (error)
 		goto out_child;
 
-	error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
+	error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC, 0);
 	if (error)
 		goto out_gunlock;
 
@@ -289,7 +287,7 @@
 	if (IS_APPEND(&dip->i_inode))
 		return -EPERM;
 
-	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
+	error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0);
 	if (error)
 		return error;
 
@@ -822,7 +820,7 @@
 			}
 		}
 	} else {
-		error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC);
+		error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC, 0);
 		if (error)
 			goto out_gunlock;
 
@@ -857,7 +855,7 @@
 	/* Check out the dir to be renamed */
 
 	if (dir_rename) {
-		error = gfs2_permission(odentry->d_inode, MAY_WRITE);
+		error = gfs2_permission(odentry->d_inode, MAY_WRITE, 0);
 		if (error)
 			goto out_gunlock;
 	}
@@ -1041,13 +1039,17 @@
  * Returns: errno
  */
 
-int gfs2_permission(struct inode *inode, int mask)
+int gfs2_permission(struct inode *inode, int mask, unsigned int flags)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_inode *ip;
 	struct gfs2_holder i_gh;
 	int error;
 	int unlock = 0;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	ip = GFS2_I(inode);
 	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 		if (error)
@@ -1058,7 +1060,7 @@
 	if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
 		error = -EACCES;
 	else
-		error = generic_permission(inode, mask, gfs2_check_acl);
+		error = generic_permission(inode, mask, flags, gfs2_check_acl);
 	if (unlock)
 		gfs2_glock_dq_uninit(&i_gh);
 
@@ -1423,6 +1425,10 @@
 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+		return -EOPNOTSUPP;
+
 	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
 		 sdp->sd_sb.sb_bsize_shift;
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2b2c499..16c2eca 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1405,9 +1405,16 @@
 	return &ip->i_inode;
 }
 
+static void gfs2_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(gfs2_inode_cachep, inode);
+}
+
 static void gfs2_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(gfs2_inode_cachep, inode);
+	call_rcu(&inode->i_rcu, gfs2_i_callback);
 }
 
 const struct super_operations gfs2_super_ops = {
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 2b3b861..afa66aa 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -25,8 +25,6 @@
 	struct inode *inode = NULL;
 	int res;
 
-	dentry->d_op = &hfs_dentry_operations;
-
 	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
 	hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
 	res = hfs_brec_read(&fd, &rec, sizeof(rec));
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index c8cffb8..ad97c2d 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,10 +213,14 @@
 /* string.c */
 extern const struct dentry_operations hfs_dentry_operations;
 
-extern int hfs_hash_dentry(struct dentry *, struct qstr *);
+extern int hfs_hash_dentry(const struct dentry *, const struct inode *,
+		struct qstr *);
 extern int hfs_strcmp(const unsigned char *, unsigned int,
 		      const unsigned char *, unsigned int);
-extern int hfs_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
+extern int hfs_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
 
 /* trans.c */
 extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *);
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
index 927a5af..495a976 100644
--- a/fs/hfs/string.c
+++ b/fs/hfs/string.c
@@ -51,7 +51,8 @@
 /*
  * Hash a string to an integer in a case-independent way
  */
-int hfs_hash_dentry(struct dentry *dentry, struct qstr *this)
+int hfs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *this)
 {
 	const unsigned char *name = this->name;
 	unsigned int hash, len = this->len;
@@ -92,21 +93,21 @@
  * Test for equality of two strings in the HFS filename character ordering.
  * return 1 on failure and 0 on success
  */
-int hfs_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2)
+int hfs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
 	const unsigned char *n1, *n2;
-	int len;
 
-	len = s1->len;
 	if (len >= HFS_NAMELEN) {
-		if (s2->len < HFS_NAMELEN)
+		if (name->len < HFS_NAMELEN)
 			return 1;
 		len = HFS_NAMELEN;
-	} else if (len != s2->len)
+	} else if (len != name->len)
 		return 1;
 
-	n1 = s1->name;
-	n2 = s2->name;
+	n1 = str;
+	n2 = name->name;
 	while (len--) {
 		if (caseorder[*n1++] != caseorder[*n2++])
 			return 1;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 4824c27..1b55f70 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -167,9 +167,16 @@
 	return i ? &i->vfs_inode : NULL;
 }
 
+static void hfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
+}
+
 static void hfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
+	call_rcu(&inode->i_rcu, hfs_i_callback);
 }
 
 static const struct super_operations hfs_super_operations = {
@@ -422,13 +429,12 @@
 	if (!root_inode)
 		goto bail_no_root;
 
+	sb->s_d_op = &hfs_dentry_operations;
 	res = -ENOMEM;
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root)
 		goto bail_iput;
 
-	sb->s_root->d_op = &hfs_dentry_operations;
-
 	/* everything's okay */
 	return 0;
 
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 7478f5c..19cf291 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -8,15 +8,20 @@
  * This file contains the code to do various system dependent things.
  */
 
+#include <linux/namei.h>
 #include "hfs_fs.h"
 
 /* dentry case-handling: just lowercase everything */
 
 static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
 {
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode;
 	int diff;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
 	if(!inode)
 		return 1;
 
diff --git a/fs/hfsplus/bfind.c b/fs/hfsplus/bfind.c
index d182438..5d799c1 100644
--- a/fs/hfsplus/bfind.c
+++ b/fs/hfsplus/bfind.c
@@ -22,7 +22,8 @@
 		return -ENOMEM;
 	fd->search_key = ptr;
 	fd->key = ptr + tree->max_key_len + 2;
-	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0));
+	dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n",
+		tree->cnid, __builtin_return_address(0));
 	mutex_lock(&tree->tree_lock);
 	return 0;
 }
@@ -31,7 +32,8 @@
 {
 	hfs_bnode_put(fd->bnode);
 	kfree(fd->search_key);
-	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0));
+	dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n",
+		fd->tree->cnid, __builtin_return_address(0));
 	mutex_unlock(&fd->tree->tree_lock);
 	fd->tree = NULL;
 }
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index ad57f59..1cad80c 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -15,7 +15,8 @@
 
 #define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
 
-int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
+int hfsplus_block_allocate(struct super_block *sb, u32 size,
+		u32 offset, u32 *max)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct page *page;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 29da657..1c42cc5 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -42,7 +42,7 @@
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
 {
 	__be16 data;
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_read(node, &data, off, 2);
 	return be16_to_cpu(data);
 }
@@ -50,7 +50,7 @@
 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
 {
 	u8 data;
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_read(node, &data, off, 1);
 	return data;
 }
@@ -96,7 +96,7 @@
 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
 {
 	__be16 v = cpu_to_be16(data);
-	// optimize later...
+	/* TODO: optimize later... */
 	hfs_bnode_write(node, &v, off, 2);
 }
 
@@ -212,7 +212,8 @@
 				dst_page--;
 			}
 			src -= len;
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, len);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
@@ -250,14 +251,16 @@
 
 		if (src == dst) {
 			l = min(len, (int)PAGE_CACHE_SIZE - src);
-			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
+			memmove(kmap(*dst_page) + src,
+				kmap(*src_page) + src, l);
 			kunmap(*src_page);
 			set_page_dirty(*dst_page);
 			kunmap(*dst_page);
 
 			while ((len -= l) != 0) {
 				l = min(len, (int)PAGE_CACHE_SIZE);
-				memmove(kmap(*++dst_page), kmap(*++src_page), l);
+				memmove(kmap(*++dst_page),
+					kmap(*++src_page), l);
 				kunmap(*src_page);
 				set_page_dirty(*dst_page);
 				kunmap(*dst_page);
@@ -268,7 +271,8 @@
 			do {
 				src_ptr = kmap(*src_page) + src;
 				dst_ptr = kmap(*dst_page) + dst;
-				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
+				if (PAGE_CACHE_SIZE - src <
+						PAGE_CACHE_SIZE - dst) {
 					l = PAGE_CACHE_SIZE - src;
 					src = 0;
 					dst += l;
@@ -340,7 +344,8 @@
 			return;
 		tmp->next = node->next;
 		cnid = cpu_to_be32(tmp->next);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, next), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_head = node->next;
@@ -351,15 +356,15 @@
 			return;
 		tmp->prev = node->prev;
 		cnid = cpu_to_be32(tmp->prev);
-		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
+		hfs_bnode_write(tmp, &cnid,
+			offsetof(struct hfs_bnode_desc, prev), 4);
 		hfs_bnode_put(tmp);
 	} else if (node->type == HFS_NODE_LEAF)
 		tree->leaf_tail = node->prev;
 
-	// move down?
-	if (!node->prev && !node->next) {
-		printk(KERN_DEBUG "hfs_btree_del_level\n");
-	}
+	/* move down? */
+	if (!node->prev && !node->next)
+		dprint(DBG_BNODE_MOD, "hfs_btree_del_level\n");
 	if (!node->parent) {
 		tree->root = 0;
 		tree->depth = 0;
@@ -379,16 +384,16 @@
 	struct hfs_bnode *node;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
 	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
-	     node; node = node->next_hash) {
-		if (node->this == cnid) {
+			node; node = node->next_hash)
+		if (node->this == cnid)
 			return node;
-		}
-	}
 	return NULL;
 }
 
@@ -402,7 +407,9 @@
 	loff_t off;
 
 	if (cnid >= tree->node_count) {
-		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
+		printk(KERN_ERR "hfs: request for non-existent node "
+				"%d in B*Tree\n",
+			cnid);
 		return NULL;
 	}
 
@@ -429,7 +436,8 @@
 	} else {
 		spin_unlock(&tree->hash_lock);
 		kfree(node);
-		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
+		wait_event(node2->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node2->flags));
 		return node2;
 	}
 	spin_unlock(&tree->hash_lock);
@@ -483,7 +491,8 @@
 	if (node) {
 		hfs_bnode_get(node);
 		spin_unlock(&tree->hash_lock);
-		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
+		wait_event(node->lock_wq,
+			!test_bit(HFS_BNODE_NEW, &node->flags));
 		if (test_bit(HFS_BNODE_ERROR, &node->flags))
 			goto node_error;
 		return node;
@@ -497,7 +506,8 @@
 	if (!test_bit(HFS_BNODE_NEW, &node->flags))
 		return node;
 
-	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
+	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
+			node->page_offset);
 	node->prev = be32_to_cpu(desc->prev);
 	node->next = be32_to_cpu(desc->next);
 	node->num_recs = be16_to_cpu(desc->num_recs);
@@ -556,11 +566,13 @@
 
 void hfs_bnode_free(struct hfs_bnode *node)
 {
-	//int i;
+#if 0
+	int i;
 
-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
-	//	if (node->page[i])
-	//		page_cache_release(node->page[i]);
+	for (i = 0; i < node->tree->pages_per_bnode; i++)
+		if (node->page[i])
+			page_cache_release(node->page[i]);
+#endif
 	kfree(node);
 }
 
@@ -607,7 +619,8 @@
 	if (node) {
 		atomic_inc(&node->refcnt);
 		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
-		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 	}
 }
 
@@ -619,7 +632,8 @@
 		int i;
 
 		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
-		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
+			node->tree->cnid, node->this,
+			atomic_read(&node->refcnt));
 		BUG_ON(!atomic_read(&node->refcnt));
 		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
 			return;
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 2f39d05..2312de3 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -39,7 +39,8 @@
 	   !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) {
 		retval = node->tree->max_key_len + 2;
 	} else {
-		recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
+		recoff = hfs_bnode_read_u16(node,
+			node->tree->node_size - (rec + 1) * 2);
 		if (!recoff)
 			return 0;
 
@@ -84,7 +85,8 @@
 	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
 	end_off = hfs_bnode_read_u16(node, end_rec_off);
 	end_rec_off -= 2;
-	dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off);
+	dprint(DBG_BNODE_MOD, "insert_rec: %d, %d, %d, %d\n",
+		rec, size, end_off, end_rec_off);
 	if (size > end_rec_off - end_off) {
 		if (new_node)
 			panic("not enough room!\n");
@@ -99,7 +101,9 @@
 	}
 	node->num_recs++;
 	/* write new last offset */
-	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
+	hfs_bnode_write_u16(node,
+		offsetof(struct hfs_bnode_desc, num_recs),
+		node->num_recs);
 	hfs_bnode_write_u16(node, end_rec_off, end_off + size);
 	data_off = end_off;
 	data_rec_off = end_rec_off + 2;
@@ -151,7 +155,8 @@
 		if (tree->attributes & HFS_TREE_VARIDXKEYS)
 			key_len = be16_to_cpu(fd->search_key->key_len) + 2;
 		else {
-			fd->search_key->key_len = cpu_to_be16(tree->max_key_len);
+			fd->search_key->key_len =
+				cpu_to_be16(tree->max_key_len);
 			key_len = tree->max_key_len + 2;
 		}
 		goto again;
@@ -180,7 +185,8 @@
 		mark_inode_dirty(tree->inode);
 	}
 	hfs_bnode_dump(node);
-	dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength);
+	dprint(DBG_BNODE_MOD, "remove_rec: %d, %d\n",
+		fd->record, fd->keylength + fd->entrylength);
 	if (!--node->num_recs) {
 		hfs_bnode_unlink(node);
 		if (!node->parent)
@@ -194,7 +200,9 @@
 		__hfs_brec_find(node, fd);
 		goto again;
 	}
-	hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs);
+	hfs_bnode_write_u16(node,
+		offsetof(struct hfs_bnode_desc, num_recs),
+		node->num_recs);
 
 	if (rec_off == end_off)
 		goto skip;
@@ -364,7 +372,8 @@
 		newkeylen = hfs_bnode_read_u16(node, 14) + 2;
 	else
 		fd->keylength = newkeylen = tree->max_key_len + 2;
-	dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen);
+	dprint(DBG_BNODE_MOD, "update_rec: %d, %d, %d\n",
+		rec, fd->keylength, newkeylen);
 
 	rec_off = tree->node_size - (rec + 2) * 2;
 	end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
@@ -375,7 +384,7 @@
 		end_off = hfs_bnode_read_u16(parent, end_rec_off);
 		if (end_rec_off - end_off < diff) {
 
-			printk(KERN_DEBUG "hfs: splitting index node...\n");
+			dprint(DBG_BNODE_MOD, "hfs: splitting index node.\n");
 			fd->bnode = parent;
 			new_node = hfs_bnode_split(fd);
 			if (IS_ERR(new_node))
@@ -383,7 +392,8 @@
 			parent = fd->bnode;
 			rec = fd->record;
 			rec_off = tree->node_size - (rec + 2) * 2;
-			end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
+			end_rec_off = tree->node_size -
+				(parent->num_recs + 1) * 2;
 		}
 	}
 
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 22e4d4e..21023d9 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -51,7 +51,8 @@
 		goto free_inode;
 
 	/* Load the header */
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap(page) +
+		sizeof(struct hfs_bnode_desc));
 	tree->root = be32_to_cpu(head->root);
 	tree->leaf_count = be32_to_cpu(head->leaf_count);
 	tree->leaf_head = be32_to_cpu(head->leaf_head);
@@ -115,7 +116,9 @@
 
 	tree->node_size_shift = ffs(size) - 1;
 
-	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	tree->pages_per_bnode =
+		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
+		PAGE_CACHE_SHIFT;
 
 	kunmap(page);
 	page_cache_release(page);
@@ -144,8 +147,10 @@
 		while ((node = tree->node_hash[i])) {
 			tree->node_hash[i] = node->next_hash;
 			if (atomic_read(&node->refcnt))
-				printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
-					node->tree->cnid, node->this, atomic_read(&node->refcnt));
+				printk(KERN_CRIT "hfs: node %d:%d "
+						"still has %d user(s)!\n",
+					node->tree->cnid, node->this,
+					atomic_read(&node->refcnt));
 			hfs_bnode_free(node);
 			tree->node_hash_cnt--;
 		}
@@ -166,7 +171,8 @@
 		return;
 	/* Load the header */
 	page = node->page[0];
-	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
+	head = (struct hfs_btree_header_rec *)(kmap(page) +
+		sizeof(struct hfs_bnode_desc));
 
 	head->root = cpu_to_be32(tree->root);
 	head->leaf_count = cpu_to_be32(tree->leaf_count);
@@ -272,7 +278,8 @@
 						tree->free_nodes--;
 						mark_inode_dirty(tree->inode);
 						hfs_bnode_put(node);
-						return hfs_bnode_create(tree, idx);
+						return hfs_bnode_create(tree,
+							idx);
 					}
 				}
 			}
@@ -287,7 +294,7 @@
 		kunmap(*pagep);
 		nidx = node->next;
 		if (!nidx) {
-			printk(KERN_DEBUG "hfs: create new bmap node...\n");
+			dprint(DBG_BNODE_MOD, "hfs: create new bmap node.\n");
 			next_node = hfs_bmap_new_bmap(node, idx);
 		} else
 			next_node = hfs_bnode_find(tree, nidx);
@@ -329,7 +336,9 @@
 		hfs_bnode_put(node);
 		if (!i) {
 			/* panic */;
-			printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
+			printk(KERN_CRIT "hfs: unable to free bnode %u. "
+					"bmap not found!\n",
+				node->this);
 			return;
 		}
 		node = hfs_bnode_find(tree, i);
@@ -337,7 +346,9 @@
 			return;
 		if (node->type != HFS_NODE_MAP) {
 			/* panic */;
-			printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
+			printk(KERN_CRIT "hfs: invalid bmap found! "
+					"(%u,%d)\n",
+				node->this, node->type);
 			hfs_bnode_put(node);
 			return;
 		}
@@ -350,7 +361,9 @@
 	m = 1 << (~nidx & 7);
 	byte = data[off];
 	if (!(byte & m)) {
-		printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
+		printk(KERN_CRIT "hfs: trying to free free bnode "
+				"%u(%d)\n",
+			node->this, node->type);
 		kunmap(page);
 		hfs_bnode_put(node);
 		return;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 8af45fc..b4ba1b3 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -91,7 +91,8 @@
 		perms->dev = 0;
 }
 
-static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode)
+static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
+		u32 cnid, struct inode *inode)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
 
@@ -128,20 +129,32 @@
 		if (cnid == inode->i_ino) {
 			hfsplus_cat_set_perms(inode, &file->permissions);
 			if (S_ISLNK(inode->i_mode)) {
-				file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE);
-				file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR);
+				file->user_info.fdType =
+					cpu_to_be32(HFSP_SYMLINK_TYPE);
+				file->user_info.fdCreator =
+					cpu_to_be32(HFSP_SYMLINK_CREATOR);
 			} else {
-				file->user_info.fdType = cpu_to_be32(sbi->type);
-				file->user_info.fdCreator = cpu_to_be32(sbi->creator);
+				file->user_info.fdType =
+					cpu_to_be32(sbi->type);
+				file->user_info.fdCreator =
+					cpu_to_be32(sbi->creator);
 			}
-			if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
-				file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
+			if (HFSPLUS_FLG_IMMUTABLE &
+					(file->permissions.rootflags |
+					file->permissions.userflags))
+				file->flags |=
+					cpu_to_be16(HFSPLUS_FILE_LOCKED);
 		} else {
-			file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE);
-			file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR);
-			file->user_info.fdFlags = cpu_to_be16(0x100);
-			file->create_date = HFSPLUS_I(sbi->hidden_dir)->create_date;
-			file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode)->linkid);
+			file->user_info.fdType =
+				cpu_to_be32(HFSP_HARDLINK_TYPE);
+			file->user_info.fdCreator =
+				cpu_to_be32(HFSP_HFSPLUS_CREATOR);
+			file->user_info.fdFlags =
+				cpu_to_be16(0x100);
+			file->create_date =
+				HFSPLUS_I(sbi->hidden_dir)->create_date;
+			file->permissions.dev =
+				cpu_to_be32(HFSPLUS_I(inode)->linkid);
 		}
 		return sizeof(*file);
 	}
@@ -182,12 +195,14 @@
 		return -EIO;
 	}
 
-	hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID),
-				 &tmp.thread.nodeName);
+	hfsplus_cat_build_key_uni(fd->search_key,
+		be32_to_cpu(tmp.thread.parentID),
+		&tmp.thread.nodeName);
 	return hfs_brec_find(fd);
 }
 
-int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
+int hfsplus_create_cat(u32 cnid, struct inode *dir,
+		struct qstr *str, struct inode *inode)
 {
 	struct super_block *sb = dir->i_sb;
 	struct hfs_find_data fd;
@@ -195,13 +210,15 @@
 	int entry_size;
 	int err;
 
-	dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
+	dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
+		str->name, cnid, inode->i_nlink);
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 
 	hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
-	entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
+	entry_size = hfsplus_fill_cat_thread(sb, &entry,
+		S_ISDIR(inode->i_mode) ?
 			HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD,
-			dir->i_ino, str);
+		dir->i_ino, str);
 	err = hfs_brec_find(&fd);
 	if (err != -ENOENT) {
 		if (!err)
@@ -227,7 +244,8 @@
 
 	dir->i_size++;
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-	mark_inode_dirty(dir);
+	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
+
 	hfs_find_exit(&fd);
 	return 0;
 
@@ -249,7 +267,8 @@
 	int err, off;
 	u16 type;
 
-	dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? str->name : NULL, cnid);
+	dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n",
+		str ? str->name : NULL, cnid);
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 
 	if (!str) {
@@ -260,11 +279,15 @@
 		if (err)
 			goto out;
 
-		off = fd.entryoffset + offsetof(struct hfsplus_cat_thread, nodeName);
+		off = fd.entryoffset +
+			offsetof(struct hfsplus_cat_thread, nodeName);
 		fd.search_key->cat.parent = cpu_to_be32(dir->i_ino);
-		hfs_bnode_read(fd.bnode, &fd.search_key->cat.name.length, off, 2);
+		hfs_bnode_read(fd.bnode,
+			&fd.search_key->cat.name.length, off, 2);
 		len = be16_to_cpu(fd.search_key->cat.name.length) * 2;
-		hfs_bnode_read(fd.bnode, &fd.search_key->cat.name.unicode, off + 2, len);
+		hfs_bnode_read(fd.bnode,
+			&fd.search_key->cat.name.unicode,
+			off + 2, len);
 		fd.search_key->key_len = cpu_to_be16(6 + len);
 	} else
 		hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str);
@@ -281,7 +304,8 @@
 		hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA);
 #endif
 
-		off = fd.entryoffset + offsetof(struct hfsplus_cat_file, rsrc_fork);
+		off = fd.entryoffset +
+			offsetof(struct hfsplus_cat_file, rsrc_fork);
 		hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork));
 		hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC);
 	}
@@ -308,7 +332,7 @@
 
 	dir->i_size--;
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-	mark_inode_dirty(dir);
+	hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
 out:
 	hfs_find_exit(&fd);
 
@@ -325,7 +349,8 @@
 	int entry_size, type;
 	int err = 0;
 
-	dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
+	dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
+		cnid, src_dir->i_ino, src_name->name,
 		dst_dir->i_ino, dst_name->name);
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
 	dst_fd = src_fd;
@@ -353,7 +378,6 @@
 		goto out;
 	dst_dir->i_size++;
 	dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
-	mark_inode_dirty(dst_dir);
 
 	/* finally remove the old entry */
 	hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
@@ -365,7 +389,6 @@
 		goto out;
 	src_dir->i_size--;
 	src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
-	mark_inode_dirty(src_dir);
 
 	/* remove old thread entry */
 	hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL);
@@ -379,7 +402,8 @@
 
 	/* create new thread entry */
 	hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
-	entry_size = hfsplus_fill_cat_thread(sb, &entry, type, dst_dir->i_ino, dst_name);
+	entry_size = hfsplus_fill_cat_thread(sb, &entry, type,
+		dst_dir->i_ino, dst_name);
 	err = hfs_brec_find(&dst_fd);
 	if (err != -ENOENT) {
 		if (!err)
@@ -387,6 +411,9 @@
 		goto out;
 	}
 	err = hfs_brec_insert(&dst_fd, &entry, entry_size);
+
+	hfsplus_mark_inode_dirty(dst_dir, HFSPLUS_I_CAT_DIRTY);
+	hfsplus_mark_inode_dirty(src_dir, HFSPLUS_I_CAT_DIRTY);
 out:
 	hfs_bnode_put(dst_fd.bnode);
 	hfs_find_exit(&src_fd);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 9d59c05..4df5059 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -37,7 +37,6 @@
 
 	sb = dir->i_sb;
 
-	dentry->d_op = &hfsplus_dentry_operations;
 	dentry->d_fsdata = NULL;
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
@@ -66,11 +65,17 @@
 			goto fail;
 		}
 		cnid = be32_to_cpu(entry.file.id);
-		if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) &&
-		    entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
-		    (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->create_date ||
-		     entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)->create_date) &&
-		    HFSPLUS_SB(sb)->hidden_dir) {
+		if (entry.file.user_info.fdType ==
+				cpu_to_be32(HFSP_HARDLINK_TYPE) &&
+				entry.file.user_info.fdCreator ==
+				cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
+				(entry.file.create_date ==
+					HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
+						create_date ||
+				entry.file.create_date ==
+					HFSPLUS_I(sb->s_root->d_inode)->
+						create_date) &&
+				HFSPLUS_SB(sb)->hidden_dir) {
 			struct qstr str;
 			char name[32];
 
@@ -83,11 +88,13 @@
 				linkid = 0;
 			} else {
 				dentry->d_fsdata = (void *)(unsigned long)cnid;
-				linkid = be32_to_cpu(entry.file.permissions.dev);
+				linkid =
+					be32_to_cpu(entry.file.permissions.dev);
 				str.len = sprintf(name, "iNode%d", linkid);
 				str.name = name;
 				hfsplus_cat_build_key(sb, fd.search_key,
-					HFSPLUS_SB(sb)->hidden_dir->i_ino, &str);
+					HFSPLUS_SB(sb)->hidden_dir->i_ino,
+					&str);
 				goto again;
 			}
 		} else if (!dentry->d_fsdata)
@@ -139,7 +146,8 @@
 		filp->f_pos++;
 		/* fall through */
 	case 1:
-		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+			fd.entrylength);
 		if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
 			printk(KERN_ERR "hfs: bad catalog folder thread\n");
 			err = -EIO;
@@ -169,14 +177,16 @@
 			err = -EIO;
 			goto out;
 		}
-		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
+		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+			fd.entrylength);
 		type = be16_to_cpu(entry.type);
 		len = HFSPLUS_MAX_STRLEN;
 		err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len);
 		if (err)
 			goto out;
 		if (type == HFSPLUS_FOLDER) {
-			if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
+			if (fd.entrylength <
+					sizeof(struct hfsplus_cat_folder)) {
 				printk(KERN_ERR "hfs: small dir entry\n");
 				err = -EIO;
 				goto out;
@@ -202,7 +212,7 @@
 			err = -EIO;
 			goto out;
 		}
-	next:
+next:
 		filp->f_pos++;
 		if (filp->f_pos >= inode->i_size)
 			goto out;
@@ -273,7 +283,8 @@
 		HFSPLUS_I(inode)->linkid = id;
 		cnid = sbi->next_cnid++;
 		src_dentry->d_fsdata = (void *)(unsigned long)cnid;
-		res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode);
+		res = hfsplus_create_cat(cnid, src_dir,
+			&src_dentry->d_name, inode);
 		if (res)
 			/* panic? */
 			goto out;
@@ -485,6 +496,7 @@
 };
 
 const struct file_operations hfsplus_dir_operations = {
+	.fsync		= hfsplus_file_fsync,
 	.read		= generic_read_dir,
 	.readdir	= hfsplus_readdir,
 	.unlocked_ioctl = hfsplus_ioctl,
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 0c9cb18..52a0bca 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -83,7 +83,8 @@
 	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
 }
 
-static void __hfsplus_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
+static void __hfsplus_ext_write_extent(struct inode *inode,
+		struct hfs_find_data *fd)
 {
 	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
 	int res;
@@ -95,24 +96,32 @@
 				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);
 
 	res = hfs_brec_find(fd);
-	if (hip->flags & HFSPLUS_FLG_EXT_NEW) {
+	if (hip->extent_state & HFSPLUS_EXT_NEW) {
 		if (res != -ENOENT)
 			return;
 		hfs_brec_insert(fd, hip->cached_extents,
 				sizeof(hfsplus_extent_rec));
-		hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
 	} else {
 		if (res)
 			return;
 		hfs_bnode_write(fd->bnode, hip->cached_extents,
 				fd->entryoffset, fd->entrylength);
-		hip->flags &= ~HFSPLUS_FLG_EXT_DIRTY;
+		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
 	}
+
+	/*
+	 * We can't just use hfsplus_mark_inode_dirty here, because we
+	 * also get called from hfsplus_write_inode, which should not
+	 * redirty the inode.  Instead the callers have to be careful
+	 * to explicitly mark the inode dirty, too.
+	 */
+	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
 }
 
 static void hfsplus_ext_write_extent_locked(struct inode *inode)
 {
-	if (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_EXT_DIRTY) {
+	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
 		struct hfs_find_data fd;
 
 		hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
@@ -144,18 +153,20 @@
 		return -ENOENT;
 	if (fd->entrylength != sizeof(hfsplus_extent_rec))
 		return -EIO;
-	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfsplus_extent_rec));
+	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
+		sizeof(hfsplus_extent_rec));
 	return 0;
 }
 
-static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
+static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
+		struct inode *inode, u32 block)
 {
 	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
 	int res;
 
 	WARN_ON(!mutex_is_locked(&hip->extents_lock));
 
-	if (hip->flags & HFSPLUS_FLG_EXT_DIRTY)
+	if (hip->extent_state & HFSPLUS_EXT_DIRTY)
 		__hfsplus_ext_write_extent(inode, fd);
 
 	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
@@ -164,10 +175,11 @@
 						HFSPLUS_TYPE_DATA);
 	if (!res) {
 		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
-		hip->cached_blocks = hfsplus_ext_block_count(hip->cached_extents);
+		hip->cached_blocks =
+			hfsplus_ext_block_count(hip->cached_extents);
 	} else {
 		hip->cached_start = hip->cached_blocks = 0;
-		hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
 	}
 	return res;
 }
@@ -197,6 +209,7 @@
 	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
 	int res = -EIO;
 	u32 ablock, dblock, mask;
+	int was_dirty = 0;
 	int shift;
 
 	/* Convert inode block to disk allocation block */
@@ -223,27 +236,37 @@
 		return -EIO;
 
 	mutex_lock(&hip->extents_lock);
+
+	/*
+	 * hfsplus_ext_read_extent will write out a cached extent into
+	 * the extents btree.  In that case we may have to mark the inode
+	 * dirty even for a pure read of an extent here.
+	 */
+	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
 	res = hfsplus_ext_read_extent(inode, ablock);
-	if (!res) {
-		dblock = hfsplus_ext_find_block(hip->cached_extents,
-						ablock - hip->cached_start);
-	} else {
+	if (res) {
 		mutex_unlock(&hip->extents_lock);
 		return -EIO;
 	}
+	dblock = hfsplus_ext_find_block(hip->cached_extents,
+					ablock - hip->cached_start);
 	mutex_unlock(&hip->extents_lock);
 
 done:
-	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock);
+	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
+		inode->i_ino, (long long)iblock, dblock);
 	mask = (1 << sbi->fs_shift) - 1;
-	map_bh(bh_result, sb, (dblock << sbi->fs_shift) + sbi->blockoffset + (iblock & mask));
+	map_bh(bh_result, sb,
+		(dblock << sbi->fs_shift) + sbi->blockoffset +
+			(iblock & mask));
 	if (create) {
 		set_buffer_new(bh_result);
 		hip->phys_size += sb->s_blocksize;
 		hip->fs_blocks++;
 		inode_add_bytes(inode, sb->s_blocksize);
-		mark_inode_dirty(inode);
 	}
+	if (create || was_dirty)
+		mark_inode_dirty(inode);
 	return 0;
 }
 
@@ -326,7 +349,8 @@
 	}
 }
 
-int hfsplus_free_fork(struct super_block *sb, u32 cnid, struct hfsplus_fork_raw *fork, int type)
+int hfsplus_free_fork(struct super_block *sb, u32 cnid,
+		struct hfsplus_fork_raw *fork, int type)
 {
 	struct hfs_find_data fd;
 	hfsplus_extent_rec ext_entry;
@@ -373,12 +397,13 @@
 	u32 start, len, goal;
 	int res;
 
-	if (sbi->alloc_file->i_size * 8 <
-	    sbi->total_blocks - sbi->free_blocks + 8) {
-		// extend alloc file
-		printk(KERN_ERR "hfs: extend alloc file! (%Lu,%u,%u)\n",
-				sbi->alloc_file->i_size * 8,
-				sbi->total_blocks, sbi->free_blocks);
+	if (sbi->total_blocks - sbi->free_blocks + 8 >
+			sbi->alloc_file->i_size * 8) {
+		/* extend alloc file */
+		printk(KERN_ERR "hfs: extend alloc file! "
+				"(%llu,%u,%u)\n",
+			sbi->alloc_file->i_size * 8,
+			sbi->total_blocks, sbi->free_blocks);
 		return -ENOSPC;
 	}
 
@@ -429,7 +454,7 @@
 					 start, len);
 		if (!res) {
 			hfsplus_dump_extent(hip->cached_extents);
-			hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
+			hip->extent_state |= HFSPLUS_EXT_DIRTY;
 			hip->cached_blocks += len;
 		} else if (res == -ENOSPC)
 			goto insert_extent;
@@ -438,7 +463,7 @@
 	mutex_unlock(&hip->extents_lock);
 	if (!res) {
 		hip->alloc_blocks += len;
-		mark_inode_dirty(inode);
+		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
 	}
 	return res;
 
@@ -450,7 +475,7 @@
 	hip->cached_extents[0].start_block = cpu_to_be32(start);
 	hip->cached_extents[0].block_count = cpu_to_be32(len);
 	hfsplus_dump_extent(hip->cached_extents);
-	hip->flags |= HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW;
+	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
 	hip->cached_start = hip->alloc_blocks;
 	hip->cached_blocks = len;
 
@@ -466,8 +491,9 @@
 	u32 alloc_cnt, blk_cnt, start;
 	int res;
 
-	dprint(DBG_INODE, "truncate: %lu, %Lu -> %Lu\n",
-		inode->i_ino, (long long)hip->phys_size, inode->i_size);
+	dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n",
+		inode->i_ino, (long long)hip->phys_size,
+		inode->i_size);
 
 	if (inode->i_size > hip->phys_size) {
 		struct address_space *mapping = inode->i_mapping;
@@ -481,7 +507,8 @@
 						&page, &fsdata);
 		if (res)
 			return;
-		res = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
+		res = pagecache_write_end(NULL, mapping, size,
+			0, 0, page, fsdata);
 		if (res < 0)
 			return;
 		mark_inode_dirty(inode);
@@ -513,12 +540,12 @@
 				     alloc_cnt - start, alloc_cnt - blk_cnt);
 		hfsplus_dump_extent(hip->cached_extents);
 		if (blk_cnt > start) {
-			hip->flags |= HFSPLUS_FLG_EXT_DIRTY;
+			hip->extent_state |= HFSPLUS_EXT_DIRTY;
 			break;
 		}
 		alloc_cnt = start;
 		hip->cached_start = hip->cached_blocks = 0;
-		hip->flags &= ~(HFSPLUS_FLG_EXT_DIRTY | HFSPLUS_FLG_EXT_NEW);
+		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
 		hfs_brec_remove(&fd);
 	}
 	hfs_find_exit(&fd);
@@ -527,7 +554,8 @@
 	hip->alloc_blocks = blk_cnt;
 out:
 	hip->phys_size = inode->i_size;
-	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
+		sb->s_blocksize_bits;
 	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
-	mark_inode_dirty(inode);
+	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
 }
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index cb3653e..d685752 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -23,13 +23,16 @@
 #define DBG_EXTENT	0x00000020
 #define DBG_BITMAP	0x00000040
 
-//#define DBG_MASK	(DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
-//#define DBG_MASK	(DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
-//#define DBG_MASK	(DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
+#if 0
+#define DBG_MASK	(DBG_EXTENT|DBG_INODE|DBG_BNODE_MOD)
+#define DBG_MASK	(DBG_BNODE_MOD|DBG_CAT_MOD|DBG_INODE)
+#define DBG_MASK	(DBG_CAT_MOD|DBG_BNODE_REFS|DBG_INODE|DBG_EXTENT)
+#endif
 #define DBG_MASK	(0)
 
 #define dprint(flg, fmt, args...) \
-	if (flg & DBG_MASK) printk(fmt , ## args)
+	if (flg & DBG_MASK) \
+		printk(fmt , ## args)
 
 /* Runtime config options */
 #define HFSPLUS_DEF_CR_TYPE    0x3F3F3F3F  /* '????' */
@@ -37,7 +40,8 @@
 #define HFSPLUS_TYPE_DATA 0x00
 #define HFSPLUS_TYPE_RSRC 0xFF
 
-typedef int (*btree_keycmp)(const hfsplus_btree_key *, const hfsplus_btree_key *);
+typedef int (*btree_keycmp)(const hfsplus_btree_key *,
+		const hfsplus_btree_key *);
 
 #define NODE_HASH_SIZE	256
 
@@ -61,7 +65,6 @@
 	unsigned int max_key_len;
 	unsigned int depth;
 
-	//unsigned int map1_size, map_size;
 	struct mutex tree_lock;
 
 	unsigned int pages_per_bnode;
@@ -107,8 +110,8 @@
 struct hfs_btree;
 
 struct hfsplus_sb_info {
-	struct buffer_head *s_vhbh;
 	struct hfsplus_vh *s_vhdr;
+	struct hfsplus_vh *s_backup_vhdr;
 	struct hfs_btree *ext_tree;
 	struct hfs_btree *cat_tree;
 	struct hfs_btree *attr_tree;
@@ -118,7 +121,8 @@
 
 	/* Runtime variables */
 	u32 blockoffset;
-	u32 sect_count;
+	sector_t part_start;
+	sector_t sect_count;
 	int fs_shift;
 
 	/* immutable data from the volume header */
@@ -155,6 +159,12 @@
 #define HFSPLUS_SB_FORCE	2
 #define HFSPLUS_SB_HFSX		3
 #define HFSPLUS_SB_CASEFOLD	4
+#define HFSPLUS_SB_NOBARRIER	5
+
+static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
 
 
 struct hfsplus_inode_info {
@@ -170,7 +180,7 @@
 	u32 cached_blocks;
 	hfsplus_extent_rec first_extents;
 	hfsplus_extent_rec cached_extents;
-	unsigned long flags;
+	unsigned int extent_state;
 	struct mutex extents_lock;
 
 	/*
@@ -185,6 +195,11 @@
 	u32 linkid;
 
 	/*
+	 * Accessed using atomic bitops.
+	 */
+	unsigned long flags;
+
+	/*
 	 * Protected by i_mutex.
 	 */
 	sector_t fs_blocks;
@@ -195,12 +210,34 @@
 	struct inode vfs_inode;
 };
 
-#define HFSPLUS_FLG_RSRC	0x0001
-#define HFSPLUS_FLG_EXT_DIRTY	0x0002
-#define HFSPLUS_FLG_EXT_NEW	0x0004
+#define HFSPLUS_EXT_DIRTY	0x0001
+#define HFSPLUS_EXT_NEW		0x0002
 
-#define HFSPLUS_IS_DATA(inode)   (!(HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC))
-#define HFSPLUS_IS_RSRC(inode)   (HFSPLUS_I(inode)->flags & HFSPLUS_FLG_RSRC)
+#define HFSPLUS_I_RSRC		0	/* represents a resource fork */
+#define HFSPLUS_I_CAT_DIRTY	1	/* has changes in the catalog tree */
+#define HFSPLUS_I_EXT_DIRTY	2	/* has changes in the extent tree */
+#define HFSPLUS_I_ALLOC_DIRTY	3	/* has changes in the allocation file */
+
+#define HFSPLUS_IS_RSRC(inode) \
+	test_bit(HFSPLUS_I_RSRC, &HFSPLUS_I(inode)->flags)
+
+static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
+{
+	return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
+}
+
+/*
+ * Mark an inode dirty, and also mark the btree in which the
+ * specific type of metadata is stored.
+ * For data or metadata that gets written back into the catalog btree
+ * by hfsplus_write_inode, a plain mark_inode_dirty call is enough.
+ */
+static inline void hfsplus_mark_inode_dirty(struct inode *inode,
+		unsigned int flag)
+{
+	set_bit(flag, &HFSPLUS_I(inode)->flags);
+	mark_inode_dirty(inode);
+}
 
 struct hfs_find_data {
 	/* filled by caller */
@@ -318,9 +355,12 @@
 int hfs_brec_goto(struct hfs_find_data *, int);
 
 /* catalog.c */
-int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
-int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
-void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *, u32, struct qstr *);
+int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *,
+		const hfsplus_btree_key *);
+int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *,
+		const hfsplus_btree_key *);
+void hfsplus_cat_build_key(struct super_block *sb,
+		hfsplus_btree_key *, u32, struct qstr *);
 int hfsplus_find_cat(struct super_block *, u32, struct hfs_find_data *);
 int hfsplus_create_cat(u32, struct inode *, struct qstr *, struct inode *);
 int hfsplus_delete_cat(u32, struct inode *, struct qstr *);
@@ -336,7 +376,8 @@
 int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
 void hfsplus_ext_write_extent(struct inode *);
 int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int);
-int hfsplus_free_fork(struct super_block *, u32, struct hfsplus_fork_raw *, int);
+int hfsplus_free_fork(struct super_block *, u32,
+		struct hfsplus_fork_raw *, int);
 int hfsplus_file_extend(struct inode *);
 void hfsplus_file_truncate(struct inode *);
 
@@ -351,6 +392,7 @@
 int hfsplus_cat_write_inode(struct inode *);
 struct inode *hfsplus_new_inode(struct super_block *, int);
 void hfsplus_delete_inode(struct inode *);
+int hfsplus_file_fsync(struct file *file, int datasync);
 
 /* ioctl.c */
 long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
@@ -362,6 +404,7 @@
 
 /* options.c */
 int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
+int hfsplus_parse_options_remount(char *input, int *force);
 void hfsplus_fill_defaults(struct hfsplus_sb_info *);
 int hfsplus_show_options(struct seq_file *, struct vfsmount *);
 
@@ -375,45 +418,26 @@
 extern u16 hfsplus_compose_table[];
 
 /* unicode.c */
-int hfsplus_strcasecmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
-int hfsplus_strcmp(const struct hfsplus_unistr *, const struct hfsplus_unistr *);
-int hfsplus_uni2asc(struct super_block *, const struct hfsplus_unistr *, char *, int *);
-int hfsplus_asc2uni(struct super_block *, struct hfsplus_unistr *, const char *, int);
-int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str);
-int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2);
+int hfsplus_strcasecmp(const struct hfsplus_unistr *,
+		const struct hfsplus_unistr *);
+int hfsplus_strcmp(const struct hfsplus_unistr *,
+		const struct hfsplus_unistr *);
+int hfsplus_uni2asc(struct super_block *,
+		const struct hfsplus_unistr *, char *, int *);
+int hfsplus_asc2uni(struct super_block *,
+		struct hfsplus_unistr *, const char *, int);
+int hfsplus_hash_dentry(const struct dentry *dentry,
+		const struct inode *inode, struct qstr *str);
+int hfsplus_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
 
 /* wrapper.c */
 int hfsplus_read_wrapper(struct super_block *);
-
 int hfs_part_find(struct super_block *, sector_t *, sector_t *);
-
-/* access macros */
-static inline struct hfsplus_sb_info *HFSPLUS_SB(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-
-static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode)
-{
-	return list_entry(inode, struct hfsplus_inode_info, vfs_inode);
-}
-
-#define sb_bread512(sb, sec, data) ({			\
-	struct buffer_head *__bh;			\
-	sector_t __block;				\
-	loff_t __start;					\
-	int __offset;					\
-							\
-	__start = (loff_t)(sec) << HFSPLUS_SECTOR_SHIFT;\
-	__block = __start >> (sb)->s_blocksize_bits;	\
-	__offset = __start & ((sb)->s_blocksize - 1);	\
-	__bh = sb_bread((sb), __block);			\
-	if (likely(__bh != NULL))			\
-		data = (void *)(__bh->b_data + __offset);\
-	else						\
-		data = NULL;				\
-	__bh;						\
-})
+int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+		void *data, int rw);
 
 /* time macros */
 #define __hfsp_mt2ut(t)		(be32_to_cpu(t) - 2082844800U)
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 6892899..927cdd6 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -36,7 +36,8 @@
 #define HFSP_WRAPOFF_EMBEDSIG     0x7C
 #define HFSP_WRAPOFF_EMBEDEXT     0x7E
 
-#define HFSP_HIDDENDIR_NAME	"\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
+#define HFSP_HIDDENDIR_NAME \
+	"\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
 
 #define HFSP_HARDLINK_TYPE	0x686c6e6b	/* 'hlnk' */
 #define HFSP_HFSPLUS_CREATOR	0x6866732b	/* 'hfs+' */
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 8afd7e8..a8df651 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -8,6 +8,7 @@
  * Inode handling routines
  */
 
+#include <linux/blkdev.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/pagemap.h>
@@ -77,7 +78,8 @@
 	if (!tree)
 		return 0;
 	if (tree->node_size >= PAGE_CACHE_SIZE) {
-		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
+		nidx = page->index >>
+			(tree->node_size_shift - PAGE_CACHE_SHIFT);
 		spin_lock(&tree->hash_lock);
 		node = hfs_bnode_findhash(tree, nidx);
 		if (!node)
@@ -90,7 +92,8 @@
 		}
 		spin_unlock(&tree->hash_lock);
 	} else {
-		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
+		nidx = page->index <<
+			(PAGE_CACHE_SHIFT - tree->node_size_shift);
 		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
 		spin_lock(&tree->hash_lock);
 		do {
@@ -166,8 +169,8 @@
 	.d_compare    = hfsplus_compare_dentry,
 };
 
-static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
-					  struct nameidata *nd)
+static struct dentry *hfsplus_file_lookup(struct inode *dir,
+		struct dentry *dentry, struct nameidata *nd)
 {
 	struct hfs_find_data fd;
 	struct super_block *sb = dir->i_sb;
@@ -190,7 +193,9 @@
 	inode->i_ino = dir->i_ino;
 	INIT_LIST_HEAD(&hip->open_dir_list);
 	mutex_init(&hip->extents_lock);
-	hip->flags = HFSPLUS_FLG_RSRC;
+	hip->extent_state = 0;
+	hip->flags = 0;
+	set_bit(HFSPLUS_I_RSRC, &hip->flags);
 
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
@@ -219,7 +224,8 @@
 	return NULL;
 }
 
-static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
+static void hfsplus_get_perms(struct inode *inode,
+		struct hfsplus_perm *perms, int dir)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
 	u16 mode;
@@ -302,29 +308,41 @@
 	return 0;
 }
 
-static int hfsplus_file_fsync(struct file *filp, int datasync)
+int hfsplus_file_fsync(struct file *file, int datasync)
 {
-	struct inode *inode = filp->f_mapping->host;
-	struct super_block * sb;
-	int ret, err;
+	struct inode *inode = file->f_mapping->host;
+	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
+	int error = 0, error2;
 
-	/* sync the inode to buffers */
-	ret = write_inode_now(inode, 0);
+	/*
+	 * Sync inode metadata into the catalog and extent trees.
+	 */
+	sync_inode_metadata(inode, 1);
 
-	/* sync the superblock to buffers */
-	sb = inode->i_sb;
-	if (sb->s_dirt) {
-		if (!(sb->s_flags & MS_RDONLY))
-			hfsplus_sync_fs(sb, 1);
-		else
-			sb->s_dirt = 0;
+	/*
+	 * And explicitly write out the btrees.
+	 */
+	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
+		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
+
+	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
+		error2 =
+			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
+		if (!error)
+			error = error2;
 	}
 
-	/* .. finally sync the buffers to disk */
-	err = sync_blockdev(sb->s_bdev);
-	if (!ret)
-		ret = err;
-	return ret;
+	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
+		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
+		if (!error)
+			error = error2;
+	}
+
+	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
+		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+
+	return error;
 }
 
 static const struct inode_operations hfsplus_file_inode_operations = {
@@ -337,7 +355,7 @@
 };
 
 static const struct file_operations hfsplus_file_operations = {
-	.llseek 	= generic_file_llseek,
+	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
 	.aio_read	= generic_file_aio_read,
 	.write		= do_sync_write,
@@ -370,6 +388,7 @@
 	INIT_LIST_HEAD(&hip->open_dir_list);
 	mutex_init(&hip->extents_lock);
 	atomic_set(&hip->opencnt, 0);
+	hip->extent_state = 0;
 	hip->flags = 0;
 	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
 	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
@@ -457,7 +476,8 @@
 	}
 }
 
-void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
+void hfsplus_inode_write_fork(struct inode *inode,
+		struct hfsplus_fork_raw *fork)
 {
 	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
 	       sizeof(hfsplus_extent_rec));
@@ -499,13 +519,14 @@
 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
 					sizeof(struct hfsplus_cat_file));
 
-		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
-					&file->data_fork : &file->rsrc_fork);
+		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
+					&file->rsrc_fork : &file->data_fork);
 		hfsplus_get_perms(inode, &file->permissions, 0);
 		inode->i_nlink = 1;
 		if (S_ISREG(inode->i_mode)) {
 			if (file->permissions.dev)
-				inode->i_nlink = be32_to_cpu(file->permissions.dev);
+				inode->i_nlink =
+					be32_to_cpu(file->permissions.dev);
 			inode->i_op = &hfsplus_file_inode_operations;
 			inode->i_fop = &hfsplus_file_operations;
 			inode->i_mapping->a_ops = &hfsplus_aops;
@@ -578,7 +599,9 @@
 					sizeof(struct hfsplus_cat_file));
 		hfsplus_inode_write_fork(inode, &file->data_fork);
 		hfsplus_cat_set_perms(inode, &file->permissions);
-		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
+		if (HFSPLUS_FLG_IMMUTABLE &
+				(file->permissions.rootflags |
+					file->permissions.userflags))
 			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
 		else
 			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
@@ -588,6 +611,8 @@
 		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
 					 sizeof(struct hfsplus_cat_file));
 	}
+
+	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
 out:
 	hfs_find_exit(&fd);
 	return 0;
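
The reworked hfsplus_file_fsync() above writes back the inode's metadata and then flushes only the btree address spaces whose per-inode dirty bit is set, clearing each bit with test_and_clear_bit() so a concurrent marker is not lost, and finally issues a disk cache flush unless the nobarrier option is in effect. The stand-alone sketch below (plain user-space C, all names hypothetical) models the same per-component dirty-bit pattern; the first error is preserved while the remaining components are still flushed, mirroring the error/error2 handling above:

#include <stdatomic.h>
#include <stdio.h>

enum {
	CAT_DIRTY   = 1u << 0,	/* catalog tree has pending changes */
	EXT_DIRTY   = 1u << 1,	/* extent tree has pending changes */
	ALLOC_DIRTY = 1u << 2,	/* allocation bitmap has pending changes */
};

struct fake_inode {
	atomic_uint flags;	/* dirty bits, set with an atomic OR */
};

static void mark_dirty(struct fake_inode *ip, unsigned int bit)
{
	atomic_fetch_or(&ip->flags, bit);
}

/* Clear the bit and report whether it was set, like test_and_clear_bit(). */
static int test_and_clear(struct fake_inode *ip, unsigned int bit)
{
	return (atomic_fetch_and(&ip->flags, ~bit) & bit) != 0;
}

static int flush(const char *what)
{
	printf("flushing %s\n", what);
	return 0;
}

static int fake_fsync(struct fake_inode *ip)
{
	int error = 0, error2;

	if (test_and_clear(ip, CAT_DIRTY))
		error = flush("catalog tree");
	if (test_and_clear(ip, EXT_DIRTY)) {
		error2 = flush("extent tree");
		if (!error)
			error = error2;
	}
	if (test_and_clear(ip, ALLOC_DIRTY)) {
		error2 = flush("allocation file");
		if (!error)
			error = error2;
	}
	return error;	/* first error wins, later flushes still run */
}

int main(void)
{
	struct fake_inode ip = { .flags = 0 };

	mark_dirty(&ip, CAT_DIRTY);
	mark_dirty(&ip, ALLOC_DIRTY);
	return fake_fsync(&ip);	/* flushes only the catalog tree and allocation file */
}
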
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 40a85a3..508ce66 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -28,7 +28,7 @@
 
 	if (inode->i_flags & S_IMMUTABLE)
 		flags |= FS_IMMUTABLE_FL;
-	if (inode->i_flags |= S_APPEND)
+	if (inode->i_flags & S_APPEND)
 		flags |= FS_APPEND_FL;
 	if (hip->userflags & HFSPLUS_FLG_NODUMP)
 		flags |= FS_NODUMP_FL;
@@ -147,9 +147,11 @@
 			res = -ERANGE;
 	} else
 		res = -EOPNOTSUPP;
-	if (!res)
+	if (!res) {
 		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
 				sizeof(struct hfsplus_cat_file));
+		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
+	}
 out:
 	hfs_find_exit(&fd);
 	return res;
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index f9ab276..bb62a5882 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -23,6 +23,7 @@
 	opt_umask, opt_uid, opt_gid,
 	opt_part, opt_session, opt_nls,
 	opt_nodecompose, opt_decompose,
+	opt_barrier, opt_nobarrier,
 	opt_force, opt_err
 };
 
@@ -37,6 +38,8 @@
 	{ opt_nls, "nls=%s" },
 	{ opt_decompose, "decompose" },
 	{ opt_nodecompose, "nodecompose" },
+	{ opt_barrier, "barrier" },
+	{ opt_nobarrier, "nobarrier" },
 	{ opt_force, "force" },
 	{ opt_err, NULL }
 };
@@ -65,6 +68,32 @@
 	return 0;
 }
 
+int hfsplus_parse_options_remount(char *input, int *force)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int token;
+
+	if (!input)
+		return 0;
+
+	while ((p = strsep(&input, ",")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case opt_force:
+			*force = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 1;
+}
+
 /* Parse options from mount. Returns 0 on failure */
 /* input is the options passed to mount() as a string */
 int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
@@ -136,7 +165,9 @@
 			if (p)
 				sbi->nls = load_nls(p);
 			if (!sbi->nls) {
-				printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p);
+				printk(KERN_ERR "hfs: unable to load "
+						"nls mapping \"%s\"\n",
+					p);
 				kfree(p);
 				return 0;
 			}
@@ -148,6 +179,12 @@
 		case opt_nodecompose:
 			set_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags);
 			break;
+		case opt_barrier:
+			clear_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
+			break;
+		case opt_nobarrier:
+			set_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags);
+			break;
 		case opt_force:
 			set_bit(HFSPLUS_SB_FORCE, &sbi->flags);
 			break;
@@ -177,7 +214,8 @@
 		seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
 	if (sbi->type != HFSPLUS_DEF_CR_TYPE)
 		seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
-	seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask, sbi->uid, sbi->gid);
+	seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
+		sbi->uid, sbi->gid);
 	if (sbi->part >= 0)
 		seq_printf(seq, ",part=%u", sbi->part);
 	if (sbi->session >= 0)
@@ -186,5 +224,7 @@
 		seq_printf(seq, ",nls=%s", sbi->nls->charset);
 	if (test_bit(HFSPLUS_SB_NODECOMPOSE, &sbi->flags))
 		seq_printf(seq, ",nodecompose");
+	if (test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
+		seq_printf(seq, ",nobarrier");
 	return 0;
 }
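
hfsplus_parse_options_remount() above deliberately recognises only the "force" token and skips everything else, so a remount cannot fail because of options that were only meaningful at mount time. A minimal user-space model of that scan (hypothetical names; the kernel version additionally goes through match_token() so the option table stays in one place):

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

/*
 * Walk a comma-separated option string and honour only "force",
 * ignoring unknown tokens so they cannot make a remount fail.
 */
static int parse_remount(char *input, int *force)
{
	char *p;

	if (!input)
		return 0;

	while ((p = strsep(&input, ",")) != NULL) {
		if (!*p)
			continue;
		if (strcmp(p, "force") == 0)
			*force = 1;
		/* every other token is deliberately ignored on remount */
	}
	return 1;
}

int main(void)
{
	char opts[] = "nodecompose,force,nobarrier";
	int force = 0;

	parse_remount(opts, &force);
	printf("force=%d\n", force);	/* prints force=1 */
	return 0;
}
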
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index 208b16c..d66ad11 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -2,7 +2,8 @@
  * linux/fs/hfsplus/part_tbl.c
  *
  * Copyright (C) 1996-1997  Paul H. Hargrove
- * This file may be distributed under the terms of the GNU General Public License.
+ * This file may be distributed under the terms of
+ * the GNU General Public License.
  *
  * Original code to handle the new style Mac partition table based on
  * a patch contributed by Holger Schemel (aeglos@valinor.owl.de).
@@ -13,6 +14,7 @@
  *
  */
 
+#include <linux/slab.h>
 #include "hfsplus_fs.h"
 
 /* offsets to various blocks */
@@ -58,77 +60,94 @@
  */
 struct old_pmap {
 	__be16		pdSig;	/* Signature bytes */
-	struct 	old_pmap_entry {
+	struct old_pmap_entry {
 		__be32	pdStart;
 		__be32	pdSize;
 		__be32	pdFSID;
 	}	pdEntry[42];
 } __packed;
 
-/*
- * hfs_part_find()
- *
- * Parse the partition map looking for the
- * start and length of the 'part'th HFS partition.
- */
-int hfs_part_find(struct super_block *sb,
-		  sector_t *part_start, sector_t *part_size)
+static int hfs_parse_old_pmap(struct super_block *sb, struct old_pmap *pm,
+		sector_t *part_start, sector_t *part_size)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
-	struct buffer_head *bh;
-	__be16 *data;
-	int i, size, res;
+	int i;
 
-	res = -ENOENT;
-	bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK, data);
-	if (!bh)
-		return -EIO;
+	for (i = 0; i < 42; i++) {
+		struct old_pmap_entry *p = &pm->pdEntry[i];
 
-	switch (be16_to_cpu(*data)) {
-	case HFS_OLD_PMAP_MAGIC:
-	  {
-		struct old_pmap *pm;
-		struct old_pmap_entry *p;
-
-		pm = (struct old_pmap *)bh->b_data;
-		p = pm->pdEntry;
-		size = 42;
-		for (i = 0; i < size; p++, i++) {
-			if (p->pdStart && p->pdSize &&
-			    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
-			    (sbi->part < 0 || sbi->part == i)) {
-				*part_start += be32_to_cpu(p->pdStart);
-				*part_size = be32_to_cpu(p->pdSize);
-				res = 0;
-			}
+		if (p->pdStart && p->pdSize &&
+		    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
+		    (sbi->part < 0 || sbi->part == i)) {
+			*part_start += be32_to_cpu(p->pdStart);
+			*part_size = be32_to_cpu(p->pdSize);
+			return 0;
 		}
-		break;
-	  }
-	case HFS_NEW_PMAP_MAGIC:
-	  {
-		struct new_pmap *pm;
-
-		pm = (struct new_pmap *)bh->b_data;
-		size = be32_to_cpu(pm->pmMapBlkCnt);
-		for (i = 0; i < size;) {
-			if (!memcmp(pm->pmPartType,"Apple_HFS", 9) &&
-			    (sbi->part < 0 || sbi->part == i)) {
-				*part_start += be32_to_cpu(pm->pmPyPartStart);
-				*part_size = be32_to_cpu(pm->pmPartBlkCnt);
-				res = 0;
-				break;
-			}
-			brelse(bh);
-			bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK + ++i, pm);
-			if (!bh)
-				return -EIO;
-			if (pm->pmSig != cpu_to_be16(HFS_NEW_PMAP_MAGIC))
-				break;
-		}
-		break;
-	  }
 	}
-	brelse(bh);
 
+	return -ENOENT;
+}
+
+static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
+		sector_t *part_start, sector_t *part_size)
+{
+	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+	int size = be32_to_cpu(pm->pmMapBlkCnt);
+	int res;
+	int i = 0;
+
+	do {
+		if (!memcmp(pm->pmPartType, "Apple_HFS", 9) &&
+		    (sbi->part < 0 || sbi->part == i)) {
+			*part_start += be32_to_cpu(pm->pmPyPartStart);
+			*part_size = be32_to_cpu(pm->pmPartBlkCnt);
+			return 0;
+		}
+
+		if (++i >= size)
+			return -ENOENT;
+
+		res = hfsplus_submit_bio(sb->s_bdev,
+					 *part_start + HFS_PMAP_BLK + i,
+					 pm, READ);
+		if (res)
+			return res;
+	} while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
+
+	return -ENOENT;
+}
+
+/*
+ * Parse the partition map looking for the start and length of an
+ * HFS/HFS+ partition.
+ */
+int hfs_part_find(struct super_block *sb,
+		sector_t *part_start, sector_t *part_size)
+{
+	void *data;
+	int res;
+
+	data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
+				 data, READ);
+	if (res)
+		goto out;
+
+	switch (be16_to_cpu(*((__be16 *)data))) {
+	case HFS_OLD_PMAP_MAGIC:
+		res = hfs_parse_old_pmap(sb, data, part_start, part_size);
+		break;
+	case HFS_NEW_PMAP_MAGIC:
+		res = hfs_parse_new_pmap(sb, data, part_start, part_size);
+		break;
+	default:
+		res = -ENOENT;
+		break;
+	}
+
+out:
+	kfree(data);
 	return res;
 }
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 52cc746..9a3b479 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/blkdev.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/vfs.h>
@@ -66,6 +67,7 @@
 	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
 	mutex_init(&HFSPLUS_I(inode)->extents_lock);
 	HFSPLUS_I(inode)->flags = 0;
+	HFSPLUS_I(inode)->extent_state = 0;
 	HFSPLUS_I(inode)->rsrc_inode = NULL;
 	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
 
@@ -157,45 +159,65 @@
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct hfsplus_vh *vhdr = sbi->s_vhdr;
+	int write_backup = 0;
+	int error, error2;
+
+	if (!wait)
+		return 0;
 
 	dprint(DBG_SUPER, "hfsplus_write_super\n");
 
-	mutex_lock(&sbi->vh_mutex);
-	mutex_lock(&sbi->alloc_mutex);
 	sb->s_dirt = 0;
 
+	/*
+	 * Explicitly write out the special metadata inodes.
+	 *
+	 * While these special inodes are marked as hashed and written
+	 * out periodically by the flusher threads, we redirty them
+	 * during writeout of normal inodes, and the resulting livelock
+	 * prevents us from getting the latest state to disk.
+	 */
+	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
+	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
+	if (!error)
+		error = error2;
+	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
+	if (!error)
+		error = error2;
+
+	mutex_lock(&sbi->vh_mutex);
+	mutex_lock(&sbi->alloc_mutex);
 	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
 	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
 	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
 	vhdr->file_count = cpu_to_be32(sbi->file_count);
 
-	mark_buffer_dirty(sbi->s_vhbh);
 	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
-		if (sbi->sect_count) {
-			struct buffer_head *bh;
-			u32 block, offset;
-
-			block = sbi->blockoffset;
-			block += (sbi->sect_count - 2) >> (sb->s_blocksize_bits - 9);
-			offset = ((sbi->sect_count - 2) << 9) & (sb->s_blocksize - 1);
-			printk(KERN_DEBUG "hfs: backup: %u,%u,%u,%u\n",
-					  sbi->blockoffset, sbi->sect_count,
-					  block, offset);
-			bh = sb_bread(sb, block);
-			if (bh) {
-				vhdr = (struct hfsplus_vh *)(bh->b_data + offset);
-				if (be16_to_cpu(vhdr->signature) == HFSPLUS_VOLHEAD_SIG) {
-					memcpy(vhdr, sbi->s_vhdr, sizeof(*vhdr));
-					mark_buffer_dirty(bh);
-					brelse(bh);
-				} else
-					printk(KERN_WARNING "hfs: backup not found!\n");
-			}
-		}
+		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
+		write_backup = 1;
 	}
+
+	error2 = hfsplus_submit_bio(sb->s_bdev,
+				   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
+				   sbi->s_vhdr, WRITE_SYNC);
+	if (!error)
+		error = error2;
+	if (!write_backup)
+		goto out;
+
+	error2 = hfsplus_submit_bio(sb->s_bdev,
+				  sbi->part_start + sbi->sect_count - 2,
+				  sbi->s_backup_vhdr, WRITE_SYNC);
+	if (!error)
+		error = error2;
+out:
 	mutex_unlock(&sbi->alloc_mutex);
 	mutex_unlock(&sbi->vh_mutex);
-	return 0;
+
+	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
+		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
+
+	return error;
 }
 
 static void hfsplus_write_super(struct super_block *sb)
@@ -215,23 +237,22 @@
 	if (!sb->s_fs_info)
 		return;
 
-	if (sb->s_dirt)
-		hfsplus_write_super(sb);
 	if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
 		struct hfsplus_vh *vhdr = sbi->s_vhdr;
 
 		vhdr->modify_date = hfsp_now2mt();
 		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
 		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
-		mark_buffer_dirty(sbi->s_vhbh);
-		sync_dirty_buffer(sbi->s_vhbh);
+
+		hfsplus_sync_fs(sb, 1);
 	}
 
 	hfs_btree_close(sbi->cat_tree);
 	hfs_btree_close(sbi->ext_tree);
 	iput(sbi->alloc_file);
 	iput(sbi->hidden_dir);
-	brelse(sbi->s_vhbh);
+	kfree(sbi->s_vhdr);
+	kfree(sbi->s_backup_vhdr);
 	unload_nls(sbi->nls);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
@@ -263,26 +284,31 @@
 		return 0;
 	if (!(*flags & MS_RDONLY)) {
 		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
-		struct hfsplus_sb_info sbi;
+		int force = 0;
 
-		memset(&sbi, 0, sizeof(struct hfsplus_sb_info));
-		sbi.nls = HFSPLUS_SB(sb)->nls;
-		if (!hfsplus_parse_options(data, &sbi))
+		if (!hfsplus_parse_options_remount(data, &force))
 			return -EINVAL;
 
 		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
-			printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, "
-			       "running fsck.hfsplus is recommended.  leaving read-only.\n");
+			printk(KERN_WARNING "hfs: filesystem was "
+					"not cleanly unmounted, "
+					"running fsck.hfsplus is recommended.  "
+					"leaving read-only.\n");
 			sb->s_flags |= MS_RDONLY;
 			*flags |= MS_RDONLY;
-		} else if (test_bit(HFSPLUS_SB_FORCE, &sbi.flags)) {
+		} else if (force) {
 			/* nothing */
-		} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
-			printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n");
+		} else if (vhdr->attributes &
+				cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
+			printk(KERN_WARNING "hfs: filesystem is marked locked, "
+					"leaving read-only.\n");
 			sb->s_flags |= MS_RDONLY;
 			*flags |= MS_RDONLY;
-		} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
-			printk(KERN_WARNING "hfs: filesystem is marked journaled, leaving read-only.\n");
+		} else if (vhdr->attributes &
+				cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
+			printk(KERN_WARNING "hfs: filesystem is "
+					"marked journaled, "
+					"leaving read-only.\n");
 			sb->s_flags |= MS_RDONLY;
 			*flags |= MS_RDONLY;
 		}
@@ -372,17 +398,22 @@
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 
 	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
-		printk(KERN_WARNING "hfs: Filesystem was not cleanly unmounted, "
-		       "running fsck.hfsplus is recommended.  mounting read-only.\n");
+		printk(KERN_WARNING "hfs: Filesystem was "
+				"not cleanly unmounted, "
+				"running fsck.hfsplus is recommended.  "
+				"mounting read-only.\n");
 		sb->s_flags |= MS_RDONLY;
 	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
 		/* nothing */
 	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
 		printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
 		sb->s_flags |= MS_RDONLY;
-	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) && !(sb->s_flags & MS_RDONLY)) {
-		printk(KERN_WARNING "hfs: write access to a journaled filesystem is not supported, "
-		       "use the force option at your own risk, mounting read-only.\n");
+	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
+			!(sb->s_flags & MS_RDONLY)) {
+		printk(KERN_WARNING "hfs: write access to "
+				"a journaled filesystem is not supported, "
+				"use the force option at your own risk, "
+				"mounting read-only.\n");
 		sb->s_flags |= MS_RDONLY;
 	}
 
@@ -413,13 +444,13 @@
 		err = PTR_ERR(root);
 		goto cleanup;
 	}
+	sb->s_d_op = &hfsplus_dentry_operations;
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
 		iput(root);
 		err = -ENOMEM;
 		goto cleanup;
 	}
-	sb->s_root->d_op = &hfsplus_dentry_operations;
 
 	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
 	str.name = HFSP_HIDDENDIR_NAME;
@@ -449,19 +480,16 @@
 	be32_add_cpu(&vhdr->write_count, 1);
 	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
 	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
-	mark_buffer_dirty(sbi->s_vhbh);
-	sync_dirty_buffer(sbi->s_vhbh);
+	hfsplus_sync_fs(sb, 1);
 
 	if (!sbi->hidden_dir) {
-		printk(KERN_DEBUG "hfs: create hidden dir...\n");
-
 		mutex_lock(&sbi->vh_mutex);
 		sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
 		hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
 				   &str, sbi->hidden_dir);
 		mutex_unlock(&sbi->vh_mutex);
 
-		mark_inode_dirty(sbi->hidden_dir);
+		hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY);
 	}
 out:
 	unload_nls(sbi->nls);
@@ -488,9 +516,17 @@
 	return i ? &i->vfs_inode : NULL;
 }
 
+static void hfsplus_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
+}
+
 static void hfsplus_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
+	call_rcu(&inode->i_rcu, hfsplus_i_callback);
 }
 
 #define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index b66d67d..a3f0bfc 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -17,14 +17,14 @@
 /* Returns folded char, or 0 if ignorable */
 static inline u16 case_fold(u16 c)
 {
-        u16 tmp;
+	u16 tmp;
 
-        tmp = hfsplus_case_fold_table[c >> 8];
-        if (tmp)
-                tmp = hfsplus_case_fold_table[tmp + (c & 0xff)];
-        else
-                tmp = c;
-        return tmp;
+	tmp = hfsplus_case_fold_table[c >> 8];
+	if (tmp)
+		tmp = hfsplus_case_fold_table[tmp + (c & 0xff)];
+	else
+		tmp = c;
+	return tmp;
 }
 
 /* Compare unicode strings, return values like normal strcmp */
@@ -118,7 +118,9 @@
 	return NULL;
 }
 
-int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p)
+int hfsplus_uni2asc(struct super_block *sb,
+		const struct hfsplus_unistr *ustr,
+		char *astr, int *len_p)
 {
 	const hfsplus_unichr *ip;
 	struct nls_table *nls = HFSPLUS_SB(sb)->nls;
@@ -171,7 +173,8 @@
 				goto same;
 			c1 = be16_to_cpu(*ip);
 			if (likely(compose))
-				ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c1);
+				ce1 = hfsplus_compose_lookup(
+					hfsplus_compose_table, c1);
 			if (ce1)
 				break;
 			switch (c0) {
@@ -199,7 +202,8 @@
 		if (ce2) {
 			i = 1;
 			while (i < ustrlen) {
-				ce1 = hfsplus_compose_lookup(ce2, be16_to_cpu(ip[i]));
+				ce1 = hfsplus_compose_lookup(ce2,
+					be16_to_cpu(ip[i]));
 				if (!ce1)
 					break;
 				i++;
@@ -211,7 +215,7 @@
 				goto done;
 			}
 		}
-	same:
+same:
 		switch (c0) {
 		case 0:
 			cc = 0x2400;
@@ -222,7 +226,7 @@
 		default:
 			cc = c0;
 		}
-	done:
+done:
 		res = nls->uni2char(cc, op, len);
 		if (res < 0) {
 			if (res == -ENAMETOOLONG)
@@ -320,7 +324,8 @@
  * Composed unicode characters are decomposed and case-folding is performed
  * if the appropriate bits are (un)set on the superblock.
  */
-int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str)
+int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *str)
 {
 	struct super_block *sb = dentry->d_sb;
 	const char *astr;
@@ -363,9 +368,12 @@
  * Composed unicode characters are decomposed and case-folding is performed
  * if the appropriate bits are (un)set on the superblock.
  */
-int hfsplus_compare_dentry(struct dentry *dentry, struct qstr *s1, struct qstr *s2)
+int hfsplus_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	struct super_block *sb = dentry->d_sb;
+	struct super_block *sb = parent->d_sb;
 	int casefold, decompose, size;
 	int dsize1, dsize2, len1, len2;
 	const u16 *dstr1, *dstr2;
@@ -375,10 +383,10 @@
 
 	casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
 	decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
-	astr1 = s1->name;
-	len1 = s1->len;
-	astr2 = s2->name;
-	len2 = s2->len;
+	astr1 = str;
+	len1 = len;
+	astr2 = name->name;
+	len2 = name->len;
 	dsize1 = dsize2 = 0;
 	dstr1 = dstr2 = NULL;
 
@@ -388,7 +396,9 @@
 			astr1 += size;
 			len1 -= size;
 
-			if (!decompose || !(dstr1 = decompose_unichar(c, &dsize1))) {
+			if (decompose)
+				dstr1 = decompose_unichar(c, &dsize1);
+			if (!decompose || !dstr1) {
 				c1 = c;
 				dstr1 = &c1;
 				dsize1 = 1;
@@ -400,7 +410,9 @@
 			astr2 += size;
 			len2 -= size;
 
-			if (!decompose || !(dstr2 = decompose_unichar(c, &dsize2))) {
+			if (decompose)
+				dstr2 = decompose_unichar(c, &dsize2);
+			if (!decompose || !dstr2) {
 				c2 = c;
 				dstr2 = &c2;
 				dsize2 = 1;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 8972c20..1962317 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -24,6 +24,40 @@
 	u16 embed_count;
 };
 
+static void hfsplus_end_io_sync(struct bio *bio, int err)
+{
+	if (err)
+		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+	complete(bio->bi_private);
+}
+
+int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
+		void *data, int rw)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct bio *bio;
+
+	bio = bio_alloc(GFP_NOIO, 1);
+	bio->bi_sector = sector;
+	bio->bi_bdev = bdev;
+	bio->bi_end_io = hfsplus_end_io_sync;
+	bio->bi_private = &wait;
+
+	/*
+	 * We always submit one sector at a time, so bio_add_page must not fail.
+	 */
+	if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
+			 offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
+		BUG();
+
+	submit_bio(rw, bio);
+	wait_for_completion(&wait);
+
+	if (!bio_flagged(bio, BIO_UPTODATE))
+		return -EIO;
+	return 0;
+}
+
 static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
 {
 	u32 extent;
@@ -40,12 +74,14 @@
 	   !(attrib & HFSP_WRAP_ATTRIB_SPARED))
 		return 0;
 
-	wd->ablk_size = be32_to_cpu(*(__be32 *)(bufptr + HFSP_WRAPOFF_ABLKSIZE));
+	wd->ablk_size =
+		be32_to_cpu(*(__be32 *)(bufptr + HFSP_WRAPOFF_ABLKSIZE));
 	if (wd->ablk_size < HFSPLUS_SECTOR_SIZE)
 		return 0;
 	if (wd->ablk_size % HFSPLUS_SECTOR_SIZE)
 		return 0;
-	wd->ablk_start = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));
+	wd->ablk_start =
+		be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));
 
 	extent = get_unaligned_be32(bufptr + HFSP_WRAPOFF_EMBEDEXT);
 	wd->embed_start = (extent >> 16) & 0xFFFF;
@@ -68,7 +104,8 @@
 	if (HFSPLUS_SB(sb)->session >= 0) {
 		te.cdte_track = HFSPLUS_SB(sb)->session;
 		te.cdte_format = CDROM_LBA;
-		res = ioctl_by_bdev(sb->s_bdev, CDROMREADTOCENTRY, (unsigned long)&te);
+		res = ioctl_by_bdev(sb->s_bdev,
+			CDROMREADTOCENTRY, (unsigned long)&te);
 		if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
 			*start = (sector_t)te.cdte_addr.lba << 2;
 			return 0;
@@ -77,7 +114,8 @@
 		return -EINVAL;
 	}
 	ms_info.addr_format = CDROM_LBA;
-	res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
+	res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION,
+		(unsigned long)&ms_info);
 	if (!res && ms_info.xa_flag)
 		*start = (sector_t)ms_info.addr.lba << 2;
 	return 0;
@@ -88,100 +126,112 @@
 int hfsplus_read_wrapper(struct super_block *sb)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
-	struct buffer_head *bh;
-	struct hfsplus_vh *vhdr;
 	struct hfsplus_wd wd;
 	sector_t part_start, part_size;
 	u32 blocksize;
+	int error = 0;
 
+	error = -EINVAL;
 	blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
 	if (!blocksize)
-		return -EINVAL;
+		goto out;
 
 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
-		return -EINVAL;
+		goto out;
 	if ((u64)part_start + part_size > 0x100000000ULL) {
 		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-		return -EINVAL;
+		goto out;
 	}
-	while (1) {
-		bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
-		if (!bh)
-			return -EIO;
 
-		if (vhdr->signature == cpu_to_be16(HFSP_WRAP_MAGIC)) {
-			if (!hfsplus_read_mdb(vhdr, &wd))
-				goto error;
-			wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
-			part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
-			part_size = wd.embed_count * wd.ablk_size;
-			brelse(bh);
-			bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
-			if (!bh)
-				return -EIO;
-		}
-		if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
-			break;
-		if (vhdr->signature == cpu_to_be16(HFSPLUS_VOLHEAD_SIGX)) {
-			set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
-			break;
-		}
-		brelse(bh);
+	error = -ENOMEM;
+	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+	if (!sbi->s_vhdr)
+		goto out;
+	sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
+	if (!sbi->s_backup_vhdr)
+		goto out_free_vhdr;
 
-		/* check for a partition block
+reread:
+	error = hfsplus_submit_bio(sb->s_bdev,
+				   part_start + HFSPLUS_VOLHEAD_SECTOR,
+				   sbi->s_vhdr, READ);
+	if (error)
+		goto out_free_backup_vhdr;
+
+	error = -EINVAL;
+	switch (sbi->s_vhdr->signature) {
+	case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
+		set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
+		/*FALLTHRU*/
+	case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
+		break;
+	case cpu_to_be16(HFSP_WRAP_MAGIC):
+		if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
+			goto out_free_backup_vhdr;
+		wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
+		part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
+		part_size = wd.embed_count * wd.ablk_size;
+		goto reread;
+	default:
+		/*
+		 * Check for a partition block.
+		 *
 		 * (should do this only for cdrom/loop though)
 		 */
 		if (hfs_part_find(sb, &part_start, &part_size))
-			return -EINVAL;
+			goto out_free_backup_vhdr;
+		goto reread;
 	}
 
-	blocksize = be32_to_cpu(vhdr->blocksize);
-	brelse(bh);
+	error = hfsplus_submit_bio(sb->s_bdev,
+				   part_start + part_size - 2,
+				   sbi->s_backup_vhdr, READ);
+	if (error)
+		goto out_free_backup_vhdr;
 
-	/* block size must be at least as large as a sector
-	 * and a multiple of 2
+	error = -EINVAL;
+	if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
+		printk(KERN_WARNING
+			"hfs: invalid secondary volume header\n");
+		goto out_free_backup_vhdr;
+	}
+
+	blocksize = be32_to_cpu(sbi->s_vhdr->blocksize);
+
+	/*
+	 * Block size must be at least as large as a sector and a power of 2.
 	 */
-	if (blocksize < HFSPLUS_SECTOR_SIZE ||
-	    ((blocksize - 1) & blocksize))
-		return -EINVAL;
+	if (blocksize < HFSPLUS_SECTOR_SIZE || ((blocksize - 1) & blocksize))
+		goto out_free_backup_vhdr;
 	sbi->alloc_blksz = blocksize;
 	sbi->alloc_blksz_shift = 0;
 	while ((blocksize >>= 1) != 0)
 		sbi->alloc_blksz_shift++;
 	blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);
 
-	/* align block size to block offset */
+	/*
+	 * Align block size to block offset.
+	 */
 	while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
 		blocksize >>= 1;
 
 	if (sb_set_blocksize(sb, blocksize) != blocksize) {
-		printk(KERN_ERR "hfs: unable to set blocksize to %u!\n", blocksize);
-		return -EINVAL;
+		printk(KERN_ERR "hfs: unable to set blocksize to %u!\n",
+			blocksize);
+		goto out_free_backup_vhdr;
 	}
 
 	sbi->blockoffset =
 		part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
+	sbi->part_start = part_start;
 	sbi->sect_count = part_size;
 	sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
-
-	bh = sb_bread512(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, vhdr);
-	if (!bh)
-		return -EIO;
-
-	/* should still be the same... */
-	if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
-		if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
-			goto error;
-	} else {
-		if (vhdr->signature != cpu_to_be16(HFSPLUS_VOLHEAD_SIG))
-			goto error;
-	}
-
-	sbi->s_vhbh = bh;
-	sbi->s_vhdr = vhdr;
-
 	return 0;
- error:
-	brelse(bh);
-	return -EINVAL;
+
+out_free_backup_vhdr:
+	kfree(sbi->s_backup_vhdr);
+out_free_vhdr:
+	kfree(sbi->s_vhdr);
+out:
+	return error;
 }
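
hfsplus_submit_bio() above replaces the old sb_bread512() accesses with a synchronous one-sector bio: the caller passes a kmalloc()ed low-memory buffer (so virt_to_page()/offset_in_page() are valid), the helper waits on an on-stack completion, and -EIO is returned if the bio did not complete uptodate. A hedged usage sketch following the calling convention seen in wrapper.c and part_tbl.c (the surrounding function and its error label are hypothetical):

	void *buf;
	int err;

	buf = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);	/* not a stack buffer */
	if (!buf)
		return -ENOMEM;

	/* Read one 512-byte sector and wait for the bio to complete. */
	err = hfsplus_submit_bio(sb->s_bdev,
				 part_start + HFSPLUS_VOLHEAD_SECTOR,
				 buf, READ);
	if (err)
		goto out_free;

	/* ... examine the volume header in buf ... */

out_free:
	kfree(buf);
	return err;
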
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2c0f148..2638c834e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -32,7 +32,7 @@
 
 #define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
 
-static int hostfs_d_delete(struct dentry *dentry)
+static int hostfs_d_delete(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -92,12 +92,10 @@
 
 static char *__dentry_name(struct dentry *dentry, char *name)
 {
-	char *p = __dentry_path(dentry, name, PATH_MAX);
+	char *p = dentry_path_raw(dentry, name, PATH_MAX);
 	char *root;
 	size_t len;
 
-	spin_unlock(&dcache_lock);
-
 	root = dentry->d_sb->s_fs_info;
 	len = strlen(root);
 	if (IS_ERR(p)) {
@@ -123,25 +121,23 @@
 	if (!name)
 		return NULL;
 
-	spin_lock(&dcache_lock);
 	return __dentry_name(dentry, name); /* will unlock */
 }
 
 static char *inode_name(struct inode *ino)
 {
 	struct dentry *dentry;
-	char *name = __getname();
-	if (!name)
+	char *name;
+
+	dentry = d_find_alias(ino);
+	if (!dentry)
 		return NULL;
 
-	spin_lock(&dcache_lock);
-	if (list_empty(&ino->i_dentry)) {
-		spin_unlock(&dcache_lock);
-		__putname(name);
-		return NULL;
-	}
-	dentry = list_first_entry(&ino->i_dentry, struct dentry, d_alias);
-	return __dentry_name(dentry, name); /* will unlock */
+	name = dentry_name(dentry);
+
+	dput(dentry);
+
+	return name;
 }
 
 static char *follow_link(char *link)
@@ -251,9 +247,16 @@
 	}
 }
 
+static void hostfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kfree(HOSTFS_I(inode));
+}
+
 static void hostfs_destroy_inode(struct inode *inode)
 {
-	kfree(HOSTFS_I(inode));
+	call_rcu(&inode->i_rcu, hostfs_i_callback);
 }
 
 static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
@@ -609,7 +612,6 @@
 		goto out_put;
 
 	d_add(dentry, inode);
-	dentry->d_op = &hostfs_dentry_ops;
 	return NULL;
 
  out_put:
@@ -746,11 +748,14 @@
 	return err;
 }
 
-int hostfs_permission(struct inode *ino, int desired)
+int hostfs_permission(struct inode *ino, int desired, unsigned int flags)
 {
 	char *name;
 	int r = 0, w = 0, x = 0, err;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	if (desired & MAY_READ) r = 1;
 	if (desired & MAY_WRITE) w = 1;
 	if (desired & MAY_EXEC) x = 1;
@@ -765,7 +770,7 @@
 		err = access_file(name, r, w, x);
 	__putname(name);
 	if (!err)
-		err = generic_permission(ino, desired, NULL);
+		err = generic_permission(ino, desired, flags, NULL);
 	return err;
 }
 
@@ -916,6 +921,7 @@
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = HOSTFS_SUPER_MAGIC;
 	sb->s_op = &hostfs_sbops;
+	sb->s_d_op = &hostfs_dentry_ops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 
 	/* NULL is printed as <NULL> by sprintf: avoid that. */
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 67d9d36..05d4816 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -12,7 +12,8 @@
  * Note: the dentry argument is the parent dentry.
  */
 
-static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
+static int hpfs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	unsigned long	 hash;
 	int		 i;
@@ -34,29 +35,30 @@
 	return 0;
 }
 
-static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+static int hpfs_compare_dentry(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	unsigned al=a->len;
-	unsigned bl=b->len;
-	hpfs_adjust_length(a->name, &al);
+	unsigned al = len;
+	unsigned bl = name->len;
+
+	hpfs_adjust_length(str, &al);
 	/*hpfs_adjust_length(b->name, &bl);*/
-	/* 'a' is the qstr of an already existing dentry, so the name
-	 * must be valid. 'b' must be validated first.
+
+	/*
+	 * 'str' is the name of an already existing dentry, so the name
+	 * must be valid. 'name' must be validated first.
 	 */
 
-	if (hpfs_chk_name(b->name, &bl))
+	if (hpfs_chk_name(name->name, &bl))
 		return 1;
-	if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0))
+	if (hpfs_compare_names(parent->d_sb, str, al, name->name, bl, 0))
 		return 1;
 	return 0;
 }
 
-static const struct dentry_operations hpfs_dentry_operations = {
+const struct dentry_operations hpfs_dentry_operations = {
 	.d_hash		= hpfs_hash_dentry,
 	.d_compare	= hpfs_compare_dentry,
 };
-
-void hpfs_set_dentry_operations(struct dentry *dentry)
-{
-	dentry->d_op = &hpfs_dentry_operations;
-}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2338130..d32f63a 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -298,7 +298,6 @@
 
 	end:
 	end_add:
-	hpfs_set_dentry_operations(dentry);
 	unlock_kernel();
 	d_add(dentry, result);
 	return NULL;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 2fee17d..1c43dbe 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -233,7 +233,7 @@
 
 /* dentry.c */
 
-void hpfs_set_dentry_operations(struct dentry *);
+extern const struct dentry_operations hpfs_dentry_operations;
 
 /* dir.c */
 
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 11c2b40..f4ad9e3 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -419,7 +419,7 @@
 			unlock_kernel();
 			return -ENOSPC;
 		}
-		if (generic_permission(inode, MAY_WRITE, NULL) ||
+		if (generic_permission(inode, MAY_WRITE, 0, NULL) ||
 		    !S_ISREG(inode->i_mode) ||
 		    get_write_access(inode)) {
 			d_rehash(dentry);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 6c5f015..b30426b 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -177,9 +177,16 @@
 	return &ei->vfs_inode;
 }
 
+static void hpfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
+}
+
 static void hpfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
+	call_rcu(&inode->i_rcu, hpfs_i_callback);
 }
 
 static void init_once(void *foo)
@@ -543,6 +550,7 @@
 	/* Fill superblock stuff */
 	s->s_magic = HPFS_SUPER_MAGIC;
 	s->s_op = &hpfs_sops;
+	s->s_d_op = &hpfs_dentry_operations;
 
 	sbi->sb_root = superblock->root;
 	sbi->sb_fs_size = superblock->n_sectors;
@@ -644,7 +652,6 @@
 		iput(root);
 		goto bail0;
 	}
-	hpfs_set_dentry_operations(s->s_root);
 
 	/*
 	 * find the root directory's . pointer & finish filling in the inode
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index f702b5f..87ed48e 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -632,9 +632,16 @@
 	mntput(ino->i_sb->s_fs_info);
 }
 
+static void hppfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kfree(HPPFS_I(inode));
+}
+
 static void hppfs_destroy_inode(struct inode *inode)
 {
-	kfree(HPPFS_I(inode));
+	call_rcu(&inode->i_rcu, hppfs_i_callback);
 }
 
 static const struct super_operations hppfs_sbops = {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a5fe681..9885082 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -663,11 +663,18 @@
 	return &p->vfs_inode;
 }
 
+static void hugetlbfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
+}
+
 static void hugetlbfs_destroy_inode(struct inode *inode)
 {
 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
 	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
-	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
+	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
 }
 
 static const struct address_space_operations hugetlbfs_aops = {
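
Each ->destroy_inode conversion in this series (hfsplus, hostfs, hppfs, hugetlbfs and the generic code further down) follows the same template: rather than freeing the inode immediately, the filesystem queues an RCU callback so that lock-free rcu-walk path lookups, which may still be dereferencing the inode without holding a reference, keep seeing valid memory until a grace period has elapsed. Reduced to its skeleton, with hypothetical myfs_* names:

static void myfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	/* runs only after a grace period, so no rcu-walk user remains */
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
}

static void myfs_destroy_inode(struct inode *inode)
{
	/* defer the real free until outstanding RCU readers are done */
	call_rcu(&inode->i_rcu, myfs_i_callback);
}
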
diff --git a/fs/inode.c b/fs/inode.c
index ae2727a..da85e56 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -102,26 +102,29 @@
  */
 struct inodes_stat_t inodes_stat;
 
-static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
-static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(unsigned int, nr_inodes);
 
 static struct kmem_cache *inode_cachep __read_mostly;
 
-static inline int get_nr_inodes(void)
+static int get_nr_inodes(void)
 {
-	return percpu_counter_sum_positive(&nr_inodes);
+	int i;
+	int sum = 0;
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_inodes, i);
+	return sum < 0 ? 0 : sum;
 }
 
 static inline int get_nr_inodes_unused(void)
 {
-	return percpu_counter_sum_positive(&nr_inodes_unused);
+	return inodes_stat.nr_unused;
 }
 
 int get_nr_dirty_inodes(void)
 {
+	/* not actually dirty inodes, but a wild approximation */
 	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
 	return nr_dirty > 0 ? nr_dirty : 0;
-
 }
 
 /*
@@ -132,7 +135,6 @@
 		   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	inodes_stat.nr_inodes = get_nr_inodes();
-	inodes_stat.nr_unused = get_nr_inodes_unused();
 	return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -224,7 +226,7 @@
 	inode->i_fsnotify_mask = 0;
 #endif
 
-	percpu_counter_inc(&nr_inodes);
+	this_cpu_inc(nr_inodes);
 
 	return 0;
 out:
@@ -255,6 +257,12 @@
 	return inode;
 }
 
+void free_inode_nonrcu(struct inode *inode)
+{
+	kmem_cache_free(inode_cachep, inode);
+}
+EXPORT_SYMBOL(free_inode_nonrcu);
+
 void __destroy_inode(struct inode *inode)
 {
 	BUG_ON(inode_has_buffers(inode));
@@ -266,10 +274,17 @@
 	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
 		posix_acl_release(inode->i_default_acl);
 #endif
-	percpu_counter_dec(&nr_inodes);
+	this_cpu_dec(nr_inodes);
 }
 EXPORT_SYMBOL(__destroy_inode);
 
+static void i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(inode_cachep, inode);
+}
+
 static void destroy_inode(struct inode *inode)
 {
 	BUG_ON(!list_empty(&inode->i_lru));
@@ -277,7 +292,7 @@
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
 	else
-		kmem_cache_free(inode_cachep, (inode));
+		call_rcu(&inode->i_rcu, i_callback);
 }
 
 /*
@@ -335,7 +350,7 @@
 {
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode_lru);
-		percpu_counter_inc(&nr_inodes_unused);
+		inodes_stat.nr_unused++;
 	}
 }
 
@@ -343,7 +358,7 @@
 {
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
-		percpu_counter_dec(&nr_inodes_unused);
+		inodes_stat.nr_unused--;
 	}
 }
 
@@ -430,6 +445,7 @@
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
 	inode_sync_wait(inode);
+	/* don't need i_lock here, no concurrent mods to i_state */
 	inode->i_state = I_FREEING | I_CLEAR;
 }
 EXPORT_SYMBOL(end_writeback);
@@ -513,7 +529,7 @@
 		list_move(&inode->i_lru, &dispose);
 		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 	}
 	spin_unlock(&inode_lock);
 
@@ -554,7 +570,7 @@
 		list_move(&inode->i_lru, &dispose);
 		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 	}
 	spin_unlock(&inode_lock);
 
@@ -616,7 +632,7 @@
 		if (atomic_read(&inode->i_count) ||
 		    (inode->i_state & ~I_REFERENCED)) {
 			list_del_init(&inode->i_lru);
-			percpu_counter_dec(&nr_inodes_unused);
+			inodes_stat.nr_unused--;
 			continue;
 		}
 
@@ -650,7 +666,7 @@
 		 */
 		list_move(&inode->i_lru, &freeable);
 		list_del_init(&inode->i_wb_list);
-		percpu_counter_dec(&nr_inodes_unused);
+		inodes_stat.nr_unused--;
 	}
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -1648,8 +1664,6 @@
 					 SLAB_MEM_SPREAD),
 					 init_once);
 	register_shrinker(&icache_shrinker);
-	percpu_counter_init(&nr_inodes, 0);
-	percpu_counter_init(&nr_inodes_unused, 0);
 
 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)
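
The fs/inode.c hunk above trades the two percpu_counters for a raw per-CPU integer plus direct updates of inodes_stat.nr_unused: inode creation and destruction only touch the local CPU's slot via this_cpu_inc()/this_cpu_dec(), while the infrequent readers sum all possible CPUs and clamp the total at zero, accepting a slightly stale value in exchange for a cheaper fast path. A small user-space model of the same idea (an array indexed by CPU stands in for DEFINE_PER_CPU; names hypothetical):

#include <stdio.h>

#define NR_CPUS 4

/* One counter per CPU; only "CPU-local" code updates its own slot. */
static int nr_inodes[NR_CPUS];

static void inc_nr_inodes(int cpu)
{
	nr_inodes[cpu]++;		/* like this_cpu_inc(nr_inodes) */
}

static void dec_nr_inodes(int cpu)
{
	nr_inodes[cpu]--;		/* like this_cpu_dec(nr_inodes) */
}

/* Slow path: sum every CPU's slot.  Intermediate negatives are possible
 * when an inode is freed on a different CPU than it was allocated on,
 * so clamp the total at zero, just as get_nr_inodes() does. */
static int get_nr_inodes(void)
{
	int sum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		sum += nr_inodes[i];
	return sum < 0 ? 0 : sum;
}

int main(void)
{
	inc_nr_inodes(0);
	inc_nr_inodes(0);
	dec_nr_inodes(3);		/* freed on another "CPU" */
	printf("approx nr_inodes = %d\n", get_nr_inodes());	/* prints 1 */
	return 0;
}
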
diff --git a/fs/internal.h b/fs/internal.h
index e43b9a4..9687c2e 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -63,6 +63,7 @@
 
 extern void free_vfsmnt(struct vfsmount *);
 extern struct vfsmount *alloc_vfsmnt(const char *);
+extern unsigned int mnt_get_count(struct vfsmount *mnt);
 extern struct vfsmount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
 extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
 				struct vfsmount *);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index bfdeb82..a0f3833 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -26,16 +26,32 @@
 
 #define BEQUIET
 
-static int isofs_hashi(struct dentry *parent, struct qstr *qstr);
-static int isofs_hash(struct dentry *parent, struct qstr *qstr);
-static int isofs_dentry_cmpi(struct dentry *dentry, struct qstr *a, struct qstr *b);
-static int isofs_dentry_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b);
+static int isofs_hashi(const struct dentry *parent, const struct inode *inode,
+		struct qstr *qstr);
+static int isofs_hash(const struct dentry *parent, const struct inode *inode,
+		struct qstr *qstr);
+static int isofs_dentry_cmpi(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
+static int isofs_dentry_cmp(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
 
 #ifdef CONFIG_JOLIET
-static int isofs_hashi_ms(struct dentry *parent, struct qstr *qstr);
-static int isofs_hash_ms(struct dentry *parent, struct qstr *qstr);
-static int isofs_dentry_cmpi_ms(struct dentry *dentry, struct qstr *a, struct qstr *b);
-static int isofs_dentry_cmp_ms(struct dentry *dentry, struct qstr *a, struct qstr *b);
+static int isofs_hashi_ms(const struct dentry *parent, const struct inode *inode,
+		struct qstr *qstr);
+static int isofs_hash_ms(const struct dentry *parent, const struct inode *inode,
+		struct qstr *qstr);
+static int isofs_dentry_cmpi_ms(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
+static int isofs_dentry_cmp_ms(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name);
 #endif
 
 static void isofs_put_super(struct super_block *sb)
@@ -65,9 +81,16 @@
 	return &ei->vfs_inode;
 }
 
+static void isofs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
+}
+
 static void isofs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
+	call_rcu(&inode->i_rcu, isofs_i_callback);
 }
 
 static void init_once(void *foo)
@@ -160,7 +183,7 @@
  * Compute the hash for the isofs name corresponding to the dentry.
  */
 static int
-isofs_hash_common(struct dentry *dentry, struct qstr *qstr, int ms)
+isofs_hash_common(const struct dentry *dentry, struct qstr *qstr, int ms)
 {
 	const char *name;
 	int len;
@@ -181,7 +204,7 @@
  * Compute the hash for the isofs name corresponding to the dentry.
  */
 static int
-isofs_hashi_common(struct dentry *dentry, struct qstr *qstr, int ms)
+isofs_hashi_common(const struct dentry *dentry, struct qstr *qstr, int ms)
 {
 	const char *name;
 	int len;
@@ -206,100 +229,94 @@
 }
 
 /*
- * Case insensitive compare of two isofs names.
+ * Compare of two isofs names.
  */
-static int isofs_dentry_cmpi_common(struct dentry *dentry, struct qstr *a,
-				struct qstr *b, int ms)
+static int isofs_dentry_cmp_common(
+		unsigned int len, const char *str,
+		const struct qstr *name, int ms, int ci)
 {
 	int alen, blen;
 
 	/* A filename cannot end in '.' or we treat it like it has none */
-	alen = a->len;
-	blen = b->len;
+	alen = name->len;
+	blen = len;
 	if (ms) {
-		while (alen && a->name[alen-1] == '.')
+		while (alen && name->name[alen-1] == '.')
 			alen--;
-		while (blen && b->name[blen-1] == '.')
+		while (blen && str[blen-1] == '.')
 			blen--;
 	}
 	if (alen == blen) {
-		if (strnicmp(a->name, b->name, alen) == 0)
-			return 0;
-	}
-	return 1;
-}
-
-/*
- * Case sensitive compare of two isofs names.
- */
-static int isofs_dentry_cmp_common(struct dentry *dentry, struct qstr *a,
-					struct qstr *b, int ms)
-{
-	int alen, blen;
-
-	/* A filename cannot end in '.' or we treat it like it has none */
-	alen = a->len;
-	blen = b->len;
-	if (ms) {
-		while (alen && a->name[alen-1] == '.')
-			alen--;
-		while (blen && b->name[blen-1] == '.')
-			blen--;
-	}
-	if (alen == blen) {
-		if (strncmp(a->name, b->name, alen) == 0)
-			return 0;
+		if (ci) {
+			if (strnicmp(name->name, str, alen) == 0)
+				return 0;
+		} else {
+			if (strncmp(name->name, str, alen) == 0)
+				return 0;
+		}
 	}
 	return 1;
 }
 
 static int
-isofs_hash(struct dentry *dentry, struct qstr *qstr)
+isofs_hash(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	return isofs_hash_common(dentry, qstr, 0);
 }
 
 static int
-isofs_hashi(struct dentry *dentry, struct qstr *qstr)
+isofs_hashi(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	return isofs_hashi_common(dentry, qstr, 0);
 }
 
 static int
-isofs_dentry_cmp(struct dentry *dentry,struct qstr *a,struct qstr *b)
+isofs_dentry_cmp(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	return isofs_dentry_cmp_common(dentry, a, b, 0);
+	return isofs_dentry_cmp_common(len, str, name, 0, 0);
 }
 
 static int
-isofs_dentry_cmpi(struct dentry *dentry,struct qstr *a,struct qstr *b)
+isofs_dentry_cmpi(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	return isofs_dentry_cmpi_common(dentry, a, b, 0);
+	return isofs_dentry_cmp_common(len, str, name, 0, 1);
 }
 
 #ifdef CONFIG_JOLIET
 static int
-isofs_hash_ms(struct dentry *dentry, struct qstr *qstr)
+isofs_hash_ms(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	return isofs_hash_common(dentry, qstr, 1);
 }
 
 static int
-isofs_hashi_ms(struct dentry *dentry, struct qstr *qstr)
+isofs_hashi_ms(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	return isofs_hashi_common(dentry, qstr, 1);
 }
 
 static int
-isofs_dentry_cmp_ms(struct dentry *dentry,struct qstr *a,struct qstr *b)
+isofs_dentry_cmp_ms(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	return isofs_dentry_cmp_common(dentry, a, b, 1);
+	return isofs_dentry_cmp_common(len, str, name, 1, 0);
 }
 
 static int
-isofs_dentry_cmpi_ms(struct dentry *dentry,struct qstr *a,struct qstr *b)
+isofs_dentry_cmpi_ms(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	return isofs_dentry_cmpi_common(dentry, a, b, 1);
+	return isofs_dentry_cmp_common(len, str, name, 1, 1);
 }
 #endif
 
@@ -922,17 +939,18 @@
 		goto out_iput;
 	}
 
-	/* get the root dentry */
-	s->s_root = d_alloc_root(inode);
-	if (!(s->s_root))
-		goto out_no_root;
-
 	table = 0;
 	if (joliet_level)
 		table += 2;
 	if (opt.check == 'r')
 		table++;
-	s->s_root->d_op = &isofs_dentry_ops[table];
+
+	s->s_d_op = &isofs_dentry_ops[table];
+
+	/* get the root dentry */
+	s->s_root = d_alloc_root(inode);
+	if (!(s->s_root))
+		goto out_no_root;
 
 	kfree(opt.iocharset);
 
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 0d23abf..4fb3e80 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -37,7 +37,8 @@
 
 	qstr.name = compare;
 	qstr.len = dlen;
-	return dentry->d_op->d_compare(dentry, &dentry->d_name, &qstr);
+	return dentry->d_op->d_compare(NULL, NULL, NULL, NULL,
+			dentry->d_name.len, dentry->d_name.name, &qstr);
 }
 
 /*
@@ -171,8 +172,6 @@
 	struct inode *inode;
 	struct page *page;
 
-	dentry->d_op = dir->i_sb->s_root->d_op;
-
 	page = alloc_page(GFP_USER);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 846a3f3..5b2e4c3 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -207,7 +207,7 @@
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * journal_extend().
 	 */
 	if (__log_space_left(journal) < jbd_space_needed(journal)) {
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f837ba9..9e46869 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -43,6 +43,7 @@
 #include <linux/vmalloc.h>
 #include <linux/backing-dev.h>
 #include <linux/bitops.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>
@@ -93,6 +94,7 @@
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+EXPORT_SYMBOL(jbd2_inode_cache);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
@@ -827,7 +829,7 @@
 
 	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
 	if (!journal)
-		goto fail;
+		return NULL;
 
 	init_waitqueue_head(&journal->j_wait_transaction_locked);
 	init_waitqueue_head(&journal->j_wait_logspace);
@@ -852,14 +854,12 @@
 	err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
 	if (err) {
 		kfree(journal);
-		goto fail;
+		return NULL;
 	}
 
 	spin_lock_init(&journal->j_history_lock);
 
 	return journal;
-fail:
-	return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -1982,7 +1982,6 @@
 static struct journal_head *journal_alloc_journal_head(void)
 {
 	struct journal_head *ret;
-	static unsigned long last_warning;
 
 #ifdef CONFIG_JBD2_DEBUG
 	atomic_inc(&nr_journal_heads);
@@ -1990,11 +1989,7 @@
 	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
 	if (!ret) {
 		jbd_debug(1, "out of memory for journal_head\n");
-		if (time_after(jiffies, last_warning + 5*HZ)) {
-			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-			       __func__);
-			last_warning = jiffies;
-		}
+		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
 		while (!ret) {
 			yield();
 			ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@@ -2292,17 +2287,19 @@
 
 #endif
 
-struct kmem_cache *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
 static int __init journal_init_handle_cache(void)
 {
-	jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
+	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
 	if (jbd2_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
+		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
+	if (jbd2_inode_cache == NULL) {
+		printk(KERN_EMERG "JBD2: failed to create inode cache\n");
+		kmem_cache_destroy(jbd2_handle_cache);
 		return -ENOMEM;
 	}
 	return 0;
@@ -2312,6 +2309,9 @@
 {
 	if (jbd2_handle_cache)
 		kmem_cache_destroy(jbd2_handle_cache);
+	if (jbd2_inode_cache)
+		kmem_cache_destroy(jbd2_inode_cache);
+
 }
 
 /*
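
In jbd2, the open-coded kmem_cache_create() call is replaced by the KMEM_CACHE() helper, a second slab cache for struct jbd2_inode is added with proper unwinding on failure, and the hand-rolled 5-second warning throttle becomes pr_notice_ratelimited(). A small sketch of the same idioms, using made-up demo_* names:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/ratelimit.h>

struct demo_handle { int h_ref; };	/* stand-in types for KMEM_CACHE() */
struct demo_inode  { int i_flags; };

static struct kmem_cache *demo_handle_cache, *demo_inode_cache;

static int __init demo_init_caches(void)
{
	/* KMEM_CACHE() derives the cache name, size and alignment from the
	 * struct, so the call site cannot get them out of sync. */
	demo_handle_cache = KMEM_CACHE(demo_handle, SLAB_TEMPORARY);
	if (!demo_handle_cache)
		return -ENOMEM;

	demo_inode_cache = KMEM_CACHE(demo_inode, 0);
	if (!demo_inode_cache) {
		/* Unwind the first cache so a failed init leaks nothing. */
		kmem_cache_destroy(demo_handle_cache);
		return -ENOMEM;
	}
	return 0;
}

static void *demo_alloc_retry(void)
{
	void *p;

	while (!(p = kmem_cache_alloc(demo_handle_cache, GFP_NOFS))) {
		/* At most a few lines per interval instead of a flood. */
		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
		yield();
	}
	return p;
}
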
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 2bc4d5f..1cad869 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -299,10 +299,10 @@
 #ifdef CONFIG_JBD2_DEBUG
 		int dropped = info.end_transaction - 
 			be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
 		jbd_debug(1,
 			  "JBD: ignoring %d transaction%s from the journal.\n",
 			  dropped, (dropped == 1) ? "" : "s");
+#endif
 		journal->j_transaction_sequence = ++info.end_transaction;
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6bf0a24..faad2bd 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -251,7 +251,7 @@
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * jbd2_journal_extend().
 	 */
 	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
@@ -340,9 +340,7 @@
 		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
-		goto out;
 	}
-out:
 	return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);
@@ -589,7 +587,7 @@
 	transaction = handle->h_transaction;
 	journal = transaction->t_journal;
 
-	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
 	JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -774,7 +772,7 @@
 		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
 			    "Possible IO failure.\n");
 		page = jh2bh(jh)->b_page;
-		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+		offset = offset_in_page(jh2bh(jh)->b_data);
 		source = kmap_atomic(page, KM_USER0);
 		/* Fire data frozen trigger just before we copy the data */
 		jbd2_buffer_frozen_trigger(jh, source + offset,
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 54a92fd..95b7967 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -259,11 +259,14 @@
 	return rc;
 }
 
-int jffs2_check_acl(struct inode *inode, int mask)
+int jffs2_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
 	struct posix_acl *acl;
 	int rc;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
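
The ->check_acl() hook now receives a flags argument so it can refuse to run during RCU path walk: fetching an ACL may block, which is not allowed under rcu_read_lock(), so the callback returns -ECHILD and the VFS retries in ref-walk mode. A sketch of the generic shape, with demo_* names standing in for the filesystem-specific pieces:

#include <linux/fs.h>
#include <linux/posix_acl.h>

/* Stand-in for the filesystem-specific ACL fetch (normally an xattr read);
 * the real thing may block, which is exactly why the RCU-walk case must
 * bail out before reaching it. */
static struct posix_acl *demo_get_acl(struct inode *inode, int type)
{
	return NULL;
}

static int demo_check_acl(struct inode *inode, int mask, unsigned int flags)
{
	struct posix_acl *acl;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;		/* cannot sleep under rcu_read_lock() */

	acl = demo_get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}
	return -EAGAIN;			/* no ACL: fall back to mode bits */
}
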
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 5e42de8..3119f59 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -26,7 +26,7 @@
 
 #ifdef CONFIG_JFFS2_FS_POSIX_ACL
 
-extern int jffs2_check_acl(struct inode *, int);
+extern int jffs2_check_acl(struct inode *, int, unsigned int);
 extern int jffs2_acl_chmod(struct inode *);
 extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
 extern int jffs2_init_acl_post(struct inode *);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index c86041b..853b8e3 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -40,9 +40,16 @@
 	return &f->vfs_inode;
 }
 
+static void jffs2_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+}
+
 static void jffs2_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+	call_rcu(&inode->i_rcu, jffs2_i_callback);
 }
 
 static void jffs2_i_init_once(void *foo)
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 1057a49..e5de942 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -114,10 +114,14 @@
 	return rc;
 }
 
-int jfs_check_acl(struct inode *inode, int mask)
+int jfs_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
+	struct posix_acl *acl;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
 	if (acl) {
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index 54e0755..f9285c4 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -20,7 +20,7 @@
 
 #ifdef CONFIG_JFS_POSIX_ACL
 
-int jfs_check_acl(struct inode *, int);
+int jfs_check_acl(struct inode *, int, unsigned int flags);
 int jfs_init_acl(tid_t, struct inode *, struct inode *);
 int jfs_acl_chmod(struct inode *inode);
 
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index e1b8493..278e3fb 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1120,16 +1120,13 @@
 	 * file systems to log may have n-to-1 relationship;
 	 */
 
-	bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
+	bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				 log);
 	if (IS_ERR(bdev)) {
 		rc = -PTR_ERR(bdev);
 		goto free;
 	}
 
-	if ((rc = bd_claim(bdev, log))) {
-		goto close;
-	}
-
 	log->bdev = bdev;
 	memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));
 
@@ -1137,7 +1134,7 @@
 	 * initialize log:
 	 */
 	if ((rc = lmLogInit(log)))
-		goto unclaim;
+		goto close;
 
 	list_add(&log->journal_list, &jfs_external_logs);
 
@@ -1163,11 +1160,8 @@
 	list_del(&log->journal_list);
 	lbmLogShutdown(log);
 
-      unclaim:
-	bd_release(bdev);
-
       close:		/* close external log device */
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 
       free:		/* free log descriptor */
 	mutex_unlock(&jfs_log_mutex);
@@ -1512,8 +1506,7 @@
 	bdev = log->bdev;
 	rc = lmLogShutdown(log);
 
-	bd_release(bdev);
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 
 	kfree(log);
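
The jfs log code is converted to the newer exclusive-open API: blkdev_get_by_dev() with FMODE_EXCL and a holder cookie replaces the open_by_devnum() + bd_claim() pair, and the matching blkdev_put() must pass FMODE_EXCL again to drop the claim. A reduced sketch (the demo_* wrappers are illustrative):

#include <linux/fs.h>
#include <linux/blkdev.h>

/* The exclusive claim is requested at open time with FMODE_EXCL plus a
 * holder cookie, replacing the old open_by_devnum() + bd_claim() pair. */
static struct block_device *demo_open_log(dev_t devt, void *holder)
{
	return blkdev_get_by_dev(devt, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				 holder);
}

static void demo_close_log(struct block_device *bdev)
{
	/* FMODE_EXCL must be repeated here so the claim is dropped too. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
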
 
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 231ca4a..81ead85 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/fs.h>
+#include <linux/namei.h>
 #include <linux/ctype.h>
 #include <linux/quotaops.h>
 #include <linux/exportfs.h>
@@ -1464,9 +1465,6 @@
 
 	jfs_info("jfs_lookup: name = %s", name);
 
-	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
-		dentry->d_op = &jfs_ci_dentry_operations;
-
 	if ((name[0] == '.') && (len == 1))
 		inum = dip->i_ino;
 	else if (strcmp(name, "..") == 0)
@@ -1491,12 +1489,7 @@
 		return ERR_CAST(ip);
 	}
 
-	dentry = d_splice_alias(ip, dentry);
-
-	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
-		dentry->d_op = &jfs_ci_dentry_operations;
-
-	return dentry;
+	return d_splice_alias(ip, dentry);
 }
 
 static struct inode *jfs_nfs_get_inode(struct super_block *sb,
@@ -1573,7 +1566,8 @@
 	.llseek		= generic_file_llseek,
 };
 
-static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
+static int jfs_ci_hash(const struct dentry *dir, const struct inode *inode,
+		struct qstr *this)
 {
 	unsigned long hash;
 	int i;
@@ -1586,32 +1580,63 @@
 	return 0;
 }
 
-static int jfs_ci_compare(struct dentry *dir, struct qstr *a, struct qstr *b)
+static int jfs_ci_compare(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
 	int i, result = 1;
 
-	if (a->len != b->len)
+	if (len != name->len)
 		goto out;
-	for (i=0; i < a->len; i++) {
-		if (tolower(a->name[i]) != tolower(b->name[i]))
+	for (i=0; i < len; i++) {
+		if (tolower(str[i]) != tolower(name->name[i]))
 			goto out;
 	}
 	result = 0;
-
-	/*
-	 * We want creates to preserve case.  A negative dentry, a, that
-	 * has a different case than b may cause a new entry to be created
-	 * with the wrong case.  Since we can't tell if a comes from a negative
-	 * dentry, we blindly replace it with b.  This should be harmless if
-	 * a is not a negative dentry.
-	 */
-	memcpy((unsigned char *)a->name, b->name, a->len);
 out:
 	return result;
 }
 
+static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+	/*
+	 * This is not a negative dentry, so it is always valid.
+	 *
+	 * Note that rename() onto an existing directory entry will have
+	 * ->d_inode set, and will keep the existing name rather than the
+	 * name specified by the user.
+	 *
+	 * We could drop this positive dentry here, but dropping a positive
+	 * dentry is not a good idea, so renames such as
+	 * rename("filename", "FILENAME") stay unsupported for now.
+	 */
+	if (dentry->d_inode)
+		return 1;
+
+	/*
+	 * The caller may be nfsd (or something else without a nameidata),
+	 * so the intent cannot be seen here.  Since this could be for a
+	 * creation, drop the dentry.
+	 */
+	if (!nd)
+		return 0;
+
+	/*
+	 * Drop the negative dentry, in order to make sure to use the
+	 * case sensitive name which is specified by user if this is
+	 * for creation.
+	 */
+	if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) {
+		if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+			return 0;
+	}
+	return 1;
+}
+
 const struct dentry_operations jfs_ci_dentry_operations =
 {
 	.d_hash = jfs_ci_hash,
 	.d_compare = jfs_ci_compare,
+	.d_revalidate = jfs_ci_revalidate,
 };
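
For a case-insensitive filesystem like jfs with JFS_OS2, the three callbacks work as a set: d_hash() folds case so lookups of differently-cased names land on the same hash chain, d_compare() folds case when matching, and d_revalidate() drops negative dentries on creation so the user-supplied case is what ends up on disk. A sketch of a case-folding hash with the new prototype (demo_ci_hash is an invented name; the matching compare and revalidate would sit next to it):

#include <linux/dcache.h>
#include <linux/ctype.h>

/* Case-folding hash with the new const prototype, so that "FOO" and "foo"
 * land on the same hash chain before d_compare() folds case again. */
static int demo_ci_hash(const struct dentry *dentry, const struct inode *inode,
		struct qstr *q)
{
	unsigned long hash = init_name_hash();
	unsigned int i;

	for (i = 0; i < q->len; i++)
		hash = partial_name_hash(tolower(q->name[i]), hash);
	q->hash = end_name_hash(hash);
	return 0;
}
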
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 0669fc1..eeca48a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -115,6 +115,14 @@
 	return &jfs_inode->vfs_inode;
 }
 
+static void jfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	struct jfs_inode_info *ji = JFS_IP(inode);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(jfs_inode_cachep, ji);
+}
+
 static void jfs_destroy_inode(struct inode *inode)
 {
 	struct jfs_inode_info *ji = JFS_IP(inode);
@@ -128,7 +136,7 @@
 		ji->active_ag = -1;
 	}
 	spin_unlock_irq(&ji->ag_lock);
-	kmem_cache_free(jfs_inode_cachep, ji);
+	call_rcu(&inode->i_rcu, jfs_i_callback);
 }
 
 static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -507,6 +515,9 @@
 
 	sb->s_magic = JFS_SUPER_MAGIC;
 
+	if (sbi->mntflag & JFS_OS2)
+		sb->s_d_op = &jfs_ci_dentry_operations;
+
 	inode = jfs_iget(sb, ROOT_I);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
@@ -516,9 +527,6 @@
 	if (!sb->s_root)
 		goto out_no_root;
 
-	if (sbi->mntflag & JFS_OS2)
-		sb->s_root->d_op = &jfs_ci_dentry_operations;
-
 	/* logical blocks are represented by 40 bits in pxd_t, etc. */
 	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
 #if BITS_PER_LONG == 32
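
The super.c hunk shows why s_d_op is assigned before d_alloc_root(): with per-dentry d_op assignment gone from the lookup paths, the superblock default is copied into each dentry at allocation time, so it must already be set when the root dentry is created. A minimal fill_super ordering sketch with invented demo_* names:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/err.h>

static const struct dentry_operations demo_dentry_ops = { };

/* Stand-in for the filesystem-specific root inode setup. */
static struct inode *demo_iget(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	return inode ? inode : ERR_PTR(-ENOMEM);
}

/* s_d_op is copied into dentries as they are allocated, so it has to be in
 * place before d_alloc_root() creates the root dentry. */
static int demo_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *root;

	sb->s_d_op = &demo_dentry_ops;		/* before any dentry exists */

	root = demo_iget(sb);
	if (IS_ERR(root))
		return PTR_ERR(root);

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		return -ENOMEM;
	}
	return 0;
}
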
diff --git a/fs/libfs.c b/fs/libfs.c
index a3accdf..c88eab5 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -16,6 +16,11 @@
 
 #include <asm/uaccess.h>
 
+static inline int simple_positive(struct dentry *dentry)
+{
+	return dentry->d_inode && !d_unhashed(dentry);
+}
+
 int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		   struct kstat *stat)
 {
@@ -37,7 +42,7 @@
  * Retaining negative dentries for an in-memory filesystem just wastes
  * memory and lookup time: arrange for them to be deleted immediately.
  */
-static int simple_delete_dentry(struct dentry *dentry)
+static int simple_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -54,7 +59,7 @@
 
 	if (dentry->d_name.len > NAME_MAX)
 		return ERR_PTR(-ENAMETOOLONG);
-	dentry->d_op = &simple_dentry_operations;
+	d_set_d_op(dentry, &simple_dentry_operations);
 	d_add(dentry, NULL);
 	return NULL;
 }
@@ -76,7 +81,8 @@
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 {
-	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
+	struct dentry *dentry = file->f_path.dentry;
+	mutex_lock(&dentry->d_inode->i_mutex);
 	switch (origin) {
 		case 1:
 			offset += file->f_pos;
@@ -84,7 +90,7 @@
 			if (offset >= 0)
 				break;
 		default:
-			mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+			mutex_unlock(&dentry->d_inode->i_mutex);
 			return -EINVAL;
 	}
 	if (offset != file->f_pos) {
@@ -94,21 +100,24 @@
 			struct dentry *cursor = file->private_data;
 			loff_t n = file->f_pos - 2;
 
-			spin_lock(&dcache_lock);
+			spin_lock(&dentry->d_lock);
+			/* d_lock not required for cursor */
 			list_del(&cursor->d_u.d_child);
-			p = file->f_path.dentry->d_subdirs.next;
-			while (n && p != &file->f_path.dentry->d_subdirs) {
+			p = dentry->d_subdirs.next;
+			while (n && p != &dentry->d_subdirs) {
 				struct dentry *next;
 				next = list_entry(p, struct dentry, d_u.d_child);
-				if (!d_unhashed(next) && next->d_inode)
+				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+				if (simple_positive(next))
 					n--;
+				spin_unlock(&next->d_lock);
 				p = p->next;
 			}
 			list_add_tail(&cursor->d_u.d_child, p);
-			spin_unlock(&dcache_lock);
+			spin_unlock(&dentry->d_lock);
 		}
 	}
-	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
+	mutex_unlock(&dentry->d_inode->i_mutex);
 	return offset;
 }
 
@@ -148,29 +157,35 @@
 			i++;
 			/* fallthrough */
 		default:
-			spin_lock(&dcache_lock);
+			spin_lock(&dentry->d_lock);
 			if (filp->f_pos == 2)
 				list_move(q, &dentry->d_subdirs);
 
 			for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
 				struct dentry *next;
 				next = list_entry(p, struct dentry, d_u.d_child);
-				if (d_unhashed(next) || !next->d_inode)
+				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+				if (!simple_positive(next)) {
+					spin_unlock(&next->d_lock);
 					continue;
+				}
 
-				spin_unlock(&dcache_lock);
+				spin_unlock(&next->d_lock);
+				spin_unlock(&dentry->d_lock);
 				if (filldir(dirent, next->d_name.name, 
 					    next->d_name.len, filp->f_pos, 
 					    next->d_inode->i_ino, 
 					    dt_type(next->d_inode)) < 0)
 					return 0;
-				spin_lock(&dcache_lock);
+				spin_lock(&dentry->d_lock);
+				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
 				/* next is still alive */
 				list_move(q, p);
+				spin_unlock(&next->d_lock);
 				p = q;
 				filp->f_pos++;
 			}
-			spin_unlock(&dcache_lock);
+			spin_unlock(&dentry->d_lock);
 	}
 	return 0;
 }
@@ -202,7 +217,8 @@
  * will never be mountable)
  */
 struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
-	const struct super_operations *ops, unsigned long magic)
+	const struct super_operations *ops,
+	const struct dentry_operations *dops, unsigned long magic)
 {
 	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
 	struct dentry *dentry;
@@ -239,6 +255,7 @@
 	dentry->d_parent = dentry;
 	d_instantiate(dentry, root);
 	s->s_root = dentry;
+	s->s_d_op = dops;
 	s->s_flags |= MS_ACTIVE;
 	return dget(s->s_root);
 
@@ -259,23 +276,23 @@
 	return 0;
 }
 
-static inline int simple_positive(struct dentry *dentry)
-{
-	return dentry->d_inode && !d_unhashed(dentry);
-}
-
 int simple_empty(struct dentry *dentry)
 {
 	struct dentry *child;
 	int ret = 0;
 
-	spin_lock(&dcache_lock);
-	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
-		if (simple_positive(child))
+	spin_lock(&dentry->d_lock);
+	list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
+		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+		if (simple_positive(child)) {
+			spin_unlock(&child->d_lock);
 			goto out;
+		}
+		spin_unlock(&child->d_lock);
+	}
 	ret = 1;
 out:
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
 	return ret;
 }
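
The libfs changes are part of removing the global dcache_lock: a parent's d_subdirs list is now walked under the parent's d_lock, each child is inspected under its own d_lock taken with the DENTRY_D_LOCK_NESTED subclass, and simple_positive() is factored out for reuse. A hedged sketch of the locking pattern (demo_count_positive is not a kernel function):

#include <linux/dcache.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* With dcache_lock gone, the parent's child list is protected by the
 * parent's d_lock, and each child is examined under its own d_lock taken
 * with the DENTRY_D_LOCK_NESTED subclass. */
static int demo_count_positive(struct dentry *parent)
{
	struct dentry *child;
	int count = 0;

	spin_lock(&parent->d_lock);
	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
		if (child->d_inode && !d_unhashed(child))
			count++;
		spin_unlock(&child->d_lock);
	}
	spin_unlock(&parent->d_lock);
	return count;
}
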
 
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 97f6073..ca58d64 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_LOCKD) += lockd.o
 
-lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
-	        svcproc.o svcsubs.o mon.o xdr.o grace.o
-lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+	        svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o
+lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
 lockd-objs		      := $(lockd-objs-y)
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
new file mode 100644
index 0000000..f848b52
--- /dev/null
+++ b/fs/lockd/clnt4xdr.c
@@ -0,0 +1,605 @@
+/*
+ * linux/fs/lockd/clnt4xdr.c
+ *
+ * XDR functions to encode/decode NLM version 4 RPC arguments and results.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle.  All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
+#  error "NLM host name cannot be larger than NLM's maximum string length!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * number of 32bit-words
+ */
+#define NLM4_void_sz		(0)
+#define NLM4_cookie_sz		(1+(NLM_MAXCOOKIELEN>>2))
+#define NLM4_caller_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_owner_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_fhandle_sz		(1+(NFS3_FHSIZE>>2))
+#define NLM4_lock_sz		(5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz)
+#define NLM4_holder_sz		(6+NLM4_owner_sz)
+
+#define NLM4_testargs_sz	(NLM4_cookie_sz+1+NLM4_lock_sz)
+#define NLM4_lockargs_sz	(NLM4_cookie_sz+4+NLM4_lock_sz)
+#define NLM4_cancargs_sz	(NLM4_cookie_sz+2+NLM4_lock_sz)
+#define NLM4_unlockargs_sz	(NLM4_cookie_sz+NLM4_lock_sz)
+
+#define NLM4_testres_sz		(NLM4_cookie_sz+1+NLM4_holder_sz)
+#define NLM4_res_sz		(NLM4_cookie_sz+1)
+#define NLM4_norep_sz		(0)
+
+
+static s64 loff_t_to_s64(loff_t offset)
+{
+	s64 res;
+
+	if (offset >= NLM4_OFFSET_MAX)
+		res = NLM4_OFFSET_MAX;
+	else if (offset <= -NLM4_OFFSET_MAX)
+		res = -NLM4_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+static void nlm4_compute_offsets(const struct nlm_lock *lock,
+				 u64 *l_offset, u64 *l_len)
+{
+	const struct file_lock *fl = &lock->fl;
+
+	BUG_ON(fl->fl_start > NLM4_OFFSET_MAX);
+	BUG_ON(fl->fl_end > NLM4_OFFSET_MAX &&
+				fl->fl_end != OFFSET_MAX);
+
+	*l_offset = loff_t_to_s64(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		*l_len = 0;
+	else
+		*l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+	dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv4 basic data types
+ *
+ * Basic NLMv4 data types are defined in Appendix II, section 6.1.4
+ * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter
+ * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
+}
+
+/*
+ *	typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+			  const u8 *data, const unsigned int length)
+{
+	__be32 *p;
+
+	BUG_ON(length > XDR_MAX_NETOBJ);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+			 struct xdr_netobj *obj)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > XDR_MAX_NETOBJ))
+		goto out_size;
+	obj->len = length;
+	obj->data = (u8 *)p;
+	return 0;
+out_size:
+	dprintk("NFS: returned netobj was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+			  const struct nlm_cookie *cookie)
+{
+	BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+	encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+			     struct nlm_cookie *cookie)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	/* apparently HPUX can return empty cookies */
+	if (length == 0)
+		goto out_hpux;
+	if (length > NLM_MAXCOOKIELEN)
+		goto out_size;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	cookie->len = length;
+	memcpy(cookie->data, p, length);
+	return 0;
+out_hpux:
+	cookie->len = 4;
+	memset(cookie->data, 0, 4);
+	return 0;
+out_size:
+	dprintk("NFS: returned cookie was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	BUG_ON(fh->size > NFS3_FHSIZE);
+	encode_netobj(xdr, (u8 *)&fh->data, fh->size);
+}
+
+/*
+ *	enum nlm4_stats {
+ *		NLM4_GRANTED = 0,
+ *		NLM4_DENIED = 1,
+ *		NLM4_DENIED_NOLOCKS = 2,
+ *		NLM4_BLOCKED = 3,
+ *		NLM4_DENIED_GRACE_PERIOD = 4,
+ *		NLM4_DEADLCK = 5,
+ *		NLM4_ROFS = 6,
+ *		NLM4_STALE_FH = 7,
+ *		NLM4_FBIG = 8,
+ *		NLM4_FAILED = 9
+ *	};
+ *
+ *	struct nlm4_stat {
+ *		nlm4_stats stat;
+ *	};
+ *
+ * NB: we don't swap bytes for the NLM status values.  The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+static void encode_nlm4_stat(struct xdr_stream *xdr,
+			     const __be32 stat)
+{
+	__be32 *p;
+
+	BUG_ON(be32_to_cpu(stat) > NLM_FAILED);
+	p = xdr_reserve_space(xdr, 4);
+	*p = stat;
+}
+
+static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (unlikely(*p > nlm4_failed))
+		goto out_bad_xdr;
+	*stat = *p;
+	return 0;
+out_bad_xdr:
+	dprintk("%s: server returned invalid nlm4_stats value: %u\n",
+			__func__, be32_to_cpup(p));
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	struct nlm4_holder {
+ *		bool	exclusive;
+ *		int32	svid;
+ *		netobj	oh;
+ *		uint64	l_offset;
+ *		uint64	l_len;
+ *	};
+ */
+static void encode_nlm4_holder(struct xdr_stream *xdr,
+			       const struct nlm_res *result)
+{
+	const struct nlm_lock *lock = &result->lock;
+	u64 l_offset, l_len;
+	__be32 *p;
+
+	encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+	encode_int32(xdr, lock->svid);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	nlm4_compute_offsets(lock, &l_offset, &l_len);
+	p = xdr_encode_hyper(p, l_offset);
+	xdr_encode_hyper(p, l_len);
+}
+
+static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+	struct nlm_lock *lock = &result->lock;
+	struct file_lock *fl = &lock->fl;
+	u64 l_offset, l_len;
+	u32 exclusive;
+	int error;
+	__be32 *p;
+	s32 end;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(fl);
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	exclusive = be32_to_cpup(p++);
+	lock->svid = be32_to_cpup(p);
+	fl->fl_pid = (pid_t)lock->svid;
+
+	error = decode_netobj(xdr, &lock->oh);
+	if (unlikely(error))
+		goto out;
+
+	p = xdr_inline_decode(xdr, 8 + 8);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+	p = xdr_decode_hyper(p, &l_offset);
+	xdr_decode_hyper(p, &l_len);
+	end = l_offset + l_len - 1;
+
+	fl->fl_start = (loff_t)l_offset;
+	if (l_len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = (loff_t)end;
+	error = 0;
+out:
+	return error;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+	/* NB: client-side does not set lock->len */
+	u32 length = strlen(name);
+	__be32 *p;
+
+	BUG_ON(length > NLM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+/*
+ *	struct nlm4_lock {
+ *		string	caller_name<LM_MAXSTRLEN>;
+ *		netobj	fh;
+ *		netobj	oh;
+ *		int32	svid;
+ *		uint64	l_offset;
+ *		uint64	l_len;
+ *	};
+ */
+static void encode_nlm4_lock(struct xdr_stream *xdr,
+			     const struct nlm_lock *lock)
+{
+	u64 l_offset, l_len;
+	__be32 *p;
+
+	encode_caller_name(xdr, lock->caller);
+	encode_fh(xdr, &lock->fh);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 8 + 8);
+	*p++ = cpu_to_be32(lock->svid);
+
+	nlm4_compute_offsets(lock, &l_offset, &l_len);
+	p = xdr_encode_hyper(p, l_offset);
+	xdr_encode_hyper(p, l_len);
+}
+
+
+/*
+ * NLMv4 XDR encode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	struct nlm4_testargs {
+ *		netobj cookie;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_lockargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *		bool reclaim;
+ *		int state;
+ *	};
+ */
+static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+	encode_bool(xdr, args->reclaim);
+	encode_int32(xdr, args->state);
+}
+
+/*
+ *	struct nlm4_cancargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_unlockargs {
+ *		netobj cookie;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_res {
+ *		netobj cookie;
+ *		nlm4_stat stat;
+ *	};
+ */
+static void nlm4_xdr_enc_res(struct rpc_rqst *req,
+			     struct xdr_stream *xdr,
+			     const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm4_stat(xdr, result->status);
+}
+
+/*
+ *	union nlm4_testrply switch (nlm4_stats stat) {
+ *	case NLM4_DENIED:
+ *		struct nlm4_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm4_testres {
+ *		netobj cookie;
+ *		nlm4_testrply test_stat;
+ *	};
+ */
+static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm4_stat(xdr, result->status);
+	if (result->status == nlm_lck_denied)
+		encode_nlm4_holder(xdr, result);
+}
+
+
+/*
+ * NLMv4 XDR decode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	union nlm4_testrply switch (nlm4_stats stat) {
+ *	case NLM4_DENIED:
+ *		struct nlm4_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm4_testres {
+ *		netobj cookie;
+ *		nlm4_testrply test_stat;
+ *	};
+ */
+static int decode_nlm4_testrply(struct xdr_stream *xdr,
+				struct nlm_res *result)
+{
+	int error;
+
+	error = decode_nlm4_stat(xdr, &result->status);
+	if (unlikely(error))
+		goto out;
+	if (result->status == nlm_lck_denied)
+		error = decode_nlm4_holder(xdr, result);
+out:
+	return error;
+}
+
+static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm4_testrply(xdr, result);
+out:
+	return error;
+}
+
+/*
+ *	struct nlm4_res {
+ *		netobj cookie;
+ *		nlm4_stat stat;
+ *	};
+ */
+static int nlm4_xdr_dec_res(struct rpc_rqst *req,
+			    struct xdr_stream *xdr,
+			    struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm4_stat(xdr, &result->status);
+out:
+	return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm4_xdr_dec_norep	NULL
+
+#define PROC(proc, argtype, restype)					\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdreproc_t)nlm4_xdr_enc_##argtype,		\
+	.p_decode    = (kxdrdproc_t)nlm4_xdr_dec_##restype,		\
+	.p_arglen    = NLM4_##argtype##_sz,				\
+	.p_replen    = NLM4_##restype##_sz,				\
+	.p_statidx   = NLMPROC_##proc,					\
+	.p_name      = #proc,						\
+	}
+
+static struct rpc_procinfo	nlm4_procedures[] = {
+	PROC(TEST,		testargs,	testres),
+	PROC(LOCK,		lockargs,	res),
+	PROC(CANCEL,		cancargs,	res),
+	PROC(UNLOCK,		unlockargs,	res),
+	PROC(GRANTED,		testargs,	res),
+	PROC(TEST_MSG,		testargs,	norep),
+	PROC(LOCK_MSG,		lockargs,	norep),
+	PROC(CANCEL_MSG,	cancargs,	norep),
+	PROC(UNLOCK_MSG,	unlockargs,	norep),
+	PROC(GRANTED_MSG,	testargs,	norep),
+	PROC(TEST_RES,		testres,	norep),
+	PROC(LOCK_RES,		res,		norep),
+	PROC(CANCEL_RES,	res,		norep),
+	PROC(UNLOCK_RES,	res,		norep),
+	PROC(GRANTED_RES,	res,		norep),
+};
+
+struct rpc_version	nlm_version4 = {
+	.number		= 4,
+	.nrprocs	= ARRAY_SIZE(nlm4_procedures),
+	.procs		= nlm4_procedures,
+};
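
The new client-only XDR file is built entirely from small helpers over the xdr_stream API: encoders reserve buffer space with xdr_reserve_space(), decoders pull data with xdr_inline_decode() and treat a short receive buffer as -EIO, and the PROC() macro wires each procedure's encoder, decoder and buffer sizes into the rpc_procinfo table. A toy encode/decode pair in the same style (the demo_* names are illustrative):

#include <linux/sunrpc/xdr.h>

/* The send buffer is sized from the *_sz constants, so encoders in this
 * style do not check the reservation; decoders must handle truncation. */
static void demo_encode_u32(struct xdr_stream *xdr, u32 value)
{
	__be32 *p = xdr_reserve_space(xdr, 4);

	*p = cpu_to_be32(value);
}

static int demo_decode_u32(struct xdr_stream *xdr, u32 *value)
{
	__be32 *p = xdr_inline_decode(xdr, 4);

	if (unlikely(p == NULL))
		return -EIO;		/* truncated reply */
	*value = be32_to_cpup(p);
	return 0;
}
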
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 25509eb..8d4ea83 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -79,7 +79,7 @@
  */
 void nlmclnt_done(struct nlm_host *host)
 {
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
@@ -273,7 +273,7 @@
 	spin_unlock(&nlm_blocked_lock);
 
 	/* Release host handle after use */
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 	return 0;
 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 332c54c..adb45ec 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -58,7 +58,7 @@
 		return;
 	list_del(&lockowner->list);
 	spin_unlock(&lockowner->host->h_lock);
-	nlm_release_host(lockowner->host);
+	nlmclnt_release_host(lockowner->host);
 	kfree(lockowner);
 }
 
@@ -207,22 +207,22 @@
 		printk("nlm_alloc_call: failed, waiting for memory\n");
 		schedule_timeout_interruptible(5*HZ);
 	}
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	return NULL;
 }
 
-void nlm_release_call(struct nlm_rqst *call)
+void nlmclnt_release_call(struct nlm_rqst *call)
 {
 	if (!atomic_dec_and_test(&call->a_count))
 		return;
-	nlm_release_host(call->a_host);
+	nlmclnt_release_host(call->a_host);
 	nlmclnt_release_lockargs(call);
 	kfree(call);
 }
 
 static void nlmclnt_rpc_release(void *data)
 {
-	nlm_release_call(data);
+	nlmclnt_release_call(data);
 }
 
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -436,7 +436,7 @@
 			status = nlm_stat_to_errno(req->a_res.status);
 	}
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -593,7 +593,7 @@
 out_unblock:
 	nlmclnt_finish_block(block);
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 out_unlock:
 	/* Fatal error: ensure that we remove the lock altogether */
@@ -694,7 +694,7 @@
 	/* What to do now? I'm out of my depth... */
 	status = -ENOLCK;
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -755,7 +755,7 @@
 			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 	if (status == 0 && req->a_res.status == nlm_lck_denied)
 		status = -ENOLCK;
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
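
The clntproc.c changes rename the call/host release helpers to nlmclnt_* to make the client-side ownership explicit; the release itself is the usual last-reference pattern. A stripped-down sketch of that pattern with invented demo_* types:

#include <asm/atomic.h>
#include <linux/slab.h>

/* The real code pins a struct nlm_host and drops it with
 * nlmclnt_release_host() when the last reference to the call goes away. */
struct demo_call {
	atomic_t	a_count;
	void		*a_host;
};

static void demo_put_host(void *host)
{
	/* stand-in for nlmclnt_release_host() */
}

static void demo_release_call(struct demo_call *call)
{
	if (!atomic_dec_and_test(&call->a_count))
		return;			/* other references remain */
	demo_put_host(call->a_host);
	kfree(call);
}
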
 
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
new file mode 100644
index 0000000..180ac34
--- /dev/null
+++ b/fs/lockd/clntxdr.c
@@ -0,0 +1,627 @@
+/*
+ * linux/fs/lockd/clntxdr.c
+ *
+ * XDR functions to encode/decode NLM version 3 RPC arguments and results.
+ * NLM version 3 is backwards compatible with NLM versions 1 and 2.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle.  All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * number of 32bit-words
+ */
+#define NLM_cookie_sz		(1+(NLM_MAXCOOKIELEN>>2))
+#define NLM_caller_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM_owner_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM_fhandle_sz		(1+(NFS2_FHSIZE>>2))
+#define NLM_lock_sz		(3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz)
+#define NLM_holder_sz		(4+NLM_owner_sz)
+
+#define NLM_testargs_sz		(NLM_cookie_sz+1+NLM_lock_sz)
+#define NLM_lockargs_sz		(NLM_cookie_sz+4+NLM_lock_sz)
+#define NLM_cancargs_sz		(NLM_cookie_sz+2+NLM_lock_sz)
+#define NLM_unlockargs_sz	(NLM_cookie_sz+NLM_lock_sz)
+
+#define NLM_testres_sz		(NLM_cookie_sz+1+NLM_holder_sz)
+#define NLM_res_sz		(NLM_cookie_sz+1)
+#define NLM_norep_sz		(0)
+
+
+static s32 loff_t_to_s32(loff_t offset)
+{
+	s32 res;
+
+	if (offset >= NLM_OFFSET_MAX)
+		res = NLM_OFFSET_MAX;
+	else if (offset <= -NLM_OFFSET_MAX)
+		res = -NLM_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+static void nlm_compute_offsets(const struct nlm_lock *lock,
+				u32 *l_offset, u32 *l_len)
+{
+	const struct file_lock *fl = &lock->fl;
+
+	BUG_ON(fl->fl_start > NLM_OFFSET_MAX);
+	BUG_ON(fl->fl_end > NLM_OFFSET_MAX &&
+				fl->fl_end != OFFSET_MAX);
+
+	*l_offset = loff_t_to_s32(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		*l_len = 0;
+	else
+		*l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+	dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv3 basic data types
+ *
+ * Basic NLMv3 data types are not defined in an IETF standards
+ * document.  X/Open has a description of these data types that
+ * is useful.  See Chapter 10 of "Protocols for Interworking:
+ * XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
+}
+
+/*
+ *	typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+			  const u8 *data, const unsigned int length)
+{
+	__be32 *p;
+
+	BUG_ON(length > XDR_MAX_NETOBJ);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+			 struct xdr_netobj *obj)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > XDR_MAX_NETOBJ))
+		goto out_size;
+	obj->len = length;
+	obj->data = (u8 *)p;
+	return 0;
+out_size:
+	dprintk("NFS: returned netobj was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+			  const struct nlm_cookie *cookie)
+{
+	BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+	encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+			 struct nlm_cookie *cookie)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	/* apparently HPUX can return empty cookies */
+	if (length == 0)
+		goto out_hpux;
+	if (length > NLM_MAXCOOKIELEN)
+		goto out_size;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	cookie->len = length;
+	memcpy(cookie->data, p, length);
+	return 0;
+out_hpux:
+	cookie->len = 4;
+	memset(cookie->data, 0, 4);
+	return 0;
+out_size:
+	dprintk("NFS: returned cookie was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	BUG_ON(fh->size != NFS2_FHSIZE);
+	encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE);
+}
+
+/*
+ *	enum nlm_stats {
+ *		LCK_GRANTED = 0,
+ *		LCK_DENIED = 1,
+ *		LCK_DENIED_NOLOCKS = 2,
+ *		LCK_BLOCKED = 3,
+ *		LCK_DENIED_GRACE_PERIOD = 4
+ *	};
+ *
+ *
+ *	struct nlm_stat {
+ *		nlm_stats stat;
+ *	};
+ *
+ * NB: we don't swap bytes for the NLM status values.  The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+
+static void encode_nlm_stat(struct xdr_stream *xdr,
+			    const __be32 stat)
+{
+	__be32 *p;
+
+	BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+	p = xdr_reserve_space(xdr, 4);
+	*p = stat;
+}
+
+static int decode_nlm_stat(struct xdr_stream *xdr,
+			   __be32 *stat)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (unlikely(*p > nlm_lck_denied_grace_period))
+		goto out_enum;
+	*stat = *p;
+	return 0;
+out_enum:
+	dprintk("%s: server returned invalid nlm_stats value: %u\n",
+		__func__, be32_to_cpup(p));
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	struct nlm_holder {
+ *		bool exclusive;
+ *		int uppid;
+ *		netobj oh;
+ *		unsigned l_offset;
+ *		unsigned l_len;
+ *	};
+ */
+static void encode_nlm_holder(struct xdr_stream *xdr,
+			      const struct nlm_res *result)
+{
+	const struct nlm_lock *lock = &result->lock;
+	u32 l_offset, l_len;
+	__be32 *p;
+
+	encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+	encode_int32(xdr, lock->svid);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	nlm_compute_offsets(lock, &l_offset, &l_len);
+	*p++ = cpu_to_be32(l_offset);
+	*p   = cpu_to_be32(l_len);
+}
+
+static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+	struct nlm_lock *lock = &result->lock;
+	struct file_lock *fl = &lock->fl;
+	u32 exclusive, l_offset, l_len;
+	int error;
+	__be32 *p;
+	s32 end;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(fl);
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	exclusive = be32_to_cpup(p++);
+	lock->svid = be32_to_cpup(p);
+	fl->fl_pid = (pid_t)lock->svid;
+
+	error = decode_netobj(xdr, &lock->oh);
+	if (unlikely(error))
+		goto out;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+	l_offset = be32_to_cpup(p++);
+	l_len = be32_to_cpup(p);
+	end = l_offset + l_len - 1;
+
+	fl->fl_start = (loff_t)l_offset;
+	if (l_len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = (loff_t)end;
+	error = 0;
+out:
+	return error;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+	/* NB: client-side does not set lock->len */
+	u32 length = strlen(name);
+	__be32 *p;
+
+	BUG_ON(length > NLM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+/*
+ *	struct nlm_lock {
+ *		string caller_name<LM_MAXSTRLEN>;
+ *		netobj fh;
+ *		netobj oh;
+ *		int uppid;
+ *		unsigned l_offset;
+ *		unsigned l_len;
+ *	};
+ */
+static void encode_nlm_lock(struct xdr_stream *xdr,
+			    const struct nlm_lock *lock)
+{
+	u32 l_offset, l_len;
+	__be32 *p;
+
+	encode_caller_name(xdr, lock->caller);
+	encode_fh(xdr, &lock->fh);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(lock->svid);
+
+	nlm_compute_offsets(lock, &l_offset, &l_len);
+	*p++ = cpu_to_be32(l_offset);
+	*p   = cpu_to_be32(l_len);
+}
+
+
+/*
+ * NLMv3 XDR encode functions
+ *
+ * NLMv3 argument types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	struct nlm_testargs {
+ *		netobj cookie;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_lockargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *		bool reclaim;
+ *		int state;
+ *	};
+ */
+static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+	encode_bool(xdr, args->reclaim);
+	encode_int32(xdr, args->state);
+}
+
+/*
+ *	struct nlm_cancargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_unlockargs {
+ *		netobj cookie;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_res {
+ *		netobj cookie;
+ *		nlm_stat stat;
+ *	};
+ */
+static void nlm_xdr_enc_res(struct rpc_rqst *req,
+			    struct xdr_stream *xdr,
+			    const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm_stat(xdr, result->status);
+}
+
+/*
+ *	union nlm_testrply switch (nlm_stats stat) {
+ *	case LCK_DENIED:
+ *		struct nlm_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm_testres {
+ *		netobj cookie;
+ *		nlm_testrply test_stat;
+ *	};
+ */
+static void encode_nlm_testrply(struct xdr_stream *xdr,
+				const struct nlm_res *result)
+{
+	if (result->status == nlm_lck_denied)
+		encode_nlm_holder(xdr, result);
+}
+
+static void nlm_xdr_enc_testres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm_stat(xdr, result->status);
+	encode_nlm_testrply(xdr, result);
+}
+
+
+/*
+ * NLMv3 XDR decode functions
+ *
+ * NLMv3 result types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	union nlm_testrply switch (nlm_stats stat) {
+ *	case LCK_DENIED:
+ *		struct nlm_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm_testres {
+ *		netobj cookie;
+ *		nlm_testrply test_stat;
+ *	};
+ */
+static int decode_nlm_testrply(struct xdr_stream *xdr,
+			       struct nlm_res *result)
+{
+	int error;
+
+	error = decode_nlm_stat(xdr, &result->status);
+	if (unlikely(error))
+		goto out;
+	if (result->status == nlm_lck_denied)
+		error = decode_nlm_holder(xdr, result);
+out:
+	return error;
+}
+
+static int nlm_xdr_dec_testres(struct rpc_rqst *req,
+			       struct xdr_stream *xdr,
+			       struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm_testrply(xdr, result);
+out:
+	return error;
+}
+
+/*
+ *	struct nlm_res {
+ *		netobj cookie;
+ *		nlm_stat stat;
+ *	};
+ */
+static int nlm_xdr_dec_res(struct rpc_rqst *req,
+			   struct xdr_stream *xdr,
+			   struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm_stat(xdr, &result->status);
+out:
+	return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm_xdr_dec_norep	NULL
+
+#define PROC(proc, argtype, restype)	\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdreproc_t)nlm_xdr_enc_##argtype,		\
+	.p_decode    = (kxdrdproc_t)nlm_xdr_dec_##restype,		\
+	.p_arglen    = NLM_##argtype##_sz,				\
+	.p_replen    = NLM_##restype##_sz,				\
+	.p_statidx   = NLMPROC_##proc,					\
+	.p_name      = #proc,						\
+	}
+
+static struct rpc_procinfo	nlm_procedures[] = {
+	PROC(TEST,		testargs,	testres),
+	PROC(LOCK,		lockargs,	res),
+	PROC(CANCEL,		cancargs,	res),
+	PROC(UNLOCK,		unlockargs,	res),
+	PROC(GRANTED,		testargs,	res),
+	PROC(TEST_MSG,		testargs,	norep),
+	PROC(LOCK_MSG,		lockargs,	norep),
+	PROC(CANCEL_MSG,	cancargs,	norep),
+	PROC(UNLOCK_MSG,	unlockargs,	norep),
+	PROC(GRANTED_MSG,	testargs,	norep),
+	PROC(TEST_RES,		testres,	norep),
+	PROC(LOCK_RES,		res,		norep),
+	PROC(CANCEL_RES,	res,		norep),
+	PROC(UNLOCK_RES,	res,		norep),
+	PROC(GRANTED_RES,	res,		norep),
+};
+
+static struct rpc_version	nlm_version1 = {
+		.number		= 1,
+		.nrprocs	= ARRAY_SIZE(nlm_procedures),
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	nlm_version3 = {
+		.number		= 3,
+		.nrprocs	= ARRAY_SIZE(nlm_procedures),
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	*nlm_versions[] = {
+	[1] = &nlm_version1,
+	[3] = &nlm_version3,
+#ifdef CONFIG_LOCKD_V4
+	[4] = &nlm_version4,
+#endif
+};
+
+static struct rpc_stat		nlm_rpc_stats;
+
+struct rpc_program		nlm_program = {
+		.name		= "lockd",
+		.number		= NLM_PROGRAM,
+		.nrvers		= ARRAY_SIZE(nlm_versions),
+		.version	= nlm_versions,
+		.stats		= &nlm_rpc_stats,
+};
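
The testres encoder above is a discriminated union: encode_nlm_testrply() emits the holder body only when the status arm is nlm_lck_denied, otherwise nothing follows the status word. A minimal user-space sketch of the same idea, with illustrative names and status values rather than the kernel's types:

#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LCK_GRANTED	0
#define LCK_DENIED	1

struct holder {
	uint32_t exclusive;
	uint32_t svid;
};

/* Append one XDR word (32-bit big-endian) to the buffer. */
static size_t put_u32(unsigned char *buf, size_t off, uint32_t v)
{
	uint32_t be = htonl(v);

	memcpy(buf + off, &be, sizeof(be));
	return off + sizeof(be);
}

/* Encode the union: status word, then the holder only for LCK_DENIED. */
static size_t encode_testrply(unsigned char *buf, size_t off,
			      uint32_t status, const struct holder *h)
{
	off = put_u32(buf, off, status);
	if (status == LCK_DENIED) {
		off = put_u32(buf, off, h->exclusive);
		off = put_u32(buf, off, h->svid);
	}
	return off;
}

int main(void)
{
	unsigned char buf[64];
	struct holder h = { .exclusive = 1, .svid = 42 };
	size_t len;

	len = encode_testrply(buf, 0, LCK_DENIED, &h);
	printf("denied reply is %zu bytes\n", len);	/* 12 */
	len = encode_testrply(buf, 0, LCK_GRANTED, &h);
	printf("granted reply is %zu bytes\n", len);	/* 4 */
	return 0;
}
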
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ed0c59f..5f1bcb2 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -25,9 +25,22 @@
 #define NLM_HOST_EXPIRE		(300 * HZ)
 #define NLM_HOST_COLLECT	(120 * HZ)
 
-static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];
+
+#define for_each_host(host, pos, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry_safe((host), (pos), (next), \
+						(chain), h_hash)
+
 static unsigned long		next_gc;
-static int			nrhosts;
+static unsigned long		nrhosts;
 static DEFINE_MUTEX(nlm_host_mutex);
 
 static void			nlm_gc_hosts(void);
@@ -40,8 +53,6 @@
 	const u32		version;	/* NLM version to search for */
 	const char		*hostname;	/* remote's hostname */
 	const size_t		hostname_len;	/* it's length */
-	const struct sockaddr	*src_sap;	/* our address (optional) */
-	const size_t		src_len;	/* it's length */
 	const int		noresvport;	/* use non-priv port */
 };
 
@@ -88,127 +99,83 @@
 }
 
 /*
- * Common host lookup routine for server & client
+ * Allocate and initialize an nlm_host.  Common to both client and server.
  */
-static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
+static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+				       struct nsm_handle *nsm)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
-	struct nlm_host	*host;
-	struct nsm_handle *nsm = NULL;
+	struct nlm_host *host = NULL;
+	unsigned long now = jiffies;
 
-	mutex_lock(&nlm_host_mutex);
-
-	if (time_after_eq(jiffies, next_gc))
-		nlm_gc_hosts();
-
-	/* We may keep several nlm_host objects for a peer, because each
-	 * nlm_host is identified by
-	 * (address, protocol, version, server/client)
-	 * We could probably simplify this a little by putting all those
-	 * different NLM rpc_clients into one single nlm_host object.
-	 * This would allow us to have one nlm_host per address.
-	 */
-	chain = &nlm_hosts[nlm_hash_address(ni->sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
-		if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
-			continue;
-
-		/* See if we have an NSM handle for this client */
-		if (!nsm)
-			nsm = host->h_nsmhandle;
-
-		if (host->h_proto != ni->protocol)
-			continue;
-		if (host->h_version != ni->version)
-			continue;
-		if (host->h_server != ni->server)
-			continue;
-		if (ni->server && ni->src_len != 0 &&
-		    !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
-			continue;
-
-		/* Move to head of hash chain. */
-		hlist_del(&host->h_hash);
-		hlist_add_head(&host->h_hash, chain);
-
-		nlm_get_host(host);
-		dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
-				host->h_name, host->h_addrbuf);
-		goto out;
-	}
-
-	/*
-	 * The host wasn't in our hash table.  If we don't
-	 * have an NSM handle for it yet, create one.
-	 */
-	if (nsm)
+	if (nsm != NULL)
 		atomic_inc(&nsm->sm_count);
 	else {
 		host = NULL;
 		nsm = nsm_get_handle(ni->sap, ni->salen,
 					ni->hostname, ni->hostname_len);
-		if (!nsm) {
-			dprintk("lockd: nlm_lookup_host failed; "
-				"no nsm handle\n");
+		if (unlikely(nsm == NULL)) {
+			dprintk("lockd: %s failed; no nsm handle\n",
+				__func__);
 			goto out;
 		}
 	}
 
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
-	if (!host) {
+	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	if (unlikely(host == NULL)) {
+		dprintk("lockd: %s failed; no memory\n", __func__);
 		nsm_release(nsm);
-		dprintk("lockd: nlm_lookup_host failed; no memory\n");
 		goto out;
 	}
-	host->h_name	   = nsm->sm_name;
-	host->h_addrbuf    = nsm->sm_addrbuf;
+
 	memcpy(nlm_addr(host), ni->sap, ni->salen);
-	host->h_addrlen = ni->salen;
+	host->h_addrlen    = ni->salen;
 	rpc_set_port(nlm_addr(host), 0);
-	memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
-	host->h_srcaddrlen = ni->src_len;
+	host->h_srcaddrlen = 0;
+
+	host->h_rpcclnt    = NULL;
+	host->h_name	   = nsm->sm_name;
 	host->h_version    = ni->version;
 	host->h_proto      = ni->protocol;
-	host->h_rpcclnt    = NULL;
-	mutex_init(&host->h_mutex);
-	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
-	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
-	atomic_set(&host->h_count, 1);
+	host->h_reclaiming = 0;
+	host->h_server     = ni->server;
+	host->h_noresvport = ni->noresvport;
+	host->h_inuse      = 0;
 	init_waitqueue_head(&host->h_gracewait);
 	init_rwsem(&host->h_rwsem);
-	host->h_state      = 0;			/* pseudo NSM state */
-	host->h_nsmstate   = 0;			/* real NSM state */
-	host->h_nsmhandle  = nsm;
-	host->h_server	   = ni->server;
-	host->h_noresvport = ni->noresvport;
-	hlist_add_head(&host->h_hash, chain);
+	host->h_state      = 0;
+	host->h_nsmstate   = 0;
+	host->h_pidcount   = 0;
+	atomic_set(&host->h_count, 1);
+	mutex_init(&host->h_mutex);
+	host->h_nextrebind = now + NLM_HOST_REBIND;
+	host->h_expires    = now + NLM_HOST_EXPIRE;
 	INIT_LIST_HEAD(&host->h_lockowners);
 	spin_lock_init(&host->h_lock);
 	INIT_LIST_HEAD(&host->h_granted);
 	INIT_LIST_HEAD(&host->h_reclaim);
-
-	nrhosts++;
-
-	dprintk("lockd: nlm_lookup_host created host %s\n",
-			host->h_name);
+	host->h_nsmhandle  = nsm;
+	host->h_addrbuf    = nsm->sm_addrbuf;
 
 out:
-	mutex_unlock(&nlm_host_mutex);
 	return host;
 }
 
 /*
- * Destroy a host
+ * Destroy an nlm_host and free associated resources
+ *
+ * Caller must hold nlm_host_mutex.
  */
-static void
-nlm_destroy_host(struct nlm_host *host)
+static void nlm_destroy_host_locked(struct nlm_host *host)
 {
 	struct rpc_clnt	*clnt;
 
+	dprintk("lockd: destroy host %s\n", host->h_name);
+
 	BUG_ON(!list_empty(&host->h_lockowners));
 	BUG_ON(atomic_read(&host->h_count));
 
+	hlist_del_init(&host->h_hash);
+
 	nsm_unmonitor(host);
 	nsm_release(host->h_nsmhandle);
 
@@ -216,6 +183,8 @@
 	if (clnt != NULL)
 		rpc_shutdown_client(clnt);
 	kfree(host);
+
+	nrhosts--;
 }
 
 /**
@@ -249,12 +218,76 @@
 		.hostname_len	= strlen(hostname),
 		.noresvport	= noresvport,
 	};
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host;
+	struct nsm_handle *nsm = NULL;
 
 	dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
 			(hostname ? hostname : "<none>"), version,
 			(protocol == IPPROTO_UDP ? "udp" : "tcp"));
 
-	return nlm_lookup_host(&ni);
+	mutex_lock(&nlm_host_mutex);
+
+	chain = &nlm_client_hosts[nlm_hash_address(sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != protocol)
+			continue;
+		if (host->h_version != version)
+			continue;
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n", __func__,
+			host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n", __func__,
+		host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmclnt_release_host - release client nlm_host
+ * @host: nlm_host to release
+ *
+ */
+void nlmclnt_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release client host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(host->h_server);
+
+	if (atomic_dec_and_test(&host->h_count)) {
+		BUG_ON(!list_empty(&host->h_lockowners));
+		BUG_ON(!list_empty(&host->h_granted));
+		BUG_ON(!list_empty(&host->h_reclaim));
+
+		mutex_lock(&nlm_host_mutex);
+		nlm_destroy_host_locked(host);
+		mutex_unlock(&nlm_host_mutex);
+	}
 }
 
 /**
@@ -279,12 +312,18 @@
 				    const char *hostname,
 				    const size_t hostname_len)
 {
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host = NULL;
+	struct nsm_handle *nsm = NULL;
 	struct sockaddr_in sin = {
 		.sin_family	= AF_INET,
 	};
 	struct sockaddr_in6 sin6 = {
 		.sin6_family	= AF_INET6,
 	};
+	struct sockaddr *src_sap;
+	size_t src_len = rqstp->rq_addrlen;
 	struct nlm_lookup_host_info ni = {
 		.server		= 1,
 		.sap		= svc_addr(rqstp),
@@ -293,27 +332,91 @@
 		.version	= rqstp->rq_vers,
 		.hostname	= hostname,
 		.hostname_len	= hostname_len,
-		.src_len	= rqstp->rq_addrlen,
 	};
 
 	dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
 			(int)hostname_len, hostname, rqstp->rq_vers,
 			(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
 
+	mutex_lock(&nlm_host_mutex);
+
 	switch (ni.sap->sa_family) {
 	case AF_INET:
 		sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
-		ni.src_sap = (struct sockaddr *)&sin;
+		src_sap = (struct sockaddr *)&sin;
 		break;
 	case AF_INET6:
 		ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
-		ni.src_sap = (struct sockaddr *)&sin6;
+		src_sap = (struct sockaddr *)&sin6;
 		break;
 	default:
-		return NULL;
+		dprintk("lockd: %s failed; unrecognized address family\n",
+			__func__);
+		goto out;
 	}
 
-	return nlm_lookup_host(&ni);
+	if (time_after_eq(jiffies, next_gc))
+		nlm_gc_hosts();
+
+	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != ni.protocol)
+			continue;
+		if (host->h_version != ni.version)
+			continue;
+		if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap))
+			continue;
+
+		/* Move to head of hash chain. */
+		hlist_del(&host->h_hash);
+		hlist_add_head(&host->h_hash, chain);
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n",
+			__func__, host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	memcpy(nlm_srcaddr(host), src_sap, src_len);
+	host->h_srcaddrlen = src_len;
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n",
+		__func__, host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmsvc_release_host - release server nlm_host
+ * @host: nlm_host to release
+ *
+ * Host is destroyed later in nlm_gc_hosts().
+ */
+void nlmsvc_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release server host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(!host->h_server);
+	atomic_dec(&host->h_count);
 }
 
 /*
@@ -413,20 +516,28 @@
 	return host;
 }
 
-/*
- * Release NLM host after use
- */
-void nlm_release_host(struct nlm_host *host)
+static struct nlm_host *next_host_state(struct hlist_head *cache,
+					struct nsm_handle *nsm,
+					const struct nlm_reboot *info)
 {
-	if (host != NULL) {
-		dprintk("lockd: release host %s\n", host->h_name);
-		BUG_ON(atomic_read(&host->h_count) < 0);
-		if (atomic_dec_and_test(&host->h_count)) {
-			BUG_ON(!list_empty(&host->h_lockowners));
-			BUG_ON(!list_empty(&host->h_granted));
-			BUG_ON(!list_empty(&host->h_reclaim));
+	struct nlm_host *host = NULL;
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+
+	mutex_lock(&nlm_host_mutex);
+	for_each_host(host, pos, chain, cache) {
+		if (host->h_nsmhandle == nsm
+		    && host->h_nsmstate != info->state) {
+			host->h_nsmstate = info->state;
+			host->h_state++;
+
+			nlm_get_host(host);
+			goto out;
 		}
 	}
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
 }
 
 /**
@@ -438,8 +549,6 @@
  */
 void nlm_host_rebooted(const struct nlm_reboot *info)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nsm_handle *nsm;
 	struct nlm_host	*host;
 
@@ -452,32 +561,15 @@
 	 * lock for this.
 	 * To avoid processing a host several times, we match the nsmstate.
 	 */
-again:	mutex_lock(&nlm_host_mutex);
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			if (host->h_nsmhandle == nsm
-			 && host->h_nsmstate != info->state) {
-				host->h_nsmstate = info->state;
-				host->h_state++;
-
-				nlm_get_host(host);
-				mutex_unlock(&nlm_host_mutex);
-
-				if (host->h_server) {
-					/* We're server for this guy, just ditch
-					 * all the locks he held. */
-					nlmsvc_free_host_resources(host);
-				} else {
-					/* He's the server, initiate lock recovery. */
-					nlmclnt_recovery(host);
-				}
-
-				nlm_release_host(host);
-				goto again;
-			}
-		}
+	while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
+		nlmsvc_free_host_resources(host);
+		nlmsvc_release_host(host);
 	}
-	mutex_unlock(&nlm_host_mutex);
+	while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
+		nlmclnt_recovery(host);
+		nlmclnt_release_host(host);
+	}
+
 	nsm_release(nsm);
 }
 
@@ -497,13 +589,11 @@
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			host->h_expires = jiffies - 1;
-			if (host->h_rpcclnt) {
-				rpc_shutdown_client(host->h_rpcclnt);
-				host->h_rpcclnt = NULL;
-			}
+	for_each_host(host, pos, chain, nlm_server_hosts) {
+		host->h_expires = jiffies - 1;
+		if (host->h_rpcclnt) {
+			rpc_shutdown_client(host->h_rpcclnt);
+			host->h_rpcclnt = NULL;
 		}
 	}
 
@@ -512,15 +602,13 @@
 	mutex_unlock(&nlm_host_mutex);
 
 	/* complain if any hosts are left */
-	if (nrhosts) {
+	if (nrhosts != 0) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
-		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-			hlist_for_each_entry(host, pos, chain, h_hash) {
-				dprintk("       %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-			}
+		dprintk("lockd: %lu hosts left:\n", nrhosts);
+		for_each_host(host, pos, chain, nlm_server_hosts) {
+			dprintk("       %s (cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
 		}
 	}
 }
@@ -538,29 +626,22 @@
 	struct nlm_host	*host;
 
 	dprintk("lockd: host garbage collection\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash)
-			host->h_inuse = 0;
-	}
+	for_each_host(host, pos, chain, nlm_server_hosts)
+		host->h_inuse = 0;
 
 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources();
 
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
-			if (atomic_read(&host->h_count) || host->h_inuse
-			 || time_before(jiffies, host->h_expires)) {
-				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-				continue;
-			}
-			dprintk("lockd: delete host %s\n", host->h_name);
-			hlist_del_init(&host->h_hash);
-
-			nlm_destroy_host(host);
-			nrhosts--;
+	for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+		if (atomic_read(&host->h_count) || host->h_inuse
+		 || time_before(jiffies, host->h_expires)) {
+			dprintk("nlm_gc_hosts skipping %s "
+				"(cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
+			continue;
 		}
+		nlm_destroy_host_locked(host);
 	}
 
 	next_gc = jiffies + NLM_HOST_COLLECT;
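
The for_each_host() helpers added above hide a nested loop: the outer loop walks the fixed-size array of hash chains, the inner loop walks one chain, and a single-statement body binds to the inner loop. A stand-alone sketch of that shape, using a plain singly linked list instead of hlist and made-up names:

#include <stdio.h>

#define NRHASH 4

struct host {
	const char *name;
	struct host *next;	/* next entry on the same hash chain */
};

/*
 * Walk every entry of every chain: the outer loop visits each chain head,
 * the inner loop follows that chain.
 */
#define for_each_entry(pos, chain, table)				\
	for ((chain) = (table); (chain) < (table) + NRHASH; ++(chain))	\
		for ((pos) = *(chain); (pos) != NULL; (pos) = (pos)->next)

int main(void)
{
	struct host c = { "charlie", NULL };
	struct host b = { "bravo", NULL };
	struct host a = { "alpha", &b };
	struct host *table[NRHASH] = { &a, NULL, &c, NULL };
	struct host **chain;
	struct host *pos;

	for_each_entry(pos, chain, table)
		printf("%s\n", pos->name);	/* alpha, bravo, charlie */
	return 0;
}
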
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e0c9189..23d7451 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -401,26 +401,22 @@
  * Status Monitor wire protocol.
  */
 
-static int encode_nsm_string(struct xdr_stream *xdr, const char *string)
+static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
 {
 	const u32 len = strlen(string);
 	__be32 *p;
 
-	if (unlikely(len > SM_MAXSTRLEN))
-		return -EIO;
-	p = xdr_reserve_space(xdr, sizeof(u32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > SM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-	return 0;
 }
 
 /*
  * "mon_name" specifies the host to be monitored.
  */
-static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	return encode_nsm_string(xdr, argp->mon_name);
+	encode_nsm_string(xdr, argp->mon_name);
 }
 
 /*
@@ -429,35 +425,25 @@
  * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
  * has changed.
  */
-static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	int status;
 	__be32 *p;
 
-	status = encode_nsm_string(xdr, utsname()->nodename);
-	if (unlikely(status != 0))
-		return status;
-	p = xdr_reserve_space(xdr, 3 * sizeof(u32));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(argp->prog);
-	*p++ = htonl(argp->vers);
-	*p++ = htonl(argp->proc);
-	return 0;
+	encode_nsm_string(xdr, utsname()->nodename);
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(argp->prog);
+	*p++ = cpu_to_be32(argp->vers);
+	*p = cpu_to_be32(argp->proc);
 }
 
 /*
  * The "mon_id" argument specifies the non-private arguments
  * of an NSMPROC_MON or NSMPROC_UNMON call.
  */
-static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	int status;
-
-	status = encode_mon_name(xdr, argp);
-	if (unlikely(status != 0))
-		return status;
-	return encode_my_id(xdr, argp);
+	encode_mon_name(xdr, argp);
+	encode_my_id(xdr, argp);
 }
 
 /*
@@ -465,68 +451,56 @@
  * by the NSMPROC_MON call. This information will be supplied in the
  * NLMPROC_SM_NOTIFY call.
  */
-static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
 	__be32 *p;
 
 	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
+	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+}
+
+static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
+			    const struct nsm_args *argp)
+{
+	encode_mon_id(xdr, argp);
+	encode_priv(xdr, argp);
+}
+
+static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      const struct nsm_args *argp)
+{
+	encode_mon_id(xdr, argp);
+}
+
+static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nsm_res *resp)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+	resp->status = be32_to_cpup(p++);
+	resp->state = be32_to_cpup(p);
+
+	dprintk("lockd: %s status %d state %d\n",
+		__func__, resp->status, resp->state);
 	return 0;
 }
 
-static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p,
-		       const struct nsm_args *argp)
-{
-	struct xdr_stream xdr;
-	int status;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	status = encode_mon_id(&xdr, argp);
-	if (unlikely(status))
-		return status;
-	return encode_priv(&xdr, argp);
-}
-
-static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p,
-			 const struct nsm_args *argp)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	return encode_mon_id(&xdr, argp);
-}
-
-static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p,
+static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
+			    struct xdr_stream *xdr,
 			    struct nsm_res *resp)
 {
-	struct xdr_stream xdr;
+	__be32 *p;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	p = xdr_inline_decode(&xdr, 2 * sizeof(u32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	resp->status = ntohl(*p++);
-	resp->state = ntohl(*p);
+	resp->state = be32_to_cpup(p);
 
-	dprintk("lockd: xdr_dec_stat_res status %d state %d\n",
-			resp->status, resp->state);
-	return 0;
-}
-
-static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
-			struct nsm_res *resp)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	p = xdr_inline_decode(&xdr, sizeof(u32));
-	if (unlikely(p == NULL))
-		return -EIO;
-	resp->state = ntohl(*p);
-
-	dprintk("lockd: xdr_dec_stat state %d\n", resp->state);
+	dprintk("lockd: %s state %d\n", __func__, resp->state);
 	return 0;
 }
 
@@ -542,8 +516,8 @@
 static struct rpc_procinfo	nsm_procedures[] = {
 [NSMPROC_MON] = {
 		.p_proc		= NSMPROC_MON,
-		.p_encode	= (kxdrproc_t)xdr_enc_mon,
-		.p_decode	= (kxdrproc_t)xdr_dec_stat_res,
+		.p_encode	= (kxdreproc_t)nsm_xdr_enc_mon,
+		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat_res,
 		.p_arglen	= SM_mon_sz,
 		.p_replen	= SM_monres_sz,
 		.p_statidx	= NSMPROC_MON,
@@ -551,8 +525,8 @@
 	},
 [NSMPROC_UNMON] = {
 		.p_proc		= NSMPROC_UNMON,
-		.p_encode	= (kxdrproc_t)xdr_enc_unmon,
-		.p_decode	= (kxdrproc_t)xdr_dec_stat,
+		.p_encode	= (kxdreproc_t)nsm_xdr_enc_unmon,
+		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat,
 		.p_arglen	= SM_mon_id_sz,
 		.p_replen	= SM_unmonres_sz,
 		.p_statidx	= NSMPROC_UNMON,
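
The new nsm_xdr_dec_stat_res() reserves exactly the reply bytes it needs, converts each 32-bit word from network byte order, and fails with -EIO on a short reply. A user-space sketch of that check-then-convert pattern follows; the buffer layout and names are illustrative, not the kernel's xdr_stream API:

#include <arpa/inet.h>	/* ntohl() */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stat_res {
	uint32_t status;
	uint32_t state;
};

/* Pull one big-endian word out of the reply, checking the length first. */
static int get_u32(const unsigned char *buf, size_t len, size_t *off,
		   uint32_t *out)
{
	uint32_t be;

	if (len - *off < sizeof(be))
		return -EIO;		/* short reply: fail like the kernel */
	memcpy(&be, buf + *off, sizeof(be));
	*out = ntohl(be);
	*off += sizeof(be);
	return 0;
}

static int dec_stat_res(const unsigned char *buf, size_t len,
			struct stat_res *res)
{
	size_t off = 0;
	int err;

	err = get_u32(buf, len, &off, &res->status);
	if (err)
		return err;
	return get_u32(buf, len, &off, &res->state);
}

int main(void)
{
	/* status = 0 (success), state = 7, both in network byte order */
	unsigned char reply[8] = { 0, 0, 0, 0, 0, 0, 0, 7 };
	struct stat_res res;

	if (dec_stat_res(reply, sizeof(reply), &res) == 0)
		printf("status %u state %u\n", res.status, res.state);
	return 0;
}
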
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 38d2611..9a41fdc 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -51,7 +51,7 @@
 	return 0;
 
 no_locks:
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
  	if (error)
 		return error;	
 	return nlm_lck_denied_nolocks;
@@ -92,7 +92,7 @@
 	else
 		dprintk("lockd: TEST4        status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -134,7 +134,7 @@
 	else
 		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -164,7 +164,7 @@
 	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
 
 	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -197,7 +197,7 @@
 	resp->status = nlmsvc_unlock(file, &argp->lock);
 
 	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -229,7 +229,7 @@
 
 static void nlm4svc_callback_release(void *data)
 {
-	nlm_release_call(data);
+	nlmsvc_release_call(data);
 }
 
 static const struct rpc_call_ops nlm4svc_callback_ops = {
@@ -261,7 +261,7 @@
 
 	stat = func(rqstp, argp, &call->a_res);
 	if (stat != 0) {
-		nlm_release_call(call);
+		nlmsvc_release_call(call);
 		return stat;
 	}
 
@@ -334,7 +334,7 @@
 	resp->status = nlmsvc_share_file(host, file, argp);
 
 	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -367,7 +367,7 @@
 	resp->status = nlmsvc_unshare_file(host, file, argp);
 
 	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -399,7 +399,7 @@
 		return rpc_success;
 
 	nlmsvc_free_host_resources(host);
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index ef5659b..6e31695 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -46,6 +46,7 @@
 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
 static void nlmsvc_freegrantargs(struct nlm_rqst *call);
 static const struct rpc_call_ops nlmsvc_grant_ops;
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie);
 
 /*
  * The list of blocked locks to retry
@@ -233,7 +234,7 @@
 failed_free:
 	kfree(block);
 failed:
-	nlm_release_call(call);
+	nlmsvc_release_call(call);
 	return NULL;
 }
 
@@ -266,7 +267,7 @@
 	mutex_unlock(&file->f_mutex);
 
 	nlmsvc_freegrantargs(block->b_call);
-	nlm_release_call(block->b_call);
+	nlmsvc_release_call(block->b_call);
 	nlm_release_file(block->b_file);
 	kfree(block->b_fl);
 	kfree(block);
@@ -934,3 +935,32 @@
 
 	return timeout;
 }
+
+#ifdef RPC_DEBUG
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
+{
+	/*
+	 * We can get away with a static buffer because we're only
+	 * called with BKL held.
+	 */
+	static char buf[2*NLM_MAXCOOKIELEN+1];
+	unsigned int i, len = sizeof(buf);
+	char *p = buf;
+
+	len--;	/* allow for trailing \0 */
+	if (len < 3)
+		return "???";
+	for (i = 0 ; i < cookie->len ; i++) {
+		if (len < 2) {
+			strcpy(p-3, "...");
+			break;
+		}
+		sprintf(p, "%02x", cookie->data[i]);
+		p += 2;
+		len -= 2;
+	}
+	*p = '\0';
+
+	return buf;
+}
+#endif
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0caea53..d27aab1 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -80,7 +80,7 @@
 	return 0;
 
 no_locks:
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	if (error)
 		return error;
 	return nlm_lck_denied_nolocks;
@@ -122,7 +122,7 @@
 		dprintk("lockd: TEST          status %d vers %d\n",
 			ntohl(resp->status), rqstp->rq_vers);
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -164,7 +164,7 @@
 	else
 		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -194,7 +194,7 @@
 	resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
 
 	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -227,7 +227,7 @@
 	resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
 
 	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -257,9 +257,17 @@
 			-task->tk_status);
 }
 
+void nlmsvc_release_call(struct nlm_rqst *call)
+{
+	if (!atomic_dec_and_test(&call->a_count))
+		return;
+	nlmsvc_release_host(call->a_host);
+	kfree(call);
+}
+
 static void nlmsvc_callback_release(void *data)
 {
-	nlm_release_call(data);
+	nlmsvc_release_call(data);
 }
 
 static const struct rpc_call_ops nlmsvc_callback_ops = {
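
nlmsvc_release_call() above frees the call only when atomic_dec_and_test() drops the last reference, releasing the host reference at the same point. The C11-atomics sketch below mirrors that put-and-maybe-free pattern with made-up names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct call {
	atomic_int refcount;
	/* ... per-call state would live here ... */
};

static struct call *call_get(struct call *c)
{
	atomic_fetch_add(&c->refcount, 1);
	return c;
}

/* Free the call only when the last reference is dropped. */
static void call_put(struct call *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) != 1)
		return;			/* someone else still holds it */
	printf("freeing call\n");
	free(c);
}

int main(void)
{
	struct call *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->refcount, 1);	/* creator's reference */
	call_get(c);			/* e.g. handed to an async callback */
	call_put(c);			/* callback done */
	call_put(c);			/* creator done: freed here */
	return 0;
}
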
@@ -291,7 +299,7 @@
 
 	stat = func(rqstp, argp, &call->a_res);
 	if (stat != 0) {
-		nlm_release_call(call);
+		nlmsvc_release_call(call);
 		return stat;
 	}
 
@@ -366,7 +374,7 @@
 	resp->status = cast_status(nlmsvc_share_file(host, file, argp));
 
 	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -399,7 +407,7 @@
 	resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
 
 	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -431,7 +439,7 @@
 		return rpc_success;
 
 	nlmsvc_free_host_resources(host);
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b583ab0..964666c 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -149,37 +149,6 @@
 }
 
 /*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
-	struct file_lock	*fl = &lock->fl;
-	__s32			start, len;
-
-	if (!(p = xdr_encode_string(p, lock->caller))
-	 || !(p = nlm_encode_fh(p, &lock->fh))
-	 || !(p = nlm_encode_oh(p, &lock->oh)))
-		return NULL;
-
-	if (fl->fl_start > NLM_OFFSET_MAX
-	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
-		return NULL;
-
-	start = loff_t_to_s32(fl->fl_start);
-	if (fl->fl_end == OFFSET_MAX)
-		len = 0;
-	else
-		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
-
-	*p++ = htonl(lock->svid);
-	*p++ = htonl(start);
-	*p++ = htonl(len);
-
-	return p;
-}
-
-/*
  * Encode result of a TEST/TEST_MSG call
  */
 static __be32 *
@@ -372,259 +341,3 @@
 {
 	return xdr_ressize_check(rqstp, p);
 }
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
-{
-	return 0;
-}
-#endif
-
-static int
-nlmclt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	if (resp->status == nlm_lck_denied) {
-		struct file_lock	*fl = &resp->lock.fl;
-		u32			excl;
-		s32			start, len, end;
-
-		memset(&resp->lock, 0, sizeof(resp->lock));
-		locks_init_lock(fl);
-		excl = ntohl(*p++);
-		resp->lock.svid = ntohl(*p++);
-		fl->fl_pid = (pid_t)resp->lock.svid;
-		if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
-			return -EIO;
-
-		fl->fl_flags = FL_POSIX;
-		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-		start = ntohl(*p++);
-		len = ntohl(*p++);
-		end = start + len - 1;
-
-		fl->fl_start = s32_to_loff_t(start);
-		if (len == 0 || end < 0)
-			fl->fl_end = OFFSET_MAX;
-		else
-			fl->fl_end = s32_to_loff_t(end);
-	}
-	return 0;
-}
-
-
-static int
-nlmclt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	*p++ = argp->reclaim? xdr_one : xdr_zero;
-	*p++ = htonl(argp->state);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
-		return -EIO;
-	*p++ = resp->status;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_encode_testres(p, resp)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM_void_sz		0
-#define NLM_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM_caller_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_owner_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_fhandle_sz		1+XDR_QUADLEN(NFS2_FHSIZE)
-#define NLM_lock_sz		3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz
-#define NLM_holder_sz		4+NLM_owner_sz
-
-#define NLM_testargs_sz		NLM_cookie_sz+1+NLM_lock_sz
-#define NLM_lockargs_sz		NLM_cookie_sz+4+NLM_lock_sz
-#define NLM_cancargs_sz		NLM_cookie_sz+2+NLM_lock_sz
-#define NLM_unlockargs_sz	NLM_cookie_sz+NLM_lock_sz
-
-#define NLM_testres_sz		NLM_cookie_sz+1+NLM_holder_sz
-#define NLM_res_sz		NLM_cookie_sz+1
-#define NLM_norep_sz		0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlmclt_decode_norep	NULL
-
-#define PROC(proc, argtype, restype)	\
-[NLMPROC_##proc] = {							\
-	.p_proc      = NLMPROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nlmclt_encode_##argtype,		\
-	.p_decode    = (kxdrproc_t) nlmclt_decode_##restype,		\
-	.p_arglen    = NLM_##argtype##_sz,				\
-	.p_replen    = NLM_##restype##_sz,				\
-	.p_statidx   = NLMPROC_##proc,					\
-	.p_name      = #proc,						\
-	}
-
-static struct rpc_procinfo	nlm_procedures[] = {
-    PROC(TEST,		testargs,	testres),
-    PROC(LOCK,		lockargs,	res),
-    PROC(CANCEL,	cancargs,	res),
-    PROC(UNLOCK,	unlockargs,	res),
-    PROC(GRANTED,	testargs,	res),
-    PROC(TEST_MSG,	testargs,	norep),
-    PROC(LOCK_MSG,	lockargs,	norep),
-    PROC(CANCEL_MSG,	cancargs,	norep),
-    PROC(UNLOCK_MSG,	unlockargs,	norep),
-    PROC(GRANTED_MSG,	testargs,	norep),
-    PROC(TEST_RES,	testres,	norep),
-    PROC(LOCK_RES,	res,		norep),
-    PROC(CANCEL_RES,	res,		norep),
-    PROC(UNLOCK_RES,	res,		norep),
-    PROC(GRANTED_RES,	res,		norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
-    PROC(SHARE,		shareargs,	shareres),
-    PROC(UNSHARE,	shareargs,	shareres),
-    PROC(NM_LOCK,	lockargs,	res),
-    PROC(FREE_ALL,	notify,		void),
-#endif
-};
-
-static struct rpc_version	nlm_version1 = {
-		.number		= 1,
-		.nrprocs	= 16,
-		.procs		= nlm_procedures,
-};
-
-static struct rpc_version	nlm_version3 = {
-		.number		= 3,
-		.nrprocs	= 24,
-		.procs		= nlm_procedures,
-};
-
-static struct rpc_version *	nlm_versions[] = {
-	[1] = &nlm_version1,
-	[3] = &nlm_version3,
-#ifdef 	CONFIG_LOCKD_V4
-	[4] = &nlm_version4,
-#endif
-};
-
-static struct rpc_stat		nlm_stats;
-
-struct rpc_program		nlm_program = {
-		.name		= "lockd",
-		.number		= NLM_PROGRAM,
-		.nrvers		= ARRAY_SIZE(nlm_versions),
-		.version	= nlm_versions,
-		.stats		= &nlm_stats,
-};
-
-#ifdef RPC_DEBUG
-const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
-{
-	/*
-	 * We can get away with a static buffer because we're only
-	 * called with BKL held.
-	 */
-	static char buf[2*NLM_MAXCOOKIELEN+1];
-	unsigned int i, len = sizeof(buf);
-	char *p = buf;
-
-	len--;	/* allow for trailing \0 */
-	if (len < 3)
-		return "???";
-	for (i = 0 ; i < cookie->len ; i++) {
-		if (len < 2) {
-			strcpy(p-3, "...");
-			break;
-		}
-		sprintf(p, "%02x", cookie->data[i]);
-		p += 2;
-		len -= 2;
-	}
-	*p = '\0';
-
-	return buf;
-}
-#endif
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ad9dbbc..dfa4789 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -93,15 +93,6 @@
 	return p + XDR_QUADLEN(f->size);
 }
 
-static __be32 *
-nlm4_encode_fh(__be32 *p, struct nfs_fh *f)
-{
-	*p++ = htonl(f->size);
-	if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
-	memcpy(p, f->data, f->size);
-	return p + XDR_QUADLEN(f->size);
-}
-
 /*
  * Encode and decode owner handle
  */
@@ -112,12 +103,6 @@
 }
 
 static __be32 *
-nlm4_encode_oh(__be32 *p, struct xdr_netobj *oh)
-{
-	return xdr_encode_netobj(p, oh);
-}
-
-static __be32 *
 nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
 {
 	struct file_lock	*fl = &lock->fl;
@@ -150,38 +135,6 @@
 }
 
 /*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm4_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
-	struct file_lock	*fl = &lock->fl;
-	__s64			start, len;
-
-	if (!(p = xdr_encode_string(p, lock->caller))
-	 || !(p = nlm4_encode_fh(p, &lock->fh))
-	 || !(p = nlm4_encode_oh(p, &lock->oh)))
-		return NULL;
-
-	if (fl->fl_start > NLM4_OFFSET_MAX
-	 || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
-		return NULL;
-
-	*p++ = htonl(lock->svid);
-
-	start = loff_t_to_s64(fl->fl_start);
-	if (fl->fl_end == OFFSET_MAX)
-		len = 0;
-	else
-		len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
-
-	p = xdr_encode_hyper(p, start);
-	p = xdr_encode_hyper(p, len);
-
-	return p;
-}
-
-/*
  * Encode result of a TEST/TEST_MSG call
  */
 static __be32 *
@@ -379,211 +332,3 @@
 {
 	return xdr_ressize_check(rqstp, p);
 }
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlm4clt_decode_void(struct rpc_rqst *req, __be32 *p, void *ptr)
-{
-	return 0;
-}
-#endif
-
-static int
-nlm4clt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	if (resp->status == nlm_lck_denied) {
-		struct file_lock	*fl = &resp->lock.fl;
-		u32			excl;
-		__u64			start, len;
-		__s64			end;
-
-		memset(&resp->lock, 0, sizeof(resp->lock));
-		locks_init_lock(fl);
-		excl = ntohl(*p++);
-		resp->lock.svid = ntohl(*p++);
-		fl->fl_pid = (pid_t)resp->lock.svid;
-		if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
-			return -EIO;
-
-		fl->fl_flags = FL_POSIX;
-		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-		p = xdr_decode_hyper(p, &start);
-		p = xdr_decode_hyper(p, &len);
-		end = start + len - 1;
-
-		fl->fl_start = s64_to_loff_t(start);
-		if (len == 0 || end < 0)
-			fl->fl_end = OFFSET_MAX;
-		else
-			fl->fl_end = s64_to_loff_t(end);
-	}
-	return 0;
-}
-
-
-static int
-nlm4clt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	*p++ = argp->reclaim? xdr_one : xdr_zero;
-	*p++ = htonl(argp->state);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
-		return -EIO;
-	*p++ = resp->status;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_encode_testres(p, resp)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
-#  error "NLM host name cannot be larger than NLM's maximum string length!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM4_void_sz		0
-#define NLM4_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM4_caller_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_owner_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_fhandle_sz		1+XDR_QUADLEN(NFS3_FHSIZE)
-#define NLM4_lock_sz		5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz
-#define NLM4_holder_sz		6+NLM4_owner_sz
-
-#define NLM4_testargs_sz	NLM4_cookie_sz+1+NLM4_lock_sz
-#define NLM4_lockargs_sz	NLM4_cookie_sz+4+NLM4_lock_sz
-#define NLM4_cancargs_sz	NLM4_cookie_sz+2+NLM4_lock_sz
-#define NLM4_unlockargs_sz	NLM4_cookie_sz+NLM4_lock_sz
-
-#define NLM4_testres_sz		NLM4_cookie_sz+1+NLM4_holder_sz
-#define NLM4_res_sz		NLM4_cookie_sz+1
-#define NLM4_norep_sz		0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlm4clt_decode_norep	NULL
-
-#define PROC(proc, argtype, restype)					\
-[NLMPROC_##proc] = {							\
-	.p_proc      = NLMPROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nlm4clt_encode_##argtype,		\
-	.p_decode    = (kxdrproc_t) nlm4clt_decode_##restype,		\
-	.p_arglen    = NLM4_##argtype##_sz,				\
-	.p_replen    = NLM4_##restype##_sz,				\
-	.p_statidx   = NLMPROC_##proc,					\
-	.p_name      = #proc,						\
-	}
-
-static struct rpc_procinfo	nlm4_procedures[] = {
-    PROC(TEST,		testargs,	testres),
-    PROC(LOCK,		lockargs,	res),
-    PROC(CANCEL,	cancargs,	res),
-    PROC(UNLOCK,	unlockargs,	res),
-    PROC(GRANTED,	testargs,	res),
-    PROC(TEST_MSG,	testargs,	norep),
-    PROC(LOCK_MSG,	lockargs,	norep),
-    PROC(CANCEL_MSG,	cancargs,	norep),
-    PROC(UNLOCK_MSG,	unlockargs,	norep),
-    PROC(GRANTED_MSG,	testargs,	norep),
-    PROC(TEST_RES,	testres,	norep),
-    PROC(LOCK_RES,	res,		norep),
-    PROC(CANCEL_RES,	res,		norep),
-    PROC(UNLOCK_RES,	res,		norep),
-    PROC(GRANTED_RES,	res,		norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
-    PROC(SHARE,		shareargs,	shareres),
-    PROC(UNSHARE,	shareargs,	shareres),
-    PROC(NM_LOCK,	lockargs,	res),
-    PROC(FREE_ALL,	notify,		void),
-#endif
-};
-
-struct rpc_version	nlm_version4 = {
-	.number		= 4,
-	.nrprocs	= 24,
-	.procs		= nlm4_procedures,
-};
diff --git a/fs/locks.c b/fs/locks.c
index 8729347..0f39982 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -444,15 +444,9 @@
 	fl->fl_file->f_owner.signum = 0;
 }
 
-static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
-{
-	return fl->fl_file == try->fl_file;
-}
-
 static const struct lock_manager_operations lease_manager_ops = {
 	.fl_break = lease_break_callback,
 	.fl_release_private = lease_release_private_callback,
-	.fl_mylease = lease_mylease_callback,
 	.fl_change = lease_modify,
 };
 
@@ -1389,7 +1383,7 @@
 		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
 			goto out;
 		if ((arg == F_WRLCK)
-		    && ((atomic_read(&dentry->d_count) > 1)
+		    && ((dentry->d_count > 1)
 			|| (atomic_read(&inode->i_count) > 1)))
 			goto out;
 	}
@@ -1405,7 +1399,7 @@
 	for (before = &inode->i_flock;
 			((fl = *before) != NULL) && IS_LEASE(fl);
 			before = &fl->fl_next) {
-		if (lease->fl_lmops->fl_mylease(fl, lease))
+		if (fl->fl_file == filp)
 			my_before = before;
 		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
 			/*
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 92ca6fb..723bc5b 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -300,7 +300,7 @@
 
 static void bdev_put_device(struct logfs_super *s)
 {
-	close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE);
+	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
@@ -325,13 +325,14 @@
 {
 	struct block_device *bdev;
 
-	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
+	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				  type);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
 	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
 		int mtdnr = MINOR(bdev->bd_dev);
-		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 		return logfs_get_sb_mtd(p, mtdnr);
 	}
 
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 409dfd6..f9ddf0c 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,9 +555,11 @@
 	return __logfs_create(dir, dentry, inode, target, destlen);
 }
 
-static int logfs_permission(struct inode *inode, int mask)
+static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
-	return generic_permission(inode, mask, NULL);
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+	return generic_permission(inode, mask, flags, NULL);
 }
 
 static int logfs_link(struct dentry *old_dentry, struct inode *dir,
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index d8c71ec..03b8c24 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -141,13 +141,20 @@
 	return __logfs_iget(sb, ino);
 }
 
+static void logfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
+}
+
 static void __logfs_destroy_inode(struct inode *inode)
 {
 	struct logfs_inode *li = logfs_inode(inode);
 
 	BUG_ON(li->li_block);
 	list_del(&li->li_freeing_list);
-	kmem_cache_free(logfs_inode_cache, li);
+	call_rcu(&inode->i_rcu, logfs_i_callback);
 }
 
 static void logfs_destroy_inode(struct inode *inode)
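
logfs_i_callback() recovers the inode from the embedded rcu_head with container_of() and only then frees the containing object, so the memory stays valid for RCU readers until the grace period ends. A user-space sketch of the container_of() arithmetic, with an ordinary callback standing in for call_rcu() and the grace period:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Same arithmetic as the kernel's container_of(): member address -> struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {
	void (*func)(struct cb_head *head);
};

struct my_inode {
	int ino;
	struct cb_head rcu;	/* embedded callback node, like inode->i_rcu */
};

/* Deferred destructor: recover the outer object from the embedded head. */
static void my_inode_free(struct cb_head *head)
{
	struct my_inode *ni = container_of(head, struct my_inode, rcu);

	printf("freeing inode %d\n", ni->ino);
	free(ni);
}

int main(void)
{
	struct my_inode *ni = malloc(sizeof(*ni));

	if (!ni)
		return 1;
	ni->ino = 12;
	ni->rcu.func = my_inode_free;

	/* call_rcu() would run this after a grace period; invoke it directly. */
	ni->rcu.func(&ni->rcu);
	return 0;
}
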
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 9344474..a25444ab 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -76,18 +76,6 @@
 EXPORT_SYMBOL(mb_cache_entry_find_next);
 #endif
 
-struct mb_cache {
-	struct list_head		c_cache_list;
-	const char			*c_name;
-	atomic_t			c_entry_count;
-	int				c_max_entries;
-	int				c_bucket_bits;
-	struct kmem_cache		*c_entry_cache;
-	struct list_head		*c_block_hash;
-	struct list_head		*c_index_hash;
-};
-
-
 /*
  * Global data: list of all mbcache's, lru list, and a spinlock for
  * accessing cache data structures on SMP machines. The lru list is
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fb20208..ae0b83f 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -68,9 +68,16 @@
 	return &ei->vfs_inode;
 }
 
+static void minix_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(minix_inode_cachep, minix_i(inode));
+}
+
 static void minix_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(minix_inode_cachep, minix_i(inode));
+	call_rcu(&inode->i_rcu, minix_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index c0d35a3..ce7337d 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -23,8 +23,6 @@
 	struct inode * inode = NULL;
 	ino_t ino;
 
-	dentry->d_op = dir->i_sb->s_root->d_op;
-
 	if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen)
 		return ERR_PTR(-ENAMETOOLONG);
 
diff --git a/fs/mpage.c b/fs/mpage.c
index fd56ca2..d78455a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -40,7 +40,7 @@
  * status of that page is hard.  See end_buffer_async_read() for the details.
  * There is no point in duplicating all that complexity.
  */
-static void mpage_end_io_read(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -50,44 +50,29 @@
 
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			SetPageUptodate(page);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
+		if (bio_data_dir(bio) == READ) {
+			if (uptodate) {
+				SetPageUptodate(page);
+			} else {
+				ClearPageUptodate(page);
+				SetPageError(page);
+			}
+			unlock_page(page);
+		} else { /* bio_data_dir(bio) == WRITE */
+			if (!uptodate) {
+				SetPageError(page);
+				if (page->mapping)
+					set_bit(AS_EIO, &page->mapping->flags);
+			}
+			end_page_writeback(page);
 		}
-		unlock_page(page);
-	} while (bvec >= bio->bi_io_vec);
-	bio_put(bio);
-}
-
-static void mpage_end_io_write(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-
-	do {
-		struct page *page = bvec->bv_page;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (!uptodate){
-			SetPageError(page);
-			if (page->mapping)
-				set_bit(AS_EIO, &page->mapping->flags);
-		}
-		end_page_writeback(page);
 	} while (bvec >= bio->bi_io_vec);
 	bio_put(bio);
 }
 
 static struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
-	bio->bi_end_io = mpage_end_io_read;
-	if (rw == WRITE)
-		bio->bi_end_io = mpage_end_io_write;
+	bio->bi_end_io = mpage_end_io;
 	submit_bio(rw, bio);
 	return NULL;
 }
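
The merged mpage_end_io() replaces two nearly identical completion callbacks with one that branches on bio_data_dir(): reads set the page uptodate/error state and unlock, writes record errors and end writeback. A toy sketch of that single-handler shape, with a fake page structure in place of struct page:

#include <stdbool.h>
#include <stdio.h>

enum io_dir { IO_READ, IO_WRITE };

struct page_state {
	bool uptodate;
	bool error;
	bool locked;
	bool writeback;
};

struct io_req {
	enum io_dir dir;
	bool ok;			/* like BIO_UPTODATE */
	struct page_state *page;
};

/* One completion handler for both directions, branching on the request. */
static void end_io(const struct io_req *req)
{
	struct page_state *page = req->page;

	if (req->dir == IO_READ) {
		page->uptodate = req->ok;
		page->error = !req->ok;
		page->locked = false;		/* unlock_page() */
	} else {
		if (!req->ok)
			page->error = true;
		page->writeback = false;	/* end_page_writeback() */
	}
}

int main(void)
{
	struct page_state p = { .locked = true, .writeback = true };
	struct io_req read_req = { IO_READ, true, &p };

	end_io(&read_req);
	printf("uptodate=%d error=%d locked=%d\n",
	       p.uptodate, p.error, p.locked);
	return 0;
}
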
diff --git a/fs/namei.c b/fs/namei.c
index 4ff7ca5..8df7a78 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -169,8 +169,8 @@
 /*
  * This does basic POSIX ACL permission checking
  */
-static int acl_permission_check(struct inode *inode, int mask,
-		int (*check_acl)(struct inode *inode, int mask))
+static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
+		int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
 {
 	umode_t			mode = inode->i_mode;
 
@@ -180,7 +180,7 @@
 		mode >>= 6;
 	else {
 		if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) {
-			int error = check_acl(inode, mask);
+			int error = check_acl(inode, mask, flags);
 			if (error != -EAGAIN)
 				return error;
 		}
@@ -198,25 +198,30 @@
 }
 
 /**
- * generic_permission  -  check for access rights on a Posix-like filesystem
+ * generic_permission -  check for access rights on a Posix-like filesystem
  * @inode:	inode to check access rights for
  * @mask:	right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
  * @check_acl:	optional callback to check for Posix ACLs
+ * @flags:	IPERM_FLAG_ flags.
  *
  * Used to check for read/write/execute permissions on a file.
  * We use "fsuid" for this, letting us set arbitrary permissions
  * for filesystem access without changing the "normal" uids which
- * are used for other things..
+ * are used for other things.
+ *
+ * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
+ * request cannot be satisfied (eg. requires blocking or too much complexity).
+ * It would then be called again in ref-walk mode.
  */
-int generic_permission(struct inode *inode, int mask,
-		int (*check_acl)(struct inode *inode, int mask))
+int generic_permission(struct inode *inode, int mask, unsigned int flags,
+	int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
 {
 	int ret;
 
 	/*
 	 * Do the basic POSIX ACL permission checks.
 	 */
-	ret = acl_permission_check(inode, mask, check_acl);
+	ret = acl_permission_check(inode, mask, flags, check_acl);
 	if (ret != -EACCES)
 		return ret;
 
@@ -271,9 +276,10 @@
 	}
 
 	if (inode->i_op->permission)
-		retval = inode->i_op->permission(inode, mask);
+		retval = inode->i_op->permission(inode, mask, 0);
 	else
-		retval = generic_permission(inode, mask, inode->i_op->check_acl);
+		retval = generic_permission(inode, mask, 0,
+				inode->i_op->check_acl);
 
 	if (retval)
 		return retval;
@@ -362,6 +368,18 @@
 EXPORT_SYMBOL(path_get);
 
 /**
+ * path_get_long - get a long reference to a path
+ * @path: path to get the reference to
+ *
+ * Given a path increment the reference count to the dentry and the vfsmount.
+ */
+void path_get_long(struct path *path)
+{
+	mntget_long(path->mnt);
+	dget(path->dentry);
+}
+
+/**
  * path_put - put a reference to a path
  * @path: path to put the reference to
  *
@@ -375,6 +393,193 @@
 EXPORT_SYMBOL(path_put);
 
 /**
+ * path_put_long - put a long reference to a path
+ * @path: path to put the reference to
+ *
+ * Given a path decrement the reference count to the dentry and the vfsmount.
+ */
+void path_put_long(struct path *path)
+{
+	dput(path->dentry);
+	mntput_long(path->mnt);
+}
+
+/**
+ * nameidata_drop_rcu - drop this nameidata out of rcu-walk
+ * @nd: nameidata pathwalk data to drop
+ * Returns: 0 on success, -ECHILD on failure
+ *
+ * Path walking has 2 modes, rcu-walk and ref-walk (see
+ * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt
+ * to drop out of rcu-walk mode and take normal reference counts on dentries
+ * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take
+ * refcounts at the last known good point before rcu-walk got stuck, so
+ * ref-walk may continue from there. If this is not successful (eg. a seqcount
+ * has changed), then failure is returned and path walk restarts from the
+ * beginning in ref-walk mode.
+ *
+ * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into
+ * ref-walk. Must be called from rcu-walk context.
+ */
+static int nameidata_drop_rcu(struct nameidata *nd)
+{
+	struct fs_struct *fs = current->fs;
+	struct dentry *dentry = nd->path.dentry;
+
+	BUG_ON(!(nd->flags & LOOKUP_RCU));
+	if (nd->root.mnt) {
+		spin_lock(&fs->lock);
+		if (nd->root.mnt != fs->root.mnt ||
+				nd->root.dentry != fs->root.dentry)
+			goto err_root;
+	}
+	spin_lock(&dentry->d_lock);
+	if (!__d_rcu_to_refcount(dentry, nd->seq))
+		goto err;
+	BUG_ON(nd->inode != dentry->d_inode);
+	spin_unlock(&dentry->d_lock);
+	if (nd->root.mnt) {
+		path_get(&nd->root);
+		spin_unlock(&fs->lock);
+	}
+	mntget(nd->path.mnt);
+
+	rcu_read_unlock();
+	br_read_unlock(vfsmount_lock);
+	nd->flags &= ~LOOKUP_RCU;
+	return 0;
+err:
+	spin_unlock(&dentry->d_lock);
+err_root:
+	if (nd->root.mnt)
+		spin_unlock(&fs->lock);
+	return -ECHILD;
+}
+
+/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
+static inline int nameidata_drop_rcu_maybe(struct nameidata *nd)
+{
+	if (nd->flags & LOOKUP_RCU)
+		return nameidata_drop_rcu(nd);
+	return 0;
+}
+
+/**
+ * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk
+ * @nd: nameidata pathwalk data to drop
+ * @dentry: dentry to drop
+ * Returns: 0 on success, -ECHILD on failure
+ *
+ * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root,
+ * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on
+ * @nd. Must be called from rcu-walk context.
+ */
+static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry)
+{
+	struct fs_struct *fs = current->fs;
+	struct dentry *parent = nd->path.dentry;
+
+	/*
+	 * It is possible to revalidate the dentry that we started
+	 * the path walk with. force_reval_path may also revalidate the
+	 * dentry already committed to the nameidata.
+	 */
+	if (unlikely(parent == dentry))
+		return nameidata_drop_rcu(nd);
+
+	BUG_ON(!(nd->flags & LOOKUP_RCU));
+	if (nd->root.mnt) {
+		spin_lock(&fs->lock);
+		if (nd->root.mnt != fs->root.mnt ||
+				nd->root.dentry != fs->root.dentry)
+			goto err_root;
+	}
+	spin_lock(&parent->d_lock);
+	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+	if (!__d_rcu_to_refcount(dentry, nd->seq))
+		goto err;
+	/*
+	 * If the sequence check on the child dentry passed, then the child has
+	 * not been removed from its parent. This means the parent dentry must
+	 * be valid and able to take a reference at this point.
+	 */
+	BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
+	BUG_ON(!parent->d_count);
+	parent->d_count++;
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&parent->d_lock);
+	if (nd->root.mnt) {
+		path_get(&nd->root);
+		spin_unlock(&fs->lock);
+	}
+	mntget(nd->path.mnt);
+
+	rcu_read_unlock();
+	br_read_unlock(vfsmount_lock);
+	nd->flags &= ~LOOKUP_RCU;
+	return 0;
+err:
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&parent->d_lock);
+err_root:
+	if (nd->root.mnt)
+		spin_unlock(&fs->lock);
+	return -ECHILD;
+}
+
+/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
+static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry)
+{
+	if (nd->flags & LOOKUP_RCU)
+		return nameidata_dentry_drop_rcu(nd, dentry);
+	return 0;
+}
+
+/**
+ * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk
+ * @nd: nameidata pathwalk data to drop
+ * Returns: 0 on success, -ECHILD on failure
+ *
+ * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk.
+ * nd->path should be the final element of the lookup, so nd->root is discarded.
+ * Must be called from rcu-walk context.
+ */
+static int nameidata_drop_rcu_last(struct nameidata *nd)
+{
+	struct dentry *dentry = nd->path.dentry;
+
+	BUG_ON(!(nd->flags & LOOKUP_RCU));
+	nd->flags &= ~LOOKUP_RCU;
+	nd->root.mnt = NULL;
+	spin_lock(&dentry->d_lock);
+	if (!__d_rcu_to_refcount(dentry, nd->seq))
+		goto err_unlock;
+	BUG_ON(nd->inode != dentry->d_inode);
+	spin_unlock(&dentry->d_lock);
+
+	mntget(nd->path.mnt);
+
+	rcu_read_unlock();
+	br_read_unlock(vfsmount_lock);
+
+	return 0;
+
+err_unlock:
+	spin_unlock(&dentry->d_lock);
+	rcu_read_unlock();
+	br_read_unlock(vfsmount_lock);
+	return -ECHILD;
+}
+
+/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
+static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd)
+{
+	if (likely(nd->flags & LOOKUP_RCU))
+		return nameidata_drop_rcu_last(nd);
+	return 0;
+}
+
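All three drop helpers above follow the same shape: re-validate the lock-free snapshot against a sequence count, take real references if it still matches, and report -ECHILD otherwise so the caller can restart with locks held. The following is a rough userspace analogue of that shape, not kernel code; every type and function name in it is invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_entry {
	atomic_uint seq;	/* even = stable, odd = writer in progress */
	int value;
	pthread_mutex_t lock;	/* slow-path protection, the "ref-walk" side */
};

/* Fast path: succeeds only if the entry did not change underneath us. */
static int read_fast(struct demo_entry *e, int *out)
{
	unsigned int start = atomic_load(&e->seq);

	if (start & 1)
		return -1;		/* writer active: bail, like -ECHILD */
	*out = e->value;
	if (atomic_load(&e->seq) != start)
		return -1;		/* raced with an update: bail */
	return 0;
}

/* Slow path: take the lock and read authoritatively. */
static int read_slow(struct demo_entry *e)
{
	pthread_mutex_lock(&e->lock);
	int v = e->value;
	pthread_mutex_unlock(&e->lock);
	return v;
}

static void write_entry(struct demo_entry *e, int v)
{
	pthread_mutex_lock(&e->lock);
	atomic_fetch_add(&e->seq, 1);	/* odd: readers must retry */
	e->value = v;
	atomic_fetch_add(&e->seq, 1);	/* even again: stable */
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	struct demo_entry e;
	int v;

	atomic_init(&e.seq, 0);
	e.value = 41;
	pthread_mutex_init(&e.lock, NULL);

	write_entry(&e, 42);
	if (read_fast(&e, &v) != 0)
		v = read_slow(&e);	/* the fallback restart */
	printf("value = %d\n", v);
	return 0;
}

In the kernel helpers the fallback additionally has to take the dentry and vfsmount reference counts before leaving rcu-walk, which is exactly what the __d_rcu_to_refcount() and mntget() calls above are doing.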
+/**
  * release_open_intent - free up open intent resources
  * @nd: pointer to nameidata
  */
@@ -386,10 +591,33 @@
 		fput(nd->intent.open.file);
 }
 
+/*
+ * Call d_revalidate and handle filesystems that request rcu-walk
+ * to be dropped. This may be called and return in rcu-walk mode,
+ * regardless of success or error. If -ECHILD is returned, the caller
+ * must return -ECHILD back up the path walk stack so path walk may
+ * be restarted in ref-walk mode.
+ */
+static int d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	int status;
+
+	status = dentry->d_op->d_revalidate(dentry, nd);
+	if (status == -ECHILD) {
+		if (nameidata_dentry_drop_rcu(nd, dentry))
+			return status;
+		status = dentry->d_op->d_revalidate(dentry, nd);
+	}
+
+	return status;
+}
+
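The wrapper above retries a single ->d_revalidate() call after leaving rcu-walk when the callback answers -ECHILD. A minimal standalone sketch of that control flow, with hypothetical helper names, could look like this:

#include <errno.h>
#include <stdio.h>

/* Stand-in for an operation that refuses to run in the lock-free mode. */
static int op_revalidate(int rcu_mode)
{
	return rcu_mode ? -ECHILD : 1;
}

/* Stand-in for the drop helper: 0 on success, nonzero on failure. */
static int drop_rcu(void)
{
	return 0;
}

static int revalidate(int *rcu_mode)
{
	int status = op_revalidate(*rcu_mode);

	if (status == -ECHILD) {
		if (drop_rcu())
			return status;	/* could not leave rcu mode: give up */
		*rcu_mode = 0;
		status = op_revalidate(*rcu_mode);
	}
	return status;
}

int main(void)
{
	int rcu = 1;

	printf("revalidate: %d (rcu=%d)\n", revalidate(&rcu), rcu);
	return 0;
}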
 static inline struct dentry *
 do_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	int status = dentry->d_op->d_revalidate(dentry, nd);
+	int status;
+
+	status = d_revalidate(dentry, nd);
 	if (unlikely(status <= 0)) {
 		/*
 		 * The dentry failed validation.
@@ -397,19 +625,36 @@
 		 * the dentry otherwise d_revalidate is asking us
 		 * to return a fail status.
 		 */
-		if (!status) {
+		if (status < 0) {
+			/* If we're in rcu-walk, we don't have a ref */
+			if (!(nd->flags & LOOKUP_RCU))
+				dput(dentry);
+			dentry = ERR_PTR(status);
+
+		} else {
+			/* Don't d_invalidate in rcu-walk mode */
+			if (nameidata_dentry_drop_rcu_maybe(nd, dentry))
+				return ERR_PTR(-ECHILD);
 			if (!d_invalidate(dentry)) {
 				dput(dentry);
 				dentry = NULL;
 			}
-		} else {
-			dput(dentry);
-			dentry = ERR_PTR(status);
 		}
 	}
 	return dentry;
 }
 
+static inline int need_reval_dot(struct dentry *dentry)
+{
+	if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
+		return 0;
+
+	if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)))
+		return 0;
+
+	return 1;
+}
+
 /*
  * force_reval_path - force revalidation of a dentry
  *
@@ -433,17 +678,19 @@
 
 	/*
 	 * only check on filesystems where it's possible for the dentry to
-	 * become stale. It's assumed that if this flag is set then the
-	 * d_revalidate op will also be defined.
+	 * become stale.
 	 */
-	if (!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT))
+	if (!need_reval_dot(dentry))
 		return 0;
 
-	status = dentry->d_op->d_revalidate(dentry, nd);
+	status = d_revalidate(dentry, nd);
 	if (status > 0)
 		return 0;
 
 	if (!status) {
+		/* Don't d_invalidate in rcu-walk mode */
+		if (nameidata_drop_rcu(nd))
+			return -ECHILD;
 		d_invalidate(dentry);
 		status = -ESTALE;
 	}
@@ -459,26 +706,27 @@
  * short-cut DAC fails, then call ->permission() to do more
  * complete permission check.
  */
-static int exec_permission(struct inode *inode)
+static inline int exec_permission(struct inode *inode, unsigned int flags)
 {
 	int ret;
 
 	if (inode->i_op->permission) {
-		ret = inode->i_op->permission(inode, MAY_EXEC);
-		if (!ret)
-			goto ok;
-		return ret;
+		ret = inode->i_op->permission(inode, MAY_EXEC, flags);
+	} else {
+		ret = acl_permission_check(inode, MAY_EXEC, flags,
+				inode->i_op->check_acl);
 	}
-	ret = acl_permission_check(inode, MAY_EXEC, inode->i_op->check_acl);
-	if (!ret)
+	if (likely(!ret))
 		goto ok;
+	if (ret == -ECHILD)
+		return ret;
 
 	if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
 		goto ok;
 
 	return ret;
 ok:
-	return security_inode_permission(inode, MAY_EXEC);
+	return security_inode_exec_permission(inode, flags);
 }
 
 static __always_inline void set_root(struct nameidata *nd)
@@ -489,8 +737,23 @@
 
 static int link_path_walk(const char *, struct nameidata *);
 
+static __always_inline void set_root_rcu(struct nameidata *nd)
+{
+	if (!nd->root.mnt) {
+		struct fs_struct *fs = current->fs;
+		unsigned seq;
+
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->root = fs->root;
+		} while (read_seqcount_retry(&fs->seq, seq));
+	}
+}
+
 static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
 {
+	int ret;
+
 	if (IS_ERR(link))
 		goto fail;
 
@@ -500,8 +763,10 @@
 		nd->path = nd->root;
 		path_get(&nd->root);
 	}
+	nd->inode = nd->path.dentry->d_inode;
 
-	return link_path_walk(link, nd);
+	ret = link_path_walk(link, nd);
+	return ret;
 fail:
 	path_put(&nd->path);
 	return PTR_ERR(link);
@@ -514,30 +779,34 @@
 		mntput(path->mnt);
 }
 
-static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
+static inline void path_to_nameidata(const struct path *path,
+					struct nameidata *nd)
 {
-	dput(nd->path.dentry);
-	if (nd->path.mnt != path->mnt) {
-		mntput(nd->path.mnt);
-		nd->path.mnt = path->mnt;
+	if (!(nd->flags & LOOKUP_RCU)) {
+		dput(nd->path.dentry);
+		if (nd->path.mnt != path->mnt)
+			mntput(nd->path.mnt);
 	}
+	nd->path.mnt = path->mnt;
 	nd->path.dentry = path->dentry;
 }
 
 static __always_inline int
-__do_follow_link(struct path *path, struct nameidata *nd, void **p)
+__do_follow_link(const struct path *link, struct nameidata *nd, void **p)
 {
 	int error;
-	struct dentry *dentry = path->dentry;
+	struct dentry *dentry = link->dentry;
 
-	touch_atime(path->mnt, dentry);
+	touch_atime(link->mnt, dentry);
 	nd_set_link(nd, NULL);
 
-	if (path->mnt != nd->path.mnt) {
-		path_to_nameidata(path, nd);
+	if (link->mnt != nd->path.mnt) {
+		path_to_nameidata(link, nd);
+		nd->inode = nd->path.dentry->d_inode;
 		dget(dentry);
 	}
-	mntget(path->mnt);
+	mntget(link->mnt);
+
 	nd->last_type = LAST_BIND;
 	*p = dentry->d_inode->i_op->follow_link(dentry, nd);
 	error = PTR_ERR(*p);
@@ -591,6 +860,20 @@
 	return err;
 }
 
+static int follow_up_rcu(struct path *path)
+{
+	struct vfsmount *parent;
+	struct dentry *mountpoint;
+
+	parent = path->mnt->mnt_parent;
+	if (parent == path->mnt)
+		return 0;
+	mountpoint = path->mnt->mnt_mountpoint;
+	path->dentry = mountpoint;
+	path->mnt = parent;
+	return 1;
+}
+
 int follow_up(struct path *path)
 {
 	struct vfsmount *parent;
@@ -612,9 +895,24 @@
 	return 1;
 }
 
-/* no need for dcache_lock, as serialization is taken care in
- * namespace.c
+/*
+ * serialization is taken care of in namespace.c
  */
+static void __follow_mount_rcu(struct nameidata *nd, struct path *path,
+				struct inode **inode)
+{
+	while (d_mountpoint(path->dentry)) {
+		struct vfsmount *mounted;
+		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
+		if (!mounted)
+			return;
+		path->mnt = mounted;
+		path->dentry = mounted->mnt_root;
+		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+		*inode = path->dentry->d_inode;
+	}
+}
+
 static int __follow_mount(struct path *path)
 {
 	int res = 0;
@@ -645,9 +943,6 @@
 	}
 }
 
-/* no need for dcache_lock, as serialization is taken care in
- * namespace.c
- */
 int follow_down(struct path *path)
 {
 	struct vfsmount *mounted;
@@ -663,7 +958,42 @@
 	return 0;
 }
 
-static __always_inline void follow_dotdot(struct nameidata *nd)
+static int follow_dotdot_rcu(struct nameidata *nd)
+{
+	struct inode *inode = nd->inode;
+
+	set_root_rcu(nd);
+
+	while (1) {
+		if (nd->path.dentry == nd->root.dentry &&
+		    nd->path.mnt == nd->root.mnt) {
+			break;
+		}
+		if (nd->path.dentry != nd->path.mnt->mnt_root) {
+			struct dentry *old = nd->path.dentry;
+			struct dentry *parent = old->d_parent;
+			unsigned seq;
+
+			seq = read_seqcount_begin(&parent->d_seq);
+			if (read_seqcount_retry(&old->d_seq, nd->seq))
+				return -ECHILD;
+			inode = parent->d_inode;
+			nd->path.dentry = parent;
+			nd->seq = seq;
+			break;
+		}
+		if (!follow_up_rcu(&nd->path))
+			break;
+		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+		inode = nd->path.dentry->d_inode;
+	}
+	__follow_mount_rcu(nd, &nd->path, &inode);
+	nd->inode = inode;
+
+	return 0;
+}
+
+static void follow_dotdot(struct nameidata *nd)
 {
 	set_root(nd);
 
@@ -684,6 +1014,7 @@
 			break;
 	}
 	follow_mount(&nd->path);
+	nd->inode = nd->path.dentry->d_inode;
 }
 
 /*
@@ -721,17 +1052,17 @@
  *  It _is_ time-critical.
  */
 static int do_lookup(struct nameidata *nd, struct qstr *name,
-		     struct path *path)
+			struct path *path, struct inode **inode)
 {
 	struct vfsmount *mnt = nd->path.mnt;
-	struct dentry *dentry, *parent;
+	struct dentry *dentry, *parent = nd->path.dentry;
 	struct inode *dir;
 	/*
 	 * See if the low-level filesystem might want
 	 * to use its own hash..
 	 */
-	if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
-		int err = nd->path.dentry->d_op->d_hash(nd->path.dentry, name);
+	if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
+		int err = parent->d_op->d_hash(parent, nd->inode, name);
 		if (err < 0)
 			return err;
 	}
@@ -741,21 +1072,44 @@
 	 * of a false negative due to a concurrent rename, we're going to
 	 * do the non-racy lookup, below.
 	 */
-	dentry = __d_lookup(nd->path.dentry, name);
-	if (!dentry)
-		goto need_lookup;
+	if (nd->flags & LOOKUP_RCU) {
+		unsigned seq;
+
+		*inode = nd->inode;
+		dentry = __d_lookup_rcu(parent, name, &seq, inode);
+		if (!dentry) {
+			if (nameidata_drop_rcu(nd))
+				return -ECHILD;
+			goto need_lookup;
+		}
+		/* Memory barrier in read_seqcount_begin of child is enough */
+		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
+			return -ECHILD;
+
+		nd->seq = seq;
+		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
+			goto need_revalidate;
+		path->mnt = mnt;
+		path->dentry = dentry;
+		__follow_mount_rcu(nd, path, inode);
+	} else {
+		dentry = __d_lookup(parent, name);
+		if (!dentry)
+			goto need_lookup;
 found:
-	if (dentry->d_op && dentry->d_op->d_revalidate)
-		goto need_revalidate;
+		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
+			goto need_revalidate;
 done:
-	path->mnt = mnt;
-	path->dentry = dentry;
-	__follow_mount(path);
+		path->mnt = mnt;
+		path->dentry = dentry;
+		__follow_mount(path);
+		*inode = path->dentry->d_inode;
+	}
 	return 0;
 
 need_lookup:
-	parent = nd->path.dentry;
 	dir = parent->d_inode;
+	BUG_ON(nd->inode != dir);
 
 	mutex_lock(&dir->i_mutex);
 	/*
@@ -817,7 +1171,6 @@
 static int link_path_walk(const char *name, struct nameidata *nd)
 {
 	struct path next;
-	struct inode *inode;
 	int err;
 	unsigned int lookup_flags = nd->flags;
 	
@@ -826,18 +1179,28 @@
 	if (!*name)
 		goto return_reval;
 
-	inode = nd->path.dentry->d_inode;
 	if (nd->depth)
 		lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
 
 	/* At this point we know we have a real path component. */
 	for(;;) {
+		struct inode *inode;
 		unsigned long hash;
 		struct qstr this;
 		unsigned int c;
 
 		nd->flags |= LOOKUP_CONTINUE;
-		err = exec_permission(inode);
+		if (nd->flags & LOOKUP_RCU) {
+			err = exec_permission(nd->inode, IPERM_FLAG_RCU);
+			if (err == -ECHILD) {
+				if (nameidata_drop_rcu(nd))
+					return -ECHILD;
+				goto exec_again;
+			}
+		} else {
+exec_again:
+			err = exec_permission(nd->inode, 0);
+		}
  		if (err)
 			break;
 
@@ -868,37 +1231,44 @@
 		if (this.name[0] == '.') switch (this.len) {
 			default:
 				break;
-			case 2:	
+			case 2:
 				if (this.name[1] != '.')
 					break;
-				follow_dotdot(nd);
-				inode = nd->path.dentry->d_inode;
+				if (nd->flags & LOOKUP_RCU) {
+					if (follow_dotdot_rcu(nd))
+						return -ECHILD;
+				} else
+					follow_dotdot(nd);
 				/* fallthrough */
 			case 1:
 				continue;
 		}
 		/* This does the actual lookups.. */
-		err = do_lookup(nd, &this, &next);
+		err = do_lookup(nd, &this, &next, &inode);
 		if (err)
 			break;
-
 		err = -ENOENT;
-		inode = next.dentry->d_inode;
 		if (!inode)
 			goto out_dput;
 
 		if (inode->i_op->follow_link) {
+			/* We commonly drop rcu-walk here */
+			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
+				return -ECHILD;
+			BUG_ON(inode != next.dentry->d_inode);
 			err = do_follow_link(&next, nd);
 			if (err)
 				goto return_err;
+			nd->inode = nd->path.dentry->d_inode;
 			err = -ENOENT;
-			inode = nd->path.dentry->d_inode;
-			if (!inode)
+			if (!nd->inode)
 				break;
-		} else
+		} else {
 			path_to_nameidata(&next, nd);
+			nd->inode = inode;
+		}
 		err = -ENOTDIR; 
-		if (!inode->i_op->lookup)
+		if (!nd->inode->i_op->lookup)
 			break;
 		continue;
 		/* here ends the main loop */
@@ -913,32 +1283,39 @@
 		if (this.name[0] == '.') switch (this.len) {
 			default:
 				break;
-			case 2:	
+			case 2:
 				if (this.name[1] != '.')
 					break;
-				follow_dotdot(nd);
-				inode = nd->path.dentry->d_inode;
+				if (nd->flags & LOOKUP_RCU) {
+					if (follow_dotdot_rcu(nd))
+						return -ECHILD;
+				} else
+					follow_dotdot(nd);
 				/* fallthrough */
 			case 1:
 				goto return_reval;
 		}
-		err = do_lookup(nd, &this, &next);
+		err = do_lookup(nd, &this, &next, &inode);
 		if (err)
 			break;
-		inode = next.dentry->d_inode;
 		if (follow_on_final(inode, lookup_flags)) {
+			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
+				return -ECHILD;
+			BUG_ON(inode != next.dentry->d_inode);
 			err = do_follow_link(&next, nd);
 			if (err)
 				goto return_err;
-			inode = nd->path.dentry->d_inode;
-		} else
+			nd->inode = nd->path.dentry->d_inode;
+		} else {
 			path_to_nameidata(&next, nd);
+			nd->inode = inode;
+		}
 		err = -ENOENT;
-		if (!inode)
+		if (!nd->inode)
 			break;
 		if (lookup_flags & LOOKUP_DIRECTORY) {
 			err = -ENOTDIR; 
-			if (!inode->i_op->lookup)
+			if (!nd->inode->i_op->lookup)
 				break;
 		}
 		goto return_base;
@@ -958,25 +1335,43 @@
 		 * We bypassed the ordinary revalidation routines.
 		 * We may need to check the cached dentry for staleness.
 		 */
-		if (nd->path.dentry && nd->path.dentry->d_sb &&
-		    (nd->path.dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) {
-			err = -ESTALE;
+		if (need_reval_dot(nd->path.dentry)) {
 			/* Note: we do not d_invalidate() */
-			if (!nd->path.dentry->d_op->d_revalidate(
-					nd->path.dentry, nd))
+			err = d_revalidate(nd->path.dentry, nd);
+			if (!err)
+				err = -ESTALE;
+			if (err < 0)
 				break;
 		}
 return_base:
+		if (nameidata_drop_rcu_last_maybe(nd))
+			return -ECHILD;
 		return 0;
 out_dput:
-		path_put_conditional(&next, nd);
+		if (!(nd->flags & LOOKUP_RCU))
+			path_put_conditional(&next, nd);
 		break;
 	}
-	path_put(&nd->path);
+	if (!(nd->flags & LOOKUP_RCU))
+		path_put(&nd->path);
 return_err:
 	return err;
 }
 
+static inline int path_walk_rcu(const char *name, struct nameidata *nd)
+{
+	current->total_link_count = 0;
+
+	return link_path_walk(name, nd);
+}
+
+static inline int path_walk_simple(const char *name, struct nameidata *nd)
+{
+	current->total_link_count = 0;
+
+	return link_path_walk(name, nd);
+}
+
 static int path_walk(const char *name, struct nameidata *nd)
 {
 	struct path save = nd->path;
@@ -1002,6 +1397,93 @@
 	return result;
 }
 
+static void path_finish_rcu(struct nameidata *nd)
+{
+	if (nd->flags & LOOKUP_RCU) {
+		/* RCU dangling. Cancel it. */
+		nd->flags &= ~LOOKUP_RCU;
+		nd->root.mnt = NULL;
+		rcu_read_unlock();
+		br_read_unlock(vfsmount_lock);
+	}
+	if (nd->file)
+		fput(nd->file);
+}
+
+static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
+{
+	int retval = 0;
+	int fput_needed;
+	struct file *file;
+
+	nd->last_type = LAST_ROOT; /* if there are only slashes... */
+	nd->flags = flags | LOOKUP_RCU;
+	nd->depth = 0;
+	nd->root.mnt = NULL;
+	nd->file = NULL;
+
+	if (*name == '/') {
+		struct fs_struct *fs = current->fs;
+		unsigned seq;
+
+		br_read_lock(vfsmount_lock);
+		rcu_read_lock();
+
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->root = fs->root;
+			nd->path = nd->root;
+			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		} while (read_seqcount_retry(&fs->seq, seq));
+
+	} else if (dfd == AT_FDCWD) {
+		struct fs_struct *fs = current->fs;
+		unsigned seq;
+
+		br_read_lock(vfsmount_lock);
+		rcu_read_lock();
+
+		do {
+			seq = read_seqcount_begin(&fs->seq);
+			nd->path = fs->pwd;
+			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		} while (read_seqcount_retry(&fs->seq, seq));
+
+	} else {
+		struct dentry *dentry;
+
+		file = fget_light(dfd, &fput_needed);
+		retval = -EBADF;
+		if (!file)
+			goto out_fail;
+
+		dentry = file->f_path.dentry;
+
+		retval = -ENOTDIR;
+		if (!S_ISDIR(dentry->d_inode->i_mode))
+			goto fput_fail;
+
+		retval = file_permission(file, MAY_EXEC);
+		if (retval)
+			goto fput_fail;
+
+		nd->path = file->f_path;
+		if (fput_needed)
+			nd->file = file;
+
+		nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+		br_read_lock(vfsmount_lock);
+		rcu_read_lock();
+	}
+	nd->inode = nd->path.dentry->d_inode;
+	return 0;
+
+fput_fail:
+	fput_light(file, fput_needed);
+out_fail:
+	return retval;
+}
+
 static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
 {
 	int retval = 0;
@@ -1042,6 +1524,7 @@
 
 		fput_light(file, fput_needed);
 	}
+	nd->inode = nd->path.dentry->d_inode;
 	return 0;
 
 fput_fail:
@@ -1054,16 +1537,53 @@
 static int do_path_lookup(int dfd, const char *name,
 				unsigned int flags, struct nameidata *nd)
 {
-	int retval = path_init(dfd, name, flags, nd);
-	if (!retval)
-		retval = path_walk(name, nd);
-	if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
-				nd->path.dentry->d_inode))
-		audit_inode(name, nd->path.dentry);
+	int retval;
+
+	/*
+	 * Path walking is largely split up into 2 different synchronisation
+	 * schemes, rcu-walk and ref-walk (explained in
+	 * Documentation/filesystems/path-lookup.txt). These share much of the
+	 * path walk code, but some things particularly setup, cleanup, and
+	 * following mounts are sufficiently divergent that functions are
+	 * duplicated. Typically there is a function foo(), and its RCU
+	 * analogue, foo_rcu().
+	 *
+	 * -ECHILD is the error number of choice (just to avoid clashes) that
+	 * is returned if some aspect of an rcu-walk fails. Such an error must
+	 * be handled by restarting a traditional ref-walk (which will always
+	 * be able to complete).
+	 */
+	retval = path_init_rcu(dfd, name, flags, nd);
+	if (unlikely(retval))
+		return retval;
+	retval = path_walk_rcu(name, nd);
+	path_finish_rcu(nd);
 	if (nd->root.mnt) {
 		path_put(&nd->root);
 		nd->root.mnt = NULL;
 	}
+
+	if (unlikely(retval == -ECHILD || retval == -ESTALE)) {
+		/* slower, locked walk */
+		if (retval == -ESTALE)
+			flags |= LOOKUP_REVAL;
+		retval = path_init(dfd, name, flags, nd);
+		if (unlikely(retval))
+			return retval;
+		retval = path_walk(name, nd);
+		if (nd->root.mnt) {
+			path_put(&nd->root);
+			nd->root.mnt = NULL;
+		}
+	}
+
+	if (likely(!retval)) {
+		if (unlikely(!audit_dummy_context())) {
+			if (nd->path.dentry && nd->inode)
+				audit_inode(name, nd->path.dentry);
+		}
+	}
+
 	return retval;
 }
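As the comment above spells out, do_path_lookup() now runs the walk at most twice: an rcu-walk attempt first, then a locked ref-walk restart if the first pass reports -ECHILD or -ESTALE (adding LOOKUP_REVAL in the stale case). A compressed sketch of that retry structure, with invented walk_fast()/walk_slow() stand-ins, reads:

#include <errno.h>
#include <stdio.h>

#define DEMO_REVAL 0x1	/* analogue of LOOKUP_REVAL */

static int walk_fast(const char *name, unsigned int flags)
{
	(void)name; (void)flags;
	return -ECHILD;		/* pretend rcu-walk could not complete */
}

static int walk_slow(const char *name, unsigned int flags)
{
	(void)name; (void)flags;
	return 0;		/* ref-walk always completes */
}

static int demo_lookup(const char *name, unsigned int flags)
{
	int err = walk_fast(name, flags);

	if (err == -ECHILD || err == -ESTALE) {
		if (err == -ESTALE)
			flags |= DEMO_REVAL;	/* force revalidation on retry */
		err = walk_slow(name, flags);
	}
	return err;
}

int main(void)
{
	printf("lookup: %d\n", demo_lookup("/tmp", 0));
	return 0;
}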
 
@@ -1106,10 +1626,11 @@
 	path_get(&nd->path);
 	nd->root = nd->path;
 	path_get(&nd->root);
+	nd->inode = nd->path.dentry->d_inode;
 
 	retval = path_walk(name, nd);
 	if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
-				nd->path.dentry->d_inode))
+				nd->inode))
 		audit_inode(name, nd->path.dentry);
 
 	path_put(&nd->root);
@@ -1125,7 +1646,7 @@
 	struct dentry *dentry;
 	int err;
 
-	err = exec_permission(inode);
+	err = exec_permission(inode, 0);
 	if (err)
 		return ERR_PTR(err);
 
@@ -1133,8 +1654,8 @@
 	 * See if the low-level filesystem might want
 	 * to use its own hash..
 	 */
-	if (base->d_op && base->d_op->d_hash) {
-		err = base->d_op->d_hash(base, name);
+	if (base->d_flags & DCACHE_OP_HASH) {
+		err = base->d_op->d_hash(base, inode, name);
 		dentry = ERR_PTR(err);
 		if (err < 0)
 			goto out;
@@ -1147,7 +1668,7 @@
 	 */
 	dentry = d_lookup(base, name);
 
-	if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
+	if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE))
 		dentry = do_revalidate(dentry, nd);
 
 	if (!dentry)
@@ -1448,8 +1969,9 @@
 	return break_lease(inode, flag);
 }
 
-static int handle_truncate(struct path *path)
+static int handle_truncate(struct file *filp)
 {
+	struct path *path = &filp->f_path;
 	struct inode *inode = path->dentry->d_inode;
 	int error = get_write_access(inode);
 	if (error)
@@ -1463,7 +1985,7 @@
 	if (!error) {
 		error = do_truncate(path->dentry, 0,
 				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
-				    NULL);
+				    filp);
 	}
 	put_write_access(inode);
 	return error;
@@ -1490,6 +2012,7 @@
 	mutex_unlock(&dir->d_inode->i_mutex);
 	dput(nd->path.dentry);
 	nd->path.dentry = path->dentry;
+
 	if (error)
 		return error;
 	/* Don't check for write permission, don't truncate */
@@ -1560,7 +2083,7 @@
 	}
 	if (!IS_ERR(filp)) {
 		if (will_truncate) {
-			error = handle_truncate(&nd->path);
+			error = handle_truncate(filp);
 			if (error) {
 				fput(filp);
 				filp = ERR_PTR(error);
@@ -1584,6 +2107,9 @@
 	return ERR_PTR(error);
 }
 
+/*
+ * Handle O_CREAT case for do_filp_open
+ */
 static struct file *do_last(struct nameidata *nd, struct path *path,
 			    int open_flag, int acc_mode,
 			    int mode, const char *pathname)
@@ -1597,50 +2123,27 @@
 		follow_dotdot(nd);
 		dir = nd->path.dentry;
 	case LAST_DOT:
-		if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
-			if (!dir->d_op->d_revalidate(dir, nd)) {
-				error = -ESTALE;
+		if (need_reval_dot(dir)) {
+			int status = d_revalidate(nd->path.dentry, nd);
+			if (!status)
+				status = -ESTALE;
+			if (status < 0) {
+				error = status;
 				goto exit;
 			}
 		}
 		/* fallthrough */
 	case LAST_ROOT:
-		if (open_flag & O_CREAT)
-			goto exit;
-		/* fallthrough */
+		goto exit;
 	case LAST_BIND:
 		audit_inode(pathname, dir);
 		goto ok;
 	}
 
 	/* trailing slashes? */
-	if (nd->last.name[nd->last.len]) {
-		if (open_flag & O_CREAT)
-			goto exit;
-		nd->flags |= LOOKUP_DIRECTORY | LOOKUP_FOLLOW;
-	}
+	if (nd->last.name[nd->last.len])
+		goto exit;
 
-	/* just plain open? */
-	if (!(open_flag & O_CREAT)) {
-		error = do_lookup(nd, &nd->last, path);
-		if (error)
-			goto exit;
-		error = -ENOENT;
-		if (!path->dentry->d_inode)
-			goto exit_dput;
-		if (path->dentry->d_inode->i_op->follow_link)
-			return NULL;
-		error = -ENOTDIR;
-		if (nd->flags & LOOKUP_DIRECTORY) {
-			if (!path->dentry->d_inode->i_op->lookup)
-				goto exit_dput;
-		}
-		path_to_nameidata(path, nd);
-		audit_inode(pathname, nd->path.dentry);
-		goto ok;
-	}
-
-	/* OK, it's O_CREAT */
 	mutex_lock(&dir->d_inode->i_mutex);
 
 	path->dentry = lookup_hash(nd);
@@ -1711,8 +2214,9 @@
 		return NULL;
 
 	path_to_nameidata(path, nd);
+	nd->inode = path->dentry->d_inode;
 	error = -EISDIR;
-	if (S_ISDIR(path->dentry->d_inode->i_mode))
+	if (S_ISDIR(nd->inode->i_mode))
 		goto exit;
 ok:
 	filp = finish_open(nd, open_flag, acc_mode);
@@ -1743,7 +2247,7 @@
 	struct path path;
 	int count = 0;
 	int flag = open_to_namei_flags(open_flag);
-	int force_reval = 0;
+	int flags;
 
 	if (!(open_flag & O_CREAT))
 		mode = 0;
@@ -1772,54 +2276,85 @@
 	if (open_flag & O_APPEND)
 		acc_mode |= MAY_APPEND;
 
-	/* find the parent */
-reval:
-	error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
-	if (error)
-		return ERR_PTR(error);
-	if (force_reval)
-		nd.flags |= LOOKUP_REVAL;
-
-	current->total_link_count = 0;
-	error = link_path_walk(pathname, &nd);
-	if (error) {
-		filp = ERR_PTR(error);
-		goto out;
+	flags = LOOKUP_OPEN;
+	if (open_flag & O_CREAT) {
+		flags |= LOOKUP_CREATE;
+		if (open_flag & O_EXCL)
+			flags |= LOOKUP_EXCL;
 	}
-	if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT))
+	if (open_flag & O_DIRECTORY)
+		flags |= LOOKUP_DIRECTORY;
+	if (!(open_flag & O_NOFOLLOW))
+		flags |= LOOKUP_FOLLOW;
+
+	filp = get_empty_filp();
+	if (!filp)
+		return ERR_PTR(-ENFILE);
+
+	filp->f_flags = open_flag;
+	nd.intent.open.file = filp;
+	nd.intent.open.flags = flag;
+	nd.intent.open.create_mode = mode;
+
+	if (open_flag & O_CREAT)
+		goto creat;
+
+	/* !O_CREAT, simple open */
+	error = do_path_lookup(dfd, pathname, flags, &nd);
+	if (unlikely(error))
+		goto out_filp;
+	error = -ELOOP;
+	if (!(nd.flags & LOOKUP_FOLLOW)) {
+		if (nd.inode->i_op->follow_link)
+			goto out_path;
+	}
+	error = -ENOTDIR;
+	if (nd.flags & LOOKUP_DIRECTORY) {
+		if (!nd.inode->i_op->lookup)
+			goto out_path;
+	}
+	audit_inode(pathname, nd.path.dentry);
+	filp = finish_open(&nd, open_flag, acc_mode);
+	return filp;
+
+creat:
+	/* OK, have to create the file. Find the parent. */
+	error = path_init_rcu(dfd, pathname,
+			LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd);
+	if (error)
+		goto out_filp;
+	error = path_walk_rcu(pathname, &nd);
+	path_finish_rcu(&nd);
+	if (unlikely(error == -ECHILD || error == -ESTALE)) {
+		/* slower, locked walk */
+		if (error == -ESTALE) {
+reval:
+			flags |= LOOKUP_REVAL;
+		}
+		error = path_init(dfd, pathname,
+				LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd);
+		if (error)
+			goto out_filp;
+
+		error = path_walk_simple(pathname, &nd);
+	}
+	if (unlikely(error))
+		goto out_filp;
+	if (unlikely(!audit_dummy_context()))
 		audit_inode(pathname, nd.path.dentry);
 
 	/*
 	 * We have the parent and last component.
 	 */
-
-	error = -ENFILE;
-	filp = get_empty_filp();
-	if (filp == NULL)
-		goto exit_parent;
-	nd.intent.open.file = filp;
-	filp->f_flags = open_flag;
-	nd.intent.open.flags = flag;
-	nd.intent.open.create_mode = mode;
-	nd.flags &= ~LOOKUP_PARENT;
-	nd.flags |= LOOKUP_OPEN;
-	if (open_flag & O_CREAT) {
-		nd.flags |= LOOKUP_CREATE;
-		if (open_flag & O_EXCL)
-			nd.flags |= LOOKUP_EXCL;
-	}
-	if (open_flag & O_DIRECTORY)
-		nd.flags |= LOOKUP_DIRECTORY;
-	if (!(open_flag & O_NOFOLLOW))
-		nd.flags |= LOOKUP_FOLLOW;
+	nd.flags = flags;
 	filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
 	while (unlikely(!filp)) { /* trailing symlink */
-		struct path holder;
-		struct inode *inode = path.dentry->d_inode;
+		struct path link = path;
+		struct inode *linki = link.dentry->d_inode;
 		void *cookie;
 		error = -ELOOP;
 		/* S_ISDIR part is a temporary automount kludge */
-		if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(inode->i_mode))
+		if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(linki->i_mode))
 			goto exit_dput;
 		if (count++ == 32)
 			goto exit_dput;
@@ -1835,41 +2370,37 @@
 		 * just set LAST_BIND.
 		 */
 		nd.flags |= LOOKUP_PARENT;
-		error = security_inode_follow_link(path.dentry, &nd);
+		error = security_inode_follow_link(link.dentry, &nd);
 		if (error)
 			goto exit_dput;
-		error = __do_follow_link(&path, &nd, &cookie);
+		error = __do_follow_link(&link, &nd, &cookie);
 		if (unlikely(error)) {
+			if (!IS_ERR(cookie) && linki->i_op->put_link)
+				linki->i_op->put_link(link.dentry, &nd, cookie);
 			/* nd.path had been dropped */
-			if (!IS_ERR(cookie) && inode->i_op->put_link)
-				inode->i_op->put_link(path.dentry, &nd, cookie);
-			path_put(&path);
-			release_open_intent(&nd);
-			filp = ERR_PTR(error);
-			goto out;
+			nd.path = link;
+			goto out_path;
 		}
-		holder = path;
 		nd.flags &= ~LOOKUP_PARENT;
 		filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
-		if (inode->i_op->put_link)
-			inode->i_op->put_link(holder.dentry, &nd, cookie);
-		path_put(&holder);
+		if (linki->i_op->put_link)
+			linki->i_op->put_link(link.dentry, &nd, cookie);
+		path_put(&link);
 	}
 out:
 	if (nd.root.mnt)
 		path_put(&nd.root);
-	if (filp == ERR_PTR(-ESTALE) && !force_reval) {
-		force_reval = 1;
+	if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL))
 		goto reval;
-	}
 	return filp;
 
 exit_dput:
 	path_put_conditional(&path, &nd);
+out_path:
+	path_put(&nd.path);
+out_filp:
 	if (!IS_ERR(nd.intent.open.file))
 		release_open_intent(&nd);
-exit_parent:
-	path_put(&nd.path);
 	filp = ERR_PTR(error);
 	goto out;
 }
@@ -2130,12 +2661,10 @@
 {
 	dget(dentry);
 	shrink_dcache_parent(dentry);
-	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) == 2)
+	if (dentry->d_count == 2)
 		__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
 }
 
 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
diff --git a/fs/namespace.c b/fs/namespace.c
index 3dbfc07..3ddfd90 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -138,6 +138,64 @@
 	mnt->mnt_group_id = 0;
 }
 
+/*
+ * vfsmount lock must be held for read
+ */
+static inline void mnt_add_count(struct vfsmount *mnt, int n)
+{
+#ifdef CONFIG_SMP
+	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
+#else
+	preempt_disable();
+	mnt->mnt_count += n;
+	preempt_enable();
+#endif
+}
+
+static inline void mnt_set_count(struct vfsmount *mnt, int n)
+{
+#ifdef CONFIG_SMP
+	this_cpu_write(mnt->mnt_pcp->mnt_count, n);
+#else
+	mnt->mnt_count = n;
+#endif
+}
+
+/*
+ * vfsmount lock must be held for read
+ */
+static inline void mnt_inc_count(struct vfsmount *mnt)
+{
+	mnt_add_count(mnt, 1);
+}
+
+/*
+ * vfsmount lock must be held for read
+ */
+static inline void mnt_dec_count(struct vfsmount *mnt)
+{
+	mnt_add_count(mnt, -1);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
+unsigned int mnt_get_count(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	unsigned int count = atomic_read(&mnt->mnt_longrefs);
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
+	}
+
+	return count;
+#else
+	return mnt->mnt_count;
+#endif
+}
+
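The mnt_*_count() helpers above keep reference-count updates strictly per-cpu, so mntget()/mntput() never bounce a shared cache line; the price is that an exact total is only available by summing every slot while holding the write-side lock. A userspace approximation of that trade-off (invented names, simplified slot selection):

#include <pthread.h>
#include <stdio.h>

#define NSLOTS 4

struct demo_mnt {
	long count[NSLOTS];		/* one counter per "cpu" */
	pthread_mutex_t write_lock;	/* stands in for the vfsmount write lock */
};

static void demo_add_count(struct demo_mnt *m, int slot, int n)
{
	m->count[slot] += n;		/* cheap, no cross-slot contention */
}

static long demo_get_count(struct demo_mnt *m)
{
	long total = 0;

	pthread_mutex_lock(&m->write_lock);
	for (int i = 0; i < NSLOTS; i++)
		total += m->count[i];
	pthread_mutex_unlock(&m->write_lock);
	return total;
}

int main(void)
{
	struct demo_mnt m = { .write_lock = PTHREAD_MUTEX_INITIALIZER };

	demo_add_count(&m, 0, 1);	/* get a reference on cpu 0 */
	demo_add_count(&m, 2, 1);	/* get a reference on cpu 2 */
	demo_add_count(&m, 0, -1);	/* put it back on cpu 0 */
	printf("total refs: %ld\n", demo_get_count(&m));
	return 0;
}

mnt_get_count() in the patch does exactly this summation, which is why callers such as do_umount() and may_umount_tree() now take the write lock just to read an exact count.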
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
 	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
@@ -154,7 +212,17 @@
 				goto out_free_id;
 		}
 
-		atomic_set(&mnt->mnt_count, 1);
+#ifdef CONFIG_SMP
+		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
+		if (!mnt->mnt_pcp)
+			goto out_free_devname;
+
+		atomic_set(&mnt->mnt_longrefs, 1);
+#else
+		mnt->mnt_count = 1;
+		mnt->mnt_writers = 0;
+#endif
+
 		INIT_LIST_HEAD(&mnt->mnt_hash);
 		INIT_LIST_HEAD(&mnt->mnt_child);
 		INIT_LIST_HEAD(&mnt->mnt_mounts);
@@ -166,13 +234,6 @@
 #ifdef CONFIG_FSNOTIFY
 		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
 #endif
-#ifdef CONFIG_SMP
-		mnt->mnt_writers = alloc_percpu(int);
-		if (!mnt->mnt_writers)
-			goto out_free_devname;
-#else
-		mnt->mnt_writers = 0;
-#endif
 	}
 	return mnt;
 
@@ -216,32 +277,32 @@
 }
 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
 
-static inline void inc_mnt_writers(struct vfsmount *mnt)
+static inline void mnt_inc_writers(struct vfsmount *mnt)
 {
 #ifdef CONFIG_SMP
-	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
+	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
 #else
 	mnt->mnt_writers++;
 #endif
 }
 
-static inline void dec_mnt_writers(struct vfsmount *mnt)
+static inline void mnt_dec_writers(struct vfsmount *mnt)
 {
 #ifdef CONFIG_SMP
-	(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--;
+	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
 #else
 	mnt->mnt_writers--;
 #endif
 }
 
-static unsigned int count_mnt_writers(struct vfsmount *mnt)
+static unsigned int mnt_get_writers(struct vfsmount *mnt)
 {
 #ifdef CONFIG_SMP
 	unsigned int count = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		count += *per_cpu_ptr(mnt->mnt_writers, cpu);
+		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
 	}
 
 	return count;
@@ -273,9 +334,9 @@
 	int ret = 0;
 
 	preempt_disable();
-	inc_mnt_writers(mnt);
+	mnt_inc_writers(mnt);
 	/*
-	 * The store to inc_mnt_writers must be visible before we pass
+	 * The store to mnt_inc_writers must be visible before we pass
 	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
 	 * incremented count after it has set MNT_WRITE_HOLD.
 	 */
@@ -289,7 +350,7 @@
 	 */
 	smp_rmb();
 	if (__mnt_is_readonly(mnt)) {
-		dec_mnt_writers(mnt);
+		mnt_dec_writers(mnt);
 		ret = -EROFS;
 		goto out;
 	}
@@ -317,7 +378,7 @@
 	if (__mnt_is_readonly(mnt))
 		return -EROFS;
 	preempt_disable();
-	inc_mnt_writers(mnt);
+	mnt_inc_writers(mnt);
 	preempt_enable();
 	return 0;
 }
@@ -351,7 +412,7 @@
 void mnt_drop_write(struct vfsmount *mnt)
 {
 	preempt_disable();
-	dec_mnt_writers(mnt);
+	mnt_dec_writers(mnt);
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(mnt_drop_write);
@@ -384,7 +445,7 @@
 	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
 	 * we're counting up here.
 	 */
-	if (count_mnt_writers(mnt) > 0)
+	if (mnt_get_writers(mnt) > 0)
 		ret = -EBUSY;
 	else
 		mnt->mnt_flags |= MNT_READONLY;
@@ -418,7 +479,7 @@
 	kfree(mnt->mnt_devname);
 	mnt_free_id(mnt);
 #ifdef CONFIG_SMP
-	free_percpu(mnt->mnt_writers);
+	free_percpu(mnt->mnt_pcp);
 #endif
 	kmem_cache_free(mnt_cache, mnt);
 }
@@ -492,6 +553,27 @@
 }
 
 /*
+ * Clear dentry's mounted state if it has no remaining mounts.
+ * vfsmount_lock must be held for write.
+ */
+static void dentry_reset_mounted(struct vfsmount *mnt, struct dentry *dentry)
+{
+	unsigned u;
+
+	for (u = 0; u < HASH_SIZE; u++) {
+		struct vfsmount *p;
+
+		list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
+			if (p->mnt_mountpoint == dentry)
+				return;
+		}
+	}
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags &= ~DCACHE_MOUNTED;
+	spin_unlock(&dentry->d_lock);
+}
+
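dentry_reset_mounted() above may only clear DCACHE_MOUNTED once no mount in the hash table still uses the dentry as its mountpoint, since several mounts can sit on the same directory. The same idea in a self-contained userspace sketch (invented types and names):

#include <stdbool.h>
#include <stdio.h>

struct demo_dentry { bool mounted; };
struct demo_mount  { struct demo_dentry *mountpoint; };

static void reset_mounted(struct demo_dentry *d,
			  struct demo_mount *mounts, int nr)
{
	for (int i = 0; i < nr; i++)
		if (mounts[i].mountpoint == d)
			return;		/* still a mountpoint somewhere */
	d->mounted = false;
}

int main(void)
{
	struct demo_dentry d = { .mounted = true };
	struct demo_mount table[2] = {
		{ .mountpoint = &d }, { .mountpoint = NULL }
	};

	reset_mounted(&d, table, 2);	/* one mount left: flag stays set */
	printf("mounted=%d\n", d.mounted);

	table[0].mountpoint = NULL;
	reset_mounted(&d, table, 2);	/* no mounts left: flag cleared */
	printf("mounted=%d\n", d.mounted);
	return 0;
}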
+/*
  * vfsmount lock must be held for write
  */
 static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
@@ -502,7 +584,7 @@
 	mnt->mnt_mountpoint = mnt->mnt_root;
 	list_del_init(&mnt->mnt_child);
 	list_del_init(&mnt->mnt_hash);
-	old_path->dentry->d_mounted--;
+	dentry_reset_mounted(old_path->mnt, old_path->dentry);
 }
 
 /*
@@ -513,7 +595,9 @@
 {
 	child_mnt->mnt_parent = mntget(mnt);
 	child_mnt->mnt_mountpoint = dget(dentry);
-	dentry->d_mounted++;
+	spin_lock(&dentry->d_lock);
+	dentry->d_flags |= DCACHE_MOUNTED;
+	spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -629,9 +713,10 @@
 	return NULL;
 }
 
-static inline void __mntput(struct vfsmount *mnt)
+static inline void mntfree(struct vfsmount *mnt)
 {
 	struct super_block *sb = mnt->mnt_sb;
+
 	/*
 	 * This probably indicates that somebody messed
 	 * up a mnt_want/drop_write() pair.  If this
@@ -639,38 +724,123 @@
 	 * to make r/w->r/o transitions.
 	 */
 	/*
-	 * atomic_dec_and_lock() used to deal with ->mnt_count decrements
-	 * provides barriers, so count_mnt_writers() below is safe.  AV
+	 * The locking used to deal with mnt_count decrement provides barriers,
+	 * so mnt_get_writers() below is safe.
 	 */
-	WARN_ON(count_mnt_writers(mnt));
+	WARN_ON(mnt_get_writers(mnt));
 	fsnotify_vfsmount_delete(mnt);
 	dput(mnt->mnt_root);
 	free_vfsmnt(mnt);
 	deactivate_super(sb);
 }
 
-void mntput_no_expire(struct vfsmount *mnt)
+#ifdef CONFIG_SMP
+static inline void __mntput(struct vfsmount *mnt, int longrefs)
 {
-repeat:
-	if (atomic_add_unless(&mnt->mnt_count, -1, 1))
+	if (!longrefs) {
+put_again:
+		br_read_lock(vfsmount_lock);
+		if (likely(atomic_read(&mnt->mnt_longrefs))) {
+			mnt_dec_count(mnt);
+			br_read_unlock(vfsmount_lock);
+			return;
+		}
+		br_read_unlock(vfsmount_lock);
+	} else {
+		BUG_ON(!atomic_read(&mnt->mnt_longrefs));
+		if (atomic_add_unless(&mnt->mnt_longrefs, -1, 1))
+			return;
+	}
+
+	br_write_lock(vfsmount_lock);
+	if (!longrefs)
+		mnt_dec_count(mnt);
+	else
+		atomic_dec(&mnt->mnt_longrefs);
+	if (mnt_get_count(mnt)) {
+		br_write_unlock(vfsmount_lock);
+		return;
+	}
+	if (unlikely(mnt->mnt_pinned)) {
+		mnt_add_count(mnt, mnt->mnt_pinned + 1);
+		mnt->mnt_pinned = 0;
+		br_write_unlock(vfsmount_lock);
+		acct_auto_close_mnt(mnt);
+		goto put_again;
+	}
+	br_write_unlock(vfsmount_lock);
+	mntfree(mnt);
+}
+#else
+static inline void __mntput(struct vfsmount *mnt, int longrefs)
+{
+put_again:
+	mnt_dec_count(mnt);
+	if (likely(mnt_get_count(mnt)))
 		return;
 	br_write_lock(vfsmount_lock);
-	if (!atomic_dec_and_test(&mnt->mnt_count)) {
+	if (unlikely(mnt->mnt_pinned)) {
+		mnt_add_count(mnt, mnt->mnt_pinned + 1);
+		mnt->mnt_pinned = 0;
 		br_write_unlock(vfsmount_lock);
-		return;
+		acct_auto_close_mnt(mnt);
+		goto put_again;
 	}
-	if (likely(!mnt->mnt_pinned)) {
-		br_write_unlock(vfsmount_lock);
-		__mntput(mnt);
-		return;
-	}
-	atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
-	mnt->mnt_pinned = 0;
 	br_write_unlock(vfsmount_lock);
-	acct_auto_close_mnt(mnt);
-	goto repeat;
+	mntfree(mnt);
 }
-EXPORT_SYMBOL(mntput_no_expire);
+#endif
+
+static void mntput_no_expire(struct vfsmount *mnt)
+{
+	__mntput(mnt, 0);
+}
+
+void mntput(struct vfsmount *mnt)
+{
+	if (mnt) {
+		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
+		if (unlikely(mnt->mnt_expiry_mark))
+			mnt->mnt_expiry_mark = 0;
+		__mntput(mnt, 0);
+	}
+}
+EXPORT_SYMBOL(mntput);
+
+struct vfsmount *mntget(struct vfsmount *mnt)
+{
+	if (mnt)
+		mnt_inc_count(mnt);
+	return mnt;
+}
+EXPORT_SYMBOL(mntget);
+
+void mntput_long(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	if (mnt) {
+		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
+		if (unlikely(mnt->mnt_expiry_mark))
+			mnt->mnt_expiry_mark = 0;
+		__mntput(mnt, 1);
+	}
+#else
+	mntput(mnt);
+#endif
+}
+EXPORT_SYMBOL(mntput_long);
+
+struct vfsmount *mntget_long(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+	if (mnt)
+		atomic_inc(&mnt->mnt_longrefs);
+	return mnt;
+#else
+	return mntget(mnt);
+#endif
+}
+EXPORT_SYMBOL(mntget_long);
 
 void mnt_pin(struct vfsmount *mnt)
 {
@@ -678,19 +848,17 @@
 	mnt->mnt_pinned++;
 	br_write_unlock(vfsmount_lock);
 }
-
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *mnt)
 {
 	br_write_lock(vfsmount_lock);
 	if (mnt->mnt_pinned) {
-		atomic_inc(&mnt->mnt_count);
+		mnt_inc_count(mnt);
 		mnt->mnt_pinned--;
 	}
 	br_write_unlock(vfsmount_lock);
 }
-
 EXPORT_SYMBOL(mnt_unpin);
 
 static inline void mangle(struct seq_file *m, const char *s)
@@ -985,12 +1153,13 @@
 	int minimum_refs = 0;
 	struct vfsmount *p;
 
-	br_read_lock(vfsmount_lock);
+	/* write lock needed for mnt_get_count */
+	br_write_lock(vfsmount_lock);
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
-		actual_refs += atomic_read(&p->mnt_count);
+		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
-	br_read_unlock(vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -1017,10 +1186,10 @@
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	br_read_lock(vfsmount_lock);
+	br_write_lock(vfsmount_lock);
 	if (propagate_mount_busy(mnt, 2))
 		ret = 0;
-	br_read_unlock(vfsmount_lock);
+	br_write_unlock(vfsmount_lock);
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1047,7 +1216,7 @@
 			dput(dentry);
 			mntput(m);
 		}
-		mntput(mnt);
+		mntput_long(mnt);
 	}
 }
 
@@ -1073,7 +1242,7 @@
 		list_del_init(&p->mnt_child);
 		if (p->mnt_parent != p) {
 			p->mnt_parent->mnt_ghosts++;
-			p->mnt_mountpoint->d_mounted--;
+			dentry_reset_mounted(p->mnt_parent, p->mnt_mountpoint);
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
 	}
@@ -1102,8 +1271,16 @@
 		    flags & (MNT_FORCE | MNT_DETACH))
 			return -EINVAL;
 
-		if (atomic_read(&mnt->mnt_count) != 2)
+		/*
+		 * probably don't strictly need the lock here if we examined
+		 * all race cases, but it's a slowpath.
+		 */
+		br_write_lock(vfsmount_lock);
+		if (mnt_get_count(mnt) != 2) {
+			br_write_unlock(vfsmount_lock);
 			return -EBUSY;
+		}
+		br_write_unlock(vfsmount_lock);
 
 		if (!xchg(&mnt->mnt_expiry_mark, 1))
 			return -EAGAIN;
@@ -1792,7 +1969,7 @@
 
 unlock:
 	up_write(&namespace_sem);
-	mntput(newmnt);
+	mntput_long(newmnt);
 	return err;
 }
 
@@ -2125,11 +2302,11 @@
 		if (fs) {
 			if (p == fs->root.mnt) {
 				rootmnt = p;
-				fs->root.mnt = mntget(q);
+				fs->root.mnt = mntget_long(q);
 			}
 			if (p == fs->pwd.mnt) {
 				pwdmnt = p;
-				fs->pwd.mnt = mntget(q);
+				fs->pwd.mnt = mntget_long(q);
 			}
 		}
 		p = next_mnt(p, mnt_ns->root);
@@ -2138,9 +2315,9 @@
 	up_write(&namespace_sem);
 
 	if (rootmnt)
-		mntput(rootmnt);
+		mntput_long(rootmnt);
 	if (pwdmnt)
-		mntput(pwdmnt);
+		mntput_long(pwdmnt);
 
 	return new_ns;
 }
@@ -2327,6 +2504,7 @@
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
 	br_write_unlock(vfsmount_lock);
 	chroot_fs_refs(&root, &new);
+
 	error = 0;
 	path_put(&root_parent);
 	path_put(&parent_path);
@@ -2353,6 +2531,7 @@
 	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
 	if (IS_ERR(mnt))
 		panic("Can't create rootfs");
+
 	ns = create_mnt_ns(mnt);
 	if (IS_ERR(ns))
 		panic("Can't allocate initial namespace");
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index f22b12e..f6946bb 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -17,12 +17,11 @@
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
+#include <linux/namei.h>
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
 
-#include <linux/ncp_fs.h>
-
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static void ncp_read_volume_list(struct file *, void *, filldir_t,
 				struct ncp_cache_control *);
@@ -74,11 +73,14 @@
  * Dentry operations routines
  */
 static int ncp_lookup_validate(struct dentry *, struct nameidata *);
-static int ncp_hash_dentry(struct dentry *, struct qstr *);
-static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
-static int ncp_delete_dentry(struct dentry *);
+static int ncp_hash_dentry(const struct dentry *, const struct inode *,
+		struct qstr *);
+static int ncp_compare_dentry(const struct dentry *, const struct inode *,
+		const struct dentry *, const struct inode *,
+		unsigned int, const char *, const struct qstr *);
+static int ncp_delete_dentry(const struct dentry *);
 
-static const struct dentry_operations ncp_dentry_operations =
+const struct dentry_operations ncp_dentry_operations =
 {
 	.d_revalidate	= ncp_lookup_validate,
 	.d_hash		= ncp_hash_dentry,
@@ -86,14 +88,6 @@
 	.d_delete	= ncp_delete_dentry,
 };
 
-const struct dentry_operations ncp_root_dentry_operations =
-{
-	.d_hash		= ncp_hash_dentry,
-	.d_compare	= ncp_compare_dentry,
-	.d_delete	= ncp_delete_dentry,
-};
-
-
 #define ncp_namespace(i)	(NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber])
 
 static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
@@ -113,10 +107,10 @@
 
 #define ncp_preserve_case(i)	(ncp_namespace(i) != NW_NS_DOS)
 
-static inline int ncp_case_sensitive(struct dentry *dentry)
+static inline int ncp_case_sensitive(const struct inode *i)
 {
 #ifdef CONFIG_NCPFS_NFS_NS
-	return ncp_namespace(dentry->d_inode) == NW_NS_NFS;
+	return ncp_namespace(i) == NW_NS_NFS;
 #else
 	return 0;
 #endif /* CONFIG_NCPFS_NFS_NS */
@@ -127,14 +121,16 @@
  * is case-sensitive.
  */
 static int 
-ncp_hash_dentry(struct dentry *dentry, struct qstr *this)
+ncp_hash_dentry(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *this)
 {
-	if (!ncp_case_sensitive(dentry)) {
+	if (!ncp_case_sensitive(inode)) {
+		struct super_block *sb = dentry->d_sb;
 		struct nls_table *t;
 		unsigned long hash;
 		int i;
 
-		t = NCP_IO_TABLE(dentry);
+		t = NCP_IO_TABLE(sb);
 		hash = init_name_hash();
 		for (i=0; i<this->len ; i++)
 			hash = partial_name_hash(ncp_tolower(t, this->name[i]),
@@ -145,15 +141,17 @@
 }
 
 static int
-ncp_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
+ncp_compare_dentry(const struct dentry *parent, const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	if (a->len != b->len)
+	if (len != name->len)
 		return 1;
 
-	if (ncp_case_sensitive(dentry))
-		return strncmp(a->name, b->name, a->len);
+	if (ncp_case_sensitive(pinode))
+		return strncmp(str, name->name, len);
 
-	return ncp_strnicmp(NCP_IO_TABLE(dentry), a->name, b->name, a->len);
+	return ncp_strnicmp(NCP_IO_TABLE(pinode->i_sb), str, name->name, len);
 }
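The reworked ncp_hash_dentry() folds each character to lower case before hashing, so names differing only in case land in the same bucket and are then matched by ncp_compare_dentry() with a case-insensitive comparison. A tiny standalone illustration of that idea follows; the mixing step is an arbitrary stand-in, not the kernel's partial_name_hash():

#include <ctype.h>
#include <stdio.h>

static unsigned long fold_hash(const char *name, unsigned int len)
{
	unsigned long hash = 0;

	for (unsigned int i = 0; i < len; i++)
		hash = (hash + tolower((unsigned char)name[i])) * 11;
	return hash;
}

int main(void)
{
	/* Both spellings hash identically, so d_lookup finds either one. */
	printf("%lx %lx\n", fold_hash("README", 6), fold_hash("readme", 6));
	return 0;
}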
 
 /*
@@ -162,7 +160,7 @@
  * Closing files can be safely postponed until iput() - it's done there anyway.
  */
 static int
-ncp_delete_dentry(struct dentry * dentry)
+ncp_delete_dentry(const struct dentry * dentry)
 {
 	struct inode *inode = dentry->d_inode;
 
@@ -301,6 +299,12 @@
 	int res, val = 0, len;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 
+	if (dentry == dentry->d_sb->s_root)
+		return 1;
+
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
 	parent = dget_parent(dentry);
 	dir = parent->d_inode;
 
@@ -384,21 +388,21 @@
 	}
 
 	/* If a pointer is invalid, we search the dentry. */
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dent = list_entry(next, struct dentry, d_u.d_child);
 		if ((unsigned long)dent->d_fsdata == fpos) {
 			if (dent->d_inode)
-				dget_locked(dent);
+				dget(dent);
 			else
 				dent = NULL;
-			spin_unlock(&dcache_lock);
+			spin_unlock(&parent->d_lock);
 			goto out;
 		}
 		next = next->next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 	return NULL;
 
 out:
@@ -592,7 +596,7 @@
 	qname.hash = full_name_hash(qname.name, qname.len);
 
 	if (dentry->d_op && dentry->d_op->d_hash)
-		if (dentry->d_op->d_hash(dentry, &qname) != 0)
+		if (dentry->d_op->d_hash(dentry, dentry->d_inode, &qname) != 0)
 			goto end_advance;
 
 	newdent = d_lookup(dentry, &qname);
@@ -611,35 +615,12 @@
 			shrink_dcache_parent(newdent);
 
 		/*
-		 * It is not as dangerous as it looks.  NetWare's OS2 namespace is
-		 * case preserving yet case insensitive.  So we update dentry's name
-		 * as received from server.  We found dentry via d_lookup with our
-		 * hash, so we know that hash does not change, and so replacing name
-		 * should be reasonably safe.
+		 * NetWare's OS2 namespace is case preserving yet case
+		 * insensitive.  So we update dentry's name as received from
+		 * server. Parent dir's i_mutex is locked because we're in
+		 * readdir.
 		 */
-		if (qname.len == newdent->d_name.len &&
-		    memcmp(newdent->d_name.name, qname.name, newdent->d_name.len)) {
-			struct inode *inode = newdent->d_inode;
-
-			/*
-			 * Inside ncpfs all uses of d_name are either for debugging,
-			 * or on functions which acquire inode mutex (mknod, creat,
-			 * lookup).  So grab i_mutex here, to be sure.  d_path
-			 * uses dcache_lock when generating path, so we should too.
-			 * And finally d_compare is protected by dentry's d_lock, so
-			 * here we go.
-			 */
-			if (inode)
-				mutex_lock(&inode->i_mutex);
-			spin_lock(&dcache_lock);
-			spin_lock(&newdent->d_lock);
-			memcpy((char *) newdent->d_name.name, qname.name,
-								newdent->d_name.len);
-			spin_unlock(&newdent->d_lock);
-			spin_unlock(&dcache_lock);
-			if (inode)
-				mutex_unlock(&inode->i_mutex);
-		}
+		dentry_update_name_case(newdent, &qname);
 	}
 
 	if (!newdent->d_inode) {
@@ -649,7 +630,6 @@
 		entry->ino = iunique(dir->i_sb, 2);
 		inode = ncp_iget(dir->i_sb, entry);
 		if (inode) {
-			newdent->d_op = &ncp_dentry_operations;
 			d_instantiate(newdent, inode);
 			if (!hashed)
 				d_rehash(newdent);
@@ -657,7 +637,7 @@
 	} else {
 		struct inode *inode = newdent->d_inode;
 
-		mutex_lock(&inode->i_mutex);
+		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
 		ncp_update_inode2(inode, entry);
 		mutex_unlock(&inode->i_mutex);
 	}
@@ -905,7 +885,6 @@
 	if (inode) {
 		ncp_new_dentry(dentry);
 add_entry:
-		dentry->d_op = &ncp_dentry_operations;
 		d_add(dentry, inode);
 		error = 0;
 	}
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index cb50aaf..0ed65e0 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -18,8 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 
-#include <linux/ncp_fs.h>
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static int ncp_fsync(struct file *file, int datasync)
 {
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 8fb93b6..00a1d1c 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -29,12 +29,11 @@
 #include <linux/vfs.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
-
-#include <linux/ncp_fs.h>
+#include <linux/namei.h>
 
 #include <net/sock.h>
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 #include "getopt.h"
 
 #define NCP_DEFAULT_FILE_MODE 0600
@@ -58,9 +57,16 @@
 	return &ei->vfs_inode;
 }
 
+static void ncp_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
+}
+
 static void ncp_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
+	call_rcu(&inode->i_rcu, ncp_i_callback);
 }
 
 static void init_once(void *foo)
@@ -309,7 +315,12 @@
 	sk->sk_write_space  = server->write_space;
 	release_sock(sk);
 	del_timer_sync(&server->timeout_tm);
-	flush_scheduled_work();
+
+	flush_work_sync(&server->rcv.tq);
+	if (sk->sk_socket->type == SOCK_STREAM)
+		flush_work_sync(&server->tx.tq);
+	else
+		flush_work_sync(&server->timeout_tq);
 }
 
 static int  ncp_show_options(struct seq_file *seq, struct vfsmount *mnt)
@@ -531,6 +542,7 @@
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = NCP_SUPER_MAGIC;
 	sb->s_op = &ncp_sops;
+	sb->s_d_op = &ncp_dentry_operations;
 	sb->s_bdi = &server->bdi;
 
 	server = NCP_SBP(sb);
@@ -710,7 +722,6 @@
 	sb->s_root = d_alloc_root(root_inode);
         if (!sb->s_root)
 		goto out_no_root;
-	sb->s_root->d_op = &ncp_root_dentry_operations;
 	return 0;
 
 out_no_root:
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index d40a547..790e92a 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -20,11 +20,9 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 
-#include <linux/ncp_fs.h>
-
 #include <asm/uaccess.h>
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 /* maximum limit for ncp_objectname_ioctl */
 #define NCP_OBJECT_NAME_MAX_LEN	4096
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 56f5b3a..a7c07b4 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -16,12 +16,12 @@
 #include <linux/mman.h>
 #include <linux/string.h>
 #include <linux/fcntl.h>
-#include <linux/ncp_fs.h>
 
-#include "ncplib_kernel.h"
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
+#include "ncp_fs.h"
+
 /*
  * Fill in the supplied page for mmap
  * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
diff --git a/fs/ncpfs/ncp_fs.h b/fs/ncpfs/ncp_fs.h
new file mode 100644
index 0000000..31831af
--- /dev/null
+++ b/fs/ncpfs/ncp_fs.h
@@ -0,0 +1,98 @@
+#include <linux/ncp_fs.h>
+#include "ncp_fs_i.h"
+#include "ncp_fs_sb.h"
+
+/* define because it is easy to change PRINTK to {*}PRINTK */
+#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args)
+
+#undef NCPFS_PARANOIA
+#ifdef NCPFS_PARANOIA
+#define PPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define PPRINTK(format, args...)
+#endif
+
+#ifndef DEBUG_NCP
+#define DEBUG_NCP 0
+#endif
+#if DEBUG_NCP > 0
+#define DPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define DPRINTK(format, args...)
+#endif
+#if DEBUG_NCP > 1
+#define DDPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define DDPRINTK(format, args...)
+#endif
+
+#define NCP_MAX_RPC_TIMEOUT (6*HZ)
+
+
+struct ncp_entry_info {
+	struct nw_info_struct	i;
+	ino_t			ino;
+	int			opened;
+	int			access;
+	unsigned int		volume;
+	__u8			file_handle[6];
+};
+
+static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+#define NCP_SERVER(inode)	NCP_SBP((inode)->i_sb)
+static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
+{
+	return container_of(inode, struct ncp_inode_info, vfs_inode);
+}
+
+/* linux/fs/ncpfs/inode.c */
+int ncp_notify_change(struct dentry *, struct iattr *);
+struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
+void ncp_update_inode(struct inode *, struct ncp_entry_info *);
+void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
+
+/* linux/fs/ncpfs/dir.c */
+extern const struct inode_operations ncp_dir_inode_operations;
+extern const struct file_operations ncp_dir_operations;
+extern const struct dentry_operations ncp_dentry_operations;
+int ncp_conn_logged_in(struct super_block *);
+int ncp_date_dos2unix(__le16 time, __le16 date);
+void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
+
+/* linux/fs/ncpfs/ioctl.c */
+long ncp_ioctl(struct file *, unsigned int, unsigned long);
+long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
+
+/* linux/fs/ncpfs/sock.c */
+int ncp_request2(struct ncp_server *server, int function,
+	void* reply, int max_reply_size);
+static inline int ncp_request(struct ncp_server *server, int function) {
+	return ncp_request2(server, function, server->packet, server->packet_size);
+}
+int ncp_connect(struct ncp_server *server);
+int ncp_disconnect(struct ncp_server *server);
+void ncp_lock_server(struct ncp_server *server);
+void ncp_unlock_server(struct ncp_server *server);
+
+/* linux/fs/ncpfs/symlink.c */
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern const struct address_space_operations ncp_symlink_aops;
+int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
+/* linux/fs/ncpfs/file.c */
+extern const struct inode_operations ncp_file_inode_operations;
+extern const struct file_operations ncp_file_operations;
+int ncp_make_open(struct inode *, int);
+
+/* linux/fs/ncpfs/mmap.c */
+int ncp_mmap(struct file *, struct vm_area_struct *);
+
+/* linux/fs/ncpfs/ncplib_kernel.c */
+int ncp_make_closed(struct inode *);
+
+#include "ncplib_kernel.h"
diff --git a/include/linux/ncp_fs_i.h b/fs/ncpfs/ncp_fs_i.h
similarity index 100%
rename from include/linux/ncp_fs_i.h
rename to fs/ncpfs/ncp_fs_i.h
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
new file mode 100644
index 0000000..4af803f
--- /dev/null
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -0,0 +1,176 @@
+/*
+ *  ncp_fs_sb.h
+ *
+ *  Copyright (C) 1995, 1996 by Volker Lendecke
+ *
+ */
+
+#ifndef _NCP_FS_SB
+#define _NCP_FS_SB
+
+#include <linux/types.h>
+#include <linux/ncp_mount.h>
+#include <linux/net.h>
+#include <linux/mutex.h>
+#include <linux/backing-dev.h>
+#include <linux/workqueue.h>
+
+#define NCP_DEFAULT_OPTIONS 0		/* 2 for packet signatures */
+
+struct sock;
+
+struct ncp_mount_data_kernel {
+	unsigned long    flags;		/* NCP_MOUNT_* flags */
+	unsigned int	 int_flags;	/* internal flags */
+#define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
+	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
+	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
+	unsigned int     ncp_fd;	/* The socket to the ncp port */
+	unsigned int     time_out;	/* How long should I wait after
+					   sending a NCP request? */
+	unsigned int     retry_count;	/* And how often should I retry? */
+	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
+	__kernel_uid32_t uid;
+	__kernel_gid32_t gid;
+	__kernel_mode_t  file_mode;
+	__kernel_mode_t  dir_mode;
+	int		 info_fd;
+};
+
+struct ncp_server {
+
+	struct ncp_mount_data_kernel m;	/* Nearly all of the mount data is of
+					   interest for us later, so we store
+					   it completely. */
+
+	__u8 name_space[NCP_NUMBER_OF_VOLUMES + 2];
+
+	struct file *ncp_filp;	/* File pointer to ncp socket */
+	struct socket *ncp_sock;/* ncp socket */
+	struct file *info_filp;
+	struct socket *info_sock;
+
+	u8 sequence;
+	u8 task;
+	u16 connection;		/* Remote connection number */
+
+	u8 completion;		/* Status message from server */
+	u8 conn_status;		/* Bit 4 = 1 ==> Server going down, no
+				   requests allowed anymore.
+				   Bit 0 = 1 ==> Server is down. */
+
+	int buffer_size;	/* Negotiated bufsize */
+
+	int reply_size;		/* Size of last reply */
+
+	int packet_size;
+	unsigned char *packet;	/* Here we prepare requests and
+				   receive replies */
+	unsigned char *txbuf;	/* Storage for current request */
+	unsigned char *rxbuf;	/* Storage for reply to current request */
+
+	int lock;		/* To prevent mismatch in protocols. */
+	struct mutex mutex;
+
+	int current_size;	/* for packet preparation */
+	int has_subfunction;
+	int ncp_reply_size;
+
+	int root_setuped;
+	struct mutex root_setup_lock;
+
+	/* info for packet signing */
+	int sign_wanted;	/* 1=Server needs signed packets */
+	int sign_active;	/* 0=don't do signing, 1=do */
+	char sign_root[8];	/* generated from password and encr. key */
+	char sign_last[16];	
+
+	/* Authentication info: NDS or BINDERY, username */
+	struct {
+		int	auth_type;
+		size_t	object_name_len;
+		void*	object_name;
+		int	object_type;
+	} auth;
+	/* Password info */
+	struct {
+		size_t	len;
+		void*	data;
+	} priv;
+	struct rw_semaphore auth_rwsem;
+
+	/* nls info: codepage for volume and charset for I/O */
+	struct nls_table *nls_vol;
+	struct nls_table *nls_io;
+
+	/* maximum age in jiffies */
+	atomic_t dentry_ttl;
+
+	/* miscellaneous */
+	unsigned int flags;
+
+	spinlock_t requests_lock;	/* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
+
+	void (*data_ready)(struct sock* sk, int len);
+	void (*error_report)(struct sock* sk);
+	void (*write_space)(struct sock* sk);	/* STREAM mode only */
+	struct {
+		struct work_struct tq;		/* STREAM/DGRAM: data/error ready */
+		struct ncp_request_reply* creq;	/* STREAM/DGRAM: awaiting reply from this request */
+		struct mutex creq_mutex;	/* DGRAM only: lock accesses to rcv.creq */
+
+		unsigned int state;		/* STREAM only: receiver state */
+		struct {
+			__u32 magic __packed;
+			__u32 len __packed;
+			__u16 type __packed;
+			__u16 p1 __packed;
+			__u16 p2 __packed;
+			__u16 p3 __packed;
+			__u16 type2 __packed;
+		} buf;				/* STREAM only: temporary buffer */
+		unsigned char* ptr;		/* STREAM only: pointer to data */
+		size_t len;			/* STREAM only: length of data to receive */
+	} rcv;
+	struct {
+		struct list_head requests;	/* STREAM only: queued requests */
+		struct work_struct tq;		/* STREAM only: transmitter ready */
+		struct ncp_request_reply* creq;	/* STREAM only: currently transmitted entry */
+	} tx;
+	struct timer_list timeout_tm;		/* DGRAM only: timeout timer */
+	struct work_struct timeout_tq;		/* DGRAM only: associated queue, we run timers from process context */
+	int timeout_last;			/* DGRAM only: current timeout length */
+	int timeout_retries;			/* DGRAM only: retries left */
+	struct {
+		size_t len;
+		__u8 data[128];
+	} unexpected_packet;
+	struct backing_dev_info bdi;
+};
+
+extern void ncp_tcp_rcv_proc(struct work_struct *work);
+extern void ncp_tcp_tx_proc(struct work_struct *work);
+extern void ncpdgram_rcv_proc(struct work_struct *work);
+extern void ncpdgram_timeout_proc(struct work_struct *work);
+extern void ncpdgram_timeout_call(unsigned long server);
+extern void ncp_tcp_data_ready(struct sock* sk, int len);
+extern void ncp_tcp_write_space(struct sock* sk);
+extern void ncp_tcp_error_report(struct sock* sk);
+
+#define NCP_FLAG_UTF8	1
+
+#define NCP_CLR_FLAG(server, flag)	((server)->flags &= ~(flag))
+#define NCP_SET_FLAG(server, flag)	((server)->flags |= (flag))
+#define NCP_IS_FLAG(server, flag)	((server)->flags & (flag))
+
+static inline int ncp_conn_valid(struct ncp_server *server)
+{
+	return ((server->conn_status & 0x11) == 0);
+}
+
+static inline void ncp_invalidate_conn(struct ncp_server *server)
+{
+	server->conn_status |= 0x01;
+}
+
+#endif
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index a95615a..981a956 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -11,7 +11,7 @@
 
 
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static inline void assert_server_locked(struct ncp_server *server)
 {
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 3c57eca..09881e6 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -32,8 +32,6 @@
 #include <linux/ctype.h>
 #endif /* CONFIG_NCPFS_NLS */
 
-#include <linux/ncp_fs.h>
-
 #define NCP_MIN_SYMLINK_SIZE	8
 #define NCP_MAX_SYMLINK_SIZE	512
 
@@ -135,7 +133,7 @@
 				const unsigned char *, unsigned int, int);
 
 #define NCP_ESC			':'
-#define NCP_IO_TABLE(dentry)	(NCP_SERVER((dentry)->d_inode)->nls_io)
+#define NCP_IO_TABLE(sb)	(NCP_SBP(sb)->nls_io)
 #define ncp_tolower(t, c)	nls_tolower(t, c)
 #define ncp_toupper(t, c)	nls_toupper(t, c)
 #define ncp_strnicmp(t, s1, s2, len) \
@@ -150,15 +148,15 @@
 int ncp__vol2io(unsigned char *, unsigned int *,
 				const unsigned char *, unsigned int, int);
 
-#define NCP_IO_TABLE(dentry)	NULL
+#define NCP_IO_TABLE(sb)	NULL
 #define ncp_tolower(t, c)	tolower(c)
 #define ncp_toupper(t, c)	toupper(c)
 #define ncp_io2vol(S,m,i,n,k,U)	ncp__io2vol(m,i,n,k,U)
 #define ncp_vol2io(S,m,i,n,k,U)	ncp__vol2io(m,i,n,k,U)
 
 
-static inline int ncp_strnicmp(struct nls_table *t, const unsigned char *s1,
-		const unsigned char *s2, int len)
+static inline int ncp_strnicmp(const struct nls_table *t,
+		const unsigned char *s1, const unsigned char *s2, int len)
 {
 	while (len--) {
 		if (tolower(*s1++) != tolower(*s2++))
@@ -193,7 +191,7 @@
 	struct list_head *next;
 	struct dentry *dentry;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -205,7 +203,7 @@
 
 		next = next->next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 }
 
 static inline void
@@ -215,7 +213,7 @@
 	struct list_head *next;
 	struct dentry *dentry;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&parent->d_lock);
 	next = parent->d_subdirs.next;
 	while (next != &parent->d_subdirs) {
 		dentry = list_entry(next, struct dentry, d_u.d_child);
@@ -223,7 +221,7 @@
 		ncp_age_dentry(server, dentry);
 		next = next->next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&parent->d_lock);
 }
 
 struct ncp_cache_head {
diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c
index d8b2d7e..0890759 100644
--- a/fs/ncpfs/ncpsign_kernel.c
+++ b/fs/ncpfs/ncpsign_kernel.c
@@ -11,6 +11,7 @@
 #include <linux/string.h>
 #include <linux/ncp.h>
 #include <linux/bitops.h>
+#include "ncp_fs.h"
 #include "ncpsign_kernel.h"
 
 /* i386: 32-bit, little endian, handles mis-alignment */
diff --git a/fs/ncpfs/ncpsign_kernel.h b/fs/ncpfs/ncpsign_kernel.h
index 6451a68..d9a1438 100644
--- a/fs/ncpfs/ncpsign_kernel.h
+++ b/fs/ncpfs/ncpsign_kernel.h
@@ -8,8 +8,6 @@
 #ifndef _NCPSIGN_KERNEL_H
 #define _NCPSIGN_KERNEL_H
 
-#include <linux/ncp_fs.h>
-
 #ifdef CONFIG_NCPFS_PACKET_SIGNING
 void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff);
 int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff);
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 668bd26..3a15872 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -28,7 +28,7 @@
 #include <linux/poll.h>
 #include <linux/file.h>
 
-#include <linux/ncp_fs.h>
+#include "ncp_fs.h"
 
 #include "ncpsign_kernel.h"
 
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index c634fd1..661f861 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -25,13 +25,11 @@
 
 #include <linux/errno.h>
 #include <linux/fs.h>
-#include <linux/ncp_fs.h>
 #include <linux/time.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
-#include "ncplib_kernel.h"
-
+#include "ncp_fs.h"
 
 /* these magic numbers must appear in the symlink file -- this makes it a bit
    more resilient against the magic attributes being set on random files. */
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 93a8b3b..1990165 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -16,9 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/sunrpc/svcauth_gss.h>
-#if defined(CONFIG_NFS_V4_1)
 #include <linux/sunrpc/bc_xprt.h>
-#endif
 
 #include <net/inet_sock.h>
 
@@ -137,6 +135,33 @@
 
 #if defined(CONFIG_NFS_V4_1)
 /*
+ * CB_SEQUENCE operations will fail until the callback sessionid is set.
+ */
+int nfs4_set_callback_sessionid(struct nfs_client *clp)
+{
+	struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv;
+	struct nfs4_sessionid *bc_sid;
+
+	if (!serv->sv_bc_xprt)
+		return -EINVAL;
+
+	/* on success freed in xprt_free */
+	bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL);
+	if (!bc_sid)
+		return -ENOMEM;
+	memcpy(bc_sid->data, &clp->cl_session->sess_id.data,
+		NFS4_MAX_SESSIONID_LEN);
+	spin_lock_bh(&serv->sv_cb_lock);
+	serv->sv_bc_xprt->xpt_bc_sid = bc_sid;
+	spin_unlock_bh(&serv->sv_cb_lock);
+	dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__,
+		((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1],
+		((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3],
+		serv->sv_bc_xprt);
+	return 0;
+}
+
+/*
  * The callback service for NFSv4.1 callbacks
  */
 static int
@@ -177,30 +202,38 @@
 struct svc_rqst *
 nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
 {
-	struct svc_xprt *bc_xprt;
-	struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+	struct svc_rqst *rqstp;
+	int ret;
 
-	dprintk("--> %s\n", __func__);
-	/* Create a svc_sock for the service */
-	bc_xprt = svc_sock_create(serv, xprt->prot);
-	if (!bc_xprt)
+	/*
+	 * Create an svc_sock for the back channel service that shares the
+	 * fore channel connection.
+	 * Returns the input port (0) and sets the svc_serv bc_xprt on success
+	 */
+	ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
+			      SVC_SOCK_ANONYMOUS);
+	if (ret < 0) {
+		rqstp = ERR_PTR(ret);
 		goto out;
+	}
 
 	/*
 	 * Save the svc_serv in the transport so that it can
 	 * be referenced when the session backchannel is initialized
 	 */
-	serv->bc_xprt = bc_xprt;
 	xprt->bc_serv = serv;
 
 	INIT_LIST_HEAD(&serv->sv_cb_list);
 	spin_lock_init(&serv->sv_cb_lock);
 	init_waitqueue_head(&serv->sv_cb_waitq);
 	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
-	if (IS_ERR(rqstp))
-		svc_sock_destroy(bc_xprt);
+	if (IS_ERR(rqstp)) {
+		svc_xprt_put(serv->sv_bc_xprt);
+		serv->sv_bc_xprt = NULL;
+	}
 out:
-	dprintk("--> %s return %p\n", __func__, rqstp);
+	dprintk("--> %s return %ld\n", __func__,
+		IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
 	return rqstp;
 }
 
@@ -233,6 +266,10 @@
 		struct nfs_callback_data *cb_info)
 {
 }
+int nfs4_set_callback_sessionid(struct nfs_client *clp)
+{
+	return 0;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 /*
@@ -328,6 +365,9 @@
 	struct rpc_clnt *r = clp->cl_rpcclient;
 	char *p = svc_gss_principal(rqstp);
 
+	/* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
+	if (clp->cl_minorversion != 0)
+		return SVC_DROP;
 	/*
 	 * It might just be a normal user principal, in which case
 	 * userspace won't bother to tell us the name at all.
@@ -345,6 +385,23 @@
 	return SVC_OK;
 }
 
+/* pg_authenticate method helper */
+static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp)
+{
+	struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp);
+	int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0;
+
+	dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc);
+	if (svc_is_backchannel(rqstp))
+		/* Sessionid (usually) set after CB_NULL ping */
+		return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid,
+						  is_cb_compound);
+	else
+		/* No callback identifier in pg_authenticate */
+		return nfs4_find_client_no_ident(svc_addr(rqstp));
+}
+
+/* pg_authenticate method for nfsv4 callback threads. */
 static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 {
 	struct nfs_client *clp;
@@ -352,7 +409,7 @@
 	int ret = SVC_OK;
 
 	/* Don't talk to strangers */
-	clp = nfs_find_client(svc_addr(rqstp), 4);
+	clp = nfs_cb_find_client(rqstp);
 	if (clp == NULL)
 		return SVC_DROP;
 
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 85a7cfd..d3b44f9 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -34,10 +34,17 @@
 	OP_CB_ILLEGAL = 10044,
 };
 
+struct cb_process_state {
+	__be32			drc_status;
+	struct nfs_client	*clp;
+	struct nfs4_sessionid	*svc_sid; /* v4.1 callback service sessionid */
+};
+
 struct cb_compound_hdr_arg {
 	unsigned int taglen;
 	const char *tag;
 	unsigned int minorversion;
+	unsigned int cb_ident; /* v4.0 callback identifier */
 	unsigned nops;
 };
 
@@ -103,14 +110,23 @@
 	uint32_t			csr_target_highestslotid;
 };
 
-extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
-				       struct cb_sequenceres *res);
+extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+				       struct cb_sequenceres *res,
+				       struct cb_process_state *cps);
 
 extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
 					     const nfs4_stateid *stateid);
 
 #define RCA4_TYPE_MASK_RDATA_DLG	0
 #define RCA4_TYPE_MASK_WDATA_DLG	1
+#define RCA4_TYPE_MASK_DIR_DLG         2
+#define RCA4_TYPE_MASK_FILE_LAYOUT     3
+#define RCA4_TYPE_MASK_BLK_LAYOUT      4
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN  8
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX  9
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
+#define RCA4_TYPE_MASK_ALL 0xf31f
 
 struct cb_recallanyargs {
 	struct sockaddr	*craa_addr;
@@ -118,25 +134,52 @@
 	uint32_t	craa_type_mask;
 };
 
-extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
+extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
+					void *dummy,
+					struct cb_process_state *cps);
 
 struct cb_recallslotargs {
 	struct sockaddr	*crsa_addr;
 	uint32_t	crsa_target_max_slots;
 };
-extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
-					  void *dummy);
+extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
+					 void *dummy,
+					 struct cb_process_state *cps);
 
+struct cb_layoutrecallargs {
+	struct sockaddr		*cbl_addr;
+	uint32_t		cbl_recall_type;
+	uint32_t		cbl_layout_type;
+	uint32_t		cbl_layoutchanged;
+	union {
+		struct {
+			struct nfs_fh		cbl_fh;
+			struct pnfs_layout_range cbl_range;
+			nfs4_stateid		cbl_stateid;
+		};
+		struct nfs_fsid		cbl_fsid;
+	};
+};
+
+extern unsigned nfs4_callback_layoutrecall(
+	struct cb_layoutrecallargs *args,
+	void *dummy, struct cb_process_state *cps);
+
+extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
+extern void nfs4_cb_take_slot(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4_1 */
 
-extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
-extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
-
+extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+				    struct cb_getattrres *res,
+				    struct cb_process_state *cps);
+extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+				   struct cb_process_state *cps);
 #ifdef CONFIG_NFS_V4
 extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
 extern void nfs_callback_down(int minorversion);
 extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
 					    const nfs4_stateid *stateid);
+extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4 */
 /*
  * nfs41: Callbacks are expected to not cause substantial latency,
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2950fca..4bb91cb 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -12,30 +12,33 @@
 #include "callback.h"
 #include "delegation.h"
 #include "internal.h"
+#include "pnfs.h"
 
 #ifdef NFS_DEBUG
 #define NFSDBG_FACILITY NFSDBG_CALLBACK
 #endif
- 
-__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
+
+__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+			     struct cb_getattrres *res,
+			     struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct nfs_delegation *delegation;
 	struct nfs_inode *nfsi;
 	struct inode *inode;
 
-	res->bitmap[0] = res->bitmap[1] = 0;
-	res->status = htonl(NFS4ERR_BADHANDLE);
-	clp = nfs_find_client(args->addr, 4);
-	if (clp == NULL)
+	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 		goto out;
 
-	dprintk("NFS: GETATTR callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+	res->bitmap[0] = res->bitmap[1] = 0;
+	res->status = htonl(NFS4ERR_BADHANDLE);
 
-	inode = nfs_delegation_find_inode(clp, &args->fh);
+	dprintk("NFS: GETATTR callback request from %s\n",
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 	if (inode == NULL)
-		goto out_putclient;
+		goto out;
 	nfsi = NFS_I(inode);
 	rcu_read_lock();
 	delegation = rcu_dereference(nfsi->delegation);
@@ -55,49 +58,41 @@
 out_iput:
 	rcu_read_unlock();
 	iput(inode);
-out_putclient:
-	nfs_put_client(clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
 	return res->status;
 }
 
-__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
+__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+			    struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct inode *inode;
 	__be32 res;
 	
-	res = htonl(NFS4ERR_BADHANDLE);
-	clp = nfs_find_client(args->addr, 4);
-	if (clp == NULL)
+	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 		goto out;
 
 	dprintk("NFS: RECALL callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
-	do {
-		struct nfs_client *prev = clp;
-
-		inode = nfs_delegation_find_inode(clp, &args->fh);
-		if (inode != NULL) {
-			/* Set up a helper thread to actually return the delegation */
-			switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
-				case 0:
-					res = 0;
-					break;
-				case -ENOENT:
-					if (res != 0)
-						res = htonl(NFS4ERR_BAD_STATEID);
-					break;
-				default:
-					res = htonl(NFS4ERR_RESOURCE);
-			}
-			iput(inode);
-		}
-		clp = nfs_find_client_next(prev);
-		nfs_put_client(prev);
-	} while (clp != NULL);
+	res = htonl(NFS4ERR_BADHANDLE);
+	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
+	if (inode == NULL)
+		goto out;
+	/* Set up a helper thread to actually return the delegation */
+	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
+	case 0:
+		res = 0;
+		break;
+	case -ENOENT:
+		if (res != 0)
+			res = htonl(NFS4ERR_BAD_STATEID);
+		break;
+	default:
+		res = htonl(NFS4ERR_RESOURCE);
+	}
+	iput(inode);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
 	return res;
@@ -113,6 +108,139 @@
 
 #if defined(CONFIG_NFS_V4_1)
 
+static u32 initiate_file_draining(struct nfs_client *clp,
+				  struct cb_layoutrecallargs *args)
+{
+	struct pnfs_layout_hdr *lo;
+	struct inode *ino;
+	bool found = false;
+	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	LIST_HEAD(free_me_list);
+
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+		if (nfs_compare_fh(&args->cbl_fh,
+				   &NFS_I(lo->plh_inode)->fh))
+			continue;
+		ino = igrab(lo->plh_inode);
+		if (!ino)
+			continue;
+		found = true;
+		/* Without this, layout can be freed as soon
+		 * as we release cl_lock.
+		 */
+		get_layout_hdr(lo);
+		break;
+	}
+	spin_unlock(&clp->cl_lock);
+	if (!found)
+		return NFS4ERR_NOMATCHING_LAYOUT;
+
+	spin_lock(&ino->i_lock);
+	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+	    mark_matching_lsegs_invalid(lo, &free_me_list,
+					args->cbl_range.iomode))
+		rv = NFS4ERR_DELAY;
+	else
+		rv = NFS4ERR_NOMATCHING_LAYOUT;
+	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+	spin_unlock(&ino->i_lock);
+	pnfs_free_lseg_list(&free_me_list);
+	put_layout_hdr(lo);
+	iput(ino);
+	return rv;
+}
+
+static u32 initiate_bulk_draining(struct nfs_client *clp,
+				  struct cb_layoutrecallargs *args)
+{
+	struct pnfs_layout_hdr *lo;
+	struct inode *ino;
+	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	struct pnfs_layout_hdr *tmp;
+	LIST_HEAD(recall_list);
+	LIST_HEAD(free_me_list);
+	struct pnfs_layout_range range = {
+		.iomode = IOMODE_ANY,
+		.offset = 0,
+		.length = NFS4_MAX_UINT64,
+	};
+
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+		if ((args->cbl_recall_type == RETURN_FSID) &&
+		    memcmp(&NFS_SERVER(lo->plh_inode)->fsid,
+			   &args->cbl_fsid, sizeof(struct nfs_fsid)))
+			continue;
+		if (!igrab(lo->plh_inode))
+			continue;
+		get_layout_hdr(lo);
+		BUG_ON(!list_empty(&lo->plh_bulk_recall));
+		list_add(&lo->plh_bulk_recall, &recall_list);
+	}
+	spin_unlock(&clp->cl_lock);
+	list_for_each_entry_safe(lo, tmp,
+				 &recall_list, plh_bulk_recall) {
+		ino = lo->plh_inode;
+		spin_lock(&ino->i_lock);
+		set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+		if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode))
+			rv = NFS4ERR_DELAY;
+		list_del_init(&lo->plh_bulk_recall);
+		spin_unlock(&ino->i_lock);
+		put_layout_hdr(lo);
+		iput(ino);
+	}
+	pnfs_free_lseg_list(&free_me_list);
+	return rv;
+}
+
+static u32 do_callback_layoutrecall(struct nfs_client *clp,
+				    struct cb_layoutrecallargs *args)
+{
+	u32 res = NFS4ERR_DELAY;
+
+	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
+	if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
+		goto out;
+	if (args->cbl_recall_type == RETURN_FILE)
+		res = initiate_file_draining(clp, args);
+	else
+		res = initiate_bulk_draining(clp, args);
+	clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
+out:
+	dprintk("%s returning %i\n", __func__, res);
+	return res;
+
+}
+
+__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
+				  void *dummy, struct cb_process_state *cps)
+{
+	u32 res;
+
+	dprintk("%s: -->\n", __func__);
+
+	if (cps->clp)
+		res = do_callback_layoutrecall(cps->clp, args);
+	else
+		res = NFS4ERR_OP_NOT_IN_SESSION;
+
+	dprintk("%s: exit with status = %d\n", __func__, res);
+	return cpu_to_be32(res);
+}
+
+static void pnfs_recall_all_layouts(struct nfs_client *clp)
+{
+	struct cb_layoutrecallargs args;
+
+	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
+	memset(&args, 0, sizeof(args));
+	args.cbl_recall_type = RETURN_ALL;
+	/* FIXME we ignore errors, what should we do? */
+	do_callback_layoutrecall(clp, &args);
+}
+
 int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
 {
 	if (delegation == NULL)
@@ -185,42 +313,6 @@
 }
 
 /*
- * Returns a pointer to a held 'struct nfs_client' that matches the server's
- * address, major version number, and session ID.  It is the caller's
- * responsibility to release the returned reference.
- *
- * Returns NULL if there are no connections with sessions, or if no session
- * matches the one of interest.
- */
- static struct nfs_client *find_client_with_session(
-	const struct sockaddr *addr, u32 nfsversion,
-	struct nfs4_sessionid *sessionid)
-{
-	struct nfs_client *clp;
-
-	clp = nfs_find_client(addr, 4);
-	if (clp == NULL)
-		return NULL;
-
-	do {
-		struct nfs_client *prev = clp;
-
-		if (clp->cl_session != NULL) {
-			if (memcmp(clp->cl_session->sess_id.data,
-					sessionid->data,
-					NFS4_MAX_SESSIONID_LEN) == 0) {
-				/* Returns a held reference to clp */
-				return clp;
-			}
-		}
-		clp = nfs_find_client_next(prev);
-		nfs_put_client(prev);
-	} while (clp != NULL);
-
-	return NULL;
-}
-
-/*
  * For each referring call triple, check the session's slot table for
  * a match.  If the slot is in use and the sequence numbers match, the
  * client is still waiting for a response to the original request.
@@ -276,20 +368,34 @@
 }
 
 __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
-				struct cb_sequenceres *res)
+			      struct cb_sequenceres *res,
+			      struct cb_process_state *cps)
 {
 	struct nfs_client *clp;
 	int i;
 	__be32 status;
 
+	cps->clp = NULL;
+
 	status = htonl(NFS4ERR_BADSESSION);
-	clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+	/* Incoming session must match the callback session */
+	if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN))
+		goto out;
+
+	clp = nfs4_find_client_sessionid(args->csa_addr,
+					 &args->csa_sessionid, 1);
 	if (clp == NULL)
 		goto out;
 
+	/* state manager is resetting the session */
+	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+		status = NFS4ERR_DELAY;
+		goto out;
+	}
+
 	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
 	if (status)
-		goto out_putclient;
+		goto out;
 
 	/*
 	 * Check for pending referring calls.  If a match is found, a
@@ -298,7 +404,7 @@
 	 */
 	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
 		status = htonl(NFS4ERR_DELAY);
-		goto out_putclient;
+		goto out;
 	}
 
 	memcpy(&res->csr_sessionid, &args->csa_sessionid,
@@ -307,83 +413,93 @@
 	res->csr_slotid = args->csa_slotid;
 	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
 	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+	nfs4_cb_take_slot(clp);
+	cps->clp = clp; /* put in nfs4_callback_compound */
 
-out_putclient:
-	nfs_put_client(clp);
 out:
 	for (i = 0; i < args->csa_nrclists; i++)
 		kfree(args->csa_rclists[i].rcl_refcalls);
 	kfree(args->csa_rclists);
 
-	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
-		res->csr_status = 0;
-	else
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+		cps->drc_status = status;
+		status = 0;
+	} else
 		res->csr_status = status;
+
 	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
 		ntohl(status), ntohl(res->csr_status));
 	return status;
 }
 
-__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+static bool
+validate_bitmap_values(unsigned long mask)
 {
-	struct nfs_client *clp;
+	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
+}
+
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
+			       struct cb_process_state *cps)
+{
 	__be32 status;
 	fmode_t flags = 0;
 
-	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
-	clp = nfs_find_client(args->craa_addr, 4);
-	if (clp == NULL)
+	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* set in cb_sequence */
 		goto out;
 
 	dprintk("NFS: RECALL_ANY callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
+	status = cpu_to_be32(NFS4ERR_INVAL);
+	if (!validate_bitmap_values(args->craa_type_mask))
+		goto out;
+
+	status = cpu_to_be32(NFS4_OK);
 	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
 		     &args->craa_type_mask))
 		flags = FMODE_READ;
 	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
 		     &args->craa_type_mask))
 		flags |= FMODE_WRITE;
-
+	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
+		     &args->craa_type_mask))
+		pnfs_recall_all_layouts(cps->clp);
 	if (flags)
-		nfs_expire_all_delegation_types(clp, flags);
-	status = htonl(NFS4_OK);
+		nfs_expire_all_delegation_types(cps->clp, flags);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
 }
 
 /* Reduce the fore channel's max_slots to the target value */
-__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
+				struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct nfs4_slot_table *fc_tbl;
 	__be32 status;
 
 	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
-	clp = nfs_find_client(args->crsa_addr, 4);
-	if (clp == NULL)
+	if (!cps->clp) /* set in cb_sequence */
 		goto out;
 
 	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
 		args->crsa_target_max_slots);
 
-	fc_tbl = &clp->cl_session->fc_slot_table;
+	fc_tbl = &cps->clp->cl_session->fc_slot_table;
 
 	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
 	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
 	    args->crsa_target_max_slots < 1)
-		goto out_putclient;
+		goto out;
 
 	status = htonl(NFS4_OK);
 	if (args->crsa_target_max_slots == fc_tbl->max_slots)
-		goto out_putclient;
+		goto out;
 
 	fc_tbl->target_max_slots = args->crsa_target_max_slots;
-	nfs41_handle_recall_slot(clp);
-out_putclient:
-	nfs_put_client(clp);	/* balance nfs_find_client */
+	nfs41_handle_recall_slot(cps->clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 05af212..23112c2 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -10,8 +10,10 @@
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/slab.h>
+#include <linux/sunrpc/bc_xprt.h>
 #include "nfs4_fs.h"
 #include "callback.h"
+#include "internal.h"
 
 #define CB_OP_TAGLEN_MAXSZ	(512)
 #define CB_OP_HDR_RES_MAXSZ	(2 + CB_OP_TAGLEN_MAXSZ)
@@ -22,6 +24,7 @@
 #define CB_OP_RECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 
 #if defined(CONFIG_NFS_V4_1)
+#define CB_OP_LAYOUTRECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
 					4 + 1 + 3)
 #define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
@@ -33,7 +36,8 @@
 /* Internal error code */
 #define NFS4ERR_RESOURCE_HDR	11050
 
-typedef __be32 (*callback_process_op_t)(void *, void *);
+typedef __be32 (*callback_process_op_t)(void *, void *,
+					struct cb_process_state *);
 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
 typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
 
@@ -160,7 +164,7 @@
 	hdr->minorversion = ntohl(*p++);
 	/* Check minor version is zero or one. */
 	if (hdr->minorversion <= 1) {
-		p++;	/* skip callback_ident */
+		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
 	} else {
 		printk(KERN_WARNING "%s: NFSv4 server callback with "
 			"illegal minor version %u!\n",
@@ -220,6 +224,66 @@
 
 #if defined(CONFIG_NFS_V4_1)
 
+static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
+				       struct xdr_stream *xdr,
+				       struct cb_layoutrecallargs *args)
+{
+	__be32 *p;
+	__be32 status = 0;
+	uint32_t iomode;
+
+	args->cbl_addr = svc_addr(rqstp);
+	p = read_buf(xdr, 4 * sizeof(uint32_t));
+	if (unlikely(p == NULL)) {
+		status = htonl(NFS4ERR_BADXDR);
+		goto out;
+	}
+
+	args->cbl_layout_type = ntohl(*p++);
+	/* Despite the spec's xdr, iomode really belongs in the FILE switch,
+	 * as it is unusable and ignored with the other types.
+	 */
+	iomode = ntohl(*p++);
+	args->cbl_layoutchanged = ntohl(*p++);
+	args->cbl_recall_type = ntohl(*p++);
+
+	if (args->cbl_recall_type == RETURN_FILE) {
+		args->cbl_range.iomode = iomode;
+		status = decode_fh(xdr, &args->cbl_fh);
+		if (unlikely(status != 0))
+			goto out;
+
+		p = read_buf(xdr, 2 * sizeof(uint64_t));
+		if (unlikely(p == NULL)) {
+			status = htonl(NFS4ERR_BADXDR);
+			goto out;
+		}
+		p = xdr_decode_hyper(p, &args->cbl_range.offset);
+		p = xdr_decode_hyper(p, &args->cbl_range.length);
+		status = decode_stateid(xdr, &args->cbl_stateid);
+		if (unlikely(status != 0))
+			goto out;
+	} else if (args->cbl_recall_type == RETURN_FSID) {
+		p = read_buf(xdr, 2 * sizeof(uint64_t));
+		if (unlikely(p == NULL)) {
+			status = htonl(NFS4ERR_BADXDR);
+			goto out;
+		}
+		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
+		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
+	} else if (args->cbl_recall_type != RETURN_ALL) {
+		status = htonl(NFS4ERR_BADXDR);
+		goto out;
+	}
+	dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
+		__func__,
+		args->cbl_layout_type, iomode,
+		args->cbl_layoutchanged, args->cbl_recall_type);
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
+
 static __be32 decode_sessionid(struct xdr_stream *xdr,
 				 struct nfs4_sessionid *sid)
 {
@@ -574,10 +638,10 @@
 	case OP_CB_SEQUENCE:
 	case OP_CB_RECALL_ANY:
 	case OP_CB_RECALL_SLOT:
+	case OP_CB_LAYOUTRECALL:
 		*op = &callback_ops[op_nr];
 		break;
 
-	case OP_CB_LAYOUTRECALL:
 	case OP_CB_NOTIFY_DEVICEID:
 	case OP_CB_NOTIFY:
 	case OP_CB_PUSH_DELEG:
@@ -593,6 +657,37 @@
 	return htonl(NFS_OK);
 }
 
+static void nfs4_callback_free_slot(struct nfs4_session *session)
+{
+	struct nfs4_slot_table *tbl = &session->bc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	/*
+	 * Let the state manager know callback processing done.
+	 * A single slot, so highest used slotid is either 0 or -1
+	 */
+	tbl->highest_used_slotid--;
+	nfs4_check_drain_bc_complete(session);
+	spin_unlock(&tbl->slot_tbl_lock);
+}
+
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+	if (clp && clp->cl_session)
+		nfs4_callback_free_slot(clp->cl_session);
+}
+
+/* A single slot, so highest used slotid is either 0 or -1 */
+void nfs4_cb_take_slot(struct nfs_client *clp)
+{
+	struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	tbl->highest_used_slotid++;
+	BUG_ON(tbl->highest_used_slotid != 0);
+	spin_unlock(&tbl->slot_tbl_lock);
+}
+
 #else /* CONFIG_NFS_V4_1 */
 
 static __be32
@@ -601,6 +696,9 @@
 	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
 }
 
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 static __be32
@@ -621,7 +719,8 @@
 static __be32 process_op(uint32_t minorversion, int nop,
 		struct svc_rqst *rqstp,
 		struct xdr_stream *xdr_in, void *argp,
-		struct xdr_stream *xdr_out, void *resp, int* drc_status)
+		struct xdr_stream *xdr_out, void *resp,
+		struct cb_process_state *cps)
 {
 	struct callback_op *op = &callback_ops[0];
 	unsigned int op_nr;
@@ -644,8 +743,8 @@
 	if (status)
 		goto encode_hdr;
 
-	if (*drc_status) {
-		status = *drc_status;
+	if (cps->drc_status) {
+		status = cps->drc_status;
 		goto encode_hdr;
 	}
 
@@ -653,16 +752,10 @@
 	if (maxlen > 0 && maxlen < PAGE_SIZE) {
 		status = op->decode_args(rqstp, xdr_in, argp);
 		if (likely(status == 0))
-			status = op->process_op(argp, resp);
+			status = op->process_op(argp, resp, cps);
 	} else
 		status = htonl(NFS4ERR_RESOURCE);
 
-	/* Only set by OP_CB_SEQUENCE processing */
-	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
-		*drc_status = status;
-		status = 0;
-	}
-
 encode_hdr:
 	res = encode_op_hdr(xdr_out, op_nr, status);
 	if (unlikely(res))
@@ -681,8 +774,11 @@
 	struct cb_compound_hdr_arg hdr_arg = { 0 };
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
-	__be32 *p;
-	__be32 status, drc_status = 0;
+	__be32 *p, status;
+	struct cb_process_state cps = {
+		.drc_status = 0,
+		.clp = NULL,
+	};
 	unsigned int nops = 0;
 
 	dprintk("%s: start\n", __func__);
@@ -696,6 +792,13 @@
 	if (status == __constant_htonl(NFS4ERR_RESOURCE))
 		return rpc_garbage_args;
 
+	if (hdr_arg.minorversion == 0) {
+		cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
+		if (!cps.clp)
+			return rpc_drop_reply;
+	} else
+		cps.svc_sid = bc_xprt_sid(rqstp);
+
 	hdr_res.taglen = hdr_arg.taglen;
 	hdr_res.tag = hdr_arg.tag;
 	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
@@ -703,7 +806,7 @@
 
 	while (status == 0 && nops != hdr_arg.nops) {
 		status = process_op(hdr_arg.minorversion, nops, rqstp,
-				    &xdr_in, argp, &xdr_out, resp, &drc_status);
+				    &xdr_in, argp, &xdr_out, resp, &cps);
 		nops++;
 	}
 
@@ -716,6 +819,8 @@
 
 	*hdr_res.status = status;
 	*hdr_res.nops = htonl(nops);
+	nfs4_cb_free_slot(cps.clp);
+	nfs_put_client(cps.clp);
 	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
 	return rpc_success;
 }
@@ -739,6 +844,12 @@
 		.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
 	},
 #if defined(CONFIG_NFS_V4_1)
+	[OP_CB_LAYOUTRECALL] = {
+		.process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
+		.decode_args =
+			(callback_decode_arg_t)decode_layoutrecall_args,
+		.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
+	},
 	[OP_CB_SEQUENCE] = {
 		.process_op = (callback_process_op_t)nfs4_callback_sequence,
 		.decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0870d0d..192f2f8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -56,6 +56,30 @@
 static LIST_HEAD(nfs_client_list);
 static LIST_HEAD(nfs_volume_list);
 static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
+#ifdef CONFIG_NFS_V4
+static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */
+
+/*
+ * Get a unique NFSv4.0 callback identifier which will be used
+ * by the V4.0 callback service to look up the nfs_client struct
+ */
+static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+{
+	int ret = 0;
+
+	if (clp->rpc_ops->version != 4 || minorversion != 0)
+		return ret;
+retry:
+	if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL))
+		return -ENOMEM;
+	spin_lock(&nfs_client_lock);
+	ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident);
+	spin_unlock(&nfs_client_lock);
+	if (ret == -EAGAIN)
+		goto retry;
+	return ret;
+}
+#endif /* CONFIG_NFS_V4 */
 
 /*
  * RPC cruft for NFS
@@ -144,7 +168,10 @@
 	clp->cl_proto = cl_init->proto;
 
 #ifdef CONFIG_NFS_V4
-	INIT_LIST_HEAD(&clp->cl_delegations);
+	err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
+	if (err)
+		goto error_cleanup;
+
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -170,21 +197,17 @@
 }
 
 #ifdef CONFIG_NFS_V4
-/*
- * Clears/puts all minor version specific parts from an nfs_client struct
- * reverting it to minorversion 0.
- */
-static void nfs4_clear_client_minor_version(struct nfs_client *clp)
-{
 #ifdef CONFIG_NFS_V4_1
-	if (nfs4_has_session(clp)) {
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+	if (nfs4_has_session(clp))
 		nfs4_destroy_session(clp->cl_session);
-		clp->cl_session = NULL;
-	}
-
-	clp->cl_mvops = nfs_v4_minor_ops[0];
-#endif /* CONFIG_NFS_V4_1 */
 }
+#else /* CONFIG_NFS_V4_1 */
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
 
 /*
  * Destroy the NFS4 callback service
@@ -199,17 +222,49 @@
 {
 	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
 		nfs4_kill_renewd(clp);
-	nfs4_clear_client_minor_version(clp);
+	nfs4_shutdown_session(clp);
 	nfs4_destroy_callback(clp);
 	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
 		nfs_idmap_delete(clp);
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
 }
+
+/* idr_remove_all is not needed as all IDs are removed by nfs_put_client */
+void nfs_cleanup_cb_ident_idr(void)
+{
+	idr_destroy(&cb_ident_idr);
+}
+
+/* nfs_client_lock held */
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+	if (clp->cl_cb_ident)
+		idr_remove(&cb_ident_idr, clp->cl_cb_ident);
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+	rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
+}
+
 #else
 static void nfs4_shutdown_client(struct nfs_client *clp)
 {
 }
+
+void nfs_cleanup_cb_ident_idr(void)
+{
+}
+
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+}
+
 #endif /* CONFIG_NFS_V4 */
 
 /*
@@ -248,6 +303,7 @@
 
 	if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) {
 		list_del(&clp->cl_share_link);
+		nfs_cb_idr_remove_locked(clp);
 		spin_unlock(&nfs_client_lock);
 
 		BUG_ON(!list_empty(&clp->cl_superblocks));
@@ -363,70 +419,28 @@
 	return 0;
 }
 
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
+/* Common match routine for v4.0 and v4.1 callback services */
+bool
+nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp,
+		     u32 minorversion)
 {
-	struct nfs_client *clp;
+	struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
 
-	spin_lock(&nfs_client_lock);
-	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
-		struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+	/* Don't match clients that failed to initialise */
+	if (!(clp->cl_cons_state == NFS_CS_READY ||
+	    clp->cl_cons_state == NFS_CS_SESSION_INITING))
+		return false;
 
-		/* Don't match clients that failed to initialise properly */
-		if (!(clp->cl_cons_state == NFS_CS_READY ||
-		      clp->cl_cons_state == NFS_CS_SESSION_INITING))
-			continue;
+	/* Match the version and minorversion */
+	if (clp->rpc_ops->version != 4 ||
+	    clp->cl_minorversion != minorversion)
+		return false;
 
-		/* Different NFS versions cannot share the same nfs_client */
-		if (clp->rpc_ops->version != nfsversion)
-			continue;
+	/* Match only the IP address, not the port number */
+	if (!nfs_sockaddr_match_ipaddr(addr, clap))
+		return false;
 
-		/* Match only the IP address, not the port number */
-		if (!nfs_sockaddr_match_ipaddr(addr, clap))
-			continue;
-
-		atomic_inc(&clp->cl_count);
-		spin_unlock(&nfs_client_lock);
-		return clp;
-	}
-	spin_unlock(&nfs_client_lock);
-	return NULL;
-}
-
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
-{
-	struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr;
-	u32 nfsvers = clp->rpc_ops->version;
-
-	spin_lock(&nfs_client_lock);
-	list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) {
-		struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
-
-		/* Don't match clients that failed to initialise properly */
-		if (clp->cl_cons_state != NFS_CS_READY)
-			continue;
-
-		/* Different NFS versions cannot share the same nfs_client */
-		if (clp->rpc_ops->version != nfsvers)
-			continue;
-
-		/* Match only the IP address, not the port number */
-		if (!nfs_sockaddr_match_ipaddr(sap, clap))
-			continue;
-
-		atomic_inc(&clp->cl_count);
-		spin_unlock(&nfs_client_lock);
-		return clp;
-	}
-	spin_unlock(&nfs_client_lock);
-	return NULL;
+	return true;
 }
 
 /*
@@ -988,6 +1002,27 @@
 	target->options = source->options;
 }
 
+static void nfs_server_insert_lists(struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+
+	spin_lock(&nfs_client_lock);
+	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
+	list_add_tail(&server->master_link, &nfs_volume_list);
+	spin_unlock(&nfs_client_lock);
+
+}
+
+static void nfs_server_remove_lists(struct nfs_server *server)
+{
+	spin_lock(&nfs_client_lock);
+	list_del_rcu(&server->client_link);
+	list_del(&server->master_link);
+	spin_unlock(&nfs_client_lock);
+
+	synchronize_rcu();
+}
+
 /*
  * Allocate and initialise a server record
  */
@@ -1004,6 +1039,7 @@
 	/* Zero out the NFS state stuff */
 	INIT_LIST_HEAD(&server->client_link);
 	INIT_LIST_HEAD(&server->master_link);
+	INIT_LIST_HEAD(&server->delegations);
 
 	atomic_set(&server->active, 0);
 
@@ -1019,6 +1055,8 @@
 		return NULL;
 	}
 
+	pnfs_init_server(server);
+
 	return server;
 }
 
@@ -1029,11 +1067,8 @@
 {
 	dprintk("--> nfs_free_server()\n");
 
+	nfs_server_remove_lists(server);
 	unset_pnfs_layoutdriver(server);
-	spin_lock(&nfs_client_lock);
-	list_del(&server->client_link);
-	list_del(&server->master_link);
-	spin_unlock(&nfs_client_lock);
 
 	if (server->destroy != NULL)
 		server->destroy(server);
@@ -1108,11 +1143,7 @@
 		(unsigned long long) server->fsid.major,
 		(unsigned long long) server->fsid.minor);
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 	nfs_free_fattr(fattr);
 	return server;
@@ -1125,6 +1156,101 @@
 
 #ifdef CONFIG_NFS_V4
 /*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by IP address, protocol version, and minorversion
+ *
+ * Called from the pg_authenticate method. The callback identifier
+ * is not used as it has not been decoded.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_no_ident(const struct sockaddr *addr)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+		if (nfs4_cb_match_client(addr, clp, 0) == false)
+			continue;
+		atomic_inc(&clp->cl_count);
+		spin_unlock(&nfs_client_lock);
+		return clp;
+	}
+	spin_unlock(&nfs_client_lock);
+	return NULL;
+}
+
+/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by callback identifier
+ */
+struct nfs_client *
+nfs4_find_client_ident(int cb_ident)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	clp = idr_find(&cb_ident_idr, cb_ident);
+	if (clp)
+		atomic_inc(&clp->cl_count);
+	spin_unlock(&nfs_client_lock);
+	return clp;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * NFSv4.1 callback thread helper
+ * For CB_COMPOUND calls, find a client by IP address, protocol version,
+ * minorversion, and sessionID
+ *
+ * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service
+ * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL
+ * can arrive before the callback sessionid is set. For CB_NULL calls,
+ * find a client by IP address protocol version, and minorversion.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+			   struct nfs4_sessionid *sid, int is_cb_compound)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+		if (nfs4_cb_match_client(addr, clp, 1) == false)
+			continue;
+
+		if (!nfs4_has_session(clp))
+			continue;
+
+		/* Match sessionid unless cb_null call */
+		if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data,
+		    sid->data, NFS4_MAX_SESSIONID_LEN) != 0))
+			continue;
+
+		atomic_inc(&clp->cl_count);
+		spin_unlock(&nfs_client_lock);
+		return clp;
+	}
+	spin_unlock(&nfs_client_lock);
+	return NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+			   struct nfs4_sessionid *sid, int is_cb_compound)
+{
+	return NULL;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
  * Initialize the NFS4 callback service
  */
 static int nfs4_init_callback(struct nfs_client *clp)
@@ -1342,11 +1468,7 @@
 	if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
 		server->namelen = NFS4_MAXNAMLEN;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 out:
 	nfs_free_fattr(fattr);
@@ -1551,11 +1673,7 @@
 	if (error < 0)
 		goto out_free_server;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 
 	nfs_free_fattr(fattr_fsinfo);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc..364e432 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -40,11 +40,23 @@
 	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 }
 
+/**
+ * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+ * @delegation: delegation to process
+ *
+ */
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
 {
 	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
 }
 
+/**
+ * nfs_have_delegation - check if inode has a delegation
+ * @inode: inode to check
+ * @flags: delegation types to check for
+ *
+ * Returns one if inode has the indicated delegation, otherwise zero.
+ */
 int nfs_have_delegation(struct inode *inode, fmode_t flags)
 {
 	struct nfs_delegation *delegation;
@@ -119,10 +131,15 @@
 	return 0;
 }
 
-/*
- * Set up a delegation on an inode
+/**
+ * nfs_inode_reclaim_delegation - process a delegation reclaim request
+ * @inode: inode to process
+ * @cred: credential to use for request
+ * @res: new delegation state from server
+ *
  */
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+				  struct nfs_openres *res)
 {
 	struct nfs_delegation *delegation;
 	struct rpc_cred *oldcred = NULL;
@@ -175,38 +192,52 @@
 	return inode;
 }
 
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
-							   const nfs4_stateid *stateid,
-							   struct nfs_client *clp)
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+			     struct nfs_server *server)
 {
 	struct nfs_delegation *delegation =
 		rcu_dereference_protected(nfsi->delegation,
-					  lockdep_is_held(&clp->cl_lock));
+				lockdep_is_held(&server->nfs_client->cl_lock));
 
 	if (delegation == NULL)
 		goto nomatch;
+
 	spin_lock(&delegation->lock);
-	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
-				sizeof(delegation->stateid.data)) != 0)
-		goto nomatch_unlock;
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
 	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
-nomatch_unlock:
-	spin_unlock(&delegation->lock);
 nomatch:
 	return NULL;
 }
 
-/*
- * Set up a delegation on an inode
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+						    struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+	struct nfs_delegation *delegation;
+
+	spin_lock(&clp->cl_lock);
+	delegation = nfs_detach_delegation_locked(nfsi, server);
+	spin_unlock(&clp->cl_lock);
+	return delegation;
+}
+
+/**
+ * nfs_inode_set_delegation - set up a delegation on an inode
+ * @inode: inode to which delegation applies
+ * @cred: cred to use for subsequent delegation processing
+ * @res: new delegation state from server
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation, *old_delegation;
 	struct nfs_delegation *freeme = NULL;
@@ -227,7 +258,7 @@
 
 	spin_lock(&clp->cl_lock);
 	old_delegation = rcu_dereference_protected(nfsi->delegation,
-						   lockdep_is_held(&clp->cl_lock));
+					lockdep_is_held(&clp->cl_lock));
 	if (old_delegation != NULL) {
 		if (memcmp(&delegation->stateid, &old_delegation->stateid,
 					sizeof(old_delegation->stateid)) == 0 &&
@@ -246,9 +277,9 @@
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
+		freeme = nfs_detach_delegation_locked(nfsi, server);
 	}
-	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
+	list_add_rcu(&delegation->super_list, &server->delegations);
 	nfsi->delegation_state = delegation->type;
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
@@ -290,73 +321,85 @@
 	return err;
 }
 
-/*
- * Return all delegations that have been marked for return
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_client_return_marked_delegations(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
 	int err = 0;
 
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
-		rcu_read_unlock();
-		if (delegation != NULL) {
-			filemap_flush(inode->i_mapping);
-			err = __nfs_inode_return_delegation(inode, delegation, 0);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
+							&delegation->flags))
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL) {
+				filemap_flush(inode->i_mapping);
+				err = __nfs_inode_return_delegation(inode,
+								delegation, 0);
+			}
+			iput(inode);
+			if (!err)
+				goto restart;
+			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+			return err;
 		}
-		iput(inode);
-		if (!err)
-			goto restart;
-		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
-		return err;
 	}
 	rcu_read_unlock();
 	return 0;
 }
 
-/*
- * This function returns the delegation without reclaiming opens
- * or protecting against delegation reclaims.
- * It is therefore really only safe to be called from
- * nfs4_clear_inode()
+/**
+ * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
+ * @inode: inode to process
+ *
+ * Does not protect against delegation reclaims, therefore really only safe
+ * to be called from nfs4_clear_inode().
  */
 void nfs_inode_return_delegation_noreclaim(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL)
 			nfs_do_return_delegation(inode, delegation, 0);
 	}
 }
 
+/**
+ * nfs_inode_return_delegation - synchronously return a delegation
+ * @inode: inode to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
 int nfs_inode_return_delegation(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 	int err = 0;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL) {
 			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -365,46 +408,61 @@
 	return err;
 }
 
-static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
+static void nfs_mark_return_delegation(struct nfs_delegation *delegation)
 {
+	struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
+
 	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
 }
 
-/*
- * Return all delegations associated to a super block
+/**
+ * nfs_super_return_all_delegations - return delegations for one superblock
+ * @sb: sb to process
+ *
  */
 void nfs_super_return_all_delegations(struct super_block *sb)
 {
-	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
+	struct nfs_server *server = NFS_SB(sb);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_delegation *delegation;
 
 	if (clp == NULL)
 		return;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
-		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
-			set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 		spin_unlock(&delegation->lock);
 	}
 	rcu_read_unlock();
+
 	if (nfs_client_return_marked_delegations(clp) != 0)
 		nfs4_schedule_state_manager(clp);
 }
 
-static
-void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+						 fmode_t flags)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
 			continue;
 		if (delegation->type & flags)
-			nfs_mark_return_delegation(clp, delegation);
+			nfs_mark_return_delegation(delegation);
 	}
+}
+
+static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+							fmode_t flags)
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_all_delegation_types(server, flags);
 	rcu_read_unlock();
 }
 
@@ -419,19 +477,32 @@
 		nfs4_schedule_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+ * @flags: delegation types to expire
+ *
+ */
 void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
 {
 	nfs_client_mark_return_all_delegation_types(clp, flags);
 	nfs_delegation_run_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
 void nfs_expire_all_delegations(struct nfs_client *clp)
 {
 	nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
 }
 
-/*
- * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
+/**
+ * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
  */
 void nfs_handle_cb_pathdown(struct nfs_client *clp)
 {
@@ -440,29 +511,43 @@
 	nfs_client_mark_return_all_delegations(clp);
 }
 
-static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
+static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
 			continue;
-		nfs_mark_return_delegation(clp, delegation);
+		nfs_mark_return_delegation(delegation);
 	}
-	rcu_read_unlock();
 }
 
+/**
+ * nfs_expire_unreferenced_delegations - Eliminate unused delegations
+ * @clp: nfs_client to process
+ *
+ */
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
 {
-	nfs_client_mark_return_unreferenced_delegations(clp);
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_unreferenced_delegations(server);
+	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 }
 
-/*
- * Asynchronous delegation recall!
+/**
+ * nfs_async_inode_return_delegation - asynchronously return a delegation
+ * @inode: inode to process
+ * @stateid: state ID information from CB_RECALL arguments
+ *
+ * Returns zero on success, or a negative errno value.
  */
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode,
+				      const nfs4_stateid *stateid)
 {
 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_delegation *delegation;
@@ -474,22 +559,21 @@
 		rcu_read_unlock();
 		return -ENOENT;
 	}
-
-	nfs_mark_return_delegation(clp, delegation);
+	nfs_mark_return_delegation(delegation);
 	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 	return 0;
 }
 
-/*
- * Retrieve the inode associated with a delegation
- */
-struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
+static struct inode *
+nfs_delegation_find_inode_server(struct nfs_server *server,
+				 const struct nfs_fh *fhandle)
 {
 	struct nfs_delegation *delegation;
 	struct inode *res = NULL;
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
 		if (delegation->inode != NULL &&
 		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
@@ -499,49 +583,121 @@
 		if (res != NULL)
 			break;
 	}
+	return res;
+}
+
+/**
+ * nfs_delegation_find_inode - retrieve the inode associated with a delegation
+ * @clp: client state handle
+ * @fhandle: filehandle from a delegation recall
+ *
+ * Returns pointer to inode matching "fhandle," or NULL if a matching inode
+ * cannot be found.
+ */
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
+					const struct nfs_fh *fhandle)
+{
+	struct nfs_server *server;
+	struct inode *res = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		res = nfs_delegation_find_inode_server(server, fhandle);
+		if (res != NULL)
+			break;
+	}
 	rcu_read_unlock();
 	return res;
 }
 
-/*
- * Mark all delegations as needing to be reclaimed
+static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
+{
+	struct nfs_delegation *delegation;
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+}
+
+/**
+ * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_mark_reclaim(struct nfs_client *clp)
 {
-	struct nfs_delegation *delegation;
+	struct nfs_server *server;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
-		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_delegation_mark_reclaim_server(server);
 	rcu_read_unlock();
 }
 
-/*
- * Reap all unclaimed delegations after reboot recovery is done
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
+
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
-		rcu_read_unlock();
-		if (delegation != NULL)
-			nfs_free_delegation(delegation);
-		iput(inode);
-		goto restart;
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+						&delegation->flags) == 0)
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL)
+				nfs_free_delegation(delegation);
+			iput(inode);
+			goto restart;
+		}
 	}
 	rcu_read_unlock();
 }
 
+/**
+ * nfs_delegations_present - check for existence of delegations
+ * @clp: client state handle
+ *
+ * Returns one if there are any nfs_delegation structures attached
+ * to this nfs_client.
+ */
+int nfs_delegations_present(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	int ret = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		if (!list_empty(&server->delegations)) {
+			ret = 1;
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nfs4_copy_delegation_stateid - Copy inode's state ID information
+ * @dst: stateid data structure to fill in
+ * @inode: inode to check
+ *
+ * Returns one and fills in "dst->data" if inode had a delegation,
+ * otherwise zero is returned.
+ */
 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
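The delegation.c rework above replaces the single per-client delegation list with one list per nfs_server, so every traversal becomes two nested walks: the servers attached to the client, then the delegations attached to each server (see nfs_delegations_present()). A minimal user-space C sketch of that shape; the struct and field names are illustrative stand-ins for the kernel's RCU-protected lists.

/*
 * Two-level walk: client -> servers -> delegations.  Plain singly
 * linked lists stand in for the kernel's RCU-protected list_heads
 * (clp->cl_superblocks and server->delegations).
 */
#include <stdio.h>

struct delegation { const char *path; struct delegation *next; };
struct server { const char *name; struct delegation *delegations; struct server *next; };
struct client { struct server *servers; };

/* Analogue of nfs_delegations_present(): any server with a non-empty list? */
static int client_delegations_present(const struct client *clp)
{
	const struct server *server;

	for (server = clp->servers; server != NULL; server = server->next)
		if (server->delegations != NULL)
			return 1;
	return 0;
}

int main(void)
{
	struct delegation d1 = { "/export/a", NULL };
	struct server s1 = { "srv1", NULL, NULL };
	struct server s2 = { "srv2", &d1, &s1 };
	struct client clp = { &s2 };
	const struct server *server;
	const struct delegation *deleg;

	for (server = clp.servers; server != NULL; server = server->next)
		for (deleg = server->delegations; deleg != NULL; deleg = deleg->next)
			printf("%s holds a delegation for %s\n", server->name, deleg->path);

	printf("delegations present: %d\n", client_delegations_present(&clp));
	return 0;
}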
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2026304..d9322e4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -44,6 +44,7 @@
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 void nfs_handle_cb_pathdown(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_delegations_present(struct nfs_client *clp);
 
 void nfs_delegation_mark_reclaim(struct nfs_client *clp);
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 996dd89..df8c03a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -33,8 +33,8 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
-#include <linux/vmalloc.h>
 #include <linux/kmemleak.h>
+#include <linux/xattr.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -125,9 +125,10 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr       = nfs4_getxattr,
-	.setxattr       = nfs4_setxattr,
-	.listxattr      = nfs4_listxattr,
+	.getxattr	= generic_getxattr,
+	.setxattr	= generic_setxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
 };
 
 #endif /* CONFIG_NFS_V4 */
@@ -172,7 +173,7 @@
 	struct nfs_cache_array_entry array[0];
 };
 
-typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
 typedef struct {
 	struct file	*file;
 	struct page	*page;
@@ -378,14 +379,14 @@
 	return error;
 }
 
-/* Fill in an entry based on the xdr code stored in desc->page */
-static
-int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream)
+static int xdr_decode(nfs_readdir_descriptor_t *desc,
+		      struct nfs_entry *entry, struct xdr_stream *xdr)
 {
-	__be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	int error;
 
+	error = desc->decode(xdr, entry, desc->plus);
+	if (error)
+		return error;
 	entry->fattr->time_start = desc->timestamp;
 	entry->fattr->gencount = desc->gencount;
 	return 0;
@@ -438,7 +439,6 @@
 	if (dentry == NULL)
 		return;
 
-	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
 	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
 	if (IS_ERR(inode))
 		goto out;
@@ -459,25 +459,26 @@
 /* Perform conversion from xdr to cache array */
 static
 int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
-				void *xdr_page, struct page *page, unsigned int buflen)
+				struct page **xdr_pages, struct page *page, unsigned int buflen)
 {
 	struct xdr_stream stream;
-	struct xdr_buf buf;
-	__be32 *ptr = xdr_page;
+	struct xdr_buf buf = {
+		.pages = xdr_pages,
+		.page_len = buflen,
+		.buflen = buflen,
+		.len = buflen,
+	};
+	struct page *scratch;
 	struct nfs_cache_array *array;
 	unsigned int count = 0;
 	int status;
 
-	buf.head->iov_base = xdr_page;
-	buf.head->iov_len = buflen;
-	buf.tail->iov_len = 0;
-	buf.page_base = 0;
-	buf.page_len = 0;
-	buf.buflen = buf.head->iov_len;
-	buf.len = buf.head->iov_len;
+	scratch = alloc_page(GFP_KERNEL);
+	if (scratch == NULL)
+		return -ENOMEM;
 
-	xdr_init_decode(&stream, &buf, ptr);
-
+	xdr_init_decode(&stream, &buf, NULL);
+	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
 
 	do {
 		status = xdr_decode(desc, entry, &stream);
@@ -506,6 +507,8 @@
 		} else
 			status = PTR_ERR(array);
 	}
+
+	put_page(scratch);
 	return status;
 }
 
@@ -521,7 +524,6 @@
 void nfs_readdir_free_large_page(void *ptr, struct page **pages,
 		unsigned int npages)
 {
-	vm_unmap_ram(ptr, npages);
 	nfs_readdir_free_pagearray(pages, npages);
 }
 
@@ -530,9 +532,8 @@
  * to nfs_readdir_free_large_page
  */
 static
-void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
+int nfs_readdir_large_page(struct page **pages, unsigned int npages)
 {
-	void *ptr;
 	unsigned int i;
 
 	for (i = 0; i < npages; i++) {
@@ -541,13 +542,11 @@
 			goto out_freepages;
 		pages[i] = page;
 	}
+	return 0;
 
-	ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
-	if (!IS_ERR_OR_NULL(ptr))
-		return ptr;
 out_freepages:
 	nfs_readdir_free_pagearray(pages, i);
-	return NULL;
+	return -ENOMEM;
 }
 
 static
@@ -566,6 +565,7 @@
 	entry.eof = 0;
 	entry.fh = nfs_alloc_fhandle();
 	entry.fattr = nfs_alloc_fattr();
+	entry.server = NFS_SERVER(inode);
 	if (entry.fh == NULL || entry.fattr == NULL)
 		goto out;
 
@@ -577,8 +577,8 @@
 	memset(array, 0, sizeof(struct nfs_cache_array));
 	array->eof_index = -1;
 
-	pages_ptr = nfs_readdir_large_page(pages, array_size);
-	if (!pages_ptr)
+	status = nfs_readdir_large_page(pages, array_size);
+	if (status < 0)
 		goto out_release_array;
 	do {
 		unsigned int pglen;
@@ -587,7 +587,7 @@
 		if (status < 0)
 			break;
 		pglen = status;
-		status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen);
+		status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
 		if (status < 0) {
 			if (status == -ENOSPC)
 				status = 0;
@@ -938,7 +938,8 @@
  * component of the path.
  * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT.
  */
-static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, unsigned int mask)
+static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd,
+						unsigned int mask)
 {
 	if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))
 		return 0;
@@ -1018,7 +1019,7 @@
  * If the parent directory is seen to have changed, we throw out the
  * cached dentry and do a new lookup.
  */
-static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
+static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
 	struct inode *dir;
 	struct inode *inode;
@@ -1027,6 +1028,9 @@
 	struct nfs_fattr *fattr = NULL;
 	int error;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
 	parent = dget_parent(dentry);
 	dir = parent->d_inode;
 	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
@@ -1117,7 +1121,7 @@
 /*
  * This is called from dput() when d_count is going to 0.
  */
-static int nfs_dentry_delete(struct dentry *dentry)
+static int nfs_dentry_delete(const struct dentry *dentry)
 {
 	dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -1188,8 +1192,6 @@
 	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
 		goto out;
 
-	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
-
 	/*
 	 * If we're doing an exclusive create, optimize away the lookup
 	 * but don't hash the dentry.
@@ -1217,7 +1219,7 @@
 		goto out_unblock_sillyrename;
 	}
 	inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
-	res = (struct dentry *)inode;
+	res = ERR_CAST(inode);
 	if (IS_ERR(res))
 		goto out_unblock_sillyrename;
 
@@ -1333,7 +1335,6 @@
 		res = ERR_PTR(-ENAMETOOLONG);
 		goto out;
 	}
-	dentry->d_op = NFS_PROTO(dir)->dentry_ops;
 
 	/* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash
 	 * the dentry. */
@@ -1351,8 +1352,7 @@
 	if (nd->flags & LOOKUP_CREATE) {
 		attr.ia_mode = nd->intent.open.create_mode;
 		attr.ia_valid = ATTR_MODE;
-		if (!IS_POSIXACL(dir))
-			attr.ia_mode &= ~current_umask();
+		attr.ia_mode &= ~current_umask();
 	} else {
 		open_flags &= ~(O_EXCL | O_CREAT);
 		attr.ia_valid = 0;
@@ -1406,11 +1406,15 @@
 static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
 	struct dentry *parent = NULL;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode;
 	struct inode *dir;
 	struct nfs_open_context *ctx;
 	int openflags, ret = 0;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
 	if (!is_atomic_open(nd) || d_mountpoint(dentry))
 		goto no_open;
 
@@ -1579,6 +1583,7 @@
 {
 	struct iattr attr;
 	int error;
+	int open_flags = 0;
 
 	dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
 			dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
@@ -1586,7 +1591,10 @@
 	attr.ia_mode = mode;
 	attr.ia_valid = ATTR_MODE;
 
-	error = NFS_PROTO(dir)->create(dir, dentry, &attr, 0, NULL);
+	if ((nd->flags & LOOKUP_CREATE) != 0)
+		open_flags = nd->intent.open.flags;
+
+	error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL);
 	if (error != 0)
 		goto out_err;
 	return 0;
@@ -1718,11 +1726,9 @@
 	dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
 		dir->i_ino, dentry->d_name.name);
 
-	spin_lock(&dcache_lock);
 	spin_lock(&dentry->d_lock);
-	if (atomic_read(&dentry->d_count) > 1) {
+	if (dentry->d_count > 1) {
 		spin_unlock(&dentry->d_lock);
-		spin_unlock(&dcache_lock);
 		/* Start asynchronous writeout of the inode */
 		write_inode_now(dentry->d_inode, 0);
 		error = nfs_sillyrename(dir, dentry);
@@ -1733,7 +1739,6 @@
 		need_rehash = 1;
 	}
 	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
 	error = nfs_safe_remove(dentry);
 	if (!error || error == -ENOENT) {
 		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -1868,7 +1873,7 @@
 	dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
 		 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
 		 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
-		 atomic_read(&new_dentry->d_count));
+		 new_dentry->d_count);
 
 	/*
 	 * For non-directories, check whether the target is busy and if so,
@@ -1886,7 +1891,7 @@
 			rehash = new_dentry;
 		}
 
-		if (atomic_read(&new_dentry->d_count) > 2) {
+		if (new_dentry->d_count > 2) {
 			int err;
 
 			/* copy the target dentry's name */
@@ -2188,11 +2193,14 @@
 	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
 }
 
-int nfs_permission(struct inode *inode, int mask)
+int nfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	struct rpc_cred *cred;
 	int res = 0;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	nfs_inc_stats(inode, NFSIOS_VFSACCESS);
 
 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
@@ -2240,7 +2248,7 @@
 out_notsup:
 	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
 	if (res == 0)
-		res = generic_permission(inode, mask, NULL);
+		res = generic_permission(inode, mask, flags, NULL);
 	goto out;
 }
 
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index ac7b814..b5ffe8f 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -63,9 +63,11 @@
 		 * This again causes shrink_dcache_for_umount_subtree() to
 		 * Oops, since the test for IS_ROOT() will fail.
 		 */
-		spin_lock(&dcache_lock);
+		spin_lock(&sb->s_root->d_inode->i_lock);
+		spin_lock(&sb->s_root->d_lock);
 		list_del_init(&sb->s_root->d_alias);
-		spin_unlock(&dcache_lock);
+		spin_unlock(&sb->s_root->d_lock);
+		spin_unlock(&sb->s_root->d_inode->i_lock);
 	}
 	return 0;
 }
@@ -117,9 +119,6 @@
 	}
 
 	security_d_instantiate(ret, inode);
-
-	if (ret->d_op == NULL)
-		ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
 out:
 	nfs_free_fattr(fsinfo.fattr);
 	return ret;
@@ -225,9 +224,6 @@
 
 	security_d_instantiate(ret, inode);
 
-	if (ret->d_op == NULL)
-		ret->d_op = server->nfs_client->rpc_ops->dentry_ops;
-
 out:
 	nfs_free_fattr(fattr);
 	dprintk("<-- nfs4_get_root()\n");
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 4e2d9b6..1869688 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -238,7 +238,7 @@
 	return nfs_idmap_lookup_name(gid, "group", buf, buflen);
 }
 
-#else  /* CONFIG_NFS_USE_IDMAPPER not defined */
+#else  /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
 
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e67e31c..ce00b70 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1410,9 +1410,9 @@
  */
 void nfs4_evict_inode(struct inode *inode)
 {
+	pnfs_destroy_layout(NFS_I(inode));
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
-	pnfs_destroy_layout(NFS_I(inode));
 	/* If we are holding a delegation, return it! */
 	nfs_inode_return_delegation_noreclaim(inode);
 	/* First call standard NFS clear_inode() code */
@@ -1438,9 +1438,16 @@
 	return &nfsi->vfs_inode;
 }
 
+static void nfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
+}
+
 void nfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
+	call_rcu(&inode->i_rcu, nfs_i_callback);
 }
 
 static inline void nfs4_init_once(struct nfs_inode *nfsi)
@@ -1612,6 +1619,7 @@
 #ifdef CONFIG_PROC_FS
 	rpc_proc_unregister("nfs");
 #endif
+	nfs_cleanup_cb_ident_idr();
 	unregister_nfs_fs();
 	nfs_fs_proc_exit();
 	nfsiod_stop();
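nfs_destroy_inode() above now defers the actual free through an rcu_head embedded in the inode, and nfs_i_callback() recovers the inode from that rcu_head with container_of(). A self-contained sketch of that recovery step using offsetof(); the struct below is a stand-in, not the kernel's inode.

/*
 * Recover the containing object from a pointer to one of its members,
 * exactly what container_of() does in nfs_i_callback().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; };

struct fake_inode {
	unsigned long	ino;
	struct rcu_head	i_rcu;		/* embedded callback handle */
};

static void free_callback(struct rcu_head *head)
{
	struct fake_inode *inode = container_of(head, struct fake_inode, i_rcu);

	printf("freeing inode %lu via its embedded rcu_head\n", inode->ino);
}

int main(void)
{
	struct fake_inode inode = { .ino = 42 };

	/* A real call_rcu() would invoke this only after a grace period. */
	free_callback(&inode.i_rcu);
	return 0;
}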
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e6356b7..bfa3a34 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -128,9 +128,13 @@
 /* client.c */
 extern struct rpc_program nfs_program;
 
+extern void nfs_cleanup_cb_ident_idr(void);
 extern void nfs_put_client(struct nfs_client *);
-extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32);
-extern struct nfs_client *nfs_find_client_next(struct nfs_client *);
+extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
+extern struct nfs_client *nfs4_find_client_ident(int);
+extern struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *,
+			   int);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
@@ -185,17 +189,20 @@
 extern void nfs_destroy_directcache(void);
 
 /* nfs2xdr.c */
-extern int nfs_stat_to_errno(int);
+extern int nfs_stat_to_errno(enum nfs_stat);
 extern struct rpc_procinfo nfs_procedures[];
-extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs2_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 
 /* nfs3xdr.c */
 extern struct rpc_procinfo nfs3_procedures[];
-extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs3_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 
 /* nfs4xdr.c */
 #ifdef CONFIG_NFS_V4
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs4_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 #endif
 #ifdef CONFIG_NFS_V4_1
 extern const u32 nfs41_maxread_overhead;
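The internal.h prototypes change the dirent decoders from returning a __be32 pointer (or an ERR_PTR-encoded error) to returning a plain int errno. A small sketch contrasting the two calling conventions; the ERR_PTR/IS_ERR/PTR_ERR helpers are re-implemented here for user space and the decoder bodies are dummies.

/*
 * Old convention: return an updated pointer, or an error encoded as a
 * pointer.  New convention: return 0 or a negative errno directly.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static unsigned int word = 7;

/* Old style: return the updated position, or an ERR_PTR on failure. */
static unsigned int *old_decode(unsigned int *p)
{
	if (p == NULL)
		return ERR_PTR(-EAGAIN);
	return p + 1;
}

/* New style: return 0 or a negative errno; no pointer to misinterpret. */
static int new_decode(unsigned int *p)
{
	return (p == NULL) ? -EAGAIN : 0;
}

int main(void)
{
	unsigned int *p = old_decode(&word);

	if (IS_ERR(p))
		printf("old style error: %ld\n", PTR_ERR(p));
	else
		printf("old style advanced to %p\n", (void *)p);

	printf("new style: %d\n", new_decode(NULL));
	return 0;
}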
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 4f981f1..d4c2d6b 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -236,10 +236,8 @@
 		.authflavor	= RPC_AUTH_UNIX,
 		.flags		= RPC_CLNT_CREATE_NOPING,
 	};
-	struct mountres	result;
 	struct rpc_message msg	= {
 		.rpc_argp	= info->dirpath,
-		.rpc_resp	= &result,
 	};
 	struct rpc_clnt *clnt;
 	int status;
@@ -248,7 +246,7 @@
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
 	clnt = rpc_create(&args);
-	if (unlikely(IS_ERR(clnt)))
+	if (IS_ERR(clnt))
 		goto out_clnt_err;
 
 	dprintk("NFS: sending UMNT request for %s:%s\n",
@@ -280,29 +278,20 @@
  * XDR encode/decode functions for MOUNT
  */
 
-static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
 {
 	const u32 pathname_len = strlen(pathname);
 	__be32 *p;
 
-	if (unlikely(pathname_len > MNTPATHLEN))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(pathname_len > MNTPATHLEN);
+	p = xdr_reserve_space(xdr, 4 + pathname_len);
 	xdr_encode_opaque(p, pathname, pathname_len);
-
-	return 0;
 }
 
-static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
-			   const char *dirpath)
+static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const char *dirpath)
 {
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	return encode_mntdirpath(&xdr, dirpath);
+	encode_mntdirpath(xdr, dirpath);
 }
 
 /*
@@ -320,10 +309,10 @@
 	u32 status;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(status));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	status = ntohl(*p);
+	status = be32_to_cpup(p);
 
 	for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
 		if (mnt_errtbl[i].status == status) {
@@ -351,18 +340,16 @@
 	return 0;
 }
 
-static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
-			    struct mountres *res)
+static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct mountres *res)
 {
-	struct xdr_stream xdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	status = decode_status(&xdr, res);
+	status = decode_status(xdr, res);
 	if (unlikely(status != 0 || res->errno != 0))
 		return status;
-	return decode_fhandle(&xdr, res);
+	return decode_fhandle(xdr, res);
 }
 
 static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
@@ -371,10 +358,10 @@
 	u32 status;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(status));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	status = ntohl(*p);
+	status = be32_to_cpup(p);
 
 	for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
 		if (mnt3_errtbl[i].status == status) {
@@ -394,11 +381,11 @@
 	u32 size;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(size));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	size = ntohl(*p++);
+	size = be32_to_cpup(p);
 	if (size > NFS3_FHSIZE || size == 0)
 		return -EIO;
 
@@ -421,15 +408,15 @@
 	if (*count == 0)
 		return 0;
 
-	p = xdr_inline_decode(xdr, sizeof(entries));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	entries = ntohl(*p);
+	entries = be32_to_cpup(p);
 	dprintk("NFS: received %u auth flavors\n", entries);
 	if (entries > NFS_MAX_SECFLAVORS)
 		entries = NFS_MAX_SECFLAVORS;
 
-	p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+	p = xdr_inline_decode(xdr, 4 * entries);
 	if (unlikely(p == NULL))
 		return -EIO;
 
@@ -437,7 +424,7 @@
 		entries = *count;
 
 	for (i = 0; i < entries; i++) {
-		flavors[i] = ntohl(*p++);
+		flavors[i] = be32_to_cpup(p++);
 		dprintk("NFS:   auth flavor[%u]: %d\n", i, flavors[i]);
 	}
 	*count = i;
@@ -445,30 +432,28 @@
 	return 0;
 }
 
-static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
-			     struct mountres *res)
+static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 struct mountres *res)
 {
-	struct xdr_stream xdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	status = decode_fhs_status(&xdr, res);
+	status = decode_fhs_status(xdr, res);
 	if (unlikely(status != 0 || res->errno != 0))
 		return status;
-	status = decode_fhandle3(&xdr, res);
+	status = decode_fhandle3(xdr, res);
 	if (unlikely(status != 0)) {
 		res->errno = -EBADHANDLE;
 		return 0;
 	}
-	return decode_auth_flavors(&xdr, res);
+	return decode_auth_flavors(xdr, res);
 }
 
 static struct rpc_procinfo mnt_procedures[] = {
 	[MOUNTPROC_MNT] = {
 		.p_proc		= MOUNTPROC_MNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
-		.p_decode	= (kxdrproc_t)mnt_dec_mountres,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
+		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_replen	= MNT_dec_mountres_sz,
 		.p_statidx	= MOUNTPROC_MNT,
@@ -476,7 +461,7 @@
 	},
 	[MOUNTPROC_UMNT] = {
 		.p_proc		= MOUNTPROC_UMNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_statidx	= MOUNTPROC_UMNT,
 		.p_name		= "UMOUNT",
@@ -486,8 +471,8 @@
 static struct rpc_procinfo mnt3_procedures[] = {
 	[MOUNTPROC3_MNT] = {
 		.p_proc		= MOUNTPROC3_MNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
-		.p_decode	= (kxdrproc_t)mnt_dec_mountres3,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
+		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres3,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_replen	= MNT_dec_mountres3_sz,
 		.p_statidx	= MOUNTPROC3_MNT,
@@ -495,7 +480,7 @@
 	},
 	[MOUNTPROC3_UMNT] = {
 		.p_proc		= MOUNTPROC3_UMNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_statidx	= MOUNTPROC3_UMNT,
 		.p_name		= "UMOUNT",
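The mount_clnt.c conversions read each XDR word with be32_to_cpup() and size their reservations in literal multiples of the fixed four-byte XDR unit rather than sizeof(u32); the nfs2xdr.c rewrite below follows the same pattern. A user-space sketch of that decode step with a bounds check, using ntohl() as a stand-in for be32_to_cpup() and a made-up two-word reply.

/*
 * Decode two 32-bit XDR words from a hypothetical reply buffer.  XDR
 * words are always four bytes on the wire, hence the literal 4.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns 0 on success, -1 if the buffer is too short. */
static int decode_xdr_u32(const unsigned char *buf, size_t len,
			  size_t *offset, uint32_t *value)
{
	uint32_t raw;

	if (len - *offset < 4)
		return -1;
	memcpy(&raw, buf + *offset, 4);
	*value = ntohl(raw);
	*offset += 4;
	return 0;
}

int main(void)
{
	/* A made-up reply: a zero status word followed by the value 13. */
	const unsigned char reply[] = { 0, 0, 0, 0, 0, 0, 0, 13 };
	size_t off = 0;
	uint32_t status, next;

	if (decode_xdr_u32(reply, sizeof(reply), &off, &status) == 0 &&
	    decode_xdr_u32(reply, sizeof(reply), &off, &next) == 0)
		printf("status=%u next=%u\n", status, next);
	return 0;
}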
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index db6aa36..74aaf39 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -49,12 +49,17 @@
 	       const struct dentry *dentry,
 	       char *buffer, ssize_t buflen)
 {
-	char *end = buffer+buflen;
+	char *end;
 	int namelen;
+	unsigned seq;
 
+rename_retry:
+	end = buffer+buflen;
 	*--end = '\0';
 	buflen--;
-	spin_lock(&dcache_lock);
+
+	seq = read_seqbegin(&rename_lock);
+	rcu_read_lock();
 	while (!IS_ROOT(dentry) && dentry != droot) {
 		namelen = dentry->d_name.len;
 		buflen -= namelen + 1;
@@ -65,7 +70,9 @@
 		*--end = '/';
 		dentry = dentry->d_parent;
 	}
-	spin_unlock(&dcache_lock);
+	rcu_read_unlock();
+	if (read_seqretry(&rename_lock, seq))
+		goto rename_retry;
 	if (*end != '/') {
 		if (--buflen < 0)
 			goto Elong;
@@ -82,7 +89,9 @@
 	memcpy(end, base, namelen);
 	return end;
 Elong_unlock:
-	spin_unlock(&dcache_lock);
+	rcu_read_unlock();
+	if (read_seqretry(&rename_lock, seq))
+		goto rename_retry;
 Elong:
 	return ERR_PTR(-ENAMETOOLONG);
 }
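nfs_path() above now brackets the d_parent walk with read_seqbegin()/read_seqretry() on rename_lock and restarts from the top if a rename raced with it. A simplified single-threaded model of that retry loop; a real seqlock also needs memory barriers and the odd/even writer-in-progress check, both omitted here.

/*
 * Reader samples a sequence number, does its work, re-checks the
 * sequence number, and retries from scratch if a writer changed it.
 */
#include <stdio.h>

static unsigned int rename_seq;	/* bumped by an even amount per "rename" */
static const char *component = "a";

static unsigned int read_seqbegin(void)  { return rename_seq; }
static int read_seqretry(unsigned int s) { return rename_seq != s; }

/* Simulate a rename racing with the first attempt only. */
static void maybe_race(int attempt)
{
	if (attempt == 0) {
		rename_seq += 2;
		component = "b";
	}
}

int main(void)
{
	unsigned int seq;
	const char *seen;
	int attempt = 0;

retry:
	seq = read_seqbegin();
	seen = component;	/* "walk" the dentry tree */
	maybe_race(attempt++);
	if (read_seqretry(seq)) {
		printf("raced with rename, retrying\n");
		goto retry;
	}
	printf("stable path component: %s (after %d attempt(s))\n", seen, attempt);
	return 0;
}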
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5914a19..792cb13 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,584 +61,1008 @@
 #define NFS_readdirres_sz	(1)
 #define NFS_statfsres_sz	(1+NFS_info_sz)
 
-/*
- * Common NFS XDR functions as inlines
- */
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle)
-{
-	memcpy(p, fhandle->data, NFS2_FHSIZE);
-	return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle)
-{
-	/* NFSv2 handles have a fixed length */
-	fhandle->size = NFS2_FHSIZE;
-	memcpy(fhandle->data, p, NFS2_FHSIZE);
-	return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-static inline __be32*
-xdr_encode_time(__be32 *p, struct timespec *timep)
-{
-	*p++ = htonl(timep->tv_sec);
-	/* Convert nanoseconds into microseconds */
-	*p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0);
-	return p;
-}
-
-static inline __be32*
-xdr_encode_current_server_time(__be32 *p, struct timespec *timep)
-{
-	/*
-	 * Passing the invalid value useconds=1000000 is a
-	 * Sun convention for "set to current server time".
-	 * It's needed to make permissions checks for the
-	 * "touch" program across v2 mounts to Solaris and
-	 * Irix boxes work correctly. See description of
-	 * sattr in section 6.1 of "NFS Illustrated" by
-	 * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
-	 */
-	*p++ = htonl(timep->tv_sec);
-	*p++ = htonl(1000000);
-	return p;
-}
-
-static inline __be32*
-xdr_decode_time(__be32 *p, struct timespec *timep)
-{
-	timep->tv_sec = ntohl(*p++);
-	/* Convert microseconds into nanoseconds */
-	timep->tv_nsec = ntohl(*p++) * 1000;
-	return p;
-}
-
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
-{
-	u32 rdev, type;
-	type = ntohl(*p++);
-	fattr->mode = ntohl(*p++);
-	fattr->nlink = ntohl(*p++);
-	fattr->uid = ntohl(*p++);
-	fattr->gid = ntohl(*p++);
-	fattr->size = ntohl(*p++);
-	fattr->du.nfs2.blocksize = ntohl(*p++);
-	rdev = ntohl(*p++);
-	fattr->du.nfs2.blocks = ntohl(*p++);
-	fattr->fsid.major = ntohl(*p++);
-	fattr->fsid.minor = 0;
-	fattr->fileid = ntohl(*p++);
-	p = xdr_decode_time(p, &fattr->atime);
-	p = xdr_decode_time(p, &fattr->mtime);
-	p = xdr_decode_time(p, &fattr->ctime);
-	fattr->valid |= NFS_ATTR_FATTR_V2;
-	fattr->rdev = new_decode_dev(rdev);
-	if (type == NFCHR && rdev == NFS2_FIFO_DEV) {
-		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
-		fattr->rdev = 0;
-	}
-	return p;
-}
-
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
-{
-	const __be32 not_set = __constant_htonl(0xFFFFFFFF);
-
-	*p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
-	*p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
-	*p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
-	*p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
-
-	if (attr->ia_valid & ATTR_ATIME_SET) {
-		p = xdr_encode_time(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
-		p = xdr_encode_current_server_time(p, &attr->ia_atime);
-	} else {
-		*p++ = not_set;
-		*p++ = not_set;
-	}
-
-	if (attr->ia_valid & ATTR_MTIME_SET) {
-		p = xdr_encode_time(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
-		p = xdr_encode_current_server_time(p, &attr->ia_mtime);
-	} else {
-		*p++ = not_set;	
-		*p++ = not_set;
-	}
-  	return p;
-}
 
 /*
- * NFS encode functions
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
  */
-/*
- * Encode file handle argument
- * GETATTR, READLINK, STATFS
- */
-static int
-nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
-{
-	p = xdr_encode_fhandle(p, fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SETATTR arguments
- */
-static int
-nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode directory ops argument
- * LOOKUP, RMDIR
- */
-static int
-nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode REMOVE argument
- */
-static int
-nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name.name, args->name.len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
- */
-static int
-nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+				 unsigned int base, unsigned int len,
+				 unsigned int bufsize)
 {
 	struct rpc_auth	*auth = req->rq_cred->cr_auth;
 	unsigned int replen;
-	u32 offset = (u32)args->offset;
-	u32 count = args->count;
 
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(offset);
-	*p++ = htonl(count);
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen,
-			 args->pages, args->pgbase, count);
-	req->rq_rcv_buf.flags |= XDRBUF_READ;
-	return 0;
+	replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
 }
 
 /*
- * Decode READ reply
+ * Handle decode buffer overflows out-of-line.
  */
-static int
-nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
-{
-	struct kvec *iov = req->rq_rcv_buf.head;
-	size_t hdrlen;
-	u32 count, recvd;
-	int status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_fattr(p, res->fattr);
-
-	count = ntohl(*p++);
-	res->eof = 0;
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READ reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READ header is short. iovec will be shifted.\n");
-		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
-	}
-
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-			"count %u > recvd %u\n", count, recvd);
-		count = recvd;
-	}
-
-	dprintk("RPC:      readres OK count %u\n", count);
-	if (count < res->count)
-		res->count = count;
-
-	return count;
-}
-
-
-/*
- * Write arguments. Splice the buffer to be written into the iovec.
- */
-static int
-nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	u32 offset = (u32)args->offset;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(offset);
-	*p++ = htonl(offset);
-	*p++ = htonl(count);
-	*p++ = htonl(count);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	/* Copy the page array */
-	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
-	sndbuf->flags |= XDRBUF_WRITE;
-	return 0;
-}
-
-/*
- * Encode create arguments
- * CREATE, MKDIR
- */
-static int
-nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode RENAME arguments
- */
-static int
-nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
-	p = xdr_encode_fhandle(p, args->old_dir);
-	p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
-	p = xdr_encode_fhandle(p, args->new_dir);
-	p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode LINK arguments
- */
-static int
-nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_fhandle(p, args->tofh);
-	p = xdr_encode_array(p, args->toname, args->tolen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SYMLINK arguments
- */
-static int
-nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	size_t pad;
-
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	*p++ = htonl(args->pathlen);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen);
-
-	/*
-	 * xdr_encode_pages may have added a few bytes to ensure the
-	 * pathname ends on a 4-byte boundary.  Start encoding the
-	 * attributes after the pad bytes.
-	 */
-	pad = sndbuf->tail->iov_len;
-	if (pad > 0)
-		p++;
-	p = xdr_encode_sattr(p, args->sattr);
-	sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad;
-	return 0;
-}
-
-/*
- * Encode arguments to readdir call
- */
-static int
-nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->cookie);
-	*p++ = htonl(count); /* see above */
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
-	return 0;
-}
-
-/*
- * Decode the result of a readdir call.
- * We're not really decoding anymore, we just leave the buffer untouched
- * and only check that it is syntactically correct.
- * The real decoding happens in nfs_decode_entry below, called directly
- * from nfs_readdir for each entry.
- */
-static int
-nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
-	struct page **page;
-	size_t hdrlen;
-	unsigned int pglen, recvd;
-	int status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READDIR reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-
-	pglen = rcvbuf->page_len;
-	recvd = rcvbuf->len - hdrlen;
-	if (pglen > recvd)
-		pglen = recvd;
-	page = rcvbuf->pages;
-	return pglen;
-}
-
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	dprintk("nfs: %s: prematurely hit end of receive buffer. "
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 		"Remaining buffer length is %tu words.\n",
 		func, xdr->end - xdr->p);
 }
 
-__be32 *
-nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+
+/*
+ * Encode/decode NFSv2 basic data types
+ *
+ * Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+/*
+ *	typedef opaque	nfsdata<>;
+ */
+static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
+{
+	u32 recvd, count;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, count);
+	result->eof = 0;	/* NFSv2 does not pass EOF flag on the wire. */
+	result->count = count;
+	return count;
+out_cheating:
+	dprintk("NFS: server cheating in read result: "
+		"count %u > recvd %u\n", count, recvd);
+	count = recvd;
+	goto out;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	enum stat {
+ *		NFS_OK = 0,
+ *		NFSERR_PERM = 1,
+ *		NFSERR_NOENT = 2,
+ *		NFSERR_IO = 5,
+ *		NFSERR_NXIO = 6,
+ *		NFSERR_ACCES = 13,
+ *		NFSERR_EXIST = 17,
+ *		NFSERR_NODEV = 19,
+ *		NFSERR_NOTDIR = 20,
+ *		NFSERR_ISDIR = 21,
+ *		NFSERR_FBIG = 27,
+ *		NFSERR_NOSPC = 28,
+ *		NFSERR_ROFS = 30,
+ *		NFSERR_NAMETOOLONG = 63,
+ *		NFSERR_NOTEMPTY = 66,
+ *		NFSERR_DQUOT = 69,
+ *		NFSERR_STALE = 70,
+ *		NFSERR_WFLUSH = 99
+ *	};
+ */
+static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
 {
 	__be32 *p;
+
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (!ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
-		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.2.  ftype
+ *
+ *	enum ftype {
+ *		NFNON = 0,
+ *		NFREG = 1,
+ *		NFDIR = 2,
+ *		NFBLK = 3,
+ *		NFCHR = 4,
+ *		NFLNK = 5
+ *	};
+ *
+ */
+static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
+{
+	*type = be32_to_cpup(p++);
+	if (unlikely(*type > NF2FIFO))
+		*type = NFBAD;
+	return p;
+}
+
+/*
+ * 2.3.3.  fhandle
+ *
+ *	typedef opaque fhandle[FHSIZE];
+ */
+static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	BUG_ON(fh->size != NFS2_FHSIZE);
+	p = xdr_reserve_space(xdr, NFS2_FHSIZE);
+	memcpy(p, fh->data, NFS2_FHSIZE);
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	fh->size = NFS2_FHSIZE;
+	memcpy(fh->data, p, NFS2_FHSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.4.  timeval
+ *
+ *	struct timeval {
+ *		unsigned int seconds;
+ *		unsigned int useconds;
+ *	};
+ */
+static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	if (timep->tv_nsec != 0)
+		*p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
+	else
+		*p++ = cpu_to_be32(0);
+	return p;
+}
+
+/*
+ * Passing the invalid value useconds=1000000 is a Sun convention for
+ * "set to current server time".  It's needed to make permissions checks
+ * for the "touch" program across v2 mounts to Solaris and Irix servers
+ * work correctly.  See description of sattr in section 6.1 of "NFS
+ * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5.
+ */
+static __be32 *xdr_encode_current_server_time(__be32 *p,
+					      const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	*p++ = cpu_to_be32(1000000);
+	return p;
+}
+
+static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
+{
+	timep->tv_sec = be32_to_cpup(p++);
+	timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
+	return p;
+}
+
+/*
+ * 2.3.5.  fattr
+ *
+ *	struct fattr {
+ *		ftype		type;
+ *		unsigned int	mode;
+ *		unsigned int	nlink;
+ *		unsigned int	uid;
+ *		unsigned int	gid;
+ *		unsigned int	size;
+ *		unsigned int	blocksize;
+ *		unsigned int	rdev;
+ *		unsigned int	blocks;
+ *		unsigned int	fsid;
+ *		unsigned int	fileid;
+ *		timeval		atime;
+ *		timeval		mtime;
+ *		timeval		ctime;
+ *	};
+ *
+ */
+static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+	u32 rdev, type;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fattr->valid |= NFS_ATTR_FATTR_V2;
+
+	p = xdr_decode_ftype(p, &type);
+
+	fattr->mode = be32_to_cpup(p++);
+	fattr->nlink = be32_to_cpup(p++);
+	fattr->uid = be32_to_cpup(p++);
+	fattr->gid = be32_to_cpup(p++);
+	fattr->size = be32_to_cpup(p++);
+	fattr->du.nfs2.blocksize = be32_to_cpup(p++);
+
+	rdev = be32_to_cpup(p++);
+	fattr->rdev = new_decode_dev(rdev);
+	if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) {
+		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
+		fattr->rdev = 0;
 	}
 
-	p = xdr_inline_decode(xdr, 8);
-	if (unlikely(!p))
-		goto out_overflow;
+	fattr->du.nfs2.blocks = be32_to_cpup(p++);
+	fattr->fsid.major = be32_to_cpup(p++);
+	fattr->fsid.minor = 0;
+	fattr->fileid = be32_to_cpup(p++);
 
-	entry->ino	  = ntohl(*p++);
-	entry->len	  = ntohl(*p++);
+	p = xdr_decode_time(p, &fattr->atime);
+	p = xdr_decode_time(p, &fattr->mtime);
+	xdr_decode_time(p, &fattr->ctime);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
 
-	p = xdr_inline_decode(xdr, entry->len + 4);
-	if (unlikely(!p))
+/*
+ * 2.3.6.  sattr
+ *
+ *	struct sattr {
+ *		unsigned int	mode;
+ *		unsigned int	uid;
+ *		unsigned int	gid;
+ *		unsigned int	size;
+ *		timeval		atime;
+ *		timeval		mtime;
+ *	};
+ */
+
+#define NFS2_SATTR_NOT_SET	(0xffffffff)
+
+static __be32 *xdr_time_not_set(__be32 *p)
+{
+	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	return p;
+}
+
+static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
+
+	if (attr->ia_valid & ATTR_MODE)
+		*p++ = cpu_to_be32(attr->ia_mode);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_UID)
+		*p++ = cpu_to_be32(attr->ia_uid);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_GID)
+		*p++ = cpu_to_be32(attr->ia_gid);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_SIZE)
+		*p++ = cpu_to_be32((u32)attr->ia_size);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+
+	if (attr->ia_valid & ATTR_ATIME_SET)
+		p = xdr_encode_time(p, &attr->ia_atime);
+	else if (attr->ia_valid & ATTR_ATIME)
+		p = xdr_encode_current_server_time(p, &attr->ia_atime);
+	else
+		p = xdr_time_not_set(p);
+	if (attr->ia_valid & ATTR_MTIME_SET)
+		xdr_encode_time(p, &attr->ia_mtime);
+	else if (attr->ia_valid & ATTR_MTIME)
+		xdr_encode_current_server_time(p, &attr->ia_mtime);
+	else
+		xdr_time_not_set(p);
+}
+
+/*
+ * 2.3.7.  filename
+ *
+ *	typedef string filename<MAXNAMLEN>;
+ */
+static void encode_filename(struct xdr_stream *xdr,
+			    const char *name, u32 length)
+{
+	__be32 *p;
+
+	BUG_ON(length > NFS2_MAXNAMLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+static int decode_filename_inline(struct xdr_stream *xdr,
+				  const char **name, u32 *length)
+{
+	__be32 *p;
+	u32 count;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	entry->name	  = (const char *) p;
-	p		 += XDR_QUADLEN(entry->len);
-	entry->prev_cookie	  = entry->cookie;
-	entry->cookie	  = ntohl(*p++);
+	count = be32_to_cpup(p);
+	if (count > NFS3_MAXNAMLEN)
+		goto out_nametoolong;
+	p = xdr_inline_decode(xdr, count);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*name = (const char *)p;
+	*length = count;
+	return 0;
+out_nametoolong:
+	dprintk("NFS: returned filename too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.8.  path
+ *
+ *	typedef string path<MAXPATHLEN>;
+ */
+static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
+{
+	__be32 *p;
+
+	BUG_ON(length > NFS2_MAXPATHLEN);
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(length);
+	xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_path(struct xdr_stream *xdr)
+{
+	u32 length, recvd;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p);
+	if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
+		goto out_size;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(length > recvd))
+		goto out_cheating;
+
+	xdr_read_pages(xdr, length);
+	xdr_terminate_string(xdr->buf, length);
+	return 0;
+out_size:
+	dprintk("NFS: returned pathname too long: %u\n", length);
+	return -ENAMETOOLONG;
+out_cheating:
+	dprintk("NFS: server cheating in pathname result: "
+		"length %u > received %u\n", length, recvd);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.9.  attrstat
+ *
+ *	union attrstat switch (stat status) {
+ *	case NFS_OK:
+ *		fattr attributes;
+ *	default:
+ *		void;
+ *	};
+ */
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_fattr(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.3.10.  diropargs
+ *
+ *	struct diropargs {
+ *		fhandle  dir;
+ *		filename name;
+ *	};
+ */
+static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh,
+			     const char *name, u32 length)
+{
+	encode_fhandle(xdr, fh);
+	encode_filename(xdr, name, length);
+}
+
+/*
+ * 2.3.11.  diropres
+ *
+ *	union diropres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			fhandle file;
+ *			fattr   attributes;
+ *		} diropok;
+ *	default:
+ *		void;
+ *	};
+ */
+static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+	int error;
+
+	error = decode_fhandle(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_fattr(xdr, result->fattr);
+out:
+	return error;
+}
+
+static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_diropok(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+
+/*
+ * NFSv2 XDR encode functions
+ *
+ * NFSv2 argument types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nfs_fh *fh)
+{
+	encode_fhandle(xdr, fh);
+}
+
+/*
+ * 2.2.3.  sattrargs
+ *
+ *	struct sattrargs {
+ *		fhandle file;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_sattrargs *args)
+{
+	encode_fhandle(xdr, args->fh);
+	encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_diropargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name, args->len);
+}
+
+static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs_readlinkargs *args)
+{
+	encode_fhandle(xdr, args->fh);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->pglen, NFS_readlinkres_sz);
+}
+
+/*
+ * 2.2.7.  readargs
+ *
+ *	struct readargs {
+ *		fhandle file;
+ *		unsigned offset;
+ *		unsigned count;
+ *		unsigned totalcount;
+ *	};
+ */
+static void encode_readargs(struct xdr_stream *xdr,
+			    const struct nfs_readargs *args)
+{
+	u32 offset = args->offset;
+	u32 count = args->count;
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(count);
+	*p = cpu_to_be32(count);
+}
+
+static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nfs_readargs *args)
+{
+	encode_readargs(xdr, args);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->count, NFS_readres_sz);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 2.2.9.  writeargs
+ *
+ *	struct writeargs {
+ *		fhandle file;
+ *		unsigned beginoffset;
+ *		unsigned offset;
+ *		unsigned totalcount;
+ *		nfsdata data;
+ *	};
+ */
+static void encode_writeargs(struct xdr_stream *xdr,
+			     const struct nfs_writeargs *args)
+{
+	u32 offset = args->offset;
+	u32 count = args->count;
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(count);
+
+	/* nfsdata */
+	*p = cpu_to_be32(count);
+	xdr_write_pages(xdr, args->pages, args->pgbase, count);
+}
+
+static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_writeargs *args)
+{
+	encode_writeargs(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 2.2.10.  createargs
+ *
+ *	struct createargs {
+ *		diropargs where;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_createargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name, args->len);
+	encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_removeargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 2.2.12.  renameargs
+ *
+ *	struct renameargs {
+ *		diropargs from;
+ *		diropargs to;
+ *	};
+ */
+static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_renameargs *args)
+{
+	const struct qstr *old = args->old_name;
+	const struct qstr *new = args->new_name;
+
+	encode_diropargs(xdr, args->old_dir, old->name, old->len);
+	encode_diropargs(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 2.2.13.  linkargs
+ *
+ *	struct linkargs {
+ *		fhandle from;
+ *		diropargs to;
+ *	};
+ */
+static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nfs_linkargs *args)
+{
+	encode_fhandle(xdr, args->fromfh);
+	encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 2.2.14.  symlinkargs
+ *
+ *	struct symlinkargs {
+ *		diropargs from;
+ *		path to;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_symlinkargs *args)
+{
+	encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
+	encode_path(xdr, args->pages, args->pathlen);
+	encode_sattr(xdr, args->sattr);
+}
+
+/*
+ * 2.2.17.  readdirargs
+ *
+ *	struct readdirargs {
+ *		fhandle dir;
+ *		nfscookie cookie;
+ *		unsigned count;
+ *	};
+ */
+static void encode_readdirargs(struct xdr_stream *xdr,
+			       const struct nfs_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	*p++ = cpu_to_be32(args->cookie);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_readdirargs *args)
+{
+	encode_readdirargs(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+					args->count, NFS_readdirres_sz);
+}
+
+/*
+ * NFSv2 XDR decode functions
+ *
+ * NFSv2 result types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_fattr *result)
+{
+	return decode_attrstat(xdr, result);
+}
+
+static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_diropok *result)
+{
+	return decode_diropres(xdr, result);
+}
+
+/*
+ * 2.2.6.  readlinkres
+ *
+ *	union readlinkres switch (stat status) {
+ *	case NFS_OK:
+ *		path data;
+ *	default:
+ *		void;
+ *	};
+ */
+static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
+				    struct xdr_stream *xdr, void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_path(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.2.7.  readres
+ *
+ *	union readres switch (stat status) {
+ *	case NFS_OK:
+ *		fattr attributes;
+ *		nfsdata data;
+ *	default:
+ *		void;
+ *	};
+ */
+static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_readres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_fattr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_nfsdata(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_writeres *result)
+{
+	/* All NFSv2 writes are "file sync" writes */
+	result->verf->committed = NFS_FILE_SYNC;
+	return decode_attrstat(xdr, result->fattr);
+}
+
+/**
+ * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 2.2.17.  entry
+ *
+ *	struct entry {
+ *		unsigned	fileid;
+ *		filename	name;
+ *		nfscookie	cookie;
+ *		entry		*nextentry;
+ *	};
+ */
+int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
+{
+	__be32 *p;
+	int error;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p++ == xdr_zero) {
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p++ == xdr_zero)
+			return -EAGAIN;
+		entry->eof = 1;
+		return -EBADCOOKIE;
+	}
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	entry->ino = be32_to_cpup(p);
+
+	error = decode_filename_inline(xdr, &entry->name, &entry->len);
+	if (unlikely(error))
+		return error;
+
+	/*
+	 * The type (size and byte order) of nfscookie isn't defined in
+	 * RFC 1094.  This implementation assumes that it's an XDR uint32.
+	 */
+	entry->prev_cookie = entry->cookie;
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	entry->cookie = be32_to_cpup(p);
 
 	entry->d_type = DT_UNKNOWN;
 
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
 
 /*
- * NFS XDR decode functions
+ * 2.2.17.  readdirres
+ *
+ *	union readdirres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			entry *entries;
+ *			bool eof;
+ *		} readdirok;
+ *	default:
+ *		void;
+ *	};
+ *
+ * Read the directory contents into the page cache, but don't
+ * touch them.  The actual decoding is done by nfs2_decode_dirent()
+ * during subsequent nfs_readdir() calls.
  */
-/*
- * Decode simple status reply
- */
-static int
-nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
+static int decode_readdirok(struct xdr_stream *xdr)
 {
-	int	status;
-
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	return status;
-}
-
-/*
- * Decode attrstat reply
- * GETATTR, SETATTR, WRITE
- */
-static int
-nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	xdr_decode_fattr(p, fattr);
-	return 0;
-}
-
-/*
- * Decode diropres reply
- * LOOKUP, CREATE, MKDIR
- */
-static int
-nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_fhandle(p, res->fh);
-	xdr_decode_fattr(p, res->fattr);
-	return 0;
-}
-
-/*
- * Encode READLINK args
- */
-static int
-nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
-	return 0;
-}
-
-/*
- * Decode READLINK reply
- */
-static int
-nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
+	u32 recvd, pglen;
 	size_t hdrlen;
-	u32 len, recvd;
-	int	status;
 
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	/* Convert length of symlink */
-	len = ntohl(*p++);
-	if (len >= rcvbuf->page_len) {
-		dprintk("nfs: server returned giant symlink!\n");
-		return -ENAMETOOLONG;
-	}
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READLINK reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (recvd < len) {
-		dprintk("NFS: server cheating in readlink reply: "
-				"count %u > recvd %u\n", len, recvd);
-		return -EIO;
-	}
+	pglen = xdr->buf->page_len;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(pglen > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, pglen);
+	return pglen;
+out_cheating:
+	dprintk("NFS: server cheating in readdir result: "
+		"pglen %u > recvd %u\n", pglen, recvd);
+	pglen = recvd;
+	goto out;
+}
 
-	xdr_terminate_string(rcvbuf, len);
-	return 0;
+static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
+				   struct xdr_stream *xdr, void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_readdirok(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode WRITE reply
+ * 2.2.18.  statfsres
+ *
+ *	union statfsres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			unsigned tsize;
+ *			unsigned bsize;
+ *			unsigned blocks;
+ *			unsigned bfree;
+ *			unsigned bavail;
+ *		} info;
+ *	default:
+ *		void;
+ *	};
  */
-static int
-nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
 {
-	res->verf->committed = NFS_FILE_SYNC;
-	return nfs_xdr_attrstat(req, p, res->fattr);
-}
+	__be32 *p;
 
-/*
- * Decode STATFS reply
- */
-static int
-nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-
-	res->tsize  = ntohl(*p++);
-	res->bsize  = ntohl(*p++);
-	res->blocks = ntohl(*p++);
-	res->bfree  = ntohl(*p++);
-	res->bavail = ntohl(*p++);
+	p = xdr_inline_decode(xdr, NFS_info_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->tsize  = be32_to_cpup(p++);
+	result->bsize  = be32_to_cpup(p++);
+	result->blocks = be32_to_cpup(p++);
+	result->bfree  = be32_to_cpup(p++);
+	result->bavail = be32_to_cpup(p);
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
+static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs2_fsstat *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_info(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+
 /*
  * We need to translate between nfs status return values and
  * the local errno values which may not be the same.
  */
-static struct {
+static const struct {
 	int stat;
 	int errno;
 } nfs_errtbl[] = {
@@ -678,28 +1102,30 @@
 	{ -1,			-EIO		}
 };
 
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized.  This function is used jointly by NFSv2 and NFSv3.
  */
-int
-nfs_stat_to_errno(int stat)
+int nfs_stat_to_errno(enum nfs_stat status)
 {
 	int i;
 
 	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
-		if (nfs_errtbl[i].stat == stat)
+		if (nfs_errtbl[i].stat == (int)status)
 			return nfs_errtbl[i].errno;
 	}
-	dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat);
+	dprintk("NFS: Unrecognized nfs status value: %u\n", status);
 	return nfs_errtbl[i].errno;
 }
 
 #define PROC(proc, argtype, restype, timer)				\
 [NFSPROC_##proc] = {							\
 	.p_proc	    =  NFSPROC_##proc,					\
-	.p_encode   =  (kxdrproc_t) nfs_xdr_##argtype,			\
-	.p_decode   =  (kxdrproc_t) nfs_xdr_##restype,			\
+	.p_encode   =  (kxdreproc_t)nfs2_xdr_enc_##argtype,		\
+	.p_decode   =  (kxdrdproc_t)nfs2_xdr_dec_##restype,		\
 	.p_arglen   =  NFS_##argtype##_sz,				\
 	.p_replen   =  NFS_##restype##_sz,				\
 	.p_timer    =  timer,						\
@@ -707,21 +1133,21 @@
 	.p_name     =  #proc,						\
 	}
 struct rpc_procinfo	nfs_procedures[] = {
-    PROC(GETATTR,	fhandle,	attrstat, 1),
-    PROC(SETATTR,	sattrargs,	attrstat, 0),
-    PROC(LOOKUP,	diropargs,	diropres, 2),
-    PROC(READLINK,	readlinkargs,	readlinkres, 3),
-    PROC(READ,		readargs,	readres, 3),
-    PROC(WRITE,		writeargs,	writeres, 4),
-    PROC(CREATE,	createargs,	diropres, 0),
-    PROC(REMOVE,	removeargs,	stat, 0),
-    PROC(RENAME,	renameargs,	stat, 0),
-    PROC(LINK,		linkargs,	stat, 0),
-    PROC(SYMLINK,	symlinkargs,	stat, 0),
-    PROC(MKDIR,		createargs,	diropres, 0),
-    PROC(RMDIR,		diropargs,	stat, 0),
-    PROC(READDIR,	readdirargs,	readdirres, 3),
-    PROC(STATFS,	fhandle,	statfsres, 0),
+	PROC(GETATTR,	fhandle,	attrstat,	1),
+	PROC(SETATTR,	sattrargs,	attrstat,	0),
+	PROC(LOOKUP,	diropargs,	diropres,	2),
+	PROC(READLINK,	readlinkargs,	readlinkres,	3),
+	PROC(READ,	readargs,	readres,	3),
+	PROC(WRITE,	writeargs,	writeres,	4),
+	PROC(CREATE,	createargs,	diropres,	0),
+	PROC(REMOVE,	removeargs,	stat,		0),
+	PROC(RENAME,	renameargs,	stat,		0),
+	PROC(LINK,	linkargs,	stat,		0),
+	PROC(SYMLINK,	symlinkargs,	stat,		0),
+	PROC(MKDIR,	createargs,	diropres,	0),
+	PROC(RMDIR,	diropargs,	stat,		0),
+	PROC(READDIR,	readdirargs,	readdirres,	3),
+	PROC(STATFS,	fhandle,	statfsres,	0),
 };
 
 struct rpc_version		nfs_version2 = {
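
As an aside (this paragraph and the listing below are illustration, not part of the patch), both converted files follow the same decode discipline: inline-decode a bounded number of bytes, convert big-endian XDR words with be32_to_cpup()/xdr_decode_hyper(), and bail out through an out_overflow label rather than read past the receive buffer. The stand-alone user-space C sketch below mirrors that pattern in miniature; the xdr_cursor type and xs_*() helpers are invented names for this sketch and do not exist in the kernel.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>		/* ntohl() */

	struct xdr_cursor {
		const unsigned char *p;		/* next unread byte */
		const unsigned char *end;	/* one past the last valid byte */
	};

	/* Bounds-checked 32-bit decode, analogous to xdr_inline_decode() + be32_to_cpup(). */
	static int xs_u32(struct xdr_cursor *c, uint32_t *value)
	{
		uint32_t raw;

		if (c->end - c->p < 4)
			return -1;	/* report "overflow" instead of reading past the buffer */
		memcpy(&raw, c->p, 4);
		c->p += 4;
		*value = ntohl(raw);	/* XDR integers are big-endian */
		return 0;
	}

	/* XDR hyper (uint64): two big-endian words, most significant first. */
	static int xs_u64(struct xdr_cursor *c, uint64_t *value)
	{
		uint32_t hi, lo;

		if (xs_u32(c, &hi) || xs_u32(c, &lo))
			return -1;
		*value = ((uint64_t)hi << 32) | lo;
		return 0;
	}

	int main(void)
	{
		/* Fabricated reply: status = 0 (NFS_OK) followed by a hyper = 42. */
		static const unsigned char reply[] = {
			0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 42,
		};
		struct xdr_cursor c = { reply, reply + sizeof(reply) };
		uint32_t status;
		uint64_t size;

		if (xs_u32(&c, &status) || status != 0 || xs_u64(&c, &size))
			return 1;
		printf("status=%u size=%llu\n", status, (unsigned long long)size);
		return 0;
	}

Unlike this toy, the kernel's xdr_stream also carries page data for zero-copy reads and writes, which is what the xdr_inline_pages(), xdr_read_pages() and xdr_write_pages() calls in the diff manage.
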
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index f6cc60f..01c5e8b 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -37,18 +37,16 @@
 #define NFS3_filename_sz	(1+(NFS3_MAXNAMLEN>>2))
 #define NFS3_path_sz		(1+(NFS3_MAXPATHLEN>>2))
 #define NFS3_fattr_sz		(21)
-#define NFS3_wcc_attr_sz		(6)
+#define NFS3_cookieverf_sz	(NFS3_COOKIEVERFSIZE>>2)
+#define NFS3_wcc_attr_sz	(6)
 #define NFS3_pre_op_attr_sz	(1+NFS3_wcc_attr_sz)
 #define NFS3_post_op_attr_sz	(1+NFS3_fattr_sz)
-#define NFS3_wcc_data_sz		(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
-#define NFS3_fsstat_sz		
-#define NFS3_fsinfo_sz		
-#define NFS3_pathconf_sz		
-#define NFS3_entry_sz		(NFS3_filename_sz+3)
-
-#define NFS3_sattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_wcc_data_sz	(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
 #define NFS3_diropargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
-#define NFS3_removeargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
+
+#define NFS3_getattrargs_sz	(NFS3_fh_sz)
+#define NFS3_setattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_lookupargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
 #define NFS3_accessargs_sz	(NFS3_fh_sz+1)
 #define NFS3_readlinkargs_sz	(NFS3_fh_sz)
 #define NFS3_readargs_sz	(NFS3_fh_sz+3)
@@ -57,14 +55,16 @@
 #define NFS3_mkdirargs_sz	(NFS3_diropargs_sz+NFS3_sattr_sz)
 #define NFS3_symlinkargs_sz	(NFS3_diropargs_sz+1+NFS3_sattr_sz)
 #define NFS3_mknodargs_sz	(NFS3_diropargs_sz+2+NFS3_sattr_sz)
+#define NFS3_removeargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
 #define NFS3_renameargs_sz	(NFS3_diropargs_sz+NFS3_diropargs_sz)
 #define NFS3_linkargs_sz		(NFS3_fh_sz+NFS3_diropargs_sz)
-#define NFS3_readdirargs_sz	(NFS3_fh_sz+2)
+#define NFS3_readdirargs_sz	(NFS3_fh_sz+NFS3_cookieverf_sz+3)
+#define NFS3_readdirplusargs_sz	(NFS3_fh_sz+NFS3_cookieverf_sz+4)
 #define NFS3_commitargs_sz	(NFS3_fh_sz+3)
 
-#define NFS3_attrstat_sz	(1+NFS3_fattr_sz)
-#define NFS3_wccstat_sz		(1+NFS3_wcc_data_sz)
-#define NFS3_removeres_sz	(NFS3_wccstat_sz)
+#define NFS3_getattrres_sz	(1+NFS3_fattr_sz)
+#define NFS3_setattrres_sz	(1+NFS3_wcc_data_sz)
+#define NFS3_removeres_sz	(NFS3_setattrres_sz)
 #define NFS3_lookupres_sz	(1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
 #define NFS3_accessres_sz	(1+NFS3_post_op_attr_sz+1)
 #define NFS3_readlinkres_sz	(1+NFS3_post_op_attr_sz+1)
@@ -100,1079 +100,2362 @@
 	[NF3FIFO] = S_IFIFO,
 };
 
+/*
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
+ */
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+				 unsigned int base, unsigned int len,
+				 unsigned int bufsize)
+{
+	struct rpc_auth	*auth = req->rq_cred->cr_auth;
+	unsigned int replen;
+
+	replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	dprintk("nfs: %s: prematurely hit end of receive buffer. "
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 		"Remaining buffer length is %tu words.\n",
 		func, xdr->end - xdr->p);
 }
 
+
 /*
- * Common NFS XDR functions as inlines
+ * Encode/decode NFSv3 basic data types
+ *
+ * Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
  */
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh)
+
+static void encode_uint32(struct xdr_stream *xdr, u32 value)
 {
-	return xdr_encode_array(p, fh->data, fh->size);
+	__be32 *p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
 }
 
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh)
-{
-	if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) {
-		memcpy(fh->data, p, fh->size);
-		return p + XDR_QUADLEN(fh->size);
-	}
-	return NULL;
-}
-
-static inline __be32 *
-xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh)
+static int decode_uint32(struct xdr_stream *xdr, u32 *value)
 {
 	__be32 *p;
+
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	fh->size = ntohl(*p++);
-
-	if (fh->size <= NFS3_FHSIZE) {
-		p = xdr_inline_decode(xdr, fh->size);
-		if (unlikely(!p))
-			goto out_overflow;
-		memcpy(fh->data, p, fh->size);
-		return p + XDR_QUADLEN(fh->size);
-	}
-	return NULL;
-
+	*value = be32_to_cpup(p);
+	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EIO);
+	return -EIO;
+}
+
+static int decode_uint64(struct xdr_stream *xdr, u64 *value)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 8);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	xdr_decode_hyper(p, value);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
 /*
- * Encode/decode time.
+ * fileid3
+ *
+ *	typedef uint64 fileid3;
  */
-static inline __be32 *
-xdr_encode_time3(__be32 *p, struct timespec *timep)
+static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
 {
-	*p++ = htonl(timep->tv_sec);
-	*p++ = htonl(timep->tv_nsec);
-	return p;
+	return xdr_decode_hyper(p, fileid);
 }
 
-static inline __be32 *
-xdr_decode_time3(__be32 *p, struct timespec *timep)
+static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
 {
-	timep->tv_sec = ntohl(*p++);
-	timep->tv_nsec = ntohl(*p++);
-	return p;
+	return decode_uint64(xdr, fileid);
 }
 
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * filename3
+ *
+ *	typedef string filename3<>;
+ */
+static void encode_filename3(struct xdr_stream *xdr,
+			     const char *name, u32 length)
 {
-	unsigned int	type, major, minor;
-	umode_t		fmode;
+	__be32 *p;
 
-	type = ntohl(*p++);
+	BUG_ON(length > NFS3_MAXNAMLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+static int decode_inline_filename3(struct xdr_stream *xdr,
+				   const char **name, u32 *length)
+{
+	__be32 *p;
+	u32 count;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	if (count > NFS3_MAXNAMLEN)
+		goto out_nametoolong;
+	p = xdr_inline_decode(xdr, count);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*name = (const char *)p;
+	*length = count;
+	return 0;
+
+out_nametoolong:
+	dprintk("NFS: returned filename too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * nfspath3
+ *
+ *	typedef string nfspath3<>;
+ */
+static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
+			    const u32 length)
+{
+	BUG_ON(length > NFS3_MAXPATHLEN);
+	encode_uint32(xdr, length);
+	xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_nfspath3(struct xdr_stream *xdr)
+{
+	u32 recvd, count;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
+		goto out_nametoolong;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
+
+	xdr_read_pages(xdr, count);
+	xdr_terminate_string(xdr->buf, count);
+	return 0;
+
+out_nametoolong:
+	dprintk("NFS: returned pathname too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_cheating:
+	dprintk("NFS: server cheating in pathname result: "
+		"count %u > recvd %u\n", count, recvd);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * cookie3
+ *
+ *	typedef uint64 cookie3;
+ */
+static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
+{
+	return xdr_encode_hyper(p, cookie);
+}
+
+static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
+{
+	return decode_uint64(xdr, cookie);
+}
+
+/*
+ * cookieverf3
+ *
+ *	typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
+ */
+static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
+{
+	memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
+	return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
+}
+
+static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * createverf3
+ *
+ *	typedef opaque createverf3[NFS3_CREATEVERFSIZE];
+ */
+static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
+	memcpy(p, verifier, NFS3_CREATEVERFSIZE);
+}
+
+static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	memcpy(verifier, p, NFS3_WRITEVERFSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * size3
+ *
+ *	typedef uint64 size3;
+ */
+static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
+{
+	return xdr_decode_hyper(p, size);
+}
+
+/*
+ * nfsstat3
+ *
+ *	enum nfsstat3 {
+ *		NFS3_OK = 0,
+ *		...
+ *	}
+ */
+#define NFS3_OK		NFS_OK
+
+static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * ftype3
+ *
+ *	enum ftype3 {
+ *		NF3REG	= 1,
+ *		NF3DIR	= 2,
+ *		NF3BLK	= 3,
+ *		NF3CHR	= 4,
+ *		NF3LNK	= 5,
+ *		NF3SOCK	= 6,
+ *		NF3FIFO	= 7
+ *	};
+ */
+static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
+{
+	BUG_ON(type > NF3FIFO);
+	encode_uint32(xdr, type);
+}
+
+static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
+{
+	u32 type;
+
+	type = be32_to_cpup(p++);
 	if (type > NF3FIFO)
 		type = NF3NON;
-	fmode = nfs_type2fmt[type];
-	fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode;
-	fattr->nlink = ntohl(*p++);
-	fattr->uid = ntohl(*p++);
-	fattr->gid = ntohl(*p++);
-	p = xdr_decode_hyper(p, &fattr->size);
-	p = xdr_decode_hyper(p, &fattr->du.nfs3.used);
+	*mode = nfs_type2fmt[type];
+	return p;
+}
 
-	/* Turn remote device info into Linux-specific dev_t */
-	major = ntohl(*p++);
-	minor = ntohl(*p++);
-	fattr->rdev = MKDEV(major, minor);
-	if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor)
-		fattr->rdev = 0;
+/*
+ * specdata3
+ *
+ *     struct specdata3 {
+ *             uint32  specdata1;
+ *             uint32  specdata2;
+ *     };
+ */
+static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 8);
+	*p++ = cpu_to_be32(MAJOR(rdev));
+	*p = cpu_to_be32(MINOR(rdev));
+}
+
+static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
+{
+	unsigned int major, minor;
+
+	major = be32_to_cpup(p++);
+	minor = be32_to_cpup(p++);
+	*rdev = MKDEV(major, minor);
+	if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
+		*rdev = 0;
+	return p;
+}
+
+/*
+ * nfs_fh3
+ *
+ *	struct nfs_fh3 {
+ *		opaque       data<NFS3_FHSIZE>;
+ *	};
+ */
+static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	BUG_ON(fh->size > NFS3_FHSIZE);
+	p = xdr_reserve_space(xdr, 4 + fh->size);
+	xdr_encode_opaque(p, fh->data, fh->size);
+}
+
+static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > NFS3_FHSIZE))
+		goto out_toobig;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	fh->size = length;
+	memcpy(fh->data, p, length);
+	return 0;
+out_toobig:
+	dprintk("NFS: file handle size (%u) too big\n", length);
+	return -E2BIG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static void zero_nfs_fh3(struct nfs_fh *fh)
+{
+	memset(fh, 0, sizeof(*fh));
+}
+
+/*
+ * nfstime3
+ *
+ *	struct nfstime3 {
+ *		uint32	seconds;
+ *		uint32	nseconds;
+ *	};
+ */
+static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	*p++ = cpu_to_be32(timep->tv_nsec);
+	return p;
+}
+
+static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
+{
+	timep->tv_sec = be32_to_cpup(p++);
+	timep->tv_nsec = be32_to_cpup(p++);
+	return p;
+}
+
+/*
+ * sattr3
+ *
+ *	enum time_how {
+ *		DONT_CHANGE		= 0,
+ *		SET_TO_SERVER_TIME	= 1,
+ *		SET_TO_CLIENT_TIME	= 2
+ *	};
+ *
+ *	union set_mode3 switch (bool set_it) {
+ *	case TRUE:
+ *		mode3	mode;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_uid3 switch (bool set_it) {
+ *	case TRUE:
+ *		uid3	uid;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_gid3 switch (bool set_it) {
+ *	case TRUE:
+ *		gid3	gid;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_size3 switch (bool set_it) {
+ *	case TRUE:
+ *		size3	size;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_atime switch (time_how set_it) {
+ *	case SET_TO_CLIENT_TIME:
+ *		nfstime3	atime;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_mtime switch (time_how set_it) {
+ *	case SET_TO_CLIENT_TIME:
+ *		nfstime3  mtime;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct sattr3 {
+ *		set_mode3	mode;
+ *		set_uid3	uid;
+ *		set_gid3	gid;
+ *		set_size3	size;
+ *		set_atime	atime;
+ *		set_mtime	mtime;
+ *	};
+ */
+static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
+{
+	u32 nbytes;
+	__be32 *p;
+
+	/*
+	 * In order to make only a single xdr_reserve_space() call,
+	 * pre-compute the total number of bytes to be reserved.
+	 * Six boolean values, one for each set_foo field, are always
+	 * present in the encoded result, so start there.
+	 */
+	nbytes = 6 * 4;
+	if (attr->ia_valid & ATTR_MODE)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_UID)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_GID)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_SIZE)
+		nbytes += 8;
+	if (attr->ia_valid & ATTR_ATIME_SET)
+		nbytes += 8;
+	if (attr->ia_valid & ATTR_MTIME_SET)
+		nbytes += 8;
+	p = xdr_reserve_space(xdr, nbytes);
+
+	if (attr->ia_valid & ATTR_MODE) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_UID) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_uid);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_GID) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_gid);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_SIZE) {
+		*p++ = xdr_one;
+		p = xdr_encode_hyper(p, (u64)attr->ia_size);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_ATIME_SET) {
+		*p++ = xdr_two;
+		p = xdr_encode_nfstime3(p, &attr->ia_atime);
+	} else if (attr->ia_valid & ATTR_ATIME) {
+		*p++ = xdr_one;
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_MTIME_SET) {
+		*p++ = xdr_two;
+		xdr_encode_nfstime3(p, &attr->ia_mtime);
+	} else if (attr->ia_valid & ATTR_MTIME) {
+		*p = xdr_one;
+	} else
+		*p = xdr_zero;
+}
+
+/*
+ * fattr3
+ *
+ *	struct fattr3 {
+ *		ftype3		type;
+ *		mode3		mode;
+ *		uint32		nlink;
+ *		uid3		uid;
+ *		gid3		gid;
+ *		size3		size;
+ *		size3		used;
+ *		specdata3	rdev;
+ *		uint64		fsid;
+ *		fileid3		fileid;
+ *		nfstime3	atime;
+ *		nfstime3	mtime;
+ *		nfstime3	ctime;
+ *	};
+ */
+static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+	umode_t fmode;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	p = xdr_decode_ftype3(p, &fmode);
+
+	fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
+	fattr->nlink = be32_to_cpup(p++);
+	fattr->uid = be32_to_cpup(p++);
+	fattr->gid = be32_to_cpup(p++);
+
+	p = xdr_decode_size3(p, &fattr->size);
+	p = xdr_decode_size3(p, &fattr->du.nfs3.used);
+	p = xdr_decode_specdata3(p, &fattr->rdev);
 
 	p = xdr_decode_hyper(p, &fattr->fsid.major);
 	fattr->fsid.minor = 0;
-	p = xdr_decode_hyper(p, &fattr->fileid);
-	p = xdr_decode_time3(p, &fattr->atime);
-	p = xdr_decode_time3(p, &fattr->mtime);
-	p = xdr_decode_time3(p, &fattr->ctime);
 
-	/* Update the mode bits */
+	p = xdr_decode_fileid3(p, &fattr->fileid);
+	p = xdr_decode_nfstime3(p, &fattr->atime);
+	p = xdr_decode_nfstime3(p, &fattr->mtime);
+	xdr_decode_nfstime3(p, &fattr->ctime);
+
 	fattr->valid |= NFS_ATTR_FATTR_V3;
-	return p;
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+/*
+ * post_op_attr
+ *
+ *	union post_op_attr switch (bool attributes_follow) {
+ *	case TRUE:
+ *		fattr3	attributes;
+ *	case FALSE:
+ *		void;
+ *	};
+ */
+static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	if (attr->ia_valid & ATTR_MODE) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_mode & S_IALLUGO);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_UID) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_uid);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_GID) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_gid);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_SIZE) {
-		*p++ = xdr_one;
-		p = xdr_encode_hyper(p, (__u64) attr->ia_size);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_ATIME_SET) {
-		*p++ = xdr_two;
-		p = xdr_encode_time3(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
-		*p++ = xdr_one;
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_MTIME_SET) {
-		*p++ = xdr_two;
-		p = xdr_encode_time3(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
-		*p++ = xdr_one;
-	} else {
-		*p++ = xdr_zero;
-	}
-	return p;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p != xdr_zero)
+		return decode_fattr3(xdr, fattr);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * wcc_attr
+ *	struct wcc_attr {
+ *		size3		size;
+ *		nfstime3	mtime;
+ *		nfstime3	ctime;
+ *	};
+ */
+static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	p = xdr_decode_hyper(p, &fattr->pre_size);
-	p = xdr_decode_time3(p, &fattr->pre_mtime);
-	p = xdr_decode_time3(p, &fattr->pre_ctime);
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
 	fattr->valid |= NFS_ATTR_FATTR_PRESIZE
 		| NFS_ATTR_FATTR_PREMTIME
 		| NFS_ATTR_FATTR_PRECTIME;
-	return p;
+
+	p = xdr_decode_size3(p, &fattr->pre_size);
+	p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
+	xdr_decode_nfstime3(p, &fattr->pre_ctime);
+
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr)
-{
-	if (*p++)
-		p = xdr_decode_fattr(p, fattr);
-	return p;
-}
-
-static inline __be32 *
-xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+/*
+ * pre_op_attr
+ *	union pre_op_attr switch (bool attributes_follow) {
+ *	case TRUE:
+ *		wcc_attr	attributes;
+ *	case FALSE:
+ *		void;
+ *	};
+ *
+ * wcc_data
+ *
+ *	struct wcc_data {
+ *		pre_op_attr	before;
+ *		post_op_attr	after;
+ *	};
+ */
+static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 84);
-		if (unlikely(!p))
-			goto out_overflow;
-		p = xdr_decode_fattr(p, fattr);
-	}
-	return p;
+	if (*p != xdr_zero)
+		return decode_wcc_attr(xdr, fattr);
+	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EIO);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr)
+static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	if (*p++)
-		return xdr_decode_wcc_attr(p, fattr);
-	return p;
-}
+	int error;
 
-
-static inline __be32 *
-xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr)
-{
-	p = xdr_decode_pre_op_attr(p, fattr);
-	return xdr_decode_post_op_attr(p, fattr);
+	error = decode_pre_op_attr(xdr, fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, fattr);
+out:
+	return error;
 }
 
 /*
- * NFS encode functions
+ * post_op_fh3
+ *
+ *	union post_op_fh3 switch (bool handle_follows) {
+ *	case TRUE:
+ *		nfs_fh3  handle;
+ *	case FALSE:
+ *		void;
+ *	};
  */
-
-/*
- * Encode file handle argument
- */
-static int
-nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
+static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
 {
-	p = xdr_encode_fhandle(p, fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	__be32 *p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p != xdr_zero)
+		return decode_nfs_fh3(xdr, fh);
+	zero_nfs_fh3(fh);
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
 /*
- * Encode SETATTR arguments
+ * diropargs3
+ *
+ *	struct diropargs3 {
+ *		nfs_fh3		dir;
+ *		filename3	name;
+ *	};
  */
-static int
-nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
+static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
+			      const char *name, u32 length)
 {
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
-	*p++ = htonl(args->guard);
-	if (args->guard)
-		p = xdr_encode_time3(p, &args->guardtime);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
+	encode_nfs_fh3(xdr, fh);
+	encode_filename3(xdr, name, length);
+}
+
+
+/*
+ * NFSv3 XDR encode functions
+ *
+ * NFSv3 argument types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ */
+
+/*
+ * 3.3.1  GETATTR3args
+ *
+ *	struct GETATTR3args {
+ *		nfs_fh3  object;
+ *	};
+ */
+static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs_fh *fh)
+{
+	encode_nfs_fh3(xdr, fh);
 }
 
 /*
- * Encode directory ops argument
+ * 3.3.2  SETATTR3args
+ *
+ *	union sattrguard3 switch (bool check) {
+ *	case TRUE:
+ *		nfstime3  obj_ctime;
+ *	case FALSE:
+ *		void;
+ *	};
+ *
+ *	struct SETATTR3args {
+ *		nfs_fh3		object;
+ *		sattr3		new_attributes;
+ *		sattrguard3	guard;
+ *	};
  */
-static int
-nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args)
+static void encode_sattrguard3(struct xdr_stream *xdr,
+			       const struct nfs3_sattrargs *args)
 {
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
+	__be32 *p;
 
-/*
- * Encode REMOVE argument
- */
-static int
-nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name.name, args->name.len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode access() argument
- */
-static int
-nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->access);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
- */
-static int
-nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen,
-			 args->pages, args->pgbase, count);
-	req->rq_rcv_buf.flags |= XDRBUF_READ;
-	return 0;
-}
-
-/*
- * Write arguments. Splice the buffer to be written into the iovec.
- */
-static int
-nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(count);
-	*p++ = htonl(args->stable);
-	*p++ = htonl(count);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	/* Copy the page array */
-	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
-	sndbuf->flags |= XDRBUF_WRITE;
-	return 0;
-}
-
-/*
- * Encode CREATE arguments
- */
-static int
-nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-
-	*p++ = htonl(args->createmode);
-	if (args->createmode == NFS3_CREATE_EXCLUSIVE) {
-		*p++ = args->verifier[0];
-		*p++ = args->verifier[1];
-	} else
-		p = xdr_encode_sattr(p, args->sattr);
-
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode MKDIR arguments
- */
-static int
-nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SYMLINK arguments
- */
-static int
-nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	p = xdr_encode_sattr(p, args->sattr);
-	*p++ = htonl(args->pathlen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Copy the page */
-	xdr_encode_pages(&req->rq_snd_buf, args->pages, 0, args->pathlen);
-	return 0;
-}
-
-/*
- * Encode MKNOD arguments
- */
-static int
-nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	*p++ = htonl(args->type);
-	p = xdr_encode_sattr(p, args->sattr);
-	if (args->type == NF3CHR || args->type == NF3BLK) {
-		*p++ = htonl(MAJOR(args->rdev));
-		*p++ = htonl(MINOR(args->rdev));
-	}
-
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode RENAME arguments
- */
-static int
-nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
-	p = xdr_encode_fhandle(p, args->old_dir);
-	p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
-	p = xdr_encode_fhandle(p, args->new_dir);
-	p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode LINK arguments
- */
-static int
-nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_fhandle(p, args->tofh);
-	p = xdr_encode_array(p, args->toname, args->tolen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode arguments to readdir call
- */
-static int
-nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->cookie);
-	*p++ = args->verf[0];
-	*p++ = args->verf[1];
-	if (args->plus) {
-		/* readdirplus: need dircount + buffer size.
-		 * We just make sure we make dircount big enough */
-		*p++ = htonl(count >> 3);
-	}
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
-	return 0;
-}
-
-/*
- * Decode the result of a readdir call.
- * We just check for syntactical correctness.
- */
-static int
-nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
-	struct page **page;
-	size_t hdrlen;
-	u32 recvd, pglen;
-	int status;
-
-	status = ntohl(*p++);
-	/* Decode post_op_attrs */
-	p = xdr_decode_post_op_attr(p, res->dir_attr);
-	if (status)
-		return nfs_stat_to_errno(status);
-	/* Decode verifier cookie */
-	if (res->verf) {
-		res->verf[0] = *p++;
-		res->verf[1] = *p++;
+	if (args->guard) {
+		p = xdr_reserve_space(xdr, 4 + 8);
+		*p++ = xdr_one;
+		xdr_encode_nfstime3(p, &args->guardtime);
 	} else {
-		p += 2;
+		p = xdr_reserve_space(xdr, 4);
+		*p = xdr_zero;
 	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READDIR reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-
-	pglen = rcvbuf->page_len;
-	recvd = rcvbuf->len - hdrlen;
-	if (pglen > recvd)
-		pglen = recvd;
-	page = rcvbuf->pages;
-
-	return pglen;
 }
 
-__be32 *
-nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_sattrargs *args)
 {
-	__be32 *p;
-	struct nfs_entry old = *entry;
-
-	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
-		goto out_overflow;
-	if (!ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
-		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
-	}
-
-	p = xdr_inline_decode(xdr, 12);
-	if (unlikely(!p))
-		goto out_overflow;
-	p = xdr_decode_hyper(p, &entry->ino);
-	entry->len  = ntohl(*p++);
-
-	p = xdr_inline_decode(xdr, entry->len + 8);
-	if (unlikely(!p))
-		goto out_overflow;
-	entry->name = (const char *) p;
-	p += XDR_QUADLEN(entry->len);
-	entry->prev_cookie = entry->cookie;
-	p = xdr_decode_hyper(p, &entry->cookie);
-
-	entry->d_type = DT_UNKNOWN;
-	if (plus) {
-		entry->fattr->valid = 0;
-		p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
-		if (IS_ERR(p))
-			goto out_overflow_exit;
-		entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
-		/* In fact, a post_op_fh3: */
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (*p++) {
-			p = xdr_decode_fhandle_stream(xdr, entry->fh);
-			if (IS_ERR(p))
-				goto out_overflow_exit;
-			/* Ugh -- server reply was truncated */
-			if (p == NULL) {
-				dprintk("NFS: FH truncated\n");
-				*entry = old;
-				return ERR_PTR(-EAGAIN);
-			}
-		} else
-			memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
-	}
-
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
-
-out_overflow:
-	print_overflow_msg(__func__, xdr);
-out_overflow_exit:
-	return ERR_PTR(-EAGAIN);
+	encode_nfs_fh3(xdr, args->fh);
+	encode_sattr3(xdr, args->sattr);
+	encode_sattrguard3(xdr, args);
 }
 
 /*
- * Encode COMMIT arguments
+ * 3.3.3  LOOKUP3args
+ *
+ *	struct LOOKUP3args {
+ *		diropargs3  what;
+ *	};
  */
-static int
-nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_diropargs *args)
 {
-	p = xdr_encode_fhandle(p, args->fh);
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+}
+
+/*
+ * 3.3.4  ACCESS3args
+ *
+ *	struct ACCESS3args {
+ *		nfs_fh3		object;
+ *		uint32		access;
+ *	};
+ */
+static void encode_access3args(struct xdr_stream *xdr,
+			       const struct nfs3_accessargs *args)
+{
+	encode_nfs_fh3(xdr, args->fh);
+	encode_uint32(xdr, args->access);
+}
+
+static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_accessargs *args)
+{
+	encode_access3args(xdr, args);
+}
+
+/*
+ * 3.3.5  READLINK3args
+ *
+ *	struct READLINK3args {
+ *		nfs_fh3	symlink;
+ *	};
+ */
+static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
+				       struct xdr_stream *xdr,
+				       const struct nfs3_readlinkargs *args)
+{
+	encode_nfs_fh3(xdr, args->fh);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->pglen, NFS3_readlinkres_sz);
+}
+
+/*
+ * 3.3.6  READ3args
+ *
+ *	struct READ3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *	};
+ */
+static void encode_read3args(struct xdr_stream *xdr,
+			     const struct nfs_readargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4);
 	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(args->count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_readargs *args)
+{
+	encode_read3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->count, NFS3_readres_sz);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 3.3.7  WRITE3args
+ *
+ *	enum stable_how {
+ *		UNSTABLE  = 0,
+ *		DATA_SYNC = 1,
+ *		FILE_SYNC = 2
+ *	};
+ *
+ *	struct WRITE3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *		stable_how	stable;
+ *		opaque		data<>;
+ *	};
+ */
+static void encode_write3args(struct xdr_stream *xdr,
+			      const struct nfs_writeargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
+	p = xdr_encode_hyper(p, args->offset);
+	*p++ = cpu_to_be32(args->count);
+	*p++ = cpu_to_be32(args->stable);
+	*p = cpu_to_be32(args->count);
+	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+}
+
+static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_writeargs *args)
+{
+	encode_write3args(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 3.3.8  CREATE3args
+ *
+ *	enum createmode3 {
+ *		UNCHECKED = 0,
+ *		GUARDED   = 1,
+ *		EXCLUSIVE = 2
+ *	};
+ *
+ *	union createhow3 switch (createmode3 mode) {
+ *	case UNCHECKED:
+ *	case GUARDED:
+ *		sattr3       obj_attributes;
+ *	case EXCLUSIVE:
+ *		createverf3  verf;
+ *	};
+ *
+ *	struct CREATE3args {
+ *		diropargs3	where;
+ *		createhow3	how;
+ *	};
+ */
+static void encode_createhow3(struct xdr_stream *xdr,
+			      const struct nfs3_createargs *args)
+{
+	encode_uint32(xdr, args->createmode);
+	switch (args->createmode) {
+	case NFS3_CREATE_UNCHECKED:
+	case NFS3_CREATE_GUARDED:
+		encode_sattr3(xdr, args->sattr);
+		break;
+	case NFS3_CREATE_EXCLUSIVE:
+		encode_createverf3(xdr, args->verifier);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_createargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_createhow3(xdr, args);
+}
+
+/*
+ * 3.3.9  MKDIR3args
+ *
+ *	struct MKDIR3args {
+ *		diropargs3	where;
+ *		sattr3		attributes;
+ *	};
+ */
+static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs3_mkdirargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_sattr3(xdr, args->sattr);
+}
+
+/*
+ * 3.3.10  SYMLINK3args
+ *
+ *	struct symlinkdata3 {
+ *		sattr3		symlink_attributes;
+ *		nfspath3	symlink_data;
+ *	};
+ *
+ *	struct SYMLINK3args {
+ *		diropargs3	where;
+ *		symlinkdata3	symlink;
+ *	};
+ */
+static void encode_symlinkdata3(struct xdr_stream *xdr,
+				const struct nfs3_symlinkargs *args)
+{
+	encode_sattr3(xdr, args->sattr);
+	encode_nfspath3(xdr, args->pages, args->pathlen);
+}
+
+static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_symlinkargs *args)
+{
+	encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+	encode_symlinkdata3(xdr, args);
+}
+
+/*
+ * 3.3.11  MKNOD3args
+ *
+ *	struct devicedata3 {
+ *		sattr3		dev_attributes;
+ *		specdata3	spec;
+ *	};
+ *
+ *	union mknoddata3 switch (ftype3 type) {
+ *	case NF3CHR:
+ *	case NF3BLK:
+ *		devicedata3	device;
+ *	case NF3SOCK:
+ *	case NF3FIFO:
+ *		sattr3		pipe_attributes;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct MKNOD3args {
+ *		diropargs3	where;
+ *		mknoddata3	what;
+ *	};
+ */
+static void encode_devicedata3(struct xdr_stream *xdr,
+			       const struct nfs3_mknodargs *args)
+{
+	encode_sattr3(xdr, args->sattr);
+	encode_specdata3(xdr, args->rdev);
+}
+
+static void encode_mknoddata3(struct xdr_stream *xdr,
+			      const struct nfs3_mknodargs *args)
+{
+	encode_ftype3(xdr, args->type);
+	switch (args->type) {
+	case NF3CHR:
+	case NF3BLK:
+		encode_devicedata3(xdr, args);
+		break;
+	case NF3SOCK:
+	case NF3FIFO:
+		encode_sattr3(xdr, args->sattr);
+		break;
+	case NF3REG:
+	case NF3DIR:
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs3_mknodargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_mknoddata3(xdr, args);
+}
+
+/*
+ * 3.3.12  REMOVE3args
+ *
+ *	struct REMOVE3args {
+ *		diropargs3  object;
+ *	};
+ */
+static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_removeargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 3.3.14  RENAME3args
+ *
+ *	struct RENAME3args {
+ *		diropargs3	from;
+ *		diropargs3	to;
+ *	};
+ */
+static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_renameargs *args)
+{
+	const struct qstr *old = args->old_name;
+	const struct qstr *new = args->new_name;
+
+	encode_diropargs3(xdr, args->old_dir, old->name, old->len);
+	encode_diropargs3(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 3.3.15  LINK3args
+ *
+ *	struct LINK3args {
+ *		nfs_fh3		file;
+ *		diropargs3	link;
+ *	};
+ */
+static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs3_linkargs *args)
+{
+	encode_nfs_fh3(xdr, args->fromfh);
+	encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 3.3.16  READDIR3args
+ *
+ *	struct READDIR3args {
+ *		nfs_fh3		dir;
+ *		cookie3		cookie;
+ *		cookieverf3	cookieverf;
+ *		count3		count;
+ *	};
+ */
+static void encode_readdir3args(struct xdr_stream *xdr,
+				const struct nfs3_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
+	p = xdr_encode_cookie3(p, args->cookie);
+	p = xdr_encode_cookieverf3(p, args->verf);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_readdirargs *args)
+{
+	encode_readdir3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+				args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.17  READDIRPLUS3args
+ *
+ *	struct READDIRPLUS3args {
+ *		nfs_fh3		dir;
+ *		cookie3		cookie;
+ *		cookieverf3	cookieverf;
+ *		count3		dircount;
+ *		count3		maxcount;
+ *	};
+ */
+static void encode_readdirplus3args(struct xdr_stream *xdr,
+				    const struct nfs3_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
+	p = xdr_encode_cookie3(p, args->cookie);
+	p = xdr_encode_cookieverf3(p, args->verf);
+
+	/*
+	 * readdirplus: need dircount + buffer size.
+	 * We just make sure dircount is big enough.
+	 */
+	*p++ = cpu_to_be32(args->count >> 3);
+
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
+					  struct xdr_stream *xdr,
+					  const struct nfs3_readdirargs *args)
+{
+	encode_readdirplus3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+				args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.21  COMMIT3args
+ *
+ *	struct COMMIT3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *	};
+ */
+static void encode_commit3args(struct xdr_stream *xdr,
+			       const struct nfs_writeargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4);
+	p = xdr_encode_hyper(p, args->offset);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_writeargs *args)
+{
+	encode_commit3args(xdr, args);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
-/*
- * Encode GETACL arguments
- */
-static int
-nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
-		    struct nfs3_getaclargs *args)
+
+static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_getaclargs *args)
 {
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->mask);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	if (args->mask & (NFS_ACL | NFS_DFACL)) {
-		/* Inline the page array */
-		replen = (RPC_REPHDRSIZE + auth->au_rslack +
-			  ACL3_getaclres_sz) << 2;
-		xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0,
-				 NFSACL_MAXPAGES << PAGE_SHIFT);
-	}
-	return 0;
+	encode_nfs_fh3(xdr, args->fh);
+	encode_uint32(xdr, args->mask);
+	if (args->mask & (NFS_ACL | NFS_DFACL))
+		prepare_reply_buffer(req, args->pages, 0,
+					NFSACL_MAXPAGES << PAGE_SHIFT,
+					ACL3_getaclres_sz);
 }
 
-/*
- * Encode SETACL arguments
- */
-static int
-nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p,
-                   struct nfs3_setaclargs *args)
+static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_setaclargs *args)
 {
-	struct xdr_buf *buf = &req->rq_snd_buf;
 	unsigned int base;
-	int err;
+	int error;
 
-	p = xdr_encode_fhandle(p, NFS_FH(args->inode));
-	*p++ = htonl(args->mask);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	base = req->rq_slen;
-
+	encode_nfs_fh3(xdr, NFS_FH(args->inode));
+	encode_uint32(xdr, args->mask);
 	if (args->npages != 0)
-		xdr_encode_pages(buf, args->pages, 0, args->len);
-	else
-		req->rq_slen = xdr_adjust_iovec(req->rq_svec,
-				p + XDR_QUADLEN(args->len));
+		xdr_write_pages(xdr, args->pages, 0, args->len);
 
-	err = nfsacl_encode(buf, base, args->inode,
+	base = req->rq_slen;
+	error = nfsacl_encode(xdr->buf, base, args->inode,
 			    (args->mask & NFS_ACL) ?
 			    args->acl_access : NULL, 1, 0);
-	if (err > 0)
-		err = nfsacl_encode(buf, base + err, args->inode,
-				    (args->mask & NFS_DFACL) ?
-				    args->acl_default : NULL, 1,
-				    NFS_ACL_DEFAULT);
-	return (err > 0) ? 0 : err;
+	BUG_ON(error < 0);
+	error = nfsacl_encode(xdr->buf, base + error, args->inode,
+			    (args->mask & NFS_DFACL) ?
+			    args->acl_default : NULL, 1,
+			    NFS_ACL_DEFAULT);
+	BUG_ON(error < 0);
 }
+
 #endif  /* CONFIG_NFS_V3_ACL */
 
 /*
- * NFS XDR decode functions
+ * NFSv3 XDR decode functions
+ *
+ * NFSv3 result types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
  */
 
 /*
- * Decode attrstat reply.
+ * 3.3.1  GETATTR3res
+ *
+ *	struct GETATTR3resok {
+ *		fattr3		obj_attributes;
+ *	};
+ *
+ *	union GETATTR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		GETATTR3resok  resok;
+ *	default:
+ *		void;
+ *	};
  */
-static int
-nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs_fattr *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	xdr_decode_fattr(p, fattr);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_fattr3(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode status+wcc_data reply
- * SATTR, REMOVE, RMDIR
+ * 3.3.2  SETATTR3res
+ *
+ *	struct SETATTR3resok {
+ *		wcc_data  obj_wcc;
+ *	};
+ *
+ *	struct SETATTR3resfail {
+ *		wcc_data  obj_wcc;
+ *	};
+ *
+ *	union SETATTR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		SETATTR3resok   resok;
+ *	default:
+ *		SETATTR3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs_fattr *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++)))
-		status = nfs_stat_to_errno(status);
-	xdr_decode_wcc_data(p, fattr);
-	return status;
-}
-
-static int
-nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
-{
-	return nfs3_xdr_wccstat(req, p, res->dir_attr);
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode LOOKUP reply
+ * 3.3.3  LOOKUP3res
+ *
+ *	struct LOOKUP3resok {
+ *		nfs_fh3		object;
+ *		post_op_attr	obj_attributes;
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	struct LOOKUP3resfail {
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	union LOOKUP3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		LOOKUP3resok	resok;
+ *	default:
+ *		LOOKUP3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_diropres *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++))) {
-		status = nfs_stat_to_errno(status);
-	} else {
-		if (!(p = xdr_decode_fhandle(p, res->fh)))
-			return -errno_NFSERR_IO;
-		p = xdr_decode_post_op_attr(p, res->fattr);
-	}
-	xdr_decode_post_op_attr(p, res->dir_attr);
-	return status;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_nfs_fh3(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->dir_attr);
+out:
+	return error;
+out_default:
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode ACCESS reply
+ * 3.3.4  ACCESS3res
+ *
+ *	struct ACCESS3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		access;
+ *	};
+ *
+ *	struct ACCESS3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union ACCESS3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		ACCESS3resok	resok;
+ *	default:
+ *		ACCESS3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
+static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_accessres *result)
 {
-	int	status = ntohl(*p++);
+	enum nfs_stat status;
+	int error;
 
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status)
-		return nfs_stat_to_errno(status);
-	res->access = ntohl(*p++);
-	return 0;
-}
-
-static int
-nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_uint32(xdr, &result->access);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode READLINK reply
+ * 3.3.5  READLINK3res
+ *
+ *	struct READLINK3resok {
+ *		post_op_attr	symlink_attributes;
+ *		nfspath3	data;
+ *	};
+ *
+ *	struct READLINK3resfail {
+ *		post_op_attr	symlink_attributes;
+ *	};
+ *
+ *	union READLINK3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READLINK3resok	resok;
+ *	default:
+ *		READLINK3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_fattr *result)
 {
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_nfspath3(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.6  READ3res
+ *
+ *	struct READ3resok {
+ *		post_op_attr	file_attributes;
+ *		count3		count;
+ *		bool		eof;
+ *		opaque		data<>;
+ *	};
+ *
+ *	struct READ3resfail {
+ *		post_op_attr	file_attributes;
+ *	};
+ *
+ *	union READ3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READ3resok	resok;
+ *	default:
+ *		READ3resfail	resfail;
+ *	};
+ */
+static int decode_read3resok(struct xdr_stream *xdr,
+			     struct nfs_readres *result)
+{
+	u32 eof, count, ocount, recvd;
 	size_t hdrlen;
-	u32 len, recvd;
-	int	status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-	p = xdr_decode_post_op_attr(p, fattr);
+	p = xdr_inline_decode(xdr, 4 + 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p++);
+	eof = be32_to_cpup(p++);
+	ocount = be32_to_cpup(p++);
+	if (unlikely(ocount != count))
+		goto out_mismatch;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	/* Convert length of symlink */
-	len = ntohl(*p++);
-	if (len >= rcvbuf->page_len) {
-		dprintk("nfs: server returned giant symlink!\n");
-		return -ENAMETOOLONG;
-	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READLINK reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READLINK header is short. "
-			"iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (recvd < len) {
-		dprintk("NFS: server cheating in readlink reply: "
-				"count %u > recvd %u\n", len, recvd);
-		return -EIO;
-	}
-
-	xdr_terminate_string(rcvbuf, len);
-	return 0;
-}
-
-/*
- * Decode READ reply
- */
-static int
-nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
-{
-	struct kvec *iov = req->rq_rcv_buf.head;
-	size_t hdrlen;
-	u32 count, ocount, recvd;
-	int status;
-
-	status = ntohl(*p++);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
-	 * in that it puts the count both in the res struct and in the
-	 * opaque data count. */
-	count    = ntohl(*p++);
-	res->eof = ntohl(*p++);
-	ocount   = ntohl(*p++);
-
-	if (ocount != count) {
-		dprintk("NFS: READ count doesn't match RPC opaque count.\n");
-		return -errno_NFSERR_IO;
-	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READ reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-       		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READ header is short. iovec will be shifted.\n");
-		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
-	}
-
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-			"count %u > recvd %u\n", count, recvd);
-		count = recvd;
-		res->eof = 0;
-	}
-
-	if (count < res->count)
-		res->count = count;
-
+out:
+	xdr_read_pages(xdr, count);
+	result->eof = eof;
+	result->count = count;
 	return count;
+out_mismatch:
+	dprintk("NFS: READ count doesn't match length of opaque: "
+		"count %u != ocount %u\n", count, ocount);
+	return -EIO;
+out_cheating:
+	dprintk("NFS: server cheating in read result: "
+		"count %u > recvd %u\n", count, recvd);
+	count = recvd;
+	eof = 0;
+	goto out;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_readres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_read3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode WRITE response
+ * 3.3.7  WRITE3res
+ *
+ *	enum stable_how {
+ *		UNSTABLE  = 0,
+ *		DATA_SYNC = 1,
+ *		FILE_SYNC = 2
+ *	};
+ *
+ *	struct WRITE3resok {
+ *		wcc_data	file_wcc;
+ *		count3		count;
+ *		stable_how	committed;
+ *		writeverf3	verf;
+ *	};
+ *
+ *	struct WRITE3resfail {
+ *		wcc_data	file_wcc;
+ *	};
+ *
+ *	union WRITE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		WRITE3resok	resok;
+ *	default:
+ *		WRITE3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int decode_write3resok(struct xdr_stream *xdr,
+			      struct nfs_writeres *result)
 {
-	int	status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-	p = xdr_decode_wcc_data(p, res->fattr);
+	p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->count = be32_to_cpup(p++);
+	result->verf->committed = be32_to_cpup(p++);
+	if (unlikely(result->verf->committed > NFS_FILE_SYNC))
+		goto out_badvalue;
+	memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
+	return result->count;
+out_badvalue:
+	dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
+static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs_writeres *result)
+{
+	enum nfs_stat status;
+	int error;
 
-	res->count = ntohl(*p++);
-	res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
-	res->verf->verifier[0] = *p++;
-	res->verf->verifier[1] = *p++;
-
-	return res->count;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_write3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode a CREATE response
+ * 3.3.8  CREATE3res
+ *
+ *	struct CREATE3resok {
+ *		post_op_fh3	obj;
+ *		post_op_attr	obj_attributes;
+ *		wcc_data	dir_wcc;
+ *	};
+ *
+ *	struct CREATE3resfail {
+ *		wcc_data	dir_wcc;
+ *	};
+ *
+ *	union CREATE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		CREATE3resok	resok;
+ *	default:
+ *		CREATE3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int decode_create3resok(struct xdr_stream *xdr,
+			       struct nfs3_diropres *result)
 {
-	int	status;
+	int error;
 
-	status = ntohl(*p++);
-	if (status == 0) {
-		if (*p++) {
-			if (!(p = xdr_decode_fhandle(p, res->fh)))
-				return -errno_NFSERR_IO;
-			p = xdr_decode_post_op_attr(p, res->fattr);
-		} else {
-			memset(res->fh, 0, sizeof(*res->fh));
-			/* Do decode post_op_attr but set it to NULL */
-			p = xdr_decode_post_op_attr(p, res->fattr);
-			res->fattr->valid = 0;
-		}
-	} else {
-		status = nfs_stat_to_errno(status);
+	error = decode_post_op_fh3(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	/* The server isn't required to return a file handle.
+	 * If it didn't, force the client to perform a LOOKUP
+	 * to determine the correct file handle and attribute
+	 * values for the new object. */
+	if (result->fh->size == 0)
+		result->fattr->valid = 0;
+	error = decode_wcc_data(xdr, result->dir_attr);
+out:
+	return error;
+}
+
+static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_diropres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_create3resok(xdr, result);
+out:
+	return error;
+out_default:
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.12  REMOVE3res
+ *
+ *	struct REMOVE3resok {
+ *		wcc_data    dir_wcc;
+ *	};
+ *
+ *	struct REMOVE3resfail {
+ *		wcc_data    dir_wcc;
+ *	};
+ *
+ *	union REMOVE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		REMOVE3resok   resok;
+ *	default:
+ *		REMOVE3resfail resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_removeres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.14  RENAME3res
+ *
+ *	struct RENAME3resok {
+ *		wcc_data	fromdir_wcc;
+ *		wcc_data	todir_wcc;
+ *	};
+ *
+ *	struct RENAME3resfail {
+ *		wcc_data	fromdir_wcc;
+ *		wcc_data	todir_wcc;
+ *	};
+ *
+ *	union RENAME3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		RENAME3resok   resok;
+ *	default:
+ *		RENAME3resfail resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_renameres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->old_fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->new_fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.15  LINK3res
+ *
+ *	struct LINK3resok {
+ *		post_op_attr	file_attributes;
+ *		wcc_data	linkdir_wcc;
+ *	};
+ *
+ *	struct LINK3resfail {
+ *		post_op_attr	file_attributes;
+ *		wcc_data	linkdir_wcc;
+ *	};
+ *
+ *	union LINK3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		LINK3resok	resok;
+ *	default:
+ *		LINK3resfail	resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs3_linkres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/**
+ * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
+ *			the local page cache
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 3.3.16  entry3
+ *
+ *	struct entry3 {
+ *		fileid3		fileid;
+ *		filename3	name;
+ *		cookie3		cookie;
+ *		fhandle3	filehandle;
+ *		post_op_attr3	attributes;
+ *		entry3		*nextentry;
+ *	};
+ *
+ * 3.3.17  entryplus3
+ *	struct entryplus3 {
+ *		fileid3		fileid;
+ *		filename3	name;
+ *		cookie3		cookie;
+ *		post_op_attr	name_attributes;
+ *		post_op_fh3	name_handle;
+ *		entryplus3	*nextentry;
+ *	};
+ */
+int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
+{
+	struct nfs_entry old = *entry;
+	__be32 *p;
+	int error;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p == xdr_zero) {
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p == xdr_zero)
+			return -EAGAIN;
+		entry->eof = 1;
+		return -EBADCOOKIE;
 	}
-	p = xdr_decode_wcc_data(p, res->dir_attr);
-	return status;
+
+	error = decode_fileid3(xdr, &entry->ino);
+	if (unlikely(error))
+		return error;
+
+	error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+	if (unlikely(error))
+		return error;
+
+	entry->prev_cookie = entry->cookie;
+	error = decode_cookie3(xdr, &entry->cookie);
+	if (unlikely(error))
+		return error;
+
+	entry->d_type = DT_UNKNOWN;
+
+	if (plus) {
+		entry->fattr->valid = 0;
+		error = decode_post_op_attr(xdr, entry->fattr);
+		if (unlikely(error))
+			return error;
+		if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
+			entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+
+		/* In fact, a post_op_fh3: */
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p != xdr_zero) {
+			error = decode_nfs_fh3(xdr, entry->fh);
+			if (unlikely(error)) {
+				if (error == -E2BIG)
+					goto out_truncated;
+				return error;
+			}
+		} else
+			zero_nfs_fh3(entry->fh);
+	}
+
+	return 0;
+
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EAGAIN;
+out_truncated:
+	dprintk("NFS: directory entry contains invalid file handle\n");
+	*entry = old;
+	return -EAGAIN;
 }
 
 /*
- * Decode RENAME reply
+ * 3.3.16  READDIR3res
+ *
+ *	struct dirlist3 {
+ *		entry3		*entries;
+ *		bool		eof;
+ *	};
+ *
+ *	struct READDIR3resok {
+ *		post_op_attr	dir_attributes;
+ *		cookieverf3	cookieverf;
+ *		dirlist3	reply;
+ *	};
+ *
+ *	struct READDIR3resfail {
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	union READDIR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READDIR3resok	resok;
+ *	default:
+ *		READDIR3resfail	resfail;
+ *	};
+ *
+ * Read the directory contents into the page cache, but otherwise
+ * don't touch them.  The actual decoding is done by nfs3_decode_dirent()
+ * during subsequent nfs_readdir() calls.
  */
-static int
-nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res)
+static int decode_dirlist3(struct xdr_stream *xdr)
 {
-	int	status;
+	u32 recvd, pglen;
+	size_t hdrlen;
 
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	p = xdr_decode_wcc_data(p, res->old_fattr);
-	p = xdr_decode_wcc_data(p, res->new_fattr);
-	return status;
+	pglen = xdr->buf->page_len;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(pglen > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, pglen);
+	return pglen;
+out_cheating:
+	dprintk("NFS: server cheating in readdir result: "
+		"pglen %u > recvd %u\n", pglen, recvd);
+	pglen = recvd;
+	goto out;
+}
+
+static int decode_readdir3resok(struct xdr_stream *xdr,
+				struct nfs3_readdirres *result)
+{
+	int error;
+
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	/* XXX: do we need to check if result->verf != NULL ? */
+	error = decode_cookieverf3(xdr, result->verf);
+	if (unlikely(error))
+		goto out;
+	error = decode_dirlist3(xdr);
+out:
+	return error;
+}
+
+static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs3_readdirres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_readdir3resok(xdr, result);
+out:
+	return error;
+out_default:
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode LINK reply
+ * 3.3.18  FSSTAT3res
+ *
+ *	struct FSSTAT3resok {
+ *		post_op_attr	obj_attributes;
+ *		size3		tbytes;
+ *		size3		fbytes;
+ *		size3		abytes;
+ *		size3		tfiles;
+ *		size3		ffiles;
+ *		size3		afiles;
+ *		uint32		invarsec;
+ *	};
+ *
+ *	struct FSSTAT3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union FSSTAT3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		FSSTAT3resok	resok;
+ *	default:
+ *		FSSTAT3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
+static int decode_fsstat3resok(struct xdr_stream *xdr,
+			       struct nfs_fsstat *result)
 {
-	int	status;
+	__be32 *p;
 
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	p = xdr_decode_wcc_data(p, res->dir_attr);
-	return status;
-}
-
-/*
- * Decode FSSTAT reply
- */
-static int
-nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res)
-{
-	int		status;
-
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	p = xdr_decode_hyper(p, &res->tbytes);
-	p = xdr_decode_hyper(p, &res->fbytes);
-	p = xdr_decode_hyper(p, &res->abytes);
-	p = xdr_decode_hyper(p, &res->tfiles);
-	p = xdr_decode_hyper(p, &res->ffiles);
-	p = xdr_decode_hyper(p, &res->afiles);
-
+	p = xdr_inline_decode(xdr, 8 * 6 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	p = xdr_decode_size3(p, &result->tbytes);
+	p = xdr_decode_size3(p, &result->fbytes);
+	p = xdr_decode_size3(p, &result->abytes);
+	p = xdr_decode_size3(p, &result->tfiles);
+	p = xdr_decode_size3(p, &result->ffiles);
+	xdr_decode_size3(p, &result->afiles);
 	/* ignore invarsec */
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fsstat *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_fsstat3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode FSINFO reply
+ * 3.3.19  FSINFO3res
+ *
+ *	struct FSINFO3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		rtmax;
+ *		uint32		rtpref;
+ *		uint32		rtmult;
+ *		uint32		wtmax;
+ *		uint32		wtpref;
+ *		uint32		wtmult;
+ *		uint32		dtpref;
+ *		size3		maxfilesize;
+ *		nfstime3	time_delta;
+ *		uint32		properties;
+ *	};
+ *
+ *	struct FSINFO3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union FSINFO3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		FSINFO3resok	resok;
+ *	default:
+ *		FSINFO3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
+static int decode_fsinfo3resok(struct xdr_stream *xdr,
+			       struct nfs_fsinfo *result)
 {
-	int		status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	res->rtmax  = ntohl(*p++);
-	res->rtpref = ntohl(*p++);
-	res->rtmult = ntohl(*p++);
-	res->wtmax  = ntohl(*p++);
-	res->wtpref = ntohl(*p++);
-	res->wtmult = ntohl(*p++);
-	res->dtpref = ntohl(*p++);
-	p = xdr_decode_hyper(p, &res->maxfilesize);
-	p = xdr_decode_time3(p, &res->time_delta);
+	p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->rtmax  = be32_to_cpup(p++);
+	result->rtpref = be32_to_cpup(p++);
+	result->rtmult = be32_to_cpup(p++);
+	result->wtmax  = be32_to_cpup(p++);
+	result->wtpref = be32_to_cpup(p++);
+	result->wtmult = be32_to_cpup(p++);
+	result->dtpref = be32_to_cpup(p++);
+	p = xdr_decode_size3(p, &result->maxfilesize);
+	xdr_decode_nfstime3(p, &result->time_delta);
 
 	/* ignore properties */
-	res->lease_time = 0;
+	result->lease_time = 0;
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fsinfo *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_fsinfo3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode PATHCONF reply
+ * 3.3.20  PATHCONF3res
+ *
+ *	struct PATHCONF3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		linkmax;
+ *		uint32		name_max;
+ *		bool		no_trunc;
+ *		bool		chown_restricted;
+ *		bool		case_insensitive;
+ *		bool		case_preserving;
+ *	};
+ *
+ *	struct PATHCONF3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union PATHCONF3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		PATHCONF3resok	resok;
+ *	default:
+ *		PATHCONF3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res)
+static int decode_pathconf3resok(struct xdr_stream *xdr,
+				 struct nfs_pathconf *result)
 {
-	int		status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-	res->max_link = ntohl(*p++);
-	res->max_namelen = ntohl(*p++);
-
+	p = xdr_inline_decode(xdr, 4 * 6);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->max_link = be32_to_cpup(p++);
+	result->max_namelen = be32_to_cpup(p);
 	/* ignore remaining fields */
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_pathconf *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_pathconf3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode COMMIT reply
+ * 3.3.21  COMMIT3res
+ *
+ *	struct COMMIT3resok {
+ *		wcc_data	file_wcc;
+ *		writeverf3	verf;
+ *	};
+ *
+ *	struct COMMIT3resfail {
+ *		wcc_data	file_wcc;
+ *	};
+ *
+ *	union COMMIT3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		COMMIT3resok	resok;
+ *	default:
+ *		COMMIT3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_writeres *result)
 {
-	int		status;
+	enum nfs_stat status;
+	int error;
 
-	status = ntohl(*p++);
-	p = xdr_decode_wcc_data(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	res->verf->verifier[0] = *p++;
-	res->verf->verifier[1] = *p++;
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_writeverf3(xdr, result->verf->verifier);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
-/*
- * Decode GETACL reply
- */
-static int
-nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
-		   struct nfs3_getaclres *res)
+
+static inline int decode_getacl3resok(struct xdr_stream *xdr,
+				      struct nfs3_getaclres *result)
 {
-	struct xdr_buf *buf = &req->rq_rcv_buf;
-	int status = ntohl(*p++);
 	struct posix_acl **acl;
 	unsigned int *aclcnt;
-	int err, base;
+	size_t hdrlen;
+	int error;
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	res->mask = ntohl(*p++);
-	if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
-		return -EINVAL;
-	base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_uint32(xdr, &result->mask);
+	if (unlikely(error))
+		goto out;
+	error = -EINVAL;
+	if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+		goto out;
 
-	acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL;
-	aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL;
-	err = nfsacl_decode(buf, base, aclcnt, acl);
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
 
-	acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL;
-	aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL;
-	if (err > 0)
-		err = nfsacl_decode(buf, base + err, aclcnt, acl);
-	return (err > 0) ? 0 : err;
+	acl = NULL;
+	if (result->mask & NFS_ACL)
+		acl = &result->acl_access;
+	aclcnt = NULL;
+	if (result->mask & NFS_ACLCNT)
+		aclcnt = &result->acl_access_count;
+	error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
+	if (unlikely(error <= 0))
+		goto out;
+
+	acl = NULL;
+	if (result->mask & NFS_DFACL)
+		acl = &result->acl_default;
+	aclcnt = NULL;
+	if (result->mask & NFS_DFACLCNT)
+		aclcnt = &result->acl_default_count;
+	error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
+	if (unlikely(error <= 0))
+		return error;
+	error = 0;
+out:
+	return error;
 }
 
-/*
- * Decode setacl reply.
- */
-static int
-nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_getaclres *result)
 {
-	int status = ntohl(*p++);
+	enum nfs_stat status;
+	int error;
 
-	if (status)
-		return nfs_stat_to_errno(status);
-	xdr_decode_post_op_attr(p, fattr);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_getacl3resok(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
+
+static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fattr *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_post_op_attr(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
 #endif  /* CONFIG_NFS_V3_ACL */
 
 #define PROC(proc, argtype, restype, timer)				\
 [NFS3PROC_##proc] = {							\
 	.p_proc      = NFS3PROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nfs3_xdr_##argtype,			\
-	.p_decode    = (kxdrproc_t) nfs3_xdr_##restype,			\
-	.p_arglen    = NFS3_##argtype##_sz,				\
-	.p_replen    = NFS3_##restype##_sz,				\
+	.p_encode    = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args,	\
+	.p_decode    = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res,	\
+	.p_arglen    = NFS3_##argtype##args_sz,				\
+	.p_replen    = NFS3_##restype##res_sz,				\
 	.p_timer     = timer,						\
 	.p_statidx   = NFS3PROC_##proc,					\
 	.p_name      = #proc,						\
 	}
 
 struct rpc_procinfo	nfs3_procedures[] = {
-  PROC(GETATTR,		fhandle,	attrstat, 1),
-  PROC(SETATTR, 	sattrargs,	wccstat, 0),
-  PROC(LOOKUP,		diropargs,	lookupres, 2),
-  PROC(ACCESS,		accessargs,	accessres, 1),
-  PROC(READLINK,	readlinkargs,	readlinkres, 3),
-  PROC(READ,		readargs,	readres, 3),
-  PROC(WRITE,		writeargs,	writeres, 4),
-  PROC(CREATE,		createargs,	createres, 0),
-  PROC(MKDIR,		mkdirargs,	createres, 0),
-  PROC(SYMLINK,		symlinkargs,	createres, 0),
-  PROC(MKNOD,		mknodargs,	createres, 0),
-  PROC(REMOVE,		removeargs,	removeres, 0),
-  PROC(RMDIR,		diropargs,	wccstat, 0),
-  PROC(RENAME,		renameargs,	renameres, 0),
-  PROC(LINK,		linkargs,	linkres, 0),
-  PROC(READDIR,		readdirargs,	readdirres, 3),
-  PROC(READDIRPLUS,	readdirargs,	readdirres, 3),
-  PROC(FSSTAT,		fhandle,	fsstatres, 0),
-  PROC(FSINFO,  	fhandle,	fsinfores, 0),
-  PROC(PATHCONF,	fhandle,	pathconfres, 0),
-  PROC(COMMIT,		commitargs,	commitres, 5),
+	PROC(GETATTR,		getattr,	getattr,	1),
+	PROC(SETATTR,		setattr,	setattr,	0),
+	PROC(LOOKUP,		lookup,		lookup,		2),
+	PROC(ACCESS,		access,		access,		1),
+	PROC(READLINK,		readlink,	readlink,	3),
+	PROC(READ,		read,		read,		3),
+	PROC(WRITE,		write,		write,		4),
+	PROC(CREATE,		create,		create,		0),
+	PROC(MKDIR,		mkdir,		create,		0),
+	PROC(SYMLINK,		symlink,	create,		0),
+	PROC(MKNOD,		mknod,		create,		0),
+	PROC(REMOVE,		remove,		remove,		0),
+	PROC(RMDIR,		lookup,		setattr,	0),
+	PROC(RENAME,		rename,		rename,		0),
+	PROC(LINK,		link,		link,		0),
+	PROC(READDIR,		readdir,	readdir,	3),
+	PROC(READDIRPLUS,	readdirplus,	readdir,	3),
+	PROC(FSSTAT,		getattr,	fsstat,		0),
+	PROC(FSINFO,		getattr,	fsinfo,		0),
+	PROC(PATHCONF,		getattr,	pathconf,	0),
+	PROC(COMMIT,		commit,		commit,		5),
 };
 
 struct rpc_version		nfs_version3 = {
@@ -1185,8 +2468,8 @@
 static struct rpc_procinfo	nfs3_acl_procedures[] = {
 	[ACLPROC3_GETACL] = {
 		.p_proc = ACLPROC3_GETACL,
-		.p_encode = (kxdrproc_t) nfs3_xdr_getaclargs,
-		.p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
+		.p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
+		.p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
 		.p_arglen = ACL3_getaclargs_sz,
 		.p_replen = ACL3_getaclres_sz,
 		.p_timer = 1,
@@ -1194,8 +2477,8 @@
 	},
 	[ACLPROC3_SETACL] = {
 		.p_proc = ACLPROC3_SETACL,
-		.p_encode = (kxdrproc_t) nfs3_xdr_setaclargs,
-		.p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
+		.p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
+		.p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
 		.p_arglen = ACL3_setaclargs_sz,
 		.p_replen = ACL3_setaclres_sz,
 		.p_timer = 0,
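
Note on the nfs3xdr.c conversion above: the PROC() macro expands each
nfs3_procedures[] row mechanically from the new enc_*3args/dec_*3res
helpers.  For example, PROC(GETATTR, getattr, getattr, 1) expands to the
entry below; NFS3_getattrargs_sz and NFS3_getattrres_sz are assumed to be
the size macros defined earlier in nfs3xdr.c, outside this excerpt:

	[NFS3PROC_GETATTR] = {
		.p_proc      = NFS3PROC_GETATTR,
		.p_encode    = (kxdreproc_t)nfs3_xdr_enc_getattr3args,
		.p_decode    = (kxdrdproc_t)nfs3_xdr_dec_getattr3res,
		.p_arglen    = NFS3_getattrargs_sz,
		.p_replen    = NFS3_getattrres_sz,
		.p_timer     = 1,
		.p_statidx   = NFS3PROC_GETATTR,
		.p_name      = "GETATTR",
	},
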
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9fa4963..7a74740 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@
 	NFS4CLNT_RECLAIM_REBOOT,
 	NFS4CLNT_RECLAIM_NOGRACE,
 	NFS4CLNT_DELEGRETURN,
+	NFS4CLNT_LAYOUTRECALL,
 	NFS4CLNT_SESSION_RESET,
 	NFS4CLNT_RECALL_SLOT,
 };
@@ -109,7 +110,7 @@
 struct nfs4_state_owner {
 	struct nfs_unique_id so_owner_id;
 	struct nfs_server    *so_server;
-	struct rb_node	     so_client_node;
+	struct rb_node	     so_server_node;
 
 	struct rpc_cred	     *so_cred;	 /* Associated cred */
 
@@ -227,12 +228,6 @@
 extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
-/* inode.c */
-extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
-
-
 /* nfs4proc.c */
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -241,11 +236,12 @@
 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
+extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
 extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
 		struct nfs4_fs_locations *fs_locations, struct page *page);
 extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
+extern const struct xattr_handler *nfs4_xattr_handlers[];
 
 #if defined(CONFIG_NFS_V4_1)
 static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
@@ -331,7 +327,6 @@
 extern const nfs4_stateid zero_stateid;
 
 /* nfs4xdr.c */
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
 extern struct rpc_procinfo nfs4_procedures[];
 
 struct nfs4_mount_data;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 2e92f0d..23f930c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,7 +82,7 @@
 {
 	struct nfs4_file_layout_dsaddr *dsaddr;
 	int status = -EINVAL;
-	struct nfs_server *nfss = NFS_SERVER(lo->inode);
+	struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
 
 	dprintk("--> %s\n", __func__);
 
@@ -101,7 +101,7 @@
 	/* find and reference the deviceid */
 	dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
 	if (dsaddr == NULL) {
-		dsaddr = get_device_info(lo->inode, id);
+		dsaddr = get_device_info(lo->plh_inode, id);
 		if (dsaddr == NULL)
 			goto out;
 	}
@@ -243,7 +243,7 @@
 static void
 filelayout_free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode);
+	struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode);
 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 
 	dprintk("--> %s\n", __func__);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4435e5e..9d992b0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -49,6 +49,7 @@
 #include <linux/mount.h>
 #include <linux/module.h>
 #include <linux/sunrpc/bc_xprt.h>
+#include <linux/xattr.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -355,9 +356,9 @@
 }
 
 /*
- * Signal state manager thread if session is drained
+ * Signal state manager thread if session fore channel is drained
  */
-static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
+static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
 {
 	struct rpc_task *task;
 
@@ -371,8 +372,20 @@
 	if (ses->fc_slot_table.highest_used_slotid != -1)
 		return;
 
-	dprintk("%s COMPLETE: Session Drained\n", __func__);
-	complete(&ses->complete);
+	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
+	complete(&ses->fc_slot_table.complete);
+}
+
+/*
+ * Signal state manager thread if session back channel is drained
+ */
+void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
+{
+	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
+	    ses->bc_slot_table.highest_used_slotid != -1)
+		return;
+	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
+	complete(&ses->bc_slot_table.complete);
 }
 
 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
@@ -389,7 +402,7 @@
 
 	spin_lock(&tbl->slot_tbl_lock);
 	nfs4_free_slot(tbl, res->sr_slot);
-	nfs41_check_drain_session_complete(res->sr_session);
+	nfs4_check_drain_fc_complete(res->sr_session);
 	spin_unlock(&tbl->slot_tbl_lock);
 	res->sr_slot = NULL;
 }
@@ -1826,6 +1839,8 @@
 	struct nfs_closeres res;
 	struct nfs_fattr fattr;
 	unsigned long timestamp;
+	bool roc;
+	u32 roc_barrier;
 };
 
 static void nfs4_free_closedata(void *data)
@@ -1833,6 +1848,8 @@
 	struct nfs4_closedata *calldata = data;
 	struct nfs4_state_owner *sp = calldata->state->owner;
 
+	if (calldata->roc)
+		pnfs_roc_release(calldata->state->inode);
 	nfs4_put_open_state(calldata->state);
 	nfs_free_seqid(calldata->arg.seqid);
 	nfs4_put_state_owner(sp);
@@ -1865,6 +1882,9 @@
 	 */
 	switch (task->tk_status) {
 		case 0:
+			if (calldata->roc)
+				pnfs_roc_set_barrier(state->inode,
+						     calldata->roc_barrier);
 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
 			renew_lease(server, calldata->timestamp);
 			nfs4_close_clear_stateid_flags(state,
@@ -1917,8 +1937,15 @@
 		return;
 	}
 
-	if (calldata->arg.fmode == 0)
+	if (calldata->arg.fmode == 0) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+		if (calldata->roc &&
+		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
+			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
+				     task, NULL);
+			return;
+		}
+	}
 
 	nfs_fattr_init(calldata->res.fattr);
 	calldata->timestamp = jiffies;
@@ -1946,7 +1973,7 @@
  *
  * NOTE: Caller must be holding the sp->so_owner semaphore!
  */
-int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
+int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
 	struct nfs4_closedata *calldata;
@@ -1981,11 +2008,12 @@
 	calldata->res.fattr = &calldata->fattr;
 	calldata->res.seqid = calldata->arg.seqid;
 	calldata->res.server = server;
+	calldata->roc = roc;
 	path_get(path);
 	calldata->path = *path;
 
-	msg.rpc_argp = &calldata->arg,
-	msg.rpc_resp = &calldata->res,
+	msg.rpc_argp = &calldata->arg;
+	msg.rpc_resp = &calldata->res;
 	task_setup_data.callback_data = calldata;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
@@ -1998,6 +2026,8 @@
 out_free_calldata:
 	kfree(calldata);
 out:
+	if (roc)
+		pnfs_roc_release(state->inode);
 	nfs4_put_open_state(state);
 	nfs4_put_state_owner(sp);
 	return status;
@@ -2486,6 +2516,7 @@
 		path = &ctx->path;
 		fmode = ctx->mode;
 	}
+	sattr->ia_mode &= ~current_umask();
 	state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
 	d_drop(dentry);
 	if (IS_ERR(state)) {
@@ -2816,6 +2847,8 @@
 {
 	struct nfs4_exception exception = { };
 	int err;
+
+	sattr->ia_mode &= ~current_umask();
 	do {
 		err = nfs4_handle_exception(NFS_SERVER(dir),
 				_nfs4_proc_mkdir(dir, dentry, sattr),
@@ -2916,6 +2949,8 @@
 {
 	struct nfs4_exception exception = { };
 	int err;
+
+	sattr->ia_mode &= ~current_umask();
 	do {
 		err = nfs4_handle_exception(NFS_SERVER(dir),
 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
@@ -3478,6 +3513,7 @@
 	struct nfs4_setclientid setclientid = {
 		.sc_verifier = &sc_verifier,
 		.sc_prog = program,
+		.sc_cb_ident = clp->cl_cb_ident,
 	};
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
@@ -3517,7 +3553,7 @@
 		if (signalled())
 			break;
 		if (loop++ & 1)
-			ssleep(clp->cl_lease_time + 1);
+			ssleep(clp->cl_lease_time / HZ + 1);
 		else
 			if (++clp->cl_id_uniquifier == 0)
 				break;
@@ -3663,8 +3699,8 @@
 	data->rpc_status = 0;
 
 	task_setup_data.callback_data = data;
-	msg.rpc_argp = &data->args,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->args;
+	msg.rpc_resp = &data->res;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
@@ -3743,6 +3779,7 @@
 		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
 	arg.lock_owner.id = lsp->ls_id.id;
+	arg.lock_owner.s_dev = server->s_dev;
 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
 	switch (status) {
 		case 0:
@@ -3908,8 +3945,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	msg.rpc_argp = &data->arg,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->arg;
+	msg.rpc_resp = &data->res;
 	task_setup_data.callback_data = data;
 	return rpc_run_task(&task_setup_data);
 }
@@ -3988,6 +4025,7 @@
 	p->arg.lock_stateid = &lsp->ls_stateid;
 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
 	p->arg.lock_owner.id = lsp->ls_id.id;
+	p->arg.lock_owner.s_dev = server->s_dev;
 	p->res.lock_seqid = p->arg.lock_seqid;
 	p->lsp = lsp;
 	p->server = server;
@@ -4145,8 +4183,8 @@
 			data->arg.reclaim = NFS_LOCK_RECLAIM;
 		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
 	}
-	msg.rpc_argp = &data->arg,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->arg;
+	msg.rpc_resp = &data->res;
 	task_setup_data.callback_data = data;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
@@ -4392,48 +4430,43 @@
 		return;
 	args->lock_owner.clientid = server->nfs_client->cl_clientid;
 	args->lock_owner.id = lsp->ls_id.id;
+	args->lock_owner.s_dev = server->s_dev;
 	msg.rpc_argp = args;
 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
 }
 
 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
 
-int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
-		size_t buflen, int flags)
+static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
+				   const void *buf, size_t buflen,
+				   int flags, int type)
 {
-	struct inode *inode = dentry->d_inode;
+	if (strcmp(key, "") != 0)
+		return -EINVAL;
 
-	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
-		return -EOPNOTSUPP;
-
-	return nfs4_proc_set_acl(inode, buf, buflen);
+	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
 }
 
-/* The getxattr man page suggests returning -ENODATA for unknown attributes,
- * and that's what we'll do for e.g. user attributes that haven't been set.
- * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
- * attributes in kernel-managed attribute namespaces. */
-ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
-		size_t buflen)
+static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
+				   void *buf, size_t buflen, int type)
 {
-	struct inode *inode = dentry->d_inode;
+	if (strcmp(key, "") != 0)
+		return -EINVAL;
 
-	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
-		return -EOPNOTSUPP;
-
-	return nfs4_proc_get_acl(inode, buf, buflen);
+	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
 }
 
-ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
+static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
+				       size_t list_len, const char *name,
+				       size_t name_len, int type)
 {
-	size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
+	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
 
 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
 		return 0;
-	if (buf && buflen < len)
-		return -ERANGE;
-	if (buf)
-		memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
+
+	if (list && len <= list_len)
+		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
 	return len;
 }
 
@@ -4486,6 +4519,25 @@
 
 #ifdef CONFIG_NFS_V4_1
 /*
+ * Check the exchange flags returned by the server for invalid flags, having
+ * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
+ * DS flags set.
+ */
+static int nfs4_check_cl_exchange_flags(u32 flags)
+{
+	if (flags & ~EXCHGID4_FLAG_MASK_R)
+		goto out_inval;
+	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+		goto out_inval;
+	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
+		goto out_inval;
+	return NFS_OK;
+out_inval:
+	return -NFS4ERR_INVAL;
+}
+
+/*
  * nfs4_proc_exchange_id()
  *
  * Since the clientid has expired, all compounds using sessions
@@ -4498,7 +4550,7 @@
 	nfs4_verifier verifier;
 	struct nfs41_exchange_id_args args = {
 		.client = clp,
-		.flags = clp->cl_exchange_flags,
+		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
 	};
 	struct nfs41_exchange_id_res res = {
 		.client = clp,
@@ -4515,9 +4567,6 @@
 	dprintk("--> %s\n", __func__);
 	BUG_ON(clp == NULL);
 
-	/* Remove server-only flags */
-	args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
-
 	p = (u32 *)verifier.data;
 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
@@ -4543,6 +4592,7 @@
 			break;
 	}
 
+	status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
 }
@@ -4776,17 +4826,17 @@
 	if (!session)
 		return NULL;
 
-	init_completion(&session->complete);
-
 	tbl = &session->fc_slot_table;
 	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+	init_completion(&tbl->complete);
 
 	tbl = &session->bc_slot_table;
 	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+	init_completion(&tbl->complete);
 
 	session->session_state = 1<<NFS4_SESSION_INITING;
 
@@ -5280,13 +5330,23 @@
 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_layoutget *lgp = calldata;
-	struct inode *ino = lgp->args.inode;
-	struct nfs_server *server = NFS_SERVER(ino);
+	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
 
 	dprintk("--> %s\n", __func__);
+	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
+	 * right now covering the LAYOUTGET we are about to send.
+	 * However, that is not so catastrophic, and there seems
+	 * to be no way to prevent it completely.
+	 */
 	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
 				&lgp->res.seq_res, 0, task))
 		return;
+	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
+					  NFS_I(lgp->args.inode)->layout,
+					  lgp->args.ctx->state)) {
+		rpc_exit(task, NFS4_OK);
+		return;
+	}
 	rpc_call_start(task);
 }
 
@@ -5313,7 +5373,6 @@
 			return;
 		}
 	}
-	lgp->status = task->tk_status;
 	dprintk("<-- %s\n", __func__);
 }
 
@@ -5322,7 +5381,6 @@
 	struct nfs4_layoutget *lgp = calldata;
 
 	dprintk("--> %s\n", __func__);
-	put_layout_hdr(lgp->args.inode);
 	if (lgp->res.layout.buf != NULL)
 		free_page((unsigned long) lgp->res.layout.buf);
 	put_nfs_open_context(lgp->args.ctx);
@@ -5367,13 +5425,10 @@
 	if (IS_ERR(task))
 		return PTR_ERR(task);
 	status = nfs4_wait_for_completion_rpc_task(task);
-	if (status != 0)
-		goto out;
-	status = lgp->status;
-	if (status != 0)
-		goto out;
-	status = pnfs_layout_process(lgp);
-out:
+	if (status == 0)
+		status = task->tk_status;
+	if (status == 0)
+		status = pnfs_layout_process(lgp);
 	rpc_put_task(task);
 	dprintk("<-- %s status=%d\n", __func__, status);
 	return status;
@@ -5504,9 +5559,10 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr	= nfs4_getxattr,
-	.setxattr	= nfs4_setxattr,
-	.listxattr	= nfs4_listxattr,
+	.getxattr	= generic_getxattr,
+	.setxattr	= generic_setxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
 };
 
 const struct nfs_rpc_ops nfs_v4_clientops = {
@@ -5551,6 +5607,18 @@
 	.open_context	= nfs4_atomic_open,
 };
 
+static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
+	.prefix	= XATTR_NAME_NFSV4_ACL,
+	.list	= nfs4_xattr_list_nfs4_acl,
+	.get	= nfs4_xattr_get_nfs4_acl,
+	.set	= nfs4_xattr_set_nfs4_acl,
+};
+
+const struct xattr_handler *nfs4_xattr_handlers[] = {
+	&nfs4_xattr_nfs4_acl_handler,
+	NULL
+};
+
 /*
  * Local variables:
  *  c-basic-offset: 8
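
Note on the xattr conversion in nfs4proc.c above: because
nfs4_xattr_nfs4_acl_handler sets .prefix to the complete attribute name
"system.nfs4_acl", the generic_*xattr inode operations strip that prefix
before calling the handler, which is why the new .get/.set callbacks only
accept an empty remaining name (the strcmp(key, "") checks).  The generic
helpers find the handler through the superblock's s_xattr table; wiring up
the exported nfs4_xattr_handlers[] is not part of these hunks and
presumably happens in fs/nfs/super.c, roughly along these lines
(illustrative sketch only, function name is hypothetical):

	static int example_nfs4_fill_super(struct super_block *sb)
	{
		/* let generic_{get,set,list,remove}xattr find the
		 * "system.nfs4_acl" handler registered above */
		sb->s_xattr = nfs4_xattr_handlers;
		return 0;
	}
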
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 72b6c58..402143d 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -63,9 +63,14 @@
 
 	ops = clp->cl_mvops->state_renewal_ops;
 	dprintk("%s: start\n", __func__);
-	/* Are there any active superblocks? */
-	if (list_empty(&clp->cl_superblocks))
+
+	rcu_read_lock();
+	if (list_empty(&clp->cl_superblocks)) {
+		rcu_read_unlock();
 		goto out;
+	}
+	rcu_read_unlock();
+
 	spin_lock(&clp->cl_lock);
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
@@ -75,7 +80,7 @@
 		cred = ops->get_state_renewal_cred_locked(clp);
 		spin_unlock(&clp->cl_lock);
 		if (cred == NULL) {
-			if (list_empty(&clp->cl_delegations)) {
+			if (!nfs_delegations_present(clp)) {
 				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
 				goto out;
 			}
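
The rcu_read_lock() added in nfs4renewd.c above reflects that
cl_superblocks is treated as an RCU-protected list; the nfs4state.c hunks
below iterate it with list_for_each_entry_rcu(), condensed here from
nfs4_get_renew_cred_locked():

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		/* per-server scan, e.g. of server->state_owners */
		cred = nfs4_get_renew_cred_server_locked(server);
		if (cred != NULL)
			break;
	}
	rcu_read_unlock();

nfs_delegations_present() is assumed to do a similar RCU-protected walk of
the per-server delegation lists; its definition is outside this excerpt.
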
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f575a31..2336d53 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -105,14 +105,17 @@
 		put_rpccred(cred);
 }
 
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 {
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred = NULL;
 
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		if (list_empty(&sp->so_states))
 			continue;
 		cred = get_rpccred(sp->so_cred);
@@ -121,6 +124,28 @@
 	return cred;
 }
 
+/**
+ * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ * Caller must hold clp->cl_lock.
+ */
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+{
+	struct rpc_cred *cred = NULL;
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_renew_cred_server_locked(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+	return cred;
+}
+
 #if defined(CONFIG_NFS_V4_1)
 
 static int nfs41_setup_state_renewal(struct nfs_client *clp)
@@ -142,6 +167,11 @@
 	return status;
 }
 
+/*
+ * Back channel returns NFS4ERR_DELAY for new requests when
+ * NFS4_SESSION_DRAINING is set so there is no work to be done when draining
+ * is ended.
+ */
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
@@ -165,22 +195,32 @@
 	}
 }
 
-static int nfs4_begin_drain_session(struct nfs_client *clp)
+static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
 {
-	struct nfs4_session *ses = clp->cl_session;
-	struct nfs4_slot_table *tbl = &ses->fc_slot_table;
-
 	spin_lock(&tbl->slot_tbl_lock);
-	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
 	if (tbl->highest_used_slotid != -1) {
-		INIT_COMPLETION(ses->complete);
+		INIT_COMPLETION(tbl->complete);
 		spin_unlock(&tbl->slot_tbl_lock);
-		return wait_for_completion_interruptible(&ses->complete);
+		return wait_for_completion_interruptible(&tbl->complete);
 	}
 	spin_unlock(&tbl->slot_tbl_lock);
 	return 0;
 }
 
+static int nfs4_begin_drain_session(struct nfs_client *clp)
+{
+	struct nfs4_session *ses = clp->cl_session;
+	int ret = 0;
+
+	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
+	/* back channel */
+	ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+	if (ret)
+		return ret;
+	/* fore channel */
+	return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+}
+
 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
@@ -192,6 +232,12 @@
 	status = nfs4_proc_create_session(clp);
 	if (status != 0)
 		goto out;
+	status = nfs4_set_callback_sessionid(clp);
+	if (status != 0) {
+		printk(KERN_WARNING "Sessionid not set. No callback service\n");
+		nfs_callback_down(1);
+		status = 0;
+	}
 	nfs41_setup_state_renewal(clp);
 	nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
@@ -210,28 +256,56 @@
 
 #endif /* CONFIG_NFS_V4_1 */
 
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_setclientid_cred_server(struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred;
 
 	spin_lock(&clp->cl_lock);
-	cred = nfs4_get_machine_cred_locked(clp);
-	if (cred != NULL)
-		goto out;
-	pos = rb_first(&clp->cl_state_owners);
+	pos = rb_first(&server->state_owners);
 	if (pos != NULL) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		cred = get_rpccred(sp->so_cred);
 	}
-out:
 	spin_unlock(&clp->cl_lock);
 	return cred;
 }
 
-static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
-		__u64 minval, int maxbits)
+/**
+ * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	struct rpc_cred *cred;
+
+	spin_lock(&clp->cl_lock);
+	cred = nfs4_get_machine_cred_locked(clp);
+	spin_unlock(&clp->cl_lock);
+	if (cred != NULL)
+		goto out;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_setclientid_cred_server(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+
+out:
+	return cred;
+}
+
+static void nfs_alloc_unique_id_locked(struct rb_root *root,
+				       struct nfs_unique_id *new,
+				       __u64 minval, int maxbits)
 {
 	struct rb_node **p, *parent;
 	struct nfs_unique_id *pos;
@@ -286,16 +360,15 @@
 }
 
 static struct nfs4_state_owner *
-nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 {
-	struct nfs_client *clp = server->nfs_client;
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp, *res = NULL;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
 		if (server < sp->so_server) {
 			p = &parent->rb_left;
@@ -319,24 +392,17 @@
 }
 
 static struct nfs4_state_owner *
-nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
+nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 {
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct nfs_server *server = new->so_server;
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
-		if (new->so_server < sp->so_server) {
-			p = &parent->rb_left;
-			continue;
-		}
-		if (new->so_server > sp->so_server) {
-			p = &parent->rb_right;
-			continue;
-		}
 		if (new->so_cred < sp->so_cred)
 			p = &parent->rb_left;
 		else if (new->so_cred > sp->so_cred)
@@ -346,18 +412,21 @@
 			return sp;
 		}
 	}
-	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
-	rb_link_node(&new->so_client_node, parent, p);
-	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
+	nfs_alloc_unique_id_locked(&server->openowner_id,
+					&new->so_owner_id, 1, 64);
+	rb_link_node(&new->so_server_node, parent, p);
+	rb_insert_color(&new->so_server_node, &server->state_owners);
 	return new;
 }
 
 static void
-nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
+nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node))
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
+	struct nfs_server *server = sp->so_server;
+
+	if (!RB_EMPTY_NODE(&sp->so_server_node))
+		rb_erase(&sp->so_server_node, &server->state_owners);
+	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
 }
 
 /*
@@ -386,23 +455,32 @@
 static void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
-		struct nfs_client *clp = sp->so_server->nfs_client;
+	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+		struct nfs_server *server = sp->so_server;
+		struct nfs_client *clp = server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-		RB_CLEAR_NODE(&sp->so_client_node);
+		rb_erase(&sp->so_server_node, &server->state_owners);
+		RB_CLEAR_NODE(&sp->so_server_node);
 		spin_unlock(&clp->cl_lock);
 	}
 }
 
-struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+/**
+ * nfs4_get_state_owner - Look up a state owner given a credential
+ * @server: nfs_server to search
+ * @cred: RPC credential to match
+ *
+ * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
+					      struct rpc_cred *cred)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *new;
 
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_find_state_owner(server, cred);
+	sp = nfs4_find_state_owner_locked(server, cred);
 	spin_unlock(&clp->cl_lock);
 	if (sp != NULL)
 		return sp;
@@ -412,7 +490,7 @@
 	new->so_server = server;
 	new->so_cred = cred;
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_insert_state_owner(clp, new);
+	sp = nfs4_insert_state_owner_locked(new);
 	spin_unlock(&clp->cl_lock);
 	if (sp == new)
 		get_rpccred(cred);
@@ -423,6 +501,11 @@
 	return sp;
 }
 
+/**
+ * nfs4_put_state_owner - Release an nfs4_state_owner
+ * @sp: state owner data to release
+ *
+ */
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
 	struct nfs_client *clp = sp->so_server->nfs_client;
@@ -430,7 +513,7 @@
 
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 		return;
-	nfs4_remove_state_owner(clp, sp);
+	nfs4_remove_state_owner_locked(sp);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
 	put_rpccred(cred);
@@ -585,8 +668,11 @@
 	if (!call_close) {
 		nfs4_put_open_state(state);
 		nfs4_put_state_owner(owner);
-	} else
-		nfs4_do_close(path, state, gfp_mask, wait);
+	} else {
+		bool roc = pnfs_roc(state->inode);
+
+		nfs4_do_close(path, state, gfp_mask, wait, roc);
+	}
 }
 
 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
@@ -633,7 +719,8 @@
 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
 {
 	struct nfs4_lock_state *lsp;
-	struct nfs_client *clp = state->owner->so_server->nfs_client;
+	struct nfs_server *server = state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
@@ -657,7 +744,7 @@
 		return NULL;
 	}
 	spin_lock(&clp->cl_lock);
-	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
+	nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64);
 	spin_unlock(&clp->cl_lock);
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
@@ -665,10 +752,11 @@
 
 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
 {
-	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
+	struct nfs_server *server = lsp->ls_state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	spin_lock(&clp->cl_lock);
-	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
+	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
 	kfree(lsp);
@@ -1114,15 +1202,19 @@
 	}
 }
 
-static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+static void nfs4_reset_seqids(struct nfs_server *server,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	/* Reset all sequence ids to zero */
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	spin_lock(&clp->cl_lock);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		sp->so_seqid.flags = 0;
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
@@ -1131,6 +1223,18 @@
 		}
 		spin_unlock(&sp->so_lock);
 	}
+	spin_unlock(&clp->cl_lock);
+}
+
+static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_reset_seqids(server, mark_reclaim);
+	rcu_read_unlock();
 }
 
 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
@@ -1148,25 +1252,41 @@
 		(void)ops->reclaim_complete(clp);
 }
 
-static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+static void nfs4_clear_reclaim_server(struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
-		return 0;
-
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	spin_lock(&clp->cl_lock);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
-			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
+			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
+						&state->flags))
 				continue;
 			nfs4_state_mark_reclaim_nograce(clp, state);
 		}
 		spin_unlock(&sp->so_lock);
 	}
+	spin_unlock(&clp->cl_lock);
+}
+
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+
+	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_clear_reclaim_server(server);
+	rcu_read_unlock();
 
 	nfs_delegation_reap_unclaimed(clp);
 	return 1;
@@ -1238,27 +1358,40 @@
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
 {
+	struct nfs4_state_owner *sp;
+	struct nfs_server *server;
 	struct rb_node *pos;
 	int status = 0;
 
 restart:
-	spin_lock(&clp->cl_lock);
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
-		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
-			continue;
-		atomic_inc(&sp->so_count);
-		spin_unlock(&clp->cl_lock);
-		status = nfs4_reclaim_open_state(sp, ops);
-		if (status < 0) {
-			set_bit(ops->owner_flag_bit, &sp->so_flags);
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		spin_lock(&clp->cl_lock);
+		for (pos = rb_first(&server->state_owners);
+		     pos != NULL;
+		     pos = rb_next(pos)) {
+			sp = rb_entry(pos,
+				struct nfs4_state_owner, so_server_node);
+			if (!test_and_clear_bit(ops->owner_flag_bit,
+							&sp->so_flags))
+				continue;
+			atomic_inc(&sp->so_count);
+			spin_unlock(&clp->cl_lock);
+			rcu_read_unlock();
+
+			status = nfs4_reclaim_open_state(sp, ops);
+			if (status < 0) {
+				set_bit(ops->owner_flag_bit, &sp->so_flags);
+				nfs4_put_state_owner(sp);
+				return nfs4_recovery_handle_error(clp, status);
+			}
+
 			nfs4_put_state_owner(sp);
-			return nfs4_recovery_handle_error(clp, status);
+			goto restart;
 		}
-		nfs4_put_state_owner(sp);
-		goto restart;
+		spin_unlock(&clp->cl_lock);
 	}
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 	return status;
 }
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 9f1826b..2ab8e5c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -71,8 +71,8 @@
 /* lock,open owner id:
  * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT  >> 2)
  */
-#define open_owner_id_maxsz	(1 + 4)
-#define lock_owner_id_maxsz	(1 + 4)
+#define open_owner_id_maxsz	(1 + 1 + 4)
+#define lock_owner_id_maxsz	(1 + 1 + 4)
 #define decode_lockowner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
 #define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
@@ -1088,10 +1088,11 @@
 {
 	__be32 *p;
 
-	p = reserve_space(xdr, 28);
+	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, lowner->clientid);
-	*p++ = cpu_to_be32(16);
+	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+	*p++ = cpu_to_be32(lowner->s_dev);
 	xdr_encode_hyper(p, lowner->id);
 }
 
@@ -1210,10 +1211,11 @@
 	*p++ = cpu_to_be32(OP_OPEN);
 	*p = cpu_to_be32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->fmode);
-	p = reserve_space(xdr, 28);
+	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, arg->clientid);
-	*p++ = cpu_to_be32(16);
+	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "open id:", 8);
+	*p++ = cpu_to_be32(arg->server->s_dev);
 	xdr_encode_hyper(p, arg->id);
 }
 
@@ -1510,7 +1512,7 @@
 	hdr->replen += decode_restorefh_maxsz;
 }
 
-static int
+static void
 encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
 {
 	__be32 *p;
@@ -1521,14 +1523,12 @@
 	p = reserve_space(xdr, 2*4);
 	*p++ = cpu_to_be32(1);
 	*p = cpu_to_be32(FATTR4_WORD0_ACL);
-	if (arg->acl_len % 4)
-		return -EINVAL;
+	BUG_ON(arg->acl_len % 4);
 	p = reserve_space(xdr, 4);
 	*p = cpu_to_be32(arg->acl_len);
 	xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
 	hdr->nops++;
 	hdr->replen += decode_setacl_maxsz;
-	return 0;
 }
 
 static void
@@ -1789,7 +1789,6 @@
 		      const struct nfs4_layoutget_args *args,
 		      struct compound_hdr *hdr)
 {
-	nfs4_stateid stateid;
 	__be32 *p;
 
 	p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
@@ -1800,9 +1799,7 @@
 	p = xdr_encode_hyper(p, args->range.offset);
 	p = xdr_encode_hyper(p, args->range.length);
 	p = xdr_encode_hyper(p, args->minlength);
-	pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout,
-				args->ctx->state);
-	p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
+	p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
 	*p = cpu_to_be32(args->maxcount);
 
 	dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
@@ -1833,393 +1830,362 @@
 /*
  * Encode an ACCESS request
  */
-static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs4_accessargs *args)
+static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_accessargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_access(&xdr, args->access, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_access(xdr, args->access, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LOOKUP request
  */
-static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_arg *args)
+static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_lookup_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_lookup(&xdr, args->name, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_lookup(xdr, args->name, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LOOKUP_ROOT request
  */
-static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_root_arg *args)
+static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs4_lookup_root_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode REMOVE request
  */
-static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs_removeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_remove(&xdr, &args->name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_remove(xdr, &args->name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode RENAME request
  */
-static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args)
+static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs_renameargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->old_dir, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_putfh(&xdr, args->new_dir, &hdr);
-	encode_rename(&xdr, args->old_name, args->new_name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->old_dir, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->new_dir, &hdr);
+	encode_rename(xdr, args->old_name, args->new_name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LINK request
  */
-static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args)
+static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct nfs4_link_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_link(&xdr, args->name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_link(xdr, args->name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode CREATE request
  */
-static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_create_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_create(&xdr, args, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_create(xdr, args, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode SYMLINK request
  */
-static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_create_arg *args)
 {
-	return nfs4_xdr_enc_create(req, p, args);
+	nfs4_xdr_enc_create(req, xdr, args);
 }
 
 /*
  * Encode GETATTR request
  */
-static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nfs4_getattr_arg *args)
+static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_getattr_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a CLOSE request
  */
-static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_closeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_close(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_close(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN request
  */
-static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_openargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_open(&xdr, args, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_open(xdr, args, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN_CONFIRM request
  */
-static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_open_confirmargs *args)
+static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      struct nfs_open_confirmargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops   = 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open_confirm(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open_confirm(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN request with no attributes.
  */
-static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_openargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN_DOWNGRADE request
  */
-static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs_closeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open_downgrade(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open_downgrade(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCK request
  */
-static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_args *args)
+static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_lock_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_lock(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_lock(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCKT request
  */
-static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_args *args)
+static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_lockt_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_lockt(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_lockt(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCKU request
  */
-static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_args *args)
+static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_locku_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_locku(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_locku(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
-static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
+static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
+					   struct xdr_stream *xdr,
+					struct nfs_release_lockowner_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_release_lockowner(xdr, &args->lock_owner, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READLINK request
  */
-static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_readlink *args)
+static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  const struct nfs4_readlink *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_readlink(&xdr, args, req, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_readlink(xdr, args, req, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
 			args->pgbase, args->pglen);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READDIR request
  */
-static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nfs4_readdir_arg *args)
+static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_readdir_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_readdir(&xdr, args, req, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_readdir(xdr, args, req, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
 			 args->pgbase, args->count);
@@ -2227,428 +2193,387 @@
 			__func__, hdr.replen << 2, args->pages,
 			args->pgbase, args->count);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READ request
  */
-static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_readargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_read(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_read(xdr, args, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
 			 args->pages, args->pgbase, args->count);
 	req->rq_rcv_buf.flags |= XDRBUF_READ;
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an SETATTR request
  */
-static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args)
+static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_setattrargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_setattr(&xdr, args, args->server, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_setattr(xdr, args, args->server, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a GETACL request
  */
-static int
-nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
-		struct nfs_getaclargs *args)
+static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_getaclargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 	uint32_t replen;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
 	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
-	encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
+	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 		args->acl_pages, args->acl_pgbase, args->acl_len);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a WRITE request
  */
-static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_writeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_write(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_write(xdr, args, &hdr);
 	req->rq_snd_buf.flags |= XDRBUF_WRITE;
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  *  a COMMIT request
  */
-static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_writeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_commit(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_commit(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * FSINFO request
  */
-static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsinfo_arg *args)
+static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs4_fsinfo_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_fsinfo(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_fsinfo(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a PATHCONF request
  */
-static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct nfs4_pathconf_arg *args)
+static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  const struct nfs4_pathconf_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
 			   &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a STATFS request
  */
-static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs4_statfs_arg *args)
+static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_statfs_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
 			   args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * GETATTR_BITMAP request
  */
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
-				    struct nfs4_server_caps_arg *args)
+static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs4_server_caps_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fhandle, &hdr);
-	encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fhandle, &hdr);
+	encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
 			   FATTR4_WORD0_LINK_SUPPORT|
 			   FATTR4_WORD0_SYMLINK_SUPPORT|
 			   FATTR4_WORD0_ACLSUPPORT, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a RENEW request
  */
-static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
+static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_client *clp)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_renew(&xdr, clp, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_renew(xdr, clp, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SETCLIENTID request
  */
-static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc)
+static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs4_setclientid *sc)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_setclientid(&xdr, sc, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_setclientid(xdr, sc, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SETCLIENTID_CONFIRM request
  */
-static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
+static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
+					     struct xdr_stream *xdr,
+					     struct nfs4_setclientid_res *arg)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_setclientid_confirm(&xdr, arg, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_fsinfo(&xdr, lease_bitmap, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_setclientid_confirm(xdr, arg, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * DELEGRETURN request
  */
-static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struct nfs4_delegreturnargs *args)
+static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs4_delegreturnargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fhandle, &hdr);
-	encode_delegreturn(&xdr, args->stateid, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fhandle, &hdr);
+	encode_delegreturn(xdr, args->stateid, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode FS_LOCATIONS request
  */
-static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations_arg *args)
+static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      struct nfs4_fs_locations_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 	uint32_t replen;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_lookup(&xdr, args->name, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_lookup(xdr, args->name, &hdr);
 	replen = hdr.replen;	/* get the attribute into args->page */
-	encode_fs_locations(&xdr, args->bitmask, &hdr);
+	encode_fs_locations(xdr, args->bitmask, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
 			0, PAGE_SIZE);
 	encode_nops(&hdr);
-	return 0;
 }
 
 #if defined(CONFIG_NFS_V4_1)
 /*
  * EXCHANGE_ID request
  */
-static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
-				    struct nfs41_exchange_id_args *args)
+static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs41_exchange_id_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_exchange_id(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_exchange_id(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a CREATE_SESSION request
  */
-static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
-				       struct nfs41_create_session_args *args)
+static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs41_create_session_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_create_session(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_create_session(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a DESTROY_SESSION request
  */
-static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
-					struct nfs4_session *session)
+static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
+					 struct xdr_stream *xdr,
+					 struct nfs4_session *session)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = session->clp->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_destroy_session(&xdr, session, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_destroy_session(xdr, session, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SEQUENCE request
  */
-static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
-				 struct nfs4_sequence_args *args)
+static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs4_sequence_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a GET_LEASE_TIME request
  */
-static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
-				       struct nfs4_get_lease_time_args *args)
+static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs4_get_lease_time_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
 	};
 	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->la_seq_args, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_fsinfo(&xdr, lease_bitmap, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->la_seq_args, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a RECLAIM_COMPLETE request
  */
-static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
-				     struct nfs41_reclaim_complete_args *args)
+static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
+					  struct xdr_stream *xdr,
+				struct nfs41_reclaim_complete_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args)
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_reclaim_complete(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_reclaim_complete(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode GETDEVICEINFO request
  */
-static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
-				      struct nfs4_getdeviceinfo_args *args)
+static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
+				       struct xdr_stream *xdr,
+				       struct nfs4_getdeviceinfo_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_getdeviceinfo(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_getdeviceinfo(xdr, args, &hdr);
 
 	/* set up reply kvec. Subtract notification bitmap max size (2)
 	 * so that notification bitmap is put in xdr_buf tail */
@@ -2657,27 +2582,24 @@
 			 args->pdev->pglen);
 
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  *  Encode LAYOUTGET request
  */
-static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p,
-				  struct nfs4_layoutget_args *args)
+static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs4_layoutget_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, NFS_FH(args->inode), &hdr);
-	encode_layoutget(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+	encode_layoutget(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
@@ -4475,7 +4397,7 @@
 		goto out_overflow;
 	eof = be32_to_cpup(p++);
 	count = be32_to_cpup(p);
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
 	recvd = req->rq_rcv_buf.len - hdrlen;
 	if (count > recvd) {
 		dprintk("NFS: server cheating in read reply: "
@@ -5000,7 +4922,7 @@
 		goto out_overflow;
 	len = be32_to_cpup(p);
 	if (len) {
-		int i;
+		uint32_t i;
 
 		p = xdr_inline_decode(xdr, 4 * len);
 		if (unlikely(!p))
@@ -5090,26 +5012,26 @@
 /*
  * Decode OPEN_DOWNGRADE response
  */
-static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
+				       struct nfs_closeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_downgrade(&xdr, res);
+	status = decode_open_downgrade(xdr, res);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5118,26 +5040,25 @@
 /*
  * Decode ACCESS response
  */
-static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_accessres *res)
+static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_accessres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_access(&xdr, res);
+	status = decode_access(xdr, res);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5146,26 +5067,28 @@
 /*
  * Decode LOOKUP response
  */
-static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_lookup_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+	status = decode_getfh(xdr, res->fh);
+	if (status)
 		goto out;
-	status = decode_getfattr(&xdr, res->fattr, res->server
+	status = decode_getfattr(xdr, res->fattr, res->server
 			,!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5174,23 +5097,25 @@
 /*
  * Decode LOOKUP_ROOT response
  */
-static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_lookup_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putrootfh(&xdr)) != 0)
+	status = decode_putrootfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) == 0)
-		status = decode_getfattr(&xdr, res->fattr, res->server,
+	status = decode_getfh(xdr, res->fh);
+	if (status == 0)
+		status = decode_getfattr(xdr, res->fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5199,24 +5124,25 @@
 /*
  * Decode REMOVE response
  */
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_removeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
+	status = decode_remove(xdr, &res->cinfo);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5225,34 +5151,38 @@
 /*
  * Decode RENAME response
  */
-static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res)
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_renameres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
+	status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
+	if (status)
 		goto out;
 	/* Current FH is target directory */
-	if (decode_getfattr(&xdr, res->new_fattr, res->server,
+	if (decode_getfattr(xdr, res->new_fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->old_fattr, res->server,
+	decode_getfattr(xdr, res->old_fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5261,37 +5191,41 @@
 /*
  * Decode LINK response
  */
-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link_res *res)
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs4_link_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_link(&xdr, &res->cinfo)) != 0)
+	status = decode_link(xdr, &res->cinfo);
+	if (status)
 		goto out;
 	/*
 	 * Note order: OP_LINK leaves the directory as the current
 	 *             filehandle.
 	 */
-	if (decode_getfattr(&xdr, res->dir_attr, res->server,
+	if (decode_getfattr(xdr, res->dir_attr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5300,33 +5234,37 @@
 /*
  * Decode CREATE response
  */
-static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_create_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
+	status = decode_create(xdr, &res->dir_cinfo);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+	status = decode_getfh(xdr, res->fh);
+	if (status)
 		goto out;
-	if (decode_getfattr(&xdr, res->fattr, res->server,
+	if (decode_getfattr(xdr, res->fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->dir_fattr, res->server,
+	decode_getfattr(xdr, res->dir_fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5335,31 +5273,31 @@
 /*
  * Decode SYMLINK response
  */
-static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_create_res *res)
 {
-	return nfs4_xdr_dec_create(rqstp, p, res);
+	return nfs4_xdr_dec_create(rqstp, xdr, res);
 }
 
 /*
  * Decode GETATTR response
  */
-static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_getattr_res *res)
+static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_getattr_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getfattr(&xdr, res->fattr, res->server,
+	status = decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5368,46 +5306,40 @@
 /*
  * Encode an SETACL request
  */
-static int
-nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args)
+static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_setaclargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
-	int status;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	status = encode_setacl(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_setacl(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return status;
 }
 
 /*
  * Decode SETACL response
  */
 static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 		    struct nfs_setaclres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 out:
 	return status;
 }
@@ -5416,24 +5348,22 @@
  * Decode GETACL response
  */
 static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 		    struct nfs_getaclres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getacl(&xdr, rqstp, &res->acl_len);
+	status = decode_getacl(xdr, rqstp, &res->acl_len);
 
 out:
 	return status;
@@ -5442,23 +5372,22 @@
 /*
  * Decode CLOSE response
  */
-static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_closeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_close(&xdr, res);
+	status = decode_close(xdr, res);
 	if (status != 0)
 		goto out;
 	/*
@@ -5467,7 +5396,7 @@
 	 * 	an ESTALE error. Shouldn't be a problem,
 	 * 	though, since fattr->valid will remain unset.
 	 */
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5476,36 +5405,35 @@
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_savefh(&xdr);
+	status = decode_savefh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	if (decode_getfh(&xdr, &res->fh) != 0)
+	if (decode_getfh(xdr, &res->fh) != 0)
 		goto out;
-	if (decode_getfattr(&xdr, res->f_attr, res->server,
+	if (decode_getfattr(xdr, res->f_attr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if (decode_restorefh(&xdr) != 0)
+	if (decode_restorefh(xdr) != 0)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5514,20 +5442,20 @@
 /*
  * Decode OPEN_CONFIRM response
  */
-static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res)
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
+				     struct xdr_stream *xdr,
+				     struct nfs_open_confirmres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_confirm(&xdr, res);
+	status = decode_open_confirm(xdr, res);
 out:
 	return status;
 }
@@ -5535,26 +5463,26 @@
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->f_attr, res->server,
+	decode_getfattr(xdr, res->f_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5563,26 +5491,26 @@
 /*
  * Decode SETATTR response
  */
-static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res)
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nfs_setattrres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5591,23 +5519,22 @@
 /*
  * Decode LOCK response
  */
-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_res *res)
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_lock_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lock(&xdr, res);
+	status = decode_lock(xdr, res);
 out:
 	return status;
 }
@@ -5615,23 +5542,22 @@
 /*
  * Decode LOCKT response
  */
-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lockt_res *res)
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_lockt_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lockt(&xdr, res);
+	status = decode_lockt(xdr, res);
 out:
 	return status;
 }
@@ -5639,61 +5565,58 @@
 /*
  * Decode LOCKU response
  */
-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_locku_res *res)
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_locku_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_locku(&xdr, res);
+	status = decode_locku(xdr, res);
 out:
 	return status;
 }
 
-static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+					  struct xdr_stream *xdr, void *dummy)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_release_lockowner(&xdr);
+		status = decode_release_lockowner(xdr);
 	return status;
 }
 
 /*
  * Decode READLINK response
  */
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_readlink_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readlink(&xdr, rqstp);
+	status = decode_readlink(xdr, rqstp);
 out:
 	return status;
 }
@@ -5701,23 +5624,22 @@
 /*
  * Decode READDIR response
  */
-static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_readdir_res *res)
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_readdir_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readdir(&xdr, rqstp, res);
+	status = decode_readdir(xdr, rqstp, res);
 out:
 	return status;
 }
@@ -5725,23 +5647,22 @@
 /*
  * Decode Read response
  */
-static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readres *res)
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_readres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_read(&xdr, rqstp, res);
+	status = decode_read(xdr, rqstp, res);
 	if (!status)
 		status = res->count;
 out:
@@ -5751,26 +5672,25 @@
 /*
  * Decode WRITE response
  */
-static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_write(&xdr, res);
+	status = decode_write(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 	if (!status)
 		status = res->count;
@@ -5781,26 +5701,25 @@
 /*
  * Decode COMMIT response
  */
-static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_commit(&xdr, res);
+	status = decode_commit(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5809,85 +5728,80 @@
 /*
  * Decode FSINFO response
  */
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_fsinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->fsinfo);
+		status = decode_fsinfo(xdr, res->fsinfo);
 	return status;
 }
 
 /*
  * Decode PATHCONF response
  */
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
 				 struct nfs4_pathconf_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_pathconf(&xdr, res->pathconf);
+		status = decode_pathconf(xdr, res->pathconf);
 	return status;
 }
 
 /*
  * Decode STATFS response
  */
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_statfs_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_statfs(&xdr, res->fsstat);
+		status = decode_statfs(xdr, res->fsstat);
 	return status;
 }
 
 /*
  * Decode GETATTR_BITMAP response
  */
-static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_server_caps_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	status = decode_server_caps(&xdr, res);
+	status = decode_server_caps(xdr, res);
 out:
 	return status;
 }
@@ -5895,79 +5809,77 @@
 /*
  * Decode RENEW response
  */
-static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      void *__unused)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_renew(&xdr);
+		status = decode_renew(xdr);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID response
  */
-static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
-		struct nfs4_setclientid_res *res)
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_setclientid_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid(&xdr, res);
+		status = decode_setclientid(xdr, res);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID_CONFIRM response
  */
-static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+					    struct xdr_stream *xdr,
+					    struct nfs_fsinfo *fsinfo)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid_confirm(&xdr);
+		status = decode_setclientid_confirm(xdr);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, fsinfo);
+		status = decode_fsinfo(xdr, fsinfo);
 	return status;
 }
 
 /*
  * Decode DELEGRETURN response
  */
-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_delegreturnres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_delegreturn(&xdr);
+	status = decode_delegreturn(xdr);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5976,26 +5888,27 @@
 /*
  * Decode FS_LOCATIONS response
  */
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
 				     struct nfs4_fs_locations_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	xdr_enter_page(&xdr, PAGE_SIZE);
-	status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+	xdr_enter_page(xdr, PAGE_SIZE);
+	status = decode_getfattr(xdr, &res->fs_locations->fattr,
 				 res->fs_locations->server,
 				 !RPC_IS_ASYNC(req->rq_task));
 out:
@@ -6006,129 +5919,122 @@
 /*
  * Decode EXCHANGE_ID response
  */
-static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
 				    void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_exchange_id(&xdr, res);
+		status = decode_exchange_id(xdr, res);
 	return status;
 }
 
 /*
  * Decode CREATE_SESSION response
  */
-static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs41_create_session_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_create_session(&xdr, res);
+		status = decode_create_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode DESTROY_SESSION response
  */
-static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
-					void *dummy)
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_destroy_session(&xdr, dummy);
+		status = decode_destroy_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode SEQUENCE response
  */
-static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_sequence_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, res, rqstp);
+		status = decode_sequence(xdr, res, rqstp);
 	return status;
 }
 
 /*
  * Decode GET_LEASE_TIME response
  */
-static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs4_get_lease_time_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+		status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->lr_fsinfo);
+		status = decode_fsinfo(xdr, res->lr_fsinfo);
 	return status;
 }
 
 /*
  * Decode RECLAIM_COMPLETE response
  */
-static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+					 struct xdr_stream *xdr,
 					 struct nfs41_reclaim_complete_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, rqstp);
+		status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (!status)
-		status = decode_reclaim_complete(&xdr, (void *)NULL);
+		status = decode_reclaim_complete(xdr, (void *)NULL);
 	return status;
 }
 
 /*
  * Decode GETDEVINFO response
  */
-static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
+				      struct xdr_stream *xdr,
 				      struct nfs4_getdeviceinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status != 0)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status != 0)
 		goto out;
-	status = decode_getdeviceinfo(&xdr, res->pdev);
+	status = decode_getdeviceinfo(xdr, res->pdev);
 out:
 	return status;
 }
@@ -6136,31 +6042,44 @@
 /*
  * Decode LAYOUTGET response
  */
-static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
 				  struct nfs4_layoutget_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_layoutget(&xdr, rqstp, res);
+	status = decode_layoutget(xdr, rqstp, res);
 out:
 	return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
-__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
-			   struct nfs_server *server, int plus)
+/**
+ * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ */
+int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
 {
 	uint32_t bitmap[2] = {0};
 	uint32_t len;
@@ -6172,9 +6091,9 @@
 		if (unlikely(!p))
 			goto out_overflow;
 		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
+			return -EAGAIN;
 		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+		return -EBADCOOKIE;
 	}
 
 	p = xdr_inline_decode(xdr, 12);
@@ -6203,7 +6122,8 @@
 	if (decode_attr_length(xdr, &len, &p) < 0)
 		goto out_overflow;
 
-	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0)
+	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
+					entry->server, 1) < 0)
 		goto out_overflow;
 	if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
 		entry->ino = entry->fattr->fileid;
@@ -6215,17 +6135,11 @@
 	if (verify_attr_len(xdr, p, len) < 0)
 		goto out_overflow;
 
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
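
/*
 * A minimal illustrative sketch, with a hypothetical caller name, of the
 * new return convention above: nfs4_decode_dirent() now reports failure
 * as a plain negative errno rather than an ERR_PTR()-encoded pointer, so
 * a caller tests the int directly.
 */
static int example_fill_dirent(struct xdr_stream *xdr,
			       struct nfs_entry *entry, int plus)
{
	int status = nfs4_decode_dirent(xdr, entry, plus);

	if (status == -EBADCOOKIE && entry->eof)
		return 0;	/* clean end of directory */
	return status;		/* 0 on success, -EAGAIN when data runs out */
}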
 
 /*
@@ -6301,8 +6215,8 @@
 #define PROC(proc, argtype, restype)				\
 [NFSPROC4_CLNT_##proc] = {					\
 	.p_proc   = NFSPROC4_COMPOUND,				\
-	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,		\
-	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,		\
+	.p_encode = (kxdreproc_t)nfs4_xdr_##argtype,		\
+	.p_decode = (kxdrdproc_t)nfs4_xdr_##restype,		\
 	.p_arglen = NFS4_##argtype##_sz,			\
 	.p_replen = NFS4_##restype##_sz,			\
 	.p_statidx = NFSPROC4_CLNT_##proc,			\
@@ -6310,50 +6224,50 @@
 }
 
 struct rpc_procinfo	nfs4_procedures[] = {
-  PROC(READ,		enc_read,	dec_read),
-  PROC(WRITE,		enc_write,	dec_write),
-  PROC(COMMIT,		enc_commit,	dec_commit),
-  PROC(OPEN,		enc_open,	dec_open),
-  PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
-  PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
-  PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
-  PROC(CLOSE,		enc_close,	dec_close),
-  PROC(SETATTR,		enc_setattr,	dec_setattr),
-  PROC(FSINFO,		enc_fsinfo,	dec_fsinfo),
-  PROC(RENEW,		enc_renew,	dec_renew),
-  PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
-  PROC(SETCLIENTID_CONFIRM,	enc_setclientid_confirm,	dec_setclientid_confirm),
-  PROC(LOCK,            enc_lock,       dec_lock),
-  PROC(LOCKT,           enc_lockt,      dec_lockt),
-  PROC(LOCKU,           enc_locku,      dec_locku),
-  PROC(ACCESS,		enc_access,	dec_access),
-  PROC(GETATTR,		enc_getattr,	dec_getattr),
-  PROC(LOOKUP,		enc_lookup,	dec_lookup),
-  PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
-  PROC(REMOVE,		enc_remove,	dec_remove),
-  PROC(RENAME,		enc_rename,	dec_rename),
-  PROC(LINK,		enc_link,	dec_link),
-  PROC(SYMLINK,		enc_symlink,	dec_symlink),
-  PROC(CREATE,		enc_create,	dec_create),
-  PROC(PATHCONF,	enc_pathconf,	dec_pathconf),
-  PROC(STATFS,		enc_statfs,	dec_statfs),
-  PROC(READLINK,	enc_readlink,	dec_readlink),
-  PROC(READDIR,		enc_readdir,	dec_readdir),
-  PROC(SERVER_CAPS,	enc_server_caps, dec_server_caps),
-  PROC(DELEGRETURN,	enc_delegreturn, dec_delegreturn),
-  PROC(GETACL,		enc_getacl,	dec_getacl),
-  PROC(SETACL,		enc_setacl,	dec_setacl),
-  PROC(FS_LOCATIONS,	enc_fs_locations, dec_fs_locations),
-  PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+	PROC(READ,		enc_read,		dec_read),
+	PROC(WRITE,		enc_write,		dec_write),
+	PROC(COMMIT,		enc_commit,		dec_commit),
+	PROC(OPEN,		enc_open,		dec_open),
+	PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
+	PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
+	PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
+	PROC(CLOSE,		enc_close,		dec_close),
+	PROC(SETATTR,		enc_setattr,		dec_setattr),
+	PROC(FSINFO,		enc_fsinfo,		dec_fsinfo),
+	PROC(RENEW,		enc_renew,		dec_renew),
+	PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
+	PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
+	PROC(LOCK,		enc_lock,		dec_lock),
+	PROC(LOCKT,		enc_lockt,		dec_lockt),
+	PROC(LOCKU,		enc_locku,		dec_locku),
+	PROC(ACCESS,		enc_access,		dec_access),
+	PROC(GETATTR,		enc_getattr,		dec_getattr),
+	PROC(LOOKUP,		enc_lookup,		dec_lookup),
+	PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
+	PROC(REMOVE,		enc_remove,		dec_remove),
+	PROC(RENAME,		enc_rename,		dec_rename),
+	PROC(LINK,		enc_link,		dec_link),
+	PROC(SYMLINK,		enc_symlink,		dec_symlink),
+	PROC(CREATE,		enc_create,		dec_create),
+	PROC(PATHCONF,		enc_pathconf,		dec_pathconf),
+	PROC(STATFS,		enc_statfs,		dec_statfs),
+	PROC(READLINK,		enc_readlink,		dec_readlink),
+	PROC(READDIR,		enc_readdir,		dec_readdir),
+	PROC(SERVER_CAPS,	enc_server_caps,	dec_server_caps),
+	PROC(DELEGRETURN,	enc_delegreturn,	dec_delegreturn),
+	PROC(GETACL,		enc_getacl,		dec_getacl),
+	PROC(SETACL,		enc_setacl,		dec_setacl),
+	PROC(FS_LOCATIONS,	enc_fs_locations,	dec_fs_locations),
+	PROC(RELEASE_LOCKOWNER,	enc_release_lockowner,	dec_release_lockowner),
 #if defined(CONFIG_NFS_V4_1)
-  PROC(EXCHANGE_ID,	enc_exchange_id,	dec_exchange_id),
-  PROC(CREATE_SESSION,	enc_create_session,	dec_create_session),
-  PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
-  PROC(SEQUENCE,	enc_sequence,	dec_sequence),
-  PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
-  PROC(RECLAIM_COMPLETE, enc_reclaim_complete,  dec_reclaim_complete),
-  PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
-  PROC(LAYOUTGET,  enc_layoutget,     dec_layoutget),
+	PROC(EXCHANGE_ID,	enc_exchange_id,	dec_exchange_id),
+	PROC(CREATE_SESSION,	enc_create_session,	dec_create_session),
+	PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
+	PROC(SEQUENCE,		enc_sequence,		dec_sequence),
+	PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
+	PROC(RECLAIM_COMPLETE,	enc_reclaim_complete,	dec_reclaim_complete),
+	PROC(GETDEVICEINFO,	enc_getdeviceinfo,	dec_getdeviceinfo),
+	PROC(LAYOUTGET,		enc_layoutget,		dec_layoutget),
 #endif /* CONFIG_NFS_V4_1 */
 };
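
/*
 * Hypothetical caller-side sketch of the convention this file now follows:
 * the transport layer presumably initializes the xdr_stream once from the
 * receive buffer and hands that same stream to the per-procedure
 * kxdrdproc_t decoder, instead of every nfs4_xdr_dec_* routine calling
 * xdr_init_decode() on the raw buffer itself.  The wrapper name below is
 * made up for illustration only.
 */
static int example_dispatch_decode(struct rpc_rqst *req, __be32 *p,
				   kxdrdproc_t decode, void *res)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
	return decode(req, &xdr, res);
}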
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b68536c..e1164e3 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,12 +26,9 @@
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
-	struct nfs_page	*p;
-	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
-	if (p) {
-		memset(p, 0, sizeof(*p));
+	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+	if (p)
 		INIT_LIST_HEAD(&p->wb_list);
-	}
 	return p;
 }
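
/*
 * For reference, a minimal sketch (assuming the usual slab API) of why the
 * open-coded memset() above becomes redundant: kmem_cache_zalloc() is an
 * allocation with zeroing folded in.
 */
static inline void *example_cache_zalloc(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags | __GFP_ZERO);
}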
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index db77342..bc40897 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -177,105 +177,149 @@
  * pNFS client layout cache
  */
 
-static void
-get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+/* Need to hold i_lock if caller does not already hold reference */
+void
+get_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	lo->refcount++;
+	atomic_inc(&lo->plh_refcount);
+}
+
+static void
+destroy_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	dprintk("%s: freeing layout cache %p\n", __func__, lo);
+	BUG_ON(!list_empty(&lo->plh_layouts));
+	NFS_I(lo->plh_inode)->layout = NULL;
+	kfree(lo);
 }
 
 static void
 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	BUG_ON(lo->refcount == 0);
-
-	lo->refcount--;
-	if (!lo->refcount) {
-		dprintk("%s: freeing layout cache %p\n", __func__, lo);
-		BUG_ON(!list_empty(&lo->layouts));
-		NFS_I(lo->inode)->layout = NULL;
-		kfree(lo);
-	}
+	if (atomic_dec_and_test(&lo->plh_refcount))
+		destroy_layout_hdr(lo);
 }
 
 void
-put_layout_hdr(struct inode *inode)
+put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	spin_lock(&inode->i_lock);
-	put_layout_hdr_locked(NFS_I(inode)->layout);
-	spin_unlock(&inode->i_lock);
+	struct inode *inode = lo->plh_inode;
+
+	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+		destroy_layout_hdr(lo);
+		spin_unlock(&inode->i_lock);
+	}
 }
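
/*
 * Sketch of the atomic_dec_and_lock() pattern adopted above, using a
 * hypothetical object: the spinlock is taken only when the reference count
 * is about to hit zero, and the call returns true with the lock held so
 * the final teardown can run under it.
 */
struct example_obj {
	atomic_t	refcount;
	spinlock_t	*lock;		/* lock of the containing structure */
};

static void example_obj_put(struct example_obj *obj)
{
	spinlock_t *lock = obj->lock;

	if (atomic_dec_and_lock(&obj->refcount, lock)) {
		kfree(obj);		/* hypothetical teardown */
		spin_unlock(lock);
	}
}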
 
 static void
 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
 {
-	INIT_LIST_HEAD(&lseg->fi_list);
-	kref_init(&lseg->kref);
-	lseg->layout = lo;
+	INIT_LIST_HEAD(&lseg->pls_list);
+	atomic_set(&lseg->pls_refcount, 1);
+	smp_mb();
+	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
+	lseg->pls_layout = lo;
 }
 
-/* Called without i_lock held, as the free_lseg call may sleep */
-static void
-destroy_lseg(struct kref *kref)
+static void free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct pnfs_layout_segment *lseg =
-		container_of(kref, struct pnfs_layout_segment, kref);
-	struct inode *ino = lseg->layout->inode;
+	struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("--> %s\n", __func__);
 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
-	/* Matched by get_layout_hdr_locked in pnfs_insert_layout */
-	put_layout_hdr(ino);
+	/* Matched by get_layout_hdr in pnfs_insert_layout */
+	put_layout_hdr(NFS_I(ino)->layout);
 }
 
-static void
-put_lseg(struct pnfs_layout_segment *lseg)
+/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
+ * could sleep, so must be called outside of the lock.
+ * Returns 1 if the object was removed, otherwise returns 0.
+ */
+static int
+put_lseg_locked(struct pnfs_layout_segment *lseg,
+		struct list_head *tmp_list)
 {
-	if (!lseg)
-		return;
+	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+		atomic_read(&lseg->pls_refcount),
+		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+	if (atomic_dec_and_test(&lseg->pls_refcount)) {
+		struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("%s: lseg %p ref %d\n", __func__, lseg,
-		atomic_read(&lseg->kref.refcount));
-	kref_put(&lseg->kref, destroy_lseg);
+		BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+		list_del(&lseg->pls_list);
+		if (list_empty(&lseg->pls_layout->plh_segs)) {
+			struct nfs_client *clp;
+
+			clp = NFS_SERVER(ino)->nfs_client;
+			spin_lock(&clp->cl_lock);
+			/* List does not take a reference, so no need for put here */
+			list_del_init(&lseg->pls_layout->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags);
+		}
+		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+		list_add(&lseg->pls_list, tmp_list);
+		return 1;
+	}
+	return 0;
 }
 
-static void
-pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+static bool
+should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
+{
+	return (recall_iomode == IOMODE_ANY ||
+		lseg_iomode == recall_iomode);
+}
+
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+			     struct list_head *tmp_list)
+{
+	int rv = 0;
+
+	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+		/* Remove the reference keeping the lseg in the
+		 * list.  It will now be removed when all
+		 * outstanding I/O is finished.
+		 */
+		rv = put_lseg_locked(lseg, tmp_list);
+	}
+	return rv;
+}
+
+/* Returns count of number of matching invalid lsegs remaining in list
+ * after call.
+ */
+int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+			    struct list_head *tmp_list,
+			    u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *next;
-	struct nfs_client *clp;
+	int invalid = 0, removed = 0;
 
 	dprintk("%s:Begin lo %p\n", __func__, lo);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
-		dprintk("%s: freeing lseg %p\n", __func__, lseg);
-		list_move(&lseg->fi_list, tmp_list);
-	}
-	clp = NFS_SERVER(lo->inode)->nfs_client;
-	spin_lock(&clp->cl_lock);
-	/* List does not take a reference, so no need for put here */
-	list_del_init(&lo->layouts);
-	spin_unlock(&clp->cl_lock);
-	write_seqlock(&lo->seqlock);
-	clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-
-	dprintk("%s:Return\n", __func__);
+	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+			dprintk("%s: freeing lseg %p iomode %d "
+				"offset %llu length %llu\n", __func__,
+				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+				lseg->pls_range.length);
+			invalid++;
+			removed += mark_lseg_invalid(lseg, tmp_list);
+		}
+	dprintk("%s:Return %i\n", __func__, invalid - removed);
+	return invalid - removed;
 }
 
-static void
-pnfs_free_lseg_list(struct list_head *tmp_list)
+void
+pnfs_free_lseg_list(struct list_head *free_me)
 {
-	struct pnfs_layout_segment *lseg;
+	struct pnfs_layout_segment *lseg, *tmp;
 
-	while (!list_empty(tmp_list)) {
-		lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
-				fi_list);
-		dprintk("%s calling put_lseg on %p\n", __func__, lseg);
-		list_del(&lseg->fi_list);
-		put_lseg(lseg);
+	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
+		list_del(&lseg->pls_list);
+		free_lseg(lseg);
 	}
 }
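
/*
 * Sketch of the collect-then-free pattern the functions above rely on,
 * with a hypothetical lock and source list: segments are detached onto a
 * private list while the lock is held, and free_lseg() (which may sleep
 * in the layout driver) runs only after the lock has been dropped.
 */
static void example_reap_lsegs(spinlock_t *lock, struct list_head *live)
{
	LIST_HEAD(free_me);
	struct pnfs_layout_segment *lseg, *next;

	spin_lock(lock);
	list_splice_init(live, &free_me);	/* detach under the lock */
	spin_unlock(lock);

	list_for_each_entry_safe(lseg, next, &free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);		/* safe to sleep here */
	}
}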
 
@@ -288,7 +332,8 @@
 	spin_lock(&nfsi->vfs_inode.i_lock);
 	lo = nfsi->layout;
 	if (lo) {
-		pnfs_clear_lseg_list(lo, &tmp_list);
+		set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
 		/* Matched by refcount set to 1 in alloc_init_layout_hdr */
 		put_layout_hdr_locked(lo);
 	}
@@ -312,76 +357,80 @@
 
 	while (!list_empty(&tmp_list)) {
 		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
-				layouts);
+				plh_layouts);
 		dprintk("%s freeing layout for inode %lu\n", __func__,
-			lo->inode->i_ino);
-		pnfs_destroy_layout(NFS_I(lo->inode));
+			lo->plh_inode->i_ino);
+		pnfs_destroy_layout(NFS_I(lo->plh_inode));
 	}
 }
 
-/* update lo->stateid with new if is more recent
- *
- * lo->stateid could be the open stateid, in which case we just use what given.
- */
-static void
-pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
-			const nfs4_stateid *new)
-{
-	nfs4_stateid *old = &lo->stateid;
-	bool overwrite = false;
-
-	write_seqlock(&lo->seqlock);
-	if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
-	    memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
-		overwrite = true;
-	else {
-		u32 oldseq, newseq;
-
-		oldseq = be32_to_cpu(old->stateid.seqid);
-		newseq = be32_to_cpu(new->stateid.seqid);
-		if ((int)(newseq - oldseq) > 0)
-			overwrite = true;
-	}
-	if (overwrite)
-		memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
-	write_sequnlock(&lo->seqlock);
-}
-
-static void
-pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
-			      struct nfs4_state *state)
-{
-	int seq;
-
-	dprintk("--> %s\n", __func__);
-	write_seqlock(&lo->seqlock);
-	do {
-		seq = read_seqbegin(&state->seqlock);
-		memcpy(lo->stateid.data, state->stateid.data,
-		       sizeof(state->stateid.data));
-	} while (read_seqretry(&state->seqlock, seq));
-	set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-	dprintk("<-- %s\n", __func__);
-}
-
+/* update lo->plh_stateid with new if is more recent */
 void
-pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			struct nfs4_state *open_state)
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+			bool update_barrier)
 {
-	int seq;
+	u32 oldseq, newseq;
+
+	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+	newseq = be32_to_cpu(new->stateid.seqid);
+	if ((int)(newseq - oldseq) > 0) {
+		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
+		if (update_barrier) {
+			u32 new_barrier = be32_to_cpu(new->stateid.seqid);
+
+			if ((int)(new_barrier - lo->plh_barrier))
+				lo->plh_barrier = new_barrier;
+		} else {
+			/* Because of wraparound, we want to keep the barrier
+			 * "close" to the current seqids.  It needs to be
+			 * within 2**31 to count as "behind", so if it
+			 * gets too near that limit, give us a little leeway
+			 * and bring it to within 2**30.
+			 * NOTE - and yes, this is all unsigned arithmetic.
+			 */
+			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
+				lo->plh_barrier = newseq - (1 << 30);
+		}
+	}
+}
+
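/*
 * Worked example (a sketch with a hypothetical helper name) of the
 * wraparound-safe seqid comparison used above: ordering is decided by
 * casting the unsigned difference to a signed int, so a value counts as
 * "newer" when the difference is positive modulo 2^32.  With
 * oldseq = 0xfffffffe and newseq = 0x00000001, (int)(newseq - oldseq) == 3,
 * which is > 0, so the newer stateid still wins across the wrap.
 */
static inline bool example_seqid_newer(u32 newseq, u32 oldseq)
{
	return (int)(newseq - oldseq) > 0;
}
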
+/* lget is set to 1 if called from inside send_layoutget call chain */
+static bool
+pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
+			int lget)
+{
+	if ((stateid) &&
+	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
+		return true;
+	return lo->plh_block_lgets ||
+		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+		(list_empty(&lo->plh_segs) &&
+		 (atomic_read(&lo->plh_outstanding) > lget));
+}
+
+int
+pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+			      struct nfs4_state *open_state)
+{
+	int status = 0;
 
 	dprintk("--> %s\n", __func__);
-	do {
-		seq = read_seqbegin(&lo->seqlock);
-		if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
-			/* This will trigger retry of the read */
-			pnfs_layout_from_open_stateid(lo, open_state);
-		} else
-			memcpy(dst->data, lo->stateid.data,
-			       sizeof(lo->stateid.data));
-	} while (read_seqretry(&lo->seqlock, seq));
+	spin_lock(&lo->plh_inode->i_lock);
+	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+		status = -EAGAIN;
+	} else if (list_empty(&lo->plh_segs)) {
+		int seq;
+
+		do {
+			seq = read_seqbegin(&open_state->seqlock);
+			memcpy(dst->data, open_state->stateid.data,
+			       sizeof(open_state->stateid.data));
+		} while (read_seqretry(&open_state->seqlock, seq));
+	} else
+		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
+	spin_unlock(&lo->plh_inode->i_lock);
 	dprintk("<-- %s\n", __func__);
+	return status;
 }
 
 /*
@@ -395,7 +444,7 @@
 	   struct nfs_open_context *ctx,
 	   u32 iomode)
 {
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs4_layoutget *lgp;
 	struct pnfs_layout_segment *lseg = NULL;
@@ -404,10 +453,8 @@
 
 	BUG_ON(ctx == NULL);
 	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
-	if (lgp == NULL) {
-		put_layout_hdr(lo->inode);
+	if (lgp == NULL)
 		return NULL;
-	}
 	lgp->args.minlength = NFS4_MAX_UINT64;
 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
 	lgp->args.range.iomode = iomode;
@@ -424,11 +471,88 @@
 	nfs4_proc_layoutget(lgp);
 	if (!lseg) {
 		/* remember that LAYOUTGET failed and suspend trying */
-		set_bit(lo_fail_bit(iomode), &lo->state);
+		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
 	}
 	return lseg;
 }
 
+bool pnfs_roc(struct inode *ino)
+{
+	struct pnfs_layout_hdr *lo;
+	struct pnfs_layout_segment *lseg, *tmp;
+	LIST_HEAD(tmp_list);
+	bool found = false;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+		goto out_nolayout;
+	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+			mark_lseg_invalid(lseg, &tmp_list);
+			found = true;
+		}
+	if (!found)
+		goto out_nolayout;
+	lo->plh_block_lgets++;
+	get_layout_hdr(lo); /* matched in pnfs_roc_release */
+	spin_unlock(&ino->i_lock);
+	pnfs_free_lseg_list(&tmp_list);
+	return true;
+
+out_nolayout:
+	spin_unlock(&ino->i_lock);
+	return false;
+}
+
+void pnfs_roc_release(struct inode *ino)
+{
+	struct pnfs_layout_hdr *lo;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	lo->plh_block_lgets--;
+	put_layout_hdr_locked(lo);
+	spin_unlock(&ino->i_lock);
+}
+
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+	struct pnfs_layout_hdr *lo;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if ((int)(barrier - lo->plh_barrier) > 0)
+		lo->plh_barrier = barrier;
+	spin_unlock(&ino->i_lock);
+}
+
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+	struct nfs_inode *nfsi = NFS_I(ino);
+	struct pnfs_layout_segment *lseg;
+	bool found = false;
+
+	spin_lock(&ino->i_lock);
+	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
+		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+			found = true;
+			break;
+		}
+	if (!found) {
+		struct pnfs_layout_hdr *lo = nfsi->layout;
+		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+
+		/* Since close does not return a layout stateid for use as
+		 * a barrier, we choose the worst-case barrier.
+		 */
+		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
+	}
+	spin_unlock(&ino->i_lock);
+	return found;
+}
+
 /*
  * Compare two layout segments for sorting into layout cache.
  * We want to preferentially return RW over RO layouts, so ensure those
@@ -450,37 +574,29 @@
 
 	dprintk("%s:Begin\n", __func__);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	if (list_empty(&lo->segs)) {
-		struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
-
-		spin_lock(&clp->cl_lock);
-		BUG_ON(!list_empty(&lo->layouts));
-		list_add_tail(&lo->layouts, &clp->cl_layouts);
-		spin_unlock(&clp->cl_lock);
-	}
-	list_for_each_entry(lp, &lo->segs, fi_list) {
-		if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+	assert_spin_locked(&lo->plh_inode->i_lock);
+	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
+		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
 			continue;
-		list_add_tail(&lseg->fi_list, &lp->fi_list);
+		list_add_tail(&lseg->pls_list, &lp->pls_list);
 		dprintk("%s: inserted lseg %p "
 			"iomode %d offset %llu length %llu before "
 			"lp %p iomode %d offset %llu length %llu\n",
-			__func__, lseg, lseg->range.iomode,
-			lseg->range.offset, lseg->range.length,
-			lp, lp->range.iomode, lp->range.offset,
-			lp->range.length);
+			__func__, lseg, lseg->pls_range.iomode,
+			lseg->pls_range.offset, lseg->pls_range.length,
+			lp, lp->pls_range.iomode, lp->pls_range.offset,
+			lp->pls_range.length);
 		found = 1;
 		break;
 	}
 	if (!found) {
-		list_add_tail(&lseg->fi_list, &lo->segs);
+		list_add_tail(&lseg->pls_list, &lo->plh_segs);
 		dprintk("%s: inserted lseg %p "
 			"iomode %d offset %llu length %llu at tail\n",
-			__func__, lseg, lseg->range.iomode,
-			lseg->range.offset, lseg->range.length);
+			__func__, lseg, lseg->pls_range.iomode,
+			lseg->pls_range.offset, lseg->pls_range.length);
 	}
-	get_layout_hdr_locked(lo);
+	get_layout_hdr(lo);
 
 	dprintk("%s:Return\n", __func__);
 }
@@ -493,11 +609,11 @@
 	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
 	if (!lo)
 		return NULL;
-	lo->refcount = 1;
-	INIT_LIST_HEAD(&lo->layouts);
-	INIT_LIST_HEAD(&lo->segs);
-	seqlock_init(&lo->seqlock);
-	lo->inode = ino;
+	atomic_set(&lo->plh_refcount, 1);
+	INIT_LIST_HEAD(&lo->plh_layouts);
+	INIT_LIST_HEAD(&lo->plh_segs);
+	INIT_LIST_HEAD(&lo->plh_bulk_recall);
+	lo->plh_inode = ino;
 	return lo;
 }
 
@@ -510,9 +626,12 @@
 	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
 
 	assert_spin_locked(&ino->i_lock);
-	if (nfsi->layout)
-		return nfsi->layout;
-
+	if (nfsi->layout) {
+		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
+			return NULL;
+		else
+			return nfsi->layout;
+	}
 	spin_unlock(&ino->i_lock);
 	new = alloc_init_layout_hdr(ino);
 	spin_lock(&ino->i_lock);
@@ -538,31 +657,32 @@
 static int
 is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
 {
-	return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
 }
 
 /*
  * lookup range in layout
  */
 static struct pnfs_layout_segment *
-pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *ret = NULL;
 
 	dprintk("%s:Begin\n", __func__);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry(lseg, &lo->segs, fi_list) {
-		if (is_matching_lseg(lseg, iomode)) {
+	assert_spin_locked(&lo->plh_inode->i_lock);
+	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+		    is_matching_lseg(lseg, iomode)) {
 			ret = lseg;
 			break;
 		}
-		if (cmp_layout(iomode, lseg->range.iomode) > 0)
+		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
 			break;
 	}
 
 	dprintk("%s:Return lseg %p ref %d\n",
-		__func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
 	return ret;
 }
 
@@ -576,6 +696,7 @@
 		   enum pnfs_iomode iomode)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
+	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
 	struct pnfs_layout_hdr *lo;
 	struct pnfs_layout_segment *lseg = NULL;
 
@@ -588,25 +709,53 @@
 		goto out_unlock;
 	}
 
-	/* Check to see if the layout for the given range already exists */
-	lseg = pnfs_has_layout(lo, iomode);
-	if (lseg) {
-		dprintk("%s: Using cached lseg %p for iomode %d)\n",
-			__func__, lseg, iomode);
+	/* Do we even need to bother with this? */
+	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+		dprintk("%s matches recall, use MDS\n", __func__);
 		goto out_unlock;
 	}
-
-	/* if LAYOUTGET already failed once we don't try again */
-	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+	/* Check to see if the layout for the given range already exists */
+	lseg = pnfs_find_lseg(lo, iomode);
+	if (lseg)
 		goto out_unlock;
 
-	get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+	/* if LAYOUTGET already failed once we don't try again */
+	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
+		goto out_unlock;
+
+	if (pnfs_layoutgets_blocked(lo, NULL, 0))
+		goto out_unlock;
+	atomic_inc(&lo->plh_outstanding);
+
+	get_layout_hdr(lo);
+	if (list_empty(&lo->plh_segs)) {
+		/* The lo must be on the clp list if there is any
+		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
+		 */
+		spin_lock(&clp->cl_lock);
+		BUG_ON(!list_empty(&lo->plh_layouts));
+		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
+		spin_unlock(&clp->cl_lock);
+	}
 	spin_unlock(&ino->i_lock);
 
 	lseg = send_layoutget(lo, ctx, iomode);
+	if (!lseg) {
+		spin_lock(&ino->i_lock);
+		if (list_empty(&lo->plh_segs)) {
+			spin_lock(&clp->cl_lock);
+			list_del_init(&lo->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+		}
+		spin_unlock(&ino->i_lock);
+	}
+	atomic_dec(&lo->plh_outstanding);
+	put_layout_hdr(lo);
 out:
 	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
-		nfsi->layout->state, lseg);
+		nfsi->layout->plh_flags, lseg);
 	return lseg;
 out_unlock:
 	spin_unlock(&ino->i_lock);
@@ -619,9 +768,21 @@
 	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
 	struct nfs4_layoutget_res *res = &lgp->res;
 	struct pnfs_layout_segment *lseg;
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
+	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
 	int status = 0;
 
+	/* Verify we got what we asked for.
+	 * Note that because the xdr parsing only accepts a single
+	 * element array, this can fail even if the server is behaving
+	 * correctly.
+	 */
+	if (lgp->args.range.iomode > res->range.iomode ||
+	    res->range.offset != 0 ||
+	    res->range.length != NFS4_MAX_UINT64) {
+		status = -EINVAL;
+		goto out;
+	}
 	/* Inject layout blob into I/O device driver */
 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
 	if (!lseg || IS_ERR(lseg)) {
@@ -635,16 +796,37 @@
 	}
 
 	spin_lock(&ino->i_lock);
+	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+		dprintk("%s forget reply due to recall\n", __func__);
+		goto out_forget_reply;
+	}
+
+	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
+		dprintk("%s forget reply due to state\n", __func__);
+		goto out_forget_reply;
+	}
 	init_lseg(lo, lseg);
-	lseg->range = res->range;
+	lseg->pls_range = res->range;
 	*lgp->lsegpp = lseg;
 	pnfs_insert_layout(lo, lseg);
 
+	if (res->return_on_close) {
+		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
+		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
+	}
+
 	/* Done processing layoutget. Set the layout stateid */
-	pnfs_set_layout_stateid(lo, &res->stateid);
+	pnfs_set_layout_stateid(lo, &res->stateid, false);
 	spin_unlock(&ino->i_lock);
 out:
 	return status;
+
+out_forget_reply:
+	spin_unlock(&ino->i_lock);
+	lseg->pls_layout = lo;
+	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+	goto out;
 }
 
 /*
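
The worst-case barrier computed in pnfs_roc_drain() above is just a sequence-number threshold: if the current layout stateid seqid is s and n LAYOUTGETs are still outstanding, and each of them can bump the seqid by at most one, then no reply the client still owes an answer for can carry a seqid above s + n. A minimal sketch of the wraparound-safe comparison this implies, illustrative only and not code from this patch:

/*
 * Sketch: true if a received seqid is newer than the chosen barrier.
 * Doing the subtraction in u32 and comparing as signed keeps the test
 * correct across 32-bit seqid wraparound.
 */
static bool seqid_is_newer(u32 seqid, u32 barrier)
{
	return (s32)(seqid - barrier) > 0;
}
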
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e12367d..e2612ea 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,11 +30,17 @@
 #ifndef FS_NFS_PNFS_H
 #define FS_NFS_PNFS_H
 
+enum {
+	NFS_LSEG_VALID = 0,	/* cleared when lseg is recalled/returned */
+	NFS_LSEG_ROC,		/* roc bit received from server */
+};
+
 struct pnfs_layout_segment {
-	struct list_head fi_list;
-	struct pnfs_layout_range range;
-	struct kref kref;
-	struct pnfs_layout_hdr *layout;
+	struct list_head pls_list;
+	struct pnfs_layout_range pls_range;
+	atomic_t pls_refcount;
+	unsigned long pls_flags;
+	struct pnfs_layout_hdr *pls_layout;
 };
 
 #ifdef CONFIG_NFS_V4_1
@@ -44,7 +50,9 @@
 enum {
 	NFS_LAYOUT_RO_FAILED = 0,	/* get ro layout failed stop trying */
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
-	NFS_LAYOUT_STATEID_SET,		/* have a valid layout stateid */
+	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
+	NFS_LAYOUT_ROC,			/* some lseg had roc bit set */
+	NFS_LAYOUT_DESTROYED,		/* no new use of layout allowed */
 };
 
 /* Per-layout driver specific registration structure */
@@ -60,13 +68,16 @@
 };
 
 struct pnfs_layout_hdr {
-	unsigned long		refcount;
-	struct list_head	layouts;   /* other client layouts */
-	struct list_head	segs;      /* layout segments list */
-	seqlock_t		seqlock;   /* Protects the stateid */
-	nfs4_stateid		stateid;
-	unsigned long		state;
-	struct inode		*inode;
+	atomic_t		plh_refcount;
+	struct list_head	plh_layouts;   /* other client layouts */
+	struct list_head	plh_bulk_recall; /* clnt list of bulk recalls */
+	struct list_head	plh_segs;      /* layout segments list */
+	nfs4_stateid		plh_stateid;
+	atomic_t		plh_outstanding; /* number of RPCs out */
+	unsigned long		plh_block_lgets; /* block LAYOUTGET if >0 */
+	u32			plh_barrier; /* ignore lower seqids */
+	unsigned long		plh_flags;
+	struct inode		*plh_inode;
 };
 
 struct pnfs_device {
@@ -134,17 +145,30 @@
 extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
 
 /* pnfs.c */
+void get_layout_hdr(struct pnfs_layout_hdr *lo);
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
 		   enum pnfs_iomode access_type);
 void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
 void unset_pnfs_layoutdriver(struct nfs_server *);
 int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_free_lseg_list(struct list_head *tmp_list);
 void pnfs_destroy_layout(struct nfs_inode *);
 void pnfs_destroy_all_layouts(struct nfs_client *);
-void put_layout_hdr(struct inode *inode);
-void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			     struct nfs4_state *open_state);
+void put_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+			     const nfs4_stateid *new,
+			     bool update_barrier);
+int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
+				  struct pnfs_layout_hdr *lo,
+				  struct nfs4_state *open_state);
+int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+				struct list_head *tmp_list,
+				u32 iomode);
+bool pnfs_roc(struct inode *ino);
+void pnfs_roc_release(struct inode *ino);
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
 
 
 static inline int lo_fail_bit(u32 iomode)
@@ -176,6 +200,28 @@
 	return NULL;
 }
 
+static inline bool
+pnfs_roc(struct inode *ino)
+{
+	return false;
+}
+
+static inline void
+pnfs_roc_release(struct inode *ino)
+{
+}
+
+static inline void
+pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+}
+
+static inline bool
+pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+	return false;
+}
+
 static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
 {
 }
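
The !CONFIG_NFS_V4_1 stubs above let the generic close path call the return-on-close hooks unconditionally; without pNFS, pnfs_roc() simply compiles to "return false" and the branch falls away. A hypothetical call-site sketch, with an invented function name, just to show the shape:

/*
 * Hypothetical sketch, not from this patch: no #ifdef CONFIG_NFS_V4_1
 * is needed at the call site thanks to the stubs above.
 */
static void example_close_prepare(struct inode *inode)
{
	if (pnfs_roc(inode)) {
		/* pNFS: the layouts will be returned implicitly by CLOSE;
		 * the caller later picks pnfs_roc_set_barrier() or
		 * pnfs_roc_release() depending on how CLOSE went. */
		return;
	}
	/* non-pNFS build or no return-on-close segments: nothing to do */
}
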
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 58e7f84..77d5e21 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -458,7 +458,7 @@
 	fattr = nfs_alloc_fattr();
 	status = -ENOMEM;
 	if (fh == NULL || fattr == NULL)
-		goto out;
+		goto out_free;
 
 	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	nfs_mark_for_revalidate(dir);
@@ -471,6 +471,7 @@
 	if (status == 0)
 		status = nfs_instantiate(dentry, fh, fattr);
 
+out_free:
 	nfs_free_fattr(fattr);
 	nfs_free_fhandle(fh);
 out:
@@ -731,7 +732,7 @@
 	.statfs		= nfs_proc_statfs,
 	.fsinfo		= nfs_proc_fsinfo,
 	.pathconf	= nfs_proc_pathconf,
-	.decode_dirent	= nfs_decode_dirent,
+	.decode_dirent	= nfs2_decode_dirent,
 	.read_setup	= nfs_proc_read_setup,
 	.read_done	= nfs_read_done,
 	.write_setup	= nfs_proc_write_setup,
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4100630..b68c860 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -598,7 +598,9 @@
 
 	if (nfss->mountd_version || showdefaults)
 		seq_printf(m, ",mountvers=%u", nfss->mountd_version);
-	if (nfss->mountd_port || showdefaults)
+	if ((nfss->mountd_port &&
+		nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) ||
+		showdefaults)
 		seq_printf(m, ",mountport=%u", nfss->mountd_port);
 
 	nfs_show_mountd_netid(m, nfss, showdefaults);
@@ -2200,6 +2202,7 @@
 
 	s->s_flags = sb_mntdata->mntflags;
 	s->s_fs_info = server;
+	s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
 	ret = set_anon_super(s, server);
 	if (ret == 0)
 		server->s_dev = s->s_dev;
@@ -2494,7 +2497,13 @@
 	sb->s_maxbytes = old_sb->s_maxbytes;
 	sb->s_time_gran = 1;
 	sb->s_op = old_sb->s_op;
- 	nfs_initialise_sb(sb);
+	/*
+	 * The VFS shouldn't apply the umask to mode bits. We will do
+	 * so ourselves when necessary.
+	 */
+	sb->s_flags  |= MS_POSIXACL;
+	sb->s_xattr  = old_sb->s_xattr;
+	nfs_initialise_sb(sb);
 }
 
 /*
@@ -2504,6 +2513,12 @@
 {
 	sb->s_time_gran = 1;
 	sb->s_op = &nfs4_sops;
+	/*
+	 * The VFS shouldn't apply the umask to mode bits. We will do
+	 * so ourselves when necessary.
+	 */
+	sb->s_flags  |= MS_POSIXACL;
+	sb->s_xattr = nfs4_xattr_handlers;
 	nfs_initialise_sb(sb);
 }
 
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 7bdec85..e313a51 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -429,7 +429,7 @@
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (data == NULL)
 		return ERR_PTR(-ENOMEM);
-	task_setup_data.callback_data = data,
+	task_setup_data.callback_data = data;
 
 	data->cred = rpc_lookup_cred();
 	if (IS_ERR(data->cred)) {
@@ -496,7 +496,7 @@
 
 	dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name,
-		atomic_read(&dentry->d_count));
+		dentry->d_count);
 	nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
 
 	/*
diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h
new file mode 100644
index 0000000..34e5c40
--- /dev/null
+++ b/fs/nfsd/acl.h
@@ -0,0 +1,59 @@
+/*
+ *  Common NFSv4 ACL handling definitions.
+ *
+ *  Copyright (c) 2002 The Regents of the University of Michigan.
+ *  All rights reserved.
+ *
+ *  Marius Aamodt Eriksen <marius@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LINUX_NFS4_ACL_H
+#define LINUX_NFS4_ACL_H
+
+#include <linux/posix_acl.h>
+
+/* Maximum ACL we'll accept from client; chosen (somewhat arbitrarily) to
+ * fit in a page: */
+#define NFS4_ACL_MAX 170
+
+struct nfs4_acl *nfs4_acl_new(int);
+int nfs4_acl_get_whotype(char *, u32);
+int nfs4_acl_write_who(int who, char *p);
+int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
+		                        uid_t who, u32 mask);
+
+#define NFS4_ACL_TYPE_DEFAULT	0x01
+#define NFS4_ACL_DIR		0x02
+#define NFS4_ACL_OWNER		0x04
+
+struct nfs4_acl *nfs4_acl_posix_to_nfsv4(struct posix_acl *,
+				struct posix_acl *, unsigned int flags);
+int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *, struct posix_acl **,
+				struct posix_acl **, unsigned int flags);
+
+#endif /* LINUX_NFS4_ACL_H */
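
A rough sanity check of the "fits in a page" comment, using estimates rather than figures from the patch: an XDR-encoded ACE carries a type, flag and access mask (4 bytes each) plus a length-prefixed, padded who string, so a short symbolic who such as OWNER@ costs about 12 bytes and the whole ACE about 24; 170 × 24 ≈ 4080 bytes, just under a 4 KB page. Longer principal names blow past that, which is why the limit is only "somewhat arbitrarily" page-sized.
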
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c0fcb7a..8b31e5f 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1,4 +1,3 @@
-#define MSNFS	/* HACK HACK */
 /*
  * NFS exporting and validation.
  *
@@ -1444,9 +1443,6 @@
 	{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
 	{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
 	{ NFSEXP_V4ROOT, {"v4root", ""}},
-#ifdef MSNFS
-	{ NFSEXP_MSNFS, {"msnfs", ""}},
-#endif
 	{ 0, {"", ""}}
 };
 
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
new file mode 100644
index 0000000..2f3be13
--- /dev/null
+++ b/fs/nfsd/idmap.h
@@ -0,0 +1,62 @@
+/*
+ *  Mapping of UID to name and vice versa.
+ *
+ *  Copyright (c) 2002, 2003 The Regents of the University of
+ *  Michigan.  All rights reserved.
+ *
+ *  Marius Aamodt Eriksen <marius@umich.edu>
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *  3. Neither the name of the University nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LINUX_NFSD_IDMAP_H
+#define LINUX_NFSD_IDMAP_H
+
+#include <linux/in.h>
+#include <linux/sunrpc/svc.h>
+
+/* XXX from linux/nfs_idmap.h */
+#define IDMAP_NAMESZ 128
+
+#ifdef CONFIG_NFSD_V4
+int nfsd_idmap_init(void);
+void nfsd_idmap_shutdown(void);
+#else
+static inline int nfsd_idmap_init(void)
+{
+	return 0;
+}
+static inline void nfsd_idmap_shutdown(void)
+{
+}
+#endif
+
+__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
+__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
+int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *);
+int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *);
+
+#endif /* LINUX_NFSD_IDMAP_H */
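
Since the name-to-id mappers now return an NFS status (__be32) rather than a negative errno, callers can hand the result straight back to the client. A hypothetical caller, with an invented function name, sketching the intended pattern:

/*
 * Hypothetical sketch, not from this patch: nfserr_badowner (or any
 * other nfs status) from the mapper propagates out unchanged.
 */
static __be32 example_decode_owner(struct svc_rqst *rqstp,
				   const char *name, size_t namelen,
				   struct iattr *iap)
{
	__u32 uid;
	__be32 status;

	status = nfsd_map_name_to_uid(rqstp, name, namelen, &uid);
	if (status)
		return status;
	iap->ia_uid = uid;
	iap->ia_valid |= ATTR_UID;
	return nfs_ok;
}
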
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 5b7e302..2247fc9 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -151,10 +151,10 @@
 	__be32	nfserr;
 	u32	max_blocksize = svc_max_payload(rqstp);
 
-	dprintk("nfsd: READ(3) %s %lu bytes at %lu\n",
+	dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
 				SVCFH_fmt(&argp->fh),
 				(unsigned long) argp->count,
-				(unsigned long) argp->offset);
+				(unsigned long long) argp->offset);
 
 	/* Obtain buffer pointer for payload.
 	 * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof)
@@ -191,10 +191,10 @@
 	__be32	nfserr;
 	unsigned long cnt = argp->len;
 
-	dprintk("nfsd: WRITE(3)    %s %d bytes at %ld%s\n",
+	dprintk("nfsd: WRITE(3)    %s %d bytes at %Lu%s\n",
 				SVCFH_fmt(&argp->fh),
 				argp->len,
-				(unsigned long) argp->offset,
+				(unsigned long long) argp->offset,
 				argp->stable? " stable" : "");
 
 	fh_copy(&resp->fh, &argp->fh);
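
The switch from %lu to %Lu only matters on 32-bit builds, where casting a 64-bit offset to unsigned long silently drops the high word. A tiny illustration, not taken from the patch:

	u64 offset = 5ULL << 30;				/* 5 GiB */
	printk("old: %lu\n", (unsigned long) offset);		/* 1073741824 on 32-bit */
	printk("new: %Lu\n", (unsigned long long) offset);	/* 5368709120 everywhere */
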
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index e480526..ad88f1c 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -36,7 +36,7 @@
 
 #include <linux/slab.h>
 #include <linux/nfs_fs.h>
-#include <linux/nfs4_acl.h>
+#include "acl.h"
 
 
 /* mode bit translations: */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 143da2e..3be975e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -50,11 +50,6 @@
 	NFSPROC4_CLNT_CB_SEQUENCE,
 };
 
-enum nfs_cb_opnum4 {
-	OP_CB_RECALL            = 4,
-	OP_CB_SEQUENCE          = 11,
-};
-
 #define NFS4_MAXTAGLEN		20
 
 #define NFS4_enc_cb_null_sz		0
@@ -79,61 +74,6 @@
 					cb_sequence_dec_sz +            \
 					op_dec_sz)
 
-/*
-* Generic encode routines from fs/nfs/nfs4xdr.c
-*/
-static inline __be32 *
-xdr_writemem(__be32 *p, const void *ptr, int nbytes)
-{
-	int tmp = XDR_QUADLEN(nbytes);
-	if (!tmp)
-		return p;
-	p[tmp-1] = 0;
-	memcpy(p, ptr, nbytes);
-	return p + tmp;
-}
-
-#define WRITE32(n)               *p++ = htonl(n)
-#define WRITEMEM(ptr,nbytes)     do {                           \
-	p = xdr_writemem(p, ptr, nbytes);                       \
-} while (0)
-#define RESERVE_SPACE(nbytes)   do {                            \
-	p = xdr_reserve_space(xdr, nbytes);                     \
-	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
-	BUG_ON(!p);                                             \
-} while (0)
-
-/*
- * Generic decode routines from fs/nfs/nfs4xdr.c
- */
-#define DECODE_TAIL                             \
-	status = 0;                             \
-out:                                            \
-	return status;                          \
-xdr_error:                                      \
-	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
-	status = -EIO;                          \
-	goto out
-
-#define READ32(x)         (x) = ntohl(*p++)
-#define READ64(x)         do {                  \
-	(x) = (u64)ntohl(*p++) << 32;           \
-	(x) |= ntohl(*p++);                     \
-} while (0)
-#define READTIME(x)       do {                  \
-	p++;                                    \
-	(x.tv_sec) = ntohl(*p++);               \
-	(x.tv_nsec) = ntohl(*p++);              \
-} while (0)
-#define READ_BUF(nbytes)  do { \
-	p = xdr_inline_decode(xdr, nbytes); \
-	if (!p) { \
-		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
-			__func__, __LINE__); \
-		return -EIO; \
-	} \
-} while (0)
-
 struct nfs4_cb_compound_hdr {
 	/* args */
 	u32		ident;	/* minorversion 0 only */
@@ -144,295 +84,513 @@
 	int		status;
 };
 
-static struct {
-int stat;
-int errno;
-} nfs_cb_errtbl[] = {
-	{ NFS4_OK,		0               },
-	{ NFS4ERR_PERM,		EPERM           },
-	{ NFS4ERR_NOENT,	ENOENT          },
-	{ NFS4ERR_IO,		EIO             },
-	{ NFS4ERR_NXIO,		ENXIO           },
-	{ NFS4ERR_ACCESS,	EACCES          },
-	{ NFS4ERR_EXIST,	EEXIST          },
-	{ NFS4ERR_XDEV,		EXDEV           },
-	{ NFS4ERR_NOTDIR,	ENOTDIR         },
-	{ NFS4ERR_ISDIR,	EISDIR          },
-	{ NFS4ERR_INVAL,	EINVAL          },
-	{ NFS4ERR_FBIG,		EFBIG           },
-	{ NFS4ERR_NOSPC,	ENOSPC          },
-	{ NFS4ERR_ROFS,		EROFS           },
-	{ NFS4ERR_MLINK,	EMLINK          },
-	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG    },
-	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY       },
-	{ NFS4ERR_DQUOT,	EDQUOT          },
-	{ NFS4ERR_STALE,	ESTALE          },
-	{ NFS4ERR_BADHANDLE,	EBADHANDLE      },
-	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE      },
-	{ NFS4ERR_NOTSUPP,	ENOTSUPP        },
-	{ NFS4ERR_TOOSMALL,	ETOOSMALL       },
-	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT    },
-	{ NFS4ERR_BADTYPE,	EBADTYPE        },
-	{ NFS4ERR_LOCKED,	EAGAIN          },
-	{ NFS4ERR_RESOURCE,	EREMOTEIO       },
-	{ NFS4ERR_SYMLINK,	ELOOP           },
-	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP      },
-	{ NFS4ERR_DEADLOCK,	EDEADLK         },
-	{ -1,                   EIO             }
-};
-
-static int
-nfs_cb_stat_to_errno(int stat)
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	int i;
-	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
-		if (nfs_cb_errtbl[i].stat == stat)
-			return nfs_cb_errtbl[i].errno;
-	}
-	/* If we cannot translate the error, the recovery routines should
-	* handle it.
-	* Note: remaining NFSv4 error codes have values > 10000, so should
-	* not conflict with native Linux error codes.
-	*/
-	return stat;
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+static __be32 *xdr_encode_empty_array(__be32 *p)
+{
+	*p++ = xdr_zero;
+	return p;
 }
 
 /*
- * XDR encode
+ * Encode/decode NFSv4 CB basic data types
+ *
+ * Basic NFSv4 callback data types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section
+ * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
+ * 1 Protocol"
  */
 
-static void
-encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+/*
+ *	nfs_cb_opnum4
+ *
+ *	enum nfs_cb_opnum4 {
+ *		OP_CB_GETATTR		= 3,
+ *		  ...
+ *	};
+ */
+enum nfs_cb_opnum4 {
+	OP_CB_GETATTR			= 3,
+	OP_CB_RECALL			= 4,
+	OP_CB_LAYOUTRECALL		= 5,
+	OP_CB_NOTIFY			= 6,
+	OP_CB_PUSH_DELEG		= 7,
+	OP_CB_RECALL_ANY		= 8,
+	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
+	OP_CB_RECALL_SLOT		= 10,
+	OP_CB_SEQUENCE			= 11,
+	OP_CB_WANTS_CANCELLED		= 12,
+	OP_CB_NOTIFY_LOCK		= 13,
+	OP_CB_NOTIFY_DEVICEID		= 14,
+	OP_CB_ILLEGAL			= 10044
+};
+
+static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
 {
 	__be32 *p;
 
-	RESERVE_SPACE(sizeof(stateid_t));
-	WRITE32(sid->si_generation);
-	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(op);
 }
 
-static void
-encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
+/*
+ * nfs_fh4
+ *
+ *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
+ */
+static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
+{
+	u32 length = fh->fh_size;
+	__be32 *p;
+
+	BUG_ON(length > NFS4_FHSIZE);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, &fh->fh_base, length);
+}
+
+/*
+ * stateid4
+ *
+ *	struct stateid4 {
+ *		uint32_t	seqid;
+ *		opaque		other[12];
+ *	};
+ */
+static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
+	*p++ = cpu_to_be32(sid->si_generation);
+	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
+}
+
+/*
+ * sessionid4
+ *
+ *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
+ */
+static void encode_sessionid4(struct xdr_stream *xdr,
+			      const struct nfsd4_session *session)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
+	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
+					NFS4_MAX_SESSIONID_LEN);
+}
+
+/*
+ * nfsstat4
+ */
+static const struct {
+	int stat;
+	int errno;
+} nfs_cb_errtbl[] = {
+	{ NFS4_OK,		0		},
+	{ NFS4ERR_PERM,		-EPERM		},
+	{ NFS4ERR_NOENT,	-ENOENT		},
+	{ NFS4ERR_IO,		-EIO		},
+	{ NFS4ERR_NXIO,		-ENXIO		},
+	{ NFS4ERR_ACCESS,	-EACCES		},
+	{ NFS4ERR_EXIST,	-EEXIST		},
+	{ NFS4ERR_XDEV,		-EXDEV		},
+	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
+	{ NFS4ERR_ISDIR,	-EISDIR		},
+	{ NFS4ERR_INVAL,	-EINVAL		},
+	{ NFS4ERR_FBIG,		-EFBIG		},
+	{ NFS4ERR_NOSPC,	-ENOSPC		},
+	{ NFS4ERR_ROFS,		-EROFS		},
+	{ NFS4ERR_MLINK,	-EMLINK		},
+	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
+	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
+	{ NFS4ERR_DQUOT,	-EDQUOT		},
+	{ NFS4ERR_STALE,	-ESTALE		},
+	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
+	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
+	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
+	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
+	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
+	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
+	{ NFS4ERR_LOCKED,	-EAGAIN		},
+	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
+	{ NFS4ERR_SYMLINK,	-ELOOP		},
+	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
+	{ -1,			-EIO		}
+};
+
+/*
+ * If we cannot translate the error, the recovery routines should
+ * handle it.
+ *
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+static int nfs_cb_stat_to_errno(int status)
+{
+	int i;
+
+	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
+		if (nfs_cb_errtbl[i].stat == status)
+			return nfs_cb_errtbl[i].errno;
+	}
+
+	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
+	return -status;
+}
+
+static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+			       enum nfsstat4 *status)
+{
+	__be32 *p;
+	u32 op;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	op = be32_to_cpup(p++);
+	if (unlikely(op != expected))
+		goto out_unexpected;
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+out_unexpected:
+	dprintk("NFSD: Callback server returned operation %d but "
+		"we issued a request for %d\n", op, expected);
+	return -EIO;
+}
+
+/*
+ * CB_COMPOUND4args
+ *
+ *	struct CB_COMPOUND4args {
+ *		utf8str_cs	tag;
+ *		uint32_t	minorversion;
+ *		uint32_t	callback_ident;
+ *		nfs_cb_argop4	argarray<>;
+ *	};
+ */
+static void encode_cb_compound4args(struct xdr_stream *xdr,
+				    struct nfs4_cb_compound_hdr *hdr)
 {
 	__be32 * p;
 
-	RESERVE_SPACE(16);
-	WRITE32(0);            /* tag length is always 0 */
-	WRITE32(hdr->minorversion);
-	WRITE32(hdr->ident);
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+	p = xdr_encode_empty_array(p);		/* empty tag */
+	*p++ = cpu_to_be32(hdr->minorversion);
+	*p++ = cpu_to_be32(hdr->ident);
+
 	hdr->nops_p = p;
-	WRITE32(hdr->nops);
+	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
 }
 
+/*
+ * Update argarray element count
+ */
 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
 {
-	*hdr->nops_p = htonl(hdr->nops);
+	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
+	*hdr->nops_p = cpu_to_be32(hdr->nops);
 }
 
-static void
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
-		struct nfs4_cb_compound_hdr *hdr)
+/*
+ * CB_COMPOUND4res
+ *
+ *	struct CB_COMPOUND4res {
+ *		nfsstat4	status;
+ *		utf8str_cs	tag;
+ *		nfs_cb_resop4	resarray<>;
+ *	};
+ */
+static int decode_cb_compound4res(struct xdr_stream *xdr,
+				  struct nfs4_cb_compound_hdr *hdr)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	hdr->status = be32_to_cpup(p++);
+	/* Ignore the tag */
+	length = be32_to_cpup(p++);
+	p = xdr_inline_decode(xdr, length + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	hdr->nops = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * CB_RECALL4args
+ *
+ *	struct CB_RECALL4args {
+ *		stateid4	stateid;
+ *		bool		truncate;
+ *		nfs_fh4		fh;
+ *	};
+ */
+static void encode_cb_recall4args(struct xdr_stream *xdr,
+				  const struct nfs4_delegation *dp,
+				  struct nfs4_cb_compound_hdr *hdr)
 {
 	__be32 *p;
-	int len = dp->dl_fh.fh_size;
 
-	RESERVE_SPACE(4);
-	WRITE32(OP_CB_RECALL);
-	encode_stateid(xdr, &dp->dl_stateid);
-	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
-	WRITE32(0); /* truncate optimization not implemented */
-	WRITE32(len);
-	WRITEMEM(&dp->dl_fh.fh_base, len);
+	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
+	encode_stateid4(xdr, &dp->dl_stateid);
+
+	p = xdr_reserve_space(xdr, 4);
+	*p++ = xdr_zero;			/* truncate */
+
+	encode_nfs_fh4(xdr, &dp->dl_fh);
+
 	hdr->nops++;
 }
 
-static void
-encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
-		   struct nfs4_cb_compound_hdr *hdr)
+/*
+ * CB_SEQUENCE4args
+ *
+ *	struct CB_SEQUENCE4args {
+ *		sessionid4		csa_sessionid;
+ *		sequenceid4		csa_sequenceid;
+ *		slotid4			csa_slotid;
+ *		slotid4			csa_highest_slotid;
+ *		bool			csa_cachethis;
+ *		referring_call_list4	csa_referring_call_lists<>;
+ *	};
+ */
+static void encode_cb_sequence4args(struct xdr_stream *xdr,
+				    const struct nfsd4_callback *cb,
+				    struct nfs4_cb_compound_hdr *hdr)
 {
+	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
 	__be32 *p;
-	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
 
 	if (hdr->minorversion == 0)
 		return;
 
-	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
+	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
+	encode_sessionid4(xdr, session);
 
-	WRITE32(OP_CB_SEQUENCE);
-	WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
-	WRITE32(ses->se_cb_seq_nr);
-	WRITE32(0);		/* slotid, always 0 */
-	WRITE32(0);		/* highest slotid always 0 */
-	WRITE32(0);		/* cachethis always 0 */
-	WRITE32(0); /* FIXME: support referring_call_lists */
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
+	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
+	*p++ = xdr_zero;			/* csa_slotid */
+	*p++ = xdr_zero;			/* csa_highest_slotid */
+	*p++ = xdr_zero;			/* csa_cachethis */
+	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
+
 	hdr->nops++;
 }
 
-static int
-nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
-{
-	struct xdr_stream xdrs, *xdr = &xdrs;
-
-	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
-        RESERVE_SPACE(0);
-	return 0;
-}
-
-static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
-		struct nfsd4_callback *cb)
-{
-	struct xdr_stream xdr;
-	struct nfs4_delegation *args = cb->cb_op;
-	struct nfs4_cb_compound_hdr hdr = {
-		.ident = cb->cb_clp->cl_cb_ident,
-		.minorversion = cb->cb_minorversion,
-	};
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_cb_compound_hdr(&xdr, &hdr);
-	encode_cb_sequence(&xdr, cb, &hdr);
-	encode_cb_recall(&xdr, args, &hdr);
-	encode_cb_nops(&hdr);
-	return 0;
-}
-
-
-static int
-decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){
-        __be32 *p;
-	u32 taglen;
-
-        READ_BUF(8);
-        READ32(hdr->status);
-	/* We've got no use for the tag; ignore it: */
-        READ32(taglen);
-        READ_BUF(taglen + 4);
-        p += XDR_QUADLEN(taglen);
-        READ32(hdr->nops);
-        return 0;
-}
-
-static int
-decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
-{
-	__be32 *p;
-	u32 op;
-	int32_t nfserr;
-
-	READ_BUF(8);
-	READ32(op);
-	if (op != expected) {
-		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
-		         " operation %d but we issued a request for %d\n",
-		         op, expected);
-		return -EIO;
-	}
-	READ32(nfserr);
-	if (nfserr != NFS_OK)
-		return -nfs_cb_stat_to_errno(nfserr);
-	return 0;
-}
-
 /*
+ * CB_SEQUENCE4resok
+ *
+ *	struct CB_SEQUENCE4resok {
+ *		sessionid4	csr_sessionid;
+ *		sequenceid4	csr_sequenceid;
+ *		slotid4		csr_slotid;
+ *		slotid4		csr_highest_slotid;
+ *		slotid4		csr_target_highest_slotid;
+ *	};
+ *
+ *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
+ *	case NFS4_OK:
+ *		CB_SEQUENCE4resok	csr_resok4;
+ *	default:
+ *		void;
+ *	};
+ *
  * Our current back channel implementation supports a single backchannel
  * with a single slot.
  */
-static int
-decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
-		   struct rpc_rqst *rqstp)
+static int decode_cb_sequence4resok(struct xdr_stream *xdr,
+				    struct nfsd4_callback *cb)
 {
-	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
+	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
 	struct nfs4_sessionid id;
 	int status;
-	u32 dummy;
 	__be32 *p;
+	u32 dummy;
 
-	if (cb->cb_minorversion == 0)
-		return 0;
-
-	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
-	if (status)
-		return status;
+	status = -ESERVERFAULT;
 
 	/*
 	 * If the server returns different values for sessionID, slotID or
 	 * sequence number, the server is looney tunes.
 	 */
-	status = -ESERVERFAULT;
-
-	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
 	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+	if (memcmp(id.data, session->se_sessionid.data,
+					NFS4_MAX_SESSIONID_LEN) != 0) {
+		dprintk("NFS: %s Invalid session id\n", __func__);
+		goto out;
+	}
 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
-	if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
-		dprintk("%s Invalid session id\n", __func__);
+
+	dummy = be32_to_cpup(p++);
+	if (dummy != session->se_cb_seq_nr) {
+		dprintk("NFS: %s Invalid sequence number\n", __func__);
 		goto out;
 	}
-	READ32(dummy);
-	if (dummy != ses->se_cb_seq_nr) {
-		dprintk("%s Invalid sequence number\n", __func__);
-		goto out;
-	}
-	READ32(dummy); 	/* slotid must be 0 */
+
+	dummy = be32_to_cpup(p++);
 	if (dummy != 0) {
-		dprintk("%s Invalid slotid\n", __func__);
+		dprintk("NFS: %s Invalid slotid\n", __func__);
 		goto out;
 	}
-	/* FIXME: process highest slotid and target highest slotid */
+
+	/*
+	 * FIXME: process highest slotid and target highest slotid
+	 */
 	status = 0;
 out:
 	return status;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int decode_cb_sequence4res(struct xdr_stream *xdr,
+				  struct nfsd4_callback *cb)
+{
+	enum nfsstat4 nfserr;
+	int status;
+
+	if (cb->cb_minorversion == 0)
+		return 0;
+
+	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
+	if (unlikely(status))
+		goto out;
+	if (unlikely(nfserr != NFS4_OK))
+		goto out_default;
+	status = decode_cb_sequence4resok(xdr, cb);
+out:
+	return status;
+out_default:
+	return nfs_cb_stat_to_errno(nfserr);
+}
+
+/*
+ * NFSv4.0 and NFSv4.1 XDR encode functions
+ *
+ * NFSv4.0 callback argument types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+/*
+ * NB: Without this zero space reservation, callbacks over krb5p fail
+ */
+static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 void *__unused)
+{
+	xdr_reserve_space(xdr, 0);
+}
+
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+				   const struct nfsd4_callback *cb)
+{
+	const struct nfs4_delegation *args = cb->cb_op;
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = cb->cb_clp->cl_cb_ident,
+		.minorversion = cb->cb_minorversion,
+	};
+
+	encode_cb_compound4args(xdr, &hdr);
+	encode_cb_sequence4args(xdr, cb, &hdr);
+	encode_cb_recall4args(xdr, args, &hdr);
+	encode_cb_nops(&hdr);
 }
 
 
-static int
-nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
+/*
+ * NFSv4.0 and NFSv4.1 XDR decode functions
+ *
+ * NFSv4.0 callback result types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+				void *__unused)
 {
 	return 0;
 }
 
-static int
-nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
-		struct nfsd4_callback *cb)
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
+				  struct nfsd4_callback *cb)
 {
-	struct xdr_stream xdr;
 	struct nfs4_cb_compound_hdr hdr;
+	enum nfsstat4 nfserr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_cb_compound_hdr(&xdr, &hdr);
-	if (status)
+	status = decode_cb_compound4res(xdr, &hdr);
+	if (unlikely(status))
 		goto out;
-	if (cb) {
-		status = decode_cb_sequence(&xdr, cb, rqstp);
-		if (status)
+
+	if (cb != NULL) {
+		status = decode_cb_sequence4res(xdr, cb);
+		if (unlikely(status))
 			goto out;
 	}
-	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
+
+	status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
+	if (unlikely(status))
+		goto out;
+	if (unlikely(nfserr != NFS4_OK))
+		goto out_default;
 out:
 	return status;
+out_default:
+	return nfs_cb_stat_to_errno(nfserr);
 }
 
 /*
  * RPC procedure tables
  */
-#define PROC(proc, call, argtype, restype)                              \
-[NFSPROC4_CLNT_##proc] = {                                      	\
-        .p_proc   = NFSPROC4_CB_##call,					\
-        .p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
-        .p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
-        .p_arglen = NFS4_##argtype##_sz,                                \
-        .p_replen = NFS4_##restype##_sz,                                \
-        .p_statidx = NFSPROC4_CB_##call,				\
-	.p_name   = #proc,                                              \
+#define PROC(proc, call, argtype, restype)				\
+[NFSPROC4_CLNT_##proc] = {						\
+	.p_proc    = NFSPROC4_CB_##call,				\
+	.p_encode  = (kxdreproc_t)nfs4_xdr_enc_##argtype,		\
+	.p_decode  = (kxdrdproc_t)nfs4_xdr_dec_##restype,		\
+	.p_arglen  = NFS4_enc_##argtype##_sz,				\
+	.p_replen  = NFS4_dec_##restype##_sz,				\
+	.p_statidx = NFSPROC4_CB_##call,				\
+	.p_name    = #proc,						\
 }
 
-static struct rpc_procinfo     nfs4_cb_procedures[] = {
-    PROC(CB_NULL,      NULL,     enc_cb_null,     dec_cb_null),
-    PROC(CB_RECALL,    COMPOUND,   enc_cb_recall,      dec_cb_recall),
+static struct rpc_procinfo nfs4_cb_procedures[] = {
+	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
+	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
 };
 
-static struct rpc_version       nfs_cb_version4 = {
+static struct rpc_version nfs_cb_version4 = {
 /*
  * Note on the callback rpc program version number: despite language in rfc
  * 5661 section 18.36.3 requiring servers to use 4 in this field, the
@@ -440,29 +598,29 @@
  * in practice that appears to be what implementations use.  The section
  * 18.36.3 language is expected to be fixed in an erratum.
  */
-        .number                 = 1,
-        .nrprocs                = ARRAY_SIZE(nfs4_cb_procedures),
-        .procs                  = nfs4_cb_procedures
+	.number			= 1,
+	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
+	.procs			= nfs4_cb_procedures
 };
 
-static struct rpc_version *	nfs_cb_version[] = {
+static struct rpc_version *nfs_cb_version[] = {
 	&nfs_cb_version4,
 };
 
 static struct rpc_program cb_program;
 
 static struct rpc_stat cb_stats = {
-		.program	= &cb_program
+	.program		= &cb_program
 };
 
 #define NFS4_CALLBACK 0x40000000
 static struct rpc_program cb_program = {
-		.name 		= "nfs4_cb",
-		.number		= NFS4_CALLBACK,
-		.nrvers		= ARRAY_SIZE(nfs_cb_version),
-		.version	= nfs_cb_version,
-		.stats		= &cb_stats,
-		.pipe_dir_name  = "/nfsd4_cb",
+	.name			= "nfs4_cb",
+	.number			= NFS4_CALLBACK,
+	.nrvers			= ARRAY_SIZE(nfs_cb_version),
+	.version		= nfs_cb_version,
+	.stats			= &cb_stats,
+	.pipe_dir_name		= "/nfsd4_cb",
 };
 
 static int max_cb_time(void)
@@ -470,10 +628,8 @@
 	return max(nfsd4_lease/10, (time_t)1) * HZ;
 }
 
-/* Reference counting, callback cleanup, etc., all look racy as heck.
- * And why is cl_cb_set an atomic? */
 
-int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
+static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
 	struct rpc_timeout	timeparms = {
 		.to_initval	= max_cb_time(),
@@ -483,6 +639,7 @@
 		.net		= &init_net,
 		.address	= (struct sockaddr *) &conn->cb_addr,
 		.addrsize	= conn->cb_addrlen,
+		.saddress	= (struct sockaddr *) &conn->cb_saddr,
 		.timeout	= &timeparms,
 		.program	= &cb_program,
 		.version	= 0,
@@ -499,6 +656,10 @@
 		args.protocol = XPRT_TRANSPORT_TCP;
 		clp->cl_cb_ident = conn->cb_ident;
 	} else {
+		if (!conn->cb_xprt)
+			return -EINVAL;
+		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
+		clp->cl_cb_session = ses;
 		args.bc_xprt = conn->cb_xprt;
 		args.prognumber = clp->cl_cb_session->se_cb_prog;
 		args.protocol = XPRT_TRANSPORT_BC_TCP;
@@ -521,14 +682,20 @@
 		(int)clp->cl_name.len, clp->cl_name.data, reason);
 }
 
+static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
+{
+	clp->cl_cb_state = NFSD4_CB_DOWN;
+	warn_no_callback_path(clp, reason);
+}
+
 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
 
 	if (task->tk_status)
-		warn_no_callback_path(clp, task->tk_status);
+		nfsd4_mark_cb_down(clp, task->tk_status);
 	else
-		atomic_set(&clp->cl_cb_set, 1);
+		clp->cl_cb_state = NFSD4_CB_UP;
 }
 
 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -551,6 +718,11 @@
 
 static struct workqueue_struct *callback_wq;
 
+static void run_nfsd4_cb(struct nfsd4_callback *cb)
+{
+	queue_work(callback_wq, &cb->cb_work);
+}
+
 static void do_probe_callback(struct nfs4_client *clp)
 {
 	struct nfsd4_callback *cb = &clp->cl_cb_null;
@@ -565,7 +737,7 @@
 
 	cb->cb_ops = &nfsd4_cb_probe_ops;
 
-	queue_work(callback_wq, &cb->cb_work);
+	run_nfsd4_cb(cb);
 }
 
 /*
@@ -574,14 +746,21 @@
  */
 void nfsd4_probe_callback(struct nfs4_client *clp)
 {
+	/* XXX: atomicity?  Also, should we be using cl_cb_flags? */
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
 	do_probe_callback(clp);
 }
 
+void nfsd4_probe_callback_sync(struct nfs4_client *clp)
+{
+	nfsd4_probe_callback(clp);
+	flush_workqueue(callback_wq);
+}
+
 void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
 {
-	BUG_ON(atomic_read(&clp->cl_cb_set));
-
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	spin_lock(&clp->cl_lock);
 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
 	spin_unlock(&clp->cl_lock);
@@ -592,24 +771,14 @@
  * If the slot is available, then mark it busy.  Otherwise, set the
  * thread for sleeping on the callback RPC wait queue.
  */
-static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
-		struct rpc_task *task)
+static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
 {
-	u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
-	int status = 0;
-
-	dprintk("%s: %u:%u:%u:%u\n", __func__,
-		ptr[0], ptr[1], ptr[2], ptr[3]);
-
 	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
 		dprintk("%s slot is busy\n", __func__);
-		status = -EAGAIN;
-		goto out;
+		return false;
 	}
-out:
-	dprintk("%s status=%d\n", __func__, status);
-	return status;
+	return true;
 }
 
 /*
@@ -622,20 +791,19 @@
 	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
 	struct nfs4_client *clp = dp->dl_client;
 	u32 minorversion = clp->cl_minorversion;
-	int status = 0;
 
 	cb->cb_minorversion = minorversion;
 	if (minorversion) {
-		status = nfsd41_cb_setup_sequence(clp, task);
-		if (status) {
-			if (status != -EAGAIN) {
-				/* terminate rpc task */
-				task->tk_status = status;
-				task->tk_action = NULL;
-			}
+		if (!nfsd41_cb_get_slot(clp, task))
 			return;
-		}
 	}
+	spin_lock(&clp->cl_lock);
+	if (list_empty(&cb->cb_per_client)) {
+		/* This is the first call, not a restart */
+		cb->cb_done = false;
+		list_add(&cb->cb_per_client, &clp->cl_callbacks);
+	}
+	spin_unlock(&clp->cl_lock);
 	rpc_call_start(task);
 }
 
@@ -671,15 +839,18 @@
 
 	nfsd4_cb_done(task, calldata);
 
-	if (current_rpc_client == NULL) {
-		/* We're shutting down; give up. */
-		/* XXX: err, or is it ok just to fall through
-		 * and rpc_restart_call? */
+	if (current_rpc_client != task->tk_client) {
+		/* We're shutting down or changing cl_cb_client; leave
+		 * it to nfsd4_process_cb_update to restart the call if
+		 * necessary. */
 		return;
 	}
 
+	if (cb->cb_done)
+		return;
 	switch (task->tk_status) {
 	case 0:
+		cb->cb_done = true;
 		return;
 	case -EBADHANDLE:
 	case -NFS4ERR_BAD_STATEID:
@@ -688,32 +859,30 @@
 		break;
 	default:
 		/* Network partition? */
-		atomic_set(&clp->cl_cb_set, 0);
-		warn_no_callback_path(clp, task->tk_status);
-		if (current_rpc_client != task->tk_client) {
-			/* queue a callback on the new connection: */
-			atomic_inc(&dp->dl_count);
-			nfsd4_cb_recall(dp);
-			return;
-		}
+		nfsd4_mark_cb_down(clp, task->tk_status);
 	}
 	if (dp->dl_retries--) {
 		rpc_delay(task, 2*HZ);
 		task->tk_status = 0;
 		rpc_restart_call_prepare(task);
 		return;
-	} else {
-		atomic_set(&clp->cl_cb_set, 0);
-		warn_no_callback_path(clp, task->tk_status);
 	}
+	nfsd4_mark_cb_down(clp, task->tk_status);
+	cb->cb_done = true;
 }
 
 static void nfsd4_cb_recall_release(void *calldata)
 {
 	struct nfsd4_callback *cb = calldata;
+	struct nfs4_client *clp = cb->cb_clp;
 	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
 
-	nfs4_put_delegation(dp);
+	if (cb->cb_done) {
+		spin_lock(&clp->cl_lock);
+		list_del(&cb->cb_per_client);
+		spin_unlock(&clp->cl_lock);
+		nfs4_put_delegation(dp);
+	}
 }
 
 static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -748,16 +917,33 @@
 	flush_workqueue(callback_wq);
 }
 
-void nfsd4_release_cb(struct nfsd4_callback *cb)
+static void nfsd4_release_cb(struct nfsd4_callback *cb)
 {
 	if (cb->cb_ops->rpc_release)
 		cb->cb_ops->rpc_release(cb);
 }
 
-void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+/* requires cl_lock: */
+static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
+{
+	struct nfsd4_session *s;
+	struct nfsd4_conn *c;
+
+	list_for_each_entry(s, &clp->cl_sessions, se_perclnt) {
+		list_for_each_entry(c, &s->se_conns, cn_persession) {
+			if (c->cn_flags & NFS4_CDFC4_BACK)
+				return c;
+		}
+	}
+	return NULL;
+}
+
+static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
 {
 	struct nfs4_cb_conn conn;
 	struct nfs4_client *clp = cb->cb_clp;
+	struct nfsd4_session *ses = NULL;
+	struct nfsd4_conn *c;
 	int err;
 
 	/*
@@ -768,6 +954,10 @@
 		rpc_shutdown_client(clp->cl_cb_client);
 		clp->cl_cb_client = NULL;
 	}
+	if (clp->cl_cb_conn.cb_xprt) {
+		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+		clp->cl_cb_conn.cb_xprt = NULL;
+	}
 	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
 		return;
 	spin_lock(&clp->cl_lock);
@@ -778,11 +968,22 @@
 	BUG_ON(!clp->cl_cb_flags);
 	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
 	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
+	c = __nfsd4_find_backchannel(clp);
+	if (c) {
+		svc_xprt_get(c->cn_xprt);
+		conn.cb_xprt = c->cn_xprt;
+		ses = c->cn_session;
+	}
 	spin_unlock(&clp->cl_lock);
 
-	err = setup_callback_client(clp, &conn);
-	if (err)
+	err = setup_callback_client(clp, &conn, ses);
+	if (err) {
 		warn_no_callback_path(clp, err);
+		return;
+	}
+	/* Yay, the callback channel's back! Restart any callbacks: */
+	list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
+		run_nfsd4_cb(cb);
 }
 
 void nfsd4_do_callback_rpc(struct work_struct *w)
@@ -807,10 +1008,11 @@
 void nfsd4_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfsd4_callback *cb = &dp->dl_recall;
+	struct nfs4_client *clp = dp->dl_client;
 
 	dp->dl_retries = 1;
 	cb->cb_op = dp;
-	cb->cb_clp = dp->dl_client;
+	cb->cb_clp = clp;
 	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
 	cb->cb_msg.rpc_argp = cb;
 	cb->cb_msg.rpc_resp = cb;
@@ -819,5 +1021,8 @@
 	cb->cb_ops = &nfsd4_cb_recall_ops;
 	dp->dl_retries = 1;
 
-	queue_work(callback_wq, &dp->dl_recall.cb_work);
+	INIT_LIST_HEAD(&cb->cb_per_client);
+	cb->cb_done = true;
+
+	run_nfsd4_cb(&dp->dl_recall);
 }
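
With the XDR layer split into one small helper per RFC data type, wiring up a further callback operation becomes mostly boilerplate. As a hypothetical sketch, not part of this patch, CB_RECALL_ANY (RFC 5661, section 20.6) would encode a keep-count plus a one-word bitmap of recallable types roughly like this:

/*
 * Hypothetical sketch only: reuses encode_nfs_cb_opnum4() and the
 * OP_CB_RECALL_ANY value defined above.
 */
static void encode_cb_recallany4args(struct xdr_stream *xdr,
				     struct nfs4_cb_compound_hdr *hdr,
				     u32 craa_objs_to_keep, u32 craa_type_mask)
{
	__be32 *p;

	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL_ANY);
	p = xdr_reserve_space(xdr, 4 + 4 + 4);
	*p++ = cpu_to_be32(craa_objs_to_keep);
	*p++ = cpu_to_be32(1);			/* bitmap4 of one word */
	*p = cpu_to_be32(craa_type_mask);
	hdr->nops++;
}
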
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index f0695e8..6d2c397 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -33,10 +33,11 @@
  */
 
 #include <linux/module.h>
-#include <linux/nfsd_idmap.h>
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include "idmap.h"
+#include "nfsd.h"
 
 /*
  * Cache entry
@@ -514,7 +515,7 @@
 	return clp->name;
 }
 
-static int
+static __be32
 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
 		uid_t *id)
 {
@@ -524,15 +525,15 @@
 	int ret;
 
 	if (namelen + 1 > sizeof(key.name))
-		return -EINVAL;
+		return nfserr_badowner;
 	memcpy(key.name, name, namelen);
 	key.name[namelen] = '\0';
 	strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
 	ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
 	if (ret == -ENOENT)
-		ret = -ESRCH; /* nfserr_badname */
+		return nfserr_badowner;
 	if (ret)
-		return ret;
+		return nfserrno(ret);
 	*id = item->id;
 	cache_put(&item->h, &nametoid_cache);
 	return 0;
@@ -560,14 +561,14 @@
 	return ret;
 }
 
-int
+__be32
 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
 		__u32 *id)
 {
 	return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
 }
 
-int
+__be32
 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
 		__u32 *id)
 {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0cdfd02..db52546 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -604,9 +604,7 @@
 	return status;
 }
 
-static __be32
-nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
-	      void *arg)
+static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
 {
 	struct svc_fh tmp_fh;
 	__be32 ret;
@@ -615,13 +613,19 @@
 	ret = exp_pseudoroot(rqstp, &tmp_fh);
 	if (ret)
 		return ret;
-	if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
+	if (tmp_fh.fh_dentry == fh->fh_dentry) {
 		fh_put(&tmp_fh);
 		return nfserr_noent;
 	}
 	fh_put(&tmp_fh);
-	return nfsd_lookup(rqstp, &cstate->current_fh,
-			   "..", 2, &cstate->current_fh);
+	return nfsd_lookup(rqstp, fh, "..", 2, fh);
+}
+
+static __be32
+nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      void *arg)
+{
+	return nfsd4_do_lookupp(rqstp, &cstate->current_fh);
 }
 
 static __be32
@@ -769,10 +773,36 @@
 	} else
 		secinfo->si_exp = exp;
 	dput(dentry);
+	if (cstate->minorversion)
+		/* See rfc 5661 section 2.6.3.1.1.8 */
+		fh_put(&cstate->current_fh);
 	return err;
 }
 
 static __be32
+nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+	      struct nfsd4_secinfo_no_name *sin)
+{
+	__be32 err;
+
+	switch (sin->sin_style) {
+	case NFS4_SECINFO_STYLE4_CURRENT_FH:
+		break;
+	case NFS4_SECINFO_STYLE4_PARENT:
+		err = nfsd4_do_lookupp(rqstp, &cstate->current_fh);
+		if (err)
+			return err;
+		break;
+	default:
+		return nfserr_inval;
+	}
+	exp_get(cstate->current_fh.fh_export);
+	sin->sin_exp = cstate->current_fh.fh_export;
+	fh_put(&cstate->current_fh);
+	return nfs_ok;
+}
+
+static __be32
 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	      struct nfsd4_setattr *setattr)
 {
@@ -974,8 +1004,8 @@
  * Also note, enforced elsewhere:
  *	- SEQUENCE other than as first op results in
  *	  NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
- *	- BIND_CONN_TO_SESSION must be the only op in its compound
- *	  (Will be enforced in nfsd4_bind_conn_to_session().)
+ *	- BIND_CONN_TO_SESSION must be the only op in its compound.
+ *	  (Enforced in nfsd4_bind_conn_to_session().)
  *	- DESTROY_SESSION must be the final operation in a compound, if
  *	  sessionid's in SEQUENCE and DESTROY_SESSION are the same.
  *	  (Enforced in nfsd4_destroy_session().)
@@ -1126,10 +1156,6 @@
 
 		nfsd4_increment_op_stats(op->opnum);
 	}
-	if (!rqstp->rq_usedeferral && status == nfserr_dropit) {
-		dprintk("%s Dropit - send NFS4ERR_DELAY\n", __func__);
-		status = nfserr_jukebox;
-	}
 
 	resp->cstate.status = status;
 	fh_put(&resp->cstate.current_fh);
@@ -1300,6 +1326,11 @@
 		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
 		.op_name = "OP_EXCHANGE_ID",
 	},
+	[OP_BIND_CONN_TO_SESSION] = {
+		.op_func = (nfsd4op_func)nfsd4_bind_conn_to_session,
+		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
+		.op_name = "OP_BIND_CONN_TO_SESSION",
+	},
 	[OP_CREATE_SESSION] = {
 		.op_func = (nfsd4op_func)nfsd4_create_session,
 		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
@@ -1320,6 +1351,10 @@
 		.op_flags = ALLOWED_WITHOUT_FH,
 		.op_name = "OP_RECLAIM_COMPLETE",
 	},
+	[OP_SECINFO_NO_NAME] = {
+		.op_func = (nfsd4op_func)nfsd4_secinfo_no_name,
+		.op_name = "OP_SECINFO_NO_NAME",
+	},
 };
 
 static const char *nfsd4_op_name(unsigned opnum)
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 7e26caa..ffb59ef 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -302,7 +302,6 @@
 {
 	int status;
 
-	/* note: we currently use this path only for minorversion 0 */
 	if (nfs4_has_reclaimed_state(child->d_name.name, false))
 		return 0;
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 116cab9..d98d021 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -230,7 +230,8 @@
 	dp->dl_client = clp;
 	get_nfs4_file(fp);
 	dp->dl_file = fp;
-	nfs4_file_get_access(fp, O_RDONLY);
+	dp->dl_vfs_file = find_readable_file(fp);
+	get_file(dp->dl_vfs_file);
 	dp->dl_flock = NULL;
 	dp->dl_type = type;
 	dp->dl_stateid.si_boot = boot_time;
@@ -252,6 +253,7 @@
 	if (atomic_dec_and_test(&dp->dl_count)) {
 		dprintk("NFSD: freeing dp %p\n",dp);
 		put_nfs4_file(dp->dl_file);
+		fput(dp->dl_vfs_file);
 		kmem_cache_free(deleg_slab, dp);
 		num_delegations--;
 	}
@@ -265,12 +267,10 @@
 static void
 nfs4_close_delegation(struct nfs4_delegation *dp)
 {
-	struct file *filp = find_readable_file(dp->dl_file);
-
 	dprintk("NFSD: close_delegation dp %p\n",dp);
+	/* XXX: do we even need this check?: */
 	if (dp->dl_flock)
-		vfs_setlease(filp, F_UNLCK, &dp->dl_flock);
-	nfs4_file_put_access(dp->dl_file, O_RDONLY);
+		vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock);
 }
 
 /* Called under the state lock. */
@@ -642,6 +642,7 @@
 		free_conn(c);
 	}
 	spin_unlock(&clp->cl_lock);
+	nfsd4_probe_callback(clp);
 }
 
 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
@@ -679,15 +680,12 @@
 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 }
 
-static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
 {
 	struct nfsd4_conn *conn;
-	u32 flags = NFS4_CDFC4_FORE;
 	int ret;
 
-	if (ses->se_flags & SESSION4_BACK_CHAN)
-		flags |= NFS4_CDFC4_BACK;
-	conn = alloc_conn(rqstp, flags);
+	conn = alloc_conn(rqstp, dir);
 	if (!conn)
 		return nfserr_jukebox;
 	nfsd4_hash_conn(conn, ses);
@@ -698,6 +696,17 @@
 	return nfs_ok;
 }
 
+static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
+{
+	u32 dir = NFS4_CDFC4_FORE;
+
+	if (ses->se_flags & SESSION4_BACK_CHAN)
+		dir |= NFS4_CDFC4_BACK;
+
+	return nfsd4_new_conn(rqstp, ses, dir);
+}
+
+/* must be called under client_lock */
 static void nfsd4_del_conns(struct nfsd4_session *s)
 {
 	struct nfs4_client *clp = s->se_client;
@@ -749,6 +758,8 @@
 	 */
 	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
 	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
+	if (numslots < 1)
+		return NULL;
 
 	new = alloc_session(slotsize, numslots);
 	if (!new) {
@@ -769,25 +780,30 @@
 	idx = hash_sessionid(&new->se_sessionid);
 	spin_lock(&client_lock);
 	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
+	spin_lock(&clp->cl_lock);
 	list_add(&new->se_perclnt, &clp->cl_sessions);
+	spin_unlock(&clp->cl_lock);
 	spin_unlock(&client_lock);
 
-	status = nfsd4_new_conn(rqstp, new);
+	status = nfsd4_new_conn_from_crses(rqstp, new);
 	/* whoops: benny points out, status is ignored! (err, or bogus) */
 	if (status) {
 		free_session(&new->se_ref);
 		return NULL;
 	}
-	if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) {
+	if (cses->flags & SESSION4_BACK_CHAN) {
 		struct sockaddr *sa = svc_addr(rqstp);
-
-		clp->cl_cb_session = new;
-		clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
-		svc_xprt_get(rqstp->rq_xprt);
+		/*
+		 * This is a little silly; with sessions there's no real
+		 * use for the callback address.  Use the peer address
+		 * as a reasonable default for now, but consider fixing
+		 * the rpc client not to require an address in the
+		 * future:
+		 */
 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
-		nfsd4_probe_callback(clp);
 	}
+	nfsd4_probe_callback(clp);
 	return new;
 }
 
@@ -817,7 +833,9 @@
 unhash_session(struct nfsd4_session *ses)
 {
 	list_del(&ses->se_hash);
+	spin_lock(&ses->se_client->cl_lock);
 	list_del(&ses->se_perclnt);
+	spin_unlock(&ses->se_client->cl_lock);
 }
 
 /* must be called under the client_lock */
@@ -923,8 +941,10 @@
 
 	mark_client_expired(clp);
 	list_del(&clp->cl_lru);
+	spin_lock(&clp->cl_lock);
 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
 		list_del_init(&ses->se_hash);
+	spin_unlock(&clp->cl_lock);
 }
 
 static void
@@ -1051,12 +1071,13 @@
 
 	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
 	atomic_set(&clp->cl_refcount, 0);
-	atomic_set(&clp->cl_cb_set, 0);
+	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
 	INIT_LIST_HEAD(&clp->cl_idhash);
 	INIT_LIST_HEAD(&clp->cl_strhash);
 	INIT_LIST_HEAD(&clp->cl_openowners);
 	INIT_LIST_HEAD(&clp->cl_delegations);
 	INIT_LIST_HEAD(&clp->cl_lru);
+	INIT_LIST_HEAD(&clp->cl_callbacks);
 	spin_lock_init(&clp->cl_lock);
 	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
 	clp->cl_time = get_seconds();
@@ -1132,54 +1153,55 @@
 	return NULL;
 }
 
-/*
- * Return 1 iff clp's clientid establishment method matches the use_exchange_id
- * parameter. Matching is based on the fact the at least one of the
- * EXCHGID4_FLAG_USE_{NON_PNFS,PNFS_MDS,PNFS_DS} flags must be set for v4.1
- *
- * FIXME: we need to unify the clientid namespaces for nfsv4.x
- * and correctly deal with client upgrade/downgrade in EXCHANGE_ID
- * and SET_CLIENTID{,_CONFIRM}
- */
-static inline int
-match_clientid_establishment(struct nfs4_client *clp, bool use_exchange_id)
+static bool clp_used_exchangeid(struct nfs4_client *clp)
 {
-	bool has_exchange_flags = (clp->cl_exchange_flags != 0);
-	return use_exchange_id == has_exchange_flags;
-}
+	return clp->cl_exchange_flags != 0;
+}
 
 static struct nfs4_client *
-find_confirmed_client_by_str(const char *dname, unsigned int hashval,
-			     bool use_exchange_id)
+find_confirmed_client_by_str(const char *dname, unsigned int hashval)
 {
 	struct nfs4_client *clp;
 
 	list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
-		if (same_name(clp->cl_recdir, dname) &&
-		    match_clientid_establishment(clp, use_exchange_id))
+		if (same_name(clp->cl_recdir, dname))
 			return clp;
 	}
 	return NULL;
 }
 
 static struct nfs4_client *
-find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
-			       bool use_exchange_id)
+find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
 {
 	struct nfs4_client *clp;
 
 	list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
-		if (same_name(clp->cl_recdir, dname) &&
-		    match_clientid_establishment(clp, use_exchange_id))
+		if (same_name(clp->cl_recdir, dname))
 			return clp;
 	}
 	return NULL;
 }
 
+static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr)
+{
+	switch (family) {
+	case AF_INET:
+		((struct sockaddr_in *)sa)->sin_family = AF_INET;
+		((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr;
+		return;
+	case AF_INET6:
+		((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6;
+		((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6;
+		return;
+	}
+}
+
 static void
-gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
+gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
 {
 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
+	struct sockaddr	*sa = svc_addr(rqstp);
+	u32 scopeid = rpc_get_scope_id(sa);
 	unsigned short expected_family;
 
 	/* Currently, we only support tcp and tcp6 for the callback channel */
@@ -1205,6 +1227,7 @@
 
 	conn->cb_prog = se->se_callback_prog;
 	conn->cb_ident = se->se_callback_ident;
+	rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr);
 	return;
 out_err:
 	conn->cb_addr.ss_family = AF_UNSPEC;
@@ -1344,7 +1367,7 @@
 	case SP4_NONE:
 		break;
 	case SP4_SSV:
-		return nfserr_encr_alg_unsupp;
+		return nfserr_serverfault;
 	default:
 		BUG();				/* checked by xdr code */
 	case SP4_MACH_CRED:
@@ -1361,8 +1384,12 @@
 	nfs4_lock_state();
 	status = nfs_ok;
 
-	conf = find_confirmed_client_by_str(dname, strhashval, true);
+	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
+		if (!clp_used_exchangeid(conf)) {
+			status = nfserr_clid_inuse; /* XXX: ? */
+			goto out;
+		}
 		if (!same_verf(&verf, &conf->cl_verifier)) {
 			/* 18.35.4 case 8 */
 			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
@@ -1403,7 +1430,7 @@
 		goto out;
 	}
 
-	unconf  = find_unconfirmed_client_by_str(dname, strhashval, true);
+	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
 	if (unconf) {
 		/*
 		 * Possible retry or client restart.  Per 18.35.4 case 4,
@@ -1560,6 +1587,8 @@
 	status = nfs_ok;
 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
 	       NFS4_MAX_SESSIONID_LEN);
+	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
+		sizeof(struct nfsd4_channel_attrs));
 	cs_slot->sl_seqid++;
 	cr_ses->seqid = cs_slot->sl_seqid;
 
@@ -1581,6 +1610,45 @@
 	return argp->opcnt == resp->opcnt;
 }
 
+static __be32 nfsd4_map_bcts_dir(u32 *dir)
+{
+	switch (*dir) {
+	case NFS4_CDFC4_FORE:
+	case NFS4_CDFC4_BACK:
+		return nfs_ok;
+	case NFS4_CDFC4_FORE_OR_BOTH:
+	case NFS4_CDFC4_BACK_OR_BOTH:
+		*dir = NFS4_CDFC4_BOTH;
+		return nfs_ok;
+	}
+	return nfserr_inval;
+}
+
+__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
+		     struct nfsd4_compound_state *cstate,
+		     struct nfsd4_bind_conn_to_session *bcts)
+{
+	__be32 status;
+
+	if (!nfsd4_last_compound_op(rqstp))
+		return nfserr_not_only_op;
+	spin_lock(&client_lock);
+	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
+	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
+	 * client_lock itself: */
+	if (cstate->session) {
+		nfsd4_get_session(cstate->session);
+		atomic_inc(&cstate->session->se_client->cl_refcount);
+	}
+	spin_unlock(&client_lock);
+	if (!cstate->session)
+		return nfserr_badsession;
+
+	status = nfsd4_map_bcts_dir(&bcts->dir);
+	nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
+	return status;
+}
+
 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
 {
 	if (!session)
@@ -1619,8 +1687,7 @@
 	spin_unlock(&client_lock);
 
 	nfs4_lock_state();
-	/* wait for callbacks */
-	nfsd4_shutdown_callback(ses->se_client);
+	nfsd4_probe_callback_sync(ses->se_client);
 	nfs4_unlock_state();
 
 	nfsd4_del_conns(ses);
@@ -1733,8 +1800,12 @@
 out:
 	/* Hold a session reference until done processing the compound. */
 	if (cstate->session) {
+		struct nfs4_client *clp = session->se_client;
+
 		nfsd4_get_session(cstate->session);
-		atomic_inc(&session->se_client->cl_refcount);
+		atomic_inc(&clp->cl_refcount);
+		if (clp->cl_cb_state == NFSD4_CB_DOWN)
+			seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
 	}
 	kfree(conn);
 	spin_unlock(&client_lock);
@@ -1775,7 +1846,6 @@
 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		  struct nfsd4_setclientid *setclid)
 {
-	struct sockaddr		*sa = svc_addr(rqstp);
 	struct xdr_netobj 	clname = { 
 		.len = setclid->se_namelen,
 		.data = setclid->se_name,
@@ -1801,10 +1871,12 @@
 	strhashval = clientstr_hashval(dname);
 
 	nfs4_lock_state();
-	conf = find_confirmed_client_by_str(dname, strhashval, false);
+	conf = find_confirmed_client_by_str(dname, strhashval);
 	if (conf) {
 		/* RFC 3530 14.2.33 CASE 0: */
 		status = nfserr_clid_inuse;
+		if (clp_used_exchangeid(conf))
+			goto out;
 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
 			char addr_str[INET6_ADDRSTRLEN];
 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
@@ -1819,7 +1891,7 @@
 	 * has a description of SETCLIENTID request processing consisting
 	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
 	 */
-	unconf = find_unconfirmed_client_by_str(dname, strhashval, false);
+	unconf = find_unconfirmed_client_by_str(dname, strhashval);
 	status = nfserr_resource;
 	if (!conf) {
 		/*
@@ -1876,7 +1948,7 @@
 	 * for consistent minorversion use throughout:
 	 */
 	new->cl_minorversion = 0;
-	gen_callback(new, setclid, rpc_get_scope_id(sa));
+	gen_callback(new, setclid, rqstp);
 	add_to_unconfirmed(new, strhashval);
 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
@@ -1935,7 +2007,6 @@
 		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
 			status = nfserr_clid_inuse;
 		else {
-			atomic_set(&conf->cl_cb_set, 0);
 			nfsd4_change_callback(conf, &unconf->cl_cb_conn);
 			nfsd4_probe_callback(conf);
 			expire_client(unconf);
@@ -1964,7 +2035,7 @@
 			unsigned int hash =
 				clientstr_hashval(unconf->cl_recdir);
 			conf = find_confirmed_client_by_str(unconf->cl_recdir,
-							    hash, false);
+							    hash);
 			if (conf) {
 				nfsd4_remove_clid_dir(conf);
 				expire_client(conf);
@@ -2300,41 +2371,6 @@
 	nfsd4_cb_recall(dp);
 }
 
-/*
- * The file_lock is being reapd.
- *
- * Called by locks_free_lock() with lock_flocks() held.
- */
-static
-void nfsd_release_deleg_cb(struct file_lock *fl)
-{
-	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
-
-	dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));
-
-	if (!(fl->fl_flags & FL_LEASE) || !dp)
-		return;
-	dp->dl_flock = NULL;
-}
-
-/*
- * Called from setlease() with lock_flocks() held
- */
-static
-int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
-{
-	struct nfs4_delegation *onlistd =
-		(struct nfs4_delegation *)onlist->fl_owner;
-	struct nfs4_delegation *tryd =
-		(struct nfs4_delegation *)try->fl_owner;
-
-	if (onlist->fl_lmops != try->fl_lmops)
-		return 0;
-
-	return onlistd->dl_client == tryd->dl_client;
-}
-
-
 static
 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
 {
@@ -2346,8 +2382,6 @@
 
 static const struct lock_manager_operations nfsd_lease_mng_ops = {
 	.fl_break = nfsd_break_deleg_cb,
-	.fl_release_private = nfsd_release_deleg_cb,
-	.fl_mylease = nfsd_same_client_deleg_cb,
 	.fl_change = nfsd_change_deleg_cb,
 };
 
@@ -2514,8 +2548,6 @@
 	if (!fp->fi_fds[oflag]) {
 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
 			&fp->fi_fds[oflag]);
-		if (status == nfserr_dropit)
-			status = nfserr_jukebox;
 		if (status)
 			return status;
 	}
@@ -2596,6 +2628,19 @@
 	open->op_stateowner->so_client->cl_firststate = 1;
 }
 
+/* Should we give out recallable state?: */
+static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
+{
+	if (clp->cl_cb_state == NFSD4_CB_UP)
+		return true;
+	/*
+	 * In the sessions case, since we don't have to establish a
+	 * separate connection for callbacks, we assume it's OK
+	 * until we hear otherwise:
+	 */
+	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
+}
+
 /*
  * Attempt to hand out a delegation.
  */
@@ -2604,10 +2649,11 @@
 {
 	struct nfs4_delegation *dp;
 	struct nfs4_stateowner *sop = stp->st_stateowner;
-	int cb_up = atomic_read(&sop->so_client->cl_cb_set);
+	int cb_up;
 	struct file_lock *fl;
 	int status, flag = 0;
 
+	cb_up = nfsd4_cb_channel_good(sop->so_client);
 	flag = NFS4_OPEN_DELEGATE_NONE;
 	open->op_recall = 0;
 	switch (open->op_claim_type) {
@@ -2655,7 +2701,7 @@
 	dp->dl_flock = fl;
 
 	/* vfs_setlease checks to see if delegation should be handed out.
-	 * the lock_manager callbacks fl_mylease and fl_change are used
+	 * the lock_manager callback fl_change is used
 	 */
 	if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
 		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
@@ -2794,7 +2840,7 @@
 	renew_client(clp);
 	status = nfserr_cb_path_down;
 	if (!list_empty(&clp->cl_delegations)
-			&& !atomic_read(&clp->cl_cb_set))
+			&& clp->cl_cb_state != NFSD4_CB_UP)
 		goto out;
 	status = nfs_ok;
 out:
@@ -3081,9 +3127,10 @@
 		if (status)
 			goto out;
 		renew_client(dp->dl_client);
-		if (filpp)
+		if (filpp) {
 			*filpp = find_readable_file(dp->dl_file);
-		BUG_ON(!*filpp);
+			BUG_ON(!*filpp);
+		}
 	} else { /* open or lock stateid */
 		stp = find_stateid(stateid, flags);
 		if (!stp)
@@ -4107,7 +4154,7 @@
 	unsigned int strhashval = clientstr_hashval(name);
 	struct nfs4_client *clp;
 
-	clp = find_confirmed_client_by_str(name, strhashval, use_exchange_id);
+	clp = find_confirmed_client_by_str(name, strhashval);
 	return clp ? 1 : 0;
 }
 
@@ -4336,7 +4383,7 @@
 void
 nfs4_state_shutdown(void)
 {
-	cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work);
+	cancel_delayed_work_sync(&laundromat_work);
 	destroy_workqueue(laundry_wq);
 	locks_end_grace(&nfsd4_manager);
 	nfs4_lock_state();
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f35a94a..956629b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -44,13 +44,14 @@
 #include <linux/namei.h>
 #include <linux/statfs.h>
 #include <linux/utsname.h>
-#include <linux/nfsd_idmap.h>
-#include <linux/nfs4_acl.h>
 #include <linux/sunrpc/svcauth_gss.h>
 
+#include "idmap.h"
+#include "acl.h"
 #include "xdr4.h"
 #include "vfs.h"
 
+
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
 /*
@@ -288,17 +289,17 @@
 			len += XDR_QUADLEN(dummy32) << 2;
 			READMEM(buf, dummy32);
 			ace->whotype = nfs4_acl_get_whotype(buf, dummy32);
-			host_err = 0;
+			status = nfs_ok;
 			if (ace->whotype != NFS4_ACL_WHO_NAMED)
 				ace->who = 0;
 			else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
-				host_err = nfsd_map_name_to_gid(argp->rqstp,
+				status = nfsd_map_name_to_gid(argp->rqstp,
 						buf, dummy32, &ace->who);
 			else
-				host_err = nfsd_map_name_to_uid(argp->rqstp,
+				status = nfsd_map_name_to_uid(argp->rqstp,
 						buf, dummy32, &ace->who);
-			if (host_err)
-				goto out_nfserr;
+			if (status)
+				return status;
 		}
 	} else
 		*acl = NULL;
@@ -420,6 +421,21 @@
 	DECODE_TAIL;
 }
 
+static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts)
+{
+	DECODE_HEAD;
+	u32 dummy;
+
+	READ_BUF(NFS4_MAX_SESSIONID_LEN + 8);
+	COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+	READ32(bcts->dir);
+	/* XXX: Perhaps Tom Tucker could help us figure out how we
+	 * should be using ctsa_use_conn_in_rdma_mode: */
+	READ32(dummy);
+
+	DECODE_TAIL;
+}
+
 static __be32
 nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close)
 {
@@ -847,6 +863,17 @@
 }
 
 static __be32
+nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp,
+		     struct nfsd4_secinfo_no_name *sin)
+{
+	DECODE_HEAD;
+
+	READ_BUF(4);
+	READ32(sin->sin_style);
+	DECODE_TAIL;
+}
+
+static __be32
 nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
 {
 	__be32 status;
@@ -1005,7 +1032,7 @@
 nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
 			 struct nfsd4_exchange_id *exid)
 {
-	int dummy;
+	int dummy, tmp;
 	DECODE_HEAD;
 
 	READ_BUF(NFS4_VERIFIER_SIZE);
@@ -1053,15 +1080,23 @@
 
 		/* ssp_hash_algs<> */
 		READ_BUF(4);
-		READ32(dummy);
-		READ_BUF(dummy);
-		p += XDR_QUADLEN(dummy);
+		READ32(tmp);
+		while (tmp--) {
+			READ_BUF(4);
+			READ32(dummy);
+			READ_BUF(dummy);
+			p += XDR_QUADLEN(dummy);
+		}
 
 		/* ssp_encr_algs<> */
 		READ_BUF(4);
-		READ32(dummy);
-		READ_BUF(dummy);
-		p += XDR_QUADLEN(dummy);
+		READ32(tmp);
+		while (tmp--) {
+			READ_BUF(4);
+			READ32(dummy);
+			READ_BUF(dummy);
+			p += XDR_QUADLEN(dummy);
+		}
 
 		/* ssp_window and ssp_num_gss_handles */
 		READ_BUF(8);
@@ -1339,7 +1374,7 @@
 
 	/* new operations for NFSv4.1 */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_dec)nfsd4_decode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_dec)nfsd4_decode_create_session,
 	[OP_DESTROY_SESSION]	= (nfsd4_dec)nfsd4_decode_destroy_session,
@@ -1350,7 +1385,7 @@
 	[OP_LAYOUTCOMMIT]	= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTGET]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_LAYOUTRETURN]	= (nfsd4_dec)nfsd4_decode_notsupp,
-	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_SECINFO_NO_NAME]	= (nfsd4_dec)nfsd4_decode_secinfo_no_name,
 	[OP_SEQUENCE]		= (nfsd4_dec)nfsd4_decode_sequence,
 	[OP_SET_SSV]		= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_TEST_STATEID]	= (nfsd4_dec)nfsd4_decode_notsupp,
@@ -2309,8 +2344,6 @@
 	case nfserr_resource:
 		nfserr = nfserr_toosmall;
 		goto fail;
-	case nfserr_dropit:
-		goto fail;
 	case nfserr_noent:
 		goto skip_entry;
 	default:
@@ -2365,6 +2398,21 @@
 	return nfserr;
 }
 
+static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts)
+{
+	__be32 *p;
+
+	if (!nfserr) {
+		RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8);
+		WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN);
+		WRITE32(bcts->dir);
+		/* XXX: ? */
+		WRITE32(0);
+		ADJUST_ARGS();
+	}
+	return nfserr;
+}
+
 static __be32
 nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close)
 {
@@ -2826,11 +2874,10 @@
 }
 
 static __be32
-nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
-		     struct nfsd4_secinfo *secinfo)
+nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
+			 __be32 nfserr, struct svc_export *exp)
 {
 	int i = 0;
-	struct svc_export *exp = secinfo->si_exp;
 	u32 nflavs;
 	struct exp_flavor_info *flavs;
 	struct exp_flavor_info def_flavs[2];
@@ -2892,6 +2939,20 @@
 	return nfserr;
 }
 
+static __be32
+nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
+		     struct nfsd4_secinfo *secinfo)
+{
+	return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp);
+}
+
+static __be32
+nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
+		     struct nfsd4_secinfo_no_name *secinfo)
+{
+	return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp);
+}
+
 /*
  * The SETATTR encode routine is special -- it always encodes a bitmap,
  * regardless of the error status.
@@ -3076,13 +3137,9 @@
 	WRITE32(seq->seqid);
 	WRITE32(seq->slotid);
 	WRITE32(seq->maxslots);
-	/*
-	 * FIXME: for now:
-	 *   target_maxslots = maxslots
-	 *   status_flags = 0
-	 */
+	/* For now: target_maxslots = maxslots */
 	WRITE32(seq->maxslots);
-	WRITE32(0);
+	WRITE32(seq->status_flags);
 
 	ADJUST_ARGS();
 	resp->cstate.datap = p; /* DRC cache data pointer */
@@ -3143,7 +3200,7 @@
 
 	/* NFSv4.1 operations */
 	[OP_BACKCHANNEL_CTL]	= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_noop,
+	[OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session,
 	[OP_EXCHANGE_ID]	= (nfsd4_enc)nfsd4_encode_exchange_id,
 	[OP_CREATE_SESSION]	= (nfsd4_enc)nfsd4_encode_create_session,
 	[OP_DESTROY_SESSION]	= (nfsd4_enc)nfsd4_encode_destroy_session,
@@ -3154,7 +3211,7 @@
 	[OP_LAYOUTCOMMIT]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTGET]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_LAYOUTRETURN]	= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_SECINFO_NO_NAME]	= (nfsd4_enc)nfsd4_encode_secinfo_no_name,
 	[OP_SEQUENCE]		= (nfsd4_enc)nfsd4_encode_sequence,
 	[OP_SET_SSV]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_TEST_STATEID]	= (nfsd4_enc)nfsd4_encode_noop,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 4514ebb..33b3e2b 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -8,12 +8,12 @@
 #include <linux/namei.h>
 #include <linux/ctype.h>
 
-#include <linux/nfsd_idmap.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/nfsd/syscall.h>
 #include <linux/lockd/lockd.h>
 #include <linux/sunrpc/clnt.h>
 
+#include "idmap.h"
 #include "nfsd.h"
 #include "cache.h"
 
@@ -127,6 +127,7 @@
 
 static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
 {
+#ifdef CONFIG_NFSD_DEPRECATED
 	static int warned;
 	if (file->f_dentry->d_name.name[0] == '.' && !warned) {
 		printk(KERN_INFO
@@ -135,6 +136,7 @@
 		       current->comm, file->f_dentry->d_name.name);
 		warned = 1;
 	}
+#endif
 	if (! file->private_data) {
 		/* An attempt to read a transaction file without writing
 		 * causes a 0-byte write so that the file can return
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 6b641cf..7ecfa24 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -158,6 +158,7 @@
 #define	nfserr_attrnotsupp	cpu_to_be32(NFSERR_ATTRNOTSUPP)
 #define	nfserr_bad_xdr		cpu_to_be32(NFSERR_BAD_XDR)
 #define	nfserr_openmode		cpu_to_be32(NFSERR_OPENMODE)
+#define	nfserr_badowner		cpu_to_be32(NFSERR_BADOWNER)
 #define	nfserr_locks_held	cpu_to_be32(NFSERR_LOCKS_HELD)
 #define	nfserr_op_illegal	cpu_to_be32(NFSERR_OP_ILLEGAL)
 #define	nfserr_grace		cpu_to_be32(NFSERR_GRACE)
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 08e1726..e15dc45 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -735,9 +735,9 @@
 		{ nfserr_stale, -ESTALE },
 		{ nfserr_jukebox, -ETIMEDOUT },
 		{ nfserr_jukebox, -ERESTARTSYS },
-		{ nfserr_dropit, -EAGAIN },
-		{ nfserr_dropit, -ENOMEM },
-		{ nfserr_badname, -ESRCH },
+		{ nfserr_jukebox, -EAGAIN },
+		{ nfserr_jukebox, -EWOULDBLOCK },
+		{ nfserr_jukebox, -ENOMEM },
 		{ nfserr_io, -ETXTBSY },
 		{ nfserr_notsupp, -EOPNOTSUPP },
 		{ nfserr_toosmall, -ETOOSMALL },
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 2bae1d8..18743c4 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -608,7 +608,7 @@
 	/* Now call the procedure handler, and encode NFS status. */
 	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
-	if (nfserr == nfserr_dropit) {
+	if (nfserr == nfserr_dropit || rqstp->rq_dropme) {
 		dprintk("nfsd: Dropping request; may be revisited later\n");
 		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
 		return 0;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 39adc27..3074656 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -68,10 +68,12 @@
 struct nfsd4_callback {
 	void *cb_op;
 	struct nfs4_client *cb_clp;
+	struct list_head cb_per_client;
 	u32 cb_minorversion;
 	struct rpc_message cb_msg;
 	const struct rpc_call_ops *cb_ops;
 	struct work_struct cb_work;
+	bool cb_done;
 };
 
 struct nfs4_delegation {
@@ -81,6 +83,7 @@
 	atomic_t		dl_count;       /* ref count */
 	struct nfs4_client	*dl_client;
 	struct nfs4_file	*dl_file;
+	struct file		*dl_vfs_file;
 	struct file_lock	*dl_flock;
 	u32			dl_type;
 	time_t			dl_time;
@@ -95,6 +98,7 @@
 struct nfs4_cb_conn {
 	/* SETCLIENTID info */
 	struct sockaddr_storage	cb_addr;
+	struct sockaddr_storage	cb_saddr;
 	size_t			cb_addrlen;
 	u32                     cb_prog; /* used only in 4.0 case;
 					    per-session otherwise */
@@ -146,6 +150,11 @@
 	u32				gid;
 };
 
+struct nfsd4_bind_conn_to_session {
+	struct nfs4_sessionid		sessionid;
+	u32				dir;
+};
+
 /* The single slot clientid cache structure */
 struct nfsd4_clid_slot {
 	u32				sl_seqid;
@@ -235,9 +244,13 @@
 	unsigned long		cl_cb_flags;
 	struct rpc_clnt		*cl_cb_client;
 	u32			cl_cb_ident;
-	atomic_t		cl_cb_set;
+#define NFSD4_CB_UP		0
+#define NFSD4_CB_UNKNOWN	1
+#define NFSD4_CB_DOWN		2
+	int			cl_cb_state;
 	struct nfsd4_callback	cl_cb_null;
 	struct nfsd4_session	*cl_cb_session;
+	struct list_head	cl_callbacks; /* list of in-progress callbacks */
 
 	/* for all client information that callback code might need: */
 	spinlock_t		cl_lock;
@@ -454,6 +467,7 @@
 extern void nfs4_free_stateowner(struct kref *kref);
 extern int set_callback_cred(void);
 extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
 extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
 extern void nfsd4_do_callback_rpc(struct work_struct *);
 extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 184938f..a3c7f70 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1,4 +1,3 @@
-#define MSNFS	/* HACK HACK */
 /*
  * File operations used by nfsd. Some of these have been ripped from
  * other parts of the kernel because they weren't exported, others
@@ -35,8 +34,8 @@
 #endif /* CONFIG_NFSD_V3 */
 
 #ifdef CONFIG_NFSD_V4
-#include <linux/nfs4_acl.h>
-#include <linux/nfsd_idmap.h>
+#include "acl.h"
+#include "idmap.h"
 #endif /* CONFIG_NFSD_V4 */
 
 #include "nfsd.h"
@@ -273,6 +272,13 @@
 	return err;
 }
 
+static int nfsd_break_lease(struct inode *inode)
+{
+	if (!S_ISREG(inode->i_mode))
+		return 0;
+	return break_lease(inode, O_WRONLY | O_NONBLOCK);
+}
+
 /*
  * Commit metadata changes to stable storage.
  */
@@ -375,16 +381,6 @@
 				goto out;
 		}
 
-		/*
-		 * If we are changing the size of the file, then
-		 * we need to break all leases.
-		 */
-		host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
-		if (host_err == -EWOULDBLOCK)
-			host_err = -ETIMEDOUT;
-		if (host_err) /* ENOMEM or EWOULDBLOCK */
-			goto out_nfserr;
-
 		host_err = get_write_access(inode);
 		if (host_err)
 			goto out_nfserr;
@@ -425,7 +421,11 @@
 
 	err = nfserr_notsync;
 	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+		host_err = nfsd_break_lease(inode);
+		if (host_err)
+			goto out_nfserr;
 		fh_lock(fhp);
+
 		host_err = notify_change(dentry, iap);
 		err = nfserrno(host_err);
 		fh_unlock(fhp);
@@ -752,8 +752,6 @@
 	 */
 	if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
 		host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
-	if (host_err == -EWOULDBLOCK)
-		host_err = -ETIMEDOUT;
 	if (host_err) /* NOMEM or WOULDBLOCK */
 		goto out_nfserr;
 
@@ -845,11 +843,6 @@
 	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
 	struct page *page = buf->page;
 	size_t size;
-	int ret;
-
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
 
 	size = sd->len;
 
@@ -879,15 +872,6 @@
 	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
 }
 
-static inline int svc_msnfs(struct svc_fh *ffhp)
-{
-#ifdef MSNFS
-	return (ffhp->fh_export->ex_flags & NFSEXP_MSNFS);
-#else
-	return 0;
-#endif
-}
-
 static __be32
 nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
               loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
@@ -900,9 +884,6 @@
 	err = nfserr_perm;
 	inode = file->f_path.dentry->d_inode;
 
-	if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count))
-		goto out;
-
 	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
 		struct splice_desc sd = {
 			.len		= 0,
@@ -927,7 +908,6 @@
 		fsnotify_access(file);
 	} else 
 		err = nfserrno(host_err);
-out:
 	return err;
 }
 
@@ -992,14 +972,6 @@
 	int			stable = *stablep;
 	int			use_wgather;
 
-#ifdef MSNFS
-	err = nfserr_perm;
-
-	if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-		(!lock_may_write(file->f_path.dentry->d_inode, offset, *cnt)))
-		goto out;
-#endif
-
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
 	exp   = fhp->fh_export;
@@ -1050,7 +1022,6 @@
 		err = 0;
 	else
 		err = nfserrno(host_err);
-out:
 	return err;
 }
 
@@ -1670,6 +1641,12 @@
 		err = nfserrno(host_err);
 		goto out_dput;
 	}
+	err = nfserr_noent;
+	if (!dold->d_inode)
+		goto out_drop_write;
+	host_err = nfsd_break_lease(dold->d_inode);
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_link(dold, dirp, dnew);
 	if (!host_err) {
 		err = nfserrno(commit_metadata(ffhp));
@@ -1681,6 +1658,7 @@
 		else
 			err = nfserrno(host_err);
 	}
+out_drop_write:
 	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
 out_dput:
 	dput(dnew);
@@ -1755,13 +1733,6 @@
 	if (ndentry == trap)
 		goto out_dput_new;
 
-	if (svc_msnfs(ffhp) &&
-		((atomic_read(&odentry->d_count) > 1)
-		 || (atomic_read(&ndentry->d_count) > 1))) {
-			host_err = -EPERM;
-			goto out_dput_new;
-	}
-
 	host_err = -EXDEV;
 	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
 		goto out_dput_new;
@@ -1769,15 +1740,17 @@
 	if (host_err)
 		goto out_dput_new;
 
+	host_err = nfsd_break_lease(odentry->d_inode);
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
 	if (!host_err) {
 		host_err = commit_metadata(tfhp);
 		if (!host_err)
 			host_err = commit_metadata(ffhp);
 	}
-
+out_drop_write:
 	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
-
  out_dput_new:
 	dput(ndentry);
  out_dput_old:
@@ -1840,18 +1813,14 @@
 	if (host_err)
 		goto out_nfserr;
 
-	if (type != S_IFDIR) { /* It's UNLINK */
-#ifdef MSNFS
-		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-			(atomic_read(&rdentry->d_count) > 1)) {
-			host_err = -EPERM;
-		} else
-#endif
+	host_err = nfsd_break_lease(rdentry->d_inode);
+	if (host_err)
+		goto out_put;
+	if (type != S_IFDIR)
 		host_err = vfs_unlink(dirp, rdentry);
-	} else { /* It's RMDIR */
+	else
 		host_err = vfs_rmdir(dirp, rdentry);
-	}
-
+out_put:
 	dput(rdentry);
 
 	if (!host_err)
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 60fce3d..366401e 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -311,6 +311,11 @@
 	struct svc_export *si_exp;			/* response */
 };
 
+struct nfsd4_secinfo_no_name {
+	u32 sin_style;					/* request */
+	struct svc_export *sin_exp;			/* response */
+};
+
 struct nfsd4_setattr {
 	stateid_t	sa_stateid;         /* request */
 	u32		sa_bmval[3];        /* request */
@@ -373,8 +378,8 @@
 	u32			cachethis;		/* request */
 #if 0
 	u32			target_maxslots;	/* response */
-	u32			status_flags;		/* response */
 #endif /* not yet */
+	u32			status_flags;		/* response */
 };
 
 struct nfsd4_destroy_session {
@@ -422,6 +427,7 @@
 
 		/* NFSv4.1 */
 		struct nfsd4_exchange_id	exchange_id;
+		struct nfsd4_bind_conn_to_session bind_conn_to_session;
 		struct nfsd4_create_session	create_session;
 		struct nfsd4_destroy_session	destroy_session;
 		struct nfsd4_sequence		sequence;
@@ -518,6 +524,7 @@
 		struct nfsd4_sequence *seq);
 extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
 		struct nfsd4_compound_state *, struct nfsd4_exchange_id *);
+extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *);
 extern __be32 nfsd4_create_session(struct svc_rqst *,
 		struct nfsd4_compound_state *,
 		struct nfsd4_create_session *);
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 8b782b0..3ee67c6 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -35,7 +35,20 @@
 
 struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap)
 {
-	return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode));
+	return NILFS_I_NILFS(bmap->b_inode)->ns_dat;
+}
+
+static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
+				     const char *fname, int err)
+{
+	struct inode *inode = bmap->b_inode;
+
+	if (err == -EINVAL) {
+		nilfs_error(inode->i_sb, fname,
+			    "broken bmap (inode number=%lu)\n", inode->i_ino);
+		err = -EIO;
+	}
+	return err;
 }
 
 /**
@@ -66,8 +79,10 @@
 
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
-	if (ret < 0)
+	if (ret < 0) {
+		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
 		goto out;
+	}
 	if (NILFS_BMAP_USE_VBN(bmap)) {
 		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
 					  &blocknr);
@@ -88,7 +103,8 @@
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks);
 	up_read(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
@@ -144,7 +160,8 @@
 	down_write(&bmap->b_sem);
 	ret = nilfs_bmap_do_insert(bmap, key, rec);
 	up_write(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
@@ -180,9 +197,12 @@
 
 	down_read(&bmap->b_sem);
 	ret = bmap->b_ops->bop_last_key(bmap, &lastkey);
-	if (!ret)
-		*key = lastkey;
 	up_read(&bmap->b_sem);
+
+	if (ret < 0)
+		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
+	else
+		*key = lastkey;
 	return ret;
 }
 
@@ -210,7 +230,8 @@
 	down_write(&bmap->b_sem);
 	ret = nilfs_bmap_do_delete(bmap, key);
 	up_write(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key)
@@ -261,7 +282,8 @@
 	down_write(&bmap->b_sem);
 	ret = nilfs_bmap_do_truncate(bmap, key);
 	up_write(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 /**
@@ -300,7 +322,8 @@
 	down_write(&bmap->b_sem);
 	ret = bmap->b_ops->bop_propagate(bmap, bh);
 	up_write(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 /**
@@ -344,7 +367,8 @@
 	down_write(&bmap->b_sem);
 	ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo);
 	up_write(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 /**
@@ -373,7 +397,8 @@
 	down_write(&bmap->b_sem);
 	ret = bmap->b_ops->bop_mark(bmap, key, level);
 	up_write(&bmap->b_sem);
-	return ret;
+
+	return nilfs_bmap_convert_error(bmap, __func__, ret);
 }
 
 /**
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 5115814..388e9e8 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -104,8 +104,7 @@
 	if (pblocknr == 0) {
 		pblocknr = blocknr;
 		if (inode->i_ino != NILFS_DAT_INO) {
-			struct inode *dat =
-				nilfs_dat_inode(NILFS_I_NILFS(inode));
+			struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
 
 			/* blocknr is a virtual block number */
 			err = nilfs_dat_translate(dat, blocknr, &pblocknr);
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index cb003c8..9d45773 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -91,7 +91,6 @@
 			       unsigned from, unsigned to)
 {
 	struct inode *dir = mapping->host;
-	struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
 	loff_t pos = page_offset(page) + from;
 	unsigned len = to - from;
 	unsigned nr_dirty, copied;
@@ -103,7 +102,7 @@
 		i_size_write(dir, pos + copied);
 	if (IS_DIRSYNC(dir))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
-	err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
+	err = nilfs_set_file_dirty(dir, nr_dirty);
 	WARN_ON(err); /* do not happen */
 	unlock_page(page);
 }
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index c9a30d7..2f560c9 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -155,6 +155,7 @@
 	.truncate	= nilfs_truncate,
 	.setattr	= nilfs_setattr,
 	.permission     = nilfs_permission,
+	.fiemap		= nilfs_fiemap,
 };
 
 /* end of file */
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 9f8a2da..bfc73d3 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -149,14 +149,9 @@
 	}
 
 	err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh);
-	if (unlikely(err)) {
-		if (err == -EINVAL)
-			nilfs_error(sb, __func__, "ifile is broken");
-		else
-			nilfs_warning(sb, __func__,
-				      "unable to read inode: %lu",
-				      (unsigned long) ino);
-	}
+	if (unlikely(err))
+		nilfs_warning(sb, __func__, "unable to read inode: %lu",
+			      (unsigned long) ino);
 	return err;
 }
 
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 71d4bc8..2fd440d 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -58,7 +58,7 @@
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 	__u64 blknum = 0;
 	int err = 0, ret;
-	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
+	struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
 	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
 
 	down_read(&NILFS_MDT(dat)->mi_sem);
@@ -96,11 +96,6 @@
 				       inode->i_ino,
 				       (unsigned long long)blkoff);
 				err = 0;
-			} else if (err == -EINVAL) {
-				nilfs_error(inode->i_sb, __func__,
-					    "broken bmap (inode=%lu)\n",
-					    inode->i_ino);
-				err = -EIO;
 			}
 			nilfs_transaction_abort(inode->i_sb);
 			goto out;
@@ -109,6 +104,7 @@
 		nilfs_transaction_commit(inode->i_sb); /* never fails */
 		/* Error handling should be detailed */
 		set_buffer_new(bh_result);
+		set_buffer_delay(bh_result);
 		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
 						      to proper value */
 	} else if (ret == -ENOENT) {
@@ -185,10 +181,9 @@
 
 	if (ret) {
 		struct inode *inode = page->mapping->host;
-		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
 		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
 
-		nilfs_set_file_dirty(sbi, inode, nr_dirty);
+		nilfs_set_file_dirty(inode, nr_dirty);
 	}
 	return ret;
 }
@@ -229,7 +224,7 @@
 						  start + copied);
 	copied = generic_write_end(file, mapping, pos, len, copied, page,
 				   fsdata);
-	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
+	nilfs_set_file_dirty(inode, nr_dirty);
 	err = nilfs_transaction_commit(inode->i_sb);
 	return err ? : copied;
 }
@@ -425,13 +420,12 @@
 			      struct nilfs_root *root, unsigned long ino,
 			      struct inode *inode)
 {
-	struct nilfs_sb_info *sbi = NILFS_SB(sb);
-	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
+	struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
 	struct buffer_head *bh;
 	struct nilfs_inode *raw_inode;
 	int err;
 
-	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
+	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
 	if (unlikely(err))
 		goto bad_inode;
@@ -461,7 +455,7 @@
 	}
 	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
 	brelse(bh);
-	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
+	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	nilfs_set_inode_flags(inode);
 	return 0;
 
@@ -470,7 +464,7 @@
 	brelse(bh);
 
  bad_inode:
-	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
+	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	return err;
 }
 
@@ -629,7 +623,7 @@
 
 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 		return;
- repeat:
+repeat:
 	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
 	if (ret == -ENOENT)
 		return;
@@ -646,14 +640,10 @@
 		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
 		goto repeat;
 
- failed:
-	if (ret == -EINVAL)
-		nilfs_error(ii->vfs_inode.i_sb, __func__,
-			    "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino);
-	else
-		nilfs_warning(ii->vfs_inode.i_sb, __func__,
-			      "failed to truncate bmap (ino=%lu, err=%d)",
-			      ii->vfs_inode.i_ino, ret);
+failed:
+	nilfs_warning(ii->vfs_inode.i_sb, __func__,
+		      "failed to truncate bmap (ino=%lu, err=%d)",
+		      ii->vfs_inode.i_ino, ret);
 }
 
 void nilfs_truncate(struct inode *inode)
@@ -682,7 +672,7 @@
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 
 	nilfs_mark_inode_dirty(inode);
-	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
+	nilfs_set_file_dirty(inode, 0);
 	nilfs_transaction_commit(sb);
 	/* May construct a logical segment and may fail in sync mode.
 	   But truncate has no return value. */
@@ -785,20 +775,24 @@
 	return err;
 }
 
-int nilfs_permission(struct inode *inode, int mask)
+int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
-	struct nilfs_root *root = NILFS_I(inode)->i_root;
+	struct nilfs_root *root;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	root = NILFS_I(inode)->i_root;
 	if ((mask & MAY_WRITE) && root &&
 	    root->cno != NILFS_CPTREE_CURRENT_CNO)
 		return -EROFS; /* snapshot is not writable */
 
-	return generic_permission(inode, mask, NULL);
+	return generic_permission(inode, mask, flags, NULL);
 }
 
-int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
-			   struct buffer_head **pbh)
+int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
 {
+	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 	int err;
 
@@ -839,9 +833,9 @@
 	return ret;
 }
 
-int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
-			 unsigned nr_dirty)
+int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
 {
+	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 
 	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);
@@ -874,11 +868,10 @@
 
 int nilfs_mark_inode_dirty(struct inode *inode)
 {
-	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
 	struct buffer_head *ibh;
 	int err;
 
-	err = nilfs_load_inode_block(sbi, inode, &ibh);
+	err = nilfs_load_inode_block(inode, &ibh);
 	if (unlikely(err)) {
 		nilfs_warning(inode->i_sb, __func__,
 			      "failed to reget inode block.\n");
@@ -920,3 +913,134 @@
 	nilfs_mark_inode_dirty(inode);
 	nilfs_transaction_commit(inode->i_sb); /* never fails */
 }
+
+int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+		 __u64 start, __u64 len)
+{
+	struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
+	__u64 logical = 0, phys = 0, size = 0;
+	__u32 flags = 0;
+	loff_t isize;
+	sector_t blkoff, end_blkoff;
+	sector_t delalloc_blkoff;
+	unsigned long delalloc_blklen;
+	unsigned int blkbits = inode->i_blkbits;
+	int ret, n;
+
+	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+	if (ret)
+		return ret;
+
+	mutex_lock(&inode->i_mutex);
+
+	isize = i_size_read(inode);
+
+	blkoff = start >> blkbits;
+	end_blkoff = (start + len - 1) >> blkbits;
+
+	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
+							&delalloc_blkoff);
+
+	do {
+		__u64 blkphy;
+		unsigned int maxblocks;
+
+		if (delalloc_blklen && blkoff == delalloc_blkoff) {
+			if (size) {
+				/* End of the current extent */
+				ret = fiemap_fill_next_extent(
+					fieinfo, logical, phys, size, flags);
+				if (ret)
+					break;
+			}
+			if (blkoff > end_blkoff)
+				break;
+
+			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
+			logical = blkoff << blkbits;
+			phys = 0;
+			size = delalloc_blklen << blkbits;
+
+			blkoff = delalloc_blkoff + delalloc_blklen;
+			delalloc_blklen = nilfs_find_uncommitted_extent(
+				inode, blkoff, &delalloc_blkoff);
+			continue;
+		}
+
+		/*
+		 * Limit the number of blocks that we look up so as
+		 * not to get into the next delayed allocation extent.
+		 */
+		maxblocks = INT_MAX;
+		if (delalloc_blklen)
+			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
+					  maxblocks);
+		blkphy = 0;
+
+		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
+		n = nilfs_bmap_lookup_contig(
+			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
+		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
+
+		if (n < 0) {
+			int past_eof;
+
+			if (unlikely(n != -ENOENT))
+				break; /* error */
+
+			/* HOLE */
+			blkoff++;
+			past_eof = ((blkoff << blkbits) >= isize);
+
+			if (size) {
+				/* End of the current extent */
+
+				if (past_eof)
+					flags |= FIEMAP_EXTENT_LAST;
+
+				ret = fiemap_fill_next_extent(
+					fieinfo, logical, phys, size, flags);
+				if (ret)
+					break;
+				size = 0;
+			}
+			if (blkoff > end_blkoff || past_eof)
+				break;
+		} else {
+			if (size) {
+				if (phys && blkphy << blkbits == phys + size) {
+					/* The current extent goes on */
+					size += n << blkbits;
+				} else {
+					/* Terminate the current extent */
+					ret = fiemap_fill_next_extent(
+						fieinfo, logical, phys, size,
+						flags);
+					if (ret || blkoff > end_blkoff)
+						break;
+
+					/* Start another extent */
+					flags = FIEMAP_EXTENT_MERGED;
+					logical = blkoff << blkbits;
+					phys = blkphy << blkbits;
+					size = n << blkbits;
+				}
+			} else {
+				/* Start a new extent */
+				flags = FIEMAP_EXTENT_MERGED;
+				logical = blkoff << blkbits;
+				phys = blkphy << blkbits;
+				size = n << blkbits;
+			}
+			blkoff += n;
+		}
+		cond_resched();
+	} while (true);
+
+	/* If ret is 1 then we just hit the end of the extent array */
+	if (ret == 1)
+		ret = 0;
+
+	mutex_unlock(&inode->i_mutex);
+	return ret;
+}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index b185e93..4967389 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -233,7 +233,7 @@
 	int ret;
 
 	down_read(&nilfs->ns_segctor_sem);
-	ret = nilfs_dat_get_vinfo(nilfs_dat_inode(nilfs), buf, size, nmembs);
+	ret = nilfs_dat_get_vinfo(nilfs->ns_dat, buf, size, nmembs);
 	up_read(&nilfs->ns_segctor_sem);
 	return ret;
 }
@@ -242,8 +242,7 @@
 nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
 			  void *buf, size_t size, size_t nmembs)
 {
-	struct inode *dat = nilfs_dat_inode(nilfs);
-	struct nilfs_bmap *bmap = NILFS_I(dat)->i_bmap;
+	struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
 	struct nilfs_bdesc *bdescs = buf;
 	int ret, i;
 
@@ -421,7 +420,7 @@
 	size_t nmembs = argv->v_nmembs;
 	int ret;
 
-	ret = nilfs_dat_freev(nilfs_dat_inode(nilfs), buf, nmembs);
+	ret = nilfs_dat_freev(nilfs->ns_dat, buf, nmembs);
 
 	return (ret < 0) ? ret : nmembs;
 }
@@ -430,8 +429,7 @@
 					 struct nilfs_argv *argv, void *buf)
 {
 	size_t nmembs = argv->v_nmembs;
-	struct inode *dat = nilfs_dat_inode(nilfs);
-	struct nilfs_bmap *bmap = NILFS_I(dat)->i_bmap;
+	struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap;
 	struct nilfs_bdesc *bdescs = buf;
 	int ret, i;
 
@@ -450,7 +448,7 @@
 			/* skip dead block */
 			continue;
 		if (bdescs[i].bd_level == 0) {
-			ret = nilfs_mdt_mark_block_dirty(dat,
+			ret = nilfs_mdt_mark_block_dirty(nilfs->ns_dat,
 							 bdescs[i].bd_offset);
 			if (ret < 0) {
 				WARN_ON(ret == -ENOENT);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 39a5b84..6a0e2a1 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -237,8 +237,6 @@
  *
  * %-ENOENT - the specified block does not exist (hole block)
  *
- * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
- *
  * %-EROFS - Read only filesystem (for create mode)
  */
 int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
@@ -273,8 +271,6 @@
  * %-ENOMEM - Insufficient memory available.
  *
  * %-EIO - I/O error
- *
- * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
  */
 int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
 {
@@ -350,8 +346,6 @@
  * %-EIO - I/O error
  *
  * %-ENOENT - the specified block does not exist (hole block)
- *
- * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
  */
 int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
 {
@@ -499,31 +493,29 @@
 	struct buffer_head *bh_frozen;
 	struct page *page;
 	int blkbits = inode->i_blkbits;
-	int ret = -ENOMEM;
 
 	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
 	if (!page)
-		return ret;
+		return -ENOMEM;
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, 1 << blkbits, 0);
 
 	bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);
-	if (bh_frozen) {
-		if (!buffer_uptodate(bh_frozen))
-			nilfs_copy_buffer(bh_frozen, bh);
-		if (list_empty(&bh_frozen->b_assoc_buffers)) {
-			list_add_tail(&bh_frozen->b_assoc_buffers,
-				      &shadow->frozen_buffers);
-			set_buffer_nilfs_redirected(bh);
-		} else {
-			brelse(bh_frozen); /* already frozen */
-		}
-		ret = 0;
+
+	if (!buffer_uptodate(bh_frozen))
+		nilfs_copy_buffer(bh_frozen, bh);
+	if (list_empty(&bh_frozen->b_assoc_buffers)) {
+		list_add_tail(&bh_frozen->b_assoc_buffers,
+			      &shadow->frozen_buffers);
+		set_buffer_nilfs_redirected(bh);
+	} else {
+		brelse(bh_frozen); /* already frozen */
 	}
+
 	unlock_page(page);
 	page_cache_release(page);
-	return ret;
+	return 0;
 }
 
 struct buffer_head *
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 6e9557e..9803427 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -577,6 +577,7 @@
 	.rename		= nilfs_rename,
 	.setattr	= nilfs_setattr,
 	.permission	= nilfs_permission,
+	.fiemap		= nilfs_fiemap,
 };
 
 const struct inode_operations nilfs_special_inode_operations = {
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index f7560da..777e8fd 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -190,11 +190,6 @@
 	return nilfs_test_transaction_flag(NILFS_TI_WRITER);
 }
 
-static inline struct inode *nilfs_dat_inode(const struct the_nilfs *nilfs)
-{
-	return nilfs->ns_dat;
-}
-
 /*
  * function prototype
  */
@@ -256,14 +251,14 @@
 extern void nilfs_truncate(struct inode *);
 extern void nilfs_evict_inode(struct inode *);
 extern int nilfs_setattr(struct dentry *, struct iattr *);
-int nilfs_permission(struct inode *inode, int mask);
-extern int nilfs_load_inode_block(struct nilfs_sb_info *, struct inode *,
-				  struct buffer_head **);
+int nilfs_permission(struct inode *inode, int mask, unsigned int flags);
+int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh);
 extern int nilfs_inode_dirty(struct inode *);
-extern int nilfs_set_file_dirty(struct nilfs_sb_info *, struct inode *,
-				unsigned);
+int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty);
 extern int nilfs_mark_inode_dirty(struct inode *);
 extern void nilfs_dirty_inode(struct inode *);
+int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+		 __u64 start, __u64 len);
 
 /* super.c */
 extern struct inode *nilfs_alloc_inode(struct super_block *);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a6c3c2e8..0c43241 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -491,7 +491,7 @@
 	}
 	return nc;
 }
- 
+
 void nilfs_mapping_init_once(struct address_space *mapping)
 {
 	memset(mapping, 0, sizeof(*mapping));
@@ -546,3 +546,87 @@
 	}
 	return TestClearPageDirty(page);
 }
+
+/**
+ * nilfs_find_uncommitted_extent - find extent of uncommitted data
+ * @inode: inode
+ * @start_blk: start block offset (in)
+ * @blkoff: start offset of the found extent (out)
+ *
+ * This function searches for an extent of buffers marked "delayed"
+ * which starts from a block offset equal to or larger than @start_blk.
+ * If such an extent is found, the start offset is stored in @blkoff
+ * and its length in blocks is returned.  Otherwise, zero is
+ * returned.
+ */
+unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
+					    sector_t start_blk,
+					    sector_t *blkoff)
+{
+	unsigned int i;
+	pgoff_t index;
+	unsigned int nblocks_in_page;
+	unsigned long length = 0;
+	sector_t b;
+	struct pagevec pvec;
+	struct page *page;
+
+	if (inode->i_mapping->nrpages == 0)
+		return 0;
+
+	index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+	pagevec_init(&pvec, 0);
+
+repeat:
+	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
+					pvec.pages);
+	if (pvec.nr == 0)
+		return length;
+
+	if (length > 0 && pvec.pages[0]->index > index)
+		goto out;
+
+	b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	i = 0;
+	do {
+		page = pvec.pages[i];
+
+		lock_page(page);
+		if (page_has_buffers(page)) {
+			struct buffer_head *bh, *head;
+
+			bh = head = page_buffers(page);
+			do {
+				if (b < start_blk)
+					continue;
+				if (buffer_delay(bh)) {
+					if (length == 0)
+						*blkoff = b;
+					length++;
+				} else if (length > 0) {
+					goto out_locked;
+				}
+			} while (++b, bh = bh->b_this_page, bh != head);
+		} else {
+			if (length > 0)
+				goto out_locked;
+
+			b += nblocks_in_page;
+		}
+		unlock_page(page);
+
+	} while (++i < pagevec_count(&pvec));
+
+	index = page->index + 1;
+	pagevec_release(&pvec);
+	cond_resched();
+	goto repeat;
+
+out_locked:
+	unlock_page(page);
+out:
+	pagevec_release(&pvec);
+	return length;
+}
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index fb9e8a8..622df27 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -66,6 +66,9 @@
 			struct backing_dev_info *bdi,
 			const struct address_space_operations *aops);
 unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
+unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
+					    sector_t start_blk,
+					    sector_t *blkoff);
 
 #define NILFS_PAGE_BUG(page, m, a...) \
 	do { nilfs_page_bug(page); BUG(); } while (0)
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 5d2711c2..3dfcd3b 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -535,7 +535,7 @@
 		if (unlikely(err))
 			goto failed_page;
 
-		err = nilfs_set_file_dirty(sbi, inode, 1);
+		err = nilfs_set_file_dirty(inode, 1);
 		if (unlikely(err))
 			goto failed_page;
 
diff --git a/fs/nilfs2/sb.h b/fs/nilfs2/sb.h
index 35a0715..7a17715 100644
--- a/fs/nilfs2/sb.h
+++ b/fs/nilfs2/sb.h
@@ -27,14 +27,6 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 
-/*
- * Mount options
- */
-struct nilfs_mount_options {
-	unsigned long mount_opt;
-	__u64 snapshot_cno;
-};
-
 struct the_nilfs;
 struct nilfs_sc_info;
 
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 687d090..55ebae5 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -504,17 +504,6 @@
 	return err;
 }
 
-static int nilfs_handle_bmap_error(int err, const char *fname,
-				   struct inode *inode, struct super_block *sb)
-{
-	if (err == -EINVAL) {
-		nilfs_error(sb, fname, "broken bmap (inode=%lu)\n",
-			    inode->i_ino);
-		err = -EIO;
-	}
-	return err;
-}
-
 /*
  * Callback functions that enumerate, mark, and collect dirty blocks
  */
@@ -524,9 +513,8 @@
 	int err;
 
 	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
-	if (unlikely(err < 0))
-		return nilfs_handle_bmap_error(err, __func__, inode,
-					       sci->sc_super);
+	if (err < 0)
+		return err;
 
 	err = nilfs_segctor_add_file_block(sci, bh, inode,
 					   sizeof(struct nilfs_binfo_v));
@@ -539,13 +527,7 @@
 				   struct buffer_head *bh,
 				   struct inode *inode)
 {
-	int err;
-
-	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
-	if (unlikely(err < 0))
-		return nilfs_handle_bmap_error(err, __func__, inode,
-					       sci->sc_super);
-	return 0;
+	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 }
 
 static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
@@ -588,9 +570,8 @@
 	int err;
 
 	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
-	if (unlikely(err < 0))
-		return nilfs_handle_bmap_error(err, __func__, inode,
-					       sci->sc_super);
+	if (err < 0)
+		return err;
 
 	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 	if (!err)
@@ -776,9 +757,8 @@
 		ret++;
 	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
 		ret++;
-	if (ret || nilfs_doing_gc())
-		if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs)))
-			ret++;
+	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
+		ret++;
 	return ret;
 }
 
@@ -814,7 +794,7 @@
 	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
 	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
 	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
-	nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
+	nilfs_mdt_clear_dirty(nilfs->ns_dat);
 }
 
 static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
@@ -923,7 +903,7 @@
 			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 	raw_sr->sr_flags = 0;
 
-	nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
+	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
 				 NILFS_SR_DAT_OFFSET(isz), 1);
 	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
 				 NILFS_SR_CPFILE_OFFSET(isz), 1);
@@ -1179,7 +1159,7 @@
 		sci->sc_stage.scnt++;  /* Fall through */
 	case NILFS_ST_DAT:
  dat_stage:
-		err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs),
+		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
 					      &nilfs_sc_dat_ops);
 		if (unlikely(err))
 			break;
@@ -1563,7 +1543,6 @@
 	return 0;
 
  failed_bmap:
-	err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super);
 	return err;
 }
 
@@ -1783,6 +1762,7 @@
 				if (!err) {
 					set_buffer_uptodate(bh);
 					clear_buffer_dirty(bh);
+					clear_buffer_delay(bh);
 					clear_buffer_nilfs_volatile(bh);
 				}
 				brelse(bh); /* for b_assoc_buffers */
@@ -1909,6 +1889,7 @@
 				    b_assoc_buffers) {
 			set_buffer_uptodate(bh);
 			clear_buffer_dirty(bh);
+			clear_buffer_delay(bh);
 			clear_buffer_nilfs_volatile(bh);
 			clear_buffer_nilfs_redirected(bh);
 			if (bh == segbuf->sb_super_root) {
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index f804d41..0994f6a7 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -47,7 +47,6 @@
 #include <linux/crc32.h>
 #include <linux/vfs.h>
 #include <linux/writeback.h>
-#include <linux/kobject.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
 #include "nilfs.h"
@@ -111,12 +110,17 @@
 		 const char *fmt, ...)
 {
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "NILFS error (device %s): %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (!(sb->s_flags & MS_RDONLY)) {
@@ -136,13 +140,17 @@
 void nilfs_warning(struct super_block *sb, const char *function,
 		   const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "NILFS warning (device %s): %s: ",
-	       sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_WARNING "NILFS warning (device %s): %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 }
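Editor's note on the two hunks above: nilfs_error() and nilfs_warning() move from a three-call printk()/vprintk()/printk() sequence to a single printk() using the %pV extension, so the prefix and the caller's message can no longer be interleaved with output from other CPUs. A minimal sketch of the same pattern, with a hypothetical helper name and filesystem prefix:

#include <linux/fs.h>
#include <linux/kernel.h>

static void example_fs_warn(struct super_block *sb, const char *function,
			    const char *fmt, ...)
{
	struct va_format vaf;	/* wraps the caller's format string and va_list */
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands vaf in place, so the whole message is one log line */
	printk(KERN_WARNING "examplefs (%s): %s: %pV\n",
	       sb->s_id, function, &vaf);
	va_end(args);
}

Callers keep the usual printf-style interface, e.g. example_fs_warn(sb, __func__, "broken bmap (inode=%lu)", ino), and get a single atomic log line.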
 
@@ -162,10 +170,13 @@
 	return &ii->vfs_inode;
 }
 
-void nilfs_destroy_inode(struct inode *inode)
+static void nilfs_i_callback(struct rcu_head *head)
 {
+	struct inode *inode = container_of(head, struct inode, i_rcu);
 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
+	INIT_LIST_HEAD(&inode->i_dentry);
+
 	if (mdi) {
 		kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
 		kfree(mdi);
@@ -173,6 +184,11 @@
 	kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
 }
 
+void nilfs_destroy_inode(struct inode *inode)
+{
+	call_rcu(&inode->i_rcu, nilfs_i_callback);
+}
+
 static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
@@ -838,7 +854,7 @@
 
 static int nilfs_tree_was_touched(struct dentry *root_dentry)
 {
-	return atomic_read(&root_dentry->d_count) > 1;
+	return root_dentry->d_count > 1;
 }
 
 /**
@@ -1002,11 +1018,11 @@
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	unsigned long old_sb_flags;
-	struct nilfs_mount_options old_opts;
+	unsigned long old_mount_opt;
 	int err;
 
 	old_sb_flags = sb->s_flags;
-	old_opts.mount_opt = sbi->s_mount_opt;
+	old_mount_opt = sbi->s_mount_opt;
 
 	if (!parse_options(data, sb, 1)) {
 		err = -EINVAL;
@@ -1075,7 +1091,7 @@
 
  restore_opts:
 	sb->s_flags = old_sb_flags;
-	sbi->s_mount_opt = old_opts.mount_opt;
+	sbi->s_mount_opt = old_mount_opt;
 	return err;
 }
 
@@ -1147,14 +1163,14 @@
 {
 	struct nilfs_super_data sd;
 	struct super_block *s;
-	fmode_t mode = FMODE_READ;
+	fmode_t mode = FMODE_READ | FMODE_EXCL;
 	struct dentry *root_dentry;
 	int err, s_new = false;
 
 	if (!(flags & MS_RDONLY))
 		mode |= FMODE_WRITE;
 
-	sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
 	if (IS_ERR(sd.bdev))
 		return ERR_CAST(sd.bdev);
 
@@ -1233,7 +1249,7 @@
 	}
 
 	if (!s_new)
-		close_bdev_exclusive(sd.bdev, mode);
+		blkdev_put(sd.bdev, mode);
 
 	return root_dentry;
 
@@ -1242,7 +1258,7 @@
 
  failed:
 	if (!s_new)
-		close_bdev_exclusive(sd.bdev, mode);
+		blkdev_put(sd.bdev, mode);
 	return ERR_PTR(err);
 }
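Editor's note: the mount/unmount hunks above switch from open_bdev_exclusive()/close_bdev_exclusive() to blkdev_get_by_path()/blkdev_put(), where exclusivity is requested via FMODE_EXCL plus a holder cookie (here the fs_type pointer). A hedged sketch of the open side, with a hypothetical helper name and holder:

#include <linux/blkdev.h>
#include <linux/fs.h>

static struct block_device *example_open_bdev(const char *dev_name,
					      bool readonly, void *holder)
{
	/* FMODE_EXCL makes this an exclusive open; `holder` identifies the
	 * claimant, and the same mode must be passed to blkdev_put() later. */
	fmode_t mode = FMODE_READ | FMODE_EXCL;

	if (!readonly)
		mode |= FMODE_WRITE;

	return blkdev_get_by_path(dev_name, mode, holder); /* ERR_PTR on failure */
}

Release mirrors the open with blkdev_put(bdev, mode), as both error and success paths above now do.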
 
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 0254be2..ad4ac60 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -329,7 +329,6 @@
 	printk(KERN_INFO "NILFS: recovery complete.\n");
 
  skip_recovery:
-	set_nilfs_loaded(nilfs);
 	nilfs_clear_recovery_info(&ri);
 	sbi->s_super->s_flags = s_flags;
 	return 0;
@@ -651,12 +650,11 @@
 
 int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
 {
-	struct inode *dat = nilfs_dat_inode(nilfs);
 	unsigned long ncleansegs;
 
-	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
+	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
-	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
+	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
 	return 0;
 }
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 69226e1..fd85e4c 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -36,8 +36,6 @@
 /* the_nilfs struct */
 enum {
 	THE_NILFS_INIT = 0,     /* Information from super_block is set */
-	THE_NILFS_LOADED,       /* Roll-back/roll-forward has done and
-				   the latest checkpoint was loaded */
 	THE_NILFS_DISCONTINUED,	/* 'next' pointer chain has broken */
 	THE_NILFS_GC_RUNNING,	/* gc process is running */
 	THE_NILFS_SB_DIRTY,	/* super block is dirty */
@@ -178,7 +176,6 @@
 }
 
 THE_NILFS_FNS(INIT, init)
-THE_NILFS_FNS(LOADED, loaded)
 THE_NILFS_FNS(DISCONTINUED, discontinued)
 THE_NILFS_FNS(GC_RUNNING, gc_running)
 THE_NILFS_FNS(SB_DIRTY, sb_dirty)
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
index 3ac36b7..7dceff0 100644
--- a/fs/notify/fanotify/Kconfig
+++ b/fs/notify/fanotify/Kconfig
@@ -6,7 +6,7 @@
 	---help---
 	   Say Y here to enable fanotify support.  fanotify is a file access
 	   notification system which differs from inotify in that it sends
-	   and open file descriptor to the userspace listener along with
+	   an open file descriptor to the userspace listener along with
 	   the event.
 
 	   If unsure, say Y.
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 20dc218..79b47cb 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -59,7 +59,7 @@
 	/* determine if the children should tell inode about their events */
 	watched = fsnotify_inode_watches_children(inode);
 
-	spin_lock(&dcache_lock);
+	spin_lock(&inode->i_lock);
 	/* run all of the dentries associated with this inode.  Since this is a
 	 * directory, there damn well better only be one item on this list */
 	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
@@ -68,19 +68,21 @@
 		/* run all of the children of the original inode and fix their
 		 * d_flags to indicate parental interest (their parent is the
 		 * original inode) */
+		spin_lock(&alias->d_lock);
 		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
 			if (!child->d_inode)
 				continue;
 
-			spin_lock(&child->d_lock);
+			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
 			if (watched)
 				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
 			else
 				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
 			spin_unlock(&child->d_lock);
 		}
+		spin_unlock(&alias->d_lock);
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 }
 
 /* Notify this dentry's parent about a child's events. */
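Editor's note: with dcache_lock gone, the hunk above protects the inode's alias list with inode->i_lock and walks each alias's children under the alias's d_lock, taking the child's d_lock with spin_lock_nested() so lockdep accepts the parent/child ordering. A condensed, hypothetical walker showing just the locking shape:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/spinlock.h>

static void example_walk_children(struct inode *inode)
{
	struct dentry *alias, *child;

	spin_lock(&inode->i_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)
				continue;
			/* nested class: the parent's d_lock is already held */
			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
			/* ... read or update child->d_flags here ... */
			spin_unlock(&child->d_lock);
		}
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}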
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 58b6be9..4ff028f 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@
 	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
 	     unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 113ebd9..f4b1057 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -1380,15 +1380,14 @@
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
- * when atomic and when not atomic.  This is ok because
- * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
- * and it is ok to call this when non-atomic.
- * Infact, the only difference between __copy_from_user_inatomic() and
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
+ * atomic and when not atomic.  This is ok because it calls
+ * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
+ * fact, the only difference between __copy_from_user_inatomic() and
  * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error.  And on many
- * architectures __copy_from_user_inatomic() is just defined to
- * __copy_from_user() so it makes no difference at all on those architectures.
+ * should not zero the tail of the buffer on error.  And on many architectures
+ * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
+ * makes no difference at all on those architectures.
  */
 static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
@@ -1409,28 +1408,28 @@
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
-			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-					*iov, *iov_ofs, len);
-			/*
-			 * Zero the rest of the target like __copy_from_user().
-			 */
-			memset(addr + ofs + copied, 0, len - copied);
-			kunmap(*pages);
+			copied = __ntfs_copy_from_user_iovec_inatomic(addr +
+					ofs, *iov, *iov_ofs, len);
 			if (unlikely(copied != len))
 				goto err_out;
+			kunmap(*pages);
 		}
 		total += len;
+		ntfs_set_next_iovec(iov, iov_ofs, len);
 		bytes -= len;
 		if (!bytes)
 			break;
-		ntfs_set_next_iovec(iov, iov_ofs, len);
 		ofs = 0;
 	} while (++pages < last_page);
 out:
 	return total;
 err_out:
-	total += copied;
+	BUG_ON(copied > len);
 	/* Zero the rest of the target like __copy_from_user(). */
+	memset(addr + ofs + copied, 0, len - copied);
+	kunmap(*pages);
+	total += copied;
+	ntfs_set_next_iovec(iov, iov_ofs, copied);
 	while (++pages < last_page) {
 		bytes -= len;
 		if (!bytes)
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 93622b1..a627ed8 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -332,6 +332,13 @@
 	return NULL;
 }
 
+static void ntfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
+}
+
 void ntfs_destroy_big_inode(struct inode *inode)
 {
 	ntfs_inode *ni = NTFS_I(inode);
@@ -340,7 +347,7 @@
 	BUG_ON(ni->page);
 	if (!atomic_dec_and_test(&ni->count))
 		BUG();
-	kmem_cache_free(ntfs_big_inode_cache, NTFS_I(inode));
+	call_rcu(&inode->i_rcu, ntfs_i_callback);
 }
 
 static inline ntfs_inode *ntfs_alloc_extent_inode(void)
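Editor's note: the nilfs2 and ntfs hunks above both move the final inode free behind an RCU grace period so RCU-walk path lookups never dereference a freed inode: ->destroy_inode() only queues a callback, and the callback does the kmem_cache_free(). A minimal sketch with hypothetical cache and container names:

#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* hypothetical per-filesystem inode container and its slab cache */
struct example_inode_info {
	struct inode vfs_inode;
};
static struct kmem_cache *example_inode_cachep;

static void example_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	/* as in the hunks above: reinitialize i_dentry, then free the container */
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(example_inode_cachep,
			container_of(inode, struct example_inode_info, vfs_inode));
}

static void example_destroy_inode(struct inode *inode)
{
	/* defer the actual free until after an RCU grace period */
	call_rcu(&inode->i_rcu, example_i_callback);
}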
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index a30ecacc..29099a0 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1,7 +1,7 @@
 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -3193,8 +3193,8 @@
 	ntfs_sysctl(0);
 }
 
-MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov");
+MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
 MODULE_VERSION(NTFS_VERSION);
 MODULE_LICENSE("GPL");
 #ifdef DEBUG
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 0d84066..ab152c0 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -51,7 +51,7 @@
 
 config OCFS2_FS_STATS
 	bool "OCFS2 statistics"
-	depends on OCFS2_FS
+	depends on OCFS2_FS && DEBUG_FS
 	default y
 	help
 	  This option allows some fs statistics to be captured. Enabling
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 3919150..704f6b1 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -291,13 +291,17 @@
 	return ret;
 }
 
-int ocfs2_check_acl(struct inode *inode, int mask)
+int ocfs2_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_super *osb;
 	struct buffer_head *di_bh = NULL;
 	struct posix_acl *acl;
 	int ret = -EAGAIN;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	osb = OCFS2_SB(inode->i_sb);
 	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
 		return ret;
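Editor's note: the hunk above adds the RCU-walk convention to ocfs2_check_acl(): a permission helper that may block must return -ECHILD when called with IPERM_FLAG_RCU, so the VFS drops back to ref-walk and calls it again in a sleepable context. Sketched with a hypothetical helper (the -EAGAIN fallback mirrors the hunk's "no ACL decision here" default):

#include <linux/errno.h>
#include <linux/fs.h>

static int example_check_acl(struct inode *inode, int mask, unsigned int flags)
{
	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;		/* cannot sleep during rcu-walk */

	/* slow path: safe to take locks, read buffers, etc. */
	return -EAGAIN;			/* fall back to normal permission checks */
}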
 
diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
index 5c5d31f..4fe7c9c 100644
--- a/fs/ocfs2/acl.h
+++ b/fs/ocfs2/acl.h
@@ -26,7 +26,7 @@
 	__le32 e_id;
 };
 
-extern int ocfs2_check_acl(struct inode *, int);
+extern int ocfs2_check_acl(struct inode *, int, unsigned int);
 extern int ocfs2_acl_chmod(struct inode *);
 extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
 			  struct buffer_head *, struct buffer_head *,
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 592fae5..e4984e2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -565,7 +565,6 @@
 	return ret;
 }
 
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
 static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
 					 struct ocfs2_extent_block *eb);
 static void ocfs2_adjust_rightmost_records(handle_t *handle,
@@ -5858,6 +5857,7 @@
 
 	ocfs2_journal_dirty(handle, tl_bh);
 
+	osb->truncated_clusters += num_clusters;
 bail:
 	mlog_exit(status);
 	return status;
@@ -5929,6 +5929,8 @@
 		i--;
 	}
 
+	osb->truncated_clusters = 0;
+
 bail:
 	mlog_exit(status);
 	return status;
@@ -7139,64 +7141,6 @@
 }
 
 /*
- * Expects the inode to already be locked.
- */
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
-			   struct inode *inode,
-			   struct buffer_head *fe_bh,
-			   struct ocfs2_truncate_context **tc)
-{
-	int status;
-	unsigned int new_i_clusters;
-	struct ocfs2_dinode *fe;
-	struct ocfs2_extent_block *eb;
-	struct buffer_head *last_eb_bh = NULL;
-
-	mlog_entry_void();
-
-	*tc = NULL;
-
-	new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
-						  i_size_read(inode));
-	fe = (struct ocfs2_dinode *) fe_bh->b_data;
-
-	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
-	     "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
-	     (unsigned long long)le64_to_cpu(fe->i_size));
-
-	*tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
-	if (!(*tc)) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-	ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
-
-	if (fe->id2.i_list.l_tree_depth) {
-		status = ocfs2_read_extent_block(INODE_CACHE(inode),
-						 le64_to_cpu(fe->i_last_eb_blk),
-						 &last_eb_bh);
-		if (status < 0) {
-			mlog_errno(status);
-			goto bail;
-		}
-		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
-	}
-
-	(*tc)->tc_last_eb_bh = last_eb_bh;
-
-	status = 0;
-bail:
-	if (status < 0) {
-		if (*tc)
-			ocfs2_free_truncate_context(*tc);
-		*tc = NULL;
-	}
-	mlog_exit_void();
-	return status;
-}
-
-/*
  * 'start' is inclusive, 'end' is not.
  */
 int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
@@ -7270,18 +7214,3 @@
 out:
 	return ret;
 }
-
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
-{
-	/*
-	 * The caller is responsible for completing deallocation
-	 * before freeing the context.
-	 */
-	if (tc->tc_dealloc.c_first_suballocator != NULL)
-		mlog(ML_NOTICE,
-		     "Truncate completion has non-empty dealloc context\n");
-
-	brelse(tc->tc_last_eb_bh);
-
-	kfree(tc);
-}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 55762b5..3bd08a0 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -228,10 +228,6 @@
 
 int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
 				  u64 range_start, u64 range_end);
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
-			   struct inode *inode,
-			   struct buffer_head *fe_bh,
-			   struct ocfs2_truncate_context **tc);
 int ocfs2_commit_truncate(struct ocfs2_super *osb,
 			  struct inode *inode,
 			  struct buffer_head *di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d7c554..1fbb0e2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1630,6 +1630,43 @@
 	return ret;
 }
 
+/*
+ * Try to flush the truncate log if we can free enough clusters from it.
+ * As for the return value, "< 0" means error, "0" means no space, and "1"
+ * means we have freed enough space and the caller can try to allocate again.
+ */
+static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+					  unsigned int needed)
+{
+	tid_t target;
+	int ret = 0;
+	unsigned int truncated_clusters;
+
+	mutex_lock(&osb->osb_tl_inode->i_mutex);
+	truncated_clusters = osb->truncated_clusters;
+	mutex_unlock(&osb->osb_tl_inode->i_mutex);
+
+	/*
+	 * Check whether we can succeed in allocating if we free
+	 * the truncate log.
+	 */
+	if (truncated_clusters < needed)
+		goto out;
+
+	ret = ocfs2_flush_truncate_log(osb);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
+		jbd2_log_wait_commit(osb->journal->j_journal, target);
+		ret = 1;
+	}
+out:
+	return ret;
+}
+
 int ocfs2_write_begin_nolock(struct file *filp,
 			     struct address_space *mapping,
 			     loff_t pos, unsigned len, unsigned flags,
@@ -1637,7 +1674,7 @@
 			     struct buffer_head *di_bh, struct page *mmap_page)
 {
 	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
-	unsigned int clusters_to_alloc, extents_to_split;
+	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
 	struct ocfs2_write_ctxt *wc;
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -1646,7 +1683,9 @@
 	struct ocfs2_alloc_context *meta_ac = NULL;
 	handle_t *handle;
 	struct ocfs2_extent_tree et;
+	int try_free = 1, ret1;
 
+try_again:
 	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
 	if (ret) {
 		mlog_errno(ret);
@@ -1681,6 +1720,7 @@
 		mlog_errno(ret);
 		goto out;
 	} else if (ret == 1) {
+		clusters_need = wc->w_clen;
 		ret = ocfs2_refcount_cow(inode, filp, di_bh,
 					 wc->w_cpos, wc->w_clen, UINT_MAX);
 		if (ret) {
@@ -1695,6 +1735,7 @@
 		mlog_errno(ret);
 		goto out;
 	}
+	clusters_need += clusters_to_alloc;
 
 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
 
@@ -1817,6 +1858,22 @@
 		ocfs2_free_alloc_context(data_ac);
 	if (meta_ac)
 		ocfs2_free_alloc_context(meta_ac);
+
+	if (ret == -ENOSPC && try_free) {
+		/*
+		 * Try to free some truncate log so that we can have enough
+		 * clusters to allocate.
+		 */
+		try_free = 0;
+
+		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
+		if (ret1 == 1)
+			goto try_again;
+
+		if (ret1 < 0)
+			mlog_errno(ret1);
+	}
+
 	return ret;
 }
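Editor's note: the aops.c hunks above teach ocfs2_write_begin_nolock() to retry exactly once after -ENOSPC, first asking the truncate log to give clusters back and waiting for the journal commit that makes them reusable. The control flow, boiled down to a hypothetical self-contained helper (the alloc and reclaim callbacks stand in for the real ocfs2 paths):

#include <linux/errno.h>

static int example_alloc_with_reclaim(int (*alloc)(unsigned int clusters),
				      int (*reclaim)(unsigned int clusters),
				      unsigned int clusters_need)
{
	int try_free = 1;
	int ret;

again:
	ret = alloc(clusters_need);
	if (ret == -ENOSPC && try_free) {
		try_free = 0;			/* retry at most once */
		/* reclaim() returns 1 when enough space was actually freed */
		if (reclaim(clusters_need) == 1)
			goto again;
	}
	return ret;
}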
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9f26ac9..b108e86 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -82,6 +82,7 @@
 #define O2HB_DB_TYPE_REGION_LIVENODES	4
 #define O2HB_DB_TYPE_REGION_NUMBER	5
 #define O2HB_DB_TYPE_REGION_ELAPSED_TIME	6
+#define O2HB_DB_TYPE_REGION_PINNED	7
 struct o2hb_debug_buf {
 	int db_type;
 	int db_size;
@@ -101,6 +102,7 @@
 #define O2HB_DEBUG_FAILEDREGIONS	"failed_regions"
 #define O2HB_DEBUG_REGION_NUMBER	"num"
 #define O2HB_DEBUG_REGION_ELAPSED_TIME	"elapsed_time_in_ms"
+#define O2HB_DEBUG_REGION_PINNED	"pinned"
 
 static struct dentry *o2hb_debug_dir;
 static struct dentry *o2hb_debug_livenodes;
@@ -132,6 +134,33 @@
 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
 unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
 
+/*
+ * o2hb_dependent_users tracks the number of registered callbacks that depend
+ * on heartbeat. o2net and o2dlm are the two entities that register this
+ * callback. However, only o2dlm actually depends on the heartbeat; it does
+ * not want the heartbeat to stop while a dlm domain is still active.
+ */
+unsigned int o2hb_dependent_users;
+
+/*
+ * In global heartbeat mode, all regions are pinned if there are one or more
+ * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
+ * regions are unpinned if the region count exceeds the cut off or the number
+ * of dependent users falls to zero.
+ */
+#define O2HB_PIN_CUT_OFF		3
+
+/*
+ * In local heartbeat mode, we assume the dlm domain name to be the same as
+ * region uuid. This is true for domains created for the file system but not
+ * necessarily true for userdlm domains. This is a known limitation.
+ *
+ * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
+ * works for both file system and userdlm domains.
+ */
+static int o2hb_region_pin(const char *region_uuid);
+static void o2hb_region_unpin(const char *region_uuid);
+
 /* Only sets a new threshold if there are no active regions.
  *
  * No locking or otherwise interesting code is required for reading
@@ -186,7 +215,9 @@
 	struct config_item	hr_item;
 
 	struct list_head	hr_all_item;
-	unsigned		hr_unclean_stop:1;
+	unsigned		hr_unclean_stop:1,
+				hr_item_pinned:1,
+				hr_item_dropped:1;
 
 	/* protected by the hr_callback_sem */
 	struct task_struct 	*hr_task;
@@ -212,9 +243,11 @@
 	struct dentry		*hr_debug_livenodes;
 	struct dentry		*hr_debug_regnum;
 	struct dentry		*hr_debug_elapsed_time;
+	struct dentry		*hr_debug_pinned;
 	struct o2hb_debug_buf	*hr_db_livenodes;
 	struct o2hb_debug_buf	*hr_db_regnum;
 	struct o2hb_debug_buf	*hr_db_elapsed_time;
+	struct o2hb_debug_buf	*hr_db_pinned;
 
 	/* let the person setting up hb wait for it to return until it
 	 * has reached a 'steady' state.  This will be fixed when we have
@@ -307,8 +340,7 @@
 
 static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
 {
-	cancel_delayed_work(&reg->hr_write_timeout_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&reg->hr_write_timeout_work);
 }
 
 static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
@@ -702,6 +734,14 @@
 	       config_item_name(&reg->hr_item));
 
 	set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+
+	/*
+	 * If global heartbeat active, unpin all regions if the
+	 * region count > CUT_OFF
+	 */
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
+		o2hb_region_unpin(NULL);
 }
 
 static int o2hb_check_slot(struct o2hb_region *reg,
@@ -1042,6 +1082,9 @@
 
 	set_user_nice(current, -20);
 
+	/* Pin node */
+	o2nm_depend_this_node();
+
 	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
 		/* We track the time spent inside
 		 * o2hb_do_disk_heartbeat so that we avoid more than
@@ -1091,6 +1134,9 @@
 		mlog_errno(ret);
 	}
 
+	/* Unpin node */
+	o2nm_undepend_this_node();
+
 	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
 
 	return 0;
@@ -1143,6 +1189,12 @@
 						 reg->hr_last_timeout_start));
 		goto done;
 
+	case O2HB_DB_TYPE_REGION_PINNED:
+		reg = (struct o2hb_region *)db->db_data;
+		out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
+				!!reg->hr_item_pinned);
+		goto done;
+
 	default:
 		goto done;
 	}
@@ -1316,6 +1368,8 @@
 	memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap));
 	memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap));
 
+	o2hb_dependent_users = 0;
+
 	return o2hb_debug_init();
 }
 
@@ -1385,6 +1439,7 @@
 	debugfs_remove(reg->hr_debug_livenodes);
 	debugfs_remove(reg->hr_debug_regnum);
 	debugfs_remove(reg->hr_debug_elapsed_time);
+	debugfs_remove(reg->hr_debug_pinned);
 	debugfs_remove(reg->hr_debug_dir);
 
 	spin_lock(&o2hb_live_lock);
@@ -1674,7 +1729,7 @@
 		goto out;
 
 	reg->hr_bdev = I_BDEV(filp->f_mapping->host);
-	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
+	ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL);
 	if (ret) {
 		reg->hr_bdev = NULL;
 		goto out;
@@ -1949,6 +2004,18 @@
 		goto bail;
 	}
 
+	reg->hr_debug_pinned =
+			o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
+					  reg->hr_debug_dir,
+					  &(reg->hr_db_pinned),
+					  sizeof(*(reg->hr_db_pinned)),
+					  O2HB_DB_TYPE_REGION_PINNED,
+					  0, 0, reg);
+	if (!reg->hr_debug_pinned) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
 	ret = 0;
 bail:
 	return ret;
@@ -2003,15 +2070,20 @@
 {
 	struct task_struct *hb_task;
 	struct o2hb_region *reg = to_o2hb_region(item);
+	int quorum_region = 0;
 
 	/* stop the thread when the user removes the region dir */
 	spin_lock(&o2hb_live_lock);
 	if (o2hb_global_heartbeat_active()) {
 		clear_bit(reg->hr_region_num, o2hb_region_bitmap);
 		clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+		if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+			quorum_region = 1;
+		clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
 	}
 	hb_task = reg->hr_task;
 	reg->hr_task = NULL;
+	reg->hr_item_dropped = 1;
 	spin_unlock(&o2hb_live_lock);
 
 	if (hb_task)
@@ -2029,7 +2101,27 @@
 	if (o2hb_global_heartbeat_active())
 		printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
 		       config_item_name(&reg->hr_item));
+
 	config_item_put(item);
+
+	if (!o2hb_global_heartbeat_active() || !quorum_region)
+		return;
+
+	/*
+	 * If global heartbeat active and there are dependent users,
+	 * pin all regions if quorum region count <= CUT_OFF
+	 */
+	spin_lock(&o2hb_live_lock);
+
+	if (!o2hb_dependent_users)
+		goto unlock;
+
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+		o2hb_region_pin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
 }
 
 struct o2hb_heartbeat_group_attribute {
@@ -2215,63 +2307,138 @@
 }
 EXPORT_SYMBOL_GPL(o2hb_setup_callback);
 
-static struct o2hb_region *o2hb_find_region(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only pin the matching region. In global we pin all the active
+ * regions.
+ */
+static int o2hb_region_pin(const char *region_uuid)
 {
-	struct o2hb_region *p, *reg = NULL;
+	int ret = 0, found = 0;
+	struct o2hb_region *reg;
+	char *uuid;
 
 	assert_spin_locked(&o2hb_live_lock);
 
-	list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
-		if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
-			reg = p;
-			break;
+	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+		uuid = config_item_name(&reg->hr_item);
+
+		/* local heartbeat */
+		if (region_uuid) {
+			if (strcmp(region_uuid, uuid))
+				continue;
+			found = 1;
 		}
+
+		if (reg->hr_item_pinned || reg->hr_item_dropped)
+			goto skip_pin;
+
+		/* Ignore ENOENT only for local hb (userdlm domain) */
+		ret = o2nm_depend_item(&reg->hr_item);
+		if (!ret) {
+			mlog(ML_CLUSTER, "Pin region %s\n", uuid);
+			reg->hr_item_pinned = 1;
+		} else {
+			if (ret == -ENOENT && found)
+				ret = 0;
+			else {
+				mlog(ML_ERROR, "Pin region %s fails with %d\n",
+				     uuid, ret);
+				break;
+			}
+		}
+skip_pin:
+		if (found)
+			break;
 	}
 
-	return reg;
-}
-
-static int o2hb_region_get(const char *region_uuid)
-{
-	int ret = 0;
-	struct o2hb_region *reg;
-
-	spin_lock(&o2hb_live_lock);
-
-	reg = o2hb_find_region(region_uuid);
-	if (!reg)
-		ret = -ENOENT;
-	spin_unlock(&o2hb_live_lock);
-
-	if (ret)
-		goto out;
-
-	ret = o2nm_depend_this_node();
-	if (ret)
-		goto out;
-
-	ret = o2nm_depend_item(&reg->hr_item);
-	if (ret)
-		o2nm_undepend_this_node();
-
-out:
 	return ret;
 }
 
-static void o2hb_region_put(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only unpin the matching region. In global we unpin all the
+ * active regions.
+ */
+static void o2hb_region_unpin(const char *region_uuid)
 {
 	struct o2hb_region *reg;
+	char *uuid;
+	int found = 0;
+
+	assert_spin_locked(&o2hb_live_lock);
+
+	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+		uuid = config_item_name(&reg->hr_item);
+		if (region_uuid) {
+			if (strcmp(region_uuid, uuid))
+				continue;
+			found = 1;
+		}
+
+		if (reg->hr_item_pinned) {
+			mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
+			o2nm_undepend_item(&reg->hr_item);
+			reg->hr_item_pinned = 0;
+		}
+		if (found)
+			break;
+	}
+}
+
+static int o2hb_region_inc_user(const char *region_uuid)
+{
+	int ret = 0;
 
 	spin_lock(&o2hb_live_lock);
 
-	reg = o2hb_find_region(region_uuid);
-
-	spin_unlock(&o2hb_live_lock);
-
-	if (reg) {
-		o2nm_undepend_item(&reg->hr_item);
-		o2nm_undepend_this_node();
+	/* local heartbeat */
+	if (!o2hb_global_heartbeat_active()) {
+		ret = o2hb_region_pin(region_uuid);
+		goto unlock;
 	}
+
+	/*
+	 * if global heartbeat active and this is the first dependent user,
+	 * pin all regions if quorum region count <= CUT_OFF
+	 */
+	o2hb_dependent_users++;
+	if (o2hb_dependent_users > 1)
+		goto unlock;
+
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+		ret = o2hb_region_pin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
+	return ret;
+}
+
+void o2hb_region_dec_user(const char *region_uuid)
+{
+	spin_lock(&o2hb_live_lock);
+
+	/* local heartbeat */
+	if (!o2hb_global_heartbeat_active()) {
+		o2hb_region_unpin(region_uuid);
+		goto unlock;
+	}
+
+	/*
+	 * if global heartbeat active and there are no dependent users,
+	 * unpin all quorum regions
+	 */
+	o2hb_dependent_users--;
+	if (!o2hb_dependent_users)
+		o2hb_region_unpin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
 }
 
 int o2hb_register_callback(const char *region_uuid,
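Editor's note: o2hb_region_inc_user()/o2hb_region_dec_user() above implement a first-user/last-user scheme: the dependent-user count and the pin state are updated under the same spinlock, the first user pins and the last user unpins (with the extra quorum-count cutoff in global mode, which this sketch omits). Reduced to its skeleton with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int example_users;

static void example_pin_all(bool pin)
{
	/* stands in for o2hb_region_pin(NULL) / o2hb_region_unpin(NULL) */
}

static void example_inc_user(void)
{
	spin_lock(&example_lock);
	if (++example_users == 1)	/* first dependent user pins */
		example_pin_all(true);
	spin_unlock(&example_lock);
}

static void example_dec_user(void)
{
	spin_lock(&example_lock);
	if (--example_users == 0)	/* last dependent user unpins */
		example_pin_all(false);
	spin_unlock(&example_lock);
}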
@@ -2292,9 +2459,11 @@
 	}
 
 	if (region_uuid) {
-		ret = o2hb_region_get(region_uuid);
-		if (ret)
+		ret = o2hb_region_inc_user(region_uuid);
+		if (ret) {
+			mlog_errno(ret);
 			goto out;
+		}
 	}
 
 	down_write(&o2hb_callback_sem);
@@ -2312,7 +2481,7 @@
 	up_write(&o2hb_callback_sem);
 	ret = 0;
 out:
-	mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
+	mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
 	     ret, __builtin_return_address(0), hc);
 	return ret;
 }
@@ -2323,7 +2492,7 @@
 {
 	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
 
-	mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
+	mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
 	     __builtin_return_address(0), hc);
 
 	/* XXX Can this happen _with_ a region reference? */
@@ -2331,7 +2500,7 @@
 		return;
 
 	if (region_uuid)
-		o2hb_region_put(region_uuid);
+		o2hb_region_dec_user(region_uuid);
 
 	down_write(&o2hb_callback_sem);
 
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index a3f150e..3a58359 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -46,10 +46,15 @@
 #define O2NET_DEBUG_DIR		"o2net"
 #define SC_DEBUG_NAME		"sock_containers"
 #define NST_DEBUG_NAME		"send_tracking"
+#define STATS_DEBUG_NAME	"stats"
+
+#define SHOW_SOCK_CONTAINERS	0
+#define SHOW_SOCK_STATS		1
 
 static struct dentry *o2net_dentry;
 static struct dentry *sc_dentry;
 static struct dentry *nst_dentry;
+static struct dentry *stats_dentry;
 
 static DEFINE_SPINLOCK(o2net_debug_lock);
 
@@ -123,37 +128,42 @@
 static int nst_seq_show(struct seq_file *seq, void *v)
 {
 	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+	ktime_t now;
+	s64 sock, send, status;
 
 	spin_lock(&o2net_debug_lock);
 	nst = next_nst(dummy_nst);
+	if (!nst)
+		goto out;
 
-	if (nst != NULL) {
-		/* get_task_comm isn't exported.  oh well. */
-		seq_printf(seq, "%p:\n"
-			   "  pid:          %lu\n"
-			   "  tgid:         %lu\n"
-			   "  process name: %s\n"
-			   "  node:         %u\n"
-			   "  sc:           %p\n"
-			   "  message id:   %d\n"
-			   "  message type: %u\n"
-			   "  message key:  0x%08x\n"
-			   "  sock acquiry: %lu.%ld\n"
-			   "  send start:   %lu.%ld\n"
-			   "  wait start:   %lu.%ld\n",
-			   nst, (unsigned long)nst->st_task->pid,
-			   (unsigned long)nst->st_task->tgid,
-			   nst->st_task->comm, nst->st_node,
-			   nst->st_sc, nst->st_id, nst->st_msg_type,
-			   nst->st_msg_key,
-			   nst->st_sock_time.tv_sec,
-			   (long)nst->st_sock_time.tv_usec,
-			   nst->st_send_time.tv_sec,
-			   (long)nst->st_send_time.tv_usec,
-			   nst->st_status_time.tv_sec,
-			   (long)nst->st_status_time.tv_usec);
-	}
+	now = ktime_get();
+	sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
+	send = ktime_to_us(ktime_sub(now, nst->st_send_time));
+	status = ktime_to_us(ktime_sub(now, nst->st_status_time));
 
+	/* get_task_comm isn't exported.  oh well. */
+	seq_printf(seq, "%p:\n"
+		   "  pid:          %lu\n"
+		   "  tgid:         %lu\n"
+		   "  process name: %s\n"
+		   "  node:         %u\n"
+		   "  sc:           %p\n"
+		   "  message id:   %d\n"
+		   "  message type: %u\n"
+		   "  message key:  0x%08x\n"
+		   "  sock acquiry: %lld usecs ago\n"
+		   "  send start:   %lld usecs ago\n"
+		   "  wait start:   %lld usecs ago\n",
+		   nst, (unsigned long)task_pid_nr(nst->st_task),
+		   (unsigned long)nst->st_task->tgid,
+		   nst->st_task->comm, nst->st_node,
+		   nst->st_sc, nst->st_id, nst->st_msg_type,
+		   nst->st_msg_key,
+		   (long long)sock,
+		   (long long)send,
+		   (long long)status);
+
+out:
 	spin_unlock(&o2net_debug_lock);
 
 	return 0;
@@ -228,6 +238,11 @@
 	spin_unlock(&o2net_debug_lock);
 }
 
+struct o2net_sock_debug {
+	int dbg_ctxt;
+	struct o2net_sock_container *dbg_sock;
+};
+
 static struct o2net_sock_container
 			*next_sc(struct o2net_sock_container *sc_start)
 {
@@ -253,7 +268,8 @@
 
 static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
@@ -264,7 +280,8 @@
 
 static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
@@ -276,65 +293,107 @@
 	return sc; /* unused, just needs to be null when done */
 }
 
-#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec
+#ifdef CONFIG_OCFS2_FS_STATS
+# define sc_send_count(_s)		((_s)->sc_send_count)
+# define sc_recv_count(_s)		((_s)->sc_recv_count)
+# define sc_tv_acquiry_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_acquiry_total))
+# define sc_tv_send_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_send_total))
+# define sc_tv_status_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_status_total))
+# define sc_tv_process_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_process_total))
+#else
+# define sc_send_count(_s)		(0U)
+# define sc_recv_count(_s)		(0U)
+# define sc_tv_acquiry_total_ns(_s)	(0LL)
+# define sc_tv_send_total_ns(_s)	(0LL)
+# define sc_tv_status_total_ns(_s)	(0LL)
+# define sc_tv_process_total_ns(_s)	(0LL)
+#endif
+
+/* So that debugfs.ocfs2 can determine which format is being used */
+#define O2NET_STATS_STR_VERSION		1
+static void sc_show_sock_stats(struct seq_file *seq,
+			       struct o2net_sock_container *sc)
+{
+	if (!sc)
+		return;
+
+	seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
+		   sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
+		   (long long)sc_tv_acquiry_total_ns(sc),
+		   (long long)sc_tv_send_total_ns(sc),
+		   (long long)sc_tv_status_total_ns(sc),
+		   (unsigned long)sc_recv_count(sc),
+		   (long long)sc_tv_process_total_ns(sc));
+}
+
+static void sc_show_sock_container(struct seq_file *seq,
+				   struct o2net_sock_container *sc)
+{
+	struct inet_sock *inet = NULL;
+	__be32 saddr = 0, daddr = 0;
+	__be16 sport = 0, dport = 0;
+
+	if (!sc)
+		return;
+
+	if (sc->sc_sock) {
+		inet = inet_sk(sc->sc_sock->sk);
+		/* the stack's structs aren't sparse endian clean */
+		saddr = (__force __be32)inet->inet_saddr;
+		daddr = (__force __be32)inet->inet_daddr;
+		sport = (__force __be16)inet->inet_sport;
+		dport = (__force __be16)inet->inet_dport;
+	}
+
+	/* XXX sigh, inet-> doesn't have sparse annotation so any
+	 * use of it here generates a warning with -Wbitwise */
+	seq_printf(seq, "%p:\n"
+		   "  krefs:           %d\n"
+		   "  sock:            %pI4:%u -> "
+				      "%pI4:%u\n"
+		   "  remote node:     %s\n"
+		   "  page off:        %zu\n"
+		   "  handshake ok:    %u\n"
+		   "  timer:           %lld usecs\n"
+		   "  data ready:      %lld usecs\n"
+		   "  advance start:   %lld usecs\n"
+		   "  advance stop:    %lld usecs\n"
+		   "  func start:      %lld usecs\n"
+		   "  func stop:       %lld usecs\n"
+		   "  func key:        0x%08x\n"
+		   "  func type:       %u\n",
+		   sc,
+		   atomic_read(&sc->sc_kref.refcount),
+		   &saddr, inet ? ntohs(sport) : 0,
+		   &daddr, inet ? ntohs(dport) : 0,
+		   sc->sc_node->nd_name,
+		   sc->sc_page_off,
+		   sc->sc_handshake_ok,
+		   (long long)ktime_to_us(sc->sc_tv_timer),
+		   (long long)ktime_to_us(sc->sc_tv_data_ready),
+		   (long long)ktime_to_us(sc->sc_tv_advance_start),
+		   (long long)ktime_to_us(sc->sc_tv_advance_stop),
+		   (long long)ktime_to_us(sc->sc_tv_func_start),
+		   (long long)ktime_to_us(sc->sc_tv_func_stop),
+		   sc->sc_msg_key,
+		   sc->sc_msg_type);
+}
 
 static int sc_seq_show(struct seq_file *seq, void *v)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
 
-	if (sc != NULL) {
-		struct inet_sock *inet = NULL;
-
-		__be32 saddr = 0, daddr = 0;
-		__be16 sport = 0, dport = 0;
-
-		if (sc->sc_sock) {
-			inet = inet_sk(sc->sc_sock->sk);
-			/* the stack's structs aren't sparse endian clean */
-			saddr = (__force __be32)inet->inet_saddr;
-			daddr = (__force __be32)inet->inet_daddr;
-			sport = (__force __be16)inet->inet_sport;
-			dport = (__force __be16)inet->inet_dport;
-		}
-
-		/* XXX sigh, inet-> doesn't have sparse annotation so any
-		 * use of it here generates a warning with -Wbitwise */
-		seq_printf(seq, "%p:\n"
-			   "  krefs:           %d\n"
-			   "  sock:            %pI4:%u -> "
-					      "%pI4:%u\n"
-			   "  remote node:     %s\n"
-			   "  page off:        %zu\n"
-			   "  handshake ok:    %u\n"
-			   "  timer:           %lu.%ld\n"
-			   "  data ready:      %lu.%ld\n"
-			   "  advance start:   %lu.%ld\n"
-			   "  advance stop:    %lu.%ld\n"
-			   "  func start:      %lu.%ld\n"
-			   "  func stop:       %lu.%ld\n"
-			   "  func key:        %u\n"
-			   "  func type:       %u\n",
-			   sc,
-			   atomic_read(&sc->sc_kref.refcount),
-			   &saddr, inet ? ntohs(sport) : 0,
-			   &daddr, inet ? ntohs(dport) : 0,
-			   sc->sc_node->nd_name,
-			   sc->sc_page_off,
-			   sc->sc_handshake_ok,
-			   TV_SEC_USEC(sc->sc_tv_timer),
-			   TV_SEC_USEC(sc->sc_tv_data_ready),
-			   TV_SEC_USEC(sc->sc_tv_advance_start),
-			   TV_SEC_USEC(sc->sc_tv_advance_stop),
-			   TV_SEC_USEC(sc->sc_tv_func_start),
-			   TV_SEC_USEC(sc->sc_tv_func_stop),
-			   sc->sc_msg_key,
-			   sc->sc_msg_type);
+	if (sc) {
+		if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
+			sc_show_sock_container(seq, sc);
+		else
+			sc_show_sock_stats(seq, sc);
 	}
 
-
 	spin_unlock(&o2net_debug_lock);
 
 	return 0;
@@ -351,7 +410,7 @@
 	.show = sc_seq_show,
 };
 
-static int sc_fop_open(struct inode *inode, struct file *file)
+static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
 {
 	struct o2net_sock_container *dummy_sc;
 	struct seq_file *seq;
@@ -369,7 +428,8 @@
 		goto out;
 
 	seq = file->private_data;
-	seq->private = dummy_sc;
+	seq->private = sd;
+	sd->dbg_sock = dummy_sc;
 	o2net_debug_add_sc(dummy_sc);
 
 	dummy_sc = NULL;
@@ -382,12 +442,48 @@
 static int sc_fop_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = file->private_data;
-	struct o2net_sock_container *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *dummy_sc = sd->dbg_sock;
 
 	o2net_debug_del_sc(dummy_sc);
 	return seq_release_private(inode, file);
 }
 
+static int stats_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_debug *sd;
+
+	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+	if (sd == NULL)
+		return -ENOMEM;
+
+	sd->dbg_ctxt = SHOW_SOCK_STATS;
+	sd->dbg_sock = NULL;
+
+	return sc_common_open(file, sd);
+}
+
+static const struct file_operations stats_seq_fops = {
+	.open = stats_fop_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = sc_fop_release,
+};
+
+static int sc_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_debug *sd;
+
+	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+	if (sd == NULL)
+		return -ENOMEM;
+
+	sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
+	sd->dbg_sock = NULL;
+
+	return sc_common_open(file, sd);
+}
+
 static const struct file_operations sc_seq_fops = {
 	.open = sc_fop_open,
 	.read = seq_read,
@@ -419,25 +515,29 @@
 		goto bail;
 	}
 
+	stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
+					   o2net_dentry, NULL,
+					   &stats_seq_fops);
+	if (!stats_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
 	return 0;
 bail:
-	if (sc_dentry)
-		debugfs_remove(sc_dentry);
-	if (nst_dentry)
-		debugfs_remove(nst_dentry);
-	if (o2net_dentry)
-		debugfs_remove(o2net_dentry);
+	debugfs_remove(stats_dentry);
+	debugfs_remove(sc_dentry);
+	debugfs_remove(nst_dentry);
+	debugfs_remove(o2net_dentry);
 	return -ENOMEM;
 }
 
 void o2net_debugfs_exit(void)
 {
-	if (sc_dentry)
-		debugfs_remove(sc_dentry);
-	if (nst_dentry)
-		debugfs_remove(nst_dentry);
-	if (o2net_dentry)
-		debugfs_remove(o2net_dentry);
+	debugfs_remove(stats_dentry);
+	debugfs_remove(sc_dentry);
+	debugfs_remove(nst_dentry);
+	debugfs_remove(o2net_dentry);
 }
 
 #endif	/* CONFIG_DEBUG_FS */
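Editor's note: the cleanup hunks above can drop the per-dentry NULL checks because debugfs_remove() ignores a NULL dentry, so the error and exit paths simply remove everything unconditionally. A tiny sketch with hypothetical dentries:

#include <linux/debugfs.h>

static struct dentry *example_dir, *example_file;

static void example_debugfs_exit(void)
{
	/* debugfs_remove() is a no-op for NULL, so no need to test first;
	 * remove children before their parent directory. */
	debugfs_remove(example_file);
	debugfs_remove(example_dir);
}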
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index cf3e166..a873667 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -325,5 +325,7 @@
 
 void o2quo_exit(void)
 {
-	flush_scheduled_work();
+	struct o2quo_state *qs = &o2quo_state;
+
+	flush_work_sync(&qs->qs_work);
 }
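Editor's note: this quorum.c hunk and the earlier heartbeat.c hunk stop flushing the entire shared workqueue on teardown and instead wait only on the work items they own, via flush_work_sync() and cancel_delayed_work_sync(). A small sketch of the shutdown pattern with hypothetical work items:

#include <linux/workqueue.h>

static void example_shutdown(struct work_struct *oneshot,
			     struct delayed_work *timer_work)
{
	/* wait for an already-queued or running one-shot item to finish */
	flush_work_sync(oneshot);

	/* cancel a pending delayed item and wait if it is already running */
	cancel_delayed_work_sync(timer_work);
}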
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9aa426e..3b11cb1 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -153,63 +153,114 @@
 	nst->st_node = node;
 }
 
-static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_sock_time);
-}
-
-static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_send_time);
-}
-
-static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_status_time);
-}
-
-static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
-					 struct o2net_sock_container *sc)
-{
-	nst->st_sc = sc;
-}
-
-static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
-{
-	nst->st_id = msg_id;
-}
-
-#else  /* CONFIG_DEBUG_FS */
-
-static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
-				  u32 msgkey, struct task_struct *task, u8 node)
-{
-}
-
 static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
 {
+	nst->st_sock_time = ktime_get();
 }
 
 static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
 {
+	nst->st_send_time = ktime_get();
 }
 
 static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
 {
+	nst->st_status_time = ktime_get();
 }
 
 static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
 						struct o2net_sock_container *sc)
 {
+	nst->st_sc = sc;
 }
 
 static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
 					u32 msg_id)
 {
+	nst->st_id = msg_id;
 }
 
+static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_timer = ktime_get();
+}
+
+static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_data_ready = ktime_get();
+}
+
+static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_advance_start = ktime_get();
+}
+
+static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_advance_stop = ktime_get();
+}
+
+static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_func_start = ktime_get();
+}
+
+static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_func_stop = ktime_get();
+}
+
+static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
+{
+	return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+#else  /* CONFIG_DEBUG_FS */
+# define o2net_init_nst(a, b, c, d, e)
+# define o2net_set_nst_sock_time(a)
+# define o2net_set_nst_send_time(a)
+# define o2net_set_nst_status_time(a)
+# define o2net_set_nst_sock_container(a, b)
+# define o2net_set_nst_msg_id(a, b)
+# define o2net_set_sock_timer(a)
+# define o2net_set_data_ready_time(a)
+# define o2net_set_advance_start_time(a)
+# define o2net_set_advance_stop_time(a)
+# define o2net_set_func_start_time(a)
+# define o2net_set_func_stop_time(a)
+# define o2net_get_func_run_time(a)		(ktime_t)0
 #endif /* CONFIG_DEBUG_FS */
 
+#ifdef CONFIG_OCFS2_FS_STATS
+static void o2net_update_send_stats(struct o2net_send_tracking *nst,
+				    struct o2net_sock_container *sc)
+{
+	sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
+					   ktime_sub(ktime_get(),
+						     nst->st_status_time));
+	sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
+					 ktime_sub(nst->st_status_time,
+						   nst->st_send_time));
+	sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
+					    ktime_sub(nst->st_send_time,
+						      nst->st_sock_time));
+	sc->sc_send_count++;
+}
+
+static void o2net_update_recv_stats(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
+					    o2net_get_func_run_time(sc));
+	sc->sc_recv_count++;
+}
+
+#else
+
+# define o2net_update_send_stats(a, b)
+
+# define o2net_update_recv_stats(sc)
+
+#endif /* CONFIG_OCFS2_FS_STATS */
+
 static inline int o2net_reconnect_delay(void)
 {
 	return o2nm_single_cluster->cl_reconnect_delay_ms;
@@ -355,6 +406,7 @@
 		sc->sc_sock = NULL;
 	}
 
+	o2nm_undepend_item(&sc->sc_node->nd_item);
 	o2nm_node_put(sc->sc_node);
 	sc->sc_node = NULL;
 
@@ -376,6 +428,7 @@
 {
 	struct o2net_sock_container *sc, *ret = NULL;
 	struct page *page = NULL;
+	int status = 0;
 
 	page = alloc_page(GFP_NOFS);
 	sc = kzalloc(sizeof(*sc), GFP_NOFS);
@@ -386,6 +439,13 @@
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
+	/* pin the node item of the remote node */
+	status = o2nm_depend_item(&node->nd_item);
+	if (status) {
+		mlog_errno(status);
+		o2nm_node_put(node);
+		goto out;
+	}
 	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
 	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
 	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
@@ -546,7 +606,7 @@
 	if (sk->sk_user_data) {
 		struct o2net_sock_container *sc = sk->sk_user_data;
 		sclog(sc, "data_ready hit\n");
-		do_gettimeofday(&sc->sc_tv_data_ready);
+		o2net_set_data_ready_time(sc);
 		o2net_sc_queue_work(sc, &sc->sc_rx_work);
 		ready = sc->sc_data_ready;
 	} else {
@@ -1070,6 +1130,8 @@
 	o2net_set_nst_status_time(&nst);
 	wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
 
+	o2net_update_send_stats(&nst, sc);
+
 	/* Note that we avoid overwriting the callers status return
 	 * variable if a system error was reported on the other
 	 * side. Callers beware. */
@@ -1183,13 +1245,15 @@
 	if (syserr != O2NET_ERR_NONE)
 		goto out_respond;
 
-	do_gettimeofday(&sc->sc_tv_func_start);
+	o2net_set_func_start_time(sc);
 	sc->sc_msg_key = be32_to_cpu(hdr->key);
 	sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
 	handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
 					     be16_to_cpu(hdr->data_len),
 					nmh->nh_func_data, &ret_data);
-	do_gettimeofday(&sc->sc_tv_func_stop);
+	o2net_set_func_stop_time(sc);
+
+	o2net_update_recv_stats(sc);
 
 out_respond:
 	/* this destroys the hdr, so don't use it after this */
@@ -1300,7 +1364,7 @@
 	size_t datalen;
 
 	sclog(sc, "receiving\n");
-	do_gettimeofday(&sc->sc_tv_advance_start);
+	o2net_set_advance_start_time(sc);
 
 	if (unlikely(sc->sc_handshake_ok == 0)) {
 		if(sc->sc_page_off < sizeof(struct o2net_handshake)) {
@@ -1375,7 +1439,7 @@
 
 out:
 	sclog(sc, "ret = %d\n", ret);
-	do_gettimeofday(&sc->sc_tv_advance_stop);
+	o2net_set_advance_stop_time(sc);
 	return ret;
 }
 
@@ -1475,27 +1539,28 @@
 {
 	struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-	struct timeval now;
 
-	do_gettimeofday(&now);
+#ifdef CONFIG_DEBUG_FS
+	ktime_t now = ktime_get();
+#endif
 
 	printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
 	     "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
 		     o2net_idle_timeout() / 1000,
 		     o2net_idle_timeout() % 1000);
-	mlog(ML_NOTICE, "here are some times that might help debug the "
-	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
-	     "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
-	     sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
-	     now.tv_sec, (long) now.tv_usec,
-	     sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
-	     sc->sc_tv_advance_start.tv_sec,
-	     (long) sc->sc_tv_advance_start.tv_usec,
-	     sc->sc_tv_advance_stop.tv_sec,
-	     (long) sc->sc_tv_advance_stop.tv_usec,
+
+#ifdef CONFIG_DEBUG_FS
+	mlog(ML_NOTICE, "Here are some times that might help debug the "
+	     "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
+	     "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
+	     (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
+	     (long long)ktime_to_us(sc->sc_tv_data_ready),
+	     (long long)ktime_to_us(sc->sc_tv_advance_start),
+	     (long long)ktime_to_us(sc->sc_tv_advance_stop),
 	     sc->sc_msg_key, sc->sc_msg_type,
-	     sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
-	     sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
+	     (long long)ktime_to_us(sc->sc_tv_func_start),
+	     (long long)ktime_to_us(sc->sc_tv_func_stop));
+#endif
 
 	/*
 	 * Initialize the nn_timeout so that the next connection attempt
@@ -1511,7 +1576,7 @@
 	o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
 	o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
 		      msecs_to_jiffies(o2net_keepalive_delay()));
-	do_gettimeofday(&sc->sc_tv_timer);
+	o2net_set_sock_timer(sc);
 	mod_timer(&sc->sc_idle_timeout,
 	       jiffies + msecs_to_jiffies(o2net_idle_timeout()));
 }
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 15fdbdf..4cbcb65 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -166,18 +166,27 @@
 	/* original handlers for the sockets */
 	void			(*sc_state_change)(struct sock *sk);
 	void			(*sc_data_ready)(struct sock *sk, int bytes);
-#ifdef CONFIG_DEBUG_FS
-	struct list_head        sc_net_debug_item;
-#endif
-	struct timeval 		sc_tv_timer;
-	struct timeval 		sc_tv_data_ready;
-	struct timeval 		sc_tv_advance_start;
-	struct timeval 		sc_tv_advance_stop;
-	struct timeval 		sc_tv_func_start;
-	struct timeval 		sc_tv_func_stop;
+
 	u32			sc_msg_key;
 	u16			sc_msg_type;
 
+#ifdef CONFIG_DEBUG_FS
+	struct list_head        sc_net_debug_item;
+	ktime_t			sc_tv_timer;
+	ktime_t			sc_tv_data_ready;
+	ktime_t			sc_tv_advance_start;
+	ktime_t			sc_tv_advance_stop;
+	ktime_t			sc_tv_func_start;
+	ktime_t			sc_tv_func_stop;
+#endif
+#ifdef CONFIG_OCFS2_FS_STATS
+	ktime_t			sc_tv_acquiry_total;
+	ktime_t			sc_tv_send_total;
+	ktime_t			sc_tv_status_total;
+	u32			sc_send_count;
+	u32			sc_recv_count;
+	ktime_t			sc_tv_process_total;
+#endif
 	struct mutex		sc_send_lock;
 };
 
@@ -220,9 +229,9 @@
 	u32				st_msg_type;
 	u32				st_msg_key;
 	u8				st_node;
-	struct timeval			st_sock_time;
-	struct timeval			st_send_time;
-	struct timeval			st_status_time;
+	ktime_t				st_sock_time;
+	ktime_t				st_send_time;
+	ktime_t				st_status_time;
 };
 #else
 struct o2net_send_tracking {
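Editor's note: the tcp.c and tcp_internal.h hunks above replace struct timeval plus do_gettimeofday() with ktime_t timestamps, reporting elapsed times in microseconds via ktime_to_us(ktime_sub(...)). A minimal sketch of the measurement idiom (the workload callback is hypothetical):

#include <linux/kernel.h>
#include <linux/ktime.h>

static void example_timed_section(void (*workload)(void))
{
	ktime_t start = ktime_get();

	workload();		/* hypothetical unit of work being timed */

	printk(KERN_INFO "example: took %lld usecs\n",
	       (long long)ktime_to_us(ktime_sub(ktime_get(), start)));
}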
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 895532ac..6d80ecc 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -52,9 +52,15 @@
 static int ocfs2_dentry_revalidate(struct dentry *dentry,
 				   struct nameidata *nd)
 {
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode;
 	int ret = 0;    /* if all else fails, just return false */
-	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
+	struct ocfs2_super *osb;
+
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
+	osb = OCFS2_SB(dentry->d_sb);
 
 	mlog_entry("(0x%p, '%.*s')\n", dentry,
 		   dentry->d_name.len, dentry->d_name.name);
@@ -169,23 +175,25 @@
 	struct list_head *p;
 	struct dentry *dentry = NULL;
 
-	spin_lock(&dcache_lock);
-
+	spin_lock(&inode->i_lock);
 	list_for_each(p, &inode->i_dentry) {
 		dentry = list_entry(p, struct dentry, d_alias);
 
+		spin_lock(&dentry->d_lock);
 		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
 			mlog(0, "dentry found: %.*s\n",
 			     dentry->d_name.len, dentry->d_name.name);
 
-			dget_locked(dentry);
+			dget_dlock(dentry);
+			spin_unlock(&dentry->d_lock);
 			break;
 		}
+		spin_unlock(&dentry->d_lock);
 
 		dentry = NULL;
 	}
 
-	spin_unlock(&dcache_lock);
+	spin_unlock(&inode->i_lock);
 
 	return dentry;
 }
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f449991..3a3ed4b 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -90,19 +90,29 @@
 
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
+	struct dlm_lock_resource *res;
 
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
+	res = lock->lockres;
+
 	assert_spin_locked(&dlm->ast_lock);
+
 	if (!list_empty(&lock->ast_list)) {
-		mlog(ML_ERROR, "ast list not empty!!  pending=%d, newlevel=%d\n",
+		mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
+		     "AST list not empty, pending %d, newlevel %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     lock->ast_pending, lock->ml.type);
 		BUG();
 	}
 	if (lock->ast_pending)
-		mlog(0, "lock has an ast getting flushed right now\n");
+		mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	/* putting lock on list, add a ref */
 	dlm_lock_get(lock);
@@ -110,9 +120,10 @@
 
 	/* check to see if this ast obsoletes the bast */
 	if (dlm_should_cancel_bast(dlm, lock)) {
-		struct dlm_lock_resource *res = lock->lockres;
-		mlog(0, "%s: cancelling bast for %.*s\n",
-		     dlm->name, res->lockname.len, res->lockname.name);
+		mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 		lock->bast_pending = 0;
 		list_del_init(&lock->bast_list);
 		lock->ml.highest_blocked = LKM_IVMODE;
@@ -134,8 +145,6 @@
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
-
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
@@ -147,15 +156,21 @@
 
 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
+	struct dlm_lock_resource *res;
 
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
+
 	assert_spin_locked(&dlm->ast_lock);
 
+	res = lock->lockres;
+
 	BUG_ON(!list_empty(&lock->bast_list));
 	if (lock->bast_pending)
-		mlog(0, "lock has a bast getting flushed right now\n");
+		mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	/* putting lock on list, add a ref */
 	dlm_lock_get(lock);
@@ -167,8 +182,6 @@
 
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
-
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
@@ -213,7 +226,10 @@
 	dlm_astlockfunc_t *fn;
 	struct dlm_lockstatus *lksb;
 
-	mlog_entry_void();
+	mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
+	     res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	lksb = lock->lksb;
 	fn = lock->ast;
@@ -231,7 +247,10 @@
 	struct dlm_lockstatus *lksb;
 	int lksbflags;
 
-	mlog_entry_void();
+	mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
+	     res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	lksb = lock->lksb;
 	BUG_ON(lock->ml.node == dlm->node_num);
@@ -250,9 +269,14 @@
 {
 	dlm_bastlockfunc_t *fn = lock->bast;
 
-	mlog_entry_void();
 	BUG_ON(lock->ml.node != dlm->node_num);
 
+	mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
+	     dlm->name, res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+	     blocked_type);
+
 	(*fn)(lock->astdata, blocked_type);
 }
 
@@ -332,7 +356,8 @@
 	/* cannot get a proxy ast message if this node owns it */
 	BUG_ON(res->owner == dlm->node_num);
 
-	mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 
 	spin_lock(&res->spinlock);
 	if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -382,8 +407,12 @@
 	if (past->type == DLM_AST) {
 		/* do not alter lock refcount.  switching lists. */
 		list_move_tail(&lock->list, &res->granted);
-		mlog(0, "ast: Adding to granted list... type=%d, "
-		     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
+		mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+		     lock->ml.type, lock->ml.convert_type);
+
 		if (lock->ml.convert_type != LKM_IVMODE) {
 			lock->ml.type = lock->ml.convert_type;
 			lock->ml.convert_type = LKM_IVMODE;
@@ -426,9 +455,9 @@
 	size_t veclen = 1;
 	int status;
 
-	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
-		   res->lockname.len, res->lockname.name, lock->ml.node,
-		   msg_type, blocked_type);
+	mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
+	     blocked_type);
 
 	memset(&past, 0, sizeof(struct dlm_proxy_ast));
 	past.node_idx = dlm->node_num;
@@ -441,7 +470,6 @@
 	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
 	vec[0].iov_base = &past;
 	if (flags & DLM_LKSB_GET_LVB) {
-		mlog(0, "returning requested LVB data\n");
 		be32_add_cpu(&past.flags, LKM_GET_LVB);
 		vec[1].iov_len = DLM_LVB_LEN;
 		vec[1].iov_base = lock->lksb->lvb;
@@ -451,8 +479,8 @@
 	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
 				     lock->ml.node, &status);
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+		mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name, ret,
 		     lock->ml.node);
 	else {
 		if (status == DLM_RECOVERING) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index b36d0bf..4bdf7ba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -50,10 +50,10 @@
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
 
 enum dlm_mle_type {
-	DLM_MLE_BLOCK,
-	DLM_MLE_MASTER,
-	DLM_MLE_MIGRATION,
-	DLM_MLE_NUM_TYPES
+	DLM_MLE_BLOCK = 0,
+	DLM_MLE_MASTER = 1,
+	DLM_MLE_MIGRATION = 2,
+	DLM_MLE_NUM_TYPES = 3,
 };
 
 struct dlm_master_list_entry {
@@ -82,8 +82,8 @@
 
 enum dlm_ast_type {
 	DLM_AST = 0,
-	DLM_BAST,
-	DLM_ASTUNLOCK
+	DLM_BAST = 1,
+	DLM_ASTUNLOCK = 2,
 };
 
 
@@ -119,9 +119,9 @@
 
 enum dlm_ctxt_state {
 	DLM_CTXT_NEW = 0,
-	DLM_CTXT_JOINED,
-	DLM_CTXT_IN_SHUTDOWN,
-	DLM_CTXT_LEAVING,
+	DLM_CTXT_JOINED = 1,
+	DLM_CTXT_IN_SHUTDOWN = 2,
+	DLM_CTXT_LEAVING = 3,
 };
 
 struct dlm_ctxt
@@ -388,8 +388,8 @@
 
 enum dlm_lockres_list {
 	DLM_GRANTED_LIST = 0,
-	DLM_CONVERTING_LIST,
-	DLM_BLOCKED_LIST
+	DLM_CONVERTING_LIST = 1,
+	DLM_BLOCKED_LIST = 2,
 };
 
 static inline int dlm_lvb_is_empty(char *lvb)
@@ -427,27 +427,27 @@
 
 
 enum {
-	DLM_MASTER_REQUEST_MSG    = 500,
-	DLM_UNUSED_MSG1,         /* 501 */
-	DLM_ASSERT_MASTER_MSG,	 /* 502 */
-	DLM_CREATE_LOCK_MSG,	 /* 503 */
-	DLM_CONVERT_LOCK_MSG,	 /* 504 */
-	DLM_PROXY_AST_MSG,	 /* 505 */
-	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
-	DLM_DEREF_LOCKRES_MSG,	 /* 507 */
-	DLM_MIGRATE_REQUEST_MSG, /* 508 */
-	DLM_MIG_LOCKRES_MSG, 	 /* 509 */
-	DLM_QUERY_JOIN_MSG,	 /* 510 */
-	DLM_ASSERT_JOINED_MSG,	 /* 511 */
-	DLM_CANCEL_JOIN_MSG,	 /* 512 */
-	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
-	DLM_MASTER_REQUERY_MSG,	 /* 514 */
-	DLM_LOCK_REQUEST_MSG,	 /* 515 */
-	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
-	DLM_BEGIN_RECO_MSG,	 /* 517 */
-	DLM_FINALIZE_RECO_MSG,	 /* 518 */
-	DLM_QUERY_REGION,	 /* 519 */
-	DLM_QUERY_NODEINFO,	 /* 520 */
+	DLM_MASTER_REQUEST_MSG		= 500,
+	DLM_UNUSED_MSG1			= 501,
+	DLM_ASSERT_MASTER_MSG		= 502,
+	DLM_CREATE_LOCK_MSG		= 503,
+	DLM_CONVERT_LOCK_MSG		= 504,
+	DLM_PROXY_AST_MSG		= 505,
+	DLM_UNLOCK_LOCK_MSG		= 506,
+	DLM_DEREF_LOCKRES_MSG		= 507,
+	DLM_MIGRATE_REQUEST_MSG		= 508,
+	DLM_MIG_LOCKRES_MSG		= 509,
+	DLM_QUERY_JOIN_MSG		= 510,
+	DLM_ASSERT_JOINED_MSG		= 511,
+	DLM_CANCEL_JOIN_MSG		= 512,
+	DLM_EXIT_DOMAIN_MSG		= 513,
+	DLM_MASTER_REQUERY_MSG		= 514,
+	DLM_LOCK_REQUEST_MSG		= 515,
+	DLM_RECO_DATA_DONE_MSG		= 516,
+	DLM_BEGIN_RECO_MSG		= 517,
+	DLM_FINALIZE_RECO_MSG		= 518,
+	DLM_QUERY_REGION		= 519,
+	DLM_QUERY_NODEINFO		= 520,
 };
 
 struct dlm_reco_node_data
@@ -460,19 +460,19 @@
 enum {
 	DLM_RECO_NODE_DATA_DEAD = -1,
 	DLM_RECO_NODE_DATA_INIT = 0,
-	DLM_RECO_NODE_DATA_REQUESTING,
-	DLM_RECO_NODE_DATA_REQUESTED,
-	DLM_RECO_NODE_DATA_RECEIVING,
-	DLM_RECO_NODE_DATA_DONE,
-	DLM_RECO_NODE_DATA_FINALIZE_SENT,
+	DLM_RECO_NODE_DATA_REQUESTING = 1,
+	DLM_RECO_NODE_DATA_REQUESTED = 2,
+	DLM_RECO_NODE_DATA_RECEIVING = 3,
+	DLM_RECO_NODE_DATA_DONE = 4,
+	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
 };
 
 
 enum {
 	DLM_MASTER_RESP_NO = 0,
-	DLM_MASTER_RESP_YES,
-	DLM_MASTER_RESP_MAYBE,
-	DLM_MASTER_RESP_ERROR
+	DLM_MASTER_RESP_YES = 1,
+	DLM_MASTER_RESP_MAYBE = 2,
+	DLM_MASTER_RESP_ERROR = 3,
 };
 
 
@@ -649,9 +649,9 @@
 #define DLM_MOD_KEY (0x666c6172)
 enum dlm_query_join_response_code {
 	JOIN_DISALLOW = 0,
-	JOIN_OK,
-	JOIN_OK_NO_MAP,
-	JOIN_PROTOCOL_MISMATCH,
+	JOIN_OK = 1,
+	JOIN_OK_NO_MAP = 2,
+	JOIN_PROTOCOL_MISMATCH = 3,
 };
 
 struct dlm_query_join_packet {
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 272ec86..04a32be 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -370,92 +370,46 @@
 	kref_get(&dc->debug_refcnt);
 }
 
-static struct debug_buffer *debug_buffer_allocate(void)
+static int debug_release(struct inode *inode, struct file *file)
 {
-	struct debug_buffer *db = NULL;
-
-	db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
-	if (!db)
-		goto bail;
-
-	db->len = PAGE_SIZE;
-	db->buf = kmalloc(db->len, GFP_KERNEL);
-	if (!db->buf)
-		goto bail;
-
-	return db;
-bail:
-	kfree(db);
-	return NULL;
-}
-
-static ssize_t debug_buffer_read(struct file *file, char __user *buf,
-				 size_t nbytes, loff_t *ppos)
-{
-	struct debug_buffer *db = file->private_data;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len);
-}
-
-static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
-{
-	struct debug_buffer *db = file->private_data;
-	loff_t new = -1;
-
-	switch (whence) {
-	case 0:
-		new = off;
-		break;
-	case 1:
-		new = file->f_pos + off;
-		break;
-	}
-
-	if (new < 0 || new > db->len)
-		return -EINVAL;
-
-	return (file->f_pos = new);
-}
-
-static int debug_buffer_release(struct inode *inode, struct file *file)
-{
-	struct debug_buffer *db = file->private_data;
-
-	if (db)
-		kfree(db->buf);
-	kfree(db);
-
+	free_page((unsigned long)file->private_data);
 	return 0;
 }
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t nbytes, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+				       i_size_read(file->f_mapping->host));
+}
 /* end - util funcs */
 
 /* begin - purge list funcs */
-static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_lock_resource *res;
 	int out = 0;
 	unsigned long total = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping Purgelist for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->spinlock);
 	list_for_each_entry(res, &dlm->purge_list, purge) {
 		++total;
-		if (db->len - out < 100)
+		if (len - out < 100)
 			continue;
 		spin_lock(&res->spinlock);
 		out += stringify_lockname(res->lockname.name,
 					  res->lockname.len,
-					  db->buf + out, db->len - out);
-		out += snprintf(db->buf + out, db->len - out, "\t%ld\n",
+					  buf + out, len - out);
+		out += snprintf(buf + out, len - out, "\t%ld\n",
 				(jiffies - res->last_used)/HZ);
 		spin_unlock(&res->spinlock);
 	}
 	spin_unlock(&dlm->spinlock);
 
-	out += snprintf(db->buf + out, db->len - out,
-			"Total on list: %ld\n", total);
+	out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
 
 	return out;
 }
@@ -463,15 +417,15 @@
 static int debug_purgelist_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_purgelist_print(dlm, db);
+	i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -480,14 +434,14 @@
 
 static const struct file_operations debug_purgelist_fops = {
 	.open =		debug_purgelist_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 /* end - purge list funcs */
 
 /* begin - debug mle funcs */
-static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_master_list_entry *mle;
 	struct hlist_head *bucket;
@@ -495,7 +449,7 @@
 	int i, out = 0;
 	unsigned long total = 0, longest = 0, bucket_count = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping MLEs for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->master_lock);
@@ -506,16 +460,16 @@
 					  master_hash_node);
 			++total;
 			++bucket_count;
-			if (db->len - out < 200)
+			if (len - out < 200)
 				continue;
-			out += dump_mle(mle, db->buf + out, db->len - out);
+			out += dump_mle(mle, buf + out, len - out);
 		}
 		longest = max(longest, bucket_count);
 		bucket_count = 0;
 	}
 	spin_unlock(&dlm->master_lock);
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Total: %ld, Longest: %ld\n", total, longest);
 	return out;
 }
@@ -523,15 +477,15 @@
 static int debug_mle_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_mle_print(dlm, db);
+	i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -540,9 +494,9 @@
 
 static const struct file_operations debug_mle_fops = {
 	.open =		debug_mle_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 
 /* end - debug mle funcs */
@@ -757,7 +711,7 @@
 /* end - debug lockres funcs */
 
 /* begin - debug state funcs */
-static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	int out = 0;
 	struct dlm_reco_node_data *node;
@@ -781,35 +735,35 @@
 	}
 
 	/* Domain: xxxxxxxxxx  Key: 0xdfbac769 */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Domain: %s  Key: 0x%08x  Protocol: %d.%d\n",
 			dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
 			dlm->dlm_locking_proto.pv_minor);
 
 	/* Thread Pid: xxx  Node: xxx  State: xxxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Thread Pid: %d  Node: %d  State: %s\n",
-			dlm->dlm_thread_task->pid, dlm->node_num, state);
+			task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
 
 	/* Number of Joins: xxx  Joining Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Number of Joins: %d  Joining Node: %d\n",
 			dlm->num_joins, dlm->joining_node);
 
 	/* Domain Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Domain Map: ");
+	out += snprintf(buf + out, len - out, "Domain Map: ");
 	out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Live Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Live Map: ");
+	out += snprintf(buf + out, len - out, "Live Map: ");
 	out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Lock Resources: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lock Resources: %d (%d)\n",
 			atomic_read(&dlm->res_cur_count),
 			atomic_read(&dlm->res_tot_count));
@@ -821,29 +775,29 @@
 		cur_mles += atomic_read(&dlm->mle_cur_count[i]);
 
 	/* MLEs: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"MLEs: %d (%d)\n", cur_mles, tot_mles);
 
 	/*  Blocking: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Blocking: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
 
 	/*  Mastery: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Mastery: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
 
 	/*  Migration: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Migration: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
 
 	/* Lists: Dirty=Empty  Purge=InUse  PendingASTs=Empty  ... */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lists: Dirty=%s  Purge=%s  PendingASTs=%s  "
 			"PendingBASTs=%s\n",
 			(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
@@ -852,12 +806,12 @@
 			(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
 
 	/* Purge Count: xxx  Refs: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Purge Count: %d  Refs: %d\n", dlm->purge_count,
 			atomic_read(&dlm->dlm_refs.refcount));
 
 	/* Dead Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dead Node: %d\n", dlm->reco.dead_node);
 
 	/* What about DLM_RECO_STATE_FINALIZE? */
@@ -867,19 +821,19 @@
 		state = "INACTIVE";
 
 	/* Recovery Pid: xxxx  Master: xxx  State: xxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Recovery Pid: %d  Master: %d  State: %s\n",
-			dlm->dlm_reco_thread_task->pid,
+			task_pid_nr(dlm->dlm_reco_thread_task),
 			dlm->reco.new_master, state);
 
 	/* Recovery Map: xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Map: ");
+	out += snprintf(buf + out, len - out, "Recovery Map: ");
 	out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Recovery Node State: */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n");
+	out += snprintf(buf + out, len - out, "Recovery Node State:\n");
 	list_for_each_entry(node, &dlm->reco.node_data, list) {
 		switch (node->state) {
 		case DLM_RECO_NODE_DATA_INIT:
@@ -907,7 +861,7 @@
 			state = "BAD";
 			break;
 		}
-		out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n",
+		out += snprintf(buf + out, len - out, "\t%u - %s\n",
 				node->node_num, state);
 	}
 
@@ -919,15 +873,15 @@
 static int debug_state_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db = NULL;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_state_print(dlm, db);
+	i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -936,9 +890,9 @@
 
 static const struct file_operations debug_state_fops = {
 	.open =		debug_state_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 /* end  - debug state funcs */
 
@@ -1002,14 +956,10 @@
 	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
 
 	if (dc) {
-		if (dc->debug_purgelist_dentry)
-			debugfs_remove(dc->debug_purgelist_dentry);
-		if (dc->debug_mle_dentry)
-			debugfs_remove(dc->debug_mle_dentry);
-		if (dc->debug_lockres_dentry)
-			debugfs_remove(dc->debug_lockres_dentry);
-		if (dc->debug_state_dentry)
-			debugfs_remove(dc->debug_state_dentry);
+		debugfs_remove(dc->debug_purgelist_dentry);
+		debugfs_remove(dc->debug_mle_dentry);
+		debugfs_remove(dc->debug_lockres_dentry);
+		debugfs_remove(dc->debug_state_dentry);
 		dlm_debug_put(dc);
 	}
 }
@@ -1040,8 +990,7 @@
 
 void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-	if (dlm->dlm_debugfs_subroot)
-		debugfs_remove(dlm->dlm_debugfs_subroot);
+	debugfs_remove(dlm->dlm_debugfs_subroot);
 }
 
 /* debugfs root */
@@ -1057,7 +1006,6 @@
 
 void dlm_destroy_debugfs_root(void)
 {
-	if (dlm_debugfs_root)
-		debugfs_remove(dlm_debugfs_root);
+	debugfs_remove(dlm_debugfs_root);
 }
 #endif	/* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 8c686d2..1f27c48 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -37,11 +37,6 @@
 	struct dentry *debug_purgelist_dentry;
 };
 
-struct debug_buffer {
-	int len;
-	char *buf;
-};
-
 struct debug_lockres {
 	int dl_len;
 	char *dl_buf;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index cc2aaa9..7e38a07 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -460,8 +460,6 @@
 		}
 		cond_resched_lock(&dlm->spinlock);
 		num += n;
-		mlog(0, "%s: touched %d lockreses in bucket %d "
-		     "(tot=%d)\n", dlm->name, n, i, num);
 	}
 	spin_unlock(&dlm->spinlock);
 	wake_up(&dlm->dlm_thread_wq);
@@ -1661,8 +1659,8 @@
 
 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
 {
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_up);
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_down);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
 	o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
 }
 
@@ -1674,13 +1672,13 @@
 
 	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_down);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
 	if (status)
 		goto bail;
 
 	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
 			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_up);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
 	if (status)
 		goto bail;
 
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 69cf369..7009292 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -106,6 +106,9 @@
 
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
+		if (!dlm_lock_compatible(tmplock->ml.convert_type,
+					 lock->ml.type))
+			return 0;
 	}
 
 	return 1;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 2211acf..1d6d1d2 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -122,15 +122,13 @@
 void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			      struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
 	if (__dlm_lockres_unused(res)){
 		if (list_empty(&res->purge)) {
-			mlog(0, "putting lockres %.*s:%p onto purge list\n",
-			     res->lockname.len, res->lockname.name, res);
+			mlog(0, "%s: Adding res %.*s to purge list\n",
+			     dlm->name, res->lockname.len, res->lockname.name);
 
 			res->last_used = jiffies;
 			dlm_lockres_get(res);
@@ -138,8 +136,8 @@
 			dlm->purge_count++;
 		}
 	} else if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
-		     res->lockname.len, res->lockname.name, res, res->owner);
+		mlog(0, "%s: Removing res %.*s from purge list\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
@@ -150,7 +148,6 @@
 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			    struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 	spin_lock(&dlm->spinlock);
 	spin_lock(&res->spinlock);
 
@@ -171,9 +168,8 @@
 
 	master = (res->owner == dlm->node_num);
 
-
-	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
-	     res->lockname.name, master);
+	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, master);
 
 	if (!master) {
 		res->state |= DLM_LOCK_RES_DROPPING_REF;
@@ -189,27 +185,25 @@
 		/* clear our bit from the master's refmap, ignore errors */
 		ret = dlm_drop_lockres_ref(dlm, res);
 		if (ret < 0) {
-			mlog_errno(ret);
+			mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
+			     res->lockname.len, res->lockname.name, ret);
 			if (!dlm_is_host_down(ret))
 				BUG();
 		}
-		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
-		     dlm->name, res->lockname.len, res->lockname.name, ret);
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
 	}
 
 	if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purgelist, "
-		     "master = %d\n", res->lockname.len, res->lockname.name,
-		     res, master);
+		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name, master);
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
 		dlm->purge_count--;
 	}
 
 	if (!__dlm_lockres_unused(res)) {
-		mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
 		     dlm->name, res->lockname.len, res->lockname.name);
 		__dlm_print_one_lock_resource(res);
 		BUG();
@@ -266,10 +260,10 @@
 		unused = __dlm_lockres_unused(lockres);
 		if (!unused ||
 		    (lockres->state & DLM_LOCK_RES_MIGRATING)) {
-			mlog(0, "lockres %s:%.*s: is in use or "
-			     "being remastered, used %d, state %d\n",
-			     dlm->name, lockres->lockname.len,
-			     lockres->lockname.name, !unused, lockres->state);
+			mlog(0, "%s: res %.*s is in use or being remastered, "
+			     "used %d, state %d\n", dlm->name,
+			     lockres->lockname.len, lockres->lockname.name,
+			     !unused, lockres->state);
 			list_move_tail(&dlm->purge_list, &lockres->purge);
 			spin_unlock(&lockres->spinlock);
 			continue;
@@ -296,15 +290,12 @@
 	struct list_head *head;
 	int can_grant = 1;
 
-	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
-	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
-	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
-	//	  res->lockname.name);
-
-	/* because this function is called with the lockres
+	/*
+	 * Because this function is called with the lockres
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
-	 * basts right before queueing them all throughout */
+	 * basts right before queueing them all throughout
+	 */
 	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
@@ -314,13 +305,13 @@
 converting:
 	if (list_empty(&res->converting))
 		goto blocked;
-	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
-	     res->lockname.name);
+	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+	     res->lockname.len, res->lockname.name);
 
 	target = list_entry(res->converting.next, struct dlm_lock, list);
 	if (target->ml.convert_type == LKM_IVMODE) {
-		mlog(ML_ERROR, "%.*s: converting a lock with no "
-		     "convert_type!\n", res->lockname.len, res->lockname.name);
+		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 		BUG();
 	}
 	head = &res->granted;
@@ -365,9 +356,12 @@
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
-		     "granting: %d, node: %u\n", res->lockname.len,
-		     res->lockname.name, target->ml.type,
+		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+		     target->ml.type,
 		     target->ml.convert_type, target->ml.node);
 
 		target->ml.type = target->ml.convert_type;
@@ -428,11 +422,14 @@
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
-		     "node: %u\n", res->lockname.len, res->lockname.name,
+		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
 		     target->ml.type, target->ml.node);
 
-		// target->ml.type is already correct
+		/* target->ml.type is already correct */
 		list_move_tail(&target->list, &res->granted);
 
 		BUG_ON(!target->lksb);
@@ -453,7 +450,6 @@
 /* must have NO locks when calling this with res !=NULL * */
 void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
 	if (res) {
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
@@ -466,8 +462,6 @@
 
 void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
@@ -484,13 +478,16 @@
 			res->state |= DLM_LOCK_RES_DIRTY;
 		}
 	}
+
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 }
 
 
 /* Launch the NM thread for the mounted volume */
 int dlm_launch_thread(struct dlm_ctxt *dlm)
 {
-	mlog(0, "starting dlm thread...\n");
+	mlog(0, "Starting dlm_thread...\n");
 
 	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
 	if (IS_ERR(dlm->dlm_thread_task)) {
@@ -505,7 +502,7 @@
 void dlm_complete_thread(struct dlm_ctxt *dlm)
 {
 	if (dlm->dlm_thread_task) {
-		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
 		kthread_stop(dlm->dlm_thread_task);
 		dlm->dlm_thread_task = NULL;
 	}
@@ -536,7 +533,12 @@
 		/* get an extra ref on lock */
 		dlm_lock_get(lock);
 		res = lock->lockres;
-		mlog(0, "delivering an ast for this lockres\n");
+		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     lock->ml.type, lock->ml.node);
 
 		BUG_ON(!lock->ast_pending);
 
@@ -557,9 +559,9 @@
 		/* possible that another ast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->ast_list)) {
-			mlog(0, "aha another ast got queued while "
-			     "we were finishing the last one.  will "
-			     "keep the ast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, AST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->ast_pending = 0;
 
@@ -590,8 +592,12 @@
 		dlm_lock_put(lock);
 		spin_unlock(&dlm->ast_lock);
 
-		mlog(0, "delivering a bast for this lockres "
-		     "(blocked = %d\n", hi);
+		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+		     "blocked %d, node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     hi, lock->ml.node);
 
 		if (lock->ml.node != dlm->node_num) {
 			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
@@ -605,9 +611,9 @@
 		/* possible that another bast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->bast_list)) {
-			mlog(0, "aha another bast got queued while "
-			     "we were finishing the last one.  will "
-			     "keep the bast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, BAST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->bast_pending = 0;
 
@@ -675,11 +681,12 @@
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
-				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
-				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+				     " dirty %d\n", dlm->name,
+				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+				     !!(res->state & DLM_LOCK_RES_MIGRATING),
+				     !!(res->state & DLM_LOCK_RES_RECOVERING),
+				     !!(res->state & DLM_LOCK_RES_DIRTY));
 			}
 			BUG_ON(res->owner != dlm->node_num);
 
@@ -693,8 +700,8 @@
 				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				spin_unlock(&dlm->ast_lock);
-				mlog(0, "delaying list shuffling for in-"
-				     "progress lockres %.*s, state=%d\n",
+				mlog(0, "%s: res %.*s, inprogress, delay list "
+				     "shuffle, state %d\n", dlm->name,
 				     res->lockname.len, res->lockname.name,
 				     res->state);
 				delay = 1;
@@ -706,10 +713,6 @@
 			 * spinlock and do NOT have the dlm lock.
 			 * safe to reserve/queue asts and run the lists. */
 
-			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
-			     "res=%.*s\n", dlm->name,
-			     res->lockname.len, res->lockname.name);
-
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
@@ -733,7 +736,8 @@
 			/* unlikely, but we may need to give time to
 			 * other tasks */
 			if (!--n) {
-				mlog(0, "throttling dlm_thread\n");
+				mlog(0, "%s: Throttling dlm thread\n",
+				     dlm->name);
 				break;
 			}
 		}
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index b2df490..8c5c0ed 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -351,9 +351,16 @@
 	return &ip->ip_vfs_inode;
 }
 
+static void dlmfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
+}
+
 static void dlmfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
+	call_rcu(&inode->i_rcu, dlmfs_i_callback);
 }
 
 static void dlmfs_evict_inode(struct inode *inode)
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 19ad145..5dbc306 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -137,9 +137,7 @@
 	}
 
 	result = d_obtain_alias(inode);
-	if (!IS_ERR(result))
-		result->d_op = &ocfs2_dentry_ops;
-	else
+	if (IS_ERR(result))
 		mlog_errno(PTR_ERR(result));
 
 bail:
@@ -175,8 +173,6 @@
 	}
 
 	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
-	if (!IS_ERR(parent))
-		parent->d_op = &ocfs2_dentry_ops;
 
 bail_unlock:
 	ocfs2_inode_unlock(dir, 0);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f6cba56..63e3fca 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1307,10 +1307,13 @@
 	return err;
 }
 
-int ocfs2_permission(struct inode *inode, int mask)
+int ocfs2_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	int ret;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	mlog_entry_void();
 
 	ret = ocfs2_inode_lock(inode, NULL, 0);
@@ -1320,7 +1323,7 @@
 		goto out;
 	}
 
-	ret = generic_permission(inode, mask, ocfs2_check_acl);
+	ret = generic_permission(inode, mask, flags, ocfs2_check_acl);
 
 	ocfs2_inode_unlock(inode, 0);
 out:
@@ -1992,6 +1995,7 @@
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_space_resv sr;
 	int change_size = 1;
+	int cmd = OCFS2_IOC_RESVSP64;
 
 	if (!ocfs2_writes_unwritten_extents(osb))
 		return -EOPNOTSUPP;
@@ -2002,12 +2006,15 @@
 	if (mode & FALLOC_FL_KEEP_SIZE)
 		change_size = 0;
 
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = OCFS2_IOC_UNRESVSP64;
+
 	sr.l_whence = 0;
 	sr.l_start = (s64)offset;
 	sr.l_len = (s64)len;
 
-	return __ocfs2_change_file_space(NULL, inode, offset,
-					 OCFS2_IOC_RESVSP64, &sr, change_size);
+	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
+					 change_size);
 }
 
 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index 97bf761..f5afbbe 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -61,7 +61,7 @@
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		  struct kstat *stat);
-int ocfs2_permission(struct inode *inode, int mask);
+int ocfs2_permission(struct inode *inode, int mask, unsigned int flags);
 
 int ocfs2_should_update_atime(struct inode *inode,
 			      struct vfsmount *vfsmnt);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index f935fd6..4068c6c 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -434,7 +434,7 @@
 	 * #1 and #2 can be simply solved by never taking the lock
 	 * here for system files (which are the only type we read
 	 * during mount). It's a heavier approach, but our main
-	 * concern is user-accesible files anyway.
+	 * concern is user-accessible files anyway.
 	 *
 	 * #3 works itself out because we'll eventually take the
 	 * cluster lock before trusting anything anyway.
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index ff5744e..849fb4a 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -147,7 +147,6 @@
 	spin_unlock(&oi->ip_lock);
 
 bail_add:
-	dentry->d_op = &ocfs2_dentry_ops;
 	ret = d_splice_alias(inode, dentry);
 
 	if (inode) {
@@ -415,7 +414,6 @@
 		mlog_errno(status);
 		goto leave;
 	}
-	dentry->d_op = &ocfs2_dentry_ops;
 
 	status = ocfs2_add_entry(handle, dentry, inode,
 				 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
@@ -743,7 +741,6 @@
 	}
 
 	ihold(inode);
-	dentry->d_op = &ocfs2_dentry_ops;
 	d_instantiate(dentry, inode);
 
 out_commit:
@@ -1017,8 +1014,11 @@
 		 * An error return must mean that no cluster locks
 		 * were held on function exit.
 		 */
-		if (oi1->ip_blkno != oi2->ip_blkno)
+		if (oi1->ip_blkno != oi2->ip_blkno) {
 			ocfs2_inode_unlock(inode2, 1);
+			brelse(*bh2);
+			*bh2 = NULL;
+		}
 
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -1794,7 +1794,6 @@
 		mlog_errno(status);
 		goto bail;
 	}
-	dentry->d_op = &ocfs2_dentry_ops;
 
 	status = ocfs2_add_entry(handle, dentry, inode,
 				 le64_to_cpu(fe->i_blkno), parent_fe_bh,
@@ -2459,7 +2458,6 @@
 		goto out_commit;
 	}
 
-	dentry->d_op = &ocfs2_dentry_ops;
 	d_instantiate(dentry, inode);
 	status = 0;
 out_commit:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 70dd3b1..51cd689 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -420,6 +420,11 @@
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
 	struct delayed_work		osb_truncate_log_wq;
+	/*
+	 * How many clusters in our truncate log.
+	 * It must be protected by osb_tl_inode->i_mutex.
+	 */
+	unsigned int truncated_clusters;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 5fed60d..71998d4 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1916,7 +1916,7 @@
 	if (res->sr_bg_blkno) {
 		/* Attempt to short-circuit the usual search mechanism
 		 * by jumping straight to the most recently used
-		 * allocation group. This helps us mantain some
+		 * allocation group. This helps us maintain some
 		 * contiguousness across allocations. */
 		status = ocfs2_search_one_group(ac, handle, bits_wanted,
 						min_bits, res, &bits_left);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cfeab7c..06d1f74 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -569,9 +569,16 @@
 	return &oi->vfs_inode;
 }
 
+static void ocfs2_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
+}
+
 static void ocfs2_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
+	call_rcu(&inode->i_rcu, ocfs2_i_callback);
 }
 
 static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
@@ -2090,6 +2097,7 @@
 
 	sb->s_fs_info = osb;
 	sb->s_op = &ocfs2_sops;
+	sb->s_d_op = &ocfs2_dentry_ops;
 	sb->s_export_op = &ocfs2_export_ops;
 	sb->s_qcop = &ocfs2_quotactl_ops;
 	sb->dq_op = &ocfs2_quota_operations;
diff --git a/fs/open.c b/fs/open.c
index 4197b9e..5b6ef7e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -223,7 +223,12 @@
 		return -EINVAL;
 
 	/* Return error if mode is not supported */
-	if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	/* Punch hole must have keep size set */
+	if ((mode & FALLOC_FL_PUNCH_HOLE) &&
+	    !(mode & FALLOC_FL_KEEP_SIZE))
 		return -EOPNOTSUPP;
 
 	if (!(file->f_mode & FMODE_WRITE))
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 911e61f..a2a5bff 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -343,9 +343,16 @@
 	return &oi->vfs_inode;
 }
 
+static void openprom_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(op_inode_cachep, OP_I(inode));
+}
+
 static void openprom_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(op_inode_cachep, OP_I(inode));
+	call_rcu(&inode->i_rcu, openprom_i_callback);
 }
 
 static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 0a8b0ad..9c21119 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -237,6 +237,13 @@
 	return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
 }
 
+ssize_t part_ro_show(struct device *dev,
+		       struct device_attribute *attr, char *buf)
+{
+	struct hd_struct *p = dev_to_part(dev);
+	return sprintf(buf, "%d\n", p->policy ? 1 : 0);
+}
+
 ssize_t part_alignment_offset_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
@@ -312,6 +319,7 @@
 static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
 static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show,
 		   NULL);
@@ -326,6 +334,7 @@
 	&dev_attr_partition.attr,
 	&dev_attr_start.attr,
 	&dev_attr_size.attr,
+	&dev_attr_ro.attr,
 	&dev_attr_alignment_offset.attr,
 	&dev_attr_discard_alignment.attr,
 	&dev_attr_stat.attr,
@@ -372,6 +381,11 @@
 	put_device(part_to_dev(part));
 }
 
+void __delete_partition(struct hd_struct *part)
+{
+	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+}
+
 void delete_partition(struct gendisk *disk, int partno)
 {
 	struct disk_part_tbl *ptbl = disk->part_tbl;
@@ -390,7 +404,7 @@
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
 
-	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+	hd_struct_put(part);
 }
 
 static ssize_t whole_disk_show(struct device *dev,
@@ -489,6 +503,7 @@
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
+	hd_ref_init(p);
 	return p;
 
 out_free_info:
@@ -507,65 +522,6 @@
 	return ERR_PTR(err);
 }
 
-/* Not exported, helper to add_disk(). */
-void register_disk(struct gendisk *disk)
-{
-	struct device *ddev = disk_to_dev(disk);
-	struct block_device *bdev;
-	struct disk_part_iter piter;
-	struct hd_struct *part;
-	int err;
-
-	ddev->parent = disk->driverfs_dev;
-
-	dev_set_name(ddev, disk->disk_name);
-
-	/* delay uevents, until we scanned partition table */
-	dev_set_uevent_suppress(ddev, 1);
-
-	if (device_add(ddev))
-		return;
-	if (!sysfs_deprecated) {
-		err = sysfs_create_link(block_depr, &ddev->kobj,
-					kobject_name(&ddev->kobj));
-		if (err) {
-			device_del(ddev);
-			return;
-		}
-	}
-	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
-	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
-
-	/* No minors to use for partitions */
-	if (!disk_partitionable(disk))
-		goto exit;
-
-	/* No such device (e.g., media were just removed) */
-	if (!get_capacity(disk))
-		goto exit;
-
-	bdev = bdget_disk(disk, 0);
-	if (!bdev)
-		goto exit;
-
-	bdev->bd_invalidated = 1;
-	err = blkdev_get(bdev, FMODE_READ);
-	if (err < 0)
-		goto exit;
-	blkdev_put(bdev, FMODE_READ);
-
-exit:
-	/* announce disk after possible partitions are created */
-	dev_set_uevent_suppress(ddev, 0);
-	kobject_uevent(&ddev->kobj, KOBJ_ADD);
-
-	/* announce possible partitions */
-	disk_part_iter_init(&piter, disk, 0);
-	while ((part = disk_part_iter_next(&piter)))
-		kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
-	disk_part_iter_exit(&piter);
-}
-
 static bool disk_unlock_native_capacity(struct gendisk *disk)
 {
 	const struct block_device_operations *bdops = disk->fops;
@@ -728,33 +684,3 @@
 }
 
 EXPORT_SYMBOL(read_dev_sector);
-
-void del_gendisk(struct gendisk *disk)
-{
-	struct disk_part_iter piter;
-	struct hd_struct *part;
-
-	/* invalidate stuff */
-	disk_part_iter_init(&piter, disk,
-			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
-	while ((part = disk_part_iter_next(&piter))) {
-		invalidate_partition(disk, part->partno);
-		delete_partition(disk, part->partno);
-	}
-	disk_part_iter_exit(&piter);
-
-	invalidate_partition(disk, 0);
-	blk_free_devt(disk_to_dev(disk)->devt);
-	set_capacity(disk, 0);
-	disk->flags &= ~GENHD_FL_UP;
-	unlink_gendisk(disk);
-	part_stat_set_all(&disk->part0, 0);
-	disk->part0.stamp = 0;
-
-	kobject_put(disk->part0.holder_dir);
-	kobject_put(disk->slave_dir);
-	disk->driverfs_dev = NULL;
-	if (!sysfs_deprecated)
-		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
-	device_del(disk_to_dev(disk));
-}
diff --git a/fs/pipe.c b/fs/pipe.c
index 04629f3..e2e95fb 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -441,7 +441,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync(&pipe->wait);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
  			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 		}
 		pipe_wait(pipe);
@@ -450,7 +450,7 @@
 
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
@@ -612,7 +612,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync(&pipe->wait);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 			do_wakeup = 0;
 		}
@@ -623,7 +623,7 @@
 out:
 	mutex_unlock(&inode->i_mutex);
 	if (do_wakeup) {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
 	if (ret > 0)
@@ -715,7 +715,7 @@
 	if (!pipe->readers && !pipe->writers) {
 		free_pipe_info(inode);
 	} else {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
@@ -999,12 +999,11 @@
 		goto err;
 
 	err = -ENOMEM;
-	path.dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name);
+	path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
 	if (!path.dentry)
 		goto err_inode;
 	path.mnt = mntget(pipe_mnt);
 
-	path.dentry->d_op = &pipefs_dentry_operations;
 	d_instantiate(path.dentry, inode);
 
 	err = -ENFILE;
@@ -1253,6 +1252,10 @@
 	return ret;
 }
 
+static const struct super_operations pipefs_ops = {
+	.destroy_inode = free_inode_nonrcu,
+};
+
 /*
  * pipefs should _never_ be mounted by userland - too much of security hassle,
  * no real gain from having the whole whorehouse mounted. So we don't need
@@ -1262,7 +1265,8 @@
 static struct dentry *pipefs_mount(struct file_system_type *fs_type,
 			 int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
+	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
+			&pipefs_dentry_operations, PIPEFS_MAGIC);
 }
 
 static struct file_system_type pipe_fs_type = {
@@ -1288,7 +1292,7 @@
 static void __exit exit_pipe_fs(void)
 {
 	unregister_filesystem(&pipe_fs_type);
-	mntput(pipe_mnt);
+	mntput_long(pipe_mnt);
 }
 
 fs_initcall(init_pipe_fs);
diff --git a/fs/pnode.c b/fs/pnode.c
index 8066b8d..d42514e 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -288,7 +288,7 @@
  */
 static inline int do_refcount_check(struct vfsmount *mnt, int count)
 {
-	int mycount = atomic_read(&mnt->mnt_count) - mnt->mnt_ghosts;
+	int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
 	return (mycount > count);
 }
 
@@ -300,7 +300,7 @@
  * Check if any of these mounts that **do not have submounts**
  * have more references than 'refcnt'. If so return busy.
  *
- * vfsmount lock must be held for read or write
+ * vfsmount lock must be held for write
  */
 int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
 {
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 2758e2a..df434c5 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -10,6 +10,7 @@
 proc-y       += inode.o root.o base.o generic.o array.o \
 		proc_tty.o
 proc-y	+= cmdline.o
+proc-y	+= consoles.o
 proc-y	+= cpuinfo.o
 proc-y	+= devices.o
 proc-y	+= interrupts.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index fff6572..df2b703 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -95,7 +95,7 @@
 
 	get_task_comm(tcomm, p);
 
-	seq_printf(m, "Name:\t");
+	seq_puts(m, "Name:\t");
 	end = m->buf + m->size;
 	buf = m->buf + m->count;
 	name = tcomm;
@@ -122,7 +122,7 @@
 		buf++;
 	}
 	m->count = buf - m->buf;
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 /*
@@ -208,7 +208,7 @@
 		seq_printf(m, "%d ", GROUP_AT(group_info, g));
 	put_cred(cred);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static void render_sigset_t(struct seq_file *m, const char *header,
@@ -216,7 +216,7 @@
 {
 	int i;
 
-	seq_printf(m, "%s", header);
+	seq_puts(m, header);
 
 	i = _NSIG;
 	do {
@@ -230,7 +230,7 @@
 		seq_printf(m, "%x", x);
 	} while (i >= 4);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
@@ -291,12 +291,12 @@
 {
 	unsigned __capi;
 
-	seq_printf(m, "%s", header);
+	seq_puts(m, header);
 	CAP_FOR_EACH_U32(__capi) {
 		seq_printf(m, "%08x",
 			   a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
@@ -329,12 +329,12 @@
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
-	seq_printf(m, "Cpus_allowed:\t");
+	seq_puts(m, "Cpus_allowed:\t");
 	seq_cpumask(m, &task->cpus_allowed);
-	seq_printf(m, "\n");
-	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_putc(m, '\n');
+	seq_puts(m, "Cpus_allowed_list:\t");
 	seq_cpumask_list(m, &task->cpus_allowed);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
@@ -535,15 +535,15 @@
 int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
-	int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
+	unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
 	struct mm_struct *mm = get_task_mm(task);
 
 	if (mm) {
 		size = task_statm(mm, &shared, &text, &data, &resident);
 		mmput(mm);
 	}
-	seq_printf(m, "%d %d %d %d %d %d %d\n",
-			size, resident, shared, text, lib, data, 0);
+	seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
+			size, resident, shared, text, data);
 
 	return 0;
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 08cba2c..9d096e8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -373,26 +373,20 @@
 		return -ESRCH;
 	seq_puts(m, "Latency Top version : v0.1\n");
 	for (i = 0; i < 32; i++) {
-		if (task->latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &task->latency_record[i];
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %li %li ",
-				task->latency_record[i].count,
-				task->latency_record[i].time,
-				task->latency_record[i].max);
+			seq_printf(m, "%i %li %li",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!task->latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (task->latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, task->latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
 		}
 
 	}
@@ -751,14 +745,7 @@
 
 static int proc_single_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-	ret = single_open(filp, proc_single_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, proc_single_show, inode);
 }
 
 static const struct file_operations proc_single_file_operations = {
@@ -1164,7 +1151,7 @@
 		goto err_task_lock;
 	}
 
-	if (oom_score_adj < task->signal->oom_score_adj &&
+	if (oom_score_adj < task->signal->oom_score_adj_min &&
 			!capable(CAP_SYS_RESOURCE)) {
 		err = -EACCES;
 		goto err_sighand;
@@ -1177,6 +1164,8 @@
 			atomic_dec(&task->mm->oom_disable_count);
 	}
 	task->signal->oom_score_adj = oom_score_adj;
+	if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
+		task->signal->oom_score_adj_min = oom_score_adj;
 	/*
 	 * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
 	 * always attainable.
@@ -1386,15 +1375,7 @@
 
 static int sched_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-
-	ret = single_open(filp, sched_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, sched_show, inode);
 }
 
 static const struct file_operations proc_pid_sched_operations = {
@@ -1530,15 +1511,7 @@
 
 static int comm_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-
-	ret = single_open(filp, comm_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, comm_show, inode);
 }
 
 static const struct file_operations proc_pid_set_comm_operations = {
@@ -1795,10 +1768,16 @@
  */
 static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct inode *inode = dentry->d_inode;
-	struct task_struct *task = get_proc_task(inode);
+	struct inode *inode;
+	struct task_struct *task;
 	const struct cred *cred;
 
+	if (nd && nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
+	task = get_proc_task(inode);
+
 	if (task) {
 		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
 		    task_dumpable(task)) {
@@ -1820,7 +1799,7 @@
 	return 0;
 }
 
-static int pid_delete_dentry(struct dentry * dentry)
+static int pid_delete_dentry(const struct dentry * dentry)
 {
 	/* Is the task we represent dead?
 	 * If so, then don't put the dentry on the lru list,
@@ -1964,12 +1943,19 @@
 
 static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct inode *inode = dentry->d_inode;
-	struct task_struct *task = get_proc_task(inode);
-	int fd = proc_fd(inode);
+	struct inode *inode;
+	struct task_struct *task;
+	int fd;
 	struct files_struct *files;
 	const struct cred *cred;
 
+	if (nd && nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
+	task = get_proc_task(inode);
+	fd = proc_fd(inode);
+
 	if (task) {
 		files = get_files_struct(task);
 		if (files) {
@@ -2045,7 +2031,7 @@
 	inode->i_op = &proc_pid_link_inode_operations;
 	inode->i_size = 64;
 	ei->op.proc_get_link = proc_fd_link;
-	dentry->d_op = &tid_fd_dentry_operations;
+	d_set_d_op(dentry, &tid_fd_dentry_operations);
 	d_add(dentry, inode);
 	/* Close the race of the process dying before we return the dentry */
 	if (tid_fd_revalidate(dentry, NULL))
@@ -2177,11 +2163,13 @@
  * /proc/pid/fd needs a special permission handler so that a process can still
  * access /proc/self/fd after it has executed a setuid().
  */
-static int proc_fd_permission(struct inode *inode, int mask)
+static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	int rv;
 
-	rv = generic_permission(inode, mask, NULL);
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+	rv = generic_permission(inode, mask, flags, NULL);
 	if (rv == 0)
 		return 0;
 	if (task_pid(current) == proc_pid(inode))
@@ -2213,7 +2201,7 @@
 	ei->fd = fd;
 	inode->i_mode = S_IFREG | S_IRUSR;
 	inode->i_fop = &proc_fdinfo_file_operations;
-	dentry->d_op = &tid_fd_dentry_operations;
+	d_set_d_op(dentry, &tid_fd_dentry_operations);
 	d_add(dentry, inode);
 	/* Close the race of the process dying before we return the dentry */
 	if (tid_fd_revalidate(dentry, NULL))
@@ -2272,7 +2260,7 @@
 	if (p->fop)
 		inode->i_fop = p->fop;
 	ei->op = p->op;
-	dentry->d_op = &pid_dentry_operations;
+	d_set_d_op(dentry, &pid_dentry_operations);
 	d_add(dentry, inode);
 	/* Close the race of the process dying before we return the dentry */
 	if (pid_revalidate(dentry, NULL))
@@ -2639,8 +2627,14 @@
  */
 static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct inode *inode = dentry->d_inode;
-	struct task_struct *task = get_proc_task(inode);
+	struct inode *inode;
+	struct task_struct *task;
+
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	inode = dentry->d_inode;
+	task = get_proc_task(inode);
 	if (task) {
 		put_task_struct(task);
 		return 1;
@@ -2691,7 +2685,7 @@
 	if (p->fop)
 		inode->i_fop = p->fop;
 	ei->op = p->op;
-	dentry->d_op = &proc_base_dentry_operations;
+	d_set_d_op(dentry, &proc_base_dentry_operations);
 	d_add(dentry, inode);
 	error = NULL;
 out:
@@ -3005,7 +2999,7 @@
 	inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
 		ARRAY_SIZE(tgid_base_stuff));
 
-	dentry->d_op = &pid_dentry_operations;
+	d_set_d_op(dentry, &pid_dentry_operations);
 
 	d_add(dentry, inode);
 	/* Close the race of the process dying before we return the dentry */
@@ -3248,7 +3242,7 @@
 	inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
 		ARRAY_SIZE(tid_base_stuff));
 
-	dentry->d_op = &pid_dentry_operations;
+	d_set_d_op(dentry, &pid_dentry_operations);
 
 	d_add(dentry, inode);
 	/* Close the race of the process dying before we return the dentry */
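
A note on the fs/proc/base.c hunks above: every ->d_revalidate() and ->permission() hook that still needs to sleep or take task references opts out of the new rcu-walk path by returning -ECHILD, after which the VFS retries the lookup in ref-walk mode. A minimal sketch of that contract, assuming a hypothetical "foofs" (foofs_d_revalidate/foofs_permission are illustrative names, not part of this merge):

    #include <linux/dcache.h>
    #include <linux/fs.h>
    #include <linux/namei.h>        /* LOOKUP_RCU, struct nameidata */

    static int foofs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
    {
            /* nd may be NULL for lookups done without a nameidata */
            if (nd && (nd->flags & LOOKUP_RCU))
                    return -ECHILD;         /* VFS retries without RCU */

            /* safe to sleep and take references from here on */
            return 1;                       /* dentry is still valid */
    }

    static int foofs_permission(struct inode *inode, int mask, unsigned int flags)
    {
            if (flags & IPERM_FLAG_RCU)
                    return -ECHILD;         /* same bail-out for ->permission() */

            return generic_permission(inode, mask, flags, NULL);
    }
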
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c
new file mode 100644
index 0000000..eafc22a
--- /dev/null
+++ b/fs/proc/consoles.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2010 Werner Fink, Jiri Slaby
+ *
+ * Licensed under GPLv2
+ */
+
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/tty_driver.h>
+
+/*
+ * This is the handler for /proc/consoles
+ */
+static int show_console_dev(struct seq_file *m, void *v)
+{
+	static const struct {
+		short flag;
+		char name;
+	} con_flags[] = {
+		{ CON_ENABLED,		'E' },
+		{ CON_CONSDEV,		'C' },
+		{ CON_BOOT,		'B' },
+		{ CON_PRINTBUFFER,	'p' },
+		{ CON_BRL,		'b' },
+		{ CON_ANYTIME,		'a' },
+	};
+	char flags[ARRAY_SIZE(con_flags) + 1];
+	struct console *con = v;
+	unsigned int a;
+	int len;
+	dev_t dev = 0;
+
+	if (con->device) {
+		const struct tty_driver *driver;
+		int index;
+		driver = con->device(con, &index);
+		if (driver) {
+			dev = MKDEV(driver->major, driver->minor_start);
+			dev += index;
+		}
+	}
+
+	for (a = 0; a < ARRAY_SIZE(con_flags); a++)
+		flags[a] = (con->flags & con_flags[a].flag) ?
+			con_flags[a].name : ' ';
+	flags[a] = 0;
+
+	seq_printf(m, "%s%d%n", con->name, con->index, &len);
+	len = 21 - len;
+	if (len < 1)
+		len = 1;
+	seq_printf(m, "%*c%c%c%c (%s)", len, ' ', con->read ? 'R' : '-',
+			con->write ? 'W' : '-', con->unblank ? 'U' : '-',
+			flags);
+	if (dev)
+		seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));
+
+	seq_printf(m, "\n");
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	struct console *con;
+	loff_t off = 0;
+
+	acquire_console_sem();
+	for_each_console(con)
+		if (off++ == *pos)
+			break;
+
+	return con;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct console *con = v;
+	++*pos;
+	return con->next;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+	release_console_sem();
+}
+
+static const struct seq_operations consoles_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_console_dev
+};
+
+static int consoles_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &consoles_op);
+}
+
+static const struct file_operations proc_consoles_operations = {
+	.open		= consoles_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+static int __init proc_consoles_init(void)
+{
+	proc_create("consoles", 0, NULL, &proc_consoles_operations);
+	return 0;
+}
+module_init(proc_consoles_init);
diff --git a/fs/proc/devices.c b/fs/proc/devices.c
index 59ee7da..b143471 100644
--- a/fs/proc/devices.c
+++ b/fs/proc/devices.c
@@ -9,14 +9,14 @@
 
 	if (i < CHRDEV_MAJOR_HASH_SIZE) {
 		if (i == 0)
-			seq_printf(f, "Character devices:\n");
+			seq_puts(f, "Character devices:\n");
 		chrdev_show(f, i);
 	}
 #ifdef CONFIG_BLOCK
 	else {
 		i -= CHRDEV_MAJOR_HASH_SIZE;
 		if (i == 0)
-			seq_printf(f, "\nBlock devices:\n");
+			seq_puts(f, "\nBlock devices:\n");
 		blkdev_show(f, i);
 	}
 #endif
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index dd29f03..01e07f2 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -400,7 +400,7 @@
  * smarter: we could keep a "volatile" flag in the 
  * inode to indicate which ones to keep.
  */
-static int proc_delete_dentry(struct dentry * dentry)
+static int proc_delete_dentry(const struct dentry * dentry)
 {
 	return 1;
 }
@@ -425,13 +425,10 @@
 		if (de->namelen != dentry->d_name.len)
 			continue;
 		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
-			unsigned int ino;
-
-			ino = de->low_ino;
 			pde_get(de);
 			spin_unlock(&proc_subdir_lock);
 			error = -EINVAL;
-			inode = proc_get_inode(dir->i_sb, ino, de);
+			inode = proc_get_inode(dir->i_sb, de);
 			goto out_unlock;
 		}
 	}
@@ -439,7 +436,7 @@
 out_unlock:
 
 	if (inode) {
-		dentry->d_op = &proc_dentry_operations;
+		d_set_d_op(dentry, &proc_dentry_operations);
 		d_add(dentry, inode);
 		return NULL;
 	}
@@ -768,12 +765,7 @@
 
 static void free_proc_entry(struct proc_dir_entry *de)
 {
-	unsigned int ino = de->low_ino;
-
-	if (ino < PROC_DYNAMIC_FIRST)
-		return;
-
-	release_inode_number(ino);
+	release_inode_number(de->low_ino);
 
 	if (S_ISLNK(de->mode))
 		kfree(de->data);
@@ -834,12 +826,9 @@
 
 		wait_for_completion(de->pde_unload_completion);
 
-		goto continue_removing;
+		spin_lock(&de->pde_unload_lock);
 	}
-	spin_unlock(&de->pde_unload_lock);
 
-continue_removing:
-	spin_lock(&de->pde_unload_lock);
 	while (!list_empty(&de->pde_openers)) {
 		struct pde_opener *pdeo;
 
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 3ddb606..176ce4c 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -65,9 +65,16 @@
 	return inode;
 }
 
+static void proc_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
+}
+
 static void proc_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
+	call_rcu(&inode->i_rcu, proc_i_callback);
 }
 
 static void init_once(void *foo)
@@ -409,12 +416,11 @@
 };
 #endif
 
-struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
-				struct proc_dir_entry *de)
+struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
 	struct inode * inode;
 
-	inode = iget_locked(sb, ino);
+	inode = iget_locked(sb, de->low_ino);
 	if (!inode)
 		return NULL;
 	if (inode->i_state & I_NEW) {
@@ -464,7 +470,7 @@
 	s->s_time_gran = 1;
 	
 	pde_get(&proc_root);
-	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
+	root_inode = proc_get_inode(s, &proc_root);
 	if (!root_inode)
 		goto out_no_root;
 	root_inode->i_uid = 0;
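
The proc_destroy_inode() conversion above, and the matching hunks for qnx4, reiserfs, romfs, squashfs, sysv, ubifs and udf later in this diff, all defer the actual free with call_rcu() so that lock-free path walkers can keep dereferencing the inode until a grace period has passed. The shape of the conversion, sketched for a hypothetical "foofs" (foofs_inode_cachep, foofs_inode_info and FOOFS_I() are placeholders):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foofs_inode_info {
            /* fs-private fields would live here */
            struct inode vfs_inode;
    };

    static struct kmem_cache *foofs_inode_cachep;   /* assumed to exist */

    static inline struct foofs_inode_info *FOOFS_I(struct inode *inode)
    {
            return container_of(inode, struct foofs_inode_info, vfs_inode);
    }

    static void foofs_i_callback(struct rcu_head *head)
    {
            struct inode *inode = container_of(head, struct inode, i_rcu);

            /* i_rcu shares storage with i_dentry; restore the list head
             * before the object goes back to the slab for reuse. */
            INIT_LIST_HEAD(&inode->i_dentry);
            kmem_cache_free(foofs_inode_cachep, FOOFS_I(inode));
    }

    static void foofs_destroy_inode(struct inode *inode)
    {
            /* free only after an RCU grace period */
            call_rcu(&inode->i_rcu, foofs_i_callback);
    }
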
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 1f24a3e..9ad561d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -96,7 +96,8 @@
 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
 unsigned long task_vsize(struct mm_struct *);
-int task_statm(struct mm_struct *, int *, int *, int *, int *);
+unsigned long task_statm(struct mm_struct *,
+	unsigned long *, unsigned long *, unsigned long *, unsigned long *);
 void task_mem(struct seq_file *, struct mm_struct *);
 
 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
@@ -108,7 +109,7 @@
 
 extern struct vfsmount *proc_mnt;
 int proc_fill_super(struct super_block *);
-struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *);
+struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
 
 /*
  * These are generic /proc routines that use the internal
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6f37c39..d245cb2 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -558,7 +558,7 @@
 static const struct file_operations proc_kcore_operations = {
 	.read		= read_kcore,
 	.open		= open_kcore,
-	.llseek		= generic_file_llseek,
+	.llseek		= default_llseek,
 };
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index a65239c..ed257d1 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -101,6 +101,9 @@
 #ifdef CONFIG_MEMORY_FAILURE
 		"HardwareCorrupted: %5lu kB\n"
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		"AnonHugePages:  %8lu kB\n"
+#endif
 		,
 		K(i.totalram),
 		K(i.freeram),
@@ -128,7 +131,12 @@
 		K(i.freeswap),
 		K(global_page_state(NR_FILE_DIRTY)),
 		K(global_page_state(NR_WRITEBACK)),
-		K(global_page_state(NR_ANON_PAGES)),
+		K(global_page_state(NR_ANON_PAGES)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+		  HPAGE_PMD_NR
+#endif
+		  ),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(global_page_state(NR_SHMEM)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE) +
@@ -151,6 +159,10 @@
 #ifdef CONFIG_MEMORY_FAILURE
 		,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
+		   HPAGE_PMD_NR)
+#endif
 		);
 
 	hugetlb_report_meminfo(m);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 3b8b456..6d8e6a9 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -40,7 +40,7 @@
 			ppage = pfn_to_page(pfn);
 		else
 			ppage = NULL;
-		if (!ppage)
+		if (!ppage || PageSlab(ppage))
 			pcount = 0;
 		else
 			pcount = page_mapcount(ppage);
@@ -116,15 +116,17 @@
 	if (PageHuge(page))
 		u |= 1 << KPF_HUGE;
 
+	/*
+	 * Caveats on high order pages: page->_count will only be set
+	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
+	 * SLOB won't set PG_slab at all on compound pages.
+	 */
+	if (PageBuddy(page))
+		u |= 1 << KPF_BUDDY;
+
 	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
 
-	/*
-	 * Caveats on high order pages:
-	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
-	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
-	 */
 	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
-	u |= kpf_copy_bit(k, KPF_BUDDY,		PG_buddy);
 
 	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
 	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b652cb0..09a1f92 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -5,6 +5,7 @@
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
 #include <linux/security.h>
+#include <linux/namei.h>
 #include "internal.h"
 
 static const struct dentry_operations proc_sys_dentry_operations;
@@ -120,7 +121,7 @@
 		goto out;
 
 	err = NULL;
-	dentry->d_op = &proc_sys_dentry_operations;
+	d_set_d_op(dentry, &proc_sys_dentry_operations);
 	d_add(dentry, inode);
 
 out:
@@ -201,7 +202,7 @@
 				dput(child);
 				return -ENOMEM;
 			} else {
-				child->d_op = &proc_sys_dentry_operations;
+				d_set_d_op(child, &proc_sys_dentry_operations);
 				d_add(child, inode);
 			}
 		} else {
@@ -294,7 +295,7 @@
 	return ret;
 }
 
-static int proc_sys_permission(struct inode *inode, int mask)
+static int proc_sys_permission(struct inode *inode, int mask, unsigned int flags)
 {
 	/*
 	 * sysctl entries that are not writeable,
@@ -304,6 +305,9 @@
 	struct ctl_table *table;
 	int error;
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	/* Executable files are not allowed under /proc/sys/ */
 	if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
 		return -EACCES;
@@ -389,23 +393,30 @@
 
 static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
 	return !PROC_I(dentry->d_inode)->sysctl->unregistering;
 }
 
-static int proc_sys_delete(struct dentry *dentry)
+static int proc_sys_delete(const struct dentry *dentry)
 {
 	return !!PROC_I(dentry->d_inode)->sysctl->unregistering;
 }
 
-static int proc_sys_compare(struct dentry *dir, struct qstr *qstr,
-			    struct qstr *name)
+static int proc_sys_compare(const struct dentry *parent,
+		const struct inode *pinode,
+		const struct dentry *dentry, const struct inode *inode,
+		unsigned int len, const char *str, const struct qstr *name)
 {
-	struct dentry *dentry = container_of(qstr, struct dentry, d_name);
-	if (qstr->len != name->len)
+	/* Although proc doesn't have negative dentries, rcu-walk means
+	 * that inode here can be NULL */
+	if (!inode)
+		return 0;
+	if (name->len != len)
 		return 1;
-	if (memcmp(qstr->name, name->name, name->len))
+	if (memcmp(name->name, str, len))
 		return 1;
-	return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl);
+	return !sysctl_is_seen(PROC_I(inode)->sysctl);
 }
 
 static const struct dentry_operations proc_sys_dentry_operations = {
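
The new ->d_compare() prototype shown above receives the parent, the candidate dentry and inode, and the raw name bytes, and it may now be called under rcu-walk: the inode can be NULL (as the comment in the hunk notes) and the callback must neither sleep nor modify the dentry. A hedged sketch of a case-insensitive comparison for a hypothetical "foofs" (not part of this merge):

    #include <linux/ctype.h>
    #include <linux/dcache.h>
    #include <linux/fs.h>

    static int foofs_d_compare(const struct dentry *parent,
                    const struct inode *pinode,
                    const struct dentry *dentry, const struct inode *inode,
                    unsigned int len, const char *str, const struct qstr *name)
    {
            unsigned int i;

            /* rcu-walk rules: no sleeping, no dentry changes, inode may be NULL */
            if (len != name->len)
                    return 1;                       /* no match */
            for (i = 0; i < len; i++)
                    if (tolower(str[i]) != tolower(name->name[i]))
                            return 1;
            return 0;                               /* match */
    }
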
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 83adcc8..cb761f0 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -36,27 +36,27 @@
 	}
 	switch (p->type) {
 	case TTY_DRIVER_TYPE_SYSTEM:
-		seq_printf(m, "system");
+		seq_puts(m, "system");
 		if (p->subtype == SYSTEM_TYPE_TTY)
-			seq_printf(m, ":/dev/tty");
+			seq_puts(m, ":/dev/tty");
 		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
-			seq_printf(m, ":console");
+			seq_puts(m, ":console");
 		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
-			seq_printf(m, ":vtmaster");
+			seq_puts(m, ":vtmaster");
 		break;
 	case TTY_DRIVER_TYPE_CONSOLE:
-		seq_printf(m, "console");
+		seq_puts(m, "console");
 		break;
 	case TTY_DRIVER_TYPE_SERIAL:
-		seq_printf(m, "serial");
+		seq_puts(m, "serial");
 		break;
 	case TTY_DRIVER_TYPE_PTY:
 		if (p->subtype == PTY_TYPE_MASTER)
-			seq_printf(m, "pty:master");
+			seq_puts(m, "pty:master");
 		else if (p->subtype == PTY_TYPE_SLAVE)
-			seq_printf(m, "pty:slave");
+			seq_puts(m, "pty:slave");
 		else
-			seq_printf(m, "pty");
+			seq_puts(m, "pty");
 		break;
 	default:
 		seq_printf(m, "type:%d.%d", p->type, p->subtype);
@@ -74,19 +74,19 @@
 		/* pseudo-drivers first */
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
-		seq_printf(m, "system:/dev/tty\n");
+		seq_puts(m, "system:/dev/tty\n");
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
-		seq_printf(m, "system:console\n");
+		seq_puts(m, "system:console\n");
 #ifdef CONFIG_UNIX98_PTYS
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
-		seq_printf(m, "system\n");
+		seq_puts(m, "system\n");
 #endif
 #ifdef CONFIG_VT
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
 		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
-		seq_printf(m, "system:vtmaster\n");
+		seq_puts(m, "system:vtmaster\n");
 #endif
 	}
 
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 3799473..62604be 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -10,16 +10,16 @@
 {
 	int i, j;
 
-	seq_printf(p, "                    ");
+	seq_puts(p, "                    ");
 	for_each_possible_cpu(i)
 		seq_printf(p, "CPU%-8d", i);
-	seq_printf(p, "\n");
+	seq_putc(p, '\n');
 
 	for (i = 0; i < NR_SOFTIRQS; i++) {
 		seq_printf(p, "%12s:", softirq_to_name[i]);
 		for_each_possible_cpu(j)
 			seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
-		seq_printf(p, "\n");
+		seq_putc(p, '\n');
 	}
 	return 0;
 }
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index e15a19c..1cffa2b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -126,7 +126,7 @@
 
 	for (i = 0; i < NR_SOFTIRQS; i++)
 		seq_printf(p, " %u", per_softirq_sums[i]);
-	seq_printf(p, "\n");
+	seq_putc(p, '\n');
 
 	return 0;
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c126c83..60b9148 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -66,8 +66,9 @@
 	return PAGE_SIZE * mm->total_vm;
 }
 
-int task_statm(struct mm_struct *mm, int *shared, int *text,
-	       int *data, int *resident)
+unsigned long task_statm(struct mm_struct *mm,
+			 unsigned long *shared, unsigned long *text,
+			 unsigned long *data, unsigned long *resident)
 {
 	*shared = get_mm_counter(mm, MM_FILEPAGES);
 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
@@ -417,7 +418,8 @@
 		   "Anonymous:      %8lu kB\n"
 		   "Swap:           %8lu kB\n"
 		   "KernelPageSize: %8lu kB\n"
-		   "MMUPageSize:    %8lu kB\n",
+		   "MMUPageSize:    %8lu kB\n"
+		   "Locked:         %8lu kB\n",
 		   (vma->vm_end - vma->vm_start) >> 10,
 		   mss.resident >> 10,
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
@@ -429,7 +431,9 @@
 		   mss.anonymous >> 10,
 		   mss.swap >> 10,
 		   vma_kernel_pagesize(vma) >> 10,
-		   vma_mmu_pagesize(vma) >> 10);
+		   vma_mmu_pagesize(vma) >> 10,
+		   (vma->vm_flags & VM_LOCKED) ?
+			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index cb6306e..b535d3e 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -92,13 +92,14 @@
 	return vsize;
 }
 
-int task_statm(struct mm_struct *mm, int *shared, int *text,
-	       int *data, int *resident)
+unsigned long task_statm(struct mm_struct *mm,
+			 unsigned long *shared, unsigned long *text,
+			 unsigned long *data, unsigned long *resident)
 {
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *p;
-	int size = kobjsize(mm);
+	unsigned long size = kobjsize(mm);
 
 	down_read(&mm->mmap_sem);
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index fcada42..e63b417 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -425,9 +425,16 @@
 	return &ei->vfs_inode;
 }
 
+static void qnx4_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
+}
+
 static void qnx4_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
+	call_rcu(&inode->i_rcu, qnx4_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0fed41e..84becd3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -133,16 +133,20 @@
 EXPORT_SYMBOL(dq_data_lock);
 
 void __quota_error(struct super_block *sb, const char *func,
-		  const char *fmt, ...)
+		   const char *fmt, ...)
 {
-	va_list args;
-
 	if (printk_ratelimit()) {
+		va_list args;
+		struct va_format vaf;
+
 		va_start(args, fmt);
-		printk(KERN_ERR "Quota error (device %s): %s: ",
-		       sb->s_id, func);
-		vprintk(fmt, args);
-		printk("\n");
+
+		vaf.fmt = fmt;
+		vaf.va = &args;
+
+		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+		       sb->s_id, func, &vaf);
+
 		va_end(args);
 	}
 }
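
The __quota_error() rewrite above is the %pV idiom: wrap the caller's format string and va_list in a struct va_format and emit everything through a single printk(), so the device/function prefix and the formatted message can no longer be interleaved with output from other CPUs. A minimal sketch of the same pattern (foofs_warn is an illustrative helper, not from this merge):

    #include <linux/kernel.h>

    static void foofs_warn(const char *func, const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            /* one printk call keeps prefix and message together */
            printk(KERN_WARNING "foofs: %s: %pV\n", func, &vaf);
            va_end(args);
    }
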
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874..e41c1becf 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -468,8 +468,8 @@
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		quota_error(dquot->dq_sb, "Can't read quota data "
-			    "block %u", blk);
+		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+			    *blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -493,8 +493,9 @@
 		} else {
 			ret = write_blk(info, *blk, buf);
 			if (ret < 0)
-				quota_error(dquot->dq_sb, "Can't write quota "
-					    "tree block %u", blk);
+				quota_error(dquot->dq_sb,
+					    "Can't write quota tree block %u",
+					    *blk);
 		}
 	}
 out_buf:
diff --git a/fs/read_write.c b/fs/read_write.c
index 5d431ba..5520f8a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -30,18 +30,9 @@
 
 EXPORT_SYMBOL(generic_ro_fops);
 
-static int
-__negative_fpos_check(struct file *file, loff_t pos, size_t count)
+static inline int unsigned_offsets(struct file *file)
 {
-	/*
-	 * pos or pos+count is negative here, check overflow.
-	 * too big "count" will be caught in rw_verify_area().
-	 */
-	if ((pos < 0) && (pos + count < pos))
-		return -EOVERFLOW;
-	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
-		return 0;
-	return -EINVAL;
+	return file->f_mode & FMODE_UNSIGNED_OFFSET;
 }
 
 /**
@@ -75,7 +66,7 @@
 		break;
 	}
 
-	if (offset < 0 && __negative_fpos_check(file, offset, 0))
+	if (offset < 0 && !unsigned_offsets(file))
 		return -EINVAL;
 	if (offset > inode->i_sb->s_maxbytes)
 		return -EINVAL;
@@ -152,7 +143,7 @@
 			offset += file->f_pos;
 	}
 	retval = -EINVAL;
-	if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) {
+	if (offset >= 0 || unsigned_offsets(file)) {
 		if (offset != file->f_pos) {
 			file->f_pos = offset;
 			file->f_version = 0;
@@ -252,9 +243,13 @@
 	if (unlikely((ssize_t) count < 0))
 		return retval;
 	pos = *ppos;
-	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) {
-		retval = __negative_fpos_check(file, pos, count);
-		if (retval)
+	if (unlikely(pos < 0)) {
+		if (!unsigned_offsets(file))
+			return retval;
+		if (count >= -pos) /* both values are in 0..LLONG_MAX */
+			return -EOVERFLOW;
+	} else if (unlikely((loff_t) (pos + count) < 0)) {
+		if (!unsigned_offsets(file))
 			return retval;
 	}
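
Reading the hunk above: for files with FMODE_UNSIGNED_OFFSET a negative pos really is a huge unsigned offset, and with count and -pos both in 0..LLONG_MAX the sum wraps past the top of the offset space exactly when count >= -pos, which is what the new -EOVERFLOW branch rejects. A worked instance of that reasoning as I read it (the helper below is illustrative only, not kernel API):

    #include <linux/types.h>

    /*
     * With pos < 0 viewed as the unsigned offset 2^64 + pos, the access
     * [pos, pos + count) wraps past 2^64 exactly when count >= -pos.
     * Example: pos = -4096 (0xfffffffffffff000), count = 8192
     *          -> 8192 >= 4096, so the range would wrap and is rejected.
     */
    static inline bool pos_count_would_wrap(loff_t pos, size_t count)
    {
            return pos < 0 && count >= -(u64)pos;
    }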
 
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index d31bce1..3eea859 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2551,8 +2551,6 @@
 	result = 0;
 
 	if (journal->j_dev_bd != NULL) {
-		if (journal->j_dev_bd->bd_dev != super->s_dev)
-			bd_release(journal->j_dev_bd);
 		result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
 		journal->j_dev_bd = NULL;
 	}
@@ -2570,7 +2568,7 @@
 {
 	int result;
 	dev_t jdev;
-	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
+	fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
 	char b[BDEVNAME_SIZE];
 
 	result = 0;
@@ -2584,7 +2582,10 @@
 
 	/* there is no "jdev" option and journal is on separate device */
 	if ((!jdev_name || !jdev_name[0])) {
-		journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
+		if (jdev == super->s_dev)
+			blkdev_mode &= ~FMODE_EXCL;
+		journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
+						      journal);
 		journal->j_dev_mode = blkdev_mode;
 		if (IS_ERR(journal->j_dev_bd)) {
 			result = PTR_ERR(journal->j_dev_bd);
@@ -2593,22 +2594,14 @@
 					 "cannot init journal device '%s': %i",
 					 __bdevname(jdev, b), result);
 			return result;
-		} else if (jdev != super->s_dev) {
-			result = bd_claim(journal->j_dev_bd, journal);
-			if (result) {
-				blkdev_put(journal->j_dev_bd, blkdev_mode);
-				return result;
-			}
-
+		} else if (jdev != super->s_dev)
 			set_blocksize(journal->j_dev_bd, super->s_blocksize);
-		}
 
 		return 0;
 	}
 
 	journal->j_dev_mode = blkdev_mode;
-	journal->j_dev_bd = open_bdev_exclusive(jdev_name,
-						blkdev_mode, journal);
+	journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
 	if (IS_ERR(journal->j_dev_bd)) {
 		result = PTR_ERR(journal->j_dev_bd);
 		journal->j_dev_bd = NULL;
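
The journal_init_dev() changes above follow the reworked block-device open API used throughout this merge: exclusive access is requested by adding FMODE_EXCL and a holder cookie to blkdev_get_by_dev()/blkdev_get_by_path(), and the matching blkdev_put() must be called with the same mode. A hedged sketch of the open/close pairing (foofs_journal_demo and its arguments are illustrative):

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    static int foofs_journal_demo(const char *path, void *holder)
    {
            const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
            struct block_device *bdev;

            /* FMODE_EXCL + holder replaces open_bdev_exclusive()/bd_claim() */
            bdev = blkdev_get_by_path(path, mode, holder);
            if (IS_ERR(bdev))
                    return PTR_ERR(bdev);

            /* ... use the device ... */

            /* release must pass the same mode, FMODE_EXCL included */
            blkdev_put(bdev, mode);
            return 0;
    }
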
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index adbc6f5..45de98b 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -586,13 +586,13 @@
 	va_list args;
 	int mode, first, last;
 
-	va_start(args, bh);
-
 	if (!bh) {
 		printk("print_block: buffer is NULL\n");
 		return;
 	}
 
+	va_start(args, bh);
+
 	mode = va_arg(args, int);
 	first = va_arg(args, int);
 	last = va_arg(args, int);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b243117..2575682 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -529,9 +529,16 @@
 	return &ei->vfs_inode;
 }
 
+static void reiserfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
+}
+
 static void reiserfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
+	call_rcu(&inode->i_rcu, reiserfs_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 5d04a78..3cfb2e9 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -870,11 +870,14 @@
 	return err;
 }
 
-static int reiserfs_check_acl(struct inode *inode, int mask)
+static int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
 	struct posix_acl *acl;
 	int error = -EAGAIN; /* do regular unix permission checks by default */
 
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
 	acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
 
 	if (acl) {
@@ -951,8 +954,10 @@
 	return 0;
 }
 
-int reiserfs_permission(struct inode *inode, int mask)
+int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
 	/*
 	 * We don't do permission checks on the internal objects.
 	 * Permissions are determined by the "owning" object.
@@ -965,13 +970,16 @@
 	 * Stat data v1 doesn't support ACLs.
 	 */
 	if (get_inode_sd_version(inode) != STAT_DATA_V1)
-		return generic_permission(inode, mask, reiserfs_check_acl);
+		return generic_permission(inode, mask, flags,
+					reiserfs_check_acl);
 #endif
-	return generic_permission(inode, mask, NULL);
+	return generic_permission(inode, mask, flags, NULL);
 }
 
 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
 	return -EPERM;
 }
 
@@ -990,7 +998,7 @@
 				strlen(PRIVROOT_NAME));
 	if (!IS_ERR(dentry)) {
 		REISERFS_SB(s)->priv_root = dentry;
-		dentry->d_op = &xattr_lookup_poison_ops;
+		d_set_d_op(dentry, &xattr_lookup_poison_ops);
 		if (dentry->d_inode)
 			dentry->d_inode->i_flags |= S_PRIVATE;
 	} else
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 6647f90..2305e31 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -400,9 +400,16 @@
 /*
  * return a spent inode to the slab cache
  */
+static void romfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
+}
+
 static void romfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
+	call_rcu(&inode->i_rcu, romfs_i_callback);
 }
 
 /*
diff --git a/fs/select.c b/fs/select.c
index b7b10aa..e56560d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -306,6 +306,8 @@
 		rts.tv_sec = rts.tv_nsec = 0;
 
 	if (timeval) {
+		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
+			memset(&rtv, 0, sizeof(rtv));
 		rtv.tv_sec = rts.tv_sec;
 		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
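
The memset() added above matters because struct timeval can contain padding on some ABIs and the structure is copied to userspace verbatim a few lines later; zeroing it first keeps uninitialised stack bytes from leaking, and the sizeof comparison makes the clear compile away on layouts without holes. The same defensive idiom for any on-stack struct headed for copy_to_user(), with hypothetical names:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct foo_reply {
            long    a;
            int     b;      /* likely followed by padding on 64-bit */
    };

    static int foo_fill_reply(struct foo_reply __user *ureply)
    {
            struct foo_reply r;

            memset(&r, 0, sizeof(r));       /* clear any padding holes */
            r.a = 1;
            r.b = 2;

            return copy_to_user(ureply, &r, sizeof(r)) ? -EFAULT : 0;
    }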
 
diff --git a/fs/splice.c b/fs/splice.c
index ce2f025..50a5d978 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -682,19 +682,14 @@
 {
 	struct file *file = sd->u.file;
 	loff_t pos = sd->pos;
-	int ret, more;
+	int more;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (!ret) {
-		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
-		if (file->f_op && file->f_op->sendpage)
-			ret = file->f_op->sendpage(file, buf->page, buf->offset,
-						   sd->len, &pos, more);
-		else
-			ret = -EINVAL;
-	}
+	if (!likely(file->f_op && file->f_op->sendpage))
+		return -EINVAL;
 
-	return ret;
+	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
+	return file->f_op->sendpage(file, buf->page, buf->offset,
+				    sd->len, &pos, more);
 }
 
 /*
@@ -727,13 +722,6 @@
 	void *fsdata;
 	int ret;
 
-	/*
-	 * make sure the data in this buffer is uptodate
-	 */
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
-
 	offset = sd->pos & ~PAGE_CACHE_MASK;
 
 	this_len = sd->len;
@@ -805,12 +793,17 @@
 		if (sd->len > sd->total_len)
 			sd->len = sd->total_len;
 
-		ret = actor(pipe, buf, sd);
-		if (ret <= 0) {
+		ret = buf->ops->confirm(pipe, buf);
+		if (unlikely(ret)) {
 			if (ret == -ENODATA)
 				ret = 0;
 			return ret;
 		}
+
+		ret = actor(pipe, buf, sd);
+		if (ret <= 0)
+			return ret;
+
 		buf->offset += ret;
 		buf->len -= ret;
 
@@ -1044,10 +1037,6 @@
 	int ret;
 	void *data;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (ret)
-		return ret;
-
 	data = buf->ops->map(pipe, buf, 0);
 	ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
 	buf->ops->unmap(pipe, buf, data);
@@ -1495,10 +1484,6 @@
 	char *src;
 	int ret;
 
-	ret = buf->ops->confirm(pipe, buf);
-	if (unlikely(ret))
-		return ret;
-
 	/*
 	 * See if we can use the atomic maps, by prefaulting in the
 	 * pages and doing an atomic copy
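
The splice hunks above hoist buf->ops->confirm() out of the individual actors and into splice_from_pipe_feed(), so by the time an actor runs the pipe buffer is already confirmed uptodate. Under that assumption a custom actor no longer needs its own confirm step; a reduced sketch (foo_pipe_actor is illustrative):

    #include <linux/pipe_fs_i.h>
    #include <linux/splice.h>

    static int foo_pipe_actor(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf, struct splice_desc *sd)
    {
            void *src;
            int len = sd->len;

            /* the caller has already run buf->ops->confirm() */
            src = buf->ops->map(pipe, buf, 0);
            /* ... consume len bytes starting at src + buf->offset ... */
            buf->ops->unmap(pipe, buf, src);

            return len;     /* bytes consumed; <= 0 stops the loop */
    }
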
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 24de30b..20700b9 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -440,9 +440,16 @@
 }
 
 
+static void squashfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
+}
+
 static void squashfs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
+	call_rcu(&inode->i_rcu, squashfs_i_callback);
 }
 
 
diff --git a/fs/super.c b/fs/super.c
index ca69615..4f6a357 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -30,6 +30,7 @@
 #include <linux/idr.h>
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
+#include <linux/rculist_bl.h>
 #include "internal.h"
 
 
@@ -71,7 +72,7 @@
 		INIT_LIST_HEAD(&s->s_files);
 #endif
 		INIT_LIST_HEAD(&s->s_instances);
-		INIT_HLIST_HEAD(&s->s_anon);
+		INIT_HLIST_BL_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
 		INIT_LIST_HEAD(&s->s_dentry_lru);
 		init_rwsem(&s->s_umount);
@@ -766,13 +767,13 @@
 {
 	struct block_device *bdev;
 	struct super_block *s;
-	fmode_t mode = FMODE_READ;
+	fmode_t mode = FMODE_READ | FMODE_EXCL;
 	int error = 0;
 
 	if (!(flags & MS_RDONLY))
 		mode |= FMODE_WRITE;
 
-	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
+	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
 
@@ -801,13 +802,13 @@
 
 		/*
 		 * s_umount nests inside bd_mutex during
-		 * __invalidate_device().  close_bdev_exclusive()
-		 * acquires bd_mutex and can't be called under
-		 * s_umount.  Drop s_umount temporarily.  This is safe
-		 * as we're holding an active reference.
+		 * __invalidate_device().  blkdev_put() acquires
+		 * bd_mutex and can't be called under s_umount.  Drop
+		 * s_umount temporarily.  This is safe as we're
+		 * holding an active reference.
 		 */
 		up_write(&s->s_umount);
-		close_bdev_exclusive(bdev, mode);
+		blkdev_put(bdev, mode);
 		down_write(&s->s_umount);
 	} else {
 		char b[BDEVNAME_SIZE];
@@ -831,7 +832,7 @@
 error_s:
 	error = PTR_ERR(s);
 error_bdev:
-	close_bdev_exclusive(bdev, mode);
+	blkdev_put(bdev, mode);
 error:
 	return ERR_PTR(error);
 }
@@ -862,7 +863,8 @@
 	bdev->bd_super = NULL;
 	generic_shutdown_super(sb);
 	sync_blockdev(bdev);
-	close_bdev_exclusive(bdev, mode);
+	WARN_ON_ONCE(!(mode & FMODE_EXCL));
+	blkdev_put(bdev, mode | FMODE_EXCL);
 }
 
 EXPORT_SYMBOL(kill_block_super);
@@ -1139,7 +1141,7 @@
 	return mnt;
 
  err:
-	mntput(mnt);
+	mntput_long(mnt);
 	return ERR_PTR(err);
 }
 
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 7e54bac..ea9120a 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -231,7 +231,7 @@
 		goto repeat;
 }
 
-static int sysfs_dentry_delete(struct dentry *dentry)
+static int sysfs_dentry_delete(const struct dentry *dentry)
 {
 	struct sysfs_dirent *sd = dentry->d_fsdata;
 	return !!(sd->s_flags & SYSFS_FLAG_REMOVED);
@@ -239,9 +239,13 @@
 
 static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	struct sysfs_dirent *sd = dentry->d_fsdata;
+	struct sysfs_dirent *sd;
 	int is_dir;
 
+	if (nd->flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	sd = dentry->d_fsdata;
 	mutex_lock(&sysfs_mutex);
 
 	/* The sysfs dirent has been deleted */
@@ -701,7 +705,7 @@
 	/* instantiate and hash dentry */
 	ret = d_find_alias(inode);
 	if (!ret) {
-		dentry->d_op = &sysfs_dentry_ops;
+		d_set_d_op(dentry, &sysfs_dentry_ops);
 		dentry->d_fsdata = sysfs_get(sd);
 		d_add(dentry, inode);
 	} else {
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 442f34f..c8769dc2 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -165,10 +165,7 @@
 	struct attribute *const *attr;
 	int i;
 
-	if (grp)
-		dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name);
-	else
-		dir_sd = sysfs_get(kobj->sd);
+	dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name);
 	if (!dir_sd)
 		return -ENOENT;
 
@@ -195,10 +192,7 @@
 	struct sysfs_dirent *dir_sd;
 	struct attribute *const *attr;
 
-	if (grp)
-		dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name);
-	else
-		dir_sd = sysfs_get(kobj->sd);
+	dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name);
 	if (dir_sd) {
 		for (attr = grp->attrs; *attr; ++attr)
 			sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name);
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index cffb1fd..0a12eb8 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/sysfs.h>
 #include <linux/xattr.h>
 #include <linux/security.h>
 #include "sysfs.h"
@@ -348,13 +349,18 @@
 		return -ENOENT;
 }
 
-int sysfs_permission(struct inode *inode, int mask)
+int sysfs_permission(struct inode *inode, int mask, unsigned int flags)
 {
-	struct sysfs_dirent *sd = inode->i_private;
+	struct sysfs_dirent *sd;
+
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
+
+	sd = inode->i_private;
 
 	mutex_lock(&sysfs_mutex);
 	sysfs_refresh_inode(sd, inode);
 	mutex_unlock(&sysfs_mutex);
 
-	return generic_permission(inode, mask, NULL);
+	return generic_permission(inode, mask, flags, NULL);
 }
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index d9be60a..3d28af3 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -9,6 +9,7 @@
  */
 
 #include <linux/lockdep.h>
+#include <linux/kobject_ns.h>
 #include <linux/fs.h>
 
 struct sysfs_open_dirent;
@@ -200,7 +201,7 @@
 struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd);
 void sysfs_evict_inode(struct inode *inode);
 int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr);
-int sysfs_permission(struct inode *inode, int mask);
+int sysfs_permission(struct inode *inode, int mask, unsigned int flags);
 int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
 int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
 int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index de44d06..0630eb9 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -333,9 +333,16 @@
 	return &si->vfs_inode;
 }
 
+static void sysv_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
+}
+
 static void sysv_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
+	call_rcu(&inode->i_rcu, sysv_i_callback);
 }
 
 static void init_once(void *p)
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 11e7f7d..b427b12 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -27,7 +27,8 @@
 	return err;
 }
 
-static int sysv_hash(struct dentry *dentry, struct qstr *qstr)
+static int sysv_hash(const struct dentry *dentry, const struct inode *inode,
+		struct qstr *qstr)
 {
 	/* Truncate the name in place, avoids having to define a compare
 	   function. */
@@ -47,7 +48,6 @@
 	struct inode * inode = NULL;
 	ino_t ino;
 
-	dentry->d_op = dir->i_sb->s_root->d_op;
 	if (dentry->d_name.len > SYSV_NAMELEN)
 		return ERR_PTR(-ENAMETOOLONG);
 	ino = sysv_inode_by_name(dentry);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 3d9c62b..f60c196 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -332,6 +332,10 @@
 	sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
 	/* set up enough so that it can read an inode */
 	sb->s_op = &sysv_sops;
+	if (sbi->s_forced_ro)
+		sb->s_flags |= MS_RDONLY;
+	if (sbi->s_truncate)
+		sb->s_d_op = &sysv_dentry_operations;
 	root_inode = sysv_iget(sb, SYSV_ROOT_INO);
 	if (IS_ERR(root_inode)) {
 		printk("SysV FS: get root inode failed\n");
@@ -343,10 +347,6 @@
 		printk("SysV FS: get root dentry failed\n");
 		return 0;
 	}
-	if (sbi->s_forced_ro)
-		sb->s_flags |= MS_RDONLY;
-	if (sbi->s_truncate)
-		sb->s_root->d_op = &sysv_dentry_operations;
 	return 1;
 }
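
The sysv change above uses the new super_block->s_d_op field: set the default dentry_operations once at fill-super time and every dentry created for that superblock (the root included) inherits it, instead of assigning ->d_op by hand in ->lookup(). Sketch of the mount-time side for a hypothetical foofs:

    #include <linux/dcache.h>
    #include <linux/fs.h>

    static const struct dentry_operations foofs_dentry_ops = {
            /* .d_hash / .d_compare / .d_revalidate as needed */
    };

    static int foofs_fill_super(struct super_block *sb, void *data, int silent)
    {
            sb->s_d_op = &foofs_dentry_ops; /* applies to all dentries of this sb */

            /* ... read the on-disk super, set sb->s_op, set up the root ... */

            return 0;
    }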
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 91fac54..6e11c29 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -272,12 +272,20 @@
 	return &ui->vfs_inode;
 };
 
+static void ubifs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ubifs_inode_slab, ui);
+}
+
 static void ubifs_destroy_inode(struct inode *inode)
 {
 	struct ubifs_inode *ui = ubifs_inode(inode);
 
 	kfree(ui->data);
-	kmem_cache_free(ubifs_inode_slab, inode);
+	call_rcu(&inode->i_rcu, ubifs_i_callback);
 }
 
 /*
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index f8def3c..0e0e99b 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -1,6 +1,5 @@
 config UDF_FS
 	tristate "UDF file system support"
-	depends on BKL # needs serious work to remove
 	select CRC_ITU_T
 	help
 	  This is the new file system used on some CD-ROMs and DVDs. Say Y if
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index b608efa..306ee39 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -157,10 +157,9 @@
 				udf_debug("bit %ld already set\n", bit + i);
 				udf_debug("byte=%2x\n",
 					((char *)bh->b_data)[(bit + i) >> 3]);
-			} else {
-				udf_add_free_space(sb, sbi->s_partition, 1);
 			}
 		}
+		udf_add_free_space(sb, sbi->s_partition, count);
 		mark_buffer_dirty(bh);
 		if (overflow) {
 			block += count;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 51552bf..eb8bfe2 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,7 +30,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 
 #include "udf_i.h"
@@ -190,18 +189,14 @@
 	struct inode *dir = filp->f_path.dentry->d_inode;
 	int result;
 
-	lock_kernel();
-
 	if (filp->f_pos == 0) {
 		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
-			unlock_kernel();
 			return 0;
 		}
 		filp->f_pos++;
 	}
 
 	result = do_udf_readdir(dir, filp, filldir, dirent);
-	unlock_kernel();
  	return result;
 }
 
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 66b9e7e..89c7848 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -32,7 +32,6 @@
 #include <linux/string.h> /* memset */
 #include <linux/capability.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/aio.h>
@@ -114,6 +113,7 @@
 	size_t count = iocb->ki_left;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 
+	down_write(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		if (file->f_flags & O_APPEND)
 			pos = inode->i_size;
@@ -126,6 +126,7 @@
 			udf_expand_file_adinicb(inode, pos + count, &err);
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 				udf_debug("udf_expand_adinicb: err=%d\n", err);
+				up_write(&iinfo->i_data_sem);
 				return err;
 			}
 		} else {
@@ -135,6 +136,7 @@
 				iinfo->i_lenAlloc = inode->i_size;
 		}
 	}
+	up_write(&iinfo->i_data_sem);
 
 	retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
 	if (retval > 0)
@@ -149,8 +151,6 @@
 	long old_block, new_block;
 	int result = -EINVAL;
 
-	lock_kernel();
-
 	if (file_permission(filp, MAY_READ) != 0) {
 		udf_debug("no permission to access inode %lu\n", inode->i_ino);
 		result = -EPERM;
@@ -196,7 +196,6 @@
 	}
 
 out:
-	unlock_kernel();
 	return result;
 }
 
@@ -204,10 +203,10 @@
 {
 	if (filp->f_mode & FMODE_WRITE) {
 		mutex_lock(&inode->i_mutex);
-		lock_kernel();
+		down_write(&UDF_I(inode)->i_data_sem);
 		udf_discard_prealloc(inode);
 		udf_truncate_tail_extent(inode);
-		unlock_kernel();
+		up_write(&UDF_I(inode)->i_data_sem);
 		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
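
The UDF hunks in this series drop lock_kernel()/unlock_kernel() in favour of a per-inode rw_semaphore, i_data_sem (initialised in udf_alloc_inode() in the fs/udf/super.c hunk below): paths that change allocation/extent data take it for write, block-mapping lookups take it for read. A reduced sketch of that locking shape, with placeholder names (foofs_inode_info, FOOFS_I):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/rwsem.h>

    struct foofs_inode_info {
            struct rw_semaphore     i_data_sem;     /* guards extent data */
            struct inode            vfs_inode;
    };

    static inline struct foofs_inode_info *FOOFS_I(struct inode *inode)
    {
            return container_of(inode, struct foofs_inode_info, vfs_inode);
    }

    /* alloc_inode time: one init per inode, as in the super.c hunk below */
    static void foofs_init_inode(struct foofs_inode_info *ei)
    {
            init_rwsem(&ei->i_data_sem);
    }

    static void foofs_change_allocation(struct inode *inode)
    {
            down_write(&FOOFS_I(inode)->i_data_sem);
            /* ... allocate or truncate extents ... */
            up_write(&FOOFS_I(inode)->i_data_sem);
    }

    static void foofs_map_block(struct inode *inode)
    {
            down_read(&FOOFS_I(inode)->i_data_sem);
            /* ... translate a logical block ... */
            up_read(&FOOFS_I(inode)->i_data_sem);
    }
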
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 75d9304..6fb7e0a 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -92,28 +92,19 @@
 		return NULL;
 	}
 
-	mutex_lock(&sbi->s_alloc_mutex);
 	if (sbi->s_lvid_bh) {
-		struct logicalVolIntegrityDesc *lvid =
-			(struct logicalVolIntegrityDesc *)
-			sbi->s_lvid_bh->b_data;
-		struct logicalVolIntegrityDescImpUse *lvidiu =
-							udf_sb_lvidiu(sbi);
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				(lvid->logicalVolContentsUse);
+		struct logicalVolIntegrityDescImpUse *lvidiu;
+
+		iinfo->i_unique = lvid_get_unique_id(sb);
+		mutex_lock(&sbi->s_alloc_mutex);
+		lvidiu = udf_sb_lvidiu(sbi);
 		if (S_ISDIR(mode))
 			le32_add_cpu(&lvidiu->numDirs, 1);
 		else
 			le32_add_cpu(&lvidiu->numFiles, 1);
-		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
 		udf_updated_lvid(sb);
+		mutex_unlock(&sbi->s_alloc_mutex);
 	}
-	mutex_unlock(&sbi->s_alloc_mutex);
 
 	inode_init_owner(inode, dir, mode);
 
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fc48f37..c6a2e78 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -31,7 +31,6 @@
 
 #include "udfdecl.h"
 #include <linux/mm.h>
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -51,6 +50,7 @@
 static mode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
 static void udf_fill_inode(struct inode *, struct buffer_head *);
+static int udf_sync_inode(struct inode *inode);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
 					sector_t *, int *);
@@ -79,9 +79,7 @@
 		want_delete = 1;
 		inode->i_size = 0;
 		udf_truncate(inode);
-		lock_kernel();
 		udf_update_inode(inode, IS_SYNC(inode));
-		unlock_kernel();
 	}
 	invalidate_inode_buffers(inode);
 	end_writeback(inode);
@@ -97,9 +95,7 @@
 	kfree(iinfo->i_ext.i_data);
 	iinfo->i_ext.i_data = NULL;
 	if (want_delete) {
-		lock_kernel();
 		udf_free_inode(inode);
-		unlock_kernel();
 	}
 }
 
@@ -302,10 +298,9 @@
 	err = -EIO;
 	new = 0;
 	bh = NULL;
-
-	lock_kernel();
-
 	iinfo = UDF_I(inode);
+
+	down_write(&iinfo->i_data_sem);
 	if (block == iinfo->i_next_alloc_block + 1) {
 		iinfo->i_next_alloc_block++;
 		iinfo->i_next_alloc_goal++;
@@ -324,7 +319,7 @@
 	map_bh(bh_result, inode->i_sb, phys);
 
 abort:
-	unlock_kernel();
+	up_write(&iinfo->i_data_sem);
 	return err;
 }
 
@@ -1022,16 +1017,16 @@
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 		return;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+		down_write(&iinfo->i_data_sem);
 		if (inode->i_sb->s_blocksize <
 				(udf_file_entry_alloc_offset(inode) +
 				 inode->i_size)) {
 			udf_expand_file_adinicb(inode, inode->i_size, &err);
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 				inode->i_size = iinfo->i_lenAlloc;
-				unlock_kernel();
+				up_write(&iinfo->i_data_sem);
 				return;
 			} else
 				udf_truncate_extents(inode);
@@ -1042,10 +1037,13 @@
 				offset - udf_file_entry_alloc_offset(inode));
 			iinfo->i_lenAlloc = inode->i_size;
 		}
+		up_write(&iinfo->i_data_sem);
 	} else {
 		block_truncate_page(inode->i_mapping, inode->i_size,
 				    udf_get_block);
+		down_write(&iinfo->i_data_sem);
 		udf_truncate_extents(inode);
+		up_write(&iinfo->i_data_sem);
 	}
 
 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
@@ -1053,7 +1051,6 @@
 		udf_sync_inode(inode);
 	else
 		mark_inode_dirty(inode);
-	unlock_kernel();
 }
 
 static void __udf_read_inode(struct inode *inode)
@@ -1202,6 +1199,7 @@
 		return;
 	}
 
+	read_lock(&sbi->s_cred_lock);
 	inode->i_uid = le32_to_cpu(fe->uid);
 	if (inode->i_uid == -1 ||
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
@@ -1214,13 +1212,6 @@
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
 
-	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
-	if (!inode->i_nlink)
-		inode->i_nlink = 1;
-
-	inode->i_size = le64_to_cpu(fe->informationLength);
-	iinfo->i_lenExtents = inode->i_size;
-
 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
 			sbi->s_fmode != UDF_INVALID_MODE)
 		inode->i_mode = sbi->s_fmode;
@@ -1230,6 +1221,14 @@
 	else
 		inode->i_mode = udf_convert_permissions(fe);
 	inode->i_mode &= ~sbi->s_umask;
+	read_unlock(&sbi->s_cred_lock);
+
+	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
+	if (!inode->i_nlink)
+		inode->i_nlink = 1;
+
+	inode->i_size = le64_to_cpu(fe->informationLength);
+	iinfo->i_lenExtents = inode->i_size;
 
 	if (iinfo->i_efe == 0) {
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
@@ -1373,16 +1372,10 @@
 
 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	int ret;
-
-	lock_kernel();
-	ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-	unlock_kernel();
-
-	return ret;
+	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
-int udf_sync_inode(struct inode *inode)
+static int udf_sync_inode(struct inode *inode)
 {
 	return udf_update_inode(inode, 1);
 }
@@ -2048,7 +2041,7 @@
 	struct extent_position epos = {};
 	int ret;
 
-	lock_kernel();
+	down_read(&UDF_I(inode)->i_data_sem);
 
 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
 						(EXT_RECORDED_ALLOCATED >> 30))
@@ -2056,7 +2049,7 @@
 	else
 		ret = 0;
 
-	unlock_kernel();
+	up_read(&UDF_I(inode)->i_data_sem);
 	brelse(epos.bh);
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 6d8dc02..2be0f9e 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,7 +27,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
 #include <linux/crc-itu-t.h>
@@ -228,10 +227,8 @@
 		}
 
 		if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) &&
-		    isdotdot) {
-			brelse(epos.bh);
-			return fi;
-		}
+		    isdotdot)
+			goto out_ok;
 
 		if (!lfi)
 			continue;
@@ -263,7 +260,6 @@
 	if (dentry->d_name.len > UDF_NAME_LEN - 2)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	lock_kernel();
 #ifdef UDF_RECOVERY
 	/* temporary shorthand for specifying files by inode number */
 	if (!strncmp(dentry->d_name.name, ".B=", 3)) {
@@ -275,7 +271,6 @@
 		};
 		inode = udf_iget(dir->i_sb, lb);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	} else
@@ -291,11 +286,9 @@
 		loc = lelb_to_cpu(cfi.icb.extLocation);
 		inode = udf_iget(dir->i_sb, &loc);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	}
-	unlock_kernel();
 
 	return d_splice_alias(inode, dentry);
 }
@@ -476,15 +469,19 @@
 				f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
 		if (!fibh->ebh)
 			goto out_err;
+		/* Extents could have been merged, invalidate our position */
+		brelse(epos.bh);
+		epos.bh = NULL;
+		epos.block = dinfo->i_location;
+		epos.offset = udf_file_entry_alloc_offset(dir);
 
 		if (!fibh->soffset) {
-			if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
-			    (EXT_RECORDED_ALLOCATED >> 30)) {
-				block = eloc.logicalBlockNum + ((elen - 1) >>
+			/* Find the freshly allocated block */
+			while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+				(EXT_RECORDED_ALLOCATED >> 30))
+				;
+			block = eloc.logicalBlockNum + ((elen - 1) >>
 					dir->i_sb->s_blocksize_bits);
-			} else
-				block++;
-
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 			fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
@@ -562,10 +559,8 @@
 	int err;
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode) {
-		unlock_kernel();
 		return err;
 	}
 
@@ -583,7 +578,6 @@
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -596,7 +590,6 @@
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-	unlock_kernel();
 	d_instantiate(dentry, inode);
 
 	return 0;
@@ -614,7 +607,6 @@
 	if (!old_valid_dev(rdev))
 		return -EINVAL;
 
-	lock_kernel();
 	err = -EIO;
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode)
@@ -627,7 +619,6 @@
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -646,7 +637,6 @@
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -659,7 +649,6 @@
 	struct udf_inode_info *dinfo = UDF_I(dir);
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	err = -EMLINK;
 	if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
 		goto out;
@@ -712,7 +701,6 @@
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -794,7 +782,6 @@
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -826,7 +813,6 @@
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -840,7 +826,6 @@
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -870,7 +855,6 @@
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -890,21 +874,21 @@
 	int block;
 	unsigned char *name = NULL;
 	int namelen;
-	struct buffer_head *bh;
 	struct udf_inode_info *iinfo;
+	struct super_block *sb = dir->i_sb;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
 	if (!inode)
 		goto out;
 
+	iinfo = UDF_I(inode);
+	down_write(&iinfo->i_data_sem);
 	name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
 	if (!name) {
 		err = -ENOMEM;
 		goto out_no_entry;
 	}
 
-	iinfo = UDF_I(inode);
 	inode->i_data.a_ops = &udf_symlink_aops;
 	inode->i_op = &udf_symlink_inode_operations;
 
@@ -912,7 +896,7 @@
 		struct kernel_lb_addr eloc;
 		uint32_t bsize;
 
-		block = udf_new_block(inode->i_sb, inode,
+		block = udf_new_block(sb, inode,
 				iinfo->i_location.partitionReferenceNum,
 				iinfo->i_location.logicalBlockNum, &err);
 		if (!block)
@@ -923,17 +907,17 @@
 		eloc.logicalBlockNum = block;
 		eloc.partitionReferenceNum =
 				iinfo->i_location.partitionReferenceNum;
-		bsize = inode->i_sb->s_blocksize;
+		bsize = sb->s_blocksize;
 		iinfo->i_lenExtents = bsize;
 		udf_add_aext(inode, &epos, &eloc, bsize, 0);
 		brelse(epos.bh);
 
-		block = udf_get_pblock(inode->i_sb, block,
+		block = udf_get_pblock(sb, block,
 				iinfo->i_location.partitionReferenceNum,
 				0);
-		epos.bh = udf_tgetblk(inode->i_sb, block);
+		epos.bh = udf_tgetblk(sb, block);
 		lock_buffer(epos.bh);
-		memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
+		memset(epos.bh->b_data, 0x00, bsize);
 		set_buffer_uptodate(epos.bh);
 		unlock_buffer(epos.bh);
 		mark_buffer_dirty_inode(epos.bh, inode);
@@ -941,7 +925,7 @@
 	} else
 		ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 
-	eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
+	eoffset = sb->s_blocksize - udf_ext0_offset(inode);
 	pc = (struct pathComponent *)ea;
 
 	if (*symname == '/') {
@@ -981,7 +965,7 @@
 		}
 
 		if (pc->componentType == 5) {
-			namelen = udf_put_filename(inode->i_sb, compstart, name,
+			namelen = udf_put_filename(sb, compstart, name,
 						   symname - compstart);
 			if (!namelen)
 				goto out_no_entry;
@@ -1015,27 +999,16 @@
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi)
 		goto out_no_entry;
-	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				lvid->logicalVolContentsUse;
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
 		mark_inode_dirty(dir);
+	up_write(&iinfo->i_data_sem);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
@@ -1044,10 +1017,10 @@
 
 out:
 	kfree(name);
-	unlock_kernel();
 	return err;
 
 out_no_entry:
+	up_write(&iinfo->i_data_sem);
 	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
@@ -1060,36 +1033,20 @@
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
-	struct buffer_head *bh;
 
-	lock_kernel();
 	if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
-		unlock_kernel();
 		return -EMLINK;
 	}
 
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi) {
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				(lvid->logicalVolContentsUse);
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(inode->i_sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
@@ -1103,7 +1060,6 @@
 	mark_inode_dirty(inode);
 	ihold(inode);
 	d_instantiate(dentry, inode);
-	unlock_kernel();
 
 	return 0;
 }
@@ -1124,7 +1080,6 @@
 	struct kernel_lb_addr tloc;
 	struct udf_inode_info *old_iinfo = UDF_I(old_inode);
 
-	lock_kernel();
 	ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
 	if (ofi) {
 		if (ofibh.sbh != ofibh.ebh)
@@ -1248,7 +1203,6 @@
 			brelse(nfibh.ebh);
 		brelse(nfibh.sbh);
 	}
-	unlock_kernel();
 
 	return retval;
 }
@@ -1261,7 +1215,6 @@
 	struct fileIdentDesc cfi;
 	struct udf_fileident_bh fibh;
 
-	lock_kernel();
 	if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
 		goto out_unlock;
 
@@ -1273,11 +1226,9 @@
 	inode = udf_iget(child->d_inode->i_sb, &tloc);
 	if (!inode)
 		goto out_unlock;
-	unlock_kernel();
 
 	return d_obtain_alias(inode);
 out_unlock:
-	unlock_kernel();
 	return ERR_PTR(-EACCES);
 }
 
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 745eb20..a71090e 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/buffer_head.h>
+#include <linux/mutex.h>
 
 uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
 			uint16_t partition, uint32_t offset)
@@ -159,7 +160,9 @@
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	u16 reallocationTableLen;
 	struct buffer_head *bh;
+	int ret = 0;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	for (i = 0; i < sbi->s_partitions; i++) {
 		struct udf_part_map *map = &sbi->s_partmaps[i];
 		if (old_block > map->s_partition_root &&
@@ -175,8 +178,10 @@
 					break;
 				}
 
-			if (!st)
-				return 1;
+			if (!st) {
+				ret = 1;
+				goto out;
+			}
 
 			reallocationTableLen =
 					le16_to_cpu(st->reallocationTableLen);
@@ -207,14 +212,16 @@
 						     ((old_block -
 							map->s_partition_root) &
 						     (sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc == packet) {
 					*new_block = le32_to_cpu(
 							entry->mappedLocation) +
 						     ((old_block -
 							map->s_partition_root) &
 						     (sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc > packet)
 					break;
 			}
@@ -251,20 +258,24 @@
 					      st->mapEntry[k].mappedLocation) +
 					((old_block - map->s_partition_root) &
 					 (sdata->s_packet_len - 1));
-				return 0;
+				ret = 0;
+				goto out;
 			}
 
-			return 1;
+			ret = 1;
+			goto out;
 		} /* if old_block */
 	}
 
 	if (i == sbi->s_partitions) {
 		/* outside of partitions */
 		/* for now, fail =) */
-		return 1;
+		ret = 1;
 	}
 
-	return 0;
+out:
+	mutex_unlock(&sbi->s_alloc_mutex);
+	return ret;
 }
 
 static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
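
The conversion in the relocation helper above follows a common shape: take s_alloc_mutex for the whole sparing-table walk and funnel every early return through a single unlock site. A minimal sketch of that shape, with a hypothetical some_condition() standing in for the real table checks:

	int relocate_example(struct super_block *sb)
	{
		struct udf_sb_info *sbi = UDF_SB(sb);
		int ret = 0;

		mutex_lock(&sbi->s_alloc_mutex);
		if (!some_condition(sb)) {	/* hypothetical check */
			ret = 1;		/* was "return 1" before the conversion */
			goto out;
		}
		/* ... search the sparing tables and remap the block ... */
	out:
		mutex_unlock(&sbi->s_alloc_mutex);
		return ret;
	}
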
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4a5c7c6..7b27b06 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -48,7 +48,6 @@
 #include <linux/stat.h>
 #include <linux/cdrom.h>
 #include <linux/nls.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
 #include <linux/vmalloc.h>
@@ -135,13 +134,21 @@
 	ei->i_next_alloc_block = 0;
 	ei->i_next_alloc_goal = 0;
 	ei->i_strat4096 = 0;
+	init_rwsem(&ei->i_data_sem);
 
 	return &ei->vfs_inode;
 }
 
+static void udf_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
+}
+
 static void udf_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
+	call_rcu(&inode->i_rcu, udf_i_callback);
 }
 
 static void init_once(void *foo)
@@ -567,13 +574,14 @@
 	if (!udf_parse_options(options, &uopt, true))
 		return -EINVAL;
 
-	lock_kernel();
+	write_lock(&sbi->s_cred_lock);
 	sbi->s_flags = uopt.flags;
 	sbi->s_uid   = uopt.uid;
 	sbi->s_gid   = uopt.gid;
 	sbi->s_umask = uopt.umask;
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
+	write_unlock(&sbi->s_cred_lock);
 
 	if (sbi->s_lvid_bh) {
 		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
@@ -590,7 +598,6 @@
 		udf_open_lvid(sb);
 
 out_unlock:
-	unlock_kernel();
 	return error;
 }
 
@@ -959,9 +966,9 @@
 		(sizeof(struct buffer_head *) * nr_groups);
 
 	if (size <= PAGE_SIZE)
-		bitmap = kmalloc(size, GFP_KERNEL);
+		bitmap = kzalloc(size, GFP_KERNEL);
 	else
-		bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
+		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
 
 	if (bitmap == NULL) {
 		udf_error(sb, __func__,
@@ -970,7 +977,6 @@
 		return NULL;
 	}
 
-	memset(bitmap, 0x00, size);
 	bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
 	bitmap->s_nr_groups = nr_groups;
 	return bitmap;
@@ -1774,6 +1780,8 @@
 
 	if (!bh)
 		return;
+
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 
@@ -1790,6 +1798,7 @@
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
 }
 
 static void udf_close_lvid(struct super_block *sb)
@@ -1802,6 +1811,7 @@
 	if (!bh)
 		return;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1822,6 +1832,34 @@
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
+}
+
+u64 lvid_get_unique_id(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct logicalVolIntegrityDesc *lvid;
+	struct logicalVolHeaderDesc *lvhd;
+	u64 uniqueID;
+	u64 ret;
+
+	bh = sbi->s_lvid_bh;
+	if (!bh)
+		return 0;
+
+	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
+
+	mutex_lock(&sbi->s_alloc_mutex);
+	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (!(++uniqueID & 0xFFFFFFFF))
+		uniqueID += 16;
+	lvhd->uniqueID = cpu_to_le64(uniqueID);
+	mutex_unlock(&sbi->s_alloc_mutex);
+	mark_buffer_dirty(bh);
+
+	return ret;
 }
 
 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1879,8 +1917,6 @@
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
 
-	lock_kernel();
-
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = -1;
 	uopt.gid = -1;
@@ -1889,10 +1925,8 @@
 	uopt.dmode = UDF_INVALID_MODE;
 
 	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
-	if (!sbi) {
-		unlock_kernel();
+	if (!sbi)
 		return -ENOMEM;
-	}
 
 	sb->s_fs_info = sbi;
 
@@ -1929,6 +1963,7 @@
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
 	sbi->s_nls_map = uopt.nls_map;
+	rwlock_init(&sbi->s_cred_lock);
 
 	if (uopt.session == 0xFFFFFFFF)
 		sbi->s_session = udf_get_last_session(sb);
@@ -2038,7 +2073,6 @@
 		goto error_out;
 	}
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	unlock_kernel();
 	return 0;
 
 error_out:
@@ -2059,7 +2093,6 @@
 	kfree(sbi);
 	sb->s_fs_info = NULL;
 
-	unlock_kernel();
 	return -EINVAL;
 }
 
@@ -2098,8 +2131,6 @@
 
 	sbi = UDF_SB(sb);
 
-	lock_kernel();
-
 	if (sbi->s_vat_inode)
 		iput(sbi->s_vat_inode);
 	if (sbi->s_partitions)
@@ -2115,8 +2146,6 @@
 	kfree(sbi->s_partmaps);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
-
-	unlock_kernel();
 }
 
 static int udf_sync_fs(struct super_block *sb, int wait)
@@ -2179,8 +2208,6 @@
 	uint16_t ident;
 	struct spaceBitmapDesc *bm;
 
-	lock_kernel();
-
 	loc.logicalBlockNum = bitmap->s_extPosition;
 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 	bh = udf_read_ptagged(sb, &loc, 0, &ident);
@@ -2217,10 +2244,7 @@
 		}
 	}
 	brelse(bh);
-
 out:
-	unlock_kernel();
-
 	return accum;
 }
 
@@ -2233,8 +2257,7 @@
 	int8_t etype;
 	struct extent_position epos;
 
-	lock_kernel();
-
+	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
 	epos.block = UDF_I(table)->i_location;
 	epos.offset = sizeof(struct unallocSpaceEntry);
 	epos.bh = NULL;
@@ -2243,8 +2266,7 @@
 		accum += (elen >> table->i_sb->s_blocksize_bits);
 
 	brelse(epos.bh);
-
-	unlock_kernel();
+	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
 
 	return accum;
 }
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 1606478..b1d4488 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -27,7 +27,6 @@
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
@@ -78,13 +77,16 @@
 	int err = -EIO;
 	unsigned char *p = kmap(page);
 	struct udf_inode_info *iinfo;
+	uint32_t pos;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
+	pos = udf_block_map(inode, 0);
+
+	down_read(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 	} else {
-		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
+		bh = sb_bread(inode->i_sb, pos);
 
 		if (!bh)
 			goto out;
@@ -95,14 +97,14 @@
 	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
 	brelse(bh);
 
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageUptodate(page);
 	kunmap(page);
 	unlock_page(page);
 	return 0;
 
 out:
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageError(page);
 	kunmap(page);
 	unlock_page(page);
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index e58d1de..d1bd31e 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,6 +1,18 @@
 #ifndef _UDF_I_H
 #define _UDF_I_H
 
+/*
+ * The i_data_sem and i_mutex serve for protection of allocation information
+ * of regular files and symlinks. This includes all extents belonging to
+ * the file/symlink, whether the data is stored in-inode or in external data
+ * blocks, preallocation, goal block information... When extents are read,
+ * i_mutex or i_data_sem must be held (holding i_data_sem for reading is
+ * enough). When extents are changed, i_data_sem must be held for writing
+ * and i_mutex must be held as well.
+ *
+ * For directories i_mutex is used for all the necessary protection.
+ */
+
 struct udf_inode_info {
 	struct timespec		i_crtime;
 	/* Physical address of inode */
@@ -21,6 +33,7 @@
 		struct long_ad		*i_lad;
 		__u8		*i_data;
 	} i_ext;
+	struct rw_semaphore	i_data_sem;
 	struct inode vfs_inode;
 };
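
For illustration, the locking protocol described in the comment at the top of this header boils down to the pattern below (a sketch with hypothetical helper names; error handling omitted):

	static void udf_read_extents_example(struct inode *inode)
	{
		struct udf_inode_info *iinfo = UDF_I(inode);

		/* Readers may hold i_data_sem shared (or i_mutex instead). */
		down_read(&iinfo->i_data_sem);
		/* ... walk the extent list or in-inode data ... */
		up_read(&iinfo->i_data_sem);
	}

	static void udf_change_extents_example(struct inode *inode)
	{
		struct udf_inode_info *iinfo = UDF_I(inode);

		/* Writers hold i_mutex and take i_data_sem exclusively. */
		mutex_lock(&inode->i_mutex);
		down_write(&iinfo->i_data_sem);
		/* ... allocate, truncate or remap extents ... */
		up_write(&iinfo->i_data_sem);
		mutex_unlock(&inode->i_mutex);
	}
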
 
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index d113b72..4858c19 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -2,6 +2,7 @@
 #define __LINUX_UDF_SB_H
 
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC			0x15013346
@@ -128,6 +129,8 @@
 	uid_t			s_uid;
 	mode_t			s_fmode;
 	mode_t			s_dmode;
+	/* Lock protecting consistency of above permission settings */
+	rwlock_t		s_cred_lock;
 
 	/* Root Info */
 	struct timespec		s_record_time;
@@ -139,7 +142,7 @@
 	__u16			s_udfrev;
 
 	/* Miscellaneous flags */
-	__u32			s_flags;
+	unsigned long		s_flags;
 
 	/* Encoding info */
 	struct nls_table	*s_nls_map;
@@ -161,8 +164,19 @@
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
-#define UDF_QUERY_FLAG(X,Y)			( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
-#define UDF_SET_FLAG(X,Y)			( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
-#define UDF_CLEAR_FLAG(X,Y)			( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
+static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag)
+{
+	return test_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_SET_FLAG(struct super_block *sb, int flag)
+{
+	set_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag)
+{
+	clear_bit(flag, &UDF_SB(sb)->s_flags);
+}
 
 #endif /* __LINUX_UDF_SB_H */
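
Because s_flags is now an unsigned long updated with set_bit()/clear_bit()/test_bit(), flag updates are atomic, so two CPUs toggling different flags can no longer lose each other's update the way the old non-atomic "|=" and "&=" macros could. A small usage sketch (the flag name is one the mount code above already uses):

	static void example_force_strict(struct super_block *sb)
	{
		if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
			UDF_SET_FLAG(sb, UDF_FLAG_STRICT);	/* atomic bitop on s_flags */
	}
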
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 6995ab1..eba4820 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -111,6 +111,8 @@
 };
 
 /* super.c */
+
+__attribute__((format(printf, 3, 4)))
 extern void udf_warning(struct super_block *, const char *, const char *, ...);
 static inline void udf_updated_lvid(struct super_block *sb)
 {
@@ -123,6 +125,7 @@
 	sb->s_dirt = 1;
 	UDF_SB(sb)->s_lvid_dirty = 1;
 }
+extern u64 lvid_get_unique_id(struct super_block *sb);
 
 /* namei.c */
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
@@ -133,7 +136,6 @@
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern int udf_sync_inode(struct inode *);
 extern void udf_expand_file_adinicb(struct inode *, int, int *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 2c47dae..2c61ac5 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1412,9 +1412,16 @@
 	return &ei->vfs_inode;
 }
 
+static void ufs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
+}
+
 static void ufs_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
+	call_rcu(&inode->i_rcu, ufs_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 0dce969..faca449 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -98,6 +98,7 @@
 				   kmem.o \
 				   xfs_aops.o \
 				   xfs_buf.o \
+				   xfs_discard.o \
 				   xfs_export.o \
 				   xfs_file.o \
 				   xfs_fs_subr.o \
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
deleted file mode 100644
index 4dfc7c3..0000000
--- a/fs/xfs/linux-2.6/sv.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_SV_H__
-#define __XFS_SUPPORT_SV_H__
-
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-/*
- * Synchronisation variables.
- *
- * (Parameters "pri", "svf" and "rts" are not implemented)
- */
-
-typedef struct sv_s {
-	wait_queue_head_t waiters;
-} sv_t;
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	add_wait_queue_exclusive(&sv->waiters, &wait);
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	spin_unlock(lock);
-
-	schedule();
-
-	remove_wait_queue(&sv->waiters, &wait);
-}
-
-#define sv_init(sv,flag,name) \
-	init_waitqueue_head(&(sv)->waiters)
-#define sv_destroy(sv) \
-	/*NOTHING*/
-#define sv_wait(sv, pri, lock, s) \
-	_sv_wait(sv, lock)
-#define sv_signal(sv) \
-	wake_up(&(sv)->waiters)
-#define sv_broadcast(sv) \
-	wake_up_all(&(sv)->waiters)
-
-#endif /* __XFS_SUPPORT_SV_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index b277186..39f4f80 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -219,12 +219,13 @@
 }
 
 int
-xfs_check_acl(struct inode *inode, int mask)
+xfs_check_acl(struct inode *inode, int mask, unsigned int flags)
 {
-	struct xfs_inode *ip = XFS_I(inode);
+	struct xfs_inode *ip;
 	struct posix_acl *acl;
 	int error = -EAGAIN;
 
+	ip = XFS_I(inode);
 	trace_xfs_check_acl(ip);
 
 	/*
@@ -234,6 +235,12 @@
 	if (!XFS_IFORK_Q(ip))
 		return -EAGAIN;
 
+	if (flags & IPERM_FLAG_RCU) {
+		if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
+			return -ECHILD;
+		return -EAGAIN;
+	}
+
 	acl = xfs_get_acl(inode, ACL_TYPE_ACCESS);
 	if (IS_ERR(acl))
 		return PTR_ERR(acl);
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 691f612..ec7bbb5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,15 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- */
-enum {
-	IO_READ,	/* mapping for a read */
-	IO_DELAY,	/* mapping covers delalloc region */
-	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
-	IO_NEW		/* just allocated */
-};
 
 /*
  * Prime number of hash buckets since address is used as the key.
@@ -182,9 +173,6 @@
 	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(ioend->io_type != IO_READ);
-
 	if (unlikely(ioend->io_error))
 		return 0;
 
@@ -244,10 +232,8 @@
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	if (ioend->io_type != IO_READ) {
-		error = xfs_setfilesize(ioend);
-		ASSERT(!error || error == EAGAIN);
-	}
+	error = xfs_setfilesize(ioend);
+	ASSERT(!error || error == EAGAIN);
 
 	/*
 	 * If we didn't complete processing of the ioend, requeue it to the
@@ -318,14 +304,63 @@
 xfs_map_blocks(
 	struct inode		*inode,
 	loff_t			offset,
-	ssize_t			count,
 	struct xfs_bmbt_irec	*imap,
-	int			flags)
+	int			type,
+	int			nonblocking)
 {
-	int			nmaps = 1;
-	int			new = 0;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			count = 1 << inode->i_blkbits;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
 
-	return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	if (type == IO_UNWRITTEN)
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		if (nonblocking)
+			return -XFS_ERROR(EAGAIN);
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+	}
+
+	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+	       (ip->i_df.if_flags & XFS_IFEXTENTS));
+	ASSERT(offset <= mp->m_maxioffset);
+
+	if (offset + count > mp->m_maxioffset)
+		count = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  bmapi_flags,  NULL, 0, imap, &nimaps, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	if (error)
+		return -XFS_ERROR(error);
+
+	if (type == IO_DELALLOC &&
+	    (!nimaps || isnullstartblock(imap->br_startblock))) {
+		error = xfs_iomap_write_allocate(ip, offset, count, imap);
+		if (!error)
+			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+		return -XFS_ERROR(error);
+	}
+
+#ifdef DEBUG
+	if (type == IO_UNWRITTEN) {
+		ASSERT(nimaps);
+		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+	}
+#endif
+	if (nimaps)
+		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+	return 0;
 }
 
 STATIC int
@@ -380,26 +415,18 @@
 
 	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
 		   WRITE_SYNC_PLUG : WRITE, bio);
-	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
-	bio_put(bio);
 }
 
 STATIC struct bio *
 xfs_alloc_ioend_bio(
 	struct buffer_head	*bh)
 {
-	struct bio		*bio;
 	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
-
-	do {
-		bio = bio_alloc(GFP_NOIO, nvecs);
-		nvecs >>= 1;
-	} while (!bio);
+	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
-	bio_get(bio);
 	return bio;
 }
 
@@ -470,9 +497,8 @@
 	/* Pass 1 - start writeback */
 	do {
 		next = ioend->io_list;
-		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
 			xfs_start_buffer_writeback(bh);
-		}
 	} while ((ioend = next) != NULL);
 
 	/* Pass 2 - submit I/O */
@@ -600,117 +626,13 @@
 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 
-	lock_buffer(bh);
 	xfs_map_buffer(inode, bh, imap, offset);
-	bh->b_bdev = xfs_find_bdev_for_inode(inode);
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
 	clear_buffer_unwritten(bh);
 }
 
 /*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
-	struct page		*page,
-	unsigned int		pg_offset)
-{
-	struct buffer_head	*bh, *head;
-	int			ret = 0;
-
-	if (PageWriteback(page))
-		return 0;
-	if (!PageDirty(page))
-		return 0;
-	if (!page->mapping)
-		return 0;
-	if (!page_has_buffers(page))
-		return 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (!buffer_uptodate(bh))
-			break;
-		if (!buffer_mapped(bh))
-			break;
-		ret += bh->b_size;
-		if (ret >= pg_offset)
-			break;
-	} while ((bh = bh->b_this_page) != head);
-
-	return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
-	struct inode		*inode,
-	struct page		*startpage,
-	struct buffer_head	*bh,
-	struct buffer_head	*head)
-{
-	struct pagevec		pvec;
-	pgoff_t			tindex, tlast, tloff;
-	size_t			total = 0;
-	int			done = 0, i;
-
-	/* First sum forwards in this page */
-	do {
-		if (!buffer_uptodate(bh) || !buffer_mapped(bh))
-			return total;
-		total += bh->b_size;
-	} while ((bh = bh->b_this_page) != head);
-
-	/* if we reached the end of the page, sum forwards in following pages */
-	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	tindex = startpage->index + 1;
-
-	/* Prune this back to avoid pathological behavior */
-	tloff = min(tlast, startpage->index + 64);
-
-	pagevec_init(&pvec, 0);
-	while (!done && tindex <= tloff) {
-		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
-		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
-			break;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			size_t pg_offset, pg_len = 0;
-
-			if (tindex == tlast) {
-				pg_offset =
-				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-				if (!pg_offset) {
-					done = 1;
-					break;
-				}
-			} else
-				pg_offset = PAGE_CACHE_SIZE;
-
-			if (page->index == tindex && trylock_page(page)) {
-				pg_len = xfs_probe_page(page, pg_offset);
-				unlock_page(page);
-			}
-
-			if (!pg_len) {
-				done = 1;
-				break;
-			}
-
-			total += pg_len;
-			tindex++;
-		}
-
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	return total;
-}
-
-/*
  * Test if a given page is suitable for writing as part of an unwritten
  * or delayed allocate extent.
  */
@@ -731,9 +653,9 @@
 			if (buffer_unwritten(bh))
 				acceptable = (type == IO_UNWRITTEN);
 			else if (buffer_delay(bh))
-				acceptable = (type == IO_DELAY);
+				acceptable = (type == IO_DELALLOC);
 			else if (buffer_dirty(bh) && buffer_mapped(bh))
-				acceptable = (type == IO_NEW);
+				acceptable = (type == IO_OVERWRITE);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
@@ -758,8 +680,7 @@
 	loff_t			tindex,
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
-	struct writeback_control *wbc,
-	int			all_bh)
+	struct writeback_control *wbc)
 {
 	struct buffer_head	*bh, *head;
 	xfs_off_t		end_offset;
@@ -814,37 +735,30 @@
 			continue;
 		}
 
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
+		if (buffer_unwritten(bh) || buffer_delay(bh) ||
+		    buffer_mapped(bh)) {
 			if (buffer_unwritten(bh))
 				type = IO_UNWRITTEN;
+			else if (buffer_delay(bh))
+				type = IO_DELALLOC;
 			else
-				type = IO_DELAY;
+				type = IO_OVERWRITE;
 
 			if (!xfs_imap_valid(inode, imap, offset)) {
 				done = 1;
 				continue;
 			}
 
-			ASSERT(imap->br_startblock != HOLESTARTBLOCK);
-			ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
-			xfs_map_at_offset(inode, bh, imap, offset);
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, imap, offset);
 			xfs_add_to_ioend(inode, bh, offset, type,
 					 ioendp, done);
 
 			page_dirty--;
 			count++;
 		} else {
-			type = IO_NEW;
-			if (buffer_mapped(bh) && all_bh) {
-				lock_buffer(bh);
-				xfs_add_to_ioend(inode, bh, offset,
-						type, ioendp, done);
-				count++;
-				page_dirty--;
-			} else {
-				done = 1;
-			}
+			done = 1;
 		}
 	} while (offset += len, (bh = bh->b_this_page) != head);
 
@@ -876,7 +790,6 @@
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	int			all_bh,
 	pgoff_t			tlast)
 {
 	struct pagevec		pvec;
@@ -891,7 +804,7 @@
 
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
-					imap, ioendp, wbc, all_bh);
+					imap, ioendp, wbc);
 			if (done)
 				break;
 		}
@@ -935,7 +848,7 @@
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_is_delayed_page(page, IO_DELAY))
+	if (!xfs_is_delayed_page(page, IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1002,10 +915,10 @@
 	unsigned int		type;
 	__uint64_t              end_offset;
 	pgoff_t                 end_index, last_index;
-	ssize_t			size, len;
-	int			flags, err, imap_valid = 0, uptodate = 1;
+	ssize_t			len;
+	int			err, imap_valid = 0, uptodate = 1;
 	int			count = 0;
-	int			all_bh = 0;
+	int			nonblocking = 0;
 
 	trace_xfs_writepage(inode, page, 0);
 
@@ -1056,10 +969,14 @@
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	flags = BMAPI_READ;
-	type = IO_NEW;
+	type = IO_OVERWRITE;
+
+	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+		nonblocking = 1;
 
 	do {
+		int new_ioend = 0;
+
 		if (offset >= end_offset)
 			break;
 		if (!buffer_uptodate(bh))
@@ -1076,90 +993,54 @@
 			continue;
 		}
 
-		if (imap_valid)
-			imap_valid = xfs_imap_valid(inode, &imap, offset);
-
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
-			int new_ioend = 0;
-
-			/*
-			 * Make sure we don't use a read-only iomap
-			 */
-			if (flags == BMAPI_READ)
-				imap_valid = 0;
-
-			if (buffer_unwritten(bh)) {
+		if (buffer_unwritten(bh)) {
+			if (type != IO_UNWRITTEN) {
 				type = IO_UNWRITTEN;
-				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
-			} else if (buffer_delay(bh)) {
-				type = IO_DELAY;
-				flags = BMAPI_ALLOCATE;
-
-				if (wbc->sync_mode == WB_SYNC_NONE)
-					flags |= BMAPI_TRYLOCK;
+				imap_valid = 0;
 			}
-
-			if (!imap_valid) {
-				/*
-				 * If we didn't have a valid mapping then we
-				 * need to ensure that we put the new mapping
-				 * in a new ioend structure. This needs to be
-				 * done to ensure that the ioends correctly
-				 * reflect the block mappings at io completion
-				 * for unwritten extent conversion.
-				 */
-				new_ioend = 1;
-				err = xfs_map_blocks(inode, offset, len,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							    offset);
-			}
-			if (imap_valid) {
-				xfs_map_at_offset(inode, bh, &imap, offset);
-				xfs_add_to_ioend(inode, bh, offset, type,
-						 &ioend, new_ioend);
-				count++;
+		} else if (buffer_delay(bh)) {
+			if (type != IO_DELALLOC) {
+				type = IO_DELALLOC;
+				imap_valid = 0;
 			}
 		} else if (buffer_uptodate(bh)) {
-			/*
-			 * we got here because the buffer is already mapped.
-			 * That means it must already have extents allocated
-			 * underneath it. Map the extent by reading it.
-			 */
-			if (!imap_valid || flags != BMAPI_READ) {
-				flags = BMAPI_READ;
-				size = xfs_probe_cluster(inode, page, bh, head);
-				err = xfs_map_blocks(inode, offset, size,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							    offset);
-			}
-
-			/*
-			 * We set the type to IO_NEW in case we are doing a
-			 * small write at EOF that is extending the file but
-			 * without needing an allocation. We need to update the
-			 * file size on I/O completion in this case so it is
-			 * the same case as having just allocated a new extent
-			 * that we are writing into for the first time.
-			 */
-			type = IO_NEW;
-			if (trylock_buffer(bh)) {
-				if (imap_valid)
-					all_bh = 1;
-				xfs_add_to_ioend(inode, bh, offset, type,
-						&ioend, !imap_valid);
-				count++;
-			} else {
+			if (type != IO_OVERWRITE) {
+				type = IO_OVERWRITE;
 				imap_valid = 0;
 			}
-		} else if (PageUptodate(page)) {
-			ASSERT(buffer_mapped(bh));
-			imap_valid = 0;
+		} else {
+			if (PageUptodate(page)) {
+				ASSERT(buffer_mapped(bh));
+				imap_valid = 0;
+			}
+			continue;
+		}
+
+		if (imap_valid)
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		if (!imap_valid) {
+			/*
+			 * If we didn't have a valid mapping then we need to
+			 * put the new mapping into a separate ioend structure.
+			 * This ensures non-contiguous extents always have
+			 * separate ioends, which is particularly important
+			 * for unwritten extent conversion at I/O completion
+			 * time.
+			 */
+			new_ioend = 1;
+			err = xfs_map_blocks(inode, offset, &imap, type,
+					     nonblocking);
+			if (err)
+				goto error;
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		}
+		if (imap_valid) {
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, &imap, offset);
+			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
+					 new_ioend);
+			count++;
 		}
 
 		if (!iohead)
@@ -1188,7 +1069,7 @@
 			end_index = last_index;
 
 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
-					wbc, all_bh, end_index);
+				  wbc, end_index);
 	}
 
 	if (iohead)
@@ -1257,13 +1138,19 @@
 	int			create,
 	int			direct)
 {
-	int			flags = create ? BMAPI_WRITE : BMAPI_READ;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
 	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			nimap = 1;
 	int			new = 0;
-	int			error;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
@@ -1272,15 +1159,45 @@
 	if (!create && direct && offset >= i_size_read(inode))
 		return 0;
 
-	if (direct && create)
-		flags |= BMAPI_DIRECT;
+	if (create) {
+		lockmode = XFS_ILOCK_EXCL;
+		xfs_ilock(ip, lockmode);
+	} else {
+		lockmode = xfs_ilock_map_shared(ip);
+	}
 
-	error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
-			  &new);
+	ASSERT(offset <= mp->m_maxioffset);
+	if (offset + size > mp->m_maxioffset)
+		size = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  XFS_BMAPI_ENTIRE,  NULL, 0, &imap, &nimaps, NULL);
 	if (error)
-		return -error;
-	if (nimap == 0)
-		return 0;
+		goto out_unlock;
+
+	if (create &&
+	    (!nimaps ||
+	     (imap.br_startblock == HOLESTARTBLOCK ||
+	      imap.br_startblock == DELAYSTARTBLOCK))) {
+		if (direct) {
+			error = xfs_iomap_write_direct(ip, offset, size,
+						       &imap, nimaps);
+		} else {
+			error = xfs_iomap_write_delay(ip, offset, size, &imap);
+		}
+		if (error)
+			goto out_unlock;
+
+		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+	} else if (nimaps) {
+		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+	} else {
+		trace_xfs_get_blocks_notfound(ip, offset, size);
+		goto out_unlock;
+	}
+	xfs_iunlock(ip, lockmode);
 
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1347,6 +1264,10 @@
 	}
 
 	return 0;
+
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+	return -error;
 }
 
 int
@@ -1434,7 +1355,7 @@
 	ssize_t			ret;
 
 	if (rw & WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IO_NEW);
+		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
 
 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
 					    offset, nr_segs,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index c5057fb..71f721e 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,6 +23,22 @@
 extern mempool_t *xfs_ioend_pool;
 
 /*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+	IO_DIRECT = 0,	/* special case for direct I/O ioends */
+	IO_DELALLOC,	/* mapping covers delalloc region */
+	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
+	IO_OVERWRITE,	/* mapping covers already allocated extent */
+};
+
+#define XFS_IO_TYPES \
+	{ 0,			"" }, \
+	{ IO_DELALLOC,		"delalloc" }, \
+	{ IO_UNWRITTEN,		"unwritten" }, \
+	{ IO_OVERWRITE,		"overwrite" }
+
+/*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
  */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4c5deb6..ac1c7e8 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -44,12 +44,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-static struct shrinker xfs_buf_shake = {
-	.shrink = xfsbufd_wakeup,
-	.seeks = DEFAULT_SEEKS,
-};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -168,8 +163,79 @@
 }
 
 /*
- *	Internal xfs_buf_t object manipulation
+ * xfs_buf_lru_add - add a buffer to the LRU.
+ *
+ * The LRU takes a new reference to the buffer so that it will only be freed
+ * once the shrinker takes the buffer off the LRU.
  */
+STATIC void
+xfs_buf_lru_add(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+
+	spin_lock(&btp->bt_lru_lock);
+	if (list_empty(&bp->b_lru)) {
+		atomic_inc(&bp->b_hold);
+		list_add_tail(&bp->b_lru, &btp->bt_lru);
+		btp->bt_lru_nr++;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * xfs_buf_lru_del - remove a buffer from the LRU
+ *
+ * The unlocked check is safe here because it only occurs when there are no
+ * b_lru_ref counts left on the inode under the pag->pag_buf_lock. It is there
+ * to optimise the shrinker removing the buffer from the LRU and calling
+ * xfs_buf_free(), i.e. it removes an unnecessary round trip on the
+ * bt_lru_lock.
+ */
+STATIC void
+xfs_buf_lru_del(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+
+	if (list_empty(&bp->b_lru))
+		return;
+
+	spin_lock(&btp->bt_lru_lock);
+	if (!list_empty(&bp->b_lru)) {
+		list_del_init(&bp->b_lru);
+		btp->bt_lru_nr--;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * When we mark a buffer stale, we remove the buffer from the LRU and clear the
+ * b_lru_ref count so that the buffer is freed immediately when the buffer
+ * reference count falls to zero. If the buffer is already on the LRU, we need
+ * to remove the reference that the LRU holds on the buffer.
+ *
+ * This prevents build-up of stale buffers on the LRU.
+ */
+void
+xfs_buf_stale(
+	struct xfs_buf	*bp)
+{
+	bp->b_flags |= XBF_STALE;
+	atomic_set(&(bp)->b_lru_ref, 0);
+	if (!list_empty(&bp->b_lru)) {
+		struct xfs_buftarg *btp = bp->b_target;
+
+		spin_lock(&btp->bt_lru_lock);
+		if (!list_empty(&bp->b_lru)) {
+			list_del_init(&bp->b_lru);
+			btp->bt_lru_nr--;
+			atomic_dec(&bp->b_hold);
+		}
+		spin_unlock(&btp->bt_lru_lock);
+	}
+	ASSERT(atomic_read(&bp->b_hold) >= 1);
+}
 
 STATIC void
 _xfs_buf_initialize(
@@ -186,7 +252,9 @@
 
 	memset(bp, 0, sizeof(xfs_buf_t));
 	atomic_set(&bp->b_hold, 1);
+	atomic_set(&bp->b_lru_ref, 1);
 	init_completion(&bp->b_iowait);
+	INIT_LIST_HEAD(&bp->b_lru);
 	INIT_LIST_HEAD(&bp->b_list);
 	RB_CLEAR_NODE(&bp->b_rbnode);
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
@@ -262,6 +330,8 @@
 {
 	trace_xfs_buf_free(bp, _RET_IP_);
 
+	ASSERT(list_empty(&bp->b_lru));
+
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
@@ -337,7 +407,6 @@
 					__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
-			xfsbufd_wakeup(NULL, 0, gfp_mask);
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto retry;
 		}
@@ -827,7 +896,7 @@
 	trace_xfs_buf_rele(bp, _RET_IP_);
 
 	if (!pag) {
-		ASSERT(!bp->b_relse);
+		ASSERT(list_empty(&bp->b_lru));
 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold))
 			xfs_buf_free(bp);
@@ -835,13 +904,15 @@
 	}
 
 	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
+
 	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
-		if (bp->b_relse) {
-			atomic_inc(&bp->b_hold);
+		if (!(bp->b_flags & XBF_STALE) &&
+			   atomic_read(&bp->b_lru_ref)) {
+			xfs_buf_lru_add(bp);
 			spin_unlock(&pag->pag_buf_lock);
-			bp->b_relse(bp);
 		} else {
+			xfs_buf_lru_del(bp);
 			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 			spin_unlock(&pag->pag_buf_lock);
@@ -1438,51 +1509,84 @@
  */
 
 /*
- *	Wait for any bufs with callbacks that have been submitted but
- *	have not yet returned... walk the hash list for the target.
+ * Wait for any bufs with callbacks that have been submitted but have not yet
+ * returned. These buffers will have an elevated hold count, so wait on those
+ * while freeing all the buffers only held by the LRU.
  */
 void
 xfs_wait_buftarg(
 	struct xfs_buftarg	*btp)
 {
-	struct xfs_perag	*pag;
-	uint			i;
+	struct xfs_buf		*bp;
 
-	for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
-		pag = xfs_perag_get(btp->bt_mount, i);
-		spin_lock(&pag->pag_buf_lock);
-		while (rb_first(&pag->pag_buf_tree)) {
-			spin_unlock(&pag->pag_buf_lock);
+restart:
+	spin_lock(&btp->bt_lru_lock);
+	while (!list_empty(&btp->bt_lru)) {
+		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+		if (atomic_read(&bp->b_hold) > 1) {
+			spin_unlock(&btp->bt_lru_lock);
 			delay(100);
-			spin_lock(&pag->pag_buf_lock);
+			goto restart;
 		}
-		spin_unlock(&pag->pag_buf_lock);
-		xfs_perag_put(pag);
+		/*
+		 * Clear the LRU reference count so the buffer doesn't get
+		 * ignored in xfs_buf_rele().
+		 */
+		atomic_set(&bp->b_lru_ref, 0);
+		spin_unlock(&btp->bt_lru_lock);
+		xfs_buf_rele(bp);
+		spin_lock(&btp->bt_lru_lock);
 	}
+	spin_unlock(&btp->bt_lru_lock);
 }
 
-/*
- *	buftarg list for delwrite queue processing
- */
-static LIST_HEAD(xfs_buftarg_list);
-static DEFINE_SPINLOCK(xfs_buftarg_lock);
-
-STATIC void
-xfs_register_buftarg(
-	xfs_buftarg_t           *btp)
+int
+xfs_buftarg_shrink(
+	struct shrinker		*shrink,
+	int			nr_to_scan,
+	gfp_t			mask)
 {
-	spin_lock(&xfs_buftarg_lock);
-	list_add(&btp->bt_list, &xfs_buftarg_list);
-	spin_unlock(&xfs_buftarg_lock);
-}
+	struct xfs_buftarg	*btp = container_of(shrink,
+					struct xfs_buftarg, bt_shrinker);
+	struct xfs_buf		*bp;
+	LIST_HEAD(dispose);
 
-STATIC void
-xfs_unregister_buftarg(
-	xfs_buftarg_t           *btp)
-{
-	spin_lock(&xfs_buftarg_lock);
-	list_del(&btp->bt_list);
-	spin_unlock(&xfs_buftarg_lock);
+	if (!nr_to_scan)
+		return btp->bt_lru_nr;
+
+	spin_lock(&btp->bt_lru_lock);
+	while (!list_empty(&btp->bt_lru)) {
+		if (nr_to_scan-- <= 0)
+			break;
+
+		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+
+		/*
+		 * Decrement the b_lru_ref count unless the value is already
+		 * zero. If the value is already zero, we need to reclaim the
+		 * buffer, otherwise it gets another trip through the LRU.
+		 */
+		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+			list_move_tail(&bp->b_lru, &btp->bt_lru);
+			continue;
+		}
+
+		/*
+		 * remove the buffer from the LRU now to avoid needing another
+		 * lock round trip inside xfs_buf_rele().
+		 */
+		list_move(&bp->b_lru, &dispose);
+		btp->bt_lru_nr--;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+
+	while (!list_empty(&dispose)) {
+		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+		list_del_init(&bp->b_lru);
+		xfs_buf_rele(bp);
+	}
+
+	return btp->bt_lru_nr;
 }
 
 void
@@ -1490,17 +1594,14 @@
 	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
+	unregister_shrinker(&btp->bt_shrinker);
+
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
 	iput(btp->bt_mapping->host);
 
-	/* Unregister the buftarg first so that we don't get a
-	 * wakeup finding a non-existent task
-	 */
-	xfs_unregister_buftarg(btp);
 	kthread_stop(btp->bt_task);
-
 	kmem_free(btp);
 }
 
@@ -1597,20 +1698,13 @@
 	xfs_buftarg_t		*btp,
 	const char		*fsname)
 {
-	int	error = 0;
-
-	INIT_LIST_HEAD(&btp->bt_list);
 	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
 	spin_lock_init(&btp->bt_delwrite_lock);
 	btp->bt_flags = 0;
 	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
-	if (IS_ERR(btp->bt_task)) {
-		error = PTR_ERR(btp->bt_task);
-		goto out_error;
-	}
-	xfs_register_buftarg(btp);
-out_error:
-	return error;
+	if (IS_ERR(btp->bt_task))
+		return PTR_ERR(btp->bt_task);
+	return 0;
 }
 
 xfs_buftarg_t *
@@ -1627,12 +1721,17 @@
 	btp->bt_mount = mp;
 	btp->bt_dev =  bdev->bd_dev;
 	btp->bt_bdev = bdev;
+	INIT_LIST_HEAD(&btp->bt_lru);
+	spin_lock_init(&btp->bt_lru_lock);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
 	if (xfs_mapping_buftarg(btp, bdev))
 		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
+	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&btp->bt_shrinker);
 	return btp;
 
 error:
@@ -1737,27 +1836,6 @@
 	flush_workqueue(queue);
 }
 
-STATIC int
-xfsbufd_wakeup(
-	struct shrinker		*shrink,
-	int			priority,
-	gfp_t			mask)
-{
-	xfs_buftarg_t		*btp;
-
-	spin_lock(&xfs_buftarg_lock);
-	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
-		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
-			continue;
-		if (list_empty(&btp->bt_delwrite_queue))
-			continue;
-		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
-		wake_up_process(btp->bt_task);
-	}
-	spin_unlock(&xfs_buftarg_lock);
-	return 0;
-}
-
 /*
  * Move as many buffers as specified to the supplied list
  * indicating if we skipped any buffers to prevent deadlocks.
@@ -1952,7 +2030,6 @@
 	if (!xfsconvertd_workqueue)
 		goto out_destroy_xfsdatad_workqueue;
 
-	register_shrinker(&xfs_buf_shake);
 	return 0;
 
  out_destroy_xfsdatad_workqueue:
@@ -1968,7 +2045,6 @@
 void
 xfs_buf_terminate(void)
 {
-	unregister_shrinker(&xfs_buf_shake);
 	destroy_workqueue(xfsconvertd_workqueue);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 383a3f3..cbe6595 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -128,10 +128,15 @@
 
 	/* per device delwri queue */
 	struct task_struct	*bt_task;
-	struct list_head	bt_list;
 	struct list_head	bt_delwrite_queue;
 	spinlock_t		bt_delwrite_lock;
 	unsigned long		bt_flags;
+
+	/* LRU control structures */
+	struct shrinker		bt_shrinker;
+	struct list_head	bt_lru;
+	spinlock_t		bt_lru_lock;
+	unsigned int		bt_lru_nr;
 } xfs_buftarg_t;
 
 /*
@@ -147,8 +152,6 @@
 
 struct xfs_buf;
 typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
-typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
-typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
 
 #define XB_PAGES	2
 
@@ -164,9 +167,11 @@
 	xfs_off_t		b_file_offset;	/* offset in file */
 	size_t			b_buffer_length;/* size of buffer in bytes */
 	atomic_t		b_hold;		/* reference count */
+	atomic_t		b_lru_ref;	/* lru reclaim ref count */
 	xfs_buf_flags_t		b_flags;	/* status flags */
 	struct semaphore	b_sema;		/* semaphore for lockables */
 
+	struct list_head	b_lru;		/* lru list */
 	wait_queue_head_t	b_waiters;	/* unpin waiters */
 	struct list_head	b_list;
 	struct xfs_perag	*b_pag;		/* contains rbtree root */
@@ -176,7 +181,6 @@
 	void			*b_addr;	/* virtual address of buffer */
 	struct work_struct	b_iodone_work;
 	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
-	xfs_buf_relse_t		b_relse;	/* releasing function */
 	struct completion	b_iowait;	/* queue for I/O waiters */
 	void			*b_fspriv;
 	void			*b_fspriv2;
@@ -264,7 +268,8 @@
 #define XFS_BUF_ZEROFLAGS(bp)	((bp)->b_flags &= \
 		~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
 
-#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XBF_STALE)
+void xfs_buf_stale(struct xfs_buf *bp);
+#define XFS_BUF_STALE(bp)	xfs_buf_stale(bp);
 #define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
 #define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)
 #define XFS_BUF_SUPER_STALE(bp)	do {				\
@@ -315,7 +320,6 @@
 #define XFS_BUF_FSPRIVATE2(bp, type)		((type)(bp)->b_fspriv2)
 #define XFS_BUF_SET_FSPRIVATE2(bp, val)		((bp)->b_fspriv2 = (void*)(val))
 #define XFS_BUF_SET_START(bp)			do { } while (0)
-#define XFS_BUF_SET_BRELSE_FUNC(bp, func)	((bp)->b_relse = (func))
 
 #define XFS_BUF_PTR(bp)			(xfs_caddr_t)((bp)->b_addr)
 #define XFS_BUF_SET_PTR(bp, val, cnt)	xfs_buf_associate_memory(bp, val, cnt)
@@ -328,9 +332,15 @@
 #define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
 #define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))
 
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	do { } while (0)
+static inline void
+xfs_buf_set_ref(
+	struct xfs_buf	*bp,
+	int		lru_ref)
+{
+	atomic_set(&bp->b_lru_ref, lru_ref);
+}
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	xfs_buf_set_ref(bp, ref)
 #define XFS_BUF_SET_VTYPE(bp, type)		do { } while (0)
-#define XFS_BUF_SET_REF(bp, ref)		do { } while (0)
 
 #define XFS_BUF_ISPINNED(bp)	atomic_read(&((bp)->b_pin_count))
 
@@ -346,8 +356,7 @@
 
 static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
-	if (!bp->b_relse)
-		xfs_buf_unlock(bp);
+	xfs_buf_unlock(bp);
 	xfs_buf_rele(bp);
 }
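
The new b_lru_ref field is a reclaim hint rather than a hard reference: each pass of the buftarg shrinker decrements it, and a buffer is only disposed of once the count has already dropped to zero, so xfs_buf_set_ref() (via XFS_BUF_SET_VTYPE_REF) lets a caller say roughly how many shrinker scans a buffer should survive. A small usage sketch (hypothetical caller; the count of 2 is illustrative only):

	static void example_release_hot_buffer(struct xfs_buf *bp)
	{
		/* Keep this buffer around for about two shrinker passes
		 * before it becomes eligible for reclaim off the LRU. */
		xfs_buf_set_ref(bp, 2);
		xfs_buf_relse(bp);
	}
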
 
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
new file mode 100644
index 0000000..05201ae
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_discard.h"
+#include "xfs_trace.h"
+
+STATIC int
+xfs_trim_extents(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_fsblock_t		start,
+	xfs_fsblock_t		len,
+	xfs_fsblock_t		minlen,
+	__uint64_t		*blocks_trimmed)
+{
+	struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
+	struct xfs_btree_cur	*cur;
+	struct xfs_buf		*agbp;
+	struct xfs_perag	*pag;
+	int			error;
+	int			i;
+
+	pag = xfs_perag_get(mp, agno);
+
+	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	if (error || !agbp)
+		goto out_put_perag;
+
+	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+
+	/*
+	 * Force out the log.  This means any transactions that might have freed
+	 * space before we took the AGF buffer lock are now on disk, and the
+	 * volatile disk cache is flushed.
+	 */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
+	/*
+	 * Look up the longest free extent recorded in the AGF and start
+	 * with it.
+	 */
+	error = xfs_alloc_lookup_le(cur, 0,
+				    XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
+	if (error)
+		goto out_del_cursor;
+
+	/*
+	 * Loop until we are done with all extents that are large
+	 * enough to be worth discarding.
+	 */
+	while (i) {
+		xfs_agblock_t fbno;
+		xfs_extlen_t flen;
+
+		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
+		if (error)
+			goto out_del_cursor;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+		ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
+
+		/*
+		 * Too small?  Give up.
+		 */
+		if (flen < minlen) {
+			trace_xfs_discard_toosmall(mp, agno, fbno, flen);
+			goto out_del_cursor;
+		}
+
+		/*
+		 * If the extent is entirely outside of the range we are
+		 * supposed to discard, skip it.  Do not bother to trim
+		 * down partially overlapping ranges for now.
+		 */
+		if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start ||
+		    XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) {
+			trace_xfs_discard_exclude(mp, agno, fbno, flen);
+			goto next_extent;
+		}
+
+		/*
+		 * If any blocks in the range are still busy, skip the
+		 * discard and try again the next time.
+		 */
+		if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
+			trace_xfs_discard_busy(mp, agno, fbno, flen);
+			goto next_extent;
+		}
+
+		trace_xfs_discard_extent(mp, agno, fbno, flen);
+		error = -blkdev_issue_discard(bdev,
+				XFS_AGB_TO_DADDR(mp, agno, fbno),
+				XFS_FSB_TO_BB(mp, flen),
+				GFP_NOFS, 0);
+		if (error)
+			goto out_del_cursor;
+		*blocks_trimmed += flen;
+
+next_extent:
+		error = xfs_btree_decrement(cur, 0, &i);
+		if (error)
+			goto out_del_cursor;
+	}
+
+out_del_cursor:
+	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	xfs_buf_relse(agbp);
+out_put_perag:
+	xfs_perag_put(pag);
+	return error;
+}
+
+int
+xfs_ioc_trim(
+	struct xfs_mount		*mp,
+	struct fstrim_range __user	*urange)
+{
+	struct request_queue	*q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
+	unsigned int		granularity = q->limits.discard_granularity;
+	struct fstrim_range	range;
+	xfs_fsblock_t		start, len, minlen;
+	xfs_agnumber_t		start_agno, end_agno, agno;
+	__uint64_t		blocks_trimmed = 0;
+	int			error, last_error = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -XFS_ERROR(EPERM);
+	if (copy_from_user(&range, urange, sizeof(range)))
+		return -XFS_ERROR(EFAULT);
+
+	/*
+	 * Truncating down the len isn't actually quite correct, but using
+	 * XFS_B_TO_FSB would mean we trivially get overflows for values
+	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
+	 * used by the fstrim application.  In the end it really doesn't
+	 * matter as trimming blocks is an advisory interface.
+	 */
+	start = XFS_B_TO_FSBT(mp, range.start);
+	len = XFS_B_TO_FSBT(mp, range.len);
+	minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen));
+
+	start_agno = XFS_FSB_TO_AGNO(mp, start);
+	if (start_agno >= mp->m_sb.sb_agcount)
+		return -XFS_ERROR(EINVAL);
+
+	end_agno = XFS_FSB_TO_AGNO(mp, start + len);
+	if (end_agno >= mp->m_sb.sb_agcount)
+		end_agno = mp->m_sb.sb_agcount - 1;
+
+	for (agno = start_agno; agno <= end_agno; agno++) {
+		error = -xfs_trim_extents(mp, agno, start, len, minlen,
+					  &blocks_trimmed);
+		if (error)
+			last_error = error;
+	}
+
+	if (last_error)
+		return last_error;
+
+	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
+	if (copy_to_user(urange, &range, sizeof(range)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
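
xfs_ioc_trim() above is the XFS backend for the generic FITRIM ioctl, which takes a struct fstrim_range (both FITRIM and the structure come from linux/fs.h) and returns the number of bytes trimmed in range.len. A minimal userspace sketch of the calling side, assuming the filesystem to trim is mounted at the path passed as the first argument:

	#include <fcntl.h>
	#include <limits.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	int main(int argc, char **argv)
	{
		struct fstrim_range range = {
			.start  = 0,
			.len    = ULLONG_MAX,	/* fstrim's default, as noted in the comment above */
			.minlen = 0,		/* rounded up to the discard granularity by the kernel */
		};
		int fd;

		if (argc != 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
			perror("FITRIM");
			return 1;
		}
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);
		return 0;
	}
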
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h
new file mode 100644
index 0000000..e82b6dd
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_discard.h
@@ -0,0 +1,8 @@
+#ifndef XFS_DISCARD_H
+#define XFS_DISCARD_H 1
+
+struct fstrim_range;
+
+extern int	xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
+
+#endif /* XFS_DISCARD_H */
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 3764d74..fc0114d 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -70,8 +70,16 @@
 	else
 		fileid_type = FILEID_INO32_GEN_PARENT;
 
-	/* filesystem may contain 64bit inode numbers */
-	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS))
+	/*
+	 * If the filesystem may contain 64bit inode numbers, we need
+	 * to use larger file handles that can represent them.
+	 *
+	 * While we only allocate inodes that do not fit into 32 bits in
+	 * certain configurations, any large enough filesystem may contain
+	 * them, hence the slightly confusing-looking conditional below.
+	 */
+	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
+	    (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
 		fileid_type |= XFS_FILEID_TYPE_64FLAG;
 
 	/*
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index ba8ad42..ef51eb43e 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -41,6 +41,40 @@
 static const struct vm_operations_struct xfs_file_vm_ops;
 
 /*
+ * Locking primitives for read and write IO paths to ensure we consistently use
+ * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
+ */
+static inline void
+xfs_rw_ilock(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_lock(&VFS_I(ip)->i_mutex);
+	xfs_ilock(ip, type);
+}
+
+static inline void
+xfs_rw_iunlock(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	xfs_iunlock(ip, type);
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_unlock(&VFS_I(ip)->i_mutex);
+}
+
+static inline void
+xfs_rw_ilock_demote(
+	struct xfs_inode	*ip,
+	int			type)
+{
+	xfs_ilock_demote(ip, type);
+	if (type & XFS_IOLOCK_EXCL)
+		mutex_unlock(&VFS_I(ip)->i_mutex);
+}
+
+/*
  *	xfs_iozero
  *
  *	xfs_iozero clears the specified range of buffer supplied,
@@ -262,22 +296,21 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if (unlikely(ioflags & IO_ISDIRECT))
-		mutex_lock(&inode->i_mutex);
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-
 	if (unlikely(ioflags & IO_ISDIRECT)) {
+		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+
 		if (inode->i_mapping->nrpages) {
 			ret = -xfs_flushinval_pages(ip,
 					(iocb->ki_pos & PAGE_CACHE_MASK),
 					-1, FI_REMAPF_LOCKED);
+			if (ret) {
+				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+				return ret;
+			}
 		}
-		mutex_unlock(&inode->i_mutex);
-		if (ret) {
-			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-			return ret;
-		}
-	}
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+	} else
+		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 
 	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
 
@@ -285,7 +318,7 @@
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -309,7 +342,7 @@
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 
 	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
 
@@ -317,10 +350,61 @@
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
+STATIC void
+xfs_aio_write_isize_update(
+	struct inode	*inode,
+	loff_t		*ppos,
+	ssize_t		bytes_written)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		isize = i_size_read(inode);
+
+	if (bytes_written > 0)
+		XFS_STATS_ADD(xs_write_bytes, bytes_written);
+
+	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
+					*ppos > isize))
+		*ppos = isize;
+
+	if (*ppos > ip->i_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		if (*ppos > ip->i_size)
+			ip->i_size = *ppos;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
+ * part of the I/O may have been written to disk before the error occurred.  In
+ * this case the on-disk file size may have been adjusted beyond the in-memory
+ * file size and now needs to be truncated back.
+ */
+STATIC void
+xfs_aio_write_newsize_update(
+	struct xfs_inode	*ip)
+{
+	if (ip->i_new_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		ip->i_new_size = 0;
+		if (ip->i_d.di_size > ip->i_size)
+			ip->i_d.di_size = ip->i_size;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * xfs_file_splice_write() does not use xfs_rw_ilock() because
+ * generic_file_splice_write() takes the i_mutex itself. This, in theory,
+ * could cause lock inversions between the aio_write path and the splice path
+ * if someone is doing concurrent splice(2) based writes and write(2) based
+ * writes to the same inode. The only real way to fix this is to re-implement
+ * the generic code here with correct locking orders.
+ */
 STATIC ssize_t
 xfs_file_splice_write(
 	struct pipe_inode_info	*pipe,
@@ -331,7 +415,7 @@
 {
 	struct inode		*inode = outfilp->f_mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	xfs_fsize_t		isize, new_size;
+	xfs_fsize_t		new_size;
 	int			ioflags = 0;
 	ssize_t			ret;
 
@@ -355,27 +439,9 @@
 	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
 
 	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
-	if (ret > 0)
-		XFS_STATS_ADD(xs_write_bytes, ret);
 
-	isize = i_size_read(inode);
-	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
-		*ppos = isize;
-
-	if (*ppos > ip->i_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		if (*ppos > ip->i_size)
-			ip->i_size = *ppos;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	if (ip->i_new_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
+	xfs_aio_write_isize_update(inode, ppos, ret);
+	xfs_aio_write_newsize_update(ip);
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 	return ret;
 }
@@ -562,6 +628,194 @@
 	return error;
 }
 
+/*
+ * Common pre-write limit and setup checks.
+ *
+ * Returns with iolock held according to @iolock.
+ */
+STATIC ssize_t
+xfs_file_aio_write_checks(
+	struct file		*file,
+	loff_t			*pos,
+	size_t			*count,
+	int			*iolock)
+{
+	struct inode		*inode = file->f_mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		new_size;
+	int			error = 0;
+
+	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
+	if (error) {
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
+		*iolock = 0;
+		return error;
+	}
+
+	new_size = *pos + *count;
+	if (new_size > ip->i_size)
+		ip->i_new_size = new_size;
+
+	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
+		file_update_time(file);
+
+	/*
+	 * If the offset is beyond the size of the file, we need to zero any
+	 * blocks that fall between the existing EOF and the start of this
+	 * write.
+	 */
+	if (*pos > ip->i_size)
+		error = -xfs_zero_eof(ip, *pos, ip->i_size);
+
+	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	if (error)
+		return error;
+
+	/*
+	 * If we're writing the file then make sure to clear the setuid and
+	 * setgid bits if the process is not being run by root.  This keeps
+	 * people from modifying setuid and setgid binaries.
+	 */
+	return file_remove_suid(file);
+
+}
+
+/*
+ * xfs_file_dio_aio_write - handle direct IO writes
+ *
+ * Lock the inode appropriately to prepare for and issue a direct IO write.
+ * By separating it from the buffered write path we remove all the
+ * tricky-to-follow locking changes and looping.
+ *
+ * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
+ * until we're sure the bytes at the new EOF have been zeroed and/or the cached
+ * pages are flushed out.
+ *
+ * In most cases the direct IO writes will be done holding IOLOCK_SHARED
+ * allowing them to be done in parallel with reads and other direct IO writes.
+ * However, if the IO is not aligned to filesystem blocks, the direct IO layer
+ * needs to do sub-block zeroing and that requires serialisation against other
+ * direct IOs to the same block. In this case we need to serialise the
+ * submission of the unaligned IOs so that we don't get racing block zeroing in
+ * the dio layer.  To avoid the problem with aio, we also need to wait for
+ * outstanding IOs to complete so that unwritten extent conversion is completed
+ * before we try to map the overlapping block. This is currently implemented by
+ * hitting it with a big hammer (i.e. xfs_ioend_wait()).
+ *
+ * Returns with locks held as indicated by @iolock and errors indicated by
+ * negative return values.
+ */
+STATIC ssize_t
+xfs_file_dio_aio_write(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos,
+	size_t			ocount,
+	int			*iolock)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			ret = 0;
+	size_t			count = ocount;
+	int			unaligned_io = 0;
+	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
+					mp->m_rtdev_targp : mp->m_ddev_targp;
+
+	*iolock = 0;
+	if ((pos & target->bt_smask) || (count & target->bt_smask))
+		return -XFS_ERROR(EINVAL);
+
+	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
+		unaligned_io = 1;
+
+	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
+		*iolock = XFS_IOLOCK_EXCL;
+	else
+		*iolock = XFS_IOLOCK_SHARED;
+	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+
+	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	if (ret)
+		return ret;
+
+	if (mapping->nrpages) {
+		WARN_ON(*iolock != XFS_IOLOCK_EXCL);
+		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
+							FI_REMAPF_LOCKED);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * If we are doing unaligned IO, wait for all other IO to drain,
+	 * otherwise demote the lock if we had to flush cached pages
+	 */
+	if (unaligned_io)
+		xfs_ioend_wait(ip);
+	else if (*iolock == XFS_IOLOCK_EXCL) {
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		*iolock = XFS_IOLOCK_SHARED;
+	}
+
+	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
+	ret = generic_file_direct_write(iocb, iovp,
+			&nr_segs, pos, &iocb->ki_pos, count, ocount);
+
+	/* No fallback to buffered IO on errors for XFS. */
+	ASSERT(ret < 0 || ret == count);
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_buffered_aio_write(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos,
+	size_t			ocount,
+	int			*iolock)
+{
+	struct file		*file = iocb->ki_filp;
+	struct address_space	*mapping = file->f_mapping;
+	struct inode		*inode = mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	ssize_t			ret;
+	int			enospc = 0;
+	size_t			count = ocount;
+
+	*iolock = XFS_IOLOCK_EXCL;
+	xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
+
+	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
+	if (ret)
+		return ret;
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+write_retry:
+	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
+	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+			pos, &iocb->ki_pos, count, ret);
+	/*
+	 * if we just got an ENOSPC, flush the inode now that we aren't
+	 * holding any page locks and retry *once*
+	 */
+	if (ret == -ENOSPC && !enospc) {
+		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
+		if (ret)
+			return ret;
+		enospc = 1;
+		goto write_retry;
+	}
+	current->backing_dev_info = NULL;
+	return ret;
+}
+
 STATIC ssize_t
 xfs_file_aio_write(
 	struct kiocb		*iocb,
@@ -573,234 +827,59 @@
 	struct address_space	*mapping = file->f_mapping;
 	struct inode		*inode = mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			ret = 0, error = 0;
-	int			ioflags = 0;
-	xfs_fsize_t		isize, new_size;
+	ssize_t			ret;
 	int			iolock;
-	size_t			ocount = 0, count;
-	int			need_i_mutex;
+	size_t			ocount = 0;
 
 	XFS_STATS_INC(xs_write_calls);
 
 	BUG_ON(iocb->ki_pos != pos);
 
-	if (unlikely(file->f_flags & O_DIRECT))
-		ioflags |= IO_ISDIRECT;
-	if (file->f_mode & FMODE_NOCMTIME)
-		ioflags |= IO_INVIS;
+	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
+	if (ret)
+		return ret;
 
-	error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-	if (error)
-		return error;
-
-	count = ocount;
-	if (count == 0)
+	if (ocount == 0)
 		return 0;
 
-	xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);
+	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);
 
-	if (XFS_FORCED_SHUTDOWN(mp))
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;
 
-relock:
-	if (ioflags & IO_ISDIRECT) {
-		iolock = XFS_IOLOCK_SHARED;
-		need_i_mutex = 0;
-	} else {
-		iolock = XFS_IOLOCK_EXCL;
-		need_i_mutex = 1;
-		mutex_lock(&inode->i_mutex);
-	}
+	if (unlikely(file->f_flags & O_DIRECT))
+		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
+						ocount, &iolock);
+	else
+		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
+						ocount, &iolock);
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
+	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);
 
-start:
-	error = -generic_write_checks(file, &pos, &count,
-					S_ISBLK(inode->i_mode));
-	if (error) {
-		xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-		goto out_unlock_mutex;
-	}
-
-	if (ioflags & IO_ISDIRECT) {
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-				mp->m_rtdev_targp : mp->m_ddev_targp;
-
-		if ((pos & target->bt_smask) || (count & target->bt_smask)) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-			return XFS_ERROR(-EINVAL);
-		}
-
-		if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock);
-			iolock = XFS_IOLOCK_EXCL;
-			need_i_mutex = 1;
-			mutex_lock(&inode->i_mutex);
-			xfs_ilock(ip, XFS_ILOCK_EXCL|iolock);
-			goto start;
-		}
-	}
-
-	new_size = pos + count;
-	if (new_size > ip->i_size)
-		ip->i_new_size = new_size;
-
-	if (likely(!(ioflags & IO_INVIS)))
-		file_update_time(file);
-
-	/*
-	 * If the offset is beyond the size of the file, we have a couple
-	 * of things to do. First, if there is already space allocated
-	 * we need to either create holes or zero the disk or ...
-	 *
-	 * If there is a page where the previous size lands, we need
-	 * to zero it out up to the new size.
-	 */
-
-	if (pos > ip->i_size) {
-		error = xfs_zero_eof(ip, pos, ip->i_size);
-		if (error) {
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-			goto out_unlock_internal;
-		}
-	}
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-	/*
-	 * If we're writing the file then make sure to clear the
-	 * setuid and setgid bits if the process is not being run
-	 * by root.  This keeps people from modifying setuid and
-	 * setgid binaries.
-	 */
-	error = -file_remove_suid(file);
-	if (unlikely(error))
-		goto out_unlock_internal;
-
-	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
-
-	if ((ioflags & IO_ISDIRECT)) {
-		if (mapping->nrpages) {
-			WARN_ON(need_i_mutex == 0);
-			error = xfs_flushinval_pages(ip,
-					(pos & PAGE_CACHE_MASK),
-					-1, FI_REMAPF_LOCKED);
-			if (error)
-				goto out_unlock_internal;
-		}
-
-		if (need_i_mutex) {
-			/* demote the lock now the cached pages are gone */
-			xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
-			mutex_unlock(&inode->i_mutex);
-
-			iolock = XFS_IOLOCK_SHARED;
-			need_i_mutex = 0;
-		}
-
-		trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags);
-		ret = generic_file_direct_write(iocb, iovp,
-				&nr_segs, pos, &iocb->ki_pos, count, ocount);
-
-		/*
-		 * direct-io write to a hole: fall through to buffered I/O
-		 * for completing the rest of the request.
-		 */
-		if (ret >= 0 && ret != count) {
-			XFS_STATS_ADD(xs_write_bytes, ret);
-
-			pos += ret;
-			count -= ret;
-
-			ioflags &= ~IO_ISDIRECT;
-			xfs_iunlock(ip, iolock);
-			goto relock;
-		}
-	} else {
-		int enospc = 0;
-		ssize_t ret2 = 0;
-
-write_retry:
-		trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags);
-		ret2 = generic_file_buffered_write(iocb, iovp, nr_segs,
-				pos, &iocb->ki_pos, count, ret);
-		/*
-		 * if we just got an ENOSPC, flush the inode now we
-		 * aren't holding any page locks and retry *once*
-		 */
-		if (ret2 == -ENOSPC && !enospc) {
-			error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-			if (error)
-				goto out_unlock_internal;
-			enospc = 1;
-			goto write_retry;
-		}
-		ret = ret2;
-	}
-
-	current->backing_dev_info = NULL;
-
-	isize = i_size_read(inode);
-	if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize))
-		iocb->ki_pos = isize;
-
-	if (iocb->ki_pos > ip->i_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		if (iocb->ki_pos > ip->i_size)
-			ip->i_size = iocb->ki_pos;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-
-	error = -ret;
 	if (ret <= 0)
-		goto out_unlock_internal;
-
-	XFS_STATS_ADD(xs_write_bytes, ret);
+		goto out_unlock;
 
 	/* Handle various SYNC-type writes */
 	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
 		loff_t end = pos + ret - 1;
-		int error2;
+		int error, error2;
 
-		xfs_iunlock(ip, iolock);
-		if (need_i_mutex)
-			mutex_unlock(&inode->i_mutex);
-
-		error2 = filemap_write_and_wait_range(mapping, pos, end);
-		if (!error)
-			error = error2;
-		if (need_i_mutex)
-			mutex_lock(&inode->i_mutex);
-		xfs_ilock(ip, iolock);
+		xfs_rw_iunlock(ip, iolock);
+		error = filemap_write_and_wait_range(mapping, pos, end);
+		xfs_rw_ilock(ip, iolock);
 
 		error2 = -xfs_file_fsync(file,
 					 (file->f_flags & __O_SYNC) ? 0 : 1);
-		if (!error)
-			error = error2;
+		if (error)
+			ret = error;
+		else if (error2)
+			ret = error2;
 	}
 
- out_unlock_internal:
-	if (ip->i_new_size) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		/*
-		 * If this was a direct or synchronous I/O that failed (such
-		 * as ENOSPC) then part of the I/O may have been written to
-		 * disk before the error occured.  In this case the on-disk
-		 * file size may have been adjusted beyond the in-memory file
-		 * size and now needs to be truncated back.
-		 */
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-	xfs_iunlock(ip, iolock);
- out_unlock_mutex:
-	if (need_i_mutex)
-		mutex_unlock(&inode->i_mutex);
-	return -error;
+out_unlock:
+	xfs_aio_write_newsize_update(ip);
+	xfs_rw_iunlock(ip, iolock);
+	return ret;
 }
 
 STATIC int
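
The ENOSPC handling in xfs_file_buffered_aio_write() above flushes the inode and retries the write exactly once. A rough userspace analogue of that flush-once/retry-once pattern, using pwrite(2) and syncfs(2) instead of the in-kernel helpers (the function name and shape are made up for illustration, not part of the patch):

#define _GNU_SOURCE
#include <errno.h>
#include <unistd.h>

ssize_t write_retry_once(int fd, const void *buf, size_t len, off_t off)
{
	int retried = 0;
	ssize_t ret;

again:
	ret = pwrite(fd, buf, len, off);
	if (ret < 0 && errno == ENOSPC && !retried) {
		if (syncfs(fd) < 0)	/* flush the filesystem once */
			return -1;
		retried = 1;
		goto again;		/* retry exactly once */
	}
	return ret;
}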
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index ad442d9..b06ede1 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -39,6 +39,7 @@
 #include "xfs_dfrag.h"
 #include "xfs_fsops.h"
 #include "xfs_vnodeops.h"
+#include "xfs_discard.h"
 #include "xfs_quota.h"
 #include "xfs_inode_item.h"
 #include "xfs_export.h"
@@ -1294,6 +1295,8 @@
 	trace_xfs_file_ioctl(ip);
 
 	switch (cmd) {
+	case FITRIM:
+		return xfs_ioc_trim(mp, arg);
 	case XFS_IOC_ALLOCSP:
 	case XFS_IOC_FREESP:
 	case XFS_IOC_RESVSP:
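
For reference, the FITRIM case added above is driven from userspace with a plain ioctl(2). A minimal caller might look like the sketch below (illustrative only; it assumes a kernel and filesystem with FITRIM support):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range = {
		.start	= 0,
		.len	= (__u64)-1,	/* trim the whole filesystem */
		.minlen	= 0,
	};
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	/* the kernel updates range.len with the number of bytes trimmed */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}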
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 94d5fd6..da54403 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -516,6 +516,7 @@
 	loff_t		new_size = 0;
 	xfs_flock64_t	bf;
 	xfs_inode_t	*ip = XFS_I(inode);
+	int		cmd = XFS_IOC_RESVSP;
 
 	/* preallocation on directories not yet supported */
 	error = -ENODEV;
@@ -528,6 +529,9 @@
 
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = XFS_IOC_UNRESVSP;
+
 	/* check the new inode size is valid before allocating */
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 	    offset + len > i_size_read(inode)) {
@@ -537,8 +541,7 @@
 			goto out_unlock;
 	}
 
-	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-				       0, XFS_ATTR_NOLOCK);
+	error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
 	if (error)
 		goto out_unlock;
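
The cmd selection above routes FALLOC_FL_PUNCH_HOLE to XFS_IOC_UNRESVSP. From userspace the mode is requested through fallocate(2), and PUNCH_HOLE must be combined with KEEP_SIZE; a minimal sketch follows (hypothetical helper, assuming headers that define the FALLOC_FL_* flags):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */

int punch_hole(int fd, off_t offset, off_t len)
{
	/* deallocate [offset, offset + len) without changing the file size */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      offset, len) < 0) {
		perror("fallocate(PUNCH_HOLE)");
		return -1;
	}
	return 0;
}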
 
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 214ddd7..0964949 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -37,7 +37,6 @@
 
 #include <kmem.h>
 #include <mrlock.h>
-#include <sv.h>
 #include <time.h>
 
 #include <support/debug.h>
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 064f964..9731898 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -606,7 +606,8 @@
 {
 	int			error = 0;
 
-	*bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp);
+	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+				    mp);
 	if (IS_ERR(*bdevp)) {
 		error = PTR_ERR(*bdevp);
 		printk("XFS: Invalid device [%s], error=%d\n", name, error);
@@ -620,7 +621,7 @@
 	struct block_device	*bdev)
 {
 	if (bdev)
-		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 }
 
 /*
@@ -834,8 +835,11 @@
 	struct xfs_ail		*ailp,
 	xfs_lsn_t		threshold_lsn)
 {
-	ailp->xa_target = threshold_lsn;
-	wake_up_process(ailp->xa_task);
+	/* only ever move the target forwards */
+	if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
+		ailp->xa_target = threshold_lsn;
+		wake_up_process(ailp->xa_task);
+	}
 }
 
 STATIC int
@@ -847,8 +851,17 @@
 	long		tout = 0; /* milliseconds */
 
 	while (!kthread_should_stop()) {
-		schedule_timeout_interruptible(tout ?
-				msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+		/*
+		 * for short sleeps indicating congestion, don't allow us to
+		 * get woken early. Otherwise all we do is bang on the AIL lock
+		 * without making progress.
+		 */
+		if (tout && tout <= 20)
+			__set_current_state(TASK_KILLABLE);
+		else
+			__set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(tout ?
+				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
 
 		/* swsusp */
 		try_to_freeze();
@@ -935,7 +948,7 @@
  * Slab object creation initialisation for the XFS inode.
  * This covers only the idempotent fields in the XFS inode;
  * all other fields need to be initialised on allocation
- * from the slab. This avoids the need to repeatedly intialise
+ * from the slab. This avoids the need to repeatedly initialise
  * fields in the xfs inode that left in the initialise state
  * when freeing the inode.
  */
@@ -1118,6 +1131,8 @@
 	 */
 	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
 
 	xfs_inactive(ip);
 }
@@ -1399,7 +1414,7 @@
 
 	xfs_save_resvblks(mp);
 	xfs_quiesce_attr(mp);
-	return -xfs_fs_log_dummy(mp, SYNC_WAIT);
+	return -xfs_fs_log_dummy(mp);
 }
 
 STATIC int
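
The xfsaild changes above make the push target monotonic: a new threshold only takes effect, and only wakes the thread, when it moves the target forwards. A toy restatement of that rule, with a plain 64-bit sequence number standing in for xfs_lsn_t (the real code compares with XFS_LSN_CMP(); the names here are made up):

#include <stdbool.h>
#include <stdint.h>

struct ail {
	int64_t	target;		/* highest LSN we have been asked to push to */
};

/* Returns true when the caller should wake the push thread. */
static bool ail_update_target(struct ail *ail, int64_t threshold)
{
	if (threshold <= ail->target)
		return false;		/* stale or duplicate request */
	ail->target = threshold;	/* only ever move the target forwards */
	return true;
}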
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index afb0d7c..e22f005 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -53,14 +53,30 @@
 {
 	struct inode		*inode = VFS_I(ip);
 
+	ASSERT(rcu_read_lock_held());
+
+	/*
+	 * check for stale RCU freed inode
+	 *
+	 * If the inode has been reallocated, it doesn't matter if it's not in
+	 * the AG we are walking - we are walking for writeback, so if it
+	 * passes all the "valid inode" checks and is dirty, then we'll write
+	 * it back anyway.  If it has been reallocated and is still being
+	 * initialised, the XFS_INEW check below will catch it.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	if (!ip->i_ino)
+		goto out_unlock_noent;
+
+	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+		goto out_unlock_noent;
+	spin_unlock(&ip->i_flags_lock);
+
 	/* nothing to sync during shutdown */
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return EFSCORRUPTED;
 
-	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
-		return ENOENT;
-
 	/* If we can't grab the inode, it must on it's way to reclaim. */
 	if (!igrab(inode))
 		return ENOENT;
@@ -72,6 +88,10 @@
 
 	/* inode is valid */
 	return 0;
+
+out_unlock_noent:
+	spin_unlock(&ip->i_flags_lock);
+	return ENOENT;
 }
 
 STATIC int
@@ -98,12 +118,12 @@
 		int		error = 0;
 		int		i;
 
-		read_lock(&pag->pag_ici_lock);
+		rcu_read_lock();
 		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
 					(void **)batch, first_index,
 					XFS_LOOKUP_BATCH);
 		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 			break;
 		}
 
@@ -118,18 +138,26 @@
 				batch[i] = NULL;
 
 			/*
-			 * Update the index for the next lookup. Catch overflows
-			 * into the next AG range which can occur if we have inodes
-			 * in the last block of the AG and we are currently
-			 * pointing to the last inode.
+			 * Update the index for the next lookup. Catch
+			 * overflows into the next AG range which can occur if
+			 * we have inodes in the last block of the AG and we
+			 * are currently pointing to the last inode.
+			 *
+			 * Because we may see inodes that are from the wrong AG
+			 * due to RCU freeing and reallocation, only update the
+			 * index if it lies in this AG. It was a race that led
+			 * us to see this inode, so another lookup from the
+			 * same index will not find it again.
 			 */
+			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+				continue;
 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 				done = 1;
 		}
 
 		/* unlock now we've grabbed the inodes. */
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 
 		for (i = 0; i < nr_found; i++) {
 			if (!batch[i])
@@ -334,7 +362,7 @@
 
 	/* mark the log as covered if needed */
 	if (xfs_log_need_covered(mp))
-		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
+		error2 = xfs_fs_log_dummy(mp);
 
 	/* flush data-only devices */
 	if (mp->m_rtdev_targp)
@@ -475,13 +503,14 @@
 	int		error;
 
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
-		xfs_log_force(mp, 0);
-		xfs_reclaim_inodes(mp, 0);
 		/* dgc: errors ignored here */
-		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 		if (mp->m_super->s_frozen == SB_UNFROZEN &&
 		    xfs_log_need_covered(mp))
-			error = xfs_fs_log_dummy(mp, 0);
+			error = xfs_fs_log_dummy(mp);
+		else
+			xfs_log_force(mp, 0);
+		xfs_reclaim_inodes(mp, 0);
+		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
 	}
 	mp->m_sync_seq++;
 	wake_up(&mp->m_wait_single_sync_task);
@@ -592,12 +621,12 @@
 	struct xfs_perag *pag;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 	spin_lock(&ip->i_flags_lock);
 	__xfs_inode_set_reclaim_tag(pag, ip);
 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
 	spin_unlock(&ip->i_flags_lock);
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	xfs_perag_put(pag);
 }
 
@@ -639,9 +668,14 @@
 	struct xfs_inode	*ip,
 	int			flags)
 {
+	ASSERT(rcu_read_lock_held());
+
+	/* quick check for stale RCU freed inode */
+	if (!ip->i_ino)
+		return 1;
 
 	/*
-	 * do some unlocked checks first to avoid unnecceary lock traffic.
+	 * do some unlocked checks first to avoid unnecessary lock traffic.
 	 * The first is a flush lock check, the second is a already in reclaim
 	 * check. Only do these checks if we are not going to block on locks.
 	 */
@@ -654,11 +688,16 @@
 	 * The radix tree lock here protects a thread in xfs_iget from racing
 	 * with us starting reclaim on the inode.  Once we have the
 	 * XFS_IRECLAIM flag set it will not touch us.
+	 *
+	 * Due to RCU lookup, we may find inodes that have been freed and only
+	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
+	 * aren't candidates for reclaim at all, so we must check that
+	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
 	 */
 	spin_lock(&ip->i_flags_lock);
-	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
-		/* ignore as it is already under reclaim */
+	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
+	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
+		/* not a reclaim candidate. */
 		spin_unlock(&ip->i_flags_lock);
 		return 1;
 	}
@@ -795,12 +834,12 @@
 	 * added to the tree assert that it's been there before to catch
 	 * problems with the inode life time early on.
 	 */
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 	if (!radix_tree_delete(&pag->pag_ici_root,
 				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
 		ASSERT(0);
 	__xfs_inode_clear_reclaim(pag, ip);
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 
 	/*
 	 * Here we do an (almost) spurious inode lock in order to coordinate
@@ -864,14 +903,14 @@
 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
 			int	i;
 
-			write_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			nr_found = radix_tree_gang_lookup_tag(
 					&pag->pag_ici_root,
 					(void **)batch, first_index,
 					XFS_LOOKUP_BATCH,
 					XFS_ICI_RECLAIM_TAG);
 			if (!nr_found) {
-				write_unlock(&pag->pag_ici_lock);
+				rcu_read_unlock();
 				break;
 			}
 
@@ -891,14 +930,24 @@
 				 * occur if we have inodes in the last block of
 				 * the AG and we are currently pointing to the
 				 * last inode.
+				 *
+				 * Because we may see inodes that are from the
+				 * wrong AG due to RCU freeing and
+				 * reallocation, only update the index if it
+				 * lies in this AG. It was a race that led us
+				 * to see this inode, so another lookup from
+				 * the same index will not find it again.
 				 */
+				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
+								pag->pag_agno)
+					continue;
 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 					done = 1;
 			}
 
 			/* unlock now we've grabbed the inodes. */
-			write_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 
 			for (i = 0; i < nr_found; i++) {
 				if (!batch[i])
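
The xfs_sync.c hunks above replace the pag_ici_lock read lock with rcu_read_lock() around the radix tree walks, so every inode the lookup returns must be re-validated and referenced before the RCU read section ends. A condensed, single-inode sketch of that pattern follows; it reuses only definitions visible in this patch, assumes the in-tree XFS context, and is not a standalone program:

STATIC struct xfs_inode *
xfs_ag_lookup_one_sketch(
	struct xfs_perag	*pag,
	xfs_agino_t		index)
{
	struct xfs_inode	*ip;
	int			nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(&pag->pag_ici_root, (void **)&ip,
				    index, 1);
	if (!nr)
		goto out_fail;

	/*
	 * The inode may have been freed and reused since the lookup saw it,
	 * so re-check it under i_flags_lock and take a reference before
	 * leaving the RCU read section.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino ||
	    __xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) {
		spin_unlock(&ip->i_flags_lock);
		goto out_fail;
	}
	spin_unlock(&ip->i_flags_lock);

	if (!igrab(VFS_I(ip)))
		goto out_fail;		/* already on its way to reclaim */

	rcu_read_unlock();
	return ip;			/* caller drops the reference when done */

out_fail:
	rcu_read_unlock();
	return NULL;
}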
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 7bb5092..ee3cee0 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -18,6 +18,7 @@
 #include "xfs.h"
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
+#include "xfs_error.h"
 
 static struct ctl_table_header *xfs_table_header;
 
@@ -51,6 +52,26 @@
 
 	return ret;
 }
+
+STATIC int
+xfs_panic_mask_proc_handler(
+	ctl_table	*ctl,
+	int		write,
+	void		__user *buffer,
+	size_t		*lenp,
+	loff_t		*ppos)
+{
+	int		ret, *valp = ctl->data;
+
+	ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		xfs_panic_mask = *valp;
+#ifdef DEBUG
+		xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
+#endif
+	}
+	return ret;
+}
 #endif /* CONFIG_PROC_FS */
 
 static ctl_table xfs_table[] = {
@@ -77,7 +98,7 @@
 		.data		= &xfs_params.panic_mask.val,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= xfs_panic_mask_proc_handler,
 		.extra1		= &xfs_params.panic_mask.min,
 		.extra2		= &xfs_params.panic_mask.max
 	},
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index acef2e9..2d0bcb4 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -766,8 +766,8 @@
 		__field(int, curr_res)
 		__field(int, unit_res)
 		__field(unsigned int, flags)
-		__field(void *, reserve_headq)
-		__field(void *, write_headq)
+		__field(int, reserveq)
+		__field(int, writeq)
 		__field(int, grant_reserve_cycle)
 		__field(int, grant_reserve_bytes)
 		__field(int, grant_write_cycle)
@@ -784,19 +784,21 @@
 		__entry->curr_res = tic->t_curr_res;
 		__entry->unit_res = tic->t_unit_res;
 		__entry->flags = tic->t_flags;
-		__entry->reserve_headq = log->l_reserve_headq;
-		__entry->write_headq = log->l_write_headq;
-		__entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
-		__entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
-		__entry->grant_write_cycle = log->l_grant_write_cycle;
-		__entry->grant_write_bytes = log->l_grant_write_bytes;
+		__entry->reserveq = list_empty(&log->l_reserveq);
+		__entry->writeq = list_empty(&log->l_writeq);
+		xlog_crack_grant_head(&log->l_grant_reserve_head,
+				&__entry->grant_reserve_cycle,
+				&__entry->grant_reserve_bytes);
+		xlog_crack_grant_head(&log->l_grant_write_head,
+				&__entry->grant_write_cycle,
+				&__entry->grant_write_bytes);
 		__entry->curr_cycle = log->l_curr_cycle;
 		__entry->curr_block = log->l_curr_block;
-		__entry->tail_lsn = log->l_tail_lsn;
+		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
 	),
 	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
-		  "t_unit_res %u t_flags %s reserve_headq 0x%p "
-		  "write_headq 0x%p grant_reserve_cycle %d "
+		  "t_unit_res %u t_flags %s reserveq %s "
+		  "writeq %s grant_reserve_cycle %d "
 		  "grant_reserve_bytes %d grant_write_cycle %d "
 		  "grant_write_bytes %d curr_cycle %d curr_block %d "
 		  "tail_cycle %d tail_block %d",
@@ -807,8 +809,8 @@
 		  __entry->curr_res,
 		  __entry->unit_res,
 		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
-		  __entry->reserve_headq,
-		  __entry->write_headq,
+		  __entry->reserveq ? "empty" : "active",
+		  __entry->writeq ? "empty" : "active",
 		  __entry->grant_reserve_cycle,
 		  __entry->grant_reserve_bytes,
 		  __entry->grant_write_cycle,
@@ -835,6 +837,7 @@
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
@@ -842,6 +845,7 @@
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
@@ -935,10 +939,10 @@
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);
 
-DECLARE_EVENT_CLASS(xfs_iomap_class,
+DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
-		 int flags, struct xfs_bmbt_irec *irec),
-	TP_ARGS(ip, offset, count, flags, irec),
+		 int type, struct xfs_bmbt_irec *irec),
+	TP_ARGS(ip, offset, count, type, irec),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
@@ -946,7 +950,7 @@
 		__field(loff_t, new_size)
 		__field(loff_t, offset)
 		__field(size_t, count)
-		__field(int, flags)
+		__field(int, type)
 		__field(xfs_fileoff_t, startoff)
 		__field(xfs_fsblock_t, startblock)
 		__field(xfs_filblks_t, blockcount)
@@ -958,13 +962,13 @@
 		__entry->new_size = ip->i_new_size;
 		__entry->offset = offset;
 		__entry->count = count;
-		__entry->flags = flags;
+		__entry->type = type;
 		__entry->startoff = irec ? irec->br_startoff : 0;
 		__entry->startblock = irec ? irec->br_startblock : 0;
 		__entry->blockcount = irec ? irec->br_blockcount : 0;
 	),
 	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
-		  "offset 0x%llx count %zd flags %s "
+		  "offset 0x%llx count %zd type %s "
 		  "startoff 0x%llx startblock %lld blockcount 0x%llx",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
@@ -972,20 +976,21 @@
 		  __entry->new_size,
 		  __entry->offset,
 		  __entry->count,
-		  __print_flags(__entry->flags, "|", BMAPI_FLAGS),
+		  __print_symbolic(__entry->type, XFS_IO_TYPES),
 		  __entry->startoff,
 		  (__int64_t)__entry->startblock,
 		  __entry->blockcount)
 )
 
 #define DEFINE_IOMAP_EVENT(name)	\
-DEFINE_EVENT(xfs_iomap_class, name,	\
+DEFINE_EVENT(xfs_imap_class, name,	\
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,	\
-		 int flags, struct xfs_bmbt_irec *irec),		\
-	TP_ARGS(ip, offset, count, flags, irec))
-DEFINE_IOMAP_EVENT(xfs_iomap_enter);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+		 int type, struct xfs_bmbt_irec *irec),		\
+	TP_ARGS(ip, offset, count, type, irec))
+DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1022,6 +1027,7 @@
 	TP_ARGS(ip, offset, count))
 DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 
 
 TRACE_EVENT(xfs_itruncate_start,
@@ -1420,6 +1426,7 @@
 	TP_PROTO(struct xfs_alloc_arg *args), \
 	TP_ARGS(args))
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
 DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
 DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
@@ -1752,6 +1759,39 @@
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel);
 DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip);
 
+DECLARE_EVENT_CLASS(xfs_discard_class,
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+		 xfs_agblock_t agbno, xfs_extlen_t len),
+	TP_ARGS(mp, agno, agbno, len),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_agnumber_t, agno)
+		__field(xfs_agblock_t, agbno)
+		__field(xfs_extlen_t, len)
+	),
+	TP_fast_assign(
+		__entry->dev = mp->m_super->s_dev;
+		__entry->agno = agno;
+		__entry->agbno = agbno;
+		__entry->len = len;
+	),
+	TP_printk("dev %d:%d agno %u agbno %u len %u\n",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->agno,
+		  __entry->agbno,
+		  __entry->len)
+)
+
+#define DEFINE_DISCARD_EVENT(name) \
+DEFINE_EVENT(xfs_discard_class, name, \
+	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+		 xfs_agblock_t agbno, xfs_extlen_t len), \
+	TP_ARGS(mp, agno, agbno, len))
+DEFINE_DISCARD_EVENT(xfs_discard_extent);
+DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
+DEFINE_DISCARD_EVENT(xfs_discard_exclude);
+DEFINE_DISCARD_EVENT(xfs_discard_busy);
+
 #endif /* _TRACE_XFS_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index faf8e1a..d22aa31 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -149,7 +149,6 @@
 	ASSERT(list_empty(&dqp->q_freelist));
 
 	mutex_destroy(&dqp->q_qlock);
-	sv_destroy(&dqp->q_pinwait);
 	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
 
 	atomic_dec(&xfs_Gqm->qm_totaldquots);
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 975aa10..e6cf955 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -25,86 +25,78 @@
 #include "xfs_mount.h"
 #include "xfs_error.h"
 
-static char		message[1024];	/* keep it off the stack */
-static DEFINE_SPINLOCK(xfs_err_lock);
-
-/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
-#define XFS_MAX_ERR_LEVEL	7
-#define XFS_ERR_MASK		((1 << 3) - 1)
-static const char * const	err_level[XFS_MAX_ERR_LEVEL+1] =
-					{KERN_EMERG, KERN_ALERT, KERN_CRIT,
-					 KERN_ERR, KERN_WARNING, KERN_NOTICE,
-					 KERN_INFO, KERN_DEBUG};
-
 void
-cmn_err(register int level, char *fmt, ...)
+cmn_err(
+	const char	*lvl,
+	const char	*fmt,
+	...)
 {
-	char	*fp = fmt;
-	int	len;
-	ulong	flags;
-	va_list	ap;
+	struct va_format vaf;
+	va_list		args;
 
-	level &= XFS_ERR_MASK;
-	if (level > XFS_MAX_ERR_LEVEL)
-		level = XFS_MAX_ERR_LEVEL;
-	spin_lock_irqsave(&xfs_err_lock,flags);
-	va_start(ap, fmt);
-	if (*fmt == '!') fp++;
-	len = vsnprintf(message, sizeof(message), fp, ap);
-	if (len >= sizeof(message))
-		len = sizeof(message) - 1;
-	if (message[len-1] == '\n')
-		message[len-1] = 0;
-	printk("%s%s\n", err_level[level], message);
-	va_end(ap);
-	spin_unlock_irqrestore(&xfs_err_lock,flags);
-	BUG_ON(level == CE_PANIC);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%s%pV", lvl, &vaf);
+	va_end(args);
+
+	BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
 }
 
 void
-xfs_fs_vcmn_err(
-	int			level,
+xfs_fs_cmn_err(
+	const char		*lvl,
 	struct xfs_mount	*mp,
-	char			*fmt,
-	va_list			ap)
+	const char		*fmt,
+	...)
 {
-	unsigned long		flags;
-	int			len = 0;
+	struct va_format	vaf;
+	va_list			args;
 
-	level &= XFS_ERR_MASK;
-	if (level > XFS_MAX_ERR_LEVEL)
-		level = XFS_MAX_ERR_LEVEL;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	spin_lock_irqsave(&xfs_err_lock,flags);
+	printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf);
+	va_end(args);
 
-	if (mp) {
-		len = sprintf(message, "Filesystem \"%s\": ", mp->m_fsname);
+	BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0);
+}
 
-		/*
-		 * Skip the printk if we can't print anything useful
-		 * due to an over-long device name.
-		 */
-		if (len >= sizeof(message))
-			goto out;
+/* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */
+void
+xfs_cmn_err(
+	int			panic_tag,
+	const char		*lvl,
+	struct xfs_mount	*mp,
+	const char		*fmt,
+	...)
+{
+	struct va_format	vaf;
+	va_list			args;
+	int			panic = 0;
+
+	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
+		printk(KERN_ALERT "XFS: Transforming an alert into a BUG.");
+		panic = 1;
 	}
 
-	len = vsnprintf(message + len, sizeof(message) - len, fmt, ap);
-	if (len >= sizeof(message))
-		len = sizeof(message) - 1;
-	if (message[len-1] == '\n')
-		message[len-1] = 0;
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 
-	printk("%s%s\n", err_level[level], message);
- out:
-	spin_unlock_irqrestore(&xfs_err_lock,flags);
+	printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf);
+	va_end(args);
 
-	BUG_ON(level == CE_PANIC);
+	BUG_ON(panic);
 }
 
 void
 assfail(char *expr, char *file, int line)
 {
-	printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+	printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr,
+	       file, line);
 	BUG();
 }
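
The rewritten cmn_err() above forwards the caller's varargs straight to printk() through struct va_format and the %pV extension instead of formatting into a static buffer under a spinlock. A userspace analogue of the same forwarding idea (hypothetical example; %pV itself is printk-only, so vfprintf() stands in for it):

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 2, 3)))
void report(const char *lvl, const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "%s", lvl);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);	/* no fixed-size intermediate buffer */
	va_end(args);
}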
 
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index d2d2046..05699f6 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -20,15 +20,22 @@
 
 #include <stdarg.h>
 
-#define CE_DEBUG        7               /* debug        */
-#define CE_CONT         6               /* continuation */
-#define CE_NOTE         5               /* notice       */
-#define CE_WARN         4               /* warning      */
-#define CE_ALERT        1               /* alert        */
-#define CE_PANIC        0               /* panic        */
+struct xfs_mount;
 
-extern void cmn_err(int, char *, ...)
-	__attribute__ ((format (printf, 2, 3)));
+#define CE_DEBUG        KERN_DEBUG
+#define CE_CONT         KERN_INFO
+#define CE_NOTE         KERN_NOTICE
+#define CE_WARN         KERN_WARNING
+#define CE_ALERT        KERN_ALERT
+#define CE_PANIC        KERN_EMERG
+
+void cmn_err(const char *lvl, const char *fmt, ...)
+		__attribute__ ((format (printf, 2, 3)));
+void xfs_fs_cmn_err(const char *lvl, struct xfs_mount *mp,
+		const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
+void xfs_cmn_err(int panic_tag, const char *lvl, struct xfs_mount *mp,
+		const char *fmt, ...) __attribute__ ((format (printf, 4, 5)));
+
 extern void assfail(char *expr, char *f, int l);
 
 #define ASSERT_ALWAYS(expr)	\
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 0135e2a..11dd720 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -42,7 +42,7 @@
 #define SGI_ACL_DEFAULT_SIZE	(sizeof(SGI_ACL_DEFAULT)-1)
 
 #ifdef CONFIG_XFS_POSIX_ACL
-extern int xfs_check_acl(struct inode *inode, int mask);
+extern int xfs_check_acl(struct inode *inode, int mask, unsigned int flags);
 extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
 extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl);
 extern int xfs_acl_chmod(struct inode *inode);
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 63c7a1a..58632cc 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -227,7 +227,7 @@
 
 	atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */
 
-	rwlock_t	pag_ici_lock;	/* incore inode lock */
+	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
 	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
 	int		pag_ici_reclaimable;	/* reclaimable inodes */
 	struct mutex	pag_ici_reclaim_lock;	/* serialisation point */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 112abc4..f322798 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -41,10 +41,6 @@
 #define	XFSA_FIXUP_BNO_OK	1
 #define	XFSA_FIXUP_CNT_OK	2
 
-static int
-xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
-		    xfs_agblock_t bno, xfs_extlen_t len);
-
 /*
  * Prototypes for per-ag allocation routines
  */
@@ -94,7 +90,7 @@
  * Lookup the first record less than or equal to [bno, len]
  * in the btree given by cur.
  */
-STATIC int				/* error */
+int					/* error */
 xfs_alloc_lookup_le(
 	struct xfs_btree_cur	*cur,	/* btree cursor */
 	xfs_agblock_t		bno,	/* starting block of extent */
@@ -127,7 +123,7 @@
 /*
  * Get the data from the pointed-to record.
  */
-STATIC int				/* error */
+int					/* error */
 xfs_alloc_get_rec(
 	struct xfs_btree_cur	*cur,	/* btree cursor */
 	xfs_agblock_t		*bno,	/* output: starting block of extent */
@@ -577,61 +573,58 @@
 	xfs_extlen_t	rlen;	/* length of returned extent */
 
 	ASSERT(args->alignment == 1);
+
 	/*
 	 * Allocate/initialize a cursor for the by-number freespace btree.
 	 */
 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO);
+					  args->agno, XFS_BTNUM_BNO);
+
 	/*
 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
 	 * Look for the closest free block <= bno, it must contain bno
 	 * if any free block does.
 	 */
-	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
+	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
+	if (error)
 		goto error0;
-	if (!i) {
-		/*
-		 * Didn't find it, return null.
-		 */
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		args->agbno = NULLAGBLOCK;
-		return 0;
-	}
+	if (!i)
+		goto not_found;
+
 	/*
 	 * Grab the freespace record.
 	 */
-	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
+	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
+	if (error)
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	ASSERT(fbno <= args->agbno);
 	minend = args->agbno + args->minlen;
 	maxend = args->agbno + args->maxlen;
 	fend = fbno + flen;
+
 	/*
 	 * Give up if the freespace isn't long enough for the minimum request.
 	 */
-	if (fend < minend) {
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		args->agbno = NULLAGBLOCK;
-		return 0;
-	}
+	if (fend < minend)
+		goto not_found;
+
 	/*
 	 * End of extent will be smaller of the freespace end and the
 	 * maximal requested end.
-	 */
-	end = XFS_AGBLOCK_MIN(fend, maxend);
-	/*
+	 *
 	 * Fix the length according to mod and prod if given.
 	 */
+	end = XFS_AGBLOCK_MIN(fend, maxend);
 	args->len = end - args->agbno;
 	xfs_alloc_fix_len(args);
-	if (!xfs_alloc_fix_minleft(args)) {
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		return 0;
-	}
+	if (!xfs_alloc_fix_minleft(args))
+		goto not_found;
+
 	rlen = args->len;
 	ASSERT(args->agbno + rlen <= fend);
 	end = args->agbno + rlen;
+
 	/*
 	 * We are allocating agbno for rlen [agbno .. end]
 	 * Allocate/initialize a cursor for the by-size btree.
@@ -640,16 +633,25 @@
 		args->agno, XFS_BTNUM_CNT);
 	ASSERT(args->agbno + args->len <=
 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
-	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
-			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
+	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
+				      args->len, XFSA_FIXUP_BNO_OK);
+	if (error) {
 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
 		goto error0;
 	}
+
 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 
-	trace_xfs_alloc_exact_done(args);
 	args->wasfromfl = 0;
+	trace_xfs_alloc_exact_done(args);
+	return 0;
+
+not_found:
+	/* Didn't find it, return null. */
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+	args->agbno = NULLAGBLOCK;
+	trace_xfs_alloc_exact_notfound(args);
 	return 0;
 
 error0:
@@ -659,6 +661,95 @@
 }
 
 /*
+ * Search the btree in a given direction via the search cursor and compare
+ * the records found against the good extent we've already found.
+ */
+STATIC int
+xfs_alloc_find_best_extent(
+	struct xfs_alloc_arg	*args,	/* allocation argument structure */
+	struct xfs_btree_cur	**gcur,	/* good cursor */
+	struct xfs_btree_cur	**scur,	/* searching cursor */
+	xfs_agblock_t		gdiff,	/* difference for search comparison */
+	xfs_agblock_t		*sbno,	/* extent found by search */
+	xfs_extlen_t		*slen,
+	xfs_extlen_t		*slena,	/* aligned length */
+	int			dir)	/* 0 = search right, 1 = search left */
+{
+	xfs_agblock_t		bno;
+	xfs_agblock_t		new;
+	xfs_agblock_t		sdiff;
+	int			error;
+	int			i;
+
+	/* The good extent is perfect, no need to search. */
+	if (!gdiff)
+		goto out_use_good;
+
+	/*
+	 * Look until we find a better one, run out of space or run off the end.
+	 */
+	do {
+		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+		if (error)
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
+					  args->minlen, &bno, slena);
+
+		/*
+		 * The good extent is closer than this one.
+		 */
+		if (!dir) {
+			if (bno >= args->agbno + gdiff)
+				goto out_use_good;
+		} else {
+			if (bno <= args->agbno - gdiff)
+				goto out_use_good;
+		}
+
+		/*
+		 * Same distance, compare length and pick the best.
+		 */
+		if (*slena >= args->minlen) {
+			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
+			xfs_alloc_fix_len(args);
+
+			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+						       args->alignment, *sbno,
+						       *slen, &new);
+
+			/*
+			 * Choose closer size and invalidate other cursor.
+			 */
+			if (sdiff < gdiff)
+				goto out_use_search;
+			goto out_use_good;
+		}
+
+		if (!dir)
+			error = xfs_btree_increment(*scur, 0, &i);
+		else
+			error = xfs_btree_decrement(*scur, 0, &i);
+		if (error)
+			goto error0;
+	} while (i);
+
+out_use_good:
+	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
+	*scur = NULL;
+	return 0;
+
+out_use_search:
+	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
+	*gcur = NULL;
+	return 0;
+
+error0:
+	/* caller invalidates cursors */
+	return error;
+}
+
+/*
  * Allocate a variable extent near bno in the allocation group agno.
  * Extent's length (returned in len) will be between minlen and maxlen,
  * and of the form k * prod + mod unless there's nothing that large.
@@ -925,203 +1016,45 @@
 			}
 		}
 	} while (bno_cur_lt || bno_cur_gt);
+
 	/*
 	 * Got both cursors still active, need to find better entry.
 	 */
 	if (bno_cur_lt && bno_cur_gt) {
-		/*
-		 * Left side is long enough, look for a right side entry.
-		 */
 		if (ltlena >= args->minlen) {
 			/*
-			 * Fix up the length.
+			 * Left side is good, look for a right side entry.
 			 */
 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
 			xfs_alloc_fix_len(args);
-			rlen = args->len;
-			ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
 				args->alignment, ltbno, ltlen, &ltnew);
+
+			error = xfs_alloc_find_best_extent(args,
+						&bno_cur_lt, &bno_cur_gt,
+						ltdiff, &gtbno, &gtlen, &gtlena,
+						0 /* search right */);
+		} else {
+			ASSERT(gtlena >= args->minlen);
+
 			/*
-			 * Not perfect.
-			 */
-			if (ltdiff) {
-				/*
-				 * Look until we find a better one, run out of
-				 * space, or run off the end.
-				 */
-				while (bno_cur_lt && bno_cur_gt) {
-					if ((error = xfs_alloc_get_rec(
-							bno_cur_gt, &gtbno,
-							&gtlen, &i)))
-						goto error0;
-					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-					xfs_alloc_compute_aligned(gtbno, gtlen,
-						args->alignment, args->minlen,
-						&gtbnoa, &gtlena);
-					/*
-					 * The left one is clearly better.
-					 */
-					if (gtbnoa >= args->agbno + ltdiff) {
-						xfs_btree_del_cursor(
-							bno_cur_gt,
-							XFS_BTREE_NOERROR);
-						bno_cur_gt = NULL;
-						break;
-					}
-					/*
-					 * If we reach a big enough entry,
-					 * compare the two and pick the best.
-					 */
-					if (gtlena >= args->minlen) {
-						args->len =
-							XFS_EXTLEN_MIN(gtlena,
-								args->maxlen);
-						xfs_alloc_fix_len(args);
-						rlen = args->len;
-						gtdiff = xfs_alloc_compute_diff(
-							args->agbno, rlen,
-							args->alignment,
-							gtbno, gtlen, &gtnew);
-						/*
-						 * Right side is better.
-						 */
-						if (gtdiff < ltdiff) {
-							xfs_btree_del_cursor(
-								bno_cur_lt,
-								XFS_BTREE_NOERROR);
-							bno_cur_lt = NULL;
-						}
-						/*
-						 * Left side is better.
-						 */
-						else {
-							xfs_btree_del_cursor(
-								bno_cur_gt,
-								XFS_BTREE_NOERROR);
-							bno_cur_gt = NULL;
-						}
-						break;
-					}
-					/*
-					 * Fell off the right end.
-					 */
-					if ((error = xfs_btree_increment(
-							bno_cur_gt, 0, &i)))
-						goto error0;
-					if (!i) {
-						xfs_btree_del_cursor(
-							bno_cur_gt,
-							XFS_BTREE_NOERROR);
-						bno_cur_gt = NULL;
-						break;
-					}
-				}
-			}
-			/*
-			 * The left side is perfect, trash the right side.
-			 */
-			else {
-				xfs_btree_del_cursor(bno_cur_gt,
-						     XFS_BTREE_NOERROR);
-				bno_cur_gt = NULL;
-			}
-		}
-		/*
-		 * It's the right side that was found first, look left.
-		 */
-		else {
-			/*
-			 * Fix up the length.
+			 * Right side is good, look for a left side entry.
 			 */
 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
 			xfs_alloc_fix_len(args);
-			rlen = args->len;
-			gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
 				args->alignment, gtbno, gtlen, &gtnew);
-			/*
-			 * Right side entry isn't perfect.
-			 */
-			if (gtdiff) {
-				/*
-				 * Look until we find a better one, run out of
-				 * space, or run off the end.
-				 */
-				while (bno_cur_lt && bno_cur_gt) {
-					if ((error = xfs_alloc_get_rec(
-							bno_cur_lt, &ltbno,
-							&ltlen, &i)))
-						goto error0;
-					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-					xfs_alloc_compute_aligned(ltbno, ltlen,
-						args->alignment, args->minlen,
-						&ltbnoa, &ltlena);
-					/*
-					 * The right one is clearly better.
-					 */
-					if (ltbnoa <= args->agbno - gtdiff) {
-						xfs_btree_del_cursor(
-							bno_cur_lt,
-							XFS_BTREE_NOERROR);
-						bno_cur_lt = NULL;
-						break;
-					}
-					/*
-					 * If we reach a big enough entry,
-					 * compare the two and pick the best.
-					 */
-					if (ltlena >= args->minlen) {
-						args->len = XFS_EXTLEN_MIN(
-							ltlena, args->maxlen);
-						xfs_alloc_fix_len(args);
-						rlen = args->len;
-						ltdiff = xfs_alloc_compute_diff(
-							args->agbno, rlen,
-							args->alignment,
-							ltbno, ltlen, &ltnew);
-						/*
-						 * Left side is better.
-						 */
-						if (ltdiff < gtdiff) {
-							xfs_btree_del_cursor(
-								bno_cur_gt,
-								XFS_BTREE_NOERROR);
-							bno_cur_gt = NULL;
-						}
-						/*
-						 * Right side is better.
-						 */
-						else {
-							xfs_btree_del_cursor(
-								bno_cur_lt,
-								XFS_BTREE_NOERROR);
-							bno_cur_lt = NULL;
-						}
-						break;
-					}
-					/*
-					 * Fell off the left end.
-					 */
-					if ((error = xfs_btree_decrement(
-							bno_cur_lt, 0, &i)))
-						goto error0;
-					if (!i) {
-						xfs_btree_del_cursor(bno_cur_lt,
-							XFS_BTREE_NOERROR);
-						bno_cur_lt = NULL;
-						break;
-					}
-				}
-			}
-			/*
-			 * The right side is perfect, trash the left side.
-			 */
-			else {
-				xfs_btree_del_cursor(bno_cur_lt,
-					XFS_BTREE_NOERROR);
-				bno_cur_lt = NULL;
-			}
+
+			error = xfs_alloc_find_best_extent(args,
+						&bno_cur_gt, &bno_cur_lt,
+						gtdiff, &ltbno, &ltlen, &ltlena,
+						1 /* search left */);
 		}
+
+		if (error)
+			goto error0;
 	}
+
 	/*
 	 * If we couldn't get anything, give up.
 	 */
@@ -1130,6 +1063,7 @@
 		args->agbno = NULLAGBLOCK;
 		return 0;
 	}
+
 	/*
 	 * At this point we have selected a freespace entry, either to the
 	 * left or to the right.  If it's on the right, copy all the
@@ -1146,6 +1080,7 @@
 		j = 1;
 	} else
 		j = 0;
+
 	/*
 	 * Fix up the length and compute the useful address.
 	 */
@@ -2676,7 +2611,7 @@
  * will require a synchronous transaction, but it can still be
  * used to distinguish between a partial or exact match.
  */
-static int
+int
 xfs_alloc_busy_search(
 	struct xfs_mount	*mp,
 	xfs_agnumber_t		agno,
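
xfs_alloc_find_best_extent(), added above, folds the duplicated left/right search loops into one helper whose core question is whether a candidate extent starts closer to the requested block than the one already in hand. A toy model of that comparison, with plain integers standing in for xfs_agblock_t (the names are made up; alignment and length fixups are ignored):

#include <stdbool.h>
#include <stdint.h>

static uint64_t distance(uint64_t target, uint64_t bno)
{
	return bno > target ? bno - target : target - bno;
}

/* Returns true if the search candidate should replace the "good" extent. */
static bool candidate_is_better(uint64_t target, uint64_t good_bno,
				uint64_t cand_bno)
{
	uint64_t gdiff = distance(target, good_bno);

	if (!gdiff)
		return false;	/* the good extent is perfect, stop searching */
	return distance(target, cand_bno) < gdiff;
}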
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 895009a..0ab56b3 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -19,6 +19,7 @@
 #define	__XFS_ALLOC_H__
 
 struct xfs_buf;
+struct xfs_btree_cur;
 struct xfs_mount;
 struct xfs_perag;
 struct xfs_trans;
@@ -118,16 +119,16 @@
 		struct xfs_perag *pag);
 
 #ifdef __KERNEL__
-
 void
-xfs_alloc_busy_insert(xfs_trans_t *tp,
-		xfs_agnumber_t agno,
-		xfs_agblock_t bno,
-		xfs_extlen_t len);
+xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
+	xfs_agblock_t bno, xfs_extlen_t len);
 
 void
 xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
 
+int
+xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
+	xfs_agblock_t bno, xfs_extlen_t len);
 #endif	/* __KERNEL__ */
 
 /*
@@ -205,4 +206,18 @@
 	xfs_fsblock_t	bno,	/* starting block number of extent */
 	xfs_extlen_t	len);	/* length of extent */
 
+int					/* error */
+xfs_alloc_lookup_le(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		bno,	/* starting block of extent */
+	xfs_extlen_t		len,	/* length of extent */
+	int			*stat);	/* success/failure */
+
+int					/* error */
+xfs_alloc_get_rec(
+	struct xfs_btree_cur	*cur,	/* btree cursor */
+	xfs_agblock_t		*bno,	/* output: starting block of extent */
+	xfs_extlen_t		*len,	/* output: length of extent */
+	int			*stat);	/* output: success/failure */
+
 #endif	/* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a6cff8e..71e90dc2 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -637,7 +637,7 @@
 	 * It didn't all fit, so we have to sort everything on hashval.
 	 */
 	sbsize = sf->hdr.count * sizeof(*sbuf);
-	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
 
 	/*
 	 * Scan the attribute list for the rest of the entries, storing
@@ -2386,7 +2386,7 @@
 				args.dp = context->dp;
 				args.whichfork = XFS_ATTR_FORK;
 				args.valuelen = valuelen;
-				args.value = kmem_alloc(valuelen, KM_SLEEP);
+				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
 				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
 				args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
 				retval = xfs_attr_rmtval_get(&args);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 04f9cca..2f9e97c 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -634,9 +634,8 @@
 		return error;
 	}
 	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
-	if (bp != NULL) {
+	if (bp)
 		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
-	}
 	*bpp = bp;
 	return 0;
 }
@@ -944,13 +943,13 @@
 	switch (cur->bc_btnum) {
 	case XFS_BTNUM_BNO:
 	case XFS_BTNUM_CNT:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
 		break;
 	case XFS_BTNUM_INO:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
 		break;
 	case XFS_BTNUM_BMAP:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
 		break;
 	default:
 		ASSERT(0);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2686d0d..98c6f73 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -141,8 +141,7 @@
 #define		xfs_buf_item_log_check(x)
 #endif
 
-STATIC void	xfs_buf_error_relse(xfs_buf_t *bp);
-STATIC void	xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
+STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
 
 /*
  * This returns the number of log iovecs needed to log the
@@ -450,7 +449,7 @@
 		 * xfs_trans_ail_delete() drops the AIL lock.
 		 */
 		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
-			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
+			xfs_buf_do_callbacks(bp);
 			XFS_BUF_SET_FSPRIVATE(bp, NULL);
 			XFS_BUF_CLR_IODONE_FUNC(bp);
 		} else {
@@ -918,15 +917,26 @@
 	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
 }
 
+/*
+ * We can have many callbacks on a buffer. Running the callbacks individually
+ * can cause a lot of contention on the AIL lock, so we allow for a single
+ * callback to be able to scan the remaining lip->li_bio_list for other items
+ * of the same type and callback to be processed in the first call.
+ *
+ * As a result, the loop walking the callback list below will also modify the
+ * list. It removes the first item from the list and then runs the callback.
+ * The loop then restarts from the new head of the list. This allows the
+ * callback to scan and modify the list attached to the buffer and we don't
+ * have to care about maintaining a next item pointer.
+ */
 STATIC void
 xfs_buf_do_callbacks(
-	xfs_buf_t	*bp,
-	xfs_log_item_t	*lip)
+	struct xfs_buf		*bp)
 {
-	xfs_log_item_t	*nlip;
+	struct xfs_log_item	*lip;
 
-	while (lip != NULL) {
-		nlip = lip->li_bio_list;
+	while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
+		XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
 		ASSERT(lip->li_cb != NULL);
 		/*
 		 * Clear the next pointer so we don't have any
@@ -936,7 +946,6 @@
 		 */
 		lip->li_bio_list = NULL;
 		lip->li_cb(bp, lip);
-		lip = nlip;
 	}
 }
 
@@ -949,129 +958,77 @@
  */
 void
 xfs_buf_iodone_callbacks(
-	xfs_buf_t	*bp)
+	struct xfs_buf		*bp)
 {
-	xfs_log_item_t	*lip;
-	static ulong	lasttime;
-	static xfs_buftarg_t *lasttarg;
-	xfs_mount_t	*mp;
+	struct xfs_log_item	*lip = bp->b_fspriv;
+	struct xfs_mount	*mp = lip->li_mountp;
+	static ulong		lasttime;
+	static xfs_buftarg_t	*lasttarg;
 
-	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	if (likely(!XFS_BUF_GETERROR(bp)))
+		goto do_callbacks;
 
-	if (XFS_BUF_GETERROR(bp) != 0) {
-		/*
-		 * If we've already decided to shutdown the filesystem
-		 * because of IO errors, there's no point in giving this
-		 * a retry.
-		 */
-		mp = lip->li_mountp;
-		if (XFS_FORCED_SHUTDOWN(mp)) {
-			ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-			XFS_BUF_SUPER_STALE(bp);
-			trace_xfs_buf_item_iodone(bp, _RET_IP_);
-			xfs_buf_do_callbacks(bp, lip);
-			XFS_BUF_SET_FSPRIVATE(bp, NULL);
-			XFS_BUF_CLR_IODONE_FUNC(bp);
-			xfs_buf_ioend(bp, 0);
-			return;
-		}
+	/*
+	 * If we've already decided to shut down the filesystem because of
+	 * I/O errors, there's no point in giving this a retry.
+	 */
+	if (XFS_FORCED_SHUTDOWN(mp)) {
+		XFS_BUF_SUPER_STALE(bp);
+		trace_xfs_buf_item_iodone(bp, _RET_IP_);
+		goto do_callbacks;
+	}
 
-		if ((XFS_BUF_TARGET(bp) != lasttarg) ||
-		    (time_after(jiffies, (lasttime + 5*HZ)))) {
-			lasttime = jiffies;
-			cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
-					" block 0x%llx in %s",
-				XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
-			      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
-		}
-		lasttarg = XFS_BUF_TARGET(bp);
+	if (XFS_BUF_TARGET(bp) != lasttarg ||
+	    time_after(jiffies, (lasttime + 5*HZ))) {
+		lasttime = jiffies;
+		cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
+				" block 0x%llx in %s",
+			XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
+		      (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
+	}
+	lasttarg = XFS_BUF_TARGET(bp);
 
-		if (XFS_BUF_ISASYNC(bp)) {
-			/*
-			 * If the write was asynchronous then noone will be
-			 * looking for the error.  Clear the error state
-			 * and write the buffer out again delayed write.
-			 *
-			 * XXXsup This is OK, so long as we catch these
-			 * before we start the umount; we don't want these
-			 * DELWRI metadata bufs to be hanging around.
-			 */
-			XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */
+	/*
+	 * If the write was asynchronous then no one will be looking for the
+	 * error.  Clear the error state and write the buffer out again.
+	 *
+	 * During sync or umount we'll write all pending buffers again
+	 * synchronously, which will catch these errors if they keep hanging
+	 * around.
+	 */
+	if (XFS_BUF_ISASYNC(bp)) {
+		XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */
 
-			if (!(XFS_BUF_ISSTALE(bp))) {
-				XFS_BUF_DELAYWRITE(bp);
-				XFS_BUF_DONE(bp);
-				XFS_BUF_SET_START(bp);
-			}
-			ASSERT(XFS_BUF_IODONE_FUNC(bp));
-			trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
-			xfs_buf_relse(bp);
-		} else {
-			/*
-			 * If the write of the buffer was not asynchronous,
-			 * then we want to make sure to return the error
-			 * to the caller of bwrite().  Because of this we
-			 * cannot clear the B_ERROR state at this point.
-			 * Instead we install a callback function that
-			 * will be called when the buffer is released, and
-			 * that routine will clear the error state and
-			 * set the buffer to be written out again after
-			 * some delay.
-			 */
-			/* We actually overwrite the existing b-relse
-			   function at times, but we're gonna be shutting down
-			   anyway. */
-			XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
+		if (!XFS_BUF_ISSTALE(bp)) {
+			XFS_BUF_DELAYWRITE(bp);
 			XFS_BUF_DONE(bp);
-			XFS_BUF_FINISH_IOWAIT(bp);
+			XFS_BUF_SET_START(bp);
 		}
+		ASSERT(XFS_BUF_IODONE_FUNC(bp));
+		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+		xfs_buf_relse(bp);
 		return;
 	}
 
-	xfs_buf_do_callbacks(bp, lip);
+	/*
+	 * If the write of the buffer was synchronous, we want to make
+	 * sure to return the error to the caller of xfs_bwrite().
+	 */
+	XFS_BUF_STALE(bp);
+	XFS_BUF_DONE(bp);
+	XFS_BUF_UNDELAYWRITE(bp);
+
+	trace_xfs_buf_error_relse(bp, _RET_IP_);
+	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+
+do_callbacks:
+	xfs_buf_do_callbacks(bp);
 	XFS_BUF_SET_FSPRIVATE(bp, NULL);
 	XFS_BUF_CLR_IODONE_FUNC(bp);
 	xfs_buf_ioend(bp, 0);
 }
 
 /*
- * This is a callback routine attached to a buffer which gets an error
- * when being written out synchronously.
- */
-STATIC void
-xfs_buf_error_relse(
-	xfs_buf_t	*bp)
-{
-	xfs_log_item_t	*lip;
-	xfs_mount_t	*mp;
-
-	lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
-	mp = (xfs_mount_t *)lip->li_mountp;
-	ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
-
-	XFS_BUF_STALE(bp);
-	XFS_BUF_DONE(bp);
-	XFS_BUF_UNDELAYWRITE(bp);
-	XFS_BUF_ERROR(bp,0);
-
-	trace_xfs_buf_error_relse(bp, _RET_IP_);
-
-	if (! XFS_FORCED_SHUTDOWN(mp))
-		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-	/*
-	 * We have to unpin the pinned buffers so do the
-	 * callbacks.
-	 */
-	xfs_buf_do_callbacks(bp, lip);
-	XFS_BUF_SET_FSPRIVATE(bp, NULL);
-	XFS_BUF_CLR_IODONE_FUNC(bp);
-	XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
-	xfs_buf_relse(bp);
-}
-
-
-/*
  * This is the iodone() function for buffers which have been
  * logged.  It is called when they are eventually flushed out.
  * It should remove the buf item from the AIL, and free the buf item.
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 0e2ed43..b6ecd20 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -105,17 +105,6 @@
 	xfs_buf_log_format_t	bli_format;	/* in-log header */
 } xfs_buf_log_item_t;
 
-/*
- * This structure is used during recovery to record the buf log
- * items which have been canceled and should not be replayed.
- */
-typedef struct xfs_buf_cancel {
-	xfs_daddr_t		bc_blkno;
-	uint			bc_len;
-	int			bc_refcount;
-	struct xfs_buf_cancel	*bc_next;
-} xfs_buf_cancel_t;
-
 void	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void	xfs_buf_item_relse(struct xfs_buf *);
 void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index c78cc6a..4c7db74 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -152,37 +152,6 @@
 }
 #endif /* DEBUG */
 
-
-void
-xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	xfs_fs_vcmn_err(level, mp, fmt, ap);
-	va_end(ap);
-}
-
-void
-xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
-{
-	va_list ap;
-
-#ifdef DEBUG
-	xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
-#endif
-
-	if (xfs_panic_mask && (xfs_panic_mask & panic_tag)
-	    && (level & CE_ALERT)) {
-		level &= ~CE_ALERT;
-		level |= CE_PANIC;
-		cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG.");
-	}
-	va_start(ap, fmt);
-	xfs_fs_vcmn_err(level, mp, fmt, ap);
-	va_end(ap);
-}
-
 void
 xfs_error_report(
 	const char		*tag,
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index f338847..10dce54 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -136,8 +136,8 @@
 	 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
 			(rf))))
 
-extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
-extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
+extern int xfs_errortag_add(int error_tag, struct xfs_mount *mp);
+extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
 #else
 #define XFS_TEST_ERROR(expr, mp, tag, rf)	(expr)
 #define xfs_errortag_add(tag, mp)		(ENOSYS)
@@ -162,21 +162,15 @@
 
 struct xfs_mount;
 
-extern void xfs_fs_vcmn_err(int level, struct xfs_mount *mp,
-		char *fmt, va_list ap)
-	__attribute__ ((format (printf, 3, 0)));
-extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
-			char *fmt, ...)
-	__attribute__ ((format (printf, 4, 5)));
-extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...)
-	__attribute__ ((format (printf, 3, 4)));
-
 extern void xfs_hex_dump(void *p, int length);
 
 #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
 	xfs_fs_cmn_err(level, mp, fmt "  Unmount and run xfs_repair.", ## args)
 
 #define xfs_fs_mount_cmn_err(f, fmt, args...) \
-	((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args))
+	do { \
+		if (!(f & XFS_MFSI_QUIET)) 	\
+			cmn_err(CE_WARN, "XFS: " fmt, ## args); \
+	} while (0)
 
 #endif	/* __XFS_ERROR_H__ */
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index a55e687..75f2ef6 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -48,6 +48,28 @@
 }
 
 /*
+ * Freeing the efi requires that we remove it from the AIL if it has already
+ * been placed there. However, the EFI may not yet have been placed in the AIL
+ * when called by xfs_efi_release() from EFD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the
+ * test_and_clear_bit(XFS_EFI_COMMITTED) to ensure only the last caller frees
+ * the EFI.
+ */
+STATIC void
+__xfs_efi_release(
+	struct xfs_efi_log_item	*efip)
+{
+	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
+
+	if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
+		spin_lock(&ailp->xa_lock);
+		/* xfs_trans_ail_delete() drops the AIL lock. */
+		xfs_trans_ail_delete(ailp, &efip->efi_item);
+		xfs_efi_item_free(efip);
+	}
+}
+
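+[Editor's note, not part of the patch] The comment above describes a "last caller frees" handoff built on test_and_clear_bit(): whichever release path runs second finds the COMMITTED bit already cleared and does the free. A minimal sketch of that pattern under assumed names (my_item, MY_COMMITTED, my_item_*); it is an illustration, not XFS code.
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#define MY_COMMITTED	0	/* bit number used for the handoff */
+
+struct my_item {
+	unsigned long	flags;
+	/* ... payload ... */
+};
+
+/* Called once when the item becomes visible to the second release path. */
+static void my_item_committed(struct my_item *item)
+{
+	set_bit(MY_COMMITTED, &item->flags);
+}
+
+/*
+ * Called once from each of the two release paths.  test_and_clear_bit() is
+ * atomic: if my_item_committed() ran, the first release call clears the bit
+ * and defers, the second finds it clear and frees.  Exactly one kfree()
+ * happens regardless of the ordering.
+ */
+static void my_item_release(struct my_item *item)
+{
+	if (!test_and_clear_bit(MY_COMMITTED, &item->flags))
+		kfree(item);
+}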
+/*
  * This returns the number of iovecs needed to log the given efi item.
  * We only need 1 iovec for an efi item.  It just logs the efi_log_format
  * structure.
@@ -74,7 +96,8 @@
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
 	uint			size;
 
-	ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
+	ASSERT(atomic_read(&efip->efi_next_extent) ==
+				efip->efi_format.efi_nextents);
 
 	efip->efi_format.efi_type = XFS_LI_EFI;
 
@@ -99,10 +122,12 @@
 }
 
 /*
- * While EFIs cannot really be pinned, the unpin operation is the
- * last place at which the EFI is manipulated during a transaction.
- * Here we coordinate with xfs_efi_cancel() to determine who gets to
- * free the EFI.
+ * While EFIs cannot really be pinned, the unpin operation is the last place at
+ * which the EFI is manipulated during a transaction.  If we are being asked to
+ * remove the EFI, it's because the transaction has been cancelled and by
+ * definition that means the EFI cannot be in the AIL so remove it from the
+ * transaction and free it.  Otherwise coordinate with xfs_efi_release() (via
+ * XFS_EFI_COMMITTED) to determine who gets to free the EFI.
  */
 STATIC void
 xfs_efi_item_unpin(
@@ -110,20 +135,14 @@
 	int			remove)
 {
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
-	struct xfs_ail		*ailp = lip->li_ailp;
 
-	spin_lock(&ailp->xa_lock);
-	if (efip->efi_flags & XFS_EFI_CANCELED) {
-		if (remove)
-			xfs_trans_del_item(lip);
-
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, lip);
+	if (remove) {
+		ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
+		xfs_trans_del_item(lip);
 		xfs_efi_item_free(efip);
-	} else {
-		efip->efi_flags |= XFS_EFI_COMMITTED;
-		spin_unlock(&ailp->xa_lock);
+		return;
 	}
+	__xfs_efi_release(efip);
 }
 
 /*
@@ -152,16 +171,20 @@
 }
 
 /*
- * The EFI is logged only once and cannot be moved in the log, so
- * simply return the lsn at which it's been logged.  The canceled
- * flag is not paid any attention here.  Checking for that is delayed
- * until the EFI is unpinned.
+ * The EFI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.  For bulk transaction committed
+ * processing, the EFI may be processed but not yet unpinned prior to the EFD
+ * being processed. Set the XFS_EFI_COMMITTED flag so this case can be detected
+ * when processing the EFD.
  */
 STATIC xfs_lsn_t
 xfs_efi_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		lsn)
 {
+	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
+
+	set_bit(XFS_EFI_COMMITTED, &efip->efi_flags);
 	return lsn;
 }
 
@@ -230,6 +253,7 @@
 	xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
 	efip->efi_format.efi_nextents = nextents;
 	efip->efi_format.efi_id = (__psint_t)(void*)efip;
+	atomic_set(&efip->efi_next_extent, 0);
 
 	return efip;
 }
@@ -289,37 +313,18 @@
 }
 
 /*
- * This is called by the efd item code below to release references to
- * the given efi item.  Each efd calls this with the number of
- * extents that it has logged, and when the sum of these reaches
- * the total number of extents logged by this efi item we can free
- * the efi item.
- *
- * Freeing the efi item requires that we remove it from the AIL.
- * We'll use the AIL lock to protect our counters as well as
- * the removal from the AIL.
+ * This is called by the efd item code below to release references to the given
+ * efi item.  Each efd calls this with the number of extents that it has
+ * logged, and when the sum of these reaches the total number of extents logged
+ * by this efi item we can free the efi item.
  */
 void
 xfs_efi_release(xfs_efi_log_item_t	*efip,
 		uint			nextents)
 {
-	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
-	int			extents_left;
-
-	ASSERT(efip->efi_next_extent > 0);
-	ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
-
-	spin_lock(&ailp->xa_lock);
-	ASSERT(efip->efi_next_extent >= nextents);
-	efip->efi_next_extent -= nextents;
-	extents_left = efip->efi_next_extent;
-	if (extents_left == 0) {
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
-		xfs_efi_item_free(efip);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
+	ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
+	if (atomic_sub_and_test(nextents, &efip->efi_next_extent))
+		__xfs_efi_release(efip);
 }
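[Editor's note, not part of the patch] xfs_efi_release() now retires extent references with atomic_sub_and_test() instead of counting under the AIL lock. A generic sketch of that lock-free countdown, with hypothetical names:

#include <linux/atomic.h>
#include <linux/slab.h>

struct my_intent {
	atomic_t	remaining;	/* total units still outstanding */
};

static void my_intent_init(struct my_intent *mi, int total)
{
	atomic_set(&mi->remaining, total);
}

/*
 * Each completion subtracts the units it covered.  atomic_sub_and_test()
 * returns true only for the caller whose subtraction reaches zero, so the
 * final teardown runs exactly once and needs no external lock.
 */
static void my_intent_release(struct my_intent *mi, int units)
{
	if (atomic_sub_and_test(units, &mi->remaining))
		kfree(mi);
}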
 
 static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0d22c56..375f68e 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -111,11 +111,10 @@
 #define	XFS_EFI_MAX_FAST_EXTENTS	16
 
 /*
- * Define EFI flags.
+ * Define EFI flag bits. Manipulated by set/clear/test_bit operators.
  */
-#define	XFS_EFI_RECOVERED	0x1
-#define	XFS_EFI_COMMITTED	0x2
-#define	XFS_EFI_CANCELED	0x4
+#define	XFS_EFI_RECOVERED	1
+#define	XFS_EFI_COMMITTED	2
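+
+[Editor's note, not part of the patch] The values above deliberately change from bit masks (0x1, 0x2, 0x4) to bit numbers (1, 2): set_bit(), clear_bit() and test_and_clear_bit() take a bit index into an unsigned long, not a mask. A short illustration with hypothetical flag names:
+
+#include <linux/bitops.h>
+
+#define MY_RECOVERED	1	/* bit number, i.e. mask 0x2 */
+#define MY_COMMITTED	2	/* bit number, i.e. mask 0x4 */
+
+static void my_flags_example(unsigned long *flags)
+{
+	set_bit(MY_COMMITTED, flags);		/* atomically sets bit 2 */
+	if (test_bit(MY_RECOVERED, flags))	/* tests bit 1 */
+		clear_bit(MY_RECOVERED, flags);
+}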
 
 /*
  * This is the "extent free intention" log item.  It is used
@@ -125,8 +124,8 @@
  */
 typedef struct xfs_efi_log_item {
 	xfs_log_item_t		efi_item;
-	uint			efi_flags;	/* misc flags */
-	uint			efi_next_extent;
+	atomic_t		efi_next_extent;
+	unsigned long		efi_flags;	/* misc flags */
 	xfs_efi_log_format_t	efi_format;
 } xfs_efi_log_item_t;
 
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7c116e..cec89dd 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -374,6 +374,7 @@
 		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
 	} else
 		mp->m_maxicount = 0;
+	xfs_set_low_space_thresholds(mp);
 
 	/* update secondary superblocks. */
 	for (agno = 1; agno < nagcount; agno++) {
@@ -611,12 +612,13 @@
  *
  * We cannot use an inode here for this - that will push dirty state back up
  * into the VFS and then periodic inode flushing will prevent log covering from
- * making progress. Hence we log a field in the superblock instead.
+ * making progress. Hence we log a field in the superblock instead and use a
+ * synchronous transaction to ensure the superblock is immediately unpinned
+ * and can be written back.
  */
 int
 xfs_fs_log_dummy(
-	xfs_mount_t	*mp,
-	int		flags)
+	xfs_mount_t	*mp)
 {
 	xfs_trans_t	*tp;
 	int		error;
@@ -631,8 +633,7 @@
 
 	/* log the UUID because it is an unchanging field */
 	xfs_mod_sb(tp, XFS_SB_UUID);
-	if (flags & SYNC_WAIT)
-		xfs_trans_set_sync(tp);
+	xfs_trans_set_sync(tp);
 	return xfs_trans_commit(tp, 0);
 }
 
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index a786c52..1b6a98b 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,6 +25,6 @@
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
 				xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
+extern int xfs_fs_log_dummy(struct xfs_mount *mp);
 
 #endif	/* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 0cdd269..cb9b6d1 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,6 +43,17 @@
 
 
 /*
+ * Define xfs inode iolock lockdep classes. We need to ensure that all active
+ * inodes are considered the same for lockdep purposes, including inodes that
+ * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
+ * guarantee the locks are considered the same when there are multiple lock
+ * initialisation sites. Also, define a reclaimable inode class so it is
+ * obvious in lockdep reports which class the report is against.
+ */
+static struct lock_class_key xfs_iolock_active;
+struct lock_class_key xfs_iolock_reclaimable;
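+
+[Editor's note, not part of the patch] A minimal sketch of the lockdep technique described above: pin every instance of a lock that is initialised from several places to one explicit, named class, so reports show a single class rather than one per init site. Only lockdep_set_class_and_name() and struct lock_class_key are real APIs here; the rest is hypothetical.
+
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+
+/* One key shared by every instance of the logical lock class. */
+static struct lock_class_key my_active_class;
+
+struct my_object {
+	struct mutex	lock;
+};
+
+static void my_object_init(struct my_object *obj)
+{
+	mutex_init(&obj->lock);
+	/*
+	 * Without this, each init site would give the lock a distinct
+	 * lockdep class; with it, all instances report as one named class.
+	 */
+	lockdep_set_class_and_name(&obj->lock, &my_active_class,
+				   "my_active_class");
+}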
+
+/*
  * Allocate and initialise an xfs_inode.
  */
 STATIC struct xfs_inode *
@@ -69,8 +80,11 @@
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
+	ASSERT(ip->i_ino == 0);
 
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+			&xfs_iolock_active, "xfs_iolock_active");
 
 	/* initialise the xfs inode */
 	ip->i_ino = ino;
@@ -85,12 +99,20 @@
 	ip->i_size = 0;
 	ip->i_new_size = 0;
 
-	/* prevent anyone from using this yet */
-	VFS_I(ip)->i_state = I_NEW;
-
 	return ip;
 }
 
+STATIC void
+xfs_inode_free_callback(
+	struct rcu_head		*head)
+{
+	struct inode		*inode = container_of(head, struct inode, i_rcu);
+	struct xfs_inode	*ip = XFS_I(inode);
+
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_zone_free(xfs_inode_zone, ip);
+}
+
 void
 xfs_inode_free(
 	struct xfs_inode	*ip)
@@ -134,7 +156,18 @@
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
 
-	kmem_zone_free(xfs_inode_zone, ip);
+	/*
+	 * Because we use RCU freeing we need to ensure the inode always
+	 * appears to be reclaimed with an invalid inode number when in the
+	 * free state. The ip->i_flags_lock provides the barrier against lookup
+	 * races.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	ip->i_flags = XFS_IRECLAIM;
+	ip->i_ino = 0;
+	spin_unlock(&ip->i_flags_lock);
+
+	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
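[Editor's note, not part of the patch] The change above frees inodes through call_rcu() and first marks them dead (i_ino = 0, XFS_IRECLAIM) under i_flags_lock, so an RCU lookup holding a stale pointer can detect the race. A self-contained sketch of that free side, with hypothetical names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_node {
	spinlock_t	lock;
	unsigned long	id;		/* 0 means "being freed" */
	struct rcu_head	rcu;
};

static void my_node_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_node, rcu));
}

static void my_node_free(struct my_node *node)
{
	/*
	 * Invalidate the identity under the per-object lock first; a reader
	 * that takes node->lock after this sees id == 0 and backs off.
	 */
	spin_lock(&node->lock);
	node->id = 0;
	spin_unlock(&node->lock);

	/* Defer the kfree() until all current RCU readers have finished. */
	call_rcu(&node->rcu, my_node_free_rcu);
}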
 
 /*
@@ -144,14 +177,29 @@
 xfs_iget_cache_hit(
 	struct xfs_perag	*pag,
 	struct xfs_inode	*ip,
+	xfs_ino_t		ino,
 	int			flags,
-	int			lock_flags) __releases(pag->pag_ici_lock)
+	int			lock_flags) __releases(RCU)
 {
 	struct inode		*inode = VFS_I(ip);
 	struct xfs_mount	*mp = ip->i_mount;
 	int			error;
 
+	/*
+	 * check for re-use of an inode within an RCU grace period due to the
+	 * radix tree nodes not being updated yet. We monitor for this by
+	 * setting the inode number to zero before freeing the inode structure.
+	 * If the inode has been reallocated and set up, then the inode number
+	 * will not match, so check for that, too.
+	 */
 	spin_lock(&ip->i_flags_lock);
+	if (ip->i_ino != ino) {
+		trace_xfs_iget_skip(ip);
+		XFS_STATS_INC(xs_ig_frecycle);
+		error = EAGAIN;
+		goto out_error;
+	}
+
 
 	/*
 	 * If we are racing with another cache hit that is currently
@@ -194,7 +242,7 @@
 		ip->i_flags |= XFS_IRECLAIM;
 
 		spin_unlock(&ip->i_flags_lock);
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 
 		error = -inode_init_always(mp->m_super, inode);
 		if (error) {
@@ -202,7 +250,7 @@
 			 * Re-initializing the inode failed, and we are in deep
 			 * trouble.  Try to re-add it to the reclaim list.
 			 */
-			read_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			spin_lock(&ip->i_flags_lock);
 
 			ip->i_flags &= ~XFS_INEW;
@@ -212,14 +260,20 @@
 			goto out_error;
 		}
 
-		write_lock(&pag->pag_ici_lock);
+		spin_lock(&pag->pag_ici_lock);
 		spin_lock(&ip->i_flags_lock);
 		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
 		ip->i_flags |= XFS_INEW;
 		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
 		inode->i_state = I_NEW;
+
+		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+				&xfs_iolock_active, "xfs_iolock_active");
+
 		spin_unlock(&ip->i_flags_lock);
-		write_unlock(&pag->pag_ici_lock);
+		spin_unlock(&pag->pag_ici_lock);
 	} else {
 		/* If the VFS inode is being torn down, pause and try again. */
 		if (!igrab(inode)) {
@@ -230,7 +284,7 @@
 
 		/* We've got a live one. */
 		spin_unlock(&ip->i_flags_lock);
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 		trace_xfs_iget_hit(ip);
 	}
 
@@ -244,7 +298,7 @@
 
 out_error:
 	spin_unlock(&ip->i_flags_lock);
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	return error;
 }
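[Editor's note, not part of the patch] And the matching lookup side: an RCU-protected radix tree walk, then re-validation of the identity under the object's spinlock, returning EAGAIN so the caller retries when it raced with a free or reuse. Hypothetical names again; only the RCU and radix tree calls are real.

#include <linux/errno.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_node {
	spinlock_t	lock;
	unsigned long	id;		/* 0 while the node is being freed */
};

static int my_lookup(struct radix_tree_root *root, unsigned long id,
		     struct my_node **nodep)
{
	struct my_node *node;

	rcu_read_lock();
	node = radix_tree_lookup(root, id);
	if (!node) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/*
	 * The tree slot can lag behind a concurrent free/reuse, so confirm
	 * the identity under the object lock before trusting the hit.
	 */
	spin_lock(&node->lock);
	if (node->id != id) {
		spin_unlock(&node->lock);
		rcu_read_unlock();
		return -EAGAIN;		/* caller should retry the lookup */
	}
	/* ... take a proper reference here before dropping the locks ... */
	spin_unlock(&node->lock);
	rcu_read_unlock();

	*nodep = node;
	return 0;
}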
 
@@ -297,7 +351,7 @@
 			BUG();
 	}
 
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 
 	/* insert the new inode */
 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
@@ -312,14 +366,14 @@
 	ip->i_udquot = ip->i_gdquot = NULL;
 	xfs_iflags_set(ip, XFS_INEW);
 
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
 
 	*ipp = ip;
 	return 0;
 
 out_preload_end:
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
 	if (lock_flags)
 		xfs_iunlock(ip, lock_flags);
@@ -366,7 +420,7 @@
 	xfs_agino_t	agino;
 
 	/* reject inode numbers outside existing AGs */
-	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
+	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 		return EINVAL;
 
 	/* get the perag structure and ensure that it's inode capable */
@@ -375,15 +429,15 @@
 
 again:
 	error = 0;
-	read_lock(&pag->pag_ici_lock);
+	rcu_read_lock();
 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 
 	if (ip) {
-		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
+		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 		if (error)
 			goto out_error_or_again;
 	} else {
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 		XFS_STATS_INC(xs_ig_missed);
 
 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 108c7a0..be7cf62 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -887,7 +887,7 @@
 	 * around for a while.  This helps to keep recently accessed
 	 * meta-data in-core longer.
 	 */
-	XFS_BUF_SET_REF(bp, XFS_INO_REF);
+	xfs_buf_set_ref(bp, XFS_INO_REF);
 
 	/*
 	 * Use xfs_trans_brelse() to release the buffer containing the
@@ -2000,17 +2000,33 @@
 		 */
 		for (i = 0; i < ninodes; i++) {
 retry:
-			read_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
 					XFS_INO_TO_AGINO(mp, (inum + i)));
 
-			/* Inode not in memory or stale, nothing to do */
-			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
-				read_unlock(&pag->pag_ici_lock);
+			/* Inode not in memory, nothing to do */
+			if (!ip) {
+				rcu_read_unlock();
 				continue;
 			}
 
 			/*
+			 * because this is an RCU protected lookup, we could
+			 * find a recently freed or even reallocated inode
+			 * during the lookup. We need to check under the
+			 * i_flags_lock for a valid inode here. Skip it if it
+			 * is not valid, the wrong inode or stale.
+			 */
+			spin_lock(&ip->i_flags_lock);
+			if (ip->i_ino != inum + i ||
+			    __xfs_iflags_test(ip, XFS_ISTALE)) {
+				spin_unlock(&ip->i_flags_lock);
+				rcu_read_unlock();
+				continue;
+			}
+			spin_unlock(&ip->i_flags_lock);
+
+			/*
 			 * Don't try to lock/unlock the current inode, but we
 			 * _cannot_ skip the other inodes that we did not find
 			 * in the list attached to the buffer and are not
@@ -2019,11 +2035,11 @@
 			 */
 			if (ip != free_ip &&
 			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
-				read_unlock(&pag->pag_ici_lock);
+				rcu_read_unlock();
 				delay(1);
 				goto retry;
 			}
-			read_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 
 			xfs_iflock(ip);
 			xfs_iflags_set(ip, XFS_ISTALE);
@@ -2629,7 +2645,7 @@
 
 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
-	read_lock(&pag->pag_ici_lock);
+	rcu_read_lock();
 	/* really need a gang lookup range call here */
 	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
 					first_index, inodes_per_cluster);
@@ -2640,9 +2656,21 @@
 		iq = ilist[i];
 		if (iq == ip)
 			continue;
-		/* if the inode lies outside this cluster, we're done. */
-		if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
-			break;
+
+		/*
+		 * because this is an RCU protected lookup, we could find a
+		 * recently freed or even reallocated inode during the lookup.
+		 * We need to check under the i_flags_lock for a valid inode
+		 * here. Skip it if it is not valid or the wrong inode.
+		 */
+		spin_lock(&ip->i_flags_lock);
+		if (!ip->i_ino ||
+		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+			spin_unlock(&ip->i_flags_lock);
+			continue;
+		}
+		spin_unlock(&ip->i_flags_lock);
+
 		/*
 		 * Do an un-protected check to see if the inode is dirty and
 		 * is a candidate for flushing.  These checks will be repeated
@@ -2692,7 +2720,7 @@
 	}
 
 out_free:
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	kmem_free(ilist);
 out_put:
 	xfs_perag_put(pag);
@@ -2704,7 +2732,7 @@
 	 * Corruption detected in the clustering loop.  Invalidate the
 	 * inode buffer and shut down the filesystem.
 	 */
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	/*
 	 * Clean up the buffer.  If it was B_DELWRI, just release it --
 	 * brelse can handle it with no problems.  If not, shut down the
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index fb2ca2e..5c95fa8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -376,12 +376,13 @@
 /*
  * In-core inode flags.
  */
-#define XFS_IRECLAIM    0x0001  /* we have started reclaiming this inode    */
-#define XFS_ISTALE	0x0002	/* inode has been staled */
-#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
-#define XFS_INEW	0x0008	/* inode has just been allocated */
-#define XFS_IFILESTREAM	0x0010	/* inode is in a filestream directory */
-#define XFS_ITRUNCATED	0x0020	/* truncated down so flush-on-close */
+#define XFS_IRECLAIM		0x0001  /* started reclaiming this inode */
+#define XFS_ISTALE		0x0002	/* inode has been staled */
+#define XFS_IRECLAIMABLE	0x0004	/* inode can be reclaimed */
+#define XFS_INEW		0x0008	/* inode has just been allocated */
+#define XFS_IFILESTREAM		0x0010	/* inode is in a filestream directory */
+#define XFS_ITRUNCATED		0x0020	/* truncated down so flush-on-close */
+#define XFS_IDIRTY_RELEASE	0x0040	/* dirty release already seen */
 
 /*
  * Flags for inode locking.
@@ -438,6 +439,8 @@
 #define XFS_IOLOCK_DEP(flags)	(((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
 #define XFS_ILOCK_DEP(flags)	(((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
 
+extern struct lock_class_key xfs_iolock_reclaimable;
+
 /*
  * Flags for xfs_itruncate_start().
  */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7c8d30c..fd4f398 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -842,15 +842,64 @@
  * flushed to disk.  It is responsible for removing the inode item
  * from the AIL if it has not been re-logged, and unlocking the inode's
  * flush lock.
+ *
+ * To reduce AIL lock traffic as much as possible, we scan the buffer log item
+ * list for other inodes that will run this function. We remove them from the
+ * buffer list so we can process all the inode IO completions in one AIL lock
+ * traversal.
  */
 void
 xfs_iflush_done(
 	struct xfs_buf		*bp,
 	struct xfs_log_item	*lip)
 {
-	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
-	xfs_inode_t		*ip = iip->ili_inode;
+	struct xfs_inode_log_item *iip;
+	struct xfs_log_item	*blip;
+	struct xfs_log_item	*next;
+	struct xfs_log_item	*prev;
 	struct xfs_ail		*ailp = lip->li_ailp;
+	int			need_ail = 0;
+
+	/*
+	 * Scan the buffer IO completions for other inodes being completed and
+	 * attach them to the current inode log item.
+	 */
+	blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	prev = NULL;
+	while (blip != NULL) {
+		if (lip->li_cb != xfs_iflush_done) {
+			prev = blip;
+			blip = blip->li_bio_list;
+			continue;
+		}
+
+		/* remove from list */
+		next = blip->li_bio_list;
+		if (!prev) {
+			XFS_BUF_SET_FSPRIVATE(bp, next);
+		} else {
+			prev->li_bio_list = next;
+		}
+
+		/* add to current list */
+		blip->li_bio_list = lip->li_bio_list;
+		lip->li_bio_list = blip;
+
+		/*
+		 * while we have the item, do the unlocked check for needing
+		 * the AIL lock.
+		 */
+		iip = INODE_ITEM(blip);
+		if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
+			need_ail++;
+
+		blip = next;
+	}
+
+	/* make sure we capture the state of the initial inode. */
+	iip = INODE_ITEM(lip);
+	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
+		need_ail++;
 
 	/*
 	 * We only want to pull the item from the AIL if it is
@@ -861,28 +910,37 @@
 	 * the lock since it's cheaper, and then we recheck while
 	 * holding the lock before removing the inode from the AIL.
 	 */
-	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
+	if (need_ail) {
+		struct xfs_log_item *log_items[need_ail];
+		int i = 0;
 		spin_lock(&ailp->xa_lock);
-		if (lip->li_lsn == iip->ili_flush_lsn) {
-			/* xfs_trans_ail_delete() drops the AIL lock. */
-			xfs_trans_ail_delete(ailp, lip);
-		} else {
-			spin_unlock(&ailp->xa_lock);
+		for (blip = lip; blip; blip = blip->li_bio_list) {
+			iip = INODE_ITEM(blip);
+			if (iip->ili_logged &&
+			    blip->li_lsn == iip->ili_flush_lsn) {
+				log_items[i++] = blip;
+			}
+			ASSERT(i <= need_ail);
 		}
+		/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
+		xfs_trans_ail_delete_bulk(ailp, log_items, i);
 	}
 
-	iip->ili_logged = 0;
 
 	/*
-	 * Clear the ili_last_fields bits now that we know that the
-	 * data corresponding to them is safely on disk.
+	 * clean up and unlock the flush lock now we are done. We can clear the
+	 * ili_last_fields bits now that we know that the data corresponding to
+	 * them is safely on disk.
 	 */
-	iip->ili_last_fields = 0;
+	for (blip = lip; blip; blip = next) {
+		next = blip->li_bio_list;
+		blip->li_bio_list = NULL;
 
-	/*
-	 * Release the inode's flush lock since we're done with it.
-	 */
-	xfs_ifunlock(ip);
+		iip = INODE_ITEM(blip);
+		iip->ili_logged = 0;
+		iip->ili_last_fields = 0;
+		xfs_ifunlock(iip->ili_inode);
+	}
 }
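[Editor's note, not part of the patch] The reworked xfs_iflush_done() above walks the buffer's singly linked item list once, detaches every item that shares this completion callback, and then takes the contended AIL lock a single time for the whole batch. The general "collect lock-free, then lock once" shape, with hypothetical types:

#include <linux/spinlock.h>

struct my_item {
	struct my_item	*next;
	int		needs_global_work;
};

static DEFINE_SPINLOCK(my_global_lock);

static void my_complete_all(struct my_item **headp)
{
	struct my_item *batch = NULL;
	struct my_item *item, *next, **prevp = headp;

	/* Pass 1: unlink the interesting items without taking any lock. */
	for (item = *headp; item; item = next) {
		next = item->next;
		if (!item->needs_global_work) {
			prevp = &item->next;
			continue;
		}
		*prevp = next;		/* remove from the original list */
		item->next = batch;	/* push onto a private batch list */
		batch = item;
	}

	/* Pass 2: one round trip on the hot lock for the whole batch. */
	spin_lock(&my_global_lock);
	for (item = batch; item; item = item->next)
		item->needs_global_work = 0;	/* stand-in for the AIL work */
	spin_unlock(&my_global_lock);
}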
 
 /*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2057614..55582bd 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,127 +47,8 @@
 
 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
 						<< mp->m_writeio_log)
-#define XFS_STRAT_WRITE_IMAPS	2
 #define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
 
-STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
-				  int, struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
-				 struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
-				struct xfs_bmbt_irec *, int *);
-
-int
-xfs_iomap(
-	struct xfs_inode	*ip,
-	xfs_off_t		offset,
-	ssize_t			count,
-	int			flags,
-	struct xfs_bmbt_irec	*imap,
-	int			*nimaps,
-	int			*new)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		offset_fsb, end_fsb;
-	int			error = 0;
-	int			lockmode = 0;
-	int			bmapi_flags = 0;
-
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-
-	*new = 0;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
-
-	trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
-
-	switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
-	case BMAPI_READ:
-		lockmode = xfs_ilock_map_shared(ip);
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-		break;
-	case BMAPI_WRITE:
-		lockmode = XFS_ILOCK_EXCL;
-		if (flags & BMAPI_IGNSTATE)
-			bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
-		xfs_ilock(ip, lockmode);
-		break;
-	case BMAPI_ALLOCATE:
-		lockmode = XFS_ILOCK_SHARED;
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-
-		/* Attempt non-blocking lock */
-		if (flags & BMAPI_TRYLOCK) {
-			if (!xfs_ilock_nowait(ip, lockmode))
-				return XFS_ERROR(EAGAIN);
-		} else {
-			xfs_ilock(ip, lockmode);
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	ASSERT(offset <= mp->m_maxioffset);
-	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
-		count = mp->m_maxioffset - offset;
-	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
-	error = xfs_bmapi(NULL, ip, offset_fsb,
-			(xfs_filblks_t)(end_fsb - offset_fsb),
-			bmapi_flags,  NULL, 0, imap,
-			nimaps, NULL);
-
-	if (error)
-		goto out;
-
-	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
-	case BMAPI_WRITE:
-		/* If we found an extent, return it */
-		if (*nimaps &&
-		    (imap->br_startblock != HOLESTARTBLOCK) &&
-		    (imap->br_startblock != DELAYSTARTBLOCK)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		if (flags & BMAPI_DIRECT) {
-			error = xfs_iomap_write_direct(ip, offset, count, flags,
-						       imap, nimaps);
-		} else {
-			error = xfs_iomap_write_delay(ip, offset, count, flags,
-						      imap, nimaps);
-		}
-		if (!error) {
-			trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
-		}
-		*new = 1;
-		break;
-	case BMAPI_ALLOCATE:
-		/* If we found an extent, return it */
-		xfs_iunlock(ip, lockmode);
-		lockmode = 0;
-
-		if (*nimaps && !isnullstartblock(imap->br_startblock)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		error = xfs_iomap_write_allocate(ip, offset, count,
-						 imap, nimaps);
-		break;
-	}
-
-	ASSERT(*nimaps <= 1);
-
-out:
-	if (lockmode)
-		xfs_iunlock(ip, lockmode);
-	return XFS_ERROR(error);
-}
-
 STATIC int
 xfs_iomap_eof_align_last_fsb(
 	xfs_mount_t	*mp,
@@ -236,14 +117,13 @@
 	return EFSCORRUPTED;
 }
 
-STATIC int
+int
 xfs_iomap_write_direct(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		flags,
 	xfs_bmbt_irec_t *imap,
-	int		*nmaps)
+	int		nmaps)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -279,7 +159,7 @@
 		if (error)
 			goto error_out;
 	} else {
-		if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
+		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
 			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
 					imap->br_blockcount +
 					imap->br_startoff);
@@ -331,7 +211,7 @@
 	xfs_trans_ijoin(tp, ip);
 
 	bmapi_flag = XFS_BMAPI_WRITE;
-	if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
+	if (offset < ip->i_size || extsz)
 		bmapi_flag |= XFS_BMAPI_PREALLOC;
 
 	/*
@@ -370,7 +250,6 @@
 		goto error_out;
 	}
 
-	*nmaps = 1;
 	return 0;
 
 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
@@ -379,7 +258,6 @@
 
 error1:	/* Just cancel transaction */
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-	*nmaps = 0;	/* nothing set-up here */
 
 error_out:
 	return XFS_ERROR(error);
@@ -389,6 +267,9 @@
  * If the caller is doing a write at the end of the file, then extend the
  * allocation out to the file system's write iosize.  We clean up any extra
  * space left over when the file is closed in xfs_inactive().
+ *
+ * If we find we already have delalloc preallocation beyond EOF, don't do more
+ * preallocation as it it not needed.
  */
 STATIC int
 xfs_iomap_eof_want_preallocate(
@@ -396,7 +277,6 @@
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		ioflag,
 	xfs_bmbt_irec_t *imap,
 	int		nimaps,
 	int		*prealloc)
@@ -405,6 +285,7 @@
 	xfs_filblks_t   count_fsb;
 	xfs_fsblock_t	firstblock;
 	int		n, error, imaps;
+	int		found_delalloc = 0;
 
 	*prealloc = 0;
 	if ((offset + count) <= ip->i_size)
@@ -429,20 +310,66 @@
 				return 0;
 			start_fsb += imap[n].br_blockcount;
 			count_fsb -= imap[n].br_blockcount;
+
+			if (imap[n].br_startblock == DELAYSTARTBLOCK)
+				found_delalloc = 1;
 		}
 	}
-	*prealloc = 1;
+	if (!found_delalloc)
+		*prealloc = 1;
 	return 0;
 }
 
-STATIC int
+/*
+ * If we don't have a user specified preallocation size, dynamically increase
+ * the preallocation size as the size of the file grows. Cap the maximum size
+ * at a single extent or less if the filesystem is near full. The closer the
+ * filesystem is to full, the smaller the maximum preallocation.
+ */
+STATIC xfs_fsblock_t
+xfs_iomap_prealloc_size(
+	struct xfs_mount	*mp,
+	struct xfs_inode	*ip)
+{
+	xfs_fsblock_t		alloc_blocks = 0;
+
+	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
+		int shift = 0;
+		int64_t freesp;
+
+		alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
+		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
+					rounddown_pow_of_two(alloc_blocks));
+
+		xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+		freesp = mp->m_sb.sb_fdblocks;
+		if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
+			shift = 2;
+			if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
+				shift++;
+		}
+		if (shift)
+			alloc_blocks >>= shift;
+	}
+
+	if (alloc_blocks < mp->m_writeio_blocks)
+		alloc_blocks = mp->m_writeio_blocks;
+
+	return alloc_blocks;
+}
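+
+[Editor's note, not part of the patch] The helper above scales speculative EOF preallocation with file size and then halves it for each low-space threshold (5/4/3/2/1 percent free) the filesystem has dropped below, starting from a shift of two. A standalone model of the arithmetic; the threshold array stands in for mp->m_low_space[] and all names are hypothetical.
+
+#include <stdint.h>
+
+static uint64_t model_prealloc_blocks(uint64_t isize_blocks,
+				      uint64_t max_extent_blocks,
+				      uint64_t min_blocks,
+				      uint64_t free_blocks,
+				      const uint64_t low_space[5])
+{
+	uint64_t alloc = isize_blocks;
+	int shift = 0;
+
+	/* round down to a power of two, capped at one maximal extent */
+	while (alloc & (alloc - 1))
+		alloc &= alloc - 1;
+	if (alloc > max_extent_blocks)
+		alloc = max_extent_blocks;
+
+	/* low_space[0..4] hold the 5%, 4%, 3%, 2%, 1% free-space marks */
+	if (free_blocks < low_space[0]) {
+		shift = 2;
+		if (free_blocks < low_space[1])
+			shift++;
+		if (free_blocks < low_space[2])
+			shift++;
+		if (free_blocks < low_space[3])
+			shift++;
+		if (free_blocks < low_space[4])
+			shift++;
+	}
+	if (shift)
+		alloc >>= shift;
+
+	return alloc < min_blocks ? min_blocks : alloc;
+}
+
+For example, a 1 GB file on 4 KiB blocks models to 262144 blocks of preallocation with ample free space, but only 16384 blocks (a shift of four) once free space falls below the 3% threshold.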
+
+int
 xfs_iomap_write_delay(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		ioflag,
-	xfs_bmbt_irec_t *ret_imap,
-	int		*nmaps)
+	xfs_bmbt_irec_t *ret_imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -469,16 +396,19 @@
 	extsz = xfs_get_extsz_hint(ip);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
+
 	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
-				ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
+				imap, XFS_WRITE_IMAPS, &prealloc);
 	if (error)
 		return error;
 
 retry:
 	if (prealloc) {
+		xfs_fsblock_t	alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
+
 		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
 		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
-		last_fsb = ioalign + mp->m_writeio_blocks;
+		last_fsb = ioalign + alloc_blocks;
 	} else {
 		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
 	}
@@ -496,22 +426,31 @@
 			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
 			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
 			  &nimaps, NULL);
-	if (error && (error != ENOSPC))
+	switch (error) {
+	case 0:
+	case ENOSPC:
+	case EDQUOT:
+		break;
+	default:
 		return XFS_ERROR(error);
+	}
 
 	/*
-	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
-	 * then we must have run out of space - flush all other inodes with
-	 * delalloc blocks and retry without EOF preallocation.
+	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT.  For
+	 * ENOSPC, flush all other inodes with delalloc blocks to free up
+	 * some of the excess reserved metadata space. For both cases, retry
+	 * without EOF preallocation.
 	 */
 	if (nimaps == 0) {
 		trace_xfs_delalloc_enospc(ip, offset, count);
 		if (flushed)
-			return XFS_ERROR(ENOSPC);
+			return XFS_ERROR(error ? error : ENOSPC);
 
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		xfs_flush_inodes(ip);
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		if (error == ENOSPC) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			xfs_flush_inodes(ip);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+		}
 
 		flushed = 1;
 		error = 0;
@@ -523,8 +462,6 @@
 		return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
 
 	*ret_imap = imap[0];
-	*nmaps = 1;
-
 	return 0;
 }
 
@@ -538,13 +475,12 @@
  * We no longer bother to look at the incoming map - all we have to
  * guarantee is that whatever we allocate fills the required range.
  */
-STATIC int
+int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	xfs_bmbt_irec_t *imap,
-	int		*retmap)
+	xfs_bmbt_irec_t *imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb, last_block;
@@ -557,8 +493,6 @@
 	int		error = 0;
 	int		nres;
 
-	*retmap = 0;
-
 	/*
 	 * Make sure that the dquots are there.
 	 */
@@ -680,7 +614,6 @@
 		if ((offset_fsb >= imap->br_startoff) &&
 		    (offset_fsb < (imap->br_startoff +
 				   imap->br_blockcount))) {
-			*retmap = 1;
 			XFS_STATS_INC(xs_xstrat_quick);
 			return 0;
 		}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7748a43..8061576 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,30 +18,15 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-/* base extent manipulation calls */
-#define BMAPI_READ	(1 << 0)	/* read extents */
-#define BMAPI_WRITE	(1 << 1)	/* create extents */
-#define BMAPI_ALLOCATE	(1 << 2)	/* delayed allocate to real extents */
-
-/* modifiers */
-#define BMAPI_IGNSTATE	(1 << 4)	/* ignore unwritten state on read */
-#define BMAPI_DIRECT	(1 << 5)	/* direct instead of buffered write */
-#define BMAPI_MMA	(1 << 6)	/* allocate for mmap write */
-#define BMAPI_TRYLOCK	(1 << 7)	/* non-blocking request */
-
-#define BMAPI_FLAGS \
-	{ BMAPI_READ,		"READ" }, \
-	{ BMAPI_WRITE,		"WRITE" }, \
-	{ BMAPI_ALLOCATE,	"ALLOCATE" }, \
-	{ BMAPI_IGNSTATE,	"IGNSTATE" }, \
-	{ BMAPI_DIRECT,		"DIRECT" }, \
-	{ BMAPI_TRYLOCK,	"TRYLOCK" }
-
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
-		     struct xfs_bmbt_irec *, int *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index cee4ab9..ae6fef1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,7 +47,7 @@
 				xfs_buftarg_t	*log_target,
 				xfs_daddr_t	blk_offset,
 				int		num_bblks);
-STATIC int	 xlog_space_left(xlog_t *log, int cycle, int bytes);
+STATIC int	 xlog_space_left(struct log *log, atomic64_t *head);
 STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void	 xlog_dealloc_log(xlog_t *log);
 
@@ -70,7 +70,7 @@
 /* local functions to manipulate grant head */
 STATIC int  xlog_grant_log_space(xlog_t		*log,
 				 xlog_ticket_t	*xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t	*mp,
+STATIC void xlog_grant_push_ail(struct log	*log,
 				int		need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
 					   xlog_ticket_t *ticket);
@@ -81,98 +81,73 @@
 
 #if defined(DEBUG)
 STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void	xlog_verify_grant_head(xlog_t *log, int equals);
+STATIC void	xlog_verify_grant_tail(struct log *log);
 STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
 				  int count, boolean_t syncing);
 STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
 				     xfs_lsn_t tail_lsn);
 #else
 #define xlog_verify_dest_ptr(a,b)
-#define xlog_verify_grant_head(a,b)
+#define xlog_verify_grant_tail(a)
 #define xlog_verify_iclog(a,b,c,d)
 #define xlog_verify_tail_lsn(a,b,c)
 #endif
 
 STATIC int	xlog_iclogs_empty(xlog_t *log);
 
-
 static void
-xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_sub_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (*qp) {
-		tic->t_next	    = (*qp);
-		tic->t_prev	    = (*qp)->t_prev;
-		(*qp)->t_prev->t_next = tic;
-		(*qp)->t_prev	    = tic;
-	} else {
-		tic->t_prev = tic->t_next = tic;
-		*qp = tic;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t new, old;
 
-	tic->t_flags |= XLOG_TIC_IN_Q;
+	do {
+		int	cycle, space;
+
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
+
+		space -= bytes;
+		if (space < 0) {
+			space += log->l_logsize;
+			cycle--;
+		}
+
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
 
 static void
-xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_add_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (tic == tic->t_next) {
-		*qp = NULL;
-	} else {
-		*qp = tic->t_next;
-		tic->t_next->t_prev = tic->t_prev;
-		tic->t_prev->t_next = tic->t_next;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t new, old;
 
-	tic->t_next = tic->t_prev = NULL;
-	tic->t_flags &= ~XLOG_TIC_IN_Q;
-}
+	do {
+		int		tmp;
+		int		cycle, space;
 
-static void
-xlog_grant_sub_space(struct log *log, int bytes)
-{
-	log->l_grant_write_bytes -= bytes;
-	if (log->l_grant_write_bytes < 0) {
-		log->l_grant_write_bytes += log->l_logsize;
-		log->l_grant_write_cycle--;
-	}
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
 
-	log->l_grant_reserve_bytes -= bytes;
-	if ((log)->l_grant_reserve_bytes < 0) {
-		log->l_grant_reserve_bytes += log->l_logsize;
-		log->l_grant_reserve_cycle--;
-	}
+		tmp = log->l_logsize - space;
+		if (tmp > bytes)
+			space += bytes;
+		else {
+			space = bytes - tmp;
+			cycle++;
+		}
 
-}
-
-static void
-xlog_grant_add_space_write(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_write_bytes;
-	if (tmp > bytes)
-		log->l_grant_write_bytes += bytes;
-	else {
-		log->l_grant_write_cycle++;
-		log->l_grant_write_bytes = bytes - tmp;
-	}
-}
-
-static void
-xlog_grant_add_space_reserve(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_reserve_bytes;
-	if (tmp > bytes)
-		log->l_grant_reserve_bytes += bytes;
-	else {
-		log->l_grant_reserve_cycle++;
-		log->l_grant_reserve_bytes = bytes - tmp;
-	}
-}
-
-static inline void
-xlog_grant_add_space(struct log *log, int bytes)
-{
-	xlog_grant_add_space_write(log, bytes);
-	xlog_grant_add_space_reserve(log, bytes);
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
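[Editor's note, not part of the patch] Both helpers above pack a (cycle, space) pair into a single 64-bit word and move it with an atomic64_cmpxchg() retry loop, which is what allows the grant heads to be updated without l_grant_lock. The general shape of that loop, with hypothetical pack/unpack helpers standing in for xlog_crack_grant_head_val()/xlog_assign_grant_head_val():

#include <linux/atomic.h>
#include <linux/types.h>

/* cycle in the high 32 bits, space (bytes into the log) in the low 32 */
static inline int64_t my_pack(int cycle, int space)
{
	return ((int64_t)cycle << 32) | (uint32_t)space;
}

static inline void my_unpack(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = (int)(uint32_t)val;
}

static void my_head_add(atomic64_t *head, int logsize, int bytes)
{
	int64_t old, new, cur = atomic64_read(head);

	do {
		int cycle, space;

		my_unpack(cur, &cycle, &space);
		space += bytes;
		if (space >= logsize) {		/* wrapped past the end */
			space -= logsize;
			cycle++;
		}

		old = cur;
		new = my_pack(cycle, space);
		cur = atomic64_cmpxchg(head, old, new);
	} while (cur != old);	/* raced with another CPU: redo on new value */
}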
 
 static void
@@ -355,7 +330,7 @@
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
 		retval = xlog_regrant_write_log_space(log, internal_ticket);
 	} else {
 		/* may sleep if need to allocate more tickets */
@@ -369,7 +344,7 @@
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp,
+		xlog_grant_push_ail(log,
 				    (internal_ticket->t_unit_res *
 				     internal_ticket->t_cnt));
 		retval = xlog_grant_log_space(log, internal_ticket);
@@ -402,7 +377,7 @@
 		cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
 	else {
 		cmn_err(CE_NOTE,
-			"!Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
+			"Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
 			mp->m_fsname);
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 	}
@@ -584,8 +559,8 @@
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+							&log->l_icloglock);
 			} else {
 				spin_unlock(&log->l_icloglock);
 			}
@@ -625,8 +600,8 @@
 			|| iclog->ic_state == XLOG_STATE_DIRTY
 			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+							&log->l_icloglock);
 		} else {
 			spin_unlock(&log->l_icloglock);
 		}
@@ -703,55 +678,46 @@
 {
 	xlog_ticket_t	*tic;
 	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes, cycle, bytes;
+	int		need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0) {
-		/* needed since sync_lsn is 64 bits */
-		spin_lock(&log->l_icloglock);
-		tail_lsn = log->l_last_sync_lsn;
-		spin_unlock(&log->l_icloglock);
-	}
+	if (tail_lsn == 0)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
-	spin_lock(&log->l_grant_lock);
+	/* tail_lsn == 1 implies that we weren't passed a valid value.  */
+	if (tail_lsn != 1)
+		atomic64_set(&log->l_tail_lsn, tail_lsn);
 
-	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
-	 * tail_lsn.
-	 */
-	if (tail_lsn != 1) {
-		log->l_tail_lsn = tail_lsn;
-	}
-
-	if ((tic = log->l_write_headq)) {
+	if (!list_empty_careful(&log->l_writeq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_write_cycle;
-		bytes = log->l_grant_write_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
 				break;
 			tail_lsn = 0;
 			free_bytes -= tic->t_unit_res;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
+			trace_xfs_log_regrant_write_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_write_lock);
 	}
-	if ((tic = log->l_reserve_headq)) {
+
+	if (!list_empty_careful(&log->l_reserveq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_reserve_cycle;
-		bytes = log->l_grant_reserve_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_reserve_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
 			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
@@ -760,12 +726,12 @@
 				break;
 			tail_lsn = 0;
 			free_bytes -= need_bytes;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
+			trace_xfs_log_grant_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_reserve_lock);
 	}
-	spin_unlock(&log->l_grant_lock);
-}	/* xfs_log_move_tail */
+}
 
 /*
  * Determine if we have a transaction that has gone to disk
@@ -831,23 +797,19 @@
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
 {
-	xfs_lsn_t tail_lsn;
-	xlog_t	  *log = mp->m_log;
+	xfs_lsn_t		tail_lsn;
+	struct log		*log = mp->m_log;
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
-	spin_lock(&log->l_grant_lock);
-	if (tail_lsn != 0) {
-		log->l_tail_lsn = tail_lsn;
-	} else {
-		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-	}
-	spin_unlock(&log->l_grant_lock);
+	if (!tail_lsn)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
+	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
-}	/* xlog_assign_tail_lsn */
-
+}
 
 /*
  * Return the space in the log between the tail and the head.  The head
@@ -864,21 +826,26 @@
  * result is that we return the size of the log as the amount of space left.
  */
 STATIC int
-xlog_space_left(xlog_t *log, int cycle, int bytes)
+xlog_space_left(
+	struct log	*log,
+	atomic64_t	*head)
 {
-	int free_bytes;
-	int tail_bytes;
-	int tail_cycle;
+	int		free_bytes;
+	int		tail_bytes;
+	int		tail_cycle;
+	int		head_cycle;
+	int		head_bytes;
 
-	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
-	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
-	if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
-		free_bytes = log->l_logsize - (bytes - tail_bytes);
-	} else if ((tail_cycle + 1) < cycle) {
+	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+	tail_bytes = BBTOB(tail_bytes);
+	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
+		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
+	else if (tail_cycle + 1 < head_cycle)
 		return 0;
-	} else if (tail_cycle < cycle) {
-		ASSERT(tail_cycle == (cycle - 1));
-		free_bytes = tail_bytes - bytes;
+	else if (tail_cycle < head_cycle) {
+		ASSERT(tail_cycle == (head_cycle - 1));
+		free_bytes = tail_bytes - head_bytes;
 	} else {
 		/*
 		 * The reservation head is behind the tail.
@@ -889,12 +856,12 @@
 			"xlog_space_left: head behind tail\n"
 			"  tail_cycle = %d, tail_bytes = %d\n"
 			"  GH   cycle = %d, GH   bytes = %d",
-			tail_cycle, tail_bytes, cycle, bytes);
+			tail_cycle, tail_bytes, head_cycle, head_bytes);
 		ASSERT(0);
 		free_bytes = log->l_logsize;
 	}
 	return free_bytes;
-}	/* xlog_space_left */
+}
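+
+[Editor's note, not part of the patch] A couple of concrete numbers for the head/tail arithmetic above, using a 1000-byte log for readability; the model below ignores the head-behind-tail error branch that the real function also handles.
+
+#include <assert.h>
+
+/* free bytes between tail and head of a circular log of `logsize` bytes */
+static int model_space_left(int logsize, int tail_cycle, int tail_bytes,
+			    int head_cycle, int head_bytes)
+{
+	if (tail_cycle == head_cycle)
+		return logsize - (head_bytes - tail_bytes);
+	if (tail_cycle + 1 == head_cycle)
+		return tail_bytes - head_bytes;
+	return 0;	/* head a full cycle or more ahead: nothing left */
+}
+
+int main(void)
+{
+	/* same cycle: 400 bytes written since the tail, 600 still free */
+	assert(model_space_left(1000, 5, 300, 5, 700) == 600);
+	/* head wrapped into the next cycle: only the gap up to the tail is free */
+	assert(model_space_left(1000, 5, 700, 6, 200) == 500);
+	return 0;
+}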
 
 
 /*
@@ -1047,12 +1014,16 @@
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
 
 	log->l_prev_block  = -1;
-	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_last_sync_lsn = log->l_tail_lsn;
+	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
-	log->l_grant_reserve_cycle = 1;
-	log->l_grant_write_cycle = 1;
+	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
+	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
+	INIT_LIST_HEAD(&log->l_reserveq);
+	INIT_LIST_HEAD(&log->l_writeq);
+	spin_lock_init(&log->l_grant_reserve_lock);
+	spin_lock_init(&log->l_grant_write_lock);
 
 	error = EFSCORRUPTED;
 	if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1094,8 +1065,7 @@
 	log->l_xbuf = bp;
 
 	spin_lock_init(&log->l_icloglock);
-	spin_lock_init(&log->l_grant_lock);
-	sv_init(&log->l_flush_wait, 0, "flush_wait");
+	init_waitqueue_head(&log->l_flush_wait);
 
 	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
 	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1151,8 +1121,8 @@
 
 		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
 		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-		sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
-		sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
+		init_waitqueue_head(&iclog->ic_force_wait);
+		init_waitqueue_head(&iclog->ic_write_wait);
 
 		iclogp = &iclog->ic_next;
 	}
@@ -1167,15 +1137,11 @@
 out_free_iclog:
 	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
 		prev_iclog = iclog->ic_next;
-		if (iclog->ic_bp) {
-			sv_destroy(&iclog->ic_force_wait);
-			sv_destroy(&iclog->ic_write_wait);
+		if (iclog->ic_bp)
 			xfs_buf_free(iclog->ic_bp);
-		}
 		kmem_free(iclog);
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 	xfs_buf_free(log->l_xbuf);
 out_free_log:
 	kmem_free(log);
@@ -1223,61 +1189,60 @@
  * water mark.  In this manner, we would be creating a low water mark.
  */
 STATIC void
-xlog_grant_push_ail(xfs_mount_t	*mp,
-		    int		need_bytes)
+xlog_grant_push_ail(
+	struct log	*log,
+	int		need_bytes)
 {
-    xlog_t	*log = mp->m_log;	/* pointer to the log */
-    xfs_lsn_t	tail_lsn;		/* lsn of the log tail */
-    xfs_lsn_t	threshold_lsn = 0;	/* lsn we'd like to be at */
-    int		free_blocks;		/* free blocks left to write to */
-    int		free_bytes;		/* free bytes left to write to */
-    int		threshold_block;	/* block in lsn we'd like to be at */
-    int		threshold_cycle;	/* lsn cycle we'd like to be at */
-    int		free_threshold;
+	xfs_lsn_t	threshold_lsn = 0;
+	xfs_lsn_t	last_sync_lsn;
+	int		free_blocks;
+	int		free_bytes;
+	int		threshold_block;
+	int		threshold_cycle;
+	int		free_threshold;
 
-    ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-    spin_lock(&log->l_grant_lock);
-    free_bytes = xlog_space_left(log,
-				 log->l_grant_reserve_cycle,
-				 log->l_grant_reserve_bytes);
-    tail_lsn = log->l_tail_lsn;
-    free_blocks = BTOBBT(free_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+	free_blocks = BTOBBT(free_bytes);
 
-    /*
-     * Set the threshold for the minimum number of free blocks in the
-     * log to the maximum of what the caller needs, one quarter of the
-     * log, and 256 blocks.
-     */
-    free_threshold = BTOBB(need_bytes);
-    free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
-    free_threshold = MAX(free_threshold, 256);
-    if (free_blocks < free_threshold) {
-	threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-	threshold_cycle = CYCLE_LSN(tail_lsn);
-	if (threshold_block >= log->l_logBBsize) {
-	    threshold_block -= log->l_logBBsize;
-	    threshold_cycle += 1;
-	}
-	threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
-
-	/* Don't pass in an lsn greater than the lsn of the last
-	 * log record known to be on disk.
+	/*
+	 * Set the threshold for the minimum number of free blocks in the
+	 * log to the maximum of what the caller needs, one quarter of the
+	 * log, and 256 blocks.
 	 */
-	if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-	    threshold_lsn = log->l_last_sync_lsn;
-    }
-    spin_unlock(&log->l_grant_lock);
+	free_threshold = BTOBB(need_bytes);
+	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+	free_threshold = MAX(free_threshold, 256);
+	if (free_blocks >= free_threshold)
+		return;
 
-    /*
-     * Get the transaction layer to kick the dirty buffers out to
-     * disk asynchronously. No point in trying to do this if
-     * the filesystem is shutting down.
-     */
-    if (threshold_lsn &&
-	!XLOG_FORCED_SHUTDOWN(log))
-	    xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-}	/* xlog_grant_push_ail */
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+						&threshold_block);
+	threshold_block += free_threshold;
+	if (threshold_block >= log->l_logBBsize) {
+		threshold_block -= log->l_logBBsize;
+		threshold_cycle += 1;
+	}
+	threshold_lsn = xlog_assign_lsn(threshold_cycle,
+					threshold_block);
+	/*
+	 * Don't pass in an lsn greater than the lsn of the last
+	 * log record known to be on disk. Use a snapshot of the last sync lsn
+	 * so that it doesn't change between the compare and the set.
+	 */
+	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+		threshold_lsn = last_sync_lsn;
+
+	/*
+	 * Get the transaction layer to kick the dirty buffers out to
+	 * disk asynchronously. No point in trying to do this if
+	 * the filesystem is shutting down.
+	 */
+	if (!XLOG_FORCED_SHUTDOWN(log))
+		xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+}
 
 /*
  * The bdstrat callback function for log bufs. This gives us a central
@@ -1372,9 +1337,8 @@
 		 roundoff < BBTOB(1)));
 
 	/* move grant heads by roundoff in sync */
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_add_space(log, roundoff);
-	spin_unlock(&log->l_grant_lock);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
+	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
 
 	/* put cycle number in every block */
 	xlog_pack_data(log, iclog, roundoff); 
@@ -1489,15 +1453,12 @@
 
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
-		sv_destroy(&iclog->ic_force_wait);
-		sv_destroy(&iclog->ic_write_wait);
 		xfs_buf_free(iclog->ic_bp);
 		next_iclog = iclog->ic_next;
 		kmem_free(iclog);
 		iclog = next_iclog;
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 
 	xfs_buf_free(log->l_xbuf);
 	log->l_mp->m_log = NULL;
@@ -2232,7 +2193,7 @@
 				lowest_lsn = xlog_get_lowest_lsn(log);
 				if (lowest_lsn &&
 				    XFS_LSN_CMP(lowest_lsn,
-				    		be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 					iclog = iclog->ic_next;
 					continue; /* Leave this iclog for
 						   * another thread */
@@ -2240,23 +2201,21 @@
 
 				iclog->ic_state = XLOG_STATE_CALLBACK;
 
-				spin_unlock(&log->l_icloglock);
 
-				/* l_last_sync_lsn field protected by
-				 * l_grant_lock. Don't worry about iclog's lsn.
-				 * No one else can be here except us.
+				/*
+				 * update the last_sync_lsn before we drop the
+				 * icloglock to ensure we are the only one that
+				 * can update it.
 				 */
-				spin_lock(&log->l_grant_lock);
-				ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-				       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-				log->l_last_sync_lsn =
-					be64_to_cpu(iclog->ic_header.h_lsn);
-				spin_unlock(&log->l_grant_lock);
+				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+				atomic64_set(&log->l_last_sync_lsn,
+					be64_to_cpu(iclog->ic_header.h_lsn));
 
-			} else {
-				spin_unlock(&log->l_icloglock);
+			} else
 				ioerrors++;
-			}
+
+			spin_unlock(&log->l_icloglock);
 
 			/*
 			 * Keep processing entries in the callback list until
@@ -2297,7 +2256,7 @@
 			xlog_state_clean_log(log);
 
 			/* wake up threads waiting in xfs_log_force() */
-			sv_broadcast(&iclog->ic_force_wait);
+			wake_up_all(&iclog->ic_force_wait);
 
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
@@ -2344,7 +2303,7 @@
 	spin_unlock(&log->l_icloglock);
 
 	if (wake)
-		sv_broadcast(&log->l_flush_wait);
+		wake_up_all(&log->l_flush_wait);
 }
 
 
@@ -2395,7 +2354,7 @@
 	 * iclog buffer, we wake them all, one will get to do the
 	 * I/O, the others get to wait for the result.
 	 */
-	sv_broadcast(&iclog->ic_write_wait);
+	wake_up_all(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
 }	/* xlog_state_done_syncing */
@@ -2444,7 +2403,7 @@
 		XFS_STATS_INC(xs_log_noiclogs);
 
 		/* Wait for log writes to have flushed */
-		sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
+		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 		goto restart;
 	}
 
@@ -2527,6 +2486,18 @@
  *
  * Once a ticket gets put onto the reserveq, it will only return after
  * the needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock-free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off the reserveq under the
+ * l_grant_reserve_lock, we only need to take that lock if we are going
+ * to add the ticket to the queue and sleep. We can avoid taking the lock if the
+ * ticket was never added to the reserveq because the t_queue list head will be
+ * empty and we hold the only reference to it so it can safely be checked
+ * unlocked.
  */
 STATIC int
 xlog_grant_log_space(xlog_t	   *log,
@@ -2534,24 +2505,27 @@
 {
 	int		 free_bytes;
 	int		 need_bytes;
-#ifdef DEBUG
-	xfs_lsn_t	 tail_lsn;
-#endif
-
 
 #ifdef DEBUG
 	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 		panic("grant Recovery problem");
 #endif
 
-	/* Is there space or do we need to sleep? */
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_grant_enter(log, tic);
 
+	need_bytes = tic->t_unit_res;
+	if (tic->t_flags & XFS_LOG_PERM_RESERV)
+		need_bytes *= tic->t_ocnt;
+
 	/* something is already sleeping; insert new transaction at end */
-	if (log->l_reserve_headq) {
-		xlog_ins_ticketq(&log->l_reserve_headq, tic);
+	if (!list_empty_careful(&log->l_reserveq)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		/* recheck the queue now we are locked */
+		if (list_empty(&log->l_reserveq)) {
+			spin_unlock(&log->l_grant_reserve_lock);
+			goto redo;
+		}
+		list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep1(log, tic);
 
@@ -2563,72 +2537,57 @@
 			goto error_return;
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
 		/*
 		 * If we got an error, and the filesystem is shutting down,
 		 * we'll catch it down below. So just continue...
 		 */
 		trace_xfs_log_grant_wake1(log, tic);
-		spin_lock(&log->l_grant_lock);
 	}
-	if (tic->t_flags & XFS_LOG_PERM_RESERV)
-		need_bytes = tic->t_unit_res*tic->t_ocnt;
-	else
-		need_bytes = tic->t_unit_res;
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
-				     log->l_grant_reserve_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_reserve_headq, tic);
+		spin_lock(&log->l_grant_reserve_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep2(log, tic);
 
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
-
-		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
-		trace_xfs_log_grant_wake2(log, tic);
+		xlog_grant_push_ail(log, need_bytes);
 
+		XFS_STATS_INC(xs_sleep_logspace);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
+		trace_xfs_log_grant_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
+	}
+
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_reserve_lock);
+	}
 
 	/* we've got enough space */
-	xlog_grant_add_space(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	/*
-	 * Check to make sure the grant write head didn't just over lap the
-	 * tail.  If the cycles are the same, we can't be overlapping.
-	 * Otherwise, make sure that the cycles differ by exactly one and
-	 * check the byte count.
-	 */
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
-	}
-#endif
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_grant_exit(log, tic);
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
- error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+error_return_unlocked:
+	spin_lock(&log->l_grant_reserve_lock);
+error_return:
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_reserve_lock);
 	trace_xfs_log_grant_error(log, tic);
 
 	/*
@@ -2638,7 +2597,6 @@
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 }	/* xlog_grant_log_space */
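
The queue handling in xlog_grant_log_space() boils down to a check / lock / re-check pattern; a much-simplified userspace analogue follows, with a pthread mutex and a counter standing in for the grant spinlock and the ticket queue (the real code sleeps on the ticket wait queue and retries rather than returning):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;			/* stand-in for list_empty(&log->l_reserveq) */

static void reserve(void)
{
	/* fast path: unlocked peek, like list_empty_careful() */
	if (queue_len == 0) {
		printf("fast path: nothing queued, take space directly\n");
		return;
	}

	pthread_mutex_lock(&queue_lock);
	if (queue_len == 0) {
		/*
		 * The queue drained while we waited for the lock; the real
		 * code jumps back to its "redo" label at this point.
		 */
		pthread_mutex_unlock(&queue_lock);
		return;
	}
	queue_len++;			/* join the queue and (in XFS) sleep */
	pthread_mutex_unlock(&queue_lock);
	printf("slow path: queued behind %d waiter(s)\n", queue_len - 1);
}

int main(void)
{
	reserve();			/* empty queue: fast path */
	queue_len = 1;			/* pretend someone is already waiting */
	reserve();			/* slow path */
	return 0;
}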
 
@@ -2646,17 +2604,14 @@
 /*
  * Replenish the byte reservation required by moving the grant write head.
  *
- *
+ * Similar to xlog_grant_log_space, the function is structured to have a
+ * lock-free fast path.
  */
 STATIC int
 xlog_regrant_write_log_space(xlog_t	   *log,
 			     xlog_ticket_t *tic)
 {
 	int		free_bytes, need_bytes;
-	xlog_ticket_t	*ntic;
-#ifdef DEBUG
-	xfs_lsn_t	tail_lsn;
-#endif
 
 	tic->t_curr_res = tic->t_unit_res;
 	xlog_tic_reset_res(tic);
@@ -2669,12 +2624,9 @@
 		panic("regrant Recovery problem");
 #endif
 
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_regrant_write_enter(log, tic);
-
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
 	/* If there are other waiters on the queue then give them a
 	 * chance at logspace before us. Wake up the first waiters,
@@ -2683,92 +2635,76 @@
 	 * this transaction.
 	 */
 	need_bytes = tic->t_unit_res;
-	if ((ntic = log->l_write_headq)) {
-		free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-					     log->l_grant_write_bytes);
-		do {
+	if (!list_empty_careful(&log->l_writeq)) {
+		struct xlog_ticket *ntic;
+
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(ntic, &log->l_writeq, t_queue) {
 			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < ntic->t_unit_res)
 				break;
 			free_bytes -= ntic->t_unit_res;
-			sv_signal(&ntic->t_wait);
-			ntic = ntic->t_next;
-		} while (ntic != log->l_write_headq);
+			wake_up(&ntic->t_wait);
+		}
 
-		if (ntic != log->l_write_headq) {
-			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-				xlog_ins_ticketq(&log->l_write_headq, tic);
-
+		if (ntic != list_first_entry(&log->l_writeq,
+						struct xlog_ticket, t_queue)) {
+			if (list_empty(&tic->t_queue))
+				list_add_tail(&tic->t_queue, &log->l_writeq);
 			trace_xfs_log_regrant_write_sleep1(log, tic);
 
-			spin_unlock(&log->l_grant_lock);
-			xlog_grant_push_ail(log->l_mp, need_bytes);
-			spin_lock(&log->l_grant_lock);
+			xlog_grant_push_ail(log, need_bytes);
 
 			XFS_STATS_INC(xs_sleep_logspace);
-			sv_wait(&tic->t_wait, PINOD|PLTWAIT,
-				&log->l_grant_lock, s);
-
-			/* If we're shutting down, this tic is already
-			 * off the queue */
-			spin_lock(&log->l_grant_lock);
-			if (XLOG_FORCED_SHUTDOWN(log))
-				goto error_return;
-
+			xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
 			trace_xfs_log_regrant_write_wake1(log, tic);
-		}
+		} else
+			spin_unlock(&log->l_grant_write_lock);
 	}
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-				     log->l_grant_write_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_write_headq, tic);
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
+		spin_lock(&log->l_grant_write_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_writeq);
 
-		XFS_STATS_INC(xs_sleep_logspace);
-		trace_xfs_log_regrant_write_sleep2(log, tic);
-
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		/* If we're shutting down, this tic is already off the queue */
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
+		xlog_grant_push_ail(log, need_bytes);
+
+		XFS_STATS_INC(xs_sleep_logspace);
+		trace_xfs_log_regrant_write_sleep2(log, tic);
+		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+
 		trace_xfs_log_regrant_write_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_write_headq, tic);
+	}
+
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_write_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_write_lock);
+	}
 
 	/* we've got enough space */
-	xlog_grant_add_space_write(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
-	}
-#endif
-
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_regrant_write_exit(log, tic);
-
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
 
+ error_return_unlocked:
+	spin_lock(&log->l_grant_write_lock);
  error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_write_lock);
 	trace_xfs_log_regrant_write_error(log, tic);
 
 	/*
@@ -2778,7 +2714,6 @@
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 }	/* xlog_regrant_write_log_space */
 
@@ -2799,27 +2734,24 @@
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_sub_space(log, ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+					ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_write_head,
+					ticket->t_curr_res);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 
 	trace_xfs_log_regrant_reserve_sub(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-
 	/* just return if we still have some of the pre-reserved space */
-	if (ticket->t_cnt > 0) {
-		spin_unlock(&log->l_grant_lock);
+	if (ticket->t_cnt > 0)
 		return;
-	}
 
-	xlog_grant_add_space_reserve(log, ticket->t_unit_res);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head,
+					ticket->t_unit_res);
 
 	trace_xfs_log_regrant_reserve_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 0);
-	spin_unlock(&log->l_grant_lock);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 }	/* xlog_regrant_reserve_log_space */
@@ -2843,28 +2775,29 @@
 xlog_ungrant_log_space(xlog_t	     *log,
 		       xlog_ticket_t *ticket)
 {
+	int	bytes;
+
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
 	trace_xfs_log_ungrant_enter(log, ticket);
-
-	xlog_grant_sub_space(log, ticket->t_curr_res);
-
 	trace_xfs_log_ungrant_sub(log, ticket);
 
-	/* If this is a permanent reservation ticket, we may be able to free
+	/*
+	 * If this is a permanent reservation ticket, we may be able to free
 	 * up more space based on the remaining count.
 	 */
+	bytes = ticket->t_curr_res;
 	if (ticket->t_cnt > 0) {
 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
-		xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
+		bytes += ticket->t_unit_res*ticket->t_cnt;
 	}
 
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
+	xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
 	xfs_log_move_tail(log->l_mp, 1);
 }	/* xlog_ungrant_log_space */
 
@@ -2901,11 +2834,11 @@
 
 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		/* update tail before writing to iclog */
-		xlog_assign_tail_lsn(log->l_mp);
+		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
-		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
 	spin_unlock(&log->l_icloglock);
@@ -3088,7 +3021,7 @@
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3206,8 +3139,8 @@
 
 				XFS_STATS_INC(xs_log_force_sleep);
 
-				sv_wait(&iclog->ic_prev->ic_write_wait,
-					PSWP, &log->l_icloglock, s);
+				xlog_wait(&iclog->ic_prev->ic_write_wait,
+							&log->l_icloglock);
 				if (log_flushed)
 					*log_flushed = 1;
 				already_slept = 1;
@@ -3235,7 +3168,7 @@
 				return XFS_ERROR(EIO);
 			}
 			XFS_STATS_INC(xs_log_force_sleep);
-			sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 			/*
 			 * No need to grab the log lock here since we're
 			 * only deciding whether or not to return EIO
@@ -3310,10 +3243,8 @@
 	xlog_ticket_t	*ticket)
 {
 	ASSERT(atomic_read(&ticket->t_ref) > 0);
-	if (atomic_dec_and_test(&ticket->t_ref)) {
-		sv_destroy(&ticket->t_wait);
+	if (atomic_dec_and_test(&ticket->t_ref))
 		kmem_zone_free(xfs_log_ticket_zone, ticket);
-	}
 }
 
 xlog_ticket_t *
@@ -3435,6 +3366,7 @@
         }
 
 	atomic_set(&tic->t_ref, 1);
+	INIT_LIST_HEAD(&tic->t_queue);
 	tic->t_unit_res		= unit_bytes;
 	tic->t_curr_res		= unit_bytes;
 	tic->t_cnt		= cnt;
@@ -3445,7 +3377,7 @@
 	tic->t_trans_type	= 0;
 	if (xflags & XFS_LOG_PERM_RESERV)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
+	init_waitqueue_head(&tic->t_wait);
 
 	xlog_tic_reset_res(tic);
 
@@ -3484,18 +3416,25 @@
 }
 
 STATIC void
-xlog_verify_grant_head(xlog_t *log, int equals)
+xlog_verify_grant_tail(
+	struct log	*log)
 {
-    if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
-	if (equals)
-	    ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
-	else
-	    ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
-    } else {
-	ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
-	ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
-    }
-}	/* xlog_verify_grant_head */
+	int		tail_cycle, tail_blocks;
+	int		cycle, space;
+
+	/*
+	 * Check to make sure the grant write head didn't just overlap the
+	 * tail.  If the cycles are the same, we can't be overlapping.
+	 * Otherwise, make sure that the cycles differ by exactly one and
+	 * check the byte count.
+	 */
+	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+	if (tail_cycle != cycle) {
+		ASSERT(cycle - 1 == tail_cycle);
+		ASSERT(space <= BBTOB(tail_blocks));
+	}
+}
 
 /* check if it will fit */
 STATIC void
@@ -3716,12 +3655,10 @@
 		xlog_cil_force(log);
 
 	/*
-	 * We must hold both the GRANT lock and the LOG lock,
-	 * before we mark the filesystem SHUTDOWN and wake
-	 * everybody up to tell the bad news.
+	 * mark the filesystem and the log as in a shutdown state and wake
+	 * everybody up to tell them the bad news.
 	 */
 	spin_lock(&log->l_icloglock);
-	spin_lock(&log->l_grant_lock);
 	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
 	if (mp->m_sb_bp)
 		XFS_BUF_DONE(mp->m_sb_bp);
@@ -3742,27 +3679,21 @@
 	spin_unlock(&log->l_icloglock);
 
 	/*
-	 * We don't want anybody waiting for log reservations
-	 * after this. That means we have to wake up everybody
-	 * queued up on reserve_headq as well as write_headq.
-	 * In addition, we make sure in xlog_{re}grant_log_space
-	 * that we don't enqueue anything once the SHUTDOWN flag
-	 * is set, and this action is protected by the GRANTLOCK.
+	 * We don't want anybody waiting for log reservations after this. That
+	 * means we have to wake up everybody queued up on reserveq as well as
+	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
+	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
+	 * action is protected by the grant locks.
 	 */
-	if ((tic = log->l_reserve_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
-	}
+	spin_lock(&log->l_grant_reserve_lock);
+	list_for_each_entry(tic, &log->l_reserveq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_reserve_lock);
 
-	if ((tic = log->l_write_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
-	}
-	spin_unlock(&log->l_grant_lock);
+	spin_lock(&log->l_grant_write_lock);
+	list_for_each_entry(tic, &log->l_writeq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_write_lock);
 
 	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
 		ASSERT(!logerror);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 23d6ceb..9dc8125 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -61,7 +61,7 @@
 	INIT_LIST_HEAD(&cil->xc_committing);
 	spin_lock_init(&cil->xc_cil_lock);
 	init_rwsem(&cil->xc_ctx_lock);
-	sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");
+	init_waitqueue_head(&cil->xc_commit_wait);
 
 	INIT_LIST_HEAD(&ctx->committing);
 	INIT_LIST_HEAD(&ctx->busy_extents);
@@ -361,15 +361,10 @@
 	int	abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_log_vec	*lv;
-	int			abortflag = abort ? XFS_LI_ABORTED : 0;
 	struct xfs_busy_extent	*busyp, *n;
 
-	/* unpin all the log items */
-	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
-		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
-							abortflag);
-	}
+	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+					ctx->start_lsn, abort);
 
 	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
 		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
@@ -568,7 +563,7 @@
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 	}
@@ -592,7 +587,7 @@
 	 */
 	spin_lock(&cil->xc_cil_lock);
 	ctx->commit_lsn = commit_lsn;
-	sv_broadcast(&cil->xc_commit_wait);
+	wake_up_all(&cil->xc_commit_wait);
 	spin_unlock(&cil->xc_cil_lock);
 
 	/* release the hounds! */
@@ -757,7 +752,7 @@
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 		if (ctx->sequence != sequence)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index edcdfe0..d5f8be8 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -21,7 +21,6 @@
 struct xfs_buf;
 struct log;
 struct xlog_ticket;
-struct xfs_buf_cancel;
 struct xfs_mount;
 
 /*
@@ -54,7 +53,6 @@
 	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
 	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
 
-
 static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
 {
 	return ((xfs_lsn_t)cycle << 32) | block;
@@ -133,12 +131,10 @@
  */
 #define XLOG_TIC_INITED		0x1	/* has been initialized */
 #define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
-#define XLOG_TIC_IN_Q		0x4
 
 #define XLOG_TIC_FLAGS \
 	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
-	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }, \
-	{ XLOG_TIC_IN_Q,	"XLOG_TIC_IN_Q" }
+	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 
 #endif	/* __KERNEL__ */
 
@@ -244,9 +240,8 @@
 } xlog_res_t;
 
 typedef struct xlog_ticket {
-	sv_t		   t_wait;	 /* ticket wait queue            : 20 */
-	struct xlog_ticket *t_next;	 /*			         :4|8 */
-	struct xlog_ticket *t_prev;	 /*				 :4|8 */
+	wait_queue_head_t  t_wait;	 /* ticket wait queue */
+	struct list_head   t_queue;	 /* reserve/write queue */
 	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
 	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
 	int		   t_curr_res;	 /* current reservation in bytes : 4  */
@@ -353,8 +348,8 @@
  * and move everything else out to subsequent cachelines.
  */
 typedef struct xlog_in_core {
-	sv_t			ic_force_wait;
-	sv_t			ic_write_wait;
+	wait_queue_head_t	ic_force_wait;
+	wait_queue_head_t	ic_write_wait;
 	struct xlog_in_core	*ic_next;
 	struct xlog_in_core	*ic_prev;
 	struct xfs_buf		*ic_bp;
@@ -421,7 +416,7 @@
 	struct xfs_cil_ctx	*xc_ctx;
 	struct rw_semaphore	xc_ctx_lock;
 	struct list_head	xc_committing;
-	sv_t			xc_commit_wait;
+	wait_queue_head_t	xc_commit_wait;
 	xfs_lsn_t		xc_current_sequence;
 };
 
@@ -491,7 +486,7 @@
 	struct xfs_buftarg	*l_targ;        /* buftarg of log */
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
-	struct xfs_buf_cancel	**l_buf_cancel_table;
+	struct list_head	*l_buf_cancel_table;
 	int			l_iclog_hsize;  /* size of iclog header */
 	int			l_iclog_heads;  /* # of iclog header sectors */
 	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
@@ -503,29 +498,40 @@
 	int			l_logBBsize;    /* size of log in BB chunks */
 
 	/* The following block of fields are changed while holding icloglock */
-	sv_t			l_flush_wait ____cacheline_aligned_in_smp;
+	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
 						/* waiting for iclog flush */
 	int			l_covered_state;/* state of "covering disk
 						 * log entries" */
 	xlog_in_core_t		*l_iclog;       /* head log queue	*/
 	spinlock_t		l_icloglock;    /* grab to change iclog state */
-	xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR with unflushed
-						 * buffers */
-	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
 	int			l_curr_cycle;   /* Cycle number of log writes */
 	int			l_prev_cycle;   /* Cycle number before last
 						 * block increment */
 	int			l_curr_block;   /* current logical log block */
 	int			l_prev_block;   /* previous logical log block */
 
-	/* The following block of fields are changed while holding grant_lock */
-	spinlock_t		l_grant_lock ____cacheline_aligned_in_smp;
-	xlog_ticket_t		*l_reserve_headq;
-	xlog_ticket_t		*l_write_headq;
-	int			l_grant_reserve_cycle;
-	int			l_grant_reserve_bytes;
-	int			l_grant_write_cycle;
-	int			l_grant_write_bytes;
+	/*
+	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
+	 * read without needing to hold specific locks. To avoid operations
+	 * contending with other hot objects, place each of them on a separate
+	 * cacheline.
+	 */
+	/* lsn of last LR on disk */
+	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
+	/* lsn of 1st LR with unflushed buffers */
+	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
+
+	/*
+	 * ticket grant locks, queues and accounting have their own cachelines
+	 * as these are quite hot and can be operated on concurrently.
+	 */
+	spinlock_t		l_grant_reserve_lock ____cacheline_aligned_in_smp;
+	struct list_head	l_reserveq;
+	atomic64_t		l_grant_reserve_head;
+
+	spinlock_t		l_grant_write_lock ____cacheline_aligned_in_smp;
+	struct list_head	l_writeq;
+	atomic64_t		l_grant_write_head;
 
 	/* The following field are used for debugging; need to hold icloglock */
 #ifdef DEBUG
@@ -534,6 +540,9 @@
 
 } xlog_t;
 
+#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
+	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
 /* common routines */
@@ -562,6 +571,61 @@
 				xlog_in_core_t **commit_iclog, uint flags);
 
 /*
+ * When we crack an atomic LSN, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from. This should always
+ * be used to sample and crack LSNs that are stored and updated in atomic
+ * variables.
+ */
+static inline void
+xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
+{
+	xfs_lsn_t val = atomic64_read(lsn);
+
+	*cycle = CYCLE_LSN(val);
+	*block = BLOCK_LSN(val);
+}
+
+/*
+ * Calculate and assign a value to an atomic LSN variable from component pieces.
+ */
+static inline void
+xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
+{
+	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
+}
+
+/*
+ * When we crack the grant head, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from.
+ */
+static inline void
+xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
+{
+	*cycle = val >> 32;
+	*space = val & 0xffffffff;
+}
+
+static inline void
+xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
+{
+	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
+}
+
+static inline int64_t
+xlog_assign_grant_head_val(int cycle, int space)
+{
+	return ((int64_t)cycle << 32) | space;
+}
+
+static inline void
+xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
+{
+	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
+}
+
+/*
  * Committed Item List interfaces
  */
 int	xlog_cil_init(struct log *log);
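
For reference, a standalone round-trip of the 64-bit packing used by the grant-head helpers above, with the atomic64_t wrapper dropped so it compiles as ordinary userspace C:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;	/* cycle in the high word */
}

static void crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

int main(void)
{
	int cycle, space;
	int64_t head = assign_grant_head_val(42, 123456);

	crack_grant_head_val(head, &cycle, &space);
	printf("packed=0x%" PRIx64 " cycle=%d space=%d\n",
		(uint64_t)head, cycle, space);
	return 0;
}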
@@ -585,6 +649,21 @@
  */
 #define XLOG_UNMOUNT_REC_TYPE	(-1U)
 
+/*
+ * Wrapper function for waiting on a wait queue serialised against wakeups
+ * by a spinlock. This matches the semantics of all the wait queues used in the
+ * log code.
+ */
+static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	add_wait_queue_exclusive(wq, &wait);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	spin_unlock(lock);
+	schedule();
+	remove_wait_queue(wq, &wait);
+}
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_LOG_PRIV_H__ */
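
xlog_wait() above pairs with wake_up()/wake_up_all() calls made while holding the same spinlock, so a waiter that queues itself before dropping the lock cannot miss a wakeup. The closest userspace analogue is a condition variable, which likewise drops the lock and sleeps atomically (unlike xlog_wait(), pthread_cond_wait() retakes the lock on return); a minimal sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int space_available;

static void *waker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	space_available = 1;		/* e.g. the grant head moved */
	pthread_cond_broadcast(&cond);	/* like wake_up_all() under the lock */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	pthread_create(&t, NULL, waker, NULL);
	while (!space_available)
		pthread_cond_wait(&cond, &lock);	/* drops and retakes lock */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("woken with space available\n");
	return 0;
}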
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 966d3f9..aa0ebb7 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -53,6 +53,17 @@
 #endif
 
 /*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+	xfs_daddr_t		bc_blkno;
+	uint			bc_len;
+	int			bc_refcount;
+	struct list_head	bc_list;
+};
+
+/*
  * Sector aligned buffer routines for buffer create/read/write/access
  */
 
@@ -925,12 +936,12 @@
 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 	if (found == 2)
 		log->l_curr_cycle++;
-	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
-	log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
-	log->l_grant_reserve_cycle = log->l_curr_cycle;
-	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
-	log->l_grant_write_cycle = log->l_curr_cycle;
-	log->l_grant_write_bytes = BBTOB(log->l_curr_block);
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
+	xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
 
 	/*
 	 * Look for unmount record.  If we find it, then we know there
@@ -960,7 +971,7 @@
 	}
 	after_umount_blk = (i + hblks + (int)
 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
-	tail_lsn = log->l_tail_lsn;
+	tail_lsn = atomic64_read(&log->l_tail_lsn);
 	if (*head_blk == after_umount_blk &&
 	    be32_to_cpu(rhead->h_num_logops) == 1) {
 		umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -975,12 +986,10 @@
 			 * log records will point recovery to after the
 			 * current unmount record.
 			 */
-			log->l_tail_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
-			log->l_last_sync_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_tail_lsn,
+					log->l_curr_cycle, after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+					log->l_curr_cycle, after_umount_blk);
 			*tail_blk = after_umount_blk;
 
 			/*
@@ -1605,82 +1614,45 @@
  * record in the table to tell us how many times we expect to see this
  * record during the second pass.
  */
-STATIC void
-xlog_recover_do_buffer_pass1(
-	xlog_t			*log,
-	xfs_buf_log_format_t	*buf_f)
+STATIC int
+xlog_recover_buffer_pass1(
+	struct log		*log,
+	xlog_recover_item_t	*item)
 {
-	xfs_buf_cancel_t	*bcp;
-	xfs_buf_cancel_t	*nextp;
-	xfs_buf_cancel_t	*prevp;
-	xfs_buf_cancel_t	**bucket;
-	xfs_daddr_t		blkno = 0;
-	uint			len = 0;
-	ushort			flags = 0;
-
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		len = buf_f->blf_len;
-		flags = buf_f->blf_flags;
-		break;
-	}
+	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
+	struct list_head	*bucket;
+	struct xfs_buf_cancel	*bcp;
 
 	/*
 	 * If this isn't a cancel buffer item, then just return.
 	 */
-	if (!(flags & XFS_BLF_CANCEL)) {
+	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
-		return;
+		return 0;
 	}
 
 	/*
-	 * Insert an xfs_buf_cancel record into the hash table of
-	 * them.  If there is already an identical record, bump
-	 * its reference count.
+	 * Insert an xfs_buf_cancel record into the hash table of them.
+	 * If there is already an identical record, bump its reference count.
 	 */
-	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
-					  XLOG_BC_TABLE_SIZE];
-	/*
-	 * If the hash bucket is empty then just insert a new record into
-	 * the bucket.
-	 */
-	if (*bucket == NULL) {
-		bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
-						     KM_SLEEP);
-		bcp->bc_blkno = blkno;
-		bcp->bc_len = len;
-		bcp->bc_refcount = 1;
-		bcp->bc_next = NULL;
-		*bucket = bcp;
-		return;
-	}
-
-	/*
-	 * The hash bucket is not empty, so search for duplicates of our
-	 * record.  If we find one them just bump its refcount.  If not
-	 * then add us at the end of the list.
-	 */
-	prevp = NULL;
-	nextp = *bucket;
-	while (nextp != NULL) {
-		if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
-			nextp->bc_refcount++;
+	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
+	list_for_each_entry(bcp, bucket, bc_list) {
+		if (bcp->bc_blkno == buf_f->blf_blkno &&
+		    bcp->bc_len == buf_f->blf_len) {
+			bcp->bc_refcount++;
 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
-			return;
+			return 0;
 		}
-		prevp = nextp;
-		nextp = nextp->bc_next;
 	}
-	ASSERT(prevp != NULL);
-	bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
-					     KM_SLEEP);
-	bcp->bc_blkno = blkno;
-	bcp->bc_len = len;
+
+	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+	bcp->bc_blkno = buf_f->blf_blkno;
+	bcp->bc_len = buf_f->blf_len;
 	bcp->bc_refcount = 1;
-	bcp->bc_next = NULL;
-	prevp->bc_next = bcp;
+	list_add_tail(&bcp->bc_list, bucket);
+
 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
+	return 0;
 }
 
 /*
@@ -1698,14 +1670,13 @@
  */
 STATIC int
 xlog_check_buffer_cancelled(
-	xlog_t			*log,
+	struct log		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
 	ushort			flags)
 {
-	xfs_buf_cancel_t	*bcp;
-	xfs_buf_cancel_t	*prevp;
-	xfs_buf_cancel_t	**bucket;
+	struct list_head	*bucket;
+	struct xfs_buf_cancel	*bcp;
 
 	if (log->l_buf_cancel_table == NULL) {
 		/*
@@ -1716,128 +1687,70 @@
 		return 0;
 	}
 
-	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
-					  XLOG_BC_TABLE_SIZE];
-	bcp = *bucket;
-	if (bcp == NULL) {
-		/*
-		 * There is no corresponding entry in the table built
-		 * in pass one, so this buffer has not been cancelled.
-		 */
-		ASSERT(!(flags & XFS_BLF_CANCEL));
-		return 0;
+	/*
+	 * Search for an entry in the cancel table that matches our buffer.
+	 */
+	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
+	list_for_each_entry(bcp, bucket, bc_list) {
+		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
+			goto found;
 	}
 
 	/*
-	 * Search for an entry in the buffer cancel table that
-	 * matches our buffer.
-	 */
-	prevp = NULL;
-	while (bcp != NULL) {
-		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
-			/*
-			 * We've go a match, so return 1 so that the
-			 * recovery of this buffer is cancelled.
-			 * If this buffer is actually a buffer cancel
-			 * log item, then decrement the refcount on the
-			 * one in the table and remove it if this is the
-			 * last reference.
-			 */
-			if (flags & XFS_BLF_CANCEL) {
-				bcp->bc_refcount--;
-				if (bcp->bc_refcount == 0) {
-					if (prevp == NULL) {
-						*bucket = bcp->bc_next;
-					} else {
-						prevp->bc_next = bcp->bc_next;
-					}
-					kmem_free(bcp);
-				}
-			}
-			return 1;
-		}
-		prevp = bcp;
-		bcp = bcp->bc_next;
-	}
-	/*
-	 * We didn't find a corresponding entry in the table, so
-	 * return 0 so that the buffer is NOT cancelled.
+	 * We didn't find a corresponding entry in the table, so return 0 so
+	 * that the buffer is NOT cancelled.
 	 */
 	ASSERT(!(flags & XFS_BLF_CANCEL));
 	return 0;
-}
 
-STATIC int
-xlog_recover_do_buffer_pass2(
-	xlog_t			*log,
-	xfs_buf_log_format_t	*buf_f)
-{
-	xfs_daddr_t		blkno = 0;
-	ushort			flags = 0;
-	uint			len = 0;
-
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		flags = buf_f->blf_flags;
-		len = buf_f->blf_len;
-		break;
+found:
+	/*
+	 * We've got a match, so return 1 so that the recovery of this buffer
+	 * is cancelled.  If this buffer is actually a buffer cancel log
+	 * item, then decrement the refcount on the one in the table and
+	 * remove it if this is the last reference.
+	 */
+	if (flags & XFS_BLF_CANCEL) {
+		if (--bcp->bc_refcount == 0) {
+			list_del(&bcp->bc_list);
+			kmem_free(bcp);
+		}
 	}
-
-	return xlog_check_buffer_cancelled(log, blkno, len, flags);
+	return 1;
 }
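
The cancel table handled by the two functions above reduces to a small hash of lists; a self-contained model follows, using plain pointers instead of list_head and a hypothetical bucket count:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BC_TABLE_SIZE	64		/* hypothetical bucket count */

struct buf_cancel {
	uint64_t		blkno;
	unsigned int		len;
	int			refcount;
	struct buf_cancel	*next;
};

static struct buf_cancel *table[BC_TABLE_SIZE];

static struct buf_cancel **bucket(uint64_t blkno)
{
	return &table[blkno % BC_TABLE_SIZE];
}

/* pass 1: record a cancelled buffer, or bump the refcount of a duplicate */
static void add_cancel(uint64_t blkno, unsigned int len)
{
	struct buf_cancel *bcp;

	for (bcp = *bucket(blkno); bcp; bcp = bcp->next) {
		if (bcp->blkno == blkno && bcp->len == len) {
			bcp->refcount++;
			return;
		}
	}
	bcp = calloc(1, sizeof(*bcp));
	bcp->blkno = blkno;
	bcp->len = len;
	bcp->refcount = 1;
	bcp->next = *bucket(blkno);
	*bucket(blkno) = bcp;
}

/* pass 2: has this buffer been cancelled? */
static int is_cancelled(uint64_t blkno, unsigned int len)
{
	struct buf_cancel *bcp;

	for (bcp = *bucket(blkno); bcp; bcp = bcp->next)
		if (bcp->blkno == blkno && bcp->len == len)
			return 1;
	return 0;
}

int main(void)
{
	add_cancel(1024, 8);
	add_cancel(1024, 8);		/* duplicate: refcount becomes 2 */
	printf("cancelled: %d %d\n", is_cancelled(1024, 8), is_cancelled(2048, 8));
	return 0;
}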
 
 /*
- * Perform recovery for a buffer full of inodes.  In these buffers,
- * the only data which should be recovered is that which corresponds
- * to the di_next_unlinked pointers in the on disk inode structures.
- * The rest of the data for the inodes is always logged through the
- * inodes themselves rather than the inode buffer and is recovered
- * in xlog_recover_do_inode_trans().
+ * Perform recovery for a buffer full of inodes.  In these buffers, the only
+ * data which should be recovered is that which corresponds to the
+ * di_next_unlinked pointers in the on disk inode structures.  The rest of the
+ * data for the inodes is always logged through the inodes themselves rather
+ * than the inode buffer and is recovered in xlog_recover_inode_pass2().
  *
- * The only time when buffers full of inodes are fully recovered is
- * when the buffer is full of newly allocated inodes.  In this case
- * the buffer will not be marked as an inode buffer and so will be
- * sent to xlog_recover_do_reg_buffer() below during recovery.
+ * The only time when buffers full of inodes are fully recovered is when the
+ * buffer is full of newly allocated inodes.  In this case the buffer will
+ * not be marked as an inode buffer and so will be sent to
+ * xlog_recover_do_reg_buffer() below during recovery.
  */
 STATIC int
 xlog_recover_do_inode_buffer(
-	xfs_mount_t		*mp,
+	struct xfs_mount	*mp,
 	xlog_recover_item_t	*item,
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	xfs_buf_log_format_t	*buf_f)
 {
 	int			i;
-	int			item_index;
-	int			bit;
-	int			nbits;
-	int			reg_buf_offset;
-	int			reg_buf_bytes;
+	int			item_index = 0;
+	int			bit = 0;
+	int			nbits = 0;
+	int			reg_buf_offset = 0;
+	int			reg_buf_bytes = 0;
 	int			next_unlinked_offset;
 	int			inodes_per_buf;
 	xfs_agino_t		*logged_nextp;
 	xfs_agino_t		*buffer_nextp;
-	unsigned int		*data_map = NULL;
-	unsigned int		map_size = 0;
 
 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
 
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		data_map = buf_f->blf_data_map;
-		map_size = buf_f->blf_map_size;
-		break;
-	}
-	/*
-	 * Set the variables corresponding to the current region to
-	 * 0 so that we'll initialize them on the first pass through
-	 * the loop.
-	 */
-	reg_buf_offset = 0;
-	reg_buf_bytes = 0;
-	bit = 0;
-	nbits = 0;
-	item_index = 0;
 	inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
 	for (i = 0; i < inodes_per_buf; i++) {
 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
@@ -1852,18 +1765,18 @@
 			 * the current di_next_unlinked field.
 			 */
 			bit += nbits;
-			bit = xfs_next_bit(data_map, map_size, bit);
+			bit = xfs_next_bit(buf_f->blf_data_map,
+					   buf_f->blf_map_size, bit);
 
 			/*
 			 * If there are no more logged regions in the
 			 * buffer, then we're done.
 			 */
-			if (bit == -1) {
+			if (bit == -1)
 				return 0;
-			}
 
-			nbits = xfs_contig_bits(data_map, map_size,
-							 bit);
+			nbits = xfs_contig_bits(buf_f->blf_data_map,
+						buf_f->blf_map_size, bit);
 			ASSERT(nbits > 0);
 			reg_buf_offset = bit << XFS_BLF_SHIFT;
 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
@@ -1875,9 +1788,8 @@
 		 * di_next_unlinked field, then move on to the next
 		 * di_next_unlinked field.
 		 */
-		if (next_unlinked_offset < reg_buf_offset) {
+		if (next_unlinked_offset < reg_buf_offset)
 			continue;
-		}
 
 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
@@ -1913,36 +1825,29 @@
  * given buffer.  The bitmap in the buf log format structure indicates
  * where to place the logged data.
  */
-/*ARGSUSED*/
 STATIC void
 xlog_recover_do_reg_buffer(
 	struct xfs_mount	*mp,
 	xlog_recover_item_t	*item,
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	xfs_buf_log_format_t	*buf_f)
 {
 	int			i;
 	int			bit;
 	int			nbits;
-	unsigned int		*data_map = NULL;
-	unsigned int		map_size = 0;
 	int                     error;
 
 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
 
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		data_map = buf_f->blf_data_map;
-		map_size = buf_f->blf_map_size;
-		break;
-	}
 	bit = 0;
 	i = 1;  /* 0 is the buf format structure */
 	while (1) {
-		bit = xfs_next_bit(data_map, map_size, bit);
+		bit = xfs_next_bit(buf_f->blf_data_map,
+				   buf_f->blf_map_size, bit);
 		if (bit == -1)
 			break;
-		nbits = xfs_contig_bits(data_map, map_size, bit);
+		nbits = xfs_contig_bits(buf_f->blf_data_map,
+					buf_f->blf_map_size, bit);
 		ASSERT(nbits > 0);
 		ASSERT(item->ri_buf[i].i_addr != NULL);
 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
@@ -2176,77 +2081,46 @@
  * for more details on the implementation of the table of cancel records.
  */
 STATIC int
-xlog_recover_do_buffer_trans(
+xlog_recover_buffer_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	int			error;
-	int			cancel;
-	xfs_daddr_t		blkno;
-	int			len;
-	ushort			flags;
 	uint			buf_flags;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		/*
-		 * In this pass we're only looking for buf items
-		 * with the XFS_BLF_CANCEL bit set.
-		 */
-		xlog_recover_do_buffer_pass1(log, buf_f);
+	/*
+	 * In this pass we only want to recover all the buffers which have
+	 * not been cancelled and are not cancellation buffers themselves.
+	 */
+	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
+			buf_f->blf_len, buf_f->blf_flags)) {
+		trace_xfs_log_recover_buf_cancel(log, buf_f);
 		return 0;
-	} else {
-		/*
-		 * In this pass we want to recover all the buffers
-		 * which have not been cancelled and are not
-		 * cancellation buffers themselves.  The routine
-		 * we call here will tell us whether or not to
-		 * continue with the replay of this buffer.
-		 */
-		cancel = xlog_recover_do_buffer_pass2(log, buf_f);
-		if (cancel) {
-			trace_xfs_log_recover_buf_cancel(log, buf_f);
-			return 0;
-		}
-	}
-	trace_xfs_log_recover_buf_recover(log, buf_f);
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		len = buf_f->blf_len;
-		flags = buf_f->blf_flags;
-		break;
-	default:
-		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
-			"xfs_log_recover: unknown buffer type 0x%x, logdev %s",
-			buf_f->blf_type, log->l_mp->m_logname ?
-			log->l_mp->m_logname : "internal");
-		XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
-				 XFS_ERRLEVEL_LOW, log->l_mp);
-		return XFS_ERROR(EFSCORRUPTED);
 	}
 
-	mp = log->l_mp;
+	trace_xfs_log_recover_buf_recover(log, buf_f);
+
 	buf_flags = XBF_LOCK;
-	if (!(flags & XFS_BLF_INODE_BUF))
+	if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
 		buf_flags |= XBF_MAPPED;
 
-	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
+	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+			  buf_flags);
 	if (XFS_BUF_ISERROR(bp)) {
-		xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
-				  bp, blkno);
+		xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
+				  bp, buf_f->blf_blkno);
 		error = XFS_BUF_GETERROR(bp);
 		xfs_buf_relse(bp);
 		return error;
 	}
 
 	error = 0;
-	if (flags & XFS_BLF_INODE_BUF) {
+	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
-	} else if (flags &
+	} else if (buf_f->blf_flags &
 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
 	} else {
@@ -2286,16 +2160,14 @@
 }
 
 STATIC int
-xlog_recover_do_inode_trans(
+xlog_recover_inode_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_inode_log_format_t	*in_f;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	xfs_dinode_t		*dip;
-	xfs_ino_t		ino;
 	int			len;
 	xfs_caddr_t		src;
 	xfs_caddr_t		dest;
@@ -2305,10 +2177,6 @@
 	xfs_icdinode_t		*dicp;
 	int			need_free = 0;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-
 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
 		in_f = item->ri_buf[0].i_addr;
 	} else {
@@ -2318,8 +2186,6 @@
 		if (error)
 			goto error;
 	}
-	ino = in_f->ilf_ino;
-	mp = log->l_mp;
 
 	/*
 	 * Inode buffers can be freed, look out for it,
@@ -2354,8 +2220,8 @@
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
-			dip, bp, ino);
-		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
+			dip, bp, in_f->ilf_ino);
+		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
 				 XFS_ERRLEVEL_LOW, mp);
 		error = EFSCORRUPTED;
 		goto error;
@@ -2365,8 +2231,8 @@
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
-			item, ino);
-		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
+			item, in_f->ilf_ino);
+		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
 				 XFS_ERRLEVEL_LOW, mp);
 		error = EFSCORRUPTED;
 		goto error;
@@ -2394,12 +2260,12 @@
 	if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
-			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
+			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
 					 XFS_ERRLEVEL_LOW, mp, dicp);
 			xfs_buf_relse(bp);
 			xfs_fs_cmn_err(CE_ALERT, mp,
 				"xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
-				item, dip, bp, ino);
+				item, dip, bp, in_f->ilf_ino);
 			error = EFSCORRUPTED;
 			goto error;
 		}
@@ -2407,40 +2273,40 @@
 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
-			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
+			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
 					     XFS_ERRLEVEL_LOW, mp, dicp);
 			xfs_buf_relse(bp);
 			xfs_fs_cmn_err(CE_ALERT, mp,
 				"xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
-				item, dip, bp, ino);
+				item, dip, bp, in_f->ilf_ino);
 			error = EFSCORRUPTED;
 			goto error;
 		}
 	}
 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
-			item, dip, bp, ino,
+			item, dip, bp, in_f->ilf_ino,
 			dicp->di_nextents + dicp->di_anextents,
 			dicp->di_nblocks);
 		error = EFSCORRUPTED;
 		goto error;
 	}
 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
-			item, dip, bp, ino, dicp->di_forkoff);
+			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
 		error = EFSCORRUPTED;
 		goto error;
 	}
 	if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2532,7 +2398,7 @@
 			break;
 
 		default:
-			xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
+			xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag");
 			ASSERT(0);
 			xfs_buf_relse(bp);
 			error = EIO;
@@ -2556,18 +2422,11 @@
  * of that type.
  */
 STATIC int
-xlog_recover_do_quotaoff_trans(
+xlog_recover_quotaoff_pass1(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
-	xfs_qoff_logformat_t	*qoff_f;
-
-	if (pass == XLOG_RECOVER_PASS2) {
-		return (0);
-	}
-
-	qoff_f = item->ri_buf[0].i_addr;
+	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
 	ASSERT(qoff_f);
 
 	/*
@@ -2588,22 +2447,17 @@
  * Recover a dquot record
  */
 STATIC int
-xlog_recover_do_dquot_trans(
+xlog_recover_dquot_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	struct xfs_disk_dquot	*ddq, *recddq;
 	int			error;
 	xfs_dq_logformat_t	*dq_f;
 	uint			type;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-	mp = log->l_mp;
 
 	/*
 	 * Filesystems are required to send in quota flags at mount time.
@@ -2647,7 +2501,7 @@
 	if ((error = xfs_qm_dqcheck(recddq,
 			   dq_f->qlf_id,
 			   0, XFS_QMOPT_DOWARN,
-			   "xlog_recover_do_dquot_trans (log copy)"))) {
+			   "xlog_recover_dquot_pass2 (log copy)"))) {
 		return XFS_ERROR(EIO);
 	}
 	ASSERT(dq_f->qlf_len == 1);
@@ -2670,7 +2524,7 @@
 	 * minimal initialization then.
 	 */
 	if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
-			   "xlog_recover_do_dquot_trans")) {
+			   "xlog_recover_dquot_pass2")) {
 		xfs_buf_relse(bp);
 		return XFS_ERROR(EIO);
 	}
@@ -2693,38 +2547,31 @@
  * LSN.
  */
 STATIC int
-xlog_recover_do_efi_trans(
+xlog_recover_efi_pass2(
 	xlog_t			*log,
 	xlog_recover_item_t	*item,
-	xfs_lsn_t		lsn,
-	int			pass)
+	xfs_lsn_t		lsn)
 {
 	int			error;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_efi_log_item_t	*efip;
 	xfs_efi_log_format_t	*efi_formatp;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-
 	efi_formatp = item->ri_buf[0].i_addr;
 
-	mp = log->l_mp;
 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
 					 &(efip->efi_format)))) {
 		xfs_efi_item_free(efip);
 		return error;
 	}
-	efip->efi_next_extent = efi_formatp->efi_nextents;
-	efip->efi_flags |= XFS_EFI_COMMITTED;
+	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
 
 	spin_lock(&log->l_ailp->xa_lock);
 	/*
 	 * xfs_trans_ail_update() drops the AIL lock.
 	 */
-	xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
+	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
 	return 0;
 }
 
@@ -2737,11 +2584,10 @@
  * efd format structure.  If we find it, we remove the efi from the
  * AIL and free it.
  */
-STATIC void
-xlog_recover_do_efd_trans(
+STATIC int
+xlog_recover_efd_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_efd_log_format_t	*efd_formatp;
 	xfs_efi_log_item_t	*efip = NULL;
@@ -2750,10 +2596,6 @@
 	struct xfs_ail_cursor	cur;
 	struct xfs_ail		*ailp = log->l_ailp;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return;
-	}
-
 	efd_formatp = item->ri_buf[0].i_addr;
 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
@@ -2785,62 +2627,6 @@
 	}
 	xfs_trans_ail_cursor_done(ailp, &cur);
 	spin_unlock(&ailp->xa_lock);
-}
-
-/*
- * Perform the transaction
- *
- * If the transaction modifies a buffer or inode, do it now.  Otherwise,
- * EFIs and EFDs get queued up by adding entries into the AIL for them.
- */
-STATIC int
-xlog_recover_do_trans(
-	xlog_t			*log,
-	xlog_recover_t		*trans,
-	int			pass)
-{
-	int			error = 0;
-	xlog_recover_item_t	*item;
-
-	error = xlog_recover_reorder_trans(log, trans, pass);
-	if (error)
-		return error;
-
-	list_for_each_entry(item, &trans->r_itemq, ri_list) {
-		trace_xfs_log_recover_item_recover(log, trans, item, pass);
-		switch (ITEM_TYPE(item)) {
-		case XFS_LI_BUF:
-			error = xlog_recover_do_buffer_trans(log, item, pass);
-			break;
-		case XFS_LI_INODE:
-			error = xlog_recover_do_inode_trans(log, item, pass);
-			break;
-		case XFS_LI_EFI:
-			error = xlog_recover_do_efi_trans(log, item,
-							  trans->r_lsn, pass);
-			break;
-		case XFS_LI_EFD:
-			xlog_recover_do_efd_trans(log, item, pass);
-			error = 0;
-			break;
-		case XFS_LI_DQUOT:
-			error = xlog_recover_do_dquot_trans(log, item, pass);
-			break;
-		case XFS_LI_QUOTAOFF:
-			error = xlog_recover_do_quotaoff_trans(log, item,
-							       pass);
-			break;
-		default:
-			xlog_warn(
-	"XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
-			ASSERT(0);
-			error = XFS_ERROR(EIO);
-			break;
-		}
-
-		if (error)
-			return error;
-	}
 
 	return 0;
 }
@@ -2852,7 +2638,7 @@
  */
 STATIC void
 xlog_recover_free_trans(
-	xlog_recover_t		*trans)
+	struct xlog_recover	*trans)
 {
 	xlog_recover_item_t	*item, *n;
 	int			i;
@@ -2871,17 +2657,95 @@
 }
 
 STATIC int
+xlog_recover_commit_pass1(
+	struct log		*log,
+	struct xlog_recover	*trans,
+	xlog_recover_item_t	*item)
+{
+	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
+
+	switch (ITEM_TYPE(item)) {
+	case XFS_LI_BUF:
+		return xlog_recover_buffer_pass1(log, item);
+	case XFS_LI_QUOTAOFF:
+		return xlog_recover_quotaoff_pass1(log, item);
+	case XFS_LI_INODE:
+	case XFS_LI_EFI:
+	case XFS_LI_EFD:
+	case XFS_LI_DQUOT:
+		/* nothing to do in pass 1 */
+		return 0;
+	default:
+		xlog_warn(
+	"XFS: invalid item type (%d) xlog_recover_commit_pass1",
+			ITEM_TYPE(item));
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+}
+
+STATIC int
+xlog_recover_commit_pass2(
+	struct log		*log,
+	struct xlog_recover	*trans,
+	xlog_recover_item_t	*item)
+{
+	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
+
+	switch (ITEM_TYPE(item)) {
+	case XFS_LI_BUF:
+		return xlog_recover_buffer_pass2(log, item);
+	case XFS_LI_INODE:
+		return xlog_recover_inode_pass2(log, item);
+	case XFS_LI_EFI:
+		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
+	case XFS_LI_EFD:
+		return xlog_recover_efd_pass2(log, item);
+	case XFS_LI_DQUOT:
+		return xlog_recover_dquot_pass2(log, item);
+	case XFS_LI_QUOTAOFF:
+		/* nothing to do in pass2 */
+		return 0;
+	default:
+		xlog_warn(
+	"XFS: invalid item type (%d) xlog_recover_commit_pass2",
+			ITEM_TYPE(item));
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+}
+
+/*
+ * Perform the transaction.
+ *
+ * If the transaction modifies a buffer or inode, do it now.  Otherwise,
+ * EFIs and EFDs get queued up by adding entries into the AIL for them.
+ */
+STATIC int
 xlog_recover_commit_trans(
-	xlog_t			*log,
-	xlog_recover_t		*trans,
+	struct log		*log,
+	struct xlog_recover	*trans,
 	int			pass)
 {
-	int			error;
+	int			error = 0;
+	xlog_recover_item_t	*item;
 
 	hlist_del(&trans->r_list);
-	if ((error = xlog_recover_do_trans(log, trans, pass)))
+
+	error = xlog_recover_reorder_trans(log, trans, pass);
+	if (error)
 		return error;
-	xlog_recover_free_trans(trans);			/* no error */
+
+	list_for_each_entry(item, &trans->r_itemq, ri_list) {
+		if (pass == XLOG_RECOVER_PASS1)
+			error = xlog_recover_commit_pass1(log, trans, item);
+		else
+			error = xlog_recover_commit_pass2(log, trans, item);
+		if (error)
+			return error;
+	}
+
+	xlog_recover_free_trans(trans);
 	return 0;
 }
 
@@ -3011,7 +2875,7 @@
 	xfs_extent_t		*extp;
 	xfs_fsblock_t		startblock_fsb;
 
-	ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
+	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
 
 	/*
 	 * First check the validity of the extents described by the
@@ -3050,7 +2914,7 @@
 					 extp->ext_len);
 	}
 
-	efip->efi_flags |= XFS_EFI_RECOVERED;
+	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
 	error = xfs_trans_commit(tp, 0);
 	return error;
 
@@ -3107,7 +2971,7 @@
 		 * Skip EFIs that we've already processed.
 		 */
 		efip = (xfs_efi_log_item_t *)lip;
-		if (efip->efi_flags & XFS_EFI_RECOVERED) {
+		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
 			continue;
 		}
@@ -3724,7 +3588,7 @@
 	xfs_daddr_t	head_blk,
 	xfs_daddr_t	tail_blk)
 {
-	int		error;
+	int		error, i;
 
 	ASSERT(head_blk != tail_blk);
 
@@ -3732,10 +3596,12 @@
 	 * First do a pass to find all of the cancelled buf log items.
 	 * Store them in the buf_cancel_table for use in the second pass.
 	 */
-	log->l_buf_cancel_table =
-		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
-						 sizeof(xfs_buf_cancel_t*),
+	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
+						 sizeof(struct list_head),
 						 KM_SLEEP);
+	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+
 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
 				      XLOG_RECOVER_PASS1);
 	if (error != 0) {
@@ -3754,7 +3620,7 @@
 		int	i;
 
 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
-			ASSERT(log->l_buf_cancel_table[i] == NULL);
+			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
 	}
 #endif	/* DEBUG */
 
@@ -3934,7 +3800,7 @@
 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
 	} else {
 		cmn_err(CE_DEBUG,
-			"!Ending clean XFS mount for filesystem: %s\n",
+			"Ending clean XFS mount for filesystem: %s\n",
 			log->l_mp->m_fsname);
 	}
 	return 0;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 19e9dfa1..d447aef 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -472,7 +472,7 @@
 			goto out_unwind;
 		pag->pag_agno = index;
 		pag->pag_mount = mp;
-		rwlock_init(&pag->pag_ici_lock);
+		spin_lock_init(&pag->pag_ici_lock);
 		mutex_init(&pag->pag_ici_reclaim_lock);
 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 		spin_lock_init(&pag->pag_buf_lock);
@@ -975,6 +975,24 @@
 }
 
 /*
+ * precalculate the low space thresholds for dynamic speculative preallocation.
+ */
+void
+xfs_set_low_space_thresholds(
+	struct xfs_mount	*mp)
+{
+	int i;
+
+	for (i = 0; i < XFS_LOWSP_MAX; i++) {
+		__uint64_t space = mp->m_sb.sb_dblocks;
+
+		do_div(space, 100);
+		mp->m_low_space[i] = space * (i + 1);
+	}
+}
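
For illustration only, a minimal sketch of how the precomputed m_low_space[] thresholds could be consulted; the xfs_lowsp_band() helper below is hypothetical and is not the consumer added by this series:

/*
 * Hypothetical helper (sketch, not patch code): map the current free
 * block count to the lowest 1%..5% band it falls under, or -1 if free
 * space is above all of the precomputed thresholds.
 */
static int
xfs_lowsp_band(
	struct xfs_mount	*mp,
	int64_t			freesp)
{
	int			i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		if (freesp <= mp->m_low_space[i])
			return i;	/* XFS_LOWSP_1_PCNT .. XFS_LOWSP_5_PCNT */
	}
	return -1;			/* not in a low space condition */
}
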
+
+
+/*
  * Set whether we're using inode alignment.
  */
 STATIC void
@@ -1196,6 +1214,9 @@
 	 */
 	xfs_set_rw_sizes(mp);
 
+	/* set the low space thresholds for dynamic preallocation */
+	xfs_set_low_space_thresholds(mp);
+
 	/*
 	 * Set the inode cluster size.
 	 * This may still be overridden by the file system
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5861b49..a62e897 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -103,6 +103,16 @@
 	xfs_mod_incore_sb(mp, field, delta, rsvd)
 #endif
 
+/* dynamic preallocation free space thresholds, 5% down to 1% */
+enum {
+	XFS_LOWSP_1_PCNT = 0,
+	XFS_LOWSP_2_PCNT,
+	XFS_LOWSP_3_PCNT,
+	XFS_LOWSP_4_PCNT,
+	XFS_LOWSP_5_PCNT,
+	XFS_LOWSP_MAX,
+};
+
 typedef struct xfs_mount {
 	struct super_block	*m_super;
 	xfs_tid_t		m_tid;		/* next unused tid for fs */
@@ -202,6 +212,8 @@
 	__int64_t		m_update_flags;	/* sb flags we need to update
 						   on the next remount,rw */
 	struct shrinker		m_inode_shrink;	/* inode reclaim shrinker */
+	int64_t			m_low_space[XFS_LOWSP_MAX];
+						/* low free space thresholds */
 } xfs_mount_t;
 
 /*
@@ -379,6 +391,8 @@
 
 extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);
 
+extern void	xfs_set_low_space_thresholds(struct xfs_mount *);
+
 #endif	/* __KERNEL__ */
 
 extern void	xfs_mod_sb(struct xfs_trans *, __int64_t);
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 45ce15d..edfa178 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -408,7 +408,7 @@
 	spin_lock(&mru->lock);
 	if (mru->queued) {
 		spin_unlock(&mru->lock);
-		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
+		cancel_delayed_work_sync(&mru->work);
 		spin_lock(&mru->lock);
 	}
 
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f6d956b..33dbc4e 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1137,7 +1137,7 @@
 	if (blkdelta)
 		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
 out:
-	ASSERT(error = 0);
+	ASSERT(error == 0);
 	return;
 }
 
@@ -1350,7 +1350,7 @@
  * they could be immediately flushed and we'd have to race with the flusher
  * trying to pull the item from the AIL as we add it.
  */
-void
+static void
 xfs_trans_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		commit_lsn,
@@ -1425,6 +1425,83 @@
 	xfs_trans_free(tp);
 }
 
+static inline void
+xfs_log_item_batch_insert(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		commit_lsn)
+{
+	int	i;
+
+	spin_lock(&ailp->xa_lock);
+	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+	xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+
+	for (i = 0; i < nr_items; i++)
+		IOP_UNPIN(log_items[i], 0);
+}
+
+/*
+ * Bulk operation version of xfs_trans_committed that takes a log vector of
+ * items to insert into the AIL. This uses bulk AIL insertion techniques to
+ * minimise lock traffic.
+ */
+void
+xfs_trans_committed_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_vec	*log_vector,
+	xfs_lsn_t		commit_lsn,
+	int			aborted)
+{
+#define LOG_ITEM_BATCH_SIZE	32
+	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
+	struct xfs_log_vec	*lv;
+	int			i = 0;
+
+	/* unpin all the log items */
+	for (lv = log_vector; lv; lv = lv->lv_next ) {
+		struct xfs_log_item	*lip = lv->lv_item;
+		xfs_lsn_t		item_lsn;
+
+		if (aborted)
+			lip->li_flags |= XFS_LI_ABORTED;
+		item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+		/* item_lsn of -1 means the item was freed */
+		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+			continue;
+
+		if (item_lsn != commit_lsn) {
+
+			/*
+			 * Not a bulk update option due to unusual item_lsn.
+			 * Push into AIL immediately, rechecking the lsn once
+			 * we have the ail lock. Then unpin the item.
+			 */
+			spin_lock(&ailp->xa_lock);
+			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+				xfs_trans_ail_update(ailp, lip, item_lsn);
+			else
+				spin_unlock(&ailp->xa_lock);
+			IOP_UNPIN(lip, 0);
+			continue;
+		}
+
+		/* Item is a candidate for bulk AIL insert.  */
+		log_items[i++] = lv->lv_item;
+		if (i >= LOG_ITEM_BATCH_SIZE) {
+			xfs_log_item_batch_insert(ailp, log_items,
+					LOG_ITEM_BATCH_SIZE, commit_lsn);
+			i = 0;
+		}
+	}
+
+	/* make sure we insert the remainder! */
+	if (i)
+		xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+}
+
 /*
  * Called from the trans_commit code when we notice that
  * the filesystem is in the middle of a forced shutdown.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 246286b..c2042b7 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -294,8 +294,8 @@
 #define	XFS_ALLOC_BTREE_REF	2
 #define	XFS_BMAP_BTREE_REF	2
 #define	XFS_DIR_BTREE_REF	2
+#define	XFS_INO_REF		2
 #define	XFS_ATTR_BTREE_REF	1
-#define	XFS_INO_REF		1
 #define	XFS_DQUOT_REF		1
 
 #ifdef __KERNEL__
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index dc90695..c5bbbc4 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,8 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
+STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
 STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
 
@@ -449,129 +449,152 @@
 		xfs_log_move_tail(ailp->xa_mount, 1);
 }	/* xfs_trans_unlocked_item */
 
-
 /*
- * Update the position of the item in the AIL with the new
- * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
- * it to its new position by removing it and re-adding it.
+ * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
  *
- * Wakeup anyone with an lsn less than the item's lsn.  If the item
- * we move in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
+ * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
+ * be added.  Otherwise, it will be repositioned by removing it and re-adding
+ * it to the AIL. If we move the first item in the AIL, update the log tail to
+ * match the new minimum LSN in the AIL.
  *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
+ * This function holds the AIL lock for a single pass over all the items in
+ * the array rather than cycling the lock per item. Once we have the AIL lock,
+ * we need to check each log item LSN to confirm it needs to be moved forward
+ * in the AIL.
+ *
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
  */
 void
-xfs_trans_ail_update(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip,
-	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
+xfs_trans_ail_update_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
-	xfs_log_item_t		*dlip = NULL;
-	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
+	xfs_log_item_t		*mlip;
 	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
+	LIST_HEAD(tmp);
 
 	mlip = xfs_ail_min(ailp);
 
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-	} else {
-		lip->li_flags |= XFS_LI_IN_AIL;
-	}
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			/* check if we really need to move the item */
+			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+				continue;
 
-	lip->li_lsn = lsn;
-	xfs_ail_insert(ailp, lip);
-
-	if (mlip == dlip) {
-		mlip = xfs_ail_min(ailp);
-		/*
-		 * It is not safe to access mlip after the AIL lock is
-		 * dropped, so we must get a copy of li_lsn before we do
-		 * so.  This is especially important on 32-bit platforms
-		 * where accessing and updating 64-bit values like li_lsn
-		 * is not atomic.
-		 */
-		tail_lsn = mlip->li_lsn;
-		spin_unlock(&ailp->xa_lock);
-		xfs_log_move_tail(ailp->xa_mount, tail_lsn);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
-
-
-}	/* xfs_trans_update_ail */
-
-/*
- * Delete the given item from the AIL.  It must already be in
- * the AIL.
- *
- * Wakeup anyone with an lsn less than item's lsn.    If the item
- * we delete in the AIL is the minimum one, update the tail lsn in the
- * log manager.
- *
- * Clear the IN_AIL flag from the item, reset its lsn to 0, and
- * bump the AIL's generation count to indicate that the tree
- * has changed.
- *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
- */
-void
-xfs_trans_ail_delete(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
-{
-	xfs_log_item_t		*dlip;
-	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
-
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		mlip = xfs_ail_min(ailp);
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-
-
-		lip->li_flags &= ~XFS_LI_IN_AIL;
-		lip->li_lsn = 0;
-
-		if (mlip == dlip) {
-			mlip = xfs_ail_min(ailp);
-			/*
-			 * It is not safe to access mlip after the AIL lock
-			 * is dropped, so we must get a copy of li_lsn
-			 * before we do so.  This is especially important
-			 * on 32-bit platforms where accessing and updating
-			 * 64-bit values like li_lsn is not atomic.
-			 */
-			tail_lsn = mlip ? mlip->li_lsn : 0;
-			spin_unlock(&ailp->xa_lock);
-			xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+			xfs_ail_delete(ailp, lip);
+			if (mlip == lip)
+				mlip_changed = 1;
 		} else {
-			spin_unlock(&ailp->xa_lock);
+			lip->li_flags |= XFS_LI_IN_AIL;
 		}
+		lip->li_lsn = lsn;
+		list_add(&lip->li_ail, &tmp);
 	}
-	else {
-		/*
-		 * If the file system is not being shutdown, we are in
-		 * serious trouble if we get to this stage.
-		 */
-		struct xfs_mount	*mp = ailp->xa_mount;
 
+	xfs_ail_splice(ailp, &tmp, lsn);
+
+	if (!mlip_changed) {
 		spin_unlock(&ailp->xa_lock);
-		if (!XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
-		"%s: attempting to delete a log item that is not in the AIL",
-					__func__);
-			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		}
+		return;
 	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so.  This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
+/*
+ * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
+ *
+ * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
+ * removed from the AIL. The caller is already holding the AIL lock, and has
+ * done all the checks necessary to ensure the items passed in via @log_items
+ * are ready for deletion. This includes checking that the items are in the AIL.
+ *
+ * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
+ * flag from the item and reset the item's lsn to 0. If we remove the first
+ * item in the AIL, update the log tail to match the new minimum LSN in the
+ * AIL.
+ *
+ * This function will not drop the AIL lock until all items are removed from
+ * the AIL to minimise the amount of lock traffic on the AIL. This does not
+ * greatly increase the AIL hold time, but does significantly reduce the amount
+ * of traffic on the lock, especially during IO completion.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
+ */
+void
+xfs_trans_ail_delete_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items) __releases(ailp->xa_lock)
+{
+	xfs_log_item_t		*mlip;
+	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
 
+	mlip = xfs_ail_min(ailp);
+
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+			struct xfs_mount	*mp = ailp->xa_mount;
+
+			spin_unlock(&ailp->xa_lock);
+			if (!XFS_FORCED_SHUTDOWN(mp)) {
+				xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+		"%s: attempting to delete a log item that is not in the AIL",
+						__func__);
+				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			}
+			return;
+		}
+
+		xfs_ail_delete(ailp, lip);
+		lip->li_flags &= ~XFS_LI_IN_AIL;
+		lip->li_lsn = 0;
+		if (mlip == lip)
+			mlip_changed = 1;
+	}
+
+	if (!mlip_changed) {
+		spin_unlock(&ailp->xa_lock);
+		return;
+	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so.  This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic. It is possible we've emptied the
+	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip ? mlip->li_lsn : 0;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
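
A minimal caller sketch for the bulk delete interface, assuming the caller has already validated and collected the items; this is not code from the patch, and the real callers live elsewhere in XFS:

/*
 * Sketch only: remove a caller-validated batch of log items with a
 * single AIL lock round trip.  xfs_trans_ail_delete_bulk() drops
 * ailp->xa_lock before returning.
 */
static void
example_ail_delete_batch(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**items,
	int			nr_items)
{
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete_bulk(ailp, items, nr_items);
	/* xa_lock has already been released here */
}
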
 
 /*
  * The active item list (AIL) is a doubly linked list of log
@@ -623,16 +646,13 @@
 }
 
 /*
- * Insert the given log item into the AIL.
- * We almost always insert at the end of the list, so on inserts
- * we search from the end of the list to find where the
- * new item belongs.
+ * splice the log item list into the AIL at the given LSN.
  */
 STATIC void
-xfs_ail_insert(
+xfs_ail_splice(
 	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-/* ARGSUSED */
+	struct list_head *list,
+	xfs_lsn_t	lsn)
 {
 	xfs_log_item_t	*next_lip;
 
@@ -640,39 +660,33 @@
 	 * If the list is empty, just insert the item.
 	 */
 	if (list_empty(&ailp->xa_ail)) {
-		list_add(&lip->li_ail, &ailp->xa_ail);
+		list_splice(list, &ailp->xa_ail);
 		return;
 	}
 
 	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
 			break;
 	}
 
 	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
 
-	list_add(&lip->li_ail, &next_lip->li_ail);
-
-	xfs_ail_check(ailp, lip);
+	list_splice_init(list, &next_lip->li_ail);
 	return;
 }
 
 /*
  * Delete the given item from the AIL.
  */
-/*ARGSUSED*/
-STATIC xfs_log_item_t *
+STATIC void
 xfs_ail_delete(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	xfs_ail_check(ailp, lip);
-
 	list_del(&lip->li_ail);
-
-	return lip;
+	xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
 /*
@@ -682,7 +696,6 @@
 STATIC xfs_log_item_t *
 xfs_ail_min(
 	struct xfs_ail	*ailp)
-/* ARGSUSED */
 {
 	if (list_empty(&ailp->xa_ail))
 		return NULL;
@@ -699,7 +712,6 @@
 xfs_ail_next(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	if (lip->li_ail.next == &ailp->xa_ail)
 		return NULL;
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index f783d5e..f7590f5 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -69,12 +69,16 @@
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 
-	next_extent = efip->efi_next_extent;
+	/*
+	 * atomic_inc_return gives us the value after the increment;
+	 * we want to use it as an array index so we need to subtract 1 from
+	 * it.
+	 */
+	next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
 	ASSERT(next_extent < efip->efi_format.efi_nextents);
 	extp = &(efip->efi_format.efi_extents[next_extent]);
 	extp->ext_start = start_block;
 	extp->ext_len = ext_len;
-	efip->efi_next_extent++;
 }
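
The slot reservation idiom above generalises; a self-contained sketch (not from the patch) of the same atomic_inc_return() pattern:

/*
 * Sketch: atomic_inc_return() returns the value *after* the increment,
 * so subtracting 1 yields a unique, zero-based slot index that only
 * this caller may fill in.
 */
static int
example_claim_slot(atomic_t *next_slot, int nr_slots)
{
	int slot = atomic_inc_return(next_slot) - 1;

	BUG_ON(slot >= nr_slots);
	return slot;
}
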
 
 
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 62da86c..35162c2 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -22,15 +22,17 @@
 struct xfs_log_item_desc;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_ail;
+struct xfs_log_vec;
 
 void	xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void	xfs_trans_del_item(struct xfs_log_item *);
 void	xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
 				int flags);
-void	xfs_trans_item_committed(struct xfs_log_item *lip,
-				xfs_lsn_t commit_lsn, int aborted);
 void	xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
 
+void	xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
+				xfs_lsn_t commit_lsn, int aborted);
 /*
  * AIL traversal cursor.
  *
@@ -73,12 +75,29 @@
 /*
  * From xfs_trans_ail.c
  */
-void			xfs_trans_ail_update(struct xfs_ail *ailp,
-					struct xfs_log_item *lip, xfs_lsn_t lsn)
-					__releases(ailp->xa_lock);
-void			xfs_trans_ail_delete(struct xfs_ail *ailp,
-					struct xfs_log_item *lip)
-					__releases(ailp->xa_lock);
+void	xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+				struct xfs_log_item **log_items, int nr_items,
+				xfs_lsn_t lsn) __releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_update(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
+{
+	xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
+}
+
+void	xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
+				struct xfs_log_item **log_items, int nr_items)
+				__releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_delete(
+	struct xfs_ail	*ailp,
+	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
+{
+	xfs_trans_ail_delete_bulk(ailp, &lip, 1);
+}
+
 void			xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_trans_unlocked_item(struct xfs_ail *,
 					xfs_log_item_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 8e4a63c..d8e6f8c 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -964,29 +964,48 @@
 			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
 	}
 
-	if (ip->i_d.di_nlink != 0) {
-		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-		     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-		       ip->i_delayed_blks > 0)) &&
-		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
-		    (!(ip->i_d.di_flags &
-				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+	if (ip->i_d.di_nlink == 0)
+		return 0;
 
-			/*
-			 * If we can't get the iolock just skip truncating
-			 * the blocks past EOF because we could deadlock
-			 * with the mmap_sem otherwise.  We'll get another
-			 * chance to drop them once the last reference to
-			 * the inode is dropped, so we'll never leak blocks
-			 * permanently.
-			 */
-			error = xfs_free_eofblocks(mp, ip,
-						   XFS_FREE_EOF_TRYLOCK);
-			if (error)
-				return error;
-		}
+	if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+	     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
+	       ip->i_delayed_blks > 0)) &&
+	     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
+	    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+
+		/*
+		 * If we can't get the iolock just skip truncating the blocks
+		 * past EOF because we could deadlock with the mmap_sem
+		 * otherwise.  We'll get another chance to drop them once the
+		 * last reference to the inode is dropped, so we'll never leak
+		 * blocks permanently.
+		 *
+		 * Further, if the inode is being opened, written and closed
+		 * frequently and we have delayed allocation blocks outstanding
+		 * (e.g. streaming writes from the NFS server), truncating the
+		 * blocks past EOF will cause fragmentation to occur.
+		 *
+		 * In this case don't do the truncation, either, but we have to
+		 * be careful how we detect this case. Blocks beyond EOF show
+		 * up as i_delayed_blks even when the inode is clean, so we
+		 * need to truncate them away first before checking for a dirty
+		 * release. Hence on the first dirty close we will still remove
+		 * the speculative allocation, but after that we will leave it
+		 * in place.
+		 */
+		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
+			return 0;
+
+		error = xfs_free_eofblocks(mp, ip,
+					   XFS_FREE_EOF_TRYLOCK);
+		if (error)
+			return error;
+
+		/* delalloc blocks after truncation means it really is dirty */
+		if (ip->i_delayed_blks)
+			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
 	}
-
 	return 0;
 }
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 359ef11..78ca429 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -148,9 +148,7 @@
 	u32 suprise_removal_ok:1;
 	u32 power_manageable:1;
 	u32 performance_manageable:1;
-	u32 wake_capable:1;	/* Wakeup(_PRW) supported? */
-	u32 force_power_state:1;
-	u32 reserved:22;
+	u32 reserved:24;
 };
 
 /* File System */
@@ -242,20 +240,14 @@
 struct acpi_device_wakeup_flags {
 	u8 valid:1;		/* Can successfully enable wakeup? */
 	u8 run_wake:1;		/* Run-Wake GPE devices */
-	u8 always_enabled:1;	/* Run-wake devices that are always enabled */
 	u8 notifier_present:1;  /* Wake-up notify handler has been installed */
 };
 
-struct acpi_device_wakeup_state {
-	u8 enabled:1;
-};
-
 struct acpi_device_wakeup {
 	acpi_handle gpe_device;
 	u64 gpe_number;
 	u64 sleep_state;
 	struct acpi_handle_list resources;
-	struct acpi_device_wakeup_state state;
 	struct acpi_device_wakeup_flags flags;
 	int prepare_count;
 	int run_wake_count;
@@ -328,8 +320,8 @@
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
 				       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
-int acpi_bus_get_power(acpi_handle handle, int *state);
 int acpi_bus_set_power(acpi_handle handle, int state);
+int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 bool acpi_bus_can_wakeup(acpi_handle handle);
 #ifdef CONFIG_ACPI_PROC_EVENT
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 53b7cfd..241b8a0 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20101013
+#define ACPI_CA_VERSION                 0x20101209
 
 #include "actypes.h"
 #include "actbl.h"
@@ -229,6 +229,10 @@
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
 acpi_status
+acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler,
+				 void *context);
+
+acpi_status
 acpi_install_fixed_event_handler(u32 acpi_event,
 				 acpi_event_handler handler, void *context);
 
@@ -258,11 +262,11 @@
 acpi_status
 acpi_install_gpe_handler(acpi_handle gpe_device,
 			 u32 gpe_number,
-			 u32 type, acpi_event_handler address, void *context);
+			 u32 type, acpi_gpe_handler address, void *context);
 
 acpi_status
 acpi_remove_gpe_handler(acpi_handle gpe_device,
-			u32 gpe_number, acpi_event_handler address);
+			u32 gpe_number, acpi_gpe_handler address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
@@ -292,11 +296,13 @@
 
 acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number);
 
-acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number);
-
 acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number);
 
-acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action);
+acpi_status
+acpi_setup_gpe_for_wake(acpi_handle parent_device,
+			acpi_handle gpe_device, u32 gpe_number);
+
+acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action);
 
 acpi_status
 acpi_get_gpe_status(acpi_handle gpe_device,
@@ -315,7 +321,7 @@
 
 acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
 
-acpi_status acpi_update_gpes(void);
+acpi_status acpi_update_all_gpes(void);
 
 /*
  * Resource interfaces
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index c637b75..cd77aa7 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -119,7 +119,7 @@
 struct acpi_table_bert {
 	struct acpi_table_header header;	/* Common ACPI table header */
 	u32 region_length;	/* Length of the boot error region */
-	u64 address;		/* Physical addresss of the error region */
+	u64 address;		/* Physical address of the error region */
 };
 
 /* Boot Error Region (not a subtable, pointed to by Address field above) */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 2b134b6..939a431 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -656,33 +656,34 @@
 #define ACPI_GPE_MAX                    0xFF
 #define ACPI_NUM_GPE                    256
 
-/* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */
+/* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */
 
 #define ACPI_GPE_ENABLE                 0
 #define ACPI_GPE_DISABLE                1
-#define ACPI_GPE_COND_ENABLE            2
+#define ACPI_GPE_CONDITIONAL_ENABLE     2
 
 /*
  * GPE info flags - Per GPE
- * +-------+---+-+-+
- * |  7:4  |3:2|1|0|
- * +-------+---+-+-+
- *     |     |  | |
- *     |     |  | +--- Interrupt type: edge or level triggered
- *     |     |  +----- GPE can wake the system
- *     |     +-------- Type of dispatch:to method, handler, or none
- *     +-------------- <Reserved>
+ * +-------+-+-+---+
+ * |  7:4  |3|2|1:0|
+ * +-------+-+-+---+
+ *     |    | |  |
+ *     |    | |  +-- Type of dispatch:to method, handler, notify, or none
+ *     |    | +----- Interrupt type: edge or level triggered
+ *     |    +------- Is a Wake GPE
+ *     +------------ <Reserved>
  */
-#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x01
-#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x01
+#define ACPI_GPE_DISPATCH_NONE          (u8) 0x00
+#define ACPI_GPE_DISPATCH_METHOD        (u8) 0x01
+#define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x02
+#define ACPI_GPE_DISPATCH_NOTIFY        (u8) 0x03
+#define ACPI_GPE_DISPATCH_MASK          (u8) 0x03
+
+#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x04
 #define ACPI_GPE_EDGE_TRIGGERED         (u8) 0x00
+#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x04
 
-#define ACPI_GPE_CAN_WAKE		(u8) 0x02
-
-#define ACPI_GPE_DISPATCH_MASK          (u8) 0x0C
-#define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x04
-#define ACPI_GPE_DISPATCH_METHOD        (u8) 0x08
-#define ACPI_GPE_DISPATCH_NOT_USED      (u8) 0x00
+#define ACPI_GPE_CAN_WAKE               (u8) 0x08
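
A short sketch (not ACPICA code) of how the repacked flags byte decodes under the new layout:

/* Sketch only: decode a GPE flags byte under the new bit layout. */
static void example_decode_gpe_flags(u8 flags)
{
	const char *how;

	switch (flags & ACPI_GPE_DISPATCH_MASK) {	/* bits 1:0 */
	case ACPI_GPE_DISPATCH_METHOD:
		how = "AML method";
		break;
	case ACPI_GPE_DISPATCH_HANDLER:
		how = "driver handler";
		break;
	case ACPI_GPE_DISPATCH_NOTIFY:
		how = "implicit notify";
		break;
	default:
		how = "none";
		break;
	}

	printk(KERN_DEBUG "GPE: dispatch=%s, %s triggered, %swake capable\n",
	       how,
	       (flags & ACPI_GPE_XRUPT_TYPE_MASK) ? "level" : "edge",	/* bit 2 */
	       (flags & ACPI_GPE_CAN_WAKE) ? "" : "not ");		/* bit 3 */
}
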
 
 /*
  * Flags for GPE and Lock interfaces
@@ -894,9 +895,20 @@
 /*
  * Various handlers and callback procedures
  */
+typedef
+void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type,
+			       acpi_handle device,
+			       u32 event_number, void *context);
+
+#define ACPI_EVENT_TYPE_GPE         0
+#define ACPI_EVENT_TYPE_FIXED       1
+
 typedef u32(*acpi_event_handler) (void *context);
 
 typedef
+u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context);
+
+typedef
 void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context);
 
 typedef
@@ -951,6 +963,10 @@
 #define ACPI_INTERRUPT_NOT_HANDLED      0x00
 #define ACPI_INTERRUPT_HANDLED          0x01
 
+/* GPE handler return values */
+
+#define ACPI_REENABLE_GPE               0x80
+
 /* Length of 32-bit EISAID values when converted back to a string */
 
 #define ACPI_EISAID_STRING_SIZE         8	/* Includes null terminator */
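
A hedged sketch of a driver-side handler written against the new acpi_gpe_handler prototype; the example_gpe_dev structure and its work item are assumptions for illustration, not part of this patch:

struct example_gpe_dev {			/* hypothetical driver state */
	struct work_struct	event_work;
};

/* Sketch: GPE handler using the new prototype and return convention. */
static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			       void *context)
{
	struct example_gpe_dev *dev = context;

	/* Do no real work in GPE context; defer to process context. */
	schedule_work(&dev->event_work);

	/* Handled; tell ACPICA it may re-enable this GPE. */
	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
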
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index b336502..c4dbb13 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -19,6 +19,12 @@
 extern int hest_disable;
 extern int erst_disable;
 
+#ifdef CONFIG_ACPI_APEI
+void __init acpi_hest_init(void);
+#else
+static inline void acpi_hest_init(void) { return; }
+#endif
+
 typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
 int apei_hest_parse(apei_hest_func_t func, void *data);
 
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 1b62102..55192ac 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -324,6 +324,12 @@
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr,
 					 int state, bool force);
+/*
+ * Re-evaluate whether the T-state is invalid after a CPU is onlined or
+ * offlined. In that case flags.throttling will be updated.
+ */
+extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+			unsigned long action);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3577ca1..4644c9a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -211,6 +211,36 @@
 }
 #endif
 
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+{
+	insl((unsigned long)addr, buf, len);
+}
+
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+{
+	insw((unsigned long)addr, buf, len);
+}
+
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+{
+	insb((unsigned long)addr, buf, len);
+}
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+{
+	outsl((unsigned long)addr, buf, len);
+}
+
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+{
+	outsw((unsigned long)addr, buf, len);
+}
+
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+{
+	outsb((unsigned long)addr, buf, len);
+}
+
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr)		readb(addr)
 #define ioread16(addr)		readw(addr)
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
index a321665..3f3f2d1 100644
--- a/include/asm-generic/ioctls.h
+++ b/include/asm-generic/ioctls.h
@@ -67,6 +67,7 @@
 #endif
 #define TIOCGPTN	_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T', 0x31, int)  /* Lock/unlock Pty */
+#define TIOCGDEV	_IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TCGETX		0x5432 /* SYS5 TCGETX compatibility */
 #define TCSETX		0x5433
 #define TCSETXF		0x5434
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 5ae1d07..6bf9355 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return __get_cpu_var(__irq_regs);
+	return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-	struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+	struct pt_regs *old_regs;
 
-	old_regs = *pp_regs;
-	*pp_regs = new_regs;
+	old_regs = __this_cpu_read(__irq_regs);
+	__this_cpu_write(__irq_regs, new_regs);
 	return old_regs;
 }
 
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 3da9e27..787abbb 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -45,6 +45,9 @@
 #define MADV_MERGEABLE   12		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 13		/* KSM may not merge identical pages */
 
+#define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
+
 /* compatibility flags */
 #define MAP_FILE	0
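
For context, the new advice values are consumed through the regular madvise(2) path; a user-space sketch (not part of this header):

#include <sys/mman.h>

/* Sketch: ask the kernel to back an anonymous region with hugepages. */
static int example_want_hugepages(void *addr, size_t len)
{
	return madvise(addr, len, MADV_HUGEPAGE);	/* 0 on success */
}
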
 
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6f3c6ae..f1eddf7 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -5,67 +5,108 @@
 #ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this. We return whether the PTE actually changed, which
- * in turn instructs the caller to do things like update__mmu_cache.
- * This used to be done in the caller, but sparc needs minor faults to
- * force that call on sun4c so we changed this macro slightly
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									  \
-	int __changed = !pte_same(*(__ptep), __entry);			  \
-	if (__changed) {						  \
-		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-		flush_tlb_page(__vma, __address);			  \
-	}								  \
-	__changed;							  \
-})
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
-({									\
-	pte_t __pte = *(__ptep);					\
-	int r = 1;							\
-	if (!pte_young(__pte))						\
-		r = 0;							\
-	else								\
-		set_pte_at((__vma)->vm_mm, (__address),			\
-			   (__ptep), pte_mkold(__pte));			\
-	r;								\
-})
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	int r = 1;
+	if (!pte_young(pte))
+		r = 0;
+	else
+		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+	return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	int r = 1;
+	if (!pmd_young(pmd))
+		r = 0;
+	else
+		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+	return r;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep)		\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
-	if (__young)							\
-		flush_tlb_page(__vma, __address);			\
-	__young;							\
-})
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep)			\
-({									\
-	pte_t __pte = *(__ptep);					\
-	pte_clear((__mm), (__address), (__ptep));			\
-	__pte;								\
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	pte_clear(mm, address, ptep);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	pmd_clear(mm, address, pmdp);
+	return pmd;
 }
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	BUG();
+	return __pmd(0);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
-({									\
-	pte_t __pte;							\
-	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
-	__pte;								\
-})
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address, pte_t *ptep,
+					    int full)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear(mm, address, ptep);
+	return pte;
+}
 #endif
 
 /*
@@ -74,20 +115,25 @@
  * not present, or in the process of an address space destruction.
  */
 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
-do {									\
-	pte_clear((__mm), (__address), (__ptep));			\
-} while (0)
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+					      unsigned long address,
+					      pte_t *ptep,
+					      int full)
+{
+	pte_clear(mm, address, ptep);
+}
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep)			\
-({									\
-	pte_t __pte;							\
-	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
-	flush_tlb_page(__vma, __address);				\
-	__pte;								\
-})
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -99,8 +145,49 @@
 }
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+				 unsigned long address,
+				 pmd_t *pmdp);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B)	(pte_val(A) == pte_val(B))
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
@@ -348,6 +435,24 @@
 				unsigned long size);
 #endif
 
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return 0;
+}
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return 0;
+}
+#ifndef __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	BUG();
+	return 0;
+}
+#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bd69d79..6864933 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -67,7 +67,8 @@
  * Align to a 32 byte boundary equal to the
  * alignment gcc 4.5 uses for a struct
  */
-#define STRUCT_ALIGN() . = ALIGN(32)
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 
 /* The actual configuration determines if the init/exit sections
  * are handled as text/data or they can be discarded (which
@@ -146,6 +147,13 @@
 #define TRACE_SYSCALLS()
 #endif
 
+
+#define KERNEL_DTB()							\
+	STRUCT_ALIGN();							\
+	VMLINUX_SYMBOL(__dtb_start) = .;				\
+	*(.dtb.init.rodata)						\
+	VMLINUX_SYMBOL(__dtb_end) = .;
+
 /* .data section */
 #define DATA_DATA							\
 	*(.data)							\
@@ -192,7 +200,8 @@
 
 #define READ_MOSTLY_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data..read_mostly)
+	*(.data..read_mostly)						\
+	. = ALIGN(align);
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
@@ -468,7 +477,8 @@
 	MCOUNT_REC()							\
 	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.rodata)					\
-	MEM_DISCARD(init.rodata)
+	MEM_DISCARD(init.rodata)					\
+	KERNEL_DTB()
 
 #define INIT_TEXT							\
 	*(.init.text)							\
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
new file mode 100644
index 0000000..c5813c8
--- /dev/null
+++ b/include/crypto/if_alg.h
@@ -0,0 +1,92 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_IF_ALG_H
+#define _CRYPTO_IF_ALG_H
+
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/if_alg.h>
+#include <linux/types.h>
+#include <net/sock.h>
+
+#define ALG_MAX_PAGES			16
+
+struct crypto_async_request;
+
+struct alg_sock {
+	/* struct sock must be the first member of struct alg_sock */
+	struct sock sk;
+
+	struct sock *parent;
+
+	const struct af_alg_type *type;
+	void *private;
+};
+
+struct af_alg_completion {
+	struct completion completion;
+	int err;
+};
+
+struct af_alg_control {
+	struct af_alg_iv *iv;
+	int op;
+};
+
+struct af_alg_type {
+	void *(*bind)(const char *name, u32 type, u32 mask);
+	void (*release)(void *private);
+	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+	int (*accept)(void *private, struct sock *sk);
+
+	struct proto_ops *ops;
+	struct module *owner;
+	char name[14];
+};
+
+struct af_alg_sgl {
+	struct scatterlist sg[ALG_MAX_PAGES];
+	struct page *pages[ALG_MAX_PAGES];
+};
+
+int af_alg_register_type(const struct af_alg_type *type);
+int af_alg_unregister_type(const struct af_alg_type *type);
+
+int af_alg_release(struct socket *sock);
+int af_alg_accept(struct sock *sk, struct socket *newsock);
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+		   int write);
+void af_alg_free_sg(struct af_alg_sgl *sgl);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
+void af_alg_complete(struct crypto_async_request *req, int err);
+
+static inline struct alg_sock *alg_sk(struct sock *sk)
+{
+	return (struct alg_sock *)sk;
+}
+
+static inline void af_alg_release_parent(struct sock *sk)
+{
+	sock_put(alg_sk(sk)->parent);
+}
+
+static inline void af_alg_init_completion(struct af_alg_completion *completion)
+{
+	init_completion(&completion->completion);
+}
+
+#endif	/* _CRYPTO_IF_ALG_H */
diff --git a/include/crypto/padlock.h b/include/crypto/padlock.h
new file mode 100644
index 0000000..d2cfa2e
--- /dev/null
+++ b/include/crypto/padlock.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for VIA PadLock
+ *
+ * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) 
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_PADLOCK_H
+#define _CRYPTO_PADLOCK_H
+
+#define PADLOCK_ALIGNMENT 16
+
+#define PFX	KBUILD_MODNAME ": "
+
+#define PADLOCK_CRA_PRIORITY	300
+#define PADLOCK_COMPOSITE_PRIORITY 400
+
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
+#endif	/* _CRYPTO_PADLOCK_H */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 833d208..4fd95a3 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -68,6 +68,21 @@
 	return (++sg)->length ? sg : (void *)sg_page(sg);
 }
 
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+					    struct scatterlist *sg,
+					    int chain, int num)
+{
+	if (chain) {
+		head->length += sg->length;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (sg)
+		scatterwalk_sg_chain(head, num, sg);
+	else
+		sg_mark_end(head);
+}
+
 static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
 						struct scatter_walk *walk_out)
 {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 274eaaa..a4694c6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -683,6 +683,21 @@
 	void *driver_priv; /**< Private structure for driver to use */
 };
 
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL             (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID        (1 << 0)
+#define DRM_SCANOUTPOS_INVBL        (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
+
 /**
  * DRM driver structure. This structure represents the common code for
  * a family of cards. There will be one drm_device for each card present
@@ -760,6 +775,68 @@
 	 */
 	int (*device_is_agp) (struct drm_device *dev);
 
+	/**
+	 * Called by vblank timestamping code.
+	 *
+	 * Return the current display scanout position from a crtc.
+	 *
+	 * \param dev  DRM device.
+	 * \param crtc Id of the crtc to query.
+	 * \param *vpos Target location for current vertical scanout position.
+	 * \param *hpos Target location for current horizontal scanout position.
+	 *
+	 * Returns vpos as a positive number while in active scanout area.
+	 * Returns vpos as a negative number inside vblank, counting the number
+	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+	 * until start of active scanout / end of vblank."
+	 *
+	 * \return Flags, or'ed together as follows:
+	 *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+	 * DRM_SCANOUTPOS_INVBL = Inside vblank.
+	 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+	 * this flag means that returned position may be offset by a constant
+	 * but unknown small number of scanlines wrt. real scanout position.
+	 *
+	 */
+	int (*get_scanout_position) (struct drm_device *dev, int crtc,
+				     int *vpos, int *hpos);
+
+	/**
+	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
+	 * timestamp when the most recent VBLANK interval ended or will end.
+	 *
+	 * Specifically, the timestamp in @vblank_time should correspond as
+	 * closely as possible to the time when the first video scanline of
+	 * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after end of the VBLANK interval. If the
+	 * @crtc is currently inside VBLANK, this will be a time in the future.
+	 * If the @crtc is currently scanning out a frame, this will be the
+	 * past start time of the current scanout. This is meant to adhere
+	 * to the OpenML OML_sync_control extension specification.
+	 *
+	 * \param dev DRM device handle.
+	 * \param crtc crtc for which timestamp should be returned.
+	 * \param *max_error Maximum allowable timestamp error in nanoseconds.
+	 *                   Implementation should strive to provide timestamp
+	 *                   with an error of at most *max_error nanoseconds.
+	 *                   Returns true upper bound on error for timestamp.
+	 * \param *vblank_time Target location for returned vblank timestamp.
+	 * \param flags 0 = Defaults, no special treatment needed.
+	 * \param       DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+	 *	        irq handler. Some drivers need to apply some workarounds
+	 *              for gpu-specific vblank irq quirks if flag is set.
+	 *
+	 * \returns
+	 * Zero if timestamping isn't supported in current display mode or a
+	 * negative number on failure. A positive status code on success,
+	 * which describes how the vblank_time timestamp was computed.
+	 */
+	int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags);
+
 	/* these have to be filled in */
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
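
A compressed sketch of how a driver could wire up these two hooks; every example_* helper below is a placeholder for driver-specific code, and the only real API used is the drm_calc_vbltimestamp_from_scanoutpos() helper exported further down in this patch:

/* Sketch: scanout position hook for a hypothetical driver. */
static int example_get_scanout_position(struct drm_device *dev, int crtc,
					int *vpos, int *hpos)
{
	/* example_* helpers are placeholders for hardware access */
	int line = example_read_scanline(dev, crtc);
	int vdisplay = example_crtc_vdisplay(dev, crtc);
	int vtotal = example_crtc_vtotal(dev, crtc);

	*hpos = 0;
	if (line < vdisplay) {
		*vpos = line;		/* inside active scanout */
		return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
	}
	*vpos = line - vtotal;		/* negative: lines until vblank ends */
	return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE |
	       DRM_SCANOUTPOS_INVBL;
}

/* Sketch: let the new DRM core helper compute the vblank timestamp. */
static int example_get_vblank_timestamp(struct drm_device *dev, int crtc,
					int *max_error,
					struct timeval *vblank_time,
					unsigned flags)
{
	struct drm_crtc *refcrtc = example_lookup_crtc(dev, crtc);	/* placeholder */

	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     refcrtc);
}
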
@@ -983,6 +1060,8 @@
 
 	wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
 	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
 	spinlock_t vbl_lock;
 	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
 	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
@@ -1041,12 +1120,14 @@
 	/*@{ */
 	spinlock_t object_name_lock;
 	struct idr object_name_idr;
-	uint32_t invalidate_domains;    /* domains pending invalidation */
-	uint32_t flush_domains;         /* domains pending flush */
 	/*@} */
-
+	int switch_power_state;
 };
 
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
 {
@@ -1284,11 +1365,22 @@
 			   struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+				     struct timeval *vblanktime);
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+				     struct timeval *tvblank, unsigned flags);
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+						 int crtc, int *max_error,
+						 struct timeval *vblank_time,
+						 unsigned flags,
+						 struct drm_crtc *refcrtc);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1321,7 +1413,6 @@
 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
-extern void drm_agp_chipset_flush(struct drm_device *dev);
 
 				/* Stub support (drm_stub.h) */
 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1340,6 +1431,9 @@
 extern int drm_put_minor(struct drm_minor **minor);
 extern unsigned int drm_debug;
 
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 extern struct dentry *drm_debugfs_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 029aa68..acd7fad 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -351,8 +351,14 @@
 
 	bool enabled;
 
+	/* Requested mode from modesetting. */
 	struct drm_display_mode mode;
 
+	/* Programmed mode in hw, after adjustments for encoders,
+	 * crtc, panel scaling etc. Needed for timestamping etc.
+	 */
+	struct drm_display_mode hwmode;
+
 	int x, y;
 	const struct drm_crtc_funcs *funcs;
 
@@ -360,6 +366,9 @@
 	uint32_t gamma_size;
 	uint16_t *gamma_store;
 
+	/* Constants needed for precise vblank and swap timestamping. */
+	s64 framedur_ns, linedur_ns, pixeldur_ns;
+
 	/* if you are using the helper */
 	void *helper_private;
 };
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bf01531..e391777 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -62,11 +62,14 @@
 	struct list_head unused_nodes;
 	int num_unused;
 	spinlock_t unused_lock;
+	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_size;
 	unsigned long scan_hit_start;
 	unsigned scan_hit_size;
 	unsigned scanned_blocks;
+	unsigned long scan_start;
+	unsigned long scan_end;
 };
 
 /*
@@ -145,6 +148,10 @@
 
 void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
 		      unsigned alignment);
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+				 unsigned alignment,
+				 unsigned long start,
+				 unsigned long end);
 int drm_mm_scan_add_block(struct drm_mm_node *node);
 int drm_mm_scan_remove_block(struct drm_mm_node *node);
 
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 883c1d4..fe29ae3 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -142,6 +142,42 @@
 	{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -419,6 +455,10 @@
 	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index a2776e2..0039f1f 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -289,6 +289,7 @@
 #define I915_PARAM_HAS_BLT		 11
 #define I915_PARAM_HAS_RELAXED_FENCING	 12
 #define I915_PARAM_HAS_COHERENT_RINGS	 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -635,6 +636,17 @@
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)
 #define I915_EXEC_BLT                    (3<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
 	__u64 flags;
 	__u64 rsvd1;
 	__u64 rsvd2;
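
The new I915_EXEC_CONSTANTS_* values live in the flags word of drm_i915_gem_execbuffer2, next to the ring selector. A hedged userspace-side sketch, assuming the rest of the execbuffer (buffers_ptr, buffer_count, batch length) is filled in by the caller:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int exec_with_absolute_constants(int fd,
					struct drm_i915_gem_execbuffer2 *eb)
{
	/* Render ring plus absolute constants addressing; the BSD and
	 * BLT rings ignore the I915_EXEC_CONSTANTS_* bits entirely. */
	eb->flags = I915_EXEC_RENDER | I915_EXEC_CONSTANTS_ABSOLUTE;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, eb);
}

Whether the kernel honours these bits can be probed beforehand with the new I915_PARAM_HAS_EXEC_CONSTANTS getparam.
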
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index d3c81946..9e343c0 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -2,17 +2,40 @@
 
 #ifndef _DRM_INTEL_GTT_H
 #define	_DRM_INTEL_GTT_H
-struct intel_gtt {
-	/* Number of stolen gtt entries at the beginning. */
-	unsigned int gtt_stolen_entries;
+
+const struct intel_gtt {
+	/* Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
 	/* Total number of gtt entries. */
 	unsigned int gtt_total_entries;
 	/* Part of the gtt that is mappable by the cpu, for those chips where
 	 * this is not the full gtt. */
 	unsigned int gtt_mappable_entries;
-};
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+} *intel_gtt_get(void);
 
-struct intel_gtt *intel_gtt_get(void);
+void intel_gtt_chipset_flush(void);
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg);
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags);
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags);
+
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY	1
+#define AGP_PHYS_MEMORY		2
+
+/* New caching attributes for gen6/sandybridge */
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
 #endif
-
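
intel_gtt_get() now returns a read-only view of the probed GTT configuration, including the new stolen_size and needs_dmar fields. A minimal sketch, assuming it runs inside a driver after the intel-gtt core has initialised:

#include <linux/kernel.h>
#include <drm/intel-gtt.h>

static void my_dump_gtt_config(void)
{
	const struct intel_gtt *gtt = intel_gtt_get();

	pr_info("GTT: %u bytes stolen, %u entries (%u CPU-mappable), dmar=%d\n",
		gtt->stolen_size, gtt->gtt_total_entries,
		gtt->gtt_mappable_entries, gtt->needs_dmar);
}
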
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b..e2cfe80 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@
 #define NOUVEAU_GETPARAM_PCI_VENDOR      3
 #define NOUVEAU_GETPARAM_PCI_DEVICE      4
 #define NOUVEAU_GETPARAM_BUS_TYPE        5
-#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
-#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
 #define NOUVEAU_GETPARAM_FB_SIZE         8
 #define NOUVEAU_GETPARAM_AGP_SIZE        9
-#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
 #define NOUVEAU_GETPARAM_GRAPH_UNITS     13
 #define NOUVEAU_GETPARAM_PTIMER_TIME     14
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
 struct drm_nouveau_getparam {
 	uint64_t param;
 	uint64_t value;
@@ -171,7 +169,6 @@
 };
 
 #define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
 #define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
 struct drm_nouveau_gem_cpu_prep {
 	uint32_t handle;
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 10f8b53..e95a86b 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -906,6 +906,7 @@
 #define RADEON_INFO_ACCEL_WORKING2	0x05
 #define RADEON_INFO_TILING_CONFIG	0x06
 #define RADEON_INFO_WANT_HYPERZ		0x07
+#define RADEON_INFO_WANT_CMASK		0x08 /* get access to CMASK on r300 */
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc15..50852aa 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@
  * @is_iomem:		is this io memory ?
  * @size:		size in byte
  * @offset:		offset from the base address
+ * @io_reserved_vm:     The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count:  Refcounting the numbers of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
@@ -83,7 +85,8 @@
 	unsigned long	size;
 	unsigned long	offset;
 	bool		is_iomem;
-	bool		io_reserved;
+	bool		io_reserved_vm;
+	uint64_t        io_reserved_count;
 };
 
 
@@ -154,7 +157,6 @@
  * keeps one refcount. When this refcount reaches zero,
  * the object is destroyed.
  * @event_queue: Queue for processes waiting on buffer object status change.
- * @lock: spinlock protecting mostly synchronization members.
  * @mem: structure describing current placement.
  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@
 	struct kref kref;
 	struct kref list_kref;
 	wait_queue_head_t event_queue;
-	spinlock_t lock;
 
 	/**
 	 * Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
+	struct list_head io_reserve_lru;
 	uint32_t val_seq;
 	bool seq_valid;
 
@@ -248,10 +250,10 @@
 	atomic_t reserved;
 
 	/**
-	 * Members protected by the bo::lock
+	 * Members protected by struct buffer_object_device::fence_lock
 	 * In addition, setting sync_obj to anything else
 	 * than NULL requires bo::reserved to be held. This allows for
-	 * checking NULL while reserved but not holding bo::lock.
+	 * checking NULL while reserved but not holding the mentioned lock.
 	 */
 
 	void *sync_obj_arg;
@@ -364,6 +366,44 @@
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+				bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms), to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
 /**
  * ttm_bo_lock_delayed_workqueue
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848..1da8af6 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
 
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
 
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
 struct ttm_mem_type_manager {
 	struct ttm_bo_device *bdev;
 
@@ -303,6 +309,15 @@
 	uint32_t default_caching;
 	const struct ttm_mem_type_manager_func *func;
 	void *priv;
+	struct mutex io_reserve_mutex;
+	bool use_io_reserve_lru;
+	bool io_reserve_fastpath;
+
+	/*
+	 * Protected by @io_reserve_mutex:
+	 */
+
+	struct list_head io_reserve_lru;
 
 	/*
 	 * Protected by the global->lru_lock.
@@ -510,9 +525,12 @@
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
+ * @val_seq: Current validation sequence.
  * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
  * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@
 	struct ttm_bo_driver *driver;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	spinlock_t fence_lock;
 	/*
 	 * Protected by the vm lock.
 	 */
@@ -541,6 +560,7 @@
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
+	uint32_t val_seq;
 
 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
@@ -753,31 +773,6 @@
 
 extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
 
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-			     struct ttm_mem_reg *mem,
-			     unsigned long *bus_base,
-			     unsigned long *bus_offset,
-			     unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-				struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
-				struct ttm_mem_reg *mem);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -810,6 +805,22 @@
 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unmap_virtual_locked
+ *
+ * @bo: the buffer object whose virtual mappings are to be torn down
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+			   bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
  * ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
@@ -859,11 +870,44 @@
  * try again. (only if use_sequence == 1).
  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
  */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+				 bool interruptible,
+				 bool no_wait, bool use_sequence,
+				 uint32_t sequence);
+
 /**
  * ttm_bo_unreserve
  *
@@ -874,6 +918,16 @@
 extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
  * ttm_bo_wait_unreserved
  *
  * @bo: A pointer to a struct ttm_buffer_object.
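
The reservation documentation above now spells out -EBUSY and -EDEADLK and adds lru_lock-held variants. A hedged sketch of an ordinary reserve/unreserve cycle using those return codes; the backoff and validation logic is only indicated in comments:

#include <linux/errno.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static int my_use_buffer(struct ttm_buffer_object *bo, uint32_t val_seq)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, true, val_seq);
	if (ret == -EAGAIN || ret == -EDEADLK)
		return ret;	/* back off all reservations and retry */
	if (ret)
		return ret;	/* e.g. -ERESTARTSYS from a signal */

	/* ... validate, populate and fence the buffer here ... */

	ttm_bo_unreserve(bo);
	return 0;
}
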
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475..26cc7f9 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
  * @bo:             refcounted buffer object pointer.
  * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
  * adding a new sync object.
- * @reservied:      Indicates whether @bo has been reserved for validation.
+ * @reserved:       Indicates whether @bo has been reserved for validation.
+ * @removed:        Indicates whether @bo has been removed from lru lists.
+ * @put_count:      Number of outstanding references on bo::list_kref.
+ * @old_sync_obj:   Pointer to a sync object about to be unreferenced
  */
 
 struct ttm_validate_buffer {
@@ -49,6 +52,9 @@
 	struct ttm_buffer_object *bo;
 	void *new_sync_obj_arg;
 	bool reserved;
+	bool removed;
+	int put_count;
+	void *old_sync_obj;
 };
 
 /**
@@ -66,7 +72,6 @@
  * function ttm_eu_reserve_buffers
  *
  * @list:    thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@
  * has failed.
  */
 
-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);
 
 /**
  * function ttm_eu_fence_buffer_objects.
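
With the sequence number now kept in the device (see the new bdev::val_seq above), callers of ttm_eu_reserve_buffers() pass only the validation list. A minimal sketch, assuming a driver-built list of ttm_validate_buffer entries, an already-created fence object, and that ttm_eu_fence_buffer_objects() keeps its (list, sync_obj) signature:

#include <drm/ttm/ttm_execbuf_util.h>

static int my_submit(struct list_head *val_list, void *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(val_list);
	if (ret)
		return ret;	/* nothing is left reserved on failure */

	/* ... validate the buffers and emit the command stream ... */

	ttm_eu_fence_buffer_objects(val_list, fence);
	return 0;
}
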
diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h
new file mode 100644
index 0000000..9585501
--- /dev/null
+++ b/include/keys/encrypted-type.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _KEYS_ENCRYPTED_TYPE_H
+#define _KEYS_ENCRYPTED_TYPE_H
+
+#include <linux/key.h>
+#include <linux/rcupdate.h>
+
+struct encrypted_key_payload {
+	struct rcu_head rcu;
+	char *master_desc;	/* datablob: master key name */
+	char *datalen;		/* datablob: decrypted key length */
+	u8 *iv;			/* datablob: iv */
+	u8 *encrypted_data;	/* datablob: encrypted data */
+	unsigned short datablob_len;	/* length of datablob */
+	unsigned short decrypted_datalen;	/* decrypted data length */
+	u8 decrypted_data[0];	/* decrypted data +  datablob + hmac */
+};
+
+extern struct key_type key_type_encrypted;
+
+#endif /* _KEYS_ENCRYPTED_TYPE_H */
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
new file mode 100644
index 0000000..56f82e5
--- /dev/null
+++ b/include/keys/trusted-type.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Author: David Safford <safford@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _KEYS_TRUSTED_TYPE_H
+#define _KEYS_TRUSTED_TYPE_H
+
+#include <linux/key.h>
+#include <linux/rcupdate.h>
+
+#define MIN_KEY_SIZE			32
+#define MAX_KEY_SIZE			128
+#define MAX_BLOB_SIZE			320
+
+struct trusted_key_payload {
+	struct rcu_head rcu;
+	unsigned int key_len;
+	unsigned int blob_len;
+	unsigned char migratable;
+	unsigned char key[MAX_KEY_SIZE + 1];
+	unsigned char blob[MAX_BLOB_SIZE];
+};
+
+extern struct key_type key_type_trusted;
+
+#endif /* _KEYS_TRUSTED_TYPE_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a354c19..2296d8b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -20,15 +20,18 @@
 objhdr-y += version.h
 
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
-		  $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
+		  $(srctree)/include/asm-$(SRCARCH)/a.out.h \
+		  $(INSTALL_HDR_PATH)/include/asm-*/a.out.h),)
 header-y += a.out.h
 endif
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
-		  $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
+		  $(srctree)/include/asm-$(SRCARCH)/kvm.h \
+		  $(INSTALL_HDR_PATH)/include/asm-*/kvm.h),)
 header-y += kvm.h
 endif
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
-		  $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
+		  $(srctree)/include/asm-$(SRCARCH)/kvm_para.h \
+		  $(INSTALL_HDR_PATH)/include/asm-*/kvm_para.h),)
 header-y += kvm_para.h
 endif
 
@@ -155,6 +158,7 @@
 header-y += if.h
 header-y += if_addr.h
 header-y += if_addrlabel.h
+header-y += if_alg.h
 header-y += if_arcnet.h
 header-y += if_arp.h
 header-y += if_bonding.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 67c91b4..eb176bb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -352,4 +352,14 @@
 	return -1;
 }
 #endif	/* !CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_SLEEP
+int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+#endif
+
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 09ea4a1..eaf6cd7 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -102,10 +102,8 @@
 extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
 extern int agp_bind_memory(struct agp_memory *, off_t);
 extern int agp_unbind_memory(struct agp_memory *);
-extern int agp_rebind_memory(void);
 extern void agp_enable(struct agp_bridge_data *, u32);
 extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
 extern void agp_backend_release(struct agp_bridge_data *);
-extern void agp_flush_chipset(struct agp_bridge_data *);
 
 #endif				/* _AGP_BACKEND_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 8b5c062..359df04 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -372,6 +372,7 @@
 struct audit_context;
 struct inode;
 struct netlink_skb_parms;
+struct path;
 struct linux_binprm;
 struct mq_attr;
 struct mqstat;
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
index 904dec7..a69554ef 100644
--- a/include/linux/bfin_mac.h
+++ b/include/linux/bfin_mac.h
@@ -24,6 +24,7 @@
 	const unsigned short *mac_peripherals;
 	int phy_mode;
 	unsigned int phy_mask;
+	unsigned short vlan1_mask, vlan2_mask;
 };
 
 #endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 64a7114..c3d6512 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -25,7 +25,7 @@
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */
-struct linux_binprm{
+struct linux_binprm {
 	char buf[BINPRM_BUF_SIZE];
 #ifdef CONFIG_MMU
 	struct vm_area_struct *vma;
@@ -93,7 +93,6 @@
 	int (*load_shlib)(struct file *);
 	int (*core_dump)(struct coredump_params *cprm);
 	unsigned long min_coredump;	/* minimal dump size */
-	int hasvdso;
 };
 
 extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
@@ -113,7 +112,7 @@
 
 extern int prepare_binprm(struct linux_binprm *);
 extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
+extern int search_binary_handler(struct linux_binprm *, struct pt_regs *);
 extern int flush_old_exec(struct linux_binprm * bprm);
 extern void setup_new_exec(struct linux_binprm * bprm);
 
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 7113a32..e612575 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -1,6 +1,10 @@
 #ifndef __LINUX_BIT_SPINLOCK_H
 #define __LINUX_BIT_SPINLOCK_H
 
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <asm/atomic.h>
+
 /*
  *  bit-based spin_lock()
  *
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 36ab42c..4d18ff3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@
 	void *elevator_private3;
 
 	struct gendisk *rq_disk;
+	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
 	unsigned long long start_time_ns;
@@ -646,7 +647,6 @@
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
@@ -1256,6 +1256,9 @@
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
 						void **, unsigned long *);
+	unsigned int (*check_events) (struct gendisk *disk,
+				      unsigned int clearing);
+	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 90012b9..fb16a36 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -246,7 +246,6 @@
 /* Allow configuration of the secure attention key */
 /* Allow administration of the random device */
 /* Allow examination and configuration of disk quotas */
-/* Allow configuring the kernel's syslog (printk behaviour) */
 /* Allow setting the domainname */
 /* Allow setting the hostname */
 /* Allow calling bdflush() */
@@ -352,7 +351,11 @@
 
 #define CAP_MAC_ADMIN        33
 
-#define CAP_LAST_CAP         CAP_MAC_ADMIN
+/* Allow configuring the kernel's syslog (printk behaviour) */
+
+#define CAP_SYSLOG           34
+
+#define CAP_LAST_CAP         CAP_SYSLOG
 
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
 
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index f389e31..fb45919 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,8 +28,6 @@
 
 void cdev_del(struct cdev *);
 
-int cdev_index(struct inode *inode);
-
 void cd_forget(struct inode *);
 
 extern struct backing_dev_info directly_mappable_cdev_bdi;
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 78e9047..35eae4b 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -946,6 +946,8 @@
 /* device-related storage */
 	unsigned int options	: 30;	/* options flags */
 	unsigned mc_flags	: 2;	/* media change buffer flags */
+	unsigned int vfs_events;	/* cached events for vfs path */
+	unsigned int ioctl_events;	/* cached events for ioctl path */
     	int use_count;                  /* number of times device opened */
     	char name[20];                  /* name of the device type */
 /* per-device flags */
@@ -965,6 +967,8 @@
 	int (*open) (struct cdrom_device_info *, int);
 	void (*release) (struct cdrom_device_info *);
 	int (*drive_status) (struct cdrom_device_info *, int);
+	unsigned int (*check_events) (struct cdrom_device_info *cdi,
+				      unsigned int clearing, int slot);
 	int (*media_changed) (struct cdrom_device_info *, int);
 	int (*tray_move) (struct cdrom_device_info *, int);
 	int (*lock_door) (struct cdrom_device_info *, int);
@@ -993,6 +997,8 @@
 extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
 extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
 		       fmode_t mode, unsigned int cmd, unsigned long arg);
+extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
+				       unsigned int clearing);
 extern int cdrom_media_changed(struct cdrom_device_info *);
 
 extern int register_cdrom(struct cdrom_device_info *cdi);
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c3c74ae..09dcc0c 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -43,6 +43,10 @@
 #define CEPH_FEATURE_NOSRCADDR      (1<<1)
 #define CEPH_FEATURE_MONCLOCKCHECK  (1<<2)
 #define CEPH_FEATURE_FLOCK          (1<<3)
+#define CEPH_FEATURE_SUBSCRIBE2     (1<<4)
+#define CEPH_FEATURE_MONNAMES       (1<<5)
+#define CEPH_FEATURE_RECONNECT_SEQ  (1<<6)
+#define CEPH_FEATURE_DIRLAYOUTHASH  (1<<7)
 
 
 /*
@@ -55,10 +59,10 @@
 	__le32 fl_stripe_count;    /* over this many objects */
 	__le32 fl_object_size;     /* until objects are this big, then move to
 				      new objects */
-	__le32 fl_cas_hash;        /* 0 = none; 1 = sha256 */
+	__le32 fl_cas_hash;        /* UNUSED.  0 = none; 1 = sha256 */
 
 	/* pg -> disk layout */
-	__le32 fl_object_stripe_unit;  /* for per-object parity, if any */
+	__le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
 	/* object -> pg layout */
 	__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
@@ -69,6 +73,12 @@
 
 int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
 
+struct ceph_dir_layout {
+	__u8   dl_dir_hash;   /* see ceph_hash.h for ids */
+	__u8   dl_unused1;
+	__u16  dl_unused2;
+	__u32  dl_unused3;
+} __attribute__ ((packed));
 
 /* crypto algorithms */
 #define CEPH_CRYPTO_NONE 0x0
@@ -457,7 +467,7 @@
 	struct ceph_timespec rctime;
 	struct ceph_frag_tree_head fragtree;  /* (must be at end of struct) */
 } __attribute__ ((packed));
-/* followed by frag array, then symlink string, then xattr blob */
+/* followed by frag array, symlink string, dir layout, xattr blob */
 
 /* reply_lease follows dname, and reply_inode */
 struct ceph_mds_reply_lease {
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index a108b42..c3011be 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -110,17 +110,12 @@
 
 /*
  * ceph_connection state bit flags
- *
- * QUEUED and BUSY are used together to ensure that only a single
- * thread is currently opening, reading or writing data to the socket.
  */
 #define LOSSYTX         0  /* we can close channel or drop messages on errors */
 #define CONNECTING	1
 #define NEGOTIATING	2
 #define KEEPALIVE_PENDING      3
 #define WRITE_PENDING	4  /* we have data ready to send */
-#define QUEUED          5  /* there is work queued on this connection */
-#define BUSY            6  /* work is being done */
 #define STANDBY		8  /* no outgoing messages, socket closed.  we keep
 			    * the ceph_connection around to maintain shared
 			    * state with the peer. */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed4ba11..ce104e3 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -564,7 +564,7 @@
 /*
  * To iterate across the tasks in a cgroup:
  *
- * 1) call cgroup_iter_start to intialize an iterator
+ * 1) call cgroup_iter_start to initialize an iterator
  *
  * 2) call cgroup_iter_next() to retrieve member tasks until it
  *    returns NULL or until you want to end the iteration
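
The corrected comment describes the task-iteration protocol. A minimal sketch under the assumption that cgroup_iter_start(), cgroup_iter_next() and cgroup_iter_end() keep their usual (cgrp, it) signatures; cgroup_iter_end() completes the sequence even though it falls outside the quoted context:

#include <linux/cgroup.h>
#include <linux/sched.h>

static int my_count_tasks(struct cgroup *cgrp)
{
	struct cgroup_iter it;
	struct task_struct *task;
	int n = 0;

	cgroup_iter_start(cgrp, &it);
	while ((task = cgroup_iter_next(cgrp, &it)))
		n++;
	cgroup_iter_end(cgrp, &it);

	return n;
}
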
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
deleted file mode 100644
index 2e914d0..0000000
--- a/include/linux/coda_linux.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* 
- * Coda File System, Linux Kernel module
- * 
- * Original version, adapted from cfs_mach.c, (C) Carnegie Mellon University
- * Linux modifications (C) 1996, Peter J. Braam
- * Rewritten for Linux 2.1 (C) 1997 Carnegie Mellon University
- *
- * Carnegie Mellon University encourages users of this software to
- * contribute improvements to the Coda project.
- */
-
-#ifndef _LINUX_CODA_FS
-#define _LINUX_CODA_FS
-
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/wait.h>		
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/coda_fs_i.h>
-
-/* operations */
-extern const struct inode_operations coda_dir_inode_operations;
-extern const struct inode_operations coda_file_inode_operations;
-extern const struct inode_operations coda_ioctl_inode_operations;
-
-extern const struct address_space_operations coda_file_aops;
-extern const struct address_space_operations coda_symlink_aops;
-
-extern const struct file_operations coda_dir_operations;
-extern const struct file_operations coda_file_operations;
-extern const struct file_operations coda_ioctl_operations;
-
-/* operations shared over more than one file */
-int coda_open(struct inode *i, struct file *f);
-int coda_release(struct inode *i, struct file *f);
-int coda_permission(struct inode *inode, int mask);
-int coda_revalidate_inode(struct dentry *);
-int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-int coda_setattr(struct dentry *, struct iattr *);
-
-/* this file:  heloers */
-char *coda_f2s(struct CodaFid *f);
-int coda_isroot(struct inode *i);
-int coda_iscontrol(const char *name, size_t length);
-
-void coda_vattr_to_iattr(struct inode *, struct coda_vattr *);
-void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *);
-unsigned short coda_flags_to_cflags(unsigned short);
-
-/* sysctl.h */
-void coda_sysctl_init(void);
-void coda_sysctl_clean(void);
-
-#define CODA_ALLOC(ptr, cast, size) do { \
-    if (size < PAGE_SIZE) \
-        ptr = kmalloc((unsigned long) size, GFP_KERNEL); \
-    else \
-        ptr = (cast)vmalloc((unsigned long) size); \
-    if (!ptr) \
-        printk("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \
-    else memset( ptr, 0, size ); \
-} while (0)
-
-
-#define CODA_FREE(ptr,size) \
-    do { if (size < PAGE_SIZE) kfree((ptr)); else vfree((ptr)); } while (0)
-
-/* inode to cnode access functions */
-
-static inline struct coda_inode_info *ITOC(struct inode *inode)
-{
-	return list_entry(inode, struct coda_inode_info, vfs_inode);
-}
-
-static __inline__ struct CodaFid *coda_i2f(struct inode *inode)
-{
-	return &(ITOC(inode)->c_fid);
-}
-
-static __inline__ char *coda_i2s(struct inode *inode)
-{
-	return coda_f2s(&(ITOC(inode)->c_fid));
-}
-
-/* this will not zap the inode away */
-static __inline__ void coda_flag_inode(struct inode *inode, int flag)
-{
-	struct coda_inode_info *cii = ITOC(inode);
-
-	spin_lock(&cii->c_lock);
-	cii->c_flags |= flag;
-	spin_unlock(&cii->c_lock);
-}		
-
-#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 5ac5155..dfa2ed4 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -11,6 +11,9 @@
 /* The full zone was compacted */
 #define COMPACT_COMPLETE	3
 
+#define COMPACT_MODE_DIRECT_RECLAIM	0
+#define COMPACT_MODE_KSWAPD		1
+
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -21,7 +24,12 @@
 
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *mask);
+			int order, gfp_t gfp_mask, nodemask_t *mask,
+			bool sync);
+extern unsigned long compaction_suitable(struct zone *zone, int order);
+extern unsigned long compact_zone_order(struct zone *zone, int order,
+					gfp_t gfp_mask, bool sync,
+					int compact_mode);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -54,7 +62,20 @@
 
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
+{
+	return COMPACT_CONTINUE;
+}
+
+static inline unsigned long compaction_suitable(struct zone *zone, int order)
+{
+	return COMPACT_SKIPPED;
+}
+
+static inline unsigned long compact_zone_order(struct zone *zone, int order,
+					       gfp_t gfp_mask, bool sync,
+					       int compact_mode)
 {
 	return COMPACT_CONTINUE;
 }
diff --git a/include/linux/console.h b/include/linux/console.h
index 95cf6f0..9774fe6 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -126,6 +126,12 @@
 	struct	 console *next;
 };
 
+/*
+ * for_each_console() allows you to iterate on each console
+ */
+#define for_each_console(con) \
+	for (con = console_drivers; con != NULL; con = con->next)
+
 extern int console_set_on_cmdline;
 
 extern int add_preferred_console(char *name, int idx, char *options);
@@ -145,7 +151,7 @@
 extern int braille_register_console(struct console *, int index,
 		char *console_options, char *braille_options);
 extern int braille_unregister_console(struct console *);
-
+extern void console_sysfs_notify(void);
 extern int console_suspend_enabled;
 
 /* Suspend and resume console messages over PM events */
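
for_each_console() simply walks the console_drivers list, which is only stable while the console semaphore is held (acquire_console_sem()/release_console_sem() in this kernel). A hedged sketch:

#include <linux/console.h>
#include <linux/kernel.h>

static void my_list_consoles(void)
{
	struct console *con;

	acquire_console_sem();
	for_each_console(con)
		printk(KERN_INFO "console %s%d flags=0x%x\n",
		       con->name, con->index, con->flags);
	release_console_sem();
}
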
diff --git a/include/linux/cper.h b/include/linux/cper.h
index bf972f8..3104aaf 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -39,10 +39,12 @@
  * Severity difinition for error_severity in struct cper_record_header
  * and section_severity in struct cper_section_descriptor
  */
-#define CPER_SEV_RECOVERABLE			0x0
-#define CPER_SEV_FATAL				0x1
-#define CPER_SEV_CORRECTED			0x2
-#define CPER_SEV_INFORMATIONAL			0x3
+enum {
+	CPER_SEV_RECOVERABLE,
+	CPER_SEV_FATAL,
+	CPER_SEV_CORRECTED,
+	CPER_SEV_INFORMATIONAL,
+};
 
 /*
  * Validation bits difinition for validation_bits in struct
@@ -201,6 +203,47 @@
 	UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F,	\
 		0xDF, 0xAA, 0x84, 0xEC)
 
+#define CPER_PROC_VALID_TYPE			0x0001
+#define CPER_PROC_VALID_ISA			0x0002
+#define CPER_PROC_VALID_ERROR_TYPE		0x0004
+#define CPER_PROC_VALID_OPERATION		0x0008
+#define CPER_PROC_VALID_FLAGS			0x0010
+#define CPER_PROC_VALID_LEVEL			0x0020
+#define CPER_PROC_VALID_VERSION			0x0040
+#define CPER_PROC_VALID_BRAND_INFO		0x0080
+#define CPER_PROC_VALID_ID			0x0100
+#define CPER_PROC_VALID_TARGET_ADDRESS		0x0200
+#define CPER_PROC_VALID_REQUESTOR_ID		0x0400
+#define CPER_PROC_VALID_RESPONDER_ID		0x0800
+#define CPER_PROC_VALID_IP			0x1000
+
+#define CPER_MEM_VALID_ERROR_STATUS		0x0001
+#define CPER_MEM_VALID_PHYSICAL_ADDRESS		0x0002
+#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK	0x0004
+#define CPER_MEM_VALID_NODE			0x0008
+#define CPER_MEM_VALID_CARD			0x0010
+#define CPER_MEM_VALID_MODULE			0x0020
+#define CPER_MEM_VALID_BANK			0x0040
+#define CPER_MEM_VALID_DEVICE			0x0080
+#define CPER_MEM_VALID_ROW			0x0100
+#define CPER_MEM_VALID_COLUMN			0x0200
+#define CPER_MEM_VALID_BIT_POSITION		0x0400
+#define CPER_MEM_VALID_REQUESTOR_ID		0x0800
+#define CPER_MEM_VALID_RESPONDER_ID		0x1000
+#define CPER_MEM_VALID_TARGET_ID		0x2000
+#define CPER_MEM_VALID_ERROR_TYPE		0x4000
+
+#define CPER_PCIE_VALID_PORT_TYPE		0x0001
+#define CPER_PCIE_VALID_VERSION			0x0002
+#define CPER_PCIE_VALID_COMMAND_STATUS		0x0004
+#define CPER_PCIE_VALID_DEVICE_ID		0x0008
+#define CPER_PCIE_VALID_SERIAL_NUMBER		0x0010
+#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS	0x0020
+#define CPER_PCIE_VALID_CAPABILITY		0x0040
+#define CPER_PCIE_VALID_AER_INFO		0x0080
+
+#define CPER_PCIE_SLOT_SHIFT			3
+
 /*
  * All tables and structs must be byte-packed to match CPER
  * specification, since the tables are provided by the system BIOS
@@ -306,6 +349,41 @@
 	__u8	error_type;
 };
 
+struct cper_sec_pcie {
+	__u64		validation_bits;
+	__u32		port_type;
+	struct {
+		__u8	minor;
+		__u8	major;
+		__u8	reserved[2];
+	}		version;
+	__u16		command;
+	__u16		status;
+	__u32		reserved;
+	struct {
+		__u16	vendor_id;
+		__u16	device_id;
+		__u8	class_code[3];
+		__u8	function;
+		__u8	device;
+		__u16	segment;
+		__u8	bus;
+		__u8	secondary_bus;
+		__u16	slot;
+		__u8	reserved;
+	}		device_id;
+	struct {
+		__u32	lower;
+		__u32	upper;
+	}		serial_number;
+	struct {
+		__u16	secondary_status;
+		__u16	control;
+	}		bridge;
+	__u8	capability[60];
+	__u8	aer_info[96];
+};
+
 /* Reset to default packing */
 #pragma pack()
 
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 1be416b..36719ea 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -47,13 +47,7 @@
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
-#define CPUIDLE_FLAG_CHECK_BM	(0x02) /* BM activity will exit state */
-#define CPUIDLE_FLAG_POLL	(0x10) /* no latency, no savings */
-#define CPUIDLE_FLAG_SHALLOW	(0x20) /* low latency, minimal savings */
-#define CPUIDLE_FLAG_BALANCED	(0x40) /* medium latency, moderate savings */
-#define CPUIDLE_FLAG_DEEP	(0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
-#define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 6fc2bed..0e7bf27 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -84,9 +84,11 @@
 				| CRAMFS_FLAG_WRONG_SIGNATURE \
 				| CRAMFS_FLAG_SHIFTED_ROOT_OFFSET )
 
+#ifdef __KERNEL__
 /* Uncompression interfaces to the underlying zlib */
 int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen);
 int cramfs_uncompress_init(void);
 void cramfs_uncompress_exit(void);
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
index d5a1d48..6fe2114 100644
--- a/include/linux/cs5535.h
+++ b/include/linux/cs5535.h
@@ -103,14 +103,20 @@
 #define GPIO_POSITIVE_EDGE_STS	0x48
 #define GPIO_NEGATIVE_EDGE_STS	0x4C
 
+#define GPIO_FLTR7_AMOUNT	0xD8
+
 #define GPIO_MAP_X		0xE0
 #define GPIO_MAP_Y		0xE4
 #define GPIO_MAP_Z		0xE8
 #define GPIO_MAP_W		0xEC
 
+#define GPIO_FE7_SEL		0xF7
+
 void cs5535_gpio_set(unsigned offset, unsigned int reg);
 void cs5535_gpio_clear(unsigned offset, unsigned int reg);
 int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+int cs5535_gpio_set_irq(unsigned group, unsigned irq);
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme);
 
 /* MFGPTs */
 
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6a4aea3..59fcd24 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -4,7 +4,9 @@
 #include <asm/atomic.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
+#include <linux/rculist_bl.h>
 #include <linux/spinlock.h>
+#include <linux/seqlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
 
@@ -45,6 +47,27 @@
 };
 extern struct dentry_stat_t dentry_stat;
 
+/*
+ * Compare 2 name strings, return 0 if they match, otherwise non-zero.
+ * The strings are both count bytes long, and count is non-zero.
+ */
+static inline int dentry_cmp(const unsigned char *cs, size_t scount,
+				const unsigned char *ct, size_t tcount)
+{
+	int ret;
+	if (scount != tcount)
+		return 1;
+	do {
+		ret = (*cs != *ct);
+		if (ret)
+			break;
+		cs++;
+		ct++;
+		tcount--;
+	} while (tcount);
+	return ret;
+}
+
 /* Name hashing routines. Initial hash value */
 /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
 #define init_name_hash()		0
@@ -81,25 +104,33 @@
  * large memory footprint increase).
  */
 #ifdef CONFIG_64BIT
-#define DNAME_INLINE_LEN_MIN 32 /* 192 bytes */
+# define DNAME_INLINE_LEN 32 /* 192 bytes */
 #else
-#define DNAME_INLINE_LEN_MIN 40 /* 128 bytes */
+# ifdef CONFIG_SMP
+#  define DNAME_INLINE_LEN 36 /* 128 bytes */
+# else
+#  define DNAME_INLINE_LEN 40 /* 128 bytes */
+# endif
 #endif
 
 struct dentry {
-	atomic_t d_count;
+	/* RCU lookup touched fields */
 	unsigned int d_flags;		/* protected by d_lock */
-	spinlock_t d_lock;		/* per dentry lock */
-	int d_mounted;
-	struct inode *d_inode;		/* Where the name belongs to - NULL is
-					 * negative */
-	/*
-	 * The next three fields are touched by __d_lookup.  Place them here
-	 * so they all fit in a cache line.
-	 */
-	struct hlist_node d_hash;	/* lookup hash list */
+	seqcount_t d_seq;		/* per dentry seqlock */
+	struct hlist_bl_node d_hash;	/* lookup hash list */
 	struct dentry *d_parent;	/* parent directory */
 	struct qstr d_name;
+	struct inode *d_inode;		/* Where the name belongs to - NULL is
+					 * negative */
+	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */
+
+	/* Ref lookup also touches following */
+	unsigned int d_count;		/* protected by d_lock */
+	spinlock_t d_lock;		/* per dentry lock */
+	const struct dentry_operations *d_op;
+	struct super_block *d_sb;	/* The root of the dentry tree */
+	unsigned long d_time;		/* used by d_revalidate */
+	void *d_fsdata;			/* fs-specific data */
 
 	struct list_head d_lru;		/* LRU list */
 	/*
@@ -111,12 +142,6 @@
 	} d_u;
 	struct list_head d_subdirs;	/* our children */
 	struct list_head d_alias;	/* inode alias list */
-	unsigned long d_time;		/* used by d_revalidate */
-	const struct dentry_operations *d_op;
-	struct super_block *d_sb;	/* The root of the dentry tree */
-	void *d_fsdata;			/* fs-specific data */
-
-	unsigned char d_iname[DNAME_INLINE_LEN_MIN];	/* small names */
 };
 
 /*
@@ -133,97 +158,62 @@
 
 struct dentry_operations {
 	int (*d_revalidate)(struct dentry *, struct nameidata *);
-	int (*d_hash) (struct dentry *, struct qstr *);
-	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
-	int (*d_delete)(struct dentry *);
+	int (*d_hash)(const struct dentry *, const struct inode *,
+			struct qstr *);
+	int (*d_compare)(const struct dentry *, const struct inode *,
+			const struct dentry *, const struct inode *,
+			unsigned int, const char *, const struct qstr *);
+	int (*d_delete)(const struct dentry *);
 	void (*d_release)(struct dentry *);
 	void (*d_iput)(struct dentry *, struct inode *);
 	char *(*d_dname)(struct dentry *, char *, int);
-};
-
-/* the dentry parameter passed to d_hash and d_compare is the parent
- * directory of the entries to be compared. It is used in case these
- * functions need any directory specific information for determining
- * equivalency classes.  Using the dentry itself might not work, as it
- * might be a negative dentry which has no information associated with
- * it */
+} ____cacheline_aligned;
 
 /*
-locking rules:
-		big lock	dcache_lock	d_lock   may block
-d_revalidate:	no		no		no       yes
-d_hash		no		no		no       yes
-d_compare:	no		yes		yes      no
-d_delete:	no		yes		no       no
-d_release:	no		no		no       yes
-d_iput:		no		no		no       yes
+ * Locking rules for dentry_operations callbacks are to be found in
+ * Documentation/filesystems/Locking. Keep it updated!
+ *
+ * Further descriptions are found in Documentation/filesystems/vfs.txt.
+ * Keep it updated too!
  */
 
 /* d_flags entries */
 #define DCACHE_AUTOFS_PENDING 0x0001    /* autofs: "under construction" */
-#define DCACHE_NFSFS_RENAMED  0x0002    /* this dentry has been "silly
-					 * renamed" and has to be
-					 * deleted on the last dput()
-					 */
-#define	DCACHE_DISCONNECTED 0x0004
-     /* This dentry is possibly not currently connected to the dcache tree,
-      * in which case its parent will either be itself, or will have this
-      * flag as well.  nfsd will not use a dentry with this bit set, but will
-      * first endeavour to clear the bit either by discovering that it is
-      * connected, or by performing lookup operations.   Any filesystem which
-      * supports nfsd_operations MUST have a lookup function which, if it finds
-      * a directory inode with a DCACHE_DISCONNECTED dentry, will d_move
-      * that dentry into place and return that dentry rather than the passed one,
-      * typically using d_splice_alias.
-      */
+#define DCACHE_NFSFS_RENAMED  0x0002
+     /* this dentry has been "silly renamed" and has to be deleted on the last
+      * dput() */
+
+#define	DCACHE_DISCONNECTED	0x0004
+     /* This dentry is possibly not currently connected to the dcache tree, in
+      * which case its parent will either be itself, or will have this flag as
+      * well.  nfsd will not use a dentry with this bit set, but will first
+      * endeavour to clear the bit either by discovering that it is connected,
+      * or by performing lookup operations.   Any filesystem which supports
+      * nfsd_operations MUST have a lookup function which, if it finds a
+      * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
+      * dentry into place and return that dentry rather than the passed one,
+      * typically using d_splice_alias. */
 
 #define DCACHE_REFERENCED	0x0008  /* Recently used, don't discard. */
 #define DCACHE_UNHASHED		0x0010	
-
-#define DCACHE_INOTIFY_PARENT_WATCHED	0x0020 /* Parent inode is watched by inotify */
+#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020
+     /* Parent inode is watched by inotify */
 
 #define DCACHE_COOKIE		0x0040	/* For use by dcookie subsystem */
-
-#define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080
+     /* Parent inode is watched by some fsnotify listener */
 
 #define DCACHE_CANT_MOUNT	0x0100
+#define DCACHE_GENOCIDE		0x0200
+#define DCACHE_MOUNTED		0x0400	/* is a mountpoint */
 
-extern spinlock_t dcache_lock;
+#define DCACHE_OP_HASH		0x1000
+#define DCACHE_OP_COMPARE	0x2000
+#define DCACHE_OP_REVALIDATE	0x4000
+#define DCACHE_OP_DELETE	0x8000
+
 extern seqlock_t rename_lock;
 
-/**
- * d_drop - drop a dentry
- * @dentry: dentry to drop
- *
- * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
- * be found through a VFS lookup any more. Note that this is different from
- * deleting the dentry - d_delete will try to mark the dentry negative if
- * possible, giving a successful _negative_ lookup, while d_drop will
- * just make the cache lookup fail.
- *
- * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
- * reason (NFS timeouts or autofs deletes).
- *
- * __d_drop requires dentry->d_lock.
- */
-
-static inline void __d_drop(struct dentry *dentry)
-{
-	if (!(dentry->d_flags & DCACHE_UNHASHED)) {
-		dentry->d_flags |= DCACHE_UNHASHED;
-		hlist_del_rcu(&dentry->d_hash);
-	}
-}
-
-static inline void d_drop(struct dentry *dentry)
-{
-	spin_lock(&dcache_lock);
-	spin_lock(&dentry->d_lock);
- 	__d_drop(dentry);
-	spin_unlock(&dentry->d_lock);
-	spin_unlock(&dcache_lock);
-}
-
 static inline int dname_external(struct dentry *dentry)
 {
 	return dentry->d_name.name != dentry->d_iname;
@@ -235,10 +225,14 @@
 extern void d_instantiate(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern void __d_drop(struct dentry *dentry);
+extern void d_drop(struct dentry *dentry);
 extern void d_delete(struct dentry *);
+extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
 
 /* allocate/de-allocate */
 extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
 extern struct dentry * d_obtain_alias(struct inode *);
@@ -296,14 +290,40 @@
 	return res;
 }
 
+extern void dentry_update_name_case(struct dentry *, struct qstr *);
+
 /* used for rename() and baskets */
 extern void d_move(struct dentry *, struct dentry *);
 extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
 
 /* appendix may either be NULL or be used for transname suffixes */
-extern struct dentry * d_lookup(struct dentry *, struct qstr *);
-extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
-extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
+extern struct dentry *d_lookup(struct dentry *, struct qstr *);
+extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
+extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
+extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
+				unsigned *seq, struct inode **inode);
+
+/**
+ * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
+ * @dentry: dentry to take a ref on
+ * @seq: seqcount to verify against
+ * Returns: 0 on failure, else 1.
+ *
+ * __d_rcu_to_refcount operates on a dentry,seq pair that was returned
+ * by __d_lookup_rcu, to get a reference on an rcu-walk dentry.
+ */
+static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
+{
+	int ret = 0;
+
+	assert_spin_locked(&dentry->d_lock);
+	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
+		ret = 1;
+		dentry->d_count++;
+	}
+
+	return ret;
+}
 
 /* validate "insecure" dentry pointer */
 extern int d_validate(struct dentry *, struct dentry *);
@@ -316,34 +336,37 @@
 extern char *__d_path(const struct path *path, struct path *root, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
-extern char *__dentry_path(struct dentry *, char *, int);
+extern char *dentry_path_raw(struct dentry *, char *, int);
 extern char *dentry_path(struct dentry *, char *, int);
 
 /* Allocation counts.. */
 
 /**
- *	dget, dget_locked	-	get a reference to a dentry
+ *	dget, dget_dlock -	get a reference to a dentry
  *	@dentry: dentry to get a reference to
  *
  *	Given a dentry or %NULL pointer increment the reference count
  *	if appropriate and return the dentry. A dentry will not be 
- *	destroyed when it has references. dget() should never be
- *	called for dentries with zero reference counter. For these cases
- *	(preferably none, functions in dcache.c are sufficient for normal
- *	needs and they take necessary precautions) you should hold dcache_lock
- *	and call dget_locked() instead of dget().
+ *	destroyed when it has references.
  */
- 
+static inline struct dentry *dget_dlock(struct dentry *dentry)
+{
+	if (dentry)
+		dentry->d_count++;
+	return dentry;
+}
+
 static inline struct dentry *dget(struct dentry *dentry)
 {
 	if (dentry) {
-		BUG_ON(!atomic_read(&dentry->d_count));
-		atomic_inc(&dentry->d_count);
+		spin_lock(&dentry->d_lock);
+		dget_dlock(dentry);
+		spin_unlock(&dentry->d_lock);
 	}
 	return dentry;
 }
 
-extern struct dentry * dget_locked(struct dentry *);
+extern struct dentry *dget_parent(struct dentry *dentry);
 
 /**
  *	d_unhashed -	is dentry hashed
@@ -374,21 +397,11 @@
 	spin_unlock(&dentry->d_lock);
 }
 
-static inline struct dentry *dget_parent(struct dentry *dentry)
-{
-	struct dentry *ret;
-
-	spin_lock(&dentry->d_lock);
-	ret = dget(dentry->d_parent);
-	spin_unlock(&dentry->d_lock);
-	return ret;
-}
-
 extern void dput(struct dentry *);
 
 static inline int d_mountpoint(struct dentry *dentry)
 {
-	return dentry->d_mounted;
+	return dentry->d_flags & DCACHE_MOUNTED;
 }
 
 extern struct vfsmount *lookup_mnt(struct path *);
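
The seven-argument d_compare() above receives the candidate name as a (len, str) pair plus the qstr being looked up. A hypothetical case-sensitive implementation (myfs_d_compare is not part of the patch) can lean on the dentry_cmp() helper added in the same hunk; it must not sleep, since it may run during RCU-walk:

#include <linux/dcache.h>

static int myfs_d_compare(const struct dentry *parent,
			  const struct inode *pinode,
			  const struct dentry *dentry,
			  const struct inode *inode,
			  unsigned int len, const char *str,
			  const struct qstr *name)
{
	/* Return 0 on a match, non-zero otherwise, matching the
	 * default exact-compare behaviour. */
	return dentry_cmp((const unsigned char *)str, len,
			  name->name, name->len);
}

Such a hook would be wired up through a const struct dentry_operations installed with the new d_set_d_op() helper.
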
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
index 24c806f..5ac3bdd 100644
--- a/include/linux/dcookies.h
+++ b/include/linux/dcookies.h
@@ -13,10 +13,10 @@
 #ifdef CONFIG_PROFILING
  
 #include <linux/dcache.h>
-#include <linux/path.h>
 #include <linux/types.h>
  
 struct dcookie_user;
+struct path;
  
 /**
  * dcookie_register - register a user of dcookies
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
index f9b06cc..8c0aef1 100644
--- a/include/linux/decompress/inflate.h
+++ b/include/linux/decompress/inflate.h
@@ -1,9 +1,6 @@
 #ifndef INFLATE_H
 #define INFLATE_H
 
-/* Other housekeeping constants */
-#define INBUFSIZ 4096
-
 int gunzip(unsigned char *inbuf, int len,
 	   int(*fill)(void*, unsigned int),
 	   int(*flush)(void*, unsigned int),
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index ad5ec1d..4cb72b9 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -61,8 +61,6 @@
 #define large_malloc(a) malloc(a)
 #define large_free(a) free(a)
 
-#define set_error_fn(x)
-
 #define INIT
 
 #else /* STATIC */
@@ -72,6 +70,7 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 /* Use defines rather than static inline in order to avoid spurious
@@ -84,9 +83,6 @@
 #define large_malloc(a) vmalloc(a)
 #define large_free(a) vfree(a)
 
-static void(*error)(char *m);
-#define set_error_fn(x) error = x;
-
 #define INIT __init
 #define STATIC
 
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
new file mode 100644
index 0000000..41728fc
--- /dev/null
+++ b/include/linux/decompress/unxz.h
@@ -0,0 +1,19 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef DECOMPRESS_UNXZ_H
+#define DECOMPRESS_UNXZ_H
+
+int unxz(unsigned char *in, int in_size,
+	 int (*fill)(void *dest, unsigned int size),
+	 int (*flush)(void *src, unsigned int size),
+	 unsigned char *out, int *in_used,
+	 void (*error)(char *x));
+
+#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 2970022..272496d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -193,6 +193,13 @@
 	char *error;
 };
 
+/* Each target can link one of these into the table */
+struct dm_target_callbacks {
+	struct list_head list;
+	int (*congested_fn) (struct dm_target_callbacks *, int);
+	void (*unplug_fn)(struct dm_target_callbacks *);
+};
+
 int dm_register_target(struct target_type *t);
 void dm_unregister_target(struct target_type *t);
 
@@ -269,6 +276,11 @@
 			sector_t start, sector_t len, char *params);
 
 /*
+ * Target_ctr should call this if it needs to add any callbacks.
+ */
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
+
+/*
  * Finally call this to make the table ready for use.
  */
 int dm_table_complete(struct dm_table *t);
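
As a rough sketch only (the embedding of the callbacks in target-private data
and the ti->table access are assumptions, not part of this patch), a target
constructor could hook the new callback list like this:

	static int my_congested(struct dm_target_callbacks *cb, int bdi_bits)
	{
		return 0;	/* this sketch never reports congestion */
	}

	/* in the target's ctr(): */
	mt->callbacks.congested_fn = my_congested;
	dm_table_add_target_callbacks(ti->table, &mt->callbacks);
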
diff --git a/include/linux/device.h b/include/linux/device.h
index dd48953..1bf5cf0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -30,9 +30,8 @@
 struct device_driver;
 struct driver_private;
 struct class;
-struct class_private;
+struct subsys_private;
 struct bus_type;
-struct bus_type_private;
 struct device_node;
 
 struct bus_attribute {
@@ -65,7 +64,7 @@
 
 	const struct dev_pm_ops *pm;
 
-	struct bus_type_private *p;
+	struct subsys_private *p;
 };
 
 extern int __must_check bus_register(struct bus_type *bus);
@@ -197,6 +196,7 @@
 
 	struct class_attribute		*class_attrs;
 	struct device_attribute		*dev_attrs;
+	struct bin_attribute		*dev_bin_attrs;
 	struct kobject			*dev_kobj;
 
 	int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
@@ -213,7 +213,7 @@
 
 	const struct dev_pm_ops *pm;
 
-	struct class_private *p;
+	struct subsys_private *p;
 };
 
 struct class_dev_iter {
@@ -508,13 +508,13 @@
 
 static inline void device_enable_async_suspend(struct device *dev)
 {
-	if (dev->power.status == DPM_ON)
+	if (!dev->power.in_suspend)
 		dev->power.async_suspend = true;
 }
 
 static inline void device_disable_async_suspend(struct device *dev)
 {
-	if (dev->power.status == DPM_ON)
+	if (!dev->power.in_suspend)
 		dev->power.async_suspend = false;
 }
 
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 49eab36..78bbf47 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -44,7 +44,7 @@
  * Remove a device, destroy any tables.
  *
  * DM_DEV_RENAME:
- * Rename a device.
+ * Rename a device or set its uuid if none was previously supplied.
  *
  * DM_SUSPEND:
  * This performs both suspend and resume, depending which flag is
@@ -267,9 +267,9 @@
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	18
-#define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2010-06-29)"
+#define DM_VERSION_MINOR	19
+#define DM_VERSION_PATCHLEVEL	1
+#define DM_VERSION_EXTRA	"-ioctl (2011-01-07)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
@@ -322,4 +322,10 @@
  */
 #define DM_UEVENT_GENERATED_FLAG	(1 << 13) /* Out */
 
+/*
+ * If set, rename changes the uuid not the name.  Only permitted
+ * if no uuid was previously supplied: an existing uuid cannot be changed.
+ */
+#define DM_UUID_FLAG			(1 << 14) /* In */
+
 #endif				/* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h
index 0c3c3a2..eeace7d 100644
--- a/include/linux/dm-log-userspace.h
+++ b/include/linux/dm-log-userspace.h
@@ -370,6 +370,16 @@
 #define DM_ULOG_REQUEST_TYPE(request_type) \
 	(DM_ULOG_REQUEST_MASK & (request_type))
 
+/*
+ * DM_ULOG_REQUEST_VERSION is incremented when there is a
+ * change to the way information is passed between kernel
+ * and userspace.  This could be a structure change of
+ * dm_ulog_request or a change in the way requests are
+ * issued/handled.  Changes are outlined here:
+ *	version 1:  Initial implementation
+ */
+#define DM_ULOG_REQUEST_VERSION 1
+
 struct dm_ulog_request {
 	/*
 	 * The local unique identifier (luid) and the universally unique
@@ -383,8 +393,9 @@
 	 */
 	uint64_t luid;
 	char uuid[DM_UUID_LEN];
-	char padding[7];        /* Padding because DM_UUID_LEN = 129 */
+	char padding[3];        /* Padding because DM_UUID_LEN = 129 */
 
+	uint32_t version;       /* See DM_ULOG_REQUEST_VERSION */
 	int32_t error;          /* Used to report back processing errors */
 
 	uint32_t seq;           /* Sequence number for request */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a90b389..1c70028 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -44,34 +44,24 @@
 extern int ddebug_remove_module(const char *mod_name);
 
 #define dynamic_pr_debug(fmt, ...) do {					\
-	__label__ do_printk;						\
-	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
 		_DPRINTK_FLAGS_DEFAULT };				\
-	JUMP_LABEL(&descriptor.enabled, do_printk);			\
-	goto out;							\
-do_printk:								\
-	printk(KERN_DEBUG pr_fmt(fmt),	##__VA_ARGS__);			\
-out:	;								\
+	if (unlikely(descriptor.enabled))				\
+		printk(KERN_DEBUG pr_fmt(fmt),	##__VA_ARGS__);		\
 	} while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {				\
-	__label__ do_printk;						\
-	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
 		_DPRINTK_FLAGS_DEFAULT };				\
-	JUMP_LABEL(&descriptor.enabled, do_printk);			\
-	goto out;							\
-do_printk:								\
-	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);		\
-out:	;								\
+	if (unlikely(descriptor.enabled))				\
+		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 	} while (0)
 
 #else
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 4fd978e..4d85797 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val)				\
-	do {							\
-		preempt_disable();				\
-		__get_cpu_var(name) += (__val);			\
-		preempt_enable();				\
-	} while (0)
-
-#define elv_ioc_count_inc(name)	elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)	elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)	this_cpu_inc(name)
+#define elv_ioc_count_dec(name)	this_cpu_dec(name)
 
 #define elv_ioc_count_read(name)				\
 ({								\
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f16a010..ab68f78 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -48,8 +48,10 @@
 
 
 
-extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
+extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+					    unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
+#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
 /**
  * is_zero_ether_addr - Determine if give Ethernet address is all zeros.
@@ -97,6 +99,17 @@
 }
 
 /**
+ * is_unicast_ether_addr - Determine if the Ethernet address is unicast
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a unicast address.
+ */
+static inline int is_unicast_ether_addr(const u8 *addr)
+{
+	return !is_multicast_ether_addr(addr);
+}
+
+/**
  * is_valid_ether_addr - Determine if the given Ethernet address is valid
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
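
For illustration only (struct my_priv and the queue counts are made up), a
driver with asymmetric queue counts can now allocate its netdev in one call,
while alloc_etherdev_mq() keeps its old behaviour via the new wrapper:

	struct net_device *dev;

	/* 4 TX queues, 2 RX queues */
	dev = alloc_etherdev_mqs(sizeof(struct my_priv), 4, 2);
	if (!dev)
		return -ENOMEM;
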
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 6ce1bca..65990ef 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -724,21 +724,30 @@
 					 ~EXT3_DIR_ROUND)
 #define EXT3_MAX_REC_LEN		((1<<16)-1)
 
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
 static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == EXT3_MAX_REC_LEN)
 		return 1 << 16;
+#endif
 	return len;
 }
 
 static inline __le16 ext3_rec_len_to_disk(unsigned len)
 {
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT3_MAX_REC_LEN);
 	else if (len > (1 << 16))
 		BUG();
+#endif
 	return cpu_to_le16(len);
 }
 
@@ -856,6 +865,7 @@
 extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
 extern void ext3_init_block_alloc_info(struct inode *);
 extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
+extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
 
 /* dir.c */
 extern int ext3_check_dir_entry(const char *, struct inode *,
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 3c15510..73e0b62 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -2,6 +2,7 @@
 #define _FALLOC_H_
 
 #define FALLOC_FL_KEEP_SIZE	0x01 /* default is extend size */
+#define FALLOC_FL_PUNCH_HOLE	0x02 /* de-allocates range */
 
 #ifdef __KERNEL__
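
A user-space sketch of the new flag (assuming the C library exposes
fallocate(2) and the filesystem supports hole punching; offsets are
illustrative). PUNCH_HOLE is normally combined with KEEP_SIZE so the
apparent file size does not change:

	/* deallocate 1 MiB starting at offset 4 MiB */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20) < 0)
		perror("fallocate");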
 
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 5d3523d..bcff455 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -3,6 +3,8 @@
  * Copyright (c) 2009 Orex Computed Radiography
  *   Baruch Siach <baruch@tkos.co.il>
  *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
  * Header file for the FEC platform data
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,6 +18,7 @@
 
 struct fec_platform_data {
 	phy_interface_t phy;
+	unsigned char mac[ETH_ALEN];
 };
 
 #endif
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 68c642d..59ea406 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -273,7 +273,7 @@
  * @closure:	See &fw_cdev_event_common;
  *		set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
  * @type:	%FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
- * @completed:	Offset into the receive buffer; data before this offest is valid
+ * @completed:	Offset into the receive buffer; data before this offset is valid
  *
  * This event is sent in multichannel contexts (context type
  * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 1cd637e..9a3f5f9 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -302,9 +302,9 @@
 struct fw_transaction {
 	int node_id; /* The generation is implied; it is always the current. */
 	int tlabel;
-	int timestamp;
 	struct list_head link;
 	struct fw_card *card;
+	bool is_split_transaction;
 	struct timer_list split_timeout_timer;
 
 	struct fw_packet packet;
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index c6dcc1d..43fe52fc 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -17,7 +17,6 @@
 #define _LINUX_FIRMWARE_MAP_H
 
 #include <linux/list.h>
-#include <linux/kobject.h>
 
 /*
  * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index 631b77f..70e4efa 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -71,7 +71,7 @@
 int flex_array_shrink(struct flex_array *fa);
 
 #define flex_array_put_ptr(fa, nr, src, gfp) \
-	flex_array_put(fa, nr, &(void *)(src), gfp)
+	flex_array_put(fa, nr, (void *)&(src), gfp)
 
 void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 090f0ea..08824e0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -382,7 +382,6 @@
 #include <linux/path.h>
 #include <linux/stat.h>
 #include <linux/cache.h>
-#include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/radix-tree.h>
 #include <linux/prio_tree.h>
@@ -392,6 +391,7 @@
 #include <linux/capability.h>
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
+#include <linux/rculist_bl.h>
 
 #include <asm/atomic.h>
 #include <asm/byteorder.h>
@@ -401,6 +401,7 @@
 struct iovec;
 struct nameidata;
 struct kiocb;
+struct kobject;
 struct pipe_inode_info;
 struct poll_table_struct;
 struct kstatfs;
@@ -663,8 +664,9 @@
 	void *			bd_claiming;
 	void *			bd_holder;
 	int			bd_holders;
+	bool			bd_write_holder;
 #ifdef CONFIG_SYSFS
-	struct list_head	bd_holder_list;
+	struct list_head	bd_holder_disks;
 #endif
 	struct block_device *	bd_contains;
 	unsigned		bd_block_size;
@@ -733,16 +735,31 @@
 #define ACL_NOT_CACHED ((void *)(-1))
 
 struct inode {
+	/* RCU path lookup touches following: */
+	umode_t			i_mode;
+	uid_t			i_uid;
+	gid_t			i_gid;
+	const struct inode_operations	*i_op;
+	struct super_block	*i_sb;
+
+	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
+	unsigned int		i_flags;
+	struct mutex		i_mutex;
+
+	unsigned long		i_state;
+	unsigned long		dirtied_when;	/* jiffies of first dirtying */
+
 	struct hlist_node	i_hash;
 	struct list_head	i_wb_list;	/* backing dev IO list */
 	struct list_head	i_lru;		/* inode LRU list */
 	struct list_head	i_sb_list;
-	struct list_head	i_dentry;
+	union {
+		struct list_head	i_dentry;
+		struct rcu_head		i_rcu;
+	};
 	unsigned long		i_ino;
 	atomic_t		i_count;
 	unsigned int		i_nlink;
-	uid_t			i_uid;
-	gid_t			i_gid;
 	dev_t			i_rdev;
 	unsigned int		i_blkbits;
 	u64			i_version;
@@ -755,13 +772,8 @@
 	struct timespec		i_ctime;
 	blkcnt_t		i_blocks;
 	unsigned short          i_bytes;
-	umode_t			i_mode;
-	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
-	struct mutex		i_mutex;
 	struct rw_semaphore	i_alloc_sem;
-	const struct inode_operations	*i_op;
 	const struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
-	struct super_block	*i_sb;
 	struct file_lock	*i_flock;
 	struct address_space	*i_mapping;
 	struct address_space	i_data;
@@ -782,11 +794,6 @@
 	struct hlist_head	i_fsnotify_marks;
 #endif
 
-	unsigned long		i_state;
-	unsigned long		dirtied_when;	/* jiffies of first dirtying */
-
-	unsigned int		i_flags;
-
 #ifdef CONFIG_IMA
 	/* protected by i_lock */
 	unsigned int		i_readcount; /* struct files open RO */
@@ -1059,7 +1066,6 @@
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *);
-	int (*fl_mylease)(struct file_lock *, struct file_lock *);
 	int (*fl_change)(struct file_lock **, int);
 };
 
@@ -1372,13 +1378,13 @@
 	const struct xattr_handler **s_xattr;
 
 	struct list_head	s_inodes;	/* all inodes */
-	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+	struct hlist_bl_head	s_anon;		/* anonymous dentries for (nfs) exporting */
 #ifdef CONFIG_SMP
 	struct list_head __percpu *s_files;
 #else
 	struct list_head	s_files;
 #endif
-	/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
+	/* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
 
@@ -1417,6 +1423,7 @@
 	 * generic_show_options()
 	 */
 	char __rcu *s_options;
+	const struct dentry_operations *s_d_op; /* default d_op for dentries */
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1545,9 +1552,18 @@
 	int (*setlease)(struct file *, long, struct file_lock **);
 };
 
+#define IPERM_FLAG_RCU	0x0001
+
 struct inode_operations {
-	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
 	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+	void * (*follow_link) (struct dentry *, struct nameidata *);
+	int (*permission) (struct inode *, int, unsigned int);
+	int (*check_acl)(struct inode *, int, unsigned int);
+
+	int (*readlink) (struct dentry *, char __user *,int);
+	void (*put_link) (struct dentry *, struct nameidata *, void *);
+
+	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
 	int (*link) (struct dentry *,struct inode *,struct dentry *);
 	int (*unlink) (struct inode *,struct dentry *);
 	int (*symlink) (struct inode *,struct dentry *,const char *);
@@ -1556,12 +1572,7 @@
 	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
 	int (*rename) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *);
-	int (*readlink) (struct dentry *, char __user *,int);
-	void * (*follow_link) (struct dentry *, struct nameidata *);
-	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
-	int (*permission) (struct inode *, int);
-	int (*check_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1573,7 +1584,7 @@
 			  loff_t len);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
 		      u64 len);
-};
+} ____cacheline_aligned;
 
 struct seq_file;
 
@@ -1824,7 +1835,9 @@
 			int (*set)(struct super_block *,void *),
 			void *data);
 extern struct dentry *mount_pseudo(struct file_system_type *, char *,
-	const struct super_operations *ops, unsigned long);
+	const struct super_operations *ops,
+	const struct dentry_operations *dops,
+	unsigned long);
 extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 
 static inline void sb_mark_dirty(struct super_block *sb)
@@ -2006,7 +2019,6 @@
 extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
-extern struct block_device *open_by_devnum(dev_t, fmode_t);
 extern void invalidate_bdev(struct block_device *);
 extern int sync_blockdev(struct block_device *bdev);
 extern struct super_block *freeze_bdev(struct block_device *);
@@ -2037,16 +2049,26 @@
 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *, fmode_t);
-extern int blkdev_put(struct block_device *, fmode_t);
-extern int bd_claim(struct block_device *, void *);
-extern void bd_release(struct block_device *);
+extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
+extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					       void *holder);
+extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
+					      void *holder);
+extern int blkdev_put(struct block_device *bdev, fmode_t mode);
 #ifdef CONFIG_SYSFS
-extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
-extern void bd_release_from_disk(struct block_device *, struct gendisk *);
+extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+extern void bd_unlink_disk_holder(struct block_device *bdev,
+				  struct gendisk *disk);
 #else
-#define bd_claim_by_disk(bdev, holder, disk)	bd_claim(bdev, holder)
-#define bd_release_from_disk(bdev, disk)	bd_release(bdev)
+static inline int bd_link_disk_holder(struct block_device *bdev,
+				      struct gendisk *disk)
+{
+	return 0;
+}
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+					 struct gendisk *disk)
+{
+}
 #endif
 #endif
 
@@ -2082,8 +2104,6 @@
 extern const char *__bdevname(dev_t, char *buffer);
 extern const char *bdevname(struct block_device *bdev, char *buffer);
 extern struct block_device *lookup_bdev(const char *);
-extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
-extern void close_bdev_exclusive(struct block_device *, fmode_t);
 extern void blkdev_show(struct seq_file *,off_t);
 
 #else
@@ -2158,8 +2178,8 @@
 #endif
 extern int notify_change(struct dentry *, struct iattr *);
 extern int inode_permission(struct inode *, int);
-extern int generic_permission(struct inode *, int,
-		int (*check_acl)(struct inode *, int));
+extern int generic_permission(struct inode *, int, unsigned int,
+		int (*check_acl)(struct inode *, int, unsigned int));
 
 static inline bool execute_ok(struct inode *inode)
 {
@@ -2230,6 +2250,7 @@
 extern void end_writeback(struct inode *);
 extern void __destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
+extern void free_inode_nonrcu(struct inode *inode);
 extern int should_remove_suid(struct dentry *);
 extern int file_remove_suid(struct file *);
 
@@ -2446,6 +2467,10 @@
 {
 	ino_t res;
 
+	/*
+	 * Don't strictly need d_lock here? If the parent ino could change
+	 * then surely we'd have a deeper race in the caller?
+	 */
 	spin_lock(&dentry->d_lock);
 	res = dentry->d_parent->d_inode->i_ino;
 	spin_unlock(&dentry->d_lock);
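
Not from this series, but a sketch of how a user of the removed
open_bdev_exclusive()/close_bdev_exclusive() pair might migrate to the
holder-based API (the path string, mode flags and the use of sb as the
holder cookie are illustrative):

	struct block_device *bdev;

	bdev = blkdev_get_by_path("/dev/journal", FMODE_READ | FMODE_WRITE, sb);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... use the device ... */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE);
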
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index a42b5bf..003dc0f 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -2,10 +2,13 @@
 #define _LINUX_FS_STRUCT_H
 
 #include <linux/path.h>
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
 
 struct fs_struct {
 	int users;
 	spinlock_t lock;
+	seqcount_t seq;
 	int umask;
 	int in_exec;
 	struct path root, pwd;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index b10bcde..2a53f10 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -17,7 +17,6 @@
 
 /*
  * fsnotify_d_instantiate - instantiate a dentry for inode
- * Called with dcache_lock held.
  */
 static inline void fsnotify_d_instantiate(struct dentry *dentry,
 					  struct inode *inode)
@@ -62,7 +61,6 @@
 
 /*
  * fsnotify_d_move - dentry has been moved
- * Called with dcache_lock and dentry->d_lock held.
  */
 static inline void fsnotify_d_move(struct dentry *dentry)
 {
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 7380763..69ad89b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -329,9 +329,15 @@
 {
 	struct dentry *parent;
 
-	assert_spin_locked(&dcache_lock);
 	assert_spin_locked(&dentry->d_lock);
 
+	/*
+	 * Serialisation of setting PARENT_WATCHED on the dentries is provided
+	 * by d_lock. If inotify_inode_watched changes after we have taken
+	 * d_lock, the following __fsnotify_update_child_dentry_flags call will
+	 * find our entry, so it will spin until we complete here, and update
+	 * us with the new state.
+	 */
 	parent = dentry->d_parent;
 	if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
 		dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
@@ -341,15 +347,12 @@
 
 /*
  * fsnotify_d_instantiate - instantiate a dentry for inode
- * Called with dcache_lock held.
  */
 static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
 {
 	if (!inode)
 		return;
 
-	assert_spin_locked(&dcache_lock);
-
 	spin_lock(&dentry->d_lock);
 	__fsnotify_update_dcache_flags(dentry);
 	spin_unlock(&dentry->d_lock);
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index c3c578e..d464de5 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -41,6 +41,12 @@
  * 7.15
  *  - add store notify
  *  - add retrieve notify
+ *
+ * 7.16
+ *  - add BATCH_FORGET request
+ *  - FUSE_IOCTL_UNRESTRICTED shall now return an array of 'struct
+ *    fuse_ioctl_iovec' instead of the ambiguous 'struct iovec'
+ *  - add FUSE_IOCTL_32BIT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -72,7 +78,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 15
+#define FUSE_KERNEL_MINOR_VERSION 16
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -200,12 +206,14 @@
  * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine
  * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed
  * FUSE_IOCTL_RETRY: retry with new iovecs
+ * FUSE_IOCTL_32BIT: 32bit ioctl
  *
  * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs
  */
 #define FUSE_IOCTL_COMPAT	(1 << 0)
 #define FUSE_IOCTL_UNRESTRICTED	(1 << 1)
 #define FUSE_IOCTL_RETRY	(1 << 2)
+#define FUSE_IOCTL_32BIT	(1 << 3)
 
 #define FUSE_IOCTL_MAX_IOV	256
 
@@ -256,6 +264,7 @@
 	FUSE_IOCTL         = 39,
 	FUSE_POLL          = 40,
 	FUSE_NOTIFY_REPLY  = 41,
+	FUSE_BATCH_FORGET  = 42,
 
 	/* CUSE specific operations */
 	CUSE_INIT          = 4096,
@@ -290,6 +299,16 @@
 	__u64	nlookup;
 };
 
+struct fuse_forget_one {
+	__u64	nodeid;
+	__u64	nlookup;
+};
+
+struct fuse_batch_forget_in {
+	__u32	count;
+	__u32	dummy;
+};
+
 struct fuse_getattr_in {
 	__u32	getattr_flags;
 	__u32	dummy;
@@ -510,6 +529,11 @@
 	__u32	out_size;
 };
 
+struct fuse_ioctl_iovec {
+	__u64	base;
+	__u64	len;
+};
+
 struct fuse_ioctl_out {
 	__s32	result;
 	__u32	flags;
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h
index 574bea4..0437e37 100644
--- a/include/linux/generic_acl.h
+++ b/include/linux/generic_acl.h
@@ -10,6 +10,6 @@
 
 int generic_acl_init(struct inode *, struct inode *);
 int generic_acl_chmod(struct inode *);
-int generic_check_acl(struct inode *inode, int mask);
+int generic_check_acl(struct inode *inode, int mask, unsigned int flags);
 
 #endif /* LINUX_GENERIC_ACL_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 7a7b9c1..c0d5f69 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -115,6 +115,7 @@
 #else
 	struct disk_stats dkstats;
 #endif
+	atomic_t ref;
 	struct rcu_head rcu_head;
 };
 
@@ -127,6 +128,11 @@
 #define GENHD_FL_EXT_DEVT			64 /* allow extended devt */
 #define GENHD_FL_NATIVE_CAPACITY		128
 
+enum {
+	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
+	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
+};
+
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 
@@ -143,6 +149,8 @@
 	struct hd_struct __rcu *part[];
 };
 
+struct disk_events;
+
 struct gendisk {
 	/* major, first_minor and minors are input parameters only,
 	 * don't use directly.  Use disk_devt() and disk_max_parts().
@@ -154,6 +162,10 @@
 
 	char disk_name[DISK_NAME_LEN];	/* name of major driver */
 	char *(*devnode)(struct gendisk *gd, mode_t *mode);
+
+	unsigned int events;		/* supported events */
+	unsigned int async_events;	/* async events, subset of all */
+
 	/* Array of pointers to partitions indexed by partno.
 	 * Protected with matching bdev lock but stat and other
 	 * non-critical accesses use RCU.  Always access through
@@ -171,9 +183,8 @@
 	struct kobject *slave_dir;
 
 	struct timer_rand_state *random;
-
 	atomic_t sync_io;		/* RAID */
-	struct work_struct async_notify;
+	struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
 	struct blk_integrity *integrity;
 #endif
@@ -395,7 +406,6 @@
 /* block/genhd.c */
 extern void add_disk(struct gendisk *disk);
 extern void del_gendisk(struct gendisk *gp);
-extern void unlink_gendisk(struct gendisk *gp);
 extern struct gendisk *get_gendisk(dev_t dev, int *partno);
 extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
 
@@ -407,6 +417,11 @@
 	return disk->part0.policy;
 }
 
+extern void disk_block_events(struct gendisk *disk);
+extern void disk_unblock_events(struct gendisk *disk);
+extern void disk_check_events(struct gendisk *disk);
+extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
+
 /* drivers/char/random.c */
 extern void add_disk_randomness(struct gendisk *disk);
 extern void rand_initialize_disk(struct gendisk *disk);
@@ -583,6 +598,7 @@
 						     sector_t len, int flags,
 						     struct partition_meta_info
 						       *info);
+extern void __delete_partition(struct hd_struct *);
 extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
@@ -611,6 +627,29 @@
 			       const char *buf, size_t count);
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
+static inline void hd_ref_init(struct hd_struct *part)
+{
+	atomic_set(&part->ref, 1);
+	smp_mb();
+}
+
+static inline void hd_struct_get(struct hd_struct *part)
+{
+	atomic_inc(&part->ref);
+	smp_mb__after_atomic_inc();
+}
+
+static inline int hd_struct_try_get(struct hd_struct *part)
+{
+	return atomic_inc_not_zero(&part->ref);
+}
+
+static inline void hd_struct_put(struct hd_struct *part)
+{
+	if (atomic_dec_and_test(&part->ref))
+		__delete_partition(part);
+}
+
 #else /* CONFIG_BLOCK */
 
 static inline void printk_all_partitions(void) { }
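
The intended reference pattern around a partition, as a small sketch (where
'part' is assumed to come from an RCU-protected lookup):

	if (hd_struct_try_get(part)) {
		/* part cannot be freed until the matching put */
		/* ... account I/O against part ... */
		hd_struct_put(part);	/* last put calls __delete_partition() */
	}
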
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f54adfc..a3b148a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -34,6 +34,7 @@
 #else
 #define ___GFP_NOTRACK		0
 #endif
+#define ___GFP_NO_KSWAPD	0x400000u
 
 /*
  * GFP bitmasks..
@@ -81,13 +82,15 @@
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
+#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
+
 /*
  * This may seem redundant, but it's a way of annotating false positives vs.
  * allocations that simply cannot be supported (e.g. page tables).
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for 23 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -106,6 +109,9 @@
 				 __GFP_HARDWALL | __GFP_HIGHMEM | \
 				 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
+#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+			 __GFP_NO_KSWAPD)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
@@ -325,14 +331,17 @@
 {
 	return alloc_pages_current(gfp_mask, order);
 }
-extern struct page *alloc_page_vma(gfp_t gfp_mask,
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
+#define alloc_pages_vma(gfp_mask, order, vma, addr)	\
+	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#define alloc_page_vma(gfp_mask, vma, addr)	\
+	alloc_pages_vma(gfp_mask, 0, vma, addr)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
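
For illustration only: a huge-page fault path can now request a PMD-sized,
VMA-placed allocation in one call, using the GFP_TRANSHUGE mask and the
HPAGE_PMD_ORDER constant introduced elsewhere in this series:

	struct page *page;

	page = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, haddr);
	if (unlikely(!page))
		goto fallback;	/* fall back to normal 4k pages */
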
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/gpio-i2cmux.h
new file mode 100644
index 0000000..4a333bb
--- /dev/null
+++ b/include/linux/gpio-i2cmux.h
@@ -0,0 +1,38 @@
+/*
+ * gpio-i2cmux interface to platform code
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_GPIO_I2CMUX_H
+#define _LINUX_GPIO_I2CMUX_H
+
+/* MUX has no specific idle mode */
+#define GPIO_I2CMUX_NO_IDLE	((unsigned)-1)
+
+/**
+ * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of bitmasks of GPIO settings (low/high) for each
+ *	position
+ * @n_values: Number of multiplexer positions (busses to instantiate)
+ * @gpios: Array of GPIO numbers used to control MUX
+ * @n_gpios: Number of GPIOs used to control MUX
+ * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ */
+struct gpio_i2cmux_platform_data {
+	int parent;
+	int base_nr;
+	const unsigned *values;
+	int n_values;
+	const unsigned *gpios;
+	int n_gpios;
+	unsigned idle;
+};
+
+#endif /* _LINUX_GPIO_I2CMUX_H */
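
A board-code sketch (bus and GPIO numbers are purely illustrative) showing
how the platform data ties the pieces together:

	static const unsigned mymux_gpios[]  = { 70, 71 };	/* 2 select lines */
	static const unsigned mymux_values[] = { 0, 1, 2, 3 };	/* 4 child busses */

	static struct gpio_i2cmux_platform_data mymux_pdata = {
		.parent   = 1,			/* mux sits on i2c-1          */
		.base_nr  = 10,			/* children become i2c-10..13 */
		.values   = mymux_values,
		.n_values = ARRAY_SIZE(mymux_values),
		.gpios    = mymux_gpios,
		.n_gpios  = ARRAY_SIZE(mymux_gpios),
		.idle     = GPIO_I2CMUX_NO_IDLE,
	};
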
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index e41f7dd..32720ba 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 
 struct device;
+struct gpio;
 struct gpio_chip;
 
 /*
@@ -34,6 +35,17 @@
 	return -ENOSYS;
 }
 
+static inline int gpio_request_one(unsigned gpio,
+					unsigned long flags, const char *label)
+{
+	return -ENOSYS;
+}
+
+static inline int gpio_request_array(struct gpio *array, size_t num)
+{
+	return -ENOSYS;
+}
+
 static inline void gpio_free(unsigned gpio)
 {
 	might_sleep();
@@ -42,6 +54,14 @@
 	WARN_ON(1);
 }
 
+static inline void gpio_free_array(struct gpio *array, size_t num)
+{
+	might_sleep();
+
+	/* GPIO can never have been requested */
+	WARN_ON(1);
+}
+
 static inline int gpio_direction_input(unsigned gpio)
 {
 	return -ENOSYS;
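
A sketch of the bulk interface for the GPIOLIB-backed variant of these
helpers (the GPIO numbers and the GPIOF_* flags are assumptions, as the
flag definitions are not part of this hunk):

	static struct gpio my_gpios[] = {
		{ 42, GPIOF_OUT_INIT_LOW, "my-led"    },
		{ 43, GPIOF_IN,           "my-button" },
	};

	err = gpio_request_array(my_gpios, ARRAY_SIZE(my_gpios));
	if (err)
		return err;
	/* ... */
	gpio_free_array(my_gpios, ARRAY_SIZE(my_gpios));
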
diff --git a/include/linux/hid.h b/include/linux/hid.h
index bb0f56f..d91c25e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -402,7 +402,7 @@
 	__u16 dpad;			/* dpad input code */
 };
 
-#define HID_MAX_FIELDS 64
+#define HID_MAX_FIELDS 128
 
 struct hid_report {
 	struct list_head list;
@@ -593,6 +593,7 @@
  * @report_fixup: called before report descriptor parsing (NULL means nop)
  * @input_mapping: invoked on input registering before mapping a usage
  * @input_mapped: invoked on input registering after mapping a usage
+ * @feature_mapping: invoked on feature registering
  * @suspend: invoked on suspend (NULL means nop)
  * @resume: invoked on resume if device was not reset (NULL means nop)
  * @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -636,6 +637,9 @@
 	int (*input_mapped)(struct hid_device *hdev,
 			struct hid_input *hidinput, struct hid_field *field,
 			struct hid_usage *usage, unsigned long **bit, int *max);
+	void (*feature_mapping)(struct hid_device *hdev,
+			struct hid_input *hidinput, struct hid_field *field,
+			struct hid_usage *usage);
 #ifdef CONFIG_PM
 	int (*suspend)(struct hid_device *hdev, pm_message_t message);
 	int (*resume)(struct hid_device *hdev);
@@ -820,6 +824,49 @@
 	hdev->ll_driver->stop(hdev);
 }
 
+/**
+ * hid_hw_open - signal underlying HW to start delivering events
+ *
+ * @hdev: hid device
+ *
+ * Tell underlying HW to start delivering events from the device.
+ * This function should be called some time after a successful call
+ * to hid_hw_start().
+ */
+static inline int __must_check hid_hw_open(struct hid_device *hdev)
+{
+	return hdev->ll_driver->open(hdev);
+}
+
+/**
+ * hid_hw_close - signal underlying HW to stop delivering events
+ *
+ * @hdev: hid device
+ *
+ * This function indicates that we are not interested in the events
+ * from this device anymore. Delivery of events may or may not stop,
+ * depending on the number of users still outstanding.
+ */
+static inline void hid_hw_close(struct hid_device *hdev)
+{
+	hdev->ll_driver->close(hdev);
+}
+
+/**
+ * hid_hw_power - requests underlying HW to go into given power mode
+ *
+ * @hdev: hid device
+ * @level: requested power level (one of %PM_HINT_* defines)
+ *
+ * This function requests the underlying hardware to enter the requested
+ * power mode.
+ */
+
+static inline int hid_hw_power(struct hid_device *hdev, int level)
+{
+	return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0;
+}
+
 void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
 		int interrupt);
 
@@ -838,12 +885,32 @@
 #define hid_pidff_init NULL
 #endif
 
-#define dbg_hid(format, arg...) if (hid_debug) \
-				printk(KERN_DEBUG "%s: " format ,\
-				__FILE__ , ## arg)
-#define err_hid(format, arg...) printk(KERN_ERR "%s: " format "\n" , \
-		__FILE__ , ## arg)
-#endif /* HID_FF */
+#define dbg_hid(format, arg...)						\
+do {									\
+	if (hid_debug)							\
+		printk(KERN_DEBUG "%s: " format, __FILE__, ##arg);	\
+} while (0)
+
+#define hid_printk(level, hid, fmt, arg...)		\
+	dev_printk(level, &(hid)->dev, fmt, ##arg)
+#define hid_emerg(hid, fmt, arg...)			\
+	dev_emerg(&(hid)->dev, fmt, ##arg)
+#define hid_crit(hid, fmt, arg...)			\
+	dev_crit(&(hid)->dev, fmt, ##arg)
+#define hid_alert(hid, fmt, arg...)			\
+	dev_alert(&(hid)->dev, fmt, ##arg)
+#define hid_err(hid, fmt, arg...)			\
+	dev_err(&(hid)->dev, fmt, ##arg)
+#define hid_notice(hid, fmt, arg...)			\
+	dev_notice(&(hid)->dev, fmt, ##arg)
+#define hid_warn(hid, fmt, arg...)			\
+	dev_warn(&(hid)->dev, fmt, ##arg)
+#define hid_info(hid, fmt, arg...)			\
+	dev_info(&(hid)->dev, fmt, ##arg)
+#define hid_dbg(hid, fmt, arg...)			\
+	dev_dbg(&(hid)->dev, fmt, ##arg)
+
+#endif /* __KERNEL__ */
 
 #endif
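
By way of illustration only, a driver could combine the new wrappers and log
macros roughly like this (PM_HINT_FULLON is one of the %PM_HINT_* defines
mentioned above):

	ret = hid_hw_open(hdev);
	if (ret) {
		hid_err(hdev, "failed to open HID hardware: %d\n", ret);
		return ret;
	}
	hid_hw_power(hdev, PM_HINT_FULLON);
	/* ... deliver/consume events ... */
	hid_hw_close(hdev);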
 
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index b676c58..3a93f73 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -81,7 +81,8 @@
 
 static inline int kmap_atomic_idx_push(void)
 {
-	int idx = __get_cpu_var(__kmap_atomic_idx)++;
+	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
 #ifdef CONFIG_DEBUG_HIGHMEM
 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
 	BUG_ON(idx > KM_TYPE_NR);
@@ -91,16 +92,18 @@
 
 static inline int kmap_atomic_idx(void)
 {
-	return __get_cpu_var(__kmap_atomic_idx) - 1;
+	return __this_cpu_read(__kmap_atomic_idx) - 1;
 }
 
-static inline int kmap_atomic_idx_pop(void)
+static inline void kmap_atomic_idx_pop(void)
 {
-	int idx = --__get_cpu_var(__kmap_atomic_idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
+	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
 	BUG_ON(idx < 0);
+#else
+	__this_cpu_dec(__kmap_atomic_idx);
 #endif
-	return idx;
 }
 
 #endif
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 330586f..f376ddc 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -131,7 +131,6 @@
  * @index:		clock type index for per_cpu support when moving a
  *			timer to a base on another cpu.
  * @active:		red black tree root node for the active timers
- * @first:		pointer to the timer node which expires first
  * @resolution:		the resolution of the clock, in nanoseconds
  * @get_time:		function to retrieve the current time of the clock
  * @softirq_time:	the time when running the hrtimer queue in the softirq
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
new file mode 100644
index 0000000..8e6c8c4
--- /dev/null
+++ b/include/linux/huge_mm.h
@@ -0,0 +1,179 @@
+#ifndef _LINUX_HUGE_MM_H
+#define _LINUX_HUGE_MM_H
+
+extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
+				      struct vm_area_struct *vma,
+				      unsigned long address, pmd_t *pmd,
+				      unsigned int flags);
+extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+			 struct vm_area_struct *vma);
+extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			       unsigned long address, pmd_t *pmd,
+			       pmd_t orig_pmd);
+extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
+extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
+					  unsigned long addr,
+					  pmd_t *pmd,
+					  unsigned int flags);
+extern int zap_huge_pmd(struct mmu_gather *tlb,
+			struct vm_area_struct *vma,
+			pmd_t *pmd);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			unsigned long addr, pgprot_t newprot);
+
+enum transparent_hugepage_flag {
+	TRANSPARENT_HUGEPAGE_FLAG,
+	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
+	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
+#ifdef CONFIG_DEBUG_VM
+	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
+#endif
+};
+
+enum page_check_address_pmd_flag {
+	PAGE_CHECK_ADDRESS_PMD_FLAG,
+	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
+	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
+};
+extern pmd_t *page_check_address_pmd(struct page *page,
+				     struct mm_struct *mm,
+				     unsigned long address,
+				     enum page_check_address_pmd_flag flag);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define HPAGE_PMD_SHIFT HPAGE_SHIFT
+#define HPAGE_PMD_MASK HPAGE_MASK
+#define HPAGE_PMD_SIZE HPAGE_SIZE
+
+#define transparent_hugepage_enabled(__vma)				\
+	((transparent_hugepage_flags &					\
+	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
+	  (transparent_hugepage_flags &					\
+	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
+	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
+	 !((__vma)->vm_flags & VM_NOHUGEPAGE))
+#define transparent_hugepage_defrag(__vma)				\
+	((transparent_hugepage_flags &					\
+	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
+	 (transparent_hugepage_flags &					\
+	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
+	  (__vma)->vm_flags & VM_HUGEPAGE))
+#ifdef CONFIG_DEBUG_VM
+#define transparent_hugepage_debug_cow()				\
+	(transparent_hugepage_flags &					\
+	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
+#else /* CONFIG_DEBUG_VM */
+#define transparent_hugepage_debug_cow() 0
+#endif /* CONFIG_DEBUG_VM */
+
+extern unsigned long transparent_hugepage_flags;
+extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+			  pmd_t *dst_pmd, pmd_t *src_pmd,
+			  struct vm_area_struct *vma,
+			  unsigned long addr, unsigned long end);
+extern int handle_pte_fault(struct mm_struct *mm,
+			    struct vm_area_struct *vma, unsigned long address,
+			    pte_t *pte, pmd_t *pmd, unsigned int flags);
+extern int split_huge_page(struct page *page);
+extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
+#define split_huge_page_pmd(__mm, __pmd)				\
+	do {								\
+		pmd_t *____pmd = (__pmd);				\
+		if (unlikely(pmd_trans_huge(*____pmd)))			\
+			__split_huge_page_pmd(__mm, ____pmd);		\
+	}  while (0)
+#define wait_split_huge_page(__anon_vma, __pmd)				\
+	do {								\
+		pmd_t *____pmd = (__pmd);				\
+		spin_unlock_wait(&(__anon_vma)->root->lock);		\
+		/*							\
+		 * spin_unlock_wait() is just a loop in C and so the	\
+		 * CPU can reorder anything around it.			\
+		 */							\
+		smp_mb();						\
+		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
+		       pmd_trans_huge(*____pmd));			\
+	} while (0)
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+#if HPAGE_PMD_ORDER > MAX_ORDER
+#error "hugepages can't be allocated by the buddy allocator"
+#endif
+extern int hugepage_madvise(struct vm_area_struct *vma,
+			    unsigned long *vm_flags, int advice);
+extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+				    unsigned long start,
+				    unsigned long end,
+				    long adjust_next);
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end,
+					 long adjust_next)
+{
+	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+		return;
+	__vma_adjust_trans_huge(vma, start, end, adjust_next);
+}
+static inline int hpage_nr_pages(struct page *page)
+{
+	if (unlikely(PageTransHuge(page)))
+		return HPAGE_PMD_NR;
+	return 1;
+}
+static inline struct page *compound_trans_head(struct page *page)
+{
+	if (PageTail(page)) {
+		struct page *head;
+		head = page->first_page;
+		smp_rmb();
+		/*
+		 * head may be a dangling pointer.
+		 * __split_huge_page_refcount clears PageTail before
+		 * overwriting first_page, so if PageTail is still
+		 * there it means the head pointer isn't dangling.
+		 */
+		if (PageTail(page))
+			return head;
+	}
+	return page;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define HPAGE_PMD_SHIFT ({ BUG(); 0; })
+#define HPAGE_PMD_MASK ({ BUG(); 0; })
+#define HPAGE_PMD_SIZE ({ BUG(); 0; })
+
+#define hpage_nr_pages(x) 1
+
+#define transparent_hugepage_enabled(__vma) 0
+
+#define transparent_hugepage_flags 0UL
+static inline int split_huge_page(struct page *page)
+{
+	return 0;
+}
+#define split_huge_page_pmd(__mm, __pmd)	\
+	do { } while (0)
+#define wait_split_huge_page(__anon_vma, __pmd)	\
+	do { } while (0)
+#define compound_trans_head(page) compound_head(page)
+static inline int hugepage_madvise(struct vm_area_struct *vma,
+				   unsigned long *vm_flags, int advice)
+{
+	BUG();
+	return 0;
+}
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end,
+					 long adjust_next)
+{
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* _LINUX_HUGE_MM_H */
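
A sketch, not taken from this series, of how a page-table walker degrades a
transparent huge pmd before walking the pte level:

	pmd_t *pmd = pmd_offset(pud, addr);

	/* no-op unless *pmd is a transparent huge mapping */
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	/* ... continue at the pte level as before ... */
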
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
index 78ebf50..7472449 100644
--- a/include/linux/i2c-omap.h
+++ b/include/linux/i2c-omap.h
@@ -1,9 +1,14 @@
 #ifndef __I2C_OMAP_H__
 #define __I2C_OMAP_H__
 
+#include <linux/platform_device.h>
+
 struct omap_i2c_bus_platform_data {
 	u32		clkrate;
 	void		(*set_mpu_wkup_lat)(struct device *dev, long set);
+	int		(*device_enable) (struct platform_device *pdev);
+	int		(*device_shutdown) (struct platform_device *pdev);
+	int		(*device_idle) (struct platform_device *pdev);
 };
 
 #endif
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 56cfe23..903576d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -57,9 +57,10 @@
  * transmit an arbitrary number of messages without interruption.
  * @count must be less than 64k since msg.len is u16.
  */
-extern int i2c_master_send(struct i2c_client *client, const char *buf,
+extern int i2c_master_send(const struct i2c_client *client, const char *buf,
 			   int count);
-extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
+extern int i2c_master_recv(const struct i2c_client *client, char *buf,
+			   int count);
 
 /* Transfer num messages.
  */
@@ -78,23 +79,25 @@
 /* Now follow the 'nice' access routines. These also document the calling
    conventions of i2c_smbus_xfer. */
 
-extern s32 i2c_smbus_read_byte(struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
-extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
+extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
+				    u8 command);
+extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
 				     u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
-extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
+				    u8 command);
+extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
 				     u8 command, u16 value);
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
 				     u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
 				      u8 command, u8 length, const u8 *values);
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
 					 u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
 					  u8 command, u8 length,
 					  const u8 *values);
 #endif /* I2C */
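
A trivial sketch of what the const-correct prototypes allow: helpers that
only read from the client can take a const pointer (register 0x00 is an
assumption for the illustration):

	static int my_read_temp(const struct i2c_client *client)
	{
		s32 val = i2c_smbus_read_byte_data(client, 0x00);

		return val < 0 ? val : (int)val;
	}
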
diff --git a/include/linux/i2c/ds620.h b/include/linux/i2c/ds620.h
new file mode 100644
index 0000000..736bb87
--- /dev/null
+++ b/include/linux/i2c/ds620.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_DS620_H
+#define _LINUX_DS620_H
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+/* platform data for the DS620 temperature sensor and thermostat */
+
+struct ds620_platform_data {
+	/*
+	 *  Thermostat output pin PO mode:
+	 *  0 = always low (default)
+	 *  1 = PO_LOW
+	 *  2 = PO_HIGH
+	 *
+	 * (see Documentation/hwmon/ds620)
+	 */
+	int pomode;
+};
+
+#endif /* _LINUX_DS620_H */
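
A board-code sketch (bus address and mode value are illustrative) of how the
platform data would typically be attached to the I2C device:

	static struct ds620_platform_data ds620_pdata = {
		.pomode = 2,	/* PO_HIGH: thermostat output active-high */
	};

	static struct i2c_board_info ds620_info __initdata = {
		I2C_BOARD_INFO("ds620", 0x48),
		.platform_data = &ds620_pdata,
	};
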
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index c760991..61b9609 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -593,6 +593,13 @@
 
 struct twl4030_usb_data {
 	enum twl4030_usb_mode	usb_mode;
+
+	int		(*phy_init)(struct device *dev);
+	int		(*phy_exit)(struct device *dev);
+	/* Power on/off the PHY */
+	int		(*phy_power)(struct device *dev, int iD, int on);
+	/* enable/disable  phy clocks */
+	int		(*phy_set_clock)(struct device *dev, int on);
 };
 
 struct twl4030_ins {
diff --git a/include/linux/if_alg.h b/include/linux/if_alg.h
new file mode 100644
index 0000000..0f9acce
--- /dev/null
+++ b/include/linux/if_alg.h
@@ -0,0 +1,40 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _LINUX_IF_ALG_H
+#define _LINUX_IF_ALG_H
+
+#include <linux/types.h>
+
+struct sockaddr_alg {
+	__u16	salg_family;
+	__u8	salg_type[14];
+	__u32	salg_feat;
+	__u32	salg_mask;
+	__u8	salg_name[64];
+};
+
+struct af_alg_iv {
+	__u32	ivlen;
+	__u8	iv[0];
+};
+
+/* Socket options */
+#define ALG_SET_KEY			1
+#define ALG_SET_IV			2
+#define ALG_SET_OP			3
+
+/* Operations */
+#define ALG_OP_DECRYPT			0
+#define ALG_OP_ENCRYPT			1
+
+#endif	/* _LINUX_IF_ALG_H */
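
A minimal user-space sketch of the interface (error handling omitted; AF_ALG
must be known to the C library or defined by hand, and "sha1" availability
depends on the kernel configuration):

	int tfm, op;
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	op = accept(tfm, NULL, 0);
	write(op, data, data_len);	/* feed the message */
	read(op, digest, 20);		/* collect the SHA-1 digest */
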
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index f7e73c3..dd3f201 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
diff --git a/include/linux/input.h b/include/linux/input.h
index f7a6e19..e428382 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -112,6 +112,7 @@
 #define EVIOCGNAME(len)		_IOC(_IOC_READ, 'E', 0x06, len)		/* get device name */
 #define EVIOCGPHYS(len)		_IOC(_IOC_READ, 'E', 0x07, len)		/* get physical location */
 #define EVIOCGUNIQ(len)		_IOC(_IOC_READ, 'E', 0x08, len)		/* get unique identifier */
+#define EVIOCGPROP(len)		_IOC(_IOC_READ, 'E', 0x09, len)		/* get device properties */
 
 #define EVIOCGKEY(len)		_IOC(_IOC_READ, 'E', 0x18, len)		/* get global key state */
 #define EVIOCGLED(len)		_IOC(_IOC_READ, 'E', 0x19, len)		/* get all LEDs */
@@ -129,6 +130,18 @@
 #define EVIOCGRAB		_IOW('E', 0x90, int)			/* Grab/Release device */
 
 /*
+ * Device properties and quirks
+ */
+
+#define INPUT_PROP_POINTER		0x00	/* needs a pointer */
+#define INPUT_PROP_DIRECT		0x01	/* direct input devices */
+#define INPUT_PROP_BUTTONPAD		0x02	/* has button(s) under pad */
+#define INPUT_PROP_SEMI_MT		0x03	/* touch rectangle only */
+
+#define INPUT_PROP_MAX			0x1f
+#define INPUT_PROP_CNT			(INPUT_PROP_MAX + 1)
+
+/*
  * Event types
  */
 
@@ -760,11 +773,12 @@
 #define ABS_MT_BLOB_ID		0x38	/* Group a set of packets as a blob */
 #define ABS_MT_TRACKING_ID	0x39	/* Unique ID of initiated contact */
 #define ABS_MT_PRESSURE		0x3a	/* Pressure on contact area */
+#define ABS_MT_DISTANCE		0x3b	/* Contact hover distance */
 
 #ifdef __KERNEL__
 /* Implementation details, userspace should not care about these */
 #define ABS_MT_FIRST		ABS_MT_TOUCH_MAJOR
-#define ABS_MT_LAST		ABS_MT_PRESSURE
+#define ABS_MT_LAST		ABS_MT_DISTANCE
 #endif
 
 #define ABS_MAX			0x3f
@@ -788,6 +802,7 @@
 #define SW_CAMERA_LENS_COVER	0x09  /* set = lens covered */
 #define SW_KEYPAD_SLIDE		0x0a  /* set = keypad slide out */
 #define SW_FRONT_PROXIMITY	0x0b  /* set = front proximity sensor active */
+#define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_MAX			0x0f
 #define SW_CNT			(SW_MAX+1)
 
@@ -875,6 +890,7 @@
  */
 #define MT_TOOL_FINGER		0
 #define MT_TOOL_PEN		1
+#define MT_TOOL_MAX		1
 
 /*
  * Values describing the status of a force-feedback effect
@@ -1110,19 +1126,12 @@
 #include <linux/mod_devicetable.h>
 
 /**
- * struct input_mt_slot - represents the state of an input MT slot
- * @abs: holds current values of ABS_MT axes for this slot
- */
-struct input_mt_slot {
-	int abs[ABS_MT_LAST - ABS_MT_FIRST + 1];
-};
-
-/**
  * struct input_dev - represents an input device
  * @name: name of the device
  * @phys: physical path to the device in the system hierarchy
  * @uniq: unique identification code for the device (if device has it)
  * @id: id of the device (struct input_id)
+ * @propbit: bitmap of device properties and quirks
  * @evbit: bitmap of types of events supported by the device (EV_KEY,
  *	EV_REL, etc.)
  * @keybit: bitmap of keys/buttons this device has
@@ -1157,6 +1166,7 @@
  *	of tracked contacts
  * @mtsize: number of MT slots the device uses
  * @slot: MT slot currently being transmitted
+ * @trkid: stores MT tracking ID for the current contact
  * @absinfo: array of &struct input_absinfo elements holding information
  *	about absolute axes (current value, min, max, flat, fuzz,
  *	resolution)
@@ -1205,6 +1215,8 @@
 	const char *uniq;
 	struct input_id id;
 
+	unsigned long propbit[BITS_TO_LONGS(INPUT_PROP_CNT)];
+
 	unsigned long evbit[BITS_TO_LONGS(EV_CNT)];
 	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
 	unsigned long relbit[BITS_TO_LONGS(REL_CNT)];
@@ -1241,6 +1253,7 @@
 	struct input_mt_slot *mt;
 	int mtsize;
 	int slot;
+	int trkid;
 
 	struct input_absinfo *absinfo;
 
@@ -1490,11 +1503,6 @@
 	input_event(dev, EV_SYN, SYN_MT_REPORT, 0);
 }
 
-static inline void input_mt_slot(struct input_dev *dev, int slot)
-{
-	input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
-}
-
 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code);
 
 /**
@@ -1607,8 +1615,5 @@
 int input_ff_create_memless(struct input_dev *dev, void *data,
 		int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
 
-int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots);
-void input_mt_destroy_slots(struct input_dev *dev);
-
 #endif
 #endif
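
For illustration, a touchscreen driver would advertise the new properties by
setting bits in propbit before registering the device:

	__set_bit(INPUT_PROP_DIRECT,  input_dev->propbit);
	__set_bit(INPUT_PROP_SEMI_MT, input_dev->propbit);
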
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
new file mode 100644
index 0000000..1affd0d
--- /dev/null
+++ b/include/linux/input/as5011.h
@@ -0,0 +1,20 @@
+#ifndef _AS5011_H
+#define _AS5011_H
+
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct as5011_platform_data {
+	unsigned int button_gpio;
+	unsigned int axis_irq; /* irq number */
+	unsigned long axis_irqflags;
+	char xp, xn; /* threshold for x axis */
+	char yp, yn; /* threshold for y axis */
+};
+
+#endif /* _AS5011_H */
diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h
new file mode 100644
index 0000000..cbbaac2
--- /dev/null
+++ b/include/linux/input/cma3000.h
@@ -0,0 +1,59 @@
+/*
+ * VTI CMA3000_Dxx Accelerometer driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_CMA3000_H
+#define _LINUX_CMA3000_H
+
+#define CMAMODE_DEFAULT    0
+#define CMAMODE_MEAS100    1
+#define CMAMODE_MEAS400    2
+#define CMAMODE_MEAS40     3
+#define CMAMODE_MOTDET     4
+#define CMAMODE_FF100      5
+#define CMAMODE_FF400      6
+#define CMAMODE_POFF       7
+
+#define CMARANGE_2G   2000
+#define CMARANGE_8G   8000
+
+/**
+ * struct cma3000_platform_data - CMA3000 platform data
+ * @fuzz_x: Noise on X axis
+ * @fuzz_y: Noise on Y axis
+ * @fuzz_z: Noise on Z axis
+ * @g_range: G range in milli-g, i.e. 2000 or 8000
+ * @mode: Operating mode
+ * @mdthr: Motion detect threshold value
+ * @mdfftmr: Motion detect and free fall time value
+ * @ffthr: Free fall threshold value
+ * @irqflags: Flags used when requesting the accelerometer interrupt
+ */
+struct cma3000_platform_data {
+	int fuzz_x;
+	int fuzz_y;
+	int fuzz_z;
+	int g_range;
+	uint8_t mode;
+	uint8_t mdthr;
+	uint8_t mdfftmr;
+	uint8_t ffthr;
+	unsigned long irqflags;
+};
+
+#endif
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 80352ad..6974746 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -9,7 +9,7 @@
 
 #define KEY(row, col, val)	((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
 				 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
-				 (val & 0xffff))
+				 ((val) & 0xffff))
 
 #define KEY_ROW(k)		(((k) >> 24) & 0xff)
 #define KEY_COL(k)		(((k) >> 16) & 0xff)
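
For reference, a minimal sketch of a keymap built with the KEY() macro above; the extra parentheses around (val) matter when the value is an expression. The array name is illustrative, not part of the patch.

#include <linux/input.h>
#include <linux/input/matrix_keypad.h>

/* Illustrative only -- not part of this patch. */
static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_A),
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
};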
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
new file mode 100644
index 0000000..b3ac06a
--- /dev/null
+++ b/include/linux/input/mt.h
@@ -0,0 +1,57 @@
+#ifndef _INPUT_MT_H
+#define _INPUT_MT_H
+
+/*
+ * Input Multitouch Library
+ *
+ * Copyright (c) 2010 Henrik Rydberg
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+
+#define TRKID_MAX	0xffff
+
+/**
+ * struct input_mt_slot - represents the state of an input MT slot
+ * @abs: holds current values of ABS_MT axes for this slot
+ */
+struct input_mt_slot {
+	int abs[ABS_MT_LAST - ABS_MT_FIRST + 1];
+};
+
+static inline void input_mt_set_value(struct input_mt_slot *slot,
+				      unsigned code, int value)
+{
+	slot->abs[code - ABS_MT_FIRST] = value;
+}
+
+static inline int input_mt_get_value(const struct input_mt_slot *slot,
+				     unsigned code)
+{
+	return slot->abs[code - ABS_MT_FIRST];
+}
+
+int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots);
+void input_mt_destroy_slots(struct input_dev *dev);
+
+static inline int input_mt_new_trkid(struct input_dev *dev)
+{
+	return dev->trkid++ & TRKID_MAX;
+}
+
+static inline void input_mt_slot(struct input_dev *dev, int slot)
+{
+	input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
+}
+
+void input_mt_report_slot_state(struct input_dev *dev,
+				unsigned int tool_type, bool active);
+
+void input_mt_report_finger_count(struct input_dev *dev, int count);
+void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
+
+#endif
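
For orientation, a hedged sketch of how a driver would use the slotted MT helpers declared above; only the input_mt_*() calls, MT_TOOL_FINGER and the ABS_MT_* codes come from the headers, the remaining names are placeholders. A real driver would finish each frame with input_mt_report_pointer_emulation() and input_sync().

#include <linux/input/mt.h>

#define EXAMPLE_MAX_CONTACTS	10

/* Illustrative only -- not part of this patch. */
static int example_setup(struct input_dev *dev)
{
	/* Allocates dev->mt; replaces the old input_mt_create_slots(). */
	return input_mt_init_slots(dev, EXAMPLE_MAX_CONTACTS);
}

static void example_report_contact(struct input_dev *dev, int slot,
				   bool active, int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
	if (active) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
}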
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
deleted file mode 100644
index 1d19ab2..0000000
--- a/include/linux/intel-gtt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Common Intel AGPGART and GTT definitions.
- */
-#ifndef _INTEL_GTT_H
-#define _INTEL_GTT_H
-
-#include <linux/agp_backend.h>
-
-/* This is for Intel only GTT controls.
- *
- * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
- */
-
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
-/* flag for GFDT type */
-#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
-
-#endif
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 65aae34..045f2f2 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -454,6 +454,44 @@
 /* Validate that the given IPMI address is valid. */
 int ipmi_validate_addr(struct ipmi_addr *addr, int len);
 
+/*
+ * How did the IPMI driver find out about the device?
+ */
+enum ipmi_addr_src {
+	SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+	SI_PCI,	SI_DEVICETREE, SI_DEFAULT
+};
+
+union ipmi_smi_info_union {
+	/*
+	 * the acpi_info element is defined for the SI_ACPI
+	 * address type
+	 */
+	struct {
+		void *acpi_handle;
+	} acpi_info;
+};
+
+struct ipmi_smi_info {
+	enum ipmi_addr_src addr_src;
+
+	/*
+	 * Base device for the interface.  Don't forget to put this when
+	 * you are done.
+	 */
+	struct device *dev;
+
+	/*
+	 * The addr_info provides more detailed info for some IPMI
+	 * devices, depending on the addr_src.  Currently only SI_ACPI
+	 * info is provided.
+	 */
+	union ipmi_smi_info_union addr_info;
+};
+
+/* This is to get the private info of ipmi_smi_t */
+extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+
 #endif /* __KERNEL__ */
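
A hedged sketch of how a caller might use the new ipmi_get_smi_info() interface; only ipmi_get_smi_info(), struct ipmi_smi_info, SI_ACPI and the put_device() requirement come from this header, the surrounding function is hypothetical.

#include <linux/ipmi.h>
#include <linux/device.h>

/* Illustrative only -- not part of this patch. */
static int example_query_smi(int if_num)
{
	struct ipmi_smi_info info;
	int rv;

	rv = ipmi_get_smi_info(if_num, &info);
	if (rv)
		return rv;

	if (info.addr_src == SI_ACPI) {
		/* info.addr_info.acpi_info.acpi_handle is valid here. */
	}

	/* The call took a reference on the device; drop it when done. */
	put_device(info.dev);
	return 0;
}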
 
 
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 4b48318..906590a 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/ipmi.h>
 
 /* This file describes the interface for IPMI system management interface
    drivers to bind into the IPMI message handler. */
@@ -86,6 +87,13 @@
 	int (*start_processing)(void       *send_info,
 				ipmi_smi_t new_intf);
 
+	/*
+	 * Get the detailed private info of the low-level interface and store
+	 * it in the given struct ipmi_smi_info. For example, the
+	 * ACPI device handle will be returned for the pnp_acpi IPMI device.
+	 */
+	int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
+
 	/* Called to enqueue an SMI message to be sent.  This
 	   operation is not allowed to fail.  If an error occurs, it
 	   should report back the error in a received message.  It may
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 979c68c..6a64c6f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -57,7 +57,7 @@
 #endif
 
 	struct timer_rand_state *timer_rand_state;
-	unsigned int		*kstat_irqs;
+	unsigned int __percpu	*kstat_irqs;
 	irq_flow_handler_t	handle_irq;
 	struct irqaction	*action;	/* IRQ action list */
 	unsigned int		status;		/* IRQ status */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2ae86aa..27e79c2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -94,7 +94,7 @@
  *
  * This is an opaque datatype.
  **/
-typedef struct handle_s		handle_t;	/* Atomic operation type */
+typedef struct jbd2_journal_handle handle_t;	/* Atomic operation type */
 
 
 /**
@@ -416,7 +416,7 @@
  * in so it can be fixed later.
  */
 
-struct handle_s
+struct jbd2_journal_handle
 {
 	/* Which compound transaction is this update a part of? */
 	transaction_t		*h_transaction;
@@ -1158,6 +1158,22 @@
 	kmem_cache_free(jbd2_handle_cache, handle);
 }
 
+/*
+ * jbd2_inode management (optional, for those file systems that want to use
+ * dynamically allocated jbd2_inode structures)
+ */
+extern struct kmem_cache *jbd2_inode_cache;
+
+static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
+{
+	return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
+}
+
+static inline void jbd2_free_inode(struct jbd2_inode *jinode)
+{
+	kmem_cache_free(jbd2_inode_cache, jinode);
+}
+
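
A minimal sketch of the optional dynamically allocated jbd2_inode usage described above, assuming the caller pairs it with jbd2_journal_init_jbd_inode(); the wrapper function name is hypothetical.

#include <linux/jbd2.h>

/* Illustrative only -- not part of this patch. */
static struct jbd2_inode *example_attach_jinode(struct inode *inode)
{
	struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_NOFS);

	if (!jinode)
		return NULL;
	jbd2_journal_init_jbd_inode(jinode, inode);
	return jinode;	/* released later with jbd2_free_inode() */
}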
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
 extern int	   jbd2_journal_init_revoke(journal_t *, int);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b6de9a6..5a9d905 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -56,6 +56,8 @@
 
 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
 #define roundup(x, y) (					\
 {							\
 	const typeof(y) __y = y;			\
@@ -141,9 +143,22 @@
 
 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
 
-#define abs(x) ({				\
-		long __x = (x);			\
-		(__x < 0) ? -__x : __x;		\
+/*
+ * abs() handles unsigned and signed longs, ints, shorts and chars.  For all
+ * input types abs() returns a signed long.
+ * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
+ * for those.
+ */
+#define abs(x) ({						\
+		long ret;					\
+		if (sizeof(x) == sizeof(long)) {		\
+			long __x = (x);				\
+			ret = (__x < 0) ? -__x : __x;		\
+		} else {					\
+			int __x = (x);				\
+			ret = (__x < 0) ? -__x : __x;		\
+		}						\
+		ret;						\
 	})
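
A short, hedged illustration of the behaviour the new comment describes; the values and the wrapper function are examples only.

#include <linux/kernel.h>

/* Illustrative only -- not part of this patch. */
static void example_abs_usage(void)
{
	long a = abs(-3);		/* int input, evaluated in int, returned as long */
	long b = abs(-40000L);		/* long input, evaluated in the long branch */
	s64  c = abs64(-(1LL << 40));	/* 64-bit values must use abs64(), not abs() */

	(void)a; (void)b; (void)c;
}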
 
 #define abs64(x) ({				\
@@ -263,6 +278,7 @@
 }
 
 extern int hex_to_bin(char ch);
+extern void hex2bin(u8 *dst, const char *src, size_t count);
 
 /*
  * General tracing related utility functions - trace_printk(),
@@ -584,6 +600,13 @@
 #define NUMA_BUILD 0
 #endif
 
+/* This helps us avoid #ifdef CONFIG_COMPACTION */
+#ifdef CONFIG_COMPACTION
+#define COMPACTION_BUILD 1
+#else
+#define COMPACTION_BUILD 0
+#endif
+
 /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ad54c84..0cce2db 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -46,16 +46,14 @@
 extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
-#define kstat_irqs_this_cpu(irq) \
-	(kstat_this_cpu.irqs[irq])
 
 struct irq_desc;
 
 static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
 					    struct irq_desc *desc)
 {
-	kstat_this_cpu.irqs[irq]++;
-	kstat_this_cpu.irqs_sum++;
+	__this_cpu_inc(kstat.irqs[irq]);
+	__this_cpu_inc(kstat.irqs_sum);
 }
 
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
@@ -65,17 +63,18 @@
 #else
 #include <linux/irq.h>
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) do {\
-	((DESC)->kstat_irqs[smp_processor_id()]++);\
-	kstat_this_cpu.irqs_sum++; } while (0)
+
+#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
+do {							\
+	__this_cpu_inc(*(DESC)->kstat_irqs);		\
+	__this_cpu_inc(kstat.irqs_sum);			\
+} while (0)
 
 #endif
 
 static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
 {
-	kstat_this_cpu.softirqs[irq]++;
+	__this_cpu_inc(kstat.softirqs[irq]);
 }
 
 static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
new file mode 100644
index 0000000..6b394f0
--- /dev/null
+++ b/include/linux/khugepaged.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_KHUGEPAGED_H
+#define _LINUX_KHUGEPAGED_H
+
+#include <linux/sched.h> /* MMF_VM_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_exit(struct mm_struct *mm);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma);
+
+#define khugepaged_enabled()					       \
+	(transparent_hugepage_flags &				       \
+	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |		       \
+	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define khugepaged_always()				\
+	(transparent_hugepage_flags &			\
+	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
+#define khugepaged_req_madv()					\
+	(transparent_hugepage_flags &				\
+	 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
+#define khugepaged_defrag()					\
+	(transparent_hugepage_flags &				\
+	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+
+static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+		return __khugepaged_enter(mm);
+	return 0;
+}
+
+static inline void khugepaged_exit(struct mm_struct *mm)
+{
+	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+		__khugepaged_exit(mm);
+}
+
+static inline int khugepaged_enter(struct vm_area_struct *vma)
+{
+	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
+		if ((khugepaged_always() ||
+		     (khugepaged_req_madv() &&
+		      vma->vm_flags & VM_HUGEPAGE)) &&
+		    !(vma->vm_flags & VM_NOHUGEPAGE))
+			if (__khugepaged_enter(vma->vm_mm))
+				return -ENOMEM;
+	return 0;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	return 0;
+}
+static inline void khugepaged_exit(struct mm_struct *mm)
+{
+}
+static inline int khugepaged_enter(struct vm_area_struct *vma)
+{
+	return 0;
+}
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 24b4414..2a0d7d6 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -18,6 +18,10 @@
 	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
 	KMSG_DUMP_KEXEC,
+	KMSG_DUMP_RESTART,
+	KMSG_DUMP_HALT,
+	KMSG_DUMP_POWEROFF,
+	KMSG_DUMP_EMERG,
 };
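
To show where the new reasons land, a hedged sketch of a dumper callback that reacts to them; the function name is hypothetical and the callback signature is the one kmsg_dump_register() expects in this kernel generation.

#include <linux/kmsg_dump.h>

/* Illustrative only -- not part of this patch. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	switch (reason) {
	case KMSG_DUMP_RESTART:
	case KMSG_DUMP_HALT:
	case KMSG_DUMP_POWEROFF:
	case KMSG_DUMP_EMERG:
		/* persist the two log fragments (s1/l1 and s2/l2) */
		break;
	default:
		break;
	}
}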
 
 /**
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index b78edb5..dd7c12e 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -305,12 +305,12 @@
 /* kprobe_running() will just return the current_kprobe on this CPU */
 static inline struct kprobe *kprobe_running(void)
 {
-	return (__get_cpu_var(current_kprobe));
+	return (__this_cpu_read(current_kprobe));
 }
 
 static inline void reset_current_kprobe(void)
 {
-	__get_cpu_var(current_kprobe) = NULL;
+	__this_cpu_write(current_kprobe, NULL);
 }
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc..d4a62ab2e 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@
 void kref_init(struct kref *kref);
 void kref_get(struct kref *kref);
 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+int kref_sub(struct kref *kref, unsigned int count,
+	     void (*release) (struct kref *kref));
 
 #endif /* _KREF_H_ */
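
A minimal sketch of the new kref_sub() helper, which drops several references in one call; the object type and release function are hypothetical.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
};

/* Hypothetical release callback -- not part of this patch. */
static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_obj, kref));
}

/* Drop 'count' references at once instead of looping over kref_put(). */
static void example_put_many(struct example_obj *obj, unsigned int count)
{
	kref_sub(&obj->kref, count, example_release);
}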
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 919ae53..ea2dc1a 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -540,6 +540,7 @@
 #endif
 #define KVM_CAP_PPC_GET_PVINFO 57
 #define KVM_CAP_PPC_IRQ_LEVEL 58
+#define KVM_CAP_ASYNC_PF 59
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a055742..b5021db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -40,6 +42,7 @@
 #define KVM_REQ_KICK               9
 #define KVM_REQ_DEACTIVATE_FPU    10
 #define KVM_REQ_EVENT             11
+#define KVM_REQ_APF_HALT          12
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
@@ -74,6 +77,27 @@
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			      struct kvm_io_device *dev);
 
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+	struct work_struct work;
+	struct list_head link;
+	struct list_head queue;
+	struct kvm_vcpu *vcpu;
+	struct mm_struct *mm;
+	gva_t gva;
+	unsigned long addr;
+	struct kvm_arch_async_pf arch;
+	struct page *page;
+	bool done;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -104,6 +128,15 @@
 	gpa_t mmio_phys_addr;
 #endif
 
+#ifdef CONFIG_KVM_ASYNC_PF
+	struct {
+		u32 queued;
+		struct list_head queue;
+		struct list_head done;
+		spinlock_t lock;
+	} async_pf;
+#endif
+
 	struct kvm_vcpu_arch arch;
 };
 
@@ -113,16 +146,19 @@
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
-	struct {
-		unsigned long rmap_pde;
-		int write_count;
-	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	unsigned long *dirty_bitmap_head;
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 	int id;
@@ -169,6 +205,7 @@
 
 struct kvm_memslots {
 	int nmemslots;
+	u64 generation;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 };
@@ -206,6 +243,10 @@
 
 	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
+	/*
+	 * Update side is protected by irq_lock and,
+	 * if configured, irqfds.lock.
+	 */
 	struct kvm_irq_routing_table __rcu *irq_routing;
 	struct hlist_head mask_notifier_list;
 	struct hlist_head irq_ack_notifier_list;
@@ -216,6 +257,7 @@
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	long tlbs_dirty;
 };
 
 /* The guest did something we don't support. */
@@ -302,7 +344,11 @@
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn);
 int memslot_id(struct kvm *kvm, gfn_t gfn);
@@ -321,18 +367,25 @@
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
@@ -398,7 +451,19 @@
 
 void kvm_free_physmem(struct kvm *kvm);
 
-struct  kvm *kvm_arch_create_vm(void);
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+	kfree(kvm);
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
@@ -414,16 +479,8 @@
 	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
 };
 
-#define KVM_ASSIGNED_MSIX_PENDING		0x1
-struct kvm_guest_msix_entry {
-	u32 vector;
-	u16 entry;
-	u16 flags;
-};
-
 struct kvm_assigned_dev_kernel {
 	struct kvm_irq_ack_notifier ack_notifier;
-	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
 	int host_segnr;
@@ -434,13 +491,14 @@
 	bool host_irq_disabled;
 	struct msix_entry *host_msix_entries;
 	int guest_irq;
-	struct kvm_guest_msix_entry *guest_msix_entries;
+	struct msix_entry *guest_msix_entries;
 	unsigned long irq_requested_type;
 	int irq_source_id;
 	int flags;
 	struct pci_dev *dev;
 	struct kvm *kvm;
-	spinlock_t assigned_dev_lock;
+	spinlock_t intx_lock;
+	char irq_name[32];
 };
 
 struct kvm_irq_mask_notifier {
@@ -462,6 +520,8 @@
 				   unsigned long *deliver_bitmask);
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
+		int irq_source_id, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
@@ -603,17 +663,28 @@
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
 void kvm_irqfd_release(struct kvm *kvm);
+void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 #else
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
+
 static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
 {
 	return -EINVAL;
 }
 
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+static inline void kvm_irq_routing_update(struct kvm *kvm,
+					  struct kvm_irq_routing_table *irq_rt)
+{
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+}
+#endif
+
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
 	return -ENOSYS;
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 7ac0d4e..fa7cc72 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -67,4 +67,11 @@
 	u32 dest_id;
 };
 
+struct gfn_to_hva_cache {
+	u64 generation;
+	gpa_t gpa;
+	unsigned long hva;
+	struct kvm_memory_slot *memslot;
+};
+
 #endif /* __KVM_TYPES_H__ */
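
struct gfn_to_hva_cache pairs with kvm_gfn_to_hva_cache_init() and kvm_write_guest_cached() declared in kvm_host.h above; a hedged sketch of the intended pattern, with an illustrative wrapper function.

#include <linux/kvm_host.h>

/* Illustrative only -- not part of this patch. */
static int example_write_guest(struct kvm *kvm, gpa_t gpa,
			       void *data, unsigned long len)
{
	struct gfn_to_hva_cache ghc;
	int r;

	/* Resolve and cache the gpa -> hva translation once ... */
	r = kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
	if (r)
		return r;

	/* ... then reuse it; the cache records memslots->generation so
	 * stale translations can be detected and refreshed. */
	return kvm_write_guest_cached(kvm, &ghc, data, len);
}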
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
index 38368d7..fd548d2 100644
--- a/include/linux/leds-lp5521.h
+++ b/include/linux/leds-lp5521.h
@@ -42,6 +42,7 @@
 	int	(*setup_resources)(void);
 	void	(*release_resources)(void);
 	void	(*enable)(bool state);
+	const char *label;
 };
 
 #endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
index 7967476..2694289 100644
--- a/include/linux/leds-lp5523.h
+++ b/include/linux/leds-lp5523.h
@@ -42,6 +42,7 @@
 	int	(*setup_resources)(void);
 	void	(*release_resources)(void);
 	void	(*enable)(bool state);
+	const char *label;
 };
 
 #endif /* __LINUX_LP5523_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d947b12..c9c5d7a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -996,8 +996,7 @@
 extern int ata_sas_port_start(struct ata_port *ap);
 extern void ata_sas_port_stop(struct ata_port *ap);
 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
-extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
-			    struct ata_port *ap);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
 extern int sata_scr_valid(struct ata_link *link);
 extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
 extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -1040,8 +1039,7 @@
 					struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
-extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
-			      void (*done)(struct scsi_cmnd *));
+extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
 			      struct block_device *bdev,
 			      sector_t capacity, int geom[]);
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
new file mode 100644
index 0000000..5bad17d
--- /dev/null
+++ b/include/linux/list_bl.h
@@ -0,0 +1,145 @@
+#ifndef _LINUX_LIST_BL_H
+#define _LINUX_LIST_BL_H
+
+#include <linux/list.h>
+
+/*
+ * Special version of lists, where the head of the list has a lock in its
+ * lowest bit. This is useful for scalable hash tables without increasing
+ * the memory footprint.
+ *
+ * For modification operations, the 0 bit of hlist_bl_head->first
+ * pointer must be set.
+ *
+ * With some small modifications, this can easily be adapted to store several
+ * arbitrary bits (not just a single lock bit), if the need arises to store
+ * some fast and compact auxiliary data.
+ */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#define LIST_BL_LOCKMASK	1UL
+#else
+#define LIST_BL_LOCKMASK	0UL
+#endif
+
+#ifdef CONFIG_DEBUG_LIST
+#define LIST_BL_BUG_ON(x) BUG_ON(x)
+#else
+#define LIST_BL_BUG_ON(x)
+#endif
+
+
+struct hlist_bl_head {
+	struct hlist_bl_node *first;
+};
+
+struct hlist_bl_node {
+	struct hlist_bl_node *next, **pprev;
+};
+#define INIT_HLIST_BL_HEAD(ptr) \
+	((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
+{
+	h->next = NULL;
+	h->pprev = NULL;
+}
+
+#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
+
+static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
+{
+	return !h->pprev;
+}
+
+static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
+{
+	return (struct hlist_bl_node *)
+		((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+}
+
+static inline void hlist_bl_set_first(struct hlist_bl_head *h,
+					struct hlist_bl_node *n)
+{
+	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
+	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
+							LIST_BL_LOCKMASK);
+	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
+}
+
+static inline int hlist_bl_empty(const struct hlist_bl_head *h)
+{
+	return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+}
+
+static inline void hlist_bl_add_head(struct hlist_bl_node *n,
+					struct hlist_bl_head *h)
+{
+	struct hlist_bl_node *first = hlist_bl_first(h);
+
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	n->pprev = &h->first;
+	hlist_bl_set_first(h, n);
+}
+
+static inline void __hlist_bl_del(struct hlist_bl_node *n)
+{
+	struct hlist_bl_node *next = n->next;
+	struct hlist_bl_node **pprev = n->pprev;
+
+	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
+
+	/* pprev may be `first`, so be careful not to lose the lock bit */
+	*pprev = (struct hlist_bl_node *)
+			((unsigned long)next |
+			 ((unsigned long)*pprev & LIST_BL_LOCKMASK));
+	if (next)
+		next->pprev = pprev;
+}
+
+static inline void hlist_bl_del(struct hlist_bl_node *n)
+{
+	__hlist_bl_del(n);
+	n->next = LIST_POISON1;
+	n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_bl_del_init(struct hlist_bl_node *n)
+{
+	if (!hlist_bl_unhashed(n)) {
+		__hlist_bl_del(n);
+		INIT_HLIST_BL_NODE(n);
+	}
+}
+
+/**
+ * hlist_bl_for_each_entry	- iterate over list of given type
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_bl_node to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_bl_node within the struct.
+ */
+#define hlist_bl_for_each_entry(tpos, pos, head, member)		\
+	for (pos = hlist_bl_first(head);				\
+	     pos &&							\
+		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = pos->next)
+
+/**
+ * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_bl_node to use as a loop cursor.
+ * @n:		another &struct hlist_bl_node to use as temporary storage
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_bl_node within the struct.
+ */
+#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)	 \
+	for (pos = hlist_bl_first(head);				 \
+	     pos && ({ n = pos->next; 1; }) && 				 \
+		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
+	     pos = n)
+
+#endif
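
A hedged sketch of the intended usage pattern: bit 0 of the head pointer doubles as the bucket lock, so writers take it (for example with bit_spin_lock()) before modifying the chain, and lockless readers need additional protection such as RCU. Everything except the hlist_bl_*() calls is illustrative.

#include <linux/list_bl.h>
#include <linux/bit_spinlock.h>

struct example_entry {
	struct hlist_bl_node node;
	unsigned long key;
};

/* Illustrative only -- not part of this patch. */
static void example_insert(struct hlist_bl_head *head, struct example_entry *e)
{
	bit_spin_lock(0, (unsigned long *)&head->first);
	hlist_bl_add_head(&e->node, head);
	bit_spin_unlock(0, (unsigned long *)&head->first);
}

static struct example_entry *example_find(struct hlist_bl_head *head,
					  unsigned long key)
{
	struct example_entry *e;
	struct hlist_bl_node *pos;

	/* Caller must hold the bucket lock or use RCU for this walk. */
	hlist_bl_for_each_entry(e, pos, head, node)
		if (e->key == key)
			return e;
	return NULL;
}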
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 34b2b7f..257d377 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -44,14 +44,4 @@
 #define NLMDBG_XDR		0x0100
 #define NLMDBG_ALL		0x7fff
 
-
-/*
- * Support for printing NLM cookies in dprintk()
- */
-#ifdef RPC_DEBUG
-struct nlm_cookie;
-/* Call this function with the BKL held (it uses a static buffer) */
-extern const char *nlmdbg_cookie2a(const struct nlm_cookie *);
-#endif
-
 #endif /* LINUX_LOCKD_DEBUG_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 2dee05e..ff9abff 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -202,9 +202,9 @@
  * Lockd client functions
  */
 struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
-void		  nlm_release_call(struct nlm_rqst *);
 int		  nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
 int		  nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
+void		  nlmclnt_release_call(struct nlm_rqst *);
 struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
 void		  nlmclnt_finish_block(struct nlm_wait *block);
 int		  nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
@@ -223,13 +223,14 @@
 					const u32 version,
 					const char *hostname,
 					int noresvport);
+void		  nlmclnt_release_host(struct nlm_host *);
 struct nlm_host  *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 					const char *hostname,
 					const size_t hostname_len);
+void		  nlmsvc_release_host(struct nlm_host *);
 struct rpc_clnt * nlm_bind_host(struct nlm_host *);
 void		  nlm_rebind_host(struct nlm_host *);
 struct nlm_host * nlm_get_host(struct nlm_host *);
-void		  nlm_release_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
 void		  nlm_host_rebooted(const struct nlm_reboot *);
 
@@ -267,6 +268,7 @@
 void		  nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
 					nlm_host_match_fn_t match);
 void		  nlmsvc_grant_reply(struct nlm_cookie *, __be32);
+void		  nlmsvc_release_call(struct nlm_rqst *);
 
 /*
  * File handling for the server personality
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 54cbbac..5525d37 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -18,6 +18,17 @@
 	} e_index;
 };
 
+struct mb_cache {
+	struct list_head		c_cache_list;
+	const char			*c_name;
+	atomic_t			c_entry_count;
+	int				c_max_entries;
+	int				c_bucket_bits;
+	struct kmem_cache		*c_entry_cache;
+	struct list_head		*c_block_hash;
+	struct list_head		*c_index_hash;
+};
+
 /* Functions on caches */
 
 struct mb_cache *mb_cache_create(const char *, int);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 159a076..6a576f9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,11 @@
 struct page;
 struct mm_struct;
 
+/* Stats that can be updated by kernel. */
+enum mem_cgroup_page_stat_item {
+	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+};
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -93,7 +98,7 @@
 mem_cgroup_prepare_migration(struct page *page,
 	struct page *newpage, struct mem_cgroup **ptr);
 extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
-	struct page *oldpage, struct page *newpage);
+	struct page *oldpage, struct page *newpage, bool migration_ok);
 
 /*
  * For memory reclaim.
@@ -121,7 +126,22 @@
 	return false;
 }
 
-void mem_cgroup_update_file_mapped(struct page *page, int val);
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx,
+				 int val);
+
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, -1);
+}
+
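
A hedged example of the intended call sites for these wrappers, modelled on the file-rmap accounting path; the function shown is illustrative, not code from this patch.

#include <linux/types.h>
#include <linux/memcontrol.h>

/* Illustrative only -- not part of this patch. */
static void example_account_file_mapped(struct page *page, bool mapped)
{
	if (mapped)
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	else
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
}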
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
@@ -231,8 +251,7 @@
 }
 
 static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
-					struct page *oldpage,
-					struct page *newpage)
+		struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 }
 
@@ -293,8 +312,13 @@
 {
 }
 
-static inline void mem_cgroup_update_file_mapped(struct page *page,
-							int val)
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
 {
 }
 
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 31c237a..24376fe 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -13,12 +13,16 @@
 #ifdef CONFIG_MEMORY_HOTPLUG
 
 /*
- * Types for free bootmem.
- * The normal smallest mapcount is -1. Here is smaller value than it.
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
  */
-#define SECTION_INFO		(-1 - 1)
-#define MIX_SECTION_INFO	(-1 - 2)
-#define NODE_INFO		(-1 - 3)
+enum {
+	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+	MIX_SECTION_INFO,
+	NODE_INFO,
+	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
 
 /*
  * pgdat resizing functions
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index d63b605..37f56b7 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -74,32 +74,37 @@
 #define AB8500_INT_ACC_DETECT_21DB_F	37
 #define AB8500_INT_ACC_DETECT_21DB_R	38
 #define AB8500_INT_GP_SW_ADC_CONV_END	39
-#define AB8500_INT_BTEMP_LOW		72
-#define AB8500_INT_BTEMP_LOW_MEDIUM	73
-#define AB8500_INT_BTEMP_MEDIUM_HIGH	74
-#define AB8500_INT_BTEMP_HIGH		75
-#define AB8500_INT_USB_CHARGER_NOT_OK	81
-#define AB8500_INT_ID_WAKEUP_R		82
-#define AB8500_INT_ID_DET_R1R		84
-#define AB8500_INT_ID_DET_R2R		85
-#define AB8500_INT_ID_DET_R3R		86
-#define AB8500_INT_ID_DET_R4R		87
-#define AB8500_INT_ID_WAKEUP_F		88
-#define AB8500_INT_ID_DET_R1F		90
-#define AB8500_INT_ID_DET_R2F		91
-#define AB8500_INT_ID_DET_R3F		92
-#define AB8500_INT_ID_DET_R4F		93
-#define AB8500_INT_USB_CHG_DET_DONE	94
-#define AB8500_INT_USB_CH_TH_PROT_F	96
-#define AB8500_INT_USB_CH_TH_PROP_R	97
-#define AB8500_INT_MAIN_CH_TH_PROP_F	98
-#define AB8500_INT_MAIN_CH_TH_PROT_R	99
-#define AB8500_INT_USB_CHARGER_NOT_OKF	103
+#define AB8500_INT_ADP_SOURCE_ERROR	72
+#define AB8500_INT_ADP_SINK_ERROR	73
+#define AB8500_INT_ADP_PROBE_PLUG	74
+#define AB8500_INT_ADP_PROBE_UNPLUG	75
+#define AB8500_INT_ADP_SENSE_OFF	76
+#define AB8500_INT_USB_PHY_POWER_ERR	78
+#define AB8500_INT_USB_LINK_STATUS	79
+#define AB8500_INT_BTEMP_LOW		80
+#define AB8500_INT_BTEMP_LOW_MEDIUM	81
+#define AB8500_INT_BTEMP_MEDIUM_HIGH	82
+#define AB8500_INT_BTEMP_HIGH		83
+#define AB8500_INT_USB_CHARGER_NOT_OK	89
+#define AB8500_INT_ID_WAKEUP_R		90
+#define AB8500_INT_ID_DET_R1R		92
+#define AB8500_INT_ID_DET_R2R		93
+#define AB8500_INT_ID_DET_R3R		94
+#define AB8500_INT_ID_DET_R4R		95
+#define AB8500_INT_ID_WAKEUP_F		96
+#define AB8500_INT_ID_DET_R1F		98
+#define AB8500_INT_ID_DET_R2F		99
+#define AB8500_INT_ID_DET_R3F		100
+#define AB8500_INT_ID_DET_R4F		101
+#define AB8500_INT_USB_CHG_DET_DONE	102
+#define AB8500_INT_USB_CH_TH_PROT_F	104
+#define AB8500_INT_USB_CH_TH_PROT_R    105
+#define AB8500_INT_MAIN_CH_TH_PROT_F   106
+#define AB8500_INT_MAIN_CH_TH_PROT_R	107
+#define AB8500_INT_USB_CHARGER_NOT_OKF	111
 
-#define AB8500_NR_IRQS			104
-#define AB8500_NUM_IRQ_REGS		13
-
-#define AB8500_NUM_REGULATORS   15
+#define AB8500_NR_IRQS			112
+#define AB8500_NUM_IRQ_REGS		14
 
 /**
  * struct ab8500 - ab8500 internal structure
@@ -145,7 +150,8 @@
 struct ab8500_platform_data {
 	int irq_base;
 	void (*init) (struct ab8500 *);
-	struct regulator_init_data *regulator[AB8500_NUM_REGULATORS];
+	int num_regulator;
+	struct regulator_init_data *regulator;
 };
 
 extern int __devinit ab8500_init(struct ab8500 *ab8500);
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index cb93d80..835996e 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -39,7 +39,7 @@
 	size_t			data_size;
 
 	/*
-	 * This resources can be specified relatievly to the parent device.
+	 * These resources can be specified relative to the parent device.
 	 * For accessing device you should use resources from device
 	 */
 	int			num_resources;
@@ -47,6 +47,12 @@
 
 	/* don't check for resource conflicts */
 	bool			ignore_resource_conflicts;
+
+	/*
+	 * Disable runtime PM callbacks for this subdevice - see
+	 * pm_runtime_no_callbacks().
+	 */
+	bool			pm_runtime_no_callbacks;
 };
 
 extern int mfd_add_devices(struct device *parent, int id,
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 7363dea..effa5d3 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -159,10 +159,12 @@
 	u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
 	u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
 	int type;
+	bool wakeup;
 };
 
 int max8998_irq_init(struct max8998_dev *max8998);
 void max8998_irq_exit(struct max8998_dev *max8998);
+int max8998_irq_resume(struct max8998_dev *max8998);
 
 extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
 extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count,
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index f8c9f88..61daa16 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -70,24 +70,43 @@
  * @num_regulators: number of regultors used
  * @irq_base: base IRQ number for max8998, required for IRQs
  * @ono: power onoff IRQ number for max8998
- * @buck1_max_voltage1: BUCK1 maximum alowed voltage register 1
- * @buck1_max_voltage2: BUCK1 maximum alowed voltage register 2
- * @buck2_max_voltage: BUCK2 maximum alowed voltage
+ * @buck_voltage_lock: Do NOT change the values of the following six
+ *   registers set by buck?_voltage?. The voltage of BUCK1/2 cannot
+ *   be other than the preset values.
+ * @buck1_voltage1: BUCK1 DVS mode 1 voltage register
+ * @buck1_voltage2: BUCK1 DVS mode 2 voltage register
+ * @buck1_voltage3: BUCK1 DVS mode 3 voltage register
+ * @buck1_voltage4: BUCK1 DVS mode 4 voltage register
+ * @buck2_voltage1: BUCK2 DVS mode 1 voltage register
+ * @buck2_voltage2: BUCK2 DVS mode 2 voltage register
  * @buck1_set1: BUCK1 gpio pin 1 to set output voltage
  * @buck1_set2: BUCK1 gpio pin 2 to set output voltage
+ * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
  * @buck2_set3: BUCK2 gpio pin to set output voltage
+ * @buck2_default_idx: Default for BUCK2 gpio pin.
+ * @wakeup: Allow to wake up from suspend
+ * @rtc_delay: LP3974 RTC chip bug that requires delay after a register
+ * write before reading it.
  */
 struct max8998_platform_data {
 	struct max8998_regulator_data	*regulators;
 	int				num_regulators;
 	int				irq_base;
 	int				ono;
-	int                             buck1_max_voltage1;
-	int                             buck1_max_voltage2;
-	int                             buck2_max_voltage;
+	bool				buck_voltage_lock;
+	int				buck1_voltage1;
+	int				buck1_voltage2;
+	int				buck1_voltage3;
+	int				buck1_voltage4;
+	int				buck2_voltage1;
+	int				buck2_voltage2;
 	int				buck1_set1;
 	int				buck1_set2;
+	int				buck1_default_idx;
 	int				buck2_set3;
+	int				buck2_default_idx;
+	bool				wakeup;
+	bool				rtc_delay;
 };
 
 #endif /*  __LINUX_MFD_MAX8998_H */
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b4c741e..7d0f3d6 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
  * Copyright 2009-2010 Pengutronix
  * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
  *
@@ -122,39 +123,39 @@
 		unsigned int channel, unsigned int *sample);
 
 
-#define	MC13783_SW_SW1A		0
-#define	MC13783_SW_SW1B		1
-#define	MC13783_SW_SW2A		2
-#define	MC13783_SW_SW2B		3
-#define	MC13783_SW_SW3		4
-#define	MC13783_SW_PLL		5
-#define	MC13783_REGU_VAUDIO	6
-#define	MC13783_REGU_VIOHI	7
-#define	MC13783_REGU_VIOLO	8
-#define	MC13783_REGU_VDIG	9
-#define	MC13783_REGU_VGEN	10
-#define	MC13783_REGU_VRFDIG	11
-#define	MC13783_REGU_VRFREF	12
-#define	MC13783_REGU_VRFCP	13
-#define	MC13783_REGU_VSIM	14
-#define	MC13783_REGU_VESIM	15
-#define	MC13783_REGU_VCAM	16
-#define	MC13783_REGU_VRFBG	17
-#define	MC13783_REGU_VVIB	18
-#define	MC13783_REGU_VRF1	19
-#define	MC13783_REGU_VRF2	20
-#define	MC13783_REGU_VMMC1	21
-#define	MC13783_REGU_VMMC2	22
-#define	MC13783_REGU_GPO1	23
-#define	MC13783_REGU_GPO2	24
-#define	MC13783_REGU_GPO3	25
-#define	MC13783_REGU_GPO4	26
-#define	MC13783_REGU_V1		27
-#define	MC13783_REGU_V2		28
-#define	MC13783_REGU_V3		29
-#define	MC13783_REGU_V4		30
-#define	MC13783_REGU_PWGT1SPI	31
-#define	MC13783_REGU_PWGT2SPI	32
+#define	MC13783_REG_SW1A		0
+#define	MC13783_REG_SW1B		1
+#define	MC13783_REG_SW2A		2
+#define	MC13783_REG_SW2B		3
+#define	MC13783_REG_SW3		4
+#define	MC13783_REG_PLL		5
+#define	MC13783_REG_VAUDIO	6
+#define	MC13783_REG_VIOHI	7
+#define	MC13783_REG_VIOLO	8
+#define	MC13783_REG_VDIG	9
+#define	MC13783_REG_VGEN	10
+#define	MC13783_REG_VRFDIG	11
+#define	MC13783_REG_VRFREF	12
+#define	MC13783_REG_VRFCP	13
+#define	MC13783_REG_VSIM	14
+#define	MC13783_REG_VESIM	15
+#define	MC13783_REG_VCAM	16
+#define	MC13783_REG_VRFBG	17
+#define	MC13783_REG_VVIB	18
+#define	MC13783_REG_VRF1	19
+#define	MC13783_REG_VRF2	20
+#define	MC13783_REG_VMMC1	21
+#define	MC13783_REG_VMMC2	22
+#define	MC13783_REG_GPO1	23
+#define	MC13783_REG_GPO2	24
+#define	MC13783_REG_GPO3	25
+#define	MC13783_REG_GPO4	26
+#define	MC13783_REG_V1		27
+#define	MC13783_REG_V2		28
+#define	MC13783_REG_V3		29
+#define	MC13783_REG_V4		30
+#define	MC13783_REG_PWGT1SPI	31
+#define	MC13783_REG_PWGT2SPI	32
 
 #define MC13783_IRQ_ADCDONE	MC13XXX_IRQ_ADCDONE
 #define MC13783_IRQ_ADCBISDONE	MC13XXX_IRQ_ADCBISDONE
diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h
new file mode 100644
index 0000000..a00f2be
--- /dev/null
+++ b/include/linux/mfd/mc13892.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MC13892_H
+#define __LINUX_MFD_MC13892_H
+
+#include <linux/mfd/mc13xxx.h>
+
+#define MC13892_SW1		0
+#define MC13892_SW2		1
+#define MC13892_SW3		2
+#define MC13892_SW4		3
+#define MC13892_SWBST	4
+#define MC13892_VIOHI	5
+#define MC13892_VPLL	6
+#define MC13892_VDIG	7
+#define MC13892_VSD	8
+#define MC13892_VUSB2	9
+#define MC13892_VVIDEO	10
+#define MC13892_VAUDIO	11
+#define MC13892_VCAM	12
+#define MC13892_VGEN1	13
+#define MC13892_VGEN2	14
+#define MC13892_VGEN3	15
+#define MC13892_VUSB	16
+#define MC13892_GPO1	17
+#define MC13892_GPO2	18
+#define MC13892_GPO3	19
+#define MC13892_GPO4	20
+#define MC13892_PWGT1SPI	21
+#define MC13892_PWGT2SPI	22
+#define MC13892_VCOINCELL	23
+
+#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 085f041..8e70310 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -57,6 +57,10 @@
  * is configured in 4-bit mode.
  */
 #define TMIO_MMC_BLKSZ_2BYTES		(1 << 1)
+/*
+ * Some controllers can support SDIO IRQ signalling.
+ */
+#define TMIO_MMC_SDIO_IRQ		(1 << 2)
 
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -66,6 +70,7 @@
 struct tmio_mmc_dma {
 	void *chan_priv_tx;
 	void *chan_priv_rx;
+	int alignment_shift;
 };
 
 /*
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index a1239c4..903280d 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -245,6 +245,7 @@
 	WM8320 = 0x8320,
 	WM8321 = 0x8321,
 	WM8325 = 0x8325,
+	WM8326 = 0x8326,
 };
 
 struct wm831x {
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index de79bae..3fd3684 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -17,6 +17,11 @@
 
 #include <linux/interrupt.h>
 
+enum wm8994_type {
+	WM8994 = 0,
+	WM8958 = 1,
+};
+
 struct regulator_dev;
 struct regulator_bulk_data;
 
@@ -48,6 +53,8 @@
 	struct mutex io_lock;
 	struct mutex irq_lock;
 
+	enum wm8994_type type;
+
 	struct device *dev;
 	int (*read_dev)(struct wm8994 *wm8994, unsigned short reg,
 			int bytes, void *dest);
@@ -68,6 +75,7 @@
 	u16 gpio_regs[WM8994_NUM_GPIO_REGS];
 
 	struct regulator_dev *dbvdd;
+	int num_supplies;
 	struct regulator_bulk_data *supplies;
 };
 
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index add8a1b..9eab263 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -30,6 +30,8 @@
 
 #define WM8994_DRC_REGS 5
 #define WM8994_EQ_REGS  20
+#define WM8958_MBC_CUTOFF_REGS 20
+#define WM8958_MBC_COEFF_REGS  48
 
 /**
  * DRC configurations are specified with a label and a set of register
@@ -59,6 +61,18 @@
         u16 regs[WM8994_EQ_REGS];
 };
 
+/**
+ * Multiband compressor configurations are specified with a label and
+ * two sets of values to write.  Configurations are expected to be
+ * generated using the multiband compressor configuration panel in
+ * WISCE - see http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_mbc_cfg {
+	const char *name;
+	u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS];
+	u16 coeff_regs[WM8958_MBC_COEFF_REGS];
+};
+
 struct wm8994_pdata {
 	int gpio_base;
 
@@ -78,6 +92,9 @@
         int num_retune_mobile_cfgs;
         struct wm8994_retune_mobile_cfg *retune_mobile_cfgs;
 
+	int num_mbc_cfgs;
+	struct wm8958_mbc_cfg *mbc_cfgs;
+
         /* LINEOUT can be differential or single ended */
         unsigned int lineout1_diff:1;
         unsigned int lineout2_diff:1;
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 967f62f..be072fa 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -64,12 +64,16 @@
 #define WM8994_LDO_1                            0x3B
 #define WM8994_LDO_2                            0x3C
 #define WM8994_CHARGE_PUMP_1                    0x4C
+#define WM8958_CHARGE_PUMP_2                    0x4D
 #define WM8994_CLASS_W_1                        0x51
 #define WM8994_DC_SERVO_1                       0x54
 #define WM8994_DC_SERVO_2                       0x55
 #define WM8994_DC_SERVO_4                       0x57
 #define WM8994_DC_SERVO_READBACK                0x58
 #define WM8994_ANALOGUE_HP_1                    0x60
+#define WM8958_MIC_DETECT_1                     0xD0
+#define WM8958_MIC_DETECT_2                     0xD1
+#define WM8958_MIC_DETECT_3                     0xD2
 #define WM8994_CHIP_REVISION                    0x100
 #define WM8994_CONTROL_INTERFACE                0x101
 #define WM8994_WRITE_SEQUENCER_CTRL_1           0x110
@@ -109,6 +113,10 @@
 #define WM8994_AIF2DAC_LRCLK                    0x315
 #define WM8994_AIF2DAC_DATA                     0x316
 #define WM8994_AIF2ADC_DATA                     0x317
+#define WM8958_AIF3_CONTROL_1                   0x320
+#define WM8958_AIF3_CONTROL_2                   0x321
+#define WM8958_AIF3DAC_DATA                     0x322
+#define WM8958_AIF3ADC_DATA                     0x323
 #define WM8994_AIF1_ADC1_LEFT_VOLUME            0x400
 #define WM8994_AIF1_ADC1_RIGHT_VOLUME           0x401
 #define WM8994_AIF1_DAC1_LEFT_VOLUME            0x402
@@ -242,6 +250,83 @@
 #define WM8994_INTERRUPT_STATUS_2_MASK          0x739
 #define WM8994_INTERRUPT_CONTROL                0x740
 #define WM8994_IRQ_DEBOUNCE                     0x748
+#define WM8958_DSP2_PROGRAM                     0x900
+#define WM8958_DSP2_CONFIG                      0x901
+#define WM8958_DSP2_MAGICNUM                    0xA00
+#define WM8958_DSP2_RELEASEYEAR                 0xA01
+#define WM8958_DSP2_RELEASEMONTHDAY             0xA02
+#define WM8958_DSP2_RELEASETIME                 0xA03
+#define WM8958_DSP2_VERMAJMIN                   0xA04
+#define WM8958_DSP2_VERBUILD                    0xA05
+#define WM8958_DSP2_EXECCONTROL                 0xA0D
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1     0x2200
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2     0x2201
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1     0x2202
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2     0x2203
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1     0x2204
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2     0x2205
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1     0x2206
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2     0x2207
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1     0x2208
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2     0x2209
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1     0x220A
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2     0x220B
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1     0x220C
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2     0x220D
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1     0x220E
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2     0x220F
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1     0x2210
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2     0x2211
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1        0x2212
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2        0x2213
+#define WM8958_MBC_BAND_1_K_1                   0x2400
+#define WM8958_MBC_BAND_1_K_2                   0x2401
+#define WM8958_MBC_BAND_1_N1_1                  0x2402
+#define WM8958_MBC_BAND_1_N1_2                  0x2403
+#define WM8958_MBC_BAND_1_N2_1                  0x2404
+#define WM8958_MBC_BAND_1_N2_2                  0x2405
+#define WM8958_MBC_BAND_1_N3_1                  0x2406
+#define WM8958_MBC_BAND_1_N3_2                  0x2407
+#define WM8958_MBC_BAND_1_N4_1                  0x2408
+#define WM8958_MBC_BAND_1_N4_2                  0x2409
+#define WM8958_MBC_BAND_1_N5_1                  0x240A
+#define WM8958_MBC_BAND_1_N5_2                  0x240B
+#define WM8958_MBC_BAND_1_X1_1                  0x240C
+#define WM8958_MBC_BAND_1_X1_2                  0x240D
+#define WM8958_MBC_BAND_1_X2_1                  0x240E
+#define WM8958_MBC_BAND_1_X2_2                  0x240F
+#define WM8958_MBC_BAND_1_X3_1                  0x2410
+#define WM8958_MBC_BAND_1_X3_2                  0x2411
+#define WM8958_MBC_BAND_1_ATTACK_1              0x2412
+#define WM8958_MBC_BAND_1_ATTACK_2              0x2413
+#define WM8958_MBC_BAND_1_DECAY_1               0x2414
+#define WM8958_MBC_BAND_1_DECAY_2               0x2415
+#define WM8958_MBC_BAND_2_K_1                   0x2416
+#define WM8958_MBC_BAND_2_K_2                   0x2417
+#define WM8958_MBC_BAND_2_N1_1                  0x2418
+#define WM8958_MBC_BAND_2_N1_2                  0x2419
+#define WM8958_MBC_BAND_2_N2_1                  0x241A
+#define WM8958_MBC_BAND_2_N2_2                  0x241B
+#define WM8958_MBC_BAND_2_N3_1                  0x241C
+#define WM8958_MBC_BAND_2_N3_2                  0x241D
+#define WM8958_MBC_BAND_2_N4_1                  0x241E
+#define WM8958_MBC_BAND_2_N4_2                  0x241F
+#define WM8958_MBC_BAND_2_N5_1                  0x2420
+#define WM8958_MBC_BAND_2_N5_2                  0x2421
+#define WM8958_MBC_BAND_2_X1_1                  0x2422
+#define WM8958_MBC_BAND_2_X1_2                  0x2423
+#define WM8958_MBC_BAND_2_X2_1                  0x2424
+#define WM8958_MBC_BAND_2_X2_2                  0x2425
+#define WM8958_MBC_BAND_2_X3_1                  0x2426
+#define WM8958_MBC_BAND_2_X3_2                  0x2427
+#define WM8958_MBC_BAND_2_ATTACK_1              0x2428
+#define WM8958_MBC_BAND_2_ATTACK_2              0x2429
+#define WM8958_MBC_BAND_2_DECAY_1               0x242A
+#define WM8958_MBC_BAND_2_DECAY_2               0x242B
+#define WM8958_MBC_B2_PG2_1                     0x242C
+#define WM8958_MBC_B2_PG2_2                     0x242D
+#define WM8958_MBC_B1_PG2_1                     0x242E
+#define WM8958_MBC_B1_PG2_2                     0x242F
 #define WM8994_WRITE_SEQUENCER_0                0x3000
 #define WM8994_WRITE_SEQUENCER_1                0x3001
 #define WM8994_WRITE_SEQUENCER_2                0x3002
@@ -992,6 +1077,12 @@
 /*
  * R6 (0x06) - Power Management (6)
  */
+#define WM8958_AIF3ADC_SRC_MASK                 0x0600  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_SHIFT                     9  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_WIDTH                     2  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF2DAC_SRC_MASK                 0x0180  /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_SHIFT                     7  /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_WIDTH                     2  /* AIF2DAC_SRC - [8:7] */
 #define WM8994_AIF3_TRI                         0x0020  /* AIF3_TRI */
 #define WM8994_AIF3_TRI_MASK                    0x0020  /* AIF3_TRI */
 #define WM8994_AIF3_TRI_SHIFT                        5  /* AIF3_TRI */
@@ -1836,6 +1927,14 @@
 #define WM8994_CP_ENA_WIDTH                          1  /* CP_ENA */
 
 /*
+ * R77 (0x4D) - Charge Pump (2)
+ */
+#define WM8958_CP_DISCH                         0x8000  /* CP_DISCH */
+#define WM8958_CP_DISCH_MASK                    0x8000  /* CP_DISCH */
+#define WM8958_CP_DISCH_SHIFT                       15  /* CP_DISCH */
+#define WM8958_CP_DISCH_WIDTH                        1  /* CP_DISCH */
+
+/*
  * R81 (0x51) - Class W (1)
  */
 #define WM8994_CP_DYN_SRC_SEL_MASK              0x0300  /* CP_DYN_SRC_SEL - [9:8] */
@@ -1952,6 +2051,46 @@
 #define WM8994_HPOUT1R_DLY_WIDTH                     1  /* HPOUT1R_DLY */
 
 /*
+ * R208 (0xD0) - Mic Detect 1
+ */
+#define WM8958_MICD_BIAS_STARTTIME_MASK         0xF000  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_SHIFT            12  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_WIDTH             4  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_RATE_MASK                   0x0F00  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_SHIFT                       8  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_WIDTH                       4  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_DBTIME                      0x0002  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_MASK                 0x0002  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_SHIFT                     1  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_WIDTH                     1  /* MICD_DBTIME */
+#define WM8958_MICD_ENA                         0x0001  /* MICD_ENA */
+#define WM8958_MICD_ENA_MASK                    0x0001  /* MICD_ENA */
+#define WM8958_MICD_ENA_SHIFT                        0  /* MICD_ENA */
+#define WM8958_MICD_ENA_WIDTH                        1  /* MICD_ENA */
+
+/*
+ * R209 (0xD1) - Mic Detect 2
+ */
+#define WM8958_MICD_LVL_SEL_MASK                0x00FF  /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_SHIFT                    0  /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_WIDTH                    8  /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R210 (0xD2) - Mic Detect 3
+ */
+#define WM8958_MICD_LVL_MASK                    0x07FC  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_SHIFT                        2  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_WIDTH                        9  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_VALID                       0x0002  /* MICD_VALID */
+#define WM8958_MICD_VALID_MASK                  0x0002  /* MICD_VALID */
+#define WM8958_MICD_VALID_SHIFT                      1  /* MICD_VALID */
+#define WM8958_MICD_VALID_WIDTH                      1  /* MICD_VALID */
+#define WM8958_MICD_STS                         0x0001  /* MICD_STS */
+#define WM8958_MICD_STS_MASK                    0x0001  /* MICD_STS */
+#define WM8958_MICD_STS_SHIFT                        0  /* MICD_STS */
+#define WM8958_MICD_STS_WIDTH                        1  /* MICD_STS */
+
+/*
  * R256 (0x100) - Chip Revision
  */
 #define WM8994_CHIP_REV_MASK                    0x000F  /* CHIP_REV - [3:0] */
@@ -2069,6 +2208,14 @@
 /*
  * R520 (0x208) - Clocking (1)
  */
+#define WM8958_DSP2CLK_ENA                      0x4000  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_MASK                 0x4000  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_SHIFT                    14  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_WIDTH                     1  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_SRC                      0x1000  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_MASK                 0x1000  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_SHIFT                    12  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_WIDTH                     1  /* DSP2CLK_SRC */
 #define WM8994_TOCLK_ENA                        0x0010  /* TOCLK_ENA */
 #define WM8994_TOCLK_ENA_MASK                   0x0010  /* TOCLK_ENA */
 #define WM8994_TOCLK_ENA_SHIFT                       4  /* TOCLK_ENA */
@@ -2553,6 +2700,63 @@
 #define WM8994_AIF2ADCR_DAT_INV_WIDTH                1  /* AIF2ADCR_DAT_INV */
 
 /*
+ * R800 (0x320) - AIF3 Control (1)
+ */
+#define WM8958_AIF3_LRCLK_INV                   0x0080  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_MASK              0x0080  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_SHIFT                  7  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_WIDTH                  1  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_WL_MASK                     0x0060  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_SHIFT                         5  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_WIDTH                         2  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_FMT_MASK                    0x0018  /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_SHIFT                        3  /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_WIDTH                        2  /* AIF3_FMT - [4:3] */
+
+/*
+ * R801 (0x321) - AIF3 Control (2)
+ */
+#define WM8958_AIF3DAC_BOOST_MASK               0x0C00  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_SHIFT                  10  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_WIDTH                   2  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_COMP                     0x0010  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_MASK                0x0010  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_SHIFT                    4  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_WIDTH                    1  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMPMODE                 0x0008  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_MASK            0x0008  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_SHIFT                3  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_WIDTH                1  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3ADC_COMP                     0x0004  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_MASK                0x0004  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_SHIFT                    2  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_WIDTH                    1  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMPMODE                 0x0002  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_MASK            0x0002  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_SHIFT                1  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_WIDTH                1  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3_LOOPBACK                    0x0001  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_MASK               0x0001  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_SHIFT                   0  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_WIDTH                   1  /* AIF3_LOOPBACK */
+
+/*
+ * R802 (0x322) - AIF3DAC Data
+ */
+#define WM8958_AIF3DAC_DAT_INV                  0x0001  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_MASK             0x0001  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_SHIFT                 0  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_WIDTH                 1  /* AIF3DAC_DAT_INV */
+
+/*
+ * R803 (0x323) - AIF3ADC Data
+ */
+#define WM8958_AIF3ADC_DAT_INV                  0x0001  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_MASK             0x0001  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_SHIFT                 0  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_WIDTH                 1  /* AIF3ADC_DAT_INV */
+
+/*
  * R1024 (0x400) - AIF1 ADC1 Left Volume
  */
 #define WM8994_AIF1ADC1_VU                      0x0100  /* AIF1ADC1_VU */
@@ -4289,4 +4493,102 @@
 #define WM8994_TEMP_SHUT_DB_SHIFT                    0  /* TEMP_SHUT_DB */
 #define WM8994_TEMP_SHUT_DB_WIDTH                    1  /* TEMP_SHUT_DB */
 
+/*
+ * R2304 (0x900) - DSP2_Program
+ */
+#define WM8958_DSP2_ENA                         0x0001  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_MASK                    0x0001  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_SHIFT                        0  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_WIDTH                        1  /* DSP2_ENA */
+
+/*
+ * R2305 (0x901) - DSP2_Config
+ */
+#define WM8958_MBC_SEL_MASK                     0x0030  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_SHIFT                         4  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_WIDTH                         2  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_ENA                          0x0001  /* MBC_ENA */
+#define WM8958_MBC_ENA_MASK                     0x0001  /* MBC_ENA */
+#define WM8958_MBC_ENA_SHIFT                         0  /* MBC_ENA */
+#define WM8958_MBC_ENA_WIDTH                         1  /* MBC_ENA */
+
+/*
+ * R2560 (0xA00) - DSP2_MagicNum
+ */
+#define WM8958_DSP2_MAGIC_NUM_MASK              0xFFFF  /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_SHIFT                  0  /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_WIDTH                 16  /* DSP2_MAGIC_NUM - [15:0] */
+
+/*
+ * R2561 (0xA01) - DSP2_ReleaseYear
+ */
+#define WM8958_DSP2_RELEASE_YEAR_MASK           0xFFFF  /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_SHIFT               0  /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_WIDTH              16  /* DSP2_RELEASE_YEAR - [15:0] */
+
+/*
+ * R2562 (0xA02) - DSP2_ReleaseMonthDay
+ */
+#define WM8958_DSP2_RELEASE_MONTH_MASK          0xFF00  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_SHIFT              8  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_WIDTH              8  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_DAY_MASK            0x00FF  /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_SHIFT                0  /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_WIDTH                8  /* DSP2_RELEASE_DAY - [7:0] */
+
+/*
+ * R2563 (0xA03) - DSP2_ReleaseTime
+ */
+#define WM8958_DSP2_RELEASE_HOURS_MASK          0xFF00  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_SHIFT              8  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_WIDTH              8  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_MINS_MASK           0x00FF  /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_SHIFT               0  /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_WIDTH               8  /* DSP2_RELEASE_MINS - [7:0] */
+
+/*
+ * R2564 (0xA04) - DSP2_VerMajMin
+ */
+#define WM8958_DSP2_MAJOR_VER_MASK              0xFF00  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_SHIFT                  8  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_WIDTH                  8  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MINOR_VER_MASK              0x00FF  /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_SHIFT                  0  /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_WIDTH                  8  /* DSP2_MINOR_VER - [7:0] */
+
+/*
+ * R2565 (0xA05) - DSP2_VerBuild
+ */
+#define WM8958_DSP2_BUILD_VER_MASK              0xFFFF  /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_SHIFT                  0  /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_WIDTH                 16  /* DSP2_BUILD_VER - [15:0] */
+
+/*
+ * R2573 (0xA0D) - DSP2_ExecControl
+ */
+#define WM8958_DSP2_STOPC                       0x0020  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_MASK                  0x0020  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_SHIFT                      5  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_WIDTH                      1  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPS                       0x0010  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_MASK                  0x0010  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_SHIFT                      4  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_WIDTH                      1  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPI                       0x0008  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_MASK                  0x0008  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_SHIFT                      3  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_WIDTH                      1  /* DSP2_STOPI */
+#define WM8958_DSP2_STOP                        0x0004  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_MASK                   0x0004  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_SHIFT                       2  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_WIDTH                       1  /* DSP2_STOP */
+#define WM8958_DSP2_RUNR                        0x0002  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_MASK                   0x0002  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_SHIFT                       1  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_WIDTH                       1  /* DSP2_RUNR */
+#define WM8958_DSP2_RUN                         0x0001  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_MASK                    0x0001  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_SHIFT                        0  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_WIDTH                        1  /* DSP2_RUN */
+
 #endif
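
The WM8958 additions above follow this header's _MASK/_SHIFT/_WIDTH convention: a multi-bit field is read by masking and shifting, and a single-bit field is written through its mask. As a rough illustration only (not part of this patch), a codec driver could use the new Mic Detect fields along these lines; it assumes the wm8994 MFD core's wm8994_reg_read()/wm8994_set_bits() helpers, and the 0xD0/0xD2 addresses are taken from the R208/R210 comments above.

#include <linux/errno.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/registers.h>

/* Illustrative only: read the measured MICD_LVL field from R210 (0xD2). */
static int wm8958_read_micd_level(struct wm8994 *wm8994)
{
	int reg = wm8994_reg_read(wm8994, 0xD2);

	if (reg < 0)
		return reg;
	if (!(reg & WM8958_MICD_VALID))
		return -EBUSY;

	return (reg & WM8958_MICD_LVL_MASK) >> WM8958_MICD_LVL_SHIFT;
}

/* Illustrative only: enable mic detection in R208 (0xD0). */
static int wm8958_enable_micd(struct wm8994 *wm8994)
{
	return wm8994_set_bits(wm8994, 0xD0,
			       WM8958_MICD_ENA_MASK, WM8958_MICD_ENA);
}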
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 085527f..e39aeec 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,9 +13,11 @@
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, bool offlining,
+			bool sync);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, bool offlining,
+			bool sync);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -33,9 +35,11 @@
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, bool offlining,
+		bool sync) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, bool offlining,
+		bool sync) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
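
migrate_pages() and migrate_huge_pages() now take the offlining flag as a bool plus a new sync flag. A hedged caller sketch, not taken from any in-tree user: the allocator callback shape follows the new_page_t typedef earlier in this header, and the node and GFP choices are arbitrary.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>

/* Hypothetical allocator callback matching the new_page_t typedef. */
static struct page *new_page_on_node0(struct page *page,
				      unsigned long private, int **result)
{
	return alloc_pages_node(0, GFP_HIGHUSER_MOVABLE, 0);
}

static int example_drain_list(struct list_head *pagelist)
{
	/* private cookie = 0, offlining = false, synchronous migration. */
	return migrate_pages(pagelist, new_page_on_node0, 0, false, true);
}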
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 721f451..956a355 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
 #include <linux/mm_types.h>
 #include <linux/range.h>
 #include <linux/pfn.h>
+#include <linux/bit_spinlock.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -82,6 +83,7 @@
 #define VM_GROWSUP	0x00000200
 #else
 #define VM_GROWSUP	0x00000000
+#define VM_NOHUGEPAGE	0x00000200	/* MADV_NOHUGEPAGE marked this vma */
 #endif
 #define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
@@ -101,7 +103,11 @@
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#else
+#define VM_HUGEPAGE	0x01000000	/* MADV_HUGEPAGE marked this vma */
+#endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
@@ -242,6 +248,7 @@
  * files which need it (119 of them)
  */
 #include <linux/page-flags.h>
+#include <linux/huge_mm.h>
 
 /*
  * Methods to modify the page usage count.
@@ -305,6 +312,39 @@
 }
 #endif
 
+static inline void compound_lock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	bit_spin_lock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline void compound_unlock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	bit_spin_unlock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline unsigned long compound_lock_irqsave(struct page *page)
+{
+	unsigned long uninitialized_var(flags);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	local_irq_save(flags);
+	compound_lock(page);
+#endif
+	return flags;
+}
+
+static inline void compound_unlock_irqrestore(struct page *page,
+					      unsigned long flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	compound_unlock(page);
+	local_irq_restore(flags);
+#endif
+}
+
 static inline struct page *compound_head(struct page *page)
 {
 	if (unlikely(PageTail(page)))
@@ -319,9 +359,29 @@
 
 static inline void get_page(struct page *page)
 {
-	page = compound_head(page);
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	/*
+	 * Getting a normal page or the head of a compound page
+	 * requires an already elevated page->_count. Only when getting
+	 * a tail page is the elevated count required just in the head
+	 * page, so for tail pages the bugcheck only verifies that
+	 * page->_count isn't negative.
+	 */
+	VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
 	atomic_inc(&page->_count);
+	/*
+	 * Getting a tail page will elevate both the head and tail
+	 * page->_count(s).
+	 */
+	if (unlikely(PageTail(page))) {
+		/*
+		 * This is safe only because
+		 * __split_huge_page_refcount can't run under
+		 * get_page().
+		 */
+		VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+		atomic_inc(&page->first_page->_count);
+	}
 }
 
 static inline struct page *virt_to_head_page(const void *x)
@@ -339,6 +399,27 @@
 	atomic_set(&page->_count, 1);
 }
 
+/*
+ * PageBuddy() indicate that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ */
+static inline int PageBuddy(struct page *page)
+{
+	return atomic_read(&page->_mapcount) == -2;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	atomic_set(&page->_mapcount, -2);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+	VM_BUG_ON(!PageBuddy(page));
+	atomic_set(&page->_mapcount, -1);
+}
+
 void put_page(struct page *page);
 void put_pages_list(struct list_head *pages);
 
@@ -370,12 +451,39 @@
 	return (unsigned long)page[1].lru.prev;
 }
 
+static inline int compound_trans_order(struct page *page)
+{
+	int order;
+	unsigned long flags;
+
+	if (!PageHead(page))
+		return 0;
+
+	flags = compound_lock_irqsave(page);
+	order = compound_order(page);
+	compound_unlock_irqrestore(page, flags);
+	return order;
+}
+
 static inline void set_compound_order(struct page *page, unsigned long order)
 {
 	page[1].lru.prev = (void *)order;
 }
 
 /*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
+ * servicing faults for write access.  In the normal case, we always want
+ * pte_mkwrite.  But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pte = pte_mkwrite(pte);
+	return pte;
+}
+
+/*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
  * zeroes, and text pages of executables and shared libraries have
@@ -657,7 +765,7 @@
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 		mapping = NULL;
 	return mapping;
 }
@@ -1064,7 +1172,8 @@
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 #endif
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long address);
 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
 
 /*
@@ -1133,16 +1242,18 @@
 	pte_unmap(pte);					\
 } while (0)
 
-#define pte_alloc_map(mm, pmd, address)			\
-	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
-		NULL: pte_offset_map(pmd, address))
+#define pte_alloc_map(mm, vma, pmd, address)				\
+	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
+							pmd, address))?	\
+	 NULL: pte_offset_map(pmd, address))
 
 #define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
-	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
+							pmd, address))?	\
 		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
 
 #define pte_alloc_kernel(pmd, address)			\
-	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 		NULL: pte_offset_kernel(pmd, address))
 
 extern void free_area_init(unsigned long * zones_size);
@@ -1415,6 +1526,8 @@
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
+#define FOLL_MLOCK	0x40	/* mark page as mlocked */
+#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
@@ -1518,5 +1631,14 @@
 
 extern void dump_page(struct page *page);
 
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+extern void clear_huge_page(struct page *page,
+			    unsigned long addr,
+			    unsigned int pages_per_huge_page);
+extern void copy_user_huge_page(struct page *dst, struct page *src,
+				unsigned long addr, struct vm_area_struct *vma,
+				unsigned int pages_per_huge_page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
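
The maybe_mkwrite() helper added above captures the fault-path rule its comment describes: only mark the pte writable when the vma allows writes. Below is a small sketch of that pattern, similar to (but not copied from) what the fault handlers in mm/memory.c do; the helper name is made up.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative only: build a pte for a freshly faulted-in page. */
static pte_t example_fault_pte(struct page *page, struct vm_area_struct *vma,
			       bool write_fault)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	if (write_fault)
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	return entry;
}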
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8835b87..8f7d247 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,6 +1,8 @@
 #ifndef LINUX_MM_INLINE_H
 #define LINUX_MM_INLINE_H
 
+#include <linux/huge_mm.h>
+
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -20,18 +22,25 @@
 }
 
 static inline void
+__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
+		       struct list_head *head)
+{
+	list_add(&page->lru, head);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
+	mem_cgroup_add_lru_list(page, l);
+}
+
+static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	list_add(&page->lru, &zone->lru[l].list);
-	__inc_zone_state(zone, NR_LRU_BASE + l);
-	mem_cgroup_add_lru_list(page, l);
+	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
 }
 
 static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
@@ -66,7 +75,7 @@
 			l += LRU_ACTIVE;
 		}
 	}
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
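
The LRU statistics above are now adjusted by hpage_nr_pages() instead of a fixed one, so a transparent huge page accounts for all of its base pages. The helper itself lives in <linux/huge_mm.h> (included above); conceptually it is presumably equivalent to the sketch below, where PageTransHuge() and HPAGE_PMD_NR are taken on trust from that header.

#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Conceptual sketch only, not the real definition from huge_mm.h. */
static inline int hpage_nr_pages_sketch(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;	/* base pages behind one huge page */
#endif
	return 1;			/* ordinary base page */
}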
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bb7288a..26bc4e2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -310,6 +310,9 @@
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
+#endif
 	/* How many tasks sharing this mm are OOM_DISABLE */
 	atomic_t oom_disable_count;
 };
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 0000000..16b0261
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,217 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MMC_DW_MMC_H_
+#define _LINUX_MMC_DW_MMC_H_
+
+#define MAX_MCI_SLOTS	2
+
+enum dw_mci_state {
+	STATE_IDLE = 0,
+	STATE_SENDING_CMD,
+	STATE_SENDING_DATA,
+	STATE_DATA_BUSY,
+	STATE_SENDING_STOP,
+	STATE_DATA_ERROR,
+};
+
+enum {
+	EVENT_CMD_COMPLETE = 0,
+	EVENT_XFER_COMPLETE,
+	EVENT_DATA_COMPLETE,
+	EVENT_DATA_ERROR,
+	EVENT_XFER_ERROR
+};
+
+struct mmc_data;
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @pio_offset: Offset into the current scatterlist entry.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ *	or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ *	transfer is in progress.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ *	command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ *	data transfer. Only valid when EVENT_DATA_COMPLETE or
+ *	EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ *	to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @card_tasklet: Tasklet handling card detect.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ *	to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ *	processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ *	rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @pdev: Platform device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @slot: Slots sharing this MMC controller.
+ * @data_shift: log2 of FIFO item size.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+	spinlock_t		lock;
+	void __iomem		*regs;
+
+	struct scatterlist	*sg;
+	unsigned int		pio_offset;
+
+	struct dw_mci_slot	*cur_slot;
+	struct mmc_request	*mrq;
+	struct mmc_command	*cmd;
+	struct mmc_data		*data;
+
+	/* DMA interface members*/
+	int			use_dma;
+
+	dma_addr_t		sg_dma;
+	void			*sg_cpu;
+	struct dw_mci_dma_ops	*dma_ops;
+#ifdef CONFIG_MMC_DW_IDMAC
+	unsigned int		ring_size;
+#else
+	struct dw_mci_dma_data	*dma_data;
+#endif
+	u32			cmd_status;
+	u32			data_status;
+	u32			stop_cmdr;
+	u32			dir_status;
+	struct tasklet_struct	tasklet;
+	struct tasklet_struct	card_tasklet;
+	unsigned long		pending_events;
+	unsigned long		completed_events;
+	enum dw_mci_state	state;
+	struct list_head	queue;
+
+	u32			bus_hz;
+	u32			current_speed;
+	u32			num_slots;
+	struct platform_device	*pdev;
+	struct dw_mci_board	*pdata;
+	struct dw_mci_slot	*slot[MAX_MCI_SLOTS];
+
+	/* FIFO push and pull */
+	int			data_shift;
+	void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+	void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+	/* Workaround flags */
+	u32			quirks;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+	/* DMA Ops */
+	int (*init)(struct dw_mci *host);
+	void (*start)(struct dw_mci *host, unsigned int sg_len);
+	void (*complete)(struct dw_mci *host);
+	void (*stop)(struct dw_mci *host);
+	void (*cleanup)(struct dw_mci *host);
+	void (*exit)(struct dw_mci *host);
+};
+
+/* IP Quirks/flags. */
+/* No special quirks or flags to cater for */
+#define DW_MCI_QUIRK_NONE		0
+/* DTO fix for command transmission with IDMAC configured */
+#define DW_MCI_QUIRK_IDMAC_DTO		1
+/* delay needed between retries on some 2.11a implementations */
+#define DW_MCI_QUIRK_RETRY_DELAY	2
+/* High Speed Capable - Supports HS cards (up to 50 MHz) */
+#define DW_MCI_QUIRK_HIGHSPEED		4
+
+
+struct dma_pdata;
+
+struct block_settings {
+	unsigned short	max_segs;	/* see blk_queue_max_segments */
+	unsigned int	max_blk_size;	/* maximum size of one mmc block */
+	unsigned int	max_blk_count;	/* maximum number of blocks in one req*/
+	unsigned int	max_req_size;	/* maximum number of bytes in one req*/
+	unsigned int	max_seg_size;	/* see blk_queue_max_segment_size */
+};
+
+/* Board platform data */
+struct dw_mci_board {
+	u32 num_slots;
+
+	u32 quirks; /* Workaround / Quirk flags */
+	unsigned int bus_hz; /* Bus speed */
+
+	/* delay in ms before detecting cards after interrupt */
+	u32 detect_delay_ms;
+
+	int (*init)(u32 slot_id, irq_handler_t , void *);
+	int (*get_ro)(u32 slot_id);
+	int (*get_cd)(u32 slot_id);
+	int (*get_ocr)(u32 slot_id);
+	int (*get_bus_wd)(u32 slot_id);
+	/*
+	 * Enable power to selected slot and set voltage to desired level.
+	 * Voltage levels are specified using MMC_VDD_xxx defines defined
+	 * in linux/mmc/host.h file.
+	 */
+	void (*setpower)(u32 slot_id, u32 volt);
+	void (*exit)(u32 slot_id);
+	void (*select_slot)(u32 slot_id);
+
+	struct dw_mci_dma_ops *dma_ops;
+	struct dma_pdata *data;
+	struct block_settings *blk_settings;
+};
+
+#endif /* _LINUX_MMC_DW_MMC_H_ */
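
Board code feeds this driver a struct dw_mci_board through platform data. Here is a minimal, hypothetical single-slot example using only fields and quirk flags defined above; the clock rate, detect delay and always-present card-detect stub are arbitrary choices, not recommendations.

#include <linux/mmc/dw_mmc.h>

/* Hypothetical board glue: pretend a card is always inserted. */
static int example_dw_mci_get_cd(u32 slot_id)
{
	return 1;
}

static struct dw_mci_board example_dw_mci_pdata = {
	.num_slots	 = 1,
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz		 = 50 * 1000 * 1000,	/* controller clock in Hz */
	.detect_delay_ms = 200,
	.get_cd		 = example_dw_mci_get_cd,
};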
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 30f6fad..bcb793e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -131,6 +131,9 @@
 	unsigned int		f_max;
 	unsigned int		f_init;
 	u32			ocr_avail;
+	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
+	u32			ocr_avail_sd;	/* SD-specific OCR */
+	u32			ocr_avail_mmc;	/* MMC-specific OCR */
 	struct notifier_block	pm_notify;
 
 #define MMC_VDD_165_195		0x00000080	/* VDD voltage 1.65 - 1.95 */
@@ -169,9 +172,20 @@
 #define MMC_CAP_1_2V_DDR	(1 << 12)	/* can support */
 						/* DDR mode at 1.2V */
 #define MMC_CAP_POWER_OFF_CARD	(1 << 13)	/* Can power off after boot */
+#define MMC_CAP_BUS_WIDTH_TEST	(1 << 14)	/* CMD14/CMD19 bus width ok */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct work_struct	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -307,5 +321,10 @@
 	return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
 }
 
+static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
+{
+	return host->pm_flags & MMC_PM_KEEP_POWER;
+}
+
 #endif
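
With the new per-bus OCR masks, a host driver can advertise a different voltage window for SDIO, SD and MMC while keeping ocr_avail as the general mask; a zero per-bus mask presumably falls back to ocr_avail. An illustrative probe fragment, not taken from a real driver:

#include <linux/mmc/host.h>

/* Illustrative only: restrict SDIO to the low-voltage range. */
static void example_set_ocr_masks(struct mmc_host *mmc)
{
	mmc->ocr_avail      = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->ocr_avail_sdio = MMC_VDD_165_195;
}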
 
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 956fbd87..612301f 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -40,7 +40,9 @@
 #define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
 #define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
 #define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
+#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
 #define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
+#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
 #define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
 #define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
 
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 1fdc673..83bd9f7 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -83,6 +83,8 @@
 #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12		(1<<28)
 /* Controller doesn't have HISPD bit field in HI-SPEED SD card */
 #define SDHCI_QUIRK_NO_HISPD_BIT			(1<<29)
+/* Controller treats ADMA descriptors with length 0000h incorrectly */
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC		(1<<30)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -139,6 +141,10 @@
 
 	unsigned int caps;	/* Alternative capabilities */
 
+	unsigned int            ocr_avail_sdio;	/* OCR bit masks */
+	unsigned int            ocr_avail_sd;
+	unsigned int            ocr_avail_mmc;
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* __SDHCI_H */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index 5c99da1..bf17350 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -14,8 +14,9 @@
 #ifndef __SH_MMCIF_H__
 #define __SH_MMCIF_H__
 
-#include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sh_dma.h>
 
 /*
  * MMCIF : CE_CLK_CTRL [19:16]
@@ -31,13 +32,19 @@
  * 1111 : Peripheral clock (sup_pclk set '1')
  */
 
+struct sh_mmcif_dma {
+	struct sh_dmae_slave chan_priv_tx;
+	struct sh_dmae_slave chan_priv_rx;
+};
+
 struct sh_mmcif_plat_data {
 	void (*set_pwr)(struct platform_device *pdev, int state);
 	void (*down_pwr)(struct platform_device *pdev);
 	int (*get_cd)(struct platform_device *pdef);
-	u8	sup_pclk;	/* 1 :SH7757, 0: SH7724/SH7372 */
-	unsigned long caps;
-	u32	ocr;
+	struct sh_mmcif_dma	*dma;
+	u8			sup_pclk;	/* 1 :SH7757, 0: SH7724/SH7372 */
+	unsigned long		caps;
+	u32			ocr;
 };
 
 #define MMCIF_CE_CMD_SET	0x00000000
@@ -59,6 +66,32 @@
 #define MMCIF_CE_HOST_STS2	0x0000004C
 #define MMCIF_CE_VERSION	0x0000007C
 
+/* CE_BUF_ACC */
+#define BUF_ACC_DMAWEN		(1 << 25)
+#define BUF_ACC_DMAREN		(1 << 24)
+#define BUF_ACC_BUSW_32		(0 << 17)
+#define BUF_ACC_BUSW_16		(1 << 17)
+#define BUF_ACC_ATYP		(1 << 16)
+
+/* CE_CLK_CTRL */
+#define CLK_ENABLE		(1 << 24) /* 1: output mmc clock */
+#define CLK_CLEAR		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define CLK_SUP_PCLK		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
+#define CLKDIV_4		(1<<16) /* mmc clock frequency.
+					 * n: bus clock/(2^(n+1)) */
+#define CLKDIV_256		(7<<16) /* mmc clock frequency. (see above) */
+#define SRSPTO_256		((1 << 13) | (0 << 12)) /* resp timeout */
+#define SRBSYTO_29		((1 << 11) | (1 << 10) |	\
+				 (1 << 9) | (1 << 8)) /* resp busy timeout */
+#define SRWDTO_29		((1 << 7) | (1 << 6) |		\
+				 (1 << 5) | (1 << 4)) /* read/write timeout */
+#define SCCSTO_29		((1 << 3) | (1 << 2) |		\
+				 (1 << 1) | (1 << 0)) /* ccs timeout */
+
+/* CE_VERSION */
+#define SOFT_RST_ON		(1 << 31)
+#define SOFT_RST_OFF		0
+
 static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
 {
 	return readl(addr + reg);
@@ -71,6 +104,9 @@
 
 #define SH_MMCIF_BBS 512 /* boot block size */
 
+enum { MMCIF_PROGRESS_ENTER, MMCIF_PROGRESS_INIT,
+       MMCIF_PROGRESS_LOAD, MMCIF_PROGRESS_DONE };
+
 static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
 					  unsigned long cmd, unsigned long arg)
 {
@@ -133,6 +169,17 @@
 	unsigned long k;
 	int ret = 0;
 
+	/* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */
+	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+			CLK_ENABLE | CLKDIV_4 | SRSPTO_256 |
+			SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+
+	/* CMD9 - Get CSD */
+	sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000);
+
+	/* CMD7 - Select the card */
+	sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000);
+
 	/* CMD16 - Set the block size */
 	sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS);
 
@@ -145,21 +192,20 @@
 
 static inline void sh_mmcif_boot_init(void __iomem *base)
 {
-	unsigned long tmp;
-
 	/* reset */
-	tmp = sh_mmcif_readl(base, MMCIF_CE_VERSION);
-	sh_mmcif_writel(base, MMCIF_CE_VERSION, tmp | 0x80000000);
-	sh_mmcif_writel(base, MMCIF_CE_VERSION, tmp & ~0x80000000);
+	sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_ON);
+	sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_OFF);
 
 	/* byte swap */
-	sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, 0x00010000);
+	sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
 
 	/* Set block size in MMCIF hardware */
 	sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS);
 
-	/* Enable the clock, set it to Bus clock/256 (about 325Khz)*/
-	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, 0x01072fff);
+	/* Enable the clock, set it to Bus clock/256 (about 325Khz). */
+	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+			CLK_ENABLE | CLKDIV_256 | SRSPTO_256 |
+			SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
 
 	/* CMD0 */
 	sh_mmcif_boot_cmd(base, 0x00000040, 0);
@@ -177,25 +223,4 @@
 	sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
 }
 
-static inline void sh_mmcif_boot_slurp(void __iomem *base,
-				       unsigned char *buf,
-				       unsigned long no_bytes)
-{
-	unsigned long tmp;
-
-	/* In data transfer mode: Set clock to Bus clock/4 (about 20Mhz) */
-	sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL, 0x01012fff);
-
-	/* CMD9 - Get CSD */
-	sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000);
-
-	/* CMD7 - Select the card */
-	sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000);
-
-	tmp = no_bytes / SH_MMCIF_BBS;
-	tmp += (no_bytes % SH_MMCIF_BBS) ? 1 : 0;
-
-	sh_mmcif_boot_do_read(base, 512, tmp, buf);
-}
-
 #endif /* __SH_MMCIF_H__ */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 43dcfbd..cc2e7df 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -62,6 +62,16 @@
 				 unsigned long address);
 
 	/*
+	 * test_young is called to check the young/accessed bitflag in
+	 * the secondary pte. This is used to know if the page is
+	 * frequently used without actually clearing the flag or tearing
+	 * down the secondary mapping on the page.
+	 */
+	int (*test_young)(struct mmu_notifier *mn,
+			  struct mm_struct *mm,
+			  unsigned long address);
+
+	/*
 	 * change_pte is called in cases that pte mapping to page is changed:
 	 * for example, when ksm remaps pte to point to a new shared page.
 	 */
@@ -163,6 +173,8 @@
 extern void __mmu_notifier_release(struct mm_struct *mm);
 extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					  unsigned long address);
+extern int __mmu_notifier_test_young(struct mm_struct *mm,
+				     unsigned long address);
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
 				      unsigned long address, pte_t pte);
 extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
@@ -186,6 +198,14 @@
 	return 0;
 }
 
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_test_young(mm, address);
+	return 0;
+}
+
 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 					   unsigned long address, pte_t pte)
 {
@@ -243,6 +263,32 @@
 	__pte;								\
 })
 
+#define pmdp_clear_flush_notify(__vma, __address, __pmdp)		\
+({									\
+	pmd_t __pmd;							\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	VM_BUG_ON(__address & ~HPAGE_PMD_MASK);				\
+	mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address,	\
+					    (__address)+HPAGE_PMD_SIZE);\
+	__pmd = pmdp_clear_flush(___vma, ___address, __pmdp);		\
+	mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address,	\
+					  (__address)+HPAGE_PMD_SIZE);	\
+	__pmd;								\
+})
+
+#define pmdp_splitting_flush_notify(__vma, __address, __pmdp)		\
+({									\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	VM_BUG_ON(__address & ~HPAGE_PMD_MASK);				\
+	mmu_notifier_invalidate_range_start(___vma->vm_mm, ___address,	\
+					    (__address)+HPAGE_PMD_SIZE);\
+	pmdp_splitting_flush(___vma, ___address, __pmdp);		\
+	mmu_notifier_invalidate_range_end(___vma->vm_mm, ___address,	\
+					  (__address)+HPAGE_PMD_SIZE);	\
+})
+
 #define ptep_clear_flush_young_notify(__vma, __address, __ptep)		\
 ({									\
 	int __young;							\
@@ -254,6 +300,17 @@
 	__young;							\
 })
 
+#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)		\
+({									\
+	int __young;							\
+	struct vm_area_struct *___vma = __vma;				\
+	unsigned long ___address = __address;				\
+	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
+	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
+						  ___address);		\
+	__young;							\
+})
+
 #define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
 ({									\
 	struct mm_struct *___mm = __mm;					\
@@ -276,6 +333,12 @@
 	return 0;
 }
 
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+					  unsigned long address)
+{
+	return 0;
+}
+
 static inline void mmu_notifier_change_pte(struct mm_struct *mm,
 					   unsigned long address, pte_t pte)
 {
@@ -305,7 +368,10 @@
 }
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
 #define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_splitting_flush_notify pmdp_splitting_flush
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
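
The new ->test_young() callback lets a secondary MMU report whether a page has been referenced without clearing the accessed state or tearing down its mapping, so reclaim heuristics can see the page as young. Below is a sketch of a user providing it; the example_mmu type and its page-table check are made up.

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>

struct example_mmu {
	struct mmu_notifier	mn;
	/* ... device page tables would live here ... */
};

/* Hypothetical device-specific accessed-bit check (stubbed out). */
static int example_mmu_pte_young(struct example_mmu *emmu, unsigned long address)
{
	return 0;
}

static int example_test_young(struct mmu_notifier *mn, struct mm_struct *mm,
			      unsigned long address)
{
	struct example_mmu *emmu = container_of(mn, struct example_mmu, mn);

	/* Report the accessed bit without clearing it or unmapping. */
	return example_mmu_pte_young(emmu, address);
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.test_young	= example_test_young,
};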
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 39c24eb..02ecb01 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,6 +114,7 @@
 	NUMA_LOCAL,		/* allocation from local node */
 	NUMA_OTHER,		/* allocation from other node */
 #endif
+	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
@@ -458,12 +459,6 @@
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-#ifdef CONFIG_SMP
-unsigned long zone_nr_free_pages(struct zone *zone);
-#else
-#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
-#endif /* CONFIG_SMP */
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -645,6 +640,7 @@
 	wait_queue_head_t kswapd_wait;
 	struct task_struct *kswapd;
 	int kswapd_max_order;
+	enum zone_type classzone_idx;
 } pg_data_t;
 
 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
@@ -660,8 +656,10 @@
 
 extern struct mutex zonelists_mutex;
 void build_all_zonelists(void *data);
-void wakeup_kswapd(struct zone *zone, int order);
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
 enum memmap_context {
 	MEMMAP_EARLY,
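
zone_watermark_ok() now returns bool and wakeup_kswapd() takes the classzone index, so callers can wake kswapd with the same balancing target they just failed against. A simplified, illustrative fragment (alloc_flags forced to 0, low watermark chosen arbitrarily):

#include <linux/mmzone.h>

/* Illustrative only: check one zone and kick kswapd on failure. */
static bool example_zone_ok(struct zone *zone, int order, int classzone_idx)
{
	unsigned long mark = low_wmark_pages(zone);

	if (zone_watermark_ok(zone, order, mark, classzone_idx, 0))
		return true;

	wakeup_kswapd(zone, order, classzone_idx);
	return false;
}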
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5e7a594..1869ea2 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/nodemask.h>
 #include <linux/spinlock.h>
+#include <linux/seqlock.h>
 #include <asm/atomic.h>
 
 struct super_block;
@@ -46,12 +47,24 @@
 
 #define MNT_INTERNAL	0x4000
 
+struct mnt_pcp {
+	int mnt_count;
+	int mnt_writers;
+};
+
 struct vfsmount {
 	struct list_head mnt_hash;
 	struct vfsmount *mnt_parent;	/* fs we are mounted on */
 	struct dentry *mnt_mountpoint;	/* dentry of mountpoint */
 	struct dentry *mnt_root;	/* root of the mounted tree */
 	struct super_block *mnt_sb;	/* pointer to superblock */
+#ifdef CONFIG_SMP
+	struct mnt_pcp __percpu *mnt_pcp;
+	atomic_t mnt_longrefs;
+#else
+	int mnt_count;
+	int mnt_writers;
+#endif
 	struct list_head mnt_mounts;	/* list of children, anchored here */
 	struct list_head mnt_child;	/* and going through their mnt_child */
 	int mnt_flags;
@@ -70,57 +83,25 @@
 	struct mnt_namespace *mnt_ns;	/* containing namespace */
 	int mnt_id;			/* mount identifier */
 	int mnt_group_id;		/* peer group identifier */
-	/*
-	 * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount
-	 * to let these frequently modified fields in a separate cache line
-	 * (so that reads of mnt_flags wont ping-pong on SMP machines)
-	 */
-	atomic_t mnt_count;
 	int mnt_expiry_mark;		/* true if marked for expiry */
 	int mnt_pinned;
 	int mnt_ghosts;
-#ifdef CONFIG_SMP
-	int __percpu *mnt_writers;
-#else
-	int mnt_writers;
-#endif
 };
 
-static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
-{
-#ifdef CONFIG_SMP
-	return mnt->mnt_writers;
-#else
-	return &mnt->mnt_writers;
-#endif
-}
-
-static inline struct vfsmount *mntget(struct vfsmount *mnt)
-{
-	if (mnt)
-		atomic_inc(&mnt->mnt_count);
-	return mnt;
-}
-
 struct file; /* forward dec */
 
 extern int mnt_want_write(struct vfsmount *mnt);
 extern int mnt_want_write_file(struct file *file);
 extern int mnt_clone_write(struct vfsmount *mnt);
 extern void mnt_drop_write(struct vfsmount *mnt);
-extern void mntput_no_expire(struct vfsmount *mnt);
+extern void mntput(struct vfsmount *mnt);
+extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern void mntput_long(struct vfsmount *mnt);
+extern struct vfsmount *mntget_long(struct vfsmount *mnt);
 extern void mnt_pin(struct vfsmount *mnt);
 extern void mnt_unpin(struct vfsmount *mnt);
 extern int __mnt_is_readonly(struct vfsmount *mnt);
 
-static inline void mntput(struct vfsmount *mnt)
-{
-	if (mnt) {
-		mnt->mnt_expiry_mark = 0;
-		mntput_no_expire(mnt);
-	}
-}
-
 extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
 				      const char *name, void *data);
 
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 05b441d..18d06ad 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -19,7 +19,10 @@
 	struct path	path;
 	struct qstr	last;
 	struct path	root;
+	struct file	*file;
+	struct inode	*inode; /* path.dentry.d_inode */
 	unsigned int	flags;
+	unsigned	seq;
 	int		last_type;
 	unsigned	depth;
 	char *saved_names[MAX_NESTED_LINKS + 1];
@@ -41,14 +44,15 @@
  *  - require a directory
  *  - ending slashes ok even for nonexistent files
  *  - internal "there are more path components" flag
- *  - locked when lookup done with dcache_lock held
  *  - dentry cache is untrusted; force a real lookup
  */
-#define LOOKUP_FOLLOW		 1
-#define LOOKUP_DIRECTORY	 2
-#define LOOKUP_CONTINUE		 4
-#define LOOKUP_PARENT		16
-#define LOOKUP_REVAL		64
+#define LOOKUP_FOLLOW		0x0001
+#define LOOKUP_DIRECTORY	0x0002
+#define LOOKUP_CONTINUE		0x0004
+
+#define LOOKUP_PARENT		0x0010
+#define LOOKUP_REVAL		0x0020
+#define LOOKUP_RCU		0x0040
 /*
  * Intent data
  */
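
LOOKUP_RCU marks a nameidata that is in rcu-walk mode, where callbacks must not sleep. A hedged sketch of how a filesystem's ->d_revalidate() might react to it, returning -ECHILD to ask the VFS to retry in ref-walk mode; this is a generic pattern, not code from any particular filesystem.

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/namei.h>

static int example_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	if (nd && (nd->flags & LOOKUP_RCU))
		return -ECHILD;		/* cannot block under rcu-walk */

	/* ... normal, possibly blocking, revalidation would go here ... */
	return 1;
}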
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index ef66306..e13eefe 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -143,104 +143,4 @@
 #define NCP_MAXPATHLEN 255
 #define NCP_MAXNAMELEN 14
 
-#ifdef __KERNEL__
-
-#include <linux/ncp_fs_i.h>
-#include <linux/ncp_fs_sb.h>
-
-/* define because it is easy to change PRINTK to {*}PRINTK */
-#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args)
-
-#undef NCPFS_PARANOIA
-#ifdef NCPFS_PARANOIA
-#define PPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define PPRINTK(format, args...)
-#endif
-
-#ifndef DEBUG_NCP
-#define DEBUG_NCP 0
-#endif
-#if DEBUG_NCP > 0
-#define DPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define DPRINTK(format, args...)
-#endif
-#if DEBUG_NCP > 1
-#define DDPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define DDPRINTK(format, args...)
-#endif
-
-#define NCP_MAX_RPC_TIMEOUT (6*HZ)
-
-
-struct ncp_entry_info {
-	struct nw_info_struct	i;
-	ino_t			ino;
-	int			opened;
-	int			access;
-	unsigned int		volume;
-	__u8			file_handle[6];
-};
-
-static inline struct ncp_server *NCP_SBP(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-
-#define NCP_SERVER(inode)	NCP_SBP((inode)->i_sb)
-static inline struct ncp_inode_info *NCP_FINFO(struct inode *inode)
-{
-	return container_of(inode, struct ncp_inode_info, vfs_inode);
-}
-
-/* linux/fs/ncpfs/inode.c */
-int ncp_notify_change(struct dentry *, struct iattr *);
-struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
-void ncp_update_inode(struct inode *, struct ncp_entry_info *);
-void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
-
-/* linux/fs/ncpfs/dir.c */
-extern const struct inode_operations ncp_dir_inode_operations;
-extern const struct file_operations ncp_dir_operations;
-extern const struct dentry_operations ncp_root_dentry_operations;
-int ncp_conn_logged_in(struct super_block *);
-int ncp_date_dos2unix(__le16 time, __le16 date);
-void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
-
-/* linux/fs/ncpfs/ioctl.c */
-long ncp_ioctl(struct file *, unsigned int, unsigned long);
-long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* linux/fs/ncpfs/sock.c */
-int ncp_request2(struct ncp_server *server, int function,
-	void* reply, int max_reply_size);
-static inline int ncp_request(struct ncp_server *server, int function) {
-	return ncp_request2(server, function, server->packet, server->packet_size);
-}
-int ncp_connect(struct ncp_server *server);
-int ncp_disconnect(struct ncp_server *server);
-void ncp_lock_server(struct ncp_server *server);
-void ncp_unlock_server(struct ncp_server *server);
-
-/* linux/fs/ncpfs/symlink.c */
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern const struct address_space_operations ncp_symlink_aops;
-int ncp_symlink(struct inode*, struct dentry*, const char*);
-#endif
-
-/* linux/fs/ncpfs/file.c */
-extern const struct inode_operations ncp_file_inode_operations;
-extern const struct file_operations ncp_file_operations;
-int ncp_make_open(struct inode *, int);
-
-/* linux/fs/ncpfs/mmap.c */
-int ncp_mmap(struct file *, struct vm_area_struct *);
-
-/* linux/fs/ncpfs/ncplib_kernel.c */
-int ncp_make_closed(struct inode *);
-
-#endif				/* __KERNEL__ */
-
 #endif				/* _LINUX_NCP_FS_H */
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
deleted file mode 100644
index d64b0e8..0000000
--- a/include/linux/ncp_fs_sb.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- *  ncp_fs_sb.h
- *
- *  Copyright (C) 1995, 1996 by Volker Lendecke
- *
- */
-
-#ifndef _NCP_FS_SB
-#define _NCP_FS_SB
-
-#include <linux/types.h>
-#include <linux/ncp_mount.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
-#include <linux/backing-dev.h>
-
-#ifdef __KERNEL__
-
-#include <linux/workqueue.h>
-
-#define NCP_DEFAULT_OPTIONS 0		/* 2 for packet signatures */
-
-struct sock;
-
-struct ncp_server {
-
-	struct ncp_mount_data_kernel m;	/* Nearly all of the mount data is of
-					   interest for us later, so we store
-					   it completely. */
-
-	__u8 name_space[NCP_NUMBER_OF_VOLUMES + 2];
-
-	struct file *ncp_filp;	/* File pointer to ncp socket */
-	struct socket *ncp_sock;/* ncp socket */
-	struct file *info_filp;
-	struct socket *info_sock;
-
-	u8 sequence;
-	u8 task;
-	u16 connection;		/* Remote connection number */
-
-	u8 completion;		/* Status message from server */
-	u8 conn_status;		/* Bit 4 = 1 ==> Server going down, no
-				   requests allowed anymore.
-				   Bit 0 = 1 ==> Server is down. */
-
-	int buffer_size;	/* Negotiated bufsize */
-
-	int reply_size;		/* Size of last reply */
-
-	int packet_size;
-	unsigned char *packet;	/* Here we prepare requests and
-				   receive replies */
-	unsigned char *txbuf;	/* Storage for current request */
-	unsigned char *rxbuf;	/* Storage for reply to current request */
-
-	int lock;		/* To prevent mismatch in protocols. */
-	struct mutex mutex;
-
-	int current_size;	/* for packet preparation */
-	int has_subfunction;
-	int ncp_reply_size;
-
-	int root_setuped;
-	struct mutex root_setup_lock;
-
-	/* info for packet signing */
-	int sign_wanted;	/* 1=Server needs signed packets */
-	int sign_active;	/* 0=don't do signing, 1=do */
-	char sign_root[8];	/* generated from password and encr. key */
-	char sign_last[16];	
-
-	/* Authentication info: NDS or BINDERY, username */
-	struct {
-		int	auth_type;
-		size_t	object_name_len;
-		void*	object_name;
-		int	object_type;
-	} auth;
-	/* Password info */
-	struct {
-		size_t	len;
-		void*	data;
-	} priv;
-	struct rw_semaphore auth_rwsem;
-
-	/* nls info: codepage for volume and charset for I/O */
-	struct nls_table *nls_vol;
-	struct nls_table *nls_io;
-
-	/* maximum age in jiffies */
-	atomic_t dentry_ttl;
-
-	/* miscellaneous */
-	unsigned int flags;
-
-	spinlock_t requests_lock;	/* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
-
-	void (*data_ready)(struct sock* sk, int len);
-	void (*error_report)(struct sock* sk);
-	void (*write_space)(struct sock* sk);	/* STREAM mode only */
-	struct {
-		struct work_struct tq;		/* STREAM/DGRAM: data/error ready */
-		struct ncp_request_reply* creq;	/* STREAM/DGRAM: awaiting reply from this request */
-		struct mutex creq_mutex;	/* DGRAM only: lock accesses to rcv.creq */
-
-		unsigned int state;		/* STREAM only: receiver state */
-		struct {
-			__u32 magic __packed;
-			__u32 len __packed;
-			__u16 type __packed;
-			__u16 p1 __packed;
-			__u16 p2 __packed;
-			__u16 p3 __packed;
-			__u16 type2 __packed;
-		} buf;				/* STREAM only: temporary buffer */
-		unsigned char* ptr;		/* STREAM only: pointer to data */
-		size_t len;			/* STREAM only: length of data to receive */
-	} rcv;
-	struct {
-		struct list_head requests;	/* STREAM only: queued requests */
-		struct work_struct tq;		/* STREAM only: transmitter ready */
-		struct ncp_request_reply* creq;	/* STREAM only: currently transmitted entry */
-	} tx;
-	struct timer_list timeout_tm;		/* DGRAM only: timeout timer */
-	struct work_struct timeout_tq;		/* DGRAM only: associated queue, we run timers from process context */
-	int timeout_last;			/* DGRAM only: current timeout length */
-	int timeout_retries;			/* DGRAM only: retries left */
-	struct {
-		size_t len;
-		__u8 data[128];
-	} unexpected_packet;
-	struct backing_dev_info bdi;
-};
-
-extern void ncp_tcp_rcv_proc(struct work_struct *work);
-extern void ncp_tcp_tx_proc(struct work_struct *work);
-extern void ncpdgram_rcv_proc(struct work_struct *work);
-extern void ncpdgram_timeout_proc(struct work_struct *work);
-extern void ncpdgram_timeout_call(unsigned long server);
-extern void ncp_tcp_data_ready(struct sock* sk, int len);
-extern void ncp_tcp_write_space(struct sock* sk);
-extern void ncp_tcp_error_report(struct sock* sk);
-
-#define NCP_FLAG_UTF8	1
-
-#define NCP_CLR_FLAG(server, flag)	((server)->flags &= ~(flag))
-#define NCP_SET_FLAG(server, flag)	((server)->flags |= (flag))
-#define NCP_IS_FLAG(server, flag)	((server)->flags & (flag))
-
-static inline int ncp_conn_valid(struct ncp_server *server)
-{
-	return ((server->conn_status & 0x11) == 0);
-}
-
-static inline void ncp_invalidate_conn(struct ncp_server *server)
-{
-	server->conn_status |= 0x01;
-}
-
-#endif				/* __KERNEL__ */
-
-#endif
- 
diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h
index a2b549e..dfcbea2 100644
--- a/include/linux/ncp_mount.h
+++ b/include/linux/ncp_mount.h
@@ -68,26 +68,4 @@
 
 #define NCP_MOUNT_VERSION_V5	(5)	/* Text only */
 
-#ifdef __KERNEL__
-
-struct ncp_mount_data_kernel {
-	unsigned long    flags;		/* NCP_MOUNT_* flags */
-	unsigned int	 int_flags;	/* internal flags */
-#define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
-	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
-	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
-	unsigned int     ncp_fd;	/* The socket to the ncp port */
-	unsigned int     time_out;	/* How long should I wait after
-					   sending a NCP request? */
-	unsigned int     retry_count;	/* And how often should I retry? */
-	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
-	__kernel_uid32_t uid;
-	__kernel_gid32_t gid;
-	__kernel_mode_t  file_mode;
-	__kernel_mode_t  dir_mode;
-	int		 info_fd;
-};
-
-#endif /* __KERNEL__ */
-
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0f6b1c9..d971346 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -520,9 +520,6 @@
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
-	u64			tx_bytes;
-	u64			tx_packets;
-	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -2191,11 +2188,15 @@
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				       void (*setup)(struct net_device *),
-				       unsigned int queue_count);
+				       unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
-	alloc_netdev_mq(sizeof_priv, name, setup, 1)
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
@@ -2261,8 +2262,6 @@
 extern void		dev_mcast_init(void);
 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 					       struct rtnl_link_stats64 *storage);
-extern void		dev_txq_stats_fold(const struct net_device *dev,
-					   struct rtnl_link_stats64 *stats);
 
 extern int		netdev_max_backlog;
 extern int		netdev_tstamp_prequeue;
@@ -2303,7 +2302,7 @@
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
+int netif_skb_features(struct sk_buff *skb);
 
 static inline int net_gso_ok(int features, int gso_type)
 {
@@ -2317,16 +2316,10 @@
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
-	if (skb_is_gso(skb)) {
-		int features = netif_get_vlan_features(skb, dev);
-
-		return (!skb_gso_ok(skb, features) ||
-			unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-	}
-
-	return 0;
+	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
 static inline void netif_set_gso_max_size(struct net_device *dev,
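As a usage sketch (not part of this patch), a driver that wants different TX and RX queue counts can now call alloc_netdev_mqs() directly, while alloc_netdev_mq() keeps its old meaning of txqs == rxqs == count; struct my_priv and the queue counts below are invented for illustration.

    /* Illustrative only: Ethernet device with 8 TX and 4 RX queues. */
    struct net_device *netdev;

    netdev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
                              ether_setup, 8, 4);
    if (!netdev)
            return -ENOMEM;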
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec0..6712e71 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@
  *  necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
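The spinlock-to-seqlock switch above does not change the calling convention; the per-CPU reader side still looks roughly like the sketch below (illustrative, not taken from this patch), while counter readers can now retry against the sequence count instead of blocking rule updates.

    /* Reader side: protect this CPU's rule-counter updates. */
    xt_info_rdlock_bh();
    /* ... traverse the ruleset and bump per-CPU counters ... */
    xt_info_rdunlock_bh();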
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
new file mode 100644
index 0000000..7ab8521
--- /dev/null
+++ b/include/linux/nfc/pn544.h
@@ -0,0 +1,97 @@
+/*
+ * Driver include for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PN544_H_
+#define _PN544_H_
+
+#include <linux/i2c.h>
+
+#define PN544_DRIVER_NAME	"pn544"
+#define PN544_MAXWINDOW_SIZE	7
+#define PN544_WINDOW_SIZE	4
+#define PN544_RETRIES		10
+#define PN544_MAX_I2C_TRANSFER	0x0400
+#define PN544_MSG_MAX_SIZE	0x21 /* at normal HCI mode */
+
+/* ioctl */
+#define PN544_CHAR_BASE		'P'
+#define PN544_IOR(num, dtype)	_IOR(PN544_CHAR_BASE, num, dtype)
+#define PN544_IOW(num, dtype)	_IOW(PN544_CHAR_BASE, num, dtype)
+#define PN544_GET_FW_MODE	PN544_IOW(1, unsigned int)
+#define PN544_SET_FW_MODE	PN544_IOW(2, unsigned int)
+#define PN544_GET_DEBUG		PN544_IOW(3, unsigned int)
+#define PN544_SET_DEBUG		PN544_IOW(4, unsigned int)
+
+/* Timing restrictions (ms) */
+#define PN544_RESETVEN_TIME	30 /* 7 */
+#define PN544_PVDDVEN_TIME	0
+#define PN544_VBATVEN_TIME	0
+#define PN544_GPIO4VEN_TIME	0
+#define PN544_WAKEUP_ACK	5
+#define PN544_WAKEUP_GUARD	(PN544_WAKEUP_ACK + 1)
+#define PN544_INACTIVITY_TIME	1000
+#define PN544_INTERFRAME_DELAY	200 /* us */
+#define PN544_BAUDRATE_CHANGE	150 /* us */
+
+/* Debug bits */
+#define PN544_DEBUG_BUF		0x01
+#define PN544_DEBUG_READ	0x02
+#define PN544_DEBUG_WRITE	0x04
+#define PN544_DEBUG_IRQ		0x08
+#define PN544_DEBUG_CALLS	0x10
+#define PN544_DEBUG_MODE	0x20
+
+/* Normal (HCI) mode */
+#define PN544_LLC_HCI_OVERHEAD	3 /* header + crc (to length) */
+#define PN544_LLC_MIN_SIZE	(1 + PN544_LLC_HCI_OVERHEAD) /* length + */
+#define PN544_LLC_MAX_DATA	(PN544_MSG_MAX_SIZE - 2)
+#define PN544_LLC_MAX_HCI_SIZE	(PN544_LLC_MAX_DATA - 2)
+
+struct pn544_llc_packet {
+	unsigned char length; /* of rest of packet */
+	unsigned char header;
+	unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */
+};
+
+/* Firmware upgrade mode */
+#define PN544_FW_HEADER_SIZE	3
+/* max fw transfer is 1024 bytes, but I2C limits it to 0xC0 */
+#define PN544_MAX_FW_DATA	(PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE)
+
+struct pn544_fw_packet {
+	unsigned char command; /* status in answer */
+	unsigned char length[2]; /* big-endian order (msf) */
+	unsigned char data[PN544_MAX_FW_DATA];
+};
+
+#ifdef __KERNEL__
+/* board config */
+struct pn544_nfc_platform_data {
+	int (*request_resources) (struct i2c_client *client);
+	void (*free_resources) (void);
+	void (*enable) (int fw);
+	int (*test) (void);
+	void (*disable) (void);
+};
+#endif /* __KERNEL__ */
+
+#endif /* _PN544_H_ */
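A hedged board-file sketch of how the pn544_nfc_platform_data hooks might be filled in; every name and function body below is invented for illustration.

    static int board_pn544_request(struct i2c_client *client)
    {
            return 0;       /* request GPIOs/regulators for the chip here */
    }

    static void board_pn544_free(void)
    {
    }

    static void board_pn544_enable(int fw)
    {
            /* drive the enable/firmware GPIOs according to 'fw' */
    }

    static int board_pn544_test(void)
    {
            return 0;       /* report self-test result */
    }

    static void board_pn544_disable(void)
    {
    }

    static struct pn544_nfc_platform_data board_pn544_data = {
            .request_resources = board_pn544_request,
            .free_resources    = board_pn544_free,
            .enable            = board_pn544_enable,
            .test              = board_pn544_test,
            .disable           = board_pn544_disable,
    };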
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index ac33806..6ccfe3b 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -11,6 +11,9 @@
 #define NFS3_MAXGROUPS		16
 #define NFS3_FHSIZE		64
 #define NFS3_COOKIESIZE		4
+#define NFS3_CREATEVERFSIZE	8
+#define NFS3_COOKIEVERFSIZE	8
+#define NFS3_WRITEVERFSIZE	8
 #define NFS3_FIFO_DEV		(-1)
 #define NFS3MODE_FMT		0170000
 #define NFS3MODE_DIR		0040000
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4925b22..134716e 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -65,6 +65,9 @@
 
 #define NFS4_CDFC4_FORE	0x1
 #define NFS4_CDFC4_BACK 0x2
+#define NFS4_CDFC4_BOTH 0x3
+#define NFS4_CDFC4_FORE_OR_BOTH 0x3
+#define NFS4_CDFC4_BACK_OR_BOTH 0x7
 
 #define NFS4_SET_TO_SERVER_TIME	0
 #define NFS4_SET_TO_CLIENT_TIME	1
@@ -111,9 +114,13 @@
 
 #define EXCHGID4_FLAG_SUPP_MOVED_REFER		0x00000001
 #define EXCHGID4_FLAG_SUPP_MOVED_MIGR		0x00000002
+#define EXCHGID4_FLAG_BIND_PRINC_STATEID	0x00000100
+
 #define EXCHGID4_FLAG_USE_NON_PNFS		0x00010000
 #define EXCHGID4_FLAG_USE_PNFS_MDS		0x00020000
 #define EXCHGID4_FLAG_USE_PNFS_DS		0x00040000
+#define EXCHGID4_FLAG_MASK_PNFS			0x00070000
+
 #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A	0x40000000
 #define EXCHGID4_FLAG_CONFIRMED_R		0x80000000
 /*
@@ -121,8 +128,8 @@
  * they're set in the argument or response, have separate
  * invalid flag masks for arg (_A) and resp (_R).
  */
-#define EXCHGID4_FLAG_MASK_A			0x40070003
-#define EXCHGID4_FLAG_MASK_R			0x80070003
+#define EXCHGID4_FLAG_MASK_A			0x40070103
+#define EXCHGID4_FLAG_MASK_R			0x80070103
 
 #define SEQ4_STATUS_CB_PATH_DOWN		0x00000001
 #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING	0x00000002
@@ -136,6 +143,9 @@
 #define SEQ4_STATUS_CB_PATH_DOWN_SESSION	0x00000200
 #define SEQ4_STATUS_BACKCHANNEL_FAULT		0x00000400
 
+#define NFS4_SECINFO_STYLE4_CURRENT_FH	0
+#define NFS4_SECINFO_STYLE4_PARENT	1
+
 #define NFS4_MAX_UINT64	(~(u64)0)
 
 /* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h
deleted file mode 100644
index c9c05a7..0000000
--- a/include/linux/nfs4_acl.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *  include/linux/nfs4_acl.c
- *
- *  Common NFSv4 ACL handling definitions.
- *
- *  Copyright (c) 2002 The Regents of the University of Michigan.
- *  All rights reserved.
- *
- *  Marius Aamodt Eriksen <marius@umich.edu>
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *  3. Neither the name of the University nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef LINUX_NFS4_ACL_H
-#define LINUX_NFS4_ACL_H
-
-#include <linux/posix_acl.h>
-
-/* Maximum ACL we'll accept from client; chosen (somewhat arbitrarily) to
- * fit in a page: */
-#define NFS4_ACL_MAX 170
-
-struct nfs4_acl *nfs4_acl_new(int);
-int nfs4_acl_get_whotype(char *, u32);
-int nfs4_acl_write_who(int who, char *p);
-int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
-		                        uid_t who, u32 mask);
-
-#define NFS4_ACL_TYPE_DEFAULT	0x01
-#define NFS4_ACL_DIR		0x02
-#define NFS4_ACL_OWNER		0x04
-
-struct nfs4_acl *nfs4_acl_posix_to_nfsv4(struct posix_acl *,
-				struct posix_acl *, unsigned int flags);
-int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *, struct posix_acl **,
-				struct posix_acl **, unsigned int flags);
-
-#endif /* LINUX_NFS4_ACL_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 29d504d..0779bb8 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -351,7 +351,7 @@
 extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
 extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int nfs_permission(struct inode *, int);
+extern int nfs_permission(struct inode *, int, unsigned int);
 extern int nfs_open(struct inode *, struct file *);
 extern int nfs_release(struct inode *, struct file *);
 extern int nfs_attribute_timeout(struct inode *inode);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 452d964..b197563 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,11 +47,6 @@
 	u64			cl_clientid;	/* constant */
 	unsigned long		cl_state;
 
-	struct rb_root		cl_openowner_id;
-	struct rb_root		cl_lockowner_id;
-
-	struct list_head	cl_delegations;
-	struct rb_root		cl_state_owners;
 	spinlock_t		cl_lock;
 
 	unsigned long		cl_lease_time;
@@ -71,6 +66,7 @@
 	 */
 	char			cl_ipaddr[48];
 	unsigned char		cl_id_uniquifier;
+	u32			cl_cb_ident;	/* v4.0 callback identifier */
 	const struct nfs4_minor_version_ops *cl_mvops;
 #endif /* CONFIG_NFS_V4 */
 
@@ -148,7 +144,14 @@
 						   that are supported on this
 						   filesystem */
 	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
+	struct rpc_wait_queue	roc_rpcwaitq;
+
+	/* the following fields are protected by nfs_client->cl_lock */
+	struct rb_root		state_owners;
+	struct rb_root		openowner_id;
+	struct rb_root		lockowner_id;
 #endif
+	struct list_head	delegations;
 	void (*destroy)(struct nfs_server *);
 
 	atomic_t active; /* Keep trace of any activity to this server */
@@ -196,6 +199,7 @@
 						 * op for dynamic resizing */
 	int		target_max_slots;	/* Set by CB_RECALL_SLOT as
 						 * the new max_slots */
+	struct completion complete;
 };
 
 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
@@ -212,7 +216,6 @@
 	unsigned long			session_state;
 	u32				hash_alg;
 	u32				ssv_len;
-	struct completion		complete;
 
 	/* The fore and back channel */
 	struct nfs4_channel_attrs	fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 80f0719..b006857 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -208,6 +208,7 @@
 	struct inode *inode;
 	struct nfs_open_context *ctx;
 	struct nfs4_sequence_args seq_args;
+	nfs4_stateid stateid;
 };
 
 struct nfs4_layoutget_res {
@@ -223,7 +224,6 @@
 	struct nfs4_layoutget_args args;
 	struct nfs4_layoutget_res res;
 	struct pnfs_layout_segment **lsegpp;
-	int status;
 };
 
 struct nfs4_getdeviceinfo_args {
@@ -317,6 +317,7 @@
 struct nfs_lowner {
 	__u64			clientid;
 	__u64			id;
+	dev_t			s_dev;
 };
 
 struct nfs_lock_args {
@@ -484,6 +485,7 @@
 	struct nfs_fh *		fh;
 	struct nfs_fattr *	fattr;
 	unsigned char		d_type;
+	struct nfs_server *	server;
 };
 
 /*
@@ -1089,7 +1091,7 @@
 	int	(*pathconf) (struct nfs_server *, struct nfs_fh *,
 			     struct nfs_pathconf *);
 	int	(*set_capabilities)(struct nfs_server *, struct nfs_fh *);
-	__be32 *(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int plus);
+	int	(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
 	void	(*read_setup)   (struct nfs_read_data *, struct rpc_message *);
 	int	(*read_done)  (struct rpc_task *, struct nfs_read_data *);
 	void	(*write_setup)  (struct nfs_write_data *, struct rpc_message *);
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 8ae78a6..bd31615 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -35,7 +35,7 @@
 #define NFSEXP_NOHIDE		0x0200
 #define NFSEXP_NOSUBTREECHECK	0x0400
 #define	NFSEXP_NOAUTHNLM	0x0800		/* Don't authenticate NLM requests - just trust */
-#define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect */
+#define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect; no longer supported */
 #define NFSEXP_FSID		0x2000
 #define	NFSEXP_CROSSMOUNT	0x4000
 #define	NFSEXP_NOACL		0x8000	/* reserved for possible ACL related use */
diff --git a/include/linux/nfsd_idmap.h b/include/linux/nfsd_idmap.h
deleted file mode 100644
index d4a2ac1..0000000
--- a/include/linux/nfsd_idmap.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *  include/linux/nfsd_idmap.h
- *
- *  Mapping of UID to name and vice versa.
- *
- *  Copyright (c) 2002, 2003 The Regents of the University of
- *  Michigan.  All rights reserved.
- *
- *  Marius Aamodt Eriksen <marius@umich.edu>
- *
- *  Redistribution and use in source and binary forms, with or without
- *  modification, are permitted provided that the following conditions
- *  are met:
- *
- *  1. Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *  2. Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *  3. Neither the name of the University nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef LINUX_NFSD_IDMAP_H
-#define LINUX_NFSD_IDMAP_H
-
-#include <linux/in.h>
-#include <linux/sunrpc/svc.h>
-
-/* XXX from linux/nfs_idmap.h */
-#define IDMAP_NAMESZ 128
-
-#ifdef CONFIG_NFSD_V4
-int nfsd_idmap_init(void);
-void nfsd_idmap_shutdown(void);
-#else
-static inline int nfsd_idmap_init(void)
-{
-	return 0;
-}
-static inline void nfsd_idmap_shutdown(void)
-{
-}
-#endif
-
-int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *);
-int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *);
-int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *);
-int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *);
-
-#endif /* LINUX_NFSD_IDMAP_H */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2b89b71..821ffb9 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -148,6 +148,10 @@
  * @NL80211_CMD_SET_MPATH:  Set mesh path attributes for mesh path to
  * 	destination %NL80211_ATTR_MAC on the interface identified by
  * 	%NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_NEW_MPATH: Create a new mesh path for the destination given by
+ *	%NL80211_ATTR_MAC via %NL80211_ATTR_MPATH_NEXT_HOP.
+ * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by
+ *	%NL80211_ATTR_MAC.
  * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the
  *	the interface identified by %NL80211_ATTR_IFINDEX.
  * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
@@ -612,7 +616,7 @@
  *	consisting of a nested array.
  *
  * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes).
- * @NL80211_ATTR_PLINK_ACTION: action to perform on the mesh peer link.
+ * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link.
  * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path.
  * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path
  * 	info given for %NL80211_CMD_GET_MPATH, nested attribute described at
@@ -879,7 +883,9 @@
  *	See &enum nl80211_key_default_types.
  *
  * @NL80211_ATTR_MESH_SETUP: Optional mesh setup parameters.  These cannot be
- * changed once the mesh is active.
+ *	changed once the mesh is active.
+ * @NL80211_ATTR_MESH_CONFIG: Mesh configuration parameters, a nested attribute
+ *	containing attributes from &enum nl80211_meshconf_params.
  *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -1225,8 +1231,6 @@
  * @NL80211_STA_INFO_INACTIVE_TIME: time since last activity (u32, msecs)
  * @NL80211_STA_INFO_RX_BYTES: total received bytes (u32, from this station)
  * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
- * @__NL80211_STA_INFO_AFTER_LAST: internal
- * @NL80211_STA_INFO_MAX: highest possible station info attribute
  * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
  * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
  * 	containing info as possible, see &enum nl80211_sta_info_txrate.
@@ -1236,6 +1240,11 @@
  * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station)
  * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station)
  * @NL80211_STA_INFO_SIGNAL_AVG: signal strength average (u8, dBm)
+ * @NL80211_STA_INFO_LLID: the station's mesh LLID
+ * @NL80211_STA_INFO_PLID: the station's mesh PLID
+ * @NL80211_STA_INFO_PLINK_STATE: peer link state for the station
+ * @__NL80211_STA_INFO_AFTER_LAST: internal
+ * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
 enum nl80211_sta_info {
 	__NL80211_STA_INFO_INVALID,
@@ -1626,7 +1635,7 @@
  * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
  * that it takes for an HWMP information element to propagate across the mesh
  *
- * @NL80211_MESHCONF_ROOTMODE: whether root mode is enabled or not
+ * @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not
  *
  * @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a
  * source mesh point for path selection elements.
@@ -1678,6 +1687,7 @@
  * element that vendors will use to identify the path selection methods and
  * metrics in use.
  *
+ * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
  * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
  */
 enum nl80211_mesh_setup_params {
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 8aea06f..2feda6ee 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -3,7 +3,7 @@
 #include <linux/ioport.h>
 #include <linux/of.h>
 
-extern u64 of_translate_address(struct device_node *np, const u32 *addr);
+extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
 extern int of_address_to_resource(struct device_node *dev, int index,
 				  struct resource *r);
 extern void __iomem *of_iomap(struct device_node *device, int index);
@@ -21,7 +21,7 @@
 #endif
 
 #ifdef CONFIG_PCI
-extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
 			       u64 *size, unsigned int *flags);
 extern int of_pci_address_to_resource(struct device_node *dev, int bar,
 				      struct resource *r);
@@ -32,7 +32,7 @@
 	return -ENOSYS;
 }
 
-static inline const u32 *of_get_pci_address(struct device_node *dev,
+static inline const __be32 *of_get_pci_address(struct device_node *dev,
 		int bar_no, u64 *size, unsigned int *flags)
 {
 	return NULL;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 7bbf5b3..0ef22a1 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -58,6 +58,23 @@
 };
 
 #if defined(CONFIG_OF_FLATTREE)
+
+struct device_node;
+
+/* For scanning an arbitrary device-tree at any time */
+extern char *of_fdt_get_string(struct boot_param_header *blob, u32 offset);
+extern void *of_fdt_get_property(struct boot_param_header *blob,
+				 unsigned long node,
+				 const char *name,
+				 unsigned long *size);
+extern int of_fdt_is_compatible(struct boot_param_header *blob,
+				unsigned long node,
+				const char *compat);
+extern int of_fdt_match(struct boot_param_header *blob, unsigned long node,
+			const char **compat);
+extern void of_fdt_unflatten_tree(unsigned long *blob,
+			       struct device_node **mynodes);
+
 /* TBD: Temporary export of fdt globals - remove when code fully merged */
 extern int __initdata dt_root_addr_cells;
 extern int __initdata dt_root_size_cells;
@@ -71,6 +88,7 @@
 extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
 				 unsigned long *size);
 extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
+extern int of_flat_dt_match(unsigned long node, const char **matches);
 extern unsigned long of_get_flat_dt_root(void);
 
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
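A small assumed-usage sketch of the new of_flat_dt_match() helper: it takes a NULL-terminated list of compatible strings and reports whether the flat-tree node matches any of them (the compatibles below are made up).

    static const char *board_compat[] = {
            "acme,board-a",
            "acme,board-b",
            NULL,
    };

    static int __init board_match(unsigned long node)
    {
            return of_flat_dt_match(node, board_compat);
    }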
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
new file mode 100644
index 0000000..e913081
--- /dev/null
+++ b/include/linux/of_net.h
@@ -0,0 +1,15 @@
+/*
+ * OF helpers for network devices.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_NET_H
+#define __LINUX_OF_NET_H
+
+#ifdef CONFIG_OF_NET
+#include <linux/of.h>
+extern const void *of_get_mac_address(struct device_node *np);
+#endif
+
+#endif /* __LINUX_OF_NET_H */
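A brief assumed-usage sketch for the new helper; np, ndev and the fallback policy are illustrative.

    const void *mac = of_get_mac_address(np);

    if (mac)
            memcpy(ndev->dev_addr, mac, ETH_ALEN);
    else
            random_ether_addr(ndev->dev_addr);      /* driver-chosen fallback */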
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5f38c46..0db8037 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -48,9 +48,6 @@
  * struct page (these bits with information) are always mapped into kernel
  * address space...
  *
- * PG_buddy is set to indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
  * PG_hwpoison indicates that a page got corrupted in hardware and contains
  * data with incorrect ECC bits that triggered a machine check. Accessing is
  * not safe since it may cause another machine check. Don't touch!
@@ -96,7 +93,6 @@
 	PG_swapcache,		/* Swap page: swp_entry_t in private */
 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
 	PG_reclaim,		/* To be reclaimed asap */
-	PG_buddy,		/* Page is free, on buddy lists */
 	PG_swapbacked,		/* Page is backed by RAM/swap */
 	PG_unevictable,		/* Page is "unevictable"  */
 #ifdef CONFIG_MMU
@@ -108,6 +104,9 @@
 #ifdef CONFIG_MEMORY_FAILURE
 	PG_hwpoison,		/* hardware poisoned page. Don't touch */
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	PG_compound_lock,
+#endif
 	__NR_PAGEFLAGS,
 
 	/* Filesystems */
@@ -198,7 +197,7 @@
 struct page;	/* forward declaration */
 
 TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
-PAGEFLAG(Error, error)
+PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
@@ -230,7 +229,6 @@
  * risky: they bypass page accounting.
  */
 TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
-__PAGEFLAG(Buddy, buddy)
 PAGEFLAG(MappedToDisk, mappedtodisk)
 
 /* PG_readahead is only used for file reads; PG_reclaim is only for writes */
@@ -344,7 +342,7 @@
  * tests can be used in performance sensitive paths. PageCompound is
  * generally not used in hot code paths.
  */
-__PAGEFLAG(Head, head)
+__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
 __PAGEFLAG(Tail, tail)
 
 static inline int PageCompound(struct page *page)
@@ -352,6 +350,13 @@
 	return page->flags & ((1L << PG_head) | (1L << PG_tail));
 
 }
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+	BUG_ON(!PageHead(page));
+	ClearPageHead(page);
+}
+#endif
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
@@ -389,14 +394,61 @@
 	page->flags &= ~PG_head_tail_mask;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+	BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
+	clear_bit(PG_compound, &page->flags);
+}
+#endif
+
 #endif /* !PAGEFLAGS_EXTENDED */
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * PageHuge() only returns true for hugetlbfs pages, but not for
+ * normal or transparent huge pages.
+ *
+ * PageTransHuge() returns true for both transparent huge and
+ * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
+ * called in core VM paths where hugetlbfs pages can't exist.
+ */
+static inline int PageTransHuge(struct page *page)
+{
+	VM_BUG_ON(PageTail(page));
+	return PageHead(page);
+}
+
+static inline int PageTransCompound(struct page *page)
+{
+	return PageCompound(page);
+}
+
+#else
+
+static inline int PageTransHuge(struct page *page)
+{
+	return 0;
+}
+
+static inline int PageTransCompound(struct page *page)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_MMU
 #define __PG_MLOCKED		(1 << PG_mlocked)
 #else
 #define __PG_MLOCKED		0
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __PG_COMPOUND_LOCK		(1 << PG_compound_lock)
+#else
+#define __PG_COMPOUND_LOCK		0
+#endif
+
 /*
  * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
@@ -404,9 +456,10 @@
 #define PAGE_FLAGS_CHECK_AT_FREE \
 	(1 << PG_lru	 | 1 << PG_locked    | \
 	 1 << PG_private | 1 << PG_private_2 | \
-	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
+	 1 << PG_writeback | 1 << PG_reserved | \
 	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
-	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON)
+	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+	 __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
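To illustrate the intent of the new helpers, a sketch under the assumption that the caller holds a head page and that HPAGE_PMD_NR is available from the THP headers:

    int nr_pages = 1;

    if (PageTransHuge(page))
            nr_pages = HPAGE_PMD_NR;        /* account all base pages at once */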
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index b02195d..6d6cb7a 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -35,12 +35,15 @@
 
 enum {
 	/* flags for mem_cgroup */
-	PCG_LOCK,  /* page cgroup is locked */
+	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
 	PCG_CACHE, /* charged as cache */
 	PCG_USED, /* this object is in use. */
-	PCG_ACCT_LRU, /* page has been accounted for */
-	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
 	PCG_MIGRATION, /* under page migration */
+	/* flags for mem_cgroup and file and I/O status */
+	PCG_MOVE_LOCK, /* for races between move_account and the following bits */
+	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
+	/* No lock in page_cgroup */
+	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
 };
 
 #define TESTPCGFLAG(uname, lname)			\
@@ -94,6 +97,10 @@
 
 static inline void lock_page_cgroup(struct page_cgroup *pc)
 {
+	/*
+	 * Don't take this lock in IRQ context.
+	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+	 */
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
 
@@ -107,6 +114,24 @@
 	return bit_spin_is_locked(PCG_LOCK, &pc->flags);
 }
 
+static inline void move_lock_page_cgroup(struct page_cgroup *pc,
+	unsigned long *flags)
+{
+	/*
+	 * Updates to pc->flags for page cache statistics can come from both
+	 * normal context and IRQ context, so disable IRQs to avoid deadlock.
+	 */
+	local_irq_save(*flags);
+	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
+}
+
+static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
+	unsigned long *flags)
+{
+	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
+	local_irq_restore(*flags);
+}
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2d1ffe3..9c66e994 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -48,7 +48,7 @@
 
 static inline int mapping_unevictable(struct address_space *mapping)
 {
-	if (likely(mapping))
+	if (mapping)
 		return test_bit(AS_UNEVICTABLE, &mapping->flags);
 	return !!mapping;
 }
diff --git a/include/linux/path.h b/include/linux/path.h
index edc98de..a581e8c 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -10,7 +10,9 @@
 };
 
 extern void path_get(struct path *);
+extern void path_get_long(struct path *);
 extern void path_put(struct path *);
+extern void path_put_long(struct path *);
 
 static inline int path_equal(const struct path *path1, const struct path *path2)
 {
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index c8b6473..479d9bb 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -40,4 +40,10 @@
 { return NULL; }
 #endif
 
+#ifdef CONFIG_ACPI_APEI
+extern bool aer_acpi_firmware_first(void);
+#else
+static inline bool aer_acpi_firmware_first(void) { return false; }
+#endif
+
 #endif	/* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
index 91ba0b3..ce68105 100644
--- a/include/linux/pci-aspm.h
+++ b/include/linux/pci-aspm.h
@@ -27,6 +27,7 @@
 extern void pcie_aspm_exit_link_state(struct pci_dev *pdev);
 extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
 extern void pci_disable_link_state(struct pci_dev *pdev, int state);
+extern void pcie_clear_aspm(void);
 extern void pcie_no_aspm(void);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
@@ -41,7 +42,9 @@
 static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
 {
 }
-
+static inline void pcie_clear_aspm(void)
+{
+}
 static inline void pcie_no_aspm(void)
 {
 }
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7454408..559d028 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -806,7 +806,7 @@
 
 /* Power management related routines */
 int pci_save_state(struct pci_dev *dev);
-int pci_restore_state(struct pci_dev *dev);
+void pci_restore_state(struct pci_dev *dev);
 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@@ -820,7 +820,6 @@
 int pci_back_from_sleep(struct pci_dev *dev);
 bool pci_dev_run_wake(struct pci_dev *dev);
 bool pci_check_pme_status(struct pci_dev *dev);
-void pci_wakeup_event(struct pci_dev *dev);
 void pci_pme_wakeup_bus(struct pci_bus *bus);
 
 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
@@ -994,6 +993,14 @@
 extern int pci_msi_enabled(void);
 #endif
 
+#ifdef CONFIG_PCIEPORTBUS
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+#else
+#define pcie_ports_disabled	true
+#define pcie_ports_auto		false
+#endif
+
 #ifndef CONFIG_PCIEASPM
 static inline int pcie_aspm_enabled(void)
 {
@@ -1003,6 +1010,14 @@
 extern int pcie_aspm_enabled(void);
 #endif
 
+#ifdef CONFIG_PCIEAER
+void pci_no_aer(void);
+bool pci_aer_available(void);
+#else
+static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
+#endif
+
 #ifndef CONFIG_PCIE_ECRC
 static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
 {
@@ -1168,10 +1183,8 @@
 	return 0;
 }
 
-static inline int pci_restore_state(struct pci_dev *dev)
-{
-	return 0;
-}
+static inline void pci_restore_state(struct pci_dev *dev)
+{ }
 
 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb845c1..3adb06e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,7 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
 #define PCI_DEVICE_ID_AMD_15H_NB_MISC	0x1603
+#define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
 #define PCI_DEVICE_ID_AMD_SCSI		0x2020
@@ -1650,6 +1651,11 @@
 #define PCI_DEVICE_ID_O2_6836		0x6836
 #define PCI_DEVICE_ID_O2_6812		0x6872
 #define PCI_DEVICE_ID_O2_6933		0x6933
+#define PCI_DEVICE_ID_O2_8120		0x8120
+#define PCI_DEVICE_ID_O2_8220		0x8220
+#define PCI_DEVICE_ID_O2_8221		0x8221
+#define PCI_DEVICE_ID_O2_8320		0x8320
+#define PCI_DEVICE_ID_O2_8321		0x8321
 
 #define PCI_VENDOR_ID_3DFX		0x121a
 #define PCI_DEVICE_ID_3DFX_VOODOO	0x0001
@@ -2363,6 +2369,10 @@
 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD	0x2381
 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS	0x2383
+#define PCI_DEVICE_ID_JMICRON_JMB385_MS	0x2388
+#define PCI_DEVICE_ID_JMICRON_JMB388_SD	0x2391
+#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
+#define PCI_DEVICE_ID_JMICRON_JMB390_MS	0x2393
 
 #define PCI_VENDOR_ID_KORENIX		0x1982
 #define PCI_DEVICE_ID_KORENIX_JETCARDF0	0x1600
@@ -2468,7 +2478,8 @@
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN	0x1c41
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX	0x1c5f
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS	0x1d22
-#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC	0x1d40
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0	0x1d40
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1	0x1d41
 #define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
 #define PCI_DEVICE_ID_INTEL_82801AA_3	0x2413
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index af83076..5b7e6b1 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -309,6 +309,14 @@
 #define PCI_MSIX_PBA		8
 #define  PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
 
+/* MSI-X entry's format */
+#define PCI_MSIX_ENTRY_SIZE		16
+#define  PCI_MSIX_ENTRY_LOWER_ADDR	0
+#define  PCI_MSIX_ENTRY_UPPER_ADDR	4
+#define  PCI_MSIX_ENTRY_DATA		8
+#define  PCI_MSIX_ENTRY_VECTOR_CTRL	12
+#define   PCI_MSIX_ENTRY_CTRL_MASKBIT	1
+
 /* CompactPCI Hotswap Register */
 
 #define PCI_CHSWP_CSR		2	/* Control and Status Register */
@@ -496,6 +504,8 @@
 #define  PCI_EXP_RTCTL_CRSSVE	0x10	/* CRS Software Visibility Enable */
 #define PCI_EXP_RTCAP		30	/* Root Capabilities */
 #define PCI_EXP_RTSTA		32	/* Root Status */
+#define PCI_EXP_RTSTA_PME	0x10000 /* PME status */
+#define PCI_EXP_RTSTA_PENDING	0x20000 /* PME pending */
 #define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
 #define  PCI_EXP_DEVCAP2_ARI	0x20	/* Alternative Routing-ID */
 #define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
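The new MSI-X entry offsets are intended for address arithmetic on the mapped vector table; a sketch with assumed names (msix_base, idx, msg):

    void __iomem *entry = msix_base + idx * PCI_MSIX_ENTRY_SIZE;

    writel(msg->address_lo, entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    writel(msg->address_hi, entry + PCI_MSIX_ENTRY_UPPER_ADDR);
    writel(msg->data,       entry + PCI_MSIX_ENTRY_DATA);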
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b83..27c3c6f 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -240,6 +240,21 @@
 	pscr_ret__;							\
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...)			\
+({									\
+	typeof(variable) pscr2_ret__;					\
+	__verify_pcpu_ptr(&(variable));					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
+	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
+	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
+	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
+	default:							\
+		__bad_size_call_parameter(); break;			\
+	}								\
+	pscr2_ret__;							\
+})
+
 #define __pcpu_size_call(stem, variable, ...)				\
 do {									\
 	__verify_pcpu_ptr(&(variable));					\
@@ -402,6 +417,89 @@
 # define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)				\
+({									\
+	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	__this_cpu_add(pcp, val);					\
+	ret__ = __this_cpu_read(pcp);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
+
+#define _this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -529,11 +627,87 @@
 # define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define __this_cpu_generic_add_return(pcp, val)				\
+({									\
+	__this_cpu_add(pcp, val);					\
+	__this_cpu_read(pcp);						\
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_xchg(pcp, nval)				\
+({	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	__this_cpu_write(pcp, nval);					\
+	ret__;								\
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)	\
+	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	ret__;								\
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)	\
+	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations)
- . They are guaranteed to be atomic vs. local interrupts and
+ * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op)				\
@@ -620,4 +794,33 @@
 # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
+({									\
+	typeof(pcp) ret__;						\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	ret__ = __this_cpu_read(pcp);					\
+	if (ret__ == (oval))						\
+		__this_cpu_write(pcp, nval);				\
+	local_irq_restore(flags);					\
+	ret__;								\
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
+	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
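A short sketch of the new return-value operations; the per-cpu variables and the use of pr_debug() are invented for illustration.

    static DEFINE_PER_CPU(int, hits);
    static DEFINE_PER_CPU(int, state);

    static void example(void)
    {
            int n;

            n = this_cpu_inc_return(hits);          /* add 1, read back the new value */

            if (this_cpu_cmpxchg(state, 0, 1) == 0) /* claim this CPU's slot */
                    pr_debug("claimed, %d hits so far\n", n);
    }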
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index bb27d7e..77257c9 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -30,6 +30,7 @@
  *	struct pipe_inode_info - a linux kernel pipe
  *	@wait: reader/writer wait point in case of empty/full pipe
  *	@nrbufs: the number of non-empty pipe buffers in this pipe
+ *	@buffers: total number of buffers (should be a power of 2)
  *	@curbuf: the current pipe buffer entry
  *	@tmp_page: cached released page
  *	@readers: number of current readers of this pipe
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 40f3f45..dd9c7ab 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -367,45 +367,6 @@
 					{ .event = PM_EVENT_AUTO_RESUME, })
 
 /**
- * Device power management states
- *
- * These state labels are used internally by the PM core to indicate the current
- * status of a device with respect to the PM core operations.
- *
- * DPM_ON		Device is regarded as operational.  Set this way
- *			initially and when ->complete() is about to be called.
- *			Also set when ->prepare() fails.
- *
- * DPM_PREPARING	Device is going to be prepared for a PM transition.  Set
- *			when ->prepare() is about to be called.
- *
- * DPM_RESUMING		Device is going to be resumed.  Set when ->resume(),
- *			->thaw(), or ->restore() is about to be called.
- *
- * DPM_SUSPENDING	Device has been prepared for a power transition.  Set
- *			when ->prepare() has just succeeded.
- *
- * DPM_OFF		Device is regarded as inactive.  Set immediately after
- *			->suspend(), ->freeze(), or ->poweroff() has succeeded.
- *			Also set when ->resume()_noirq, ->thaw_noirq(), or
- *			->restore_noirq() is about to be called.
- *
- * DPM_OFF_IRQ		Device is in a "deep sleep".  Set immediately after
- *			->suspend_noirq(), ->freeze_noirq(), or
- *			->poweroff_noirq() has just succeeded.
- */
-
-enum dpm_state {
-	DPM_INVALID,
-	DPM_ON,
-	DPM_PREPARING,
-	DPM_RESUMING,
-	DPM_SUSPENDING,
-	DPM_OFF,
-	DPM_OFF_IRQ,
-};
-
-/**
  * Device run-time power management status.
  *
  * These status labels are used internally by the PM core to indicate the
@@ -463,8 +424,8 @@
 struct dev_pm_info {
 	pm_message_t		power_state;
 	unsigned int		can_wakeup:1;
-	unsigned		async_suspend:1;
-	enum dpm_state		status;		/* Owned by the PM core */
+	unsigned int		async_suspend:1;
+	unsigned int		in_suspend:1;	/* Owned by the PM core */
 	spinlock_t		lock;
 #ifdef CONFIG_PM_SLEEP
 	struct list_head	entry;
@@ -486,6 +447,7 @@
 	unsigned int		run_wake:1;
 	unsigned int		runtime_auto:1;
 	unsigned int		no_callbacks:1;
+	unsigned int		irq_safe:1;
 	unsigned int		use_autosuspend:1;
 	unsigned int		timer_autosuspends:1;
 	enum rpm_request	request;
@@ -610,4 +572,11 @@
 #define PM_APM	1
 #define PM_ACPI	2
 
+extern int pm_generic_suspend(struct device *dev);
+extern int pm_generic_resume(struct device *dev);
+extern int pm_generic_freeze(struct device *dev);
+extern int pm_generic_thaw(struct device *dev);
+extern int pm_generic_restore(struct device *dev);
+extern int pm_generic_poweroff(struct device *dev);
+
 #endif /* _LINUX_PM_H */
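The newly exported pm_generic_*() helpers are meant to be plugged straight into a dev_pm_ops table; a minimal sketch for a hypothetical bus or driver that is happy with the generic behaviour:

    static const struct dev_pm_ops my_bus_pm_ops = {
            .suspend        = pm_generic_suspend,
            .resume         = pm_generic_resume,
            .freeze         = pm_generic_freeze,
            .thaw           = pm_generic_thaw,
            .poweroff       = pm_generic_poweroff,
            .restore        = pm_generic_restore,
    };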
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d19f1cc..d34f067 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -40,6 +40,7 @@
 extern int pm_generic_runtime_suspend(struct device *dev);
 extern int pm_generic_runtime_resume(struct device *dev);
 extern void pm_runtime_no_callbacks(struct device *dev);
+extern void pm_runtime_irq_safe(struct device *dev);
 extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
 extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
 extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
@@ -81,6 +82,11 @@
 		&& !dev->power.disable_depth;
 }
 
+static inline bool pm_runtime_enabled(struct device *dev)
+{
+	return !dev->power.disable_depth;
+}
+
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
 	ACCESS_ONCE(dev->power.last_busy) = jiffies;
@@ -119,11 +125,13 @@
 static inline bool device_run_wake(struct device *dev) { return false; }
 static inline void device_set_run_wake(struct device *dev, bool enable) {}
 static inline bool pm_runtime_suspended(struct device *dev) { return false; }
+static inline bool pm_runtime_enabled(struct device *dev) { return false; }
 
 static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
 static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
 static inline void pm_runtime_no_callbacks(struct device *dev) {}
+static inline void pm_runtime_irq_safe(struct device *dev) {}
 
 static inline void pm_runtime_mark_last_busy(struct device *dev) {}
 static inline void __pm_runtime_use_autosuspend(struct device *dev,
@@ -196,6 +204,11 @@
 	return __pm_runtime_idle(dev, RPM_GET_PUT);
 }
 
+static inline int pm_runtime_put_sync_suspend(struct device *dev)
+{
+	return __pm_runtime_suspend(dev, RPM_GET_PUT);
+}
+
 static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
 {
 	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
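How a driver might opt in is sketched below (probe function and device are illustrative): pm_runtime_irq_safe() declares that the device's runtime callbacks may run with interrupts disabled, which is what makes calls such as pm_runtime_put_sync_suspend() usable from atomic context.

    static int my_probe(struct platform_device *pdev)
    {
            pm_runtime_irq_safe(&pdev->dev);        /* callbacks are safe with IRQs off */
            pm_runtime_enable(&pdev->dev);
            return 0;
    }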
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 56e76af..1a2ccd6 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -57,7 +57,7 @@
 };
 
 /*
- * Structures and helpers for sys_poll/sys_poll
+ * Structures and helpers for select/poll syscall
  */
 struct poll_wqueues {
 	poll_table pt;
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 6760816..d68283a 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -108,6 +108,25 @@
 	return acl;
 }
 
+static inline int negative_cached_acl(struct inode *inode, int type)
+{
+	struct posix_acl **p, *acl;
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		p = &inode->i_acl;
+		break;
+	case ACL_TYPE_DEFAULT:
+		p = &inode->i_default_acl;
+		break;
+	default:
+		BUG();
+	}
+	acl = ACCESS_ONCE(*p);
+	if (acl)
+		return 0;
+	return 1;
+}
+
 static inline void set_cached_acl(struct inode *inode,
 				  int type,
 				  struct posix_acl *acl)
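The new negative_cached_acl() helper lets a permission fast path check, without sleeping, whether the cache already records that no ACL exists; a sketch of an RCU-walk style caller (the return-value conventions here are assumptions, not taken from this patch):

    if (rcu_walk) {
            if (!negative_cached_acl(inode, ACL_TYPE_ACCESS))
                    return -ECHILD;         /* might need to read an ACL: drop to ref-walk */
            return -EAGAIN;                 /* no ACL cached: fall back to normal checks */
    }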
diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h
new file mode 100644
index 0000000..de1dfe0
--- /dev/null
+++ b/include/linux/power/gpio-charger.h
@@ -0,0 +1,41 @@
+/*
+ *  Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_POWER_GPIO_CHARGER_H__
+#define __LINUX_POWER_GPIO_CHARGER_H__
+
+#include <linux/power_supply.h>
+#include <linux/types.h>
+
+/**
+ * struct gpio_charger_platform_data - platform_data for gpio_charger devices
+ * @name:		Name for the charger's power_supply device
+ * @type:		Type of the charger
+ * @gpio:		GPIO which is used to indicate the charger's status
+ * @gpio_active_low:	Should be set to 1 if the GPIO is active low, otherwise 0
+ * @supplied_to:	Array of battery names to which this charger supplies power
+ * @num_supplicants:	Number of entries in the supplied_to array
+ */
+struct gpio_charger_platform_data {
+	const char *name;
+	enum power_supply_type type;
+
+	int gpio;
+	int gpio_active_low;
+
+	char **supplied_to;
+	size_t num_supplicants;
+};
+
+#endif
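A hypothetical board-code sketch using the new platform data; the names, GPIO number and battery list are invented.

    static char *board_batteries[] = { "main-battery" };

    static struct gpio_charger_platform_data board_charger_pdata = {
            .name            = "usb-charger",
            .type            = POWER_SUPPLY_TYPE_USB,
            .gpio            = 28,                  /* made-up GPIO number */
            .gpio_active_low = 1,
            .supplied_to     = board_batteries,
            .num_supplicants = ARRAY_SIZE(board_batteries),
    };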
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
new file mode 100644
index 0000000..7995deb
--- /dev/null
+++ b/include/linux/power/max17042_battery.h
@@ -0,0 +1,30 @@
+/*
+ * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ *  Note that the Maxim 8966 and 8997 are MFDs and this is one of their subdevices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __MAX17042_BATTERY_H_
+#define __MAX17042_BATTERY_H_
+
+struct max17042_platform_data {
+	bool enable_current_sense;
+};
+
+#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/pps.h b/include/linux/pps.h
index 0194ab0..a9bb1d9 100644
--- a/include/linux/pps.h
+++ b/include/linux/pps.h
@@ -114,11 +114,18 @@
 	struct pps_ktime timeout;
 };
 
+struct pps_bind_args {
+	int tsformat;	/* format of time stamps */
+	int edge;	/* selected event type */
+	int consumer;	/* selected kernel consumer */
+};
+
 #include <linux/ioctl.h>
 
 #define PPS_GETPARAMS		_IOR('p', 0xa1, struct pps_kparams *)
 #define PPS_SETPARAMS		_IOW('p', 0xa2, struct pps_kparams *)
 #define PPS_GETCAP		_IOR('p', 0xa3, int *)
 #define PPS_FETCH		_IOWR('p', 0xa4, struct pps_fdata *)
+#define PPS_KC_BIND		_IOW('p', 0xa5, struct pps_bind_args *)
 
 #endif /* _PPS_H_ */
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index e0a193f..9404854 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -18,6 +18,9 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifndef LINUX_PPS_KERNEL_H
+#define LINUX_PPS_KERNEL_H
+
 #include <linux/pps.h>
 
 #include <linux/cdev.h>
@@ -28,18 +31,28 @@
  * Global defines
  */
 
+struct pps_device;
+
 /* The specific PPS source info */
 struct pps_source_info {
 	char name[PPS_MAX_NAME_LEN];		/* symbolic name */
 	char path[PPS_MAX_NAME_LEN];		/* path of connected device */
 	int mode;				/* PPS's allowed mode */
 
-	void (*echo)(int source, int event, void *data); /* PPS echo function */
+	void (*echo)(struct pps_device *pps,
+			int event, void *data);	/* PPS echo function */
 
 	struct module *owner;
 	struct device *dev;
 };
 
+struct pps_event_time {
+#ifdef CONFIG_NTP_PPS
+	struct timespec ts_raw;
+#endif /* CONFIG_NTP_PPS */
+	struct timespec ts_real;
+};
+
 /* The main struct */
 struct pps_device {
 	struct pps_source_info info;		/* PSS source info */
@@ -52,38 +65,56 @@
 	struct pps_ktime clear_tu;
 	int current_mode;			/* PPS mode at event time */
 
-	int go;					/* PPS event is arrived? */
+	unsigned int last_ev;			/* last PPS event id */
 	wait_queue_head_t queue;		/* PPS event queue */
 
 	unsigned int id;			/* PPS source unique ID */
 	struct cdev cdev;
 	struct device *dev;
-	int devno;
 	struct fasync_struct *async_queue;	/* fasync method */
 	spinlock_t lock;
-
-	atomic_t usage;				/* usage count */
 };
 
 /*
  * Global variables
  */
 
-extern spinlock_t pps_idr_lock;
-extern struct idr pps_idr;
-extern struct timespec pps_irq_ts[];
-
 extern struct device_attribute pps_attrs[];
 
 /*
  * Exported functions
  */
 
-struct pps_device *pps_get_source(int source);
-extern void pps_put_source(struct pps_device *pps);
-extern int pps_register_source(struct pps_source_info *info,
-				int default_params);
-extern void pps_unregister_source(int source);
+extern struct pps_device *pps_register_source(
+		struct pps_source_info *info, int default_params);
+extern void pps_unregister_source(struct pps_device *pps);
 extern int pps_register_cdev(struct pps_device *pps);
 extern void pps_unregister_cdev(struct pps_device *pps);
-extern void pps_event(int source, struct pps_ktime *ts, int event, void *data);
+extern void pps_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event, void *data);
+
+static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+		struct timespec ts)
+{
+	kt->sec = ts.tv_sec;
+	kt->nsec = ts.tv_nsec;
+}
+
+#ifdef CONFIG_NTP_PPS
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+}
+
+#else /* CONFIG_NTP_PPS */
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	getnstimeofday(&ts->ts_real);
+}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KERNEL_H */
+
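
As a quick illustration of the reworked kernel API above, a client driver now receives a struct pps_device * from pps_register_source() and feeds pps_event() a struct pps_event_time captured with pps_get_ts(). The sketch below is hypothetical (the "demo" source and its interrupt path are not part of this series) and assumes pps_register_source() returns NULL on failure:

#include <linux/module.h>
#include <linux/pps_kernel.h>

static struct pps_device *demo_pps;

/* The echo callback now takes the pps_device itself instead of a source index. */
static void demo_pps_echo(struct pps_device *pps, int event, void *data)
{
}

static struct pps_source_info demo_pps_info = {
	.name	= "demo",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT | PPS_ECHOASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.echo	= demo_pps_echo,
	.owner	= THIS_MODULE,
};

/* Called from the driver's (hypothetical) assert-edge interrupt handler. */
static void demo_pps_assert(void)
{
	struct pps_event_time ts;

	pps_get_ts(&ts);		/* grab the timestamp(s) first */
	pps_event(demo_pps, &ts, PPS_CAPTUREASSERT, NULL);
}

static int __init demo_pps_init(void)
{
	demo_pps = pps_register_source(&demo_pps_info, PPS_CAPTUREASSERT);
	return demo_pps ? 0 : -ENOMEM;	/* assumes NULL means failure */
}
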
diff --git a/include/linux/printk.h b/include/linux/printk.h
index b772ca5..ee048e7 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -4,14 +4,14 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-#define	KERN_EMERG	"<0>"	/* system is unusable			*/
-#define	KERN_ALERT	"<1>"	/* action must be taken immediately	*/
-#define	KERN_CRIT	"<2>"	/* critical conditions			*/
-#define	KERN_ERR	"<3>"	/* error conditions			*/
-#define	KERN_WARNING	"<4>"	/* warning conditions			*/
-#define	KERN_NOTICE	"<5>"	/* normal but significant condition	*/
-#define	KERN_INFO	"<6>"	/* informational			*/
-#define	KERN_DEBUG	"<7>"	/* debug-level messages			*/
+#define KERN_EMERG	"<0>"	/* system is unusable			*/
+#define KERN_ALERT	"<1>"	/* action must be taken immediately	*/
+#define KERN_CRIT	"<2>"	/* critical conditions			*/
+#define KERN_ERR	"<3>"	/* error conditions			*/
+#define KERN_WARNING	"<4>"	/* warning conditions			*/
+#define KERN_NOTICE	"<5>"	/* normal but significant condition	*/
+#define KERN_INFO	"<6>"	/* informational			*/
+#define KERN_DEBUG	"<7>"	/* debug-level messages			*/
 
 /* Use the default kernel loglevel */
 #define KERN_DEFAULT	"<d>"
@@ -20,7 +20,7 @@
  * line that had no enclosing \n). Only to be used by core/arch code
  * during early bootup (a continued line is not SMP-safe otherwise).
  */
-#define	KERN_CONT	"<c>"
+#define KERN_CONT	"<c>"
 
 extern int console_printk[];
 
@@ -29,6 +29,17 @@
 #define minimum_console_loglevel (console_printk[2])
 #define default_console_loglevel (console_printk[3])
 
+static inline void console_silent(void)
+{
+	console_loglevel = 0;
+}
+
+static inline void console_verbose(void)
+{
+	if (console_loglevel)
+		console_loglevel = 15;
+}
+
 struct va_format {
 	const char *fmt;
 	va_list *va;
@@ -65,11 +76,27 @@
  */
 #define HW_ERR		"[Hardware Error]: "
 
+/*
+ * Dummy printk for disabled debugging statements to use whilst maintaining
+ * gcc's format and side-effect checking.
+ */
+static inline __attribute__ ((format (printf, 1, 2)))
+int no_printk(const char *fmt, ...)
+{
+	return 0;
+}
+
+extern asmlinkage __attribute__ ((format (printf, 1, 2)))
+void early_printk(const char *fmt, ...);
+
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
 #ifdef CONFIG_PRINTK
-asmlinkage int vprintk(const char *fmt, va_list args)
-	__attribute__ ((format (printf, 1, 0)));
-asmlinkage int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2))) __cold;
+asmlinkage __attribute__ ((format (printf, 1, 0)))
+int vprintk(const char *fmt, va_list args);
+asmlinkage __attribute__ ((format (printf, 1, 2))) __cold
+int printk(const char *fmt, ...);
 
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -83,99 +110,56 @@
 
 extern int printk_delay_msec;
 extern int dmesg_restrict;
-
-/*
- * Print a one-time message (analogous to WARN_ONCE() et al):
- */
-#define printk_once(x...) ({			\
-	static bool __print_once;		\
-						\
-	if (!__print_once) {			\
-		__print_once = true;		\
-		printk(x);			\
-	}					\
-})
+extern int kptr_restrict;
 
 void log_buf_kexec_setup(void);
 #else
-static inline int vprintk(const char *s, va_list args)
-	__attribute__ ((format (printf, 1, 0)));
-static inline int vprintk(const char *s, va_list args) { return 0; }
-static inline int printk(const char *s, ...)
-	__attribute__ ((format (printf, 1, 2)));
-static inline int __cold printk(const char *s, ...) { return 0; }
-static inline int printk_ratelimit(void) { return 0; }
-static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
-					  unsigned int interval_msec)	\
-		{ return false; }
-
-/* No effect, but we still get type checking even in the !PRINTK case: */
-#define printk_once(x...) printk(x)
+static inline __attribute__ ((format (printf, 1, 0)))
+int vprintk(const char *s, va_list args)
+{
+	return 0;
+}
+static inline __attribute__ ((format (printf, 1, 2))) __cold
+int printk(const char *s, ...)
+{
+	return 0;
+}
+static inline int printk_ratelimit(void)
+{
+	return 0;
+}
+static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+					  unsigned int interval_msec)
+{
+	return false;
+}
 
 static inline void log_buf_kexec_setup(void)
 {
 }
 #endif
 
-/*
- * Dummy printk for disabled debugging statements to use whilst maintaining
- * gcc's format and side-effect checking.
- */
-static inline __attribute__ ((format (printf, 1, 2)))
-int no_printk(const char *s, ...) { return 0; }
-
-extern int printk_needs_cpu(int cpu);
-extern void printk_tick(void);
-
-extern void asmlinkage __attribute__((format(printf, 1, 2)))
-	early_printk(const char *fmt, ...);
-
-static inline void console_silent(void)
-{
-	console_loglevel = 0;
-}
-
-static inline void console_verbose(void)
-{
-	if (console_loglevel)
-		console_loglevel = 15;
-}
-
 extern void dump_stack(void) __cold;
 
-enum {
-	DUMP_PREFIX_NONE,
-	DUMP_PREFIX_ADDRESS,
-	DUMP_PREFIX_OFFSET
-};
-extern void hex_dump_to_buffer(const void *buf, size_t len,
-				int rowsize, int groupsize,
-				char *linebuf, size_t linebuflen, bool ascii);
-extern void print_hex_dump(const char *level, const char *prefix_str,
-				int prefix_type, int rowsize, int groupsize,
-				const void *buf, size_t len, bool ascii);
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
-			const void *buf, size_t len);
-
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
 
 #define pr_emerg(fmt, ...) \
-        printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_alert(fmt, ...) \
-        printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_crit(fmt, ...) \
-        printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
-        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
-        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
-        printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
-        printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_cont(fmt, ...) \
 	printk(KERN_CONT fmt, ##__VA_ARGS__)
 
@@ -185,7 +169,7 @@
 	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_devel(fmt, ...) \
-	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 /* If you are writing a driver, please use dev_dbg instead */
@@ -198,7 +182,51 @@
 	dynamic_pr_debug(fmt, ##__VA_ARGS__)
 #else
 #define pr_debug(fmt, ...) \
-	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+
+#ifdef CONFIG_PRINTK
+#define printk_once(fmt, ...)			\
+({						\
+	static bool __print_once;		\
+						\
+	if (!__print_once) {			\
+		__print_once = true;		\
+		printk(fmt, ##__VA_ARGS__);	\
+	}					\
+})
+#else
+#define printk_once(fmt, ...)			\
+	no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+#define pr_emerg_once(fmt, ...)					\
+	printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_once(fmt, ...)					\
+	printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_once(fmt, ...)					\
+	printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_once(fmt, ...)					\
+	printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_once(fmt, ...)					\
+	printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_once(fmt, ...)				\
+	printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_once(fmt, ...)					\
+	printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont_once(fmt, ...)					\
+	printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__)
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_once(fmt, ...)					\
+	printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_once(fmt, ...)					\
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 /*
@@ -206,7 +234,8 @@
  * no local ratelimit_state used in the !PRINTK case
  */
 #ifdef CONFIG_PRINTK
-#define printk_ratelimited(fmt, ...)  ({				\
+#define printk_ratelimited(fmt, ...)					\
+({									\
 	static DEFINE_RATELIMIT_STATE(_rs,				\
 				      DEFAULT_RATELIMIT_INTERVAL,	\
 				      DEFAULT_RATELIMIT_BURST);		\
@@ -215,34 +244,59 @@
 		printk(fmt, ##__VA_ARGS__);				\
 })
 #else
-/* No effect, but we still get type checking even in the !PRINTK case: */
-#define printk_ratelimited printk
+#define printk_ratelimited(fmt, ...)					\
+	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-#define pr_emerg_ratelimited(fmt, ...) \
+#define pr_emerg_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert_ratelimited(fmt, ...) \
+#define pr_alert_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit_ratelimited(fmt, ...) \
+#define pr_crit_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err_ratelimited(fmt, ...) \
+#define pr_err_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning_ratelimited(fmt, ...) \
+#define pr_warn_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn_ratelimited pr_warning_ratelimited
-#define pr_notice_ratelimited(fmt, ...) \
+#define pr_notice_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info_ratelimited(fmt, ...) \
+#define pr_info_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /* no pr_cont_ratelimited, don't do that... */
 /* If you are writing a driver, please use dev_dbg instead */
 #if defined(DEBUG)
-#define pr_debug_ratelimited(fmt, ...) \
+#define pr_debug_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_debug_ratelimited(fmt, ...) \
-	({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
-				     ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+enum {
+	DUMP_PREFIX_NONE,
+	DUMP_PREFIX_ADDRESS,
+	DUMP_PREFIX_OFFSET
+};
+extern void hex_dump_to_buffer(const void *buf, size_t len,
+			       int rowsize, int groupsize,
+			       char *linebuf, size_t linebuflen, bool ascii);
+#ifdef CONFIG_PRINTK
+extern void print_hex_dump(const char *level, const char *prefix_str,
+			   int prefix_type, int rowsize, int groupsize,
+			   const void *buf, size_t len, bool ascii);
+extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+				 const void *buf, size_t len);
+#else
+static inline void print_hex_dump(const char *level, const char *prefix_str,
+				  int prefix_type, int rowsize, int groupsize,
+				  const void *buf, size_t len, bool ascii)
+{
+}
+static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+					const void *buf, size_t len)
+{
+}
+
 #endif
 
 #endif
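
The net effect of the printk.h reorganisation is easiest to see from the caller's side; a hedged example (not taken from any in-tree user) of the new *_once helpers and the no_printk() fallback:

#include <linux/printk.h>

static void demo_report(int err)
{
	/* printed at most once over the lifetime of the kernel */
	pr_warn_once("demo: first failure, err=%d\n", err);

	/* with CONFIG_PRINTK=n (or !DEBUG here) this becomes no_printk(),
	 * which keeps gcc's format checking but emits nothing */
	pr_debug_once("demo: err=%d\n", err);
}
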
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
new file mode 100644
index 0000000..2f691e4
--- /dev/null
+++ b/include/linux/pxa2xx_ssp.h
@@ -0,0 +1,209 @@
+/*
+ *  pxa2xx_ssp.h
+ *
+ *  Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver supports the following PXA CPU/SSP ports:-
+ *
+ *       PXA250     SSP
+ *       PXA255     SSP, NSSP
+ *       PXA26x     SSP, NSSP, ASSP
+ *       PXA27x     SSP1, SSP2, SSP3
+ *       PXA3xx     SSP1, SSP2, SSP3, SSP4
+ */
+
+#ifndef __LINUX_SSP_H
+#define __LINUX_SSP_H
+
+#include <linux/list.h>
+#include <linux/io.h>
+
+/*
+ * SSP Serial Port Registers
+ * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different.
+ * PXA255, PXA26x and PXA27x have extra ports, registers and bits.
+ */
+
+#define SSCR0		(0x00)  /* SSP Control Register 0 */
+#define SSCR1		(0x04)  /* SSP Control Register 1 */
+#define SSSR		(0x08)  /* SSP Status Register */
+#define SSITR		(0x0C)  /* SSP Interrupt Test Register */
+#define SSDR		(0x10)  /* SSP Data Write/Data Read Register */
+
+#define SSTO		(0x28)  /* SSP Time Out Register */
+#define SSPSP		(0x2C)  /* SSP Programmable Serial Protocol */
+#define SSTSA		(0x30)  /* SSP Tx Timeslot Active */
+#define SSRSA		(0x34)  /* SSP Rx Timeslot Active */
+#define SSTSS		(0x38)  /* SSP Timeslot Status */
+#define SSACD		(0x3C)  /* SSP Audio Clock Divider */
+#define SSACDD		(0x40)	/* SSP Audio Clock Dither Divider */
+
+/* Common PXA2xx bits first */
+#define SSCR0_DSS	(0x0000000f)	/* Data Size Select (mask) */
+#define SSCR0_DataSize(x)  ((x) - 1)	/* Data Size Select [4..16] */
+#define SSCR0_FRF	(0x00000030)	/* FRame Format (mask) */
+#define SSCR0_Motorola	(0x0 << 4)	/* Motorola's Serial Peripheral Interface (SPI) */
+#define SSCR0_TI	(0x1 << 4)	/* Texas Instruments' Synchronous Serial Protocol (SSP) */
+#define SSCR0_National	(0x2 << 4)	/* National Microwire */
+#define SSCR0_ECS	(1 << 6)	/* External clock select */
+#define SSCR0_SSE	(1 << 7)	/* Synchronous Serial Port Enable */
+#define SSCR0_SCR(x)	((x) << 8)	/* Serial Clock Rate (mask) */
+
+/* PXA27x, PXA3xx */
+#define SSCR0_EDSS	(1 << 20)	/* Extended data size select */
+#define SSCR0_NCS	(1 << 21)	/* Network clock select */
+#define SSCR0_RIM	(1 << 22)	/* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM	(1 << 23)	/* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC	(0x07000000)	/* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24)	/* Time slots per frame [1..8] */
+#define SSCR0_FPCKE	(1 << 29)	/* FIFO packing enable */
+#define SSCR0_ACS	(1 << 30)	/* Audio clock select */
+#define SSCR0_MOD	(1 << 31)	/* Mode (normal or network) */
+
+
+#define SSCR1_RIE	(1 << 0)	/* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE	(1 << 1)	/* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM	(1 << 2)	/* Loop-Back Mode */
+#define SSCR1_SPO	(1 << 3)	/* Motorola SPI SSPSCLK polarity setting */
+#define SSCR1_SPH	(1 << 4)	/* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS	(1 << 5)	/* Microwire Transmit Data Size */
+
+#define SSSR_ALT_FRM_MASK	3	/* Masks the SFRM signal number */
+#define SSSR_TNF	(1 << 2)	/* Transmit FIFO Not Full */
+#define SSSR_RNE	(1 << 3)	/* Receive FIFO Not Empty */
+#define SSSR_BSY	(1 << 4)	/* SSP Busy */
+#define SSSR_TFS	(1 << 5)	/* Transmit FIFO Service Request */
+#define SSSR_RFS	(1 << 6)	/* Receive FIFO Service Request */
+#define SSSR_ROR	(1 << 7)	/* Receive FIFO Overrun */
+
+#ifdef CONFIG_ARCH_PXA
+#define RX_THRESH_DFLT	8
+#define TX_THRESH_DFLT	8
+
+#define SSSR_TFL_MASK	(0xf << 8)	/* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK	(0xf << 12)	/* Receive FIFO Level mask */
+
+#define SSCR1_TFT	(0x000003c0)	/* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT	(0x00003c00)	/* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#else
+
+#define RX_THRESH_DFLT	2
+#define TX_THRESH_DFLT	2
+
+#define SSSR_TFL_MASK	(0x3 << 8)	/* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK	(0x3 << 12)	/* Receive FIFO Level mask */
+
+#define SSCR1_TFT	(0x000000c0)	/* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
+#define SSCR1_RFT	(0x00000c00)	/* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+#endif
+
+/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+#define SSCR0_TISSP		(1 << 4)	/* TI Sync Serial Protocol */
+#define SSCR0_PSP		(3 << 4)	/* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP		(1 << 31)	/* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE		(1 << 30)	/* TXD Tristate Enable */
+#define SSCR1_EBCEI		(1 << 29)	/* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR		(1 << 28)	/* Slave Clock free Running */
+#define SSCR1_ECRA		(1 << 27)	/* Enable Clock Request A */
+#define SSCR1_ECRB		(1 << 26)	/* Enable Clock request B */
+#define SSCR1_SCLKDIR		(1 << 25)	/* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR		(1 << 24)	/* Frame Direction */
+#define SSCR1_RWOT		(1 << 23)	/* Receive Without Transmit */
+#define SSCR1_TRAIL		(1 << 22)	/* Trailing Byte */
+#define SSCR1_TSRE		(1 << 21)	/* Transmit Service Request Enable */
+#define SSCR1_RSRE		(1 << 20)	/* Receive Service Request Enable */
+#define SSCR1_TINTE		(1 << 19)	/* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE		(1 << 18)	/* Peripheral Trailing Byte Interrupt Enable */
+#define SSCR1_IFS		(1 << 16)	/* Invert Frame Signal */
+#define SSCR1_STRF		(1 << 15)	/* Select FIFO or EFWR */
+#define SSCR1_EFWR		(1 << 14)	/* Enable FIFO Write/Read */
+
+#define SSSR_BCE		(1 << 23)	/* Bit Count Error */
+#define SSSR_CSS		(1 << 22)	/* Clock Synchronisation Status */
+#define SSSR_TUR		(1 << 21)	/* Transmit FIFO Under Run */
+#define SSSR_EOC		(1 << 20)	/* End Of Chain */
+#define SSSR_TINT		(1 << 19)	/* Receiver Time-out Interrupt */
+#define SSSR_PINT		(1 << 18)	/* Peripheral Trailing Byte Interrupt */
+
+
+#define SSPSP_SCMODE(x)		((x) << 0)	/* Serial Bit Rate Clock Mode */
+#define SSPSP_SFRMP		(1 << 2)	/* Serial Frame Polarity */
+#define SSPSP_ETDS		(1 << 3)	/* End of Transfer data State */
+#define SSPSP_STRTDLY(x)	((x) << 4)	/* Start Delay */
+#define SSPSP_DMYSTRT(x)	((x) << 7)	/* Dummy Start */
+#define SSPSP_SFRMDLY(x)	((x) << 9)	/* Serial Frame Delay */
+#define SSPSP_SFRMWDTH(x)	((x) << 16)	/* Serial Frame Width */
+#define SSPSP_DMYSTOP(x)	((x) << 23)	/* Dummy Stop */
+#define SSPSP_FSRT		(1 << 25)	/* Frame Sync Relative Timing */
+
+/* PXA3xx */
+#define SSPSP_EDMYSTRT(x)	((x) << 26)     /* Extended Dummy Start */
+#define SSPSP_EDMYSTOP(x)	((x) << 28)     /* Extended Dummy Stop */
+#define SSPSP_TIMING_MASK	(0x7f8001f0)
+
+#define SSACD_SCDB		(1 << 3)	/* SSPSYSCLK Divider Bypass */
+#define SSACD_ACPS(x)		((x) << 4)	/* Audio clock PLL select */
+#define SSACD_ACDS(x)		((x) << 0)	/* Audio clock divider select */
+#define SSACD_SCDX8		(1 << 7)	/* SYSCLK division ratio select */
+
+enum pxa_ssp_type {
+	SSP_UNDEFINED = 0,
+	PXA25x_SSP,  /* pxa 210, 250, 255, 26x */
+	PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
+	PXA27x_SSP,
+	PXA168_SSP,
+	CE4100_SSP,
+};
+
+struct ssp_device {
+	struct platform_device *pdev;
+	struct list_head	node;
+
+	struct clk	*clk;
+	void __iomem	*mmio_base;
+	unsigned long	phys_base;
+
+	const char	*label;
+	int		port_id;
+	int		type;
+	int		use_count;
+	int		irq;
+	int		drcmr_rx;
+	int		drcmr_tx;
+};
+
+/**
+ * pxa_ssp_write_reg - Write to a SSP register
+ *
+ * @dev: SSP device to access
+ * @reg: Register to write to
+ * @val: Value to be written.
+ */
+static inline void pxa_ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val)
+{
+	__raw_writel(val, dev->mmio_base + reg);
+}
+
+/**
+ * pxa_ssp_read_reg - Read from a SSP register
+ *
+ * @dev: SSP device to access
+ * @reg: Register to read from
+ */
+static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
+{
+	return __raw_readl(dev->mmio_base + reg);
+}
+
+struct ssp_device *pxa_ssp_request(int port, const char *label);
+void pxa_ssp_free(struct ssp_device *);
+#endif
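
A minimal usage sketch for the relocated SSP helpers, assuming a hypothetical consumer of port 1 (the port number and "demo" label are illustrative only):

#include <linux/pxa2xx_ssp.h>
#include <linux/errno.h>

static int demo_ssp_setup(void)
{
	struct ssp_device *ssp;
	u32 sscr0;

	ssp = pxa_ssp_request(1, "demo");
	if (!ssp)
		return -ENODEV;

	/* program 8-bit Motorola SPI frames and enable the port */
	sscr0 = SSCR0_Motorola | SSCR0_DataSize(8) | SSCR0_SSE;
	pxa_ssp_write_reg(ssp, SSCR0, sscr0);

	/* read back the status register, then release the port */
	(void)pxa_ssp_read_reg(ssp, SSSR);
	pxa_ssp_free(ssp);
	return 0;
}
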
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d1a9193..223b14c 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -31,8 +31,9 @@
 #define quota_error(sb, fmt, args...) \
 	__quota_error((sb), __func__, fmt , ## args)
 
-extern void __quota_error(struct super_block *sb, const char *func,
-			 const char *fmt, ...);
+extern __attribute__((format (printf, 3, 4)))
+void __quota_error(struct super_block *sb, const char *func,
+		   const char *fmt, ...);
 
 /*
  * declaration of quota_function calls in kernel.
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ab2baa5..23241c2 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -146,6 +146,22 @@
 }
 
 /**
+ * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
+ * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
+ * Returns:	item that was stored in that slot with any direct pointer flag
+ *		removed.
+ *
+ * Similar to radix_tree_deref_slot but only used during migration when a page's
+ * mapping is being moved. The caller does not hold the RCU read lock but it
+ * must hold the tree lock to prevent parallel updates.
+ */
+static inline void *radix_tree_deref_slot_protected(void **pslot,
+							spinlock_t *treelock)
+{
+	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
+}
+
+/**
  * radix_tree_deref_retry	- check radix_tree_deref_slot
  * @arg:	pointer returned by radix_tree_deref_slot
  * Returns:	0 if retry is not required, otherwise retry is required
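
A hedged sketch of the new accessor: looking up a slot with the tree's own lock held instead of rcu_read_lock(). The lookup helper and locking discipline shown here are illustrative, not lifted from a real caller:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static void *demo_lookup_locked(struct radix_tree_root *root,
				spinlock_t *tree_lock, unsigned long index)
{
	void **slot;
	void *item = NULL;

	spin_lock_irq(tree_lock);
	slot = radix_tree_lookup_slot(root, index);
	if (slot)
		item = radix_tree_deref_slot_protected(slot, tree_lock);
	spin_unlock_irq(tree_lock);
	return item;
}
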
diff --git a/include/linux/rar_register.h b/include/linux/rar_register.h
index ffa8057..5c61181 100644
--- a/include/linux/rar_register.h
+++ b/include/linux/rar_register.h
@@ -34,11 +34,27 @@
 
 struct rar_device;
 
+#if defined(CONFIG_RAR_REGISTER)
 int register_rar(int num,
 		int (*callback)(unsigned long data), unsigned long data);
 void unregister_rar(int num);
 int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end);
 int rar_lock(int rar_index);
+#else
+static inline void unregister_rar(int num) { }
+static inline int rar_lock(int rar_index) { return -EIO; }
+
+static inline int register_rar(int num,
+		int (*callback)(unsigned long data), unsigned long data)
+{
+	return -ENODEV;
+}
+
+static inline int rar_get_address(int rar_index, dma_addr_t *start, dma_addr_t *end)
+{
+	return -ENODEV;
+}
+#endif	/* CONFIG_RAR_REGISTER */
 
 #endif  /* __KERNEL__ */
 #endif  /* _RAR_REGISTER_H */
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
new file mode 100644
index 0000000..cf1244f
--- /dev/null
+++ b/include/linux/rculist_bl.h
@@ -0,0 +1,128 @@
+#ifndef _LINUX_RCULIST_BL_H
+#define _LINUX_RCULIST_BL_H
+
+/*
+ * RCU-protected bl list version. See include/linux/list_bl.h.
+ */
+#include <linux/list_bl.h>
+#include <linux/rcupdate.h>
+
+static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
+					struct hlist_bl_node *n)
+{
+	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
+	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
+							LIST_BL_LOCKMASK);
+	rcu_assign_pointer(h->first,
+		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
+}
+
+static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
+{
+	return (struct hlist_bl_node *)
+		((unsigned long)rcu_dereference(h->first) & ~LIST_BL_LOCKMASK);
+}
+
+/**
+ * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_bl_unhashed() on the node returns true after this. It is
+ * useful for RCU based read lockfree traversal if the writer side
+ * must know if the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we can not poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so list_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_bl_add_head_rcu() or
+ * hlist_bl_del_rcu(), running on this same list.  However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_bl_for_each_entry_rcu().
+ */
+static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
+{
+	if (!hlist_bl_unhashed(n)) {
+		__hlist_bl_del(n);
+		n->pprev = NULL;
+	}
+}
+
+/**
+ * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_bl_unhashed() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
+ * or hlist_bl_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_bl_for_each_entry().
+ */
+static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
+{
+	__hlist_bl_del(n);
+	n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_bl_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_bl,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
+ * or hlist_bl_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
+					struct hlist_bl_head *h)
+{
+	struct hlist_bl_node *first;
+
+	/* don't need hlist_bl_first_rcu because we're under lock */
+	first = hlist_bl_first(h);
+
+	n->next = first;
+	if (first)
+		first->pprev = &n->next;
+	n->pprev = &h->first;
+
+	/* need _rcu because we can have concurrent lock free readers */
+	hlist_bl_set_first_rcu(h, n);
+}
+/**
+ * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos:	the type * to use as a loop cursor.
+ * @pos:	the &struct hlist_bl_node to use as a loop cursor.
+ * @head:	the head for your list.
+ * @member:	the name of the hlist_bl_node within the struct.
+ *
+ */
+#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
+	for (pos = hlist_bl_first_rcu(head);				\
+		pos &&							\
+		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
+		pos = rcu_dereference_raw(pos->next))
+
+#endif
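
To make the locking rules concrete, here is a hypothetical writer/reader pair for a bit-locked RCU hash chain; the demo_entry structure and the explicit bit_spin_lock() on the head are assumptions of this sketch, not part of the patch:

#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>

struct demo_entry {
	struct hlist_bl_node node;
	int key;
};

/* Writer side: bit 0 of the head pointer serialises against other writers. */
static void demo_insert(struct hlist_bl_head *head, struct demo_entry *e)
{
	bit_spin_lock(0, (unsigned long *)&head->first);
	hlist_bl_add_head_rcu(&e->node, head);
	bit_spin_unlock(0, (unsigned long *)&head->first);
}

/* Reader side: lock-free traversal under rcu_read_lock(). */
static struct demo_entry *demo_find(struct hlist_bl_head *head, int key)
{
	struct demo_entry *e;
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(e, pos, head, node) {
		if (e->key == key) {
			rcu_read_unlock();
			return e;
		}
	}
	rcu_read_unlock();
	return NULL;
}
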
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index f509877..6a210f1 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -11,15 +11,17 @@
 #define __LINUX_MFD_AB8500_REGULATOR_H
 
 /* AB8500 regulators */
-#define AB8500_LDO_AUX1         0
-#define AB8500_LDO_AUX2         1
-#define AB8500_LDO_AUX3         2
-#define AB8500_LDO_INTCORE      3
-#define AB8500_LDO_TVOUT        4
-#define AB8500_LDO_AUDIO	5
-#define AB8500_LDO_ANAMIC1      6
-#define AB8500_LDO_ANAMIC2      7
-#define AB8500_LDO_DMIC         8
-#define AB8500_LDO_ANA          9
-
+enum ab8500_regulator_id {
+	AB8500_LDO_AUX1,
+	AB8500_LDO_AUX2,
+	AB8500_LDO_AUX3,
+	AB8500_LDO_INTCORE,
+	AB8500_LDO_TVOUT,
+	AB8500_LDO_AUDIO,
+	AB8500_LDO_ANAMIC1,
+	AB8500_LDO_ANAMIC2,
+	AB8500_LDO_DMIC,
+	AB8500_LDO_ANA,
+	AB8500_NUM_REGULATORS,
+};
 #endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index ebd7472..7954f6b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -154,6 +154,7 @@
 				   int min_uV, int max_uV);
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
 int regulator_get_voltage(struct regulator *regulator);
+int regulator_sync_voltage(struct regulator *regulator);
 int regulator_set_current_limit(struct regulator *regulator,
 			       int min_uA, int max_uA);
 int regulator_get_current_limit(struct regulator *regulator);
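
A consumer-side sketch of the new regulator_sync_voltage() call; the "vdd_demo" supply name and the lost-state-across-suspend scenario are assumptions made for illustration:

#include <linux/regulator/consumer.h>
#include <linux/err.h>

static int demo_restore_supply(struct device *dev)
{
	struct regulator *reg;
	int ret;

	reg = regulator_get(dev, "vdd_demo");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	ret = regulator_set_voltage(reg, 1200000, 1300000);	/* 1.2-1.3 V */
	if (!ret)
		/* re-apply the cached voltage constraints to the hardware,
		 * e.g. after the supply lost its state across suspend */
		ret = regulator_sync_voltage(reg);

	regulator_put(reg);
	return ret;
}
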
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 592cd7c..b8ed16a 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -42,7 +42,11 @@
  *
  * @set_voltage: Set the voltage for the regulator within the range specified.
  *               The driver should select the voltage closest to min_uV.
+ * @set_voltage_sel: Set the voltage for the regulator using the specified
+ *                   selector.
  * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage_sel: Return the currently configured voltage selector for the
+ *                   regulator.
  * @list_voltage: Return one of the supported voltages, in microvolts; zero
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
@@ -79,8 +83,11 @@
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
 
 	/* get/set regulator voltage */
-	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
+	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
+			    unsigned *selector);
+	int (*set_voltage_sel) (struct regulator_dev *, unsigned selector);
 	int (*get_voltage) (struct regulator_dev *);
+	int (*get_voltage_sel) (struct regulator_dev *);
 
 	/* get/set regulator current  */
 	int (*set_current_limit) (struct regulator_dev *,
@@ -168,9 +175,9 @@
  */
 struct regulator_dev {
 	struct regulator_desc *desc;
-	int use_count;
-	int open_count;
 	int exclusive;
+	u32 use_count;
+	u32 open_count;
 
 	/* lists we belong to */
 	struct list_head list; /* list of all regulators */
@@ -188,10 +195,14 @@
 	struct regulator_dev *supply;	/* for tree */
 
 	void *reg_data;		/* regulator_dev data */
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs;
+#endif
 };
 
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
+	struct device *dev, const struct regulator_init_data *init_data,
 	void *driver_data);
 void regulator_unregister(struct regulator_dev *rdev);
 
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index b2cf208..3b94c91 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -41,7 +41,7 @@
 int reiserfs_lookup_privroot(struct super_block *sb);
 int reiserfs_delete_xattrs(struct inode *inode);
 int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
-int reiserfs_permission(struct inode *inode, int mask);
+int reiserfs_permission(struct inode *inode, int mask, unsigned int flags);
 
 #ifdef CONFIG_REISERFS_FS_XATTR
 #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 0bed941..ff681eb 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -66,14 +66,62 @@
 
 #define RIO_PW_MSG_SIZE		64
 
+/*
+ * A component tag value (stored in the component tag CSR) is used as device's
+ * unique identifier assigned during enumeration. Besides being used for
+ * identifying switches (which do not have device ID register), it also is used
+ * by error management notification and therefore has to be assigned
+ * to endpoints as well.
+ */
+#define RIO_CTAG_RESRVD	0xfffe0000 /* Reserved */
+#define RIO_CTAG_UDEVID	0x0001ffff /* Unique device identifier */
+
 extern struct bus_type rio_bus_type;
 extern struct device rio_bus;
 extern struct list_head rio_devices;	/* list of all devices */
 
 struct rio_mport;
+struct rio_dev;
 union rio_pw_msg;
 
 /**
+ * struct rio_switch - RIO switch info
+ * @node: Node in global list of switches
+ * @switchid: Switch ID that is unique across a network
+ * @route_table: Copy of switch routing table
+ * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
+ * @add_entry: Callback for switch-specific route add function
+ * @get_entry: Callback for switch-specific route get function
+ * @clr_table: Callback for switch-specific clear route table function
+ * @set_domain: Callback for switch-specific domain setting function
+ * @get_domain: Callback for switch-specific domain get function
+ * @em_init: Callback for switch-specific error management init function
+ * @em_handle: Callback for switch-specific error management handler function
+ * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
+ * @nextdev: Array of per-port pointers to the next attached device
+ */
+struct rio_switch {
+	struct list_head node;
+	u16 switchid;
+	u8 *route_table;
+	u32 port_ok;
+	int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table, u16 route_destid, u8 route_port);
+	int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table, u16 route_destid, u8 *route_port);
+	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table);
+	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 sw_domain);
+	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 *sw_domain);
+	int (*em_init) (struct rio_dev *dev);
+	int (*em_handle) (struct rio_dev *dev, u8 swport);
+	int (*sw_sysfs) (struct rio_dev *dev, int create);
+	struct rio_dev *nextdev[0];
+};
+
+/**
  * struct rio_dev - RIO device info
  * @global_list: Node in list of all RIO devices
  * @net_list: Node in list of RIO devices in a network
@@ -93,13 +141,14 @@
  * @phys_efptr: RIO device extended features pointer
  * @em_efptr: RIO Error Management features pointer
  * @dma_mask: Mask of bits of RIO address this device implements
- * @rswitch: Pointer to &struct rio_switch if valid for this device
  * @driver: Driver claiming this device
  * @dev: Device model device
  * @riores: RIO resources this device owns
  * @pwcback: port-write callback function for this device
- * @destid: Network destination ID
+ * @destid: Network destination ID (or associated destid for switch)
+ * @hopcount: Hopcount to this device
  * @prev: Previous RIO device connected to the current one
+ * @rswitch: struct rio_switch (if valid for this device)
  */
 struct rio_dev {
 	struct list_head global_list;	/* node in list of all RIO devices */
@@ -120,18 +169,20 @@
 	u32 phys_efptr;
 	u32 em_efptr;
 	u64 dma_mask;
-	struct rio_switch *rswitch;	/* RIO switch info */
 	struct rio_driver *driver;	/* RIO driver claiming this device */
 	struct device dev;	/* LDM device structure */
 	struct resource riores[RIO_MAX_DEV_RESOURCES];
 	int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
 	u16 destid;
+	u8 hopcount;
 	struct rio_dev *prev;
+	struct rio_switch rswitch[0];	/* RIO switch info */
 };
 
 #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
 #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list)
 #define	to_rio_dev(n) container_of(n, struct rio_dev, dev)
+#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
 
 /**
  * struct rio_msg - RIO message event
@@ -224,49 +275,6 @@
 #define RIO_SW_SYSFS_CREATE	1	/* Create switch attributes */
 #define RIO_SW_SYSFS_REMOVE	0	/* Remove switch attributes */
 
-/**
- * struct rio_switch - RIO switch info
- * @node: Node in global list of switches
- * @rdev: Associated RIO device structure
- * @switchid: Switch ID that is unique across a network
- * @hopcount: Hopcount to this switch
- * @destid: Associated destid in the path
- * @route_table: Copy of switch routing table
- * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
- * @add_entry: Callback for switch-specific route add function
- * @get_entry: Callback for switch-specific route get function
- * @clr_table: Callback for switch-specific clear route table function
- * @set_domain: Callback for switch-specific domain setting function
- * @get_domain: Callback for switch-specific domain get function
- * @em_init: Callback for switch-specific error management initialization function
- * @em_handle: Callback for switch-specific error management handler function
- * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
- * @nextdev: Array of per-port pointers to the next attached device
- */
-struct rio_switch {
-	struct list_head node;
-	struct rio_dev *rdev;
-	u16 switchid;
-	u16 hopcount;
-	u16 destid;
-	u8 *route_table;
-	u32 port_ok;
-	int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			  u16 table, u16 route_destid, u8 route_port);
-	int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			  u16 table, u16 route_destid, u8 * route_port);
-	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			  u16 table);
-	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			   u8 sw_domain);
-	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			   u8 *sw_domain);
-	int (*em_init) (struct rio_dev *dev);
-	int (*em_handle) (struct rio_dev *dev, u8 swport);
-	int (*sw_sysfs) (struct rio_dev *dev, int create);
-	struct rio_dev *nextdev[0];
-};
-
 /* Low-level architecture-dependent routines */
 
 /**
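
With struct rio_switch now embedded at the tail of struct rio_dev, switch-specific code can reach either structure from the other; a hypothetical error-management callback, sketched for illustration only:

#include <linux/rio.h>
#include <linux/printk.h>

static int demo_em_init(struct rio_dev *rdev)
{
	struct rio_switch *rswitch = rdev->rswitch;

	/* destid and hopcount now live in rio_dev itself */
	pr_debug("demo: init EM on switch %04x (destid %04x, hop %d)\n",
		 rswitch->switchid, rdev->destid, rdev->hopcount);
	return 0;
}

static void demo_from_switch(struct rio_switch *rswitch)
{
	/* recover the owning rio_dev from an embedded rio_switch pointer */
	struct rio_dev *rdev = sw_to_rio_dev(rswitch);

	(void)rdev;
}
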
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index edc55da..e09e565 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -150,16 +150,8 @@
 static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset,
 				     u32 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_read_config_32(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 /**
@@ -174,16 +166,8 @@
 static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset,
 				      u32 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
-					 offset, data);
+	return rio_mport_write_config_32(rdev->net->hport, rdev->destid,
+					 rdev->hopcount, offset, data);
 };
 
 /**
@@ -198,16 +182,8 @@
 static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset,
 				     u16 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_16(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_read_config_16(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 /**
@@ -222,16 +198,8 @@
 static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset,
 				      u16 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_16(rdev->net->hport, destid, hopcount,
-					 offset, data);
+	return rio_mport_write_config_16(rdev->net->hport, rdev->destid,
+					 rdev->hopcount, offset, data);
 };
 
 /**
@@ -245,16 +213,8 @@
  */
 static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_8(rdev->net->hport, destid, hopcount,
-				       offset, data);
+	return rio_mport_read_config_8(rdev->net->hport, rdev->destid,
+				       rdev->hopcount, offset, data);
 };
 
 /**
@@ -268,16 +228,8 @@
  */
 static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_8(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_write_config_8(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid,
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index ee7b6ad..7410d33 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -36,5 +36,7 @@
 #define RIO_DID_IDTCPS10Q		0x035e
 #define RIO_DID_IDTCPS1848		0x0374
 #define RIO_DID_IDTCPS1616		0x0379
+#define RIO_DID_IDTVPS1616		0x0377
+#define RIO_DID_IDTSPS1616		0x0378
 
 #endif				/* LINUX_RIO_IDS_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bb83c0d..e9fd04c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -198,6 +198,8 @@
 };
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
+bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
 int try_to_unmap(struct page *, enum ttu_flags flags);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
 			unsigned long address, enum ttu_flags flags);
diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h
index c490fbc..5f57f93 100644
--- a/include/linux/romfs_fs.h
+++ b/include/linux/romfs_fs.h
@@ -1,6 +1,9 @@
 #ifndef __LINUX_ROMFS_FS_H
 #define __LINUX_ROMFS_FS_H
 
+#include <linux/types.h>
+#include <linux/fs.h>
+
 /* The basic structures of the romfs filesystem */
 
 #define ROMBSIZE BLOCK_SIZE
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 14dbc83..3c995b4 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -107,12 +107,17 @@
 extern int rtc_valid_tm(struct rtc_time *tm);
 extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
 extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
+ktime_t rtc_tm_to_ktime(struct rtc_time tm);
+struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+
 
 #include <linux/device.h>
 #include <linux/seq_file.h>
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/timerqueue.h>
+#include <linux/workqueue.h>
 
 extern struct class *rtc_class;
 
@@ -151,7 +156,19 @@
 };
 
 #define RTC_DEVICE_NAME_SIZE 20
-struct rtc_task;
+typedef struct rtc_task {
+	void (*func)(void *private_data);
+	void *private_data;
+} rtc_task_t;
+
+
+struct rtc_timer {
+	struct rtc_task	task;
+	struct timerqueue_node node;
+	ktime_t period;
+	int enabled;
+};
+
 
 /* flags */
 #define RTC_DEV_BUSY 0
@@ -179,16 +196,13 @@
 	spinlock_t irq_task_lock;
 	int irq_freq;
 	int max_user_freq;
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	struct work_struct uie_task;
-	struct timer_list uie_timer;
-	/* Those fields are protected by rtc->irq_lock */
-	unsigned int oldsecs;
-	unsigned int uie_irq_active:1;
-	unsigned int stop_uie_polling:1;
-	unsigned int uie_task_active:1;
-	unsigned int uie_timer_active:1;
-#endif
+
+	struct timerqueue_head timerqueue;
+	struct rtc_timer aie_timer;
+	struct rtc_timer uie_rtctimer;
+	struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
+	int pie_enabled;
+	struct work_struct irqwork;
 };
 #define to_rtc_device(d) container_of(d, struct rtc_device, dev)
 
@@ -224,15 +238,22 @@
 extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
 						unsigned int enabled);
 
-typedef struct rtc_task {
-	void (*func)(void *private_data);
-	void *private_data;
-} rtc_task_t;
+void rtc_aie_update_irq(void *private);
+void rtc_uie_update_irq(void *private);
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
 
 int rtc_register(rtc_task_t *task);
 int rtc_unregister(rtc_task_t *task);
 int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
 
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
+			ktime_t expires, ktime_t period);
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_do_work(struct work_struct *work);
+
 static inline bool is_leap_year(unsigned int year)
 {
 	return (!(year % 4) && (year % 100)) || !(year % 400);
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
index dbce22f..fbe58b7 100644
--- a/include/linux/s3c_adc_battery.h
+++ b/include/linux/s3c_adc_battery.h
@@ -14,6 +14,7 @@
 	void (*disable_charger)(void);
 
 	int gpio_charge_finished;
+	int gpio_inverted;
 
 	const struct s3c_adc_bat_thresh *lut_noac;
 	unsigned int lut_noac_cnt;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 777cd01..d747f94 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -21,7 +21,8 @@
 #define CLONE_DETACHED		0x00400000	/* Unused, ignored */
 #define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
 #define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
-#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
+/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
+   and is now available for re-use. */
 #define CLONE_NEWUTS		0x04000000	/* New utsname group? */
 #define CLONE_NEWIPC		0x08000000	/* New ipcs */
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
@@ -70,7 +71,6 @@
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/path.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/pid.h>
@@ -88,7 +88,6 @@
 #include <linux/timer.h>
 #include <linux/hrtimer.h>
 #include <linux/task_io_accounting.h>
-#include <linux/kobject.h>
 #include <linux/latencytop.h>
 #include <linux/cred.h>
 
@@ -435,6 +434,7 @@
 #endif
 					/* leave room for more dump flags */
 #define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
@@ -635,6 +635,8 @@
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
 	int oom_score_adj;	/* OOM kill score adjustment */
+	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
+				 * Only settable by CAP_SYS_RESOURCE. */
 
 	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 					 * credential calculations
@@ -685,7 +687,7 @@
 	atomic_t fanotify_listeners;
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
@@ -2511,7 +2513,7 @@
 
 #ifdef CONFIG_CGROUP_SCHED
 
-extern struct task_group init_task_group;
+extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
diff --git a/include/linux/security.h b/include/linux/security.h
index d47a4c2..c642bb8 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -457,7 +457,6 @@
  *	called when the actual read/write operations are performed.
  *	@inode contains the inode structure to check.
  *	@mask contains the permission mask.
- *	@nd contains the nameidata (may be NULL).
  *	Return 0 if permission is granted.
  * @inode_setattr:
  *	Check permission before setting file attributes.  Note that the kernel
@@ -1059,8 +1058,7 @@
  *	@cred points to the credentials to provide the context against which to
  *	evaluate the security data on the key.
  *	@perm describes the combination of permissions required of this key.
- *	Return 1 if permission granted, 0 if permission denied and -ve it the
- *	normal permissions model should be effected.
+ *	Return 0 if permission is granted, -ve error otherwise.
  * @key_getsecurity:
  *	Get a textual representation of the security context attached to a key
  *	for the purposes of honouring KEYCTL_GETSECURITY.  This function
@@ -1713,6 +1711,7 @@
 int security_inode_readlink(struct dentry *dentry);
 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
 int security_inode_permission(struct inode *inode, int mask);
+int security_inode_exec_permission(struct inode *inode, unsigned int flags);
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
 int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
 int security_inode_setxattr(struct dentry *dentry, const char *name,
@@ -2102,6 +2101,12 @@
 	return 0;
 }
 
+static inline int security_inode_exec_permission(struct inode *inode,
+						  unsigned int flags)
+{
+	return 0;
+}
+
 static inline int security_inode_setattr(struct dentry *dentry,
 					  struct iattr *attr)
 {
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 632205c..e98cd2e 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -107,7 +107,7 @@
 {
 	smp_rmb();
 
-	return (sl->sequence != start);
+	return unlikely(sl->sequence != start);
 }
 
 
@@ -125,14 +125,25 @@
 #define SEQCNT_ZERO { 0 }
 #define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
 
-/* Start of read using pointer to a sequence counter only.  */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+/**
+ * __read_seqcount_begin - begin a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret;
 
 repeat:
 	ret = s->sequence;
-	smp_rmb();
 	if (unlikely(ret & 1)) {
 		cpu_relax();
 		goto repeat;
@@ -140,14 +151,56 @@
 	return ret;
 }
 
-/*
- * Test if reader processed invalid data because sequence number has changed.
+/**
+ * read_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by checking read_seqcount_retry
+ * function.
+ */
+static inline unsigned read_seqcount_begin(const seqcount_t *s)
+{
+	unsigned ret = __read_seqcount_begin(s);
+	smp_rmb();
+	return ret;
+}
+
+/**
+ * __read_seqcount_retry - end a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+{
+	return unlikely(s->sequence != start);
+}
+
+/**
+ * read_seqcount_retry - end a seq-read critical section
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * read_seqcount_retry closes a read critical section of the given seqcount.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
  */
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
 
-	return s->sequence != start;
+	return __read_seqcount_retry(s, start);
 }
 
 
@@ -167,6 +220,19 @@
 	s->sequence++;
 }
 
+/**
+ * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * @s: pointer to seqcount_t
+ *
+ * After write_seqcount_barrier, no read-side seq operations will complete
+ * successfully and see data older than this.
+ */
+static inline void write_seqcount_barrier(seqcount_t *s)
+{
+	smp_wmb();
+	s->sequence += 2;
+}
+
 /*
  * Possible sw/hw IRQ protected versions of the interfaces.
  */
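
The raw __read_seqcount_begin()/__read_seqcount_retry() pair leaves the barriers to the caller; a hedged sketch of a reader that supplies them explicitly, mirroring what the non-underscore variants do internally:

#include <linux/seqlock.h>

struct demo_state {
	seqcount_t seq;
	unsigned long a, b;
};

static void demo_read(struct demo_state *s, unsigned long *a, unsigned long *b)
{
	unsigned start;

	do {
		start = __read_seqcount_begin(&s->seq);
		smp_rmb();	/* order the begin against the loads below */
		*a = s->a;
		*b = s->b;
		smp_rmb();	/* order the loads against the retry check */
	} while (__read_seqcount_retry(&s->seq, start));
}
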
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 212eb4c..758c5b0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -95,7 +95,7 @@
 /* PPC CPM type number */
 #define PORT_CPM        58
 
-/* MPC52xx type numbers */
+/* MPC52xx (and MPC512x) type numbers */
 #define PORT_MPC52xx	59
 
 /* IBM icom */
@@ -199,6 +199,9 @@
 /* TI OMAP-UART */
 #define PORT_OMAP	96
 
+/* VIA VT8500 SoC */
+#define PORT_VT8500	97
+
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
@@ -209,6 +212,7 @@
 #include <linux/tty.h>
 #include <linux/mutex.h>
 #include <linux/sysrq.h>
+#include <linux/pps_kernel.h>
 
 struct uart_port;
 struct serial_struct;
@@ -311,6 +315,7 @@
 #define UPIO_TSI		(5)			/* Tsi108/109 type IO */
 #define UPIO_DWAPB		(6)			/* DesignWare APB UART */
 #define UPIO_RM9000		(7)			/* RM9000 type IO */
+#define UPIO_DWAPB32		(8)			/* DesignWare APB UART (32 bit accesses) */
 
 	unsigned int		read_status_mask;	/* driver specific */
 	unsigned int		ignore_status_mask;	/* driver specific */
@@ -361,6 +366,7 @@
 	struct device		*dev;			/* parent device */
 	unsigned char		hub6;			/* this should be in the 8250 driver */
 	unsigned char		suspended;
+	unsigned char		irq_wake;
 	unsigned char		unused[2];
 	void			*private_data;		/* generic platform data pointer */
 };
@@ -523,10 +529,10 @@
 	struct uart_state *state = uport->state;
 	struct tty_port *port = &state->port;
 	struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
-	struct timespec ts;
+	struct pps_event_time ts;
 
 	if (ld && ld->ops->dcd_change)
-		getnstimeofday(&ts);
+		pps_get_ts(&ts);
 
 	uport->icount.dcd++;
 #ifdef CONFIG_HARD_PPS
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index c7a0ce1..3ecb71a 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -99,6 +99,13 @@
 #define UART_LCR_WLEN7		0x02 /* Wordlength: 7 bits */
 #define UART_LCR_WLEN8		0x03 /* Wordlength: 8 bits */
 
+/*
+ * Access to some registers depends on register access / configuration
+ * mode.
+ */
+#define UART_LCR_CONF_MODE_A	UART_LCR_DLAB	/* Configuration mode A */
+#define UART_LCR_CONF_MODE_B	0xBF		/* Configuration mode B */
+
 #define UART_MCR	4	/* Out: Modem Control Register */
 #define UART_MCR_CLKSEL		0x80 /* Divide clock by 4 (TI16C752, EFR[4]=1) */
 #define UART_MCR_TCRTLR		0x40 /* Access TCR/TLR (TI16C752, EFR[4]=1) */
@@ -341,5 +348,17 @@
 #define UART_OMAP_SYSS		0x16	/* System status register */
 #define UART_OMAP_WER		0x17	/* Wake-up enable register */
 
+/*
+ * These are the definitions for the MDR1 register
+ */
+#define UART_OMAP_MDR1_16X_MODE		0x00	/* UART 16x mode */
+#define UART_OMAP_MDR1_SIR_MODE		0x01	/* SIR mode */
+#define UART_OMAP_MDR1_16X_ABAUD_MODE	0x02	/* UART 16x auto-baud */
+#define UART_OMAP_MDR1_13X_MODE		0x03	/* UART 13x mode */
+#define UART_OMAP_MDR1_MIR_MODE		0x04	/* MIR mode */
+#define UART_OMAP_MDR1_FIR_MODE		0x05	/* FIR mode */
+#define UART_OMAP_MDR1_CIR_MODE		0x06	/* CIR mode */
+#define UART_OMAP_MDR1_DISABLE		0x07	/* Disable (default state) */
+
 #endif /* _LINUX_SERIAL_REG_H */
 
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index baed212..1630d9c 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -8,6 +8,23 @@
  * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
  */
 
+enum {
+	SCBRR_ALGO_1,		/* ((clk + 16 * bps) / (16 * bps) - 1) */
+	SCBRR_ALGO_2,		/* ((clk + 16 * bps) / (32 * bps) - 1) */
+	SCBRR_ALGO_3,		/* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */
+	SCBRR_ALGO_4,		/* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */
+	SCBRR_ALGO_5,		/* (((clk * 1000 / 32) / bps) - 1) */
+};
+
+#define SCSCR_TIE	(1 << 7)
+#define SCSCR_RIE	(1 << 6)
+#define SCSCR_TE	(1 << 5)
+#define SCSCR_RE	(1 << 4)
+#define SCSCR_REIE	(1 << 3)	/* not supported by all parts */
+#define SCSCR_TOIE	(1 << 2)	/* not supported by all parts */
+#define SCSCR_CKE1	(1 << 1)
+#define SCSCR_CKE0	(1 << 0)
+
 /* Offsets into the sci_port->irqs array */
 enum {
 	SCIx_ERI_IRQ,
@@ -29,7 +46,12 @@
 	unsigned int	type;			/* SCI / SCIF / IRDA */
 	upf_t		flags;			/* UPF_* flags */
 	char		*clk;			/* clock string */
+
+	unsigned int	scbrr_algo_id;		/* SCBRR calculation algo */
+	unsigned int	scscr;			/* SCSCR initialization */
+
 	struct device	*dma_dev;
+
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	unsigned int dma_slave_tx;
 	unsigned int dma_slave_rx;
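
The new scbrr_algo_id field lets platform code pick one of the SCBRR_ALGO_* formulas quoted in the enum comments above. A sketch of how a driver might evaluate them (the helper name is hypothetical; the arithmetic is taken directly from those comments):

	#include <linux/serial_sci.h>

	static unsigned int my_scbrr_calc(unsigned int algo_id, unsigned int bps,
					  unsigned long clk)
	{
		switch (algo_id) {
		case SCBRR_ALGO_1:
			return (clk + 16 * bps) / (16 * bps) - 1;
		case SCBRR_ALGO_2:
			return (clk + 16 * bps) / (32 * bps) - 1;
		case SCBRR_ALGO_3:
			return (clk * 2 + 16 * bps) / (16 * bps) - 1;
		case SCBRR_ALGO_4:
			return (clk * 2 + 16 * bps) / (32 * bps) - 1;
		case SCBRR_ALGO_5:
		default:
			return (clk * 1000 / 32) / bps - 1;
		}
	}
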
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 20ec0a64..bf221d6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,6 +255,11 @@
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
+    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
+#endif
+
 /** 
  *	struct sk_buff - socket buffer
  *	@next: Next buffer in list
@@ -362,6 +367,8 @@
 	void			(*destructor)(struct sk_buff *skb);
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	struct sk_buff		*nfct_reasm;
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -2057,6 +2064,8 @@
 	if (nfct)
 		atomic_inc(&nfct->use);
 }
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
 {
 	if (skb)
@@ -2085,6 +2094,8 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
 	skb->nfct = NULL;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(skb->nfct_reasm);
 	skb->nfct_reasm = NULL;
 #endif
@@ -2101,6 +2112,8 @@
 	dst->nfct = src->nfct;
 	nf_conntrack_get(src->nfct);
 	dst->nfctinfo = src->nfctinfo;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	dst->nfct_reasm = src->nfct_reasm;
 	nf_conntrack_get_reasm(src->nfct_reasm);
 #endif
@@ -2114,6 +2127,8 @@
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(dst->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(dst->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 59260e2..fa90866 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -106,8 +106,6 @@
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
-int kern_ptr_validate(const void *ptr, unsigned long size);
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
 /*
  * Please use this macro to create slab caches. Simply specify the
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502..83203ae 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_notrace(cachep, flags);
-
-		trace_kmalloc(_THIS_IP_, ret,
-			      size, slab_buffer_size(cachep), flags);
+		ret = kmem_cache_alloc_trace(size, cachep, flags);
 
 		return ret;
 	}
@@ -194,14 +192,16 @@
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -210,7 +210,6 @@
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -234,13 +233,7 @@
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, slab_buffer_size(cachep),
-				   flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed1..8b6e8ae 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -216,31 +215,40 @@
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
@@ -251,11 +259,7 @@
 			if (!s)
 				return ZERO_SIZE_PTR;
 
-			ret = kmem_cache_alloc_notrace(s, flags);
-
-			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-			return ret;
+			return kmem_cache_alloc_trace(s, flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -266,14 +270,14 @@
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					   gfp_t gfpflags,
-					   int node);
+					   int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			      gfp_t gfpflags,
-			      int node)
+			      int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -281,8 +285,6 @@
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +292,7 @@
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
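
The slab_def.h and slub_def.h hunks above move the kmalloc tracepoints out of the inline fast paths and into the out-of-line *_trace() helpers. A sketch of the shape such a helper takes, reusing the same trace_kmalloc() call the inline code used to make (not the exact mm/slub.c body):

	#include <linux/slab.h>
	#include <trace/events/kmem.h>

	void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
	{
		void *ret = kmem_cache_alloc(s, gfpflags);

		/* emit the event once, out of line, instead of at every call site */
		trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
		return ret;
	}
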
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5f65f14..edbb1d0 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -191,7 +191,8 @@
 #define AF_PHONET	35	/* Phonet sockets		*/
 #define AF_IEEE802154	36	/* IEEE802154 sockets		*/
 #define AF_CAIF		37	/* CAIF sockets			*/
-#define AF_MAX		38	/* For now.. */
+#define AF_ALG		38	/* Algorithm sockets		*/
+#define AF_MAX		39	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -232,6 +233,7 @@
 #define PF_PHONET	AF_PHONET
 #define PF_IEEE802154	AF_IEEE802154
 #define PF_CAIF		AF_CAIF
+#define PF_ALG		AF_ALG
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
@@ -305,6 +307,7 @@
 #define SOL_RDS		276
 #define SOL_IUCV	277
 #define SOL_CAIF	278
+#define SOL_ALG		279
 
 /* IPX options */
 #define IPX_TYPE	1
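
AF_ALG/PF_ALG/SOL_ALG introduce the userspace crypto API sockets. A minimal userspace sketch that hashes a short buffer, assuming the companion struct sockaddr_alg uAPI in <linux/if_alg.h> from the same series (not shown in this hunk); error handling is omitted:

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	#ifndef AF_ALG
	#define AF_ALG	38	/* matches the value added above */
	#endif

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha1",
		};
		unsigned char digest[20];
		int tfm, op;

		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
		op = accept(tfm, NULL, 0);

		write(op, "hello", 5);			/* data to hash */
		read(op, digest, sizeof(digest));	/* sha1("hello") */

		close(op);
		close(tfm);
		return 0;
	}
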
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index 4f95c1a..0e6dc38 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -112,6 +112,7 @@
 #define SONYPI_EVENT_VOLUME_DEC_PRESSED		70
 #define SONYPI_EVENT_BRIGHTNESS_PRESSED		71
 #define SONYPI_EVENT_MEDIA_PRESSED		72
+#define SONYPI_EVENT_VENDOR_PRESSED		73
 
 /* get/set brightness */
 #define SONYPI_IOCGBRT		_IOR('v', 0, __u8)
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
index c91302f..6cd10f6 100644
--- a/include/linux/spi/dw_spi.h
+++ b/include/linux/spi/dw_spi.h
@@ -1,5 +1,6 @@
 #ifndef DW_SPI_HEADER_H
 #define DW_SPI_HEADER_H
+
 #include <linux/io.h>
 
 /* Bit fields in CTRLR0 */
@@ -82,6 +83,13 @@
 				though only low 16 bits matters */
 } __packed;
 
+struct dw_spi;
+struct dw_spi_dma_ops {
+	int (*dma_init)(struct dw_spi *dws);
+	void (*dma_exit)(struct dw_spi *dws);
+	int (*dma_transfer)(struct dw_spi *dws, int cs_change);
+};
+
 struct dw_spi {
 	struct spi_master	*master;
 	struct spi_device	*cur_dev;
@@ -136,13 +144,15 @@
 	/* Dma info */
 	int			dma_inited;
 	struct dma_chan		*txchan;
+	struct scatterlist	tx_sgl;
 	struct dma_chan		*rxchan;
-	int			txdma_done;
-	int			rxdma_done;
-	u64			tx_param;
-	u64			rx_param;
+	struct scatterlist	rx_sgl;
+	int			dma_chan_done;
 	struct device		*dma_dev;
-	dma_addr_t		dma_addr;
+	dma_addr_t		dma_addr; /* physical address of the data register */
+	struct dw_spi_dma_ops	*dma_ops;
+	void			*dma_priv; /* platform-related info */
+	struct pci_dev		*dmac;
 
 	/* Bus interface info */
 	void			*priv;
@@ -216,4 +226,8 @@
 extern void dw_spi_remove_host(struct dw_spi *dws);
 extern int dw_spi_suspend_host(struct dw_spi *dws);
 extern int dw_spi_resume_host(struct dw_spi *dws);
+extern void dw_spi_xfer_done(struct dw_spi *dws);
+
+/* platform related setup */
+extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
 #endif /* DW_SPI_HEADER_H */
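
struct dw_spi_dma_ops lets platform glue (such as the Intel MID support behind dw_spi_mid_init()) plug its own DMA routines into the core driver. A sketch of what that glue might register; all my_* names are hypothetical placeholders:

	#include <linux/spi/dw_spi.h>

	static int my_dma_init(struct dw_spi *dws)
	{
		/* would request TX/RX channels and set dws->dma_inited */
		return 0;
	}

	static void my_dma_exit(struct dw_spi *dws)
	{
		/* release the channels requested in my_dma_init() */
	}

	static int my_dma_transfer(struct dw_spi *dws, int cs_change)
	{
		/* map dws->tx_sgl/rx_sgl, start the engine, and call
		 * dw_spi_xfer_done(dws) from the DMA completion callback */
		return 0;
	}

	static struct dw_spi_dma_ops my_dma_ops = {
		.dma_init	= my_dma_init,
		.dma_exit	= my_dma_exit,
		.dma_transfer	= my_dma_transfer,
	};

	static void my_platform_attach_dma(struct dw_spi *dws, void *pdata)
	{
		dws->dma_ops  = &my_dma_ops;
		dws->dma_priv = pdata;	/* platform-private state */
	}
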
diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h
new file mode 100644
index 0000000..a68f3b1
--- /dev/null
+++ b/include/linux/spi/ifx_modem.h
@@ -0,0 +1,14 @@
+#ifndef LINUX_IFX_MODEM_H
+#define LINUX_IFX_MODEM_H
+
+struct ifx_modem_platform_data {
+	unsigned short rst_out; /* modem reset out */
+	unsigned short pwr_on;  /* power on */
+	unsigned short rst_pmu; /* reset modem */
+	unsigned short tx_pwr;  /* modem power threshold */
+	unsigned short srdy;    /* SRDY */
+	unsigned short mrdy;    /* MRDY */
+	unsigned short is_6160;	/* Modem type */
+};
+
+#endif
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
new file mode 100644
index 0000000..d3e1075
--- /dev/null
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __linux_pxa2xx_spi_h
+#define __linux_pxa2xx_spi_h
+
+#include <linux/pxa2xx_ssp.h>
+
+#define PXA2XX_CS_ASSERT (0x01)
+#define PXA2XX_CS_DEASSERT (0x02)
+
+/* device.platform_data for SSP controller devices */
+struct pxa2xx_spi_master {
+	u32 clock_enable;
+	u16 num_chipselect;
+	u8 enable_dma;
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct pxa2xx_spi_chip {
+	u8 tx_threshold;
+	u8 rx_threshold;
+	u8 dma_burst_size;
+	u32 timeout;
+	u8 enable_loopback;
+	int gpio_cs;
+	void (*cs_control)(u32 command);
+};
+
+#ifdef CONFIG_ARCH_PXA
+
+#include <linux/clk.h>
+#include <mach/dma.h>
+
+extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
+
+#else
+/*
+ * This is the implementation for CE4100 on x86. ARM defines these in the
+ * mach/ or plat/ include path.
+ * The CE4100 does not provide DMA support. These bits are here to let the
+ * driver compile and will never be used. DMA support may be added at a
+ * later point in time.
+ */
+
+#define DCSR(n)         (n)
+#define DSADR(n)        (n)
+#define DTADR(n)        (n)
+#define DCMD(n)         (n)
+#define DRCMR(n)        (n)
+
+#define DCSR_RUN	(1 << 31)	/* Run Bit */
+#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch */
+#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable */
+#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
+#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
+#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt */
+#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt */
+#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt */
+
+#define DCSR_EORIRQEN	(1 << 28)	/* End of Receive Interrupt Enable */
+#define DCSR_EORJMPEN	(1 << 27)	/* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN	(1 << 26)	/* STOP on an EOR */
+#define DCSR_SETCMPST	(1 << 25)	/* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST	(1 << 24)	/* Clear Descriptor Compare Status */
+#define DCSR_CMPST	(1 << 10)	/* The Descriptor Compare Status */
+#define DCSR_EORINTR	(1 << 9)	/* The end of Receive */
+
+#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid */
+#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number */
+
+#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor */
+#define DDADR_STOP	(1 << 0)	/* Stop */
+
+#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
+#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
+#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
+#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
+#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
+#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
+#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
+#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
+#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
+#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
+#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
+#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
+#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
+
+/*
+ * Descriptor structure for PXA's DMA engine
+ * Note: this structure must always be aligned to a 16-byte boundary.
+ */
+
+typedef enum {
+	DMA_PRIO_HIGH = 0,
+	DMA_PRIO_MEDIUM = 1,
+	DMA_PRIO_LOW = 2
+} pxa_dma_prio;
+
+/*
+ * DMA registration
+ */
+
+static inline int pxa_request_dma(char *name,
+		pxa_dma_prio prio,
+		void (*irq_handler)(int, void *),
+		void *data)
+{
+	return -ENODEV;
+}
+
+static inline void pxa_free_dma(int dma_ch)
+{
+}
+
+/*
+ * The CE4100 does not have the clk framework implemented, so the SPI clock
+ * cannot be switched on/off and the divider cannot be changed.
+ */
+static inline void clk_disable(struct clk *clk)
+{
+}
+
+static inline int clk_enable(struct clk *clk)
+{
+	return 0;
+}
+
+static inline unsigned long clk_get_rate(struct clk *clk)
+{
+	return 3686400;
+}
+
+#endif
+#endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index b202475..8521067 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -110,9 +110,9 @@
 	__be32 *		(*crmarshal)(struct rpc_task *, __be32 *);
 	int			(*crrefresh)(struct rpc_task *);
 	__be32 *		(*crvalidate)(struct rpc_task *, __be32 *);
-	int			(*crwrap_req)(struct rpc_task *, kxdrproc_t,
+	int			(*crwrap_req)(struct rpc_task *, kxdreproc_t,
 						void *, __be32 *, void *);
-	int			(*crunwrap_resp)(struct rpc_task *, kxdrproc_t,
+	int			(*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
 						void *, __be32 *, void *);
 };
 
@@ -139,8 +139,8 @@
 void			put_rpccred(struct rpc_cred *);
 __be32 *		rpcauth_marshcred(struct rpc_task *, __be32 *);
 __be32 *		rpcauth_checkverf(struct rpc_task *, __be32 *);
-int			rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj);
-int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
 int			rpcauth_refreshcred(struct rpc_task *);
 void			rpcauth_invalcred(struct rpc_task *);
 int			rpcauth_uptodatecred(struct rpc_task *);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 7c91260..c50b458 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -43,10 +43,18 @@
  */
 static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
 {
-	if (rqstp->rq_server->bc_xprt)
+	if (rqstp->rq_server->sv_bc_xprt)
 		return 1;
 	return 0;
 }
+static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
+{
+	if (svc_is_backchannel(rqstp))
+		return (struct nfs4_sessionid *)
+			rqstp->rq_server->sv_bc_xprt->xpt_bc_sid;
+	return NULL;
+}
+
 #else /* CONFIG_NFS_V4_1 */
 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
 					 unsigned int min_reqs)
@@ -59,6 +67,11 @@
 	return 0;
 }
 
+static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
+{
+	return NULL;
+}
+
 static inline void xprt_free_bc_request(struct rpc_rqst *req)
 {
 }
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 6950c98..7898ea1 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -13,6 +13,7 @@
 #ifndef _LINUX_SUNRPC_CACHE_H_
 #define _LINUX_SUNRPC_CACHE_H_
 
+#include <linux/kref.h>
 #include <linux/slab.h>
 #include <asm/atomic.h>
 #include <linux/proc_fs.h>
@@ -255,10 +256,13 @@
 	return rv - boot.tv_sec;
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 static inline void sunrpc_invalidate(struct cache_head *h,
 				     struct cache_detail *detail)
 {
 	h->expiry_time = seconds_since_boot() - 1;
 	detail->nextcheck = seconds_since_boot();
 }
+#endif /* CONFIG_NFSD_DEPRECATED */
+
 #endif /*  _LINUX_SUNRPC_CACHE_H_ */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a5a55f2..ef9476a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -89,8 +89,8 @@
  */
 struct rpc_procinfo {
 	u32			p_proc;		/* RPC procedure number */
-	kxdrproc_t		p_encode;	/* XDR encode function */
-	kxdrproc_t		p_decode;	/* XDR decode function */
+	kxdreproc_t		p_encode;	/* XDR encode function */
+	kxdrdproc_t		p_decode;	/* XDR decode function */
 	unsigned int		p_arglen;	/* argument hdr length (u32) */
 	unsigned int		p_replen;	/* reply hdr length (u32) */
 	unsigned int		p_count;	/* call count */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5a3085b..ea29330 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@
 	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
 	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
 						 * entries in the svc_cb_list */
-	struct svc_xprt		*bc_xprt;
+	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
 #endif /* CONFIG_NFS_V4_1 */
 };
 
@@ -269,6 +269,7 @@
 	struct cache_req	rq_chandle;	/* handle passed to caches for 
 						 * request delaying 
 						 */
+	bool			rq_dropme;
 	/* Catering to nfsd */
 	struct auth_domain *	rq_client;	/* RPC peer info */
 	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index aea0d438..059877b 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,7 +63,6 @@
 #define XPT_LISTENER	11		/* listening endpoint */
 #define XPT_CACHE_AUTH	12		/* cache auth info */
 
-	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t    	    	xpt_reserved;	/* space on outq that is rsvd */
 	struct mutex		xpt_mutex;	/* to serialize sending data */
@@ -78,8 +77,10 @@
 	size_t			xpt_remotelen;	/* length of address */
 	struct rpc_wait_queue	xpt_bc_pending;	/* backchannel wait queue */
 	struct list_head	xpt_users;	/* callbacks on free */
+	void			*xpt_bc_sid;	/* back channel session ID */
 
 	struct net		*xpt_net;
+	struct rpc_xprt		*xpt_bc_xprt;	/* NFSv4.1 backchannel */
 };
 
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 1b353a7..04dba23 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,7 +28,6 @@
 	/* private TCP part */
 	u32			sk_reclen;	/* length of record */
 	u32			sk_tcplen;	/* current read length */
-	struct rpc_xprt		*sk_bc_xprt;	/* NFSv4.1 backchannel xprt */
 };
 
 /*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 498ab93..fc84b7a 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -33,8 +33,8 @@
 };
 
 /*
- * This is the generic XDR function. rqstp is either a rpc_rqst (client
- * side) or svc_rqst pointer (server side).
+ * This is the legacy generic XDR function. rqstp is either a rpc_rqst
+ * (client side) or svc_rqst pointer (server side).
  * Encode functions always assume there's enough room in the buffer.
  */
 typedef int	(*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
@@ -201,14 +201,22 @@
 
 	__be32 *end;		/* end of available buffer space */
 	struct kvec *iov;	/* pointer to the current kvec */
+	struct kvec scratch;	/* Scratch buffer */
+	struct page **page_ptr;	/* pointer to the current page */
 };
 
+/*
+ * These are the xdr_stream style generic XDR encode and decode functions.
+ */
+typedef void	(*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef int	(*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+
 extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
 extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
 		unsigned int base, unsigned int len);
 extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
-extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes);
+extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
 extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
 extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
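
kxdreproc_t and kxdrdproc_t replace the single legacy kxdrproc_t with xdr_stream-based encode and decode hooks, which the clnt.h hunk above wires into rpc_procinfo.p_encode/.p_decode. A hypothetical pair using only the xdr_stream helpers declared here (the argument type is made up):

	#include <linux/errno.h>
	#include <linux/sunrpc/xdr.h>

	struct my_args {	/* hypothetical argument/result type */
		u32 value;
	};

	/* encoder: reserve room in the stream, then fill it in */
	static void my_enc(void *req, struct xdr_stream *xdr, void *obj)
	{
		const struct my_args *args = obj;
		__be32 *p = xdr_reserve_space(xdr, 4);

		*p = cpu_to_be32(args->value);
	}

	/* decoder: may run out of data, hence the int return */
	static int my_dec(void *req, struct xdr_stream *xdr, void *obj)
	{
		struct my_args *res = obj;
		__be32 *p = xdr_inline_decode(xdr, 4);

		if (p == NULL)
			return -EIO;
		res->value = be32_to_cpu(*p);
		return 0;
	}

Such functions would then be assigned to a procedure's .p_encode and .p_decode fields as the conversion away from kxdrproc_t proceeds.
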
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 89d10d2..bef0f53 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -321,6 +321,7 @@
 #define XPRT_CLOSING		(6)
 #define XPRT_CONNECTION_ABORT	(7)
 #define XPRT_CONNECTION_CLOSE	(8)
+#define XPRT_INITIALIZED	(9)
 
 static inline void xprt_set_connected(struct rpc_xprt *xprt)
 {
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 2669751..5a89e36 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -122,7 +122,7 @@
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
  */
-extern void suspend_set_ops(struct platform_suspend_ops *ops);
+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
 extern int suspend_valid_only_mem(suspend_state_t state);
 
 /**
@@ -147,7 +147,7 @@
 #else /* !CONFIG_SUSPEND */
 #define suspend_valid_only_mem	NULL
 
-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
 #endif /* !CONFIG_SUSPEND */
 
@@ -245,7 +245,7 @@
 extern void swsusp_unset_page_free(struct page *);
 extern unsigned long get_safe_page(gfp_t gfp_mask);
 
-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
 #else /* CONFIG_HIBERNATION */
@@ -253,28 +253,11 @@
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
 
-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
-#ifdef CONFIG_SUSPEND_NVS
-extern int suspend_nvs_register(unsigned long start, unsigned long size);
-extern int suspend_nvs_alloc(void);
-extern void suspend_nvs_free(void);
-extern void suspend_nvs_save(void);
-extern void suspend_nvs_restore(void);
-#else /* CONFIG_SUSPEND_NVS */
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
-{
-	return 0;
-}
-static inline int suspend_nvs_alloc(void) { return 0; }
-static inline void suspend_nvs_free(void) {}
-static inline void suspend_nvs_save(void) {}
-static inline void suspend_nvs_restore(void) {}
-#endif /* CONFIG_SUSPEND_NVS */
-
 #ifdef CONFIG_PM_SLEEP
 void save_processor_state(void);
 void restore_processor_state(void);
@@ -292,7 +275,7 @@
 /* drivers/base/power/wakeup.c */
 extern bool events_check_enabled;
 
-extern bool pm_check_wakeup_events(void);
+extern bool pm_wakeup_pending(void);
 extern bool pm_get_wakeup_count(unsigned int *count);
 extern bool pm_save_wakeup_count(unsigned int count);
 #else /* !CONFIG_PM_SLEEP */
@@ -309,7 +292,7 @@
 
 #define pm_notifier(fn, pri)	do { (void)(fn); } while (0)
 
-static inline bool pm_check_wakeup_events(void) { return true; }
+static inline bool pm_wakeup_pending(void) { return false; }
 #endif /* !CONFIG_PM_SLEEP */
 
 extern struct mutex pm_mutex;
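
pm_check_wakeup_events() is renamed to pm_wakeup_pending() with the sense inverted: the !CONFIG_PM_SLEEP stub now returns false, meaning "nothing pending". A hedged sketch of the kind of check a suspend path would make; both my_* names are hypothetical:

	#include <linux/suspend.h>

	static int my_enter_low_power(suspend_state_t state);	/* hypothetical */

	static int my_suspend_enter(suspend_state_t state)
	{
		if (pm_wakeup_pending())
			return -EBUSY;	/* a wakeup source fired; abort the transition */

		return my_enter_low_power(state);
	}
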
diff --git a/include/linux/swap.h b/include/linux/swap.h
index eba53e7..4d55932 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -208,6 +208,8 @@
 /* linux/mm/swap.c */
 extern void __lru_cache_add(struct page *, enum lru_list lru);
 extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_add_page_tail(struct zone* zone,
+			      struct page *page, struct page *page_tail);
 extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1de8b9eb..8651556 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -77,7 +77,7 @@
 	char type[THERMAL_NAME_LENGTH];
 	struct device device;
 	void *devdata;
-	struct thermal_cooling_device_ops *ops;
+	const struct thermal_cooling_device_ops *ops;
 	struct list_head node;
 };
 
@@ -114,7 +114,7 @@
 	int last_temperature;
 	bool passive;
 	unsigned int forced_passive;
-	struct thermal_zone_device_ops *ops;
+	const struct thermal_zone_device_ops *ops;
 	struct list_head cooling_devices;
 	struct idr idr;
 	struct mutex lock;	/* protect cooling devices list */
@@ -127,13 +127,41 @@
 	struct thermal_hwmon_attr temp_crit;	/* hwmon sys attr */
 #endif
 };
+/* Adding event notification support elements */
+#define THERMAL_GENL_FAMILY_NAME                "thermal_event"
+#define THERMAL_GENL_VERSION                    0x01
+#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
+
+enum events {
+	THERMAL_AUX0,
+	THERMAL_AUX1,
+	THERMAL_CRITICAL,
+	THERMAL_DEV_FAULT,
+};
+
+struct thermal_genl_event {
+	u32 orig;
+	enum events event;
+};
+/* attributes of thermal_genl_family */
+enum {
+	THERMAL_GENL_ATTR_UNSPEC,
+	THERMAL_GENL_ATTR_EVENT,
+	__THERMAL_GENL_ATTR_MAX,
+};
+#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
+
+/* commands supported by the thermal_genl_family */
+enum {
+	THERMAL_GENL_CMD_UNSPEC,
+	THERMAL_GENL_CMD_EVENT,
+	__THERMAL_GENL_CMD_MAX,
+};
+#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
 
 struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
-							 struct
-							 thermal_zone_device_ops
-							 *, int tc1, int tc2,
-							 int passive_freq,
-							 int polling_freq);
+		const struct thermal_zone_device_ops *, int tc1, int tc2,
+		int passive_freq, int polling_freq);
 void thermal_zone_device_unregister(struct thermal_zone_device *);
 
 int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
@@ -142,9 +170,8 @@
 				       struct thermal_cooling_device *);
 void thermal_zone_device_update(struct thermal_zone_device *);
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
-							       struct
-							       thermal_cooling_device_ops
-							       *);
+		const struct thermal_cooling_device_ops *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+extern int generate_netlink_event(u32 orig, enum events event);
 
 #endif /* __THERMAL_H__ */
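
generate_netlink_event() is the kernel-side entry point for the new thermal notification family declared above; userspace listens on the "thermal_mc_group" multicast group. A hedged sketch of a thermal driver reporting a critical trip (the originator id is illustrative):

	#include <linux/thermal.h>

	#define MY_SENSOR_ID	0	/* hypothetical thermal_genl_event.orig value */

	static void my_report_critical(void)
	{
		generate_netlink_event(MY_SENSOR_ID, THERMAL_CRITICAL);
	}
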
diff --git a/include/linux/time.h b/include/linux/time.h
index 9f15ac7..1e6d3b5 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -158,6 +158,8 @@
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
 extern void getrawmonotonic(struct timespec *ts);
+extern void getnstime_raw_and_real(struct timespec *ts_raw,
+		struct timespec *ts_real);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 32d852f..d23999f 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -268,6 +268,7 @@
 extern void second_overflow(void);
 extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
+extern void hardpps(const struct timespec *, const struct timespec *);
 
 int read_current_timer(unsigned long *timer_val);
 
diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h
index 6a7c4ed..772dedb 100644
--- a/include/linux/toshiba.h
+++ b/include/linux/toshiba.h
@@ -33,6 +33,8 @@
 	unsigned int edi __attribute__ ((packed));
 } SMMRegisters;
 
+#ifdef __KERNEL__
 int tosh_smm(SMMRegisters *regs);
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index ac5d1c1..fdc718a 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -31,6 +31,7 @@
 
 extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
 extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
+extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
 #else
 static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
 	return -ENODEV;
@@ -38,5 +39,8 @@
 static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
 	return -ENODEV;
 }
+static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
+	return -ENODEV;
+}
 #endif
 #endif
diff --git a/include/linux/tpm_command.h b/include/linux/tpm_command.h
new file mode 100644
index 0000000..727512e
--- /dev/null
+++ b/include/linux/tpm_command.h
@@ -0,0 +1,28 @@
+#ifndef __LINUX_TPM_COMMAND_H__
+#define __LINUX_TPM_COMMAND_H__
+
+/*
+ * TPM Command constants from specifications at
+ * http://www.trustedcomputinggroup.org
+ */
+
+/* Command TAGS */
+#define TPM_TAG_RQU_COMMAND             193
+#define TPM_TAG_RQU_AUTH1_COMMAND       194
+#define TPM_TAG_RQU_AUTH2_COMMAND       195
+#define TPM_TAG_RSP_COMMAND             196
+#define TPM_TAG_RSP_AUTH1_COMMAND       197
+#define TPM_TAG_RSP_AUTH2_COMMAND       198
+
+/* Command Ordinals */
+#define TPM_ORD_GETRANDOM               70
+#define TPM_ORD_OSAP                    11
+#define TPM_ORD_OIAP                    10
+#define TPM_ORD_SEAL                    23
+#define TPM_ORD_UNSEAL                  24
+
+/* Other constants */
+#define SRKHANDLE                       0x40000000
+#define TPM_NONCE_SIZE                  20
+
+#endif
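
tpm_send() together with the new tpm_command.h constants lets in-kernel users hand a raw command to the TPM. A hedged sketch that asks the chip for 20 random bytes; the chip number, the amount of reply head-room, and the assumption that the reply lands back in the same buffer are all illustrative guesses rather than guarantees of the API:

	#include <linux/tpm.h>
	#include <linux/tpm_command.h>

	static int my_tpm_get_random(void)
	{
		struct {
			__be16	tag;
			__be32	len;
			__be32	ordinal;
			__be32	num_bytes;
			u8	reply_room[50];	/* assumed room for the response */
		} __packed cmd = {
			.tag	   = cpu_to_be16(TPM_TAG_RQU_COMMAND),
			.len	   = cpu_to_be32(14),	/* 10-byte header + one u32 parameter */
			.ordinal   = cpu_to_be32(TPM_ORD_GETRANDOM),
			.num_bytes = cpu_to_be32(20),	/* bytes of randomness requested */
		};

		return tpm_send(0, &cmd, sizeof(cmd));
	}
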
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d3e4f87..c681461 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -32,7 +32,7 @@
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	struct tracepoint_func *funcs;
+	struct tracepoint_func __rcu *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -326,7 +326,7 @@
  *		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  *		__entry->next_pid	= next->pid;
  *		__entry->next_prio	= next->prio;
- *	)
+ *	),
  *
  *	*
  *	* Formatted output of a trace record via TP_printk().
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index db2d227..c3d43eb 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -102,7 +102,7 @@
  * 	    unsigned int cmd, unsigned long arg);
  *
  * 	This routine allows the tty driver to implement
- *	device-specific ioctl's.  If the ioctl number passed in cmd
+ *	device-specific ioctls.  If the ioctl number passed in cmd
  * 	is not recognized by the driver, it should return ENOIOCTLCMD.
  *
  *	Optional
@@ -167,12 +167,12 @@
  * 
  * void (*hangup)(struct tty_struct *tty);
  *
- * 	This routine notifies the tty driver that it should hangup the
+ * 	This routine notifies the tty driver that it should hang up the
  * 	tty device.
  *
  *	Optional:
  *
- * int (*break_ctl)(struct tty_stuct *tty, int state);
+ * int (*break_ctl)(struct tty_struct *tty, int state);
  *
  * 	This optional routine requests the tty driver to turn on or
  * 	off BREAK status on the RS-232 port.  If state is -1,
@@ -235,6 +235,7 @@
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/cdev.h>
+#include <linux/termios.h>
 
 struct tty_struct;
 struct tty_driver;
@@ -357,7 +358,7 @@
  * 	overruns, either.)
  *
  * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need
- *	to be registered with a call to tty_register_driver() when the
+ *	to be registered with a call to tty_register_device() when the
  *	device is found in the system and unregistered with a call to
 *	tty_unregister_device() so the devices will show up
  *	properly in sysfs.  If not set, driver->num entries will be
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 526d66f..ff7dc08 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -101,14 +101,15 @@
  *	any pending driver I/O is completed.
  *
  * void (*dcd_change)(struct tty_struct *tty, unsigned int status,
- * 			struct timespec *ts)
+ *			struct pps_event_time *ts)
  *
  *	Tells the discipline that the DCD pin has changed its status and
- *	the relative timestamp. Pointer ts can be NULL.
+ *	the relative timestamp. Pointer ts cannot be NULL.
  */
 
 #include <linux/fs.h>
 #include <linux/wait.h>
+#include <linux/pps_kernel.h>
 
 struct tty_ldisc_ops {
 	int	magic;
@@ -143,7 +144,7 @@
 			       char *fp, int count);
 	void	(*write_wakeup)(struct tty_struct *);
 	void	(*dcd_change)(struct tty_struct *, unsigned int,
-				struct timespec *);
+				struct pps_event_time *);
 
 	struct  module *owner;
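
dcd_change() now receives the struct pps_event_time captured by the serial core instead of a raw timespec, and the pointer is guaranteed non-NULL. A sketch of a line discipline adopting the new prototype; the discipline itself and the printk are purely illustrative:

	#include <linux/tty.h>

	static void my_dcd_change(struct tty_struct *tty, unsigned int status,
				  struct pps_event_time *ts)
	{
		/* ts carries the timestamps grabbed in uart_handle_dcd_change() */
		pr_info("DCD %s\n", status ? "asserted" : "cleared");
	}

	static struct tty_ldisc_ops my_ldisc_ops = {
		.magic	    = TTY_LDISC_MAGIC,
		.name	    = "my_ldisc",
		.dcd_change = my_dcd_change,
	};
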
 	
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index fa261a0..8da8c4e 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,21 +67,21 @@
 #endif
 };
 
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
 
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
 #endif
 }
 
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -93,7 +93,7 @@
 #endif
 }
 
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
@@ -112,7 +112,7 @@
  * - UP 32bit must disable BH.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -124,7 +124,7 @@
 #endif
 }
 
-static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
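
With the inline-qualifier fixes above, the helpers read naturally again. For reference, the intended pairing is a writer-side begin/end bracket and a reader-side retry loop; this sketch uses a made-up stats structure:

	#include <linux/u64_stats_sync.h>

	struct my_stats {
		u64			packets;
		u64			bytes;
		struct u64_stats_sync	syncp;
	};

	/* writer side, e.g. per-cpu on the transmit path */
	static void my_stats_add(struct my_stats *st, unsigned int len)
	{
		u64_stats_update_begin(&st->syncp);
		st->packets++;
		st->bytes += len;
		u64_stats_update_end(&st->syncp);
	}

	/* reader side: retry if a 32-bit SMP writer slipped in between */
	static void my_stats_read(const struct my_stats *st, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&st->syncp);
			*packets = st->packets;
			*bytes	 = st->bytes;
		} while (u64_stats_fetch_retry(&st->syncp, start));
	}
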
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 05f7fed2..d28c726 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -104,6 +104,7 @@
 #define UI_SET_FFBIT		_IOW(UINPUT_IOCTL_BASE, 107, int)
 #define UI_SET_PHYS		_IOW(UINPUT_IOCTL_BASE, 108, char*)
 #define UI_SET_SWBIT		_IOW(UINPUT_IOCTL_BASE, 109, int)
+#define UI_SET_PROPBIT		_IOW(UINPUT_IOCTL_BASE, 110, int)
 
 #define UI_BEGIN_FF_UPLOAD	_IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload)
 #define UI_END_FF_UPLOAD	_IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload)
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index c9a6abd..c0d817d 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -3,9 +3,9 @@
 
 #include <linux/kernel.h>
 
-struct __una_u16 { u16 x; } __attribute__((packed));
-struct __una_u32 { u32 x; } __attribute__((packed));
-struct __una_u64 { u64 x; } __attribute__((packed));
+struct __una_u16 { u16 x; } __packed;
+struct __una_u32 { u32 x; } __packed;
+struct __una_u64 { u64 x; } __packed;
 
 static inline u16 __get_unaligned_cpu16(const void *p)
 {
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a28eb25..bd69b65 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -20,6 +20,7 @@
 #include <linux/completion.h>	/* for struct completion */
 #include <linux/sched.h>	/* for current && schedule_timeout */
 #include <linux/mutex.h>	/* for struct mutex */
+#include <linux/pm_runtime.h>	/* for runtime PM */
 
 struct usb_device;
 struct usb_driver;
@@ -411,8 +412,6 @@
  * @quirks: quirks of the whole device
  * @urbnum: number of URBs submitted for the whole device
  * @active_duration: total time device is not suspended
- * @last_busy: time of last use
- * @autosuspend_delay: in jiffies
  * @connect_time: time device was first connected
  * @do_remote_wakeup:  remote wakeup should be enabled
  * @reset_resume: needs reset instead of resume
@@ -485,8 +484,6 @@
 	unsigned long active_duration;
 
 #ifdef CONFIG_PM
-	unsigned long last_busy;
-	int autosuspend_delay;
 	unsigned long connect_time;
 
 	unsigned do_remote_wakeup:1;
@@ -531,7 +528,7 @@
 
 static inline void usb_mark_last_busy(struct usb_device *udev)
 {
-	udev->last_busy = jiffies;
+	pm_runtime_mark_last_busy(&udev->dev);
 }
 
 #else
diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
index 119194c..10ec069 100644
--- a/include/linux/usb/ch11.h
+++ b/include/linux/usb/ch11.h
@@ -28,6 +28,13 @@
 #define HUB_STOP_TT		11
 
 /*
+ * Hub class additional requests defined by USB 3.0 spec
+ * See USB 3.0 spec Table 10-6
+ */
+#define HUB_SET_DEPTH		12
+#define HUB_GET_PORT_ERR_COUNT	13
+
+/*
  * Hub Class feature numbers
  * See USB 2.0 spec Table 11-17
  */
@@ -56,6 +63,20 @@
 #define USB_PORT_FEAT_C_PORT_L1         23
 
 /*
+ * Port feature selectors added by USB 3.0 spec.
+ * See USB 3.0 spec Table 10-7
+ */
+#define USB_PORT_FEAT_LINK_STATE		5
+#define USB_PORT_FEAT_U1_TIMEOUT		23
+#define USB_PORT_FEAT_U2_TIMEOUT		24
+#define USB_PORT_FEAT_C_LINK_STATE		25
+#define USB_PORT_FEAT_C_CONFIG_ERR		26
+#define USB_PORT_FEAT_REMOTE_WAKE_MASK		27
+#define USB_PORT_FEAT_BH_PORT_RESET		28
+#define USB_PORT_FEAT_C_BH_PORT_RESET		29
+#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT	30
+
+/*
  * Hub Status and Hub Change results
  * See USB 2.0 spec Table 11-19 and Table 11-20
  */
@@ -84,6 +105,32 @@
 #define USB_PORT_STAT_SUPER_SPEED	0x8000	/* Linux-internal */
 
 /*
+ * Additions to wPortStatus bit field from USB 3.0
+ * See USB 3.0 spec Table 10-10
+ */
+#define USB_PORT_STAT_LINK_STATE	0x01e0
+#define USB_SS_PORT_STAT_POWER		0x0200
+#define USB_PORT_STAT_SPEED_5GBPS	0x0000
+/* Valid only if port is enabled */
+
+/*
+ * Definitions for PORT_LINK_STATE values
+ * (bits 5-8) in wPortStatus
+ */
+#define USB_SS_PORT_LS_U0		0x0000
+#define USB_SS_PORT_LS_U1		0x0020
+#define USB_SS_PORT_LS_U2		0x0040
+#define USB_SS_PORT_LS_U3		0x0060
+#define USB_SS_PORT_LS_SS_DISABLED	0x0080
+#define USB_SS_PORT_LS_RX_DETECT	0x00a0
+#define USB_SS_PORT_LS_SS_INACTIVE	0x00c0
+#define USB_SS_PORT_LS_POLLING		0x00e0
+#define USB_SS_PORT_LS_RECOVERY		0x0100
+#define USB_SS_PORT_LS_HOT_RESET	0x0120
+#define USB_SS_PORT_LS_COMP_MOD		0x0140
+#define USB_SS_PORT_LS_LOOPBACK		0x0160
+
+/*
  * wPortChange bit field
  * See USB 2.0 spec Table 11-22
  * Bits 0 to 4 shown, bits 5 to 15 are reserved
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index f917bbb..ab46194 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -124,6 +124,16 @@
 #define USB_DEVICE_DEBUG_MODE		6	/* (special devices only) */
 
 /*
+ * Test Mode Selectors
+ * See USB 2.0 spec Table 9-7
+ */
+#define	TEST_J		1
+#define	TEST_K		2
+#define	TEST_SE0_NAK	3
+#define	TEST_PACKET	4
+#define	TEST_FORCE_EN	5
+
+/*
  * New Feature Selectors as added by USB 3.0
  * See USB 3.0 spec Table 9-6
  */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0b6e751..dd6ee49 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -471,6 +471,10 @@
 
 /*-------------------------------------------------------------------------*/
 
+/* class requests from USB 3.0 hub spec, table 10-5 */
+#define SetHubDepth		(0x3000 | HUB_SET_DEPTH)
+#define GetPortErrorCount	(0x8000 | HUB_GET_PORT_ERR_COUNT)
+
 /*
  * Generic bandwidth allocation constants/support
  */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
new file mode 100644
index 0000000..3675e03
--- /dev/null
+++ b/include/linux/usb/msm_hsusb.h
@@ -0,0 +1,112 @@
+/* linux/include/asm-arm/arch-msm/hsusb.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_HSUSB_H
+#define __ASM_ARCH_MSM_HSUSB_H
+
+#include <linux/types.h>
+#include <linux/usb/otg.h>
+
+/**
+ * Supported USB modes
+ *
+ * USB_PERIPHERAL       Only peripheral mode is supported.
+ * USB_HOST             Only host mode is supported.
+ * USB_OTG              OTG mode is supported.
+ *
+ */
+enum usb_mode_type {
+	USB_NONE = 0,
+	USB_PERIPHERAL,
+	USB_HOST,
+	USB_OTG,
+};
+
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL	Id/VBUS notifications not required. Useful in
+ *                      host-only configurations.
+ * OTG_PHY_CONTROL	Id/VBUS notifications come from the USB PHY.
+ * OTG_PMIC_CONTROL	Id/VBUS notifications come from the PMIC hardware.
+ * OTG_USER_CONTROL	Id/VBUS notifications come from the user via sysfs.
+ *
+ */
+enum otg_control_type {
+	OTG_NO_CONTROL = 0,
+	OTG_PHY_CONTROL,
+	OTG_PMIC_CONTROL,
+	OTG_USER_CONTROL,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ *              for msm72k_otg driver.
+ * @phy_init_seq: PHY configuration sequence. val, reg pairs
+ *              terminated by -1.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ * @default_mode: Default operational mode. Applicable only if the
+ *              OTG switch is controlled by the user.
+ *
+ */
+struct msm_otg_platform_data {
+	int *phy_init_seq;
+	void (*vbus_power)(bool on);
+	unsigned power_budget;
+	enum usb_mode_type mode;
+	enum otg_control_type otg_control;
+	enum usb_mode_type default_mode;
+	void (*setup_gpio)(enum usb_otg_state state);
+};
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @clk: clock struct of usb_hs_clk.
+ * @pclk: clock struct of usb_hs_pclk.
+ * @phy_reset_clk: clock struct of usb_phy_clk.
+ * @core_clk: clock struct of usb_hs_core_clk.
+ * @regs: ioremapped register base address.
+ * @inputs: OTG state machine inputs (Id, SessValid, etc.).
+ * @sm_work: OTG state machine work.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: Async interrupt arrived.
+ *
+ */
+struct msm_otg {
+	struct otg_transceiver otg;
+	struct msm_otg_platform_data *pdata;
+	int irq;
+	struct clk *clk;
+	struct clk *pclk;
+	struct clk *phy_reset_clk;
+	struct clk *core_clk;
+	void __iomem *regs;
+#define ID		0
+#define B_SESS_VLD	1
+	unsigned long inputs;
+	struct work_struct sm_work;
+	atomic_t in_lpm;
+	int async_int;
+};
+
+#endif
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
new file mode 100644
index 0000000..b92e173
--- /dev/null
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
+#define __LINUX_USB_GADGET_MSM72K_UDC_H__
+
+#ifdef CONFIG_ARCH_MSM7X00A
+#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
+#else
+#define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
+#define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
+#endif
+#define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
+
+#define USB_USBCMD           (MSM_USB_BASE + 0x0140)
+#define USB_PORTSC           (MSM_USB_BASE + 0x0184)
+#define USB_OTGSC            (MSM_USB_BASE + 0x01A4)
+#define USB_USBMODE          (MSM_USB_BASE + 0x01A8)
+
+#define USBCMD_RESET   2
+#define USB_USBINTR          (MSM_USB_BASE + 0x0148)
+
+#define PORTSC_PHCD            (1 << 23) /* phy suspend mode */
+#define PORTSC_PTS_MASK         (3 << 30)
+#define PORTSC_PTS_ULPI         (3 << 30)
+
+#define USB_ULPI_VIEWPORT    (MSM_USB_BASE + 0x0170)
+#define ULPI_RUN              (1 << 30)
+#define ULPI_WRITE            (1 << 29)
+#define ULPI_READ             (0 << 29)
+#define ULPI_ADDR(n)          (((n) & 255) << 16)
+#define ULPI_DATA(n)          ((n) & 255)
+#define ULPI_DATA_READ(n)     (((n) >> 8) & 255)
+
+#define ASYNC_INTR_CTRL         (1 << 29) /* Enable async interrupt */
+#define ULPI_STP_CTRL           (1 << 30) /* Block communication with PHY */
+
+/* OTG definitions */
+#define OTGSC_INTSTS_MASK	(0x7f << 16)
+#define OTGSC_ID		(1 << 8)
+#define OTGSC_BSV		(1 << 11)
+#define OTGSC_IDIS		(1 << 16)
+#define OTGSC_BSVIS		(1 << 19)
+#define OTGSC_IDIE		(1 << 24)
+#define OTGSC_BSVIE		(1 << 27)
+
+#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 2387f9f..eb50525 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -3,7 +3,7 @@
  * Inventra (Multidrop) Highspeed Dual-Role Controllers:  (M)HDRC.
  *
  * Board initialization should put one of these into dev->platform_data,
- * probably on some platform_device named "musb_hdrc".  It encapsulates
+ * probably on some platform_device named "musb-hdrc".  It encapsulates
  * key configuration differences between boards.
  */
 
@@ -120,14 +120,14 @@
 	/* Power the device on or off */
 	int		(*set_power)(int state);
 
-	/* Turn device clock on or off */
-	int		(*set_clock)(struct clk *clock, int is_on);
-
 	/* MUSB configuration-specific details */
 	struct musb_hdrc_config	*config;
 
 	/* Architecture specific board data	*/
 	void		*board_data;
+
+	/* Platform specific struct musb_ops pointer */
+	const void	*platform_ops;
 };
 
 
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 0a5b371..a1a1e7a 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -116,7 +116,7 @@
 /* for board-specific init logic */
 extern int otg_set_transceiver(struct otg_transceiver *);
 
-#if defined(CONFIG_NOP_USB_XCEIV) || defined(CONFIG_NOP_USB_XCEIV_MODULE)
+#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
 /* sometimes transceivers are accessed only through e.g. ULPI */
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 8178156..faf4679 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 
-#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
 #define UIDHASH_SZ	(1 << UIDHASH_BITS)
 
 struct user_namespace {
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ae9ab13..4b9a7f5 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -33,6 +33,7 @@
 void vga_switcheroo_unregister_client(struct pci_dev *dev);
 int vga_switcheroo_register_client(struct pci_dev *dev,
 				   void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+				   void (*reprobe)(struct pci_dev *dev),
 				   bool (*can_switch)(struct pci_dev *dev));
 
 void vga_switcheroo_client_fb_set(struct pci_dev *dev,
@@ -48,6 +49,7 @@
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
 static inline int vga_switcheroo_register_client(struct pci_dev *dev,
 					  void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+					  void (*reprobe)(struct pci_dev *dev),
 					  bool (*can_switch)(struct pci_dev *dev)) { return 0; }
 static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
 static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 44b54f6..4ed6fcd 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -59,8 +59,9 @@
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
-				pgprot_t prot);
+extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -90,9 +91,6 @@
 					unsigned long flags,
 					unsigned long start, unsigned long end,
 					void *caller);
-extern struct vm_struct *get_vm_area_node(unsigned long size,
-					  unsigned long flags, int node,
-					  gfp_t gfp_mask);
 extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
@@ -120,7 +118,7 @@
 #ifdef CONFIG_SMP
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask);
+				     size_t align);
 
 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
 #endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index eaaea37..833e676 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -254,6 +254,11 @@
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
+
+int calculate_pressure_threshold(struct zone *zone);
+int calculate_normal_threshold(struct zone *zone);
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+				int (*calculate_pressure)(struct zone *));
 #else /* CONFIG_SMP */
 
 /*
@@ -298,6 +303,8 @@
 #define dec_zone_page_state __dec_zone_page_state
 #define mod_zone_page_state __mod_zone_page_state
 
+#define set_pgdat_percpu_threshold(pgdat, callback) { }
+
 static inline void refresh_cpu_vm_stats(int cpu) { }
 #endif
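
set_pgdat_percpu_threshold() switches every zone in a node between the two threshold calculations declared above: tight per-cpu deltas while reclaim needs accurate counters, relaxed ones afterwards. A hedged sketch of that pattern; the my_* names are placeholders, not the real reclaim path:

	#include <linux/vmstat.h>

	static void my_do_reclaim(pg_data_t *pgdat);	/* hypothetical */

	static void my_reclaim_pass(pg_data_t *pgdat)
	{
		/* small deltas: watermark checks see an accurate NR_FREE_PAGES */
		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);

		my_do_reclaim(pgdat);

		/* back to the larger, cheaper deltas once pressure is off */
		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
	}
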
 
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bd257fe..1ac1158 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -409,7 +409,7 @@
 }
 
 /* Obsolete. use cancel_delayed_work_sync() */
-static inline
+static inline __deprecated
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
 					struct delayed_work *work)
 {
@@ -417,7 +417,7 @@
 }
 
 /* Obsolete. use cancel_delayed_work_sync() */
-static inline
+static inline __deprecated
 void cancel_rearming_delayed_work(struct delayed_work *work)
 {
 	cancel_delayed_work_sync(work);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index f1e5bde..e6131ef 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -40,9 +40,13 @@
 #define XATTR_SMACK_SUFFIX "SMACK64"
 #define XATTR_SMACK_IPIN "SMACK64IPIN"
 #define XATTR_SMACK_IPOUT "SMACK64IPOUT"
+#define XATTR_SMACK_EXEC "SMACK64EXEC"
+#define XATTR_SMACK_TRANSMUTE "SMACK64TRANSMUTE"
 #define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX
 #define XATTR_NAME_SMACKIPIN	XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN
 #define XATTR_NAME_SMACKIPOUT	XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT
+#define XATTR_NAME_SMACKEXEC	XATTR_SECURITY_PREFIX XATTR_SMACK_EXEC
+#define XATTR_NAME_SMACKTRANSMUTE XATTR_SECURITY_PREFIX XATTR_SMACK_TRANSMUTE
 
 #define XATTR_CAPS_SUFFIX "capability"
 #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
diff --git a/include/linux/xz.h b/include/linux/xz.h
new file mode 100644
index 0000000..64cffa6
--- /dev/null
+++ b/include/linux/xz.h
@@ -0,0 +1,264 @@
+/*
+ * XZ decompressor
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_H
+#define XZ_H
+
+#ifdef __KERNEL__
+#	include <linux/stddef.h>
+#	include <linux/types.h>
+#else
+#	include <stddef.h>
+#	include <stdint.h>
+#endif
+
+/* In Linux, this is used to make extern functions static when needed. */
+#ifndef XZ_EXTERN
+#	define XZ_EXTERN extern
+#endif
+
+/**
+ * enum xz_mode - Operation mode
+ *
+ * @XZ_SINGLE:              Single-call mode. This uses less RAM than
+ *                          the multi-call modes, because the LZMA2
+ *                          dictionary doesn't need to be allocated as
+ *                          part of the decoder state. All required data
+ *                          structures are allocated at initialization,
+ *                          so xz_dec_run() cannot return XZ_MEM_ERROR.
+ * @XZ_PREALLOC:            Multi-call mode with preallocated LZMA2
+ *                          dictionary buffer. All data structures are
+ *                          allocated at initialization, so xz_dec_run()
+ *                          cannot return XZ_MEM_ERROR.
+ * @XZ_DYNALLOC:            Multi-call mode. The LZMA2 dictionary is
+ *                          allocated once the required size has been
+ *                          parsed from the stream headers. If the
+ *                          allocation fails, xz_dec_run() will return
+ *                          XZ_MEM_ERROR.
+ *
+ * It is possible to enable support only for a subset of the above
+ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
+ * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
+ * with support for all operation modes, but the preboot code may
+ * be built with fewer features to minimize code size.
+ */
+enum xz_mode {
+	XZ_SINGLE,
+	XZ_PREALLOC,
+	XZ_DYNALLOC
+};
+
+/**
+ * enum xz_ret - Return codes
+ * @XZ_OK:                  Everything is OK so far. More input or more
+ *                          output space is required to continue. This
+ *                          return code is possible only in multi-call mode
+ *                          (XZ_PREALLOC or XZ_DYNALLOC).
+ * @XZ_STREAM_END:          Operation finished successfully.
+ * @XZ_UNSUPPORTED_CHECK:   Integrity check type is not supported. Decoding
+ *                          is still possible in multi-call mode by simply
+ *                          calling xz_dec_run() again.
+ *                          Note that this return value is used only if
+ *                          XZ_DEC_ANY_CHECK was defined at build time,
+ *                          which is not used in the kernel. Unsupported
+ *                          check types return XZ_OPTIONS_ERROR if
+ *                          XZ_DEC_ANY_CHECK was not defined at build time.
+ * @XZ_MEM_ERROR:           Allocating memory failed. This return code is
+ *                          possible only if the decoder was initialized
+ *                          with XZ_DYNALLOC. The amount of memory the
+ *                          decoder tried to allocate was no more than the
+ *                          dict_max argument given to xz_dec_init().
+ * @XZ_MEMLIMIT_ERROR:      A bigger LZMA2 dictionary would be needed than
+ *                          allowed by the dict_max argument given to
+ *                          xz_dec_init(). This return value is possible
+ *                          only in multi-call mode (XZ_PREALLOC or
+ *                          XZ_DYNALLOC); the single-call mode (XZ_SINGLE)
+ *                          ignores the dict_max argument.
+ * @XZ_FORMAT_ERROR:        File format was not recognized (wrong magic
+ *                          bytes).
+ * @XZ_OPTIONS_ERROR:       This implementation doesn't support the requested
+ *                          compression options. In the decoder this means
+ *                          that the header CRC32 matches, but the header
+ *                          itself specifies something that we don't support.
+ * @XZ_DATA_ERROR:          Compressed data is corrupt.
+ * @XZ_BUF_ERROR:           Cannot make any progress. Details are slightly
+ *                          different between multi-call and single-call
+ *                          mode; more information below.
+ *
+ * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls
+ * to XZ code cannot consume any input and cannot produce any new output.
+ * This happens when there is no new input available, or the output buffer
+ * is full while at least one output byte is still pending. Assuming your
+ * code is not buggy, you can get this error only when decoding a compressed
+ * stream that is truncated or otherwise corrupt.
+ *
+ * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer
+ * is too small or the compressed input is corrupt in a way that makes the
+ * decoder produce more output than the caller expected. When it is
+ * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR
+ * is used instead of XZ_BUF_ERROR.
+ */
+enum xz_ret {
+	XZ_OK,
+	XZ_STREAM_END,
+	XZ_UNSUPPORTED_CHECK,
+	XZ_MEM_ERROR,
+	XZ_MEMLIMIT_ERROR,
+	XZ_FORMAT_ERROR,
+	XZ_OPTIONS_ERROR,
+	XZ_DATA_ERROR,
+	XZ_BUF_ERROR
+};
+
+/**
+ * struct xz_buf - Passing input and output buffers to XZ code
+ * @in:         Beginning of the input buffer. This may be NULL if and only
+ *              if in_pos is equal to in_size.
+ * @in_pos:     Current position in the input buffer. This must not exceed
+ *              in_size.
+ * @in_size:    Size of the input buffer
+ * @out:        Beginning of the output buffer. This may be NULL if and only
+ *              if out_pos is equal to out_size.
+ * @out_pos:    Current position in the output buffer. This must not exceed
+ *              out_size.
+ * @out_size:   Size of the output buffer
+ *
+ * Only the contents of the output buffer from out[out_pos] onward, and
+ * the variables in_pos and out_pos are modified by the XZ code.
+ */
+struct xz_buf {
+	const uint8_t *in;
+	size_t in_pos;
+	size_t in_size;
+
+	uint8_t *out;
+	size_t out_pos;
+	size_t out_size;
+};
+
+/**
+ * struct xz_dec - Opaque type to hold the XZ decoder state
+ */
+struct xz_dec;
+
+/**
+ * xz_dec_init() - Allocate and initialize a XZ decoder state
+ * @mode:       Operation mode
+ * @dict_max:   Maximum size of the LZMA2 dictionary (history buffer) for
+ *              multi-call decoding. This is ignored in single-call mode
+ *              (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes
+ *              or 2^n + 2^(n-1) bytes (the latter sizes are less common
+ *              in practice), so other values for dict_max don't make sense.
+ *              In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB,
+ *              512 KiB, and 1 MiB are probably the only reasonable values,
+ *              except for kernel and initramfs images where a bigger
+ *              dictionary can be fine and useful.
+ *
+ * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at
+ * once. The caller must provide enough output space or the decoding will
+ * fail. The output space is used as the dictionary buffer, which is why
+ * there is no need to allocate the dictionary as part of the decoder's
+ * internal state.
+ *
+ * Because the output buffer is used as the workspace, streams encoded using
+ * a big dictionary are not a problem in single-call mode. It is enough that
+ * the output buffer is big enough to hold the actual uncompressed data; it
+ * can be smaller than the dictionary size stored in the stream headers.
+ *
+ * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes
+ * of memory is preallocated for the LZMA2 dictionary. This way there is no
+ * risk that xz_dec_run() could run out of memory, since xz_dec_run() will
+ * never allocate any memory. Instead, if the preallocated dictionary is too
+ * small for decoding the given input stream, xz_dec_run() will return
+ * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be
+ * decoded to avoid allocating an excessive amount of memory for the dictionary.
+ *
+ * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC):
+ * dict_max specifies the maximum allowed dictionary size that xz_dec_run()
+ * may allocate once it has parsed the dictionary size from the stream
+ * headers. This way excessive allocations can be avoided while still
+ * limiting the maximum memory usage to a sane value to prevent running the
+ * system out of memory when decompressing streams from untrusted sources.
+ *
+ * On success, xz_dec_init() returns a pointer to struct xz_dec, which is
+ * ready to be used with xz_dec_run(). If memory allocation fails,
+ * xz_dec_init() returns NULL.
+ */
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+
+/**
+ * xz_dec_run() - Run the XZ decoder
+ * @s:          Decoder state allocated using xz_dec_init()
+ * @b:          Input and output buffers
+ *
+ * The possible return values depend on build options and operation mode.
+ * See enum xz_ret for details.
+ *
+ * Note that if an error occurs in single-call mode (return value is not
+ * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the
+ * contents of the output buffer from b->out[b->out_pos] onward are
+ * undefined. This is true even after XZ_BUF_ERROR, because with some filter
+ * chains, there may be a second pass over the output buffer, and this pass
+ * cannot be properly done if the output buffer is truncated. Thus, you
+ * cannot give the single-call decoder a too small buffer and then expect to
+ * get that amount valid data from the beginning of the stream. You must use
+ * the multi-call decoder if you don't want to uncompress the whole stream.
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+
+/**
+ * xz_dec_reset() - Reset an already allocated decoder state
+ * @s:          Decoder state allocated using xz_dec_init()
+ *
+ * This function can be used to reset the multi-call decoder state without
+ * freeing and reallocating memory with xz_dec_end() and xz_dec_init().
+ *
+ * In single-call mode, xz_dec_reset() is always called at the beginning of
+ * xz_dec_run(). Thus, an explicit call to xz_dec_reset() is useful only in
+ * multi-call mode.
+ */
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+
+/**
+ * xz_dec_end() - Free the memory allocated for the decoder state
+ * @s:          Decoder state allocated using xz_dec_init(). If s is NULL,
+ *              this function does nothing.
+ */
+XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+
+/*
+ * Standalone build (userspace build or in-kernel build for boot time use)
+ * needs a CRC32 implementation. For normal in-kernel use, kernel's own
+ * CRC32 module is used instead, and users of this module don't need to
+ * care about the functions below.
+ */
+#ifndef XZ_INTERNAL_CRC32
+#	ifdef __KERNEL__
+#		define XZ_INTERNAL_CRC32 0
+#	else
+#		define XZ_INTERNAL_CRC32 1
+#	endif
+#endif
+
+#if XZ_INTERNAL_CRC32
+/*
+ * This must be called before any other xz_* function to initialize
+ * the CRC32 lookup table.
+ */
+XZ_EXTERN void xz_crc32_init(void);
+
+/*
+ * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
+ * calculation, the third argument must be zero. To continue the calculation,
+ * the previously returned value is passed as the third argument.
+ */
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+#endif
+#endif
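
Since the header above documents the decoder contract in some detail, a short hedged sketch of a multi-call decode loop built only on these declarations may help; fill_input() and flush_output() are hypothetical I/O helpers supplied by the caller, and the 1 MiB dict_max is an arbitrary choice. Not part of the patch.

#include <linux/xz.h>

/* Hypothetical caller-supplied helpers: refill the input buffer
 * (returning the number of bytes read, 0 at end of input) and consume
 * decoded bytes. */
extern size_t fill_input(uint8_t *buf, size_t size);
extern void flush_output(const uint8_t *buf, size_t size);

/* Sketch only: XZ_DYNALLOC multi-call decoding. */
static enum xz_ret example_decode_stream(void)
{
	uint8_t inbuf[4096], outbuf[4096];
	struct xz_buf b = {
		.in = inbuf, .in_pos = 0, .in_size = 0,
		.out = outbuf, .out_pos = 0, .out_size = sizeof(outbuf),
	};
	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 20);
	enum xz_ret ret;

	if (s == NULL)
		return XZ_MEM_ERROR;

	do {
		if (b.in_pos == b.in_size) {
			b.in_size = fill_input(inbuf, sizeof(inbuf));
			b.in_pos = 0;
		}

		ret = xz_dec_run(s, &b);

		if (b.out_pos == b.out_size || ret != XZ_OK) {
			flush_output(outbuf, b.out_pos);
			b.out_pos = 0;
		}
	} while (ret == XZ_OK);

	xz_dec_end(s);
	return ret;	/* XZ_STREAM_END on success */
}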
diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h
index c59cc02..b586495 100644
--- a/include/media/davinci/vpss.h
+++ b/include/media/davinci/vpss.h
@@ -44,7 +44,7 @@
 	short pplen;
 };
 
-/* Used for enable/diable VPSS Clock */
+/* Used for enable/disable VPSS Clock */
 enum vpss_clock_sel {
 	/* DM355/DM365 */
 	VPSS_CCDC_CLOCK,
diff --git a/include/net/ah.h b/include/net/ah.h
index f0129f7..ca95b98 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
 #include <linux/skbuff.h>
 
 /* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN	12
+#define MAX_AH_AUTH_LEN	64
 
 struct crypto_ahash;
 
diff --git a/include/net/arp.h b/include/net/arp.h
index f4cf6ce..91f0568 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -25,5 +25,6 @@
 				  const unsigned char *src_hw,
 				  const unsigned char *target_hw);
 extern void arp_xmit(struct sk_buff *skb);
+int arp_invalidate(struct net_device *dev, __be32 ip);
 
 #endif	/* _ARP_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bcc9f44..1322695 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1103,6 +1103,8 @@
  * @change_mpath: change a given mesh path
  * @get_mpath: get a mesh path for the given parameters
  * @dump_mpath: dump mesh path callback -- resume dump at index @idx
+ * @join_mesh: join the mesh network with the specified parameters
+ * @leave_mesh: leave the current mesh network
  *
  * @get_mesh_config: Get the current mesh configuration
  *
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5b3fd5a..62c0ce2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -337,6 +337,10 @@
  * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame
  * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this
  *	frame and selects the maximum number of streams that it can use.
+ * @IEEE80211_TX_CTL_TX_OFFCHAN: Marks this packet to be transmitted on
+ *	the off-channel channel when a remain-on-channel offload is done
+ *	in hardware -- normal packets still flow and are expected to be
+ *	handled properly by the device.
  *
  * Note: If you have to add new flags to the enumeration, then don't
  *	 forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -1753,6 +1757,16 @@
  *	(also see nl80211.h @NL80211_ATTR_WIPHY_ANTENNA_TX).
  *
  * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
+ *
+ * @remain_on_channel: Starts an off-channel period on the given channel, must
+ *	call back to ieee80211_ready_on_channel() when on that channel. Note
+ *	that normal channel traffic is not stopped as this is intended for hw
+ *	offload. Frames to transmit on the off-channel channel are transmitted
+ *	normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
+ *	duration (which will always be non-zero) expires, the driver must call
+ *	ieee80211_remain_on_channel_expired(). This callback may sleep.
+ * @cancel_remain_on_channel: Requests that an ongoing off-channel period is
+ *	aborted before it expires. This callback may sleep.
  */
 struct ieee80211_ops {
 	int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 1ee717e..a4c9936 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -7,16 +7,6 @@
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-			       struct net_device *in,
-			       struct net_device *out,
-			       int (*okfn)(struct sk_buff *));
-
-struct inet_frags_ctl;
-
 #include <linux/sysctl.h>
 extern struct ctl_table nf_ct_ipv6_sysctl_table[];
 
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 94dd54d..fd79c9a 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -3,4 +3,14 @@
 
 extern void nf_defrag_ipv6_enable(void);
 
+extern int nf_ct_frag6_init(void);
+extern void nf_ct_frag6_cleanup(void);
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+			       struct net_device *in,
+			       struct net_device *out,
+			       int (*okfn)(struct sk_buff *));
+
+struct inet_frags_ctl;
+
 #endif /* _NF_DEFRAG_IPV6_H */
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index d5df797..5395e09 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -107,8 +107,8 @@
 	int			sock_type;
 };
 
-int phonet_proto_register(int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
 
 int phonet_sysctl_init(void);
 void phonet_sysctl_exit(void);
diff --git a/include/net/red.h b/include/net/red.h
index 995108e..3319f16 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -97,7 +97,6 @@
 	u32		forced_mark;	/* Forced marks, qavg > max_thresh */
 	u32		pdrop;          /* Drops due to queue limits */
 	u32		other;          /* Drops due to drop() calls */
-	u32		backlog;
 };
 
 struct red_parms {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57eb..e9eee99 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -207,7 +207,7 @@
 	return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -394,7 +394,7 @@
 	return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
 	return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -426,10 +426,18 @@
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+				 const struct sk_buff *skb)
 {
-	sch->bstats.bytes += len;
-	sch->bstats.packets++;
+	bstats->bytes += qdisc_pkt_len(skb);
+	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+				       const struct sk_buff *skb)
+{
+	bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+	qdisc_bstats_update(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
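
As a hedged illustration of the new GSO-aware accounting helper above, a queueing discipline that manages its own list would now account a packet at enqueue time like this; struct example_sched_data and example_enqueue() are made-up names. Not part of the patch.

#include <net/sch_generic.h>

/* Sketch only. */
struct example_sched_data {
	struct sk_buff_head queue;
};

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	__skb_queue_tail(&q->queue, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	/* was: __qdisc_update_bstats(sch, qdisc_pkt_len(skb)) */
	qdisc_bstats_update(sch, skb);	/* bytes plus gso_segs-aware packet count */
	return NET_XMIT_SUCCESS;
}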
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 2b2769c..2a128c8 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -99,8 +99,8 @@
 #define SCTP_SOCKOPT_PEELOFF	102	/* peel off association. */
 /* Options 104-106 are deprecated and removed. Do not use this space */
 #define SCTP_SOCKOPT_CONNECTX_OLD	107	/* CONNECTX old requests. */
-#define SCTP_GET_PEER_ADDRS	108		/* Get all peer addresss. */
-#define SCTP_GET_LOCAL_ADDRS	109		/* Get all local addresss. */
+#define SCTP_GET_PEER_ADDRS	108		/* Get all peer addresses. */
+#define SCTP_GET_LOCAL_ADDRS	109		/* Get all local addresses. */
 #define SCTP_SOCKOPT_CONNECTX	110		/* CONNECTX requests. */
 #define SCTP_SOCKOPT_CONNECTX3	111	/* CONNECTX requests (updated) */
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 21a02f7..d884d26 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -152,14 +152,18 @@
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
 	 */
+	/* private: */
 	int			skc_dontcopy_begin[0];
+	/* public: */
 	union {
 		struct hlist_node	skc_node;
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	int			skc_tx_queue_mapping;
 	atomic_t		skc_refcnt;
+	/* private: */
 	int                     skc_dontcopy_end[0];
+	/* public: */
 };
 
 /**
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 8e9b222..8a143ca 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -46,7 +46,7 @@
  */
 struct fcp_cmnd {
 	__u8		fc_lun[8];	/* logical unit number */
-	__u8		fc_cmdref;	/* commmand reference number */
+	__u8		fc_cmdref;	/* command reference number */
 	__u8		fc_pri_ta;	/* priority and task attribute */
 	__u8		fc_tm_flags;	/* task management flags */
 	__u8		fc_flags;	/* additional len & flags */
@@ -58,7 +58,7 @@
 
 struct fcp_cmnd32 {
 	__u8		fc_lun[8];	/* logical unit number */
-	__u8		fc_cmdref;	/* commmand reference number */
+	__u8		fc_cmdref;	/* command reference number */
 	__u8		fc_pri_ta;	/* priority and task attribute */
 	__u8		fc_tm_flags;	/* task management flags */
 	__u8		fc_flags;	/* additional len & flags */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index a8631ac..c3e1cbc 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -263,6 +263,7 @@
 	ISCSI_ERR_INVALID_HOST		= ISCSI_ERR_BASE + 18,
 	ISCSI_ERR_XMIT_FAILED		= ISCSI_ERR_BASE + 19,
 	ISCSI_ERR_TCP_CONN_CLOSE	= ISCSI_ERR_BASE + 20,
+	ISCSI_ERR_SCSI_EH_SESSION_RST	= ISCSI_ERR_BASE + 21,
 };
 
 /*
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 5c4c167..f53c8e3 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -221,8 +221,8 @@
  * @InputRequests:         Number of input requests
  * @OutputRequests:        Number of output requests
  * @ControlRequests:       Number of control requests
- * @InputMegabytes:        Number of received megabytes
- * @OutputMegabytes:       Number of transmitted megabytes
+ * @InputBytes:            Number of received bytes
+ * @OutputBytes:           Number of transmitted bytes
  * @VLinkFailureCount:     Number of virtual link failures
  * @MissDiscAdvCount:      Number of missing FIP discovery advertisement
  */
@@ -241,8 +241,8 @@
 	u64		InputRequests;
 	u64		OutputRequests;
 	u64		ControlRequests;
-	u64		InputMegabytes;
-	u64		OutputMegabytes;
+	u64		InputBytes;
+	u64		OutputBytes;
 	u64		VLinkFailureCount;
 	u64		MissDiscAdvCount;
 };
@@ -263,7 +263,6 @@
  * struct fc_fcp_pkt - FCP request structure (one for each scsi_cmnd request)
  * @lp:              The associated local port
  * @state:           The state of the I/O
- * @tgt_flags:       Target's flags
  * @ref_cnt:         Reference count
  * @scsi_pkt_lock:   Lock to protect the SCSI packet (must be taken before the
  *                   host_lock if both are to be held at the same time)
@@ -298,7 +297,6 @@
 	/* Housekeeping information */
 	struct fc_lport   *lp;
 	u16		  state;
-	u16		  tgt_flags;
 	atomic_t	  ref_cnt;
 	spinlock_t	  scsi_pkt_lock;
 
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 06f1b5a..feb6a94 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -92,10 +92,12 @@
  * @timer_work:	   &work_struct for doing keep-alives and resets.
  * @recv_work:	   &work_struct for receiving FIP frames.
  * @fip_recv_list: list of received FIP frames.
+ * @flogi_req:	   clone of FLOGI request sent
  * @rnd_state:	   state for pseudo-random number generator.
  * @port_id:	   proposed or selected local-port ID.
  * @user_mfs:	   configured maximum FC frame size, including FC header.
  * @flogi_oxid:    exchange ID of most recent fabric login.
+ * @flogi_req_send: send of FLOGI requested
  * @flogi_count:   number of FLOGI attempts in AUTO mode.
  * @map_dest:	   use the FC_MAP mode for destination MAC addresses.
  * @spma:	   supports SPMA server-provided MACs mode
@@ -106,6 +108,7 @@
  * @update_mac:    LLD-supplied function to handle changes to MAC addresses.
  * @get_src_addr:  LLD-supplied function to supply a source MAC address.
  * @ctlr_mutex:	   lock protecting this structure.
+ * @ctlr_lock:     spinlock covering flogi_req
  *
  * This structure is used by all FCoE drivers.  It contains information
  * needed by all FCoE low-level drivers (LLDs) as well as internal state
@@ -126,12 +129,14 @@
 	struct work_struct timer_work;
 	struct work_struct recv_work;
 	struct sk_buff_head fip_recv_list;
+	struct sk_buff *flogi_req;
 
 	struct rnd_state rnd_state;
 	u32 port_id;
 
 	u16 user_mfs;
 	u16 flogi_oxid;
+	u8 flogi_req_send;
 	u8 flogi_count;
 	u8 map_dest;
 	u8 spma;
@@ -143,6 +148,7 @@
 	void (*update_mac)(struct fc_lport *, u8 *addr);
 	u8 * (*get_src_addr)(struct fc_lport *);
 	struct mutex ctlr_mutex;
+	spinlock_t ctlr_lock;
 };
 
 /**
@@ -155,6 +161,7 @@
  * @fcf_mac:	 Ethernet address of the FCF
  * @vfid:	 virtual fabric ID
  * @pri:	 selection priority, smaller values are better
+ * @flogi_sent:	 current FLOGI sent to this FCF
  * @flags:	 flags received from advertisement
  * @fka_period:	 keep-alive period, in jiffies
  *
@@ -176,6 +183,7 @@
 	u8 fcf_mac[ETH_ALEN];
 
 	u8 pri;
+	u8 flogi_sent;
 	u16 flags;
 	u32 fka_period;
 	u8 fd_flags:1;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index b81d969..748382b 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -89,6 +89,7 @@
 	ISCSI_TASK_RUNNING,
 	ISCSI_TASK_ABRT_TMF,		/* aborted due to TMF */
 	ISCSI_TASK_ABRT_SESS_RECOV,	/* aborted due to session recovery */
+	ISCSI_TASK_REQUEUE_SCSIQ,	/* qcmd requeueing to scsi-ml */
 };
 
 struct iscsi_r2t_info {
@@ -341,7 +342,7 @@
 extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
 extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
 extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
-extern int iscsi_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *sc);
+extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc);
 
 /*
  * iSCSI host helpers.
@@ -419,6 +420,7 @@
 extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
 extern void iscsi_requeue_task(struct iscsi_task *task);
 extern void iscsi_put_task(struct iscsi_task *task);
+extern void __iscsi_put_task(struct iscsi_task *task);
 extern void __iscsi_get_task(struct iscsi_task *task);
 extern void iscsi_complete_scsi_task(struct iscsi_task *task,
 				     uint32_t exp_cmdsn, uint32_t max_cmdsn);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 90ce527..8f6bb9c 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -361,6 +361,8 @@
 	/* The class calls this to send a task for execution. */
 	int lldd_max_execute_num;
 	int lldd_queue_size;
+	int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
+				* their siblings when forming wide ports */
 
 	/* LLDD calls these to notify the class of an event. */
 	void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 216af85..648d233 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -104,6 +104,7 @@
 #define UNMAP		      0x42
 #define READ_TOC              0x43
 #define READ_HEADER           0x44
+#define GET_EVENT_STATUS_NOTIFICATION 0x4a
 #define LOG_SELECT            0x4c
 #define LOG_SENSE             0x4d
 #define XDWRITEREAD_10        0x53
@@ -115,33 +116,61 @@
 #define PERSISTENT_RESERVE_OUT 0x5f
 #define VARIABLE_LENGTH_CMD   0x7f
 #define REPORT_LUNS           0xa0
+#define SECURITY_PROTOCOL_IN  0xa2
 #define MAINTENANCE_IN        0xa3
 #define MAINTENANCE_OUT       0xa4
 #define MOVE_MEDIUM           0xa5
 #define EXCHANGE_MEDIUM       0xa6
 #define READ_12               0xa8
 #define WRITE_12              0xaa
+#define READ_MEDIA_SERIAL_NUMBER 0xab
 #define WRITE_VERIFY_12       0xae
 #define VERIFY_12	      0xaf
 #define SEARCH_HIGH_12        0xb0
 #define SEARCH_EQUAL_12       0xb1
 #define SEARCH_LOW_12         0xb2
+#define SECURITY_PROTOCOL_OUT 0xb5
 #define READ_ELEMENT_STATUS   0xb8
 #define SEND_VOLUME_TAG       0xb6
 #define WRITE_LONG_2          0xea
+#define EXTENDED_COPY         0x83
+#define RECEIVE_COPY_RESULTS  0x84
+#define ACCESS_CONTROL_IN     0x86
+#define ACCESS_CONTROL_OUT    0x87
 #define READ_16               0x88
 #define WRITE_16              0x8a
+#define READ_ATTRIBUTE        0x8c
+#define WRITE_ATTRIBUTE	      0x8d
 #define VERIFY_16	      0x8f
 #define WRITE_SAME_16	      0x93
 #define SERVICE_ACTION_IN     0x9e
 /* values for service action in */
 #define	SAI_READ_CAPACITY_16  0x10
 #define SAI_GET_LBA_STATUS    0x12
+/* values for VARIABLE_LENGTH_CMD service action codes
+ * see spc4r17 Section D.3.5, table D.7 and D.8 */
+#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
 /* values for maintenance in */
+#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
 #define MI_REPORT_TARGET_PGS  0x0a
+#define MI_REPORT_ALIASES     0x0b
+#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
+#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
+#define MI_REPORT_PRIORITY   0x0e
+#define MI_REPORT_TIMESTAMP  0x0f
+#define MI_MANAGEMENT_PROTOCOL_IN 0x10
 /* values for maintenance out */
+#define MO_SET_IDENTIFYING_INFORMATION 0x06
 #define MO_SET_TARGET_PGS     0x0a
+#define MO_CHANGE_ALIASES     0x0b
+#define MO_SET_PRIORITY       0x0e
+#define MO_SET_TIMESTAMP      0x0f
+#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
 /* values for variable length command */
+#define XDREAD_32	      0x03
+#define XDWRITE_32	      0x04
+#define XPWRITE_32	      0x06
+#define XDWRITEREAD_32	      0x07
 #define READ_32		      0x09
 #define VERIFY_32	      0x0a
 #define WRITE_32	      0x0b
diff --git a/include/sound/alc5623.h b/include/sound/alc5623.h
new file mode 100644
index 0000000..422c97d
--- /dev/null
+++ b/include/sound/alc5623.h
@@ -0,0 +1,15 @@
+#ifndef _INCLUDE_SOUND_ALC5623_H
+#define _INCLUDE_SOUND_ALC5623_H
+struct alc5623_platform_data {
+	/* configure :                              */
+	/* Lineout/Speaker Amps Vmid ratio control  */
+	/* enable/disable adc/dac high pass filters */
+	unsigned int add_ctrl;
+	/* configure :                              */
+	/* output to enable when jack is low        */
+	/* output to enable when jack is high       */
+	/* jack detect (gpio/nc/jack detect [12])   */
+	unsigned int jack_det_ctrl;
+};
+#endif
+
diff --git a/include/sound/asound.h b/include/sound/asound.h
index a1803ec..5d6074f 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -259,6 +259,7 @@
 #define SNDRV_PCM_INFO_HALF_DUPLEX	0x00100000	/* only half duplex */
 #define SNDRV_PCM_INFO_JOINT_DUPLEX	0x00200000	/* playback and capture stream are somewhat correlated */
 #define SNDRV_PCM_INFO_SYNC_START	0x00400000	/* pcm support some kind of sync go */
+#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP	0x00800000	/* period wakeup can be disabled */
 #define SNDRV_PCM_INFO_FIFO_IN_FRAMES	0x80000000	/* internal kernel flag - FIFO size is in frames */
 
 typedef int __bitwise snd_pcm_state_t;
@@ -334,6 +335,8 @@
 #define	SNDRV_PCM_HW_PARAM_LAST_INTERVAL	SNDRV_PCM_HW_PARAM_TICK_TIME
 
 #define SNDRV_PCM_HW_PARAMS_NORESAMPLE	(1<<0)	/* avoid rate resampling */
+#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER	(1<<1)	/* export buffer */
+#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP	(1<<2)	/* disable period wakeups */
 
 struct snd_interval {
 	unsigned int min, max;
diff --git a/include/sound/control.h b/include/sound/control.h
index 112374d..7715e6f 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -160,12 +160,14 @@
 }
 
 /*
- * Frequently used control callbacks
+ * Frequently used control callbacks/helpers
  */
 int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_info *uinfo);
 int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo);
+int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
+		      unsigned int items, const char *const names[]);
 
 /*
  * virtual master control
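
The snd_ctl_enum_info() helper added above replaces the open-coded enumerated-control info boilerplate; a hedged sketch of a driver's .info callback using it, with made-up control texts, follows. Not part of the patch.

#include <linux/kernel.h>
#include <sound/control.h>

/* Sketch only. */
static const char * const example_texts[] = { "Off", "On" };

static int example_enum_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(example_texts),
				 example_texts);
}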
diff --git a/include/sound/hdsp.h b/include/sound/hdsp.h
index d98a78d..0909a38 100644
--- a/include/sound/hdsp.h
+++ b/include/sound/hdsp.h
@@ -28,6 +28,7 @@
 	Multiface,
 	H9652,
 	H9632,
+	RPM,
 	Undefined,
 };
 
diff --git a/include/sound/minors.h b/include/sound/minors.h
index a81798a..8f76420 100644
--- a/include/sound/minors.h
+++ b/include/sound/minors.h
@@ -31,8 +31,8 @@
 /* these minors can still be used for autoloading devices (/dev/aload*) */
 #define SNDRV_MINOR_CONTROL		0	/* 0 */
 #define SNDRV_MINOR_GLOBAL		1	/* 1 */
-#define SNDRV_MINOR_SEQUENCER		(SNDRV_MINOR_GLOBAL + 0 * 32)
-#define SNDRV_MINOR_TIMER		(SNDRV_MINOR_GLOBAL + 1 * 32)
+#define SNDRV_MINOR_SEQUENCER		1	/* SNDRV_MINOR_GLOBAL + 0 * 32 */
+#define SNDRV_MINOR_TIMER		33	/* SNDRV_MINOR_GLOBAL + 1 * 32 */
 
 #ifndef CONFIG_SND_DYNAMIC_MINORS
 						/* 2 - 3 (reserved) */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index dfd9b76..e731f8d 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -297,6 +297,7 @@
 	unsigned int info;
 	unsigned int rate_num;
 	unsigned int rate_den;
+	unsigned int no_period_wakeup: 1;
 
 	/* -- SW params -- */
 	int tstamp_mode;		/* mmap timestamp is updated */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e7b6802..1bafe95 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -16,8 +16,6 @@
 
 #include <linux/list.h>
 
-#include <sound/soc.h>
-
 struct snd_pcm_substream;
 
 /*
@@ -205,7 +203,7 @@
 	int (*resume)(struct snd_soc_dai *dai);
 
 	/* ops */
-	struct snd_soc_dai_ops *ops;
+	const struct snd_soc_dai_ops *ops;
 
 	/* DAI capabilities */
 	struct snd_soc_pcm_stream capture;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 8fd3b41..8031769 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,7 +16,6 @@
 #include <linux/device.h>
 #include <linux/types.h>
 #include <sound/control.h>
-#include <sound/soc.h>
 
 /* widget has no PM register bit */
 #define SND_SOC_NOPM	-1
@@ -72,6 +71,10 @@
 	 wcontrols, wncontrols) \
 {	.id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols}
+#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
+	 wcontrols, wncontrols) \
+{	.id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
 	 wcontrols, wncontrols)\
 {	.id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
@@ -90,6 +93,9 @@
 #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
 {	.id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1}
+#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
+{	.id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1}
 #define SND_SOC_DAPM_VALUE_MUX(wname, wreg, wshift, winvert, wcontrols) \
 {	.id = snd_soc_dapm_value_mux, .name = wname, .reg = wreg, \
 	.shift = wshift, .invert = winvert, .kcontrols = wcontrols, \
@@ -116,6 +122,11 @@
 {	.id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \
 	.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
+	wncontrols, wevent, wflags) \
+{	.id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \
+	.event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
 	wncontrols, wevent, wflags) \
 {	.id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
@@ -140,6 +151,11 @@
 {	.id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
 	.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_VIRT_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
+	wevent, wflags) \
+{	.id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
+	.event = wevent, .event_flags = wflags}
 
 /* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
 #define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
@@ -219,13 +235,6 @@
 	.info = snd_soc_info_volsw, \
 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
-#define SOC_DAPM_DOUBLE(xname, reg, shift_left, shift_right, max, invert, \
-	power) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
-	.info = snd_soc_info_volsw, \
- 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- 	.private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\
-		((max) << 16) | ((invert) << 24) }
 #define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_volsw, \
@@ -233,15 +242,6 @@
 	.tlv.p = (tlv_array), \
 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
-#define SOC_DAPM_DOUBLE_TLV(xname, reg, shift_left, shift_right, max, invert, \
-	power, tlv_array) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
-	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
-	.tlv.p = (tlv_array), \
-	.info = snd_soc_info_volsw, \
-	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-	.private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\
-		((max) << 16) | ((invert) << 24) }
 #define SOC_DAPM_ENUM(xname, xenum) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_enum_double, \
@@ -297,6 +297,7 @@
 struct snd_soc_dapm_path;
 struct snd_soc_dapm_pin;
 struct snd_soc_dapm_route;
+struct snd_soc_dapm_context;
 
 int dapm_reg_event(struct snd_soc_dapm_widget *w,
 		   struct snd_kcontrol *kcontrol, int event);
@@ -324,16 +325,16 @@
 	struct snd_ctl_elem_value *uncontrol);
 int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *uncontrol);
-int snd_soc_dapm_new_control(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget);
-int snd_soc_dapm_new_controls(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget,
 	int num);
 
 /* dapm path setup */
-int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec);
-void snd_soc_dapm_free(struct snd_soc_codec *codec);
-int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
 			    const struct snd_soc_dapm_route *route, int num);
 
 /* dapm events */
@@ -343,27 +344,33 @@
 
 /* dapm sys fs - used by the core */
 int snd_soc_dapm_sys_add(struct device *dev);
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec);
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm);
 
 /* dapm audio pin control and status */
-int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_sync(struct snd_soc_codec *codec);
-int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec,
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm,
+			    const char *pin);
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
+			     const char *pin);
+int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
+				const char *pin);
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
 				  const char *pin);
-int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin);
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
+				const char *pin);
 
 /* dapm widget types */
 enum snd_soc_dapm_type {
 	snd_soc_dapm_input = 0,		/* input pin */
 	snd_soc_dapm_output,		/* output pin */
 	snd_soc_dapm_mux,			/* selects 1 analog signal from many inputs */
+	snd_soc_dapm_virt_mux,			/* virtual version of snd_soc_dapm_mux */
 	snd_soc_dapm_value_mux,			/* selects 1 analog signal from many inputs */
 	snd_soc_dapm_mixer,			/* mixes several analog signals together */
 	snd_soc_dapm_mixer_named_ctl,		/* mixer with named controls */
 	snd_soc_dapm_pga,			/* programmable gain/attenuation (volume) */
+	snd_soc_dapm_out_drv,			/* output driver */
 	snd_soc_dapm_adc,			/* analog to digital converter */
 	snd_soc_dapm_dac,			/* digital to analog converter */
 	snd_soc_dapm_micbias,		/* microphone bias (power) */
@@ -425,6 +432,7 @@
 	char *sname;	/* stream name */
 	struct snd_soc_codec *codec;
 	struct list_head list;
+	struct snd_soc_dapm_context *dapm;
 
 	/* dapm control */
 	short reg;						/* negative reg = no direct dapm */
@@ -461,4 +469,35 @@
 	struct list_head power_list;
 };
 
+struct snd_soc_dapm_update {
+	struct snd_soc_dapm_widget *widget;
+	struct snd_kcontrol *kcontrol;
+	int reg;
+	int mask;
+	int val;
+};
+
+/* DAPM context */
+struct snd_soc_dapm_context {
+	int n_widgets; /* number of widgets in this context */
+	enum snd_soc_bias_level bias_level;
+	enum snd_soc_bias_level suspend_bias_level;
+	struct delayed_work delayed_work;
+	unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
+
+	struct snd_soc_dapm_update *update;
+
+	struct device *dev; /* from parent - for debug */
+	struct snd_soc_codec *codec; /* parent codec */
+	struct snd_soc_card *card; /* parent card */
+
+	/* used during DAPM updates */
+	int dev_power;
+	struct list_head list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_dapm;
+#endif
+};
+
 #endif
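
As a hedged sketch, the new SND_SOC_DAPM_OUT_DRV and SND_SOC_DAPM_VIRT_MUX widgets slot into a codec driver's DAPM table like the existing types; EXAMPLE_PWR_REG and all example_* names are hypothetical. Not part of the patch.

#include <linux/kernel.h>
#include <sound/soc.h>

/* Sketch only. */
static const char *example_capture_src[] = { "Mic", "Line" };
static const struct soc_enum example_capture_enum =
	SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(example_capture_src),
			     example_capture_src);
static const struct snd_kcontrol_new example_capture_mux =
	SOC_DAPM_ENUM("Capture Source", example_capture_enum);

static const struct snd_soc_dapm_widget example_widgets[] = {
	/* output driver stage, powered by bit 3 of a hypothetical register */
	SND_SOC_DAPM_OUT_DRV("HP Driver", EXAMPLE_PWR_REG, 3, 0, NULL, 0),
	/* register-less mux tracked purely in DAPM state */
	SND_SOC_DAPM_VIRT_MUX("Capture Mux", SND_SOC_NOPM, 0, 0,
			      &example_capture_mux),
};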
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 5c3bce8..74921f2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -222,10 +222,8 @@
 
 struct snd_jack;
 struct snd_soc_card;
-struct snd_soc_device;
 struct snd_soc_pcm_stream;
 struct snd_soc_ops;
-struct snd_soc_dai_mode;
 struct snd_soc_pcm_runtime;
 struct snd_soc_dai;
 struct snd_soc_dai_driver;
@@ -235,9 +233,10 @@
 struct snd_soc_codec;
 struct snd_soc_codec_driver;
 struct soc_enum;
-struct snd_soc_ac97_ops;
 struct snd_soc_jack;
 struct snd_soc_jack_pin;
+struct snd_soc_cache_ops;
+#include <sound/soc-dapm.h>
 
 #ifdef CONFIG_GPIOLIB
 struct snd_soc_jack_gpio;
@@ -253,17 +252,30 @@
 	SND_SOC_SPI,
 };
 
+enum snd_soc_compress_type {
+	SND_SOC_FLAT_COMPRESSION = 1,
+	SND_SOC_LZO_COMPRESSION,
+	SND_SOC_RBTREE_COMPRESSION
+};
+
 int snd_soc_register_platform(struct device *dev,
 		struct snd_soc_platform_driver *platform_drv);
 void snd_soc_unregister_platform(struct device *dev);
 int snd_soc_register_codec(struct device *dev,
-		struct snd_soc_codec_driver *codec_drv,
+		const struct snd_soc_codec_driver *codec_drv,
 		struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_codec(struct device *dev);
 int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg);
 int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
 			       int addr_bits, int data_bits,
 			       enum snd_soc_control_type control);
+int snd_soc_cache_sync(struct snd_soc_codec *codec);
+int snd_soc_cache_init(struct snd_soc_codec *codec);
+int snd_soc_cache_exit(struct snd_soc_codec *codec);
+int snd_soc_cache_write(struct snd_soc_codec *codec,
+			unsigned int reg, unsigned int value);
+int snd_soc_cache_read(struct snd_soc_codec *codec,
+		       unsigned int reg, unsigned int *value);
 
 /* Utility functions to get clock rates from various things */
 int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -420,23 +432,37 @@
 	int (*trigger)(struct snd_pcm_substream *, int);
 };
 
+/* SoC cache ops */
+struct snd_soc_cache_ops {
+	const char *name;
+	enum snd_soc_compress_type id;
+	int (*init)(struct snd_soc_codec *codec);
+	int (*exit)(struct snd_soc_codec *codec);
+	int (*read)(struct snd_soc_codec *codec, unsigned int reg,
+		unsigned int *value);
+	int (*write)(struct snd_soc_codec *codec, unsigned int reg,
+		unsigned int value);
+	int (*sync)(struct snd_soc_codec *codec);
+};
+
 /* SoC Audio Codec device */
 struct snd_soc_codec {
 	const char *name;
+	const char *name_prefix;
 	int id;
 	struct device *dev;
-	struct snd_soc_codec_driver *driver;
+	const struct snd_soc_codec_driver *driver;
 
 	struct mutex mutex;
 	struct snd_soc_card *card;
 	struct list_head list;
 	struct list_head card_list;
 	int num_dai;
+	enum snd_soc_compress_type compress_type;
 
 	/* runtime */
 	struct snd_ac97 *ac97;  /* for ad-hoc ac97 devices */
 	unsigned int active;
-	unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
 	unsigned int cache_only:1;  /* Suppress writes to hardware */
 	unsigned int cache_sync:1; /* Cache needs to be synced to hardware */
 	unsigned int suspended:1; /* Codec is in suspend PM state */
@@ -444,25 +470,25 @@
 	unsigned int ac97_registered:1; /* Codec has been AC97 registered */
 	unsigned int ac97_created:1; /* Codec has been created by SoC */
 	unsigned int sysfs_registered:1; /* codec has been sysfs registered */
+	unsigned int cache_init:1; /* codec cache has been initialized */
 
 	/* codec IO */
 	void *control_data; /* codec control (i2c/3wire) data */
 	hw_write_t hw_write;
 	unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
+	unsigned int (*read)(struct snd_soc_codec *, unsigned int);
+	int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
 	void *reg_cache;
+	const void *reg_def_copy;
+	const struct snd_soc_cache_ops *cache_ops;
+	struct mutex cache_rw_mutex;
 
 	/* dapm */
-	u32 pop_time;
-	struct list_head dapm_widgets;
-	struct list_head dapm_paths;
-	enum snd_soc_bias_level bias_level;
-	enum snd_soc_bias_level suspend_bias_level;
-	struct delayed_work delayed_work;
+	struct snd_soc_dapm_context dapm;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_codec_root;
 	struct dentry *debugfs_reg;
-	struct dentry *debugfs_pop_time;
 	struct dentry *debugfs_dapm;
 #endif
 };
@@ -488,6 +514,7 @@
 	short reg_cache_step;
 	short reg_word_size;
 	const void *reg_cache_default;
+	enum snd_soc_compress_type compress_type;
 
 	/* codec bias level */
 	int (*set_bias_level)(struct snd_soc_codec *,
@@ -554,6 +581,30 @@
 	struct snd_soc_ops *ops;
 };
 
+struct snd_soc_codec_conf {
+	const char *dev_name;
+
+	/*
+	 * optional map of kcontrol, widget and path name prefixes that are
+	 * associated per device
+	 */
+	const char *name_prefix;
+
+	/*
+	 * set this to the desired compression type if you want to
+	 * override the one supplied in codec->driver->compress_type
+	 */
+	enum snd_soc_compress_type compress_type;
+};
+
+struct snd_soc_aux_dev {
+	const char *name;		/* Codec name */
+	const char *codec_name;		/* for multi-codec */
+
+	/* codec/machine specific init - e.g. add machine controls */
+	int (*init)(struct snd_soc_dapm_context *dapm);
+};
+
 /* SoC card */
 struct snd_soc_card {
 	const char *name;
@@ -579,6 +630,8 @@
 	/* callbacks */
 	int (*set_bias_level)(struct snd_soc_card *,
 			      enum snd_soc_bias_level level);
+	int (*set_bias_level_post)(struct snd_soc_card *,
+				   enum snd_soc_bias_level level);
 
 	long pmdown_time;
 
@@ -588,12 +641,35 @@
 	struct snd_soc_pcm_runtime *rtd;
 	int num_rtd;
 
+	/* optional codec specific configuration */
+	struct snd_soc_codec_conf *codec_conf;
+	int num_configs;
+
+	/*
+	 * optional auxiliary devices such as amplifiers or codecs with DAI
+	 * link unused
+	 */
+	struct snd_soc_aux_dev *aux_dev;
+	int num_aux_devs;
+	struct snd_soc_pcm_runtime *rtd_aux;
+	int num_aux_rtd;
+
 	struct work_struct deferred_resume_work;
 
 	/* lists of probed devices belonging to this card */
 	struct list_head codec_dev_list;
 	struct list_head platform_dev_list;
 	struct list_head dai_dev_list;
+
+	struct list_head widgets;
+	struct list_head paths;
+	struct list_head dapm_list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_card_root;
+	struct dentry *debugfs_pop_time;
+#endif
+	u32 pop_time;
 };
 
 /* SoC machine DAI configuration, glues a codec and cpu DAI together */
@@ -639,17 +715,9 @@
 };
 
 /* codec IO */
-static inline unsigned int snd_soc_read(struct snd_soc_codec *codec,
-					unsigned int reg)
-{
-	return codec->driver->read(codec, reg);
-}
-
-static inline unsigned int snd_soc_write(struct snd_soc_codec *codec,
-					 unsigned int reg, unsigned int val)
-{
-	return codec->driver->write(codec, reg, val);
-}
+unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
+unsigned int snd_soc_write(struct snd_soc_codec *codec,
+			   unsigned int reg, unsigned int val);
 
 /* device driver data */
 
diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h
new file mode 100644
index 0000000..7fe7460
--- /dev/null
+++ b/include/target/configfs_macros.h
@@ -0,0 +1,147 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs_macros.h - extends macros for configfs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * 	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ *      Copyright (c) 2002-2003	Patrick Mochel
+ *      Copyright (c) 2002-2003	Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle.  All rights reserved.
+ *
+ * Added CONFIGFS_EATTR() macros from original configfs.h macros
+ * Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * Please read Documentation/filesystems/configfs.txt before using the
+ * configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_MACROS_H_
+#define _CONFIGFS_MACROS_H_
+
+#include <linux/configfs.h>
+
+/*
+ * Users often need to create attribute structures for their configurable
+ * attributes, containing a configfs_attribute member and function pointers
+ * for the show() and store() operations on that attribute. If they don't
+ * need anything else on the extended attribute structure, they can use
+ * this macro to define it.  The argument _name ends up as
+ * 'struct _name_attribute', as well as in the names passed to
+ * CONFIGFS_EATTR_OPS() below.
+ * The argument _item is the name of the structure containing the
+ * struct config_item or struct config_group structure members
+ */
+#define CONFIGFS_EATTR_STRUCT(_name, _item)				\
+struct _name##_attribute {						\
+	struct configfs_attribute attr;					\
+	ssize_t (*show)(struct _item *, char *);			\
+	ssize_t (*store)(struct _item *, const char *, size_t);		\
+}
+
+/*
+ * With the extended attribute structure, users can use this macro
+ * (similar to sysfs' __ATTR) to make defining attributes easier.
+ * An example:
+ * #define MYITEM_EATTR(_name, _mode, _show, _store)	\
+ * struct myitem_attribute childless_attr_##_name =	\
+ *         __CONFIGFS_EATTR(_name, _mode, _show, _store)
+ */
+#define __CONFIGFS_EATTR(_name, _mode, _show, _store)			\
+{									\
+	.attr	= {							\
+			.ca_name = __stringify(_name),			\
+			.ca_mode = _mode,				\
+			.ca_owner = THIS_MODULE,			\
+	},								\
+	.show	= _show,						\
+	.store	= _store,						\
+}
+/* Here is a readonly version, only requiring a show() operation */
+#define __CONFIGFS_EATTR_RO(_name, _show)				\
+{									\
+	.attr	= {							\
+			.ca_name = __stringify(_name),			\
+			.ca_mode = 0444,				\
+			.ca_owner = THIS_MODULE,			\
+	},								\
+	.show	= _show,						\
+}
+
+/*
+ * With these extended attributes, the simple show_attribute() and
+ * store_attribute() operations need to call the show() and store() of the
+ * attributes.  This is a common pattern, so we provide a macro to define
+ * them.  The argument _name is the name of the attribute defined by
+ * CONFIGFS_ATTR_STRUCT(). The argument _item is the name of the structure
+ * containing the struct config_item or struct config_group structure member.
+ * The argument _item_member is the actual name of the struct config_* struct
+ * in your _item structure.  Meaning  my_structure->some_config_group.
+ *		                      ^^_item^^^^^  ^^_item_member^^^
+ * This macro expects the attributes to be named "struct <name>_attribute".
+ */
+#define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member)		\
+static struct _item *to_##_name(struct config_item *ci)			\
+{									\
+	return (ci) ? container_of(to_config_group(ci), struct _item,	\
+		_item_member) : NULL;					\
+}
+
+#define CONFIGFS_EATTR_OPS_SHOW(_name, _item)				\
+static ssize_t _name##_attr_show(struct config_item *item,		\
+				 struct configfs_attribute *attr,	\
+				 char *page)				\
+{									\
+	struct _item *_item = to_##_name(item);				\
+	struct _name##_attribute * _name##_attr =			\
+		container_of(attr, struct _name##_attribute, attr);	\
+	ssize_t ret = 0;						\
+									\
+	if (_name##_attr->show)						\
+		ret = _name##_attr->show(_item, page);			\
+	return ret;							\
+}
+
+#define CONFIGFS_EATTR_OPS_STORE(_name, _item)				\
+static ssize_t _name##_attr_store(struct config_item *item,		\
+				  struct configfs_attribute *attr,	\
+				  const char *page, size_t count)	\
+{									\
+	struct _item *_item = to_##_name(item);				\
+	struct _name##_attribute * _name##_attr =			\
+		container_of(attr, struct _name##_attribute, attr);	\
+	ssize_t ret = -EINVAL;						\
+									\
+	if (_name##_attr->store)					\
+		ret = _name##_attr->store(_item, page, count);		\
+	return ret;							\
+}
+
+#define CONFIGFS_EATTR_OPS(_name, _item, _item_member)			\
+	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
+	CONFIGFS_EATTR_OPS_SHOW(_name, _item);				\
+	CONFIGFS_EATTR_OPS_STORE(_name, _item);
+
+#define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member)		\
+	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
+	CONFIGFS_EATTR_OPS_SHOW(_name, _item);
+
+#endif /* _CONFIGFS_MACROS_H_ */
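
To show how these macros compose, here is a hedged sketch that wires up a hypothetical struct my_item (embedding a struct config_group named group) with one read-write "value" attribute; every my_item_* name is made up. Not part of the patch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <target/configfs_macros.h>

/* Sketch only. */
struct my_item {
	struct config_group group;
	int value;
};

/* CONFIGFS_EATTR_STRUCT defines struct my_item_attribute;
 * CONFIGFS_EATTR_OPS defines to_my_item(), my_item_attr_show()
 * and my_item_attr_store(). */
CONFIGFS_EATTR_STRUCT(my_item, my_item);
CONFIGFS_EATTR_OPS(my_item, my_item, group);

static ssize_t my_item_show_value(struct my_item *item, char *page)
{
	return sprintf(page, "%d\n", item->value);
}

static ssize_t my_item_store_value(struct my_item *item, const char *page,
				   size_t count)
{
	int v;

	if (sscanf(page, "%d", &v) != 1)
		return -EINVAL;
	item->value = v;
	return count;
}

static struct my_item_attribute my_item_attr_value =
	__CONFIGFS_EATTR(value, S_IRUGO | S_IWUSR,
			 my_item_show_value, my_item_store_value);

static struct configfs_attribute *my_item_attrs[] = {
	&my_item_attr_value.attr,
	NULL,
};

static struct configfs_item_operations my_item_item_ops = {
	.show_attribute		= my_item_attr_show,
	.store_attribute	= my_item_attr_store,
};

static struct config_item_type my_item_type = {
	.ct_item_ops	= &my_item_item_ops,
	.ct_attrs	= my_item_attrs,
	.ct_owner	= THIS_MODULE,
};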
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
new file mode 100644
index 0000000..07fdfb6
--- /dev/null
+++ b/include/target/target_core_base.h
@@ -0,0 +1,937 @@
+#ifndef TARGET_CORE_BASE_H
+#define TARGET_CORE_BASE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_cmnd.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include "target_core_mib.h"
+
+#define TARGET_CORE_MOD_VERSION		"v4.0.0-rc6"
+#define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
+
+/* Used by transport_generic_allocate_iovecs() */
+#define TRANSPORT_IOV_DATA_BUFFER		5
+/* Maximum Number of LUNs per Target Portal Group */
+#define TRANSPORT_MAX_LUNS_PER_TPG		256
+/*
+ * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
+ *
+ * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
+ * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
+ * 16-byte CDBs by default and require an extra allocation for
+ * 32-byte CDBs because of legacy issues.
+ *
+ * Within TCM Core there are no such legacy limitations, so we go ahead and
+ * use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
+ * within all TCM Core and subsystem plugin code.
+ */
+#define TCM_MAX_COMMAND_SIZE			32
+/*
+ * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
+ * defined 96, but the real limit is 252 (or 260 including the header)
+ */
+#define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
+/* Used by transport_send_check_condition_and_sense() */
+#define SPC_SENSE_KEY_OFFSET			2
+#define SPC_ASC_KEY_OFFSET			12
+#define SPC_ASCQ_KEY_OFFSET			13
+#define TRANSPORT_IQN_LEN			224
+/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
+#define LU_GROUP_NAME_BUF			256
+/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
+#define TG_PT_GROUP_NAME_BUF			256
+/* Used to parse VPD into struct t10_vpd */
+#define VPD_TMP_BUF_SIZE			128
+/* Used by transport_generic_cmd_sequencer() */
+#define READ_BLOCK_LEN          		6
+#define READ_CAP_LEN            		8
+#define READ_POSITION_LEN       		20
+#define INQUIRY_LEN				36
+/* Used by transport_get_inquiry_vpd_serial() */
+#define INQUIRY_VPD_SERIAL_LEN			254
+/* Used by transport_get_inquiry_vpd_device_ident() */
+#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254
+
+/* struct se_hba->hba_flags */
+enum hba_flags_table {
+	HBA_FLAGS_INTERNAL_USE	= 0x01,
+	HBA_FLAGS_PSCSI_MODE	= 0x02,
+};
+
+/* struct se_lun->lun_status */
+enum transport_lun_status_table {
+	TRANSPORT_LUN_STATUS_FREE = 0,
+	TRANSPORT_LUN_STATUS_ACTIVE = 1,
+};
+
+/* struct se_portal_group->se_tpg_type */
+enum transport_tpg_type_table {
+	TRANSPORT_TPG_TYPE_NORMAL = 0,
+	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
+};
+
+/* Used for generic timer flags */
+enum timer_flags_table {
+	TF_RUNNING	= 0x01,
+	TF_STOP		= 0x02,
+};
+
+/* Special transport agnostic struct se_cmd->t_states */
+enum transport_state_table {
+	TRANSPORT_NO_STATE	= 0,
+	TRANSPORT_NEW_CMD	= 1,
+	TRANSPORT_DEFERRED_CMD	= 2,
+	TRANSPORT_WRITE_PENDING	= 3,
+	TRANSPORT_PROCESS_WRITE	= 4,
+	TRANSPORT_PROCESSING	= 5,
+	TRANSPORT_COMPLETE_OK	= 6,
+	TRANSPORT_COMPLETE_FAILURE = 7,
+	TRANSPORT_COMPLETE_TIMEOUT = 8,
+	TRANSPORT_PROCESS_TMR	= 9,
+	TRANSPORT_TMR_COMPLETE	= 10,
+	TRANSPORT_ISTATE_PROCESSING = 11,
+	TRANSPORT_ISTATE_PROCESSED = 12,
+	TRANSPORT_KILL		= 13,
+	TRANSPORT_REMOVE	= 14,
+	TRANSPORT_FREE		= 15,
+	TRANSPORT_NEW_CMD_MAP	= 16,
+};
+
+/* Used for struct se_cmd->se_cmd_flags */
+enum se_cmd_flags_table {
+	SCF_SUPPORTED_SAM_OPCODE	= 0x00000001,
+	SCF_TRANSPORT_TASK_SENSE	= 0x00000002,
+	SCF_EMULATED_TASK_SENSE		= 0x00000004,
+	SCF_SCSI_DATA_SG_IO_CDB		= 0x00000008,
+	SCF_SCSI_CONTROL_SG_IO_CDB	= 0x00000010,
+	SCF_SCSI_CONTROL_NONSG_IO_CDB	= 0x00000020,
+	SCF_SCSI_NON_DATA_CDB		= 0x00000040,
+	SCF_SCSI_CDB_EXCEPTION		= 0x00000080,
+	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000100,
+	SCF_CMD_PASSTHROUGH_NOALLOC	= 0x00000200,
+	SCF_SE_CMD_FAILED		= 0x00000400,
+	SCF_SE_LUN_CMD			= 0x00000800,
+	SCF_SE_ALLOW_EOO		= 0x00001000,
+	SCF_SE_DISABLE_ONLINE_CHECK	= 0x00002000,
+	SCF_SENT_CHECK_CONDITION	= 0x00004000,
+	SCF_OVERFLOW_BIT		= 0x00008000,
+	SCF_UNDERFLOW_BIT		= 0x00010000,
+	SCF_SENT_DELAYED_TAS		= 0x00020000,
+	SCF_ALUA_NON_OPTIMIZED		= 0x00040000,
+	SCF_DELAYED_CMD_FROM_SAM_ATTR	= 0x00080000,
+	SCF_PASSTHROUGH_SG_TO_MEM	= 0x00100000,
+	SCF_PASSTHROUGH_CONTIG_TO_SG	= 0x00200000,
+	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
+	SCF_EMULATE_SYNC_CACHE		= 0x00800000,
+	SCF_EMULATE_CDB_ASYNC		= 0x01000000,
+	SCF_EMULATE_SYNC_UNMAP		= 0x02000000
+};
+
+/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
+enum transport_lunflags_table {
+	TRANSPORT_LUNFLAGS_NO_ACCESS		= 0x00,
+	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS	= 0x01,
+	TRANSPORT_LUNFLAGS_READ_ONLY		= 0x02,
+	TRANSPORT_LUNFLAGS_READ_WRITE		= 0x04,
+};
+
+/* struct se_device->dev_status */
+enum transport_device_status_table {
+	TRANSPORT_DEVICE_ACTIVATED		= 0x01,
+	TRANSPORT_DEVICE_DEACTIVATED		= 0x02,
+	TRANSPORT_DEVICE_QUEUE_FULL		= 0x04,
+	TRANSPORT_DEVICE_SHUTDOWN		= 0x08,
+	TRANSPORT_DEVICE_OFFLINE_ACTIVATED	= 0x10,
+	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED	= 0x20,
+};
+
+/*
+ * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
+ * to signal which ASC/ASCQ sense payload should be built.
+ */
+enum tcm_sense_reason_table {
+	TCM_NON_EXISTENT_LUN			= 0x01,
+	TCM_UNSUPPORTED_SCSI_OPCODE		= 0x02,
+	TCM_INCORRECT_AMOUNT_OF_DATA		= 0x03,
+	TCM_UNEXPECTED_UNSOLICITED_DATA		= 0x04,
+	TCM_SERVICE_CRC_ERROR			= 0x05,
+	TCM_SNACK_REJECTED			= 0x06,
+	TCM_SECTOR_COUNT_TOO_MANY		= 0x07,
+	TCM_INVALID_CDB_FIELD			= 0x08,
+	TCM_INVALID_PARAMETER_LIST		= 0x09,
+	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
+	TCM_UNKNOWN_MODE_PAGE			= 0x0b,
+	TCM_WRITE_PROTECTED			= 0x0c,
+	TCM_CHECK_CONDITION_ABORT_CMD		= 0x0d,
+	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
+	TCM_CHECK_CONDITION_NOT_READY		= 0x0f,
+};
+
+struct se_obj {
+	atomic_t obj_access_count;
+} ____cacheline_aligned;
+
+/*
+ * Used by TCM Core internally to signal if ALUA emulation is enabled or
+ * disabled, or running in TCM/pSCSI passthrough mode
+ */
+typedef enum {
+	SPC_ALUA_PASSTHROUGH,
+	SPC2_ALUA_DISABLED,
+	SPC3_ALUA_EMULATED
+} t10_alua_index_t;
+
+/*
+ * Used by TCM Core internally to signal if SAM Task Attribute emulation
+ * is enabled or disabled, or running in TCM/pSCSI passthrough mode
+ */
+typedef enum {
+	SAM_TASK_ATTR_PASSTHROUGH,
+	SAM_TASK_ATTR_UNTAGGED,
+	SAM_TASK_ATTR_EMULATED
+} t10_task_attr_index_t;
+
+struct se_cmd;
+
+struct t10_alua {
+	t10_alua_index_t alua_type;
+	/* ALUA Target Port Group ID */
+	u16	alua_tg_pt_gps_counter;
+	u32	alua_tg_pt_gps_count;
+	spinlock_t tg_pt_gps_lock;
+	struct se_subsystem_dev *t10_sub_dev;
+	/* Used for default ALUA Target Port Group */
+	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
+	/* Used for default ALUA Target Port Group ConfigFS group */
+	struct config_group alua_tg_pt_gps_group;
+	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
+	struct list_head tg_pt_gps_list;
+} ____cacheline_aligned;
+
+struct t10_alua_lu_gp {
+	u16	lu_gp_id;
+	int	lu_gp_valid_id;
+	u32	lu_gp_members;
+	atomic_t lu_gp_shutdown;
+	atomic_t lu_gp_ref_cnt;
+	spinlock_t lu_gp_lock;
+	struct config_group lu_gp_group;
+	struct list_head lu_gp_list;
+	struct list_head lu_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_lu_gp_member {
+	int lu_gp_assoc:1;
+	atomic_t lu_gp_mem_ref_cnt;
+	spinlock_t lu_gp_mem_lock;
+	struct t10_alua_lu_gp *lu_gp;
+	struct se_device *lu_gp_mem_dev;
+	struct list_head lu_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_tg_pt_gp {
+	u16	tg_pt_gp_id;
+	int	tg_pt_gp_valid_id;
+	int	tg_pt_gp_alua_access_status;
+	int	tg_pt_gp_alua_access_type;
+	int	tg_pt_gp_nonop_delay_msecs;
+	int	tg_pt_gp_trans_delay_msecs;
+	int	tg_pt_gp_pref;
+	int	tg_pt_gp_write_metadata;
+	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
+#define ALUA_MD_BUF_LEN				1024
+	u32	tg_pt_gp_md_buf_len;
+	u32	tg_pt_gp_members;
+	atomic_t tg_pt_gp_alua_access_state;
+	atomic_t tg_pt_gp_ref_cnt;
+	spinlock_t tg_pt_gp_lock;
+	struct mutex tg_pt_gp_md_mutex;
+	struct se_subsystem_dev *tg_pt_gp_su_dev;
+	struct config_group tg_pt_gp_group;
+	struct list_head tg_pt_gp_list;
+	struct list_head tg_pt_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_alua_tg_pt_gp_member {
+	int tg_pt_gp_assoc:1;
+	atomic_t tg_pt_gp_mem_ref_cnt;
+	spinlock_t tg_pt_gp_mem_lock;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	struct se_port *tg_pt;
+	struct list_head tg_pt_gp_mem_list;
+} ____cacheline_aligned;
+
+struct t10_vpd {
+	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
+	int protocol_identifier_set;
+	u32 protocol_identifier;
+	u32 device_identifier_code_set;
+	u32 association;
+	u32 device_identifier_type;
+	struct list_head vpd_list;
+} ____cacheline_aligned;
+
+struct t10_wwn {
+	unsigned char vendor[8];
+	unsigned char model[16];
+	unsigned char revision[4];
+	unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+	spinlock_t t10_vpd_lock;
+	struct se_subsystem_dev *t10_sub_dev;
+	struct config_group t10_wwn_group;
+	struct list_head t10_vpd_list;
+} ____cacheline_aligned;
+
+
+/*
+ * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
+ * emulation is enabled or disabled, or running in TCM/pSCSI passthrough
+ * mode
+ */
+typedef enum {
+	SPC_PASSTHROUGH,
+	SPC2_RESERVATIONS,
+	SPC3_PERSISTENT_RESERVATIONS
+} t10_reservations_index_t;
+
+struct t10_pr_registration {
+	/* Used for fabrics that contain WWN+ISID */
+#define PR_REG_ISID_LEN				16
+	/* PR_REG_ISID_LEN + ',i,0x' */
+#define PR_REG_ISID_ID_LEN			(PR_REG_ISID_LEN + 5)
+	char pr_reg_isid[PR_REG_ISID_LEN];
+	/* Used during APTPL metadata reading */
+#define PR_APTPL_MAX_IPORT_LEN			256
+	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
+	/* Used during APTPL metadata reading */
+#define PR_APTPL_MAX_TPORT_LEN			256
+	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
+	/* For writing out live meta data */
+	unsigned char *pr_aptpl_buf;
+	u16 pr_aptpl_rpti;
+	u16 pr_reg_tpgt;
+	/* Reservation affects all target ports */
+	int pr_reg_all_tg_pt;
+	/* Activate Persistence across Target Power Loss */
+	int pr_reg_aptpl;
+	int pr_res_holder;
+	int pr_res_type;
+	int pr_res_scope;
+	/* Used for fabric initiator WWPNs using an ISID */
+	int isid_present_at_reg:1;
+	u32 pr_res_mapped_lun;
+	u32 pr_aptpl_target_lun;
+	u32 pr_res_generation;
+	u64 pr_reg_bin_isid;
+	u64 pr_res_key;
+	atomic_t pr_res_holders;
+	struct se_node_acl *pr_reg_nacl;
+	struct se_dev_entry *pr_reg_deve;
+	struct se_lun *pr_reg_tg_pt_lun;
+	struct list_head pr_reg_list;
+	struct list_head pr_reg_abort_list;
+	struct list_head pr_reg_aptpl_list;
+	struct list_head pr_reg_atp_list;
+	struct list_head pr_reg_atp_mem_list;
+} ____cacheline_aligned;
+
+/*
+ * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
+ * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
+ * core_setup_reservations()
+ */
+struct t10_reservation_ops {
+	int (*t10_reservation_check)(struct se_cmd *, u32 *);
+	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
+	int (*t10_pr_register)(struct se_cmd *);
+	int (*t10_pr_clear)(struct se_cmd *);
+};
+
+struct t10_reservation_template {
+	/* Reservation affects all target ports */
+	int pr_all_tg_pt;
+	/* Activate Persistence across Target Power Loss enabled
+	 * for SCSI device */
+	int pr_aptpl_active;
+	/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
+#define PR_APTPL_BUF_LEN			8192
+	u32 pr_aptpl_buf_len;
+	u32 pr_generation;
+	t10_reservations_index_t res_type;
+	spinlock_t registration_lock;
+	spinlock_t aptpl_reg_lock;
+	/*
+	 * This will always be set by one individual I_T Nexus.
+	 * However, with all_tg_pt=1, other I_T Nexuses from the
+	 * same initiator can access PR reg/res info on a different
+	 * target port.
+	 *
+	 * There is also the 'All Registrants' case, where there is
+	 * a single *pr_res_holder of the reservation, but all
+	 * registrations are considered reservation holders.
+	 */
+	struct se_node_acl *pr_res_holder;
+	struct list_head registration_list;
+	struct list_head aptpl_reg_list;
+	struct t10_reservation_ops pr_ops;
+} ____cacheline_aligned;
+
+struct se_queue_req {
+	int			state;
+	void			*cmd;
+	struct list_head	qr_list;
+} ____cacheline_aligned;
+
+struct se_queue_obj {
+	atomic_t		queue_cnt;
+	spinlock_t		cmd_queue_lock;
+	struct list_head	qobj_list;
+	wait_queue_head_t	thread_wq;
+} ____cacheline_aligned;
+
+/*
+ * Allocated once per struct se_cmd to hold all extra struct se_task
+ * metadata.  This structure is set up and allocated in
+ * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
+ */
+struct se_transport_task {
+	unsigned char		*t_task_cdb;
+	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+	unsigned long long	t_task_lba;
+	int			t_tasks_failed;
+	int			t_tasks_fua;
+	int			t_tasks_bidi:1;
+	u32			t_task_cdbs;
+	u32			t_tasks_check;
+	u32			t_tasks_no;
+	u32			t_tasks_sectors;
+	u32			t_tasks_se_num;
+	u32			t_tasks_se_bidi_num;
+	u32			t_tasks_sg_chained_no;
+	atomic_t		t_fe_count;
+	atomic_t		t_se_count;
+	atomic_t		t_task_cdbs_left;
+	atomic_t		t_task_cdbs_ex_left;
+	atomic_t		t_task_cdbs_timeout_left;
+	atomic_t		t_task_cdbs_sent;
+	atomic_t		t_transport_aborted;
+	atomic_t		t_transport_active;
+	atomic_t		t_transport_complete;
+	atomic_t		t_transport_queue_active;
+	atomic_t		t_transport_sent;
+	atomic_t		t_transport_stop;
+	atomic_t		t_transport_timeout;
+	atomic_t		transport_dev_active;
+	atomic_t		transport_lun_active;
+	atomic_t		transport_lun_fe_stop;
+	atomic_t		transport_lun_stop;
+	spinlock_t		t_state_lock;
+	struct completion	t_transport_stop_comp;
+	struct completion	transport_lun_fe_stop_comp;
+	struct completion	transport_lun_stop_comp;
+	struct scatterlist	*t_tasks_sg_chained;
+	struct scatterlist	t_tasks_sg_bounce;
+	void			*t_task_buf;
+	/*
+	 * Used for pre-registered fabric SGL passthrough WRITE and READ
+	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+	 * and other HW target mode fabric modules.
+	 */
+	struct scatterlist	*t_task_pt_sgl;
+	struct list_head	*t_mem_list;
+	/* Used for BIDI READ */
+	struct list_head	*t_mem_bidi_list;
+	struct list_head	t_task_list;
+} ____cacheline_aligned;
+
+struct se_task {
+	unsigned char	task_sense;
+	struct scatterlist *task_sg;
+	struct scatterlist *task_sg_bidi;
+	u8		task_scsi_status;
+	u8		task_flags;
+	int		task_error_status;
+	int		task_state_flags;
+	int		task_padded_sg:1;
+	unsigned long long	task_lba;
+	u32		task_no;
+	u32		task_sectors;
+	u32		task_size;
+	u32		task_sg_num;
+	u32		task_sg_offset;
+	enum dma_data_direction	task_data_direction;
+	struct se_cmd *task_se_cmd;
+	struct se_device	*se_dev;
+	struct completion	task_stop_comp;
+	atomic_t	task_active;
+	atomic_t	task_execute_queue;
+	atomic_t	task_timeout;
+	atomic_t	task_sent;
+	atomic_t	task_stop;
+	atomic_t	task_state_active;
+	struct timer_list	task_timer;
+	struct se_device *se_obj_ptr;
+	struct list_head t_list;
+	struct list_head t_execute_list;
+	struct list_head t_state_list;
+} ____cacheline_aligned;
+
+#define TASK_CMD(task)	((struct se_cmd *)task->task_se_cmd)
+#define TASK_DEV(task)	((struct se_device *)task->se_dev)
+
+struct se_cmd {
+	/* SAM response code being sent to initiator */
+	u8			scsi_status;
+	u8			scsi_asc;
+	u8			scsi_ascq;
+	u8			scsi_sense_reason;
+	u16			scsi_sense_length;
+	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
+	int			alua_nonop_delay;
+	/* See include/linux/dma-mapping.h */
+	enum dma_data_direction	data_direction;
+	/* For SAM Task Attribute */
+	int			sam_task_attr;
+	/* Transport protocol dependent state, see transport_state_table */
+	enum transport_state_table t_state;
+	/* Transport protocol dependent state for out of order CmdSNs */
+	int			deferred_t_state;
+	/* Transport specific error status */
+	int			transport_error_status;
+	/* See se_cmd_flags_table */
+	u32			se_cmd_flags;
+	u32			se_ordered_id;
+	/* Total size in bytes associated with command */
+	u32			data_length;
+	/* SCSI Presented Data Transfer Length */
+	u32			cmd_spdtl;
+	u32			residual_count;
+	u32			orig_fe_lun;
+	/* Persistent Reservation key */
+	u64			pr_res_key;
+	atomic_t                transport_sent;
+	/* Used for sense data */
+	void			*sense_buffer;
+	struct list_head	se_delayed_list;
+	struct list_head	se_ordered_list;
+	struct list_head	se_lun_list;
+	struct se_device      *se_dev;
+	struct se_dev_entry   *se_deve;
+	struct se_device	*se_obj_ptr;
+	struct se_device	*se_orig_obj_ptr;
+	struct se_lun		*se_lun;
+	/* Only used for internal passthrough and legacy TCM fabric modules */
+	struct se_session	*se_sess;
+	struct se_tmr_req	*se_tmr_req;
+	/* t_task is set to &t_task_backstore in transport_init_se_cmd() */
+	struct se_transport_task *t_task;
+	struct se_transport_task t_task_backstore;
+	struct target_core_fabric_ops *se_tfo;
+	int (*transport_emulate_cdb)(struct se_cmd *);
+	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
+	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
+	void (*transport_complete_callback)(struct se_cmd *);
+} ____cacheline_aligned;
+
+#define T_TASK(cmd)     ((struct se_transport_task *)(cmd->t_task))
+#define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo)
+
+struct se_tmr_req {
+	/* Task Management function to be performed */
+	u8			function;
+	/* Task Management response to send */
+	u8			response;
+	int			call_transport;
+	/* Reference to ITT that Task Mgmt should be performed upon */
+	u32			ref_task_tag;
+	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
+	u64			ref_task_lun;
+	void 			*fabric_tmr_ptr;
+	struct se_cmd		*task_cmd;
+	struct se_cmd		*ref_cmd;
+	struct se_device	*tmr_dev;
+	struct se_lun		*tmr_lun;
+	struct list_head	tmr_list;
+} ____cacheline_aligned;
+
+struct se_ua {
+	u8			ua_asc;
+	u8			ua_ascq;
+	struct se_node_acl	*ua_nacl;
+	struct list_head	ua_dev_list;
+	struct list_head	ua_nacl_list;
+} ____cacheline_aligned;
+
+struct se_node_acl {
+	char			initiatorname[TRANSPORT_IQN_LEN];
+	/* Used to signal demo mode created ACL, disabled by default */
+	int			dynamic_node_acl:1;
+	u32			queue_depth;
+	u32			acl_index;
+	u64			num_cmds;
+	u64			read_bytes;
+	u64			write_bytes;
+	spinlock_t		stats_lock;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		acl_pr_ref_count;
+	/* Used for MIB access */
+	atomic_t		mib_ref_count;
+	struct se_dev_entry	*device_list;
+	struct se_session	*nacl_sess;
+	struct se_portal_group *se_tpg;
+	spinlock_t		device_list_lock;
+	spinlock_t		nacl_sess_lock;
+	struct config_group	acl_group;
+	struct config_group	acl_attrib_group;
+	struct config_group	acl_auth_group;
+	struct config_group	acl_param_group;
+	struct config_group	*acl_default_groups[4];
+	struct list_head	acl_list;
+	struct list_head	acl_sess_list;
+} ____cacheline_aligned;
+
+struct se_session {
+	/* Used for MIB access */
+	atomic_t		mib_ref_count;
+	u64			sess_bin_isid;
+	struct se_node_acl	*se_node_acl;
+	struct se_portal_group *se_tpg;
+	void			*fabric_sess_ptr;
+	struct list_head	sess_list;
+	struct list_head	sess_acl_list;
+} ____cacheline_aligned;
+
+#define SE_SESS(cmd)		((struct se_session *)(cmd)->se_sess)
+#define SE_NODE_ACL(sess)	((struct se_node_acl *)(sess)->se_node_acl)
+
+struct se_device;
+struct se_transform_info;
+struct scatterlist;
+
+struct se_lun_acl {
+	char			initiatorname[TRANSPORT_IQN_LEN];
+	u32			mapped_lun;
+	struct se_node_acl	*se_lun_nacl;
+	struct se_lun		*se_lun;
+	struct list_head	lacl_list;
+	struct config_group	se_lun_group;
+}  ____cacheline_aligned;
+
+struct se_dev_entry {
+	int			def_pr_registered:1;
+	/* See transport_lunflags_table */
+	u32			lun_flags;
+	u32			deve_cmds;
+	u32			mapped_lun;
+	u32			average_bytes;
+	u32			last_byte_count;
+	u32			total_cmds;
+	u32			total_bytes;
+	u64			pr_res_key;
+	u64			creation_time;
+	u32			attach_count;
+	u64			read_bytes;
+	u64			write_bytes;
+	atomic_t		ua_count;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		pr_ref_count;
+	struct se_lun_acl	*se_lun_acl;
+	spinlock_t		ua_lock;
+	struct se_lun		*se_lun;
+	struct list_head	alua_port_list;
+	struct list_head	ua_list;
+}  ____cacheline_aligned;
+
+struct se_dev_limits {
+	/* Max supported HW queue depth */
+	u32		hw_queue_depth;
+	/* Max supported virtual queue depth */
+	u32		queue_depth;
+	/* From include/linux/blkdev.h for the other HW/SW limits. */
+	struct queue_limits limits;
+} ____cacheline_aligned;
+
+struct se_dev_attrib {
+	int		emulate_dpo;
+	int		emulate_fua_write;
+	int		emulate_fua_read;
+	int		emulate_write_cache;
+	int		emulate_ua_intlck_ctrl;
+	int		emulate_tas;
+	int		emulate_tpu;
+	int		emulate_tpws;
+	int		emulate_reservations;
+	int		emulate_alua;
+	int		enforce_pr_isids;
+	u32		hw_block_size;
+	u32		block_size;
+	u32		hw_max_sectors;
+	u32		max_sectors;
+	u32		optimal_sectors;
+	u32		hw_queue_depth;
+	u32		queue_depth;
+	u32		task_timeout;
+	u32		max_unmap_lba_count;
+	u32		max_unmap_block_desc_count;
+	u32		unmap_granularity;
+	u32		unmap_granularity_alignment;
+	struct se_subsystem_dev *da_sub_dev;
+	struct config_group da_group;
+} ____cacheline_aligned;
+
+struct se_subsystem_dev {
+/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
+#define SE_DEV_ALIAS_LEN		512
+	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
+/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
+#define SE_UDEV_PATH_LEN		512
+	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
+	u32		su_dev_flags;
+	struct se_hba *se_dev_hba;
+	struct se_device *se_dev_ptr;
+	struct se_dev_attrib se_dev_attrib;
+	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
+	struct t10_alua	t10_alua;
+	/* T10 Inquiry and VPD WWN Information */
+	struct t10_wwn	t10_wwn;
+	/* T10 SPC-2 + SPC-3 Reservations */
+	struct t10_reservation_template t10_reservation;
+	spinlock_t      se_dev_lock;
+	void            *se_dev_su_ptr;
+	struct list_head g_se_dev_list;
+	struct config_group se_dev_group;
+	/* For T10 Reservations */
+	struct config_group se_dev_pr_group;
+} ____cacheline_aligned;
+
+#define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)
+#define T10_RES(su_dev)		(&(su_dev)->t10_reservation)
+#define T10_PR_OPS(su_dev)	(&(su_dev)->t10_reservation.pr_ops)
+
+struct se_device {
+	/* Set to 1 if thread is NOT sleeping on thread_sem */
+	u8			thread_active;
+	u8			dev_status_timer_flags;
+	/* RELATIVE TARGET PORT IDENTIFIER Counter */
+	u16			dev_rpti_counter;
+	/* Used for SAM Task Attribute ordering */
+	u32			dev_cur_ordered_id;
+	u32			dev_flags;
+	u32			dev_port_count;
+	/* See transport_device_status_table */
+	u32			dev_status;
+	u32			dev_tcq_window_closed;
+	/* Physical device queue depth */
+	u32			queue_depth;
+	/* Used for SPC-2 reservations enforcement of ISIDs */
+	u64			dev_res_bin_isid;
+	t10_task_attr_index_t	dev_task_attr_type;
+	/* Pointer to transport specific device structure */
+	void 			*dev_ptr;
+	u32			dev_index;
+	u64			creation_time;
+	u32			num_resets;
+	u64			num_cmds;
+	u64			read_bytes;
+	u64			write_bytes;
+	spinlock_t		stats_lock;
+	/* Active commands on this virtual SE device */
+	atomic_t		active_cmds;
+	atomic_t		simple_cmds;
+	atomic_t		depth_left;
+	atomic_t		dev_ordered_id;
+	atomic_t		dev_tur_active;
+	atomic_t		execute_tasks;
+	atomic_t		dev_status_thr_count;
+	atomic_t		dev_hoq_count;
+	atomic_t		dev_ordered_sync;
+	struct se_obj		dev_obj;
+	struct se_obj		dev_access_obj;
+	struct se_obj		dev_export_obj;
+	struct se_queue_obj	*dev_queue_obj;
+	struct se_queue_obj	*dev_status_queue_obj;
+	spinlock_t		delayed_cmd_lock;
+	spinlock_t		ordered_cmd_lock;
+	spinlock_t		execute_task_lock;
+	spinlock_t		state_task_lock;
+	spinlock_t		dev_alua_lock;
+	spinlock_t		dev_reservation_lock;
+	spinlock_t		dev_state_lock;
+	spinlock_t		dev_status_lock;
+	spinlock_t		dev_status_thr_lock;
+	spinlock_t		se_port_lock;
+	spinlock_t		se_tmr_lock;
+	/* Used for legacy SPC-2 reservations */
+	struct se_node_acl	*dev_reserved_node_acl;
+	/* Used for ALUA Logical Unit Group membership */
+	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
+	/* Used for SPC-3 Persistent Reservations */
+	struct t10_pr_registration *dev_pr_res_holder;
+	struct list_head	dev_sep_list;
+	struct list_head	dev_tmr_list;
+	struct timer_list	dev_status_timer;
+	/* Pointer to descriptor for processing thread */
+	struct task_struct	*process_thread;
+	pid_t			process_thread_pid;
+	struct task_struct		*dev_mgmt_thread;
+	struct list_head	delayed_cmd_list;
+	struct list_head	ordered_cmd_list;
+	struct list_head	execute_task_list;
+	struct list_head	state_task_list;
+	/* Pointer to associated SE HBA */
+	struct se_hba		*se_hba;
+	struct se_subsystem_dev *se_sub_dev;
+	/* Pointer to template of function pointers for transport */
+	struct se_subsystem_api *transport;
+	/* Linked list for struct se_hba struct se_device list */
+	struct list_head	dev_list;
+	/* Linked list for struct se_global->g_se_dev_list */
+	struct list_head	g_se_dev_list;
+}  ____cacheline_aligned;
+
+#define SE_DEV(cmd)		((struct se_device *)(cmd)->se_lun->lun_se_dev)
+#define SU_DEV(dev)		((struct se_subsystem_dev *)(dev)->se_sub_dev)
+#define DEV_ATTRIB(dev)		(&(dev)->se_sub_dev->se_dev_attrib)
+#define DEV_T10_WWN(dev)	(&(dev)->se_sub_dev->t10_wwn)
+
+struct se_hba {
+	u16			hba_tpgt;
+	u32			hba_id;
+	/* See hba_flags_table */
+	u32			hba_flags;
+	/* Virtual iSCSI devices attached. */
+	u32			dev_count;
+	u32			hba_index;
+	atomic_t		dev_mib_access_count;
+	atomic_t		load_balance_queue;
+	atomic_t		left_queue_depth;
+	/* Maximum queue depth the HBA can handle. */
+	atomic_t		max_queue_depth;
+	/* Pointer to transport specific host structure. */
+	void			*hba_ptr;
+	/* Linked list for struct se_device */
+	struct list_head	hba_dev_list;
+	struct list_head	hba_list;
+	spinlock_t		device_lock;
+	spinlock_t		hba_queue_lock;
+	struct config_group	hba_group;
+	struct mutex		hba_access_mutex;
+	struct se_subsystem_api *transport;
+}  ____cacheline_aligned;
+
+#define SE_HBA(d)		((struct se_hba *)(d)->se_hba)
+
+struct se_lun {
+	/* See transport_lun_status_table */
+	enum transport_lun_status_table lun_status;
+	u32			lun_access;
+	u32			lun_flags;
+	u32			unpacked_lun;
+	atomic_t		lun_acl_count;
+	spinlock_t		lun_acl_lock;
+	spinlock_t		lun_cmd_lock;
+	spinlock_t		lun_sep_lock;
+	struct completion	lun_shutdown_comp;
+	struct list_head	lun_cmd_list;
+	struct list_head	lun_acl_list;
+	struct se_device	*lun_se_dev;
+	struct config_group	lun_group;
+	struct se_port	*lun_sep;
+} ____cacheline_aligned;
+
+#define SE_LUN(c)		((struct se_lun *)(c)->se_lun)
+
+struct se_port {
+	/* RELATIVE TARGET PORT IDENTIFIER */
+	u16		sep_rtpi;
+	int		sep_tg_pt_secondary_stat;
+	int		sep_tg_pt_secondary_write_md;
+	u32		sep_index;
+	struct scsi_port_stats sep_stats;
+	/* Used for ALUA Target Port Groups membership */
+	atomic_t	sep_tg_pt_gp_active;
+	atomic_t	sep_tg_pt_secondary_offline;
+	/* Used for PR ALL_TG_PT=1 */
+	atomic_t	sep_tg_pt_ref_cnt;
+	spinlock_t	sep_alua_lock;
+	struct mutex	sep_tg_pt_md_mutex;
+	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
+	struct se_lun *sep_lun;
+	struct se_portal_group *sep_tpg;
+	struct list_head sep_alua_list;
+	struct list_head sep_list;
+} ____cacheline_aligned;
+
+struct se_tpg_np {
+	struct config_group	tpg_np_group;
+} ____cacheline_aligned;
+
+struct se_portal_group {
+	/* Type of target portal group, see transport_tpg_type_table */
+	enum transport_tpg_type_table se_tpg_type;
+	/* Number of ACLed Initiator Nodes for this TPG */
+	u32			num_node_acls;
+	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
+	atomic_t		tpg_pr_ref_count;
+	/* Spinlock for adding/removing ACLed Nodes */
+	spinlock_t		acl_node_lock;
+	/* Spinlock for adding/removing sessions */
+	spinlock_t		session_lock;
+	spinlock_t		tpg_lun_lock;
+	/* Pointer to $FABRIC_MOD portal group */
+	void			*se_tpg_fabric_ptr;
+	struct list_head	se_tpg_list;
+	/* linked list for initiator ACL list */
+	struct list_head	acl_node_list;
+	struct se_lun		*tpg_lun_list;
+	struct se_lun		tpg_virt_lun0;
+	/* List of TCM sessions associated with this TPG */
+	struct list_head	tpg_sess_list;
+	/* Pointer to $FABRIC_MOD dependent code */
+	struct target_core_fabric_ops *se_tpg_tfo;
+	struct se_wwn		*se_tpg_wwn;
+	struct config_group	tpg_group;
+	struct config_group	*tpg_default_groups[6];
+	struct config_group	tpg_lun_group;
+	struct config_group	tpg_np_group;
+	struct config_group	tpg_acl_group;
+	struct config_group	tpg_attrib_group;
+	struct config_group	tpg_param_group;
+} ____cacheline_aligned;
+
+#define TPG_TFO(se_tpg)	((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo)
+
+struct se_wwn {
+	struct target_fabric_configfs *wwn_tf;
+	struct config_group	wwn_group;
+} ____cacheline_aligned;
+
+struct se_global {
+	u16			alua_lu_gps_counter;
+	int			g_sub_api_initialized;
+	u32			in_shutdown;
+	u32			alua_lu_gps_count;
+	u32			g_hba_id_counter;
+	struct config_group	target_core_hbagroup;
+	struct config_group	alua_group;
+	struct config_group	alua_lu_gps_group;
+	struct list_head	g_lu_gps_list;
+	struct list_head	g_se_tpg_list;
+	struct list_head	g_hba_list;
+	struct list_head	g_se_dev_list;
+	struct se_hba		*g_lun0_hba;
+	struct se_subsystem_dev *g_lun0_su_dev;
+	struct se_device	*g_lun0_dev;
+	struct t10_alua_lu_gp	*default_lu_gp;
+	spinlock_t		g_device_lock;
+	spinlock_t		hba_lock;
+	spinlock_t		se_tpg_lock;
+	spinlock_t		lu_gps_lock;
+	spinlock_t		plugin_class_lock;
+} ____cacheline_aligned;
+
+#endif /* TARGET_CORE_BASE_H */
diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h
new file mode 100644
index 0000000..40e6e74
--- /dev/null
+++ b/include/target/target_core_configfs.h
@@ -0,0 +1,52 @@
+#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
+
+#define TARGET_CORE_CONFIG_ROOT	"/sys/kernel/config"
+
+#define TARGET_CORE_NAME_MAX_LEN	64
+#define TARGET_FABRIC_NAME_SIZE		32
+
+extern struct target_fabric_configfs *target_fabric_configfs_init(
+				struct module *, const char *);
+extern void target_fabric_configfs_free(struct target_fabric_configfs *);
+extern int target_fabric_configfs_register(struct target_fabric_configfs *);
+extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
+
+struct target_fabric_configfs_template {
+	struct config_item_type tfc_discovery_cit;
+	struct config_item_type	tfc_wwn_cit;
+	struct config_item_type tfc_tpg_cit;
+	struct config_item_type tfc_tpg_base_cit;
+	struct config_item_type tfc_tpg_lun_cit;
+	struct config_item_type tfc_tpg_port_cit;
+	struct config_item_type tfc_tpg_np_cit;
+	struct config_item_type tfc_tpg_np_base_cit;
+	struct config_item_type tfc_tpg_attrib_cit;
+	struct config_item_type tfc_tpg_param_cit;
+	struct config_item_type tfc_tpg_nacl_cit;
+	struct config_item_type tfc_tpg_nacl_base_cit;
+	struct config_item_type tfc_tpg_nacl_attrib_cit;
+	struct config_item_type tfc_tpg_nacl_auth_cit;
+	struct config_item_type tfc_tpg_nacl_param_cit;
+	struct config_item_type tfc_tpg_mappedlun_cit;
+};
+
+struct target_fabric_configfs {
+	char			tf_name[TARGET_FABRIC_NAME_SIZE];
+	atomic_t		tf_access_cnt;
+	struct list_head	tf_list;
+	struct config_group	tf_group;
+	struct config_group	tf_disc_group;
+	struct config_group	*tf_default_groups[2];
+	/* Pointer to fabric's config_item */
+	struct config_item	*tf_fabric;
+	/* Passed from fabric modules */
+	struct config_item_type	*tf_fabric_cit;
+	/* Pointer to target core subsystem */
+	struct configfs_subsystem *tf_subsys;
+	/* Pointer to fabric's struct module */
+	struct module *tf_module;
+	struct target_core_fabric_ops tf_ops;
+	struct target_fabric_configfs_template tf_cit_tmpl;
+};
+
+#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
new file mode 100644
index 0000000..52b18a5
--- /dev/null
+++ b/include/target/target_core_device.h
@@ -0,0 +1,61 @@
+#ifndef TARGET_CORE_DEVICE_H
+#define TARGET_CORE_DEVICE_H
+
+extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
+extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern struct se_dev_entry *core_get_se_deve_from_rtpi(
+					struct se_node_acl *, u16);
+extern int core_free_device_list_for_node(struct se_node_acl *,
+					struct se_portal_group *);
+extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
+extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
+					u32, struct se_node_acl *,
+					struct se_portal_group *, int);
+extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+extern int core_dev_export(struct se_device *, struct se_portal_group *,
+					struct se_lun *);
+extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
+					struct se_lun *);
+extern int transport_core_report_lun_response(struct se_cmd *);
+extern void se_release_device_for_hba(struct se_device *);
+extern void se_release_vpd_for_dev(struct se_device *);
+extern void se_clear_dev_ports(struct se_device *);
+extern int se_free_virtual_device(struct se_device *, struct se_hba *);
+extern int se_dev_check_online(struct se_device *);
+extern int se_dev_check_shutdown(struct se_device *);
+extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+extern int se_dev_set_task_timeout(struct se_device *, u32);
+extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+extern int se_dev_set_unmap_granularity(struct se_device *, u32);
+extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+extern int se_dev_set_emulate_dpo(struct se_device *, int);
+extern int se_dev_set_emulate_fua_write(struct se_device *, int);
+extern int se_dev_set_emulate_fua_read(struct se_device *, int);
+extern int se_dev_set_emulate_write_cache(struct se_device *, int);
+extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+extern int se_dev_set_emulate_tas(struct se_device *, int);
+extern int se_dev_set_emulate_tpu(struct se_device *, int);
+extern int se_dev_set_emulate_tpws(struct se_device *, int);
+extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
+extern int se_dev_set_queue_depth(struct se_device *, u32);
+extern int se_dev_set_max_sectors(struct se_device *, u32);
+extern int se_dev_set_optimal_sectors(struct se_device *, u32);
+extern int se_dev_set_block_size(struct se_device *, u32);
+extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+					struct se_device *, u32);
+extern int core_dev_del_lun(struct se_portal_group *, u32);
+extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+							u32, char *, int *);
+extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun_acl *, u32, u32);
+extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun *, struct se_lun_acl *);
+extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+						struct se_lun_acl *lacl);
+extern int core_dev_setup_virtual_lun0(void);
+extern void core_dev_release_virtual_lun0(void);
+
+#endif /* TARGET_CORE_DEVICE_H */
diff --git a/include/target/target_core_fabric_configfs.h b/include/target/target_core_fabric_configfs.h
new file mode 100644
index 0000000..a26fb75
--- /dev/null
+++ b/include/target/target_core_fabric_configfs.h
@@ -0,0 +1,106 @@
+/*
+ * Used for tfc_wwn_cit attributes
+ */
+
+#include <target/configfs_macros.h>
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl);
+#define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_attrib_show_##_name,				\
+	_fabric##_nacl_attrib_store_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl);
+#define TF_NACL_AUTH_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_auth_show_##_name,				\
+	_fabric##_nacl_auth_store_##_name);
+
+#define TF_NACL_AUTH_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_auth_show_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl);
+#define TF_NACL_PARAM_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_param_show_##_name,				\
+	_fabric##_nacl_param_store_##_name);
+
+#define TF_NACL_PARAM_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_param_show_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl);
+#define TF_NACL_BASE_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_nacl_show_##_name,					\
+	_fabric##_nacl_store_##_name);
+
+#define TF_NACL_BASE_ATTR_RO(_fabric, _name)				\
+static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_nacl_show_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np);
+#define TF_NP_BASE_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_np_base_attribute _fabric##_np_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_np_show_##_name,					\
+	_fabric##_np_store_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group);
+#define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_attrib_show_##_name,				\
+	_fabric##_tpg_attrib_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group);
+#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode)			\
+static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_param_show_##_name,				\
+	_fabric##_tpg_param_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group);
+#define TF_TPG_BASE_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_tpg_attribute _fabric##_tpg_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_tpg_show_##_name,					\
+	_fabric##_tpg_store_##_name);
+
+
+CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
+#define TF_WWN_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_wwn_show_attr_##_name,				\
+	_fabric##_wwn_store_attr_##_name);
+
+#define TF_WWN_ATTR_RO(_fabric, _name)					\
+static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_wwn_show_attr_##_name);
+
+CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs);
+#define TF_DISC_ATTR(_fabric, _name, _mode)				\
+static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
+	__CONFIGFS_EATTR(_name, _mode,					\
+	_fabric##_disc_show_##_name,					\
+	_fabric##_disc_store_##_name);
+
+#define TF_DISC_ATTR_RO(_fabric, _name)					\
+static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
+	__CONFIGFS_EATTR_RO(_name,					\
+	_fabric##_disc_show_##_name);
+
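+/*
+ * Illustrative usage sketch (hypothetical fabric "demo", not an in-tree
+ * module): a fabric wanting a "foo" attribute in its TPG base group
+ * provides show/store callbacks and instantiates the attribute with:
+ *
+ *	static ssize_t demo_tpg_show_foo(struct se_portal_group *, char *);
+ *	static ssize_t demo_tpg_store_foo(struct se_portal_group *,
+ *					  const char *, size_t);
+ *
+ *	TF_TPG_BASE_ATTR(demo, foo, S_IRUGO | S_IWUSR);
+ *
+ * which emits "static struct target_fabric_tpg_attribute demo_tpg_foo"
+ * bound to those callbacks.  The exact show/store prototypes are assumed
+ * here; they are dictated by CONFIGFS_EATTR_STRUCT()/__CONFIGFS_EATTR()
+ * in configfs_macros.h.
+ */
+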
+extern int target_fabric_setup_cits(struct target_fabric_configfs *);
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h
new file mode 100644
index 0000000..c2f8d0e
--- /dev/null
+++ b/include/target/target_core_fabric_lib.h
@@ -0,0 +1,28 @@
+#ifndef TARGET_CORE_FABRIC_LIB_H
+#define TARGET_CORE_FABRIC_LIB_H
+
+extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
+extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *, unsigned char *);
+extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+			struct t10_pr_registration *, int *);
+extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
+			const char *, u32 *, char **);
+
+#endif /* TARGET_CORE_FABRIC_LIB_H */
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
new file mode 100644
index 0000000..f3ac12b
--- /dev/null
+++ b/include/target/target_core_fabric_ops.h
@@ -0,0 +1,100 @@
+/* Defined in target_core_configfs.h */
+struct target_fabric_configfs;
+
+struct target_core_fabric_ops {
+	struct configfs_subsystem *tf_subsys;
+	/*
+	 * Optional to signal struct se_task->task_sg[] padding entries
+	 * for scatterlist chaining using transport_do_task_sg_chain(),
+	 * disabled by default
+	 */
+	int task_sg_chaining:1;
+	char *(*get_fabric_name)(void);
+	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
+	char *(*tpg_get_wwn)(struct se_portal_group *);
+	u16 (*tpg_get_tag)(struct se_portal_group *);
+	u32 (*tpg_get_default_depth)(struct se_portal_group *);
+	u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *,
+				unsigned char *);
+	u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
+				struct se_node_acl *,
+				struct t10_pr_registration *, int *);
+	char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
+				const char *, u32 *, char **);
+	int (*tpg_check_demo_mode)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
+	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
+	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+	struct se_node_acl *(*tpg_alloc_fabric_acl)(
+					struct se_portal_group *);
+	void (*tpg_release_fabric_acl)(struct se_portal_group *,
+					struct se_node_acl *);
+	u32 (*tpg_get_inst_index)(struct se_portal_group *);
+	/*
+	 * Optional function pointer for TCM to perform command map
+	 * from TCM processing thread context, for those struct se_cmd
+	 * initially allocated in interrupt context.
+	 */
+	int (*new_cmd_map)(struct se_cmd *);
+	/*
+	 * Optional function pointer for TCM fabric modules that use
+	 * Linux/NET sockets to allocate a struct iovec array for struct se_cmd
+	 */
+	int (*alloc_cmd_iovecs)(struct se_cmd *);
+	/*
+	 * Optional to release struct se_cmd and the fabric-dependent allocated
+	 * I/O descriptor in transport_cmd_check_stop()
+	 */
+	void (*check_stop_free)(struct se_cmd *);
+	void (*release_cmd_to_pool)(struct se_cmd *);
+	void (*release_cmd_direct)(struct se_cmd *);
+	/*
+	 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+	 */
+	int (*shutdown_session)(struct se_session *);
+	void (*close_session)(struct se_session *);
+	void (*stop_session)(struct se_session *, int, int);
+	void (*fall_back_to_erl0)(struct se_session *);
+	int (*sess_logged_in)(struct se_session *);
+	u32 (*sess_get_index)(struct se_session *);
+	/*
+	 * Used only for SCSI fabrics that contain multi-value TransportIDs
+	 * (like iSCSI).  All other SCSI fabrics should set this to NULL.
+	 */
+	u32 (*sess_get_initiator_sid)(struct se_session *,
+				      unsigned char *, u32);
+	int (*write_pending)(struct se_cmd *);
+	int (*write_pending_status)(struct se_cmd *);
+	void (*set_default_node_attributes)(struct se_node_acl *);
+	u32 (*get_task_tag)(struct se_cmd *);
+	int (*get_cmd_state)(struct se_cmd *);
+	void (*new_cmd_failure)(struct se_cmd *);
+	int (*queue_data_in)(struct se_cmd *);
+	int (*queue_status)(struct se_cmd *);
+	int (*queue_tm_rsp)(struct se_cmd *);
+	u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
+	u16 (*get_fabric_sense_len)(void);
+	int (*is_state_remove)(struct se_cmd *);
+	u64 (*pack_lun)(unsigned int);
+	/*
+	 * fabric module calls for target_core_fabric_configfs.c
+	 */
+	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
+				struct config_group *, const char *);
+	void (*fabric_drop_wwn)(struct se_wwn *);
+	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
+				struct config_group *, const char *);
+	void (*fabric_drop_tpg)(struct se_portal_group *);
+	int (*fabric_post_link)(struct se_portal_group *,
+				struct se_lun *);
+	void (*fabric_pre_unlink)(struct se_portal_group *,
+				struct se_lun *);
+	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_np)(struct se_tpg_np *);
+	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
+				struct config_group *, const char *);
+	void (*fabric_drop_nodeacl)(struct se_node_acl *);
+};
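+
+/*
+ * Illustrative sketch only (hypothetical fabric "demo", heavily abridged):
+ * a fabric module typically fills one static instance of this template,
+ * for example
+ *
+ *	static struct target_core_fabric_ops demo_ops = {
+ *		.get_fabric_name	= demo_get_fabric_name,
+ *		.tpg_get_wwn		= demo_tpg_get_wwn,
+ *		.tpg_get_tag		= demo_tpg_get_tag,
+ *		.queue_data_in		= demo_queue_data_in,
+ *		.queue_status		= demo_queue_status,
+ *		.queue_tm_rsp		= demo_queue_tm_rsp,
+ *		...
+ *	};
+ *
+ * The target core keeps the fabric's ops in struct target_fabric_configfs
+ * ->tf_ops (see target_core_configfs.h); consult the registration code for
+ * which callbacks are mandatory.
+ */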
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
new file mode 100644
index 0000000..6c8248b
--- /dev/null
+++ b/include/target/target_core_tmr.h
@@ -0,0 +1,43 @@
+#ifndef TARGET_CORE_TMR_H
+#define TARGET_CORE_TMR_H
+
+/* task management function values */
+#ifdef ABORT_TASK
+#undef ABORT_TASK
+#endif /* ABORT_TASK */
+#define ABORT_TASK				1
+#ifdef ABORT_TASK_SET
+#undef ABORT_TASK_SET
+#endif /* ABORT_TASK_SET */
+#define ABORT_TASK_SET				2
+#ifdef CLEAR_ACA
+#undef CLEAR_ACA
+#endif /* CLEAR_ACA */
+#define CLEAR_ACA				3
+#ifdef CLEAR_TASK_SET
+#undef CLEAR_TASK_SET
+#endif /* CLEAR_TASK_SET */
+#define CLEAR_TASK_SET				4
+#define LUN_RESET				5
+#define TARGET_WARM_RESET			6
+#define TARGET_COLD_RESET			7
+#define TASK_REASSIGN				8
+
+/* task management response values */
+#define TMR_FUNCTION_COMPLETE			0
+#define TMR_TASK_DOES_NOT_EXIST			1
+#define TMR_LUN_DOES_NOT_EXIST			2
+#define TMR_TASK_STILL_ALLEGIANT		3
+#define TMR_TASK_FAILOVER_NOT_SUPPORTED		4
+#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	5
+#define TMR_FUNCTION_AUTHORIZATION_FAILED	6
+#define TMR_FUNCTION_REJECTED			255
+
+extern struct kmem_cache *se_tmr_req_cache;
+
+extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
+extern void core_tmr_release_req(struct se_tmr_req *);
+extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+				struct list_head *, struct se_cmd *);
+
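+/*
+ * Illustrative call (hypothetical fabric code, error handling elided --
+ * see drivers/target/target_core_tmr.c for the actual return convention):
+ *
+ *	cmd->se_tmr_req = core_tmr_alloc_req(cmd, fabric_tmr_ptr, LUN_RESET);
+ *
+ * The allocated request is expected to be released via
+ * core_tmr_release_req().
+ */
+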
+#endif /* TARGET_CORE_TMR_H */
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h
new file mode 100644
index 0000000..77e1872
--- /dev/null
+++ b/include/target/target_core_tpg.h
@@ -0,0 +1,35 @@
+#ifndef TARGET_CORE_TPG_H
+#define TARGET_CORE_TPG_H
+
+extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+						const char *);
+extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+						unsigned char *);
+extern void core_tpg_add_node_to_devs(struct se_node_acl *,
+						struct se_portal_group *);
+extern struct se_node_acl *core_tpg_check_initiator_node_acl(
+						struct se_portal_group *,
+						unsigned char *);
+extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
+extern void core_tpg_clear_object_luns(struct se_portal_group *);
+extern struct se_node_acl *core_tpg_add_initiator_node_acl(
+					struct se_portal_group *,
+					struct se_node_acl *,
+					const char *, u32);
+extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
+						struct se_node_acl *, int);
+extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
+						unsigned char *, u32, int);
+extern int core_tpg_register(struct target_core_fabric_ops *,
+					struct se_wwn *,
+					struct se_portal_group *, void *,
+					int);
+extern int core_tpg_deregister(struct se_portal_group *);
+extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
+				void *);
+extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
+extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+#endif /* TARGET_CORE_TPG_H */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
new file mode 100644
index 0000000..66f44e5
--- /dev/null
+++ b/include/target/target_core_transport.h
@@ -0,0 +1,351 @@
+#ifndef TARGET_CORE_TRANSPORT_H
+#define TARGET_CORE_TRANSPORT_H
+
+#define TARGET_CORE_VERSION			TARGET_CORE_MOD_VERSION
+
+/* Attempts before moving from SHORT to LONG */
+#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */
+
+#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */
+
+#define PYX_TRANSPORT_SENT_TO_TRANSPORT		0
+#define PYX_TRANSPORT_WRITE_PENDING		1
+
+#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE	-1
+#define PYX_TRANSPORT_HBA_QUEUE_FULL		-2
+#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS	-3
+#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES	-4
+#define PYX_TRANSPORT_INVALID_CDB_FIELD		-5
+#define PYX_TRANSPORT_INVALID_PARAMETER_LIST	-6
+#define PYX_TRANSPORT_LU_COMM_FAILURE		-7
+#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE		-8
+#define PYX_TRANSPORT_WRITE_PROTECTED		-9
+#define PYX_TRANSPORT_TASK_TIMEOUT		-10
+#define PYX_TRANSPORT_RESERVATION_CONFLICT	-11
+#define PYX_TRANSPORT_ILLEGAL_REQUEST		-12
+#define PYX_TRANSPORT_USE_SENSE_REASON		-13
+
+#ifndef SAM_STAT_RESERVATION_CONFLICT
+#define SAM_STAT_RESERVATION_CONFLICT		0x18
+#endif
+
+#define TRANSPORT_PLUGIN_FREE			0
+#define TRANSPORT_PLUGIN_REGISTERED		1
+
+#define TRANSPORT_PLUGIN_PHBA_PDEV		1
+#define TRANSPORT_PLUGIN_VHBA_PDEV		2
+#define TRANSPORT_PLUGIN_VHBA_VDEV		3
+
+/* For SE OBJ Plugins, in seconds */
+#define TRANSPORT_TIMEOUT_TUR			10
+#define TRANSPORT_TIMEOUT_TYPE_DISK		60
+#define TRANSPORT_TIMEOUT_TYPE_ROM		120
+#define TRANSPORT_TIMEOUT_TYPE_TAPE		600
+#define TRANSPORT_TIMEOUT_TYPE_OTHER		300
+
+/* For se_task->task_state_flags */
+#define TSF_EXCEPTION_CLEARED			0x01
+
+/*
+ * struct se_subsystem_dev->su_dev_flags
+ */
+#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
+#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
+#define SDF_USING_UDEV_PATH			0x00000004
+#define SDF_USING_ALIAS				0x00000008
+
+/*
+ * struct se_device->dev_flags
+ */
+#define DF_READ_ONLY				0x00000001
+#define DF_SPC2_RESERVATIONS			0x00000002
+#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
+
+/* struct se_dev_attrib sanity values */
+/* 10 Minutes */
+#define DA_TASK_TIMEOUT_MAX			600
+/* Default max_unmap_lba_count */
+#define DA_MAX_UNMAP_LBA_COUNT			0
+/* Default max_unmap_block_desc_count */
+#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
+/* Default unmap_granularity */
+#define DA_UNMAP_GRANULARITY_DEFAULT		0
+/* Default unmap_granularity_alignment */
+#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
+/* Emulation for Direct Page Out */
+#define DA_EMULATE_DPO				0
+/* Emulation for Forced Unit Access WRITEs */
+#define DA_EMULATE_FUA_WRITE			1
+/* Emulation for Forced Unit Access READs */
+#define DA_EMULATE_FUA_READ			0
+/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
+#define DA_EMULATE_WRITE_CACHE			0
+/* Emulation for UNIT ATTENTION Interlock Control */
+#define DA_EMULATE_UA_INTLLCK_CTRL		0
+/* Emulation for TASK_ABORTED status (TAS) by default */
+#define DA_EMULATE_TAS				1
+/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
+#define DA_EMULATE_TPU				0
+/*
+ * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
+ * block/blk-lib.c:blkdev_issue_discard()
+ */
+#define DA_EMULATE_TPWS				0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_RESERVATIONS			0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_ALUA				0
+/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
+#define DA_ENFORCE_PR_ISIDS			1
+#define DA_STATUS_MAX_SECTORS_MIN		16
+#define DA_STATUS_MAX_SECTORS_MAX		8192
+
+#define SE_MODE_PAGE_BUF			512
+
+#define MOD_MAX_SECTORS(ms, bs)			(ms % (PAGE_SIZE / bs))
+
+struct se_mem;
+struct se_subsystem_api;
+
+extern int init_se_global(void);
+extern void release_se_global(void);
+extern void transport_init_queue_obj(struct se_queue_obj *);
+extern int transport_subsystem_check_init(void);
+extern int transport_subsystem_register(struct se_subsystem_api *);
+extern void transport_subsystem_release(struct se_subsystem_api *);
+extern void transport_load_plugins(void);
+extern struct se_session *transport_init_session(void);
+extern void __transport_register_session(struct se_portal_group *,
+					struct se_node_acl *,
+					struct se_session *, void *);
+extern void transport_register_session(struct se_portal_group *,
+					struct se_node_acl *,
+					struct se_session *, void *);
+extern void transport_free_session(struct se_session *);
+extern void transport_deregister_session_configfs(struct se_session *);
+extern void transport_deregister_session(struct se_session *);
+extern void transport_cmd_finish_abort(struct se_cmd *, int);
+extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
+extern void transport_complete_sync_cache(struct se_cmd *, int);
+extern void transport_complete_task(struct se_task *, int);
+extern void transport_add_task_to_execute_queue(struct se_task *,
+						struct se_task *,
+						struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+extern void transport_dump_dev_state(struct se_device *, char *, int *);
+extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
+					unsigned long long, char *, int *);
+extern void transport_dump_vpd_proto_id(struct t10_vpd *,
+					unsigned char *, int);
+extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_assoc(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_ident_type(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
+extern int transport_dump_vpd_ident(struct t10_vpd *,
+					unsigned char *, int);
+extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
+extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
+					struct se_subsystem_api *,
+					struct se_subsystem_dev *, u32,
+					void *, struct se_dev_limits *,
+					const char *, const char *);
+extern void transport_device_setup_cmd(struct se_cmd *);
+extern void transport_init_se_cmd(struct se_cmd *,
+					struct target_core_fabric_ops *,
+					struct se_session *, u32, int, int,
+					unsigned char *);
+extern void transport_free_se_cmd(struct se_cmd *);
+extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+extern int transport_generic_handle_cdb(struct se_cmd *);
+extern int transport_generic_handle_cdb_map(struct se_cmd *);
+extern int transport_generic_handle_data(struct se_cmd *);
+extern void transport_new_cmd_failure(struct se_cmd *);
+extern int transport_generic_handle_tmr(struct se_cmd *);
+extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
+extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
+				struct scatterlist *, u32);
+extern int transport_clear_lun_from_sessions(struct se_lun *);
+extern int transport_check_aborted_status(struct se_cmd *, int);
+extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+extern void transport_send_task_abort(struct se_cmd *);
+extern void transport_release_cmd_to_pool(struct se_cmd *);
+extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
+extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
+extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
+extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
+					void *, struct se_mem *,
+					struct se_mem **, u32 *, u32 *);
+extern void transport_do_task_sg_chain(struct se_cmd *);
+extern void transport_generic_process_write(struct se_cmd *);
+extern int transport_generic_do_tmr(struct se_cmd *);
+/* From target_core_alua.c */
+extern int core_alua_check_nonop_delay(struct se_cmd *);
+
+/*
+ * Each struct se_transport_task can have any number of struct se_task
+ * entries for the storage transport(s) to possibly execute.
+ * Used primarily for splitting up CDBs that exceed the physical storage
+ * HBA's maximum sector count per task.
+ */
+struct se_mem {
+	struct page	*se_page;
+	u32		se_len;
+	u32		se_off;
+	struct list_head se_list;
+} ____cacheline_aligned;
+
+/*
+ * 	Each type of disk transport supported MUST have a template defined
+ *	within its .h file.
+ */
+struct se_subsystem_api {
+	/*
+	 * The Name. :-)
+	 */
+	char name[16];
+	/*
+	 * Transport Type.
+	 */
+	u8 transport_type;
+	/*
+	 * struct module for struct se_hba references
+	 */
+	struct module *owner;
+	/*
+	 * Used for global se_subsystem_api list_head
+	 */
+	struct list_head sub_api_list;
+	/*
+	 * For SCF_SCSI_NON_DATA_CDB
+	 */
+	int (*cdb_none)(struct se_task *);
+	/*
+	 * For SCF_SCSI_CONTROL_NONSG_IO_CDB
+	 */
+	int (*map_task_non_SG)(struct se_task *);
+	/*
+	 * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
+	 */
+	int (*map_task_SG)(struct se_task *);
+	/*
+	 * attach_hba():
+	 */
+	int (*attach_hba)(struct se_hba *, u32);
+	/*
+	 * detach_hba():
+	 */
+	void (*detach_hba)(struct se_hba *);
+	/*
+	 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
+	 *		Linux/SCSI struct Scsi_Host passthrough
+	 */
+	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
+	/*
+	 * allocate_virtdevice():
+	 */
+	void *(*allocate_virtdevice)(struct se_hba *, const char *);
+	/*
+	 * create_virtdevice(): Only for Virtual HBAs
+	 */
+	struct se_device *(*create_virtdevice)(struct se_hba *,
+				struct se_subsystem_dev *, void *);
+	/*
+	 * free_device():
+	 */
+	void (*free_device)(void *);
+
+	/*
+	 * dpo_emulated():
+	 */
+	int (*dpo_emulated)(struct se_device *);
+	/*
+	 * fua_write_emulated():
+	 */
+	int (*fua_write_emulated)(struct se_device *);
+	/*
+	 * fua_read_emulated():
+	 */
+	int (*fua_read_emulated)(struct se_device *);
+	/*
+	 * write_cache_emulated():
+	 */
+	int (*write_cache_emulated)(struct se_device *);
+	/*
+	 * transport_complete():
+	 *
+	 * Use transport_generic_complete() for the majority of DAS transport
+	 * drivers.  Provided as a convenience.
+	 */
+	int (*transport_complete)(struct se_task *task);
+	struct se_task *(*alloc_task)(struct se_cmd *);
+	/*
+	 * do_task():
+	 */
+	int (*do_task)(struct se_task *);
+	/*
+	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
+	 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
+	 */
+	int (*do_discard)(struct se_device *, sector_t, u32);
+	/*
+	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
+	 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
+	 */
+	void (*do_sync_cache)(struct se_task *);
+	/*
+	 * free_task():
+	 */
+	void (*free_task)(struct se_task *);
+	/*
+	 * check_configfs_dev_params():
+	 */
+	ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
+	/*
+	 * set_configfs_dev_params():
+	 */
+	ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
+						const char *, ssize_t);
+	/*
+	 * show_configfs_dev_params():
+	 */
+	ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
+						char *);
+	/*
+	 * get_cdb():
+	 */
+	unsigned char *(*get_cdb)(struct se_task *);
+	/*
+	 * get_device_rev():
+	 */
+	u32 (*get_device_rev)(struct se_device *);
+	/*
+	 * get_device_type():
+	 */
+	u32 (*get_device_type)(struct se_device *);
+	/*
+	 * Get the sector_t from a subsystem backstore.
+	 */
+	sector_t (*get_blocks)(struct se_device *);
+	/*
+	 * do_se_mem_map():
+	 */
+	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
+				struct se_mem *, struct se_mem **, u32 *, u32 *);
+	/*
+	 * get_sense_buffer():
+	 */
+	unsigned char *(*get_sense_buffer)(struct se_task *);
+} ____cacheline_aligned;
+
+#define TRANSPORT(dev)		((dev)->transport)
+#define HBA_TRANSPORT(hba)	((hba)->transport)
+
+extern struct se_global *se_global;
+
+#endif /* TARGET_CORE_TRANSPORT_H */
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index b0b4eb2..da39b22 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,6 +21,16 @@
 #undef CREATE_TRACE_POINTS
 
 #include <linux/stringify.h>
+/*
+ * module.h includes tracepoints, and because ftrace.h
+ * pulls in module.h:
+ *  trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
+ *  linux/ftrace.h -> linux/module.h
+ * we must include module.h here before we play with any of
+ * the TRACE_EVENT() macros, otherwise the tracepoints included
+ * by module.h may break the build.
+ */
+#include <linux/module.h>
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
new file mode 100644
index 0000000..186e84d
--- /dev/null
+++ b/include/trace/events/asoc.h
@@ -0,0 +1,235 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM asoc
+
+#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ASOC_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct snd_soc_jack;
+struct snd_soc_codec;
+struct snd_soc_card;
+struct snd_soc_dapm_widget;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(snd_soc_reg,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		codec->name	)
+		__field(	int,		id		)
+		__field(	unsigned int,	reg		)
+		__field(	unsigned int,	val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, codec->name);
+		__entry->id = codec->id;
+		__entry->reg = reg;
+		__entry->val = val;
+	),
+
+	TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name),
+		  (int)__entry->id, (unsigned int)__entry->reg,
+		  (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val)
+
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_card,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		card->name	)
+		__field(	int,		val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, card->name);
+		__entry->val = val;
+	),
+
+	TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val)
+
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card),
+
+	TP_STRUCT__entry(
+		__string(	name,	card->name	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, card->name);
+	),
+
+	TP_printk("card=%s", __get_str(name))
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val),
+
+	TP_STRUCT__entry(
+		__string(	name,	w->name		)
+		__field(	int,	val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, w->name);
+		__entry->val = val;
+	),
+
+	TP_printk("widget=%s val=%d", __get_str(name),
+		  (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+TRACE_EVENT(snd_soc_jack_irq,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(	name,	name		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+TRACE_EVENT(snd_soc_jack_report,
+
+	TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
+
+	TP_ARGS(jack, mask, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		jack->jack->name	)
+		__field(	int,		mask			)
+		__field(	int,		val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, jack->jack->name);
+		__entry->mask = mask;
+		__entry->val = val;
+	),
+
+	TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
+		  (int)__entry->mask)
+);
+
+TRACE_EVENT(snd_soc_jack_notify,
+
+	TP_PROTO(struct snd_soc_jack *jack, int val),
+
+	TP_ARGS(jack, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		jack->jack->name	)
+		__field(	int,		val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, jack->jack->name);
+		__entry->val = val;
+	),
+
+	TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
+);
+
+#endif /* _TRACE_ASOC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index d8ce278..aba421d 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -206,15 +206,16 @@
  * block_bio_complete - completed all work on the block operation
  * @q: queue holding the block operation
  * @bio: block operation completed
+ * @error: io error value
  *
  * This tracepoint indicates there is no further work to do on this
  * block IO operation @bio.
  */
 TRACE_EVENT(block_bio_complete,
 
-	TP_PROTO(struct request_queue *q, struct bio *bio),
+	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
 
-	TP_ARGS(q, bio),
+	TP_ARGS(q, bio, error),
 
 	TP_STRUCT__entry(
 		__field( dev_t,		dev		)
@@ -228,6 +229,7 @@
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
+		__entry->error		= error;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
 	),
 
@@ -486,16 +488,16 @@
 );
 
 /**
- * block_remap - map request for a partition to the raw device
+ * block_bio_remap - map request for a logical device to the raw device
  * @q: queue holding the operation
  * @bio: revised operation
  * @dev: device for the operation
  * @from: original sector for the operation
  *
- * An operation for a partition on a block device has been mapped to the
+ * An operation for a logical device has been mapped to the
  * raw block device.
  */
-TRACE_EVENT(block_remap,
+TRACE_EVENT(block_bio_remap,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
 		 sector_t from),
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
new file mode 100644
index 0000000..388bcdd
--- /dev/null
+++ b/include/trace/events/compaction.h
@@ -0,0 +1,74 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM compaction
+
+#if !defined(_TRACE_COMPACTION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COMPACTION_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "gfpflags.h"
+
+DECLARE_EVENT_CLASS(mm_compaction_isolate_template,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_scanned)
+		__field(unsigned long, nr_taken)
+	),
+
+	TP_fast_assign(
+		__entry->nr_scanned = nr_scanned;
+		__entry->nr_taken = nr_taken;
+	),
+
+	TP_printk("nr_scanned=%lu nr_taken=%lu",
+		__entry->nr_scanned,
+		__entry->nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_migratepages,
+
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken)
+);
+
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
+	TP_PROTO(unsigned long nr_scanned,
+		unsigned long nr_taken),
+
+	TP_ARGS(nr_scanned, nr_taken)
+);
+
+TRACE_EVENT(mm_compaction_migratepages,
+
+	TP_PROTO(unsigned long nr_migrated,
+		unsigned long nr_failed),
+
+	TP_ARGS(nr_migrated, nr_failed),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr_migrated)
+		__field(unsigned long, nr_failed)
+	),
+
+	TP_fast_assign(
+		__entry->nr_migrated = nr_migrated;
+		__entry->nr_failed = nr_failed;
+	),
+
+	TP_printk("nr_migrated=%lu nr_failed=%lu",
+		__entry->nr_migrated,
+		__entry->nr_failed)
+);
+
+
+#endif /* _TRACE_COMPACTION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51..46e3cd8 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason						\
+	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
+	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
+	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
+	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+	    TP_PROTO(__u32 reason, int errno),
+	    TP_ARGS(reason, errno),
+
+	TP_STRUCT__entry(
+		__field(	__u32,		reason		)
+		__field(	int,		errno		)
+	),
+
+	TP_fast_assign(
+		__entry->reason		= reason;
+		__entry->errno		= errno;
+	),
+
+	TP_printk("reason %s (%d)",
+		  __entry->errno < 0 ?
+		  (__entry->errno == -EINTR ? "restart" : "error") :
+		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
 #if defined(__KVM_HAVE_IOAPIC)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -185,6 +215,97 @@
 		  __entry->referenced ? "YOUNG" : "OLD")
 );
 
+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		__field(u64, gfn)
+	),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gfn = gfn;
+	),
+
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, token)
+		__field(__u64, gva)
+	),
+
+	TP_fast_assign(
+		__entry->token = token;
+		__entry->gva = gva;
+	),
+
+	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_completed,
+	TP_PROTO(unsigned long address, struct page *page, u64 gva),
+	TP_ARGS(address, page, gva),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, address)
+		__field(pfn_t, pfn)
+		__field(u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->pfn = page ? page_to_pfn(page) : 0;
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
+		  __entry->address, __entry->pfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h
new file mode 100644
index 0000000..37502a7
--- /dev/null
+++ b/include/trace/events/regulator.h
@@ -0,0 +1,141 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regulator
+
+#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGULATOR_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Events which just log themselves and the regulator name for enable/disable
+ * type tracking.
+ */
+DECLARE_EVENT_CLASS(regulator_basic,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(	name,	name	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("name=%s", __get_str(name))
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_delay,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_complete,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable_complete,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+/*
+ * Events that take a range of numerical values, mostly for voltages
+ * and so on.
+ */
+DECLARE_EVENT_CLASS(regulator_range,
+
+	TP_PROTO(const char *name, int min, int max),
+
+	TP_ARGS(name, min, max),
+
+	TP_STRUCT__entry(
+		__string(	name,		name		)
+		__field(        int,            min             )
+		__field(        int,            max             )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->min  = min;
+		__entry->max  = max;
+	),
+
+	TP_printk("name=%s (%d-%d)", __get_str(name),
+		  (int)__entry->min, (int)__entry->max)
+);
+
+DEFINE_EVENT(regulator_range, regulator_set_voltage,
+
+	TP_PROTO(const char *name, int min, int max),
+
+	TP_ARGS(name, min, max)
+
+);
+
+
+/*
+ * Events that take a single value, mostly for readback and refcounts.
+ */
+DECLARE_EVENT_CLASS(regulator_value,
+
+	TP_PROTO(const char *name, unsigned int val),
+
+	TP_ARGS(name, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		name		)
+		__field(        unsigned int,   val             )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->val  = val;
+	),
+
+	TP_printk("name=%s, val=%u", __get_str(name),
+		  (int)__entry->val)
+);
+
+DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
+
+	TP_PROTO(const char *name, unsigned int value),
+
+	TP_ARGS(name, value)
+
+);
+
+#endif /* _TRACE_REGULATOR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 75ce9d5..f10293c 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -25,9 +25,7 @@
 
 	TP_fast_assign(
 		__entry->skbaddr = skb;
-		if (skb) {
-			__entry->protocol = ntohs(skb->protocol);
-		}
+		__entry->protocol = ntohs(skb->protocol);
 		__entry->location = location;
 	),
 
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index c255fcc..ea422aa 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -25,13 +25,13 @@
 
 #define trace_reclaim_flags(page, sync) ( \
 	(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
+	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC)   \
 	)
 
 #define trace_shrink_flags(file, sync) ( \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \
+	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
 			(file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) |  \
-	(sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+	(sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
 	)
 
 TRACE_EVENT(mm_vmscan_kswapd_sleep,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 89a2b2d..4e249b9 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -81,6 +81,7 @@
 	TP_ARGS(bdi))
 
 DEFINE_WRITEBACK_EVENT(writeback_nowork);
+DEFINE_WRITEBACK_EVENT(writeback_wake_background);
 DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
 DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
new file mode 100644
index 0000000..eb23f41
--- /dev/null
+++ b/include/xen/gntdev.h
@@ -0,0 +1,119 @@
+/******************************************************************************
+ * gntdev.h
+ * 
+ * Interface to /dev/xen/gntdev.
+ * 
+ * Copyright (c) 2007, D G Murray
+ * 
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_PUBLIC_GNTDEV_H__
+#define __LINUX_PUBLIC_GNTDEV_H__
+
+struct ioctl_gntdev_grant_ref {
+	/* The domain ID of the grant to be mapped. */
+	uint32_t domid;
+	/* The grant reference of the grant to be mapped. */
+	uint32_t ref;
+};
+
+/*
+ * Inserts the grant references into the mapping table of an instance
+ * of gntdev. N.B. This does not perform the mapping, which is deferred
+ * until mmap() is called with @index as the offset.
+ */
+#define IOCTL_GNTDEV_MAP_GRANT_REF \
+_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
+struct ioctl_gntdev_map_grant_ref {
+	/* IN parameters */
+	/* The number of grants to be mapped. */
+	uint32_t count;
+	uint32_t pad;
+	/* OUT parameters */
+	/* The offset to be used on a subsequent call to mmap(). */
+	uint64_t index;
+	/* Variable IN parameter. */
+	/* Array of grant references, of size @count. */
+	struct ioctl_gntdev_grant_ref refs[1];
+};
+
+/*
+ * Removes the grant references from the mapping table of an instance
+ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
+ * before this ioctl is called, or an error will result.
+ */
+#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
+_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
+struct ioctl_gntdev_unmap_grant_ref {
+	/* IN parameters */
+	/* The offset returned by the corresponding map operation. */
+	uint64_t index;
+	/* The number of pages to be unmapped. */
+	uint32_t count;
+	uint32_t pad;
+};
+
+/*
+ * Returns the offset in the driver's address space that corresponds
+ * to @vaddr. This can be used to perform a munmap(), followed by an
+ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
+ * the caller. The number of pages that were allocated at the same time as
+ * @vaddr is returned in @count.
+ *
+ * N.B. Where more than one page has been mapped into a contiguous range, the
+ *      supplied @vaddr must correspond to the start of the range; otherwise
+ *      an error will result. It is only possible to munmap() the entire
+ *      contiguously-allocated range at once, and not any subrange thereof.
+ */
+#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
+_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
+struct ioctl_gntdev_get_offset_for_vaddr {
+	/* IN parameters */
+	/* The virtual address of the first mapped page in a range. */
+	uint64_t vaddr;
+	/* OUT parameters */
+	/* The offset that was used in the initial mmap() operation. */
+	uint64_t offset;
+	/* The number of pages mapped in the VM area that begins at @vaddr. */
+	uint32_t count;
+	uint32_t pad;
+};
+
+/*
+ * Sets the maximum number of grants that may be mapped at once by this gntdev
+ * instance.
+ *
+ * N.B. This must be called before any other ioctl is performed on the device.
+ */
+#define IOCTL_GNTDEV_SET_MAX_GRANTS \
+_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
+struct ioctl_gntdev_set_max_grants {
+	/* IN parameter */
+	/* The maximum number of grants that may be mapped at once. */
+	uint32_t count;
+};
+
+#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 9a73170..b1fab6b 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -37,10 +37,16 @@
 #ifndef __ASM_GNTTAB_H__
 #define __ASM_GNTTAB_H__
 
-#include <asm/xen/hypervisor.h>
+#include <asm/page.h>
+
+#include <xen/interface/xen.h>
 #include <xen/interface/grant_table.h>
+
+#include <asm/xen/hypervisor.h>
 #include <asm/xen/grant_table.h>
 
+#include <xen/features.h>
+
 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
 #define NR_GRANT_FRAMES 4
 
@@ -107,6 +113,37 @@
 void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
 				       unsigned long pfn);
 
+static inline void
+gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
+		  uint32_t flags, grant_ref_t ref, domid_t domid)
+{
+	if (flags & GNTMAP_contains_pte)
+		map->host_addr = addr;
+	else if (xen_feature(XENFEAT_auto_translated_physmap))
+		map->host_addr = __pa(addr);
+	else
+		map->host_addr = addr;
+
+	map->flags = flags;
+	map->ref = ref;
+	map->dom = domid;
+}
+
+static inline void
+gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
+		    uint32_t flags, grant_handle_t handle)
+{
+	if (flags & GNTMAP_contains_pte)
+		unmap->host_addr = addr;
+	else if (xen_feature(XENFEAT_auto_translated_physmap))
+		unmap->host_addr = __pa(addr);
+	else
+		unmap->host_addr = addr;
+
+	unmap->handle = handle;
+	unmap->dev_bus_addr = 0;
+}
+
 int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   struct grant_entry **__shared);
@@ -118,4 +155,9 @@
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count);
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+		      struct page **pages, unsigned int count);
+
 #endif /* __ASM_GNTTAB_H__ */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 43e2d7d..7a1d15f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -94,7 +94,7 @@
 	int (*remove)(struct xenbus_device *dev);
 	int (*suspend)(struct xenbus_device *dev, pm_message_t state);
 	int (*resume)(struct xenbus_device *dev);
-	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+	int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
 	struct device_driver driver;
 	int (*read_otherend_details)(struct xenbus_device *dev);
 	int (*is_ready)(struct xenbus_device *dev);
diff --git a/init/Kconfig b/init/Kconfig
index 8dfd094..4f6cdbf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -130,13 +130,16 @@
 config HAVE_KERNEL_LZMA
 	bool
 
+config HAVE_KERNEL_XZ
+	bool
+
 config HAVE_KERNEL_LZO
 	bool
 
 choice
 	prompt "Kernel compression mode"
 	default KERNEL_GZIP
-	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
+	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
 	help
 	  The linux kernel is a kind of self-extracting executable.
 	  Several compression algorithms are available, which differ
@@ -181,6 +184,21 @@
 	  two. Compression is slowest.	The kernel size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config KERNEL_XZ
+	bool "XZ"
+	depends on HAVE_KERNEL_XZ
+	help
+	  XZ uses the LZMA2 algorithm and instruction set specific
+	  BCJ filters which can improve compression ratio of executable
+	  code. The size of the kernel is about 30% smaller with XZ in
+	  comparison to gzip. On architectures for which there is a BCJ
+	  filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
+	  will create a few percent smaller kernel than plain LZMA.
+
+	  The speed is about the same as with LZMA: The decompression
+	  speed of XZ is better than that of bzip2 but worse than gzip
+	  and LZO. Compression is slow.
+
 config KERNEL_LZO
 	bool "LZO"
 	depends on HAVE_KERNEL_LZO
@@ -673,7 +691,7 @@
 	help
 	  Memory Resource Controller Swap Extension comes with its price in
 	  a bigger memory consumption. General purpose distribution kernels
-	  which want to enable the feautre but keep it disabled by default
+	  which want to enable the feature but keep it disabled by default
 	  and let the user enable it by swapaccount boot command line
 	  parameter should have this option unselected.
 	  For those who want to have the feature enabled by default should
diff --git a/init/main.c b/init/main.c
index ea51770..00799c1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -777,9 +777,6 @@
 
 	for (fn = __early_initcall_end; fn < __initcall_end; fn++)
 		do_one_initcall(*fn);
-
-	/* Make sure there is no pending stuff from the initcall sequence */
-	flush_scheduled_work();
 }
 
 /*
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 035f439..14fb6d6 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -237,9 +237,16 @@
 	return &ei->vfs_inode;
 }
 
+static void mqueue_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
+}
+
 static void mqueue_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
+	call_rcu(&inode->i_rcu, mqueue_i_callback);
 }
 
 static void mqueue_evict_inode(struct inode *inode)
diff --git a/kernel/Makefile b/kernel/Makefile
index 0b5ff08..353d3fe 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -43,7 +43,7 @@
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
@@ -100,6 +100,7 @@
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
+obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
@@ -121,7 +122,7 @@
 # config_data.h contains the same information as ikconfig.h but gzipped.
 # Info from config_data can be extracted from /proc/config*
 targets += config_data.gz
-$(obj)/config_data.gz: .config FORCE
+$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
 	$(call if_changed,gzip)
 
 quiet_cmd_ikconfiggz = IKCFG   $@
diff --git a/kernel/audit.c b/kernel/audit.c
index 77770a0..e495624 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -400,7 +400,7 @@
 	if (err < 0) {
 		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
-		audit_log_lost("auditd dissapeared\n");
+		audit_log_lost("auditd disappeared\n");
 		audit_pid = 0;
 		/* we might get lucky and get this in the next auditd */
 		audit_hold_skb(skb);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 66a416b..b24d702 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -764,6 +764,7 @@
  */
 
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
+static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
 static const struct inode_operations cgroup_dir_inode_operations;
@@ -860,6 +861,11 @@
 	iput(inode);
 }
 
+static int cgroup_delete(const struct dentry *d)
+{
+	return 1;
+}
+
 static void remove_dir(struct dentry *d)
 {
 	struct dentry *parent = dget(d->d_parent);
@@ -874,25 +880,29 @@
 	struct list_head *node;
 
 	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	node = dentry->d_subdirs.next;
 	while (node != &dentry->d_subdirs) {
 		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+
+		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
 		list_del_init(node);
 		if (d->d_inode) {
 			/* This should never be called on a cgroup
 			 * directory with child cgroups */
 			BUG_ON(d->d_inode->i_mode & S_IFDIR);
-			d = dget_locked(d);
-			spin_unlock(&dcache_lock);
+			dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			spin_unlock(&dentry->d_lock);
 			d_delete(d);
 			simple_unlink(dentry->d_inode, d);
 			dput(d);
-			spin_lock(&dcache_lock);
-		}
+			spin_lock(&dentry->d_lock);
+		} else
+			spin_unlock(&d->d_lock);
 		node = dentry->d_subdirs.next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -900,11 +910,16 @@
  */
 static void cgroup_d_remove_dir(struct dentry *dentry)
 {
+	struct dentry *parent;
+
 	cgroup_clear_directory(dentry);
 
-	spin_lock(&dcache_lock);
+	parent = dentry->d_parent;
+	spin_lock(&parent->d_lock);
+	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	list_del_init(&dentry->d_u.d_child);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&parent->d_lock);
 	remove_dir(dentry);
 }
 
@@ -1440,6 +1455,11 @@
 
 static int cgroup_get_rootdir(struct super_block *sb)
 {
+	static const struct dentry_operations cgroup_dops = {
+		.d_iput = cgroup_diput,
+		.d_delete = cgroup_delete,
+	};
+
 	struct inode *inode =
 		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
 	struct dentry *dentry;
@@ -1457,6 +1477,8 @@
 		return -ENOMEM;
 	}
 	sb->s_root = dentry;
+	/* for everything else we want ->d_op set */
+	sb->s_d_op = &cgroup_dops;
 	return 0;
 }
 
@@ -2180,12 +2202,20 @@
 };
 
 static const struct inode_operations cgroup_dir_inode_operations = {
-	.lookup = simple_lookup,
+	.lookup = cgroup_lookup,
 	.mkdir = cgroup_mkdir,
 	.rmdir = cgroup_rmdir,
 	.rename = cgroup_rename,
 };
 
+static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+	d_add(dentry, NULL);
+	return NULL;
+}
+
 /*
  * Check if a file is a control file
  */
@@ -2199,10 +2229,6 @@
 static int cgroup_create_file(struct dentry *dentry, mode_t mode,
 				struct super_block *sb)
 {
-	static const struct dentry_operations cgroup_dops = {
-		.d_iput = cgroup_diput,
-	};
-
 	struct inode *inode;
 
 	if (!dentry)
@@ -2228,7 +2254,6 @@
 		inode->i_size = 0;
 		inode->i_fop = &cgroup_file_operations;
 	}
-	dentry->d_op = &cgroup_dops;
 	d_instantiate(dentry, inode);
 	dget(dentry);	/* Extra count - pin the dentry in core */
 	return 0;
@@ -3638,9 +3663,7 @@
 	list_del(&cgrp->sibling);
 	cgroup_unlock_hierarchy(cgrp->root);
 
-	spin_lock(&cgrp->dentry->d_lock);
 	d = dget(cgrp->dentry);
-	spin_unlock(&d->d_lock);
 
 	cgroup_d_remove_dir(d);
 	dput(d);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index a6e7297..bd3e8e2 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2914,7 +2914,7 @@
 	}
 }
 
-/* Intialize kdb_printf, breakpoint tables and kdb state */
+/* Initialize kdb_printf, breakpoint tables and kdb state */
 void __init kdb_init(int lvl)
 {
 	static int kdb_init_lvl = KDB_NOT_INITIALIZED;
diff --git a/kernel/exit.c b/kernel/exit.c
index 676149a..f9a45eb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,7 +69,7 @@
 
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
-		__get_cpu_var(process_counts)--;
+		__this_cpu_dec(process_counts);
 	}
 	list_del_rcu(&p->thread_group);
 }
@@ -994,6 +994,15 @@
 	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
+
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 *
+	 * Because of cgroup mode, this must be called before cgroup_exit().
+	 */
+	perf_event_exit_task(tsk);
+
 	cgroup_exit(tsk, 1);
 
 	if (group_dead)
@@ -1007,11 +1016,6 @@
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d164e2..25e4291 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
+#include <linux/khugepaged.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -169,15 +170,14 @@
 static inline void free_signal_struct(struct signal_struct *sig)
 {
 	taskstats_tgid_free(sig);
+	sched_autogroup_exit(sig);
 	kmem_cache_free(signal_cachep, sig);
 }
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-	if (atomic_dec_and_test(&sig->sigcnt)) {
-		sched_autogroup_exit(sig);
+	if (atomic_dec_and_test(&sig->sigcnt))
 		free_signal_struct(sig);
-	}
 }
 
 void __put_task_struct(struct task_struct *tsk)
@@ -331,6 +331,9 @@
 	retval = ksm_fork(mm, oldmm);
 	if (retval)
 		goto out;
+	retval = khugepaged_fork(mm, oldmm);
+	if (retval)
+		goto out;
 
 	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
@@ -530,6 +533,9 @@
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	VM_BUG_ON(mm->pmd_huge_pte);
+#endif
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -544,6 +550,7 @@
 	if (atomic_dec_and_test(&mm->mm_users)) {
 		exit_aio(mm);
 		ksm_exit(mm);
+		khugepaged_exit(mm); /* must run before exit_mmap */
 		exit_mmap(mm);
 		set_mm_exe_file(mm, NULL);
 		if (!list_empty(&mm->mmlist)) {
@@ -670,6 +677,10 @@
 	mm->token_priority = 0;
 	mm->last_interval = 0;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	mm->pmd_huge_pte = NULL;
+#endif
+
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
@@ -911,6 +922,7 @@
 
 	sig->oom_adj = current->signal->oom_adj;
 	sig->oom_score_adj = current->signal->oom_score_adj;
+	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
 
 	mutex_init(&sig->cred_guard_mutex);
 
@@ -1286,7 +1298,7 @@
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail(&p->sibling, &p->real_parent->children);
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
-			__get_cpu_var(process_counts)++;
+			__this_cpu_inc(process_counts);
 		}
 		attach_pid(p, PIDTYPE_PID, pid);
 		nr_threads++;
@@ -1318,7 +1330,7 @@
 	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		put_signal_struct(p->signal);
+		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
@@ -1411,23 +1423,6 @@
 	}
 
 	/*
-	 * We hope to recycle these flags after 2.6.26
-	 */
-	if (unlikely(clone_flags & CLONE_STOPPED)) {
-		static int __read_mostly count = 100;
-
-		if (count > 0 && printk_ratelimit()) {
-			char comm[TASK_COMM_LEN];
-
-			count--;
-			printk(KERN_INFO "fork(): process `%s' used deprecated "
-					"clone flags 0x%lx\n",
-				get_task_comm(comm, current),
-				clone_flags & CLONE_STOPPED);
-		}
-	}
-
-	/*
 	 * When called from kernel_thread, don't do user tracing stuff.
 	 */
 	if (likely(user_mode(regs)))
@@ -1465,16 +1460,7 @@
 		 */
 		p->flags &= ~PF_STARTING;
 
-		if (unlikely(clone_flags & CLONE_STOPPED)) {
-			/*
-			 * We'll start up with an immediate SIGSTOP.
-			 */
-			sigaddset(&p->pending.signal, SIGSTOP);
-			set_tsk_thread_flag(p, TIF_SIGPENDING);
-			__set_task_state(p, TASK_STOPPED);
-		} else {
-			wake_up_new_task(p, clone_flags);
-		}
+		wake_up_new_task(p, clone_flags);
 
 		tracehook_report_clone_complete(trace, regs,
 						clone_flags, nr, p);
diff --git a/kernel/freezer.c b/kernel/freezer.c
index bd1d42b..66ecd2e 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -104,8 +104,13 @@
 	}
 
 	if (should_send_signal(p)) {
-		if (!signal_pending(p))
-			fake_signal_wake_up(p);
+		fake_signal_wake_up(p);
+		/*
+		 * fake_signal_wake_up() goes through p's scheduler
+		 * lock and guarantees that TASK_STOPPED/TRACED ->
+		 * TASK_RUNNING transition can't race with task state
+		 * testing in try_to_freeze_tasks().
+		 */
 	} else if (sig_only) {
 		return false;
 	} else {
diff --git a/kernel/futex.c b/kernel/futex.c
index 3019b92..5207563 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -233,7 +233,7 @@
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct page *page;
+	struct page *page, *page_head;
 	int err;
 
 	/*
@@ -265,11 +265,46 @@
 	if (err < 0)
 		return err;
 
-	page = compound_head(page);
-	lock_page(page);
-	if (!page->mapping) {
-		unlock_page(page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	page_head = page;
+	if (unlikely(PageTail(page))) {
 		put_page(page);
+		/* serialize against __split_huge_page_splitting() */
+		local_irq_disable();
+		if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+			page_head = compound_head(page);
+			/*
+			 * page_head is valid pointer but we must pin
+			 * it before taking the PG_lock and/or
+			 * PG_compound_lock. The moment we re-enable
+			 * irqs __split_huge_page_splitting() can
+			 * return and the head page can be freed from
+			 * under us. We can't take the PG_lock and/or
+			 * PG_compound_lock on a page that could be
+			 * freed from under us.
+			 */
+			if (page != page_head) {
+				get_page(page_head);
+				put_page(page);
+			}
+			local_irq_enable();
+		} else {
+			local_irq_enable();
+			goto again;
+		}
+	}
+#else
+	page_head = compound_head(page);
+	if (page != page_head) {
+		get_page(page_head);
+		put_page(page);
+	}
+#endif
+
+	lock_page(page_head);
+	if (!page_head->mapping) {
+		unlock_page(page_head);
+		put_page(page_head);
 		goto again;
 	}
 
@@ -280,20 +315,20 @@
 	 * it's a read-only handle, it's expected that futexes attach to
 	 * the object not the particular process.
 	 */
-	if (PageAnon(page)) {
+	if (PageAnon(page_head)) {
 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
 		key->private.mm = mm;
 		key->private.address = address;
 	} else {
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-		key->shared.inode = page->mapping->host;
-		key->shared.pgoff = page->index;
+		key->shared.inode = page_head->mapping->host;
+		key->shared.pgoff = page_head->index;
 	}
 
 	get_futex_key_refs(key);
 
-	unlock_page(page);
-	put_page(page);
+	unlock_page(page_head);
+	put_page(page_head);
 	return 0;
 }
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f2429fc..0c8d7c0 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -497,7 +497,7 @@
  */
 static inline int hrtimer_hres_active(void)
 {
-	return __get_cpu_var(hrtimer_bases).hres_active;
+	return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
@@ -1745,7 +1745,7 @@
 	}
 
 	/*
-	 * A NULL parameter means "inifinte"
+	 * A NULL parameter means "infinite"
 	 */
 	if (!expires) {
 		schedule();
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9988d03..282f202 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -72,6 +72,8 @@
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 {
+	int cpu;
+
 	desc->irq_data.irq = irq;
 	desc->irq_data.chip = &no_irq_chip;
 	desc->irq_data.chip_data = NULL;
@@ -83,7 +85,8 @@
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 	desc->name = NULL;
-	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
 	desc_smp_init(desc, node);
 }
 
@@ -133,8 +136,7 @@
 	if (!desc)
 		return NULL;
 	/* allocate based on nr_cpu_ids */
-	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
-					 gfp, node);
+	desc->kstat_irqs = alloc_percpu(unsigned int);
 	if (!desc->kstat_irqs)
 		goto err_desc;
 
@@ -149,7 +151,7 @@
 	return desc;
 
 err_kstat:
-	kfree(desc->kstat_irqs);
+	free_percpu(desc->kstat_irqs);
 err_desc:
 	kfree(desc);
 	return NULL;
@@ -166,7 +168,7 @@
 	mutex_unlock(&sparse_irq_lock);
 
 	free_masks(desc);
-	kfree(desc->kstat_irqs);
+	free_percpu(desc->kstat_irqs);
 	kfree(desc);
 }
 
@@ -234,7 +236,6 @@
 	}
 };
 
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	int count, i, node = first_online_node;
@@ -250,7 +251,8 @@
 	for (i = 0; i < count; i++) {
 		desc[i].irq_data.irq = i;
 		desc[i].irq_data.chip = &no_irq_chip;
-		desc[i].kstat_irqs = kstat_irqs_all[i];
+		/* TODO : do this allocation on-demand ... */
+		desc[i].kstat_irqs = alloc_percpu(unsigned int);
 		alloc_masks(desc + i, GFP_KERNEL, node);
 		desc_smp_init(desc + i, node);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
@@ -275,6 +277,22 @@
 
 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
 {
+#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
+	struct irq_desc *desc;
+	unsigned int i;
+
+	for (i = 0; i < cnt; i++) {
+		desc = irq_to_desc(start + i);
+		if (desc && !desc->kstat_irqs) {
+			unsigned int __percpu *stats = alloc_percpu(unsigned int);
+
+			if (!stats)
+				return -1;
+			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
+				free_percpu(stats);
+		}
+	}
+#endif
 	return start;
 }
 #endif /* !CONFIG_SPARSE_IRQ */
@@ -391,7 +409,9 @@
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	return desc ? desc->kstat_irqs[cpu] : 0;
+
+	return desc && desc->kstat_irqs ?
+			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
 #ifdef CONFIG_GENERIC_HARDIRQS
@@ -401,10 +421,10 @@
 	int cpu;
 	int sum = 0;
 
-	if (!desc)
+	if (!desc || !desc->kstat_irqs)
 		return 0;
 	for_each_possible_cpu(cpu)
-		sum += desc->kstat_irqs[cpu];
+		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 91a5fa2..0caa59f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -577,7 +577,7 @@
  */
 static int irq_thread(void *data)
 {
-	static struct sched_param param = {
+	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
 	struct irqaction *action = data;
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 90f8819..c58fa7d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -77,21 +77,21 @@
  */
 static void __irq_work_queue(struct irq_work *entry)
 {
-	struct irq_work **head, *next;
+	struct irq_work *next;
 
-	head = &get_cpu_var(irq_work_list);
+	preempt_disable();
 
 	do {
-		next = *head;
+		next = __this_cpu_read(irq_work_list);
 		/* Can assign non-atomic because we keep the flags set. */
 		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(head, next, entry) != next);
+	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
 
 	/* The list was empty, raise self-interrupt to start processing. */
 	if (!irq_work_next(entry))
 		arch_irq_work_raise();
 
-	put_cpu_var(irq_work_list);
+	preempt_enable();
 }
 
 /*
@@ -120,16 +120,16 @@
  */
 void irq_work_run(void)
 {
-	struct irq_work *list, **head;
+	struct irq_work *list;
 
-	head = &__get_cpu_var(irq_work_list);
-	if (*head == NULL)
+	if (this_cpu_read(irq_work_list) == NULL)
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = xchg(head, NULL);
+	list = this_cpu_xchg(irq_work_list, NULL);
+
 	while (list != NULL) {
 		struct irq_work *entry = list;
 
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b55045b..ec19b92 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -163,7 +163,7 @@
 	 * just verifies it is an address we can use.
 	 *
 	 * Since the kernel does everything in page size chunks ensure
-	 * the destination addreses are page aligned.  Too many
+	 * the destination addresses are page aligned.  Too many
 	 * special cases crop of when we don't do this.  The most
 	 * insidious is getting overlapping destination addresses
 	 * simply because addresses are changed to page size
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7663e5d..7798181 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -317,12 +317,12 @@
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-	__get_cpu_var(kprobe_instance) = kp;
+	__this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-	__get_cpu_var(kprobe_instance) = NULL;
+	__this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
@@ -965,7 +965,7 @@
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -980,7 +980,7 @@
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 	int ret = 0;
 
 	if (cur && cur->break_handler) {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5355cfd..c55afba 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
-		static struct sched_param param = { .sched_priority = 0 };
+		static const struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 17110a4..ee74b35 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -241,24 +241,19 @@
 	seq_puts(m, "Latency Top version : v0.1\n");
 
 	for (i = 0; i < MAXLR; i++) {
-		if (latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &latency_record[i];
+
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %lu %lu ",
-				latency_record[i].count,
-				latency_record[i].time,
-				latency_record[i].max);
+			seq_printf(m, "%i %lu %lu",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
 			seq_printf(m, "\n");
 		}
diff --git a/kernel/panic.c b/kernel/panic.c
index 4c13b1a..991bb87 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -34,6 +34,7 @@
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
+EXPORT_SYMBOL_GPL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf..05ebe84 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -256,6 +268,12 @@
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -269,7 +287,7 @@
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -278,7 +296,7 @@
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -534,6 +552,7 @@
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -545,7 +564,7 @@
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -768,6 +787,8 @@
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -784,9 +805,9 @@
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -898,11 +919,13 @@
 static void add_event_to_ctx(struct perf_event *event,
 			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -937,7 +960,7 @@
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1042,14 +1065,13 @@
 					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
@@ -1082,7 +1104,7 @@
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1193,12 +1215,6 @@
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
 			  struct perf_cpu_context *cpuctx,
 			  enum event_type_t event_type)
@@ -1435,7 +1451,7 @@
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1483,7 @@
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1694,7 +1710,7 @@
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -3893,7 +3909,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm || event->attr.mmap ||
@@ -4030,7 +4046,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm)
@@ -4178,7 +4194,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if ((!executable && event->attr.mmap_data) ||
@@ -4648,7 +4664,7 @@
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void inline perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index a5aff3e..26572996 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -100,13 +100,9 @@
 	depends on PM_ADVANCED_DEBUG
 	default n
 
-config SUSPEND_NVS
-       bool
-
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM && ARCH_SUSPEND_POSSIBLE
-	select SUSPEND_NVS if HAS_IOMEM
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -140,7 +136,6 @@
 	depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
-	select SUSPEND_NVS if HAS_IOMEM
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces.  STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index f9063c6..c350e18 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,7 +1,4 @@
-
-ifeq ($(CONFIG_PM_DEBUG),y)
-EXTRA_CFLAGS	+=	-DDEBUG
-endif
+ccflags-$(CONFIG_PM_DEBUG)	:=	-DDEBUG
 
 obj-$(CONFIG_PM)		+= main.o
 obj-$(CONFIG_PM_SLEEP)		+= console.o
@@ -10,6 +7,5 @@
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
-obj-$(CONFIG_SUSPEND_NVS)	+= nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 048d0b5..1832bd2 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -51,18 +51,18 @@
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static struct platform_hibernation_ops *hibernation_ops;
+static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
  * hibernation_set_ops - set the global hibernate operations
  * @ops: the hibernation operations to use in subsequent hibernation transitions
  */
 
-void hibernation_set_ops(struct platform_hibernation_ops *ops)
+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 {
 	if (ops && !(ops->begin && ops->end &&  ops->pre_snapshot
 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
-	    && ops->restore_cleanup)) {
+	    && ops->restore_cleanup && ops->leave)) {
 		WARN_ON(1);
 		return;
 	}
@@ -278,7 +278,7 @@
 		goto Enable_irqs;
 	}
 
-	if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events())
+	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
 		goto Power_up;
 
 	in_suspend = 1;
@@ -516,7 +516,7 @@
 
 	local_irq_disable();
 	sysdev_suspend(PMSG_HIBERNATE);
-	if (!pm_check_wakeup_events()) {
+	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
 		goto Power_up;
 	}
@@ -647,6 +647,7 @@
 		swsusp_free();
 		if (!error)
 			power_down();
+		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
 		pr_debug("PM: Image restored successfully.\n");
diff --git a/kernel/power/nvs.c b/kernel/power/nvs.c
deleted file mode 100644
index 1836db6..0000000
--- a/kernel/power/nvs.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
- *
- * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-
-/*
- * Platforms, like ACPI, may want us to save some memory used by them during
- * suspend and to restore the contents of this memory during the subsequent
- * resume.  The code below implements a mechanism allowing us to do that.
- */
-
-struct nvs_page {
-	unsigned long phys_start;
-	unsigned int size;
-	void *kaddr;
-	void *data;
-	struct list_head node;
-};
-
-static LIST_HEAD(nvs_list);
-
-/**
- *	suspend_nvs_register - register platform NVS memory region to save
- *	@start - physical address of the region
- *	@size - size of the region
- *
- *	The NVS region need not be page-aligned (both ends) and we arrange
- *	things so that the data from page-aligned addresses in this region will
- *	be copied into separate RAM pages.
- */
-int suspend_nvs_register(unsigned long start, unsigned long size)
-{
-	struct nvs_page *entry, *next;
-
-	while (size > 0) {
-		unsigned int nr_bytes;
-
-		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
-		if (!entry)
-			goto Error;
-
-		list_add_tail(&entry->node, &nvs_list);
-		entry->phys_start = start;
-		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
-		entry->size = (size < nr_bytes) ? size : nr_bytes;
-
-		start += entry->size;
-		size -= entry->size;
-	}
-	return 0;
-
- Error:
-	list_for_each_entry_safe(entry, next, &nvs_list, node) {
-		list_del(&entry->node);
-		kfree(entry);
-	}
-	return -ENOMEM;
-}
-
-/**
- *	suspend_nvs_free - free data pages allocated for saving NVS regions
- */
-void suspend_nvs_free(void)
-{
-	struct nvs_page *entry;
-
-	list_for_each_entry(entry, &nvs_list, node)
-		if (entry->data) {
-			free_page((unsigned long)entry->data);
-			entry->data = NULL;
-			if (entry->kaddr) {
-				iounmap(entry->kaddr);
-				entry->kaddr = NULL;
-			}
-		}
-}
-
-/**
- *	suspend_nvs_alloc - allocate memory necessary for saving NVS regions
- */
-int suspend_nvs_alloc(void)
-{
-	struct nvs_page *entry;
-
-	list_for_each_entry(entry, &nvs_list, node) {
-		entry->data = (void *)__get_free_page(GFP_KERNEL);
-		if (!entry->data) {
-			suspend_nvs_free();
-			return -ENOMEM;
-		}
-	}
-	return 0;
-}
-
-/**
- *	suspend_nvs_save - save NVS memory regions
- */
-void suspend_nvs_save(void)
-{
-	struct nvs_page *entry;
-
-	printk(KERN_INFO "PM: Saving platform NVS memory\n");
-
-	list_for_each_entry(entry, &nvs_list, node)
-		if (entry->data) {
-			entry->kaddr = ioremap(entry->phys_start, entry->size);
-			memcpy(entry->data, entry->kaddr, entry->size);
-		}
-}
-
-/**
- *	suspend_nvs_restore - restore NVS memory regions
- *
- *	This function is going to be called with interrupts disabled, so it
- *	cannot iounmap the virtual addresses used to access the NVS region.
- */
-void suspend_nvs_restore(void)
-{
-	struct nvs_page *entry;
-
-	printk(KERN_INFO "PM: Restoring platform NVS memory\n");
-
-	list_for_each_entry(entry, &nvs_list, node)
-		if (entry->data)
-			memcpy(entry->kaddr, entry->data, entry->size);
-}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index e50b4c1..d6d2a10 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -64,6 +64,12 @@
 			 * perturb a task in TASK_STOPPED or TASK_TRACED.
 			 * It is "frozen enough".  If the task does wake
 			 * up, it will immediately call try_to_freeze.
+			 *
+			 * Because freeze_task() goes through p's
+			 * scheduler lock after setting TIF_FREEZE, it's
+			 * guaranteed that either we see TASK_RUNNING or
+			 * try_to_stop() after schedule() in ptrace/signal
+			 * stop sees TIF_FREEZE.
 			 */
 			if (!task_is_stopped_or_traced(p) &&
 			    !freezer_should_skip(p))
@@ -79,7 +85,7 @@
 		if (!todo || time_after(jiffies, end_time))
 			break;
 
-		if (!pm_check_wakeup_events()) {
+		if (pm_wakeup_pending()) {
 			wakeup = true;
 			break;
 		}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 031d5e3..de6f86b 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,13 +31,13 @@
 	[PM_SUSPEND_MEM]	= "mem",
 };
 
-static struct platform_suspend_ops *suspend_ops;
+static const struct platform_suspend_ops *suspend_ops;
 
 /**
  *	suspend_set_ops - Set the global suspend method table.
  *	@ops:	Pointer to ops structure.
  */
-void suspend_set_ops(struct platform_suspend_ops *ops)
+void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
 	mutex_lock(&pm_mutex);
 	suspend_ops = ops;
@@ -164,7 +164,7 @@
 
 	error = sysdev_suspend(PMSG_SUSPEND);
 	if (!error) {
-		if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
+		if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
 			error = suspend_ops->enter(state);
 			events_check_enabled = false;
 		}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8c7e483..7c97c3a 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -224,7 +224,7 @@
 		return res;
 
 	root_swap = res;
-	res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
+	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 	if (res)
 		return res;
 
@@ -888,7 +888,7 @@
 /**
  *	swsusp_read - read the hibernation image.
  *	@flags_p: flags passed by the "frozen" kernel in the image header should
- *		  be written into this memeory location
+ *		  be written into this memory location
  */
 
 int swsusp_read(unsigned int *flags_p)
@@ -930,7 +930,8 @@
 {
 	int error;
 
-	hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
+	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
+					    FMODE_READ, NULL);
 	if (!IS_ERR(hib_resume_bdev)) {
 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
 		clear_page(swsusp_header);
diff --git a/kernel/printk.c b/kernel/printk.c
index ab3ffc5..53d9a9e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -39,16 +39,11 @@
 #include <linux/syslog.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/rculist.h>
 
 #include <asm/uaccess.h>
 
 /*
- * for_each_console() allows you to iterate on each console
- */
-#define for_each_console(con) \
-	for (con = console_drivers; con != NULL; con = con->next)
-
-/*
  * Architectures can override it:
  */
 void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
@@ -279,12 +274,12 @@
 	 * at open time.
 	 */
 	if (type == SYSLOG_ACTION_OPEN || !from_file) {
-		if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
-			return -EPERM;
+		if (dmesg_restrict && !capable(CAP_SYSLOG))
+			goto warn; /* switch to return -EPERM after 2.6.39 */
 		if ((type != SYSLOG_ACTION_READ_ALL &&
 		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
-		    !capable(CAP_SYS_ADMIN))
-			return -EPERM;
+		    !capable(CAP_SYSLOG))
+			goto warn; /* switch to return -EPERM after 2.6.39 */
 	}
 
 	error = security_syslog(type);
@@ -428,6 +423,12 @@
 	}
 out:
 	return error;
+warn:
+	/* remove after 2.6.39 */
+	if (capable(CAP_SYS_ADMIN))
+		WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+		  "but no CAP_SYSLOG (deprecated and denied).\n");
+	return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
@@ -1359,6 +1360,7 @@
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
 	release_console_sem();
+	console_sysfs_notify();
 
 	/*
 	 * By unregistering the bootconsoles after we enable the real console
@@ -1417,6 +1419,7 @@
 		console_drivers->flags |= CON_CONSDEV;
 
 	release_console_sem();
+	console_sysfs_notify();
 	return res;
 }
 EXPORT_SYMBOL(unregister_console);
@@ -1500,7 +1503,7 @@
 	/* Don't allow registering multiple times */
 	if (!dumper->registered) {
 		dumper->registered = 1;
-		list_add_tail(&dumper->list, &dump_list);
+		list_add_tail_rcu(&dumper->list, &dump_list);
 		err = 0;
 	}
 	spin_unlock_irqrestore(&dump_list_lock, flags);
@@ -1524,29 +1527,16 @@
 	spin_lock_irqsave(&dump_list_lock, flags);
 	if (dumper->registered) {
 		dumper->registered = 0;
-		list_del(&dumper->list);
+		list_del_rcu(&dumper->list);
 		err = 0;
 	}
 	spin_unlock_irqrestore(&dump_list_lock, flags);
+	synchronize_rcu();
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
 
-static const char * const kmsg_reasons[] = {
-	[KMSG_DUMP_OOPS]	= "oops",
-	[KMSG_DUMP_PANIC]	= "panic",
-	[KMSG_DUMP_KEXEC]	= "kexec",
-};
-
-static const char *kmsg_to_str(enum kmsg_dump_reason reason)
-{
-	if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
-		return "unknown";
-
-	return kmsg_reasons[reason];
-}
-
 /**
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
@@ -1585,13 +1575,9 @@
 		l2 = chars;
 	}
 
-	if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
-		printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n",
-				kmsg_to_str(reason));
-		return;
-	}
-	list_for_each_entry(dumper, &dump_list, list)
+	rcu_read_lock();
+	list_for_each_entry_rcu(dumper, &dump_list, list)
 		dumper->dump(dumper, reason, s1, l1, s2, l2);
-	spin_unlock_irqrestore(&dump_list_lock, flags);
+	rcu_read_unlock();
 }
 #endif
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d0ddfea..dd4aea8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -364,8 +364,8 @@
 	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_sched_data).nxtlist ||
-	    __get_cpu_var(rcu_bh_data).nxtlist)
+	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+	    __this_cpu_read(rcu_bh_data.nxtlist))
 		set_need_resched();
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 0494908..ea3e5ef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,14 +278,12 @@
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@
 #define MIN_SHARES	2
 #define MAX_SHARES	(1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
@@ -743,7 +741,7 @@
 	buf[cnt] = 0;
 	cmp = strstrip(buf);
 
-	if (strncmp(buf, "NO_", 3) == 0) {
+	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
 		cmp += 3;
 	}
@@ -2507,7 +2505,7 @@
  * try_to_wake_up_local - try to wake up a local task with rq lock held
  * @p: the thread to be awakened
  *
- * Put @p on the run-queue if it's not alredy there.  The caller must
+ * Put @p on the run-queue if it's not already there.  The caller must
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.  this_rq() stays locked over invocation.
  */
@@ -7848,7 +7846,7 @@
 	cfs_rq->tg = tg;
 
 	tg->se[cpu] = se;
-	/* se could be NULL for init_task_group */
+	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
 
@@ -7908,18 +7906,18 @@
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.se = (struct sched_entity **)ptr;
+		root_task_group.se = (struct sched_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@
 			global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&init_task_group.rt_bandwidth,
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-	list_add(&init_task_group.list, &task_groups);
-	INIT_LIST_HEAD(&init_task_group.children);
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7960,34 +7958,34 @@
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		root_task_group.shares = root_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
-		 * How much cpu bandwidth does init_task_group get?
+		 * How much cpu bandwidth does root_task_group get?
 		 *
 		 * In case of task-groups formed thr' the cgroup filesystem, it
 		 * gets 100% of the cpu resources in the system. This overall
 		 * system cpu resource is divided among the tasks of
-		 * init_task_group and its child task-groups in a fair manner,
+		 * root_task_group and its child task-groups in a fair manner,
 		 * based on each entity's (task or task-group's) weight
 		 * (se->load.weight).
 		 *
-		 * In other words, if init_task_group has 10 tasks of weight
+		 * In other words, if root_task_group has 10 tasks of weight
 		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
 		 * then A0's share of the cpu resource is:
 		 *
 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 		 *
-		 * We achieve this by letting init_task_group's tasks sit
-		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+		 * We achieve this by letting root_task_group's tasks sit
+		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
+	autogroup_free(tg);
 	kfree(tg);
 }
 
@@ -8812,7 +8811,7 @@
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		return &init_task_group.css;
+		return &root_task_group.css;
 	}
 
 	parent = cgroup_tg(cgrp->parent);
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index c80fedc..32a723b 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -9,10 +9,10 @@
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
-static void autogroup_init(struct task_struct *init_task)
+static void __init autogroup_init(struct task_struct *init_task)
 {
-	autogroup_default.tg = &init_task_group;
-	init_task_group.autogroup = &autogroup_default;
+	autogroup_default.tg = &root_task_group;
+	root_task_group.autogroup = &autogroup_default;
 	kref_init(&autogroup_default.kref);
 	init_rwsem(&autogroup_default.lock);
 	init_task->signal->autogroup = &autogroup_default;
@@ -63,7 +63,7 @@
 	if (!ag)
 		goto out_fail;
 
-	tg = sched_create_group(&init_task_group);
+	tg = sched_create_group(&root_task_group);
 
 	if (IS_ERR(tg))
 		goto out_free;
diff --git a/kernel/smp.c b/kernel/smp.c
index 12ed8b0..4ec30e0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 static struct {
 	struct list_head	queue;
 	raw_spinlock_t		lock;
@@ -529,3 +530,21 @@
 {
 	raw_spin_unlock_irq(&call_function.lock);
 }
+#endif /* USE_GENERIC_SMP_HELPERS */
+
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, wait);
+	local_irq_disable();
+	func(info);
+	local_irq_enable();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d4d918a..68eb5ef 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -70,7 +70,7 @@
 static void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
-	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
 	if (tsk && tsk->state != TASK_RUNNING)
 		wake_up_process(tsk);
@@ -388,8 +388,8 @@
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_vec).tail = t;
-	__get_cpu_var(tasklet_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_vec.tail) = t;
+	__this_cpu_write(tasklet_vec.tail, &(t->next));
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_hi_vec).tail = t;
-	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_hi_vec.tail) = t;
+	__this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@
 {
 	BUG_ON(!irqs_disabled());
 
-	t->next = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = t;
+	t->next = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_vec).head;
-	__get_cpu_var(tasklet_vec).head = NULL;
-	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+	list = __this_cpu_read(tasklet_vec.head);
+	__this_cpu_write(tasklet_vec.head, NULL);
+	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -449,8 +449,8 @@
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_vec).tail = t;
-		__get_cpu_var(tasklet_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_vec.tail) = t;
+		__this_cpu_write(tasklet_vec.tail, &(t->next));
 		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -461,9 +461,9 @@
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = NULL;
-	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+	list = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, NULL);
+	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -484,8 +484,8 @@
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_hi_vec).tail = t;
-		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_hi_vec.tail) = t;
+		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 		__raise_softirq_irqoff(HI_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -802,16 +802,16 @@
 
 	/* Find end, append list for that CPU. */
 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 		per_cpu(tasklet_vec, cpu).head = NULL;
 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 	}
@@ -853,7 +853,7 @@
 			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
-		static struct sched_param param = {
+		static const struct sched_param param = {
 			.sched_priority = MAX_RT_PRIO-1
 		};
 
@@ -885,25 +885,6 @@
 }
 early_initcall(spawn_ksoftirqd);
 
-#ifdef CONFIG_SMP
-/*
- * Call a function on all processors
- */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
-{
-	int ret = 0;
-
-	preempt_disable();
-	ret = smp_call_function(func, info, wait);
-	local_irq_disable();
-	func(info);
-	local_irq_enable();
-	preempt_enable();
-	return ret;
-}
-EXPORT_SYMBOL(on_each_cpu);
-#endif
-
 /*
  * [ These __weak aliases are kept in a separate compilation unit, so that
  *   GCC does not inline them incorrectly. ]
diff --git a/kernel/sys.c b/kernel/sys.c
index 2745dcd..31b71a2 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -43,6 +43,8 @@
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
 
+#include <linux/kmsg_dump.h>
+
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/unistd.h>
@@ -285,6 +287,7 @@
  */
 void emergency_restart(void)
 {
+	kmsg_dump(KMSG_DUMP_EMERG);
 	machine_emergency_restart();
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
@@ -312,6 +315,7 @@
 		printk(KERN_EMERG "Restarting system.\n");
 	else
 		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+	kmsg_dump(KMSG_DUMP_RESTART);
 	machine_restart(cmd);
 }
 EXPORT_SYMBOL_GPL(kernel_restart);
@@ -333,6 +337,7 @@
 	kernel_shutdown_prepare(SYSTEM_HALT);
 	sysdev_shutdown();
 	printk(KERN_EMERG "System halted.\n");
+	kmsg_dump(KMSG_DUMP_HALT);
 	machine_halt();
 }
 
@@ -351,6 +356,7 @@
 	disable_nonboot_cpus();
 	sysdev_shutdown();
 	printk(KERN_EMERG "Power down.\n");
+	kmsg_dump(KMSG_DUMP_POWEROFF);
 	machine_power_off();
 }
 EXPORT_SYMBOL_GPL(kernel_power_off);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ae5cbb1..bc86bb3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/signal.h>
+#include <linux/printk.h>
 #include <linux/proc_fs.h>
 #include <linux/security.h>
 #include <linux/ctype.h>
@@ -245,10 +246,6 @@
 		.mode		= 0555,
 		.child		= dev_table,
 	},
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -710,6 +707,15 @@
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		.procname	= "kptr_restrict",
+		.data		= &kptr_restrict,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &two,
+	},
 #endif
 	{
 		.procname	= "ngroups_max",
@@ -962,10 +968,6 @@
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1326,11 +1328,6 @@
 		.extra2		= &one,
 	},
 #endif
-
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1486,10 +1483,6 @@
 		.proc_handler	= &pipe_proc_fn,
 		.extra1		= &pipe_min_size,
 	},
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -2899,7 +2892,7 @@
 	}
 }
 
-#else /* CONFIG_PROC_FS */
+#else /* CONFIG_PROC_SYSCTL */
 
 int proc_dostring(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2951,7 +2944,7 @@
 }
 
 
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_PROC_SYSCTL */
 
 /*
  * No sense putting this after each symbol definition, twice,
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 4b2545a..b875bed 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1192,7 +1192,7 @@
 
 		buf[result] = '\0';
 
-		/* Convert the decnet addresss to binary */
+		/* Convert the decnet address to binary */
 		result = -EIO;
 		nodep = strchr(buf, '.') + 1;
 		if (!nodep)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 3308fd7..3971c6b 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -89,8 +89,7 @@
 		return -ENOMEM;
 
 	if (!info) {
-		int seq = get_cpu_var(taskstats_seqnum)++;
-		put_cpu_var(taskstats_seqnum);
+		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;
 
 		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
 	} else
@@ -349,7 +348,7 @@
 	return ret;
 }
 
-#ifdef CONFIG_IA64
+#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define TASKSTATS_NEEDS_PADDING 1
 #endif
 
@@ -612,7 +611,7 @@
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = &__raw_get_cpu_var(listener_array);
+	listeners = __this_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 
diff --git a/kernel/time.c b/kernel/time.c
index ba9b338..3217435 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -238,7 +238,7 @@
  * Avoid unnecessary multiplications/divisions in the
  * two most common HZ cases:
  */
-unsigned int inline jiffies_to_msecs(const unsigned long j)
+inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 	return (MSEC_PER_SEC / HZ) * j;
@@ -254,7 +254,7 @@
 }
 EXPORT_SYMBOL(jiffies_to_msecs);
 
-unsigned int inline jiffies_to_usecs(const unsigned long j)
+inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index df140cd..c50a034 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -679,7 +679,7 @@
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 
-	/* Intialize mult/shift and max_idle_ns */
+	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_updatefreq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clocksource list */
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index d232189..5c00242 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -14,6 +14,7 @@
 #include <linux/timex.h>
 #include <linux/time.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 
 /*
  * NTP timekeeping variables:
@@ -74,6 +75,162 @@
 /* constant (boot-param configurable) NTP tick adjustment (upscaled)	*/
 static s64			ntp_tick_adj;
 
+#ifdef CONFIG_NTP_PPS
+
+/*
+ * The following variables are used when a pulse-per-second (PPS) signal
+ * is available. They establish the engineering parameters of the clock
+ * discipline loop when controlled by the PPS signal.
+ */
+#define PPS_VALID	10	/* PPS signal watchdog max (s) */
+#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
+#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
+#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
+#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
+				   increase pps_shift or consecutive bad
+				   intervals to decrease it */
+#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */
+
+static int pps_valid;		/* signal watchdog counter */
+static long pps_tf[3];		/* phase median filter */
+static long pps_jitter;		/* current jitter (ns) */
+static struct timespec pps_fbase; /* beginning of the last freq interval */
+static int pps_shift;		/* current interval duration (s) (shift) */
+static int pps_intcnt;		/* interval counter */
+static s64 pps_freq;		/* frequency offset (scaled ns/s) */
+static long pps_stabil;		/* current stability (scaled ns/s) */
+
+/*
+ * PPS signal quality monitors
+ */
+static long pps_calcnt;		/* calibration intervals */
+static long pps_jitcnt;		/* jitter limit exceeded */
+static long pps_stbcnt;		/* stability limit exceeded */
+static long pps_errcnt;		/* calibration errors */
+
+
+/* PPS kernel consumer compensates the whole phase error immediately.
+ * Otherwise, reduce the offset by a fixed factor times the time constant.
+ */
+static inline s64 ntp_offset_chunk(s64 offset)
+{
+	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
+		return offset;
+	else
+		return shift_right(offset, SHIFT_PLL + time_constant);
+}
+
+static inline void pps_reset_freq_interval(void)
+{
+	/* the PPS calibration interval may end
+	   surprisingly early */
+	pps_shift = PPS_INTMIN;
+	pps_intcnt = 0;
+}
+
+/**
+ * pps_clear - Clears the PPS state variables
+ *
+ * Must be called while holding a write on the xtime_lock
+ */
+static inline void pps_clear(void)
+{
+	pps_reset_freq_interval();
+	pps_tf[0] = 0;
+	pps_tf[1] = 0;
+	pps_tf[2] = 0;
+	pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
+	pps_freq = 0;
+}
+
+/* Decrease pps_valid to indicate that another second has passed since
+ * the last PPS signal. When it reaches 0, indicate that the PPS signal is
+ * missing.
+ *
+ * Must be called while holding a write on the xtime_lock
+ */
+static inline void pps_dec_valid(void)
+{
+	if (pps_valid > 0)
+		pps_valid--;
+	else {
+		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+				 STA_PPSWANDER | STA_PPSERROR);
+		pps_clear();
+	}
+}
+
+static inline void pps_set_freq(s64 freq)
+{
+	pps_freq = freq;
+}
+
+static inline int is_error_status(int status)
+{
+	return (time_status & (STA_UNSYNC|STA_CLOCKERR))
+		/* PPS signal lost when either PPS time or
+		 * PPS frequency synchronization requested
+		 */
+		|| ((time_status & (STA_PPSFREQ|STA_PPSTIME))
+			&& !(time_status & STA_PPSSIGNAL))
+		/* PPS jitter exceeded when
+		 * PPS time synchronization requested */
+		|| ((time_status & (STA_PPSTIME|STA_PPSJITTER))
+			== (STA_PPSTIME|STA_PPSJITTER))
+		/* PPS wander exceeded or calibration error when
+		 * PPS frequency synchronization requested
+		 */
+		|| ((time_status & STA_PPSFREQ)
+			&& (time_status & (STA_PPSWANDER|STA_PPSERROR)));
+}
+
+static inline void pps_fill_timex(struct timex *txc)
+{
+	txc->ppsfreq	   = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
+					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
+	txc->jitter	   = pps_jitter;
+	if (!(time_status & STA_NANO))
+		txc->jitter /= NSEC_PER_USEC;
+	txc->shift	   = pps_shift;
+	txc->stabil	   = pps_stabil;
+	txc->jitcnt	   = pps_jitcnt;
+	txc->calcnt	   = pps_calcnt;
+	txc->errcnt	   = pps_errcnt;
+	txc->stbcnt	   = pps_stbcnt;
+}
+
+#else /* !CONFIG_NTP_PPS */
+
+static inline s64 ntp_offset_chunk(s64 offset)
+{
+	return shift_right(offset, SHIFT_PLL + time_constant);
+}
+
+static inline void pps_reset_freq_interval(void) {}
+static inline void pps_clear(void) {}
+static inline void pps_dec_valid(void) {}
+static inline void pps_set_freq(s64 freq) {}
+
+static inline int is_error_status(int status)
+{
+	return status & (STA_UNSYNC|STA_CLOCKERR);
+}
+
+static inline void pps_fill_timex(struct timex *txc)
+{
+	/* PPS is not implemented, so these are zero */
+	txc->ppsfreq	   = 0;
+	txc->jitter	   = 0;
+	txc->shift	   = 0;
+	txc->stabil	   = 0;
+	txc->jitcnt	   = 0;
+	txc->calcnt	   = 0;
+	txc->errcnt	   = 0;
+	txc->stbcnt	   = 0;
+}
+
+#endif /* CONFIG_NTP_PPS */
+
 /*
  * NTP methods:
  */
@@ -185,6 +342,9 @@
 
 	tick_length	= tick_length_base;
 	time_offset	= 0;
+
+	/* Clear PPS state variables */
+	pps_clear();
 }
 
 /*
@@ -250,16 +410,16 @@
 		time_status |= STA_UNSYNC;
 	}
 
-	/*
-	 * Compute the phase adjustment for the next second. The offset is
-	 * reduced by a fixed factor times the time constant.
-	 */
+	/* Compute the phase adjustment for the next second */
 	tick_length	 = tick_length_base;
 
-	delta		 = shift_right(time_offset, SHIFT_PLL + time_constant);
+	delta		 = ntp_offset_chunk(time_offset);
 	time_offset	-= delta;
 	tick_length	+= delta;
 
+	/* Check PPS signal */
+	pps_dec_valid();
+
 	if (!time_adjust)
 		return;
 
@@ -369,6 +529,8 @@
 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
 		time_status = STA_UNSYNC;
+		/* restart PPS frequency calibration */
+		pps_reset_freq_interval();
 	}
 
 	/*
@@ -418,6 +580,8 @@
 		time_freq = txc->freq * PPM_SCALE;
 		time_freq = min(time_freq, MAXFREQ_SCALED);
 		time_freq = max(time_freq, -MAXFREQ_SCALED);
+		/* update pps_freq */
+		pps_set_freq(time_freq);
 	}
 
 	if (txc->modes & ADJ_MAXERROR)
@@ -508,7 +672,8 @@
 	}
 
 	result = time_state;	/* mostly `TIME_OK' */
-	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
+	/* check for errors */
+	if (is_error_status(time_status))
 		result = TIME_ERROR;
 
 	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
@@ -522,15 +687,8 @@
 	txc->tick	   = tick_usec;
 	txc->tai	   = time_tai;
 
-	/* PPS is not implemented, so these are zero */
-	txc->ppsfreq	   = 0;
-	txc->jitter	   = 0;
-	txc->shift	   = 0;
-	txc->stabil	   = 0;
-	txc->jitcnt	   = 0;
-	txc->calcnt	   = 0;
-	txc->errcnt	   = 0;
-	txc->stbcnt	   = 0;
+	/* fill PPS status fields */
+	pps_fill_timex(txc);
 
 	write_sequnlock_irq(&xtime_lock);
 
@@ -544,6 +702,243 @@
 	return result;
 }
 
+#ifdef	CONFIG_NTP_PPS
+
+/* actually struct pps_normtime is good old struct timespec, but it is
+ * semantically different (and it is the reason why it was invented):
+ * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
+ * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
+struct pps_normtime {
+	__kernel_time_t	sec;	/* seconds */
+	long		nsec;	/* nanoseconds */
+};
+
+/* normalize the timestamp so that nsec is in the
+   ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
+static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
+{
+	struct pps_normtime norm = {
+		.sec = ts.tv_sec,
+		.nsec = ts.tv_nsec
+	};
+
+	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
+		norm.nsec -= NSEC_PER_SEC;
+		norm.sec++;
+	}
+
+	return norm;
+}
+
+/* get current phase correction and jitter */
+static inline long pps_phase_filter_get(long *jitter)
+{
+	*jitter = pps_tf[0] - pps_tf[1];
+	if (*jitter < 0)
+		*jitter = -*jitter;
+
+	/* TODO: test various filters */
+	return pps_tf[0];
+}
+
+/* add the sample to the phase filter */
+static inline void pps_phase_filter_add(long err)
+{
+	pps_tf[2] = pps_tf[1];
+	pps_tf[1] = pps_tf[0];
+	pps_tf[0] = err;
+}
+
+/* decrease frequency calibration interval length.
+ * It is halved after four consecutive unstable intervals.
+ */
+static inline void pps_dec_freq_interval(void)
+{
+	if (--pps_intcnt <= -PPS_INTCOUNT) {
+		pps_intcnt = -PPS_INTCOUNT;
+		if (pps_shift > PPS_INTMIN) {
+			pps_shift--;
+			pps_intcnt = 0;
+		}
+	}
+}
+
+/* increase frequency calibration interval length.
+ * It is doubled after four consecutive stable intervals.
+ */
+static inline void pps_inc_freq_interval(void)
+{
+	if (++pps_intcnt >= PPS_INTCOUNT) {
+		pps_intcnt = PPS_INTCOUNT;
+		if (pps_shift < PPS_INTMAX) {
+			pps_shift++;
+			pps_intcnt = 0;
+		}
+	}
+}
+
+/* update clock frequency based on MONOTONIC_RAW clock PPS signal
+ * timestamps
+ *
+ * At the end of the calibration interval the difference between the
+ * first and last MONOTONIC_RAW clock timestamps divided by the length
+ * of the interval becomes the frequency update. If the interval was
+ * too long, the data are discarded.
+ * Returns the difference between old and new frequency values.
+ */
+static long hardpps_update_freq(struct pps_normtime freq_norm)
+{
+	long delta, delta_mod;
+	s64 ftemp;
+
+	/* check if the frequency interval was too long */
+	if (freq_norm.sec > (2 << pps_shift)) {
+		time_status |= STA_PPSERROR;
+		pps_errcnt++;
+		pps_dec_freq_interval();
+		pr_err("hardpps: PPSERROR: interval too long - %ld s\n",
+				freq_norm.sec);
+		return 0;
+	}
+
+	/* here the raw frequency offset and wander (stability) are
+	 * calculated. If the wander is less than the wander threshold
+	 * the interval is increased; otherwise it is decreased.
+	 */
+	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
+			freq_norm.sec);
+	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
+	pps_freq = ftemp;
+	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
+		pr_warning("hardpps: PPSWANDER: change=%ld\n", delta);
+		time_status |= STA_PPSWANDER;
+		pps_stbcnt++;
+		pps_dec_freq_interval();
+	} else {	/* good sample */
+		pps_inc_freq_interval();
+	}
+
+	/* the stability metric is calculated as the average of recent
+	 * frequency changes, but is used only for performance
+	 * monitoring
+	 */
+	delta_mod = delta;
+	if (delta_mod < 0)
+		delta_mod = -delta_mod;
+	pps_stabil += (div_s64(((s64)delta_mod) <<
+				(NTP_SCALE_SHIFT - SHIFT_USEC),
+				NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;
+
+	/* if enabled, the system clock frequency is updated */
+	if ((time_status & STA_PPSFREQ) != 0 &&
+	    (time_status & STA_FREQHOLD) == 0) {
+		time_freq = pps_freq;
+		ntp_update_frequency();
+	}
+
+	return delta;
+}
+
+/* correct REALTIME clock phase error against PPS signal */
+static void hardpps_update_phase(long error)
+{
+	long correction = -error;
+	long jitter;
+
+	/* add the sample to the median filter */
+	pps_phase_filter_add(correction);
+	correction = pps_phase_filter_get(&jitter);
+
+	/* Nominal jitter is due to PPS signal noise. If it exceeds the
+	 * threshold, the sample is discarded; otherwise, if so enabled,
+	 * the time offset is updated.
+	 */
+	if (jitter > (pps_jitter << PPS_POPCORN)) {
+		pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
+		       jitter, (pps_jitter << PPS_POPCORN));
+		time_status |= STA_PPSJITTER;
+		pps_jitcnt++;
+	} else if (time_status & STA_PPSTIME) {
+		/* correct the time using the phase offset */
+		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
+				NTP_INTERVAL_FREQ);
+		/* cancel running adjtime() */
+		time_adjust = 0;
+	}
+	/* update jitter */
+	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
+}
+
+/*
+ * hardpps() - discipline CPU clock oscillator to external PPS signal
+ *
+ * This routine is called at each PPS signal arrival in order to
+ * discipline the CPU clock oscillator to the PPS signal. It takes two
+ * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
+ * is used to correct clock phase error and the latter is used to
+ * correct the frequency.
+ *
+ * This code is based on David Mills's reference nanokernel
+ * implementation. It was mostly rewritten but keeps the same idea.
+ */
+void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+{
+	struct pps_normtime pts_norm, freq_norm;
+	unsigned long flags;
+
+	pts_norm = pps_normalize_ts(*phase_ts);
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+
+	/* clear the error bits, they will be set again if needed */
+	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
+
+	/* indicate signal presence */
+	time_status |= STA_PPSSIGNAL;
+	pps_valid = PPS_VALID;
+
+	/* when called for the first time,
+	 * just start the frequency interval */
+	if (unlikely(pps_fbase.tv_sec == 0)) {
+		pps_fbase = *raw_ts;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		return;
+	}
+
+	/* ok, now we have a base for frequency calculation */
+	freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));
+
+	/* check that the signal is in the range
+	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
+	if ((freq_norm.sec == 0) ||
+			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
+			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
+		time_status |= STA_PPSJITTER;
+		/* restart the frequency calibration interval */
+		pps_fbase = *raw_ts;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		pr_err("hardpps: PPSJITTER: bad pulse\n");
+		return;
+	}
+
+	/* signal is ok */
+
+	/* check if the current frequency interval is finished */
+	if (freq_norm.sec >= (1 << pps_shift)) {
+		pps_calcnt++;
+		/* restart the frequency calibration interval */
+		pps_fbase = *raw_ts;
+		hardpps_update_freq(freq_norm);
+	}
+
+	hardpps_update_phase(pts_norm.nsec);
+
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+EXPORT_SYMBOL(hardpps);
+
+#endif	/* CONFIG_NTP_PPS */
+
 static int __init ntp_tick_adj_setup(char *str)
 {
 	ntp_tick_adj = simple_strtol(str, NULL, 0);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index b6b898d..051bc80 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -49,7 +49,7 @@
  */
 int tick_is_oneshot_available(void)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index aada0e5..5cbc101 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -95,7 +95,7 @@
  */
 int tick_program_event(ktime_t expires, int force)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@
 	int ret;
 
 	local_irq_save(flags);
-	ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+	ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
 	local_irq_restore(flags);
 
 	return ret;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5bb86da..5536aaf 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -288,6 +288,49 @@
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
+#ifdef CONFIG_NTP_PPS
+
+/**
+ * getnstime_raw_and_real - get day and raw monotonic time in timespec format
+ * @ts_raw:	pointer to the timespec to be set to raw monotonic time
+ * @ts_real:	pointer to the timespec to be set to the time of day
+ *
+ * This function reads both the time of day and raw monotonic time at the
+ * same time atomically and stores the resulting timestamps in timespec
+ * format.
+ */
+void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
+{
+	unsigned long seq;
+	s64 nsecs_raw, nsecs_real;
+
+	WARN_ON_ONCE(timekeeping_suspended);
+
+	do {
+		u32 arch_offset;
+
+		seq = read_seqbegin(&xtime_lock);
+
+		*ts_raw = raw_time;
+		*ts_real = xtime;
+
+		nsecs_raw = timekeeping_get_ns_raw();
+		nsecs_real = timekeeping_get_ns();
+
+		/* If arch requires, add in gettimeoffset() */
+		arch_offset = arch_gettimeoffset();
+		nsecs_raw += arch_offset;
+		nsecs_real += arch_offset;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts_raw, nsecs_raw);
+	timespec_add_ns(ts_real, nsecs_real);
+}
+EXPORT_SYMBOL(getnstime_raw_and_real);
+
+#endif /* CONFIG_NTP_PPS */
+
 /**
  * do_gettimeofday - Returns the time of day in a timeval
  * @tv:		pointer to the timeval to be set
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 53f3381..761c510 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
-obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7b8ec02..153562d 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -758,53 +758,58 @@
  * @q:		queue the io is for
  * @bio:	the source bio
  * @what:	the action
+ * @error:	error, if any
  *
  * Description:
  *     Records an action against a bio. Will log the bio offset + size.
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-				     u32 what)
+			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (likely(!bt))
 		return;
 
+	if (!error && !bio_flagged(bio, BIO_UPTODATE))
+		error = EIO;
+
 	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+			error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
 				     struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
-				       struct request_queue *q, struct bio *bio)
+				       struct request_queue *q, struct bio *bio,
+				       int error)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
 					struct request_queue *q,
 					struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
 					 struct request_queue *q,
 					 struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
 				    struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -812,7 +817,7 @@
 				struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
@@ -827,7 +832,7 @@
 				  struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
 
@@ -887,7 +892,7 @@
 }
 
 /**
- * blk_add_trace_remap - Add a trace for a remap operation
+ * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
  * @ignore:	trace callback data parameter (not used)
  * @q:		queue the io is for
  * @bio:	the source bio
@@ -899,9 +904,9 @@
  *     it spans a stripe (or similar). Add a trace for that action.
  *
  **/
-static void blk_add_trace_remap(void *ignore,
-				struct request_queue *q, struct bio *bio,
-				dev_t dev, sector_t from)
+static void blk_add_trace_bio_remap(void *ignore,
+				    struct request_queue *q, struct bio *bio,
+				    dev_t dev, sector_t from)
 {
 	struct blk_trace *bt = q->blk_trace;
 	struct blk_io_trace_remap r;
@@ -1016,7 +1021,7 @@
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_remap(blk_add_trace_remap, NULL);
+	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	WARN_ON(ret);
@@ -1025,7 +1030,7 @@
 static void blk_unregister_tracepoints(void)
 {
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
-	unregister_trace_block_remap(blk_add_trace_remap, NULL);
+	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
 	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
 	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8cf959..dc53ecb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1313,12 +1313,10 @@
 
 	__this_cpu_inc(user_stack_count);
 
-
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		goto out_drop_count;
 	entry	= ring_buffer_event_data(event);
 
 	entry->tgid		= current->tgid;
@@ -1333,8 +1331,8 @@
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
+ out_drop_count:
 	__this_cpu_dec(user_stack_count);
-
  out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e3dfeca..6cf2237 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -53,7 +53,7 @@
  */
 
 /*
- * Function trace entry - function address and parent function addres:
+ * Function trace entry - function address and parent function address:
  */
 FTRACE_ENTRY(function, ftrace_entry,
 
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 562c56e..659732e 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -558,7 +558,7 @@
 static int trace_wakeup_test_thread(void *data)
 {
 	/* Make this a RT thread, doesn't need to be too high */
-	static struct sched_param param = { .sched_priority = 5 };
+	static const struct sched_param param = { .sched_priority = 5 };
 	struct completion *x = data;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 2591583..9da289c 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -12,6 +12,8 @@
 #include <linux/highuid.h>
 #include <linux/cred.h>
 
+static struct kmem_cache *user_ns_cachep __read_mostly;
+
 /*
  * Create a new user namespace, deriving the creator from the user in the
  * passed credentials, and replacing that user with the new root user for the
@@ -26,7 +28,7 @@
 	struct user_struct *root_user;
 	int n;
 
-	ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL);
+	ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL);
 	if (!ns)
 		return -ENOMEM;
 
@@ -38,7 +40,7 @@
 	/* Alloc new root user.  */
 	root_user = alloc_uid(ns, 0);
 	if (!root_user) {
-		kfree(ns);
+		kmem_cache_free(user_ns_cachep, ns);
 		return -ENOMEM;
 	}
 
@@ -71,7 +73,7 @@
 	struct user_namespace *ns =
 		container_of(work, struct user_namespace, destroyer);
 	free_uid(ns->creator);
-	kfree(ns);
+	kmem_cache_free(user_ns_cachep, ns);
 }
 
 void free_user_ns(struct kref *kref)
@@ -126,3 +128,10 @@
 	/* No useful relationship so no mapping */
 	return overflowgid;
 }
+
+static __init int user_namespaces_init(void)
+{
+	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
+	return 0;
+}
+module_init(user_namespaces_init);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e7b575..d7ebdf4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -118,12 +118,12 @@
 {
 	int this_cpu = smp_processor_id();
 
-	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -167,12 +167,12 @@
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return 0;
 }
 #endif
@@ -205,8 +205,8 @@
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (__get_cpu_var(watchdog_nmi_touch) == true) {
-		__get_cpu_var(watchdog_nmi_touch) = false;
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
 	}
 
@@ -220,7 +220,7 @@
 		int this_cpu = smp_processor_id();
 
 		/* only print hardlockups once */
-		if (__get_cpu_var(hard_watchdog_warn) == true)
+		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
 		if (hardlockup_panic)
@@ -228,16 +228,16 @@
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-		__get_cpu_var(hard_watchdog_warn) = true;
+		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
 
-	__get_cpu_var(hard_watchdog_warn) = false;
+	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
 static void watchdog_interrupt_count(void)
 {
-	__get_cpu_var(hrtimer_interrupts)++;
+	__this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -246,7 +246,7 @@
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
 
@@ -254,18 +254,18 @@
 	watchdog_interrupt_count();
 
 	/* kick the softlockup detector */
-	wake_up_process(__get_cpu_var(softlockup_watchdog));
+	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
 			/*
 			 * If the time stamp was touched atomically
 			 * make sure the scheduler tick is up to date.
 			 */
-			__get_cpu_var(softlockup_touch_sync) = false;
+			__this_cpu_write(softlockup_touch_sync, false);
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -281,7 +281,7 @@
 	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
-		if (__get_cpu_var(soft_watchdog_warn) == true)
+		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -296,9 +296,9 @@
 
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
-		__get_cpu_var(soft_watchdog_warn) = true;
+		__this_cpu_write(soft_watchdog_warn, true);
 	} else
-		__get_cpu_var(soft_watchdog_warn) = false;
+		__this_cpu_write(soft_watchdog_warn, false);
 
 	return HRTIMER_RESTART;
 }
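
The conversion pattern above is the same throughout: instead of forming a pointer to this CPU's slot and dereferencing it, a single __this_cpu_*() operation is used, which on some architectures (e.g. x86) can compile down to one segment-prefixed instruction. A minimal sketch of the two styles with a hypothetical per-cpu variable (callers are assumed to be pinned to a CPU, as in the watchdog code):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_ts);	/* hypothetical per-cpu variable */

static void demo_touch(void)
{
	/* Old style: build a pointer to this CPU's copy, then store through it. */
	__get_cpu_var(demo_ts) = 0;

	/* New style: one this_cpu operation, no intermediate pointer. */
	__this_cpu_write(demo_ts, 0);
	__this_cpu_inc(demo_ts);
}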
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e785b0f..8ee6ec8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -932,6 +932,38 @@
 		wake_up_worker(gcwq);
 }
 
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
+ */
+static bool is_chained_work(struct workqueue_struct *wq)
+{
+	unsigned long flags;
+	unsigned int cpu;
+
+	for_each_gcwq_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker *worker;
+		struct hlist_node *pos;
+		int i;
+
+		spin_lock_irqsave(&gcwq->lock, flags);
+		for_each_busy_worker(worker, i, pos, gcwq) {
+			if (worker->task != current)
+				continue;
+			spin_unlock_irqrestore(&gcwq->lock, flags);
+			/*
+			 * I'm @worker, no locking necessary.  See if @work
+			 * is headed to the same workqueue.
+			 */
+			return worker->current_cwq->wq == wq;
+		}
+		spin_unlock_irqrestore(&gcwq->lock, flags);
+	}
+	return false;
+}
+
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -943,7 +975,9 @@
 
 	debug_work_activate(work);
 
-	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+	/* if dying, only works from the same workqueue are allowed */
+	if (unlikely(wq->flags & WQ_DYING) &&
+	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
 	/* determine gcwq to use */
@@ -2936,11 +2970,35 @@
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
+	/*
+	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
+	 * set, only chain queueing is allowed.  IOW, only currently
+	 * pending or running work items on @wq can queue further work
+	 * items on it.  @wq is flushed repeatedly until it becomes empty.
+	 * The number of flushes is determined by the depth of chaining and
+	 * should be relatively short.  Whine if it takes too long.
+	 */
 	wq->flags |= WQ_DYING;
+reflush:
 	flush_workqueue(wq);
 
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			printk(KERN_WARNING "workqueue %s: flush on "
+			       "destruction isn't complete after %u tries\n",
+			       wq->name, flush_cnt);
+		goto reflush;
+	}
+
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
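
Concretely, "chain queueing" means a work item that, while running on @wq, queues further work on the same @wq. A hedged sketch of such a self-requeueing item (all names are hypothetical):

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* hypothetical workqueue */

static bool demo_have_more_steps(void);		/* hypothetical condition */
static void demo_step(struct work_struct *work);
static DECLARE_WORK(demo_work, demo_step);

static void demo_step(struct work_struct *work)
{
	/*
	 * Queued from a work item already running on demo_wq, so this is
	 * chained work: it remains allowed even after WQ_DYING is set, and
	 * destroy_workqueue() keeps reflushing until the chain ends.
	 */
	if (demo_have_more_steps())
		queue_work(demo_wq, &demo_work);
}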
diff --git a/lib/Kconfig b/lib/Kconfig
index 3116aa6..0ee67e0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -106,6 +106,8 @@
 config LZO_DECOMPRESS
 	tristate
 
+source "lib/xz/Kconfig"
+
 #
 # These all provide a common interface (hence the apparent duplication with
 # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
@@ -120,6 +122,10 @@
 config DECOMPRESS_LZMA
 	tristate
 
+config DECOMPRESS_XZ
+	select XZ_DEC
+	tristate
+
 config DECOMPRESS_LZO
 	select LZO_DECOMPRESS
 	tristate
diff --git a/lib/Makefile b/lib/Makefile
index d7b6e30..cbb774f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o flex_array.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -21,7 +21,7 @@
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
+	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -69,11 +69,13 @@
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
 
 lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
 lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
 lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
 lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
 
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
diff --git a/lib/decompress.c b/lib/decompress.c
index a760681..3d766b7 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -8,6 +8,7 @@
 
 #include <linux/decompress/bunzip2.h>
 #include <linux/decompress/unlzma.h>
+#include <linux/decompress/unxz.h>
 #include <linux/decompress/inflate.h>
 #include <linux/decompress/unlzo.h>
 
@@ -23,6 +24,9 @@
 #ifndef CONFIG_DECOMPRESS_LZMA
 # define unlzma NULL
 #endif
+#ifndef CONFIG_DECOMPRESS_XZ
+# define unxz NULL
+#endif
 #ifndef CONFIG_DECOMPRESS_LZO
 # define unlzo NULL
 #endif
@@ -36,6 +40,7 @@
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
 	{ {0x5d, 0x00}, "lzma", unlzma },
+	{ {0xfd, 0x37}, "xz", unxz },
 	{ {0x89, 0x4c}, "lzo", unlzo },
 	{ {0, 0}, NULL, NULL }
 };
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 81c8bb1..a7b80c1 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -49,7 +49,6 @@
 #define PREBOOT
 #else
 #include <linux/decompress/bunzip2.h>
-#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
@@ -682,13 +681,12 @@
 			int(*flush)(void*, unsigned int),
 			unsigned char *outbuf,
 			int *pos,
-			void(*error_fn)(char *x))
+			void(*error)(char *x))
 {
 	struct bunzip_data *bd;
 	int i = -1;
 	unsigned char *inbuf;
 
-	set_error_fn(error_fn);
 	if (flush)
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
 
@@ -751,8 +749,8 @@
 			int(*flush)(void*, unsigned int),
 			unsigned char *outbuf,
 			int *pos,
-			void(*error_fn)(char *x))
+			void(*error)(char *x))
 {
-	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
 }
 #endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index fc686c7..19ff89e 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,7 +19,6 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
-#include <linux/slab.h>
 
 #endif /* STATIC */
 
@@ -27,7 +26,7 @@
 
 #define GZIP_IOBUF_SIZE (16*1024)
 
-static int nofill(void *buffer, unsigned int len)
+static int INIT nofill(void *buffer, unsigned int len)
 {
 	return -1;
 }
@@ -38,13 +37,12 @@
 		       int(*flush)(void*, unsigned int),
 		       unsigned char *out_buf,
 		       int *pos,
-		       void(*error_fn)(char *x)) {
+		       void(*error)(char *x)) {
 	u8 *zbuf;
 	struct z_stream_s *strm;
 	int rc;
 	size_t out_len;
 
-	set_error_fn(error_fn);
 	rc = -1;
 	if (flush) {
 		out_len = 0x8000; /* 32 K */
@@ -100,13 +98,22 @@
 	 * possible asciz filename)
 	 */
 	strm->next_in = zbuf + 10;
+	strm->avail_in = len - 10;
 	/* skip over asciz filename */
 	if (zbuf[3] & 0x8) {
-		while (strm->next_in[0])
-			strm->next_in++;
-		strm->next_in++;
+		do {
+			/*
+			 * If the filename doesn't fit into the buffer,
+			 * the file is very probably corrupt. Don't try
+			 * to read more data.
+			 */
+			if (strm->avail_in == 0) {
+				error("header error");
+				goto gunzip_5;
+			}
+			--strm->avail_in;
+		} while (*strm->next_in++);
 	}
-	strm->avail_in = len - (strm->next_in - zbuf);
 
 	strm->next_out = out_buf;
 	strm->avail_out = out_len;
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index ca82fde..476c65a 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -33,7 +33,6 @@
 #define PREBOOT
 #else
 #include <linux/decompress/unlzma.h>
-#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
@@ -74,6 +73,7 @@
 	uint32_t code;
 	uint32_t range;
 	uint32_t bound;
+	void (*error)(char *);
 };
 
 
@@ -82,7 +82,7 @@
 #define RC_MODEL_TOTAL_BITS 11
 
 
-static int nofill(void *buffer, unsigned int len)
+static int INIT nofill(void *buffer, unsigned int len)
 {
 	return -1;
 }
@@ -92,7 +92,7 @@
 {
 	rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
 	if (rc->buffer_size <= 0)
-		error("unexpected EOF");
+		rc->error("unexpected EOF");
 	rc->ptr = rc->buffer;
 	rc->buffer_end = rc->buffer + rc->buffer_size;
 }
@@ -127,12 +127,6 @@
 }
 
 
-/* Called once. TODO: bb_maybe_free() */
-static inline void INIT rc_free(struct rc *rc)
-{
-	free(rc->buffer);
-}
-
 /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
 static void INIT rc_do_normalize(struct rc *rc)
 {
@@ -169,7 +163,7 @@
 	rc->range = rc->bound;
 	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
 }
-static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
+static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
 {
 	rc->range -= rc->bound;
 	rc->code -= rc->bound;
@@ -319,32 +313,38 @@
 
 }
 
-static inline void INIT write_byte(struct writer *wr, uint8_t byte)
+static inline int INIT write_byte(struct writer *wr, uint8_t byte)
 {
 	wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
 	if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
 		wr->buffer_pos = 0;
 		wr->global_pos += wr->header->dict_size;
-		wr->flush((char *)wr->buffer, wr->header->dict_size);
+		if (wr->flush((char *)wr->buffer, wr->header->dict_size)
+				!= wr->header->dict_size)
+			return -1;
 	}
+	return 0;
 }
 
 
-static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
+static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
 {
-	write_byte(wr, peek_old_byte(wr, offs));
+	return write_byte(wr, peek_old_byte(wr, offs));
 }
 
-static inline void INIT copy_bytes(struct writer *wr,
+static inline int INIT copy_bytes(struct writer *wr,
 					 uint32_t rep0, int len)
 {
 	do {
-		copy_byte(wr, rep0);
+		if (copy_byte(wr, rep0))
+			return -1;
 		len--;
 	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
+
+	return len;
 }
 
-static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
+static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
 				     struct cstate *cst, uint16_t *p,
 				     int pos_state, uint16_t *prob,
 				     int lc, uint32_t literal_pos_mask) {
@@ -378,16 +378,17 @@
 		uint16_t *prob_lit = prob + mi;
 		rc_get_bit(rc, prob_lit, &mi);
 	}
-	write_byte(wr, mi);
 	if (cst->state < 4)
 		cst->state = 0;
 	else if (cst->state < 10)
 		cst->state -= 3;
 	else
 		cst->state -= 6;
+
+	return write_byte(wr, mi);
 }
 
-static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
+static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
 					    struct cstate *cst, uint16_t *p,
 					    int pos_state, uint16_t *prob) {
   int offset;
@@ -418,8 +419,7 @@
 
 				cst->state = cst->state < LZMA_NUM_LIT_STATES ?
 					9 : 11;
-				copy_byte(wr, cst->rep0);
-				return;
+				return copy_byte(wr, cst->rep0);
 			} else {
 				rc_update_bit_1(rc, prob);
 			}
@@ -521,12 +521,15 @@
 		} else
 			cst->rep0 = pos_slot;
 		if (++(cst->rep0) == 0)
-			return;
+			return 0;
+		if (cst->rep0 > wr->header->dict_size
+				|| cst->rep0 > get_pos(wr))
+			return -1;
 	}
 
 	len += LZMA_MATCH_MIN_LEN;
 
-	copy_bytes(wr, cst->rep0, len);
+	return copy_bytes(wr, cst->rep0, len);
 }
 
 
@@ -536,7 +539,7 @@
 			      int(*flush)(void*, unsigned int),
 			      unsigned char *output,
 			      int *posp,
-			      void(*error_fn)(char *x)
+			      void(*error)(char *x)
 	)
 {
 	struct lzma_header header;
@@ -552,7 +555,7 @@
 	unsigned char *inbuf;
 	int ret = -1;
 
-	set_error_fn(error_fn);
+	rc.error = error;
 
 	if (buf)
 		inbuf = buf;
@@ -580,8 +583,10 @@
 		((unsigned char *)&header)[i] = *rc.ptr++;
 	}
 
-	if (header.pos >= (9 * 5 * 5))
+	if (header.pos >= (9 * 5 * 5)) {
 		error("bad header");
+		goto exit_1;
+	}
 
 	mi = 0;
 	lc = header.pos;
@@ -627,21 +632,29 @@
 		int pos_state =	get_pos(&wr) & pos_state_mask;
 		uint16_t *prob = p + LZMA_IS_MATCH +
 			(cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
-		if (rc_is_bit_0(&rc, prob))
-			process_bit0(&wr, &rc, &cst, p, pos_state, prob,
-				     lc, literal_pos_mask);
-		else {
-			process_bit1(&wr, &rc, &cst, p, pos_state, prob);
+		if (rc_is_bit_0(&rc, prob)) {
+			if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
+					lc, literal_pos_mask)) {
+				error("LZMA data is corrupt");
+				goto exit_3;
+			}
+		} else {
+			if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
+				error("LZMA data is corrupt");
+				goto exit_3;
+			}
 			if (cst.rep0 == 0)
 				break;
 		}
+		if (rc.buffer_size <= 0)
+			goto exit_3;
 	}
 
 	if (posp)
 		*posp = rc.ptr-rc.buffer;
-	if (wr.flush)
-		wr.flush(wr.buffer, wr.buffer_pos);
-	ret = 0;
+	if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos)
+		ret = 0;
+exit_3:
 	large_free(p);
 exit_2:
 	if (!output)
@@ -659,9 +672,9 @@
 			      int(*flush)(void*, unsigned int),
 			      unsigned char *output,
 			      int *posp,
-			      void(*error_fn)(char *x)
+			      void(*error)(char *x)
 	)
 {
-	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
 }
 #endif
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index bcb3a4b..5a7a2ad 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -33,7 +33,6 @@
 #ifdef STATIC
 #include "lzo/lzo1x_decompress.c"
 #else
-#include <linux/slab.h>
 #include <linux/decompress/unlzo.h>
 #endif
 
@@ -49,14 +48,25 @@
 
 #define LZO_BLOCK_SIZE        (256*1024l)
 #define HEADER_HAS_FILTER      0x00000800L
+#define HEADER_SIZE_MIN       (9 + 7     + 4 + 8     + 1       + 4)
+#define HEADER_SIZE_MAX       (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
 
-STATIC inline int INIT parse_header(u8 *input, u8 *skip)
+STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
 {
 	int l;
 	u8 *parse = input;
+	u8 *end = input + in_len;
 	u8 level = 0;
 	u16 version;
 
+	/*
+	 * Check that there's enough input to possibly have a valid header.
+	 * Since at least the minimum size is available, several fields can
+	 * then be parsed before the next length check is needed.
+	 */
+	if (in_len < HEADER_SIZE_MIN)
+		return 0;
+
 	/* read magic: 9 first bits */
 	for (l = 0; l < 9; l++) {
 		if (*parse++ != lzop_magic[l])
@@ -74,6 +84,15 @@
 	else
 		parse += 4; /* flags */
 
+	/*
+	 * At least mode, mtime_low, filename length, and checksum must
+	 * be left to be parsed. If also mtime_high is present, it's OK
+	 * because the next input buffer check is after reading the
+	 * filename length.
+	 */
+	if (end - parse < 8 + 1 + 4)
+		return 0;
+
 	/* skip mode and mtime_low */
 	parse += 8;
 	if (version >= 0x0940)
@@ -81,6 +100,8 @@
 
 	l = *parse++;
 	/* don't care about the file name, and skip checksum */
+	if (end - parse < l + 4)
+		return 0;
 	parse += l + 4;
 
 	*skip = parse - input;
@@ -91,16 +112,15 @@
 				int (*fill) (void *, unsigned int),
 				int (*flush) (void *, unsigned int),
 				u8 *output, int *posp,
-				void (*error_fn) (char *x))
+				void (*error) (char *x))
 {
-	u8 skip = 0, r = 0;
+	u8 r = 0;
+	int skip = 0;
 	u32 src_len, dst_len;
 	size_t tmp;
 	u8 *in_buf, *in_buf_save, *out_buf;
 	int ret = -1;
 
-	set_error_fn(error_fn);
-
 	if (output) {
 		out_buf = output;
 	} else if (!flush) {
@@ -119,8 +139,8 @@
 		goto exit_1;
 	} else if (input) {
 		in_buf = input;
-	} else if (!fill || !posp) {
-		error("NULL input pointer and missing position pointer or fill function");
+	} else if (!fill) {
+		error("NULL input pointer and missing fill function");
 		goto exit_1;
 	} else {
 		in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
@@ -134,22 +154,47 @@
 	if (posp)
 		*posp = 0;
 
-	if (fill)
-		fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+	if (fill) {
+		/*
+		 * Start from in_buf + HEADER_SIZE_MAX to make it possible
+		 * to use memcpy() to copy the unused data to the beginning
+		 * of the buffer. This way memmove(), which is missing from
+		 * the pre-boot environments of most archs, isn't needed.
+		 */
+		in_buf += HEADER_SIZE_MAX;
+		in_len = fill(in_buf, HEADER_SIZE_MAX);
+	}
 
-	if (!parse_header(input, &skip)) {
+	if (!parse_header(in_buf, &skip, in_len)) {
 		error("invalid header");
 		goto exit_2;
 	}
 	in_buf += skip;
+	in_len -= skip;
+
+	if (fill) {
+		/* Move the unused data to the beginning of the buffer. */
+		memcpy(in_buf_save, in_buf, in_len);
+		in_buf = in_buf_save;
+	}
 
 	if (posp)
 		*posp = skip;
 
 	for (;;) {
 		/* read uncompressed block size */
+		if (fill && in_len < 4) {
+			skip = fill(in_buf + in_len, 4 - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < 4) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		dst_len = get_unaligned_be32(in_buf);
 		in_buf += 4;
+		in_len -= 4;
 
 		/* exit if last block */
 		if (dst_len == 0) {
@@ -164,8 +209,18 @@
 		}
 
 		/* read compressed block size, and skip block checksum info */
+		if (fill && in_len < 8) {
+			skip = fill(in_buf + in_len, 8 - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < 8) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		src_len = get_unaligned_be32(in_buf);
 		in_buf += 8;
+		in_len -= 8;
 
 		if (src_len <= 0 || src_len > dst_len) {
 			error("file corrupted");
@@ -173,6 +228,15 @@
 		}
 
 		/* decompress */
+		if (fill && in_len < src_len) {
+			skip = fill(in_buf + in_len, src_len - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < src_len) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		tmp = dst_len;
 
 		/* When the input data is not compressed at all,
@@ -190,17 +254,26 @@
 			}
 		}
 
-		if (flush)
-			flush(out_buf, dst_len);
+		if (flush && flush(out_buf, dst_len) != dst_len)
+			goto exit_2;
 		if (output)
 			out_buf += dst_len;
 		if (posp)
 			*posp += src_len + 12;
+
+		in_buf += src_len;
+		in_len -= src_len;
 		if (fill) {
+			/*
+			 * If there happens to still be unused data left in
+			 * in_buf, move it to the beginning of the buffer.
+			 * Use a loop to avoid memmove() dependency.
+			 */
+			if (in_len > 0)
+				for (skip = 0; skip < in_len; ++skip)
+					in_buf_save[skip] = in_buf[skip];
 			in_buf = in_buf_save;
-			fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
-		} else
-			in_buf += src_len;
+		}
 	}
 
 	ret = 0;
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
new file mode 100644
index 0000000..cecd23d
--- /dev/null
+++ b/lib/decompress_unxz.c
@@ -0,0 +1,397 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
+ * Note that the margin with XZ is bigger than with Deflate (gzip)!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * uncompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding uncompressible data.
+ *
+ * The structure of the .xz file in case of a compressed kernel is as follows.
+ * Sizes (in bytes) of the fields are in parentheses.
+ *
+ *    Stream Header (12)
+ *    Block Header:
+ *      Block Header (8-12)
+ *      Compressed Data (N)
+ *      Block Padding (0-3)
+ *      CRC32 (4)
+ *    Index (8-20)
+ *    Stream Footer (12)
+ *
+ * Normally there is exactly one Block, but let's assume that there are
+ * 2-4 Blocks just in case. Because Stream Header and also Block Header
+ * of the first Block don't make the decompressor produce any uncompressed
+ * data, we can ignore them from our calculations. Block Headers of possible
+ * additional Blocks have to be taken into account still. With these
+ * assumptions, it is safe to assume that the total header overhead is
+ * less than 128 bytes.
+ *
+ * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
+ * doesn't change the size of the data, it is enough to calculate the
+ * safety margin for LZMA2.
+ *
+ * LZMA2 stores the data in chunks. Each chunk has a header whose size is
+ * a maximum of 6 bytes, but to stick to round 2^n numbers, let's assume that
+ * the maximum chunk header size is 8 bytes. After the chunk header, there
+ * may be up to 64 KiB of actual payload in the chunk. Often the payload is
+ * quite a bit smaller though; to be safe, let's assume that an average
+ * chunk has only 32 KiB of payload.
+ *
+ * The maximum uncompressed size of the payload is 2 MiB. The minimum
+ * uncompressed size of the payload is in practice never less than the
+ * payload size itself. The LZMA2 format would allow uncompressed size
+ * to be less than the payload size, but no sane compressor creates such
+ * files. LZMA2 supports storing uncompressible data in uncompressed form,
+ * so there's never a need to create payloads whose uncompressed size is
+ * smaller than the compressed size.
+ *
+ * The assumption, that the uncompressed size of the payload is never
+ * smaller than the payload itself, is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ *   - 128 bytes for the .xz file format headers;
+ *   - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
+ *     per chunk, each chunk having average payload size of 32 KiB); and
+ *   - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
+ *     the decompressor never overwrites anything from the LZMA2 chunk
+ *     payload it is currently reading.
+ *
+ * We get the following formula:
+ *
+ *    safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
+ *                  = 128 + (uncompressed_size >> 12) + 65536
+ *
+ * For comparison, according to arch/x86/boot/compressed/misc.c, the
+ * equivalent formula for Deflate is this:
+ *
+ *    safety_margin = 18 + (uncompressed_size >> 12) + 32768
+ *
+ * Thus, when updating Deflate-only in-place kernel decompressor to
+ * support XZ, the fixed overhead has to be increased from 18+32768 bytes
+ * to 128+65536 bytes.
+ */
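
As a quick sanity check of the formula: for a 16 MiB uncompressed image the margin is 128 + (16 MiB >> 12) + 65536 = 69760 bytes, i.e. roughly 68 KiB. A minimal sketch of the calculation (hypothetical helper, not part of the decompressor):

/* safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536 */
static unsigned long xz_inplace_margin(unsigned long uncompressed_size)
{
	return 128 + (uncompressed_size >> 12) + 65536;
}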
+
+/*
+ * STATIC is defined to "static" if we are being built for kernel
+ * decompression (pre-boot code). <linux/decompress/mm.h> will define
+ * STATIC to empty if it wasn't already defined. Since we will need to
+ * know later if we are being used for kernel decompression, we define
+ * XZ_PREBOOT here.
+ */
+#ifdef STATIC
+#	define XZ_PREBOOT
+#endif
+#ifdef __KERNEL__
+#	include <linux/decompress/mm.h>
+#endif
+#define XZ_EXTERN STATIC
+
+#ifndef XZ_PREBOOT
+#	include <linux/slab.h>
+#	include <linux/xz.h>
+#else
+/*
+ * Use the internal CRC32 code instead of kernel's CRC32 module, which
+ * is not available in early phase of booting.
+ */
+#define XZ_INTERNAL_CRC32 1
+
+/*
+ * For boot time use, we enable only the BCJ filter of the current
+ * architecture or none if no BCJ filter is available for the architecture.
+ */
+#ifdef CONFIG_X86
+#	define XZ_DEC_X86
+#endif
+#ifdef CONFIG_PPC
+#	define XZ_DEC_POWERPC
+#endif
+#ifdef CONFIG_ARM
+#	define XZ_DEC_ARM
+#endif
+#ifdef CONFIG_IA64
+#	define XZ_DEC_IA64
+#endif
+#ifdef CONFIG_SPARC
+#	define XZ_DEC_SPARC
+#endif
+
+/*
+ * This will get the basic headers so that memeq() and others
+ * can be defined.
+ */
+#include "xz/xz_private.h"
+
+/*
+ * Replace the normal allocation functions with the versions from
+ * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
+ * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
+ * Work around it here because the other decompressors don't need it.
+ */
+#undef kmalloc
+#undef kfree
+#undef vmalloc
+#undef vfree
+#define kmalloc(size, flags) malloc(size)
+#define kfree(ptr) free(ptr)
+#define vmalloc(size) malloc(size)
+#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)
+
+/*
+ * FIXME: Not all basic memory functions are provided in architecture-specific
+ * files (yet). We define our own versions here for now, but this should be
+ * only a temporary solution.
+ *
+ * memeq and memzero are not used much and any remotely sane implementation
+ * is fast enough. memcpy/memmove speed matters in multi-call mode, but
+ * the kernel image is decompressed in single-call mode, in which only
+ * memcpy speed can matter and only if there is a lot of uncompressible data
+ * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
+ * functions below should just be kept small; it's probably not worth
+ * optimizing for speed.
+ */
+
+#ifndef memeq
+static bool memeq(const void *a, const void *b, size_t size)
+{
+	const uint8_t *x = a;
+	const uint8_t *y = b;
+	size_t i;
+
+	for (i = 0; i < size; ++i)
+		if (x[i] != y[i])
+			return false;
+
+	return true;
+}
+#endif
+
+#ifndef memzero
+static void memzero(void *buf, size_t size)
+{
+	uint8_t *b = buf;
+	uint8_t *e = b + size;
+
+	while (b != e)
+		*b++ = '\0';
+}
+#endif
+
+#ifndef memmove
+/* Not static to avoid a conflict with the prototype in the Linux headers. */
+void *memmove(void *dest, const void *src, size_t size)
+{
+	uint8_t *d = dest;
+	const uint8_t *s = src;
+	size_t i;
+
+	if (d < s) {
+		for (i = 0; i < size; ++i)
+			d[i] = s[i];
+	} else if (d > s) {
+		i = size;
+		while (i-- > 0)
+			d[i] = s[i];
+	}
+
+	return dest;
+}
+#endif
+
+/*
+ * Since we need memmove anyway, it could also be used as memcpy here.
+ * This is commented out for now to avoid breaking things.
+ */
+/*
+#ifndef memcpy
+#	define memcpy memmove
+#endif
+*/
+
+#include "xz/xz_crc32.c"
+#include "xz/xz_dec_stream.c"
+#include "xz/xz_dec_lzma2.c"
+#include "xz/xz_dec_bcj.c"
+
+#endif /* XZ_PREBOOT */
+
+/* Size of the input and output buffers in multi-call mode */
+#define XZ_IOBUF_SIZE 4096
+
+/*
+ * This function implements the API defined in <linux/decompress/generic.h>.
+ *
+ * This wrapper will automatically choose single-call or multi-call mode
+ * of the native XZ decoder API. The single-call mode can be used only when
+ * both input and output buffers are available as a single chunk, i.e. when
+ * fill() and flush() won't be used.
+ */
+STATIC int INIT unxz(unsigned char *in, int in_size,
+		     int (*fill)(void *dest, unsigned int size),
+		     int (*flush)(void *src, unsigned int size),
+		     unsigned char *out, int *in_used,
+		     void (*error)(char *x))
+{
+	struct xz_buf b;
+	struct xz_dec *s;
+	enum xz_ret ret;
+	bool must_free_in = false;
+
+#if XZ_INTERNAL_CRC32
+	xz_crc32_init();
+#endif
+
+	if (in_used != NULL)
+		*in_used = 0;
+
+	if (fill == NULL && flush == NULL)
+		s = xz_dec_init(XZ_SINGLE, 0);
+	else
+		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);
+
+	if (s == NULL)
+		goto error_alloc_state;
+
+	if (flush == NULL) {
+		b.out = out;
+		b.out_size = (size_t)-1;
+	} else {
+		b.out_size = XZ_IOBUF_SIZE;
+		b.out = malloc(XZ_IOBUF_SIZE);
+		if (b.out == NULL)
+			goto error_alloc_out;
+	}
+
+	if (in == NULL) {
+		must_free_in = true;
+		in = malloc(XZ_IOBUF_SIZE);
+		if (in == NULL)
+			goto error_alloc_in;
+	}
+
+	b.in = in;
+	b.in_pos = 0;
+	b.in_size = in_size;
+	b.out_pos = 0;
+
+	if (fill == NULL && flush == NULL) {
+		ret = xz_dec_run(s, &b);
+	} else {
+		do {
+			if (b.in_pos == b.in_size && fill != NULL) {
+				if (in_used != NULL)
+					*in_used += b.in_pos;
+
+				b.in_pos = 0;
+
+				in_size = fill(in, XZ_IOBUF_SIZE);
+				if (in_size < 0) {
+					/*
+					 * This isn't an optimal error code
+					 * but it probably isn't worth making
+					 * a new one either.
+					 */
+					ret = XZ_BUF_ERROR;
+					break;
+				}
+
+				b.in_size = in_size;
+			}
+
+			ret = xz_dec_run(s, &b);
+
+			if (flush != NULL && (b.out_pos == b.out_size
+					|| (ret != XZ_OK && b.out_pos > 0))) {
+				/*
+				 * Setting ret here may hide an error
+				 * returned by xz_dec_run(), but probably
+				 * it's not too bad.
+				 */
+				if (flush(b.out, b.out_pos) != (int)b.out_pos)
+					ret = XZ_BUF_ERROR;
+
+				b.out_pos = 0;
+			}
+		} while (ret == XZ_OK);
+
+		if (must_free_in)
+			free(in);
+
+		if (flush != NULL)
+			free(b.out);
+	}
+
+	if (in_used != NULL)
+		*in_used += b.in_pos;
+
+	xz_dec_end(s);
+
+	switch (ret) {
+	case XZ_STREAM_END:
+		return 0;
+
+	case XZ_MEM_ERROR:
+		/* This can occur only in multi-call mode. */
+		error("XZ decompressor ran out of memory");
+		break;
+
+	case XZ_FORMAT_ERROR:
+		error("Input is not in the XZ format (wrong magic bytes)");
+		break;
+
+	case XZ_OPTIONS_ERROR:
+		error("Input was encoded with settings that are not "
+				"supported by this XZ decoder");
+		break;
+
+	case XZ_DATA_ERROR:
+	case XZ_BUF_ERROR:
+		error("XZ-compressed data is corrupt");
+		break;
+
+	default:
+		error("Bug in the XZ decompressor");
+		break;
+	}
+
+	return -1;
+
+error_alloc_in:
+	if (flush != NULL)
+		free(b.out);
+
+error_alloc_out:
+	xz_dec_end(s);
+
+error_alloc_state:
+	error("XZ decompressor ran out of memory");
+	return -1;
+}
+
+/*
+ * This macro is used by architecture-specific files to decompress
+ * the kernel image.
+ */
+#define decompress unxz
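
For reference, in single-call mode (no fill() and no flush(), i.e. when the whole input and output fit in memory) the wrapper is invoked roughly as below; the buffer names and error reporter are hypothetical:

static void demo_error(char *msg)
{
	/* hypothetical error reporter, e.g. a thin wrapper around printk() */
}

static int demo_decompress(unsigned char *in_buf, int in_len,
			   unsigned char *out_buf)
{
	/* fill == NULL and flush == NULL selects XZ_SINGLE mode. */
	return unxz(in_buf, in_len, NULL, NULL, out_buf, NULL, demo_error);
}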
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3094318..b335acb 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -141,11 +141,10 @@
 			else if (!dp->flags)
 				dt->num_enabled++;
 			dp->flags = newflags;
-			if (newflags) {
-				jump_label_enable(&dp->enabled);
-			} else {
-				jump_label_disable(&dp->enabled);
-			}
+			if (newflags)
+				dp->enabled = 1;
+			else
+				dp->enabled = 0;
 			if (verbose)
 				printk(KERN_INFO
 					"ddebug: changed %s:%d [%s]%s %s\n",
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 77a6fea..c0ea40b 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -23,6 +23,7 @@
 #include <linux/flex_array.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
+#include <linux/module.h>
 
 struct flex_array_part {
 	char elements[FLEX_ARRAY_PART_SIZE];
@@ -103,6 +104,7 @@
 						FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
+EXPORT_SYMBOL(flex_array_alloc);
 
 static int fa_element_to_part_nr(struct flex_array *fa,
 					unsigned int element_nr)
@@ -126,12 +128,14 @@
 	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
 		kfree(fa->parts[part_nr]);
 }
+EXPORT_SYMBOL(flex_array_free_parts);
 
 void flex_array_free(struct flex_array *fa)
 {
 	flex_array_free_parts(fa);
 	kfree(fa);
 }
+EXPORT_SYMBOL(flex_array_free);
 
 static unsigned int index_inside_part(struct flex_array *fa,
 					unsigned int element_nr)
@@ -196,6 +200,7 @@
 	memcpy(dst, src, fa->element_size);
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_put);
 
 /**
  * flex_array_clear - clear element in array at @element_nr
@@ -223,6 +228,7 @@
 	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_clear);
 
 /**
  * flex_array_prealloc - guarantee that array space exists
@@ -259,6 +265,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_prealloc);
 
 /**
  * flex_array_get - pull data back out of the array
@@ -288,6 +295,7 @@
 	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
+EXPORT_SYMBOL(flex_array_get);
 
 /**
  * flex_array_get_ptr - pull a ptr back out of the array
@@ -308,6 +316,7 @@
 
 	return *tmp;
 }
+EXPORT_SYMBOL(flex_array_get_ptr);
 
 static int part_is_free(struct flex_array_part *part)
 {
@@ -348,3 +357,4 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(flex_array_shrink);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 5d7a480..f5fe6ba 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -34,6 +34,22 @@
 EXPORT_SYMBOL(hex_to_bin);
 
 /**
+ * hex2bin - convert an ascii hexadecimal string to its binary representation
+ * @dst: binary result
+ * @src: ascii hexadecimal string
+ * @count: result length
+ */
+void hex2bin(u8 *dst, const char *src, size_t count)
+{
+	while (count--) {
+		*dst = hex_to_bin(*src++) << 4;
+		*dst += hex_to_bin(*src++);
+		dst++;
+	}
+}
+EXPORT_SYMBOL(hex2bin);
+
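
A short usage sketch of the new helper (buffer and string are hypothetical): each result byte consumes two hex digits from @src, so @count is the length of @dst.

static void demo_parse_mac(u8 *mac /* u8 mac[6] */)
{
	/* "001a2b3c4d5e" -> { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e } */
	hex2bin(mac, "001a2b3c4d5e", 6);
}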
+/**
  * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
  * @buf: data blob to dump
  * @len: number of bytes in the @buf
@@ -138,6 +154,7 @@
 }
 EXPORT_SYMBOL(hex_dump_to_buffer);
 
+#ifdef CONFIG_PRINTK
 /**
  * print_hex_dump - print a text hex dump to syslog for a binary blob of data
  * @level: kernel log level (e.g. KERN_DEBUG)
@@ -222,3 +239,4 @@
 		       buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
+#endif
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 5730ecd..da4e2ad 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
@@ -90,3 +91,4 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/kref.c b/lib/kref.c
index d3d227a..3efb882 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -62,6 +62,36 @@
 	return 0;
 }
 
+
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ *
+ * Subtract @count from the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0.  Beware, if this
+ * function returns 0, you still can not count on the kref remaining in
+ * memory.  Only use the return value if you want to see if the kref is now
+ * gone, not present.
+ */
+int kref_sub(struct kref *kref, unsigned int count,
+	     void (*release)(struct kref *kref))
+{
+	WARN_ON(release == NULL);
+	WARN_ON(release == (void (*)(struct kref *))kfree);
+
+	if (atomic_sub_and_test((int) count, &kref->refcount)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 EXPORT_SYMBOL(kref_init);
 EXPORT_SYMBOL(kref_get);
 EXPORT_SYMBOL(kref_put);
+EXPORT_SYMBOL(kref_sub);
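
A minimal sketch of how kref_sub() might be used to drop several references in one go (the object and release function are hypothetical):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
	/* ... payload ... */
};

static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, ref);

	kfree(obj);
}

/* Drop two references at once; demo_release() runs if the count hits zero. */
static void demo_put_two(struct demo_obj *obj)
{
	kref_sub(&obj->ref, 2, demo_release);
}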
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 00e8a02..5021cbc 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -167,7 +167,7 @@
  * @policy: validation policy
  *
  * Parses a stream of attributes and stores a pointer to each attribute in
- * the tb array accessable via the attribute type. Attributes with a type
+ * the tb array accessible via the attribute type. Attributes with a type
  * exceeding maxtype will be silently ignored for backwards compatibility
  * reasons. policy may be set to NULL if no validation is required.
  *
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 604678d..28f2c33 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,18 +72,16 @@
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
-	s32 *pcount;
 
 	preempt_disable();
-	pcount = this_cpu_ptr(fbc->counters);
-	count = *pcount + amount;
+	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
-		*pcount = 0;
+		__this_cpu_write(*fbc->counters, 0);
 		spin_unlock(&fbc->lock);
 	} else {
-		*pcount = count;
+		__this_cpu_write(*fbc->counters, count);
 	}
 	preempt_enable();
 }
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 7c06ee5..c47bbe1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,7 +60,7 @@
 static char *io_tlb_start, *io_tlb_end;
 
 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
  * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
  */
 static unsigned long io_tlb_nslabs;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c150d3d..d3023df 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -936,6 +936,8 @@
 	return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict = 1;
+
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
  * by an extra set of alphanumeric characters that are extended format
@@ -979,6 +981,7 @@
  *       Implements a "recursive vsnprintf".
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
+ * - 'K' For a kernel pointer that should be hidden from unprivileged users
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1035,6 +1038,25 @@
 		return buf + vsnprintf(buf, end - buf,
 				       ((struct va_format *)ptr)->fmt,
 				       *(((struct va_format *)ptr)->va));
+	case 'K':
+		/*
+		 * %pK cannot be used in IRQ context because its test
+		 * for CAP_SYSLOG would be meaningless.
+		 */
+		if (in_irq() || in_serving_softirq() || in_nmi()) {
+			if (spec.field_width == -1)
+				spec.field_width = 2 * sizeof(void *);
+			return string(buf, end, "pK-error", spec);
+		} else if ((kptr_restrict == 0) ||
+			 (kptr_restrict == 1 &&
+			  has_capability_noaudit(current, CAP_SYSLOG)))
+			break;
+
+		if (spec.field_width == -1) {
+			spec.field_width = 2 * sizeof(void *);
+			spec.flags |= ZEROPAD;
+		}
+		return number(buf, end, 0, spec);
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
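
In practice the new specifier is used like any other %p extension; a hypothetical sketch:

static void demo_show(const void *ptr)
{
	/* Emitted as the real address when kptr_restrict allows it,
	 * otherwise as all zeroes. */
	printk(KERN_INFO "object at %pK\n", ptr);
}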
@@ -1451,7 +1473,7 @@
  * @args: Arguments for the format string
  *
  * The return value is the number of characters which have been written into
- * the @buf not including the trailing '\0'. If @size is <= 0 the function
+ * the @buf not including the trailing '\0'. If @size is 0, the function
  * returns 0.
  *
  * Call this function if you are already dealing with a va_list.
@@ -1465,7 +1487,11 @@
 
 	i = vsnprintf(buf, size, fmt, args);
 
-	return (i >= size) ? (size - 1) : i;
+	if (likely(i < size))
+		return i;
+	if (size != 0)
+		return size - 1;
+	return 0;
 }
 EXPORT_SYMBOL(vscnprintf);
 
@@ -1513,14 +1539,10 @@
 	int i;
 
 	va_start(args, fmt);
-	i = vsnprintf(buf, size, fmt, args);
+	i = vscnprintf(buf, size, fmt, args);
 	va_end(args);
 
-	if (likely(i < size))
-		return i;
-	if (size != 0)
-		return size - 1;
-	return 0;
+	return i;
 }
 EXPORT_SYMBOL(scnprintf);
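
The practical difference shows up when strings are built incrementally: scnprintf() returns the number of characters actually stored (excluding the trailing '\0'), so the running length can never step past the buffer. A small sketch with a hypothetical buffer:

static void demo_format(void)
{
	char buf[32];
	int len = 0;

	/* Each call appends; len never exceeds sizeof(buf) - 1. */
	len += scnprintf(buf + len, sizeof(buf) - len, "cpu%d", 3);
	len += scnprintf(buf + len, sizeof(buf) - len, " online");
}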
 
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
new file mode 100644
index 0000000..e3b6e18
--- /dev/null
+++ b/lib/xz/Kconfig
@@ -0,0 +1,59 @@
+config XZ_DEC
+	tristate "XZ decompression support"
+	select CRC32
+	help
+	  LZMA2 compression algorithm and BCJ filters are supported using
+	  the .xz file format as the container. For integrity checking,
+	  CRC32 is supported. See Documentation/xz.txt for more information.
+
+config XZ_DEC_X86
+	bool "x86 BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_POWERPC
+	bool "PowerPC BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_IA64
+	bool "IA-64 BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_ARM
+	bool "ARM BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_ARMTHUMB
+	bool "ARM-Thumb BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_SPARC
+	bool "SPARC BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_BCJ
+	bool
+	default n
+
+config XZ_DEC_TEST
+	tristate "XZ decompressor tester"
+	default n
+	depends on XZ_DEC
+	help
+	  This allows passing .xz files to the in-kernel XZ decoder via
+	  a character special file. It calculates CRC32 of the decompressed
+	  data and writes diagnostics to the system log.
+
+	  Unless you are developing the XZ decoder, you don't need this
+	  and should say N.
diff --git a/lib/xz/Makefile b/lib/xz/Makefile
new file mode 100644
index 0000000..a7fa769
--- /dev/null
+++ b/lib/xz/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XZ_DEC) += xz_dec.o
+xz_dec-y := xz_dec_syms.o xz_dec_stream.o xz_dec_lzma2.o
+xz_dec-$(CONFIG_XZ_DEC_BCJ) += xz_dec_bcj.o
+
+obj-$(CONFIG_XZ_DEC_TEST) += xz_dec_test.o
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
new file mode 100644
index 0000000..34532d1
--- /dev/null
+++ b/lib/xz/xz_crc32.c
@@ -0,0 +1,59 @@
+/*
+ * CRC32 using the polynomial from IEEE-802.3
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * This is not the fastest implementation, but it is pretty compact.
+ * The fastest versions of xz_crc32() on modern CPUs without hardware
+ * accelerated CRC instruction are 3-5 times as fast as this version,
+ * but they are bigger and use more memory for the lookup table.
+ */
+
+#include "xz_private.h"
+
+/*
+ * STATIC_RW_DATA is used in the pre-boot environment on some architectures.
+ * See <linux/decompress/mm.h> for details.
+ */
+#ifndef STATIC_RW_DATA
+#	define STATIC_RW_DATA static
+#endif
+
+STATIC_RW_DATA uint32_t xz_crc32_table[256];
+
+XZ_EXTERN void xz_crc32_init(void)
+{
+	const uint32_t poly = 0xEDB88320;
+
+	uint32_t i;
+	uint32_t j;
+	uint32_t r;
+
+	for (i = 0; i < 256; ++i) {
+		r = i;
+		for (j = 0; j < 8; ++j)
+			r = (r >> 1) ^ (poly & ~((r & 1) - 1));
+
+		xz_crc32_table[i] = r;
+	}
+
+	return;
+}
+
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+	crc = ~crc;
+
+	while (size != 0) {
+		crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+		--size;
+	}
+
+	return ~crc;
+}
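
Callers fill the table once and then feed buffers through xz_crc32(), chaining the running value; 0 is the initial CRC. A hypothetical sketch:

static uint32_t demo_crc(const uint8_t *buf, size_t len)
{
	uint32_t crc;

	xz_crc32_init();		/* fills the 256-entry table; needed once */
	crc = xz_crc32(buf, len, 0);	/* 0 starts a fresh CRC32 */

	return crc;
}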
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
new file mode 100644
index 0000000..e51e255
--- /dev/null
+++ b/lib/xz/xz_dec_bcj.c
@@ -0,0 +1,561 @@
+/*
+ * Branch/Call/Jump (BCJ) filter decoders
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+
+/*
+ * The rest of the file is inside this ifdef. It makes things a little more
+ * convenient when building without support for any BCJ filters.
+ */
+#ifdef XZ_DEC_BCJ
+
+struct xz_dec_bcj {
+	/* Type of the BCJ filter being used */
+	enum {
+		BCJ_X86 = 4,        /* x86 or x86-64 */
+		BCJ_POWERPC = 5,    /* Big endian only */
+		BCJ_IA64 = 6,       /* Big or little endian */
+		BCJ_ARM = 7,        /* Little endian only */
+		BCJ_ARMTHUMB = 8,   /* Little endian only */
+		BCJ_SPARC = 9       /* Big or little endian */
+	} type;
+
+	/*
+	 * Return value of the next filter in the chain. We need to preserve
+	 * this information across calls, because we must not call the next
+	 * filter anymore once it has returned XZ_STREAM_END.
+	 */
+	enum xz_ret ret;
+
+	/* True if we are operating in single-call mode. */
+	bool single_call;
+
+	/*
+	 * Absolute position relative to the beginning of the uncompressed
+	 * data (in a single .xz Block). We care only about the lowest 32
+	 * bits so this doesn't need to be uint64_t even with big files.
+	 */
+	uint32_t pos;
+
+	/* x86 filter state */
+	uint32_t x86_prev_mask;
+
+	/* Temporary space to hold the variables from struct xz_buf */
+	uint8_t *out;
+	size_t out_pos;
+	size_t out_size;
+
+	struct {
+		/* Amount of already filtered data in the beginning of buf */
+		size_t filtered;
+
+		/* Total amount of data currently stored in buf  */
+		size_t size;
+
+		/*
+		 * Buffer to hold a mix of filtered and unfiltered data. This
+		 * needs to be big enough to hold Alignment + 2 * Look-ahead:
+		 *
+		 * Type         Alignment   Look-ahead
+		 * x86              1           4
+		 * PowerPC          4           0
+		 * IA-64           16           0
+		 * ARM              4           0
+		 * ARM-Thumb        2           2
+		 * SPARC            4           0
+		 */
+		uint8_t buf[16];
+	} temp;
+};
+
+#ifdef XZ_DEC_X86
+/*
+ * This is used to test the most significant byte of a memory address
+ * in an x86 instruction.
+ */
+static inline int bcj_x86_test_msbyte(uint8_t b)
+{
+	return b == 0x00 || b == 0xFF;
+}
+
+static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	static const bool mask_to_allowed_status[8]
+		= { true, true, true, false, true, false, false, false };
+
+	static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+	size_t i;
+	size_t prev_pos = (size_t)-1;
+	uint32_t prev_mask = s->x86_prev_mask;
+	uint32_t src;
+	uint32_t dest;
+	uint32_t j;
+	uint8_t b;
+
+	if (size <= 4)
+		return 0;
+
+	size -= 4;
+	for (i = 0; i < size; ++i) {
+		if ((buf[i] & 0xFE) != 0xE8)
+			continue;
+
+		prev_pos = i - prev_pos;
+		if (prev_pos > 3) {
+			prev_mask = 0;
+		} else {
+			prev_mask = (prev_mask << (prev_pos - 1)) & 7;
+			if (prev_mask != 0) {
+				b = buf[i + 4 - mask_to_bit_num[prev_mask]];
+				if (!mask_to_allowed_status[prev_mask]
+						|| bcj_x86_test_msbyte(b)) {
+					prev_pos = i;
+					prev_mask = (prev_mask << 1) | 1;
+					continue;
+				}
+			}
+		}
+
+		prev_pos = i;
+
+		if (bcj_x86_test_msbyte(buf[i + 4])) {
+			src = get_unaligned_le32(buf + i + 1);
+			while (true) {
+				dest = src - (s->pos + (uint32_t)i + 5);
+				if (prev_mask == 0)
+					break;
+
+				j = mask_to_bit_num[prev_mask] * 8;
+				b = (uint8_t)(dest >> (24 - j));
+				if (!bcj_x86_test_msbyte(b))
+					break;
+
+				src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
+			}
+
+			dest &= 0x01FFFFFF;
+			dest |= (uint32_t)0 - (dest & 0x01000000);
+			put_unaligned_le32(dest, buf + i + 1);
+			i += 4;
+		} else {
+			prev_mask = (prev_mask << 1) | 1;
+		}
+	}
+
+	prev_pos = i - prev_pos;
+	s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_POWERPC
+static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t instr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		instr = get_unaligned_be32(buf + i);
+		if ((instr & 0xFC000003) == 0x48000001) {
+			instr &= 0x03FFFFFC;
+			instr -= s->pos + (uint32_t)i;
+			instr &= 0x03FFFFFC;
+			instr |= 0x48000001;
+			put_unaligned_be32(instr, buf + i);
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_IA64
+static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	static const uint8_t branch_table[32] = {
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		4, 4, 6, 6, 0, 0, 7, 7,
+		4, 4, 0, 0, 4, 4, 0, 0
+	};
+
+	/*
+	 * The local variables take a bit of stack space, but it's less
+	 * than what the LZMA2 decoder takes, so it doesn't make sense to reduce
+	 * stack usage here without doing that for the LZMA2 decoder too.
+	 */
+
+	/* Loop counters */
+	size_t i;
+	size_t j;
+
+	/* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
+	uint32_t slot;
+
+	/* Bitwise offset of the instruction indicated by slot */
+	uint32_t bit_pos;
+
+	/* bit_pos split into byte and bit parts */
+	uint32_t byte_pos;
+	uint32_t bit_res;
+
+	/* Address part of an instruction */
+	uint32_t addr;
+
+	/* Mask used to detect which instructions to convert */
+	uint32_t mask;
+
+	/* 41-bit instruction stored somewhere in the lowest 48 bits */
+	uint64_t instr;
+
+	/* Instruction normalized with bit_res for easier manipulation */
+	uint64_t norm;
+
+	for (i = 0; i + 16 <= size; i += 16) {
+		mask = branch_table[buf[i] & 0x1F];
+		for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
+			if (((mask >> slot) & 1) == 0)
+				continue;
+
+			byte_pos = bit_pos >> 3;
+			bit_res = bit_pos & 7;
+			instr = 0;
+			for (j = 0; j < 6; ++j)
+				instr |= (uint64_t)(buf[i + j + byte_pos])
+						<< (8 * j);
+
+			norm = instr >> bit_res;
+
+			if (((norm >> 37) & 0x0F) == 0x05
+					&& ((norm >> 9) & 0x07) == 0) {
+				addr = (norm >> 13) & 0x0FFFFF;
+				addr |= ((uint32_t)(norm >> 36) & 1) << 20;
+				addr <<= 4;
+				addr -= s->pos + (uint32_t)i;
+				addr >>= 4;
+
+				norm &= ~((uint64_t)0x8FFFFF << 13);
+				norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
+				norm |= (uint64_t)(addr & 0x100000)
+						<< (36 - 20);
+
+				instr &= (1 << bit_res) - 1;
+				instr |= norm << bit_res;
+
+				for (j = 0; j < 6; j++)
+					buf[i + j + byte_pos]
+						= (uint8_t)(instr >> (8 * j));
+			}
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARM
+static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t addr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		if (buf[i + 3] == 0xEB) {
+			addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
+					| ((uint32_t)buf[i + 2] << 16);
+			addr <<= 2;
+			addr -= s->pos + (uint32_t)i + 8;
+			addr >>= 2;
+			buf[i] = (uint8_t)addr;
+			buf[i + 1] = (uint8_t)(addr >> 8);
+			buf[i + 2] = (uint8_t)(addr >> 16);
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARMTHUMB
+static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t addr;
+
+	for (i = 0; i + 4 <= size; i += 2) {
+		if ((buf[i + 1] & 0xF8) == 0xF0
+				&& (buf[i + 3] & 0xF8) == 0xF8) {
+			addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
+					| ((uint32_t)buf[i] << 11)
+					| (((uint32_t)buf[i + 3] & 0x07) << 8)
+					| (uint32_t)buf[i + 2];
+			addr <<= 1;
+			addr -= s->pos + (uint32_t)i + 4;
+			addr >>= 1;
+			buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
+			buf[i] = (uint8_t)(addr >> 11);
+			buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
+			buf[i + 2] = (uint8_t)addr;
+			i += 2;
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_SPARC
+static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t instr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		instr = get_unaligned_be32(buf + i);
+		if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
+			instr <<= 2;
+			instr -= s->pos + (uint32_t)i;
+			instr >>= 2;
+			instr = ((uint32_t)0x40000000 - (instr & 0x400000))
+					| 0x40000000 | (instr & 0x3FFFFF);
+			put_unaligned_be32(instr, buf + i);
+		}
+	}
+
+	return i;
+}
+#endif
+
+/*
+ * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
+ * of data that got filtered.
+ *
+ * NOTE: This is implemented as a switch statement to avoid using function
+ * pointers, which could be problematic in the kernel boot code, where
+ * pointers to static data must be avoided (at least on x86).
+ */
+static void bcj_apply(struct xz_dec_bcj *s,
+		      uint8_t *buf, size_t *pos, size_t size)
+{
+	size_t filtered;
+
+	buf += *pos;
+	size -= *pos;
+
+	switch (s->type) {
+#ifdef XZ_DEC_X86
+	case BCJ_X86:
+		filtered = bcj_x86(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_POWERPC
+	case BCJ_POWERPC:
+		filtered = bcj_powerpc(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_IA64
+	case BCJ_IA64:
+		filtered = bcj_ia64(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_ARM
+	case BCJ_ARM:
+		filtered = bcj_arm(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+	case BCJ_ARMTHUMB:
+		filtered = bcj_armthumb(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_SPARC
+	case BCJ_SPARC:
+		filtered = bcj_sparc(s, buf, size);
+		break;
+#endif
+	default:
+		/* Never reached but silence compiler warnings. */
+		filtered = 0;
+		break;
+	}
+
+	*pos += filtered;
+	s->pos += filtered;
+}
+
+/*
+ * Flush pending filtered data from temp to the output buffer.
+ * Move the remaining mixture of possibly filtered and unfiltered
+ * data to the beginning of temp.
+ */
+static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
+{
+	size_t copy_size;
+
+	copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
+	memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
+	b->out_pos += copy_size;
+
+	s->temp.filtered -= copy_size;
+	s->temp.size -= copy_size;
+	memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
+}
+
+/*
+ * The BCJ filter functions are primitive in the sense that they process the
+ * data in chunks of 1-16 bytes. To hide this issue, this function does
+ * some buffering.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+				     struct xz_dec_lzma2 *lzma2,
+				     struct xz_buf *b)
+{
+	size_t out_start;
+
+	/*
+	 * Flush pending already filtered data to the output buffer. Return
+	 * immediately if we couldn't flush everything, or if the next
+	 * filter in the chain had already returned XZ_STREAM_END.
+	 */
+	if (s->temp.filtered > 0) {
+		bcj_flush(s, b);
+		if (s->temp.filtered > 0)
+			return XZ_OK;
+
+		if (s->ret == XZ_STREAM_END)
+			return XZ_STREAM_END;
+	}
+
+	/*
+	 * If we have more output space than what is currently pending in
+	 * temp, copy the unfiltered data from temp to the output buffer
+	 * and try to fill the output buffer by decoding more data from the
+	 * next filter in the chain. Apply the BCJ filter on the new data
+	 * in the output buffer. If everything cannot be filtered, copy it
+	 * to temp and rewind the output buffer position accordingly.
+	 */
+	if (s->temp.size < b->out_size - b->out_pos) {
+		out_start = b->out_pos;
+		memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
+		b->out_pos += s->temp.size;
+
+		s->ret = xz_dec_lzma2_run(lzma2, b);
+		if (s->ret != XZ_STREAM_END
+				&& (s->ret != XZ_OK || s->single_call))
+			return s->ret;
+
+		bcj_apply(s, b->out, &out_start, b->out_pos);
+
+		/*
+		 * As an exception, if the next filter returned XZ_STREAM_END,
+		 * we can do that too, since the last few bytes that remain
+		 * unfiltered are meant to remain unfiltered.
+		 */
+		if (s->ret == XZ_STREAM_END)
+			return XZ_STREAM_END;
+
+		s->temp.size = b->out_pos - out_start;
+		b->out_pos -= s->temp.size;
+		memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+	}
+
+	/*
+	 * If we have unfiltered data in temp, try to fill by decoding more
+	 * data from the next filter. Apply the BCJ filter on temp. Then we
+	 * hopefully can fill the actual output buffer by copying filtered
+	 * data from temp. A mix of filtered and unfiltered data may be left
+	 * in temp; it will be taken care of on the next call to this function.
+	 */
+	if (s->temp.size > 0) {
+		/* Make b->out{,_pos,_size} temporarily point to s->temp. */
+		s->out = b->out;
+		s->out_pos = b->out_pos;
+		s->out_size = b->out_size;
+		b->out = s->temp.buf;
+		b->out_pos = s->temp.size;
+		b->out_size = sizeof(s->temp.buf);
+
+		s->ret = xz_dec_lzma2_run(lzma2, b);
+
+		s->temp.size = b->out_pos;
+		b->out = s->out;
+		b->out_pos = s->out_pos;
+		b->out_size = s->out_size;
+
+		if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
+			return s->ret;
+
+		bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
+
+		/*
+		 * If the next filter returned XZ_STREAM_END, we mark that
+		 * everything is filtered, since the last unfiltered bytes
+		 * of the stream are meant to be left as is.
+		 */
+		if (s->ret == XZ_STREAM_END)
+			s->temp.filtered = s->temp.size;
+
+		bcj_flush(s, b);
+		if (s->temp.filtered > 0)
+			return XZ_OK;
+	}
+
+	return s->ret;
+}
+
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+{
+	struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s != NULL)
+		s->single_call = single_call;
+
+	return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+{
+	switch (id) {
+#ifdef XZ_DEC_X86
+	case BCJ_X86:
+#endif
+#ifdef XZ_DEC_POWERPC
+	case BCJ_POWERPC:
+#endif
+#ifdef XZ_DEC_IA64
+	case BCJ_IA64:
+#endif
+#ifdef XZ_DEC_ARM
+	case BCJ_ARM:
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+	case BCJ_ARMTHUMB:
+#endif
+#ifdef XZ_DEC_SPARC
+	case BCJ_SPARC:
+#endif
+		break;
+
+	default:
+		/* Unsupported Filter ID */
+		return XZ_OPTIONS_ERROR;
+	}
+
+	s->type = id;
+	s->ret = XZ_OK;
+	s->pos = 0;
+	s->x86_prev_mask = 0;
+	s->temp.filtered = 0;
+	s->temp.size = 0;
+
+	return XZ_OK;
+}
+
+#endif
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
new file mode 100644
index 0000000..ea5fa4f
--- /dev/null
+++ b/lib/xz/xz_dec_lzma2.c
@@ -0,0 +1,1171 @@
+/*
+ * LZMA2 decoder
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_lzma2.h"
+
+/*
+ * Range decoder initialization eats the first five bytes of each LZMA chunk.
+ */
+#define RC_INIT_BYTES 5
+
+/*
+ * Minimum number of usable input bytes needed to safely decode one LZMA
+ * symbol. The worst case is that we decode 22 bits using probabilities
+ * and 26 direct bits, which consumes at most 20 bytes of input. However,
+ * lzma_main() does an extra normalization before returning, thus we
+ * need to put 21 here.
+ */
+#define LZMA_IN_REQUIRED 21
+
+/*
+ * Dictionary (history buffer)
+ *
+ * These are always true:
+ *    start <= pos <= full <= end
+ *    pos <= limit <= end
+ *
+ * In multi-call mode, also these are true:
+ *    end == size
+ *    size <= size_max
+ *    allocated <= size
+ *
+ * Most of these variables are size_t to support single-call mode,
+ * in which the dictionary variables address the actual output
+ * buffer directly.
+ */
+struct dictionary {
+	/* Beginning of the history buffer */
+	uint8_t *buf;
+
+	/* Old position in buf (before decoding more data) */
+	size_t start;
+
+	/* Position in buf */
+	size_t pos;
+
+	/*
+	 * How full the dictionary is. This is used to detect corrupt input that
+	 * would read beyond the beginning of the uncompressed stream.
+	 */
+	size_t full;
+
+	/* Write limit; we don't write to buf[limit] or later bytes. */
+	size_t limit;
+
+	/*
+	 * End of the dictionary buffer. In multi-call mode, this is
+	 * the same as the dictionary size. In single-call mode, this
+	 * indicates the size of the output buffer.
+	 */
+	size_t end;
+
+	/*
+	 * Size of the dictionary as specified in Block Header. This is used
+	 * together with "full" to detect corrupt input that would make us
+	 * read beyond the beginning of the uncompressed stream.
+	 */
+	uint32_t size;
+
+	/*
+	 * Maximum allowed dictionary size in multi-call mode.
+	 * This is ignored in single-call mode.
+	 */
+	uint32_t size_max;
+
+	/*
+	 * Amount of memory currently allocated for the dictionary.
+	 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
+	 * size_max is always the same as the allocated size.)
+	 */
+	uint32_t allocated;
+
+	/* Operation mode */
+	enum xz_mode mode;
+};
+
+/* Range decoder */
+struct rc_dec {
+	uint32_t range;
+	uint32_t code;
+
+	/*
+	 * Number of initializing bytes remaining to be read
+	 * by rc_read_init().
+	 */
+	uint32_t init_bytes_left;
+
+	/*
+	 * Buffer from which we read our input. It can be either
+	 * temp.buf or the caller-provided input buffer.
+	 */
+	const uint8_t *in;
+	size_t in_pos;
+	size_t in_limit;
+};
+
+/* Probabilities for a length decoder. */
+struct lzma_len_dec {
+	/* Probability of match length being at least 10 */
+	uint16_t choice;
+
+	/* Probability of match length being at least 18 */
+	uint16_t choice2;
+
+	/* Probabilities for match lengths 2-9 */
+	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
+
+	/* Probabilities for match lengths 10-17 */
+	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
+
+	/* Probabilities for match lengths 18-273 */
+	uint16_t high[LEN_HIGH_SYMBOLS];
+};
+
+struct lzma_dec {
+	/* Distances of latest four matches */
+	uint32_t rep0;
+	uint32_t rep1;
+	uint32_t rep2;
+	uint32_t rep3;
+
+	/* Types of the most recently seen LZMA symbols */
+	enum lzma_state state;
+
+	/*
+	 * Length of a match. This is updated so that dict_repeat can
+	 * be called again to finish repeating the whole match.
+	 */
+	uint32_t len;
+
+	/*
+	 * LZMA properties or related bit masks (number of literal
+	 * context bits, a mask derived from the number of literal
+	 * position bits, and a mask derived from the number of
+	 * position bits)
+	 */
+	uint32_t lc;
+	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
+	uint32_t pos_mask;         /* (1 << pb) - 1 */
+
+	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
+	uint16_t is_match[STATES][POS_STATES_MAX];
+
+	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
+	uint16_t is_rep[STATES];
+
+	/*
+	 * If 0, distance of a repeated match is rep0.
+	 * Otherwise check is_rep1.
+	 */
+	uint16_t is_rep0[STATES];
+
+	/*
+	 * If 0, distance of a repeated match is rep1.
+	 * Otherwise check is_rep2.
+	 */
+	uint16_t is_rep1[STATES];
+
+	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
+	uint16_t is_rep2[STATES];
+
+	/*
+	 * If 1, the repeated match has length of one byte. Otherwise
+	 * the length is decoded from rep_len_decoder.
+	 */
+	uint16_t is_rep0_long[STATES][POS_STATES_MAX];
+
+	/*
+	 * Probability tree for the highest two bits of the match
+	 * distance. There is a separate probability tree for match
+	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
+	 */
+	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
+
+	/*
+	 * Probability trees for additional bits for match distance
+	 * when the distance is in the range [4, 127].
+	 */
+	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
+
+	/*
+	 * Probability tree for the lowest four bits of a match
+	 * distance that is equal to or greater than 128.
+	 */
+	uint16_t dist_align[ALIGN_SIZE];
+
+	/* Length of a normal match */
+	struct lzma_len_dec match_len_dec;
+
+	/* Length of a repeated match */
+	struct lzma_len_dec rep_len_dec;
+
+	/* Probabilities of literals */
+	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
+};
+
+struct lzma2_dec {
+	/* Position in xz_dec_lzma2_run(). */
+	enum lzma2_seq {
+		SEQ_CONTROL,
+		SEQ_UNCOMPRESSED_1,
+		SEQ_UNCOMPRESSED_2,
+		SEQ_COMPRESSED_0,
+		SEQ_COMPRESSED_1,
+		SEQ_PROPERTIES,
+		SEQ_LZMA_PREPARE,
+		SEQ_LZMA_RUN,
+		SEQ_COPY
+	} sequence;
+
+	/* Next position after decoding the compressed size of the chunk. */
+	enum lzma2_seq next_sequence;
+
+	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
+	uint32_t uncompressed;
+
+	/*
+	 * Compressed size of LZMA chunk or compressed/uncompressed
+	 * size of uncompressed chunk (64 KiB at maximum)
+	 */
+	uint32_t compressed;
+
+	/*
+	 * True if dictionary reset is needed. This is false before
+	 * the first chunk (LZMA or uncompressed).
+	 */
+	bool need_dict_reset;
+
+	/*
+	 * True if new LZMA properties are needed. This is false
+	 * before the first LZMA chunk.
+	 */
+	bool need_props;
+};
+
+struct xz_dec_lzma2 {
+	/*
+	 * The order below is important on x86 to reduce code size and
+	 * it shouldn't hurt on other platforms. Everything up to and
+	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
+	 * which allows using smaller instructions to access those
+	 * variables. On x86-64, fewer variables fit into the first 128
+	 * bytes, but this is still the best order without sacrificing
+	 * the readability by splitting the structures.
+	 */
+	struct rc_dec rc;
+	struct dictionary dict;
+	struct lzma2_dec lzma2;
+	struct lzma_dec lzma;
+
+	/*
+	 * Temporary buffer which holds a small number of input bytes between
+	 * decoder calls. See lzma2_lzma() for details.
+	 */
+	struct {
+		uint32_t size;
+		uint8_t buf[3 * LZMA_IN_REQUIRED];
+	} temp;
+};
+
+/**************
+ * Dictionary *
+ **************/
+
+/*
+ * Reset the dictionary state. When in single-call mode, set up the beginning
+ * of the dictionary to point to the actual output buffer.
+ */
+static void dict_reset(struct dictionary *dict, struct xz_buf *b)
+{
+	if (DEC_IS_SINGLE(dict->mode)) {
+		dict->buf = b->out + b->out_pos;
+		dict->end = b->out_size - b->out_pos;
+	}
+
+	dict->start = 0;
+	dict->pos = 0;
+	dict->limit = 0;
+	dict->full = 0;
+}
+
+/* Set dictionary write limit */
+static void dict_limit(struct dictionary *dict, size_t out_max)
+{
+	if (dict->end - dict->pos <= out_max)
+		dict->limit = dict->end;
+	else
+		dict->limit = dict->pos + out_max;
+}
+
+/* Return true if at least one byte can be written into the dictionary. */
+static inline bool dict_has_space(const struct dictionary *dict)
+{
+	return dict->pos < dict->limit;
+}
+
+/*
+ * Get a byte from the dictionary at the given distance. The distance is
+ * assumed to be valid, or as a special case, zero when the dictionary is
+ * still empty. This special case is needed for single-call decoding to
+ * avoid writing a '\0' to the end of the destination buffer.
+ */
+static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
+{
+	size_t offset = dict->pos - dist - 1;
+
+	if (dist >= dict->pos)
+		offset += dict->end;
+
+	return dict->full > 0 ? dict->buf[offset] : 0;
+}
+
+/*
+ * Put one byte into the dictionary. It is assumed that there is space for it.
+ */
+static inline void dict_put(struct dictionary *dict, uint8_t byte)
+{
+	dict->buf[dict->pos++] = byte;
+
+	if (dict->full < dict->pos)
+		dict->full = dict->pos;
+}
+
+/*
+ * Repeat given number of bytes from the given distance. If the distance is
+ * invalid, false is returned. On success, true is returned and *len is
+ * updated to indicate how many bytes were left to be repeated.
+ */
+static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
+{
+	size_t back;
+	uint32_t left;
+
+	if (dist >= dict->full || dist >= dict->size)
+		return false;
+
+	left = min_t(size_t, dict->limit - dict->pos, *len);
+	*len -= left;
+
+	back = dict->pos - dist - 1;
+	if (dist >= dict->pos)
+		back += dict->end;
+
+	do {
+		dict->buf[dict->pos++] = dict->buf[back++];
+		if (back == dict->end)
+			back = 0;
+	} while (--left > 0);
+
+	if (dict->full < dict->pos)
+		dict->full = dict->pos;
+
+	return true;
+}
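+
+/*
+ * For example, because "dist" is the match distance minus one, dist == 0
+ * refers to the byte immediately preceding dict->pos. With *len == 5 and
+ * dist == 0 the previous byte is repeated five times, which is how LZMA
+ * expresses a run of identical bytes; overlapping copies work because the
+ * bytes are copied one at a time.
+ */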
+
+/* Copy uncompressed data as is from input to dictionary and output buffers. */
+static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
+			      uint32_t *left)
+{
+	size_t copy_size;
+
+	while (*left > 0 && b->in_pos < b->in_size
+			&& b->out_pos < b->out_size) {
+		copy_size = min(b->in_size - b->in_pos,
+				b->out_size - b->out_pos);
+		if (copy_size > dict->end - dict->pos)
+			copy_size = dict->end - dict->pos;
+		if (copy_size > *left)
+			copy_size = *left;
+
+		*left -= copy_size;
+
+		memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+		dict->pos += copy_size;
+
+		if (dict->full < dict->pos)
+			dict->full = dict->pos;
+
+		if (DEC_IS_MULTI(dict->mode)) {
+			if (dict->pos == dict->end)
+				dict->pos = 0;
+
+			memcpy(b->out + b->out_pos, b->in + b->in_pos,
+					copy_size);
+		}
+
+		dict->start = dict->pos;
+
+		b->out_pos += copy_size;
+		b->in_pos += copy_size;
+	}
+}
+
+/*
+ * Flush pending data from dictionary to b->out. It is assumed that there is
+ * enough space in b->out. This is guaranteed because the caller uses
+ * dict_limit() before decoding data into the dictionary.
+ */
+static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
+{
+	size_t copy_size = dict->pos - dict->start;
+
+	if (DEC_IS_MULTI(dict->mode)) {
+		if (dict->pos == dict->end)
+			dict->pos = 0;
+
+		memcpy(b->out + b->out_pos, dict->buf + dict->start,
+				copy_size);
+	}
+
+	dict->start = dict->pos;
+	b->out_pos += copy_size;
+	return copy_size;
+}
+
+/*****************
+ * Range decoder *
+ *****************/
+
+/* Reset the range decoder. */
+static void rc_reset(struct rc_dec *rc)
+{
+	rc->range = (uint32_t)-1;
+	rc->code = 0;
+	rc->init_bytes_left = RC_INIT_BYTES;
+}
+
+/*
+ * Read the first five initial bytes into rc->code if they haven't been
+ * read already. (Yes, the first byte gets completely ignored.)
+ */
+static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
+{
+	while (rc->init_bytes_left > 0) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		rc->code = (rc->code << 8) + b->in[b->in_pos++];
+		--rc->init_bytes_left;
+	}
+
+	return true;
+}
+
+/* Return true if there may not be enough input for the next decoding loop. */
+static inline bool rc_limit_exceeded(const struct rc_dec *rc)
+{
+	return rc->in_pos > rc->in_limit;
+}
+
+/*
+ * Return true if it is possible (from point of view of range decoder) that
+ * we have reached the end of the LZMA chunk.
+ */
+static inline bool rc_is_finished(const struct rc_dec *rc)
+{
+	return rc->code == 0;
+}
+
+/* Read the next input byte if needed. */
+static __always_inline void rc_normalize(struct rc_dec *rc)
+{
+	if (rc->range < RC_TOP_VALUE) {
+		rc->range <<= RC_SHIFT_BITS;
+		rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
+	}
+}
+
+/*
+ * Decode one bit. In some versions, this function has been split into three
+ * functions so that the compiler is supposed to be able to more easily avoid
+ * an extra branch. In this particular version of the LZMA decoder, this
+ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
+ * on x86). Using a non-split version results in nicer-looking code too.
+ *
+ * NOTE: This must return an int. Do not make it return a bool or the speed
+ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
+ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
+ */
+static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
+{
+	uint32_t bound;
+	int bit;
+
+	rc_normalize(rc);
+	bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
+	if (rc->code < bound) {
+		rc->range = bound;
+		*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
+		bit = 0;
+	} else {
+		rc->range -= bound;
+		rc->code -= bound;
+		*prob -= *prob >> RC_MOVE_BITS;
+		bit = 1;
+	}
+
+	return bit;
+}
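+
+/*
+ * A rough numeric illustration of rc_bit(): with *prob at its initial
+ * value RC_BIT_MODEL_TOTAL / 2 == 1024 and rc->range == 0xFFFFFFFF,
+ * bound == (0xFFFFFFFF >> 11) * 1024 == 0x7FFFFC00. If rc->code is below
+ * that, the bit is 0, the range shrinks to bound, and the probability is
+ * nudged up to 1024 + (2048 - 1024) / 32 == 1056; otherwise the bit is 1
+ * and the probability drops to 1024 - 1024 / 32 == 992.
+ */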
+
+/* Decode a bittree starting from the most significant bit. */
+static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
+					   uint16_t *probs, uint32_t limit)
+{
+	uint32_t symbol = 1;
+
+	do {
+		if (rc_bit(rc, &probs[symbol]))
+			symbol = (symbol << 1) + 1;
+		else
+			symbol <<= 1;
+	} while (symbol < limit);
+
+	return symbol;
+}
+
+/* Decode a bittree starting from the least significant bit. */
+static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
+					       uint16_t *probs,
+					       uint32_t *dest, uint32_t limit)
+{
+	uint32_t symbol = 1;
+	uint32_t i = 0;
+
+	do {
+		if (rc_bit(rc, &probs[symbol])) {
+			symbol = (symbol << 1) + 1;
+			*dest += 1 << i;
+		} else {
+			symbol <<= 1;
+		}
+	} while (++i < limit);
+}
+
+/* Decode direct bits (fixed fifty-fifty probability) */
+static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
+{
+	uint32_t mask;
+
+	do {
+		rc_normalize(rc);
+		rc->range >>= 1;
+		rc->code -= rc->range;
+		mask = (uint32_t)0 - (rc->code >> 31);
+		rc->code += rc->range & mask;
+		*dest = (*dest << 1) + (mask + 1);
+	} while (--limit > 0);
+}
+
+/********
+ * LZMA *
+ ********/
+
+/* Get pointer to literal coder probability array. */
+static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
+{
+	uint32_t prev_byte = dict_get(&s->dict, 0);
+	uint32_t low = prev_byte >> (8 - s->lzma.lc);
+	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
+	return s->lzma.literal[low + high];
+}
+
+/* Decode a literal (one 8-bit byte) */
+static void lzma_literal(struct xz_dec_lzma2 *s)
+{
+	uint16_t *probs;
+	uint32_t symbol;
+	uint32_t match_byte;
+	uint32_t match_bit;
+	uint32_t offset;
+	uint32_t i;
+
+	probs = lzma_literal_probs(s);
+
+	if (lzma_state_is_literal(s->lzma.state)) {
+		symbol = rc_bittree(&s->rc, probs, 0x100);
+	} else {
+		symbol = 1;
+		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
+		offset = 0x100;
+
+		do {
+			match_bit = match_byte & offset;
+			match_byte <<= 1;
+			i = offset + match_bit + symbol;
+
+			if (rc_bit(&s->rc, &probs[i])) {
+				symbol = (symbol << 1) + 1;
+				offset &= match_bit;
+			} else {
+				symbol <<= 1;
+				offset &= ~match_bit;
+			}
+		} while (symbol < 0x100);
+	}
+
+	dict_put(&s->dict, (uint8_t)symbol);
+	lzma_state_literal(&s->lzma.state);
+}
+
+/* Decode the length of the match into s->lzma.len. */
+static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
+		     uint32_t pos_state)
+{
+	uint16_t *probs;
+	uint32_t limit;
+
+	if (!rc_bit(&s->rc, &l->choice)) {
+		probs = l->low[pos_state];
+		limit = LEN_LOW_SYMBOLS;
+		s->lzma.len = MATCH_LEN_MIN;
+	} else {
+		if (!rc_bit(&s->rc, &l->choice2)) {
+			probs = l->mid[pos_state];
+			limit = LEN_MID_SYMBOLS;
+			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
+		} else {
+			probs = l->high;
+			limit = LEN_HIGH_SYMBOLS;
+			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
+					+ LEN_MID_SYMBOLS;
+		}
+	}
+
+	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
+}
+
+/* Decode a match. The distance will be stored in s->lzma.rep0. */
+static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+	uint16_t *probs;
+	uint32_t dist_slot;
+	uint32_t limit;
+
+	lzma_state_match(&s->lzma.state);
+
+	s->lzma.rep3 = s->lzma.rep2;
+	s->lzma.rep2 = s->lzma.rep1;
+	s->lzma.rep1 = s->lzma.rep0;
+
+	lzma_len(s, &s->lzma.match_len_dec, pos_state);
+
+	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
+	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
+
+	if (dist_slot < DIST_MODEL_START) {
+		s->lzma.rep0 = dist_slot;
+	} else {
+		limit = (dist_slot >> 1) - 1;
+		s->lzma.rep0 = 2 + (dist_slot & 1);
+
+		if (dist_slot < DIST_MODEL_END) {
+			s->lzma.rep0 <<= limit;
+			probs = s->lzma.dist_special + s->lzma.rep0
+					- dist_slot - 1;
+			rc_bittree_reverse(&s->rc, probs,
+					&s->lzma.rep0, limit);
+		} else {
+			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
+			s->lzma.rep0 <<= ALIGN_BITS;
+			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
+					&s->lzma.rep0, ALIGN_BITS);
+		}
+	}
+}
+
+/*
+ * Decode a repeated match. The distance is one of the four most recently
+ * seen matches. The distance will be stored in s->lzma.rep0.
+ */
+static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+	uint32_t tmp;
+
+	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
+		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
+				s->lzma.state][pos_state])) {
+			lzma_state_short_rep(&s->lzma.state);
+			s->lzma.len = 1;
+			return;
+		}
+	} else {
+		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
+			tmp = s->lzma.rep1;
+		} else {
+			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
+				tmp = s->lzma.rep2;
+			} else {
+				tmp = s->lzma.rep3;
+				s->lzma.rep3 = s->lzma.rep2;
+			}
+
+			s->lzma.rep2 = s->lzma.rep1;
+		}
+
+		s->lzma.rep1 = s->lzma.rep0;
+		s->lzma.rep0 = tmp;
+	}
+
+	lzma_state_long_rep(&s->lzma.state);
+	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
+}
+
+/* LZMA decoder core */
+static bool lzma_main(struct xz_dec_lzma2 *s)
+{
+	uint32_t pos_state;
+
+	/*
+	 * If the dictionary limit was reached during the previous call, try
+	 * to finish the possibly pending repeat in the dictionary.
+	 */
+	if (dict_has_space(&s->dict) && s->lzma.len > 0)
+		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
+
+	/*
+	 * Decode more LZMA symbols. One iteration may consume up to
+	 * LZMA_IN_REQUIRED - 1 bytes.
+	 */
+	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
+		pos_state = s->dict.pos & s->lzma.pos_mask;
+
+		if (!rc_bit(&s->rc, &s->lzma.is_match[
+				s->lzma.state][pos_state])) {
+			lzma_literal(s);
+		} else {
+			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
+				lzma_rep_match(s, pos_state);
+			else
+				lzma_match(s, pos_state);
+
+			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
+				return false;
+		}
+	}
+
+	/*
+	 * Having the range decoder always normalized when we are outside
+	 * this function makes it easier to correctly handle end of the chunk.
+	 */
+	rc_normalize(&s->rc);
+
+	return true;
+}
+
+/*
+ * Reset the LZMA decoder and range decoder state. The dictionary is not reset
+ * here, because LZMA state may be reset without resetting the dictionary.
+ */
+static void lzma_reset(struct xz_dec_lzma2 *s)
+{
+	uint16_t *probs;
+	size_t i;
+
+	s->lzma.state = STATE_LIT_LIT;
+	s->lzma.rep0 = 0;
+	s->lzma.rep1 = 0;
+	s->lzma.rep2 = 0;
+	s->lzma.rep3 = 0;
+
+	/*
+	 * All probabilities are initialized to the same value. This hack
+	 * makes the code smaller by avoiding a separate loop for each
+	 * probability array.
+	 *
+	 * This could be optimized so that only the part of the literal
+	 * probabilities that is actually required gets initialized. In the
+	 * common case we would write 12 KiB less.
+	 */
+	probs = s->lzma.is_match[0];
+	for (i = 0; i < PROBS_TOTAL; ++i)
+		probs[i] = RC_BIT_MODEL_TOTAL / 2;
+
+	rc_reset(&s->rc);
+}
+
+/*
+ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
+ * from the decoded lp and pb values. On success, the LZMA decoder state is
+ * reset and true is returned.
+ */
+static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
+{
+	if (props > (4 * 5 + 4) * 9 + 8)
+		return false;
+
+	s->lzma.pos_mask = 0;
+	while (props >= 9 * 5) {
+		props -= 9 * 5;
+		++s->lzma.pos_mask;
+	}
+
+	s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
+
+	s->lzma.literal_pos_mask = 0;
+	while (props >= 9) {
+		props -= 9;
+		++s->lzma.literal_pos_mask;
+	}
+
+	s->lzma.lc = props;
+
+	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
+		return false;
+
+	s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
+
+	lzma_reset(s);
+
+	return true;
+}
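+
+/*
+ * For example, the common props byte 0x5D (93) encodes lc=3, lp=0, pb=2:
+ * 93 == (2 * 5 + 0) * 9 + 3. The first loop above subtracts 45 twice
+ * (pb == 2, so pos_mask becomes 3), the second loop subtracts nothing
+ * (lp == 0, so literal_pos_mask becomes 0), and lc == 3 remains. The
+ * additional lc + lp <= 4 requirement bounds the size of the literal
+ * probability array.
+ */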
+
+/*********
+ * LZMA2 *
+ *********/
+
+/*
+ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
+ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
+ * wrapper function takes care of making the LZMA decoder's assumption safe.
+ *
+ * As long as there is plenty of input left to be decoded in the current LZMA
+ * chunk, we decode directly from the caller-supplied input buffer until
+ * there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
+ * s->temp.buf, which (hopefully) gets filled on the next call to this
+ * function. We decode a few bytes from the temporary buffer so that we can
+ * continue decoding from the caller-supplied input buffer again.
+ */
+static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
+{
+	size_t in_avail;
+	uint32_t tmp;
+
+	in_avail = b->in_size - b->in_pos;
+	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
+		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
+		if (tmp > s->lzma2.compressed - s->temp.size)
+			tmp = s->lzma2.compressed - s->temp.size;
+		if (tmp > in_avail)
+			tmp = in_avail;
+
+		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
+
+		if (s->temp.size + tmp == s->lzma2.compressed) {
+			memzero(s->temp.buf + s->temp.size + tmp,
+					sizeof(s->temp.buf)
+						- s->temp.size - tmp);
+			s->rc.in_limit = s->temp.size + tmp;
+		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
+			s->temp.size += tmp;
+			b->in_pos += tmp;
+			return true;
+		} else {
+			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
+		}
+
+		s->rc.in = s->temp.buf;
+		s->rc.in_pos = 0;
+
+		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
+			return false;
+
+		s->lzma2.compressed -= s->rc.in_pos;
+
+		if (s->rc.in_pos < s->temp.size) {
+			s->temp.size -= s->rc.in_pos;
+			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
+					s->temp.size);
+			return true;
+		}
+
+		b->in_pos += s->rc.in_pos - s->temp.size;
+		s->temp.size = 0;
+	}
+
+	in_avail = b->in_size - b->in_pos;
+	if (in_avail >= LZMA_IN_REQUIRED) {
+		s->rc.in = b->in;
+		s->rc.in_pos = b->in_pos;
+
+		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
+			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
+		else
+			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
+
+		if (!lzma_main(s))
+			return false;
+
+		in_avail = s->rc.in_pos - b->in_pos;
+		if (in_avail > s->lzma2.compressed)
+			return false;
+
+		s->lzma2.compressed -= in_avail;
+		b->in_pos = s->rc.in_pos;
+	}
+
+	in_avail = b->in_size - b->in_pos;
+	if (in_avail < LZMA_IN_REQUIRED) {
+		if (in_avail > s->lzma2.compressed)
+			in_avail = s->lzma2.compressed;
+
+		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
+		s->temp.size = in_avail;
+		b->in_pos += in_avail;
+	}
+
+	return true;
+}
+
+/*
+ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
+ * decoding or copying of uncompressed chunks to other functions.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+				       struct xz_buf *b)
+{
+	uint32_t tmp;
+
+	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
+		switch (s->lzma2.sequence) {
+		case SEQ_CONTROL:
+			/*
+			 * LZMA2 control byte
+			 *
+			 * Exact values:
+			 *   0x00   End marker
+			 *   0x01   Dictionary reset followed by
+			 *          an uncompressed chunk
+			 *   0x02   Uncompressed chunk (no dictionary reset)
+			 *
+			 * Highest three bits (s->control & 0xE0):
+			 *   0xE0   Dictionary reset, new properties and state
+			 *          reset, followed by LZMA compressed chunk
+			 *   0xC0   New properties and state reset, followed
+			 *          by LZMA compressed chunk (no dictionary
+			 *          reset)
+			 *   0xA0   State reset using old properties,
+			 *          followed by LZMA compressed chunk (no
+			 *          dictionary reset)
+			 *   0x80   LZMA chunk (no dictionary or state reset)
+			 *
+			 * For LZMA compressed chunks, the lowest five bits
+			 * (s->control & 0x1F) are the highest bits of the
+			 * uncompressed size (bits 16-20).
+			 *
+			 * A new LZMA2 stream must begin with a dictionary
+			 * reset. The first LZMA chunk must set new
+			 * properties and reset the LZMA state.
+			 *
+			 * Values that don't match anything described above
+			 * are invalid and we return XZ_DATA_ERROR.
+			 */
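+			/*
+			 * For example, an LZMA2 chunk beginning with the
+			 * bytes E6 12 33 01 FF 5D would mean: dictionary
+			 * reset plus new properties and state reset;
+			 * uncompressed size (0x06 << 16) + (0x12 << 8)
+			 * + 0x33 + 1; compressed size (0x01 << 8) + 0xFF
+			 * + 1 == 512; LZMA properties byte 0x5D; and then
+			 * the LZMA data itself, whose first five bytes
+			 * initialize the range decoder.
+			 */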
+			tmp = b->in[b->in_pos++];
+
+			if (tmp >= 0xE0 || tmp == 0x01) {
+				s->lzma2.need_props = true;
+				s->lzma2.need_dict_reset = false;
+				dict_reset(&s->dict, b);
+			} else if (s->lzma2.need_dict_reset) {
+				return XZ_DATA_ERROR;
+			}
+
+			if (tmp >= 0x80) {
+				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
+				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
+
+				if (tmp >= 0xC0) {
+					/*
+					 * When there are new properties,
+					 * state reset is done at
+					 * SEQ_PROPERTIES.
+					 */
+					s->lzma2.need_props = false;
+					s->lzma2.next_sequence
+							= SEQ_PROPERTIES;
+
+				} else if (s->lzma2.need_props) {
+					return XZ_DATA_ERROR;
+
+				} else {
+					s->lzma2.next_sequence
+							= SEQ_LZMA_PREPARE;
+					if (tmp >= 0xA0)
+						lzma_reset(s);
+				}
+			} else {
+				if (tmp == 0x00)
+					return XZ_STREAM_END;
+
+				if (tmp > 0x02)
+					return XZ_DATA_ERROR;
+
+				s->lzma2.sequence = SEQ_COMPRESSED_0;
+				s->lzma2.next_sequence = SEQ_COPY;
+			}
+
+			break;
+
+		case SEQ_UNCOMPRESSED_1:
+			s->lzma2.uncompressed
+					+= (uint32_t)b->in[b->in_pos++] << 8;
+			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
+			break;
+
+		case SEQ_UNCOMPRESSED_2:
+			s->lzma2.uncompressed
+					+= (uint32_t)b->in[b->in_pos++] + 1;
+			s->lzma2.sequence = SEQ_COMPRESSED_0;
+			break;
+
+		case SEQ_COMPRESSED_0:
+			s->lzma2.compressed
+					= (uint32_t)b->in[b->in_pos++] << 8;
+			s->lzma2.sequence = SEQ_COMPRESSED_1;
+			break;
+
+		case SEQ_COMPRESSED_1:
+			s->lzma2.compressed
+					+= (uint32_t)b->in[b->in_pos++] + 1;
+			s->lzma2.sequence = s->lzma2.next_sequence;
+			break;
+
+		case SEQ_PROPERTIES:
+			if (!lzma_props(s, b->in[b->in_pos++]))
+				return XZ_DATA_ERROR;
+
+			s->lzma2.sequence = SEQ_LZMA_PREPARE;
+
+		case SEQ_LZMA_PREPARE:
+			if (s->lzma2.compressed < RC_INIT_BYTES)
+				return XZ_DATA_ERROR;
+
+			if (!rc_read_init(&s->rc, b))
+				return XZ_OK;
+
+			s->lzma2.compressed -= RC_INIT_BYTES;
+			s->lzma2.sequence = SEQ_LZMA_RUN;
+
+		case SEQ_LZMA_RUN:
+			/*
+			 * Set dictionary limit to indicate how much we want
+			 * to be decoded at maximum. Decode new data into the
+			 * dictionary. Flush the new data from the dictionary to
+			 * b->out. Check if we finished decoding this chunk.
+			 * In case the dictionary got full but we didn't fill
+			 * the output buffer yet, we may run this loop
+			 * multiple times without changing s->lzma2.sequence.
+			 */
+			dict_limit(&s->dict, min_t(size_t,
+					b->out_size - b->out_pos,
+					s->lzma2.uncompressed));
+			if (!lzma2_lzma(s, b))
+				return XZ_DATA_ERROR;
+
+			s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+			if (s->lzma2.uncompressed == 0) {
+				if (s->lzma2.compressed > 0 || s->lzma.len > 0
+						|| !rc_is_finished(&s->rc))
+					return XZ_DATA_ERROR;
+
+				rc_reset(&s->rc);
+				s->lzma2.sequence = SEQ_CONTROL;
+
+			} else if (b->out_pos == b->out_size
+					|| (b->in_pos == b->in_size
+						&& s->temp.size
+						< s->lzma2.compressed)) {
+				return XZ_OK;
+			}
+
+			break;
+
+		case SEQ_COPY:
+			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
+			if (s->lzma2.compressed > 0)
+				return XZ_OK;
+
+			s->lzma2.sequence = SEQ_CONTROL;
+			break;
+		}
+	}
+
+	return XZ_OK;
+}
+
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+						   uint32_t dict_max)
+{
+	struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->dict.mode = mode;
+	s->dict.size_max = dict_max;
+
+	if (DEC_IS_PREALLOC(mode)) {
+		s->dict.buf = vmalloc(dict_max);
+		if (s->dict.buf == NULL) {
+			kfree(s);
+			return NULL;
+		}
+	} else if (DEC_IS_DYNALLOC(mode)) {
+		s->dict.buf = NULL;
+		s->dict.allocated = 0;
+	}
+
+	return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+{
+	/* This limits dictionary size to 3 GiB to keep parsing simpler. */
+	if (props > 39)
+		return XZ_OPTIONS_ERROR;
+
+	s->dict.size = 2 + (props & 1);
+	s->dict.size <<= (props >> 1) + 11;
+
+	if (DEC_IS_MULTI(s->dict.mode)) {
+		if (s->dict.size > s->dict.size_max)
+			return XZ_MEMLIMIT_ERROR;
+
+		s->dict.end = s->dict.size;
+
+		if (DEC_IS_DYNALLOC(s->dict.mode)) {
+			if (s->dict.allocated < s->dict.size) {
+				vfree(s->dict.buf);
+				s->dict.buf = vmalloc(s->dict.size);
+				if (s->dict.buf == NULL) {
+					s->dict.allocated = 0;
+					return XZ_MEM_ERROR;
+				}
+			}
+		}
+	}
+
+	s->lzma.len = 0;
+
+	s->lzma2.sequence = SEQ_CONTROL;
+	s->lzma2.need_dict_reset = true;
+
+	s->temp.size = 0;
+
+	return XZ_OK;
+}
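+
+/*
+ * The props byte maps to the dictionary size as
+ * (2 + (props & 1)) << ((props >> 1) + 11). For example, props == 0
+ * gives 4 KiB, props == 1 gives 6 KiB, props == 24 gives 16 MiB, and
+ * props == 39 gives the 3 GiB maximum accepted above.
+ */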
+
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+{
+	if (DEC_IS_MULTI(s->dict.mode))
+		vfree(s->dict.buf);
+
+	kfree(s);
+}
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
new file mode 100644
index 0000000..ac809b1
--- /dev/null
+++ b/lib/xz/xz_dec_stream.c
@@ -0,0 +1,821 @@
+/*
+ * .xz Stream decoder
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_stream.h"
+
+/* Hash used to validate the Index field */
+struct xz_dec_hash {
+	vli_type unpadded;
+	vli_type uncompressed;
+	uint32_t crc32;
+};
+
+struct xz_dec {
+	/* Position in dec_main() */
+	enum {
+		SEQ_STREAM_HEADER,
+		SEQ_BLOCK_START,
+		SEQ_BLOCK_HEADER,
+		SEQ_BLOCK_UNCOMPRESS,
+		SEQ_BLOCK_PADDING,
+		SEQ_BLOCK_CHECK,
+		SEQ_INDEX,
+		SEQ_INDEX_PADDING,
+		SEQ_INDEX_CRC32,
+		SEQ_STREAM_FOOTER
+	} sequence;
+
+	/* Position in variable-length integers and Check fields */
+	uint32_t pos;
+
+	/* Variable-length integer decoded by dec_vli() */
+	vli_type vli;
+
+	/* Saved in_pos and out_pos */
+	size_t in_start;
+	size_t out_start;
+
+	/* CRC32 value in Block or Index */
+	uint32_t crc32;
+
+	/* Type of the integrity check calculated from uncompressed data */
+	enum xz_check check_type;
+
+	/* Operation mode */
+	enum xz_mode mode;
+
+	/*
+	 * True if the next call to xz_dec_run() is allowed to return
+	 * XZ_BUF_ERROR.
+	 */
+	bool allow_buf_error;
+
+	/* Information stored in Block Header */
+	struct {
+		/*
+		 * Value stored in the Compressed Size field, or
+		 * VLI_UNKNOWN if Compressed Size is not present.
+		 */
+		vli_type compressed;
+
+		/*
+		 * Value stored in the Uncompressed Size field, or
+		 * VLI_UNKNOWN if Uncompressed Size is not present.
+		 */
+		vli_type uncompressed;
+
+		/* Size of the Block Header field */
+		uint32_t size;
+	} block_header;
+
+	/* Information collected when decoding Blocks */
+	struct {
+		/* Observed compressed size of the current Block */
+		vli_type compressed;
+
+		/* Observed uncompressed size of the current Block */
+		vli_type uncompressed;
+
+		/* Number of Blocks decoded so far */
+		vli_type count;
+
+		/*
+		 * Hash calculated from the Block sizes. This is used to
+		 * validate the Index field.
+		 */
+		struct xz_dec_hash hash;
+	} block;
+
+	/* Variables needed when verifying the Index field */
+	struct {
+		/* Position in dec_index() */
+		enum {
+			SEQ_INDEX_COUNT,
+			SEQ_INDEX_UNPADDED,
+			SEQ_INDEX_UNCOMPRESSED
+		} sequence;
+
+		/* Size of the Index in bytes */
+		vli_type size;
+
+		/* Number of Records (matches block.count in valid files) */
+		vli_type count;
+
+		/*
+		 * Hash calculated from the Records (matches block.hash in
+		 * valid files).
+		 */
+		struct xz_dec_hash hash;
+	} index;
+
+	/*
+	 * Temporary buffer needed to hold Stream Header, Block Header,
+	 * and Stream Footer. The Block Header is the biggest (1 KiB)
+	 * so we reserve space according to that. buf[] has to be aligned
+	 * to a multiple of four bytes; the size_t variables before it
+	 * should guarantee this.
+	 */
+	struct {
+		size_t pos;
+		size_t size;
+		uint8_t buf[1024];
+	} temp;
+
+	struct xz_dec_lzma2 *lzma2;
+
+#ifdef XZ_DEC_BCJ
+	struct xz_dec_bcj *bcj;
+	bool bcj_active;
+#endif
+};
+
+#ifdef XZ_DEC_ANY_CHECK
+/* Sizes of the Check field with different Check IDs */
+static const uint8_t check_sizes[16] = {
+	0,
+	4, 4, 4,
+	8, 8, 8,
+	16, 16, 16,
+	32, 32, 32,
+	64, 64, 64
+};
+#endif
+
+/*
+ * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
+ * must have set s->temp.size to indicate how much data we are supposed
+ * to copy into s->temp.buf. Return true once s->temp.pos has reached
+ * s->temp.size.
+ */
+static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t copy_size = min_t(size_t,
+			b->in_size - b->in_pos, s->temp.size - s->temp.pos);
+
+	memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
+	b->in_pos += copy_size;
+	s->temp.pos += copy_size;
+
+	if (s->temp.pos == s->temp.size) {
+		s->temp.pos = 0;
+		return true;
+	}
+
+	return false;
+}
+
+/* Decode a variable-length integer (little-endian base-128 encoding) */
+static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
+			   size_t *in_pos, size_t in_size)
+{
+	uint8_t byte;
+
+	if (s->pos == 0)
+		s->vli = 0;
+
+	while (*in_pos < in_size) {
+		byte = in[*in_pos];
+		++*in_pos;
+
+		s->vli |= (vli_type)(byte & 0x7F) << s->pos;
+
+		if ((byte & 0x80) == 0) {
+			/* Don't allow non-minimal encodings. */
+			if (byte == 0 && s->pos != 0)
+				return XZ_DATA_ERROR;
+
+			s->pos = 0;
+			return XZ_STREAM_END;
+		}
+
+		s->pos += 7;
+		if (s->pos == 7 * VLI_BYTES_MAX)
+			return XZ_DATA_ERROR;
+	}
+
+	return XZ_OK;
+}
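+
+/*
+ * For example, the byte sequence 0xC2 0x05 decodes to 706: the first
+ * byte contributes its low seven bits (0x42) and has the continuation
+ * bit set, and the second byte contributes 0x05 << 7 == 0x280 and ends
+ * the integer. A single byte in the range 0x00-0x7F encodes its own
+ * value.
+ */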
+
+/*
+ * Decode the Compressed Data field from a Block. Update and validate
+ * the observed compressed and uncompressed sizes of the Block so that
+ * they don't exceed the values possibly stored in the Block Header
+ * (validation assumes that no integer overflow occurs, since vli_type
+ * is normally uint64_t). Update the CRC32 if presence of the CRC32
+ * field was indicated in Stream Header.
+ *
+ * Once the decoding is finished, validate that the observed sizes match
+ * the sizes possibly stored in the Block Header. Update the hash and
+ * Block count, which are later used to validate the Index field.
+ */
+static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	s->in_start = b->in_pos;
+	s->out_start = b->out_pos;
+
+#ifdef XZ_DEC_BCJ
+	if (s->bcj_active)
+		ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
+	else
+#endif
+		ret = xz_dec_lzma2_run(s->lzma2, b);
+
+	s->block.compressed += b->in_pos - s->in_start;
+	s->block.uncompressed += b->out_pos - s->out_start;
+
+	/*
+	 * There is no need to separately check for VLI_UNKNOWN, since
+	 * the observed sizes are always smaller than VLI_UNKNOWN.
+	 */
+	if (s->block.compressed > s->block_header.compressed
+			|| s->block.uncompressed
+				> s->block_header.uncompressed)
+		return XZ_DATA_ERROR;
+
+	if (s->check_type == XZ_CHECK_CRC32)
+		s->crc32 = xz_crc32(b->out + s->out_start,
+				b->out_pos - s->out_start, s->crc32);
+
+	if (ret == XZ_STREAM_END) {
+		if (s->block_header.compressed != VLI_UNKNOWN
+				&& s->block_header.compressed
+					!= s->block.compressed)
+			return XZ_DATA_ERROR;
+
+		if (s->block_header.uncompressed != VLI_UNKNOWN
+				&& s->block_header.uncompressed
+					!= s->block.uncompressed)
+			return XZ_DATA_ERROR;
+
+		s->block.hash.unpadded += s->block_header.size
+				+ s->block.compressed;
+
+#ifdef XZ_DEC_ANY_CHECK
+		s->block.hash.unpadded += check_sizes[s->check_type];
+#else
+		if (s->check_type == XZ_CHECK_CRC32)
+			s->block.hash.unpadded += 4;
+#endif
+
+		s->block.hash.uncompressed += s->block.uncompressed;
+		s->block.hash.crc32 = xz_crc32(
+				(const uint8_t *)&s->block.hash,
+				sizeof(s->block.hash), s->block.hash.crc32);
+
+		++s->block.count;
+	}
+
+	return ret;
+}
+
+/* Update the Index size and the CRC32 value. */
+static void index_update(struct xz_dec *s, const struct xz_buf *b)
+{
+	size_t in_used = b->in_pos - s->in_start;
+	s->index.size += in_used;
+	s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
+}
+
+/*
+ * Decode the Number of Records, Unpadded Size, and Uncompressed Size
+ * fields from the Index field. That is, Index Padding and CRC32 are not
+ * decoded by this function.
+ *
+ * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
+ * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
+ */
+static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	do {
+		ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
+		if (ret != XZ_STREAM_END) {
+			index_update(s, b);
+			return ret;
+		}
+
+		switch (s->index.sequence) {
+		case SEQ_INDEX_COUNT:
+			s->index.count = s->vli;
+
+			/*
+			 * Validate that the Number of Records field
+			 * indicates the same number of Records as
+			 * there were Blocks in the Stream.
+			 */
+			if (s->index.count != s->block.count)
+				return XZ_DATA_ERROR;
+
+			s->index.sequence = SEQ_INDEX_UNPADDED;
+			break;
+
+		case SEQ_INDEX_UNPADDED:
+			s->index.hash.unpadded += s->vli;
+			s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
+			break;
+
+		case SEQ_INDEX_UNCOMPRESSED:
+			s->index.hash.uncompressed += s->vli;
+			s->index.hash.crc32 = xz_crc32(
+					(const uint8_t *)&s->index.hash,
+					sizeof(s->index.hash),
+					s->index.hash.crc32);
+			--s->index.count;
+			s->index.sequence = SEQ_INDEX_UNPADDED;
+			break;
+		}
+	} while (s->index.count > 0);
+
+	return XZ_STREAM_END;
+}
+
+/*
+ * Validate that the next four input bytes match the value of s->crc32.
+ * s->pos must be zero when starting to validate the first byte.
+ */
+static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b)
+{
+	do {
+		if (b->in_pos == b->in_size)
+			return XZ_OK;
+
+		if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
+			return XZ_DATA_ERROR;
+
+		s->pos += 8;
+
+	} while (s->pos < 32);
+
+	s->crc32 = 0;
+	s->pos = 0;
+
+	return XZ_STREAM_END;
+}
+
+#ifdef XZ_DEC_ANY_CHECK
+/*
+ * Skip over the Check field when the Check ID is not supported.
+ * Returns true once the whole Check field has been skipped over.
+ */
+static bool check_skip(struct xz_dec *s, struct xz_buf *b)
+{
+	while (s->pos < check_sizes[s->check_type]) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		++b->in_pos;
+		++s->pos;
+	}
+
+	s->pos = 0;
+
+	return true;
+}
+#endif
+
+/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
+static enum xz_ret dec_stream_header(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
+		return XZ_FORMAT_ERROR;
+
+	if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
+			!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
+		return XZ_OPTIONS_ERROR;
+
+	/*
+	 * Of integrity checks, we support only none (Check ID = 0) and
+	 * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
+	 * we will accept other check types too, but then the check won't
+	 * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
+	 */
+	s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
+
+#ifdef XZ_DEC_ANY_CHECK
+	if (s->check_type > XZ_CHECK_MAX)
+		return XZ_OPTIONS_ERROR;
+
+	if (s->check_type > XZ_CHECK_CRC32)
+		return XZ_UNSUPPORTED_CHECK;
+#else
+	if (s->check_type > XZ_CHECK_CRC32)
+		return XZ_OPTIONS_ERROR;
+#endif
+
+	return XZ_OK;
+}
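+
+/*
+ * For reference, a Stream Header using CRC32 consists of the six magic
+ * bytes 0xFD '7' 'z' 'X' 'Z' 0x00, the two Stream Flags bytes 0x00 0x01
+ * (reserved byte plus Check ID 1 == CRC32), and the little-endian CRC32
+ * of those two flag bytes.
+ */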
+
+/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
+static enum xz_ret dec_stream_footer(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
+		return XZ_DATA_ERROR;
+
+	if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Validate Backward Size. Note that we never added the size of the
+	 * Index CRC32 field to s->index.size, thus we use s->index.size / 4
+	 * instead of s->index.size / 4 - 1.
+	 */
+	if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Use XZ_STREAM_END instead of XZ_OK to be more convenient
+	 * for the caller.
+	 */
+	return XZ_STREAM_END;
+}
+
+/* Decode the Block Header and initialize the filter chain. */
+static enum xz_ret dec_block_header(struct xz_dec *s)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Validate the CRC32. We know that the temp buffer is at least
+	 * eight bytes so this is safe.
+	 */
+	s->temp.size -= 4;
+	if (xz_crc32(s->temp.buf, s->temp.size, 0)
+			!= get_le32(s->temp.buf + s->temp.size))
+		return XZ_DATA_ERROR;
+
+	s->temp.pos = 2;
+
+	/*
+	 * Catch unsupported Block Flags. We support only one or two filters
+	 * in the chain, so we catch that with the same test.
+	 */
+#ifdef XZ_DEC_BCJ
+	if (s->temp.buf[1] & 0x3E)
+#else
+	if (s->temp.buf[1] & 0x3F)
+#endif
+		return XZ_OPTIONS_ERROR;
+
+	/* Compressed Size */
+	if (s->temp.buf[1] & 0x40) {
+		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+					!= XZ_STREAM_END)
+			return XZ_DATA_ERROR;
+
+		s->block_header.compressed = s->vli;
+	} else {
+		s->block_header.compressed = VLI_UNKNOWN;
+	}
+
+	/* Uncompressed Size */
+	if (s->temp.buf[1] & 0x80) {
+		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+				!= XZ_STREAM_END)
+			return XZ_DATA_ERROR;
+
+		s->block_header.uncompressed = s->vli;
+	} else {
+		s->block_header.uncompressed = VLI_UNKNOWN;
+	}
+
+#ifdef XZ_DEC_BCJ
+	/* If there are two filters, the first one must be a BCJ filter. */
+	s->bcj_active = s->temp.buf[1] & 0x01;
+	if (s->bcj_active) {
+		if (s->temp.size - s->temp.pos < 2)
+			return XZ_OPTIONS_ERROR;
+
+		ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
+		if (ret != XZ_OK)
+			return ret;
+
+		/*
+		 * We don't support a custom start offset,
+		 * so Size of Properties must be zero.
+		 */
+		if (s->temp.buf[s->temp.pos++] != 0x00)
+			return XZ_OPTIONS_ERROR;
+	}
+#endif
+
+	/* Valid Filter Flags always take at least two bytes. */
+	if (s->temp.size - s->temp.pos < 2)
+		return XZ_DATA_ERROR;
+
+	/* Filter ID = LZMA2 */
+	if (s->temp.buf[s->temp.pos++] != 0x21)
+		return XZ_OPTIONS_ERROR;
+
+	/* Size of Properties = 1-byte Filter Properties */
+	if (s->temp.buf[s->temp.pos++] != 0x01)
+		return XZ_OPTIONS_ERROR;
+
+	/* Filter Properties contains LZMA2 dictionary size. */
+	if (s->temp.size - s->temp.pos < 1)
+		return XZ_DATA_ERROR;
+
+	ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
+	if (ret != XZ_OK)
+		return ret;
+
+	/* The rest must be Header Padding. */
+	while (s->temp.pos < s->temp.size)
+		if (s->temp.buf[s->temp.pos++] != 0x00)
+			return XZ_OPTIONS_ERROR;
+
+	s->temp.pos = 0;
+	s->block.compressed = 0;
+	s->block.uncompressed = 0;
+
+	return XZ_OK;
+}
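+
+/*
+ * A minimal Block Header for a plain LZMA2 filter chain is twelve bytes:
+ * 0x02 (encoded size, (0x02 + 1) * 4 == 12), 0x00 (flags: one filter, no
+ * size fields), 0x21 (Filter ID == LZMA2), 0x01 (Size of Properties),
+ * one dictionary size byte, three 0x00 padding bytes, and the CRC32 of
+ * the first eight bytes.
+ */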
+
+static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Store the start position for the case when we are in the middle
+	 * of the Index field.
+	 */
+	s->in_start = b->in_pos;
+
+	while (true) {
+		switch (s->sequence) {
+		case SEQ_STREAM_HEADER:
+			/*
+			 * Stream Header is copied to s->temp, and then
+			 * decoded from there. This way if the caller
+			 * gives us only a little input at a time, we can
+			 * still keep the Stream Header decoding code
+			 * simple. A similar approach is used in many
+			 * places in this file.
+			 */
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			/*
+			 * If dec_stream_header() returns
+			 * XZ_UNSUPPORTED_CHECK, it is still possible
+			 * to continue decoding if working in multi-call
+			 * mode. Thus, update s->sequence before calling
+			 * dec_stream_header().
+			 */
+			s->sequence = SEQ_BLOCK_START;
+
+			ret = dec_stream_header(s);
+			if (ret != XZ_OK)
+				return ret;
+
+		case SEQ_BLOCK_START:
+			/* We need one byte of input to continue. */
+			if (b->in_pos == b->in_size)
+				return XZ_OK;
+
+			/* See if this is the beginning of the Index field. */
+			if (b->in[b->in_pos] == 0) {
+				s->in_start = b->in_pos++;
+				s->sequence = SEQ_INDEX;
+				break;
+			}
+
+			/*
+			 * Calculate the size of the Block Header and
+			 * prepare to decode it.
+			 */
+			s->block_header.size
+				= ((uint32_t)b->in[b->in_pos] + 1) * 4;
+
+			s->temp.size = s->block_header.size;
+			s->temp.pos = 0;
+			s->sequence = SEQ_BLOCK_HEADER;
+
+		case SEQ_BLOCK_HEADER:
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			ret = dec_block_header(s);
+			if (ret != XZ_OK)
+				return ret;
+
+			s->sequence = SEQ_BLOCK_UNCOMPRESS;
+
+		case SEQ_BLOCK_UNCOMPRESS:
+			ret = dec_block(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->sequence = SEQ_BLOCK_PADDING;
+
+		case SEQ_BLOCK_PADDING:
+			/*
+			 * Size of Compressed Data + Block Padding
+			 * must be a multiple of four. We don't need
+			 * s->block.compressed for anything else
+			 * anymore, so we use it here to test the size
+			 * of the Block Padding field.
+			 */
+			while (s->block.compressed & 3) {
+				if (b->in_pos == b->in_size)
+					return XZ_OK;
+
+				if (b->in[b->in_pos++] != 0)
+					return XZ_DATA_ERROR;
+
+				++s->block.compressed;
+			}
+
+			s->sequence = SEQ_BLOCK_CHECK;
+
+		case SEQ_BLOCK_CHECK:
+			if (s->check_type == XZ_CHECK_CRC32) {
+				ret = crc32_validate(s, b);
+				if (ret != XZ_STREAM_END)
+					return ret;
+			}
+#ifdef XZ_DEC_ANY_CHECK
+			else if (!check_skip(s, b)) {
+				return XZ_OK;
+			}
+#endif
+
+			s->sequence = SEQ_BLOCK_START;
+			break;
+
+		case SEQ_INDEX:
+			ret = dec_index(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->sequence = SEQ_INDEX_PADDING;
+
+		case SEQ_INDEX_PADDING:
+			while ((s->index.size + (b->in_pos - s->in_start))
+					& 3) {
+				if (b->in_pos == b->in_size) {
+					index_update(s, b);
+					return XZ_OK;
+				}
+
+				if (b->in[b->in_pos++] != 0)
+					return XZ_DATA_ERROR;
+			}
+
+			/* Finish the CRC32 value and Index size. */
+			index_update(s, b);
+
+			/* Compare the hashes to validate the Index field. */
+			if (!memeq(&s->block.hash, &s->index.hash,
+					sizeof(s->block.hash)))
+				return XZ_DATA_ERROR;
+
+			s->sequence = SEQ_INDEX_CRC32;
+
+		case SEQ_INDEX_CRC32:
+			ret = crc32_validate(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->temp.size = STREAM_HEADER_SIZE;
+			s->sequence = SEQ_STREAM_FOOTER;
+
+		case SEQ_STREAM_FOOTER:
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			return dec_stream_footer(s);
+		}
+	}
+
+	/* Never reached */
+}
+
+/*
+ * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
+ * multi-call and single-call decoding.
+ *
+ * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
+ * are not going to make any progress anymore. This is to prevent the caller
+ * from calling us infinitely when the input file is truncated or otherwise
+ * corrupt. Since zlib-style API allows that the caller fills the input buffer
+ * only when the decoder doesn't produce any new output, we have to be careful
+ * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
+ * after the second consecutive call to xz_dec_run() that makes no progress.
+ *
+ * In single-call mode, if we couldn't decode everything and no error
+ * occurred, either the input is truncated or the output buffer is too small.
+ * Since we know that the last input byte never produces any output, we know
+ * that if all the input was consumed and decoding wasn't finished, the file
+ * must be corrupt. Otherwise the output buffer has to be too small or the
+ * file is corrupt in a way that decoding it produces too big output.
+ *
+ * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
+ * their original values. This is because with some filter chains there won't
+ * be any valid uncompressed data in the output buffer unless the decoding
+ * actually succeeds (that's the price to pay for using the output buffer as
+ * the workspace).
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t in_start;
+	size_t out_start;
+	enum xz_ret ret;
+
+	if (DEC_IS_SINGLE(s->mode))
+		xz_dec_reset(s);
+
+	in_start = b->in_pos;
+	out_start = b->out_pos;
+	ret = dec_main(s, b);
+
+	if (DEC_IS_SINGLE(s->mode)) {
+		if (ret == XZ_OK)
+			ret = b->in_pos == b->in_size
+					? XZ_DATA_ERROR : XZ_BUF_ERROR;
+
+		if (ret != XZ_STREAM_END) {
+			b->in_pos = in_start;
+			b->out_pos = out_start;
+		}
+
+	} else if (ret == XZ_OK && in_start == b->in_pos
+			&& out_start == b->out_pos) {
+		if (s->allow_buf_error)
+			ret = XZ_BUF_ERROR;
+
+		s->allow_buf_error = true;
+	} else {
+		s->allow_buf_error = false;
+	}
+
+	return ret;
+}
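+
+/*
+ * A typical multi-call caller might look roughly like the sketch below;
+ * fill_input() and use_output() are stand-ins for whatever I/O the
+ * caller does, and error handling is omitted:
+ *
+ *	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
+ *	struct xz_buf b = {
+ *		.in = in_buf, .in_pos = 0, .in_size = 0,
+ *		.out = out_buf, .out_pos = 0, .out_size = sizeof(out_buf)
+ *	};
+ *	enum xz_ret ret;
+ *
+ *	do {
+ *		if (b.in_pos == b.in_size) {
+ *			b.in_size = fill_input(in_buf, sizeof(in_buf));
+ *			b.in_pos = 0;
+ *		}
+ *		ret = xz_dec_run(s, &b);
+ *		use_output(out_buf, b.out_pos);
+ *		b.out_pos = 0;
+ *	} while (ret == XZ_OK);
+ *
+ *	xz_dec_end(s);
+ */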
+
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+{
+	struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->mode = mode;
+
+#ifdef XZ_DEC_BCJ
+	s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
+	if (s->bcj == NULL)
+		goto error_bcj;
+#endif
+
+	s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
+	if (s->lzma2 == NULL)
+		goto error_lzma2;
+
+	xz_dec_reset(s);
+	return s;
+
+error_lzma2:
+#ifdef XZ_DEC_BCJ
+	xz_dec_bcj_end(s->bcj);
+error_bcj:
+#endif
+	kfree(s);
+	return NULL;
+}
+
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+{
+	s->sequence = SEQ_STREAM_HEADER;
+	s->allow_buf_error = false;
+	s->pos = 0;
+	s->crc32 = 0;
+	memzero(&s->block, sizeof(s->block));
+	memzero(&s->index, sizeof(s->index));
+	s->temp.pos = 0;
+	s->temp.size = STREAM_HEADER_SIZE;
+}
+
+XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+{
+	if (s != NULL) {
+		xz_dec_lzma2_end(s->lzma2);
+#ifdef XZ_DEC_BCJ
+		xz_dec_bcj_end(s->bcj);
+#endif
+		kfree(s);
+	}
+}
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
new file mode 100644
index 0000000..32eb3c0
--- /dev/null
+++ b/lib/xz/xz_dec_syms.c
@@ -0,0 +1,26 @@
+/*
+ * XZ decoder module information
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/module.h>
+#include <linux/xz.h>
+
+EXPORT_SYMBOL(xz_dec_init);
+EXPORT_SYMBOL(xz_dec_reset);
+EXPORT_SYMBOL(xz_dec_run);
+EXPORT_SYMBOL(xz_dec_end);
+
+MODULE_DESCRIPTION("XZ decompressor");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
new file mode 100644
index 0000000..da28a19
--- /dev/null
+++ b/lib/xz/xz_dec_test.c
@@ -0,0 +1,220 @@
+/*
+ * XZ decoder tester
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/crc32.h>
+#include <linux/xz.h>
+
+/* Maximum supported dictionary size */
+#define DICT_MAX (1 << 20)
+
+/* Device name to pass to register_chrdev(). */
+#define DEVICE_NAME "xz_dec_test"
+
+/* Dynamically allocated device major number */
+static int device_major;
+
+/*
+ * We reuse the same decoder state, and thus can decode only one
+ * file at a time.
+ */
+static bool device_is_open;
+
+/* XZ decoder state */
+static struct xz_dec *state;
+
+/*
+ * Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after
+ * it has returned XZ_STREAM_END, so we make this static.
+ */
+static enum xz_ret ret;
+
+/*
+ * Input and output buffers. The input buffer is used as a temporary safe
+ * place for the data coming from userspace.
+ */
+static uint8_t buffer_in[1024];
+static uint8_t buffer_out[1024];
+
+/*
+ * Structure to pass the input and output buffers to the XZ decoder.
+ * A few of the fields are never modified so we initialize them here.
+ */
+static struct xz_buf buffers = {
+	.in = buffer_in,
+	.out = buffer_out,
+	.out_size = sizeof(buffer_out)
+};
+
+/*
+ * CRC32 of uncompressed data. This is used to give the user a simple way
+ * to check that the decoder produces correct output.
+ */
+static uint32_t crc;
+
+static int xz_dec_test_open(struct inode *i, struct file *f)
+{
+	if (device_is_open)
+		return -EBUSY;
+
+	device_is_open = true;
+
+	xz_dec_reset(state);
+	ret = XZ_OK;
+	crc = 0xFFFFFFFF;
+
+	buffers.in_pos = 0;
+	buffers.in_size = 0;
+	buffers.out_pos = 0;
+
+	printk(KERN_INFO DEVICE_NAME ": opened\n");
+	return 0;
+}
+
+static int xz_dec_test_release(struct inode *i, struct file *f)
+{
+	device_is_open = false;
+
+	if (ret == XZ_OK)
+		printk(KERN_INFO DEVICE_NAME ": input was truncated\n");
+
+	printk(KERN_INFO DEVICE_NAME ": closed\n");
+	return 0;
+}
+
+/*
+ * Decode the data given to us from userspace. CRC32 of the uncompressed
+ * data is calculated and is printed at the end of successful decoding. The
+ * uncompressed data isn't stored anywhere for further use.
+ *
+ * The .xz file must have exactly one Stream and no Stream Padding. The data
+ * after the first Stream is considered to be garbage.
+ */
+static ssize_t xz_dec_test_write(struct file *file, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	size_t remaining;
+
+	if (ret != XZ_OK) {
+		if (size > 0)
+			printk(KERN_INFO DEVICE_NAME ": %zu bytes of "
+					"garbage at the end of the file\n",
+					size);
+
+		return -ENOSPC;
+	}
+
+	printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n",
+			size);
+
+	remaining = size;
+	while ((remaining > 0 || buffers.out_pos == buffers.out_size)
+			&& ret == XZ_OK) {
+		if (buffers.in_pos == buffers.in_size) {
+			buffers.in_pos = 0;
+			buffers.in_size = min(remaining, sizeof(buffer_in));
+			if (copy_from_user(buffer_in, buf, buffers.in_size))
+				return -EFAULT;
+
+			buf += buffers.in_size;
+			remaining -= buffers.in_size;
+		}
+
+		buffers.out_pos = 0;
+		ret = xz_dec_run(state, &buffers);
+		crc = crc32(crc, buffer_out, buffers.out_pos);
+	}
+
+	switch (ret) {
+	case XZ_OK:
+		printk(KERN_INFO DEVICE_NAME ": XZ_OK\n");
+		return size;
+
+	case XZ_STREAM_END:
+		printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, "
+				"CRC32 = 0x%08X\n", ~crc);
+		return size - remaining - (buffers.in_size - buffers.in_pos);
+
+	case XZ_MEMLIMIT_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n");
+		break;
+
+	case XZ_FORMAT_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n");
+		break;
+
+	case XZ_OPTIONS_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n");
+		break;
+
+	case XZ_DATA_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n");
+		break;
+
+	case XZ_BUF_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n");
+		break;
+
+	default:
+		printk(KERN_INFO DEVICE_NAME ": Bug detected!\n");
+		break;
+	}
+
+	return -EIO;
+}
+
+/* Allocate the XZ decoder state and register the character device. */
+static int __init xz_dec_test_init(void)
+{
+	static const struct file_operations fileops = {
+		.owner = THIS_MODULE,
+		.open = &xz_dec_test_open,
+		.release = &xz_dec_test_release,
+		.write = &xz_dec_test_write
+	};
+
+	state = xz_dec_init(XZ_PREALLOC, DICT_MAX);
+	if (state == NULL)
+		return -ENOMEM;
+
+	device_major = register_chrdev(0, DEVICE_NAME, &fileops);
+	if (device_major < 0) {
+		xz_dec_end(state);
+		return device_major;
+	}
+
+	printk(KERN_INFO DEVICE_NAME ": module loaded\n");
+	printk(KERN_INFO DEVICE_NAME ": Create a device node with "
+			"'mknod " DEVICE_NAME " c %d 0' and write .xz files "
+			"to it.\n", device_major);
+	return 0;
+}
+
+static void __exit xz_dec_test_exit(void)
+{
+	unregister_chrdev(device_major, DEVICE_NAME);
+	xz_dec_end(state);
+	printk(KERN_INFO DEVICE_NAME ": module unloaded\n");
+}
+
+module_init(xz_dec_test_init);
+module_exit(xz_dec_test_exit);
+
+MODULE_DESCRIPTION("XZ decompressor tester");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
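For context, the write() handler above consumes the .xz stream in buffer_in-sized chunks and reports a short write count once XZ_STREAM_END is reached, while release() warns if the stream never ended. A minimal userspace feeder along those lines could look like the sketch below; it is an illustration, not part of the patch, and the device node path is an assumption (use whatever node was created with the mknod command printed at module load).

#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n, w;
	int in, dev;

	if (argc != 2)
		return 1;
	in = open(argv[1], O_RDONLY);
	dev = open("/dev/xz_dec_test", O_WRONLY);	/* assumed node path */
	if (in < 0 || dev < 0)
		return 1;

	while ((n = read(in, buf, sizeof(buf))) > 0) {
		w = write(dev, buf, n);
		if (w < n)	/* short write: stream ended or the decoder hit an error */
			break;
	}
	close(dev);	/* release() logs "input was truncated" if no stream end was seen */
	close(in);
	return 0;	/* results are reported via printk, check the kernel log */
}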
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
new file mode 100644
index 0000000..071d67b
--- /dev/null
+++ b/lib/xz/xz_lzma2.h
@@ -0,0 +1,204 @@
+/*
+ * LZMA2 definitions
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_LZMA2_H
+#define XZ_LZMA2_H
+
+/* Range coder constants */
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (1 << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+/*
+ * Maximum number of position states. A position state is the lowest pb
+ * number of bits of the current uncompressed offset. In some places there
+ * are different sets of probabilities for different position states.
+ */
+#define POS_STATES_MAX (1 << 4)
+
+/*
+ * This enum is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ *  - Literal: One 8-bit byte
+ *  - Match: Repeat a chunk of data at some distance
+ *  - Long repeat: Multi-byte match at a recently seen distance
+ *  - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are of the form STATE_oldest_older_previous. REP means
+ * either short or long repeated match, and NONLIT means any non-literal.
+ */
+enum lzma_state {
+	STATE_LIT_LIT,
+	STATE_MATCH_LIT_LIT,
+	STATE_REP_LIT_LIT,
+	STATE_SHORTREP_LIT_LIT,
+	STATE_MATCH_LIT,
+	STATE_REP_LIT,
+	STATE_SHORTREP_LIT,
+	STATE_LIT_MATCH,
+	STATE_LIT_LONGREP,
+	STATE_LIT_SHORTREP,
+	STATE_NONLIT_MATCH,
+	STATE_NONLIT_REP
+};
+
+/* Total number of states */
+#define STATES 12
+
+/* The lowest 7 states indicate that the previous state was a literal. */
+#define LIT_STATES 7
+
+/* Indicate that the latest symbol was a literal. */
+static inline void lzma_state_literal(enum lzma_state *state)
+{
+	if (*state <= STATE_SHORTREP_LIT_LIT)
+		*state = STATE_LIT_LIT;
+	else if (*state <= STATE_LIT_SHORTREP)
+		*state -= 3;
+	else
+		*state -= 6;
+}
+
+/* Indicate that the latest symbol was a match. */
+static inline void lzma_state_match(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
+}
+
+/* Indicate that the latest state was a long repeated match. */
+static inline void lzma_state_long_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
+}
+
+/* Indicate that the latest symbol was a short match. */
+static inline void lzma_state_short_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
+}
+
+/* Test if the previous symbol was a literal. */
+static inline bool lzma_state_is_literal(enum lzma_state state)
+{
+	return state < LIT_STATES;
+}
+
+/* Each literal coder is divided into three sections:
+ *   - 0x001-0x0FF: Without match byte
+ *   - 0x101-0x1FF: With match byte; match bit is 0
+ *   - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * The match byte is used when the previous LZMA symbol was something other
+ * than a literal (that is, it was some kind of match).
+ */
+#define LITERAL_CODER_SIZE 0x300
+
+/* Maximum number of literal coders */
+#define LITERAL_CODERS_MAX (1 << 4)
+
+/* Minimum length of a match is two bytes. */
+#define MATCH_LEN_MIN 2
+
+/* Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length   Bits
+ *  2-9      4 = Choice=0 + 3 bits
+ * 10-17     5 = Choice=1 + Choice2=0 + 3 bits
+ * 18-273   10 = Choice=1 + Choice2=1 + 8 bits
+ */
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+/*
+ * Maximum length of a match is 273 which is a result of the encoding
+ * described above.
+ */
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+#define DIST_STATES 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+static inline uint32_t lzma_get_dist_state(uint32_t len)
+{
+	return len < DIST_STATES + MATCH_LEN_MIN
+			? len - MATCH_LEN_MIN : DIST_STATES - 1;
+}
+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot. This way encoding a 32-bit
+ * value takes 6-36 bits, larger values taking more bits.
+ */
+#define DIST_SLOT_BITS 6
+#define DIST_SLOTS (1 << DIST_SLOT_BITS)
+
+/* Match distances up to 127 are fully encoded using probabilities. Since
+ * the highest two bits (distance slot) are always encoded using six bits,
+ * the distances 0-3 don't need any additional bits to encode, since the
+ * distance slot itself is the same as the actual distance. DIST_MODEL_START
+ * indicates the first distance slot where at least one additional bit is
+ * needed.
+ */
+#define DIST_MODEL_START 4
+
+/*
+ * Match distances greater than 127 are encoded in three pieces:
+ *   - distance slot: the highest two bits
+ *   - direct bits: 2-26 bits below the highest two bits
+ *   - alignment bits: four lowest bits
+ *
+ * Direct bits don't use any probabilities.
+ *
+ * The distance slot value of 14 is for distances 128-191.
+ */
+#define DIST_MODEL_END 14
+
+/* Distance slots that indicate a distance <= 127. */
+#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+/*
+ * For match distances greater than 127, only the highest two bits and the
+ * lowest four bits (alignment) are encoded using probabilities.
+ */
+#define ALIGN_BITS 4
+#define ALIGN_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+/* Total number of all probability variables */
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+
+/*
+ * LZMA remembers the four most recent match distances. Reusing these
+ * distances tends to take less space than re-encoding the actual
+ * distance value.
+ */
+#define REPS 4
+
+#endif
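The length and distance-slot encodings documented in the comments above can be sanity-checked outside the kernel. The following standalone sketch is an illustration under those comments, not code from this patch; it assumes a GCC/Clang __builtin_clz. It derives the distance slot for a few distances, confirming that slot 14 covers 128-191 and that the 4/5/10-bit length coding tops out at 273.

#include <assert.h>
#include <stdint.h>

/* Distance slot: the highest two bits of the distance packed into six bits. */
static uint32_t dist_slot(uint32_t dist)
{
	uint32_t n;

	if (dist < 4)
		return dist;		/* slot equals the distance itself */
	n = 31 - __builtin_clz(dist);	/* index of the highest set bit */
	return 2 * n + ((dist >> (n - 1)) & 1);
}

int main(void)
{
	assert(dist_slot(3) == 3);	/* below DIST_MODEL_START: no extra bits */
	assert(dist_slot(127) == 13);	/* last fully probability-coded distance */
	assert(dist_slot(128) == 14 && dist_slot(191) == 14);
	assert(dist_slot(192) == 15);

	/* length coding: 2 + (8 + 8 + 256) - 1 == MATCH_LEN_MAX == 273 */
	assert(2 + (8 + 8 + 256) - 1 == 273);
	return 0;
}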
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
new file mode 100644
index 0000000..a65633e
--- /dev/null
+++ b/lib/xz/xz_private.h
@@ -0,0 +1,156 @@
+/*
+ * Private includes and definitions
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_PRIVATE_H
+#define XZ_PRIVATE_H
+
+#ifdef __KERNEL__
+#	include <linux/xz.h>
+#	include <asm/byteorder.h>
+#	include <asm/unaligned.h>
+	/* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+#	ifndef XZ_PREBOOT
+#		include <linux/slab.h>
+#		include <linux/vmalloc.h>
+#		include <linux/string.h>
+#		ifdef CONFIG_XZ_DEC_X86
+#			define XZ_DEC_X86
+#		endif
+#		ifdef CONFIG_XZ_DEC_POWERPC
+#			define XZ_DEC_POWERPC
+#		endif
+#		ifdef CONFIG_XZ_DEC_IA64
+#			define XZ_DEC_IA64
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARM
+#			define XZ_DEC_ARM
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARMTHUMB
+#			define XZ_DEC_ARMTHUMB
+#		endif
+#		ifdef CONFIG_XZ_DEC_SPARC
+#			define XZ_DEC_SPARC
+#		endif
+#		define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#		define memzero(buf, size) memset(buf, 0, size)
+#	endif
+#	define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#else
+	/*
+	 * For userspace builds, use a separate header to define the required
+	 * macros and functions. This makes it easier to adapt the code into
+	 * different environments and avoids clutter in the Linux kernel tree.
+	 */
+#	include "xz_config.h"
+#endif
+
+/* If no specific decoding mode is requested, enable support for all modes. */
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
+		&& !defined(XZ_DEC_DYNALLOC)
+#	define XZ_DEC_SINGLE
+#	define XZ_DEC_PREALLOC
+#	define XZ_DEC_DYNALLOC
+#endif
+
+/*
+ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
+ * of the supported modes are enabled, these macros will evaluate to true or
+ * false at compile time and thus allow the compiler to omit unneeded code.
+ */
+#ifdef XZ_DEC_SINGLE
+#	define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#else
+#	define DEC_IS_SINGLE(mode) (false)
+#endif
+
+#ifdef XZ_DEC_PREALLOC
+#	define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#else
+#	define DEC_IS_PREALLOC(mode) (false)
+#endif
+
+#ifdef XZ_DEC_DYNALLOC
+#	define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#else
+#	define DEC_IS_DYNALLOC(mode) (false)
+#endif
+
+#if !defined(XZ_DEC_SINGLE)
+#	define DEC_IS_MULTI(mode) (true)
+#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
+#	define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#else
+#	define DEC_IS_MULTI(mode) (false)
+#endif
+
+/*
+ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
+ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
+ */
+#ifndef XZ_DEC_BCJ
+#	if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
+			|| defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+			|| defined(XZ_DEC_ARMTHUMB) \
+			|| defined(XZ_DEC_SPARC)
+#		define XZ_DEC_BCJ
+#	endif
+#endif
+
+/*
+ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
+ * before calling xz_dec_lzma2_run().
+ */
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+						   uint32_t dict_max);
+
+/*
+ * Decode the LZMA2 properties (one byte) and reset the decoder. Return
+ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
+ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
+ * decoder doesn't support.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
+					 uint8_t props);
+
+/* Decode raw LZMA2 stream from b->in to b->out. */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+				       struct xz_buf *b);
+
+/* Free the memory allocated for the LZMA2 decoder. */
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+
+#ifdef XZ_DEC_BCJ
+/*
+ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
+ * calling xz_dec_bcj_run().
+ */
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+
+/*
+ * Decode the Filter ID of a BCJ filter. This implementation doesn't
+ * support custom start offsets, so no decoding of Filter Properties
+ * is needed. Returns XZ_OK if the given Filter ID is supported.
+ * Otherwise XZ_OPTIONS_ERROR is returned.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+
+/*
+ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
+ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
+ * must be called directly.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+				     struct xz_dec_lzma2 *lzma2,
+				     struct xz_buf *b);
+
+/* Free the memory allocated for the BCJ filters. */
+#define xz_dec_bcj_end(s) kfree(s)
+#endif
+
+#endif
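The DEC_IS_foo() macros above rely on the usual constant-folding trick: when a mode is compiled out, its predicate is the literal false, so the compiler drops the dead branch entirely. A self-contained illustration with simplified names (not the kernel build itself):

#include <stdbool.h>
#include <stdio.h>

enum xz_mode { XZ_SINGLE, XZ_PREALLOC, XZ_DYNALLOC };

#define XZ_DEC_PREALLOC		/* pretend only the preallocated mode was selected */

#ifdef XZ_DEC_SINGLE
#	define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
#else
#	define DEC_IS_SINGLE(mode) (false)
#endif

#ifdef XZ_DEC_PREALLOC
#	define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
#else
#	define DEC_IS_PREALLOC(mode) (false)
#endif

int main(void)
{
	enum xz_mode mode = XZ_PREALLOC;

	if (DEC_IS_SINGLE(mode))	/* constant false: the compiler drops this branch */
		puts("single-call mode");
	if (DEC_IS_PREALLOC(mode))	/* real comparison, still cheap */
		puts("preallocated dictionary mode");
	return 0;
}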
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
new file mode 100644
index 0000000..66cb5a7
--- /dev/null
+++ b/lib/xz/xz_stream.h
@@ -0,0 +1,62 @@
+/*
+ * Definitions for handling the .xz file format
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_STREAM_H
+#define XZ_STREAM_H
+
+#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32
+#	include <linux/crc32.h>
+#	undef crc32
+#	define xz_crc32(buf, size, crc) \
+		(~crc32_le(~(uint32_t)(crc), buf, size))
+#endif
+
+/*
+ * See the .xz file format specification at
+ * http://tukaani.org/xz/xz-file-format.txt
+ * to understand the container format.
+ */
+
+#define STREAM_HEADER_SIZE 12
+
+#define HEADER_MAGIC "\3757zXZ"
+#define HEADER_MAGIC_SIZE 6
+
+#define FOOTER_MAGIC "YZ"
+#define FOOTER_MAGIC_SIZE 2
+
+/*
+ * Variable-length integer can hold a 63-bit unsigned integer or a special
+ * value indicating that the value is unknown.
+ *
+ * Experimental: vli_type can be defined to uint32_t to save a few bytes
+ * in code size (no effect on speed). Doing so limits the uncompressed and
+ * compressed size of the file to less than 256 MiB and may also weaken
+ * error detection slightly.
+ */
+typedef uint64_t vli_type;
+
+#define VLI_MAX ((vli_type)-1 / 2)
+#define VLI_UNKNOWN ((vli_type)-1)
+
+/* Maximum encoded size of a VLI */
+#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
+
+/* Integrity Check types */
+enum xz_check {
+	XZ_CHECK_NONE = 0,
+	XZ_CHECK_CRC32 = 1,
+	XZ_CHECK_CRC64 = 4,
+	XZ_CHECK_SHA256 = 10
+};
+
+/* Maximum possible Check ID */
+#define XZ_CHECK_MAX 15
+
+#endif
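The variable-length integers mentioned above follow the .xz convention of seven payload bits per byte, least significant group first, with the high bit set on every byte except the last; that is also where VLI_BYTES_MAX = 9 comes from (64 / 7, rounded down). A small decoder sketch written against that understanding of the format, not taken from this patch:

#include <stddef.h>
#include <stdint.h>

/* Decode one variable-length integer; returns bytes consumed, 0 on error. */
static size_t vli_decode(const uint8_t *buf, size_t size, uint64_t *out)
{
	uint64_t value = 0;
	size_t i;

	if (size > 9)
		size = 9;		/* VLI_BYTES_MAX */
	for (i = 0; i < size; i++) {
		value |= (uint64_t)(buf[i] & 0x7F) << (7 * i);
		if (!(buf[i] & 0x80)) {	/* high bit clear: last byte */
			*out = value;
			return i + 1;
		}
	}
	return 0;			/* truncated or over-long encoding */
}

int main(void)
{
	const uint8_t enc[] = { 0xE5, 0x0E };	/* encodes 101 + 14*128 = 1893 */
	uint64_t v;

	return !(vli_decode(enc, sizeof(enc), &v) == 2 && v == 1893);
}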
diff --git a/mm/Kconfig b/mm/Kconfig
index c2c8a4a..3ad483b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -302,6 +302,44 @@
 
 	  See Documentation/nommu-mmap.txt for more information.
 
+config TRANSPARENT_HUGEPAGE
+	bool "Transparent Hugepage Support"
+	depends on X86 && MMU
+	select COMPACTION
+	help
+	  Transparent Hugepages allows the kernel to use huge pages and
+	  huge TLB entries transparently for applications whenever possible.
+	  This feature can improve computing performance for certain
+	  applications by speeding up page faults during memory
+	  allocation, by reducing the number of TLB misses and by speeding
+	  up pagetable walking.
+
+	  If memory is constrained, e.g. on an embedded system, you may
+	  want to say N.
+
+choice
+	prompt "Transparent Hugepage Support sysfs defaults"
+	depends on TRANSPARENT_HUGEPAGE
+	default TRANSPARENT_HUGEPAGE_ALWAYS
+	help
+	  Selects the sysfs defaults for Transparent Hugepage Support.
+
+	config TRANSPARENT_HUGEPAGE_ALWAYS
+		bool "always"
+	help
+	  Enabling Transparent Hugepage always can increase the memory
+	  footprint of applications without a guaranteed benefit, but it
+	  will work automatically for all applications.
+
+	config TRANSPARENT_HUGEPAGE_MADVISE
+		bool "madvise"
+	help
+	  Enabling Transparent Hugepage madvise will only provide a
+	  performance benefit to applications that use
+	  madvise(MADV_HUGEPAGE), but it won't risk increasing the memory
+	  footprint of applications without a guaranteed benefit.
+endchoice
+
 #
 # UP and nommu archs use km based percpu allocator
 #
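The "madvise" default described in the Kconfig help above ties hugepage use to an explicit madvise(MADV_HUGEPAGE) hint from the application. A minimal sketch of that call; the MADV_HUGEPAGE value is taken to be the one added by the THP uapi changes elsewhere in this series and should be treated as an assumption here:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14	/* assumed value from asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 16UL << 20;	/* 16 MiB, a multiple of the x86 2 MiB PMD size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);	/* opt this range in to transparent hugepages */
	/* ... touch and use the buffer; khugepaged may also collapse it later ... */
	munmap(p, len);
	return 0;
}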
diff --git a/mm/Makefile b/mm/Makefile
index f73f75a..2b1b575 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,7 +5,7 @@
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o pagewalk.o
+			   vmalloc.o pagewalk.o pgtable-generic.o
 
 obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
@@ -37,6 +37,7 @@
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_QUICKLIST) += quicklist.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 1a8894e..6d592a0 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,9 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/compaction.h>
+
 /*
  * compact_control is used to track pages being migrated and the free pages
  * they are being migrated to during memory compaction. The free_pfn starts
@@ -30,6 +33,7 @@
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
 
 	/* Account for isolated anon and file pages */
 	unsigned long nr_anon;
@@ -38,6 +42,8 @@
 	unsigned int order;		/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
+
+	int compact_mode;
 };
 
 static unsigned long release_freepages(struct list_head *freelist)
@@ -60,7 +66,7 @@
 				struct list_head *freelist)
 {
 	unsigned long zone_end_pfn, end_pfn;
-	int total_isolated = 0;
+	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 
 	/* Get the last PFN we should scan for free pages at */
@@ -81,6 +87,7 @@
 
 		if (!pfn_valid_within(blockpfn))
 			continue;
+		nr_scanned++;
 
 		if (!PageBuddy(page))
 			continue;
@@ -100,6 +107,7 @@
 		}
 	}
 
+	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
 	return total_isolated;
 }
 
@@ -234,6 +242,8 @@
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	unsigned long last_pageblock_nr = 0, pageblock_nr;
+	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 
 	/* Do not scan outside zone boundaries */
@@ -266,20 +276,51 @@
 		struct page *page;
 		if (!pfn_valid_within(low_pfn))
 			continue;
+		nr_scanned++;
 
 		/* Get the page and skip if free */
 		page = pfn_to_page(low_pfn);
 		if (PageBuddy(page))
 			continue;
 
+		/*
+		 * For async migration, also only scan in MOVABLE blocks. Async
+		 * migration is optimistic to see if the minimum amount of work
+		 * satisfies the allocation
+		 */
+		pageblock_nr = low_pfn >> pageblock_order;
+		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+			low_pfn += pageblock_nr_pages;
+			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
+			last_pageblock_nr = pageblock_nr;
+			continue;
+		}
+
+		if (!PageLRU(page))
+			continue;
+
+		/*
+		 * PageLRU is set, and lru_lock excludes isolation,
+		 * splitting and collapsing (collapsing has already
+		 * happened if PageLRU is set).
+		 */
+		if (PageTransHuge(page)) {
+			low_pfn += (1 << compound_order(page)) - 1;
+			continue;
+		}
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
 			continue;
 
+		VM_BUG_ON(PageTransCompound(page));
+
 		/* Successfully isolated */
 		del_page_from_lru_list(zone, page, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
+		nr_isolated++;
 
 		/* Avoid isolating too much */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
@@ -291,6 +332,8 @@
 	spin_unlock_irq(&zone->lru_lock);
 	cc->migrate_pfn = low_pfn;
 
+	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+
 	return cc->nr_migratepages;
 }
 
@@ -341,10 +384,10 @@
 }
 
 static int compact_finished(struct zone *zone,
-						struct compact_control *cc)
+			    struct compact_control *cc)
 {
 	unsigned int order;
-	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);
+	unsigned long watermark;
 
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
@@ -354,12 +397,27 @@
 		return COMPACT_COMPLETE;
 
 	/* Compaction run is not finished if the watermark is not met */
+	if (cc->compact_mode != COMPACT_MODE_KSWAPD)
+		watermark = low_wmark_pages(zone);
+	else
+		watermark = high_wmark_pages(zone);
+	watermark += (1 << cc->order);
+
 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
 		return COMPACT_CONTINUE;
 
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
+	/*
+	 * Generating only one page of the right order is not enough
+	 * for kswapd, we must continue until we're above the high
+	 * watermark as a pool for high order GFP_ATOMIC allocations
+	 * too.
+	 */
+	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
+		return COMPACT_CONTINUE;
+
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		/* Job done if page is free of the right migratetype */
@@ -374,10 +432,62 @@
 	return COMPACT_CONTINUE;
 }
 
+/*
+ * compaction_suitable: Is this suitable to run compaction on this zone now?
+ * Returns
+ *   COMPACT_SKIPPED  - If there are too few free pages for compaction
+ *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
+ *   COMPACT_CONTINUE - If compaction should run now
+ */
+unsigned long compaction_suitable(struct zone *zone, int order)
+{
+	int fragindex;
+	unsigned long watermark;
+
+	/*
+	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
+	 * This is because during migration, copies of pages need to be
+	 * allocated and for a short time, the footprint is higher
+	 */
+	watermark = low_wmark_pages(zone) + (2UL << order);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		return COMPACT_SKIPPED;
+
+	/*
+	 * fragmentation index determines if allocation failures are due to
+	 * low memory or external fragmentation
+	 *
+	 * index of -1 implies allocations might succeed depending on watermarks
+	 * index towards 0 implies failure is due to lack of memory
+	 * index towards 1000 implies failure is due to fragmentation
+	 *
+	 * Only compact if a failure would be due to fragmentation.
+	 */
+	fragindex = fragmentation_index(zone, order);
+	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
+		return COMPACT_SKIPPED;
+
+	if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
+		return COMPACT_PARTIAL;
+
+	return COMPACT_CONTINUE;
+}
+
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 
+	ret = compaction_suitable(zone, cc->order);
+	switch (ret) {
+	case COMPACT_PARTIAL:
+	case COMPACT_SKIPPED:
+		/* Compaction is likely to fail */
+		return ret;
+	case COMPACT_CONTINUE:
+		/* Fall through to compaction */
+		;
+	}
+
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
 	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
@@ -393,7 +503,8 @@
 
 		nr_migrate = cc->nr_migratepages;
 		migrate_pages(&cc->migratepages, compaction_alloc,
-						(unsigned long)cc, 0);
+				(unsigned long)cc, false,
+				cc->sync);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
@@ -401,6 +512,8 @@
 		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
 		if (nr_remaining)
 			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
+		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
+						nr_remaining);
 
 		/* Release LRU pages not migrated */
 		if (!list_empty(&cc->migratepages)) {
@@ -417,8 +530,10 @@
 	return ret;
 }
 
-static unsigned long compact_zone_order(struct zone *zone,
-						int order, gfp_t gfp_mask)
+unsigned long compact_zone_order(struct zone *zone,
+				 int order, gfp_t gfp_mask,
+				 bool sync,
+				 int compact_mode)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -426,6 +541,8 @@
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
+		.sync = sync,
+		.compact_mode = compact_mode,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -441,16 +558,17 @@
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
  *
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
 	int may_perform_io = gfp_mask & __GFP_IO;
-	unsigned long watermark;
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
@@ -460,7 +578,7 @@
 	 * made because an assumption is made that the page allocator can satisfy
 	 * the "cheaper" orders without taking special steps
 	 */
-	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
+	if (!order || !may_enter_fs || !may_perform_io)
 		return rc;
 
 	count_vm_event(COMPACTSTALL);
@@ -468,43 +586,14 @@
 	/* Compact each zone in the list */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 								nodemask) {
-		int fragindex;
 		int status;
 
-		/*
-		 * Watermarks for order-0 must be met for compaction. Note
-		 * the 2UL. This is because during migration, copies of
-		 * pages need to be allocated and for a short time, the
-		 * footprint is higher
-		 */
-		watermark = low_wmark_pages(zone) + (2UL << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
-			continue;
-
-		/*
-		 * fragmentation index determines if allocation failures are
-		 * due to low memory or external fragmentation
-		 *
-		 * index of -1 implies allocations might succeed depending
-		 * 	on watermarks
-		 * index towards 0 implies failure is due to lack of memory
-		 * index towards 1000 implies failure is due to fragmentation
-		 *
-		 * Only compact if a failure would be due to fragmentation.
-		 */
-		fragindex = fragmentation_index(zone, order);
-		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
-			continue;
-
-		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
-			rc = COMPACT_PARTIAL;
-			break;
-		}
-
-		status = compact_zone_order(zone, order, gfp_mask);
+		status = compact_zone_order(zone, order, gfp_mask, sync,
+					    COMPACT_MODE_DIRECT_RECLAIM);
 		rc = max(status, rc);
 
-		if (zone_watermark_ok(zone, order, watermark, 0, 0))
+		/* If a normal allocation would succeed, stop compacting */
+		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
 			break;
 	}
 
@@ -531,6 +620,7 @@
 			.nr_freepages = 0,
 			.nr_migratepages = 0,
 			.order = -1,
+			.compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
 		};
 
 		zone = &pgdat->node_zones[zoneid];
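The checks that moved into compaction_suitable() above boil down to a small decision over the order-0 watermark and the fragmentation index, where (per the comment) -1 means the allocation might already succeed and values toward 0 mean the failure is plain lack of memory; the threshold is the extfrag sysctl, 500 by default in this era. A condensed illustration with the zone plumbing reduced to booleans, not the kernel implementation itself:

#include <stdbool.h>

enum { SKIPPED, CONTINUE, PARTIAL };	/* stand-ins for the COMPACT_* codes */

static int should_compact(bool order0_watermark_ok, bool order_watermark_ok,
			  int fragindex, int extfrag_threshold)
{
	if (!order0_watermark_ok)
		return SKIPPED;		/* too few free pages to migrate into */
	if (fragindex >= 0 && fragindex <= extfrag_threshold)
		return SKIPPED;		/* failure would be lack of memory, not fragmentation */
	if (fragindex == -1 && order_watermark_ok)
		return PARTIAL;		/* the allocation should already succeed */
	return CONTINUE;		/* fragmentation is the problem: compact */
}

int main(void)
{
	/* heavily fragmented zone with free memory: worth compacting */
	return should_compact(true, false, 800, 500) != CONTINUE;
}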
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 4df2de7..03bf3bb 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -324,7 +324,7 @@
 		if (mem_flags & __GFP_WAIT) {
 			DECLARE_WAITQUEUE(wait, current);
 
-			__set_current_state(TASK_INTERRUPTIBLE);
+			__set_current_state(TASK_UNINTERRUPTIBLE);
 			__add_wait_queue(&pool->waitq, &wait);
 			spin_unlock_irqrestore(&pool->lock, flags);
 
@@ -355,20 +355,15 @@
 
 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
-	unsigned long flags;
 	struct dma_page *page;
 
-	spin_lock_irqsave(&pool->lock, flags);
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (dma < page->dma)
 			continue;
 		if (dma < (page->dma + pool->allocation))
-			goto done;
+			return page;
 	}
-	page = NULL;
- done:
-	spin_unlock_irqrestore(&pool->lock, flags);
-	return page;
+	return NULL;
 }
 
 /**
@@ -386,8 +381,10 @@
 	unsigned long flags;
 	unsigned int offset;
 
+	spin_lock_irqsave(&pool->lock, flags);
 	page = pool_find_page(pool, dma);
 	if (!page) {
+		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
 			dev_err(pool->dev,
 				"dma_pool_free %s, %p/%lx (bad dma)\n",
@@ -401,6 +398,7 @@
 	offset = vaddr - page->vaddr;
 #ifdef	DMAPOOL_DEBUG
 	if ((dma - page->dma) != offset) {
+		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
 			dev_err(pool->dev,
 				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
@@ -418,6 +416,7 @@
 				chain = *(int *)(page->vaddr + chain);
 				continue;
 			}
+			spin_unlock_irqrestore(&pool->lock, flags);
 			if (pool->dev)
 				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
 					"already free\n", pool->name,
@@ -432,7 +431,6 @@
 	memset(vaddr, POOL_POISON_FREED, pool->size);
 #endif
 
-	spin_lock_irqsave(&pool->lock, flags);
 	page->in_use--;
 	*(int *)vaddr = page->offset;
 	page->offset = offset;
diff --git a/mm/filemap.c b/mm/filemap.c
index 6b9aee2..83a45d3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -102,9 +102,6 @@
  *    ->inode_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
  *
- *  ->task->proc_lock
- *    ->dcache_lock		(proc_pid_lookup)
- *
  *  (code doesn't rely on that order, so you could switch it around)
  *  ->tasklist_lock             (memory_failure, collect_procs_ao)
  *    ->i_mmap_lock
@@ -301,7 +298,7 @@
 				continue;
 
 			wait_on_page_writeback(page);
-			if (PageError(page))
+			if (TestClearPageError(page))
 				ret = -EIO;
 		}
 		pagevec_release(&pvec);
@@ -840,9 +837,6 @@
 		if (radix_tree_deref_retry(page))
 			goto restart;
 
-		if (page->mapping == NULL || page->index != index)
-			break;
-
 		if (!page_cache_get_speculative(page))
 			goto repeat;
 
@@ -852,6 +846,16 @@
 			goto repeat;
 		}
 
+		/*
+		 * Must check mapping and index after taking the ref;
+		 * otherwise we can get both false positives and false
+		 * negatives, which is just confusing to the caller.
+		 */
+		if (page->mapping == NULL || page->index != index) {
+			page_cache_release(page);
+			break;
+		}
+
 		pages[ret] = page;
 		ret++;
 		index++;
@@ -2223,7 +2227,7 @@
 		gfp_notmask = __GFP_FS;
 repeat:
 	page = find_lock_page(mapping, index);
-	if (likely(page))
+	if (page)
 		return page;
 
 	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
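The filemap change above restores a general lockless-lookup rule: an object found without a lock may be freed and reused before a reference pins it, so its identity (mapping and index here) must be validated only after the reference is taken, and the reference dropped on a mismatch. A generic, userspace-flavoured sketch of that shape with illustrative names, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_int refcount;
	void *owner;		/* plays the role of page->mapping */
	unsigned long key;	/* plays the role of page->index */
};

/* Speculative get: refuse to resurrect an object whose count already hit 0. */
static bool obj_tryget(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c > 0)
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
	return false;
}

static struct obj *lookup_validate(struct obj *candidate, void *owner,
				   unsigned long key)
{
	if (!obj_tryget(candidate))
		return NULL;			/* gone: caller restarts the lookup */
	if (candidate->owner != owner || candidate->key != key) {
		atomic_fetch_sub(&candidate->refcount, 1);	/* raced with reuse: drop */
		return NULL;
	}
	return candidate;			/* pinned and verified */
}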
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
new file mode 100644
index 0000000..004c9c2
--- /dev/null
+++ b/mm/huge_memory.c
@@ -0,0 +1,2346 @@
+/*
+ *  Copyright (C) 2009  Red Hat, Inc.
+ *
+ *  This work is licensed under the terms of the GNU GPL, version 2. See
+ *  the COPYING file in the top-level directory.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/hugetlb.h>
+#include <linux/mmu_notifier.h>
+#include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/mm_inline.h>
+#include <linux/kthread.h>
+#include <linux/khugepaged.h>
+#include <linux/freezer.h>
+#include <linux/mman.h>
+#include <asm/tlb.h>
+#include <asm/pgalloc.h>
+#include "internal.h"
+
+/*
+ * By default transparent hugepage support is enabled for all mappings
+ * and khugepaged scans all mappings. Defrag is only invoked by
+ * khugepaged hugepage allocations and by page faults inside
+ * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
+ * allocations.
+ */
+unsigned long transparent_hugepage_flags __read_mostly =
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
+	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
+	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
+#endif
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
+	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+
+/* default scan 8*512 pte (or vmas) every 30 second */
+static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
+static unsigned int khugepaged_pages_collapsed;
+static unsigned int khugepaged_full_scans;
+static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
+/* during fragmentation poll the hugepage allocator once every minute */
+static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
+static struct task_struct *khugepaged_thread __read_mostly;
+static DEFINE_MUTEX(khugepaged_mutex);
+static DEFINE_SPINLOCK(khugepaged_mm_lock);
+static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
+/*
+ * default collapse hugepages if there is at least one pte mapped like
+ * it would have happened if the vma was large enough during page
+ * fault.
+ */
+static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
+
+static int khugepaged(void *none);
+static int mm_slots_hash_init(void);
+static int khugepaged_slab_init(void);
+static void khugepaged_slab_free(void);
+
+#define MM_SLOTS_HASH_HEADS 1024
+static struct hlist_head *mm_slots_hash __read_mostly;
+static struct kmem_cache *mm_slot_cache __read_mostly;
+
+/**
+ * struct mm_slot - hash lookup from mm to mm_slot
+ * @hash: hash collision list
+ * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
+ * @mm: the mm that this information is valid for
+ */
+struct mm_slot {
+	struct hlist_node hash;
+	struct list_head mm_node;
+	struct mm_struct *mm;
+};
+
+/**
+ * struct khugepaged_scan - cursor for scanning
+ * @mm_head: the head of the mm list to scan
+ * @mm_slot: the current mm_slot we are scanning
+ * @address: the next address inside that to be scanned
+ *
+ * There is only the one khugepaged_scan instance of this cursor structure.
+ */
+struct khugepaged_scan {
+	struct list_head mm_head;
+	struct mm_slot *mm_slot;
+	unsigned long address;
+} khugepaged_scan = {
+	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
+};
+
+
+static int set_recommended_min_free_kbytes(void)
+{
+	struct zone *zone;
+	int nr_zones = 0;
+	unsigned long recommended_min;
+	extern int min_free_kbytes;
+
+	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) &&
+	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags))
+		return 0;
+
+	for_each_populated_zone(zone)
+		nr_zones++;
+
+	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	recommended_min = pageblock_nr_pages * nr_zones * 2;
+
+	/*
+	 * Make sure that on average at least two pageblocks are almost free
+	 * of another type, one for a migratetype to fall back to and a
+	 * second to avoid subsequent fallbacks of other types. There are 3
+	 * MIGRATE_TYPES we care about.
+	 */
+	recommended_min += pageblock_nr_pages * nr_zones *
+			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+
+	/* don't ever allow to reserve more than 5% of the lowmem */
+	recommended_min = min(recommended_min,
+			      (unsigned long) nr_free_buffer_pages() / 20);
+	recommended_min <<= (PAGE_SHIFT-10);
+
+	if (recommended_min > min_free_kbytes)
+		min_free_kbytes = recommended_min;
+	setup_per_zone_wmarks();
+	return 0;
+}
+late_initcall(set_recommended_min_free_kbytes);
+
+static int start_khugepaged(void)
+{
+	int err = 0;
+	if (khugepaged_enabled()) {
+		int wakeup;
+		if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
+			err = -ENOMEM;
+			goto out;
+		}
+		mutex_lock(&khugepaged_mutex);
+		if (!khugepaged_thread)
+			khugepaged_thread = kthread_run(khugepaged, NULL,
+							"khugepaged");
+		if (unlikely(IS_ERR(khugepaged_thread))) {
+			printk(KERN_ERR
+			       "khugepaged: kthread_run(khugepaged) failed\n");
+			err = PTR_ERR(khugepaged_thread);
+			khugepaged_thread = NULL;
+		}
+		wakeup = !list_empty(&khugepaged_scan.mm_head);
+		mutex_unlock(&khugepaged_mutex);
+		if (wakeup)
+			wake_up_interruptible(&khugepaged_wait);
+
+		set_recommended_min_free_kbytes();
+	} else
+		/* wakeup to exit */
+		wake_up_interruptible(&khugepaged_wait);
+out:
+	return err;
+}
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t double_flag_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf,
+				enum transparent_hugepage_flag enabled,
+				enum transparent_hugepage_flag req_madv)
+{
+	if (test_bit(enabled, &transparent_hugepage_flags)) {
+		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
+		return sprintf(buf, "[always] madvise never\n");
+	} else if (test_bit(req_madv, &transparent_hugepage_flags))
+		return sprintf(buf, "always [madvise] never\n");
+	else
+		return sprintf(buf, "always madvise [never]\n");
+}
+static ssize_t double_flag_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf, size_t count,
+				 enum transparent_hugepage_flag enabled,
+				 enum transparent_hugepage_flag req_madv)
+{
+	if (!memcmp("always", buf,
+		    min(sizeof("always")-1, count))) {
+		set_bit(enabled, &transparent_hugepage_flags);
+		clear_bit(req_madv, &transparent_hugepage_flags);
+	} else if (!memcmp("madvise", buf,
+			   min(sizeof("madvise")-1, count))) {
+		clear_bit(enabled, &transparent_hugepage_flags);
+		set_bit(req_madv, &transparent_hugepage_flags);
+	} else if (!memcmp("never", buf,
+			   min(sizeof("never")-1, count))) {
+		clear_bit(enabled, &transparent_hugepage_flags);
+		clear_bit(req_madv, &transparent_hugepage_flags);
+	} else
+		return -EINVAL;
+
+	return count;
+}
+
+static ssize_t enabled_show(struct kobject *kobj,
+			    struct kobj_attribute *attr, char *buf)
+{
+	return double_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_FLAG,
+				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+}
+static ssize_t enabled_store(struct kobject *kobj,
+			     struct kobj_attribute *attr,
+			     const char *buf, size_t count)
+{
+	ssize_t ret;
+
+	ret = double_flag_store(kobj, attr, buf, count,
+				TRANSPARENT_HUGEPAGE_FLAG,
+				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
+
+	if (ret > 0) {
+		int err = start_khugepaged();
+		if (err)
+			ret = err;
+	}
+
+	if (ret > 0 &&
+	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) ||
+	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags)))
+		set_recommended_min_free_kbytes();
+
+	return ret;
+}
+static struct kobj_attribute enabled_attr =
+	__ATTR(enabled, 0644, enabled_show, enabled_store);
+
+static ssize_t single_flag_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf,
+				enum transparent_hugepage_flag flag)
+{
+	if (test_bit(flag, &transparent_hugepage_flags))
+		return sprintf(buf, "[yes] no\n");
+	else
+		return sprintf(buf, "yes [no]\n");
+}
+static ssize_t single_flag_store(struct kobject *kobj,
+				 struct kobj_attribute *attr,
+				 const char *buf, size_t count,
+				 enum transparent_hugepage_flag flag)
+{
+	if (!memcmp("yes", buf,
+		    min(sizeof("yes")-1, count))) {
+		set_bit(flag, &transparent_hugepage_flags);
+	} else if (!memcmp("no", buf,
+			   min(sizeof("no")-1, count))) {
+		clear_bit(flag, &transparent_hugepage_flags);
+	} else
+		return -EINVAL;
+
+	return count;
+}
+
+/*
+ * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
+ * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
+ * memory just to allocate one more hugepage.
+ */
+static ssize_t defrag_show(struct kobject *kobj,
+			   struct kobj_attribute *attr, char *buf)
+{
+	return double_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+}
+static ssize_t defrag_store(struct kobject *kobj,
+			    struct kobj_attribute *attr,
+			    const char *buf, size_t count)
+{
+	return double_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
+}
+static struct kobj_attribute defrag_attr =
+	__ATTR(defrag, 0644, defrag_show, defrag_store);
+
+#ifdef CONFIG_DEBUG_VM
+static ssize_t debug_cow_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	return single_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
+}
+static ssize_t debug_cow_store(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       const char *buf, size_t count)
+{
+	return single_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
+}
+static struct kobj_attribute debug_cow_attr =
+	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
+#endif /* CONFIG_DEBUG_VM */
+
+static struct attribute *hugepage_attr[] = {
+	&enabled_attr.attr,
+	&defrag_attr.attr,
+#ifdef CONFIG_DEBUG_VM
+	&debug_cow_attr.attr,
+#endif
+	NULL,
+};
+
+static struct attribute_group hugepage_attr_group = {
+	.attrs = hugepage_attr,
+};
+
+static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
+}
+
+static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  const char *buf, size_t count)
+{
+	unsigned long msecs;
+	int err;
+
+	err = strict_strtoul(buf, 10, &msecs);
+	if (err || msecs > UINT_MAX)
+		return -EINVAL;
+
+	khugepaged_scan_sleep_millisecs = msecs;
+	wake_up_interruptible(&khugepaged_wait);
+
+	return count;
+}
+static struct kobj_attribute scan_sleep_millisecs_attr =
+	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
+	       scan_sleep_millisecs_store);
+
+static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
+					  struct kobj_attribute *attr,
+					  char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
+}
+
+static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
+					   struct kobj_attribute *attr,
+					   const char *buf, size_t count)
+{
+	unsigned long msecs;
+	int err;
+
+	err = strict_strtoul(buf, 10, &msecs);
+	if (err || msecs > UINT_MAX)
+		return -EINVAL;
+
+	khugepaged_alloc_sleep_millisecs = msecs;
+	wake_up_interruptible(&khugepaged_wait);
+
+	return count;
+}
+static struct kobj_attribute alloc_sleep_millisecs_attr =
+	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
+	       alloc_sleep_millisecs_store);
+
+static ssize_t pages_to_scan_show(struct kobject *kobj,
+				  struct kobj_attribute *attr,
+				  char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
+}
+static ssize_t pages_to_scan_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int err;
+	unsigned long pages;
+
+	err = strict_strtoul(buf, 10, &pages);
+	if (err || !pages || pages > UINT_MAX)
+		return -EINVAL;
+
+	khugepaged_pages_to_scan = pages;
+
+	return count;
+}
+static struct kobj_attribute pages_to_scan_attr =
+	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
+	       pages_to_scan_store);
+
+static ssize_t pages_collapsed_show(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
+}
+static struct kobj_attribute pages_collapsed_attr =
+	__ATTR_RO(pages_collapsed);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+			       struct kobj_attribute *attr,
+			       char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_full_scans);
+}
+static struct kobj_attribute full_scans_attr =
+	__ATTR_RO(full_scans);
+
+static ssize_t khugepaged_defrag_show(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	return single_flag_show(kobj, attr, buf,
+				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+}
+static ssize_t khugepaged_defrag_store(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	return single_flag_store(kobj, attr, buf, count,
+				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
+}
+static struct kobj_attribute khugepaged_defrag_attr =
+	__ATTR(defrag, 0644, khugepaged_defrag_show,
+	       khugepaged_defrag_store);
+
+/*
+ * max_ptes_none controls whether khugepaged should collapse hugepages over
+ * any unmapped ptes, in turn potentially increasing the memory
+ * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
+ * reduce the available free memory in the system as it
+ * runs. Increasing max_ptes_none will instead potentially reduce the
+ * free memory in the system during the khugepaged scan.
+ */
+static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
+					     struct kobj_attribute *attr,
+					     char *buf)
+{
+	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
+}
+static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
+					      struct kobj_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int err;
+	unsigned long max_ptes_none;
+
+	err = strict_strtoul(buf, 10, &max_ptes_none);
+	if (err || max_ptes_none > HPAGE_PMD_NR-1)
+		return -EINVAL;
+
+	khugepaged_max_ptes_none = max_ptes_none;
+
+	return count;
+}
+static struct kobj_attribute khugepaged_max_ptes_none_attr =
+	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
+	       khugepaged_max_ptes_none_store);
+
+static struct attribute *khugepaged_attr[] = {
+	&khugepaged_defrag_attr.attr,
+	&khugepaged_max_ptes_none_attr.attr,
+	&pages_to_scan_attr.attr,
+	&pages_collapsed_attr.attr,
+	&full_scans_attr.attr,
+	&scan_sleep_millisecs_attr.attr,
+	&alloc_sleep_millisecs_attr.attr,
+	NULL,
+};
+
+static struct attribute_group khugepaged_attr_group = {
+	.attrs = khugepaged_attr,
+	.name = "khugepaged",
+};
+#endif /* CONFIG_SYSFS */
+
+static int __init hugepage_init(void)
+{
+	int err;
+#ifdef CONFIG_SYSFS
+	static struct kobject *hugepage_kobj;
+#endif
+
+	err = -EINVAL;
+	if (!has_transparent_hugepage()) {
+		transparent_hugepage_flags = 0;
+		goto out;
+	}
+
+#ifdef CONFIG_SYSFS
+	err = -ENOMEM;
+	hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
+	if (unlikely(!hugepage_kobj)) {
+		printk(KERN_ERR "hugepage: failed kobject create\n");
+		goto out;
+	}
+
+	err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
+	if (err) {
+		printk(KERN_ERR "hugepage: failed to register hugepage group\n");
+		goto out;
+	}
+
+	err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
+	if (err) {
+		printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
+		goto out;
+	}
+#endif
+
+	err = khugepaged_slab_init();
+	if (err)
+		goto out;
+
+	err = mm_slots_hash_init();
+	if (err) {
+		khugepaged_slab_free();
+		goto out;
+	}
+
+	/*
+	 * By default disable transparent hugepages on smaller systems,
+	 * where the extra memory used could hurt more than TLB overhead
+	 * is likely to save.  The admin can still enable it through /sys.
+	 */
+	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
+		transparent_hugepage_flags = 0;
+
+	start_khugepaged();
+
+	set_recommended_min_free_kbytes();
+
+out:
+	return err;
+}
+module_init(hugepage_init)
+
+static int __init setup_transparent_hugepage(char *str)
+{
+	int ret = 0;
+	if (!str)
+		goto out;
+	if (!strcmp(str, "always")) {
+		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			&transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			  &transparent_hugepage_flags);
+		ret = 1;
+	} else if (!strcmp(str, "madvise")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			  &transparent_hugepage_flags);
+		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			&transparent_hugepage_flags);
+		ret = 1;
+	} else if (!strcmp(str, "never")) {
+		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
+			  &transparent_hugepage_flags);
+		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+			  &transparent_hugepage_flags);
+		ret = 1;
+	}
+out:
+	if (!ret)
+		printk(KERN_WARNING
+		       "transparent_hugepage= cannot parse, ignored\n");
+	return ret;
+}
+__setup("transparent_hugepage=", setup_transparent_hugepage);
+
+static void prepare_pmd_huge_pte(pgtable_t pgtable,
+				 struct mm_struct *mm)
+{
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(&pgtable->lru);
+	else
+		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
+	mm->pmd_huge_pte = pgtable;
+}
+
+static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+	if (likely(vma->vm_flags & VM_WRITE))
+		pmd = pmd_mkwrite(pmd);
+	return pmd;
+}
+
+static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long haddr, pmd_t *pmd,
+					struct page *page)
+{
+	int ret = 0;
+	pgtable_t pgtable;
+
+	VM_BUG_ON(!PageCompound(page));
+	pgtable = pte_alloc_one(mm, haddr);
+	if (unlikely(!pgtable)) {
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		return VM_FAULT_OOM;
+	}
+
+	clear_huge_page(page, haddr, HPAGE_PMD_NR);
+	__SetPageUptodate(page);
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_none(*pmd))) {
+		spin_unlock(&mm->page_table_lock);
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		pte_free(mm, pgtable);
+	} else {
+		pmd_t entry;
+		entry = mk_pmd(page, vma->vm_page_prot);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = pmd_mkhuge(entry);
+		/*
+		 * The spinlocking to take the lru_lock inside
+		 * page_add_new_anon_rmap() acts as a full memory
+		 * barrier to be sure clear_huge_page writes become
+		 * visible after the set_pmd_at() write.
+		 */
+		page_add_new_anon_rmap(page, vma, haddr);
+		set_pmd_at(mm, haddr, pmd, entry);
+		prepare_pmd_huge_pte(pgtable, mm);
+		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		spin_unlock(&mm->page_table_lock);
+	}
+
+	return ret;
+}
+
+static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+{
+	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+}
+
+static inline struct page *alloc_hugepage_vma(int defrag,
+					      struct vm_area_struct *vma,
+					      unsigned long haddr)
+{
+	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+			       HPAGE_PMD_ORDER, vma, haddr);
+}
+
+#ifndef CONFIG_NUMA
+static inline struct page *alloc_hugepage(int defrag)
+{
+	return alloc_pages(alloc_hugepage_gfpmask(defrag),
+			   HPAGE_PMD_ORDER);
+}
+#endif
+
+int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			       unsigned long address, pmd_t *pmd,
+			       unsigned int flags)
+{
+	struct page *page;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
+	pte_t *pte;
+
+	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
+		if (unlikely(anon_vma_prepare(vma)))
+			return VM_FAULT_OOM;
+		if (unlikely(khugepaged_enter(vma)))
+			return VM_FAULT_OOM;
+		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+					  vma, haddr);
+		if (unlikely(!page))
+			goto out;
+		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+			put_page(page);
+			goto out;
+		}
+
+		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+	}
+out:
+	/*
+	 * Use __pte_alloc instead of pte_alloc_map, because we can't
+	 * run pte_offset_map on the pmd, if a huge pmd could
+	 * materialize from under us from a different thread.
+	 */
+	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+		return VM_FAULT_OOM;
+	/* if a huge pmd materialized from under us, just retry later */
+	if (unlikely(pmd_trans_huge(*pmd)))
+		return 0;
+	/*
+	 * A regular pmd is established and it can't morph into a huge pmd
+	 * from under us anymore at this point because we hold the mmap_sem
+	 * read mode and khugepaged takes it in write mode. So now it's
+	 * safe to run pte_offset_map().
+	 */
+	pte = pte_offset_map(pmd, address);
+	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
+}
+
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+		  struct vm_area_struct *vma)
+{
+	struct page *src_page;
+	pmd_t pmd;
+	pgtable_t pgtable;
+	int ret;
+
+	ret = -ENOMEM;
+	pgtable = pte_alloc_one(dst_mm, addr);
+	if (unlikely(!pgtable))
+		goto out;
+
+	spin_lock(&dst_mm->page_table_lock);
+	spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
+
+	ret = -EAGAIN;
+	pmd = *src_pmd;
+	if (unlikely(!pmd_trans_huge(pmd))) {
+		pte_free(dst_mm, pgtable);
+		goto out_unlock;
+	}
+	if (unlikely(pmd_trans_splitting(pmd))) {
+		/* split huge page running from under us */
+		spin_unlock(&src_mm->page_table_lock);
+		spin_unlock(&dst_mm->page_table_lock);
+		pte_free(dst_mm, pgtable);
+
+		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
+		goto out;
+	}
+	src_page = pmd_page(pmd);
+	VM_BUG_ON(!PageHead(src_page));
+	get_page(src_page);
+	page_dup_rmap(src_page);
+	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+
+	pmdp_set_wrprotect(src_mm, addr, src_pmd);
+	pmd = pmd_mkold(pmd_wrprotect(pmd));
+	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+	prepare_pmd_huge_pte(pgtable, dst_mm);
+
+	ret = 0;
+out_unlock:
+	spin_unlock(&src_mm->page_table_lock);
+	spin_unlock(&dst_mm->page_table_lock);
+out:
+	return ret;
+}
+
+/* no "address" argument so destroys page coloring of some arch */
+pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
+{
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	if (list_empty(&pgtable->lru))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
+					      struct page, lru);
+		list_del(&pgtable->lru);
+	}
+	return pgtable;
+}
+
+static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long address,
+					pmd_t *pmd, pmd_t orig_pmd,
+					struct page *page,
+					unsigned long haddr)
+{
+	pgtable_t pgtable;
+	pmd_t _pmd;
+	int ret = 0, i;
+	struct page **pages;
+
+	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
+			GFP_KERNEL);
+	if (unlikely(!pages)) {
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+					  vma, address);
+		if (unlikely(!pages[i] ||
+			     mem_cgroup_newpage_charge(pages[i], mm,
+						       GFP_KERNEL))) {
+			if (pages[i])
+				put_page(pages[i]);
+			mem_cgroup_uncharge_start();
+			while (--i >= 0) {
+				mem_cgroup_uncharge_page(pages[i]);
+				put_page(pages[i]);
+			}
+			mem_cgroup_uncharge_end();
+			kfree(pages);
+			ret |= VM_FAULT_OOM;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		copy_user_highpage(pages[i], page + i,
+				   haddr + PAGE_SHIFT*i, vma);
+		__SetPageUptodate(pages[i]);
+		cond_resched();
+	}
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+		goto out_free_pages;
+	VM_BUG_ON(!PageHead(page));
+
+	pmdp_clear_flush_notify(vma, haddr, pmd);
+	/* leave pmd empty until pte is filled */
+
+	pgtable = get_pmd_huge_pte(mm);
+	pmd_populate(mm, &_pmd, pgtable);
+
+	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+		pte_t *pte, entry;
+		entry = mk_pte(pages[i], vma->vm_page_prot);
+		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		page_add_new_anon_rmap(pages[i], vma, haddr);
+		pte = pte_offset_map(&_pmd, haddr);
+		VM_BUG_ON(!pte_none(*pte));
+		set_pte_at(mm, haddr, pte, entry);
+		pte_unmap(pte);
+	}
+	kfree(pages);
+
+	mm->nr_ptes++;
+	smp_wmb(); /* make pte visible before pmd */
+	pmd_populate(mm, pmd, pgtable);
+	page_remove_rmap(page);
+	spin_unlock(&mm->page_table_lock);
+
+	ret |= VM_FAULT_WRITE;
+	put_page(page);
+
+out:
+	return ret;
+
+out_free_pages:
+	spin_unlock(&mm->page_table_lock);
+	mem_cgroup_uncharge_start();
+	for (i = 0; i < HPAGE_PMD_NR; i++) {
+		mem_cgroup_uncharge_page(pages[i]);
+		put_page(pages[i]);
+	}
+	mem_cgroup_uncharge_end();
+	kfree(pages);
+	goto out;
+}
+
+int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
+{
+	int ret = 0;
+	struct page *page, *new_page;
+	unsigned long haddr;
+
+	VM_BUG_ON(!vma->anon_vma);
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_same(*pmd, orig_pmd)))
+		goto out_unlock;
+
+	page = pmd_page(orig_pmd);
+	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+	haddr = address & HPAGE_PMD_MASK;
+	if (page_mapcount(page) == 1) {
+		pmd_t entry;
+		entry = pmd_mkyoung(orig_pmd);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
+			update_mmu_cache(vma, address, entry);
+		ret |= VM_FAULT_WRITE;
+		goto out_unlock;
+	}
+	get_page(page);
+	spin_unlock(&mm->page_table_lock);
+
+	if (transparent_hugepage_enabled(vma) &&
+	    !transparent_hugepage_debug_cow())
+		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+					      vma, haddr);
+	else
+		new_page = NULL;
+
+	if (unlikely(!new_page)) {
+		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+						   pmd, orig_pmd, page, haddr);
+		put_page(page);
+		goto out;
+	}
+
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		put_page(new_page);
+		put_page(page);
+		ret |= VM_FAULT_OOM;
+		goto out;
+	}
+
+	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
+	__SetPageUptodate(new_page);
+
+	spin_lock(&mm->page_table_lock);
+	put_page(page);
+	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+		mem_cgroup_uncharge_page(new_page);
+		put_page(new_page);
+	} else {
+		pmd_t entry;
+		VM_BUG_ON(!PageHead(page));
+		entry = mk_pmd(new_page, vma->vm_page_prot);
+		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+		entry = pmd_mkhuge(entry);
+		pmdp_clear_flush_notify(vma, haddr, pmd);
+		page_add_new_anon_rmap(new_page, vma, haddr);
+		set_pmd_at(mm, haddr, pmd, entry);
+		update_mmu_cache(vma, address, entry);
+		page_remove_rmap(page);
+		put_page(page);
+		ret |= VM_FAULT_WRITE;
+	}
+out_unlock:
+	spin_unlock(&mm->page_table_lock);
+out:
+	return ret;
+}
+
+struct page *follow_trans_huge_pmd(struct mm_struct *mm,
+				   unsigned long addr,
+				   pmd_t *pmd,
+				   unsigned int flags)
+{
+	struct page *page = NULL;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+		goto out;
+
+	page = pmd_page(*pmd);
+	VM_BUG_ON(!PageHead(page));
+	if (flags & FOLL_TOUCH) {
+		pmd_t _pmd;
+		/*
+		 * We should set the dirty bit only for FOLL_WRITE but
+		 * for now the dirty bit in the pmd is meaningless.
+		 * If the dirty bit ever becomes meaningful and we
+		 * only set it with FOLL_WRITE, an atomic set_bit
+		 * will be required on the pmd to set the young bit,
+		 * instead of the current set_pmd_at.
+		 */
+		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+		set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
+	}
+	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
+	VM_BUG_ON(!PageCompound(page));
+	if (flags & FOLL_GET)
+		get_page(page);
+
+out:
+	return page;
+}
+
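+/*
+ * Unmap path: tear down a huge pmd, drop the rmap and mm counters of
+ * the hugepage, hand the page to the mmu_gather and free the
+ * preallocated pte pagetable. Returns 1 if a huge pmd was zapped,
+ * 0 if the caller must fall back to the regular pte walk.
+ */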
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		 pmd_t *pmd)
+{
+	int ret = 0;
+
+	spin_lock(&tlb->mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&tlb->mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma,
+					     pmd);
+		} else {
+			struct page *page;
+			pgtable_t pgtable;
+			pgtable = get_pmd_huge_pte(tlb->mm);
+			page = pmd_page(*pmd);
+			pmd_clear(pmd);
+			page_remove_rmap(page);
+			VM_BUG_ON(page_mapcount(page) < 0);
+			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+			VM_BUG_ON(!PageHead(page));
+			spin_unlock(&tlb->mm->page_table_lock);
+			tlb_remove_page(tlb, page);
+			pte_free(tlb->mm, pgtable);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&tlb->mm->page_table_lock);
+
+	return ret;
+}
+
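+/*
+ * mincore() helper: when the pmd maps a stable hugepage all small
+ * pages in the range are resident, so the vector can be filled
+ * directly. Returns 0 when the pmd is not (or no longer) huge so the
+ * caller falls back to the pte walk.
+ */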
+int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, unsigned long end,
+		unsigned char *vec)
+{
+	int ret = 0;
+
+	spin_lock(&vma->vm_mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		ret = !pmd_trans_splitting(*pmd);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		if (unlikely(!ret))
+			wait_split_huge_page(vma->anon_vma, pmd);
+		else {
+			/*
+			 * All logical pages in the range are present
+			 * if backed by a huge page.
+			 */
+			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
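+/*
+ * mprotect() helper: change the protection of a huge pmd in place.
+ * Returns 1 on success, 0 if the caller must fall back to the
+ * regular pte walk.
+ */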
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long addr, pgprot_t newprot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		if (unlikely(pmd_trans_splitting(*pmd))) {
+			spin_unlock(&mm->page_table_lock);
+			wait_split_huge_page(vma->anon_vma, pmd);
+		} else {
+			pmd_t entry;
+
+			entry = pmdp_get_and_clear(mm, addr, pmd);
+			entry = pmd_modify(entry, newprot);
+			set_pmd_at(mm, addr, pmd, entry);
+			spin_unlock(&vma->vm_mm->page_table_lock);
+			flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+			ret = 1;
+		}
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+
+	return ret;
+}
+
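+/*
+ * Return the huge pmd mapping @page at @address, or NULL if there is
+ * none. The flag tells whether the caller expects the pmd to already
+ * have (or not have) the splitting bit set.
+ */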
+pmd_t *page_check_address_pmd(struct page *page,
+			      struct mm_struct *mm,
+			      unsigned long address,
+			      enum page_check_address_pmd_flag flag)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, *ret = NULL;
+
+	if (address & ~HPAGE_PMD_MASK)
+		goto out;
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd))
+		goto out;
+	if (pmd_page(*pmd) != page)
+		goto out;
+	/*
+	 * split_vma() may create temporary aliased mappings. There is
+	 * no risk as long as all huge pmds are found and have their
+	 * splitting bit set before __split_huge_page_refcount
+	 * runs. Finding the same huge pmd more than once during the
+	 * same rmap walk is not a problem.
+	 */
+	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
+	    pmd_trans_splitting(*pmd))
+		goto out;
+	if (pmd_trans_huge(*pmd)) {
+		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
+			  !pmd_trans_splitting(*pmd));
+		ret = pmd;
+	}
+out:
+	return ret;
+}
+
+static int __split_huge_page_splitting(struct page *page,
+				       struct vm_area_struct *vma,
+				       unsigned long address)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t *pmd;
+	int ret = 0;
+
+	spin_lock(&mm->page_table_lock);
+	pmd = page_check_address_pmd(page, mm, address,
+				     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
+	if (pmd) {
+		/*
+		 * We can't temporarily set the pmd to null in order
+		 * to split it; the pmd must remain marked huge at all
+		 * times or the VM won't take the pmd_trans_huge paths
+		 * and it won't wait on the anon_vma->root->lock to
+		 * serialize against split_huge_page*.
+		 */
+		pmdp_splitting_flush_notify(vma, address, pmd);
+		ret = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return ret;
+}
+
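+/*
+ * Second step of split_huge_page(): distribute the head page
+ * refcounts and mapcount to the tail pages and turn the compound
+ * page into HPAGE_PMD_NR independent pages, under the lru_lock and
+ * the compound_lock.
+ */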
+static void __split_huge_page_refcount(struct page *page)
+{
+	int i;
+	unsigned long head_index = page->index;
+	struct zone *zone = page_zone(page);
+	int zonestat;
+
+	/* prevent PageLRU from going away from under us, and freeze lru stats */
+	spin_lock_irq(&zone->lru_lock);
+	compound_lock(page);
+
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		struct page *page_tail = page + i;
+
+		/* tail_page->_count cannot change */
+		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
+		BUG_ON(page_count(page) <= 0);
+		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
+		BUG_ON(atomic_read(&page_tail->_count) <= 0);
+
+		/* after clearing PageTail the gup refcount can be released */
+		smp_mb();
+
+		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+		page_tail->flags |= (page->flags &
+				     ((1L << PG_referenced) |
+				      (1L << PG_swapbacked) |
+				      (1L << PG_mlocked) |
+				      (1L << PG_uptodate)));
+		page_tail->flags |= (1L << PG_dirty);
+
+		/*
+		 * 1) clear PageTail before overwriting first_page
+		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
+		 */
+		smp_wmb();
+
+		/*
+		 * __split_huge_page_splitting() already set the
+		 * splitting bit in all pmd that could map this
+		 * hugepage, that will ensure no CPU can alter the
+		 * mapcount on the head page. The mapcount is only
+		 * accounted in the head page and it has to be
+		 * transferred to all tail pages in the below code. So
+		 * for this code to be safe, during the split the
+		 * mapcount can't change. But that doesn't mean userland
+		 * can't keep changing and reading the page contents
+		 * while we transfer the mapcount, so the pmd splitting
+		 * status is achieved by setting a reserved bit in the
+		 * pmd, not by clearing the present bit.
+		 */
+		BUG_ON(page_mapcount(page_tail));
+		page_tail->_mapcount = page->_mapcount;
+
+		BUG_ON(page_tail->mapping);
+		page_tail->mapping = page->mapping;
+
+		page_tail->index = ++head_index;
+
+		BUG_ON(!PageAnon(page_tail));
+		BUG_ON(!PageUptodate(page_tail));
+		BUG_ON(!PageDirty(page_tail));
+		BUG_ON(!PageSwapBacked(page_tail));
+
+		lru_add_page_tail(zone, page, page_tail);
+	}
+
+	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
+
+	/*
+	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
+	 * so adjust those appropriately if this page is on the LRU.
+	 */
+	if (PageLRU(page)) {
+		zonestat = NR_LRU_BASE + page_lru(page);
+		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
+	}
+
+	ClearPageCompound(page);
+	compound_unlock(page);
+	spin_unlock_irq(&zone->lru_lock);
+
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		struct page *page_tail = page + i;
+		BUG_ON(page_count(page_tail) <= 0);
+		/*
+		 * Tail pages may be freed if there wasn't any mapping,
+		 * for example if add_to_swap() is running on an lru
+		 * page that had its mapping zapped. Freeing these pages
+		 * requires taking the lru_lock, so we do the put_page
+		 * of the tail pages after the split is complete.
+		 */
+		put_page(page_tail);
+	}
+
+	/*
+	 * Only the head page (which has now become a regular page)
+	 * is required to be pinned by the caller.
+	 */
+	BUG_ON(page_count(page) <= 0);
+}
+
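+/*
+ * Last step of split_huge_page(): replace the huge pmd with a
+ * regular pagetable mapping the now independent small pages, taking
+ * care never to have huge and small TLB entries loaded for the same
+ * virtual address at the same time.
+ */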
+static int __split_huge_page_map(struct page *page,
+				 struct vm_area_struct *vma,
+				 unsigned long address)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t *pmd, _pmd;
+	int ret = 0, i;
+	pgtable_t pgtable;
+	unsigned long haddr;
+
+	spin_lock(&mm->page_table_lock);
+	pmd = page_check_address_pmd(page, mm, address,
+				     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
+	if (pmd) {
+		pgtable = get_pmd_huge_pte(mm);
+		pmd_populate(mm, &_pmd, pgtable);
+
+		for (i = 0, haddr = address; i < HPAGE_PMD_NR;
+		     i++, haddr += PAGE_SIZE) {
+			pte_t *pte, entry;
+			BUG_ON(PageCompound(page+i));
+			entry = mk_pte(page + i, vma->vm_page_prot);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			if (!pmd_write(*pmd))
+				entry = pte_wrprotect(entry);
+			else
+				BUG_ON(page_mapcount(page) != 1);
+			if (!pmd_young(*pmd))
+				entry = pte_mkold(entry);
+			pte = pte_offset_map(&_pmd, haddr);
+			BUG_ON(!pte_none(*pte));
+			set_pte_at(mm, haddr, pte, entry);
+			pte_unmap(pte);
+		}
+
+		mm->nr_ptes++;
+		smp_wmb(); /* make pte visible before pmd */
+		/*
+		 * Up to this point the pmd is present and huge and
+		 * userland has the whole access to the hugepage
+		 * during the split (which happens in place). If we
+		 * overwrite the pmd with the not-huge version
+		 * pointing to the pte here (which of course we could
+		 * if all CPUs were bug free), userland could trigger
+		 * a small page size TLB miss on the small sized TLB
+		 * while the hugepage TLB entry is still established
+		 * in the huge TLB. Some CPUs don't like that. See
+		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
+		 * Erratum 383 on page 93. Intel should be safe but it
+		 * also warns that it's only safe if the permission
+		 * and cache attributes of the two entries loaded in
+		 * the two TLBs are identical (which should be the case
+		 * here). But it is generally safer to never allow
+		 * small and huge TLB entries for the same virtual
+		 * address to be loaded simultaneously. So instead of
+		 * doing "pmd_populate(); flush_tlb_range();" we first
+		 * mark the current pmd notpresent (atomically because
+		 * here the pmd_trans_huge and pmd_trans_splitting
+		 * must remain set at all times on the pmd until the
+		 * split is complete for this pmd), then we flush the
+		 * SMP TLB and finally we write the non-huge version
+		 * of the pmd entry with pmd_populate.
+		 */
+		set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		pmd_populate(mm, pmd, pgtable);
+		ret = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return ret;
+}
+
+/* must be called with anon_vma->root->lock held */
+static void __split_huge_page(struct page *page,
+			      struct anon_vma *anon_vma)
+{
+	int mapcount, mapcount2;
+	struct anon_vma_chain *avc;
+
+	BUG_ON(!PageHead(page));
+	BUG_ON(PageTail(page));
+
+	mapcount = 0;
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long addr = vma_address(page, vma);
+		BUG_ON(is_vma_temporary_stack(vma));
+		if (addr == -EFAULT)
+			continue;
+		mapcount += __split_huge_page_splitting(page, vma, addr);
+	}
+	/*
+	 * It is critical that new vmas are added to the tail of the
+	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
+	 * and establishes a child pmd before
+	 * __split_huge_page_splitting() freezes the parent pmd (so if
+	 * we fail to prevent copy_huge_pmd() from running until the
+	 * whole __split_huge_page() is complete), we will still see
+	 * the newly established pmd of the child later during the
+	 * walk, to be able to set it as pmd_trans_splitting too.
+	 */
+	if (mapcount != page_mapcount(page))
+		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
+		       mapcount, page_mapcount(page));
+	BUG_ON(mapcount != page_mapcount(page));
+
+	__split_huge_page_refcount(page);
+
+	mapcount2 = 0;
+	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+		struct vm_area_struct *vma = avc->vma;
+		unsigned long addr = vma_address(page, vma);
+		BUG_ON(is_vma_temporary_stack(vma));
+		if (addr == -EFAULT)
+			continue;
+		mapcount2 += __split_huge_page_map(page, vma, addr);
+	}
+	if (mapcount != mapcount2)
+		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
+		       mapcount, mapcount2, page_mapcount(page));
+	BUG_ON(mapcount != mapcount2);
+}
+
+int split_huge_page(struct page *page)
+{
+	struct anon_vma *anon_vma;
+	int ret = 1;
+
+	BUG_ON(!PageAnon(page));
+	anon_vma = page_lock_anon_vma(page);
+	if (!anon_vma)
+		goto out;
+	ret = 0;
+	if (!PageCompound(page))
+		goto out_unlock;
+
+	BUG_ON(!PageSwapBacked(page));
+	__split_huge_page(page, anon_vma);
+
+	BUG_ON(PageCompound(page));
+out_unlock:
+	page_unlock_anon_vma(anon_vma);
+out:
+	return ret;
+}
+
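+/*
+ * madvise(MADV_HUGEPAGE/MADV_NOHUGEPAGE) backend: flip the
+ * VM_HUGEPAGE/VM_NOHUGEPAGE flags on the vma and, for MADV_HUGEPAGE,
+ * register the mm with khugepaged right away.
+ */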
+int hugepage_madvise(struct vm_area_struct *vma,
+		     unsigned long *vm_flags, int advice)
+{
+	switch (advice) {
+	case MADV_HUGEPAGE:
+		/*
+		 * Be somewhat over-protective like KSM for now!
+		 */
+		if (*vm_flags & (VM_HUGEPAGE |
+				 VM_SHARED   | VM_MAYSHARE   |
+				 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+				 VM_MIXEDMAP | VM_SAO))
+			return -EINVAL;
+		*vm_flags &= ~VM_NOHUGEPAGE;
+		*vm_flags |= VM_HUGEPAGE;
+		/*
+		 * If the vma becomes good for khugepaged to scan,
+		 * register it here without waiting for a page fault
+		 * that may not happen any time soon.
+		 */
+		if (unlikely(khugepaged_enter_vma_merge(vma)))
+			return -ENOMEM;
+		break;
+	case MADV_NOHUGEPAGE:
+		/*
+		 * Be somewhat over-protective like KSM for now!
+		 */
+		if (*vm_flags & (VM_NOHUGEPAGE |
+				 VM_SHARED   | VM_MAYSHARE   |
+				 VM_PFNMAP   | VM_IO      | VM_DONTEXPAND |
+				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
+				 VM_MIXEDMAP | VM_SAO))
+			return -EINVAL;
+		*vm_flags &= ~VM_HUGEPAGE;
+		*vm_flags |= VM_NOHUGEPAGE;
+		/*
+		 * Setting VM_NOHUGEPAGE will prevent khugepaged from
+		 * scanning this vma even if the mm stays registered in
+		 * khugepaged because it got registered before
+		 * VM_NOHUGEPAGE was set.
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+static int __init khugepaged_slab_init(void)
+{
+	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
+					  sizeof(struct mm_slot),
+					  __alignof__(struct mm_slot), 0, NULL);
+	if (!mm_slot_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __init khugepaged_slab_free(void)
+{
+	kmem_cache_destroy(mm_slot_cache);
+	mm_slot_cache = NULL;
+}
+
+static inline struct mm_slot *alloc_mm_slot(void)
+{
+	if (!mm_slot_cache)	/* initialization failed */
+		return NULL;
+	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
+}
+
+static inline void free_mm_slot(struct mm_slot *mm_slot)
+{
+	kmem_cache_free(mm_slot_cache, mm_slot);
+}
+
+static int __init mm_slots_hash_init(void)
+{
+	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
+				GFP_KERNEL);
+	if (!mm_slots_hash)
+		return -ENOMEM;
+	return 0;
+}
+
+#if 0
+static void __init mm_slots_hash_free(void)
+{
+	kfree(mm_slots_hash);
+	mm_slots_hash = NULL;
+}
+#endif
+
+static struct mm_slot *get_mm_slot(struct mm_struct *mm)
+{
+	struct mm_slot *mm_slot;
+	struct hlist_head *bucket;
+	struct hlist_node *node;
+
+	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+				% MM_SLOTS_HASH_HEADS];
+	hlist_for_each_entry(mm_slot, node, bucket, hash) {
+		if (mm == mm_slot->mm)
+			return mm_slot;
+	}
+	return NULL;
+}
+
+static void insert_to_mm_slots_hash(struct mm_struct *mm,
+				    struct mm_slot *mm_slot)
+{
+	struct hlist_head *bucket;
+
+	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
+				% MM_SLOTS_HASH_HEADS];
+	mm_slot->mm = mm;
+	hlist_add_head(&mm_slot->hash, bucket);
+}
+
+static inline int khugepaged_test_exit(struct mm_struct *mm)
+{
+	return atomic_read(&mm->mm_users) == 0;
+}
+
+int __khugepaged_enter(struct mm_struct *mm)
+{
+	struct mm_slot *mm_slot;
+	int wakeup;
+
+	mm_slot = alloc_mm_slot();
+	if (!mm_slot)
+		return -ENOMEM;
+
+	/* __khugepaged_exit() must not run from under us */
+	VM_BUG_ON(khugepaged_test_exit(mm));
+	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
+		free_mm_slot(mm_slot);
+		return 0;
+	}
+
+	spin_lock(&khugepaged_mm_lock);
+	insert_to_mm_slots_hash(mm, mm_slot);
+	/*
+	 * Insert just behind the scanning cursor, to let the area settle
+	 * down a little.
+	 */
+	wakeup = list_empty(&khugepaged_scan.mm_head);
+	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
+	spin_unlock(&khugepaged_mm_lock);
+
+	atomic_inc(&mm->mm_count);
+	if (wakeup)
+		wake_up_interruptible(&khugepaged_wait);
+
+	return 0;
+}
+
+int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
+{
+	unsigned long hstart, hend;
+	if (!vma->anon_vma)
+		/*
+		 * Not yet faulted in so we will register later in the
+		 * page fault if needed.
+		 */
+		return 0;
+	if (vma->vm_file || vma->vm_ops)
+		/* khugepaged not yet working on file or special mappings */
+		return 0;
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+	hend = vma->vm_end & HPAGE_PMD_MASK;
+	if (hstart < hend)
+		return khugepaged_enter(vma);
+	return 0;
+}
+
+void __khugepaged_exit(struct mm_struct *mm)
+{
+	struct mm_slot *mm_slot;
+	int free = 0;
+
+	spin_lock(&khugepaged_mm_lock);
+	mm_slot = get_mm_slot(mm);
+	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+		hlist_del(&mm_slot->hash);
+		list_del(&mm_slot->mm_node);
+		free = 1;
+	}
+
+	if (free) {
+		spin_unlock(&khugepaged_mm_lock);
+		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
+		free_mm_slot(mm_slot);
+		mmdrop(mm);
+	} else if (mm_slot) {
+		spin_unlock(&khugepaged_mm_lock);
+		/*
+		 * This is required to serialize against
+		 * khugepaged_test_exit() (which is guaranteed to run
+		 * under mmap_sem read mode). Stop here (after we
+		 * return all pagetables will be destroyed) until
+		 * khugepaged has finished working on the pagetables
+		 * under the mmap_sem.
+		 */
+		down_write(&mm->mmap_sem);
+		up_write(&mm->mmap_sem);
+	} else
+		spin_unlock(&khugepaged_mm_lock);
+}
+
+static void release_pte_page(struct page *page)
+{
+	/* 0 stands for page_is_file_cache(page) == false */
+	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
+	unlock_page(page);
+	putback_lru_page(page);
+}
+
+static void release_pte_pages(pte_t *pte, pte_t *_pte)
+{
+	while (--_pte >= pte) {
+		pte_t pteval = *_pte;
+		if (!pte_none(pteval))
+			release_pte_page(pte_page(pteval));
+	}
+}
+
+static void release_all_pte_pages(pte_t *pte)
+{
+	release_pte_pages(pte, pte + HPAGE_PMD_NR);
+}
+
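+/*
+ * Check the ptes that are about to be collapsed: every mapped page
+ * must be a present, writable, anonymous page with no extra
+ * references. The pages are locked and isolated from the LRU so they
+ * cannot be freed or split while we copy them. Returns 1 when all
+ * pages were isolated and at least one of them was referenced.
+ */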
+static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+					unsigned long address,
+					pte_t *pte)
+{
+	struct page *page;
+	pte_t *_pte;
+	int referenced = 0, isolated = 0, none = 0;
+	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
+	     _pte++, address += PAGE_SIZE) {
+		pte_t pteval = *_pte;
+		if (pte_none(pteval)) {
+			if (++none <= khugepaged_max_ptes_none)
+				continue;
+			else {
+				release_pte_pages(pte, _pte);
+				goto out;
+			}
+		}
+		if (!pte_present(pteval) || !pte_write(pteval)) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		page = vm_normal_page(vma, address, pteval);
+		if (unlikely(!page)) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		VM_BUG_ON(PageCompound(page));
+		BUG_ON(!PageAnon(page));
+		VM_BUG_ON(!PageSwapBacked(page));
+
+		/* cannot use mapcount: can't collapse if there's a gup pin */
+		if (page_count(page) != 1) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		/*
+		 * We can do it before isolate_lru_page because the
+		 * page can't be freed from under us. NOTE: PG_lock
+		 * is needed to serialize against split_huge_page
+		 * when invoked from the VM.
+		 */
+		if (!trylock_page(page)) {
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		/*
+		 * Isolate the page to avoid collapsing a hugepage
+		 * currently in use by the VM.
+		 */
+		if (isolate_lru_page(page)) {
+			unlock_page(page);
+			release_pte_pages(pte, _pte);
+			goto out;
+		}
+		/* 0 stands for page_is_file_cache(page) == false */
+		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
+		VM_BUG_ON(!PageLocked(page));
+		VM_BUG_ON(PageLRU(page));
+
+		/* If no mapped pte is young, don't collapse the page */
+		if (pte_young(pteval) || PageReferenced(page) ||
+		    mmu_notifier_test_young(vma->vm_mm, address))
+			referenced = 1;
+	}
+	if (unlikely(!referenced))
+		release_all_pte_pages(pte);
+	else
+		isolated = 1;
+out:
+	return isolated;
+}
+
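+/*
+ * Copy the isolated small pages into the new hugepage and release
+ * them, clearing the old ptes and their rmap as we go; pte_none
+ * holes simply become zero-filled subpages of the hugepage.
+ */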
+static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
+				      struct vm_area_struct *vma,
+				      unsigned long address,
+				      spinlock_t *ptl)
+{
+	pte_t *_pte;
+	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
+		pte_t pteval = *_pte;
+		struct page *src_page;
+
+		if (pte_none(pteval)) {
+			clear_user_highpage(page, address);
+			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
+		} else {
+			src_page = pte_page(pteval);
+			copy_user_highpage(page, src_page, address, vma);
+			VM_BUG_ON(page_mapcount(src_page) != 1);
+			VM_BUG_ON(page_count(src_page) != 2);
+			release_pte_page(src_page);
+			/*
+			 * ptl mostly unnecessary, but preempt has to
+			 * be disabled to update the per-cpu stats
+			 * inside page_remove_rmap().
+			 */
+			spin_lock(ptl);
+			/*
+			 * paravirt calls inside pte_clear here are
+			 * superfluous.
+			 */
+			pte_clear(vma->vm_mm, address, _pte);
+			page_remove_rmap(src_page);
+			spin_unlock(ptl);
+			free_page_and_swap_cache(src_page);
+		}
+
+		address += PAGE_SIZE;
+		page++;
+	}
+}
+
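+/*
+ * Collapse the HPAGE_PMD_NR small pages around @address into the
+ * hugepage passed in *hpage (or allocated here when CONFIG_NUMA).
+ * Entered with mmap_sem held for read, upgrades it to write mode
+ * and always returns with mmap_sem released.
+ */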
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       struct vm_area_struct *vma)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+#ifndef CONFIG_NUMA
+	VM_BUG_ON(!*hpage);
+	new_page = *hpage;
+#else
+	VM_BUG_ON(*hpage);
+	/*
+	 * Allocate the page while the vma is still valid and under
+	 * the mmap_sem read mode so there is no memory allocation
+	 * later when we take the mmap_sem in write mode. This is
+	 * friendlier behavior (OTOH it may actually hide bugs) towards
+	 * filesystems in userland with daemons allocating memory in
+	 * the userland I/O paths. Allocating memory with the
+	 * mmap_sem in read mode is also a good idea as it allows
+	 * greater scalability.
+	 */
+	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
+	if (unlikely(!new_page)) {
+		up_read(&mm->mmap_sem);
+		*hpage = ERR_PTR(-ENOMEM);
+		return;
+	}
+#endif
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+		up_read(&mm->mmap_sem);
+		put_page(new_page);
+		return;
+	}
+
+	/* after allocating the hugepage upgrade to mmap_sem write mode */
+	up_read(&mm->mmap_sem);
+
+	/*
+	 * Prevent all access to pagetables with the exception of
+	 * gup_fast, later handled by the ptep_clear_flush, and the VM,
+	 * handled by the anon_vma lock + PG_lock.
+	 */
+	down_write(&mm->mmap_sem);
+	if (unlikely(khugepaged_test_exit(mm)))
+		goto out;
+
+	vma = find_vma(mm, address);
+	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+	hend = vma->vm_end & HPAGE_PMD_MASK;
+	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+		goto out;
+
+	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+	    (vma->vm_flags & VM_NOHUGEPAGE))
+		goto out;
+
+	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
+	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+		goto out;
+	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	/* pmd can't go away or become huge under us */
+	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+		goto out;
+
+	anon_vma_lock(vma->anon_vma);
+
+	pte = pte_offset_map(pmd, address);
+	ptl = pte_lockptr(mm, pmd);
+
+	spin_lock(&mm->page_table_lock); /* probably unnecessary */
+	/*
+	 * After this gup_fast can't run anymore. This also removes
+	 * any huge TLB entry from the CPU so we won't allow
+	 * huge and small TLB entries for the same virtual address
+	 * to avoid the risk of CPU bugs in that area.
+	 */
+	_pmd = pmdp_clear_flush_notify(vma, address, pmd);
+	spin_unlock(&mm->page_table_lock);
+
+	spin_lock(ptl);
+	isolated = __collapse_huge_page_isolate(vma, address, pte);
+	spin_unlock(ptl);
+	pte_unmap(pte);
+
+	if (unlikely(!isolated)) {
+		spin_lock(&mm->page_table_lock);
+		BUG_ON(!pmd_none(*pmd));
+		set_pmd_at(mm, address, pmd, _pmd);
+		spin_unlock(&mm->page_table_lock);
+		anon_vma_unlock(vma->anon_vma);
+		mem_cgroup_uncharge_page(new_page);
+		goto out;
+	}
+
+	/*
+	 * All pages are isolated and locked so anon_vma rmap
+	 * can't run anymore.
+	 */
+	anon_vma_unlock(vma->anon_vma);
+
+	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
+	__SetPageUptodate(new_page);
+	pgtable = pmd_pgtable(_pmd);
+	VM_BUG_ON(page_count(pgtable) != 1);
+	VM_BUG_ON(page_mapcount(pgtable) != 0);
+
+	_pmd = mk_pmd(new_page, vma->vm_page_prot);
+	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+	_pmd = pmd_mkhuge(_pmd);
+
+	/*
+	 * spin_lock() below is not the equivalent of smp_wmb(), so
+	 * this is needed to avoid the copy_huge_page writes becoming
+	 * visible after the set_pmd_at() write.
+	 */
+	smp_wmb();
+
+	spin_lock(&mm->page_table_lock);
+	BUG_ON(!pmd_none(*pmd));
+	page_add_new_anon_rmap(new_page, vma, address);
+	set_pmd_at(mm, address, pmd, _pmd);
+	update_mmu_cache(vma, address, _pmd);
+	prepare_pmd_huge_pte(pgtable, mm);
+	mm->nr_ptes--;
+	spin_unlock(&mm->page_table_lock);
+
+#ifndef CONFIG_NUMA
+	*hpage = NULL;
+#endif
+	khugepaged_pages_collapsed++;
+out_up_write:
+	up_write(&mm->mmap_sem);
+	return;
+
+out:
+#ifdef CONFIG_NUMA
+	put_page(new_page);
+#endif
+	goto out_up_write;
+}
+
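+/*
+ * Scan one pmd-sized range and decide whether it is worth
+ * collapsing. Returns 1 after calling collapse_huge_page(), which
+ * releases the mmap_sem, and 0 otherwise.
+ */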
+static int khugepaged_scan_pmd(struct mm_struct *mm,
+			       struct vm_area_struct *vma,
+			       unsigned long address,
+			       struct page **hpage)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte, *_pte;
+	int ret = 0, referenced = 0, none = 0;
+	struct page *page;
+	unsigned long _address;
+	spinlock_t *ptl;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+		goto out;
+
+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
+	     _pte++, _address += PAGE_SIZE) {
+		pte_t pteval = *_pte;
+		if (pte_none(pteval)) {
+			if (++none <= khugepaged_max_ptes_none)
+				continue;
+			else
+				goto out_unmap;
+		}
+		if (!pte_present(pteval) || !pte_write(pteval))
+			goto out_unmap;
+		page = vm_normal_page(vma, _address, pteval);
+		if (unlikely(!page))
+			goto out_unmap;
+		VM_BUG_ON(PageCompound(page));
+		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
+			goto out_unmap;
+		/* cannot use mapcount: can't collapse if there's a gup pin */
+		if (page_count(page) != 1)
+			goto out_unmap;
+		if (pte_young(pteval) || PageReferenced(page) ||
+		    mmu_notifier_test_young(vma->vm_mm, address))
+			referenced = 1;
+	}
+	if (referenced)
+		ret = 1;
+out_unmap:
+	pte_unmap_unlock(pte, ptl);
+	if (ret)
+		/* collapse_huge_page will return with the mmap_sem released */
+		collapse_huge_page(mm, address, hpage, vma);
+out:
+	return ret;
+}
+
+static void collect_mm_slot(struct mm_slot *mm_slot)
+{
+	struct mm_struct *mm = mm_slot->mm;
+
+	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+
+	if (khugepaged_test_exit(mm)) {
+		/* free mm_slot */
+		hlist_del(&mm_slot->hash);
+		list_del(&mm_slot->mm_node);
+
+		/*
+		 * Not strictly needed because the mm exited already.
+		 *
+		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
+		 */
+
+		/* khugepaged_mm_lock actually not necessary for the below */
+		free_mm_slot(mm_slot);
+		mmdrop(mm);
+	}
+}
+
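+/*
+ * Scan up to @pages pte entries, resuming from the saved cursor and
+ * moving round-robin across the registered mms. Called and returns
+ * with khugepaged_mm_lock held; the lock is dropped while scanning.
+ */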
+static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
+					    struct page **hpage)
+{
+	struct mm_slot *mm_slot;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	int progress = 0;
+
+	VM_BUG_ON(!pages);
+	VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+
+	if (khugepaged_scan.mm_slot)
+		mm_slot = khugepaged_scan.mm_slot;
+	else {
+		mm_slot = list_entry(khugepaged_scan.mm_head.next,
+				     struct mm_slot, mm_node);
+		khugepaged_scan.address = 0;
+		khugepaged_scan.mm_slot = mm_slot;
+	}
+	spin_unlock(&khugepaged_mm_lock);
+
+	mm = mm_slot->mm;
+	down_read(&mm->mmap_sem);
+	if (unlikely(khugepaged_test_exit(mm)))
+		vma = NULL;
+	else
+		vma = find_vma(mm, khugepaged_scan.address);
+
+	progress++;
+	for (; vma; vma = vma->vm_next) {
+		unsigned long hstart, hend;
+
+		cond_resched();
+		if (unlikely(khugepaged_test_exit(mm))) {
+			progress++;
+			break;
+		}
+
+		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
+		     !khugepaged_always()) ||
+		    (vma->vm_flags & VM_NOHUGEPAGE)) {
+			progress++;
+			continue;
+		}
+
+		/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
+		if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
+			khugepaged_scan.address = vma->vm_end;
+			progress++;
+			continue;
+		}
+		VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
+
+		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+		hend = vma->vm_end & HPAGE_PMD_MASK;
+		if (hstart >= hend) {
+			progress++;
+			continue;
+		}
+		if (khugepaged_scan.address < hstart)
+			khugepaged_scan.address = hstart;
+		if (khugepaged_scan.address > hend) {
+			khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
+			progress++;
+			continue;
+		}
+		BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+
+		while (khugepaged_scan.address < hend) {
+			int ret;
+			cond_resched();
+			if (unlikely(khugepaged_test_exit(mm)))
+				goto breakouterloop;
+
+			VM_BUG_ON(khugepaged_scan.address < hstart ||
+				  khugepaged_scan.address + HPAGE_PMD_SIZE >
+				  hend);
+			ret = khugepaged_scan_pmd(mm, vma,
+						  khugepaged_scan.address,
+						  hpage);
+			/* move to next address */
+			khugepaged_scan.address += HPAGE_PMD_SIZE;
+			progress += HPAGE_PMD_NR;
+			if (ret)
+				/* we released mmap_sem so break loop */
+				goto breakouterloop_mmap_sem;
+			if (progress >= pages)
+				goto breakouterloop;
+		}
+	}
+breakouterloop:
+	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
+breakouterloop_mmap_sem:
+
+	spin_lock(&khugepaged_mm_lock);
+	BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+	/*
+	 * Release the current mm_slot if this mm is about to die, or
+	 * if we scanned all vmas of this mm.
+	 */
+	if (khugepaged_test_exit(mm) || !vma) {
+		/*
+		 * Make sure that if mm_users reaches zero while
+		 * khugepaged runs here, khugepaged_exit will find
+		 * the mm_slot not pointing to the exiting mm.
+		 */
+		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
+			khugepaged_scan.mm_slot = list_entry(
+				mm_slot->mm_node.next,
+				struct mm_slot, mm_node);
+			khugepaged_scan.address = 0;
+		} else {
+			khugepaged_scan.mm_slot = NULL;
+			khugepaged_full_scans++;
+		}
+
+		collect_mm_slot(mm_slot);
+	}
+
+	return progress;
+}
+
+static int khugepaged_has_work(void)
+{
+	return !list_empty(&khugepaged_scan.mm_head) &&
+		khugepaged_enabled();
+}
+
+static int khugepaged_wait_event(void)
+{
+	return !list_empty(&khugepaged_scan.mm_head) ||
+		!khugepaged_enabled();
+}
+
+static void khugepaged_do_scan(struct page **hpage)
+{
+	unsigned int progress = 0, pass_through_head = 0;
+	unsigned int pages = khugepaged_pages_to_scan;
+
+	barrier(); /* write khugepaged_pages_to_scan to local stack */
+
+	while (progress < pages) {
+		cond_resched();
+
+#ifndef CONFIG_NUMA
+		if (!*hpage) {
+			*hpage = alloc_hugepage(khugepaged_defrag());
+			if (unlikely(!*hpage))
+				break;
+		}
+#else
+		if (IS_ERR(*hpage))
+			break;
+#endif
+
+		if (unlikely(kthread_should_stop() || freezing(current)))
+			break;
+
+		spin_lock(&khugepaged_mm_lock);
+		if (!khugepaged_scan.mm_slot)
+			pass_through_head++;
+		if (khugepaged_has_work() &&
+		    pass_through_head < 2)
+			progress += khugepaged_scan_mm_slot(pages - progress,
+							    hpage);
+		else
+			progress = pages;
+		spin_unlock(&khugepaged_mm_lock);
+	}
+}
+
+static void khugepaged_alloc_sleep(void)
+{
+	DEFINE_WAIT(wait);
+	add_wait_queue(&khugepaged_wait, &wait);
+	schedule_timeout_interruptible(
+		msecs_to_jiffies(
+			khugepaged_alloc_sleep_millisecs));
+	remove_wait_queue(&khugepaged_wait, &wait);
+}
+
+#ifndef CONFIG_NUMA
+static struct page *khugepaged_alloc_hugepage(void)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage)
+			khugepaged_alloc_sleep();
+	} while (unlikely(!hpage) &&
+		 likely(khugepaged_enabled()));
+	return hpage;
+}
+#endif
+
+static void khugepaged_loop(void)
+{
+	struct page *hpage;
+
+#ifdef CONFIG_NUMA
+	hpage = NULL;
+#endif
+	while (likely(khugepaged_enabled())) {
+#ifndef CONFIG_NUMA
+		hpage = khugepaged_alloc_hugepage();
+		if (unlikely(!hpage))
+			break;
+#else
+		if (IS_ERR(hpage)) {
+			khugepaged_alloc_sleep();
+			hpage = NULL;
+		}
+#endif
+
+		khugepaged_do_scan(&hpage);
+#ifndef CONFIG_NUMA
+		if (hpage)
+			put_page(hpage);
+#endif
+		try_to_freeze();
+		if (unlikely(kthread_should_stop()))
+			break;
+		if (khugepaged_has_work()) {
+			DEFINE_WAIT(wait);
+			if (!khugepaged_scan_sleep_millisecs)
+				continue;
+			add_wait_queue(&khugepaged_wait, &wait);
+			schedule_timeout_interruptible(
+				msecs_to_jiffies(
+					khugepaged_scan_sleep_millisecs));
+			remove_wait_queue(&khugepaged_wait, &wait);
+		} else if (khugepaged_enabled())
+			wait_event_freezable(khugepaged_wait,
+					     khugepaged_wait_event());
+	}
+}
+
+static int khugepaged(void *none)
+{
+	struct mm_slot *mm_slot;
+
+	set_freezable();
+	set_user_nice(current, 19);
+
+	/* serialize with start_khugepaged() */
+	mutex_lock(&khugepaged_mutex);
+
+	for (;;) {
+		mutex_unlock(&khugepaged_mutex);
+		BUG_ON(khugepaged_thread != current);
+		khugepaged_loop();
+		BUG_ON(khugepaged_thread != current);
+
+		mutex_lock(&khugepaged_mutex);
+		if (!khugepaged_enabled())
+			break;
+		if (unlikely(kthread_should_stop()))
+			break;
+	}
+
+	spin_lock(&khugepaged_mm_lock);
+	mm_slot = khugepaged_scan.mm_slot;
+	khugepaged_scan.mm_slot = NULL;
+	if (mm_slot)
+		collect_mm_slot(mm_slot);
+	spin_unlock(&khugepaged_mm_lock);
+
+	khugepaged_thread = NULL;
+	mutex_unlock(&khugepaged_mutex);
+
+	return 0;
+}
+
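+/*
+ * Called by split_huge_page_pmd() when the pmd is huge: pin the
+ * hugepage mapped by this pmd and split it so the caller can
+ * proceed with a regular pagetable.
+ */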
+void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+{
+	struct page *page;
+
+	spin_lock(&mm->page_table_lock);
+	if (unlikely(!pmd_trans_huge(*pmd))) {
+		spin_unlock(&mm->page_table_lock);
+		return;
+	}
+	page = pmd_page(*pmd);
+	VM_BUG_ON(!page_count(page));
+	get_page(page);
+	spin_unlock(&mm->page_table_lock);
+
+	split_huge_page(page);
+
+	put_page(page);
+	BUG_ON(pmd_trans_huge(*pmd));
+}
+
+static void split_huge_page_address(struct mm_struct *mm,
+				    unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		return;
+	/*
+	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
+	 * materialize from under us.
+	 */
+	split_huge_page_pmd(mm, pmd);
+}
+
+void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+			     unsigned long start,
+			     unsigned long end,
+			     long adjust_next)
+{
+	/*
+	 * If the new start address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (start & ~HPAGE_PMD_MASK &&
+	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, start);
+
+	/*
+	 * If the new end address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (end & ~HPAGE_PMD_MASK &&
+	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, end);
+
+	/*
+	 * If we're also updating vma->vm_next->vm_start, and the new
+	 * vm_next->vm_start isn't page aligned and could previously
+	 * contain a hugepage: check if we need to split a huge pmd.
+	 */
+	if (adjust_next > 0) {
+		struct vm_area_struct *next = vma->vm_next;
+		unsigned long nstart = next->vm_start;
+		nstart += adjust_next << PAGE_SHIFT;
+		if (nstart & ~HPAGE_PMD_MASK &&
+		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
+		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
+			split_huge_page_address(next->vm_mm, nstart);
+	}
+}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8585524..bb0b7c1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,71 +394,6 @@
 	return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-	struct page *p = page;
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
-	}
-}
-static void clear_huge_page(struct page *page,
-			unsigned long addr, unsigned long sz)
-{
-	int i;
-
-	if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, addr, sz);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < sz/PAGE_SIZE; i++) {
-		cond_resched();
-		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-	}
-}
-
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < pages_per_huge_page(h); ) {
-		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-static void copy_user_huge_page(struct page *dst, struct page *src,
-			   unsigned long addr, struct vm_area_struct *vma)
-{
-	int i;
-	struct hstate *h = hstate_vma(vma);
-
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_user_gigantic_page(dst, src, addr, vma);
-		return;
-	}
-
-	might_sleep();
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-	}
-}
-
 static void copy_gigantic_page(struct page *dst, struct page *src)
 {
 	int i;
@@ -1428,6 +1363,7 @@
 
 	return sprintf(buf, "%lu\n", nr_huge_pages);
 }
+
 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 			struct kobject *kobj, struct kobj_attribute *attr,
 			const char *buf, size_t len)
@@ -1440,9 +1376,14 @@
 
 	err = strict_strtoul(buf, 10, &count);
 	if (err)
-		return 0;
+		goto out;
 
 	h = kobj_to_hstate(kobj, &nid);
+	if (h->order >= MAX_ORDER) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	if (nid == NUMA_NO_NODE) {
 		/*
 		 * global hstate attribute
@@ -1468,6 +1409,9 @@
 		NODEMASK_FREE(nodes_allowed);
 
 	return len;
+out:
+	NODEMASK_FREE(nodes_allowed);
+	return err;
 }
 
 static ssize_t nr_hugepages_show(struct kobject *kobj,
@@ -1510,6 +1454,7 @@
 	struct hstate *h = kobj_to_hstate(kobj, NULL);
 	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
 }
+
 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
 		struct kobj_attribute *attr, const char *buf, size_t count)
 {
@@ -1517,9 +1462,12 @@
 	unsigned long input;
 	struct hstate *h = kobj_to_hstate(kobj, NULL);
 
+	if (h->order >= MAX_ORDER)
+		return -EINVAL;
+
 	err = strict_strtoul(buf, 10, &input);
 	if (err)
-		return 0;
+		return err;
 
 	spin_lock(&hugetlb_lock);
 	h->nr_overcommit_huge_pages = input;
@@ -1922,13 +1870,19 @@
 {
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;
+	int ret;
 
 	if (!write)
 		tmp = h->max_huge_pages;
 
+	if (write && h->order >= MAX_ORDER)
+		return -EINVAL;
+
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
-	proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (ret)
+		goto out;
 
 	if (write) {
 		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
@@ -1943,8 +1897,8 @@
 		if (nodes_allowed != &node_states[N_HIGH_MEMORY])
 			NODEMASK_FREE(nodes_allowed);
 	}
-
-	return 0;
+out:
+	return ret;
 }
 
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
@@ -1982,21 +1936,27 @@
 {
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;
+	int ret;
 
 	if (!write)
 		tmp = h->nr_overcommit_huge_pages;
 
+	if (write && h->order >= MAX_ORDER)
+		return -EINVAL;
+
 	table->data = &tmp;
 	table->maxlen = sizeof(unsigned long);
-	proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+	if (ret)
+		goto out;
 
 	if (write) {
 		spin_lock(&hugetlb_lock);
 		h->nr_overcommit_huge_pages = tmp;
 		spin_unlock(&hugetlb_lock);
 	}
-
-	return 0;
+out:
+	return ret;
 }
 
 #endif /* CONFIG_SYSCTL */
@@ -2454,7 +2414,8 @@
 		return VM_FAULT_OOM;
 	}
 
-	copy_user_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma,
+			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 
 	/*
@@ -2558,7 +2519,7 @@
 			ret = -PTR_ERR(page);
 			goto out;
 		}
-		clear_huge_page(page, address, huge_page_size(h));
+		clear_huge_page(page, address, pages_per_huge_page(h));
 		__SetPageUptodate(page);
 
 		if (vma->vm_flags & VM_MAYSHARE) {
diff --git a/mm/internal.h b/mm/internal.h
index dedb0af..4c98630 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -39,6 +39,15 @@
 
 extern unsigned long highest_memmap_pfn;
 
+#ifdef CONFIG_SMP
+extern int putback_active_lru_page(struct zone *zone, struct page *page);
+#else
+static inline int putback_active_lru_page(struct zone *zone, struct page *page)
+{
+	return 0;
+}
+#endif
+
 /*
  * in mm/vmscan.c:
  */
@@ -134,6 +143,10 @@
 	}
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern unsigned long vma_address(struct page *page,
+				 struct vm_area_struct *vma);
+#endif
 #else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
@@ -243,7 +256,8 @@
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas);
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *nonblocking);
 
 #define ZONE_RECLAIM_NOSCAN	-2
 #define ZONE_RECLAIM_FULL	-1
diff --git a/mm/ksm.c b/mm/ksm.c
index 43bc893..c2b2a94 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/swap.h>
 #include <linux/ksm.h>
 #include <linux/hash.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -411,6 +412,20 @@
 	up_read(&mm->mmap_sem);
 }
 
+static struct page *page_trans_compound_anon(struct page *page)
+{
+	if (PageTransCompound(page)) {
+		struct page *head = compound_trans_head(page);
+		/*
+		 * head may actually be split and freed from under
+		 * us but it's ok here.
+		 */
+		if (PageAnon(head))
+			return head;
+	}
+	return NULL;
+}
+
 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
 {
 	struct mm_struct *mm = rmap_item->mm;
@@ -430,7 +445,7 @@
 	page = follow_page(vma, addr, FOLL_GET);
 	if (IS_ERR_OR_NULL(page))
 		goto out;
-	if (PageAnon(page)) {
+	if (PageAnon(page) || page_trans_compound_anon(page)) {
 		flush_anon_page(vma, page, addr);
 		flush_dcache_page(page);
 	} else {
@@ -708,6 +723,7 @@
 	if (addr == -EFAULT)
 		goto out;
 
+	BUG_ON(PageTransCompound(page));
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
 	if (!ptep)
 		goto out;
@@ -783,6 +799,7 @@
 		goto out;
 
 	pmd = pmd_offset(pud, addr);
+	BUG_ON(pmd_trans_huge(*pmd));
 	if (!pmd_present(*pmd))
 		goto out;
 
@@ -800,6 +817,8 @@
 	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
 	page_remove_rmap(page);
+	if (!page_mapped(page))
+		try_to_free_swap(page);
 	put_page(page);
 
 	pte_unmap_unlock(ptep, ptl);
@@ -808,6 +827,33 @@
 	return err;
 }
 
+static int page_trans_compound_anon_split(struct page *page)
+{
+	int ret = 0;
+	struct page *transhuge_head = page_trans_compound_anon(page);
+	if (transhuge_head) {
+		/* Get the reference on the head to split it. */
+		if (get_page_unless_zero(transhuge_head)) {
+			/*
+			 * Recheck we got the reference while the head
+			 * was still anonymous.
+			 */
+			if (PageAnon(transhuge_head))
+				ret = split_huge_page(transhuge_head);
+			else
+				/*
+				 * Retry later if split_huge_page ran
+				 * from under us.
+				 */
+				ret = 1;
+			put_page(transhuge_head);
+		} else
+			/* Retry later if split_huge_page ran from under us. */
+			ret = 1;
+	}
+	return ret;
+}
+
 /*
  * try_to_merge_one_page - take two pages and merge them into one
  * @vma: the vma that holds the pte pointing to page
@@ -828,6 +874,9 @@
 
 	if (!(vma->vm_flags & VM_MERGEABLE))
 		goto out;
+	if (PageTransCompound(page) && page_trans_compound_anon_split(page))
+		goto out;
+	BUG_ON(PageTransCompound(page));
 	if (!PageAnon(page))
 		goto out;
 
@@ -1247,6 +1296,18 @@
 
 	slot = ksm_scan.mm_slot;
 	if (slot == &ksm_mm_head) {
+		/*
+		 * A number of pages can hang around indefinitely on per-cpu
+		 * pagevecs, raised page count preventing write_protect_page
+		 * from merging them.  Though it doesn't really matter much,
+		 * it is puzzling to see some stuck in pages_volatile until
+		 * other activity jostles them out, and they also prevented
+		 * LTP's KSM test from succeeding deterministically; so drain
+		 * them here (here rather than on entry to ksm_do_scan(),
+		 * so we don't IPI too often when pages_to_scan is set low).
+		 */
+		lru_add_drain_all();
+
 		root_unstable_tree = RB_ROOT;
 
 		spin_lock(&ksm_mmlist_lock);
@@ -1277,7 +1338,13 @@
 			if (ksm_test_exit(mm))
 				break;
 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
-			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
+			if (IS_ERR_OR_NULL(*page)) {
+				ksm_scan.address += PAGE_SIZE;
+				cond_resched();
+				continue;
+			}
+			if (PageAnon(*page) ||
+			    page_trans_compound_anon(*page)) {
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
 				rmap_item = get_next_rmap_item(slot,
@@ -1291,8 +1358,7 @@
 				up_read(&mm->mmap_sem);
 				return rmap_item;
 			}
-			if (!IS_ERR_OR_NULL(*page))
-				put_page(*page);
+			put_page(*page);
 			ksm_scan.address += PAGE_SIZE;
 			cond_resched();
 		}
@@ -1352,7 +1418,7 @@
 	struct rmap_item *rmap_item;
 	struct page *uninitialized_var(page);
 
-	while (scan_npages--) {
+	while (scan_npages-- && likely(!freezing(current))) {
 		cond_resched();
 		rmap_item = scan_get_next_rmap_item(&page);
 		if (!rmap_item)
@@ -1370,6 +1436,7 @@
 
 static int ksm_scan_thread(void *nothing)
 {
+	set_freezable();
 	set_user_nice(current, 5);
 
 	while (!kthread_should_stop()) {
@@ -1378,11 +1445,13 @@
 			ksm_do_scan(ksm_thread_pages_to_scan);
 		mutex_unlock(&ksm_thread_mutex);
 
+		try_to_freeze();
+
 		if (ksmd_should_run()) {
 			schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
-			wait_event_interruptible(ksm_thread_wait,
+			wait_event_freezable(ksm_thread_wait,
 				ksmd_should_run() || kthread_should_stop());
 		}
 	}
diff --git a/mm/madvise.c b/mm/madvise.c
index 319528b..2221491 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -71,6 +71,12 @@
 		if (error)
 			goto out;
 		break;
+	case MADV_HUGEPAGE:
+	case MADV_NOHUGEPAGE:
+		error = hugepage_madvise(vma, &new_flags, behavior);
+		if (error)
+			goto out;
+		break;
 	}
 
 	if (new_flags == vma->vm_flags) {
@@ -283,6 +289,10 @@
 	case MADV_MERGEABLE:
 	case MADV_UNMERGEABLE:
 #endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	case MADV_HUGEPAGE:
+	case MADV_NOHUGEPAGE:
+#endif
 		return 1;
 
 	default:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00bb8a6..8ab8410 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -292,7 +292,6 @@
 	unsigned long moved_charge;
 	unsigned long moved_swap;
 	struct task_struct *moving_task;	/* a task moving charges */
-	struct mm_struct *mm;
 	wait_queue_head_t waitq;		/* a waitq for other context */
 } mc = {
 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
@@ -821,7 +820,6 @@
 		return;
 	VM_BUG_ON(list_empty(&pc->lru));
 	list_del_init(&pc->lru);
-	return;
 }
 
 void mem_cgroup_del_lru(struct page *page)
@@ -1087,7 +1085,7 @@
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 		case -EBUSY:
 			/* we don't affect global LRU but rotate in our LRU */
@@ -1312,8 +1310,9 @@
 	u64 limit;
 	u64 memsw;
 
-	limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
-			total_swap_pages;
+	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+	limit += total_swap_pages << PAGE_SHIFT;
+
 	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
 	/*
 	 * If memsw is finite and limits the amount of swap space available
@@ -1600,11 +1599,13 @@
  * possibility of race condition. If there is, we take a lock.
  */
 
-static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	bool need_unlock = false;
+	unsigned long uninitialized_var(flags);
 
 	if (unlikely(!pc))
 		return;
@@ -1616,37 +1617,34 @@
 	/* pc->mem_cgroup is unstable ? */
 	if (unlikely(mem_cgroup_stealed(mem))) {
 		/* take a lock against to access pc->mem_cgroup */
-		lock_page_cgroup(pc);
+		move_lock_page_cgroup(pc, &flags);
 		need_unlock = true;
 		mem = pc->mem_cgroup;
 		if (!mem || !PageCgroupUsed(pc))
 			goto out;
 	}
 
-	this_cpu_add(mem->stat->count[idx], val);
-
 	switch (idx) {
-	case MEM_CGROUP_STAT_FILE_MAPPED:
+	case MEMCG_NR_FILE_MAPPED:
 		if (val > 0)
 			SetPageCgroupFileMapped(pc);
 		else if (!page_mapped(page))
 			ClearPageCgroupFileMapped(pc);
+		idx = MEM_CGROUP_STAT_FILE_MAPPED;
 		break;
 	default:
 		BUG();
 	}
 
+	this_cpu_add(mem->stat->count[idx], val);
+
 out:
 	if (unlikely(need_unlock))
-		unlock_page_cgroup(pc);
+		move_unlock_page_cgroup(pc, &flags);
 	rcu_read_unlock();
 	return;
 }
-
-void mem_cgroup_update_file_mapped(struct page *page, int val)
-{
-	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
-}
+EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
@@ -1887,12 +1885,14 @@
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-		gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
+				   gfp_t gfp_mask,
+				   struct mem_cgroup **memcg, bool oom,
+				   int page_size)
 {
 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup *mem = NULL;
 	int ret;
-	int csize = CHARGE_SIZE;
+	int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
 	/*
 	 * Unlike gloval-vm's OOM-kill, we're not in memory shortage
@@ -1917,7 +1917,7 @@
 		VM_BUG_ON(css_is_removed(&mem->css));
 		if (mem_cgroup_is_root(mem))
 			goto done;
-		if (consume_stock(mem))
+		if (page_size == PAGE_SIZE && consume_stock(mem))
 			goto done;
 		css_get(&mem->css);
 	} else {
@@ -1940,7 +1940,7 @@
 			rcu_read_unlock();
 			goto done;
 		}
-		if (consume_stock(mem)) {
+		if (page_size == PAGE_SIZE && consume_stock(mem)) {
 			/*
 			 * It seems dagerous to access memcg without css_get().
 			 * But considering how consume_stok works, it's not
@@ -1981,7 +1981,7 @@
 		case CHARGE_OK:
 			break;
 		case CHARGE_RETRY: /* not in OOM situation but retry */
-			csize = PAGE_SIZE;
+			csize = page_size;
 			css_put(&mem->css);
 			mem = NULL;
 			goto again;
@@ -2002,8 +2002,8 @@
 		}
 	} while (ret != CHARGE_OK);
 
-	if (csize > PAGE_SIZE)
-		refill_stock(mem, csize - PAGE_SIZE);
+	if (csize > page_size)
+		refill_stock(mem, csize - page_size);
 	css_put(&mem->css);
 done:
 	*memcg = mem;
@@ -2031,9 +2031,10 @@
 	}
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+				     int page_size)
 {
-	__mem_cgroup_cancel_charge(mem, 1);
+	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
 }
 
 /*
@@ -2087,22 +2088,10 @@
  * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
  * USED state. If already USED, uncharge and return.
  */
-
-static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-				     struct page_cgroup *pc,
-				     enum charge_type ctype)
+static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
+					 struct page_cgroup *pc,
+					 enum charge_type ctype)
 {
-	/* try_charge() can return NULL to *memcg, taking care of it. */
-	if (!mem)
-		return;
-
-	lock_page_cgroup(pc);
-	if (unlikely(PageCgroupUsed(pc))) {
-		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem);
-		return;
-	}
-
 	pc->mem_cgroup = mem;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2127,6 +2116,33 @@
 	}
 
 	mem_cgroup_charge_statistics(mem, pc, true);
+}
+
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+				       struct page_cgroup *pc,
+				       enum charge_type ctype,
+				       int page_size)
+{
+	int i;
+	int count = page_size >> PAGE_SHIFT;
+
+	/* try_charge() can return NULL to *memcg, taking care of it. */
+	if (!mem)
+		return;
+
+	lock_page_cgroup(pc);
+	if (unlikely(PageCgroupUsed(pc))) {
+		unlock_page_cgroup(pc);
+		mem_cgroup_cancel_charge(mem, page_size);
+		return;
+	}
+
+	/*
+	 * we don't need page_cgroup_lock for tail pages, because they are not
+	 * accessed by any other context at this point.
+	 */
+	for (i = 0; i < count; i++)
+		____mem_cgroup_commit_charge(mem, pc + i, ctype);
 
 	unlock_page_cgroup(pc);
 	/*
@@ -2173,7 +2189,7 @@
 	mem_cgroup_charge_statistics(from, pc, false);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from);
+		mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
@@ -2195,9 +2211,13 @@
 		struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
 {
 	int ret = -EINVAL;
+	unsigned long flags;
+
 	lock_page_cgroup(pc);
 	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
+		move_lock_page_cgroup(pc, &flags);
 		__mem_cgroup_move_account(pc, from, to, uncharge);
+		move_unlock_page_cgroup(pc, &flags);
 		ret = 0;
 	}
 	unlock_page_cgroup(pc);
@@ -2234,13 +2254,14 @@
 		goto put;
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
+				      PAGE_SIZE);
 	if (ret || !parent)
 		goto put_back;
 
 	ret = mem_cgroup_move_account(pc, child, parent, true);
 	if (ret)
-		mem_cgroup_cancel_charge(parent);
+		mem_cgroup_cancel_charge(parent, PAGE_SIZE);
 put_back:
 	putback_lru_page(page);
 put:
@@ -2261,6 +2282,12 @@
 	struct mem_cgroup *mem = NULL;
 	struct page_cgroup *pc;
 	int ret;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page)) {
+		page_size <<= compound_order(page);
+		VM_BUG_ON(!PageTransHuge(page));
+	}
 
 	pc = lookup_page_cgroup(page);
 	/* can happen at boot */
@@ -2268,11 +2295,11 @@
 		return 0;
 	prefetchw(pc);
 
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
 	if (ret || !mem)
 		return ret;
 
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, page_size);
 	return 0;
 }
 
@@ -2281,8 +2308,6 @@
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	if (PageCompound(page))
-		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -2388,13 +2413,13 @@
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
 }
 
 static void
@@ -2410,7 +2435,7 @@
 	cgroup_exclude_rmdir(&ptr->css);
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, ctype);
+	__mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -2459,11 +2484,12 @@
 		return;
 	if (!mem)
 		return;
-	mem_cgroup_cancel_charge(mem);
+	mem_cgroup_cancel_charge(mem, PAGE_SIZE);
 }
 
 static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
+	      int page_size)
 {
 	struct memcg_batch_info *batch = NULL;
 	bool uncharge_memsw = true;
@@ -2490,6 +2516,9 @@
 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
 		goto direct_uncharge;
 
+	if (page_size != PAGE_SIZE)
+		goto direct_uncharge;
+
 	/*
 	 * In typical case, batch->memcg == mem. This means we can
 	 * merge a series of uncharges to an uncharge of res_counter.
@@ -2503,9 +2532,9 @@
 		batch->memsw_bytes += PAGE_SIZE;
 	return;
 direct_uncharge:
-	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	res_counter_uncharge(&mem->res, page_size);
 	if (uncharge_memsw)
-		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+		res_counter_uncharge(&mem->memsw, page_size);
 	if (unlikely(batch->memcg != mem))
 		memcg_oom_recover(mem);
 	return;
@@ -2517,8 +2546,11 @@
 static struct mem_cgroup *
 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
+	int i;
+	int count;
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
+	int page_size = PAGE_SIZE;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2526,6 +2558,12 @@
 	if (PageSwapCache(page))
 		return NULL;
 
+	if (PageTransHuge(page)) {
+		page_size <<= compound_order(page);
+		VM_BUG_ON(!PageTransHuge(page));
+	}
+
+	count = page_size >> PAGE_SHIFT;
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -2558,7 +2596,8 @@
 		break;
 	}
 
-	mem_cgroup_charge_statistics(mem, pc, false);
+	for (i = 0; i < count; i++)
+		mem_cgroup_charge_statistics(mem, pc + i, false);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -2579,7 +2618,7 @@
 		mem_cgroup_get(mem);
 	}
 	if (!mem_cgroup_is_root(mem))
-		__do_uncharge(mem, ctype);
+		__do_uncharge(mem, ctype, page_size);
 
 	return mem;
 
@@ -2774,6 +2813,7 @@
 	enum charge_type ctype;
 	int ret = 0;
 
+	VM_BUG_ON(PageTransHuge(page));
 	if (mem_cgroup_disabled())
 		return 0;
 
@@ -2823,7 +2863,7 @@
 		return 0;
 
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
 	css_put(&mem->css);/* drop extra refcnt */
 	if (ret || *ptr == NULL) {
 		if (PageAnon(page)) {
@@ -2850,13 +2890,13 @@
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
 	return ret;
 }
 
 /* remove redundant charge if migration failed*/
 void mem_cgroup_end_migration(struct mem_cgroup *mem,
-	struct page *oldpage, struct page *newpage)
+	struct page *oldpage, struct page *newpage, bool migration_ok)
 {
 	struct page *used, *unused;
 	struct page_cgroup *pc;
@@ -2865,8 +2905,7 @@
 		return;
 	/* blocks rmdir() */
 	cgroup_exclude_rmdir(&mem->css);
-	/* at migration success, oldpage->mapping is NULL. */
-	if (oldpage->mapping) {
+	if (!migration_ok) {
 		used = oldpage;
 		unused = newpage;
 	} else {
@@ -4176,13 +4215,11 @@
 	 */
 	if (!node_state(node, N_NORMAL_MEMORY))
 		tmp = -1;
-	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
+	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
 	if (!pn)
 		return 1;
 
 	mem->info.nodeinfo[node] = pn;
-	memset(pn, 0, sizeof(*pn));
-
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
@@ -4206,14 +4243,13 @@
 
 	/* Can be very big if MAX_NUMNODES is very big */
 	if (size < PAGE_SIZE)
-		mem = kmalloc(size, GFP_KERNEL);
+		mem = kzalloc(size, GFP_KERNEL);
 	else
-		mem = vmalloc(size);
+		mem = vzalloc(size);
 
 	if (!mem)
 		return NULL;
 
-	memset(mem, 0, size);
 	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
 	if (!mem->stat)
 		goto out_free;
@@ -4461,7 +4497,8 @@
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+					      PAGE_SIZE);
 		if (ret || !mem)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return -ENOMEM;
@@ -4623,6 +4660,7 @@
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4638,7 +4676,7 @@
 	unsigned long precharge;
 	struct vm_area_struct *vma;
 
-	/* We've already held the mmap_sem */
+	down_read(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		struct mm_walk mem_cgroup_count_precharge_walk = {
 			.pmd_entry = mem_cgroup_count_precharge_pte_range,
@@ -4650,6 +4688,7 @@
 		walk_page_range(vma->vm_start, vma->vm_end,
 					&mem_cgroup_count_precharge_walk);
 	}
+	up_read(&mm->mmap_sem);
 
 	precharge = mc.precharge;
 	mc.precharge = 0;
@@ -4659,10 +4698,15 @@
 
 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
 {
-	return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
+	unsigned long precharge = mem_cgroup_count_precharge(mm);
+
+	VM_BUG_ON(mc.moving_task);
+	mc.moving_task = current;
+	return mem_cgroup_do_precharge(precharge);
 }
 
-static void mem_cgroup_clear_mc(void)
+/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
+static void __mem_cgroup_clear_mc(void)
 {
 	struct mem_cgroup *from = mc.from;
 	struct mem_cgroup *to = mc.to;
@@ -4697,23 +4741,28 @@
 						PAGE_SIZE * mc.moved_swap);
 		}
 		/* we've already done mem_cgroup_get(mc.to) */
-
 		mc.moved_swap = 0;
 	}
-	if (mc.mm) {
-		up_read(&mc.mm->mmap_sem);
-		mmput(mc.mm);
-	}
+	memcg_oom_recover(from);
+	memcg_oom_recover(to);
+	wake_up_all(&mc.waitq);
+}
+
+static void mem_cgroup_clear_mc(void)
+{
+	struct mem_cgroup *from = mc.from;
+
+	/*
+	 * we must clear moving_task before waking up waiters at the end of
+	 * task migration.
+	 */
+	mc.moving_task = NULL;
+	__mem_cgroup_clear_mc();
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
 	spin_unlock(&mc.lock);
-	mc.moving_task = NULL;
-	mc.mm = NULL;
 	mem_cgroup_end_move(from);
-	memcg_oom_recover(from);
-	memcg_oom_recover(to);
-	wake_up_all(&mc.waitq);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
@@ -4735,38 +4784,23 @@
 			return 0;
 		/* We move charges only when we move a owner of the mm */
 		if (mm->owner == p) {
-			/*
-			 * We do all the move charge works under one mmap_sem to
-			 * avoid deadlock with down_write(&mmap_sem)
-			 * -> try_charge() -> if (mc.moving_task) -> sleep.
-			 */
-			down_read(&mm->mmap_sem);
-
 			VM_BUG_ON(mc.from);
 			VM_BUG_ON(mc.to);
 			VM_BUG_ON(mc.precharge);
 			VM_BUG_ON(mc.moved_charge);
 			VM_BUG_ON(mc.moved_swap);
-			VM_BUG_ON(mc.moving_task);
-			VM_BUG_ON(mc.mm);
-
 			mem_cgroup_start_move(from);
 			spin_lock(&mc.lock);
 			mc.from = from;
 			mc.to = mem;
-			mc.precharge = 0;
-			mc.moved_charge = 0;
-			mc.moved_swap = 0;
 			spin_unlock(&mc.lock);
-			mc.moving_task = current;
-			mc.mm = mm;
+			/* We set mc.moving_task later */
 
 			ret = mem_cgroup_precharge_mc(mm);
 			if (ret)
 				mem_cgroup_clear_mc();
-			/* We call up_read() and mmput() in clear_mc(). */
-		} else
-			mmput(mm);
+		}
+		mmput(mm);
 	}
 	return ret;
 }
@@ -4789,6 +4823,7 @@
 	spinlock_t *ptl;
 
 retry:
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
@@ -4854,7 +4889,19 @@
 	struct vm_area_struct *vma;
 
 	lru_add_drain_all();
-	/* We've already held the mmap_sem */
+retry:
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+		/*
+		 * Someone who is holding the mmap_sem might be waiting in
+		 * waitq. So we cancel all extra charges, wake up all waiters,
+		 * and retry. Because we cancel precharges, we might not be able
+		 * to move enough charges, but moving charge is a best-effort
+		 * feature anyway, so it wouldn't be a big problem.
+		 */
+		__mem_cgroup_clear_mc();
+		cond_resched();
+		goto retry;
+	}
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		int ret;
 		struct mm_walk mem_cgroup_move_charge_walk = {
@@ -4873,6 +4920,7 @@
 			 */
 			break;
 	}
+	up_read(&mm->mmap_sem);
 }
 
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -4881,11 +4929,17 @@
 				struct task_struct *p,
 				bool threadgroup)
 {
-	if (!mc.mm)
+	struct mm_struct *mm;
+
+	if (!mc.to)
 		/* no need to move charge */
 		return;
 
-	mem_cgroup_move_charge(mc.mm);
+	mm = get_task_mm(p);
+	if (mm) {
+		mem_cgroup_move_charge(mm);
+		mmput(mm);
+	}
 	mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
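
The comment added to mem_cgroup_move_charge() above describes the locking strategy: rather than sleeping on mmap_sem while holding precharges (which could deadlock against a writer that is itself waiting for those charges), the code trylocks, cancels its reservations on failure, and retries. The following is only a minimal userspace sketch of that trylock/cancel/retry pattern, not kernel code; the rwlock and the cancel_precharges()/move_charges_locked() helpers are hypothetical stand-ins for mmap_sem, __mem_cgroup_clear_mc() and the page walk.

/*
 * Userspace sketch of the trylock-cancel-retry pattern (build with -pthread).
 * All names below are illustrative stand-ins, not kernel interfaces.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

static void cancel_precharges(void)
{
	/* Give up reserved charges so a writer waiting on us can proceed. */
	printf("cancelled precharges, retrying\n");
}

static void move_charges_locked(void)
{
	printf("moving charges with the lock held\n");
}

static void move_charges(void)
{
	while (pthread_rwlock_tryrdlock(&mmap_lock) != 0) {
		/*
		 * The lock holder might itself be waiting for our charges;
		 * drop them and retry instead of sleeping on the lock.
		 */
		cancel_precharges();
		sched_yield();
	}
	move_charges_locked();
	pthread_rwlock_unlock(&mmap_lock);
}

int main(void)
{
	move_charges();
	return 0;
}
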
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 46ab2c0..548fbd7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -203,7 +203,7 @@
 #ifdef __ARCH_SI_TRAPNO
 	si.si_trapno = trapno;
 #endif
-	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+	si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
 	/*
 	 * Don't use force here, it's convenient if the signal
 	 * can be temporarily blocked.
@@ -386,6 +386,8 @@
 	struct task_struct *tsk;
 	struct anon_vma *av;
 
+	if (!PageHuge(page) && unlikely(split_huge_page(page)))
+		return;
 	read_lock(&tasklist_lock);
 	av = page_lock_anon_vma(page);
 	if (av == NULL)	/* Not actually mapped anymore */
@@ -928,7 +930,7 @@
 static void set_page_hwpoison_huge_page(struct page *hpage)
 {
 	int i;
-	int nr_pages = 1 << compound_order(hpage);
+	int nr_pages = 1 << compound_trans_order(hpage);
 	for (i = 0; i < nr_pages; i++)
 		SetPageHWPoison(hpage + i);
 }
@@ -936,7 +938,7 @@
 static void clear_page_hwpoison_huge_page(struct page *hpage)
 {
 	int i;
-	int nr_pages = 1 << compound_order(hpage);
+	int nr_pages = 1 << compound_trans_order(hpage);
 	for (i = 0; i < nr_pages; i++)
 		ClearPageHWPoison(hpage + i);
 }
@@ -966,7 +968,7 @@
 		return 0;
 	}
 
-	nr_pages = 1 << compound_order(hpage);
+	nr_pages = 1 << compound_trans_order(hpage);
 	atomic_long_add(nr_pages, &mce_bad_pages);
 
 	/*
@@ -1164,7 +1166,7 @@
 		return 0;
 	}
 
-	nr_pages = 1 << compound_order(page);
+	nr_pages = 1 << compound_trans_order(page);
 
 	if (!get_page_unless_zero(page)) {
 		/*
@@ -1290,9 +1292,10 @@
 	/* Keep page count to indicate a given hugepage is isolated. */
 
 	list_add(&hpage->lru, &pagelist);
-	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
+				true);
 	if (ret) {
-			putback_lru_pages(&pagelist);
+		putback_lru_pages(&pagelist);
 		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 			 pfn, ret, page->flags);
 		if (ret > 0)
@@ -1301,7 +1304,7 @@
 	}
 done:
 	if (!PageHWPoison(hpage))
-		atomic_long_add(1 << compound_order(hpage), &mce_bad_pages);
+		atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
 	set_page_hwpoison_huge_page(hpage);
 	dequeue_hwpoisoned_huge_page(hpage);
 	/* keep elevated page count for bad page */
@@ -1413,7 +1416,8 @@
 		LIST_HEAD(pagelist);
 
 		list_add(&page->lru, &pagelist);
-		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+								0, true);
 		if (ret) {
 			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 				pfn, ret, page->flags);
diff --git a/mm/memory.c b/mm/memory.c
index 02e48aa..31250fa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -394,9 +394,11 @@
 	}
 }
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+		pmd_t *pmd, unsigned long address)
 {
 	pgtable_t new = pte_alloc_one(mm, address);
+	int wait_split_huge_page;
 	if (!new)
 		return -ENOMEM;
 
@@ -416,14 +418,18 @@
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
 	spin_lock(&mm->page_table_lock);
-	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
+	wait_split_huge_page = 0;
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		mm->nr_ptes++;
 		pmd_populate(mm, pmd, new);
 		new = NULL;
-	}
+	} else if (unlikely(pmd_trans_splitting(*pmd)))
+		wait_split_huge_page = 1;
 	spin_unlock(&mm->page_table_lock);
 	if (new)
 		pte_free(mm, new);
+	if (wait_split_huge_page)
+		wait_split_huge_page(vma->anon_vma, pmd);
 	return 0;
 }
 
@@ -436,10 +442,11 @@
 	smp_wmb(); /* See comment in __pte_alloc */
 
 	spin_lock(&init_mm.page_table_lock);
-	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
+	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
 		pmd_populate_kernel(&init_mm, pmd, new);
 		new = NULL;
-	}
+	} else
+		VM_BUG_ON(pmd_trans_splitting(*pmd));
 	spin_unlock(&init_mm.page_table_lock);
 	if (new)
 		pte_free_kernel(&init_mm, new);
@@ -719,9 +726,9 @@
 	return 0;
 }
 
-static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
-		unsigned long addr, unsigned long end)
+int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
+		   unsigned long addr, unsigned long end)
 {
 	pte_t *orig_src_pte, *orig_dst_pte;
 	pte_t *src_pte, *dst_pte;
@@ -795,6 +802,17 @@
 	src_pmd = pmd_offset(src_pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*src_pmd)) {
+			int err;
+			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
+			err = copy_huge_pmd(dst_mm, src_mm,
+					    dst_pmd, src_pmd, addr, vma);
+			if (err == -ENOMEM)
+				return -ENOMEM;
+			if (!err)
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(src_pmd))
 			continue;
 		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
@@ -997,6 +1015,16 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			if (next-addr != HPAGE_PMD_SIZE) {
+				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			} else if (zap_huge_pmd(tlb, vma, pmd)) {
+				(*zap_work)--;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd)) {
 			(*zap_work)--;
 			continue;
@@ -1262,7 +1290,7 @@
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud))
 		goto no_page_table;
-	if (pud_huge(*pud)) {
+	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 		BUG_ON(flags & FOLL_GET);
 		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
 		goto out;
@@ -1273,11 +1301,32 @@
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd))
 		goto no_page_table;
-	if (pmd_huge(*pmd)) {
+	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
 		BUG_ON(flags & FOLL_GET);
 		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
 		goto out;
 	}
+	if (pmd_trans_huge(*pmd)) {
+		if (flags & FOLL_SPLIT) {
+			split_huge_page_pmd(mm, pmd);
+			goto split_fallthrough;
+		}
+		spin_lock(&mm->page_table_lock);
+		if (likely(pmd_trans_huge(*pmd))) {
+			if (unlikely(pmd_trans_splitting(*pmd))) {
+				spin_unlock(&mm->page_table_lock);
+				wait_split_huge_page(vma->anon_vma, pmd);
+			} else {
+				page = follow_trans_huge_pmd(mm, address,
+							     pmd, flags);
+				spin_unlock(&mm->page_table_lock);
+				goto out;
+			}
+		} else
+			spin_unlock(&mm->page_table_lock);
+		/* fall through */
+	}
+split_fallthrough:
 	if (unlikely(pmd_bad(*pmd)))
 		goto no_page_table;
 
@@ -1310,6 +1359,28 @@
 		 */
 		mark_page_accessed(page);
 	}
+	if (flags & FOLL_MLOCK) {
+		/*
+		 * The preliminary mapping check is mainly to avoid the
+		 * pointless overhead of lock_page on the ZERO_PAGE
+		 * which might bounce very badly if there is contention.
+		 *
+		 * If the page is already locked, we don't need to
+		 * handle it now - vmscan will handle it later if and
+		 * when it attempts to reclaim the page.
+		 */
+		if (page->mapping && trylock_page(page)) {
+			lru_add_drain();  /* push cached pages to LRU */
+			/*
+			 * Because we lock page here and migration is
+			 * blocked by the pte's page reference, we need
+			 * only check for file-cache page truncation.
+			 */
+			if (page->mapping)
+				mlock_vma_page(page);
+			unlock_page(page);
+		}
+	}
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
@@ -1341,7 +1412,8 @@
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, unsigned int gup_flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *nonblocking)
 {
 	int i;
 	unsigned long vm_flags;
@@ -1386,6 +1458,7 @@
 			pmd = pmd_offset(pud, pg);
 			if (pmd_none(*pmd))
 				return i ? : -EFAULT;
+			VM_BUG_ON(pmd_trans_huge(*pmd));
 			pte = pte_offset_map(pmd, pg);
 			if (pte_none(*pte)) {
 				pte_unmap(pte);
@@ -1441,10 +1514,15 @@
 			cond_resched();
 			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
+				unsigned int fault_flags = 0;
+
+				if (foll_flags & FOLL_WRITE)
+					fault_flags |= FAULT_FLAG_WRITE;
+				if (nonblocking)
+					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 
 				ret = handle_mm_fault(mm, vma, start,
-					(foll_flags & FOLL_WRITE) ?
-					FAULT_FLAG_WRITE : 0);
+							fault_flags);
 
 				if (ret & VM_FAULT_ERROR) {
 					if (ret & VM_FAULT_OOM)
@@ -1460,6 +1538,11 @@
 				else
 					tsk->min_flt++;
 
+				if (ret & VM_FAULT_RETRY) {
+					*nonblocking = 0;
+					return i;
+				}
+
 				/*
 				 * The VM_FAULT_WRITE bit tells us that
 				 * do_wp_page has broken COW when necessary,
@@ -1559,7 +1642,8 @@
 	if (force)
 		flags |= FOLL_FORCE;
 
-	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+				NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -1584,7 +1668,8 @@
 	struct page *page;
 
 	if (__get_user_pages(current, current->mm, addr, 1,
-			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
+			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
+			     NULL) < 1)
 		return NULL;
 	flush_cache_page(vma, addr, page_to_pfn(page));
 	return page;
@@ -1598,8 +1683,10 @@
 	pud_t * pud = pud_alloc(mm, pgd, addr);
 	if (pud) {
 		pmd_t * pmd = pmd_alloc(mm, pud, addr);
-		if (pmd)
+		if (pmd) {
+			VM_BUG_ON(pmd_trans_huge(*pmd));
 			return pte_alloc_map_lock(mm, pmd, addr, ptl);
+		}
 	}
 	return NULL;
 }
@@ -1818,6 +1905,7 @@
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
 		return -ENOMEM;
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
 		if (remap_pte_range(mm, pmd, addr, next,
@@ -2048,19 +2136,6 @@
 	return same;
 }
 
-/*
- * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
- * servicing faults for write access.  In the normal case, do always want
- * pte_mkwrite.  But get_user_pages can cause write faults for mappings
- * that do not have writing enabled, when used by access_process_vm.
- */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_flags & VM_WRITE))
-		pte = pte_mkwrite(pte);
-	return pte;
-}
-
 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
 	/*
@@ -2112,7 +2187,7 @@
 {
 	struct page *old_page, *new_page;
 	pte_t entry;
-	int reuse = 0, ret = 0;
+	int ret = 0;
 	int page_mkwrite = 0;
 	struct page *dirty_page = NULL;
 
@@ -2149,14 +2224,16 @@
 			}
 			page_cache_release(old_page);
 		}
-		reuse = reuse_swap_page(old_page);
-		if (reuse)
+		if (reuse_swap_page(old_page)) {
 			/*
 			 * The page is all ours.  Move it to our anon_vma so
 			 * the rmap code will not search our parent or siblings.
 			 * Protected against the rmap code by the page lock.
 			 */
 			page_move_anon_rmap(old_page, vma, address);
+			unlock_page(old_page);
+			goto reuse;
+		}
 		unlock_page(old_page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
@@ -2220,18 +2297,52 @@
 		}
 		dirty_page = old_page;
 		get_page(dirty_page);
-		reuse = 1;
-	}
 
-	if (reuse) {
 reuse:
 		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = pte_mkyoung(orig_pte);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		if (ptep_set_access_flags(vma, address, page_table, entry,1))
 			update_mmu_cache(vma, address, page_table);
+		pte_unmap_unlock(page_table, ptl);
 		ret |= VM_FAULT_WRITE;
-		goto unlock;
+
+		if (!dirty_page)
+			return ret;
+
+		/*
+		 * Yes, Virginia, this is actually required to prevent a race
+		 * with clear_page_dirty_for_io() from clearing the page dirty
+		 * bit after it clear all dirty ptes, but before a racing
+		 * do_wp_page installs a dirty pte.
+		 *
+		 * do_no_page is protected similarly.
+		 */
+		if (!page_mkwrite) {
+			wait_on_page_locked(dirty_page);
+			set_page_dirty_balance(dirty_page, page_mkwrite);
+		}
+		put_page(dirty_page);
+		if (page_mkwrite) {
+			struct address_space *mapping = dirty_page->mapping;
+
+			set_page_dirty(dirty_page);
+			unlock_page(dirty_page);
+			page_cache_release(dirty_page);
+			if (mapping)	{
+				/*
+				 * Some device drivers do not set page.mapping
+				 * but still dirty their pages
+				 */
+				balance_dirty_pages_ratelimited(mapping);
+			}
+		}
+
+		/* file_update_time outside page_lock */
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
+		return ret;
 	}
 
 	/*
@@ -2337,39 +2448,6 @@
 		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
-	if (dirty_page) {
-		/*
-		 * Yes, Virginia, this is actually required to prevent a race
-		 * with clear_page_dirty_for_io() from clearing the page dirty
-		 * bit after it clear all dirty ptes, but before a racing
-		 * do_wp_page installs a dirty pte.
-		 *
-		 * do_no_page is protected similarly.
-		 */
-		if (!page_mkwrite) {
-			wait_on_page_locked(dirty_page);
-			set_page_dirty_balance(dirty_page, page_mkwrite);
-		}
-		put_page(dirty_page);
-		if (page_mkwrite) {
-			struct address_space *mapping = dirty_page->mapping;
-
-			set_page_dirty(dirty_page);
-			unlock_page(dirty_page);
-			page_cache_release(dirty_page);
-			if (mapping)	{
-				/*
-				 * Some device drivers do not set page.mapping
-				 * but still dirty their pages
-				 */
-				balance_dirty_pages_ratelimited(mapping);
-			}
-		}
-
-		/* file_update_time outside page_lock */
-		if (vma->vm_file)
-			file_update_time(vma->vm_file);
-	}
 	return ret;
 oom_free_new:
 	page_cache_release(new_page);
@@ -3147,9 +3225,9 @@
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static inline int handle_pte_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, unsigned int flags)
+int handle_pte_fault(struct mm_struct *mm,
+		     struct vm_area_struct *vma, unsigned long address,
+		     pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
 	pte_t entry;
 	spinlock_t *ptl;
@@ -3228,9 +3306,40 @@
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		return VM_FAULT_OOM;
-	pte = pte_alloc_map(mm, pmd, address);
-	if (!pte)
+	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
+		if (!vma->vm_ops)
+			return do_huge_pmd_anonymous_page(mm, vma, address,
+							  pmd, flags);
+	} else {
+		pmd_t orig_pmd = *pmd;
+		barrier();
+		if (pmd_trans_huge(orig_pmd)) {
+			if (flags & FAULT_FLAG_WRITE &&
+			    !pmd_write(orig_pmd) &&
+			    !pmd_trans_splitting(orig_pmd))
+				return do_huge_pmd_wp_page(mm, vma, address,
+							   pmd, orig_pmd);
+			return 0;
+		}
+	}
+
+	/*
+	 * Use __pte_alloc instead of pte_alloc_map, because we can't
+	 * run pte_offset_map on the pmd, if a huge pmd could
+	 * materialize from under us from a different thread.
+	 */
+	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
 		return VM_FAULT_OOM;
+	/* if a huge pmd materialized from under us, just retry later */
+	if (unlikely(pmd_trans_huge(*pmd)))
+		return 0;
+	/*
+	 * A regular pmd is established and it can't morph into a huge pmd
+	 * from under us anymore at this point, because we hold the mmap_sem
+	 * in read mode and khugepaged takes it in write mode. So now it's
+	 * safe to run pte_offset_map().
+	 */
+	pte = pte_offset_map(pmd, address);
 
 	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
@@ -3296,7 +3405,12 @@
 	vma = find_vma(current->mm, addr);
 	if (!vma)
 		return -ENOMEM;
-	write = (vma->vm_flags & VM_WRITE) != 0;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
 	BUG_ON(addr >= end);
 	BUG_ON(end > vma->vm_end);
 	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
@@ -3368,6 +3482,7 @@
 		goto out;
 
 	pmd = pmd_offset(pud, address);
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
 		goto out;
 
@@ -3608,3 +3723,74 @@
 }
 EXPORT_SYMBOL(might_fault);
 #endif
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+static void clear_gigantic_page(struct page *page,
+				unsigned long addr,
+				unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *p = page;
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page;
+	     i++, p = mem_map_next(p, page, i)) {
+		cond_resched();
+		clear_user_highpage(p, addr + i * PAGE_SIZE);
+	}
+}
+void clear_huge_page(struct page *page,
+		     unsigned long addr, unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		clear_gigantic_page(page, addr, pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+	}
+}
+
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
+				    unsigned long addr,
+				    struct vm_area_struct *vma,
+				    unsigned int pages_per_huge_page)
+{
+	int i;
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page; ) {
+		cond_resched();
+		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_user_huge_page(struct page *dst, struct page *src,
+			 unsigned long addr, struct vm_area_struct *vma,
+			 unsigned int pages_per_huge_page)
+{
+	int i;
+
+	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+		copy_user_gigantic_page(dst, src, addr, vma,
+					pages_per_huge_page);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page; i++) {
+		cond_resched();
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
+	}
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
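
The handle_mm_fault() rework above is what lets an anonymous fault install a huge pmd via do_huge_pmd_anonymous_page(). A hedged userspace sketch of exercising that path follows: map anonymous memory, align to the (assumed) 2MB huge page size, advise MADV_HUGEPAGE, and touch it. The 2MB size and the MADV_HUGEPAGE value are assumptions, so the constant is defined locally in case pre-2.6.38 headers lack it.

/*
 * Userspace sketch: touch a 2MB-aligned anonymous region so the fault path
 * above can install a huge pmd.  The 2MB size and MADV_HUGEPAGE value (14)
 * are assumptions; older headers may not define the constant.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

#define HPAGE_SIZE (2UL * 1024 * 1024)

int main(void)
{
	void *p;

	/* Over-allocate so we can pick a 2MB-aligned start address. */
	p = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p = (void *)(((unsigned long)p + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));

	if (madvise(p, HPAGE_SIZE, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");	/* non-fatal */

	memset(p, 0, HPAGE_SIZE);	/* first touch takes the huge fault */
	printf("touched %lu bytes at %p\n", HPAGE_SIZE, p);
	return 0;
}
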
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2c6523a..e92f047 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -82,9 +82,10 @@
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-static void get_page_bootmem(unsigned long info,  struct page *page, int type)
+static void get_page_bootmem(unsigned long info,  struct page *page,
+			     unsigned long type)
 {
-	atomic_set(&page->_mapcount, type);
+	page->lru.next = (struct list_head *) type;
 	SetPagePrivate(page);
 	set_page_private(page, info);
 	atomic_inc(&page->_count);
@@ -94,15 +95,16 @@
  * so use __ref to tell modpost not to generate a warning */
 void __ref put_page_bootmem(struct page *page)
 {
-	int type;
+	unsigned long type;
 
-	type = atomic_read(&page->_mapcount);
-	BUG_ON(type >= -1);
+	type = (unsigned long) page->lru.next;
+	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
+	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 
 	if (atomic_dec_return(&page->_count) == 1) {
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
-		reset_page_mapcount(page);
+		INIT_LIST_HEAD(&page->lru);
 		__free_pages_bootmem(page, 0);
 	}
 
@@ -733,7 +735,8 @@
 			goto out;
 		}
 		/* this function returns # of failed pages */
-		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
+		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+								true, true);
 		if (ret)
 			putback_lru_pages(&source);
 	}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 11ff260..368fc9d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -514,6 +514,7 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		split_huge_page_pmd(vma->vm_mm, pmd);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		if (check_pte_range(vma, pmd, addr, next, nodes,
@@ -935,7 +936,8 @@
 		return PTR_ERR(vma);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_node_page, dest, 0);
+		err = migrate_pages(&pagelist, new_node_page, dest,
+								false, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
@@ -1155,7 +1157,8 @@
 
 		if (!list_empty(&pagelist)) {
 			nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma, 0);
+						(unsigned long)vma,
+						false, true);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
@@ -1308,16 +1311,13 @@
 
 	/* Find the mm_struct */
 	rcu_read_lock();
-	read_lock(&tasklist_lock);
 	task = pid ? find_task_by_vpid(pid) : current;
 	if (!task) {
-		read_unlock(&tasklist_lock);
 		rcu_read_unlock();
 		err = -ESRCH;
 		goto out;
 	}
 	mm = get_task_mm(task);
-	read_unlock(&tasklist_lock);
 	rcu_read_unlock();
 
 	err = -EINVAL;
@@ -1796,7 +1796,7 @@
 }
 
 /**
- * 	alloc_page_vma	- Allocate a page for a VMA.
+ * 	alloc_pages_vma	- Allocate a page for a VMA.
  *
  * 	@gfp:
  *      %GFP_USER    user allocation.
@@ -1805,6 +1805,7 @@
  *      %GFP_FS      allocation should not call back into a file system.
  *      %GFP_ATOMIC  don't sleep.
  *
+ *	@order: Order of the GFP allocation.
  * 	@vma:  Pointer to VMA or NULL if not available.
  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
  *
@@ -1818,7 +1819,8 @@
  *	Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 	struct zonelist *zl;
@@ -1830,7 +1832,7 @@
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		mpol_cond_put(pol);
-		page = alloc_page_interleave(gfp, 0, nid);
+		page = alloc_page_interleave(gfp, order, nid);
 		put_mems_allowed();
 		return page;
 	}
@@ -1839,7 +1841,7 @@
 		/*
 		 * slow path: ref counted shared policy
 		 */
-		struct page *page =  __alloc_pages_nodemask(gfp, 0,
+		struct page *page =  __alloc_pages_nodemask(gfp, order,
 						zl, policy_nodemask(gfp, pol));
 		__mpol_put(pol);
 		put_mems_allowed();
@@ -1848,7 +1850,8 @@
 	/*
 	 * fast path:  default or task policy
 	 */
-	page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
+	page = __alloc_pages_nodemask(gfp, order, zl,
+				      policy_nodemask(gfp, pol));
 	put_mems_allowed();
 	return page;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 6ae8a66..46fe8cc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -113,6 +113,8 @@
 			goto out;
 
 		pmd = pmd_offset(pud, addr);
+		if (pmd_trans_huge(*pmd))
+			goto out;
 		if (!pmd_present(*pmd))
 			goto out;
 
@@ -246,7 +248,7 @@
 
 	expected_count = 2 + page_has_private(page);
 	if (page_count(page) != expected_count ||
-			(struct page *)radix_tree_deref_slot(pslot) != page) {
+		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -318,7 +320,7 @@
 
 	expected_count = 2 + page_has_private(page);
 	if (page_count(page) != expected_count ||
-	    (struct page *)radix_tree_deref_slot(pslot) != page) {
+		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
@@ -614,13 +616,12 @@
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, int offlining)
+			struct page *page, int force, bool offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
 	int remap_swapcache = 1;
-	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
 	struct anon_vma *anon_vma = NULL;
@@ -632,6 +633,9 @@
 		/* page was freed from under us. So we are done. */
 		goto move_newpage;
 	}
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page)))
+			goto move_newpage;
 
 	/* prepare cgroup just returns 0 or -ENOMEM */
 	rc = -EAGAIN;
@@ -639,6 +643,23 @@
 	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
+
+		/*
+		 * It's not safe for direct compaction to call lock_page.
+		 * For example, during page readahead pages are added locked
+		 * to the LRU. Later, when the IO completes the pages are
+		 * marked uptodate and unlocked. However, the queueing
+		 * could be merging multiple pages for one bio (e.g.
+		 * mpage_readpages). If an allocation happens for the
+		 * second or third page, the process can end up locking
+		 * the same page twice and deadlocking. Rather than
+		 * trying to be clever about what pages can be locked,
+		 * avoid the use of lock_page for direct compaction
+		 * altogether.
+		 */
+		if (current->flags & PF_MEMALLOC)
+			goto move_newpage;
+
 		lock_page(page);
 	}
 
@@ -665,27 +686,33 @@
 	BUG_ON(charge);
 
 	if (PageWriteback(page)) {
-		if (!force)
+		if (!force || !sync)
 			goto uncharge;
 		wait_on_page_writeback(page);
 	}
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
-	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
+	 * This get_anon_vma() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
 	 * just care Anon page here.
 	 */
 	if (PageAnon(page)) {
-		rcu_read_lock();
-		rcu_locked = 1;
-
-		/* Determine how to safely use anon_vma */
-		if (!page_mapped(page)) {
-			if (!PageSwapCache(page))
-				goto rcu_unlock;
-
+		/*
+		 * Only page_lock_anon_vma() understands the subtleties of
+		 * getting a hold on an anon_vma from outside one of its mms.
+		 */
+		anon_vma = page_lock_anon_vma(page);
+		if (anon_vma) {
+			/*
+			 * Take a reference count on the anon_vma if the
+			 * page is mapped so that it is guaranteed to
+			 * exist when the page is remapped later
+			 */
+			get_anon_vma(anon_vma);
+			page_unlock_anon_vma(anon_vma);
+		} else if (PageSwapCache(page)) {
 			/*
 			 * We cannot be sure that the anon_vma of an unmapped
 			 * swapcache page is safe to use because we don't
@@ -700,13 +727,7 @@
 			 */
 			remap_swapcache = 0;
 		} else {
-			/*
-			 * Take a reference count on the anon_vma if the
-			 * page is mapped so that it is guaranteed to
-			 * exist when the page is remapped later
-			 */
-			anon_vma = page_anon_vma(page);
-			get_anon_vma(anon_vma);
+			goto uncharge;
 		}
 	}
 
@@ -723,16 +744,10 @@
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
-		if (!PageAnon(page) && page_has_private(page)) {
-			/*
-			 * Go direct to try_to_free_buffers() here because
-			 * a) that's what try_to_release_page() would do anyway
-			 * b) we may be under rcu_read_lock() here, so we can't
-			 *    use GFP_KERNEL which is what try_to_release_page()
-			 *    needs to be effective.
-			 */
+		VM_BUG_ON(PageAnon(page));
+		if (page_has_private(page)) {
 			try_to_free_buffers(page);
-			goto rcu_unlock;
+			goto uncharge;
 		}
 		goto skip_unmap;
 	}
@@ -746,17 +761,14 @@
 
 	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
-rcu_unlock:
 
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		drop_anon_vma(anon_vma);
 
-	if (rcu_locked)
-		rcu_read_unlock();
 uncharge:
 	if (!charge)
-		mem_cgroup_end_migration(mem, page, newpage);
+		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 unlock:
 	unlock_page(page);
 
@@ -810,12 +822,11 @@
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				unsigned long private, struct page *hpage,
-				int force, int offlining)
+				int force, bool offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
 	struct page *new_hpage = get_new_page(hpage, private, &result);
-	int rcu_locked = 0;
 	struct anon_vma *anon_vma = NULL;
 
 	if (!new_hpage)
@@ -824,18 +835,16 @@
 	rc = -EAGAIN;
 
 	if (!trylock_page(hpage)) {
-		if (!force)
+		if (!force || !sync)
 			goto out;
 		lock_page(hpage);
 	}
 
 	if (PageAnon(hpage)) {
-		rcu_read_lock();
-		rcu_locked = 1;
-
-		if (page_mapped(hpage)) {
-			anon_vma = page_anon_vma(hpage);
-			atomic_inc(&anon_vma->external_refcount);
+		anon_vma = page_lock_anon_vma(hpage);
+		if (anon_vma) {
+			get_anon_vma(anon_vma);
+			page_unlock_anon_vma(anon_vma);
 		}
 	}
 
@@ -847,16 +856,8 @@
 	if (rc)
 		remove_migration_ptes(hpage, hpage);
 
-	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
-					    &anon_vma->lock)) {
-		int empty = list_empty(&anon_vma->head);
-		spin_unlock(&anon_vma->lock);
-		if (empty)
-			anon_vma_free(anon_vma);
-	}
-
-	if (rcu_locked)
-		rcu_read_unlock();
+	if (anon_vma)
+		drop_anon_vma(anon_vma);
 out:
 	unlock_page(hpage);
 
@@ -892,7 +893,8 @@
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, bool offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -912,7 +914,8 @@
 			cond_resched();
 
 			rc = unmap_and_move(get_new_page, private,
-						page, pass > 2, offlining);
+						page, pass > 2, offlining,
+						sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -941,7 +944,8 @@
 }
 
 int migrate_huge_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, bool offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -957,7 +961,8 @@
 			cond_resched();
 
 			rc = unmap_and_move_huge_page(get_new_page,
-					private, page, pass > 2, offlining);
+					private, page, pass > 2, offlining,
+					sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -1042,7 +1047,7 @@
 		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
 			goto set_status;
 
-		page = follow_page(vma, pp->addr, FOLL_GET);
+		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
 
 		err = PTR_ERR(page);
 		if (IS_ERR(page))
@@ -1090,7 +1095,7 @@
 	err = 0;
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm, 0);
+				(unsigned long)pm, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
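
do_move_page_to_node_array() above now follows pages with FOLL_GET|FOLL_SPLIT, and the migrate_pages()/migrate_huge_pages() core takes explicit offlining/sync flags. From userspace this machinery is reached through the move_pages(2) syscall; below is only a minimal sketch, assuming the libnuma <numaif.h> wrapper (link with -lnuma) and an existing node 0.

/*
 * Userspace sketch of page migration via move_pages(2).  Assumes libnuma's
 * <numaif.h> wrapper (link with -lnuma) and that node 0 exists.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *pages[1] = { page };
	int nodes[1] = { 0 };		/* target node (assumed to exist) */
	int status[1] = { -1 };

	if (page == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(page, 0, page_size);	/* fault the page in first */

	if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE))
		perror("move_pages");
	else
		printf("page now on node %d\n", status[0]);
	return 0;
}
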
diff --git a/mm/mincore.c b/mm/mincore.c
index 9ac42dc..a4e6b9d 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -154,6 +154,13 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
+				vec += (next - addr) >> PAGE_SHIFT;
+				continue;
+			}
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			mincore_unmapped_range(vma, addr, next, vec);
 		else
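
The mincore_huge_pmd() branch above lets mincore(2) report residency for a transparent huge pmd without splitting it, filling the whole span covered by the pmd in one step. For reference, a small userspace sketch of the mincore(2) interface itself (one result byte per page):

/*
 * Userspace sketch of mincore(2): report, one byte per page, whether each
 * page of a mapping is resident.  Sizes here are arbitrary example values.
 */
#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	size_t len = 16 * psize;
	unsigned char *vec = malloc(len / psize);
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	size_t i;

	if (!vec || p == MAP_FAILED) {
		perror("setup");
		return 1;
	}
	memset(p, 0, len / 2);		/* fault in the first half only */

	if (mincore(p, len, vec)) {
		perror("mincore");
		return 1;
	}
	for (i = 0; i < len / psize; i++)
		printf("page %2zu: %s\n", i,
		       (vec[i] & 1) ? "resident" : "not resident");
	return 0;
}
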
diff --git a/mm/mlock.c b/mm/mlock.c
index b70919c..13e81ee 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -155,13 +155,12 @@
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
 static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-				    unsigned long start, unsigned long end)
+				    unsigned long start, unsigned long end,
+				    int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = start;
-	struct page *pages[16]; /* 16 gives a reasonable batch */
 	int nr_pages = (end - start) / PAGE_SIZE;
-	int ret = 0;
 	int gup_flags;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -170,73 +169,26 @@
 	VM_BUG_ON(end   > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH | FOLL_GET;
-	if (vma->vm_flags & VM_WRITE)
+	gup_flags = FOLL_TOUCH;
+	/*
+	 * We want to touch writable mappings with a write fault in order
+	 * to break COW, except for shared mappings because these don't COW
+	 * and we would not want to dirty them for nothing.
+	 */
+	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 
+	if (vma->vm_flags & VM_LOCKED)
+		gup_flags |= FOLL_MLOCK;
+
 	/* We don't try to access the guard page of a stack vma */
 	if (stack_guard_page(vma, start)) {
 		addr += PAGE_SIZE;
 		nr_pages--;
 	}
 
-	while (nr_pages > 0) {
-		int i;
-
-		cond_resched();
-
-		/*
-		 * get_user_pages makes pages present if we are
-		 * setting mlock. and this extra reference count will
-		 * disable migration of this page.  However, page may
-		 * still be truncated out from under us.
-		 */
-		ret = __get_user_pages(current, mm, addr,
-				min_t(int, nr_pages, ARRAY_SIZE(pages)),
-				gup_flags, pages, NULL);
-		/*
-		 * This can happen for, e.g., VM_NONLINEAR regions before
-		 * a page has been allocated and mapped at a given offset,
-		 * or for addresses that map beyond end of a file.
-		 * We'll mlock the pages if/when they get faulted in.
-		 */
-		if (ret < 0)
-			break;
-
-		lru_add_drain();	/* push cached pages to LRU */
-
-		for (i = 0; i < ret; i++) {
-			struct page *page = pages[i];
-
-			if (page->mapping) {
-				/*
-				 * That preliminary check is mainly to avoid
-				 * the pointless overhead of lock_page on the
-				 * ZERO_PAGE: which might bounce very badly if
-				 * there is contention.  However, we're still
-				 * dirtying its cacheline with get/put_page:
-				 * we'll add another __get_user_pages flag to
-				 * avoid it if that case turns out to matter.
-				 */
-				lock_page(page);
-				/*
-				 * Because we lock page here and migration is
-				 * blocked by the elevated reference, we need
-				 * only check for file-cache page truncation.
-				 */
-				if (page->mapping)
-					mlock_vma_page(page);
-				unlock_page(page);
-			}
-			put_page(page);	/* ref from get_user_pages() */
-		}
-
-		addr += ret * PAGE_SIZE;
-		nr_pages -= ret;
-		ret = 0;
-	}
-
-	return ret;	/* 0 or negative error code */
+	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+				NULL, NULL, nonblocking);
 }
 
 /*
@@ -280,7 +232,7 @@
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
 
-		__mlock_vma_pages_range(vma, start, end);
+		__mlock_vma_pages_range(vma, start, end, NULL);
 
 		/* Hide errors from mmap() and other callers */
 		return 0;
@@ -372,18 +324,10 @@
 	int ret = 0;
 	int lock = newflags & VM_LOCKED;
 
-	if (newflags == vma->vm_flags ||
-			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
+	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))
 		goto out;	/* don't set VM_LOCKED,  don't count */
 
-	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
-			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current)) {
-		if (lock)
-			make_pages_present(start, end);
-		goto out;	/* don't set VM_LOCKED,  don't count */
-	}
-
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma));
@@ -419,14 +363,10 @@
 	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
 	 */
 
-	if (lock) {
+	if (lock)
 		vma->vm_flags = newflags;
-		ret = __mlock_vma_pages_range(vma, start, end);
-		if (ret < 0)
-			ret = __mlock_posix_error_return(ret);
-	} else {
+	else
 		munlock_vma_pages_range(vma, start, end);
-	}
 
 out:
 	*prev = vma;
@@ -439,7 +379,8 @@
 	struct vm_area_struct * vma, * prev;
 	int error;
 
-	len = PAGE_ALIGN(len);
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
 	end = start + len;
 	if (end < start)
 		return -EINVAL;
@@ -482,6 +423,62 @@
 	return error;
 }
 
+static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long end, nstart, nend;
+	struct vm_area_struct *vma = NULL;
+	int locked = 0;
+	int ret = 0;
+
+	VM_BUG_ON(start & ~PAGE_MASK);
+	VM_BUG_ON(len != PAGE_ALIGN(len));
+	end = start + len;
+
+	for (nstart = start; nstart < end; nstart = nend) {
+		/*
+		 * We want to fault in pages for [nstart; end) address range.
+		 * Find first corresponding VMA.
+		 */
+		if (!locked) {
+			locked = 1;
+			down_read(&mm->mmap_sem);
+			vma = find_vma(mm, nstart);
+		} else if (nstart >= vma->vm_end)
+			vma = vma->vm_next;
+		if (!vma || vma->vm_start >= end)
+			break;
+		/*
+		 * Set [nstart; nend) to intersection of desired address
+		 * range with the first VMA. Also, skip undesirable VMA types.
+		 */
+		nend = min(end, vma->vm_end);
+		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+			continue;
+		if (nstart < vma->vm_start)
+			nstart = vma->vm_start;
+		/*
+		 * Now fault in a range of pages. __mlock_vma_pages_range()
+		 * double checks the vma flags, so that it won't mlock pages
+		 * if the vma was already munlocked.
+		 */
+		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
+		if (ret < 0) {
+			if (ignore_errors) {
+				ret = 0;
+				continue;	/* continue at next VMA */
+			}
+			ret = __mlock_posix_error_return(ret);
+			break;
+		}
+		nend = nstart + ret * PAGE_SIZE;
+		ret = 0;
+	}
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;	/* 0 or negative error code */
+}
+
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
 	unsigned long locked;
@@ -507,6 +504,8 @@
 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
 		error = do_mlock(start, len, 1);
 	up_write(&current->mm->mmap_sem);
+	if (!error)
+		error = do_mlock_pages(start, len, 0);
 	return error;
 }
 
@@ -571,6 +570,10 @@
 	    capable(CAP_IPC_LOCK))
 		ret = do_mlockall(flags);
 	up_write(&current->mm->mmap_sem);
+	if (!ret && (flags & MCL_CURRENT)) {
+		/* Ignore errors */
+		do_mlock_pages(0, TASK_SIZE, 1);
+	}
 out:
 	return ret;
 }
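
With the rework above, mlock(2) and mlockall(MCL_CURRENT) populate the locked range through do_mlock_pages() after mmap_sem has been dropped from write to read mode, instead of via make_pages_present() or the old batched __get_user_pages() loop. The userspace contract is unchanged; a minimal sketch (it may need CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK):

/*
 * Userspace sketch: lock a small buffer into RAM.  mlock() both pins and,
 * after this change, populates the pages while mmap_sem is only held for
 * read.  Buffer size is an arbitrary example value.
 */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	size_t len = 4 * psize;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (mlock(buf, len)) {		/* pages are now resident and pinned */
		perror("mlock");
		return 1;
	}
	memset(buf, 0x5a, len);		/* no major faults expected here */
	munlock(buf, len);
	return 0;
}
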
diff --git a/mm/mmap.c b/mm/mmap.c
index 50a4aa0..2ec8eb5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/perf_event.h>
 #include <linux/audit.h>
+#include <linux/khugepaged.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -253,7 +254,15 @@
 	down_write(&mm->mmap_sem);
 
 #ifdef CONFIG_COMPAT_BRK
-	min_brk = mm->end_code;
+	/*
+	 * CONFIG_COMPAT_BRK can still be overridden by setting
+	 * randomize_va_space to 2, which will still cause mm->start_brk
+	 * to be arbitrarily shifted
+	 */
+	if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+		min_brk = mm->start_brk;
+	else
+		min_brk = mm->end_data;
 #else
 	min_brk = mm->start_brk;
 #endif
@@ -588,6 +597,8 @@
 		}
 	}
 
+	vma_adjust_trans_huge(vma, start, end, adjust_next);
+
 	/*
 	 * When changing only vma->vm_end, we don't really need anon_vma
 	 * lock. This is a fairly rare case by itself, but the anon_vma
@@ -815,6 +826,7 @@
 				end, prev->vm_pgoff, NULL);
 		if (err)
 			return NULL;
+		khugepaged_enter_vma_merge(prev);
 		return prev;
 	}
 
@@ -833,6 +845,7 @@
 				next->vm_pgoff - pglen, NULL);
 		if (err)
 			return NULL;
+		khugepaged_enter_vma_merge(area);
 		return area;
 	}
 
@@ -1761,6 +1774,7 @@
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	khugepaged_enter_vma_merge(vma);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1808,6 +1822,7 @@
 		}
 	}
 	vma_unlock_anon_vma(vma);
+	khugepaged_enter_vma_merge(vma);
 	return error;
 }
 
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 438951d..8d032de 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -100,6 +100,26 @@
 	return young;
 }
 
+int __mmu_notifier_test_young(struct mm_struct *mm,
+			      unsigned long address)
+{
+	struct mmu_notifier *mn;
+	struct hlist_node *n;
+	int young = 0;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+		if (mn->ops->test_young) {
+			young = mn->ops->test_young(mn, mm, address);
+			if (young)
+				break;
+		}
+	}
+	rcu_read_unlock();
+
+	return young;
+}
+
 void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 			       pte_t pte)
 {
diff --git a/mm/mmzone.c b/mm/mmzone.c
index e35bfb8..f5b7d17 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,24 +87,3 @@
 	return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
-
-#ifdef CONFIG_SMP
-/* Called when a more accurate view of NR_FREE_PAGES is needed */
-unsigned long zone_nr_free_pages(struct zone *zone)
-{
-	unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
-
-	/*
-	 * While kswapd is awake, it is considered the zone is under some
-	 * memory pressure. Under pressure, there is a risk that
-	 * per-cpu-counter-drift will allow the min watermark to be breached
-	 * potentially causing a live-lock. While kswapd is awake and
-	 * free pages are low, get a better estimate for free pages
-	 */
-	if (nr_free_pages < zone->percpu_drift_mark &&
-			!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
-		return zone_page_state_snapshot(zone, NR_FREE_PAGES);
-
-	return nr_free_pages;
-}
-#endif /* CONFIG_SMP */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4c51338..5a688a2 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -78,7 +78,7 @@
 	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -88,13 +88,21 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (pmd_trans_huge(*pmd)) {
+			if (next - addr != HPAGE_PMD_SIZE)
+				split_huge_page_pmd(vma->vm_mm, pmd);
+			else if (change_huge_pmd(vma, pmd, addr, newprot))
+				continue;
+			/* fall through */
+		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -106,7 +114,8 @@
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+		change_pmd_range(vma, pud, addr, next, newprot,
+				 dirty_accountable);
 	} while (pud++, addr = next, addr != end);
 }
 
@@ -126,7 +135,8 @@
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+		change_pud_range(vma, pgd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index 563fbdd..9925b63 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -41,13 +41,15 @@
 		return NULL;
 
 	pmd = pmd_offset(pud, addr);
+	split_huge_page_pmd(mm, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		return NULL;
 
 	return pmd;
 }
 
-static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			    unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -62,7 +64,8 @@
 	if (!pmd)
 		return NULL;
 
-	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+	VM_BUG_ON(pmd_trans_huge(*pmd));
+	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
 		return NULL;
 
 	return pmd;
@@ -147,7 +150,7 @@
 		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
 		if (!old_pmd)
 			continue;
-		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
 		if (!new_pmd)
 			break;
 		next = (new_addr + PMD_SIZE) & PMD_MASK;
diff --git a/mm/nommu.c b/mm/nommu.c
index ef4045d..f59e142 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -127,7 +127,8 @@
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int nr_pages, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     struct page **pages, struct vm_area_struct **vmas,
+		     int *retry)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -185,7 +186,8 @@
 	if (force)
 		flags |= FOLL_FORCE;
 
-	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+				NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b4edfe7..2cb01f6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -404,15 +404,18 @@
  * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
  * - vm.dirty_ratio             or  vm.dirty_bytes
  * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
+ * real-time tasks.
  */
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
-	unsigned long available_memory = determine_dirtyable_memory();
+	unsigned long uninitialized_var(available_memory);
 	struct task_struct *tsk;
 
+	if (!vm_dirty_bytes || !dirty_background_bytes)
+		available_memory = determine_dirtyable_memory();
+
 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
 	else
@@ -1103,7 +1106,7 @@
 int __set_page_dirty_no_writeback(struct page *page)
 {
 	if (!PageDirty(page))
-		SetPageDirty(page);
+		return !TestSetPageDirty(page);
 	return 0;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff7e158..90c1439 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -357,6 +357,7 @@
 	}
 }
 
+/* update __split_huge_page_refcount if you change this function */
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
 	int i;
@@ -426,18 +427,10 @@
  *
  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  */
-static inline struct page *
-__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
-{
-	unsigned long buddy_idx = page_idx ^ (1 << order);
-
-	return page + (buddy_idx - page_idx);
-}
-
 static inline unsigned long
-__find_combined_index(unsigned long page_idx, unsigned int order)
+__find_buddy_index(unsigned long page_idx, unsigned int order)
 {
-	return (page_idx & ~(1 << order));
+	return page_idx ^ (1 << order);
 }
 
 /*
@@ -448,8 +441,8 @@
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we use PG_buddy.
- * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
+ * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -482,7 +475,7 @@
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with PG_buddy. Page's
+ * free pages of length of (1 << order) and marked with _mapcount -2. Page's
  * order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other.  That is, if we allocate a small block, and both were   
@@ -499,6 +492,7 @@
 {
 	unsigned long page_idx;
 	unsigned long combined_idx;
+	unsigned long uninitialized_var(buddy_idx);
 	struct page *buddy;
 
 	if (unlikely(PageCompound(page)))
@@ -513,7 +507,8 @@
 	VM_BUG_ON(bad_range(zone, page));
 
 	while (order < MAX_ORDER-1) {
-		buddy = __page_find_buddy(page, page_idx, order);
+		buddy_idx = __find_buddy_index(page_idx, order);
+		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
 			break;
 
@@ -521,7 +516,7 @@
 		list_del(&buddy->lru);
 		zone->free_area[order].nr_free--;
 		rmv_page_order(buddy);
-		combined_idx = __find_combined_index(page_idx, order);
+		combined_idx = buddy_idx & page_idx;
 		page = page + (combined_idx - page_idx);
 		page_idx = combined_idx;
 		order++;
@@ -538,9 +533,10 @@
 	 */
 	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 		struct page *higher_page, *higher_buddy;
-		combined_idx = __find_combined_index(page_idx, order);
-		higher_page = page + combined_idx - page_idx;
-		higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+		combined_idx = buddy_idx & page_idx;
+		higher_page = page + (combined_idx - page_idx);
+		buddy_idx = __find_buddy_index(combined_idx, order + 1);
+		higher_buddy = page + (buddy_idx - combined_idx);
 		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 			list_add_tail(&page->lru,
 				&zone->free_area[order].free_list[migratetype]);
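
The index arithmetic introduced above is compact: __find_buddy_index() flips the order bit with XOR, and ANDing page_idx with buddy_idx yields the start of the merged higher-order block. A minimal standalone sketch with made-up indices, not the allocator itself:

#include <assert.h>
#include <stdio.h>

static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);	/* flip the order-th bit */
}

int main(void)
{
	unsigned long page_idx = 12;		/* 0b1100, aligned to order 2 */
	unsigned int order = 2;

	unsigned long buddy_idx = find_buddy_index(page_idx, order);	/* 8 */
	unsigned long combined_idx = buddy_idx & page_idx;		/* 8 */

	/* The merged order-3 block starts at index 8 and covers 8..15. */
	assert(buddy_idx == 8 && combined_idx == 8);
	printf("buddy=%lu combined=%lu\n", buddy_idx, combined_idx);
	return 0;
}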
@@ -651,13 +647,10 @@
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0; i < (1 << order); i++) {
-		struct page *pg = page + i;
-
-		if (PageAnon(pg))
-			pg->mapping = NULL;
-		bad += free_pages_check(pg);
-	}
+	if (PageAnon(page))
+		page->mapping = NULL;
+	for (i = 0; i < (1 << order); i++)
+		bad += free_pages_check(page + i);
 	if (bad)
 		return false;
 
@@ -1460,24 +1453,24 @@
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
 /*
- * Return 1 if free pages are above 'mark'. This takes into account the order
+ * Return true if free pages are above 'mark'. This takes into account the order
  * of the allocation.
  */
-int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
-		      int classzone_idx, int alloc_flags)
+static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags, long free_pages)
 {
 	/* free_pages may go negative - that's OK */
 	long min = mark;
-	long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
 	int o;
 
+	free_pages -= (1 << order) + 1;
 	if (alloc_flags & ALLOC_HIGH)
 		min -= min / 2;
 	if (alloc_flags & ALLOC_HARDER)
 		min -= min / 4;
 
 	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
-		return 0;
+		return false;
 	for (o = 0; o < order; o++) {
 		/* At the next order, this order's pages become unavailable */
 		free_pages -= z->free_area[o].nr_free << o;
@@ -1486,9 +1479,28 @@
 		min >>= 1;
 
 		if (free_pages <= min)
-			return 0;
+			return false;
 	}
-	return 1;
+	return true;
+}
+
+bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+					zone_page_state(z, NR_FREE_PAGES));
+}
+
+bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	long free_pages = zone_page_state(z, NR_FREE_PAGES);
+
+	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
+		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
+
+	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+								free_pages);
 }
 
 #ifdef CONFIG_NUMA
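
To make the shape of __zone_watermark_ok() easier to follow, here is a standalone sketch of the order loop. It deliberately drops the lowmem_reserve term and the ALLOC_HIGH/ALLOC_HARDER adjustments, and the free_area contents are invented; the point is only that each lower order's pages stop counting while the required minimum halves.

#include <stdbool.h>
#include <stdio.h>

static bool watermark_ok(int order, long mark, long free_pages, const long *nr_free)
{
	long min = mark;
	int o;

	free_pages -= (1L << order) + 1;	/* discount the allocation itself, as above */
	if (free_pages <= min)
		return false;
	for (o = 0; o < order; o++) {
		free_pages -= nr_free[o] << o;	/* order-o pages can't serve this request */
		min >>= 1;
		if (free_pages <= min)
			return false;
	}
	return true;
}

int main(void)
{
	long nr_free[4] = { 100, 20, 5, 2 };		/* free blocks per order, invented */
	long free_pages = 100 + 20 * 2 + 5 * 4 + 2 * 8;	/* 176 pages in total */

	/* Plenty of order-0 pages, but too few high-order blocks: fails. */
	printf("order-3 ok: %d\n", watermark_ok(3, 64, free_pages, nr_free));
	return 0;
}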
@@ -1793,15 +1805,18 @@
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	struct page *page;
 
 	if (!order || compaction_deferred(preferred_zone))
 		return NULL;
 
+	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-								nodemask);
+						nodemask, sync_migration);
+	current->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
 		/* Page migration frees to the PCP lists but we want merging */
@@ -1837,7 +1852,8 @@
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	return NULL;
 }
@@ -1852,23 +1868,22 @@
 {
 	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	struct task_struct *p = current;
 	bool drained = false;
 
 	cond_resched();
 
 	/* We now go into synchronous reclaim */
 	cpuset_memory_pressure_bump();
-	p->flags |= PF_MEMALLOC;
+	current->flags |= PF_MEMALLOC;
 	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
-	p->reclaim_state = &reclaim_state;
+	current->reclaim_state = &reclaim_state;
 
 	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
-	p->reclaim_state = NULL;
+	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
-	p->flags &= ~PF_MEMALLOC;
+	current->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
 
@@ -1920,19 +1935,19 @@
 
 static inline
 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
-						enum zone_type high_zoneidx)
+						enum zone_type high_zoneidx,
+						enum zone_type classzone_idx)
 {
 	struct zoneref *z;
 	struct zone *zone;
 
 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-		wakeup_kswapd(zone, order);
+		wakeup_kswapd(zone, order, classzone_idx);
 }
 
 static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
-	struct task_struct *p = current;
 	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 	const gfp_t wait = gfp_mask & __GFP_WAIT;
 
@@ -1948,18 +1963,23 @@
 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
 	if (!wait) {
-		alloc_flags |= ALLOC_HARDER;
+		/*
+		 * Not worth trying to allocate harder for
+		 * __GFP_NOMEMALLOC even if it can't schedule.
+		 */
+		if  (!(gfp_mask & __GFP_NOMEMALLOC))
+			alloc_flags |= ALLOC_HARDER;
 		/*
 		 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
 		 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(p)) && !in_interrupt())
+	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 		if (!in_interrupt() &&
-		    ((p->flags & PF_MEMALLOC) ||
+		    ((current->flags & PF_MEMALLOC) ||
 		     unlikely(test_thread_flag(TIF_MEMDIE))))
 			alloc_flags |= ALLOC_NO_WATERMARKS;
 	}
@@ -1978,7 +1998,7 @@
 	int alloc_flags;
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
-	struct task_struct *p = current;
+	bool sync_migration = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2003,7 +2023,9 @@
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx);
+	if (!(gfp_mask & __GFP_NO_KSWAPD))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+						zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2034,21 +2056,26 @@
 		goto nopage;
 
 	/* Avoid recursion of direct reclaim */
-	if (p->flags & PF_MEMALLOC)
+	if (current->flags & PF_MEMALLOC)
 		goto nopage;
 
 	/* Avoid allocations with no watermarks from looping endlessly */
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
-	/* Try direct compaction */
+	/*
+	 * Try direct compaction. The first pass is asynchronous. Subsequent
+	 * attempts after direct reclaim are synchronous.
+	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
+	sync_migration = true;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2102,13 +2129,27 @@
 		/* Wait for some write requests to complete then retry */
 		wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 		goto rebalance;
+	} else {
+		/*
+		 * High-order allocations do not necessarily loop after
+		 * direct reclaim, and reclaim/compaction depends on compaction
+		 * being called after reclaim, so call it directly if necessary.
+		 */
+		page = __alloc_pages_direct_compact(gfp_mask, order,
+					zonelist, high_zoneidx,
+					nodemask,
+					alloc_flags, preferred_zone,
+					migratetype, &did_some_progress,
+					sync_migration);
+		if (page)
+			goto got_pg;
 	}
 
 nopage:
 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
 		printk(KERN_WARNING "%s: page allocation failure."
 			" order:%d, mode:0x%x\n",
-			p->comm, order, gfp_mask);
+			current->comm, order, gfp_mask);
 		dump_stack();
 		show_mem();
 	}
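
The sync_migration flag above only changes when compaction is allowed to block: the first compaction attempt is asynchronous, and attempts made after direct reclaim are synchronous. A toy sketch of that ordering, with stand-in functions rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

static bool try_compact(bool sync) { printf("compact (sync=%d)\n", sync); return false; }
static bool try_reclaim(void)      { printf("direct reclaim\n");          return false; }

int main(void)
{
	bool sync_migration = false;

	if (try_compact(sync_migration))	/* first pass: asynchronous */
		return 0;
	sync_migration = true;

	if (try_reclaim())			/* then direct reclaim */
		return 0;

	try_compact(sync_migration);		/* later passes may block */
	return 0;
}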
@@ -2442,7 +2483,7 @@
 			" all_unreclaimable? %s"
 			"\n",
 			zone->name,
-			K(zone_nr_free_pages(zone)),
+			K(zone_page_state(zone, NR_FREE_PAGES)),
 			K(min_wmark_pages(zone)),
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
@@ -2585,9 +2626,16 @@
 
 static __init int setup_numa_zonelist_order(char *s)
 {
-	if (s)
-		return __parse_numa_zonelist_order(s);
-	return 0;
+	int ret;
+
+	if (!s)
+		return 0;
+
+	ret = __parse_numa_zonelist_order(s);
+	if (ret == 0)
+		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
+
+	return ret;
 }
 early_param("numa_zonelist_order", setup_numa_zonelist_order);
 
@@ -4014,7 +4062,7 @@
 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
@@ -5517,7 +5565,6 @@
 	{1UL << PG_swapcache,		"swapcache"	},
 	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
 	{1UL << PG_reclaim,		"reclaim"	},
-	{1UL << PG_buddy,		"buddy"		},
 	{1UL << PG_swapbacked,		"swapbacked"	},
 	{1UL << PG_unevictable,		"unevictable"	},
 #ifdef CONFIG_MMU
@@ -5565,7 +5612,7 @@
 {
 	printk(KERN_ALERT
 	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
-		page, page_count(page), page_mapcount(page),
+		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
 }
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 38cc58b..7cfa6ae 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -34,6 +34,7 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		split_huge_page_pmd(walk->mm, pmd);
 		if (pmd_none_or_clear_bad(pmd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 7d9c1d0..ea53496 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -421,7 +421,7 @@
 		return NULL;
 
 	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
-				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
+				pcpu_nr_groups, pcpu_atom_size);
 	if (!vms) {
 		pcpu_free_chunk(chunk);
 		return NULL;
diff --git a/mm/percpu.c b/mm/percpu.c
index 02ba912..3f93001 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -258,7 +258,7 @@
 
 /*
  * (Un)populated page region iterators.  Iterate over (un)populated
- * page regions betwen @start and @end in @chunk.  @rs and @re should
+ * page regions between @start and @end in @chunk.  @rs and @re should
  * be integer variables and will be set to start and end page index of
  * the current region.
  */
@@ -293,12 +293,8 @@
 
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
-	else {
-		void *ptr = vmalloc(size);
-		if (ptr)
-			memset(ptr, 0, size);
-		return ptr;
-	}
+	else
+		return vzalloc(size);
 }
 
 /**
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
new file mode 100644
index 0000000..d030548
--- /dev/null
+++ b/mm/pgtable-generic.c
@@ -0,0 +1,123 @@
+/*
+ *  mm/pgtable-generic.c
+ *
+ *  Generic pgtable methods declared in asm-generic/pgtable.h
+ *
+ *  Copyright (C) 2010  Linus Torvalds
+ */
+
+#include <asm/tlb.h>
+#include <asm-generic/pgtable.h>
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Only sets the access flags (dirty, accessed, and
+ * writable). Furthermore, we know it always gets set to a "more
+ * permissive" setting, which allows most architectures to optimize
+ * this. We return whether the PTE actually changed, which in turn
+ * instructs the caller to do things like update_mmu_cache().  This
+ * used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c so we changed this macro slightly
+ */
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	int changed = !pte_same(*ptep, entry);
+	if (changed) {
+		set_pte_at(vma->vm_mm, address, ptep, entry);
+		flush_tlb_page(vma, address);
+	}
+	return changed;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	int changed = !pmd_same(*pmdp, entry);
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	if (changed) {
+		set_pmd_at(vma->vm_mm, address, pmdp, entry);
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+	return changed;
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+	BUG();
+	return 0;
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep)
+{
+	int young;
+	young = ptep_test_and_clear_young(vma, address, ptep);
+	if (young)
+		flush_tlb_page(vma, address);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp)
+{
+	int young;
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	young = pmdp_test_and_clear_young(vma, address, pmdp);
+	if (young)
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pte_t *ptep)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
+	flush_tlb_page(vma, address);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pmd_t *pmdp)
+{
+	pmd_t pmd;
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	return pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmdp)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	/* tlb flush only to serialize against gup-fast */
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+#endif
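
Most helpers in this new file follow one pattern: compare the current entry with the proposed one, and only write it back and flush the TLB when they actually differ, returning whether anything changed. A generic sketch of that pattern with plain integers (flush() is just a stand-in for the TLB flush):

#include <stdbool.h>
#include <stdio.h>

static void flush(void) { printf("flush\n"); }

static bool set_if_changed(unsigned long *slot, unsigned long entry)
{
	bool changed = (*slot != entry);

	if (changed) {
		*slot = entry;
		flush();	/* only pay for the flush when something changed */
	}
	return changed;
}

int main(void)
{
	unsigned long pte = 0x1;

	printf("%d\n", set_if_changed(&pte, 0x3));	/* 1: changed and flushed */
	printf("%d\n", set_if_changed(&pte, 0x3));	/* 0: no change, no flush */
	return 0;
}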
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a8bf76..f21f4a1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -94,7 +94,7 @@
  * anonymous pages mapped into it with that anon_vma.
  *
  * The common case will be that we already have one, but if
- * if not we either need to find an adjacent mapping that we
+ * not we either need to find an adjacent mapping that we
  * can re-use the anon_vma from (very common when the only
  * reason for splitting a vma has been mprotect()), or we
  * allocate a new one.
@@ -177,6 +177,10 @@
 	list_add(&avc->same_vma, &vma->anon_vma_chain);
 
 	anon_vma_lock(anon_vma);
+	/*
+	 * It's critical to add new vmas to the tail of the anon_vma,
+	 * see comment in huge_memory.c:__split_huge_page().
+	 */
 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 	anon_vma_unlock(anon_vma);
 }
@@ -360,7 +364,7 @@
  * Returns virtual address or -EFAULT if page's index/offset is not
  * within the range mapped the @vma.
  */
-static inline unsigned long
+inline unsigned long
 vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -435,6 +439,8 @@
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_trans_huge(*pmd))
+		return NULL;
 
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
@@ -489,35 +495,17 @@
 			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
 	int referenced = 0;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
-	if (!pte)
-		goto out;
-
 	/*
 	 * Don't want to elevate referenced for mlocked page that gets this far,
 	 * in order that it progresses to try_to_unmap and is moved to the
 	 * unevictable list.
 	 */
 	if (vma->vm_flags & VM_LOCKED) {
-		*mapcount = 1;	/* break early from loop */
+		*mapcount = 0;	/* break early from loop */
 		*vm_flags |= VM_LOCKED;
-		goto out_unmap;
-	}
-
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
-		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such.  If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
-		 */
-		if (likely(!VM_SequentialReadHint(vma)))
-			referenced++;
+		goto out;
 	}
 
 	/* Pretend the page is referenced if the task has the
@@ -526,9 +514,39 @@
 			rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
-out_unmap:
+	if (unlikely(PageTransHuge(page))) {
+		pmd_t *pmd;
+
+		spin_lock(&mm->page_table_lock);
+		pmd = page_check_address_pmd(page, mm, address,
+					     PAGE_CHECK_ADDRESS_PMD_FLAG);
+		if (pmd && !pmd_trans_splitting(*pmd) &&
+		    pmdp_clear_flush_young_notify(vma, address, pmd))
+			referenced++;
+		spin_unlock(&mm->page_table_lock);
+	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+
+		pte = page_check_address(page, mm, address, &ptl, 0);
+		if (!pte)
+			goto out;
+
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such.  If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!VM_SequentialReadHint(vma)))
+				referenced++;
+		}
+		pte_unmap_unlock(pte, ptl);
+	}
+
 	(*mapcount)--;
-	pte_unmap_unlock(pte, ptl);
 
 	if (referenced)
 		*vm_flags |= vma->vm_flags;
@@ -864,8 +882,13 @@
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
-	if (first)
-		__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (first) {
+		if (!PageTransHuge(page))
+			__inc_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__inc_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
+	}
 	if (unlikely(PageKsm(page)))
 		return;
 
@@ -893,7 +916,10 @@
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-	__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (!PageTransHuge(page))
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	else
+		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -911,7 +937,7 @@
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, 1);
+		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 }
 
@@ -946,10 +972,14 @@
 		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
-		__dec_zone_page_state(page, NR_ANON_PAGES);
+		if (!PageTransHuge(page))
+			__dec_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__dec_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, -1);
+		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
@@ -1202,7 +1232,7 @@
 	return ret;
 }
 
-static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 
@@ -1400,6 +1430,7 @@
 	int ret;
 
 	BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
 	if (unlikely(PageKsm(page)))
 		ret = try_to_unmap_ksm(page, flags);
diff --git a/mm/shmem.c b/mm/shmem.c
index 47fdeeb..5ee67c9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2415,13 +2415,20 @@
 	return &p->vfs_inode;
 }
 
+static void shmem_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+}
+
 static void shmem_destroy_inode(struct inode *inode)
 {
 	if ((inode->i_mode & S_IFMT) == S_IFREG) {
 		/* only struct inode is valid if it's an inline symlink */
 		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 	}
-	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+	call_rcu(&inode->i_rcu, shmem_i_callback);
 }
 
 static void init_once(void *foo)
diff --git a/mm/slab.c b/mm/slab.c
index b1e40da..2640374 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -829,12 +829,12 @@
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(slab_reap_node);
+	int node = __this_cpu_read(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(slab_reap_node) = node;
+	__this_cpu_write(slab_reap_node, node);
 }
 
 #else
@@ -1012,7 +1012,7 @@
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(slab_reap_node);
+	int node = __this_cpu_read(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1293,7 +1293,7 @@
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		*/
-		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
+		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
 		per_cpu(slab_reap_work, cpu).work.func = NULL;
   		break;
@@ -2781,7 +2781,7 @@
 /*
  * Map pages beginning at addr to the given cache and slab. This is required
  * for the slab allocator to be able to lookup the cache and slab of a
- * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ * virtual address for kfree, ksize, and slab debugging.
  */
 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 			   void *addr)
@@ -3653,43 +3653,20 @@
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
-/**
- * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
- * @cachep: the cache we're checking against
- * @ptr: pointer to validate
- *
- * This verifies that the untrusted pointer looks sane;
- * it is _not_ a guarantee that the pointer is actually
- * part of the slab cache in question, but it at least
- * validates that the pointer can be dereferenced and
- * looks half-way sane.
- *
- * Currently only used for dentry validation.
- */
-int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
-{
-	unsigned long size = cachep->buffer_size;
-	struct page *page;
-
-	if (unlikely(!kern_ptr_validate(ptr, size)))
-		goto out;
-	page = virt_to_page(ptr);
-	if (unlikely(!PageSlab(page)))
-		goto out;
-	if (unlikely(page_get_cache(page) != cachep))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
@@ -3705,31 +3682,32 @@
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
diff --git a/mm/slob.c b/mm/slob.c
index 617b6d6..3588eaa 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -678,11 +678,6 @@
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-int kmem_ptr_validate(struct kmem_cache *a, const void *b)
-{
-	return 0;
-}
-
 static unsigned int slob_ready __read_mostly;
 
 int slab_is_available(void)
diff --git a/mm/slub.c b/mm/slub.c
index bec0e35..c7ef007 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
@@ -1774,11 +1776,21 @@
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
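
The *_trace() variants above follow a simple pattern: perform the allocation, then emit a tracepoint recording the caller, the returned pointer, the requested size and the size actually handed out. A userspace analogue of that pattern; trace_kmalloc() here is just a printf() and the power-of-two bucketing is an assumption for the example, not the SLUB sizing code:

#include <stdio.h>
#include <stdlib.h>

static void trace_kmalloc(const void *caller, const void *ptr,
			  size_t req, size_t alloc)
{
	printf("caller=%p ptr=%p req=%zu alloc=%zu\n", caller, ptr, req, alloc);
}

static void *kmalloc_trace(size_t size)
{
	size_t bucket = 1;

	while (bucket < size)		/* next power-of-two bucket (illustrative) */
		bucket <<= 1;

	void *ret = malloc(bucket);
	trace_kmalloc(__builtin_return_address(0), ret, size, bucket);
	return ret;
}

int main(void)
{
	void *p = kmalloc_trace(100);	/* reports req=100 alloc=128 */
	free(p);
	return 0;
}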
@@ -1794,13 +1806,17 @@
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
 
@@ -1917,17 +1933,6 @@
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab page the object resides */
-static struct page *get_object_page(const void *x)
-{
-	struct page *page = virt_to_head_page(x);
-
-	if (!PageSlab(page))
-		return NULL;
-
-	return page;
-}
-
 /*
  * Object placement in a slab is made very easy because we always start at
  * offset 0. If we tune the size of the object to the alignment then we can
@@ -2386,35 +2391,6 @@
 }
 
 /*
- * Check if a given pointer is valid
- */
-int kmem_ptr_validate(struct kmem_cache *s, const void *object)
-{
-	struct page *page;
-
-	if (!kern_ptr_validate(object, s->size))
-		return 0;
-
-	page = get_object_page(object);
-
-	if (!page || s != page->slab)
-		/* No slab or wrong slab */
-		return 0;
-
-	if (!check_valid_pointer(s, page, object))
-		return 0;
-
-	/*
-	 * We could also check if the object is on the slabs freelist.
-	 * But this would be too expensive and it seems that the main
-	 * purpose of kmem_ptr_valid() is to check if the object belongs
-	 * to a certain slab.
-	 */
-	return 1;
-}
-EXPORT_SYMBOL(kmem_ptr_validate);
-
-/*
  * Determine the size of a slab object
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
@@ -3660,7 +3636,7 @@
 		len += sprintf(buf + len, "%7ld ", l->count);
 
 		if (l->addr)
-			len += sprint_symbol(buf + len, (unsigned long)l->addr);
+			len += sprintf(buf + len, "%pS", (void *)l->addr);
 		else
 			len += sprintf(buf + len, "<not-available>");
 
@@ -3970,12 +3946,9 @@
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
-	if (s->ctor) {
-		int n = sprint_symbol(buf, (unsigned long)s->ctor);
-
-		return n + sprintf(buf + n, "\n");
-	}
-	return 0;
+	if (!s->ctor)
+		return 0;
+	return sprintf(buf, "%pS\n", s->ctor);
 }
 SLAB_ATTR_RO(ctor);
 
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 29d6cbf..64b9840 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -9,7 +9,7 @@
  *
  * However, virtual mappings need a page table and TLBs. Many Linux
  * architectures already map their physical space using 1-1 mappings
- * via TLBs. For those arches the virtual memmory map is essentially
+ * via TLBs. For those arches the virtual memory map is essentially
  * for free if we use the same page size as the 1-1 mappings. In that
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
diff --git a/mm/sparse.c b/mm/sparse.c
index 95ac219..9325020 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -671,10 +671,10 @@
 static void free_map_bootmem(struct page *page, unsigned long nr_pages)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	int magic;
+	unsigned long magic;
 
 	for (i = 0; i < nr_pages; i++, page++) {
-		magic = atomic_read(&page->_mapcount);
+		magic = (unsigned long) page->lru.next;
 
 		BUG_ON(magic == NODE_INFO);
 
diff --git a/mm/swap.c b/mm/swap.c
index 3f48542..bbc1ce9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -56,17 +56,97 @@
 		del_page_from_lru(zone, page);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
+}
+
+static void __put_single_page(struct page *page)
+{
+	__page_cache_release(page);
 	free_hot_cold_page(page, 0);
 }
 
+static void __put_compound_page(struct page *page)
+{
+	compound_page_dtor *dtor;
+
+	__page_cache_release(page);
+	dtor = get_compound_page_dtor(page);
+	(*dtor)(page);
+}
+
 static void put_compound_page(struct page *page)
 {
-	page = compound_head(page);
-	if (put_page_testzero(page)) {
-		compound_page_dtor *dtor;
-
-		dtor = get_compound_page_dtor(page);
-		(*dtor)(page);
+	if (unlikely(PageTail(page))) {
+		/* __split_huge_page_refcount can run under us */
+		struct page *page_head = page->first_page;
+		smp_rmb();
+		/*
+		 * If PageTail is still set after smp_rmb() we can be sure
+		 * that the page->first_page we read wasn't a dangling pointer.
+		 * See __split_huge_page_refcount() smp_wmb().
+		 */
+		if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
+			unsigned long flags;
+			/*
+			 * Verify that our page_head wasn't converted
+			 * to a regular page before we got a
+			 * reference on it.
+			 */
+			if (unlikely(!PageHead(page_head))) {
+				/* PageHead is cleared after PageTail */
+				smp_rmb();
+				VM_BUG_ON(PageTail(page));
+				goto out_put_head;
+			}
+			/*
+			 * Only run compound_lock on a valid PageHead,
+			 * after having it pinned with
+			 * get_page_unless_zero() above.
+			 */
+			smp_mb();
+			/* page_head wasn't a dangling pointer */
+			flags = compound_lock_irqsave(page_head);
+			if (unlikely(!PageTail(page))) {
+				/* __split_huge_page_refcount run before us */
+				compound_unlock_irqrestore(page_head, flags);
+				VM_BUG_ON(PageHead(page_head));
+			out_put_head:
+				if (put_page_testzero(page_head))
+					__put_single_page(page_head);
+			out_put_single:
+				if (put_page_testzero(page))
+					__put_single_page(page);
+				return;
+			}
+			VM_BUG_ON(page_head != page->first_page);
+			/*
+			 * We can release the refcount taken by
+			 * get_page_unless_zero now that
+			 * split_huge_page_refcount is blocked on the
+			 * compound_lock.
+			 */
+			if (put_page_testzero(page_head))
+				VM_BUG_ON(1);
+			/* __split_huge_page_refcount will wait now */
+			VM_BUG_ON(atomic_read(&page->_count) <= 0);
+			atomic_dec(&page->_count);
+			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+			compound_unlock_irqrestore(page_head, flags);
+			if (put_page_testzero(page_head)) {
+				if (PageHead(page_head))
+					__put_compound_page(page_head);
+				else
+					__put_single_page(page_head);
+			}
+		} else {
+			/* page_head is a dangling pointer */
+			VM_BUG_ON(PageTail(page));
+			goto out_put_single;
+		}
+	} else if (put_page_testzero(page)) {
+		if (PageHead(page))
+			__put_compound_page(page);
+		else
+			__put_single_page(page);
 	}
 }
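
put_compound_page() above leans on get_page_unless_zero() to pin a page_head that might be freed (or split) underneath it. A userspace sketch of that primitive using C11 atomics; the counter values are invented and no kernel types are involved:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 0) {
		/* On failure 'old' is reloaded, so a count that drops to zero
		 * while we spin is never incremented back to life. */
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int live = 2, dying = 0;

	printf("%d %d\n", get_unless_zero(&live), get_unless_zero(&dying));	/* 1 0 */
	return 0;
}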
 
@@ -75,7 +155,7 @@
 	if (unlikely(PageCompound(page)))
 		put_compound_page(page);
 	else if (put_page_testzero(page))
-		__page_cache_release(page);
+		__put_single_page(page);
 }
 EXPORT_SYMBOL(put_page);
 
@@ -98,15 +178,13 @@
 }
 EXPORT_SYMBOL(put_pages_list);
 
-/*
- * pagevec_move_tail() must be called with IRQ disabled.
- * Otherwise this may cause nasty races.
- */
-static void pagevec_move_tail(struct pagevec *pvec)
+static void pagevec_lru_move_fn(struct pagevec *pvec,
+				void (*move_fn)(struct page *page, void *arg),
+				void *arg)
 {
 	int i;
-	int pgmoved = 0;
 	struct zone *zone = NULL;
+	unsigned long flags = 0;
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -114,29 +192,49 @@
 
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock(&zone->lru_lock);
+				spin_unlock_irqrestore(&zone->lru_lock, flags);
 			zone = pagezone;
-			spin_lock(&zone->lru_lock);
+			spin_lock_irqsave(&zone->lru_lock, flags);
 		}
-		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-			int lru = page_lru_base_type(page);
-			list_move_tail(&page->lru, &zone->lru[lru].list);
-			pgmoved++;
-		}
+
+		(*move_fn)(page, arg);
 	}
 	if (zone)
-		spin_unlock(&zone->lru_lock);
-	__count_vm_events(PGROTATED, pgmoved);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
 	pagevec_reinit(pvec);
 }
 
+static void pagevec_move_tail_fn(struct page *page, void *arg)
+{
+	int *pgmoved = arg;
+	struct zone *zone = page_zone(page);
+
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		int lru = page_lru_base_type(page);
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		(*pgmoved)++;
+	}
+}
+
+/*
+ * pagevec_move_tail() must be called with IRQ disabled.
+ * Otherwise this may cause nasty races.
+ */
+static void pagevec_move_tail(struct pagevec *pvec)
+{
+	int pgmoved = 0;
+
+	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
+	__count_vm_events(PGROTATED, pgmoved);
+}
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
  * inactive list.
  */
-void  rotate_reclaimable_page(struct page *page)
+void rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
 	    !PageUnevictable(page) && PageLRU(page)) {
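
pagevec_lru_move_fn() above factors the common batching shape out of pagevec_move_tail() and ____pagevec_lru_add(): walk the batch, retake the per-zone LRU lock only when the zone changes, and apply a caller-supplied callback to each page. A simplified userspace sketch of that shape; the mutex stands in for the irq-safe spinlock and the types are made up:

#include <pthread.h>
#include <stdio.h>

struct zone { pthread_mutex_t lock; int id; };
struct page { struct zone *zone; int id; };

static void move_fn(struct page *page, void *arg)
{
	(*(int *)arg)++;
	printf("moved page %d in zone %d\n", page->id, page->zone->id);
}

static void batch_move(struct page **pages, int nr,
		       void (*fn)(struct page *, void *), void *arg)
{
	struct zone *locked = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		struct zone *z = pages[i]->zone;

		if (z != locked) {		/* relock only on a zone change */
			if (locked)
				pthread_mutex_unlock(&locked->lock);
			locked = z;
			pthread_mutex_lock(&locked->lock);
		}
		fn(pages[i], arg);
	}
	if (locked)
		pthread_mutex_unlock(&locked->lock);
}

int main(void)
{
	struct zone z0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct zone z1 = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct page p0 = { &z0, 0 }, p1 = { &z0, 1 }, p2 = { &z1, 2 };
	struct page *batch[] = { &p0, &p1, &p2 };
	int moved = 0;

	batch_move(batch, 3, move_fn, &moved);
	printf("%d pages moved\n", moved);
	return 0;
}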
@@ -173,27 +271,94 @@
 }
 
 /*
- * FIXME: speed this up?
+ * A page goes to the active list either via activate_page() or
+ * putback_lru_page(). In the activate_page() case the page does not have the
+ * active bit set; it might also not be on an LRU list because it was isolated
+ * before it got a chance to be moved to the active list. The window is small
+ * because a pagevec only stores a few pages. For such a page we do nothing.
+ * In the putback_lru_page() case the page is not on an LRU list but does have
+ * the active bit set.
  */
+static void __activate_page(struct page *page, void *arg)
+{
+	struct zone *zone = page_zone(page);
+	int file = page_is_file_cache(page);
+	int lru = page_lru_base_type(page);
+	bool putback = !PageLRU(page);
+
+	/* The page is isolated before it's moved to active list */
+	if (!PageLRU(page) && !PageActive(page))
+		return;
+	if ((PageLRU(page) && PageActive(page)) || PageUnevictable(page))
+		return;
+
+	if (!putback)
+		del_page_from_lru_list(zone, page, lru);
+	else
+		SetPageLRU(page);
+
+	SetPageActive(page);
+	lru += LRU_ACTIVE;
+	add_page_to_lru_list(zone, page, lru);
+
+	if (putback)
+		return;
+	__count_vm_event(PGACTIVATE);
+	update_page_reclaim_stat(zone, page, file, 1);
+}
+
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+
+static void activate_page_drain(int cpu)
+{
+	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
+
+	if (pagevec_count(pvec))
+		pagevec_lru_move_fn(pvec, __activate_page, NULL);
+}
+
+void activate_page(struct page *page)
+{
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+
+		page_cache_get(page);
+		if (!pagevec_add(pvec, page))
+			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+		put_cpu_var(activate_page_pvecs);
+	}
+}
+
+/* Caller should hold zone->lru_lock */
+int putback_active_lru_page(struct zone *zone, struct page *page)
+{
+	struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+
+	if (!pagevec_add(pvec, page)) {
+		spin_unlock_irq(&zone->lru_lock);
+		pagevec_lru_move_fn(pvec, __activate_page, NULL);
+		spin_lock_irq(&zone->lru_lock);
+	}
+	put_cpu_var(activate_page_pvecs);
+	return 1;
+}
+
+#else
+static inline void activate_page_drain(int cpu)
+{
+}
+
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
 
 	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		int file = page_is_file_cache(page);
-		int lru = page_lru_base_type(page);
-		del_page_from_lru_list(zone, page, lru);
-
-		SetPageActive(page);
-		lru += LRU_ACTIVE;
-		add_page_to_lru_list(zone, page, lru);
-		__count_vm_event(PGACTIVATE);
-
-		update_page_reclaim_stat(zone, page, file, 1);
-	}
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page))
+		__activate_page(page, NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
+#endif
 
 /*
  * Mark a page as having seen activity.
@@ -292,6 +457,7 @@
 		pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
+	activate_page_drain(cpu);
 }
 
 void lru_add_drain(void)
@@ -399,44 +565,70 @@
 
 EXPORT_SYMBOL(__pagevec_release);
 
+/* used by __split_huge_page_refcount() */
+void lru_add_page_tail(struct zone* zone,
+		       struct page *page, struct page *page_tail)
+{
+	int active;
+	enum lru_list lru;
+	const int file = 0;
+	struct list_head *head;
+
+	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON(PageCompound(page_tail));
+	VM_BUG_ON(PageLRU(page_tail));
+	VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+
+	SetPageLRU(page_tail);
+
+	if (page_evictable(page_tail, NULL)) {
+		if (PageActive(page)) {
+			SetPageActive(page_tail);
+			active = 1;
+			lru = LRU_ACTIVE_ANON;
+		} else {
+			active = 0;
+			lru = LRU_INACTIVE_ANON;
+		}
+		update_page_reclaim_stat(zone, page_tail, file, active);
+		if (likely(PageLRU(page)))
+			head = page->lru.prev;
+		else
+			head = &zone->lru[lru].list;
+		__add_page_to_lru_list(zone, page_tail, lru, head);
+	} else {
+		SetPageUnevictable(page_tail);
+		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
+	}
+}
+
+static void ____pagevec_lru_add_fn(struct page *page, void *arg)
+{
+	enum lru_list lru = (enum lru_list)arg;
+	struct zone *zone = page_zone(page);
+	int file = is_file_lru(lru);
+	int active = is_active_lru(lru);
+
+	VM_BUG_ON(PageActive(page));
+	VM_BUG_ON(PageUnevictable(page));
+	VM_BUG_ON(PageLRU(page));
+
+	SetPageLRU(page);
+	if (active)
+		SetPageActive(page);
+	update_page_reclaim_stat(zone, page, file, active);
+	add_page_to_lru_list(zone, page, lru);
+}
+
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
  */
 void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
-	int i;
-	struct zone *zone = NULL;
-
 	VM_BUG_ON(is_unevictable_lru(lru));
 
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		struct zone *pagezone = page_zone(page);
-		int file;
-		int active;
-
-		if (pagezone != zone) {
-			if (zone)
-				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
-		}
-		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(PageUnevictable(page));
-		VM_BUG_ON(PageLRU(page));
-		SetPageLRU(page);
-		active = is_active_lru(lru);
-		file = is_file_lru(lru);
-		if (active)
-			SetPageActive(page);
-		update_page_reclaim_stat(zone, page, file, active);
-		add_page_to_lru_list(zone, page, lru);
-	}
-	if (zone)
-		spin_unlock_irq(&zone->lru_lock);
-	release_pages(pvec->pages, pvec->nr, pvec->cold);
-	pagevec_reinit(pvec);
+	pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
 }
 
 EXPORT_SYMBOL(____pagevec_lru_add);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e10f583..5c8cfab 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -157,6 +157,12 @@
 	if (!entry.val)
 		return 0;
 
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page))) {
+			swapcache_free(entry, NULL);
+			return 0;
+		}
+
 	/*
 	 * Radix-tree node allocations from PF_MEMALLOC contexts could
 	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 67ddaaf..07a458d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -964,6 +964,8 @@
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
+		if (unlikely(pmd_trans_huge(*pmd)))
+			continue;
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
@@ -1677,7 +1679,7 @@
 	if (S_ISBLK(inode->i_mode)) {
 		struct block_device *bdev = I_BDEV(inode);
 		set_blocksize(bdev, p->old_block_size);
-		bd_release(bdev);
+		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 	} else {
 		mutex_lock(&inode->i_mutex);
 		inode->i_flags &= ~S_SWAPFILE;
@@ -1939,7 +1941,8 @@
 	error = -EINVAL;
 	if (S_ISBLK(inode->i_mode)) {
 		bdev = I_BDEV(inode);
-		error = bd_claim(bdev, sys_swapon);
+		error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
+				   sys_swapon);
 		if (error < 0) {
 			bdev = NULL;
 			error = -EINVAL;
@@ -2136,7 +2139,7 @@
 bad_swap:
 	if (bdev) {
 		set_blocksize(bdev, p->old_block_size);
-		bd_release(bdev);
+		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 	}
 	destroy_swap_extents(p);
 	swap_cgroup_swapoff(type);
diff --git a/mm/util.c b/mm/util.c
index 73dac81..f126975 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -186,27 +186,6 @@
 }
 EXPORT_SYMBOL(kzfree);
 
-int kern_ptr_validate(const void *ptr, unsigned long size)
-{
-	unsigned long addr = (unsigned long)ptr;
-	unsigned long min_addr = PAGE_OFFSET;
-	unsigned long align_mask = sizeof(void *) - 1;
-
-	if (unlikely(addr < min_addr))
-		goto out;
-	if (unlikely(addr > (unsigned long)high_memory - size))
-		goto out;
-	if (unlikely(addr & align_mask))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr)))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr + size - 1)))
-		goto out;
-	return 1;
-out:
-	return 0;
-}
-
 /*
  * strndup_user - duplicate an existing string from user space
  * @s: The string to duplicate
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eb5cc7d..f9b1667 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -748,7 +748,7 @@
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask);
-	if (unlikely(IS_ERR(va))) {
+	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
 	}
@@ -1175,6 +1175,7 @@
 {
 	vunmap_page_range(addr, addr + size);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
 
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
@@ -1315,13 +1316,6 @@
 						-1, GFP_KERNEL, caller);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
-				   int node, gfp_t gfp_mask)
-{
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, __builtin_return_address(0));
-}
-
 static struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;
@@ -1537,17 +1531,47 @@
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
+/**
+ *	__vmalloc_node_range  -  allocate virtually contiguous memory
+ *	@size:		allocation size
+ *	@align:		desired alignment
+ *	@start:		vm area range start
+ *	@end:		vm area range end
+ *	@gfp_mask:	flags for the page level allocator
+ *	@prot:		protection mask for the allocated pages
+ *	@node:		node to use for allocation or -1
+ *	@caller:	caller's return address
+ *
+ *	Allocate enough pages to cover @size from the page level
+ *	allocator with @gfp_mask flags.  Map them into contiguous
+ *	kernel virtual space, using a pagetable protection of @prot.
+ */
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
+	struct vm_struct *area;
+	void *addr;
+	unsigned long real_size = size;
+
+	size = PAGE_ALIGN(size);
+	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+		return NULL;
+
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
+
+	if (!area)
+		return NULL;
+
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 
 	/*
 	 * A ref_count = 3 is needed because the vm_struct and vmap_area
 	 * structures allocated in the __get_vm_area_node() function contain
 	 * references to the virtual address of the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 3, gfp_mask);
 
 	return addr;
 }
@@ -1569,30 +1593,8 @@
 			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, void *caller)
 {
-	struct vm_struct *area;
-	void *addr;
-	unsigned long real_size = size;
-
-	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
-		return NULL;
-
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
-
-	if (!area)
-		return NULL;
-
-	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
-
-	return addr;
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				gfp_mask, prot, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
@@ -2203,17 +2205,16 @@
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  *	    vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas.  This function allocates
- * congruent vmalloc areas for it.  These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes.  To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes.  To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple.  It
  * does everything top-down and scans areas from the end looking for
@@ -2224,7 +2225,7 @@
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2234,8 +2235,6 @@
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2268,14 +2267,14 @@
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}
@@ -2456,13 +2455,8 @@
 	seq_printf(m, "0x%p-0x%p %7ld",
 		v->addr, v->addr + v->size, v->size);
 
-	if (v->caller) {
-		char buff[KSYM_SYMBOL_LEN];
-
-		seq_putc(m, ' ');
-		sprint_symbol(buff, (unsigned long)v->caller);
-		seq_puts(m, buff);
-	}
+	if (v->caller)
+		seq_printf(m, " %pS", v->caller);
 
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9ca587c..99999a9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -32,6 +32,7 @@
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
+#include <linux/compaction.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 #include <linux/delay.h>
@@ -40,6 +41,7 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/compaction.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -51,11 +53,23 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/vmscan.h>
 
-enum lumpy_mode {
-	LUMPY_MODE_NONE,
-	LUMPY_MODE_ASYNC,
-	LUMPY_MODE_SYNC,
-};
+/*
+ * reclaim_mode determines how the inactive list is shrunk
+ * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
+ * RECLAIM_MODE_ASYNC:  Do not block
+ * RECLAIM_MODE_SYNC:   Allow blocking e.g. call wait_on_page_writeback
+ * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
+ *			page from the LRU and reclaim all pages within a
+ *			naturally aligned range
+ * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
+ *			order-0 pages and then compact the zone
+ */
+typedef unsigned __bitwise__ reclaim_mode_t;
+#define RECLAIM_MODE_SINGLE		((__force reclaim_mode_t)0x01u)
+#define RECLAIM_MODE_ASYNC		((__force reclaim_mode_t)0x02u)
+#define RECLAIM_MODE_SYNC		((__force reclaim_mode_t)0x04u)
+#define RECLAIM_MODE_LUMPYRECLAIM	((__force reclaim_mode_t)0x08u)
+#define RECLAIM_MODE_COMPACTION		((__force reclaim_mode_t)0x10u)
 
 struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
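
The reclaim_mode_t flags above replace the old three-value enum with a bitmask: a base strategy (single, lumpy, or compaction) combined with an async/sync bit, tested with '&'. The __bitwise__/__force annotations are sparse type-checking markers and are dropped in this plain-C sketch of how the bits combine:

#include <stdio.h>

typedef unsigned int reclaim_mode_t;
#define RECLAIM_MODE_SINGLE		0x01u
#define RECLAIM_MODE_ASYNC		0x02u
#define RECLAIM_MODE_SYNC		0x04u
#define RECLAIM_MODE_LUMPYRECLAIM	0x08u
#define RECLAIM_MODE_COMPACTION		0x10u

int main(void)
{
	/* e.g. a costly allocation with compaction built in, on a later pass */
	reclaim_mode_t mode = RECLAIM_MODE_COMPACTION | RECLAIM_MODE_SYNC;

	if (mode & RECLAIM_MODE_SYNC)
		printf("may wait on page writeback\n");
	if (!(mode & RECLAIM_MODE_LUMPYRECLAIM))
		printf("page references are honoured (no lumpy reclaim)\n");
	return 0;
}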
@@ -88,7 +102,7 @@
 	 * Intend to reclaim enough continuous memory rather than reclaim
 	 * enough amount of memory. i.e, mode for high order allocation.
 	 */
-	enum lumpy_mode lumpy_reclaim_mode;
+	reclaim_mode_t reclaim_mode;
 
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
@@ -271,34 +285,37 @@
 	return ret;
 }
 
-static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc,
+static void set_reclaim_mode(int priority, struct scan_control *sc,
 				   bool sync)
 {
-	enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC;
+	reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
 
 	/*
-	 * Some reclaim have alredy been failed. No worth to try synchronous
-	 * lumpy reclaim.
+	 * Initially assume we are entering either lumpy reclaim or
+	 * reclaim/compaction. Depending on the order, we will either set the
+	 * sync mode or just reclaim order-0 pages later.
 	 */
-	if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
-		return;
+	if (COMPACTION_BUILD)
+		sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
+	else
+		sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
 
 	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
+	 * Avoid using lumpy reclaim or reclaim/compaction where possible by
+	 * restricting it to costly allocations or to reclaim under
+	 * memory pressure.
 	 */
 	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		sc->lumpy_reclaim_mode = mode;
+		sc->reclaim_mode |= syncmode;
 	else if (sc->order && priority < DEF_PRIORITY - 2)
-		sc->lumpy_reclaim_mode = mode;
+		sc->reclaim_mode |= syncmode;
 	else
-		sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+		sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
 
-static void disable_lumpy_reclaim_mode(struct scan_control *sc)
+static void reset_reclaim_mode(struct scan_control *sc)
 {
-	sc->lumpy_reclaim_mode = LUMPY_MODE_NONE;
+	sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
 }
 
 static inline int is_page_cache_freeable(struct page *page)
@@ -429,7 +446,7 @@
 		 * first attempt to free a range of pages fails.
 		 */
 		if (PageWriteback(page) &&
-		    sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC)
+		    (sc->reclaim_mode & RECLAIM_MODE_SYNC))
 			wait_on_page_writeback(page);
 
 		if (!PageWriteback(page)) {
@@ -437,7 +454,7 @@
 			ClearPageReclaim(page);
 		}
 		trace_mm_vmscan_writepage(page,
-			trace_reclaim_flags(page, sc->lumpy_reclaim_mode));
+			trace_reclaim_flags(page, sc->reclaim_mode));
 		inc_zone_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
@@ -622,7 +639,7 @@
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE)
+	if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -739,7 +756,7 @@
 			 * for any page for which writeback has already
 			 * started.
 			 */
-			if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC &&
+			if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
 			    may_enter_fs)
 				wait_on_page_writeback(page);
 			else {
@@ -895,7 +912,7 @@
 			try_to_free_swap(page);
 		unlock_page(page);
 		putback_lru_page(page);
-		disable_lumpy_reclaim_mode(sc);
+		reset_reclaim_mode(sc);
 		continue;
 
 activate_locked:
@@ -908,7 +925,7 @@
 keep_locked:
 		unlock_page(page);
 keep:
-		disable_lumpy_reclaim_mode(sc);
+		reset_reclaim_mode(sc);
 keep_lumpy:
 		list_add(&page->lru, &ret_pages);
 		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
@@ -1028,7 +1045,7 @@
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
@@ -1086,7 +1103,7 @@
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
-				nr_taken++;
+				nr_taken += hpage_nr_pages(page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
 					nr_lumpy_dirty++;
@@ -1141,14 +1158,15 @@
 	struct page *page;
 
 	list_for_each_entry(page, page_list, lru) {
+		int numpages = hpage_nr_pages(page);
 		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			lru += LRU_ACTIVE;
 			ClearPageActive(page);
-			nr_active++;
+			nr_active += numpages;
 		}
 		if (count)
-			count[lru]++;
+			count[lru] += numpages;
 	}
 
 	return nr_active;
@@ -1253,13 +1271,16 @@
 			spin_lock_irq(&zone->lru_lock);
 			continue;
 		}
-		SetPageLRU(page);
 		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
+			if (putback_active_lru_page(zone, page))
+				continue;
 		}
+		SetPageLRU(page);
+		add_page_to_lru_list(zone, page, lru);
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
@@ -1324,7 +1345,7 @@
 		return false;
 
 	/* Only stall on lumpy reclaim */
-	if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE)
+	if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
 		return false;
 
 	/* If we have reclaimed everything on the isolated list, no stall */
@@ -1368,15 +1389,15 @@
 			return SWAP_CLUSTER_MAX;
 	}
 
-	set_lumpy_reclaim_mode(priority, sc, false);
+	set_reclaim_mode(priority, sc, false);
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
 
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
-					ISOLATE_INACTIVE : ISOLATE_BOTH,
+			sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+					ISOLATE_BOTH : ISOLATE_INACTIVE,
 			zone, 0, file);
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1388,8 +1409,8 @@
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
 			&page_list, &nr_scanned, sc->order,
-			sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ?
-					ISOLATE_INACTIVE : ISOLATE_BOTH,
+			sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+					ISOLATE_BOTH : ISOLATE_INACTIVE,
 			zone, sc->mem_cgroup,
 			0, file);
 		/*
@@ -1411,7 +1432,7 @@
 
 	/* Check if we should synchronously wait for writeback */
 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
-		set_lumpy_reclaim_mode(priority, sc, true);
+		set_reclaim_mode(priority, sc, true);
 		nr_reclaimed += shrink_page_list(&page_list, zone, sc);
 	}
 
@@ -1426,7 +1447,7 @@
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
 		priority,
-		trace_shrink_flags(file, sc->lumpy_reclaim_mode));
+		trace_shrink_flags(file, sc->reclaim_mode));
 	return nr_reclaimed;
 }
 
@@ -1466,7 +1487,7 @@
 
 		list_move(&page->lru, &zone->lru[lru].list);
 		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
+		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1534,7 +1555,7 @@
 		}
 
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
@@ -1805,6 +1826,57 @@
 }
 
 /*
+ * Reclaim/compaction depends on a number of pages being freed. To avoid
+ * disruption to the system, a small number of order-0 pages continue to be
+ * rotated and reclaimed in the normal fashion. However, by the time we get
+ * back to the allocator and call try_to_compact_zone(), we ensure that
+ * there are enough free pages for it to be likely successful
+ */
+static inline bool should_continue_reclaim(struct zone *zone,
+					unsigned long nr_reclaimed,
+					unsigned long nr_scanned,
+					struct scan_control *sc)
+{
+	unsigned long pages_for_compaction;
+	unsigned long inactive_lru_pages;
+
+	/* If not in reclaim/compaction mode, stop */
+	if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+		return false;
+
+	/*
+	 * If we failed to reclaim and have scanned the full list, stop.
+	 * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
+	 *       faster but obviously would be less likely to satisfy the
+	 *       allocation. If this is desirable, use __GFP_REPEAT to decide
+	 *       if both reclaimed and scanned should be checked or just
+	 *       reclaimed
+	 */
+	if (!nr_reclaimed && !nr_scanned)
+		return false;
+
+	/*
+	 * If we have not reclaimed enough pages for compaction and the
+	 * inactive lists are large enough, continue reclaiming
+	 */
+	pages_for_compaction = (2UL << sc->order);
+	inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
+				zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+	if (sc->nr_reclaimed < pages_for_compaction &&
+			inactive_lru_pages > pages_for_compaction)
+		return true;
+
+	/* If compaction would go ahead or the allocation would succeed, stop */
+	switch (compaction_suitable(zone, sc->order)) {
+	case COMPACT_PARTIAL:
+	case COMPACT_CONTINUE:
+		return false;
+	default:
+		return true;
+	}
+}
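
To put numbers on the threshold above: pages_for_compaction is 2UL << order, so for an order-9 request (one 2MB THP with 4KB base pages) reclaim keeps looping until roughly 1024 base pages (about 4MB) have been reclaimed, provided the inactive lists still hold more than that. A small self-contained sketch of the arithmetic:

#include <stdio.h>

/* worked numbers only; the real decision also consults compaction_suitable() */
int main(void)
{
	unsigned int order = 9;				/* e.g. a 2MB THP allocation */
	unsigned long pages_for_compaction = 2UL << order;

	printf("order-%u: reclaim until ~%lu pages (~%lu KB) are free\n",
	       order, pages_for_compaction, pages_for_compaction * 4);
	return 0;					/* prints 1024 pages, 4096 KB */
}
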
+
+/*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static void shrink_zone(int priority, struct zone *zone,
@@ -1813,9 +1885,12 @@
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	enum lru_list l;
-	unsigned long nr_reclaimed = sc->nr_reclaimed;
+	unsigned long nr_reclaimed;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+	unsigned long nr_scanned = sc->nr_scanned;
 
+restart:
+	nr_reclaimed = 0;
 	get_scan_count(zone, sc, nr, priority);
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1841,8 +1916,7 @@
 		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
-
-	sc->nr_reclaimed = nr_reclaimed;
+	sc->nr_reclaimed += nr_reclaimed;
 
 	/*
 	 * Even if we did not try to evict anon pages at all, we want to
@@ -1851,6 +1925,11 @@
 	if (inactive_anon_is_low(zone, sc))
 		shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
+	/* reclaim/compaction might need reclaim to continue */
+	if (should_continue_reclaim(zone, nr_reclaimed,
+					sc->nr_scanned - nr_scanned, sc))
+		goto restart;
+
 	throttle_vm_writeout(sc->gfp_mask);
 }
 
@@ -2124,38 +2203,87 @@
 }
 #endif
 
+/*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are allowed by the
+ * caller's classzone_idx are added to balanced_pages. The total of
+ * balanced pages must be at least 25% of the zones allowed by classzone_idx
+ * for the node to be considered balanced. Forcing all zones to be balanced
+ * for high orders can cause excessive reclaim when there are imbalanced zones.
+ * The choice of 25% is due to
+ *   o a 16M DMA zone that is balanced will not balance a zone on any
+ *     reasonable sized machine
+ *   o On all other machines, the top zone must be at least a reasonable
+ *     percentage of the middle zones. For example, on 32-bit x86, highmem
+ *     would need to be at least 256M for it to balance a whole node.
+ *     Similarly, on x86-64 the Normal zone would need to be at least 1G
+ *     to balance a node on its own. These seemed like reasonable ratios.
+ */
+static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
+						int classzone_idx)
+{
+	unsigned long present_pages = 0;
+	int i;
+
+	for (i = 0; i <= classzone_idx; i++)
+		present_pages += pgdat->node_zones[i].present_pages;
+
+	return balanced_pages > (present_pages >> 2);
+}
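
A worked example of the 25% rule with made-up zone sizes: a node with a 16MB DMA zone (4096 pages), an 880MB Normal zone (225280 pages) and a 3GB HighMem zone (786432 pages) has 1015808 present pages up to classzone_idx, so the node counts as balanced once the balanced zones cover more than 253952 pages; the DMA zone on its own can never reach that, which is exactly the imbalance described above. The same check in miniature:

/* made-up zone sizes in 4KB pages; mirrors balanced_pages > present_pages / 4 */
static int pgdat_balanced_example(void)
{
	unsigned long dma = 4096, normal = 225280, highmem = 786432;
	unsigned long present = dma + normal + highmem;	/* 1015808 */
	unsigned long balanced = normal + highmem;	/* DMA zone still below its watermark */

	return balanced > (present >> 2);		/* 1011712 > 253952: balanced */
}
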
+
 /* is kswapd sleeping prematurely? */
-static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+					int classzone_idx)
 {
 	int i;
+	unsigned long balanced = 0;
+	bool all_zones_ok = true;
 
 	/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
 	if (remaining)
-		return 1;
+		return true;
 
-	/* If after HZ/10, a zone is below the high mark, it's premature */
+	/* Check the watermark levels */
 	for (i = 0; i < pgdat->nr_zones; i++) {
 		struct zone *zone = pgdat->node_zones + i;
 
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone->all_unreclaimable)
+		/*
+		 * balance_pgdat() skips over all_unreclaimable after
+		 * DEF_PRIORITY. Effectively, it considers them balanced so
+		 * they must be considered balanced here as well if kswapd
+		 * is to sleep
+		 */
+		if (zone->all_unreclaimable) {
+			balanced += zone->present_pages;
 			continue;
+		}
 
-		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
-								0, 0))
-			return 1;
+		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+							classzone_idx, 0))
+			all_zones_ok = false;
+		else
+			balanced += zone->present_pages;
 	}
 
-	return 0;
+	/*
+	 * For high-order requests, the balanced zones must contain at least
+	 * 25% of the node's pages for kswapd to sleep. For order-0, all zones
+	 * must be balanced
+	 */
+	if (order)
+		return pgdat_balanced(pgdat, balanced, classzone_idx);
+	else
+		return !all_zones_ok;
 }
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at high_wmark_pages(zone).
  *
- * Returns the number of pages which were actually freed.
+ * Returns the final order kswapd was reclaiming at
  *
  * There is special handling here for zones which are full of pinned pages.
  * This can happen if the pages are all mlocked, or if they are all used by
@@ -2172,11 +2300,14 @@
  * interoperates with the page allocator fallback scheme to ensure that aging
  * of pages is balanced across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
+							int *classzone_idx)
 {
 	int all_zones_ok;
+	unsigned long balanced;
 	int priority;
 	int i;
+	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 	unsigned long total_scanned;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
@@ -2199,7 +2330,6 @@
 	count_vm_event(PAGEOUTRUN);
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 		unsigned long lru_pages = 0;
 		int has_under_min_watermark_zone = 0;
 
@@ -2208,6 +2338,7 @@
 			disable_swap_token();
 
 		all_zones_ok = 1;
+		balanced = 0;
 
 		/*
 		 * Scan in the highmem->dma direction for the highest
@@ -2230,9 +2361,10 @@
 				shrink_active_list(SWAP_CLUSTER_MAX, zone,
 							&sc, priority, 0);
 
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
+				*classzone_idx = i;
 				break;
 			}
 		}
@@ -2255,6 +2387,7 @@
 		 * cause too much scanning of the lower zones.
 		 */
 		for (i = 0; i <= end_zone; i++) {
+			int compaction;
 			struct zone *zone = pgdat->node_zones + i;
 			int nr_slab;
 
@@ -2276,7 +2409,7 @@
 			 * We put equal pressure on every zone, unless one
 			 * zone has way too many pages free already.
 			 */
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					8*high_wmark_pages(zone), end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
@@ -2284,9 +2417,26 @@
 						lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
+
+			compaction = 0;
+			if (order &&
+			    zone_watermark_ok(zone, 0,
+					       high_wmark_pages(zone),
+					      end_zone, 0) &&
+			    !zone_watermark_ok(zone, order,
+					       high_wmark_pages(zone),
+					       end_zone, 0)) {
+				compact_zone_order(zone,
+						   order,
+						   sc.gfp_mask, false,
+						   COMPACT_MODE_KSWAPD);
+				compaction = 1;
+			}
+
 			if (zone->all_unreclaimable)
 				continue;
-			if (nr_slab == 0 && !zone_reclaimable(zone))
+			if (!compaction && nr_slab == 0 &&
+			    !zone_reclaimable(zone))
 				zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and
@@ -2297,7 +2447,7 @@
 			    total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
 				sc.may_writepage = 1;
 
-			if (!zone_watermark_ok(zone, order,
+			if (!zone_watermark_ok_safe(zone, order,
 					high_wmark_pages(zone), end_zone, 0)) {
 				all_zones_ok = 0;
 				/*
@@ -2305,7 +2455,7 @@
 				 * means that we have a GFP_ATOMIC allocation
 				 * failure risk. Hurry up!
 				 */
-				if (!zone_watermark_ok(zone, order,
+				if (!zone_watermark_ok_safe(zone, order,
 					    min_wmark_pages(zone), end_zone, 0))
 					has_under_min_watermark_zone = 1;
 			} else {
@@ -2317,10 +2467,12 @@
 				 * speculatively avoid congestion waits
 				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
+				if (i <= *classzone_idx)
+					balanced += zone->present_pages;
 			}
 
 		}
-		if (all_zones_ok)
+		if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
 			break;		/* kswapd: all done */
 		/*
 		 * OK, kswapd is getting into trouble.  Take a nap, then take
@@ -2343,7 +2495,13 @@
 			break;
 	}
 out:
-	if (!all_zones_ok) {
+
+	/*
+	 * order-0: All zones must meet high watermark for a balanced node
+	 * high-order: Balanced zones must make up at least 25% of the node
+	 *             for the node to be balanced
+	 */
+	if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
 		cond_resched();
 
 		try_to_freeze();
@@ -2368,7 +2526,88 @@
 		goto loop_again;
 	}
 
-	return sc.nr_reclaimed;
+	/*
+	 * If kswapd was reclaiming at a higher order, it has the option of
+	 * sleeping without all zones being balanced. Before it does, it must
+	 * ensure that the watermarks for order-0 on *all* zones are met and
+	 * that the congestion flags are cleared. The congestion flag must
+	 * be cleared as kswapd is the only mechanism that clears the flag
+	 * and it is potentially going to sleep here.
+	 */
+	if (order) {
+		for (i = 0; i <= end_zone; i++) {
+			struct zone *zone = pgdat->node_zones + i;
+
+			if (!populated_zone(zone))
+				continue;
+
+			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+				continue;
+
+			/* Confirm the zone is balanced for order-0 */
+			if (!zone_watermark_ok(zone, 0,
+					high_wmark_pages(zone), 0, 0)) {
+				order = sc.order = 0;
+				goto loop_again;
+			}
+
+			/* If balanced, clear the congested flag */
+			zone_clear_flag(zone, ZONE_CONGESTED);
+		}
+	}
+
+	/*
+	 * Return the order we were reclaiming at so sleeping_prematurely()
+	 * makes a decision on the order we were last reclaiming at. However,
+	 * if another caller entered the allocator slow path while kswapd
+	 * was awake, order will remain at the higher level
+	 */
+	*classzone_idx = end_zone;
+	return order;
+}
+
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+{
+	long remaining = 0;
+	DEFINE_WAIT(wait);
+
+	if (freezing(current) || kthread_should_stop())
+		return;
+
+	prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/* Try to sleep for a short interval */
+	if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+		remaining = schedule_timeout(HZ/10);
+		finish_wait(&pgdat->kswapd_wait, &wait);
+		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+	}
+
+	/*
+	 * After a short sleep, check if it was a premature sleep. If not, then
+	 * go fully to sleep until explicitly woken up.
+	 */
+	if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+		trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
+
+		/*
+		 * vmstat counters are not perfectly accurate and the estimated
+		 * value for counters such as NR_FREE_PAGES can deviate from the
+		 * true value by nr_online_cpus * threshold. To avoid the zone
+		 * watermarks being breached while under pressure, we reduce the
+		 * per-cpu vmstat threshold while kswapd is awake and restore
+		 * them before going back to sleep.
+		 */
+		set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+		schedule();
+		set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+	} else {
+		if (remaining)
+			count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+		else
+			count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+	}
+	finish_wait(&pgdat->kswapd_wait, &wait);
 }
 
 /*
@@ -2387,9 +2626,10 @@
 static int kswapd(void *p)
 {
 	unsigned long order;
+	int classzone_idx;
 	pg_data_t *pgdat = (pg_data_t*)p;
 	struct task_struct *tsk = current;
-	DEFINE_WAIT(wait);
+
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
@@ -2417,49 +2657,30 @@
 	set_freezable();
 
 	order = 0;
+	classzone_idx = MAX_NR_ZONES - 1;
 	for ( ; ; ) {
 		unsigned long new_order;
+		int new_classzone_idx;
 		int ret;
 
-		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 		new_order = pgdat->kswapd_max_order;
+		new_classzone_idx = pgdat->classzone_idx;
 		pgdat->kswapd_max_order = 0;
-		if (order < new_order) {
+		pgdat->classzone_idx = MAX_NR_ZONES - 1;
+		if (order < new_order || classzone_idx > new_classzone_idx) {
 			/*
 			 * Don't sleep if someone wants a larger 'order'
-			 * allocation
+			 * allocation or has tighter zone constraints
 			 */
 			order = new_order;
+			classzone_idx = new_classzone_idx;
 		} else {
-			if (!freezing(current) && !kthread_should_stop()) {
-				long remaining = 0;
-
-				/* Try to sleep for a short interval */
-				if (!sleeping_prematurely(pgdat, order, remaining)) {
-					remaining = schedule_timeout(HZ/10);
-					finish_wait(&pgdat->kswapd_wait, &wait);
-					prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-				}
-
-				/*
-				 * After a short sleep, check if it was a
-				 * premature sleep. If not, then go fully
-				 * to sleep until explicitly woken up
-				 */
-				if (!sleeping_prematurely(pgdat, order, remaining)) {
-					trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
-					schedule();
-				} else {
-					if (remaining)
-						count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
-					else
-						count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
-				}
-			}
-
+			kswapd_try_to_sleep(pgdat, order, classzone_idx);
 			order = pgdat->kswapd_max_order;
+			classzone_idx = pgdat->classzone_idx;
+			pgdat->kswapd_max_order = 0;
+			pgdat->classzone_idx = MAX_NR_ZONES - 1;
 		}
-		finish_wait(&pgdat->kswapd_wait, &wait);
 
 		ret = try_to_freeze();
 		if (kthread_should_stop())
@@ -2471,7 +2692,7 @@
 		 */
 		if (!ret) {
 			trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
-			balance_pgdat(pgdat, order);
+			order = balance_pgdat(pgdat, order, &classzone_idx);
 		}
 	}
 	return 0;
@@ -2480,23 +2701,26 @@
 /*
  * A zone is low on free memory, so wake its kswapd task to service it.
  */
-void wakeup_kswapd(struct zone *zone, int order)
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 {
 	pg_data_t *pgdat;
 
 	if (!populated_zone(zone))
 		return;
 
-	pgdat = zone->zone_pgdat;
-	if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
-		return;
-	if (pgdat->kswapd_max_order < order)
-		pgdat->kswapd_max_order = order;
-	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 		return;
+	pgdat = zone->zone_pgdat;
+	if (pgdat->kswapd_max_order < order) {
+		pgdat->kswapd_max_order = order;
+		pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
+	}
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
+	if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
+		return;
+
+	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8f62f17..0c3b504 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -83,7 +83,31 @@
 
 #ifdef CONFIG_SMP
 
-static int calculate_threshold(struct zone *zone)
+int calculate_pressure_threshold(struct zone *zone)
+{
+	int threshold;
+	int watermark_distance;
+
+	/*
+	 * As vmstats are not up to date, there is drift between the estimated
+	 * and real values. For high thresholds and a high number of CPUs, it
+	 * is possible for the min watermark to be breached while the estimated
+	 * value looks fine. The pressure threshold is a reduced value such
+	 * that even the maximum amount of drift will not accidentally breach
+	 * the min watermark
+	 */
+	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
+	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
+
+	/*
+	 * Maximum threshold is 125
+	 */
+	threshold = min(125, threshold);
+
+	return threshold;
+}
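
To make the reduced threshold concrete with assumed numbers: if a zone's low and min watermarks are 250 pages apart and the machine has 8 online CPUs, each CPU gets a threshold of 31, so even when every CPU holds the maximum unsynchronized delta the total drift is 248 pages and the min watermark cannot be breached unnoticed:

/* assumed watermarks and CPU count, not taken from a real zone */
static int pressure_threshold_example(void)
{
	int low_wmark = 1250, min_wmark = 1000;		/* pages */
	int cpus = 8;
	int distance = low_wmark - min_wmark;		/* 250 pages of headroom */
	int threshold = distance / cpus;		/* 31 */

	if (threshold < 1)
		threshold = 1;
	if (threshold > 125)				/* same cap as above */
		threshold = 125;
	return threshold;				/* worst-case drift: 8 * 31 = 248 <= 250 */
}
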
+
+int calculate_normal_threshold(struct zone *zone)
 {
 	int threshold;
 	int mem;	/* memory in 128 MB units */
@@ -142,7 +166,7 @@
 	for_each_populated_zone(zone) {
 		unsigned long max_drift, tolerate_drift;
 
-		threshold = calculate_threshold(zone);
+		threshold = calculate_normal_threshold(zone);
 
 		for_each_online_cpu(cpu)
 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
@@ -161,42 +185,50 @@
 	}
 }
 
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+				int (*calculate_pressure)(struct zone *))
+{
+	struct zone *zone;
+	int cpu;
+	int threshold;
+	int i;
+
+	for (i = 0; i < pgdat->nr_zones; i++) {
+		zone = &pgdat->node_zones[i];
+		if (!zone->percpu_drift_mark)
+			continue;
+
+		threshold = (*calculate_pressure)(zone);
+		for_each_possible_cpu(cpu)
+			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+							= threshold;
+	}
+}
+
 /*
  * For use when we know that interrupts are disabled.
  */
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-
-	s8 *p = pcp->vm_stat_diff + item;
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
 	long x;
+	long t;
 
-	x = delta + *p;
+	x = delta + __this_cpu_read(*p);
 
-	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
+	t = __this_cpu_read(pcp->stat_threshold);
+
+	if (unlikely(x > t || x < -t)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
-	*p = x;
+	__this_cpu_write(*p, x);
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
 /*
- * For an unknown interrupt state
- */
-void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-					int delta)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__mod_zone_page_state(zone, item, delta);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(mod_zone_page_state);
-
-/*
  * Optimized increment and decrement functions.
  *
  * These are only for a single page and therefore can take a struct page *
@@ -221,16 +253,17 @@
  */
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-	s8 *p = pcp->vm_stat_diff + item;
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s8 v, t;
 
-	(*p)++;
+	v = __this_cpu_inc_return(*p);
+	t = __this_cpu_read(pcp->stat_threshold);
+	if (unlikely(v > t)) {
+		s8 overstep = t >> 1;
 
-	if (unlikely(*p > pcp->stat_threshold)) {
-		int overstep = pcp->stat_threshold / 2;
-
-		zone_page_state_add(*p + overstep, zone, item);
-		*p = -overstep;
+		zone_page_state_add(v + overstep, zone, item);
+		__this_cpu_write(*p, -overstep);
 	}
 }
 
@@ -242,16 +275,17 @@
 
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-	s8 *p = pcp->vm_stat_diff + item;
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s8 v, t;
 
-	(*p)--;
+	v = __this_cpu_dec_return(*p);
+	t = __this_cpu_read(pcp->stat_threshold);
+	if (unlikely(v < - t)) {
+		s8 overstep = t >> 1;
 
-	if (unlikely(*p < - pcp->stat_threshold)) {
-		int overstep = pcp->stat_threshold / 2;
-
-		zone_page_state_add(*p - overstep, zone, item);
-		*p = overstep;
+		zone_page_state_add(v - overstep, zone, item);
+		__this_cpu_write(*p, overstep);
 	}
 }
 
@@ -261,6 +295,92 @@
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
 
+#ifdef CONFIG_CMPXCHG_LOCAL
+/*
+ * If we have cmpxchg_local support then we do not need to incur the overhead
+ * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
+ *
+ * mod_state() modifies the zone counter state through atomic per cpu
+ * operations.
+ *
+ * Overstep mode specifies how overstep should be handled:
+ *     0       No overstepping
+ *     1       Overstepping half of threshold
+ *     -1      Overstepping minus half of threshold
+ */
+static inline void mod_state(struct zone *zone,
+       enum zone_stat_item item, int delta, int overstep_mode)
+{
+	struct per_cpu_pageset __percpu *pcp = zone->pageset;
+	s8 __percpu *p = pcp->vm_stat_diff + item;
+	long o, n, t, z;
+
+	do {
+		z = 0;  /* overflow to zone counters */
+
+		/*
+		 * The fetching of the stat_threshold is racy. We may apply
+		 * a counter threshold to the wrong cpu if we get
+		 * rescheduled while executing here. However, the following
+		 * will apply the threshold again and therefore bring the
+		 * counter under the threshold.
+		 */
+		t = this_cpu_read(pcp->stat_threshold);
+
+		o = this_cpu_read(*p);
+		n = delta + o;
+
+		if (n > t || n < -t) {
+			int os = overstep_mode * (t >> 1);
+
+			/* Overflow must be added to zone counters */
+			z = n + os;
+			n = -os;
+		}
+	} while (this_cpu_cmpxchg(*p, o, n) != o);
+
+	if (z)
+		zone_page_state_add(z, zone, item);
+}
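
The loop above is the usual lock-free read-modify-write pattern: sample the per-cpu delta, compute the new value, and retry if this_cpu_cmpxchg() shows that another update (or a migration to a different CPU) slipped in between. A userspace-flavoured sketch of the same idea, with a plain C11 atomic standing in for the per-cpu counter and without the overstep refinement:

#include <stdatomic.h>

/* sketch: an atomic_long plays the per-cpu diff, compare-exchange plays this_cpu_cmpxchg() */
static void add_with_spill(atomic_long *diff, long *zone_counter,
			   long delta, long threshold)
{
	long prev, next, spill;

	do {
		spill = 0;
		prev = atomic_load(diff);
		next = prev + delta;
		if (next > threshold || next < -threshold) {
			spill = next;	/* push the accumulated delta to the zone counter */
			next = 0;
		}
	} while (!atomic_compare_exchange_weak(diff, &prev, next));

	if (spill)
		*zone_counter += spill;	/* zone_page_state_add() in the kernel */
}
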
+
+void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+					int delta)
+{
+	mod_state(zone, item, delta, 0);
+}
+EXPORT_SYMBOL(mod_zone_page_state);
+
+void inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+	mod_state(zone, item, 1, 1);
+}
+
+void inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	mod_state(page_zone(page), item, 1, 1);
+}
+EXPORT_SYMBOL(inc_zone_page_state);
+
+void dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+	mod_state(page_zone(page), item, -1, -1);
+}
+EXPORT_SYMBOL(dec_zone_page_state);
+#else
+/*
+ * Use interrupt disable to serialize counter updates
+ */
+void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+					int delta)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__mod_zone_page_state(zone, item, delta);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(mod_zone_page_state);
+
 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	unsigned long flags;
@@ -291,6 +411,7 @@
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(dec_zone_page_state);
+#endif
 
 /*
  * Update the zone counters for one cpu.
@@ -759,6 +880,7 @@
 	"numa_local",
 	"numa_other",
 #endif
+	"nr_anon_transparent_hugepages",
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",
 
@@ -834,7 +956,7 @@
 		   "\n        scanned  %lu"
 		   "\n        spanned  %lu"
 		   "\n        present  %lu",
-		   zone_nr_free_pages(zone),
+		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
@@ -1033,7 +1155,7 @@
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
-		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
+		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
 		per_cpu(vmstat_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac..1e308f2 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@
 			break;
 		case 's':{
 				char **sptr = va_arg(ap, char **);
-				int16_t len;
-				int size;
+				uint16_t len;
 
 				errcode = p9pdu_readf(pdu, proto_version,
 								"w", &len);
 				if (errcode)
 					break;
 
-				size = max_t(int16_t, len, 0);
-
-				*sptr = kmalloc(size + 1, GFP_KERNEL);
+				*sptr = kmalloc(len + 1, GFP_KERNEL);
 				if (*sptr == NULL) {
 					errcode = -EFAULT;
 					break;
 				}
-				if (pdu_read(pdu, *sptr, size)) {
+				if (pdu_read(pdu, *sptr, len)) {
 					errcode = -EFAULT;
 					kfree(*sptr);
 					*sptr = NULL;
 				} else
-					(*sptr)[size] = 0;
+					(*sptr)[len] = 0;
 			}
 			break;
 		case 'Q':{
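
The signedness change above matters because 9P carries string lengths as an unsigned 16-bit count on the wire: read into an int16_t, any length above 32767 turned negative, the old max_t(int16_t, len, 0) clamped it to zero, and the string came back empty. A tiny illustration of the failure mode with an assumed on-wire length:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire_len = 40000;		/* assumed length sent by the server */
	int16_t as_signed = (int16_t)wire_len;	/* implementation-defined; typically -25536 */

	/* old code: size = max_t(int16_t, len, 0) -> 0, so nothing was copied */
	printf("unsigned=%u signed=%d\n", (unsigned)wire_len, (int)as_signed);
	return 0;
}
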
@@ -234,14 +231,14 @@
 			}
 			break;
 		case 'D':{
-				int32_t *count = va_arg(ap, int32_t *);
+				uint32_t *count = va_arg(ap, uint32_t *);
 				void **data = va_arg(ap, void **);
 
 				errcode =
 				    p9pdu_readf(pdu, proto_version, "d", count);
 				if (!errcode) {
 					*count =
-					    min_t(int32_t, *count,
+					    min_t(uint32_t, *count,
 						  pdu->size - pdu->offset);
 					*data = &pdu->sdata[pdu->offset];
 				}
@@ -404,9 +401,10 @@
 			break;
 		case 's':{
 				const char *sptr = va_arg(ap, const char *);
-				int16_t len = 0;
+				uint16_t len = 0;
 				if (sptr)
-					len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+					len = min_t(uint16_t, strlen(sptr),
+								USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
 								"w", len);
@@ -438,7 +436,7 @@
 						 stbuf->n_gid, stbuf->n_muid);
 			} break;
 		case 'D':{
-				int32_t count = va_arg(ap, int32_t);
+				uint32_t count = va_arg(ap, uint32_t);
 				const void *data = va_arg(ap, const void *);
 
 				errcode = p9pdu_writef(pdu, proto_version, "d",
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe..7284062 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@
 	what was just said, you don't need it: say N.
 
 	Documentation on how to use TCP connection probing can be found
-	at http://linux-net.osdl.org/index.php/TcpProbe
+	at:
+
+	  http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
 
 	To compile this code as a module, choose M here: the
 	module will be called tcp_probe.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 179e04b..38754fd 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1607,7 +1607,7 @@
 	struct lec_arp_table *entry;
 	int i;
 
-	cancel_rearming_delayed_work(&priv->lec_arp_work);
+	cancel_delayed_work_sync(&priv->lec_arp_work);
 
 	/*
 	 * Remove all entries
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index bb86d29..6da5dae 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,7 +1392,7 @@
 	ax25_cb *ax25;
 	int err = 0;
 
-	memset(fsa, 0, sizeof(fsa));
+	memset(fsa, 0, sizeof(*fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf5..8184c03 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@
 		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
 			return -ENOPROTOOPT;
 		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.param.size = ol;
 		if (ol > sizeof(cf_sk->conn_req.param.data) ||
 			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
 			release_sock(&cf_sk->sk);
 			return -EINVAL;
 		}
+		cf_sk->conn_req.param.size = ol;
 		release_sock(&cf_sk->sk);
 		return 0;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c..fa9dab3 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@
 	struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
 	int pktlen;
 	int err = 0;
+	const u8 *ip_version;
+	u8 buf;
 
 	priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@
 	 * send the packet to the net stack.
 	 */
 	skb->dev = priv->netdev;
-	skb->protocol = htons(ETH_P_IP);
+
+	/* check the version of IP */
+	ip_version = skb_header_pointer(skb, 0, 1, &buf);
+	if (!ip_version)
+		return -EINVAL;
+	switch (*ip_version >> 4) {
+	case 4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/* If we change the header in loop mode, the checksum is corrupted. */
 	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
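
The fix above peeks at the first payload byte and uses its high nibble to pick the protocol, since every IP header starts with the version field. The same check in a standalone form, operating on a raw buffer instead of an skb:

#include <stddef.h>
#include <stdint.h>

/* returns 4 for IPv4, 6 for IPv6, -1 otherwise (the caller would drop the frame) */
static int ip_version_of(const uint8_t *pkt, size_t len)
{
	if (len < 1)
		return -1;
	switch (pkt[0] >> 4) {		/* the version lives in the top 4 bits of byte 0 */
	case 4:
		return 4;		/* maps to skb->protocol = htons(ETH_P_IP) */
	case 6:
		return 6;		/* maps to skb->protocol = htons(ETH_P_IPV6) */
	default:
		return -1;
	}
}
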
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef88..0a1b53b 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
 
 #include <linux/ceph/types.h>
+#include <linux/module.h>
 
 /*
  * Robert Jenkin's hash function.
@@ -104,6 +105,7 @@
 		return -1;
 	}
 }
+EXPORT_SYMBOL(ceph_str_hash);
 
 const char *ceph_str_hash_name(int type)
 {
@@ -116,3 +118,4 @@
 		return "unknown";
 	}
 }
+EXPORT_SYMBOL(ceph_str_hash_name);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1..dff633d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@
 
 int ceph_msgr_init(void)
 {
-	ceph_msgr_wq = create_workqueue("ceph-msgr");
+	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
 	if (!ceph_msgr_wq) {
 		pr_err("msgr_init failed to create workqueue\n");
 		return -ENOMEM;
@@ -1920,20 +1920,6 @@
 /*
  * Atomically queue work on a connection.  Bump @con reference to
  * avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY.  It
- * clears QUEUED and does it's thing.  When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work.  If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
  */
 static void queue_con(struct ceph_connection *con)
 {
@@ -1948,11 +1934,7 @@
 		return;
 	}
 
-	set_bit(QUEUED, &con->state);
-	if (test_bit(BUSY, &con->state)) {
-		dout("queue_con %p - already BUSY\n", con);
-		con->ops->put(con);
-	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
 		dout("queue_con %p - already queued\n", con);
 		con->ops->put(con);
 	} else {
@@ -1967,15 +1949,6 @@
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
-	int backoff = 0;
-
-more:
-	if (test_and_set_bit(BUSY, &con->state) != 0) {
-		dout("con_work %p BUSY already set\n", con);
-		goto out;
-	}
-	dout("con_work %p start, clearing QUEUED\n", con);
-	clear_bit(QUEUED, &con->state);
 
 	mutex_lock(&con->mutex);
 
@@ -1994,28 +1967,13 @@
 	    try_read(con) < 0 ||
 	    try_write(con) < 0) {
 		mutex_unlock(&con->mutex);
-		backoff = 1;
 		ceph_fault(con);     /* error/fault path */
 		goto done_unlocked;
 	}
 
 done:
 	mutex_unlock(&con->mutex);
-
 done_unlocked:
-	clear_bit(BUSY, &con->state);
-	dout("con->state=%lu\n", con->state);
-	if (test_bit(QUEUED, &con->state)) {
-		if (!backoff || test_bit(OPENING, &con->state)) {
-			dout("con_work %p QUEUED reset, looping\n", con);
-			goto more;
-		}
-		dout("con_work %p QUEUED reset, but just faulted\n", con);
-		clear_bit(QUEUED, &con->state);
-	}
-	dout("con_work %p done\n", con);
-
-out:
 	con->ops->put(con);
 }
 
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6..71603ac 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@
 			goto bad;
 		}
 		err = __decode_pool(p, end, pi);
-		if (err < 0)
+		if (err < 0) {
+			kfree(pi);
 			goto bad;
+		}
 		__insert_pg_pool(&map->pg_pools, pi);
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269..54277df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-	return ((features & NETIF_F_NO_CSUM) ||
-		((features & NETIF_F_V4_CSUM) &&
-		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_V6_CSUM) &&
-		 protocol == htons(ETH_P_IPV6)) ||
-		((features & NETIF_F_FCOE_CRC) &&
-		 protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-	__be16 protocol = skb->protocol;
-	int features = dev->features;
-
-	if (vlan_tx_tag_present(skb)) {
-		features &= dev->vlan_features;
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		protocol = veh->h_vlan_encapsulated_proto;
-		features &= dev->vlan_features;
-	}
-
-	return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@
 /**
  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  *	@skb: buffer to segment
+ *	@features: device features as applicable to this skb
  *
  *	This function segments the given skb and stores the list of segments
  *	in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
-	int features = dev->features & ~(illegal_highdma(dev, skb) ?
-					 NETIF_F_SG : 0);
 
 	segs = skb_gso_segment(skb, features);
 
@@ -2017,22 +1988,52 @@
 	}
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+	return ((features & NETIF_F_GEN_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
+		 protocol == htons(ETH_P_IP)) ||
+		((features & NETIF_F_V6_CSUM) &&
+		 protocol == htons(ETH_P_IPV6)) ||
+		((features & NETIF_F_FCOE_CRC) &&
+		 protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+	if (!can_checksum_protocol(features, protocol)) {
+		features &= ~NETIF_F_ALL_CSUM;
+		features &= ~NETIF_F_SG;
+	} else if (illegal_highdma(skb->dev, skb)) {
+		features &= ~NETIF_F_SG;
+	}
+
+	return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
+	int features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
-	} else if (!skb->vlan_tci)
-		return dev->features;
+	} else if (!vlan_tx_tag_present(skb)) {
+		return harmonize_features(skb, protocol, features);
+	}
 
-	if (protocol != htons(ETH_P_8021Q))
-		return dev->features & dev->vlan_features;
-	else
-		return 0;
+	features &= skb->dev->vlan_features;
+
+	if (protocol != htons(ETH_P_8021Q)) {
+		return harmonize_features(skb, protocol, features);
+	} else {
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+				NETIF_F_GEN_CSUM;
+		return harmonize_features(skb, protocol, features);
+	}
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
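
The net effect of netif_skb_features() is a per-skb mask built in three steps: start from the device features, intersect with vlan_features for tagged traffic, then let harmonize_features() drop checksum offload and scatter-gather when the protocol or highmem fragments cannot be handled. A condensed sketch of that masking order, with placeholder feature bits rather than the real NETIF_F_* values:

/* placeholder feature bits -- not the kernel's NETIF_F_* values */
#define F_SG		0x01
#define F_ALL_CSUM	0x02

static int skb_features_sketch(int dev_features, int vlan_features,
			       int is_vlan_tagged, int can_csum_proto, int highdma_ok)
{
	int features = dev_features;

	if (is_vlan_tagged)
		features &= vlan_features;		/* tagged traffic is further restricted */
	if (!can_csum_proto)
		features &= ~(F_ALL_CSUM | F_SG);	/* no checksum offload: no SG either */
	else if (!highdma_ok)
		features &= ~F_SG;			/* highmem frags the device cannot DMA */
	return features;
}
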
 
 /*
  * Returns true if either:
@@ -2042,22 +2043,13 @@
  *	   support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
+				      int features)
 {
-	if (skb_is_nonlinear(skb)) {
-		int features = dev->features;
-
-		if (vlan_tx_tag_present(skb))
-			features &= dev->vlan_features;
-
-		return (skb_has_frag_list(skb) &&
-			!(features & NETIF_F_FRAGLIST)) ||
+	return skb_is_nonlinear(skb) &&
+			((skb_has_frag_list(skb) &&
+				!(features & NETIF_F_FRAGLIST)) ||
 			(skb_shinfo(skb)->nr_frags &&
-			(!(features & NETIF_F_SG) ||
-			illegal_highdma(dev, skb)));
-	}
-
-	return 0;
+				!(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
+		int features;
+
 		/*
 		 * If device doesn't need skb->dst, release it right now while
 		 * its hot in this cpu cache
@@ -2079,8 +2073,10 @@
 
 		skb_orphan_try(skb);
 
+		features = netif_skb_features(skb);
+
 		if (vlan_tx_tag_present(skb) &&
-		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
+		    !(features & NETIF_F_HW_VLAN_TX)) {
 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
@@ -2088,13 +2084,13 @@
 			skb->vlan_tci = 0;
 		}
 
-		if (netif_needs_gso(dev, skb)) {
-			if (unlikely(dev_gso_segment(skb)))
+		if (netif_needs_gso(skb, features)) {
+			if (unlikely(dev_gso_segment(skb, features)))
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
 		} else {
-			if (skb_needs_linearize(skb, dev) &&
+			if (skb_needs_linearize(skb, features) &&
 			    __skb_linearize(skb))
 				goto out_kfree_skb;
 
@@ -2105,7 +2101,7 @@
 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				skb_set_transport_header(skb,
 					skb_checksum_start_offset(skb));
-				if (!dev_can_checksum(dev, skb) &&
+				if (!(features & NETIF_F_ALL_CSUM) &&
 				     skb_checksum_help(skb))
 					goto out_kfree_skb;
 			}
@@ -2301,7 +2297,10 @@
 		 */
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
-		__qdisc_update_bstats(q, skb->len);
+
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
+		qdisc_bstats_update(q, skb);
+
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -5524,34 +5523,6 @@
 	}
 }
 
-/**
- *	dev_txq_stats_fold - fold tx_queues stats
- *	@dev: device to get statistics from
- *	@stats: struct rtnl_link_stats64 to hold results
- */
-void dev_txq_stats_fold(const struct net_device *dev,
-			struct rtnl_link_stats64 *stats)
-{
-	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
-	unsigned int i;
-	struct netdev_queue *txq;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		txq = netdev_get_tx_queue(dev, i);
-		spin_lock_bh(&txq->_xmit_lock);
-		tx_bytes   += txq->tx_bytes;
-		tx_packets += txq->tx_packets;
-		tx_dropped += txq->tx_dropped;
-		spin_unlock_bh(&txq->_xmit_lock);
-	}
-	if (tx_bytes || tx_packets || tx_dropped) {
-		stats->tx_bytes   = tx_bytes;
-		stats->tx_packets = tx_packets;
-		stats->tx_dropped = tx_dropped;
-	}
-}
-EXPORT_SYMBOL(dev_txq_stats_fold);
-
 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
  * fields in the same order, with only the type differing.
  */
@@ -5595,7 +5566,6 @@
 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
-		dev_txq_stats_fold(dev, storage);
 	}
 	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
 	return storage;
@@ -5621,18 +5591,20 @@
 }
 
 /**
- *	alloc_netdev_mq - allocate network device
+ *	alloc_netdev_mqs - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
  *	@name:		device name format string
  *	@setup:		callback to initialize device
- *	@queue_count:	the number of subqueues to allocate
+ *	@txqs:		the number of TX subqueues to allocate
+ *	@rxqs:		the number of RX subqueues to allocate
  *
  *	Allocates a struct net_device with private data area for driver use
  *	and performs basic initialization.  Also allocates subquue structs
- *	for each queue on the device at the end of the netdevice.
+ *	for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *),
+		unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *dev;
 	size_t alloc_size;
@@ -5640,12 +5612,20 @@
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	if (queue_count < 1) {
+	if (txqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device "
 		       "with zero queues.\n");
 		return NULL;
 	}
 
+#ifdef CONFIG_RPS
+	if (rxqs < 1) {
+		pr_err("alloc_netdev: Unable to allocate device "
+		       "with zero RX queues.\n");
+		return NULL;
+	}
+#endif
+
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
@@ -5676,14 +5656,14 @@
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = queue_count;
-	dev->real_num_tx_queues = queue_count;
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
 		goto free_pcpu;
 
 #ifdef CONFIG_RPS
-	dev->num_rx_queues = queue_count;
-	dev->real_num_rx_queues = queue_count;
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_pcpu;
 #endif
@@ -5711,7 +5691,7 @@
 	kfree(p);
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  *	free_netdev - free network device
@@ -6209,7 +6189,7 @@
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
 {
 	/* At exit all network devices most be removed from a network
-	 * namespace.  Do this in the reverse order of registeration.
+	 * namespace.  Do this in the reverse order of registration.
 	 * Do this across as many network namespaces as possible to
 	 * improve batching efficiency.
 	 */
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4e..afc5837 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@
 /**
  *	sk_run_filter - run a filter on a socket
  *	@skb: buffer to run the filter on
- *	@filter: filter to apply
+ *	@fentry: filter to apply
  *
  * Decode and apply filter instructions to the skb->data.
  * Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 72d9b50..02dc2cb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -923,7 +923,7 @@
 
 		skb_queue_purge(&npinfo->arp_tx);
 		skb_queue_purge(&npinfo->txq);
-		cancel_rearming_delayed_work(&npinfo->tx_work);
+		cancel_delayed_work_sync(&npinfo->tx_work);
 
 		/* clean after last, unfinished work */
 		__skb_queue_purge(&npinfo->txq);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57..a5f7535 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@
 	if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+	if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 19d6c21..d31bb36 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -380,6 +380,8 @@
 	}
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 	nf_conntrack_put_reasm(skb->nfct_reasm);
 #endif
 #ifdef CONFIG_BRIDGE_NETFILTER
diff --git a/net/core/sock.c b/net/core/sock.c
index a658aeb..7dfed79 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
-  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@
   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
-  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@
   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
-  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_MAX"
 };
 
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd..b75968a 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@
 	what was just said, you don't need it: say N.
 
 	Documentation on how to use DCCP connection probing can be found
-	at http://linux-net.osdl.org/index.php/DccpProbe
+	at:
+
+	  http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
 
 	To compile this code as a module, choose M here: the
 	module will be called dccp_probe.
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 4508705..5fdb072 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
-	dp->dccps_gsr = seq;
+	if (after48(seq, dp->dccps_gsr))
+		dp->dccps_gsr = seq;
 	/* Sequence validity window depends on remote Sequence Window (7.5.1) */
 	dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
 	/*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247..8cde009 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@
 		 */
 		if (time_before(now, (dp->dccps_rate_last +
 				      sysctl_dccp_sync_ratelimit)))
-			return 0;
+			return -1;
 
 		DCCP_WARN("Step 6 failed for %s packet, "
 			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 5639438..4234882 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
 /* Boundary values */
 static int		zero     = 0,
 			u8_max   = 0xFF;
-static unsigned long	seqw_min = 32;
+static unsigned long	seqw_min = DCCPF_SEQ_WMIN,
+			seqw_max = 0xFFFFFFFF;		/* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
 	{
@@ -31,6 +32,7 @@
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &seqw_min,		/* RFC 4340, 7.5.2 */
+		.extra2		= &seqw_max,
 	},
 	{
 		.procname	= "rx_ccid",
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba1563..0dcaa90 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@
 /*
  * This processes a device up event. We only start up
  * the loopback device & ethernet devices with correct
- * MAC addreses automatically. Others must be started
+ * MAC addresses automatically. Others must be started
  * specifically.
  *
  * FIXME: How should we configure the loopback address ? If we could dispense
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 6112a12..0c877a7 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -390,7 +390,7 @@
 	if (dst->link_poll_needed)
 		del_timer_sync(&dst->link_poll_timer);
 
-	flush_scheduled_work();
+	flush_work_sync(&dst->link_poll_work);
 
 	for (i = 0; i < dst->pd->nr_chips; i++) {
 		struct dsa_switch *ds = dst->ds[i];
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f..44d2b42 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+				      unsigned int rxqs)
 {
-	return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
 			       const unsigned char *addr, int len)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9e95d7f..a5a1050 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -432,7 +432,9 @@
 	---help---
 	  Support for INET (TCP, DCCP, etc) socket monitoring interface used by
 	  native Linux tools such as ss. ss is included in iproute2, currently
-	  downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
+	  downloadable at:
+
+	    http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
 
 	  If unsure, say Y.
 
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec..86961be 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	ah = (struct ip_auth_hdr *)skb->data;
-	iph = ip_hdr(skb);
-	ihl = ip_hdrlen(skb);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	iph = ip_hdr(skb);
+	ihl = ip_hdrlen(skb);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b9..04c8b69 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@
 	return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+	int err = -ENXIO;
+
+	if (neigh) {
+		if (neigh->nud_state & ~NUD_NOARP)
+			err = neigh_update(neigh, NULL, NUD_FAILED,
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
+		neigh_release(neigh);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
 		struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@
 {
 	int err;
 	__be32 ip;
-	struct neighbour *neigh;
 
 	if (r->arp_flags & ATF_PUBL)
 		return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@
 		if (!dev)
 			return -EINVAL;
 	}
-	err = -ENXIO;
-	neigh = neigh_lookup(&arp_tbl, &ip, dev);
-	if (neigh) {
-		if (neigh->nud_state & ~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED,
-					   NEIGH_UPDATE_F_OVERRIDE|
-					   NEIGH_UPDATE_F_ADMIN);
-		neigh_release(neigh);
-	}
-	return err;
+	return arp_invalidate(dev, ip);
 }
 
 /*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e3181..97e5fb7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
+			    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
 				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
 							spin_unlock(&head->lock);
 							snum = smallest_rover;
 							goto have_snum;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada171..2746c1f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (nlmsg_attrlen(nlh, hdrlen)) {
 			struct nlattr *attr;
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340..e855fff 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
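
The rewritten get_counters() above no longer takes the per-cpu write lock; it snapshots each 64-bit byte/packet pair and retries whenever the per-cpu sequence count shows that a writer ran in between. A minimal kernel-context sketch of that retry pattern (it would need <linux/seqlock.h> and <linux/types.h>):

struct pcpu_counter {
	u64 bcnt;
	u64 pcnt;
};

/* copy one CPU's counters only once the sequence count is stable */
static void snapshot_counters(seqlock_t *lock, const struct pcpu_counter *src,
			      u64 *bcnt, u64 *pcnt)
{
	unsigned int start;

	do {
		start = read_seqbegin(lock);
		*bcnt = src->bcnt;
		*pcnt = src->pcnt;
	} while (read_seqretry(lock, start));	/* a writer interleaved: retry */
}
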
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d63..652efea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc7c096..406f320 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1350,7 +1350,7 @@
 	return 0;
 }
 
-/* Intialize TSO state of a skb.
+/* Initialize TSO state of a skb.
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de..978e80e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@
 			goto out;
 		}
 
-		/* Reproduce AF_INET checks to make the bindings consitant */
+		/* Reproduce AF_INET checks to make the bindings consistent */
 		v4addr = addr->sin6_addr.s6_addr32[3];
 		chk_addr_ret = inet_addr_type(net, v4addr);
 		if (!sysctl_ip_nonlocal_bind &&
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4e..1aba54a 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
 
-	ip6h = ipv6_hdr(skb);
-
-	skb_push(skb, hdr_len);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	ip6h = ipv6_hdr(skb);
+
+	skb_push(skb, hdr_len);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d..d144e62 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     sk2->sk_state == TCP_LISTEN) &&
+		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
 		     ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 94b5bf1..5f8d242 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -401,6 +401,9 @@
 		goto drop;
 	}
 
+	if (skb->pkt_type != PACKET_HOST)
+		goto drop;
+
 	skb_forward_csum(skb);
 
 	/*
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 4555823..7d227c6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 99abfb5..97c5b21 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -19,13 +19,15 @@
 
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_l3proto.h>
 #include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#endif
+#include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
@@ -33,8 +35,10 @@
 {
 	u16 zone = NF_CT_DEFAULT_ZONE;
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	if (skb->nfct)
 		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+#endif
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	if (skb->nf_bridge &&
@@ -56,9 +60,11 @@
 {
 	struct sk_buff *reasm;
 
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	/* Previously seen (loopback)?	*/
 	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
 		return NF_ACCEPT;
+#endif
 
 	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
 	/* queued */
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f7db676..1ee5dab 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -36,6 +36,7 @@
 #define KMSG_COMPONENT "iucv"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/spinlock.h>
@@ -1804,6 +1805,7 @@
 	struct iucv_irq_data *p;
 	struct iucv_irq_list *work;
 
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
 	p = iucv_irq_data[smp_processor_id()];
 	if (p->ippathid >= iucv_max_pathid) {
 		WARN_ON(p->ippathid >= iucv_max_pathid);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c6f2936..22f7ad5 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3430,7 +3430,7 @@
 {
 	EnterFunction(2);
 	ip_vs_trash_cleanup();
-	cancel_rearming_delayed_work(&defense_work);
+	cancel_delayed_work_sync(&defense_work);
 	cancel_work_sync(&defense_work.work);
 	ip_vs_kill_estimator(&ip_vs_stats);
 	unregister_sysctl_table(sysctl_header);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50..2b7eef3 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					 hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-		nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +686,7 @@
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
@@ -928,7 +924,7 @@
 	u16 zone;
 	int err;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP)
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
 		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
 					  ctnetlink_done);
 
@@ -976,7 +972,8 @@
 free:
 	kfree_skb(skb2);
 out:
-	return err;
+	/* this avoids a loop in nfnetlink. */
+	return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 #ifdef CONFIG_NF_NAT_NEEDED
@@ -1790,7 +1787,7 @@
 	u16 zone;
 	int err;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		return netlink_dump_start(ctnl, skb, nlh,
 					  ctnetlink_exp_dump_table,
 					  ctnetlink_exp_done);
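The NLM_F_DUMP tests tightened here (and in inet_diag.c above and genetlink.c below) matter because NLM_F_DUMP is a multi-bit mask, NLM_F_ROOT | NLM_F_MATCH: a plain bitwise AND fires when either bit is present on its own, while comparing the masked value against the mask requires both. A small sketch with stand-in flag values:

	/* Stand-in values; the point is only that DUMP is two bits wide. */
	#define DEMO_ROOT	0x100
	#define DEMO_MATCH	0x200
	#define DEMO_DUMP	(DEMO_ROOT | DEMO_MATCH)

	static int is_dump_loose(unsigned int flags)
	{
		return (flags & DEMO_DUMP) != 0;		/* true for ROOT alone */
	}

	static int is_dump_strict(unsigned int flags)
	{
		return (flags & DEMO_DUMP) == DEMO_DUMP;	/* needs both bits */
	}
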
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8046350..c942376 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-		spin_lock_init(&lock->lock);
+
+		seqlock_init(&lock->lock);
 		lock->readers = 0;
 	}
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99..f83cb37 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@
 	    security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb..1072b2c 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
 	struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
 						struct phonet_protocol *pp)
 {
 	int err = 0;
@@ -481,7 +481,7 @@
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
 	mutex_lock(&proto_tab_lock);
 	BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0b9bb20..74c064c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -808,7 +808,7 @@
 		goto error_call_jar;
 	}
 
-	rxrpc_workqueue = create_workqueue("krxrpcd");
+	rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
 	if (!rxrpc_workqueue) {
 		printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
 		goto error_work_queue;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a36270a..f04d4a4 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@
 	  To administer these schedulers, you'll need the user-level utilities
 	  from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
 	  That package also contains some documentation; for more, check out
-	  <http://linux-net.osdl.org/index.php/Iproute2>.
+	  <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
 
 	  This Quality of Service (QoS) support will enable you to use
 	  Differentiated Services (diffserv) and Resource Reservation Protocol
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce..83ddfc0 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@
 
 	spin_lock(&p->tcf_lock);
 	p->tcf_tm.lastuse = jiffies;
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
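The act_* and sch_* hunks that follow all collapse the same open-coded two lines of byte/packet accounting into a single helper. Roughly, such a helper also folds in the GSO segment count (see include/net/sch_generic.h for the real definition; this is a sketch under that assumption):

	#include <linux/skbuff.h>
	#include <net/sch_generic.h>

	/* Sketch of the consolidated accounting helper: one call updates both
	 * byte and packet counters, counting each GSO segment as a packet. */
	static inline void bstats_update_sketch(struct gnet_stats_basic_packed *bstats,
						const struct sk_buff *skb)
	{
		bstats->bytes   += qdisc_pkt_len(skb);
		bstats->packets += skb_is_gso(skb) ?
				   skb_shinfo(skb)->gso_segs : 1;
	}

The sch_htb.c hunk below is the one place the old code already counted GSO segments by hand, which is why its diff shrinks the most.
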
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef96..c2a7c20 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	ipt->tcf_bstats.packets++;
+	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
 	 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be..d765067 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@
 
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	m->tcf_bstats.packets++;
+	bstats_update(&m->tcf_bstats, skb);
 
 	dev = m->tcfm_dev;
 	if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb83..178a4bd 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 
 	spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9..445bef7 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf743..e2f08b1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	police->tcf_bstats.packets++;
+	bstats_update(&police->tcf_bstats, skb);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3..7287cff 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4e..836f5fe 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2825407..943d733 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb76315..c80d1c2 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +649,7 @@
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
+			qdisc_bstats_update(sch, skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b531..de55e64 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 
 	sch->q.qlen++;
 	return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d6..60f4bdd 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b..2e45791 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d..984c1b0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@
 				htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a..bce1665 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@
 
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690de..21f13da 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c0..1c4bce8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -477,8 +476,7 @@
 		__skb_queue_after(list, skb, nskb);
 
 		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		sch->bstats.bytes += qdisc_pkt_len(nskb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, nskb);
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bc..966158d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c..a6009c5 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94..239ec53 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d..77565e7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a..84ce48e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@
 	struct net_device *dev;
 	struct Qdisc *slaves;
 	struct list_head master_list;
+	unsigned long	tx_bytes;
+	unsigned long	tx_packets;
+	unsigned long	tx_errors;
+	unsigned long	tx_dropped;
 };
 
 struct teql_sched_data
@@ -83,8 +87,7 @@
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -275,7 +278,6 @@
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
@@ -315,8 +317,8 @@
 					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
-					txq->tx_packets++;
-					txq->tx_bytes += length;
+					master->tx_packets++;
+					master->tx_bytes += length;
 					return NETDEV_TX_OK;
 				}
 				__netif_tx_unlock(slave_txq);
@@ -343,10 +345,10 @@
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
 	}
-	dev->stats.tx_errors++;
+	master->tx_errors++;
 
 drop:
-	txq->tx_dropped++;
+	master->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -399,6 +401,18 @@
 	return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+						     struct rtnl_link_stats64 *stats)
+{
+	struct teql_master *m = netdev_priv(dev);
+
+	stats->tx_packets	= m->tx_packets;
+	stats->tx_bytes		= m->tx_bytes;
+	stats->tx_errors	= m->tx_errors;
+	stats->tx_dropped	= m->tx_dropped;
+	return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
 	struct teql_master *m = netdev_priv(dev);
@@ -423,6 +437,7 @@
 	.ndo_open	= teql_master_open,
 	.ndo_stop	= teql_master_close,
 	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats64 = teql_master_stats64,
 	.ndo_change_mtu	= teql_master_mtu,
 };
 
diff --git a/net/socket.c b/net/socket.c
index c1663c0..ac2219f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -262,6 +262,7 @@
 }
 
 
+
 static void wq_free_rcu(struct rcu_head *head)
 {
 	struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
@@ -305,20 +306,6 @@
 	.statfs		= simple_statfs,
 };
 
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
-			 int flags, const char *dev_name, void *data)
-{
-	return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
-}
-
-static struct vfsmount *sock_mnt __read_mostly;
-
-static struct file_system_type sock_fs_type = {
-	.name =		"sockfs",
-	.mount =	sockfs_mount,
-	.kill_sb =	kill_anon_super,
-};
-
 /*
  * sockfs_dname() is called from d_path().
  */
@@ -332,6 +319,21 @@
 	.d_dname  = sockfs_dname,
 };
 
+static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+			 int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
+		&sockfs_dentry_operations, SOCKFS_MAGIC);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+	.name =		"sockfs",
+	.mount =	sockfs_mount,
+	.kill_sb =	kill_anon_super,
+};
+
 /*
  *	Obtains the first available file descriptor and sets it up for use.
  *
@@ -360,14 +362,13 @@
 	if (unlikely(fd < 0))
 		return fd;
 
-	path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
+	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
 	if (unlikely(!path.dentry)) {
 		put_unused_fd(fd);
 		return -ENOMEM;
 	}
 	path.mnt = mntget(sock_mnt);
 
-	path.dentry->d_op = &sockfs_dentry_operations;
 	d_instantiate(path.dentry, SOCK_INODE(sock));
 	SOCK_INODE(sock)->i_fop = &socket_file_ops;
 
@@ -2390,6 +2391,8 @@
 
 static int __init sock_init(void)
 {
+	int err;
+
 	/*
 	 *      Initialize sock SLAB cache.
 	 */
@@ -2406,8 +2409,15 @@
 	 */
 
 	init_inodecache();
-	register_filesystem(&sock_fs_type);
+
+	err = register_filesystem(&sock_fs_type);
+	if (err)
+		goto out_fs;
 	sock_mnt = kern_mount(&sock_fs_type);
+	if (IS_ERR(sock_mnt)) {
+		err = PTR_ERR(sock_mnt);
+		goto out_mount;
+	}
 
 	/* The real protocol initialization is performed in later initcalls.
 	 */
@@ -2420,7 +2430,13 @@
 	skb_timestamping_init();
 #endif
 
-	return 0;
+out:
+	return err;
+
+out_mount:
+	unregister_filesystem(&sock_fs_type);
+out_fs:
+	goto out;
 }
 
 core_initcall(sock_init);	/* early initcall */
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe6784..67e3127 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@
 	return cred->cr_ops->crvalidate(task, p);
 }
 
+static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+				   __be32 *data, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
+	encode(rqstp, &xdr, obj);
+}
+
 int
-rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
+rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
 		__be32 *data, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@
 	if (cred->cr_ops->crwrap_req)
 		return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
 	/* By default, we encode the arguments normally. */
-	return encode(rqstp, data, obj);
+	rpcauth_wrap_req_encode(encode, rqstp, data, obj);
+	return 0;
+}
+
+static int
+rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+			  __be32 *data, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
+	return decode(rqstp, &xdr, obj);
 }
 
 int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
+rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
 		__be32 *data, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@
 		return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
 						   data, obj);
 	/* By default, we decode the arguments normally. */
-	return decode(rqstp, data, obj);
+	return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
 }
 
 int
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce3..45dbf15 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@
 	return NULL;
 }
 
+static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+				__be32 *p, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
+	encode(rqstp, &xdr, obj);
+}
+
 static inline int
 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+		   kxdreproc_t encode, struct rpc_rqst *rqstp,
+		   __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	struct xdr_buf	integ_buf;
@@ -1249,9 +1259,7 @@
 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	*p++ = htonl(rqstp->rq_seqno);
 
-	status = encode(rqstp, p, obj);
-	if (status)
-		return status;
+	gss_wrap_req_encode(encode, rqstp, p, obj);
 
 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
 				offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@
 
 static inline int
 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+		  kxdreproc_t encode, struct rpc_rqst *rqstp,
+		  __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	u32		offset;
@@ -1342,9 +1351,7 @@
 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	*p++ = htonl(rqstp->rq_seqno);
 
-	status = encode(rqstp, p, obj);
-	if (status)
-		return status;
+	gss_wrap_req_encode(encode, rqstp, p, obj);
 
 	status = alloc_enc_pages(rqstp);
 	if (status)
@@ -1394,7 +1401,7 @@
 
 static int
 gss_wrap_req(struct rpc_task *task,
-	     kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
+	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@
 		/* The spec seems a little ambiguous here, but I think that not
 		 * wrapping context destruction requests makes the most sense.
 		 */
-		status = encode(rqstp, p, obj);
+		gss_wrap_req_encode(encode, rqstp, p, obj);
+		status = 0;
 		goto out;
 	}
 	switch (gss_cred->gc_service) {
 		case RPC_GSS_SVC_NONE:
-			status = encode(rqstp, p, obj);
+			gss_wrap_req_encode(encode, rqstp, p, obj);
+			status = 0;
 			break;
 		case RPC_GSS_SVC_INTEGRITY:
 			status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@
 	return 0;
 }
 
+static int
+gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+		      __be32 *p, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	return decode(rqstp, &xdr, obj);
+}
 
 static int
 gss_unwrap_resp(struct rpc_task *task,
-		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
+		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@
 	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
 						+ (savedlen - head->iov_len);
 out_decode:
-	status = decode(rqstp, p, obj);
+	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
 out:
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 75ee993..9576f35 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -137,7 +137,7 @@
 		ms_usage = 13;
 		break;
 	default:
-		return EINVAL;;
+		return -EINVAL;
 	}
 	salt[0] = (ms_usage >> 0) & 0xff;
 	salt[1] = (ms_usage >> 8) & 0xff;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dec2a6f..bcdae78 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -67,7 +67,6 @@
 
 #define	RSI_HASHBITS	6
 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
-#define	RSI_HASHMASK	(RSI_HASHMAX-1)
 
 struct rsi {
 	struct cache_head	h;
@@ -319,7 +318,6 @@
 
 #define	RSC_HASHBITS	10
 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
-#define	RSC_HASHMASK	(RSC_HASHMAX-1)
 
 #define GSS_SEQ_WIN	128
 
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0c..1dd1a68 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@
 		ret = task->tk_status;
 		rpc_put_task(task);
 	}
-	return ret;
 	dprintk("RPC:       bc_send ret= %d\n", ret);
+	return ret;
 }
 
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e433e75..72ad836 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -37,7 +37,7 @@
 
 #define	 RPCDBG_FACILITY RPCDBG_CACHE
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item);
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
 static void cache_revisit_request(struct cache_head *item);
 
 static void cache_init(struct cache_head *h)
@@ -128,6 +128,7 @@
 {
 	head->expiry_time = expiry;
 	head->last_refresh = seconds_since_boot();
+	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 	set_bit(CACHE_VALID, &head->flags);
 }
 
@@ -208,11 +209,36 @@
 		/* entry is valid */
 		if (test_bit(CACHE_NEGATIVE, &h->flags))
 			return -ENOENT;
-		else
+		else {
+			/*
+			 * In combination with write barrier in
+			 * sunrpc_cache_update, ensures that anyone
+			 * using the cache entry after this sees the
+			 * updated contents:
+			 */
+			smp_rmb();
 			return 0;
+		}
 	}
 }
 
+static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
+{
+	int rv;
+
+	write_lock(&detail->hash_lock);
+	rv = cache_is_valid(detail, h);
+	if (rv != -EAGAIN) {
+		write_unlock(&detail->hash_lock);
+		return rv;
+	}
+	set_bit(CACHE_NEGATIVE, &h->flags);
+	cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
+	write_unlock(&detail->hash_lock);
+	cache_fresh_unlocked(h, detail);
+	return -ENOENT;
+}
+
 /*
  * This is the generic cache management routine for all
  * the authentication caches.
@@ -251,14 +277,8 @@
 			case -EINVAL:
 				clear_bit(CACHE_PENDING, &h->flags);
 				cache_revisit_request(h);
-				if (rv == -EAGAIN) {
-					set_bit(CACHE_NEGATIVE, &h->flags);
-					cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
-					cache_fresh_unlocked(h, detail);
-					rv = -ENOENT;
-				}
+				rv = try_to_negate_entry(detail, h);
 				break;
-
 			case -EAGAIN:
 				clear_bit(CACHE_PENDING, &h->flags);
 				cache_revisit_request(h);
@@ -268,9 +288,11 @@
 	}
 
 	if (rv == -EAGAIN) {
-		cache_defer_req(rqstp, h);
-		if (!test_bit(CACHE_PENDING, &h->flags)) {
-			/* Request is not deferred */
+		if (!cache_defer_req(rqstp, h)) {
+			/*
+			 * Request was not deferred; handle it as best
+			 * we can ourselves:
+			 */
 			rv = cache_is_valid(detail, h);
 			if (rv == -EAGAIN)
 				rv = -ETIMEDOUT;
@@ -618,18 +640,19 @@
 		discard->revisit(discard, 1);
 }
 
-static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+/* Return true if and only if a deferred request is queued. */
+static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
 
 	if (req->thread_wait) {
 		cache_wait_req(req, item);
 		if (!test_bit(CACHE_PENDING, &item->flags))
-			return;
+			return false;
 	}
 	dreq = req->defer(req);
 	if (dreq == NULL)
-		return;
+		return false;
 	setup_deferral(dreq, item, 1);
 	if (!test_bit(CACHE_PENDING, &item->flags))
 		/* Bit could have been cleared before we managed to
@@ -638,6 +661,7 @@
 		cache_revisit_request(item);
 
 	cache_limit_defers();
+	return true;
 }
 
 static void cache_revisit_request(struct cache_head *item)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f..57d344c 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@
 rpc_xdr_encode(struct rpc_task *task)
 {
 	struct rpc_rqst	*req = task->tk_rqstp;
-	kxdrproc_t	encode;
+	kxdreproc_t	encode;
 	__be32		*p;
 
 	dprint_status(task);
@@ -1535,7 +1535,7 @@
 {
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct rpc_rqst	*req = task->tk_rqstp;
-	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
+	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
 	__be32		*p;
 
 	dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@
 	goto out_garbage;
 }
 
-static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
+static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
 {
-	return 0;
 }
 
-static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
+static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
 {
 	return 0;
 }
@@ -1830,23 +1829,15 @@
 			  const struct rpc_task *task)
 {
 	const char *rpc_waitq = "none";
-	char *p, action[KSYM_SYMBOL_LEN];
 
 	if (RPC_IS_QUEUED(task))
 		rpc_waitq = rpc_qname(task->tk_waitqueue);
 
-	/* map tk_action pointer to a function name; then trim off
-	 * the "+0x0 [sunrpc]" */
-	sprint_symbol(action, (unsigned long)task->tk_action);
-	p = strchr(action, '+');
-	if (p)
-		*p = '\0';
-
-	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
 		task->tk_pid, task->tk_flags, task->tk_status,
 		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
 		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
-		action, rpc_waitq);
+		task->tk_action, rpc_waitq);
 }
 
 void rpc_show_tasks(void)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 10a17a3..72bc536 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -162,9 +162,17 @@
 }
 
 static void
+rpc_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	INIT_LIST_HEAD(&inode->i_dentry);
+	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
+}
+
+static void
 rpc_destroy_inode(struct inode *inode)
 {
-	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
+	call_rcu(&inode->i_rcu, rpc_i_callback);
 }
 
 static int
@@ -430,7 +438,7 @@
 }
 EXPORT_SYMBOL_GPL(rpc_put_mount);
 
-static int rpc_delete_dentry(struct dentry *dentry)
+static int rpc_delete_dentry(const struct dentry *dentry)
 {
 	return 1;
 }
@@ -466,7 +474,7 @@
 {
 	struct inode *inode;
 
-	BUG_ON(!d_unhashed(dentry));
+	d_drop(dentry);
 	inode = rpc_get_inode(dir->i_sb, mode);
 	if (!inode)
 		goto out_err;
@@ -583,7 +591,7 @@
 		}
 	}
 	if (!dentry->d_inode)
-		dentry->d_op = &rpc_dentry_operations;
+		d_set_d_op(dentry, &rpc_dentry_operations);
 out_err:
 	return dentry;
 }
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca..c652e4c 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@
 	RPCBPROC_GETSTAT,
 };
 
-#define RPCB_HIGHPROC_2		RPCBPROC_CALLIT
-#define RPCB_HIGHPROC_3		RPCBPROC_TADDR2UADDR
-#define RPCB_HIGHPROC_4		RPCBPROC_GETSTAT
-
 /*
  * r_owner
  *
@@ -693,46 +689,37 @@
  * XDR functions for rpcbind
  */
 
-static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
-			    const struct rpcbind_args *rpcb)
+static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
-	p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
-	if (unlikely(p == NULL))
-		return -EIO;
-
-	*p++ = htonl(rpcb->r_prog);
-	*p++ = htonl(rpcb->r_vers);
-	*p++ = htonl(rpcb->r_prot);
-	*p   = htonl(rpcb->r_port);
-
-	return 0;
+	p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p++ = cpu_to_be32(rpcb->r_vers);
+	*p++ = cpu_to_be32(rpcb->r_prot);
+	*p   = cpu_to_be32(rpcb->r_port);
 }
 
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
 	unsigned long port;
-
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	__be32 *p;
 
 	rpcb->r_port = 0;
 
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	port = ntohl(*p);
+	port = be32_to_cpup(p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
 	if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@
 	return 0;
 }
 
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
 			unsigned int *boolp)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
 	*boolp = 0;
-	if (*p)
+	if (*p != xdr_zero)
 		*boolp = 1;
 
 	dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@
 	return 0;
 }
 
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
-				const u32 maxstrlen)
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+			       const u32 maxstrlen)
 {
-	u32 len;
 	__be32 *p;
+	u32 len;
 
-	if (unlikely(string == NULL))
-		return -EIO;
 	len = strlen(string);
-	if (unlikely(len > maxstrlen))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(__be32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > maxstrlen);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-
-	return 0;
 }
 
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
-			    const struct rpcbind_args *rpcb)
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers,
 			rpcb->r_netid, rpcb->r_addr);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p = cpu_to_be32(rpcb->r_vers);
 
-	p = xdr_reserve_space(&xdr,
-			sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(rpcb->r_prog);
-	*p = htonl(rpcb->r_vers);
-
-	if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
-		return -EIO;
-
-	return 0;
+	encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+	encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+	encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
 }
 
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct sockaddr_storage address;
 	struct sockaddr *sap = (struct sockaddr *)&address;
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 	u32 len;
 
 	rpcb->r_port = 0;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		goto out_fail;
-	len = ntohl(*p);
+	len = be32_to_cpup(p);
 
 	/*
 	 * If the returned universal address is a null string,
@@ -845,7 +810,7 @@
 	if (unlikely(len > RPCBIND_MAXUADDRLEN))
 		goto out_fail;
 
-	p = xdr_inline_decode(&xdr, len);
+	p = xdr_inline_decode(xdr, len);
 	if (unlikely(p == NULL))
 		goto out_fail;
 	dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@
 static struct rpc_procinfo rpcb_procedures2[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -881,8 +846,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -891,8 +856,8 @@
 	},
 	[RPCBPROC_GETPORT] = {
 		.p_proc		= RPCBPROC_GETPORT,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getport,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getport,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_getportres_sz,
 		.p_statidx	= RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@
 static struct rpc_procinfo rpcb_procedures3[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -914,8 +879,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -924,8 +889,8 @@
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc		= RPCBPROC_GETADDR,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_getaddrres_sz,
 		.p_statidx	= RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@
 static struct rpc_procinfo rpcb_procedures4[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -947,8 +912,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -957,8 +922,8 @@
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc		= RPCBPROC_GETADDR,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_getaddrres_sz,
 		.p_statidx	= RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@
 
 static struct rpc_version rpcb_version2 = {
 	.number		= RPCBVERS_2,
-	.nrprocs	= RPCB_HIGHPROC_2,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures2),
 	.procs		= rpcb_procedures2
 };
 
 static struct rpc_version rpcb_version3 = {
 	.number		= RPCBVERS_3,
-	.nrprocs	= RPCB_HIGHPROC_3,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures3),
 	.procs		= rpcb_procedures3
 };
 
 static struct rpc_version rpcb_version4 = {
 	.number		= RPCBVERS_4,
-	.nrprocs	= RPCB_HIGHPROC_4,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures4),
 	.procs		= rpcb_procedures4
 };
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42..08e05a8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@
 	if (svc_serv_is_pooled(serv))
 		svc_pool_map_put();
 
-#if defined(CONFIG_NFS_V4_1)
-	svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
 	svc_unregister(serv);
 	kfree(serv->sv_pools);
 	kfree(serv);
@@ -1005,6 +1001,7 @@
 	rqstp->rq_splice_ok = 1;
 	/* Will be turned off only when NFSv4 Sessions are used */
 	rqstp->rq_usedeferral = 1;
+	rqstp->rq_dropme = false;
 
 	/* Setup reply header */
 	rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
@@ -1106,7 +1103,7 @@
 		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 
 		/* Encode reply */
-		if (*statp == rpc_drop_reply) {
+		if (rqstp->rq_dropme) {
 			if (procp->pc_release)
 				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
 			goto dropit;
@@ -1147,7 +1144,6 @@
  dropit:
 	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
 	dprintk("svc: svc_process dropit\n");
-	svc_drop(rqstp);
 	return 0;
 
 err_short_len:
@@ -1218,7 +1214,6 @@
 	struct kvec		*resv = &rqstp->rq_res.head[0];
 	struct svc_serv		*serv = rqstp->rq_server;
 	u32			dir;
-	int			error;
 
 	/*
 	 * Setup response xdr_buf.
@@ -1246,11 +1241,13 @@
 		return 0;
 	}
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	return svc_send(rqstp);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv))
+		return svc_send(rqstp);
+	else {
+		svc_drop(rqstp);
+		return 0;
+	}
 }
 
 #if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1261,9 @@
 {
 	struct kvec	*argv = &rqstp->rq_arg.head[0];
 	struct kvec	*resv = &rqstp->rq_res.head[0];
-	int 		error;
 
 	/* Build the svc_rqst used by the common processing routine */
-	rqstp->rq_xprt = serv->bc_xprt;
+	rqstp->rq_xprt = serv->sv_bc_xprt;
 	rqstp->rq_xid = req->rq_xid;
 	rqstp->rq_prot = req->rq_xprt->prot;
 	rqstp->rq_server = serv;
@@ -1292,12 +1288,15 @@
 	svc_getu32(argv);	/* XID */
 	svc_getnl(argv);	/* CALLDIR */
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
-	return bc_send(req);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv)) {
+		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+						sizeof(req->rq_snd_buf));
+		return bc_send(req);
+	} else {
+		/* Nothing to do to drop request */
+		return 0;
+	}
 }
 EXPORT_SYMBOL(bc_svc_process);
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3f2c555..ab86b79 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -13,6 +13,7 @@
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
 
@@ -128,6 +129,9 @@
 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
 		svcauth_unix_info_release(xprt);
 	put_net(xprt->xpt_net);
+	/* See comment on corresponding get in xs_setup_bc_tcp(): */
+	if (xprt->xpt_bc_xprt)
+		xprt_put(xprt->xpt_bc_xprt);
 	xprt->xpt_ops->xpo_free(xprt);
 	module_put(owner);
 }
@@ -303,6 +307,15 @@
 	list_del(&rqstp->rq_list);
 }
 
+static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+{
+	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
+		return true;
+	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
+		return xprt->xpt_ops->xpo_has_wspace(xprt);
+	return false;
+}
+
 /*
  * Queue up a transport with data pending. If there are idle nfsd
  * processes, wake 'em up.
@@ -315,8 +328,7 @@
 	struct svc_rqst	*rqstp;
 	int cpu;
 
-	if (!(xprt->xpt_flags &
-	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
+	if (!svc_xprt_has_something_to_do(xprt))
 		return;
 
 	cpu = get_cpu();
@@ -343,28 +355,7 @@
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(xprt->xpt_pool != NULL);
-	xprt->xpt_pool = pool;
 
-	/* Handle pending connection */
-	if (test_bit(XPT_CONN, &xprt->xpt_flags))
-		goto process;
-
-	/* Handle close in-progress */
-	if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
-		goto process;
-
-	/* Check if we have space to reply to a request */
-	if (!xprt->xpt_ops->xpo_has_wspace(xprt)) {
-		/* Don't enqueue while not enough space for reply */
-		dprintk("svc: no write space, transport %p  not enqueued\n",
-			xprt);
-		xprt->xpt_pool = NULL;
-		clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		goto out_unlock;
-	}
-
- process:
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
@@ -381,13 +372,11 @@
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
-		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
-		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
@@ -426,7 +415,6 @@
 void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	xprt->xpt_pool = NULL;
 	/* As soon as we clear busy, the xprt could be closed and
 	 * 'put', so we need a reference to call svc_xprt_enqueue with:
 	 */
@@ -722,7 +710,10 @@
 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
 		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_xprt(xprt);
-	} else if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+		/* Leave XPT_BUSY set on the dead xprt: */
+		goto out;
+	}
+	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = xprt->xpt_ops->xpo_accept(xprt);
 		if (newxpt) {
@@ -747,28 +738,23 @@
 			spin_unlock_bh(&serv->sv_lock);
 			svc_xprt_received(newxpt);
 		}
-		svc_xprt_received(xprt);
-	} else {
+	} else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
 			rqstp, pool->sp_id, xprt,
 			atomic_read(&xprt->xpt_ref.refcount));
 		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
-		if (rqstp->rq_deferred) {
-			svc_xprt_received(xprt);
+		if (rqstp->rq_deferred)
 			len = svc_deferred_recv(rqstp);
-		} else {
+		else
 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
-			svc_xprt_received(xprt);
-		}
 		dprintk("svc: got len=%d\n", len);
 	}
+	svc_xprt_received(xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
-	if (len == 0 || len == -EAGAIN) {
-		rqstp->rq_res.len = 0;
-		svc_xprt_release(rqstp);
-		return -EAGAIN;
-	}
+	if (len == 0 || len == -EAGAIN)
+		goto out;
+
 	clear_bit(XPT_OLD, &xprt->xpt_flags);
 
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
@@ -777,6 +763,10 @@
 	if (serv->sv_stats)
 		serv->sv_stats->netcnt++;
 	return len;
+out:
+	rqstp->rq_res.len = 0;
+	svc_xprt_release(rqstp);
+	return -EAGAIN;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -935,7 +925,12 @@
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
 		/* someone else will have to effect the close */
 		return;
-
+	/*
+	 * We expect svc_close_xprt() to work even when no threads are
+	 * running (e.g., while configuring the server before starting
+	 * any threads), so if the transport isn't busy, we delete
+	 * it ourselves:
+	 */
 	svc_delete_xprt(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
@@ -945,16 +940,16 @@
 	struct svc_xprt *xprt;
 	struct svc_xprt *tmp;
 
+	/*
+	 * The server is shutting down, and no more threads are running.
+	 * svc_xprt_enqueue() might still be running, but at worst it
+	 * will re-add the xprt to sp_sockets, which will soon get
+	 * freed.  So we don't bother with any more locking, and don't
+	 * leave the close to the (nonexistent) server threads:
+	 */
 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		if (test_bit(XPT_BUSY, &xprt->xpt_flags)) {
-			/* Waiting to be processed, but no threads left,
-			 * So just remove it from the waiting list
-			 */
-			list_del_init(&xprt->xpt_ready);
-			clear_bit(XPT_BUSY, &xprt->xpt_flags);
-		}
-		svc_close_xprt(xprt);
+		svc_delete_xprt(xprt);
 	}
 }
 
@@ -1028,6 +1023,7 @@
 	}
 	svc_xprt_get(rqstp->rq_xprt);
 	dr->xprt = rqstp->rq_xprt;
+	rqstp->rq_dropme = true;
 
 	dr->handle.revisit = svc_revisit;
 	return &dr->handle;
@@ -1065,14 +1061,13 @@
 	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
 		return NULL;
 	spin_lock(&xprt->xpt_lock);
-	clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	if (!list_empty(&xprt->xpt_deferred)) {
 		dr = list_entry(xprt->xpt_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(XPT_DEFERRED, &xprt->xpt_flags);
-	}
+	} else
+		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	spin_unlock(&xprt->xpt_lock);
 	return dr;
 }
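
A minimal sketch of the single-exit cleanup idiom that the svc_recv() hunks above adopt; struct request and handle_request() are hypothetical names, not part of the patch:

#include <errno.h>
#include <stddef.h>

struct request {
	size_t len;
	void *buf;
};

/* Every early-failure path funnels through "out", so the cleanup
 * (zeroing the reply length) is written exactly once, mirroring the
 * rqstp->rq_res.len = 0 / svc_xprt_release() path above. */
static int handle_request(struct request *req, int received)
{
	if (received == 0 || received == -EAGAIN)
		goto out;
	if (req->buf == NULL)
		goto out;
	req->len = (size_t)received;
	return received;
out:
	req->len = 0;
	return -EAGAIN;
}
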
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 4e9393c..7963569 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -118,7 +118,6 @@
 
 #define	DN_HASHBITS	6
 #define	DN_HASHMAX	(1<<DN_HASHBITS)
-#define	DN_HASHMASK	(DN_HASHMAX-1)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
 static spinlock_t	auth_domain_lock =
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 560677d..30916b0 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -30,7 +30,9 @@
 
 struct unix_domain {
 	struct auth_domain	h;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int	addr_changes;
+#endif /* CONFIG_NFSD_DEPRECATED */
 	/* other stuff later */
 };
 
@@ -64,7 +66,9 @@
 			return NULL;
 		}
 		new->h.flavour = &svcauth_unix;
+#ifdef CONFIG_NFSD_DEPRECATED
 		new->addr_changes = 0;
+#endif /* CONFIG_NFSD_DEPRECATED */
 		rv = auth_domain_lookup(name, &new->h);
 	}
 }
@@ -85,14 +89,15 @@
  */
 #define	IP_HASHBITS	8
 #define	IP_HASHMAX	(1<<IP_HASHBITS)
-#define	IP_HASHMASK	(IP_HASHMAX-1)
 
 struct ip_map {
 	struct cache_head	h;
 	char			m_class[8]; /* e.g. "nfsd" */
 	struct in6_addr		m_addr;
 	struct unix_domain	*m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	int			m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 };
 
 static void ip_map_put(struct kref *kref)
@@ -146,7 +151,9 @@
 
 	kref_get(&item->m_client->h.ref);
 	new->m_client = item->m_client;
+#ifdef CONFIG_NFSD_DEPRECATED
 	new->m_add_change = item->m_add_change;
+#endif /* CONFIG_NFSD_DEPRECATED */
 }
 static struct cache_head *ip_map_alloc(void)
 {
@@ -331,6 +338,7 @@
 	ip.h.flags = 0;
 	if (!udom)
 		set_bit(CACHE_NEGATIVE, &ip.h.flags);
+#ifdef CONFIG_NFSD_DEPRECATED
 	else {
 		ip.m_add_change = udom->addr_changes;
 		/* if this is from the legacy set_client system call,
@@ -339,6 +347,7 @@
 		if (expiry == NEVER)
 			ip.m_add_change++;
 	}
+#endif /* CONFIG_NFSD_DEPRECATED */
 	ip.h.expiry_time = expiry;
 	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
 				 hash_str(ipm->m_class, IP_HASHBITS) ^
@@ -358,6 +367,7 @@
 	return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
 }
 
+#ifdef CONFIG_NFSD_DEPRECATED
 int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
 {
 	struct unix_domain *udom;
@@ -402,8 +412,7 @@
 		return NULL;
 
 	if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
-		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
-			auth_domain_put(&ipm->m_client->h);
+		sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
 		rv = NULL;
 	} else {
 		rv = &ipm->m_client->h;
@@ -413,6 +422,7 @@
 	return rv;
 }
 EXPORT_SYMBOL_GPL(auth_unix_lookup);
+#endif /* CONFIG_NFSD_DEPRECATED */
 
 void svcauth_unix_purge(void)
 {
@@ -497,7 +507,6 @@
  */
 #define	GID_HASHBITS	8
 #define	GID_HASHMAX	(1<<GID_HASHBITS)
-#define	GID_HASHMASK	(GID_HASHMAX - 1)
 
 struct unix_gid {
 	struct cache_head	h;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e1..7bd3bbb 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
 					  struct net *, struct sockaddr *,
 					  int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key svc_key[2];
 static struct lock_class_key svc_slock_key[2];
@@ -324,19 +331,21 @@
 			len = onelen;
 			break;
 		}
-		if (toclose && strcmp(toclose, buf + len) == 0)
+		if (toclose && strcmp(toclose, buf + len) == 0) {
 			closesk = svsk;
-		else
+			svc_xprt_get(&closesk->sk_xprt);
+		} else
 			len += onelen;
 	}
 	spin_unlock_bh(&serv->sv_lock);
 
-	if (closesk)
+	if (closesk) {
 		/* Should unregister with portmap, but you cannot
 		 * unregister just one protocol...
 		 */
 		svc_close_xprt(&closesk->sk_xprt);
-	else if (toclose)
+		svc_xprt_put(&closesk->sk_xprt);
+	} else if (toclose)
 		return -ENOENT;
 	return len;
 }
@@ -985,15 +994,17 @@
 		vec[0] = rqstp->rq_arg.head[0];
 	} else {
 		/* REPLY */
-		if (svsk->sk_bc_xprt)
-			req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+		struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
+
+		if (bc_xprt)
+			req = xprt_lookup_rqst(bc_xprt, xid);
 
 		if (!req) {
 			printk(KERN_NOTICE
 				"%s: Got unrecognized reply: "
-				"calldir 0x%x sk_bc_xprt %p xid %08x\n",
+				"calldir 0x%x xpt_bc_xprt %p xid %08x\n",
 				__func__, ntohl(calldir),
-				svsk->sk_bc_xprt, xid);
+				bc_xprt, xid);
 			vec[0] = rqstp->rq_arg.head[0];
 			goto out;
 		}
@@ -1184,6 +1195,57 @@
 	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
 }
 
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+				       struct net *net,
+				       struct sockaddr *sa, int salen,
+				       int flags)
+{
+	return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+	.xpo_create = svc_bc_tcp_create,
+	.xpo_detach = svc_bc_tcp_sock_detach,
+	.xpo_free = svc_bc_sock_free,
+	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+	.xcl_name = "tcp-bc",
+	.xcl_owner = THIS_MODULE,
+	.xcl_ops = &svc_tcp_bc_ops,
+	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+	svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+	svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_create = svc_tcp_create,
 	.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1269,14 @@
 {
 	svc_reg_xprt_class(&svc_tcp_class);
 	svc_reg_xprt_class(&svc_udp_class);
+	svc_init_bc_xprt_sock();
 }
 
 void svc_cleanup_xprt_sock(void)
 {
 	svc_unreg_xprt_class(&svc_tcp_class);
 	svc_unreg_xprt_class(&svc_udp_class);
+	svc_cleanup_bc_xprt_sock();
 }
 
 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1573,45 @@
 	kfree(svsk);
 }
 
+#if defined(CONFIG_NFS_V4_1)
 /*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
  */
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+					     int protocol,
+					     struct net *net,
+					     struct sockaddr *sin, int len,
+					     int flags)
 {
 	struct svc_sock *svsk;
-	struct svc_xprt *xprt = NULL;
+	struct svc_xprt *xprt;
 
-	dprintk("svc: %s\n", __func__);
+	if (protocol != IPPROTO_TCP) {
+		printk(KERN_WARNING "svc: only TCP sockets"
+			" supported on shared back channel\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
 	if (!svsk)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	xprt = &svsk->sk_xprt;
-	if (prot == IPPROTO_TCP)
-		svc_xprt_init(&svc_tcp_class, xprt, serv);
-	else if (prot == IPPROTO_UDP)
-		svc_xprt_init(&svc_udp_class, xprt, serv);
-	else
-		BUG();
-out:
-	dprintk("svc: %s return %p\n", __func__, xprt);
+	svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+	serv->sv_bc_xprt = xprt;
+
 	return xprt;
 }
-EXPORT_SYMBOL_GPL(svc_sock_create);
 
 /*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
  */
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
-	if (xprt)
+	if (xprt) {
+		kfree(xprt->xpt_bc_sid);
 		kfree(container_of(xprt, struct svc_sock, sk_xprt));
+	}
 }
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841..679cd67 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@
 }
 EXPORT_SYMBOL_GPL(xdr_write_pages);
 
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+		__be32 *p, unsigned int len)
+{
+	if (len > iov->iov_len)
+		len = iov->iov_len;
+	if (p == NULL)
+		p = (__be32*)iov->iov_base;
+	xdr->p = p;
+	xdr->end = (__be32*)(iov->iov_base + len);
+	xdr->iov = iov;
+	xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+		unsigned int base, unsigned int len)
+{
+	unsigned int pgnr;
+	unsigned int maxlen;
+	unsigned int pgoff;
+	unsigned int pgend;
+	void *kaddr;
+
+	maxlen = xdr->buf->page_len;
+	if (base >= maxlen)
+		return -EINVAL;
+	maxlen -= base;
+	if (len > maxlen)
+		len = maxlen;
+
+	base += xdr->buf->page_base;
+
+	pgnr = base >> PAGE_SHIFT;
+	xdr->page_ptr = &xdr->buf->pages[pgnr];
+	kaddr = page_address(*xdr->page_ptr);
+
+	pgoff = base & ~PAGE_MASK;
+	xdr->p = (__be32*)(kaddr + pgoff);
+
+	pgend = pgoff + len;
+	if (pgend > PAGE_SIZE)
+		pgend = PAGE_SIZE;
+	xdr->end = (__be32*)(kaddr + pgend);
+	xdr->iov = NULL;
+	return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+	unsigned int newbase;
+
+	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+	newbase -= xdr->buf->page_base;
+
+	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+	if (xdr->page_ptr != NULL)
+		xdr_set_next_page(xdr);
+	else if (xdr->iov == xdr->buf->head) {
+		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+	}
+	return xdr->p != xdr->end;
+}
+
 /**
  * xdr_init_decode - Initialize an xdr_stream for decoding data.
  * @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@
  */
 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
 {
-	struct kvec *iov = buf->head;
-	unsigned int len = iov->iov_len;
-
-	if (len > buf->len)
-		len = buf->len;
 	xdr->buf = buf;
-	xdr->iov = iov;
-	xdr->p = p;
-	xdr->end = (__be32 *)((char *)iov->iov_base + len);
+	xdr->scratch.iov_base = NULL;
+	xdr->scratch.iov_len = 0;
+	if (buf->head[0].iov_len != 0)
+		xdr_set_iov(xdr, buf->head, p, buf->len);
+	else if (buf->page_len != 0)
+		xdr_set_page_base(xdr, 0, buf->len);
 }
 EXPORT_SYMBOL_GPL(xdr_init_decode);
 
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p = xdr->p;
 	__be32 *q = p + XDR_QUADLEN(nbytes);
 
 	if (unlikely(q > xdr->end || q < p))
 		return NULL;
+	xdr->p = q;
 	return p;
 }
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
 
 /**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+	xdr->scratch.iov_base = buf;
+	xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+	__be32 *p;
+	void *cpdest = xdr->scratch.iov_base;
+	size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+	if (nbytes > xdr->scratch.iov_len)
+		return NULL;
+	memcpy(cpdest, xdr->p, cplen);
+	cpdest += cplen;
+	nbytes -= cplen;
+	if (!xdr_set_next_buffer(xdr))
+		return NULL;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p == NULL)
+		return NULL;
+	memcpy(cpdest, p, nbytes);
+	return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
  * @xdr: pointer to xdr_stream struct
  * @nbytes: number of bytes of data to decode
  *
@@ -605,13 +699,16 @@
  */
 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
-	__be32 *p = xdr->p;
-	__be32 *q = p + XDR_QUADLEN(nbytes);
+	__be32 *p;
 
-	if (unlikely(q > xdr->end || q < p))
+	if (nbytes == 0)
+		return xdr->p;
+	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
 		return NULL;
-	xdr->p = q;
-	return p;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p != NULL)
+		return p;
+	return xdr_copy_to_scratch(xdr, nbytes);
 }
 EXPORT_SYMBOL_GPL(xdr_inline_decode);
 
@@ -671,16 +768,12 @@
  */
 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 {
-	char * kaddr = page_address(xdr->buf->pages[0]);
 	xdr_read_pages(xdr, len);
 	/*
 	 * Position current pointer at beginning of tail, and
 	 * set remaining message length.
 	 */
-	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
-		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
-	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
-	xdr->end = (__be32 *)((char *)xdr->p + len);
+	xdr_set_page_base(xdr, 0, len);
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
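
A minimal sketch of how a decoder might use the scratch-buffer support added above; decode_opaque16() and its 16-byte field are hypothetical, while xdr_set_scratch_buffer() and xdr_inline_decode() are the interfaces from this patch:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/sunrpc/xdr.h>
#include <linux/types.h>

/* Decode a 16-byte opaque field that may straddle a page boundary.
 * With a scratch buffer attached, xdr_inline_decode() copies a split
 * item into 'scratch' and still returns a linear pointer. */
static int decode_opaque16(struct xdr_stream *xdr, u8 *dst)
{
	u8 scratch[16];
	__be32 *p;

	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
	p = xdr_inline_decode(xdr, sizeof(scratch));
	if (p == NULL)
		return -EIO;			/* ran off the end of the xdr_buf */
	memcpy(dst, p, sizeof(scratch));	/* p may point into scratch */
	return 0;
}

In real callers the scratch buffer usually lives at least as long as the decode of the whole message; the stack buffer here is only for illustration.
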
 
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4c8f18a..856274d 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -965,6 +965,7 @@
 	xprt = kzalloc(size, GFP_KERNEL);
 	if (xprt == NULL)
 		goto out;
+	kref_init(&xprt->kref);
 
 	xprt->max_reqs = max_req;
 	xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1101,8 +1102,10 @@
 				-PTR_ERR(xprt));
 		return xprt;
 	}
+	if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
+		/* ->setup returned a pre-initialized xprt: */
+		return xprt;
 
-	kref_init(&xprt->kref);
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
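
The hunk above makes transport setup idempotent by guarding it with test_and_set_bit(XPRT_INITIALIZED, ...). A rough stand-alone illustration of that pattern; struct widget, WIDGET_INITIALIZED and widget_setup() are hypothetical:

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define WIDGET_INITIALIZED 0	/* bit number in widget->state */

struct widget {
	unsigned long state;	/* assumed zero-initialized at allocation */
	spinlock_t lock;
};

/* Safe to call more than once: only the caller that atomically flips
 * the bit from 0 to 1 performs the one-time initialization. */
static void widget_setup(struct widget *w)
{
	if (test_and_set_bit(WIDGET_INITIALIZED, &w->state))
		return;		/* someone already initialized it */
	spin_lock_init(&w->lock);
}
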
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index dfcab5a..c431f5a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -770,7 +770,7 @@
 
 	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
 
-	cancel_rearming_delayed_work(&transport->connect_worker);
+	cancel_delayed_work_sync(&transport->connect_worker);
 
 	xs_close(xprt);
 	xs_free_peer_addresses(xprt);
@@ -2359,6 +2359,15 @@
 	struct svc_sock *bc_sock;
 	struct rpc_xprt *ret;
 
+	if (args->bc_xprt->xpt_bc_xprt) {
+		/*
+		 * This server connection already has a backchannel
+		 * export; we can't create a new one, as we wouldn't be
+		 * able to match replies based on xid any more.  So,
+		 * reuse the already-existing one:
+		 */
+		 return args->bc_xprt->xpt_bc_xprt;
+	}
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
 		return xprt;
@@ -2375,16 +2384,6 @@
 	xprt->reestablish_timeout = 0;
 	xprt->idle_timeout = 0;
 
-	/*
-	 * The backchannel uses the same socket connection as the
-	 * forechannel
-	 */
-	xprt->bc_xprt = args->bc_xprt;
-	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
-	bc_sock->sk_bc_xprt = xprt;
-	transport->sock = bc_sock->sk_sock;
-	transport->inet = bc_sock->sk_sk;
-
 	xprt->ops = &bc_tcp_ops;
 
 	switch (addr->sa_family) {
@@ -2407,6 +2406,20 @@
 			xprt->address_strings[RPC_DISPLAY_PROTO]);
 
 	/*
+	 * Once we've associated a backchannel xprt with a connection,
+	 * we want to keep it around as long as the connection
+	 * lasts, in case we need to start using it for a backchannel
+	 * again; this reference won't be dropped until bc_xprt is
+	 * destroyed.
+	 */
+	xprt_get(xprt);
+	args->bc_xprt->xpt_bc_xprt = xprt;
+	xprt->bc_xprt = args->bc_xprt;
+	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
+	transport->sock = bc_sock->sk_sock;
+	transport->inet = bc_sock->sk_sk;
+
+	/*
 	 * Since we don't want connections for the backchannel, we set
 	 * the xprt status to connected
 	 */
@@ -2415,6 +2428,7 @@
 
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	xprt_put(xprt);
 	ret = ERR_PTR(-EINVAL);
 out_err:
 	xprt_free(xprt);
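
A rough sketch of the create-once/reuse pattern the backchannel code above applies to xpt_bc_xprt, where the cached pointer pins the object with a reference until the connection is torn down; struct conn, struct chan and their helpers are hypothetical, kref is the ordinary kernel refcount API:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct chan {
	struct kref ref;
};

struct conn {
	struct chan *cached;	/* owns one reference while non-NULL */
};

static void chan_release(struct kref *ref)
{
	kfree(container_of(ref, struct chan, ref));
}

/* Return the connection's channel: create it on first use, hand back
 * the cached one on every later call (cf. reusing xpt_bc_xprt above). */
static struct chan *conn_get_chan(struct conn *c)
{
	if (c->cached)
		return c->cached;
	c->cached = kzalloc(sizeof(*c->cached), GFP_KERNEL);
	if (!c->cached)
		return NULL;
	kref_init(&c->cached->ref);	/* reference held by the cache */
	return c->cached;
}

/* The cached reference is dropped only when the connection itself is
 * destroyed, mirroring "won't be dropped until bc_xprt is destroyed". */
static void conn_destroy(struct conn *c)
{
	if (c->cached)
		kref_put(&c->cached->ref, chan_release);
}
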
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb8895..d5e1e0b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 	*props = algo->desc.sadb_alg_id;
 
@@ -2187,7 +2189,7 @@
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (link->dump == NULL)
 			return -EINVAL;
 
diff --git a/scripts/.gitignore b/scripts/.gitignore
index c5d5db5..e2741d2 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -7,3 +7,4 @@
 bin2c
 unifdef
 ihex2fw
+recordmcount
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 4c72c11..1c702ca 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -200,6 +200,29 @@
 cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -f -9 > $@) || \
 	(rm -f $@ ; false)
 
+# DTC
+# ---------------------------------------------------------------------------
+
+# Generate an assembly file to wrap the output of the device tree compiler
+quiet_cmd_dt_S_dtb= DTB    $@
+cmd_dt_S_dtb=						\
+(							\
+	echo '\#include <asm-generic/vmlinux.lds.h>'; 	\
+	echo '.section .dtb.init.rodata,"a"';		\
+	echo '.balign STRUCT_ALIGNMENT';		\
+	echo '.global __dtb_$(*F)_begin';		\
+	echo '__dtb_$(*F)_begin:';			\
+	echo '.incbin "$<" ';				\
+	echo '__dtb_$(*F)_end:';			\
+	echo '.global __dtb_$(*F)_end';			\
+	echo '.balign STRUCT_ALIGNMENT'; 		\
+) > $@
+
+$(obj)/%.dtb.S: $(obj)/%.dtb
+	$(call cmd,dt_S_dtb)
+
+quiet_cmd_dtc = DTC     $@
+cmd_dtc = $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) $<
 
 # Bzip2
 # ---------------------------------------------------------------------------
@@ -239,6 +262,34 @@
 	lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
 	(rm -f $@ ; false)
 
+# XZ
+# ---------------------------------------------------------------------------
+# Use xzkern to compress the kernel image and xzmisc to compress other things.
+#
+# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
+# of the kernel decompressor. A BCJ filter is used if it is available for
+# the target architecture. xzkern also appends the uncompressed size of the data
+# using size_append. The .xz format has the size information available at
+# the end of the file too, but it's in a more complex format and it's good to
+# avoid changing the part of the boot code that reads the uncompressed size.
+# Note that the bytes added by size_append will make the xz tool think that
+# the file is corrupt. This is expected.
+#
+# xzmisc doesn't use size_append, so it can be used to create normal .xz
+# files. xzmisc uses a smaller LZMA2 dictionary than xzkern, because a very
+# big dictionary would increase the memory usage too much in the multi-call
+# decompression mode. A BCJ filter isn't used either.
+quiet_cmd_xzkern = XZKERN  $@
+cmd_xzkern = (cat $(filter-out FORCE,$^) | \
+	sh $(srctree)/scripts/xz_wrap.sh && \
+	$(call size_append, $(filter-out FORCE,$^))) > $@ || \
+	(rm -f $@ ; false)
+
+quiet_cmd_xzmisc = XZMISC  $@
+cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
+	xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
+	(rm -f $@ ; false)
+
 # misc stuff
 # ---------------------------------------------------------------------------
 quote:="
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index ea26b23..c9a16ab 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -138,38 +138,36 @@
 	printf("cmd_%s := %s\n\n", target, cmdline);
 }
 
-char * str_config  = NULL;
-int    size_config = 0;
-int    len_config  = 0;
+struct item {
+	struct item	*next;
+	unsigned int	len;
+	unsigned int	hash;
+	char		name[0];
+};
 
-/*
- * Grow the configuration string to a desired length.
- * Usually the first growth is plenty.
- */
-static void grow_config(int len)
+#define HASHSZ 256
+static struct item *hashtab[HASHSZ];
+
+static unsigned int strhash(const char *str, unsigned int sz)
 {
-	while (len_config + len > size_config) {
-		if (size_config == 0)
-			size_config = 2048;
-		str_config = realloc(str_config, size_config *= 2);
-		if (str_config == NULL)
-			{ perror("fixdep:malloc"); exit(1); }
-	}
+	/* fnv32 hash */
+	unsigned int i, hash = 2166136261U;
+
+	for (i = 0; i < sz; i++)
+		hash = (hash ^ str[i]) * 0x01000193;
+	return hash;
 }
 
-
-
 /*
  * Lookup a value in the configuration string.
  */
-static int is_defined_config(const char * name, int len)
+static int is_defined_config(const char *name, int len, unsigned int hash)
 {
-	const char * pconfig;
-	const char * plast = str_config + len_config - len;
-	for ( pconfig = str_config + 1; pconfig < plast; pconfig++ ) {
-		if (pconfig[ -1] == '\n'
-		&&  pconfig[len] == '\n'
-		&&  !memcmp(pconfig, name, len))
+	struct item *aux;
+
+	for (aux = hashtab[hash % HASHSZ]; aux; aux = aux->next) {
+		if (aux->hash == hash && aux->len == len &&
+		    memcmp(aux->name, name, len) == 0)
 			return 1;
 	}
 	return 0;
@@ -178,13 +176,19 @@
 /*
  * Add a new value to the configuration string.
  */
-static void define_config(const char * name, int len)
+static void define_config(const char *name, int len, unsigned int hash)
 {
-	grow_config(len + 1);
+	struct item *aux = malloc(sizeof(*aux) + len);
 
-	memcpy(str_config+len_config, name, len);
-	len_config += len;
-	str_config[len_config++] = '\n';
+	if (!aux) {
+		perror("fixdep:malloc");
+		exit(1);
+	}
+	memcpy(aux->name, name, len);
+	aux->len = len;
+	aux->hash = hash;
+	aux->next = hashtab[hash % HASHSZ];
+	hashtab[hash % HASHSZ] = aux;
 }
 
 /*
@@ -192,40 +196,49 @@
  */
 static void clear_config(void)
 {
-	len_config = 0;
-	define_config("", 0);
+	struct item *aux, *next;
+	unsigned int i;
+
+	for (i = 0; i < HASHSZ; i++) {
+		for (aux = hashtab[i]; aux; aux = next) {
+			next = aux->next;
+			free(aux);
+		}
+		hashtab[i] = NULL;
+	}
 }
 
 /*
  * Record the use of a CONFIG_* word.
  */
-static void use_config(char *m, int slen)
+static void use_config(const char *m, int slen)
 {
-	char s[PATH_MAX];
-	char *p;
+	unsigned int hash = strhash(m, slen);
+	int c, i;
 
-	if (is_defined_config(m, slen))
+	if (is_defined_config(m, slen, hash))
 	    return;
 
-	define_config(m, slen);
+	define_config(m, slen, hash);
 
-	memcpy(s, m, slen); s[slen] = 0;
-
-	for (p = s; p < s + slen; p++) {
-		if (*p == '_')
-			*p = '/';
+	printf("    $(wildcard include/config/");
+	for (i = 0; i < slen; i++) {
+		c = m[i];
+		if (c == '_')
+			c = '/';
 		else
-			*p = tolower((int)*p);
+			c = tolower(c);
+		putchar(c);
 	}
-	printf("    $(wildcard include/config/%s.h) \\\n", s);
+	printf(".h) \\\n");
 }
 
-static void parse_config_file(char *map, size_t len)
+static void parse_config_file(const char *map, size_t len)
 {
-	int *end = (int *) (map + len);
+	const int *end = (const int *) (map + len);
 	/* start at +1, so that p can never be < map */
-	int *m   = (int *) map + 1;
-	char *p, *q;
+	const int *m   = (const int *) map + 1;
+	const char *p, *q;
 
 	for (; m < end; m++) {
 		if (*m == INT_CONF) { p = (char *) m  ; goto conf; }
@@ -265,7 +278,7 @@
 	return memcmp(s + slen - sublen, sub, sublen);
 }
 
-static void do_config_file(char *filename)
+static void do_config_file(const char *filename)
 {
 	struct stat st;
 	int fd;
@@ -273,7 +286,7 @@
 
 	fd = open(filename, O_RDONLY);
 	if (fd < 0) {
-		fprintf(stderr, "fixdep: ");
+		fprintf(stderr, "fixdep: error opening config file: ");
 		perror(filename);
 		exit(2);
 	}
@@ -344,11 +357,15 @@
 
 	fd = open(depfile, O_RDONLY);
 	if (fd < 0) {
-		fprintf(stderr, "fixdep: ");
+		fprintf(stderr, "fixdep: error opening depfile: ");
 		perror(depfile);
 		exit(2);
 	}
-	fstat(fd, &st);
+	if (fstat(fd, &st) < 0) {
+		fprintf(stderr, "fixdep: error fstat'ing depfile: ");
+		perror(depfile);
+		exit(2);
+	}
 	if (st.st_size == 0) {
 		fprintf(stderr,"fixdep: %s is empty\n",depfile);
 		close(fd);
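
For reference, a small userspace check of the FNV-1a hash that the fixdep change above switches to; main() and the sample CONFIG_ names are illustrative only, strhash() is copied from the patch:

#include <stdio.h>
#include <string.h>

/* Same FNV-1a ("fnv32") hash as the new fixdep code. */
static unsigned int strhash(const char *str, unsigned int sz)
{
	unsigned int i, hash = 2166136261U;

	for (i = 0; i < sz; i++)
		hash = (hash ^ str[i]) * 0x01000193;
	return hash;
}

int main(void)
{
	const char *cfg[] = { "CONFIG_SMP", "CONFIG_PM", "CONFIG_NET" };
	unsigned int i;

	/* Show which of the 256 buckets each symbol would land in. */
	for (i = 0; i < 3; i++)
		printf("%-12s -> bucket %u\n",
		       cfg[i], strhash(cfg[i], strlen(cfg[i])) % 256);
	return 0;
}
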
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e3c7fc0..4c0383d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -859,7 +859,7 @@
 				$av_preprocessor = 0;
 			}
 
-		} elsif ($cur =~ /^(\(\s*$Type\s*)\)/) {
+		} elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
 			print "CAST($1)\n" if ($dbg_values > 1);
 			push(@av_paren_type, $type);
 			$type = 'C';
@@ -2743,6 +2743,11 @@
 			WARN("plain inline is preferred over $1\n" . $herecurr);
 		}
 
+# Check for __attribute__ packed, prefer __packed
+		if ($line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
+			WARN("__packed is preferred over __attribute__((packed))\n" . $herecurr);
+		}
+
 # check for sizeof(&)
 		if ($line =~ /\bsizeof\s*\(\s*\&/) {
 			WARN("sizeof(& should be avoided\n" . $herecurr);
@@ -2785,10 +2790,15 @@
 		}
 
 # check for pointless casting of kmalloc return
-		if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) {
+		if ($line =~ /\*\s*\)\s*[kv][czm]alloc(_node){0,1}\b/) {
 			WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
 		}
 
+# check for multiple semicolons
+		if ($line =~ /;\s*;\s*$/) {
+			WARN("Statements terminations use 1 semicolon\n" . $herecurr);
+		}
+
 # check for gcc specific __FUNCTION__
 		if ($line =~ /__FUNCTION__/) {
 			WARN("__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr);
@@ -2892,6 +2902,11 @@
 				ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
 			}
 		}
+
+		if ($line =~ /debugfs_create_file.*S_IWUGO/ ||
+		    $line =~ /DEVICE_ATTR.*S_IWUGO/ ) {
+			WARN("Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+		}
 	}
 
 	# If we have no input at all, then there is nothing to report on
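
Illustrative C fragments for the new checkpatch warnings above; the struct names and the commented examples are made up:

#include <linux/compiler.h>

struct wire_hdr_old {
	unsigned char type;
	unsigned int len;
} __attribute__((packed));	/* now warned: "__packed is preferred" */

struct wire_hdr_new {
	unsigned char type;
	unsigned int len;
} __packed;			/* preferred spelling */

/* Also newly warned about:
 *	return 0;;				<- doubled semicolon
 *	p = (struct foo *)vzalloc(size);	<- needless cast of allocator return
 *	DEVICE_ATTR(reset, S_IWUGO, NULL, reset_store);  <- world-writable sysfs file
 */
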
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index 6bb42e7..3ab316e 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -6,7 +6,7 @@
 # and listed below so they are ignored.
 #
 # Usage:
-# syscallchk gcc gcc-options
+# checksyscalls.sh gcc gcc-options
 #
 
 ignore_list() {
@@ -204,5 +204,5 @@
 \#endif/p' $1
 }
 
-(ignore_list && syscall_list ${srctree}/arch/x86/include/asm/unistd_32.h) | \
+(ignore_list && syscall_list $(dirname $0)/../arch/x86/include/asm/unistd_32.h) | \
 $* -E -x c - > /dev/null
diff --git a/scripts/coccinelle/misc/doubleinit.cocci b/scripts/coccinelle/misc/doubleinit.cocci
index 55d7dc1..156b20a 100644
--- a/scripts/coccinelle/misc/doubleinit.cocci
+++ b/scripts/coccinelle/misc/doubleinit.cocci
@@ -7,7 +7,7 @@
 // Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
 // Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
-// Comments:
+// Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise
 // Options: -no_includes -include_headers
 
 virtual org
@@ -19,7 +19,7 @@
 expression E;
 @@
 
-struct I s =@p0 { ... .fld@p = E, ...};
+struct I s =@p0 { ..., .fld@p = E, ...};
 
 @s@
 identifier I, s, r.fld;
@@ -27,7 +27,7 @@
 expression E;
 @@
 
-struct I s =@p0 { ... .fld@p = E, ...};
+struct I s =@p0 { ..., .fld@p = E, ...};
 
 @script:python depends on org@
 p0 << r.p0;
diff --git a/scripts/coccinelle/null/deref_null.cocci b/scripts/coccinelle/null/deref_null.cocci
index 9969d76..cdac6cf 100644
--- a/scripts/coccinelle/null/deref_null.cocci
+++ b/scripts/coccinelle/null/deref_null.cocci
@@ -11,21 +11,10 @@
 // Options:
 
 virtual context
-virtual patch
 virtual org
 virtual report
 
-@initialize:python depends on !context && patch && !org && !report@
-
-import sys
-print >> sys.stderr, "This semantic patch does not support the 'patch' mode."
-
-@depends on patch@
-@@
-
-this_rule_should_never_matches();
-
-@ifm depends on !patch@
+@ifm@
 expression *E;
 statement S1,S2;
 position p1;
@@ -35,7 +24,7 @@
 
 // The following two rules are separate, because both can match a single
 // expression in different ways
-@pr1 depends on !patch expression@
+@pr1 expression@
 expression *ifm.E;
 identifier f;
 position p1;
@@ -43,7 +32,7 @@
 
  (E != NULL && ...) ? <+...E->f@p1...+> : ...
 
-@pr2 depends on !patch expression@
+@pr2 expression@
 expression *ifm.E;
 identifier f;
 position p2;
@@ -59,7 +48,7 @@
 
 // For org and report modes
 
-@r depends on !context && !patch && (org || report) exists@
+@r depends on !context && (org || report) exists@
 expression subE <= ifm.E;
 expression *ifm.E;
 expression E1,E2;
@@ -99,7 +88,7 @@
 }
 else S3
 
-@script:python depends on !context && !patch && !org && report@
+@script:python depends on !context && !org && report@
 p << r.p;
 p1 << ifm.p1;
 x << ifm.E;
@@ -109,7 +98,7 @@
 coccilib.report.print_report(p[0], msg)
 cocci.include_match(False)
 
-@script:python depends on !context && !patch && org && !report@
+@script:python depends on !context && org && !report@
 p << r.p;
 p1 << ifm.p1;
 x << ifm.E;
@@ -120,7 +109,7 @@
 cocci.print_main(msg_safe,p)
 cocci.include_match(False)
 
-@s depends on !context && !patch && (org || report) exists@
+@s depends on !context && (org || report) exists@
 expression subE <= ifm.E;
 expression *ifm.E;
 expression E1,E2;
@@ -159,7 +148,7 @@
 }
 else S3
 
-@script:python depends on !context && !patch && !org && report@
+@script:python depends on !context && !org && report@
 p << s.p;
 p1 << ifm.p1;
 x << ifm.E;
@@ -168,7 +157,7 @@
 msg="ERROR: %s is NULL but dereferenced." % (x)
 coccilib.report.print_report(p[0], msg)
 
-@script:python depends on !context && !patch && org && !report@
+@script:python depends on !context && org && !report@
 p << s.p;
 p1 << ifm.p1;
 x << ifm.E;
@@ -180,7 +169,7 @@
 
 // For context mode
 
-@depends on context && !patch && !org && !report exists@
+@depends on context && !org && !report exists@
 expression subE <= ifm.E;
 expression *ifm.E;
 expression E1,E2;
@@ -223,7 +212,7 @@
 // The following three rules are duplicates of ifm, pr1 and pr2 respectively.
 // It is needed because the previous rule has already made a "change".
 
-@ifm1 depends on !patch@
+@ifm1@
 expression *E;
 statement S1,S2;
 position p1;
@@ -231,7 +220,7 @@
 
 if@p1 ((E == NULL && ...) || ...) S1 else S2
 
-@pr11 depends on !patch expression@
+@pr11 expression@
 expression *ifm1.E;
 identifier f;
 position p1;
@@ -239,7 +228,7 @@
 
  (E != NULL && ...) ? <+...E->f@p1...+> : ...
 
-@pr12 depends on !patch expression@
+@pr12 expression@
 expression *ifm1.E;
 identifier f;
 position p2;
@@ -253,7 +242,7 @@
  sizeof(<+...E->f@p2...+>)
 )
 
-@depends on context && !patch && !org && !report exists@
+@depends on context && !org && !report exists@
 expression subE <= ifm1.E;
 expression *ifm1.E;
 expression E1,E2;
diff --git a/scripts/config b/scripts/config
index 608d7fd..a7c7c4b 100755
--- a/scripts/config
+++ b/scripts/config
@@ -10,8 +10,10 @@
 	--enable|-e option   Enable option
 	--disable|-d option  Disable option
 	--module|-m option   Turn option into a module
-	--set-str option value
-	                     Set option to "value"
+	--set-str option string
+	                     Set option to "string"
+	--set-val option value
+	                     Set option to value
 	--state|-s option    Print state of option (n,y,m,undef)
 
 	--enable-after|-E beforeopt option
@@ -86,7 +88,7 @@
 		B=$ARG
 		shift 2
 		;;
-	--*)
+	-*)
 		checkarg "$1"
 		shift
 		;;
@@ -109,6 +111,11 @@
 		shift
 		;;
 
+	--set-val)
+		set_var "CONFIG_$ARG" "CONFIG_$ARG=$1"
+		shift
+		;;
+
 	--state|-s)
 		if grep -q "# CONFIG_$ARG is not set" $FN ; then
 			echo n
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index 01cdb36..04a31c1 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -4,7 +4,7 @@
 always		:= $(hostprogs-y)
 
 dtc-objs	:= dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
-		   srcpos.o checks.o
+		   srcpos.o checks.o util.o
 dtc-objs	+= dtc-lexer.lex.o dtc-parser.tab.o
 
 # Source files need to get at the userspace version of libfdt_env.h to compile
@@ -19,6 +19,7 @@
 HOSTCFLAGS_livetree.o := $(HOSTCFLAGS_DTC)
 HOSTCFLAGS_srcpos.o := $(HOSTCFLAGS_DTC)
 HOSTCFLAGS_treesource.o := $(HOSTCFLAGS_DTC)
+HOSTCFLAGS_util.o := $(HOSTCFLAGS_DTC)
 
 HOSTCFLAGS_dtc-lexer.lex.o := $(HOSTCFLAGS_DTC)
 HOSTCFLAGS_dtc-parser.tab.o := $(HOSTCFLAGS_DTC)
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 9548579..a662a00 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -278,32 +278,112 @@
 }
 PROP_CHECK(property_name_chars, PROPNODECHARS, ERROR);
 
-static void check_explicit_phandles(struct check *c, struct node *root,
-					  struct node *node)
+#define DESCLABEL_FMT	"%s%s%s%s%s"
+#define DESCLABEL_ARGS(node,prop,mark)		\
+	((mark) ? "value of " : ""),		\
+	((prop) ? "'" : ""), \
+	((prop) ? (prop)->name : ""), \
+	((prop) ? "' in " : ""), (node)->fullpath
+
+static void check_duplicate_label(struct check *c, struct node *dt,
+				  const char *label, struct node *node,
+				  struct property *prop, struct marker *mark)
 {
-	struct property *prop;
+	struct node *othernode = NULL;
+	struct property *otherprop = NULL;
+	struct marker *othermark = NULL;
+
+	othernode = get_node_by_label(dt, label);
+
+	if (!othernode)
+		otherprop = get_property_by_label(dt, label, &othernode);
+	if (!othernode)
+		othermark = get_marker_label(dt, label, &othernode,
+					       &otherprop);
+
+	if (!othernode)
+		return;
+
+	if ((othernode != node) || (otherprop != prop) || (othermark != mark))
+		FAIL(c, "Duplicate label '%s' on " DESCLABEL_FMT
+		     " and " DESCLABEL_FMT,
+		     label, DESCLABEL_ARGS(node, prop, mark),
+		     DESCLABEL_ARGS(othernode, otherprop, othermark));
+}
+
+static void check_duplicate_label_node(struct check *c, struct node *dt,
+				       struct node *node)
+{
+	struct label *l;
+
+	for_each_label(node->labels, l)
+		check_duplicate_label(c, dt, l->label, node, NULL, NULL);
+}
+static void check_duplicate_label_prop(struct check *c, struct node *dt,
+				       struct node *node, struct property *prop)
+{
+	struct marker *m = prop->val.markers;
+	struct label *l;
+
+	for_each_label(prop->labels, l)
+		check_duplicate_label(c, dt, l->label, node, prop, NULL);
+
+	for_each_marker_of_type(m, LABEL)
+		check_duplicate_label(c, dt, m->ref, node, prop, m);
+}
+CHECK(duplicate_label, NULL, check_duplicate_label_node,
+      check_duplicate_label_prop, NULL, ERROR);
+
+static void check_explicit_phandles(struct check *c, struct node *root,
+				    struct node *node, struct property *prop)
+{
+	struct marker *m;
 	struct node *other;
 	cell_t phandle;
 
-	prop = get_property(node, "linux,phandle");
-	if (! prop)
-		return; /* No phandle, that's fine */
+	if (!streq(prop->name, "phandle")
+	    && !streq(prop->name, "linux,phandle"))
+		return;
 
 	if (prop->val.len != sizeof(cell_t)) {
-		FAIL(c, "%s has bad length (%d) linux,phandle property",
-		     node->fullpath, prop->val.len);
+		FAIL(c, "%s has bad length (%d) %s property",
+		     node->fullpath, prop->val.len, prop->name);
+		return;
+	}
+
+	m = prop->val.markers;
+	for_each_marker_of_type(m, REF_PHANDLE) {
+		assert(m->offset == 0);
+		if (node != get_node_by_ref(root, m->ref))
+			/* "Set this node's phandle equal to some
+			 * other node's phandle".  That's nonsensical
+			 * by construction. */ {
+			FAIL(c, "%s in %s is a reference to another node",
+			     prop->name, node->fullpath);
+			return;
+		}
+		/* But setting this node's phandle equal to its own
+		 * phandle is allowed - that means allocate a unique
+		 * phandle for this node, even if it's not otherwise
+		 * referenced.  The value will be filled in later, so
+		 * no further checking for now. */
 		return;
 	}
 
 	phandle = propval_cell(prop);
+
 	if ((phandle == 0) || (phandle == -1)) {
-		FAIL(c, "%s has invalid linux,phandle value 0x%x",
-		     node->fullpath, phandle);
+		FAIL(c, "%s has bad value (0x%x) in %s property",
+		     node->fullpath, phandle, prop->name);
 		return;
 	}
 
+	if (node->phandle && (node->phandle != phandle))
+		FAIL(c, "%s has %s property which replaces existing phandle information",
+		     node->fullpath, prop->name);
+
 	other = get_node_by_phandle(root, phandle);
-	if (other) {
+	if (other && (other != node)) {
 		FAIL(c, "%s has duplicated phandle 0x%x (seen before at %s)",
 		     node->fullpath, phandle, other->fullpath);
 		return;
@@ -311,7 +391,7 @@
 
 	node->phandle = phandle;
 }
-NODE_CHECK(explicit_phandles, NULL, ERROR);
+PROP_CHECK(explicit_phandles, NULL, ERROR);
 
 static void check_name_properties(struct check *c, struct node *root,
 				  struct node *node)
@@ -549,6 +629,9 @@
 	&duplicate_node_names, &duplicate_property_names,
 	&node_name_chars, &node_name_format, &property_name_chars,
 	&name_is_string, &name_properties,
+
+	&duplicate_label,
+
 	&explicit_phandles,
 	&phandle_references, &path_references,
 
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index a627bbe..e866ea5 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -18,7 +18,7 @@
  *                                                                   USA
  */
 
-%option noyywrap noinput nounput yylineno
+%option noyywrap nounput noinput never-interactive
 
 %x INCLUDE
 %x BYTESTRING
@@ -38,6 +38,13 @@
 #include "srcpos.h"
 #include "dtc-parser.tab.h"
 
+YYLTYPE yylloc;
+
+/* CAUTION: this will stop working if we ever use yyless() or yyunput() */
+#define	YY_USER_ACTION \
+	{ \
+		srcpos_update(&yylloc, yytext, yyleng); \
+	}
 
 /*#define LEXDEBUG	1*/
 
@@ -47,15 +54,10 @@
 #define DPRINT(fmt, ...)	do { } while (0)
 #endif
 
-static int dts_version; /* = 0 */
+static int dts_version = 1;
 
-#define BEGIN_DEFAULT()	if (dts_version == 0) { \
-				DPRINT("<INITIAL>\n"); \
-				BEGIN(INITIAL); \
-			} else { \
-				DPRINT("<V1>\n"); \
+#define BEGIN_DEFAULT()		DPRINT("<V1>\n"); \
 				BEGIN(V1); \
-			}
 
 static void push_input_file(const char *filename);
 static int pop_input_file(void);
@@ -75,18 +77,13 @@
 		}
 
 <*>{STRING}	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("String: %s\n", yytext);
 			yylval.data = data_copy_escape_string(yytext+1,
 					yyleng-2);
-			yylloc.first_line = yylineno;
 			return DT_STRING;
 		}
 
 <*>"/dts-v1/"	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Keyword: /dts-v1/\n");
 			dts_version = 1;
 			BEGIN_DEFAULT();
@@ -94,106 +91,57 @@
 		}
 
 <*>"/memreserve/"	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Keyword: /memreserve/\n");
 			BEGIN_DEFAULT();
 			return DT_MEMRESERVE;
 		}
 
 <*>{LABEL}:	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Label: %s\n", yytext);
-			yylval.labelref = strdup(yytext);
+			yylval.labelref = xstrdup(yytext);
 			yylval.labelref[yyleng-1] = '\0';
 			return DT_LABEL;
 		}
 
-<INITIAL>[bodh]# {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			if (*yytext == 'b')
-				yylval.cbase = 2;
-			else if (*yytext == 'o')
-				yylval.cbase = 8;
-			else if (*yytext == 'd')
-				yylval.cbase = 10;
-			else
-				yylval.cbase = 16;
-			DPRINT("Base: %d\n", yylval.cbase);
-			return DT_BASE;
-		}
-
-<INITIAL>[0-9a-fA-F]+	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			yylval.literal = strdup(yytext);
-			DPRINT("Literal: '%s'\n", yylval.literal);
-			return DT_LEGACYLITERAL;
-		}
-
 <V1>[0-9]+|0[xX][0-9a-fA-F]+      {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			yylval.literal = strdup(yytext);
+			yylval.literal = xstrdup(yytext);
 			DPRINT("Literal: '%s'\n", yylval.literal);
 			return DT_LITERAL;
 		}
 
-\&{LABEL}	{	/* label reference */
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
+<*>\&{LABEL}	{	/* label reference */
 			DPRINT("Ref: %s\n", yytext+1);
-			yylval.labelref = strdup(yytext+1);
+			yylval.labelref = xstrdup(yytext+1);
 			return DT_REF;
 		}
 
-"&{/"{PATHCHAR}+\}	{	/* new-style path reference */
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
+<*>"&{/"{PATHCHAR}+\}	{	/* new-style path reference */
 			yytext[yyleng-1] = '\0';
 			DPRINT("Ref: %s\n", yytext+2);
-			yylval.labelref = strdup(yytext+2);
-			return DT_REF;
-		}
-
-<INITIAL>"&/"{PATHCHAR}+ {	/* old-style path reference */
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			DPRINT("Ref: %s\n", yytext+1);
-			yylval.labelref = strdup(yytext+1);
+			yylval.labelref = xstrdup(yytext+2);
 			return DT_REF;
 		}
 
 <BYTESTRING>[0-9a-fA-F]{2} {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			yylval.byte = strtol(yytext, NULL, 16);
 			DPRINT("Byte: %02x\n", (int)yylval.byte);
 			return DT_BYTE;
 		}
 
 <BYTESTRING>"]"	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("/BYTESTRING\n");
 			BEGIN_DEFAULT();
 			return ']';
 		}
 
 <PROPNODENAME>{PROPNODECHAR}+ {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("PropNodeName: %s\n", yytext);
-			yylval.propnodename = strdup(yytext);
+			yylval.propnodename = xstrdup(yytext);
 			BEGIN_DEFAULT();
 			return DT_PROPNODENAME;
 		}
 
 "/incbin/"	{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Binary Include\n");
 			return DT_INCBIN;
 		}
@@ -203,8 +151,6 @@
 <*>{LINECOMMENT}+ /* eat C++-style comments */
 
 <*>.		{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Char: %c (\\x%02x)\n", yytext[0],
 				(unsigned)yytext[0]);
 			if (yytext[0] == '[') {
@@ -221,100 +167,25 @@
 
 %%
 
-
-/*
- * Stack of nested include file contexts.
- */
-
-struct incl_file {
-	struct dtc_file *file;
-	YY_BUFFER_STATE yy_prev_buf;
-	int yy_prev_lineno;
-	struct incl_file *prev;
-};
-
-static struct incl_file *incl_file_stack;
-
-
-/*
- * Detect infinite include recursion.
- */
-#define MAX_INCLUDE_DEPTH	(100)
-
-static int incl_depth = 0;
-
-
 static void push_input_file(const char *filename)
 {
-	struct incl_file *incl_file;
-	struct dtc_file *newfile;
-	struct search_path search, *searchptr = NULL;
-
 	assert(filename);
 
-	if (incl_depth++ >= MAX_INCLUDE_DEPTH)
-		die("Includes nested too deeply");
+	srcfile_push(filename);
 
-	if (srcpos_file) {
-		search.dir = srcpos_file->dir;
-		search.next = NULL;
-		search.prev = NULL;
-		searchptr = &search;
-	}
+	yyin = current_srcfile->f;
 
-	newfile = dtc_open_file(filename, searchptr);
-
-	incl_file = xmalloc(sizeof(struct incl_file));
-
-	/*
-	 * Save current context.
-	 */
-	incl_file->yy_prev_buf = YY_CURRENT_BUFFER;
-	incl_file->yy_prev_lineno = yylineno;
-	incl_file->file = srcpos_file;
-	incl_file->prev = incl_file_stack;
-
-	incl_file_stack = incl_file;
-
-	/*
-	 * Establish new context.
-	 */
-	srcpos_file = newfile;
-	yylineno = 1;
-	yyin = newfile->file;
-	yy_switch_to_buffer(yy_create_buffer(yyin, YY_BUF_SIZE));
+	yypush_buffer_state(yy_create_buffer(yyin, YY_BUF_SIZE));
 }
 
 
 static int pop_input_file(void)
 {
-	struct incl_file *incl_file;
-
-	if (incl_file_stack == 0)
+	if (srcfile_pop() == 0)
 		return 0;
 
-	dtc_close_file(srcpos_file);
-
-	/*
-	 * Pop.
-	 */
-	--incl_depth;
-	incl_file = incl_file_stack;
-	incl_file_stack = incl_file->prev;
-
-	/*
-	 * Recover old context.
-	 */
-	yy_delete_buffer(YY_CURRENT_BUFFER);
-	yy_switch_to_buffer(incl_file->yy_prev_buf);
-	yylineno = incl_file->yy_prev_lineno;
-	srcpos_file = incl_file->file;
-	yyin = incl_file->file ? incl_file->file->file : NULL;
-
-	/*
-	 * Free old state.
-	 */
-	free(incl_file);
+	yypop_buffer_state();
+	yyin = current_srcfile->f;
 
 	return 1;
 }
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index e27cc63..50c4420 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -170,20 +170,7 @@
 #define EOB_ACT_END_OF_FILE 1
 #define EOB_ACT_LAST_MATCH 2
 
-    /* Note: We specifically omit the test for yy_rule_can_match_eol because it requires
-     *       access to the local variable yy_act. Since yyless() is a macro, it would break
-     *       existing scanners that call yyless() from OUTSIDE yylex. 
-     *       One obvious solution it to make yy_act a global. I tried that, and saw
-     *       a 5% performance hit in a non-yylineno scanner, because yy_act is
-     *       normally declared as a register variable-- so it is not worth it.
-     */
-    #define  YY_LESS_LINENO(n) \
-            do { \
-                int yyl;\
-                for ( yyl = n; yyl < yyleng; ++yyl )\
-                    if ( yytext[yyl] == '\n' )\
-                        --yylineno;\
-            }while(0)
+    #define YY_LESS_LINENO(n)
     
 /* Return all but the first "n" matched characters back to the input stream. */
 #define yyless(n) \
@@ -385,8 +372,8 @@
 	*yy_cp = '\0'; \
 	(yy_c_buf_p) = yy_cp;
 
-#define YY_NUM_RULES 20
-#define YY_END_OF_BUFFER 21
+#define YY_NUM_RULES 17
+#define YY_END_OF_BUFFER 18
 /* This struct is not used in this scanner,
    but its presence is necessary. */
 struct yy_trans_info
@@ -394,20 +381,19 @@
 	flex_int32_t yy_verify;
 	flex_int32_t yy_nxt;
 	};
-static yyconst flex_int16_t yy_accept[104] =
+static yyconst flex_int16_t yy_accept[94] =
     {   0,
         0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-       21,   19,   16,   16,   19,   19,   19,    7,    7,   19,
-        7,   19,   19,   19,   19,   13,   14,   14,   19,    8,
-        8,   16,    0,    2,    0,    0,    9,    0,    0,    0,
-        0,    0,    0,    7,    7,    5,    0,    6,    0,   12,
-       12,   14,   14,    8,    0,   11,    9,    0,    0,    0,
-        0,   18,    0,    0,    0,    0,    8,    0,   17,    0,
-        0,    0,    0,    0,   10,    0,    0,    0,    0,    0,
-        0,    0,    0,    0,    0,    0,    0,    0,    3,   15,
+       18,   16,   13,   13,   16,   16,   16,   16,   16,   16,
+       16,   10,   11,   11,    6,    6,   13,    0,    2,    0,
+        7,    0,    0,    0,    0,    0,    0,    0,    5,    0,
+        9,    9,   11,   11,    6,    0,    7,    0,    0,    0,
+        0,   15,    0,    0,    0,    0,    6,    0,   14,    0,
+        0,    0,    0,    0,    8,    0,    0,    0,    0,    0,
+        0,    0,    0,    0,    0,    0,    0,    0,    3,   12,
         0,    0,    0,    0,    0,    0,    0,    0,    1,    0,
-
         0,    4,    0
+
     } ;
 
 static yyconst flex_int32_t yy_ec[256] =
@@ -416,16 +402,16 @@
         2,    2,    2,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    2,    1,    4,    5,    1,    1,    6,    1,    1,
-        1,    7,    8,    8,    9,    8,   10,   11,   12,   13,
-       13,   13,   13,   13,   13,   13,   13,   14,    1,    1,
-        1,    1,    8,    8,   15,   15,   15,   15,   15,   15,
-       16,   16,   16,   16,   16,   16,   16,   16,   16,   16,
-       16,   16,   16,   16,   16,   16,   16,   17,   16,   16,
-        1,   18,   19,    1,   16,    1,   15,   20,   21,   22,
+        1,    7,    5,    5,    8,    5,    9,   10,   11,   12,
+       12,   12,   12,   12,   12,   12,   12,   13,    1,    1,
+        1,    1,    5,    5,   14,   14,   14,   14,   14,   14,
+       15,   15,   15,   15,   15,   15,   15,   15,   15,   15,
+       15,   15,   15,   15,   15,   15,   15,   16,   15,   15,
+        1,   17,   18,    1,   15,    1,   14,   19,   20,   21,
 
-       23,   15,   16,   24,   25,   16,   16,   26,   27,   28,
-       24,   16,   16,   29,   30,   31,   32,   33,   16,   17,
-       16,   16,   34,    1,   35,    1,    1,    1,    1,    1,
+       22,   14,   15,   15,   23,   15,   15,   24,   25,   26,
+       15,   15,   15,   27,   28,   29,   30,   31,   15,   16,
+       15,   15,   32,    1,   33,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
@@ -442,136 +428,114 @@
         1,    1,    1,    1,    1
     } ;
 
-static yyconst flex_int32_t yy_meta[36] =
+static yyconst flex_int32_t yy_meta[34] =
     {   0,
-        1,    1,    1,    1,    2,    1,    2,    2,    2,    3,
-        4,    4,    4,    5,    6,    7,    7,    1,    1,    6,
-        6,    6,    6,    7,    7,    7,    7,    7,    7,    7,
-        7,    7,    7,    8,    1
+        1,    1,    1,    1,    2,    1,    2,    2,    3,    4,
+        4,    4,    5,    6,    7,    7,    1,    1,    6,    6,
+        6,    6,    7,    7,    7,    7,    7,    7,    7,    7,
+        7,    8,    1
     } ;
 
-static yyconst flex_int16_t yy_base[117] =
+static yyconst flex_int16_t yy_base[106] =
     {   0,
-        0,    0,   30,    0,   44,    0,   67,    0,   97,  105,
-      302,  303,   35,   44,   40,   94,  112,    0,  129,  152,
-      296,  295,  159,    0,  176,  303,    0,  116,   95,  165,
-       49,   46,  102,  303,  296,    0,    0,  288,  290,  293,
-      264,  266,  270,    0,    0,  303,    0,  303,  264,  303,
-        0,    0,  195,  101,    0,    0,    0,    0,  284,  125,
-      277,  265,  225,  230,  216,  218,    0,  202,  224,  221,
-      217,  107,  196,  188,  303,  206,  179,  186,  178,  185,
-      183,  162,  161,  150,  169,  160,  145,  125,  303,  303,
-      137,  109,  190,  103,  203,  167,  108,  197,  303,  123,
+        0,    0,  237,  236,   25,    0,   47,    0,   30,   71,
+      244,  247,   82,   84,   84,  211,   95,  229,  218,    0,
+      111,  247,    0,   84,   83,   95,  106,   86,  247,  237,
+        0,  230,  231,  234,  207,  209,  212,  220,  247,  206,
+      247,  218,    0,  106,  116,    0,    0,    0,  223,   89,
+      226,  219,  199,  206,  200,  204,    0,  190,  213,  212,
+      202,   91,  178,  161,  247,  172,  144,  150,  140,  130,
+      140,  124,  128,  120,  138,  137,  123,  122,  247,  247,
+      134,  114,  132,   86,  135,  125,   90,  136,  247,   97,
+       29,  247,  247,  153,  156,  161,  165,  170,  176,  180,
 
-       29,  303,  303,  215,  221,  226,  229,  234,  240,  246,
-      250,  257,  265,  270,  275,  282
+      187,  195,  200,  205,  212
     } ;
 
-static yyconst flex_int16_t yy_def[117] =
+static yyconst flex_int16_t yy_def[106] =
     {   0,
-      103,    1,    1,    3,    3,    5,  103,    7,    3,    3,
-      103,  103,  103,  103,  104,  105,  103,  106,  103,   19,
-       19,   20,  103,  107,   20,  103,  108,  109,  105,  103,
-      103,  103,  104,  103,  104,  110,  111,  103,  112,  113,
-      103,  103,  103,  106,   19,  103,   20,  103,  103,  103,
-       20,  108,  109,  103,  114,  110,  111,  115,  112,  112,
-      113,  103,  103,  103,  103,  103,  114,  115,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  116,  103,  116,  103,  116,
+       93,    1,    1,    1,    1,    5,   93,    7,    1,    1,
+       93,   93,   93,   93,   94,   95,   93,   96,   17,   97,
+       96,   93,   98,   99,   93,   93,   93,   94,   93,   94,
+      100,   93,  101,  102,   93,   93,   93,   96,   93,   93,
+       93,   96,   98,   99,   93,  103,  100,  104,  101,  101,
+      102,   93,   93,   93,   93,   93,  103,  104,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,  105,   93,  105,   93,  105,
+       93,   93,    0,   93,   93,   93,   93,   93,   93,   93,
 
-      103,  103,    0,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103
+       93,   93,   93,   93,   93
     } ;
 
-static yyconst flex_int16_t yy_nxt[339] =
+static yyconst flex_int16_t yy_nxt[281] =
     {   0,
-       12,   13,   14,   15,   12,   16,   12,   12,   12,   17,
-       18,   18,   18,   12,   19,   20,   20,   12,   12,   21,
-       19,   21,   19,   22,   20,   20,   20,   20,   20,   20,
-       20,   20,   20,   12,   12,   12,   32,   32,  102,   23,
-       12,   12,   12,   34,   20,   32,   32,   32,   32,   20,
-       20,   20,   20,   20,   24,   24,   24,   35,   25,   54,
-       54,   54,   26,   25,   25,   25,   25,   12,   13,   14,
-       15,   27,   12,   27,   27,   27,   23,   27,   27,   27,
-       12,   28,   28,   28,   12,   12,   28,   28,   28,   28,
-       28,   28,   28,   28,   28,   28,   28,   28,   28,   28,
+       12,   13,   14,   15,   12,   16,   12,   12,   17,   12,
+       12,   12,   12,   18,   18,   18,   12,   12,   18,   18,
+       18,   18,   18,   18,   18,   18,   18,   18,   18,   18,
+       18,   12,   12,   19,   20,   20,   20,   92,   21,   25,
+       26,   26,   22,   21,   21,   21,   21,   12,   13,   14,
+       15,   23,   16,   23,   23,   19,   23,   23,   23,   12,
+       24,   24,   24,   12,   12,   24,   24,   24,   24,   24,
+       24,   24,   24,   24,   24,   24,   24,   24,   12,   12,
+       25,   26,   26,   27,   27,   27,   27,   29,   43,   29,
+       43,   43,   45,   45,   45,   50,   39,   59,   46,   93,
 
-       12,   12,   29,   36,  103,   34,   17,   30,   31,   31,
-       29,   54,   54,   54,   17,   30,   31,   31,   39,   35,
-       52,   40,   52,   52,   52,  103,   78,   38,   38,   46,
-      101,   60,   79,   41,   69,   97,   42,   94,   43,   45,
-       45,   45,   46,   45,   47,   47,   93,   92,   45,   45,
-       45,   45,   47,   47,   47,   47,   47,   47,   47,   47,
-       47,   47,   47,   47,   47,   39,   47,   91,   40,   90,
-       99,   47,   47,   47,   47,   54,   54,   54,   89,   88,
-       41,   55,   87,   49,  100,   43,   51,   51,   51,   86,
-       51,   95,   95,   96,   85,   51,   51,   51,   51,   52,
+       30,   33,   30,   34,   45,   45,   45,   27,   27,   68,
+       43,   91,   43,   43,   69,   35,   87,   36,   39,   37,
+       42,   42,   42,   39,   42,   45,   45,   45,   89,   42,
+       42,   42,   42,   85,   85,   86,   85,   85,   86,   89,
+       84,   90,   83,   82,   81,   80,   79,   78,   77,   76,
+       75,   74,   90,   28,   28,   28,   28,   28,   28,   28,
+       28,   31,   31,   31,   38,   38,   38,   38,   41,   73,
+       41,   43,   72,   43,   71,   43,   43,   44,   33,   44,
+       44,   44,   44,   47,   69,   47,   47,   49,   49,   49,
+       49,   49,   49,   49,   49,   51,   51,   51,   51,   51,
 
-       99,   52,   52,   52,   95,   95,   96,   84,   46,   83,
-       82,   81,   39,   79,  100,   33,   33,   33,   33,   33,
-       33,   33,   33,   37,   80,   77,   37,   37,   37,   44,
-       40,   44,   50,   76,   50,   52,   75,   52,   74,   52,
-       52,   53,   73,   53,   53,   53,   53,   56,   56,   56,
-       72,   56,   56,   57,   71,   57,   57,   59,   59,   59,
-       59,   59,   59,   59,   59,   61,   61,   61,   61,   61,
-       61,   61,   61,   67,   70,   67,   68,   68,   68,   62,
-       68,   68,   98,   98,   98,   98,   98,   98,   98,   98,
-       60,   66,   65,   64,   63,   62,   60,   58,  103,   48,
-
-       48,  103,   11,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103
+       51,   51,   51,   57,   70,   57,   58,   58,   58,   67,
+       58,   58,   88,   88,   88,   88,   88,   88,   88,   88,
+       34,   66,   65,   64,   63,   62,   61,   60,   52,   50,
+       39,   56,   39,   55,   54,   53,   52,   50,   48,   93,
+       40,   39,   32,   93,   19,   19,   11,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93
     } ;
 
-static yyconst flex_int16_t yy_chk[339] =
+static yyconst flex_int16_t yy_chk[281] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    1,    1,    1,    1,    3,   13,   13,  101,    3,
-        3,    3,    3,   15,    3,   14,   14,   32,   32,    3,
-        3,    3,    3,    3,    5,    5,    5,   15,    5,   31,
-       31,   31,    5,    5,    5,    5,    5,    7,    7,    7,
+        1,    1,    1,    5,    5,    5,    5,   91,    5,    9,
+        9,    9,    5,    5,    5,    5,    5,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
+       10,   10,   10,   13,   13,   14,   14,   15,   24,   28,
+       24,   24,   25,   25,   25,   50,   24,   50,   25,   90,
 
-        7,    7,    9,   16,   29,   33,    9,    9,    9,    9,
-       10,   54,   54,   54,   10,   10,   10,   10,   17,   33,
-       28,   17,   28,   28,   28,  100,   72,   16,   29,   28,
-       97,   60,   72,   17,   60,   94,   17,   92,   17,   19,
-       19,   19,   19,   19,   19,   19,   91,   88,   19,   19,
-       19,   19,   19,   19,   19,   19,   19,   19,   19,   19,
-       19,   19,   20,   20,   20,   23,   20,   87,   23,   86,
-       96,   20,   20,   20,   20,   30,   30,   30,   85,   84,
-       23,   30,   83,   23,   96,   23,   25,   25,   25,   82,
-       25,   93,   93,   93,   81,   25,   25,   25,   25,   53,
+       15,   17,   28,   17,   26,   26,   26,   27,   27,   62,
+       44,   87,   44,   44,   62,   17,   84,   17,   44,   17,
+       21,   21,   21,   21,   21,   45,   45,   45,   86,   21,
+       21,   21,   21,   83,   83,   83,   85,   85,   85,   88,
+       82,   86,   81,   78,   77,   76,   75,   74,   73,   72,
+       71,   70,   88,   94,   94,   94,   94,   94,   94,   94,
+       94,   95,   95,   95,   96,   96,   96,   96,   97,   69,
+       97,   98,   68,   98,   67,   98,   98,   99,   66,   99,
+       99,   99,   99,  100,   64,  100,  100,  101,  101,  101,
+      101,  101,  101,  101,  101,  102,  102,  102,  102,  102,
 
-       98,   53,   53,   53,   95,   95,   95,   80,   53,   79,
-       78,   77,   76,   74,   98,  104,  104,  104,  104,  104,
-      104,  104,  104,  105,   73,   71,  105,  105,  105,  106,
-       70,  106,  107,   69,  107,  108,   68,  108,   66,  108,
-      108,  109,   65,  109,  109,  109,  109,  110,  110,  110,
-       64,  110,  110,  111,   63,  111,  111,  112,  112,  112,
-      112,  112,  112,  112,  112,  113,  113,  113,  113,  113,
-      113,  113,  113,  114,   62,  114,  115,  115,  115,   61,
-      115,  115,  116,  116,  116,  116,  116,  116,  116,  116,
-       59,   49,   43,   42,   41,   40,   39,   38,   35,   22,
-
-       21,   11,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103,  103,  103,
-      103,  103,  103,  103,  103,  103,  103,  103
+      102,  102,  102,  103,   63,  103,  104,  104,  104,   61,
+      104,  104,  105,  105,  105,  105,  105,  105,  105,  105,
+       60,   59,   58,   56,   55,   54,   53,   52,   51,   49,
+       42,   40,   38,   37,   36,   35,   34,   33,   32,   30,
+       19,   18,   16,   11,    4,    3,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93,
+       93,   93,   93,   93,   93,   93,   93,   93,   93,   93
     } ;
 
-/* Table of booleans, true if rule could match eol. */
-static yyconst flex_int32_t yy_rule_can_match_eol[21] =
-    {   0,
-1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 
-    0,     };
-
 static yy_state_type yy_last_accepting_state;
 static char *yy_last_accepting_cpos;
 
@@ -616,6 +580,13 @@
 #include "srcpos.h"
 #include "dtc-parser.tab.h"
 
+YYLTYPE yylloc;
+
+/* CAUTION: this will stop working if we ever use yyless() or yyunput() */
+#define	YY_USER_ACTION \
+	{ \
+		srcpos_update(&yylloc, yytext, yyleng); \
+	}
 
 /*#define LEXDEBUG	1*/
 
@@ -625,19 +596,14 @@
 #define DPRINT(fmt, ...)	do { } while (0)
 #endif
 
-static int dts_version; /* = 0 */
+static int dts_version = 1;
 
-#define BEGIN_DEFAULT()	if (dts_version == 0) { \
-				DPRINT("<INITIAL>\n"); \
-				BEGIN(INITIAL); \
-			} else { \
-				DPRINT("<V1>\n"); \
+#define BEGIN_DEFAULT()		DPRINT("<V1>\n"); \
 				BEGIN(V1); \
-			}
 
 static void push_input_file(const char *filename);
 static int pop_input_file(void);
-#line 641 "dtc-lexer.lex.c"
+#line 607 "dtc-lexer.lex.c"
 
 #define INITIAL 0
 #define INCLUDE 1
@@ -826,9 +792,9 @@
 	register char *yy_cp, *yy_bp;
 	register int yy_act;
     
-#line 64 "dtc-lexer.l"
+#line 66 "dtc-lexer.l"
 
-#line 832 "dtc-lexer.lex.c"
+#line 798 "dtc-lexer.lex.c"
 
 	if ( !(yy_init) )
 		{
@@ -881,35 +847,21 @@
 			while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
 				{
 				yy_current_state = (int) yy_def[yy_current_state];
-				if ( yy_current_state >= 104 )
+				if ( yy_current_state >= 94 )
 					yy_c = yy_meta[(unsigned int) yy_c];
 				}
 			yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
 			++yy_cp;
 			}
-		while ( yy_base[yy_current_state] != 303 );
+		while ( yy_current_state != 93 );
+		yy_cp = (yy_last_accepting_cpos);
+		yy_current_state = (yy_last_accepting_state);
 
 yy_find_action:
 		yy_act = yy_accept[yy_current_state];
-		if ( yy_act == 0 )
-			{ /* have to back up */
-			yy_cp = (yy_last_accepting_cpos);
-			yy_current_state = (yy_last_accepting_state);
-			yy_act = yy_accept[yy_current_state];
-			}
 
 		YY_DO_BEFORE_ACTION;
 
-		if ( yy_act != YY_END_OF_BUFFER && yy_rule_can_match_eol[yy_act] )
-			{
-			int yyl;
-			for ( yyl = 0; yyl < yyleng; ++yyl )
-				if ( yytext[yyl] == '\n' )
-					   
-    yylineno++;
-;
-			}
-
 do_action:	/* This label is used only to access EOF actions. */
 
 		switch ( yy_act )
@@ -924,7 +876,7 @@
 case 1:
 /* rule 1 can match eol */
 YY_RULE_SETUP
-#line 65 "dtc-lexer.l"
+#line 67 "dtc-lexer.l"
 {
 			char *name = strchr(yytext, '\"') + 1;
 			yytext[yyleng-1] = '\0';
@@ -936,7 +888,7 @@
 case YY_STATE_EOF(BYTESTRING):
 case YY_STATE_EOF(PROPNODENAME):
 case YY_STATE_EOF(V1):
-#line 71 "dtc-lexer.l"
+#line 73 "dtc-lexer.l"
 {
 			if (!pop_input_file()) {
 				yyterminate();
@@ -946,23 +898,18 @@
 case 2:
 /* rule 2 can match eol */
 YY_RULE_SETUP
-#line 77 "dtc-lexer.l"
+#line 79 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("String: %s\n", yytext);
 			yylval.data = data_copy_escape_string(yytext+1,
 					yyleng-2);
-			yylloc.first_line = yylineno;
 			return DT_STRING;
 		}
 	YY_BREAK
 case 3:
 YY_RULE_SETUP
-#line 87 "dtc-lexer.l"
+#line 86 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Keyword: /dts-v1/\n");
 			dts_version = 1;
 			BEGIN_DEFAULT();
@@ -971,10 +918,8 @@
 	YY_BREAK
 case 4:
 YY_RULE_SETUP
-#line 96 "dtc-lexer.l"
+#line 93 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Keyword: /memreserve/\n");
 			BEGIN_DEFAULT();
 			return DT_MEMRESERVE;
@@ -982,158 +927,100 @@
 	YY_BREAK
 case 5:
 YY_RULE_SETUP
-#line 104 "dtc-lexer.l"
+#line 99 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Label: %s\n", yytext);
-			yylval.labelref = strdup(yytext);
+			yylval.labelref = xstrdup(yytext);
 			yylval.labelref[yyleng-1] = '\0';
 			return DT_LABEL;
 		}
 	YY_BREAK
 case 6:
 YY_RULE_SETUP
-#line 113 "dtc-lexer.l"
+#line 106 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			if (*yytext == 'b')
-				yylval.cbase = 2;
-			else if (*yytext == 'o')
-				yylval.cbase = 8;
-			else if (*yytext == 'd')
-				yylval.cbase = 10;
-			else
-				yylval.cbase = 16;
-			DPRINT("Base: %d\n", yylval.cbase);
-			return DT_BASE;
-		}
-	YY_BREAK
-case 7:
-YY_RULE_SETUP
-#line 128 "dtc-lexer.l"
-{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			yylval.literal = strdup(yytext);
-			DPRINT("Literal: '%s'\n", yylval.literal);
-			return DT_LEGACYLITERAL;
-		}
-	YY_BREAK
-case 8:
-YY_RULE_SETUP
-#line 136 "dtc-lexer.l"
-{
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			yylval.literal = strdup(yytext);
+			yylval.literal = xstrdup(yytext);
 			DPRINT("Literal: '%s'\n", yylval.literal);
 			return DT_LITERAL;
 		}
 	YY_BREAK
-case 9:
+case 7:
 YY_RULE_SETUP
-#line 144 "dtc-lexer.l"
+#line 112 "dtc-lexer.l"
 {	/* label reference */
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Ref: %s\n", yytext+1);
-			yylval.labelref = strdup(yytext+1);
+			yylval.labelref = xstrdup(yytext+1);
 			return DT_REF;
 		}
 	YY_BREAK
-case 10:
+case 8:
 YY_RULE_SETUP
-#line 152 "dtc-lexer.l"
+#line 118 "dtc-lexer.l"
 {	/* new-style path reference */
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			yytext[yyleng-1] = '\0';
 			DPRINT("Ref: %s\n", yytext+2);
-			yylval.labelref = strdup(yytext+2);
+			yylval.labelref = xstrdup(yytext+2);
 			return DT_REF;
 		}
 	YY_BREAK
-case 11:
+case 9:
 YY_RULE_SETUP
-#line 161 "dtc-lexer.l"
-{	/* old-style path reference */
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
-			DPRINT("Ref: %s\n", yytext+1);
-			yylval.labelref = strdup(yytext+1);
-			return DT_REF;
-		}
-	YY_BREAK
-case 12:
-YY_RULE_SETUP
-#line 169 "dtc-lexer.l"
+#line 125 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			yylval.byte = strtol(yytext, NULL, 16);
 			DPRINT("Byte: %02x\n", (int)yylval.byte);
 			return DT_BYTE;
 		}
 	YY_BREAK
-case 13:
+case 10:
 YY_RULE_SETUP
-#line 177 "dtc-lexer.l"
+#line 131 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("/BYTESTRING\n");
 			BEGIN_DEFAULT();
 			return ']';
 		}
 	YY_BREAK
-case 14:
+case 11:
 YY_RULE_SETUP
-#line 185 "dtc-lexer.l"
+#line 137 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("PropNodeName: %s\n", yytext);
-			yylval.propnodename = strdup(yytext);
+			yylval.propnodename = xstrdup(yytext);
 			BEGIN_DEFAULT();
 			return DT_PROPNODENAME;
 		}
 	YY_BREAK
-case 15:
+case 12:
 YY_RULE_SETUP
-#line 194 "dtc-lexer.l"
+#line 144 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Binary Include\n");
 			return DT_INCBIN;
 		}
 	YY_BREAK
-case 16:
-/* rule 16 can match eol */
+case 13:
+/* rule 13 can match eol */
 YY_RULE_SETUP
-#line 201 "dtc-lexer.l"
+#line 149 "dtc-lexer.l"
 /* eat whitespace */
 	YY_BREAK
-case 17:
-/* rule 17 can match eol */
+case 14:
+/* rule 14 can match eol */
 YY_RULE_SETUP
-#line 202 "dtc-lexer.l"
+#line 150 "dtc-lexer.l"
 /* eat C-style comments */
 	YY_BREAK
-case 18:
-/* rule 18 can match eol */
+case 15:
+/* rule 15 can match eol */
 YY_RULE_SETUP
-#line 203 "dtc-lexer.l"
+#line 151 "dtc-lexer.l"
 /* eat C++-style comments */
 	YY_BREAK
-case 19:
+case 16:
 YY_RULE_SETUP
-#line 205 "dtc-lexer.l"
+#line 153 "dtc-lexer.l"
 {
-			yylloc.file = srcpos_file;
-			yylloc.first_line = yylineno;
 			DPRINT("Char: %c (\\x%02x)\n", yytext[0],
 				(unsigned)yytext[0]);
 			if (yytext[0] == '[') {
@@ -1148,12 +1035,12 @@
 			return yytext[0];
 		}
 	YY_BREAK
-case 20:
+case 17:
 YY_RULE_SETUP
-#line 222 "dtc-lexer.l"
+#line 168 "dtc-lexer.l"
 ECHO;
 	YY_BREAK
-#line 1157 "dtc-lexer.lex.c"
+#line 1044 "dtc-lexer.lex.c"
 
 	case YY_END_OF_BUFFER:
 		{
@@ -1218,7 +1105,8 @@
 
 			else
 				{
-				yy_cp = (yy_c_buf_p);
+				yy_cp = (yy_last_accepting_cpos);
+				yy_current_state = (yy_last_accepting_state);
 				goto yy_find_action;
 				}
 			}
@@ -1443,7 +1331,7 @@
 		while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
 			{
 			yy_current_state = (int) yy_def[yy_current_state];
-			if ( yy_current_state >= 104 )
+			if ( yy_current_state >= 94 )
 				yy_c = yy_meta[(unsigned int) yy_c];
 			}
 		yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
@@ -1471,11 +1359,11 @@
 	while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
 		{
 		yy_current_state = (int) yy_def[yy_current_state];
-		if ( yy_current_state >= 104 )
+		if ( yy_current_state >= 94 )
 			yy_c = yy_meta[(unsigned int) yy_c];
 		}
 	yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
-	yy_is_jam = (yy_current_state == 103);
+	yy_is_jam = (yy_current_state == 93);
 
 	return yy_is_jam ? 0 : yy_current_state;
 }
@@ -1550,11 +1438,6 @@
 	*(yy_c_buf_p) = '\0';	/* preserve yytext */
 	(yy_hold_char) = *++(yy_c_buf_p);
 
-	if ( c == '\n' )
-		   
-    yylineno++;
-;
-
 	return c;
 }
 #endif	/* ifndef YY_NO_INPUT */
@@ -1669,10 +1552,6 @@
 	yyfree((void *) b  );
 }
 
-#ifndef __cplusplus
-extern int isatty (int );
-#endif /* __cplusplus */
-    
 /* Initializes or reinitializes a buffer.
  * This function is sometimes called more than once on the same buffer,
  * such as during a yyrestart() or at EOF.
@@ -1696,7 +1575,7 @@
         b->yy_bs_column = 0;
     }
 
-        b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;
+        b->yy_is_interactive = 0;
     
 	errno = oerrno;
 }
@@ -2025,9 +1904,6 @@
      * This function is called from yylex_destroy(), so don't allocate here.
      */
 
-    /* We do not touch yylineno unless the option is enabled. */
-    yylineno =  1;
-    
     (yy_buffer_stack) = 0;
     (yy_buffer_stack_top) = 0;
     (yy_buffer_stack_max) = 0;
@@ -2120,104 +1996,29 @@
 
 #define YYTABLES_NAME "yytables"
 
-#line 222 "dtc-lexer.l"
+#line 168 "dtc-lexer.l"
 
 
 
-
-/*
- * Stack of nested include file contexts.
- */
-
-struct incl_file {
-	struct dtc_file *file;
-	YY_BUFFER_STATE yy_prev_buf;
-	int yy_prev_lineno;
-	struct incl_file *prev;
-};
-
-static struct incl_file *incl_file_stack;
-
-
-/*
- * Detect infinite include recursion.
- */
-#define MAX_INCLUDE_DEPTH	(100)
-
-static int incl_depth = 0;
-
-
 static void push_input_file(const char *filename)
 {
-	struct incl_file *incl_file;
-	struct dtc_file *newfile;
-	struct search_path search, *searchptr = NULL;
-
 	assert(filename);
 
-	if (incl_depth++ >= MAX_INCLUDE_DEPTH)
-		die("Includes nested too deeply");
+	srcfile_push(filename);
 
-	if (srcpos_file) {
-		search.dir = srcpos_file->dir;
-		search.next = NULL;
-		search.prev = NULL;
-		searchptr = &search;
-	}
+	yyin = current_srcfile->f;
 
-	newfile = dtc_open_file(filename, searchptr);
-
-	incl_file = xmalloc(sizeof(struct incl_file));
-
-	/*
-	 * Save current context.
-	 */
-	incl_file->yy_prev_buf = YY_CURRENT_BUFFER;
-	incl_file->yy_prev_lineno = yylineno;
-	incl_file->file = srcpos_file;
-	incl_file->prev = incl_file_stack;
-
-	incl_file_stack = incl_file;
-
-	/*
-	 * Establish new context.
-	 */
-	srcpos_file = newfile;
-	yylineno = 1;
-	yyin = newfile->file;
-	yy_switch_to_buffer(yy_create_buffer(yyin,YY_BUF_SIZE));
+	yypush_buffer_state(yy_create_buffer(yyin,YY_BUF_SIZE));
 }
 
 
 static int pop_input_file(void)
 {
-	struct incl_file *incl_file;
-
-	if (incl_file_stack == 0)
+	if (srcfile_pop() == 0)
 		return 0;
 
-	dtc_close_file(srcpos_file);
-
-	/*
-	 * Pop.
-	 */
-	--incl_depth;
-	incl_file = incl_file_stack;
-	incl_file_stack = incl_file->prev;
-
-	/*
-	 * Recover old context.
-	 */
-	yy_delete_buffer(YY_CURRENT_BUFFER);
-	yy_switch_to_buffer(incl_file->yy_prev_buf);
-	yylineno = incl_file->yy_prev_lineno;
-	srcpos_file = incl_file->file;
-	yyin = incl_file->file ? incl_file->file->file : NULL;
-
-	/*
-	 * Free old state.
-	 */
-	free(incl_file);
+	yypop_buffer_state();
+	yyin = current_srcfile->f;
 
 	return 1;
 }
diff --git a/scripts/dtc/dtc-parser.tab.c_shipped b/scripts/dtc/dtc-parser.tab.c_shipped
index 2712937..9be2eea 100644
--- a/scripts/dtc/dtc-parser.tab.c_shipped
+++ b/scripts/dtc/dtc-parser.tab.c_shipped
@@ -1,24 +1,23 @@
-/* A Bison parser, made by GNU Bison 2.3.  */
+
+/* A Bison parser, made by GNU Bison 2.4.1.  */
 
 /* Skeleton implementation for Bison's Yacc-like parsers in C
-
-   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   
+      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
    Free Software Foundation, Inc.
-
-   This program is free software; you can redistribute it and/or modify
+   
+   This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2, or (at your option)
-   any later version.
-
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+   
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-
+   
    You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 51 Franklin Street, Fifth Floor,
-   Boston, MA 02110-1301, USA.  */
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
 
 /* As a special exception, you may create a larger work that contains
    part or all of the Bison parser skeleton and distribute that work
@@ -29,7 +28,7 @@
    special exception, which will cause the skeleton and the resulting
    Bison output files to be licensed under the GNU General Public
    License without this special exception.
-
+   
    This special exception was added by the Free Software Foundation in
    version 2.2 of Bison.  */
 
@@ -47,7 +46,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.3"
+#define YYBISON_VERSION "2.4.1"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -55,55 +54,32 @@
 /* Pure parsers.  */
 #define YYPURE 0
 
+/* Push parsers.  */
+#define YYPUSH 0
+
+/* Pull parsers.  */
+#define YYPULL 1
+
 /* Using locations.  */
-#define YYLSP_NEEDED 1
-
-
-
-/* Tokens.  */
-#ifndef YYTOKENTYPE
-# define YYTOKENTYPE
-   /* Put the tokens into the symbol table, so that GDB and other debuggers
-      know about them.  */
-   enum yytokentype {
-     DT_V1 = 258,
-     DT_MEMRESERVE = 259,
-     DT_PROPNODENAME = 260,
-     DT_LITERAL = 261,
-     DT_LEGACYLITERAL = 262,
-     DT_BASE = 263,
-     DT_BYTE = 264,
-     DT_STRING = 265,
-     DT_LABEL = 266,
-     DT_REF = 267,
-     DT_INCBIN = 268
-   };
-#endif
-/* Tokens.  */
-#define DT_V1 258
-#define DT_MEMRESERVE 259
-#define DT_PROPNODENAME 260
-#define DT_LITERAL 261
-#define DT_LEGACYLITERAL 262
-#define DT_BASE 263
-#define DT_BYTE 264
-#define DT_STRING 265
-#define DT_LABEL 266
-#define DT_REF 267
-#define DT_INCBIN 268
-
+#define YYLSP_NEEDED 0
 
 
 
 /* Copy the first part of user declarations.  */
-#line 23 "dtc-parser.y"
+
+/* Line 189 of yacc.c  */
+#line 21 "dtc-parser.y"
 
 #include <stdio.h>
 
 #include "dtc.h"
 #include "srcpos.h"
 
+YYLTYPE yylloc;
+
 extern int yylex(void);
+extern void print_error(char const *fmt, ...);
+extern void yyerror(char const *s);
 
 extern struct boot_info *the_boot_info;
 extern int treesource_error;
@@ -111,6 +87,9 @@
 static unsigned long long eval_literal(const char *s, int base, int bits);
 
 
+/* Line 189 of yacc.c  */
+#line 92 "dtc-parser.tab.c"
+
 /* Enabling traces.  */
 #ifndef YYDEBUG
 # define YYDEBUG 0
@@ -129,10 +108,35 @@
 # define YYTOKEN_TABLE 0
 #endif
 
+
+/* Tokens.  */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+   /* Put the tokens into the symbol table, so that GDB and other debuggers
+      know about them.  */
+   enum yytokentype {
+     DT_V1 = 258,
+     DT_MEMRESERVE = 259,
+     DT_PROPNODENAME = 260,
+     DT_LITERAL = 261,
+     DT_BASE = 262,
+     DT_BYTE = 263,
+     DT_STRING = 264,
+     DT_LABEL = 265,
+     DT_REF = 266,
+     DT_INCBIN = 267
+   };
+#endif
+
+
+
 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
 typedef union YYSTYPE
-#line 37 "dtc-parser.y"
 {
+
+/* Line 214 of yacc.c  */
+#line 39 "dtc-parser.y"
+
 	char *propnodename;
 	char *literal;
 	char *labelref;
@@ -147,34 +151,23 @@
 	struct node *node;
 	struct node *nodelist;
 	struct reserve_info *re;
-}
-/* Line 187 of yacc.c.  */
-#line 153 "dtc-parser.tab.c"
-	YYSTYPE;
+
+
+
+/* Line 214 of yacc.c  */
+#line 159 "dtc-parser.tab.c"
+} YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
 # define YYSTYPE_IS_DECLARED 1
-# define YYSTYPE_IS_TRIVIAL 1
-#endif
-
-#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
-typedef struct YYLTYPE
-{
-  int first_line;
-  int first_column;
-  int last_line;
-  int last_column;
-} YYLTYPE;
-# define yyltype YYLTYPE /* obsolescent; will be withdrawn */
-# define YYLTYPE_IS_DECLARED 1
-# define YYLTYPE_IS_TRIVIAL 1
 #endif
 
 
 /* Copy the second part of user declarations.  */
 
 
-/* Line 216 of yacc.c.  */
-#line 178 "dtc-parser.tab.c"
+/* Line 264 of yacc.c  */
+#line 171 "dtc-parser.tab.c"
 
 #ifdef short
 # undef short
@@ -249,14 +242,14 @@
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 static int
-YYID (int i)
+YYID (int yyi)
 #else
 static int
-YYID (i)
-    int i;
+YYID (yyi)
+    int yyi;
 #endif
 {
-  return i;
+  return yyi;
 }
 #endif
 
@@ -332,15 +325,13 @@
 
 #if (! defined yyoverflow \
      && (! defined __cplusplus \
-	 || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \
-	     && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+	 || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
 
 /* A type that is properly aligned for any stack member.  */
 union yyalloc
 {
-  yytype_int16 yyss;
-  YYSTYPE yyvs;
-    YYLTYPE yyls;
+  yytype_int16 yyss_alloc;
+  YYSTYPE yyvs_alloc;
 };
 
 /* The size of the maximum gap between one aligned stack and the next.  */
@@ -349,8 +340,8 @@
 /* The size of an array large to enough to hold all stacks, each with
    N elements.  */
 # define YYSTACK_BYTES(N) \
-     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \
-      + 2 * YYSTACK_GAP_MAXIMUM)
+     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+      + YYSTACK_GAP_MAXIMUM)
 
 /* Copy COUNT objects from FROM to TO.  The source and destination do
    not overlap.  */
@@ -375,12 +366,12 @@
    elements in the stack, and YYPTR gives the new location of the
    stack.  Advance YYPTR to a properly aligned location for the next
    stack.  */
-# define YYSTACK_RELOCATE(Stack)					\
+# define YYSTACK_RELOCATE(Stack_alloc, Stack)				\
     do									\
       {									\
 	YYSIZE_T yynewbytes;						\
-	YYCOPY (&yyptr->Stack, Stack, yysize);				\
-	Stack = &yyptr->Stack;						\
+	YYCOPY (&yyptr->Stack_alloc, Stack, yysize);			\
+	Stack = &yyptr->Stack_alloc;					\
 	yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
 	yyptr += yynewbytes / sizeof (*yyptr);				\
       }									\
@@ -389,22 +380,22 @@
 #endif
 
 /* YYFINAL -- State number of the termination state.  */
-#define YYFINAL  9
+#define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   73
+#define YYLAST   56
 
 /* YYNTOKENS -- Number of terminals.  */
-#define YYNTOKENS  27
+#define YYNTOKENS  25
 /* YYNNTS -- Number of nonterminals.  */
-#define YYNNTS  20
+#define YYNNTS  16
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  45
+#define YYNRULES  39
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  76
+#define YYNSTATES  67
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
-#define YYMAXUTOK   268
+#define YYMAXUTOK   267
 
 #define YYTRANSLATE(YYX)						\
   ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
@@ -416,15 +407,15 @@
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-      24,    26,     2,     2,    25,    15,     2,    16,     2,     2,
-       2,     2,     2,     2,     2,     2,     2,     2,     2,    14,
-      20,    19,    21,     2,     2,     2,     2,     2,     2,     2,
+      22,    24,     2,     2,    23,     2,     2,    14,     2,     2,
+       2,     2,     2,     2,     2,     2,     2,     2,     2,    13,
+      18,    17,    19,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,    22,     2,    23,     2,     2,     2,     2,     2,     2,
+       2,    20,     2,    21,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,     2,     2,    17,     2,    18,     2,     2,     2,     2,
+       2,     2,     2,    15,     2,    16,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
@@ -438,7 +429,7 @@
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     1,     2,     3,     4,
-       5,     6,     7,     8,     9,    10,    11,    12,    13
+       5,     6,     7,     8,     9,    10,    11,    12
 };
 
 #if YYDEBUG
@@ -446,41 +437,37 @@
    YYRHS.  */
 static const yytype_uint8 yyprhs[] =
 {
-       0,     0,     3,     8,    11,    12,    15,    21,    22,    25,
-      27,    34,    36,    38,    41,    47,    48,    51,    57,    61,
-      64,    69,    74,    77,    87,    93,    96,    97,   100,   103,
-     104,   107,   110,   113,   114,   116,   118,   121,   122,   125,
-     128,   129,   132,   135,   139,   140
+       0,     0,     3,     8,     9,    12,    17,    20,    22,    25,
+      29,    33,    39,    40,    43,    48,    51,    54,    57,    62,
+      67,    70,    80,    86,    89,    90,    93,    96,    97,   100,
+     103,   106,   108,   109,   112,   115,   116,   119,   122,   125
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
 static const yytype_int8 yyrhs[] =
 {
-      28,     0,    -1,     3,    14,    29,    34,    -1,    31,    34,
-      -1,    -1,    30,    29,    -1,    46,     4,    33,    33,    14,
-      -1,    -1,    32,    31,    -1,    30,    -1,    46,     4,    33,
-      15,    33,    14,    -1,     6,    -1,     7,    -1,    16,    35,
-      -1,    17,    36,    44,    18,    14,    -1,    -1,    36,    37,
-      -1,    46,     5,    19,    38,    14,    -1,    46,     5,    14,
-      -1,    39,    10,    -1,    39,    20,    40,    21,    -1,    39,
-      22,    43,    23,    -1,    39,    12,    -1,    39,    13,    24,
-      10,    25,    33,    25,    33,    26,    -1,    39,    13,    24,
-      10,    26,    -1,    38,    11,    -1,    -1,    38,    25,    -1,
-      39,    11,    -1,    -1,    40,    42,    -1,    40,    12,    -1,
-      40,    11,    -1,    -1,     8,    -1,     6,    -1,    41,     7,
-      -1,    -1,    43,     9,    -1,    43,    11,    -1,    -1,    45,
-      44,    -1,    45,    37,    -1,    46,     5,    35,    -1,    -1,
-      11,    -1
+      26,     0,    -1,     3,    13,    27,    30,    -1,    -1,    28,
+      27,    -1,     4,    29,    29,    13,    -1,    10,    28,    -1,
+       6,    -1,    14,    31,    -1,    30,    14,    31,    -1,    30,
+      11,    31,    -1,    15,    32,    39,    16,    13,    -1,    -1,
+      32,    33,    -1,     5,    17,    34,    13,    -1,     5,    13,
+      -1,    10,    33,    -1,    35,     9,    -1,    35,    18,    36,
+      19,    -1,    35,    20,    38,    21,    -1,    35,    11,    -1,
+      35,    12,    22,     9,    23,    29,    23,    29,    24,    -1,
+      35,    12,    22,     9,    24,    -1,    34,    10,    -1,    -1,
+      34,    23,    -1,    35,    10,    -1,    -1,    36,    37,    -1,
+      36,    11,    -1,    36,    10,    -1,     6,    -1,    -1,    38,
+       8,    -1,    38,    10,    -1,    -1,    40,    39,    -1,    40,
+      33,    -1,     5,    31,    -1,    10,    40,    -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
 static const yytype_uint16 yyrline[] =
 {
-       0,    89,    89,    93,   101,   104,   111,   119,   122,   129,
-     133,   140,   144,   151,   158,   166,   169,   176,   180,   187,
-     191,   195,   199,   203,   220,   231,   239,   242,   246,   254,
-     257,   261,   266,   274,   277,   281,   285,   293,   296,   300,
-     308,   311,   315,   323,   331,   334
+       0,    86,    86,    95,    98,   105,   109,   117,   124,   128,
+     132,   145,   153,   156,   163,   167,   171,   179,   183,   187,
+     191,   195,   212,   222,   230,   233,   237,   245,   248,   252,
+     257,   264,   272,   275,   279,   287,   290,   294,   302,   306
 };
 #endif
 
@@ -490,13 +477,12 @@
 static const char *const yytname[] =
 {
   "$end", "error", "$undefined", "DT_V1", "DT_MEMRESERVE",
-  "DT_PROPNODENAME", "DT_LITERAL", "DT_LEGACYLITERAL", "DT_BASE",
-  "DT_BYTE", "DT_STRING", "DT_LABEL", "DT_REF", "DT_INCBIN", "';'", "'-'",
-  "'/'", "'{'", "'}'", "'='", "'<'", "'>'", "'['", "']'", "'('", "','",
-  "')'", "$accept", "sourcefile", "memreserves", "memreserve",
-  "v0_memreserves", "v0_memreserve", "addr", "devicetree", "nodedef",
-  "proplist", "propdef", "propdata", "propdataprefix", "celllist",
-  "cellbase", "cellval", "bytestring", "subnodes", "subnode", "label", 0
+  "DT_PROPNODENAME", "DT_LITERAL", "DT_BASE", "DT_BYTE", "DT_STRING",
+  "DT_LABEL", "DT_REF", "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='",
+  "'<'", "'>'", "'['", "']'", "'('", "','", "')'", "$accept", "sourcefile",
+  "memreserves", "memreserve", "addr", "devicetree", "nodedef", "proplist",
+  "propdef", "propdata", "propdataprefix", "celllist", "cellval",
+  "bytestring", "subnodes", "subnode", 0
 };
 #endif
 
@@ -506,29 +492,27 @@
 static const yytype_uint16 yytoknum[] =
 {
        0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
-     265,   266,   267,   268,    59,    45,    47,   123,   125,    61,
-      60,    62,    91,    93,    40,    44,    41
+     265,   266,   267,    59,    47,   123,   125,    61,    60,    62,
+      91,    93,    40,    44,    41
 };
 # endif
 
 /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
 static const yytype_uint8 yyr1[] =
 {
-       0,    27,    28,    28,    29,    29,    30,    31,    31,    32,
-      32,    33,    33,    34,    35,    36,    36,    37,    37,    38,
-      38,    38,    38,    38,    38,    38,    39,    39,    39,    40,
-      40,    40,    40,    41,    41,    42,    42,    43,    43,    43,
-      44,    44,    44,    45,    46,    46
+       0,    25,    26,    27,    27,    28,    28,    29,    30,    30,
+      30,    31,    32,    32,    33,    33,    33,    34,    34,    34,
+      34,    34,    34,    34,    35,    35,    35,    36,    36,    36,
+      36,    37,    38,    38,    38,    39,    39,    39,    40,    40
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
 static const yytype_uint8 yyr2[] =
 {
-       0,     2,     4,     2,     0,     2,     5,     0,     2,     1,
-       6,     1,     1,     2,     5,     0,     2,     5,     3,     2,
-       4,     4,     2,     9,     5,     2,     0,     2,     2,     0,
-       2,     2,     2,     0,     1,     1,     2,     0,     2,     2,
-       0,     2,     2,     3,     0,     1
+       0,     2,     4,     0,     2,     4,     2,     1,     2,     3,
+       3,     5,     0,     2,     4,     2,     2,     2,     4,     4,
+       2,     9,     5,     2,     0,     2,     2,     0,     2,     2,
+       2,     1,     0,     2,     2,     0,     2,     2,     2,     2
 };
 
 /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -536,86 +520,79 @@
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
-       7,     0,    45,     0,     9,     0,     7,     0,     4,     1,
-       0,     3,     8,     0,     0,     4,     0,    15,    13,    11,
-      12,     0,     2,     5,     0,    40,     0,     0,     0,    16,
-       0,    40,     0,     0,     6,     0,    42,    41,     0,    10,
-      14,    18,    26,    43,     0,     0,    25,    17,    27,    19,
-      28,    22,     0,    29,    37,     0,    33,     0,     0,    35,
-      34,    32,    31,    20,     0,    30,    38,    39,    21,     0,
-      24,    36,     0,     0,     0,    23
+       0,     0,     0,     3,     1,     0,     0,     0,     3,     7,
+       0,     6,     0,     2,     4,     0,    12,     8,     0,     0,
+       5,    35,    10,     9,     0,     0,    13,     0,    35,    15,
+      24,    38,    16,    39,     0,    37,    36,     0,     0,    11,
+      23,    14,    25,    17,    26,    20,     0,    27,    32,     0,
+       0,     0,     0,    31,    30,    29,    18,    28,    33,    34,
+      19,     0,    22,     0,     0,     0,    21
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int8 yydefgoto[] =
 {
-      -1,     3,    14,     4,     5,     6,    27,    11,    18,    25,
-      29,    44,    45,    56,    64,    65,    57,    30,    31,     7
+      -1,     2,     7,     8,    10,    13,    17,    21,    26,    37,
+      38,    50,    57,    51,    27,    28
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -14
+#define YYPACT_NINF -12
 static const yytype_int8 yypact[] =
 {
-      30,   -11,   -14,     7,   -14,    -1,    27,    13,    27,   -14,
-       8,   -14,   -14,    40,    -1,    27,    35,   -14,   -14,   -14,
-     -14,    21,   -14,   -14,    40,    24,    40,    28,    40,   -14,
-      32,    24,    46,    38,   -14,    39,   -14,   -14,    26,   -14,
-     -14,   -14,   -14,   -14,    -9,    10,   -14,   -14,   -14,   -14,
-     -14,   -14,    31,   -14,   -14,    44,    -2,     3,    23,   -14,
-     -14,   -14,   -14,   -14,    50,   -14,   -14,   -14,   -14,    40,
-     -14,   -14,    33,    40,    36,   -14
+      10,   -11,    18,    -1,   -12,    22,    -1,    15,    -1,   -12,
+      22,   -12,    20,     1,   -12,    17,   -12,   -12,    20,    20,
+     -12,     6,   -12,   -12,    21,     6,   -12,    23,     6,   -12,
+     -12,   -12,   -12,   -12,    28,   -12,   -12,    -6,    13,   -12,
+     -12,   -12,   -12,   -12,   -12,   -12,    24,   -12,   -12,    33,
+      -5,     0,    -4,   -12,   -12,   -12,   -12,   -12,   -12,   -12,
+     -12,    22,   -12,    25,    22,    19,   -12
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int8 yypgoto[] =
 {
-     -14,   -14,    48,    29,    53,   -14,   -13,    47,    34,   -14,
-      37,   -14,   -14,   -14,   -14,   -14,   -14,    42,   -14,    -7
+     -12,   -12,    36,    39,   -10,   -12,     8,   -12,    12,   -12,
+     -12,   -12,   -12,   -12,    27,    31
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
    positive, shift that token.  If negative, reduce the rule which
    number is the opposite.  If zero, do what YYDEFACT says.
    If YYTABLE_NINF, syntax error.  */
-#define YYTABLE_NINF -45
-static const yytype_int8 yytable[] =
+#define YYTABLE_NINF -1
+static const yytype_uint8 yytable[] =
 {
-      21,    16,    46,     8,    59,    47,    60,     9,    16,    61,
-      62,    28,    66,    33,    67,    10,    48,    13,    32,    63,
-      49,    50,    51,    52,    32,    17,    68,    19,    20,   -44,
-      53,   -44,    54,     1,   -44,     2,    26,    15,     2,    24,
-      41,     2,    34,    17,    15,    42,    19,    20,    69,    70,
-      35,    38,    39,    40,    58,    55,    72,    71,    73,    12,
-      74,    22,    75,    23,     0,     0,     0,     0,    36,     0,
-       0,     0,    43,    37
+      15,    53,     3,     5,    40,    54,    55,    41,    58,     6,
+      59,    24,    18,     1,    56,    19,    25,    42,     4,    61,
+      62,    60,    43,    44,    45,    46,    22,    23,     9,    12,
+      20,    47,    31,    48,    29,    16,    16,    32,    30,    34,
+      35,    39,    52,    66,    14,    11,    49,     0,    64,     0,
+       0,    63,     0,     0,    65,    36,    33
 };
 
 static const yytype_int8 yycheck[] =
 {
-      13,     8,    11,    14,     6,    14,     8,     0,    15,    11,
-      12,    24,     9,    26,    11,    16,    25,     4,    25,    21,
-      10,    11,    12,    13,    31,    17,    23,     6,     7,     5,
-      20,     4,    22,     3,     4,    11,    15,     8,    11,     4,
-      14,    11,    14,    17,    15,    19,     6,     7,    25,    26,
-      18,     5,    14,    14,    10,    24,    69,     7,    25,     6,
-      73,    14,    26,    15,    -1,    -1,    -1,    -1,    31,    -1,
-      -1,    -1,    38,    31
+      10,     6,    13,     4,    10,    10,    11,    13,     8,    10,
+      10,     5,    11,     3,    19,    14,    10,    23,     0,    23,
+      24,    21,     9,    10,    11,    12,    18,    19,     6,    14,
+      13,    18,    24,    20,    13,    15,    15,    25,    17,    16,
+      28,    13,     9,    24,     8,     6,    22,    -1,    23,    -1,
+      -1,    61,    -1,    -1,    64,    28,    25
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
    symbol of state STATE-NUM.  */
 static const yytype_uint8 yystos[] =
 {
-       0,     3,    11,    28,    30,    31,    32,    46,    14,     0,
-      16,    34,    31,     4,    29,    30,    46,    17,    35,     6,
-       7,    33,    34,    29,     4,    36,    15,    33,    33,    37,
-      44,    45,    46,    33,    14,    18,    37,    44,     5,    14,
-      14,    14,    19,    35,    38,    39,    11,    14,    25,    10,
-      11,    12,    13,    20,    22,    24,    40,    43,    10,     6,
-       8,    11,    12,    21,    41,    42,     9,    11,    23,    25,
-      26,     7,    33,    25,    33,    26
+       0,     3,    26,    13,     0,     4,    10,    27,    28,     6,
+      29,    28,    14,    30,    27,    29,    15,    31,    11,    14,
+      13,    32,    31,    31,     5,    10,    33,    39,    40,    13,
+      17,    31,    33,    40,    16,    33,    39,    34,    35,    13,
+      10,    13,    23,     9,    10,    11,    12,    18,    20,    22,
+      36,    38,     9,     6,    10,    11,    19,    37,     8,    10,
+      21,    23,    24,    29,    23,    29,    24
 };
 
 #define yyerrok		(yyerrstatus = 0)
@@ -728,7 +705,7 @@
     {									  \
       YYFPRINTF (stderr, "%s ", Title);					  \
       yy_symbol_print (stderr,						  \
-		  Type, Value, Location); \
+		  Type, Value); \
       YYFPRINTF (stderr, "\n");						  \
     }									  \
 } while (YYID (0))
@@ -742,19 +719,17 @@
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 static void
-yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp)
+yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
 #else
 static void
-yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp)
+yy_symbol_value_print (yyoutput, yytype, yyvaluep)
     FILE *yyoutput;
     int yytype;
     YYSTYPE const * const yyvaluep;
-    YYLTYPE const * const yylocationp;
 #endif
 {
   if (!yyvaluep)
     return;
-  YYUSE (yylocationp);
 # ifdef YYPRINT
   if (yytype < YYNTOKENS)
     YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
@@ -776,14 +751,13 @@
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 static void
-yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp)
+yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
 #else
 static void
-yy_symbol_print (yyoutput, yytype, yyvaluep, yylocationp)
+yy_symbol_print (yyoutput, yytype, yyvaluep)
     FILE *yyoutput;
     int yytype;
     YYSTYPE const * const yyvaluep;
-    YYLTYPE const * const yylocationp;
 #endif
 {
   if (yytype < YYNTOKENS)
@@ -791,9 +765,7 @@
   else
     YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
 
-  YY_LOCATION_PRINT (yyoutput, *yylocationp);
-  YYFPRINTF (yyoutput, ": ");
-  yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp);
+  yy_symbol_value_print (yyoutput, yytype, yyvaluep);
   YYFPRINTF (yyoutput, ")");
 }
 
@@ -805,17 +777,20 @@
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 static void
-yy_stack_print (yytype_int16 *bottom, yytype_int16 *top)
+yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
 #else
 static void
-yy_stack_print (bottom, top)
-    yytype_int16 *bottom;
-    yytype_int16 *top;
+yy_stack_print (yybottom, yytop)
+    yytype_int16 *yybottom;
+    yytype_int16 *yytop;
 #endif
 {
   YYFPRINTF (stderr, "Stack now");
-  for (; bottom <= top; ++bottom)
-    YYFPRINTF (stderr, " %d", *bottom);
+  for (; yybottom <= yytop; yybottom++)
+    {
+      int yybot = *yybottom;
+      YYFPRINTF (stderr, " %d", yybot);
+    }
   YYFPRINTF (stderr, "\n");
 }
 
@@ -833,12 +808,11 @@
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 static void
-yy_reduce_print (YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule)
+yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
 #else
 static void
-yy_reduce_print (yyvsp, yylsp, yyrule)
+yy_reduce_print (yyvsp, yyrule)
     YYSTYPE *yyvsp;
-    YYLTYPE *yylsp;
     int yyrule;
 #endif
 {
@@ -850,18 +824,18 @@
   /* The symbols being reduced.  */
   for (yyi = 0; yyi < yynrhs; yyi++)
     {
-      fprintf (stderr, "   $%d = ", yyi + 1);
+      YYFPRINTF (stderr, "   $%d = ", yyi + 1);
       yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
 		       &(yyvsp[(yyi + 1) - (yynrhs)])
-		       , &(yylsp[(yyi + 1) - (yynrhs)])		       );
-      fprintf (stderr, "\n");
+		       		       );
+      YYFPRINTF (stderr, "\n");
     }
 }
 
 # define YY_REDUCE_PRINT(Rule)		\
 do {					\
   if (yydebug)				\
-    yy_reduce_print (yyvsp, yylsp, Rule); \
+    yy_reduce_print (yyvsp, Rule); \
 } while (YYID (0))
 
 /* Nonzero means print parse trace.  It is left uninitialized so that
@@ -1112,18 +1086,16 @@
 #if (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 static void
-yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp)
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
 #else
 static void
-yydestruct (yymsg, yytype, yyvaluep, yylocationp)
+yydestruct (yymsg, yytype, yyvaluep)
     const char *yymsg;
     int yytype;
     YYSTYPE *yyvaluep;
-    YYLTYPE *yylocationp;
 #endif
 {
   YYUSE (yyvaluep);
-  YYUSE (yylocationp);
 
   if (!yymsg)
     yymsg = "Deleting";
@@ -1136,10 +1108,8 @@
 	break;
     }
 }
-
 
 /* Prevent warnings from -Wmissing-prototypes.  */
-
 #ifdef YYPARSE_PARAM
 #if defined __STDC__ || defined __cplusplus
 int yyparse (void *YYPARSE_PARAM);
@@ -1155,23 +1125,20 @@
 #endif /* ! YYPARSE_PARAM */
 
 
-
-/* The look-ahead symbol.  */
+/* The lookahead symbol.  */
 int yychar;
 
-/* The semantic value of the look-ahead symbol.  */
+/* The semantic value of the lookahead symbol.  */
 YYSTYPE yylval;
 
 /* Number of syntax errors so far.  */
 int yynerrs;
-/* Location data for the look-ahead symbol.  */
-YYLTYPE yylloc;
 
 
 
-/*----------.
-| yyparse.  |
-`----------*/
+/*-------------------------.
+| yyparse or yypush_parse.  |
+`-------------------------*/
 
 #ifdef YYPARSE_PARAM
 #if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1195,14 +1162,39 @@
 #endif
 #endif
 {
-  
-  int yystate;
+
+
+    int yystate;
+    /* Number of tokens to shift before error messages enabled.  */
+    int yyerrstatus;
+
+    /* The stacks and their tools:
+       `yyss': related to states.
+       `yyvs': related to semantic values.
+
+       Refer to the stacks thru separate pointers, to allow yyoverflow
+       to reallocate them elsewhere.  */
+
+    /* The state stack.  */
+    yytype_int16 yyssa[YYINITDEPTH];
+    yytype_int16 *yyss;
+    yytype_int16 *yyssp;
+
+    /* The semantic value stack.  */
+    YYSTYPE yyvsa[YYINITDEPTH];
+    YYSTYPE *yyvs;
+    YYSTYPE *yyvsp;
+
+    YYSIZE_T yystacksize;
+
   int yyn;
   int yyresult;
-  /* Number of tokens to shift before error messages enabled.  */
-  int yyerrstatus;
-  /* Look-ahead token as an internal (translated) token number.  */
-  int yytoken = 0;
+  /* Lookahead token as an internal (translated) token number.  */
+  int yytoken;
+  /* The variables used to return semantic value and location from the
+     action routines.  */
+  YYSTYPE yyval;
+
 #if YYERROR_VERBOSE
   /* Buffer for error messages, and its allocated size.  */
   char yymsgbuf[128];
@@ -1210,64 +1202,30 @@
   YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
 #endif
 
-  /* Three stacks and their tools:
-     `yyss': related to states,
-     `yyvs': related to semantic values,
-     `yyls': related to locations.
-
-     Refer to the stacks thru separate pointers, to allow yyoverflow
-     to reallocate them elsewhere.  */
-
-  /* The state stack.  */
-  yytype_int16 yyssa[YYINITDEPTH];
-  yytype_int16 *yyss = yyssa;
-  yytype_int16 *yyssp;
-
-  /* The semantic value stack.  */
-  YYSTYPE yyvsa[YYINITDEPTH];
-  YYSTYPE *yyvs = yyvsa;
-  YYSTYPE *yyvsp;
-
-  /* The location stack.  */
-  YYLTYPE yylsa[YYINITDEPTH];
-  YYLTYPE *yyls = yylsa;
-  YYLTYPE *yylsp;
-  /* The locations where the error started and ended.  */
-  YYLTYPE yyerror_range[2];
-
-#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N), yylsp -= (N))
-
-  YYSIZE_T yystacksize = YYINITDEPTH;
-
-  /* The variables used to return semantic value and location from the
-     action routines.  */
-  YYSTYPE yyval;
-  YYLTYPE yyloc;
+#define YYPOPSTACK(N)   (yyvsp -= (N), yyssp -= (N))
 
   /* The number of symbols on the RHS of the reduced rule.
      Keep to zero when no symbol should be popped.  */
   int yylen = 0;
 
+  yytoken = 0;
+  yyss = yyssa;
+  yyvs = yyvsa;
+  yystacksize = YYINITDEPTH;
+
   YYDPRINTF ((stderr, "Starting parse\n"));
 
   yystate = 0;
   yyerrstatus = 0;
   yynerrs = 0;
-  yychar = YYEMPTY;		/* Cause a token to be read.  */
+  yychar = YYEMPTY; /* Cause a token to be read.  */
 
   /* Initialize stack pointers.
      Waste one element of value and location stack
      so that they stay on the same level as the state stack.
      The wasted elements are never initialized.  */
-
   yyssp = yyss;
   yyvsp = yyvs;
-  yylsp = yyls;
-#if YYLTYPE_IS_TRIVIAL
-  /* Initialize the default location before parsing starts.  */
-  yylloc.first_line   = yylloc.last_line   = 1;
-  yylloc.first_column = yylloc.last_column = 0;
-#endif
 
   goto yysetstate;
 
@@ -1294,7 +1252,6 @@
 	   memory.  */
 	YYSTYPE *yyvs1 = yyvs;
 	yytype_int16 *yyss1 = yyss;
-	YYLTYPE *yyls1 = yyls;
 
 	/* Each stack pointer address is followed by the size of the
 	   data in use in that stack, in bytes.  This used to be a
@@ -1303,9 +1260,8 @@
 	yyoverflow (YY_("memory exhausted"),
 		    &yyss1, yysize * sizeof (*yyssp),
 		    &yyvs1, yysize * sizeof (*yyvsp),
-		    &yyls1, yysize * sizeof (*yylsp),
 		    &yystacksize);
-	yyls = yyls1;
+
 	yyss = yyss1;
 	yyvs = yyvs1;
       }
@@ -1326,9 +1282,8 @@
 	  (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
 	if (! yyptr)
 	  goto yyexhaustedlab;
-	YYSTACK_RELOCATE (yyss);
-	YYSTACK_RELOCATE (yyvs);
-	YYSTACK_RELOCATE (yyls);
+	YYSTACK_RELOCATE (yyss_alloc, yyss);
+	YYSTACK_RELOCATE (yyvs_alloc, yyvs);
 #  undef YYSTACK_RELOCATE
 	if (yyss1 != yyssa)
 	  YYSTACK_FREE (yyss1);
@@ -1338,7 +1293,6 @@
 
       yyssp = yyss + yysize - 1;
       yyvsp = yyvs + yysize - 1;
-      yylsp = yyls + yysize - 1;
 
       YYDPRINTF ((stderr, "Stack size increased to %lu\n",
 		  (unsigned long int) yystacksize));
@@ -1349,6 +1303,9 @@
 
   YYDPRINTF ((stderr, "Entering state %d\n", yystate));
 
+  if (yystate == YYFINAL)
+    YYACCEPT;
+
   goto yybackup;
 
 /*-----------.
@@ -1357,16 +1314,16 @@
 yybackup:
 
   /* Do appropriate processing given the current state.  Read a
-     look-ahead token if we need one and don't already have one.  */
+     lookahead token if we need one and don't already have one.  */
 
-  /* First try to decide what to do without reference to look-ahead token.  */
+  /* First try to decide what to do without reference to lookahead token.  */
   yyn = yypact[yystate];
   if (yyn == YYPACT_NINF)
     goto yydefault;
 
-  /* Not known => get a look-ahead token if don't already have one.  */
+  /* Not known => get a lookahead token if don't already have one.  */
 
-  /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol.  */
+  /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol.  */
   if (yychar == YYEMPTY)
     {
       YYDPRINTF ((stderr, "Reading a token: "));
@@ -1398,24 +1355,20 @@
       goto yyreduce;
     }
 
-  if (yyn == YYFINAL)
-    YYACCEPT;
-
   /* Count tokens shifted since error; after three, turn off error
      status.  */
   if (yyerrstatus)
     yyerrstatus--;
 
-  /* Shift the look-ahead token.  */
+  /* Shift the lookahead token.  */
   YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
 
-  /* Discard the shifted token unless it is eof.  */
-  if (yychar != YYEOF)
-    yychar = YYEMPTY;
+  /* Discard the shifted token.  */
+  yychar = YYEMPTY;
 
   yystate = yyn;
   *++yyvsp = yylval;
-  *++yylsp = yylloc;
+
   goto yynewstate;
 
 
@@ -1446,337 +1399,387 @@
      GCC warning that YYVAL may be used uninitialized.  */
   yyval = yyvsp[1-yylen];
 
-  /* Default location.  */
-  YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);
+
   YY_REDUCE_PRINT (yyn);
   switch (yyn)
     {
         case 2:
-#line 90 "dtc-parser.y"
+
+/* Line 1455 of yacc.c  */
+#line 87 "dtc-parser.y"
     {
-			the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node), 0);
+			the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node),
+							guess_boot_cpuid((yyvsp[(4) - (4)].node)));
 		;}
     break;
 
   case 3:
-#line 94 "dtc-parser.y"
+
+/* Line 1455 of yacc.c  */
+#line 95 "dtc-parser.y"
     {
-			the_boot_info = build_boot_info((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].node), 0);
+			(yyval.re) = NULL;
 		;}
     break;
 
   case 4:
-#line 101 "dtc-parser.y"
+
+/* Line 1455 of yacc.c  */
+#line 99 "dtc-parser.y"
     {
-			(yyval.re) = NULL;
+			(yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
 		;}
     break;
 
   case 5:
-#line 105 "dtc-parser.y"
+
+/* Line 1455 of yacc.c  */
+#line 106 "dtc-parser.y"
     {
-			(yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
+			(yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].addr), (yyvsp[(3) - (4)].addr));
 		;}
     break;
 
   case 6:
-#line 112 "dtc-parser.y"
+
+/* Line 1455 of yacc.c  */
+#line 110 "dtc-parser.y"
     {
-			(yyval.re) = build_reserve_entry((yyvsp[(3) - (5)].addr), (yyvsp[(4) - (5)].addr), (yyvsp[(1) - (5)].labelref));
+			add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref));
+			(yyval.re) = (yyvsp[(2) - (2)].re);
 		;}
     break;
 
   case 7:
-#line 119 "dtc-parser.y"
-    {
-			(yyval.re) = NULL;
-		;}
-    break;
 
-  case 8:
-#line 123 "dtc-parser.y"
-    {
-			(yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
-		;}
-    break;
-
-  case 9:
-#line 130 "dtc-parser.y"
-    {
-			(yyval.re) = (yyvsp[(1) - (1)].re);
-		;}
-    break;
-
-  case 10:
-#line 134 "dtc-parser.y"
-    {
-			(yyval.re) = build_reserve_entry((yyvsp[(3) - (6)].addr), (yyvsp[(5) - (6)].addr) - (yyvsp[(3) - (6)].addr) + 1, (yyvsp[(1) - (6)].labelref));
-		;}
-    break;
-
-  case 11:
-#line 141 "dtc-parser.y"
+/* Line 1455 of yacc.c  */
+#line 118 "dtc-parser.y"
     {
 			(yyval.addr) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
 		;}
     break;
 
-  case 12:
-#line 145 "dtc-parser.y"
+  case 8:
+
+/* Line 1455 of yacc.c  */
+#line 125 "dtc-parser.y"
     {
-			(yyval.addr) = eval_literal((yyvsp[(1) - (1)].literal), 16, 64);
+			(yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
 		;}
     break;
 
-  case 13:
-#line 152 "dtc-parser.y"
+  case 9:
+
+/* Line 1455 of yacc.c  */
+#line 129 "dtc-parser.y"
     {
-			(yyval.node) = name_node((yyvsp[(2) - (2)].node), "", NULL);
+			(yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
 		;}
     break;
 
-  case 14:
-#line 159 "dtc-parser.y"
+  case 10:
+
+/* Line 1455 of yacc.c  */
+#line 133 "dtc-parser.y"
+    {
+			struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
+
+			if (target)
+				merge_nodes(target, (yyvsp[(3) - (3)].node));
+			else
+				print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
+			(yyval.node) = (yyvsp[(1) - (3)].node);
+		;}
+    break;
+
+  case 11:
+
+/* Line 1455 of yacc.c  */
+#line 146 "dtc-parser.y"
     {
 			(yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
 		;}
     break;
 
-  case 15:
-#line 166 "dtc-parser.y"
+  case 12:
+
+/* Line 1455 of yacc.c  */
+#line 153 "dtc-parser.y"
     {
 			(yyval.proplist) = NULL;
 		;}
     break;
 
-  case 16:
-#line 170 "dtc-parser.y"
+  case 13:
+
+/* Line 1455 of yacc.c  */
+#line 157 "dtc-parser.y"
     {
 			(yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
 		;}
     break;
 
+  case 14:
+
+/* Line 1455 of yacc.c  */
+#line 164 "dtc-parser.y"
+    {
+			(yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data));
+		;}
+    break;
+
+  case 15:
+
+/* Line 1455 of yacc.c  */
+#line 168 "dtc-parser.y"
+    {
+			(yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data);
+		;}
+    break;
+
+  case 16:
+
+/* Line 1455 of yacc.c  */
+#line 172 "dtc-parser.y"
+    {
+			add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
+			(yyval.prop) = (yyvsp[(2) - (2)].prop);
+		;}
+    break;
+
   case 17:
-#line 177 "dtc-parser.y"
-    {
-			(yyval.prop) = build_property((yyvsp[(2) - (5)].propnodename), (yyvsp[(4) - (5)].data), (yyvsp[(1) - (5)].labelref));
-		;}
-    break;
 
-  case 18:
-#line 181 "dtc-parser.y"
-    {
-			(yyval.prop) = build_property((yyvsp[(2) - (3)].propnodename), empty_data, (yyvsp[(1) - (3)].labelref));
-		;}
-    break;
-
-  case 19:
-#line 188 "dtc-parser.y"
+/* Line 1455 of yacc.c  */
+#line 180 "dtc-parser.y"
     {
 			(yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
 		;}
     break;
 
+  case 18:
+
+/* Line 1455 of yacc.c  */
+#line 184 "dtc-parser.y"
+    {
+			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
+		;}
+    break;
+
+  case 19:
+
+/* Line 1455 of yacc.c  */
+#line 188 "dtc-parser.y"
+    {
+			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
+		;}
+    break;
+
   case 20:
+
+/* Line 1455 of yacc.c  */
 #line 192 "dtc-parser.y"
     {
-			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
-		;}
-    break;
-
-  case 21:
-#line 196 "dtc-parser.y"
-    {
-			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
-		;}
-    break;
-
-  case 22:
-#line 200 "dtc-parser.y"
-    {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
 		;}
     break;
 
-  case 23:
-#line 204 "dtc-parser.y"
+  case 21:
+
+/* Line 1455 of yacc.c  */
+#line 196 "dtc-parser.y"
     {
-			struct search_path path = { srcpos_file->dir, NULL, NULL };
-			struct dtc_file *file = dtc_open_file((yyvsp[(4) - (9)].data).val, &path);
-			struct data d = empty_data;
+			FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL);
+			struct data d;
 
 			if ((yyvsp[(6) - (9)].addr) != 0)
-				if (fseek(file->file, (yyvsp[(6) - (9)].addr), SEEK_SET) != 0)
-					yyerrorf("Couldn't seek to offset %llu in \"%s\": %s",
-						 (unsigned long long)(yyvsp[(6) - (9)].addr),
-						 (yyvsp[(4) - (9)].data).val, strerror(errno));
+				if (fseek(f, (yyvsp[(6) - (9)].addr), SEEK_SET) != 0)
+					print_error("Couldn't seek to offset %llu in \"%s\": %s",
+						     (unsigned long long)(yyvsp[(6) - (9)].addr),
+						     (yyvsp[(4) - (9)].data).val,
+						     strerror(errno));
 
-			d = data_copy_file(file->file, (yyvsp[(8) - (9)].addr));
+			d = data_copy_file(f, (yyvsp[(8) - (9)].addr));
 
 			(yyval.data) = data_merge((yyvsp[(1) - (9)].data), d);
-			dtc_close_file(file);
+			fclose(f);
 		;}
     break;
 
-  case 24:
-#line 221 "dtc-parser.y"
+  case 22:
+
+/* Line 1455 of yacc.c  */
+#line 213 "dtc-parser.y"
     {
-			struct search_path path = { srcpos_file->dir, NULL, NULL };
-			struct dtc_file *file = dtc_open_file((yyvsp[(4) - (5)].data).val, &path);
+			FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL);
 			struct data d = empty_data;
 
-			d = data_copy_file(file->file, -1);
+			d = data_copy_file(f, -1);
 
 			(yyval.data) = data_merge((yyvsp[(1) - (5)].data), d);
-			dtc_close_file(file);
+			fclose(f);
 		;}
     break;
 
-  case 25:
-#line 232 "dtc-parser.y"
+  case 23:
+
+/* Line 1455 of yacc.c  */
+#line 223 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
 		;}
     break;
 
-  case 26:
-#line 239 "dtc-parser.y"
+  case 24:
+
+/* Line 1455 of yacc.c  */
+#line 230 "dtc-parser.y"
     {
 			(yyval.data) = empty_data;
 		;}
     break;
 
-  case 27:
-#line 243 "dtc-parser.y"
+  case 25:
+
+/* Line 1455 of yacc.c  */
+#line 234 "dtc-parser.y"
     {
 			(yyval.data) = (yyvsp[(1) - (2)].data);
 		;}
     break;
 
-  case 28:
-#line 247 "dtc-parser.y"
+  case 26:
+
+/* Line 1455 of yacc.c  */
+#line 238 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
 		;}
     break;
 
-  case 29:
-#line 254 "dtc-parser.y"
+  case 27:
+
+/* Line 1455 of yacc.c  */
+#line 245 "dtc-parser.y"
     {
 			(yyval.data) = empty_data;
 		;}
     break;
 
-  case 30:
-#line 258 "dtc-parser.y"
+  case 28:
+
+/* Line 1455 of yacc.c  */
+#line 249 "dtc-parser.y"
     {
 			(yyval.data) = data_append_cell((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].cell));
 		;}
     break;
 
-  case 31:
-#line 262 "dtc-parser.y"
+  case 29:
+
+/* Line 1455 of yacc.c  */
+#line 253 "dtc-parser.y"
     {
 			(yyval.data) = data_append_cell(data_add_marker((yyvsp[(1) - (2)].data), REF_PHANDLE,
 							      (yyvsp[(2) - (2)].labelref)), -1);
 		;}
     break;
 
-  case 32:
-#line 267 "dtc-parser.y"
+  case 30:
+
+/* Line 1455 of yacc.c  */
+#line 258 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
 		;}
     break;
 
-  case 33:
-#line 274 "dtc-parser.y"
-    {
-			(yyval.cbase) = 16;
-		;}
-    break;
+  case 31:
 
-  case 35:
-#line 282 "dtc-parser.y"
+/* Line 1455 of yacc.c  */
+#line 265 "dtc-parser.y"
     {
 			(yyval.cell) = eval_literal((yyvsp[(1) - (1)].literal), 0, 32);
 		;}
     break;
 
-  case 36:
-#line 286 "dtc-parser.y"
-    {
-			(yyval.cell) = eval_literal((yyvsp[(2) - (2)].literal), (yyvsp[(1) - (2)].cbase), 32);
-		;}
-    break;
+  case 32:
 
-  case 37:
-#line 293 "dtc-parser.y"
+/* Line 1455 of yacc.c  */
+#line 272 "dtc-parser.y"
     {
 			(yyval.data) = empty_data;
 		;}
     break;
 
-  case 38:
-#line 297 "dtc-parser.y"
+  case 33:
+
+/* Line 1455 of yacc.c  */
+#line 276 "dtc-parser.y"
     {
 			(yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
 		;}
     break;
 
-  case 39:
-#line 301 "dtc-parser.y"
+  case 34:
+
+/* Line 1455 of yacc.c  */
+#line 280 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
 		;}
     break;
 
-  case 40:
-#line 308 "dtc-parser.y"
+  case 35:
+
+/* Line 1455 of yacc.c  */
+#line 287 "dtc-parser.y"
     {
 			(yyval.nodelist) = NULL;
 		;}
     break;
 
-  case 41:
-#line 312 "dtc-parser.y"
+  case 36:
+
+/* Line 1455 of yacc.c  */
+#line 291 "dtc-parser.y"
     {
 			(yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
 		;}
     break;
 
-  case 42:
-#line 316 "dtc-parser.y"
+  case 37:
+
+/* Line 1455 of yacc.c  */
+#line 295 "dtc-parser.y"
     {
-			yyerror("syntax error: properties must precede subnodes");
+			print_error("syntax error: properties must precede subnodes");
 			YYERROR;
 		;}
     break;
 
-  case 43:
-#line 324 "dtc-parser.y"
+  case 38:
+
+/* Line 1455 of yacc.c  */
+#line 303 "dtc-parser.y"
     {
-			(yyval.node) = name_node((yyvsp[(3) - (3)].node), (yyvsp[(2) - (3)].propnodename), (yyvsp[(1) - (3)].labelref));
+			(yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename));
 		;}
     break;
 
-  case 44:
-#line 331 "dtc-parser.y"
-    {
-			(yyval.labelref) = NULL;
-		;}
-    break;
+  case 39:
 
-  case 45:
-#line 335 "dtc-parser.y"
+/* Line 1455 of yacc.c  */
+#line 307 "dtc-parser.y"
     {
-			(yyval.labelref) = (yyvsp[(1) - (1)].labelref);
+			add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref));
+			(yyval.node) = (yyvsp[(2) - (2)].node);
 		;}
     break;
 
 
-/* Line 1267 of yacc.c.  */
-#line 1780 "dtc-parser.tab.c"
+
+/* Line 1455 of yacc.c  */
+#line 1783 "dtc-parser.tab.c"
       default: break;
     }
   YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
@@ -1786,7 +1789,6 @@
   YY_STACK_PRINT (yyss, yyssp);
 
   *++yyvsp = yyval;
-  *++yylsp = yyloc;
 
   /* Now `shift' the result of the reduction.  Determine what state
      that goes to, based on the state we popped back to and the rule
@@ -1848,11 +1850,11 @@
 #endif
     }
 
-  yyerror_range[0] = yylloc;
+
 
   if (yyerrstatus == 3)
     {
-      /* If just tried and failed to reuse look-ahead token after an
+      /* If just tried and failed to reuse lookahead token after an
 	 error, discard it.  */
 
       if (yychar <= YYEOF)
@@ -1864,12 +1866,12 @@
       else
 	{
 	  yydestruct ("Error: discarding",
-		      yytoken, &yylval, &yylloc);
+		      yytoken, &yylval);
 	  yychar = YYEMPTY;
 	}
     }
 
-  /* Else will try to reuse look-ahead token after shifting the error
+  /* Else will try to reuse lookahead token after shifting the error
      token.  */
   goto yyerrlab1;
 
@@ -1885,7 +1887,6 @@
   if (/*CONSTCOND*/ 0)
      goto yyerrorlab;
 
-  yyerror_range[0] = yylsp[1-yylen];
   /* Do not reclaim the symbols of the rule which action triggered
      this YYERROR.  */
   YYPOPSTACK (yylen);
@@ -1919,24 +1920,16 @@
       if (yyssp == yyss)
 	YYABORT;
 
-      yyerror_range[0] = *yylsp;
+
       yydestruct ("Error: popping",
-		  yystos[yystate], yyvsp, yylsp);
+		  yystos[yystate], yyvsp);
       YYPOPSTACK (1);
       yystate = *yyssp;
       YY_STACK_PRINT (yyss, yyssp);
     }
 
-  if (yyn == YYFINAL)
-    YYACCEPT;
-
   *++yyvsp = yylval;
 
-  yyerror_range[1] = yylloc;
-  /* Using YYLLOC is tempting, but would change the location of
-     the look-ahead.  YYLOC is available though.  */
-  YYLLOC_DEFAULT (yyloc, (yyerror_range - 1), 2);
-  *++yylsp = yyloc;
 
   /* Shift the error token.  */
   YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
@@ -1959,7 +1952,7 @@
   yyresult = 1;
   goto yyreturn;
 
-#ifndef yyoverflow
+#if !defined(yyoverflow) || YYERROR_VERBOSE
 /*-------------------------------------------------.
 | yyexhaustedlab -- memory exhaustion comes here.  |
 `-------------------------------------------------*/
@@ -1970,9 +1963,9 @@
 #endif
 
 yyreturn:
-  if (yychar != YYEOF && yychar != YYEMPTY)
+  if (yychar != YYEMPTY)
      yydestruct ("Cleanup: discarding lookahead",
-		 yytoken, &yylval, &yylloc);
+		 yytoken, &yylval);
   /* Do not reclaim the symbols of the rule which action triggered
      this YYABORT or YYACCEPT.  */
   YYPOPSTACK (yylen);
@@ -1980,7 +1973,7 @@
   while (yyssp != yyss)
     {
       yydestruct ("Cleanup: popping",
-		  yystos[*yyssp], yyvsp, yylsp);
+		  yystos[*yyssp], yyvsp);
       YYPOPSTACK (1);
     }
 #ifndef yyoverflow
@@ -1996,29 +1989,24 @@
 }
 
 
-#line 340 "dtc-parser.y"
+
+/* Line 1675 of yacc.c  */
+#line 313 "dtc-parser.y"
 
 
-void yyerrorf(char const *s, ...)
+void print_error(char const *fmt, ...)
 {
-	const char *fname = srcpos_file ? srcpos_file->name : "<no-file>";
 	va_list va;
-	va_start(va, s);
 
-	if (strcmp(fname, "-") == 0)
-		fname = "stdin";
-
-	fprintf(stderr, "%s:%d ", fname, yylloc.first_line);
-	vfprintf(stderr, s, va);
-	fprintf(stderr, "\n");
+	va_start(va, fmt);
+	srcpos_verror(&yylloc, fmt, va);
+	va_end(va);
 
 	treesource_error = 1;
-	va_end(va);
 }
 
-void yyerror (char const *s)
-{
-	yyerrorf("%s", s);
+void yyerror(char const *s) {
+	print_error("%s", s);
 }
 
 static unsigned long long eval_literal(const char *s, int base, int bits)
@@ -2029,12 +2017,12 @@
 	errno = 0;
 	val = strtoull(s, &e, base);
 	if (*e)
-		yyerror("bad characters in literal");
+		print_error("bad characters in literal");
 	else if ((errno == ERANGE)
 		 || ((bits < 64) && (val >= (1ULL << bits))))
-		yyerror("literal out of range");
+		print_error("literal out of range");
 	else if (errno != 0)
-		yyerror("bad literal");
+		print_error("bad literal");
 	return val;
 }
 
diff --git a/scripts/dtc/dtc-parser.tab.h_shipped b/scripts/dtc/dtc-parser.tab.h_shipped
index ba99100..95c9547 100644
--- a/scripts/dtc/dtc-parser.tab.h_shipped
+++ b/scripts/dtc/dtc-parser.tab.h_shipped
@@ -1,24 +1,23 @@
-/* A Bison parser, made by GNU Bison 2.3.  */
+
+/* A Bison parser, made by GNU Bison 2.4.1.  */
 
 /* Skeleton interface for Bison's Yacc-like parsers in C
-
-   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   
+      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
    Free Software Foundation, Inc.
-
-   This program is free software; you can redistribute it and/or modify
+   
+   This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2, or (at your option)
-   any later version.
-
+   the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+   
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
-
+   
    You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 51 Franklin Street, Fifth Floor,
-   Boston, MA 02110-1301, USA.  */
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
 
 /* As a special exception, you may create a larger work that contains
    part or all of the Bison parser skeleton and distribute that work
@@ -29,10 +28,11 @@
    special exception, which will cause the skeleton and the resulting
    Bison output files to be licensed under the GNU General Public
    License without this special exception.
-
+   
    This special exception was added by the Free Software Foundation in
    version 2.2 of Bison.  */
 
+
 /* Tokens.  */
 #ifndef YYTOKENTYPE
 # define YYTOKENTYPE
@@ -43,35 +43,24 @@
      DT_MEMRESERVE = 259,
      DT_PROPNODENAME = 260,
      DT_LITERAL = 261,
-     DT_LEGACYLITERAL = 262,
-     DT_BASE = 263,
-     DT_BYTE = 264,
-     DT_STRING = 265,
-     DT_LABEL = 266,
-     DT_REF = 267,
-     DT_INCBIN = 268
+     DT_BASE = 262,
+     DT_BYTE = 263,
+     DT_STRING = 264,
+     DT_LABEL = 265,
+     DT_REF = 266,
+     DT_INCBIN = 267
    };
 #endif
-/* Tokens.  */
-#define DT_V1 258
-#define DT_MEMRESERVE 259
-#define DT_PROPNODENAME 260
-#define DT_LITERAL 261
-#define DT_LEGACYLITERAL 262
-#define DT_BASE 263
-#define DT_BYTE 264
-#define DT_STRING 265
-#define DT_LABEL 266
-#define DT_REF 267
-#define DT_INCBIN 268
-
 
 
 
 #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
 typedef union YYSTYPE
-#line 37 "dtc-parser.y"
 {
+
+/* Line 1676 of yacc.c  */
+#line 39 "dtc-parser.y"
+
 	char *propnodename;
 	char *literal;
 	char *labelref;
@@ -86,28 +75,17 @@
 	struct node *node;
 	struct node *nodelist;
 	struct reserve_info *re;
-}
-/* Line 1489 of yacc.c.  */
-#line 92 "dtc-parser.tab.h"
-	YYSTYPE;
+
+
+
+/* Line 1676 of yacc.c  */
+#line 83 "dtc-parser.tab.h"
+} YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
 # define YYSTYPE_IS_DECLARED 1
-# define YYSTYPE_IS_TRIVIAL 1
 #endif
 
 extern YYSTYPE yylval;
 
-#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
-typedef struct YYLTYPE
-{
-  int first_line;
-  int first_column;
-  int last_line;
-  int last_column;
-} YYLTYPE;
-# define yyltype YYLTYPE /* obsolescent; will be withdrawn */
-# define YYLTYPE_IS_DECLARED 1
-# define YYLTYPE_IS_TRIVIAL 1
-#endif
 
-extern YYLTYPE yylloc;
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index b2ab562..5e84a67 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -18,15 +18,17 @@
  *                                                                   USA
  */
 
-%locations
-
 %{
 #include <stdio.h>
 
 #include "dtc.h"
 #include "srcpos.h"
 
+YYLTYPE yylloc;
+
 extern int yylex(void);
+extern void print_error(char const *fmt, ...);
+extern void yyerror(char const *s);
 
 extern struct boot_info *the_boot_info;
 extern int treesource_error;
@@ -55,7 +57,6 @@
 %token DT_MEMRESERVE
 %token <propnodename> DT_PROPNODENAME
 %token <literal> DT_LITERAL
-%token <literal> DT_LEGACYLITERAL
 %token <cbase> DT_BASE
 %token <byte> DT_BYTE
 %token <data> DT_STRING
@@ -67,11 +68,8 @@
 %type <data> propdataprefix
 %type <re> memreserve
 %type <re> memreserves
-%type <re> v0_memreserve
-%type <re> v0_memreserves
 %type <addr> addr
 %type <data> celllist
-%type <cbase> cellbase
 %type <cell> cellval
 %type <data> bytestring
 %type <prop> propdef
@@ -81,18 +79,14 @@
 %type <node> nodedef
 %type <node> subnode
 %type <nodelist> subnodes
-%type <labelref> label
 
 %%
 
 sourcefile:
 	  DT_V1 ';' memreserves devicetree
 		{
-			the_boot_info = build_boot_info($3, $4, 0);
-		}
-	| v0_memreserves devicetree
-		{
-			the_boot_info = build_boot_info($1, $2, 0);
+			the_boot_info = build_boot_info($3, $4,
+							guess_boot_cpuid($4));
 		}
 	;
 
@@ -108,31 +102,14 @@
 	;
 
 memreserve:
-	  label DT_MEMRESERVE addr addr ';'
+	  DT_MEMRESERVE addr addr ';'
 		{
-			$$ = build_reserve_entry($3, $4, $1);
+			$$ = build_reserve_entry($2, $3);
 		}
-	;
-
-v0_memreserves:
-	  /* empty */
+	| DT_LABEL memreserve
 		{
-			$$ = NULL;
-		}
-	| v0_memreserve v0_memreserves
-		{
-			$$ = chain_reserve_entry($1, $2);
-		};
-	;
-
-v0_memreserve:
-	  memreserve
-		{
-			$$ = $1;
-		}
-	| label DT_MEMRESERVE addr '-' addr ';'
-		{
-			$$ = build_reserve_entry($3, $5 - $3 + 1, $1);
+			add_label(&$2->labels, $1);
+			$$ = $2;
 		}
 	;
 
@@ -141,16 +118,26 @@
 		{
 			$$ = eval_literal($1, 0, 64);
 		}
-	| DT_LEGACYLITERAL
-		{
-			$$ = eval_literal($1, 16, 64);
-		}
 	  ;
 
 devicetree:
 	  '/' nodedef
 		{
-			$$ = name_node($2, "", NULL);
+			$$ = name_node($2, "");
+		}
+	| devicetree '/' nodedef
+		{
+			$$ = merge_nodes($1, $3);
+		}
+	| devicetree DT_REF nodedef
+		{
+			struct node *target = get_node_by_ref($1, $2);
+
+			if (target)
+				merge_nodes(target, $3);
+			else
+				print_error("label or path, '%s', not found", $2);
+			$$ = $1;
 		}
 	;
 
@@ -173,13 +160,18 @@
 	;
 
 propdef:
-	  label DT_PROPNODENAME '=' propdata ';'
+	  DT_PROPNODENAME '=' propdata ';'
 		{
-			$$ = build_property($2, $4, $1);
+			$$ = build_property($1, $3);
 		}
-	| label DT_PROPNODENAME ';'
+	| DT_PROPNODENAME ';'
 		{
-			$$ = build_property($2, empty_data, $1);
+			$$ = build_property($1, empty_data);
+		}
+	| DT_LABEL propdef
+		{
+			add_label(&$2->labels, $1);
+			$$ = $2;
 		}
 	;
 
@@ -202,31 +194,30 @@
 		}
 	| propdataprefix DT_INCBIN '(' DT_STRING ',' addr ',' addr ')'
 		{
-			struct search_path path = { srcpos_file->dir, NULL, NULL };
-			struct dtc_file *file = dtc_open_file($4.val, &path);
-			struct data d = empty_data;
+			FILE *f = srcfile_relative_open($4.val, NULL);
+			struct data d;
 
 			if ($6 != 0)
-				if (fseek(file->file, $6, SEEK_SET) != 0)
-					yyerrorf("Couldn't seek to offset %llu in \"%s\": %s",
-						 (unsigned long long)$6,
-						 $4.val, strerror(errno));
+				if (fseek(f, $6, SEEK_SET) != 0)
+					print_error("Couldn't seek to offset %llu in \"%s\": %s",
+						     (unsigned long long)$6,
+						     $4.val,
+						     strerror(errno));
 
-			d = data_copy_file(file->file, $8);
+			d = data_copy_file(f, $8);
 
 			$$ = data_merge($1, d);
-			dtc_close_file(file);
+			fclose(f);
 		}
 	| propdataprefix DT_INCBIN '(' DT_STRING ')'
 		{
-			struct search_path path = { srcpos_file->dir, NULL, NULL };
-			struct dtc_file *file = dtc_open_file($4.val, &path);
+			FILE *f = srcfile_relative_open($4.val, NULL);
 			struct data d = empty_data;
 
-			d = data_copy_file(file->file, -1);
+			d = data_copy_file(f, -1);
 
 			$$ = data_merge($1, d);
-			dtc_close_file(file);
+			fclose(f);
 		}
 	| propdata DT_LABEL
 		{
@@ -269,23 +260,11 @@
 		}
 	;
 
-cellbase:
-	  /* empty */
-		{
-			$$ = 16;
-		}
-	| DT_BASE
-	;
-
 cellval:
 	  DT_LITERAL
 		{
 			$$ = eval_literal($1, 0, 32);
 		}
-	| cellbase DT_LEGACYLITERAL
-		{
-			$$ = eval_literal($2, $1, 32);
-		}
 	;
 
 bytestring:
@@ -308,57 +287,44 @@
 		{
 			$$ = NULL;
 		}
-	|  subnode subnodes
+	| subnode subnodes
 		{
 			$$ = chain_node($1, $2);
 		}
 	| subnode propdef
 		{
-			yyerror("syntax error: properties must precede subnodes");
+			print_error("syntax error: properties must precede subnodes");
 			YYERROR;
 		}
 	;
 
 subnode:
-	  label DT_PROPNODENAME nodedef
+	  DT_PROPNODENAME nodedef
 		{
-			$$ = name_node($3, $2, $1);
+			$$ = name_node($2, $1);
 		}
-	;
-
-label:
-	  /* empty */
+	| DT_LABEL subnode
 		{
-			$$ = NULL;
-		}
-	| DT_LABEL
-		{
-			$$ = $1;
+			add_label(&$2->labels, $1);
+			$$ = $2;
 		}
 	;
 
 %%
 
-void yyerrorf(char const *s, ...)
+void print_error(char const *fmt, ...)
 {
-	const char *fname = srcpos_file ? srcpos_file->name : "<no-file>";
 	va_list va;
-	va_start(va, s);
 
-	if (strcmp(fname, "-") == 0)
-		fname = "stdin";
-
-	fprintf(stderr, "%s:%d ", fname, yylloc.first_line);
-	vfprintf(stderr, s, va);
-	fprintf(stderr, "\n");
+	va_start(va, fmt);
+	srcpos_verror(&yylloc, fmt, va);
+	va_end(va);
 
 	treesource_error = 1;
-	va_end(va);
 }
 
-void yyerror (char const *s)
-{
-	yyerrorf("%s", s);
+void yyerror(char const *s) {
+	print_error("%s", s);
 }
 
 static unsigned long long eval_literal(const char *s, int base, int bits)
@@ -369,11 +335,11 @@
 	errno = 0;
 	val = strtoull(s, &e, base);
 	if (*e)
-		yyerror("bad characters in literal");
+		print_error("bad characters in literal");
 	else if ((errno == ERANGE)
 		 || ((bits < 64) && (val >= (1ULL << bits))))
-		yyerror("literal out of range");
+		print_error("literal out of range");
 	else if (errno != 0)
-		yyerror("bad literal");
+		print_error("bad literal");
 	return val;
 }
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index d8fd43b..cbc0193 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -30,30 +30,7 @@
 int reservenum;		/* Number of memory reservation slots */
 int minsize;		/* Minimum blob size */
 int padsize;		/* Additional padding to blob */
-
-char *join_path(const char *path, const char *name)
-{
-	int lenp = strlen(path);
-	int lenn = strlen(name);
-	int len;
-	int needslash = 1;
-	char *str;
-
-	len = lenp + lenn + 2;
-	if ((lenp > 0) && (path[lenp-1] == '/')) {
-		needslash = 0;
-		len--;
-	}
-
-	str = xmalloc(len);
-	memcpy(str, path, lenp);
-	if (needslash) {
-		str[lenp] = '/';
-		lenp++;
-	}
-	memcpy(str+lenp, name, lenn+1);
-	return str;
-}
+int phandle_format = PHANDLE_BOTH;	/* Use linux,phandle or phandle properties */
 
 static void fill_fullpaths(struct node *tree, const char *prefix)
 {
@@ -104,8 +81,15 @@
 	fprintf(stderr, "\t\tSet the physical boot cpu\n");
 	fprintf(stderr, "\t-f\n");
 	fprintf(stderr, "\t\tForce - try to produce output even if the input tree has errors\n");
+	fprintf(stderr, "\t-s\n");
+	fprintf(stderr, "\t\tSort nodes and properties before outputting (only useful for\n\t\tcomparing trees)\n");
 	fprintf(stderr, "\t-v\n");
 	fprintf(stderr, "\t\tPrint DTC version and exit\n");
+	fprintf(stderr, "\t-H <phandle format>\n");
+	fprintf(stderr, "\t\tphandle formats are:\n");
+	fprintf(stderr, "\t\t\tlegacy - \"linux,phandle\" properties only\n");
+	fprintf(stderr, "\t\t\tepapr - \"phandle\" properties only\n");
+	fprintf(stderr, "\t\t\tboth - Both \"linux,phandle\" and \"phandle\" properties\n");
 	exit(3);
 }
 
@@ -115,7 +99,7 @@
 	const char *inform = "dts";
 	const char *outform = "dts";
 	const char *outname = "-";
-	int force = 0, check = 0;
+	int force = 0, check = 0, sort = 0;
 	const char *arg;
 	int opt;
 	FILE *outf = NULL;
@@ -127,7 +111,7 @@
 	minsize    = 0;
 	padsize    = 0;
 
-	while ((opt = getopt(argc, argv, "hI:O:o:V:R:S:p:fcqb:v")) != EOF) {
+	while ((opt = getopt(argc, argv, "hI:O:o:V:R:S:p:fcqb:vH:s")) != EOF) {
 		switch (opt) {
 		case 'I':
 			inform = optarg;
@@ -165,6 +149,22 @@
 		case 'v':
 			printf("Version: %s\n", DTC_VERSION);
 			exit(0);
+		case 'H':
+			if (streq(optarg, "legacy"))
+				phandle_format = PHANDLE_LEGACY;
+			else if (streq(optarg, "epapr"))
+				phandle_format = PHANDLE_EPAPR;
+			else if (streq(optarg, "both"))
+				phandle_format = PHANDLE_BOTH;
+			else
+				die("Invalid argument \"%s\" to -H option\n",
+				    optarg);
+			break;
+
+		case 's':
+			sort = 1;
+			break;
+
 		case 'h':
 		default:
 			usage();
@@ -182,6 +182,9 @@
 	if (minsize && padsize)
 		die("Can't set both -p and -S\n");
 
+	if (minsize)
+		fprintf(stderr, "DTC: Use of \"-S\" is deprecated; it will be removed soon, use \"-p\" instead\n");
+
 	fprintf(stderr, "DTC: %s->%s  on file \"%s\"\n",
 		inform, outform, arg);
 
@@ -200,6 +203,8 @@
 	fill_fullpaths(bi->dt, "");
 	process_checks(force, bi);
 
+	if (sort)
+		sort_tree(bi);
 
 	if (streq(outname, "-")) {
 		outf = stdout;
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index 08d54c8..f37c97e 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -34,7 +34,17 @@
 #include <libfdt_env.h>
 #include <fdt.h>
 
+#include "util.h"
+
+#ifdef DEBUG
+#define debug(fmt,args...)	printf(fmt, ##args)
+#else
+#define debug(fmt,args...)
+#endif
+
+
 #define DEFAULT_FDT_VERSION	17
+
 /*
  * Command line options
  */
@@ -42,36 +52,11 @@
 extern int reservenum;		/* Number of memory reservation slots */
 extern int minsize;		/* Minimum blob size */
 extern int padsize;		/* Additional padding to blob */
+extern int phandle_format;	/* Use linux,phandle or phandle properties */
 
-static inline void __attribute__((noreturn)) die(char * str, ...)
-{
-	va_list ap;
-
-	va_start(ap, str);
-	fprintf(stderr, "FATAL ERROR: ");
-	vfprintf(stderr, str, ap);
-	exit(1);
-}
-
-static inline void *xmalloc(size_t len)
-{
-	void *new = malloc(len);
-
-	if (! new)
-		die("malloc() failed\n");
-
-	return new;
-}
-
-static inline void *xrealloc(void *p, size_t len)
-{
-	void *new = realloc(p, len);
-
-	if (! new)
-		die("realloc() failed (len=%d)\n", len);
-
-	return new;
-}
+#define PHANDLE_LEGACY	0x1
+#define PHANDLE_EPAPR	0x2
+#define PHANDLE_BOTH	0x3
 
 typedef uint32_t cell_t;
 
@@ -140,13 +125,18 @@
 #define MAX_NODENAME_LEN	31
 
 /* Live trees */
+struct label {
+	char *label;
+	struct label *next;
+};
+
 struct property {
 	char *name;
 	struct data val;
 
 	struct property *next;
 
-	char *label;
+	struct label *labels;
 };
 
 struct node {
@@ -163,22 +153,28 @@
 	cell_t phandle;
 	int addr_cells, size_cells;
 
-	char *label;
+	struct label *labels;
 };
 
+#define for_each_label(l0, l) \
+	for ((l) = (l0); (l); (l) = (l)->next)
+
 #define for_each_property(n, p) \
 	for ((p) = (n)->proplist; (p); (p) = (p)->next)
 
 #define for_each_child(n, c)	\
 	for ((c) = (n)->children; (c); (c) = (c)->next_sibling)
 
-struct property *build_property(char *name, struct data val, char *label);
+void add_label(struct label **labels, char *label);
+
+struct property *build_property(char *name, struct data val);
 struct property *chain_property(struct property *first, struct property *list);
 struct property *reverse_properties(struct property *first);
 
 struct node *build_node(struct property *proplist, struct node *children);
-struct node *name_node(struct node *node, char *name, char *label);
+struct node *name_node(struct node *node, char *name);
 struct node *chain_node(struct node *first, struct node *list);
+struct node *merge_nodes(struct node *old_node, struct node *new_node);
 
 void add_property(struct node *node, struct property *prop);
 void add_child(struct node *parent, struct node *child);
@@ -186,6 +182,10 @@
 const char *get_unitname(struct node *node);
 struct property *get_property(struct node *node, const char *propname);
 cell_t propval_cell(struct property *prop);
+struct property *get_property_by_label(struct node *tree, const char *label,
+				       struct node **node);
+struct marker *get_marker_label(struct node *tree, const char *label,
+				struct node **node, struct property **prop);
 struct node *get_subnode(struct node *node, const char *nodename);
 struct node *get_node_by_path(struct node *tree, const char *path);
 struct node *get_node_by_label(struct node *tree, const char *label);
@@ -193,6 +193,8 @@
 struct node *get_node_by_ref(struct node *tree, const char *ref);
 cell_t get_node_phandle(struct node *root, struct node *node);
 
+uint32_t guess_boot_cpuid(struct node *tree);
+
 /* Boot info (tree plus memreserve information */
 
 struct reserve_info {
@@ -200,10 +202,10 @@
 
 	struct reserve_info *next;
 
-	char *label;
+	struct label *labels;
 };
 
-struct reserve_info *build_reserve_entry(uint64_t start, uint64_t len, char *label);
+struct reserve_info *build_reserve_entry(uint64_t start, uint64_t len);
 struct reserve_info *chain_reserve_entry(struct reserve_info *first,
 					 struct reserve_info *list);
 struct reserve_info *add_reserve_entry(struct reserve_info *list,
@@ -218,6 +220,7 @@
 
 struct boot_info *build_boot_info(struct reserve_info *reservelist,
 				  struct node *tree, uint32_t boot_cpuid_phys);
+void sort_tree(struct boot_info *bi);
 
 /* Checks */
 
@@ -239,8 +242,4 @@
 
 struct boot_info *dt_from_fs(const char *dirname);
 
-/* misc */
-
-char *join_path(const char *path, const char *name);
-
 #endif /* _DTC_H */
diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c
index 76acd28..ead0332 100644
--- a/scripts/dtc/flattree.c
+++ b/scripts/dtc/flattree.c
@@ -52,9 +52,9 @@
 	void (*string)(void *, char *, int);
 	void (*align)(void *, int);
 	void (*data)(void *, struct data);
-	void (*beginnode)(void *, const char *);
-	void (*endnode)(void *, const char *);
-	void (*property)(void *, const char *);
+	void (*beginnode)(void *, struct label *labels);
+	void (*endnode)(void *, struct label *labels);
+	void (*property)(void *, struct label *labels);
 };
 
 static void bin_emit_cell(void *e, cell_t val)
@@ -89,17 +89,17 @@
 	*dtbuf = data_append_data(*dtbuf, d.val, d.len);
 }
 
-static void bin_emit_beginnode(void *e, const char *label)
+static void bin_emit_beginnode(void *e, struct label *labels)
 {
 	bin_emit_cell(e, FDT_BEGIN_NODE);
 }
 
-static void bin_emit_endnode(void *e, const char *label)
+static void bin_emit_endnode(void *e, struct label *labels)
 {
 	bin_emit_cell(e, FDT_END_NODE);
 }
 
-static void bin_emit_property(void *e, const char *label)
+static void bin_emit_property(void *e, struct label *labels)
 {
 	bin_emit_cell(e, FDT_PROP);
 }
@@ -127,11 +127,21 @@
 	fprintf(f, "%s\t= . + %d\n", label, offset);
 }
 
+#define ASM_EMIT_BELONG(f, fmt, ...) \
+	{ \
+		fprintf((f), "\t.byte\t((" fmt ") >> 24) & 0xff\n", __VA_ARGS__); \
+		fprintf((f), "\t.byte\t((" fmt ") >> 16) & 0xff\n", __VA_ARGS__); \
+		fprintf((f), "\t.byte\t((" fmt ") >> 8) & 0xff\n", __VA_ARGS__); \
+		fprintf((f), "\t.byte\t(" fmt ") & 0xff\n", __VA_ARGS__); \
+	}
+
 static void asm_emit_cell(void *e, cell_t val)
 {
 	FILE *f = e;
 
-	fprintf(f, "\t.long\t0x%x\n", val);
+	fprintf(f, "\t.byte 0x%02x; .byte 0x%02x; .byte 0x%02x; .byte 0x%02x\n",
+		(val >> 24) & 0xff, (val >> 16) & 0xff,
+		(val >> 8) & 0xff, val & 0xff);
 }
 
 static void asm_emit_string(void *e, char *str, int len)
@@ -156,7 +166,7 @@
 {
 	FILE *f = e;
 
-	fprintf(f, "\t.balign\t%d\n", a);
+	fprintf(f, "\t.balign\t%d, 0\n", a);
 }
 
 static void asm_emit_data(void *e, struct data d)
@@ -169,8 +179,7 @@
 		emit_offset_label(f, m->ref, m->offset);
 
 	while ((d.len - off) >= sizeof(uint32_t)) {
-		fprintf(f, "\t.long\t0x%x\n",
-			fdt32_to_cpu(*((uint32_t *)(d.val+off))));
+		asm_emit_cell(e, fdt32_to_cpu(*((uint32_t *)(d.val+off))));
 		off += sizeof(uint32_t);
 	}
 
@@ -182,37 +191,43 @@
 	assert(off == d.len);
 }
 
-static void asm_emit_beginnode(void *e, const char *label)
+static void asm_emit_beginnode(void *e, struct label *labels)
 {
 	FILE *f = e;
+	struct label *l;
 
-	if (label) {
-		fprintf(f, "\t.globl\t%s\n", label);
-		fprintf(f, "%s:\n", label);
+	for_each_label(labels, l) {
+		fprintf(f, "\t.globl\t%s\n", l->label);
+		fprintf(f, "%s:\n", l->label);
 	}
-	fprintf(f, "\t.long\tFDT_BEGIN_NODE\n");
+	fprintf(f, "\t/* FDT_BEGIN_NODE */\n");
+	asm_emit_cell(e, FDT_BEGIN_NODE);
 }
 
-static void asm_emit_endnode(void *e, const char *label)
+static void asm_emit_endnode(void *e, struct label *labels)
 {
 	FILE *f = e;
+	struct label *l;
 
-	fprintf(f, "\t.long\tFDT_END_NODE\n");
-	if (label) {
-		fprintf(f, "\t.globl\t%s_end\n", label);
-		fprintf(f, "%s_end:\n", label);
+	fprintf(f, "\t/* FDT_END_NODE */\n");
+	asm_emit_cell(e, FDT_END_NODE);
+	for_each_label(labels, l) {
+		fprintf(f, "\t.globl\t%s_end\n", l->label);
+		fprintf(f, "%s_end:\n", l->label);
 	}
 }
 
-static void asm_emit_property(void *e, const char *label)
+static void asm_emit_property(void *e, struct label *labels)
 {
 	FILE *f = e;
+	struct label *l;
 
-	if (label) {
-		fprintf(f, "\t.globl\t%s\n", label);
-		fprintf(f, "%s:\n", label);
+	for_each_label(labels, l) {
+		fprintf(f, "\t.globl\t%s\n", l->label);
+		fprintf(f, "%s:\n", l->label);
 	}
-	fprintf(f, "\t.long\tFDT_PROP\n");
+	fprintf(f, "\t/* FDT_PROP */\n");
+	asm_emit_cell(e, FDT_PROP);
 }
 
 static struct emitter asm_emitter = {
@@ -248,7 +263,7 @@
 	struct node *child;
 	int seen_name_prop = 0;
 
-	emit->beginnode(etarget, tree->label);
+	emit->beginnode(etarget, tree->labels);
 
 	if (vi->flags & FTF_FULLPATH)
 		emit->string(etarget, tree->fullpath, 0);
@@ -265,7 +280,7 @@
 
 		nameoff = stringtable_insert(strbuf, prop->name);
 
-		emit->property(etarget, prop->label);
+		emit->property(etarget, prop->labels);
 		emit->cell(etarget, prop->val.len);
 		emit->cell(etarget, nameoff);
 
@@ -292,7 +307,7 @@
 		flatten_tree(child, emit, etarget, strbuf, vi);
 	}
 
-	emit->endnode(etarget, tree->label);
+	emit->endnode(etarget, tree->labels);
 }
 
 static struct data flatten_reserve_list(struct reserve_info *reservelist,
@@ -413,10 +428,13 @@
 	if (padlen > 0)
 		blob = data_append_zeroes(blob, padlen);
 
-	fwrite(blob.val, blob.len, 1, f);
-
-	if (ferror(f))
-		die("Error writing device tree blob: %s\n", strerror(errno));
+	if (fwrite(blob.val, blob.len, 1, f) != 1) {
+		if (ferror(f))
+			die("Error writing device tree blob: %s\n",
+			    strerror(errno));
+		else
+			die("Short write on device tree blob\n");
+	}
 
 	/*
 	 * data_merge() frees the right-hand element so only the blob
@@ -455,39 +473,44 @@
 		die("Unknown device tree blob version %d\n", version);
 
 	fprintf(f, "/* autogenerated by dtc, do not edit */\n\n");
-	fprintf(f, "#define FDT_MAGIC 0x%x\n", FDT_MAGIC);
-	fprintf(f, "#define FDT_BEGIN_NODE 0x%x\n", FDT_BEGIN_NODE);
-	fprintf(f, "#define FDT_END_NODE 0x%x\n", FDT_END_NODE);
-	fprintf(f, "#define FDT_PROP 0x%x\n", FDT_PROP);
-	fprintf(f, "#define FDT_END 0x%x\n", FDT_END);
-	fprintf(f, "\n");
 
 	emit_label(f, symprefix, "blob_start");
 	emit_label(f, symprefix, "header");
-	fprintf(f, "\t.long\tFDT_MAGIC\t\t\t\t/* magic */\n");
-	fprintf(f, "\t.long\t_%s_blob_abs_end - _%s_blob_start\t/* totalsize */\n",
-		symprefix, symprefix);
-	fprintf(f, "\t.long\t_%s_struct_start - _%s_blob_start\t/* off_dt_struct */\n",
-		symprefix, symprefix);
-	fprintf(f, "\t.long\t_%s_strings_start - _%s_blob_start\t/* off_dt_strings */\n",
-		symprefix, symprefix);
-	fprintf(f, "\t.long\t_%s_reserve_map - _%s_blob_start\t/* off_dt_strings */\n",
-		symprefix, symprefix);
-	fprintf(f, "\t.long\t%d\t\t\t\t\t/* version */\n", vi->version);
-	fprintf(f, "\t.long\t%d\t\t\t\t\t/* last_comp_version */\n",
-		vi->last_comp_version);
-
-	if (vi->flags & FTF_BOOTCPUID)
-		fprintf(f, "\t.long\t%i\t\t\t\t\t/* boot_cpuid_phys */\n",
-			bi->boot_cpuid_phys);
-
-	if (vi->flags & FTF_STRTABSIZE)
-		fprintf(f, "\t.long\t_%s_strings_end - _%s_strings_start\t/* size_dt_strings */\n",
+	fprintf(f, "\t/* magic */\n");
+	asm_emit_cell(f, FDT_MAGIC);
+	fprintf(f, "\t/* totalsize */\n");
+	ASM_EMIT_BELONG(f, "_%s_blob_abs_end - _%s_blob_start",
 			symprefix, symprefix);
+	fprintf(f, "\t/* off_dt_struct */\n");
+	ASM_EMIT_BELONG(f, "_%s_struct_start - _%s_blob_start",
+		symprefix, symprefix);
+	fprintf(f, "\t/* off_dt_strings */\n");
+	ASM_EMIT_BELONG(f, "_%s_strings_start - _%s_blob_start",
+		symprefix, symprefix);
+	fprintf(f, "\t/* off_mem_rsvmap */\n");
+	ASM_EMIT_BELONG(f, "_%s_reserve_map - _%s_blob_start",
+		symprefix, symprefix);
+	fprintf(f, "\t/* version */\n");
+	asm_emit_cell(f, vi->version);
+	fprintf(f, "\t/* last_comp_version */\n");
+	asm_emit_cell(f, vi->last_comp_version);
 
-	if (vi->flags & FTF_STRUCTSIZE)
-		fprintf(f, "\t.long\t_%s_struct_end - _%s_struct_start\t/* size_dt_struct */\n",
+	if (vi->flags & FTF_BOOTCPUID) {
+		fprintf(f, "\t/* boot_cpuid_phys */\n");
+		asm_emit_cell(f, bi->boot_cpuid_phys);
+	}
+
+	if (vi->flags & FTF_STRTABSIZE) {
+		fprintf(f, "\t/* size_dt_strings */\n");
+		ASM_EMIT_BELONG(f, "_%s_strings_end - _%s_strings_start",
+				symprefix, symprefix);
+	}
+
+	if (vi->flags & FTF_STRUCTSIZE) {
+		fprintf(f, "\t/* size_dt_struct */\n");
+		ASM_EMIT_BELONG(f, "_%s_struct_end - _%s_struct_start",
 			symprefix, symprefix);
+	}
 
 	/*
 	 * Reserve map entries.
@@ -505,16 +528,17 @@
 	 * as it appears .quad isn't available in some assemblers.
 	 */
 	for (re = bi->reservelist; re; re = re->next) {
-		if (re->label) {
-			fprintf(f, "\t.globl\t%s\n", re->label);
-			fprintf(f, "%s:\n", re->label);
+		struct label *l;
+
+		for_each_label(re->labels, l) {
+			fprintf(f, "\t.globl\t%s\n", l->label);
+			fprintf(f, "%s:\n", l->label);
 		}
-		fprintf(f, "\t.long\t0x%08x, 0x%08x\n",
-			(unsigned int)(re->re.address >> 32),
-			(unsigned int)(re->re.address & 0xffffffff));
-		fprintf(f, "\t.long\t0x%08x, 0x%08x\n",
-			(unsigned int)(re->re.size >> 32),
-			(unsigned int)(re->re.size & 0xffffffff));
+		ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->re.address >> 32));
+		ASM_EMIT_BELONG(f, "0x%08x",
+				(unsigned int)(re->re.address & 0xffffffff));
+		ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->re.size >> 32));
+		ASM_EMIT_BELONG(f, "0x%08x", (unsigned int)(re->re.size & 0xffffffff));
 	}
 	for (i = 0; i < reservenum; i++) {
 		fprintf(f, "\t.long\t0, 0\n\t.long\t0, 0\n");
@@ -524,7 +548,9 @@
 
 	emit_label(f, symprefix, "struct_start");
 	flatten_tree(bi->dt, &asm_emitter, f, &strbuf, vi);
-	fprintf(f, "\t.long\tFDT_END\n");
+
+	fprintf(f, "\t/* FDT_END */\n");
+	asm_emit_cell(f, FDT_END);
 	emit_label(f, symprefix, "struct_end");
 
 	emit_label(f, symprefix, "strings_start");
@@ -601,7 +627,7 @@
 		len++;
 	} while ((*p++) != '\0');
 
-	str = strdup(inb->ptr);
+	str = xstrdup(inb->ptr);
 
 	inb->ptr += len;
 
@@ -643,7 +669,7 @@
 		p++;
 	}
 
-	return strdup(inb->base + offset);
+	return xstrdup(inb->base + offset);
 }
 
 static struct property *flat_read_property(struct inbuf *dtbuf,
@@ -663,7 +689,7 @@
 
 	val = flat_read_data(dtbuf, proplen);
 
-	return build_property(name, val, NULL);
+	return build_property(name, val);
 }
 
 
@@ -688,7 +714,7 @@
 		if (re.size == 0)
 			break;
 
-		new = build_reserve_entry(re.address, re.size, NULL);
+		new = build_reserve_entry(re.address, re.size);
 		reservelist = add_reserve_entry(reservelist, new);
 	}
 
@@ -710,7 +736,7 @@
 	if (!streq(ppath, "/"))
 		plen++;
 
-	return strdup(cpath + plen);
+	return xstrdup(cpath + plen);
 }
 
 static struct node *unflatten_tree(struct inbuf *dtbuf,
@@ -776,7 +802,7 @@
 
 struct boot_info *dt_from_blob(const char *fname)
 {
-	struct dtc_file *dtcf;
+	FILE *f;
 	uint32_t magic, totalsize, version, size_dt, boot_cpuid_phys;
 	uint32_t off_dt, off_str, off_mem_rsvmap;
 	int rc;
@@ -791,14 +817,14 @@
 	uint32_t val;
 	int flags = 0;
 
-	dtcf = dtc_open_file(fname, NULL);
+	f = srcfile_relative_open(fname, NULL);
 
-	rc = fread(&magic, sizeof(magic), 1, dtcf->file);
-	if (ferror(dtcf->file))
+	rc = fread(&magic, sizeof(magic), 1, f);
+	if (ferror(f))
 		die("Error reading DT blob magic number: %s\n",
 		    strerror(errno));
 	if (rc < 1) {
-		if (feof(dtcf->file))
+		if (feof(f))
 			die("EOF reading DT blob magic number\n");
 		else
 			die("Mysterious short read reading magic number\n");
@@ -808,11 +834,11 @@
 	if (magic != FDT_MAGIC)
 		die("Blob has incorrect magic number\n");
 
-	rc = fread(&totalsize, sizeof(totalsize), 1, dtcf->file);
-	if (ferror(dtcf->file))
+	rc = fread(&totalsize, sizeof(totalsize), 1, f);
+	if (ferror(f))
 		die("Error reading DT blob size: %s\n", strerror(errno));
 	if (rc < 1) {
-		if (feof(dtcf->file))
+		if (feof(f))
 			die("EOF reading DT blob size\n");
 		else
 			die("Mysterious short read reading blob size\n");
@@ -832,12 +858,12 @@
 	p = blob + sizeof(magic)  + sizeof(totalsize);
 
 	while (sizeleft) {
-		if (feof(dtcf->file))
+		if (feof(f))
 			die("EOF before reading %d bytes of DT blob\n",
 			    totalsize);
 
-		rc = fread(p, 1, sizeleft, dtcf->file);
-		if (ferror(dtcf->file))
+		rc = fread(p, 1, sizeleft, f);
+		if (ferror(f))
 			die("Error reading DT blob: %s\n",
 			    strerror(errno));
 
@@ -900,7 +926,7 @@
 
 	free(blob);
 
-	dtc_close_file(dtcf);
+	fclose(f);
 
 	return build_boot_info(reservelist, tree, boot_cpuid_phys);
 }
diff --git a/scripts/dtc/fstree.c b/scripts/dtc/fstree.c
index 8fe1bdf..f377453 100644
--- a/scripts/dtc/fstree.c
+++ b/scripts/dtc/fstree.c
@@ -58,10 +58,9 @@
 					"WARNING: Cannot open %s: %s\n",
 					tmpnam, strerror(errno));
 			} else {
-				prop = build_property(strdup(de->d_name),
+				prop = build_property(xstrdup(de->d_name),
 						      data_copy_file(pfile,
-								     st.st_size),
-						      NULL);
+								     st.st_size));
 				add_property(tree, prop);
 				fclose(pfile);
 			}
@@ -69,8 +68,7 @@
 			struct node *newchild;
 
 			newchild = read_fstree(tmpnam);
-			newchild = name_node(newchild, strdup(de->d_name),
-					     NULL);
+			newchild = name_node(newchild, xstrdup(de->d_name));
 			add_child(tree, newchild);
 		}
 
@@ -86,8 +84,8 @@
 	struct node *tree;
 
 	tree = read_fstree(dirname);
-	tree = name_node(tree, "", NULL);
+	tree = name_node(tree, "");
 
-	return build_boot_info(NULL, tree, 0);
+	return build_boot_info(NULL, tree, guess_boot_cpuid(tree));
 }
 
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 0ca3de5..c9209d5 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -24,17 +24,30 @@
  * Tree building functions
  */
 
-struct property *build_property(char *name, struct data val, char *label)
+void add_label(struct label **labels, char *label)
+{
+	struct label *new;
+
+	/* Make sure the label isn't already there */
+	for_each_label(*labels, new)
+		if (streq(new->label, label))
+			return;
+
+	new = xmalloc(sizeof(*new));
+	new->label = label;
+	new->next = *labels;
+	*labels = new;
+}
+
+struct property *build_property(char *name, struct data val)
 {
 	struct property *new = xmalloc(sizeof(*new));
 
+	memset(new, 0, sizeof(*new));
+
 	new->name = name;
 	new->val = val;
 
-	new->next = NULL;
-
-	new->label = label;
-
 	return new;
 }
 
@@ -78,17 +91,82 @@
 	return new;
 }
 
-struct node *name_node(struct node *node, char *name, char * label)
+struct node *name_node(struct node *node, char *name)
 {
 	assert(node->name == NULL);
 
 	node->name = name;
 
-	node->label = label;
-
 	return node;
 }
 
+struct node *merge_nodes(struct node *old_node, struct node *new_node)
+{
+	struct property *new_prop, *old_prop;
+	struct node *new_child, *old_child;
+	struct label *l;
+
+	/* Add new node labels to old node */
+	for_each_label(new_node->labels, l)
+		add_label(&old_node->labels, l->label);
+
+	/* Move properties from the new node to the old node.  If there
+	 * is a collision, replace the old value with the new */
+	while (new_node->proplist) {
+		/* Pop the property off the list */
+		new_prop = new_node->proplist;
+		new_node->proplist = new_prop->next;
+		new_prop->next = NULL;
+
+		/* Look for a collision, set new value if there is */
+		for_each_property(old_node, old_prop) {
+			if (streq(old_prop->name, new_prop->name)) {
+				/* Add new labels to old property */
+				for_each_label(new_prop->labels, l)
+					add_label(&old_prop->labels, l->label);
+
+				old_prop->val = new_prop->val;
+				free(new_prop);
+				new_prop = NULL;
+				break;
+			}
+		}
+
+		/* if no collision occurred, add property to the old node. */
+		if (new_prop)
+			add_property(old_node, new_prop);
+	}
+
+	/* Move the override child nodes into the primary node.  If
+	 * there is a collision, then merge the nodes. */
+	while (new_node->children) {
+		/* Pop the child node off the list */
+		new_child = new_node->children;
+		new_node->children = new_child->next_sibling;
+		new_child->parent = NULL;
+		new_child->next_sibling = NULL;
+
+		/* Search for a collision.  Merge if there is */
+		for_each_child(old_node, old_child) {
+			if (streq(old_child->name, new_child->name)) {
+				merge_nodes(old_child, new_child);
+				new_child = NULL;
+				break;
+			}
+		}
+
+		/* if no collision occurred, add child to the old node. */
+		if (new_child)
+			add_child(old_node, new_child);
+	}
+
+	/* The new node contents are now merged into the old node.  Free
+	 * the new node. */
+	free(new_node);
+
+	return old_node;
+}
+
 struct node *chain_node(struct node *first, struct node *list)
 {
 	assert(first->next_sibling == NULL);
@@ -124,18 +202,15 @@
 	*p = child;
 }
 
-struct reserve_info *build_reserve_entry(uint64_t address, uint64_t size,
-					 char *label)
+struct reserve_info *build_reserve_entry(uint64_t address, uint64_t size)
 {
 	struct reserve_info *new = xmalloc(sizeof(*new));
 
+	memset(new, 0, sizeof(*new));
+
 	new->re.address = address;
 	new->re.size = size;
 
-	new->next = NULL;
-
-	new->label = label;
-
 	return new;
 }
 
@@ -208,6 +283,60 @@
 	return fdt32_to_cpu(*((cell_t *)prop->val.val));
 }
 
+struct property *get_property_by_label(struct node *tree, const char *label,
+				       struct node **node)
+{
+	struct property *prop;
+	struct node *c;
+
+	*node = tree;
+
+	for_each_property(tree, prop) {
+		struct label *l;
+
+		for_each_label(prop->labels, l)
+			if (streq(l->label, label))
+				return prop;
+	}
+
+	for_each_child(tree, c) {
+		prop = get_property_by_label(c, label, node);
+		if (prop)
+			return prop;
+	}
+
+	*node = NULL;
+	return NULL;
+}
+
+struct marker *get_marker_label(struct node *tree, const char *label,
+				struct node **node, struct property **prop)
+{
+	struct marker *m;
+	struct property *p;
+	struct node *c;
+
+	*node = tree;
+
+	for_each_property(tree, p) {
+		*prop = p;
+		m = p->val.markers;
+		for_each_marker_of_type(m, LABEL)
+			if (streq(m->ref, label))
+				return m;
+	}
+
+	for_each_child(tree, c) {
+		m = get_marker_label(c, label, node, prop);
+		if (m)
+			return m;
+	}
+
+	*prop = NULL;
+	*node = NULL;
+	return NULL;
+}
+
 struct node *get_subnode(struct node *node, const char *nodename)
 {
 	struct node *child;
@@ -245,11 +374,13 @@
 struct node *get_node_by_label(struct node *tree, const char *label)
 {
 	struct node *child, *node;
+	struct label *l;
 
 	assert(label && (strlen(label) > 0));
 
-	if (tree->label && streq(tree->label, label))
-		return tree;
+	for_each_label(tree->labels, l)
+		if (streq(l->label, label))
+			return tree;
 
 	for_each_child(tree, child) {
 		node = get_node_by_label(child, label);
@@ -293,16 +424,186 @@
 	if ((node->phandle != 0) && (node->phandle != -1))
 		return node->phandle;
 
-	assert(! get_property(node, "linux,phandle"));
-
 	while (get_node_by_phandle(root, phandle))
 		phandle++;
 
 	node->phandle = phandle;
-	add_property(node,
-		     build_property("linux,phandle",
-				    data_append_cell(empty_data, phandle),
-				    NULL));
+
+	if (!get_property(node, "linux,phandle")
+	    && (phandle_format & PHANDLE_LEGACY))
+		add_property(node,
+			     build_property("linux,phandle",
+					    data_append_cell(empty_data, phandle)));
+
+	if (!get_property(node, "phandle")
+	    && (phandle_format & PHANDLE_EPAPR))
+		add_property(node,
+			     build_property("phandle",
+					    data_append_cell(empty_data, phandle)));
+
+	/* If the node *does* have a phandle property, we must
+	 * be dealing with a self-referencing phandle, which will be
+	 * fixed up momentarily in the caller */
 
 	return node->phandle;
 }
+
+uint32_t guess_boot_cpuid(struct node *tree)
+{
+	struct node *cpus, *bootcpu;
+	struct property *reg;
+
+	cpus = get_node_by_path(tree, "/cpus");
+	if (!cpus)
+		return 0;
+
+
+	bootcpu = cpus->children;
+	if (!bootcpu)
+		return 0;
+
+	reg = get_property(bootcpu, "reg");
+	if (!reg || (reg->val.len != sizeof(uint32_t)))
+		return 0;
+
+	/* FIXME: Sanity check node? */
+
+	return propval_cell(reg);
+}
+
+static int cmp_reserve_info(const void *ax, const void *bx)
+{
+	const struct reserve_info *a, *b;
+
+	a = *((const struct reserve_info * const *)ax);
+	b = *((const struct reserve_info * const *)bx);
+
+	if (a->re.address < b->re.address)
+		return -1;
+	else if (a->re.address > b->re.address)
+		return 1;
+	else if (a->re.size < b->re.size)
+		return -1;
+	else if (a->re.size > b->re.size)
+		return 1;
+	else
+		return 0;
+}
+
+static void sort_reserve_entries(struct boot_info *bi)
+{
+	struct reserve_info *ri, **tbl;
+	int n = 0, i = 0;
+
+	for (ri = bi->reservelist;
+	     ri;
+	     ri = ri->next)
+		n++;
+
+	if (n == 0)
+		return;
+
+	tbl = xmalloc(n * sizeof(*tbl));
+
+	for (ri = bi->reservelist;
+	     ri;
+	     ri = ri->next)
+		tbl[i++] = ri;
+
+	qsort(tbl, n, sizeof(*tbl), cmp_reserve_info);
+
+	bi->reservelist = tbl[0];
+	for (i = 0; i < (n-1); i++)
+		tbl[i]->next = tbl[i+1];
+	tbl[n-1]->next = NULL;
+
+	free(tbl);
+}
+
+static int cmp_prop(const void *ax, const void *bx)
+{
+	const struct property *a, *b;
+
+	a = *((const struct property * const *)ax);
+	b = *((const struct property * const *)bx);
+
+	return strcmp(a->name, b->name);
+}
+
+static void sort_properties(struct node *node)
+{
+	int n = 0, i = 0;
+	struct property *prop, **tbl;
+
+	for_each_property(node, prop)
+		n++;
+
+	if (n == 0)
+		return;
+
+	tbl = xmalloc(n * sizeof(*tbl));
+
+	for_each_property(node, prop)
+		tbl[i++] = prop;
+
+	qsort(tbl, n, sizeof(*tbl), cmp_prop);
+
+	node->proplist = tbl[0];
+	for (i = 0; i < (n-1); i++)
+		tbl[i]->next = tbl[i+1];
+	tbl[n-1]->next = NULL;
+
+	free(tbl);
+}
+
+static int cmp_subnode(const void *ax, const void *bx)
+{
+	const struct node *a, *b;
+
+	a = *((const struct node * const *)ax);
+	b = *((const struct node * const *)bx);
+
+	return strcmp(a->name, b->name);
+}
+
+static void sort_subnodes(struct node *node)
+{
+	int n = 0, i = 0;
+	struct node *subnode, **tbl;
+
+	for_each_child(node, subnode)
+		n++;
+
+	if (n == 0)
+		return;
+
+	tbl = xmalloc(n * sizeof(*tbl));
+
+	for_each_child(node, subnode)
+		tbl[i++] = subnode;
+
+	qsort(tbl, n, sizeof(*tbl), cmp_subnode);
+
+	node->children = tbl[0];
+	for (i = 0; i < (n-1); i++)
+		tbl[i]->next_sibling = tbl[i+1];
+	tbl[n-1]->next_sibling = NULL;
+
+	free(tbl);
+}
+
+static void sort_node(struct node *node)
+{
+	struct node *c;
+
+	sort_properties(node);
+	sort_subnodes(node);
+	for_each_child(node, c)
+		sort_node(c);
+}
+
+void sort_tree(struct boot_info *bi)
+{
+	sort_reserve_entries(bi);
+	sort_node(bi->dt);
+}
diff --git a/scripts/dtc/srcpos.c b/scripts/dtc/srcpos.c
index 9641b76..2dbc874 100644
--- a/scripts/dtc/srcpos.c
+++ b/scripts/dtc/srcpos.c
@@ -17,100 +17,232 @@
  *                                                                   USA
  */
 
+#define _GNU_SOURCE
+
+#include <stdio.h>
+
 #include "dtc.h"
 #include "srcpos.h"
 
-/*
- * Like yylineno, this is the current open file pos.
- */
 
-struct dtc_file *srcpos_file;
-
-static int dtc_open_one(struct dtc_file *file,
-                        const char *search,
-                        const char *fname)
+static char *dirname(const char *path)
 {
+	const char *slash = strrchr(path, '/');
+
+	if (slash) {
+		int len = slash - path;
+		char *dir = xmalloc(len + 1);
+
+		memcpy(dir, path, len);
+		dir[len] = '\0';
+		return dir;
+	}
+	return NULL;
+}
+
+struct srcfile_state *current_srcfile; /* = NULL */
+
+/* Detect infinite include recursion. */
+#define MAX_SRCFILE_DEPTH     (100)
+static int srcfile_depth; /* = 0 */
+
+FILE *srcfile_relative_open(const char *fname, char **fullnamep)
+{
+	FILE *f;
 	char *fullname;
 
-	if (search) {
-		fullname = xmalloc(strlen(search) + strlen(fname) + 2);
-
-		strcpy(fullname, search);
-		strcat(fullname, "/");
-		strcat(fullname, fname);
-	} else {
-		fullname = strdup(fname);
-	}
-
-	file->file = fopen(fullname, "r");
-	if (!file->file) {
-		free(fullname);
-		return 0;
-	}
-
-	file->name = fullname;
-	return 1;
-}
-
-
-struct dtc_file *dtc_open_file(const char *fname,
-                               const struct search_path *search)
-{
-	static const struct search_path default_search = { NULL, NULL, NULL };
-
-	struct dtc_file *file;
-	const char *slash;
-
-	file = xmalloc(sizeof(struct dtc_file));
-
-	slash = strrchr(fname, '/');
-	if (slash) {
-		char *dir = xmalloc(slash - fname + 1);
-
-		memcpy(dir, fname, slash - fname);
-		dir[slash - fname] = 0;
-		file->dir = dir;
-	} else {
-		file->dir = NULL;
-	}
-
 	if (streq(fname, "-")) {
-		file->name = "stdin";
-		file->file = stdin;
-		return file;
+		f = stdin;
+		fullname = xstrdup("<stdin>");
+	} else {
+		if (!current_srcfile || !current_srcfile->dir
+		    || (fname[0] == '/'))
+			fullname = xstrdup(fname);
+		else
+			fullname = join_path(current_srcfile->dir, fname);
+
+		f = fopen(fullname, "r");
+		if (!f)
+			die("Couldn't open \"%s\": %s\n", fname,
+			    strerror(errno));
 	}
 
-	if (fname[0] == '/') {
-		file->file = fopen(fname, "r");
-		if (!file->file)
-			goto fail;
+	if (fullnamep)
+		*fullnamep = fullname;
+	else
+		free(fullname);
 
-		file->name = strdup(fname);
-		return file;
-	}
-
-	if (!search)
-		search = &default_search;
-
-	while (search) {
-		if (dtc_open_one(file, search->dir, fname))
-			return file;
-
-		if (errno != ENOENT)
-			goto fail;
-
-		search = search->next;
-	}
-
-fail:
-	die("Couldn't open \"%s\": %s\n", fname, strerror(errno));
+	return f;
 }
 
-void dtc_close_file(struct dtc_file *file)
+void srcfile_push(const char *fname)
 {
-	if (fclose(file->file))
-		die("Error closing \"%s\": %s\n", file->name, strerror(errno));
+	struct srcfile_state *srcfile;
 
-	free(file->dir);
-	free(file);
+	if (srcfile_depth++ >= MAX_SRCFILE_DEPTH)
+		die("Includes nested too deeply");
+
+	srcfile = xmalloc(sizeof(*srcfile));
+
+	srcfile->f = srcfile_relative_open(fname, &srcfile->name);
+	srcfile->dir = dirname(srcfile->name);
+	srcfile->prev = current_srcfile;
+
+	srcfile->lineno = 1;
+	srcfile->colno = 1;
+
+	current_srcfile = srcfile;
+}
+
+int srcfile_pop(void)
+{
+	struct srcfile_state *srcfile = current_srcfile;
+
+	assert(srcfile);
+
+	current_srcfile = srcfile->prev;
+
+	if (fclose(srcfile->f))
+		die("Error closing \"%s\": %s\n", srcfile->name,
+		    strerror(errno));
+
+	/* FIXME: We allow the srcfile_state structure to leak,
+	 * because it could still be referenced from a location
+	 * variable being carried through the parser somewhere.  To
+	 * fix this we could either allocate all the files from a
+	 * table, or use a pool allocator. */
+
+	return current_srcfile ? 1 : 0;
+}
+
+/*
+ * The empty source position.
+ */
+
+struct srcpos srcpos_empty = {
+	.first_line = 0,
+	.first_column = 0,
+	.last_line = 0,
+	.last_column = 0,
+	.file = NULL,
+};
+
+#define TAB_SIZE      8
+
+void srcpos_update(struct srcpos *pos, const char *text, int len)
+{
+	int i;
+
+	pos->file = current_srcfile;
+
+	pos->first_line = current_srcfile->lineno;
+	pos->first_column = current_srcfile->colno;
+
+	for (i = 0; i < len; i++)
+		if (text[i] == '\n') {
+			current_srcfile->lineno++;
+			current_srcfile->colno = 1;
+		} else if (text[i] == '\t') {
+			current_srcfile->colno =
+				ALIGN(current_srcfile->colno, TAB_SIZE);
+		} else {
+			current_srcfile->colno++;
+		}
+
+	pos->last_line = current_srcfile->lineno;
+	pos->last_column = current_srcfile->colno;
+}
+
+struct srcpos *
+srcpos_copy(struct srcpos *pos)
+{
+	struct srcpos *pos_new;
+
+	pos_new = xmalloc(sizeof(struct srcpos));
+	memcpy(pos_new, pos, sizeof(struct srcpos));
+
+	return pos_new;
+}
+
+
+
+void
+srcpos_dump(struct srcpos *pos)
+{
+	printf("file        : \"%s\"\n",
+	       pos->file ? (char *) pos->file : "<no file>");
+	printf("first_line  : %d\n", pos->first_line);
+	printf("first_column: %d\n", pos->first_column);
+	printf("last_line   : %d\n", pos->last_line);
+	printf("last_column : %d\n", pos->last_column);
+	printf("file        : %s\n", pos->file->name);
+}
+
+
+char *
+srcpos_string(struct srcpos *pos)
+{
+	const char *fname = "<no-file>";
+	char *pos_str;
+	int rc;
+
+	if (pos)
+		fname = pos->file->name;
+
+
+	if (pos->first_line != pos->last_line)
+		rc = asprintf(&pos_str, "%s:%d.%d-%d.%d", fname,
+			      pos->first_line, pos->first_column,
+			      pos->last_line, pos->last_column);
+	else if (pos->first_column != pos->last_column)
+		rc = asprintf(&pos_str, "%s:%d.%d-%d", fname,
+			      pos->first_line, pos->first_column,
+			      pos->last_column);
+	else
+		rc = asprintf(&pos_str, "%s:%d.%d", fname,
+			      pos->first_line, pos->first_column);
+
+	if (rc == -1)
+		die("Couldn't allocate in srcpos string");
+
+	return pos_str;
+}
+
+void
+srcpos_verror(struct srcpos *pos, char const *fmt, va_list va)
+{
+	const char *srcstr;
+
+	srcstr = srcpos_string(pos);
+
+	fprintf(stdout, "Error: %s ", srcstr);
+	vfprintf(stdout, fmt, va);
+	fprintf(stdout, "\n");
+}
+
+void
+srcpos_error(struct srcpos *pos, char const *fmt, ...)
+{
+	va_list va;
+
+	va_start(va, fmt);
+	srcpos_verror(pos, fmt, va);
+	va_end(va);
+}
+
+
+void
+srcpos_warn(struct srcpos *pos, char const *fmt, ...)
+{
+	const char *srcstr;
+	va_list va;
+	va_start(va, fmt);
+
+	srcstr = srcpos_string(pos);
+
+	fprintf(stderr, "Warning: %s ", srcstr);
+	vfprintf(stderr, fmt, va);
+	fprintf(stderr, "\n");
+
+	va_end(va);
 }
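
The srcpos_update() helper added above advances the current file's line and column counters as the lexer consumes text, snapping the column forward at tab stops. A minimal standalone sketch of that column arithmetic, assuming dtc's usual ALIGN() rounding macro (not visible in this hunk):

#include <stdio.h>

#define TAB_SIZE 8
/* assumed to match dtc's ALIGN(): round x up to a multiple of a (a power of two) */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Walk a token the way srcpos_update() does and report the final position. */
static void advance(int *lineno, int *colno, const char *text, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (text[i] == '\n') {
			(*lineno)++;
			*colno = 1;
		} else if (text[i] == '\t') {
			*colno = ALIGN(*colno, TAB_SIZE);
		} else {
			(*colno)++;
		}
}

int main(void)
{
	int line = 1, col = 1;

	advance(&line, &col, "foo\tbar\n", 8);
	printf("line %d, column %d\n", line, col);	/* prints: line 2, column 1 */
	return 0;
}
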
diff --git a/scripts/dtc/srcpos.h b/scripts/dtc/srcpos.h
index e17c7c0..bd7966e 100644
--- a/scripts/dtc/srcpos.h
+++ b/scripts/dtc/srcpos.h
@@ -17,69 +17,70 @@
  *                                                                   USA
  */
 
-/*
- * Augment the standard YYLTYPE with a filenum index into an
- * array of all opened filenames.
- */
+#ifndef _SRCPOS_H_
+#define _SRCPOS_H_
 
 #include <stdio.h>
 
-struct dtc_file {
+struct srcfile_state {
+	FILE *f;
+	char *name;
 	char *dir;
-	const char *name;
-	FILE *file;
+	int lineno, colno;
+	struct srcfile_state *prev;
 };
 
-#if ! defined(YYLTYPE) && ! defined(YYLTYPE_IS_DECLARED)
-typedef struct YYLTYPE {
+extern struct srcfile_state *current_srcfile; /* = NULL */
+
+FILE *srcfile_relative_open(const char *fname, char **fullnamep);
+void srcfile_push(const char *fname);
+int srcfile_pop(void);
+
+struct srcpos {
     int first_line;
     int first_column;
     int last_line;
     int last_column;
-    struct dtc_file *file;
-} YYLTYPE;
-
-#define YYLTYPE_IS_DECLARED	1
-#define YYLTYPE_IS_TRIVIAL	1
-#endif
-
-/* Cater to old parser templates. */
-#ifndef YYID
-#define YYID(n)	(n)
-#endif
-
-#define YYLLOC_DEFAULT(Current, Rhs, N)					\
-    do									\
-      if (YYID (N))							\
-	{								\
-	  (Current).first_line   = YYRHSLOC (Rhs, 1).first_line;	\
-	  (Current).first_column = YYRHSLOC (Rhs, 1).first_column;	\
-	  (Current).last_line    = YYRHSLOC (Rhs, N).last_line;		\
-	  (Current).last_column  = YYRHSLOC (Rhs, N).last_column;	\
-	  (Current).file         = YYRHSLOC (Rhs, N).file;		\
-	}								\
-      else								\
-	{								\
-	  (Current).first_line   = (Current).last_line   =		\
-	    YYRHSLOC (Rhs, 0).last_line;				\
-	  (Current).first_column = (Current).last_column =		\
-	    YYRHSLOC (Rhs, 0).last_column;				\
-	  (Current).file         = YYRHSLOC (Rhs, 0).file;		\
-	}								\
-    while (YYID (0))
-
-
-
-extern void yyerror(char const *);
-extern void yyerrorf(char const *, ...) __attribute__((format(printf, 1, 2)));
-
-extern struct dtc_file *srcpos_file;
-
-struct search_path {
-	const char *dir; /* NULL for current directory */
-	struct search_path *prev, *next;
+    struct srcfile_state *file;
 };
 
-extern struct dtc_file *dtc_open_file(const char *fname,
-                                      const struct search_path *search);
-extern void dtc_close_file(struct dtc_file *file);
+#define YYLTYPE struct srcpos
+
+#define YYLLOC_DEFAULT(Current, Rhs, N)						\
+	do {									\
+		if (N) {							\
+			(Current).first_line = YYRHSLOC(Rhs, 1).first_line;	\
+			(Current).first_column = YYRHSLOC(Rhs, 1).first_column;	\
+			(Current).last_line = YYRHSLOC(Rhs, N).last_line;	\
+			(Current).last_column  = YYRHSLOC (Rhs, N).last_column;	\
+			(Current).file = YYRHSLOC(Rhs, N).file;			\
+		} else {							\
+			(Current).first_line = (Current).last_line =		\
+				YYRHSLOC(Rhs, 0).last_line;			\
+			(Current).first_column = (Current).last_column =	\
+				YYRHSLOC(Rhs, 0).last_column;			\
+			(Current).file = YYRHSLOC (Rhs, 0).file;		\
+		}								\
+	} while (0)
+
+
+/*
+ * Fictional source position used for IR nodes that are
+ * created without otherwise knowing a true source position.
+ * For example, constant definitions from the command line.
+ */
+extern struct srcpos srcpos_empty;
+
+extern void srcpos_update(struct srcpos *pos, const char *text, int len);
+extern struct srcpos *srcpos_copy(struct srcpos *pos);
+extern char *srcpos_string(struct srcpos *pos);
+extern void srcpos_dump(struct srcpos *pos);
+
+extern void srcpos_verror(struct srcpos *pos, char const *, va_list va)
+     __attribute__((format(printf, 2, 0)));
+extern void srcpos_error(struct srcpos *pos, char const *, ...)
+     __attribute__((format(printf, 2, 3)));
+extern void srcpos_warn(struct srcpos *pos, char const *, ...)
+     __attribute__((format(printf, 2, 3)));
+
+#endif /* _SRCPOS_H_ */
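
YYLLOC_DEFAULT above folds the locations of a rule's right-hand-side symbols into a single struct srcpos running from the first symbol's start to the last symbol's end. The same merge, sketched outside the parser using only the struct declared above:

#include <stdio.h>
/* assumes scripts/dtc/srcpos.h from this patch is on the include path */
#include "srcpos.h"

/* Combine two positions the way YYLLOC_DEFAULT does for a non-empty rule. */
static struct srcpos span(const struct srcpos *first, const struct srcpos *last)
{
	struct srcpos pos;

	pos.first_line = first->first_line;
	pos.first_column = first->first_column;
	pos.last_line = last->last_line;
	pos.last_column = last->last_column;
	pos.file = last->file;

	return pos;
}

int main(void)
{
	struct srcpos a = { .first_line = 3, .first_column = 1,
			    .last_line = 3, .last_column = 8, .file = NULL };
	struct srcpos b = { .first_line = 5, .first_column = 1,
			    .last_line = 5, .last_column = 20, .file = NULL };
	struct srcpos c = span(&a, &b);

	printf("%d.%d-%d.%d\n", c.first_line, c.first_column,
	       c.last_line, c.last_column);	/* prints: 3.1-5.20 */
	return 0;
}
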
diff --git a/scripts/dtc/treesource.c b/scripts/dtc/treesource.c
index 1521ff1..c09aafa 100644
--- a/scripts/dtc/treesource.c
+++ b/scripts/dtc/treesource.c
@@ -32,8 +32,8 @@
 	the_boot_info = NULL;
 	treesource_error = 0;
 
-	srcpos_file = dtc_open_file(fname, NULL);
-	yyin = srcpos_file->file;
+	srcfile_push(fname);
+	yyin = current_srcfile->f;
 
 	if (yyparse() != 0)
 		die("Unable to parse input tree\n");
@@ -63,26 +63,20 @@
 {
 	const char *str = val.val;
 	int i;
-	int newchunk = 1;
 	struct marker *m = val.markers;
 
 	assert(str[val.len-1] == '\0');
 
+	while (m && (m->offset == 0)) {
+		if (m->type == LABEL)
+			fprintf(f, "%s: ", m->ref);
+		m = m->next;
+	}
+	fprintf(f, "\"");
+
 	for (i = 0; i < (val.len-1); i++) {
 		char c = str[i];
 
-		if (newchunk) {
-			while (m && (m->offset <= i)) {
-				if (m->type == LABEL) {
-					assert(m->offset == i);
-					fprintf(f, "%s: ", m->ref);
-				}
-				m = m->next;
-			}
-			fprintf(f, "\"");
-			newchunk = 0;
-		}
-
 		switch (c) {
 		case '\a':
 			fprintf(f, "\\a");
@@ -113,7 +107,14 @@
 			break;
 		case '\0':
 			fprintf(f, "\", ");
-			newchunk = 1;
+			while (m && (m->offset < i)) {
+				if (m->type == LABEL) {
+					assert(m->offset == (i+1));
+					fprintf(f, "%s: ", m->ref);
+				}
+				m = m->next;
+			}
+			fprintf(f, "\"");
 			break;
 		default:
 			if (isprint(c))
@@ -234,10 +235,11 @@
 {
 	struct property *prop;
 	struct node *child;
+	struct label *l;
 
 	write_prefix(f, level);
-	if (tree->label)
-		fprintf(f, "%s: ", tree->label);
+	for_each_label(tree->labels, l)
+		fprintf(f, "%s: ", l->label);
 	if (tree->name && (*tree->name))
 		fprintf(f, "%s {\n", tree->name);
 	else
@@ -245,8 +247,8 @@
 
 	for_each_property(tree, prop) {
 		write_prefix(f, level+1);
-		if (prop->label)
-			fprintf(f, "%s: ", prop->label);
+		for_each_label(prop->labels, l)
+			fprintf(f, "%s: ", l->label);
 		fprintf(f, "%s", prop->name);
 		write_propval(f, prop);
 	}
@@ -266,8 +268,10 @@
 	fprintf(f, "/dts-v1/;\n\n");
 
 	for (re = bi->reservelist; re; re = re->next) {
-		if (re->label)
-			fprintf(f, "%s: ", re->label);
+		struct label *l;
+
+		for_each_label(re->labels, l)
+			fprintf(f, "%s: ", l->label);
 		fprintf(f, "/memreserve/\t0x%016llx 0x%016llx;\n",
 			(unsigned long long)re->re.address,
 			(unsigned long long)re->re.size);
diff --git a/scripts/dtc/util.c b/scripts/dtc/util.c
new file mode 100644
index 0000000..d7ac27d
--- /dev/null
+++ b/scripts/dtc/util.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008 Jon Loeliger, Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ *                                                                   USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "util.h"
+
+char *xstrdup(const char *s)
+{
+	int len = strlen(s) + 1;
+	char *dup = xmalloc(len);
+
+	memcpy(dup, s, len);
+
+	return dup;
+}
+
+char *join_path(const char *path, const char *name)
+{
+	int lenp = strlen(path);
+	int lenn = strlen(name);
+	int len;
+	int needslash = 1;
+	char *str;
+
+	len = lenp + lenn + 2;
+	if ((lenp > 0) && (path[lenp-1] == '/')) {
+		needslash = 0;
+		len--;
+	}
+
+	str = xmalloc(len);
+	memcpy(str, path, lenp);
+	if (needslash) {
+		str[lenp] = '/';
+		lenp++;
+	}
+	memcpy(str+lenp, name, lenn+1);
+	return str;
+}
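
join_path() allocates a new string and inserts a '/' separator only when the directory does not already end in one; the caller owns the result. A small usage sketch, assuming util.c above is compiled and linked in:

#include <stdarg.h>	/* util.h's inline die() relies on this being included first */
#include <stdio.h>
#include <stdlib.h>

#include "util.h"

int main(void)
{
	char *a = join_path("/sys/class", "leds");	/* slash inserted */
	char *b = join_path("/sys/class/", "leds");	/* existing slash reused */

	printf("%s\n%s\n", a, b);	/* both print /sys/class/leds */
	free(a);
	free(b);
	return 0;
}
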
diff --git a/scripts/dtc/util.h b/scripts/dtc/util.h
new file mode 100644
index 0000000..9cead84
--- /dev/null
+++ b/scripts/dtc/util.h
@@ -0,0 +1,56 @@
+#ifndef _UTIL_H
+#define _UTIL_H
+
+/*
+ * Copyright 2008 Jon Loeliger, Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ *                                                                   USA
+ */
+
+static inline void __attribute__((noreturn)) die(char *str, ...)
+{
+	va_list ap;
+
+	va_start(ap, str);
+	fprintf(stderr, "FATAL ERROR: ");
+	vfprintf(stderr, str, ap);
+	exit(1);
+}
+
+static inline void *xmalloc(size_t len)
+{
+	void *new = malloc(len);
+
+	if (!new)
+		die("malloc() failed\n");
+
+	return new;
+}
+
+static inline void *xrealloc(void *p, size_t len)
+{
+	void *new = realloc(p, len);
+
+	if (!new)
+		die("realloc() failed (len=%d)\n", len);
+
+	return new;
+}
+
+extern char *xstrdup(const char *s);
+extern char *join_path(const char *path, const char *name);
+
+#endif /* _UTIL_H */
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index 658ff42..6158b86 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.2.0"
+#define DTC_VERSION "DTC 1.2.0-g37c0b6a0"
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 5958fff..55caecd 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -243,6 +243,8 @@
 		echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
 		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
 		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+		echo "$output_file" | grep -q "\.xz$" && \
+				compr="xz --check=crc32 --lzma2=dict=1MiB"
 		echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
 		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 		shift
diff --git a/scripts/genksyms/parse.c_shipped b/scripts/genksyms/parse.c_shipped
index eaee44e..809b949 100644
--- a/scripts/genksyms/parse.c_shipped
+++ b/scripts/genksyms/parse.c_shipped
@@ -160,7 +160,7 @@
 
 
 #include <assert.h>
-#include <malloc.h>
+#include <stdlib.h>
 #include "genksyms.h"
 
 static int is_typedef;
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index 10d7dc7..09a265c 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -24,7 +24,7 @@
 %{
 
 #include <assert.h>
-#include <malloc.h>
+#include <stdlib.h>
 #include "genksyms.h"
 
 static int is_typedef;
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index d21ec3a..139e0ff 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
 use strict;
 
 my $P = $0;
-my $V = '0.26-beta6';
+my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -40,7 +40,7 @@
 my $output_multiline = 1;
 my $output_separator = ", ";
 my $output_roles = 0;
-my $output_rolestats = 0;
+my $output_rolestats = 1;
 my $scm = 0;
 my $web = 0;
 my $subsystem = 0;
@@ -494,6 +494,40 @@
 
 exit($exit);
 
+sub range_is_maintained {
+    my ($start, $end) = @_;
+
+    for (my $i = $start; $i < $end; $i++) {
+	my $line = $typevalue[$i];
+	if ($line =~ m/^(\C):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+	    if ($type eq 'S') {
+		if ($value =~ /(maintain|support)/i) {
+		    return 1;
+		}
+	    }
+	}
+    }
+    return 0;
+}
+
+sub range_has_maintainer {
+    my ($start, $end) = @_;
+
+    for (my $i = $start; $i < $end; $i++) {
+	my $line = $typevalue[$i];
+	if ($line =~ m/^(\C):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+	    if ($type eq 'M') {
+		return 1;
+	    }
+	}
+    }
+    return 0;
+}
+
 sub get_maintainers {
     %email_hash_name = ();
     %email_hash_address = ();
@@ -556,7 +590,9 @@
 				my $file_pd = ($file  =~ tr@/@@);
 				$value_pd++ if (substr($value,-1,1) ne "/");
 				$value_pd = -1 if ($value =~ /^\.\*/);
-				if ($value_pd >= $file_pd) {
+				if ($value_pd >= $file_pd &&
+				    range_is_maintained($start, $end) &&
+				    range_has_maintainer($start, $end)) {
 				    $exact_pattern_match_hash{$file} = 1;
 				}
 				if ($pattern_depth == 0 ||
@@ -720,7 +756,8 @@
   --help => show this help information
 
 Default options:
-  [--email --git --m --n --l --multiline --pattern-depth=0 --remove-duplicates]
+  [--email --nogit --git-fallback --m --n --l --multiline --pattern-depth=0
+   --remove-duplicates --rolestats]
 
 Notes:
   Using "-f directory" may give unexpected results:
diff --git a/scripts/headers.sh b/scripts/headers.sh
index 1ddcdd3..978b42b 100755
--- a/scripts/headers.sh
+++ b/scripts/headers.sh
@@ -13,7 +13,7 @@
 	fi
 }
 
-archs=$(ls ${srctree}/arch)
+archs=${HDR_ARCH_LIST:-$(ls ${srctree}/arch)}
 
 for arch in ${archs}; do
 	case ${arch} in
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl
index 50d6cfd..7957e7a 100644
--- a/scripts/headers_check.pl
+++ b/scripts/headers_check.pl
@@ -64,10 +64,10 @@
 
 sub check_declarations
 {
-	if ($line =~m/^\s*extern\b/) {
+	if ($line =~m/^(\s*extern|unsigned|char|short|int|long|void)\b/) {
 		printf STDERR "$filename:$lineno: " .
-		              "userspace cannot call function or variable " .
-		              "defined in the kernel\n";
+			      "userspace cannot reference function or " .
+			      "variable defined in the kernel\n";
 	}
 }
 
diff --git a/scripts/headers_install.pl b/scripts/headers_install.pl
index 4ca3be3..efb3be1 100644
--- a/scripts/headers_install.pl
+++ b/scripts/headers_install.pl
@@ -45,6 +45,13 @@
 	close $in;
 
 	system $unifdef . " $tmpfile > $installdir/$file";
+	# unifdef will exit 0 on success, and will exit 1 when the
+	# file was processed successfully but no changes were made,
+	# so abort only when it's higher than that.
+	my $e = $? >> 8;
+	if ($e > 1) {
+		die "$tmpfile: $!\n";
+	}
 	unlink $tmpfile;
 }
 exit 0;
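
The Perl hunk above accepts unifdef exit codes 0 and 1 and aborts on anything higher; `$? >> 8` is how Perl extracts the child's exit status. For comparison, the equivalent check written in C (a sketch, not part of the kernel build):

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(void)
{
	/* hypothetical command; unifdef exits 0 or 1 on success */
	int status = system("unifdef -U__KERNEL__ input.h > output.h");

	if (status == -1 || !WIFEXITED(status) || WEXITSTATUS(status) > 1) {
		fprintf(stderr, "unifdef failed\n");
		return 1;
	}
	return 0;
}
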
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 5459a38..659326c 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -529,8 +529,6 @@
 		}
 		break;
 	case savedefconfig:
-		conf_read(NULL);
-		break;
 	case silentoldconfig:
 	case oldaskconfig:
 	case oldconfig:
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 9df8011..61c35bf 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -440,12 +440,11 @@
 	fputs("\"\n", out);
 }
 
-static void conf_write_symbol(struct symbol *sym, enum symbol_type type,
-                              FILE *out, bool write_no)
+static void conf_write_symbol(struct symbol *sym, FILE *out, bool write_no)
 {
 	const char *str;
 
-	switch (type) {
+	switch (sym->type) {
 	case S_BOOLEAN:
 	case S_TRISTATE:
 		switch (sym_get_tristate_value(sym)) {
@@ -532,7 +531,7 @@
 						goto next_menu;
 				}
 			}
-			conf_write_symbol(sym, sym->type, out, true);
+			conf_write_symbol(sym, out, true);
 		}
 next_menu:
 		if (menu->list != NULL) {
@@ -561,7 +560,6 @@
 	const char *basename;
 	const char *str;
 	char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
-	enum symbol_type type;
 	time_t now;
 	int use_timestamp = 1;
 	char *env;
@@ -633,14 +631,8 @@
 			if (!(sym->flags & SYMBOL_WRITE))
 				goto next;
 			sym->flags &= ~SYMBOL_WRITE;
-			type = sym->type;
-			if (type == S_TRISTATE) {
-				sym_calc_value(modules_sym);
-				if (modules_sym->curr.tri == no)
-					type = S_BOOLEAN;
-			}
 			/* Write config symbol to file */
-			conf_write_symbol(sym, type, out, true);
+			conf_write_symbol(sym, out, true);
 		}
 
 next:
@@ -833,8 +825,7 @@
 		       " * Automatically generated C config: don't edit\n"
 		       " * %s\n"
 		       " * %s"
-		       " */\n"
-		       "#define AUTOCONF_INCLUDED\n",
+		       " */\n",
 		       rootmenu.prompt->text, ctime(&now));
 
 	for_all_symbols(i, sym) {
@@ -843,7 +834,7 @@
 			continue;
 
 		/* write symbol to config file */
-		conf_write_symbol(sym, sym->type, out, false);
+		conf_write_symbol(sym, out, false);
 
 		/* update autoconf and tristate files */
 		switch (sym->type) {
@@ -946,7 +937,7 @@
 	int cnt, def;
 
 	/*
-	 * If choice is mod then we may have more items slected
+	 * If choice is mod then we may have more items selected
 	 * and if no then no-one.
 	 * In both cases stop.
 	 */
@@ -1042,10 +1033,10 @@
 
 	/*
 	 * We have different type of choice blocks.
-	 * If curr.tri equal to mod then we can select several
+	 * If curr.tri equals mod then we can select several
 	 * choice symbols in one block.
 	 * In this case we do nothing.
-	 * If curr.tri equal yes then only one symbol can be
+	 * If curr.tri equals yes then only one symbol can be
 	 * selected in a choice block and we set it to yes,
 	 * and the rest to no.
 	 */
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 330e7c0..0010034 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -64,7 +64,7 @@
 	return e2 ? expr_alloc_two(E_OR, e1, e2) : e1;
 }
 
-struct expr *expr_copy(struct expr *org)
+struct expr *expr_copy(const struct expr *org)
 {
 	struct expr *e;
 
@@ -1013,6 +1013,48 @@
 #endif
 }
 
+static inline struct expr *
+expr_get_leftmost_symbol(const struct expr *e)
+{
+
+	if (e == NULL)
+		return NULL;
+
+	while (e->type != E_SYMBOL)
+		e = e->left.expr;
+
+	return expr_copy(e);
+}
+
+/*
+ * Given expressions `e1' and `e2', returns the leaf of the longest
+ * sub-expression of `e1' not containing `e2'.
+ */
+struct expr *expr_simplify_unmet_dep(struct expr *e1, struct expr *e2)
+{
+	struct expr *ret;
+
+	switch (e1->type) {
+	case E_OR:
+		return expr_alloc_and(
+		    expr_simplify_unmet_dep(e1->left.expr, e2),
+		    expr_simplify_unmet_dep(e1->right.expr, e2));
+	case E_AND: {
+		struct expr *e;
+		e = expr_alloc_and(expr_copy(e1), expr_copy(e2));
+		e = expr_eliminate_dups(e);
+		ret = (!expr_eq(e, e1)) ? e1 : NULL;
+		expr_free(e);
+		break;
+		}
+	default:
+		ret = e1;
+		break;
+	}
+
+	return expr_get_leftmost_symbol(ret);
+}
+
 void expr_print(struct expr *e, void (*fn)(void *, struct symbol *, const char *), void *data, int prevtoken)
 {
 	if (!e) {
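
expr_simplify_unmet_dep() trims a reverse-dependency ('select') expression down to the part that does not already carry the unmet direct dependency, so the warning emitted from symbol.c further below names only the offending selector. A hedged fragment using the existing kconfig helpers; the symbol names FOO/BAR/BAZ are invented and this would only build inside scripts/kconfig:

#include <stdio.h>
#include "lkc.h"	/* kconfig-internal headers: sym_lookup(), expr_alloc_*(), ... */

static void demo_unmet_dep_warning(void)
{
	/* rev_dep: FOO || (BAR && BAZ), unmet dir_dep: BAZ */
	struct expr *rev_dep = expr_alloc_or(
		expr_alloc_symbol(sym_lookup("FOO", 0)),
		expr_alloc_and(expr_alloc_symbol(sym_lookup("BAR", 0)),
			       expr_alloc_symbol(sym_lookup("BAZ", 0))));
	struct expr *dir_dep = expr_alloc_symbol(sym_lookup("BAZ", 0));
	struct expr *e = expr_simplify_unmet_dep(rev_dep, dir_dep);

	/* The (BAR && BAZ) branch already contains the unmet BAZ dependency
	 * and is dropped; only FOO is left and gets printed. */
	expr_fprint(e, stderr);
	expr_free(e);
}
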
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index e57826c..3d238db 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -192,7 +192,7 @@
 struct expr *expr_alloc_comp(enum expr_type type, struct symbol *s1, struct symbol *s2);
 struct expr *expr_alloc_and(struct expr *e1, struct expr *e2);
 struct expr *expr_alloc_or(struct expr *e1, struct expr *e2);
-struct expr *expr_copy(struct expr *org);
+struct expr *expr_copy(const struct expr *org);
 void expr_free(struct expr *e);
 int expr_eq(struct expr *e1, struct expr *e2);
 void expr_eliminate_eq(struct expr **ep1, struct expr **ep2);
@@ -207,6 +207,7 @@
 struct expr *expr_extract_eq_or(struct expr **ep1, struct expr **ep2);
 void expr_extract_eq(enum expr_type type, struct expr **ep, struct expr **ep1, struct expr **ep2);
 struct expr *expr_trans_compare(struct expr *e, enum expr_type type, struct symbol *sym);
+struct expr *expr_simplify_unmet_dep(struct expr *e1, struct expr *e2);
 
 void expr_fprint(struct expr *e, FILE *out);
 struct gstr; /* forward */
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 3f7240d..febf0c9 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -14,6 +14,7 @@
 static inline const char *gettext(const char *txt) { return txt; }
 static inline void textdomain(const char *domainname) {}
 static inline void bindtextdomain(const char *name, const char *dir) {}
+static inline char *bind_textdomain_codeset(const char *dn, char *c) { return c; }
 #endif
 
 #ifdef __cplusplus
@@ -67,10 +68,12 @@
 	enum symbol_type stype;
 };
 
+#ifdef YYDEBUG
+extern int zconfdebug;
+#endif
+
 int zconfparse(void);
 void zconfdump(FILE *out);
-
-extern int zconfdebug;
 void zconf_starthelp(void);
 FILE *zconf_fopen(const char *name);
 void zconf_initscan(const char *name);
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 5f77dcb..5fdf10d 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -203,7 +203,7 @@
 	}
 }
 
-static int menu_range_valid_sym(struct symbol *sym, struct symbol *sym2)
+static int menu_validate_number(struct symbol *sym, struct symbol *sym2)
 {
 	return sym2->type == S_INT || sym2->type == S_HEX ||
 	       (sym2->type == S_UNKNOWN && sym_string_valid(sym, sym2->name));
@@ -221,6 +221,15 @@
 				prop_warn(prop,
 				    "default for config symbol '%s'"
 				    " must be a single symbol", sym->name);
+			if (prop->expr->type != E_SYMBOL)
+				break;
+			sym2 = prop_get_symbol(prop);
+			if (sym->type == S_HEX || sym->type == S_INT) {
+				if (!menu_validate_number(sym, sym2))
+					prop_warn(prop,
+					    "'%s': number is invalid",
+					    sym->name);
+			}
 			break;
 		case P_SELECT:
 			sym2 = prop_get_symbol(prop);
@@ -240,8 +249,8 @@
 			if (sym->type != S_INT && sym->type != S_HEX)
 				prop_warn(prop, "range is only allowed "
 				                "for int or hex symbols");
-			if (!menu_range_valid_sym(sym, prop->expr->left.sym) ||
-			    !menu_range_valid_sym(sym, prop->expr->right.sym))
+			if (!menu_validate_number(sym, prop->expr->left.sym) ||
+			    !menu_validate_number(sym, prop->expr->right.sym))
 				prop_warn(prop, "range is invalid");
 			break;
 		default:
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 272a987..db56377 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -248,7 +248,7 @@
 "Only relevant lines are shown.\n"
 "\n\n"
 "Search examples:\n"
-"Examples: USB   = > find all symbols containing USB\n"
+"Examples: USB  => find all symbols containing USB\n"
 "          ^USB => find all symbols starting with USB\n"
 "          USB$ => find all symbols ending with USB\n"
 "\n");
@@ -1266,9 +1266,13 @@
 			if (child->sym == sym_get_choice_value(menu->sym))
 				item_make(child, ':', "<X> %s",
 						_(menu_get_prompt(child)));
-			else
+			else if (child->sym)
 				item_make(child, ':', "    %s",
 						_(menu_get_prompt(child)));
+			else
+				item_make(child, ':', "*** %s ***",
+						_(menu_get_prompt(child)));
+
 			if (child->sym == active){
 				last_top_row = top_row(curses_menu);
 				selected_index = i;
@@ -1334,7 +1338,7 @@
 			break;
 
 		child = item_data();
-		if (!child || !menu_is_visible(child))
+		if (!child || !menu_is_visible(child) || !child->sym)
 			continue;
 		switch (res) {
 		case ' ':
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index af6e9f3..a796c95 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -351,12 +351,16 @@
 			}
 		calc_newval:
 			if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) {
+				struct expr *e;
+				e = expr_simplify_unmet_dep(sym->rev_dep.expr,
+				    sym->dir_dep.expr);
 				fprintf(stderr, "warning: (");
-				expr_fprint(sym->rev_dep.expr, stderr);
+				expr_fprint(e, stderr);
 				fprintf(stderr, ") selects %s which has unmet direct dependencies (",
 					sym->name);
 				expr_fprint(sym->dir_dep.expr, stderr);
 				fprintf(stderr, ")\n");
+				expr_free(e);
 			}
 			newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri);
 		}
@@ -686,7 +690,7 @@
 		switch (sym->type) {
 		case S_BOOLEAN:
 		case S_TRISTATE:
-			/* The visibility imay limit the value from yes => mod */
+			/* The visibility may limit the value from yes => mod */
 			val = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri);
 			break;
 		default:
diff --git a/scripts/mkuboot.sh b/scripts/mkuboot.sh
index 2e3d3cd..446739c 100755
--- a/scripts/mkuboot.sh
+++ b/scripts/mkuboot.sh
@@ -11,7 +11,7 @@
 	if [ -z "${MKIMAGE}" ]; then
 		# Doesn't exist
 		echo '"mkimage" command not found - U-Boot images will not be built' >&2
-		exit 0;
+		exit 1;
 	fi
 fi
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 33122ca..e8fba95 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -790,6 +790,7 @@
 {
 	".comment*",
 	".debug*",
+	".zdebug*",		/* Compressed debug sections. */
 	".GCC-command-line",	/* mn10300 */
 	".mdebug*",        /* alpha, score, mips etc. */
 	".pdr",            /* alpha, score, mips etc. */
@@ -1441,7 +1442,7 @@
 	int section = shndx2secindex(sechdr->sh_info);
 
 	return (void *)elf->hdr + sechdrs[section].sh_offset +
-		r->r_offset - sechdrs[section].sh_addr;
+		r->r_offset;
 }
 
 static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
@@ -1614,7 +1615,7 @@
  * A module includes a number of sections that are discarded
  * either when loaded or when used as built-in.
  * For loaded modules all functions marked __init and all data
- * marked __initdata will be discarded when the module has been intialized.
+ * marked __initdata will be discarded when the module has been initialized.
  * Likewise for modules used built-in the sections marked __exit
  * are discarded because __exit marked function are supposed to be called
  * only when a module is unloaded which never happens for built-in modules.
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 49b74e1..b0b2357 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -25,8 +25,44 @@
 	chown -R root:root "$pdir"
 	chmod -R go-w "$pdir"
 
+	# Attempt to find the correct Debian architecture
+	local forcearch="" debarch=""
+	case "$UTS_MACHINE" in
+	i386|ia64|alpha)
+		debarch="$UTS_MACHINE" ;;
+	x86_64)
+		debarch=amd64 ;;
+	sparc*)
+		debarch=sparc ;;
+	s390*)
+		debarch=s390 ;;
+	ppc*)
+		debarch=powerpc ;;
+	parisc*)
+		debarch=hppa ;;
+	mips*)
+		debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
+	arm*)
+		debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
+	*)
+		echo "" >&2
+		echo "** ** **  WARNING  ** ** **" >&2
+		echo "" >&2
+		echo "Your architecture doesn't have it's equivalent" >&2
+		echo "Debian userspace architecture defined!" >&2
+		echo "Falling back to using your current userspace instead!" >&2
+		echo "Please add support for $UTS_MACHINE to ${0} ..." >&2
+		echo "" >&2
+	esac
+	if [ -n "$KBUILD_DEBARCH" ] ; then
+		debarch="$KBUILD_DEBARCH"
+	fi
+	if [ -n "$debarch" ] ; then
+		forcearch="-DArchitecture=$debarch"
+	fi
+
 	# Create the package
-	dpkg-gencontrol -isp -p$pname -P"$pdir"
+	dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
 	dpkg --build "$pdir" ..
 }
 
@@ -40,17 +76,27 @@
 fi
 tmpdir="$objtree/debian/tmp"
 fwdir="$objtree/debian/fwtmp"
+kernel_headers_dir="$objtree/debian/hdrtmp"
+libc_headers_dir="$objtree/debian/headertmp"
 packagename=linux-image-$version
 fwpackagename=linux-firmware-image
+kernel_headers_packagename=linux-headers-$version
+libc_headers_packagename=linux-libc-dev
 
 if [ "$ARCH" = "um" ] ; then
 	packagename=user-mode-linux-$version
 fi
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir"
-mkdir -p "$tmpdir/DEBIAN" "$tmpdir/lib" "$tmpdir/boot" "$tmpdir/usr/share/doc/$packagename"
-mkdir -p "$fwdir/DEBIAN" "$fwdir/lib" "$fwdir/usr/share/doc/$fwpackagename"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir"
+mkdir -m 755 -p "$tmpdir/DEBIAN"
+mkdir -p  "$tmpdir/lib" "$tmpdir/boot" "$tmpdir/usr/share/doc/$packagename"
+mkdir -m 755 -p "$fwdir/DEBIAN"
+mkdir -p "$fwdir/lib" "$fwdir/usr/share/doc/$fwpackagename"
+mkdir -m 755 -p "$libc_headers_dir/DEBIAN"
+mkdir -p "$libc_headers_dir/usr/share/doc/$libc_headers_packagename"
+mkdir -m 755 -p "$kernel_headers_dir/DEBIAN"
+mkdir -p "$kernel_headers_dir/usr/share/doc/$kernel_headers_packagename"
 if [ "$ARCH" = "um" ] ; then
 	mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin"
 fi
@@ -81,6 +127,9 @@
 	fi
 fi
 
+make headers_check
+make headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr"
+
 # Install the maintainer scripts
 # Note: hook scripts under /etc/kernel are also executed by official Debian
 # kernel packages, as well as kernel packages built using make-kpkg
@@ -188,6 +237,30 @@
 
 fi
 
+# Build header package
+find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$
+find arch/x86/include include scripts -type f >> /tmp/files$$
+(cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
+destdir=$kernel_headers_dir/usr/src/linux-headers-$version
+mkdir -p "$destdir"
+tar -c -f - -T /tmp/files$$ | (cd $destdir; tar -xf -)
+(cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
+rm -f /tmp/files$$ /tmp/objfiles$$
+arch=$(dpkg --print-architecture)
+
+cat <<EOF >> debian/control
+
+Package: $kernel_headers_packagename
+Provides: linux-headers, linux-headers-2.6
+Architecture: $arch
+Description: Linux kernel headers for $KERNELRELEASE on $arch
+ This package provides kernel header files for $KERNELRELEASE on $arch
+ .
+ This is useful for people who need to build external modules
+EOF
+
+create_package "$kernel_headers_packagename" "$kernel_headers_dir"
+
 # Do we have firmware? Move it out of the way and build it into a package.
 if [ -e "$tmpdir/lib/firmware" ]; then
 	mv "$tmpdir/lib/firmware" "$fwdir/lib/"
@@ -203,6 +276,18 @@
 	create_package "$fwpackagename" "$fwdir"
 fi
 
+cat <<EOF >> debian/control
+
+Package: $libc_headers_packagename
+Section: devel
+Provides: linux-kernel-headers
+Architecture: any
+Description: Linux support headers for userspace development
+ This package provides userspace headers from the Linux kernel.  These headers
+ are used by the installed headers for GNU glibc and other system libraries.
+EOF
+
+create_package "$libc_headers_packagename" "$libc_headers_dir"
 create_package "$packagename" "$tmpdir"
 
 exit 0
diff --git a/scripts/tags.sh b/scripts/tags.sh
index bbbe584..92fdc45 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -123,7 +123,7 @@
 	-I ____cacheline_internodealigned_in_smp                \
 	-I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL                      \
 	-I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
-	--extra=+f --c-kinds=-px                                \
+	--extra=+f --c-kinds=+px                                \
 	--regex-asm='/^ENTRY\(([^)]*)\).*/\1/'                  \
 	--regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
 	--regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/'		\
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
new file mode 100644
index 0000000..17a5798
--- /dev/null
+++ b/scripts/xz_wrap.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# This is a wrapper for xz to compress the kernel image using appropriate
+# compression options depending on the architecture.
+#
+# Author: Lasse Collin <lasse.collin@tukaani.org>
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+
+BCJ=
+LZMA2OPTS=
+
+case $ARCH in
+	x86|x86_64)     BCJ=--x86 ;;
+	powerpc)        BCJ=--powerpc ;;
+	ia64)           BCJ=--ia64; LZMA2OPTS=pb=4 ;;
+	arm)            BCJ=--arm ;;
+	sparc)          BCJ=--sparc ;;
+esac
+
+exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
diff --git a/security/Kconfig b/security/Kconfig
index e80da95..95accd4 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -21,6 +21,37 @@
 
 	  If you are unsure as to whether this is required, answer N.
 
+config TRUSTED_KEYS
+	tristate "TRUSTED KEYS"
+	depends on KEYS && TCG_TPM
+	select CRYPTO
+	select CRYPTO_HMAC
+	select CRYPTO_SHA1
+	help
+	  This option provides support for creating, sealing, and unsealing
+	  keys in the kernel. Trusted keys are random number symmetric keys,
+	  generated and RSA-sealed by the TPM. The TPM only unseals the keys
+	  if the boot PCRs and other criteria match.  Userspace will only ever
+	  see encrypted blobs.
+
+	  If you are unsure as to whether this is required, answer N.
+
+config ENCRYPTED_KEYS
+	tristate "ENCRYPTED KEYS"
+	depends on KEYS && TRUSTED_KEYS
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_SHA256
+	select CRYPTO_RNG
+	help
+	  This option provides support for creating/encrypting/decrypting keys
+	  in the kernel.  Encrypted keys are kernel generated random numbers,
+	  which are encrypted/decrypted with a 'master' symmetric key. The
+	  'master' key can be either a trusted-key or user-key type.
+	  Userspace only ever sees/stores encrypted blobs.
+
+	  If you are unsure as to whether this is required, answer N.
+
 config KEYS_DEBUG_PROC_KEYS
 	bool "Enable the /proc/keys file by which keys may be viewed"
 	depends on KEYS
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index be36fea..ab8c6d8 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -15,12 +15,11 @@
 #ifndef __AA_FILE_H
 #define __AA_FILE_H
 
-#include <linux/path.h>
-
 #include "domain.h"
 #include "match.h"
 
 struct aa_profile;
+struct path;
 
 /*
  * We use MAY_EXEC, MAY_WRITE, MAY_READ, MAY_APPEND and the following flags
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 734a6d3..a4a8639 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -15,6 +15,7 @@
 #ifndef __AA_MATCH_H
 #define __AA_MATCH_H
 
+#include <linux/kref.h>
 #include <linux/workqueue.h>
 
 #define DFA_NOMATCH			0
@@ -27,7 +28,7 @@
  * The format used for transition tables is based on the GNU flex table
  * file format (--tables-file option; see Table File Format in the flex
  * info pages and the flex sources for documentation). The magic number
- * used in the header is 0x1B5E783D insted of 0xF13C57B1 though, because
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
  * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
  * slightly differently (see the apparmor-parser package).
  */
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 74d5447..6c94105 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -13,6 +13,8 @@
 	request_key_auth.o \
 	user_defined.o
 
+obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/encrypted_defined.c b/security/keys/encrypted_defined.c
new file mode 100644
index 0000000..32d27c8
--- /dev/null
+++ b/security/keys/encrypted_defined.c
@@ -0,0 +1,903 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/key-type.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+
+#include "encrypted_defined.h"
+
+static const char KEY_TRUSTED_PREFIX[] = "trusted:";
+static const char KEY_USER_PREFIX[] = "user:";
+static const char hash_alg[] = "sha256";
+static const char hmac_alg[] = "hmac(sha256)";
+static const char blkcipher_alg[] = "cbc(aes)";
+static unsigned int ivsize;
+static int blksize;
+
+#define KEY_TRUSTED_PREFIX_LEN (sizeof(KEY_TRUSTED_PREFIX) - 1)
+#define KEY_USER_PREFIX_LEN (sizeof(KEY_USER_PREFIX) - 1)
+#define HASH_SIZE SHA256_DIGEST_SIZE
+#define MAX_DATA_SIZE 4096
+#define MIN_DATA_SIZE  20
+
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+enum {
+	Opt_err = -1, Opt_new, Opt_load, Opt_update
+};
+
+static const match_table_t key_tokens = {
+	{Opt_new, "new"},
+	{Opt_load, "load"},
+	{Opt_update, "update"},
+	{Opt_err, NULL}
+};
+
+static int aes_get_sizes(void)
+{
+	struct crypto_blkcipher *tfm;
+
+	tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
+		       PTR_ERR(tfm));
+		return PTR_ERR(tfm);
+	}
+	ivsize = crypto_blkcipher_ivsize(tfm);
+	blksize = crypto_blkcipher_blocksize(tfm);
+	crypto_free_blkcipher(tfm);
+	return 0;
+}
+
+/*
+ * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
+ *
+ * key-type:= "trusted:" | "encrypted:"
+ * desc:= master-key description
+ *
+ * Verify that 'key-type' is valid and that 'desc' exists. On key update,
+ * only the master-key description is permitted to change; the key-type
+ * itself must remain the same.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int valid_master_desc(const char *new_desc, const char *orig_desc)
+{
+	if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
+		if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
+			goto out;
+		if (orig_desc)
+			if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
+				goto out;
+	} else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
+		if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
+			goto out;
+		if (orig_desc)
+			if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
+				goto out;
+	} else
+		goto out;
+	return 0;
+out:
+	return -EINVAL;
+}
+
+/*
+ * datablob_parse - parse the keyctl data
+ *
+ * datablob format:
+ * new <master-key name> <decrypted data length>
+ * load <master-key name> <decrypted data length> <encrypted iv + data>
+ * update <new-master-key name>
+ *
+ * Tokenizes a copy of the keyctl data, returning a pointer to each token,
+ * which is null terminated.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, char **master_desc,
+			  char **decrypted_datalen, char **hex_encoded_iv)
+{
+	substring_t args[MAX_OPT_ARGS];
+	int ret = -EINVAL;
+	int key_cmd;
+	char *p;
+
+	p = strsep(&datablob, " \t");
+	if (!p)
+		return ret;
+	key_cmd = match_token(p, key_tokens, args);
+
+	*master_desc = strsep(&datablob, " \t");
+	if (!*master_desc)
+		goto out;
+
+	if (valid_master_desc(*master_desc, NULL) < 0)
+		goto out;
+
+	if (decrypted_datalen) {
+		*decrypted_datalen = strsep(&datablob, " \t");
+		if (!*decrypted_datalen)
+			goto out;
+	}
+
+	switch (key_cmd) {
+	case Opt_new:
+		if (!decrypted_datalen)
+			break;
+		ret = 0;
+		break;
+	case Opt_load:
+		if (!decrypted_datalen)
+			break;
+		*hex_encoded_iv = strsep(&datablob, " \t");
+		if (!*hex_encoded_iv)
+			break;
+		ret = 0;
+		break;
+	case Opt_update:
+		if (decrypted_datalen)
+			break;
+		ret = 0;
+		break;
+	case Opt_err:
+		break;
+	}
+out:
+	return ret;
+}
+
+/*
+ * datablob_format - format as an ascii string, before copying to userspace
+ */
+static char *datablob_format(struct encrypted_key_payload *epayload,
+			     size_t asciiblob_len)
+{
+	char *ascii_buf, *bufp;
+	u8 *iv = epayload->iv;
+	int len;
+	int i;
+
+	ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL);
+	if (!ascii_buf)
+		goto out;
+
+	ascii_buf[asciiblob_len] = '\0';
+
+	/* copy datablob master_desc and datalen strings */
+	len = sprintf(ascii_buf, "%s %s ", epayload->master_desc,
+		      epayload->datalen);
+
+	/* convert the hex encoded iv, encrypted-data and HMAC to ascii */
+	bufp = &ascii_buf[len];
+	for (i = 0; i < (asciiblob_len - len) / 2; i++)
+		bufp = pack_hex_byte(bufp, iv[i]);
+out:
+	return ascii_buf;
+}
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both the trusted and encrypted key-types, trusted key data,
+ * unlike encrypted key data, is never visible to userspace in decrypted form.
+ */
+static struct key *request_trusted_key(const char *trusted_desc,
+				       u8 **master_key, size_t *master_keylen)
+{
+	struct trusted_key_payload *tpayload;
+	struct key *tkey;
+
+	tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+	if (IS_ERR(tkey))
+		goto error;
+
+	down_read(&tkey->sem);
+	tpayload = rcu_dereference(tkey->payload.data);
+	*master_key = tpayload->key;
+	*master_keylen = tpayload->key_len;
+error:
+	return tkey;
+}
+
+/*
+ * request_user_key - request the user key
+ *
+ * Use a user provided key to encrypt/decrypt an encrypted-key.
+ */
+static struct key *request_user_key(const char *master_desc, u8 **master_key,
+				    size_t *master_keylen)
+{
+	struct user_key_payload *upayload;
+	struct key *ukey;
+
+	ukey = request_key(&key_type_user, master_desc, NULL);
+	if (IS_ERR(ukey))
+		goto error;
+
+	down_read(&ukey->sem);
+	upayload = rcu_dereference(ukey->payload.data);
+	*master_key = upayload->data;
+	*master_keylen = upayload->datalen;
+error:
+	return ukey;
+}
+
+static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+	struct sdesc *sdesc;
+	int size;
+
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+	sdesc = kmalloc(size, GFP_KERNEL);
+	if (!sdesc)
+		return ERR_PTR(-ENOMEM);
+	sdesc->shash.tfm = alg;
+	sdesc->shash.flags = 0x0;
+	return sdesc;
+}
+
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+		     const u8 *buf, unsigned int buflen)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = alloc_sdesc(hmacalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_setkey(hmacalg, key, keylen);
+	if (!ret)
+		ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
+	kfree(sdesc);
+	return ret;
+}
+
+static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = alloc_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("encrypted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
+	kfree(sdesc);
+	return ret;
+}
+
+enum derived_key_type { ENC_KEY, AUTH_KEY };
+
+/* Derive authentication/encryption key from trusted key */
+static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
+			   const u8 *master_key, size_t master_keylen)
+{
+	u8 *derived_buf;
+	unsigned int derived_buf_len;
+	int ret;
+
+	derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen;
+	if (derived_buf_len < HASH_SIZE)
+		derived_buf_len = HASH_SIZE;
+
+	derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
+	if (!derived_buf) {
+		pr_err("encrypted_key: out of memory\n");
+		return -ENOMEM;
+	}
+	if (key_type)
+		strcpy(derived_buf, "AUTH_KEY");
+	else
+		strcpy(derived_buf, "ENC_KEY");
+
+	memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
+	       master_keylen);
+	ret = calc_hash(derived_key, derived_buf, derived_buf_len);
+	kfree(derived_buf);
+	return ret;
+}
+
+static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
+			       unsigned int key_len, const u8 *iv,
+			       unsigned int ivsize)
+{
+	int ret;
+
+	desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(desc->tfm)) {
+		pr_err("encrypted_key: failed to load %s transform (%ld)\n",
+		       blkcipher_alg, PTR_ERR(desc->tfm));
+		return PTR_ERR(desc->tfm);
+	}
+	desc->flags = 0;
+
+	ret = crypto_blkcipher_setkey(desc->tfm, key, key_len);
+	if (ret < 0) {
+		pr_err("encrypted_key: failed to setkey (%d)\n", ret);
+		crypto_free_blkcipher(desc->tfm);
+		return ret;
+	}
+	crypto_blkcipher_set_iv(desc->tfm, iv, ivsize);
+	return 0;
+}
+
+static struct key *request_master_key(struct encrypted_key_payload *epayload,
+				      u8 **master_key, size_t *master_keylen)
+{
+	struct key *mkey = NULL;
+
+	if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+		     KEY_TRUSTED_PREFIX_LEN)) {
+		mkey = request_trusted_key(epayload->master_desc +
+					   KEY_TRUSTED_PREFIX_LEN,
+					   master_key, master_keylen);
+	} else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
+			    KEY_USER_PREFIX_LEN)) {
+		mkey = request_user_key(epayload->master_desc +
+					KEY_USER_PREFIX_LEN,
+					master_key, master_keylen);
+	} else
+		goto out;
+
+	if (IS_ERR(mkey))
+		pr_info("encrypted_key: key %s not found",
+			epayload->master_desc);
+	if (mkey)
+		dump_master_key(*master_key, *master_keylen);
+out:
+	return mkey;
+}
+
+/* Before returning data to userspace, encrypt decrypted data. */
+static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+			       const u8 *derived_key,
+			       unsigned int derived_keylen)
+{
+	struct scatterlist sg_in[2];
+	struct scatterlist sg_out[1];
+	struct blkcipher_desc desc;
+	unsigned int encrypted_datalen;
+	unsigned int padlen;
+	char pad[16];
+	int ret;
+
+	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+	padlen = encrypted_datalen - epayload->decrypted_datalen;
+
+	ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
+				  epayload->iv, ivsize);
+	if (ret < 0)
+		goto out;
+	dump_decrypted_data(epayload);
+
+	memset(pad, 0, sizeof pad);
+	sg_init_table(sg_in, 2);
+	sg_set_buf(&sg_in[0], epayload->decrypted_data,
+		   epayload->decrypted_datalen);
+	sg_set_buf(&sg_in[1], pad, padlen);
+
+	sg_init_table(sg_out, 1);
+	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
+
+	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen);
+	crypto_free_blkcipher(desc.tfm);
+	if (ret < 0)
+		pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
+	else
+		dump_encrypted_data(epayload, encrypted_datalen);
+out:
+	return ret;
+}
+
+static int datablob_hmac_append(struct encrypted_key_payload *epayload,
+				const u8 *master_key, size_t master_keylen)
+{
+	u8 derived_key[HASH_SIZE];
+	u8 *digest;
+	int ret;
+
+	ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	digest = epayload->master_desc + epayload->datablob_len;
+	ret = calc_hmac(digest, derived_key, sizeof derived_key,
+			epayload->master_desc, epayload->datablob_len);
+	if (!ret)
+		dump_hmac(NULL, digest, HASH_SIZE);
+out:
+	return ret;
+}
+
+/* verify HMAC before decrypting encrypted key */
+static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
+				const u8 *master_key, size_t master_keylen)
+{
+	u8 derived_key[HASH_SIZE];
+	u8 digest[HASH_SIZE];
+	int ret;
+
+	ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ret = calc_hmac(digest, derived_key, sizeof derived_key,
+			epayload->master_desc, epayload->datablob_len);
+	if (ret < 0)
+		goto out;
+	ret = memcmp(digest, epayload->master_desc + epayload->datablob_len,
+		     sizeof digest);
+	if (ret) {
+		ret = -EINVAL;
+		dump_hmac("datablob",
+			  epayload->master_desc + epayload->datablob_len,
+			  HASH_SIZE);
+		dump_hmac("calc", digest, HASH_SIZE);
+	}
+out:
+	return ret;
+}
+
+static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+			       const u8 *derived_key,
+			       unsigned int derived_keylen)
+{
+	struct scatterlist sg_in[1];
+	struct scatterlist sg_out[2];
+	struct blkcipher_desc desc;
+	unsigned int encrypted_datalen;
+	char pad[16];
+	int ret;
+
+	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+	ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
+				  epayload->iv, ivsize);
+	if (ret < 0)
+		goto out;
+	dump_encrypted_data(epayload, encrypted_datalen);
+
+	memset(pad, 0, sizeof pad);
+	sg_init_table(sg_in, 1);
+	sg_init_table(sg_out, 2);
+	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
+	sg_set_buf(&sg_out[0], epayload->decrypted_data,
+		   epayload->decrypted_datalen);
+	sg_set_buf(&sg_out[1], pad, sizeof pad);
+
+	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen);
+	crypto_free_blkcipher(desc.tfm);
+	if (ret < 0)
+		goto out;
+	dump_decrypted_data(epayload);
+out:
+	return ret;
+}
+
+/* Allocate memory for decrypted key and datablob. */
+static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+							 const char *master_desc,
+							 const char *datalen)
+{
+	struct encrypted_key_payload *epayload = NULL;
+	unsigned short datablob_len;
+	unsigned short decrypted_datalen;
+	unsigned int encrypted_datalen;
+	long dlen;
+	int ret;
+
+	ret = strict_strtol(datalen, 10, &dlen);
+	if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
+		return ERR_PTR(-EINVAL);
+
+	decrypted_datalen = dlen;
+	encrypted_datalen = roundup(decrypted_datalen, blksize);
+
+	datablob_len = strlen(master_desc) + 1 + strlen(datalen) + 1
+	    + ivsize + 1 + encrypted_datalen;
+
+	ret = key_payload_reserve(key, decrypted_datalen + datablob_len
+				  + HASH_SIZE + 1);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	epayload = kzalloc(sizeof(*epayload) + decrypted_datalen +
+			   datablob_len + HASH_SIZE + 1, GFP_KERNEL);
+	if (!epayload)
+		return ERR_PTR(-ENOMEM);
+
+	epayload->decrypted_datalen = decrypted_datalen;
+	epayload->datablob_len = datablob_len;
+	return epayload;
+}
+
+static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
+				 const char *hex_encoded_iv)
+{
+	struct key *mkey;
+	u8 derived_key[HASH_SIZE];
+	u8 *master_key;
+	u8 *hmac;
+	const char *hex_encoded_data;
+	unsigned int encrypted_datalen;
+	size_t master_keylen;
+	size_t asciilen;
+	int ret;
+
+	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+	asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2;
+	if (strlen(hex_encoded_iv) != asciilen)
+		return -EINVAL;
+
+	hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
+	hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+	hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen);
+
+	hmac = epayload->master_desc + epayload->datablob_len;
+	hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE);
+
+	mkey = request_master_key(epayload, &master_key, &master_keylen);
+	if (IS_ERR(mkey))
+		return PTR_ERR(mkey);
+
+	ret = datablob_hmac_verify(epayload, master_key, master_keylen);
+	if (ret < 0) {
+		pr_err("encrypted_key: bad hmac (%d)\n", ret);
+		goto out;
+	}
+
+	ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key);
+	if (ret < 0)
+		pr_err("encrypted_key: failed to decrypt key (%d)\n", ret);
+out:
+	up_read(&mkey->sem);
+	key_put(mkey);
+	return ret;
+}
+
+static void __ekey_init(struct encrypted_key_payload *epayload,
+			const char *master_desc, const char *datalen)
+{
+	epayload->master_desc = epayload->decrypted_data
+	    + epayload->decrypted_datalen;
+	epayload->datalen = epayload->master_desc + strlen(master_desc) + 1;
+	epayload->iv = epayload->datalen + strlen(datalen) + 1;
+	epayload->encrypted_data = epayload->iv + ivsize + 1;
+
+	memcpy(epayload->master_desc, master_desc, strlen(master_desc));
+	memcpy(epayload->datalen, datalen, strlen(datalen));
+}
+
+/*
+ * encrypted_init - initialize an encrypted key
+ *
+ * For a new key, use a random number for both the iv and data
+ * itself.  For an old key, decrypt the hex encoded data.
+ */
+static int encrypted_init(struct encrypted_key_payload *epayload,
+			  const char *master_desc, const char *datalen,
+			  const char *hex_encoded_iv)
+{
+	int ret = 0;
+
+	__ekey_init(epayload, master_desc, datalen);
+	if (!hex_encoded_iv) {
+		get_random_bytes(epayload->iv, ivsize);
+
+		get_random_bytes(epayload->decrypted_data,
+				 epayload->decrypted_datalen);
+	} else
+		ret = encrypted_key_decrypt(epayload, hex_encoded_iv);
+	return ret;
+}
+
+/*
+ * encrypted_instantiate - instantiate an encrypted key
+ *
+ * Decrypt an existing encrypted datablob or create a new encrypted key
+ * based on a kernel random number.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_instantiate(struct key *key, const void *data,
+				 size_t datalen)
+{
+	struct encrypted_key_payload *epayload = NULL;
+	char *datablob = NULL;
+	char *master_desc = NULL;
+	char *decrypted_datalen = NULL;
+	char *hex_encoded_iv = NULL;
+	int ret;
+
+	if (datalen <= 0 || datalen > 32767 || !data)
+		return -EINVAL;
+
+	datablob = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!datablob)
+		return -ENOMEM;
+	datablob[datalen] = 0;
+	memcpy(datablob, data, datalen);
+	ret = datablob_parse(datablob, &master_desc, &decrypted_datalen,
+			     &hex_encoded_iv);
+	if (ret < 0)
+		goto out;
+
+	epayload = encrypted_key_alloc(key, master_desc, decrypted_datalen);
+	if (IS_ERR(epayload)) {
+		ret = PTR_ERR(epayload);
+		goto out;
+	}
+	ret = encrypted_init(epayload, master_desc, decrypted_datalen,
+			     hex_encoded_iv);
+	if (ret < 0) {
+		kfree(epayload);
+		goto out;
+	}
+
+	rcu_assign_pointer(key->payload.data, epayload);
+out:
+	kfree(datablob);
+	return ret;
+}
+
+static void encrypted_rcu_free(struct rcu_head *rcu)
+{
+	struct encrypted_key_payload *epayload;
+
+	epayload = container_of(rcu, struct encrypted_key_payload, rcu);
+	memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
+	kfree(epayload);
+}
+
+/*
+ * encrypted_update - update the master key description
+ *
+ * Change the master key description for an existing encrypted key.
+ * The next read will return an encrypted datablob using the new
+ * master key description.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_update(struct key *key, const void *data, size_t datalen)
+{
+	struct encrypted_key_payload *epayload = key->payload.data;
+	struct encrypted_key_payload *new_epayload;
+	char *buf;
+	char *new_master_desc = NULL;
+	int ret = 0;
+
+	if (datalen <= 0 || datalen > 32767 || !data)
+		return -EINVAL;
+
+	buf = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[datalen] = 0;
+	memcpy(buf, data, datalen);
+	ret = datablob_parse(buf, &new_master_desc, NULL, NULL);
+	if (ret < 0)
+		goto out;
+
+	ret = valid_master_desc(new_master_desc, epayload->master_desc);
+	if (ret < 0)
+		goto out;
+
+	new_epayload = encrypted_key_alloc(key, new_master_desc,
+					   epayload->datalen);
+	if (IS_ERR(new_epayload)) {
+		ret = PTR_ERR(new_epayload);
+		goto out;
+	}
+
+	__ekey_init(new_epayload, new_master_desc, epayload->datalen);
+
+	memcpy(new_epayload->iv, epayload->iv, ivsize);
+	memcpy(new_epayload->decrypted_data, epayload->decrypted_data,
+	       epayload->decrypted_datalen);
+
+	rcu_assign_pointer(key->payload.data, new_epayload);
+	call_rcu(&epayload->rcu, encrypted_rcu_free);
+out:
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * encrypted_read - format and copy the encrypted data to userspace
+ *
+ * The resulting datablob format is:
+ * <master-key name> <decrypted data length> <encrypted iv> <encrypted data>
+ *
+ * On success, return to userspace the encrypted key datablob size.
+ */
+static long encrypted_read(const struct key *key, char __user *buffer,
+			   size_t buflen)
+{
+	struct encrypted_key_payload *epayload;
+	struct key *mkey;
+	u8 *master_key;
+	size_t master_keylen;
+	char derived_key[HASH_SIZE];
+	char *ascii_buf;
+	size_t asciiblob_len;
+	int ret;
+
+	epayload = rcu_dereference_protected(key->payload.data,
+				  rwsem_is_locked(&((struct key *)key)->sem));
+
+	/* returns the hex encoded iv, encrypted-data, and hmac as ascii */
+	asciiblob_len = epayload->datablob_len + ivsize + 1
+	    + roundup(epayload->decrypted_datalen, blksize)
+	    + (HASH_SIZE * 2);
+
+	if (!buffer || buflen < asciiblob_len)
+		return asciiblob_len;
+
+	mkey = request_master_key(epayload, &master_key, &master_keylen);
+	if (IS_ERR(mkey))
+		return PTR_ERR(mkey);
+
+	ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key);
+	if (ret < 0)
+		goto out;
+
+	ret = datablob_hmac_append(epayload, master_key, master_keylen);
+	if (ret < 0)
+		goto out;
+
+	ascii_buf = datablob_format(epayload, asciiblob_len);
+	if (!ascii_buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	up_read(&mkey->sem);
+	key_put(mkey);
+
+	if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
+		ret = -EFAULT;
+	kfree(ascii_buf);
+
+	return asciiblob_len;
+out:
+	up_read(&mkey->sem);
+	key_put(mkey);
+	return ret;
+}
+
+/*
+ * encrypted_destroy - before freeing the key, clear the decrypted data
+ *
+ * Before freeing the key, clear the memory containing the decrypted
+ * key data.
+ */
+static void encrypted_destroy(struct key *key)
+{
+	struct encrypted_key_payload *epayload = key->payload.data;
+
+	if (!epayload)
+		return;
+
+	memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
+	kfree(key->payload.data);
+}
+
+struct key_type key_type_encrypted = {
+	.name = "encrypted",
+	.instantiate = encrypted_instantiate,
+	.update = encrypted_update,
+	.match = user_match,
+	.destroy = encrypted_destroy,
+	.describe = user_describe,
+	.read = encrypted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_encrypted);
+
+static void encrypted_shash_release(void)
+{
+	if (hashalg)
+		crypto_free_shash(hashalg);
+	if (hmacalg)
+		crypto_free_shash(hmacalg);
+}
+
+static int __init encrypted_shash_alloc(void)
+{
+	int ret;
+
+	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hmacalg)) {
+		pr_info("encrypted_key: could not allocate crypto %s\n",
+			hmac_alg);
+		return PTR_ERR(hmacalg);
+	}
+
+	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hashalg)) {
+		pr_info("encrypted_key: could not allocate crypto %s\n",
+			hash_alg);
+		ret = PTR_ERR(hashalg);
+		goto hashalg_fail;
+	}
+
+	return 0;
+
+hashalg_fail:
+	crypto_free_shash(hmacalg);
+	return ret;
+}
+
+static int __init init_encrypted(void)
+{
+	int ret;
+
+	ret = encrypted_shash_alloc();
+	if (ret < 0)
+		return ret;
+	ret = register_key_type(&key_type_encrypted);
+	if (ret < 0)
+		goto out;
+	return aes_get_sizes();
+out:
+	encrypted_shash_release();
+	return ret;
+}
+
+static void __exit cleanup_encrypted(void)
+{
+	encrypted_shash_release();
+	unregister_key_type(&key_type_encrypted);
+}
+
+late_initcall(init_encrypted);
+module_exit(cleanup_encrypted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted_defined.h b/security/keys/encrypted_defined.h
new file mode 100644
index 0000000..cef5e2f
--- /dev/null
+++ b/security/keys/encrypted_defined.h
@@ -0,0 +1,54 @@
+#ifndef __ENCRYPTED_KEY_H
+#define __ENCRYPTED_KEY_H
+
+#define ENCRYPTED_DEBUG 0
+
+#if ENCRYPTED_DEBUG
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+	print_hex_dump(KERN_ERR, "master key: ", DUMP_PREFIX_NONE, 32, 1,
+		       master_key, master_keylen, 0);
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+	print_hex_dump(KERN_ERR, "decrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+		       epayload->decrypted_data,
+		       epayload->decrypted_datalen, 0);
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+				       unsigned int encrypted_datalen)
+{
+	print_hex_dump(KERN_ERR, "encrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+		       epayload->encrypted_data, encrypted_datalen, 0);
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+			     unsigned int hmac_size)
+{
+	if (str)
+		pr_info("encrypted_key: %s", str);
+	print_hex_dump(KERN_ERR, "hmac: ", DUMP_PREFIX_NONE, 32, 1, digest,
+		       hmac_size, 0);
+}
+#else
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+				       unsigned int encrypted_datalen)
+{
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+			     unsigned int hmac_size)
+{
+}
+#endif
+#endif
diff --git a/security/keys/trusted_defined.c b/security/keys/trusted_defined.c
new file mode 100644
index 0000000..975e9f2
--- /dev/null
+++ b/security/keys/trusted_defined.c
@@ -0,0 +1,1175 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Author:
+ * David Safford <safford@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <linux/key-type.h>
+#include <linux/rcupdate.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/capability.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include "trusted_defined.h"
+
+static const char hmac_alg[] = "hmac(sha1)";
+static const char hash_alg[] = "sha1";
+
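+/* a shash descriptor and its algorithm-specific context, allocated together */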
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+	struct sdesc *sdesc;
+	int size;
+
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+	sdesc = kmalloc(size, GFP_KERNEL);
+	if (!sdesc)
+		return ERR_PTR(-ENOMEM);
+	sdesc->shash.tfm = alg;
+	sdesc->shash.flags = 0x0;
+	return sdesc;
+}
+
+static int TSS_sha1(const unsigned char *data, unsigned int datalen,
+		    unsigned char *digest)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+	kfree(sdesc);
+	return ret;
+}
+
+static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
+		       unsigned int keylen, ...)
+{
+	struct sdesc *sdesc;
+	va_list argp;
+	unsigned int dlen;
+	unsigned char *data;
+	int ret;
+
+	sdesc = init_sdesc(hmacalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hmac_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_setkey(hmacalg, key, keylen);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+
+	va_start(argp, keylen);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		data = va_arg(argp, unsigned char *);
+		if (data == NULL) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = crypto_shash_update(&sdesc->shash, data, dlen);
+		if (ret < 0) {
+			va_end(argp);
+			goto out;
+		}
+	}
+	va_end(argp);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, digest);
+out:
+	kfree(sdesc);
+	return ret;
+}
+
+/*
+ * calculate authorization info fields to send to TPM
+ */
+static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+			unsigned int keylen, unsigned char *h1,
+			unsigned char *h2, unsigned char h3, ...)
+{
+	unsigned char paramdigest[SHA1_DIGEST_SIZE];
+	struct sdesc *sdesc;
+	unsigned int dlen;
+	unsigned char *data;
+	unsigned char c;
+	int ret;
+	va_list argp;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+
+	c = h3;
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+	va_start(argp, h3);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		data = va_arg(argp, unsigned char *);
+		ret = crypto_shash_update(&sdesc->shash, data, dlen);
+		if (ret < 0) {
+			va_end(argp);
+			goto out;
+		}
+	}
+	va_end(argp);
+	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
+				  paramdigest, TPM_NONCE_SIZE, h1,
+				  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
+out:
+	kfree(sdesc);
+	return ret;
+}
+
+/*
+ * verify the AUTH1_COMMAND (Seal) result from TPM
+ */
+static int TSS_checkhmac1(unsigned char *buffer,
+			  const uint32_t command,
+			  const unsigned char *ononce,
+			  const unsigned char *key,
+			  unsigned int keylen, ...)
+{
+	uint32_t bufsize;
+	uint16_t tag;
+	uint32_t ordinal;
+	uint32_t result;
+	unsigned char *enonce;
+	unsigned char *continueflag;
+	unsigned char *authdata;
+	unsigned char testhmac[SHA1_DIGEST_SIZE];
+	unsigned char paramdigest[SHA1_DIGEST_SIZE];
+	struct sdesc *sdesc;
+	unsigned int dlen;
+	unsigned int dpos;
+	va_list argp;
+	int ret;
+
+	bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+	tag = LOAD16(buffer, 0);
+	ordinal = command;
+	result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+	if (tag == TPM_TAG_RSP_COMMAND)
+		return 0;
+	if (tag != TPM_TAG_RSP_AUTH1_COMMAND)
+		return -EINVAL;
+	authdata = buffer + bufsize - SHA1_DIGEST_SIZE;
+	continueflag = authdata - 1;
+	enonce = continueflag - TPM_NONCE_SIZE;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+				  sizeof result);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+				  sizeof ordinal);
+	if (ret < 0)
+		goto out;
+	va_start(argp, keylen);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		dpos = va_arg(argp, unsigned int);
+		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+		if (ret < 0) {
+			va_end(argp);
+			goto out;
+		}
+	}
+	va_end(argp);
+	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (ret < 0)
+		goto out;
+
+	ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest,
+			  TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce,
+			  1, continueflag, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
+		ret = -EINVAL;
+out:
+	kfree(sdesc);
+	return ret;
+}
+
+/*
+ * verify the AUTH2_COMMAND (unseal) result from TPM
+ */
+static int TSS_checkhmac2(unsigned char *buffer,
+			  const uint32_t command,
+			  const unsigned char *ononce,
+			  const unsigned char *key1,
+			  unsigned int keylen1,
+			  const unsigned char *key2,
+			  unsigned int keylen2, ...)
+{
+	uint32_t bufsize;
+	uint16_t tag;
+	uint32_t ordinal;
+	uint32_t result;
+	unsigned char *enonce1;
+	unsigned char *continueflag1;
+	unsigned char *authdata1;
+	unsigned char *enonce2;
+	unsigned char *continueflag2;
+	unsigned char *authdata2;
+	unsigned char testhmac1[SHA1_DIGEST_SIZE];
+	unsigned char testhmac2[SHA1_DIGEST_SIZE];
+	unsigned char paramdigest[SHA1_DIGEST_SIZE];
+	struct sdesc *sdesc;
+	unsigned int dlen;
+	unsigned int dpos;
+	va_list argp;
+	int ret;
+
+	bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+	tag = LOAD16(buffer, 0);
+	ordinal = command;
+	result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+
+	if (tag == TPM_TAG_RSP_COMMAND)
+		return 0;
+	if (tag != TPM_TAG_RSP_AUTH2_COMMAND)
+		return -EINVAL;
+	authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1
+			+ SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE);
+	authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE);
+	continueflag1 = authdata1 - 1;
+	continueflag2 = authdata2 - 1;
+	enonce1 = continueflag1 - TPM_NONCE_SIZE;
+	enonce2 = continueflag2 - TPM_NONCE_SIZE;
+
+	sdesc = init_sdesc(hashalg);
+	if (IS_ERR(sdesc)) {
+		pr_info("trusted_key: can't alloc %s\n", hash_alg);
+		return PTR_ERR(sdesc);
+	}
+	ret = crypto_shash_init(&sdesc->shash);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+				  sizeof result);
+	if (ret < 0)
+		goto out;
+	ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+				  sizeof ordinal);
+	if (ret < 0)
+		goto out;
+
+	va_start(argp, keylen2);
+	for (;;) {
+		dlen = va_arg(argp, unsigned int);
+		if (dlen == 0)
+			break;
+		dpos = va_arg(argp, unsigned int);
+		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+		if (ret < 0) {
+			va_end(argp);
+			goto out;
+		}
+	}
+	va_end(argp);
+	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (ret < 0)
+		goto out;
+
+	ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE,
+			  paramdigest, TPM_NONCE_SIZE, enonce1,
+			  TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE,
+			  paramdigest, TPM_NONCE_SIZE, enonce2,
+			  TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+	if (ret < 0)
+		goto out;
+	if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+		ret = -EINVAL;
+out:
+	kfree(sdesc);
+	return ret;
+}
+
+/*
+ * For key-specific TPM requests, we generate and send our own
+ * TPM command packets using the TPM driver's send function.
+ */
+static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
+			    size_t buflen)
+{
+	int rc;
+
+	dump_tpm_buf(cmd);
+	rc = tpm_send(chip_num, cmd, buflen);
+	dump_tpm_buf(cmd);
+	if (rc > 0)
+		/* Can't return positive return code values to keyctl */
+		rc = -EPERM;
+	return rc;
+}
+
+/*
+ * get a random value from TPM
+ */
+static int tpm_get_random(struct tpm_buf *tb, unsigned char *buf, uint32_t len)
+{
+	int ret;
+
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_COMMAND);
+	store32(tb, TPM_GETRANDOM_SIZE);
+	store32(tb, TPM_ORD_GETRANDOM);
+	store32(tb, len);
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, sizeof tb->data);
+	if (!ret)
+		memcpy(buf, tb->data + TPM_GETRANDOM_SIZE, len);
+	return ret;
+}
+
+static int my_get_random(unsigned char *buf, int len)
+{
+	struct tpm_buf *tb;
+	int ret;
+
+	tb = kmalloc(sizeof *tb, GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+	ret = tpm_get_random(tb, buf, len);
+
+	kfree(tb);
+	return ret;
+}
+
+/*
+ * Lock a trusted key by extending a selected PCR.
+ *
+ * Prevents a trusted key that is sealed to PCRs from being accessed.
+ * This uses the tpm driver's extend function.
+ */
+static int pcrlock(const int pcrnum)
+{
+	unsigned char hash[SHA1_DIGEST_SIZE];
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	ret = my_get_random(hash, SHA1_DIGEST_SIZE);
+	if (ret < 0)
+		return ret;
+	return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
+}
+
+/*
+ * Create an object specific authorisation protocol (OSAP) session
+ */
+static int osap(struct tpm_buf *tb, struct osapsess *s,
+		const unsigned char *key, uint16_t type, uint32_t handle)
+{
+	unsigned char enonce[TPM_NONCE_SIZE];
+	unsigned char ononce[TPM_NONCE_SIZE];
+	int ret;
+
+	ret = tpm_get_random(tb, ononce, TPM_NONCE_SIZE);
+	if (ret < 0)
+		return ret;
+
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_COMMAND);
+	store32(tb, TPM_OSAP_SIZE);
+	store32(tb, TPM_ORD_OSAP);
+	store16(tb, type);
+	store32(tb, handle);
+	storebytes(tb, ononce, TPM_NONCE_SIZE);
+
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0)
+		return ret;
+
+	s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+	memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
+	       TPM_NONCE_SIZE);
+	memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
+				  TPM_NONCE_SIZE]), TPM_NONCE_SIZE);
+	return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE,
+			   enonce, TPM_NONCE_SIZE, ononce, 0, 0);
+}
+
+/*
+ * Create an object independent authorisation protocol (OIAP) session
+ */
+static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+{
+	int ret;
+
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_COMMAND);
+	store32(tb, TPM_OIAP_SIZE);
+	store32(tb, TPM_ORD_OIAP);
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0)
+		return ret;
+
+	*handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+	memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
+	       TPM_NONCE_SIZE);
+	return 0;
+}
+
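+/* scratch digests and nonces used while building an authorized seal request */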
+struct tpm_digests {
+	unsigned char encauth[SHA1_DIGEST_SIZE];
+	unsigned char pubauth[SHA1_DIGEST_SIZE];
+	unsigned char xorwork[SHA1_DIGEST_SIZE * 2];
+	unsigned char xorhash[SHA1_DIGEST_SIZE];
+	unsigned char nonceodd[TPM_NONCE_SIZE];
+};
+
+/*
+ * Have the TPM seal (encrypt) the trusted key, optionally bound to the
+ * specified Platform Configuration Registers (PCRs). An AUTH1 session
+ * authorizes use of the sealing key.
+ */
+static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+		    uint32_t keyhandle, const unsigned char *keyauth,
+		    const unsigned char *data, uint32_t datalen,
+		    unsigned char *blob, uint32_t *bloblen,
+		    const unsigned char *blobauth,
+		    const unsigned char *pcrinfo, uint32_t pcrinfosize)
+{
+	struct osapsess sess;
+	struct tpm_digests *td;
+	unsigned char cont;
+	uint32_t ordinal;
+	uint32_t pcrsize;
+	uint32_t datsize;
+	int sealinfosize;
+	int encdatasize;
+	int storedsize;
+	int ret;
+	int i;
+
+	/* alloc some work space for all the hashes */
+	td = kmalloc(sizeof *td, GFP_KERNEL);
+	if (!td)
+		return -ENOMEM;
+
+	/* get session for sealing key */
+	ret = osap(tb, &sess, keyauth, keytype, keyhandle);
+	if (ret < 0)
+		goto out;
+	dump_sess(&sess);
+
+	/* calculate encrypted authorization value */
+	memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE);
+	memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
+	ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
+	if (ret < 0)
+		goto out;
+
+	ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE);
+	if (ret < 0)
+		goto out;
+	ordinal = htonl(TPM_ORD_SEAL);
+	datsize = htonl(datalen);
+	pcrsize = htonl(pcrinfosize);
+	cont = 0;
+
+	/* encrypt data authorization key */
+	for (i = 0; i < SHA1_DIGEST_SIZE; ++i)
+		td->encauth[i] = td->xorhash[i] ^ blobauth[i];
+
+	/* calculate authorization HMAC value */
+	if (pcrinfosize == 0) {
+		/* no pcr info specified */
+		ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+				   sess.enonce, td->nonceodd, cont,
+				   sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+				   td->encauth, sizeof(uint32_t), &pcrsize,
+				   sizeof(uint32_t), &datsize, datalen, data, 0,
+				   0);
+	} else {
+		/* pcr info specified */
+		ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+				   sess.enonce, td->nonceodd, cont,
+				   sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+				   td->encauth, sizeof(uint32_t), &pcrsize,
+				   pcrinfosize, pcrinfo, sizeof(uint32_t),
+				   &datsize, datalen, data, 0, 0);
+	}
+	if (ret < 0)
+		goto out;
+
+	/* build and send the TPM request packet */
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+	store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
+	store32(tb, TPM_ORD_SEAL);
+	store32(tb, keyhandle);
+	storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
+	store32(tb, pcrinfosize);
+	storebytes(tb, pcrinfo, pcrinfosize);
+	store32(tb, datalen);
+	storebytes(tb, data, datalen);
+	store32(tb, sess.handle);
+	storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
+	store8(tb, cont);
+	storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
+
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0)
+		goto out;
+
+	/* calculate the size of the returned Blob */
+	sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
+	encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
+			     sizeof(uint32_t) + sealinfosize);
+	storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize +
+	    sizeof(uint32_t) + encdatasize;
+
+	/* check the HMAC in the response */
+	ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
+			     SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0,
+			     0);
+
+	/* copy the returned blob to caller */
+	if (!ret) {
+		memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
+		*bloblen = storedsize;
+	}
+out:
+	kfree(td);
+	return ret;
+}
+
+/*
+ * Use the AUTH2_COMMAND form of unseal to authorize both the key and the blob
+ */
+static int tpm_unseal(struct tpm_buf *tb,
+		      uint32_t keyhandle, const unsigned char *keyauth,
+		      const unsigned char *blob, int bloblen,
+		      const unsigned char *blobauth,
+		      unsigned char *data, unsigned int *datalen)
+{
+	unsigned char nonceodd[TPM_NONCE_SIZE];
+	unsigned char enonce1[TPM_NONCE_SIZE];
+	unsigned char enonce2[TPM_NONCE_SIZE];
+	unsigned char authdata1[SHA1_DIGEST_SIZE];
+	unsigned char authdata2[SHA1_DIGEST_SIZE];
+	uint32_t authhandle1 = 0;
+	uint32_t authhandle2 = 0;
+	unsigned char cont = 0;
+	uint32_t ordinal;
+	uint32_t keyhndl;
+	int ret;
+
+	/* sessions for unsealing key and data */
+	ret = oiap(tb, &authhandle1, enonce1);
+	if (ret < 0) {
+		pr_info("trusted_key: oiap failed (%d)\n", ret);
+		return ret;
+	}
+	ret = oiap(tb, &authhandle2, enonce2);
+	if (ret < 0) {
+		pr_info("trusted_key: oiap failed (%d)\n", ret);
+		return ret;
+	}
+
+	ordinal = htonl(TPM_ORD_UNSEAL);
+	keyhndl = htonl(SRKHANDLE);
+	ret = tpm_get_random(tb, nonceodd, TPM_NONCE_SIZE);
+	if (ret < 0) {
+		pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+		return ret;
+	}
+	ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
+			   enonce1, nonceodd, cont, sizeof(uint32_t),
+			   &ordinal, bloblen, blob, 0, 0);
+	if (ret < 0)
+		return ret;
+	ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE,
+			   enonce2, nonceodd, cont, sizeof(uint32_t),
+			   &ordinal, bloblen, blob, 0, 0);
+	if (ret < 0)
+		return ret;
+
+	/* build and send TPM request packet */
+	INIT_BUF(tb);
+	store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
+	store32(tb, TPM_UNSEAL_SIZE + bloblen);
+	store32(tb, TPM_ORD_UNSEAL);
+	store32(tb, keyhandle);
+	storebytes(tb, blob, bloblen);
+	store32(tb, authhandle1);
+	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+	store8(tb, cont);
+	storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
+	store32(tb, authhandle2);
+	storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+	store8(tb, cont);
+	storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
+
+	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+	if (ret < 0) {
+		pr_info("trusted_key: authhmac failed (%d)\n", ret);
+		return ret;
+	}
+
+	*datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+	ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
+			     keyauth, SHA1_DIGEST_SIZE,
+			     blobauth, SHA1_DIGEST_SIZE,
+			     sizeof(uint32_t), TPM_DATA_OFFSET,
+			     *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
+			     0);
+	if (ret < 0) {
+		pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
+		return ret;
+	}
+	memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
+	return 0;
+}
+
+/*
+ * Have the TPM seal (encrypt) the symmetric key
+ */
+static int key_seal(struct trusted_key_payload *p,
+		    struct trusted_key_options *o)
+{
+	struct tpm_buf *tb;
+	int ret;
+
+	tb = kzalloc(sizeof *tb, GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
+	/* include migratable flag at end of sealed key */
+	p->key[p->key_len] = p->migratable;
+
+	ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
+		       p->key, p->key_len + 1, p->blob, &p->blob_len,
+		       o->blobauth, o->pcrinfo, o->pcrinfo_len);
+	if (ret < 0)
+		pr_info("trusted_key: srkseal failed (%d)\n", ret);
+
+	kfree(tb);
+	return ret;
+}
+
+/*
+ * Have the TPM unseal (decrypt) the symmetric key
+ */
+static int key_unseal(struct trusted_key_payload *p,
+		      struct trusted_key_options *o)
+{
+	struct tpm_buf *tb;
+	int ret;
+
+	tb = kzalloc(sizeof *tb, GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
+	ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+			 o->blobauth, p->key, &p->key_len);
+	if (ret < 0)
+		pr_info("trusted_key: srkunseal failed (%d)\n", ret);
+	else
+		/* pull migratable flag out of sealed key */
+		p->migratable = p->key[--p->key_len];
+
+	kfree(tb);
+	return ret;
+}
+
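+/* datablob commands and token= options parsed by datablob_parse()/getoptions() */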
+enum {
+	Opt_err = -1,
+	Opt_new, Opt_load, Opt_update,
+	Opt_keyhandle, Opt_keyauth, Opt_blobauth,
+	Opt_pcrinfo, Opt_pcrlock, Opt_migratable
+};
+
+static const match_table_t key_tokens = {
+	{Opt_new, "new"},
+	{Opt_load, "load"},
+	{Opt_update, "update"},
+	{Opt_keyhandle, "keyhandle=%s"},
+	{Opt_keyauth, "keyauth=%s"},
+	{Opt_blobauth, "blobauth=%s"},
+	{Opt_pcrinfo, "pcrinfo=%s"},
+	{Opt_pcrlock, "pcrlock=%s"},
+	{Opt_migratable, "migratable=%s"},
+	{Opt_err, NULL}
+};
+
+/* can have zero or more token= options */
+static int getoptions(char *c, struct trusted_key_payload *pay,
+		      struct trusted_key_options *opt)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *p = c;
+	int token;
+	int res;
+	unsigned long handle;
+	unsigned long lock;
+
+	while ((p = strsep(&c, " \t"))) {
+		if (*p == '\0' || *p == ' ' || *p == '\t')
+			continue;
+		token = match_token(p, key_tokens, args);
+
+		switch (token) {
+		case Opt_pcrinfo:
+			opt->pcrinfo_len = strlen(args[0].from) / 2;
+			if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
+				return -EINVAL;
+			hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len);
+			break;
+		case Opt_keyhandle:
+			res = strict_strtoul(args[0].from, 16, &handle);
+			if (res < 0)
+				return -EINVAL;
+			opt->keytype = SEAL_keytype;
+			opt->keyhandle = handle;
+			break;
+		case Opt_keyauth:
+			if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+				return -EINVAL;
+			hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE);
+			break;
+		case Opt_blobauth:
+			if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+				return -EINVAL;
+			hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE);
+			break;
+		case Opt_migratable:
+			if (*args[0].from == '0')
+				pay->migratable = 0;
+			else
+				return -EINVAL;
+			break;
+		case Opt_pcrlock:
+			res = strict_strtoul(args[0].from, 10, &lock);
+			if (res < 0)
+				return -EINVAL;
+			opt->pcrlock = lock;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ * 		    payload and options structures
+ *
+ * On success returns the matched command (Opt_new, Opt_load or
+ * Opt_update); otherwise returns -EINVAL.
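+ *
+ * Illustrative datablobs, as passed down from keyctl (the authoritative
+ * syntax is documented in Documentation/keys-trusted-encrypted.txt):
+ *
+ *   "new 32"
+ *   "new 32 keyhandle=0x40000000 pcrlock=8"
+ *   "load <hex encoded blob> keyhandle=0x40000000"
+ *   "update pcrinfo=<hex encoded pcrinfo>"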
+ */
+static int datablob_parse(char *datablob, struct trusted_key_payload *p,
+			  struct trusted_key_options *o)
+{
+	substring_t args[MAX_OPT_ARGS];
+	long keylen;
+	int ret = -EINVAL;
+	int key_cmd;
+	char *c;
+
+	/* main command */
+	c = strsep(&datablob, " \t");
+	if (!c)
+		return -EINVAL;
+	key_cmd = match_token(c, key_tokens, args);
+	switch (key_cmd) {
+	case Opt_new:
+		/* first argument is key size */
+		c = strsep(&datablob, " \t");
+		if (!c)
+			return -EINVAL;
+		ret = strict_strtol(c, 10, &keylen);
+		if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+			return -EINVAL;
+		p->key_len = keylen;
+		ret = getoptions(datablob, p, o);
+		if (ret < 0)
+			return ret;
+		ret = Opt_new;
+		break;
+	case Opt_load:
+		/* first argument is sealed blob */
+		c = strsep(&datablob, " \t");
+		if (!c)
+			return -EINVAL;
+		p->blob_len = strlen(c) / 2;
+		if (p->blob_len > MAX_BLOB_SIZE)
+			return -EINVAL;
+		hex2bin(p->blob, c, p->blob_len);
+		ret = getoptions(datablob, p, o);
+		if (ret < 0)
+			return ret;
+		ret = Opt_load;
+		break;
+	case Opt_update:
+		/* all arguments are options */
+		ret = getoptions(datablob, p, o);
+		if (ret < 0)
+			return ret;
+		ret = Opt_update;
+		break;
+	case Opt_err:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static struct trusted_key_options *trusted_options_alloc(void)
+{
+	struct trusted_key_options *options;
+
+	options = kzalloc(sizeof *options, GFP_KERNEL);
+	if (options) {
+		/* set any non-zero defaults */
+		options->keytype = SRK_keytype;
+		options->keyhandle = SRKHANDLE;
+	}
+	return options;
+}
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+	struct trusted_key_payload *p = NULL;
+	int ret;
+
+	ret = key_payload_reserve(key, sizeof *p);
+	if (ret < 0)
+		return p;
+	p = kzalloc(sizeof *p, GFP_KERNEL);
+	if (p)
+		p->migratable = 1; /* migratable by default */
+	return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key, const void *data,
+			       size_t datalen)
+{
+	struct trusted_key_payload *payload = NULL;
+	struct trusted_key_options *options = NULL;
+	char *datablob;
+	int ret = 0;
+	int key_cmd;
+
+	if (datalen <= 0 || datalen > 32767 || !data)
+		return -EINVAL;
+
+	datablob = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!datablob)
+		return -ENOMEM;
+	memcpy(datablob, data, datalen);
+	datablob[datalen] = '\0';
+
+	options = trusted_options_alloc();
+	if (!options) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	payload = trusted_payload_alloc(key);
+	if (!payload) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	key_cmd = datablob_parse(datablob, payload, options);
+	if (key_cmd < 0) {
+		ret = key_cmd;
+		goto out;
+	}
+
+	dump_payload(payload);
+	dump_options(options);
+
+	switch (key_cmd) {
+	case Opt_load:
+		ret = key_unseal(payload, options);
+		dump_payload(payload);
+		dump_options(options);
+		if (ret < 0)
+			pr_info("trusted_key: key_unseal failed (%d)\n", ret);
+		break;
+	case Opt_new:
+		ret = my_get_random(payload->key, payload->key_len);
+		if (ret < 0) {
+			pr_info("trusted_key: key_create failed (%d)\n", ret);
+			goto out;
+		}
+		ret = key_seal(payload, options);
+		if (ret < 0)
+			pr_info("trusted_key: key_seal failed (%d)\n", ret);
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+	if (!ret && options->pcrlock)
+		ret = pcrlock(options->pcrlock);
+out:
+	kfree(datablob);
+	kfree(options);
+	if (!ret)
+		rcu_assign_pointer(key->payload.data, payload);
+	else
+		kfree(payload);
+	return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+	struct trusted_key_payload *p;
+
+	p = container_of(rcu, struct trusted_key_payload, rcu);
+	memset(p->key, 0, p->key_len);
+	kfree(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, const void *data, size_t datalen)
+{
+	struct trusted_key_payload *p = key->payload.data;
+	struct trusted_key_payload *new_p;
+	struct trusted_key_options *new_o;
+	char *datablob;
+	int ret = 0;
+
+	if (!p->migratable)
+		return -EPERM;
+	if (datalen <= 0 || datalen > 32767 || !data)
+		return -EINVAL;
+
+	datablob = kmalloc(datalen + 1, GFP_KERNEL);
+	if (!datablob)
+		return -ENOMEM;
+	new_o = trusted_options_alloc();
+	if (!new_o) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	new_p = trusted_payload_alloc(key);
+	if (!new_p) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(datablob, data, datalen);
+	datablob[datalen] = '\0';
+	ret = datablob_parse(datablob, new_p, new_o);
+	if (ret != Opt_update) {
+		ret = -EINVAL;
+		kfree(new_p);
+		goto out;
+	}
+	/* copy old key values, and reseal with new pcrs */
+	new_p->migratable = p->migratable;
+	new_p->key_len = p->key_len;
+	memcpy(new_p->key, p->key, p->key_len);
+	dump_payload(p);
+	dump_payload(new_p);
+
+	ret = key_seal(new_p, new_o);
+	if (ret < 0) {
+		pr_info("trusted_key: key_seal failed (%d)\n", ret);
+		kfree(new_p);
+		goto out;
+	}
+	if (new_o->pcrlock) {
+		ret = pcrlock(new_o->pcrlock);
+		if (ret < 0) {
+			pr_info("trusted_key: pcrlock failed (%d)\n", ret);
+			kfree(new_p);
+			goto out;
+		}
+	}
+	rcu_assign_pointer(key->payload.data, new_p);
+	call_rcu(&p->rcu, trusted_rcu_free);
+out:
+	kfree(datablob);
+	kfree(new_o);
+	return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char __user *buffer,
+			 size_t buflen)
+{
+	struct trusted_key_payload *p;
+	char *ascii_buf;
+	char *bufp;
+	int i;
+
+	p = rcu_dereference_protected(key->payload.data,
+			rwsem_is_locked(&((struct key *)key)->sem));
+	if (!p)
+		return -EINVAL;
+	if (!buffer || buflen <= 0)
+		return 2 * p->blob_len;
+	ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+	if (!ascii_buf)
+		return -ENOMEM;
+
+	bufp = ascii_buf;
+	for (i = 0; i < p->blob_len; i++)
+		bufp = pack_hex_byte(bufp, p->blob[i]);
+	if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
+		kfree(ascii_buf);
+		return -EFAULT;
+	}
+	kfree(ascii_buf);
+	return 2 * p->blob_len;
+}
+
+/*
+ * trusted_destroy - before freeing the key, clear the unsealed key data
+ */
+static void trusted_destroy(struct key *key)
+{
+	struct trusted_key_payload *p = key->payload.data;
+
+	if (!p)
+		return;
+	memset(p->key, 0, p->key_len);
+	kfree(key->payload.data);
+}
+
+struct key_type key_type_trusted = {
+	.name = "trusted",
+	.instantiate = trusted_instantiate,
+	.update = trusted_update,
+	.match = user_match,
+	.destroy = trusted_destroy,
+	.describe = user_describe,
+	.read = trusted_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_trusted);
+
+static void trusted_shash_release(void)
+{
+	if (hashalg)
+		crypto_free_shash(hashalg);
+	if (hmacalg)
+		crypto_free_shash(hmacalg);
+}
+
+static int __init trusted_shash_alloc(void)
+{
+	int ret;
+
+	hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hmacalg)) {
+		pr_info("trusted_key: could not allocate crypto %s\n",
+			hmac_alg);
+		return PTR_ERR(hmacalg);
+	}
+
+	hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hashalg)) {
+		pr_info("trusted_key: could not allocate crypto %s\n",
+			hash_alg);
+		ret = PTR_ERR(hashalg);
+		goto hashalg_fail;
+	}
+
+	return 0;
+
+hashalg_fail:
+	crypto_free_shash(hmacalg);
+	return ret;
+}
+
+static int __init init_trusted(void)
+{
+	int ret;
+
+	ret = trusted_shash_alloc();
+	if (ret < 0)
+		return ret;
+	ret = register_key_type(&key_type_trusted);
+	if (ret < 0)
+		trusted_shash_release();
+	return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+	trusted_shash_release();
+	unregister_key_type(&key_type_trusted);
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/trusted_defined.h b/security/keys/trusted_defined.h
new file mode 100644
index 0000000..3249fbd
--- /dev/null
+++ b/security/keys/trusted_defined.h
@@ -0,0 +1,134 @@
+#ifndef __TRUSTED_KEY_H
+#define __TRUSTED_KEY_H
+
+/* implementation specific TPM constants */
+#define MAX_PCRINFO_SIZE		64
+#define MAX_BUF_SIZE			512
+#define TPM_GETRANDOM_SIZE		14
+#define TPM_OSAP_SIZE			36
+#define TPM_OIAP_SIZE			10
+#define TPM_SEAL_SIZE			87
+#define TPM_UNSEAL_SIZE			104
+#define TPM_SIZE_OFFSET			2
+#define TPM_RETURN_OFFSET		6
+#define TPM_DATA_OFFSET			10
+
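+/* pull fields out of a TPM response buffer (TPM wire format is big-endian) */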
+#define LOAD32(buffer, offset)	(ntohl(*(uint32_t *)&buffer[offset]))
+#define LOAD32N(buffer, offset)	(*(uint32_t *)&buffer[offset])
+#define LOAD16(buffer, offset)	(ntohs(*(uint16_t *)&buffer[offset]))
+
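+/* scratch buffer used to build TPM request packets and hold the response */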
+struct tpm_buf {
+	int len;
+	unsigned char data[MAX_BUF_SIZE];
+};
+
+#define INIT_BUF(tb) (tb->len = 0)
+
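+/* state for an OSAP authorization session, established by osap() */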
+struct osapsess {
+	uint32_t handle;
+	unsigned char secret[SHA1_DIGEST_SIZE];
+	unsigned char enonce[TPM_NONCE_SIZE];
+};
+
+/* discrete values, but have to store in uint16_t for TPM use */
+enum {
+	SEAL_keytype = 1,
+	SRK_keytype = 4
+};
+
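+/* per-key sealing options parsed from the keyctl datablob */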
+struct trusted_key_options {
+	uint16_t keytype;
+	uint32_t keyhandle;
+	unsigned char keyauth[SHA1_DIGEST_SIZE];
+	unsigned char blobauth[SHA1_DIGEST_SIZE];
+	uint32_t pcrinfo_len;
+	unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+	int pcrlock;
+};
+
+#define TPM_DEBUG 0
+
+#if TPM_DEBUG
+static inline void dump_options(struct trusted_key_options *o)
+{
+	pr_info("trusted_key: sealing key type %d\n", o->keytype);
+	pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
+	pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
+	pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+	print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
+		       16, 1, o->pcrinfo, o->pcrinfo_len, 0);
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+	pr_info("trusted_key: key_len %d\n", p->key_len);
+	print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+		       16, 1, p->key, p->key_len, 0);
+	pr_info("trusted_key: bloblen %d\n", p->blob_len);
+	print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+		       16, 1, p->blob, p->blob_len, 0);
+	pr_info("trusted_key: migratable %d\n", p->migratable);
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+	print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
+		       16, 1, &s->handle, 4, 0);
+	pr_info("trusted-key: secret:\n");
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+		       16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
+	pr_info("trusted-key: enonce:\n");
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+		       16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+	int len;
+
+	pr_info("\ntrusted-key: tpm buffer\n");
+	len = LOAD32(buf, TPM_SIZE_OFFSET);
+	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
+}
+#else
+static inline void dump_options(struct trusted_key_options *o)
+{
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+}
+#endif
+
+static inline void store8(struct tpm_buf *buf, const unsigned char value)
+{
+	buf->data[buf->len++] = value;
+}
+
+static inline void store16(struct tpm_buf *buf, const uint16_t value)
+{
+	*(uint16_t *) & buf->data[buf->len] = htons(value);
+	buf->len += sizeof value;
+}
+
+static inline void store32(struct tpm_buf *buf, const uint32_t value)
+{
+	*(uint32_t *) & buf->data[buf->len] = htonl(value);
+	buf->len += sizeof value;
+}
+
+static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
+			      const int len)
+{
+	memcpy(buf->data + buf->len, in, len);
+	buf->len += len;
+}
+#endif
diff --git a/security/security.c b/security/security.c
index e5fb07a..739e403 100644
--- a/security/security.c
+++ b/security/security.c
@@ -513,6 +513,15 @@
 	return security_ops->inode_permission(inode, mask);
 }
 
+int security_inode_exec_permission(struct inode *inode, unsigned int flags)
+{
+	if (unlikely(IS_PRIVATE(inode)))
+		return 0;
+	if (flags)
+		return -ECHILD;
+	return security_ops->inode_permission(inode, MAY_EXEC);
+}
+
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	if (unlikely(IS_PRIVATE(dentry->d_inode)))
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6f637d2..e276eb4 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2525,7 +2525,10 @@
 	sid = tsec->sid;
 	newsid = tsec->create_sid;
 
-	if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+	if ((sbsec->flags & SE_SBINITIALIZED) &&
+	    (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
+		newsid = sbsec->mntpoint_sid;
+	else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
 		rc = security_transition_sid(sid, dsec->sid,
 					     inode_mode_to_security_class(inode->i_mode),
 					     &newsid);
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index e94e82f..5615081 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -15,7 +15,6 @@
 #include <linux/audit.h>
 #include <linux/lsm_audit.h>
 #include <linux/in6.h>
-#include <linux/path.h>
 #include <asm/system.h>
 #include "flask.h"
 #include "av_permissions.h"
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 8858d2b..7ed3663 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -142,7 +142,7 @@
 	    "node_bind", "name_connect", NULL } },
 	{ "memprotect", { "mmap_zero", NULL } },
 	{ "peer", { "recv", NULL } },
-	{ "capability2", { "mac_override", "mac_admin", NULL } },
+	{ "capability2", { "mac_override", "mac_admin", "syslog", NULL } },
 	{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
 	{ "tun_socket",
 	  { COMMON_SOCK_PERMS, NULL } },
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 75ec0c6..8b02b21 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -65,6 +65,8 @@
 	{ RTM_NEWADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 	{ RTM_DELADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 	{ RTM_GETADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_GETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
+	{ RTM_SETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 };
 
 static struct nlmsg_perm nlmsg_firewall_perms[] =
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 073fd5b..ea39cb7 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -141,19 +141,24 @@
 				 size_t count, loff_t *ppos)
 
 {
-	char *page;
+	char *page = NULL;
 	ssize_t length;
 	int new_value;
 
+	length = -ENOMEM;
 	if (count >= PAGE_SIZE)
-		return -ENOMEM;
-	if (*ppos != 0) {
-		/* No partial writes. */
-		return -EINVAL;
-	}
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
 	if (!page)
-		return -ENOMEM;
+		goto out;
+
 	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
 		goto out;
@@ -268,20 +273,25 @@
 				 size_t count, loff_t *ppos)
 
 {
-	char *page;
+	char *page = NULL;
 	ssize_t length;
 	int new_value;
 	extern int selinux_disable(void);
 
+	length = -ENOMEM;
 	if (count >= PAGE_SIZE)
-		return -ENOMEM;
-	if (*ppos != 0) {
-		/* No partial writes. */
-		return -EINVAL;
-	}
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
 	if (!page)
-		return -ENOMEM;
+		goto out;
+
 	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
 		goto out;
@@ -292,7 +302,7 @@
 
 	if (new_value) {
 		length = selinux_disable();
-		if (length < 0)
+		if (length)
 			goto out;
 		audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
 			"selinux=0 auid=%u ses=%u",
@@ -493,7 +503,6 @@
 			      size_t count, loff_t *ppos)
 
 {
-	int ret;
 	ssize_t length;
 	void *data = NULL;
 
@@ -503,17 +512,19 @@
 	if (length)
 		goto out;
 
-	if (*ppos != 0) {
-		/* No partial writes. */
-		length = -EINVAL;
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
 		goto out;
-	}
 
-	if ((count > 64 * 1024 * 1024)
-	    || (data = vmalloc(count)) == NULL) {
-		length = -ENOMEM;
+	length = -EFBIG;
+	if (count > 64 * 1024 * 1024)
 		goto out;
-	}
+
+	length = -ENOMEM;
+	data = vmalloc(count);
+	if (!data)
+		goto out;
 
 	length = -EFAULT;
 	if (copy_from_user(data, buf, count) != 0)
@@ -523,23 +534,19 @@
 	if (length)
 		goto out;
 
-	ret = sel_make_bools();
-	if (ret) {
-		length = ret;
+	length = sel_make_bools();
+	if (length)
 		goto out1;
-	}
 
-	ret = sel_make_classes();
-	if (ret) {
-		length = ret;
+	length = sel_make_classes();
+	if (length)
 		goto out1;
-	}
 
-	ret = sel_make_policycap();
-	if (ret)
-		length = ret;
-	else
-		length = count;
+	length = sel_make_policycap();
+	if (length)
+		goto out1;
+
+	length = count;
 
 out1:
 	audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
@@ -559,26 +566,26 @@
 
 static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
 {
-	char *canon;
+	char *canon = NULL;
 	u32 sid, len;
 	ssize_t length;
 
 	length = task_has_security(current, SECURITY__CHECK_CONTEXT);
 	if (length)
-		return length;
+		goto out;
 
 	length = security_context_to_sid(buf, size, &sid);
-	if (length < 0)
-		return length;
+	if (length)
+		goto out;
 
 	length = security_sid_to_context(sid, &canon, &len);
-	if (length < 0)
-		return length;
+	if (length)
+		goto out;
 
+	length = -ERANGE;
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "SELinux: %s:  context size (%u) exceeds "
 			"payload max\n", __func__, len);
-		length = -ERANGE;
 		goto out;
 	}
 
@@ -602,23 +609,28 @@
 static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
 				      size_t count, loff_t *ppos)
 {
-	char *page;
+	char *page = NULL;
 	ssize_t length;
 	unsigned int new_value;
 
 	length = task_has_security(current, SECURITY__SETCHECKREQPROT);
 	if (length)
-		return length;
+		goto out;
 
+	length = -ENOMEM;
 	if (count >= PAGE_SIZE)
-		return -ENOMEM;
-	if (*ppos != 0) {
-		/* No partial writes. */
-		return -EINVAL;
-	}
+		goto out;
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	length = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
 	if (!page)
-		return -ENOMEM;
+		goto out;
+
 	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
 		goto out;
@@ -693,7 +705,7 @@
 
 static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
 {
-	char *scon, *tcon;
+	char *scon = NULL, *tcon = NULL;
 	u32 ssid, tsid;
 	u16 tclass;
 	struct av_decision avd;
@@ -701,27 +713,29 @@
 
 	length = task_has_security(current, SECURITY__COMPUTE_AV);
 	if (length)
-		return length;
+		goto out;
 
 	length = -ENOMEM;
 	scon = kzalloc(size + 1, GFP_KERNEL);
 	if (!scon)
-		return length;
+		goto out;
 
+	length = -ENOMEM;
 	tcon = kzalloc(size + 1, GFP_KERNEL);
 	if (!tcon)
 		goto out;
 
 	length = -EINVAL;
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
-		goto out2;
+		goto out;
 
 	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
+
 	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	security_compute_av_user(ssid, tsid, tclass, &avd);
 
@@ -730,133 +744,131 @@
 			  avd.allowed, 0xffffffff,
 			  avd.auditallow, avd.auditdeny,
 			  avd.seqno, avd.flags);
-out2:
-	kfree(tcon);
 out:
+	kfree(tcon);
 	kfree(scon);
 	return length;
 }
 
 static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 {
-	char *scon, *tcon;
+	char *scon = NULL, *tcon = NULL;
 	u32 ssid, tsid, newsid;
 	u16 tclass;
 	ssize_t length;
-	char *newcon;
+	char *newcon = NULL;
 	u32 len;
 
 	length = task_has_security(current, SECURITY__COMPUTE_CREATE);
 	if (length)
-		return length;
+		goto out;
 
 	length = -ENOMEM;
 	scon = kzalloc(size + 1, GFP_KERNEL);
 	if (!scon)
-		return length;
+		goto out;
 
+	length = -ENOMEM;
 	tcon = kzalloc(size + 1, GFP_KERNEL);
 	if (!tcon)
 		goto out;
 
 	length = -EINVAL;
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
-		goto out2;
+		goto out;
 
 	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
+
 	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_transition_sid_user(ssid, tsid, tclass, &newsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_sid_to_context(newsid, &newcon, &len);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
+	length = -ERANGE;
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "SELinux: %s:  context size (%u) exceeds "
 			"payload max\n", __func__, len);
-		length = -ERANGE;
-		goto out3;
+		goto out;
 	}
 
 	memcpy(buf, newcon, len);
 	length = len;
-out3:
-	kfree(newcon);
-out2:
-	kfree(tcon);
 out:
+	kfree(newcon);
+	kfree(tcon);
 	kfree(scon);
 	return length;
 }
 
 static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
 {
-	char *scon, *tcon;
+	char *scon = NULL, *tcon = NULL;
 	u32 ssid, tsid, newsid;
 	u16 tclass;
 	ssize_t length;
-	char *newcon;
+	char *newcon = NULL;
 	u32 len;
 
 	length = task_has_security(current, SECURITY__COMPUTE_RELABEL);
 	if (length)
-		return length;
+		goto out;
 
 	length = -ENOMEM;
 	scon = kzalloc(size + 1, GFP_KERNEL);
 	if (!scon)
-		return length;
+		goto out;
 
+	length = -ENOMEM;
 	tcon = kzalloc(size + 1, GFP_KERNEL);
 	if (!tcon)
 		goto out;
 
 	length = -EINVAL;
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
-		goto out2;
+		goto out;
 
 	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
+
 	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_change_sid(ssid, tsid, tclass, &newsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_sid_to_context(newsid, &newcon, &len);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
-	if (len > SIMPLE_TRANSACTION_LIMIT) {
-		length = -ERANGE;
-		goto out3;
-	}
+	length = -ERANGE;
+	if (len > SIMPLE_TRANSACTION_LIMIT)
+		goto out;
 
 	memcpy(buf, newcon, len);
 	length = len;
-out3:
-	kfree(newcon);
-out2:
-	kfree(tcon);
 out:
+	kfree(newcon);
+	kfree(tcon);
 	kfree(scon);
 	return length;
 }
 
 static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
 {
-	char *con, *user, *ptr;
-	u32 sid, *sids;
+	char *con = NULL, *user = NULL, *ptr;
+	u32 sid, *sids = NULL;
 	ssize_t length;
 	char *newcon;
 	int i, rc;
@@ -864,28 +876,29 @@
 
 	length = task_has_security(current, SECURITY__COMPUTE_USER);
 	if (length)
-		return length;
+		goto out;
 
 	length = -ENOMEM;
 	con = kzalloc(size + 1, GFP_KERNEL);
 	if (!con)
-		return length;
+		goto out;
 
+	length = -ENOMEM;
 	user = kzalloc(size + 1, GFP_KERNEL);
 	if (!user)
 		goto out;
 
 	length = -EINVAL;
 	if (sscanf(buf, "%s %s", con, user) != 2)
-		goto out2;
+		goto out;
 
 	length = security_context_to_sid(con, strlen(con) + 1, &sid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_get_user_sids(sid, user, &sids, &nsids);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = sprintf(buf, "%u", nsids) + 1;
 	ptr = buf + length;
@@ -893,82 +906,80 @@
 		rc = security_sid_to_context(sids[i], &newcon, &len);
 		if (rc) {
 			length = rc;
-			goto out3;
+			goto out;
 		}
 		if ((length + len) >= SIMPLE_TRANSACTION_LIMIT) {
 			kfree(newcon);
 			length = -ERANGE;
-			goto out3;
+			goto out;
 		}
 		memcpy(ptr, newcon, len);
 		kfree(newcon);
 		ptr += len;
 		length += len;
 	}
-out3:
-	kfree(sids);
-out2:
-	kfree(user);
 out:
+	kfree(sids);
+	kfree(user);
 	kfree(con);
 	return length;
 }
 
 static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
 {
-	char *scon, *tcon;
+	char *scon = NULL, *tcon = NULL;
 	u32 ssid, tsid, newsid;
 	u16 tclass;
 	ssize_t length;
-	char *newcon;
+	char *newcon = NULL;
 	u32 len;
 
 	length = task_has_security(current, SECURITY__COMPUTE_MEMBER);
 	if (length)
-		return length;
+		goto out;
 
 	length = -ENOMEM;
 	scon = kzalloc(size + 1, GFP_KERNEL);
 	if (!scon)
-		return length;
+		goto out;
 
+	length = -ENOMEM;
 	tcon = kzalloc(size + 1, GFP_KERNEL);
 	if (!tcon)
 		goto out;
 
 	length = -EINVAL;
 	if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
-		goto out2;
+		goto out;
 
 	length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
+
 	length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_member_sid(ssid, tsid, tclass, &newsid);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
 	length = security_sid_to_context(newsid, &newcon, &len);
-	if (length < 0)
-		goto out2;
+	if (length)
+		goto out;
 
+	length = -ERANGE;
 	if (len > SIMPLE_TRANSACTION_LIMIT) {
 		printk(KERN_ERR "SELinux: %s:  context size (%u) exceeds "
 			"payload max\n", __func__, len);
-		length = -ERANGE;
-		goto out3;
+		goto out;
 	}
 
 	memcpy(buf, newcon, len);
 	length = len;
-out3:
-	kfree(newcon);
-out2:
-	kfree(tcon);
 out:
+	kfree(newcon);
+	kfree(tcon);
 	kfree(scon);
 	return length;
 }
@@ -978,7 +989,6 @@
 	struct inode *ret = new_inode(sb);
 
 	if (ret) {
-		ret->i_ino = get_next_ino();
 		ret->i_mode = mode;
 		ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
 	}
@@ -998,16 +1008,14 @@
 
 	mutex_lock(&sel_mutex);
 
-	if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
-		ret = -EINVAL;
+	ret = -EINVAL;
+	if (index >= bool_num || strcmp(name, bool_pending_names[index]))
 		goto out;
-	}
 
+	ret = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
-	if (!page) {
-		ret = -ENOMEM;
+	if (!page)
 		goto out;
-	}
 
 	cur_enforcing = security_get_bool_value(index);
 	if (cur_enforcing < 0) {
@@ -1019,8 +1027,7 @@
 	ret = simple_read_from_buffer(buf, count, ppos, page, length);
 out:
 	mutex_unlock(&sel_mutex);
-	if (page)
-		free_page((unsigned long)page);
+	free_page((unsigned long)page);
 	return ret;
 }
 
@@ -1040,26 +1047,23 @@
 	if (length)
 		goto out;
 
-	if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
-		length = -EINVAL;
+	length = -EINVAL;
+	if (index >= bool_num || strcmp(name, bool_pending_names[index]))
 		goto out;
-	}
 
-	if (count >= PAGE_SIZE) {
-		length = -ENOMEM;
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
 		goto out;
-	}
 
-	if (*ppos != 0) {
-		/* No partial writes. */
-		length = -EINVAL;
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
 		goto out;
-	}
+
+	length = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
-	if (!page) {
-		length = -ENOMEM;
+	if (!page)
 		goto out;
-	}
 
 	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
@@ -1077,8 +1081,7 @@
 
 out:
 	mutex_unlock(&sel_mutex);
-	if (page)
-		free_page((unsigned long) page);
+	free_page((unsigned long) page);
 	return length;
 }
 
@@ -1102,19 +1105,19 @@
 	if (length)
 		goto out;
 
-	if (count >= PAGE_SIZE) {
-		length = -ENOMEM;
+	length = -ENOMEM;
+	if (count >= PAGE_SIZE)
 		goto out;
-	}
-	if (*ppos != 0) {
-		/* No partial writes. */
+
+	/* No partial writes. */
+	length = -EINVAL;
+	if (*ppos != 0)
 		goto out;
-	}
+
+	length = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
-	if (!page) {
-		length = -ENOMEM;
+	if (!page)
 		goto out;
-	}
 
 	length = -EFAULT;
 	if (copy_from_user(page, buf, count))
@@ -1124,15 +1127,16 @@
 	if (sscanf(page, "%d", &new_value) != 1)
 		goto out;
 
+	length = 0;
 	if (new_value && bool_pending_values)
-		security_set_bools(bool_num, bool_pending_values);
+		length = security_set_bools(bool_num, bool_pending_values);
 
-	length = count;
+	if (!length)
+		length = count;
 
 out:
 	mutex_unlock(&sel_mutex);
-	if (page)
-		free_page((unsigned long) page);
+	free_page((unsigned long) page);
 	return length;
 }
 
@@ -1145,31 +1149,35 @@
 {
 	struct list_head *node;
 
-	spin_lock(&dcache_lock);
+	spin_lock(&de->d_lock);
 	node = de->d_subdirs.next;
 	while (node != &de->d_subdirs) {
 		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+
+		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
 		list_del_init(node);
 
 		if (d->d_inode) {
-			d = dget_locked(d);
-			spin_unlock(&dcache_lock);
+			dget_dlock(d);
+			spin_unlock(&de->d_lock);
+			spin_unlock(&d->d_lock);
 			d_delete(d);
 			simple_unlink(de->d_inode, d);
 			dput(d);
-			spin_lock(&dcache_lock);
-		}
+			spin_lock(&de->d_lock);
+		} else
+			spin_unlock(&d->d_lock);
 		node = de->d_subdirs.next;
 	}
 
-	spin_unlock(&dcache_lock);
+	spin_unlock(&de->d_lock);
 }
 
 #define BOOL_DIR_NAME "booleans"
 
 static int sel_make_bools(void)
 {
-	int i, ret = 0;
+	int i, ret;
 	ssize_t len;
 	struct dentry *dentry = NULL;
 	struct dentry *dir = bool_dir;
@@ -1190,38 +1198,40 @@
 
 	sel_remove_entries(dir);
 
+	ret = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
 	if (!page)
-		return -ENOMEM;
+		goto out;
 
 	ret = security_get_bools(&num, &names, &values);
-	if (ret != 0)
+	if (ret)
 		goto out;
 
 	for (i = 0; i < num; i++) {
+		ret = -ENOMEM;
 		dentry = d_alloc_name(dir, names[i]);
-		if (!dentry) {
-			ret = -ENOMEM;
-			goto err;
-		}
-		inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
-		if (!inode) {
-			ret = -ENOMEM;
-			goto err;
-		}
+		if (!dentry)
+			goto out;
 
+		ret = -ENOMEM;
+		inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
+		if (!inode)
+			goto out;
+
+		ret = -EINVAL;
 		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
-		if (len < 0) {
-			ret = -EINVAL;
-			goto err;
-		} else if (len >= PAGE_SIZE) {
-			ret = -ENAMETOOLONG;
-			goto err;
-		}
+		if (len < 0)
+			goto out;
+
+		ret = -ENAMETOOLONG;
+		if (len >= PAGE_SIZE)
+			goto out;
+
 		isec = (struct inode_security_struct *)inode->i_security;
 		ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid);
 		if (ret)
-			goto err;
+			goto out;
+
 		isec->sid = sid;
 		isec->initialized = 1;
 		inode->i_fop = &sel_bool_ops;
@@ -1231,10 +1241,12 @@
 	bool_num = num;
 	bool_pending_names = names;
 	bool_pending_values = values;
+
+	free_page((unsigned long)page);
+	return 0;
 out:
 	free_page((unsigned long)page);
-	return ret;
-err:
+
 	if (names) {
 		for (i = 0; i < num; i++)
 			kfree(names[i]);
@@ -1242,8 +1254,8 @@
 	}
 	kfree(values);
 	sel_remove_entries(dir);
-	ret = -ENOMEM;
-	goto out;
+
+	return ret;
 }
 
 #define NULL_FILE_NAME "null"
@@ -1265,47 +1277,41 @@
 					     size_t count, loff_t *ppos)
 
 {
-	char *page;
+	char *page = NULL;
 	ssize_t ret;
 	int new_value;
 
-	if (count >= PAGE_SIZE) {
-		ret = -ENOMEM;
+	ret = task_has_security(current, SECURITY__SETSECPARAM);
+	if (ret)
 		goto out;
-	}
 
-	if (*ppos != 0) {
-		/* No partial writes. */
-		ret = -EINVAL;
+	ret = -ENOMEM;
+	if (count >= PAGE_SIZE)
 		goto out;
-	}
 
+	/* No partial writes. */
+	ret = -EINVAL;
+	if (*ppos != 0)
+		goto out;
+
+	ret = -ENOMEM;
 	page = (char *)get_zeroed_page(GFP_KERNEL);
-	if (!page) {
-		ret = -ENOMEM;
+	if (!page)
 		goto out;
-	}
 
-	if (copy_from_user(page, buf, count)) {
-		ret = -EFAULT;
-		goto out_free;
-	}
-
-	if (sscanf(page, "%u", &new_value) != 1) {
-		ret = -EINVAL;
+	ret = -EFAULT;
+	if (copy_from_user(page, buf, count))
 		goto out;
-	}
 
-	if (new_value != avc_cache_threshold) {
-		ret = task_has_security(current, SECURITY__SETSECPARAM);
-		if (ret)
-			goto out_free;
-		avc_cache_threshold = new_value;
-	}
+	ret = -EINVAL;
+	if (sscanf(page, "%u", &new_value) != 1)
+		goto out;
+
+	avc_cache_threshold = new_value;
+
 	ret = count;
-out_free:
-	free_page((unsigned long)page);
 out:
+	free_page((unsigned long)page);
 	return ret;
 }
 
@@ -1313,19 +1319,18 @@
 				       size_t count, loff_t *ppos)
 {
 	char *page;
-	ssize_t ret = 0;
+	ssize_t length;
 
 	page = (char *)__get_free_page(GFP_KERNEL);
-	if (!page) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	ret = avc_get_hash_stats(page);
-	if (ret >= 0)
-		ret = simple_read_from_buffer(buf, count, ppos, page, ret);
+	if (!page)
+		return -ENOMEM;
+
+	length = avc_get_hash_stats(page);
+	if (length >= 0)
+		length = simple_read_from_buffer(buf, count, ppos, page, length);
 	free_page((unsigned long)page);
-out:
-	return ret;
+
+	return length;
 }
 
 static const struct file_operations sel_avc_cache_threshold_ops = {
@@ -1407,7 +1412,7 @@
 
 static int sel_make_avc_files(struct dentry *dir)
 {
-	int i, ret = 0;
+	int i;
 	static struct tree_descr files[] = {
 		{ "cache_threshold",
 		  &sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR },
@@ -1422,22 +1427,19 @@
 		struct dentry *dentry;
 
 		dentry = d_alloc_name(dir, files[i].name);
-		if (!dentry) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!dentry)
+			return -ENOMEM;
 
 		inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
-		if (!inode) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!inode)
+			return -ENOMEM;
+
 		inode->i_fop = files[i].ops;
 		inode->i_ino = ++sel_last_ino;
 		d_add(dentry, inode);
 	}
-out:
-	return ret;
+
+	return 0;
 }
 
 static ssize_t sel_read_initcon(struct file *file, char __user *buf,
@@ -1451,7 +1453,7 @@
 	inode = file->f_path.dentry->d_inode;
 	sid = inode->i_ino&SEL_INO_MASK;
 	ret = security_sid_to_context(sid, &con, &len);
-	if (ret < 0)
+	if (ret)
 		return ret;
 
 	ret = simple_read_from_buffer(buf, count, ppos, con, len);
@@ -1466,28 +1468,25 @@
 
 static int sel_make_initcon_files(struct dentry *dir)
 {
-	int i, ret = 0;
+	int i;
 
 	for (i = 1; i <= SECINITSID_NUM; i++) {
 		struct inode *inode;
 		struct dentry *dentry;
 		dentry = d_alloc_name(dir, security_get_initial_sid_context(i));
-		if (!dentry) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!dentry)
+			return -ENOMEM;
 
 		inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
-		if (!inode) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!inode)
+			return -ENOMEM;
+
 		inode->i_fop = &sel_initcon_ops;
 		inode->i_ino = i|SEL_INITCON_INO_OFFSET;
 		d_add(dentry, inode);
 	}
-out:
-	return ret;
+
+	return 0;
 }
 
 static inline unsigned int sel_div(unsigned long a, unsigned long b)
@@ -1523,15 +1522,13 @@
 	unsigned long ino = file->f_path.dentry->d_inode->i_ino;
 
 	page = (char *)__get_free_page(GFP_KERNEL);
-	if (!page) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!page)
+		return -ENOMEM;
 
 	len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
 	rc = simple_read_from_buffer(buf, count, ppos, page, len);
 	free_page((unsigned long)page);
-out:
+
 	return rc;
 }
 
@@ -1548,15 +1545,13 @@
 	unsigned long ino = file->f_path.dentry->d_inode->i_ino;
 
 	page = (char *)__get_free_page(GFP_KERNEL);
-	if (!page) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!page)
+		return -ENOMEM;
 
 	len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
 	rc = simple_read_from_buffer(buf, count, ppos, page, len);
 	free_page((unsigned long)page);
-out:
+
 	return rc;
 }
 
@@ -1587,39 +1582,37 @@
 static int sel_make_perm_files(char *objclass, int classvalue,
 				struct dentry *dir)
 {
-	int i, rc = 0, nperms;
+	int i, rc, nperms;
 	char **perms;
 
 	rc = security_get_permissions(objclass, &perms, &nperms);
 	if (rc)
-		goto out;
+		return rc;
 
 	for (i = 0; i < nperms; i++) {
 		struct inode *inode;
 		struct dentry *dentry;
 
+		rc = -ENOMEM;
 		dentry = d_alloc_name(dir, perms[i]);
-		if (!dentry) {
-			rc = -ENOMEM;
-			goto out1;
-		}
+		if (!dentry)
+			goto out;
 
+		rc = -ENOMEM;
 		inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
-		if (!inode) {
-			rc = -ENOMEM;
-			goto out1;
-		}
+		if (!inode)
+			goto out;
+
 		inode->i_fop = &sel_perm_ops;
 		/* i+1 since perm values are 1-indexed */
 		inode->i_ino = sel_perm_to_ino(classvalue, i + 1);
 		d_add(dentry, inode);
 	}
-
-out1:
+	rc = 0;
+out:
 	for (i = 0; i < nperms; i++)
 		kfree(perms[i]);
 	kfree(perms);
-out:
 	return rc;
 }
 
@@ -1631,34 +1624,27 @@
 	int rc;
 
 	dentry = d_alloc_name(dir, "index");
-	if (!dentry) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!dentry)
+		return -ENOMEM;
 
 	inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
-	if (!inode) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!inode)
+		return -ENOMEM;
 
 	inode->i_fop = &sel_class_ops;
 	inode->i_ino = sel_class_to_ino(index);
 	d_add(dentry, inode);
 
 	dentry = d_alloc_name(dir, "perms");
-	if (!dentry) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!dentry)
+		return -ENOMEM;
 
 	rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
 	if (rc)
-		goto out;
+		return rc;
 
 	rc = sel_make_perm_files(classname, index, dentry);
 
-out:
 	return rc;
 }
 
@@ -1688,15 +1674,15 @@
 
 static int sel_make_classes(void)
 {
-	int rc = 0, nclasses, i;
+	int rc, nclasses, i;
 	char **classes;
 
 	/* delete any existing entries */
 	sel_remove_classes();
 
 	rc = security_get_classes(&classes, &nclasses);
-	if (rc < 0)
-		goto out;
+	if (rc)
+		return rc;
 
 	/* +2 since classes are 1-indexed */
 	last_class_ino = sel_class_to_ino(nclasses + 2);
@@ -1704,29 +1690,27 @@
 	for (i = 0; i < nclasses; i++) {
 		struct dentry *class_name_dir;
 
+		rc = -ENOMEM;
 		class_name_dir = d_alloc_name(class_dir, classes[i]);
-		if (!class_name_dir) {
-			rc = -ENOMEM;
-			goto out1;
-		}
+		if (!class_name_dir)
+			goto out;
 
 		rc = sel_make_dir(class_dir->d_inode, class_name_dir,
 				&last_class_ino);
 		if (rc)
-			goto out1;
+			goto out;
 
 		/* i+1 since class values are 1-indexed */
 		rc = sel_make_class_dir_entries(classes[i], i + 1,
 				class_name_dir);
 		if (rc)
-			goto out1;
+			goto out;
 	}
-
-out1:
+	rc = 0;
+out:
 	for (i = 0; i < nclasses; i++)
 		kfree(classes[i]);
 	kfree(classes);
-out:
 	return rc;
 }
 
@@ -1763,14 +1747,12 @@
 static int sel_make_dir(struct inode *dir, struct dentry *dentry,
 			unsigned long *ino)
 {
-	int ret = 0;
 	struct inode *inode;
 
 	inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
-	if (!inode) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!inode)
+		return -ENOMEM;
+
 	inode->i_op = &simple_dir_inode_operations;
 	inode->i_fop = &simple_dir_operations;
 	inode->i_ino = ++(*ino);
@@ -1779,8 +1761,8 @@
 	d_add(dentry, inode);
 	/* bump link count on parent directory, too */
 	inc_nlink(dir);
-out:
-	return ret;
+
+	return 0;
 }
 
 static int sel_fill_super(struct super_block *sb, void *data, int silent)
@@ -1816,11 +1798,10 @@
 
 	root_inode = sb->s_root->d_inode;
 
+	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME);
-	if (!dentry) {
-		ret = -ENOMEM;
+	if (!dentry)
 		goto err;
-	}
 
 	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
 	if (ret)
@@ -1828,17 +1809,16 @@
 
 	bool_dir = dentry;
 
+	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
-	if (!dentry) {
-		ret = -ENOMEM;
+	if (!dentry)
 		goto err;
-	}
 
+	ret = -ENOMEM;
 	inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
-	if (!inode) {
-		ret = -ENOMEM;
+	if (!inode)
 		goto err;
-	}
+
 	inode->i_ino = ++sel_last_ino;
 	isec = (struct inode_security_struct *)inode->i_security;
 	isec->sid = SECINITSID_DEVNULL;
@@ -1849,11 +1829,10 @@
 	d_add(dentry, inode);
 	selinux_null = dentry;
 
+	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, "avc");
-	if (!dentry) {
-		ret = -ENOMEM;
+	if (!dentry)
 		goto err;
-	}
 
 	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
 	if (ret)
@@ -1863,11 +1842,10 @@
 	if (ret)
 		goto err;
 
+	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, "initial_contexts");
-	if (!dentry) {
-		ret = -ENOMEM;
+	if (!dentry)
 		goto err;
-	}
 
 	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
 	if (ret)
@@ -1877,11 +1855,10 @@
 	if (ret)
 		goto err;
 
+	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, "class");
-	if (!dentry) {
-		ret = -ENOMEM;
+	if (!dentry)
 		goto err;
-	}
 
 	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
 	if (ret)
@@ -1889,11 +1866,10 @@
 
 	class_dir = dentry;
 
+	ret = -ENOMEM;
 	dentry = d_alloc_name(sb->s_root, "policy_capabilities");
-	if (!dentry) {
-		ret = -ENOMEM;
+	if (!dentry)
 		goto err;
-	}
 
 	ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
 	if (ret)
@@ -1901,12 +1877,11 @@
 
 	policycap_dir = dentry;
 
-out:
-	return ret;
+	return 0;
 err:
 	printk(KERN_ERR "SELinux: %s:  failed while creating inodes\n",
 		__func__);
-	goto out;
+	return ret;
 }
 
 static struct dentry *sel_mount(struct file_system_type *fs_type,
@@ -1930,14 +1905,16 @@
 	if (!selinux_enabled)
 		return 0;
 	err = register_filesystem(&sel_fs_type);
-	if (!err) {
-		selinuxfs_mount = kern_mount(&sel_fs_type);
-		if (IS_ERR(selinuxfs_mount)) {
-			printk(KERN_ERR "selinuxfs:  could not mount!\n");
-			err = PTR_ERR(selinuxfs_mount);
-			selinuxfs_mount = NULL;
-		}
+	if (err)
+		return err;
+
+	selinuxfs_mount = kern_mount(&sel_fs_type);
+	if (IS_ERR(selinuxfs_mount)) {
+		printk(KERN_ERR "selinuxfs:  could not mount!\n");
+		err = PTR_ERR(selinuxfs_mount);
+		selinuxfs_mount = NULL;
 	}
+
 	return err;
 }
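
The selinuxfs.c hunks above repeatedly apply one idiom: assign the prospective error code immediately before the call that can fail, branch to a single exit label on failure, and release the page unconditionally, since free_page(0) (like kfree(NULL)) is a no-op. A minimal standalone sketch of that shape, not code from the file itself; write_sketch() is a hypothetical function whose bounds and error codes mirror the hunks above:

#include <linux/gfp.h>
#include <linux/uaccess.h>

static ssize_t write_sketch(const char __user *buf, size_t count)
{
	char *page = NULL;
	ssize_t length;

	/* set the error code first, then test the condition */
	length = -ENOMEM;
	if (count >= PAGE_SIZE)
		goto out;

	length = -ENOMEM;
	page = (char *)get_zeroed_page(GFP_KERNEL);
	if (!page)
		goto out;

	length = -EFAULT;
	if (copy_from_user(page, buf, count))
		goto out;

	length = count;
out:
	free_page((unsigned long)page);	/* free_page(0) is a no-op */
	return length;
}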
 
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 655fe1c..c3f845c 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -193,6 +193,7 @@
 {
 	struct policydb *p;
 	struct cond_bool_datum *booldatum;
+	struct flex_array *fa;
 
 	booldatum = datum;
 	p = datap;
@@ -200,7 +201,10 @@
 	if (!booldatum->value || booldatum->value > p->p_bools.nprim)
 		return -EINVAL;
 
-	p->p_bool_val_to_name[booldatum->value - 1] = key;
+	fa = p->sym_val_to_name[SYM_BOOLS];
+	if (flex_array_put_ptr(fa, booldatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
 	p->bool_val_to_struct[booldatum->value - 1] = booldatum;
 
 	return 0;
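
cond_index_bool() above is representative of every *_index() callback touched in this series: the (value - 1)-indexed name table is now a flex_array, so the plain pointer-array store becomes flex_array_put_ptr(), and a failure there is treated as a bug because policydb_index() preallocates all slots first (see the policydb.c hunks below). A condensed sketch of the new store path; bool_name_store_sketch() is a hypothetical helper, field names are taken from the hunk above:

#include <linux/flex_array.h>

static void bool_name_store_sketch(struct policydb *p,
				   struct cond_bool_datum *booldatum,
				   char *key)
{
	struct flex_array *fa = p->sym_val_to_name[SYM_BOOLS];

	/* was: p->p_bool_val_to_name[booldatum->value - 1] = key; */
	if (flex_array_put_ptr(fa, booldatum->value - 1, key,
			       GFP_KERNEL | __GFP_ZERO))
		BUG();	/* cannot fail: slots preallocated in policydb_index() */
}
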
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index b4eff7a..1ef8e4e 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -45,7 +45,7 @@
 	len = 1; /* for the beginning ":" */
 	for (l = 0; l < 2; l++) {
 		int index_sens = context->range.level[l].sens;
-		len += strlen(policydb.p_sens_val_to_name[index_sens - 1]);
+		len += strlen(sym_name(&policydb, SYM_LEVELS, index_sens - 1));
 
 		/* categories */
 		head = -2;
@@ -55,17 +55,17 @@
 			if (i - prev > 1) {
 				/* one or more negative bits are skipped */
 				if (head != prev) {
-					nm = policydb.p_cat_val_to_name[prev];
+					nm = sym_name(&policydb, SYM_CATS, prev);
 					len += strlen(nm) + 1;
 				}
-				nm = policydb.p_cat_val_to_name[i];
+				nm = sym_name(&policydb, SYM_CATS, i);
 				len += strlen(nm) + 1;
 				head = i;
 			}
 			prev = i;
 		}
 		if (prev != head) {
-			nm = policydb.p_cat_val_to_name[prev];
+			nm = sym_name(&policydb, SYM_CATS, prev);
 			len += strlen(nm) + 1;
 		}
 		if (l == 0) {
@@ -102,8 +102,8 @@
 	scontextp++;
 
 	for (l = 0; l < 2; l++) {
-		strcpy(scontextp,
-		       policydb.p_sens_val_to_name[context->range.level[l].sens - 1]);
+		strcpy(scontextp, sym_name(&policydb, SYM_LEVELS,
+					   context->range.level[l].sens - 1));
 		scontextp += strlen(scontextp);
 
 		/* categories */
@@ -118,7 +118,7 @@
 						*scontextp++ = '.';
 					else
 						*scontextp++ = ',';
-					nm = policydb.p_cat_val_to_name[prev];
+					nm = sym_name(&policydb, SYM_CATS, prev);
 					strcpy(scontextp, nm);
 					scontextp += strlen(nm);
 				}
@@ -126,7 +126,7 @@
 					*scontextp++ = ':';
 				else
 					*scontextp++ = ',';
-				nm = policydb.p_cat_val_to_name[i];
+				nm = sym_name(&policydb, SYM_CATS, i);
 				strcpy(scontextp, nm);
 				scontextp += strlen(nm);
 				head = i;
@@ -139,7 +139,7 @@
 				*scontextp++ = '.';
 			else
 				*scontextp++ = ',';
-			nm = policydb.p_cat_val_to_name[prev];
+			nm = sym_name(&policydb, SYM_CATS, prev);
 			strcpy(scontextp, nm);
 			scontextp += strlen(nm);
 		}
@@ -166,7 +166,7 @@
 	if (!l->sens || l->sens > p->p_levels.nprim)
 		return 0;
 	levdatum = hashtab_search(p->p_levels.table,
-				  p->p_sens_val_to_name[l->sens - 1]);
+				  sym_name(p, SYM_LEVELS, l->sens - 1));
 	if (!levdatum)
 		return 0;
 
@@ -482,7 +482,8 @@
 
 	for (l = 0; l < 2; l++) {
 		levdatum = hashtab_search(newp->p_levels.table,
-			oldp->p_sens_val_to_name[c->range.level[l].sens - 1]);
+					  sym_name(oldp, SYM_LEVELS,
+						   c->range.level[l].sens - 1));
 
 		if (!levdatum)
 			return -EINVAL;
@@ -493,7 +494,7 @@
 			int rc;
 
 			catdatum = hashtab_search(newp->p_cats.table,
-						  oldp->p_cat_val_to_name[i]);
+						  sym_name(oldp, SYM_CATS, i));
 			if (!catdatum)
 				return -EINVAL;
 			rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 94f630d..be9de38 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -148,32 +148,30 @@
 	int rc;
 	struct role_datum *role;
 
+	rc = -ENOMEM;
 	role = kzalloc(sizeof(*role), GFP_KERNEL);
-	if (!role) {
-		rc = -ENOMEM;
+	if (!role)
 		goto out;
-	}
+
+	rc = -EINVAL;
 	role->value = ++p->p_roles.nprim;
-	if (role->value != OBJECT_R_VAL) {
-		rc = -EINVAL;
-		goto out_free_role;
-	}
+	if (role->value != OBJECT_R_VAL)
+		goto out;
+
+	rc = -ENOMEM;
 	key = kstrdup(OBJECT_R, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
-		goto out_free_role;
-	}
+	if (!key)
+		goto out;
+
 	rc = hashtab_insert(p->p_roles.table, key, role);
 	if (rc)
-		goto out_free_key;
-out:
-	return rc;
+		goto out;
 
-out_free_key:
+	return 0;
+out:
 	kfree(key);
-out_free_role:
 	kfree(role);
-	goto out;
+	return rc;
 }
 
 static u32 rangetr_hash(struct hashtab *h, const void *k)
@@ -213,35 +211,33 @@
 	for (i = 0; i < SYM_NUM; i++) {
 		rc = symtab_init(&p->symtab[i], symtab_sizes[i]);
 		if (rc)
-			goto out_free_symtab;
+			goto out;
 	}
 
 	rc = avtab_init(&p->te_avtab);
 	if (rc)
-		goto out_free_symtab;
+		goto out;
 
 	rc = roles_init(p);
 	if (rc)
-		goto out_free_symtab;
+		goto out;
 
 	rc = cond_policydb_init(p);
 	if (rc)
-		goto out_free_symtab;
+		goto out;
 
 	p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
 	if (!p->range_tr)
-		goto out_free_symtab;
+		goto out;
 
 	ebitmap_init(&p->policycaps);
 	ebitmap_init(&p->permissive_map);
 
+	return 0;
 out:
-	return rc;
-
-out_free_symtab:
 	for (i = 0; i < SYM_NUM; i++)
 		hashtab_destroy(p->symtab[i].table);
-	goto out;
+	return rc;
 }
 
 /*
@@ -258,12 +254,17 @@
 {
 	struct policydb *p;
 	struct common_datum *comdatum;
+	struct flex_array *fa;
 
 	comdatum = datum;
 	p = datap;
 	if (!comdatum->value || comdatum->value > p->p_commons.nprim)
 		return -EINVAL;
-	p->p_common_val_to_name[comdatum->value - 1] = key;
+
+	fa = p->sym_val_to_name[SYM_COMMONS];
+	if (flex_array_put_ptr(fa, comdatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
 	return 0;
 }
 
@@ -271,12 +272,16 @@
 {
 	struct policydb *p;
 	struct class_datum *cladatum;
+	struct flex_array *fa;
 
 	cladatum = datum;
 	p = datap;
 	if (!cladatum->value || cladatum->value > p->p_classes.nprim)
 		return -EINVAL;
-	p->p_class_val_to_name[cladatum->value - 1] = key;
+	fa = p->sym_val_to_name[SYM_CLASSES];
+	if (flex_array_put_ptr(fa, cladatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
 	p->class_val_to_struct[cladatum->value - 1] = cladatum;
 	return 0;
 }
@@ -285,6 +290,7 @@
 {
 	struct policydb *p;
 	struct role_datum *role;
+	struct flex_array *fa;
 
 	role = datum;
 	p = datap;
@@ -292,7 +298,11 @@
 	    || role->value > p->p_roles.nprim
 	    || role->bounds > p->p_roles.nprim)
 		return -EINVAL;
-	p->p_role_val_to_name[role->value - 1] = key;
+
+	fa = p->sym_val_to_name[SYM_ROLES];
+	if (flex_array_put_ptr(fa, role->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
 	p->role_val_to_struct[role->value - 1] = role;
 	return 0;
 }
@@ -301,6 +311,7 @@
 {
 	struct policydb *p;
 	struct type_datum *typdatum;
+	struct flex_array *fa;
 
 	typdatum = datum;
 	p = datap;
@@ -310,8 +321,15 @@
 		    || typdatum->value > p->p_types.nprim
 		    || typdatum->bounds > p->p_types.nprim)
 			return -EINVAL;
-		p->p_type_val_to_name[typdatum->value - 1] = key;
-		p->type_val_to_struct[typdatum->value - 1] = typdatum;
+		fa = p->sym_val_to_name[SYM_TYPES];
+		if (flex_array_put_ptr(fa, typdatum->value - 1, key,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
+
+		fa = p->type_val_to_struct_array;
+		if (flex_array_put_ptr(fa, typdatum->value - 1, typdatum,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
 	}
 
 	return 0;
@@ -321,6 +339,7 @@
 {
 	struct policydb *p;
 	struct user_datum *usrdatum;
+	struct flex_array *fa;
 
 	usrdatum = datum;
 	p = datap;
@@ -328,7 +347,11 @@
 	    || usrdatum->value > p->p_users.nprim
 	    || usrdatum->bounds > p->p_users.nprim)
 		return -EINVAL;
-	p->p_user_val_to_name[usrdatum->value - 1] = key;
+
+	fa = p->sym_val_to_name[SYM_USERS];
+	if (flex_array_put_ptr(fa, usrdatum->value - 1, key,
+			       GFP_KERNEL | __GFP_ZERO))
+		BUG();
 	p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
 	return 0;
 }
@@ -337,6 +360,7 @@
 {
 	struct policydb *p;
 	struct level_datum *levdatum;
+	struct flex_array *fa;
 
 	levdatum = datum;
 	p = datap;
@@ -345,7 +369,10 @@
 		if (!levdatum->level->sens ||
 		    levdatum->level->sens > p->p_levels.nprim)
 			return -EINVAL;
-		p->p_sens_val_to_name[levdatum->level->sens - 1] = key;
+		fa = p->sym_val_to_name[SYM_LEVELS];
+		if (flex_array_put_ptr(fa, levdatum->level->sens - 1, key,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
 	}
 
 	return 0;
@@ -355,6 +382,7 @@
 {
 	struct policydb *p;
 	struct cat_datum *catdatum;
+	struct flex_array *fa;
 
 	catdatum = datum;
 	p = datap;
@@ -362,7 +390,10 @@
 	if (!catdatum->isalias) {
 		if (!catdatum->value || catdatum->value > p->p_cats.nprim)
 			return -EINVAL;
-		p->p_cat_val_to_name[catdatum->value - 1] = key;
+		fa = p->sym_val_to_name[SYM_CATS];
+		if (flex_array_put_ptr(fa, catdatum->value - 1, key,
+				       GFP_KERNEL | __GFP_ZERO))
+			BUG();
 	}
 
 	return 0;
@@ -380,47 +411,6 @@
 	cat_index,
 };
 
-/*
- * Define the common val_to_name array and the class
- * val_to_name and val_to_struct arrays in a policy
- * database structure.
- *
- * Caller must clean up upon failure.
- */
-static int policydb_index_classes(struct policydb *p)
-{
-	int rc;
-
-	p->p_common_val_to_name =
-		kmalloc(p->p_commons.nprim * sizeof(char *), GFP_KERNEL);
-	if (!p->p_common_val_to_name) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	rc = hashtab_map(p->p_commons.table, common_index, p);
-	if (rc)
-		goto out;
-
-	p->class_val_to_struct =
-		kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)), GFP_KERNEL);
-	if (!p->class_val_to_struct) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	p->p_class_val_to_name =
-		kmalloc(p->p_classes.nprim * sizeof(char *), GFP_KERNEL);
-	if (!p->p_class_val_to_name) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	rc = hashtab_map(p->p_classes.table, class_index, p);
-out:
-	return rc;
-}
-
 #ifdef DEBUG_HASHES
 static void symtab_hash_eval(struct symtab *s)
 {
@@ -458,9 +448,9 @@
  *
  * Caller must clean up on failure.
  */
-static int policydb_index_others(struct policydb *p)
+static int policydb_index(struct policydb *p)
 {
-	int i, rc = 0;
+	int i, rc;
 
 	printk(KERN_DEBUG "SELinux:  %d users, %d roles, %d types, %d bools",
 	       p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
@@ -477,47 +467,63 @@
 	symtab_hash_eval(p->symtab);
 #endif
 
+	rc = -ENOMEM;
+	p->class_val_to_struct =
+		kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)),
+			GFP_KERNEL);
+	if (!p->class_val_to_struct)
+		goto out;
+
+	rc = -ENOMEM;
 	p->role_val_to_struct =
 		kmalloc(p->p_roles.nprim * sizeof(*(p->role_val_to_struct)),
 			GFP_KERNEL);
-	if (!p->role_val_to_struct) {
-		rc = -ENOMEM;
+	if (!p->role_val_to_struct)
 		goto out;
-	}
 
+	rc = -ENOMEM;
 	p->user_val_to_struct =
 		kmalloc(p->p_users.nprim * sizeof(*(p->user_val_to_struct)),
 			GFP_KERNEL);
-	if (!p->user_val_to_struct) {
-		rc = -ENOMEM;
+	if (!p->user_val_to_struct)
 		goto out;
-	}
 
-	p->type_val_to_struct =
-		kmalloc(p->p_types.nprim * sizeof(*(p->type_val_to_struct)),
-			GFP_KERNEL);
-	if (!p->type_val_to_struct) {
-		rc = -ENOMEM;
+	/* Yes, I want the sizeof the pointer, not the structure */
+	rc = -ENOMEM;
+	p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *),
+						       p->p_types.nprim,
+						       GFP_KERNEL | __GFP_ZERO);
+	if (!p->type_val_to_struct_array)
 		goto out;
-	}
 
-	if (cond_init_bool_indexes(p)) {
-		rc = -ENOMEM;
+	rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
+				 p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO);
+	if (rc)
 		goto out;
-	}
 
-	for (i = SYM_ROLES; i < SYM_NUM; i++) {
-		p->sym_val_to_name[i] =
-			kmalloc(p->symtab[i].nprim * sizeof(char *), GFP_KERNEL);
-		if (!p->sym_val_to_name[i]) {
-			rc = -ENOMEM;
+	rc = -ENOMEM;
+	if (cond_init_bool_indexes(p))
+		goto out;
+
+	for (i = 0; i < SYM_NUM; i++) {
+		rc = -ENOMEM;
+		p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *),
+							 p->symtab[i].nprim,
+							 GFP_KERNEL | __GFP_ZERO);
+		if (!p->sym_val_to_name[i])
 			goto out;
-		}
+
+		rc = flex_array_prealloc(p->sym_val_to_name[i],
+					 0, p->symtab[i].nprim - 1,
+					 GFP_KERNEL | __GFP_ZERO);
+		if (rc)
+			goto out;
+
 		rc = hashtab_map(p->symtab[i].table, index_f[i], p);
 		if (rc)
 			goto out;
 	}
-
+	rc = 0;
 out:
 	return rc;
 }
@@ -540,9 +546,11 @@
 	struct common_datum *comdatum;
 
 	kfree(key);
-	comdatum = datum;
-	hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
-	hashtab_destroy(comdatum->permissions.table);
+	if (datum) {
+		comdatum = datum;
+		hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
+		hashtab_destroy(comdatum->permissions.table);
+	}
 	kfree(datum);
 	return 0;
 }
@@ -554,38 +562,40 @@
 	struct constraint_expr *e, *etmp;
 
 	kfree(key);
-	cladatum = datum;
-	hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
-	hashtab_destroy(cladatum->permissions.table);
-	constraint = cladatum->constraints;
-	while (constraint) {
-		e = constraint->expr;
-		while (e) {
-			ebitmap_destroy(&e->names);
-			etmp = e;
-			e = e->next;
-			kfree(etmp);
+	if (datum) {
+		cladatum = datum;
+		hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
+		hashtab_destroy(cladatum->permissions.table);
+		constraint = cladatum->constraints;
+		while (constraint) {
+			e = constraint->expr;
+			while (e) {
+				ebitmap_destroy(&e->names);
+				etmp = e;
+				e = e->next;
+				kfree(etmp);
+			}
+			ctemp = constraint;
+			constraint = constraint->next;
+			kfree(ctemp);
 		}
-		ctemp = constraint;
-		constraint = constraint->next;
-		kfree(ctemp);
-	}
 
-	constraint = cladatum->validatetrans;
-	while (constraint) {
-		e = constraint->expr;
-		while (e) {
-			ebitmap_destroy(&e->names);
-			etmp = e;
-			e = e->next;
-			kfree(etmp);
+		constraint = cladatum->validatetrans;
+		while (constraint) {
+			e = constraint->expr;
+			while (e) {
+				ebitmap_destroy(&e->names);
+				etmp = e;
+				e = e->next;
+				kfree(etmp);
+			}
+			ctemp = constraint;
+			constraint = constraint->next;
+			kfree(ctemp);
 		}
-		ctemp = constraint;
-		constraint = constraint->next;
-		kfree(ctemp);
-	}
 
-	kfree(cladatum->comkey);
+		kfree(cladatum->comkey);
+	}
 	kfree(datum);
 	return 0;
 }
@@ -595,9 +605,11 @@
 	struct role_datum *role;
 
 	kfree(key);
-	role = datum;
-	ebitmap_destroy(&role->dominates);
-	ebitmap_destroy(&role->types);
+	if (datum) {
+		role = datum;
+		ebitmap_destroy(&role->dominates);
+		ebitmap_destroy(&role->types);
+	}
 	kfree(datum);
 	return 0;
 }
@@ -614,11 +626,13 @@
 	struct user_datum *usrdatum;
 
 	kfree(key);
-	usrdatum = datum;
-	ebitmap_destroy(&usrdatum->roles);
-	ebitmap_destroy(&usrdatum->range.level[0].cat);
-	ebitmap_destroy(&usrdatum->range.level[1].cat);
-	ebitmap_destroy(&usrdatum->dfltlevel.cat);
+	if (datum) {
+		usrdatum = datum;
+		ebitmap_destroy(&usrdatum->roles);
+		ebitmap_destroy(&usrdatum->range.level[0].cat);
+		ebitmap_destroy(&usrdatum->range.level[1].cat);
+		ebitmap_destroy(&usrdatum->dfltlevel.cat);
+	}
 	kfree(datum);
 	return 0;
 }
@@ -628,9 +642,11 @@
 	struct level_datum *levdatum;
 
 	kfree(key);
-	levdatum = datum;
-	ebitmap_destroy(&levdatum->level->cat);
-	kfree(levdatum->level);
+	if (datum) {
+		levdatum = datum;
+		ebitmap_destroy(&levdatum->level->cat);
+		kfree(levdatum->level);
+	}
 	kfree(datum);
 	return 0;
 }
@@ -695,13 +711,16 @@
 		hashtab_destroy(p->symtab[i].table);
 	}
 
-	for (i = 0; i < SYM_NUM; i++)
-		kfree(p->sym_val_to_name[i]);
+	for (i = 0; i < SYM_NUM; i++) {
+		if (p->sym_val_to_name[i])
+			flex_array_free(p->sym_val_to_name[i]);
+	}
 
 	kfree(p->class_val_to_struct);
 	kfree(p->role_val_to_struct);
 	kfree(p->user_val_to_struct);
-	kfree(p->type_val_to_struct);
+	if (p->type_val_to_struct_array)
+		flex_array_free(p->type_val_to_struct_array);
 
 	avtab_destroy(&p->te_avtab);
 
@@ -785,19 +804,21 @@
 
 	head = p->ocontexts[OCON_ISID];
 	for (c = head; c; c = c->next) {
+		rc = -EINVAL;
 		if (!c->context[0].user) {
-			printk(KERN_ERR "SELinux:  SID %s was never "
-			       "defined.\n", c->u.name);
-			rc = -EINVAL;
+			printk(KERN_ERR "SELinux:  SID %s was never defined.\n",
+				c->u.name);
 			goto out;
 		}
-		if (sidtab_insert(s, c->sid[0], &c->context[0])) {
-			printk(KERN_ERR "SELinux:  unable to load initial "
-			       "SID %s.\n", c->u.name);
-			rc = -EINVAL;
+
+		rc = sidtab_insert(s, c->sid[0], &c->context[0]);
+		if (rc) {
+			printk(KERN_ERR "SELinux:  unable to load initial SID %s.\n",
+				c->u.name);
 			goto out;
 		}
 	}
+	rc = 0;
 out:
 	return rc;
 }
@@ -846,8 +867,7 @@
 		 * Role must be authorized for the type.
 		 */
 		role = p->role_val_to_struct[c->role - 1];
-		if (!ebitmap_get_bit(&role->types,
-				     c->type - 1))
+		if (!ebitmap_get_bit(&role->types, c->type - 1))
 			/* role may not be associated with type */
 			return 0;
 
@@ -858,8 +878,7 @@
 		if (!usrdatum)
 			return 0;
 
-		if (!ebitmap_get_bit(&usrdatum->roles,
-				     c->role - 1))
+		if (!ebitmap_get_bit(&usrdatum->roles, c->role - 1))
 			/* user may not be associated with role */
 			return 0;
 	}
@@ -881,20 +900,22 @@
 	int rc;
 
 	rc = next_entry(buf, fp, sizeof(u32));
-	if (rc < 0)
+	if (rc)
 		goto out;
 
+	rc = -EINVAL;
 	items = le32_to_cpu(buf[0]);
 	if (items > ARRAY_SIZE(buf)) {
 		printk(KERN_ERR "SELinux: mls:  range overflow\n");
-		rc = -EINVAL;
 		goto out;
 	}
+
 	rc = next_entry(buf, fp, sizeof(u32) * items);
-	if (rc < 0) {
+	if (rc) {
 		printk(KERN_ERR "SELinux: mls:  truncated range\n");
 		goto out;
 	}
+
 	r->level[0].sens = le32_to_cpu(buf[0]);
 	if (items > 1)
 		r->level[1].sens = le32_to_cpu(buf[1]);
@@ -903,15 +924,13 @@
 
 	rc = ebitmap_read(&r->level[0].cat, fp);
 	if (rc) {
-		printk(KERN_ERR "SELinux: mls:  error reading low "
-		       "categories\n");
+		printk(KERN_ERR "SELinux: mls:  error reading low categories\n");
 		goto out;
 	}
 	if (items > 1) {
 		rc = ebitmap_read(&r->level[1].cat, fp);
 		if (rc) {
-			printk(KERN_ERR "SELinux: mls:  error reading high "
-			       "categories\n");
+			printk(KERN_ERR "SELinux: mls:  error reading high categories\n");
 			goto bad_high;
 		}
 	} else {
@@ -922,12 +941,11 @@
 		}
 	}
 
-	rc = 0;
-out:
-	return rc;
+	return 0;
 bad_high:
 	ebitmap_destroy(&r->level[0].cat);
-	goto out;
+out:
+	return rc;
 }
 
 /*
@@ -942,7 +960,7 @@
 	int rc;
 
 	rc = next_entry(buf, fp, sizeof buf);
-	if (rc < 0) {
+	if (rc) {
 		printk(KERN_ERR "SELinux: context truncated\n");
 		goto out;
 	}
@@ -950,19 +968,20 @@
 	c->role = le32_to_cpu(buf[1]);
 	c->type = le32_to_cpu(buf[2]);
 	if (p->policyvers >= POLICYDB_VERSION_MLS) {
-		if (mls_read_range_helper(&c->range, fp)) {
-			printk(KERN_ERR "SELinux: error reading MLS range of "
-			       "context\n");
-			rc = -EINVAL;
+		rc = mls_read_range_helper(&c->range, fp);
+		if (rc) {
+			printk(KERN_ERR "SELinux: error reading MLS range of context\n");
 			goto out;
 		}
 	}
 
+	rc = -EINVAL;
 	if (!policydb_context_isvalid(p, c)) {
 		printk(KERN_ERR "SELinux:  invalid security context\n");
 		context_destroy(c);
-		rc = -EINVAL;
+		goto out;
 	}
+	rc = 0;
 out:
 	return rc;
 }
@@ -981,37 +1000,36 @@
 	__le32 buf[2];
 	u32 len;
 
+	rc = -ENOMEM;
 	perdatum = kzalloc(sizeof(*perdatum), GFP_KERNEL);
-	if (!perdatum) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!perdatum)
+		goto bad;
 
 	rc = next_entry(buf, fp, sizeof buf);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
 	perdatum->value = le32_to_cpu(buf[1]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
+
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
 	rc = hashtab_insert(h, key, perdatum);
 	if (rc)
 		goto bad;
-out:
-	return rc;
+
+	return 0;
 bad:
 	perm_destroy(key, perdatum, NULL);
-	goto out;
+	return rc;
 }
 
 static int common_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1022,14 +1040,13 @@
 	u32 len, nel;
 	int i, rc;
 
+	rc = -ENOMEM;
 	comdatum = kzalloc(sizeof(*comdatum), GFP_KERNEL);
-	if (!comdatum) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!comdatum)
+		goto bad;
 
 	rc = next_entry(buf, fp, sizeof buf);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
@@ -1041,13 +1058,13 @@
 	comdatum->permissions.nprim = le32_to_cpu(buf[2]);
 	nel = le32_to_cpu(buf[3]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
+
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
@@ -1060,11 +1077,10 @@
 	rc = hashtab_insert(h, key, comdatum);
 	if (rc)
 		goto bad;
-out:
-	return rc;
+	return 0;
 bad:
 	common_destroy(key, comdatum, NULL);
-	goto out;
+	return rc;
 }
 
 static int read_cons_helper(struct constraint_node **nodep, int ncons,
@@ -1088,7 +1104,7 @@
 			*nodep = c;
 
 		rc = next_entry(buf, fp, (sizeof(u32) * 2));
-		if (rc < 0)
+		if (rc)
 			return rc;
 		c->permissions = le32_to_cpu(buf[0]);
 		nexpr = le32_to_cpu(buf[1]);
@@ -1105,7 +1121,7 @@
 				c->expr = e;
 
 			rc = next_entry(buf, fp, (sizeof(u32) * 3));
-			if (rc < 0)
+			if (rc)
 				return rc;
 			e->expr_type = le32_to_cpu(buf[0]);
 			e->attr = le32_to_cpu(buf[1]);
@@ -1133,8 +1149,9 @@
 				if (depth == (CEXPR_MAXDEPTH - 1))
 					return -EINVAL;
 				depth++;
-				if (ebitmap_read(&e->names, fp))
-					return -EINVAL;
+				rc = ebitmap_read(&e->names, fp);
+				if (rc)
+					return rc;
 				break;
 			default:
 				return -EINVAL;
@@ -1157,14 +1174,13 @@
 	u32 len, len2, ncons, nel;
 	int i, rc;
 
+	rc = -ENOMEM;
 	cladatum = kzalloc(sizeof(*cladatum), GFP_KERNEL);
-	if (!cladatum) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!cladatum)
+		goto bad;
 
 	rc = next_entry(buf, fp, sizeof(u32)*6);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
@@ -1179,33 +1195,30 @@
 
 	ncons = le32_to_cpu(buf[5]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
+
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
 	if (len2) {
+		rc = -ENOMEM;
 		cladatum->comkey = kmalloc(len2 + 1, GFP_KERNEL);
-		if (!cladatum->comkey) {
-			rc = -ENOMEM;
+		if (!cladatum->comkey)
 			goto bad;
-		}
 		rc = next_entry(cladatum->comkey, fp, len2);
-		if (rc < 0)
+		if (rc)
 			goto bad;
 		cladatum->comkey[len2] = '\0';
 
-		cladatum->comdatum = hashtab_search(p->p_commons.table,
-						    cladatum->comkey);
+		rc = -EINVAL;
+		cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey);
 		if (!cladatum->comdatum) {
-			printk(KERN_ERR "SELinux:  unknown common %s\n",
-			       cladatum->comkey);
-			rc = -EINVAL;
+			printk(KERN_ERR "SELinux:  unknown common %s\n", cladatum->comkey);
 			goto bad;
 		}
 	}
@@ -1222,7 +1235,7 @@
 	if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) {
 		/* grab the validatetrans rules */
 		rc = next_entry(buf, fp, sizeof(u32));
-		if (rc < 0)
+		if (rc)
 			goto bad;
 		ncons = le32_to_cpu(buf[0]);
 		rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp);
@@ -1234,12 +1247,10 @@
 	if (rc)
 		goto bad;
 
-	rc = 0;
-out:
-	return rc;
+	return 0;
 bad:
 	cls_destroy(key, cladatum, NULL);
-	goto out;
+	return rc;
 }
 
 static int role_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1250,17 +1261,16 @@
 	__le32 buf[3];
 	u32 len;
 
+	rc = -ENOMEM;
 	role = kzalloc(sizeof(*role), GFP_KERNEL);
-	if (!role) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!role)
+		goto bad;
 
 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
 		to_read = 3;
 
 	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
@@ -1268,13 +1278,13 @@
 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
 		role->bounds = le32_to_cpu(buf[2]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
+
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
@@ -1287,10 +1297,10 @@
 		goto bad;
 
 	if (strcmp(key, OBJECT_R) == 0) {
+		rc = -EINVAL;
 		if (role->value != OBJECT_R_VAL) {
 			printk(KERN_ERR "SELinux: Role %s has wrong value %d\n",
 			       OBJECT_R, role->value);
-			rc = -EINVAL;
 			goto bad;
 		}
 		rc = 0;
@@ -1300,11 +1310,10 @@
 	rc = hashtab_insert(h, key, role);
 	if (rc)
 		goto bad;
-out:
-	return rc;
+	return 0;
 bad:
 	role_destroy(key, role, NULL);
-	goto out;
+	return rc;
 }
 
 static int type_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1315,17 +1324,16 @@
 	__le32 buf[4];
 	u32 len;
 
+	rc = -ENOMEM;
 	typdatum = kzalloc(sizeof(*typdatum), GFP_KERNEL);
-	if (!typdatum) {
-		rc = -ENOMEM;
-		return rc;
-	}
+	if (!typdatum)
+		goto bad;
 
 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
 		to_read = 4;
 
 	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
@@ -1343,24 +1351,22 @@
 		typdatum->primary = le32_to_cpu(buf[2]);
 	}
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
 	rc = hashtab_insert(h, key, typdatum);
 	if (rc)
 		goto bad;
-out:
-	return rc;
+	return 0;
 bad:
 	type_destroy(key, typdatum, NULL);
-	goto out;
+	return rc;
 }
 
 
@@ -1376,22 +1382,18 @@
 	memset(lp, 0, sizeof(*lp));
 
 	rc = next_entry(buf, fp, sizeof buf);
-	if (rc < 0) {
+	if (rc) {
 		printk(KERN_ERR "SELinux: mls: truncated level\n");
-		goto bad;
+		return rc;
 	}
 	lp->sens = le32_to_cpu(buf[0]);
 
-	if (ebitmap_read(&lp->cat, fp)) {
-		printk(KERN_ERR "SELinux: mls:  error reading level "
-		       "categories\n");
-		goto bad;
+	rc = ebitmap_read(&lp->cat, fp);
+	if (rc) {
+		printk(KERN_ERR "SELinux: mls:  error reading level categories\n");
+		return rc;
 	}
-
 	return 0;
-
-bad:
-	return -EINVAL;
 }
 
 static int user_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1402,17 +1404,16 @@
 	__le32 buf[3];
 	u32 len;
 
+	rc = -ENOMEM;
 	usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL);
-	if (!usrdatum) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!usrdatum)
+		goto bad;
 
 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
 		to_read = 3;
 
 	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
@@ -1420,13 +1421,12 @@
 	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
 		usrdatum->bounds = le32_to_cpu(buf[2]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_KERNEL);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
@@ -1446,11 +1446,10 @@
 	rc = hashtab_insert(h, key, usrdatum);
 	if (rc)
 		goto bad;
-out:
-	return rc;
+	return 0;
 bad:
 	user_destroy(key, usrdatum, NULL);
-	goto out;
+	return rc;
 }
 
 static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1461,47 +1460,43 @@
 	__le32 buf[2];
 	u32 len;
 
+	rc = -ENOMEM;
 	levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC);
-	if (!levdatum) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!levdatum)
+		goto bad;
 
 	rc = next_entry(buf, fp, sizeof buf);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
 	levdatum->isalias = le32_to_cpu(buf[1]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_ATOMIC);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
+	rc = -ENOMEM;
 	levdatum->level = kmalloc(sizeof(struct mls_level), GFP_ATOMIC);
-	if (!levdatum->level) {
-		rc = -ENOMEM;
+	if (!levdatum->level)
 		goto bad;
-	}
-	if (mls_read_level(levdatum->level, fp)) {
-		rc = -EINVAL;
+
+	rc = mls_read_level(levdatum->level, fp);
+	if (rc)
 		goto bad;
-	}
 
 	rc = hashtab_insert(h, key, levdatum);
 	if (rc)
 		goto bad;
-out:
-	return rc;
+	return 0;
 bad:
 	sens_destroy(key, levdatum, NULL);
-	goto out;
+	return rc;
 }
 
 static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1512,39 +1507,35 @@
 	__le32 buf[3];
 	u32 len;
 
+	rc = -ENOMEM;
 	catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC);
-	if (!catdatum) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!catdatum)
+		goto bad;
 
 	rc = next_entry(buf, fp, sizeof buf);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
 	catdatum->value = le32_to_cpu(buf[1]);
 	catdatum->isalias = le32_to_cpu(buf[2]);
 
+	rc = -ENOMEM;
 	key = kmalloc(len + 1, GFP_ATOMIC);
-	if (!key) {
-		rc = -ENOMEM;
+	if (!key)
 		goto bad;
-	}
 	rc = next_entry(key, fp, len);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	key[len] = '\0';
 
 	rc = hashtab_insert(h, key, catdatum);
 	if (rc)
 		goto bad;
-out:
-	return rc;
-
+	return 0;
 bad:
 	cat_destroy(key, catdatum, NULL);
-	goto out;
+	return rc;
 }
 
 static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) =
@@ -1585,9 +1576,9 @@
 			printk(KERN_ERR
 			       "SELinux: boundary violated policy: "
 			       "user=%s role=%s bounds=%s\n",
-			       p->p_user_val_to_name[user->value - 1],
-			       p->p_role_val_to_name[bit],
-			       p->p_user_val_to_name[upper->value - 1]);
+			       sym_name(p, SYM_USERS, user->value - 1),
+			       sym_name(p, SYM_ROLES, bit),
+			       sym_name(p, SYM_USERS, upper->value - 1));
 
 			return -EINVAL;
 		}
@@ -1622,9 +1613,9 @@
 			printk(KERN_ERR
 			       "SELinux: boundary violated policy: "
 			       "role=%s type=%s bounds=%s\n",
-			       p->p_role_val_to_name[role->value - 1],
-			       p->p_type_val_to_name[bit],
-			       p->p_role_val_to_name[upper->value - 1]);
+			       sym_name(p, SYM_ROLES, role->value - 1),
+			       sym_name(p, SYM_TYPES, bit),
+			       sym_name(p, SYM_ROLES, upper->value - 1));
 
 			return -EINVAL;
 		}
@@ -1648,12 +1639,15 @@
 			return -EINVAL;
 		}
 
-		upper = p->type_val_to_struct[upper->bounds - 1];
+		upper = flex_array_get_ptr(p->type_val_to_struct_array,
+					   upper->bounds - 1);
+		BUG_ON(!upper);
+
 		if (upper->attribute) {
 			printk(KERN_ERR "SELinux: type %s: "
 			       "bounded by attribute %s",
 			       (char *) key,
-			       p->p_type_val_to_name[upper->value - 1]);
+			       sym_name(p, SYM_TYPES, upper->value - 1));
 			return -EINVAL;
 		}
 	}
@@ -2066,13 +2060,14 @@
 
 	rc = policydb_init(p);
 	if (rc)
-		goto out;
+		return rc;
 
 	/* Read the magic number and string length. */
 	rc = next_entry(buf, fp, sizeof(u32) * 2);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
+	rc = -EINVAL;
 	if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
 		printk(KERN_ERR "SELinux:  policydb magic number 0x%x does "
 		       "not match expected magic number 0x%x\n",
@@ -2080,6 +2075,7 @@
 		goto bad;
 	}
 
+	rc = -EINVAL;
 	len = le32_to_cpu(buf[1]);
 	if (len != strlen(POLICYDB_STRING)) {
 		printk(KERN_ERR "SELinux:  policydb string length %d does not "
@@ -2087,19 +2083,23 @@
 		       len, strlen(POLICYDB_STRING));
 		goto bad;
 	}
+
+	rc = -ENOMEM;
 	policydb_str = kmalloc(len + 1, GFP_KERNEL);
 	if (!policydb_str) {
 		printk(KERN_ERR "SELinux:  unable to allocate memory for policydb "
 		       "string of length %d\n", len);
-		rc = -ENOMEM;
 		goto bad;
 	}
+
 	rc = next_entry(policydb_str, fp, len);
-	if (rc < 0) {
+	if (rc) {
 		printk(KERN_ERR "SELinux:  truncated policydb string identifier\n");
 		kfree(policydb_str);
 		goto bad;
 	}
+
+	rc = -EINVAL;
 	policydb_str[len] = '\0';
 	if (strcmp(policydb_str, POLICYDB_STRING)) {
 		printk(KERN_ERR "SELinux:  policydb string %s does not match "
@@ -2113,9 +2113,10 @@
 
 	/* Read the version and table sizes. */
 	rc = next_entry(buf, fp, sizeof(u32)*4);
-	if (rc < 0)
+	if (rc)
 		goto bad;
 
+	rc = -EINVAL;
 	p->policyvers = le32_to_cpu(buf[0]);
 	if (p->policyvers < POLICYDB_VERSION_MIN ||
 	    p->policyvers > POLICYDB_VERSION_MAX) {
@@ -2128,6 +2129,7 @@
 	if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
 		p->mls_enabled = 1;
 
+		rc = -EINVAL;
 		if (p->policyvers < POLICYDB_VERSION_MLS) {
 			printk(KERN_ERR "SELinux: security policydb version %d "
 				"(MLS) not backwards compatible\n",
@@ -2138,14 +2140,19 @@
 	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
 	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
 
-	if (p->policyvers >= POLICYDB_VERSION_POLCAP &&
-	    ebitmap_read(&p->policycaps, fp) != 0)
-		goto bad;
+	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+		rc = ebitmap_read(&p->policycaps, fp);
+		if (rc)
+			goto bad;
+	}
 
-	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE &&
-	    ebitmap_read(&p->permissive_map, fp) != 0)
-		goto bad;
+	if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+		rc = ebitmap_read(&p->permissive_map, fp);
+		if (rc)
+			goto bad;
+	}
 
+	rc = -EINVAL;
 	info = policydb_lookup_compat(p->policyvers);
 	if (!info) {
 		printk(KERN_ERR "SELinux:  unable to find policy compat info "
@@ -2153,6 +2160,7 @@
 		goto bad;
 	}
 
+	rc = -EINVAL;
 	if (le32_to_cpu(buf[2]) != info->sym_num ||
 		le32_to_cpu(buf[3]) != info->ocon_num) {
 		printk(KERN_ERR "SELinux:  policydb table sizes (%d,%d) do "
@@ -2164,7 +2172,7 @@
 
 	for (i = 0; i < info->sym_num; i++) {
 		rc = next_entry(buf, fp, sizeof(u32)*2);
-		if (rc < 0)
+		if (rc)
 			goto bad;
 		nprim = le32_to_cpu(buf[0]);
 		nel = le32_to_cpu(buf[1]);
@@ -2188,78 +2196,73 @@
 	}
 
 	rc = next_entry(buf, fp, sizeof(u32));
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	nel = le32_to_cpu(buf[0]);
 	ltr = NULL;
 	for (i = 0; i < nel; i++) {
+		rc = -ENOMEM;
 		tr = kzalloc(sizeof(*tr), GFP_KERNEL);
-		if (!tr) {
-			rc = -ENOMEM;
+		if (!tr)
 			goto bad;
-		}
 		if (ltr)
 			ltr->next = tr;
 		else
 			p->role_tr = tr;
 		rc = next_entry(buf, fp, sizeof(u32)*3);
-		if (rc < 0)
+		if (rc)
 			goto bad;
+
+		rc = -EINVAL;
 		tr->role = le32_to_cpu(buf[0]);
 		tr->type = le32_to_cpu(buf[1]);
 		tr->new_role = le32_to_cpu(buf[2]);
 		if (!policydb_role_isvalid(p, tr->role) ||
 		    !policydb_type_isvalid(p, tr->type) ||
-		    !policydb_role_isvalid(p, tr->new_role)) {
-			rc = -EINVAL;
+		    !policydb_role_isvalid(p, tr->new_role))
 			goto bad;
-		}
 		ltr = tr;
 	}
 
 	rc = next_entry(buf, fp, sizeof(u32));
-	if (rc < 0)
+	if (rc)
 		goto bad;
 	nel = le32_to_cpu(buf[0]);
 	lra = NULL;
 	for (i = 0; i < nel; i++) {
+		rc = -ENOMEM;
 		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
-		if (!ra) {
-			rc = -ENOMEM;
+		if (!ra)
 			goto bad;
-		}
 		if (lra)
 			lra->next = ra;
 		else
 			p->role_allow = ra;
 		rc = next_entry(buf, fp, sizeof(u32)*2);
-		if (rc < 0)
+		if (rc)
 			goto bad;
+
+		rc = -EINVAL;
 		ra->role = le32_to_cpu(buf[0]);
 		ra->new_role = le32_to_cpu(buf[1]);
 		if (!policydb_role_isvalid(p, ra->role) ||
-		    !policydb_role_isvalid(p, ra->new_role)) {
-			rc = -EINVAL;
+		    !policydb_role_isvalid(p, ra->new_role))
 			goto bad;
-		}
 		lra = ra;
 	}
 
-	rc = policydb_index_classes(p);
+	rc = policydb_index(p);
 	if (rc)
 		goto bad;
 
-	rc = policydb_index_others(p);
-	if (rc)
-		goto bad;
-
+	rc = -EINVAL;
 	p->process_class = string_to_security_class(p, "process");
 	if (!p->process_class)
 		goto bad;
-	p->process_trans_perms = string_to_av_perm(p, p->process_class,
-						   "transition");
-	p->process_trans_perms |= string_to_av_perm(p, p->process_class,
-						    "dyntransition");
+
+	rc = -EINVAL;
+	p->process_trans_perms = string_to_av_perm(p, p->process_class, "transition");
+	p->process_trans_perms |= string_to_av_perm(p, p->process_class, "dyntransition");
 	if (!p->process_trans_perms)
 		goto bad;
 
@@ -2312,8 +2315,6 @@
 out:
 	return rc;
 bad:
-	if (!rc)
-		rc = -EINVAL;
 	policydb_destroy(p);
 	goto out;
 }
@@ -3076,7 +3077,7 @@
 	if (!info) {
 		printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
 		    "version %d", p->policyvers);
-		return rc;
+		return -EINVAL;
 	}
 
 	buf[0] = cpu_to_le32(p->policyvers);
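
policydb_index() above replaces the old kmalloc'd name and type pointer arrays with flex_arrays, so large symbol tables no longer need one big contiguous allocation: each table is allocated at element granularity, fully preallocated (which is what lets the index callbacks BUG() instead of handling -ENOMEM), and then populated through hashtab_map(). A condensed sketch of that sequence for one symbol table; alloc_sym_names_sketch() is a hypothetical helper, and the flex_array_prealloc() arguments mirror the hunk above (start index 0, last index nprim - 1):

#include <linux/flex_array.h>

static int alloc_sym_names_sketch(struct policydb *p, int i)
{
	int rc;

	/* one flex_array of char * per symbol table, indexed by (value - 1) */
	p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *),
						 p->symtab[i].nprim,
						 GFP_KERNEL | __GFP_ZERO);
	if (!p->sym_val_to_name[i])
		return -ENOMEM;

	/* preallocate every slot so the put_ptr() calls in the callbacks cannot fail */
	rc = flex_array_prealloc(p->sym_val_to_name[i], 0,
				 p->symtab[i].nprim - 1,
				 GFP_KERNEL | __GFP_ZERO);
	if (rc)
		return rc;

	/* walk the hash table and store each key via the matching index callback */
	return hashtab_map(p->symtab[i].table, index_f[i], p);
}

The same alloc/prealloc pattern covers type_val_to_struct_array, and policydb_destroy() above gains the matching flex_array_free() calls.
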
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 95d3d7d..4e3ab9d 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -203,21 +203,13 @@
 #define p_cats symtab[SYM_CATS]
 
 	/* symbol names indexed by (value - 1) */
-	char **sym_val_to_name[SYM_NUM];
-#define p_common_val_to_name sym_val_to_name[SYM_COMMONS]
-#define p_class_val_to_name sym_val_to_name[SYM_CLASSES]
-#define p_role_val_to_name sym_val_to_name[SYM_ROLES]
-#define p_type_val_to_name sym_val_to_name[SYM_TYPES]
-#define p_user_val_to_name sym_val_to_name[SYM_USERS]
-#define p_bool_val_to_name sym_val_to_name[SYM_BOOLS]
-#define p_sens_val_to_name sym_val_to_name[SYM_LEVELS]
-#define p_cat_val_to_name sym_val_to_name[SYM_CATS]
+	struct flex_array *sym_val_to_name[SYM_NUM];
 
 	/* class, role, and user attributes indexed by (value - 1) */
 	struct class_datum **class_val_to_struct;
 	struct role_datum **role_val_to_struct;
 	struct user_datum **user_val_to_struct;
-	struct type_datum **type_val_to_struct;
+	struct flex_array *type_val_to_struct_array;
 
 	/* type enforcement access vectors and transitions */
 	struct avtab te_avtab;
@@ -321,6 +313,13 @@
 	return 0;
 }
 
+static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
+{
+	struct flex_array *fa = p->sym_val_to_name[sym_num];
+
+	return flex_array_get_ptr(fa, element_nr);
+}
+
 extern u16 string_to_security_class(struct policydb *p, const char *name);
 extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
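
With the p_*_val_to_name macros removed, every former reader in mls.c, policydb.c and services.c goes through the sym_name() helper defined just above, which is simply flex_array_get_ptr() on the per-symbol table. The old and new spellings read the same entry; a hypothetical cat_name_sketch() illustrating a call site like those in the mls.c hunks:

static char *cat_name_sketch(struct policydb *p, int i)
{
	/* was: return p->p_cat_val_to_name[i]; */
	return sym_name(p, SYM_CATS, i);
}

The extra indirection is what lets the backing storage stop being a single contiguous kmalloc() region.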
 
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 223c1ff..a03cfaf 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -464,7 +464,7 @@
 	if (!permissions)
 		return;
 
-	tclass_name = policydb.p_class_val_to_name[tclass - 1];
+	tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
 	tclass_dat = policydb.class_val_to_struct[tclass - 1];
 	common_dat = tclass_dat->comdatum;
 
@@ -530,12 +530,18 @@
 	struct context lo_scontext;
 	struct context lo_tcontext;
 	struct av_decision lo_avd;
-	struct type_datum *source
-		= policydb.type_val_to_struct[scontext->type - 1];
-	struct type_datum *target
-		= policydb.type_val_to_struct[tcontext->type - 1];
+	struct type_datum *source;
+	struct type_datum *target;
 	u32 masked = 0;
 
+	source = flex_array_get_ptr(policydb.type_val_to_struct_array,
+				    scontext->type - 1);
+	BUG_ON(!source);
+
+	target = flex_array_get_ptr(policydb.type_val_to_struct_array,
+				    tcontext->type - 1);
+	BUG_ON(!target);
+
 	if (source->bounds) {
 		memset(&lo_avd, 0, sizeof(lo_avd));
 
@@ -701,16 +707,16 @@
 	char *o = NULL, *n = NULL, *t = NULL;
 	u32 olen, nlen, tlen;
 
-	if (context_struct_to_string(ocontext, &o, &olen) < 0)
+	if (context_struct_to_string(ocontext, &o, &olen))
 		goto out;
-	if (context_struct_to_string(ncontext, &n, &nlen) < 0)
+	if (context_struct_to_string(ncontext, &n, &nlen))
 		goto out;
-	if (context_struct_to_string(tcontext, &t, &tlen) < 0)
+	if (context_struct_to_string(tcontext, &t, &tlen))
 		goto out;
 	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
 		  "security_validate_transition:  denied for"
 		  " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
-		  o, n, t, policydb.p_class_val_to_name[tclass-1]);
+		  o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
 out:
 	kfree(o);
 	kfree(n);
@@ -801,10 +807,11 @@
 	struct context *old_context, *new_context;
 	struct type_datum *type;
 	int index;
-	int rc = -EINVAL;
+	int rc;
 
 	read_lock(&policy_rwlock);
 
+	rc = -EINVAL;
 	old_context = sidtab_search(&sidtab, old_sid);
 	if (!old_context) {
 		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
@@ -812,6 +819,7 @@
 		goto out;
 	}
 
+	rc = -EINVAL;
 	new_context = sidtab_search(&sidtab, new_sid);
 	if (!new_context) {
 		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
@@ -819,28 +827,27 @@
 		goto out;
 	}
 
+	rc = 0;
 	/* type/domain unchanged */
-	if (old_context->type == new_context->type) {
-		rc = 0;
+	if (old_context->type == new_context->type)
 		goto out;
-	}
 
 	index = new_context->type;
 	while (true) {
-		type = policydb.type_val_to_struct[index - 1];
+		type = flex_array_get_ptr(policydb.type_val_to_struct_array,
+					  index - 1);
 		BUG_ON(!type);
 
 		/* not bounded anymore */
-		if (!type->bounds) {
-			rc = -EPERM;
+		rc = -EPERM;
+		if (!type->bounds)
 			break;
-		}
 
 		/* @newsid is bounded by @oldsid */
-		if (type->bounds == old_context->type) {
-			rc = 0;
+		rc = 0;
+		if (type->bounds == old_context->type)
 			break;
-		}
+
 		index = type->bounds;
 	}
 
@@ -1005,9 +1012,9 @@
 	}
 
 	/* Compute the size of the context. */
-	*scontext_len += strlen(policydb.p_user_val_to_name[context->user - 1]) + 1;
-	*scontext_len += strlen(policydb.p_role_val_to_name[context->role - 1]) + 1;
-	*scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1;
+	*scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1;
+	*scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1;
+	*scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1;
 	*scontext_len += mls_compute_context_len(context);
 
 	if (!scontext)
@@ -1023,12 +1030,12 @@
 	 * Copy the user name, role name and type name into the context.
 	 */
 	sprintf(scontextp, "%s:%s:%s",
-		policydb.p_user_val_to_name[context->user - 1],
-		policydb.p_role_val_to_name[context->role - 1],
-		policydb.p_type_val_to_name[context->type - 1]);
-	scontextp += strlen(policydb.p_user_val_to_name[context->user - 1]) +
-		     1 + strlen(policydb.p_role_val_to_name[context->role - 1]) +
-		     1 + strlen(policydb.p_type_val_to_name[context->type - 1]);
+		sym_name(&policydb, SYM_USERS, context->user - 1),
+		sym_name(&policydb, SYM_ROLES, context->role - 1),
+		sym_name(&policydb, SYM_TYPES, context->type - 1));
+	scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) +
+		     1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) +
+		     1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1));
 
 	mls_sid_to_context(context, &scontextp);
 
@@ -1187,16 +1194,13 @@
 	if (rc)
 		goto out;
 
-	if ((p - scontext) < scontext_len) {
-		rc = -EINVAL;
+	rc = -EINVAL;
+	if ((p - scontext) < scontext_len)
 		goto out;
-	}
 
 	/* Check the validity of the new context. */
-	if (!policydb_context_isvalid(pol, ctx)) {
-		rc = -EINVAL;
+	if (!policydb_context_isvalid(pol, ctx))
 		goto out;
-	}
 	rc = 0;
 out:
 	if (rc)
@@ -1235,27 +1239,26 @@
 
 	if (force) {
 		/* Save another copy for storing in uninterpreted form */
+		rc = -ENOMEM;
 		str = kstrdup(scontext2, gfp_flags);
-		if (!str) {
-			kfree(scontext2);
-			return -ENOMEM;
-		}
+		if (!str)
+			goto out;
 	}
 
 	read_lock(&policy_rwlock);
-	rc = string_to_context_struct(&policydb, &sidtab,
-				      scontext2, scontext_len,
-				      &context, def_sid);
+	rc = string_to_context_struct(&policydb, &sidtab, scontext2,
+				      scontext_len, &context, def_sid);
 	if (rc == -EINVAL && force) {
 		context.str = str;
 		context.len = scontext_len;
 		str = NULL;
 	} else if (rc)
-		goto out;
+		goto out_unlock;
 	rc = sidtab_context_to_sid(&sidtab, &context, sid);
 	context_destroy(&context);
-out:
+out_unlock:
 	read_unlock(&policy_rwlock);
+out:
 	kfree(scontext2);
 	kfree(str);
 	return rc;
@@ -1319,18 +1322,18 @@
 	char *s = NULL, *t = NULL, *n = NULL;
 	u32 slen, tlen, nlen;
 
-	if (context_struct_to_string(scontext, &s, &slen) < 0)
+	if (context_struct_to_string(scontext, &s, &slen))
 		goto out;
-	if (context_struct_to_string(tcontext, &t, &tlen) < 0)
+	if (context_struct_to_string(tcontext, &t, &tlen))
 		goto out;
-	if (context_struct_to_string(newcontext, &n, &nlen) < 0)
+	if (context_struct_to_string(newcontext, &n, &nlen))
 		goto out;
 	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
 		  "security_compute_sid:  invalid context %s"
 		  " for scontext=%s"
 		  " tcontext=%s"
 		  " tclass=%s",
-		  n, s, t, policydb.p_class_val_to_name[tclass-1]);
+		  n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
 out:
 	kfree(s);
 	kfree(t);
@@ -1569,22 +1572,17 @@
 
 static inline int convert_context_handle_invalid_context(struct context *context)
 {
-	int rc = 0;
+	char *s;
+	u32 len;
 
-	if (selinux_enforcing) {
-		rc = -EINVAL;
-	} else {
-		char *s;
-		u32 len;
+	if (selinux_enforcing)
+		return -EINVAL;
 
-		if (!context_struct_to_string(context, &s, &len)) {
-			printk(KERN_WARNING
-		       "SELinux:  Context %s would be invalid if enforcing\n",
-			       s);
-			kfree(s);
-		}
+	if (!context_struct_to_string(context, &s, &len)) {
+		printk(KERN_WARNING "SELinux:  Context %s would be invalid if enforcing\n", s);
+		kfree(s);
 	}
-	return rc;
+	return 0;
 }
 
 struct convert_context_args {
@@ -1621,17 +1619,17 @@
 
 	if (c->str) {
 		struct context ctx;
+
+		rc = -ENOMEM;
 		s = kstrdup(c->str, GFP_KERNEL);
-		if (!s) {
-			rc = -ENOMEM;
+		if (!s)
 			goto out;
-		}
+
 		rc = string_to_context_struct(args->newp, NULL, s,
 					      c->len, &ctx, SECSID_NULL);
 		kfree(s);
 		if (!rc) {
-			printk(KERN_INFO
-		       "SELinux:  Context %s became valid (mapped).\n",
+			printk(KERN_INFO "SELinux:  Context %s became valid (mapped).\n",
 			       c->str);
 			/* Replace string with mapped representation. */
 			kfree(c->str);
@@ -1643,8 +1641,7 @@
 			goto out;
 		} else {
 			/* Other error condition, e.g. ENOMEM. */
-			printk(KERN_ERR
-		       "SELinux:   Unable to map context %s, rc = %d.\n",
+			printk(KERN_ERR "SELinux:   Unable to map context %s, rc = %d.\n",
 			       c->str, -rc);
 			goto out;
 		}
@@ -1654,25 +1651,26 @@
 	if (rc)
 		goto out;
 
-	rc = -EINVAL;
-
 	/* Convert the user. */
+	rc = -EINVAL;
 	usrdatum = hashtab_search(args->newp->p_users.table,
-				  args->oldp->p_user_val_to_name[c->user - 1]);
+				  sym_name(args->oldp, SYM_USERS, c->user - 1));
 	if (!usrdatum)
 		goto bad;
 	c->user = usrdatum->value;
 
 	/* Convert the role. */
+	rc = -EINVAL;
 	role = hashtab_search(args->newp->p_roles.table,
-			      args->oldp->p_role_val_to_name[c->role - 1]);
+			      sym_name(args->oldp, SYM_ROLES, c->role - 1));
 	if (!role)
 		goto bad;
 	c->role = role->value;
 
 	/* Convert the type. */
+	rc = -EINVAL;
 	typdatum = hashtab_search(args->newp->p_types.table,
-				  args->oldp->p_type_val_to_name[c->type - 1]);
+				  sym_name(args->oldp, SYM_TYPES, c->type - 1));
 	if (!typdatum)
 		goto bad;
 	c->type = typdatum->value;
@@ -1700,6 +1698,7 @@
 		oc = args->newp->ocontexts[OCON_ISID];
 		while (oc && oc->sid[0] != SECINITSID_UNLABELED)
 			oc = oc->next;
+		rc = -EINVAL;
 		if (!oc) {
 			printk(KERN_ERR "SELinux:  unable to look up"
 				" the initial SIDs list\n");
@@ -1719,19 +1718,20 @@
 	}
 
 	context_destroy(&oldc);
+
 	rc = 0;
 out:
 	return rc;
 bad:
 	/* Map old representation to string and save it. */
-	if (context_struct_to_string(&oldc, &s, &len))
-		return -ENOMEM;
+	rc = context_struct_to_string(&oldc, &s, &len);
+	if (rc)
+		return rc;
 	context_destroy(&oldc);
 	context_destroy(c);
 	c->str = s;
 	c->len = len;
-	printk(KERN_INFO
-	       "SELinux:  Context %s became invalid (unmapped).\n",
+	printk(KERN_INFO "SELinux:  Context %s became invalid (unmapped).\n",
 	       c->str);
 	rc = 0;
 	goto out;
@@ -2012,7 +2012,7 @@
 		      u32 addrlen,
 		      u32 *out_sid)
 {
-	int rc = 0;
+	int rc;
 	struct ocontext *c;
 
 	read_lock(&policy_rwlock);
@@ -2021,10 +2021,9 @@
 	case AF_INET: {
 		u32 addr;
 
-		if (addrlen != sizeof(u32)) {
-			rc = -EINVAL;
+		rc = -EINVAL;
+		if (addrlen != sizeof(u32))
 			goto out;
-		}
 
 		addr = *((u32 *)addrp);
 
@@ -2038,10 +2037,9 @@
 	}
 
 	case AF_INET6:
-		if (addrlen != sizeof(u64) * 2) {
-			rc = -EINVAL;
+		rc = -EINVAL;
+		if (addrlen != sizeof(u64) * 2)
 			goto out;
-		}
 		c = policydb.ocontexts[OCON_NODE6];
 		while (c) {
 			if (match_ipv6_addrmask(addrp, c->u.node6.addr,
@@ -2052,6 +2050,7 @@
 		break;
 
 	default:
+		rc = 0;
 		*out_sid = SECINITSID_NODE;
 		goto out;
 	}
@@ -2069,6 +2068,7 @@
 		*out_sid = SECINITSID_NODE;
 	}
 
+	rc = 0;
 out:
 	read_unlock(&policy_rwlock);
 	return rc;
@@ -2113,24 +2113,22 @@
 
 	context_init(&usercon);
 
+	rc = -EINVAL;
 	fromcon = sidtab_search(&sidtab, fromsid);
-	if (!fromcon) {
-		rc = -EINVAL;
+	if (!fromcon)
 		goto out_unlock;
-	}
 
+	rc = -EINVAL;
 	user = hashtab_search(policydb.p_users.table, username);
-	if (!user) {
-		rc = -EINVAL;
+	if (!user)
 		goto out_unlock;
-	}
+
 	usercon.user = user->value;
 
+	rc = -ENOMEM;
 	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
-	if (!mysids) {
-		rc = -ENOMEM;
+	if (!mysids)
 		goto out_unlock;
-	}
 
 	ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
 		role = policydb.role_val_to_struct[i];
@@ -2147,12 +2145,11 @@
 			if (mynel < maxnel) {
 				mysids[mynel++] = sid;
 			} else {
+				rc = -ENOMEM;
 				maxnel += SIDS_NEL;
 				mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
-				if (!mysids2) {
-					rc = -ENOMEM;
+				if (!mysids2)
 					goto out_unlock;
-				}
 				memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
 				kfree(mysids);
 				mysids = mysids2;
@@ -2160,7 +2157,7 @@
 			}
 		}
 	}
-
+	rc = 0;
 out_unlock:
 	read_unlock(&policy_rwlock);
 	if (rc || !mynel) {
@@ -2168,9 +2165,9 @@
 		goto out;
 	}
 
+	rc = -ENOMEM;
 	mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
 	if (!mysids2) {
-		rc = -ENOMEM;
 		kfree(mysids);
 		goto out;
 	}
@@ -2211,7 +2208,7 @@
 	u16 sclass;
 	struct genfs *genfs;
 	struct ocontext *c;
-	int rc = 0, cmp = 0;
+	int rc, cmp = 0;
 
 	while (path[0] == '/' && path[1] == '/')
 		path++;
@@ -2219,6 +2216,7 @@
 	read_lock(&policy_rwlock);
 
 	sclass = unmap_class(orig_sclass);
+	*sid = SECINITSID_UNLABELED;
 
 	for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
 		cmp = strcmp(fstype, genfs->fstype);
@@ -2226,11 +2224,9 @@
 			break;
 	}
 
-	if (!genfs || cmp) {
-		*sid = SECINITSID_UNLABELED;
-		rc = -ENOENT;
+	rc = -ENOENT;
+	if (!genfs || cmp)
 		goto out;
-	}
 
 	for (c = genfs->head; c; c = c->next) {
 		len = strlen(c->u.name);
@@ -2239,21 +2235,18 @@
 			break;
 	}
 
-	if (!c) {
-		*sid = SECINITSID_UNLABELED;
-		rc = -ENOENT;
+	rc = -ENOENT;
+	if (!c)
 		goto out;
-	}
 
 	if (!c->sid[0]) {
-		rc = sidtab_context_to_sid(&sidtab,
-					   &c->context[0],
-					   &c->sid[0]);
+		rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
 		if (rc)
 			goto out;
 	}
 
 	*sid = c->sid[0];
+	rc = 0;
 out:
 	read_unlock(&policy_rwlock);
 	return rc;
@@ -2285,8 +2278,7 @@
 	if (c) {
 		*behavior = c->v.behavior;
 		if (!c->sid[0]) {
-			rc = sidtab_context_to_sid(&sidtab,
-						   &c->context[0],
+			rc = sidtab_context_to_sid(&sidtab, &c->context[0],
 						   &c->sid[0]);
 			if (rc)
 				goto out;
@@ -2309,34 +2301,39 @@
 
 int security_get_bools(int *len, char ***names, int **values)
 {
-	int i, rc = -ENOMEM;
+	int i, rc;
 
 	read_lock(&policy_rwlock);
 	*names = NULL;
 	*values = NULL;
 
+	rc = 0;
 	*len = policydb.p_bools.nprim;
-	if (!*len) {
-		rc = 0;
+	if (!*len)
 		goto out;
-	}
 
-       *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
+	rc = -ENOMEM;
+	*names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
 	if (!*names)
 		goto err;
 
-       *values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
+	rc = -ENOMEM;
+	*values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
 	if (!*values)
 		goto err;
 
 	for (i = 0; i < *len; i++) {
 		size_t name_len;
+
 		(*values)[i] = policydb.bool_val_to_struct[i]->state;
-		name_len = strlen(policydb.p_bool_val_to_name[i]) + 1;
-	       (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
+		name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1;
+
+		rc = -ENOMEM;
+		(*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
 		if (!(*names)[i])
 			goto err;
-		strncpy((*names)[i], policydb.p_bool_val_to_name[i], name_len);
+
+		strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len);
 		(*names)[i][name_len - 1] = 0;
 	}
 	rc = 0;
@@ -2355,24 +2352,23 @@
 
 int security_set_bools(int len, int *values)
 {
-	int i, rc = 0;
+	int i, rc;
 	int lenp, seqno = 0;
 	struct cond_node *cur;
 
 	write_lock_irq(&policy_rwlock);
 
+	rc = -EFAULT;
 	lenp = policydb.p_bools.nprim;
-	if (len != lenp) {
-		rc = -EFAULT;
+	if (len != lenp)
 		goto out;
-	}
 
 	for (i = 0; i < len; i++) {
 		if (!!values[i] != policydb.bool_val_to_struct[i]->state) {
 			audit_log(current->audit_context, GFP_ATOMIC,
 				AUDIT_MAC_CONFIG_CHANGE,
 				"bool=%s val=%d old_val=%d auid=%u ses=%u",
-				policydb.p_bool_val_to_name[i],
+				sym_name(&policydb, SYM_BOOLS, i),
 				!!values[i],
 				policydb.bool_val_to_struct[i]->state,
 				audit_get_loginuid(current),
@@ -2391,7 +2387,7 @@
 	}
 
 	seqno = ++latest_granting;
-
+	rc = 0;
 out:
 	write_unlock_irq(&policy_rwlock);
 	if (!rc) {
@@ -2405,16 +2401,15 @@
 
 int security_get_bool_value(int bool)
 {
-	int rc = 0;
+	int rc;
 	int len;
 
 	read_lock(&policy_rwlock);
 
+	rc = -EFAULT;
 	len = policydb.p_bools.nprim;
-	if (bool >= len) {
-		rc = -EFAULT;
+	if (bool >= len)
 		goto out;
-	}
 
 	rc = policydb.bool_val_to_struct[bool]->state;
 out:
@@ -2464,8 +2459,9 @@
 	struct context newcon;
 	char *s;
 	u32 len;
-	int rc = 0;
+	int rc;
 
+	rc = 0;
 	if (!ss_initialized || !policydb.mls_enabled) {
 		*new_sid = sid;
 		goto out;
@@ -2474,19 +2470,20 @@
 	context_init(&newcon);
 
 	read_lock(&policy_rwlock);
+
+	rc = -EINVAL;
 	context1 = sidtab_search(&sidtab, sid);
 	if (!context1) {
 		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 			__func__, sid);
-		rc = -EINVAL;
 		goto out_unlock;
 	}
 
+	rc = -EINVAL;
 	context2 = sidtab_search(&sidtab, mls_sid);
 	if (!context2) {
 		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 			__func__, mls_sid);
-		rc = -EINVAL;
 		goto out_unlock;
 	}
 
@@ -2500,20 +2497,17 @@
 	/* Check the validity of the new context. */
 	if (!policydb_context_isvalid(&policydb, &newcon)) {
 		rc = convert_context_handle_invalid_context(&newcon);
-		if (rc)
-			goto bad;
+		if (rc) {
+			if (!context_struct_to_string(&newcon, &s, &len)) {
+				audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
+					  "security_sid_mls_copy: invalid context %s", s);
+				kfree(s);
+			}
+			goto out_unlock;
+		}
 	}
 
 	rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
-	goto out_unlock;
-
-bad:
-	if (!context_struct_to_string(&newcon, &s, &len)) {
-		audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
-			  "security_sid_mls_copy: invalid context %s", s);
-		kfree(s);
-	}
-
 out_unlock:
 	read_unlock(&policy_rwlock);
 	context_destroy(&newcon);
@@ -2549,6 +2543,8 @@
 	struct context *nlbl_ctx;
 	struct context *xfrm_ctx;
 
+	*peer_sid = SECSID_NULL;
+
 	/* handle the common (which also happens to be the set of easy) cases
 	 * right away, these two if statements catch everything involving a
 	 * single or absent peer SID/label */
@@ -2567,40 +2563,37 @@
 	/* we don't need to check ss_initialized here since the only way both
 	 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
 	 * security server was initialized and ss_initialized was true */
-	if (!policydb.mls_enabled) {
-		*peer_sid = SECSID_NULL;
+	if (!policydb.mls_enabled)
 		return 0;
-	}
 
 	read_lock(&policy_rwlock);
 
+	rc = -EINVAL;
 	nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
 	if (!nlbl_ctx) {
 		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 		       __func__, nlbl_sid);
-		rc = -EINVAL;
-		goto out_slowpath;
+		goto out;
 	}
+	rc = -EINVAL;
 	xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
 	if (!xfrm_ctx) {
 		printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 		       __func__, xfrm_sid);
-		rc = -EINVAL;
-		goto out_slowpath;
+		goto out;
 	}
 	rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
+	if (rc)
+		goto out;
 
-out_slowpath:
+	/* at present NetLabel SIDs/labels really only carry MLS
+	 * information so if the MLS portion of the NetLabel SID
+	 * matches the MLS portion of the labeled XFRM SID/label
+	 * then pass along the XFRM SID as it is the most
+	 * expressive */
+	*peer_sid = xfrm_sid;
+out:
 	read_unlock(&policy_rwlock);
-	if (rc == 0)
-		/* at present NetLabel SIDs/labels really only carry MLS
-		 * information so if the MLS portion of the NetLabel SID
-		 * matches the MLS portion of the labeled XFRM SID/label
-		 * then pass along the XFRM SID as it is the most
-		 * expressive */
-		*peer_sid = xfrm_sid;
-	else
-		*peer_sid = SECSID_NULL;
 	return rc;
 }
 
@@ -2619,10 +2612,11 @@
 
 int security_get_classes(char ***classes, int *nclasses)
 {
-	int rc = -ENOMEM;
+	int rc;
 
 	read_lock(&policy_rwlock);
 
+	rc = -ENOMEM;
 	*nclasses = policydb.p_classes.nprim;
 	*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
 	if (!*classes)
@@ -2630,7 +2624,7 @@
 
 	rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
 			*classes);
-	if (rc < 0) {
+	if (rc) {
 		int i;
 		for (i = 0; i < *nclasses; i++)
 			kfree((*classes)[i]);
@@ -2657,19 +2651,20 @@
 
 int security_get_permissions(char *class, char ***perms, int *nperms)
 {
-	int rc = -ENOMEM, i;
+	int rc, i;
 	struct class_datum *match;
 
 	read_lock(&policy_rwlock);
 
+	rc = -EINVAL;
 	match = hashtab_search(policydb.p_classes.table, class);
 	if (!match) {
 		printk(KERN_ERR "SELinux: %s:  unrecognized class %s\n",
 			__func__, class);
-		rc = -EINVAL;
 		goto out;
 	}
 
+	rc = -ENOMEM;
 	*nperms = match->permissions.nprim;
 	*perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
 	if (!*perms)
@@ -2678,13 +2673,13 @@
 	if (match->comdatum) {
 		rc = hashtab_map(match->comdatum->permissions.table,
 				get_permissions_callback, *perms);
-		if (rc < 0)
+		if (rc)
 			goto err;
 	}
 
 	rc = hashtab_map(match->permissions.table, get_permissions_callback,
 			*perms);
-	if (rc < 0)
+	if (rc)
 		goto err;
 
 out:
@@ -2796,36 +2791,39 @@
 	switch (field) {
 	case AUDIT_SUBJ_USER:
 	case AUDIT_OBJ_USER:
+		rc = -EINVAL;
 		userdatum = hashtab_search(policydb.p_users.table, rulestr);
 		if (!userdatum)
-			rc = -EINVAL;
-		else
-			tmprule->au_ctxt.user = userdatum->value;
+			goto out;
+		tmprule->au_ctxt.user = userdatum->value;
 		break;
 	case AUDIT_SUBJ_ROLE:
 	case AUDIT_OBJ_ROLE:
+		rc = -EINVAL;
 		roledatum = hashtab_search(policydb.p_roles.table, rulestr);
 		if (!roledatum)
-			rc = -EINVAL;
-		else
-			tmprule->au_ctxt.role = roledatum->value;
+			goto out;
+		tmprule->au_ctxt.role = roledatum->value;
 		break;
 	case AUDIT_SUBJ_TYPE:
 	case AUDIT_OBJ_TYPE:
+		rc = -EINVAL;
 		typedatum = hashtab_search(policydb.p_types.table, rulestr);
 		if (!typedatum)
-			rc = -EINVAL;
-		else
-			tmprule->au_ctxt.type = typedatum->value;
+			goto out;
+		tmprule->au_ctxt.type = typedatum->value;
 		break;
 	case AUDIT_SUBJ_SEN:
 	case AUDIT_SUBJ_CLR:
 	case AUDIT_OBJ_LEV_LOW:
 	case AUDIT_OBJ_LEV_HIGH:
 		rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
+		if (rc)
+			goto out;
 		break;
 	}
-
+	rc = 0;
+out:
 	read_unlock(&policy_rwlock);
 
 	if (rc) {
@@ -3050,7 +3048,7 @@
 int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
 				   u32 *sid)
 {
-	int rc = -EIDRM;
+	int rc;
 	struct context *ctx;
 	struct context ctx_new;
 
@@ -3061,16 +3059,15 @@
 
 	read_lock(&policy_rwlock);
 
-	if (secattr->flags & NETLBL_SECATTR_CACHE) {
+	if (secattr->flags & NETLBL_SECATTR_CACHE)
 		*sid = *(u32 *)secattr->cache->data;
-		rc = 0;
-	} else if (secattr->flags & NETLBL_SECATTR_SECID) {
+	else if (secattr->flags & NETLBL_SECATTR_SECID)
 		*sid = secattr->attr.secid;
-		rc = 0;
-	} else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+	else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+		rc = -EIDRM;
 		ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
 		if (ctx == NULL)
-			goto netlbl_secattr_to_sid_return;
+			goto out;
 
 		context_init(&ctx_new);
 		ctx_new.user = ctx->user;
@@ -3078,34 +3075,35 @@
 		ctx_new.type = ctx->type;
 		mls_import_netlbl_lvl(&ctx_new, secattr);
 		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
-			if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
-						  secattr->attr.mls.cat) != 0)
-				goto netlbl_secattr_to_sid_return;
+			rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
+						   secattr->attr.mls.cat);
+			if (rc)
+				goto out;
 			memcpy(&ctx_new.range.level[1].cat,
 			       &ctx_new.range.level[0].cat,
 			       sizeof(ctx_new.range.level[0].cat));
 		}
-		if (mls_context_isvalid(&policydb, &ctx_new) != 1)
-			goto netlbl_secattr_to_sid_return_cleanup;
+		rc = -EIDRM;
+		if (!mls_context_isvalid(&policydb, &ctx_new))
+			goto out_free;
 
 		rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
-		if (rc != 0)
-			goto netlbl_secattr_to_sid_return_cleanup;
+		if (rc)
+			goto out_free;
 
 		security_netlbl_cache_add(secattr, *sid);
 
 		ebitmap_destroy(&ctx_new.range.level[0].cat);
-	} else {
+	} else
 		*sid = SECSID_NULL;
-		rc = 0;
-	}
 
-netlbl_secattr_to_sid_return:
+	read_unlock(&policy_rwlock);
+	return 0;
+out_free:
+	ebitmap_destroy(&ctx_new.range.level[0].cat);
+out:
 	read_unlock(&policy_rwlock);
 	return rc;
-netlbl_secattr_to_sid_return_cleanup:
-	ebitmap_destroy(&ctx_new.range.level[0].cat);
-	goto netlbl_secattr_to_sid_return;
 }
 
 /**
@@ -3127,28 +3125,23 @@
 		return 0;
 
 	read_lock(&policy_rwlock);
+
+	rc = -ENOENT;
 	ctx = sidtab_search(&sidtab, sid);
-	if (ctx == NULL) {
-		rc = -ENOENT;
-		goto netlbl_sid_to_secattr_failure;
-	}
-	secattr->domain = kstrdup(policydb.p_type_val_to_name[ctx->type - 1],
+	if (ctx == NULL)
+		goto out;
+
+	rc = -ENOMEM;
+	secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
 				  GFP_ATOMIC);
-	if (secattr->domain == NULL) {
-		rc = -ENOMEM;
-		goto netlbl_sid_to_secattr_failure;
-	}
+	if (secattr->domain == NULL)
+		goto out;
+
 	secattr->attr.secid = sid;
 	secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
 	mls_export_netlbl_lvl(ctx, secattr);
 	rc = mls_export_netlbl_cat(ctx, secattr);
-	if (rc != 0)
-		goto netlbl_sid_to_secattr_failure;
-	read_unlock(&policy_rwlock);
-
-	return 0;
-
-netlbl_sid_to_secattr_failure:
+out:
 	read_unlock(&policy_rwlock);
 	return rc;
 }
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e817989..5840a35 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -147,6 +147,17 @@
 	return rc;
 }
 
+static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
+{
+	BUG_ON(loc >= SIDTAB_CACHE_LEN);
+
+	while (loc > 0) {
+		s->cache[loc] = s->cache[loc - 1];
+		loc--;
+	}
+	s->cache[0] = n;
+}
+
 static inline u32 sidtab_search_context(struct sidtab *s,
 						  struct context *context)
 {
@@ -156,14 +167,33 @@
 	for (i = 0; i < SIDTAB_SIZE; i++) {
 		cur = s->htable[i];
 		while (cur) {
-			if (context_cmp(&cur->context, context))
+			if (context_cmp(&cur->context, context)) {
+				sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
 				return cur->sid;
+			}
 			cur = cur->next;
 		}
 	}
 	return 0;
 }
 
+static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
+{
+	int i;
+	struct sidtab_node *node;
+
+	for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
+		node = s->cache[i];
+		if (unlikely(!node))
+			return 0;
+		if (context_cmp(&node->context, context)) {
+			sidtab_update_cache(s, node, i);
+			return node->sid;
+		}
+	}
+	return 0;
+}
+
 int sidtab_context_to_sid(struct sidtab *s,
 			  struct context *context,
 			  u32 *out_sid)
@@ -174,7 +204,9 @@
 
 	*out_sid = SECSID_NULL;
 
-	sid = sidtab_search_context(s, context);
+	sid = sidtab_search_cache(s, context);
+	if (!sid)
+		sid = sidtab_search_context(s, context);
 	if (!sid) {
 		spin_lock_irqsave(&s->lock, flags);
 		/* Rescan now that we hold the lock. */
@@ -259,12 +291,15 @@
 void sidtab_set(struct sidtab *dst, struct sidtab *src)
 {
 	unsigned long flags;
+	int i;
 
 	spin_lock_irqsave(&src->lock, flags);
 	dst->htable = src->htable;
 	dst->nel = src->nel;
 	dst->next_sid = src->next_sid;
 	dst->shutdown = 0;
+	for (i = 0; i < SIDTAB_CACHE_LEN; i++)
+		dst->cache[i] = NULL;
 	spin_unlock_irqrestore(&src->lock, flags);
 }
 
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index 64ea5b1..84dc154 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -26,6 +26,8 @@
 	unsigned int nel;	/* number of elements */
 	unsigned int next_sid;	/* next SID to allocate */
 	unsigned char shutdown;
+#define SIDTAB_CACHE_LEN	3
+	struct sidtab_node *cache[SIDTAB_CACHE_LEN];
 	spinlock_t lock;
 };
 
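
The two sidtab hunks above add a small move-to-front lookup cache in front of the
full hash-table scan: a hit in any of the three cached slots is promoted toward
slot 0, so repeated conversions of the same context avoid walking the table. A
minimal standalone sketch of that scheme, using illustrative names rather than
the kernel's own types:

	#include <stddef.h>

	/* Three most-recently-used entries; slots fill from the front. */
	#define CACHE_LEN 3

	struct entry {
		int key;
		int value;
	};

	static struct entry *cache[CACHE_LEN];

	/* Shift older entries down one slot and put the hit at slot 0. */
	static void cache_promote(struct entry *e, int loc)
	{
		while (loc > 0) {
			cache[loc] = cache[loc - 1];
			loc--;
		}
		cache[0] = e;
	}

	/* Return the cached entry for key, promoting it on a hit. */
	static struct entry *cache_lookup(int key)
	{
		int i;

		for (i = 0; i < CACHE_LEN; i++) {
			struct entry *e = cache[i];

			if (!e)
				return NULL;	/* empty slot: nothing older behind it */
			if (e->key == key) {
				cache_promote(e, i);
				return e;
			}
		}
		return NULL;
	}
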
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 43ae747..129c4eb 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -51,11 +51,18 @@
  */
 struct inode_smack {
 	char		*smk_inode;	/* label of the fso */
+	char		*smk_task;	/* label of the task */
 	struct mutex	smk_lock;	/* initialization lock */
 	int		smk_flags;	/* smack inode flags */
 };
 
+struct task_smack {
+	char		*smk_task;	/* label used for access control */
+	char		*smk_forked;	/* label when forked */
+};
+
 #define	SMK_INODE_INSTANT	0x01	/* inode is instantiated */
+#define	SMK_INODE_TRANSMUTE	0x02	/* directory is transmuting */
 
 /*
  * A label access rule.
@@ -161,6 +168,10 @@
 #define SMACK_CIPSO_MAXCATNUM           239     /* CIPSO 2.2 standard */
 
 /*
+ * Flag for transmute access
+ */
+#define MAY_TRANSMUTE	64
+/*
  * Just to make the common cases easier to deal with
  */
 #define MAY_ANY		(MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
@@ -191,6 +202,7 @@
 /*
  * These functions are in smack_access.c
  */
+int smk_access_entry(char *, char *);
 int smk_access(char *, char *, int, struct smk_audit_info *);
 int smk_curacc(char *, u32, struct smk_audit_info *);
 int smack_to_cipso(const char *, struct smack_cipso *);
@@ -234,6 +246,15 @@
 }
 
 /*
+ * Is the directory transmuting?
+ */
+static inline int smk_inode_transmutable(const struct inode *isp)
+{
+	struct inode_smack *sip = isp->i_security;
+	return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0;
+}
+
+/*
  * Present a pointer to the smack label in an inode blob.
  */
 static inline char *smk_of_inode(const struct inode *isp)
@@ -243,6 +264,30 @@
 }
 
 /*
+ * Present a pointer to the smack label in a task blob.
+ */
+static inline char *smk_of_task(const struct task_smack *tsp)
+{
+	return tsp->smk_task;
+}
+
+/*
+ * Present a pointer to the forked smack label in a task blob.
+ */
+static inline char *smk_of_forked(const struct task_smack *tsp)
+{
+	return tsp->smk_forked;
+}
+
+/*
+ * Present a pointer to the smack label in the current task blob.
+ */
+static inline char *smk_of_current(void)
+{
+	return smk_of_task(current_security());
+}
+
+/*
  * logging functions
  */
 #define SMACK_AUDIT_DENIED 0x1
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index f4fac64..7ba8478 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -67,6 +67,46 @@
 int log_policy = SMACK_AUDIT_DENIED;
 
 /**
+ * smk_access_entry - look up matching access rule
+ * @subject_label: a pointer to the subject's Smack label
+ * @object_label: a pointer to the object's Smack label
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns the access mode from the matching
+ * rule if one is found, or MAY_NOT otherwise.
+ *
+ * NOTE:
+ * Even though Smack labels are usually shared on smack_list,
+ * labels that come in off the network can't be imported
+ * and added to the list for locking reasons.
+ *
+ * Therefore, it is necessary to check the contents of the labels,
+ * not just the pointer values. Of course, in most cases the labels
+ * will be on the list, so checking the pointers may be a worthwhile
+ * optimization.
+ */
+int smk_access_entry(char *subject_label, char *object_label)
+{
+	u32 may = MAY_NOT;
+	struct smack_rule *srp;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(srp, &smack_rule_list, list) {
+		if (srp->smk_subject == subject_label ||
+		    strcmp(srp->smk_subject, subject_label) == 0) {
+			if (srp->smk_object == object_label ||
+			    strcmp(srp->smk_object, object_label) == 0) {
+				may = srp->smk_access;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return may;
+}
+
+/**
  * smk_access - determine if a subject has a specific access to an object
  * @subject_label: a pointer to the subject's Smack label
  * @object_label: a pointer to the object's Smack label
@@ -90,7 +130,6 @@
 	       struct smk_audit_info *a)
 {
 	u32 may = MAY_NOT;
-	struct smack_rule *srp;
 	int rc = 0;
 
 	/*
@@ -144,18 +183,7 @@
 	 * access (e.g. read is included in readwrite) it's
 	 * good.
 	 */
-	rcu_read_lock();
-	list_for_each_entry_rcu(srp, &smack_rule_list, list) {
-		if (srp->smk_subject == subject_label ||
-		    strcmp(srp->smk_subject, subject_label) == 0) {
-			if (srp->smk_object == object_label ||
-			    strcmp(srp->smk_object, object_label) == 0) {
-				may = srp->smk_access;
-				break;
-			}
-		}
-	}
-	rcu_read_unlock();
+	may = smk_access_entry(subject_label, object_label);
 	/*
 	 * This is a bit map operation.
 	 */
@@ -185,7 +213,7 @@
 int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
 {
 	int rc;
-	char *sp = current_security();
+	char *sp = smk_of_current();
 
 	rc = smk_access(sp, obj_label, mode, NULL);
 	if (rc == 0)
@@ -196,7 +224,7 @@
 	 * only one that gets privilege and current does not
 	 * have that label.
 	 */
-	if (smack_onlycap != NULL && smack_onlycap != current->cred->security)
+	if (smack_onlycap != NULL && smack_onlycap != sp)
 		goto out_audit;
 
 	if (capable(CAP_MAC_OVERRIDE))
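
As a hedged illustration only (not part of the patch), the bit map returned by the
newly exported smk_access_entry() is meant to be tested against the MAY_* flags,
including the new MAY_TRANSMUTE bit; smack_inode_init_security() below does
exactly this. The helper name here is hypothetical:

	#include "smack.h"	/* smk_access_entry(), MAY_TRANSMUTE */

	/* Would an object created by 'subject' in a directory labeled
	 * 'dir_label' be allowed to transmute to the directory's label? */
	static int smk_would_transmute(char *subject, char *dir_label)
	{
		int may = smk_access_entry(subject, dir_label);

		/* MAY_TRANSMUTE is granted only by rules loaded with the new 't' mode */
		return (may & MAY_TRANSMUTE) != 0;
	}
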
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ccb71a0..533bf32 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -3,12 +3,14 @@
  *
  *  This file contains the smack hook function implementations.
  *
- *  Author:
+ *  Authors:
  *	Casey Schaufler <casey@schaufler-ca.com>
+ *	Jarkko Sakkinen <ext-jarkko.2.sakkinen@nokia.com>
  *
  *  Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
  *  Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
  *                Paul Moore <paul.moore@hp.com>
+ *  Copyright (C) 2010 Nokia Corporation
  *
  *	This program is free software; you can redistribute it and/or modify
  *	it under the terms of the GNU General Public License version 2,
@@ -35,6 +37,9 @@
 
 #define task_security(task)	(task_cred_xxx((task), security))
 
+#define TRANS_TRUE	"TRUE"
+#define TRANS_TRUE_SIZE	4
+
 /**
  * smk_fetch - Fetch the smack label from a file.
  * @ip: a pointer to the inode
@@ -43,7 +48,7 @@
  * Returns a pointer to the master list entry for the Smack label
  * or NULL if there was no label to fetch.
  */
-static char *smk_fetch(struct inode *ip, struct dentry *dp)
+static char *smk_fetch(const char *name, struct inode *ip, struct dentry *dp)
 {
 	int rc;
 	char in[SMK_LABELLEN];
@@ -51,7 +56,7 @@
 	if (ip->i_op->getxattr == NULL)
 		return NULL;
 
-	rc = ip->i_op->getxattr(dp, XATTR_NAME_SMACK, in, SMK_LABELLEN);
+	rc = ip->i_op->getxattr(dp, name, in, SMK_LABELLEN);
 	if (rc < 0)
 		return NULL;
 
@@ -103,8 +108,8 @@
 	if (rc != 0)
 		return rc;
 
-	sp = current_security();
-	tsp = task_security(ctp);
+	sp = smk_of_current();
+	tsp = smk_of_task(task_security(ctp));
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
 	smk_ad_setfield_u_tsk(&ad, ctp);
 
@@ -138,8 +143,8 @@
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
 	smk_ad_setfield_u_tsk(&ad, ptp);
 
-	sp = current_security();
-	tsp = task_security(ptp);
+	sp = smk_of_current();
+	tsp = smk_of_task(task_security(ptp));
 	/* we won't log here, because rc can be overriden */
 	rc = smk_access(tsp, sp, MAY_READWRITE, NULL);
 	if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE))
@@ -160,7 +165,7 @@
 static int smack_syslog(int typefrom_file)
 {
 	int rc = 0;
-	char *sp = current_security();
+	char *sp = smk_of_current();
 
 	if (capable(CAP_MAC_OVERRIDE))
 		return 0;
@@ -391,6 +396,40 @@
 }
 
 /*
+ * BPRM hooks
+ */
+
+static int smack_bprm_set_creds(struct linux_binprm *bprm)
+{
+	struct task_smack *tsp = bprm->cred->security;
+	struct inode_smack *isp;
+	struct dentry *dp;
+	int rc;
+
+	rc = cap_bprm_set_creds(bprm);
+	if (rc != 0)
+		return rc;
+
+	if (bprm->cred_prepared)
+		return 0;
+
+	if (bprm->file == NULL || bprm->file->f_dentry == NULL)
+		return 0;
+
+	dp = bprm->file->f_dentry;
+
+	if (dp->d_inode == NULL)
+		return 0;
+
+	isp = dp->d_inode->i_security;
+
+	if (isp->smk_task != NULL)
+		tsp->smk_task = isp->smk_task;
+
+	return 0;
+}
+
+/*
  * Inode hooks
  */
 
@@ -402,7 +441,7 @@
  */
 static int smack_inode_alloc_security(struct inode *inode)
 {
-	inode->i_security = new_inode_smack(current_security());
+	inode->i_security = new_inode_smack(smk_of_current());
 	if (inode->i_security == NULL)
 		return -ENOMEM;
 	return 0;
@@ -434,6 +473,8 @@
 				     char **name, void **value, size_t *len)
 {
 	char *isp = smk_of_inode(inode);
+	char *dsp = smk_of_inode(dir);
+	u32 may;
 
 	if (name) {
 		*name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL);
@@ -442,6 +483,16 @@
 	}
 
 	if (value) {
+		may = smk_access_entry(smk_of_current(), dsp);
+
+		/*
+		 * If the access rule allows transmutation and
+		 * the directory requests transmutation then
+		 * by all means transmute.
+		 */
+		if (((may & MAY_TRANSMUTE) != 0) && smk_inode_transmutable(dir))
+			isp = dsp;
+
 		*value = kstrdup(isp, GFP_KERNEL);
 		if (*value == NULL)
 			return -ENOMEM;
@@ -664,7 +715,8 @@
 
 	if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
 	    strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
-	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
 		if (!capable(CAP_MAC_ADMIN))
 			rc = -EPERM;
 		/*
@@ -674,6 +726,12 @@
 		if (size == 0 || size >= SMK_LABELLEN ||
 		    smk_import(value, size) == NULL)
 			rc = -EINVAL;
+	} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+		if (!capable(CAP_MAC_ADMIN))
+			rc = -EPERM;
+		if (size != TRANS_TRUE_SIZE ||
+		    strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+			rc = -EINVAL;
 	} else
 		rc = cap_inode_setxattr(dentry, name, value, size, flags);
 
@@ -700,26 +758,23 @@
 static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
 				      const void *value, size_t size, int flags)
 {
-	struct inode_smack *isp;
 	char *nsp;
+	struct inode_smack *isp = dentry->d_inode->i_security;
 
-	/*
-	 * Not SMACK
-	 */
-	if (strcmp(name, XATTR_NAME_SMACK))
-		return;
-
-	isp = dentry->d_inode->i_security;
-
-	/*
-	 * No locking is done here. This is a pointer
-	 * assignment.
-	 */
-	nsp = smk_import(value, size);
-	if (nsp != NULL)
-		isp->smk_inode = nsp;
-	else
-		isp->smk_inode = smack_known_invalid.smk_known;
+	if (strcmp(name, XATTR_NAME_SMACK) == 0) {
+		nsp = smk_import(value, size);
+		if (nsp != NULL)
+			isp->smk_inode = nsp;
+		else
+			isp->smk_inode = smack_known_invalid.smk_known;
+	} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
+		nsp = smk_import(value, size);
+		if (nsp != NULL)
+			isp->smk_task = nsp;
+		else
+			isp->smk_task = smack_known_invalid.smk_known;
+	} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
+		isp->smk_flags |= SMK_INODE_TRANSMUTE;
 
 	return;
 }
@@ -752,12 +807,15 @@
  */
 static int smack_inode_removexattr(struct dentry *dentry, const char *name)
 {
+	struct inode_smack *isp;
 	struct smk_audit_info ad;
 	int rc = 0;
 
 	if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
 	    strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
-	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+	    strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+	    strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
 		if (!capable(CAP_MAC_ADMIN))
 			rc = -EPERM;
 	} else
@@ -768,6 +826,11 @@
 	if (rc == 0)
 		rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
 
+	if (rc == 0) {
+		isp = dentry->d_inode->i_security;
+		isp->smk_task = NULL;
+	}
+
 	return rc;
 }
 
@@ -895,7 +958,7 @@
  */
 static int smack_file_alloc_security(struct file *file)
 {
-	file->f_security = current_security();
+	file->f_security = smk_of_current();
 	return 0;
 }
 
@@ -1005,7 +1068,7 @@
  */
 static int smack_file_set_fowner(struct file *file)
 {
-	file->f_security = current_security();
+	file->f_security = smk_of_current();
 	return 0;
 }
 
@@ -1025,7 +1088,7 @@
 {
 	struct file *file;
 	int rc;
-	char *tsp = tsk->cred->security;
+	char *tsp = smk_of_task(tsk->cred->security);
 	struct smk_audit_info ad;
 
 	/*
@@ -1082,7 +1145,9 @@
  */
 static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
 {
-	cred->security = NULL;
+	cred->security = kzalloc(sizeof(struct task_smack), gfp);
+	if (cred->security == NULL)
+		return -ENOMEM;
 	return 0;
 }
 
@@ -1097,7 +1162,7 @@
  */
 static void smack_cred_free(struct cred *cred)
 {
-	cred->security = NULL;
+	kfree(cred->security);
 }
 
 /**
@@ -1111,7 +1176,16 @@
 static int smack_cred_prepare(struct cred *new, const struct cred *old,
 			      gfp_t gfp)
 {
-	new->security = old->security;
+	struct task_smack *old_tsp = old->security;
+	struct task_smack *new_tsp;
+
+	new_tsp = kzalloc(sizeof(struct task_smack), gfp);
+	if (new_tsp == NULL)
+		return -ENOMEM;
+
+	new_tsp->smk_task = old_tsp->smk_task;
+	new_tsp->smk_forked = old_tsp->smk_task;
+	new->security = new_tsp;
 	return 0;
 }
 
@@ -1124,7 +1198,11 @@
  */
 static void smack_cred_transfer(struct cred *new, const struct cred *old)
 {
-	new->security = old->security;
+	struct task_smack *old_tsp = old->security;
+	struct task_smack *new_tsp = new->security;
+
+	new_tsp->smk_task = old_tsp->smk_task;
+	new_tsp->smk_forked = old_tsp->smk_task;
 }
 
 /**
@@ -1136,12 +1214,13 @@
  */
 static int smack_kernel_act_as(struct cred *new, u32 secid)
 {
+	struct task_smack *new_tsp = new->security;
 	char *smack = smack_from_secid(secid);
 
 	if (smack == NULL)
 		return -EINVAL;
 
-	new->security = smack;
+	new_tsp->smk_task = smack;
 	return 0;
 }
 
@@ -1157,8 +1236,10 @@
 					struct inode *inode)
 {
 	struct inode_smack *isp = inode->i_security;
+	struct task_smack *tsp = new->security;
 
-	new->security = isp->smk_inode;
+	tsp->smk_forked = isp->smk_inode;
+	tsp->smk_task = isp->smk_inode;
 	return 0;
 }
 
@@ -1175,7 +1256,7 @@
 
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
 	smk_ad_setfield_u_tsk(&ad, p);
-	return smk_curacc(task_security(p), access, &ad);
+	return smk_curacc(smk_of_task(task_security(p)), access, &ad);
 }
 
 /**
@@ -1221,7 +1302,7 @@
  */
 static void smack_task_getsecid(struct task_struct *p, u32 *secid)
 {
-	*secid = smack_to_secid(task_security(p));
+	*secid = smack_to_secid(smk_of_task(task_security(p)));
 }
 
 /**
@@ -1333,14 +1414,15 @@
 	 * can write the receiver.
 	 */
 	if (secid == 0)
-		return smk_curacc(task_security(p), MAY_WRITE, &ad);
+		return smk_curacc(smk_of_task(task_security(p)), MAY_WRITE,
+				  &ad);
 	/*
 	 * If the secid isn't 0 we're dealing with some USB IO
 	 * specific behavior. This is not clean. For one thing
 	 * we can't take privilege into account.
 	 */
-	return smk_access(smack_from_secid(secid), task_security(p),
-			  MAY_WRITE, &ad);
+	return smk_access(smack_from_secid(secid),
+			  smk_of_task(task_security(p)), MAY_WRITE, &ad);
 }
 
 /**
@@ -1352,12 +1434,12 @@
 static int smack_task_wait(struct task_struct *p)
 {
 	struct smk_audit_info ad;
-	char *sp = current_security();
-	char *tsp = task_security(p);
+	char *sp = smk_of_current();
+	char *tsp = smk_of_forked(task_security(p));
 	int rc;
 
 	/* we don't log here, we can be overriden */
-	rc = smk_access(sp, tsp, MAY_WRITE, NULL);
+	rc = smk_access(tsp, sp, MAY_WRITE, NULL);
 	if (rc == 0)
 		goto out_log;
 
@@ -1378,7 +1460,7 @@
  out_log:
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
 	smk_ad_setfield_u_tsk(&ad, p);
-	smack_log(sp, tsp, MAY_WRITE, rc, &ad);
+	smack_log(tsp, sp, MAY_WRITE, rc, &ad);
 	return rc;
 }
 
@@ -1392,7 +1474,7 @@
 static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
 {
 	struct inode_smack *isp = inode->i_security;
-	isp->smk_inode = task_security(p);
+	isp->smk_inode = smk_of_task(task_security(p));
 }
 
 /*
@@ -1411,7 +1493,7 @@
  */
 static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
 {
-	char *csp = current_security();
+	char *csp = smk_of_current();
 	struct socket_smack *ssp;
 
 	ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
@@ -1667,10 +1749,13 @@
 		ssp->smk_in = sp;
 	else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
 		ssp->smk_out = sp;
-		rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
-		if (rc != 0)
-			printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n",
-			       __func__, -rc);
+		if (sock->sk->sk_family != PF_UNIX) {
+			rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+			if (rc != 0)
+				printk(KERN_WARNING
+					"Smack: \"%s\" netlbl error %d.\n",
+					__func__, -rc);
+		}
 	} else
 		return -EOPNOTSUPP;
 
@@ -1749,7 +1834,7 @@
  */
 static int smack_msg_msg_alloc_security(struct msg_msg *msg)
 {
-	msg->security = current_security();
+	msg->security = smk_of_current();
 	return 0;
 }
 
@@ -1785,7 +1870,7 @@
 {
 	struct kern_ipc_perm *isp = &shp->shm_perm;
 
-	isp->security = current_security();
+	isp->security = smk_of_current();
 	return 0;
 }
 
@@ -1908,7 +1993,7 @@
 {
 	struct kern_ipc_perm *isp = &sma->sem_perm;
 
-	isp->security = current_security();
+	isp->security = smk_of_current();
 	return 0;
 }
 
@@ -2026,7 +2111,7 @@
 {
 	struct kern_ipc_perm *kisp = &msq->q_perm;
 
-	kisp->security = current_security();
+	kisp->security = smk_of_current();
 	return 0;
 }
 
@@ -2198,9 +2283,11 @@
 	struct super_block *sbp;
 	struct superblock_smack *sbsp;
 	struct inode_smack *isp;
-	char *csp = current_security();
+	char *csp = smk_of_current();
 	char *fetched;
 	char *final;
+	char trattr[TRANS_TRUE_SIZE];
+	int transflag = 0;
 	struct dentry *dp;
 
 	if (inode == NULL)
@@ -2267,9 +2354,10 @@
 		break;
 	case SOCKFS_MAGIC:
 		/*
-		 * Casey says sockets get the smack of the task.
+		 * Socket access is controlled by the socket
+		 * structures associated with the task involved.
 		 */
-		final = csp;
+		final = smack_known_star.smk_known;
 		break;
 	case PROC_SUPER_MAGIC:
 		/*
@@ -2296,7 +2384,16 @@
 		/*
 		 * This isn't an understood special case.
 		 * Get the value from the xattr.
-		 *
+		 */
+
+		/*
+		 * UNIX domain sockets use lower level socket data.
+		 */
+		if (S_ISSOCK(inode->i_mode)) {
+			final = smack_known_star.smk_known;
+			break;
+		}
+		/*
 		 * No xattr support means, alas, no SMACK label.
 		 * Use the aforeapplied default.
 		 * It would be curious if the label of the task
@@ -2308,9 +2405,21 @@
 		 * Get the dentry for xattr.
 		 */
 		dp = dget(opt_dentry);
-		fetched = smk_fetch(inode, dp);
-		if (fetched != NULL)
+		fetched = smk_fetch(XATTR_NAME_SMACK, inode, dp);
+		if (fetched != NULL) {
 			final = fetched;
+			if (S_ISDIR(inode->i_mode)) {
+				trattr[0] = '\0';
+				inode->i_op->getxattr(dp,
+					XATTR_NAME_SMACKTRANSMUTE,
+					trattr, TRANS_TRUE_SIZE);
+				if (strncmp(trattr, TRANS_TRUE,
+					    TRANS_TRUE_SIZE) == 0)
+					transflag = SMK_INODE_TRANSMUTE;
+			}
+		}
+		isp->smk_task = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
+
 		dput(dp);
 		break;
 	}
@@ -2320,7 +2429,7 @@
 	else
 		isp->smk_inode = final;
 
-	isp->smk_flags |= SMK_INODE_INSTANT;
+	isp->smk_flags |= (SMK_INODE_INSTANT | transflag);
 
 unlockandout:
 	mutex_unlock(&isp->smk_lock);
@@ -2345,7 +2454,7 @@
 	if (strcmp(name, "current") != 0)
 		return -EINVAL;
 
-	cp = kstrdup(task_security(p), GFP_KERNEL);
+	cp = kstrdup(smk_of_task(task_security(p)), GFP_KERNEL);
 	if (cp == NULL)
 		return -ENOMEM;
 
@@ -2369,6 +2478,8 @@
 static int smack_setprocattr(struct task_struct *p, char *name,
 			     void *value, size_t size)
 {
+	struct task_smack *tsp;
+	struct task_smack *oldtsp;
 	struct cred *new;
 	char *newsmack;
 
@@ -2398,10 +2509,18 @@
 	if (newsmack == smack_known_web.smk_known)
 		return -EPERM;
 
+	oldtsp = p->cred->security;
 	new = prepare_creds();
 	if (new == NULL)
 		return -ENOMEM;
-	new->security = newsmack;
+	tsp = kzalloc(sizeof(struct task_smack), GFP_KERNEL);
+	if (tsp == NULL) {
+		kfree(new);
+		return -ENOMEM;
+	}
+	tsp->smk_task = newsmack;
+	tsp->smk_forked = oldtsp->smk_forked;
+	new->security = tsp;
 	commit_creds(new);
 	return size;
 }
@@ -2418,14 +2537,18 @@
 static int smack_unix_stream_connect(struct sock *sock,
 				     struct sock *other, struct sock *newsk)
 {
-	struct inode *sp = SOCK_INODE(sock->sk_socket);
-	struct inode *op = SOCK_INODE(other->sk_socket);
+	struct socket_smack *ssp = sock->sk_security;
+	struct socket_smack *osp = other->sk_security;
 	struct smk_audit_info ad;
+	int rc = 0;
 
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
 	smk_ad_setfield_u_net_sk(&ad, other);
-	return smk_access(smk_of_inode(sp), smk_of_inode(op),
-				 MAY_READWRITE, &ad);
+
+	if (!capable(CAP_MAC_OVERRIDE))
+		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+
+	return rc;
 }
 
 /**
@@ -2438,13 +2561,18 @@
  */
 static int smack_unix_may_send(struct socket *sock, struct socket *other)
 {
-	struct inode *sp = SOCK_INODE(sock);
-	struct inode *op = SOCK_INODE(other);
+	struct socket_smack *ssp = sock->sk->sk_security;
+	struct socket_smack *osp = other->sk->sk_security;
 	struct smk_audit_info ad;
+	int rc = 0;
 
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
 	smk_ad_setfield_u_net_sk(&ad, other->sk);
-	return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_WRITE, &ad);
+
+	if (!capable(CAP_MAC_OVERRIDE))
+		rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+
+	return rc;
 }
 
 /**
@@ -2629,7 +2757,7 @@
 
 /**
  * smack_socket_getpeersec_dgram - pull in packet label
- * @sock: the socket
+ * @sock: the peer socket
  * @skb: packet data
  * @secid: pointer to where to put the secid of the packet
  *
@@ -2640,41 +2768,39 @@
 
 {
 	struct netlbl_lsm_secattr secattr;
-	struct sock *sk;
+	struct socket_smack *sp;
 	char smack[SMK_LABELLEN];
-	int family = PF_INET;
-	u32 s;
+	int family = PF_UNSPEC;
+	u32 s = 0;	/* 0 is the invalid secid */
 	int rc;
 
-	/*
-	 * Only works for families with packets.
-	 */
-	if (sock != NULL) {
-		sk = sock->sk;
-		if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
-			return 0;
-		family = sk->sk_family;
+	if (skb != NULL) {
+		if (skb->protocol == htons(ETH_P_IP))
+			family = PF_INET;
+		else if (skb->protocol == htons(ETH_P_IPV6))
+			family = PF_INET6;
 	}
-	/*
-	 * Translate what netlabel gave us.
-	 */
-	netlbl_secattr_init(&secattr);
-	rc = netlbl_skbuff_getattr(skb, family, &secattr);
-	if (rc == 0)
-		smack_from_secattr(&secattr, smack);
-	netlbl_secattr_destroy(&secattr);
+	if (family == PF_UNSPEC && sock != NULL)
+		family = sock->sk->sk_family;
 
-	/*
-	 * Give up if we couldn't get anything
-	 */
-	if (rc != 0)
-		return rc;
-
-	s = smack_to_secid(smack);
+	if (family == PF_UNIX) {
+		sp = sock->sk->sk_security;
+		s = smack_to_secid(sp->smk_out);
+	} else if (family == PF_INET || family == PF_INET6) {
+		/*
+		 * Translate what netlabel gave us.
+		 */
+		netlbl_secattr_init(&secattr);
+		rc = netlbl_skbuff_getattr(skb, family, &secattr);
+		if (rc == 0) {
+			smack_from_secattr(&secattr, smack);
+			s = smack_to_secid(smack);
+		}
+		netlbl_secattr_destroy(&secattr);
+	}
+	*secid = s;
 	if (s == 0)
 		return -EINVAL;
-
-	*secid = s;
 	return 0;
 }
 
@@ -2695,7 +2821,7 @@
 		return;
 
 	ssp = sk->sk_security;
-	ssp->smk_in = ssp->smk_out = current_security();
+	ssp->smk_in = ssp->smk_out = smk_of_current();
 	/* cssp->smk_packet is already set in smack_inet_csk_clone() */
 }
 
@@ -2816,7 +2942,7 @@
 static int smack_key_alloc(struct key *key, const struct cred *cred,
 			   unsigned long flags)
 {
-	key->security = cred->security;
+	key->security = smk_of_task(cred->security);
 	return 0;
 }
 
@@ -2845,6 +2971,7 @@
 {
 	struct key *keyp;
 	struct smk_audit_info ad;
+	char *tsp = smk_of_task(cred->security);
 
 	keyp = key_ref_to_ptr(key_ref);
 	if (keyp == NULL)
@@ -2858,14 +2985,14 @@
 	/*
 	 * This should not occur
 	 */
-	if (cred->security == NULL)
+	if (tsp == NULL)
 		return -EACCES;
 #ifdef CONFIG_AUDIT
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
 	ad.a.u.key_struct.key = keyp->serial;
 	ad.a.u.key_struct.key_desc = keyp->description;
 #endif
-	return smk_access(cred->security, keyp->security,
+	return smk_access(tsp, keyp->security,
 				 MAY_READWRITE, &ad);
 }
 #endif /* CONFIG_KEYS */
@@ -3067,6 +3194,8 @@
 	.sb_mount = 			smack_sb_mount,
 	.sb_umount = 			smack_sb_umount,
 
+	.bprm_set_creds =		smack_bprm_set_creds,
+
 	.inode_alloc_security = 	smack_inode_alloc_security,
 	.inode_free_security = 		smack_inode_free_security,
 	.inode_init_security = 		smack_inode_init_security,
@@ -3203,9 +3332,16 @@
 static __init int smack_init(void)
 {
 	struct cred *cred;
+	struct task_smack *tsp;
 
-	if (!security_module_enable(&smack_ops))
+	tsp = kzalloc(sizeof(struct task_smack), GFP_KERNEL);
+	if (tsp == NULL)
+		return -ENOMEM;
+
+	if (!security_module_enable(&smack_ops)) {
+		kfree(tsp);
 		return 0;
+	}
 
 	printk(KERN_INFO "Smack:  Initializing.\n");
 
@@ -3213,7 +3349,9 @@
 	 * Set the security state for the initial task.
 	 */
 	cred = (struct cred *) current->cred;
-	cred->security = &smack_known_floor.smk_known;
+	tsp->smk_forked = smack_known_floor.smk_known;
+	tsp->smk_task = smack_known_floor.smk_known;
+	cred->security = tsp;
 
 	/* initialize the smack_know_list */
 	init_smack_know_list();
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index dc1fd62..362d5ed 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -109,9 +109,12 @@
  * SMK_ACCESSLEN: Maximum length for a rule access field
  * SMK_LOADLEN: Smack rule length
  */
-#define SMK_ACCESS    "rwxa"
-#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
-#define SMK_LOADLEN   (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
+#define SMK_OACCESS	"rwxa"
+#define SMK_ACCESS	"rwxat"
+#define SMK_OACCESSLEN	(sizeof(SMK_OACCESS) - 1)
+#define SMK_ACCESSLEN	(sizeof(SMK_ACCESS) - 1)
+#define SMK_OLOADLEN	(SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
+#define SMK_LOADLEN	(SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
 
 /**
  * smk_netlabel_audit_set - fill a netlbl_audit struct
@@ -121,7 +124,7 @@
 {
 	nap->loginuid = audit_get_loginuid(current);
 	nap->sessionid = audit_get_sessionid(current);
-	nap->secid = smack_to_secid(current_security());
+	nap->secid = smack_to_secid(smk_of_current());
 }
 
 /*
@@ -175,6 +178,8 @@
 		seq_putc(s, 'x');
 	if (srp->smk_access & MAY_APPEND)
 		seq_putc(s, 'a');
+	if (srp->smk_access & MAY_TRANSMUTE)
+		seq_putc(s, 't');
 	if (srp->smk_access == 0)
 		seq_putc(s, '-');
 
@@ -273,10 +278,15 @@
 	if (!capable(CAP_MAC_ADMIN))
 		return -EPERM;
 
-	if (*ppos != 0 || count != SMK_LOADLEN)
+	if (*ppos != 0)
+		return -EINVAL;
+	/*
+	 * Minor hack for backward compatibility
+	 */
+	if (count < (SMK_OLOADLEN) || count > SMK_LOADLEN)
 		return -EINVAL;
 
-	data = kzalloc(count, GFP_KERNEL);
+	data = kzalloc(SMK_LOADLEN, GFP_KERNEL);
 	if (data == NULL)
 		return -ENOMEM;
 
@@ -285,6 +295,12 @@
 		goto out;
 	}
 
+	/*
+	 * More on the minor hack for backward compatibility
+	 */
+	if (count == (SMK_OLOADLEN))
+		data[SMK_OLOADLEN] = '-';
+
 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
 	if (rule == NULL) {
 		rc = -ENOMEM;
@@ -345,6 +361,17 @@
 		goto out_free_rule;
 	}
 
+	switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
+	case '-':
+		break;
+	case 't':
+	case 'T':
+		rule->smk_access |= MAY_TRANSMUTE;
+		break;
+	default:
+		goto out_free_rule;
+	}
+
 	rc = smk_set_access(rule);
 
 	if (!rc)
@@ -1160,7 +1187,7 @@
 				 size_t count, loff_t *ppos)
 {
 	char in[SMK_LABELLEN];
-	char *sp = current->cred->security;
+	char *sp = smk_of_task(current->cred->security);
 
 	if (!capable(CAP_MAC_ADMIN))
 		return -EPERM;
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 1d0bf8f..d1e05b0 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include "common.h"
+#include "../../fs/internal.h"
 
 /**
  * tomoyo_encode: Convert binary string to ascii string.
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index a351dd0..2b50cbe 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -19,8 +19,8 @@
 
 /*
  * Let drivers decide whether they want to support given codec from their
- * probe method.  Drivers have direct access to the struct snd_ac97 structure and may
- * decide based on the id field amongst other things.
+ * probe method. Drivers have direct access to the struct snd_ac97
+ * structure and may decide based on the id field amongst other things.
  */
 static int ac97_bus_match(struct device *dev, struct device_driver *drv)
 {
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 91852e4..3687a6c 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -1114,7 +1114,6 @@
 	of_node_put(onyx->codec.node);
 	if (onyx->codec_info)
 		kfree(onyx->codec_info);
-	i2c_set_clientdata(client, onyx);
 	kfree(onyx);
 	return 0;
 }
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index de8e03a..faa3174 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -287,10 +287,9 @@
 		free_irq(linein_detect_irq, &rt->line_in_notify);
 	if (rt->line_out_notify.gpio_private)
 		free_irq(lineout_detect_irq, &rt->line_out_notify);
-	cancel_delayed_work(&rt->headphone_notify.work);
-	cancel_delayed_work(&rt->line_in_notify.work);
-	cancel_delayed_work(&rt->line_out_notify.work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rt->headphone_notify.work);
+	cancel_delayed_work_sync(&rt->line_in_notify.work);
+	cancel_delayed_work_sync(&rt->line_out_notify.work);
 	mutex_destroy(&rt->headphone_notify.mutex);
 	mutex_destroy(&rt->line_in_notify.mutex);
 	mutex_destroy(&rt->line_out_notify.mutex);
diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c
index 7e267c9..c8d8a1a 100644
--- a/sound/aoa/core/gpio-pmf.c
+++ b/sound/aoa/core/gpio-pmf.c
@@ -107,10 +107,9 @@
 
 	/* make sure no work is pending before freeing
 	 * all things */
-	cancel_delayed_work(&rt->headphone_notify.work);
-	cancel_delayed_work(&rt->line_in_notify.work);
-	cancel_delayed_work(&rt->line_out_notify.work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rt->headphone_notify.work);
+	cancel_delayed_work_sync(&rt->line_in_notify.work);
+	cancel_delayed_work_sync(&rt->line_out_notify.work);
 
 	mutex_destroy(&rt->headphone_notify.mutex);
 	mutex_destroy(&rt->line_in_notify.mutex);
diff --git a/sound/core/control.c b/sound/core/control.c
index 45a8180..9ce00ed 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1488,7 +1488,7 @@
 }
 
 /*
- * Frequently used control callbacks
+ * Frequently used control callbacks/helpers
  */
 int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_info *uinfo)
@@ -1513,3 +1513,29 @@
 }
 
 EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
+
+/**
+ * snd_ctl_enum_info - fills the info structure for an enumerated control
+ * @info: the structure to be filled
+ * @channels: the number of the control's channels; often one
+ * @items: the number of control values; also the size of @names
+ * @names: an array containing the names of all control values
+ *
+ * Sets all required fields in @info to their appropriate values.
+ * If the control's accessibility is not the default (readable and writable),
+ * the caller has to fill @info->access.
+ */
+int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
+		      unsigned int items, const char *const names[])
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	info->count = channels;
+	info->value.enumerated.items = items;
+	if (info->value.enumerated.item >= items)
+		info->value.enumerated.item = items - 1;
+	strlcpy(info->value.enumerated.name,
+		names[info->value.enumerated.item],
+		sizeof(info->value.enumerated.name));
+	return 0;
+}
+EXPORT_SYMBOL(snd_ctl_enum_info);
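
A possible caller-side sketch, not taken from this change: snd_ctl_enum_info() is
intended to implement a driver's .info callback for an enumerated control in one
line, roughly as below. The control values are made up for illustration.

	#include <linux/kernel.h>	/* ARRAY_SIZE */
	#include <sound/control.h>

	/* Hypothetical 3-way capture source switch. */
	static int example_capture_source_info(struct snd_kcontrol *kcontrol,
					       struct snd_ctl_elem_info *uinfo)
	{
		static const char *const texts[] = { "Line", "Mic", "Off" };

		/* fills type, count, items and the selected item's name */
		return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
	}
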
diff --git a/sound/core/init.c b/sound/core/init.c
index 57b792e2..3e65da2 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -642,7 +642,7 @@
  *  external accesses.  Thus, you should call this function at the end
  *  of the initialization of the card.
  *
- *  Returns zero otherwise a negative error code if the registrain failed.
+ *  Returns zero, or a negative error code if the registration failed.
  */
 int snd_card_register(struct snd_card *card)
 {
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index b753ec6..a2e4eb3 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -453,8 +453,10 @@
 	} else {
 		*params = *save;
 		max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir);
-		if (max < 0)
+		if (max < 0) {
+			kfree(save);
 			return max;
+		}
 		last = 1;
 	}
  _end:
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 11446a1..a82e3756 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -373,6 +373,27 @@
 			   (unsigned long)new_hw_ptr,
 			   (unsigned long)runtime->hw_ptr_base);
 	}
+
+	if (runtime->no_period_wakeup) {
+		/*
+		 * Without regular period interrupts, we have to check
+		 * the elapsed time to detect xruns.
+		 */
+		jdelta = jiffies - runtime->hw_ptr_jiffies;
+		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
+			goto no_delta_check;
+		hdelta = jdelta - delta * HZ / runtime->rate;
+		while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) {
+			delta += runtime->buffer_size;
+			hw_base += runtime->buffer_size;
+			if (hw_base >= runtime->boundary)
+				hw_base = 0;
+			new_hw_ptr = hw_base + pos;
+			hdelta -= runtime->hw_ptr_buffer_jiffies;
+		}
+		goto no_delta_check;
+	}
+
 	/* something must be really wrong */
 	if (delta >= runtime->buffer_size + runtime->period_size) {
 		hw_ptr_error(substream,
@@ -442,6 +463,7 @@
 			     (long)old_hw_ptr);
 	}
 
+ no_delta_check:
 	if (runtime->status->hw_ptr == new_hw_ptr)
 		return 0;
 
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index e82c1f9..4be45e7 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -422,6 +422,9 @@
 	runtime->info = params->info;
 	runtime->rate_num = params->rate_num;
 	runtime->rate_den = params->rate_den;
+	runtime->no_period_wakeup =
+			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
 
 	bits = snd_pcm_format_physical_width(runtime->format);
 	runtime->sample_bits = bits;
@@ -984,7 +987,7 @@
 	if (push)
 		snd_pcm_update_hw_ptr(substream);
 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
-	 * a delta betwen the current jiffies, this gives a large enough
+	 * a delta between the current jiffies, this gives a large enough
 	 * delta, effectively to skip the check once.
 	 */
 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index bf09a5a..119fddb6 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -32,6 +32,7 @@
 #include "seq_timer.h"
 #include "seq_system.h"
 #include "seq_info.h"
+#include <sound/minors.h>
 #include <sound/seq_device.h>
 
 #if defined(CONFIG_SND_SEQ_DUMMY_MODULE)
@@ -73,6 +74,9 @@
 module_param(seq_default_timer_resolution, int, 0644);
 MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz.");
 
+MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_SEQUENCER);
+MODULE_ALIAS("devname:snd/seq");
+
 /*
  *  INIT PART
  */
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 66691fe..1c7a3ef 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -188,14 +188,22 @@
 };
 
 #ifdef CONFIG_SND_DYNAMIC_MINORS
-static int snd_find_free_minor(void)
+static int snd_find_free_minor(int type)
 {
 	int minor;
 
+	/* static minors for module auto loading */
+	if (type == SNDRV_DEVICE_TYPE_SEQUENCER)
+		return SNDRV_MINOR_SEQUENCER;
+	if (type == SNDRV_DEVICE_TYPE_TIMER)
+		return SNDRV_MINOR_TIMER;
+
 	for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) {
-		/* skip minors still used statically for autoloading devices */
-		if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL ||
-		    minor == SNDRV_MINOR_SEQUENCER)
+		/* skip static minors still used for module auto loading */
+		if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL)
+			continue;
+		if (minor == SNDRV_MINOR_SEQUENCER ||
+		    minor == SNDRV_MINOR_TIMER)
 			continue;
 		if (!snd_minors[minor])
 			return minor;
@@ -269,7 +277,7 @@
 	preg->private_data = private_data;
 	mutex_lock(&sound_mutex);
 #ifdef CONFIG_SND_DYNAMIC_MINORS
-	minor = snd_find_free_minor();
+	minor = snd_find_free_minor(type);
 #else
 	minor = snd_kernel_minor(type, card, dev);
 	if (minor >= 0 && snd_minors[minor])
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 13afb60..ed01632 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -34,8 +34,8 @@
 #include <sound/initval.h>
 #include <linux/kmod.h>
 
-#if defined(CONFIG_SND_HPET) || defined(CONFIG_SND_HPET_MODULE)
-#define DEFAULT_TIMER_LIMIT 3
+#if defined(CONFIG_SND_HRTIMER) || defined(CONFIG_SND_HRTIMER_MODULE)
+#define DEFAULT_TIMER_LIMIT 4
 #elif defined(CONFIG_SND_RTCTIMER) || defined(CONFIG_SND_RTCTIMER_MODULE)
 #define DEFAULT_TIMER_LIMIT 2
 #else
@@ -52,6 +52,9 @@
 module_param(timer_tstamp_monotonic, int, 0444);
 MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
 
+MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
+MODULE_ALIAS("devname:snd/timer");
+
 struct snd_timer_user {
 	struct snd_timer_instance *timeri;
 	int tread;		/* enhanced read with timestamps and events */
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index a1282c1..5cfcb90 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1143,8 +1143,8 @@
 					     (resource->start) + 1);
 	if (ml403_ac97cr->port == NULL) {
 		snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
-			   "unable to remap memory region (%x to %x)\n",
-			   resource->start, resource->end);
+			   "unable to remap memory region (%pR)\n",
+			   resource);
 		snd_ml403_ac97cr_free(ml403_ac97cr);
 		return -EBUSY;
 	}
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 971a84a..c424d32 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -57,8 +57,7 @@
 {
 	chip->init = 1;	/* don't schedule new work */
 	mb();
-	cancel_delayed_work(&chip->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&chip->work);
 	kfree(chip);
 }
 
@@ -141,7 +140,7 @@
 {
 	chip->init = 1;
 	mb();
-	flush_scheduled_work();
+	flush_delayed_work_sync(&chip->work);
 	ak4113_init_regs(chip);
 	/* bring up statistics / event queing */
 	chip->init = 0;
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 0341451..d9fb537 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -67,8 +67,7 @@
 {
 	chip->init = 1;	/* don't schedule new work */
 	mb();
-	cancel_delayed_work(&chip->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&chip->work);
 	kfree(chip);
 }
 
@@ -154,7 +153,7 @@
 {
 	chip->init = 1;
 	mb();
-	flush_scheduled_work();
+	flush_delayed_work_sync(&chip->work);
 	ak4114_init_regs(chip);
 	/* bring up statistics / event queing */
 	chip->init = 0;
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 265abcc..9b915e2 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -264,7 +264,7 @@
 		snd_printd("OPL3-SA [0x%lx] detect (1) = 0x%x (0x%x)\n", port, tmp, tmp1);
 		return -ENODEV;
 	}
-	/* try if the MIC register is accesible */
+	/* try if the MIC register is accessible */
 	tmp = snd_opl3sa2_read(chip, OPL3SA2_MIC);
 	snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x8a);
 	if (((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MIC)) & 0x9f) != 0x8a) {
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 12e3465..9823d59 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -209,7 +209,7 @@
         tristate
 
 config SND_OXYGEN
-	tristate "C-Media 8788 (Oxygen)"
+	tristate "C-Media 8786, 8787, 8788 (Oxygen)"
 	select SND_OXYGEN_LIB
 	select SND_PCM
 	select SND_MPU401_UART
@@ -217,13 +217,18 @@
 	  Say Y here to include support for sound cards based on the
 	  C-Media CMI8788 (Oxygen HD Audio) chip:
 	   * Asound A-8788
+	   * Asus Xonar DG
 	   * AuzenTech X-Meridian
+	   * AuzenTech X-Meridian 2G
 	   * Bgears b-Enspirer
 	   * Club3D Theatron DTS
 	   * HT-Omega Claro (plus)
 	   * HT-Omega Claro halo (XT)
+	   * Kuroutoshikou CMI8787-HG2PCI
 	   * Razer Barracuda AC-1
 	   * Sondigo Inferno
+	   * TempoTec/MediaTek HiFier Fantasia
+	   * TempoTec/MediaTek HiFier Serenade
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-oxygen.
@@ -578,18 +583,6 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-hdspm.
 
-config SND_HIFIER
-	tristate "TempoTec HiFier Fantasia"
-	select SND_OXYGEN_LIB
-	select SND_PCM
-	select SND_MPU401_UART
-	help
-	  Say Y here to include support for the MediaTek/TempoTec HiFier
-	  Fantasia sound card.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called snd-hifier.
-
 config SND_ICE1712
 	tristate "ICEnsemble ICE1712 (Envy24)"
 	select SND_MPU401_UART
@@ -826,8 +819,8 @@
 	  Say Y here to include support for sound cards based on the
 	  Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
 	  Essence ST (Deluxe), and Essence STX.
-	  Support for the HDAV1.3 (Deluxe) is incomplete; for the
-	  HDAV1.3 Slim and Xense, missing.
+	  Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
+	  for the Xense, missing.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-virtuoso.
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index a7630e9..0fc614c 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1014,8 +1014,7 @@
 {
 	if (ac97) {
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-		cancel_delayed_work(&ac97->power_work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&ac97->power_work);
 #endif
 		snd_ac97_proc_done(ac97);
 		if (ac97->bus)
@@ -2456,8 +2455,7 @@
 	if (ac97->build_ops->suspend)
 		ac97->build_ops->suspend(ac97);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-	cancel_delayed_work(&ac97->power_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ac97->power_work);
 #endif
 	snd_ac97_powerdown(ac97);
 }
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 2f3cacb..6117595 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1,6 +1,6 @@
 /*
  *  azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
- *  Copyright (C) 2002, 2005 - 2009 by Andreas Mohr <andi AT lisas.de>
+ *  Copyright (C) 2002, 2005 - 2010 by Andreas Mohr <andi AT lisas.de>
  *
  *  Framework borrowed from Bart Hartgers's als4000.c.
  *  Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
@@ -175,6 +175,7 @@
 
 #include <asm/io.h>
 #include <linux/init.h>
+#include <linux/bug.h> /* WARN_ONCE */
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -201,14 +202,15 @@
 
 /* === Debug settings ===
   Further diagnostic functionality than the settings below
-  does not need to be provided, since one can easily write a bash script
+  does not need to be provided, since one can easily write a POSIX shell script
   to dump the card's I/O ports (those listed in lspci -v -v):
-  function dump()
+  dump()
   {
     local descr=$1; local addr=$2; local count=$3
 
     echo "${descr}: ${count} @ ${addr}:"
-    dd if=/dev/port skip=$[${addr}] count=${count} bs=1 2>/dev/null| hexdump -C
+    dd if=/dev/port skip=`printf %d ${addr}` count=${count} bs=1 \
+      2>/dev/null| hexdump -C
   }
   and then use something like
   "dump joy200 0x200 8", "dump mpu388 0x388 4", "dump joy 0xb400 8",
@@ -216,14 +218,14 @@
   possibly within a "while true; do ... sleep 1; done" loop.
   Tweaking ports could be done using
   VALSTRING="`printf "%02x" $value`"
-  printf "\x""$VALSTRING"|dd of=/dev/port seek=$[${addr}] bs=1 2>/dev/null
+  printf "\x""$VALSTRING"|dd of=/dev/port seek=`printf %d ${addr}` bs=1 \
+    2>/dev/null
 */
 
 #define DEBUG_MISC	0
 #define DEBUG_CALLS	0
 #define DEBUG_MIXER	0
 #define DEBUG_CODEC	0
-#define DEBUG_IO	0
 #define DEBUG_TIMER	0
 #define DEBUG_GAME	0
 #define DEBUG_PM	0
@@ -291,19 +293,23 @@
 module_param(seqtimer_scaling, int, 0444);
 MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128.");
 
-struct snd_azf3328_codec_data {
-	unsigned long io_base;
-	struct snd_pcm_substream *substream;
-	bool running;
-	const char *name;
-};
-
 enum snd_azf3328_codec_type {
+  /* warning: fixed indices (also used for bitmask checks!) */
   AZF_CODEC_PLAYBACK = 0,
   AZF_CODEC_CAPTURE = 1,
   AZF_CODEC_I2S_OUT = 2,
 };
 
+struct snd_azf3328_codec_data {
+	unsigned long io_base; /* keep first! (avoid offset calc) */
+	unsigned int dma_base; /* helper to avoid an indirection in hotpath */
+	spinlock_t *lock; /* TODO: convert to our own per-codec lock member */
+	struct snd_pcm_substream *substream;
+	bool running;
+	enum snd_azf3328_codec_type type;
+	const char *name;
+};
+
 struct snd_azf3328 {
 	/* often-used fields towards beginning, then grouped */
 
@@ -362,6 +368,9 @@
 static int
 snd_azf3328_io_reg_setb(unsigned reg, u8 mask, bool do_set)
 {
+	/* Well, strictly speaking, the inb/outb sequence isn't atomic
+	   and would need locking. However, we currently don't bother
+	   since locking would complicate matters. */
 	u8 prev = inb(reg), new;
 
 	new = (do_set) ? (prev|mask) : (prev & ~mask);
@@ -413,6 +422,21 @@
 	outl(value, codec->io_base + reg);
 }
 
+static inline void
+snd_azf3328_codec_outl_multi(const struct snd_azf3328_codec_data *codec,
+			     unsigned reg, const void *buffer, int count
+)
+{
+	unsigned long addr = codec->io_base + reg;
+	if (count) {
+		const u32 *buf = buffer;
+		do {
+			outl(*buf++, addr);
+			addr += 4;
+		} while (--count);
+	}
+}
+
 static inline u32
 snd_azf3328_codec_inl(const struct snd_azf3328_codec_data *codec, unsigned reg)
 {
@@ -943,38 +967,43 @@
 }
 
 static void
-snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
-			       enum snd_azf3328_codec_type codec_type,
+snd_azf3328_codec_setfmt(struct snd_azf3328_codec_data *codec,
 			       enum azf_freq_t bitrate,
 			       unsigned int format_width,
 			       unsigned int channels
 )
 {
 	unsigned long flags;
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	u16 val = 0xff00;
+	u8 freq = 0;
 
 	snd_azf3328_dbgcallenter();
 	switch (bitrate) {
-	case AZF_FREQ_4000:  val |= SOUNDFORMAT_FREQ_SUSPECTED_4000; break;
-	case AZF_FREQ_4800:  val |= SOUNDFORMAT_FREQ_SUSPECTED_4800; break;
-	case AZF_FREQ_5512:
-		/* the AZF3328 names it "5510" for some strange reason */
-			     val |= SOUNDFORMAT_FREQ_5510; break;
-	case AZF_FREQ_6620:  val |= SOUNDFORMAT_FREQ_6620; break;
-	case AZF_FREQ_8000:  val |= SOUNDFORMAT_FREQ_8000; break;
-	case AZF_FREQ_9600:  val |= SOUNDFORMAT_FREQ_9600; break;
-	case AZF_FREQ_11025: val |= SOUNDFORMAT_FREQ_11025; break;
-	case AZF_FREQ_13240: val |= SOUNDFORMAT_FREQ_SUSPECTED_13240; break;
-	case AZF_FREQ_16000: val |= SOUNDFORMAT_FREQ_16000; break;
-	case AZF_FREQ_22050: val |= SOUNDFORMAT_FREQ_22050; break;
-	case AZF_FREQ_32000: val |= SOUNDFORMAT_FREQ_32000; break;
+#define AZF_FMT_XLATE(in_freq, out_bits) \
+	do { \
+		case AZF_FREQ_ ## in_freq: \
+			freq = SOUNDFORMAT_FREQ_ ## out_bits; \
+			break; \
+	} while (0);
+	AZF_FMT_XLATE(4000, SUSPECTED_4000)
+	AZF_FMT_XLATE(4800, SUSPECTED_4800)
+	/* the AZF3328 names it "5510" for some strange reason: */
+	AZF_FMT_XLATE(5512, 5510)
+	AZF_FMT_XLATE(6620, 6620)
+	AZF_FMT_XLATE(8000, 8000)
+	AZF_FMT_XLATE(9600, 9600)
+	AZF_FMT_XLATE(11025, 11025)
+	AZF_FMT_XLATE(13240, SUSPECTED_13240)
+	AZF_FMT_XLATE(16000, 16000)
+	AZF_FMT_XLATE(22050, 22050)
+	AZF_FMT_XLATE(32000, 32000)
 	default:
 		snd_printk(KERN_WARNING "unknown bitrate %d, assuming 44.1kHz!\n", bitrate);
 		/* fall-through */
-	case AZF_FREQ_44100: val |= SOUNDFORMAT_FREQ_44100; break;
-	case AZF_FREQ_48000: val |= SOUNDFORMAT_FREQ_48000; break;
-	case AZF_FREQ_66200: val |= SOUNDFORMAT_FREQ_SUSPECTED_66200; break;
+	AZF_FMT_XLATE(44100, 44100)
+	AZF_FMT_XLATE(48000, 48000)
+	AZF_FMT_XLATE(66200, SUSPECTED_66200)
+#undef AZF_FMT_XLATE
 	}
 	/* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) hmm, 66120, 65967, 66123 */
 	/* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) hmm, 13237.2Hz? */
@@ -986,13 +1015,15 @@
 	/* val = 0xff0d; 41m23.135s (5523,600Hz; -> 5512Hz???) */
 	/* val = 0xff0e; 28m30.777s (8017Hz; -> 8000Hz???) */
 
+	val |= freq;
+
 	if (channels == 2)
 		val |= SOUNDFORMAT_FLAG_2CHANNELS;
 
 	if (format_width == 16)
 		val |= SOUNDFORMAT_FLAG_16BIT;
 
-	spin_lock_irqsave(&chip->reg_lock, flags);
+	spin_lock_irqsave(codec->lock, flags);
 
 	/* set bitrate/format */
 	snd_azf3328_codec_outw(codec, IDX_IO_CODEC_SOUNDFORMAT, val);
@@ -1004,7 +1035,8 @@
 	 * (FIXME: yes, it works, but what exactly am I doing here?? :)
 	 * FIXME: does this have some side effects for full-duplex
 	 * or other dramatic side effects? */
-	if (codec_type == AZF_CODEC_PLAYBACK) /* only do it for playback */
+	/* do it for non-capture codecs only */
+	if (codec->type != AZF_CODEC_CAPTURE)
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
 			snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS) |
 			DMA_RUN_SOMETHING1 |
@@ -1014,20 +1046,19 @@
 			DMA_SOMETHING_ELSE
 		);
 
-	spin_unlock_irqrestore(&chip->reg_lock, flags);
+	spin_unlock_irqrestore(codec->lock, flags);
 	snd_azf3328_dbgcallleave();
 }
 
 static inline void
-snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip,
-			    enum snd_azf3328_codec_type codec_type
+snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328_codec_data *codec
 )
 {
 	/* choose lowest frequency for low power consumption.
 	 * While this will cause louder noise due to rather coarse frequency,
 	 * it should never matter since output should always
 	 * get disabled properly when idle anyway. */
-	snd_azf3328_codec_setfmt(chip, codec_type, AZF_FREQ_4000, 8, 1);
+	snd_azf3328_codec_setfmt(codec, AZF_FREQ_4000, 8, 1);
 }
 
 static void
@@ -1101,69 +1132,87 @@
 		/* ...and adjust clock, too
 		 * (reduce noise and power consumption) */
 		if (!enable)
-			snd_azf3328_codec_setfmt_lowpower(
-				chip,
-				codec_type
-			);
+			snd_azf3328_codec_setfmt_lowpower(codec);
 		codec->running = enable;
 	}
 }
 
 static void
-snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip,
-				enum snd_azf3328_codec_type codec_type,
+snd_azf3328_codec_setdmaa(struct snd_azf3328_codec_data *codec,
 				unsigned long addr,
-				unsigned int count,
-				unsigned int size
+				unsigned int period_bytes,
+				unsigned int buffer_bytes
 )
 {
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	snd_azf3328_dbgcallenter();
+	WARN_ONCE(period_bytes & 1, "odd period length!?\n");
+	WARN_ONCE(buffer_bytes != 2 * period_bytes,
+		 "missed our input expectations! %u vs. %u\n",
+		 buffer_bytes, period_bytes);
 	if (!codec->running) {
 		/* AZF3328 uses a two buffer pointer DMA transfer approach */
 
-		unsigned long flags, addr_area2;
+		unsigned long flags;
 
 		/* width 32bit (prevent overflow): */
-		u32 count_areas, lengths;
+		u32 area_length;
+		struct codec_setup_io {
+			u32 dma_start_1;
+			u32 dma_start_2;
+			u32 dma_lengths;
+		} __attribute__((packed)) setup_io;
 
-		count_areas = size/2;
-		addr_area2 = addr+count_areas;
-		snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n",
-				addr, count_areas, addr_area2, count_areas);
+		area_length = buffer_bytes/2;
 
-		count_areas--; /* max. index */
+		setup_io.dma_start_1 = addr;
+		setup_io.dma_start_2 = addr+area_length;
+
+		snd_azf3328_dbgcodec(
+			"setdma: buffers %08x[%u] / %08x[%u], %u, %u\n",
+				setup_io.dma_start_1, area_length,
+				setup_io.dma_start_2, area_length,
+				period_bytes, buffer_bytes);
+
+		/* Hmm, are we really supposed to decrement this by 1??
+		   Most definitely not: configuring the full length does
+		   work properly (i.e. likely better), and BTW decrementing
+		   would have mishandled possibly differing frame sizes...
+
+		area_length--; |* max. index *|
+		*/
 
 		/* build combined I/O buffer length word */
-		lengths = (count_areas << 16) | (count_areas);
-		spin_lock_irqsave(&chip->reg_lock, flags);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_1, addr);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_2,
-								addr_area2);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_LENGTHS,
-								lengths);
-		spin_unlock_irqrestore(&chip->reg_lock, flags);
+		setup_io.dma_lengths = (area_length << 16) | (area_length);
+
+		spin_lock_irqsave(codec->lock, flags);
+		snd_azf3328_codec_outl_multi(
+			codec, IDX_IO_CODEC_DMA_START_1, &setup_io, 3
+		);
+		spin_unlock_irqrestore(codec->lock, flags);
 	}
 	snd_azf3328_dbgcallleave();
 }
 
 static int
-snd_azf3328_codec_prepare(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_prepare(struct snd_pcm_substream *substream)
 {
-#if 0
-	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = runtime->private_data;
+#if 0
         unsigned int size = snd_pcm_lib_buffer_bytes(substream);
 	unsigned int count = snd_pcm_lib_period_bytes(substream);
 #endif
 
 	snd_azf3328_dbgcallenter();
+
+	codec->dma_base = runtime->dma_addr;
+
 #if 0
-	snd_azf3328_codec_setfmt(chip, AZF_CODEC_...,
+	snd_azf3328_codec_setfmt(codec,
 		runtime->rate,
 		snd_pcm_format_width(runtime->format),
 		runtime->channels);
-	snd_azf3328_codec_setdmaa(chip, AZF_CODEC_...,
+	snd_azf3328_codec_setdmaa(codec,
 					runtime->dma_addr, count, size);
 #endif
 	snd_azf3328_dbgcallleave();
@@ -1171,24 +1220,23 @@
 }
 
 static int
-snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type,
-			struct snd_pcm_substream *substream, int cmd)
+snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = runtime->private_data;
 	int result = 0;
 	u16 flags1;
 	bool previously_muted = 0;
-	bool is_playback_codec = (AZF_CODEC_PLAYBACK == codec_type);
+	bool is_main_mixer_playback_codec = (AZF_CODEC_PLAYBACK == codec->type);
 
-	snd_azf3328_dbgcalls("snd_azf3328_codec_trigger cmd %d\n", cmd);
+	snd_azf3328_dbgcalls("snd_azf3328_pcm_trigger cmd %d\n", cmd);
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		snd_azf3328_dbgcodec("START %s\n", codec->name);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* mute WaveOut (avoid clicking during setup) */
 			previously_muted =
 				snd_azf3328_mixer_set_mute(
@@ -1196,12 +1244,12 @@
 				);
 		}
 
-		snd_azf3328_codec_setfmt(chip, codec_type,
+		snd_azf3328_codec_setfmt(codec,
 			runtime->rate,
 			snd_pcm_format_width(runtime->format),
 			runtime->channels);
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		/* first, remember current value: */
 		flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
 
@@ -1211,14 +1259,14 @@
 
 		/* FIXME: clear interrupts or what??? */
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_IRQTYPE, 0xffff);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 
-		snd_azf3328_codec_setdmaa(chip, codec_type, runtime->dma_addr,
+		snd_azf3328_codec_setdmaa(codec, runtime->dma_addr,
 			snd_pcm_lib_period_bytes(substream),
 			snd_pcm_lib_buffer_bytes(substream)
 		);
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 #ifdef WIN9X
 		/* FIXME: enable playback/recording??? */
 		flags1 |= DMA_RUN_SOMETHING1 | DMA_RUN_SOMETHING2;
@@ -1242,10 +1290,10 @@
 			DMA_EPILOGUE_SOMETHING |
 			DMA_SOMETHING_ELSE);
 #endif
-		spin_unlock(&chip->reg_lock);
-		snd_azf3328_ctrl_codec_activity(chip, codec_type, 1);
+		spin_unlock(codec->lock);
+		snd_azf3328_ctrl_codec_activity(chip, codec->type, 1);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* now unmute WaveOut */
 			if (!previously_muted)
 				snd_azf3328_mixer_set_mute(
@@ -1258,19 +1306,19 @@
 	case SNDRV_PCM_TRIGGER_RESUME:
 		snd_azf3328_dbgcodec("RESUME %s\n", codec->name);
 		/* resume codec if we were active */
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		if (codec->running)
 			snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
 				snd_azf3328_codec_inw(
 					codec, IDX_IO_CODEC_DMA_FLAGS
 				) | DMA_RESUME
 			);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		snd_azf3328_dbgcodec("STOP %s\n", codec->name);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* mute WaveOut (avoid clicking during setup) */
 			previously_muted =
 				snd_azf3328_mixer_set_mute(
@@ -1278,7 +1326,7 @@
 				);
 		}
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		/* first, remember current value: */
 		flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
 
@@ -1293,10 +1341,10 @@
 
 		flags1 &= ~DMA_RUN_SOMETHING1;
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
-		spin_unlock(&chip->reg_lock);
-		snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
+		spin_unlock(codec->lock);
+		snd_azf3328_ctrl_codec_activity(chip, codec->type, 0);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* now unmute WaveOut */
 			if (!previously_muted)
 				snd_azf3328_mixer_set_mute(
@@ -1330,67 +1378,29 @@
 	return result;
 }
 
-static int
-snd_azf3328_codec_playback_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_PLAYBACK, substream, cmd);
-}
-
-static int
-snd_azf3328_codec_capture_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_CAPTURE, substream, cmd);
-}
-
-static int
-snd_azf3328_codec_i2s_out_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_I2S_OUT, substream, cmd);
-}
-
 static snd_pcm_uframes_t
-snd_azf3328_codec_pointer(struct snd_pcm_substream *substream,
-			  enum snd_azf3328_codec_type codec_type
+snd_azf3328_pcm_pointer(struct snd_pcm_substream *substream
 )
 {
-	const struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
-	unsigned long bufptr, result;
+	const struct snd_azf3328_codec_data *codec =
+		substream->runtime->private_data;
+	unsigned long result;
 	snd_pcm_uframes_t frmres;
 
-#ifdef QUERY_HARDWARE
-	bufptr = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
-#else
-	bufptr = substream->runtime->dma_addr;
-#endif
 	result = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_CURRPOS);
 
 	/* calculate offset */
-	result -= bufptr;
+#ifdef QUERY_HARDWARE
+	result -= snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
+#else
+	result -= codec->dma_base;
+#endif
 	frmres = bytes_to_frames( substream->runtime, result);
-	snd_azf3328_dbgcodec("%s @ 0x%8lx, frames %8ld\n",
-				codec->name, result, frmres);
+	snd_azf3328_dbgcodec("%08li %s @ 0x%8lx, frames %8ld\n",
+				jiffies, codec->name, result, frmres);
 	return frmres;
 }
 
-static snd_pcm_uframes_t
-snd_azf3328_codec_playback_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_PLAYBACK);
-}
-
-static snd_pcm_uframes_t
-snd_azf3328_codec_capture_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_CAPTURE);
-}
-
-static snd_pcm_uframes_t
-snd_azf3328_codec_i2s_out_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_I2S_OUT);
-}
-
 /******************************************************************/
 
 #ifdef SUPPORT_GAMEPORT
@@ -1532,7 +1542,7 @@
 		}
 	}
 
-	/* trigger next axes sampling, to be evaluated the next time we
+	/* trigger next sampling of axes, to be evaluated the next time we
 	 * enter this function */
 
 	/* for some very, very strange reason we cannot enable
@@ -1624,29 +1634,29 @@
 }
 
 static inline void
-snd_azf3328_codec_interrupt(struct snd_azf3328 *chip, u8 status)
+snd_azf3328_pcm_interrupt(const struct snd_azf3328_codec_data *first_codec,
+			  u8 status
+)
 {
 	u8 which;
 	enum snd_azf3328_codec_type codec_type;
-	const struct snd_azf3328_codec_data *codec;
+	const struct snd_azf3328_codec_data *codec = first_codec;
 
 	for (codec_type = AZF_CODEC_PLAYBACK;
 		 codec_type <= AZF_CODEC_I2S_OUT;
-			 ++codec_type) {
+			 ++codec_type, ++codec) {
 
 		/* skip codec if there's no interrupt for it */
 		if (!(status & (1 << codec_type)))
 			continue;
 
-		codec = &chip->codecs[codec_type];
-
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		which = snd_azf3328_codec_inb(codec, IDX_IO_CODEC_IRQTYPE);
 		/* ack all IRQ types immediately */
 		snd_azf3328_codec_outb(codec, IDX_IO_CODEC_IRQTYPE, which);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 
-		if ((chip->pcm[codec_type]) && (codec->substream)) {
+		if (codec->substream) {
 			snd_pcm_period_elapsed(codec->substream);
 			snd_azf3328_dbgcodec("%s period done (#%x), @ %x\n",
 				codec->name,
@@ -1701,7 +1711,7 @@
 	}
 
 	if (status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT))
-		snd_azf3328_codec_interrupt(chip, status);
+		snd_azf3328_pcm_interrupt(chip->codecs, status);
 
 	if (status & IRQ_GAMEPORT)
 		snd_azf3328_gameport_interrupt(chip);
@@ -1789,101 +1799,85 @@
 {
 	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 
 	snd_azf3328_dbgcallenter();
-	chip->codecs[codec_type].substream = substream;
+	codec->substream = substream;
 
 	/* same parameters for all our codecs - at least we think so... */
 	runtime->hw = snd_azf3328_hardware;
 
 	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 				   &snd_azf3328_hw_constraints_rates);
+	runtime->private_data = codec;
 	snd_azf3328_dbgcallleave();
 	return 0;
 }
 
 static int
-snd_azf3328_playback_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_playback_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_PLAYBACK);
 }
 
 static int
-snd_azf3328_capture_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_capture_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_CAPTURE);
 }
 
 static int
-snd_azf3328_i2s_out_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_i2s_out_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_I2S_OUT);
 }
 
 static int
-snd_azf3328_pcm_close(struct snd_pcm_substream *substream,
-		      enum snd_azf3328_codec_type codec_type
+snd_azf3328_pcm_close(struct snd_pcm_substream *substream
 )
 {
-	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+	struct snd_azf3328_codec_data *codec =
+		substream->runtime->private_data;
 
 	snd_azf3328_dbgcallenter();
-	chip->codecs[codec_type].substream = NULL;
+	codec->substream = NULL;
 	snd_azf3328_dbgcallleave();
 	return 0;
 }
 
-static int
-snd_azf3328_playback_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_PLAYBACK);
-}
-
-static int
-snd_azf3328_capture_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_CAPTURE);
-}
-
-static int
-snd_azf3328_i2s_out_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_I2S_OUT);
-}
-
 /******************************************************************/
 
 static struct snd_pcm_ops snd_azf3328_playback_ops = {
-	.open =		snd_azf3328_playback_open,
-	.close =	snd_azf3328_playback_close,
+	.open =		snd_azf3328_pcm_playback_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_playback_trigger,
-	.pointer =	snd_azf3328_codec_playback_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static struct snd_pcm_ops snd_azf3328_capture_ops = {
-	.open =		snd_azf3328_capture_open,
-	.close =	snd_azf3328_capture_close,
+	.open =		snd_azf3328_pcm_capture_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_capture_trigger,
-	.pointer =	snd_azf3328_codec_capture_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
-	.open =		snd_azf3328_i2s_out_open,
-	.close =	snd_azf3328_i2s_out_close,
+	.open =		snd_azf3328_pcm_i2s_out_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_i2s_out_trigger,
-	.pointer =	snd_azf3328_codec_i2s_out_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static int __devinit
@@ -1966,7 +1960,7 @@
 		snd_azf3328_dbgtimer("delay was too low (%d)!\n", delay);
 		delay = 49; /* minimum time is 49 ticks */
 	}
-	snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay);
+	snd_azf3328_dbgtimer("setting timer countdown value %d\n", delay);
 	delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE;
 	spin_lock_irqsave(&chip->reg_lock, flags);
 	snd_azf3328_ctrl_outl(chip, IDX_IO_TIMER_VALUE, delay);
@@ -2180,6 +2174,7 @@
 	};
 	u8 dma_init;
 	enum snd_azf3328_codec_type codec_type;
+	struct snd_azf3328_codec_data *codec_setup;
 
 	*rchip = NULL;
 
@@ -2217,15 +2212,23 @@
 	chip->opl3_io  = pci_resource_start(pci, 3);
 	chip->mixer_io = pci_resource_start(pci, 4);
 
-	chip->codecs[AZF_CODEC_PLAYBACK].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
-	chip->codecs[AZF_CODEC_PLAYBACK].name = "PLAYBACK";
-	chip->codecs[AZF_CODEC_CAPTURE].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
-	chip->codecs[AZF_CODEC_CAPTURE].name = "CAPTURE";
-	chip->codecs[AZF_CODEC_I2S_OUT].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
-	chip->codecs[AZF_CODEC_I2S_OUT].name = "I2S_OUT";
+	codec_setup = &chip->codecs[AZF_CODEC_PLAYBACK];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_PLAYBACK;
+	codec_setup->name = "PLAYBACK";
+
+	codec_setup = &chip->codecs[AZF_CODEC_CAPTURE];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_CAPTURE;
+	codec_setup->name = "CAPTURE";
+
+	codec_setup = &chip->codecs[AZF_CODEC_I2S_OUT];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_I2S_OUT;
+	codec_setup->name = "I2S_OUT";
 
 	if (request_irq(pci->irq, snd_azf3328_interrupt,
 			IRQF_SHARED, card->shortname, chip)) {
@@ -2257,15 +2260,15 @@
 		struct snd_azf3328_codec_data *codec =
 			 &chip->codecs[codec_type];
 
-		/* shutdown codecs to save power */
+		/* shutdown codecs to reduce power / noise */
 			/* have ...ctrl_codec_activity() act properly */
 		codec->running = 1;
 		snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
 
-		spin_lock_irq(&chip->reg_lock);
+		spin_lock_irq(codec->lock);
 		snd_azf3328_codec_outb(codec, IDX_IO_CODEC_DMA_FLAGS,
 						 dma_init);
-		spin_unlock_irq(&chip->reg_lock);
+		spin_unlock_irq(codec->lock);
 	}
 
 	snd_card_set_dev(card, &pci->dev);
@@ -2419,6 +2422,7 @@
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
+	/* same pcm object for playback/capture */
 	snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
 	snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
 
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 37e1b5d..2958a05 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -637,15 +637,9 @@
 static int snd_bt87x_capture_source_info(struct snd_kcontrol *kcontrol,
 					 struct snd_ctl_elem_info *info)
 {
-	static char *texts[3] = {"TV Tuner", "FM", "Mic/Line"};
+	static const char *const texts[3] = {"TV Tuner", "FM", "Mic/Line"};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, texts);
 }
 
 static int snd_bt87x_capture_source_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
index f19c110..fc53b9b 100644
--- a/sound/pci/ca0106/ca0106.h
+++ b/sound/pci/ca0106/ca0106.h
@@ -188,7 +188,7 @@
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
 						/* PTR[5:0], Default: 0x0 */
 #define PLAYBACK_UNKNOWN3	0x03		/* Not used ?? */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 						/* DMA[31:0], Default: 0x0 */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size. win2000 uses 0x04000000 */
 						/* SIZE[31:16], Default: 0x0 */
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index d2d12c0..01b4938 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1082,7 +1082,7 @@
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_ca0106_pcm *epcm = runtime->private_data;
 	snd_pcm_uframes_t ptr, ptr1, ptr2 = 0;
-	int channel = channel=epcm->channel_id;
+	int channel = epcm->channel_id;
 
 	if (!epcm->running)
 		return 0;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 329968e..b5bb036 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2507,14 +2507,12 @@
 					struct snd_ctl_elem_info *uinfo)
 {
 	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
-	static char *texts[3] = { "Line-In", "Rear Output", "Bass Output" };
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = cm->chip_version >= 39 ? 3 : 2;
-	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
-	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-	return 0;
+	static const char *const texts[3] = {
+		"Line-In", "Rear Output", "Bass Output"
+	};
+
+	return snd_ctl_enum_info(uinfo, 1,
+				 cm->chip_version >= 39 ? 3 : 2, texts);
 }
 
 static inline unsigned int get_line_in_mode(struct cmipci *cm)
@@ -2564,14 +2562,9 @@
 static int snd_cmipci_mic_in_mode_info(struct snd_kcontrol *kcontrol,
 				       struct snd_ctl_elem_info *uinfo)
 {
-	static char *texts[2] = { "Mic-In", "Center/LFE Output" };
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = 2;
-	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
-	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-	return 0;
+	static const char *const texts[2] = { "Mic-In", "Center/LFE Output" };
+
+	return snd_ctl_enum_info(uinfo, 1, 2, texts);
 }
 
 static int snd_cmipci_mic_in_mode_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/cs5535audio/cs5535audio_pm.c b/sound/pci/cs5535audio/cs5535audio_pm.c
index a3301cc..185b000 100644
--- a/sound/pci/cs5535audio/cs5535audio_pm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pm.c
@@ -90,12 +90,7 @@
 	int i;
 
 	pci_set_power_state(pci, PCI_D0);
-	if (pci_restore_state(pci) < 0) {
-		printk(KERN_ERR "cs5535audio: pci_restore_state failed, "
-		       "disabling device\n");
-		snd_card_disconnect(card);
-		return -EIO;
-	}
+	pci_restore_state(pci);
 	if (pci_enable_device(pci) < 0) {
 		printk(KERN_ERR "cs5535audio: pci_enable_device failed, "
 		       "disabling device\n");
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index df47f73..0c701e4 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -114,7 +114,7 @@
 						 */
 #define PLAYBACK_LIST_SIZE	0x01		/* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000  */
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size */
 #define PLAYBACK_POINTER	0x06		/* Playback period pointer. Sample currently in DAC */
 #define PLAYBACK_UNKNOWN1       0x07
diff --git a/sound/pci/emu10k1/p16v.h b/sound/pci/emu10k1/p16v.h
index 1532149..00f4817 100644
--- a/sound/pci/emu10k1/p16v.h
+++ b/sound/pci/emu10k1/p16v.h
@@ -96,7 +96,7 @@
 #define PLAYBACK_LIST_SIZE	0x01		/* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000  */
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
 #define PLAYBACK_UNKNOWN3	0x03		/* Not used */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size. win2000 uses 0x04000000 */
 #define PLAYBACK_POINTER	0x06		/* Playback period pointer. Used with PLAYBACK_LIST_PTR to determine buffer position currently in DAC */
 #define PLAYBACK_FIFO_END_ADDRESS	0x07		/* Playback FIFO end address */
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 23a58f0..7c17f45 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -220,7 +220,7 @@
 #define	RINGB_EN_2CODEC		0x0020
 #define RINGB_SING_BIT_DUAL	0x0040
 
-/* ****Port Adresses**** */
+/* ****Port Addresses**** */
 
 /*   Write & Read */
 #define ESM_INDEX		0x02
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 98b6d02..05e5ec8 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4571,6 +4571,9 @@
 		}
 		memset(cfg->hp_pins + cfg->hp_outs, 0,
 		       sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs));
+		if (!cfg->hp_outs)
+			cfg->line_out_type = AUTO_PIN_HP_OUT;
+
 	}
 
 	/* sort by sequence */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a1c4008..d3d18be 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1235,7 +1235,8 @@
 			pos_adj = 0;
 		} else {
 			ofs = setup_bdle(substream, azx_dev,
-					 &bdl, ofs, pos_adj, 1);
+					 &bdl, ofs, pos_adj,
+					 !substream->runtime->no_period_wakeup);
 			if (ofs < 0)
 				goto error;
 		}
@@ -1247,7 +1248,8 @@
 					 period_bytes - pos_adj, 0);
 		else
 			ofs = setup_bdle(substream, azx_dev, &bdl, ofs,
-					 period_bytes, 1);
+					 period_bytes,
+					 !substream->runtime->no_period_wakeup);
 		if (ofs < 0)
 			goto error;
 	}
@@ -1515,7 +1517,8 @@
 				 /* No full-resume yet implemented */
 				 /* SNDRV_PCM_INFO_RESUME |*/
 				 SNDRV_PCM_INFO_PAUSE |
-				 SNDRV_PCM_INFO_SYNC_START),
+				 SNDRV_PCM_INFO_SYNC_START |
+				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
 	.rate_min =		48000,
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index f7ff3f7..4678067 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -666,7 +666,7 @@
 	HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
@@ -729,7 +729,7 @@
 	HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	/* 
 	   HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
 	   HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), */
@@ -775,7 +775,7 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
 	{
@@ -1358,7 +1358,7 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1515,8 +1515,8 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1726,8 +1726,8 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
 #endif
-	HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x18, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1774,7 +1774,7 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -2160,8 +2160,8 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
 
 	{ } /* end */
 };
@@ -2203,8 +2203,8 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = "Channel Mode",
@@ -2232,7 +2232,7 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
 
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -2902,7 +2902,7 @@
 		idx = ad1988_pin_idx(pin);
 		bnid = ad1988_boost_nids[idx];
 		if (bnid) {
-			sprintf(name, "%s Boost", ctlname);
+			sprintf(name, "%s Boost Volume", ctlname);
 			return add_control(spec, AD_CTL_WIDGET_VOL, name,
 					   HDA_COMPOSE_AMP_VAL(bnid, 3, idx, HDA_OUTPUT));
 
@@ -3300,8 +3300,8 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3499,9 +3499,9 @@
 	HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("Docking Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("Docking Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Docking Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3560,8 +3560,8 @@
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line-In Playback Volume", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Line-In Playback Switch", 0x20, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line-In Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line-In Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3745,9 +3745,9 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3888,9 +3888,9 @@
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	{ } /* end */
@@ -4126,8 +4126,8 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	{
@@ -4255,8 +4255,8 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -4494,9 +4494,9 @@
 	HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Line-In Boost", 0x3a, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Line-In Boost Volume", 0x3a, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -4547,7 +4547,7 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
-	HDA_CODEC_VOLUME("Digital Mic Boost", 0x1f, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Digital Mic Boost Volume", 0x1f, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 76bd58a..e96581f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -869,16 +869,16 @@
 }
 
 static struct snd_kcontrol_new cxt5045_mixers[] = {
-	HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
 	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -910,16 +910,16 @@
 };
 
 static struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
-	HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
 	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -947,7 +947,7 @@
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x1a, AC_VERB_SET_CONNECT_SEL,0x1},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
 	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
@@ -960,7 +960,7 @@
 };
 
 static struct hda_verb cxt5045_benq_init_verbs[] = {
-	/* Int Mic, Mic */
+	/* Internal Mic, Mic */
 	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
 	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
 	/* Line In,HP, Amp  */
@@ -973,7 +973,7 @@
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x1},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
 	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
@@ -1376,7 +1376,7 @@
 static struct snd_kcontrol_new cxt5047_base_mixers[] = {
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x19, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x19, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x1a, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1a, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x03, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Volume", 0x10, 0x00, HDA_OUTPUT),
@@ -1796,8 +1796,8 @@
 static struct snd_kcontrol_new cxt5051_capture_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("Docking Mic Volume", 0x15, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Docking Mic Switch", 0x15, 0x00, HDA_INPUT),
 	{}
@@ -1806,8 +1806,8 @@
 static struct snd_kcontrol_new cxt5051_hp_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x15, 0x00, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x15, 0x00, HDA_INPUT),
 	{}
 };
 
@@ -1826,8 +1826,8 @@
 static struct snd_kcontrol_new cxt5051_toshiba_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
 	{}
 };
 
@@ -1847,7 +1847,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */	
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
@@ -1874,7 +1874,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* SPDIF route: PCM */
@@ -1904,7 +1904,7 @@
 	{0x19, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
@@ -1932,7 +1932,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* SPDIF route: PCM */
@@ -2111,6 +2111,11 @@
 	{ 2, NULL },
 };
 
+#define HP_PRESENT_PORT_A	(1 << 0)
+#define HP_PRESENT_PORT_D	(1 << 1)
+#define hp_port_a_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_A)
+#define hp_port_d_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_D)
+
 static void cxt5066_update_speaker(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
@@ -2120,24 +2125,20 @@
 		    spec->hp_present, spec->cur_eapd);
 
 	/* Port A (HP) */
-	pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0;
+	pinctl = (hp_port_a_present(spec) && spec->cur_eapd) ? PIN_HP : 0;
 	snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
 			pinctl);
 
 	/* Port D (HP/LO) */
-	if (spec->dell_automute) {
-		/* DELL AIO Port Rule: PortA>  PortD>  IntSpk */
-		pinctl = (!(spec->hp_present & 1) && spec->cur_eapd)
-			? PIN_OUT : 0;
-	} else if (spec->thinkpad) {
-		if (spec->cur_eapd)
-			pinctl = spec->port_d_mode;
-		/* Mute dock line-out if Port A (laptop HP) is present */
-		if (spec->hp_present&  1)
+	pinctl = spec->cur_eapd ? spec->port_d_mode : 0;
+	if (spec->dell_automute || spec->thinkpad) {
+		/* Mute if Port A is connected */
+		if (hp_port_a_present(spec))
 			pinctl = 0;
 	} else {
-		pinctl = ((spec->hp_present & 2) && spec->cur_eapd)
-			? spec->port_d_mode : 0;
+		/* Thinkpad/Dell doesn't give pin-D status */
+		if (!hp_port_d_present(spec))
+			pinctl = 0;
 	}
 	snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
 			pinctl);
@@ -2379,8 +2380,8 @@
 	/* Port D */
 	portD = snd_hda_jack_detect(codec, 0x1c);
 
-	spec->hp_present = !!(portA);
-	spec->hp_present |= portD ? 2 : 0;
+	spec->hp_present = portA ? HP_PRESENT_PORT_A : 0;
+	spec->hp_present |= portD ? HP_PRESENT_PORT_D : 0;
 	snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
 		portA, portD, spec->hp_present);
 	cxt5066_update_speaker(codec);
@@ -2728,7 +2729,7 @@
 static struct snd_kcontrol_new cxt5066_vostro_mixers[] = {
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "Int Mic Boost Capture Enum",
+		.name = "Internal Mic Boost Capture Enum",
 		.info = cxt5066_mic_boost_mux_enum_info,
 		.get = cxt5066_mic_boost_mux_enum_get,
 		.put = cxt5066_mic_boost_mux_enum_put,
@@ -2954,7 +2955,7 @@
 	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 
 	/* internal microphone */
-	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
 
 	/* EAPD */
 	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -3009,7 +3010,7 @@
 	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 
 	/* internal microphone */
-	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
 
 	/* EAPD */
 	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -3097,6 +3098,7 @@
 	SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
@@ -3108,16 +3110,9 @@
 		      CXT5066_LAPTOP),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
  	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
- 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
 	{}
 };
 
@@ -3422,6 +3417,9 @@
 				    AC_VERB_SET_PIN_WIDGET_CONTROL,
 				    present ? 0 : PIN_OUT);
 	}
+	for (i = 0; !present && i < cfg->line_outs; i++)
+		if (snd_hda_jack_detect(codec, cfg->line_out_pins[i]))
+			present = 1;
 	for (i = 0; i < cfg->speaker_outs; i++) {
 		snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
 				    AC_VERB_SET_PIN_WIDGET_CONTROL,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 31df774..f29b97b 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -31,10 +31,15 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/moduleparam.h>
 #include <sound/core.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 
+static bool static_hdmi_pcm;
+module_param(static_hdmi_pcm, bool, 0644);
+MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+
 /*
  * The HDMI/DisplayPort configuration can be highly dynamic. A graphics device
  * could support two independent pipes, each of them can be connected to one or
@@ -827,7 +832,7 @@
 		*codec_pars = *hinfo;
 
 	eld = &spec->sink_eld[idx];
-	if (eld->sad_count > 0) {
+	if (!static_hdmi_pcm && eld->eld_valid && eld->sad_count > 0) {
 		hdmi_eld_update_pcm_info(eld, hinfo, codec_pars);
 		if (hinfo->channels_min > hinfo->channels_max ||
 		    !hinfo->rates || !hinfo->formats)
@@ -904,23 +909,28 @@
 	spec->pin[spec->num_pins] = pin_nid;
 	spec->num_pins++;
 
-	/*
-	 * It is assumed that converter nodes come first in the node list and
-	 * hence have been registered and usable now.
-	 */
 	return hdmi_read_pin_conn(codec, pin_nid);
 }
 
 static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t nid)
 {
+	int i, found_pin = 0;
 	struct hdmi_spec *spec = codec->spec;
 
-	if (spec->num_cvts >= MAX_HDMI_CVTS) {
-		snd_printk(KERN_WARNING
-			   "HDMI: no space for converter %d\n", nid);
-		return -E2BIG;
+	for (i = 0; i < spec->num_pins; i++)
+		if (nid == spec->pin_cvt[i]) {
+			found_pin = 1;
+			break;
+		}
+
+	if (!found_pin) {
+		snd_printdd("HDMI: Skipping node %d (no connection)\n", nid);
+		return -EINVAL;
 	}
 
+	if (snd_BUG_ON(spec->num_cvts >= MAX_HDMI_CVTS))
+		return -E2BIG;
+
 	spec->cvt[spec->num_cvts] = nid;
 	spec->num_cvts++;
 
@@ -931,6 +941,8 @@
 {
 	hda_nid_t nid;
 	int i, nodes;
+	int num_tmp_cvts = 0;
+	hda_nid_t tmp_cvt[MAX_HDMI_CVTS];
 
 	nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid);
 	if (!nid || nodes < 0) {
@@ -941,6 +953,7 @@
 	for (i = 0; i < nodes; i++, nid++) {
 		unsigned int caps;
 		unsigned int type;
+		unsigned int config;
 
 		caps = snd_hda_param_read(codec, nid, AC_PAR_AUDIO_WIDGET_CAP);
 		type = get_wcaps_type(caps);
@@ -950,17 +963,32 @@
 
 		switch (type) {
 		case AC_WID_AUD_OUT:
-			hdmi_add_cvt(codec, nid);
+			if (num_tmp_cvts >= MAX_HDMI_CVTS) {
+				snd_printk(KERN_WARNING
+					   "HDMI: no space for converter %d\n", nid);
+				continue;
+			}
+			tmp_cvt[num_tmp_cvts] = nid;
+			num_tmp_cvts++;
 			break;
 		case AC_WID_PIN:
 			caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
 			if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
 				continue;
+
+			config = snd_hda_codec_read(codec, nid, 0,
+					     AC_VERB_GET_CONFIG_DEFAULT, 0);
+			if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
+				continue;
+
 			hdmi_add_pin(codec, nid);
 			break;
 		}
 	}
 
+	for (i = 0; i < num_tmp_cvts; i++)
+		hdmi_add_cvt(codec, tmp_cvt[i]);
+
 	/*
 	 * G45/IbexPeak don't support EPSS: the unsolicited pin hot plug event
 	 * can be lost and presence sense verb will become inaccurate if the
@@ -1165,11 +1193,53 @@
 	return 0;
 }
 
+static unsigned int channels_2_6_8[] = {
+	2, 6, 8
+};
+
+static unsigned int channels_2_8[] = {
+	2, 8
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = {
+	.count = ARRAY_SIZE(channels_2_6_8),
+	.list = channels_2_6_8,
+	.mask = 0,
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = {
+	.count = ARRAY_SIZE(channels_2_8),
+	.list = channels_2_8,
+	.mask = 0,
+};
+
 static int simple_playback_pcm_open(struct hda_pcm_stream *hinfo,
 				    struct hda_codec *codec,
 				    struct snd_pcm_substream *substream)
 {
 	struct hdmi_spec *spec = codec->spec;
+	struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL;
+
+	switch (codec->preset->id) {
+	case 0x10de0002:
+	case 0x10de0003:
+	case 0x10de0005:
+	case 0x10de0006:
+		hw_constraints_channels = &hw_constraints_2_8_channels;
+		break;
+	case 0x10de0007:
+		hw_constraints_channels = &hw_constraints_2_6_8_channels;
+		break;
+	default:
+		break;
+	}
+
+	if (hw_constraints_channels != NULL) {
+		snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_CHANNELS,
+				hw_constraints_channels);
+	}
+
 	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
 }
 
@@ -1532,7 +1602,7 @@
 { .id = 0x1002793c, .name = "RS600 HDMI",	.patch = patch_atihdmi },
 { .id = 0x10027919, .name = "RS600 HDMI",	.patch = patch_atihdmi },
 { .id = 0x1002791a, .name = "RS690/780 HDMI",	.patch = patch_atihdmi },
-{ .id = 0x1002aa01, .name = "R6xx HDMI",	.patch = patch_atihdmi },
+{ .id = 0x1002aa01, .name = "R6xx HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x10951390, .name = "SiI1390 HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x10951392, .name = "SiI1392 HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x17e80047, .name = "Chrontel HDMI",	.patch = patch_generic_hdmi },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 552a09e..51c08ed 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -231,7 +231,6 @@
 	ALC888_ACER_ASPIRE_8930G,
 	ALC888_ACER_ASPIRE_7730G,
 	ALC883_MEDION,
-	ALC883_MEDION_MD2,
 	ALC883_MEDION_WIM2160,
 	ALC883_LAPTOP_EAPD,
 	ALC883_LENOVO_101E_2ch,
@@ -1678,29 +1677,32 @@
 	u32 val;
 };
 
+struct alc_model_fixup {
+	const int id;
+	const char *name;
+};
+
 struct alc_fixup {
 	unsigned int sku;
 	const struct alc_pincfg *pins;
 	const struct hda_verb *verbs;
+	void (*func)(struct hda_codec *codec, const struct alc_fixup *fix,
+		     int pre_init);
 };
 
-static void alc_pick_fixup(struct hda_codec *codec,
-			   const struct snd_pci_quirk *quirk,
-			   const struct alc_fixup *fix,
-			   int pre_init)
+static void __alc_pick_fixup(struct hda_codec *codec,
+			     const struct alc_fixup *fix,
+			     const char *modelname,
+			     int pre_init)
 {
 	const struct alc_pincfg *cfg;
 	struct alc_spec *spec;
 
-	quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
-	if (!quirk)
-		return;
-	fix += quirk->value;
 	cfg = fix->pins;
 	if (pre_init && fix->sku) {
 #ifdef CONFIG_SND_DEBUG_VERBOSE
 		snd_printdd(KERN_INFO "hda_codec: %s: Apply sku override for %s\n",
-			    codec->chip_name, quirk->name);
+			    codec->chip_name, modelname);
 #endif
 		spec = codec->spec;
 		spec->cdefine.sku_cfg = fix->sku;
@@ -1709,7 +1711,7 @@
 	if (pre_init && cfg) {
 #ifdef CONFIG_SND_DEBUG_VERBOSE
 		snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n",
-			    codec->chip_name, quirk->name);
+			    codec->chip_name, modelname);
 #endif
 		for (; cfg->nid; cfg++)
 			snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
@@ -1717,10 +1719,53 @@
 	if (!pre_init && fix->verbs) {
 #ifdef CONFIG_SND_DEBUG_VERBOSE
 		snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n",
-			    codec->chip_name, quirk->name);
+			    codec->chip_name, modelname);
 #endif
 		add_verb(codec->spec, fix->verbs);
 	}
+	if (fix->func) {
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+		snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-func for %s\n",
+			    codec->chip_name, modelname);
+#endif
+		fix->func(codec, fix, pre_init);
+	}
+}
+
+static void alc_pick_fixup(struct hda_codec *codec,
+				 const struct snd_pci_quirk *quirk,
+				 const struct alc_fixup *fix,
+				 int pre_init)
+{
+	quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+	if (quirk) {
+		fix += quirk->value;
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+		__alc_pick_fixup(codec, fix, quirk->name, pre_init);
+#else
+		__alc_pick_fixup(codec, fix, NULL, pre_init);
+#endif
+	}
+}
+
+static void alc_pick_fixup_model(struct hda_codec *codec,
+				 const struct alc_model_fixup *models,
+				 const struct snd_pci_quirk *quirk,
+				 const struct alc_fixup *fix,
+				 int pre_init)
+{
+	if (codec->modelname && models) {
+		while (models->name) {
+			if (!strcmp(codec->modelname, models->name)) {
+				fix += models->id;
+				break;
+			}
+			models++;
+		}
+		__alc_pick_fixup(codec, fix, codec->modelname, pre_init);
+	} else {
+		alc_pick_fixup(codec, quirk, fix, pre_init);
+	}
 }
 
 static int alc_read_coef_idx(struct hda_codec *codec,
@@ -1981,6 +2026,7 @@
 	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
 	{ }
 };
 
@@ -2120,17 +2166,17 @@
 	{
 		.num_items = 5,
 		.items = {
-			{ "Ext Mic", 0x0 },
+			{ "Mic", 0x0 },
 			{ "Line In", 0x2 },
 			{ "CD", 0x4 },
 			{ "Input Mix", 0xa },
-			{ "Int Mic", 0xb },
+			{ "Internal Mic", 0xb },
 		},
 	},
 	{
 		.num_items = 4,
 		.items = {
-			{ "Ext Mic", 0x0 },
+			{ "Mic", 0x0 },
 			{ "Line In", 0x2 },
 			{ "CD", 0x4 },
 			{ "Input Mix", 0xa },
@@ -2187,7 +2233,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -2205,7 +2251,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -2796,10 +2842,10 @@
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -3307,7 +3353,7 @@
 };
 
 /* auto-toggle front mic */
-static void alc880_uniwill_mic_automute(struct hda_codec *codec)
+static void alc88x_simple_mic_automute(struct hda_codec *codec)
 {
  	unsigned int present;
 	unsigned char bits;
@@ -3329,7 +3375,7 @@
 static void alc880_uniwill_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc880_uniwill_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc880_uniwill_unsol_event(struct hda_codec *codec,
@@ -3340,7 +3386,7 @@
 	 */
 	switch (res >> 28) {
 	case ALC880_MIC_EVENT:
-		alc880_uniwill_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -5023,6 +5069,25 @@
 	return 0;
 }
 
+static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg,
+					bool can_be_master)
+{
+	if (!cfg->hp_outs && !cfg->speaker_outs && can_be_master)
+		return "Master";
+
+	switch (cfg->line_out_type) {
+	case AUTO_PIN_SPEAKER_OUT:
+		return "Speaker";
+	case AUTO_PIN_HP_OUT:
+		return "Headphone";
+	default:
+		if (cfg->line_outs == 1)
+			return "PCM";
+		break;
+	}
+	return NULL;
+}
+
 /* add playback controls from the parsed DAC table */
 static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
@@ -5030,6 +5095,7 @@
 	static const char *chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, false);
 	hda_nid_t nid;
 	int i, err;
 
@@ -5037,7 +5103,7 @@
 		if (!spec->multiout.dac_nids[i])
 			continue;
 		nid = alc880_idx_to_mixer(alc880_dac_to_idx(spec->multiout.dac_nids[i]));
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
 					      "Center",
@@ -5064,18 +5130,17 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			else
-				pfx = chname[i];
-			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
+						name, i,
 					  HDA_COMPOSE_AMP_VAL(nid, 3, 0,
 							      HDA_OUTPUT));
 			if (err < 0)
 				return err;
-			err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx,
+			err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
+					       name, i,
 					  HDA_COMPOSE_AMP_VAL(nid, 3, 2,
 							      HDA_INPUT));
 			if (err < 0)
@@ -5155,7 +5220,8 @@
 {
 	struct alc_spec *spec = codec->spec;
 	struct hda_input_mux *imux = &spec->private_imux[0];
-	int i, err, idx, type, type_idx = 0;
+	int i, err, idx, type_idx = 0;
+	const char *prev_label = NULL;
 
 	for (i = 0; i < cfg->num_inputs; i++) {
 		hda_nid_t pin;
@@ -5165,12 +5231,13 @@
 		if (!alc_is_input_pin(codec, pin))
 			continue;
 
-		type = cfg->inputs[i].type;
-		if (i > 0 && type == cfg->inputs[i - 1].type)
+		label = hda_get_autocfg_input_label(codec, cfg, i);
+		if (prev_label && !strcmp(label, prev_label))
 			type_idx++;
 		else
 			type_idx = 0;
-		label = hda_get_autocfg_input_label(codec, cfg, i);
+		prev_label = label;
+
 		if (mixer) {
 			idx = get_connection_index(codec, mixer, pin);
 			if (idx >= 0) {
@@ -7406,7 +7473,7 @@
 	.num_items = 4,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 		{ "Line", 0x2 },
 		{ "CD", 0x4 },
 	},
@@ -7416,7 +7483,7 @@
 	.num_items = 2,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 	},
 };
 
@@ -7851,10 +7918,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -7878,8 +7945,8 @@
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7896,8 +7963,8 @@
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7912,7 +7979,7 @@
 	HDA_BIND_MUTE   ("Headphone Playback Switch", 0x0f, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x07, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7931,7 +7998,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -7946,10 +8013,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7969,7 +8036,7 @@
 	HDA_CODEC_MUTE("Mobile Line Playback Switch", 0x0b, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7982,7 +8049,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8763,10 +8830,10 @@
 	HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8777,11 +8844,11 @@
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8791,11 +8858,11 @@
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8808,10 +8875,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8831,10 +8898,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8855,10 +8922,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8879,10 +8946,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x1b, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1b, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x3, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8902,10 +8969,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8926,7 +8993,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8939,20 +9006,20 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc883_targa_8ch_mixer[] = {
 	HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8963,7 +9030,7 @@
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8976,21 +9043,8 @@
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	{ } /* end */
-};
-
-static struct snd_kcontrol_new alc883_medion_md2_mixer[] = {
-	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Front Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -9037,7 +9091,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9050,7 +9104,7 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9072,10 +9126,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9096,8 +9150,8 @@
 	HDA_CODEC_MUTE("Enable Headphones", 0x15, 0x00, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Enable LFE", 0x16, 2, 0x00, HDA_OUTPUT),
 	/* Boost mixers */
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT),
 	/* Input mixers */
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
@@ -9111,7 +9165,7 @@
 	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9141,7 +9195,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9182,16 +9236,6 @@
 	spec->autocfg.speaker_pins[1] = 0x17;
 }
 
-/* auto-toggle front mic */
-/*
-static void alc883_mitac_mic_automute(struct hda_codec *codec)
-{
-	unsigned char bits = snd_hda_jack_detect(codec, 0x18) ? HDA_AMP_MUTE : 0;
-
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits);
-}
-*/
-
 static struct hda_verb alc883_mitac_verbs[] = {
 	/* HP */
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -9435,18 +9479,8 @@
 		alc888_lenovo_ms7195_rca_automute(codec);
 }
 
-static struct hda_verb alc883_medion_md2_verbs[] = {
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-
-	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-
-	{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
-	{ } /* end */
-};
-
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_medion_md2_setup(struct hda_codec *codec)
+static void alc883_lenovo_nb0763_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -9458,15 +9492,6 @@
 #define alc883_targa_init_hook		alc882_targa_init_hook
 #define alc883_targa_unsol_event	alc882_targa_unsol_event
 
-static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-
-	present = snd_hda_jack_detect(codec, 0x18);
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1,
-				 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
 static void alc883_clevo_m720_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -9478,7 +9503,7 @@
 static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc883_clevo_m720_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc883_clevo_m720_unsol_event(struct hda_codec *codec,
@@ -9486,7 +9511,7 @@
 {
 	switch (res >> 26) {
 	case ALC880_MIC_EVENT:
-		alc883_clevo_m720_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -9731,7 +9756,6 @@
 	[ALC888_ACER_ASPIRE_8930G]	= "acer-aspire-8930g",
 	[ALC888_ACER_ASPIRE_7730G]	= "acer-aspire-7730g",
 	[ALC883_MEDION]		= "medion",
-	[ALC883_MEDION_MD2]	= "medion-md2",
 	[ALC883_MEDION_WIM2160]	= "medion-wim2160",
 	[ALC883_LAPTOP_EAPD]	= "laptop-eapd",
 	[ALC883_LENOVO_101E_2ch] = "lenovo-101e",
@@ -10379,19 +10403,6 @@
 		.channel_mode = alc883_sixstack_modes,
 		.input_mux = &alc883_capture_source,
 	},
-	[ALC883_MEDION_MD2] = {
-		.mixers = { alc883_medion_md2_mixer},
-		.init_verbs = { alc883_init_verbs, alc883_medion_md2_verbs},
-		.num_dacs = ARRAY_SIZE(alc883_dac_nids),
-		.dac_nids = alc883_dac_nids,
-		.dig_out_nid = ALC883_DIGOUT_NID,
-		.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
-		.channel_mode = alc883_3ST_2ch_modes,
-		.input_mux = &alc883_capture_source,
-		.unsol_event = alc_automute_amp_unsol_event,
-		.setup = alc883_medion_md2_setup,
-		.init_hook = alc_automute_amp,
-	},
 	[ALC883_MEDION_WIM2160] = {
 		.mixers = { alc883_medion_wim2160_mixer },
 		.init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs },
@@ -10468,7 +10479,7 @@
 		.need_dac_fix = 1,
 		.input_mux = &alc883_lenovo_nb0763_capture_source,
 		.unsol_event = alc_automute_amp_unsol_event,
-		.setup = alc883_medion_md2_setup,
+		.setup = alc883_lenovo_nb0763_setup,
 		.init_hook = alc_automute_amp,
 	},
 	[ALC888_LENOVO_MS7195_DIG] = {
@@ -10830,25 +10841,30 @@
 {
 	struct alc_spec *spec = codec->spec;
 	struct auto_pin_cfg *cfg = &spec->autocfg;
-	int i, err, type;
+	int i, err;
 	int type_idx = 0;
 	hda_nid_t nid;
+	const char *prev_label = NULL;
 
 	for (i = 0; i < cfg->num_inputs; i++) {
 		if (cfg->inputs[i].type > AUTO_PIN_MIC)
 			break;
 		nid = cfg->inputs[i].pin;
 		if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) {
-			char label[32];
-			type = cfg->inputs[i].type;
-			if (i > 0 && type == cfg->inputs[i - 1].type)
+			const char *label;
+			char boost_label[32];
+
+			label = hda_get_autocfg_input_label(codec, cfg, i);
+			if (prev_label && !strcmp(label, prev_label))
 				type_idx++;
 			else
 				type_idx = 0;
-			snprintf(label, sizeof(label), "%s Boost",
-				 hda_get_autocfg_input_label(codec, cfg, i));
-			err = add_control(spec, ALC_CTL_WIDGET_VOL, label,
-					  type_idx,
+			prev_label = label;
+
+			snprintf(boost_label, sizeof(boost_label),
+				 "%s Boost Volume", label);
+			err = add_control(spec, ALC_CTL_WIDGET_VOL,
+					  boost_label, type_idx,
 				  HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
 			if (err < 0)
 				return err;
@@ -10857,6 +10873,9 @@
 	return 0;
 }
 
+static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
+					     const struct auto_pin_cfg *cfg);
+
 /* almost identical with ALC880 parser... */
 static int alc882_parse_auto_config(struct hda_codec *codec)
 {
@@ -10874,7 +10893,10 @@
 	err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
 	if (err < 0)
 		return err;
-	err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
+	if (codec->vendor_id == 0x10ec0887)
+		err = alc861vd_auto_create_multi_out_ctls(spec, &spec->autocfg);
+	else
+		err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
 	if (err < 0)
 		return err;
 	err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
@@ -11090,10 +11112,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
@@ -11194,10 +11216,10 @@
 			    HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -11219,7 +11241,7 @@
 			    HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x1a, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -11230,7 +11252,7 @@
 static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
 	HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Rear Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11250,7 +11272,7 @@
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11357,10 +11379,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	{ } /* end */
 };
@@ -11374,10 +11396,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11445,10 +11467,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11632,7 +11654,7 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
@@ -11687,7 +11709,7 @@
 	.num_items = 3,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 		{ "CD", 0x4 },
 	},
 };
@@ -11839,12 +11861,12 @@
 	},
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11875,12 +11897,12 @@
 	},
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11889,10 +11911,10 @@
 	ALC262_HIPPO_MASTER_SWITCH,
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11918,8 +11940,8 @@
 	HDA_BIND_MUTE("Master Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Headphone Mic Boost", 0x15, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Headphone Mic Boost Volume", 0x15, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -12089,13 +12111,8 @@
 	spec->multiout.dac_nids = spec->private_dac_nids;
 	spec->multiout.dac_nids[0] = 2;
 
-	if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
-		pfx = "Master";
-	else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-		pfx = "Speaker";
-	else if (cfg->line_out_type == AUTO_PIN_HP_OUT)
-		pfx = "Headphone";
-	else
+	pfx = alc_get_line_out_pfx(cfg, true);
+	if (!pfx)
 		pfx = "Front";
 	for (i = 0; i < 2; i++) {
 		err = alc262_add_out_sw_ctl(spec, cfg->line_out_pins[i], pfx, i);
@@ -12996,9 +13013,9 @@
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13007,9 +13024,9 @@
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13113,9 +13130,9 @@
 		.put = alc268_acer_master_sw_put,
 		.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
 	},
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13131,8 +13148,8 @@
 		.put = alc268_acer_master_sw_put,
 		.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
 	},
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13224,8 +13241,8 @@
 	HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13258,8 +13275,8 @@
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Capture Volume", 0x23, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Mic Capture Switch", 0x23, 2, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14082,10 +14099,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT),
 	{ } /* end */
@@ -14105,10 +14122,10 @@
 	},
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14126,13 +14143,13 @@
 	},
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x0b, 0x03, HDA_INPUT),
 	HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x0b, 0x03, HDA_INPUT),
-	HDA_CODEC_VOLUME("Dock Mic Boost", 0x1b, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x1b, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14162,30 +14179,30 @@
 static struct snd_kcontrol_new alc269_laptop_analog_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269_laptop_digital_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269vb_laptop_analog_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269vb_laptop_digital_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -14804,12 +14821,23 @@
 }
 #endif /* SND_HDA_NEEDS_RESUME */
 
+static void alc269_fixup_hweq(struct hda_codec *codec,
+			       const struct alc_fixup *fix, int pre_init)
+{
+	int coef;
+
+	coef = alc_read_coef_idx(codec, 0x1e);
+	alc_write_coef_idx(codec, 0x1e, coef | 0x80);
+}
+
 enum {
 	ALC269_FIXUP_SONY_VAIO,
 	ALC275_FIX_SONY_VAIO_GPIO2,
 	ALC269_FIXUP_DELL_M101Z,
 	ALC269_FIXUP_SKU_IGNORE,
 	ALC269_FIXUP_ASUS_G73JW,
+	ALC269_FIXUP_LENOVO_EAPD,
+	ALC275_FIXUP_SONY_HWEQ,
 };
 
 static const struct alc_fixup alc269_fixups[] = {
@@ -14824,6 +14852,7 @@
 			{0x01, AC_VERB_SET_GPIO_MASK, 0x04},
 			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
 			{0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+			{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
 			{ }
 		}
 	},
@@ -14844,17 +14873,34 @@
 			{ }
 		}
 	},
+	[ALC269_FIXUP_LENOVO_EAPD] = {
+		.verbs = (const struct hda_verb[]) {
+			{0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
+			{}
+		}
+	},
+	[ALC275_FIXUP_SONY_HWEQ] = {
+		.func = alc269_fixup_hweq,
+		.verbs = (const struct hda_verb[]) {
+			{0x01, AC_VERB_SET_GPIO_MASK, 0x04},
+			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
+			{0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+			{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
+			{ }
+		}
+	}
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
-	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
-	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
+	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 	{}
 };
 
@@ -15889,13 +15935,16 @@
 	return 0;
 }
 
-static int alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
-				hda_nid_t nid, unsigned int chs)
+static int __alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
+				  hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx,
+	return __add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
 }
 
+#define alc861_create_out_sw(codec, pfx, nid, chs) \
+	__alc861_create_out_sw(codec, pfx, nid, 0, chs)
+
 /* add playback controls from the parsed DAC table */
 static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
 					     const struct auto_pin_cfg *cfg)
@@ -15904,26 +15953,15 @@
 	static const char *chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid;
 	int i, err;
 
-	if (cfg->line_outs == 1) {
-		const char *pfx = NULL;
-		if (!cfg->hp_outs)
-			pfx = "Master";
-		else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-			pfx = "Speaker";
-		if (pfx) {
-			nid = spec->multiout.dac_nids[0];
-			return alc861_create_out_sw(codec, pfx, nid, 3);
-		}
-	}
-
 	for (i = 0; i < cfg->line_outs; i++) {
 		nid = spec->multiout.dac_nids[i];
 		if (!nid)
 			continue;
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = alc861_create_out_sw(codec, "Center", nid, 1);
 			if (err < 0)
@@ -15932,7 +15970,10 @@
 			if (err < 0)
 				return err;
 		} else {
-			err = alc861_create_out_sw(codec, chname[i], nid, 3);
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __alc861_create_out_sw(codec, name, nid, i, 3);
 			if (err < 0)
 				return err;
 		}
@@ -16404,8 +16445,8 @@
 static struct hda_input_mux alc861vd_dallas_capture_source = {
 	.num_items = 2,
 	.items = {
-		{ "Ext Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Mic", 0x0 },
+		{ "Internal Mic", 0x1 },
 	},
 };
 
@@ -16484,11 +16525,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16507,11 +16548,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16531,11 +16572,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16546,19 +16587,19 @@
 };
 
 /* Pin assignment: Speaker=0x14, HP = 0x15,
- *                 Ext Mic=0x18, Int Mic = 0x19, CD = 0x1c, PC Beep = 0x1d
+ *                 Mic=0x18, Internal Mic = 0x19, CD = 0x1c, PC Beep = 0x1d
  */
 static struct snd_kcontrol_new alc861vd_dallas_mixer[] = {
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Headphone Playback Switch", 0x0d, 2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -16723,18 +16764,6 @@
 	{}
 };
 
-static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-	unsigned char bits;
-
-	present = snd_hda_jack_detect(codec, 0x18);
-	bits = present ? HDA_AMP_MUTE : 0;
-
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1,
-				 HDA_AMP_MUTE, bits);
-}
-
 static void alc861vd_lenovo_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -16745,7 +16774,7 @@
 static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc861vd_lenovo_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc861vd_lenovo_unsol_event(struct hda_codec *codec,
@@ -16753,7 +16782,7 @@
 {
 	switch (res >> 26) {
 	case ALC880_MIC_EVENT:
-		alc861vd_lenovo_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -17043,12 +17072,13 @@
 #define alc861vd_idx_to_mixer_switch(nid)	((nid) + 0x0c)
 
 /* add playback controls from the parsed DAC table */
-/* Based on ALC880 version. But ALC861VD has separate,
+/* Based on ALC880 version. But ALC861VD and ALC887 have separate,
  * different NIDs for mute/unmute switch and volume control */
 static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
 {
 	static const char *chname[4] = {"Front", "Surround", "CLFE", "Side"};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid_v, nid_s;
 	int i, err;
 
@@ -17062,7 +17092,7 @@
 				alc880_dac_to_idx(
 					spec->multiout.dac_nids[i]));
 
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
 					      "Center",
@@ -17089,24 +17119,17 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-				if (!cfg->hp_pins)
-					pfx = "Speaker";
-				else
-					pfx = "PCM";
-			} else
-				pfx = chname[i];
-			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
+						name, i,
 					  HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
 							      HDA_OUTPUT));
 			if (err < 0)
 				return err;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx,
+			err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
+					       name, i,
 					  HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
 							      HDA_INPUT));
 			if (err < 0)
@@ -17570,13 +17593,13 @@
 	HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
 
-	HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("e-Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -17720,8 +17743,8 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -17732,8 +17755,8 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	{ } /* end */
@@ -18566,13 +18589,13 @@
 	HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
 
-	HDA_CODEC_VOLUME("e-Mic/LineIn Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("e-Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("e-Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic/LineIn Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -18583,13 +18606,13 @@
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -19094,20 +19117,24 @@
 	return 0;
 }
 
-static inline int alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
-			      hda_nid_t nid, unsigned int chs)
+static inline int __alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
+				       hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+	return __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
 }
 
-static inline int alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
-			     hda_nid_t nid, unsigned int chs)
+static inline int __alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
+				      hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx,
+	return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_INPUT));
 }
 
+#define alc662_add_vol_ctl(spec, pfx, nid, chs) \
+	__alc662_add_vol_ctl(spec, pfx, nid, 0, chs)
+#define alc662_add_sw_ctl(spec, pfx, nid, chs) \
+	__alc662_add_sw_ctl(spec, pfx, nid, 0, chs)
 #define alc662_add_stereo_vol(spec, pfx, nid) \
 	alc662_add_vol_ctl(spec, pfx, nid, 3)
 #define alc662_add_stereo_sw(spec, pfx, nid) \
@@ -19121,6 +19148,7 @@
 	static const char *chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid, mix;
 	int i, err;
 
@@ -19131,7 +19159,7 @@
 		mix = alc662_dac_to_mix(codec, cfg->line_out_pins[i], nid);
 		if (!mix)
 			continue;
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = alc662_add_vol_ctl(spec, "Center", nid, 1);
 			if (err < 0)
@@ -19146,22 +19174,13 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-				if (cfg->hp_outs)
-					pfx = "Speaker";
-				else
-					pfx = "PCM";
-			} else
-				pfx = chname[i];
-			err = alc662_add_vol_ctl(spec, pfx, nid, 3);
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __alc662_add_vol_ctl(spec, name, nid, i, 3);
 			if (err < 0)
 				return err;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			err = alc662_add_sw_ctl(spec, pfx, mix, 3);
+			err = __alc662_add_sw_ctl(spec, name, mix, i, 3);
 			if (err < 0)
 				return err;
 		}
@@ -19358,9 +19377,21 @@
 		alc_inithook(codec);
 }
 
+static void alc272_fixup_mario(struct hda_codec *codec,
+			       const struct alc_fixup *fix, int pre_init)
+{
+	if (snd_hda_override_amp_caps(codec, 0x2, HDA_OUTPUT,
+				      (0x3b << AC_AMPCAP_OFFSET_SHIFT) |
+				      (0x3b << AC_AMPCAP_NUM_STEPS_SHIFT) |
+				      (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+				      (0 << AC_AMPCAP_MUTE_SHIFT)))
+		printk(KERN_WARNING
+		       "hda_codec: failed to override amp caps for NID 0x2\n");
+}
+
 enum {
 	ALC662_FIXUP_ASPIRE,
 	ALC662_FIXUP_IDEAPAD,
+	ALC272_FIXUP_MARIO,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -19376,6 +19407,9 @@
 			{ }
 		}
 	},
+	[ALC272_FIXUP_MARIO] = {
+		.func = alc272_fixup_mario,
+	}
 };
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -19386,6 +19420,10 @@
 	{}
 };
 
+static const struct alc_model_fixup alc662_fixup_models[] = {
+	{.id = ALC272_FIXUP_MARIO, .name = "mario"},
+	{}
+};
 
 
 static int patch_alc662(struct hda_codec *codec)
@@ -19485,7 +19523,8 @@
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC662_AUTO) {
 		spec->init_hook = alc662_auto_init;
-		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0);
+		alc_pick_fixup_model(codec, alc662_fixup_models,
+				     alc662_fixup_tbl, alc662_fixups, 0);
 	}
 
 	alc_init_jacks(codec);
@@ -19612,9 +19651,9 @@
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x12, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
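The ALC272_FIXUP_MARIO entry above overrides the reported amp capabilities of NID 0x02 via snd_hda_override_amp_caps(). Below is a standalone, illustrative sketch of what that capability word encodes; the AC_AMPCAP_* shift values are assumed from hda_codec.h of this era and are not part of the patch:

/*
 * Illustrative only: decode the amp-capability word written by the
 * "mario" fixup above.  The shift values are assumptions taken from
 * sound/pci/hda/hda_codec.h of this kernel generation.
 */
#include <stdio.h>

#define AC_AMPCAP_OFFSET_SHIFT		0
#define AC_AMPCAP_NUM_STEPS_SHIFT	8
#define AC_AMPCAP_STEP_SIZE_SHIFT	16
#define AC_AMPCAP_MUTE_SHIFT		31

int main(void)
{
	unsigned int caps = (0x3b << AC_AMPCAP_OFFSET_SHIFT) |
			    (0x3b << AC_AMPCAP_NUM_STEPS_SHIFT) |
			    (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) |
			    (0u   << AC_AMPCAP_MUTE_SHIFT);
	unsigned int offset    = (caps >> AC_AMPCAP_OFFSET_SHIFT) & 0x7f;
	unsigned int num_steps = (caps >> AC_AMPCAP_NUM_STEPS_SHIFT) & 0x7f;
	unsigned int step_size = (caps >> AC_AMPCAP_STEP_SIZE_SHIFT) & 0x7f;
	double step_db  = (step_size + 1) * 0.25;	/* 1.00 dB here */
	double range_db = num_steps * step_db;		/* 59 dB here */

	printf("0 dB at step %u, %u steps of %.2f dB => %.0f dB range, no mute bit\n",
	       offset, num_steps, step_db, range_db);
	return 0;
}

With these numbers the overridden node should report a 0 to -59 dB volume range in 1 dB steps and no hardware mute.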
 
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f03b2ff..4ab019d 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -389,6 +389,9 @@
 	0x11, 0x20, 0
 };
 
+#define STAC92HD88XXX_NUM_DMICS	STAC92HD83XXX_NUM_DMICS
+#define stac92hd88xxx_dmic_nids	stac92hd83xxx_dmic_nids
+
 #define STAC92HD87B_NUM_DMICS	 1
 static hda_nid_t stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = {
 	0x11, 0
@@ -3591,7 +3594,7 @@
 		if (check_mic_pin(codec, spec->dmic_nids[i],
 		    &fixed, &ext, &dock))
 			return 0;
-	if (!fixed && !ext && !dock)
+	if (!fixed || (!ext && !dock))
 		return 0; /* no input to switch */
 	if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
 		return 0; /* no unsol support */
@@ -5422,7 +5425,7 @@
 	snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0);
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
-	spec->linear_tone_beep = 1;
+	spec->linear_tone_beep = 0;
 	codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
 	spec->digbeep_nid = 0x21;
 	spec->dmic_nids = stac92hd83xxx_dmic_nids;
@@ -5462,15 +5465,21 @@
 		spec->num_dmics = stac92xx_connected_ports(codec,
 				stac92hd87b_dmic_nids,
 				STAC92HD87B_NUM_DMICS);
-		/* Fall through */
+		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
+		spec->pin_nids = stac92hd88xxx_pin_nids;
+		spec->mono_nid = 0;
+		spec->num_pwrs = 0;
+		break;
 	case 0x111d7666:
 	case 0x111d7667:
 	case 0x111d7668:
 	case 0x111d7669:
+		spec->num_dmics = stac92xx_connected_ports(codec,
+				stac92hd88xxx_dmic_nids,
+				STAC92HD88XXX_NUM_DMICS);
 		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
 		spec->pin_nids = stac92hd88xxx_pin_nids;
 		spec->mono_nid = 0;
-		spec->digbeep_nid = 0;
 		spec->num_pwrs = 0;
 		break;
 	case 0x111d7604:
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index d1c3f8d..7f4852a 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -263,8 +263,7 @@
 		return;
 	snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
 			    !spec->vt1708_jack_detectect);
-	cancel_delayed_work(&spec->vt1708_hp_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&spec->vt1708_hp_work);
 }
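The patch_via.c hunk above drops the cancel_delayed_work()/flush_scheduled_work() pair in favour of cancel_delayed_work_sync(), which waits only for the codec's own delayed work instead of flushing the entire shared workqueue. A minimal sketch of that pattern; the demo_* names are made up for illustration:

/* Minimal sketch of the pattern patch_via.c switches to. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_poll);

static void demo_poll(struct work_struct *work)
{
	/* ... poll the hardware, then re-arm ... */
	schedule_delayed_work(&demo_work, msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	schedule_delayed_work(&demo_work, msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	/* cancels this work item and waits for a running instance,
	 * without touching unrelated work on the global workqueue */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");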
 
 
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 712c171..7b62de0 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -96,6 +96,11 @@
 		tmp |= ICE1712_DELTA_AP_CCLK | ICE1712_DELTA_AP_CS_CODEC;
 		tmp &= ~ICE1712_DELTA_AP_CS_DIGITAL;
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		tmp |= ICE1712_DELTA_66E_CCLK | ICE1712_DELTA_66E_CS_CHIP_A |
+		       ICE1712_DELTA_66E_CS_CHIP_B;
+		tmp &= ~ICE1712_DELTA_66E_CS_CS8427;
+		break;
 	case ICE1712_SUBDEVICE_VX442:
 		tmp |= ICE1712_VX442_CCLK | ICE1712_VX442_CODEC_CHIP_A | ICE1712_VX442_CODEC_CHIP_B;
 		tmp &= ~ICE1712_VX442_CS_DIGITAL;
@@ -119,6 +124,9 @@
 	case ICE1712_SUBDEVICE_DELTA410:
 		tmp |= ICE1712_DELTA_AP_CS_DIGITAL;
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		tmp |= ICE1712_DELTA_66E_CS_CS8427;
+		break;
 	case ICE1712_SUBDEVICE_VX442:
 		tmp |= ICE1712_VX442_CS_DIGITAL;
 		break;
@@ -276,6 +284,20 @@
 }
 
 /*
+ * AK4524 on Delta66 rev E to choose the chip address
+ */
+static void delta66e_ak4524_lock(struct snd_akm4xxx *ak, int chip)
+{
+	struct snd_ak4xxx_private *priv = (void *)ak->private_value[0];
+	struct snd_ice1712 *ice = ak->private_data[0];
+
+	snd_ice1712_save_gpio_status(ice);
+	priv->cs_mask =
+	priv->cs_addr = chip == 0 ? ICE1712_DELTA_66E_CS_CHIP_A :
+				    ICE1712_DELTA_66E_CS_CHIP_B;
+}
+
+/*
  * AK4528 on VX442 to choose the chip mask
  */
 static void vx442_ak4524_lock(struct snd_akm4xxx *ak, int chip)
@@ -487,6 +509,29 @@
 	.mask_flags = 0,
 };
 
+static struct snd_akm4xxx akm_delta66e __devinitdata = {
+	.type = SND_AK4524,
+	.num_adcs = 4,
+	.num_dacs = 4,
+	.ops = {
+		.lock = delta66e_ak4524_lock,
+		.set_rate_val = delta_ak4524_set_rate_val
+	}
+};
+
+static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = {
+	.caddr = 2,
+	.cif = 0, /* the default level of the CIF pin from AK4524 */
+	.data_mask = ICE1712_DELTA_66E_DOUT,
+	.clk_mask = ICE1712_DELTA_66E_CCLK,
+	.cs_mask = 0,
+	.cs_addr = 0, /* set later */
+	.cs_none = 0,
+	.add_flags = 0,
+	.mask_flags = 0,
+};
+
+
 static struct snd_akm4xxx akm_delta44 __devinitdata = {
 	.type = SND_AK4524,
 	.num_adcs = 4,
@@ -644,9 +689,11 @@
 		err = snd_ice1712_akm4xxx_init(ak, &akm_delta44, &akm_delta44_priv, ice);
 		break;
 	case ICE1712_SUBDEVICE_VX442:
-	case ICE1712_SUBDEVICE_DELTA66E:
 		err = snd_ice1712_akm4xxx_init(ak, &akm_vx442, &akm_vx442_priv, ice);
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		err = snd_ice1712_akm4xxx_init(ak, &akm_delta66e, &akm_delta66e_priv, ice);
+		break;
 	default:
 		snd_BUG();
 		return -EINVAL;
diff --git a/sound/pci/ice1712/delta.h b/sound/pci/ice1712/delta.h
index 1a0ac6c..11a9c3a 100644
--- a/sound/pci/ice1712/delta.h
+++ b/sound/pci/ice1712/delta.h
@@ -144,6 +144,17 @@
 #define ICE1712_DELTA_1010LT_CS_NONE	0x50	/* nothing */
 #define ICE1712_DELTA_1010LT_WORDCLOCK 0x80	/* sample clock source: 0 = Word Clock Input, 1 = S/PDIF Input ??? */
 
+/* M-Audio Delta 66 rev. E definitions.
+ * Newer revisions of the Delta 66 use a CS8427 over SPI as the S/PDIF
+ * transceiver instead of a CS8404/CS8414. */
+/* 0x01 = DFS */
+#define ICE1712_DELTA_66E_CCLK		0x02	/* SPI clock */
+#define ICE1712_DELTA_66E_DIN		0x04	/* data input */
+#define ICE1712_DELTA_66E_DOUT		0x08	/* data output */
+#define ICE1712_DELTA_66E_CS_CS8427	0x10	/* chip select, low = CS8427 */
+#define ICE1712_DELTA_66E_CS_CHIP_A	0x20	/* AK4524 #0 */
+#define ICE1712_DELTA_66E_CS_CHIP_B	0x40	/* AK4524 #1 */
+
 /* Digigram VX442 definitions */
 #define ICE1712_VX442_CCLK		0x02	/* SPI clock */
 #define ICE1712_VX442_DIN		0x04	/* data input */
diff --git a/sound/pci/oxygen/Makefile b/sound/pci/oxygen/Makefile
index acd8f15..0f87265 100644
--- a/sound/pci/oxygen/Makefile
+++ b/sound/pci/oxygen/Makefile
@@ -1,10 +1,8 @@
 snd-oxygen-lib-objs := oxygen_io.o oxygen_lib.o oxygen_mixer.o oxygen_pcm.o
-snd-hifier-objs := hifier.o
-snd-oxygen-objs := oxygen.o
+snd-oxygen-objs := oxygen.o xonar_dg.o
 snd-virtuoso-objs := virtuoso.o xonar_lib.o \
 	xonar_pcm179x.o xonar_cs43xx.o xonar_wm87x6.o xonar_hdmi.o
 
 obj-$(CONFIG_SND_OXYGEN_LIB) += snd-oxygen-lib.o
-obj-$(CONFIG_SND_HIFIER) += snd-hifier.o
 obj-$(CONFIG_SND_OXYGEN) += snd-oxygen.o
 obj-$(CONFIG_SND_VIRTUOSO) += snd-virtuoso.o
diff --git a/sound/pci/oxygen/cs4245.h b/sound/pci/oxygen/cs4245.h
new file mode 100644
index 0000000..5e0197e
--- /dev/null
+++ b/sound/pci/oxygen/cs4245.h
@@ -0,0 +1,107 @@
+#define CS4245_CHIP_ID		0x01
+#define CS4245_POWER_CTRL	0x02
+#define CS4245_DAC_CTRL_1	0x03
+#define CS4245_ADC_CTRL		0x04
+#define CS4245_MCLK_FREQ	0x05
+#define CS4245_SIGNAL_SEL	0x06
+#define CS4245_PGA_B_CTRL	0x07
+#define CS4245_PGA_A_CTRL	0x08
+#define CS4245_ANALOG_IN	0x09
+#define CS4245_DAC_A_CTRL	0x0a
+#define CS4245_DAC_B_CTRL	0x0b
+#define CS4245_DAC_CTRL_2	0x0c
+#define CS4245_INT_STATUS	0x0d
+#define CS4245_INT_MASK		0x0e
+#define CS4245_INT_MODE_MSB	0x0f
+#define CS4245_INT_MODE_LSB	0x10
+
+/* Chip ID */
+#define CS4245_CHIP_PART_MASK	0xf0
+#define CS4245_CHIP_REV_MASK	0x0f
+
+/* Power Control */
+#define CS4245_FREEZE		0x80
+#define CS4245_PDN_MIC		0x08
+#define CS4245_PDN_ADC		0x04
+#define CS4245_PDN_DAC		0x02
+#define CS4245_PDN		0x01
+
+/* DAC Control */
+#define CS4245_DAC_FM_MASK	0xc0
+#define CS4245_DAC_FM_SINGLE	0x00
+#define CS4245_DAC_FM_DOUBLE	0x40
+#define CS4245_DAC_FM_QUAD	0x80
+#define CS4245_DAC_DIF_MASK	0x30
+#define CS4245_DAC_DIF_LJUST	0x00
+#define CS4245_DAC_DIF_I2S	0x10
+#define CS4245_DAC_DIF_RJUST_16	0x20
+#define CS4245_DAC_DIF_RJUST_24	0x30
+#define CS4245_RESERVED_1	0x08
+#define CS4245_MUTE_DAC		0x04
+#define CS4245_DEEMPH		0x02
+#define CS4245_DAC_MASTER	0x01
+
+/* ADC Control */
+#define CS4245_ADC_FM_MASK	0xc0
+#define CS4245_ADC_FM_SINGLE	0x00
+#define CS4245_ADC_FM_DOUBLE	0x40
+#define CS4245_ADC_FM_QUAD	0x80
+#define CS4245_ADC_DIF_MASK	0x10
+#define CS4245_ADC_DIF_LJUST	0x00
+#define CS4245_ADC_DIF_I2S	0x10
+#define CS4245_MUTE_ADC		0x04
+#define CS4245_HPF_FREEZE	0x02
+#define CS4245_ADC_MASTER	0x01
+
+/* MCLK Frequency */
+#define CS4245_MCLK1_MASK	0x70
+#define CS4245_MCLK1_SHIFT	4
+#define CS4245_MCLK2_MASK	0x07
+#define CS4245_MCLK2_SHIFT	0
+#define CS4245_MCLK_1		0
+#define CS4245_MCLK_1_5		1
+#define CS4245_MCLK_2		2
+#define CS4245_MCLK_3		3
+#define CS4245_MCLK_4		4
+
+/* Signal Selection */
+#define CS4245_A_OUT_SEL_MASK	0x60
+#define CS4245_A_OUT_SEL_HIZ	0x00
+#define CS4245_A_OUT_SEL_DAC	0x20
+#define CS4245_A_OUT_SEL_PGA	0x40
+#define CS4245_LOOP		0x02
+#define CS4245_ASYNCH		0x01
+
+/* Channel B/A PGA Control */
+#define CS4245_PGA_GAIN_MASK	0x3f
+
+/* ADC Input Control */
+#define CS4245_PGA_SOFT		0x10
+#define CS4245_PGA_ZERO		0x08
+#define CS4245_SEL_MASK		0x07
+#define CS4245_SEL_MIC		0x00
+#define CS4245_SEL_INPUT_1	0x01
+#define CS4245_SEL_INPUT_2	0x02
+#define CS4245_SEL_INPUT_3	0x03
+#define CS4245_SEL_INPUT_4	0x04
+#define CS4245_SEL_INPUT_5	0x05
+#define CS4245_SEL_INPUT_6	0x06
+
+/* DAC Channel A/B Volume Control */
+#define CS4245_VOL_MASK		0xff
+
+/* DAC Control 2 */
+#define CS4245_DAC_SOFT		0x80
+#define CS4245_DAC_ZERO		0x40
+#define CS4245_INVERT_DAC	0x20
+#define CS4245_INT_ACTIVE_HIGH	0x01
+
+/* Interrupt Status/Mask/Mode */
+#define CS4245_ADC_CLK_ERR	0x08
+#define CS4245_DAC_CLK_ERR	0x04
+#define CS4245_ADC_OVFL		0x02
+#define CS4245_ADC_UNDRFL	0x01
+
+
+#define CS4245_SPI_ADDRESS	(0x9e << 16)
+#define CS4245_SPI_WRITE	(0 << 16)
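Given the register map above, a CS4245 register write over the CMI8788's SPI presumably goes out as a 24-bit word: chip-address/R-W byte, register index, value. A standalone arithmetic sketch; the three defines are copied from the header above, the helper name and example value are made up:

#include <stdio.h>

#define CS4245_SPI_ADDRESS	(0x9e << 16)
#define CS4245_SPI_WRITE	(0 << 16)
#define CS4245_DAC_A_CTRL	0x0a

static unsigned int cs4245_write_word(unsigned char reg, unsigned char value)
{
	return CS4245_SPI_ADDRESS | CS4245_SPI_WRITE | (reg << 8) | value;
}

int main(void)
{
	/* e.g. write 0x10 to the DAC channel A volume register */
	printf("0x%06x\n", cs4245_write_word(CS4245_DAC_A_CTRL, 0x10));
	return 0;
}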
diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c
deleted file mode 100644
index 5a87d68..0000000
--- a/sound/pci/oxygen/hifier.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * C-Media CMI8788 driver for the MediaTek/TempoTec HiFier Fantasia
- *
- * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
- *
- *
- *  This driver is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License, version 2.
- *
- *  This driver is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this driver; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-/*
- * CMI8788:
- *
- * SPI 0 -> AK4396
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <sound/control.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/tlv.h>
-#include "oxygen.h"
-#include "ak4396.h"
-
-MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
-MODULE_DESCRIPTION("TempoTec HiFier driver");
-MODULE_LICENSE("GPL v2");
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "card index");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "enable card");
-
-static DEFINE_PCI_DEVICE_TABLE(hifier_ids) = {
-	{ OXYGEN_PCI_SUBID(0x14c3, 0x1710) },
-	{ OXYGEN_PCI_SUBID(0x14c3, 0x1711) },
-	{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
-	{ }
-};
-MODULE_DEVICE_TABLE(pci, hifier_ids);
-
-struct hifier_data {
-	u8 ak4396_regs[5];
-};
-
-static void ak4396_write(struct oxygen *chip, u8 reg, u8 value)
-{
-	struct hifier_data *data = chip->model_data;
-
-	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER  |
-			 OXYGEN_SPI_DATA_LENGTH_2 |
-			 OXYGEN_SPI_CLOCK_160 |
-			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
-			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
-			 AK4396_WRITE | (reg << 8) | value);
-	data->ak4396_regs[reg] = value;
-}
-
-static void ak4396_write_cached(struct oxygen *chip, u8 reg, u8 value)
-{
-	struct hifier_data *data = chip->model_data;
-
-	if (value != data->ak4396_regs[reg])
-		ak4396_write(chip, reg, value);
-}
-
-static void hifier_registers_init(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-
-	ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN);
-	ak4396_write(chip, AK4396_CONTROL_2,
-		     data->ak4396_regs[AK4396_CONTROL_2]);
-	ak4396_write(chip, AK4396_CONTROL_3, AK4396_PCM);
-	ak4396_write(chip, AK4396_LCH_ATT, chip->dac_volume[0]);
-	ak4396_write(chip, AK4396_RCH_ATT, chip->dac_volume[1]);
-}
-
-static void hifier_init(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-
-	data->ak4396_regs[AK4396_CONTROL_2] =
-		AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL;
-	hifier_registers_init(chip);
-
-	snd_component_add(chip->card, "AK4396");
-	snd_component_add(chip->card, "CS5340");
-}
-
-static void hifier_cleanup(struct oxygen *chip)
-{
-}
-
-static void hifier_resume(struct oxygen *chip)
-{
-	hifier_registers_init(chip);
-}
-
-static void set_ak4396_params(struct oxygen *chip,
-			       struct snd_pcm_hw_params *params)
-{
-	struct hifier_data *data = chip->model_data;
-	u8 value;
-
-	value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_DFS_MASK;
-	if (params_rate(params) <= 54000)
-		value |= AK4396_DFS_NORMAL;
-	else if (params_rate(params) <= 108000)
-		value |= AK4396_DFS_DOUBLE;
-	else
-		value |= AK4396_DFS_QUAD;
-
-	msleep(1); /* wait for the new MCLK to become stable */
-
-	if (value != data->ak4396_regs[AK4396_CONTROL_2]) {
-		ak4396_write(chip, AK4396_CONTROL_1,
-			     AK4396_DIF_24_MSB);
-		ak4396_write(chip, AK4396_CONTROL_2, value);
-		ak4396_write(chip, AK4396_CONTROL_1,
-			     AK4396_DIF_24_MSB | AK4396_RSTN);
-	}
-}
-
-static void update_ak4396_volume(struct oxygen *chip)
-{
-	ak4396_write_cached(chip, AK4396_LCH_ATT, chip->dac_volume[0]);
-	ak4396_write_cached(chip, AK4396_RCH_ATT, chip->dac_volume[1]);
-}
-
-static void update_ak4396_mute(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-	u8 value;
-
-	value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_SMUTE;
-	if (chip->dac_mute)
-		value |= AK4396_SMUTE;
-	ak4396_write_cached(chip, AK4396_CONTROL_2, value);
-}
-
-static void set_cs5340_params(struct oxygen *chip,
-			      struct snd_pcm_hw_params *params)
-{
-}
-
-static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
-
-static const struct oxygen_model model_hifier = {
-	.shortname = "C-Media CMI8787",
-	.longname = "C-Media Oxygen HD Audio",
-	.chip = "CMI8788",
-	.init = hifier_init,
-	.cleanup = hifier_cleanup,
-	.resume = hifier_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
-	.set_dac_params = set_ak4396_params,
-	.set_adc_params = set_cs5340_params,
-	.update_dac_volume = update_ak4396_volume,
-	.update_dac_mute = update_ak4396_mute,
-	.dac_tlv = ak4396_db_scale,
-	.model_data_size = sizeof(struct hifier_data),
-	.device_config = PLAYBACK_0_TO_I2S |
-			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_1,
-	.dac_channels = 2,
-	.dac_volume_min = 0,
-	.dac_volume_max = 255,
-	.function_flags = OXYGEN_FUNCTION_SPI,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-};
-
-static int __devinit get_hifier_model(struct oxygen *chip,
-				      const struct pci_device_id *id)
-{
-	chip->model = model_hifier;
-	return 0;
-}
-
-static int __devinit hifier_probe(struct pci_dev *pci,
-				  const struct pci_device_id *pci_id)
-{
-	static int dev;
-	int err;
-
-	if (dev >= SNDRV_CARDS)
-		return -ENODEV;
-	if (!enable[dev]) {
-		++dev;
-		return -ENOENT;
-	}
-	err = oxygen_pci_probe(pci, index[dev], id[dev], THIS_MODULE,
-			       hifier_ids, get_hifier_model);
-	if (err >= 0)
-		++dev;
-	return err;
-}
-
-static struct pci_driver hifier_driver = {
-	.name = "CMI8787HiFier",
-	.id_table = hifier_ids,
-	.probe = hifier_probe,
-	.remove = __devexit_p(oxygen_pci_remove),
-#ifdef CONFIG_PM
-	.suspend = oxygen_pci_suspend,
-	.resume = oxygen_pci_resume,
-#endif
-};
-
-static int __init alsa_card_hifier_init(void)
-{
-	return pci_register_driver(&hifier_driver);
-}
-
-static void __exit alsa_card_hifier_exit(void)
-{
-	pci_unregister_driver(&hifier_driver);
-}
-
-module_init(alsa_card_hifier_init)
-module_exit(alsa_card_hifier_exit)
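With the dedicated snd-hifier module removed, the HiFier boards are handled below by the generic oxygen driver, which starts from a base model descriptor and overrides callbacks per PCI subsystem ID (14c3:1710 for the Fantasia). A simplified userspace sketch of that dispatch pattern; the types and the single ID check are stand-ins, not the driver's real structures:

#include <stdio.h>

struct model {
	const char *shortname;
	void (*init)(void);
};

static void generic_init(void)  { puts("generic init"); }
static void fantasia_init(void) { puts("fantasia init: AK4396 + CS5340"); }

static const struct model model_generic = { "C-Media CMI8788", generic_init };

static struct model pick_model(unsigned int subdevice)
{
	struct model m = model_generic;		/* copy, then specialize */

	if (subdevice == 0x1710) {		/* TempoTec HiFier Fantasia */
		m.shortname = "C-Media CMI8787";
		m.init = fantasia_init;
	}
	return m;
}

int main(void)
{
	struct model m = pick_model(0x1710);

	printf("%s\n", m.shortname);
	m.init();
	return 0;
}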
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 98a8eb3..d7e8ddd 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -20,19 +20,32 @@
 /*
  * CMI8788:
  *
- * SPI 0 -> 1st AK4396 (front)
- * SPI 1 -> 2nd AK4396 (surround)
- * SPI 2 -> 3rd AK4396 (center/LFE)
- * SPI 3 -> WM8785
- * SPI 4 -> 4th AK4396 (back)
+ *   SPI 0 -> 1st AK4396 (front)
+ *   SPI 1 -> 2nd AK4396 (surround)
+ *   SPI 2 -> 3rd AK4396 (center/LFE)
+ *   SPI 3 -> WM8785
+ *   SPI 4 -> 4th AK4396 (back)
  *
- * GPIO 0 -> DFS0 of AK5385
- * GPIO 1 -> DFS1 of AK5385
- * GPIO 8 -> enable headphone amplifier on HT-Omega models
+ *   GPIO 0 -> DFS0 of AK5385
+ *   GPIO 1 -> DFS1 of AK5385
+ *
+ * X-Meridian models:
+ *   GPIO 4 -> enable extension S/PDIF input
+ *   GPIO 6 -> enable on-board S/PDIF input
+ *
+ * Claro models:
+ *   GPIO 6 -> S/PDIF from optical (0) or coaxial (1) input
+ *   GPIO 8 -> enable headphone amplifier
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to ADC input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   CD_IN  <- CD
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to ADC input
  */
 
 #include <linux/delay.h>
@@ -41,18 +54,22 @@
 #include <sound/ac97_codec.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include "oxygen.h"
+#include "xonar_dg.h"
 #include "ak4396.h"
 #include "wm8785.h"
 
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("C-Media CMI8788 driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8788}}");
+MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8786}"
+			",{C-Media,CMI8787}"
+			",{C-Media,CMI8788}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
@@ -66,24 +83,46 @@
 MODULE_PARM_DESC(enable, "enable card");
 
 enum {
-	MODEL_CMEDIA_REF,	/* C-Media's reference design */
-	MODEL_MERIDIAN,		/* AuzenTech X-Meridian */
-	MODEL_CLARO,		/* HT-Omega Claro */
-	MODEL_CLARO_HALO,	/* HT-Omega Claro halo */
+	MODEL_CMEDIA_REF,
+	MODEL_MERIDIAN,
+	MODEL_MERIDIAN_2G,
+	MODEL_CLARO,
+	MODEL_CLARO_HALO,
+	MODEL_FANTASIA,
+	MODEL_SERENADE,
+	MODEL_2CH_OUTPUT,
+	MODEL_HG2PCI,
+	MODEL_XONAR_DG,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(oxygen_ids) = {
+	/* C-Media's reference design */
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0216), .driver_data = MODEL_CMEDIA_REF },
+	{ OXYGEN_PCI_SUBID(0x10b0, 0x0217), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0218), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0219), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x0001), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x0010), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x8788), .driver_data = MODEL_CMEDIA_REF },
-	{ OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x147a, 0xa017), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x1a58, 0x0910), .driver_data = MODEL_CMEDIA_REF },
+	/* Asus Xonar DG */
+	{ OXYGEN_PCI_SUBID(0x1043, 0x8467), .driver_data = MODEL_XONAR_DG },
+	/* PCI 2.0 HD Audio */
+	{ OXYGEN_PCI_SUBID(0x13f6, 0x8782), .driver_data = MODEL_2CH_OUTPUT },
+	/* Kuroutoshikou CMI8787-HG2PCI */
+	{ OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_HG2PCI },
+	/* TempoTec HiFier Fantasia */
+	{ OXYGEN_PCI_SUBID(0x14c3, 0x1710), .driver_data = MODEL_FANTASIA },
+	/* TempoTec HiFier Serenade */
+	{ OXYGEN_PCI_SUBID(0x14c3, 0x1711), .driver_data = MODEL_SERENADE },
+	/* AuzenTech X-Meridian */
 	{ OXYGEN_PCI_SUBID(0x415a, 0x5431), .driver_data = MODEL_MERIDIAN },
+	/* AuzenTech X-Meridian 2G */
+	{ OXYGEN_PCI_SUBID(0x5431, 0x017a), .driver_data = MODEL_MERIDIAN_2G },
+	/* HT-Omega Claro */
 	{ OXYGEN_PCI_SUBID(0x7284, 0x9761), .driver_data = MODEL_CLARO },
+	/* HT-Omega Claro halo */
 	{ OXYGEN_PCI_SUBID(0x7284, 0x9781), .driver_data = MODEL_CLARO_HALO },
 	{ }
 };
@@ -95,9 +134,15 @@
 #define GPIO_AK5385_DFS_DOUBLE	0x0001
 #define GPIO_AK5385_DFS_QUAD	0x0002
 
+#define GPIO_MERIDIAN_DIG_MASK	0x0050
+#define GPIO_MERIDIAN_DIG_EXT	0x0010
+#define GPIO_MERIDIAN_DIG_BOARD	0x0040
+
+#define GPIO_CLARO_DIG_COAX	0x0040
 #define GPIO_CLARO_HP		0x0100
 
 struct generic_data {
+	unsigned int dacs;
 	u8 ak4396_regs[4][5];
 	u16 wm8785_regs[3];
 };
@@ -148,7 +193,7 @@
 	struct generic_data *data = chip->model_data;
 	unsigned int i;
 
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i < data->dacs; ++i) {
 		ak4396_write(chip, i, AK4396_CONTROL_1,
 			     AK4396_DIF_24_MSB | AK4396_RSTN);
 		ak4396_write(chip, i, AK4396_CONTROL_2,
@@ -166,6 +211,7 @@
 {
 	struct generic_data *data = chip->model_data;
 
+	data->dacs = chip->model.dac_channels_pcm / 2;
 	data->ak4396_regs[0][AK4396_CONTROL_2] =
 		AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL;
 	ak4396_registers_init(chip);
@@ -207,6 +253,10 @@
 
 static void meridian_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_MERIDIAN_DIG_MASK);
+	oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+			      GPIO_MERIDIAN_DIG_BOARD, GPIO_MERIDIAN_DIG_MASK);
 	ak4396_init(chip);
 	ak5385_init(chip);
 }
@@ -220,6 +270,8 @@
 
 static void claro_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX);
 	ak4396_init(chip);
 	wm8785_init(chip);
 	claro_enable_hp(chip);
@@ -227,11 +279,24 @@
 
 static void claro_halo_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX);
 	ak4396_init(chip);
 	ak5385_init(chip);
 	claro_enable_hp(chip);
 }
 
+static void fantasia_init(struct oxygen *chip)
+{
+	ak4396_init(chip);
+	snd_component_add(chip->card, "CS5340");
+}
+
+static void stereo_output_init(struct oxygen *chip)
+{
+	ak4396_init(chip);
+}
+
 static void generic_cleanup(struct oxygen *chip)
 {
 }
@@ -268,6 +333,11 @@
 	claro_enable_hp(chip);
 }
 
+static void stereo_resume(struct oxygen *chip)
+{
+	ak4396_registers_init(chip);
+}
+
 static void set_ak4396_params(struct oxygen *chip,
 			      struct snd_pcm_hw_params *params)
 {
@@ -286,7 +356,7 @@
 	msleep(1); /* wait for the new MCLK to become stable */
 
 	if (value != data->ak4396_regs[0][AK4396_CONTROL_2]) {
-		for (i = 0; i < 4; ++i) {
+		for (i = 0; i < data->dacs; ++i) {
 			ak4396_write(chip, i, AK4396_CONTROL_1,
 				     AK4396_DIF_24_MSB);
 			ak4396_write(chip, i, AK4396_CONTROL_2, value);
@@ -298,9 +368,10 @@
 
 static void update_ak4396_volume(struct oxygen *chip)
 {
+	struct generic_data *data = chip->model_data;
 	unsigned int i;
 
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i < data->dacs; ++i) {
 		ak4396_write_cached(chip, i, AK4396_LCH_ATT,
 				    chip->dac_volume[i * 2]);
 		ak4396_write_cached(chip, i, AK4396_RCH_ATT,
@@ -317,7 +388,7 @@
 	value = data->ak4396_regs[0][AK4396_CONTROL_2] & ~AK4396_SMUTE;
 	if (chip->dac_mute)
 		value |= AK4396_SMUTE;
-	for (i = 0; i < 4; ++i)
+	for (i = 0; i < data->dacs; ++i)
 		ak4396_write_cached(chip, i, AK4396_CONTROL_2, value);
 }
 
@@ -356,6 +427,10 @@
 			      value, GPIO_AK5385_DFS_MASK);
 }
 
+static void set_no_params(struct oxygen *chip, struct snd_pcm_hw_params *params)
+{
+}
+
 static int rolloff_info(struct snd_kcontrol *ctl,
 			struct snd_ctl_elem_info *info)
 {
@@ -363,13 +438,7 @@
 		"Sharp Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -400,7 +469,7 @@
 		reg &= ~AK4396_SLOW;
 	changed = reg != data->ak4396_regs[0][AK4396_CONTROL_2];
 	if (changed) {
-		for (i = 0; i < 4; ++i)
+		for (i = 0; i < data->dacs; ++i)
 			ak4396_write(chip, i, AK4396_CONTROL_2, reg);
 	}
 	mutex_unlock(&chip->mutex);
@@ -421,13 +490,7 @@
 		"None", "High-pass Filter"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -466,6 +529,100 @@
 	.put = hpf_put,
 };
 
+static int meridian_dig_source_info(struct snd_kcontrol *ctl,
+				    struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "On-board", "Extension" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int claro_dig_source_info(struct snd_kcontrol *ctl,
+				 struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Optical", "Coaxial" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int meridian_dig_source_get(struct snd_kcontrol *ctl,
+				   struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	value->value.enumerated.item[0] =
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) &
+		   GPIO_MERIDIAN_DIG_EXT);
+	return 0;
+}
+
+static int claro_dig_source_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	value->value.enumerated.item[0] =
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) &
+		   GPIO_CLARO_DIG_COAX);
+	return 0;
+}
+
+static int meridian_dig_source_put(struct snd_kcontrol *ctl,
+				   struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 old_reg, new_reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+	new_reg = old_reg & ~GPIO_MERIDIAN_DIG_MASK;
+	if (value->value.enumerated.item[0] == 0)
+		new_reg |= GPIO_MERIDIAN_DIG_BOARD;
+	else
+		new_reg |= GPIO_MERIDIAN_DIG_EXT;
+	changed = new_reg != old_reg;
+	if (changed)
+		oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int claro_dig_source_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 old_reg, new_reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+	new_reg = old_reg & ~GPIO_CLARO_DIG_COAX;
+	if (value->value.enumerated.item[0])
+		new_reg |= GPIO_CLARO_DIG_COAX;
+	changed = new_reg != old_reg;
+	if (changed)
+		oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static const struct snd_kcontrol_new meridian_dig_source_control = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "IEC958 Source Capture Enum",
+	.info = meridian_dig_source_info,
+	.get = meridian_dig_source_get,
+	.put = meridian_dig_source_put,
+};
+
+static const struct snd_kcontrol_new claro_dig_source_control = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "IEC958 Source Capture Enum",
+	.info = claro_dig_source_info,
+	.get = claro_dig_source_get,
+	.put = claro_dig_source_put,
+};
+
 static int generic_mixer_init(struct oxygen *chip)
 {
 	return snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
@@ -484,6 +641,81 @@
 	return 0;
 }
 
+static int meridian_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&meridian_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int claro_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_wm8785_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&claro_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int claro_halo_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&claro_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static void dump_ak4396_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct generic_data *data = chip->model_data;
+	unsigned int dac, i;
+
+	for (dac = 0; dac < data->dacs; ++dac) {
+		snd_iprintf(buffer, "\nAK4396 %u:", dac + 1);
+		for (i = 0; i < 5; ++i)
+			snd_iprintf(buffer, " %02x", data->ak4396_regs[dac][i]);
+	}
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_wm8785_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct generic_data *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nWM8785:");
+	for (i = 0; i < 3; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8785_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_oxygen_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	dump_ak4396_registers(chip, buffer);
+	dump_wm8785_registers(chip, buffer);
+}
+
 static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
 
 static const struct oxygen_model model_generic = {
@@ -494,11 +726,11 @@
 	.mixer_init = generic_wm8785_mixer_init,
 	.cleanup = generic_cleanup,
 	.resume = generic_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_ak4396_params,
 	.set_adc_params = set_wm8785_params,
 	.update_dac_volume = update_ak4396_volume,
 	.update_dac_mute = update_ak4396_mute,
+	.dump_registers = dump_oxygen_registers,
 	.dac_tlv = ak4396_db_scale,
 	.model_data_size = sizeof(struct generic_data),
 	.device_config = PLAYBACK_0_TO_I2S |
@@ -508,11 +740,14 @@
 			 CAPTURE_1_FROM_SPDIF |
 			 CAPTURE_2_FROM_AC97_1 |
 			 AC97_CD_INPUT,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 0,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_SPI |
 			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
@@ -520,42 +755,87 @@
 static int __devinit get_oxygen_model(struct oxygen *chip,
 				      const struct pci_device_id *id)
 {
+	static const char *const names[] = {
+		[MODEL_MERIDIAN]	= "AuzenTech X-Meridian",
+		[MODEL_MERIDIAN_2G]	= "AuzenTech X-Meridian 2G",
+		[MODEL_CLARO]		= "HT-Omega Claro",
+		[MODEL_CLARO_HALO]	= "HT-Omega Claro halo",
+		[MODEL_FANTASIA]	= "TempoTec HiFier Fantasia",
+		[MODEL_SERENADE]	= "TempoTec HiFier Serenade",
+		[MODEL_HG2PCI]		= "CMI8787-HG2PCI",
+	};
+
 	chip->model = model_generic;
 	switch (id->driver_data) {
 	case MODEL_MERIDIAN:
+	case MODEL_MERIDIAN_2G:
 		chip->model.init = meridian_init;
-		chip->model.mixer_init = generic_mixer_init;
+		chip->model.mixer_init = meridian_mixer_init;
 		chip->model.resume = meridian_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.dump_registers = dump_ak4396_registers;
 		chip->model.device_config = PLAYBACK_0_TO_I2S |
 					    PLAYBACK_1_TO_SPDIF |
 					    CAPTURE_0_FROM_I2S_2 |
 					    CAPTURE_1_FROM_SPDIF;
+		if (id->driver_data == MODEL_MERIDIAN)
+			chip->model.device_config |= AC97_CD_INPUT;
 		break;
 	case MODEL_CLARO:
 		chip->model.init = claro_init;
+		chip->model.mixer_init = claro_mixer_init;
 		chip->model.cleanup = claro_cleanup;
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		break;
 	case MODEL_CLARO_HALO:
 		chip->model.init = claro_halo_init;
-		chip->model.mixer_init = generic_mixer_init;
+		chip->model.mixer_init = claro_halo_mixer_init;
 		chip->model.cleanup = claro_cleanup;
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.dump_registers = dump_ak4396_registers;
 		chip->model.device_config = PLAYBACK_0_TO_I2S |
 					    PLAYBACK_1_TO_SPDIF |
 					    CAPTURE_0_FROM_I2S_2 |
 					    CAPTURE_1_FROM_SPDIF;
 		break;
+	case MODEL_FANTASIA:
+	case MODEL_SERENADE:
+	case MODEL_2CH_OUTPUT:
+	case MODEL_HG2PCI:
+		chip->model.shortname = "C-Media CMI8787";
+		chip->model.chip = "CMI8787";
+		if (id->driver_data == MODEL_FANTASIA)
+			chip->model.init = fantasia_init;
+		else
+			chip->model.init = stereo_output_init;
+		chip->model.resume = stereo_resume;
+		chip->model.mixer_init = generic_mixer_init;
+		chip->model.set_adc_params = set_no_params;
+		chip->model.dump_registers = dump_ak4396_registers;
+		chip->model.device_config = PLAYBACK_0_TO_I2S |
+					    PLAYBACK_1_TO_SPDIF;
+		if (id->driver_data == MODEL_FANTASIA) {
+			chip->model.device_config |= CAPTURE_0_FROM_I2S_1;
+			chip->model.adc_mclks = OXYGEN_MCLKS(256, 128, 128);
+		}
+		chip->model.dac_channels_pcm = 2;
+		chip->model.dac_channels_mixer = 2;
+		break;
+	case MODEL_XONAR_DG:
+		chip->model = model_xonar_dg;
+		break;
 	}
 	if (id->driver_data == MODEL_MERIDIAN ||
+	    id->driver_data == MODEL_MERIDIAN_2G ||
 	    id->driver_data == MODEL_CLARO_HALO) {
 		chip->model.misc_flags = OXYGEN_MISC_MIDI;
 		chip->model.device_config |= MIDI_OUTPUT | MIDI_INPUT;
 	}
+	if (id->driver_data < ARRAY_SIZE(names) && names[id->driver_data])
+		chip->model.shortname = names[id->driver_data];
 	return 0;
 }
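Several hunks above collapse open-coded enumerated-control .info callbacks into snd_ctl_enum_info(). For reference, the helper does roughly what the removed bodies did; a sketch of the equivalent logic (the real implementation lives in sound/core/control.c):

#include <linux/string.h>
#include <sound/control.h>

static int enum_info_sketch(struct snd_ctl_elem_info *info,
			    unsigned int channels, unsigned int items,
			    const char *const names[])
{
	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	info->count = channels;
	info->value.enumerated.items = items;
	if (info->value.enumerated.item >= items)
		info->value.enumerated.item = items - 1;
	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
	return 0;
}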
 
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 7d5222c..c2ae63d 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -16,6 +16,10 @@
 #define PCM_AC97	5
 #define PCM_COUNT	6
 
+#define OXYGEN_MCLKS(f_single, f_double, f_quad) ((MCLK_##f_single << 0) | \
+						  (MCLK_##f_double << 2) | \
+						  (MCLK_##f_quad   << 4))
+
 #define OXYGEN_IO_SIZE	0x100
 
 #define OXYGEN_EEPROM_ID	0x434d	/* "CM" */
@@ -35,6 +39,7 @@
 #define MIDI_OUTPUT		0x0800
 #define MIDI_INPUT		0x1000
 #define AC97_CD_INPUT		0x2000
+#define AC97_FMIC_SWITCH	0x4000
 
 enum {
 	CONTROL_SPDIF_PCM,
@@ -65,6 +70,7 @@
 struct snd_pcm_hw_params;
 struct snd_kcontrol_new;
 struct snd_rawmidi;
+struct snd_info_buffer;
 struct oxygen;
 
 struct oxygen_model {
@@ -79,8 +85,6 @@
 	void (*resume)(struct oxygen *chip);
 	void (*pcm_hardware_filter)(unsigned int channel,
 				    struct snd_pcm_hardware *hardware);
-	unsigned int (*get_i2s_mclk)(struct oxygen *chip, unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params);
 	void (*set_dac_params)(struct oxygen *chip,
 			       struct snd_pcm_hw_params *params);
 	void (*set_adc_params)(struct oxygen *chip,
@@ -92,15 +96,19 @@
 	void (*uart_input)(struct oxygen *chip);
 	void (*ac97_switch)(struct oxygen *chip,
 			    unsigned int reg, unsigned int mute);
+	void (*dump_registers)(struct oxygen *chip,
+			       struct snd_info_buffer *buffer);
 	const unsigned int *dac_tlv;
-	unsigned long private_data;
 	size_t model_data_size;
 	unsigned int device_config;
-	u8 dac_channels;
+	u8 dac_channels_pcm;
+	u8 dac_channels_mixer;
 	u8 dac_volume_min;
 	u8 dac_volume_max;
 	u8 misc_flags;
 	u8 function_flags;
+	u8 dac_mclks;
+	u8 adc_mclks;
 	u16 dac_i2s_format;
 	u16 adc_i2s_format;
 };
@@ -121,7 +129,6 @@
 	u8 pcm_running;
 	u8 dac_routing;
 	u8 spdif_playback_enable;
-	u8 revision;
 	u8 has_ac97_0;
 	u8 has_ac97_1;
 	u32 spdif_bits;
@@ -167,8 +174,6 @@
 /* oxygen_pcm.c */
 
 int oxygen_pcm_init(struct oxygen *chip);
-unsigned int oxygen_default_i2s_mclk(struct oxygen *chip, unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params);
 
 /* oxygen_io.c */
 
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index 09b2b2a..f5164b1 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -197,11 +197,11 @@
 {
 	unsigned int count;
 
-	/* should not need more than 7.68 us (24 * 320 ns) */
+	/* should not need more than 30.72 us (24 * 1.28 us) */
 	count = 10;
 	while ((oxygen_read8(chip, OXYGEN_SPI_CONTROL) & OXYGEN_SPI_BUSY)
 	       && count > 0) {
-		udelay(1);
+		udelay(4);
 		--count;
 	}
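The oxygen_io.c hunk above stretches the SPI busy-wait from 10 x 1 us to 10 x 4 us, so the 40 us budget covers the 30.72 us worst case (24 bits at 1.28 us per bit) with some margin. The same bounded-poll shape as a generic helper, kernel-style and illustrative only:

#include <linux/delay.h>
#include <linux/types.h>

/* Poll a busy predicate every step_us microseconds, at most max_polls
 * times; returns true if the device went idle within the budget. */
static bool poll_until_idle(bool (*busy)(void *ctx), void *ctx,
			    unsigned int step_us, unsigned int max_polls)
{
	while (busy(ctx) && max_polls--)
		udelay(step_us);
	return !busy(ctx);
}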
 
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index e5ebe56..70b7398 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -202,7 +202,13 @@
 	struct oxygen *chip = entry->private_data;
 	int i, j;
 
-	snd_iprintf(buffer, "CMI8788\n\n");
+	switch (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_PACKAGE_ID_MASK) {
+	case OXYGEN_PACKAGE_ID_8786: i = '6'; break;
+	case OXYGEN_PACKAGE_ID_8787: i = '7'; break;
+	case OXYGEN_PACKAGE_ID_8788: i = '8'; break;
+	default:                     i = '?'; break;
+	}
+	snd_iprintf(buffer, "CMI878%c:\n", i);
 	for (i = 0; i < OXYGEN_IO_SIZE; i += 0x10) {
 		snd_iprintf(buffer, "%02x:", i);
 		for (j = 0; j < 0x10; ++j)
@@ -212,7 +218,7 @@
 	if (mutex_lock_interruptible(&chip->mutex) < 0)
 		return;
 	if (chip->has_ac97_0) {
-		snd_iprintf(buffer, "\nAC97\n");
+		snd_iprintf(buffer, "\nAC97:\n");
 		for (i = 0; i < 0x80; i += 0x10) {
 			snd_iprintf(buffer, "%02x:", i);
 			for (j = 0; j < 0x10; j += 2)
@@ -222,7 +228,7 @@
 		}
 	}
 	if (chip->has_ac97_1) {
-		snd_iprintf(buffer, "\nAC97 2\n");
+		snd_iprintf(buffer, "\nAC97 2:\n");
 		for (i = 0; i < 0x80; i += 0x10) {
 			snd_iprintf(buffer, "%02x:", i);
 			for (j = 0; j < 0x10; j += 2)
@@ -232,13 +238,15 @@
 		}
 	}
 	mutex_unlock(&chip->mutex);
+	if (chip->model.dump_registers)
+		chip->model.dump_registers(chip, buffer);
 }
 
 static void oxygen_proc_init(struct oxygen *chip)
 {
 	struct snd_info_entry *entry;
 
-	if (!snd_card_proc_new(chip->card, "cmi8788", &entry))
+	if (!snd_card_proc_new(chip->card, "oxygen", &entry))
 		snd_info_set_text_ops(entry, chip, oxygen_proc_read);
 }
 #else
@@ -262,7 +270,7 @@
 	 */
 	subdevice = oxygen_read_eeprom(chip, 2);
 	/* use default ID if EEPROM is missing */
-	if (subdevice == 0xffff)
+	if (subdevice == 0xffff && oxygen_read_eeprom(chip, 1) == 0xffff)
 		subdevice = 0x8788;
 	/*
 	 * We use only the subsystem device ID for searching because it is
@@ -364,12 +372,7 @@
 		(IEC958_AES1_CON_PCM_CODER << OXYGEN_SPDIF_CATEGORY_SHIFT);
 	chip->spdif_pcm_bits = chip->spdif_bits;
 
-	if (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2)
-		chip->revision = 2;
-	else
-		chip->revision = 1;
-
-	if (chip->revision == 1)
+	if (!(oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2))
 		oxygen_set_bits8(chip, OXYGEN_MISC,
 				 OXYGEN_MISC_PCI_MEM_W_1_CLOCK);
 
@@ -406,28 +409,40 @@
 		      (OXYGEN_FORMAT_16 << OXYGEN_MULTICH_FORMAT_SHIFT));
 	oxygen_write8(chip, OXYGEN_REC_CHANNELS, OXYGEN_REC_CHANNELS_2_2_2);
 	oxygen_write16(chip, OXYGEN_I2S_MULTICH_FORMAT,
-		       OXYGEN_RATE_48000 | chip->model.dac_i2s_format |
-		       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+		       OXYGEN_RATE_48000 |
+		       chip->model.dac_i2s_format |
+		       OXYGEN_I2S_MCLK(chip->model.dac_mclks) |
+		       OXYGEN_I2S_BITS_16 |
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_BCLK_64);
 	if (chip->model.device_config & CAPTURE_0_FROM_I2S_1)
 		oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-			       OXYGEN_RATE_48000 | chip->model.adc_i2s_format |
-			       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+			       OXYGEN_RATE_48000 |
+			       chip->model.adc_i2s_format |
+			       OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+			       OXYGEN_I2S_BITS_16 |
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_BCLK_64);
 	else
 		oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_MUTE_MCLK);
 	if (chip->model.device_config & (CAPTURE_0_FROM_I2S_2 |
 					 CAPTURE_2_FROM_I2S_2))
 		oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
-			       OXYGEN_RATE_48000 | chip->model.adc_i2s_format |
-			       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+			       OXYGEN_RATE_48000 |
+			       chip->model.adc_i2s_format |
+			       OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+			       OXYGEN_I2S_BITS_16 |
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_BCLK_64);
 	else
 		oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_MUTE_MCLK);
 	oxygen_write16(chip, OXYGEN_I2S_C_FORMAT,
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_MUTE_MCLK);
 	oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
 			    OXYGEN_SPDIF_OUT_ENABLE |
 			    OXYGEN_SPDIF_LOOPBACK);
@@ -557,7 +572,8 @@
 	oxygen_shutdown(chip);
 	if (chip->irq >= 0)
 		free_irq(chip->irq, chip);
-	flush_scheduled_work();
+	flush_work_sync(&chip->spdif_input_bits_work);
+	flush_work_sync(&chip->gpio_work);
 	chip->model.cleanup(chip);
 	kfree(chip->model_data);
 	mutex_destroy(&chip->mutex);
@@ -648,8 +664,8 @@
 
 	strcpy(card->driver, chip->model.chip);
 	strcpy(card->shortname, chip->model.shortname);
-	sprintf(card->longname, "%s (rev %u) at %#lx, irq %i",
-		chip->model.longname, chip->revision, chip->addr, chip->irq);
+	sprintf(card->longname, "%s at %#lx, irq %i",
+		chip->model.longname, chip->addr, chip->irq);
 	strcpy(card->mixername, chip->model.chip);
 	snd_component_add(card, chip->model.chip);
 
@@ -733,7 +749,8 @@
 	spin_unlock_irq(&chip->reg_lock);
 
 	synchronize_irq(chip->irq);
-	flush_scheduled_work();
+	flush_work_sync(&chip->spdif_input_bits_work);
+	flush_work_sync(&chip->gpio_work);
 	chip->interrupt_mask = saved_interrupt_mask;
 
 	pci_disable_device(pci);
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 2849b36..9bff14d 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -31,7 +31,7 @@
 	struct oxygen *chip = ctl->private_data;
 
 	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-	info->count = chip->model.dac_channels;
+	info->count = chip->model.dac_channels_mixer;
 	info->value.integer.min = chip->model.dac_volume_min;
 	info->value.integer.max = chip->model.dac_volume_max;
 	return 0;
@@ -44,7 +44,7 @@
 	unsigned int i;
 
 	mutex_lock(&chip->mutex);
-	for (i = 0; i < chip->model.dac_channels; ++i)
+	for (i = 0; i < chip->model.dac_channels_mixer; ++i)
 		value->value.integer.value[i] = chip->dac_volume[i];
 	mutex_unlock(&chip->mutex);
 	return 0;
@@ -59,7 +59,7 @@
 
 	changed = 0;
 	mutex_lock(&chip->mutex);
-	for (i = 0; i < chip->model.dac_channels; ++i)
+	for (i = 0; i < chip->model.dac_channels_mixer; ++i)
 		if (value->value.integer.value[i] != chip->dac_volume[i]) {
 			chip->dac_volume[i] = value->value.integer.value[i];
 			changed = 1;
@@ -97,6 +97,16 @@
 	return changed;
 }
 
+static unsigned int upmix_item_count(struct oxygen *chip)
+{
+	if (chip->model.dac_channels_pcm < 8)
+		return 2;
+	else if (chip->model.update_center_lfe_mix)
+		return 5;
+	else
+		return 3;
+}
+
 static int upmix_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
 {
 	static const char *const names[5] = {
@@ -107,15 +117,9 @@
 		"Front+Surround+Center/LFE+Back",
 	};
 	struct oxygen *chip = ctl->private_data;
-	unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3;
+	unsigned int count = upmix_item_count(chip);
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = count;
-	if (info->value.enumerated.item >= count)
-		info->value.enumerated.item = count - 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, count, names);
 }
 
 static int upmix_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -188,7 +192,7 @@
 static int upmix_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
-	unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3;
+	unsigned int count = upmix_item_count(chip);
 	int changed;
 
 	if (value->value.enumerated.item[0] >= count)
@@ -430,30 +434,31 @@
 	return 0;
 }
 
-static int spdif_loopback_get(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_value *value)
+static int spdif_bit_switch_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
+	u32 bit = ctl->private_value;
 
 	value->value.integer.value[0] =
-		!!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL)
-		   & OXYGEN_SPDIF_LOOPBACK);
+		!!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) & bit);
 	return 0;
 }
 
-static int spdif_loopback_put(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_value *value)
+static int spdif_bit_switch_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
+	u32 bit = ctl->private_value;
 	u32 oldreg, newreg;
 	int changed;
 
 	spin_lock_irq(&chip->reg_lock);
 	oldreg = oxygen_read32(chip, OXYGEN_SPDIF_CONTROL);
 	if (value->value.integer.value[0])
-		newreg = oldreg | OXYGEN_SPDIF_LOOPBACK;
+		newreg = oldreg | bit;
 	else
-		newreg = oldreg & ~OXYGEN_SPDIF_LOOPBACK;
+		newreg = oldreg & ~bit;
 	changed = newreg != oldreg;
 	if (changed)
 		oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, newreg);
@@ -644,6 +649,46 @@
 	return change;
 }
 
+static int mic_fmic_source_info(struct snd_kcontrol *ctl,
+			   struct snd_ctl_elem_info *info)
+{
+	static const char *const names[] = { "Mic Jack", "Front Panel" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int mic_fmic_source_get(struct snd_kcontrol *ctl,
+			       struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] =
+		!!(oxygen_read_ac97(chip, 0, CM9780_JACK) & CM9780_FMIC2MIC);
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int mic_fmic_source_put(struct snd_kcontrol *ctl,
+			       struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 oldreg, newreg;
+	int change;
+
+	mutex_lock(&chip->mutex);
+	oldreg = oxygen_read_ac97(chip, 0, CM9780_JACK);
+	if (value->value.enumerated.item[0])
+		newreg = oldreg | CM9780_FMIC2MIC;
+	else
+		newreg = oldreg & ~CM9780_FMIC2MIC;
+	change = newreg != oldreg;
+	if (change)
+		oxygen_write_ac97(chip, 0, CM9780_JACK, newreg);
+	mutex_unlock(&chip->mutex);
+	return change;
+}
+
 static int ac97_fp_rec_volume_info(struct snd_kcontrol *ctl,
 				   struct snd_ctl_elem_info *info)
 {
@@ -791,8 +836,17 @@
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = SNDRV_CTL_NAME_IEC958("Loopback ", NONE, SWITCH),
 		.info = snd_ctl_boolean_mono_info,
-		.get = spdif_loopback_get,
-		.put = spdif_loopback_put,
+		.get = spdif_bit_switch_get,
+		.put = spdif_bit_switch_put,
+		.private_value = OXYGEN_SPDIF_LOOPBACK,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = SNDRV_CTL_NAME_IEC958("Validity Check ",CAPTURE,SWITCH),
+		.info = snd_ctl_boolean_mono_info,
+		.get = spdif_bit_switch_get,
+		.put = spdif_bit_switch_put,
+		.private_value = OXYGEN_SPDIF_SPDVALID,
 	},
 };
 
@@ -908,6 +962,13 @@
 	AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0),
 	AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1),
 	AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Mic Source Capture Enum",
+		.info = mic_fmic_source_info,
+		.get = mic_fmic_source_get,
+		.put = mic_fmic_source_put,
+	},
 	AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1),
 	AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1),
 	AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1),
@@ -970,7 +1031,10 @@
 				continue;
 		}
 		if (!strcmp(template.name, "Stereo Upmixing") &&
-		    chip->model.dac_channels == 2)
+		    chip->model.dac_channels_pcm == 2)
+			continue;
+		if (!strcmp(template.name, "Mic Source Capture Enum") &&
+		    !(chip->model.device_config & AC97_FMIC_SWITCH))
 			continue;
 		if (!strncmp(template.name, "CD Capture ", 11) &&
 		    !(chip->model.device_config & AC97_CD_INPUT))
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index 8146674..d5533e3 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -39,7 +39,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE |
 		   SNDRV_PCM_FMTBIT_S32_LE,
 	.rates = SNDRV_PCM_RATE_32000 |
@@ -65,7 +66,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE |
 		   SNDRV_PCM_FMTBIT_S32_LE,
 	.rates = SNDRV_PCM_RATE_32000 |
@@ -91,7 +93,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	.rates = SNDRV_PCM_RATE_48000,
 	.rate_min = 48000,
@@ -140,7 +143,7 @@
 		runtime->hw.rate_min = 44100;
 		break;
 	case PCM_MULTICH:
-		runtime->hw.channels_max = chip->model.dac_channels;
+		runtime->hw.channels_max = chip->model.dac_channels_pcm;
 		break;
 	}
 	if (chip->model.pcm_hardware_filter)
@@ -271,17 +274,6 @@
 	}
 }
 
-unsigned int oxygen_default_i2s_mclk(struct oxygen *chip,
-				     unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params)
-{
-	if (params_rate(hw_params) <= 96000)
-		return OXYGEN_I2S_MCLK_256;
-	else
-		return OXYGEN_I2S_MCLK_128;
-}
-EXPORT_SYMBOL(oxygen_default_i2s_mclk);
-
 static unsigned int oxygen_i2s_bits(struct snd_pcm_hw_params *hw_params)
 {
 	if (params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE)
@@ -341,6 +333,26 @@
 	return 0;
 }
 
+static u16 get_mclk(struct oxygen *chip, unsigned int channel,
+		    struct snd_pcm_hw_params *params)
+{
+	unsigned int mclks, shift;
+
+	if (channel == PCM_MULTICH)
+		mclks = chip->model.dac_mclks;
+	else
+		mclks = chip->model.adc_mclks;
+
+	if (params_rate(params) <= 48000)
+		shift = 0;
+	else if (params_rate(params) <= 96000)
+		shift = 2;
+	else
+		shift = 4;
+
+	return OXYGEN_I2S_MCLK(mclks >> shift);
+}
+
 static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
@@ -357,8 +369,8 @@
 			     OXYGEN_REC_FORMAT_A_MASK);
 	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT,
 			      oxygen_rate(hw_params) |
-			      chip->model.get_i2s_mclk(chip, PCM_A, hw_params) |
 			      chip->model.adc_i2s_format |
+			      get_mclk(chip, PCM_A, hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
@@ -393,9 +405,8 @@
 	if (!is_ac97)
 		oxygen_write16_masked(chip, OXYGEN_I2S_B_FORMAT,
 				      oxygen_rate(hw_params) |
-				      chip->model.get_i2s_mclk(chip, PCM_B,
-							       hw_params) |
 				      chip->model.adc_i2s_format |
+				      get_mclk(chip, PCM_B, hw_params) |
 				      oxygen_i2s_bits(hw_params),
 				      OXYGEN_I2S_RATE_MASK |
 				      OXYGEN_I2S_FORMAT_MASK |
@@ -476,8 +487,7 @@
 	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
 			      oxygen_rate(hw_params) |
 			      chip->model.dac_i2s_format |
-			      chip->model.get_i2s_mclk(chip, PCM_MULTICH,
-						       hw_params) |
+			      get_mclk(chip, PCM_MULTICH, hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
@@ -530,7 +540,10 @@
 	oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
 	oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
 
-	chip->interrupt_mask |= channel_mask;
+	if (substream->runtime->no_period_wakeup)
+		chip->interrupt_mask &= ~channel_mask;
+	else
+		chip->interrupt_mask |= channel_mask;
 	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
 	spin_unlock_irq(&chip->reg_lock);
 	return 0;
diff --git a/sound/pci/oxygen/oxygen_regs.h b/sound/pci/oxygen/oxygen_regs.h
index 4dcd41b..63dc7a0 100644
--- a/sound/pci/oxygen/oxygen_regs.h
+++ b/sound/pci/oxygen/oxygen_regs.h
@@ -139,9 +139,11 @@
 #define  OXYGEN_I2S_FORMAT_I2S		0x0000
 #define  OXYGEN_I2S_FORMAT_LJUST	0x0008
 #define  OXYGEN_I2S_MCLK_MASK		0x0030	/* MCLK/LRCK */
-#define  OXYGEN_I2S_MCLK_128		0x0000
-#define  OXYGEN_I2S_MCLK_256		0x0010
-#define  OXYGEN_I2S_MCLK_512		0x0020
+#define  OXYGEN_I2S_MCLK_SHIFT		4
+#define  MCLK_128			0
+#define  MCLK_256			1
+#define  MCLK_512			2
+#define  OXYGEN_I2S_MCLK(f)		(((f) & 3) << OXYGEN_I2S_MCLK_SHIFT)
 #define  OXYGEN_I2S_BITS_MASK		0x00c0
 #define  OXYGEN_I2S_BITS_16		0x0000
 #define  OXYGEN_I2S_BITS_20		0x0040
@@ -238,11 +240,11 @@
 #define  OXYGEN_SPI_DATA_LENGTH_MASK	0x02
 #define  OXYGEN_SPI_DATA_LENGTH_2	0x00
 #define  OXYGEN_SPI_DATA_LENGTH_3	0x02
-#define  OXYGEN_SPI_CLOCK_MASK		0xc0
+#define  OXYGEN_SPI_CLOCK_MASK		0x0c
 #define  OXYGEN_SPI_CLOCK_160		0x00	/* ns */
-#define  OXYGEN_SPI_CLOCK_320		0x40
-#define  OXYGEN_SPI_CLOCK_640		0x80
-#define  OXYGEN_SPI_CLOCK_1280		0xc0
+#define  OXYGEN_SPI_CLOCK_320		0x04
+#define  OXYGEN_SPI_CLOCK_640		0x08
+#define  OXYGEN_SPI_CLOCK_1280		0x0c
 #define  OXYGEN_SPI_CODEC_MASK		0x70	/* 0..5 */
 #define  OXYGEN_SPI_CODEC_SHIFT		4
 #define  OXYGEN_SPI_CEN_MASK		0x80
diff --git a/sound/pci/oxygen/xonar.h b/sound/pci/oxygen/xonar.h
index b35343b..0434c20 100644
--- a/sound/pci/oxygen/xonar.h
+++ b/sound/pci/oxygen/xonar.h
@@ -24,6 +24,8 @@
 void xonar_init_cs53x1(struct oxygen *chip);
 void xonar_set_cs53x1_params(struct oxygen *chip,
 			     struct snd_pcm_hw_params *params);
+
+#define XONAR_GPIO_BIT_INVERT	(1 << 16)
 int xonar_gpio_bit_switch_get(struct snd_kcontrol *ctl,
 			      struct snd_ctl_elem_value *value);
 int xonar_gpio_bit_switch_put(struct snd_kcontrol *ctl,
diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
index aa27c31..9f72d42 100644
--- a/sound/pci/oxygen/xonar_cs43xx.c
+++ b/sound/pci/oxygen/xonar_cs43xx.c
@@ -22,29 +22,28 @@
  *
  * CMI8788:
  *
- * I²C <-> CS4398 (front)
- *     <-> CS4362A (surround, center/LFE, back)
+ *   I²C <-> CS4398 (addr 1001111) (front)
+ *       <-> CS4362A (addr 0011000) (surround, center/LFE, back)
  *
- * GPI 0 <- external power present (DX only)
+ *   GPI 0 <- external power present (DX only)
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 1 -> enable front panel I/O
- * GPIO 2 -> M0 of CS5361
- * GPIO 3 -> M1 of CS5361
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
- *
- * CS4398:
- *
- * AD0 <- 1
- * AD1 <- 1
- *
- * CS4362A:
- *
- * AD0 <- 0
+ *   GPIO 0 -> enable output to speakers
+ *   GPIO 1 -> route output to front panel
+ *   GPIO 2 -> M0 of CS5361
+ *   GPIO 3 -> M1 of CS5361
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> ?
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN  <- aux
+ *   MIC_IN  <- mic
+ *   FMIC_IN <- front mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
  */
 
 #include <linux/pci.h>
@@ -63,6 +62,7 @@
 #define GPI_EXT_POWER		0x01
 #define GPIO_D1_OUTPUT_ENABLE	0x0001
 #define GPIO_D1_FRONT_PANEL	0x0002
+#define GPIO_D1_MAGIC		0x00c0
 #define GPIO_D1_INPUT_ROUTE	0x0100
 
 #define I2C_DEVICE_CS4398	0x9e	/* 10011, AD1=1, AD0=1, /W=0 */
@@ -169,12 +169,12 @@
 	cs43xx_registers_init(chip);
 
 	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
+			  GPIO_D1_FRONT_PANEL |
+			  GPIO_D1_MAGIC |
+			  GPIO_D1_INPUT_ROUTE);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
 			    GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
 
-	oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC);
-
 	xonar_init_cs53x1(chip);
 	xonar_enable_output(chip);
 
@@ -284,7 +284,7 @@
 
 static const struct snd_kcontrol_new front_panel_switch = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "Front Panel Switch",
+	.name = "Front Panel Playback Switch",
 	.info = snd_ctl_boolean_mono_info,
 	.get = xonar_gpio_bit_switch_get,
 	.put = xonar_gpio_bit_switch_put,
@@ -298,13 +298,7 @@
 		"Fast Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -380,6 +374,30 @@
 	return 0;
 }
 
+static void dump_cs4362a_registers(struct xonar_cs43xx *data,
+				   struct snd_info_buffer *buffer)
+{
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4362A:");
+	for (i = 1; i <= 14; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4362a_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_d1_registers(struct oxygen *chip,
+			      struct snd_info_buffer *buffer)
+{
+	struct xonar_cs43xx *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4398: 7?");
+	for (i = 2; i <= 8; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4398_regs[i]);
+	snd_iprintf(buffer, "\n");
+	dump_cs4362a_registers(data, buffer);
+}
+
 static const struct oxygen_model model_xonar_d1 = {
 	.longname = "Asus Virtuoso 100",
 	.chip = "AV200",
@@ -388,22 +406,26 @@
 	.cleanup = xonar_d1_cleanup,
 	.suspend = xonar_d1_suspend,
 	.resume = xonar_d1_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_cs43xx_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_cs43xx_volume,
 	.update_dac_mute = update_cs43xx_mute,
 	.update_center_lfe_mix = update_cs43xx_center_lfe_mix,
 	.ac97_switch = xonar_d1_line_mic_ac97_switch,
+	.dump_registers = dump_d1_registers,
 	.dac_tlv = cs4362a_db_scale,
 	.model_data_size = sizeof(struct xonar_cs43xx),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2,
-	.dac_channels = 8,
+			 CAPTURE_0_FROM_I2S_2 |
+			 AC97_FMIC_SWITCH,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 127 - 60,
 	.dac_volume_max = 127,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
new file mode 100644
index 0000000..e4de0b8
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -0,0 +1,572 @@
+/*
+ * card driver for the Xonar DG
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ *
+ *
+ *  This driver is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2.
+ *
+ *  This driver is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Xonar DG
+ * --------
+ *
+ * CMI8788:
+ *
+ *   SPI 0 -> CS4245
+ *
+ *   GPIO 3 <- ?
+ *   GPIO 4 <- headphone detect
+ *   GPIO 5 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 6 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 7 -> enable rear headphone amp
+ *   GPIO 8 -> enable output to speakers
+ *
+ * CS4245:
+ *
+ *   input 1 <- aux
+ *   input 2 <- front mic
+ *   input 4 <- line/mic
+ *   aux out -> front panel headphones
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+#include <sound/tlv.h>
+#include "oxygen.h"
+#include "xonar_dg.h"
+#include "cs4245.h"
+
+#define GPIO_MAGIC		0x0008
+#define GPIO_HP_DETECT		0x0010
+#define GPIO_INPUT_ROUTE	0x0060
+#define GPIO_HP_REAR		0x0080
+#define GPIO_OUTPUT_ENABLE	0x0100
+
+struct dg {
+	unsigned int output_sel;
+	s8 input_vol[4][2];
+	unsigned int input_sel;
+	u8 hp_vol_att;
+	u8 cs4245_regs[0x11];
+};
+
+static void cs4245_write(struct oxygen *chip, unsigned int reg, u8 value)
+{
+	struct dg *data = chip->model_data;
+
+	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
+			 OXYGEN_SPI_DATA_LENGTH_3 |
+			 OXYGEN_SPI_CLOCK_1280 |
+			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
+			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
+			 CS4245_SPI_ADDRESS |
+			 CS4245_SPI_WRITE |
+			 (value << 8) | reg);
+	data->cs4245_regs[reg] = value;
+}
+
+static void cs4245_write_cached(struct oxygen *chip, unsigned int reg, u8 value)
+{
+	struct dg *data = chip->model_data;
+
+	if (value != data->cs4245_regs[reg])
+		cs4245_write(chip, reg, value);
+}
+
+static void cs4245_registers_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	cs4245_write(chip, CS4245_POWER_CTRL, CS4245_PDN);
+	cs4245_write(chip, CS4245_DAC_CTRL_1,
+		     data->cs4245_regs[CS4245_DAC_CTRL_1]);
+	cs4245_write(chip, CS4245_ADC_CTRL,
+		     data->cs4245_regs[CS4245_ADC_CTRL]);
+	cs4245_write(chip, CS4245_SIGNAL_SEL,
+		     data->cs4245_regs[CS4245_SIGNAL_SEL]);
+	cs4245_write(chip, CS4245_PGA_B_CTRL,
+		     data->cs4245_regs[CS4245_PGA_B_CTRL]);
+	cs4245_write(chip, CS4245_PGA_A_CTRL,
+		     data->cs4245_regs[CS4245_PGA_A_CTRL]);
+	cs4245_write(chip, CS4245_ANALOG_IN,
+		     data->cs4245_regs[CS4245_ANALOG_IN]);
+	cs4245_write(chip, CS4245_DAC_A_CTRL,
+		     data->cs4245_regs[CS4245_DAC_A_CTRL]);
+	cs4245_write(chip, CS4245_DAC_B_CTRL,
+		     data->cs4245_regs[CS4245_DAC_B_CTRL]);
+	cs4245_write(chip, CS4245_DAC_CTRL_2,
+		     CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC);
+	cs4245_write(chip, CS4245_INT_MASK, 0);
+	cs4245_write(chip, CS4245_POWER_CTRL, 0);
+}
+
+static void cs4245_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->cs4245_regs[CS4245_DAC_CTRL_1] =
+		CS4245_DAC_FM_SINGLE | CS4245_DAC_DIF_LJUST;
+	data->cs4245_regs[CS4245_ADC_CTRL] =
+		CS4245_ADC_FM_SINGLE | CS4245_ADC_DIF_LJUST;
+	data->cs4245_regs[CS4245_SIGNAL_SEL] =
+		CS4245_A_OUT_SEL_HIZ | CS4245_ASYNCH;
+	data->cs4245_regs[CS4245_PGA_B_CTRL] = 0;
+	data->cs4245_regs[CS4245_PGA_A_CTRL] = 0;
+	data->cs4245_regs[CS4245_ANALOG_IN] =
+		CS4245_PGA_SOFT | CS4245_PGA_ZERO | CS4245_SEL_INPUT_4;
+	data->cs4245_regs[CS4245_DAC_A_CTRL] = 0;
+	data->cs4245_regs[CS4245_DAC_B_CTRL] = 0;
+	cs4245_registers_init(chip);
+	snd_component_add(chip->card, "CS4245");
+}
+
+static void dg_output_enable(struct oxygen *chip)
+{
+	msleep(2500);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
+}
+
+static void dg_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->output_sel = 0;
+	data->input_sel = 3;
+	data->hp_vol_att = 2 * 16;
+
+	cs4245_init(chip);
+
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL,
+			    GPIO_MAGIC | GPIO_HP_DETECT);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_INPUT_ROUTE | GPIO_HP_REAR | GPIO_OUTPUT_ENABLE);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
+			    GPIO_INPUT_ROUTE | GPIO_HP_REAR);
+	dg_output_enable(chip);
+}
+
+static void dg_cleanup(struct oxygen *chip)
+{
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
+}
+
+static void dg_suspend(struct oxygen *chip)
+{
+	dg_cleanup(chip);
+}
+
+static void dg_resume(struct oxygen *chip)
+{
+	cs4245_registers_init(chip);
+	dg_output_enable(chip);
+}
+
+static void set_cs4245_dac_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params)
+{
+	struct dg *data = chip->model_data;
+	u8 value;
+
+	value = data->cs4245_regs[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK;
+	if (params_rate(params) <= 50000)
+		value |= CS4245_DAC_FM_SINGLE;
+	else if (params_rate(params) <= 100000)
+		value |= CS4245_DAC_FM_DOUBLE;
+	else
+		value |= CS4245_DAC_FM_QUAD;
+	cs4245_write_cached(chip, CS4245_DAC_CTRL_1, value);
+}
+
+static void set_cs4245_adc_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params)
+{
+	struct dg *data = chip->model_data;
+	u8 value;
+
+	value = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK;
+	if (params_rate(params) <= 50000)
+		value |= CS4245_ADC_FM_SINGLE;
+	else if (params_rate(params) <= 100000)
+		value |= CS4245_ADC_FM_DOUBLE;
+	else
+		value |= CS4245_ADC_FM_QUAD;
+	cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
+}
+
+static int output_switch_info(struct snd_kcontrol *ctl,
+			      struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"Speakers", "Headphones", "FP Headphones"
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int output_switch_get(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->output_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int output_switch_put(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 2)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->output_sel;
+	if (changed) {
+		data->output_sel = value->value.enumerated.item[0];
+
+		reg = data->cs4245_regs[CS4245_SIGNAL_SEL] &
+						~CS4245_A_OUT_SEL_MASK;
+		reg |= data->output_sel == 2 ?
+				CS4245_A_OUT_SEL_DAC : CS4245_A_OUT_SEL_HIZ;
+		cs4245_write_cached(chip, CS4245_SIGNAL_SEL, reg);
+
+		cs4245_write_cached(chip, CS4245_DAC_A_CTRL,
+				    data->output_sel ? data->hp_vol_att : 0);
+		cs4245_write_cached(chip, CS4245_DAC_B_CTRL,
+				    data->output_sel ? data->hp_vol_att : 0);
+
+		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+				      data->output_sel == 1 ? GPIO_HP_REAR : 0,
+				      GPIO_HP_REAR);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int hp_volume_offset_info(struct snd_kcontrol *ctl,
+				 struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"< 64 ohms", "64-150 ohms", "150-300 ohms"
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int hp_volume_offset_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	if (data->hp_vol_att > 2 * 7)
+		value->value.enumerated.item[0] = 0;
+	else if (data->hp_vol_att > 0)
+		value->value.enumerated.item[0] = 1;
+	else
+		value->value.enumerated.item[0] = 2;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int hp_volume_offset_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	static const s8 atts[3] = { 2 * 16, 2 * 7, 0 };
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	s8 att;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 2)
+		return -EINVAL;
+	att = atts[value->value.enumerated.item[0]];
+	mutex_lock(&chip->mutex);
+	changed = att != data->hp_vol_att;
+	if (changed) {
+		data->hp_vol_att = att;
+		if (data->output_sel) {
+			cs4245_write_cached(chip, CS4245_DAC_A_CTRL, att);
+			cs4245_write_cached(chip, CS4245_DAC_B_CTRL, att);
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int input_vol_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 2;
+	info->value.integer.min = 2 * -12;
+	info->value.integer.max = 2 * 12;
+	return 0;
+}
+
+static int input_vol_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+
+	mutex_lock(&chip->mutex);
+	value->value.integer.value[0] = data->input_vol[idx][0];
+	value->value.integer.value[1] = data->input_vol[idx][1];
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_vol_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+	int changed = 0;
+
+	if (value->value.integer.value[0] < 2 * -12 ||
+	    value->value.integer.value[0] > 2 * 12 ||
+	    value->value.integer.value[1] < 2 * -12 ||
+	    value->value.integer.value[1] > 2 * 12)
+		return -EINVAL;
+	mutex_lock(&chip->mutex);
+	changed = data->input_vol[idx][0] != value->value.integer.value[0] ||
+		  data->input_vol[idx][1] != value->value.integer.value[1];
+	if (changed) {
+		data->input_vol[idx][0] = value->value.integer.value[0];
+		data->input_vol[idx][1] = value->value.integer.value[1];
+		if (idx == data->input_sel) {
+			cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
+					    data->input_vol[idx][0]);
+			cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
+					    data->input_vol[idx][1]);
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static DECLARE_TLV_DB_SCALE(cs4245_pga_db_scale, -1200, 50, 0);
+
+static int input_sel_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	static const char *const names[4] = {
+		"Mic", "Aux", "Front Mic", "Line"
+	};
+
+	return snd_ctl_enum_info(info, 1, 4, names);
+}
+
+static int input_sel_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->input_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_sel_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	static const u8 sel_values[4] = {
+		CS4245_SEL_MIC,
+		CS4245_SEL_INPUT_1,
+		CS4245_SEL_INPUT_2,
+		CS4245_SEL_INPUT_4
+	};
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 3)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->input_sel;
+	if (changed) {
+		data->input_sel = value->value.enumerated.item[0];
+
+		cs4245_write(chip, CS4245_ANALOG_IN,
+			     (data->cs4245_regs[CS4245_ANALOG_IN] &
+							~CS4245_SEL_MASK) |
+			     sel_values[data->input_sel]);
+
+		cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
+				    data->input_vol[data->input_sel][0]);
+		cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
+				    data->input_vol[data->input_sel][1]);
+
+		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+				      data->input_sel ? 0 : GPIO_INPUT_ROUTE,
+				      GPIO_INPUT_ROUTE);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Active", "Frozen" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	value->value.enumerated.item[0] =
+		!!(data->cs4245_regs[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE);
+	return 0;
+}
+
+static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	reg = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE;
+	if (value->value.enumerated.item[0])
+		reg |= CS4245_HPF_FREEZE;
+	changed = reg != data->cs4245_regs[CS4245_ADC_CTRL];
+	if (changed)
+		cs4245_write(chip, CS4245_ADC_CTRL, reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+#define INPUT_VOLUME(xname, index) { \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = xname, \
+	.info = input_vol_info, \
+	.get = input_vol_get, \
+	.put = input_vol_put, \
+	.tlv = { .p = cs4245_pga_db_scale }, \
+	.private_value = index, \
+}
+static const struct snd_kcontrol_new dg_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Analog Output Playback Enum",
+		.info = output_switch_info,
+		.get = output_switch_get,
+		.put = output_switch_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphones Impedance Playback Enum",
+		.info = hp_volume_offset_info,
+		.get = hp_volume_offset_get,
+		.put = hp_volume_offset_put,
+	},
+	INPUT_VOLUME("Mic Capture Volume", 0),
+	INPUT_VOLUME("Aux Capture Volume", 1),
+	INPUT_VOLUME("Front Mic Capture Volume", 2),
+	INPUT_VOLUME("Line Capture Volume", 3),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Capture Source",
+		.info = input_sel_info,
+		.get = input_sel_get,
+		.put = input_sel_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC High-pass Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+};
+
+static int dg_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		return 1;
+	return 0;
+}
+
+static int dg_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(dg_controls); ++i) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&dg_controls[i], chip));
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+static void dump_cs4245_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct dg *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4245:");
+	for (i = 1; i <= 0x10; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4245_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+struct oxygen_model model_xonar_dg = {
+	.shortname = "Xonar DG",
+	.longname = "C-Media Oxygen HD Audio",
+	.chip = "CMI8786",
+	.init = dg_init,
+	.control_filter = dg_control_filter,
+	.mixer_init = dg_mixer_init,
+	.cleanup = dg_cleanup,
+	.suspend = dg_suspend,
+	.resume = dg_resume,
+	.set_dac_params = set_cs4245_dac_params,
+	.set_adc_params = set_cs4245_adc_params,
+	.dump_registers = dump_cs4245_registers,
+	.model_data_size = sizeof(struct dg),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_2,
+	.dac_channels_pcm = 6,
+	.dac_channels_mixer = 0,
+	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
diff --git a/sound/pci/oxygen/xonar_dg.h b/sound/pci/oxygen/xonar_dg.h
new file mode 100644
index 0000000..5688d78
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg.h
@@ -0,0 +1,8 @@
+#ifndef XONAR_DG_H_INCLUDED
+#define XONAR_DG_H_INCLUDED
+
+#include "oxygen.h"
+
+extern struct oxygen_model model_xonar_dg;
+
+#endif
diff --git a/sound/pci/oxygen/xonar_hdmi.c b/sound/pci/oxygen/xonar_hdmi.c
index b12db1f..136dac6 100644
--- a/sound/pci/oxygen/xonar_hdmi.c
+++ b/sound/pci/oxygen/xonar_hdmi.c
@@ -1,5 +1,5 @@
 /*
- * helper functions for HDMI models (Xonar HDAV1.3)
+ * helper functions for HDMI models (Xonar HDAV1.3/HDAV1.3 Slim)
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  *
diff --git a/sound/pci/oxygen/xonar_lib.c b/sound/pci/oxygen/xonar_lib.c
index b3ff713..0ebe7f5 100644
--- a/sound/pci/oxygen/xonar_lib.c
+++ b/sound/pci/oxygen/xonar_lib.c
@@ -104,9 +104,10 @@
 {
 	struct oxygen *chip = ctl->private_data;
 	u16 bit = ctl->private_value;
+	bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT;
 
 	value->value.integer.value[0] =
-		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit);
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit) ^ invert;
 	return 0;
 }
 
@@ -115,12 +116,13 @@
 {
 	struct oxygen *chip = ctl->private_data;
 	u16 bit = ctl->private_value;
+	bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT;
 	u16 old_bits, new_bits;
 	int changed;
 
 	spin_lock_irq(&chip->reg_lock);
 	old_bits = oxygen_read16(chip, OXYGEN_GPIO_DATA);
-	if (value->value.integer.value[0])
+	if (!!value->value.integer.value[0] ^ invert)
 		new_bits = old_bits | bit;
 	else
 		new_bits = old_bits & ~bit;
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index d491fd6..54cad38 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -22,20 +22,26 @@
  *
  * CMI8788:
  *
- * SPI 0 -> 1st PCM1796 (front)
- * SPI 1 -> 2nd PCM1796 (surround)
- * SPI 2 -> 3rd PCM1796 (center/LFE)
- * SPI 4 -> 4th PCM1796 (back)
+ *   SPI 0 -> 1st PCM1796 (front)
+ *   SPI 1 -> 2nd PCM1796 (surround)
+ *   SPI 2 -> 3rd PCM1796 (center/LFE)
+ *   SPI 4 -> 4th PCM1796 (back)
  *
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 5 <- external power present (D2X only)
- * GPIO 7 -> ALT
- * GPIO 8 -> enable output to speakers
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 5 <- external power present (D2X only)
+ *   GPIO 7 -> ALT
+ *   GPIO 8 -> enable output to speakers
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN   <- aux
+ *   VIDEO_IN <- CD
+ *   FMIC_IN  <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  */
 
 /*
@@ -44,52 +50,53 @@
  *
  * CMI8788:
  *
- * I²C <-> PCM1796 (front)
+ *   I²C <-> PCM1796 (addr 1001100) (front)
  *
- * GPI 0 <- external power present
+ *   GPI 0 <- external power present
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 0 -> enable HDMI (0) or speaker (1) output
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 <- daughterboard detection
+ *   GPIO 5 <- daughterboard detection
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> ?
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
- * TXD -> HDMI controller
- * RXD <- HDMI controller
- *
- * PCM1796 front: AD1,0 <- 0,0
+ *   UART <-> HDMI controller
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   CD_IN  <- CD
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  *
  * no daughterboard
  * ----------------
  *
- * GPIO 4 <- 1
+ *   GPIO 4 <- 1
  *
  * H6 daughterboard
  * ----------------
  *
- * GPIO 4 <- 0
- * GPIO 5 <- 0
+ *   GPIO 4 <- 0
+ *   GPIO 5 <- 0
  *
- * I²C <-> PCM1796 (surround)
- *     <-> PCM1796 (center/LFE)
- *     <-> PCM1796 (back)
- *
- * PCM1796 surround:   AD1,0 <- 0,1
- * PCM1796 center/LFE: AD1,0 <- 1,0
- * PCM1796 back:       AD1,0 <- 1,1
+ *   I²C <-> PCM1796 (addr 1001101) (surround)
+ *       <-> PCM1796 (addr 1001110) (center/LFE)
+ *       <-> PCM1796 (addr 1001111) (back)
  *
  * unknown daughterboard
  * ---------------------
  *
- * GPIO 4 <- 0
- * GPIO 5 <- 1
+ *   GPIO 4 <- 0
+ *   GPIO 5 <- 1
  *
- * I²C <-> CS4362A (surround, center/LFE, back)
- *
- * CS4362A: AD0 <- 0
+ *   I²C <-> CS4362A (addr 0011000) (surround, center/LFE, back)
  */
 
 /*
@@ -98,32 +105,35 @@
  *
  * CMI8788:
  *
- * I²C <-> PCM1792A
- *     <-> CS2000 (ST only)
+ *   I²C <-> PCM1792A (addr 1001100)
+ *       <-> CS2000 (addr 1001110) (ST only)
  *
- * ADC1 MCLK -> REF_CLK of CS2000 (ST only)
+ *   ADC1 MCLK -> REF_CLK of CS2000 (ST only)
  *
- * GPI 0 <- external power present (STX only)
+ *   GPI 0 <- external power present (STX only)
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 1 -> route HP to front panel (0) or rear jack (1)
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 7 -> route output to speaker jacks (0) or HP (1)
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 0 -> enable output to speakers
+ *   GPIO 1 -> route HP to front panel (0) or rear jack (1)
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 <- daughterboard detection
+ *   GPIO 5 <- daughterboard detection
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> route output to speaker jacks (0) or HP (1)
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
  * PCM1792A:
  *
- * AD1,0 <- 0,0
- * SCK <- CLK_OUT of CS2000 (ST only)
- *
- * CS2000:
- *
- * AD0 <- 0
+ *   SCK <- CLK_OUT of CS2000 (ST only)
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  *
  * H6 daughterboard
  * ----------------
@@ -133,15 +143,39 @@
  */
 
 /*
- * Xonar HDAV1.3 Slim
- * ------------------
+ * Xonar Xense
+ * -----------
  *
  * CMI8788:
  *
- * GPIO 1 -> enable output
+ *   I²C <-> PCM1796 (addr 1001100) (front)
+ *       <-> CS4362A (addr 0011000) (surround, center/LFE, back)
+ *       <-> CS2000 (addr 1001110)
  *
- * TXD -> HDMI controller
- * RXD <- HDMI controller
+ *   ADC1 MCLK -> REF_CLK of CS2000
+ *
+ *   GPI 0 <- external power present
+ *
+ *   GPIO 0 -> enable output
+ *   GPIO 1 -> route HP to front panel (0) or rear jack (1)
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 -> enable output
+ *   GPIO 5 -> enable output
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> route output to HP (0) or speaker (1)
+ *   GPIO 8 -> route input jack to mic-in (0) or line-in (1)
+ *
+ * CM9780:
+ *
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN   <- aux
+ *   VIDEO_IN <- ?
+ *   FMIC_IN  <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   GPO 1 -> route mic-in from input jack (0) or front panel header (1)
  */
 
 #include <linux/pci.h>
@@ -150,6 +184,7 @@
 #include <sound/ac97_codec.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
@@ -167,12 +202,14 @@
 #define GPIO_INPUT_ROUTE	0x0100
 
 #define GPIO_HDAV_OUTPUT_ENABLE	0x0001
+#define GPIO_HDAV_MAGIC		0x00c0
 
 #define GPIO_DB_MASK		0x0030
 #define GPIO_DB_H6		0x0000
 
 #define GPIO_ST_OUTPUT_ENABLE	0x0001
 #define GPIO_ST_HP_REAR		0x0002
+#define GPIO_ST_MAGIC		0x0040
 #define GPIO_ST_HP		0x0080
 
 #define I2C_DEVICE_PCM1796(i)	(0x98 + ((i) << 1))	/* 10011, ii, /W=0 */
@@ -186,11 +223,12 @@
 	unsigned int dacs;
 	u8 pcm1796_regs[4][5];
 	unsigned int current_rate;
-	bool os_128;
+	bool h6;
 	bool hp_active;
 	s8 hp_gain_offset;
 	bool has_cs2000;
-	u8 cs2000_fun_cfg_1;
+	u8 cs2000_regs[0x1f];
+	bool broken_i2c;
 };
 
 struct xonar_hdav {
@@ -249,16 +287,14 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	oxygen_write_i2c(chip, I2C_DEVICE_CS2000, reg, value);
-	if (reg == CS2000_FUN_CFG_1)
-		data->cs2000_fun_cfg_1 = value;
+	data->cs2000_regs[reg] = value;
 }
 
 static void cs2000_write_cached(struct oxygen *chip, u8 reg, u8 value)
 {
 	struct xonar_pcm179x *data = chip->model_data;
 
-	if (reg != CS2000_FUN_CFG_1 ||
-	    value != data->cs2000_fun_cfg_1)
+	if (value != data->cs2000_regs[reg])
 		cs2000_write(chip, reg, value);
 }
 
@@ -268,6 +304,7 @@
 	unsigned int i;
 	s8 gain_offset;
 
+	msleep(1);
 	gain_offset = data->hp_active ? data->hp_gain_offset : 0;
 	for (i = 0; i < data->dacs; ++i) {
 		/* set ATLD before ATL/ATR */
@@ -282,6 +319,7 @@
 		pcm1796_write(chip, i, 20,
 			      data->pcm1796_regs[0][20 - PCM1796_REG_BASE]);
 		pcm1796_write(chip, i, 21, 0);
+		gain_offset = 0;
 	}
 }
 
@@ -290,10 +328,11 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->pcm1796_regs[0][18 - PCM1796_REG_BASE] = PCM1796_MUTE |
-		PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD;
+		PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
 	data->pcm1796_regs[0][19 - PCM1796_REG_BASE] =
 		PCM1796_FLT_SHARP | PCM1796_ATS_1;
-	data->pcm1796_regs[0][20 - PCM1796_REG_BASE] = PCM1796_OS_64;
+	data->pcm1796_regs[0][20 - PCM1796_REG_BASE] =
+		data->h6 ? PCM1796_OS_64 : PCM1796_OS_128;
 	pcm1796_registers_init(chip);
 	data->current_rate = 48000;
 }
@@ -339,18 +378,20 @@
 	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
 		       OXYGEN_2WIRE_LENGTH_8 |
 		       OXYGEN_2WIRE_INTERRUPT_MASK |
-		       OXYGEN_2WIRE_SPEED_FAST);
+		       OXYGEN_2WIRE_SPEED_STANDARD);
 
 	data->pcm179x.generic.anti_pop_delay = 100;
 	data->pcm179x.generic.output_enable_bit = GPIO_HDAV_OUTPUT_ENABLE;
 	data->pcm179x.generic.ext_power_reg = OXYGEN_GPI_DATA;
 	data->pcm179x.generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
 	data->pcm179x.generic.ext_power_bit = GPI_EXT_POWER;
-	data->pcm179x.dacs = chip->model.private_data ? 4 : 1;
+	data->pcm179x.dacs = chip->model.dac_channels_mixer / 2;
+	data->pcm179x.h6 = chip->model.dac_channels_mixer > 2;
 
 	pcm1796_init(chip);
 
-	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_HDAV_MAGIC | GPIO_INPUT_ROUTE);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE);
 
 	xonar_init_cs53x1(chip);
@@ -367,7 +408,7 @@
 	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
 		       OXYGEN_2WIRE_LENGTH_8 |
 		       OXYGEN_2WIRE_INTERRUPT_MASK |
-		       OXYGEN_2WIRE_SPEED_FAST);
+		       OXYGEN_2WIRE_SPEED_STANDARD);
 }
 
 static void xonar_st_init_common(struct oxygen *chip)
@@ -375,13 +416,14 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->generic.output_enable_bit = GPIO_ST_OUTPUT_ENABLE;
-	data->dacs = chip->model.private_data ? 4 : 1;
+	data->dacs = chip->model.dac_channels_mixer / 2;
 	data->hp_gain_offset = 2*-18;
 
 	pcm1796_init(chip);
 
 	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP);
+			  GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR |
+			  GPIO_ST_MAGIC | GPIO_ST_HP);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
 			    GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP);
 
@@ -410,9 +452,11 @@
 	cs2000_write(chip, CS2000_RATIO_0 + 1, 0x10);
 	cs2000_write(chip, CS2000_RATIO_0 + 2, 0x00);
 	cs2000_write(chip, CS2000_RATIO_0 + 3, 0x00);
-	cs2000_write(chip, CS2000_FUN_CFG_1, data->cs2000_fun_cfg_1);
+	cs2000_write(chip, CS2000_FUN_CFG_1,
+		     data->cs2000_regs[CS2000_FUN_CFG_1]);
 	cs2000_write(chip, CS2000_FUN_CFG_2, 0);
 	cs2000_write(chip, CS2000_GLOBAL_CFG, CS2000_EN_DEV_CFG_2);
+	msleep(3); /* PLL lock delay */
 }
 
 static void xonar_st_init(struct oxygen *chip)
@@ -420,13 +464,18 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->generic.anti_pop_delay = 100;
+	data->h6 = chip->model.dac_channels_mixer > 2;
 	data->has_cs2000 = 1;
-	data->cs2000_fun_cfg_1 = CS2000_REF_CLK_DIV_1;
+	data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1;
+	data->broken_i2c = true;
 
 	oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-		       OXYGEN_RATE_48000 | OXYGEN_I2S_FORMAT_I2S |
-		       OXYGEN_I2S_MCLK_128 | OXYGEN_I2S_BITS_16 |
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+		       OXYGEN_RATE_48000 |
+		       OXYGEN_I2S_FORMAT_I2S |
+		       OXYGEN_I2S_MCLK(data->h6 ? MCLK_256 : MCLK_512) |
+		       OXYGEN_I2S_BITS_16 |
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_BCLK_64);
 
 	xonar_st_init_i2c(chip);
 	cs2000_registers_init(chip);
@@ -507,44 +556,16 @@
 	xonar_stx_resume(chip);
 }
 
-static unsigned int mclk_from_rate(struct oxygen *chip, unsigned int rate)
-{
-	struct xonar_pcm179x *data = chip->model_data;
-
-	if (rate <= 32000)
-		return OXYGEN_I2S_MCLK_512;
-	else if (rate <= 48000 && data->os_128)
-		return OXYGEN_I2S_MCLK_512;
-	else if (rate <= 96000)
-		return OXYGEN_I2S_MCLK_256;
-	else
-		return OXYGEN_I2S_MCLK_128;
-}
-
-static unsigned int get_pcm1796_i2s_mclk(struct oxygen *chip,
-					 unsigned int channel,
-					 struct snd_pcm_hw_params *params)
-{
-	if (channel == PCM_MULTICH)
-		return mclk_from_rate(chip, params_rate(params));
-	else
-		return oxygen_default_i2s_mclk(chip, channel, params);
-}
-
 static void update_pcm1796_oversampling(struct oxygen *chip)
 {
 	struct xonar_pcm179x *data = chip->model_data;
 	unsigned int i;
 	u8 reg;
 
-	if (data->current_rate <= 32000)
+	if (data->current_rate <= 48000 && !data->h6)
 		reg = PCM1796_OS_128;
-	else if (data->current_rate <= 48000 && data->os_128)
-		reg = PCM1796_OS_128;
-	else if (data->current_rate <= 96000 || data->os_128)
-		reg = PCM1796_OS_64;
 	else
-		reg = PCM1796_OS_32;
+		reg = PCM1796_OS_64;
 	for (i = 0; i < data->dacs; ++i)
 		pcm1796_write_cached(chip, i, 20, reg);
 }
@@ -554,6 +575,7 @@
 {
 	struct xonar_pcm179x *data = chip->model_data;
 
+	msleep(1);
 	data->current_rate = params_rate(params);
 	update_pcm1796_oversampling(chip);
 }
@@ -570,6 +592,7 @@
 				     + gain_offset);
 		pcm1796_write_cached(chip, i, 17, chip->dac_volume[i * 2 + 1]
 				     + gain_offset);
+		gain_offset = 0;
 	}
 }
 
@@ -579,7 +602,7 @@
 	unsigned int i;
 	u8 value;
 
-	value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD;
+	value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
 	if (chip->dac_mute)
 		value |= PCM1796_MUTE;
 	for (i = 0; i < data->dacs; ++i)
@@ -592,45 +615,35 @@
 	u8 rate_mclk, reg;
 
 	switch (rate) {
-		/* XXX Why is the I2S A MCLK half the actual I2S MCLK? */
 	case 32000:
-		rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256;
+	case 64000:
+		rate_mclk = OXYGEN_RATE_32000;
 		break;
 	case 44100:
-		if (data->os_128)
-			rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
-		else
-			rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_128;
-		break;
-	default: /* 48000 */
-		if (data->os_128)
-			rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
-		else
-			rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_128;
-		break;
-	case 64000:
-		rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256;
-		break;
 	case 88200:
-		rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
-		break;
-	case 96000:
-		rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
-		break;
 	case 176400:
-		rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
+		rate_mclk = OXYGEN_RATE_44100;
 		break;
+	default:
+	case 48000:
+	case 96000:
 	case 192000:
-		rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
+		rate_mclk = OXYGEN_RATE_48000;
 		break;
 	}
+
+	if (rate <= 96000 && (rate > 48000 || data->h6)) {
+		rate_mclk |= OXYGEN_I2S_MCLK(MCLK_256);
+		reg = CS2000_REF_CLK_DIV_1;
+	} else {
+		rate_mclk |= OXYGEN_I2S_MCLK(MCLK_512);
+		reg = CS2000_REF_CLK_DIV_2;
+	}
+
 	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, rate_mclk,
 			      OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_MCLK_MASK);
-	if ((rate_mclk & OXYGEN_I2S_MCLK_MASK) <= OXYGEN_I2S_MCLK_128)
-		reg = CS2000_REF_CLK_DIV_1;
-	else
-		reg = CS2000_REF_CLK_DIV_2;
 	cs2000_write_cached(chip, CS2000_FUN_CFG_1, reg);
+	msleep(3); /* PLL lock delay */
 }
 
 static void set_st_params(struct oxygen *chip,
@@ -665,13 +678,7 @@
 		"Sharp Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -719,57 +726,13 @@
 	.put = rolloff_put,
 };
 
-static int os_128_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
-{
-	static const char *const names[2] = { "64x", "128x" };
-
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
-}
-
-static int os_128_get(struct snd_kcontrol *ctl,
-		      struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct xonar_pcm179x *data = chip->model_data;
-
-	value->value.enumerated.item[0] = data->os_128;
-	return 0;
-}
-
-static int os_128_put(struct snd_kcontrol *ctl,
-		      struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct xonar_pcm179x *data = chip->model_data;
-	int changed;
-
-	mutex_lock(&chip->mutex);
-	changed = value->value.enumerated.item[0] != data->os_128;
-	if (changed) {
-		data->os_128 = value->value.enumerated.item[0];
-		if (data->has_cs2000)
-			update_cs2000_rate(chip, data->current_rate);
-		oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
-				      mclk_from_rate(chip, data->current_rate),
-				      OXYGEN_I2S_MCLK_MASK);
-		update_pcm1796_oversampling(chip);
-	}
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-static const struct snd_kcontrol_new os_128_control = {
+static const struct snd_kcontrol_new hdav_hdmi_control = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "DAC Oversampling Playback Enum",
-	.info = os_128_info,
-	.get = os_128_get,
-	.put = os_128_put,
+	.name = "HDMI Playback Switch",
+	.info = snd_ctl_boolean_mono_info,
+	.get = xonar_gpio_bit_switch_get,
+	.put = xonar_gpio_bit_switch_put,
+	.private_value = GPIO_HDAV_OUTPUT_ENABLE | XONAR_GPIO_BIT_INVERT,
 };
 
 static int st_output_switch_info(struct snd_kcontrol *ctl,
@@ -779,13 +742,7 @@
 		"Speakers", "Headphones", "FP Headphones"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item >= 3)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int st_output_switch_get(struct snd_kcontrol *ctl,
@@ -840,13 +797,7 @@
 		"< 64 ohms", "64-300 ohms", "300-600 ohms"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int st_hp_volume_offset_get(struct snd_kcontrol *ctl,
@@ -928,16 +879,25 @@
 	return 0;
 }
 
+static int xonar_st_h6_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		/* no volume/mute, as I²C to the third DAC does not work */
+		return 1;
+	return 0;
+}
+
 static int add_pcm1796_controls(struct oxygen *chip)
 {
+	struct xonar_pcm179x *data = chip->model_data;
 	int err;
 
-	err = snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
-	if (err < 0)
-		return err;
-	err = snd_ctl_add(chip->card, snd_ctl_new1(&os_128_control, chip));
-	if (err < 0)
-		return err;
+	if (!data->broken_i2c) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&rolloff_control, chip));
+		if (err < 0)
+			return err;
+	}
 	return 0;
 }
 
@@ -956,7 +916,15 @@
 
 static int xonar_hdav_mixer_init(struct oxygen *chip)
 {
-	return add_pcm1796_controls(chip);
+	int err;
+
+	err = snd_ctl_add(chip->card, snd_ctl_new1(&hdav_hdmi_control, chip));
+	if (err < 0)
+		return err;
+	err = add_pcm1796_controls(chip);
+	if (err < 0)
+		return err;
+	return 0;
 }
 
 static int xonar_st_mixer_init(struct oxygen *chip)
@@ -976,6 +944,45 @@
 	return 0;
 }
 
+static void dump_pcm1796_registers(struct oxygen *chip,
+				   struct snd_info_buffer *buffer)
+{
+	struct xonar_pcm179x *data = chip->model_data;
+	unsigned int dac, i;
+
+	for (dac = 0; dac < data->dacs; ++dac) {
+		snd_iprintf(buffer, "\nPCM1796 %u:", dac + 1);
+		for (i = 0; i < 5; ++i)
+			snd_iprintf(buffer, " %02x",
+				    data->pcm1796_regs[dac][i]);
+	}
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_cs2000_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_pcm179x *data = chip->model_data;
+	unsigned int i;
+
+	if (data->has_cs2000) {
+		snd_iprintf(buffer, "\nCS2000:\n00:   ");
+		for (i = 1; i < 0x10; ++i)
+			snd_iprintf(buffer, " %02x", data->cs2000_regs[i]);
+		snd_iprintf(buffer, "\n10:");
+		for (i = 0x10; i < 0x1f; ++i)
+			snd_iprintf(buffer, " %02x", data->cs2000_regs[i]);
+		snd_iprintf(buffer, "\n");
+	}
+}
+
+static void dump_st_registers(struct oxygen *chip,
+			      struct snd_info_buffer *buffer)
+{
+	dump_pcm1796_registers(chip, buffer);
+	dump_cs2000_registers(chip, buffer);
+}
+
 static const struct oxygen_model model_xonar_d2 = {
 	.longname = "Asus Virtuoso 200",
 	.chip = "AV200",
@@ -985,11 +992,11 @@
 	.cleanup = xonar_d2_cleanup,
 	.suspend = xonar_d2_suspend,
 	.resume = xonar_d2_resume,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_pcm1796_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
+	.dump_registers = dump_pcm1796_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_pcm179x),
 	.device_config = PLAYBACK_0_TO_I2S |
@@ -999,13 +1006,16 @@
 			 MIDI_OUTPUT |
 			 MIDI_INPUT |
 			 AC97_CD_INPUT,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.misc_flags = OXYGEN_MISC_MIDI,
 	.function_flags = OXYGEN_FUNCTION_SPI |
 			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1018,25 +1028,28 @@
 	.suspend = xonar_hdav_suspend,
 	.resume = xonar_hdav_resume,
 	.pcm_hardware_filter = xonar_hdmi_pcm_hardware_filter,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_hdav_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
 	.uart_input = xonar_hdmi_uart_input,
 	.ac97_switch = xonar_line_mic_ac97_switch,
+	.dump_registers = dump_pcm1796_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_hdav),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
 			 CAPTURE_0_FROM_I2S_2 |
 			 CAPTURE_1_FROM_SPDIF,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 2,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.misc_flags = OXYGEN_MISC_MIDI,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1048,22 +1061,26 @@
 	.cleanup = xonar_st_cleanup,
 	.suspend = xonar_st_suspend,
 	.resume = xonar_st_resume,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_st_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
 	.ac97_switch = xonar_line_mic_ac97_switch,
+	.dump_registers = dump_st_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_pcm179x),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2,
-	.dac_channels = 2,
+			 CAPTURE_0_FROM_I2S_2 |
+			 AC97_FMIC_SWITCH,
+	.dac_channels_pcm = 2,
+	.dac_channels_mixer = 2,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1089,7 +1106,8 @@
 			break;
 		case GPIO_DB_H6:
 			chip->model.shortname = "Xonar HDAV1.3+H6";
-			chip->model.private_data = 1;
+			chip->model.dac_channels_mixer = 8;
+			chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
 			break;
 		}
 		break;
@@ -1102,8 +1120,10 @@
 			break;
 		case GPIO_DB_H6:
 			chip->model.shortname = "Xonar ST+H6";
-			chip->model.dac_channels = 8;
-			chip->model.private_data = 1;
+			chip->model.control_filter = xonar_st_h6_control_filter;
+			chip->model.dac_channels_pcm = 8;
+			chip->model.dac_channels_mixer = 8;
+			chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
 			break;
 		}
 		break;
@@ -1114,9 +1134,6 @@
 		chip->model.resume = xonar_stx_resume;
 		chip->model.set_dac_params = set_pcm1796_params;
 		break;
-	case 0x835e:
-		snd_printk(KERN_ERR "the HDAV1.3 Slim is not supported\n");
-		return -ENODEV;
 	default:
 		return -EINVAL;
 	}
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index 200f760..42d1ab1 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -1,5 +1,5 @@
 /*
- * card driver for models with WM8776/WM8766 DACs (Xonar DS)
+ * card driver for models with WM8776/WM8766 DACs (Xonar DS/HDAV1.3 Slim)
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  *
@@ -22,26 +22,48 @@
  *
  * CMI8788:
  *
- * SPI 0 -> WM8766 (surround, center/LFE, back)
- * SPI 1 -> WM8776 (front, input)
+ *   SPI 0 -> WM8766 (surround, center/LFE, back)
+ *   SPI 1 -> WM8776 (front, input)
  *
- * GPIO 4 <- headphone detect, 0 = plugged
- * GPIO 6 -> route input jack to mic-in (0) or line-in (1)
- * GPIO 7 -> enable output to front L/R speaker channels
- * GPIO 8 -> enable output to other speaker channels and front panel headphone
+ *   GPIO 4 <- headphone detect, 0 = plugged
+ *   GPIO 6 -> route input jack to mic-in (0) or line-in (1)
+ *   GPIO 7 -> enable output to front L/R speaker channels
+ *   GPIO 8 -> enable output to other speaker channels and front panel headphone
  *
- * WM8766:
+ * WM8776:
  *
- * input 1 <- line
- * input 2 <- mic
- * input 3 <- front mic
- * input 4 <- aux
+ *   input 1 <- line
+ *   input 2 <- mic
+ *   input 3 <- front mic
+ *   input 4 <- aux
+ */
+
+/*
+ * Xonar HDAV1.3 Slim
+ * ------------------
+ *
+ * CMI8788:
+ *
+ *   I²C <-> WM8776 (addr 0011010)
+ *
+ *   GPIO 0  -> disable HDMI output
+ *   GPIO 1  -> enable HP output
+ *   GPIO 6  -> firmware EEPROM I²C clock
+ *   GPIO 7 <-> firmware EEPROM I²C data
+ *
+ *   UART <-> HDMI controller
+ *
+ * WM8776:
+ *
+ *   input 1 <- mic
+ *   input 2 <- aux
  */
 
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -55,6 +77,13 @@
 #define GPIO_DS_OUTPUT_FRONTLR	0x0080
 #define GPIO_DS_OUTPUT_ENABLE	0x0100
 
+#define GPIO_SLIM_HDMI_DISABLE	0x0001
+#define GPIO_SLIM_OUTPUT_ENABLE	0x0002
+#define GPIO_SLIM_FIRMWARE_CLK	0x0040
+#define GPIO_SLIM_FIRMWARE_DATA	0x0080
+
+#define I2C_DEVICE_WM8776	0x34	/* 001101, 0, /W=0 */
+
 #define LC_CONTROL_LIMITER	0x40000000
 #define LC_CONTROL_ALC		0x20000000
 
@@ -66,19 +95,37 @@
 	struct snd_kcontrol *mic_adcmux_control;
 	struct snd_kcontrol *lc_controls[13];
 	struct snd_jack *hp_jack;
+	struct xonar_hdmi hdmi;
 };
 
-static void wm8776_write(struct oxygen *chip,
-			 unsigned int reg, unsigned int value)
+static void wm8776_write_spi(struct oxygen *chip,
+			     unsigned int reg, unsigned int value)
 {
-	struct xonar_wm87x6 *data = chip->model_data;
-
 	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
 			 OXYGEN_SPI_DATA_LENGTH_2 |
 			 OXYGEN_SPI_CLOCK_160 |
 			 (1 << OXYGEN_SPI_CODEC_SHIFT) |
 			 OXYGEN_SPI_CEN_LATCH_CLOCK_LO,
 			 (reg << 9) | value);
+}
+
+static void wm8776_write_i2c(struct oxygen *chip,
+			     unsigned int reg, unsigned int value)
+{
+	oxygen_write_i2c(chip, I2C_DEVICE_WM8776,
+			 (reg << 1) | (value >> 8), value);
+}
+
+static void wm8776_write(struct oxygen *chip,
+			 unsigned int reg, unsigned int value)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) ==
+	    OXYGEN_FUNCTION_SPI)
+		wm8776_write_spi(chip, reg, value);
+	else
+		wm8776_write_i2c(chip, reg, value);
 	if (reg < ARRAY_SIZE(data->wm8776_regs)) {
 		if (reg >= WM8776_HPLVOL && reg <= WM8776_DACMASTER)
 			value &= ~WM8776_UPDATE;
@@ -245,17 +292,50 @@
 	snd_component_add(chip->card, "WM8766");
 }
 
+static void xonar_hdav_slim_init(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	data->generic.anti_pop_delay = 300;
+	data->generic.output_enable_bit = GPIO_SLIM_OUTPUT_ENABLE;
+
+	wm8776_init(chip);
+
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_SLIM_HDMI_DISABLE |
+			  GPIO_SLIM_FIRMWARE_CLK |
+			  GPIO_SLIM_FIRMWARE_DATA);
+
+	xonar_hdmi_init(chip, &data->hdmi);
+	xonar_enable_output(chip);
+
+	snd_component_add(chip->card, "WM8776");
+}
+
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
 	xonar_disable_output(chip);
 	wm8776_write(chip, WM8776_RESET, 0);
 }
 
+static void xonar_hdav_slim_cleanup(struct oxygen *chip)
+{
+	xonar_hdmi_cleanup(chip);
+	xonar_disable_output(chip);
+	wm8776_write(chip, WM8776_RESET, 0);
+	msleep(2);
+}
+
 static void xonar_ds_suspend(struct oxygen *chip)
 {
 	xonar_ds_cleanup(chip);
 }
 
+static void xonar_hdav_slim_suspend(struct oxygen *chip)
+{
+	xonar_hdav_slim_cleanup(chip);
+}
+
 static void xonar_ds_resume(struct oxygen *chip)
 {
 	wm8776_registers_init(chip);
@@ -264,6 +344,15 @@
 	xonar_ds_handle_hp_jack(chip);
 }
 
+static void xonar_hdav_slim_resume(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	wm8776_registers_init(chip);
+	xonar_hdmi_resume(chip, &data->hdmi);
+	xonar_enable_output(chip);
+}
+
 static void wm8776_adc_hardware_filter(unsigned int channel,
 				       struct snd_pcm_hardware *hardware)
 {
@@ -278,6 +367,13 @@
 	}
 }
 
+static void xonar_hdav_slim_hardware_filter(unsigned int channel,
+					    struct snd_pcm_hardware *hardware)
+{
+	wm8776_adc_hardware_filter(channel, hardware);
+	xonar_hdmi_pcm_hardware_filter(channel, hardware);
+}
+
 static void set_wm87x6_dac_params(struct oxygen *chip,
 				  struct snd_pcm_hw_params *params)
 {
@@ -294,6 +390,14 @@
 	wm8776_write_cached(chip, WM8776_MSTRCTRL, reg);
 }
 
+static void set_hdav_slim_dac_params(struct oxygen *chip,
+				     struct snd_pcm_hw_params *params)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	xonar_set_hdmi_params(chip, &data->hdmi, params);
+}
+
 static void update_wm8776_volume(struct oxygen *chip)
 {
 	struct xonar_wm87x6 *data = chip->model_data;
@@ -473,11 +577,6 @@
 	const char *const *names;
 
 	max = (ctl->private_value >> 12) & 0xf;
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = max + 1;
-	if (info->value.enumerated.item > max)
-		info->value.enumerated.item = max;
 	switch ((ctl->private_value >> 24) & 0x1f) {
 	case WM8776_ALCCTRL2:
 		names = hld;
@@ -501,8 +600,7 @@
 	default:
 		return -ENXIO;
 	}
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, max + 1, names);
 }
 
 static int wm8776_field_volume_info(struct snd_kcontrol *ctl,
@@ -759,13 +857,8 @@
 	static const char *const names[3] = {
 		"None", "Peak Limiter", "Automatic Level Control"
 	};
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item >= 3)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int wm8776_level_control_get(struct snd_kcontrol *ctl,
@@ -851,13 +944,7 @@
 		"None", "High-pass Filter"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -985,6 +1072,53 @@
 		.private_value = 0,
 	},
 };
+static const struct snd_kcontrol_new hdav_slim_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "HDMI Playback Switch",
+		.info = snd_ctl_boolean_mono_info,
+		.get = xonar_gpio_bit_switch_get,
+		.put = xonar_gpio_bit_switch_put,
+		.private_value = GPIO_SLIM_HDMI_DISABLE | XONAR_GPIO_BIT_INVERT,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphone Playback Volume",
+		.info = wm8776_hp_vol_info,
+		.get = wm8776_hp_vol_get,
+		.put = wm8776_hp_vol_put,
+		.tlv = { .p = wm8776_hp_db_scale },
+	},
+	WM8776_BIT_SWITCH("Headphone Playback Switch",
+			  WM8776_PWRDOWN, WM8776_HPPD, 1, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input Capture Volume",
+		.info = wm8776_input_vol_info,
+		.get = wm8776_input_vol_get,
+		.put = wm8776_input_vol_put,
+		.tlv = { .p = wm8776_adc_db_scale },
+	},
+	WM8776_BIT_SWITCH("Mic Capture Switch",
+			  WM8776_ADCMUX, 1 << 0, 0, 0),
+	WM8776_BIT_SWITCH("Aux Capture Switch",
+			  WM8776_ADCMUX, 1 << 1, 0, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Level Control Capture Enum",
+		.info = wm8776_level_control_info,
+		.get = wm8776_level_control_get,
+		.put = wm8776_level_control_put,
+		.private_value = 0,
+	},
+};
 static const struct snd_kcontrol_new lc_controls[] = {
 	WM8776_FIELD_CTL_VOLUME("Limiter Threshold",
 				WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf,
@@ -1028,6 +1162,26 @@
 				LC_CONTROL_ALC, wm8776_ngth_db_scale),
 };
 
+static int add_lc_controls(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+	struct snd_kcontrol *ctl;
+	int err;
+
+	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
+	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
+		ctl = snd_ctl_new1(&lc_controls[i], chip);
+		if (!ctl)
+			return -ENOMEM;
+		err = snd_ctl_add(chip->card, ctl);
+		if (err < 0)
+			return err;
+		data->lc_controls[i] = ctl;
+	}
+	return 0;
+}
+
 static int xonar_ds_mixer_init(struct oxygen *chip)
 {
 	struct xonar_wm87x6 *data = chip->model_data;
@@ -1049,17 +1203,54 @@
 	}
 	if (!data->line_adcmux_control || !data->mic_adcmux_control)
 		return -ENXIO;
-	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
-	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
-		ctl = snd_ctl_new1(&lc_controls[i], chip);
+
+	return add_lc_controls(chip);
+}
+
+static int xonar_hdav_slim_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	struct snd_kcontrol *ctl;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(hdav_slim_controls); ++i) {
+		ctl = snd_ctl_new1(&hdav_slim_controls[i], chip);
 		if (!ctl)
 			return -ENOMEM;
 		err = snd_ctl_add(chip->card, ctl);
 		if (err < 0)
 			return err;
-		data->lc_controls[i] = ctl;
 	}
-	return 0;
+
+	return add_lc_controls(chip);
+}
+
+static void dump_wm8776_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nWM8776:\n00:");
+	for (i = 0; i < 0x10; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8776_regs[i]);
+	snd_iprintf(buffer, "\n10:");
+	for (i = 0x10; i < 0x17; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8776_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_wm87x6_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+
+	dump_wm8776_registers(chip, buffer);
+	snd_iprintf(buffer, "\nWM8766:\n00:");
+	for (i = 0; i < 0x10; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8766_regs[i]);
+	snd_iprintf(buffer, "\n");
 }
 
 static const struct oxygen_model model_xonar_ds = {
@@ -1072,22 +1263,57 @@
 	.suspend = xonar_ds_suspend,
 	.resume = xonar_ds_resume,
 	.pcm_hardware_filter = wm8776_adc_hardware_filter,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_wm87x6_dac_params,
 	.set_adc_params = set_wm8776_adc_params,
 	.update_dac_volume = update_wm87x6_volume,
 	.update_dac_mute = update_wm87x6_mute,
 	.update_center_lfe_mix = update_wm8766_center_lfe_mix,
 	.gpio_changed = xonar_ds_gpio_changed,
+	.dump_registers = dump_wm87x6_registers,
 	.dac_tlv = wm87x6_dac_db_scale,
 	.model_data_size = sizeof(struct xonar_wm87x6),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
 			 CAPTURE_0_FROM_I2S_1,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
+
+static const struct oxygen_model model_xonar_hdav_slim = {
+	.shortname = "Xonar HDAV1.3 Slim",
+	.longname = "Asus Virtuoso 200",
+	.chip = "AV200",
+	.init = xonar_hdav_slim_init,
+	.mixer_init = xonar_hdav_slim_mixer_init,
+	.cleanup = xonar_hdav_slim_cleanup,
+	.suspend = xonar_hdav_slim_suspend,
+	.resume = xonar_hdav_slim_resume,
+	.pcm_hardware_filter = xonar_hdav_slim_hardware_filter,
+	.set_dac_params = set_hdav_slim_dac_params,
+	.set_adc_params = set_wm8776_adc_params,
+	.update_dac_volume = update_wm8776_volume,
+	.update_dac_mute = update_wm8776_mute,
+	.uart_input = xonar_hdmi_uart_input,
+	.dump_registers = dump_wm8776_registers,
+	.dac_tlv = wm87x6_dac_db_scale,
+	.model_data_size = sizeof(struct xonar_wm87x6),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_1,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 2,
+	.dac_volume_min = 255 - 2*60,
+	.dac_volume_max = 255,
+	.function_flags = OXYGEN_FUNCTION_2WIRE,
+	.dac_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
@@ -1099,6 +1325,9 @@
 	case 0x838e:
 		chip->model = model_xonar_ds;
 		break;
+	case 0x835e:
+		chip->model = model_xonar_hdav_slim;
+		break;
 	default:
 		return -EINVAL;
 	}
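
Several of the open-coded enumerated-control info callbacks touched above
(in xonar_wm87x6.c, and in ymfpci_main.c further down) collapse into a
single call to snd_ctl_enum_info().  A minimal sketch of the resulting
pattern, with a made-up control name and texts table:

	/* sketch only: the control and its two entries are hypothetical;
	 * snd_ctl_enum_info() fills type/count/items and copies the name
	 * of the currently queried item, clamping out-of-range indices. */
	static int example_route_info(struct snd_kcontrol *ctl,
				      struct snd_ctl_elem_info *info)
	{
		static const char *const texts[2] = { "Internal", "External" };

		return snd_ctl_enum_info(info, 1, 2, texts);
	}

The helper performs the same item clamping and name copy that the removed
open-coded versions did, so the behaviour of the controls is unchanged.
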
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 0b720cf..2d83324 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -60,6 +60,7 @@
 	        "{RME HDSP-9652},"
 		"{RME HDSP-9632}}");
 #ifdef HDSP_FW_LOADER
+MODULE_FIRMWARE("rpm_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware_rev11.bin");
 MODULE_FIRMWARE("digiface_firmware.bin");
@@ -81,6 +82,7 @@
 #define H9632_SS_CHANNELS	 12
 #define H9632_DS_CHANNELS	 8
 #define H9632_QS_CHANNELS	 4
+#define RPM_CHANNELS             6
 
 /* Write registers. These are defined as byte-offsets from the iobase value.
  */
@@ -191,6 +193,25 @@
 #define HDSP_PhoneGain1		  (1<<30)
 #define HDSP_QuadSpeed	  	  (1<<31)
 
+/* RPM uses some of the registers for special purposes */
+#define HDSP_RPM_Inp12            0x04A00
+#define HDSP_RPM_Inp12_Phon_6dB   0x00800  /* Dolby */
+#define HDSP_RPM_Inp12_Phon_0dB   0x00000  /* .. */
+#define HDSP_RPM_Inp12_Phon_n6dB  0x04000  /* inp_0 */
+#define HDSP_RPM_Inp12_Line_0dB   0x04200  /* Dolby+PRO */
+#define HDSP_RPM_Inp12_Line_n6dB  0x00200  /* PRO */
+
+#define HDSP_RPM_Inp34            0x32000
+#define HDSP_RPM_Inp34_Phon_6dB   0x20000  /* SyncRef1 */
+#define HDSP_RPM_Inp34_Phon_0dB   0x00000  /* .. */
+#define HDSP_RPM_Inp34_Phon_n6dB  0x02000  /* SyncRef2 */
+#define HDSP_RPM_Inp34_Line_0dB   0x30000  /* SyncRef1+SyncRef0 */
+#define HDSP_RPM_Inp34_Line_n6dB  0x10000  /* SyncRef0 */
+
+#define HDSP_RPM_Bypass           0x01000
+
+#define HDSP_RPM_Disconnect       0x00001
+
 #define HDSP_ADGainMask       (HDSP_ADGain0|HDSP_ADGain1)
 #define HDSP_ADGainMinus10dBV  HDSP_ADGainMask
 #define HDSP_ADGainPlus4dBu   (HDSP_ADGain0)
@@ -450,7 +471,7 @@
 	u32                   creg_spdif;
 	u32                   creg_spdif_stream;
 	int                   clock_source_locked;
-	char                 *card_name;	     /* digiface/multiface */
+	char                 *card_name;	 /* digiface/multiface/rpm */
 	enum HDSP_IO_Type     io_type;               /* ditto, but for code use */
         unsigned short        firmware_rev;
 	unsigned short	      state;		     /* stores state bits */
@@ -612,6 +633,7 @@
 	switch (hdsp->io_type) {
 	case Multiface:
 	case Digiface:
+	case RPM:
 	default:
 		if (hdsp->firmware_rev == 0xa)
 			return (64 * out) + (32 + (in));
@@ -629,6 +651,7 @@
 	switch (hdsp->io_type) {
 	case Multiface:
 	case Digiface:
+	case RPM:
 	default:
 		if (hdsp->firmware_rev == 0xa)
 			return (64 * out) + in;
@@ -655,7 +678,7 @@
 {
 	if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return 0;
 	if (hdsp_read (hdsp, HDSP_statusRegister) & HDSP_ConfigError) {
-		snd_printk ("Hammerfall-DSP: no Digiface or Multiface connected!\n");
+		snd_printk("Hammerfall-DSP: no IO box connected!\n");
 		hdsp->state &= ~HDSP_FirmwareLoaded;
 		return -EIO;
 	}
@@ -680,7 +703,7 @@
 		}
 	}
 
-	snd_printk("Hammerfall-DSP: no Digiface or Multiface connected!\n");
+	snd_printk("Hammerfall-DSP: no IO box connected!\n");
 	hdsp->state &= ~HDSP_FirmwareLoaded;
 	return -EIO;
 }
@@ -752,17 +775,21 @@
 		hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
 		hdsp_write (hdsp, HDSP_fifoData, 0);
 
-		if (hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT)) {
-			hdsp->io_type = Multiface;
-			hdsp_write (hdsp, HDSP_control2Reg, HDSP_VERSION_BIT);
-			hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
-			hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT);
+		if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT)) {
+			hdsp_write(hdsp, HDSP_control2Reg, HDSP_VERSION_BIT);
+			hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
+			if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT))
+				hdsp->io_type = RPM;
+			else
+				hdsp->io_type = Multiface;
 		} else {
 			hdsp->io_type = Digiface;
 		}
 	} else {
 		/* firmware was already loaded, get iobox type */
-		if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
+		if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
+			hdsp->io_type = RPM;
+		else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
 			hdsp->io_type = Multiface;
 		else
 			hdsp->io_type = Digiface;
@@ -1184,6 +1211,7 @@
 			hdsp->channel_map = channel_map_ds;
 	} else {
 		switch (hdsp->io_type) {
+		case RPM:
 		case Multiface:
 			hdsp->channel_map = channel_map_mf_ss;
 			break;
@@ -3231,6 +3259,318 @@
 HDSP_USE_MIDI_TASKLET("Use Midi Tasklet", 0),
 };
 
+
+static int hdsp_rpm_input12(struct hdsp *hdsp)
+{
+	switch (hdsp->control_register & HDSP_RPM_Inp12) {
+	case HDSP_RPM_Inp12_Phon_6dB:
+		return 0;
+	case HDSP_RPM_Inp12_Phon_n6dB:
+		return 2;
+	case HDSP_RPM_Inp12_Line_0dB:
+		return 3;
+	case HDSP_RPM_Inp12_Line_n6dB:
+		return 4;
+	}
+	return 1;
+}
+
+
+static int snd_hdsp_get_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.enumerated.item[0] = hdsp_rpm_input12(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_input12(struct hdsp *hdsp, int mode)
+{
+	hdsp->control_register &= ~HDSP_RPM_Inp12;
+	switch (mode) {
+	case 0:
+		hdsp->control_register |= HDSP_RPM_Inp12_Phon_6dB;
+		break;
+	case 1:
+		break;
+	case 2:
+		hdsp->control_register |= HDSP_RPM_Inp12_Phon_n6dB;
+		break;
+	case 3:
+		hdsp->control_register |= HDSP_RPM_Inp12_Line_0dB;
+		break;
+	case 4:
+		hdsp->control_register |= HDSP_RPM_Inp12_Line_n6dB;
+		break;
+	default:
+		return -1;
+	}
+
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.enumerated.item[0];
+	if (val < 0)
+		val = 0;
+	if (val > 4)
+		val = 4;
+	spin_lock_irq(&hdsp->lock);
+	if (val != hdsp_rpm_input12(hdsp))
+		change = (hdsp_set_rpm_input12(hdsp, val) == 0) ? 1 : 0;
+	else
+		change = 0;
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+static int snd_hdsp_info_rpm_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 5;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+
+static int hdsp_rpm_input34(struct hdsp *hdsp)
+{
+	switch (hdsp->control_register & HDSP_RPM_Inp34) {
+	case HDSP_RPM_Inp34_Phon_6dB:
+		return 0;
+	case HDSP_RPM_Inp34_Phon_n6dB:
+		return 2;
+	case HDSP_RPM_Inp34_Line_0dB:
+		return 3;
+	case HDSP_RPM_Inp34_Line_n6dB:
+		return 4;
+	}
+	return 1;
+}
+
+
+static int snd_hdsp_get_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.enumerated.item[0] = hdsp_rpm_input34(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_input34(struct hdsp *hdsp, int mode)
+{
+	hdsp->control_register &= ~HDSP_RPM_Inp34;
+	switch (mode) {
+	case 0:
+		hdsp->control_register |= HDSP_RPM_Inp34_Phon_6dB;
+		break;
+	case 1:
+		break;
+	case 2:
+		hdsp->control_register |= HDSP_RPM_Inp34_Phon_n6dB;
+		break;
+	case 3:
+		hdsp->control_register |= HDSP_RPM_Inp34_Line_0dB;
+		break;
+	case 4:
+		hdsp->control_register |= HDSP_RPM_Inp34_Line_n6dB;
+		break;
+	default:
+		return -1;
+	}
+
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.enumerated.item[0];
+	if (val < 0)
+		val = 0;
+	if (val > 4)
+		val = 4;
+	spin_lock_irq(&hdsp->lock);
+	if (val != hdsp_rpm_input34(hdsp))
+		change = (hdsp_set_rpm_input34(hdsp, val) == 0) ? 1 : 0;
+	else
+		change = 0;
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+/* RPM Bypass switch */
+static int hdsp_rpm_bypass(struct hdsp *hdsp)
+{
+	return (hdsp->control_register & HDSP_RPM_Bypass) ? 1 : 0;
+}
+
+
+static int snd_hdsp_get_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.integer.value[0] = hdsp_rpm_bypass(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_bypass(struct hdsp *hdsp, int on)
+{
+	if (on)
+		hdsp->control_register |= HDSP_RPM_Bypass;
+	else
+		hdsp->control_register &= ~HDSP_RPM_Bypass;
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	unsigned int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.integer.value[0] & 1;
+	spin_lock_irq(&hdsp->lock);
+	change = (int)val != hdsp_rpm_bypass(hdsp);
+	hdsp_set_rpm_bypass(hdsp, val);
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+static int snd_hdsp_info_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"On", "Off"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 2;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+
+/* RPM Disconnect switch */
+static int hdsp_rpm_disconnect(struct hdsp *hdsp)
+{
+	return (hdsp->control_register & HDSP_RPM_Disconnect) ? 1 : 0;
+}
+
+
+static int snd_hdsp_get_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.integer.value[0] = hdsp_rpm_disconnect(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_disconnect(struct hdsp *hdsp, int on)
+{
+	if (on)
+		hdsp->control_register |= HDSP_RPM_Disconnect;
+	else
+		hdsp->control_register &= ~HDSP_RPM_Disconnect;
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	unsigned int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.integer.value[0] & 1;
+	spin_lock_irq(&hdsp->lock);
+	change = (int)val != hdsp_rpm_disconnect(hdsp);
+	hdsp_set_rpm_disconnect(hdsp, val);
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+static int snd_hdsp_info_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"On", "Off"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 2;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+static struct snd_kcontrol_new snd_hdsp_rpm_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "RPM Bypass",
+		.get = snd_hdsp_get_rpm_bypass,
+		.put = snd_hdsp_put_rpm_bypass,
+		.info = snd_hdsp_info_rpm_bypass
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "RPM Disconnect",
+		.get = snd_hdsp_get_rpm_disconnect,
+		.put = snd_hdsp_put_rpm_disconnect,
+		.info = snd_hdsp_info_rpm_disconnect
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input 1/2",
+		.get = snd_hdsp_get_rpm_input12,
+		.put = snd_hdsp_put_rpm_input12,
+		.info = snd_hdsp_info_rpm_input
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input 3/4",
+		.get = snd_hdsp_get_rpm_input34,
+		.put = snd_hdsp_put_rpm_input34,
+		.info = snd_hdsp_info_rpm_input
+	},
+	HDSP_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+	HDSP_MIXER("Mixer", 0)
+};
+
 static struct snd_kcontrol_new snd_hdsp_96xx_aeb = HDSP_AEB("Analog Extension Board", 0);
 static struct snd_kcontrol_new snd_hdsp_adat_sync_check = HDSP_ADAT_SYNC_CHECK;
 
@@ -3240,6 +3580,16 @@
 	int err;
 	struct snd_kcontrol *kctl;
 
+	if (hdsp->io_type == RPM) {
+		/* RPM Bypass, Disconnect and Input switches */
+		for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_rpm_controls); idx++) {
+			err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_rpm_controls[idx], hdsp));
+			if (err < 0)
+				return err;
+		}
+		return 0;
+	}
+
 	for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_controls); idx++) {
 		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp))) < 0)
 			return err;
@@ -3459,48 +3809,102 @@
 
 	snd_iprintf(buffer, "\n");
 
-	switch (hdsp_spdif_in(hdsp)) {
-	case HDSP_SPDIFIN_OPTICAL:
-		snd_iprintf(buffer, "IEC958 input: Optical\n");
-		break;
-	case HDSP_SPDIFIN_COAXIAL:
-		snd_iprintf(buffer, "IEC958 input: Coaxial\n");
-		break;
-	case HDSP_SPDIFIN_INTERNAL:
-		snd_iprintf(buffer, "IEC958 input: Internal\n");
-		break;
-	case HDSP_SPDIFIN_AES:
-		snd_iprintf(buffer, "IEC958 input: AES\n");
-		break;
-	default:
-		snd_iprintf(buffer, "IEC958 input: ???\n");
-		break;
+	if (hdsp->io_type != RPM) {
+		switch (hdsp_spdif_in(hdsp)) {
+		case HDSP_SPDIFIN_OPTICAL:
+			snd_iprintf(buffer, "IEC958 input: Optical\n");
+			break;
+		case HDSP_SPDIFIN_COAXIAL:
+			snd_iprintf(buffer, "IEC958 input: Coaxial\n");
+			break;
+		case HDSP_SPDIFIN_INTERNAL:
+			snd_iprintf(buffer, "IEC958 input: Internal\n");
+			break;
+		case HDSP_SPDIFIN_AES:
+			snd_iprintf(buffer, "IEC958 input: AES\n");
+			break;
+		default:
+			snd_iprintf(buffer, "IEC958 input: ???\n");
+			break;
+		}
 	}
 
-	if (hdsp->control_register & HDSP_SPDIFOpticalOut)
-		snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
-	else
-		snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
+	if (RPM == hdsp->io_type) {
+		if (hdsp->control_register & HDSP_RPM_Bypass)
+			snd_iprintf(buffer, "RPM Bypass: disabled\n");
+		else
+			snd_iprintf(buffer, "RPM Bypass: enabled\n");
+		if (hdsp->control_register & HDSP_RPM_Disconnect)
+			snd_iprintf(buffer, "RPM disconnected\n");
+		else
+			snd_iprintf(buffer, "RPM connected\n");
 
-	if (hdsp->control_register & HDSP_SPDIFProfessional)
-		snd_iprintf(buffer, "IEC958 quality: Professional\n");
-	else
-		snd_iprintf(buffer, "IEC958 quality: Consumer\n");
+		switch (hdsp->control_register & HDSP_RPM_Inp12) {
+		case HDSP_RPM_Inp12_Phon_6dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, 6dB\n");
+			break;
+		case HDSP_RPM_Inp12_Phon_0dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, 0dB\n");
+			break;
+		case HDSP_RPM_Inp12_Phon_n6dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, -6dB\n");
+			break;
+		case HDSP_RPM_Inp12_Line_0dB:
+			snd_iprintf(buffer, "Input 1/2: Line, 0dB\n");
+			break;
+		case HDSP_RPM_Inp12_Line_n6dB:
+			snd_iprintf(buffer, "Input 1/2: Line, -6dB\n");
+			break;
+		default:
+			snd_iprintf(buffer, "Input 1/2: ???\n");
+		}
 
-	if (hdsp->control_register & HDSP_SPDIFEmphasis)
-		snd_iprintf(buffer, "IEC958 emphasis: on\n");
-	else
-		snd_iprintf(buffer, "IEC958 emphasis: off\n");
+		switch (hdsp->control_register & HDSP_RPM_Inp34) {
+		case HDSP_RPM_Inp34_Phon_6dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, 6dB\n");
+			break;
+		case HDSP_RPM_Inp34_Phon_0dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, 0dB\n");
+			break;
+		case HDSP_RPM_Inp34_Phon_n6dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, -6dB\n");
+			break;
+		case HDSP_RPM_Inp34_Line_0dB:
+			snd_iprintf(buffer, "Input 3/4: Line, 0dB\n");
+			break;
+		case HDSP_RPM_Inp34_Line_n6dB:
+			snd_iprintf(buffer, "Input 3/4: Line, -6dB\n");
+			break;
+		default:
+			snd_iprintf(buffer, "Input 3/4: ???\n");
+		}
 
-	if (hdsp->control_register & HDSP_SPDIFNonAudio)
-		snd_iprintf(buffer, "IEC958 NonAudio: on\n");
-	else
-		snd_iprintf(buffer, "IEC958 NonAudio: off\n");
-	if ((x = hdsp_spdif_sample_rate (hdsp)) != 0)
-		snd_iprintf (buffer, "IEC958 sample rate: %d\n", x);
-	else
-		snd_iprintf (buffer, "IEC958 sample rate: Error flag set\n");
+	} else {
+		if (hdsp->control_register & HDSP_SPDIFOpticalOut)
+			snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
+		else
+			snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
 
+		if (hdsp->control_register & HDSP_SPDIFProfessional)
+			snd_iprintf(buffer, "IEC958 quality: Professional\n");
+		else
+			snd_iprintf(buffer, "IEC958 quality: Consumer\n");
+
+		if (hdsp->control_register & HDSP_SPDIFEmphasis)
+			snd_iprintf(buffer, "IEC958 emphasis: on\n");
+		else
+			snd_iprintf(buffer, "IEC958 emphasis: off\n");
+
+		if (hdsp->control_register & HDSP_SPDIFNonAudio)
+			snd_iprintf(buffer, "IEC958 NonAudio: on\n");
+		else
+			snd_iprintf(buffer, "IEC958 NonAudio: off\n");
+		x = hdsp_spdif_sample_rate(hdsp);
+		if (x != 0)
+			snd_iprintf(buffer, "IEC958 sample rate: %d\n", x);
+		else
+			snd_iprintf(buffer, "IEC958 sample rate: Error flag set\n");
+	}
 	snd_iprintf(buffer, "\n");
 
 	/* Sync Check */
@@ -3765,7 +4169,7 @@
 			snd_hdsp_midi_input_read (&hdsp->midi[0]);
 		}
 	}
-	if (hdsp->io_type != Multiface && hdsp->io_type != H9632 && midi1 && midi1status) {
+	if (hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632 && midi1 && midi1status) {
 		if (hdsp->use_midi_tasklet) {
 			/* we disable interrupts for this input until processing is done */
 			hdsp->control_register &= ~HDSP_Midi1InterruptEnable;
@@ -4093,7 +4497,7 @@
 				 SNDRV_PCM_RATE_96000),
 	.rate_min =		32000,
 	.rate_max =		96000,
-	.channels_min =		14,
+	.channels_min =		6,
 	.channels_max =		HDSP_MAX_CHANNELS,
 	.buffer_bytes_max =	HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
 	.period_bytes_min =	(64 * 4) * 10,
@@ -4122,7 +4526,7 @@
 				 SNDRV_PCM_RATE_96000),
 	.rate_min =		32000,
 	.rate_max =		96000,
-	.channels_min =		14,
+	.channels_min =		5,
 	.channels_max =		HDSP_MAX_CHANNELS,
 	.buffer_bytes_max =	HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
 	.period_bytes_min =	(64 * 4) * 10,
@@ -4357,10 +4761,12 @@
 			     snd_hdsp_hw_rule_rate_out_channels, hdsp,
 			     SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 
-	hdsp->creg_spdif_stream = hdsp->creg_spdif;
-	hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
-	snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
-		       SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	if (RPM != hdsp->io_type) {
+		hdsp->creg_spdif_stream = hdsp->creg_spdif;
+		hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+		snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
+			SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	}
 	return 0;
 }
 
@@ -4375,9 +4781,11 @@
 
 	spin_unlock_irq(&hdsp->lock);
 
-	hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
-	snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
-		       SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	if (RPM != hdsp->io_type) {
+		hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+		snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
+			SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	}
 	return 0;
 }
 
@@ -4616,7 +5024,7 @@
 		if (hdsp->io_type != H9632)
 		    info.adatsync_sync_check = (unsigned char)hdsp_adatsync_sync_check(hdsp);
 		info.spdif_sync_check = (unsigned char)hdsp_spdif_sync_check(hdsp);
-		for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != H9632) ? 3 : 1); ++i)
+		for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632) ? 3 : 1); ++i)
 			info.adat_sync_check[i] = (unsigned char)hdsp_adat_sync_check(hdsp, i);
 		info.spdif_in = (unsigned char)hdsp_spdif_in(hdsp);
 		info.spdif_out = (unsigned char)hdsp_spdif_out(hdsp);
@@ -4636,6 +5044,9 @@
 			info.phone_gain = (unsigned char)hdsp_phone_gain(hdsp);
 			info.xlr_breakout_cable = (unsigned char)hdsp_xlr_breakout_cable(hdsp);
 
+		} else if (hdsp->io_type == RPM) {
+			info.da_gain = (unsigned char) hdsp_rpm_input12(hdsp);
+			info.ad_gain = (unsigned char) hdsp_rpm_input34(hdsp);
 		}
 		if (hdsp->io_type == H9632 || hdsp->io_type == H9652)
 			info.analog_extension_board = (unsigned char)hdsp_aeb(hdsp);
@@ -4844,6 +5255,14 @@
 		hdsp->ds_in_channels = hdsp->ds_out_channels = MULTIFACE_DS_CHANNELS;
 		break;
 
+	case RPM:
+		hdsp->card_name = "RME Hammerfall DSP + RPM";
+		hdsp->ss_in_channels = RPM_CHANNELS-1;
+		hdsp->ss_out_channels = RPM_CHANNELS;
+		hdsp->ds_in_channels = RPM_CHANNELS-1;
+		hdsp->ds_out_channels = RPM_CHANNELS;
+		break;
+
 	default:
  		/* should never get here */
 		break;
@@ -4930,6 +5349,9 @@
 
 	/* caution: max length of firmware filename is 30! */
 	switch (hdsp->io_type) {
+	case RPM:
+		fwfile = "rpm_firmware.bin";
+		break;
 	case Multiface:
 		if (hdsp->firmware_rev == 0xa)
 			fwfile = "multiface_firmware.bin";
@@ -5100,7 +5522,9 @@
 			return 0;
 		} else {
 			snd_printk(KERN_INFO "Hammerfall-DSP: Firmware already present, initializing card.\n");
-			if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
+			if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
+				hdsp->io_type = RPM;
+			else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
 				hdsp->io_type = Multiface;
 			else
 				hdsp->io_type = Digiface;
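
The RPM-before-Multiface ordering above appears twice in hdsp.c, once in
the iobox detection path and once in the late firmware-present path.
Condensed into one illustrative helper (not part of the patch) it reads
roughly as follows, using the HDSP_version1/HDSP_version2 status bits that
the patch relies on:

	/* sketch: once firmware is present, the status2 version bits tell
	 * the three supported I/O boxes apart */
	static enum HDSP_IO_Type hdsp_detect_iobox(struct hdsp *hdsp)
	{
		unsigned int status2 = hdsp_read(hdsp, HDSP_status2Register);

		if (status2 & HDSP_version2)
			return RPM;
		if (status2 & HDSP_version1)
			return Multiface;
		return Digiface;
	}
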
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0c98ef9..f5eadfc 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -487,7 +487,7 @@
 	struct snd_kcontrol *playback_mixer_ctls[HDSPM_MAX_CHANNELS];
 	/* but input to much, so not used */
 	struct snd_kcontrol *input_mixer_ctls[HDSPM_MAX_CHANNELS];
-	/* full mixer accessable over mixer ioctl or hwdep-device */
+	/* full mixer accessible over mixer ioctl or hwdep-device */
 	struct hdspm_mixer *mixer;
 
 };
@@ -550,7 +550,7 @@
 	return bit2freq_tab[n];
 }
 
-/* Write/read to/from HDSPM with Adresses in Bytes
+/* Write/read to/from HDSPM with Addresses in Bytes
    not words but only 32Bit writes are allowed */
 
 static inline void hdspm_write(struct hdspm * hdspm, unsigned int reg,
@@ -2908,7 +2908,7 @@
 
 	/* Channel playback mixer as default control 
 	   Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders,
-	   thats too * big for any alsamixer they are accesible via special
+	   that's too big for any alsamixer; they are accessible via special
 	   IOCTL on hwdep and the mixer 2dimensional mixer control
 	*/
 
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 5518371..c94c051 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -1389,15 +1389,9 @@
 
 static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info)
 {
-	static char *texts[3] = {"AC'97", "IEC958", "ZV Port"};
+	static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, texts);
 }
 
 static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value)
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 581a670..edce8a2 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -51,7 +51,7 @@
 static int snd_ps3_start_delay = CONFIG_SND_PS3_DEFAULT_START_DELAY;
 
 module_param_named(start_delay, snd_ps3_start_delay, uint, 0644);
-MODULE_PARM_DESC(start_delay, "time to insert silent data in milisec");
+MODULE_PARM_DESC(start_delay, "time to insert silent data in ms");
 
 static int index = SNDRV_DEFAULT_IDX1;
 static char *id = SNDRV_DEFAULT_STR1;
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 3e598e7..a3efc52 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -20,6 +20,21 @@
 
 if SND_SOC
 
+config SND_SOC_CACHE_LZO
+	bool "Support LZO compression for register caches"
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
+	---help---
+	   Select this to enable LZO compression for register caches.
+	   This will allow machine or CODEC drivers to compress register
+	   caches in memory, reducing the memory consumption at the
+	   expense of performance.  If this option is not enabled but is
+	   requested, the system falls back to uncompressed caches.
+
+	   Usually it is safe to disable this option; where cache
+	   compression is used, the rbtree option will typically perform
+	   better.
+
 config SND_SOC_AC97_BUS
 	bool
 
@@ -36,7 +51,7 @@
 source "sound/soc/omap/Kconfig"
 source "sound/soc/kirkwood/Kconfig"
 source "sound/soc/pxa/Kconfig"
-source "sound/soc/s3c24xx/Kconfig"
+source "sound/soc/samsung/Kconfig"
 source "sound/soc/s6000/Kconfig"
 source "sound/soc/sh/Kconfig"
 source "sound/soc/txx9/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index eb18344..ce913bf 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -14,7 +14,7 @@
 obj-$(CONFIG_SND_SOC)	+= omap/
 obj-$(CONFIG_SND_SOC)	+= kirkwood/
 obj-$(CONFIG_SND_SOC)	+= pxa/
-obj-$(CONFIG_SND_SOC)	+= s3c24xx/
+obj-$(CONFIG_SND_SOC)	+= samsung/
 obj-$(CONFIG_SND_SOC)	+= s6000/
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= txx9/
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
index 5f4e59f..1aac2f4 100644
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ b/sound/soc/atmel/playpaq_wm8510.c
@@ -33,7 +33,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/at32ap700x.h>
 #include <mach/portmux.h>
@@ -318,27 +317,28 @@
 static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 
 	/*
 	 * Add DAPM widgets
 	 */
 	for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-		snd_soc_dapm_new_control(codec, &playpaq_dapm_widgets[i]);
+		snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
 
 
 
 	/*
 	 * Setup audio path interconnects
 	 */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 
 
 	/* always connected pins */
-	snd_soc_dapm_enable_pin(codec, "Int Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_enable_pin(dapm, "Int Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_sync(dapm);
 
 
 
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index e521ada..af3c730 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -44,7 +44,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -140,6 +139,7 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	printk(KERN_DEBUG
@@ -154,25 +154,25 @@
 	}
 
 	/* Add specific widgets */
-	snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, at91sam9g20ek_dapm_widgets,
 				  ARRAY_SIZE(at91sam9g20ek_dapm_widgets));
 	/* Set up specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	/* not connected */
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
 
 #ifdef ENABLE_MIC_INPUT
-	snd_soc_dapm_enable_pin(codec, "Int Mic");
+	snd_soc_dapm_enable_pin(dapm, "Int Mic");
 #else
-	snd_soc_dapm_nc_pin(codec, "Int Mic");
+	snd_soc_dapm_nc_pin(dapm, "Int Mic");
 #endif
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
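
The ASoC machine-driver changes in this series all follow the same
conversion: DAPM widgets, routes and pin state are now attached to the
codec's struct snd_soc_dapm_context instead of the codec itself.  A minimal
sketch of a converted init callback (the board_* tables and pin names are
placeholders, not taken from any driver above):

	static int example_board_init(struct snd_soc_pcm_runtime *rtd)
	{
		struct snd_soc_codec *codec = rtd->codec;
		struct snd_soc_dapm_context *dapm = &codec->dapm;

		snd_soc_dapm_new_controls(dapm, board_dapm_widgets,
					  ARRAY_SIZE(board_dapm_widgets));
		snd_soc_dapm_add_routes(dapm, board_audio_map,
					ARRAY_SIZE(board_audio_map));

		/* pins that are permanently wired on this (imaginary) board */
		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
		snd_soc_dapm_nc_pin(dapm, "LLINEIN");

		return snd_soc_dapm_sync(dapm);
	}
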
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
index 86e0f85..da2208e 100644
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ b/sound/soc/atmel/snd-soc-afeb9260.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -105,19 +104,20 @@
 static int afeb9260_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add afeb9260 specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up afeb9260 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index b62fcd3..cb99f04 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -13,7 +13,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index 2394bff..83012da 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -20,7 +20,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c
index e4a6253..d3ccb92 100644
--- a/sound/soc/blackfin/bf5xx-ad193x.c
+++ b/sound/soc/blackfin/bf5xx-ad193x.c
@@ -29,7 +29,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index 900ced5..732fb8b 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -35,7 +35,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index 36f2769..e902b24 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -33,7 +33,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/dma.h>
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 01d19e9..06b6981 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -19,10 +19,10 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <sound/jack.h>
+#include <trace/events/asoc.h>
 
 #include "88pm860x-codec.h"
 
@@ -146,7 +146,6 @@
 
 	int			irq[4];
 	unsigned char		name[4][MAX_NAME_LEN];
-	unsigned char		reg_cache[REG_CACHE_SIZE];
 };
 
 /* -9450dB to 0dB in 150dB steps ( mute instead of -9450dB) */
@@ -1172,7 +1171,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable Audio PLL & Audio section */
 			data = AUDIO_PLL | AUDIO_SECTION_RESET
 				| AUDIO_SECTION_ON;
@@ -1185,7 +1184,7 @@
 		pm860x_set_bits(codec->control_data, REG_MISC2, data, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1263,6 +1262,12 @@
 	mask = pm860x->det.hs_shrt | pm860x->det.hook_det | pm860x->det.lo_shrt
 		| pm860x->det.hp_det;
 
+#ifndef CONFIG_SND_SOC_88PM860X_MODULE
+	if (status & (HEADSET_STATUS | MIC_STATUS | SHORT_HS1 | SHORT_HS2 |
+		      SHORT_LO1 | SHORT_LO2))
+		trace_snd_soc_jack_irq(dev_name(pm860x->codec->dev));
+#endif
+
 	if ((pm860x->det.hp_det & SND_JACK_HEADPHONE)
 		&& (status & HEADSET_STATUS))
 		report |= SND_JACK_HEADPHONE;
@@ -1346,6 +1351,7 @@
 static int pm860x_probe(struct snd_soc_codec *codec)
 {
 	struct pm860x_priv *pm860x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i, ret;
 
 	pm860x->codec = codec;
@@ -1358,7 +1364,7 @@
 					   pm860x->name[i], pm860x);
 		if (ret < 0) {
 			dev_err(codec->dev, "Failed to request IRQ!\n");
-			goto out_irq;
+			goto out;
 		}
 	}
 
@@ -1369,22 +1375,20 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "Failed to fill register cache: %d\n",
 			ret);
-		goto out_codec;
+		goto out;
 	}
 
 	snd_soc_add_controls(codec, pm860x_snd_controls,
 			     ARRAY_SIZE(pm860x_snd_controls));
-	snd_soc_dapm_new_controls(codec, pm860x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, pm860x_dapm_widgets,
 				  ARRAY_SIZE(pm860x_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	return 0;
 
-out_codec:
-	i = 3;
-out_irq:
-	for (; i >= 0; i--)
+out:
+	while (--i >= 0)
 		free_irq(pm860x->irq[i], pm860x);
-	return -EINVAL;
+	return ret;
 }
 
 static int pm860x_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 3b5690d..883a312 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -22,6 +22,7 @@
 	select SND_SOC_AK4535 if I2C
 	select SND_SOC_AK4642 if I2C
 	select SND_SOC_AK4671 if I2C
+	select SND_SOC_ALC5623 if I2C
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS42L51 if I2C
 	select SND_SOC_CS4270 if I2C
@@ -54,9 +55,11 @@
 	select SND_SOC_WM8727
 	select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
+	select SND_SOC_WM8737 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8741 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
+	select SND_SOC_WM8770 if SPI_MASTER
 	select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8804 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8900 if I2C
@@ -75,6 +78,7 @@
 	select SND_SOC_WM8990 if I2C
 	select SND_SOC_WM8993 if I2C
 	select SND_SOC_WM8994 if MFD_WM8994
+	select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM9081 if I2C
 	select SND_SOC_WM9090 if I2C
 	select SND_SOC_WM9705 if SND_SOC_AC97_BUS
@@ -130,6 +134,9 @@
 config SND_SOC_AK4671
 	tristate
 
+config SND_SOC_ALC5623
+       tristate
+
 config SND_SOC_CQ0093VC
 	tristate
 
@@ -160,6 +167,9 @@
 config SND_SOC_DA7210
         tristate
 
+config SND_SOC_DMIC
+	tristate
+
 config SND_SOC_MAX98088
        tristate
 
@@ -231,6 +241,9 @@
 config SND_SOC_WM8731
 	tristate
 
+config SND_SOC_WM8737
+	tristate
+
 config SND_SOC_WM8741
 	tristate
 
@@ -240,6 +253,9 @@
 config SND_SOC_WM8753
 	tristate
 
+config SND_SOC_WM8770
+	tristate
+
 config SND_SOC_WM8776
 	tristate
 
@@ -294,6 +310,9 @@
 config SND_SOC_WM8994
 	tristate
 
+config SND_SOC_WM8995
+	tristate
+
 config SND_SOC_WM9081
 	tristate
 
@@ -318,3 +337,4 @@
 
 config SND_SOC_WM9090
 	tristate
+
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f67a2d6..579af9c 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -14,9 +14,11 @@
 snd-soc-cs4270-objs := cs4270.o
 snd-soc-cx20442-objs := cx20442.o
 snd-soc-da7210-objs := da7210.o
+snd-soc-dmic-objs := dmic.o
 snd-soc-l3-objs := l3.o
 snd-soc-max98088-objs := max98088.o
 snd-soc-pcm3008-objs := pcm3008.o
+snd-soc-alc5623-objs := alc5623.o
 snd-soc-spdif-objs := spdif_transciever.o
 snd-soc-ssm2602-objs := ssm2602.o
 snd-soc-stac9766-objs := stac9766.o
@@ -38,9 +40,11 @@
 snd-soc-wm8727-objs := wm8727.o
 snd-soc-wm8728-objs := wm8728.o
 snd-soc-wm8731-objs := wm8731.o
+snd-soc-wm8737-objs := wm8737.o
 snd-soc-wm8741-objs := wm8741.o
 snd-soc-wm8750-objs := wm8750.o
 snd-soc-wm8753-objs := wm8753.o
+snd-soc-wm8770-objs := wm8770.o
 snd-soc-wm8776-objs := wm8776.o
 snd-soc-wm8804-objs := wm8804.o
 snd-soc-wm8900-objs := wm8900.o
@@ -58,7 +62,8 @@
 snd-soc-wm8988-objs := wm8988.o
 snd-soc-wm8990-objs := wm8990.o
 snd-soc-wm8993-objs := wm8993.o
-snd-soc-wm8994-objs := wm8994.o
+snd-soc-wm8994-objs := wm8994.o wm8994-tables.o
+snd-soc-wm8995-objs := wm8995.o
 snd-soc-wm9081-objs := wm9081.o
 snd-soc-wm9705-objs := wm9705.o
 snd-soc-wm9712-objs := wm9712.o
@@ -88,10 +93,12 @@
 obj-$(CONFIG_SND_SOC_CS4270)	+= snd-soc-cs4270.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
 obj-$(CONFIG_SND_SOC_DA7210)	+= snd-soc-da7210.o
+obj-$(CONFIG_SND_SOC_DMIC)	+= snd-soc-dmic.o
 obj-$(CONFIG_SND_SOC_L3)	+= snd-soc-l3.o
 obj-$(CONFIG_SND_SOC_JZ4740_CODEC)	+= snd-soc-jz4740-codec.o
 obj-$(CONFIG_SND_SOC_MAX98088)	+= snd-soc-max98088.o
 obj-$(CONFIG_SND_SOC_PCM3008)	+= snd-soc-pcm3008.o
+obj-$(CONFIG_SND_SOC_ALC5623)    += snd-soc-alc5623.o
 obj-$(CONFIG_SND_SOC_SPDIF)	+= snd-soc-spdif.o
 obj-$(CONFIG_SND_SOC_SSM2602)	+= snd-soc-ssm2602.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
@@ -113,9 +120,11 @@
 obj-$(CONFIG_SND_SOC_WM8727)	+= snd-soc-wm8727.o
 obj-$(CONFIG_SND_SOC_WM8728)	+= snd-soc-wm8728.o
 obj-$(CONFIG_SND_SOC_WM8731)	+= snd-soc-wm8731.o
+obj-$(CONFIG_SND_SOC_WM8737)	+= snd-soc-wm8737.o
 obj-$(CONFIG_SND_SOC_WM8741)	+= snd-soc-wm8741.o
 obj-$(CONFIG_SND_SOC_WM8750)	+= snd-soc-wm8750.o
 obj-$(CONFIG_SND_SOC_WM8753)	+= snd-soc-wm8753.o
+obj-$(CONFIG_SND_SOC_WM8770)	+= snd-soc-wm8770.o
 obj-$(CONFIG_SND_SOC_WM8776)	+= snd-soc-wm8776.o
 obj-$(CONFIG_SND_SOC_WM8804)	+= snd-soc-wm8804.o
 obj-$(CONFIG_SND_SOC_WM8900)	+= snd-soc-wm8900.o
@@ -134,6 +143,7 @@
 obj-$(CONFIG_SND_SOC_WM8990)	+= snd-soc-wm8990.o
 obj-$(CONFIG_SND_SOC_WM8993)	+= snd-soc-wm8993.o
 obj-$(CONFIG_SND_SOC_WM8994)	+= snd-soc-wm8994.o
+obj-$(CONFIG_SND_SOC_WM8995)	+= snd-soc-wm8995.o
 obj-$(CONFIG_SND_SOC_WM9081)	+= snd-soc-wm9081.o
 obj-$(CONFIG_SND_SOC_WM9705)	+= snd-soc-wm9705.o
 obj-$(CONFIG_SND_SOC_WM9712)	+= snd-soc-wm9712.o
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index d272534..ab63d52 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -27,7 +27,6 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include <sound/soc-dapm.h>
 #include <linux/spi/spi.h>
 #include "ad1836.h"
 
@@ -220,6 +219,7 @@
 static int ad1836_probe(struct snd_soc_codec *codec)
 {
 	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 
 	codec->control_data = ad1836->control_data;
@@ -227,7 +227,6 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to set cache I/O: %d\n",
 				ret);
-		kfree(ad1836);
 		return ret;
 	}
 
@@ -252,9 +251,9 @@
 
 	snd_soc_add_controls(codec, ad1836_snd_controls,
 			     ARRAY_SIZE(ad1836_snd_controls));
-	snd_soc_dapm_new_controls(codec, ad1836_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, ad1836_dapm_widgets,
 				  ARRAY_SIZE(ad1836_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index fa2834c..da46479 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -19,12 +19,10 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include <sound/soc-dapm.h>
 #include "ad193x.h"
 
 /* codec private data */
 struct ad193x_priv {
-	u8 reg_cache[AD193X_NUM_REGS];
 	enum snd_soc_control_type bus_type;
 	void *control_data;
 	int sysclk;
@@ -353,6 +351,7 @@
 static int ad193x_probe(struct snd_soc_codec *codec)
 {
 	struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	codec->control_data = ad193x->control_data;
@@ -363,7 +362,6 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to set cache I/O: %d\n",
 				ret);
-		kfree(ad193x);
 		return ret;
 	}
 
@@ -385,9 +383,9 @@
 
 	snd_soc_add_controls(codec, ad193x_snd_controls,
 			     ARRAY_SIZE(ad193x_snd_controls));
-	snd_soc_dapm_new_controls(codec, ad193x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, ad193x_dapm_widgets,
 				  ARRAY_SIZE(ad193x_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 410ccd5..34cb51e 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -29,7 +29,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "ad1980.h"
 
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index cd88c8f..8b38739 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "ak4535.h"
@@ -290,10 +289,11 @@
 
 static int ak4535_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ak4535_dapm_widgets,
-				  ARRAY_SIZE(ak4535_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, ak4535_dapm_widgets,
+				  ARRAY_SIZE(ak4535_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -366,9 +366,9 @@
 static int ak4535_mute(struct snd_soc_dai *dai, int mute)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
+	u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
 	if (!mute)
-		ak4535_write(codec, AK4535_DAC, mute_reg);
+		ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20);
 	else
 		ak4535_write(codec, AK4535_DAC, mute_reg | 0x20);
 	return 0;
@@ -381,11 +381,11 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
-		ak4535_write(codec, AK4535_DAC, mute_reg);
+		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
+		ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20);
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
+		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
 		ak4535_write(codec, AK4535_DAC, mute_reg | 0x20);
 		break;
 	case SND_SOC_BIAS_STANDBY:
@@ -399,7 +399,7 @@
 		ak4535_write(codec, AK4535_PM1, i & (~0x80));
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 90c90b7..f00eba3 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -26,7 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 24f5f49..2ec75ab 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -17,7 +17,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -28,7 +27,6 @@
 struct ak4671_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[AK4671_CACHEREGNUM];
 };
 
 /* ak4671 register cache & default register settings */
@@ -437,10 +435,11 @@
 
 static int ak4671_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ak4671_dapm_widgets,
-				  ARRAY_SIZE(ak4671_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, ak4671_dapm_widgets,
+				  ARRAY_SIZE(ak4671_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -602,7 +601,7 @@
 		snd_soc_write(codec, AK4671_AD_DA_POWER_MANAGEMENT, 0x00);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
new file mode 100644
index 0000000..4f377c9
--- /dev/null
+++ b/sound/soc/codecs/alc5623.c
@@ -0,0 +1,1117 @@
+/*
+ * alc5623.c  --  alc562[123] ALSA SoC Audio driver
+ *
+ * Copyright 2008 Realtek Microelectronics
+ * Author: flove <flove@realtek.com> Ethan <eku@marvell.com>
+ *
+ * Copyright 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *
+ * Based on WM8753.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/alc5623.h>
+
+#include "alc5623.h"
+
+static int caps_charge = 2000;
+module_param(caps_charge, int, 0);
+MODULE_PARM_DESC(caps_charge, "ALC5623 cap charge time (msecs)");
+
+/* codec private data */
+struct alc5623_priv {
+	enum snd_soc_control_type control_type;
+	void *control_data;
+	struct mutex mutex;
+	u8 id;
+	unsigned int sysclk;
+	u16 reg_cache[ALC5623_VENDOR_ID2+2];
+	unsigned int add_ctrl;
+	unsigned int jack_det_ctrl;
+};
+
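+/* read every cached register back from the chip to seed the register cache */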
+static void alc5623_fill_cache(struct snd_soc_codec *codec)
+{
+	int i, step = codec->driver->reg_cache_step;
+	u16 *cache = codec->reg_cache;
+
+	/* not really efficient ... */
+	for (i = 0 ; i < codec->driver->reg_cache_size ; i += step)
+		cache[i] = codec->hw_read(codec, i);
+}
+
+static inline int alc5623_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, ALC5623_RESET, 0);
+}
+
+static int amp_mixer_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	/*
+	 * To power the class-D amp generators/speaker on or off we need to
+	 * write the 'index-46h' register: write the index (0x46) to
+	 * reg 0x6a, then 0xffff (on) or 0 (off) to reg 0x6c.
+	 */
+	snd_soc_write(w->codec, ALC5623_HID_CTRL_INDEX, 0x46);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0xFFFF);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * ALC5623 Controls
+ */
+
+static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
+static const unsigned int boost_tlv[] = {
+	TLV_DB_RANGE_HEAD(3),
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new rt5621_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Speaker Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Speaker Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Headphone Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Headphone Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5622_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Speaker Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Speaker Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Line Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Line Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Line Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Line Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Headphone Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Headphone Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_snd_controls[] = {
+	SOC_DOUBLE_TLV("Auxout Playback Volume",
+			ALC5623_MONO_AUX_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Auxout Playback Switch",
+			ALC5623_MONO_AUX_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("PCM Playback Volume",
+			ALC5623_STEREO_DAC_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("AuxI Capture Volume",
+			ALC5623_AUXIN_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("LineIn Capture Volume",
+			ALC5623_LINE_IN_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_SINGLE_TLV("Mic1 Capture Volume",
+			ALC5623_MIC_VOL, 8, 31, 1, vol_tlv),
+	SOC_SINGLE_TLV("Mic2 Capture Volume",
+			ALC5623_MIC_VOL, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("Rec Capture Volume",
+			ALC5623_ADC_REC_GAIN, 7, 0, 31, 0, adc_rec_tlv),
+	SOC_SINGLE_TLV("Mic 1 Boost Volume",
+			ALC5623_MIC_CTRL, 10, 2, 0, boost_tlv),
+	SOC_SINGLE_TLV("Mic 2 Boost Volume",
+			ALC5623_MIC_CTRL, 8, 2, 0, boost_tlv),
+	SOC_SINGLE_TLV("Digital Boost Volume",
+			ALC5623_ADD_CTRL_REG, 4, 3, 0, dig_tlv),
+};
+
+/*
+ * DAPM Controls
+ */
+static const struct snd_kcontrol_new alc5623_hp_mixer_controls[] = {
+SOC_DAPM_SINGLE("LI2HP Playback Switch", ALC5623_LINE_IN_VOL, 15, 1, 1),
+SOC_DAPM_SINGLE("AUXI2HP Playback Switch", ALC5623_AUXIN_VOL, 15, 1, 1),
+SOC_DAPM_SINGLE("MIC12HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 15, 1, 1),
+SOC_DAPM_SINGLE("MIC22HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 7, 1, 1),
+SOC_DAPM_SINGLE("DAC2HP Playback Switch", ALC5623_STEREO_DAC_VOL, 15, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_hpl_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2HP_L Playback Switch", ALC5623_ADC_REC_GAIN, 15, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_hpr_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2HP_R Playback Switch", ALC5623_ADC_REC_GAIN, 14, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_mono_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2MONO_L Playback Switch", ALC5623_ADC_REC_GAIN, 13, 1, 1),
+SOC_DAPM_SINGLE("ADC2MONO_R Playback Switch", ALC5623_ADC_REC_GAIN, 12, 1, 1),
+SOC_DAPM_SINGLE("LI2MONO Playback Switch", ALC5623_LINE_IN_VOL, 13, 1, 1),
+SOC_DAPM_SINGLE("AUXI2MONO Playback Switch", ALC5623_AUXIN_VOL, 13, 1, 1),
+SOC_DAPM_SINGLE("MIC12MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 13, 1, 1),
+SOC_DAPM_SINGLE("MIC22MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 5, 1, 1),
+SOC_DAPM_SINGLE("DAC2MONO Playback Switch", ALC5623_STEREO_DAC_VOL, 13, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_speaker_mixer_controls[] = {
+SOC_DAPM_SINGLE("LI2SPK Playback Switch", ALC5623_LINE_IN_VOL, 14, 1, 1),
+SOC_DAPM_SINGLE("AUXI2SPK Playback Switch", ALC5623_AUXIN_VOL, 14, 1, 1),
+SOC_DAPM_SINGLE("MIC12SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 14, 1, 1),
+SOC_DAPM_SINGLE("MIC22SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 6, 1, 1),
+SOC_DAPM_SINGLE("DAC2SPK Playback Switch", ALC5623_STEREO_DAC_VOL, 14, 1, 1),
+};
+
+/* Left Record Mixer */
+static const struct snd_kcontrol_new alc5623_captureL_mixer_controls[] = {
+SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 14, 1, 1),
+SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 13, 1, 1),
+SOC_DAPM_SINGLE("LineInL Capture Switch", ALC5623_ADC_REC_MIXER, 12, 1, 1),
+SOC_DAPM_SINGLE("Left AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 11, 1, 1),
+SOC_DAPM_SINGLE("HPMixerL Capture Switch", ALC5623_ADC_REC_MIXER, 10, 1, 1),
+SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 9, 1, 1),
+SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 8, 1, 1),
+};
+
+/* Right Record Mixer */
+static const struct snd_kcontrol_new alc5623_captureR_mixer_controls[] = {
+SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 6, 1, 1),
+SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 5, 1, 1),
+SOC_DAPM_SINGLE("LineInR Capture Switch", ALC5623_ADC_REC_MIXER, 4, 1, 1),
+SOC_DAPM_SINGLE("Right AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 3, 1, 1),
+SOC_DAPM_SINGLE("HPMixerR Capture Switch", ALC5623_ADC_REC_MIXER, 2, 1, 1),
+SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 1, 1, 1),
+SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 0, 1, 1),
+};
+
+static const char *alc5623_spk_n_sour_sel[] = {
+		"RN/-R", "RP/+R", "LN/-R", "Vmid" };
+static const char *alc5623_hpl_out_input_sel[] = {
+		"Vmid", "HP Left Mix"};
+static const char *alc5623_hpr_out_input_sel[] = {
+		"Vmid", "HP Right Mix"};
+static const char *alc5623_spkout_input_sel[] = {
+		"Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"};
+static const char *alc5623_aux_out_input_sel[] = {
+		"Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"};
+
+/* auxout output mux */
+static const struct soc_enum alc5623_aux_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 6, 4, alc5623_aux_out_input_sel);
+static const struct snd_kcontrol_new alc5623_auxout_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_aux_out_input_enum);
+
+/* speaker output mux */
+static const struct soc_enum alc5623_spkout_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 10, 4, alc5623_spkout_input_sel);
+static const struct snd_kcontrol_new alc5623_spkout_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_spkout_input_enum);
+
+/* headphone left output mux */
+static const struct soc_enum alc5623_hpl_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 9, 2, alc5623_hpl_out_input_sel);
+static const struct snd_kcontrol_new alc5623_hpl_out_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_hpl_out_input_enum);
+
+/* headphone right output mux */
+static const struct soc_enum alc5623_hpr_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 8, 2, alc5623_hpr_out_input_sel);
+static const struct snd_kcontrol_new alc5623_hpr_out_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_hpr_out_input_enum);
+
+/* speaker output N select */
+static const struct soc_enum alc5623_spk_n_sour_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 14, 4, alc5623_spk_n_sour_sel);
+static const struct snd_kcontrol_new alc5623_spkoutn_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_spk_n_sour_enum);
+
+static const struct snd_soc_dapm_widget alc5623_dapm_widgets[] = {
+/* Muxes */
+SND_SOC_DAPM_MUX("AuxOut Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_auxout_mux_controls),
+SND_SOC_DAPM_MUX("SpeakerOut Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_spkout_mux_controls),
+SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_hpl_out_mux_controls),
+SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_hpr_out_mux_controls),
+SND_SOC_DAPM_MUX("SpeakerOut N Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_spkoutn_mux_controls),
+
+/* output mixers */
+SND_SOC_DAPM_MIXER("HP Mix", SND_SOC_NOPM, 0, 0,
+	&alc5623_hp_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hp_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPR Mix", ALC5623_PWR_MANAG_ADD2, 4, 0,
+	&alc5623_hpr_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hpr_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPL Mix", ALC5623_PWR_MANAG_ADD2, 5, 0,
+	&alc5623_hpl_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hpl_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPOut Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Mono Mix", ALC5623_PWR_MANAG_ADD2, 2, 0,
+	&alc5623_mono_mixer_controls[0],
+	ARRAY_SIZE(alc5623_mono_mixer_controls)),
+SND_SOC_DAPM_MIXER("Speaker Mix", ALC5623_PWR_MANAG_ADD2, 3, 0,
+	&alc5623_speaker_mixer_controls[0],
+	ARRAY_SIZE(alc5623_speaker_mixer_controls)),
+
+/* input mixers */
+SND_SOC_DAPM_MIXER("Left Capture Mix", ALC5623_PWR_MANAG_ADD2, 1, 0,
+	&alc5623_captureL_mixer_controls[0],
+	ARRAY_SIZE(alc5623_captureL_mixer_controls)),
+SND_SOC_DAPM_MIXER("Right Capture Mix", ALC5623_PWR_MANAG_ADD2, 0, 0,
+	&alc5623_captureR_mixer_controls[0],
+	ARRAY_SIZE(alc5623_captureR_mixer_controls)),
+
+SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
+	ALC5623_PWR_MANAG_ADD2, 9, 0),
+SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
+	ALC5623_PWR_MANAG_ADD2, 8, 0),
+SND_SOC_DAPM_MIXER("I2S Mix", ALC5623_PWR_MANAG_ADD1, 15, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("AuxI Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Line Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
+	ALC5623_PWR_MANAG_ADD2, 7, 0),
+SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
+	ALC5623_PWR_MANAG_ADD2, 6, 0),
+SND_SOC_DAPM_PGA("Left Headphone", ALC5623_PWR_MANAG_ADD3, 10, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Headphone", ALC5623_PWR_MANAG_ADD3, 9, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SpeakerOut", ALC5623_PWR_MANAG_ADD3, 12, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left AuxOut", ALC5623_PWR_MANAG_ADD3, 14, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right AuxOut", ALC5623_PWR_MANAG_ADD3, 13, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left LineIn", ALC5623_PWR_MANAG_ADD3, 7, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right LineIn", ALC5623_PWR_MANAG_ADD3, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left AuxI", ALC5623_PWR_MANAG_ADD3, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right AuxI", ALC5623_PWR_MANAG_ADD3, 4, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC1 PGA", ALC5623_PWR_MANAG_ADD3, 3, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC2 PGA", ALC5623_PWR_MANAG_ADD3, 2, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC1 Pre Amp", ALC5623_PWR_MANAG_ADD3, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC2 Pre Amp", ALC5623_PWR_MANAG_ADD3, 0, 0, NULL, 0),
+SND_SOC_DAPM_MICBIAS("Mic Bias1", ALC5623_PWR_MANAG_ADD1, 11, 0),
+
+SND_SOC_DAPM_OUTPUT("AUXOUTL"),
+SND_SOC_DAPM_OUTPUT("AUXOUTR"),
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+SND_SOC_DAPM_OUTPUT("SPKOUT"),
+SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+SND_SOC_DAPM_INPUT("LINEINL"),
+SND_SOC_DAPM_INPUT("LINEINR"),
+SND_SOC_DAPM_INPUT("AUXINL"),
+SND_SOC_DAPM_INPUT("AUXINR"),
+SND_SOC_DAPM_INPUT("MIC1"),
+SND_SOC_DAPM_INPUT("MIC2"),
+SND_SOC_DAPM_VMID("Vmid"),
+};
+
+static const char *alc5623_amp_names[] = {"AB Amp", "D Amp"};
+static const struct soc_enum alc5623_amp_enum =
+	SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 13, 2, alc5623_amp_names);
+static const struct snd_kcontrol_new alc5623_amp_mux_controls =
+	SOC_DAPM_ENUM("Route", alc5623_amp_enum);
+
+static const struct snd_soc_dapm_widget alc5623_dapm_amp_widgets[] = {
+SND_SOC_DAPM_PGA_E("D Amp", ALC5623_PWR_MANAG_ADD2, 14, 0, NULL, 0,
+	amp_mixer_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_PGA("AB Amp", ALC5623_PWR_MANAG_ADD2, 15, 0, NULL, 0),
+SND_SOC_DAPM_MUX("AB-D Amp Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_amp_mux_controls),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	/* virtual mixer - mixes left & right channels */
+	{"I2S Mix", NULL,				"Left DAC"},
+	{"I2S Mix", NULL,				"Right DAC"},
+	{"Line Mix", NULL,				"Right LineIn"},
+	{"Line Mix", NULL,				"Left LineIn"},
+	{"AuxI Mix", NULL,				"Left AuxI"},
+	{"AuxI Mix", NULL,				"Right AuxI"},
+	{"AUXOUTL", NULL,				"Left AuxOut"},
+	{"AUXOUTR", NULL,				"Right AuxOut"},
+
+	/* HP mixer */
+	{"HPL Mix", "ADC2HP_L Playback Switch",		"Left Capture Mix"},
+	{"HPL Mix", NULL,				"HP Mix"},
+	{"HPR Mix", "ADC2HP_R Playback Switch",		"Right Capture Mix"},
+	{"HPR Mix", NULL,				"HP Mix"},
+	{"HP Mix", "LI2HP Playback Switch",		"Line Mix"},
+	{"HP Mix", "AUXI2HP Playback Switch",		"AuxI Mix"},
+	{"HP Mix", "MIC12HP Playback Switch",		"MIC1 PGA"},
+	{"HP Mix", "MIC22HP Playback Switch",		"MIC2 PGA"},
+	{"HP Mix", "DAC2HP Playback Switch",		"I2S Mix"},
+
+	/* speaker mixer */
+	{"Speaker Mix", "LI2SPK Playback Switch",	"Line Mix"},
+	{"Speaker Mix", "AUXI2SPK Playback Switch",	"AuxI Mix"},
+	{"Speaker Mix", "MIC12SPK Playback Switch",	"MIC1 PGA"},
+	{"Speaker Mix", "MIC22SPK Playback Switch",	"MIC2 PGA"},
+	{"Speaker Mix", "DAC2SPK Playback Switch",	"I2S Mix"},
+
+	/* mono mixer */
+	{"Mono Mix", "ADC2MONO_L Playback Switch",	"Left Capture Mix"},
+	{"Mono Mix", "ADC2MONO_R Playback Switch",	"Right Capture Mix"},
+	{"Mono Mix", "LI2MONO Playback Switch",		"Line Mix"},
+	{"Mono Mix", "AUXI2MONO Playback Switch",	"AuxI Mix"},
+	{"Mono Mix", "MIC12MONO Playback Switch",	"MIC1 PGA"},
+	{"Mono Mix", "MIC22MONO Playback Switch",	"MIC2 PGA"},
+	{"Mono Mix", "DAC2MONO Playback Switch",	"I2S Mix"},
+
+	/* Left record mixer */
+	{"Left Capture Mix", "LineInL Capture Switch",	"LINEINL"},
+	{"Left Capture Mix", "Left AuxI Capture Switch", "AUXINL"},
+	{"Left Capture Mix", "Mic1 Capture Switch",	"MIC1 Pre Amp"},
+	{"Left Capture Mix", "Mic2 Capture Switch",	"MIC2 Pre Amp"},
+	{"Left Capture Mix", "HPMixerL Capture Switch", "HPL Mix"},
+	{"Left Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"},
+	{"Left Capture Mix", "MonoMixer Capture Switch", "Mono Mix"},
+
+	/*Right record mixer */
+	{"Right Capture Mix", "LineInR Capture Switch",	"LINEINR"},
+	{"Right Capture Mix", "Right AuxI Capture Switch",	"AUXINR"},
+	{"Right Capture Mix", "Mic1 Capture Switch",	"MIC1 Pre Amp"},
+	{"Right Capture Mix", "Mic2 Capture Switch",	"MIC2 Pre Amp"},
+	{"Right Capture Mix", "HPMixerR Capture Switch", "HPR Mix"},
+	{"Right Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"},
+	{"Right Capture Mix", "MonoMixer Capture Switch", "Mono Mix"},
+
+	/* headphone left mux */
+	{"Left Headphone Mux", "HP Left Mix",		"HPL Mix"},
+	{"Left Headphone Mux", "Vmid",			"Vmid"},
+
+	/* headphone right mux */
+	{"Right Headphone Mux", "HP Right Mix",		"HPR Mix"},
+	{"Right Headphone Mux", "Vmid",			"Vmid"},
+
+	/* speaker out mux */
+	{"SpeakerOut Mux", "Vmid",			"Vmid"},
+	{"SpeakerOut Mux", "HPOut Mix",			"HPOut Mix"},
+	{"SpeakerOut Mux", "Speaker Mix",		"Speaker Mix"},
+	{"SpeakerOut Mux", "Mono Mix",			"Mono Mix"},
+
+	/* Mono/Aux Out mux */
+	{"AuxOut Mux", "Vmid",				"Vmid"},
+	{"AuxOut Mux", "HPOut Mix",			"HPOut Mix"},
+	{"AuxOut Mux", "Speaker Mix",			"Speaker Mix"},
+	{"AuxOut Mux", "Mono Mix",			"Mono Mix"},
+
+	/* output pga */
+	{"HPL", NULL,					"Left Headphone"},
+	{"Left Headphone", NULL,			"Left Headphone Mux"},
+	{"HPR", NULL,					"Right Headphone"},
+	{"Right Headphone", NULL,			"Right Headphone Mux"},
+	{"Left AuxOut", NULL,				"AuxOut Mux"},
+	{"Right AuxOut", NULL,				"AuxOut Mux"},
+
+	/* input pga */
+	{"Left LineIn", NULL,				"LINEINL"},
+	{"Right LineIn", NULL,				"LINEINR"},
+	{"Left AuxI", NULL,				"AUXINL"},
+	{"Right AuxI", NULL,				"AUXINR"},
+	{"MIC1 Pre Amp", NULL,				"MIC1"},
+	{"MIC2 Pre Amp", NULL,				"MIC2"},
+	{"MIC1 PGA", NULL,				"MIC1 Pre Amp"},
+	{"MIC2 PGA", NULL,				"MIC2 Pre Amp"},
+
+	/* left ADC */
+	{"Left ADC", NULL,				"Left Capture Mix"},
+
+	/* right ADC */
+	{"Right ADC", NULL,				"Right Capture Mix"},
+
+	{"SpeakerOut N Mux", "RN/-R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "RP/+R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "LN/-R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "Vmid",			"Vmid"},
+
+	{"SPKOUT", NULL,				"SpeakerOut"},
+	{"SPKOUTN", NULL,				"SpeakerOut N Mux"},
+};
+
+static const struct snd_soc_dapm_route intercon_spk[] = {
+	{"SpeakerOut", NULL,				"SpeakerOut Mux"},
+};
+
+static const struct snd_soc_dapm_route intercon_amp_spk[] = {
+	{"AB Amp", NULL,				"SpeakerOut Mux"},
+	{"D Amp", NULL,					"SpeakerOut Mux"},
+	{"AB-D Amp Mux", "AB Amp",			"AB Amp"},
+	{"AB-D Amp Mux", "D Amp",			"D Amp"},
+	{"SpeakerOut", NULL,				"AB-D Amp Mux"},
+};
+
+/* PLL divisors */
+struct _pll_div {
+	u32 pll_in;
+	u32 pll_out;
+	u16 regvalue;
+};
+
+/* Note: PLL code taken from the original alc5623 driver; not sure how good it is. */
+/* Useful only for master mode. */
+static const struct _pll_div codec_master_pll_div[] = {
+
+	{  2048000,  8192000,	0x0ea0},
+	{  3686400,  8192000,	0x4e27},
+	{ 12000000,  8192000,	0x456b},
+	{ 13000000,  8192000,	0x495f},
+	{ 13100000,  8192000,	0x0320},
+	{  2048000,  11289600,	0xf637},
+	{  3686400,  11289600,	0x2f22},
+	{ 12000000,  11289600,	0x3e2f},
+	{ 13000000,  11289600,	0x4d5b},
+	{ 13100000,  11289600,	0x363b},
+	{  2048000,  16384000,	0x1ea0},
+	{  3686400,  16384000,	0x9e27},
+	{ 12000000,  16384000,	0x452b},
+	{ 13000000,  16384000,	0x542f},
+	{ 13100000,  16384000,	0x03a0},
+	{  2048000,  16934400,	0xe625},
+	{  3686400,  16934400,	0x9126},
+	{ 12000000,  16934400,	0x4d2c},
+	{ 13000000,  16934400,	0x742f},
+	{ 13100000,  16934400,	0x3c27},
+	{  2048000,  22579200,	0x2aa0},
+	{  3686400,  22579200,	0x2f20},
+	{ 12000000,  22579200,	0x7e2f},
+	{ 13000000,  22579200,	0x742f},
+	{ 13100000,  22579200,	0x3c27},
+	{  2048000,  24576000,	0x2ea0},
+	{  3686400,  24576000,	0xee27},
+	{ 12000000,  24576000,	0x2915},
+	{ 13000000,  24576000,	0x772e},
+	{ 13100000,  24576000,	0x0d20},
+};
+
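+/* divisors used when the PLL is fed from BCLK rather than MCLK */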
+static const struct _pll_div codec_slave_pll_div[] = {
+
+	{  1024000,  16384000,  0x3ea0},
+	{  1411200,  22579200,	0x3ea0},
+	{  1536000,  24576000,	0x3ea0},
+	{  2048000,  16384000,  0x1ea0},
+	{  2822400,  22579200,	0x1ea0},
+	{  3072000,  24576000,	0x1ea0},
+
+};
+
+static int alc5623_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+		int source, unsigned int freq_in, unsigned int freq_out)
+{
+	int i;
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int gbl_clk = 0, pll_div = 0;
+	u16 reg;
+
+	if (pll_id < ALC5623_PLL_FR_MCLK || pll_id > ALC5623_PLL_FR_BCK)
+		return -ENODEV;
+
+	/* Disable PLL power */
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_PLL,
+				0);
+
+	/* pll is not used in slave mode */
+	reg = snd_soc_read(codec, ALC5623_DAI_CONTROL);
+	if (reg & ALC5623_DAI_SDP_SLAVE_MODE)
+		return 0;
+
+	if (!freq_in || !freq_out)
+		return 0;
+
+	switch (pll_id) {
+	case ALC5623_PLL_FR_MCLK:
+		for (i = 0; i < ARRAY_SIZE(codec_master_pll_div); i++) {
+			if (codec_master_pll_div[i].pll_in == freq_in
+			   && codec_master_pll_div[i].pll_out == freq_out) {
+				/* PLL source from MCLK */
+				pll_div  = codec_master_pll_div[i].regvalue;
+				break;
+			}
+		}
+		break;
+	case ALC5623_PLL_FR_BCK:
+		for (i = 0; i < ARRAY_SIZE(codec_slave_pll_div); i++) {
+			if (codec_slave_pll_div[i].pll_in == freq_in
+			   && codec_slave_pll_div[i].pll_out == freq_out) {
+				/* PLL source from Bitclk */
+				gbl_clk = ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK;
+				pll_div = codec_slave_pll_div[i].regvalue;
+				break;
+			}
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!pll_div)
+		return -EINVAL;
+
+	snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk);
+	snd_soc_write(codec, ALC5623_PLL_CTRL, pll_div);
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_PLL,
+				ALC5623_PWR_ADD2_PLL);
+	gbl_clk |= ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL;
+	snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk);
+
+	return 0;
+}
+
+struct _coeff_div {
+	u16 fs;
+	u16 regvalue;
+};
+
+/* codec hifi mclk (after PLL) clock divider coefficients */
+/* values inspired by the BCLK=32Fs column of the Appendix A table */
+static const struct _coeff_div coeff_div[] = {
+	{256*8, 0x3a69},
+	{384*8, 0x3c6b},
+	{256*4, 0x2a69},
+	{384*4, 0x2c6b},
+	{256*2, 0x1a69},
+	{384*2, 0x1c6b},
+	{256*1, 0x0a69},
+	{384*1, 0x0c6b},
+};
+
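+/* find the coeff_div entry matching sysclk == fs * rate; returns its index or -EINVAL */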
+static int get_coeff(struct snd_soc_codec *codec, int rate)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (coeff_div[i].fs * rate == alc5623->sysclk)
+			return i;
+	}
+	return -EINVAL;
+}
+
+/*
+ * Clock after PLL and dividers
+ */
+static int alc5623_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+
+	switch (freq) {
+	case  8192000:
+	case 11289600:
+	case 12288000:
+	case 16384000:
+	case 16934400:
+	case 18432000:
+	case 22579200:
+	case 24576000:
+		alc5623->sysclk = freq;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int alc5623_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 iface = 0;
+
+	/* set master/slave audio interface */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		iface = ALC5623_DAI_SDP_MASTER_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		iface = ALC5623_DAI_SDP_SLAVE_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= ALC5623_DAI_I2S_DF_I2S;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		iface |= ALC5623_DAI_I2S_DF_RIGHT;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= ALC5623_DAI_I2S_DF_LEFT;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		iface |= ALC5623_DAI_I2S_DF_PCM;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		iface |= ALC5623_DAI_I2S_DF_PCM | ALC5623_DAI_I2S_PCM_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* clock inversion */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snd_soc_write(codec, ALC5623_DAI_CONTROL, iface);
+}
+
+static int alc5623_pcm_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	int coeff, rate;
+	u16 iface;
+
+	iface = snd_soc_read(codec, ALC5623_DAI_CONTROL);
+	iface &= ~ALC5623_DAI_I2S_DL_MASK;
+
+	/* bit size */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		iface |= ALC5623_DAI_I2S_DL_16;
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= ALC5623_DAI_I2S_DL_20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= ALC5623_DAI_I2S_DL_24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= ALC5623_DAI_I2S_DL_32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set iface & srate */
+	snd_soc_write(codec, ALC5623_DAI_CONTROL, iface);
+	rate = params_rate(params);
+	coeff = get_coeff(codec, rate);
+	if (coeff < 0)
+		return -EINVAL;
+
+	coeff = coeff_div[coeff].regvalue;
+	dev_dbg(codec->dev, "%s: sysclk=%d,rate=%d,coeff=0x%04x\n",
+		__func__, alc5623->sysclk, rate, coeff);
+	snd_soc_write(codec, ALC5623_STEREO_AD_DA_CLK_CTRL, coeff);
+
+	return 0;
+}
+
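+/* digital mute: mask the left/right DAC inputs to the HP mixer */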
+static int alc5623_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	u16 hp_mute = ALC5623_MISC_M_DAC_L_INPUT | ALC5623_MISC_M_DAC_R_INPUT;
+	u16 mute_reg = snd_soc_read(codec, ALC5623_MISC_CTRL) & ~hp_mute;
+
+	if (mute)
+		mute_reg |= hp_mute;
+
+	return snd_soc_write(codec, ALC5623_MISC_CTRL, mute_reg);
+}
+
+#define ALC5623_ADD2_POWER_EN (ALC5623_PWR_ADD2_VREF \
+	| ALC5623_PWR_ADD2_DAC_REF_CIR)
+
+#define ALC5623_ADD3_POWER_EN (ALC5623_PWR_ADD3_MAIN_BIAS \
+	| ALC5623_PWR_ADD3_MIC1_BOOST_AD)
+
+#define ALC5623_ADD1_POWER_EN \
+	(ALC5623_PWR_ADD1_SHORT_CURR_DET_EN | ALC5623_PWR_ADD1_SOFTGEN_EN \
+	| ALC5623_PWR_ADD1_DEPOP_BUF_HP | ALC5623_PWR_ADD1_HP_OUT_AMP \
+	| ALC5623_PWR_ADD1_HP_OUT_ENH_AMP)
+
+#define ALC5623_ADD1_POWER_EN_5622 \
+	(ALC5623_PWR_ADD1_SHORT_CURR_DET_EN \
+	| ALC5623_PWR_ADD1_HP_OUT_AMP)
+
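+/* headphone power-up sequence: keep HP depop mode 2 enabled while ramping to avoid pops */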
+static void enable_power_depop(struct snd_soc_codec *codec)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD1,
+				ALC5623_PWR_ADD1_SOFTGEN_EN,
+				ALC5623_PWR_ADD1_SOFTGEN_EN);
+
+	snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, ALC5623_ADD3_POWER_EN);
+
+	snd_soc_update_bits(codec, ALC5623_MISC_CTRL,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN);
+
+	msleep(500);
+
+	snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, ALC5623_ADD2_POWER_EN);
+
+	/* avoid writing '1' into 5622 reserved bits */
+	if (alc5623->id == 0x22)
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1,
+			ALC5623_ADD1_POWER_EN_5622);
+	else
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1,
+			ALC5623_ADD1_POWER_EN);
+
+	/* disable HP Depop2 */
+	snd_soc_update_bits(codec, ALC5623_MISC_CTRL,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN,
+				0);
+
+}
+
+static int alc5623_set_bias_level(struct snd_soc_codec *codec,
+				      enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		enable_power_depop(codec);
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		/* everything off except vref/vmid */
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_VREF);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3,
+				ALC5623_PWR_ADD3_MAIN_BIAS);
+		break;
+	case SND_SOC_BIAS_OFF:
+		/* everything off, dac mute, inactive */
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, 0);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, 0);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1, 0);
+		break;
+	}
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define ALC5623_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE \
+			| SNDRV_PCM_FMTBIT_S24_LE \
+			| SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops alc5623_dai_ops = {
+		.hw_params = alc5623_pcm_hw_params,
+		.digital_mute = alc5623_mute,
+		.set_fmt = alc5623_set_dai_fmt,
+		.set_sysclk = alc5623_set_dai_sysclk,
+		.set_pll = alc5623_set_dai_pll,
+};
+
+static struct snd_soc_dai_driver alc5623_dai = {
+	.name = "alc5623-hifi",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =	8000,
+		.rate_max =	48000,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = ALC5623_FORMATS,},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =	8000,
+		.rate_max =	48000,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = ALC5623_FORMATS,},
+
+	.ops = &alc5623_dai_ops,
+};
+
+static int alc5623_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
+{
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int alc5623_resume(struct snd_soc_codec *codec)
+{
+	int i, step = codec->driver->reg_cache_step;
+	u16 *cache = codec->reg_cache;
+
+	/* Sync reg_cache with the hardware */
+	for (i = 2 ; i < codec->driver->reg_cache_size ; i += step)
+		snd_soc_write(codec, i, cache[i]);
+
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* charge alc5623 caps */
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
+		alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		alc5623_set_bias_level(codec, codec->dapm.bias_level);
+	}
+
+	return 0;
+}
+
+static int alc5623_probe(struct snd_soc_codec *codec)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int ret;
+
+	ret = snd_soc_codec_set_cache_io(codec, 8, 16, alc5623->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	alc5623_reset(codec);
+	alc5623_fill_cache(codec);
+
+	/* power on device */
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	if (alc5623->add_ctrl) {
+		snd_soc_write(codec, ALC5623_ADD_CTRL_REG,
+				alc5623->add_ctrl);
+	}
+
+	if (alc5623->jack_det_ctrl) {
+		snd_soc_write(codec, ALC5623_JACK_DET_CTRL,
+				alc5623->jack_det_ctrl);
+	}
+
+	switch (alc5623->id) {
+	case 0x21:
+		snd_soc_add_controls(codec, rt5621_vol_snd_controls,
+			ARRAY_SIZE(rt5621_vol_snd_controls));
+		break;
+	case 0x22:
+		snd_soc_add_controls(codec, rt5622_vol_snd_controls,
+			ARRAY_SIZE(rt5622_vol_snd_controls));
+		break;
+	case 0x23:
+		snd_soc_add_controls(codec, alc5623_vol_snd_controls,
+			ARRAY_SIZE(alc5623_vol_snd_controls));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_add_controls(codec, alc5623_snd_controls,
+			ARRAY_SIZE(alc5623_snd_controls));
+
+	snd_soc_dapm_new_controls(dapm, alc5623_dapm_widgets,
+					ARRAY_SIZE(alc5623_dapm_widgets));
+
+	/* set up audio path interconnects */
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	switch (alc5623->id) {
+	case 0x21:
+	case 0x22:
+		snd_soc_dapm_new_controls(dapm, alc5623_dapm_amp_widgets,
+					ARRAY_SIZE(alc5623_dapm_amp_widgets));
+		snd_soc_dapm_add_routes(dapm, intercon_amp_spk,
+					ARRAY_SIZE(intercon_amp_spk));
+		break;
+	case 0x23:
+		snd_soc_dapm_add_routes(dapm, intercon_spk,
+					ARRAY_SIZE(intercon_spk));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* power down chip */
+static int alc5623_remove(struct snd_soc_codec *codec)
+{
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_device_alc5623 = {
+	.probe = alc5623_probe,
+	.remove = alc5623_remove,
+	.suspend = alc5623_suspend,
+	.resume = alc5623_resume,
+	.set_bias_level = alc5623_set_bias_level,
+	.reg_cache_size = ALC5623_VENDOR_ID2+2,
+	.reg_word_size = sizeof(u16),
+	.reg_cache_step = 2,
+};
+
+/*
+ * ALC5623 2 wire address is determined by A1 pin
+ * state during powerup.
+ *    low  = 0x1a
+ *    high = 0x1b
+ */
+static int alc5623_i2c_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct alc5623_platform_data *pdata;
+	struct alc5623_priv *alc5623;
+	int ret, vid1, vid2;
+
+	vid1 = i2c_smbus_read_word_data(client, ALC5623_VENDOR_ID1);
+	if (vid1 < 0) {
+		dev_err(&client->dev, "failed to read I2C\n");
+		return -EIO;
+	}
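+	/* i2c_smbus_read_word_data() returns the word little-endian; swap bytes so the vendor ID reads as 0x10ec */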
+	vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
+
+	vid2 = i2c_smbus_read_byte_data(client, ALC5623_VENDOR_ID2);
+	if (vid2 < 0) {
+		dev_err(&client->dev, "failed to read I2C\n");
+		return -EIO;
+	}
+
+	if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
+		dev_err(&client->dev, "unknown or wrong codec\n");
+		dev_err(&client->dev, "Expected %x:%lx, got %x:%x\n",
+				0x10ec, id->driver_data,
+				vid1, vid2);
+		return -ENODEV;
+	}
+
+	dev_dbg(&client->dev, "Found codec id : alc56%02x\n", vid2);
+
+	alc5623 = kzalloc(sizeof(struct alc5623_priv), GFP_KERNEL);
+	if (alc5623 == NULL)
+		return -ENOMEM;
+
+	pdata = client->dev.platform_data;
+	if (pdata) {
+		alc5623->add_ctrl = pdata->add_ctrl;
+		alc5623->jack_det_ctrl = pdata->jack_det_ctrl;
+	}
+
+	alc5623->id = vid2;
+	switch (alc5623->id) {
+	case 0x21:
+		alc5623_dai.name = "alc5621-hifi";
+		break;
+	case 0x22:
+		alc5623_dai.name = "alc5622-hifi";
+		break;
+	case 0x23:
+		alc5623_dai.name = "alc5623-hifi";
+		break;
+	default:
+		kfree(alc5623);
+		return -EINVAL;
+	}
+
+	i2c_set_clientdata(client, alc5623);
+	alc5623->control_data = client;
+	alc5623->control_type = SND_SOC_I2C;
+	mutex_init(&alc5623->mutex);
+
+	ret =  snd_soc_register_codec(&client->dev,
+		&soc_codec_device_alc5623, &alc5623_dai, 1);
+	if (ret != 0) {
+		dev_err(&client->dev, "Failed to register codec: %d\n", ret);
+		kfree(alc5623);
+	}
+
+	return ret;
+}
+
+static int alc5623_i2c_remove(struct i2c_client *client)
+{
+	struct alc5623_priv *alc5623 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+	kfree(alc5623);
+	return 0;
+}
+
+static const struct i2c_device_id alc5623_i2c_table[] = {
+	{"alc5621", 0x21},
+	{"alc5622", 0x22},
+	{"alc5623", 0x23},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, alc5623_i2c_table);
+
+/*  i2c codec control layer */
+static struct i2c_driver alc5623_i2c_driver = {
+	.driver = {
+		.name = "alc562x-codec",
+		.owner = THIS_MODULE,
+	},
+	.probe = alc5623_i2c_probe,
+	.remove =  __devexit_p(alc5623_i2c_remove),
+	.id_table = alc5623_i2c_table,
+};
+
+static int __init alc5623_modinit(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&alc5623_i2c_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "%s: can't add i2c driver\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+module_init(alc5623_modinit);
+
+static void __exit alc5623_modexit(void)
+{
+	i2c_del_driver(&alc5623_i2c_driver);
+}
+module_exit(alc5623_modexit);
+
+MODULE_DESCRIPTION("ASoC alc5621/2/3 driver");
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/alc5623.h b/sound/soc/codecs/alc5623.h
new file mode 100644
index 0000000..f3d6826
--- /dev/null
+++ b/sound/soc/codecs/alc5623.h
@@ -0,0 +1,161 @@
+/*
+ * alc5623.h  --  alc562[123] ALSA SoC Audio driver
+ *
+ * Copyright 2008 Realtek Microelectronics
+ * Copyright 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Author: flove <flove@realtek.com>
+ * Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ALC5623_H
+#define _ALC5623_H
+
+#define ALC5623_RESET				0x00
+/*				5621 5622 5623  */
+/* speaker output vol		   2    2       */
+/* line output vol                      4    2  */
+/* HP output vol		   4    0    4  */
+#define ALC5623_SPK_OUT_VOL			0x02
+#define ALC5623_HP_OUT_VOL			0x04
+#define ALC5623_MONO_AUX_OUT_VOL		0x06
+#define ALC5623_AUXIN_VOL			0x08
+#define ALC5623_LINE_IN_VOL			0x0A
+#define ALC5623_STEREO_DAC_VOL			0x0C
+#define ALC5623_MIC_VOL				0x0E
+#define ALC5623_MIC_ROUTING_CTRL		0x10
+#define ALC5623_ADC_REC_GAIN			0x12
+#define ALC5623_ADC_REC_MIXER			0x14
+#define ALC5623_SOFT_VOL_CTRL_TIME		0x16
+/* ALC5623_OUTPUT_MIXER_CTRL :			*/
+/* same remark as for reg 2 line vs speaker	*/
+#define ALC5623_OUTPUT_MIXER_CTRL		0x1C
+#define ALC5623_MIC_CTRL			0x22
+
+#define	ALC5623_DAI_CONTROL			0x34
+#define ALC5623_DAI_SDP_MASTER_MODE		(0 << 15)
+#define ALC5623_DAI_SDP_SLAVE_MODE		(1 << 15)
+#define ALC5623_DAI_I2S_PCM_MODE		(1 << 14)
+#define ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL	(1 <<  7)
+#define ALC5623_DAI_ADC_DATA_L_R_SWAP		(1 <<  5)
+#define ALC5623_DAI_DAC_DATA_L_R_SWAP		(1 <<  4)
+#define ALC5623_DAI_I2S_DL_MASK			(3 <<  2)
+#define ALC5623_DAI_I2S_DL_32			(3 <<  2)
+#define	ALC5623_DAI_I2S_DL_24			(2 <<  2)
+#define ALC5623_DAI_I2S_DL_20			(1 <<  2)
+#define ALC5623_DAI_I2S_DL_16			(0 <<  2)
+#define ALC5623_DAI_I2S_DF_PCM			(3 <<  0)
+#define	ALC5623_DAI_I2S_DF_LEFT			(2 <<  0)
+#define ALC5623_DAI_I2S_DF_RIGHT		(1 <<  0)
+#define ALC5623_DAI_I2S_DF_I2S			(0 <<  0)
+
+#define ALC5623_STEREO_AD_DA_CLK_CTRL		0x36
+#define	ALC5623_COMPANDING_CTRL			0x38
+
+#define	ALC5623_PWR_MANAG_ADD1			0x3A
+#define ALC5623_PWR_ADD1_MAIN_I2S_EN		(1 << 15)
+#define ALC5623_PWR_ADD1_ZC_DET_PD_EN		(1 << 14)
+#define ALC5623_PWR_ADD1_MIC1_BIAS_EN		(1 << 11)
+#define ALC5623_PWR_ADD1_SHORT_CURR_DET_EN	(1 << 10)
+#define ALC5623_PWR_ADD1_SOFTGEN_EN		(1 <<  8) /* rsvd on 5622 */
+#define	ALC5623_PWR_ADD1_DEPOP_BUF_HP		(1 <<  6) /* rsvd on 5622 */
+#define	ALC5623_PWR_ADD1_HP_OUT_AMP		(1 <<  5)
+#define	ALC5623_PWR_ADD1_HP_OUT_ENH_AMP		(1 <<  4) /* rsvd on 5622 */
+#define ALC5623_PWR_ADD1_DEPOP_BUF_AUX		(1 <<  2)
+#define ALC5623_PWR_ADD1_AUX_OUT_AMP		(1 <<  1)
+#define ALC5623_PWR_ADD1_AUX_OUT_ENH_AMP	(1 <<  0) /* rsvd on 5622 */
+
+#define ALC5623_PWR_MANAG_ADD2			0x3C
+#define ALC5623_PWR_ADD2_LINEOUT		(1 << 15) /* rt5623 */
+#define ALC5623_PWR_ADD2_CLASS_AB		(1 << 15) /* rt5621 */
+#define ALC5623_PWR_ADD2_CLASS_D		(1 << 14) /* rt5621 */
+#define ALC5623_PWR_ADD2_VREF			(1 << 13)
+#define ALC5623_PWR_ADD2_PLL			(1 << 12)
+#define ALC5623_PWR_ADD2_DAC_REF_CIR		(1 << 10)
+#define ALC5623_PWR_ADD2_L_DAC_CLK		(1 <<  9)
+#define ALC5623_PWR_ADD2_R_DAC_CLK		(1 <<  8)
+#define ALC5623_PWR_ADD2_L_ADC_CLK_GAIN		(1 <<  7)
+#define ALC5623_PWR_ADD2_R_ADC_CLK_GAIN		(1 <<  6)
+#define ALC5623_PWR_ADD2_L_HP_MIXER		(1 <<  5)
+#define ALC5623_PWR_ADD2_R_HP_MIXER		(1 <<  4)
+#define ALC5623_PWR_ADD2_SPK_MIXER		(1 <<  3)
+#define ALC5623_PWR_ADD2_MONO_MIXER		(1 <<  2)
+#define ALC5623_PWR_ADD2_L_ADC_REC_MIXER	(1 <<  1)
+#define ALC5623_PWR_ADD2_R_ADC_REC_MIXER	(1 <<  0)
+
+#define ALC5623_PWR_MANAG_ADD3			0x3E
+#define ALC5623_PWR_ADD3_MAIN_BIAS		(1 << 15)
+#define ALC5623_PWR_ADD3_AUXOUT_L_VOL_AMP	(1 << 14)
+#define ALC5623_PWR_ADD3_AUXOUT_R_VOL_AMP	(1 << 13)
+#define ALC5623_PWR_ADD3_SPK_OUT		(1 << 12)
+#define ALC5623_PWR_ADD3_HP_L_OUT_VOL		(1 << 10)
+#define ALC5623_PWR_ADD3_HP_R_OUT_VOL		(1 <<  9)
+#define ALC5623_PWR_ADD3_LINEIN_L_VOL		(1 <<  7)
+#define ALC5623_PWR_ADD3_LINEIN_R_VOL		(1 <<  6)
+#define ALC5623_PWR_ADD3_AUXIN_L_VOL		(1 <<  5)
+#define ALC5623_PWR_ADD3_AUXIN_R_VOL		(1 <<  4)
+#define ALC5623_PWR_ADD3_MIC1_FUN_CTRL		(1 <<  3)
+#define ALC5623_PWR_ADD3_MIC2_FUN_CTRL		(1 <<  2)
+#define ALC5623_PWR_ADD3_MIC1_BOOST_AD		(1 <<  1)
+#define ALC5623_PWR_ADD3_MIC2_BOOST_AD		(1 <<  0)
+
+#define ALC5623_ADD_CTRL_REG			0x40
+
+#define	ALC5623_GLOBAL_CLK_CTRL_REG		0x42
+#define ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL	(1 << 15)
+#define ALC5623_GBL_CLK_SYS_SOUR_SEL_MCLK	(0 << 15)
+#define ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK	(1 << 14)
+#define ALC5623_GBL_CLK_PLL_SOUR_SEL_MCLK	(0 << 14)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV8	(3 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV4	(2 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV2	(1 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV1	(0 <<  1)
+#define ALC5623_GBL_CLK_PLL_PRE_DIV2		(1 <<  0)
+#define ALC5623_GBL_CLK_PLL_PRE_DIV1		(0 <<  0)
+
+#define ALC5623_PLL_CTRL			0x44
+#define ALC5623_PLL_CTRL_N_VAL(n)		(((n)&0xff) << 8)
+#define ALC5623_PLL_CTRL_K_VAL(k)		(((k)&0x7)  << 4)
+#define ALC5623_PLL_CTRL_M_VAL(m)		((m)&0xf)
+
+#define ALC5623_GPIO_OUTPUT_PIN_CTRL		0x4A
+#define ALC5623_GPIO_PIN_CONFIG			0x4C
+#define ALC5623_GPIO_PIN_POLARITY		0x4E
+#define ALC5623_GPIO_PIN_STICKY			0x50
+#define ALC5623_GPIO_PIN_WAKEUP			0x52
+#define ALC5623_GPIO_PIN_STATUS			0x54
+#define ALC5623_GPIO_PIN_SHARING		0x56
+#define	ALC5623_OVER_CURR_STATUS		0x58
+#define ALC5623_JACK_DET_CTRL			0x5A
+
+#define ALC5623_MISC_CTRL			0x5E
+#define ALC5623_MISC_DISABLE_FAST_VREG		(1 << 15)
+#define ALC5623_MISC_SPK_CLASS_AB_OC_PD		(1 << 13) /* 5621 */
+#define ALC5623_MISC_SPK_CLASS_AB_OC_DET	(1 << 12) /* 5621 */
+#define ALC5623_MISC_HP_DEPOP_MODE3_EN		(1 << 10)
+#define ALC5623_MISC_HP_DEPOP_MODE2_EN		(1 <<  9)
+#define ALC5623_MISC_HP_DEPOP_MODE1_EN		(1 <<  8)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE3_EN	(1 <<  6)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE2_EN	(1 <<  5)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE1_EN	(1 <<  4)
+#define ALC5623_MISC_M_DAC_L_INPUT		(1 <<  3)
+#define ALC5623_MISC_M_DAC_R_INPUT		(1 <<  2)
+#define ALC5623_MISC_IRQOUT_INV_CTRL		(1 <<  0)
+
+#define	ALC5623_PSEDUEO_SPATIAL_CTRL		0x60
+#define ALC5623_EQ_CTRL				0x62
+#define ALC5623_EQ_MODE_ENABLE			0x66
+#define ALC5623_AVC_CTRL			0x68
+#define ALC5623_HID_CTRL_INDEX			0x6A
+#define ALC5623_HID_CTRL_DATA			0x6C
+#define ALC5623_VENDOR_ID1			0x7C
+#define ALC5623_VENDOR_ID2			0x7E
+
+#define ALC5623_PLL_FR_MCLK			0
+#define ALC5623_PLL_FR_BCK			1
+#endif
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 8236439..46dbfd0 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -36,8 +36,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dai.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include <mach/dm365.h>
@@ -116,7 +114,7 @@
 			     DAVINCI_VC_REG12_POWER_ALL_OFF);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 6d4bdc6..8b51245 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -106,6 +106,21 @@
 #define CS4270_MUTE_DAC_A	0x01
 #define CS4270_MUTE_DAC_B	0x02
 
+/* Power-on default values for the registers
+ *
+ * This array contains the power-on default values of the registers, with the
+ * exception of the "CHIPID" register (01h).  The lower four bits of that
+ * register contain the hardware revision, so it is treated as volatile.
+ *
+ * Also note that on the CS4270, the first readable register is 1, but ASoC
+ * assumes the first register is 0.  Therefore, the array must have an entry for
+ * register 0, but we use cs4270_reg_is_readable() to tell ASoC that it can't
+ * be read.
+ */
+static const u8 cs4270_default_reg_cache[CS4270_LASTREG + 1] = {
+	0x00, 0x00, 0x00, 0x30, 0x00, 0x60, 0x20, 0x00, 0x00
+};
+
 static const char *supply_names[] = {
 	"va", "vd", "vlc"
 };
@@ -114,7 +129,6 @@
 struct cs4270_private {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[CS4270_NUMREGS];
 	unsigned int mclk; /* Input frequency of the MCLK pin */
 	unsigned int mode; /* The mode (I2S or left-justified) */
 	unsigned int slave_mode;
@@ -179,6 +193,20 @@
 /* The number of MCLK/LRCK ratios supported by the CS4270 */
 #define NUM_MCLK_RATIOS		ARRAY_SIZE(cs4270_mode_ratios)
 
+static int cs4270_reg_is_readable(unsigned int reg)
+{
+	return (reg >= CS4270_FIRSTREG) && (reg <= CS4270_LASTREG);
+}
+
+static int cs4270_reg_is_volatile(unsigned int reg)
+{
+	/* Unreadable registers are considered volatile */
+	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
+		return 1;
+
+	return reg == CS4270_CHIPID;
+}
+
 /**
  * cs4270_set_dai_sysclk - determine the CS4270 samples rates.
  * @codec_dai: the codec DAI
@@ -264,97 +292,6 @@
 }
 
 /**
- * cs4270_fill_cache - pre-fill the CS4270 register cache.
- * @codec: the codec for this CS4270
- *
- * This function fills in the CS4270 register cache by reading the register
- * values from the hardware.
- *
- * This CS4270 registers are cached to avoid excessive I2C I/O operations.
- * After the initial read to pre-fill the cache, the CS4270 never updates
- * the register values, so we won't have a cache coherency problem.
- *
- * We use the auto-increment feature of the CS4270 to read all registers in
- * one shot.
- */
-static int cs4270_fill_cache(struct snd_soc_codec *codec)
-{
-	u8 *cache = codec->reg_cache;
-	struct i2c_client *i2c_client = codec->control_data;
-	s32 length;
-
-	length = i2c_smbus_read_i2c_block_data(i2c_client,
-		CS4270_FIRSTREG | CS4270_I2C_INCR, CS4270_NUMREGS, cache);
-
-	if (length != CS4270_NUMREGS) {
-		dev_err(codec->dev, "i2c read failure, addr=0x%x\n",
-		       i2c_client->addr);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-/**
- * cs4270_read_reg_cache - read from the CS4270 register cache.
- * @codec: the codec for this CS4270
- * @reg: the register to read
- *
- * This function returns the value for a given register.  It reads only from
- * the register cache, not the hardware itself.
- *
- * This CS4270 registers are cached to avoid excessive I2C I/O operations.
- * After the initial read to pre-fill the cache, the CS4270 never updates
- * the register values, so we won't have a cache coherency problem.
- */
-static unsigned int cs4270_read_reg_cache(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	u8 *cache = codec->reg_cache;
-
-	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
-		return -EIO;
-
-	return cache[reg - CS4270_FIRSTREG];
-}
-
-/**
- * cs4270_i2c_write - write to a CS4270 register via the I2C bus.
- * @codec: the codec for this CS4270
- * @reg: the register to write
- * @value: the value to write to the register
- *
- * This function writes the given value to the given CS4270 register, and
- * also updates the register cache.
- *
- * Note that we don't use the hw_write function pointer of snd_soc_codec.
- * That's because it's too clunky: the hw_write_t prototype does not match
- * i2c_smbus_write_byte_data(), and it's just another layer of overhead.
- */
-static int cs4270_i2c_write(struct snd_soc_codec *codec, unsigned int reg,
-			    unsigned int value)
-{
-	u8 *cache = codec->reg_cache;
-
-	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
-		return -EIO;
-
-	/* Only perform an I2C operation if the new value is different */
-	if (cache[reg - CS4270_FIRSTREG] != value) {
-		struct i2c_client *client = codec->control_data;
-		if (i2c_smbus_write_byte_data(client, reg, value)) {
-			dev_err(codec->dev, "i2c write failed\n");
-			return -EIO;
-		}
-
-		/* We've written to the hardware, so update the cache */
-		cache[reg - CS4270_FIRSTREG] = value;
-	}
-
-	return 0;
-}
-
-/**
  * cs4270_hw_params - program the CS4270 with the given hardware parameters.
  * @substream: the audio stream
  * @params: the hardware parameters to set
@@ -551,15 +488,16 @@
 static int cs4270_probe(struct snd_soc_codec *codec)
 {
 	struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-	int i, ret, reg;
+	int i, ret;
 
 	codec->control_data = cs4270->control_data;
 
-	/* The I2C interface is set up, so pre-fill our register cache */
-
-	ret = cs4270_fill_cache(codec);
+	/* Tell ASoC what kind of I/O to use to read the registers.  ASoC will
+	 * then do the I2C transactions itself.
+	 */
+	ret = snd_soc_codec_set_cache_io(codec, 8, 8, cs4270->control_type);
 	if (ret < 0) {
-		dev_err(codec->dev, "failed to fill register cache\n");
+		dev_err(codec->dev, "failed to set cache I/O (ret=%i)\n", ret);
 		return ret;
 	}
 
@@ -568,10 +506,7 @@
 	 * this feature disabled by default.  An application (e.g. alsactl) can
 	 * re-enabled it by using the controls.
 	 */
-
-	reg = cs4270_read_reg_cache(codec, CS4270_MUTE);
-	reg &= ~CS4270_MUTE_AUTO;
-	ret = cs4270_i2c_write(codec, CS4270_MUTE, reg);
+	ret = snd_soc_update_bits(codec, CS4270_MUTE, CS4270_MUTE_AUTO, 0);
 	if (ret < 0) {
 		dev_err(codec->dev, "i2c write failed\n");
 		return ret;
@@ -582,10 +517,8 @@
 	 * playback has started.  An application (e.g. alsactl) can
 	 * re-enabled it by using the controls.
 	 */
-
-	reg = cs4270_read_reg_cache(codec, CS4270_TRANS);
-	reg &= ~(CS4270_TRANS_SOFT | CS4270_TRANS_ZERO);
-	ret = cs4270_i2c_write(codec, CS4270_TRANS, reg);
+	ret = snd_soc_update_bits(codec, CS4270_TRANS,
+		CS4270_TRANS_SOFT | CS4270_TRANS_ZERO, 0);
 	if (ret < 0) {
 		dev_err(codec->dev, "i2c write failed\n");
 		return ret;
@@ -708,15 +641,16 @@
  * Assign this variable to the codec_dev field of the machine driver's
  * snd_soc_device structure.
  */
-static struct snd_soc_codec_driver soc_codec_device_cs4270 = {
-	.probe =	cs4270_probe,
-	.remove =	cs4270_remove,
-	.suspend =	cs4270_soc_suspend,
-	.resume =	cs4270_soc_resume,
-	.read = cs4270_read_reg_cache,
-	.write = cs4270_i2c_write,
-	.reg_cache_size = CS4270_NUMREGS,
-	.reg_word_size = sizeof(u8),
+static const struct snd_soc_codec_driver soc_codec_device_cs4270 = {
+	.probe =		cs4270_probe,
+	.remove =		cs4270_remove,
+	.suspend =		cs4270_soc_suspend,
+	.resume =		cs4270_soc_resume,
+	.volatile_register =	cs4270_reg_is_volatile,
+	.readable_register =	cs4270_reg_is_readable,
+	.reg_cache_size =	CS4270_LASTREG + 1,
+	.reg_word_size =	sizeof(u8),
+	.reg_cache_default =	cs4270_default_reg_cache,
 };
 
 /**
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index cb086ea..8fb7070 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -26,7 +26,6 @@
 #include <linux/slab.h>
 #include <sound/core.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <sound/pcm_params.h>
@@ -47,7 +46,6 @@
 	unsigned int mclk;
 	unsigned int audio_mode;	/* The mode (I2S or left-justified) */
 	enum master_slave_mode func;
-	u8 reg_cache[CS42L51_NUMREGS];
 };
 
 #define CS42L51_FORMATS ( \
@@ -519,6 +517,7 @@
 static int cs42l51_probe(struct snd_soc_codec *codec)
 {
 	struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, reg;
 
 	codec->control_data = cs42l51->control_data;
@@ -550,9 +549,9 @@
 
 	snd_soc_add_controls(codec, cs42l51_snd_controls,
 		ARRAY_SIZE(cs42l51_snd_controls));
-	snd_soc_dapm_new_controls(codec, cs42l51_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, cs42l51_dapm_widgets,
 		ARRAY_SIZE(cs42l51_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, cs42l51_routes,
+	snd_soc_dapm_add_routes(dapm, cs42l51_routes,
 		ARRAY_SIZE(cs42l51_routes));
 
 	return 0;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index e8d27c8..03d1e86 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -18,7 +18,7 @@
 
 #include <sound/core.h>
 #include <sound/initval.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 
 #include "cx20442.h"
 
@@ -26,7 +26,6 @@
 struct cx20442_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[1];
 };
 
 #define CX20442_PM		0x0
@@ -89,10 +88,11 @@
 
 static int cx20442_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, cx20442_dapm_widgets,
-				  ARRAY_SIZE(cx20442_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, cx20442_audio_map,
+	snd_soc_dapm_new_controls(dapm, cx20442_dapm_widgets,
+				  ARRAY_SIZE(cx20442_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, cx20442_audio_map,
 				ARRAY_SIZE(cx20442_audio_map));
 
 	return 0;
@@ -263,7 +263,7 @@
 	/* Prevent the codec driver from further accessing the modem */
 	codec->hw_write = NULL;
 	cx20442->control_data = NULL;
-	codec->pop_time = 0;
+	codec->card->pop_time = 0;
 }
 
 /* Line discipline .hangup() */
@@ -291,7 +291,7 @@
 		/* Set up codec driver access to modem controls */
 		cx20442->control_data = tty;
 		codec->hw_write = (hw_write_t)tty->ops->write;
-		codec->pop_time = 1;
+		codec->card->pop_time = 1;
 	}
 }
 
@@ -348,7 +348,7 @@
 
 	cx20442->control_data = NULL;
 	codec->hw_write = NULL;
-	codec->pop_time = 0;
+	codec->card->pop_time = 0;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 58bb9b9..92fd9d7 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
new file mode 100644
index 0000000..57e9dac
--- /dev/null
+++ b/sound/soc/codecs/dmic.c
@@ -0,0 +1,81 @@
+/*
+ * dmic.c  --  SoC audio for Generic Digital MICs
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+static struct snd_soc_dai_driver dmic_dai = {
+	.name = "dmic-hifi",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.formats = SNDRV_PCM_FMTBIT_S32_LE
+			| SNDRV_PCM_FMTBIT_S24_LE
+			| SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
+
+static struct snd_soc_codec_driver soc_dmic = {};
+
+static int __devinit dmic_dev_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev,
+			&soc_dmic, &dmic_dai, 1);
+}
+
+static int __devexit dmic_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+MODULE_ALIAS("platform:dmic-codec");
+
+static struct platform_driver dmic_driver = {
+	.driver = {
+		.name = "dmic-codec",
+		.owner = THIS_MODULE,
+	},
+	.probe = dmic_dev_probe,
+	.remove = __devexit_p(dmic_dev_remove),
+};
+
+static int __init dmic_init(void)
+{
+	return platform_driver_register(&dmic_driver);
+}
+module_init(dmic_init);
+
+static void __exit dmic_exit(void)
+{
+	platform_driver_unregister(&dmic_driver);
+}
+module_exit(dmic_exit);
+
+MODULE_DESCRIPTION("Generic DMIC driver");
+MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 16253ec..f7cd346 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -22,7 +22,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
-#include <sound/soc-dapm.h>
 #include <sound/soc.h>
 
 #define JZ4740_REG_CODEC_1 0x0
@@ -266,7 +265,7 @@
 		break;
 	case SND_SOC_BIAS_STANDBY:
 		/* The only way to clear the suspend flag is to reset the codec */
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			jz4740_codec_wakeup(codec);
 
 		mask = JZ4740_CODEC_1_VREF_DISABLE |
@@ -288,23 +287,25 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
 
 static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	snd_soc_update_bits(codec, JZ4740_REG_CODEC_1,
 			JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE);
 
 	snd_soc_add_controls(codec, jz4740_codec_controls,
 		ARRAY_SIZE(jz4740_codec_controls));
 
-	snd_soc_dapm_new_controls(codec, jz4740_codec_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, jz4740_codec_dapm_widgets,
 		ARRAY_SIZE(jz4740_codec_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, jz4740_codec_dapm_routes,
+	snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
 		ARRAY_SIZE(jz4740_codec_dapm_routes));
 
 	snd_soc_dapm_new_widgets(codec);
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 6447dbb..89498f9 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <linux/slab.h>
@@ -1229,15 +1228,17 @@
 
 static int max98088_add_widgets(struct snd_soc_codec *codec)
 {
-       snd_soc_dapm_new_controls(codec, max98088_dapm_widgets,
+       struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+       snd_soc_dapm_new_controls(dapm, max98088_dapm_widgets,
                                  ARRAY_SIZE(max98088_dapm_widgets));
 
-       snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+       snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
        snd_soc_add_controls(codec, max98088_snd_controls,
                             ARRAY_SIZE(max98088_snd_controls));
 
-       snd_soc_dapm_new_widgets(codec);
+       snd_soc_dapm_new_widgets(dapm);
        return 0;
 }
 
@@ -1622,7 +1623,7 @@
                break;
 
        case SND_SOC_BIAS_STANDBY:
-               if (codec->bias_level == SND_SOC_BIAS_OFF)
+               if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
                        max98088_sync_cache(codec);
 
                snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
@@ -1635,7 +1636,7 @@
                codec->cache_sync = 1;
                break;
        }
-       codec->bias_level = level;
+       codec->dapm.bias_level = level;
        return 0;
 }
 
@@ -1957,7 +1958,7 @@
                return ret;
        }
 
-       /* initalize private data */
+       /* initialize private data */
 
        max98088->sysclk = (unsigned)-1;
        max98088->eq_textcnt = 0;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 6f38d61..2727bef 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -38,7 +38,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "ssm2602.h"
@@ -207,10 +206,11 @@
 
 static int ssm2602_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ssm2602_dapm_widgets,
-				  ARRAY_SIZE(ssm2602_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_conn, ARRAY_SIZE(audio_conn));
+	snd_soc_dapm_new_controls(dapm, ssm2602_dapm_widgets,
+				  ARRAY_SIZE(ssm2602_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_conn, ARRAY_SIZE(audio_conn));
 
 	return 0;
 }
@@ -493,7 +493,7 @@
 		break;
 
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 061f9e5..78b2b50 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -236,7 +236,7 @@
 		stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index e8652b1..54a30ef 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 
@@ -391,11 +390,12 @@
 
 static int tlv320aic23_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
-				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
+				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -574,7 +574,7 @@
 		tlv320aic23_write(codec, TLV320AIC23_PWR, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 6b7d71e..e2a7608 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "tlv320aic26.h"
@@ -31,7 +30,6 @@
 struct aic26 {
 	struct spi_device *spi;
 	struct snd_soc_codec codec;
-	u16 reg_cache[AIC26_NUM_REGS];	/* shadow registers */
 	int master;
 	int datfm;
 	int mclk;
@@ -355,7 +353,6 @@
  */
 static int aic26_probe(struct snd_soc_codec *codec)
 {
-	struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
 	int ret, err, i, reg;
 
 	dev_info(codec->dev, "Probing AIC26 SoC CODEC driver\n");
@@ -373,7 +370,7 @@
 	aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
 
 	/* Fill register cache */
-	for (i = 0; i < ARRAY_SIZE(aic26->reg_cache); i++)
+	for (i = 0; i < codec->driver->reg_cache_size; i++)
 		aic26_reg_read(codec, i);
 
 	/* Register the sysfs files for debugging */
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 77b8f9a..3bedab2 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -46,7 +46,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/tlv320aic3x.h>
@@ -61,6 +60,8 @@
 	"DRVDD",	/* ADC Analog and Output Driver Voltage */
 };
 
+static LIST_HEAD(reset_list);
+
 struct aic3x_priv;
 
 struct aic3x_disable_nb {
@@ -77,6 +78,7 @@
 	struct aic3x_setup_data *setup;
 	void *control_data;
 	unsigned int sysclk;
+	struct list_head list;
 	int master;
 	int gpio_reset;
 	int power;
@@ -183,7 +185,7 @@
 
 	if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
 		/* find dapm widget path assoc with kcontrol */
-		list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+		list_for_each_entry(path, &widget->dapm->card->paths, list) {
 			if (path->kcontrol != kcontrol)
 				continue;
 
@@ -199,7 +201,7 @@
 		}
 
 		if (found)
-			snd_soc_dapm_sync(widget->codec);
+			snd_soc_dapm_sync(widget->dapm);
 	}
 
 	ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
@@ -788,17 +790,19 @@
 static int aic3x_add_widgets(struct snd_soc_codec *codec)
 {
 	struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	if (aic3x->model == AIC3X_MODEL_3007) {
-		snd_soc_dapm_new_controls(codec, aic3007_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets,
 			ARRAY_SIZE(aic3007_dapm_widgets));
-		snd_soc_dapm_add_routes(codec, intercon_3007, ARRAY_SIZE(intercon_3007));
+		snd_soc_dapm_add_routes(dapm, intercon_3007,
+					ARRAY_SIZE(intercon_3007));
 	}
 
 	return 0;
@@ -1075,7 +1079,7 @@
 		 * Put codec to reset and require cache sync as at least one
 		 * of the supplies was disabled
 		 */
-		if (aic3x->gpio_reset >= 0)
+		if (gpio_is_valid(aic3x->gpio_reset))
 			gpio_set_value(aic3x->gpio_reset, 0);
 		aic3x->codec->cache_sync = 1;
 	}
@@ -1102,7 +1106,7 @@
 		if (!codec->cache_sync)
 			goto out;
 
-		if (aic3x->gpio_reset >= 0) {
+		if (gpio_is_valid(aic3x->gpio_reset)) {
 			udelay(1);
 			gpio_set_value(aic3x->gpio_reset, 1);
 		}
@@ -1135,7 +1139,7 @@
 	case SND_SOC_BIAS_ON:
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY &&
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY &&
 		    aic3x->master) {
 			/* enable pll */
 			reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
@@ -1146,7 +1150,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		if (!aic3x->power)
 			aic3x_set_power(codec, 1);
-		if (codec->bias_level == SND_SOC_BIAS_PREPARE &&
+		if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE &&
 		    aic3x->master) {
 			/* disable pll */
 			reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
@@ -1159,7 +1163,7 @@
 			aic3x_set_power(codec, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1344,14 +1348,28 @@
 	return 0;
 }
 
+static bool aic3x_is_shared_reset(struct aic3x_priv *aic3x)
+{
+	struct aic3x_priv *a;
+
+	list_for_each_entry(a, &reset_list, list) {
+		if (gpio_is_valid(aic3x->gpio_reset) &&
+		    aic3x->gpio_reset == a->gpio_reset)
+			return true;
+	}
+
+	return false;
+}
+
 static int aic3x_probe(struct snd_soc_codec *codec)
 {
 	struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
 	int ret, i;
 
+	INIT_LIST_HEAD(&aic3x->list);
 	codec->control_data = aic3x->control_data;
 	aic3x->codec = codec;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 8, 8, aic3x->control_type);
 	if (ret != 0) {
@@ -1359,7 +1377,8 @@
 		return ret;
 	}
 
-	if (aic3x->gpio_reset >= 0) {
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x)) {
 		ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
 		if (ret != 0)
 			goto err_gpio;
@@ -1405,6 +1424,7 @@
 		snd_soc_add_controls(codec, &aic3x_classd_amp_gain_ctrl, 1);
 
 	aic3x_add_widgets(codec);
+	list_add(&aic3x->list, &reset_list);
 
 	return 0;
 
@@ -1414,10 +1434,10 @@
 					      &aic3x->disable_nb[i].nb);
 	regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
 err_get:
-	if (aic3x->gpio_reset >= 0)
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x))
 		gpio_free(aic3x->gpio_reset);
 err_gpio:
-	kfree(aic3x);
 	return ret;
 }
 
@@ -1427,7 +1447,9 @@
 	int i;
 
 	aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	if (aic3x->gpio_reset >= 0) {
+	list_del(&aic3x->list);
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x)) {
 		gpio_set_value(aic3x->gpio_reset, 0);
 		gpio_free(aic3x->gpio_reset);
 	}
@@ -1523,21 +1545,6 @@
 	.remove = aic3x_i2c_remove,
 	.id_table = aic3x_i2c_id,
 };
-
-static inline void aic3x_i2c_init(void)
-{
-	int ret;
-
-	ret = i2c_add_driver(&aic3x_i2c_driver);
-	if (ret)
-		printk(KERN_ERR "%s: error regsitering i2c driver, %d\n",
-		       __func__, ret);
-}
-
-static inline void aic3x_i2c_exit(void)
-{
-	i2c_del_driver(&aic3x_i2c_driver);
-}
 #endif
 
 static int __init aic3x_modinit(void)
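The aic3x changes above let several codec instances share one reset GPIO: the line is requested and freed only by the instance that does not already find it on reset_list. The same idea in isolation, as a standalone C sketch using a plain linked list instead of the kernel's list_head and gpio_is_valid():

#include <stdbool.h>

struct codec_inst {
	int gpio_reset;			/* negative: no reset line wired */
	struct codec_inst *next;
};

static struct codec_inst *reset_list;	/* instances already registered */

/* True when another registered instance already drives this GPIO, so the
 * caller must neither request nor free it a second time. */
static bool reset_is_shared(const struct codec_inst *c)
{
	const struct codec_inst *it;

	if (c->gpio_reset < 0)
		return false;
	for (it = reset_list; it; it = it->next)
		if (it->gpio_reset == c->gpio_reset)
			return true;
	return false;
}
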
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index c5ab8c8..71d7be8 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -36,21 +36,21 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
 #include <sound/tlv320dac33-plat.h>
 #include "tlv320dac33.h"
 
-#define DAC33_BUFFER_SIZE_BYTES		24576	/* bytes, 12288 16 bit words,
-						 * 6144 stereo */
-#define DAC33_BUFFER_SIZE_SAMPLES	6144
-
-#define NSAMPLE_MAX		5700
-
-#define MODE7_LTHR		10
-#define MODE7_UTHR		(DAC33_BUFFER_SIZE_SAMPLES - 10)
+/*
+ * The internal FIFO is 24576 bytes long
+ * It can be configured to hold 16bit or 24bit samples
+ * In 16bit configuration the FIFO can hold 6144 stereo samples
+ * In 24bit configuration the FIFO can hold 4096 stereo samples
+ */
+#define DAC33_FIFO_SIZE_16BIT	6144
+#define DAC33_FIFO_SIZE_24BIT	4096
+#define DAC33_MODE7_MARGIN	10	/* Safety margin for FIFO in Mode7 */
 
 #define BURST_BASEFREQ_HZ	49152000
 
@@ -100,16 +100,11 @@
 	unsigned int refclk;
 
 	unsigned int alarm_threshold;	/* set to be half of LATENCY_TIME_MS */
-	unsigned int nsample_min;	/* nsample should not be lower than
-					 * this */
-	unsigned int nsample_max;	/* nsample should not be higher than
-					 * this */
 	enum dac33_fifo_modes fifo_mode;/* FIFO mode selection */
+	unsigned int fifo_size;		/* Size of the FIFO in samples */
 	unsigned int nsample;		/* burst read amount from host */
 	int mode1_latency;		/* latency caused by the i2c writes in
 					 * us */
-	int auto_fifo_config; 		/* Configure the FIFO based on the
-					 * period size */
 	u8 burst_bclkdiv;		/* BCLK divider value in burst mode */
 	unsigned int burst_rate;	/* Interface speed in Burst modes */
 
@@ -303,7 +298,6 @@
 	if (unlikely(!dac33->chip_power))
 		return;
 
-	/* 44-46: DAC Control Registers */
 	/* A : DAC sample rate Fsref/1.5 */
 	dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0));
 	/* B : DAC src=normal, not muted */
@@ -316,8 +310,6 @@
 	 clock source = internal osc (?) */
 	dac33_write(codec, DAC33_ANA_VOL_SOFT_STEP_CTRL, DAC33_VOLCLKEN);
 
-	dac33_write(codec, DAC33_PWR_CTRL, DAC33_PDNALLB);
-
 	/* Restore only selected registers (gains mostly) */
 	dac33_write(codec, DAC33_LDAC_DIG_VOL_CTRL,
 		    dac33_read_reg_cache(codec, DAC33_LDAC_DIG_VOL_CTRL));
@@ -328,6 +320,10 @@
 		    dac33_read_reg_cache(codec, DAC33_LINEL_TO_LLO_VOL));
 	dac33_write(codec, DAC33_LINER_TO_RLO_VOL,
 		    dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
+
+	dac33_write(codec, DAC33_OUT_AMP_CTRL,
+		    dac33_read_reg_cache(codec, DAC33_OUT_AMP_CTRL));
+
 }
 
 static inline int dac33_read_id(struct snd_soc_codec *codec)
@@ -357,6 +353,21 @@
 	dac33_write(codec, DAC33_PWR_CTRL, reg);
 }
 
+static inline void dac33_disable_digital(struct snd_soc_codec *codec)
+{
+	u8 reg;
+
+	/* Stop the DAI clock */
+	reg = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_B);
+	reg &= ~DAC33_BCLKON;
+	dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_B, reg);
+
+	/* Power down the Oscillator, and DACs */
+	reg = dac33_read_reg_cache(codec, DAC33_PWR_CTRL);
+	reg &= ~(DAC33_OSCPDNB | DAC33_DACRPDNB | DAC33_DACLPDNB);
+	dac33_write(codec, DAC33_PWR_CTRL, reg);
+}
+
 static int dac33_hard_power(struct snd_soc_codec *codec, int power)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
@@ -405,7 +416,7 @@
 	return ret;
 }
 
-static int playback_event(struct snd_soc_dapm_widget *w,
+static int dac33_playback_event(struct snd_soc_dapm_widget *w,
 		struct snd_kcontrol *kcontrol, int event)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(w->codec);
@@ -417,77 +428,13 @@
 			dac33_prepare_chip(dac33->substream);
 		}
 		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dac33_disable_digital(w->codec);
+		break;
 	}
 	return 0;
 }
 
-static int dac33_get_nsample(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = dac33->nsample;
-
-	return 0;
-}
-
-static int dac33_set_nsample(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (dac33->nsample == ucontrol->value.integer.value[0])
-		return 0;
-
-	if (ucontrol->value.integer.value[0] < dac33->nsample_min ||
-	    ucontrol->value.integer.value[0] > dac33->nsample_max) {
-		ret = -EINVAL;
-	} else {
-		dac33->nsample = ucontrol->value.integer.value[0];
-		/* Re calculate the burst time */
-		dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
-						      dac33->nsample);
-	}
-
-	return ret;
-}
-
-static int dac33_get_uthr(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = dac33->uthr;
-
-	return 0;
-}
-
-static int dac33_set_uthr(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (dac33->substream)
-		return -EBUSY;
-
-	if (dac33->uthr == ucontrol->value.integer.value[0])
-		return 0;
-
-	if (ucontrol->value.integer.value[0] < (MODE7_LTHR + 10) ||
-	    ucontrol->value.integer.value[0] > MODE7_UTHR)
-		ret = -EINVAL;
-	else
-		dac33->uthr = ucontrol->value.integer.value[0];
-
-	return ret;
-}
-
 static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol,
 			 struct snd_ctl_elem_value *ucontrol)
 {
@@ -572,13 +519,6 @@
 		 dac33_get_fifo_mode, dac33_set_fifo_mode),
 };
 
-static const struct snd_kcontrol_new dac33_fifo_snd_controls[] = {
-	SOC_SINGLE_EXT("nSample", 0, 0, 5900, 0,
-		dac33_get_nsample, dac33_set_nsample),
-	SOC_SINGLE_EXT("UTHR", 0, 0, MODE7_UTHR, 0,
-		 dac33_get_uthr, dac33_set_uthr),
-};
-
 /* Analog bypass */
 static const struct snd_kcontrol_new dac33_dapm_abypassl_control =
 	SOC_DAPM_SINGLE("Switch", DAC33_LINEL_TO_LLO_VOL, 7, 1, 1);
@@ -586,6 +526,25 @@
 static const struct snd_kcontrol_new dac33_dapm_abypassr_control =
 	SOC_DAPM_SINGLE("Switch", DAC33_LINER_TO_RLO_VOL, 7, 1, 1);
 
+/* LOP L/R invert selection */
+static const char *dac33_lr_lom_texts[] = {"DAC", "LOP"};
+
+static const struct soc_enum dac33_left_lom_enum =
+	SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 3,
+			ARRAY_SIZE(dac33_lr_lom_texts),
+			dac33_lr_lom_texts);
+
+static const struct snd_kcontrol_new dac33_dapm_left_lom_control =
+SOC_DAPM_ENUM("Route", dac33_left_lom_enum);
+
+static const struct soc_enum dac33_right_lom_enum =
+	SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 2,
+			ARRAY_SIZE(dac33_lr_lom_texts),
+			dac33_lr_lom_texts);
+
+static const struct snd_kcontrol_new dac33_dapm_right_lom_control =
+SOC_DAPM_ENUM("Route", dac33_right_lom_enum);
+
 static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = {
 	SND_SOC_DAPM_OUTPUT("LEFT_LO"),
 	SND_SOC_DAPM_OUTPUT("RIGHT_LO"),
@@ -593,8 +552,8 @@
 	SND_SOC_DAPM_INPUT("LINEL"),
 	SND_SOC_DAPM_INPUT("LINER"),
 
-	SND_SOC_DAPM_DAC("DACL", "Left Playback", DAC33_LDAC_PWR_CTRL, 2, 0),
-	SND_SOC_DAPM_DAC("DACR", "Right Playback", DAC33_RDAC_PWR_CTRL, 2, 0),
+	SND_SOC_DAPM_DAC("DACL", "Left Playback", SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DACR", "Right Playback", SND_SOC_NOPM, 0, 0),
 
 	/* Analog bypass */
 	SND_SOC_DAPM_SWITCH("Analog Left Bypass", SND_SOC_NOPM, 0, 0,
@@ -602,12 +561,30 @@
 	SND_SOC_DAPM_SWITCH("Analog Right Bypass", SND_SOC_NOPM, 0, 0,
 				&dac33_dapm_abypassr_control),
 
-	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amp Power",
+	SND_SOC_DAPM_MUX("Left LOM Inverted From", SND_SOC_NOPM, 0, 0,
+		&dac33_dapm_left_lom_control),
+	SND_SOC_DAPM_MUX("Right LOM Inverted From", SND_SOC_NOPM, 0, 0,
+		&dac33_dapm_right_lom_control),
+	/*
+	 * These PGAs cover the DAPM path where only the analog bypass is
+	 * enabled and the LOP is inverted from the corresponding DAC side;
+	 * they are needed so the DAC power supply can still be attached.
+	 */
+	SND_SOC_DAPM_PGA("Left Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Right Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amplifier",
 			 DAC33_OUT_AMP_PWR_CTRL, 6, 3, 3, 0),
-	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amp Power",
+	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amplifier",
 			 DAC33_OUT_AMP_PWR_CTRL, 4, 3, 3, 0),
 
-	SND_SOC_DAPM_PRE("Prepare Playback", playback_event),
+	SND_SOC_DAPM_SUPPLY("Left DAC Power",
+			    DAC33_LDAC_PWR_CTRL, 2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("Right DAC Power",
+			    DAC33_RDAC_PWR_CTRL, 2, 0, NULL, 0),
+
+	SND_SOC_DAPM_PRE("Pre Playback", dac33_playback_event),
+	SND_SOC_DAPM_POST("Post Playback", dac33_playback_event),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -615,24 +592,39 @@
 	{"Analog Left Bypass", "Switch", "LINEL"},
 	{"Analog Right Bypass", "Switch", "LINER"},
 
-	{"Output Left Amp Power", NULL, "DACL"},
-	{"Output Right Amp Power", NULL, "DACR"},
+	{"Output Left Amplifier", NULL, "DACL"},
+	{"Output Right Amplifier", NULL, "DACR"},
 
-	{"Output Left Amp Power", NULL, "Analog Left Bypass"},
-	{"Output Right Amp Power", NULL, "Analog Right Bypass"},
+	{"Left Bypass PGA", NULL, "Analog Left Bypass"},
+	{"Right Bypass PGA", NULL, "Analog Right Bypass"},
+
+	{"Left LOM Inverted From", "DAC", "Left Bypass PGA"},
+	{"Right LOM Inverted From", "DAC", "Right Bypass PGA"},
+	{"Left LOM Inverted From", "LOP", "Analog Left Bypass"},
+	{"Right LOM Inverted From", "LOP", "Analog Right Bypass"},
+
+	{"Output Left Amplifier", NULL, "Left LOM Inverted From"},
+	{"Output Right Amplifier", NULL, "Right LOM Inverted From"},
+
+	{"DACL", NULL, "Left DAC Power"},
+	{"DACR", NULL, "Right DAC Power"},
+
+	{"Left Bypass PGA", NULL, "Left DAC Power"},
+	{"Right Bypass PGA", NULL, "Right DAC Power"},
 
 	/* output */
-	{"LEFT_LO", NULL, "Output Left Amp Power"},
-	{"RIGHT_LO", NULL, "Output Right Amp Power"},
+	{"LEFT_LO", NULL, "Output Left Amplifier"},
+	{"RIGHT_LO", NULL, "Output Right Amplifier"},
 };
 
 static int dac33_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, dac33_dapm_widgets,
-				  ARRAY_SIZE(dac33_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, dac33_dapm_widgets,
+				  ARRAY_SIZE(dac33_dapm_widgets));
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -640,16 +632,18 @@
 static int dac33_set_bias_level(struct snd_soc_codec *codec,
 				enum snd_soc_bias_level level)
 {
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		dac33_soft_power(codec, 1);
+		if (!dac33->substream)
+			dac33_soft_power(codec, 1);
 		break;
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Coming from OFF, switch on the codec */
 			ret = dac33_hard_power(codec, 1);
 			if (ret != 0)
@@ -660,14 +654,14 @@
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* Do not power off, when the codec is already off */
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			return 0;
 		ret = dac33_hard_power(codec, 0);
 		if (ret != 0)
 			return ret;
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -705,7 +699,7 @@
 		spin_unlock_irq(&dac33->lock);
 
 		dac33_write16(codec, DAC33_PREFILL_MSB,
-				DAC33_THRREG(MODE7_LTHR));
+				DAC33_THRREG(DAC33_MODE7_MARGIN));
 
 		/* Enable Upper Threshold IRQ */
 		dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MUT);
@@ -815,6 +809,8 @@
 	/* Stream started, save the substream pointer */
 	dac33->substream = substream;
 
+	snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
+
 	return 0;
 }
 
@@ -826,18 +822,17 @@
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
 	dac33->substream = NULL;
-
-	/* Reset the nSample restrictions */
-	dac33->nsample_min = 0;
-	dac33->nsample_max = NSAMPLE_MAX;
 }
 
+#define CALC_BURST_RATE(bclkdiv, bclk_per_sample) \
+	(BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample)
 static int dac33_hw_params(struct snd_pcm_substream *substream,
 			   struct snd_pcm_hw_params *params,
 			   struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
 	/* Check parameters for validity */
 	switch (params_rate(params)) {
@@ -852,6 +847,12 @@
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
+		dac33->fifo_size = DAC33_FIFO_SIZE_16BIT;
+		dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 32);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dac33->fifo_size = DAC33_FIFO_SIZE_24BIT;
+		dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 64);
 		break;
 	default:
 		dev_err(codec->dev, "unsupported format %d\n",
@@ -906,6 +907,9 @@
 		aictrl_a |= (DAC33_NCYCL_16 | DAC33_WLEN_16);
 		fifoctrl_a |= DAC33_WIDTH;
 		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aictrl_a |= (DAC33_NCYCL_32 | DAC33_WLEN_24);
+		break;
 	default:
 		dev_err(codec->dev, "unsupported format %d\n",
 			substream->runtime->format);
@@ -1040,7 +1044,10 @@
 		dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C,
 							dac33->burst_bclkdiv);
 	else
-		dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+		if (substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE)
+			dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+		else
+			dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 16);
 
 	switch (dac33->fifo_mode) {
 	case DAC33_FIFO_MODE1:
@@ -1053,7 +1060,8 @@
 		 * at the bottom, and also at the top of the FIFO
 		 */
 		dac33_write16(codec, DAC33_UTHR_MSB, DAC33_THRREG(dac33->uthr));
-		dac33_write16(codec, DAC33_LTHR_MSB, DAC33_THRREG(MODE7_LTHR));
+		dac33_write16(codec, DAC33_LTHR_MSB,
+			      DAC33_THRREG(DAC33_MODE7_MARGIN));
 		break;
 	default:
 		break;
@@ -1082,42 +1090,21 @@
 		/* Number of samples under i2c latency */
 		dac33->alarm_threshold = US_TO_SAMPLES(rate,
 						dac33->mode1_latency);
-		nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
-				dac33->alarm_threshold;
+		nsample_limit = dac33->fifo_size - dac33->alarm_threshold;
 
-		if (dac33->auto_fifo_config) {
-			if (period_size <= dac33->alarm_threshold)
-				/*
-				 * Configure nSamaple to number of periods,
-				 * which covers the latency requironment.
-				 */
-				dac33->nsample = period_size *
-				       ((dac33->alarm_threshold / period_size) +
-				       (dac33->alarm_threshold % period_size ?
-				       1 : 0));
-			else if (period_size > nsample_limit)
-				dac33->nsample = nsample_limit;
-			else
-				dac33->nsample = period_size;
-		} else {
-			/* nSample time shall not be shorter than i2c latency */
-			dac33->nsample_min = dac33->alarm_threshold;
+		if (period_size <= dac33->alarm_threshold)
 			/*
-			 * nSample should not be bigger than alsa buffer minus
-			 * size of one period to avoid overruns
+			 * Configure nSample to the number of periods
+			 * that covers the latency requirement.
 			 */
-			dac33->nsample_max = substream->runtime->buffer_size -
-						period_size;
-
-			if (dac33->nsample_max > nsample_limit)
-				dac33->nsample_max = nsample_limit;
-
-			/* Correct the nSample if it is outside of the ranges */
-			if (dac33->nsample < dac33->nsample_min)
-				dac33->nsample = dac33->nsample_min;
-			if (dac33->nsample > dac33->nsample_max)
-				dac33->nsample = dac33->nsample_max;
-		}
+			dac33->nsample = period_size *
+				((dac33->alarm_threshold / period_size) +
+				(dac33->alarm_threshold % period_size ?
+				1 : 0));
+		else if (period_size > nsample_limit)
+			dac33->nsample = nsample_limit;
+		else
+			dac33->nsample = period_size;
 
 		dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
 						      dac33->nsample);
@@ -1125,19 +1112,16 @@
 		dac33->t_stamp2 = 0;
 		break;
 	case DAC33_FIFO_MODE7:
-		if (dac33->auto_fifo_config) {
-			dac33->uthr = UTHR_FROM_PERIOD_SIZE(
-					period_size,
-					rate,
-					dac33->burst_rate) + 9;
-			if (dac33->uthr > MODE7_UTHR)
-				dac33->uthr = MODE7_UTHR;
-			if (dac33->uthr < (MODE7_LTHR + 10))
-				dac33->uthr = (MODE7_LTHR + 10);
-		}
+		dac33->uthr = UTHR_FROM_PERIOD_SIZE(period_size, rate,
+						    dac33->burst_rate) + 9;
+		if (dac33->uthr > (dac33->fifo_size - DAC33_MODE7_MARGIN))
+			dac33->uthr = dac33->fifo_size - DAC33_MODE7_MARGIN;
+		if (dac33->uthr < (DAC33_MODE7_MARGIN + 10))
+			dac33->uthr = (DAC33_MODE7_MARGIN + 10);
+
 		dac33->mode7_us_to_lthr =
 				SAMPLES_TO_US(substream->runtime->rate,
-					dac33->uthr - MODE7_LTHR + 1);
+					dac33->uthr - DAC33_MODE7_MARGIN + 1);
 		dac33->t_stamp1 = 0;
 		break;
 	default:
@@ -1255,8 +1239,8 @@
 			samples += (samples_in - samples_out);
 
 			if (likely(samples > 0))
-				delay = samples > DAC33_BUFFER_SIZE_SAMPLES ?
-					DAC33_BUFFER_SIZE_SAMPLES : samples;
+				delay = samples > dac33->fifo_size ?
+					dac33->fifo_size : samples;
 			else
 				delay = 0;
 		}
@@ -1308,7 +1292,7 @@
 			samples_in = US_TO_SAMPLES(
 					dac33->burst_rate,
 					time_delta);
-			delay = MODE7_LTHR + samples_in - samples_out;
+			delay = DAC33_MODE7_MARGIN + samples_in - samples_out;
 
 			if (unlikely(delay > uthr))
 				delay = uthr;
@@ -1415,7 +1399,7 @@
 
 	codec->control_data = dac33->control_data;
 	codec->hw_write = (hw_write_t) i2c_master_send;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 	dac33->codec = codec;
 
 	/* Read the tlv320dac33 ID registers */
@@ -1459,14 +1443,10 @@
 	snd_soc_add_controls(codec, dac33_snd_controls,
 			     ARRAY_SIZE(dac33_snd_controls));
 	/* Only add the FIFO controls, if we have valid IRQ number */
-	if (dac33->irq >= 0) {
+	if (dac33->irq >= 0)
 		snd_soc_add_controls(codec, dac33_mode_snd_controls,
 				     ARRAY_SIZE(dac33_mode_snd_controls));
-		/* FIFO usage controls only, if autoio config is not selected */
-		if (!dac33->auto_fifo_config)
-			snd_soc_add_controls(codec, dac33_fifo_snd_controls,
-					ARRAY_SIZE(dac33_fifo_snd_controls));
-	}
+
 	dac33_add_widgets(codec);
 
 err_power:
@@ -1515,7 +1495,7 @@
 
 #define DAC33_RATES	(SNDRV_PCM_RATE_44100 | \
 			 SNDRV_PCM_RATE_48000)
-#define DAC33_FORMATS	SNDRV_PCM_FMTBIT_S16_LE
+#define DAC33_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_ops dac33_dai_ops = {
 	.startup	= dac33_startup,
@@ -1563,17 +1543,11 @@
 
 	dac33->power_gpio = pdata->power_gpio;
 	dac33->burst_bclkdiv = pdata->burst_bclkdiv;
-	/* Pre calculate the burst rate */
-	dac33->burst_rate = BURST_BASEFREQ_HZ / dac33->burst_bclkdiv / 32;
 	dac33->keep_bclk = pdata->keep_bclk;
-	dac33->auto_fifo_config = pdata->auto_fifo_config;
 	dac33->mode1_latency = pdata->mode1_latency;
 	if (!dac33->mode1_latency)
 		dac33->mode1_latency = 10000; /* 10ms */
 	dac33->irq = client->irq;
-	dac33->nsample = NSAMPLE_MAX;
-	dac33->nsample_max = NSAMPLE_MAX;
-	dac33->uthr = MODE7_UTHR;
 	/* Disable FIFO use by default */
 	dac33->fifo_mode = DAC33_FIFO_BYPASS;
 
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index d2c2430..1f1ac81 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <sound/tpa6130a2-plat.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 
 #include "tpa6130a2.h"
@@ -42,7 +41,7 @@
 	unsigned char regs[TPA6130A2_CACHEREGNUM];
 	struct regulator *supply;
 	int power_gpio;
-	unsigned char power_state;
+	u8 power_state:1;
 	enum tpa_model id;
 };
 
@@ -117,7 +116,7 @@
 	return ret;
 }
 
-static int tpa6130a2_power(int power)
+static int tpa6130a2_power(u8 power)
 {
 	struct	tpa6130a2_data *data;
 	u8	val;
@@ -127,17 +126,19 @@
 	data = i2c_get_clientdata(tpa6130a2_client);
 
 	mutex_lock(&data->mutex);
-	if (power && !data->power_state) {
-		/* Power on */
-		if (data->power_gpio >= 0)
-			gpio_set_value(data->power_gpio, 1);
+	if (power == data->power_state)
+		goto exit;
 
+	if (power) {
 		ret = regulator_enable(data->supply);
 		if (ret != 0) {
 			dev_err(&tpa6130a2_client->dev,
 				"Failed to enable supply: %d\n", ret);
 			goto exit;
 		}
+		/* Power on */
+		if (data->power_gpio >= 0)
+			gpio_set_value(data->power_gpio, 1);
 
 		data->power_state = 1;
 		ret = tpa6130a2_initialize();
@@ -150,12 +151,7 @@
 			data->power_state = 0;
 			goto exit;
 		}
-
-		/* Clear SWS */
-		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
-		val &= ~TPA6130A2_SWS;
-		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-	} else if (!power && data->power_state) {
+	} else {
 		/* set SWS */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= TPA6130A2_SWS;
@@ -300,6 +296,7 @@
 		/* Enable amplifier */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= channel;
+		val &= ~TPA6130A2_SWS;
 		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
 
 		/* Unmute channel */
@@ -320,72 +317,24 @@
 	}
 }
 
-static int tpa6130a2_left_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tpa6130a2_right_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tpa6130a2_supply_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
+int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable)
 {
 	int ret = 0;
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
+	if (enable) {
 		ret = tpa6130a2_power(1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
+		if (ret < 0)
+			return ret;
+		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
+					 1);
+	} else {
+		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
+					 0);
 		ret = tpa6130a2_power(0);
-		break;
 	}
+
 	return ret;
 }
-
-static const struct snd_soc_dapm_widget tpa6130a2_dapm_widgets[] = {
-	SND_SOC_DAPM_PGA_E("TPA6130A2 Left", SND_SOC_NOPM,
-			0, 0, NULL, 0, tpa6130a2_left_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("TPA6130A2 Right", SND_SOC_NOPM,
-			0, 0, NULL, 0, tpa6130a2_right_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_SUPPLY("TPA6130A2 Enable", SND_SOC_NOPM,
-			0, 0, tpa6130a2_supply_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	/* Outputs */
-	SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Left"),
-	SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Right"),
-};
-
-static const struct snd_soc_dapm_route audio_map[] = {
-	{"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Left"},
-	{"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Right"},
-
-	{"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Enable"},
-	{"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Enable"},
-};
+EXPORT_SYMBOL_GPL(tpa6130a2_stereo_enable);
 
 int tpa6130a2_add_controls(struct snd_soc_codec *codec)
 {
@@ -396,18 +345,12 @@
 
 	data = i2c_get_clientdata(tpa6130a2_client);
 
-	snd_soc_dapm_new_controls(codec, tpa6130a2_dapm_widgets,
-				ARRAY_SIZE(tpa6130a2_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-
 	if (data->id == TPA6140A2)
 		return snd_soc_add_controls(codec, tpa6140a2_controls,
 						ARRAY_SIZE(tpa6140a2_controls));
 	else
 		return snd_soc_add_controls(codec, tpa6130a2_controls,
 						ARRAY_SIZE(tpa6130a2_controls));
-
 }
 EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
 
diff --git a/sound/soc/codecs/tpa6130a2.h b/sound/soc/codecs/tpa6130a2.h
index 57e867f..5df49c8 100644
--- a/sound/soc/codecs/tpa6130a2.h
+++ b/sound/soc/codecs/tpa6130a2.h
@@ -57,5 +57,6 @@
 #define TPA6130A2_VERSION_MASK		(0x0f)
 
 extern int tpa6130a2_add_controls(struct snd_soc_codec *codec);
+extern int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable);
 
 #endif /* __TPA6130A2_H__ */
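With the DAPM widgets dropped from the amplifier driver, powering the TPA6130A2/TPA6140A2 up and down becomes the machine driver's job through the newly exported helper. A sketch of a headphone DAPM event hook using it (machine-driver code, not part of this patch; the widget it is attached to and the include path are assumptions):

#include <sound/soc.h>
#include "tpa6130a2.h"	/* path depends on the machine driver's location */

static int example_hp_event(struct snd_soc_dapm_widget *w,
			    struct snd_kcontrol *k, int event)
{
	/* Enable on power-up events, disable on power-down events */
	return tpa6130a2_stereo_enable(w->codec, SND_SOC_DAPM_EVENT_ON(event));
}
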
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index cbebec6..e4d464b 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -32,7 +32,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -233,6 +232,16 @@
 	return 0;
 }
 
+static inline void twl4030_wait_ms(int time)
+{
+	if (time < 60) {
+		time *= 1000;
+		usleep_range(time, time + 500);
+	} else {
+		msleep(time);
+	}
+}
+
 static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable)
 {
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
@@ -338,10 +347,14 @@
 	twl4030_write(codec, TWL4030_REG_ANAMICL,
 		reg | TWL4030_CNCL_OFFSET_START);
 
-	/* wait for offset cancellation to complete */
+	/*
+	 * Wait for offset cancellation to complete.
+	 * Since this takes a while, do not slam the i2c.
+	 * Start polling the status after ~20ms.
+	 */
+	msleep(20);
 	do {
-		/* this takes a little while, so don't slam i2c */
-		udelay(2000);
+		usleep_range(1000, 2000);
 		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
 				    TWL4030_REG_ANAMICL);
 	} while ((i++ < 100) &&
@@ -725,9 +738,12 @@
 	/* Base values for ramp delay calculation: 2^19 - 2^26 */
 	unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
 				    8388608, 16777216, 33554432, 67108864};
+	unsigned int delay;
 
 	hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET);
 	hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
+	delay = (ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
+		twl4030->sysclk) + 1;
 
 	/* Enable external mute control, this dramatically reduces
 	 * the pop-noise */
@@ -751,16 +767,14 @@
 		hs_pop |= TWL4030_RAMP_EN;
 		twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
 		/* Wait ramp delay time + 1, so the VMID can settle */
-		mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
-			twl4030->sysclk) + 1);
+		twl4030_wait_ms(delay);
 	} else {
 		/* Headset ramp-down _not_ according to
 		 * the TRM, but in a way that it is working */
 		hs_pop &= ~TWL4030_RAMP_EN;
 		twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
 		/* Wait ramp delay time + 1, so the VMID can settle */
-		mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
-			twl4030->sysclk) + 1);
+		twl4030_wait_ms(delay);
 		/* Bypass the reg_cache to mute the headset */
 		twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
 					hs_gain & (~0x0f),
@@ -835,7 +849,7 @@
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
 
 	if (twl4030->digimic_delay)
-		mdelay(twl4030->digimic_delay);
+		twl4030_wait_ms(twl4030->digimic_delay);
 	return 0;
 }
 
@@ -1621,10 +1635,11 @@
 
 static int twl4030_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, twl4030_dapm_widgets,
-				 ARRAY_SIZE(twl4030_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, twl4030_dapm_widgets,
+				 ARRAY_SIZE(twl4030_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -1638,14 +1653,14 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			twl4030_codec_enable(codec, 1);
 		break;
 	case SND_SOC_BIAS_OFF:
 		twl4030_codec_enable(codec, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1709,6 +1724,7 @@
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
 
+	snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
 	if (twl4030->master_substream) {
 		twl4030->slave_substream = substream;
 		/* The DAI has one configuration for playback and capture, so
@@ -1833,7 +1849,7 @@
 	case SNDRV_PCM_FORMAT_S16_LE:
 		format |= TWL4030_DATA_WIDTH_16S_16W;
 		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S32_LE:
 		format |= TWL4030_DATA_WIDTH_32S_24W;
 		break;
 	default:
@@ -2166,7 +2182,7 @@
 }
 
 #define TWL4030_RATES	 (SNDRV_PCM_RATE_8000_48000)
-#define TWL4030_FORMATS	 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE)
+#define TWL4030_FORMATS	 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_ops twl4030_dai_hifi_ops = {
 	.startup	= twl4030_startup,
@@ -2245,7 +2261,7 @@
 	snd_soc_codec_set_drvdata(codec, twl4030);
 	/* Set the defaults, and power up the codec */
 	twl4030->sysclk = twl4030_codec_get_mclk() / 1000;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	twl4030_init_chip(codec);
 
@@ -2257,9 +2273,12 @@
 
 static int twl4030_soc_remove(struct snd_soc_codec *codec)
 {
+	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+
 	/* Reset registers to their chip default before leaving */
 	twl4030_reset_registers(codec);
 	twl4030_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	kfree(twl4030);
 	return 0;
 }
 
@@ -2291,10 +2310,7 @@
 
 static int __devexit twl4030_codec_remove(struct platform_device *pdev)
 {
-	struct twl4030_priv *twl4030 = dev_get_drvdata(&pdev->dev);
-
 	snd_soc_unregister_codec(&pdev->dev);
-	kfree(twl4030);
 	return 0;
 }
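The twl4030 hunks replace busy-waiting mdelay() calls with sleeps and hoist the ramp settle-time calculation out of the register writes. The arithmetic in isolation, as a standalone C sketch; sysclk is in kHz as in the driver, and 26 MHz is just an example value.

#include <stdio.h>

/* Ramp bases 2^19 .. 2^26, as in the driver */
static const unsigned int ramp_base[] = {
	524288, 1048576, 2097152, 4194304,
	8388608, 16777216, 33554432, 67108864,
};

/* Settle time in ms: ramp length in sysclk cycles over sysclk in kHz, +1 */
static unsigned int ramp_delay_ms(unsigned int ramp_sel, unsigned int sysclk_khz)
{
	return ramp_base[ramp_sel & 7] / sysclk_khz + 1;
}

/* Policy mirrored from twl4030_wait_ms(): short waits use a ranged
 * microsecond sleep, longer ones a millisecond sleep. */
static const char *wait_strategy(unsigned int ms)
{
	return ms < 60 ? "usleep_range(ms * 1000, ms * 1000 + 500)" : "msleep(ms)";
}

int main(void)
{
	unsigned int ms = ramp_delay_ms(2, 26000);	/* e.g. 26 MHz sysclk */
	printf("%u ms -> %s\n", ms, wait_strategy(ms));
	return 0;
}
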
 
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 10f6e52..4bbf1b1 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -34,14 +34,46 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
 #include "twl6040.h"
 
-#define TWL6040_RATES	 (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-#define TWL6040_FORMATS	 (SNDRV_PCM_FMTBIT_S32_LE)
+#define TWL6040_RATES		SNDRV_PCM_RATE_8000_96000
+#define TWL6040_FORMATS	(SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TWL6040_OUTHS_0dB 0x00
+#define TWL6040_OUTHS_M30dB 0x0F
+#define TWL6040_OUTHF_0dB 0x03
+#define TWL6040_OUTHF_M52dB 0x1D
+
+#define TWL6040_RAMP_NONE	0
+#define TWL6040_RAMP_UP		1
+#define TWL6040_RAMP_DOWN	2
+
+#define TWL6040_HSL_VOL_MASK	0x0F
+#define TWL6040_HSL_VOL_SHIFT	0
+#define TWL6040_HSR_VOL_MASK	0xF0
+#define TWL6040_HSR_VOL_SHIFT	4
+#define TWL6040_HF_VOL_MASK	0x1F
+#define TWL6040_HF_VOL_SHIFT	0
+
+struct twl6040_output {
+	u16 active;
+	u16 left_vol;
+	u16 right_vol;
+	u16 left_step;
+	u16 right_step;
+	unsigned int step_delay;
+	u16 ramp;
+	u16 mute;
+	struct completion ramp_done;
+};
+
+struct twl6040_jack_data {
+	struct snd_soc_jack *jack;
+	int report;
+};
 
 /* codec private data */
 struct twl6040_data {
@@ -53,6 +85,17 @@
 	unsigned int sysclk;
 	struct snd_pcm_hw_constraint_list *sysclk_constraints;
 	struct completion ready;
+	struct twl6040_jack_data hs_jack;
+	struct snd_soc_codec *codec;
+	struct workqueue_struct *workqueue;
+	struct delayed_work delayed_work;
+	struct mutex mutex;
+	struct twl6040_output headset;
+	struct twl6040_output handsfree;
+	struct workqueue_struct *hf_workqueue;
+	struct workqueue_struct *hs_workqueue;
+	struct delayed_work hs_delayed_work;
+	struct delayed_work hf_delayed_work;
 };
 
 /*
@@ -201,7 +244,7 @@
 	if (reg >= TWL6040_CACHEREGNUM)
 		return -EIO;
 
-	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &value, reg);
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &value, reg);
 	twl6040_write_reg_cache(codec, reg, value);
 
 	return value;
@@ -217,7 +260,7 @@
 		return -EIO;
 
 	twl6040_write_reg_cache(codec, reg, value);
-	return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg);
+	return twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, value, reg);
 }
 
 static void twl6040_init_vio_regs(struct snd_soc_codec *codec)
@@ -254,6 +297,305 @@
 	}
 }
 
+/*
+ * Ramp HS PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_hs_ramp_step(struct snd_soc_codec *codec,
+			unsigned int left_step, unsigned int right_step)
+{
+
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *headset = &priv->headset;
+	int left_complete = 0, right_complete = 0;
+	u8 reg, val;
+
+	/* left channel */
+	left_step = (left_step > 0xF) ? 0xF : left_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN);
+	val = (~reg & TWL6040_HSL_VOL_MASK);
+
+	if (headset->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < headset->left_vol) {
+			val += left_step;
+			reg &= ~TWL6040_HSL_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+					(reg | (~val & TWL6040_HSL_VOL_MASK)));
+		} else {
+			left_complete = 1;
+		}
+	} else if (headset->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0x0) {
+			val -= left_step;
+			reg &= ~TWL6040_HSL_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN, reg |
+						(~val & TWL6040_HSL_VOL_MASK));
+		} else {
+			left_complete = 1;
+		}
+	}
+
+	/* right channel */
+	right_step = (right_step > 0xF) ? 0xF : right_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN);
+	val = (~reg & TWL6040_HSR_VOL_MASK) >> TWL6040_HSR_VOL_SHIFT;
+
+	if (headset->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < headset->right_vol) {
+			val += right_step;
+			reg &= ~TWL6040_HSR_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+				(reg | (~val << TWL6040_HSR_VOL_SHIFT)));
+		} else {
+			right_complete = 1;
+		}
+	} else if (headset->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0x0) {
+			val -= right_step;
+			reg &= ~TWL6040_HSR_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+					 reg | (~val << TWL6040_HSR_VOL_SHIFT));
+		} else {
+			right_complete = 1;
+		}
+	}
+
+	return left_complete & right_complete;
+}
+
+/*
+ * Ramp HF PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_hf_ramp_step(struct snd_soc_codec *codec,
+			unsigned int left_step, unsigned int right_step)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *handsfree = &priv->handsfree;
+	int left_complete = 0, right_complete = 0;
+	u16 reg, val;
+
+	/* left channel */
+	left_step = (left_step > 0x1D) ? 0x1D : left_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFLGAIN);
+	reg = 0x1D - reg;
+	val = (reg & TWL6040_HF_VOL_MASK);
+	if (handsfree->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < handsfree->left_vol) {
+			val += left_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFLGAIN,
+						reg | (0x1D - val));
+		} else {
+			left_complete = 1;
+		}
+	} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0) {
+			val -= left_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFLGAIN,
+						reg | (0x1D - val));
+		} else {
+			left_complete = 1;
+		}
+	}
+
+	/* right channel */
+	right_step = (right_step > 0x1D) ? 0x1D : right_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFRGAIN);
+	reg = 0x1D - reg;
+	val = (reg & TWL6040_HF_VOL_MASK);
+	if (handsfree->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < handsfree->right_vol) {
+			val += right_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFRGAIN,
+						reg | (0x1D - val));
+		} else {
+			right_complete = 1;
+		}
+	} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0) {
+			val -= right_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFRGAIN,
+						reg | (0x1D - val));
+		}
+	}
+
+	return left_complete & right_complete;
+}
+
+/*
+ * This work ramps both output PGAs at stream start/stop time to
+ * minimise pop associated with DAPM power switching.
+ */
+static void twl6040_pga_hs_work(struct work_struct *work)
+{
+	struct twl6040_data *priv =
+		container_of(work, struct twl6040_data, hs_delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_output *headset = &priv->headset;
+	unsigned int delay = headset->step_delay;
+	int i, headset_complete;
+
+	/* do we need to ramp at all ? */
+	if (headset->ramp == TWL6040_RAMP_NONE)
+		return;
+
+	/* HS PGA volumes have 4 bits of resolution to ramp */
+	for (i = 0; i <= 16; i++) {
+		headset_complete = 1;
+		if (headset->ramp != TWL6040_RAMP_NONE)
+			headset_complete = twl6040_hs_ramp_step(codec,
+							headset->left_step,
+							headset->right_step);
+
+		/* ramp finished ? */
+		if (headset_complete)
+			break;
+
+		/*
+		 * TODO: tune: delay is longer over 0dB
+		 * as increases are larger.
+		 */
+		if (i >= 8)
+			schedule_timeout_interruptible(msecs_to_jiffies(delay +
+							(delay >> 1)));
+		else
+			schedule_timeout_interruptible(msecs_to_jiffies(delay));
+	}
+
+	if (headset->ramp == TWL6040_RAMP_DOWN) {
+		headset->active = 0;
+		complete(&headset->ramp_done);
+	} else {
+		headset->active = 1;
+	}
+	headset->ramp = TWL6040_RAMP_NONE;
+}
+
+static void twl6040_pga_hf_work(struct work_struct *work)
+{
+	struct twl6040_data *priv =
+		container_of(work, struct twl6040_data, hf_delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_output *handsfree = &priv->handsfree;
+	unsigned int delay = handsfree->step_delay;
+	int i, handsfree_complete;
+
+	/* do we need to ramp at all ? */
+	if (handsfree->ramp == TWL6040_RAMP_NONE)
+		return;
+
+	/* HF PGA volumes have 5 bits of resolution to ramp */
+	for (i = 0; i <= 32; i++) {
+		handsfree_complete = 1;
+		if (handsfree->ramp != TWL6040_RAMP_NONE)
+			handsfree_complete = twl6040_hf_ramp_step(codec,
+							handsfree->left_step,
+							handsfree->right_step);
+
+		/* ramp finished ? */
+		if (handsfree_complete)
+			break;
+
+		/*
+		 * TODO: tune: delay is longer over 0dB
+		 * as increases are larger.
+		 */
+		if (i >= 16)
+			schedule_timeout_interruptible(msecs_to_jiffies(delay +
+						       (delay >> 1)));
+		else
+			schedule_timeout_interruptible(msecs_to_jiffies(delay));
+	}
+
+
+	if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		handsfree->active = 0;
+		complete(&handsfree->ramp_done);
+	} else
+		handsfree->active = 1;
+	handsfree->ramp = TWL6040_RAMP_NONE;
+}
+
+static int pga_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out;
+	struct delayed_work *work;
+	struct workqueue_struct *queue;
+
+	switch (w->shift) {
+	case 2:
+	case 3:
+		out = &priv->headset;
+		work = &priv->hs_delayed_work;
+		queue = priv->hs_workqueue;
+		out->step_delay = 5;	/* 5 ms between volume ramp steps */
+		break;
+	case 4:
+		out = &priv->handsfree;
+		work = &priv->hf_delayed_work;
+		queue = priv->hf_workqueue;
+		out->step_delay = 5;	/* 5 ms between volume ramp steps */
+		if (SND_SOC_DAPM_EVENT_ON(event))
+			priv->non_lp++;
+		else
+			priv->non_lp--;
+		break;
+	default:
+		return -1;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (out->active)
+			break;
+
+		/* don't use volume ramp for power-up */
+		out->left_step = out->left_vol;
+		out->right_step = out->right_vol;
+
+		if (!delayed_work_pending(work)) {
+			out->ramp = TWL6040_RAMP_UP;
+			queue_delayed_work(queue, work,
+					msecs_to_jiffies(1));
+		}
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		if (!out->active)
+			break;
+
+		if (!delayed_work_pending(work)) {
+			/* use volume ramp for power-down */
+			out->left_step = 1;
+			out->right_step = 1;
+			out->ramp = TWL6040_RAMP_DOWN;
+			INIT_COMPLETION(out->ramp_done);
+
+			queue_delayed_work(queue, work,
+					msecs_to_jiffies(1));
+
+			wait_for_completion_timeout(&out->ramp_done,
+					msecs_to_jiffies(2000));
+		}
+		break;
+	}
+
+	return 0;
+}
+
 /* twl6040 codec manual power-up sequence */
 static void twl6040_power_up(struct snd_soc_codec *codec)
 {
@@ -382,6 +724,47 @@
 	return 0;
 }
 
+void twl6040_hs_jack_report(struct snd_soc_codec *codec,
+				struct snd_soc_jack *jack, int report)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	int status;
+
+	mutex_lock(&priv->mutex);
+
+	/* Sync status */
+	status = twl6040_read_reg_volatile(codec, TWL6040_REG_STATUS);
+	if (status & TWL6040_PLUGCOMP)
+		snd_soc_jack_report(jack, report, report);
+	else
+		snd_soc_jack_report(jack, 0, report);
+
+	mutex_unlock(&priv->mutex);
+}
+
+void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
+				struct snd_soc_jack *jack, int report)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_jack_data *hs_jack = &priv->hs_jack;
+
+	hs_jack->jack = jack;
+	hs_jack->report = report;
+
+	twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report);
+}
+EXPORT_SYMBOL_GPL(twl6040_hs_jack_detect);
+
+static void twl6040_accessory_work(struct work_struct *work)
+{
+	struct twl6040_data *priv = container_of(work,
+					struct twl6040_data, delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_jack_data *hs_jack = &priv->hs_jack;
+
+	twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report);
+}
+
 /* audio interrupt handler */
 static irqreturn_t twl6040_naudint_handler(int irq, void *data)
 {
@@ -389,33 +772,180 @@
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 	u8 intid;
 
-	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
 
-	switch (intid) {
-	case TWL6040_THINT:
+	if (intid & TWL6040_THINT)
 		dev_alert(codec->dev, "die temp over-limit detection\n");
-		break;
-	case TWL6040_PLUGINT:
-	case TWL6040_UNPLUGINT:
-	case TWL6040_HOOKINT:
-		break;
-	case TWL6040_HFINT:
+
+	if ((intid & TWL6040_PLUGINT) || (intid & TWL6040_UNPLUGINT))
+		queue_delayed_work(priv->workqueue, &priv->delayed_work,
+							msecs_to_jiffies(200));
+
+	if (intid & TWL6040_HOOKINT)
+		dev_info(codec->dev, "hook detection\n");
+
+	if (intid & TWL6040_HFINT)
 		dev_alert(codec->dev, "hf drivers over current detection\n");
-		break;
-	case TWL6040_VIBINT:
+
+	if (intid & TWL6040_VIBINT)
 		dev_alert(codec->dev, "vib drivers over current detection\n");
-		break;
-	case TWL6040_READYINT:
+
+	if (intid & TWL6040_READYINT)
 		complete(&priv->ready);
-		break;
-	default:
-		dev_err(codec->dev, "unknown audio interrupt %d\n", intid);
-		break;
-	}
 
 	return IRQ_HANDLED;
 }
 
+static int twl6040_put_volsw(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = NULL;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int ret;
+	unsigned int reg = mc->reg;
+
+	/* For HS and HF we shadow the values and only actually write
+	 * them out when active in order to ensure the amplifier comes on
+	 * as quietly as possible. */
+	switch (reg) {
+	case TWL6040_REG_HSGAIN:
+		out = &twl6040_priv->headset;
+		break;
+	default:
+		break;
+	}
+
+	if (out) {
+		out->left_vol = ucontrol->value.integer.value[0];
+		out->right_vol = ucontrol->value.integer.value[1];
+		if (!out->active)
+			return 1;
+	}
+
+	ret = snd_soc_put_volsw(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	return 1;
+}
+
+static int twl6040_get_volsw(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = &twl6040_priv->headset;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int reg = mc->reg;
+
+	switch (reg) {
+	case TWL6040_REG_HSGAIN:
+		out = &twl6040_priv->headset;
+		ucontrol->value.integer.value[0] = out->left_vol;
+		ucontrol->value.integer.value[1] = out->right_vol;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return snd_soc_get_volsw(kcontrol, ucontrol);
+}
+
+static int twl6040_put_volsw_2r_vu(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = NULL;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int ret;
+	unsigned int reg = mc->reg;
+
+	/* For HS and HF we shadow the values and only actually write
+	 * them out when active in order to ensure the amplifier comes on
+	 * as quietly as possible. */
+	switch (reg) {
+	case TWL6040_REG_HFLGAIN:
+	case TWL6040_REG_HFRGAIN:
+		out = &twl6040_priv->handsfree;
+		break;
+	default:
+		break;
+	}
+
+	if (out) {
+		out->left_vol = ucontrol->value.integer.value[0];
+		out->right_vol = ucontrol->value.integer.value[1];
+		if (!out->active)
+			return 1;
+	}
+
+	ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	return 1;
+}
+
+static int twl6040_get_volsw_2r(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = &twl6040_priv->handsfree;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int reg = mc->reg;
+
+	/* If these are cached registers use the cache */
+	switch (reg) {
+	case TWL6040_REG_HFLGAIN:
+	case TWL6040_REG_HFRGAIN:
+		out = &twl6040_priv->handsfree;
+		ucontrol->value.integer.value[0] = out->left_vol;
+		ucontrol->value.integer.value[1] = out->right_vol;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return snd_soc_get_volsw_2r(kcontrol, ucontrol);
+}
+
+/* double control with volume update */
+#define SOC_TWL6040_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax,\
+							xinvert, tlv_array)\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+		 SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+	.tlv.p = (tlv_array), \
+	.info = snd_soc_info_volsw, .get = twl6040_get_volsw, \
+	.put = twl6040_put_volsw, \
+	.private_value = (unsigned long)&(struct soc_mixer_control) \
+		{.reg = xreg, .shift = shift_left, .rshift = shift_right,\
+		 .max = xmax, .platform_max = xmax, .invert = xinvert} }
+
+/* double control with volume update; left and right in separate registers */
+#define SOC_TWL6040_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax,\
+				xinvert, tlv_array)\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+		SNDRV_CTL_ELEM_ACCESS_READWRITE | \
+		SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+	.tlv.p = (tlv_array), \
+	.info = snd_soc_info_volsw_2r, \
+	.get = twl6040_get_volsw_2r, .put = twl6040_put_volsw_2r_vu, \
+	.private_value = (unsigned long)&(struct soc_mixer_control) \
+		{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
+		 .rshift = xshift, .max = xmax, .invert = xinvert}, }
+
 /*
  * MICATT volume control:
  * from -6 to 0 dB in 6 dB steps
@@ -424,9 +954,15 @@
 
 /*
  * MICGAIN volume control:
- * from 6 to 30 dB in 6 dB steps
+ * from -6 to 30 dB in 6 dB steps
  */
-static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0);
+static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -600, 600, 0);
+
+/*
+ * AFMGAIN volume control:
+ * from 18 to 24 dB in 6 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(afm_amp_tlv, 1800, 600, 0);
 
 /*
  * HSGAIN volume control:
@@ -455,8 +991,30 @@
 	{"Headset Mic", "Sub Mic", "Aux/FM Right", "Off"};
 
 static const struct soc_enum twl6040_enum[] = {
-	SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 3, twl6040_amicl_texts),
-	SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 3, twl6040_amicr_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 4, twl6040_amicl_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 4, twl6040_amicr_texts),
+};
+
+static const char *twl6040_hs_texts[] = {
+	"Off", "HS DAC", "Line-In amp"
+};
+
+static const struct soc_enum twl6040_hs_enum[] = {
+	SOC_ENUM_SINGLE(TWL6040_REG_HSLCTL, 5, ARRAY_SIZE(twl6040_hs_texts),
+			twl6040_hs_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_HSRCTL, 5, ARRAY_SIZE(twl6040_hs_texts),
+			twl6040_hs_texts),
+};
+
+static const char *twl6040_hf_texts[] = {
+	"Off", "HF DAC", "Line-In amp"
+};
+
+static const struct soc_enum twl6040_hf_enum[] = {
+	SOC_ENUM_SINGLE(TWL6040_REG_HFLCTL, 2, ARRAY_SIZE(twl6040_hf_texts),
+			twl6040_hf_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_HFRCTL, 2, ARRAY_SIZE(twl6040_hf_texts),
+			twl6040_hf_texts),
 };
 
 static const struct snd_kcontrol_new amicl_control =
@@ -466,18 +1024,18 @@
 	SOC_DAPM_ENUM("Route", twl6040_enum[1]);
 
 /* Headset DAC playback switches */
-static const struct snd_kcontrol_new hsdacl_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSLCTL, 5, 1, 0);
+static const struct snd_kcontrol_new hsl_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hs_enum[0]);
 
-static const struct snd_kcontrol_new hsdacr_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSRCTL, 5, 1, 0);
+static const struct snd_kcontrol_new hsr_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hs_enum[1]);
 
 /* Handsfree DAC playback switches */
-static const struct snd_kcontrol_new hfdacl_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 2, 1, 0);
+static const struct snd_kcontrol_new hfl_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hf_enum[0]);
 
-static const struct snd_kcontrol_new hfdacr_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFRCTL, 2, 1, 0);
+static const struct snd_kcontrol_new hfr_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]);
 
 static const struct snd_kcontrol_new ep_driver_switch_controls =
 	SOC_DAPM_SINGLE("Switch", TWL6040_REG_EARCTL, 0, 1, 0);
@@ -489,10 +1047,14 @@
 	SOC_DOUBLE_TLV("Capture Volume",
 		TWL6040_REG_MICGAIN, 0, 3, 4, 0, mic_amp_tlv),
 
+	/* AFM gains */
+	SOC_DOUBLE_TLV("Aux FM Volume",
+		TWL6040_REG_LINEGAIN, 0, 4, 0xF, 0, afm_amp_tlv),
+
 	/* Playback gains */
-	SOC_DOUBLE_TLV("Headset Playback Volume",
+	SOC_TWL6040_DOUBLE_TLV("Headset Playback Volume",
 		TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv),
-	SOC_DOUBLE_R_TLV("Handsfree Playback Volume",
+	SOC_TWL6040_DOUBLE_R_TLV("Handsfree Playback Volume",
 		TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv),
 	SOC_SINGLE_TLV("Earphone Playback Volume",
 		TWL6040_REG_EARCTL, 1, 0xF, 1, ep_tlv),
@@ -525,6 +1087,12 @@
 	SND_SOC_DAPM_PGA("MicAmpR",
 			TWL6040_REG_MICRCTL, 0, 0, NULL, 0),
 
+	/* Auxiliary FM PGAs */
+	SND_SOC_DAPM_PGA("AFMAmpL",
+			TWL6040_REG_MICLCTL, 1, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("AFMAmpR",
+			TWL6040_REG_MICRCTL, 1, 0, NULL, 0),
+
 	/* ADCs */
 	SND_SOC_DAPM_ADC("ADC Left", "Left Front Capture",
 			TWL6040_REG_MICLCTL, 2, 0),
@@ -559,29 +1127,33 @@
 			twl6040_power_mode_event,
 			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-	/* Analog playback switches */
-	SND_SOC_DAPM_SWITCH("HSDAC Left Playback",
-			SND_SOC_NOPM, 0, 0, &hsdacl_switch_controls),
-	SND_SOC_DAPM_SWITCH("HSDAC Right Playback",
-			SND_SOC_NOPM, 0, 0, &hsdacr_switch_controls),
-	SND_SOC_DAPM_SWITCH("HFDAC Left Playback",
-			SND_SOC_NOPM, 0, 0, &hfdacl_switch_controls),
-	SND_SOC_DAPM_SWITCH("HFDAC Right Playback",
-			SND_SOC_NOPM, 0, 0, &hfdacr_switch_controls),
+	SND_SOC_DAPM_MUX("HF Left Playback",
+			SND_SOC_NOPM, 0, 0, &hfl_mux_controls),
+	SND_SOC_DAPM_MUX("HF Right Playback",
+			SND_SOC_NOPM, 0, 0, &hfr_mux_controls),
+	/* Analog playback Muxes */
+	SND_SOC_DAPM_MUX("HS Left Playback",
+			SND_SOC_NOPM, 0, 0, &hsl_mux_controls),
+	SND_SOC_DAPM_MUX("HS Right Playback",
+			SND_SOC_NOPM, 0, 0, &hsr_mux_controls),
 
 	/* Analog playback drivers */
-	SND_SOC_DAPM_PGA_E("Handsfree Left Driver",
+	SND_SOC_DAPM_OUT_DRV_E("Handsfree Left Driver",
 			TWL6040_REG_HFLCTL, 4, 0, NULL, 0,
-			twl6040_power_mode_event,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("Handsfree Right Driver",
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Handsfree Right Driver",
 			TWL6040_REG_HFRCTL, 4, 0, NULL, 0,
-			twl6040_power_mode_event,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA("Headset Left Driver",
-			TWL6040_REG_HSLCTL, 2, 0, NULL, 0),
-	SND_SOC_DAPM_PGA("Headset Right Driver",
-			TWL6040_REG_HSRCTL, 2, 0, NULL, 0),
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Headset Left Driver",
+			TWL6040_REG_HSLCTL, 2, 0, NULL, 0,
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Headset Right Driver",
+			TWL6040_REG_HSRCTL, 2, 0, NULL, 0,
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 	SND_SOC_DAPM_SWITCH_E("Earphone Driver",
 			SND_SOC_NOPM, 0, 0, &ep_driver_switch_controls,
 			twl6040_power_mode_event,
@@ -611,12 +1183,18 @@
 	{"ADC Left", NULL, "MicAmpL"},
 	{"ADC Right", NULL, "MicAmpR"},
 
-	/* Headset playback path */
-	{"HSDAC Left Playback", "Switch", "HSDAC Left"},
-	{"HSDAC Right Playback", "Switch", "HSDAC Right"},
+	/* AFM path */
+	{"AFMAmpL", NULL, "AFML"},
+	{"AFMAmpR", NULL, "AFMR"},
 
-	{"Headset Left Driver", NULL, "HSDAC Left Playback"},
-	{"Headset Right Driver", NULL, "HSDAC Right Playback"},
+	{"HS Left Playback", "HS DAC", "HSDAC Left"},
+	{"HS Left Playback", "Line-In amp", "AFMAmpL"},
+
+	{"HS Right Playback", "HS DAC", "HSDAC Right"},
+	{"HS Right Playback", "Line-In amp", "AFMAmpR"},
+
+	{"Headset Left Driver", NULL, "HS Left Playback"},
+	{"Headset Right Driver", NULL, "HS Right Playback"},
 
 	{"HSOL", NULL, "Headset Left Driver"},
 	{"HSOR", NULL, "Headset Right Driver"},
@@ -625,12 +1203,14 @@
 	{"Earphone Driver", "Switch", "HSDAC Left"},
 	{"EP", NULL, "Earphone Driver"},
 
-	/* Handsfree playback path */
-	{"HFDAC Left Playback", "Switch", "HFDAC Left"},
-	{"HFDAC Right Playback", "Switch", "HFDAC Right"},
+	{"HF Left Playback", "HF DAC", "HFDAC Left"},
+	{"HF Left Playback", "Line-In amp", "AFMAmpL"},
 
-	{"HFDAC Left PGA", NULL, "HFDAC Left Playback"},
-	{"HFDAC Right PGA", NULL, "HFDAC Right Playback"},
+	{"HF Right Playback", "HF DAC", "HFDAC Right"},
+	{"HF Right Playback", "Line-In amp", "AFMAmpR"},
+
+	{"HFDAC Left PGA", NULL, "HF Left Playback"},
+	{"HFDAC Right PGA", NULL, "HF Right Playback"},
 
 	{"Handsfree Left Driver", "Switch", "HFDAC Left PGA"},
 	{"Handsfree Right Driver", "Switch", "HFDAC Right PGA"},
@@ -641,12 +1221,12 @@
 
 static int twl6040_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, twl6040_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, twl6040_dapm_widgets,
 				 ARRAY_SIZE(twl6040_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
-
-	snd_soc_dapm_new_widgets(codec);
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(dapm);
 
 	return 0;
 }
@@ -659,10 +1239,10 @@
 	u8 intid;
 
 	time_left = wait_for_completion_timeout(&priv->ready,
-				msecs_to_jiffies(48));
+				msecs_to_jiffies(144));
 
 	if (!time_left) {
-		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid,
+		twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid,
 							TWL6040_REG_INTID);
 		if (!(intid & TWL6040_READYINT)) {
 			dev_err(codec->dev, "timeout waiting for READYINT\n");
@@ -713,6 +1293,15 @@
 
 		/* initialize vdd/vss registers with reg_cache */
 		twl6040_init_vdd_regs(codec);
+
+		/* Set external boost GPO */
+		twl6040_write(codec, TWL6040_REG_GPOCTL, 0x02);
+
+		/* Set initial minimal gain values */
+		twl6040_write(codec, TWL6040_REG_HSGAIN, 0xFF);
+		twl6040_write(codec, TWL6040_REG_EARCTL, 0x1E);
+		twl6040_write(codec, TWL6040_REG_HFLGAIN, 0x1D);
+		twl6040_write(codec, TWL6040_REG_HFRGAIN, 0x1D);
 		break;
 	case SND_SOC_BIAS_OFF:
 		if (!priv->codec_powered)
@@ -739,7 +1328,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -772,23 +1361,6 @@
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-	if (!priv->sysclk) {
-		dev_err(codec->dev,
-			"no mclk configured, call set_sysclk() on init\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * capture is not supported at 17.64 MHz,
-	 * it's reserved for headset low-power playback scenario
-	 */
-	if ((priv->sysclk == 17640000) && substream->stream) {
-		dev_err(codec->dev,
-			"capture mode is not supported at %dHz\n",
-			priv->sysclk);
-		return -EINVAL;
-	}
-
 	snd_pcm_hw_constraint_list(substream->runtime, 0,
 				SNDRV_PCM_HW_PARAM_RATE,
 				priv->sysclk_constraints);
@@ -814,10 +1386,17 @@
 
 	rate = params_rate(params);
 	switch (rate) {
+	case 11025:
+	case 22050:
+	case 44100:
 	case 88200:
 		lppllctl |= TWL6040_LPLLFIN;
 		priv->sysclk = 17640000;
 		break;
+	case 8000:
+	case 16000:
+	case 32000:
+	case 48000:
 	case 96000:
 		lppllctl &= ~TWL6040_LPLLFIN;
 		priv->sysclk = 19200000;
@@ -832,31 +1411,37 @@
 	return 0;
 }
 
-static int twl6040_trigger(struct snd_pcm_substream *substream,
-			int cmd, struct snd_soc_dai *dai)
+static int twl6040_prepare(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-		/*
-		 * low-power playback mode is restricted
-		 * for headset path only
-		 */
-		if ((priv->sysclk == 17640000) && priv->non_lp) {
+	if (!priv->sysclk) {
+		dev_err(codec->dev,
+			"no mclk configured, call set_sysclk() on init\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * capture is not supported at 17.64 MHz,
+	 * it's reserved for headset low-power playback scenario
+	 */
+	if ((priv->sysclk == 17640000) &&
+			substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		dev_err(codec->dev,
+			"capture mode is not supported at %dHz\n",
+			priv->sysclk);
+		return -EINVAL;
+	}
+
+	if ((priv->sysclk == 17640000) && priv->non_lp) {
 			dev_err(codec->dev,
 				"some enabled paths aren't supported at %dHz\n",
 				priv->sysclk);
 			return -EPERM;
-		}
-		break;
-	default:
-		break;
 	}
-
 	return 0;
 }
 
@@ -970,7 +1555,7 @@
 static struct snd_soc_dai_ops twl6040_dai_ops = {
 	.startup	= twl6040_startup,
 	.hw_params	= twl6040_hw_params,
-	.trigger	= twl6040_trigger,
+	.prepare	= twl6040_prepare,
 	.set_sysclk	= twl6040_set_dai_sysclk,
 };
 
@@ -1004,6 +1589,7 @@
 static int twl6040_resume(struct snd_soc_codec *codec)
 {
 	twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level);
 
 	return 0;
 }
@@ -1018,24 +1604,41 @@
 	struct twl6040_data *priv;
 	int audpwron, naudint;
 	int ret = 0;
+	u8 icrev, intmr = TWL6040_ALLINT_MSK;
 
 	priv = kzalloc(sizeof(struct twl6040_data), GFP_KERNEL);
 	if (priv == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, priv);
 
-	if (twl_codec) {
+	priv->codec = codec;
+
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &icrev, TWL6040_REG_ASICREV);
+
+	if (twl_codec && (icrev > 0))
 		audpwron = twl_codec->audpwron_gpio;
-		naudint = twl_codec->naudint_irq;
-	} else {
+	else
 		audpwron = -EINVAL;
+
+	if (twl_codec)
+		naudint = twl_codec->naudint_irq;
+	else
 		naudint = 0;
-	}
 
 	priv->audpwron = audpwron;
 	priv->naudint = naudint;
+	priv->workqueue = create_singlethread_workqueue("twl6040-codec");
+
+	if (!priv->workqueue) {
+		ret = -ENOMEM;
+		goto work_err;
+	}
+
+	INIT_DELAYED_WORK(&priv->delayed_work, twl6040_accessory_work);
+
+	mutex_init(&priv->mutex);
 
 	init_completion(&priv->ready);
+	init_completion(&priv->headset.ramp_done);
+	init_completion(&priv->handsfree.ramp_done);
 
 	if (gpio_is_valid(audpwron)) {
 		ret = gpio_request(audpwron, "audpwron");
@@ -1047,7 +1650,14 @@
 			goto gpio2_err;
 
 		priv->codec_powered = 0;
+
+		/* enable only codec ready interrupt */
+		intmr &= ~(TWL6040_READYMSK | TWL6040_PLUGMSK);
+
+		/* reset interrupt status to allow correct power up sequence */
+		twl6040_read_reg_volatile(codec, TWL6040_REG_INTID);
 	}
+	twl6040_write(codec, TWL6040_REG_INTMR, intmr);
 
 	if (naudint) {
 		/* audio interrupt */
@@ -1057,25 +1667,29 @@
 				"twl6040_codec", codec);
 		if (ret)
 			goto gpio2_err;
-	} else {
-		if (gpio_is_valid(audpwron)) {
-			/* enable only codec ready interrupt */
-			twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
-					~TWL6040_READYMSK & TWL6040_ALLINT_MSK);
-		} else {
-			/* no interrupts at all */
-			twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
-						TWL6040_ALLINT_MSK);
-		}
 	}
 
 	/* init vio registers */
 	twl6040_init_vio_regs(codec);
 
+	priv->hf_workqueue = create_singlethread_workqueue("twl6040-hf");
+	if (priv->hf_workqueue == NULL) {
+		ret = -ENOMEM;
+		goto irq_err;
+	}
+	priv->hs_workqueue = create_singlethread_workqueue("twl6040-hs");
+	if (priv->hs_workqueue == NULL) {
+		ret = -ENOMEM;
+		goto wq_err;
+	}
+
+	INIT_DELAYED_WORK(&priv->hs_delayed_work, twl6040_pga_hs_work);
+	INIT_DELAYED_WORK(&priv->hf_delayed_work, twl6040_pga_hf_work);
+
 	/* power on device */
 	ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 	if (ret)
-		goto irq_err;
+		goto bias_err;
 
 	snd_soc_add_controls(codec, twl6040_snd_controls,
 				ARRAY_SIZE(twl6040_snd_controls));
@@ -1083,6 +1697,10 @@
 
 	return 0;
 
+bias_err:
+	destroy_workqueue(priv->hs_workqueue);
+wq_err:
+	destroy_workqueue(priv->hf_workqueue);
 irq_err:
 	if (naudint)
 		free_irq(naudint, codec);
@@ -1090,6 +1708,8 @@
 	if (gpio_is_valid(audpwron))
 		gpio_free(audpwron);
 gpio1_err:
+	destroy_workqueue(priv->workqueue);
+work_err:
 	kfree(priv);
 	return ret;
 }
@@ -1108,6 +1728,9 @@
 	if (naudint)
 		free_irq(naudint, codec);
 
+	destroy_workqueue(priv->workqueue);
+	destroy_workqueue(priv->hf_workqueue);
+	destroy_workqueue(priv->hs_workqueue);
 	kfree(priv);
 
 	return 0;
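
Illustrative aside, not part of the patch: the twl6040 changes above move headset/handsfree gain changes into dedicated delayed-work ramps that step the shadowed left/right volumes toward their targets, sleeping roughly 50% longer above the mid-point where each code is a larger dB increase. A minimal standalone C sketch of just that stepping arithmetic (names, targets and the printf are invented for the example; the register writes, workqueue and completion handling of the real driver are omitted):

/* Standalone sketch of the HS/HF volume-ramp stepping arithmetic */
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

struct ramp {
	unsigned int left, right;		/* current gain codes */
	unsigned int left_target, right_target;	/* requested volumes */
	unsigned int step;			/* codes moved per iteration */
	unsigned int delay_ms;			/* base delay between steps */
};

/* Move one channel one step closer to its target, clamping at the target */
static unsigned int step_towards(unsigned int cur, unsigned int target,
				 unsigned int step)
{
	if (cur < target)
		return (target - cur > step) ? cur + step : target;
	if (cur > target)
		return (cur - target > step) ? cur - step : target;
	return cur;
}

/* One ramp iteration for both channels; true once both reach the target */
static bool ramp_step(struct ramp *r)
{
	r->left = step_towards(r->left, r->left_target, r->step);
	r->right = step_towards(r->right, r->right_target, r->step);
	return r->left == r->left_target && r->right == r->right_target;
}

int main(void)
{
	struct ramp r = { .left = 0, .right = 0,
			  .left_target = 25, .right_target = 25,
			  .step = 1, .delay_ms = 5 };
	int i;

	/* 5-bit volume field: at most 32 steps, as in the driver loop */
	for (i = 0; i <= 32; i++) {
		if (ramp_step(&r))
			break;
		printf("step %2d: L=%u R=%u\n", i, r.left, r.right);
		/* delay 50% longer above the mid-point, as in the TODO note */
		usleep(((i >= 16) ? r.delay_ms + r.delay_ms / 2
				  : r.delay_ms) * 1000);
	}
	return 0;
}
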
diff --git a/sound/soc/codecs/twl6040.h b/sound/soc/codecs/twl6040.h
index f7c77fa..23aeed0 100644
--- a/sound/soc/codecs/twl6040.h
+++ b/sound/soc/codecs/twl6040.h
@@ -79,6 +79,7 @@
 
 /* INTMR (0x04) fields */
 
+#define TWL6040_PLUGMSK			0x02
 #define TWL6040_READYMSK		0x40
 #define TWL6040_ALLINT_MSK		0x7B
 
@@ -135,4 +136,11 @@
 #define TWL6040_HPPLL_ID		1
 #define TWL6040_LPPLL_ID		2
 
+/* STATUS (0x2E) fields */
+
+#define TWL6040_PLUGCOMP		0x02
+
+void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
+			    struct snd_soc_jack *jack, int report);
+
 #endif /* End of __TWL6040_H__ */
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 464f0cf..e76847a 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -19,7 +19,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include <sound/uda134x.h>
@@ -389,7 +388,7 @@
 			pd->power(0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 0c6c725..c5ca8cf 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -27,7 +27,6 @@
 #include <sound/control.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/uda1380.h>
 
@@ -36,7 +35,6 @@
 /* codec private data */
 struct uda1380_priv {
 	struct snd_soc_codec *codec;
-	u16 reg_cache[UDA1380_CACHEREGNUM];
 	unsigned int dac_clk;
 	struct work_struct work;
 	void *control_data;
@@ -414,10 +412,11 @@
 
 static int uda1380_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
-				  ARRAY_SIZE(uda1380_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -603,7 +602,7 @@
 	int reg;
 	struct uda1380_platform_data *pdata = codec->dev->platform_data;
 
-	if (codec->bias_level == level)
+	if (codec->dapm.bias_level == level)
 		return 0;
 
 	switch (level) {
@@ -613,7 +612,7 @@
 		uda1380_write(codec, UDA1380_PM, R02_PON_BIAS | pm);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			if (gpio_is_valid(pdata->gpio_power)) {
 				gpio_set_value(pdata->gpio_power, 1);
 				mdelay(1);
@@ -636,7 +635,7 @@
 		for (reg = UDA1380_MVOL; reg < UDA1380_CACHEREGNUM; reg++)
 			set_bit(reg - 0x10, &uda1380_cache_dirty);
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 0c47c78..d3ffa2f 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -25,8 +25,7 @@
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dai.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 
 #include "wl1273.h"
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 4bcd168..80ddf4f 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -36,7 +36,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -705,6 +704,7 @@
 /* Called from the machine driver */
 int wm2000_add_controls(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	if (!wm2000_i2c) {
@@ -712,12 +712,12 @@
 		return -ENODEV;
 	}
 
-	ret = snd_soc_dapm_new_controls(codec, wm2000_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, wm2000_dapm_widgets,
 					ARRAY_SIZE(wm2000_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 7611add..6d6dc9e 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -24,9 +24,9 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <trace/events/asoc.h>
 
 #include "wm8350.h"
 
@@ -54,6 +54,7 @@
 
 struct wm8350_jack_data {
 	struct snd_soc_jack *jack;
+	struct delayed_work work;
 	int report;
 	int short_report;
 };
@@ -230,8 +231,9 @@
  */
 static void wm8350_pga_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-	    container_of(work, struct snd_soc_codec, delayed_work.work);
+	struct snd_soc_dapm_context *dapm =
+	    container_of(work, struct snd_soc_dapm_context, delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
 	struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
 	struct wm8350_output *out1 = &wm8350_data->out1,
 	    *out2 = &wm8350_data->out2;
@@ -302,8 +304,8 @@
 		out->ramp = WM8350_RAMP_UP;
 		out->active = 1;
 
-		if (!delayed_work_pending(&codec->delayed_work))
-			schedule_delayed_work(&codec->delayed_work,
+		if (!delayed_work_pending(&codec->dapm.delayed_work))
+			schedule_delayed_work(&codec->dapm.delayed_work,
 					      msecs_to_jiffies(1));
 		break;
 
@@ -311,8 +313,8 @@
 		out->ramp = WM8350_RAMP_DOWN;
 		out->active = 0;
 
-		if (!delayed_work_pending(&codec->delayed_work))
-			schedule_delayed_work(&codec->delayed_work,
+		if (!delayed_work_pending(&codec->dapm.delayed_work))
+			schedule_delayed_work(&codec->dapm.delayed_work,
 					      msecs_to_jiffies(1));
 		break;
 	}
@@ -786,9 +788,10 @@
 
 static int wm8350_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	ret = snd_soc_dapm_new_controls(codec,
+	ret = snd_soc_dapm_new_controls(dapm,
 					wm8350_dapm_widgets,
 					ARRAY_SIZE(wm8350_dapm_widgets));
 	if (ret != 0) {
@@ -797,7 +800,7 @@
 	}
 
 	/* set up audio paths */
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret != 0) {
 		dev_err(codec->dev, "DAPM route register failed\n");
 		return ret;
@@ -1184,7 +1187,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
 						    priv->supplies);
 			if (ret != 0)
@@ -1317,7 +1320,7 @@
 				       priv->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1334,37 +1337,13 @@
 	return 0;
 }
 
-static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
+static void wm8350_hp_work(struct wm8350_data *priv,
+			   struct wm8350_jack_data *jack,
+			   u16 mask)
 {
-	struct wm8350_data *priv = data;
 	struct wm8350 *wm8350 = priv->codec.control_data;
 	u16 reg;
 	int report;
-	int mask;
-	struct wm8350_jack_data *jack = NULL;
-
-	switch (irq - wm8350->irq_base) {
-	case WM8350_IRQ_CODEC_JCK_DET_L:
-		jack = &priv->hpl;
-		mask = WM8350_JACK_L_LVL;
-		break;
-
-	case WM8350_IRQ_CODEC_JCK_DET_R:
-		jack = &priv->hpr;
-		mask = WM8350_JACK_R_LVL;
-		break;
-
-	default:
-		BUG();
-	}
-
-	if (!jack->jack) {
-		dev_warn(wm8350->dev, "Jack interrupt called with no jack\n");
-		return IRQ_NONE;
-	}
-
-	/* Debounce */
-	msleep(200);
 
 	reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
 	if (reg & mask)
@@ -1374,6 +1353,54 @@
 
 	snd_soc_jack_report(jack->jack, report, jack->report);
 
+}
+
+static void wm8350_hpl_work(struct work_struct *work)
+{
+	struct wm8350_data *priv =
+	    container_of(work, struct wm8350_data, hpl.work.work);
+
+	wm8350_hp_work(priv, &priv->hpl, WM8350_JACK_L_LVL);
+}
+
+static void wm8350_hpr_work(struct work_struct *work)
+{
+	struct wm8350_data *priv =
+	    container_of(work, struct wm8350_data, hpr.work.work);
+
+	wm8350_hp_work(priv, &priv->hpr, WM8350_JACK_R_LVL);
+}
+
+static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
+{
+	struct wm8350_data *priv = data;
+	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350_jack_data *jack = NULL;
+
+	switch (irq - wm8350->irq_base) {
+	case WM8350_IRQ_CODEC_JCK_DET_L:
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+		trace_snd_soc_jack_irq("WM8350 HPL");
+#endif
+		jack = &priv->hpl;
+		break;
+
+	case WM8350_IRQ_CODEC_JCK_DET_R:
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+		trace_snd_soc_jack_irq("WM8350 HPR");
+#endif
+		jack = &priv->hpr;
+		break;
+
+	default:
+		BUG();
+	}
+
+	if (device_may_wakeup(wm8350->dev))
+		pm_wakeup_event(wm8350->dev, 250);
+
+	schedule_delayed_work(&jack->work, msecs_to_jiffies(200));
+
 	return IRQ_HANDLED;
 }
 
@@ -1436,6 +1463,10 @@
 	u16 reg;
 	int report = 0;
 
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+	trace_snd_soc_jack_irq("WM8350 mic");
+#endif
+
 	reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
 	if (reg & WM8350_JACK_MICSCD_LVL)
 		report |= priv->mic.short_report;
@@ -1550,7 +1581,9 @@
 	/* Put the codec into reset if it wasn't already */
 	wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8350_pga_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8350_pga_work);
+	INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work);
+	INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work);
 
 	/* Enable the codec */
 	wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
@@ -1626,7 +1659,6 @@
 {
 	struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
 	struct wm8350 *wm8350 = dev_get_platdata(codec->dev);
-	int ret;
 
 	wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
 			  WM8350_JDL_ENA | WM8350_JDR_ENA);
@@ -1641,15 +1673,12 @@
 	priv->hpr.jack = NULL;
 	priv->mic.jack = NULL;
 
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(&codec->delayed_work);
+	cancel_delayed_work_sync(&priv->hpl.work);
+	cancel_delayed_work_sync(&priv->hpr.work);
 
 	/* if there was any work waiting then we run it now and
 	 * wait for its completion */
-	if (ret) {
-		schedule_delayed_work(&codec->delayed_work, 0);
-		flush_scheduled_work();
-	}
+	flush_delayed_work_sync(&codec->dapm.delayed_work);
 
 	wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 8502997..3c3bc07 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -911,10 +910,11 @@
 
 static int wm8400_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8400_dapm_widgets,
-				  ARRAY_SIZE(wm8400_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8400_dapm_widgets,
+				  ARRAY_SIZE(wm8400_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(power),
 						    &power[0]);
 			if (ret != 0) {
@@ -1306,7 +1306,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 8f10709..db0dced 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8510.h"
@@ -216,10 +215,11 @@
 
 static int wm8510_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8510_dapm_widgets,
-				  ARRAY_SIZE(wm8510_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8510_dapm_widgets,
+				  ARRAY_SIZE(wm8510_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -478,7 +478,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		power1 |= WM8510_POWER1_BIASEN | WM8510_POWER1_BUFIOEN;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8510_POWER1, power1 | 0x3);
 			mdelay(100);
@@ -495,7 +495,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index deca79e..5eb2f50 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -109,10 +108,11 @@
 
 static int wm8523_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets,
-				  ARRAY_SIZE(wm8523_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8523_dapm_widgets,
+				  ARRAY_SIZE(wm8523_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -327,7 +327,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
 						    wm8523->supplies);
 			if (ret != 0) {
@@ -366,7 +366,7 @@
 				       wm8523->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 8725d4e..8f6b5ee 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -31,7 +31,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <asm/div64.h>
@@ -191,7 +190,6 @@
 struct wm8580_priv {
 	enum snd_soc_control_type control_type;
 	struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
-	u16 reg_cache[WM8580_MAX_REGISTER + 1];
 	struct pll_state a;
 	struct pll_state b;
 	int sysclk[2];
@@ -302,10 +300,11 @@
 
 static int wm8580_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets,
-				  ARRAY_SIZE(wm8580_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets,
+				  ARRAY_SIZE(wm8580_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -507,13 +506,13 @@
 	}
 
 	/* Look up the SYSCLK ratio; accept only exact matches */
-	ratio = wm8580->sysclk[dai->id] / params_rate(params);
+	ratio = wm8580->sysclk[dai->driver->id] / params_rate(params);
 	for (i = 0; i < ARRAY_SIZE(wm8580_sysclk_ratios); i++)
 		if (ratio == wm8580_sysclk_ratios[i])
 			break;
 	if (i == ARRAY_SIZE(wm8580_sysclk_ratios)) {
 		dev_err(codec->dev, "Invalid clock ratio %d/%d\n",
-			wm8580->sysclk[dai->id], params_rate(params));
+			wm8580->sysclk[dai->driver->id], params_rate(params));
 		return -EINVAL;
 	}
 	paifa |= i;
@@ -716,7 +715,7 @@
 
 	switch (clk_id) {
 	case WM8580_CLKSRC_ADCMCLK:
-		if (dai->id != WM8580_DAI_PAIFTX)
+		if (dai->driver->id != WM8580_DAI_PAIFTX)
 			return -EINVAL;
 		sel = 0 << sel_shift;
 		break;
@@ -735,7 +734,7 @@
 	}
 
 	/* We really should validate PLL settings but not yet */
-	wm8580->sysclk[dai->id] = freq;
+	wm8580->sysclk[dai->driver->id] = freq;
 
 	return snd_soc_update_bits(codec, WM8580_CLKSEL, sel_mask, sel);
 }
@@ -767,7 +766,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Power up and get individual control of the DACs */
 			reg = snd_soc_read(codec, WM8580_PWRDN1);
 			reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD);
@@ -785,7 +784,7 @@
 		snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -905,7 +904,7 @@
 	.set_bias_level = wm8580_set_bias_level,
 	.reg_cache_size = ARRAY_SIZE(wm8580_reg),
 	.reg_word_size = sizeof(u16),
-	.reg_cache_default = &wm8580_reg,
+	.reg_cache_default = wm8580_reg,
 };
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 54fbd76..97c3038 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 
@@ -34,7 +33,6 @@
 /* codec private data */
 struct wm8711_priv {
 	enum snd_soc_control_type bus_type;
-	u16 reg_cache[WM8711_CACHEREGNUM];
 	unsigned int sysclk;
 };
 
@@ -93,10 +91,11 @@
 
 static int wm8711_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8711_dapm_widgets,
-				  ARRAY_SIZE(wm8711_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8711_dapm_widgets,
+				  ARRAY_SIZE(wm8711_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -318,7 +317,7 @@
 		snd_soc_write(codec, WM8711_PWR, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 075f35e..736b035 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -73,10 +72,11 @@
 
 static int wm8728_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8728_dapm_widgets,
-				  ARRAY_SIZE(wm8728_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8728_dapm_widgets,
+				  ARRAY_SIZE(wm8728_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -180,7 +180,7 @@
 	case SND_SOC_BIAS_ON:
 	case SND_SOC_BIAS_PREPARE:
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Power everything up... */
 			reg = snd_soc_read(codec, WM8728_DACCTL);
 			snd_soc_write(codec, WM8728_DACCTL, reg & ~0x4);
@@ -197,7 +197,7 @@
 		snd_soc_write(codec, WM8728_DACCTL, reg | 0x4);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index e725c09..0a67c31 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -44,9 +43,10 @@
 struct wm8731_priv {
 	enum snd_soc_control_type control_type;
 	struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
-	u16 reg_cache[WM8731_CACHEREGNUM];
 	unsigned int sysclk;
 	int sysclk_type;
+	int playback_fs;
+	bool deemph;
 };
 
 
@@ -65,16 +65,79 @@
 #define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)
 
 static const char *wm8731_input_select[] = {"Line In", "Mic"};
-static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
 
-static const struct soc_enum wm8731_enum[] = {
-	SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select),
-	SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph),
-};
+static const struct soc_enum wm8731_insel_enum =
+	SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select);
+
+static int wm8731_deemph[] = { 0, 32000, 44100, 48000 };
+
+static int wm8731_set_deemph(struct snd_soc_codec *codec)
+{
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+	int val, i, best;
+
+	/* If we're using deemphasis select the nearest available sample
+	 * rate.
+	 */
+	if (wm8731->deemph) {
+		best = 1;
+		for (i = 2; i < ARRAY_SIZE(wm8731_deemph); i++) {
+			if (abs(wm8731_deemph[i] - wm8731->playback_fs) <
+			    abs(wm8731_deemph[best] - wm8731->playback_fs))
+				best = i;
+		}
+
+		val = best << 1;
+	} else {
+		best = 0;
+		val = 0;
+	}
+
+	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
+		best, wm8731_deemph[best]);
+
+	return snd_soc_update_bits(codec, WM8731_APDIGI, 0x6, val);
+}
+
+static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8731->deemph;
+
+	return 0;
+}
+
+static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+	int deemph = ucontrol->value.enumerated.item[0];
+	int ret = 0;
+
+	if (deemph > 1)
+		return -EINVAL;
+
+	mutex_lock(&codec->mutex);
+	if (wm8731->deemph != deemph) {
+		wm8731->deemph = deemph;
+
+		wm8731_set_deemph(codec);
+
+		ret = 1;
+	}
+	mutex_unlock(&codec->mutex);
+
+	return ret;
+}
 
 static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 2000, 0);
 
 static const struct snd_kcontrol_new wm8731_snd_controls[] = {
 
@@ -87,7 +150,7 @@
 		 in_tlv),
 SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1),
 
-SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0),
+SOC_SINGLE_TLV("Mic Boost Volume", WM8731_APANA, 0, 1, 0, mic_tlv),
 SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1),
 
 SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1,
@@ -96,7 +159,8 @@
 SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1),
 SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0),
 
-SOC_ENUM("Playback De-emphasis", wm8731_enum[1]),
+SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
+		    wm8731_get_deemph, wm8731_put_deemph),
 };
 
 /* Output Mixer */
@@ -108,7 +172,7 @@
 
 /* Input mux */
 static const struct snd_kcontrol_new wm8731_input_mux_controls =
-SOC_DAPM_ENUM("Input Select", wm8731_enum[0]);
+SOC_DAPM_ENUM("Input Select", wm8731_insel_enum);
 
 static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0),
@@ -165,10 +229,11 @@
 
 static int wm8731_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
-				  ARRAY_SIZE(wm8731_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
+				  ARRAY_SIZE(wm8731_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -239,6 +304,8 @@
 	u16 srate = (coeff_div[i].sr << 2) |
 		(coeff_div[i].bosr << 1) | coeff_div[i].usb;
 
+	wm8731->playback_fs = params_rate(params);
+
 	snd_soc_write(codec, WM8731_SRATE, srate);
 
 	/* bit size */
@@ -253,6 +320,8 @@
 		break;
 	}
 
+	wm8731_set_deemph(codec);
+
 	snd_soc_write(codec, WM8731_IFACE, iface);
 	return 0;
 }
@@ -319,7 +388,7 @@
 		return -EINVAL;
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	return 0;
 }
@@ -399,7 +468,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
 						    wm8731->supplies);
 			if (ret != 0)
@@ -428,7 +497,7 @@
 				       wm8731->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -542,7 +611,6 @@
 err_regulator_get:
 	regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
 
-	kfree(wm8731);
 	return ret;
 }
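
Illustrative aside, not part of the patch: the wm8731 changes above replace the user-visible de-emphasis enum with a plain on/off switch and let wm8731_set_deemph() program whichever supported rate is closest to the current playback rate. A standalone sketch of that nearest-rate selection, using the same table as the patch (pick_deemph() and the printf are invented for the example; the real code writes best << 1 into the APDIGI field via snd_soc_update_bits()):

/* Standalone sketch of the wm8731 nearest de-emphasis rate selection */
#include <stdio.h>
#include <stdlib.h>

/* index 0 = de-emphasis off; other entries are the supported rates */
static const int wm8731_deemph[] = { 0, 32000, 44100, 48000 };

static int pick_deemph(int playback_fs, int deemph_enabled)
{
	int i, best = 1;

	if (!deemph_enabled)
		return 0;

	/* pick the entry with the smallest distance to the playback rate */
	for (i = 2; i < (int)(sizeof(wm8731_deemph) / sizeof(wm8731_deemph[0])); i++)
		if (abs(wm8731_deemph[i] - playback_fs) <
		    abs(wm8731_deemph[best] - playback_fs))
			best = i;

	return best;
}

int main(void)
{
	const int rates[] = { 8000, 32000, 44100, 48000, 96000 };
	int i;

	for (i = 0; i < 5; i++) {
		int best = pick_deemph(rates[i], 1);

		printf("fs=%5d -> index %d (%dHz), APDIGI field 0x%x\n",
		       rates[i], best, wm8731_deemph[best], best << 1);
	}
	return 0;
}
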
 
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
new file mode 100644
index 0000000..30c67d0
--- /dev/null
+++ b/sound/soc/codecs/wm8737.c
@@ -0,0 +1,754 @@
+/*
+ * wm8737.c  --  WM8737 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8737.h"
+
+#define WM8737_NUM_SUPPLIES 4
+static const char *wm8737_supply_names[WM8737_NUM_SUPPLIES] = {
+	"DCVDD",
+	"DBVDD",
+	"AVDD",
+	"MVDD",
+};
+
+/* codec private data */
+struct wm8737_priv {
+	enum snd_soc_control_type control_type;
+	struct regulator_bulk_data supplies[WM8737_NUM_SUPPLIES];
+	unsigned int mclk;
+};
+
+static const u16 wm8737_reg[WM8737_REGISTER_COUNT] = {
+	0x00C3,     /* R0  - Left PGA volume */
+	0x00C3,     /* R1  - Right PGA volume */
+	0x0007,     /* R2  - AUDIO path L */
+	0x0007,     /* R3  - AUDIO path R */
+	0x0000,     /* R4  - 3D Enhance */
+	0x0000,     /* R5  - ADC Control */
+	0x0000,     /* R6  - Power Management */
+	0x000A,     /* R7  - Audio Format */
+	0x0000,     /* R8  - Clocking */
+	0x000F,     /* R9  - MIC Preamp Control */
+	0x0003,     /* R10 - Misc Bias Control */
+	0x0000,     /* R11 - Noise Gate */
+	0x007C,     /* R12 - ALC1 */
+	0x0000,     /* R13 - ALC2 */
+	0x0032,     /* R14 - ALC3 */
+};
+
+static int wm8737_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, WM8737_RESET, 0);
+}
+
+static const unsigned int micboost_tlv[] = {
+	TLV_DB_RANGE_HEAD(4),
+	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
+	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
+static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_max_tlv, -1200, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_target_tlv, -1800, 100, 0);
+
+static const char *micbias_enum_text[] = {
+	"25%",
+	"50%",
+	"75%",
+	"100%",
+};
+
+static const struct soc_enum micbias_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 0, 4, micbias_enum_text);
+
+static const char *low_cutoff_text[] = {
+	"Low", "High"
+};
+
+static const struct soc_enum low_3d =
+	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 6, 2, low_cutoff_text);
+
+static const char *high_cutoff_text[] = {
+	"High", "Low"
+};
+
+static const struct soc_enum high_3d =
+	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 5, 2, high_cutoff_text);
+
+static const char *alc_fn_text[] = {
+	"Disabled", "Right", "Left", "Stereo"
+};
+
+static const struct soc_enum alc_fn =
+	SOC_ENUM_SINGLE(WM8737_ALC1, 7, 4, alc_fn_text);
+
+static const char *alc_hold_text[] = {
+	"0", "2.67ms", "5.33ms", "10.66ms", "21.32ms", "42.64ms", "85.28ms",
+	"170.56ms", "341.12ms", "682.24ms", "1.364s", "2.728s", "5.458s",
+	"10.916s", "21.832s", "43.691s"
+};
+
+static const struct soc_enum alc_hold =
+	SOC_ENUM_SINGLE(WM8737_ALC2, 0, 16, alc_hold_text);
+
+static const char *alc_atk_text[] = {
+	"8.4ms", "16.8ms", "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms",
+	"1.075s", "2.15s", "4.3s", "8.6s"
+};
+
+static const struct soc_enum alc_atk =
+	SOC_ENUM_SINGLE(WM8737_ALC3, 0, 11, alc_atk_text);
+
+static const char *alc_dcy_text[] = {
+	"33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", "1.075s", "2.15s",
+	"4.3s", "8.6s", "17.2s", "34.41s"
+};
+
+static const struct soc_enum alc_dcy =
+	SOC_ENUM_SINGLE(WM8737_ALC3, 4, 11, alc_dcy_text);
+
+static const struct snd_kcontrol_new wm8737_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Mic Boost Volume", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+		 6, 3, 0, micboost_tlv),
+SOC_DOUBLE_R("Mic Boost Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	     4, 1, 0),
+SOC_DOUBLE("Mic ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	   3, 1, 0),
+
+SOC_DOUBLE_R_TLV("Capture Volume", WM8737_LEFT_PGA_VOLUME,
+		 WM8737_RIGHT_PGA_VOLUME, 0, 255, 0, pga_tlv),
+SOC_DOUBLE("Capture ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	   2, 1, 0),
+
+SOC_DOUBLE("INPUT1 DC Bias Switch", WM8737_MISC_BIAS_CONTROL, 0, 1, 1, 0),
+
+SOC_ENUM("Mic PGA Bias", micbias_enum),
+SOC_SINGLE("ADC Low Power Switch", WM8737_ADC_CONTROL, 2, 1, 0),
+SOC_SINGLE("High Pass Filter Switch", WM8737_ADC_CONTROL, 0, 1, 1),
+SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
+
+SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
+SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
+SOC_ENUM("3D Low Cut-off", low_3d),
+SOC_ENUM("3D High Cut-off", high_3d),
+SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
+
+SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", WM8737_NOISE_GATE, 2, 7, 0,
+	       ng_tlv),
+
+SOC_ENUM("ALC", alc_fn),
+SOC_SINGLE_TLV("ALC Max Gain Volume", WM8737_ALC1, 4, 7, 0, alc_max_tlv),
+SOC_SINGLE_TLV("ALC Target Volume", WM8737_ALC1, 0, 15, 0, alc_target_tlv),
+SOC_ENUM("ALC Hold Time", alc_hold),
+SOC_SINGLE("ALC ZC Switch", WM8737_ALC2, 4, 1, 0),
+SOC_ENUM("ALC Attack Time", alc_atk),
+SOC_ENUM("ALC Decay Time", alc_dcy),
+};
+
+static const char *linsel_text[] = {
+	"LINPUT1", "LINPUT2", "LINPUT3", "LINPUT1 DC",
+};
+
+static const struct soc_enum linsel_enum =
+	SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_L, 7, 4, linsel_text);
+
+static const struct snd_kcontrol_new linsel_mux =
+	SOC_DAPM_ENUM("LINSEL", linsel_enum);
+
+static const char *rinsel_text[] = {
+	"RINPUT1", "RINPUT2", "RINPUT3", "RINPUT1 DC",
+};
+
+static const struct soc_enum rinsel_enum =
+	SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_R, 7, 4, rinsel_text);
+
+static const struct snd_kcontrol_new rinsel_mux =
+	SOC_DAPM_ENUM("RINSEL", rinsel_enum);
+
+static const char *bypass_text[] = {
+	"Direct", "Preamp"
+};
+
+static const struct soc_enum lbypass_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 2, 2, bypass_text);
+
+static const struct snd_kcontrol_new lbypass_mux =
+	SOC_DAPM_ENUM("Left Bypass", lbypass_enum);
+
+static const struct soc_enum rbypass_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 3, 2, bypass_text);
+
+static const struct snd_kcontrol_new rbypass_mux =
+	SOC_DAPM_ENUM("Right Bypass", rbypass_enum);
+
+static const struct snd_soc_dapm_widget wm8737_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("LINPUT1"),
+SND_SOC_DAPM_INPUT("LINPUT2"),
+SND_SOC_DAPM_INPUT("LINPUT3"),
+SND_SOC_DAPM_INPUT("RINPUT1"),
+SND_SOC_DAPM_INPUT("RINPUT2"),
+SND_SOC_DAPM_INPUT("RINPUT3"),
+SND_SOC_DAPM_INPUT("LACIN"),
+SND_SOC_DAPM_INPUT("RACIN"),
+
+SND_SOC_DAPM_MUX("LINSEL", SND_SOC_NOPM, 0, 0, &linsel_mux),
+SND_SOC_DAPM_MUX("RINSEL", SND_SOC_NOPM, 0, 0, &rinsel_mux),
+
+SND_SOC_DAPM_MUX("Left Preamp Mux", SND_SOC_NOPM, 0, 0, &lbypass_mux),
+SND_SOC_DAPM_MUX("Right Preamp Mux", SND_SOC_NOPM, 0, 0, &rbypass_mux),
+
+SND_SOC_DAPM_PGA("PGAL", WM8737_POWER_MANAGEMENT, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("PGAR", WM8737_POWER_MANAGEMENT, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_DAC("ADCL", NULL, WM8737_POWER_MANAGEMENT, 3, 0),
+SND_SOC_DAPM_DAC("ADCR", NULL, WM8737_POWER_MANAGEMENT, 2, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF", "Capture", 0, WM8737_POWER_MANAGEMENT, 6, 0),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	{ "LINSEL", "LINPUT1", "LINPUT1" },
+	{ "LINSEL", "LINPUT2", "LINPUT2" },
+	{ "LINSEL", "LINPUT3", "LINPUT3" },
+	{ "LINSEL", "LINPUT1 DC", "LINPUT1" },
+
+	{ "RINSEL", "RINPUT1", "RINPUT1" },
+	{ "RINSEL", "RINPUT2", "RINPUT2" },
+	{ "RINSEL", "RINPUT3", "RINPUT3" },
+	{ "RINSEL", "RINPUT1 DC", "RINPUT1" },
+
+	{ "Left Preamp Mux", "Preamp", "LINSEL" },
+	{ "Left Preamp Mux", "Direct", "LACIN" },
+
+	{ "Right Preamp Mux", "Preamp", "RINSEL" },
+	{ "Right Preamp Mux", "Direct", "RACIN" },
+
+	{ "PGAL", NULL, "Left Preamp Mux" },
+	{ "PGAR", NULL, "Right Preamp Mux" },
+
+	{ "ADCL", NULL, "PGAL" },
+	{ "ADCR", NULL, "PGAR" },
+
+	{ "AIF", NULL, "ADCL" },
+	{ "AIF", NULL, "ADCR" },
+};
+
+static int wm8737_add_widgets(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm8737_dapm_widgets,
+				  ARRAY_SIZE(wm8737_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	return 0;
+}
+
+/* codec mclk clock divider coefficients */
+static const struct {
+	u32 mclk;
+	u32 rate;
+	u8 usb;
+	u8 sr;
+} coeff_div[] = {
+	{ 12288000,  8000, 0,  0x4 },
+	{ 12288000, 12000, 0,  0x8 },
+	{ 12288000, 16000, 0,  0xa },
+	{ 12288000, 24000, 0, 0x1c },
+	{ 12288000, 32000, 0,  0xc },
+	{ 12288000, 48000, 0,    0 },
+	{ 12288000, 96000, 0,  0xe },
+
+	{ 11289600,  8000, 0, 0x14 },
+	{ 11289600, 11025, 0, 0x18 },
+	{ 11289600, 22050, 0, 0x1a },
+	{ 11289600, 44100, 0, 0x10 },
+	{ 11289600, 88200, 0, 0x1e },
+
+	{ 18432000,  8000, 0,  0x5 },
+	{ 18432000, 12000, 0,  0x9 },
+	{ 18432000, 16000, 0,  0xb },
+	{ 18432000, 24000, 0, 0x1b },
+	{ 18432000, 32000, 0,  0xd },
+	{ 18432000, 48000, 0,  0x1 },
+	{ 18432000, 96000, 0, 0x1f },
+
+	{ 16934400,  8000, 0, 0x15 },
+	{ 16934400, 11025, 0, 0x19 },
+	{ 16934400, 22050, 0, 0x1b },
+	{ 16934400, 44100, 0, 0x11 },
+	{ 16934400, 88200, 0, 0x1f },
+
+	{ 12000000,  8000, 1,  0x4 },
+	{ 12000000, 11025, 1, 0x19 },
+	{ 12000000, 12000, 1,  0x8 },
+	{ 12000000, 16000, 1,  0xa },
+	{ 12000000, 22050, 1, 0x1b },
+	{ 12000000, 24000, 1, 0x1c },
+	{ 12000000, 32000, 1,  0xc },
+	{ 12000000, 44100, 1, 0x11 },
+	{ 12000000, 48000, 1,  0x0 },
+	{ 12000000, 88200, 1, 0x1f },
+	{ 12000000, 96000, 1,  0xe },
+};
+
+static int wm8737_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int i;
+	u16 clocking = 0;
+	u16 af = 0;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (coeff_div[i].rate != params_rate(params))
+			continue;
+
+		if (coeff_div[i].mclk == wm8737->mclk)
+			break;
+
+		if (coeff_div[i].mclk == wm8737->mclk * 2) {
+			clocking |= WM8737_CLKDIV2;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(coeff_div)) {
+		dev_err(codec->dev, "%dHz MCLK can't support %dHz\n",
+			wm8737->mclk, params_rate(params));
+		return -EINVAL;
+	}
+
+	clocking |= coeff_div[i].usb | (coeff_div[i].sr << WM8737_SR_SHIFT);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		af |= 0x4;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		af |= 0x8;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		af |= 0xc;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, WM8737_WL_MASK, af);
+	snd_soc_update_bits(codec, WM8737_CLOCKING,
+			    WM8737_USB_MODE | WM8737_CLKDIV2 | WM8737_SR_MASK,
+			    clocking);
+
+	return 0;
+}
+
+static int wm8737_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (freq == coeff_div[i].mclk ||
+		    freq == coeff_div[i].mclk * 2) {
+			wm8737->mclk = freq;
+			return 0;
+		}
+	}
+
+	dev_err(codec->dev, "MCLK rate %dHz not supported\n", freq);
+
+	return -EINVAL;
+}
+
+
+static int wm8737_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 af = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		af |= WM8737_MS;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		af |= 0x2;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		af |= 0x1;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		af |= 0x3;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		af |= 0x13;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		af |= WM8737_LRP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT,
+			    WM8737_FORMAT_MASK | WM8737_LRP | WM8737_MS, af);
+
+	return 0;
+}
+
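+/*
+ * Bias management: when leaving SND_SOC_BIAS_OFF the supplies are enabled
+ * and the register cache resynced, VMID is brought up quickly via a
+ * low-impedance divider and then relaxed to a low-power setting for
+ * standby; SND_SOC_BIAS_PREPARE selects the normal-operation divider and
+ * SND_SOC_BIAS_OFF discharges VMID/VREF and drops the supplies.
+ */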
+static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+
+	case SND_SOC_BIAS_PREPARE:
+		/* VMID at 2*75k */
+		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+				    WM8737_VMIDSEL_MASK, 0);
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
+						    wm8737->supplies);
+			if (ret != 0) {
+				dev_err(codec->dev,
+					"Failed to enable supplies: %d\n",
+					ret);
+				return ret;
+			}
+
+			snd_soc_cache_sync(codec);
+
+			/* Fast VMID ramp at 2*2.5k */
+			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+					    WM8737_VMIDSEL_MASK, 0x4);
+
+			/* Bring VMID up */
+			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+					    WM8737_VMID_MASK |
+					    WM8737_VREF_MASK,
+					    WM8737_VMID_MASK |
+					    WM8737_VREF_MASK);
+
+			msleep(500);
+		}
+
+		/* VMID at 2*300k */
+		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+				    WM8737_VMIDSEL_MASK, 2);
+
+		break;
+
+	case SND_SOC_BIAS_OFF:
+		snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+				    WM8737_VMID_MASK | WM8737_VREF_MASK, 0);
+
+		regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies),
+				       wm8737->supplies);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define WM8737_RATES SNDRV_PCM_RATE_8000_96000
+
+#define WM8737_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8737_dai_ops = {
+	.hw_params	= wm8737_hw_params,
+	.set_sysclk	= wm8737_set_dai_sysclk,
+	.set_fmt	= wm8737_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver wm8737_dai = {
+	.name = "wm8737",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,  /* Mono modes not yet supported */
+		.channels_max = 2,
+		.rates = WM8737_RATES,
+		.formats = WM8737_FORMATS,
+	},
+	.ops = &wm8737_dai_ops,
+};
+
+#ifdef CONFIG_PM
+static int wm8737_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8737_resume(struct snd_soc_codec *codec)
+{
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8737_suspend NULL
+#define wm8737_resume NULL
+#endif
+
+static int wm8737_probe(struct snd_soc_codec *codec)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int ret, i;
+
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8737->control_type);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wm8737->supplies); i++)
+		wm8737->supplies[i].supply = wm8737_supply_names[i];
+
+	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8737->supplies),
+				 wm8737->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
+				    wm8737->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		goto err_get;
+	}
+
+	ret = wm8737_reset(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset\n");
+		goto err_enable;
+	}
+
+	snd_soc_update_bits(codec, WM8737_LEFT_PGA_VOLUME, WM8737_LVU,
+			    WM8737_LVU);
+	snd_soc_update_bits(codec, WM8737_RIGHT_PGA_VOLUME, WM8737_RVU,
+			    WM8737_RVU);
+
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* Bias level configuration will have done an extra enable */
+	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+
+	snd_soc_add_controls(codec, wm8737_snd_controls,
+			     ARRAY_SIZE(wm8737_snd_controls));
+	wm8737_add_widgets(codec);
+
+	return 0;
+
+err_enable:
+	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+err_get:
+	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+
+	return ret;
+}
+
+static int wm8737_remove(struct snd_soc_codec *codec)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
+	.probe		= wm8737_probe,
+	.remove		= wm8737_remove,
+	.suspend	= wm8737_suspend,
+	.resume		= wm8737_resume,
+	.set_bias_level = wm8737_set_bias_level,
+
+	.reg_cache_size = WM8737_REGISTER_COUNT - 1, /* Skip reset */
+	.reg_word_size	= sizeof(u16),
+	.reg_cache_default = wm8737_reg,
+};
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
+				      const struct i2c_device_id *id)
+{
+	struct wm8737_priv *wm8737;
+	int ret;
+
+	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
+	if (wm8737 == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, wm8737);
+	wm8737->control_type = SND_SOC_I2C;
+
+	ret = snd_soc_register_codec(&i2c->dev,
+				      &soc_codec_dev_wm8737, &wm8737_dai, 1);
+	if (ret < 0)
+		kfree(wm8737);
+	return ret;
+
+}
+
+static __devexit int wm8737_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	kfree(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id wm8737_i2c_id[] = {
+	{ "wm8737", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, wm8737_i2c_id);
+
+static struct i2c_driver wm8737_i2c_driver = {
+	.driver = {
+		.name = "wm8737",
+		.owner = THIS_MODULE,
+	},
+	.probe =    wm8737_i2c_probe,
+	.remove =   __devexit_p(wm8737_i2c_remove),
+	.id_table = wm8737_i2c_id,
+};
+#endif
+
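+/*
+ * The SPI path below mirrors the I2C one: allocate the private data, record
+ * the control interface type used for the cache I/O setup in wm8737_probe(),
+ * and register the codec with the ASoC core.
+ */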
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8737_spi_probe(struct spi_device *spi)
+{
+	struct wm8737_priv *wm8737;
+	int ret;
+
+	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
+	if (wm8737 == NULL)
+		return -ENOMEM;
+
+	wm8737->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8737);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8737, &wm8737_dai, 1);
+	if (ret < 0)
+		kfree(wm8737);
+	return ret;
+}
+
+static int __devexit wm8737_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8737_spi_driver = {
+	.driver = {
+		.name	= "wm8737",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= wm8737_spi_probe,
+	.remove		= __devexit_p(wm8737_spi_remove),
+};
+#endif /* CONFIG_SPI_MASTER */
+
+static int __init wm8737_modinit(void)
+{
+	int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	ret = i2c_add_driver(&wm8737_i2c_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "Failed to register WM8737 I2C driver: %d\n",
+		       ret);
+	}
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8737_spi_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "Failed to register WM8737 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return 0;
+}
+module_init(wm8737_modinit);
+
+static void __exit wm8737_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8737_spi_driver);
+#endif
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_driver(&wm8737_i2c_driver);
+#endif
+}
+module_exit(wm8737_exit);
+
+MODULE_DESCRIPTION("ASoC WM8737 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8737.h b/sound/soc/codecs/wm8737.h
new file mode 100644
index 0000000..23d14c8
--- /dev/null
+++ b/sound/soc/codecs/wm8737.h
@@ -0,0 +1,322 @@
+#ifndef _WM8737_H
+#define _WM8737_H
+
+/*
+ * wm8737.h  --  WM8737 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Register values.
+ */
+#define WM8737_LEFT_PGA_VOLUME                  0x00
+#define WM8737_RIGHT_PGA_VOLUME                 0x01
+#define WM8737_AUDIO_PATH_L                     0x02
+#define WM8737_AUDIO_PATH_R                     0x03
+#define WM8737_3D_ENHANCE                       0x04
+#define WM8737_ADC_CONTROL                      0x05
+#define WM8737_POWER_MANAGEMENT                 0x06
+#define WM8737_AUDIO_FORMAT                     0x07
+#define WM8737_CLOCKING                         0x08
+#define WM8737_MIC_PREAMP_CONTROL               0x09
+#define WM8737_MISC_BIAS_CONTROL                0x0A
+#define WM8737_NOISE_GATE                       0x0B
+#define WM8737_ALC1                             0x0C
+#define WM8737_ALC2                             0x0D
+#define WM8737_ALC3                             0x0E
+#define WM8737_RESET                            0x0F
+
+#define WM8737_REGISTER_COUNT                   16
+#define WM8737_MAX_REGISTER                     0x0F
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Left PGA volume
+ */
+#define WM8737_LVU                              0x0100  /* LVU */
+#define WM8737_LVU_MASK                         0x0100  /* LVU */
+#define WM8737_LVU_SHIFT                             8  /* LVU */
+#define WM8737_LVU_WIDTH                             1  /* LVU */
+#define WM8737_LINVOL_MASK                      0x00FF  /* LINVOL - [7:0] */
+#define WM8737_LINVOL_SHIFT                          0  /* LINVOL - [7:0] */
+#define WM8737_LINVOL_WIDTH                          8  /* LINVOL - [7:0] */
+
+/*
+ * R1 (0x01) - Right PGA volume
+ */
+#define WM8737_RVU                              0x0100  /* RVU */
+#define WM8737_RVU_MASK                         0x0100  /* RVU */
+#define WM8737_RVU_SHIFT                             8  /* RVU */
+#define WM8737_RVU_WIDTH                             1  /* RVU */
+#define WM8737_RINVOL_MASK                      0x00FF  /* RINVOL - [7:0] */
+#define WM8737_RINVOL_SHIFT                          0  /* RINVOL - [7:0] */
+#define WM8737_RINVOL_WIDTH                          8  /* RINVOL - [7:0] */
+
+/*
+ * R2 (0x02) - AUDIO path L
+ */
+#define WM8737_LINSEL_MASK                      0x0180  /* LINSEL - [8:7] */
+#define WM8737_LINSEL_SHIFT                          7  /* LINSEL - [8:7] */
+#define WM8737_LINSEL_WIDTH                          2  /* LINSEL - [8:7] */
+#define WM8737_LMICBOOST_MASK                   0x0060  /* LMICBOOST - [6:5] */
+#define WM8737_LMICBOOST_SHIFT                       5  /* LMICBOOST - [6:5] */
+#define WM8737_LMICBOOST_WIDTH                       2  /* LMICBOOST - [6:5] */
+#define WM8737_LMBE                             0x0010  /* LMBE */
+#define WM8737_LMBE_MASK                        0x0010  /* LMBE */
+#define WM8737_LMBE_SHIFT                            4  /* LMBE */
+#define WM8737_LMBE_WIDTH                            1  /* LMBE */
+#define WM8737_LMZC                             0x0008  /* LMZC */
+#define WM8737_LMZC_MASK                        0x0008  /* LMZC */
+#define WM8737_LMZC_SHIFT                            3  /* LMZC */
+#define WM8737_LMZC_WIDTH                            1  /* LMZC */
+#define WM8737_LPZC                             0x0004  /* LPZC */
+#define WM8737_LPZC_MASK                        0x0004  /* LPZC */
+#define WM8737_LPZC_SHIFT                            2  /* LPZC */
+#define WM8737_LPZC_WIDTH                            1  /* LPZC */
+#define WM8737_LZCTO_MASK                       0x0003  /* LZCTO - [1:0] */
+#define WM8737_LZCTO_SHIFT                           0  /* LZCTO - [1:0] */
+#define WM8737_LZCTO_WIDTH                           2  /* LZCTO - [1:0] */
+
+/*
+ * R3 (0x03) - AUDIO path R
+ */
+#define WM8737_RINSEL_MASK                      0x0180  /* RINSEL - [8:7] */
+#define WM8737_RINSEL_SHIFT                          7  /* RINSEL - [8:7] */
+#define WM8737_RINSEL_WIDTH                          2  /* RINSEL - [8:7] */
+#define WM8737_RMICBOOST_MASK                   0x0060  /* RMICBOOST - [6:5] */
+#define WM8737_RMICBOOST_SHIFT                       5  /* RMICBOOST - [6:5] */
+#define WM8737_RMICBOOST_WIDTH                       2  /* RMICBOOST - [6:5] */
+#define WM8737_RMBE                             0x0010  /* RMBE */
+#define WM8737_RMBE_MASK                        0x0010  /* RMBE */
+#define WM8737_RMBE_SHIFT                            4  /* RMBE */
+#define WM8737_RMBE_WIDTH                            1  /* RMBE */
+#define WM8737_RMZC                             0x0008  /* RMZC */
+#define WM8737_RMZC_MASK                        0x0008  /* RMZC */
+#define WM8737_RMZC_SHIFT                            3  /* RMZC */
+#define WM8737_RMZC_WIDTH                            1  /* RMZC */
+#define WM8737_RPZC                             0x0004  /* RPZC */
+#define WM8737_RPZC_MASK                        0x0004  /* RPZC */
+#define WM8737_RPZC_SHIFT                            2  /* RPZC */
+#define WM8737_RPZC_WIDTH                            1  /* RPZC */
+#define WM8737_RZCTO_MASK                       0x0003  /* RZCTO - [1:0] */
+#define WM8737_RZCTO_SHIFT                           0  /* RZCTO - [1:0] */
+#define WM8737_RZCTO_WIDTH                           2  /* RZCTO - [1:0] */
+
+/*
+ * R4 (0x04) - 3D Enhance
+ */
+#define WM8737_DIV2                             0x0080  /* DIV2 */
+#define WM8737_DIV2_MASK                        0x0080  /* DIV2 */
+#define WM8737_DIV2_SHIFT                            7  /* DIV2 */
+#define WM8737_DIV2_WIDTH                            1  /* DIV2 */
+#define WM8737_3DLC                             0x0040  /* 3DLC */
+#define WM8737_3DLC_MASK                        0x0040  /* 3DLC */
+#define WM8737_3DLC_SHIFT                            6  /* 3DLC */
+#define WM8737_3DLC_WIDTH                            1  /* 3DLC */
+#define WM8737_3DUC                             0x0020  /* 3DUC */
+#define WM8737_3DUC_MASK                        0x0020  /* 3DUC */
+#define WM8737_3DUC_SHIFT                            5  /* 3DUC */
+#define WM8737_3DUC_WIDTH                            1  /* 3DUC */
+#define WM8737_3DDEPTH_MASK                     0x001E  /* 3DDEPTH - [4:1] */
+#define WM8737_3DDEPTH_SHIFT                         1  /* 3DDEPTH - [4:1] */
+#define WM8737_3DDEPTH_WIDTH                         4  /* 3DDEPTH - [4:1] */
+#define WM8737_3DE                              0x0001  /* 3DE */
+#define WM8737_3DE_MASK                         0x0001  /* 3DE */
+#define WM8737_3DE_SHIFT                             0  /* 3DE */
+#define WM8737_3DE_WIDTH                             1  /* 3DE */
+
+/*
+ * R5 (0x05) - ADC Control
+ */
+#define WM8737_MONOMIX_MASK                     0x0180  /* MONOMIX - [8:7] */
+#define WM8737_MONOMIX_SHIFT                         7  /* MONOMIX - [8:7] */
+#define WM8737_MONOMIX_WIDTH                         2  /* MONOMIX - [8:7] */
+#define WM8737_POLARITY_MASK                    0x0060  /* POLARITY - [6:5] */
+#define WM8737_POLARITY_SHIFT                        5  /* POLARITY - [6:5] */
+#define WM8737_POLARITY_WIDTH                        2  /* POLARITY - [6:5] */
+#define WM8737_HPOR                             0x0010  /* HPOR */
+#define WM8737_HPOR_MASK                        0x0010  /* HPOR */
+#define WM8737_HPOR_SHIFT                            4  /* HPOR */
+#define WM8737_HPOR_WIDTH                            1  /* HPOR */
+#define WM8737_LP                               0x0004  /* LP */
+#define WM8737_LP_MASK                          0x0004  /* LP */
+#define WM8737_LP_SHIFT                              2  /* LP */
+#define WM8737_LP_WIDTH                              1  /* LP */
+#define WM8737_MONOUT                           0x0002  /* MONOUT */
+#define WM8737_MONOUT_MASK                      0x0002  /* MONOUT */
+#define WM8737_MONOUT_SHIFT                          1  /* MONOUT */
+#define WM8737_MONOUT_WIDTH                          1  /* MONOUT */
+#define WM8737_ADCHPD                           0x0001  /* ADCHPD */
+#define WM8737_ADCHPD_MASK                      0x0001  /* ADCHPD */
+#define WM8737_ADCHPD_SHIFT                          0  /* ADCHPD */
+#define WM8737_ADCHPD_WIDTH                          1  /* ADCHPD */
+
+/*
+ * R6 (0x06) - Power Management
+ */
+#define WM8737_VMID                             0x0100  /* VMID */
+#define WM8737_VMID_MASK                        0x0100  /* VMID */
+#define WM8737_VMID_SHIFT                            8  /* VMID */
+#define WM8737_VMID_WIDTH                            1  /* VMID */
+#define WM8737_VREF                             0x0080  /* VREF */
+#define WM8737_VREF_MASK                        0x0080  /* VREF */
+#define WM8737_VREF_SHIFT                            7  /* VREF */
+#define WM8737_VREF_WIDTH                            1  /* VREF */
+#define WM8737_AI                               0x0040  /* AI */
+#define WM8737_AI_MASK                          0x0040  /* AI */
+#define WM8737_AI_SHIFT                              6  /* AI */
+#define WM8737_AI_WIDTH                              1  /* AI */
+#define WM8737_PGL                              0x0020  /* PGL */
+#define WM8737_PGL_MASK                         0x0020  /* PGL */
+#define WM8737_PGL_SHIFT                             5  /* PGL */
+#define WM8737_PGL_WIDTH                             1  /* PGL */
+#define WM8737_PGR                              0x0010  /* PGR */
+#define WM8737_PGR_MASK                         0x0010  /* PGR */
+#define WM8737_PGR_SHIFT                             4  /* PGR */
+#define WM8737_PGR_WIDTH                             1  /* PGR */
+#define WM8737_ADL                              0x0008  /* ADL */
+#define WM8737_ADL_MASK                         0x0008  /* ADL */
+#define WM8737_ADL_SHIFT                             3  /* ADL */
+#define WM8737_ADL_WIDTH                             1  /* ADL */
+#define WM8737_ADR                              0x0004  /* ADR */
+#define WM8737_ADR_MASK                         0x0004  /* ADR */
+#define WM8737_ADR_SHIFT                             2  /* ADR */
+#define WM8737_ADR_WIDTH                             1  /* ADR */
+#define WM8737_MICBIAS_MASK                     0x0003  /* MICBIAS - [1:0] */
+#define WM8737_MICBIAS_SHIFT                         0  /* MICBIAS - [1:0] */
+#define WM8737_MICBIAS_WIDTH                         2  /* MICBIAS - [1:0] */
+
+/*
+ * R7 (0x07) - Audio Format
+ */
+#define WM8737_SDODIS                           0x0080  /* SDODIS */
+#define WM8737_SDODIS_MASK                      0x0080  /* SDODIS */
+#define WM8737_SDODIS_SHIFT                          7  /* SDODIS */
+#define WM8737_SDODIS_WIDTH                          1  /* SDODIS */
+#define WM8737_MS                               0x0040  /* MS */
+#define WM8737_MS_MASK                          0x0040  /* MS */
+#define WM8737_MS_SHIFT                              6  /* MS */
+#define WM8737_MS_WIDTH                              1  /* MS */
+#define WM8737_LRP                              0x0010  /* LRP */
+#define WM8737_LRP_MASK                         0x0010  /* LRP */
+#define WM8737_LRP_SHIFT                             4  /* LRP */
+#define WM8737_LRP_WIDTH                             1  /* LRP */
+#define WM8737_WL_MASK                          0x000C  /* WL - [3:2] */
+#define WM8737_WL_SHIFT                              2  /* WL - [3:2] */
+#define WM8737_WL_WIDTH                              2  /* WL - [3:2] */
+#define WM8737_FORMAT_MASK                      0x0003  /* FORMAT - [1:0] */
+#define WM8737_FORMAT_SHIFT                          0  /* FORMAT - [1:0] */
+#define WM8737_FORMAT_WIDTH                          2  /* FORMAT - [1:0] */
+
+/*
+ * R8 (0x08) - Clocking
+ */
+#define WM8737_AUTODETECT                       0x0080  /* AUTODETECT */
+#define WM8737_AUTODETECT_MASK                  0x0080  /* AUTODETECT */
+#define WM8737_AUTODETECT_SHIFT                      7  /* AUTODETECT */
+#define WM8737_AUTODETECT_WIDTH                      1  /* AUTODETECT */
+#define WM8737_CLKDIV2                          0x0040  /* CLKDIV2 */
+#define WM8737_CLKDIV2_MASK                     0x0040  /* CLKDIV2 */
+#define WM8737_CLKDIV2_SHIFT                         6  /* CLKDIV2 */
+#define WM8737_CLKDIV2_WIDTH                         1  /* CLKDIV2 */
+#define WM8737_SR_MASK                          0x003E  /* SR - [5:1] */
+#define WM8737_SR_SHIFT                              1  /* SR - [5:1] */
+#define WM8737_SR_WIDTH                              5  /* SR - [5:1] */
+#define WM8737_USB_MODE                         0x0001  /* USB MODE */
+#define WM8737_USB_MODE_MASK                    0x0001  /* USB MODE */
+#define WM8737_USB_MODE_SHIFT                        0  /* USB MODE */
+#define WM8737_USB_MODE_WIDTH                        1  /* USB MODE */
+
+/*
+ * R9 (0x09) - MIC Preamp Control
+ */
+#define WM8737_RBYPEN                           0x0008  /* RBYPEN */
+#define WM8737_RBYPEN_MASK                      0x0008  /* RBYPEN */
+#define WM8737_RBYPEN_SHIFT                          3  /* RBYPEN */
+#define WM8737_RBYPEN_WIDTH                          1  /* RBYPEN */
+#define WM8737_LBYPEN                           0x0004  /* LBYPEN */
+#define WM8737_LBYPEN_MASK                      0x0004  /* LBYPEN */
+#define WM8737_LBYPEN_SHIFT                          2  /* LBYPEN */
+#define WM8737_LBYPEN_WIDTH                          1  /* LBYPEN */
+#define WM8737_MBCTRL_MASK                      0x0003  /* MBCTRL - [1:0] */
+#define WM8737_MBCTRL_SHIFT                          0  /* MBCTRL - [1:0] */
+#define WM8737_MBCTRL_WIDTH                          2  /* MBCTRL - [1:0] */
+
+/*
+ * R10 (0x0A) - Misc Bias Control
+ */
+#define WM8737_VMIDSEL_MASK                     0x000C  /* VMIDSEL - [3:2] */
+#define WM8737_VMIDSEL_SHIFT                         2  /* VMIDSEL - [3:2] */
+#define WM8737_VMIDSEL_WIDTH                         2  /* VMIDSEL - [3:2] */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE           0x0002  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_MASK      0x0002  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_SHIFT          1  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_WIDTH          1  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE           0x0001  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_MASK      0x0001  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_SHIFT          0  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_WIDTH          1  /* RINPUT1 DC BIAS ENABLE */
+
+/*
+ * R11 (0x0B) - Noise Gate
+ */
+#define WM8737_NGTH_MASK                        0x001C  /* NGTH - [4:2] */
+#define WM8737_NGTH_SHIFT                            2  /* NGTH - [4:2] */
+#define WM8737_NGTH_WIDTH                            3  /* NGTH - [4:2] */
+#define WM8737_NGAT                             0x0001  /* NGAT */
+#define WM8737_NGAT_MASK                        0x0001  /* NGAT */
+#define WM8737_NGAT_SHIFT                            0  /* NGAT */
+#define WM8737_NGAT_WIDTH                            1  /* NGAT */
+
+/*
+ * R12 (0x0C) - ALC1
+ */
+#define WM8737_ALCSEL_MASK                      0x0180  /* ALCSEL - [8:7] */
+#define WM8737_ALCSEL_SHIFT                          7  /* ALCSEL - [8:7] */
+#define WM8737_ALCSEL_WIDTH                          2  /* ALCSEL - [8:7] */
+#define WM8737_MAX_GAIN_MASK                    0x0070  /* MAX GAIN - [6:4] */
+#define WM8737_MAX_GAIN_SHIFT                        4  /* MAX GAIN - [6:4] */
+#define WM8737_MAX_GAIN_WIDTH                        3  /* MAX GAIN - [6:4] */
+#define WM8737_ALCL_MASK                        0x000F  /* ALCL - [3:0] */
+#define WM8737_ALCL_SHIFT                            0  /* ALCL - [3:0] */
+#define WM8737_ALCL_WIDTH                            4  /* ALCL - [3:0] */
+
+/*
+ * R13 (0x0D) - ALC2
+ */
+#define WM8737_ALCZCE                           0x0010  /* ALCZCE */
+#define WM8737_ALCZCE_MASK                      0x0010  /* ALCZCE */
+#define WM8737_ALCZCE_SHIFT                          4  /* ALCZCE */
+#define WM8737_ALCZCE_WIDTH                          1  /* ALCZCE */
+#define WM8737_HLD_MASK                         0x000F  /* HLD - [3:0] */
+#define WM8737_HLD_SHIFT                             0  /* HLD - [3:0] */
+#define WM8737_HLD_WIDTH                             4  /* HLD - [3:0] */
+
+/*
+ * R14 (0x0E) - ALC3
+ */
+#define WM8737_DCY_MASK                         0x00F0  /* DCY - [7:4] */
+#define WM8737_DCY_SHIFT                             4  /* DCY - [7:4] */
+#define WM8737_DCY_WIDTH                             4  /* DCY - [7:4] */
+#define WM8737_ATK_MASK                         0x000F  /* ATK - [3:0] */
+#define WM8737_ATK_SHIFT                             0  /* ATK - [3:0] */
+#define WM8737_ATK_WIDTH                             4  /* ATK - [3:0] */
+
+/*
+ * R15 (0x0F) - Reset
+ */
+#define WM8737_RESET_MASK                       0x01FF  /* RESET - [8:0] */
+#define WM8737_RESET_SHIFT                           0  /* RESET - [8:0] */
+#define WM8737_RESET_WIDTH                           9  /* RESET - [8:0] */
+
+#endif
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index aea60ef..494f2d3 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -94,10 +93,11 @@
 
 static int wm8741_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8741_dapm_widgets,
-				  ARRAY_SIZE(wm8741_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8741_dapm_widgets,
+				  ARRAY_SIZE(wm8741_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -455,7 +455,7 @@
 	.resume =	wm8741_resume,
 	.reg_cache_size = ARRAY_SIZE(wm8741_reg_defaults),
 	.reg_word_size = sizeof(u16),
-	.reg_cache_default = &wm8741_reg_defaults,
+	.reg_cache_default = wm8741_reg_defaults,
 };
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 6c924cd..38f38fd 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8750.h"
@@ -53,7 +52,6 @@
 struct wm8750_priv {
 	unsigned int sysclk;
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[ARRAY_SIZE(wm8750_reg)];
 };
 
 #define wm8750_reset(c)	snd_soc_write(c, WM8750_RESET, 0)
@@ -399,10 +397,11 @@
 
 static int wm8750_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
-				  ARRAY_SIZE(wm8750_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
+				  ARRAY_SIZE(wm8750_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -615,7 +614,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Set VMID to 5k */
 			snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
 
@@ -630,7 +629,7 @@
 		snd_soc_write(codec, WM8750_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 87caae5..79b02ae 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -45,7 +45,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -623,10 +622,11 @@
 
 static int wm8753_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
-				  ARRAY_SIZE(wm8753_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
+				  ARRAY_SIZE(wm8753_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1245,7 +1245,7 @@
 		snd_soc_write(codec, WM8753_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1435,9 +1435,11 @@
 
 static void wm8753_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-		container_of(work, struct snd_soc_codec, delayed_work.work);
-	wm8753_set_bias_level(codec, codec->bias_level);
+	struct snd_soc_dapm_context *dapm =
+		container_of(work, struct snd_soc_dapm_context,
+			     delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
+	wm8753_set_bias_level(codec, dapm->bias_level);
 }
 
 static int wm8753_suspend(struct snd_soc_codec *codec, pm_message_t state)
@@ -1466,41 +1468,22 @@
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* charge wm8753 caps */
-	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
 		wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
-		codec->bias_level = SND_SOC_BIAS_ON;
-		schedule_delayed_work(&codec->delayed_work,
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		schedule_delayed_work(&codec->dapm.delayed_work,
 			msecs_to_jiffies(caps_charge));
 	}
 
 	return 0;
 }
 
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
-	int ret;
-
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(dwork);
-
-	/* if there was any work waiting then we run it now and
-	 * wait for it's completion */
-	if (ret) {
-		schedule_delayed_work(dwork, 0);
-		flush_scheduled_work();
-	}
-	return ret;
-}
-
 static int wm8753_probe(struct snd_soc_codec *codec)
 {
 	struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8753_work);
 
 	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8753->control_type);
 	if (ret < 0) {
@@ -1519,7 +1502,7 @@
 
 	/* charge output caps */
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
-	schedule_delayed_work(&codec->delayed_work,
+	schedule_delayed_work(&codec->dapm.delayed_work,
 			      msecs_to_jiffies(caps_charge));
 
 	/* set the update bits */
@@ -1544,7 +1527,7 @@
 /* power down chip */
 static int wm8753_remove(struct snd_soc_codec *codec)
 {
-	run_delayed_work(&codec->delayed_work);
+	flush_delayed_work_sync(&codec->dapm.delayed_work);
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
 	return 0;
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
new file mode 100644
index 0000000..19b92ba
--- /dev/null
+++ b/sound/soc/codecs/wm8770.c
@@ -0,0 +1,749 @@
+/*
+ * wm8770.c  --  WM8770 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8770.h"
+
+#define WM8770_NUM_SUPPLIES 3
+static const char *wm8770_supply_names[WM8770_NUM_SUPPLIES] = {
+	"AVDD1",
+	"AVDD2",
+	"DVDD"
+};
+
+static const u16 wm8770_reg_defs[WM8770_CACHEREGNUM] = {
+	0x7f, 0x7f, 0x7f, 0x7f,
+	0x7f, 0x7f, 0x7f, 0x7f,
+	0x7f, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0, 0x90, 0,
+	0, 0x22, 0x22, 0x3e,
+	0xc, 0xc, 0x100, 0x189,
+	0x189, 0x8770
+};
+
+struct wm8770_priv {
+	enum snd_soc_control_type control_type;
+	struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES];
+	struct notifier_block disable_nb[WM8770_NUM_SUPPLIES];
+	struct snd_soc_codec *codec;
+	int sysclk;
+};
+
+static int vout12supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+static int vout34supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+
+/*
+ * We can't use the same notifier block for more than one supply and
+ * there's no way I can see to get from a callback to the caller
+ * except container_of().
+ */
+#define WM8770_REGULATOR_EVENT(n) \
+static int wm8770_regulator_event_##n(struct notifier_block *nb, \
+				      unsigned long event, void *data)    \
+{ \
+	struct wm8770_priv *wm8770 = container_of(nb, struct wm8770_priv, \
+				     disable_nb[n]); \
+	if (event & REGULATOR_EVENT_DISABLE) { \
+		wm8770->codec->cache_sync = 1; \
+	} \
+	return 0; \
+}
+
+WM8770_REGULATOR_EVENT(0)
+WM8770_REGULATOR_EVENT(1)
+WM8770_REGULATOR_EVENT(2)
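+
+/*
+ * The macro above expands to wm8770_regulator_event_0/1/2; the notifiers
+ * are registered in wm8770_probe() so that a supply being disabled marks
+ * the register cache as needing a resync the next time the device powers up.
+ */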
+
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(dac_dig_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(dac_alg_tlv, -12700, 100, 1);
+
+static const char *dac_phase_text[][2] = {
+	{ "DAC1 Normal", "DAC1 Inverted" },
+	{ "DAC2 Normal", "DAC2 Inverted" },
+	{ "DAC3 Normal", "DAC3 Inverted" },
+	{ "DAC4 Normal", "DAC4 Inverted" },
+};
+
+static const struct soc_enum dac_phase[] = {
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 0, 1, 2, dac_phase_text[0]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 2, 3, 2, dac_phase_text[1]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 4, 5, 2, dac_phase_text[2]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 6, 7, 2, dac_phase_text[3]),
+};
+
+static const struct snd_kcontrol_new wm8770_snd_controls[] = {
+	/* global DAC playback controls */
+	SOC_SINGLE_TLV("DAC Playback Volume", WM8770_MSDIGVOL, 0, 255, 0,
+		dac_dig_tlv),
+	SOC_SINGLE("DAC Playback Switch", WM8770_DACMUTE, 4, 1, 1),
+	SOC_SINGLE("DAC Playback ZC Switch", WM8770_DACCTRL1, 0, 1, 0),
+
+	/* global VOUT playback controls */
+	SOC_SINGLE_TLV("VOUT Playback Volume", WM8770_MSALGVOL, 0, 127, 0,
+		dac_alg_tlv),
+	SOC_SINGLE("VOUT Playback ZC Switch", WM8770_MSALGVOL, 7, 1, 0),
+
+	/* VOUT1/2/3/4 specific controls */
+	SOC_DOUBLE_R_TLV("VOUT1 Playback Volume", WM8770_VOUT1LVOL,
+		WM8770_VOUT1RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT1 Playback ZC Switch", WM8770_VOUT1LVOL,
+		WM8770_VOUT1RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT2 Playback Volume", WM8770_VOUT2LVOL,
+		WM8770_VOUT2RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT2 Playback ZC Switch", WM8770_VOUT2LVOL,
+		WM8770_VOUT2RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT3 Playback Volume", WM8770_VOUT3LVOL,
+		WM8770_VOUT3RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT3 Playback ZC Switch", WM8770_VOUT3LVOL,
+		WM8770_VOUT3RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT4 Playback Volume", WM8770_VOUT4LVOL,
+		WM8770_VOUT4RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT4 Playback ZC Switch", WM8770_VOUT4LVOL,
+		WM8770_VOUT4RVOL, 7, 1, 0),
+
+	/* DAC1/2/3/4 specific controls */
+	SOC_DOUBLE_R_TLV("DAC1 Playback Volume", WM8770_DAC1LVOL,
+		WM8770_DAC1RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC1 Deemphasis Switch", WM8770_DACCTRL2, 0, 1, 0),
+	SOC_ENUM("DAC1 Phase", dac_phase[0]),
+	SOC_DOUBLE_R_TLV("DAC2 Playback Volume", WM8770_DAC2LVOL,
+		WM8770_DAC2RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC2 Deemphasis Switch", WM8770_DACCTRL2, 1, 1, 0),
+	SOC_ENUM("DAC2 Phase", dac_phase[1]),
+	SOC_DOUBLE_R_TLV("DAC3 Playback Volume", WM8770_DAC3LVOL,
+		WM8770_DAC3RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC3 Deemphasis Switch", WM8770_DACCTRL2, 2, 1, 0),
+	SOC_ENUM("DAC3 Phase", dac_phase[2]),
+	SOC_DOUBLE_R_TLV("DAC4 Playback Volume", WM8770_DAC4LVOL,
+		WM8770_DAC4RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC4 Deemphasis Switch", WM8770_DACCTRL2, 3, 1, 0),
+	SOC_ENUM("DAC4 Phase", dac_phase[3]),
+
+	/* ADC specific controls */
+	SOC_DOUBLE_R_TLV("Capture Volume", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
+		0, 31, 0, adc_tlv),
+	SOC_DOUBLE_R("Capture Switch", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
+		5, 1, 1),
+
+	/* other controls */
+	SOC_SINGLE("ADC 128x Oversampling Switch", WM8770_MSTRCTRL, 3, 1, 0),
+	SOC_SINGLE("ADC Highpass Filter Switch", WM8770_IFACECTRL, 8, 1, 1)
+};
+
+static const char *ain_text[] = {
+	"AIN1", "AIN2", "AIN3", "AIN4",
+	"AIN5", "AIN6", "AIN7", "AIN8"
+};
+
+static const struct soc_enum ain_enum =
+	SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
+
+static const struct snd_kcontrol_new ain_mux =
+	SOC_DAPM_ENUM("Capture Mux", ain_enum);
+
+static const struct snd_kcontrol_new vout1_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC1 Switch", WM8770_OUTMUX1, 0, 1, 0),
+	SOC_DAPM_SINGLE("AUX1 Switch", WM8770_OUTMUX1, 1, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 2, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout2_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC2 Switch", WM8770_OUTMUX1, 3, 1, 0),
+	SOC_DAPM_SINGLE("AUX2 Switch", WM8770_OUTMUX1, 4, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 5, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout3_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC3 Switch", WM8770_OUTMUX2, 0, 1, 0),
+	SOC_DAPM_SINGLE("AUX3 Switch", WM8770_OUTMUX2, 1, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 2, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout4_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC4 Switch", WM8770_OUTMUX2, 3, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 4, 1, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8770_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("AUX1"),
+	SND_SOC_DAPM_INPUT("AUX2"),
+	SND_SOC_DAPM_INPUT("AUX3"),
+
+	SND_SOC_DAPM_INPUT("AIN1"),
+	SND_SOC_DAPM_INPUT("AIN2"),
+	SND_SOC_DAPM_INPUT("AIN3"),
+	SND_SOC_DAPM_INPUT("AIN4"),
+	SND_SOC_DAPM_INPUT("AIN5"),
+	SND_SOC_DAPM_INPUT("AIN6"),
+	SND_SOC_DAPM_INPUT("AIN7"),
+	SND_SOC_DAPM_INPUT("AIN8"),
+
+	SND_SOC_DAPM_MUX("Capture Mux", WM8770_ADCMUX, 8, 1, &ain_mux),
+
+	SND_SOC_DAPM_ADC("ADC", "Capture", WM8770_PWDNCTRL, 1, 1),
+
+	SND_SOC_DAPM_DAC("DAC1", "Playback", WM8770_PWDNCTRL, 2, 1),
+	SND_SOC_DAPM_DAC("DAC2", "Playback", WM8770_PWDNCTRL, 3, 1),
+	SND_SOC_DAPM_DAC("DAC3", "Playback", WM8770_PWDNCTRL, 4, 1),
+	SND_SOC_DAPM_DAC("DAC4", "Playback", WM8770_PWDNCTRL, 5, 1),
+
+	SND_SOC_DAPM_SUPPLY("VOUT12 Supply", SND_SOC_NOPM, 0, 0,
+		vout12supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("VOUT34 Supply", SND_SOC_NOPM, 0, 0,
+		vout34supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("VOUT1 Mixer", SND_SOC_NOPM, 0, 0,
+		vout1_mix_controls, ARRAY_SIZE(vout1_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT2 Mixer", SND_SOC_NOPM, 0, 0,
+		vout2_mix_controls, ARRAY_SIZE(vout2_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT3 Mixer", SND_SOC_NOPM, 0, 0,
+		vout3_mix_controls, ARRAY_SIZE(vout3_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT4 Mixer", SND_SOC_NOPM, 0, 0,
+		vout4_mix_controls, ARRAY_SIZE(vout4_mix_controls)),
+
+	SND_SOC_DAPM_OUTPUT("VOUT1"),
+	SND_SOC_DAPM_OUTPUT("VOUT2"),
+	SND_SOC_DAPM_OUTPUT("VOUT3"),
+	SND_SOC_DAPM_OUTPUT("VOUT4")
+};
+
+static const struct snd_soc_dapm_route wm8770_intercon[] = {
+	{ "Capture Mux", "AIN1", "AIN1" },
+	{ "Capture Mux", "AIN2", "AIN2" },
+	{ "Capture Mux", "AIN3", "AIN3" },
+	{ "Capture Mux", "AIN4", "AIN4" },
+	{ "Capture Mux", "AIN5", "AIN5" },
+	{ "Capture Mux", "AIN6", "AIN6" },
+	{ "Capture Mux", "AIN7", "AIN7" },
+	{ "Capture Mux", "AIN8", "AIN8" },
+
+	{ "ADC", NULL, "Capture Mux" },
+
+	{ "VOUT1 Mixer", NULL, "VOUT12 Supply" },
+	{ "VOUT1 Mixer", "DAC1 Switch", "DAC1" },
+	{ "VOUT1 Mixer", "AUX1 Switch", "AUX1" },
+	{ "VOUT1 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT2 Mixer", NULL, "VOUT12 Supply" },
+	{ "VOUT2 Mixer", "DAC2 Switch", "DAC2" },
+	{ "VOUT2 Mixer", "AUX2 Switch", "AUX2" },
+	{ "VOUT2 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT3 Mixer", NULL, "VOUT34 Supply" },
+	{ "VOUT3 Mixer", "DAC3 Switch", "DAC3" },
+	{ "VOUT3 Mixer", "AUX3 Switch", "AUX3" },
+	{ "VOUT3 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT4 Mixer", NULL, "VOUT34 Supply" },
+	{ "VOUT4 Mixer", "DAC4 Switch", "DAC4" },
+	{ "VOUT4 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT1", NULL, "VOUT1 Mixer" },
+	{ "VOUT2", NULL, "VOUT2 Mixer" },
+	{ "VOUT3", NULL, "VOUT3 Mixer" },
+	{ "VOUT4", NULL, "VOUT4 Mixer" }
+};
+
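+/*
+ * The VOUT12/VOUT34 supply widgets gate the shared output stages: bits
+ * [8:7] of OUTMUX1/OUTMUX2 are cleared before the corresponding outputs
+ * power up and set again once they have powered down.
+ */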
+static int vout12supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0x180);
+		break;
+	}
+
+	return 0;
+}
+
+static int vout34supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0x180);
+		break;
+	}
+
+	return 0;
+}
+
+static int wm8770_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, WM8770_RESET, 0);
+}
+
+static int wm8770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec;
+	int iface, master;
+
+	codec = dai->codec;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = 0x100;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		master = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iface = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= 0x2;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= 0x1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= 0xc;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= 0x8;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		iface |= 0x4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0xf, iface);
+	snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x100, master);
+
+	return 0;
+}
+
+static const int mclk_ratios[] = {
+	128,
+	192,
+	256,
+	384,
+	512,
+	768
+};
+
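+/*
+ * In master mode hw_params() derives the MCLK/LRCLK ratio from the
+ * configured sysclk and the requested sample rate and programs the matching
+ * mclk_ratios[] index; capture is restricted to ratios of 256fs and above.
+ */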
+static int wm8770_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec;
+	struct wm8770_priv *wm8770;
+	int i;
+	int iface;
+	int shift;
+	int ratio;
+
+	codec = dai->codec;
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+
+	iface = 0;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= 0x10;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= 0x20;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= 0x30;
+		break;
+	}
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		i = 0;
+		shift = 4;
+		break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		i = 2;
+		shift = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Only need to set MCLK/LRCLK ratio if we're master */
+	if (snd_soc_read(codec, WM8770_MSTRCTRL) & 0x100) {
+		for (; i < ARRAY_SIZE(mclk_ratios); ++i) {
+			ratio = wm8770->sysclk / params_rate(params);
+			if (ratio == mclk_ratios[i])
+				break;
+		}
+
+		if (i == ARRAY_SIZE(mclk_ratios)) {
+			dev_err(codec->dev,
+				"Unable to configure MCLK ratio %d/%d\n",
+				wm8770->sysclk, params_rate(params));
+			return -EINVAL;
+		}
+
+		dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]);
+
+		snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x7 << shift,
+				    i << shift);
+	}
+
+	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0x30, iface);
+
+	return 0;
+}
+
+static int wm8770_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec;
+
+	codec = dai->codec;
+	return snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10,
+				   !!mute << 4);
+}
+
+static int wm8770_set_sysclk(struct snd_soc_dai *dai,
+			     int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec;
+	struct wm8770_priv *wm8770;
+
+	codec = dai->codec;
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770->sysclk = freq;
+	return 0;
+}
+
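+/*
+ * Write back any cached register that differs from the hardware defaults
+ * once the supplies are restored; the reset register is skipped so that
+ * the resync itself does not reset the device.
+ */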
+static void wm8770_sync_cache(struct snd_soc_codec *codec)
+{
+	int i;
+	u16 *cache;
+
+	if (!codec->cache_sync)
+		return;
+
+	codec->cache_only = 0;
+	cache = codec->reg_cache;
+	for (i = 0; i < codec->driver->reg_cache_size; i++) {
+		if (i == WM8770_RESET || cache[i] == wm8770_reg_defs[i])
+			continue;
+		snd_soc_write(codec, i, cache[i]);
+	}
+	codec->cache_sync = 0;
+}
+
+static int wm8770_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	int ret;
+	struct wm8770_priv *wm8770;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
+						    wm8770->supplies);
+			if (ret) {
+				dev_err(codec->dev,
+					"Failed to enable supplies: %d\n",
+					ret);
+				return ret;
+			}
+			wm8770_sync_cache(codec);
+			/* global powerup */
+			snd_soc_write(codec, WM8770_PWDNCTRL, 0);
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+		/* global powerdown */
+		snd_soc_write(codec, WM8770_PWDNCTRL, 1);
+		regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies),
+				       wm8770->supplies);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define WM8770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8770_dai_ops = {
+	.digital_mute = wm8770_mute,
+	.hw_params = wm8770_hw_params,
+	.set_fmt = wm8770_set_fmt,
+	.set_sysclk = wm8770_set_sysclk,
+};
+
+static struct snd_soc_dai_driver wm8770_dai = {
+	.name = "wm8770-hifi",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = WM8770_FORMATS
+	},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_96000,
+		.formats = WM8770_FORMATS
+	},
+	.ops = &wm8770_dai_ops,
+	.symmetric_rates = 1
+};
+
+#ifdef CONFIG_PM
+static int wm8770_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8770_resume(struct snd_soc_codec *codec)
+{
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8770_suspend NULL
+#define wm8770_resume NULL
+#endif
+
+static int wm8770_probe(struct snd_soc_codec *codec)
+{
+	struct wm8770_priv *wm8770;
+	int ret;
+	int i;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770->codec = codec;
+
+	codec->dapm.idle_bias_off = 1;
+
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8770->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
+		wm8770->supplies[i].supply = wm8770_supply_names[i];
+
+	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8770->supplies),
+				 wm8770->supplies);
+	if (ret) {
+		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0;
+	wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1;
+	wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2;
+
+	/* This should really be moved into the regulator core */
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
+		ret = regulator_register_notifier(wm8770->supplies[i].consumer,
+						  &wm8770->disable_nb[i]);
+		if (ret) {
+			dev_err(codec->dev,
+				"Failed to register regulator notifier: %d\n",
+				ret);
+		}
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
+				    wm8770->supplies);
+	if (ret) {
+		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		goto err_reg_get;
+	}
+
+	ret = wm8770_reset(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		goto err_reg_enable;
+	}
+
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* latch the volume update bits */
+	snd_soc_update_bits(codec, WM8770_MSDIGVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_MSALGVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT1RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT2RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT3RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT4RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC1RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC2RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC3RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC4RVOL, 0x100, 0x100);
+
+	/* mute all DACs */
+	snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, 0x10);
+
+	snd_soc_add_controls(codec, wm8770_snd_controls,
+			     ARRAY_SIZE(wm8770_snd_controls));
+	snd_soc_dapm_new_controls(&codec->dapm, wm8770_dapm_widgets,
+				  ARRAY_SIZE(wm8770_dapm_widgets));
+	snd_soc_dapm_add_routes(&codec->dapm, wm8770_intercon,
+				ARRAY_SIZE(wm8770_intercon));
+	return 0;
+
+err_reg_enable:
+	regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+err_reg_get:
+	regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+	return ret;
+}
+
+static int wm8770_remove(struct snd_soc_codec *codec)
+{
+	struct wm8770_priv *wm8770;
+	int i;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
+		regulator_unregister_notifier(wm8770->supplies[i].consumer,
+					      &wm8770->disable_nb[i]);
+	regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
+	.probe = wm8770_probe,
+	.remove = wm8770_remove,
+	.suspend = wm8770_suspend,
+	.resume = wm8770_resume,
+	.set_bias_level = wm8770_set_bias_level,
+	.reg_cache_size = ARRAY_SIZE(wm8770_reg_defs),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wm8770_reg_defs
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8770_spi_probe(struct spi_device *spi)
+{
+	struct wm8770_priv *wm8770;
+	int ret;
+
+	wm8770 = kzalloc(sizeof(struct wm8770_priv), GFP_KERNEL);
+	if (!wm8770)
+		return -ENOMEM;
+
+	wm8770->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8770);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8770, &wm8770_dai, 1);
+	if (ret < 0)
+		kfree(wm8770);
+	return ret;
+}
+
+static int __devexit wm8770_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8770_spi_driver = {
+	.driver = {
+		.name = "wm8770",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8770_spi_probe,
+	.remove = __devexit_p(wm8770_spi_remove)
+};
+#endif
+
+static int __init wm8770_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8770_spi_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8770 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return ret;
+}
+module_init(wm8770_modinit);
+
+static void __exit wm8770_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8770_spi_driver);
+#endif
+}
+module_exit(wm8770_exit);
+
+MODULE_DESCRIPTION("ASoC WM8770 driver");
+MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8770.h b/sound/soc/codecs/wm8770.h
new file mode 100644
index 0000000..5f1b3bd
--- /dev/null
+++ b/sound/soc/codecs/wm8770.h
@@ -0,0 +1,51 @@
+/*
+ * wm8770.h  --  WM8770 ASoC driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8770_H
+#define _WM8770_H
+
+/* Registers */
+#define WM8770_VOUT1LVOL                0
+#define WM8770_VOUT1RVOL                0x1
+#define WM8770_VOUT2LVOL                0x2
+#define WM8770_VOUT2RVOL                0x3
+#define WM8770_VOUT3LVOL                0x4
+#define WM8770_VOUT3RVOL                0x5
+#define WM8770_VOUT4LVOL                0x6
+#define WM8770_VOUT4RVOL                0x7
+#define WM8770_MSALGVOL                 0x8
+#define WM8770_DAC1LVOL                 0x9
+#define WM8770_DAC1RVOL                 0xa
+#define WM8770_DAC2LVOL                 0xb
+#define WM8770_DAC2RVOL                 0xc
+#define WM8770_DAC3LVOL                 0xd
+#define WM8770_DAC3RVOL                 0xe
+#define WM8770_DAC4LVOL                 0xf
+#define WM8770_DAC4RVOL                 0x10
+#define WM8770_MSDIGVOL                 0x11
+#define WM8770_DACPHASE                 0x12
+#define WM8770_DACCTRL1                 0x13
+#define WM8770_DACMUTE                  0x14
+#define WM8770_DACCTRL2                 0x15
+#define WM8770_IFACECTRL                0x16
+#define WM8770_MSTRCTRL                 0x17
+#define WM8770_PWDNCTRL                 0x18
+#define WM8770_ADCLCTRL                 0x19
+#define WM8770_ADCRCTRL                 0x1a
+#define WM8770_ADCMUX                   0x1b
+#define WM8770_OUTMUX1                  0x1c
+#define WM8770_OUTMUX2                  0x1d
+#define WM8770_RESET                    0x31
+
+#define WM8770_CACHEREGNUM 0x20
+
+#endif
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 0132a27..8e7953b 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -306,7 +305,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Disable the global powerdown; DAPM does the rest */
 			snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 0);
 		}
@@ -317,7 +316,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -404,6 +403,7 @@
 static int wm8776_probe(struct snd_soc_codec *codec)
 {
 	struct wm8776_priv *wm8776 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 
 	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8776->control_type);
@@ -427,9 +427,9 @@
 
 	snd_soc_add_controls(codec, wm8776_snd_controls,
 			     ARRAY_SIZE(wm8776_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8776_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8776_dapm_widgets,
 				  ARRAY_SIZE(wm8776_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+	snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 4599e8e..6dae1b4 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -515,7 +514,7 @@
 		snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
 						    wm8804->supplies);
 			if (ret) {
@@ -537,7 +536,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -581,7 +580,7 @@
 	wm8804 = snd_soc_codec_get_drvdata(codec);
 	wm8804->codec = codec;
 
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 8, 8, wm8804->control_type);
 	if (ret < 0) {
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index aca4b1e..cd09599 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -140,7 +139,6 @@
 
 struct wm8900_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8900_MAXREG];
 
 	u32 fll_in; /* FLL input frequency */
 	u32 fll_out; /* FLL output frequency */
@@ -611,10 +609,11 @@
 
 static int wm8900_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8900_dapm_widgets,
-				  ARRAY_SIZE(wm8900_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8900_dapm_widgets,
+				  ARRAY_SIZE(wm8900_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1051,7 +1050,7 @@
 
 	case SND_SOC_BIAS_STANDBY:
 		/* Charge capacitors if initial power up */
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* STARTUP_BIAS_ENA on */
 			snd_soc_write(codec, WM8900_REG_POWER1,
 				     WM8900_REG_POWER1_STARTUP_BIAS_ENA);
@@ -1119,7 +1118,7 @@
 			     WM8900_REG_POWER2_SYSCLK_ENA);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 622b602..987476a 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -29,9 +29,9 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/wm8903.h>
+#include <trace/events/asoc.h>
 
 #include "wm8903.h"
 
@@ -214,15 +214,14 @@
 
 struct wm8903_priv {
 
-	u16 reg_cache[ARRAY_SIZE(wm8903_reg_defaults)];
-
 	int sysclk;
 	int irq;
 
-	/* Reference counts */
+	int fs;
+	int deemph;
+
+	/* Reference count */
 	int class_w_users;
-	int playback_active;
-	int capture_active;
 
 	struct completion wseq;
 
@@ -231,9 +230,6 @@
 	int mic_short;
 	int mic_last_report;
 	int mic_delay;
-
-	struct snd_pcm_substream *master_substream;
-	struct snd_pcm_substream *slave_substream;
 };
 
 static int wm8903_volatile_register(unsigned int reg)
@@ -463,6 +459,72 @@
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
 
 
+/* De-emphasis is only supported at these sample rates (Hz); 0 disables it */
+static int wm8903_deemph[] = { 0, 32000, 44100, 48000 };
+
+static int wm8903_set_deemph(struct snd_soc_codec *codec)
+{
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+	int val, i, best;
+
+	/* If we're using deemphasis, select the nearest available sample
+	 * rate.
+	 */
+	if (wm8903->deemph) {
+		best = 1;
+		for (i = 2; i < ARRAY_SIZE(wm8903_deemph); i++) {
+			if (abs(wm8903_deemph[i] - wm8903->fs) <
+			    abs(wm8903_deemph[best] - wm8903->fs))
+				best = i;
+		}
+
+		val = best << WM8903_DEEMPH_SHIFT;
+	} else {
+		best = 0;
+		val = 0;
+	}
+
+	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
+		best, wm8903_deemph[best]);
+
+	return snd_soc_update_bits(codec, WM8903_DAC_DIGITAL_1,
+				   WM8903_DEEMPH_MASK, val);
+}
+
+static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8903->deemph;
+
+	return 0;
+}
+
+static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+	int deemph = ucontrol->value.enumerated.item[0];
+	int ret = 0;
+
+	if (deemph > 1)
+		return -EINVAL;
+
+	mutex_lock(&codec->mutex);
+	if (wm8903->deemph != deemph) {
+		wm8903->deemph = deemph;
+
+		wm8903_set_deemph(codec);
+
+		ret = 1;
+	}
+	mutex_unlock(&codec->mutex);
+
+	return ret;
+}
+
 /* ALSA can only do steps of .01dB */
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 
@@ -475,6 +537,23 @@
 static const DECLARE_TLV_DB_SCALE(drc_tlv_max, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_tlv_startup, -300, 50, 0);
 
+static const char *hpf_mode_text[] = {
+	"Hi-fi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum hpf_mode =
+	SOC_ENUM_SINGLE(WM8903_ADC_DIGITAL_0, 5, 4, hpf_mode_text);
+
+static const char *osr_text[] = {
+	"Low power", "High performance"
+};
+
+static const struct soc_enum adc_osr =
+	SOC_ENUM_SINGLE(WM8903_ANALOGUE_ADC_0, 0, 2, osr_text);
+
+static const struct soc_enum dac_osr =
+	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 0, 2, osr_text);
+
 static const char *drc_slope_text[] = {
 	"1", "1/2", "1/4", "1/8", "1/16", "0"
 };
@@ -537,13 +616,6 @@
 static const struct soc_enum mute_mode =
 	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 9, 2, mute_mode_text);
 
-static const char *dac_deemphasis_text[] = {
-	"Disabled", "32kHz", "44.1kHz", "48kHz"
-};
-
-static const struct soc_enum dac_deemphasis =
-	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 1, 4, dac_deemphasis_text);
-
 static const char *companding_text[] = {
 	"ulaw", "alaw"
 };
@@ -613,6 +685,9 @@
 	   6, 1, 0),
 
 /* ADCs */
+SOC_ENUM("ADC OSR", adc_osr),
+SOC_SINGLE("HPF Switch", WM8903_ADC_DIGITAL_0, 4, 1, 0),
+SOC_ENUM("HPF Mode", hpf_mode),
 SOC_SINGLE("DRC Switch", WM8903_DRC_0, 15, 1, 0),
 SOC_ENUM("DRC Compressor Slope R0", drc_slope_r0),
 SOC_ENUM("DRC Compressor Slope R1", drc_slope_r1),
@@ -642,14 +717,16 @@
 	       12, 0, digital_sidetone_tlv),
 
 /* DAC */
+SOC_ENUM("DAC OSR", dac_osr),
 SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8903_DAC_DIGITAL_VOLUME_LEFT,
 		 WM8903_DAC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
 SOC_ENUM("DAC Soft Mute Rate", soft_mute),
 SOC_ENUM("DAC Mute Mode", mute_mode),
 SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0),
-SOC_ENUM("DAC De-emphasis", dac_deemphasis),
 SOC_ENUM("DAC Companding Mode", dac_companding),
 SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0),
+SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
+		    wm8903_get_deemph, wm8903_put_deemph),
 
 /* Headphones */
 SOC_DOUBLE_R("Headphone Switch",
@@ -923,10 +1000,11 @@
 
 static int wm8903_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8903_dapm_widgets,
-				  ARRAY_SIZE(wm8903_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8903_dapm_widgets,
+				  ARRAY_SIZE(wm8903_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -934,7 +1012,7 @@
 static int wm8903_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
-	u16 reg, reg2;
+	u16 reg;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
@@ -946,7 +1024,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			snd_soc_write(codec, WM8903_CLOCK_RATES_2,
 				     WM8903_CLK_SYS_ENA);
 
@@ -958,23 +1036,15 @@
 			wm8903_run_sequence(codec, 0);
 			wm8903_sync_reg_cache(codec, codec->reg_cache);
 
-			/* Enable low impedence charge pump output */
-			reg = snd_soc_read(codec,
-					  WM8903_CONTROL_INTERFACE_TEST_1);
-			snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
-				     reg | WM8903_TEST_KEY);
-			reg2 = snd_soc_read(codec, WM8903_CHARGE_PUMP_TEST_1);
-			snd_soc_write(codec, WM8903_CHARGE_PUMP_TEST_1,
-				     reg2 | WM8903_CP_SW_KELVIN_MODE_MASK);
-			snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
-				     reg);
-
 			/* By default no bypass paths are enabled so
 			 * enable Class W support.
 			 */
 			dev_dbg(codec->dev, "Enabling Class W\n");
-			snd_soc_write(codec, WM8903_CLASS_W_0, reg |
-				     WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
+			snd_soc_update_bits(codec, WM8903_CLASS_W_0,
+					    WM8903_CP_DYN_FREQ |
+					    WM8903_CP_DYN_V,
+					    WM8903_CP_DYN_FREQ |
+					    WM8903_CP_DYN_V);
 		}
 
 		reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
@@ -991,7 +1061,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1222,58 +1292,6 @@
 	{ 0,      0 },
 };
 
-static int wm8903_startup(struct snd_pcm_substream *substream,
-			  struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-	struct snd_pcm_runtime *master_runtime;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		wm8903->playback_active++;
-	else
-		wm8903->capture_active++;
-
-	/* The DAI has shared clocks so if we already have a playback or
-	 * capture going then constrain this substream to match it.
-	 */
-	if (wm8903->master_substream) {
-		master_runtime = wm8903->master_substream->runtime;
-
-		dev_dbg(codec->dev, "Constraining to %d bits\n",
-			master_runtime->sample_bits);
-
-		snd_pcm_hw_constraint_minmax(substream->runtime,
-					     SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-					     master_runtime->sample_bits,
-					     master_runtime->sample_bits);
-
-		wm8903->slave_substream = substream;
-	} else
-		wm8903->master_substream = substream;
-
-	return 0;
-}
-
-static void wm8903_shutdown(struct snd_pcm_substream *substream,
-			    struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		wm8903->playback_active--;
-	else
-		wm8903->capture_active--;
-
-	if (wm8903->master_substream == substream)
-		wm8903->master_substream = wm8903->slave_substream;
-
-	wm8903->slave_substream = NULL;
-}
-
 static int wm8903_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
@@ -1298,11 +1316,6 @@
 	u16 clock1 = snd_soc_read(codec, WM8903_CLOCK_RATES_1);
 	u16 dac_digital1 = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
 
-	if (substream == wm8903->slave_substream) {
-		dev_dbg(codec->dev, "Ignoring hw_params for slave substream\n");
-		return 0;
-	}
-
 	/* Enable sloping stopband filter for low sample rates */
 	if (fs <= 24000)
 		dac_digital1 |= WM8903_DAC_SB_FILT;
@@ -1320,19 +1333,6 @@
 		}
 	}
 
-	/* Constraints should stop us hitting this but let's make sure */
-	if (wm8903->capture_active)
-		switch (sample_rates[dsp_config].rate) {
-		case 88200:
-		case 96000:
-			dev_err(codec->dev, "%dHz unsupported by ADC\n",
-				fs);
-			return -EINVAL;
-
-		default:
-			break;
-		}
-
 	dev_dbg(codec->dev, "DSP fs = %dHz\n", sample_rates[dsp_config].rate);
 	clock1 &= ~WM8903_SAMPLE_RATE_MASK;
 	clock1 |= sample_rates[dsp_config].value;
@@ -1428,6 +1428,9 @@
 	aif2 |= bclk_divs[bclk_div].div;
 	aif3 |= bclk / fs;
 
+	wm8903->fs = params_rate(params);
+	wm8903_set_deemph(codec);
+
 	snd_soc_write(codec, WM8903_CLOCK_RATES_0, clock0);
 	snd_soc_write(codec, WM8903_CLOCK_RATES_1, clock1);
 	snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
@@ -1521,6 +1524,11 @@
 	mic_report = wm8903->mic_last_report;
 	int_pol = snd_soc_read(codec, WM8903_INTERRUPT_POLARITY_1);
 
+#ifndef CONFIG_SND_SOC_WM8903_MODULE
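+	/* Log mic detect/short interrupts via the ASoC jack IRQ trace point */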
+	if (int_val & (WM8903_MICSHRT_EINT | WM8903_MICDET_EINT))
+		trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
 	if (int_val & WM8903_MICSHRT_EINT) {
 		dev_dbg(codec->dev, "Microphone short (pol=%x)\n", int_pol);
 
@@ -1571,8 +1579,6 @@
 			SNDRV_PCM_FMTBIT_S24_LE)
 
 static struct snd_soc_dai_ops wm8903_dai_ops = {
-	.startup	= wm8903_startup,
-	.shutdown	= wm8903_shutdown,
 	.hw_params	= wm8903_hw_params,
 	.digital_mute	= wm8903_digital_mute,
 	.set_fmt	= wm8903_set_dai_fmt,
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index 996435e..e8490f3 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -19,10 +19,6 @@
 			     struct snd_soc_jack *jack,
 			     int det, int shrt);
 
-#define WM8903_MCLK_DIV_2 1
-#define WM8903_CLK_SYS    2
-#define WM8903_BCLK       3
-#define WM8903_LRCLK      4
 
 /*
  * Register values.
@@ -98,8 +94,6 @@
 #define WM8903_INTERRUPT_STATUS_1_MASK          0x7A
 #define WM8903_INTERRUPT_POLARITY_1             0x7B
 #define WM8903_INTERRUPT_CONTROL                0x7E
-#define WM8903_CONTROL_INTERFACE_TEST_1         0x81
-#define WM8903_CHARGE_PUMP_TEST_1               0x95
 #define WM8903_CLOCK_RATE_TEST_4                0xA4
 #define WM8903_ANALOGUE_OUTPUT_BIAS_0           0xAC
 
@@ -1206,25 +1200,6 @@
 #define WM8903_IRQ_POL_WIDTH                         1  /* IRQ_POL */
 
 /*
- * R129 (0x81) - Control Interface Test 1
- */
-#define WM8903_USER_KEY                         0x0002  /* USER_KEY */
-#define WM8903_USER_KEY_MASK                    0x0002  /* USER_KEY */
-#define WM8903_USER_KEY_SHIFT                        1  /* USER_KEY */
-#define WM8903_USER_KEY_WIDTH                        1  /* USER_KEY */
-#define WM8903_TEST_KEY                         0x0001  /* TEST_KEY */
-#define WM8903_TEST_KEY_MASK                    0x0001  /* TEST_KEY */
-#define WM8903_TEST_KEY_SHIFT                        0  /* TEST_KEY */
-#define WM8903_TEST_KEY_WIDTH                        1  /* TEST_KEY */
-
-/*
- * R149 (0x95) - Charge Pump Test 1
- */
-#define WM8903_CP_SW_KELVIN_MODE_MASK           0x0006  /* CP_SW_KELVIN_MODE - [2:1] */
-#define WM8903_CP_SW_KELVIN_MODE_SHIFT               1  /* CP_SW_KELVIN_MODE - [2:1] */
-#define WM8903_CP_SW_KELVIN_MODE_WIDTH               2  /* CP_SW_KELVIN_MODE - [2:1] */
-
-/*
  * R164 (0xA4) - Clock Rate Test 4
  */
 #define WM8903_ADC_DIG_MIC                      0x0200  /* ADC_DIG_MIC */
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 1ec12ef..9de44a4 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8904.h>
@@ -1427,10 +1426,11 @@
 static int wm8904_add_widgets(struct snd_soc_codec *codec)
 {
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, wm8904_core_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8904_core_dapm_widgets,
 				  ARRAY_SIZE(wm8904_core_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, core_intercon,
+	snd_soc_dapm_add_routes(dapm, core_intercon,
 				ARRAY_SIZE(core_intercon));
 
 	switch (wm8904->devtype) {
@@ -1442,20 +1442,20 @@
 		snd_soc_add_controls(codec, wm8904_snd_controls,
 				     ARRAY_SIZE(wm8904_snd_controls));
 
-		snd_soc_dapm_new_controls(codec, wm8904_adc_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_adc_dapm_widgets,
 					  ARRAY_SIZE(wm8904_adc_dapm_widgets));
-		snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dac_dapm_widgets));
-		snd_soc_dapm_new_controls(codec, wm8904_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dapm_widgets));
 
-		snd_soc_dapm_add_routes(codec, core_intercon,
+		snd_soc_dapm_add_routes(dapm, core_intercon,
 					ARRAY_SIZE(core_intercon));
-		snd_soc_dapm_add_routes(codec, adc_intercon,
+		snd_soc_dapm_add_routes(dapm, adc_intercon,
 					ARRAY_SIZE(adc_intercon));
-		snd_soc_dapm_add_routes(codec, dac_intercon,
+		snd_soc_dapm_add_routes(dapm, dac_intercon,
 					ARRAY_SIZE(dac_intercon));
-		snd_soc_dapm_add_routes(codec, wm8904_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8904_intercon,
 					ARRAY_SIZE(wm8904_intercon));
 		break;
 
@@ -1463,17 +1463,17 @@
 		snd_soc_add_controls(codec, wm8904_dac_snd_controls,
 				     ARRAY_SIZE(wm8904_dac_snd_controls));
 
-		snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dac_dapm_widgets));
 
-		snd_soc_dapm_add_routes(codec, dac_intercon,
+		snd_soc_dapm_add_routes(dapm, dac_intercon,
 					ARRAY_SIZE(dac_intercon));
-		snd_soc_dapm_add_routes(codec, wm8912_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8912_intercon,
 					ARRAY_SIZE(wm8912_intercon));
 		break;
 	}
 
-	snd_soc_dapm_new_widgets(codec);
+	snd_soc_dapm_new_widgets(dapm);
 	return 0;
 }
 
@@ -1589,7 +1589,7 @@
 		       - wm8904->fs);
 	for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
 		cur_val = abs((wm8904->sysclk_rate /
-			       clk_sys_rates[i].ratio) - wm8904->fs);;
+			       clk_sys_rates[i].ratio) - wm8904->fs);
 		if (cur_val < best_val) {
 			best = i;
 			best_val = cur_val;
@@ -2138,7 +2138,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
 						    wm8904->supplies);
 			if (ret != 0) {
@@ -2197,7 +2197,7 @@
 				       wm8904->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -2373,7 +2373,7 @@
 	int ret, i;
 
 	codec->cache_sync = 1;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	switch (wm8904->devtype) {
 	case WM8904:
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 23086e2..25580e3 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -35,7 +35,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -43,7 +42,6 @@
 
 struct wm8940_priv {
 	unsigned int sysclk;
-	u16 reg_cache[WM8940_CACHEREGNUM];
 	enum snd_soc_control_type control_type;
 	void *control_data;
 };
@@ -291,13 +289,14 @@
 
 static int wm8940_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	ret = snd_soc_dapm_new_controls(codec, wm8940_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, wm8940_dapm_widgets,
 					ARRAY_SIZE(wm8940_dapm_widgets));
 	if (ret)
 		goto error_ret;
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret)
 		goto error_ret;
 
@@ -735,7 +734,6 @@
 		return ret;
 
 	return ret;
-;
 }
 
 static int wm8940_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2ac35b0..7167dfc 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8955.h>
@@ -576,13 +575,14 @@
 
 static int wm8955_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	snd_soc_add_controls(codec, wm8955_snd_controls,
 			     ARRAY_SIZE(wm8955_snd_controls));
 
-	snd_soc_dapm_new_controls(codec, wm8955_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8955_dapm_widgets,
 				  ARRAY_SIZE(wm8955_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, wm8955_intercon,
+	snd_soc_dapm_add_routes(dapm, wm8955_intercon,
 				ARRAY_SIZE(wm8955_intercon));
 
 	return 0;
@@ -786,7 +786,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
 						    wm8955->supplies);
 			if (ret != 0) {
@@ -850,7 +850,7 @@
 				       wm8955->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index ff6ff2f..4393394 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8960.h>
@@ -72,7 +71,6 @@
 };
 
 struct wm8960_priv {
-	u16 reg_cache[WM8960_CACHEREGNUM];
 	enum snd_soc_control_type control_type;
 	void *control_data;
 	int (*set_bias_level)(struct snd_soc_codec *,
@@ -389,27 +387,28 @@
 {
 	struct wm8960_data *pdata = codec->dev->platform_data;
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct snd_soc_dapm_widget *w;
 
-	snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets,
 				  ARRAY_SIZE(wm8960_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	/* In capless mode OUT3 is used to provide VMID for the
 	 * headphone outputs, otherwise it is used as a mono mixer.
 	 */
 	if (pdata && pdata->capless) {
-		snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_capless,
+		snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_capless,
 					  ARRAY_SIZE(wm8960_dapm_widgets_capless));
 
-		snd_soc_dapm_add_routes(codec, audio_paths_capless,
+		snd_soc_dapm_add_routes(dapm, audio_paths_capless,
 					ARRAY_SIZE(audio_paths_capless));
 	} else {
-		snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_out3,
+		snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_out3,
 					  ARRAY_SIZE(wm8960_dapm_widgets_out3));
 
-		snd_soc_dapm_add_routes(codec, audio_paths_out3,
+		snd_soc_dapm_add_routes(dapm, audio_paths_out3,
 					ARRAY_SIZE(audio_paths_out3));
 	}
 
@@ -418,7 +417,9 @@
 	 * list each time to find the desired power state do so now
 	 * and save the result.
 	 */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
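+	/* Widgets now live on the card-wide list, so skip other DAPM contexts */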
+	list_for_each_entry(w, &codec->card->widgets, list) {
+		if (w->dapm != &codec->dapm)
+			continue;
 		if (strcmp(w->name, "LOUT1 PGA") == 0)
 			wm8960->lout1 = w;
 		if (strcmp(w->name, "ROUT1 PGA") == 0)
@@ -573,7 +574,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable anti-pop features */
 			snd_soc_write(codec, WM8960_APOP1,
 				      WM8960_POBCTRL | WM8960_SOFT_ST |
@@ -611,7 +612,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -627,7 +628,7 @@
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		switch (codec->bias_level) {
+		switch (codec->dapm.bias_level) {
 		case SND_SOC_BIAS_STANDBY:
 			/* Enable anti pop mode */
 			snd_soc_update_bits(codec, WM8960_APOP1,
@@ -682,7 +683,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		switch (codec->bias_level) {
+		switch (codec->dapm.bias_level) {
 		case SND_SOC_BIAS_PREPARE:
 			/* Disable HP discharge */
 			snd_soc_update_bits(codec, WM8960_APOP2,
@@ -706,7 +707,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 8340485..55252e7 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -290,7 +289,6 @@
 struct wm8961_priv {
 	enum snd_soc_control_type control_type;
 	int sysclk;
-	u16 reg_cache[WM8961_MAX_REGISTER];
 };
 
 static int wm8961_volatile_register(unsigned int reg)
@@ -882,7 +880,7 @@
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
 			/* Enable bias generation */
 			reg = snd_soc_read(codec, WM8961_ANTI_POP);
 			reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
@@ -897,7 +895,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE) {
 			/* VREF off */
 			reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
 			reg &= ~WM8961_VREF;
@@ -919,7 +917,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -959,6 +957,7 @@
 
 static int wm8961_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 	u16 reg;
 
@@ -1024,9 +1023,9 @@
 
 	snd_soc_add_controls(codec, wm8961_snd_controls,
 				ARRAY_SIZE(wm8961_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8961_dapm_widgets,
 				  ARRAY_SIZE(wm8961_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 7c421cc..b9cb1fc 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -29,10 +29,10 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8962.h>
+#include <trace/events/asoc.h>
 
 #include "wm8962.h"
 
@@ -1956,7 +1956,7 @@
 
 static int wm8962_reset(struct snd_soc_codec *codec)
 {
-	return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0);
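+	/* Writing the device ID back to the reset register resets the chip */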
+	return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
 }
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
@@ -2677,6 +2677,7 @@
 static int wm8962_add_widgets(struct snd_soc_codec *codec)
 {
 	struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	snd_soc_add_controls(codec, wm8962_snd_controls,
 			     ARRAY_SIZE(wm8962_snd_controls));
@@ -2688,26 +2689,26 @@
 				     ARRAY_SIZE(wm8962_spk_stereo_controls));
 
 
-	snd_soc_dapm_new_controls(codec, wm8962_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8962_dapm_widgets,
 				  ARRAY_SIZE(wm8962_dapm_widgets));
 	if (pdata && pdata->spk_mono)
-		snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_mono_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_mono_widgets,
 					  ARRAY_SIZE(wm8962_dapm_spk_mono_widgets));
 	else
-		snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_stereo_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_stereo_widgets,
 					  ARRAY_SIZE(wm8962_dapm_spk_stereo_widgets));
 
-	snd_soc_dapm_add_routes(codec, wm8962_intercon,
+	snd_soc_dapm_add_routes(dapm, wm8962_intercon,
 				ARRAY_SIZE(wm8962_intercon));
 	if (pdata && pdata->spk_mono)
-		snd_soc_dapm_add_routes(codec, wm8962_spk_mono_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8962_spk_mono_intercon,
 					ARRAY_SIZE(wm8962_spk_mono_intercon));
 	else
-		snd_soc_dapm_add_routes(codec, wm8962_spk_stereo_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8962_spk_stereo_intercon,
 					ARRAY_SIZE(wm8962_spk_stereo_intercon));
 
 
-	snd_soc_dapm_disable_pin(codec, "Beep");
+	snd_soc_dapm_disable_pin(dapm, "Beep");
 
 	return 0;
 }
@@ -2814,7 +2815,7 @@
 	struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
-	if (level == codec->bias_level)
+	if (level == codec->dapm.bias_level)
 		return 0;
 
 	switch (level) {
@@ -2828,7 +2829,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
 						    wm8962->supplies);
 			if (ret != 0) {
@@ -2878,7 +2879,7 @@
 				       wm8962->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -3348,6 +3349,12 @@
 	if (active & (WM8962_MICSCD_EINT | WM8962_MICD_EINT)) {
 		dev_dbg(codec->dev, "Microphone event detected\n");
 
+#ifndef CONFIG_SND_SOC_WM8962_MODULE
+		trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
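+		/* Flag a ~300ms wakeup event while the mic status is processed */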
+		pm_wakeup_event(codec->dev, 300);
+
 		schedule_delayed_work(&wm8962->mic_work,
 				      msecs_to_jiffies(250));
 	}
@@ -3433,6 +3440,7 @@
 	struct wm8962_priv *wm8962 =
 		container_of(work, struct wm8962_priv, beep_work);
 	struct snd_soc_codec *codec = wm8962->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 	int reg = 0;
 	int best = 0;
@@ -3449,16 +3457,16 @@
 
 		reg = WM8962_BEEP_ENA | (best << WM8962_BEEP_RATE_SHIFT);
 
-		snd_soc_dapm_enable_pin(codec, "Beep");
+		snd_soc_dapm_enable_pin(dapm, "Beep");
 	} else {
 		dev_dbg(codec->dev, "Disabling beep\n");
-		snd_soc_dapm_disable_pin(codec, "Beep");
+		snd_soc_dapm_disable_pin(dapm, "Beep");
 	}
 
 	snd_soc_update_bits(codec, WM8962_BEEP_GENERATOR_1,
 			    WM8962_BEEP_ENA | WM8962_BEEP_RATE_MASK, reg);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 /* For usability define a way of injecting beep events for the device -
@@ -3706,7 +3714,7 @@
 	INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work);
 
 	codec->cache_sync = 1;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C);
 	if (ret != 0) {
@@ -3865,7 +3873,6 @@
 err_get:
 	regulator_bulk_free(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 err:
-	kfree(wm8962);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 9f18db6..572bb80 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8971.h"
@@ -333,10 +332,11 @@
 
 static int wm8971_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8971_dapm_widgets,
-				  ARRAY_SIZE(wm8971_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8971_dapm_widgets,
+				  ARRAY_SIZE(wm8971_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -553,7 +553,7 @@
 		snd_soc_write(codec, WM8971_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -590,9 +590,11 @@
 
 static void wm8971_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-		container_of(work, struct snd_soc_codec, delayed_work.work);
-	wm8971_set_bias_level(codec, codec->bias_level);
+	struct snd_soc_dapm_context *dapm =
+		container_of(work, struct snd_soc_dapm_context,
+			     delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
+	wm8971_set_bias_level(codec, codec->dapm.bias_level);
 }
 
 static int wm8971_suspend(struct snd_soc_codec *codec, pm_message_t state)
@@ -620,11 +622,11 @@
 	wm8971_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* charge wm8971 caps */
-	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
 		reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
 		snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
-		codec->bias_level = SND_SOC_BIAS_ON;
-		queue_delayed_work(wm8971_workq, &codec->delayed_work,
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
 			msecs_to_jiffies(1000));
 	}
 
@@ -643,7 +645,7 @@
 		return ret;
 	}
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8971_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8971_work);
 	wm8971_workq = create_workqueue("wm8971");
 	if (wm8971_workq == NULL)
 		return -ENOMEM;
@@ -653,8 +655,8 @@
 	/* charge output caps - set vmid to 5k for quick power up */
 	reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
 	snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
-	codec->bias_level = SND_SOC_BIAS_STANDBY;
-	queue_delayed_work(wm8971_workq, &codec->delayed_work,
+	codec->dapm.bias_level = SND_SOC_BIAS_STANDBY;
+	queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
 		msecs_to_jiffies(1000));
 
 	/* set the update bits */
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index b4363f6..ca646a8 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -52,7 +51,6 @@
 
 struct wm8974_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8974_CACHEREGNUM];
 };
 
 #define wm8974_reset(c)	snd_soc_write(c, WM8974_RESET, 0)
@@ -274,10 +272,11 @@
 
 static int wm8974_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8974_dapm_widgets,
-				  ARRAY_SIZE(wm8974_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8974_dapm_widgets,
+				  ARRAY_SIZE(wm8974_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -530,7 +529,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
 			mdelay(100);
@@ -547,7 +546,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 13b979a..4bbc344 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -60,7 +59,6 @@
 	unsigned int f_opclk;
 	int mclk_idx;
 	enum wm8978_sysclk_src sysclk;
-	u16 reg_cache[WM8978_CACHEREGNUM];
 };
 
 static const char *wm8978_companding[] = {"Off", "NC", "u-law", "A-law"};
@@ -355,11 +353,12 @@
 
 static int wm8978_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8978_dapm_widgets,
-				  ARRAY_SIZE(wm8978_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, wm8978_dapm_widgets,
+				  ARRAY_SIZE(wm8978_dapm_widgets));
 	/* set up the WM8978 audio map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -837,7 +836,7 @@
 		/* bit 3: enable bias, bit 2: enable I/O tie off buffer */
 		power1 |= 0xc;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1,
 				      power1 | 0x3);
@@ -857,7 +856,7 @@
 
 	dev_dbg(codec->dev, "%s: %d, %x\n", __func__, level, power1);
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index fd2e7cc..bae510a 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -533,10 +532,11 @@
 
 static int wm8985_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8985_dapm_widgets,
-				  ARRAY_SIZE(wm8985_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map,
+	snd_soc_dapm_new_controls(dapm, wm8985_dapm_widgets,
+				  ARRAY_SIZE(wm8985_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map,
 				ARRAY_SIZE(audio_map));
 	return 0;
 }
@@ -879,7 +879,7 @@
 				    1 << WM8985_VMIDSEL_SHIFT);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8985->supplies),
 						    wm8985->supplies);
 			if (ret) {
@@ -939,7 +939,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index d7f2597..d7170f1 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -25,7 +25,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8988.h"
@@ -54,7 +53,6 @@
 	unsigned int sysclk;
 	enum snd_soc_control_type control_type;
 	struct snd_pcm_hw_constraint_list *sysclk_constraints;
-	u16 reg_cache[WM8988_NUM_REG];
 };
 
 
@@ -677,7 +675,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* VREF, VMID=2x5k */
 			snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
 
@@ -693,7 +691,7 @@
 		snd_soc_write(codec, WM8988_PWR1, 0x0000);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -759,6 +757,7 @@
 static int wm8988_probe(struct snd_soc_codec *codec)
 {
 	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 	u16 reg;
 
@@ -790,9 +789,9 @@
 
 	snd_soc_add_controls(codec, wm8988_snd_controls,
 				ARRAY_SIZE(wm8988_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8988_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8988_dapm_widgets,
 				  ARRAY_SIZE(wm8988_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 264828e..5c87a63 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -914,11 +913,12 @@
 
 static int wm8990_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8990_dapm_widgets,
-				  ARRAY_SIZE(wm8990_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, wm8990_dapm_widgets,
+				  ARRAY_SIZE(wm8990_dapm_widgets));
 	/* set up the WM8990 audio map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1170,7 +1170,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable all output discharge bits */
 			snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
 				WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
@@ -1266,7 +1266,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 589e3fa..18c0d9c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -24,7 +24,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/wm8993.h>
 
@@ -226,7 +225,6 @@
 
 struct wm8993_priv {
 	struct wm_hubs_data hubs_data;
-	u16 reg_cache[WM8993_REGISTER_COUNT];
 	struct regulator_bulk_data supplies[WM8993_NUM_SUPPLIES];
 	struct wm8993_platform_data pdata;
 	enum snd_soc_control_type control_type;
@@ -735,6 +733,7 @@
 					    0);
 		}
 		wm8993->class_w_users++;
+		wm8993->hubs_data.class_w = true;
 	}
 
 	/* Implement the change */
@@ -751,6 +750,7 @@
 					    WM8993_CP_DYN_V);
 		}
 		wm8993->class_w_users--;
+		wm8993->hubs_data.class_w = false;
 	}
 
 	dev_dbg(codec->dev, "Indirect DAC use count now %d\n",
@@ -968,7 +968,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8993->supplies),
 						    wm8993->supplies);
 			if (ret != 0)
@@ -1029,6 +1029,12 @@
 				    WM8993_VMID_SEL_MASK | WM8993_BIAS_ENA,
 				    0);
 
+		snd_soc_update_bits(codec, WM8993_ANTIPOP2,
+				    WM8993_STARTUP_BIAS_ENA |
+				    WM8993_VMID_BUF_ENA |
+				    WM8993_VMID_RAMP_MASK |
+				    WM8993_BIAS_SRC, 0);
+
 #ifdef CONFIG_REGULATOR
                /* Post 2.6.34 we will be able to get a callback when
                 * the regulators are disabled which we can use but
@@ -1043,7 +1049,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1225,7 +1231,7 @@
 		       - wm8993->fs);
 	for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
 		cur_val = abs((wm8993->sysclk_rate /
-			       clk_sys_rates[i].ratio) - wm8993->fs);;
+			       clk_sys_rates[i].ratio) - wm8993->fs);
 		if (cur_val < best_val) {
 			best = i;
 			best_val = cur_val;
@@ -1422,6 +1428,7 @@
 static int wm8993_probe(struct snd_soc_codec *codec)
 {
 	struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, i, val;
 
 	wm8993->hubs_data.hp_startup_mode = 1;
@@ -1503,11 +1510,11 @@
 				     ARRAY_SIZE(wm8993_eq_controls));
 	}
 
-	snd_soc_dapm_new_controls(codec, wm8993_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8993_dapm_widgets,
 				  ARRAY_SIZE(wm8993_dapm_widgets));
 	wm_hubs_add_analogue_controls(codec);
 
-	snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+	snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 	wm_hubs_add_analogue_routes(codec, wm8993->pdata.lineout1_diff,
 				    wm8993->pdata.lineout2_diff);
 
diff --git a/sound/soc/codecs/wm8994-tables.c b/sound/soc/codecs/wm8994-tables.c
new file mode 100644
index 0000000..68e9b02
--- /dev/null
+++ b/sound/soc/codecs/wm8994-tables.c
@@ -0,0 +1,3147 @@
+#include "wm8994.h"
+
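+/* { readable bits, writable bits } for every register in the cache */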
+const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = {
+	{ 0xFFFF, 0xFFFF }, /* R0     - Software Reset */
+	{ 0x3B37, 0x3B37 }, /* R1     - Power Management (1) */
+	{ 0x6BF0, 0x6BF0 }, /* R2     - Power Management (2) */
+	{ 0x3FF0, 0x3FF0 }, /* R3     - Power Management (3) */
+	{ 0x3F3F, 0x3F3F }, /* R4     - Power Management (4) */
+	{ 0x3F0F, 0x3F0F }, /* R5     - Power Management (5) */
+	{ 0x003F, 0x003F }, /* R6     - Power Management (6) */
+	{ 0x0000, 0x0000 }, /* R7 */
+	{ 0x0000, 0x0000 }, /* R8 */
+	{ 0x0000, 0x0000 }, /* R9 */
+	{ 0x0000, 0x0000 }, /* R10 */
+	{ 0x0000, 0x0000 }, /* R11 */
+	{ 0x0000, 0x0000 }, /* R12 */
+	{ 0x0000, 0x0000 }, /* R13 */
+	{ 0x0000, 0x0000 }, /* R14 */
+	{ 0x0000, 0x0000 }, /* R15 */
+	{ 0x0000, 0x0000 }, /* R16 */
+	{ 0x0000, 0x0000 }, /* R17 */
+	{ 0x0000, 0x0000 }, /* R18 */
+	{ 0x0000, 0x0000 }, /* R19 */
+	{ 0x0000, 0x0000 }, /* R20 */
+	{ 0x01C0, 0x01C0 }, /* R21    - Input Mixer (1) */
+	{ 0x0000, 0x0000 }, /* R22 */
+	{ 0x0000, 0x0000 }, /* R23 */
+	{ 0x00DF, 0x01DF }, /* R24    - Left Line Input 1&2 Volume */
+	{ 0x00DF, 0x01DF }, /* R25    - Left Line Input 3&4 Volume */
+	{ 0x00DF, 0x01DF }, /* R26    - Right Line Input 1&2 Volume */
+	{ 0x00DF, 0x01DF }, /* R27    - Right Line Input 3&4 Volume */
+	{ 0x00FF, 0x01FF }, /* R28    - Left Output Volume */
+	{ 0x00FF, 0x01FF }, /* R29    - Right Output Volume */
+	{ 0x0077, 0x0077 }, /* R30    - Line Outputs Volume */
+	{ 0x0030, 0x0030 }, /* R31    - HPOUT2 Volume */
+	{ 0x00FF, 0x01FF }, /* R32    - Left OPGA Volume */
+	{ 0x00FF, 0x01FF }, /* R33    - Right OPGA Volume */
+	{ 0x007F, 0x007F }, /* R34    - SPKMIXL Attenuation */
+	{ 0x017F, 0x017F }, /* R35    - SPKMIXR Attenuation */
+	{ 0x003F, 0x003F }, /* R36    - SPKOUT Mixers */
+	{ 0x003F, 0x003F }, /* R37    - ClassD */
+	{ 0x00FF, 0x01FF }, /* R38    - Speaker Volume Left */
+	{ 0x00FF, 0x01FF }, /* R39    - Speaker Volume Right */
+	{ 0x00FF, 0x00FF }, /* R40    - Input Mixer (2) */
+	{ 0x01B7, 0x01B7 }, /* R41    - Input Mixer (3) */
+	{ 0x01B7, 0x01B7 }, /* R42    - Input Mixer (4) */
+	{ 0x01C7, 0x01C7 }, /* R43    - Input Mixer (5) */
+	{ 0x01C7, 0x01C7 }, /* R44    - Input Mixer (6) */
+	{ 0x01FF, 0x01FF }, /* R45    - Output Mixer (1) */
+	{ 0x01FF, 0x01FF }, /* R46    - Output Mixer (2) */
+	{ 0x0FFF, 0x0FFF }, /* R47    - Output Mixer (3) */
+	{ 0x0FFF, 0x0FFF }, /* R48    - Output Mixer (4) */
+	{ 0x0FFF, 0x0FFF }, /* R49    - Output Mixer (5) */
+	{ 0x0FFF, 0x0FFF }, /* R50    - Output Mixer (6) */
+	{ 0x0038, 0x0038 }, /* R51    - HPOUT2 Mixer */
+	{ 0x0077, 0x0077 }, /* R52    - Line Mixer (1) */
+	{ 0x0077, 0x0077 }, /* R53    - Line Mixer (2) */
+	{ 0x03FF, 0x03FF }, /* R54    - Speaker Mixer */
+	{ 0x00C1, 0x00C1 }, /* R55    - Additional Control */
+	{ 0x00F0, 0x00F0 }, /* R56    - AntiPOP (1) */
+	{ 0x01EF, 0x01EF }, /* R57    - AntiPOP (2) */
+	{ 0x00FF, 0x00FF }, /* R58    - MICBIAS */
+	{ 0x000F, 0x000F }, /* R59    - LDO 1 */
+	{ 0x0007, 0x0007 }, /* R60    - LDO 2 */
+	{ 0x0000, 0x0000 }, /* R61 */
+	{ 0x0000, 0x0000 }, /* R62 */
+	{ 0x0000, 0x0000 }, /* R63 */
+	{ 0x0000, 0x0000 }, /* R64 */
+	{ 0x0000, 0x0000 }, /* R65 */
+	{ 0x0000, 0x0000 }, /* R66 */
+	{ 0x0000, 0x0000 }, /* R67 */
+	{ 0x0000, 0x0000 }, /* R68 */
+	{ 0x0000, 0x0000 }, /* R69 */
+	{ 0x0000, 0x0000 }, /* R70 */
+	{ 0x0000, 0x0000 }, /* R71 */
+	{ 0x0000, 0x0000 }, /* R72 */
+	{ 0x0000, 0x0000 }, /* R73 */
+	{ 0x0000, 0x0000 }, /* R74 */
+	{ 0x0000, 0x0000 }, /* R75 */
+	{ 0x8000, 0x8000 }, /* R76    - Charge Pump (1) */
+	{ 0x0000, 0x0000 }, /* R77 */
+	{ 0x0000, 0x0000 }, /* R78 */
+	{ 0x0000, 0x0000 }, /* R79 */
+	{ 0x0000, 0x0000 }, /* R80 */
+	{ 0x0301, 0x0301 }, /* R81    - Class W (1) */
+	{ 0x0000, 0x0000 }, /* R82 */
+	{ 0x0000, 0x0000 }, /* R83 */
+	{ 0x333F, 0x333F }, /* R84    - DC Servo (1) */
+	{ 0x0FEF, 0x0FEF }, /* R85    - DC Servo (2) */
+	{ 0x0000, 0x0000 }, /* R86 */
+	{ 0xFFFF, 0xFFFF }, /* R87    - DC Servo (4) */
+	{ 0x0333, 0x0000 }, /* R88    - DC Servo Readback */
+	{ 0x0000, 0x0000 }, /* R89 */
+	{ 0x0000, 0x0000 }, /* R90 */
+	{ 0x0000, 0x0000 }, /* R91 */
+	{ 0x0000, 0x0000 }, /* R92 */
+	{ 0x0000, 0x0000 }, /* R93 */
+	{ 0x0000, 0x0000 }, /* R94 */
+	{ 0x0000, 0x0000 }, /* R95 */
+	{ 0x00EE, 0x00EE }, /* R96    - Analogue HP (1) */
+	{ 0x0000, 0x0000 }, /* R97 */
+	{ 0x0000, 0x0000 }, /* R98 */
+	{ 0x0000, 0x0000 }, /* R99 */
+	{ 0x0000, 0x0000 }, /* R100 */
+	{ 0x0000, 0x0000 }, /* R101 */
+	{ 0x0000, 0x0000 }, /* R102 */
+	{ 0x0000, 0x0000 }, /* R103 */
+	{ 0x0000, 0x0000 }, /* R104 */
+	{ 0x0000, 0x0000 }, /* R105 */
+	{ 0x0000, 0x0000 }, /* R106 */
+	{ 0x0000, 0x0000 }, /* R107 */
+	{ 0x0000, 0x0000 }, /* R108 */
+	{ 0x0000, 0x0000 }, /* R109 */
+	{ 0x0000, 0x0000 }, /* R110 */
+	{ 0x0000, 0x0000 }, /* R111 */
+	{ 0x0000, 0x0000 }, /* R112 */
+	{ 0x0000, 0x0000 }, /* R113 */
+	{ 0x0000, 0x0000 }, /* R114 */
+	{ 0x0000, 0x0000 }, /* R115 */
+	{ 0x0000, 0x0000 }, /* R116 */
+	{ 0x0000, 0x0000 }, /* R117 */
+	{ 0x0000, 0x0000 }, /* R118 */
+	{ 0x0000, 0x0000 }, /* R119 */
+	{ 0x0000, 0x0000 }, /* R120 */
+	{ 0x0000, 0x0000 }, /* R121 */
+	{ 0x0000, 0x0000 }, /* R122 */
+	{ 0x0000, 0x0000 }, /* R123 */
+	{ 0x0000, 0x0000 }, /* R124 */
+	{ 0x0000, 0x0000 }, /* R125 */
+	{ 0x0000, 0x0000 }, /* R126 */
+	{ 0x0000, 0x0000 }, /* R127 */
+	{ 0x0000, 0x0000 }, /* R128 */
+	{ 0x0000, 0x0000 }, /* R129 */
+	{ 0x0000, 0x0000 }, /* R130 */
+	{ 0x0000, 0x0000 }, /* R131 */
+	{ 0x0000, 0x0000 }, /* R132 */
+	{ 0x0000, 0x0000 }, /* R133 */
+	{ 0x0000, 0x0000 }, /* R134 */
+	{ 0x0000, 0x0000 }, /* R135 */
+	{ 0x0000, 0x0000 }, /* R136 */
+	{ 0x0000, 0x0000 }, /* R137 */
+	{ 0x0000, 0x0000 }, /* R138 */
+	{ 0x0000, 0x0000 }, /* R139 */
+	{ 0x0000, 0x0000 }, /* R140 */
+	{ 0x0000, 0x0000 }, /* R141 */
+	{ 0x0000, 0x0000 }, /* R142 */
+	{ 0x0000, 0x0000 }, /* R143 */
+	{ 0x0000, 0x0000 }, /* R144 */
+	{ 0x0000, 0x0000 }, /* R145 */
+	{ 0x0000, 0x0000 }, /* R146 */
+	{ 0x0000, 0x0000 }, /* R147 */
+	{ 0x0000, 0x0000 }, /* R148 */
+	{ 0x0000, 0x0000 }, /* R149 */
+	{ 0x0000, 0x0000 }, /* R150 */
+	{ 0x0000, 0x0000 }, /* R151 */
+	{ 0x0000, 0x0000 }, /* R152 */
+	{ 0x0000, 0x0000 }, /* R153 */
+	{ 0x0000, 0x0000 }, /* R154 */
+	{ 0x0000, 0x0000 }, /* R155 */
+	{ 0x0000, 0x0000 }, /* R156 */
+	{ 0x0000, 0x0000 }, /* R157 */
+	{ 0x0000, 0x0000 }, /* R158 */
+	{ 0x0000, 0x0000 }, /* R159 */
+	{ 0x0000, 0x0000 }, /* R160 */
+	{ 0x0000, 0x0000 }, /* R161 */
+	{ 0x0000, 0x0000 }, /* R162 */
+	{ 0x0000, 0x0000 }, /* R163 */
+	{ 0x0000, 0x0000 }, /* R164 */
+	{ 0x0000, 0x0000 }, /* R165 */
+	{ 0x0000, 0x0000 }, /* R166 */
+	{ 0x0000, 0x0000 }, /* R167 */
+	{ 0x0000, 0x0000 }, /* R168 */
+	{ 0x0000, 0x0000 }, /* R169 */
+	{ 0x0000, 0x0000 }, /* R170 */
+	{ 0x0000, 0x0000 }, /* R171 */
+	{ 0x0000, 0x0000 }, /* R172 */
+	{ 0x0000, 0x0000 }, /* R173 */
+	{ 0x0000, 0x0000 }, /* R174 */
+	{ 0x0000, 0x0000 }, /* R175 */
+	{ 0x0000, 0x0000 }, /* R176 */
+	{ 0x0000, 0x0000 }, /* R177 */
+	{ 0x0000, 0x0000 }, /* R178 */
+	{ 0x0000, 0x0000 }, /* R179 */
+	{ 0x0000, 0x0000 }, /* R180 */
+	{ 0x0000, 0x0000 }, /* R181 */
+	{ 0x0000, 0x0000 }, /* R182 */
+	{ 0x0000, 0x0000 }, /* R183 */
+	{ 0x0000, 0x0000 }, /* R184 */
+	{ 0x0000, 0x0000 }, /* R185 */
+	{ 0x0000, 0x0000 }, /* R186 */
+	{ 0x0000, 0x0000 }, /* R187 */
+	{ 0x0000, 0x0000 }, /* R188 */
+	{ 0x0000, 0x0000 }, /* R189 */
+	{ 0x0000, 0x0000 }, /* R190 */
+	{ 0x0000, 0x0000 }, /* R191 */
+	{ 0x0000, 0x0000 }, /* R192 */
+	{ 0x0000, 0x0000 }, /* R193 */
+	{ 0x0000, 0x0000 }, /* R194 */
+	{ 0x0000, 0x0000 }, /* R195 */
+	{ 0x0000, 0x0000 }, /* R196 */
+	{ 0x0000, 0x0000 }, /* R197 */
+	{ 0x0000, 0x0000 }, /* R198 */
+	{ 0x0000, 0x0000 }, /* R199 */
+	{ 0x0000, 0x0000 }, /* R200 */
+	{ 0x0000, 0x0000 }, /* R201 */
+	{ 0x0000, 0x0000 }, /* R202 */
+	{ 0x0000, 0x0000 }, /* R203 */
+	{ 0x0000, 0x0000 }, /* R204 */
+	{ 0x0000, 0x0000 }, /* R205 */
+	{ 0x0000, 0x0000 }, /* R206 */
+	{ 0x0000, 0x0000 }, /* R207 */
+	{ 0x0000, 0x0000 }, /* R208 */
+	{ 0x0000, 0x0000 }, /* R209 */
+	{ 0x0000, 0x0000 }, /* R210 */
+	{ 0x0000, 0x0000 }, /* R211 */
+	{ 0x0000, 0x0000 }, /* R212 */
+	{ 0x0000, 0x0000 }, /* R213 */
+	{ 0x0000, 0x0000 }, /* R214 */
+	{ 0x0000, 0x0000 }, /* R215 */
+	{ 0x0000, 0x0000 }, /* R216 */
+	{ 0x0000, 0x0000 }, /* R217 */
+	{ 0x0000, 0x0000 }, /* R218 */
+	{ 0x0000, 0x0000 }, /* R219 */
+	{ 0x0000, 0x0000 }, /* R220 */
+	{ 0x0000, 0x0000 }, /* R221 */
+	{ 0x0000, 0x0000 }, /* R222 */
+	{ 0x0000, 0x0000 }, /* R223 */
+	{ 0x0000, 0x0000 }, /* R224 */
+	{ 0x0000, 0x0000 }, /* R225 */
+	{ 0x0000, 0x0000 }, /* R226 */
+	{ 0x0000, 0x0000 }, /* R227 */
+	{ 0x0000, 0x0000 }, /* R228 */
+	{ 0x0000, 0x0000 }, /* R229 */
+	{ 0x0000, 0x0000 }, /* R230 */
+	{ 0x0000, 0x0000 }, /* R231 */
+	{ 0x0000, 0x0000 }, /* R232 */
+	{ 0x0000, 0x0000 }, /* R233 */
+	{ 0x0000, 0x0000 }, /* R234 */
+	{ 0x0000, 0x0000 }, /* R235 */
+	{ 0x0000, 0x0000 }, /* R236 */
+	{ 0x0000, 0x0000 }, /* R237 */
+	{ 0x0000, 0x0000 }, /* R238 */
+	{ 0x0000, 0x0000 }, /* R239 */
+	{ 0x0000, 0x0000 }, /* R240 */
+	{ 0x0000, 0x0000 }, /* R241 */
+	{ 0x0000, 0x0000 }, /* R242 */
+	{ 0x0000, 0x0000 }, /* R243 */
+	{ 0x0000, 0x0000 }, /* R244 */
+	{ 0x0000, 0x0000 }, /* R245 */
+	{ 0x0000, 0x0000 }, /* R246 */
+	{ 0x0000, 0x0000 }, /* R247 */
+	{ 0x0000, 0x0000 }, /* R248 */
+	{ 0x0000, 0x0000 }, /* R249 */
+	{ 0x0000, 0x0000 }, /* R250 */
+	{ 0x0000, 0x0000 }, /* R251 */
+	{ 0x0000, 0x0000 }, /* R252 */
+	{ 0x0000, 0x0000 }, /* R253 */
+	{ 0x0000, 0x0000 }, /* R254 */
+	{ 0x0000, 0x0000 }, /* R255 */
+	{ 0x000F, 0x0000 }, /* R256   - Chip Revision */
+	{ 0x0074, 0x0074 }, /* R257   - Control Interface */
+	{ 0x0000, 0x0000 }, /* R258 */
+	{ 0x0000, 0x0000 }, /* R259 */
+	{ 0x0000, 0x0000 }, /* R260 */
+	{ 0x0000, 0x0000 }, /* R261 */
+	{ 0x0000, 0x0000 }, /* R262 */
+	{ 0x0000, 0x0000 }, /* R263 */
+	{ 0x0000, 0x0000 }, /* R264 */
+	{ 0x0000, 0x0000 }, /* R265 */
+	{ 0x0000, 0x0000 }, /* R266 */
+	{ 0x0000, 0x0000 }, /* R267 */
+	{ 0x0000, 0x0000 }, /* R268 */
+	{ 0x0000, 0x0000 }, /* R269 */
+	{ 0x0000, 0x0000 }, /* R270 */
+	{ 0x0000, 0x0000 }, /* R271 */
+	{ 0x807F, 0x837F }, /* R272   - Write Sequencer Ctrl (1) */
+	{ 0x017F, 0x0000 }, /* R273   - Write Sequencer Ctrl (2) */
+	{ 0x0000, 0x0000 }, /* R274 */
+	{ 0x0000, 0x0000 }, /* R275 */
+	{ 0x0000, 0x0000 }, /* R276 */
+	{ 0x0000, 0x0000 }, /* R277 */
+	{ 0x0000, 0x0000 }, /* R278 */
+	{ 0x0000, 0x0000 }, /* R279 */
+	{ 0x0000, 0x0000 }, /* R280 */
+	{ 0x0000, 0x0000 }, /* R281 */
+	{ 0x0000, 0x0000 }, /* R282 */
+	{ 0x0000, 0x0000 }, /* R283 */
+	{ 0x0000, 0x0000 }, /* R284 */
+	{ 0x0000, 0x0000 }, /* R285 */
+	{ 0x0000, 0x0000 }, /* R286 */
+	{ 0x0000, 0x0000 }, /* R287 */
+	{ 0x0000, 0x0000 }, /* R288 */
+	{ 0x0000, 0x0000 }, /* R289 */
+	{ 0x0000, 0x0000 }, /* R290 */
+	{ 0x0000, 0x0000 }, /* R291 */
+	{ 0x0000, 0x0000 }, /* R292 */
+	{ 0x0000, 0x0000 }, /* R293 */
+	{ 0x0000, 0x0000 }, /* R294 */
+	{ 0x0000, 0x0000 }, /* R295 */
+	{ 0x0000, 0x0000 }, /* R296 */
+	{ 0x0000, 0x0000 }, /* R297 */
+	{ 0x0000, 0x0000 }, /* R298 */
+	{ 0x0000, 0x0000 }, /* R299 */
+	{ 0x0000, 0x0000 }, /* R300 */
+	{ 0x0000, 0x0000 }, /* R301 */
+	{ 0x0000, 0x0000 }, /* R302 */
+	{ 0x0000, 0x0000 }, /* R303 */
+	{ 0x0000, 0x0000 }, /* R304 */
+	{ 0x0000, 0x0000 }, /* R305 */
+	{ 0x0000, 0x0000 }, /* R306 */
+	{ 0x0000, 0x0000 }, /* R307 */
+	{ 0x0000, 0x0000 }, /* R308 */
+	{ 0x0000, 0x0000 }, /* R309 */
+	{ 0x0000, 0x0000 }, /* R310 */
+	{ 0x0000, 0x0000 }, /* R311 */
+	{ 0x0000, 0x0000 }, /* R312 */
+	{ 0x0000, 0x0000 }, /* R313 */
+	{ 0x0000, 0x0000 }, /* R314 */
+	{ 0x0000, 0x0000 }, /* R315 */
+	{ 0x0000, 0x0000 }, /* R316 */
+	{ 0x0000, 0x0000 }, /* R317 */
+	{ 0x0000, 0x0000 }, /* R318 */
+	{ 0x0000, 0x0000 }, /* R319 */
+	{ 0x0000, 0x0000 }, /* R320 */
+	{ 0x0000, 0x0000 }, /* R321 */
+	{ 0x0000, 0x0000 }, /* R322 */
+	{ 0x0000, 0x0000 }, /* R323 */
+	{ 0x0000, 0x0000 }, /* R324 */
+	{ 0x0000, 0x0000 }, /* R325 */
+	{ 0x0000, 0x0000 }, /* R326 */
+	{ 0x0000, 0x0000 }, /* R327 */
+	{ 0x0000, 0x0000 }, /* R328 */
+	{ 0x0000, 0x0000 }, /* R329 */
+	{ 0x0000, 0x0000 }, /* R330 */
+	{ 0x0000, 0x0000 }, /* R331 */
+	{ 0x0000, 0x0000 }, /* R332 */
+	{ 0x0000, 0x0000 }, /* R333 */
+	{ 0x0000, 0x0000 }, /* R334 */
+	{ 0x0000, 0x0000 }, /* R335 */
+	{ 0x0000, 0x0000 }, /* R336 */
+	{ 0x0000, 0x0000 }, /* R337 */
+	{ 0x0000, 0x0000 }, /* R338 */
+	{ 0x0000, 0x0000 }, /* R339 */
+	{ 0x0000, 0x0000 }, /* R340 */
+	{ 0x0000, 0x0000 }, /* R341 */
+	{ 0x0000, 0x0000 }, /* R342 */
+	{ 0x0000, 0x0000 }, /* R343 */
+	{ 0x0000, 0x0000 }, /* R344 */
+	{ 0x0000, 0x0000 }, /* R345 */
+	{ 0x0000, 0x0000 }, /* R346 */
+	{ 0x0000, 0x0000 }, /* R347 */
+	{ 0x0000, 0x0000 }, /* R348 */
+	{ 0x0000, 0x0000 }, /* R349 */
+	{ 0x0000, 0x0000 }, /* R350 */
+	{ 0x0000, 0x0000 }, /* R351 */
+	{ 0x0000, 0x0000 }, /* R352 */
+	{ 0x0000, 0x0000 }, /* R353 */
+	{ 0x0000, 0x0000 }, /* R354 */
+	{ 0x0000, 0x0000 }, /* R355 */
+	{ 0x0000, 0x0000 }, /* R356 */
+	{ 0x0000, 0x0000 }, /* R357 */
+	{ 0x0000, 0x0000 }, /* R358 */
+	{ 0x0000, 0x0000 }, /* R359 */
+	{ 0x0000, 0x0000 }, /* R360 */
+	{ 0x0000, 0x0000 }, /* R361 */
+	{ 0x0000, 0x0000 }, /* R362 */
+	{ 0x0000, 0x0000 }, /* R363 */
+	{ 0x0000, 0x0000 }, /* R364 */
+	{ 0x0000, 0x0000 }, /* R365 */
+	{ 0x0000, 0x0000 }, /* R366 */
+	{ 0x0000, 0x0000 }, /* R367 */
+	{ 0x0000, 0x0000 }, /* R368 */
+	{ 0x0000, 0x0000 }, /* R369 */
+	{ 0x0000, 0x0000 }, /* R370 */
+	{ 0x0000, 0x0000 }, /* R371 */
+	{ 0x0000, 0x0000 }, /* R372 */
+	{ 0x0000, 0x0000 }, /* R373 */
+	{ 0x0000, 0x0000 }, /* R374 */
+	{ 0x0000, 0x0000 }, /* R375 */
+	{ 0x0000, 0x0000 }, /* R376 */
+	{ 0x0000, 0x0000 }, /* R377 */
+	{ 0x0000, 0x0000 }, /* R378 */
+	{ 0x0000, 0x0000 }, /* R379 */
+	{ 0x0000, 0x0000 }, /* R380 */
+	{ 0x0000, 0x0000 }, /* R381 */
+	{ 0x0000, 0x0000 }, /* R382 */
+	{ 0x0000, 0x0000 }, /* R383 */
+	{ 0x0000, 0x0000 }, /* R384 */
+	{ 0x0000, 0x0000 }, /* R385 */
+	{ 0x0000, 0x0000 }, /* R386 */
+	{ 0x0000, 0x0000 }, /* R387 */
+	{ 0x0000, 0x0000 }, /* R388 */
+	{ 0x0000, 0x0000 }, /* R389 */
+	{ 0x0000, 0x0000 }, /* R390 */
+	{ 0x0000, 0x0000 }, /* R391 */
+	{ 0x0000, 0x0000 }, /* R392 */
+	{ 0x0000, 0x0000 }, /* R393 */
+	{ 0x0000, 0x0000 }, /* R394 */
+	{ 0x0000, 0x0000 }, /* R395 */
+	{ 0x0000, 0x0000 }, /* R396 */
+	{ 0x0000, 0x0000 }, /* R397 */
+	{ 0x0000, 0x0000 }, /* R398 */
+	{ 0x0000, 0x0000 }, /* R399 */
+	{ 0x0000, 0x0000 }, /* R400 */
+	{ 0x0000, 0x0000 }, /* R401 */
+	{ 0x0000, 0x0000 }, /* R402 */
+	{ 0x0000, 0x0000 }, /* R403 */
+	{ 0x0000, 0x0000 }, /* R404 */
+	{ 0x0000, 0x0000 }, /* R405 */
+	{ 0x0000, 0x0000 }, /* R406 */
+	{ 0x0000, 0x0000 }, /* R407 */
+	{ 0x0000, 0x0000 }, /* R408 */
+	{ 0x0000, 0x0000 }, /* R409 */
+	{ 0x0000, 0x0000 }, /* R410 */
+	{ 0x0000, 0x0000 }, /* R411 */
+	{ 0x0000, 0x0000 }, /* R412 */
+	{ 0x0000, 0x0000 }, /* R413 */
+	{ 0x0000, 0x0000 }, /* R414 */
+	{ 0x0000, 0x0000 }, /* R415 */
+	{ 0x0000, 0x0000 }, /* R416 */
+	{ 0x0000, 0x0000 }, /* R417 */
+	{ 0x0000, 0x0000 }, /* R418 */
+	{ 0x0000, 0x0000 }, /* R419 */
+	{ 0x0000, 0x0000 }, /* R420 */
+	{ 0x0000, 0x0000 }, /* R421 */
+	{ 0x0000, 0x0000 }, /* R422 */
+	{ 0x0000, 0x0000 }, /* R423 */
+	{ 0x0000, 0x0000 }, /* R424 */
+	{ 0x0000, 0x0000 }, /* R425 */
+	{ 0x0000, 0x0000 }, /* R426 */
+	{ 0x0000, 0x0000 }, /* R427 */
+	{ 0x0000, 0x0000 }, /* R428 */
+	{ 0x0000, 0x0000 }, /* R429 */
+	{ 0x0000, 0x0000 }, /* R430 */
+	{ 0x0000, 0x0000 }, /* R431 */
+	{ 0x0000, 0x0000 }, /* R432 */
+	{ 0x0000, 0x0000 }, /* R433 */
+	{ 0x0000, 0x0000 }, /* R434 */
+	{ 0x0000, 0x0000 }, /* R435 */
+	{ 0x0000, 0x0000 }, /* R436 */
+	{ 0x0000, 0x0000 }, /* R437 */
+	{ 0x0000, 0x0000 }, /* R438 */
+	{ 0x0000, 0x0000 }, /* R439 */
+	{ 0x0000, 0x0000 }, /* R440 */
+	{ 0x0000, 0x0000 }, /* R441 */
+	{ 0x0000, 0x0000 }, /* R442 */
+	{ 0x0000, 0x0000 }, /* R443 */
+	{ 0x0000, 0x0000 }, /* R444 */
+	{ 0x0000, 0x0000 }, /* R445 */
+	{ 0x0000, 0x0000 }, /* R446 */
+	{ 0x0000, 0x0000 }, /* R447 */
+	{ 0x0000, 0x0000 }, /* R448 */
+	{ 0x0000, 0x0000 }, /* R449 */
+	{ 0x0000, 0x0000 }, /* R450 */
+	{ 0x0000, 0x0000 }, /* R451 */
+	{ 0x0000, 0x0000 }, /* R452 */
+	{ 0x0000, 0x0000 }, /* R453 */
+	{ 0x0000, 0x0000 }, /* R454 */
+	{ 0x0000, 0x0000 }, /* R455 */
+	{ 0x0000, 0x0000 }, /* R456 */
+	{ 0x0000, 0x0000 }, /* R457 */
+	{ 0x0000, 0x0000 }, /* R458 */
+	{ 0x0000, 0x0000 }, /* R459 */
+	{ 0x0000, 0x0000 }, /* R460 */
+	{ 0x0000, 0x0000 }, /* R461 */
+	{ 0x0000, 0x0000 }, /* R462 */
+	{ 0x0000, 0x0000 }, /* R463 */
+	{ 0x0000, 0x0000 }, /* R464 */
+	{ 0x0000, 0x0000 }, /* R465 */
+	{ 0x0000, 0x0000 }, /* R466 */
+	{ 0x0000, 0x0000 }, /* R467 */
+	{ 0x0000, 0x0000 }, /* R468 */
+	{ 0x0000, 0x0000 }, /* R469 */
+	{ 0x0000, 0x0000 }, /* R470 */
+	{ 0x0000, 0x0000 }, /* R471 */
+	{ 0x0000, 0x0000 }, /* R472 */
+	{ 0x0000, 0x0000 }, /* R473 */
+	{ 0x0000, 0x0000 }, /* R474 */
+	{ 0x0000, 0x0000 }, /* R475 */
+	{ 0x0000, 0x0000 }, /* R476 */
+	{ 0x0000, 0x0000 }, /* R477 */
+	{ 0x0000, 0x0000 }, /* R478 */
+	{ 0x0000, 0x0000 }, /* R479 */
+	{ 0x0000, 0x0000 }, /* R480 */
+	{ 0x0000, 0x0000 }, /* R481 */
+	{ 0x0000, 0x0000 }, /* R482 */
+	{ 0x0000, 0x0000 }, /* R483 */
+	{ 0x0000, 0x0000 }, /* R484 */
+	{ 0x0000, 0x0000 }, /* R485 */
+	{ 0x0000, 0x0000 }, /* R486 */
+	{ 0x0000, 0x0000 }, /* R487 */
+	{ 0x0000, 0x0000 }, /* R488 */
+	{ 0x0000, 0x0000 }, /* R489 */
+	{ 0x0000, 0x0000 }, /* R490 */
+	{ 0x0000, 0x0000 }, /* R491 */
+	{ 0x0000, 0x0000 }, /* R492 */
+	{ 0x0000, 0x0000 }, /* R493 */
+	{ 0x0000, 0x0000 }, /* R494 */
+	{ 0x0000, 0x0000 }, /* R495 */
+	{ 0x0000, 0x0000 }, /* R496 */
+	{ 0x0000, 0x0000 }, /* R497 */
+	{ 0x0000, 0x0000 }, /* R498 */
+	{ 0x0000, 0x0000 }, /* R499 */
+	{ 0x0000, 0x0000 }, /* R500 */
+	{ 0x0000, 0x0000 }, /* R501 */
+	{ 0x0000, 0x0000 }, /* R502 */
+	{ 0x0000, 0x0000 }, /* R503 */
+	{ 0x0000, 0x0000 }, /* R504 */
+	{ 0x0000, 0x0000 }, /* R505 */
+	{ 0x0000, 0x0000 }, /* R506 */
+	{ 0x0000, 0x0000 }, /* R507 */
+	{ 0x0000, 0x0000 }, /* R508 */
+	{ 0x0000, 0x0000 }, /* R509 */
+	{ 0x0000, 0x0000 }, /* R510 */
+	{ 0x0000, 0x0000 }, /* R511 */
+	{ 0x001F, 0x001F }, /* R512   - AIF1 Clocking (1) */
+	{ 0x003F, 0x003F }, /* R513   - AIF1 Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R514 */
+	{ 0x0000, 0x0000 }, /* R515 */
+	{ 0x001F, 0x001F }, /* R516   - AIF2 Clocking (1) */
+	{ 0x003F, 0x003F }, /* R517   - AIF2 Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R518 */
+	{ 0x0000, 0x0000 }, /* R519 */
+	{ 0x001F, 0x001F }, /* R520   - Clocking (1) */
+	{ 0x0777, 0x0777 }, /* R521   - Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R522 */
+	{ 0x0000, 0x0000 }, /* R523 */
+	{ 0x0000, 0x0000 }, /* R524 */
+	{ 0x0000, 0x0000 }, /* R525 */
+	{ 0x0000, 0x0000 }, /* R526 */
+	{ 0x0000, 0x0000 }, /* R527 */
+	{ 0x00FF, 0x00FF }, /* R528   - AIF1 Rate */
+	{ 0x00FF, 0x00FF }, /* R529   - AIF2 Rate */
+	{ 0x000F, 0x0000 }, /* R530   - Rate Status */
+	{ 0x0000, 0x0000 }, /* R531 */
+	{ 0x0000, 0x0000 }, /* R532 */
+	{ 0x0000, 0x0000 }, /* R533 */
+	{ 0x0000, 0x0000 }, /* R534 */
+	{ 0x0000, 0x0000 }, /* R535 */
+	{ 0x0000, 0x0000 }, /* R536 */
+	{ 0x0000, 0x0000 }, /* R537 */
+	{ 0x0000, 0x0000 }, /* R538 */
+	{ 0x0000, 0x0000 }, /* R539 */
+	{ 0x0000, 0x0000 }, /* R540 */
+	{ 0x0000, 0x0000 }, /* R541 */
+	{ 0x0000, 0x0000 }, /* R542 */
+	{ 0x0000, 0x0000 }, /* R543 */
+	{ 0x0007, 0x0007 }, /* R544   - FLL1 Control (1) */
+	{ 0x3F77, 0x3F77 }, /* R545   - FLL1 Control (2) */
+	{ 0xFFFF, 0xFFFF }, /* R546   - FLL1 Control (3) */
+	{ 0x7FEF, 0x7FEF }, /* R547   - FLL1 Control (4) */
+	{ 0x1FDB, 0x1FDB }, /* R548   - FLL1 Control (5) */
+	{ 0x0000, 0x0000 }, /* R549 */
+	{ 0x0000, 0x0000 }, /* R550 */
+	{ 0x0000, 0x0000 }, /* R551 */
+	{ 0x0000, 0x0000 }, /* R552 */
+	{ 0x0000, 0x0000 }, /* R553 */
+	{ 0x0000, 0x0000 }, /* R554 */
+	{ 0x0000, 0x0000 }, /* R555 */
+	{ 0x0000, 0x0000 }, /* R556 */
+	{ 0x0000, 0x0000 }, /* R557 */
+	{ 0x0000, 0x0000 }, /* R558 */
+	{ 0x0000, 0x0000 }, /* R559 */
+	{ 0x0000, 0x0000 }, /* R560 */
+	{ 0x0000, 0x0000 }, /* R561 */
+	{ 0x0000, 0x0000 }, /* R562 */
+	{ 0x0000, 0x0000 }, /* R563 */
+	{ 0x0000, 0x0000 }, /* R564 */
+	{ 0x0000, 0x0000 }, /* R565 */
+	{ 0x0000, 0x0000 }, /* R566 */
+	{ 0x0000, 0x0000 }, /* R567 */
+	{ 0x0000, 0x0000 }, /* R568 */
+	{ 0x0000, 0x0000 }, /* R569 */
+	{ 0x0000, 0x0000 }, /* R570 */
+	{ 0x0000, 0x0000 }, /* R571 */
+	{ 0x0000, 0x0000 }, /* R572 */
+	{ 0x0000, 0x0000 }, /* R573 */
+	{ 0x0000, 0x0000 }, /* R574 */
+	{ 0x0000, 0x0000 }, /* R575 */
+	{ 0x0007, 0x0007 }, /* R576   - FLL2 Control (1) */
+	{ 0x3F77, 0x3F77 }, /* R577   - FLL2 Control (2) */
+	{ 0xFFFF, 0xFFFF }, /* R578   - FLL2 Control (3) */
+	{ 0x7FEF, 0x7FEF }, /* R579   - FLL2 Control (4) */
+	{ 0x1FDB, 0x1FDB }, /* R580   - FLL2 Control (5) */
+	{ 0x0000, 0x0000 }, /* R581 */
+	{ 0x0000, 0x0000 }, /* R582 */
+	{ 0x0000, 0x0000 }, /* R583 */
+	{ 0x0000, 0x0000 }, /* R584 */
+	{ 0x0000, 0x0000 }, /* R585 */
+	{ 0x0000, 0x0000 }, /* R586 */
+	{ 0x0000, 0x0000 }, /* R587 */
+	{ 0x0000, 0x0000 }, /* R588 */
+	{ 0x0000, 0x0000 }, /* R589 */
+	{ 0x0000, 0x0000 }, /* R590 */
+	{ 0x0000, 0x0000 }, /* R591 */
+	{ 0x0000, 0x0000 }, /* R592 */
+	{ 0x0000, 0x0000 }, /* R593 */
+	{ 0x0000, 0x0000 }, /* R594 */
+	{ 0x0000, 0x0000 }, /* R595 */
+	{ 0x0000, 0x0000 }, /* R596 */
+	{ 0x0000, 0x0000 }, /* R597 */
+	{ 0x0000, 0x0000 }, /* R598 */
+	{ 0x0000, 0x0000 }, /* R599 */
+	{ 0x0000, 0x0000 }, /* R600 */
+	{ 0x0000, 0x0000 }, /* R601 */
+	{ 0x0000, 0x0000 }, /* R602 */
+	{ 0x0000, 0x0000 }, /* R603 */
+	{ 0x0000, 0x0000 }, /* R604 */
+	{ 0x0000, 0x0000 }, /* R605 */
+	{ 0x0000, 0x0000 }, /* R606 */
+	{ 0x0000, 0x0000 }, /* R607 */
+	{ 0x0000, 0x0000 }, /* R608 */
+	{ 0x0000, 0x0000 }, /* R609 */
+	{ 0x0000, 0x0000 }, /* R610 */
+	{ 0x0000, 0x0000 }, /* R611 */
+	{ 0x0000, 0x0000 }, /* R612 */
+	{ 0x0000, 0x0000 }, /* R613 */
+	{ 0x0000, 0x0000 }, /* R614 */
+	{ 0x0000, 0x0000 }, /* R615 */
+	{ 0x0000, 0x0000 }, /* R616 */
+	{ 0x0000, 0x0000 }, /* R617 */
+	{ 0x0000, 0x0000 }, /* R618 */
+	{ 0x0000, 0x0000 }, /* R619 */
+	{ 0x0000, 0x0000 }, /* R620 */
+	{ 0x0000, 0x0000 }, /* R621 */
+	{ 0x0000, 0x0000 }, /* R622 */
+	{ 0x0000, 0x0000 }, /* R623 */
+	{ 0x0000, 0x0000 }, /* R624 */
+	{ 0x0000, 0x0000 }, /* R625 */
+	{ 0x0000, 0x0000 }, /* R626 */
+	{ 0x0000, 0x0000 }, /* R627 */
+	{ 0x0000, 0x0000 }, /* R628 */
+	{ 0x0000, 0x0000 }, /* R629 */
+	{ 0x0000, 0x0000 }, /* R630 */
+	{ 0x0000, 0x0000 }, /* R631 */
+	{ 0x0000, 0x0000 }, /* R632 */
+	{ 0x0000, 0x0000 }, /* R633 */
+	{ 0x0000, 0x0000 }, /* R634 */
+	{ 0x0000, 0x0000 }, /* R635 */
+	{ 0x0000, 0x0000 }, /* R636 */
+	{ 0x0000, 0x0000 }, /* R637 */
+	{ 0x0000, 0x0000 }, /* R638 */
+	{ 0x0000, 0x0000 }, /* R639 */
+	{ 0x0000, 0x0000 }, /* R640 */
+	{ 0x0000, 0x0000 }, /* R641 */
+	{ 0x0000, 0x0000 }, /* R642 */
+	{ 0x0000, 0x0000 }, /* R643 */
+	{ 0x0000, 0x0000 }, /* R644 */
+	{ 0x0000, 0x0000 }, /* R645 */
+	{ 0x0000, 0x0000 }, /* R646 */
+	{ 0x0000, 0x0000 }, /* R647 */
+	{ 0x0000, 0x0000 }, /* R648 */
+	{ 0x0000, 0x0000 }, /* R649 */
+	{ 0x0000, 0x0000 }, /* R650 */
+	{ 0x0000, 0x0000 }, /* R651 */
+	{ 0x0000, 0x0000 }, /* R652 */
+	{ 0x0000, 0x0000 }, /* R653 */
+	{ 0x0000, 0x0000 }, /* R654 */
+	{ 0x0000, 0x0000 }, /* R655 */
+	{ 0x0000, 0x0000 }, /* R656 */
+	{ 0x0000, 0x0000 }, /* R657 */
+	{ 0x0000, 0x0000 }, /* R658 */
+	{ 0x0000, 0x0000 }, /* R659 */
+	{ 0x0000, 0x0000 }, /* R660 */
+	{ 0x0000, 0x0000 }, /* R661 */
+	{ 0x0000, 0x0000 }, /* R662 */
+	{ 0x0000, 0x0000 }, /* R663 */
+	{ 0x0000, 0x0000 }, /* R664 */
+	{ 0x0000, 0x0000 }, /* R665 */
+	{ 0x0000, 0x0000 }, /* R666 */
+	{ 0x0000, 0x0000 }, /* R667 */
+	{ 0x0000, 0x0000 }, /* R668 */
+	{ 0x0000, 0x0000 }, /* R669 */
+	{ 0x0000, 0x0000 }, /* R670 */
+	{ 0x0000, 0x0000 }, /* R671 */
+	{ 0x0000, 0x0000 }, /* R672 */
+	{ 0x0000, 0x0000 }, /* R673 */
+	{ 0x0000, 0x0000 }, /* R674 */
+	{ 0x0000, 0x0000 }, /* R675 */
+	{ 0x0000, 0x0000 }, /* R676 */
+	{ 0x0000, 0x0000 }, /* R677 */
+	{ 0x0000, 0x0000 }, /* R678 */
+	{ 0x0000, 0x0000 }, /* R679 */
+	{ 0x0000, 0x0000 }, /* R680 */
+	{ 0x0000, 0x0000 }, /* R681 */
+	{ 0x0000, 0x0000 }, /* R682 */
+	{ 0x0000, 0x0000 }, /* R683 */
+	{ 0x0000, 0x0000 }, /* R684 */
+	{ 0x0000, 0x0000 }, /* R685 */
+	{ 0x0000, 0x0000 }, /* R686 */
+	{ 0x0000, 0x0000 }, /* R687 */
+	{ 0x0000, 0x0000 }, /* R688 */
+	{ 0x0000, 0x0000 }, /* R689 */
+	{ 0x0000, 0x0000 }, /* R690 */
+	{ 0x0000, 0x0000 }, /* R691 */
+	{ 0x0000, 0x0000 }, /* R692 */
+	{ 0x0000, 0x0000 }, /* R693 */
+	{ 0x0000, 0x0000 }, /* R694 */
+	{ 0x0000, 0x0000 }, /* R695 */
+	{ 0x0000, 0x0000 }, /* R696 */
+	{ 0x0000, 0x0000 }, /* R697 */
+	{ 0x0000, 0x0000 }, /* R698 */
+	{ 0x0000, 0x0000 }, /* R699 */
+	{ 0x0000, 0x0000 }, /* R700 */
+	{ 0x0000, 0x0000 }, /* R701 */
+	{ 0x0000, 0x0000 }, /* R702 */
+	{ 0x0000, 0x0000 }, /* R703 */
+	{ 0x0000, 0x0000 }, /* R704 */
+	{ 0x0000, 0x0000 }, /* R705 */
+	{ 0x0000, 0x0000 }, /* R706 */
+	{ 0x0000, 0x0000 }, /* R707 */
+	{ 0x0000, 0x0000 }, /* R708 */
+	{ 0x0000, 0x0000 }, /* R709 */
+	{ 0x0000, 0x0000 }, /* R710 */
+	{ 0x0000, 0x0000 }, /* R711 */
+	{ 0x0000, 0x0000 }, /* R712 */
+	{ 0x0000, 0x0000 }, /* R713 */
+	{ 0x0000, 0x0000 }, /* R714 */
+	{ 0x0000, 0x0000 }, /* R715 */
+	{ 0x0000, 0x0000 }, /* R716 */
+	{ 0x0000, 0x0000 }, /* R717 */
+	{ 0x0000, 0x0000 }, /* R718 */
+	{ 0x0000, 0x0000 }, /* R719 */
+	{ 0x0000, 0x0000 }, /* R720 */
+	{ 0x0000, 0x0000 }, /* R721 */
+	{ 0x0000, 0x0000 }, /* R722 */
+	{ 0x0000, 0x0000 }, /* R723 */
+	{ 0x0000, 0x0000 }, /* R724 */
+	{ 0x0000, 0x0000 }, /* R725 */
+	{ 0x0000, 0x0000 }, /* R726 */
+	{ 0x0000, 0x0000 }, /* R727 */
+	{ 0x0000, 0x0000 }, /* R728 */
+	{ 0x0000, 0x0000 }, /* R729 */
+	{ 0x0000, 0x0000 }, /* R730 */
+	{ 0x0000, 0x0000 }, /* R731 */
+	{ 0x0000, 0x0000 }, /* R732 */
+	{ 0x0000, 0x0000 }, /* R733 */
+	{ 0x0000, 0x0000 }, /* R734 */
+	{ 0x0000, 0x0000 }, /* R735 */
+	{ 0x0000, 0x0000 }, /* R736 */
+	{ 0x0000, 0x0000 }, /* R737 */
+	{ 0x0000, 0x0000 }, /* R738 */
+	{ 0x0000, 0x0000 }, /* R739 */
+	{ 0x0000, 0x0000 }, /* R740 */
+	{ 0x0000, 0x0000 }, /* R741 */
+	{ 0x0000, 0x0000 }, /* R742 */
+	{ 0x0000, 0x0000 }, /* R743 */
+	{ 0x0000, 0x0000 }, /* R744 */
+	{ 0x0000, 0x0000 }, /* R745 */
+	{ 0x0000, 0x0000 }, /* R746 */
+	{ 0x0000, 0x0000 }, /* R747 */
+	{ 0x0000, 0x0000 }, /* R748 */
+	{ 0x0000, 0x0000 }, /* R749 */
+	{ 0x0000, 0x0000 }, /* R750 */
+	{ 0x0000, 0x0000 }, /* R751 */
+	{ 0x0000, 0x0000 }, /* R752 */
+	{ 0x0000, 0x0000 }, /* R753 */
+	{ 0x0000, 0x0000 }, /* R754 */
+	{ 0x0000, 0x0000 }, /* R755 */
+	{ 0x0000, 0x0000 }, /* R756 */
+	{ 0x0000, 0x0000 }, /* R757 */
+	{ 0x0000, 0x0000 }, /* R758 */
+	{ 0x0000, 0x0000 }, /* R759 */
+	{ 0x0000, 0x0000 }, /* R760 */
+	{ 0x0000, 0x0000 }, /* R761 */
+	{ 0x0000, 0x0000 }, /* R762 */
+	{ 0x0000, 0x0000 }, /* R763 */
+	{ 0x0000, 0x0000 }, /* R764 */
+	{ 0x0000, 0x0000 }, /* R765 */
+	{ 0x0000, 0x0000 }, /* R766 */
+	{ 0x0000, 0x0000 }, /* R767 */
+	{ 0xE1F8, 0xE1F8 }, /* R768   - AIF1 Control (1) */
+	{ 0xCD1F, 0xCD1F }, /* R769   - AIF1 Control (2) */
+	{ 0xF000, 0xF000 }, /* R770   - AIF1 Master/Slave */
+	{ 0x01F0, 0x01F0 }, /* R771   - AIF1 BCLK */
+	{ 0x0FFF, 0x0FFF }, /* R772   - AIF1ADC LRCLK */
+	{ 0x0FFF, 0x0FFF }, /* R773   - AIF1DAC LRCLK */
+	{ 0x0003, 0x0003 }, /* R774   - AIF1DAC Data */
+	{ 0x0003, 0x0003 }, /* R775   - AIF1ADC Data */
+	{ 0x0000, 0x0000 }, /* R776 */
+	{ 0x0000, 0x0000 }, /* R777 */
+	{ 0x0000, 0x0000 }, /* R778 */
+	{ 0x0000, 0x0000 }, /* R779 */
+	{ 0x0000, 0x0000 }, /* R780 */
+	{ 0x0000, 0x0000 }, /* R781 */
+	{ 0x0000, 0x0000 }, /* R782 */
+	{ 0x0000, 0x0000 }, /* R783 */
+	{ 0xF1F8, 0xF1F8 }, /* R784   - AIF2 Control (1) */
+	{ 0xFD1F, 0xFD1F }, /* R785   - AIF2 Control (2) */
+	{ 0xF000, 0xF000 }, /* R786   - AIF2 Master/Slave */
+	{ 0x01F0, 0x01F0 }, /* R787   - AIF2 BCLK */
+	{ 0x0FFF, 0x0FFF }, /* R788   - AIF2ADC LRCLK */
+	{ 0x0FFF, 0x0FFF }, /* R789   - AIF2DAC LRCLK */
+	{ 0x0003, 0x0003 }, /* R790   - AIF2DAC Data */
+	{ 0x0003, 0x0003 }, /* R791   - AIF2ADC Data */
+	{ 0x0000, 0x0000 }, /* R792 */
+	{ 0x0000, 0x0000 }, /* R793 */
+	{ 0x0000, 0x0000 }, /* R794 */
+	{ 0x0000, 0x0000 }, /* R795 */
+	{ 0x0000, 0x0000 }, /* R796 */
+	{ 0x0000, 0x0000 }, /* R797 */
+	{ 0x0000, 0x0000 }, /* R798 */
+	{ 0x0000, 0x0000 }, /* R799 */
+	{ 0x0000, 0x0000 }, /* R800 */
+	{ 0x0000, 0x0000 }, /* R801 */
+	{ 0x0000, 0x0000 }, /* R802 */
+	{ 0x0000, 0x0000 }, /* R803 */
+	{ 0x0000, 0x0000 }, /* R804 */
+	{ 0x0000, 0x0000 }, /* R805 */
+	{ 0x0000, 0x0000 }, /* R806 */
+	{ 0x0000, 0x0000 }, /* R807 */
+	{ 0x0000, 0x0000 }, /* R808 */
+	{ 0x0000, 0x0000 }, /* R809 */
+	{ 0x0000, 0x0000 }, /* R810 */
+	{ 0x0000, 0x0000 }, /* R811 */
+	{ 0x0000, 0x0000 }, /* R812 */
+	{ 0x0000, 0x0000 }, /* R813 */
+	{ 0x0000, 0x0000 }, /* R814 */
+	{ 0x0000, 0x0000 }, /* R815 */
+	{ 0x0000, 0x0000 }, /* R816 */
+	{ 0x0000, 0x0000 }, /* R817 */
+	{ 0x0000, 0x0000 }, /* R818 */
+	{ 0x0000, 0x0000 }, /* R819 */
+	{ 0x0000, 0x0000 }, /* R820 */
+	{ 0x0000, 0x0000 }, /* R821 */
+	{ 0x0000, 0x0000 }, /* R822 */
+	{ 0x0000, 0x0000 }, /* R823 */
+	{ 0x0000, 0x0000 }, /* R824 */
+	{ 0x0000, 0x0000 }, /* R825 */
+	{ 0x0000, 0x0000 }, /* R826 */
+	{ 0x0000, 0x0000 }, /* R827 */
+	{ 0x0000, 0x0000 }, /* R828 */
+	{ 0x0000, 0x0000 }, /* R829 */
+	{ 0x0000, 0x0000 }, /* R830 */
+	{ 0x0000, 0x0000 }, /* R831 */
+	{ 0x0000, 0x0000 }, /* R832 */
+	{ 0x0000, 0x0000 }, /* R833 */
+	{ 0x0000, 0x0000 }, /* R834 */
+	{ 0x0000, 0x0000 }, /* R835 */
+	{ 0x0000, 0x0000 }, /* R836 */
+	{ 0x0000, 0x0000 }, /* R837 */
+	{ 0x0000, 0x0000 }, /* R838 */
+	{ 0x0000, 0x0000 }, /* R839 */
+	{ 0x0000, 0x0000 }, /* R840 */
+	{ 0x0000, 0x0000 }, /* R841 */
+	{ 0x0000, 0x0000 }, /* R842 */
+	{ 0x0000, 0x0000 }, /* R843 */
+	{ 0x0000, 0x0000 }, /* R844 */
+	{ 0x0000, 0x0000 }, /* R845 */
+	{ 0x0000, 0x0000 }, /* R846 */
+	{ 0x0000, 0x0000 }, /* R847 */
+	{ 0x0000, 0x0000 }, /* R848 */
+	{ 0x0000, 0x0000 }, /* R849 */
+	{ 0x0000, 0x0000 }, /* R850 */
+	{ 0x0000, 0x0000 }, /* R851 */
+	{ 0x0000, 0x0000 }, /* R852 */
+	{ 0x0000, 0x0000 }, /* R853 */
+	{ 0x0000, 0x0000 }, /* R854 */
+	{ 0x0000, 0x0000 }, /* R855 */
+	{ 0x0000, 0x0000 }, /* R856 */
+	{ 0x0000, 0x0000 }, /* R857 */
+	{ 0x0000, 0x0000 }, /* R858 */
+	{ 0x0000, 0x0000 }, /* R859 */
+	{ 0x0000, 0x0000 }, /* R860 */
+	{ 0x0000, 0x0000 }, /* R861 */
+	{ 0x0000, 0x0000 }, /* R862 */
+	{ 0x0000, 0x0000 }, /* R863 */
+	{ 0x0000, 0x0000 }, /* R864 */
+	{ 0x0000, 0x0000 }, /* R865 */
+	{ 0x0000, 0x0000 }, /* R866 */
+	{ 0x0000, 0x0000 }, /* R867 */
+	{ 0x0000, 0x0000 }, /* R868 */
+	{ 0x0000, 0x0000 }, /* R869 */
+	{ 0x0000, 0x0000 }, /* R870 */
+	{ 0x0000, 0x0000 }, /* R871 */
+	{ 0x0000, 0x0000 }, /* R872 */
+	{ 0x0000, 0x0000 }, /* R873 */
+	{ 0x0000, 0x0000 }, /* R874 */
+	{ 0x0000, 0x0000 }, /* R875 */
+	{ 0x0000, 0x0000 }, /* R876 */
+	{ 0x0000, 0x0000 }, /* R877 */
+	{ 0x0000, 0x0000 }, /* R878 */
+	{ 0x0000, 0x0000 }, /* R879 */
+	{ 0x0000, 0x0000 }, /* R880 */
+	{ 0x0000, 0x0000 }, /* R881 */
+	{ 0x0000, 0x0000 }, /* R882 */
+	{ 0x0000, 0x0000 }, /* R883 */
+	{ 0x0000, 0x0000 }, /* R884 */
+	{ 0x0000, 0x0000 }, /* R885 */
+	{ 0x0000, 0x0000 }, /* R886 */
+	{ 0x0000, 0x0000 }, /* R887 */
+	{ 0x0000, 0x0000 }, /* R888 */
+	{ 0x0000, 0x0000 }, /* R889 */
+	{ 0x0000, 0x0000 }, /* R890 */
+	{ 0x0000, 0x0000 }, /* R891 */
+	{ 0x0000, 0x0000 }, /* R892 */
+	{ 0x0000, 0x0000 }, /* R893 */
+	{ 0x0000, 0x0000 }, /* R894 */
+	{ 0x0000, 0x0000 }, /* R895 */
+	{ 0x0000, 0x0000 }, /* R896 */
+	{ 0x0000, 0x0000 }, /* R897 */
+	{ 0x0000, 0x0000 }, /* R898 */
+	{ 0x0000, 0x0000 }, /* R899 */
+	{ 0x0000, 0x0000 }, /* R900 */
+	{ 0x0000, 0x0000 }, /* R901 */
+	{ 0x0000, 0x0000 }, /* R902 */
+	{ 0x0000, 0x0000 }, /* R903 */
+	{ 0x0000, 0x0000 }, /* R904 */
+	{ 0x0000, 0x0000 }, /* R905 */
+	{ 0x0000, 0x0000 }, /* R906 */
+	{ 0x0000, 0x0000 }, /* R907 */
+	{ 0x0000, 0x0000 }, /* R908 */
+	{ 0x0000, 0x0000 }, /* R909 */
+	{ 0x0000, 0x0000 }, /* R910 */
+	{ 0x0000, 0x0000 }, /* R911 */
+	{ 0x0000, 0x0000 }, /* R912 */
+	{ 0x0000, 0x0000 }, /* R913 */
+	{ 0x0000, 0x0000 }, /* R914 */
+	{ 0x0000, 0x0000 }, /* R915 */
+	{ 0x0000, 0x0000 }, /* R916 */
+	{ 0x0000, 0x0000 }, /* R917 */
+	{ 0x0000, 0x0000 }, /* R918 */
+	{ 0x0000, 0x0000 }, /* R919 */
+	{ 0x0000, 0x0000 }, /* R920 */
+	{ 0x0000, 0x0000 }, /* R921 */
+	{ 0x0000, 0x0000 }, /* R922 */
+	{ 0x0000, 0x0000 }, /* R923 */
+	{ 0x0000, 0x0000 }, /* R924 */
+	{ 0x0000, 0x0000 }, /* R925 */
+	{ 0x0000, 0x0000 }, /* R926 */
+	{ 0x0000, 0x0000 }, /* R927 */
+	{ 0x0000, 0x0000 }, /* R928 */
+	{ 0x0000, 0x0000 }, /* R929 */
+	{ 0x0000, 0x0000 }, /* R930 */
+	{ 0x0000, 0x0000 }, /* R931 */
+	{ 0x0000, 0x0000 }, /* R932 */
+	{ 0x0000, 0x0000 }, /* R933 */
+	{ 0x0000, 0x0000 }, /* R934 */
+	{ 0x0000, 0x0000 }, /* R935 */
+	{ 0x0000, 0x0000 }, /* R936 */
+	{ 0x0000, 0x0000 }, /* R937 */
+	{ 0x0000, 0x0000 }, /* R938 */
+	{ 0x0000, 0x0000 }, /* R939 */
+	{ 0x0000, 0x0000 }, /* R940 */
+	{ 0x0000, 0x0000 }, /* R941 */
+	{ 0x0000, 0x0000 }, /* R942 */
+	{ 0x0000, 0x0000 }, /* R943 */
+	{ 0x0000, 0x0000 }, /* R944 */
+	{ 0x0000, 0x0000 }, /* R945 */
+	{ 0x0000, 0x0000 }, /* R946 */
+	{ 0x0000, 0x0000 }, /* R947 */
+	{ 0x0000, 0x0000 }, /* R948 */
+	{ 0x0000, 0x0000 }, /* R949 */
+	{ 0x0000, 0x0000 }, /* R950 */
+	{ 0x0000, 0x0000 }, /* R951 */
+	{ 0x0000, 0x0000 }, /* R952 */
+	{ 0x0000, 0x0000 }, /* R953 */
+	{ 0x0000, 0x0000 }, /* R954 */
+	{ 0x0000, 0x0000 }, /* R955 */
+	{ 0x0000, 0x0000 }, /* R956 */
+	{ 0x0000, 0x0000 }, /* R957 */
+	{ 0x0000, 0x0000 }, /* R958 */
+	{ 0x0000, 0x0000 }, /* R959 */
+	{ 0x0000, 0x0000 }, /* R960 */
+	{ 0x0000, 0x0000 }, /* R961 */
+	{ 0x0000, 0x0000 }, /* R962 */
+	{ 0x0000, 0x0000 }, /* R963 */
+	{ 0x0000, 0x0000 }, /* R964 */
+	{ 0x0000, 0x0000 }, /* R965 */
+	{ 0x0000, 0x0000 }, /* R966 */
+	{ 0x0000, 0x0000 }, /* R967 */
+	{ 0x0000, 0x0000 }, /* R968 */
+	{ 0x0000, 0x0000 }, /* R969 */
+	{ 0x0000, 0x0000 }, /* R970 */
+	{ 0x0000, 0x0000 }, /* R971 */
+	{ 0x0000, 0x0000 }, /* R972 */
+	{ 0x0000, 0x0000 }, /* R973 */
+	{ 0x0000, 0x0000 }, /* R974 */
+	{ 0x0000, 0x0000 }, /* R975 */
+	{ 0x0000, 0x0000 }, /* R976 */
+	{ 0x0000, 0x0000 }, /* R977 */
+	{ 0x0000, 0x0000 }, /* R978 */
+	{ 0x0000, 0x0000 }, /* R979 */
+	{ 0x0000, 0x0000 }, /* R980 */
+	{ 0x0000, 0x0000 }, /* R981 */
+	{ 0x0000, 0x0000 }, /* R982 */
+	{ 0x0000, 0x0000 }, /* R983 */
+	{ 0x0000, 0x0000 }, /* R984 */
+	{ 0x0000, 0x0000 }, /* R985 */
+	{ 0x0000, 0x0000 }, /* R986 */
+	{ 0x0000, 0x0000 }, /* R987 */
+	{ 0x0000, 0x0000 }, /* R988 */
+	{ 0x0000, 0x0000 }, /* R989 */
+	{ 0x0000, 0x0000 }, /* R990 */
+	{ 0x0000, 0x0000 }, /* R991 */
+	{ 0x0000, 0x0000 }, /* R992 */
+	{ 0x0000, 0x0000 }, /* R993 */
+	{ 0x0000, 0x0000 }, /* R994 */
+	{ 0x0000, 0x0000 }, /* R995 */
+	{ 0x0000, 0x0000 }, /* R996 */
+	{ 0x0000, 0x0000 }, /* R997 */
+	{ 0x0000, 0x0000 }, /* R998 */
+	{ 0x0000, 0x0000 }, /* R999 */
+	{ 0x0000, 0x0000 }, /* R1000 */
+	{ 0x0000, 0x0000 }, /* R1001 */
+	{ 0x0000, 0x0000 }, /* R1002 */
+	{ 0x0000, 0x0000 }, /* R1003 */
+	{ 0x0000, 0x0000 }, /* R1004 */
+	{ 0x0000, 0x0000 }, /* R1005 */
+	{ 0x0000, 0x0000 }, /* R1006 */
+	{ 0x0000, 0x0000 }, /* R1007 */
+	{ 0x0000, 0x0000 }, /* R1008 */
+	{ 0x0000, 0x0000 }, /* R1009 */
+	{ 0x0000, 0x0000 }, /* R1010 */
+	{ 0x0000, 0x0000 }, /* R1011 */
+	{ 0x0000, 0x0000 }, /* R1012 */
+	{ 0x0000, 0x0000 }, /* R1013 */
+	{ 0x0000, 0x0000 }, /* R1014 */
+	{ 0x0000, 0x0000 }, /* R1015 */
+	{ 0x0000, 0x0000 }, /* R1016 */
+	{ 0x0000, 0x0000 }, /* R1017 */
+	{ 0x0000, 0x0000 }, /* R1018 */
+	{ 0x0000, 0x0000 }, /* R1019 */
+	{ 0x0000, 0x0000 }, /* R1020 */
+	{ 0x0000, 0x0000 }, /* R1021 */
+	{ 0x0000, 0x0000 }, /* R1022 */
+	{ 0x0000, 0x0000 }, /* R1023 */
+	{ 0x00FF, 0x01FF }, /* R1024  - AIF1 ADC1 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1025  - AIF1 ADC1 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1026  - AIF1 DAC1 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1027  - AIF1 DAC1 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1028  - AIF1 ADC2 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1029  - AIF1 ADC2 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1030  - AIF1 DAC2 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1031  - AIF1 DAC2 Right Volume */
+	{ 0x0000, 0x0000 }, /* R1032 */
+	{ 0x0000, 0x0000 }, /* R1033 */
+	{ 0x0000, 0x0000 }, /* R1034 */
+	{ 0x0000, 0x0000 }, /* R1035 */
+	{ 0x0000, 0x0000 }, /* R1036 */
+	{ 0x0000, 0x0000 }, /* R1037 */
+	{ 0x0000, 0x0000 }, /* R1038 */
+	{ 0x0000, 0x0000 }, /* R1039 */
+	{ 0xF800, 0xF800 }, /* R1040  - AIF1 ADC1 Filters */
+	{ 0x7800, 0x7800 }, /* R1041  - AIF1 ADC2 Filters */
+	{ 0x0000, 0x0000 }, /* R1042 */
+	{ 0x0000, 0x0000 }, /* R1043 */
+	{ 0x0000, 0x0000 }, /* R1044 */
+	{ 0x0000, 0x0000 }, /* R1045 */
+	{ 0x0000, 0x0000 }, /* R1046 */
+	{ 0x0000, 0x0000 }, /* R1047 */
+	{ 0x0000, 0x0000 }, /* R1048 */
+	{ 0x0000, 0x0000 }, /* R1049 */
+	{ 0x0000, 0x0000 }, /* R1050 */
+	{ 0x0000, 0x0000 }, /* R1051 */
+	{ 0x0000, 0x0000 }, /* R1052 */
+	{ 0x0000, 0x0000 }, /* R1053 */
+	{ 0x0000, 0x0000 }, /* R1054 */
+	{ 0x0000, 0x0000 }, /* R1055 */
+	{ 0x02B6, 0x02B6 }, /* R1056  - AIF1 DAC1 Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1057  - AIF1 DAC1 Filters (2) */
+	{ 0x02B6, 0x02B6 }, /* R1058  - AIF1 DAC2 Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1059  - AIF1 DAC2 Filters (2) */
+	{ 0x0000, 0x0000 }, /* R1060 */
+	{ 0x0000, 0x0000 }, /* R1061 */
+	{ 0x0000, 0x0000 }, /* R1062 */
+	{ 0x0000, 0x0000 }, /* R1063 */
+	{ 0x0000, 0x0000 }, /* R1064 */
+	{ 0x0000, 0x0000 }, /* R1065 */
+	{ 0x0000, 0x0000 }, /* R1066 */
+	{ 0x0000, 0x0000 }, /* R1067 */
+	{ 0x0000, 0x0000 }, /* R1068 */
+	{ 0x0000, 0x0000 }, /* R1069 */
+	{ 0x0000, 0x0000 }, /* R1070 */
+	{ 0x0000, 0x0000 }, /* R1071 */
+	{ 0x0000, 0x0000 }, /* R1072 */
+	{ 0x0000, 0x0000 }, /* R1073 */
+	{ 0x0000, 0x0000 }, /* R1074 */
+	{ 0x0000, 0x0000 }, /* R1075 */
+	{ 0x0000, 0x0000 }, /* R1076 */
+	{ 0x0000, 0x0000 }, /* R1077 */
+	{ 0x0000, 0x0000 }, /* R1078 */
+	{ 0x0000, 0x0000 }, /* R1079 */
+	{ 0x0000, 0x0000 }, /* R1080 */
+	{ 0x0000, 0x0000 }, /* R1081 */
+	{ 0x0000, 0x0000 }, /* R1082 */
+	{ 0x0000, 0x0000 }, /* R1083 */
+	{ 0x0000, 0x0000 }, /* R1084 */
+	{ 0x0000, 0x0000 }, /* R1085 */
+	{ 0x0000, 0x0000 }, /* R1086 */
+	{ 0x0000, 0x0000 }, /* R1087 */
+	{ 0xFFFF, 0xFFFF }, /* R1088  - AIF1 DRC1 (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1089  - AIF1 DRC1 (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1090  - AIF1 DRC1 (3) */
+	{ 0x07FF, 0x07FF }, /* R1091  - AIF1 DRC1 (4) */
+	{ 0x03FF, 0x03FF }, /* R1092  - AIF1 DRC1 (5) */
+	{ 0x0000, 0x0000 }, /* R1093 */
+	{ 0x0000, 0x0000 }, /* R1094 */
+	{ 0x0000, 0x0000 }, /* R1095 */
+	{ 0x0000, 0x0000 }, /* R1096 */
+	{ 0x0000, 0x0000 }, /* R1097 */
+	{ 0x0000, 0x0000 }, /* R1098 */
+	{ 0x0000, 0x0000 }, /* R1099 */
+	{ 0x0000, 0x0000 }, /* R1100 */
+	{ 0x0000, 0x0000 }, /* R1101 */
+	{ 0x0000, 0x0000 }, /* R1102 */
+	{ 0x0000, 0x0000 }, /* R1103 */
+	{ 0xFFFF, 0xFFFF }, /* R1104  - AIF1 DRC2 (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1105  - AIF1 DRC2 (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1106  - AIF1 DRC2 (3) */
+	{ 0x07FF, 0x07FF }, /* R1107  - AIF1 DRC2 (4) */
+	{ 0x03FF, 0x03FF }, /* R1108  - AIF1 DRC2 (5) */
+	{ 0x0000, 0x0000 }, /* R1109 */
+	{ 0x0000, 0x0000 }, /* R1110 */
+	{ 0x0000, 0x0000 }, /* R1111 */
+	{ 0x0000, 0x0000 }, /* R1112 */
+	{ 0x0000, 0x0000 }, /* R1113 */
+	{ 0x0000, 0x0000 }, /* R1114 */
+	{ 0x0000, 0x0000 }, /* R1115 */
+	{ 0x0000, 0x0000 }, /* R1116 */
+	{ 0x0000, 0x0000 }, /* R1117 */
+	{ 0x0000, 0x0000 }, /* R1118 */
+	{ 0x0000, 0x0000 }, /* R1119 */
+	{ 0x0000, 0x0000 }, /* R1120 */
+	{ 0x0000, 0x0000 }, /* R1121 */
+	{ 0x0000, 0x0000 }, /* R1122 */
+	{ 0x0000, 0x0000 }, /* R1123 */
+	{ 0x0000, 0x0000 }, /* R1124 */
+	{ 0x0000, 0x0000 }, /* R1125 */
+	{ 0x0000, 0x0000 }, /* R1126 */
+	{ 0x0000, 0x0000 }, /* R1127 */
+	{ 0x0000, 0x0000 }, /* R1128 */
+	{ 0x0000, 0x0000 }, /* R1129 */
+	{ 0x0000, 0x0000 }, /* R1130 */
+	{ 0x0000, 0x0000 }, /* R1131 */
+	{ 0x0000, 0x0000 }, /* R1132 */
+	{ 0x0000, 0x0000 }, /* R1133 */
+	{ 0x0000, 0x0000 }, /* R1134 */
+	{ 0x0000, 0x0000 }, /* R1135 */
+	{ 0x0000, 0x0000 }, /* R1136 */
+	{ 0x0000, 0x0000 }, /* R1137 */
+	{ 0x0000, 0x0000 }, /* R1138 */
+	{ 0x0000, 0x0000 }, /* R1139 */
+	{ 0x0000, 0x0000 }, /* R1140 */
+	{ 0x0000, 0x0000 }, /* R1141 */
+	{ 0x0000, 0x0000 }, /* R1142 */
+	{ 0x0000, 0x0000 }, /* R1143 */
+	{ 0x0000, 0x0000 }, /* R1144 */
+	{ 0x0000, 0x0000 }, /* R1145 */
+	{ 0x0000, 0x0000 }, /* R1146 */
+	{ 0x0000, 0x0000 }, /* R1147 */
+	{ 0x0000, 0x0000 }, /* R1148 */
+	{ 0x0000, 0x0000 }, /* R1149 */
+	{ 0x0000, 0x0000 }, /* R1150 */
+	{ 0x0000, 0x0000 }, /* R1151 */
+	{ 0xFFFF, 0xFFFF }, /* R1152  - AIF1 DAC1 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1153  - AIF1 DAC1 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1154  - AIF1 DAC1 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1155  - AIF1 DAC1 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1156  - AIF1 DAC1 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1157  - AIF1 DAC1 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1158  - AIF1 DAC1 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1159  - AIF1 DAC1 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1160  - AIF1 DAC1 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1161  - AIF1 DAC1 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1162  - AIF1 DAC1 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1163  - AIF1 DAC1 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1164  - AIF1 DAC1 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1165  - AIF1 DAC1 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1166  - AIF1 DAC1 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1167  - AIF1 DAC1 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1168  - AIF1 DAC1 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1169  - AIF1 DAC1 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1170  - AIF1 DAC1 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1171  - AIF1 DAC1 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1172 */
+	{ 0x0000, 0x0000 }, /* R1173 */
+	{ 0x0000, 0x0000 }, /* R1174 */
+	{ 0x0000, 0x0000 }, /* R1175 */
+	{ 0x0000, 0x0000 }, /* R1176 */
+	{ 0x0000, 0x0000 }, /* R1177 */
+	{ 0x0000, 0x0000 }, /* R1178 */
+	{ 0x0000, 0x0000 }, /* R1179 */
+	{ 0x0000, 0x0000 }, /* R1180 */
+	{ 0x0000, 0x0000 }, /* R1181 */
+	{ 0x0000, 0x0000 }, /* R1182 */
+	{ 0x0000, 0x0000 }, /* R1183 */
+	{ 0xFFFF, 0xFFFF }, /* R1184  - AIF1 DAC2 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1185  - AIF1 DAC2 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1186  - AIF1 DAC2 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1187  - AIF1 DAC2 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1188  - AIF1 DAC2 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1189  - AIF1 DAC2 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1190  - AIF1 DAC2 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1191  - AIF1 DAC2 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1192  - AIF1 DAC2 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1193  - AIF1 DAC2 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1194  - AIF1 DAC2 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1195  - AIF1 DAC2 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1196  - AIF1 DAC2 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1197  - AIF1 DAC2 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1198  - AIF1 DAC2 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1199  - AIF1 DAC2 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1200  - AIF1 DAC2 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1201  - AIF1 DAC2 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1202  - AIF1 DAC2 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1203  - AIF1 DAC2 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1204 */
+	{ 0x0000, 0x0000 }, /* R1205 */
+	{ 0x0000, 0x0000 }, /* R1206 */
+	{ 0x0000, 0x0000 }, /* R1207 */
+	{ 0x0000, 0x0000 }, /* R1208 */
+	{ 0x0000, 0x0000 }, /* R1209 */
+	{ 0x0000, 0x0000 }, /* R1210 */
+	{ 0x0000, 0x0000 }, /* R1211 */
+	{ 0x0000, 0x0000 }, /* R1212 */
+	{ 0x0000, 0x0000 }, /* R1213 */
+	{ 0x0000, 0x0000 }, /* R1214 */
+	{ 0x0000, 0x0000 }, /* R1215 */
+	{ 0x0000, 0x0000 }, /* R1216 */
+	{ 0x0000, 0x0000 }, /* R1217 */
+	{ 0x0000, 0x0000 }, /* R1218 */
+	{ 0x0000, 0x0000 }, /* R1219 */
+	{ 0x0000, 0x0000 }, /* R1220 */
+	{ 0x0000, 0x0000 }, /* R1221 */
+	{ 0x0000, 0x0000 }, /* R1222 */
+	{ 0x0000, 0x0000 }, /* R1223 */
+	{ 0x0000, 0x0000 }, /* R1224 */
+	{ 0x0000, 0x0000 }, /* R1225 */
+	{ 0x0000, 0x0000 }, /* R1226 */
+	{ 0x0000, 0x0000 }, /* R1227 */
+	{ 0x0000, 0x0000 }, /* R1228 */
+	{ 0x0000, 0x0000 }, /* R1229 */
+	{ 0x0000, 0x0000 }, /* R1230 */
+	{ 0x0000, 0x0000 }, /* R1231 */
+	{ 0x0000, 0x0000 }, /* R1232 */
+	{ 0x0000, 0x0000 }, /* R1233 */
+	{ 0x0000, 0x0000 }, /* R1234 */
+	{ 0x0000, 0x0000 }, /* R1235 */
+	{ 0x0000, 0x0000 }, /* R1236 */
+	{ 0x0000, 0x0000 }, /* R1237 */
+	{ 0x0000, 0x0000 }, /* R1238 */
+	{ 0x0000, 0x0000 }, /* R1239 */
+	{ 0x0000, 0x0000 }, /* R1240 */
+	{ 0x0000, 0x0000 }, /* R1241 */
+	{ 0x0000, 0x0000 }, /* R1242 */
+	{ 0x0000, 0x0000 }, /* R1243 */
+	{ 0x0000, 0x0000 }, /* R1244 */
+	{ 0x0000, 0x0000 }, /* R1245 */
+	{ 0x0000, 0x0000 }, /* R1246 */
+	{ 0x0000, 0x0000 }, /* R1247 */
+	{ 0x0000, 0x0000 }, /* R1248 */
+	{ 0x0000, 0x0000 }, /* R1249 */
+	{ 0x0000, 0x0000 }, /* R1250 */
+	{ 0x0000, 0x0000 }, /* R1251 */
+	{ 0x0000, 0x0000 }, /* R1252 */
+	{ 0x0000, 0x0000 }, /* R1253 */
+	{ 0x0000, 0x0000 }, /* R1254 */
+	{ 0x0000, 0x0000 }, /* R1255 */
+	{ 0x0000, 0x0000 }, /* R1256 */
+	{ 0x0000, 0x0000 }, /* R1257 */
+	{ 0x0000, 0x0000 }, /* R1258 */
+	{ 0x0000, 0x0000 }, /* R1259 */
+	{ 0x0000, 0x0000 }, /* R1260 */
+	{ 0x0000, 0x0000 }, /* R1261 */
+	{ 0x0000, 0x0000 }, /* R1262 */
+	{ 0x0000, 0x0000 }, /* R1263 */
+	{ 0x0000, 0x0000 }, /* R1264 */
+	{ 0x0000, 0x0000 }, /* R1265 */
+	{ 0x0000, 0x0000 }, /* R1266 */
+	{ 0x0000, 0x0000 }, /* R1267 */
+	{ 0x0000, 0x0000 }, /* R1268 */
+	{ 0x0000, 0x0000 }, /* R1269 */
+	{ 0x0000, 0x0000 }, /* R1270 */
+	{ 0x0000, 0x0000 }, /* R1271 */
+	{ 0x0000, 0x0000 }, /* R1272 */
+	{ 0x0000, 0x0000 }, /* R1273 */
+	{ 0x0000, 0x0000 }, /* R1274 */
+	{ 0x0000, 0x0000 }, /* R1275 */
+	{ 0x0000, 0x0000 }, /* R1276 */
+	{ 0x0000, 0x0000 }, /* R1277 */
+	{ 0x0000, 0x0000 }, /* R1278 */
+	{ 0x0000, 0x0000 }, /* R1279 */
+	{ 0x00FF, 0x01FF }, /* R1280  - AIF2 ADC Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1281  - AIF2 ADC Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1282  - AIF2 DAC Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1283  - AIF2 DAC Right Volume */
+	{ 0x0000, 0x0000 }, /* R1284 */
+	{ 0x0000, 0x0000 }, /* R1285 */
+	{ 0x0000, 0x0000 }, /* R1286 */
+	{ 0x0000, 0x0000 }, /* R1287 */
+	{ 0x0000, 0x0000 }, /* R1288 */
+	{ 0x0000, 0x0000 }, /* R1289 */
+	{ 0x0000, 0x0000 }, /* R1290 */
+	{ 0x0000, 0x0000 }, /* R1291 */
+	{ 0x0000, 0x0000 }, /* R1292 */
+	{ 0x0000, 0x0000 }, /* R1293 */
+	{ 0x0000, 0x0000 }, /* R1294 */
+	{ 0x0000, 0x0000 }, /* R1295 */
+	{ 0xF800, 0xF800 }, /* R1296  - AIF2 ADC Filters */
+	{ 0x0000, 0x0000 }, /* R1297 */
+	{ 0x0000, 0x0000 }, /* R1298 */
+	{ 0x0000, 0x0000 }, /* R1299 */
+	{ 0x0000, 0x0000 }, /* R1300 */
+	{ 0x0000, 0x0000 }, /* R1301 */
+	{ 0x0000, 0x0000 }, /* R1302 */
+	{ 0x0000, 0x0000 }, /* R1303 */
+	{ 0x0000, 0x0000 }, /* R1304 */
+	{ 0x0000, 0x0000 }, /* R1305 */
+	{ 0x0000, 0x0000 }, /* R1306 */
+	{ 0x0000, 0x0000 }, /* R1307 */
+	{ 0x0000, 0x0000 }, /* R1308 */
+	{ 0x0000, 0x0000 }, /* R1309 */
+	{ 0x0000, 0x0000 }, /* R1310 */
+	{ 0x0000, 0x0000 }, /* R1311 */
+	{ 0x02B6, 0x02B6 }, /* R1312  - AIF2 DAC Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1313  - AIF2 DAC Filters (2) */
+	{ 0x0000, 0x0000 }, /* R1314 */
+	{ 0x0000, 0x0000 }, /* R1315 */
+	{ 0x0000, 0x0000 }, /* R1316 */
+	{ 0x0000, 0x0000 }, /* R1317 */
+	{ 0x0000, 0x0000 }, /* R1318 */
+	{ 0x0000, 0x0000 }, /* R1319 */
+	{ 0x0000, 0x0000 }, /* R1320 */
+	{ 0x0000, 0x0000 }, /* R1321 */
+	{ 0x0000, 0x0000 }, /* R1322 */
+	{ 0x0000, 0x0000 }, /* R1323 */
+	{ 0x0000, 0x0000 }, /* R1324 */
+	{ 0x0000, 0x0000 }, /* R1325 */
+	{ 0x0000, 0x0000 }, /* R1326 */
+	{ 0x0000, 0x0000 }, /* R1327 */
+	{ 0x0000, 0x0000 }, /* R1328 */
+	{ 0x0000, 0x0000 }, /* R1329 */
+	{ 0x0000, 0x0000 }, /* R1330 */
+	{ 0x0000, 0x0000 }, /* R1331 */
+	{ 0x0000, 0x0000 }, /* R1332 */
+	{ 0x0000, 0x0000 }, /* R1333 */
+	{ 0x0000, 0x0000 }, /* R1334 */
+	{ 0x0000, 0x0000 }, /* R1335 */
+	{ 0x0000, 0x0000 }, /* R1336 */
+	{ 0x0000, 0x0000 }, /* R1337 */
+	{ 0x0000, 0x0000 }, /* R1338 */
+	{ 0x0000, 0x0000 }, /* R1339 */
+	{ 0x0000, 0x0000 }, /* R1340 */
+	{ 0x0000, 0x0000 }, /* R1341 */
+	{ 0x0000, 0x0000 }, /* R1342 */
+	{ 0x0000, 0x0000 }, /* R1343 */
+	{ 0xFFFF, 0xFFFF }, /* R1344  - AIF2 DRC (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1345  - AIF2 DRC (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1346  - AIF2 DRC (3) */
+	{ 0x07FF, 0x07FF }, /* R1347  - AIF2 DRC (4) */
+	{ 0x03FF, 0x03FF }, /* R1348  - AIF2 DRC (5) */
+	{ 0x0000, 0x0000 }, /* R1349 */
+	{ 0x0000, 0x0000 }, /* R1350 */
+	{ 0x0000, 0x0000 }, /* R1351 */
+	{ 0x0000, 0x0000 }, /* R1352 */
+	{ 0x0000, 0x0000 }, /* R1353 */
+	{ 0x0000, 0x0000 }, /* R1354 */
+	{ 0x0000, 0x0000 }, /* R1355 */
+	{ 0x0000, 0x0000 }, /* R1356 */
+	{ 0x0000, 0x0000 }, /* R1357 */
+	{ 0x0000, 0x0000 }, /* R1358 */
+	{ 0x0000, 0x0000 }, /* R1359 */
+	{ 0x0000, 0x0000 }, /* R1360 */
+	{ 0x0000, 0x0000 }, /* R1361 */
+	{ 0x0000, 0x0000 }, /* R1362 */
+	{ 0x0000, 0x0000 }, /* R1363 */
+	{ 0x0000, 0x0000 }, /* R1364 */
+	{ 0x0000, 0x0000 }, /* R1365 */
+	{ 0x0000, 0x0000 }, /* R1366 */
+	{ 0x0000, 0x0000 }, /* R1367 */
+	{ 0x0000, 0x0000 }, /* R1368 */
+	{ 0x0000, 0x0000 }, /* R1369 */
+	{ 0x0000, 0x0000 }, /* R1370 */
+	{ 0x0000, 0x0000 }, /* R1371 */
+	{ 0x0000, 0x0000 }, /* R1372 */
+	{ 0x0000, 0x0000 }, /* R1373 */
+	{ 0x0000, 0x0000 }, /* R1374 */
+	{ 0x0000, 0x0000 }, /* R1375 */
+	{ 0x0000, 0x0000 }, /* R1376 */
+	{ 0x0000, 0x0000 }, /* R1377 */
+	{ 0x0000, 0x0000 }, /* R1378 */
+	{ 0x0000, 0x0000 }, /* R1379 */
+	{ 0x0000, 0x0000 }, /* R1380 */
+	{ 0x0000, 0x0000 }, /* R1381 */
+	{ 0x0000, 0x0000 }, /* R1382 */
+	{ 0x0000, 0x0000 }, /* R1383 */
+	{ 0x0000, 0x0000 }, /* R1384 */
+	{ 0x0000, 0x0000 }, /* R1385 */
+	{ 0x0000, 0x0000 }, /* R1386 */
+	{ 0x0000, 0x0000 }, /* R1387 */
+	{ 0x0000, 0x0000 }, /* R1388 */
+	{ 0x0000, 0x0000 }, /* R1389 */
+	{ 0x0000, 0x0000 }, /* R1390 */
+	{ 0x0000, 0x0000 }, /* R1391 */
+	{ 0x0000, 0x0000 }, /* R1392 */
+	{ 0x0000, 0x0000 }, /* R1393 */
+	{ 0x0000, 0x0000 }, /* R1394 */
+	{ 0x0000, 0x0000 }, /* R1395 */
+	{ 0x0000, 0x0000 }, /* R1396 */
+	{ 0x0000, 0x0000 }, /* R1397 */
+	{ 0x0000, 0x0000 }, /* R1398 */
+	{ 0x0000, 0x0000 }, /* R1399 */
+	{ 0x0000, 0x0000 }, /* R1400 */
+	{ 0x0000, 0x0000 }, /* R1401 */
+	{ 0x0000, 0x0000 }, /* R1402 */
+	{ 0x0000, 0x0000 }, /* R1403 */
+	{ 0x0000, 0x0000 }, /* R1404 */
+	{ 0x0000, 0x0000 }, /* R1405 */
+	{ 0x0000, 0x0000 }, /* R1406 */
+	{ 0x0000, 0x0000 }, /* R1407 */
+	{ 0xFFFF, 0xFFFF }, /* R1408  - AIF2 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1409  - AIF2 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1410  - AIF2 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1411  - AIF2 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1412  - AIF2 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1413  - AIF2 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1414  - AIF2 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1415  - AIF2 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1416  - AIF2 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1417  - AIF2 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1418  - AIF2 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1419  - AIF2 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1420  - AIF2 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1421  - AIF2 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1422  - AIF2 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1423  - AIF2 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1424  - AIF2 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1425  - AIF2 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1426  - AIF2 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1427  - AIF2 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1428 */
+	{ 0x0000, 0x0000 }, /* R1429 */
+	{ 0x0000, 0x0000 }, /* R1430 */
+	{ 0x0000, 0x0000 }, /* R1431 */
+	{ 0x0000, 0x0000 }, /* R1432 */
+	{ 0x0000, 0x0000 }, /* R1433 */
+	{ 0x0000, 0x0000 }, /* R1434 */
+	{ 0x0000, 0x0000 }, /* R1435 */
+	{ 0x0000, 0x0000 }, /* R1436 */
+	{ 0x0000, 0x0000 }, /* R1437 */
+	{ 0x0000, 0x0000 }, /* R1438 */
+	{ 0x0000, 0x0000 }, /* R1439 */
+	{ 0x0000, 0x0000 }, /* R1440 */
+	{ 0x0000, 0x0000 }, /* R1441 */
+	{ 0x0000, 0x0000 }, /* R1442 */
+	{ 0x0000, 0x0000 }, /* R1443 */
+	{ 0x0000, 0x0000 }, /* R1444 */
+	{ 0x0000, 0x0000 }, /* R1445 */
+	{ 0x0000, 0x0000 }, /* R1446 */
+	{ 0x0000, 0x0000 }, /* R1447 */
+	{ 0x0000, 0x0000 }, /* R1448 */
+	{ 0x0000, 0x0000 }, /* R1449 */
+	{ 0x0000, 0x0000 }, /* R1450 */
+	{ 0x0000, 0x0000 }, /* R1451 */
+	{ 0x0000, 0x0000 }, /* R1452 */
+	{ 0x0000, 0x0000 }, /* R1453 */
+	{ 0x0000, 0x0000 }, /* R1454 */
+	{ 0x0000, 0x0000 }, /* R1455 */
+	{ 0x0000, 0x0000 }, /* R1456 */
+	{ 0x0000, 0x0000 }, /* R1457 */
+	{ 0x0000, 0x0000 }, /* R1458 */
+	{ 0x0000, 0x0000 }, /* R1459 */
+	{ 0x0000, 0x0000 }, /* R1460 */
+	{ 0x0000, 0x0000 }, /* R1461 */
+	{ 0x0000, 0x0000 }, /* R1462 */
+	{ 0x0000, 0x0000 }, /* R1463 */
+	{ 0x0000, 0x0000 }, /* R1464 */
+	{ 0x0000, 0x0000 }, /* R1465 */
+	{ 0x0000, 0x0000 }, /* R1466 */
+	{ 0x0000, 0x0000 }, /* R1467 */
+	{ 0x0000, 0x0000 }, /* R1468 */
+	{ 0x0000, 0x0000 }, /* R1469 */
+	{ 0x0000, 0x0000 }, /* R1470 */
+	{ 0x0000, 0x0000 }, /* R1471 */
+	{ 0x0000, 0x0000 }, /* R1472 */
+	{ 0x0000, 0x0000 }, /* R1473 */
+	{ 0x0000, 0x0000 }, /* R1474 */
+	{ 0x0000, 0x0000 }, /* R1475 */
+	{ 0x0000, 0x0000 }, /* R1476 */
+	{ 0x0000, 0x0000 }, /* R1477 */
+	{ 0x0000, 0x0000 }, /* R1478 */
+	{ 0x0000, 0x0000 }, /* R1479 */
+	{ 0x0000, 0x0000 }, /* R1480 */
+	{ 0x0000, 0x0000 }, /* R1481 */
+	{ 0x0000, 0x0000 }, /* R1482 */
+	{ 0x0000, 0x0000 }, /* R1483 */
+	{ 0x0000, 0x0000 }, /* R1484 */
+	{ 0x0000, 0x0000 }, /* R1485 */
+	{ 0x0000, 0x0000 }, /* R1486 */
+	{ 0x0000, 0x0000 }, /* R1487 */
+	{ 0x0000, 0x0000 }, /* R1488 */
+	{ 0x0000, 0x0000 }, /* R1489 */
+	{ 0x0000, 0x0000 }, /* R1490 */
+	{ 0x0000, 0x0000 }, /* R1491 */
+	{ 0x0000, 0x0000 }, /* R1492 */
+	{ 0x0000, 0x0000 }, /* R1493 */
+	{ 0x0000, 0x0000 }, /* R1494 */
+	{ 0x0000, 0x0000 }, /* R1495 */
+	{ 0x0000, 0x0000 }, /* R1496 */
+	{ 0x0000, 0x0000 }, /* R1497 */
+	{ 0x0000, 0x0000 }, /* R1498 */
+	{ 0x0000, 0x0000 }, /* R1499 */
+	{ 0x0000, 0x0000 }, /* R1500 */
+	{ 0x0000, 0x0000 }, /* R1501 */
+	{ 0x0000, 0x0000 }, /* R1502 */
+	{ 0x0000, 0x0000 }, /* R1503 */
+	{ 0x0000, 0x0000 }, /* R1504 */
+	{ 0x0000, 0x0000 }, /* R1505 */
+	{ 0x0000, 0x0000 }, /* R1506 */
+	{ 0x0000, 0x0000 }, /* R1507 */
+	{ 0x0000, 0x0000 }, /* R1508 */
+	{ 0x0000, 0x0000 }, /* R1509 */
+	{ 0x0000, 0x0000 }, /* R1510 */
+	{ 0x0000, 0x0000 }, /* R1511 */
+	{ 0x0000, 0x0000 }, /* R1512 */
+	{ 0x0000, 0x0000 }, /* R1513 */
+	{ 0x0000, 0x0000 }, /* R1514 */
+	{ 0x0000, 0x0000 }, /* R1515 */
+	{ 0x0000, 0x0000 }, /* R1516 */
+	{ 0x0000, 0x0000 }, /* R1517 */
+	{ 0x0000, 0x0000 }, /* R1518 */
+	{ 0x0000, 0x0000 }, /* R1519 */
+	{ 0x0000, 0x0000 }, /* R1520 */
+	{ 0x0000, 0x0000 }, /* R1521 */
+	{ 0x0000, 0x0000 }, /* R1522 */
+	{ 0x0000, 0x0000 }, /* R1523 */
+	{ 0x0000, 0x0000 }, /* R1524 */
+	{ 0x0000, 0x0000 }, /* R1525 */
+	{ 0x0000, 0x0000 }, /* R1526 */
+	{ 0x0000, 0x0000 }, /* R1527 */
+	{ 0x0000, 0x0000 }, /* R1528 */
+	{ 0x0000, 0x0000 }, /* R1529 */
+	{ 0x0000, 0x0000 }, /* R1530 */
+	{ 0x0000, 0x0000 }, /* R1531 */
+	{ 0x0000, 0x0000 }, /* R1532 */
+	{ 0x0000, 0x0000 }, /* R1533 */
+	{ 0x0000, 0x0000 }, /* R1534 */
+	{ 0x0000, 0x0000 }, /* R1535 */
+	{ 0x01EF, 0x01EF }, /* R1536  - DAC1 Mixer Volumes */
+	{ 0x0037, 0x0037 }, /* R1537  - DAC1 Left Mixer Routing */
+	{ 0x0037, 0x0037 }, /* R1538  - DAC1 Right Mixer Routing */
+	{ 0x01EF, 0x01EF }, /* R1539  - DAC2 Mixer Volumes */
+	{ 0x0037, 0x0037 }, /* R1540  - DAC2 Left Mixer Routing */
+	{ 0x0037, 0x0037 }, /* R1541  - DAC2 Right Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1542  - AIF1 ADC1 Left Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1543  - AIF1 ADC1 Right Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1544  - AIF1 ADC2 Left Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1545  - AIF1 ADC2 Right Mixer Routing */
+	{ 0x0000, 0x0000 }, /* R1546 */
+	{ 0x0000, 0x0000 }, /* R1547 */
+	{ 0x0000, 0x0000 }, /* R1548 */
+	{ 0x0000, 0x0000 }, /* R1549 */
+	{ 0x0000, 0x0000 }, /* R1550 */
+	{ 0x0000, 0x0000 }, /* R1551 */
+	{ 0x02FF, 0x03FF }, /* R1552  - DAC1 Left Volume */
+	{ 0x02FF, 0x03FF }, /* R1553  - DAC1 Right Volume */
+	{ 0x02FF, 0x03FF }, /* R1554  - DAC2 Left Volume */
+	{ 0x02FF, 0x03FF }, /* R1555  - DAC2 Right Volume */
+	{ 0x0003, 0x0003 }, /* R1556  - DAC Softmute */
+	{ 0x0000, 0x0000 }, /* R1557 */
+	{ 0x0000, 0x0000 }, /* R1558 */
+	{ 0x0000, 0x0000 }, /* R1559 */
+	{ 0x0000, 0x0000 }, /* R1560 */
+	{ 0x0000, 0x0000 }, /* R1561 */
+	{ 0x0000, 0x0000 }, /* R1562 */
+	{ 0x0000, 0x0000 }, /* R1563 */
+	{ 0x0000, 0x0000 }, /* R1564 */
+	{ 0x0000, 0x0000 }, /* R1565 */
+	{ 0x0000, 0x0000 }, /* R1566 */
+	{ 0x0000, 0x0000 }, /* R1567 */
+	{ 0x0003, 0x0003 }, /* R1568  - Oversampling */
+	{ 0x03C3, 0x03C3 }, /* R1569  - Sidetone */
+};
+
+const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE] = {
+	0x8994,     /* R0     - Software Reset */
+	0x0000,     /* R1     - Power Management (1) */
+	0x6000,     /* R2     - Power Management (2) */
+	0x0000,     /* R3     - Power Management (3) */
+	0x0000,     /* R4     - Power Management (4) */
+	0x0000,     /* R5     - Power Management (5) */
+	0x0000,     /* R6     - Power Management (6) */
+	0x0000,     /* R7 */
+	0x0000,     /* R8 */
+	0x0000,     /* R9 */
+	0x0000,     /* R10 */
+	0x0000,     /* R11 */
+	0x0000,     /* R12 */
+	0x0000,     /* R13 */
+	0x0000,     /* R14 */
+	0x0000,     /* R15 */
+	0x0000,     /* R16 */
+	0x0000,     /* R17 */
+	0x0000,     /* R18 */
+	0x0000,     /* R19 */
+	0x0000,     /* R20 */
+	0x0000,     /* R21    - Input Mixer (1) */
+	0x0000,     /* R22 */
+	0x0000,     /* R23 */
+	0x008B,     /* R24    - Left Line Input 1&2 Volume */
+	0x008B,     /* R25    - Left Line Input 3&4 Volume */
+	0x008B,     /* R26    - Right Line Input 1&2 Volume */
+	0x008B,     /* R27    - Right Line Input 3&4 Volume */
+	0x006D,     /* R28    - Left Output Volume */
+	0x006D,     /* R29    - Right Output Volume */
+	0x0066,     /* R30    - Line Outputs Volume */
+	0x0020,     /* R31    - HPOUT2 Volume */
+	0x0079,     /* R32    - Left OPGA Volume */
+	0x0079,     /* R33    - Right OPGA Volume */
+	0x0003,     /* R34    - SPKMIXL Attenuation */
+	0x0003,     /* R35    - SPKMIXR Attenuation */
+	0x0011,     /* R36    - SPKOUT Mixers */
+	0x0140,     /* R37    - ClassD */
+	0x0079,     /* R38    - Speaker Volume Left */
+	0x0079,     /* R39    - Speaker Volume Right */
+	0x0000,     /* R40    - Input Mixer (2) */
+	0x0000,     /* R41    - Input Mixer (3) */
+	0x0000,     /* R42    - Input Mixer (4) */
+	0x0000,     /* R43    - Input Mixer (5) */
+	0x0000,     /* R44    - Input Mixer (6) */
+	0x0000,     /* R45    - Output Mixer (1) */
+	0x0000,     /* R46    - Output Mixer (2) */
+	0x0000,     /* R47    - Output Mixer (3) */
+	0x0000,     /* R48    - Output Mixer (4) */
+	0x0000,     /* R49    - Output Mixer (5) */
+	0x0000,     /* R50    - Output Mixer (6) */
+	0x0000,     /* R51    - HPOUT2 Mixer */
+	0x0000,     /* R52    - Line Mixer (1) */
+	0x0000,     /* R53    - Line Mixer (2) */
+	0x0000,     /* R54    - Speaker Mixer */
+	0x0000,     /* R55    - Additional Control */
+	0x0000,     /* R56    - AntiPOP (1) */
+	0x0000,     /* R57    - AntiPOP (2) */
+	0x0000,     /* R58    - MICBIAS */
+	0x000D,     /* R59    - LDO 1 */
+	0x0003,     /* R60    - LDO 2 */
+	0x0000,     /* R61 */
+	0x0000,     /* R62 */
+	0x0000,     /* R63 */
+	0x0000,     /* R64 */
+	0x0000,     /* R65 */
+	0x0000,     /* R66 */
+	0x0000,     /* R67 */
+	0x0000,     /* R68 */
+	0x0000,     /* R69 */
+	0x0000,     /* R70 */
+	0x0000,     /* R71 */
+	0x0000,     /* R72 */
+	0x0000,     /* R73 */
+	0x0000,     /* R74 */
+	0x0000,     /* R75 */
+	0x1F25,     /* R76    - Charge Pump (1) */
+	0x0000,     /* R77 */
+	0x0000,     /* R78 */
+	0x0000,     /* R79 */
+	0x0000,     /* R80 */
+	0x0004,     /* R81    - Class W (1) */
+	0x0000,     /* R82 */
+	0x0000,     /* R83 */
+	0x0000,     /* R84    - DC Servo (1) */
+	0x054A,     /* R85    - DC Servo (2) */
+	0x0000,     /* R86 */
+	0x0000,     /* R87    - DC Servo (4) */
+	0x0000,     /* R88    - DC Servo Readback */
+	0x0000,     /* R89 */
+	0x0000,     /* R90 */
+	0x0000,     /* R91 */
+	0x0000,     /* R92 */
+	0x0000,     /* R93 */
+	0x0000,     /* R94 */
+	0x0000,     /* R95 */
+	0x0000,     /* R96    - Analogue HP (1) */
+	0x0000,     /* R97 */
+	0x0000,     /* R98 */
+	0x0000,     /* R99 */
+	0x0000,     /* R100 */
+	0x0000,     /* R101 */
+	0x0000,     /* R102 */
+	0x0000,     /* R103 */
+	0x0000,     /* R104 */
+	0x0000,     /* R105 */
+	0x0000,     /* R106 */
+	0x0000,     /* R107 */
+	0x0000,     /* R108 */
+	0x0000,     /* R109 */
+	0x0000,     /* R110 */
+	0x0000,     /* R111 */
+	0x0000,     /* R112 */
+	0x0000,     /* R113 */
+	0x0000,     /* R114 */
+	0x0000,     /* R115 */
+	0x0000,     /* R116 */
+	0x0000,     /* R117 */
+	0x0000,     /* R118 */
+	0x0000,     /* R119 */
+	0x0000,     /* R120 */
+	0x0000,     /* R121 */
+	0x0000,     /* R122 */
+	0x0000,     /* R123 */
+	0x0000,     /* R124 */
+	0x0000,     /* R125 */
+	0x0000,     /* R126 */
+	0x0000,     /* R127 */
+	0x0000,     /* R128 */
+	0x0000,     /* R129 */
+	0x0000,     /* R130 */
+	0x0000,     /* R131 */
+	0x0000,     /* R132 */
+	0x0000,     /* R133 */
+	0x0000,     /* R134 */
+	0x0000,     /* R135 */
+	0x0000,     /* R136 */
+	0x0000,     /* R137 */
+	0x0000,     /* R138 */
+	0x0000,     /* R139 */
+	0x0000,     /* R140 */
+	0x0000,     /* R141 */
+	0x0000,     /* R142 */
+	0x0000,     /* R143 */
+	0x0000,     /* R144 */
+	0x0000,     /* R145 */
+	0x0000,     /* R146 */
+	0x0000,     /* R147 */
+	0x0000,     /* R148 */
+	0x0000,     /* R149 */
+	0x0000,     /* R150 */
+	0x0000,     /* R151 */
+	0x0000,     /* R152 */
+	0x0000,     /* R153 */
+	0x0000,     /* R154 */
+	0x0000,     /* R155 */
+	0x0000,     /* R156 */
+	0x0000,     /* R157 */
+	0x0000,     /* R158 */
+	0x0000,     /* R159 */
+	0x0000,     /* R160 */
+	0x0000,     /* R161 */
+	0x0000,     /* R162 */
+	0x0000,     /* R163 */
+	0x0000,     /* R164 */
+	0x0000,     /* R165 */
+	0x0000,     /* R166 */
+	0x0000,     /* R167 */
+	0x0000,     /* R168 */
+	0x0000,     /* R169 */
+	0x0000,     /* R170 */
+	0x0000,     /* R171 */
+	0x0000,     /* R172 */
+	0x0000,     /* R173 */
+	0x0000,     /* R174 */
+	0x0000,     /* R175 */
+	0x0000,     /* R176 */
+	0x0000,     /* R177 */
+	0x0000,     /* R178 */
+	0x0000,     /* R179 */
+	0x0000,     /* R180 */
+	0x0000,     /* R181 */
+	0x0000,     /* R182 */
+	0x0000,     /* R183 */
+	0x0000,     /* R184 */
+	0x0000,     /* R185 */
+	0x0000,     /* R186 */
+	0x0000,     /* R187 */
+	0x0000,     /* R188 */
+	0x0000,     /* R189 */
+	0x0000,     /* R190 */
+	0x0000,     /* R191 */
+	0x0000,     /* R192 */
+	0x0000,     /* R193 */
+	0x0000,     /* R194 */
+	0x0000,     /* R195 */
+	0x0000,     /* R196 */
+	0x0000,     /* R197 */
+	0x0000,     /* R198 */
+	0x0000,     /* R199 */
+	0x0000,     /* R200 */
+	0x0000,     /* R201 */
+	0x0000,     /* R202 */
+	0x0000,     /* R203 */
+	0x0000,     /* R204 */
+	0x0000,     /* R205 */
+	0x0000,     /* R206 */
+	0x0000,     /* R207 */
+	0x0000,     /* R208 */
+	0x0000,     /* R209 */
+	0x0000,     /* R210 */
+	0x0000,     /* R211 */
+	0x0000,     /* R212 */
+	0x0000,     /* R213 */
+	0x0000,     /* R214 */
+	0x0000,     /* R215 */
+	0x0000,     /* R216 */
+	0x0000,     /* R217 */
+	0x0000,     /* R218 */
+	0x0000,     /* R219 */
+	0x0000,     /* R220 */
+	0x0000,     /* R221 */
+	0x0000,     /* R222 */
+	0x0000,     /* R223 */
+	0x0000,     /* R224 */
+	0x0000,     /* R225 */
+	0x0000,     /* R226 */
+	0x0000,     /* R227 */
+	0x0000,     /* R228 */
+	0x0000,     /* R229 */
+	0x0000,     /* R230 */
+	0x0000,     /* R231 */
+	0x0000,     /* R232 */
+	0x0000,     /* R233 */
+	0x0000,     /* R234 */
+	0x0000,     /* R235 */
+	0x0000,     /* R236 */
+	0x0000,     /* R237 */
+	0x0000,     /* R238 */
+	0x0000,     /* R239 */
+	0x0000,     /* R240 */
+	0x0000,     /* R241 */
+	0x0000,     /* R242 */
+	0x0000,     /* R243 */
+	0x0000,     /* R244 */
+	0x0000,     /* R245 */
+	0x0000,     /* R246 */
+	0x0000,     /* R247 */
+	0x0000,     /* R248 */
+	0x0000,     /* R249 */
+	0x0000,     /* R250 */
+	0x0000,     /* R251 */
+	0x0000,     /* R252 */
+	0x0000,     /* R253 */
+	0x0000,     /* R254 */
+	0x0000,     /* R255 */
+	0x0003,     /* R256   - Chip Revision */
+	0x8004,     /* R257   - Control Interface */
+	0x0000,     /* R258 */
+	0x0000,     /* R259 */
+	0x0000,     /* R260 */
+	0x0000,     /* R261 */
+	0x0000,     /* R262 */
+	0x0000,     /* R263 */
+	0x0000,     /* R264 */
+	0x0000,     /* R265 */
+	0x0000,     /* R266 */
+	0x0000,     /* R267 */
+	0x0000,     /* R268 */
+	0x0000,     /* R269 */
+	0x0000,     /* R270 */
+	0x0000,     /* R271 */
+	0x0000,     /* R272   - Write Sequencer Ctrl (1) */
+	0x0000,     /* R273   - Write Sequencer Ctrl (2) */
+	0x0000,     /* R274 */
+	0x0000,     /* R275 */
+	0x0000,     /* R276 */
+	0x0000,     /* R277 */
+	0x0000,     /* R278 */
+	0x0000,     /* R279 */
+	0x0000,     /* R280 */
+	0x0000,     /* R281 */
+	0x0000,     /* R282 */
+	0x0000,     /* R283 */
+	0x0000,     /* R284 */
+	0x0000,     /* R285 */
+	0x0000,     /* R286 */
+	0x0000,     /* R287 */
+	0x0000,     /* R288 */
+	0x0000,     /* R289 */
+	0x0000,     /* R290 */
+	0x0000,     /* R291 */
+	0x0000,     /* R292 */
+	0x0000,     /* R293 */
+	0x0000,     /* R294 */
+	0x0000,     /* R295 */
+	0x0000,     /* R296 */
+	0x0000,     /* R297 */
+	0x0000,     /* R298 */
+	0x0000,     /* R299 */
+	0x0000,     /* R300 */
+	0x0000,     /* R301 */
+	0x0000,     /* R302 */
+	0x0000,     /* R303 */
+	0x0000,     /* R304 */
+	0x0000,     /* R305 */
+	0x0000,     /* R306 */
+	0x0000,     /* R307 */
+	0x0000,     /* R308 */
+	0x0000,     /* R309 */
+	0x0000,     /* R310 */
+	0x0000,     /* R311 */
+	0x0000,     /* R312 */
+	0x0000,     /* R313 */
+	0x0000,     /* R314 */
+	0x0000,     /* R315 */
+	0x0000,     /* R316 */
+	0x0000,     /* R317 */
+	0x0000,     /* R318 */
+	0x0000,     /* R319 */
+	0x0000,     /* R320 */
+	0x0000,     /* R321 */
+	0x0000,     /* R322 */
+	0x0000,     /* R323 */
+	0x0000,     /* R324 */
+	0x0000,     /* R325 */
+	0x0000,     /* R326 */
+	0x0000,     /* R327 */
+	0x0000,     /* R328 */
+	0x0000,     /* R329 */
+	0x0000,     /* R330 */
+	0x0000,     /* R331 */
+	0x0000,     /* R332 */
+	0x0000,     /* R333 */
+	0x0000,     /* R334 */
+	0x0000,     /* R335 */
+	0x0000,     /* R336 */
+	0x0000,     /* R337 */
+	0x0000,     /* R338 */
+	0x0000,     /* R339 */
+	0x0000,     /* R340 */
+	0x0000,     /* R341 */
+	0x0000,     /* R342 */
+	0x0000,     /* R343 */
+	0x0000,     /* R344 */
+	0x0000,     /* R345 */
+	0x0000,     /* R346 */
+	0x0000,     /* R347 */
+	0x0000,     /* R348 */
+	0x0000,     /* R349 */
+	0x0000,     /* R350 */
+	0x0000,     /* R351 */
+	0x0000,     /* R352 */
+	0x0000,     /* R353 */
+	0x0000,     /* R354 */
+	0x0000,     /* R355 */
+	0x0000,     /* R356 */
+	0x0000,     /* R357 */
+	0x0000,     /* R358 */
+	0x0000,     /* R359 */
+	0x0000,     /* R360 */
+	0x0000,     /* R361 */
+	0x0000,     /* R362 */
+	0x0000,     /* R363 */
+	0x0000,     /* R364 */
+	0x0000,     /* R365 */
+	0x0000,     /* R366 */
+	0x0000,     /* R367 */
+	0x0000,     /* R368 */
+	0x0000,     /* R369 */
+	0x0000,     /* R370 */
+	0x0000,     /* R371 */
+	0x0000,     /* R372 */
+	0x0000,     /* R373 */
+	0x0000,     /* R374 */
+	0x0000,     /* R375 */
+	0x0000,     /* R376 */
+	0x0000,     /* R377 */
+	0x0000,     /* R378 */
+	0x0000,     /* R379 */
+	0x0000,     /* R380 */
+	0x0000,     /* R381 */
+	0x0000,     /* R382 */
+	0x0000,     /* R383 */
+	0x0000,     /* R384 */
+	0x0000,     /* R385 */
+	0x0000,     /* R386 */
+	0x0000,     /* R387 */
+	0x0000,     /* R388 */
+	0x0000,     /* R389 */
+	0x0000,     /* R390 */
+	0x0000,     /* R391 */
+	0x0000,     /* R392 */
+	0x0000,     /* R393 */
+	0x0000,     /* R394 */
+	0x0000,     /* R395 */
+	0x0000,     /* R396 */
+	0x0000,     /* R397 */
+	0x0000,     /* R398 */
+	0x0000,     /* R399 */
+	0x0000,     /* R400 */
+	0x0000,     /* R401 */
+	0x0000,     /* R402 */
+	0x0000,     /* R403 */
+	0x0000,     /* R404 */
+	0x0000,     /* R405 */
+	0x0000,     /* R406 */
+	0x0000,     /* R407 */
+	0x0000,     /* R408 */
+	0x0000,     /* R409 */
+	0x0000,     /* R410 */
+	0x0000,     /* R411 */
+	0x0000,     /* R412 */
+	0x0000,     /* R413 */
+	0x0000,     /* R414 */
+	0x0000,     /* R415 */
+	0x0000,     /* R416 */
+	0x0000,     /* R417 */
+	0x0000,     /* R418 */
+	0x0000,     /* R419 */
+	0x0000,     /* R420 */
+	0x0000,     /* R421 */
+	0x0000,     /* R422 */
+	0x0000,     /* R423 */
+	0x0000,     /* R424 */
+	0x0000,     /* R425 */
+	0x0000,     /* R426 */
+	0x0000,     /* R427 */
+	0x0000,     /* R428 */
+	0x0000,     /* R429 */
+	0x0000,     /* R430 */
+	0x0000,     /* R431 */
+	0x0000,     /* R432 */
+	0x0000,     /* R433 */
+	0x0000,     /* R434 */
+	0x0000,     /* R435 */
+	0x0000,     /* R436 */
+	0x0000,     /* R437 */
+	0x0000,     /* R438 */
+	0x0000,     /* R439 */
+	0x0000,     /* R440 */
+	0x0000,     /* R441 */
+	0x0000,     /* R442 */
+	0x0000,     /* R443 */
+	0x0000,     /* R444 */
+	0x0000,     /* R445 */
+	0x0000,     /* R446 */
+	0x0000,     /* R447 */
+	0x0000,     /* R448 */
+	0x0000,     /* R449 */
+	0x0000,     /* R450 */
+	0x0000,     /* R451 */
+	0x0000,     /* R452 */
+	0x0000,     /* R453 */
+	0x0000,     /* R454 */
+	0x0000,     /* R455 */
+	0x0000,     /* R456 */
+	0x0000,     /* R457 */
+	0x0000,     /* R458 */
+	0x0000,     /* R459 */
+	0x0000,     /* R460 */
+	0x0000,     /* R461 */
+	0x0000,     /* R462 */
+	0x0000,     /* R463 */
+	0x0000,     /* R464 */
+	0x0000,     /* R465 */
+	0x0000,     /* R466 */
+	0x0000,     /* R467 */
+	0x0000,     /* R468 */
+	0x0000,     /* R469 */
+	0x0000,     /* R470 */
+	0x0000,     /* R471 */
+	0x0000,     /* R472 */
+	0x0000,     /* R473 */
+	0x0000,     /* R474 */
+	0x0000,     /* R475 */
+	0x0000,     /* R476 */
+	0x0000,     /* R477 */
+	0x0000,     /* R478 */
+	0x0000,     /* R479 */
+	0x0000,     /* R480 */
+	0x0000,     /* R481 */
+	0x0000,     /* R482 */
+	0x0000,     /* R483 */
+	0x0000,     /* R484 */
+	0x0000,     /* R485 */
+	0x0000,     /* R486 */
+	0x0000,     /* R487 */
+	0x0000,     /* R488 */
+	0x0000,     /* R489 */
+	0x0000,     /* R490 */
+	0x0000,     /* R491 */
+	0x0000,     /* R492 */
+	0x0000,     /* R493 */
+	0x0000,     /* R494 */
+	0x0000,     /* R495 */
+	0x0000,     /* R496 */
+	0x0000,     /* R497 */
+	0x0000,     /* R498 */
+	0x0000,     /* R499 */
+	0x0000,     /* R500 */
+	0x0000,     /* R501 */
+	0x0000,     /* R502 */
+	0x0000,     /* R503 */
+	0x0000,     /* R504 */
+	0x0000,     /* R505 */
+	0x0000,     /* R506 */
+	0x0000,     /* R507 */
+	0x0000,     /* R508 */
+	0x0000,     /* R509 */
+	0x0000,     /* R510 */
+	0x0000,     /* R511 */
+	0x0000,     /* R512   - AIF1 Clocking (1) */
+	0x0000,     /* R513   - AIF1 Clocking (2) */
+	0x0000,     /* R514 */
+	0x0000,     /* R515 */
+	0x0000,     /* R516   - AIF2 Clocking (1) */
+	0x0000,     /* R517   - AIF2 Clocking (2) */
+	0x0000,     /* R518 */
+	0x0000,     /* R519 */
+	0x0000,     /* R520   - Clocking (1) */
+	0x0000,     /* R521   - Clocking (2) */
+	0x0000,     /* R522 */
+	0x0000,     /* R523 */
+	0x0000,     /* R524 */
+	0x0000,     /* R525 */
+	0x0000,     /* R526 */
+	0x0000,     /* R527 */
+	0x0083,     /* R528   - AIF1 Rate */
+	0x0083,     /* R529   - AIF2 Rate */
+	0x0000,     /* R530   - Rate Status */
+	0x0000,     /* R531 */
+	0x0000,     /* R532 */
+	0x0000,     /* R533 */
+	0x0000,     /* R534 */
+	0x0000,     /* R535 */
+	0x0000,     /* R536 */
+	0x0000,     /* R537 */
+	0x0000,     /* R538 */
+	0x0000,     /* R539 */
+	0x0000,     /* R540 */
+	0x0000,     /* R541 */
+	0x0000,     /* R542 */
+	0x0000,     /* R543 */
+	0x0000,     /* R544   - FLL1 Control (1) */
+	0x0000,     /* R545   - FLL1 Control (2) */
+	0x0000,     /* R546   - FLL1 Control (3) */
+	0x0000,     /* R547   - FLL1 Control (4) */
+	0x0C80,     /* R548   - FLL1 Control (5) */
+	0x0000,     /* R549 */
+	0x0000,     /* R550 */
+	0x0000,     /* R551 */
+	0x0000,     /* R552 */
+	0x0000,     /* R553 */
+	0x0000,     /* R554 */
+	0x0000,     /* R555 */
+	0x0000,     /* R556 */
+	0x0000,     /* R557 */
+	0x0000,     /* R558 */
+	0x0000,     /* R559 */
+	0x0000,     /* R560 */
+	0x0000,     /* R561 */
+	0x0000,     /* R562 */
+	0x0000,     /* R563 */
+	0x0000,     /* R564 */
+	0x0000,     /* R565 */
+	0x0000,     /* R566 */
+	0x0000,     /* R567 */
+	0x0000,     /* R568 */
+	0x0000,     /* R569 */
+	0x0000,     /* R570 */
+	0x0000,     /* R571 */
+	0x0000,     /* R572 */
+	0x0000,     /* R573 */
+	0x0000,     /* R574 */
+	0x0000,     /* R575 */
+	0x0000,     /* R576   - FLL2 Control (1) */
+	0x0000,     /* R577   - FLL2 Control (2) */
+	0x0000,     /* R578   - FLL2 Control (3) */
+	0x0000,     /* R579   - FLL2 Control (4) */
+	0x0C80,     /* R580   - FLL2 Control (5) */
+	0x0000,     /* R581 */
+	0x0000,     /* R582 */
+	0x0000,     /* R583 */
+	0x0000,     /* R584 */
+	0x0000,     /* R585 */
+	0x0000,     /* R586 */
+	0x0000,     /* R587 */
+	0x0000,     /* R588 */
+	0x0000,     /* R589 */
+	0x0000,     /* R590 */
+	0x0000,     /* R591 */
+	0x0000,     /* R592 */
+	0x0000,     /* R593 */
+	0x0000,     /* R594 */
+	0x0000,     /* R595 */
+	0x0000,     /* R596 */
+	0x0000,     /* R597 */
+	0x0000,     /* R598 */
+	0x0000,     /* R599 */
+	0x0000,     /* R600 */
+	0x0000,     /* R601 */
+	0x0000,     /* R602 */
+	0x0000,     /* R603 */
+	0x0000,     /* R604 */
+	0x0000,     /* R605 */
+	0x0000,     /* R606 */
+	0x0000,     /* R607 */
+	0x0000,     /* R608 */
+	0x0000,     /* R609 */
+	0x0000,     /* R610 */
+	0x0000,     /* R611 */
+	0x0000,     /* R612 */
+	0x0000,     /* R613 */
+	0x0000,     /* R614 */
+	0x0000,     /* R615 */
+	0x0000,     /* R616 */
+	0x0000,     /* R617 */
+	0x0000,     /* R618 */
+	0x0000,     /* R619 */
+	0x0000,     /* R620 */
+	0x0000,     /* R621 */
+	0x0000,     /* R622 */
+	0x0000,     /* R623 */
+	0x0000,     /* R624 */
+	0x0000,     /* R625 */
+	0x0000,     /* R626 */
+	0x0000,     /* R627 */
+	0x0000,     /* R628 */
+	0x0000,     /* R629 */
+	0x0000,     /* R630 */
+	0x0000,     /* R631 */
+	0x0000,     /* R632 */
+	0x0000,     /* R633 */
+	0x0000,     /* R634 */
+	0x0000,     /* R635 */
+	0x0000,     /* R636 */
+	0x0000,     /* R637 */
+	0x0000,     /* R638 */
+	0x0000,     /* R639 */
+	0x0000,     /* R640 */
+	0x0000,     /* R641 */
+	0x0000,     /* R642 */
+	0x0000,     /* R643 */
+	0x0000,     /* R644 */
+	0x0000,     /* R645 */
+	0x0000,     /* R646 */
+	0x0000,     /* R647 */
+	0x0000,     /* R648 */
+	0x0000,     /* R649 */
+	0x0000,     /* R650 */
+	0x0000,     /* R651 */
+	0x0000,     /* R652 */
+	0x0000,     /* R653 */
+	0x0000,     /* R654 */
+	0x0000,     /* R655 */
+	0x0000,     /* R656 */
+	0x0000,     /* R657 */
+	0x0000,     /* R658 */
+	0x0000,     /* R659 */
+	0x0000,     /* R660 */
+	0x0000,     /* R661 */
+	0x0000,     /* R662 */
+	0x0000,     /* R663 */
+	0x0000,     /* R664 */
+	0x0000,     /* R665 */
+	0x0000,     /* R666 */
+	0x0000,     /* R667 */
+	0x0000,     /* R668 */
+	0x0000,     /* R669 */
+	0x0000,     /* R670 */
+	0x0000,     /* R671 */
+	0x0000,     /* R672 */
+	0x0000,     /* R673 */
+	0x0000,     /* R674 */
+	0x0000,     /* R675 */
+	0x0000,     /* R676 */
+	0x0000,     /* R677 */
+	0x0000,     /* R678 */
+	0x0000,     /* R679 */
+	0x0000,     /* R680 */
+	0x0000,     /* R681 */
+	0x0000,     /* R682 */
+	0x0000,     /* R683 */
+	0x0000,     /* R684 */
+	0x0000,     /* R685 */
+	0x0000,     /* R686 */
+	0x0000,     /* R687 */
+	0x0000,     /* R688 */
+	0x0000,     /* R689 */
+	0x0000,     /* R690 */
+	0x0000,     /* R691 */
+	0x0000,     /* R692 */
+	0x0000,     /* R693 */
+	0x0000,     /* R694 */
+	0x0000,     /* R695 */
+	0x0000,     /* R696 */
+	0x0000,     /* R697 */
+	0x0000,     /* R698 */
+	0x0000,     /* R699 */
+	0x0000,     /* R700 */
+	0x0000,     /* R701 */
+	0x0000,     /* R702 */
+	0x0000,     /* R703 */
+	0x0000,     /* R704 */
+	0x0000,     /* R705 */
+	0x0000,     /* R706 */
+	0x0000,     /* R707 */
+	0x0000,     /* R708 */
+	0x0000,     /* R709 */
+	0x0000,     /* R710 */
+	0x0000,     /* R711 */
+	0x0000,     /* R712 */
+	0x0000,     /* R713 */
+	0x0000,     /* R714 */
+	0x0000,     /* R715 */
+	0x0000,     /* R716 */
+	0x0000,     /* R717 */
+	0x0000,     /* R718 */
+	0x0000,     /* R719 */
+	0x0000,     /* R720 */
+	0x0000,     /* R721 */
+	0x0000,     /* R722 */
+	0x0000,     /* R723 */
+	0x0000,     /* R724 */
+	0x0000,     /* R725 */
+	0x0000,     /* R726 */
+	0x0000,     /* R727 */
+	0x0000,     /* R728 */
+	0x0000,     /* R729 */
+	0x0000,     /* R730 */
+	0x0000,     /* R731 */
+	0x0000,     /* R732 */
+	0x0000,     /* R733 */
+	0x0000,     /* R734 */
+	0x0000,     /* R735 */
+	0x0000,     /* R736 */
+	0x0000,     /* R737 */
+	0x0000,     /* R738 */
+	0x0000,     /* R739 */
+	0x0000,     /* R740 */
+	0x0000,     /* R741 */
+	0x0000,     /* R742 */
+	0x0000,     /* R743 */
+	0x0000,     /* R744 */
+	0x0000,     /* R745 */
+	0x0000,     /* R746 */
+	0x0000,     /* R747 */
+	0x0000,     /* R748 */
+	0x0000,     /* R749 */
+	0x0000,     /* R750 */
+	0x0000,     /* R751 */
+	0x0000,     /* R752 */
+	0x0000,     /* R753 */
+	0x0000,     /* R754 */
+	0x0000,     /* R755 */
+	0x0000,     /* R756 */
+	0x0000,     /* R757 */
+	0x0000,     /* R758 */
+	0x0000,     /* R759 */
+	0x0000,     /* R760 */
+	0x0000,     /* R761 */
+	0x0000,     /* R762 */
+	0x0000,     /* R763 */
+	0x0000,     /* R764 */
+	0x0000,     /* R765 */
+	0x0000,     /* R766 */
+	0x0000,     /* R767 */
+	0x4050,     /* R768   - AIF1 Control (1) */
+	0x4000,     /* R769   - AIF1 Control (2) */
+	0x0000,     /* R770   - AIF1 Master/Slave */
+	0x0040,     /* R771   - AIF1 BCLK */
+	0x0040,     /* R772   - AIF1ADC LRCLK */
+	0x0040,     /* R773   - AIF1DAC LRCLK */
+	0x0004,     /* R774   - AIF1DAC Data */
+	0x0100,     /* R775   - AIF1ADC Data */
+	0x0000,     /* R776 */
+	0x0000,     /* R777 */
+	0x0000,     /* R778 */
+	0x0000,     /* R779 */
+	0x0000,     /* R780 */
+	0x0000,     /* R781 */
+	0x0000,     /* R782 */
+	0x0000,     /* R783 */
+	0x4050,     /* R784   - AIF2 Control (1) */
+	0x4000,     /* R785   - AIF2 Control (2) */
+	0x0000,     /* R786   - AIF2 Master/Slave */
+	0x0040,     /* R787   - AIF2 BCLK */
+	0x0040,     /* R788   - AIF2ADC LRCLK */
+	0x0040,     /* R789   - AIF2DAC LRCLK */
+	0x0000,     /* R790   - AIF2DAC Data */
+	0x0000,     /* R791   - AIF2ADC Data */
+	0x0000,     /* R792 */
+	0x0000,     /* R793 */
+	0x0000,     /* R794 */
+	0x0000,     /* R795 */
+	0x0000,     /* R796 */
+	0x0000,     /* R797 */
+	0x0000,     /* R798 */
+	0x0000,     /* R799 */
+	0x0000,     /* R800 */
+	0x0000,     /* R801 */
+	0x0000,     /* R802 */
+	0x0000,     /* R803 */
+	0x0000,     /* R804 */
+	0x0000,     /* R805 */
+	0x0000,     /* R806 */
+	0x0000,     /* R807 */
+	0x0000,     /* R808 */
+	0x0000,     /* R809 */
+	0x0000,     /* R810 */
+	0x0000,     /* R811 */
+	0x0000,     /* R812 */
+	0x0000,     /* R813 */
+	0x0000,     /* R814 */
+	0x0000,     /* R815 */
+	0x0000,     /* R816 */
+	0x0000,     /* R817 */
+	0x0000,     /* R818 */
+	0x0000,     /* R819 */
+	0x0000,     /* R820 */
+	0x0000,     /* R821 */
+	0x0000,     /* R822 */
+	0x0000,     /* R823 */
+	0x0000,     /* R824 */
+	0x0000,     /* R825 */
+	0x0000,     /* R826 */
+	0x0000,     /* R827 */
+	0x0000,     /* R828 */
+	0x0000,     /* R829 */
+	0x0000,     /* R830 */
+	0x0000,     /* R831 */
+	0x0000,     /* R832 */
+	0x0000,     /* R833 */
+	0x0000,     /* R834 */
+	0x0000,     /* R835 */
+	0x0000,     /* R836 */
+	0x0000,     /* R837 */
+	0x0000,     /* R838 */
+	0x0000,     /* R839 */
+	0x0000,     /* R840 */
+	0x0000,     /* R841 */
+	0x0000,     /* R842 */
+	0x0000,     /* R843 */
+	0x0000,     /* R844 */
+	0x0000,     /* R845 */
+	0x0000,     /* R846 */
+	0x0000,     /* R847 */
+	0x0000,     /* R848 */
+	0x0000,     /* R849 */
+	0x0000,     /* R850 */
+	0x0000,     /* R851 */
+	0x0000,     /* R852 */
+	0x0000,     /* R853 */
+	0x0000,     /* R854 */
+	0x0000,     /* R855 */
+	0x0000,     /* R856 */
+	0x0000,     /* R857 */
+	0x0000,     /* R858 */
+	0x0000,     /* R859 */
+	0x0000,     /* R860 */
+	0x0000,     /* R861 */
+	0x0000,     /* R862 */
+	0x0000,     /* R863 */
+	0x0000,     /* R864 */
+	0x0000,     /* R865 */
+	0x0000,     /* R866 */
+	0x0000,     /* R867 */
+	0x0000,     /* R868 */
+	0x0000,     /* R869 */
+	0x0000,     /* R870 */
+	0x0000,     /* R871 */
+	0x0000,     /* R872 */
+	0x0000,     /* R873 */
+	0x0000,     /* R874 */
+	0x0000,     /* R875 */
+	0x0000,     /* R876 */
+	0x0000,     /* R877 */
+	0x0000,     /* R878 */
+	0x0000,     /* R879 */
+	0x0000,     /* R880 */
+	0x0000,     /* R881 */
+	0x0000,     /* R882 */
+	0x0000,     /* R883 */
+	0x0000,     /* R884 */
+	0x0000,     /* R885 */
+	0x0000,     /* R886 */
+	0x0000,     /* R887 */
+	0x0000,     /* R888 */
+	0x0000,     /* R889 */
+	0x0000,     /* R890 */
+	0x0000,     /* R891 */
+	0x0000,     /* R892 */
+	0x0000,     /* R893 */
+	0x0000,     /* R894 */
+	0x0000,     /* R895 */
+	0x0000,     /* R896 */
+	0x0000,     /* R897 */
+	0x0000,     /* R898 */
+	0x0000,     /* R899 */
+	0x0000,     /* R900 */
+	0x0000,     /* R901 */
+	0x0000,     /* R902 */
+	0x0000,     /* R903 */
+	0x0000,     /* R904 */
+	0x0000,     /* R905 */
+	0x0000,     /* R906 */
+	0x0000,     /* R907 */
+	0x0000,     /* R908 */
+	0x0000,     /* R909 */
+	0x0000,     /* R910 */
+	0x0000,     /* R911 */
+	0x0000,     /* R912 */
+	0x0000,     /* R913 */
+	0x0000,     /* R914 */
+	0x0000,     /* R915 */
+	0x0000,     /* R916 */
+	0x0000,     /* R917 */
+	0x0000,     /* R918 */
+	0x0000,     /* R919 */
+	0x0000,     /* R920 */
+	0x0000,     /* R921 */
+	0x0000,     /* R922 */
+	0x0000,     /* R923 */
+	0x0000,     /* R924 */
+	0x0000,     /* R925 */
+	0x0000,     /* R926 */
+	0x0000,     /* R927 */
+	0x0000,     /* R928 */
+	0x0000,     /* R929 */
+	0x0000,     /* R930 */
+	0x0000,     /* R931 */
+	0x0000,     /* R932 */
+	0x0000,     /* R933 */
+	0x0000,     /* R934 */
+	0x0000,     /* R935 */
+	0x0000,     /* R936 */
+	0x0000,     /* R937 */
+	0x0000,     /* R938 */
+	0x0000,     /* R939 */
+	0x0000,     /* R940 */
+	0x0000,     /* R941 */
+	0x0000,     /* R942 */
+	0x0000,     /* R943 */
+	0x0000,     /* R944 */
+	0x0000,     /* R945 */
+	0x0000,     /* R946 */
+	0x0000,     /* R947 */
+	0x0000,     /* R948 */
+	0x0000,     /* R949 */
+	0x0000,     /* R950 */
+	0x0000,     /* R951 */
+	0x0000,     /* R952 */
+	0x0000,     /* R953 */
+	0x0000,     /* R954 */
+	0x0000,     /* R955 */
+	0x0000,     /* R956 */
+	0x0000,     /* R957 */
+	0x0000,     /* R958 */
+	0x0000,     /* R959 */
+	0x0000,     /* R960 */
+	0x0000,     /* R961 */
+	0x0000,     /* R962 */
+	0x0000,     /* R963 */
+	0x0000,     /* R964 */
+	0x0000,     /* R965 */
+	0x0000,     /* R966 */
+	0x0000,     /* R967 */
+	0x0000,     /* R968 */
+	0x0000,     /* R969 */
+	0x0000,     /* R970 */
+	0x0000,     /* R971 */
+	0x0000,     /* R972 */
+	0x0000,     /* R973 */
+	0x0000,     /* R974 */
+	0x0000,     /* R975 */
+	0x0000,     /* R976 */
+	0x0000,     /* R977 */
+	0x0000,     /* R978 */
+	0x0000,     /* R979 */
+	0x0000,     /* R980 */
+	0x0000,     /* R981 */
+	0x0000,     /* R982 */
+	0x0000,     /* R983 */
+	0x0000,     /* R984 */
+	0x0000,     /* R985 */
+	0x0000,     /* R986 */
+	0x0000,     /* R987 */
+	0x0000,     /* R988 */
+	0x0000,     /* R989 */
+	0x0000,     /* R990 */
+	0x0000,     /* R991 */
+	0x0000,     /* R992 */
+	0x0000,     /* R993 */
+	0x0000,     /* R994 */
+	0x0000,     /* R995 */
+	0x0000,     /* R996 */
+	0x0000,     /* R997 */
+	0x0000,     /* R998 */
+	0x0000,     /* R999 */
+	0x0000,     /* R1000 */
+	0x0000,     /* R1001 */
+	0x0000,     /* R1002 */
+	0x0000,     /* R1003 */
+	0x0000,     /* R1004 */
+	0x0000,     /* R1005 */
+	0x0000,     /* R1006 */
+	0x0000,     /* R1007 */
+	0x0000,     /* R1008 */
+	0x0000,     /* R1009 */
+	0x0000,     /* R1010 */
+	0x0000,     /* R1011 */
+	0x0000,     /* R1012 */
+	0x0000,     /* R1013 */
+	0x0000,     /* R1014 */
+	0x0000,     /* R1015 */
+	0x0000,     /* R1016 */
+	0x0000,     /* R1017 */
+	0x0000,     /* R1018 */
+	0x0000,     /* R1019 */
+	0x0000,     /* R1020 */
+	0x0000,     /* R1021 */
+	0x0000,     /* R1022 */
+	0x0000,     /* R1023 */
+	0x00C0,     /* R1024  - AIF1 ADC1 Left Volume */
+	0x00C0,     /* R1025  - AIF1 ADC1 Right Volume */
+	0x00C0,     /* R1026  - AIF1 DAC1 Left Volume */
+	0x00C0,     /* R1027  - AIF1 DAC1 Right Volume */
+	0x00C0,     /* R1028  - AIF1 ADC2 Left Volume */
+	0x00C0,     /* R1029  - AIF1 ADC2 Right Volume */
+	0x00C0,     /* R1030  - AIF1 DAC2 Left Volume */
+	0x00C0,     /* R1031  - AIF1 DAC2 Right Volume */
+	0x0000,     /* R1032 */
+	0x0000,     /* R1033 */
+	0x0000,     /* R1034 */
+	0x0000,     /* R1035 */
+	0x0000,     /* R1036 */
+	0x0000,     /* R1037 */
+	0x0000,     /* R1038 */
+	0x0000,     /* R1039 */
+	0x0000,     /* R1040  - AIF1 ADC1 Filters */
+	0x0000,     /* R1041  - AIF1 ADC2 Filters */
+	0x0000,     /* R1042 */
+	0x0000,     /* R1043 */
+	0x0000,     /* R1044 */
+	0x0000,     /* R1045 */
+	0x0000,     /* R1046 */
+	0x0000,     /* R1047 */
+	0x0000,     /* R1048 */
+	0x0000,     /* R1049 */
+	0x0000,     /* R1050 */
+	0x0000,     /* R1051 */
+	0x0000,     /* R1052 */
+	0x0000,     /* R1053 */
+	0x0000,     /* R1054 */
+	0x0000,     /* R1055 */
+	0x0200,     /* R1056  - AIF1 DAC1 Filters (1) */
+	0x0010,     /* R1057  - AIF1 DAC1 Filters (2) */
+	0x0200,     /* R1058  - AIF1 DAC2 Filters (1) */
+	0x0010,     /* R1059  - AIF1 DAC2 Filters (2) */
+	0x0000,     /* R1060 */
+	0x0000,     /* R1061 */
+	0x0000,     /* R1062 */
+	0x0000,     /* R1063 */
+	0x0000,     /* R1064 */
+	0x0000,     /* R1065 */
+	0x0000,     /* R1066 */
+	0x0000,     /* R1067 */
+	0x0000,     /* R1068 */
+	0x0000,     /* R1069 */
+	0x0000,     /* R1070 */
+	0x0000,     /* R1071 */
+	0x0000,     /* R1072 */
+	0x0000,     /* R1073 */
+	0x0000,     /* R1074 */
+	0x0000,     /* R1075 */
+	0x0000,     /* R1076 */
+	0x0000,     /* R1077 */
+	0x0000,     /* R1078 */
+	0x0000,     /* R1079 */
+	0x0000,     /* R1080 */
+	0x0000,     /* R1081 */
+	0x0000,     /* R1082 */
+	0x0000,     /* R1083 */
+	0x0000,     /* R1084 */
+	0x0000,     /* R1085 */
+	0x0000,     /* R1086 */
+	0x0000,     /* R1087 */
+	0x0098,     /* R1088  - AIF1 DRC1 (1) */
+	0x0845,     /* R1089  - AIF1 DRC1 (2) */
+	0x0000,     /* R1090  - AIF1 DRC1 (3) */
+	0x0000,     /* R1091  - AIF1 DRC1 (4) */
+	0x0000,     /* R1092  - AIF1 DRC1 (5) */
+	0x0000,     /* R1093 */
+	0x0000,     /* R1094 */
+	0x0000,     /* R1095 */
+	0x0000,     /* R1096 */
+	0x0000,     /* R1097 */
+	0x0000,     /* R1098 */
+	0x0000,     /* R1099 */
+	0x0000,     /* R1100 */
+	0x0000,     /* R1101 */
+	0x0000,     /* R1102 */
+	0x0000,     /* R1103 */
+	0x0098,     /* R1104  - AIF1 DRC2 (1) */
+	0x0845,     /* R1105  - AIF1 DRC2 (2) */
+	0x0000,     /* R1106  - AIF1 DRC2 (3) */
+	0x0000,     /* R1107  - AIF1 DRC2 (4) */
+	0x0000,     /* R1108  - AIF1 DRC2 (5) */
+	0x0000,     /* R1109 */
+	0x0000,     /* R1110 */
+	0x0000,     /* R1111 */
+	0x0000,     /* R1112 */
+	0x0000,     /* R1113 */
+	0x0000,     /* R1114 */
+	0x0000,     /* R1115 */
+	0x0000,     /* R1116 */
+	0x0000,     /* R1117 */
+	0x0000,     /* R1118 */
+	0x0000,     /* R1119 */
+	0x0000,     /* R1120 */
+	0x0000,     /* R1121 */
+	0x0000,     /* R1122 */
+	0x0000,     /* R1123 */
+	0x0000,     /* R1124 */
+	0x0000,     /* R1125 */
+	0x0000,     /* R1126 */
+	0x0000,     /* R1127 */
+	0x0000,     /* R1128 */
+	0x0000,     /* R1129 */
+	0x0000,     /* R1130 */
+	0x0000,     /* R1131 */
+	0x0000,     /* R1132 */
+	0x0000,     /* R1133 */
+	0x0000,     /* R1134 */
+	0x0000,     /* R1135 */
+	0x0000,     /* R1136 */
+	0x0000,     /* R1137 */
+	0x0000,     /* R1138 */
+	0x0000,     /* R1139 */
+	0x0000,     /* R1140 */
+	0x0000,     /* R1141 */
+	0x0000,     /* R1142 */
+	0x0000,     /* R1143 */
+	0x0000,     /* R1144 */
+	0x0000,     /* R1145 */
+	0x0000,     /* R1146 */
+	0x0000,     /* R1147 */
+	0x0000,     /* R1148 */
+	0x0000,     /* R1149 */
+	0x0000,     /* R1150 */
+	0x0000,     /* R1151 */
+	0x6318,     /* R1152  - AIF1 DAC1 EQ Gains (1) */
+	0x6300,     /* R1153  - AIF1 DAC1 EQ Gains (2) */
+	0x0FCA,     /* R1154  - AIF1 DAC1 EQ Band 1 A */
+	0x0400,     /* R1155  - AIF1 DAC1 EQ Band 1 B */
+	0x00D8,     /* R1156  - AIF1 DAC1 EQ Band 1 PG */
+	0x1EB5,     /* R1157  - AIF1 DAC1 EQ Band 2 A */
+	0xF145,     /* R1158  - AIF1 DAC1 EQ Band 2 B */
+	0x0B75,     /* R1159  - AIF1 DAC1 EQ Band 2 C */
+	0x01C5,     /* R1160  - AIF1 DAC1 EQ Band 2 PG */
+	0x1C58,     /* R1161  - AIF1 DAC1 EQ Band 3 A */
+	0xF373,     /* R1162  - AIF1 DAC1 EQ Band 3 B */
+	0x0A54,     /* R1163  - AIF1 DAC1 EQ Band 3 C */
+	0x0558,     /* R1164  - AIF1 DAC1 EQ Band 3 PG */
+	0x168E,     /* R1165  - AIF1 DAC1 EQ Band 4 A */
+	0xF829,     /* R1166  - AIF1 DAC1 EQ Band 4 B */
+	0x07AD,     /* R1167  - AIF1 DAC1 EQ Band 4 C */
+	0x1103,     /* R1168  - AIF1 DAC1 EQ Band 4 PG */
+	0x0564,     /* R1169  - AIF1 DAC1 EQ Band 5 A */
+	0x0559,     /* R1170  - AIF1 DAC1 EQ Band 5 B */
+	0x4000,     /* R1171  - AIF1 DAC1 EQ Band 5 PG */
+	0x0000,     /* R1172 */
+	0x0000,     /* R1173 */
+	0x0000,     /* R1174 */
+	0x0000,     /* R1175 */
+	0x0000,     /* R1176 */
+	0x0000,     /* R1177 */
+	0x0000,     /* R1178 */
+	0x0000,     /* R1179 */
+	0x0000,     /* R1180 */
+	0x0000,     /* R1181 */
+	0x0000,     /* R1182 */
+	0x0000,     /* R1183 */
+	0x6318,     /* R1184  - AIF1 DAC2 EQ Gains (1) */
+	0x6300,     /* R1185  - AIF1 DAC2 EQ Gains (2) */
+	0x0FCA,     /* R1186  - AIF1 DAC2 EQ Band 1 A */
+	0x0400,     /* R1187  - AIF1 DAC2 EQ Band 1 B */
+	0x00D8,     /* R1188  - AIF1 DAC2 EQ Band 1 PG */
+	0x1EB5,     /* R1189  - AIF1 DAC2 EQ Band 2 A */
+	0xF145,     /* R1190  - AIF1 DAC2 EQ Band 2 B */
+	0x0B75,     /* R1191  - AIF1 DAC2 EQ Band 2 C */
+	0x01C5,     /* R1192  - AIF1 DAC2 EQ Band 2 PG */
+	0x1C58,     /* R1193  - AIF1 DAC2 EQ Band 3 A */
+	0xF373,     /* R1194  - AIF1 DAC2 EQ Band 3 B */
+	0x0A54,     /* R1195  - AIF1 DAC2 EQ Band 3 C */
+	0x0558,     /* R1196  - AIF1 DAC2 EQ Band 3 PG */
+	0x168E,     /* R1197  - AIF1 DAC2 EQ Band 4 A */
+	0xF829,     /* R1198  - AIF1 DAC2 EQ Band 4 B */
+	0x07AD,     /* R1199  - AIF1 DAC2 EQ Band 4 C */
+	0x1103,     /* R1200  - AIF1 DAC2 EQ Band 4 PG */
+	0x0564,     /* R1201  - AIF1 DAC2 EQ Band 5 A */
+	0x0559,     /* R1202  - AIF1 DAC2 EQ Band 5 B */
+	0x4000,     /* R1203  - AIF1 DAC2 EQ Band 5 PG */
+	0x0000,     /* R1204 */
+	0x0000,     /* R1205 */
+	0x0000,     /* R1206 */
+	0x0000,     /* R1207 */
+	0x0000,     /* R1208 */
+	0x0000,     /* R1209 */
+	0x0000,     /* R1210 */
+	0x0000,     /* R1211 */
+	0x0000,     /* R1212 */
+	0x0000,     /* R1213 */
+	0x0000,     /* R1214 */
+	0x0000,     /* R1215 */
+	0x0000,     /* R1216 */
+	0x0000,     /* R1217 */
+	0x0000,     /* R1218 */
+	0x0000,     /* R1219 */
+	0x0000,     /* R1220 */
+	0x0000,     /* R1221 */
+	0x0000,     /* R1222 */
+	0x0000,     /* R1223 */
+	0x0000,     /* R1224 */
+	0x0000,     /* R1225 */
+	0x0000,     /* R1226 */
+	0x0000,     /* R1227 */
+	0x0000,     /* R1228 */
+	0x0000,     /* R1229 */
+	0x0000,     /* R1230 */
+	0x0000,     /* R1231 */
+	0x0000,     /* R1232 */
+	0x0000,     /* R1233 */
+	0x0000,     /* R1234 */
+	0x0000,     /* R1235 */
+	0x0000,     /* R1236 */
+	0x0000,     /* R1237 */
+	0x0000,     /* R1238 */
+	0x0000,     /* R1239 */
+	0x0000,     /* R1240 */
+	0x0000,     /* R1241 */
+	0x0000,     /* R1242 */
+	0x0000,     /* R1243 */
+	0x0000,     /* R1244 */
+	0x0000,     /* R1245 */
+	0x0000,     /* R1246 */
+	0x0000,     /* R1247 */
+	0x0000,     /* R1248 */
+	0x0000,     /* R1249 */
+	0x0000,     /* R1250 */
+	0x0000,     /* R1251 */
+	0x0000,     /* R1252 */
+	0x0000,     /* R1253 */
+	0x0000,     /* R1254 */
+	0x0000,     /* R1255 */
+	0x0000,     /* R1256 */
+	0x0000,     /* R1257 */
+	0x0000,     /* R1258 */
+	0x0000,     /* R1259 */
+	0x0000,     /* R1260 */
+	0x0000,     /* R1261 */
+	0x0000,     /* R1262 */
+	0x0000,     /* R1263 */
+	0x0000,     /* R1264 */
+	0x0000,     /* R1265 */
+	0x0000,     /* R1266 */
+	0x0000,     /* R1267 */
+	0x0000,     /* R1268 */
+	0x0000,     /* R1269 */
+	0x0000,     /* R1270 */
+	0x0000,     /* R1271 */
+	0x0000,     /* R1272 */
+	0x0000,     /* R1273 */
+	0x0000,     /* R1274 */
+	0x0000,     /* R1275 */
+	0x0000,     /* R1276 */
+	0x0000,     /* R1277 */
+	0x0000,     /* R1278 */
+	0x0000,     /* R1279 */
+	0x00C0,     /* R1280  - AIF2 ADC Left Volume */
+	0x00C0,     /* R1281  - AIF2 ADC Right Volume */
+	0x00C0,     /* R1282  - AIF2 DAC Left Volume */
+	0x00C0,     /* R1283  - AIF2 DAC Right Volume */
+	0x0000,     /* R1284 */
+	0x0000,     /* R1285 */
+	0x0000,     /* R1286 */
+	0x0000,     /* R1287 */
+	0x0000,     /* R1288 */
+	0x0000,     /* R1289 */
+	0x0000,     /* R1290 */
+	0x0000,     /* R1291 */
+	0x0000,     /* R1292 */
+	0x0000,     /* R1293 */
+	0x0000,     /* R1294 */
+	0x0000,     /* R1295 */
+	0x0000,     /* R1296  - AIF2 ADC Filters */
+	0x0000,     /* R1297 */
+	0x0000,     /* R1298 */
+	0x0000,     /* R1299 */
+	0x0000,     /* R1300 */
+	0x0000,     /* R1301 */
+	0x0000,     /* R1302 */
+	0x0000,     /* R1303 */
+	0x0000,     /* R1304 */
+	0x0000,     /* R1305 */
+	0x0000,     /* R1306 */
+	0x0000,     /* R1307 */
+	0x0000,     /* R1308 */
+	0x0000,     /* R1309 */
+	0x0000,     /* R1310 */
+	0x0000,     /* R1311 */
+	0x0200,     /* R1312  - AIF2 DAC Filters (1) */
+	0x0010,     /* R1313  - AIF2 DAC Filters (2) */
+	0x0000,     /* R1314 */
+	0x0000,     /* R1315 */
+	0x0000,     /* R1316 */
+	0x0000,     /* R1317 */
+	0x0000,     /* R1318 */
+	0x0000,     /* R1319 */
+	0x0000,     /* R1320 */
+	0x0000,     /* R1321 */
+	0x0000,     /* R1322 */
+	0x0000,     /* R1323 */
+	0x0000,     /* R1324 */
+	0x0000,     /* R1325 */
+	0x0000,     /* R1326 */
+	0x0000,     /* R1327 */
+	0x0000,     /* R1328 */
+	0x0000,     /* R1329 */
+	0x0000,     /* R1330 */
+	0x0000,     /* R1331 */
+	0x0000,     /* R1332 */
+	0x0000,     /* R1333 */
+	0x0000,     /* R1334 */
+	0x0000,     /* R1335 */
+	0x0000,     /* R1336 */
+	0x0000,     /* R1337 */
+	0x0000,     /* R1338 */
+	0x0000,     /* R1339 */
+	0x0000,     /* R1340 */
+	0x0000,     /* R1341 */
+	0x0000,     /* R1342 */
+	0x0000,     /* R1343 */
+	0x0098,     /* R1344  - AIF2 DRC (1) */
+	0x0845,     /* R1345  - AIF2 DRC (2) */
+	0x0000,     /* R1346  - AIF2 DRC (3) */
+	0x0000,     /* R1347  - AIF2 DRC (4) */
+	0x0000,     /* R1348  - AIF2 DRC (5) */
+	0x0000,     /* R1349 */
+	0x0000,     /* R1350 */
+	0x0000,     /* R1351 */
+	0x0000,     /* R1352 */
+	0x0000,     /* R1353 */
+	0x0000,     /* R1354 */
+	0x0000,     /* R1355 */
+	0x0000,     /* R1356 */
+	0x0000,     /* R1357 */
+	0x0000,     /* R1358 */
+	0x0000,     /* R1359 */
+	0x0000,     /* R1360 */
+	0x0000,     /* R1361 */
+	0x0000,     /* R1362 */
+	0x0000,     /* R1363 */
+	0x0000,     /* R1364 */
+	0x0000,     /* R1365 */
+	0x0000,     /* R1366 */
+	0x0000,     /* R1367 */
+	0x0000,     /* R1368 */
+	0x0000,     /* R1369 */
+	0x0000,     /* R1370 */
+	0x0000,     /* R1371 */
+	0x0000,     /* R1372 */
+	0x0000,     /* R1373 */
+	0x0000,     /* R1374 */
+	0x0000,     /* R1375 */
+	0x0000,     /* R1376 */
+	0x0000,     /* R1377 */
+	0x0000,     /* R1378 */
+	0x0000,     /* R1379 */
+	0x0000,     /* R1380 */
+	0x0000,     /* R1381 */
+	0x0000,     /* R1382 */
+	0x0000,     /* R1383 */
+	0x0000,     /* R1384 */
+	0x0000,     /* R1385 */
+	0x0000,     /* R1386 */
+	0x0000,     /* R1387 */
+	0x0000,     /* R1388 */
+	0x0000,     /* R1389 */
+	0x0000,     /* R1390 */
+	0x0000,     /* R1391 */
+	0x0000,     /* R1392 */
+	0x0000,     /* R1393 */
+	0x0000,     /* R1394 */
+	0x0000,     /* R1395 */
+	0x0000,     /* R1396 */
+	0x0000,     /* R1397 */
+	0x0000,     /* R1398 */
+	0x0000,     /* R1399 */
+	0x0000,     /* R1400 */
+	0x0000,     /* R1401 */
+	0x0000,     /* R1402 */
+	0x0000,     /* R1403 */
+	0x0000,     /* R1404 */
+	0x0000,     /* R1405 */
+	0x0000,     /* R1406 */
+	0x0000,     /* R1407 */
+	0x6318,     /* R1408  - AIF2 EQ Gains (1) */
+	0x6300,     /* R1409  - AIF2 EQ Gains (2) */
+	0x0FCA,     /* R1410  - AIF2 EQ Band 1 A */
+	0x0400,     /* R1411  - AIF2 EQ Band 1 B */
+	0x00D8,     /* R1412  - AIF2 EQ Band 1 PG */
+	0x1EB5,     /* R1413  - AIF2 EQ Band 2 A */
+	0xF145,     /* R1414  - AIF2 EQ Band 2 B */
+	0x0B75,     /* R1415  - AIF2 EQ Band 2 C */
+	0x01C5,     /* R1416  - AIF2 EQ Band 2 PG */
+	0x1C58,     /* R1417  - AIF2 EQ Band 3 A */
+	0xF373,     /* R1418  - AIF2 EQ Band 3 B */
+	0x0A54,     /* R1419  - AIF2 EQ Band 3 C */
+	0x0558,     /* R1420  - AIF2 EQ Band 3 PG */
+	0x168E,     /* R1421  - AIF2 EQ Band 4 A */
+	0xF829,     /* R1422  - AIF2 EQ Band 4 B */
+	0x07AD,     /* R1423  - AIF2 EQ Band 4 C */
+	0x1103,     /* R1424  - AIF2 EQ Band 4 PG */
+	0x0564,     /* R1425  - AIF2 EQ Band 5 A */
+	0x0559,     /* R1426  - AIF2 EQ Band 5 B */
+	0x4000,     /* R1427  - AIF2 EQ Band 5 PG */
+	0x0000,     /* R1428 */
+	0x0000,     /* R1429 */
+	0x0000,     /* R1430 */
+	0x0000,     /* R1431 */
+	0x0000,     /* R1432 */
+	0x0000,     /* R1433 */
+	0x0000,     /* R1434 */
+	0x0000,     /* R1435 */
+	0x0000,     /* R1436 */
+	0x0000,     /* R1437 */
+	0x0000,     /* R1438 */
+	0x0000,     /* R1439 */
+	0x0000,     /* R1440 */
+	0x0000,     /* R1441 */
+	0x0000,     /* R1442 */
+	0x0000,     /* R1443 */
+	0x0000,     /* R1444 */
+	0x0000,     /* R1445 */
+	0x0000,     /* R1446 */
+	0x0000,     /* R1447 */
+	0x0000,     /* R1448 */
+	0x0000,     /* R1449 */
+	0x0000,     /* R1450 */
+	0x0000,     /* R1451 */
+	0x0000,     /* R1452 */
+	0x0000,     /* R1453 */
+	0x0000,     /* R1454 */
+	0x0000,     /* R1455 */
+	0x0000,     /* R1456 */
+	0x0000,     /* R1457 */
+	0x0000,     /* R1458 */
+	0x0000,     /* R1459 */
+	0x0000,     /* R1460 */
+	0x0000,     /* R1461 */
+	0x0000,     /* R1462 */
+	0x0000,     /* R1463 */
+	0x0000,     /* R1464 */
+	0x0000,     /* R1465 */
+	0x0000,     /* R1466 */
+	0x0000,     /* R1467 */
+	0x0000,     /* R1468 */
+	0x0000,     /* R1469 */
+	0x0000,     /* R1470 */
+	0x0000,     /* R1471 */
+	0x0000,     /* R1472 */
+	0x0000,     /* R1473 */
+	0x0000,     /* R1474 */
+	0x0000,     /* R1475 */
+	0x0000,     /* R1476 */
+	0x0000,     /* R1477 */
+	0x0000,     /* R1478 */
+	0x0000,     /* R1479 */
+	0x0000,     /* R1480 */
+	0x0000,     /* R1481 */
+	0x0000,     /* R1482 */
+	0x0000,     /* R1483 */
+	0x0000,     /* R1484 */
+	0x0000,     /* R1485 */
+	0x0000,     /* R1486 */
+	0x0000,     /* R1487 */
+	0x0000,     /* R1488 */
+	0x0000,     /* R1489 */
+	0x0000,     /* R1490 */
+	0x0000,     /* R1491 */
+	0x0000,     /* R1492 */
+	0x0000,     /* R1493 */
+	0x0000,     /* R1494 */
+	0x0000,     /* R1495 */
+	0x0000,     /* R1496 */
+	0x0000,     /* R1497 */
+	0x0000,     /* R1498 */
+	0x0000,     /* R1499 */
+	0x0000,     /* R1500 */
+	0x0000,     /* R1501 */
+	0x0000,     /* R1502 */
+	0x0000,     /* R1503 */
+	0x0000,     /* R1504 */
+	0x0000,     /* R1505 */
+	0x0000,     /* R1506 */
+	0x0000,     /* R1507 */
+	0x0000,     /* R1508 */
+	0x0000,     /* R1509 */
+	0x0000,     /* R1510 */
+	0x0000,     /* R1511 */
+	0x0000,     /* R1512 */
+	0x0000,     /* R1513 */
+	0x0000,     /* R1514 */
+	0x0000,     /* R1515 */
+	0x0000,     /* R1516 */
+	0x0000,     /* R1517 */
+	0x0000,     /* R1518 */
+	0x0000,     /* R1519 */
+	0x0000,     /* R1520 */
+	0x0000,     /* R1521 */
+	0x0000,     /* R1522 */
+	0x0000,     /* R1523 */
+	0x0000,     /* R1524 */
+	0x0000,     /* R1525 */
+	0x0000,     /* R1526 */
+	0x0000,     /* R1527 */
+	0x0000,     /* R1528 */
+	0x0000,     /* R1529 */
+	0x0000,     /* R1530 */
+	0x0000,     /* R1531 */
+	0x0000,     /* R1532 */
+	0x0000,     /* R1533 */
+	0x0000,     /* R1534 */
+	0x0000,     /* R1535 */
+	0x0000,     /* R1536  - DAC1 Mixer Volumes */
+	0x0000,     /* R1537  - DAC1 Left Mixer Routing */
+	0x0000,     /* R1538  - DAC1 Right Mixer Routing */
+	0x0000,     /* R1539  - DAC2 Mixer Volumes */
+	0x0000,     /* R1540  - DAC2 Left Mixer Routing */
+	0x0000,     /* R1541  - DAC2 Right Mixer Routing */
+	0x0000,     /* R1542  - AIF1 ADC1 Left Mixer Routing */
+	0x0000,     /* R1543  - AIF1 ADC1 Right Mixer Routing */
+	0x0000,     /* R1544  - AIF1 ADC2 Left Mixer Routing */
+	0x0000,     /* R1545  - AIF1 ADC2 Right mixer Routing */
+	0x0000,     /* R1546 */
+	0x0000,     /* R1547 */
+	0x0000,     /* R1548 */
+	0x0000,     /* R1549 */
+	0x0000,     /* R1550 */
+	0x0000,     /* R1551 */
+	0x02C0,     /* R1552  - DAC1 Left Volume */
+	0x02C0,     /* R1553  - DAC1 Right Volume */
+	0x02C0,     /* R1554  - DAC2 Left Volume */
+	0x02C0,     /* R1555  - DAC2 Right Volume */
+	0x0000,     /* R1556  - DAC Softmute */
+	0x0000,     /* R1557 */
+	0x0000,     /* R1558 */
+	0x0000,     /* R1559 */
+	0x0000,     /* R1560 */
+	0x0000,     /* R1561 */
+	0x0000,     /* R1562 */
+	0x0000,     /* R1563 */
+	0x0000,     /* R1564 */
+	0x0000,     /* R1565 */
+	0x0000,     /* R1566 */
+	0x0000,     /* R1567 */
+	0x0002,     /* R1568  - Oversampling */
+	0x0000,     /* R1569  - Sidetone */
+};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 4d3e6f1..247a6a9 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -18,15 +18,17 @@
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <sound/core.h>
+#include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <trace/events/asoc.h>
 
 #include <linux/mfd/wm8994/core.h>
 #include <linux/mfd/wm8994/registers.h>
@@ -57,8 +59,6 @@
 	WM8994_AIF2_EQ_GAINS_1,
 };
 
-#define WM8994_REG_CACHE_SIZE  0x621
-
 struct wm8994_micdet {
 	struct snd_soc_jack *jack;
 	int det;
@@ -71,7 +71,6 @@
 	enum snd_soc_control_type control_type;
 	void *control_data;
 	struct snd_soc_codec *codec;
-	u16 reg_cache[WM8994_REG_CACHE_SIZE + 1];
 	int sysclk[2];
 	int sysclk_rate[2];
 	int mclk[2];
@@ -81,6 +80,8 @@
 	int dac_rates[2];
 	int lrclk_shared[2];
 
+	int mbc_ena[3];
+
 	/* Platform dependant DRC configuration */
 	const char **drc_texts;
 	int drc_cfg[WM8994_NUM_DRC];
@@ -92,1588 +93,22 @@
 	int retune_mobile_cfg[WM8994_NUM_EQ];
 	struct soc_enum retune_mobile_enum;
 
+	/* Platform dependant MBC configuration */
+	int mbc_cfg;
+	const char **mbc_texts;
+	struct soc_enum mbc_enum;
+
 	struct wm8994_micdet micdet[2];
 
+	wm8958_micdet_cb jack_cb;
+	void *jack_cb_data;
+	bool jack_is_mic;
+	bool jack_is_video;
+
 	int revision;
 	struct wm8994_pdata *pdata;
 };
 
-static const struct {
-	unsigned short readable;   /* Mask of readable bits */
-	unsigned short writable;   /* Mask of writable bits */
-} access_masks[] = {
-	{ 0xFFFF, 0xFFFF }, /* R0     - Software Reset */
-	{ 0x3B37, 0x3B37 }, /* R1     - Power Management (1) */
-	{ 0x6BF0, 0x6BF0 }, /* R2     - Power Management (2) */
-	{ 0x3FF0, 0x3FF0 }, /* R3     - Power Management (3) */
-	{ 0x3F3F, 0x3F3F }, /* R4     - Power Management (4) */
-	{ 0x3F0F, 0x3F0F }, /* R5     - Power Management (5) */
-	{ 0x003F, 0x003F }, /* R6     - Power Management (6) */
-	{ 0x0000, 0x0000 }, /* R7 */
-	{ 0x0000, 0x0000 }, /* R8 */
-	{ 0x0000, 0x0000 }, /* R9 */
-	{ 0x0000, 0x0000 }, /* R10 */
-	{ 0x0000, 0x0000 }, /* R11 */
-	{ 0x0000, 0x0000 }, /* R12 */
-	{ 0x0000, 0x0000 }, /* R13 */
-	{ 0x0000, 0x0000 }, /* R14 */
-	{ 0x0000, 0x0000 }, /* R15 */
-	{ 0x0000, 0x0000 }, /* R16 */
-	{ 0x0000, 0x0000 }, /* R17 */
-	{ 0x0000, 0x0000 }, /* R18 */
-	{ 0x0000, 0x0000 }, /* R19 */
-	{ 0x0000, 0x0000 }, /* R20 */
-	{ 0x01C0, 0x01C0 }, /* R21    - Input Mixer (1) */
-	{ 0x0000, 0x0000 }, /* R22 */
-	{ 0x0000, 0x0000 }, /* R23 */
-	{ 0x00DF, 0x01DF }, /* R24    - Left Line Input 1&2 Volume */
-	{ 0x00DF, 0x01DF }, /* R25    - Left Line Input 3&4 Volume */
-	{ 0x00DF, 0x01DF }, /* R26    - Right Line Input 1&2 Volume */
-	{ 0x00DF, 0x01DF }, /* R27    - Right Line Input 3&4 Volume */
-	{ 0x00FF, 0x01FF }, /* R28    - Left Output Volume */
-	{ 0x00FF, 0x01FF }, /* R29    - Right Output Volume */
-	{ 0x0077, 0x0077 }, /* R30    - Line Outputs Volume */
-	{ 0x0030, 0x0030 }, /* R31    - HPOUT2 Volume */
-	{ 0x00FF, 0x01FF }, /* R32    - Left OPGA Volume */
-	{ 0x00FF, 0x01FF }, /* R33    - Right OPGA Volume */
-	{ 0x007F, 0x007F }, /* R34    - SPKMIXL Attenuation */
-	{ 0x017F, 0x017F }, /* R35    - SPKMIXR Attenuation */
-	{ 0x003F, 0x003F }, /* R36    - SPKOUT Mixers */
-	{ 0x003F, 0x003F }, /* R37    - ClassD */
-	{ 0x00FF, 0x01FF }, /* R38    - Speaker Volume Left */
-	{ 0x00FF, 0x01FF }, /* R39    - Speaker Volume Right */
-	{ 0x00FF, 0x00FF }, /* R40    - Input Mixer (2) */
-	{ 0x01B7, 0x01B7 }, /* R41    - Input Mixer (3) */
-	{ 0x01B7, 0x01B7 }, /* R42    - Input Mixer (4) */
-	{ 0x01C7, 0x01C7 }, /* R43    - Input Mixer (5) */
-	{ 0x01C7, 0x01C7 }, /* R44    - Input Mixer (6) */
-	{ 0x01FF, 0x01FF }, /* R45    - Output Mixer (1) */
-	{ 0x01FF, 0x01FF }, /* R46    - Output Mixer (2) */
-	{ 0x0FFF, 0x0FFF }, /* R47    - Output Mixer (3) */
-	{ 0x0FFF, 0x0FFF }, /* R48    - Output Mixer (4) */
-	{ 0x0FFF, 0x0FFF }, /* R49    - Output Mixer (5) */
-	{ 0x0FFF, 0x0FFF }, /* R50    - Output Mixer (6) */
-	{ 0x0038, 0x0038 }, /* R51    - HPOUT2 Mixer */
-	{ 0x0077, 0x0077 }, /* R52    - Line Mixer (1) */
-	{ 0x0077, 0x0077 }, /* R53    - Line Mixer (2) */
-	{ 0x03FF, 0x03FF }, /* R54    - Speaker Mixer */
-	{ 0x00C1, 0x00C1 }, /* R55    - Additional Control */
-	{ 0x00F0, 0x00F0 }, /* R56    - AntiPOP (1) */
-	{ 0x01EF, 0x01EF }, /* R57    - AntiPOP (2) */
-	{ 0x00FF, 0x00FF }, /* R58    - MICBIAS */
-	{ 0x000F, 0x000F }, /* R59    - LDO 1 */
-	{ 0x0007, 0x0007 }, /* R60    - LDO 2 */
-	{ 0x0000, 0x0000 }, /* R61 */
-	{ 0x0000, 0x0000 }, /* R62 */
-	{ 0x0000, 0x0000 }, /* R63 */
-	{ 0x0000, 0x0000 }, /* R64 */
-	{ 0x0000, 0x0000 }, /* R65 */
-	{ 0x0000, 0x0000 }, /* R66 */
-	{ 0x0000, 0x0000 }, /* R67 */
-	{ 0x0000, 0x0000 }, /* R68 */
-	{ 0x0000, 0x0000 }, /* R69 */
-	{ 0x0000, 0x0000 }, /* R70 */
-	{ 0x0000, 0x0000 }, /* R71 */
-	{ 0x0000, 0x0000 }, /* R72 */
-	{ 0x0000, 0x0000 }, /* R73 */
-	{ 0x0000, 0x0000 }, /* R74 */
-	{ 0x0000, 0x0000 }, /* R75 */
-	{ 0x8000, 0x8000 }, /* R76    - Charge Pump (1) */
-	{ 0x0000, 0x0000 }, /* R77 */
-	{ 0x0000, 0x0000 }, /* R78 */
-	{ 0x0000, 0x0000 }, /* R79 */
-	{ 0x0000, 0x0000 }, /* R80 */
-	{ 0x0301, 0x0301 }, /* R81    - Class W (1) */
-	{ 0x0000, 0x0000 }, /* R82 */
-	{ 0x0000, 0x0000 }, /* R83 */
-	{ 0x333F, 0x333F }, /* R84    - DC Servo (1) */
-	{ 0x0FEF, 0x0FEF }, /* R85    - DC Servo (2) */
-	{ 0x0000, 0x0000 }, /* R86 */
-	{ 0xFFFF, 0xFFFF }, /* R87    - DC Servo (4) */
-	{ 0x0333, 0x0000 }, /* R88    - DC Servo Readback */
-	{ 0x0000, 0x0000 }, /* R89 */
-	{ 0x0000, 0x0000 }, /* R90 */
-	{ 0x0000, 0x0000 }, /* R91 */
-	{ 0x0000, 0x0000 }, /* R92 */
-	{ 0x0000, 0x0000 }, /* R93 */
-	{ 0x0000, 0x0000 }, /* R94 */
-	{ 0x0000, 0x0000 }, /* R95 */
-	{ 0x00EE, 0x00EE }, /* R96    - Analogue HP (1) */
-	{ 0x0000, 0x0000 }, /* R97 */
-	{ 0x0000, 0x0000 }, /* R98 */
-	{ 0x0000, 0x0000 }, /* R99 */
-	{ 0x0000, 0x0000 }, /* R100 */
-	{ 0x0000, 0x0000 }, /* R101 */
-	{ 0x0000, 0x0000 }, /* R102 */
-	{ 0x0000, 0x0000 }, /* R103 */
-	{ 0x0000, 0x0000 }, /* R104 */
-	{ 0x0000, 0x0000 }, /* R105 */
-	{ 0x0000, 0x0000 }, /* R106 */
-	{ 0x0000, 0x0000 }, /* R107 */
-	{ 0x0000, 0x0000 }, /* R108 */
-	{ 0x0000, 0x0000 }, /* R109 */
-	{ 0x0000, 0x0000 }, /* R110 */
-	{ 0x0000, 0x0000 }, /* R111 */
-	{ 0x0000, 0x0000 }, /* R112 */
-	{ 0x0000, 0x0000 }, /* R113 */
-	{ 0x0000, 0x0000 }, /* R114 */
-	{ 0x0000, 0x0000 }, /* R115 */
-	{ 0x0000, 0x0000 }, /* R116 */
-	{ 0x0000, 0x0000 }, /* R117 */
-	{ 0x0000, 0x0000 }, /* R118 */
-	{ 0x0000, 0x0000 }, /* R119 */
-	{ 0x0000, 0x0000 }, /* R120 */
-	{ 0x0000, 0x0000 }, /* R121 */
-	{ 0x0000, 0x0000 }, /* R122 */
-	{ 0x0000, 0x0000 }, /* R123 */
-	{ 0x0000, 0x0000 }, /* R124 */
-	{ 0x0000, 0x0000 }, /* R125 */
-	{ 0x0000, 0x0000 }, /* R126 */
-	{ 0x0000, 0x0000 }, /* R127 */
-	{ 0x0000, 0x0000 }, /* R128 */
-	{ 0x0000, 0x0000 }, /* R129 */
-	{ 0x0000, 0x0000 }, /* R130 */
-	{ 0x0000, 0x0000 }, /* R131 */
-	{ 0x0000, 0x0000 }, /* R132 */
-	{ 0x0000, 0x0000 }, /* R133 */
-	{ 0x0000, 0x0000 }, /* R134 */
-	{ 0x0000, 0x0000 }, /* R135 */
-	{ 0x0000, 0x0000 }, /* R136 */
-	{ 0x0000, 0x0000 }, /* R137 */
-	{ 0x0000, 0x0000 }, /* R138 */
-	{ 0x0000, 0x0000 }, /* R139 */
-	{ 0x0000, 0x0000 }, /* R140 */
-	{ 0x0000, 0x0000 }, /* R141 */
-	{ 0x0000, 0x0000 }, /* R142 */
-	{ 0x0000, 0x0000 }, /* R143 */
-	{ 0x0000, 0x0000 }, /* R144 */
-	{ 0x0000, 0x0000 }, /* R145 */
-	{ 0x0000, 0x0000 }, /* R146 */
-	{ 0x0000, 0x0000 }, /* R147 */
-	{ 0x0000, 0x0000 }, /* R148 */
-	{ 0x0000, 0x0000 }, /* R149 */
-	{ 0x0000, 0x0000 }, /* R150 */
-	{ 0x0000, 0x0000 }, /* R151 */
-	{ 0x0000, 0x0000 }, /* R152 */
-	{ 0x0000, 0x0000 }, /* R153 */
-	{ 0x0000, 0x0000 }, /* R154 */
-	{ 0x0000, 0x0000 }, /* R155 */
-	{ 0x0000, 0x0000 }, /* R156 */
-	{ 0x0000, 0x0000 }, /* R157 */
-	{ 0x0000, 0x0000 }, /* R158 */
-	{ 0x0000, 0x0000 }, /* R159 */
-	{ 0x0000, 0x0000 }, /* R160 */
-	{ 0x0000, 0x0000 }, /* R161 */
-	{ 0x0000, 0x0000 }, /* R162 */
-	{ 0x0000, 0x0000 }, /* R163 */
-	{ 0x0000, 0x0000 }, /* R164 */
-	{ 0x0000, 0x0000 }, /* R165 */
-	{ 0x0000, 0x0000 }, /* R166 */
-	{ 0x0000, 0x0000 }, /* R167 */
-	{ 0x0000, 0x0000 }, /* R168 */
-	{ 0x0000, 0x0000 }, /* R169 */
-	{ 0x0000, 0x0000 }, /* R170 */
-	{ 0x0000, 0x0000 }, /* R171 */
-	{ 0x0000, 0x0000 }, /* R172 */
-	{ 0x0000, 0x0000 }, /* R173 */
-	{ 0x0000, 0x0000 }, /* R174 */
-	{ 0x0000, 0x0000 }, /* R175 */
-	{ 0x0000, 0x0000 }, /* R176 */
-	{ 0x0000, 0x0000 }, /* R177 */
-	{ 0x0000, 0x0000 }, /* R178 */
-	{ 0x0000, 0x0000 }, /* R179 */
-	{ 0x0000, 0x0000 }, /* R180 */
-	{ 0x0000, 0x0000 }, /* R181 */
-	{ 0x0000, 0x0000 }, /* R182 */
-	{ 0x0000, 0x0000 }, /* R183 */
-	{ 0x0000, 0x0000 }, /* R184 */
-	{ 0x0000, 0x0000 }, /* R185 */
-	{ 0x0000, 0x0000 }, /* R186 */
-	{ 0x0000, 0x0000 }, /* R187 */
-	{ 0x0000, 0x0000 }, /* R188 */
-	{ 0x0000, 0x0000 }, /* R189 */
-	{ 0x0000, 0x0000 }, /* R190 */
-	{ 0x0000, 0x0000 }, /* R191 */
-	{ 0x0000, 0x0000 }, /* R192 */
-	{ 0x0000, 0x0000 }, /* R193 */
-	{ 0x0000, 0x0000 }, /* R194 */
-	{ 0x0000, 0x0000 }, /* R195 */
-	{ 0x0000, 0x0000 }, /* R196 */
-	{ 0x0000, 0x0000 }, /* R197 */
-	{ 0x0000, 0x0000 }, /* R198 */
-	{ 0x0000, 0x0000 }, /* R199 */
-	{ 0x0000, 0x0000 }, /* R200 */
-	{ 0x0000, 0x0000 }, /* R201 */
-	{ 0x0000, 0x0000 }, /* R202 */
-	{ 0x0000, 0x0000 }, /* R203 */
-	{ 0x0000, 0x0000 }, /* R204 */
-	{ 0x0000, 0x0000 }, /* R205 */
-	{ 0x0000, 0x0000 }, /* R206 */
-	{ 0x0000, 0x0000 }, /* R207 */
-	{ 0x0000, 0x0000 }, /* R208 */
-	{ 0x0000, 0x0000 }, /* R209 */
-	{ 0x0000, 0x0000 }, /* R210 */
-	{ 0x0000, 0x0000 }, /* R211 */
-	{ 0x0000, 0x0000 }, /* R212 */
-	{ 0x0000, 0x0000 }, /* R213 */
-	{ 0x0000, 0x0000 }, /* R214 */
-	{ 0x0000, 0x0000 }, /* R215 */
-	{ 0x0000, 0x0000 }, /* R216 */
-	{ 0x0000, 0x0000 }, /* R217 */
-	{ 0x0000, 0x0000 }, /* R218 */
-	{ 0x0000, 0x0000 }, /* R219 */
-	{ 0x0000, 0x0000 }, /* R220 */
-	{ 0x0000, 0x0000 }, /* R221 */
-	{ 0x0000, 0x0000 }, /* R222 */
-	{ 0x0000, 0x0000 }, /* R223 */
-	{ 0x0000, 0x0000 }, /* R224 */
-	{ 0x0000, 0x0000 }, /* R225 */
-	{ 0x0000, 0x0000 }, /* R226 */
-	{ 0x0000, 0x0000 }, /* R227 */
-	{ 0x0000, 0x0000 }, /* R228 */
-	{ 0x0000, 0x0000 }, /* R229 */
-	{ 0x0000, 0x0000 }, /* R230 */
-	{ 0x0000, 0x0000 }, /* R231 */
-	{ 0x0000, 0x0000 }, /* R232 */
-	{ 0x0000, 0x0000 }, /* R233 */
-	{ 0x0000, 0x0000 }, /* R234 */
-	{ 0x0000, 0x0000 }, /* R235 */
-	{ 0x0000, 0x0000 }, /* R236 */
-	{ 0x0000, 0x0000 }, /* R237 */
-	{ 0x0000, 0x0000 }, /* R238 */
-	{ 0x0000, 0x0000 }, /* R239 */
-	{ 0x0000, 0x0000 }, /* R240 */
-	{ 0x0000, 0x0000 }, /* R241 */
-	{ 0x0000, 0x0000 }, /* R242 */
-	{ 0x0000, 0x0000 }, /* R243 */
-	{ 0x0000, 0x0000 }, /* R244 */
-	{ 0x0000, 0x0000 }, /* R245 */
-	{ 0x0000, 0x0000 }, /* R246 */
-	{ 0x0000, 0x0000 }, /* R247 */
-	{ 0x0000, 0x0000 }, /* R248 */
-	{ 0x0000, 0x0000 }, /* R249 */
-	{ 0x0000, 0x0000 }, /* R250 */
-	{ 0x0000, 0x0000 }, /* R251 */
-	{ 0x0000, 0x0000 }, /* R252 */
-	{ 0x0000, 0x0000 }, /* R253 */
-	{ 0x0000, 0x0000 }, /* R254 */
-	{ 0x0000, 0x0000 }, /* R255 */
-	{ 0x000F, 0x0000 }, /* R256   - Chip Revision */
-	{ 0x0074, 0x0074 }, /* R257   - Control Interface */
-	{ 0x0000, 0x0000 }, /* R258 */
-	{ 0x0000, 0x0000 }, /* R259 */
-	{ 0x0000, 0x0000 }, /* R260 */
-	{ 0x0000, 0x0000 }, /* R261 */
-	{ 0x0000, 0x0000 }, /* R262 */
-	{ 0x0000, 0x0000 }, /* R263 */
-	{ 0x0000, 0x0000 }, /* R264 */
-	{ 0x0000, 0x0000 }, /* R265 */
-	{ 0x0000, 0x0000 }, /* R266 */
-	{ 0x0000, 0x0000 }, /* R267 */
-	{ 0x0000, 0x0000 }, /* R268 */
-	{ 0x0000, 0x0000 }, /* R269 */
-	{ 0x0000, 0x0000 }, /* R270 */
-	{ 0x0000, 0x0000 }, /* R271 */
-	{ 0x807F, 0x837F }, /* R272   - Write Sequencer Ctrl (1) */
-	{ 0x017F, 0x0000 }, /* R273   - Write Sequencer Ctrl (2) */
-	{ 0x0000, 0x0000 }, /* R274 */
-	{ 0x0000, 0x0000 }, /* R275 */
-	{ 0x0000, 0x0000 }, /* R276 */
-	{ 0x0000, 0x0000 }, /* R277 */
-	{ 0x0000, 0x0000 }, /* R278 */
-	{ 0x0000, 0x0000 }, /* R279 */
-	{ 0x0000, 0x0000 }, /* R280 */
-	{ 0x0000, 0x0000 }, /* R281 */
-	{ 0x0000, 0x0000 }, /* R282 */
-	{ 0x0000, 0x0000 }, /* R283 */
-	{ 0x0000, 0x0000 }, /* R284 */
-	{ 0x0000, 0x0000 }, /* R285 */
-	{ 0x0000, 0x0000 }, /* R286 */
-	{ 0x0000, 0x0000 }, /* R287 */
-	{ 0x0000, 0x0000 }, /* R288 */
-	{ 0x0000, 0x0000 }, /* R289 */
-	{ 0x0000, 0x0000 }, /* R290 */
-	{ 0x0000, 0x0000 }, /* R291 */
-	{ 0x0000, 0x0000 }, /* R292 */
-	{ 0x0000, 0x0000 }, /* R293 */
-	{ 0x0000, 0x0000 }, /* R294 */
-	{ 0x0000, 0x0000 }, /* R295 */
-	{ 0x0000, 0x0000 }, /* R296 */
-	{ 0x0000, 0x0000 }, /* R297 */
-	{ 0x0000, 0x0000 }, /* R298 */
-	{ 0x0000, 0x0000 }, /* R299 */
-	{ 0x0000, 0x0000 }, /* R300 */
-	{ 0x0000, 0x0000 }, /* R301 */
-	{ 0x0000, 0x0000 }, /* R302 */
-	{ 0x0000, 0x0000 }, /* R303 */
-	{ 0x0000, 0x0000 }, /* R304 */
-	{ 0x0000, 0x0000 }, /* R305 */
-	{ 0x0000, 0x0000 }, /* R306 */
-	{ 0x0000, 0x0000 }, /* R307 */
-	{ 0x0000, 0x0000 }, /* R308 */
-	{ 0x0000, 0x0000 }, /* R309 */
-	{ 0x0000, 0x0000 }, /* R310 */
-	{ 0x0000, 0x0000 }, /* R311 */
-	{ 0x0000, 0x0000 }, /* R312 */
-	{ 0x0000, 0x0000 }, /* R313 */
-	{ 0x0000, 0x0000 }, /* R314 */
-	{ 0x0000, 0x0000 }, /* R315 */
-	{ 0x0000, 0x0000 }, /* R316 */
-	{ 0x0000, 0x0000 }, /* R317 */
-	{ 0x0000, 0x0000 }, /* R318 */
-	{ 0x0000, 0x0000 }, /* R319 */
-	{ 0x0000, 0x0000 }, /* R320 */
-	{ 0x0000, 0x0000 }, /* R321 */
-	{ 0x0000, 0x0000 }, /* R322 */
-	{ 0x0000, 0x0000 }, /* R323 */
-	{ 0x0000, 0x0000 }, /* R324 */
-	{ 0x0000, 0x0000 }, /* R325 */
-	{ 0x0000, 0x0000 }, /* R326 */
-	{ 0x0000, 0x0000 }, /* R327 */
-	{ 0x0000, 0x0000 }, /* R328 */
-	{ 0x0000, 0x0000 }, /* R329 */
-	{ 0x0000, 0x0000 }, /* R330 */
-	{ 0x0000, 0x0000 }, /* R331 */
-	{ 0x0000, 0x0000 }, /* R332 */
-	{ 0x0000, 0x0000 }, /* R333 */
-	{ 0x0000, 0x0000 }, /* R334 */
-	{ 0x0000, 0x0000 }, /* R335 */
-	{ 0x0000, 0x0000 }, /* R336 */
-	{ 0x0000, 0x0000 }, /* R337 */
-	{ 0x0000, 0x0000 }, /* R338 */
-	{ 0x0000, 0x0000 }, /* R339 */
-	{ 0x0000, 0x0000 }, /* R340 */
-	{ 0x0000, 0x0000 }, /* R341 */
-	{ 0x0000, 0x0000 }, /* R342 */
-	{ 0x0000, 0x0000 }, /* R343 */
-	{ 0x0000, 0x0000 }, /* R344 */
-	{ 0x0000, 0x0000 }, /* R345 */
-	{ 0x0000, 0x0000 }, /* R346 */
-	{ 0x0000, 0x0000 }, /* R347 */
-	{ 0x0000, 0x0000 }, /* R348 */
-	{ 0x0000, 0x0000 }, /* R349 */
-	{ 0x0000, 0x0000 }, /* R350 */
-	{ 0x0000, 0x0000 }, /* R351 */
-	{ 0x0000, 0x0000 }, /* R352 */
-	{ 0x0000, 0x0000 }, /* R353 */
-	{ 0x0000, 0x0000 }, /* R354 */
-	{ 0x0000, 0x0000 }, /* R355 */
-	{ 0x0000, 0x0000 }, /* R356 */
-	{ 0x0000, 0x0000 }, /* R357 */
-	{ 0x0000, 0x0000 }, /* R358 */
-	{ 0x0000, 0x0000 }, /* R359 */
-	{ 0x0000, 0x0000 }, /* R360 */
-	{ 0x0000, 0x0000 }, /* R361 */
-	{ 0x0000, 0x0000 }, /* R362 */
-	{ 0x0000, 0x0000 }, /* R363 */
-	{ 0x0000, 0x0000 }, /* R364 */
-	{ 0x0000, 0x0000 }, /* R365 */
-	{ 0x0000, 0x0000 }, /* R366 */
-	{ 0x0000, 0x0000 }, /* R367 */
-	{ 0x0000, 0x0000 }, /* R368 */
-	{ 0x0000, 0x0000 }, /* R369 */
-	{ 0x0000, 0x0000 }, /* R370 */
-	{ 0x0000, 0x0000 }, /* R371 */
-	{ 0x0000, 0x0000 }, /* R372 */
-	{ 0x0000, 0x0000 }, /* R373 */
-	{ 0x0000, 0x0000 }, /* R374 */
-	{ 0x0000, 0x0000 }, /* R375 */
-	{ 0x0000, 0x0000 }, /* R376 */
-	{ 0x0000, 0x0000 }, /* R377 */
-	{ 0x0000, 0x0000 }, /* R378 */
-	{ 0x0000, 0x0000 }, /* R379 */
-	{ 0x0000, 0x0000 }, /* R380 */
-	{ 0x0000, 0x0000 }, /* R381 */
-	{ 0x0000, 0x0000 }, /* R382 */
-	{ 0x0000, 0x0000 }, /* R383 */
-	{ 0x0000, 0x0000 }, /* R384 */
-	{ 0x0000, 0x0000 }, /* R385 */
-	{ 0x0000, 0x0000 }, /* R386 */
-	{ 0x0000, 0x0000 }, /* R387 */
-	{ 0x0000, 0x0000 }, /* R388 */
-	{ 0x0000, 0x0000 }, /* R389 */
-	{ 0x0000, 0x0000 }, /* R390 */
-	{ 0x0000, 0x0000 }, /* R391 */
-	{ 0x0000, 0x0000 }, /* R392 */
-	{ 0x0000, 0x0000 }, /* R393 */
-	{ 0x0000, 0x0000 }, /* R394 */
-	{ 0x0000, 0x0000 }, /* R395 */
-	{ 0x0000, 0x0000 }, /* R396 */
-	{ 0x0000, 0x0000 }, /* R397 */
-	{ 0x0000, 0x0000 }, /* R398 */
-	{ 0x0000, 0x0000 }, /* R399 */
-	{ 0x0000, 0x0000 }, /* R400 */
-	{ 0x0000, 0x0000 }, /* R401 */
-	{ 0x0000, 0x0000 }, /* R402 */
-	{ 0x0000, 0x0000 }, /* R403 */
-	{ 0x0000, 0x0000 }, /* R404 */
-	{ 0x0000, 0x0000 }, /* R405 */
-	{ 0x0000, 0x0000 }, /* R406 */
-	{ 0x0000, 0x0000 }, /* R407 */
-	{ 0x0000, 0x0000 }, /* R408 */
-	{ 0x0000, 0x0000 }, /* R409 */
-	{ 0x0000, 0x0000 }, /* R410 */
-	{ 0x0000, 0x0000 }, /* R411 */
-	{ 0x0000, 0x0000 }, /* R412 */
-	{ 0x0000, 0x0000 }, /* R413 */
-	{ 0x0000, 0x0000 }, /* R414 */
-	{ 0x0000, 0x0000 }, /* R415 */
-	{ 0x0000, 0x0000 }, /* R416 */
-	{ 0x0000, 0x0000 }, /* R417 */
-	{ 0x0000, 0x0000 }, /* R418 */
-	{ 0x0000, 0x0000 }, /* R419 */
-	{ 0x0000, 0x0000 }, /* R420 */
-	{ 0x0000, 0x0000 }, /* R421 */
-	{ 0x0000, 0x0000 }, /* R422 */
-	{ 0x0000, 0x0000 }, /* R423 */
-	{ 0x0000, 0x0000 }, /* R424 */
-	{ 0x0000, 0x0000 }, /* R425 */
-	{ 0x0000, 0x0000 }, /* R426 */
-	{ 0x0000, 0x0000 }, /* R427 */
-	{ 0x0000, 0x0000 }, /* R428 */
-	{ 0x0000, 0x0000 }, /* R429 */
-	{ 0x0000, 0x0000 }, /* R430 */
-	{ 0x0000, 0x0000 }, /* R431 */
-	{ 0x0000, 0x0000 }, /* R432 */
-	{ 0x0000, 0x0000 }, /* R433 */
-	{ 0x0000, 0x0000 }, /* R434 */
-	{ 0x0000, 0x0000 }, /* R435 */
-	{ 0x0000, 0x0000 }, /* R436 */
-	{ 0x0000, 0x0000 }, /* R437 */
-	{ 0x0000, 0x0000 }, /* R438 */
-	{ 0x0000, 0x0000 }, /* R439 */
-	{ 0x0000, 0x0000 }, /* R440 */
-	{ 0x0000, 0x0000 }, /* R441 */
-	{ 0x0000, 0x0000 }, /* R442 */
-	{ 0x0000, 0x0000 }, /* R443 */
-	{ 0x0000, 0x0000 }, /* R444 */
-	{ 0x0000, 0x0000 }, /* R445 */
-	{ 0x0000, 0x0000 }, /* R446 */
-	{ 0x0000, 0x0000 }, /* R447 */
-	{ 0x0000, 0x0000 }, /* R448 */
-	{ 0x0000, 0x0000 }, /* R449 */
-	{ 0x0000, 0x0000 }, /* R450 */
-	{ 0x0000, 0x0000 }, /* R451 */
-	{ 0x0000, 0x0000 }, /* R452 */
-	{ 0x0000, 0x0000 }, /* R453 */
-	{ 0x0000, 0x0000 }, /* R454 */
-	{ 0x0000, 0x0000 }, /* R455 */
-	{ 0x0000, 0x0000 }, /* R456 */
-	{ 0x0000, 0x0000 }, /* R457 */
-	{ 0x0000, 0x0000 }, /* R458 */
-	{ 0x0000, 0x0000 }, /* R459 */
-	{ 0x0000, 0x0000 }, /* R460 */
-	{ 0x0000, 0x0000 }, /* R461 */
-	{ 0x0000, 0x0000 }, /* R462 */
-	{ 0x0000, 0x0000 }, /* R463 */
-	{ 0x0000, 0x0000 }, /* R464 */
-	{ 0x0000, 0x0000 }, /* R465 */
-	{ 0x0000, 0x0000 }, /* R466 */
-	{ 0x0000, 0x0000 }, /* R467 */
-	{ 0x0000, 0x0000 }, /* R468 */
-	{ 0x0000, 0x0000 }, /* R469 */
-	{ 0x0000, 0x0000 }, /* R470 */
-	{ 0x0000, 0x0000 }, /* R471 */
-	{ 0x0000, 0x0000 }, /* R472 */
-	{ 0x0000, 0x0000 }, /* R473 */
-	{ 0x0000, 0x0000 }, /* R474 */
-	{ 0x0000, 0x0000 }, /* R475 */
-	{ 0x0000, 0x0000 }, /* R476 */
-	{ 0x0000, 0x0000 }, /* R477 */
-	{ 0x0000, 0x0000 }, /* R478 */
-	{ 0x0000, 0x0000 }, /* R479 */
-	{ 0x0000, 0x0000 }, /* R480 */
-	{ 0x0000, 0x0000 }, /* R481 */
-	{ 0x0000, 0x0000 }, /* R482 */
-	{ 0x0000, 0x0000 }, /* R483 */
-	{ 0x0000, 0x0000 }, /* R484 */
-	{ 0x0000, 0x0000 }, /* R485 */
-	{ 0x0000, 0x0000 }, /* R486 */
-	{ 0x0000, 0x0000 }, /* R487 */
-	{ 0x0000, 0x0000 }, /* R488 */
-	{ 0x0000, 0x0000 }, /* R489 */
-	{ 0x0000, 0x0000 }, /* R490 */
-	{ 0x0000, 0x0000 }, /* R491 */
-	{ 0x0000, 0x0000 }, /* R492 */
-	{ 0x0000, 0x0000 }, /* R493 */
-	{ 0x0000, 0x0000 }, /* R494 */
-	{ 0x0000, 0x0000 }, /* R495 */
-	{ 0x0000, 0x0000 }, /* R496 */
-	{ 0x0000, 0x0000 }, /* R497 */
-	{ 0x0000, 0x0000 }, /* R498 */
-	{ 0x0000, 0x0000 }, /* R499 */
-	{ 0x0000, 0x0000 }, /* R500 */
-	{ 0x0000, 0x0000 }, /* R501 */
-	{ 0x0000, 0x0000 }, /* R502 */
-	{ 0x0000, 0x0000 }, /* R503 */
-	{ 0x0000, 0x0000 }, /* R504 */
-	{ 0x0000, 0x0000 }, /* R505 */
-	{ 0x0000, 0x0000 }, /* R506 */
-	{ 0x0000, 0x0000 }, /* R507 */
-	{ 0x0000, 0x0000 }, /* R508 */
-	{ 0x0000, 0x0000 }, /* R509 */
-	{ 0x0000, 0x0000 }, /* R510 */
-	{ 0x0000, 0x0000 }, /* R511 */
-	{ 0x001F, 0x001F }, /* R512   - AIF1 Clocking (1) */
-	{ 0x003F, 0x003F }, /* R513   - AIF1 Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R514 */
-	{ 0x0000, 0x0000 }, /* R515 */
-	{ 0x001F, 0x001F }, /* R516   - AIF2 Clocking (1) */
-	{ 0x003F, 0x003F }, /* R517   - AIF2 Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R518 */
-	{ 0x0000, 0x0000 }, /* R519 */
-	{ 0x001F, 0x001F }, /* R520   - Clocking (1) */
-	{ 0x0777, 0x0777 }, /* R521   - Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R522 */
-	{ 0x0000, 0x0000 }, /* R523 */
-	{ 0x0000, 0x0000 }, /* R524 */
-	{ 0x0000, 0x0000 }, /* R525 */
-	{ 0x0000, 0x0000 }, /* R526 */
-	{ 0x0000, 0x0000 }, /* R527 */
-	{ 0x00FF, 0x00FF }, /* R528   - AIF1 Rate */
-	{ 0x00FF, 0x00FF }, /* R529   - AIF2 Rate */
-	{ 0x000F, 0x0000 }, /* R530   - Rate Status */
-	{ 0x0000, 0x0000 }, /* R531 */
-	{ 0x0000, 0x0000 }, /* R532 */
-	{ 0x0000, 0x0000 }, /* R533 */
-	{ 0x0000, 0x0000 }, /* R534 */
-	{ 0x0000, 0x0000 }, /* R535 */
-	{ 0x0000, 0x0000 }, /* R536 */
-	{ 0x0000, 0x0000 }, /* R537 */
-	{ 0x0000, 0x0000 }, /* R538 */
-	{ 0x0000, 0x0000 }, /* R539 */
-	{ 0x0000, 0x0000 }, /* R540 */
-	{ 0x0000, 0x0000 }, /* R541 */
-	{ 0x0000, 0x0000 }, /* R542 */
-	{ 0x0000, 0x0000 }, /* R543 */
-	{ 0x0007, 0x0007 }, /* R544   - FLL1 Control (1) */
-	{ 0x3F77, 0x3F77 }, /* R545   - FLL1 Control (2) */
-	{ 0xFFFF, 0xFFFF }, /* R546   - FLL1 Control (3) */
-	{ 0x7FEF, 0x7FEF }, /* R547   - FLL1 Control (4) */
-	{ 0x1FDB, 0x1FDB }, /* R548   - FLL1 Control (5) */
-	{ 0x0000, 0x0000 }, /* R549 */
-	{ 0x0000, 0x0000 }, /* R550 */
-	{ 0x0000, 0x0000 }, /* R551 */
-	{ 0x0000, 0x0000 }, /* R552 */
-	{ 0x0000, 0x0000 }, /* R553 */
-	{ 0x0000, 0x0000 }, /* R554 */
-	{ 0x0000, 0x0000 }, /* R555 */
-	{ 0x0000, 0x0000 }, /* R556 */
-	{ 0x0000, 0x0000 }, /* R557 */
-	{ 0x0000, 0x0000 }, /* R558 */
-	{ 0x0000, 0x0000 }, /* R559 */
-	{ 0x0000, 0x0000 }, /* R560 */
-	{ 0x0000, 0x0000 }, /* R561 */
-	{ 0x0000, 0x0000 }, /* R562 */
-	{ 0x0000, 0x0000 }, /* R563 */
-	{ 0x0000, 0x0000 }, /* R564 */
-	{ 0x0000, 0x0000 }, /* R565 */
-	{ 0x0000, 0x0000 }, /* R566 */
-	{ 0x0000, 0x0000 }, /* R567 */
-	{ 0x0000, 0x0000 }, /* R568 */
-	{ 0x0000, 0x0000 }, /* R569 */
-	{ 0x0000, 0x0000 }, /* R570 */
-	{ 0x0000, 0x0000 }, /* R571 */
-	{ 0x0000, 0x0000 }, /* R572 */
-	{ 0x0000, 0x0000 }, /* R573 */
-	{ 0x0000, 0x0000 }, /* R574 */
-	{ 0x0000, 0x0000 }, /* R575 */
-	{ 0x0007, 0x0007 }, /* R576   - FLL2 Control (1) */
-	{ 0x3F77, 0x3F77 }, /* R577   - FLL2 Control (2) */
-	{ 0xFFFF, 0xFFFF }, /* R578   - FLL2 Control (3) */
-	{ 0x7FEF, 0x7FEF }, /* R579   - FLL2 Control (4) */
-	{ 0x1FDB, 0x1FDB }, /* R580   - FLL2 Control (5) */
-	{ 0x0000, 0x0000 }, /* R581 */
-	{ 0x0000, 0x0000 }, /* R582 */
-	{ 0x0000, 0x0000 }, /* R583 */
-	{ 0x0000, 0x0000 }, /* R584 */
-	{ 0x0000, 0x0000 }, /* R585 */
-	{ 0x0000, 0x0000 }, /* R586 */
-	{ 0x0000, 0x0000 }, /* R587 */
-	{ 0x0000, 0x0000 }, /* R588 */
-	{ 0x0000, 0x0000 }, /* R589 */
-	{ 0x0000, 0x0000 }, /* R590 */
-	{ 0x0000, 0x0000 }, /* R591 */
-	{ 0x0000, 0x0000 }, /* R592 */
-	{ 0x0000, 0x0000 }, /* R593 */
-	{ 0x0000, 0x0000 }, /* R594 */
-	{ 0x0000, 0x0000 }, /* R595 */
-	{ 0x0000, 0x0000 }, /* R596 */
-	{ 0x0000, 0x0000 }, /* R597 */
-	{ 0x0000, 0x0000 }, /* R598 */
-	{ 0x0000, 0x0000 }, /* R599 */
-	{ 0x0000, 0x0000 }, /* R600 */
-	{ 0x0000, 0x0000 }, /* R601 */
-	{ 0x0000, 0x0000 }, /* R602 */
-	{ 0x0000, 0x0000 }, /* R603 */
-	{ 0x0000, 0x0000 }, /* R604 */
-	{ 0x0000, 0x0000 }, /* R605 */
-	{ 0x0000, 0x0000 }, /* R606 */
-	{ 0x0000, 0x0000 }, /* R607 */
-	{ 0x0000, 0x0000 }, /* R608 */
-	{ 0x0000, 0x0000 }, /* R609 */
-	{ 0x0000, 0x0000 }, /* R610 */
-	{ 0x0000, 0x0000 }, /* R611 */
-	{ 0x0000, 0x0000 }, /* R612 */
-	{ 0x0000, 0x0000 }, /* R613 */
-	{ 0x0000, 0x0000 }, /* R614 */
-	{ 0x0000, 0x0000 }, /* R615 */
-	{ 0x0000, 0x0000 }, /* R616 */
-	{ 0x0000, 0x0000 }, /* R617 */
-	{ 0x0000, 0x0000 }, /* R618 */
-	{ 0x0000, 0x0000 }, /* R619 */
-	{ 0x0000, 0x0000 }, /* R620 */
-	{ 0x0000, 0x0000 }, /* R621 */
-	{ 0x0000, 0x0000 }, /* R622 */
-	{ 0x0000, 0x0000 }, /* R623 */
-	{ 0x0000, 0x0000 }, /* R624 */
-	{ 0x0000, 0x0000 }, /* R625 */
-	{ 0x0000, 0x0000 }, /* R626 */
-	{ 0x0000, 0x0000 }, /* R627 */
-	{ 0x0000, 0x0000 }, /* R628 */
-	{ 0x0000, 0x0000 }, /* R629 */
-	{ 0x0000, 0x0000 }, /* R630 */
-	{ 0x0000, 0x0000 }, /* R631 */
-	{ 0x0000, 0x0000 }, /* R632 */
-	{ 0x0000, 0x0000 }, /* R633 */
-	{ 0x0000, 0x0000 }, /* R634 */
-	{ 0x0000, 0x0000 }, /* R635 */
-	{ 0x0000, 0x0000 }, /* R636 */
-	{ 0x0000, 0x0000 }, /* R637 */
-	{ 0x0000, 0x0000 }, /* R638 */
-	{ 0x0000, 0x0000 }, /* R639 */
-	{ 0x0000, 0x0000 }, /* R640 */
-	{ 0x0000, 0x0000 }, /* R641 */
-	{ 0x0000, 0x0000 }, /* R642 */
-	{ 0x0000, 0x0000 }, /* R643 */
-	{ 0x0000, 0x0000 }, /* R644 */
-	{ 0x0000, 0x0000 }, /* R645 */
-	{ 0x0000, 0x0000 }, /* R646 */
-	{ 0x0000, 0x0000 }, /* R647 */
-	{ 0x0000, 0x0000 }, /* R648 */
-	{ 0x0000, 0x0000 }, /* R649 */
-	{ 0x0000, 0x0000 }, /* R650 */
-	{ 0x0000, 0x0000 }, /* R651 */
-	{ 0x0000, 0x0000 }, /* R652 */
-	{ 0x0000, 0x0000 }, /* R653 */
-	{ 0x0000, 0x0000 }, /* R654 */
-	{ 0x0000, 0x0000 }, /* R655 */
-	{ 0x0000, 0x0000 }, /* R656 */
-	{ 0x0000, 0x0000 }, /* R657 */
-	{ 0x0000, 0x0000 }, /* R658 */
-	{ 0x0000, 0x0000 }, /* R659 */
-	{ 0x0000, 0x0000 }, /* R660 */
-	{ 0x0000, 0x0000 }, /* R661 */
-	{ 0x0000, 0x0000 }, /* R662 */
-	{ 0x0000, 0x0000 }, /* R663 */
-	{ 0x0000, 0x0000 }, /* R664 */
-	{ 0x0000, 0x0000 }, /* R665 */
-	{ 0x0000, 0x0000 }, /* R666 */
-	{ 0x0000, 0x0000 }, /* R667 */
-	{ 0x0000, 0x0000 }, /* R668 */
-	{ 0x0000, 0x0000 }, /* R669 */
-	{ 0x0000, 0x0000 }, /* R670 */
-	{ 0x0000, 0x0000 }, /* R671 */
-	{ 0x0000, 0x0000 }, /* R672 */
-	{ 0x0000, 0x0000 }, /* R673 */
-	{ 0x0000, 0x0000 }, /* R674 */
-	{ 0x0000, 0x0000 }, /* R675 */
-	{ 0x0000, 0x0000 }, /* R676 */
-	{ 0x0000, 0x0000 }, /* R677 */
-	{ 0x0000, 0x0000 }, /* R678 */
-	{ 0x0000, 0x0000 }, /* R679 */
-	{ 0x0000, 0x0000 }, /* R680 */
-	{ 0x0000, 0x0000 }, /* R681 */
-	{ 0x0000, 0x0000 }, /* R682 */
-	{ 0x0000, 0x0000 }, /* R683 */
-	{ 0x0000, 0x0000 }, /* R684 */
-	{ 0x0000, 0x0000 }, /* R685 */
-	{ 0x0000, 0x0000 }, /* R686 */
-	{ 0x0000, 0x0000 }, /* R687 */
-	{ 0x0000, 0x0000 }, /* R688 */
-	{ 0x0000, 0x0000 }, /* R689 */
-	{ 0x0000, 0x0000 }, /* R690 */
-	{ 0x0000, 0x0000 }, /* R691 */
-	{ 0x0000, 0x0000 }, /* R692 */
-	{ 0x0000, 0x0000 }, /* R693 */
-	{ 0x0000, 0x0000 }, /* R694 */
-	{ 0x0000, 0x0000 }, /* R695 */
-	{ 0x0000, 0x0000 }, /* R696 */
-	{ 0x0000, 0x0000 }, /* R697 */
-	{ 0x0000, 0x0000 }, /* R698 */
-	{ 0x0000, 0x0000 }, /* R699 */
-	{ 0x0000, 0x0000 }, /* R700 */
-	{ 0x0000, 0x0000 }, /* R701 */
-	{ 0x0000, 0x0000 }, /* R702 */
-	{ 0x0000, 0x0000 }, /* R703 */
-	{ 0x0000, 0x0000 }, /* R704 */
-	{ 0x0000, 0x0000 }, /* R705 */
-	{ 0x0000, 0x0000 }, /* R706 */
-	{ 0x0000, 0x0000 }, /* R707 */
-	{ 0x0000, 0x0000 }, /* R708 */
-	{ 0x0000, 0x0000 }, /* R709 */
-	{ 0x0000, 0x0000 }, /* R710 */
-	{ 0x0000, 0x0000 }, /* R711 */
-	{ 0x0000, 0x0000 }, /* R712 */
-	{ 0x0000, 0x0000 }, /* R713 */
-	{ 0x0000, 0x0000 }, /* R714 */
-	{ 0x0000, 0x0000 }, /* R715 */
-	{ 0x0000, 0x0000 }, /* R716 */
-	{ 0x0000, 0x0000 }, /* R717 */
-	{ 0x0000, 0x0000 }, /* R718 */
-	{ 0x0000, 0x0000 }, /* R719 */
-	{ 0x0000, 0x0000 }, /* R720 */
-	{ 0x0000, 0x0000 }, /* R721 */
-	{ 0x0000, 0x0000 }, /* R722 */
-	{ 0x0000, 0x0000 }, /* R723 */
-	{ 0x0000, 0x0000 }, /* R724 */
-	{ 0x0000, 0x0000 }, /* R725 */
-	{ 0x0000, 0x0000 }, /* R726 */
-	{ 0x0000, 0x0000 }, /* R727 */
-	{ 0x0000, 0x0000 }, /* R728 */
-	{ 0x0000, 0x0000 }, /* R729 */
-	{ 0x0000, 0x0000 }, /* R730 */
-	{ 0x0000, 0x0000 }, /* R731 */
-	{ 0x0000, 0x0000 }, /* R732 */
-	{ 0x0000, 0x0000 }, /* R733 */
-	{ 0x0000, 0x0000 }, /* R734 */
-	{ 0x0000, 0x0000 }, /* R735 */
-	{ 0x0000, 0x0000 }, /* R736 */
-	{ 0x0000, 0x0000 }, /* R737 */
-	{ 0x0000, 0x0000 }, /* R738 */
-	{ 0x0000, 0x0000 }, /* R739 */
-	{ 0x0000, 0x0000 }, /* R740 */
-	{ 0x0000, 0x0000 }, /* R741 */
-	{ 0x0000, 0x0000 }, /* R742 */
-	{ 0x0000, 0x0000 }, /* R743 */
-	{ 0x0000, 0x0000 }, /* R744 */
-	{ 0x0000, 0x0000 }, /* R745 */
-	{ 0x0000, 0x0000 }, /* R746 */
-	{ 0x0000, 0x0000 }, /* R747 */
-	{ 0x0000, 0x0000 }, /* R748 */
-	{ 0x0000, 0x0000 }, /* R749 */
-	{ 0x0000, 0x0000 }, /* R750 */
-	{ 0x0000, 0x0000 }, /* R751 */
-	{ 0x0000, 0x0000 }, /* R752 */
-	{ 0x0000, 0x0000 }, /* R753 */
-	{ 0x0000, 0x0000 }, /* R754 */
-	{ 0x0000, 0x0000 }, /* R755 */
-	{ 0x0000, 0x0000 }, /* R756 */
-	{ 0x0000, 0x0000 }, /* R757 */
-	{ 0x0000, 0x0000 }, /* R758 */
-	{ 0x0000, 0x0000 }, /* R759 */
-	{ 0x0000, 0x0000 }, /* R760 */
-	{ 0x0000, 0x0000 }, /* R761 */
-	{ 0x0000, 0x0000 }, /* R762 */
-	{ 0x0000, 0x0000 }, /* R763 */
-	{ 0x0000, 0x0000 }, /* R764 */
-	{ 0x0000, 0x0000 }, /* R765 */
-	{ 0x0000, 0x0000 }, /* R766 */
-	{ 0x0000, 0x0000 }, /* R767 */
-	{ 0xE1F8, 0xE1F8 }, /* R768   - AIF1 Control (1) */
-	{ 0xCD1F, 0xCD1F }, /* R769   - AIF1 Control (2) */
-	{ 0xF000, 0xF000 }, /* R770   - AIF1 Master/Slave */
-	{ 0x01F0, 0x01F0 }, /* R771   - AIF1 BCLK */
-	{ 0x0FFF, 0x0FFF }, /* R772   - AIF1ADC LRCLK */
-	{ 0x0FFF, 0x0FFF }, /* R773   - AIF1DAC LRCLK */
-	{ 0x0003, 0x0003 }, /* R774   - AIF1DAC Data */
-	{ 0x0003, 0x0003 }, /* R775   - AIF1ADC Data */
-	{ 0x0000, 0x0000 }, /* R776 */
-	{ 0x0000, 0x0000 }, /* R777 */
-	{ 0x0000, 0x0000 }, /* R778 */
-	{ 0x0000, 0x0000 }, /* R779 */
-	{ 0x0000, 0x0000 }, /* R780 */
-	{ 0x0000, 0x0000 }, /* R781 */
-	{ 0x0000, 0x0000 }, /* R782 */
-	{ 0x0000, 0x0000 }, /* R783 */
-	{ 0xF1F8, 0xF1F8 }, /* R784   - AIF2 Control (1) */
-	{ 0xFD1F, 0xFD1F }, /* R785   - AIF2 Control (2) */
-	{ 0xF000, 0xF000 }, /* R786   - AIF2 Master/Slave */
-	{ 0x01F0, 0x01F0 }, /* R787   - AIF2 BCLK */
-	{ 0x0FFF, 0x0FFF }, /* R788   - AIF2ADC LRCLK */
-	{ 0x0FFF, 0x0FFF }, /* R789   - AIF2DAC LRCLK */
-	{ 0x0003, 0x0003 }, /* R790   - AIF2DAC Data */
-	{ 0x0003, 0x0003 }, /* R791   - AIF2ADC Data */
-	{ 0x0000, 0x0000 }, /* R792 */
-	{ 0x0000, 0x0000 }, /* R793 */
-	{ 0x0000, 0x0000 }, /* R794 */
-	{ 0x0000, 0x0000 }, /* R795 */
-	{ 0x0000, 0x0000 }, /* R796 */
-	{ 0x0000, 0x0000 }, /* R797 */
-	{ 0x0000, 0x0000 }, /* R798 */
-	{ 0x0000, 0x0000 }, /* R799 */
-	{ 0x0000, 0x0000 }, /* R800 */
-	{ 0x0000, 0x0000 }, /* R801 */
-	{ 0x0000, 0x0000 }, /* R802 */
-	{ 0x0000, 0x0000 }, /* R803 */
-	{ 0x0000, 0x0000 }, /* R804 */
-	{ 0x0000, 0x0000 }, /* R805 */
-	{ 0x0000, 0x0000 }, /* R806 */
-	{ 0x0000, 0x0000 }, /* R807 */
-	{ 0x0000, 0x0000 }, /* R808 */
-	{ 0x0000, 0x0000 }, /* R809 */
-	{ 0x0000, 0x0000 }, /* R810 */
-	{ 0x0000, 0x0000 }, /* R811 */
-	{ 0x0000, 0x0000 }, /* R812 */
-	{ 0x0000, 0x0000 }, /* R813 */
-	{ 0x0000, 0x0000 }, /* R814 */
-	{ 0x0000, 0x0000 }, /* R815 */
-	{ 0x0000, 0x0000 }, /* R816 */
-	{ 0x0000, 0x0000 }, /* R817 */
-	{ 0x0000, 0x0000 }, /* R818 */
-	{ 0x0000, 0x0000 }, /* R819 */
-	{ 0x0000, 0x0000 }, /* R820 */
-	{ 0x0000, 0x0000 }, /* R821 */
-	{ 0x0000, 0x0000 }, /* R822 */
-	{ 0x0000, 0x0000 }, /* R823 */
-	{ 0x0000, 0x0000 }, /* R824 */
-	{ 0x0000, 0x0000 }, /* R825 */
-	{ 0x0000, 0x0000 }, /* R826 */
-	{ 0x0000, 0x0000 }, /* R827 */
-	{ 0x0000, 0x0000 }, /* R828 */
-	{ 0x0000, 0x0000 }, /* R829 */
-	{ 0x0000, 0x0000 }, /* R830 */
-	{ 0x0000, 0x0000 }, /* R831 */
-	{ 0x0000, 0x0000 }, /* R832 */
-	{ 0x0000, 0x0000 }, /* R833 */
-	{ 0x0000, 0x0000 }, /* R834 */
-	{ 0x0000, 0x0000 }, /* R835 */
-	{ 0x0000, 0x0000 }, /* R836 */
-	{ 0x0000, 0x0000 }, /* R837 */
-	{ 0x0000, 0x0000 }, /* R838 */
-	{ 0x0000, 0x0000 }, /* R839 */
-	{ 0x0000, 0x0000 }, /* R840 */
-	{ 0x0000, 0x0000 }, /* R841 */
-	{ 0x0000, 0x0000 }, /* R842 */
-	{ 0x0000, 0x0000 }, /* R843 */
-	{ 0x0000, 0x0000 }, /* R844 */
-	{ 0x0000, 0x0000 }, /* R845 */
-	{ 0x0000, 0x0000 }, /* R846 */
-	{ 0x0000, 0x0000 }, /* R847 */
-	{ 0x0000, 0x0000 }, /* R848 */
-	{ 0x0000, 0x0000 }, /* R849 */
-	{ 0x0000, 0x0000 }, /* R850 */
-	{ 0x0000, 0x0000 }, /* R851 */
-	{ 0x0000, 0x0000 }, /* R852 */
-	{ 0x0000, 0x0000 }, /* R853 */
-	{ 0x0000, 0x0000 }, /* R854 */
-	{ 0x0000, 0x0000 }, /* R855 */
-	{ 0x0000, 0x0000 }, /* R856 */
-	{ 0x0000, 0x0000 }, /* R857 */
-	{ 0x0000, 0x0000 }, /* R858 */
-	{ 0x0000, 0x0000 }, /* R859 */
-	{ 0x0000, 0x0000 }, /* R860 */
-	{ 0x0000, 0x0000 }, /* R861 */
-	{ 0x0000, 0x0000 }, /* R862 */
-	{ 0x0000, 0x0000 }, /* R863 */
-	{ 0x0000, 0x0000 }, /* R864 */
-	{ 0x0000, 0x0000 }, /* R865 */
-	{ 0x0000, 0x0000 }, /* R866 */
-	{ 0x0000, 0x0000 }, /* R867 */
-	{ 0x0000, 0x0000 }, /* R868 */
-	{ 0x0000, 0x0000 }, /* R869 */
-	{ 0x0000, 0x0000 }, /* R870 */
-	{ 0x0000, 0x0000 }, /* R871 */
-	{ 0x0000, 0x0000 }, /* R872 */
-	{ 0x0000, 0x0000 }, /* R873 */
-	{ 0x0000, 0x0000 }, /* R874 */
-	{ 0x0000, 0x0000 }, /* R875 */
-	{ 0x0000, 0x0000 }, /* R876 */
-	{ 0x0000, 0x0000 }, /* R877 */
-	{ 0x0000, 0x0000 }, /* R878 */
-	{ 0x0000, 0x0000 }, /* R879 */
-	{ 0x0000, 0x0000 }, /* R880 */
-	{ 0x0000, 0x0000 }, /* R881 */
-	{ 0x0000, 0x0000 }, /* R882 */
-	{ 0x0000, 0x0000 }, /* R883 */
-	{ 0x0000, 0x0000 }, /* R884 */
-	{ 0x0000, 0x0000 }, /* R885 */
-	{ 0x0000, 0x0000 }, /* R886 */
-	{ 0x0000, 0x0000 }, /* R887 */
-	{ 0x0000, 0x0000 }, /* R888 */
-	{ 0x0000, 0x0000 }, /* R889 */
-	{ 0x0000, 0x0000 }, /* R890 */
-	{ 0x0000, 0x0000 }, /* R891 */
-	{ 0x0000, 0x0000 }, /* R892 */
-	{ 0x0000, 0x0000 }, /* R893 */
-	{ 0x0000, 0x0000 }, /* R894 */
-	{ 0x0000, 0x0000 }, /* R895 */
-	{ 0x0000, 0x0000 }, /* R896 */
-	{ 0x0000, 0x0000 }, /* R897 */
-	{ 0x0000, 0x0000 }, /* R898 */
-	{ 0x0000, 0x0000 }, /* R899 */
-	{ 0x0000, 0x0000 }, /* R900 */
-	{ 0x0000, 0x0000 }, /* R901 */
-	{ 0x0000, 0x0000 }, /* R902 */
-	{ 0x0000, 0x0000 }, /* R903 */
-	{ 0x0000, 0x0000 }, /* R904 */
-	{ 0x0000, 0x0000 }, /* R905 */
-	{ 0x0000, 0x0000 }, /* R906 */
-	{ 0x0000, 0x0000 }, /* R907 */
-	{ 0x0000, 0x0000 }, /* R908 */
-	{ 0x0000, 0x0000 }, /* R909 */
-	{ 0x0000, 0x0000 }, /* R910 */
-	{ 0x0000, 0x0000 }, /* R911 */
-	{ 0x0000, 0x0000 }, /* R912 */
-	{ 0x0000, 0x0000 }, /* R913 */
-	{ 0x0000, 0x0000 }, /* R914 */
-	{ 0x0000, 0x0000 }, /* R915 */
-	{ 0x0000, 0x0000 }, /* R916 */
-	{ 0x0000, 0x0000 }, /* R917 */
-	{ 0x0000, 0x0000 }, /* R918 */
-	{ 0x0000, 0x0000 }, /* R919 */
-	{ 0x0000, 0x0000 }, /* R920 */
-	{ 0x0000, 0x0000 }, /* R921 */
-	{ 0x0000, 0x0000 }, /* R922 */
-	{ 0x0000, 0x0000 }, /* R923 */
-	{ 0x0000, 0x0000 }, /* R924 */
-	{ 0x0000, 0x0000 }, /* R925 */
-	{ 0x0000, 0x0000 }, /* R926 */
-	{ 0x0000, 0x0000 }, /* R927 */
-	{ 0x0000, 0x0000 }, /* R928 */
-	{ 0x0000, 0x0000 }, /* R929 */
-	{ 0x0000, 0x0000 }, /* R930 */
-	{ 0x0000, 0x0000 }, /* R931 */
-	{ 0x0000, 0x0000 }, /* R932 */
-	{ 0x0000, 0x0000 }, /* R933 */
-	{ 0x0000, 0x0000 }, /* R934 */
-	{ 0x0000, 0x0000 }, /* R935 */
-	{ 0x0000, 0x0000 }, /* R936 */
-	{ 0x0000, 0x0000 }, /* R937 */
-	{ 0x0000, 0x0000 }, /* R938 */
-	{ 0x0000, 0x0000 }, /* R939 */
-	{ 0x0000, 0x0000 }, /* R940 */
-	{ 0x0000, 0x0000 }, /* R941 */
-	{ 0x0000, 0x0000 }, /* R942 */
-	{ 0x0000, 0x0000 }, /* R943 */
-	{ 0x0000, 0x0000 }, /* R944 */
-	{ 0x0000, 0x0000 }, /* R945 */
-	{ 0x0000, 0x0000 }, /* R946 */
-	{ 0x0000, 0x0000 }, /* R947 */
-	{ 0x0000, 0x0000 }, /* R948 */
-	{ 0x0000, 0x0000 }, /* R949 */
-	{ 0x0000, 0x0000 }, /* R950 */
-	{ 0x0000, 0x0000 }, /* R951 */
-	{ 0x0000, 0x0000 }, /* R952 */
-	{ 0x0000, 0x0000 }, /* R953 */
-	{ 0x0000, 0x0000 }, /* R954 */
-	{ 0x0000, 0x0000 }, /* R955 */
-	{ 0x0000, 0x0000 }, /* R956 */
-	{ 0x0000, 0x0000 }, /* R957 */
-	{ 0x0000, 0x0000 }, /* R958 */
-	{ 0x0000, 0x0000 }, /* R959 */
-	{ 0x0000, 0x0000 }, /* R960 */
-	{ 0x0000, 0x0000 }, /* R961 */
-	{ 0x0000, 0x0000 }, /* R962 */
-	{ 0x0000, 0x0000 }, /* R963 */
-	{ 0x0000, 0x0000 }, /* R964 */
-	{ 0x0000, 0x0000 }, /* R965 */
-	{ 0x0000, 0x0000 }, /* R966 */
-	{ 0x0000, 0x0000 }, /* R967 */
-	{ 0x0000, 0x0000 }, /* R968 */
-	{ 0x0000, 0x0000 }, /* R969 */
-	{ 0x0000, 0x0000 }, /* R970 */
-	{ 0x0000, 0x0000 }, /* R971 */
-	{ 0x0000, 0x0000 }, /* R972 */
-	{ 0x0000, 0x0000 }, /* R973 */
-	{ 0x0000, 0x0000 }, /* R974 */
-	{ 0x0000, 0x0000 }, /* R975 */
-	{ 0x0000, 0x0000 }, /* R976 */
-	{ 0x0000, 0x0000 }, /* R977 */
-	{ 0x0000, 0x0000 }, /* R978 */
-	{ 0x0000, 0x0000 }, /* R979 */
-	{ 0x0000, 0x0000 }, /* R980 */
-	{ 0x0000, 0x0000 }, /* R981 */
-	{ 0x0000, 0x0000 }, /* R982 */
-	{ 0x0000, 0x0000 }, /* R983 */
-	{ 0x0000, 0x0000 }, /* R984 */
-	{ 0x0000, 0x0000 }, /* R985 */
-	{ 0x0000, 0x0000 }, /* R986 */
-	{ 0x0000, 0x0000 }, /* R987 */
-	{ 0x0000, 0x0000 }, /* R988 */
-	{ 0x0000, 0x0000 }, /* R989 */
-	{ 0x0000, 0x0000 }, /* R990 */
-	{ 0x0000, 0x0000 }, /* R991 */
-	{ 0x0000, 0x0000 }, /* R992 */
-	{ 0x0000, 0x0000 }, /* R993 */
-	{ 0x0000, 0x0000 }, /* R994 */
-	{ 0x0000, 0x0000 }, /* R995 */
-	{ 0x0000, 0x0000 }, /* R996 */
-	{ 0x0000, 0x0000 }, /* R997 */
-	{ 0x0000, 0x0000 }, /* R998 */
-	{ 0x0000, 0x0000 }, /* R999 */
-	{ 0x0000, 0x0000 }, /* R1000 */
-	{ 0x0000, 0x0000 }, /* R1001 */
-	{ 0x0000, 0x0000 }, /* R1002 */
-	{ 0x0000, 0x0000 }, /* R1003 */
-	{ 0x0000, 0x0000 }, /* R1004 */
-	{ 0x0000, 0x0000 }, /* R1005 */
-	{ 0x0000, 0x0000 }, /* R1006 */
-	{ 0x0000, 0x0000 }, /* R1007 */
-	{ 0x0000, 0x0000 }, /* R1008 */
-	{ 0x0000, 0x0000 }, /* R1009 */
-	{ 0x0000, 0x0000 }, /* R1010 */
-	{ 0x0000, 0x0000 }, /* R1011 */
-	{ 0x0000, 0x0000 }, /* R1012 */
-	{ 0x0000, 0x0000 }, /* R1013 */
-	{ 0x0000, 0x0000 }, /* R1014 */
-	{ 0x0000, 0x0000 }, /* R1015 */
-	{ 0x0000, 0x0000 }, /* R1016 */
-	{ 0x0000, 0x0000 }, /* R1017 */
-	{ 0x0000, 0x0000 }, /* R1018 */
-	{ 0x0000, 0x0000 }, /* R1019 */
-	{ 0x0000, 0x0000 }, /* R1020 */
-	{ 0x0000, 0x0000 }, /* R1021 */
-	{ 0x0000, 0x0000 }, /* R1022 */
-	{ 0x0000, 0x0000 }, /* R1023 */
-	{ 0x00FF, 0x01FF }, /* R1024  - AIF1 ADC1 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1025  - AIF1 ADC1 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1026  - AIF1 DAC1 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1027  - AIF1 DAC1 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1028  - AIF1 ADC2 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1029  - AIF1 ADC2 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1030  - AIF1 DAC2 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1031  - AIF1 DAC2 Right Volume */
-	{ 0x0000, 0x0000 }, /* R1032 */
-	{ 0x0000, 0x0000 }, /* R1033 */
-	{ 0x0000, 0x0000 }, /* R1034 */
-	{ 0x0000, 0x0000 }, /* R1035 */
-	{ 0x0000, 0x0000 }, /* R1036 */
-	{ 0x0000, 0x0000 }, /* R1037 */
-	{ 0x0000, 0x0000 }, /* R1038 */
-	{ 0x0000, 0x0000 }, /* R1039 */
-	{ 0xF800, 0xF800 }, /* R1040  - AIF1 ADC1 Filters */
-	{ 0x7800, 0x7800 }, /* R1041  - AIF1 ADC2 Filters */
-	{ 0x0000, 0x0000 }, /* R1042 */
-	{ 0x0000, 0x0000 }, /* R1043 */
-	{ 0x0000, 0x0000 }, /* R1044 */
-	{ 0x0000, 0x0000 }, /* R1045 */
-	{ 0x0000, 0x0000 }, /* R1046 */
-	{ 0x0000, 0x0000 }, /* R1047 */
-	{ 0x0000, 0x0000 }, /* R1048 */
-	{ 0x0000, 0x0000 }, /* R1049 */
-	{ 0x0000, 0x0000 }, /* R1050 */
-	{ 0x0000, 0x0000 }, /* R1051 */
-	{ 0x0000, 0x0000 }, /* R1052 */
-	{ 0x0000, 0x0000 }, /* R1053 */
-	{ 0x0000, 0x0000 }, /* R1054 */
-	{ 0x0000, 0x0000 }, /* R1055 */
-	{ 0x02B6, 0x02B6 }, /* R1056  - AIF1 DAC1 Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1057  - AIF1 DAC1 Filters (2) */
-	{ 0x02B6, 0x02B6 }, /* R1058  - AIF1 DAC2 Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1059  - AIF1 DAC2 Filters (2) */
-	{ 0x0000, 0x0000 }, /* R1060 */
-	{ 0x0000, 0x0000 }, /* R1061 */
-	{ 0x0000, 0x0000 }, /* R1062 */
-	{ 0x0000, 0x0000 }, /* R1063 */
-	{ 0x0000, 0x0000 }, /* R1064 */
-	{ 0x0000, 0x0000 }, /* R1065 */
-	{ 0x0000, 0x0000 }, /* R1066 */
-	{ 0x0000, 0x0000 }, /* R1067 */
-	{ 0x0000, 0x0000 }, /* R1068 */
-	{ 0x0000, 0x0000 }, /* R1069 */
-	{ 0x0000, 0x0000 }, /* R1070 */
-	{ 0x0000, 0x0000 }, /* R1071 */
-	{ 0x0000, 0x0000 }, /* R1072 */
-	{ 0x0000, 0x0000 }, /* R1073 */
-	{ 0x0000, 0x0000 }, /* R1074 */
-	{ 0x0000, 0x0000 }, /* R1075 */
-	{ 0x0000, 0x0000 }, /* R1076 */
-	{ 0x0000, 0x0000 }, /* R1077 */
-	{ 0x0000, 0x0000 }, /* R1078 */
-	{ 0x0000, 0x0000 }, /* R1079 */
-	{ 0x0000, 0x0000 }, /* R1080 */
-	{ 0x0000, 0x0000 }, /* R1081 */
-	{ 0x0000, 0x0000 }, /* R1082 */
-	{ 0x0000, 0x0000 }, /* R1083 */
-	{ 0x0000, 0x0000 }, /* R1084 */
-	{ 0x0000, 0x0000 }, /* R1085 */
-	{ 0x0000, 0x0000 }, /* R1086 */
-	{ 0x0000, 0x0000 }, /* R1087 */
-	{ 0xFFFF, 0xFFFF }, /* R1088  - AIF1 DRC1 (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1089  - AIF1 DRC1 (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1090  - AIF1 DRC1 (3) */
-	{ 0x07FF, 0x07FF }, /* R1091  - AIF1 DRC1 (4) */
-	{ 0x03FF, 0x03FF }, /* R1092  - AIF1 DRC1 (5) */
-	{ 0x0000, 0x0000 }, /* R1093 */
-	{ 0x0000, 0x0000 }, /* R1094 */
-	{ 0x0000, 0x0000 }, /* R1095 */
-	{ 0x0000, 0x0000 }, /* R1096 */
-	{ 0x0000, 0x0000 }, /* R1097 */
-	{ 0x0000, 0x0000 }, /* R1098 */
-	{ 0x0000, 0x0000 }, /* R1099 */
-	{ 0x0000, 0x0000 }, /* R1100 */
-	{ 0x0000, 0x0000 }, /* R1101 */
-	{ 0x0000, 0x0000 }, /* R1102 */
-	{ 0x0000, 0x0000 }, /* R1103 */
-	{ 0xFFFF, 0xFFFF }, /* R1104  - AIF1 DRC2 (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1105  - AIF1 DRC2 (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1106  - AIF1 DRC2 (3) */
-	{ 0x07FF, 0x07FF }, /* R1107  - AIF1 DRC2 (4) */
-	{ 0x03FF, 0x03FF }, /* R1108  - AIF1 DRC2 (5) */
-	{ 0x0000, 0x0000 }, /* R1109 */
-	{ 0x0000, 0x0000 }, /* R1110 */
-	{ 0x0000, 0x0000 }, /* R1111 */
-	{ 0x0000, 0x0000 }, /* R1112 */
-	{ 0x0000, 0x0000 }, /* R1113 */
-	{ 0x0000, 0x0000 }, /* R1114 */
-	{ 0x0000, 0x0000 }, /* R1115 */
-	{ 0x0000, 0x0000 }, /* R1116 */
-	{ 0x0000, 0x0000 }, /* R1117 */
-	{ 0x0000, 0x0000 }, /* R1118 */
-	{ 0x0000, 0x0000 }, /* R1119 */
-	{ 0x0000, 0x0000 }, /* R1120 */
-	{ 0x0000, 0x0000 }, /* R1121 */
-	{ 0x0000, 0x0000 }, /* R1122 */
-	{ 0x0000, 0x0000 }, /* R1123 */
-	{ 0x0000, 0x0000 }, /* R1124 */
-	{ 0x0000, 0x0000 }, /* R1125 */
-	{ 0x0000, 0x0000 }, /* R1126 */
-	{ 0x0000, 0x0000 }, /* R1127 */
-	{ 0x0000, 0x0000 }, /* R1128 */
-	{ 0x0000, 0x0000 }, /* R1129 */
-	{ 0x0000, 0x0000 }, /* R1130 */
-	{ 0x0000, 0x0000 }, /* R1131 */
-	{ 0x0000, 0x0000 }, /* R1132 */
-	{ 0x0000, 0x0000 }, /* R1133 */
-	{ 0x0000, 0x0000 }, /* R1134 */
-	{ 0x0000, 0x0000 }, /* R1135 */
-	{ 0x0000, 0x0000 }, /* R1136 */
-	{ 0x0000, 0x0000 }, /* R1137 */
-	{ 0x0000, 0x0000 }, /* R1138 */
-	{ 0x0000, 0x0000 }, /* R1139 */
-	{ 0x0000, 0x0000 }, /* R1140 */
-	{ 0x0000, 0x0000 }, /* R1141 */
-	{ 0x0000, 0x0000 }, /* R1142 */
-	{ 0x0000, 0x0000 }, /* R1143 */
-	{ 0x0000, 0x0000 }, /* R1144 */
-	{ 0x0000, 0x0000 }, /* R1145 */
-	{ 0x0000, 0x0000 }, /* R1146 */
-	{ 0x0000, 0x0000 }, /* R1147 */
-	{ 0x0000, 0x0000 }, /* R1148 */
-	{ 0x0000, 0x0000 }, /* R1149 */
-	{ 0x0000, 0x0000 }, /* R1150 */
-	{ 0x0000, 0x0000 }, /* R1151 */
-	{ 0xFFFF, 0xFFFF }, /* R1152  - AIF1 DAC1 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1153  - AIF1 DAC1 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1154  - AIF1 DAC1 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1155  - AIF1 DAC1 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1156  - AIF1 DAC1 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1157  - AIF1 DAC1 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1158  - AIF1 DAC1 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1159  - AIF1 DAC1 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1160  - AIF1 DAC1 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1161  - AIF1 DAC1 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1162  - AIF1 DAC1 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1163  - AIF1 DAC1 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1164  - AIF1 DAC1 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1165  - AIF1 DAC1 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1166  - AIF1 DAC1 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1167  - AIF1 DAC1 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1168  - AIF1 DAC1 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1169  - AIF1 DAC1 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1170  - AIF1 DAC1 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1171  - AIF1 DAC1 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1172 */
-	{ 0x0000, 0x0000 }, /* R1173 */
-	{ 0x0000, 0x0000 }, /* R1174 */
-	{ 0x0000, 0x0000 }, /* R1175 */
-	{ 0x0000, 0x0000 }, /* R1176 */
-	{ 0x0000, 0x0000 }, /* R1177 */
-	{ 0x0000, 0x0000 }, /* R1178 */
-	{ 0x0000, 0x0000 }, /* R1179 */
-	{ 0x0000, 0x0000 }, /* R1180 */
-	{ 0x0000, 0x0000 }, /* R1181 */
-	{ 0x0000, 0x0000 }, /* R1182 */
-	{ 0x0000, 0x0000 }, /* R1183 */
-	{ 0xFFFF, 0xFFFF }, /* R1184  - AIF1 DAC2 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1185  - AIF1 DAC2 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1186  - AIF1 DAC2 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1187  - AIF1 DAC2 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1188  - AIF1 DAC2 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1189  - AIF1 DAC2 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1190  - AIF1 DAC2 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1191  - AIF1 DAC2 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1192  - AIF1 DAC2 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1193  - AIF1 DAC2 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1194  - AIF1 DAC2 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1195  - AIF1 DAC2 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1196  - AIF1 DAC2 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1197  - AIF1 DAC2 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1198  - AIF1 DAC2 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1199  - AIF1 DAC2 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1200  - AIF1 DAC2 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1201  - AIF1 DAC2 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1202  - AIF1 DAC2 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1203  - AIF1 DAC2 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1204 */
-	{ 0x0000, 0x0000 }, /* R1205 */
-	{ 0x0000, 0x0000 }, /* R1206 */
-	{ 0x0000, 0x0000 }, /* R1207 */
-	{ 0x0000, 0x0000 }, /* R1208 */
-	{ 0x0000, 0x0000 }, /* R1209 */
-	{ 0x0000, 0x0000 }, /* R1210 */
-	{ 0x0000, 0x0000 }, /* R1211 */
-	{ 0x0000, 0x0000 }, /* R1212 */
-	{ 0x0000, 0x0000 }, /* R1213 */
-	{ 0x0000, 0x0000 }, /* R1214 */
-	{ 0x0000, 0x0000 }, /* R1215 */
-	{ 0x0000, 0x0000 }, /* R1216 */
-	{ 0x0000, 0x0000 }, /* R1217 */
-	{ 0x0000, 0x0000 }, /* R1218 */
-	{ 0x0000, 0x0000 }, /* R1219 */
-	{ 0x0000, 0x0000 }, /* R1220 */
-	{ 0x0000, 0x0000 }, /* R1221 */
-	{ 0x0000, 0x0000 }, /* R1222 */
-	{ 0x0000, 0x0000 }, /* R1223 */
-	{ 0x0000, 0x0000 }, /* R1224 */
-	{ 0x0000, 0x0000 }, /* R1225 */
-	{ 0x0000, 0x0000 }, /* R1226 */
-	{ 0x0000, 0x0000 }, /* R1227 */
-	{ 0x0000, 0x0000 }, /* R1228 */
-	{ 0x0000, 0x0000 }, /* R1229 */
-	{ 0x0000, 0x0000 }, /* R1230 */
-	{ 0x0000, 0x0000 }, /* R1231 */
-	{ 0x0000, 0x0000 }, /* R1232 */
-	{ 0x0000, 0x0000 }, /* R1233 */
-	{ 0x0000, 0x0000 }, /* R1234 */
-	{ 0x0000, 0x0000 }, /* R1235 */
-	{ 0x0000, 0x0000 }, /* R1236 */
-	{ 0x0000, 0x0000 }, /* R1237 */
-	{ 0x0000, 0x0000 }, /* R1238 */
-	{ 0x0000, 0x0000 }, /* R1239 */
-	{ 0x0000, 0x0000 }, /* R1240 */
-	{ 0x0000, 0x0000 }, /* R1241 */
-	{ 0x0000, 0x0000 }, /* R1242 */
-	{ 0x0000, 0x0000 }, /* R1243 */
-	{ 0x0000, 0x0000 }, /* R1244 */
-	{ 0x0000, 0x0000 }, /* R1245 */
-	{ 0x0000, 0x0000 }, /* R1246 */
-	{ 0x0000, 0x0000 }, /* R1247 */
-	{ 0x0000, 0x0000 }, /* R1248 */
-	{ 0x0000, 0x0000 }, /* R1249 */
-	{ 0x0000, 0x0000 }, /* R1250 */
-	{ 0x0000, 0x0000 }, /* R1251 */
-	{ 0x0000, 0x0000 }, /* R1252 */
-	{ 0x0000, 0x0000 }, /* R1253 */
-	{ 0x0000, 0x0000 }, /* R1254 */
-	{ 0x0000, 0x0000 }, /* R1255 */
-	{ 0x0000, 0x0000 }, /* R1256 */
-	{ 0x0000, 0x0000 }, /* R1257 */
-	{ 0x0000, 0x0000 }, /* R1258 */
-	{ 0x0000, 0x0000 }, /* R1259 */
-	{ 0x0000, 0x0000 }, /* R1260 */
-	{ 0x0000, 0x0000 }, /* R1261 */
-	{ 0x0000, 0x0000 }, /* R1262 */
-	{ 0x0000, 0x0000 }, /* R1263 */
-	{ 0x0000, 0x0000 }, /* R1264 */
-	{ 0x0000, 0x0000 }, /* R1265 */
-	{ 0x0000, 0x0000 }, /* R1266 */
-	{ 0x0000, 0x0000 }, /* R1267 */
-	{ 0x0000, 0x0000 }, /* R1268 */
-	{ 0x0000, 0x0000 }, /* R1269 */
-	{ 0x0000, 0x0000 }, /* R1270 */
-	{ 0x0000, 0x0000 }, /* R1271 */
-	{ 0x0000, 0x0000 }, /* R1272 */
-	{ 0x0000, 0x0000 }, /* R1273 */
-	{ 0x0000, 0x0000 }, /* R1274 */
-	{ 0x0000, 0x0000 }, /* R1275 */
-	{ 0x0000, 0x0000 }, /* R1276 */
-	{ 0x0000, 0x0000 }, /* R1277 */
-	{ 0x0000, 0x0000 }, /* R1278 */
-	{ 0x0000, 0x0000 }, /* R1279 */
-	{ 0x00FF, 0x01FF }, /* R1280  - AIF2 ADC Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1281  - AIF2 ADC Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1282  - AIF2 DAC Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1283  - AIF2 DAC Right Volume */
-	{ 0x0000, 0x0000 }, /* R1284 */
-	{ 0x0000, 0x0000 }, /* R1285 */
-	{ 0x0000, 0x0000 }, /* R1286 */
-	{ 0x0000, 0x0000 }, /* R1287 */
-	{ 0x0000, 0x0000 }, /* R1288 */
-	{ 0x0000, 0x0000 }, /* R1289 */
-	{ 0x0000, 0x0000 }, /* R1290 */
-	{ 0x0000, 0x0000 }, /* R1291 */
-	{ 0x0000, 0x0000 }, /* R1292 */
-	{ 0x0000, 0x0000 }, /* R1293 */
-	{ 0x0000, 0x0000 }, /* R1294 */
-	{ 0x0000, 0x0000 }, /* R1295 */
-	{ 0xF800, 0xF800 }, /* R1296  - AIF2 ADC Filters */
-	{ 0x0000, 0x0000 }, /* R1297 */
-	{ 0x0000, 0x0000 }, /* R1298 */
-	{ 0x0000, 0x0000 }, /* R1299 */
-	{ 0x0000, 0x0000 }, /* R1300 */
-	{ 0x0000, 0x0000 }, /* R1301 */
-	{ 0x0000, 0x0000 }, /* R1302 */
-	{ 0x0000, 0x0000 }, /* R1303 */
-	{ 0x0000, 0x0000 }, /* R1304 */
-	{ 0x0000, 0x0000 }, /* R1305 */
-	{ 0x0000, 0x0000 }, /* R1306 */
-	{ 0x0000, 0x0000 }, /* R1307 */
-	{ 0x0000, 0x0000 }, /* R1308 */
-	{ 0x0000, 0x0000 }, /* R1309 */
-	{ 0x0000, 0x0000 }, /* R1310 */
-	{ 0x0000, 0x0000 }, /* R1311 */
-	{ 0x02B6, 0x02B6 }, /* R1312  - AIF2 DAC Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1313  - AIF2 DAC Filters (2) */
-	{ 0x0000, 0x0000 }, /* R1314 */
-	{ 0x0000, 0x0000 }, /* R1315 */
-	{ 0x0000, 0x0000 }, /* R1316 */
-	{ 0x0000, 0x0000 }, /* R1317 */
-	{ 0x0000, 0x0000 }, /* R1318 */
-	{ 0x0000, 0x0000 }, /* R1319 */
-	{ 0x0000, 0x0000 }, /* R1320 */
-	{ 0x0000, 0x0000 }, /* R1321 */
-	{ 0x0000, 0x0000 }, /* R1322 */
-	{ 0x0000, 0x0000 }, /* R1323 */
-	{ 0x0000, 0x0000 }, /* R1324 */
-	{ 0x0000, 0x0000 }, /* R1325 */
-	{ 0x0000, 0x0000 }, /* R1326 */
-	{ 0x0000, 0x0000 }, /* R1327 */
-	{ 0x0000, 0x0000 }, /* R1328 */
-	{ 0x0000, 0x0000 }, /* R1329 */
-	{ 0x0000, 0x0000 }, /* R1330 */
-	{ 0x0000, 0x0000 }, /* R1331 */
-	{ 0x0000, 0x0000 }, /* R1332 */
-	{ 0x0000, 0x0000 }, /* R1333 */
-	{ 0x0000, 0x0000 }, /* R1334 */
-	{ 0x0000, 0x0000 }, /* R1335 */
-	{ 0x0000, 0x0000 }, /* R1336 */
-	{ 0x0000, 0x0000 }, /* R1337 */
-	{ 0x0000, 0x0000 }, /* R1338 */
-	{ 0x0000, 0x0000 }, /* R1339 */
-	{ 0x0000, 0x0000 }, /* R1340 */
-	{ 0x0000, 0x0000 }, /* R1341 */
-	{ 0x0000, 0x0000 }, /* R1342 */
-	{ 0x0000, 0x0000 }, /* R1343 */
-	{ 0xFFFF, 0xFFFF }, /* R1344  - AIF2 DRC (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1345  - AIF2 DRC (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1346  - AIF2 DRC (3) */
-	{ 0x07FF, 0x07FF }, /* R1347  - AIF2 DRC (4) */
-	{ 0x03FF, 0x03FF }, /* R1348  - AIF2 DRC (5) */
-	{ 0x0000, 0x0000 }, /* R1349 */
-	{ 0x0000, 0x0000 }, /* R1350 */
-	{ 0x0000, 0x0000 }, /* R1351 */
-	{ 0x0000, 0x0000 }, /* R1352 */
-	{ 0x0000, 0x0000 }, /* R1353 */
-	{ 0x0000, 0x0000 }, /* R1354 */
-	{ 0x0000, 0x0000 }, /* R1355 */
-	{ 0x0000, 0x0000 }, /* R1356 */
-	{ 0x0000, 0x0000 }, /* R1357 */
-	{ 0x0000, 0x0000 }, /* R1358 */
-	{ 0x0000, 0x0000 }, /* R1359 */
-	{ 0x0000, 0x0000 }, /* R1360 */
-	{ 0x0000, 0x0000 }, /* R1361 */
-	{ 0x0000, 0x0000 }, /* R1362 */
-	{ 0x0000, 0x0000 }, /* R1363 */
-	{ 0x0000, 0x0000 }, /* R1364 */
-	{ 0x0000, 0x0000 }, /* R1365 */
-	{ 0x0000, 0x0000 }, /* R1366 */
-	{ 0x0000, 0x0000 }, /* R1367 */
-	{ 0x0000, 0x0000 }, /* R1368 */
-	{ 0x0000, 0x0000 }, /* R1369 */
-	{ 0x0000, 0x0000 }, /* R1370 */
-	{ 0x0000, 0x0000 }, /* R1371 */
-	{ 0x0000, 0x0000 }, /* R1372 */
-	{ 0x0000, 0x0000 }, /* R1373 */
-	{ 0x0000, 0x0000 }, /* R1374 */
-	{ 0x0000, 0x0000 }, /* R1375 */
-	{ 0x0000, 0x0000 }, /* R1376 */
-	{ 0x0000, 0x0000 }, /* R1377 */
-	{ 0x0000, 0x0000 }, /* R1378 */
-	{ 0x0000, 0x0000 }, /* R1379 */
-	{ 0x0000, 0x0000 }, /* R1380 */
-	{ 0x0000, 0x0000 }, /* R1381 */
-	{ 0x0000, 0x0000 }, /* R1382 */
-	{ 0x0000, 0x0000 }, /* R1383 */
-	{ 0x0000, 0x0000 }, /* R1384 */
-	{ 0x0000, 0x0000 }, /* R1385 */
-	{ 0x0000, 0x0000 }, /* R1386 */
-	{ 0x0000, 0x0000 }, /* R1387 */
-	{ 0x0000, 0x0000 }, /* R1388 */
-	{ 0x0000, 0x0000 }, /* R1389 */
-	{ 0x0000, 0x0000 }, /* R1390 */
-	{ 0x0000, 0x0000 }, /* R1391 */
-	{ 0x0000, 0x0000 }, /* R1392 */
-	{ 0x0000, 0x0000 }, /* R1393 */
-	{ 0x0000, 0x0000 }, /* R1394 */
-	{ 0x0000, 0x0000 }, /* R1395 */
-	{ 0x0000, 0x0000 }, /* R1396 */
-	{ 0x0000, 0x0000 }, /* R1397 */
-	{ 0x0000, 0x0000 }, /* R1398 */
-	{ 0x0000, 0x0000 }, /* R1399 */
-	{ 0x0000, 0x0000 }, /* R1400 */
-	{ 0x0000, 0x0000 }, /* R1401 */
-	{ 0x0000, 0x0000 }, /* R1402 */
-	{ 0x0000, 0x0000 }, /* R1403 */
-	{ 0x0000, 0x0000 }, /* R1404 */
-	{ 0x0000, 0x0000 }, /* R1405 */
-	{ 0x0000, 0x0000 }, /* R1406 */
-	{ 0x0000, 0x0000 }, /* R1407 */
-	{ 0xFFFF, 0xFFFF }, /* R1408  - AIF2 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1409  - AIF2 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1410  - AIF2 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1411  - AIF2 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1412  - AIF2 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1413  - AIF2 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1414  - AIF2 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1415  - AIF2 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1416  - AIF2 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1417  - AIF2 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1418  - AIF2 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1419  - AIF2 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1420  - AIF2 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1421  - AIF2 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1422  - AIF2 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1423  - AIF2 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1424  - AIF2 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1425  - AIF2 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1426  - AIF2 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1427  - AIF2 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1428 */
-	{ 0x0000, 0x0000 }, /* R1429 */
-	{ 0x0000, 0x0000 }, /* R1430 */
-	{ 0x0000, 0x0000 }, /* R1431 */
-	{ 0x0000, 0x0000 }, /* R1432 */
-	{ 0x0000, 0x0000 }, /* R1433 */
-	{ 0x0000, 0x0000 }, /* R1434 */
-	{ 0x0000, 0x0000 }, /* R1435 */
-	{ 0x0000, 0x0000 }, /* R1436 */
-	{ 0x0000, 0x0000 }, /* R1437 */
-	{ 0x0000, 0x0000 }, /* R1438 */
-	{ 0x0000, 0x0000 }, /* R1439 */
-	{ 0x0000, 0x0000 }, /* R1440 */
-	{ 0x0000, 0x0000 }, /* R1441 */
-	{ 0x0000, 0x0000 }, /* R1442 */
-	{ 0x0000, 0x0000 }, /* R1443 */
-	{ 0x0000, 0x0000 }, /* R1444 */
-	{ 0x0000, 0x0000 }, /* R1445 */
-	{ 0x0000, 0x0000 }, /* R1446 */
-	{ 0x0000, 0x0000 }, /* R1447 */
-	{ 0x0000, 0x0000 }, /* R1448 */
-	{ 0x0000, 0x0000 }, /* R1449 */
-	{ 0x0000, 0x0000 }, /* R1450 */
-	{ 0x0000, 0x0000 }, /* R1451 */
-	{ 0x0000, 0x0000 }, /* R1452 */
-	{ 0x0000, 0x0000 }, /* R1453 */
-	{ 0x0000, 0x0000 }, /* R1454 */
-	{ 0x0000, 0x0000 }, /* R1455 */
-	{ 0x0000, 0x0000 }, /* R1456 */
-	{ 0x0000, 0x0000 }, /* R1457 */
-	{ 0x0000, 0x0000 }, /* R1458 */
-	{ 0x0000, 0x0000 }, /* R1459 */
-	{ 0x0000, 0x0000 }, /* R1460 */
-	{ 0x0000, 0x0000 }, /* R1461 */
-	{ 0x0000, 0x0000 }, /* R1462 */
-	{ 0x0000, 0x0000 }, /* R1463 */
-	{ 0x0000, 0x0000 }, /* R1464 */
-	{ 0x0000, 0x0000 }, /* R1465 */
-	{ 0x0000, 0x0000 }, /* R1466 */
-	{ 0x0000, 0x0000 }, /* R1467 */
-	{ 0x0000, 0x0000 }, /* R1468 */
-	{ 0x0000, 0x0000 }, /* R1469 */
-	{ 0x0000, 0x0000 }, /* R1470 */
-	{ 0x0000, 0x0000 }, /* R1471 */
-	{ 0x0000, 0x0000 }, /* R1472 */
-	{ 0x0000, 0x0000 }, /* R1473 */
-	{ 0x0000, 0x0000 }, /* R1474 */
-	{ 0x0000, 0x0000 }, /* R1475 */
-	{ 0x0000, 0x0000 }, /* R1476 */
-	{ 0x0000, 0x0000 }, /* R1477 */
-	{ 0x0000, 0x0000 }, /* R1478 */
-	{ 0x0000, 0x0000 }, /* R1479 */
-	{ 0x0000, 0x0000 }, /* R1480 */
-	{ 0x0000, 0x0000 }, /* R1481 */
-	{ 0x0000, 0x0000 }, /* R1482 */
-	{ 0x0000, 0x0000 }, /* R1483 */
-	{ 0x0000, 0x0000 }, /* R1484 */
-	{ 0x0000, 0x0000 }, /* R1485 */
-	{ 0x0000, 0x0000 }, /* R1486 */
-	{ 0x0000, 0x0000 }, /* R1487 */
-	{ 0x0000, 0x0000 }, /* R1488 */
-	{ 0x0000, 0x0000 }, /* R1489 */
-	{ 0x0000, 0x0000 }, /* R1490 */
-	{ 0x0000, 0x0000 }, /* R1491 */
-	{ 0x0000, 0x0000 }, /* R1492 */
-	{ 0x0000, 0x0000 }, /* R1493 */
-	{ 0x0000, 0x0000 }, /* R1494 */
-	{ 0x0000, 0x0000 }, /* R1495 */
-	{ 0x0000, 0x0000 }, /* R1496 */
-	{ 0x0000, 0x0000 }, /* R1497 */
-	{ 0x0000, 0x0000 }, /* R1498 */
-	{ 0x0000, 0x0000 }, /* R1499 */
-	{ 0x0000, 0x0000 }, /* R1500 */
-	{ 0x0000, 0x0000 }, /* R1501 */
-	{ 0x0000, 0x0000 }, /* R1502 */
-	{ 0x0000, 0x0000 }, /* R1503 */
-	{ 0x0000, 0x0000 }, /* R1504 */
-	{ 0x0000, 0x0000 }, /* R1505 */
-	{ 0x0000, 0x0000 }, /* R1506 */
-	{ 0x0000, 0x0000 }, /* R1507 */
-	{ 0x0000, 0x0000 }, /* R1508 */
-	{ 0x0000, 0x0000 }, /* R1509 */
-	{ 0x0000, 0x0000 }, /* R1510 */
-	{ 0x0000, 0x0000 }, /* R1511 */
-	{ 0x0000, 0x0000 }, /* R1512 */
-	{ 0x0000, 0x0000 }, /* R1513 */
-	{ 0x0000, 0x0000 }, /* R1514 */
-	{ 0x0000, 0x0000 }, /* R1515 */
-	{ 0x0000, 0x0000 }, /* R1516 */
-	{ 0x0000, 0x0000 }, /* R1517 */
-	{ 0x0000, 0x0000 }, /* R1518 */
-	{ 0x0000, 0x0000 }, /* R1519 */
-	{ 0x0000, 0x0000 }, /* R1520 */
-	{ 0x0000, 0x0000 }, /* R1521 */
-	{ 0x0000, 0x0000 }, /* R1522 */
-	{ 0x0000, 0x0000 }, /* R1523 */
-	{ 0x0000, 0x0000 }, /* R1524 */
-	{ 0x0000, 0x0000 }, /* R1525 */
-	{ 0x0000, 0x0000 }, /* R1526 */
-	{ 0x0000, 0x0000 }, /* R1527 */
-	{ 0x0000, 0x0000 }, /* R1528 */
-	{ 0x0000, 0x0000 }, /* R1529 */
-	{ 0x0000, 0x0000 }, /* R1530 */
-	{ 0x0000, 0x0000 }, /* R1531 */
-	{ 0x0000, 0x0000 }, /* R1532 */
-	{ 0x0000, 0x0000 }, /* R1533 */
-	{ 0x0000, 0x0000 }, /* R1534 */
-	{ 0x0000, 0x0000 }, /* R1535 */
-	{ 0x01EF, 0x01EF }, /* R1536  - DAC1 Mixer Volumes */
-	{ 0x0037, 0x0037 }, /* R1537  - DAC1 Left Mixer Routing */
-	{ 0x0037, 0x0037 }, /* R1538  - DAC1 Right Mixer Routing */
-	{ 0x01EF, 0x01EF }, /* R1539  - DAC2 Mixer Volumes */
-	{ 0x0037, 0x0037 }, /* R1540  - DAC2 Left Mixer Routing */
-	{ 0x0037, 0x0037 }, /* R1541  - DAC2 Right Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1542  - AIF1 ADC1 Left Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1543  - AIF1 ADC1 Right Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1544  - AIF1 ADC2 Left Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1545  - AIF1 ADC2 Right mixer Routing */
-	{ 0x0000, 0x0000 }, /* R1546 */
-	{ 0x0000, 0x0000 }, /* R1547 */
-	{ 0x0000, 0x0000 }, /* R1548 */
-	{ 0x0000, 0x0000 }, /* R1549 */
-	{ 0x0000, 0x0000 }, /* R1550 */
-	{ 0x0000, 0x0000 }, /* R1551 */
-	{ 0x02FF, 0x03FF }, /* R1552  - DAC1 Left Volume */
-	{ 0x02FF, 0x03FF }, /* R1553  - DAC1 Right Volume */
-	{ 0x02FF, 0x03FF }, /* R1554  - DAC2 Left Volume */
-	{ 0x02FF, 0x03FF }, /* R1555  - DAC2 Right Volume */
-	{ 0x0003, 0x0003 }, /* R1556  - DAC Softmute */
-	{ 0x0000, 0x0000 }, /* R1557 */
-	{ 0x0000, 0x0000 }, /* R1558 */
-	{ 0x0000, 0x0000 }, /* R1559 */
-	{ 0x0000, 0x0000 }, /* R1560 */
-	{ 0x0000, 0x0000 }, /* R1561 */
-	{ 0x0000, 0x0000 }, /* R1562 */
-	{ 0x0000, 0x0000 }, /* R1563 */
-	{ 0x0000, 0x0000 }, /* R1564 */
-	{ 0x0000, 0x0000 }, /* R1565 */
-	{ 0x0000, 0x0000 }, /* R1566 */
-	{ 0x0000, 0x0000 }, /* R1567 */
-	{ 0x0003, 0x0003 }, /* R1568  - Oversampling */
-	{ 0x03C3, 0x03C3 }, /* R1569  - Sidetone */
-};
-
 static int wm8994_readable(unsigned int reg)
 {
 	switch (reg) {
@@ -1696,14 +131,14 @@
 		break;
 	}
 
-	if (reg >= ARRAY_SIZE(access_masks))
+	if (reg >= WM8994_CACHE_SIZE)
 		return 0;
-	return access_masks[reg].readable != 0;
+	return wm8994_access_masks[reg].readable != 0;
 }
 
 static int wm8994_volatile(unsigned int reg)
 {
-	if (reg >= WM8994_REG_CACHE_SIZE)
+	if (reg >= WM8994_CACHE_SIZE)
 		return 1;
 
 	switch (reg) {
@@ -1714,6 +149,8 @@
 	case WM8994_RATE_STATUS:
 	case WM8994_LDO_1:
 	case WM8994_LDO_2:
+	case WM8958_DSP2_EXECCONTROL:
+	case WM8958_MIC_DETECT_3:
 		return 1;
 	default:
 		return 0;
@@ -1723,14 +160,16 @@
 static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg,
 	unsigned int value)
 {
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int ret;
 
 	BUG_ON(reg > WM8994_MAX_REGISTER);
 
-	if (!wm8994_volatile(reg))
-		wm8994->reg_cache[reg] = value;
-
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+	if (!wm8994_volatile(reg)) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret != 0)
+			dev_err(codec->dev, "Cache write to %x failed: %d\n",
+				reg, ret);
+	}
 
 	return wm8994_reg_write(codec->control_data, reg, value);
 }
@@ -1738,14 +177,22 @@
 static unsigned int wm8994_read(struct snd_soc_codec *codec,
 				unsigned int reg)
 {
-	u16 *reg_cache = codec->reg_cache;
+	unsigned int val;
+	int ret;
 
 	BUG_ON(reg > WM8994_MAX_REGISTER);
 
-	if (wm8994_volatile(reg))
-		return wm8994_reg_read(codec->control_data, reg);
-	else
-		return reg_cache[reg];
+	if (!wm8994_volatile(reg) && wm8994_readable(reg) &&
+	    reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_read(codec, reg, &val);
+		if (ret >= 0)
+			return val;
+		else
+			dev_err(codec->dev, "Cache read from %x failed: %d\n",
+				reg, ret);
+	}
+
+	return wm8994_reg_read(codec->control_data, reg);
 }
 
 static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
@@ -1837,7 +284,7 @@
 
 	snd_soc_update_bits(codec, WM8994_CLOCKING_1, WM8994_SYSCLK_SRC, new);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	return 0;
 }
@@ -1864,6 +311,19 @@
 static const struct soc_enum sidetone_hpf =
 	SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text);
 
+static const char *adc_hpf_text[] = {
+	"HiFi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum aif1adc1_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text);
+
+static const struct soc_enum aif1adc2_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text);
+
+static const struct soc_enum aif2adc_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text);
+
 static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0);
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
@@ -2071,21 +531,252 @@
 	return 0;
 }
 
-static const char *aifdac_src_text[] = {
+static const char *aif_chan_src_text[] = {
 	"Left", "Right"
 };
 
+static const struct soc_enum aif1adcl_src =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text);
+
+static const struct soc_enum aif1adcr_src =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text);
+
+static const struct soc_enum aif2adcl_src =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text);
+
+static const struct soc_enum aif2adcr_src =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text);
+
 static const struct soc_enum aif1dacl_src =
-	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text);
 
 static const struct soc_enum aif1dacr_src =
-	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text);
 
 static const struct soc_enum aif2dacl_src =
-	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text);
 
 static const struct soc_enum aif2dacr_src =
-	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text);
+
+static const char *osr_text[] = {
+	"Low Power", "High Performance",
+};
+
+static const struct soc_enum dac_osr =
+	SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text);
+
+static const struct soc_enum adc_osr =
+	SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text);
+
+static void wm8958_mbc_apply(struct snd_soc_codec *codec, int mbc, int start)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994_pdata *pdata = wm8994->pdata;
+	int pwr_reg = snd_soc_read(codec, WM8994_POWER_MANAGEMENT_5);
+	int ena, reg, aif, i;
+
+	switch (mbc) {
+	case 0:
+		pwr_reg &= (WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA);
+		aif = 0;
+		break;
+	case 1:
+		pwr_reg &= (WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
+		aif = 0;
+		break;
+	case 2:
+		pwr_reg &= (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA);
+		aif = 1;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	/* We can only enable the MBC if the AIF is enabled and we
+	 * want it to be enabled. */
+	ena = pwr_reg && wm8994->mbc_ena[mbc];
+
+	reg = snd_soc_read(codec, WM8958_DSP2_PROGRAM);
+
+	dev_dbg(codec->dev, "MBC %d startup: %d, power: %x, DSP: %x\n",
+		mbc, start, pwr_reg, reg);
+
+	if (start && ena) {
+		/* If the DSP is already running then noop */
+		if (reg & WM8958_DSP2_ENA)
+			return;
+
+		/* Switch the clock over to the appropriate AIF */
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8958_DSP2CLK_SRC | WM8958_DSP2CLK_ENA,
+				    aif << WM8958_DSP2CLK_SRC_SHIFT |
+				    WM8958_DSP2CLK_ENA);
+
+		snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
+				    WM8958_DSP2_ENA, WM8958_DSP2_ENA);
+
+		/* If we've got user-supplied MBC settings, use them */
+		if (pdata && pdata->num_mbc_cfgs) {
+			struct wm8958_mbc_cfg *cfg
+				= &pdata->mbc_cfgs[wm8994->mbc_cfg];
+
+			for (i = 0; i < ARRAY_SIZE(cfg->coeff_regs); i++)
+				snd_soc_write(codec, i + WM8958_MBC_BAND_1_K_1,
+					      cfg->coeff_regs[i]);
+
+			for (i = 0; i < ARRAY_SIZE(cfg->cutoff_regs); i++)
+				snd_soc_write(codec,
+					      i + WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1,
+					      cfg->cutoff_regs[i]);
+		}
+
+		/* Run the DSP */
+		snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
+			      WM8958_DSP2_RUNR);
+
+		/* And we're off! */
+		snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
+				    WM8958_MBC_ENA | WM8958_MBC_SEL_MASK,
+				    mbc << WM8958_MBC_SEL_SHIFT |
+				    WM8958_MBC_ENA);
+	} else {
+		/* If the DSP is already stopped then noop */
+		if (!(reg & WM8958_DSP2_ENA))
+			return;
+
+		snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
+				    WM8958_MBC_ENA, 0);
+		snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
+				    WM8958_DSP2_ENA, 0);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8958_DSP2CLK_ENA, 0);
+	}
+}
+
+static int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	int mbc;
+
+	switch (w->shift) {
+	case 13:
+	case 12:
+		mbc = 2;
+		break;
+	case 11:
+	case 10:
+		mbc = 1;
+		break;
+	case 9:
+	case 8:
+		mbc = 0;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		wm8958_mbc_apply(codec, mbc, 1);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8958_mbc_apply(codec, mbc, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994_pdata *pdata = wm8994->pdata;
+	int value = ucontrol->value.integer.value[0];
+	int reg;
+
+	/* Don't allow on the fly reconfiguration */
+	reg = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
+		return -EBUSY;
+
+	if (value >= pdata->num_mbc_cfgs)
+		return -EINVAL;
+
+	wm8994->mbc_cfg = value;
+
+	return 0;
+}
+
+static int wm8958_get_mbc_enum(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8994->mbc_cfg;
+
+	return 0;
+}
+
+static int wm8958_mbc_info(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 1;
+	return 0;
+}
+
+static int wm8958_mbc_get(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	int mbc = kcontrol->private_value;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wm8994->mbc_ena[mbc];
+
+	return 0;
+}
+
+static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	int mbc = kcontrol->private_value;
+	int i;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	if (ucontrol->value.integer.value[0] > 1)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(wm8994->mbc_ena); i++) {
+		if (mbc != i && wm8994->mbc_ena[i]) {
+			dev_dbg(codec->dev, "MBC %d active already\n", mbc);
+			return -EBUSY;
+		}
+	}
+
+	wm8994->mbc_ena[mbc] = ucontrol->value.integer.value[0];
+
+	wm8958_mbc_apply(codec, mbc, wm8994->mbc_ena[mbc]);
+
+	return 0;
+}
+
+#define WM8958_MBC_SWITCH(xname, xval) {\
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+	.info = wm8958_mbc_info, \
+	.get = wm8958_mbc_get, .put = wm8958_mbc_put, \
+	.private_value = xval }
 
 static const struct snd_kcontrol_new wm8994_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
@@ -2098,10 +789,15 @@
 		 WM8994_AIF2_ADC_RIGHT_VOLUME,
 		 1, 119, 0, digital_tlv),
 
+SOC_ENUM("AIF1ADCL Source", aif1adcl_src),
+SOC_ENUM("AIF1ADCR Source", aif1adcr_src),
+SOC_ENUM("AIF2ADCL Source", aif2adcl_src),
+SOC_ENUM("AIF2ADCR Source", aif2adcr_src),
+
 SOC_ENUM("AIF1DACL Source", aif1dacl_src),
 SOC_ENUM("AIF1DACR Source", aif1dacr_src),
-SOC_ENUM("AIF2DACL Source", aif1dacl_src),
-SOC_ENUM("AIF2DACR Source", aif1dacr_src),
+SOC_ENUM("AIF2DACL Source", aif2dacl_src),
+SOC_ENUM("AIF2DACR Source", aif2dacr_src),
 
 SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
 		 WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
@@ -2140,6 +836,18 @@
 SOC_ENUM("Sidetone HPF Mux", sidetone_hpf),
 SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0),
 
+SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
+SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
+SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("ADC OSR", adc_osr),
+SOC_ENUM("DAC OSR", dac_osr),
+
 SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME,
 		 WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME,
@@ -2162,15 +870,15 @@
 
 SOC_SINGLE_TLV("AIF1DAC1 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
-SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
+SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC1_FILTERS_2,
 	   8, 1, 0),
 SOC_SINGLE_TLV("AIF1DAC2 3D Stereo Volume", WM8994_AIF1_DAC2_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
 SOC_SINGLE("AIF1DAC2 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
 	   8, 1, 0),
-SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2,
+SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF2_DAC_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
-SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
+SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
 	   8, 1, 0),
 };
 
@@ -2209,6 +917,13 @@
 	       eq_tlv),
 };
 
+static const struct snd_kcontrol_new wm8958_snd_controls[] = {
+SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv),
+WM8958_MBC_SWITCH("AIF1DAC1 MBC Switch", 0),
+WM8958_MBC_SWITCH("AIF1DAC2 MBC Switch", 1),
+WM8958_MBC_SWITCH("AIF2DAC MBC Switch", 2),
+};
+
 static int clk_sys_event(struct snd_soc_dapm_widget *w,
 			 struct snd_kcontrol *kcontrol, int event)
 {
@@ -2228,6 +943,7 @@
 
 static void wm8994_update_class_w(struct snd_soc_codec *codec)
 {
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	int enable = 1;
 	int source = 0;  /* GCC flow analysis can't track enable */
 	int reg, reg_r;
@@ -2278,11 +994,13 @@
 				    WM8994_CP_DYN_PWR |
 				    WM8994_CP_DYN_SRC_SEL_MASK,
 				    source | WM8994_CP_DYN_PWR);
+		wm8994->hubs.class_w = true;
 		
 	} else {
 		dev_dbg(codec->dev, "Class W disabled\n");
 		snd_soc_update_bits(codec, WM8994_CLASS_W_1,
 				    WM8994_CP_DYN_PWR, 0);
+		wm8994->hubs.class_w = false;
 	}
 }
 
@@ -2512,14 +1230,47 @@
 	SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum);
 
 static const char *aif3adc_text[] = {
-	"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT",
+	"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM",
 };
 
-static const struct soc_enum aif3adc_enum =
+static const struct soc_enum wm8994_aif3adc_enum =
 	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text);
 
-static const struct snd_kcontrol_new aif3adc_mux =
-	SOC_DAPM_ENUM("AIF3ADC Mux", aif3adc_enum);
+static const struct snd_kcontrol_new wm8994_aif3adc_mux =
+	SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum);
+
+static const struct soc_enum wm8958_aif3adc_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text);
+
+static const struct snd_kcontrol_new wm8958_aif3adc_mux =
+	SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum);
+
+static const char *mono_pcm_out_text[] = {
+	"None", "AIF2ADCL", "AIF2ADCR",
+};
+
+static const struct soc_enum mono_pcm_out_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text);
+
+static const struct snd_kcontrol_new mono_pcm_out_mux =
+	SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum);
+
+static const char *aif2dac_src_text[] = {
+	"AIF2", "AIF3",
+};
+
+/* Note that these two controls shouldn't be simultaneously switched to AIF3 */
+static const struct soc_enum aif2dacl_src_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text);
+
+static const struct snd_kcontrol_new aif2dacl_src_mux =
+	SOC_DAPM_ENUM("AIF2DACL Mux", aif2dacl_src_enum);
+
+static const struct soc_enum aif2dacr_src_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text);
+
+static const struct snd_kcontrol_new aif2dacr_src_mux =
+	SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("DMIC1DAT"),
@@ -2540,19 +1291,23 @@
 		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
 		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 9, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 8, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
 		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
 		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 11, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 10, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
 		   aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
@@ -2581,10 +1336,12 @@
 		     WM8994_POWER_MANAGEMENT_4, 13, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
 		     WM8994_POWER_MANAGEMENT_4, 12, 0),
-SND_SOC_DAPM_AIF_IN("AIF2DACL", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 13, 0),
-SND_SOC_DAPM_AIF_IN("AIF2DACR", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 12, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
@@ -2593,7 +1350,6 @@
 SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
 SND_SOC_DAPM_MUX("AIF2DAC Mux", SND_SOC_NOPM, 0, 0, &aif2dac_mux),
 SND_SOC_DAPM_MUX("AIF2ADC Mux", SND_SOC_NOPM, 0, 0, &aif2adc_mux),
-SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &aif3adc_mux),
 
 SND_SOC_DAPM_AIF_IN("AIF3DACDAT", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
@@ -2631,8 +1387,18 @@
 SND_SOC_DAPM_POST("Debug log", post_ev),
 };
 
-static const struct snd_soc_dapm_route intercon[] = {
+static const struct snd_soc_dapm_widget wm8994_specific_dapm_widgets[] = {
+SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8994_aif3adc_mux),
+};
 
+static const struct snd_soc_dapm_widget wm8958_dapm_widgets[] = {
+SND_SOC_DAPM_MUX("Mono PCM Out Mux", SND_SOC_NOPM, 0, 0, &mono_pcm_out_mux),
+SND_SOC_DAPM_MUX("AIF2DACL Mux", SND_SOC_NOPM, 0, 0, &aif2dacl_src_mux),
+SND_SOC_DAPM_MUX("AIF2DACR Mux", SND_SOC_NOPM, 0, 0, &aif2dacr_src_mux),
+SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8958_aif3adc_mux),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
 	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
 	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
 
@@ -2740,9 +1506,6 @@
 	{ "AIF1DAC2L", NULL, "AIF1DAC Mux" },
 	{ "AIF1DAC2R", NULL, "AIF1DAC Mux" },
 
-	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
-	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
-
 	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" },
 	{ "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
 	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" },
@@ -2815,6 +1578,26 @@
 	{ "Right Headphone Mux", "DAC", "DAC1R" },
 };
 
+static const struct snd_soc_dapm_route wm8994_intercon[] = {
+	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
+	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
+};
+
+static const struct snd_soc_dapm_route wm8958_intercon[] = {
+	{ "AIF2DACL", NULL, "AIF2DACL Mux" },
+	{ "AIF2DACR", NULL, "AIF2DACR Mux" },
+
+	{ "AIF2DACL Mux", "AIF2", "AIF2DAC Mux" },
+	{ "AIF2DACL Mux", "AIF3", "AIF3DACDAT" },
+	{ "AIF2DACR Mux", "AIF2", "AIF2DAC Mux" },
+	{ "AIF2DACR Mux", "AIF3", "AIF3DACDAT" },
+
+	{ "Mono PCM Out Mux", "AIF2ADCL", "AIF2ADCL" },
+	{ "Mono PCM Out Mux", "AIF2ADCR", "AIF2ADCR" },
+
+	{ "AIF3ADC Mux", "Mono PCM", "Mono PCM Out Mux" },
+};
+
 /* The size in bits of the FLL divide multiplied by 10
  * to allow rounding later */
 #define FIXED_FLL_SIZE ((1 << 16) * 10)
@@ -2930,6 +1713,7 @@
 		/* Allow no source specification when stopping */
 		if (freq_out)
 			return -EINVAL;
+		src = wm8994->fll[id].src;
 		break;
 	case WM8994_FLL_SRC_MCLK1:
 	case WM8994_FLL_SRC_MCLK2:
@@ -3094,6 +1878,7 @@
 static int wm8994_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
+	struct wm8994 *control = codec->control_data;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
 	switch (level) {
@@ -3107,16 +1892,36 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
-			/* Tweak DC servo and DSP configuration for
-			 * improved performance. */
-			if (wm8994->revision < 4) {
-				/* Tweak DC servo and DSP configuration for
-				 * improved performance. */
-				snd_soc_write(codec, 0x102, 0x3);
-				snd_soc_write(codec, 0x56, 0x3);
-				snd_soc_write(codec, 0x817, 0);
-				snd_soc_write(codec, 0x102, 0);
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			pm_runtime_get_sync(codec->dev);
+
+			switch (control->type) {
+			case WM8994:
+				if (wm8994->revision < 4) {
+					/* Tweak DC servo and DSP
+					 * configuration for improved
+					 * performance. */
+					snd_soc_write(codec, 0x102, 0x3);
+					snd_soc_write(codec, 0x56, 0x3);
+					snd_soc_write(codec, 0x817, 0);
+					snd_soc_write(codec, 0x102, 0);
+				}
+				break;
+
+			case WM8958:
+				if (wm8994->revision == 0) {
+					/* Optimise performance for rev A */
+					snd_soc_write(codec, 0x102, 0x3);
+					snd_soc_write(codec, 0xcb, 0x81);
+					snd_soc_write(codec, 0x817, 0);
+					snd_soc_write(codec, 0x102, 0);
+
+					snd_soc_update_bits(codec,
+							    WM8958_CHARGE_PUMP_2,
+							    WM8958_CP_DISCH,
+							    WM8958_CP_DISCH);
+				}
+				break;
 			}
 
 			/* Discharge LINEOUT1 & 2 */
@@ -3151,7 +1956,7 @@
 		break;
 
 	case SND_SOC_BIAS_OFF:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
 			/* Switch over to startup biases */
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_BIAS_SRC |
@@ -3183,16 +1988,19 @@
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
 					    WM8994_VMID_RAMP_MASK, 0);
+
+			pm_runtime_put(codec->dev);
 		}
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
 static int wm8994_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
 	int ms_reg;
 	int aif1_reg;
 	int ms = 0;
@@ -3277,6 +2085,13 @@
 		return -EINVAL;
 	}
 
+	/* The AIF2 format configuration needs to be mirrored to AIF3
+	 * on WM8958 if it's in use, so just do it all the time. */
+	if (control->type == WM8958 && dai->id == 2)
+		snd_soc_update_bits(codec, WM8958_AIF3_CONTROL_1,
+				    WM8994_AIF1_LRCLK_INV |
+				    WM8958_AIF3_FMT_MASK, aif1);
+
 	snd_soc_update_bits(codec, aif1_reg,
 			    WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV |
 			    WM8994_AIF1_FMT_MASK,
@@ -3317,12 +2132,15 @@
 			    struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	int aif1_reg;
+	int aif2_reg;
 	int bclk_reg;
 	int lrclk_reg;
 	int rate_reg;
 	int aif1 = 0;
+	int aif2 = 0;
 	int bclk = 0;
 	int lrclk = 0;
 	int rate_val = 0;
@@ -3333,6 +2151,7 @@
 	switch (dai->id) {
 	case 1:
 		aif1_reg = WM8994_AIF1_CONTROL_1;
+		aif2_reg = WM8994_AIF1_CONTROL_2;
 		bclk_reg = WM8994_AIF1_BCLK;
 		rate_reg = WM8994_AIF1_RATE;
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
@@ -3345,6 +2164,7 @@
 		break;
 	case 2:
 		aif1_reg = WM8994_AIF2_CONTROL_1;
+		aif2_reg = WM8994_AIF2_CONTROL_2;
 		bclk_reg = WM8994_AIF2_BCLK;
 		rate_reg = WM8994_AIF2_RATE;
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
@@ -3355,6 +2175,14 @@
 			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
 		}
 		break;
+	case 3:
+		switch (control->type) {
+		case WM8958:
+			aif1_reg = WM8958_AIF3_CONTROL_1;
+			break;
+		default:
+			return 0;
+		}
 	default:
 		return -EINVAL;
 	}
@@ -3392,6 +2220,10 @@
 	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
 		dai->id, wm8994->aifclk[id], bclk_rate);
 
+	if (params_channels(params) == 1 &&
+	    (snd_soc_read(codec, aif1_reg) & 0x18) == 0x18)
+		aif2 |= WM8994_AIF1_MONO;
+
 	if (wm8994->aifclk[id] == 0) {
 		dev_err(dai->dev, "AIF%dCLK not configured\n", dai->id);
 		return -EINVAL;
@@ -3435,6 +2267,7 @@
 		lrclk, bclk_rate / lrclk);
 
 	snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
+	snd_soc_update_bits(codec, aif2_reg, WM8994_AIF1_MONO, aif2);
 	snd_soc_update_bits(codec, bclk_reg, WM8994_AIF1_BCLK_DIV_MASK, bclk);
 	snd_soc_update_bits(codec, lrclk_reg, WM8994_AIF1DAC_RATE_MASK,
 			    lrclk);
@@ -3458,6 +2291,47 @@
 	return 0;
 }
 
+static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
+	int aif1_reg;
+	int aif1 = 0;
+
+	switch (dai->id) {
+	case 3:
+		switch (control->type) {
+		case WM8958:
+			aif1_reg = WM8958_AIF3_CONTROL_1;
+			break;
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		aif1 |= 0x20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		aif1 |= 0x40;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aif1 |= 0x60;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
+}
+
 static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
@@ -3539,6 +2413,7 @@
 };
 
 static struct snd_soc_dai_ops wm8994_aif3_dai_ops = {
+	.hw_params	= wm8994_aif3_hw_params,
 	.set_tristate	= wm8994_set_tristate,
 };
 
@@ -3548,14 +2423,14 @@
 		.id = 1,
 		.playback = {
 			.stream_name = "AIF1 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF1 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3567,14 +2442,14 @@
 		.id = 2,
 		.playback = {
 			.stream_name = "AIF2 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF2 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3586,14 +2461,14 @@
 		.id = 3,
 		.playback = {
 			.stream_name = "AIF3 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF3 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3625,26 +2500,12 @@
 static int wm8994_resume(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-	u16 *reg_cache = codec->reg_cache;
 	int i, ret;
 
 	/* Restore the registers */
-	for (i = 1; i < ARRAY_SIZE(wm8994->reg_cache); i++) {
-		switch (i) {
-		case WM8994_LDO_1:
-		case WM8994_LDO_2:
-		case WM8994_SOFTWARE_RESET:
-			/* Handled by other MFD drivers */
-			continue;
-		default:
-			break;
-		}
-
-		if (!access_masks[i].writable)
-			continue;
-
-		wm8994_reg_write(codec->control_data, i, reg_cache[i]);
-	}
+	ret = snd_soc_cache_sync(codec);
+	if (ret != 0)
+		dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
 
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
@@ -3794,6 +2655,34 @@
 	dev_dbg(codec->dev, "%d ReTune Mobile configurations\n",
 		pdata->num_retune_mobile_cfgs);
 
+	if (pdata->num_mbc_cfgs) {
+		struct snd_kcontrol_new control[] = {
+			SOC_ENUM_EXT("MBC Mode", wm8994->mbc_enum,
+				     wm8958_get_mbc_enum, wm8958_put_mbc_enum),
+		};
+
+		/* We need an array of texts for the enum API */
+		wm8994->mbc_texts = kmalloc(sizeof(char *)
+					    * pdata->num_mbc_cfgs, GFP_KERNEL);
+		if (!wm8994->mbc_texts) {
+			dev_err(wm8994->codec->dev,
+				"Failed to allocate %d MBC config texts\n",
+				pdata->num_mbc_cfgs);
+			return;
+		}
+
+		for (i = 0; i < pdata->num_mbc_cfgs; i++)
+			wm8994->mbc_texts[i] = pdata->mbc_cfgs[i].name;
+
+		wm8994->mbc_enum.max = pdata->num_mbc_cfgs;
+		wm8994->mbc_enum.texts = wm8994->mbc_texts;
+
+		ret = snd_soc_add_controls(wm8994->codec, control, 1);
+		if (ret != 0)
+			dev_err(wm8994->codec->dev,
+				"Failed to add MBC mode controls: %d\n", ret);
+	}
+
 	if (pdata->num_retune_mobile_cfgs)
 		wm8994_handle_retune_mobile_pdata(wm8994);
 	else
@@ -3823,8 +2712,12 @@
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994_micdet *micdet;
+	struct wm8994 *control = codec->control_data;
 	int reg;
 
+	if (control->type != WM8994)
+		return -EINVAL;
+
 	switch (micbias) {
 	case 1:
 		micdet = &wm8994->micdet[0];
@@ -3863,6 +2756,10 @@
 	int reg;
 	int report;
 
+#ifndef CONFIG_SND_SOC_WM8994_MODULE
+	trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
 	reg = snd_soc_read(codec, WM8994_INTERRUPT_RAW_STATUS_2);
 	if (reg < 0) {
 		dev_err(codec->dev, "Failed to read microphone status: %d\n",
@@ -3891,77 +2788,251 @@
 	return IRQ_HANDLED;
 }
 
+/* Default microphone detection handler for WM8958 - the user can
+ * override this if they wish.
+ */
+static void wm8958_default_micdet(u16 status, void *data)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int report = 0;
+
+	/* If nothing present then clear our statuses */
+	if (!(status & WM8958_MICD_STS)) {
+		wm8994->jack_is_video = false;
+		wm8994->jack_is_mic = false;
+		goto done;
+	}
+
+	/* Assume anything over 475 ohms is a microphone and remember
+	 * that we've seen one (since buttons override it) */
+	if (status & 0x600)
+		wm8994->jack_is_mic = true;
+	if (wm8994->jack_is_mic)
+		report |= SND_JACK_MICROPHONE;
+
+	/* Video has an impedance of approximately 75 ohms; assume
+	 * this isn't used as a button and remember it since buttons
+	 * override it. */
+	if (status & 0x40)
+		wm8994->jack_is_video = true;
+	if (wm8994->jack_is_video)
+		report |= SND_JACK_VIDEOOUT;
+
+	/* Everything else is buttons; just assign slots */
+	if (status & 0x4)
+		report |= SND_JACK_BTN_0;
+	if (status & 0x8)
+		report |= SND_JACK_BTN_1;
+	if (status & 0x10)
+		report |= SND_JACK_BTN_2;
+	if (status & 0x20)
+		report |= SND_JACK_BTN_3;
+	if (status & 0x80)
+		report |= SND_JACK_BTN_4;
+	if (status & 0x100)
+		report |= SND_JACK_BTN_5;
+
+done:
+	snd_soc_jack_report(wm8994->micdet[0].jack,
+			    SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+			    SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
+			    SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
+			    report);
+}
+
+/**
+ * wm8958_mic_detect - Enable microphone detection via the WM8958 IRQ
+ *
+ * @codec:   WM8958 codec
+ * @jack:    jack to report detection events on
+ * @cb:      detection callback, or NULL to use the default handler
+ * @cb_data: data passed to the callback
+ *
+ * Enable microphone detection functionality for the WM8958.  By
+ * default a simple detection scheme is used which supports up to six
+ * buttons plus video and microphone detection.
+ *
+ * The WM8958 has an advanced jack detection facility which is able to
+ * support complex accessory detection, especially when used in
+ * conjunction with external circuitry.  In order to provide maximum
+ * flexibility a callback is provided which allows a completely custom
+ * detection algorithm.
+ */
+int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		      wm8958_micdet_cb cb, void *cb_data)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
+
+	if (control->type != WM8958)
+		return -EINVAL;
+
+	if (jack) {
+		if (!cb) {
+			dev_dbg(codec->dev, "Using default micdet callback\n");
+			cb = wm8958_default_micdet;
+			cb_data = codec;
+		}
+
+		wm8994->micdet[0].jack = jack;
+		wm8994->jack_cb = cb;
+		wm8994->jack_cb_data = cb_data;
+
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_ENA, WM8958_MICD_ENA);
+	} else {
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_ENA, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8958_mic_detect);
+
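The exported wm8958_mic_detect() helper above is meant to be called from a machine driver. Below is a hypothetical machine-driver sketch, not part of this patch: the function name, jack name and button mask are illustrative, and it assumes the current snd_soc_jack_new() signature. Passing a NULL callback selects wm8958_default_micdet(), which reports microphone, video-out and up to six buttons from the MICD status bits.

#include <sound/soc.h>
#include <sound/jack.h>

#include "../codecs/wm8994.h"	/* wm8958_mic_detect() */

static struct snd_soc_jack hypothetical_headset;

static int hypothetical_headset_init(struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_jack_new(codec, "Headset",
			       SND_JACK_HEADSET | SND_JACK_VIDEOOUT |
			       SND_JACK_BTN_0 | SND_JACK_BTN_1 |
			       SND_JACK_BTN_2 | SND_JACK_BTN_3 |
			       SND_JACK_BTN_4 | SND_JACK_BTN_5,
			       &hypothetical_headset);
	if (ret != 0)
		return ret;

	/* NULL callback and data: use the default impedance-based handler */
	return wm8958_mic_detect(codec, &hypothetical_headset, NULL, NULL);
}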
+static irqreturn_t wm8958_mic_irq(int irq, void *data)
+{
+	struct wm8994_priv *wm8994 = data;
+	struct snd_soc_codec *codec = wm8994->codec;
+	int reg;
+
+	reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+	if (reg < 0) {
+		dev_err(codec->dev, "Failed to read mic detect status: %d\n",
+			reg);
+		return IRQ_NONE;
+	}
+
+	if (!(reg & WM8958_MICD_VALID)) {
+		dev_dbg(codec->dev, "Mic detect data not valid\n");
+		goto out;
+	}
+
+#ifndef CONFIG_SND_SOC_WM8994_MODULE
+	trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
+	if (wm8994->jack_cb)
+		wm8994->jack_cb(reg, wm8994->jack_cb_data);
+	else
+		dev_warn(codec->dev, "Accessory detection with no callback\n");
+
+out:
+	return IRQ_HANDLED;
+}
+
 static int wm8994_codec_probe(struct snd_soc_codec *codec)
 {
+	struct wm8994 *control;
 	struct wm8994_priv *wm8994;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, i;
 
 	codec->control_data = dev_get_drvdata(codec->dev->parent);
+	control = codec->control_data;
 
 	wm8994 = kzalloc(sizeof(struct wm8994_priv), GFP_KERNEL);
 	if (wm8994 == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, wm8994);
 
-	codec->reg_cache = &wm8994->reg_cache;
-
 	wm8994->pdata = dev_get_platdata(codec->dev->parent);
 	wm8994->codec = codec;
 
-	/* Fill the cache with physical values we inherited; don't reset */
-	ret = wm8994_bulk_read(codec->control_data, 0,
-			       ARRAY_SIZE(wm8994->reg_cache) - 1,
-			       codec->reg_cache);
-	if (ret < 0) {
-		dev_err(codec->dev, "Failed to fill register cache: %d\n",
-			ret);
-		goto err;
-	}
+	pm_runtime_enable(codec->dev);
+	pm_runtime_resume(codec->dev);
 
-	/* Clear the cached values for unreadable/volatile registers to
-	 * avoid potential confusion.
-	 */
-	for (i = 0; i < ARRAY_SIZE(wm8994->reg_cache); i++)
-		if (wm8994_volatile(i) || !wm8994_readable(i))
-			wm8994->reg_cache[i] = 0;
+	/* Read our current status back from the chip - we don't want to
+	 * reset as this may interfere with the GPIO or LDO operation. */
+	for (i = 0; i < WM8994_CACHE_SIZE; i++) {
+		if (!wm8994_readable(i) || wm8994_volatile(i))
+			continue;
+
+		ret = wm8994_reg_read(codec->control_data, i);
+		if (ret <= 0)
+			continue;
+
+		ret = snd_soc_cache_write(codec, i, ret);
+		if (ret != 0) {
+			dev_err(codec->dev,
+				"Failed to initialise cache for 0x%x: %d\n",
+				i, ret);
+			goto err;
+		}
+	}
 
 	/* Set revision-specific configuration */
 	wm8994->revision = snd_soc_read(codec, WM8994_CHIP_REVISION);
-	switch (wm8994->revision) {
-	case 2:
-	case 3:
-		wm8994->hubs.dcs_codes = -5;
-		wm8994->hubs.hp_startup_mode = 1;
+	switch (control->type) {
+	case WM8994:
+		switch (wm8994->revision) {
+		case 2:
+		case 3:
+			wm8994->hubs.dcs_codes = -5;
+			wm8994->hubs.hp_startup_mode = 1;
+			wm8994->hubs.dcs_readback_mode = 1;
+			break;
+		default:
+			wm8994->hubs.dcs_readback_mode = 1;
+			break;
+		}
+		break;
+
+	case WM8958:
 		wm8994->hubs.dcs_readback_mode = 1;
 		break;
+
 	default:
-		wm8994->hubs.dcs_readback_mode = 1;
 		break;
 	}
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
-				 wm8994_mic_irq, "Mic 1 detect", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic1 detect IRQ: %d\n", ret);
+	switch (control->type) {
+	case WM8994:
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_DET,
+					 wm8994_mic_irq, "Mic 1 detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic1 detect IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
-				 wm8994_mic_irq, "Mic 1 short", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic1 short IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_SHRT,
+					 wm8994_mic_irq, "Mic 1 short",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic1 short IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
-				 wm8994_mic_irq, "Mic 2 detect", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic2 detect IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC2_DET,
+					 wm8994_mic_irq, "Mic 2 detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic2 detect IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
-				 wm8994_mic_irq, "Mic 2 short", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic2 short IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC2_SHRT,
+					 wm8994_mic_irq, "Mic 2 short",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic2 short IRQ: %d\n",
+				 ret);
+		break;
+
+	case WM8958:
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_DET,
+					 wm8958_mic_irq, "Mic detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic detect IRQ: %d\n",
+				 ret);
+		break;
+	}
 
 	/* Remember if AIFnLRCLK is configured as a GPIO.  This should be
 	 * configured on init - if a system wants to do this dynamically
@@ -4034,10 +3105,36 @@
 	wm_hubs_add_analogue_controls(codec);
 	snd_soc_add_controls(codec, wm8994_snd_controls,
 			     ARRAY_SIZE(wm8994_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8994_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
 				  ARRAY_SIZE(wm8994_dapm_widgets));
+
+	switch (control->type) {
+	case WM8994:
+		snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
+					  ARRAY_SIZE(wm8994_specific_dapm_widgets));
+		break;
+	case WM8958:
+		snd_soc_add_controls(codec, wm8958_snd_controls,
+				     ARRAY_SIZE(wm8958_snd_controls));
+		snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
+					  ARRAY_SIZE(wm8958_dapm_widgets));
+		break;
+	}
+
 	wm_hubs_add_analogue_routes(codec, 0, 0);
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	switch (control->type) {
+	case WM8994:
+		snd_soc_dapm_add_routes(dapm, wm8994_intercon,
+					ARRAY_SIZE(wm8994_intercon));
+		break;
+	case WM8958:
+		snd_soc_dapm_add_routes(dapm, wm8958_intercon,
+					ARRAY_SIZE(wm8958_intercon));
+		break;
+	}
 
 	return 0;
 
@@ -4054,13 +3151,29 @@
 static int  wm8994_codec_remove(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
 
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
+	pm_runtime_disable(codec->dev);
+
+	switch (control->type) {
+	case WM8994:
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+				wm8994);
+		break;
+
+	case WM8958:
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+				wm8994);
+		break;
+	}
 	kfree(wm8994->retune_mobile_texts);
 	kfree(wm8994->drc_texts);
 	kfree(wm8994);
@@ -4073,11 +3186,16 @@
 	.remove =	wm8994_codec_remove,
 	.suspend =	wm8994_suspend,
 	.resume =	wm8994_resume,
-	.read = wm8994_read,
-	.write = wm8994_write,
+	.read =		wm8994_read,
+	.write =	wm8994_write,
 	.readable_register = wm8994_readable,
 	.volatile_register = wm8994_volatile,
 	.set_bias_level = wm8994_set_bias_level,
+
+	.reg_cache_size = WM8994_CACHE_SIZE,
+	.reg_cache_default = wm8994_reg_defaults,
+	.reg_word_size = 2,
+	.compress_type = SND_SOC_RBTREE_COMPRESSION,
 };
 
 static int __devinit wm8994_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index d8dce26..0c355bf 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -28,7 +28,21 @@
 #define WM8994_FLL_SRC_LRCLK  3
 #define WM8994_FLL_SRC_BCLK   4
 
+typedef void (*wm8958_micdet_cb)(u16 status, void *data);
+
 int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
 		      int micbias, int det, int shrt);
+int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		      wm8958_micdet_cb cb, void *cb_data);
+
+#define WM8994_CACHE_SIZE 1570
+
+struct wm8994_access_mask {
+	unsigned short readable;   /* Mask of readable bits */
+	unsigned short writable;   /* Mask of writable bits */
+};
+
+extern const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE];
+extern const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE];
 
 #endif
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
new file mode 100644
index 0000000..6045cbd
--- /dev/null
+++ b/sound/soc/codecs/wm8995.c
@@ -0,0 +1,1818 @@
+/*
+ * wm8995.c  --  WM8995 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * Based on wm8994.c and wm_hubs.c by Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8995.h"
+
+static const u16 wm8995_reg_defs[WM8995_MAX_REGISTER + 1] = {
+	[0]     = 0x8995, [5]     = 0x0100, [16]    = 0x000b, [17]    = 0x000b,
+	[24]    = 0x02c0, [25]    = 0x02c0, [26]    = 0x02c0, [27]    = 0x02c0,
+	[28]    = 0x000f, [32]    = 0x0005, [33]    = 0x0005, [40]    = 0x0003,
+	[41]    = 0x0013, [48]    = 0x0004, [56]    = 0x09f8, [64]    = 0x1f25,
+	[69]    = 0x0004, [82]    = 0xaaaa, [84]    = 0x2a2a, [146]   = 0x0060,
+	[256]   = 0x0002, [257]   = 0x8004, [520]   = 0x0010, [528]   = 0x0083,
+	[529]   = 0x0083, [548]   = 0x0c80, [580]   = 0x0c80, [768]   = 0x4050,
+	[769]   = 0x4000, [771]   = 0x0040, [772]   = 0x0040, [773]   = 0x0040,
+	[774]   = 0x0004, [775]   = 0x0100, [784]   = 0x4050, [785]   = 0x4000,
+	[787]   = 0x0040, [788]   = 0x0040, [789]   = 0x0040, [1024]  = 0x00c0,
+	[1025]  = 0x00c0, [1026]  = 0x00c0, [1027]  = 0x00c0, [1028]  = 0x00c0,
+	[1029]  = 0x00c0, [1030]  = 0x00c0, [1031]  = 0x00c0, [1056]  = 0x0200,
+	[1057]  = 0x0010, [1058]  = 0x0200, [1059]  = 0x0010, [1088]  = 0x0098,
+	[1089]  = 0x0845, [1104]  = 0x0098, [1105]  = 0x0845, [1152]  = 0x6318,
+	[1153]  = 0x6300, [1154]  = 0x0fca, [1155]  = 0x0400, [1156]  = 0x00d8,
+	[1157]  = 0x1eb5, [1158]  = 0xf145, [1159]  = 0x0b75, [1160]  = 0x01c5,
+	[1161]  = 0x1c58, [1162]  = 0xf373, [1163]  = 0x0a54, [1164]  = 0x0558,
+	[1165]  = 0x168e, [1166]  = 0xf829, [1167]  = 0x07ad, [1168]  = 0x1103,
+	[1169]  = 0x0564, [1170]  = 0x0559, [1171]  = 0x4000, [1184]  = 0x6318,
+	[1185]  = 0x6300, [1186]  = 0x0fca, [1187]  = 0x0400, [1188]  = 0x00d8,
+	[1189]  = 0x1eb5, [1190]  = 0xf145, [1191]  = 0x0b75, [1192]  = 0x01c5,
+	[1193]  = 0x1c58, [1194]  = 0xf373, [1195]  = 0x0a54, [1196]  = 0x0558,
+	[1197]  = 0x168e, [1198]  = 0xf829, [1199]  = 0x07ad, [1200]  = 0x1103,
+	[1201]  = 0x0564, [1202]  = 0x0559, [1203]  = 0x4000, [1280]  = 0x00c0,
+	[1281]  = 0x00c0, [1282]  = 0x00c0, [1283]  = 0x00c0, [1312]  = 0x0200,
+	[1313]  = 0x0010, [1344]  = 0x0098, [1345]  = 0x0845, [1408]  = 0x6318,
+	[1409]  = 0x6300, [1410]  = 0x0fca, [1411]  = 0x0400, [1412]  = 0x00d8,
+	[1413]  = 0x1eb5, [1414]  = 0xf145, [1415]  = 0x0b75, [1416]  = 0x01c5,
+	[1417]  = 0x1c58, [1418]  = 0xf373, [1419]  = 0x0a54, [1420]  = 0x0558,
+	[1421]  = 0x168e, [1422]  = 0xf829, [1423]  = 0x07ad, [1424]  = 0x1103,
+	[1425]  = 0x0564, [1426]  = 0x0559, [1427]  = 0x4000, [1568]  = 0x0002,
+	[1792]  = 0xa100, [1793]  = 0xa101, [1794]  = 0xa101, [1795]  = 0xa101,
+	[1796]  = 0xa101, [1797]  = 0xa101, [1798]  = 0xa101, [1799]  = 0xa101,
+	[1800]  = 0xa101, [1801]  = 0xa101, [1802]  = 0xa101, [1803]  = 0xa101,
+	[1804]  = 0xa101, [1805]  = 0xa101, [1825]  = 0x0055, [1848]  = 0x3fff,
+	[1849]  = 0x1fff, [2049]  = 0x0001, [2050]  = 0x0069, [2056]  = 0x0002,
+	[2057]  = 0x0003, [2058]  = 0x0069, [12288] = 0x0001, [12289] = 0x0001,
+	[12291] = 0x0006, [12292] = 0x0040, [12293] = 0x0001, [12294] = 0x000f,
+	[12295] = 0x0006, [12296] = 0x0001, [12297] = 0x0003, [12298] = 0x0104,
+	[12300] = 0x0060, [12301] = 0x0011, [12302] = 0x0401, [12304] = 0x0050,
+	[12305] = 0x0003, [12306] = 0x0100, [12308] = 0x0051, [12309] = 0x0003,
+	[12310] = 0x0104, [12311] = 0x000a, [12312] = 0x0060, [12313] = 0x003b,
+	[12314] = 0x0502, [12315] = 0x0100, [12316] = 0x2fff, [12320] = 0x2fff,
+	[12324] = 0x2fff, [12328] = 0x2fff, [12332] = 0x2fff, [12336] = 0x2fff,
+	[12340] = 0x2fff, [12344] = 0x2fff, [12348] = 0x2fff, [12352] = 0x0001,
+	[12353] = 0x0001, [12355] = 0x0006, [12356] = 0x0040, [12357] = 0x0001,
+	[12358] = 0x000f, [12359] = 0x0006, [12360] = 0x0001, [12361] = 0x0003,
+	[12362] = 0x0104, [12364] = 0x0060, [12365] = 0x0011, [12366] = 0x0401,
+	[12368] = 0x0050, [12369] = 0x0003, [12370] = 0x0100, [12372] = 0x0060,
+	[12373] = 0x003b, [12374] = 0x0502, [12375] = 0x0100, [12376] = 0x2fff,
+	[12380] = 0x2fff, [12384] = 0x2fff, [12388] = 0x2fff, [12392] = 0x2fff,
+	[12396] = 0x2fff, [12400] = 0x2fff, [12404] = 0x2fff, [12408] = 0x2fff,
+	[12412] = 0x2fff, [12416] = 0x0001, [12417] = 0x0001, [12419] = 0x0006,
+	[12420] = 0x0040, [12421] = 0x0001, [12422] = 0x000f, [12423] = 0x0006,
+	[12424] = 0x0001, [12425] = 0x0003, [12426] = 0x0106, [12428] = 0x0061,
+	[12429] = 0x0011, [12430] = 0x0401, [12432] = 0x0050, [12433] = 0x0003,
+	[12434] = 0x0102, [12436] = 0x0051, [12437] = 0x0003, [12438] = 0x0106,
+	[12439] = 0x000a, [12440] = 0x0061, [12441] = 0x003b, [12442] = 0x0502,
+	[12443] = 0x0100, [12444] = 0x2fff, [12448] = 0x2fff, [12452] = 0x2fff,
+	[12456] = 0x2fff, [12460] = 0x2fff, [12464] = 0x2fff, [12468] = 0x2fff,
+	[12472] = 0x2fff, [12476] = 0x2fff, [12480] = 0x0001, [12481] = 0x0001,
+	[12483] = 0x0006, [12484] = 0x0040, [12485] = 0x0001, [12486] = 0x000f,
+	[12487] = 0x0006, [12488] = 0x0001, [12489] = 0x0003, [12490] = 0x0106,
+	[12492] = 0x0061, [12493] = 0x0011, [12494] = 0x0401, [12496] = 0x0050,
+	[12497] = 0x0003, [12498] = 0x0102, [12500] = 0x0061, [12501] = 0x003b,
+	[12502] = 0x0502, [12503] = 0x0100, [12504] = 0x2fff, [12508] = 0x2fff,
+	[12512] = 0x2fff, [12516] = 0x2fff, [12520] = 0x2fff, [12524] = 0x2fff,
+	[12528] = 0x2fff, [12532] = 0x2fff, [12536] = 0x2fff, [12540] = 0x2fff,
+	[12544] = 0x0060, [12546] = 0x0601, [12548] = 0x0050, [12550] = 0x0100,
+	[12552] = 0x0001, [12554] = 0x0104, [12555] = 0x0100, [12556] = 0x2fff,
+	[12560] = 0x2fff, [12564] = 0x2fff, [12568] = 0x2fff, [12572] = 0x2fff,
+	[12576] = 0x2fff, [12580] = 0x2fff, [12584] = 0x2fff, [12588] = 0x2fff,
+	[12592] = 0x2fff, [12596] = 0x2fff, [12600] = 0x2fff, [12604] = 0x2fff,
+	[12608] = 0x0061, [12610] = 0x0601, [12612] = 0x0050, [12614] = 0x0102,
+	[12616] = 0x0001, [12618] = 0x0106, [12619] = 0x0100, [12620] = 0x2fff,
+	[12624] = 0x2fff, [12628] = 0x2fff, [12632] = 0x2fff, [12636] = 0x2fff,
+	[12640] = 0x2fff, [12644] = 0x2fff, [12648] = 0x2fff, [12652] = 0x2fff,
+	[12656] = 0x2fff, [12660] = 0x2fff, [12664] = 0x2fff, [12668] = 0x2fff,
+	[12672] = 0x0060, [12674] = 0x0601, [12676] = 0x0061, [12678] = 0x0601,
+	[12680] = 0x0050, [12682] = 0x0300, [12684] = 0x0001, [12686] = 0x0304,
+	[12688] = 0x0040, [12690] = 0x000f, [12692] = 0x0001, [12695] = 0x0100
+};
+
+struct fll_config {
+	int src;
+	int in;
+	int out;
+};
+
+struct wm8995_priv {
+	enum snd_soc_control_type control_type;
+	int sysclk[2];
+	int mclk[2];
+	int aifclk[2];
+	struct fll_config fll[2], fll_suspend[2];
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0);
+
+static const char *in1l_text[] = {
+	"Differential", "Single-ended IN1LN", "Single-ended IN1LP"
+};
+
+static const SOC_ENUM_SINGLE_DECL(in1l_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
+				  2, in1l_text);
+
+static const char *in1r_text[] = {
+	"Differential", "Single-ended IN1RN", "Single-ended IN1RP"
+};
+
+static const SOC_ENUM_SINGLE_DECL(in1r_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
+				  0, in1r_text);
+
+static const char *dmic_src_text[] = {
+	"DMICDAT1", "DMICDAT2", "DMICDAT3"
+};
+
+static const SOC_ENUM_SINGLE_DECL(dmic_src1_enum, WM8995_POWER_MANAGEMENT_5,
+				  8, dmic_src_text);
+static const SOC_ENUM_SINGLE_DECL(dmic_src2_enum, WM8995_POWER_MANAGEMENT_5,
+				  6, dmic_src_text);
+
+static const struct snd_kcontrol_new wm8995_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("DAC1 Volume", WM8995_DAC1_LEFT_VOLUME,
+		WM8995_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R("DAC1 Switch", WM8995_DAC1_LEFT_VOLUME,
+		WM8995_DAC1_RIGHT_VOLUME, 9, 1, 1),
+
+	SOC_DOUBLE_R_TLV("DAC2 Volume", WM8995_DAC2_LEFT_VOLUME,
+		WM8995_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R("DAC2 Switch", WM8995_DAC2_LEFT_VOLUME,
+		WM8995_DAC2_RIGHT_VOLUME, 9, 1, 1),
+
+	SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8995_AIF1_DAC1_LEFT_VOLUME,
+		WM8995_AIF1_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8995_AIF1_DAC2_LEFT_VOLUME,
+		WM8995_AIF1_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8995_AIF2_DAC_LEFT_VOLUME,
+		WM8995_AIF2_DAC_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+
+	SOC_DOUBLE_R_TLV("IN1LR Volume", WM8995_LEFT_LINE_INPUT_1_VOLUME,
+		WM8995_RIGHT_LINE_INPUT_1_VOLUME, 0, 31, 0, in1lr_pga_tlv),
+
+	SOC_SINGLE_TLV("IN1L Boost", WM8995_LEFT_LINE_INPUT_CONTROL,
+		4, 3, 0, in1l_boost_tlv),
+
+	SOC_ENUM("IN1L Mode", in1l_enum),
+	SOC_ENUM("IN1R Mode", in1r_enum),
+
+	SOC_ENUM("DMIC1 SRC", dmic_src1_enum),
+	SOC_ENUM("DMIC2 SRC", dmic_src2_enum),
+
+	SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8995_DAC1_MIXER_VOLUMES, 0, 5,
+		24, 0, sidetone_tlv),
+	SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8995_DAC2_MIXER_VOLUMES, 0, 5,
+		24, 0, sidetone_tlv),
+
+	SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8995_AIF1_ADC1_LEFT_VOLUME,
+		WM8995_AIF1_ADC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8995_AIF1_ADC2_LEFT_VOLUME,
+		WM8995_AIF1_ADC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8995_AIF2_ADC_LEFT_VOLUME,
+		WM8995_AIF2_ADC_RIGHT_VOLUME, 0, 96, 0, digital_tlv)
+};
+
+static void wm8995_update_class_w(struct snd_soc_codec *codec)
+{
+	int enable = 1;
+	int source = 0;  /* GCC flow analysis can't track enable */
+	int reg, reg_r;
+
+	/* We also need the same setting for L/R and only one path */
+	reg = snd_soc_read(codec, WM8995_DAC1_LEFT_MIXER_ROUTING);
+	switch (reg) {
+	case WM8995_AIF2DACL_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF2DAC\n");
+		source = 2 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	case WM8995_AIF1DAC2L_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF1DAC2\n");
+		source = 1 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	case WM8995_AIF1DAC1L_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF1DAC1\n");
+		source = 0 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	default:
+		dev_dbg(codec->dev, "DAC mixer setting: %x\n", reg);
+		enable = 0;
+		break;
+	}
+
+	reg_r = snd_soc_read(codec, WM8995_DAC1_RIGHT_MIXER_ROUTING);
+	if (reg_r != reg) {
+		dev_dbg(codec->dev, "Left and right DAC mixers different\n");
+		enable = 0;
+	}
+
+	if (enable) {
+		dev_dbg(codec->dev, "Class W enabled\n");
+		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
+				    WM8995_CP_DYN_PWR_MASK |
+				    WM8995_CP_DYN_SRC_SEL_MASK,
+				    source | WM8995_CP_DYN_PWR);
+	} else {
+		dev_dbg(codec->dev, "Class W disabled\n");
+		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
+				    WM8995_CP_DYN_PWR_MASK, 0);
+	}
+}
+
+static int check_clk_sys(struct snd_soc_dapm_widget *source,
+			 struct snd_soc_dapm_widget *sink)
+{
+	unsigned int reg;
+	const char *clk;
+
+	reg = snd_soc_read(source->codec, WM8995_CLOCKING_1);
+	/* Check what we're currently using for CLK_SYS */
+	if (reg & WM8995_SYSCLK_SRC)
+		clk = "AIF2CLK";
+	else
+		clk = "AIF1CLK";
+	return !strcmp(source->name, clk);
+}
+
+static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_codec *codec;
+	int ret;
+
+	w = snd_kcontrol_chip(kcontrol);
+	codec = w->codec;
+	ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
+	wm8995_update_class_w(codec);
+	return ret;
+}
+
+static int hp_supply_event(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+
+	codec = w->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable the headphone amp */
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    WM8995_HPOUT1L_ENA |
+				    WM8995_HPOUT1R_ENA);
+
+		/* Enable the second stage */
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_DLY_MASK |
+				    WM8995_HPOUT1R_DLY_MASK,
+				    WM8995_HPOUT1L_DLY |
+				    WM8995_HPOUT1R_DLY);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
+				    WM8995_CP_ENA_MASK, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static void dc_servo_cmd(struct snd_soc_codec *codec,
+			 unsigned int reg, unsigned int val, unsigned int mask)
+{
+	int timeout = 10;
+
+	dev_dbg(codec->dev, "%s: reg = %#x, val = %#x, mask = %#x\n",
+		__func__, reg, val, mask);
+
+	snd_soc_write(codec, reg, val);
+	while (timeout--) {
+		msleep(10);
+		val = snd_soc_read(codec, WM8995_DC_SERVO_READBACK_0);
+		if ((val & mask) == mask)
+			return;
+	}
+
+	dev_err(codec->dev, "Timed out waiting for DC Servo\n");
+}
+
+static int hp_event(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+	unsigned int reg;
+
+	codec = w->codec;
+	reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
+				    WM8995_CP_ENA_MASK, WM8995_CP_ENA);
+
+		msleep(5);
+
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    WM8995_HPOUT1L_ENA | WM8995_HPOUT1R_ENA);
+
+		udelay(20);
+
+		reg |= WM8995_HPOUT1L_DLY | WM8995_HPOUT1R_DLY;
+		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
+
+		snd_soc_write(codec, WM8995_DC_SERVO_1, WM8995_DCS_ENA_CHAN_0 |
+			      WM8995_DCS_ENA_CHAN_1);
+
+		dc_servo_cmd(codec, WM8995_DC_SERVO_2,
+			     WM8995_DCS_TRIG_STARTUP_0 |
+			     WM8995_DCS_TRIG_STARTUP_1,
+			     WM8995_DCS_TRIG_DAC_WR_0 |
+			     WM8995_DCS_TRIG_DAC_WR_1);
+
+		reg |= WM8995_HPOUT1R_OUTP | WM8995_HPOUT1R_RMV_SHORT |
+		       WM8995_HPOUT1L_OUTP | WM8995_HPOUT1L_RMV_SHORT;
+		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
+
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_OUTP_MASK |
+				    WM8995_HPOUT1R_OUTP_MASK |
+				    WM8995_HPOUT1L_RMV_SHORT_MASK |
+				    WM8995_HPOUT1R_RMV_SHORT_MASK, 0);
+
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_DLY_MASK |
+				    WM8995_HPOUT1R_DLY_MASK, 0);
+
+		snd_soc_write(codec, WM8995_DC_SERVO_1, 0);
+
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    0);
+		break;
+	}
+
+	return 0;
+}
+
+static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
+{
+	struct wm8995_priv *wm8995;
+	int rate;
+	int reg1 = 0;
+	int offset;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	if (aif)
+		offset = 4;
+	else
+		offset = 0;
+
+	switch (wm8995->sysclk[aif]) {
+	case WM8995_SYSCLK_MCLK1:
+		rate = wm8995->mclk[0];
+		break;
+	case WM8995_SYSCLK_MCLK2:
+		reg1 |= 0x8;
+		rate = wm8995->mclk[1];
+		break;
+	case WM8995_SYSCLK_FLL1:
+		reg1 |= 0x10;
+		rate = wm8995->fll[0].out;
+		break;
+	case WM8995_SYSCLK_FLL2:
+		reg1 |= 0x18;
+		rate = wm8995->fll[1].out;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate >= 13500000) {
+		rate /= 2;
+		reg1 |= WM8995_AIF1CLK_DIV;
+
+		dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n",
+			aif + 1, rate);
+	}
+
+	wm8995->aifclk[aif] = rate;
+
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1 + offset,
+			    WM8995_AIF1CLK_SRC_MASK | WM8995_AIF1CLK_DIV_MASK,
+			    reg1);
+	return 0;
+}
+
+static int configure_clock(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	int old, new;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	/* Bring up the AIF clocks first */
+	configure_aif_clock(codec, 0);
+	configure_aif_clock(codec, 1);
+
+	/*
+	 * Then switch CLK_SYS over to the higher of them; a change
+	 * can only happen as a result of a clocking change which can
+	 * only be made outside of DAPM so we can safely redo the
+	 * clocking.
+	 */
+
+	/* If they're equal it doesn't matter which is used */
+	if (wm8995->aifclk[0] == wm8995->aifclk[1])
+		return 0;
+
+	if (wm8995->aifclk[0] < wm8995->aifclk[1])
+		new = WM8995_SYSCLK_SRC;
+	else
+		new = 0;
+
+	old = snd_soc_read(codec, WM8995_CLOCKING_1) & WM8995_SYSCLK_SRC;
+
+	/* If there's no change then we're done. */
+	if (old == new)
+		return 0;
+
+	snd_soc_update_bits(codec, WM8995_CLOCKING_1,
+			    WM8995_SYSCLK_SRC_MASK, new);
+
+	snd_soc_dapm_sync(&codec->dapm);
+
+	return 0;
+}
+
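To make the CLK_SYS selection above concrete with hypothetical rates: if AIF1CLK is sourced from an 11.2896 MHz MCLK1 (below 13.5 MHz, so no divider) and AIF2CLK from a 24.576 MHz FLL2 output (at or above 13.5 MHz, so configure_aif_clock() sets the AIFnCLK_DIV bit and aifclk[1] becomes 12.288 MHz), then aifclk[0] < aifclk[1] and configure_clock() writes WM8995_SYSCLK_SRC so that CLK_SYS, and therefore the check_clk_sys() route check, follow AIF2CLK.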
+static int clk_sys_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return configure_clock(codec);
+
+	case SND_SOC_DAPM_POST_PMD:
+		configure_clock(codec);
+		break;
+	}
+
+	return 0;
+}
+
+static const char *sidetone_text[] = {
+	"ADC/DMIC1", "DMIC2",
+};
+
+static const struct soc_enum sidetone1_enum =
+	SOC_ENUM_SINGLE(WM8995_SIDETONE, 0, 2, sidetone_text);
+
+static const struct snd_kcontrol_new sidetone1_mux =
+	SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);
+
+static const struct soc_enum sidetone2_enum =
+	SOC_ENUM_SINGLE(WM8995_SIDETONE, 1, 2, sidetone_text);
+
+static const struct snd_kcontrol_new sidetone2_mux =
+	SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);
+
+static const struct snd_kcontrol_new aif1adc1l_mix[] = {
+	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc1r_mix[] = {
+	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc2l_mix[] = {
+	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc2r_mix[] = {
+	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac1l_mix[] = {
+	WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		5, 1, 0),
+	WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		4, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		2, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac1r_mix[] = {
+	WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		5, 1, 0),
+	WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		4, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		2, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif2dac2l_mix[] = {
+	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		5, 1, 0),
+	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		4, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		2, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif2dac2r_mix[] = {
+	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		5, 1, 0),
+	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		4, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		2, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new in1l_pga =
+	SOC_DAPM_SINGLE("IN1L Switch", WM8995_POWER_MANAGEMENT_2, 5, 1, 0);
+
+static const struct snd_kcontrol_new in1r_pga =
+	SOC_DAPM_SINGLE("IN1R Switch", WM8995_POWER_MANAGEMENT_2, 4, 1, 0);
+
+static const char *adc_mux_text[] = {
+	"ADC",
+	"DMIC",
+};
+
+static const struct soc_enum adc_enum =
+	SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);
+
+static const struct snd_kcontrol_new adcl_mux =
+	SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);
+
+static const struct snd_kcontrol_new adcr_mux =
+	SOC_DAPM_ENUM_VIRT("ADCR Mux", adc_enum);
+
+static const char *spk_src_text[] = {
+	"DAC1L", "DAC1R", "DAC2L", "DAC2R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(spk1l_src_enum, WM8995_LEFT_PDM_SPEAKER_1,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk1r_src_enum, WM8995_RIGHT_PDM_SPEAKER_1,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk2l_src_enum, WM8995_LEFT_PDM_SPEAKER_2,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk2r_src_enum, WM8995_RIGHT_PDM_SPEAKER_2,
+				  0, spk_src_text);
+
+static const struct snd_kcontrol_new spk1l_mux =
+	SOC_DAPM_ENUM("SPK1L SRC", spk1l_src_enum);
+static const struct snd_kcontrol_new spk1r_mux =
+	SOC_DAPM_ENUM("SPK1R SRC", spk1r_src_enum);
+static const struct snd_kcontrol_new spk2l_mux =
+	SOC_DAPM_ENUM("SPK2L SRC", spk2l_src_enum);
+static const struct snd_kcontrol_new spk2r_mux =
+	SOC_DAPM_ENUM("SPK2R SRC", spk2r_src_enum);
+
+static const struct snd_soc_dapm_widget wm8995_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("DMIC1DAT"),
+	SND_SOC_DAPM_INPUT("DMIC2DAT"),
+
+	SND_SOC_DAPM_INPUT("IN1L"),
+	SND_SOC_DAPM_INPUT("IN1R"),
+
+	SND_SOC_DAPM_MIXER("IN1L PGA", SND_SOC_NOPM, 0, 0,
+		&in1l_pga, 1),
+	SND_SOC_DAPM_MIXER("IN1R PGA", SND_SOC_NOPM, 0, 0,
+		&in1r_pga, 1),
+
+	SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8995_POWER_MANAGEMENT_1, 8, 0),
+	SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8995_POWER_MANAGEMENT_1, 9, 0),
+
+	SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8995_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8995_AIF2_CLOCKING_1, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8995_CLOCKING_1, 3, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8995_CLOCKING_1, 2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("SYSDSPCLK", WM8995_CLOCKING_1, 1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0,
+		WM8995_POWER_MANAGEMENT_3, 9, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0,
+		WM8995_POWER_MANAGEMENT_3, 8, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0,
+		SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
+		0, WM8995_POWER_MANAGEMENT_3, 11, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
+		0, WM8995_POWER_MANAGEMENT_3, 10, 0),
+
+	SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0,
+		&adcl_mux),
+	SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0,
+		&adcr_mux),
+
+	SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8995_POWER_MANAGEMENT_3, 5, 0),
+	SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8995_POWER_MANAGEMENT_3, 4, 0),
+	SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8995_POWER_MANAGEMENT_3, 3, 0),
+	SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8995_POWER_MANAGEMENT_3, 2, 0),
+
+	SND_SOC_DAPM_ADC("ADCL", NULL, WM8995_POWER_MANAGEMENT_3, 1, 0),
+	SND_SOC_DAPM_ADC("ADCR", NULL, WM8995_POWER_MANAGEMENT_3, 0, 0),
+
+	SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC1R Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc1r_mix, ARRAY_SIZE(aif1adc1r_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC2L Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc2l_mix, ARRAY_SIZE(aif1adc2l_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC2R Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc2r_mix, ARRAY_SIZE(aif1adc2r_mix)),
+
+	SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		9, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		8, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM,
+		0, 0),
+
+	SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		11, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		10, 0),
+
+	SND_SOC_DAPM_MIXER("AIF2DAC2L Mixer", SND_SOC_NOPM, 0, 0,
+		aif2dac2l_mix, ARRAY_SIZE(aif2dac2l_mix)),
+	SND_SOC_DAPM_MIXER("AIF2DAC2R Mixer", SND_SOC_NOPM, 0, 0,
+		aif2dac2r_mix, ARRAY_SIZE(aif2dac2r_mix)),
+
+	SND_SOC_DAPM_DAC("DAC2L", NULL, WM8995_POWER_MANAGEMENT_4, 3, 0),
+	SND_SOC_DAPM_DAC("DAC2R", NULL, WM8995_POWER_MANAGEMENT_4, 2, 0),
+	SND_SOC_DAPM_DAC("DAC1L", NULL, WM8995_POWER_MANAGEMENT_4, 1, 0),
+	SND_SOC_DAPM_DAC("DAC1R", NULL, WM8995_POWER_MANAGEMENT_4, 0, 0),
+
+	SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, dac1l_mix,
+		ARRAY_SIZE(dac1l_mix)),
+	SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, dac1r_mix,
+		ARRAY_SIZE(dac1r_mix)),
+
+	SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &sidetone1_mux),
+	SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &sidetone2_mux),
+
+	SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0,
+		hp_supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_MUX("SPK1L Driver", WM8995_LEFT_PDM_SPEAKER_1,
+		4, 0, &spk1l_mux),
+	SND_SOC_DAPM_MUX("SPK1R Driver", WM8995_RIGHT_PDM_SPEAKER_1,
+		4, 0, &spk1r_mux),
+	SND_SOC_DAPM_MUX("SPK2L Driver", WM8995_LEFT_PDM_SPEAKER_2,
+		4, 0, &spk2l_mux),
+	SND_SOC_DAPM_MUX("SPK2R Driver", WM8995_RIGHT_PDM_SPEAKER_2,
+		4, 0, &spk2r_mux),
+
+	SND_SOC_DAPM_SUPPLY("LDO2", WM8995_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
+
+	SND_SOC_DAPM_OUTPUT("HP1L"),
+	SND_SOC_DAPM_OUTPUT("HP1R"),
+	SND_SOC_DAPM_OUTPUT("SPK1L"),
+	SND_SOC_DAPM_OUTPUT("SPK1R"),
+	SND_SOC_DAPM_OUTPUT("SPK2L"),
+	SND_SOC_DAPM_OUTPUT("SPK2R")
+};
+
+static const struct snd_soc_dapm_route wm8995_intercon[] = {
+	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
+	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
+
+	{ "DSP1CLK", NULL, "CLK_SYS" },
+	{ "DSP2CLK", NULL, "CLK_SYS" },
+	{ "SYSDSPCLK", NULL, "CLK_SYS" },
+
+	{ "AIF1ADC1L", NULL, "AIF1CLK" },
+	{ "AIF1ADC1L", NULL, "DSP1CLK" },
+	{ "AIF1ADC1R", NULL, "AIF1CLK" },
+	{ "AIF1ADC1R", NULL, "DSP1CLK" },
+	{ "AIF1ADC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1ADC2L", NULL, "AIF1CLK" },
+	{ "AIF1ADC2L", NULL, "DSP1CLK" },
+	{ "AIF1ADC2R", NULL, "AIF1CLK" },
+	{ "AIF1ADC2R", NULL, "DSP1CLK" },
+	{ "AIF1ADC2R", NULL, "SYSDSPCLK" },
+
+	{ "DMIC1L", NULL, "DMIC1DAT" },
+	{ "DMIC1L", NULL, "CLK_SYS" },
+	{ "DMIC1R", NULL, "DMIC1DAT" },
+	{ "DMIC1R", NULL, "CLK_SYS" },
+	{ "DMIC2L", NULL, "DMIC2DAT" },
+	{ "DMIC2L", NULL, "CLK_SYS" },
+	{ "DMIC2R", NULL, "DMIC2DAT" },
+	{ "DMIC2R", NULL, "CLK_SYS" },
+
+	{ "ADCL", NULL, "AIF1CLK" },
+	{ "ADCL", NULL, "DSP1CLK" },
+	{ "ADCL", NULL, "SYSDSPCLK" },
+
+	{ "ADCR", NULL, "AIF1CLK" },
+	{ "ADCR", NULL, "DSP1CLK" },
+	{ "ADCR", NULL, "SYSDSPCLK" },
+
+	{ "IN1L PGA", "IN1L Switch", "IN1L" },
+	{ "IN1R PGA", "IN1R Switch", "IN1R" },
+	{ "IN1L PGA", NULL, "LDO2" },
+	{ "IN1R PGA", NULL, "LDO2" },
+
+	{ "ADCL", NULL, "IN1L PGA" },
+	{ "ADCR", NULL, "IN1R PGA" },
+
+	{ "ADCL Mux", "ADC", "ADCL" },
+	{ "ADCL Mux", "DMIC", "DMIC1L" },
+	{ "ADCR Mux", "ADC", "ADCR" },
+	{ "ADCR Mux", "DMIC", "DMIC1R" },
+
+	/* AIF1 outputs */
+	{ "AIF1ADC1L", NULL, "AIF1ADC1L Mixer" },
+	{ "AIF1ADC1L Mixer", "ADC/DMIC Switch", "ADCL Mux" },
+
+	{ "AIF1ADC1R", NULL, "AIF1ADC1R Mixer" },
+	{ "AIF1ADC1R Mixer", "ADC/DMIC Switch", "ADCR Mux" },
+
+	{ "AIF1ADC2L", NULL, "AIF1ADC2L Mixer" },
+	{ "AIF1ADC2L Mixer", "DMIC Switch", "DMIC2L" },
+
+	{ "AIF1ADC2R", NULL, "AIF1ADC2R Mixer" },
+	{ "AIF1ADC2R Mixer", "DMIC Switch", "DMIC2R" },
+
+	/* Sidetone */
+	{ "Left Sidetone", "ADC/DMIC1", "AIF1ADC1L" },
+	{ "Left Sidetone", "DMIC2", "AIF1ADC2L" },
+	{ "Right Sidetone", "ADC/DMIC1", "AIF1ADC1R" },
+	{ "Right Sidetone", "DMIC2", "AIF1ADC2R" },
+
+	{ "AIF1DAC1L", NULL, "AIF1CLK" },
+	{ "AIF1DAC1L", NULL, "DSP1CLK" },
+	{ "AIF1DAC1R", NULL, "AIF1CLK" },
+	{ "AIF1DAC1R", NULL, "DSP1CLK" },
+	{ "AIF1DAC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1DAC2L", NULL, "AIF1CLK" },
+	{ "AIF1DAC2L", NULL, "DSP1CLK" },
+	{ "AIF1DAC2R", NULL, "AIF1CLK" },
+	{ "AIF1DAC2R", NULL, "DSP1CLK" },
+	{ "AIF1DAC2R", NULL, "SYSDSPCLK" },
+
+	{ "DAC1L", NULL, "AIF1CLK" },
+	{ "DAC1L", NULL, "DSP1CLK" },
+	{ "DAC1L", NULL, "SYSDSPCLK" },
+
+	{ "DAC1R", NULL, "AIF1CLK" },
+	{ "DAC1R", NULL, "DSP1CLK" },
+	{ "DAC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1DAC1L", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC1R", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC2L", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC2R", NULL, "AIF1DACDAT" },
+
+	/* DAC1 inputs */
+	{ "DAC1L", NULL, "DAC1L Mixer" },
+	{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
+	{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
+	{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
+	{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
+
+	{ "DAC1R", NULL, "DAC1R Mixer" },
+	{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
+	{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
+	{ "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" },
+	{ "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" },
+
+	/* DAC2/AIF2 outputs */
+	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
+	{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
+	{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
+
+	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
+	{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
+	{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
+
+	/* Output stages */
+	{ "Headphone PGA", NULL, "DAC1L" },
+	{ "Headphone PGA", NULL, "DAC1R" },
+
+	{ "Headphone PGA", NULL, "DAC2L" },
+	{ "Headphone PGA", NULL, "DAC2R" },
+
+	{ "Headphone PGA", NULL, "Headphone Supply" },
+	{ "Headphone PGA", NULL, "CLK_SYS" },
+	{ "Headphone PGA", NULL, "LDO2" },
+
+	{ "HP1L", NULL, "Headphone PGA" },
+	{ "HP1R", NULL, "Headphone PGA" },
+
+	{ "SPK1L Driver", "DAC1L", "DAC1L" },
+	{ "SPK1L Driver", "DAC1R", "DAC1R" },
+	{ "SPK1L Driver", "DAC2L", "DAC2L" },
+	{ "SPK1L Driver", "DAC2R", "DAC2R" },
+	{ "SPK1L Driver", NULL, "CLK_SYS" },
+
+	{ "SPK1R Driver", "DAC1L", "DAC1L" },
+	{ "SPK1R Driver", "DAC1R", "DAC1R" },
+	{ "SPK1R Driver", "DAC2L", "DAC2L" },
+	{ "SPK1R Driver", "DAC2R", "DAC2R" },
+	{ "SPK1R Driver", NULL, "CLK_SYS" },
+
+	{ "SPK2L Driver", "DAC1L", "DAC1L" },
+	{ "SPK2L Driver", "DAC1R", "DAC1R" },
+	{ "SPK2L Driver", "DAC2L", "DAC2L" },
+	{ "SPK2L Driver", "DAC2R", "DAC2R" },
+	{ "SPK2L Driver", NULL, "CLK_SYS" },
+
+	{ "SPK2R Driver", "DAC1L", "DAC1L" },
+	{ "SPK2R Driver", "DAC1R", "DAC1R" },
+	{ "SPK2R Driver", "DAC2L", "DAC2L" },
+	{ "SPK2R Driver", "DAC2R", "DAC2R" },
+	{ "SPK2R Driver", NULL, "CLK_SYS" },
+
+	{ "SPK1L", NULL, "SPK1L Driver" },
+	{ "SPK1R", NULL, "SPK1R Driver" },
+	{ "SPK2L", NULL, "SPK2L Driver" },
+	{ "SPK2R", NULL, "SPK2R Driver" }
+};
+
+static int wm8995_volatile(unsigned int reg)
+{
+	/* Out-of-bounds registers are generally considered volatile
+	 * to support register banks that are partially owned by
+	 * something else, e.g. a DSP.
+	 */
+	if (reg > WM8995_MAX_CACHED_REGISTER)
+		return 1;
+
+	switch (reg) {
+	case WM8995_SOFTWARE_RESET:
+	case WM8995_DC_SERVO_READBACK_0:
+	case WM8995_INTERRUPT_STATUS_1:
+	case WM8995_INTERRUPT_STATUS_2:
+	case WM8995_INTERRUPT_STATUS_1_MASK:
+	case WM8995_INTERRUPT_STATUS_2_MASK:
+	case WM8995_INTERRUPT_CONTROL:
+	case WM8995_ACCESSORY_DETECT_MODE1:
+	case WM8995_ACCESSORY_DETECT_MODE2:
+	case WM8995_HEADPHONE_DETECT1:
+	case WM8995_HEADPHONE_DETECT2:
+		return 1;
+	}
+
+	return 0;
+}
+
+static int wm8995_aif_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int mute_reg;
+
+	switch (dai->id) {
+	case 0:
+		mute_reg = WM8995_AIF1_DAC1_FILTERS_1;
+		break;
+	case 1:
+		mute_reg = WM8995_AIF2_DAC_FILTERS_1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, mute_reg, WM8995_AIF1DAC1_MUTE_MASK,
+			    !!mute << WM8995_AIF1DAC1_MUTE_SHIFT);
+	return 0;
+}
+
+static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec;
+	int master;
+	int aif;
+
+	codec = dai->codec;
+
+	master = 0;
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = WM8995_AIF1_MSTR;
+		break;
+	default:
+		dev_err(dai->dev, "Unknown master/slave configuration\n");
+		return -EINVAL;
+	}
+
+	aif = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_B:
+		aif |= WM8995_AIF1_LRCLK_INV;
+	case SND_SOC_DAIFMT_DSP_A:
+		aif |= (0x3 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		aif |= (0x2 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		aif |= (0x1 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	default:
+		dev_err(dai->dev, "Unknown dai format\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_A:
+	case SND_SOC_DAIFMT_DSP_B:
+		/* frame inversion not valid for DSP modes */
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			aif |= WM8995_AIF1_BCLK_INV;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_RIGHT_J:
+	case SND_SOC_DAIFMT_LEFT_J:
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_IB_IF:
+			aif |= WM8995_AIF1_BCLK_INV | WM8995_AIF1_LRCLK_INV;
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			aif |= WM8995_AIF1_BCLK_INV;
+			break;
+		case SND_SOC_DAIFMT_NB_IF:
+			aif |= WM8995_AIF1_LRCLK_INV;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8995_AIF1_CONTROL_1,
+			    WM8995_AIF1_BCLK_INV_MASK |
+			    WM8995_AIF1_LRCLK_INV_MASK |
+			    WM8995_AIF1_FMT_MASK, aif);
+	snd_soc_update_bits(codec, WM8995_AIF1_MASTER_SLAVE,
+			    WM8995_AIF1_MSTR_MASK, master);
+	return 0;
+}
+
+static const int srs[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100,
+	48000, 88200, 96000
+};
+
+static const int fs_ratios[] = {
+	-1 /* reserved */,
+	128, 192, 256, 384, 512, 768, 1024, 1408, 1536
+};
+
+static const int bclk_divs[] = {
+	10, 15, 20, 30, 40, 55, 60, 80, 110, 120, 160, 220, 240, 320, 440, 480
+};
+
+static int wm8995_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+	int aif1_reg;
+	int bclk_reg;
+	int lrclk_reg;
+	int rate_reg;
+	int bclk_rate;
+	int aif1;
+	int lrclk, bclk;
+	int i, rate_val, best, best_val, cur_val;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (dai->id) {
+	case 0:
+		aif1_reg = WM8995_AIF1_CONTROL_1;
+		bclk_reg = WM8995_AIF1_BCLK;
+		rate_reg = WM8995_AIF1_RATE;
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
+			wm8995->lrclk_shared[0] */) {
+			lrclk_reg = WM8995_AIF1DAC_LRCLK;
+		} else {
+			lrclk_reg = WM8995_AIF1ADC_LRCLK;
+			dev_dbg(codec->dev, "AIF1 using split LRCLK\n");
+		}
+		break;
+	case 1:
+		aif1_reg = WM8995_AIF2_CONTROL_1;
+		bclk_reg = WM8995_AIF2_BCLK;
+		rate_reg = WM8995_AIF2_RATE;
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
+		    wm8995->lrclk_shared[1] */) {
+			lrclk_reg = WM8995_AIF2DAC_LRCLK;
+		} else {
+			lrclk_reg = WM8995_AIF2ADC_LRCLK;
+			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	bclk_rate = snd_soc_params_to_bclk(params);
+	if (bclk_rate < 0)
+		return bclk_rate;
+
+	aif1 = 0;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		aif1 |= (0x1 << WM8995_AIF1_WL_SHIFT);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		aif1 |= (0x2 << WM8995_AIF1_WL_SHIFT);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aif1 |= (0x3 << WM8995_AIF1_WL_SHIFT);
+		break;
+	default:
+		dev_err(dai->dev, "Unsupported word length %u\n",
+			params_format(params));
+		return -EINVAL;
+	}
+
+	/* try to find a suitable sample rate */
+	for (i = 0; i < ARRAY_SIZE(srs); ++i)
+		if (srs[i] == params_rate(params))
+			break;
+	if (i == ARRAY_SIZE(srs)) {
+		dev_err(dai->dev, "Sample rate %d is not supported\n",
+			params_rate(params));
+		return -EINVAL;
+	}
+	rate_val = i << WM8995_AIF1_SR_SHIFT;
+
+	dev_dbg(dai->dev, "Sample rate is %dHz\n", srs[i]);
+	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
+		dai->id + 1, wm8995->aifclk[dai->id], bclk_rate);
+
+	/* AIFCLK/fs ratio; look for a close match in either direction */
+	best = 1;
+	best_val = abs((fs_ratios[1] * params_rate(params))
+		       - wm8995->aifclk[dai->id]);
+	for (i = 2; i < ARRAY_SIZE(fs_ratios); i++) {
+		cur_val = abs((fs_ratios[i] * params_rate(params))
+			      - wm8995->aifclk[dai->id]);
+		if (cur_val >= best_val)
+			continue;
+		best = i;
+		best_val = cur_val;
+	}
+	rate_val |= best;
+
+	dev_dbg(dai->dev, "Selected AIF%dCLK/fs = %d\n",
+		dai->id + 1, fs_ratios[best]);
+
+	/*
+	 * We may not get quite the right frequency if using
+	 * approximate clocks so look for the closest match that is
+	 * higher than the target (we need to ensure that there are enough
+	 * BCLKs to clock out the samples).
+	 */
+	best = 0;
+	bclk = 0;
+	for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
+		cur_val = (wm8995->aifclk[dai->id] * 10 / bclk_divs[i]) - bclk_rate;
+		if (cur_val < 0) /* BCLK table is sorted */
+			break;
+		best = i;
+	}
+	bclk |= best << WM8995_AIF1_BCLK_DIV_SHIFT;
+
+	bclk_rate = wm8995->aifclk[dai->id] * 10 / bclk_divs[best];
+	dev_dbg(dai->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n",
+		bclk_divs[best], bclk_rate);
+
+	lrclk = bclk_rate / params_rate(params);
+	dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
+		lrclk, bclk_rate / lrclk);
+
+	snd_soc_update_bits(codec, aif1_reg,
+			    WM8995_AIF1_WL_MASK, aif1);
+	snd_soc_update_bits(codec, bclk_reg,
+			    WM8995_AIF1_BCLK_DIV_MASK, bclk);
+	snd_soc_update_bits(codec, lrclk_reg,
+			    WM8995_AIF1DAC_RATE_MASK, lrclk);
+	snd_soc_update_bits(codec, rate_reg,
+			    WM8995_AIF1_SR_MASK |
+			    WM8995_AIF1CLK_RATE_MASK, rate_val);
+	return 0;
+}
+
+static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int reg, val, mask;
+
+	switch (codec_dai->id) {
+	case 0:
+		reg = WM8995_AIF1_MASTER_SLAVE;
+		mask = WM8995_AIF1_TRI;
+		break;
+	case 1:
+		reg = WM8995_AIF2_MASTER_SLAVE;
+		mask = WM8995_AIF2_TRI;
+		break;
+	case 2:
+		reg = WM8995_POWER_MANAGEMENT_5;
+		mask = WM8995_AIF3_TRI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (tristate)
+		val = mask;
+	else
+		val = 0;
+
+	return snd_soc_update_bits(codec, reg, mask, val);
+}
+
+/* The size of the FLL fractional divider (2^16), multiplied by 10
+ * to allow rounding later */
+#define FIXED_FLL_SIZE ((1 << 16) * 10)
+
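+/*
+ * FLL configuration computed by wm8995_get_fll_config() and programmed
+ * into the FLLx_CONTROL_2..5 registers by wm8995_set_fll().
+ */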
+struct fll_div {
+	u16 outdiv;
+	u16 n;
+	u16 k;
+	u16 clk_ref_div;
+	u16 fll_fratio;
+};
+
+static int wm8995_get_fll_config(struct fll_div *fll,
+				 int freq_in, int freq_out)
+{
+	u64 Kpart;
+	unsigned int K, Ndiv, Nmod;
+
+	pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);
+
+	/* Scale the input frequency down to <= 13.5MHz */
+	fll->clk_ref_div = 0;
+	while (freq_in > 13500000) {
+		fll->clk_ref_div++;
+		freq_in /= 2;
+
+		if (fll->clk_ref_div > 3)
+			return -EINVAL;
+	}
+	pr_debug("CLK_REF_DIV=%d, Fref=%dHz\n", fll->clk_ref_div, freq_in);
+
+	/* Scale the output to give 90MHz<=Fvco<=100MHz */
+	fll->outdiv = 3;
+	while (freq_out * (fll->outdiv + 1) < 90000000) {
+		fll->outdiv++;
+		if (fll->outdiv > 63)
+			return -EINVAL;
+	}
+	freq_out *= fll->outdiv + 1;
+	pr_debug("OUTDIV=%d, Fvco=%dHz\n", fll->outdiv, freq_out);
+
+	if (freq_in > 1000000) {
+		fll->fll_fratio = 0;
+	} else if (freq_in > 256000) {
+		fll->fll_fratio = 1;
+		freq_in *= 2;
+	} else if (freq_in > 128000) {
+		fll->fll_fratio = 2;
+		freq_in *= 4;
+	} else if (freq_in > 64000) {
+		fll->fll_fratio = 3;
+		freq_in *= 8;
+	} else {
+		fll->fll_fratio = 4;
+		freq_in *= 16;
+	}
+	pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in);
+
+	/* Now, calculate N.K */
+	Ndiv = freq_out / freq_in;
+
+	fll->n = Ndiv;
+	Nmod = freq_out % freq_in;
+	pr_debug("Nmod=%d\n", Nmod);
+
+	/* Calculate fractional part - scale up so we can round. */
+	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+	do_div(Kpart, freq_in);
+
+	K = Kpart & 0xFFFFFFFF;
+
+	if ((K % 10) >= 5)
+		K += 5;
+
+	/* Move down to proper range now rounding is done */
+	fll->k = K / 10;
+
+	pr_debug("N=%x K=%x\n", fll->n, fll->k);
+
+	return 0;
+}
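+
+/*
+ * Worked example (for illustration only): with freq_in = 12MHz and
+ * freq_out = 11.2896MHz (256 * 44.1kHz) the above yields CLK_REF_DIV = 0,
+ * OUTDIV = 7 (so Fvco = 8 * 11.2896MHz = 90.3168MHz), FLL_FRATIO = 0 and
+ * N.K = 90.3168MHz / 12MHz = 7.5264, i.e. n = 7 and k = 34498
+ * (roughly 0.5264 in 16-bit fractional form).
+ */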
+
+static int wm8995_set_fll(struct snd_soc_dai *dai, int id,
+			  int src, unsigned int freq_in,
+			  unsigned int freq_out)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+	int reg_offset, ret;
+	struct fll_div fll;
+	u16 reg, aif1, aif2;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	aif1 = snd_soc_read(codec, WM8995_AIF1_CLOCKING_1)
+	       & WM8995_AIF1CLK_ENA;
+
+	aif2 = snd_soc_read(codec, WM8995_AIF2_CLOCKING_1)
+	       & WM8995_AIF2CLK_ENA;
+
+	switch (id) {
+	case WM8995_FLL1:
+		reg_offset = 0;
+		id = 0;
+		break;
+	case WM8995_FLL2:
+		reg_offset = 0x20;
+		id = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (src) {
+	case 0:
+		/* Allow no source specification when stopping */
+		if (freq_out)
+			return -EINVAL;
+		break;
+	case WM8995_FLL_SRC_MCLK1:
+	case WM8995_FLL_SRC_MCLK2:
+	case WM8995_FLL_SRC_LRCLK:
+	case WM8995_FLL_SRC_BCLK:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Are we changing anything? */
+	if (wm8995->fll[id].src == src &&
+	    wm8995->fll[id].in == freq_in && wm8995->fll[id].out == freq_out)
+		return 0;
+
+	/* If we're stopping the FLL, redo the old config - no
+	 * registers will actually be written but we avoid GCC flow
+	 * analysis bugs spewing warnings.
+	 */
+	if (freq_out)
+		ret = wm8995_get_fll_config(&fll, freq_in, freq_out);
+	else
+		ret = wm8995_get_fll_config(&fll, wm8995->fll[id].in,
+					    wm8995->fll[id].out);
+	if (ret < 0)
+		return ret;
+
+	/* Gate the AIF clocks while we reclock */
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
+			    WM8995_AIF1CLK_ENA_MASK, 0);
+	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
+			    WM8995_AIF2CLK_ENA_MASK, 0);
+
+	/* We always need to disable the FLL while reconfiguring */
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
+			    WM8995_FLL1_ENA_MASK, 0);
+
+	reg = (fll.outdiv << WM8995_FLL1_OUTDIV_SHIFT) |
+	      (fll.fll_fratio << WM8995_FLL1_FRATIO_SHIFT);
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_2 + reg_offset,
+			    WM8995_FLL1_OUTDIV_MASK |
+			    WM8995_FLL1_FRATIO_MASK, reg);
+
+	snd_soc_write(codec, WM8995_FLL1_CONTROL_3 + reg_offset, fll.k);
+
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_4 + reg_offset,
+			    WM8995_FLL1_N_MASK,
+			    fll.n << WM8995_FLL1_N_SHIFT);
+
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_5 + reg_offset,
+			    WM8995_FLL1_REFCLK_DIV_MASK |
+			    WM8995_FLL1_REFCLK_SRC_MASK,
+			    (fll.clk_ref_div << WM8995_FLL1_REFCLK_DIV_SHIFT) |
+			    (src - 1));
+
+	if (freq_out)
+		snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
+				    WM8995_FLL1_ENA_MASK, WM8995_FLL1_ENA);
+
+	wm8995->fll[id].in = freq_in;
+	wm8995->fll[id].out = freq_out;
+	wm8995->fll[id].src = src;
+
+	/* Enable any gated AIF clocks */
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
+			    WM8995_AIF1CLK_ENA_MASK, aif1);
+	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
+			    WM8995_AIF2CLK_ENA_MASK, aif2);
+
+	configure_clock(codec);
+
+	return 0;
+}
+
+static int wm8995_set_dai_sysclk(struct snd_soc_dai *dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (dai->id) {
+	case 0:
+	case 1:
+		break;
+	default:
+		/* AIF3 shares clocking with AIF1/2 */
+		return -EINVAL;
+	}
+
+	switch (clk_id) {
+	case WM8995_SYSCLK_MCLK1:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1;
+		wm8995->mclk[0] = freq;
+		dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n",
+			dai->id + 1, freq);
+		break;
+	case WM8995_SYSCLK_MCLK2:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK2;
+		wm8995->mclk[1] = freq;
+		dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n",
+			dai->id + 1, freq);
+		break;
+	case WM8995_SYSCLK_FLL1:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL1;
+		dev_dbg(dai->dev, "AIF%d using FLL1\n", dai->id + 1);
+		break;
+	case WM8995_SYSCLK_FLL2:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL2;
+		dev_dbg(dai->dev, "AIF%d using FLL2\n", dai->id + 1);
+		break;
+	case WM8995_SYSCLK_OPCLK:
+	default:
+		dev_err(dai->dev, "Unknown clock source %d\n", clk_id);
+		return -EINVAL;
+	}
+
+	configure_clock(codec);
+
+	return 0;
+}
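+
+/*
+ * Typical machine driver usage (illustrative only, values are examples):
+ *
+ *	snd_soc_dai_set_pll(codec_dai, WM8995_FLL1, WM8995_FLL_SRC_MCLK1,
+ *			    12000000, 11289600);
+ *	snd_soc_dai_set_sysclk(codec_dai, WM8995_SYSCLK_FLL1, 11289600,
+ *			       SND_SOC_CLOCK_IN);
+ */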
+
+static int wm8995_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = snd_soc_cache_sync(codec);
+			if (ret) {
+				dev_err(codec->dev,
+					"Failed to sync cache: %d\n", ret);
+				return ret;
+			}
+
+			snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+					    WM8995_BG_ENA_MASK, WM8995_BG_ENA);
+
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_BG_ENA_MASK, 0);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8995_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8995_resume(struct snd_soc_codec *codec)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8995_suspend NULL
+#define wm8995_resume NULL
+#endif
+
+static int wm8995_remove(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	struct i2c_client *i2c;
+
+	i2c = container_of(codec->dev, struct i2c_client, dev);
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8995_probe(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	codec->dapm.idle_bias_off = 1;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	ret = snd_soc_codec_set_cache_io(codec, 16, 16, wm8995->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to read device ID: %d\n", ret);
+		return ret;
+	}
+
+	if (ret != 0x8995) {
+		dev_err(codec->dev, "Invalid device ID: %#x\n", ret);
+		return -EINVAL;
+	}
+
+	ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		return ret;
+	}
+
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* Latch volume updates (right only; we always do left then right). */
+	snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME,
+			    WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_DAC2_RIGHT_VOLUME,
+			    WM8995_AIF1DAC2_VU_MASK, WM8995_AIF1DAC2_VU);
+	snd_soc_update_bits(codec, WM8995_AIF2_DAC_RIGHT_VOLUME,
+			    WM8995_AIF2DAC_VU_MASK, WM8995_AIF2DAC_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_ADC1_RIGHT_VOLUME,
+			    WM8995_AIF1ADC1_VU_MASK, WM8995_AIF1ADC1_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_ADC2_RIGHT_VOLUME,
+			    WM8995_AIF1ADC2_VU_MASK, WM8995_AIF1ADC2_VU);
+	snd_soc_update_bits(codec, WM8995_AIF2_ADC_RIGHT_VOLUME,
+			    WM8995_AIF2ADC_VU_MASK, WM8995_AIF2ADC_VU);
+	snd_soc_update_bits(codec, WM8995_DAC1_RIGHT_VOLUME,
+			    WM8995_DAC1_VU_MASK, WM8995_DAC1_VU);
+	snd_soc_update_bits(codec, WM8995_DAC2_RIGHT_VOLUME,
+			    WM8995_DAC2_VU_MASK, WM8995_DAC2_VU);
+	snd_soc_update_bits(codec, WM8995_RIGHT_LINE_INPUT_1_VOLUME,
+			    WM8995_IN1_VU_MASK, WM8995_IN1_VU);
+
+	wm8995_update_class_w(codec);
+
+	snd_soc_add_controls(codec, wm8995_snd_controls,
+			     ARRAY_SIZE(wm8995_snd_controls));
+	snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets,
+				  ARRAY_SIZE(wm8995_dapm_widgets));
+	snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon,
+				ARRAY_SIZE(wm8995_intercon));
+
+	return 0;
+}
+
+#define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8995_aif1_dai_ops = {
+	.set_sysclk = wm8995_set_dai_sysclk,
+	.set_fmt = wm8995_set_dai_fmt,
+	.hw_params = wm8995_hw_params,
+	.digital_mute = wm8995_aif_mute,
+	.set_pll = wm8995_set_fll,
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8995_aif2_dai_ops = {
+	.set_sysclk = wm8995_set_dai_sysclk,
+	.set_fmt = wm8995_set_dai_fmt,
+	.hw_params = wm8995_hw_params,
+	.digital_mute = wm8995_aif_mute,
+	.set_pll = wm8995_set_fll,
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8995_aif3_dai_ops = {
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_driver wm8995_dai[] = {
+	{
+		.name = "wm8995-aif1",
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif1_dai_ops
+	},
+	{
+		.name = "wm8995-aif2",
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif2_dai_ops
+	},
+	{
+		.name = "wm8995-aif3",
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF3 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif3_dai_ops
+	}
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
+	.probe = wm8995_probe,
+	.remove = wm8995_remove,
+	.suspend = wm8995_suspend,
+	.resume = wm8995_resume,
+	.set_bias_level = wm8995_set_bias_level,
+	.reg_cache_size = ARRAY_SIZE(wm8995_reg_defs),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wm8995_reg_defs,
+	.volatile_register = wm8995_volatile,
+	.compress_type = SND_SOC_RBTREE_COMPRESSION
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8995_spi_probe(struct spi_device *spi)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+	if (!wm8995)
+		return -ENOMEM;
+
+	wm8995->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8995);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8995, wm8995_dai,
+				     ARRAY_SIZE(wm8995_dai));
+	if (ret < 0)
+		kfree(wm8995);
+	return ret;
+}
+
+static int __devexit wm8995_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8995_spi_driver = {
+	.driver = {
+		.name = "wm8995",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8995_spi_probe,
+	.remove = __devexit_p(wm8995_spi_remove)
+};
+#endif
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8995_i2c_probe(struct i2c_client *i2c,
+				      const struct i2c_device_id *id)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+	if (!wm8995)
+		return -ENOMEM;
+
+	wm8995->control_type = SND_SOC_I2C;
+	i2c_set_clientdata(i2c, wm8995);
+
+	ret = snd_soc_register_codec(&i2c->dev,
+				     &soc_codec_dev_wm8995, wm8995_dai,
+				     ARRAY_SIZE(wm8995_dai));
+	if (ret < 0)
+		kfree(wm8995);
+	return ret;
+}
+
+static __devexit int wm8995_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	kfree(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id wm8995_i2c_id[] = {
+	{"wm8995", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, wm8995_i2c_id);
+
+static struct i2c_driver wm8995_i2c_driver = {
+	.driver = {
+		.name = "wm8995",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8995_i2c_probe,
+	.remove = __devexit_p(wm8995_i2c_remove),
+	.id_table = wm8995_i2c_id
+};
+#endif
+
+static int __init wm8995_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	ret = i2c_add_driver(&wm8995_i2c_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8995 I2C driver: %d\n",
+		       ret);
+	}
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8995_spi_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8995 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return ret;
+}
+
+module_init(wm8995_modinit);
+
+static void __exit wm8995_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_driver(&wm8995_i2c_driver);
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8995_spi_driver);
+#endif
+}
+
+module_exit(wm8995_exit);
+
+MODULE_DESCRIPTION("ASoC WM8995 driver");
+MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8995.h b/sound/soc/codecs/wm8995.h
new file mode 100644
index 0000000..5642121
--- /dev/null
+++ b/sound/soc/codecs/wm8995.h
@@ -0,0 +1,4269 @@
+/*
+ * wm8995.h  --  WM8995 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8995_H
+#define _WM8995_H
+
+#include <asm/types.h>
+
+/*
+ * Register values.
+ */
+#define WM8995_SOFTWARE_RESET                   0x00
+#define WM8995_POWER_MANAGEMENT_1               0x01
+#define WM8995_POWER_MANAGEMENT_2               0x02
+#define WM8995_POWER_MANAGEMENT_3               0x03
+#define WM8995_POWER_MANAGEMENT_4               0x04
+#define WM8995_POWER_MANAGEMENT_5               0x05
+#define WM8995_LEFT_LINE_INPUT_1_VOLUME         0x10
+#define WM8995_RIGHT_LINE_INPUT_1_VOLUME        0x11
+#define WM8995_LEFT_LINE_INPUT_CONTROL          0x12
+#define WM8995_DAC1_LEFT_VOLUME                 0x18
+#define WM8995_DAC1_RIGHT_VOLUME                0x19
+#define WM8995_DAC2_LEFT_VOLUME                 0x1A
+#define WM8995_DAC2_RIGHT_VOLUME                0x1B
+#define WM8995_OUTPUT_VOLUME_ZC_1               0x1C
+#define WM8995_MICBIAS_1                        0x20
+#define WM8995_MICBIAS_2                        0x21
+#define WM8995_LDO_1                            0x28
+#define WM8995_LDO_2                            0x29
+#define WM8995_ACCESSORY_DETECT_MODE1           0x30
+#define WM8995_ACCESSORY_DETECT_MODE2           0x31
+#define WM8995_HEADPHONE_DETECT1                0x34
+#define WM8995_HEADPHONE_DETECT2                0x35
+#define WM8995_MIC_DETECT_1                     0x38
+#define WM8995_MIC_DETECT_2                     0x39
+#define WM8995_CHARGE_PUMP_1                    0x40
+#define WM8995_CLASS_W_1                        0x45
+#define WM8995_DC_SERVO_1                       0x50
+#define WM8995_DC_SERVO_2                       0x51
+#define WM8995_DC_SERVO_3                       0x52
+#define WM8995_DC_SERVO_5                       0x54
+#define WM8995_DC_SERVO_6                       0x55
+#define WM8995_DC_SERVO_7                       0x56
+#define WM8995_DC_SERVO_READBACK_0              0x57
+#define WM8995_ANALOGUE_HP_1                    0x60
+#define WM8995_ANALOGUE_HP_2                    0x61
+#define WM8995_CHIP_REVISION                    0x100
+#define WM8995_CONTROL_INTERFACE_1              0x101
+#define WM8995_CONTROL_INTERFACE_2              0x102
+#define WM8995_WRITE_SEQUENCER_CTRL_1           0x110
+#define WM8995_WRITE_SEQUENCER_CTRL_2           0x111
+#define WM8995_AIF1_CLOCKING_1                  0x200
+#define WM8995_AIF1_CLOCKING_2                  0x201
+#define WM8995_AIF2_CLOCKING_1                  0x204
+#define WM8995_AIF2_CLOCKING_2                  0x205
+#define WM8995_CLOCKING_1                       0x208
+#define WM8995_CLOCKING_2                       0x209
+#define WM8995_AIF1_RATE                        0x210
+#define WM8995_AIF2_RATE                        0x211
+#define WM8995_RATE_STATUS                      0x212
+#define WM8995_FLL1_CONTROL_1                   0x220
+#define WM8995_FLL1_CONTROL_2                   0x221
+#define WM8995_FLL1_CONTROL_3                   0x222
+#define WM8995_FLL1_CONTROL_4                   0x223
+#define WM8995_FLL1_CONTROL_5                   0x224
+#define WM8995_FLL2_CONTROL_1                   0x240
+#define WM8995_FLL2_CONTROL_2                   0x241
+#define WM8995_FLL2_CONTROL_3                   0x242
+#define WM8995_FLL2_CONTROL_4                   0x243
+#define WM8995_FLL2_CONTROL_5                   0x244
+#define WM8995_AIF1_CONTROL_1                   0x300
+#define WM8995_AIF1_CONTROL_2                   0x301
+#define WM8995_AIF1_MASTER_SLAVE                0x302
+#define WM8995_AIF1_BCLK                        0x303
+#define WM8995_AIF1ADC_LRCLK                    0x304
+#define WM8995_AIF1DAC_LRCLK                    0x305
+#define WM8995_AIF1DAC_DATA                     0x306
+#define WM8995_AIF1ADC_DATA                     0x307
+#define WM8995_AIF2_CONTROL_1                   0x310
+#define WM8995_AIF2_CONTROL_2                   0x311
+#define WM8995_AIF2_MASTER_SLAVE                0x312
+#define WM8995_AIF2_BCLK                        0x313
+#define WM8995_AIF2ADC_LRCLK                    0x314
+#define WM8995_AIF2DAC_LRCLK                    0x315
+#define WM8995_AIF2DAC_DATA                     0x316
+#define WM8995_AIF2ADC_DATA                     0x317
+#define WM8995_AIF1_ADC1_LEFT_VOLUME            0x400
+#define WM8995_AIF1_ADC1_RIGHT_VOLUME           0x401
+#define WM8995_AIF1_DAC1_LEFT_VOLUME            0x402
+#define WM8995_AIF1_DAC1_RIGHT_VOLUME           0x403
+#define WM8995_AIF1_ADC2_LEFT_VOLUME            0x404
+#define WM8995_AIF1_ADC2_RIGHT_VOLUME           0x405
+#define WM8995_AIF1_DAC2_LEFT_VOLUME            0x406
+#define WM8995_AIF1_DAC2_RIGHT_VOLUME           0x407
+#define WM8995_AIF1_ADC1_FILTERS                0x410
+#define WM8995_AIF1_ADC2_FILTERS                0x411
+#define WM8995_AIF1_DAC1_FILTERS_1              0x420
+#define WM8995_AIF1_DAC1_FILTERS_2              0x421
+#define WM8995_AIF1_DAC2_FILTERS_1              0x422
+#define WM8995_AIF1_DAC2_FILTERS_2              0x423
+#define WM8995_AIF1_DRC1_1                      0x440
+#define WM8995_AIF1_DRC1_2                      0x441
+#define WM8995_AIF1_DRC1_3                      0x442
+#define WM8995_AIF1_DRC1_4                      0x443
+#define WM8995_AIF1_DRC1_5                      0x444
+#define WM8995_AIF1_DRC2_1                      0x450
+#define WM8995_AIF1_DRC2_2                      0x451
+#define WM8995_AIF1_DRC2_3                      0x452
+#define WM8995_AIF1_DRC2_4                      0x453
+#define WM8995_AIF1_DRC2_5                      0x454
+#define WM8995_AIF1_DAC1_EQ_GAINS_1             0x480
+#define WM8995_AIF1_DAC1_EQ_GAINS_2             0x481
+#define WM8995_AIF1_DAC1_EQ_BAND_1_A            0x482
+#define WM8995_AIF1_DAC1_EQ_BAND_1_B            0x483
+#define WM8995_AIF1_DAC1_EQ_BAND_1_PG           0x484
+#define WM8995_AIF1_DAC1_EQ_BAND_2_A            0x485
+#define WM8995_AIF1_DAC1_EQ_BAND_2_B            0x486
+#define WM8995_AIF1_DAC1_EQ_BAND_2_C            0x487
+#define WM8995_AIF1_DAC1_EQ_BAND_2_PG           0x488
+#define WM8995_AIF1_DAC1_EQ_BAND_3_A            0x489
+#define WM8995_AIF1_DAC1_EQ_BAND_3_B            0x48A
+#define WM8995_AIF1_DAC1_EQ_BAND_3_C            0x48B
+#define WM8995_AIF1_DAC1_EQ_BAND_3_PG           0x48C
+#define WM8995_AIF1_DAC1_EQ_BAND_4_A            0x48D
+#define WM8995_AIF1_DAC1_EQ_BAND_4_B            0x48E
+#define WM8995_AIF1_DAC1_EQ_BAND_4_C            0x48F
+#define WM8995_AIF1_DAC1_EQ_BAND_4_PG           0x490
+#define WM8995_AIF1_DAC1_EQ_BAND_5_A            0x491
+#define WM8995_AIF1_DAC1_EQ_BAND_5_B            0x492
+#define WM8995_AIF1_DAC1_EQ_BAND_5_PG           0x493
+#define WM8995_AIF1_DAC2_EQ_GAINS_1             0x4A0
+#define WM8995_AIF1_DAC2_EQ_GAINS_2             0x4A1
+#define WM8995_AIF1_DAC2_EQ_BAND_1_A            0x4A2
+#define WM8995_AIF1_DAC2_EQ_BAND_1_B            0x4A3
+#define WM8995_AIF1_DAC2_EQ_BAND_1_PG           0x4A4
+#define WM8995_AIF1_DAC2_EQ_BAND_2_A            0x4A5
+#define WM8995_AIF1_DAC2_EQ_BAND_2_B            0x4A6
+#define WM8995_AIF1_DAC2_EQ_BAND_2_C            0x4A7
+#define WM8995_AIF1_DAC2_EQ_BAND_2_PG           0x4A8
+#define WM8995_AIF1_DAC2_EQ_BAND_3_A            0x4A9
+#define WM8995_AIF1_DAC2_EQ_BAND_3_B            0x4AA
+#define WM8995_AIF1_DAC2_EQ_BAND_3_C            0x4AB
+#define WM8995_AIF1_DAC2_EQ_BAND_3_PG           0x4AC
+#define WM8995_AIF1_DAC2_EQ_BAND_4_A            0x4AD
+#define WM8995_AIF1_DAC2_EQ_BAND_4_B            0x4AE
+#define WM8995_AIF1_DAC2_EQ_BAND_4_C            0x4AF
+#define WM8995_AIF1_DAC2_EQ_BAND_4_PG           0x4B0
+#define WM8995_AIF1_DAC2_EQ_BAND_5_A            0x4B1
+#define WM8995_AIF1_DAC2_EQ_BAND_5_B            0x4B2
+#define WM8995_AIF1_DAC2_EQ_BAND_5_PG           0x4B3
+#define WM8995_AIF2_ADC_LEFT_VOLUME             0x500
+#define WM8995_AIF2_ADC_RIGHT_VOLUME            0x501
+#define WM8995_AIF2_DAC_LEFT_VOLUME             0x502
+#define WM8995_AIF2_DAC_RIGHT_VOLUME            0x503
+#define WM8995_AIF2_ADC_FILTERS                 0x510
+#define WM8995_AIF2_DAC_FILTERS_1               0x520
+#define WM8995_AIF2_DAC_FILTERS_2               0x521
+#define WM8995_AIF2_DRC_1                       0x540
+#define WM8995_AIF2_DRC_2                       0x541
+#define WM8995_AIF2_DRC_3                       0x542
+#define WM8995_AIF2_DRC_4                       0x543
+#define WM8995_AIF2_DRC_5                       0x544
+#define WM8995_AIF2_EQ_GAINS_1                  0x580
+#define WM8995_AIF2_EQ_GAINS_2                  0x581
+#define WM8995_AIF2_EQ_BAND_1_A                 0x582
+#define WM8995_AIF2_EQ_BAND_1_B                 0x583
+#define WM8995_AIF2_EQ_BAND_1_PG                0x584
+#define WM8995_AIF2_EQ_BAND_2_A                 0x585
+#define WM8995_AIF2_EQ_BAND_2_B                 0x586
+#define WM8995_AIF2_EQ_BAND_2_C                 0x587
+#define WM8995_AIF2_EQ_BAND_2_PG                0x588
+#define WM8995_AIF2_EQ_BAND_3_A                 0x589
+#define WM8995_AIF2_EQ_BAND_3_B                 0x58A
+#define WM8995_AIF2_EQ_BAND_3_C                 0x58B
+#define WM8995_AIF2_EQ_BAND_3_PG                0x58C
+#define WM8995_AIF2_EQ_BAND_4_A                 0x58D
+#define WM8995_AIF2_EQ_BAND_4_B                 0x58E
+#define WM8995_AIF2_EQ_BAND_4_C                 0x58F
+#define WM8995_AIF2_EQ_BAND_4_PG                0x590
+#define WM8995_AIF2_EQ_BAND_5_A                 0x591
+#define WM8995_AIF2_EQ_BAND_5_B                 0x592
+#define WM8995_AIF2_EQ_BAND_5_PG                0x593
+#define WM8995_DAC1_MIXER_VOLUMES               0x600
+#define WM8995_DAC1_LEFT_MIXER_ROUTING          0x601
+#define WM8995_DAC1_RIGHT_MIXER_ROUTING         0x602
+#define WM8995_DAC2_MIXER_VOLUMES               0x603
+#define WM8995_DAC2_LEFT_MIXER_ROUTING          0x604
+#define WM8995_DAC2_RIGHT_MIXER_ROUTING         0x605
+#define WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING     0x606
+#define WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING    0x607
+#define WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING     0x608
+#define WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING    0x609
+#define WM8995_DAC_SOFTMUTE                     0x610
+#define WM8995_OVERSAMPLING                     0x620
+#define WM8995_SIDETONE                         0x621
+#define WM8995_GPIO_1                           0x700
+#define WM8995_GPIO_2                           0x701
+#define WM8995_GPIO_3                           0x702
+#define WM8995_GPIO_4                           0x703
+#define WM8995_GPIO_5                           0x704
+#define WM8995_GPIO_6                           0x705
+#define WM8995_GPIO_7                           0x706
+#define WM8995_GPIO_8                           0x707
+#define WM8995_GPIO_9                           0x708
+#define WM8995_GPIO_10                          0x709
+#define WM8995_GPIO_11                          0x70A
+#define WM8995_GPIO_12                          0x70B
+#define WM8995_GPIO_13                          0x70C
+#define WM8995_GPIO_14                          0x70D
+#define WM8995_PULL_CONTROL_1                   0x720
+#define WM8995_PULL_CONTROL_2                   0x721
+#define WM8995_INTERRUPT_STATUS_1               0x730
+#define WM8995_INTERRUPT_STATUS_2               0x731
+#define WM8995_INTERRUPT_RAW_STATUS_2           0x732
+#define WM8995_INTERRUPT_STATUS_1_MASK          0x738
+#define WM8995_INTERRUPT_STATUS_2_MASK          0x739
+#define WM8995_INTERRUPT_CONTROL                0x740
+#define WM8995_LEFT_PDM_SPEAKER_1               0x800
+#define WM8995_RIGHT_PDM_SPEAKER_1              0x801
+#define WM8995_PDM_SPEAKER_1_MUTE_SEQUENCE      0x802
+#define WM8995_LEFT_PDM_SPEAKER_2               0x808
+#define WM8995_RIGHT_PDM_SPEAKER_2              0x809
+#define WM8995_PDM_SPEAKER_2_MUTE_SEQUENCE      0x80A
+#define WM8995_WRITE_SEQUENCER_0                0x3000
+#define WM8995_WRITE_SEQUENCER_1                0x3001
+#define WM8995_WRITE_SEQUENCER_2                0x3002
+#define WM8995_WRITE_SEQUENCER_3                0x3003
+#define WM8995_WRITE_SEQUENCER_4                0x3004
+#define WM8995_WRITE_SEQUENCER_5                0x3005
+#define WM8995_WRITE_SEQUENCER_6                0x3006
+#define WM8995_WRITE_SEQUENCER_7                0x3007
+#define WM8995_WRITE_SEQUENCER_8                0x3008
+#define WM8995_WRITE_SEQUENCER_9                0x3009
+#define WM8995_WRITE_SEQUENCER_10               0x300A
+#define WM8995_WRITE_SEQUENCER_11               0x300B
+#define WM8995_WRITE_SEQUENCER_12               0x300C
+#define WM8995_WRITE_SEQUENCER_13               0x300D
+#define WM8995_WRITE_SEQUENCER_14               0x300E
+#define WM8995_WRITE_SEQUENCER_15               0x300F
+#define WM8995_WRITE_SEQUENCER_16               0x3010
+#define WM8995_WRITE_SEQUENCER_17               0x3011
+#define WM8995_WRITE_SEQUENCER_18               0x3012
+#define WM8995_WRITE_SEQUENCER_19               0x3013
+#define WM8995_WRITE_SEQUENCER_20               0x3014
+#define WM8995_WRITE_SEQUENCER_21               0x3015
+#define WM8995_WRITE_SEQUENCER_22               0x3016
+#define WM8995_WRITE_SEQUENCER_23               0x3017
+#define WM8995_WRITE_SEQUENCER_24               0x3018
+#define WM8995_WRITE_SEQUENCER_25               0x3019
+#define WM8995_WRITE_SEQUENCER_26               0x301A
+#define WM8995_WRITE_SEQUENCER_27               0x301B
+#define WM8995_WRITE_SEQUENCER_28               0x301C
+#define WM8995_WRITE_SEQUENCER_29               0x301D
+#define WM8995_WRITE_SEQUENCER_30               0x301E
+#define WM8995_WRITE_SEQUENCER_31               0x301F
+#define WM8995_WRITE_SEQUENCER_32               0x3020
+#define WM8995_WRITE_SEQUENCER_33               0x3021
+#define WM8995_WRITE_SEQUENCER_34               0x3022
+#define WM8995_WRITE_SEQUENCER_35               0x3023
+#define WM8995_WRITE_SEQUENCER_36               0x3024
+#define WM8995_WRITE_SEQUENCER_37               0x3025
+#define WM8995_WRITE_SEQUENCER_38               0x3026
+#define WM8995_WRITE_SEQUENCER_39               0x3027
+#define WM8995_WRITE_SEQUENCER_40               0x3028
+#define WM8995_WRITE_SEQUENCER_41               0x3029
+#define WM8995_WRITE_SEQUENCER_42               0x302A
+#define WM8995_WRITE_SEQUENCER_43               0x302B
+#define WM8995_WRITE_SEQUENCER_44               0x302C
+#define WM8995_WRITE_SEQUENCER_45               0x302D
+#define WM8995_WRITE_SEQUENCER_46               0x302E
+#define WM8995_WRITE_SEQUENCER_47               0x302F
+#define WM8995_WRITE_SEQUENCER_48               0x3030
+#define WM8995_WRITE_SEQUENCER_49               0x3031
+#define WM8995_WRITE_SEQUENCER_50               0x3032
+#define WM8995_WRITE_SEQUENCER_51               0x3033
+#define WM8995_WRITE_SEQUENCER_52               0x3034
+#define WM8995_WRITE_SEQUENCER_53               0x3035
+#define WM8995_WRITE_SEQUENCER_54               0x3036
+#define WM8995_WRITE_SEQUENCER_55               0x3037
+#define WM8995_WRITE_SEQUENCER_56               0x3038
+#define WM8995_WRITE_SEQUENCER_57               0x3039
+#define WM8995_WRITE_SEQUENCER_58               0x303A
+#define WM8995_WRITE_SEQUENCER_59               0x303B
+#define WM8995_WRITE_SEQUENCER_60               0x303C
+#define WM8995_WRITE_SEQUENCER_61               0x303D
+#define WM8995_WRITE_SEQUENCER_62               0x303E
+#define WM8995_WRITE_SEQUENCER_63               0x303F
+#define WM8995_WRITE_SEQUENCER_64               0x3040
+#define WM8995_WRITE_SEQUENCER_65               0x3041
+#define WM8995_WRITE_SEQUENCER_66               0x3042
+#define WM8995_WRITE_SEQUENCER_67               0x3043
+#define WM8995_WRITE_SEQUENCER_68               0x3044
+#define WM8995_WRITE_SEQUENCER_69               0x3045
+#define WM8995_WRITE_SEQUENCER_70               0x3046
+#define WM8995_WRITE_SEQUENCER_71               0x3047
+#define WM8995_WRITE_SEQUENCER_72               0x3048
+#define WM8995_WRITE_SEQUENCER_73               0x3049
+#define WM8995_WRITE_SEQUENCER_74               0x304A
+#define WM8995_WRITE_SEQUENCER_75               0x304B
+#define WM8995_WRITE_SEQUENCER_76               0x304C
+#define WM8995_WRITE_SEQUENCER_77               0x304D
+#define WM8995_WRITE_SEQUENCER_78               0x304E
+#define WM8995_WRITE_SEQUENCER_79               0x304F
+#define WM8995_WRITE_SEQUENCER_80               0x3050
+#define WM8995_WRITE_SEQUENCER_81               0x3051
+#define WM8995_WRITE_SEQUENCER_82               0x3052
+#define WM8995_WRITE_SEQUENCER_83               0x3053
+#define WM8995_WRITE_SEQUENCER_84               0x3054
+#define WM8995_WRITE_SEQUENCER_85               0x3055
+#define WM8995_WRITE_SEQUENCER_86               0x3056
+#define WM8995_WRITE_SEQUENCER_87               0x3057
+#define WM8995_WRITE_SEQUENCER_88               0x3058
+#define WM8995_WRITE_SEQUENCER_89               0x3059
+#define WM8995_WRITE_SEQUENCER_90               0x305A
+#define WM8995_WRITE_SEQUENCER_91               0x305B
+#define WM8995_WRITE_SEQUENCER_92               0x305C
+#define WM8995_WRITE_SEQUENCER_93               0x305D
+#define WM8995_WRITE_SEQUENCER_94               0x305E
+#define WM8995_WRITE_SEQUENCER_95               0x305F
+#define WM8995_WRITE_SEQUENCER_96               0x3060
+#define WM8995_WRITE_SEQUENCER_97               0x3061
+#define WM8995_WRITE_SEQUENCER_98               0x3062
+#define WM8995_WRITE_SEQUENCER_99               0x3063
+#define WM8995_WRITE_SEQUENCER_100              0x3064
+#define WM8995_WRITE_SEQUENCER_101              0x3065
+#define WM8995_WRITE_SEQUENCER_102              0x3066
+#define WM8995_WRITE_SEQUENCER_103              0x3067
+#define WM8995_WRITE_SEQUENCER_104              0x3068
+#define WM8995_WRITE_SEQUENCER_105              0x3069
+#define WM8995_WRITE_SEQUENCER_106              0x306A
+#define WM8995_WRITE_SEQUENCER_107              0x306B
+#define WM8995_WRITE_SEQUENCER_108              0x306C
+#define WM8995_WRITE_SEQUENCER_109              0x306D
+#define WM8995_WRITE_SEQUENCER_110              0x306E
+#define WM8995_WRITE_SEQUENCER_111              0x306F
+#define WM8995_WRITE_SEQUENCER_112              0x3070
+#define WM8995_WRITE_SEQUENCER_113              0x3071
+#define WM8995_WRITE_SEQUENCER_114              0x3072
+#define WM8995_WRITE_SEQUENCER_115              0x3073
+#define WM8995_WRITE_SEQUENCER_116              0x3074
+#define WM8995_WRITE_SEQUENCER_117              0x3075
+#define WM8995_WRITE_SEQUENCER_118              0x3076
+#define WM8995_WRITE_SEQUENCER_119              0x3077
+#define WM8995_WRITE_SEQUENCER_120              0x3078
+#define WM8995_WRITE_SEQUENCER_121              0x3079
+#define WM8995_WRITE_SEQUENCER_122              0x307A
+#define WM8995_WRITE_SEQUENCER_123              0x307B
+#define WM8995_WRITE_SEQUENCER_124              0x307C
+#define WM8995_WRITE_SEQUENCER_125              0x307D
+#define WM8995_WRITE_SEQUENCER_126              0x307E
+#define WM8995_WRITE_SEQUENCER_127              0x307F
+#define WM8995_WRITE_SEQUENCER_128              0x3080
+#define WM8995_WRITE_SEQUENCER_129              0x3081
+#define WM8995_WRITE_SEQUENCER_130              0x3082
+#define WM8995_WRITE_SEQUENCER_131              0x3083
+#define WM8995_WRITE_SEQUENCER_132              0x3084
+#define WM8995_WRITE_SEQUENCER_133              0x3085
+#define WM8995_WRITE_SEQUENCER_134              0x3086
+#define WM8995_WRITE_SEQUENCER_135              0x3087
+#define WM8995_WRITE_SEQUENCER_136              0x3088
+#define WM8995_WRITE_SEQUENCER_137              0x3089
+#define WM8995_WRITE_SEQUENCER_138              0x308A
+#define WM8995_WRITE_SEQUENCER_139              0x308B
+#define WM8995_WRITE_SEQUENCER_140              0x308C
+#define WM8995_WRITE_SEQUENCER_141              0x308D
+#define WM8995_WRITE_SEQUENCER_142              0x308E
+#define WM8995_WRITE_SEQUENCER_143              0x308F
+#define WM8995_WRITE_SEQUENCER_144              0x3090
+#define WM8995_WRITE_SEQUENCER_145              0x3091
+#define WM8995_WRITE_SEQUENCER_146              0x3092
+#define WM8995_WRITE_SEQUENCER_147              0x3093
+#define WM8995_WRITE_SEQUENCER_148              0x3094
+#define WM8995_WRITE_SEQUENCER_149              0x3095
+#define WM8995_WRITE_SEQUENCER_150              0x3096
+#define WM8995_WRITE_SEQUENCER_151              0x3097
+#define WM8995_WRITE_SEQUENCER_152              0x3098
+#define WM8995_WRITE_SEQUENCER_153              0x3099
+#define WM8995_WRITE_SEQUENCER_154              0x309A
+#define WM8995_WRITE_SEQUENCER_155              0x309B
+#define WM8995_WRITE_SEQUENCER_156              0x309C
+#define WM8995_WRITE_SEQUENCER_157              0x309D
+#define WM8995_WRITE_SEQUENCER_158              0x309E
+#define WM8995_WRITE_SEQUENCER_159              0x309F
+#define WM8995_WRITE_SEQUENCER_160              0x30A0
+#define WM8995_WRITE_SEQUENCER_161              0x30A1
+#define WM8995_WRITE_SEQUENCER_162              0x30A2
+#define WM8995_WRITE_SEQUENCER_163              0x30A3
+#define WM8995_WRITE_SEQUENCER_164              0x30A4
+#define WM8995_WRITE_SEQUENCER_165              0x30A5
+#define WM8995_WRITE_SEQUENCER_166              0x30A6
+#define WM8995_WRITE_SEQUENCER_167              0x30A7
+#define WM8995_WRITE_SEQUENCER_168              0x30A8
+#define WM8995_WRITE_SEQUENCER_169              0x30A9
+#define WM8995_WRITE_SEQUENCER_170              0x30AA
+#define WM8995_WRITE_SEQUENCER_171              0x30AB
+#define WM8995_WRITE_SEQUENCER_172              0x30AC
+#define WM8995_WRITE_SEQUENCER_173              0x30AD
+#define WM8995_WRITE_SEQUENCER_174              0x30AE
+#define WM8995_WRITE_SEQUENCER_175              0x30AF
+#define WM8995_WRITE_SEQUENCER_176              0x30B0
+#define WM8995_WRITE_SEQUENCER_177              0x30B1
+#define WM8995_WRITE_SEQUENCER_178              0x30B2
+#define WM8995_WRITE_SEQUENCER_179              0x30B3
+#define WM8995_WRITE_SEQUENCER_180              0x30B4
+#define WM8995_WRITE_SEQUENCER_181              0x30B5
+#define WM8995_WRITE_SEQUENCER_182              0x30B6
+#define WM8995_WRITE_SEQUENCER_183              0x30B7
+#define WM8995_WRITE_SEQUENCER_184              0x30B8
+#define WM8995_WRITE_SEQUENCER_185              0x30B9
+#define WM8995_WRITE_SEQUENCER_186              0x30BA
+#define WM8995_WRITE_SEQUENCER_187              0x30BB
+#define WM8995_WRITE_SEQUENCER_188              0x30BC
+#define WM8995_WRITE_SEQUENCER_189              0x30BD
+#define WM8995_WRITE_SEQUENCER_190              0x30BE
+#define WM8995_WRITE_SEQUENCER_191              0x30BF
+#define WM8995_WRITE_SEQUENCER_192              0x30C0
+#define WM8995_WRITE_SEQUENCER_193              0x30C1
+#define WM8995_WRITE_SEQUENCER_194              0x30C2
+#define WM8995_WRITE_SEQUENCER_195              0x30C3
+#define WM8995_WRITE_SEQUENCER_196              0x30C4
+#define WM8995_WRITE_SEQUENCER_197              0x30C5
+#define WM8995_WRITE_SEQUENCER_198              0x30C6
+#define WM8995_WRITE_SEQUENCER_199              0x30C7
+#define WM8995_WRITE_SEQUENCER_200              0x30C8
+#define WM8995_WRITE_SEQUENCER_201              0x30C9
+#define WM8995_WRITE_SEQUENCER_202              0x30CA
+#define WM8995_WRITE_SEQUENCER_203              0x30CB
+#define WM8995_WRITE_SEQUENCER_204              0x30CC
+#define WM8995_WRITE_SEQUENCER_205              0x30CD
+#define WM8995_WRITE_SEQUENCER_206              0x30CE
+#define WM8995_WRITE_SEQUENCER_207              0x30CF
+#define WM8995_WRITE_SEQUENCER_208              0x30D0
+#define WM8995_WRITE_SEQUENCER_209              0x30D1
+#define WM8995_WRITE_SEQUENCER_210              0x30D2
+#define WM8995_WRITE_SEQUENCER_211              0x30D3
+#define WM8995_WRITE_SEQUENCER_212              0x30D4
+#define WM8995_WRITE_SEQUENCER_213              0x30D5
+#define WM8995_WRITE_SEQUENCER_214              0x30D6
+#define WM8995_WRITE_SEQUENCER_215              0x30D7
+#define WM8995_WRITE_SEQUENCER_216              0x30D8
+#define WM8995_WRITE_SEQUENCER_217              0x30D9
+#define WM8995_WRITE_SEQUENCER_218              0x30DA
+#define WM8995_WRITE_SEQUENCER_219              0x30DB
+#define WM8995_WRITE_SEQUENCER_220              0x30DC
+#define WM8995_WRITE_SEQUENCER_221              0x30DD
+#define WM8995_WRITE_SEQUENCER_222              0x30DE
+#define WM8995_WRITE_SEQUENCER_223              0x30DF
+#define WM8995_WRITE_SEQUENCER_224              0x30E0
+#define WM8995_WRITE_SEQUENCER_225              0x30E1
+#define WM8995_WRITE_SEQUENCER_226              0x30E2
+#define WM8995_WRITE_SEQUENCER_227              0x30E3
+#define WM8995_WRITE_SEQUENCER_228              0x30E4
+#define WM8995_WRITE_SEQUENCER_229              0x30E5
+#define WM8995_WRITE_SEQUENCER_230              0x30E6
+#define WM8995_WRITE_SEQUENCER_231              0x30E7
+#define WM8995_WRITE_SEQUENCER_232              0x30E8
+#define WM8995_WRITE_SEQUENCER_233              0x30E9
+#define WM8995_WRITE_SEQUENCER_234              0x30EA
+#define WM8995_WRITE_SEQUENCER_235              0x30EB
+#define WM8995_WRITE_SEQUENCER_236              0x30EC
+#define WM8995_WRITE_SEQUENCER_237              0x30ED
+#define WM8995_WRITE_SEQUENCER_238              0x30EE
+#define WM8995_WRITE_SEQUENCER_239              0x30EF
+#define WM8995_WRITE_SEQUENCER_240              0x30F0
+#define WM8995_WRITE_SEQUENCER_241              0x30F1
+#define WM8995_WRITE_SEQUENCER_242              0x30F2
+#define WM8995_WRITE_SEQUENCER_243              0x30F3
+#define WM8995_WRITE_SEQUENCER_244              0x30F4
+#define WM8995_WRITE_SEQUENCER_245              0x30F5
+#define WM8995_WRITE_SEQUENCER_246              0x30F6
+#define WM8995_WRITE_SEQUENCER_247              0x30F7
+#define WM8995_WRITE_SEQUENCER_248              0x30F8
+#define WM8995_WRITE_SEQUENCER_249              0x30F9
+#define WM8995_WRITE_SEQUENCER_250              0x30FA
+#define WM8995_WRITE_SEQUENCER_251              0x30FB
+#define WM8995_WRITE_SEQUENCER_252              0x30FC
+#define WM8995_WRITE_SEQUENCER_253              0x30FD
+#define WM8995_WRITE_SEQUENCER_254              0x30FE
+#define WM8995_WRITE_SEQUENCER_255              0x30FF
+#define WM8995_WRITE_SEQUENCER_256              0x3100
+#define WM8995_WRITE_SEQUENCER_257              0x3101
+#define WM8995_WRITE_SEQUENCER_258              0x3102
+#define WM8995_WRITE_SEQUENCER_259              0x3103
+#define WM8995_WRITE_SEQUENCER_260              0x3104
+#define WM8995_WRITE_SEQUENCER_261              0x3105
+#define WM8995_WRITE_SEQUENCER_262              0x3106
+#define WM8995_WRITE_SEQUENCER_263              0x3107
+#define WM8995_WRITE_SEQUENCER_264              0x3108
+#define WM8995_WRITE_SEQUENCER_265              0x3109
+#define WM8995_WRITE_SEQUENCER_266              0x310A
+#define WM8995_WRITE_SEQUENCER_267              0x310B
+#define WM8995_WRITE_SEQUENCER_268              0x310C
+#define WM8995_WRITE_SEQUENCER_269              0x310D
+#define WM8995_WRITE_SEQUENCER_270              0x310E
+#define WM8995_WRITE_SEQUENCER_271              0x310F
+#define WM8995_WRITE_SEQUENCER_272              0x3110
+#define WM8995_WRITE_SEQUENCER_273              0x3111
+#define WM8995_WRITE_SEQUENCER_274              0x3112
+#define WM8995_WRITE_SEQUENCER_275              0x3113
+#define WM8995_WRITE_SEQUENCER_276              0x3114
+#define WM8995_WRITE_SEQUENCER_277              0x3115
+#define WM8995_WRITE_SEQUENCER_278              0x3116
+#define WM8995_WRITE_SEQUENCER_279              0x3117
+#define WM8995_WRITE_SEQUENCER_280              0x3118
+#define WM8995_WRITE_SEQUENCER_281              0x3119
+#define WM8995_WRITE_SEQUENCER_282              0x311A
+#define WM8995_WRITE_SEQUENCER_283              0x311B
+#define WM8995_WRITE_SEQUENCER_284              0x311C
+#define WM8995_WRITE_SEQUENCER_285              0x311D
+#define WM8995_WRITE_SEQUENCER_286              0x311E
+#define WM8995_WRITE_SEQUENCER_287              0x311F
+#define WM8995_WRITE_SEQUENCER_288              0x3120
+#define WM8995_WRITE_SEQUENCER_289              0x3121
+#define WM8995_WRITE_SEQUENCER_290              0x3122
+#define WM8995_WRITE_SEQUENCER_291              0x3123
+#define WM8995_WRITE_SEQUENCER_292              0x3124
+#define WM8995_WRITE_SEQUENCER_293              0x3125
+#define WM8995_WRITE_SEQUENCER_294              0x3126
+#define WM8995_WRITE_SEQUENCER_295              0x3127
+#define WM8995_WRITE_SEQUENCER_296              0x3128
+#define WM8995_WRITE_SEQUENCER_297              0x3129
+#define WM8995_WRITE_SEQUENCER_298              0x312A
+#define WM8995_WRITE_SEQUENCER_299              0x312B
+#define WM8995_WRITE_SEQUENCER_300              0x312C
+#define WM8995_WRITE_SEQUENCER_301              0x312D
+#define WM8995_WRITE_SEQUENCER_302              0x312E
+#define WM8995_WRITE_SEQUENCER_303              0x312F
+#define WM8995_WRITE_SEQUENCER_304              0x3130
+#define WM8995_WRITE_SEQUENCER_305              0x3131
+#define WM8995_WRITE_SEQUENCER_306              0x3132
+#define WM8995_WRITE_SEQUENCER_307              0x3133
+#define WM8995_WRITE_SEQUENCER_308              0x3134
+#define WM8995_WRITE_SEQUENCER_309              0x3135
+#define WM8995_WRITE_SEQUENCER_310              0x3136
+#define WM8995_WRITE_SEQUENCER_311              0x3137
+#define WM8995_WRITE_SEQUENCER_312              0x3138
+#define WM8995_WRITE_SEQUENCER_313              0x3139
+#define WM8995_WRITE_SEQUENCER_314              0x313A
+#define WM8995_WRITE_SEQUENCER_315              0x313B
+#define WM8995_WRITE_SEQUENCER_316              0x313C
+#define WM8995_WRITE_SEQUENCER_317              0x313D
+#define WM8995_WRITE_SEQUENCER_318              0x313E
+#define WM8995_WRITE_SEQUENCER_319              0x313F
+#define WM8995_WRITE_SEQUENCER_320              0x3140
+#define WM8995_WRITE_SEQUENCER_321              0x3141
+#define WM8995_WRITE_SEQUENCER_322              0x3142
+#define WM8995_WRITE_SEQUENCER_323              0x3143
+#define WM8995_WRITE_SEQUENCER_324              0x3144
+#define WM8995_WRITE_SEQUENCER_325              0x3145
+#define WM8995_WRITE_SEQUENCER_326              0x3146
+#define WM8995_WRITE_SEQUENCER_327              0x3147
+#define WM8995_WRITE_SEQUENCER_328              0x3148
+#define WM8995_WRITE_SEQUENCER_329              0x3149
+#define WM8995_WRITE_SEQUENCER_330              0x314A
+#define WM8995_WRITE_SEQUENCER_331              0x314B
+#define WM8995_WRITE_SEQUENCER_332              0x314C
+#define WM8995_WRITE_SEQUENCER_333              0x314D
+#define WM8995_WRITE_SEQUENCER_334              0x314E
+#define WM8995_WRITE_SEQUENCER_335              0x314F
+#define WM8995_WRITE_SEQUENCER_336              0x3150
+#define WM8995_WRITE_SEQUENCER_337              0x3151
+#define WM8995_WRITE_SEQUENCER_338              0x3152
+#define WM8995_WRITE_SEQUENCER_339              0x3153
+#define WM8995_WRITE_SEQUENCER_340              0x3154
+#define WM8995_WRITE_SEQUENCER_341              0x3155
+#define WM8995_WRITE_SEQUENCER_342              0x3156
+#define WM8995_WRITE_SEQUENCER_343              0x3157
+#define WM8995_WRITE_SEQUENCER_344              0x3158
+#define WM8995_WRITE_SEQUENCER_345              0x3159
+#define WM8995_WRITE_SEQUENCER_346              0x315A
+#define WM8995_WRITE_SEQUENCER_347              0x315B
+#define WM8995_WRITE_SEQUENCER_348              0x315C
+#define WM8995_WRITE_SEQUENCER_349              0x315D
+#define WM8995_WRITE_SEQUENCER_350              0x315E
+#define WM8995_WRITE_SEQUENCER_351              0x315F
+#define WM8995_WRITE_SEQUENCER_352              0x3160
+#define WM8995_WRITE_SEQUENCER_353              0x3161
+#define WM8995_WRITE_SEQUENCER_354              0x3162
+#define WM8995_WRITE_SEQUENCER_355              0x3163
+#define WM8995_WRITE_SEQUENCER_356              0x3164
+#define WM8995_WRITE_SEQUENCER_357              0x3165
+#define WM8995_WRITE_SEQUENCER_358              0x3166
+#define WM8995_WRITE_SEQUENCER_359              0x3167
+#define WM8995_WRITE_SEQUENCER_360              0x3168
+#define WM8995_WRITE_SEQUENCER_361              0x3169
+#define WM8995_WRITE_SEQUENCER_362              0x316A
+#define WM8995_WRITE_SEQUENCER_363              0x316B
+#define WM8995_WRITE_SEQUENCER_364              0x316C
+#define WM8995_WRITE_SEQUENCER_365              0x316D
+#define WM8995_WRITE_SEQUENCER_366              0x316E
+#define WM8995_WRITE_SEQUENCER_367              0x316F
+#define WM8995_WRITE_SEQUENCER_368              0x3170
+#define WM8995_WRITE_SEQUENCER_369              0x3171
+#define WM8995_WRITE_SEQUENCER_370              0x3172
+#define WM8995_WRITE_SEQUENCER_371              0x3173
+#define WM8995_WRITE_SEQUENCER_372              0x3174
+#define WM8995_WRITE_SEQUENCER_373              0x3175
+#define WM8995_WRITE_SEQUENCER_374              0x3176
+#define WM8995_WRITE_SEQUENCER_375              0x3177
+#define WM8995_WRITE_SEQUENCER_376              0x3178
+#define WM8995_WRITE_SEQUENCER_377              0x3179
+#define WM8995_WRITE_SEQUENCER_378              0x317A
+#define WM8995_WRITE_SEQUENCER_379              0x317B
+#define WM8995_WRITE_SEQUENCER_380              0x317C
+#define WM8995_WRITE_SEQUENCER_381              0x317D
+#define WM8995_WRITE_SEQUENCER_382              0x317E
+#define WM8995_WRITE_SEQUENCER_383              0x317F
+#define WM8995_WRITE_SEQUENCER_384              0x3180
+#define WM8995_WRITE_SEQUENCER_385              0x3181
+#define WM8995_WRITE_SEQUENCER_386              0x3182
+#define WM8995_WRITE_SEQUENCER_387              0x3183
+#define WM8995_WRITE_SEQUENCER_388              0x3184
+#define WM8995_WRITE_SEQUENCER_389              0x3185
+#define WM8995_WRITE_SEQUENCER_390              0x3186
+#define WM8995_WRITE_SEQUENCER_391              0x3187
+#define WM8995_WRITE_SEQUENCER_392              0x3188
+#define WM8995_WRITE_SEQUENCER_393              0x3189
+#define WM8995_WRITE_SEQUENCER_394              0x318A
+#define WM8995_WRITE_SEQUENCER_395              0x318B
+#define WM8995_WRITE_SEQUENCER_396              0x318C
+#define WM8995_WRITE_SEQUENCER_397              0x318D
+#define WM8995_WRITE_SEQUENCER_398              0x318E
+#define WM8995_WRITE_SEQUENCER_399              0x318F
+#define WM8995_WRITE_SEQUENCER_400              0x3190
+#define WM8995_WRITE_SEQUENCER_401              0x3191
+#define WM8995_WRITE_SEQUENCER_402              0x3192
+#define WM8995_WRITE_SEQUENCER_403              0x3193
+#define WM8995_WRITE_SEQUENCER_404              0x3194
+#define WM8995_WRITE_SEQUENCER_405              0x3195
+#define WM8995_WRITE_SEQUENCER_406              0x3196
+#define WM8995_WRITE_SEQUENCER_407              0x3197
+#define WM8995_WRITE_SEQUENCER_408              0x3198
+#define WM8995_WRITE_SEQUENCER_409              0x3199
+#define WM8995_WRITE_SEQUENCER_410              0x319A
+#define WM8995_WRITE_SEQUENCER_411              0x319B
+#define WM8995_WRITE_SEQUENCER_412              0x319C
+#define WM8995_WRITE_SEQUENCER_413              0x319D
+#define WM8995_WRITE_SEQUENCER_414              0x319E
+#define WM8995_WRITE_SEQUENCER_415              0x319F
+#define WM8995_WRITE_SEQUENCER_416              0x31A0
+#define WM8995_WRITE_SEQUENCER_417              0x31A1
+#define WM8995_WRITE_SEQUENCER_418              0x31A2
+#define WM8995_WRITE_SEQUENCER_419              0x31A3
+#define WM8995_WRITE_SEQUENCER_420              0x31A4
+#define WM8995_WRITE_SEQUENCER_421              0x31A5
+#define WM8995_WRITE_SEQUENCER_422              0x31A6
+#define WM8995_WRITE_SEQUENCER_423              0x31A7
+#define WM8995_WRITE_SEQUENCER_424              0x31A8
+#define WM8995_WRITE_SEQUENCER_425              0x31A9
+#define WM8995_WRITE_SEQUENCER_426              0x31AA
+#define WM8995_WRITE_SEQUENCER_427              0x31AB
+#define WM8995_WRITE_SEQUENCER_428              0x31AC
+#define WM8995_WRITE_SEQUENCER_429              0x31AD
+#define WM8995_WRITE_SEQUENCER_430              0x31AE
+#define WM8995_WRITE_SEQUENCER_431              0x31AF
+#define WM8995_WRITE_SEQUENCER_432              0x31B0
+#define WM8995_WRITE_SEQUENCER_433              0x31B1
+#define WM8995_WRITE_SEQUENCER_434              0x31B2
+#define WM8995_WRITE_SEQUENCER_435              0x31B3
+#define WM8995_WRITE_SEQUENCER_436              0x31B4
+#define WM8995_WRITE_SEQUENCER_437              0x31B5
+#define WM8995_WRITE_SEQUENCER_438              0x31B6
+#define WM8995_WRITE_SEQUENCER_439              0x31B7
+#define WM8995_WRITE_SEQUENCER_440              0x31B8
+#define WM8995_WRITE_SEQUENCER_441              0x31B9
+#define WM8995_WRITE_SEQUENCER_442              0x31BA
+#define WM8995_WRITE_SEQUENCER_443              0x31BB
+#define WM8995_WRITE_SEQUENCER_444              0x31BC
+#define WM8995_WRITE_SEQUENCER_445              0x31BD
+#define WM8995_WRITE_SEQUENCER_446              0x31BE
+#define WM8995_WRITE_SEQUENCER_447              0x31BF
+#define WM8995_WRITE_SEQUENCER_448              0x31C0
+#define WM8995_WRITE_SEQUENCER_449              0x31C1
+#define WM8995_WRITE_SEQUENCER_450              0x31C2
+#define WM8995_WRITE_SEQUENCER_451              0x31C3
+#define WM8995_WRITE_SEQUENCER_452              0x31C4
+#define WM8995_WRITE_SEQUENCER_453              0x31C5
+#define WM8995_WRITE_SEQUENCER_454              0x31C6
+#define WM8995_WRITE_SEQUENCER_455              0x31C7
+#define WM8995_WRITE_SEQUENCER_456              0x31C8
+#define WM8995_WRITE_SEQUENCER_457              0x31C9
+#define WM8995_WRITE_SEQUENCER_458              0x31CA
+#define WM8995_WRITE_SEQUENCER_459              0x31CB
+#define WM8995_WRITE_SEQUENCER_460              0x31CC
+#define WM8995_WRITE_SEQUENCER_461              0x31CD
+#define WM8995_WRITE_SEQUENCER_462              0x31CE
+#define WM8995_WRITE_SEQUENCER_463              0x31CF
+#define WM8995_WRITE_SEQUENCER_464              0x31D0
+#define WM8995_WRITE_SEQUENCER_465              0x31D1
+#define WM8995_WRITE_SEQUENCER_466              0x31D2
+#define WM8995_WRITE_SEQUENCER_467              0x31D3
+#define WM8995_WRITE_SEQUENCER_468              0x31D4
+#define WM8995_WRITE_SEQUENCER_469              0x31D5
+#define WM8995_WRITE_SEQUENCER_470              0x31D6
+#define WM8995_WRITE_SEQUENCER_471              0x31D7
+#define WM8995_WRITE_SEQUENCER_472              0x31D8
+#define WM8995_WRITE_SEQUENCER_473              0x31D9
+#define WM8995_WRITE_SEQUENCER_474              0x31DA
+#define WM8995_WRITE_SEQUENCER_475              0x31DB
+#define WM8995_WRITE_SEQUENCER_476              0x31DC
+#define WM8995_WRITE_SEQUENCER_477              0x31DD
+#define WM8995_WRITE_SEQUENCER_478              0x31DE
+#define WM8995_WRITE_SEQUENCER_479              0x31DF
+#define WM8995_WRITE_SEQUENCER_480              0x31E0
+#define WM8995_WRITE_SEQUENCER_481              0x31E1
+#define WM8995_WRITE_SEQUENCER_482              0x31E2
+#define WM8995_WRITE_SEQUENCER_483              0x31E3
+#define WM8995_WRITE_SEQUENCER_484              0x31E4
+#define WM8995_WRITE_SEQUENCER_485              0x31E5
+#define WM8995_WRITE_SEQUENCER_486              0x31E6
+#define WM8995_WRITE_SEQUENCER_487              0x31E7
+#define WM8995_WRITE_SEQUENCER_488              0x31E8
+#define WM8995_WRITE_SEQUENCER_489              0x31E9
+#define WM8995_WRITE_SEQUENCER_490              0x31EA
+#define WM8995_WRITE_SEQUENCER_491              0x31EB
+#define WM8995_WRITE_SEQUENCER_492              0x31EC
+#define WM8995_WRITE_SEQUENCER_493              0x31ED
+#define WM8995_WRITE_SEQUENCER_494              0x31EE
+#define WM8995_WRITE_SEQUENCER_495              0x31EF
+#define WM8995_WRITE_SEQUENCER_496              0x31F0
+#define WM8995_WRITE_SEQUENCER_497              0x31F1
+#define WM8995_WRITE_SEQUENCER_498              0x31F2
+#define WM8995_WRITE_SEQUENCER_499              0x31F3
+#define WM8995_WRITE_SEQUENCER_500              0x31F4
+#define WM8995_WRITE_SEQUENCER_501              0x31F5
+#define WM8995_WRITE_SEQUENCER_502              0x31F6
+#define WM8995_WRITE_SEQUENCER_503              0x31F7
+#define WM8995_WRITE_SEQUENCER_504              0x31F8
+#define WM8995_WRITE_SEQUENCER_505              0x31F9
+#define WM8995_WRITE_SEQUENCER_506              0x31FA
+#define WM8995_WRITE_SEQUENCER_507              0x31FB
+#define WM8995_WRITE_SEQUENCER_508              0x31FC
+#define WM8995_WRITE_SEQUENCER_509              0x31FD
+#define WM8995_WRITE_SEQUENCER_510              0x31FE
+#define WM8995_WRITE_SEQUENCER_511              0x31FF
+
+#define WM8995_REGISTER_COUNT                   725
+#define WM8995_MAX_REGISTER                     0x31FF
+
+#define WM8995_MAX_CACHED_REGISTER		WM8995_MAX_REGISTER
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM8995_SW_RESET_MASK                    0xFFFF	/* SW_RESET - [15:0] */
+#define WM8995_SW_RESET_SHIFT                        0	/* SW_RESET - [15:0] */
+#define WM8995_SW_RESET_WIDTH                       16	/* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8995_MICB2_ENA                        0x0200	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_MASK                   0x0200	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_SHIFT                       9	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_WIDTH                       1	/* MICB2_ENA */
+#define WM8995_MICB1_ENA                        0x0100	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_MASK                   0x0100	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_SHIFT                       8	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_WIDTH                       1	/* MICB1_ENA */
+#define WM8995_HPOUT2L_ENA                      0x0080	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_MASK                 0x0080	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_SHIFT                     7	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_WIDTH                     1	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2R_ENA                      0x0040	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_MASK                 0x0040	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_SHIFT                     6	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_WIDTH                     1	/* HPOUT2R_ENA */
+#define WM8995_HPOUT1L_ENA                      0x0020	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_MASK                 0x0020	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_SHIFT                     5	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_WIDTH                     1	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1R_ENA                      0x0010	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_MASK                 0x0010	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_SHIFT                     4	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_WIDTH                     1	/* HPOUT1R_ENA */
+#define WM8995_BG_ENA                           0x0001	/* BG_ENA */
+#define WM8995_BG_ENA_MASK                      0x0001	/* BG_ENA */
+#define WM8995_BG_ENA_SHIFT                          0	/* BG_ENA */
+#define WM8995_BG_ENA_WIDTH                          1	/* BG_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8995_OPCLK_ENA                        0x0800	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_MASK                   0x0800	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_SHIFT                      11	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_WIDTH                       1	/* OPCLK_ENA */
+#define WM8995_IN1L_ENA                         0x0020	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_MASK                    0x0020	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_SHIFT                        5	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_WIDTH                        1	/* IN1L_ENA */
+#define WM8995_IN1R_ENA                         0x0010	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_MASK                    0x0010	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_SHIFT                        4	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_WIDTH                        1	/* IN1R_ENA */
+#define WM8995_LDO2_ENA                         0x0002	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_MASK                    0x0002	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_SHIFT                        1	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_WIDTH                        1	/* LDO2_ENA */
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8995_AIF2ADCL_ENA                     0x2000	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_MASK                0x2000	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_SHIFT                   13	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_WIDTH                    1	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCR_ENA                     0x1000	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_MASK                0x1000	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_SHIFT                   12	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_WIDTH                    1	/* AIF2ADCR_ENA */
+#define WM8995_AIF1ADC2L_ENA                    0x0800	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_MASK               0x0800	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_SHIFT                  11	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_WIDTH                   1	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2R_ENA                    0x0400	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_MASK               0x0400	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_SHIFT                  10	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_WIDTH                   1	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC1L_ENA                    0x0200	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_MASK               0x0200	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_SHIFT                   9	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_WIDTH                   1	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1R_ENA                    0x0100	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_MASK               0x0100	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_SHIFT                   8	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_WIDTH                   1	/* AIF1ADC1R_ENA */
+#define WM8995_DMIC3L_ENA                       0x0080	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_MASK                  0x0080	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_SHIFT                      7	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_WIDTH                      1	/* DMIC3L_ENA */
+#define WM8995_DMIC3R_ENA                       0x0040	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_MASK                  0x0040	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_SHIFT                      6	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_WIDTH                      1	/* DMIC3R_ENA */
+#define WM8995_DMIC2L_ENA                       0x0020	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_MASK                  0x0020	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_SHIFT                      5	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_WIDTH                      1	/* DMIC2L_ENA */
+#define WM8995_DMIC2R_ENA                       0x0010	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_MASK                  0x0010	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_SHIFT                      4	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_WIDTH                      1	/* DMIC2R_ENA */
+#define WM8995_DMIC1L_ENA                       0x0008	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_MASK                  0x0008	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_SHIFT                      3	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_WIDTH                      1	/* DMIC1L_ENA */
+#define WM8995_DMIC1R_ENA                       0x0004	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_MASK                  0x0004	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_SHIFT                      2	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_WIDTH                      1	/* DMIC1R_ENA */
+#define WM8995_ADCL_ENA                         0x0002	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_MASK                    0x0002	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_SHIFT                        1	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_WIDTH                        1	/* ADCL_ENA */
+#define WM8995_ADCR_ENA                         0x0001	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_MASK                    0x0001	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_SHIFT                        0	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_WIDTH                        1	/* ADCR_ENA */
+
+/*
+ * R4 (0x04) - Power Management (4)
+ */
+#define WM8995_AIF2DACL_ENA                     0x2000	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_MASK                0x2000	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_SHIFT                   13	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_WIDTH                    1	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACR_ENA                     0x1000	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_MASK                0x1000	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_SHIFT                   12	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_WIDTH                    1	/* AIF2DACR_ENA */
+#define WM8995_AIF1DAC2L_ENA                    0x0800	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_MASK               0x0800	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_SHIFT                  11	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_WIDTH                   1	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2R_ENA                    0x0400	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_MASK               0x0400	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_SHIFT                  10	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_WIDTH                   1	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC1L_ENA                    0x0200	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_MASK               0x0200	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_SHIFT                   9	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_WIDTH                   1	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1R_ENA                    0x0100	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_MASK               0x0100	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_SHIFT                   8	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_WIDTH                   1	/* AIF1DAC1R_ENA */
+#define WM8995_DAC2L_ENA                        0x0008	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_MASK                   0x0008	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_SHIFT                       3	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_WIDTH                       1	/* DAC2L_ENA */
+#define WM8995_DAC2R_ENA                        0x0004	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_MASK                   0x0004	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_SHIFT                       2	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_WIDTH                       1	/* DAC2R_ENA */
+#define WM8995_DAC1L_ENA                        0x0002	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_MASK                   0x0002	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_SHIFT                       1	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_WIDTH                       1	/* DAC1L_ENA */
+#define WM8995_DAC1R_ENA                        0x0001	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_MASK                   0x0001	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_SHIFT                       0	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_WIDTH                       1	/* DAC1R_ENA */
+
+/*
+ * R5 (0x05) - Power Management (5)
+ */
+#define WM8995_DMIC_SRC2_MASK                   0x0300	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC2_SHIFT                       8	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC2_WIDTH                       2	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC1_MASK                   0x00C0	/* DMIC_SRC1 - [7:6] */
+#define WM8995_DMIC_SRC1_SHIFT                       6	/* DMIC_SRC1 - [7:6] */
+#define WM8995_DMIC_SRC1_WIDTH                       2	/* DMIC_SRC1 - [7:6] */
+#define WM8995_AIF3_TRI                         0x0020	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_MASK                    0x0020	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_SHIFT                        5	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_WIDTH                        1	/* AIF3_TRI */
+#define WM8995_AIF3_ADCDAT_SRC_MASK             0x0018	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF3_ADCDAT_SRC_SHIFT                 3	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF3_ADCDAT_SRC_WIDTH                 2	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF2_ADCDAT_SRC                  0x0004	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_MASK             0x0004	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_SHIFT                 2	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_WIDTH                 1	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC                  0x0002	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_MASK             0x0002	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_SHIFT                 1	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_WIDTH                 1	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC                  0x0001	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_MASK             0x0001	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_SHIFT                 0	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_WIDTH                 1	/* AIF1_DACDAT_SRC */
+
+/*
+ * R16 (0x10) - Left Line Input 1 Volume
+ */
+#define WM8995_IN1_VU                           0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_MASK                      0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_SHIFT                          7	/* IN1_VU */
+#define WM8995_IN1_VU_WIDTH                          1	/* IN1_VU */
+#define WM8995_IN1L_ZC                          0x0020	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_MASK                     0x0020	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_SHIFT                         5	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_WIDTH                         1	/* IN1L_ZC */
+#define WM8995_IN1L_VOL_MASK                    0x001F	/* IN1L_VOL - [4:0] */
+#define WM8995_IN1L_VOL_SHIFT                        0	/* IN1L_VOL - [4:0] */
+#define WM8995_IN1L_VOL_WIDTH                        5	/* IN1L_VOL - [4:0] */
+
+/*
+ * R17 (0x11) - Right Line Input 1 Volume
+ */
+#define WM8995_IN1_VU                           0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_MASK                      0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_SHIFT                          7	/* IN1_VU */
+#define WM8995_IN1_VU_WIDTH                          1	/* IN1_VU */
+#define WM8995_IN1R_ZC                          0x0020	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_MASK                     0x0020	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_SHIFT                         5	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_WIDTH                         1	/* IN1R_ZC */
+#define WM8995_IN1R_VOL_MASK                    0x001F	/* IN1R_VOL - [4:0] */
+#define WM8995_IN1R_VOL_SHIFT                        0	/* IN1R_VOL - [4:0] */
+#define WM8995_IN1R_VOL_WIDTH                        5	/* IN1R_VOL - [4:0] */
+
+/*
+ * R18 (0x12) - Left Line Input Control
+ */
+#define WM8995_IN1L_BOOST_MASK                  0x0030	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_BOOST_SHIFT                      4	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_BOOST_WIDTH                      2	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_MODE_MASK                   0x000C	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1L_MODE_SHIFT                       2	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1L_MODE_WIDTH                       2	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1R_MODE_MASK                   0x0003	/* IN1R_MODE - [1:0] */
+#define WM8995_IN1R_MODE_SHIFT                       0	/* IN1R_MODE - [1:0] */
+#define WM8995_IN1R_MODE_WIDTH                       2	/* IN1R_MODE - [1:0] */
+
+/*
+ * R24 (0x18) - DAC1 Left Volume
+ */
+#define WM8995_DAC1L_MUTE                       0x0200	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_MASK                  0x0200	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_SHIFT                      9	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_WIDTH                      1	/* DAC1L_MUTE */
+#define WM8995_DAC1_VU                          0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_MASK                     0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_SHIFT                         8	/* DAC1_VU */
+#define WM8995_DAC1_VU_WIDTH                         1	/* DAC1_VU */
+#define WM8995_DAC1L_VOL_MASK                   0x00FF	/* DAC1L_VOL - [7:0] */
+#define WM8995_DAC1L_VOL_SHIFT                       0	/* DAC1L_VOL - [7:0] */
+#define WM8995_DAC1L_VOL_WIDTH                       8	/* DAC1L_VOL - [7:0] */
+
+/*
+ * R25 (0x19) - DAC1 Right Volume
+ */
+#define WM8995_DAC1R_MUTE                       0x0200	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_MASK                  0x0200	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_SHIFT                      9	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_WIDTH                      1	/* DAC1R_MUTE */
+#define WM8995_DAC1_VU                          0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_MASK                     0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_SHIFT                         8	/* DAC1_VU */
+#define WM8995_DAC1_VU_WIDTH                         1	/* DAC1_VU */
+#define WM8995_DAC1R_VOL_MASK                   0x00FF	/* DAC1R_VOL - [7:0] */
+#define WM8995_DAC1R_VOL_SHIFT                       0	/* DAC1R_VOL - [7:0] */
+#define WM8995_DAC1R_VOL_WIDTH                       8	/* DAC1R_VOL - [7:0] */
+
+/*
+ * R26 (0x1A) - DAC2 Left Volume
+ */
+#define WM8995_DAC2L_MUTE                       0x0200	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_MASK                  0x0200	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_SHIFT                      9	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_WIDTH                      1	/* DAC2L_MUTE */
+#define WM8995_DAC2_VU                          0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_MASK                     0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_SHIFT                         8	/* DAC2_VU */
+#define WM8995_DAC2_VU_WIDTH                         1	/* DAC2_VU */
+#define WM8995_DAC2L_VOL_MASK                   0x00FF	/* DAC2L_VOL - [7:0] */
+#define WM8995_DAC2L_VOL_SHIFT                       0	/* DAC2L_VOL - [7:0] */
+#define WM8995_DAC2L_VOL_WIDTH                       8	/* DAC2L_VOL - [7:0] */
+
+/*
+ * R27 (0x1B) - DAC2 Right Volume
+ */
+#define WM8995_DAC2R_MUTE                       0x0200	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_MASK                  0x0200	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_SHIFT                      9	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_WIDTH                      1	/* DAC2R_MUTE */
+#define WM8995_DAC2_VU                          0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_MASK                     0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_SHIFT                         8	/* DAC2_VU */
+#define WM8995_DAC2_VU_WIDTH                         1	/* DAC2_VU */
+#define WM8995_DAC2R_VOL_MASK                   0x00FF	/* DAC2R_VOL - [7:0] */
+#define WM8995_DAC2R_VOL_SHIFT                       0	/* DAC2R_VOL - [7:0] */
+#define WM8995_DAC2R_VOL_WIDTH                       8	/* DAC2R_VOL - [7:0] */
+
+/*
+ * R28 (0x1C) - Output Volume ZC (1)
+ */
+#define WM8995_HPOUT2L_ZC                       0x0008	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_MASK                  0x0008	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_SHIFT                      3	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_WIDTH                      1	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2R_ZC                       0x0004	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_MASK                  0x0004	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_SHIFT                      2	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_WIDTH                      1	/* HPOUT2R_ZC */
+#define WM8995_HPOUT1L_ZC                       0x0002	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_MASK                  0x0002	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_SHIFT                      1	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_WIDTH                      1	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1R_ZC                       0x0001	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_MASK                  0x0001	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_SHIFT                      0	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_WIDTH                      1	/* HPOUT1R_ZC */
+
+/*
+ * R32 (0x20) - MICBIAS (1)
+ */
+#define WM8995_MICB1_MODE                       0x0008	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_MASK                  0x0008	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_SHIFT                      3	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_WIDTH                      1	/* MICB1_MODE */
+#define WM8995_MICB1_LVL_MASK                   0x0006	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_LVL_SHIFT                       1	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_LVL_WIDTH                       2	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_DISCH                      0x0001	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_MASK                 0x0001	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_SHIFT                     0	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_WIDTH                     1	/* MICB1_DISCH */
+
+/*
+ * R33 (0x21) - MICBIAS (2)
+ */
+#define WM8995_MICB2_MODE                       0x0008	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_MASK                  0x0008	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_SHIFT                      3	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_WIDTH                      1	/* MICB2_MODE */
+#define WM8995_MICB2_LVL_MASK                   0x0006	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_LVL_SHIFT                       1	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_LVL_WIDTH                       2	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_DISCH                      0x0001	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_MASK                 0x0001	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_SHIFT                     0	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_WIDTH                     1	/* MICB2_DISCH */
+
+/*
+ * R40 (0x28) - LDO 1
+ */
+#define WM8995_LDO1_MODE                        0x0020	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_MASK                   0x0020	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_SHIFT                       5	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_WIDTH                       1	/* LDO1_MODE */
+#define WM8995_LDO1_VSEL_MASK                   0x0006	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_VSEL_SHIFT                       1	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_VSEL_WIDTH                       2	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_DISCH                       0x0001	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_MASK                  0x0001	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_SHIFT                      0	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_WIDTH                      1	/* LDO1_DISCH */
+
+/*
+ * R41 (0x29) - LDO 2
+ */
+#define WM8995_LDO2_MODE                        0x0020	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_MASK                   0x0020	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_SHIFT                       5	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_WIDTH                       1	/* LDO2_MODE */
+#define WM8995_LDO2_VSEL_MASK                   0x001E	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_VSEL_SHIFT                       1	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_VSEL_WIDTH                       4	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_DISCH                       0x0001	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_MASK                  0x0001	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_SHIFT                      0	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_WIDTH                      1	/* LDO2_DISCH */
+
+/*
+ * R48 (0x30) - Accessory Detect Mode1
+ */
+#define WM8995_JD_MODE_MASK                     0x0003	/* JD_MODE - [1:0] */
+#define WM8995_JD_MODE_SHIFT                         0	/* JD_MODE - [1:0] */
+#define WM8995_JD_MODE_WIDTH                         2	/* JD_MODE - [1:0] */
+
+/*
+ * R49 (0x31) - Accessory Detect Mode2
+ */
+#define WM8995_VID_ENA                          0x0001	/* VID_ENA */
+#define WM8995_VID_ENA_MASK                     0x0001	/* VID_ENA */
+#define WM8995_VID_ENA_SHIFT                         0	/* VID_ENA */
+#define WM8995_VID_ENA_WIDTH                         1	/* VID_ENA */
+
+/*
+ * R52 (0x34) - Headphone Detect1
+ */
+#define WM8995_HP_RAMPRATE                      0x0002	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_MASK                 0x0002	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_SHIFT                     1	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_WIDTH                     1	/* HP_RAMPRATE */
+#define WM8995_HP_POLL                          0x0001	/* HP_POLL */
+#define WM8995_HP_POLL_MASK                     0x0001	/* HP_POLL */
+#define WM8995_HP_POLL_SHIFT                         0	/* HP_POLL */
+#define WM8995_HP_POLL_WIDTH                         1	/* HP_POLL */
+
+/*
+ * R53 (0x35) - Headphone Detect2
+ */
+#define WM8995_HP_DONE                          0x0080	/* HP_DONE */
+#define WM8995_HP_DONE_MASK                     0x0080	/* HP_DONE */
+#define WM8995_HP_DONE_SHIFT                         7	/* HP_DONE */
+#define WM8995_HP_DONE_WIDTH                         1	/* HP_DONE */
+#define WM8995_HP_LVL_MASK                      0x007F	/* HP_LVL - [6:0] */
+#define WM8995_HP_LVL_SHIFT                          0	/* HP_LVL - [6:0] */
+#define WM8995_HP_LVL_WIDTH                          7	/* HP_LVL - [6:0] */
+
+/*
+ * R56 (0x38) - Mic Detect (1)
+ */
+#define WM8995_MICD_RATE_MASK                   0x7800	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_RATE_SHIFT                      11	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_RATE_WIDTH                       4	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_LVL_SEL_MASK                0x01F8	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_LVL_SEL_SHIFT                    3	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_LVL_SEL_WIDTH                    6	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_DBTIME                      0x0002	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_MASK                 0x0002	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_SHIFT                     1	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_WIDTH                     1	/* MICD_DBTIME */
+#define WM8995_MICD_ENA                         0x0001	/* MICD_ENA */
+#define WM8995_MICD_ENA_MASK                    0x0001	/* MICD_ENA */
+#define WM8995_MICD_ENA_SHIFT                        0	/* MICD_ENA */
+#define WM8995_MICD_ENA_WIDTH                        1	/* MICD_ENA */
+
+/*
+ * R57 (0x39) - Mic Detect (2)
+ */
+#define WM8995_MICD_LVL_MASK                    0x01FC	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_LVL_SHIFT                        2	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_LVL_WIDTH                        7	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_VALID                       0x0002	/* MICD_VALID */
+#define WM8995_MICD_VALID_MASK                  0x0002	/* MICD_VALID */
+#define WM8995_MICD_VALID_SHIFT                      1	/* MICD_VALID */
+#define WM8995_MICD_VALID_WIDTH                      1	/* MICD_VALID */
+#define WM8995_MICD_STS                         0x0001	/* MICD_STS */
+#define WM8995_MICD_STS_MASK                    0x0001	/* MICD_STS */
+#define WM8995_MICD_STS_SHIFT                        0	/* MICD_STS */
+#define WM8995_MICD_STS_WIDTH                        1	/* MICD_STS */
+
+/*
+ * R64 (0x40) - Charge Pump (1)
+ */
+#define WM8995_CP_ENA                           0x8000	/* CP_ENA */
+#define WM8995_CP_ENA_MASK                      0x8000	/* CP_ENA */
+#define WM8995_CP_ENA_SHIFT                         15	/* CP_ENA */
+#define WM8995_CP_ENA_WIDTH                          1	/* CP_ENA */
+
+/*
+ * R69 (0x45) - Class W (1)
+ */
+#define WM8995_CP_DYN_SRC_SEL_MASK              0x0300	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_SRC_SEL_SHIFT                  8	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_SRC_SEL_WIDTH                  2	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_PWR                       0x0001	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_MASK                  0x0001	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_SHIFT                      0	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_WIDTH                      1	/* CP_DYN_PWR */
+
+/*
+ * R80 (0x50) - DC Servo (1)
+ */
+#define WM8995_DCS_ENA_CHAN_3                   0x0008	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_MASK              0x0008	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_SHIFT                  3	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_WIDTH                  1	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_2                   0x0004	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_MASK              0x0004	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_SHIFT                  2	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_WIDTH                  1	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_1                   0x0002	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_MASK              0x0002	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_SHIFT                  1	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_WIDTH                  1	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_0                   0x0001	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_MASK              0x0001	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_SHIFT                  0	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_WIDTH                  1	/* DCS_ENA_CHAN_0 */
+
+/*
+ * R81 (0x51) - DC Servo (2)
+ */
+#define WM8995_DCS_TRIG_SINGLE_3                0x8000	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_MASK           0x8000	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_SHIFT              15	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_WIDTH               1	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_2                0x4000	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_MASK           0x4000	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_SHIFT              14	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_WIDTH               1	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_1                0x2000	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_MASK           0x2000	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_SHIFT              13	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_WIDTH               1	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_0                0x1000	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_MASK           0x1000	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_SHIFT              12	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_WIDTH               1	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SERIES_3                0x0800	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_MASK           0x0800	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_SHIFT              11	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_WIDTH               1	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_2                0x0400	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_MASK           0x0400	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_SHIFT              10	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_WIDTH               1	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_1                0x0200	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_MASK           0x0200	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_SHIFT               9	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_WIDTH               1	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_0                0x0100	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_MASK           0x0100	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_SHIFT               8	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_WIDTH               1	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_STARTUP_3               0x0080	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_MASK          0x0080	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_SHIFT              7	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_WIDTH              1	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_2               0x0040	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_MASK          0x0040	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_SHIFT              6	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_WIDTH              1	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_1               0x0020	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_MASK          0x0020	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_SHIFT              5	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_WIDTH              1	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_0               0x0010	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_MASK          0x0010	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_SHIFT              4	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_WIDTH              1	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_DAC_WR_3                0x0008	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_MASK           0x0008	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_SHIFT               3	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_WIDTH               1	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_2                0x0004	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_MASK           0x0004	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_SHIFT               2	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_WIDTH               1	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_1                0x0002	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_MASK           0x0002	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_SHIFT               1	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_WIDTH               1	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_0                0x0001	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_MASK           0x0001	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_SHIFT               0	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_WIDTH               1	/* DCS_TRIG_DAC_WR_0 */
+
+/*
+ * R82 (0x52) - DC Servo (3)
+ */
+#define WM8995_DCS_TIMER_PERIOD_23_MASK         0x0F00	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_23_SHIFT             8	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_23_WIDTH             4	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_01_MASK         0x000F	/* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8995_DCS_TIMER_PERIOD_01_SHIFT             0	/* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8995_DCS_TIMER_PERIOD_01_WIDTH             4	/* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R84 (0x54) - DC Servo (5)
+ */
+#define WM8995_DCS_SERIES_NO_23_MASK            0x7F00	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_23_SHIFT                8	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_23_WIDTH                7	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_01_MASK            0x007F	/* DCS_SERIES_NO_01 - [6:0] */
+#define WM8995_DCS_SERIES_NO_01_SHIFT                0	/* DCS_SERIES_NO_01 - [6:0] */
+#define WM8995_DCS_SERIES_NO_01_WIDTH                7	/* DCS_SERIES_NO_01 - [6:0] */
+
+/*
+ * R85 (0x55) - DC Servo (6)
+ */
+#define WM8995_DCS_DAC_WR_VAL_3_MASK            0xFF00	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_3_SHIFT                8	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_3_WIDTH                8	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_2_MASK            0x00FF	/* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_2_SHIFT                0	/* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_2_WIDTH                8	/* DCS_DAC_WR_VAL_2 - [7:0] */
+
+/*
+ * R86 (0x56) - DC Servo (7)
+ */
+#define WM8995_DCS_DAC_WR_VAL_1_MASK            0xFF00	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_1_SHIFT                8	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_1_WIDTH                8	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_0_MASK            0x00FF	/* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_0_SHIFT                0	/* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_0_WIDTH                8	/* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R87 (0x57) - DC Servo Readback 0
+ */
+#define WM8995_DCS_CAL_COMPLETE_MASK            0x0F00	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_CAL_COMPLETE_SHIFT                8	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_CAL_COMPLETE_WIDTH                4	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_DAC_WR_COMPLETE_MASK         0x00F0	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_DAC_WR_COMPLETE_SHIFT             4	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_DAC_WR_COMPLETE_WIDTH             4	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_STARTUP_COMPLETE_MASK        0x000F	/* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8995_DCS_STARTUP_COMPLETE_SHIFT            0	/* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8995_DCS_STARTUP_COMPLETE_WIDTH            4	/* DCS_STARTUP_COMPLETE - [3:0] */
+
+/*
+ * R96 (0x60) - Analogue HP (1)
+ */
+#define WM8995_HPOUT1L_RMV_SHORT                0x0080	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_MASK           0x0080	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_SHIFT               7	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_WIDTH               1	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_OUTP                     0x0040	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_MASK                0x0040	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_SHIFT                    6	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_WIDTH                    1	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_DLY                      0x0020	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_MASK                 0x0020	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_SHIFT                     5	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_WIDTH                     1	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1R_RMV_SHORT                0x0008	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_MASK           0x0008	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_SHIFT               3	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_WIDTH               1	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_OUTP                     0x0004	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_MASK                0x0004	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_SHIFT                    2	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_WIDTH                    1	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_DLY                      0x0002	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_MASK                 0x0002	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_SHIFT                     1	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_WIDTH                     1	/* HPOUT1R_DLY */
+
+/*
+ * R97 (0x61) - Analogue HP (2)
+ */
+#define WM8995_HPOUT2L_RMV_SHORT                0x0080	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_MASK           0x0080	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_SHIFT               7	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_WIDTH               1	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_OUTP                     0x0040	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_MASK                0x0040	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_SHIFT                    6	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_WIDTH                    1	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_DLY                      0x0020	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_MASK                 0x0020	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_SHIFT                     5	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_WIDTH                     1	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2R_RMV_SHORT                0x0008	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_MASK           0x0008	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_SHIFT               3	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_WIDTH               1	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_OUTP                     0x0004	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_MASK                0x0004	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_SHIFT                    2	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_WIDTH                    1	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_DLY                      0x0002	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_MASK                 0x0002	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_SHIFT                     1	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_WIDTH                     1	/* HPOUT2R_DLY */
+
+/*
+ * R256 (0x100) - Chip Revision
+ */
+#define WM8995_CHIP_REV_MASK                    0x000F	/* CHIP_REV - [3:0] */
+#define WM8995_CHIP_REV_SHIFT                        0	/* CHIP_REV - [3:0] */
+#define WM8995_CHIP_REV_WIDTH                        4	/* CHIP_REV - [3:0] */
+
+/*
+ * R257 (0x101) - Control Interface (1)
+ */
+#define WM8995_REG_SYNC                         0x8000	/* REG_SYNC */
+#define WM8995_REG_SYNC_MASK                    0x8000	/* REG_SYNC */
+#define WM8995_REG_SYNC_SHIFT                       15	/* REG_SYNC */
+#define WM8995_REG_SYNC_WIDTH                        1	/* REG_SYNC */
+#define WM8995_SPI_CONTRD                       0x0040	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_MASK                  0x0040	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_SHIFT                      6	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_WIDTH                      1	/* SPI_CONTRD */
+#define WM8995_SPI_4WIRE                        0x0020	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_MASK                   0x0020	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_SHIFT                       5	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_WIDTH                       1	/* SPI_4WIRE */
+#define WM8995_SPI_CFG                          0x0010	/* SPI_CFG */
+#define WM8995_SPI_CFG_MASK                     0x0010	/* SPI_CFG */
+#define WM8995_SPI_CFG_SHIFT                         4	/* SPI_CFG */
+#define WM8995_SPI_CFG_WIDTH                         1	/* SPI_CFG */
+#define WM8995_AUTO_INC                         0x0004	/* AUTO_INC */
+#define WM8995_AUTO_INC_MASK                    0x0004	/* AUTO_INC */
+#define WM8995_AUTO_INC_SHIFT                        2	/* AUTO_INC */
+#define WM8995_AUTO_INC_WIDTH                        1	/* AUTO_INC */
+
+/*
+ * R258 (0x102) - Control Interface (2)
+ */
+#define WM8995_CTRL_IF_SRC                      0x0001	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_MASK                 0x0001	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_SHIFT                     0	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_WIDTH                     1	/* CTRL_IF_SRC */
+
+/*
+ * R272 (0x110) - Write Sequencer Ctrl (1)
+ */
+#define WM8995_WSEQ_ENA                         0x8000	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_MASK                    0x8000	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_SHIFT                       15	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_WIDTH                        1	/* WSEQ_ENA */
+#define WM8995_WSEQ_ABORT                       0x0200	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_MASK                  0x0200	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_SHIFT                      9	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_WIDTH                      1	/* WSEQ_ABORT */
+#define WM8995_WSEQ_START                       0x0100	/* WSEQ_START */
+#define WM8995_WSEQ_START_MASK                  0x0100	/* WSEQ_START */
+#define WM8995_WSEQ_START_SHIFT                      8	/* WSEQ_START */
+#define WM8995_WSEQ_START_WIDTH                      1	/* WSEQ_START */
+#define WM8995_WSEQ_START_INDEX_MASK            0x007F	/* WSEQ_START_INDEX - [6:0] */
+#define WM8995_WSEQ_START_INDEX_SHIFT                0	/* WSEQ_START_INDEX - [6:0] */
+#define WM8995_WSEQ_START_INDEX_WIDTH                7	/* WSEQ_START_INDEX - [6:0] */
+
+/*
+ * R273 (0x111) - Write Sequencer Ctrl (2)
+ */
+#define WM8995_WSEQ_BUSY                        0x0100	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_MASK                   0x0100	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_SHIFT                       8	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_WIDTH                       1	/* WSEQ_BUSY */
+#define WM8995_WSEQ_CURRENT_INDEX_MASK          0x007F	/* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8995_WSEQ_CURRENT_INDEX_SHIFT              0	/* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8995_WSEQ_CURRENT_INDEX_WIDTH              7	/* WSEQ_CURRENT_INDEX - [6:0] */
+
+/*
+ * R512 (0x200) - AIF1 Clocking (1)
+ */
+#define WM8995_AIF1CLK_SRC_MASK                 0x0018	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_SRC_SHIFT                     3	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_SRC_WIDTH                     2	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_INV                      0x0004	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_MASK                 0x0004	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_SHIFT                     2	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_WIDTH                     1	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_DIV                      0x0002	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_MASK                 0x0002	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_SHIFT                     1	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_WIDTH                     1	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_ENA                      0x0001	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_MASK                 0x0001	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_SHIFT                     0	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_WIDTH                     1	/* AIF1CLK_ENA */
+
+/*
+ * R513 (0x201) - AIF1 Clocking (2)
+ */
+#define WM8995_AIF1DAC_DIV_MASK                 0x0038	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1DAC_DIV_SHIFT                     3	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1DAC_DIV_WIDTH                     3	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1ADC_DIV_MASK                 0x0007	/* AIF1ADC_DIV - [2:0] */
+#define WM8995_AIF1ADC_DIV_SHIFT                     0	/* AIF1ADC_DIV - [2:0] */
+#define WM8995_AIF1ADC_DIV_WIDTH                     3	/* AIF1ADC_DIV - [2:0] */
+
+/*
+ * R516 (0x204) - AIF2 Clocking (1)
+ */
+#define WM8995_AIF2CLK_SRC_MASK                 0x0018	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_SRC_SHIFT                     3	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_SRC_WIDTH                     2	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_INV                      0x0004	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_MASK                 0x0004	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_SHIFT                     2	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_WIDTH                     1	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_DIV                      0x0002	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_MASK                 0x0002	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_SHIFT                     1	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_WIDTH                     1	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_ENA                      0x0001	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_MASK                 0x0001	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_SHIFT                     0	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_WIDTH                     1	/* AIF2CLK_ENA */
+
+/*
+ * R517 (0x205) - AIF2 Clocking (2)
+ */
+#define WM8995_AIF2DAC_DIV_MASK                 0x0038	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2DAC_DIV_SHIFT                     3	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2DAC_DIV_WIDTH                     3	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2ADC_DIV_MASK                 0x0007	/* AIF2ADC_DIV - [2:0] */
+#define WM8995_AIF2ADC_DIV_SHIFT                     0	/* AIF2ADC_DIV - [2:0] */
+#define WM8995_AIF2ADC_DIV_WIDTH                     3	/* AIF2ADC_DIV - [2:0] */
+
+/*
+ * R520 (0x208) - Clocking (1)
+ */
+#define WM8995_LFCLK_ENA                        0x0020	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_MASK                   0x0020	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_SHIFT                       5	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_WIDTH                       1	/* LFCLK_ENA */
+#define WM8995_TOCLK_ENA                        0x0010	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_MASK                   0x0010	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_SHIFT                       4	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_WIDTH                       1	/* TOCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA                   0x0008	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_MASK              0x0008	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_SHIFT                  3	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_WIDTH                  1	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA                   0x0004	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_MASK              0x0004	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_SHIFT                  2	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_WIDTH                  1	/* AIF2DSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA                    0x0002	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_MASK               0x0002	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_SHIFT                   1	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_WIDTH                   1	/* SYSDSPCLK_ENA */
+#define WM8995_SYSCLK_SRC                       0x0001	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_MASK                  0x0001	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_SHIFT                      0	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_WIDTH                      1	/* SYSCLK_SRC */
+
+/*
+ * R521 (0x209) - Clocking (2)
+ */
+#define WM8995_TOCLK_DIV_MASK                   0x0700	/* TOCLK_DIV - [10:8] */
+#define WM8995_TOCLK_DIV_SHIFT                       8	/* TOCLK_DIV - [10:8] */
+#define WM8995_TOCLK_DIV_WIDTH                       3	/* TOCLK_DIV - [10:8] */
+#define WM8995_DBCLK_DIV_MASK                   0x00F0	/* DBCLK_DIV - [7:4] */
+#define WM8995_DBCLK_DIV_SHIFT                       4	/* DBCLK_DIV - [7:4] */
+#define WM8995_DBCLK_DIV_WIDTH                       4	/* DBCLK_DIV - [7:4] */
+#define WM8995_OPCLK_DIV_MASK                   0x0007	/* OPCLK_DIV - [2:0] */
+#define WM8995_OPCLK_DIV_SHIFT                       0	/* OPCLK_DIV - [2:0] */
+#define WM8995_OPCLK_DIV_WIDTH                       3	/* OPCLK_DIV - [2:0] */
+
+/*
+ * R528 (0x210) - AIF1 Rate
+ */
+#define WM8995_AIF1_SR_MASK                     0x00F0	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1_SR_SHIFT                         4	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1_SR_WIDTH                         4	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1CLK_RATE_MASK                0x000F	/* AIF1CLK_RATE - [3:0] */
+#define WM8995_AIF1CLK_RATE_SHIFT                    0	/* AIF1CLK_RATE - [3:0] */
+#define WM8995_AIF1CLK_RATE_WIDTH                    4	/* AIF1CLK_RATE - [3:0] */
+
+/*
+ * R529 (0x211) - AIF2 Rate
+ */
+#define WM8995_AIF2_SR_MASK                     0x00F0	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2_SR_SHIFT                         4	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2_SR_WIDTH                         4	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2CLK_RATE_MASK                0x000F	/* AIF2CLK_RATE - [3:0] */
+#define WM8995_AIF2CLK_RATE_SHIFT                    0	/* AIF2CLK_RATE - [3:0] */
+#define WM8995_AIF2CLK_RATE_WIDTH                    4	/* AIF2CLK_RATE - [3:0] */
+
+/*
+ * R530 (0x212) - Rate Status
+ */
+#define WM8995_SR_ERROR_MASK                    0x000F	/* SR_ERROR - [3:0] */
+#define WM8995_SR_ERROR_SHIFT                        0	/* SR_ERROR - [3:0] */
+#define WM8995_SR_ERROR_WIDTH                        4	/* SR_ERROR - [3:0] */
+
+/*
+ * R544 (0x220) - FLL1 Control (1)
+ */
+#define WM8995_FLL1_OSC_ENA                     0x0002	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_MASK                0x0002	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_SHIFT                    1	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_WIDTH                    1	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_ENA                         0x0001	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_MASK                    0x0001	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_SHIFT                        0	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_WIDTH                        1	/* FLL1_ENA */
+
+/*
+ * R545 (0x221) - FLL1 Control (2)
+ */
+#define WM8995_FLL1_OUTDIV_MASK                 0x3F00	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_OUTDIV_SHIFT                     8	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_OUTDIV_WIDTH                     6	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_CTRL_RATE_MASK              0x0070	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_CTRL_RATE_SHIFT                  4	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_CTRL_RATE_WIDTH                  3	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_FRATIO_MASK                 0x0007	/* FLL1_FRATIO - [2:0] */
+#define WM8995_FLL1_FRATIO_SHIFT                     0	/* FLL1_FRATIO - [2:0] */
+#define WM8995_FLL1_FRATIO_WIDTH                     3	/* FLL1_FRATIO - [2:0] */
+
+/*
+ * R546 (0x222) - FLL1 Control (3)
+ */
+#define WM8995_FLL1_K_MASK                      0xFFFF	/* FLL1_K - [15:0] */
+#define WM8995_FLL1_K_SHIFT                          0	/* FLL1_K - [15:0] */
+#define WM8995_FLL1_K_WIDTH                         16	/* FLL1_K - [15:0] */
+
+/*
+ * R547 (0x223) - FLL1 Control (4)
+ */
+#define WM8995_FLL1_N_MASK                      0x7FE0	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_N_SHIFT                          5	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_N_WIDTH                         10	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_LOOP_GAIN_MASK              0x000F	/* FLL1_LOOP_GAIN - [3:0] */
+#define WM8995_FLL1_LOOP_GAIN_SHIFT                  0	/* FLL1_LOOP_GAIN - [3:0] */
+#define WM8995_FLL1_LOOP_GAIN_WIDTH                  4	/* FLL1_LOOP_GAIN - [3:0] */
+
+/*
+ * R548 (0x224) - FLL1 Control (5)
+ */
+#define WM8995_FLL1_FRC_NCO_VAL_MASK            0x1F80	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO_VAL_SHIFT                7	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO_VAL_WIDTH                6	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO                     0x0040	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_MASK                0x0040	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_SHIFT                    6	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_WIDTH                    1	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_REFCLK_DIV_MASK             0x0018	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_DIV_SHIFT                 3	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_DIV_WIDTH                 2	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_SRC_MASK             0x0003	/* FLL1_REFCLK_SRC - [1:0] */
+#define WM8995_FLL1_REFCLK_SRC_SHIFT                 0	/* FLL1_REFCLK_SRC - [1:0] */
+#define WM8995_FLL1_REFCLK_SRC_WIDTH                 2	/* FLL1_REFCLK_SRC - [1:0] */
+
+/*
+ * R576 (0x240) - FLL2 Control (1)
+ */
+#define WM8995_FLL2_OSC_ENA                     0x0002	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_MASK                0x0002	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_SHIFT                    1	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_WIDTH                    1	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_ENA                         0x0001	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_MASK                    0x0001	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_SHIFT                        0	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_WIDTH                        1	/* FLL2_ENA */
+
+/*
+ * R577 (0x241) - FLL2 Control (2)
+ */
+#define WM8995_FLL2_OUTDIV_MASK                 0x3F00	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_OUTDIV_SHIFT                     8	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_OUTDIV_WIDTH                     6	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_CTRL_RATE_MASK              0x0070	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_CTRL_RATE_SHIFT                  4	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_CTRL_RATE_WIDTH                  3	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_FRATIO_MASK                 0x0007	/* FLL2_FRATIO - [2:0] */
+#define WM8995_FLL2_FRATIO_SHIFT                     0	/* FLL2_FRATIO - [2:0] */
+#define WM8995_FLL2_FRATIO_WIDTH                     3	/* FLL2_FRATIO - [2:0] */
+
+/*
+ * R578 (0x242) - FLL2 Control (3)
+ */
+#define WM8995_FLL2_K_MASK                      0xFFFF	/* FLL2_K - [15:0] */
+#define WM8995_FLL2_K_SHIFT                          0	/* FLL2_K - [15:0] */
+#define WM8995_FLL2_K_WIDTH                         16	/* FLL2_K - [15:0] */
+
+/*
+ * R579 (0x243) - FLL2 Control (4)
+ */
+#define WM8995_FLL2_N_MASK                      0x7FE0	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_N_SHIFT                          5	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_N_WIDTH                         10	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_LOOP_GAIN_MASK              0x000F	/* FLL2_LOOP_GAIN - [3:0] */
+#define WM8995_FLL2_LOOP_GAIN_SHIFT                  0	/* FLL2_LOOP_GAIN - [3:0] */
+#define WM8995_FLL2_LOOP_GAIN_WIDTH                  4	/* FLL2_LOOP_GAIN - [3:0] */
+
+/*
+ * R580 (0x244) - FLL2 Control (5)
+ */
+#define WM8995_FLL2_FRC_NCO_VAL_MASK            0x1F80	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO_VAL_SHIFT                7	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO_VAL_WIDTH                6	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO                     0x0040	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_MASK                0x0040	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_SHIFT                    6	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_WIDTH                    1	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_REFCLK_DIV_MASK             0x0018	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_DIV_SHIFT                 3	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_DIV_WIDTH                 2	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_SRC_MASK             0x0003	/* FLL2_REFCLK_SRC - [1:0] */
+#define WM8995_FLL2_REFCLK_SRC_SHIFT                 0	/* FLL2_REFCLK_SRC - [1:0] */
+#define WM8995_FLL2_REFCLK_SRC_WIDTH                 2	/* FLL2_REFCLK_SRC - [1:0] */
+
+/*
+ * R768 (0x300) - AIF1 Control (1)
+ */
+#define WM8995_AIF1ADCL_SRC                     0x8000	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_MASK                0x8000	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_SHIFT                   15	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_WIDTH                    1	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCR_SRC                     0x4000	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_MASK                0x4000	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_SHIFT                   14	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_WIDTH                    1	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADC_TDM                      0x2000	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_MASK                 0x2000	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_SHIFT                    13	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_WIDTH                     1	/* AIF1ADC_TDM */
+#define WM8995_AIF1_BCLK_INV                    0x0100	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_MASK               0x0100	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_SHIFT                   8	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_WIDTH                   1	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_LRCLK_INV                   0x0080	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_MASK              0x0080	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_SHIFT                  7	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_WIDTH                  1	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_WL_MASK                     0x0060	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_WL_SHIFT                         5	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_WL_WIDTH                         2	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_FMT_MASK                    0x0018	/* AIF1_FMT - [4:3] */
+#define WM8995_AIF1_FMT_SHIFT                        3	/* AIF1_FMT - [4:3] */
+#define WM8995_AIF1_FMT_WIDTH                        2	/* AIF1_FMT - [4:3] */
+
+/*
+ * R769 (0x301) - AIF1 Control (2)
+ */
+#define WM8995_AIF1DACL_SRC                     0x8000	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_MASK                0x8000	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_SHIFT                   15	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_WIDTH                    1	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACR_SRC                     0x4000	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_MASK                0x4000	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_SHIFT                   14	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_WIDTH                    1	/* AIF1DACR_SRC */
+#define WM8995_AIF1DAC_BOOST_MASK               0x0C00	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_BOOST_SHIFT                  10	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_BOOST_WIDTH                   2	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_COMP                     0x0010	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_MASK                0x0010	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_SHIFT                    4	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_WIDTH                    1	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMPMODE                 0x0008	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_MASK            0x0008	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_SHIFT                3	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_WIDTH                1	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1ADC_COMP                     0x0004	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_MASK                0x0004	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_SHIFT                    2	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_WIDTH                    1	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMPMODE                 0x0002	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_MASK            0x0002	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_SHIFT                1	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_WIDTH                1	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1_LOOPBACK                    0x0001	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_MASK               0x0001	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_SHIFT                   0	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_WIDTH                   1	/* AIF1_LOOPBACK */
+
+/*
+ * R770 (0x302) - AIF1 Master/Slave
+ */
+#define WM8995_AIF1_TRI                         0x8000	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_MASK                    0x8000	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_SHIFT                       15	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_WIDTH                        1	/* AIF1_TRI */
+#define WM8995_AIF1_MSTR                        0x4000	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_MASK                   0x4000	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_SHIFT                      14	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_WIDTH                       1	/* AIF1_MSTR */
+#define WM8995_AIF1_CLK_FRC                     0x2000	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_MASK                0x2000	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_SHIFT                   13	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_WIDTH                    1	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC                   0x1000	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_MASK              0x1000	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_SHIFT                 12	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_WIDTH                  1	/* AIF1_LRCLK_FRC */
+
+/*
+ * R771 (0x303) - AIF1 BCLK
+ */
+#define WM8995_AIF1_BCLK_DIV_MASK               0x00F0	/* AIF1_BCLK_DIV - [7:4] */
+#define WM8995_AIF1_BCLK_DIV_SHIFT                   4	/* AIF1_BCLK_DIV - [7:4] */
+#define WM8995_AIF1_BCLK_DIV_WIDTH                   4	/* AIF1_BCLK_DIV - [7:4] */
+
+/*
+ * R772 (0x304) - AIF1ADC LRCLK
+ */
+#define WM8995_AIF1ADC_LRCLK_DIR                0x0800	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_MASK           0x0800	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_SHIFT              11	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_WIDTH               1	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_RATE_MASK                0x07FF	/* AIF1ADC_RATE - [10:0] */
+#define WM8995_AIF1ADC_RATE_SHIFT                    0	/* AIF1ADC_RATE - [10:0] */
+#define WM8995_AIF1ADC_RATE_WIDTH                   11	/* AIF1ADC_RATE - [10:0] */
+
+/*
+ * R773 (0x305) - AIF1DAC LRCLK
+ */
+#define WM8995_AIF1DAC_LRCLK_DIR                0x0800	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_MASK           0x0800	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_SHIFT              11	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_WIDTH               1	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_RATE_MASK                0x07FF	/* AIF1DAC_RATE - [10:0] */
+#define WM8995_AIF1DAC_RATE_SHIFT                    0	/* AIF1DAC_RATE - [10:0] */
+#define WM8995_AIF1DAC_RATE_WIDTH                   11	/* AIF1DAC_RATE - [10:0] */
+
+/*
+ * R774 (0x306) - AIF1DAC Data
+ */
+#define WM8995_AIF1DACL_DAT_INV                 0x0002	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_MASK            0x0002	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_SHIFT                1	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_WIDTH                1	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV                 0x0001	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_MASK            0x0001	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_SHIFT                0	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_WIDTH                1	/* AIF1DACR_DAT_INV */
+
+/*
+ * R775 (0x307) - AIF1ADC Data
+ */
+#define WM8995_AIF1ADCL_DAT_INV                 0x0002	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_MASK            0x0002	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_SHIFT                1	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_WIDTH                1	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV                 0x0001	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_MASK            0x0001	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_SHIFT                0	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_WIDTH                1	/* AIF1ADCR_DAT_INV */
+
+/*
+ * R784 (0x310) - AIF2 Control (1)
+ */
+#define WM8995_AIF2ADCL_SRC                     0x8000	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_MASK                0x8000	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_SHIFT                   15	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_WIDTH                    1	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCR_SRC                     0x4000	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_MASK                0x4000	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_SHIFT                   14	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_WIDTH                    1	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADC_TDM                      0x2000	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_MASK                 0x2000	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_SHIFT                    13	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_WIDTH                     1	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_CHAN                 0x1000	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_MASK            0x1000	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_SHIFT               12	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_WIDTH                1	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2_BCLK_INV                    0x0100	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_MASK               0x0100	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_SHIFT                   8	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_WIDTH                   1	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_LRCLK_INV                   0x0080	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_MASK              0x0080	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_SHIFT                  7	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_WIDTH                  1	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_WL_MASK                     0x0060	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_WL_SHIFT                         5	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_WL_WIDTH                         2	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_FMT_MASK                    0x0018	/* AIF2_FMT - [4:3] */
+#define WM8995_AIF2_FMT_SHIFT                        3	/* AIF2_FMT - [4:3] */
+#define WM8995_AIF2_FMT_WIDTH                        2	/* AIF2_FMT - [4:3] */
+
+/*
+ * R785 (0x311) - AIF2 Control (2)
+ */
+#define WM8995_AIF2DACL_SRC                     0x8000	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_MASK                0x8000	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_SHIFT                   15	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_WIDTH                    1	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACR_SRC                     0x4000	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_MASK                0x4000	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_SHIFT                   14	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_WIDTH                    1	/* AIF2DACR_SRC */
+#define WM8995_AIF2DAC_TDM                      0x2000	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_MASK                 0x2000	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_SHIFT                    13	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_WIDTH                     1	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_CHAN                 0x1000	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_MASK            0x1000	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_SHIFT               12	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_WIDTH                1	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_BOOST_MASK               0x0C00	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_BOOST_SHIFT                  10	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_BOOST_WIDTH                   2	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_COMP                     0x0010	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_MASK                0x0010	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_SHIFT                    4	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_WIDTH                    1	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMPMODE                 0x0008	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_MASK            0x0008	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_SHIFT                3	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_WIDTH                1	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2ADC_COMP                     0x0004	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_MASK                0x0004	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_SHIFT                    2	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_WIDTH                    1	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMPMODE                 0x0002	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_MASK            0x0002	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_SHIFT                1	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_WIDTH                1	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2_LOOPBACK                    0x0001	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_MASK               0x0001	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_SHIFT                   0	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_WIDTH                   1	/* AIF2_LOOPBACK */
+
+/*
+ * R786 (0x312) - AIF2 Master/Slave
+ */
+#define WM8995_AIF2_TRI                         0x8000	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_MASK                    0x8000	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_SHIFT                       15	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_WIDTH                        1	/* AIF2_TRI */
+#define WM8995_AIF2_MSTR                        0x4000	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_MASK                   0x4000	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_SHIFT                      14	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_WIDTH                       1	/* AIF2_MSTR */
+#define WM8995_AIF2_CLK_FRC                     0x2000	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_MASK                0x2000	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_SHIFT                   13	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_WIDTH                    1	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC                   0x1000	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_MASK              0x1000	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_SHIFT                 12	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_WIDTH                  1	/* AIF2_LRCLK_FRC */
+
+/*
+ * R787 (0x313) - AIF2 BCLK
+ */
+#define WM8995_AIF2_BCLK_DIV_MASK               0x00F0	/* AIF2_BCLK_DIV - [7:4] */
+#define WM8995_AIF2_BCLK_DIV_SHIFT                   4	/* AIF2_BCLK_DIV - [7:4] */
+#define WM8995_AIF2_BCLK_DIV_WIDTH                   4	/* AIF2_BCLK_DIV - [7:4] */
+
+/*
+ * R788 (0x314) - AIF2ADC LRCLK
+ */
+#define WM8995_AIF2ADC_LRCLK_DIR                0x0800	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_MASK           0x0800	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_SHIFT              11	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_WIDTH               1	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_RATE_MASK                0x07FF	/* AIF2ADC_RATE - [10:0] */
+#define WM8995_AIF2ADC_RATE_SHIFT                    0	/* AIF2ADC_RATE - [10:0] */
+#define WM8995_AIF2ADC_RATE_WIDTH                   11	/* AIF2ADC_RATE - [10:0] */
+
+/*
+ * R789 (0x315) - AIF2DAC LRCLK
+ */
+#define WM8995_AIF2DAC_LRCLK_DIR                0x0800	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_MASK           0x0800	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_SHIFT              11	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_WIDTH               1	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_RATE_MASK                0x07FF	/* AIF2DAC_RATE - [10:0] */
+#define WM8995_AIF2DAC_RATE_SHIFT                    0	/* AIF2DAC_RATE - [10:0] */
+#define WM8995_AIF2DAC_RATE_WIDTH                   11	/* AIF2DAC_RATE - [10:0] */
+
+/*
+ * R790 (0x316) - AIF2DAC Data
+ */
+#define WM8995_AIF2DACL_DAT_INV                 0x0002	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_MASK            0x0002	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_SHIFT                1	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_WIDTH                1	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV                 0x0001	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_MASK            0x0001	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_SHIFT                0	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_WIDTH                1	/* AIF2DACR_DAT_INV */
+
+/*
+ * R791 (0x317) - AIF2ADC Data
+ */
+#define WM8995_AIF2ADCL_DAT_INV                 0x0002	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_MASK            0x0002	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_SHIFT                1	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_WIDTH                1	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV                 0x0001	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_MASK            0x0001	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_SHIFT                0	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_WIDTH                1	/* AIF2ADCR_DAT_INV */
+
+/*
+ * R1024 (0x400) - AIF1 ADC1 Left Volume
+ */
+#define WM8995_AIF1ADC1_VU                      0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_MASK                 0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_SHIFT                     8	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_WIDTH                     1	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1L_VOL_MASK               0x00FF	/* AIF1ADC1L_VOL - [7:0] */
+#define WM8995_AIF1ADC1L_VOL_SHIFT                   0	/* AIF1ADC1L_VOL - [7:0] */
+#define WM8995_AIF1ADC1L_VOL_WIDTH                   8	/* AIF1ADC1L_VOL - [7:0] */
+
+/*
+ * R1025 (0x401) - AIF1 ADC1 Right Volume
+ */
+#define WM8995_AIF1ADC1_VU                      0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_MASK                 0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_SHIFT                     8	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_WIDTH                     1	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1R_VOL_MASK               0x00FF	/* AIF1ADC1R_VOL - [7:0] */
+#define WM8995_AIF1ADC1R_VOL_SHIFT                   0	/* AIF1ADC1R_VOL - [7:0] */
+#define WM8995_AIF1ADC1R_VOL_WIDTH                   8	/* AIF1ADC1R_VOL - [7:0] */
+
+/*
+ * R1026 (0x402) - AIF1 DAC1 Left Volume
+ */
+#define WM8995_AIF1DAC1_VU                      0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_MASK                 0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_SHIFT                     8	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_WIDTH                     1	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1L_VOL_MASK               0x00FF	/* AIF1DAC1L_VOL - [7:0] */
+#define WM8995_AIF1DAC1L_VOL_SHIFT                   0	/* AIF1DAC1L_VOL - [7:0] */
+#define WM8995_AIF1DAC1L_VOL_WIDTH                   8	/* AIF1DAC1L_VOL - [7:0] */
+
+/*
+ * R1027 (0x403) - AIF1 DAC1 Right Volume
+ */
+#define WM8995_AIF1DAC1_VU                      0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_MASK                 0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_SHIFT                     8	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_WIDTH                     1	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1R_VOL_MASK               0x00FF	/* AIF1DAC1R_VOL - [7:0] */
+#define WM8995_AIF1DAC1R_VOL_SHIFT                   0	/* AIF1DAC1R_VOL - [7:0] */
+#define WM8995_AIF1DAC1R_VOL_WIDTH                   8	/* AIF1DAC1R_VOL - [7:0] */
+
+/*
+ * R1028 (0x404) - AIF1 ADC2 Left Volume
+ */
+#define WM8995_AIF1ADC2_VU                      0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_MASK                 0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_SHIFT                     8	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_WIDTH                     1	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2L_VOL_MASK               0x00FF	/* AIF1ADC2L_VOL - [7:0] */
+#define WM8995_AIF1ADC2L_VOL_SHIFT                   0	/* AIF1ADC2L_VOL - [7:0] */
+#define WM8995_AIF1ADC2L_VOL_WIDTH                   8	/* AIF1ADC2L_VOL - [7:0] */
+
+/*
+ * R1029 (0x405) - AIF1 ADC2 Right Volume
+ */
+#define WM8995_AIF1ADC2_VU                      0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_MASK                 0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_SHIFT                     8	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_WIDTH                     1	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2R_VOL_MASK               0x00FF	/* AIF1ADC2R_VOL - [7:0] */
+#define WM8995_AIF1ADC2R_VOL_SHIFT                   0	/* AIF1ADC2R_VOL - [7:0] */
+#define WM8995_AIF1ADC2R_VOL_WIDTH                   8	/* AIF1ADC2R_VOL - [7:0] */
+
+/*
+ * R1030 (0x406) - AIF1 DAC2 Left Volume
+ */
+#define WM8995_AIF1DAC2_VU                      0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_MASK                 0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_SHIFT                     8	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_WIDTH                     1	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2L_VOL_MASK               0x00FF	/* AIF1DAC2L_VOL - [7:0] */
+#define WM8995_AIF1DAC2L_VOL_SHIFT                   0	/* AIF1DAC2L_VOL - [7:0] */
+#define WM8995_AIF1DAC2L_VOL_WIDTH                   8	/* AIF1DAC2L_VOL - [7:0] */
+
+/*
+ * R1031 (0x407) - AIF1 DAC2 Right Volume
+ */
+#define WM8995_AIF1DAC2_VU                      0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_MASK                 0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_SHIFT                     8	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_WIDTH                     1	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2R_VOL_MASK               0x00FF	/* AIF1DAC2R_VOL - [7:0] */
+#define WM8995_AIF1DAC2R_VOL_SHIFT                   0	/* AIF1DAC2R_VOL - [7:0] */
+#define WM8995_AIF1DAC2R_VOL_WIDTH                   8	/* AIF1DAC2R_VOL - [7:0] */
+
+/*
+ * R1040 (0x410) - AIF1 ADC1 Filters
+ */
+#define WM8995_AIF1ADC_4FS                      0x8000	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_MASK                 0x8000	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_SHIFT                    15	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_WIDTH                     1	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC1L_HPF                    0x1000	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_MASK               0x1000	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_SHIFT                  12	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_WIDTH                   1	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1R_HPF                    0x0800	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_MASK               0x0800	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_SHIFT                  11	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_WIDTH                   1	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1_HPF_MODE                0x0008	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_MASK           0x0008	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_SHIFT               3	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_WIDTH               1	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_CUT_MASK            0x0007	/* AIF1ADC1_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC1_HPF_CUT_SHIFT                0	/* AIF1ADC1_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC1_HPF_CUT_WIDTH                3	/* AIF1ADC1_HPF_CUT - [2:0] */
+
+/*
+ * R1041 (0x411) - AIF1 ADC2 Filters
+ */
+#define WM8995_AIF1ADC2L_HPF                    0x1000	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_MASK               0x1000	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_SHIFT                  12	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_WIDTH                   1	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2R_HPF                    0x0800	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_MASK               0x0800	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_SHIFT                  11	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_WIDTH                   1	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2_HPF_MODE                0x0008	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_MASK           0x0008	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_SHIFT               3	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_WIDTH               1	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_CUT_MASK            0x0007	/* AIF1ADC2_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC2_HPF_CUT_SHIFT                0	/* AIF1ADC2_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC2_HPF_CUT_WIDTH                3	/* AIF1ADC2_HPF_CUT - [2:0] */
+
+/*
+ * R1056 (0x420) - AIF1 DAC1 Filters (1)
+ */
+#define WM8995_AIF1DAC1_MUTE                    0x0200	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_MASK               0x0200	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_SHIFT                   9	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_WIDTH                   1	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MONO                    0x0080	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_MASK               0x0080	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_SHIFT                   7	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_WIDTH                   1	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MUTERATE                0x0020	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_MASK           0x0020	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_SHIFT               5	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_WIDTH               1	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP             0x0010	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_MASK        0x0010	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_SHIFT            4	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_WIDTH            1	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_DEEMP_MASK              0x0006	/* AIF1DAC1_DEEMP - [2:1] */
+#define WM8995_AIF1DAC1_DEEMP_SHIFT                  1	/* AIF1DAC1_DEEMP - [2:1] */
+#define WM8995_AIF1DAC1_DEEMP_WIDTH                  2	/* AIF1DAC1_DEEMP - [2:1] */
+
+/*
+ * R1057 (0x421) - AIF1 DAC1 Filters (2)
+ */
+#define WM8995_AIF1DAC1_3D_GAIN_MASK            0x3E00	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_GAIN_SHIFT                9	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_GAIN_WIDTH                5	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_ENA                  0x0100	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_MASK             0x0100	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_SHIFT                 8	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_WIDTH                 1	/* AIF1DAC1_3D_ENA */
+
+/*
+ * R1058 (0x422) - AIF1 DAC2 Filters (1)
+ */
+#define WM8995_AIF1DAC2_MUTE                    0x0200	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_MASK               0x0200	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_SHIFT                   9	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_WIDTH                   1	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MONO                    0x0080	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_MASK               0x0080	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_SHIFT                   7	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_WIDTH                   1	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MUTERATE                0x0020	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_MASK           0x0020	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_SHIFT               5	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_WIDTH               1	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP             0x0010	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_MASK        0x0010	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_SHIFT            4	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_WIDTH            1	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_DEEMP_MASK              0x0006	/* AIF1DAC2_DEEMP - [2:1] */
+#define WM8995_AIF1DAC2_DEEMP_SHIFT                  1	/* AIF1DAC2_DEEMP - [2:1] */
+#define WM8995_AIF1DAC2_DEEMP_WIDTH                  2	/* AIF1DAC2_DEEMP - [2:1] */
+
+/*
+ * R1059 (0x423) - AIF1 DAC2 Filters (2)
+ */
+#define WM8995_AIF1DAC2_3D_GAIN_MASK            0x3E00	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_GAIN_SHIFT                9	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_GAIN_WIDTH                5	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_ENA                  0x0100	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_MASK             0x0100	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_SHIFT                 8	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_WIDTH                 1	/* AIF1DAC2_3D_ENA */
+
+/*
+ * R1088 (0x440) - AIF1 DRC1 (1)
+ */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_MASK        0xF800	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_SHIFT           11	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_WIDTH            5	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_MASK         0x0600	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_SHIFT             9	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_WIDTH             2	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_NG_ENA                  0x0100	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_MASK             0x0100	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_SHIFT                 8	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_WIDTH                 1	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_SIG_DET_MODE            0x0080	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_MASK       0x0080	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_SHIFT           7	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_WIDTH           1	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET                 0x0040	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_MASK            0x0040	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_SHIFT                6	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_WIDTH                1	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA            0x0020	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_MASK       0x0020	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_SHIFT           5	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_WIDTH           1	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_QR                      0x0010	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_MASK                 0x0010	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_SHIFT                     4	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_WIDTH                     1	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_ANTICLIP                0x0008	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_MASK           0x0008	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_SHIFT               3	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_WIDTH               1	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DAC1_DRC_ENA                 0x0004	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_MASK            0x0004	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_SHIFT                2	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_WIDTH                1	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA                0x0002	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_MASK           0x0002	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_SHIFT               1	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_WIDTH               1	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA                0x0001	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_MASK           0x0001	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_SHIFT               0	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_WIDTH               1	/* AIF1ADC1R_DRC_ENA */
+
+/*
+ * R1089 (0x441) - AIF1 DRC1 (2)
+ */
+#define WM8995_AIF1DRC1_ATK_MASK                0x1E00	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_ATK_SHIFT                    9	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_ATK_WIDTH                    4	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_DCY_MASK                0x01E0	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_DCY_SHIFT                    5	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_DCY_WIDTH                    4	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_MINGAIN_MASK            0x001C	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MINGAIN_SHIFT                2	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MINGAIN_WIDTH                3	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MAXGAIN_MASK            0x0003	/* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC1_MAXGAIN_SHIFT                0	/* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC1_MAXGAIN_WIDTH                2	/* AIF1DRC1_MAXGAIN - [1:0] */
+
+/*
+ * R1090 (0x442) - AIF1 DRC1 (3)
+ */
+#define WM8995_AIF1DRC1_NG_MINGAIN_MASK         0xF000	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_MINGAIN_SHIFT            12	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_MINGAIN_WIDTH             4	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_EXP_MASK             0x0C00	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_NG_EXP_SHIFT                10	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_NG_EXP_WIDTH                 2	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_QR_THR_MASK             0x0300	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_THR_SHIFT                 8	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_THR_WIDTH                 2	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_DCY_MASK             0x00C0	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_QR_DCY_SHIFT                 6	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_QR_DCY_WIDTH                 2	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_HI_COMP_MASK            0x0038	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_HI_COMP_SHIFT                3	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_HI_COMP_WIDTH                3	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_LO_COMP_MASK            0x0007	/* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC1_LO_COMP_SHIFT                0	/* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC1_LO_COMP_WIDTH                3	/* AIF1DRC1_LO_COMP - [2:0] */
+
+/*
+ * R1091 (0x443) - AIF1 DRC1 (4)
+ */
+#define WM8995_AIF1DRC1_KNEE_IP_MASK            0x07E0	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_IP_SHIFT                5	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_IP_WIDTH                6	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_OP_MASK            0x001F	/* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE_OP_SHIFT                0	/* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE_OP_WIDTH                5	/* AIF1DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R1092 (0x444) - AIF1 DRC1 (5)
+ */
+#define WM8995_AIF1DRC1_KNEE2_IP_MASK           0x03E0	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_IP_SHIFT               5	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_IP_WIDTH               5	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_OP_MASK           0x001F	/* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE2_OP_SHIFT               0	/* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE2_OP_WIDTH               5	/* AIF1DRC1_KNEE2_OP - [4:0] */
+
+/*
+ * R1104 (0x450) - AIF1 DRC2 (1)
+ */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_MASK        0xF800	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_SHIFT           11	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_WIDTH            5	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_MASK         0x0600	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_SHIFT             9	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_WIDTH             2	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_NG_ENA                  0x0100	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_MASK             0x0100	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_SHIFT                 8	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_WIDTH                 1	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_SIG_DET_MODE            0x0080	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_MASK       0x0080	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_SHIFT           7	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_WIDTH           1	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET                 0x0040	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_MASK            0x0040	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_SHIFT                6	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_WIDTH                1	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA            0x0020	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_MASK       0x0020	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_SHIFT           5	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_WIDTH           1	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_QR                      0x0010	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_MASK                 0x0010	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_SHIFT                     4	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_WIDTH                     1	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_ANTICLIP                0x0008	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_MASK           0x0008	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_SHIFT               3	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_WIDTH               1	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DAC2_DRC_ENA                 0x0004	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_MASK            0x0004	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_SHIFT                2	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_WIDTH                1	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA                0x0002	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_MASK           0x0002	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_SHIFT               1	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_WIDTH               1	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA                0x0001	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_MASK           0x0001	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_SHIFT               0	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_WIDTH               1	/* AIF1ADC2R_DRC_ENA */
+
+/*
+ * R1105 (0x451) - AIF1 DRC2 (2)
+ */
+#define WM8995_AIF1DRC2_ATK_MASK                0x1E00	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_ATK_SHIFT                    9	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_ATK_WIDTH                    4	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_DCY_MASK                0x01E0	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_DCY_SHIFT                    5	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_DCY_WIDTH                    4	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_MINGAIN_MASK            0x001C	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MINGAIN_SHIFT                2	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MINGAIN_WIDTH                3	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MAXGAIN_MASK            0x0003	/* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC2_MAXGAIN_SHIFT                0	/* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC2_MAXGAIN_WIDTH                2	/* AIF1DRC2_MAXGAIN - [1:0] */
+
+/*
+ * R1106 (0x452) - AIF1 DRC2 (3)
+ */
+#define WM8995_AIF1DRC2_NG_MINGAIN_MASK         0xF000	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_MINGAIN_SHIFT            12	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_MINGAIN_WIDTH             4	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_EXP_MASK             0x0C00	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_NG_EXP_SHIFT                10	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_NG_EXP_WIDTH                 2	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_QR_THR_MASK             0x0300	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_THR_SHIFT                 8	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_THR_WIDTH                 2	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_DCY_MASK             0x00C0	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_QR_DCY_SHIFT                 6	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_QR_DCY_WIDTH                 2	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_HI_COMP_MASK            0x0038	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_HI_COMP_SHIFT                3	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_HI_COMP_WIDTH                3	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_LO_COMP_MASK            0x0007	/* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC2_LO_COMP_SHIFT                0	/* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC2_LO_COMP_WIDTH                3	/* AIF1DRC2_LO_COMP - [2:0] */
+
+/*
+ * R1107 (0x453) - AIF1 DRC2 (4)
+ */
+#define WM8995_AIF1DRC2_KNEE_IP_MASK            0x07E0	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_IP_SHIFT                5	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_IP_WIDTH                6	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_OP_MASK            0x001F	/* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE_OP_SHIFT                0	/* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE_OP_WIDTH                5	/* AIF1DRC2_KNEE_OP - [4:0] */
+
+/*
+ * R1108 (0x454) - AIF1 DRC2 (5)
+ */
+#define WM8995_AIF1DRC2_KNEE2_IP_MASK           0x03E0	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_IP_SHIFT               5	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_IP_WIDTH               5	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_OP_MASK           0x001F	/* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE2_OP_SHIFT               0	/* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE2_OP_WIDTH               5	/* AIF1DRC2_KNEE2_OP - [4:0] */
+
+/*
+ * R1152 (0x480) - AIF1 DAC1 EQ Gains (1)
+ */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_MASK         0xF800	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_SHIFT            11	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_MASK         0x07C0	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_SHIFT             6	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_MASK         0x003E	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_SHIFT             1	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_ENA                  0x0001	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_MASK             0x0001	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_SHIFT                 0	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_WIDTH                 1	/* AIF1DAC1_EQ_ENA */
+
+/*
+ * R1153 (0x481) - AIF1 DAC1 EQ Gains (2)
+ */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_MASK         0xF800	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_SHIFT            11	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_MASK         0x07C0	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_SHIFT             6	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A
+ */
+#define WM8995_AIF1DAC1_EQ_B1_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_A_SHIFT                0	/* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_A_WIDTH               16	/* AIF1DAC1_EQ_B1_A - [15:0] */
+
+/*
+ * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B
+ */
+#define WM8995_AIF1DAC1_EQ_B1_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_B_SHIFT                0	/* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_B_WIDTH               16	/* AIF1DAC1_EQ_B1_B - [15:0] */
+
+/*
+ * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B1_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_PG_SHIFT               0	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_PG_WIDTH              16	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+
+/*
+ * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A
+ */
+#define WM8995_AIF1DAC1_EQ_B2_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_A_SHIFT                0	/* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_A_WIDTH               16	/* AIF1DAC1_EQ_B2_A - [15:0] */
+
+/*
+ * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B
+ */
+#define WM8995_AIF1DAC1_EQ_B2_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_B_SHIFT                0	/* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_B_WIDTH               16	/* AIF1DAC1_EQ_B2_B - [15:0] */
+
+/*
+ * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C
+ */
+#define WM8995_AIF1DAC1_EQ_B2_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_C_SHIFT                0	/* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_C_WIDTH               16	/* AIF1DAC1_EQ_B2_C - [15:0] */
+
+/*
+ * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B2_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_PG_SHIFT               0	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_PG_WIDTH              16	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+
+/*
+ * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A
+ */
+#define WM8995_AIF1DAC1_EQ_B3_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_A_SHIFT                0	/* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_A_WIDTH               16	/* AIF1DAC1_EQ_B3_A - [15:0] */
+
+/*
+ * R1162 (0x48A) - AIF1 DAC1 EQ Band 3 B
+ */
+#define WM8995_AIF1DAC1_EQ_B3_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_B_SHIFT                0	/* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_B_WIDTH               16	/* AIF1DAC1_EQ_B3_B - [15:0] */
+
+/*
+ * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C
+ */
+#define WM8995_AIF1DAC1_EQ_B3_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_C_SHIFT                0	/* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_C_WIDTH               16	/* AIF1DAC1_EQ_B3_C - [15:0] */
+
+/*
+ * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B3_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_PG_SHIFT               0	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_PG_WIDTH              16	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+
+/*
+ * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A
+ */
+#define WM8995_AIF1DAC1_EQ_B4_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_A_SHIFT                0	/* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_A_WIDTH               16	/* AIF1DAC1_EQ_B4_A - [15:0] */
+
+/*
+ * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B
+ */
+#define WM8995_AIF1DAC1_EQ_B4_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_B_SHIFT                0	/* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_B_WIDTH               16	/* AIF1DAC1_EQ_B4_B - [15:0] */
+
+/*
+ * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C
+ */
+#define WM8995_AIF1DAC1_EQ_B4_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_C_SHIFT                0	/* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_C_WIDTH               16	/* AIF1DAC1_EQ_B4_C - [15:0] */
+
+/*
+ * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B4_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_PG_SHIFT               0	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_PG_WIDTH              16	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+
+/*
+ * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A
+ */
+#define WM8995_AIF1DAC1_EQ_B5_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_A_SHIFT                0	/* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_A_WIDTH               16	/* AIF1DAC1_EQ_B5_A - [15:0] */
+
+/*
+ * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B
+ */
+#define WM8995_AIF1DAC1_EQ_B5_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_B_SHIFT                0	/* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_B_WIDTH               16	/* AIF1DAC1_EQ_B5_B - [15:0] */
+
+/*
+ * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B5_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_PG_SHIFT               0	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_PG_WIDTH              16	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+
+/*
+ * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1)
+ */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_MASK         0xF800	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_SHIFT            11	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_MASK         0x07C0	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_SHIFT             6	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_MASK         0x003E	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_SHIFT             1	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_ENA                  0x0001	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_MASK             0x0001	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_SHIFT                 0	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_WIDTH                 1	/* AIF1DAC2_EQ_ENA */
+
+/*
+ * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2)
+ */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_MASK         0xF800	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_SHIFT            11	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_MASK         0x07C0	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_SHIFT             6	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A
+ */
+#define WM8995_AIF1DAC2_EQ_B1_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_A_SHIFT                0	/* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_A_WIDTH               16	/* AIF1DAC2_EQ_B1_A - [15:0] */
+
+/*
+ * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B
+ */
+#define WM8995_AIF1DAC2_EQ_B1_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_B_SHIFT                0	/* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_B_WIDTH               16	/* AIF1DAC2_EQ_B1_B - [15:0] */
+
+/*
+ * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B1_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_PG_SHIFT               0	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_PG_WIDTH              16	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+
+/*
+ * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A
+ */
+#define WM8995_AIF1DAC2_EQ_B2_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_A_SHIFT                0	/* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_A_WIDTH               16	/* AIF1DAC2_EQ_B2_A - [15:0] */
+
+/*
+ * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B
+ */
+#define WM8995_AIF1DAC2_EQ_B2_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_B_SHIFT                0	/* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_B_WIDTH               16	/* AIF1DAC2_EQ_B2_B - [15:0] */
+
+/*
+ * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C
+ */
+#define WM8995_AIF1DAC2_EQ_B2_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_C_SHIFT                0	/* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_C_WIDTH               16	/* AIF1DAC2_EQ_B2_C - [15:0] */
+
+/*
+ * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B2_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_PG_SHIFT               0	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_PG_WIDTH              16	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+
+/*
+ * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A
+ */
+#define WM8995_AIF1DAC2_EQ_B3_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_A_SHIFT                0	/* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_A_WIDTH               16	/* AIF1DAC2_EQ_B3_A - [15:0] */
+
+/*
+ * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B
+ */
+#define WM8995_AIF1DAC2_EQ_B3_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_B_SHIFT                0	/* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_B_WIDTH               16	/* AIF1DAC2_EQ_B3_B - [15:0] */
+
+/*
+ * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C
+ */
+#define WM8995_AIF1DAC2_EQ_B3_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_C_SHIFT                0	/* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_C_WIDTH               16	/* AIF1DAC2_EQ_B3_C - [15:0] */
+
+/*
+ * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B3_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_PG_SHIFT               0	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_PG_WIDTH              16	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+
+/*
+ * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A
+ */
+#define WM8995_AIF1DAC2_EQ_B4_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_A_SHIFT                0	/* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_A_WIDTH               16	/* AIF1DAC2_EQ_B4_A - [15:0] */
+
+/*
+ * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B
+ */
+#define WM8995_AIF1DAC2_EQ_B4_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_B_SHIFT                0	/* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_B_WIDTH               16	/* AIF1DAC2_EQ_B4_B - [15:0] */
+
+/*
+ * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C
+ */
+#define WM8995_AIF1DAC2_EQ_B4_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_C_SHIFT                0	/* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_C_WIDTH               16	/* AIF1DAC2_EQ_B4_C - [15:0] */
+
+/*
+ * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B4_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_PG_SHIFT               0	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_PG_WIDTH              16	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+
+/*
+ * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A
+ */
+#define WM8995_AIF1DAC2_EQ_B5_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_A_SHIFT                0	/* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_A_WIDTH               16	/* AIF1DAC2_EQ_B5_A - [15:0] */
+
+/*
+ * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B
+ */
+#define WM8995_AIF1DAC2_EQ_B5_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_B_SHIFT                0	/* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_B_WIDTH               16	/* AIF1DAC2_EQ_B5_B - [15:0] */
+
+/*
+ * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B5_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_PG_SHIFT               0	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_PG_WIDTH              16	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+
+/*
+ * R1280 (0x500) - AIF2 ADC Left Volume
+ */
+#define WM8995_AIF2ADC_VU                       0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_MASK                  0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_SHIFT                      8	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_WIDTH                      1	/* AIF2ADC_VU */
+#define WM8995_AIF2ADCL_VOL_MASK                0x00FF	/* AIF2ADCL_VOL - [7:0] */
+#define WM8995_AIF2ADCL_VOL_SHIFT                    0	/* AIF2ADCL_VOL - [7:0] */
+#define WM8995_AIF2ADCL_VOL_WIDTH                    8	/* AIF2ADCL_VOL - [7:0] */
+
+/*
+ * R1281 (0x501) - AIF2 ADC Right Volume
+ */
+#define WM8995_AIF2ADC_VU                       0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_MASK                  0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_SHIFT                      8	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_WIDTH                      1	/* AIF2ADC_VU */
+#define WM8995_AIF2ADCR_VOL_MASK                0x00FF	/* AIF2ADCR_VOL - [7:0] */
+#define WM8995_AIF2ADCR_VOL_SHIFT                    0	/* AIF2ADCR_VOL - [7:0] */
+#define WM8995_AIF2ADCR_VOL_WIDTH                    8	/* AIF2ADCR_VOL - [7:0] */
+
+/*
+ * R1282 (0x502) - AIF2 DAC Left Volume
+ */
+#define WM8995_AIF2DAC_VU                       0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_MASK                  0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_SHIFT                      8	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_WIDTH                      1	/* AIF2DAC_VU */
+#define WM8995_AIF2DACL_VOL_MASK                0x00FF	/* AIF2DACL_VOL - [7:0] */
+#define WM8995_AIF2DACL_VOL_SHIFT                    0	/* AIF2DACL_VOL - [7:0] */
+#define WM8995_AIF2DACL_VOL_WIDTH                    8	/* AIF2DACL_VOL - [7:0] */
+
+/*
+ * R1283 (0x503) - AIF2 DAC Right Volume
+ */
+#define WM8995_AIF2DAC_VU                       0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_MASK                  0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_SHIFT                      8	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_WIDTH                      1	/* AIF2DAC_VU */
+#define WM8995_AIF2DACR_VOL_MASK                0x00FF	/* AIF2DACR_VOL - [7:0] */
+#define WM8995_AIF2DACR_VOL_SHIFT                    0	/* AIF2DACR_VOL - [7:0] */
+#define WM8995_AIF2DACR_VOL_WIDTH                    8	/* AIF2DACR_VOL - [7:0] */
+
+/*
+ * R1296 (0x510) - AIF2 ADC Filters
+ */
+#define WM8995_AIF2ADC_4FS                      0x8000	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_MASK                 0x8000	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_SHIFT                    15	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_WIDTH                     1	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADCL_HPF                     0x1000	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_MASK                0x1000	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_SHIFT                   12	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_WIDTH                    1	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCR_HPF                     0x0800	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_MASK                0x0800	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_SHIFT                   11	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_WIDTH                    1	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADC_HPF_MODE                 0x0008	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_MASK            0x0008	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_SHIFT                3	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_WIDTH                1	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_CUT_MASK             0x0007	/* AIF2ADC_HPF_CUT - [2:0] */
+#define WM8995_AIF2ADC_HPF_CUT_SHIFT                 0	/* AIF2ADC_HPF_CUT - [2:0] */
+#define WM8995_AIF2ADC_HPF_CUT_WIDTH                 3	/* AIF2ADC_HPF_CUT - [2:0] */
+
+/*
+ * R1312 (0x520) - AIF2 DAC Filters (1)
+ */
+#define WM8995_AIF2DAC_MUTE                     0x0200	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_MASK                0x0200	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_SHIFT                    9	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_WIDTH                    1	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MONO                     0x0080	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_MASK                0x0080	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_SHIFT                    7	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_WIDTH                    1	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MUTERATE                 0x0020	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_MASK            0x0020	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_SHIFT                5	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_WIDTH                1	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_UNMUTE_RAMP              0x0010	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_MASK         0x0010	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_SHIFT             4	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_WIDTH             1	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_DEEMP_MASK               0x0006	/* AIF2DAC_DEEMP - [2:1] */
+#define WM8995_AIF2DAC_DEEMP_SHIFT                   1	/* AIF2DAC_DEEMP - [2:1] */
+#define WM8995_AIF2DAC_DEEMP_WIDTH                   2	/* AIF2DAC_DEEMP - [2:1] */
+
+/*
+ * R1313 (0x521) - AIF2 DAC Filters (2)
+ */
+#define WM8995_AIF2DAC_3D_GAIN_MASK             0x3E00	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_GAIN_SHIFT                 9	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_GAIN_WIDTH                 5	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_ENA                   0x0100	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_MASK              0x0100	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_SHIFT                  8	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_WIDTH                  1	/* AIF2DAC_3D_ENA */
+
+/*
+ * R1344 (0x540) - AIF2 DRC (1)
+ */
+#define WM8995_AIF2DRC_SIG_DET_RMS_MASK         0xF800	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_RMS_SHIFT            11	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_RMS_WIDTH             5	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_PK_MASK          0x0600	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_SIG_DET_PK_SHIFT              9	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_SIG_DET_PK_WIDTH              2	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_NG_ENA                   0x0100	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_MASK              0x0100	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_SHIFT                  8	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_WIDTH                  1	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_SIG_DET_MODE             0x0080	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_MASK        0x0080	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_SHIFT            7	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_WIDTH            1	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET                  0x0040	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_MASK             0x0040	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_SHIFT                 6	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_WIDTH                 1	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA             0x0020	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_MASK        0x0020	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_SHIFT            5	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_WIDTH            1	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_QR                       0x0010	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_MASK                  0x0010	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_SHIFT                      4	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_WIDTH                      1	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_ANTICLIP                 0x0008	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_MASK            0x0008	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_SHIFT                3	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_WIDTH                1	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DAC_DRC_ENA                  0x0004	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_MASK             0x0004	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_SHIFT                 2	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_WIDTH                 1	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA                 0x0002	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_MASK            0x0002	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_SHIFT                1	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_WIDTH                1	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA                 0x0001	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_MASK            0x0001	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_SHIFT                0	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_WIDTH                1	/* AIF2ADCR_DRC_ENA */
+
+/*
+ * R1345 (0x541) - AIF2 DRC (2)
+ */
+#define WM8995_AIF2DRC_ATK_MASK                 0x1E00	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_ATK_SHIFT                     9	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_ATK_WIDTH                     4	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_DCY_MASK                 0x01E0	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_DCY_SHIFT                     5	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_DCY_WIDTH                     4	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_MINGAIN_MASK             0x001C	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MINGAIN_SHIFT                 2	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MINGAIN_WIDTH                 3	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MAXGAIN_MASK             0x0003	/* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8995_AIF2DRC_MAXGAIN_SHIFT                 0	/* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8995_AIF2DRC_MAXGAIN_WIDTH                 2	/* AIF2DRC_MAXGAIN - [1:0] */
+
+/*
+ * R1346 (0x542) - AIF2 DRC (3)
+ */
+#define WM8995_AIF2DRC_NG_MINGAIN_MASK          0xF000	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_MINGAIN_SHIFT             12	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_MINGAIN_WIDTH              4	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_EXP_MASK              0x0C00	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_NG_EXP_SHIFT                 10	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_NG_EXP_WIDTH                  2	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_QR_THR_MASK              0x0300	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_THR_SHIFT                  8	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_THR_WIDTH                  2	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_DCY_MASK              0x00C0	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_QR_DCY_SHIFT                  6	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_QR_DCY_WIDTH                  2	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_HI_COMP_MASK             0x0038	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_HI_COMP_SHIFT                 3	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_HI_COMP_WIDTH                 3	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_LO_COMP_MASK             0x0007	/* AIF2DRC_LO_COMP - [2:0] */
+#define WM8995_AIF2DRC_LO_COMP_SHIFT                 0	/* AIF2DRC_LO_COMP - [2:0] */
+#define WM8995_AIF2DRC_LO_COMP_WIDTH                 3	/* AIF2DRC_LO_COMP - [2:0] */
+
+/*
+ * R1347 (0x543) - AIF2 DRC (4)
+ */
+#define WM8995_AIF2DRC_KNEE_IP_MASK             0x07E0	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_IP_SHIFT                 5	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_IP_WIDTH                 6	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_OP_MASK             0x001F	/* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE_OP_SHIFT                 0	/* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE_OP_WIDTH                 5	/* AIF2DRC_KNEE_OP - [4:0] */
+
+/*
+ * R1348 (0x544) - AIF2 DRC (5)
+ */
+#define WM8995_AIF2DRC_KNEE2_IP_MASK            0x03E0	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_IP_SHIFT                5	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_IP_WIDTH                5	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_OP_MASK            0x001F	/* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE2_OP_SHIFT                0	/* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE2_OP_WIDTH                5	/* AIF2DRC_KNEE2_OP - [4:0] */
+
+/*
+ * R1408 (0x580) - AIF2 EQ Gains (1)
+ */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_MASK          0xF800	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_SHIFT             11	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_WIDTH              5	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_MASK          0x07C0	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_SHIFT              6	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_WIDTH              5	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_MASK          0x003E	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_SHIFT              1	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_WIDTH              5	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_ENA                   0x0001	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_MASK              0x0001	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_SHIFT                  0	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_WIDTH                  1	/* AIF2DAC_EQ_ENA */
+
+/*
+ * R1409 (0x581) - AIF2 EQ Gains (2)
+ */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_MASK          0xF800	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_SHIFT             11	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_WIDTH              5	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_MASK          0x07C0	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_SHIFT              6	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_WIDTH              5	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1410 (0x582) - AIF2 EQ Band 1 A
+ */
+#define WM8995_AIF2DAC_EQ_B1_A_MASK             0xFFFF	/* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_A_SHIFT                 0	/* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_A_WIDTH                16	/* AIF2DAC_EQ_B1_A - [15:0] */
+
+/*
+ * R1411 (0x583) - AIF2 EQ Band 1 B
+ */
+#define WM8995_AIF2DAC_EQ_B1_B_MASK             0xFFFF	/* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_B_SHIFT                 0	/* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_B_WIDTH                16	/* AIF2DAC_EQ_B1_B - [15:0] */
+
+/*
+ * R1412 (0x584) - AIF2 EQ Band 1 PG
+ */
+#define WM8995_AIF2DAC_EQ_B1_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_PG_SHIFT                0	/* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_PG_WIDTH               16	/* AIF2DAC_EQ_B1_PG - [15:0] */
+
+/*
+ * R1413 (0x585) - AIF2 EQ Band 2 A
+ */
+#define WM8995_AIF2DAC_EQ_B2_A_MASK             0xFFFF	/* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_A_SHIFT                 0	/* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_A_WIDTH                16	/* AIF2DAC_EQ_B2_A - [15:0] */
+
+/*
+ * R1414 (0x586) - AIF2 EQ Band 2 B
+ */
+#define WM8995_AIF2DAC_EQ_B2_B_MASK             0xFFFF	/* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_B_SHIFT                 0	/* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_B_WIDTH                16	/* AIF2DAC_EQ_B2_B - [15:0] */
+
+/*
+ * R1415 (0x587) - AIF2 EQ Band 2 C
+ */
+#define WM8995_AIF2DAC_EQ_B2_C_MASK             0xFFFF	/* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_C_SHIFT                 0	/* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_C_WIDTH                16	/* AIF2DAC_EQ_B2_C - [15:0] */
+
+/*
+ * R1416 (0x588) - AIF2 EQ Band 2 PG
+ */
+#define WM8995_AIF2DAC_EQ_B2_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_PG_SHIFT                0	/* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_PG_WIDTH               16	/* AIF2DAC_EQ_B2_PG - [15:0] */
+
+/*
+ * R1417 (0x589) - AIF2 EQ Band 3 A
+ */
+#define WM8995_AIF2DAC_EQ_B3_A_MASK             0xFFFF	/* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_A_SHIFT                 0	/* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_A_WIDTH                16	/* AIF2DAC_EQ_B3_A - [15:0] */
+
+/*
+ * R1418 (0x58A) - AIF2 EQ Band 3 B
+ */
+#define WM8995_AIF2DAC_EQ_B3_B_MASK             0xFFFF	/* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_B_SHIFT                 0	/* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_B_WIDTH                16	/* AIF2DAC_EQ_B3_B - [15:0] */
+
+/*
+ * R1419 (0x58B) - AIF2 EQ Band 3 C
+ */
+#define WM8995_AIF2DAC_EQ_B3_C_MASK             0xFFFF	/* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_C_SHIFT                 0	/* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_C_WIDTH                16	/* AIF2DAC_EQ_B3_C - [15:0] */
+
+/*
+ * R1420 (0x58C) - AIF2 EQ Band 3 PG
+ */
+#define WM8995_AIF2DAC_EQ_B3_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_PG_SHIFT                0	/* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_PG_WIDTH               16	/* AIF2DAC_EQ_B3_PG - [15:0] */
+
+/*
+ * R1421 (0x58D) - AIF2 EQ Band 4 A
+ */
+#define WM8995_AIF2DAC_EQ_B4_A_MASK             0xFFFF	/* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_A_SHIFT                 0	/* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_A_WIDTH                16	/* AIF2DAC_EQ_B4_A - [15:0] */
+
+/*
+ * R1422 (0x58E) - AIF2 EQ Band 4 B
+ */
+#define WM8995_AIF2DAC_EQ_B4_B_MASK             0xFFFF	/* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_B_SHIFT                 0	/* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_B_WIDTH                16	/* AIF2DAC_EQ_B4_B - [15:0] */
+
+/*
+ * R1423 (0x58F) - AIF2 EQ Band 4 C
+ */
+#define WM8995_AIF2DAC_EQ_B4_C_MASK             0xFFFF	/* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_C_SHIFT                 0	/* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_C_WIDTH                16	/* AIF2DAC_EQ_B4_C - [15:0] */
+
+/*
+ * R1424 (0x590) - AIF2 EQ Band 4 PG
+ */
+#define WM8995_AIF2DAC_EQ_B4_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_PG_SHIFT                0	/* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_PG_WIDTH               16	/* AIF2DAC_EQ_B4_PG - [15:0] */
+
+/*
+ * R1425 (0x591) - AIF2 EQ Band 5 A
+ */
+#define WM8995_AIF2DAC_EQ_B5_A_MASK             0xFFFF	/* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_A_SHIFT                 0	/* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_A_WIDTH                16	/* AIF2DAC_EQ_B5_A - [15:0] */
+
+/*
+ * R1426 (0x592) - AIF2 EQ Band 5 B
+ */
+#define WM8995_AIF2DAC_EQ_B5_B_MASK             0xFFFF	/* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_B_SHIFT                 0	/* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_B_WIDTH                16	/* AIF2DAC_EQ_B5_B - [15:0] */
+
+/*
+ * R1427 (0x593) - AIF2 EQ Band 5 PG
+ */
+#define WM8995_AIF2DAC_EQ_B5_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_PG_SHIFT                0	/* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_PG_WIDTH               16	/* AIF2DAC_EQ_B5_PG - [15:0] */
+
+/*
+ * R1536 (0x600) - DAC1 Mixer Volumes
+ */
+#define WM8995_ADCR_DAC1_VOL_MASK               0x03E0	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCR_DAC1_VOL_SHIFT                   5	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCR_DAC1_VOL_WIDTH                   5	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCL_DAC1_VOL_MASK               0x001F	/* ADCL_DAC1_VOL - [4:0] */
+#define WM8995_ADCL_DAC1_VOL_SHIFT                   0	/* ADCL_DAC1_VOL - [4:0] */
+#define WM8995_ADCL_DAC1_VOL_WIDTH                   5	/* ADCL_DAC1_VOL - [4:0] */
+
+/*
+ * R1537 (0x601) - DAC1 Left Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC1L                    0x0020	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_MASK               0x0020	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_SHIFT                   5	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_WIDTH                   1	/* ADCR_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L                    0x0010	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_MASK               0x0010	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_SHIFT                   4	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_WIDTH                   1	/* ADCL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L                0x0004	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_MASK           0x0004	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_SHIFT               2	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_WIDTH               1	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L               0x0002	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_MASK          0x0002	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_SHIFT              1	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_WIDTH              1	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L               0x0001	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_MASK          0x0001	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_SHIFT              0	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_WIDTH              1	/* AIF1DAC1L_TO_DAC1L */
+
+/*
+ * R1538 (0x602) - DAC1 Right Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC1R                    0x0020	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_MASK               0x0020	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_SHIFT                   5	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_WIDTH                   1	/* ADCR_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R                    0x0010	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_MASK               0x0010	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_SHIFT                   4	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_WIDTH                   1	/* ADCL_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R                0x0004	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_MASK           0x0004	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_SHIFT               2	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_WIDTH               1	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R               0x0002	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_MASK          0x0002	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_SHIFT              1	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_WIDTH              1	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R               0x0001	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_MASK          0x0001	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_SHIFT              0	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_WIDTH              1	/* AIF1DAC1R_TO_DAC1R */
+
+/*
+ * R1539 (0x603) - DAC2 Mixer Volumes
+ */
+#define WM8995_ADCR_DAC2_VOL_MASK               0x03E0	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCR_DAC2_VOL_SHIFT                   5	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCR_DAC2_VOL_WIDTH                   5	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCL_DAC2_VOL_MASK               0x001F	/* ADCL_DAC2_VOL - [4:0] */
+#define WM8995_ADCL_DAC2_VOL_SHIFT                   0	/* ADCL_DAC2_VOL - [4:0] */
+#define WM8995_ADCL_DAC2_VOL_WIDTH                   5	/* ADCL_DAC2_VOL - [4:0] */
+
+/*
+ * R1540 (0x604) - DAC2 Left Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC2L                    0x0020	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_MASK               0x0020	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_SHIFT                   5	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_WIDTH                   1	/* ADCR_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L                    0x0010	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_MASK               0x0010	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_SHIFT                   4	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_WIDTH                   1	/* ADCL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L                0x0004	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_MASK           0x0004	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_SHIFT               2	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_WIDTH               1	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L               0x0002	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_MASK          0x0002	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_SHIFT              1	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_WIDTH              1	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L               0x0001	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_MASK          0x0001	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_SHIFT              0	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_WIDTH              1	/* AIF1DAC1L_TO_DAC2L */
+
+/*
+ * R1541 (0x605) - DAC2 Right Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC2R                    0x0020	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_MASK               0x0020	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_SHIFT                   5	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_WIDTH                   1	/* ADCR_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R                    0x0010	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_MASK               0x0010	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_SHIFT                   4	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_WIDTH                   1	/* ADCL_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R                0x0004	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_MASK           0x0004	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_SHIFT               2	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_WIDTH               1	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R               0x0002	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_MASK          0x0002	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_SHIFT              1	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_WIDTH              1	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R               0x0001	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_MASK          0x0001	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_SHIFT              0	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_WIDTH              1	/* AIF1DAC1R_TO_DAC2R */
+
+/*
+ * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing
+ */
+#define WM8995_ADC1L_TO_AIF1ADC1L               0x0002	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_MASK          0x0002	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_SHIFT              1	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_WIDTH              1	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L            0x0001	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_MASK       0x0001	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_SHIFT           0	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_WIDTH           1	/* AIF2DACL_TO_AIF1ADC1L */
+
+/*
+ * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing
+ */
+#define WM8995_ADC1R_TO_AIF1ADC1R               0x0002	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_MASK          0x0002	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_SHIFT              1	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_WIDTH              1	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R            0x0001	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_MASK       0x0001	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_SHIFT           0	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_WIDTH           1	/* AIF2DACR_TO_AIF1ADC1R */
+
+/*
+ * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing
+ */
+#define WM8995_ADC2L_TO_AIF1ADC2L               0x0002	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_MASK          0x0002	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_SHIFT              1	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_WIDTH              1	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L            0x0001	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_MASK       0x0001	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_SHIFT           0	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_WIDTH           1	/* AIF2DACL_TO_AIF1ADC2L */
+
+/*
+ * R1545 (0x609) - AIF1 ADC2 Right Mixer Routing
+ */
+#define WM8995_ADC2R_TO_AIF1ADC2R               0x0002	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_MASK          0x0002	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_SHIFT              1	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_WIDTH              1	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R            0x0001	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_MASK       0x0001	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_SHIFT           0	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_WIDTH           1	/* AIF2DACR_TO_AIF1ADC2R */
+
+/*
+ * R1552 (0x610) - DAC Softmute
+ */
+#define WM8995_DAC_SOFTMUTEMODE                 0x0002	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_MASK            0x0002	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_SHIFT                1	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_WIDTH                1	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_MUTERATE                     0x0001	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_MASK                0x0001	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_SHIFT                    0	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_WIDTH                    1	/* DAC_MUTERATE */
+
+/*
+ * R1568 (0x620) - Oversampling
+ */
+#define WM8995_ADC_OSR128                       0x0002	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_MASK                  0x0002	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_SHIFT                      1	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_WIDTH                      1	/* ADC_OSR128 */
+#define WM8995_DAC_OSR128                       0x0001	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_MASK                  0x0001	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_SHIFT                      0	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_WIDTH                      1	/* DAC_OSR128 */
+
+/*
+ * R1569 (0x621) - Sidetone
+ */
+#define WM8995_ST_LPF                           0x1000	/* ST_LPF */
+#define WM8995_ST_LPF_MASK                      0x1000	/* ST_LPF */
+#define WM8995_ST_LPF_SHIFT                         12	/* ST_LPF */
+#define WM8995_ST_LPF_WIDTH                          1	/* ST_LPF */
+#define WM8995_ST_HPF_CUT_MASK                  0x0380	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF_CUT_SHIFT                      7	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF_CUT_WIDTH                      3	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF                           0x0040	/* ST_HPF */
+#define WM8995_ST_HPF_MASK                      0x0040	/* ST_HPF */
+#define WM8995_ST_HPF_SHIFT                          6	/* ST_HPF */
+#define WM8995_ST_HPF_WIDTH                          1	/* ST_HPF */
+#define WM8995_STR_SEL                          0x0002	/* STR_SEL */
+#define WM8995_STR_SEL_MASK                     0x0002	/* STR_SEL */
+#define WM8995_STR_SEL_SHIFT                         1	/* STR_SEL */
+#define WM8995_STR_SEL_WIDTH                         1	/* STR_SEL */
+#define WM8995_STL_SEL                          0x0001	/* STL_SEL */
+#define WM8995_STL_SEL_MASK                     0x0001	/* STL_SEL */
+#define WM8995_STL_SEL_SHIFT                         0	/* STL_SEL */
+#define WM8995_STL_SEL_WIDTH                         1	/* STL_SEL */
+
+/*
+ * R1792 (0x700) - GPIO 1
+ */
+#define WM8995_GP1_DIR                          0x8000	/* GP1_DIR */
+#define WM8995_GP1_DIR_MASK                     0x8000	/* GP1_DIR */
+#define WM8995_GP1_DIR_SHIFT                        15	/* GP1_DIR */
+#define WM8995_GP1_DIR_WIDTH                         1	/* GP1_DIR */
+#define WM8995_GP1_PU                           0x4000	/* GP1_PU */
+#define WM8995_GP1_PU_MASK                      0x4000	/* GP1_PU */
+#define WM8995_GP1_PU_SHIFT                         14	/* GP1_PU */
+#define WM8995_GP1_PU_WIDTH                          1	/* GP1_PU */
+#define WM8995_GP1_PD                           0x2000	/* GP1_PD */
+#define WM8995_GP1_PD_MASK                      0x2000	/* GP1_PD */
+#define WM8995_GP1_PD_SHIFT                         13	/* GP1_PD */
+#define WM8995_GP1_PD_WIDTH                          1	/* GP1_PD */
+#define WM8995_GP1_POL                          0x0400	/* GP1_POL */
+#define WM8995_GP1_POL_MASK                     0x0400	/* GP1_POL */
+#define WM8995_GP1_POL_SHIFT                        10	/* GP1_POL */
+#define WM8995_GP1_POL_WIDTH                         1	/* GP1_POL */
+#define WM8995_GP1_OP_CFG                       0x0200	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_MASK                  0x0200	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_SHIFT                      9	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_WIDTH                      1	/* GP1_OP_CFG */
+#define WM8995_GP1_DB                           0x0100	/* GP1_DB */
+#define WM8995_GP1_DB_MASK                      0x0100	/* GP1_DB */
+#define WM8995_GP1_DB_SHIFT                          8	/* GP1_DB */
+#define WM8995_GP1_DB_WIDTH                          1	/* GP1_DB */
+#define WM8995_GP1_LVL                          0x0040	/* GP1_LVL */
+#define WM8995_GP1_LVL_MASK                     0x0040	/* GP1_LVL */
+#define WM8995_GP1_LVL_SHIFT                         6	/* GP1_LVL */
+#define WM8995_GP1_LVL_WIDTH                         1	/* GP1_LVL */
+#define WM8995_GP1_FN_MASK                      0x001F	/* GP1_FN - [4:0] */
+#define WM8995_GP1_FN_SHIFT                          0	/* GP1_FN - [4:0] */
+#define WM8995_GP1_FN_WIDTH                          5	/* GP1_FN - [4:0] */
+
+/*
+ * R1793 (0x701) - GPIO 2
+ */
+#define WM8995_GP2_DIR                          0x8000	/* GP2_DIR */
+#define WM8995_GP2_DIR_MASK                     0x8000	/* GP2_DIR */
+#define WM8995_GP2_DIR_SHIFT                        15	/* GP2_DIR */
+#define WM8995_GP2_DIR_WIDTH                         1	/* GP2_DIR */
+#define WM8995_GP2_PU                           0x4000	/* GP2_PU */
+#define WM8995_GP2_PU_MASK                      0x4000	/* GP2_PU */
+#define WM8995_GP2_PU_SHIFT                         14	/* GP2_PU */
+#define WM8995_GP2_PU_WIDTH                          1	/* GP2_PU */
+#define WM8995_GP2_PD                           0x2000	/* GP2_PD */
+#define WM8995_GP2_PD_MASK                      0x2000	/* GP2_PD */
+#define WM8995_GP2_PD_SHIFT                         13	/* GP2_PD */
+#define WM8995_GP2_PD_WIDTH                          1	/* GP2_PD */
+#define WM8995_GP2_POL                          0x0400	/* GP2_POL */
+#define WM8995_GP2_POL_MASK                     0x0400	/* GP2_POL */
+#define WM8995_GP2_POL_SHIFT                        10	/* GP2_POL */
+#define WM8995_GP2_POL_WIDTH                         1	/* GP2_POL */
+#define WM8995_GP2_OP_CFG                       0x0200	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_MASK                  0x0200	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_SHIFT                      9	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_WIDTH                      1	/* GP2_OP_CFG */
+#define WM8995_GP2_DB                           0x0100	/* GP2_DB */
+#define WM8995_GP2_DB_MASK                      0x0100	/* GP2_DB */
+#define WM8995_GP2_DB_SHIFT                          8	/* GP2_DB */
+#define WM8995_GP2_DB_WIDTH                          1	/* GP2_DB */
+#define WM8995_GP2_LVL                          0x0040	/* GP2_LVL */
+#define WM8995_GP2_LVL_MASK                     0x0040	/* GP2_LVL */
+#define WM8995_GP2_LVL_SHIFT                         6	/* GP2_LVL */
+#define WM8995_GP2_LVL_WIDTH                         1	/* GP2_LVL */
+#define WM8995_GP2_FN_MASK                      0x001F	/* GP2_FN - [4:0] */
+#define WM8995_GP2_FN_SHIFT                          0	/* GP2_FN - [4:0] */
+#define WM8995_GP2_FN_WIDTH                          5	/* GP2_FN - [4:0] */
+
+/*
+ * R1794 (0x702) - GPIO 3
+ */
+#define WM8995_GP3_DIR                          0x8000	/* GP3_DIR */
+#define WM8995_GP3_DIR_MASK                     0x8000	/* GP3_DIR */
+#define WM8995_GP3_DIR_SHIFT                        15	/* GP3_DIR */
+#define WM8995_GP3_DIR_WIDTH                         1	/* GP3_DIR */
+#define WM8995_GP3_PU                           0x4000	/* GP3_PU */
+#define WM8995_GP3_PU_MASK                      0x4000	/* GP3_PU */
+#define WM8995_GP3_PU_SHIFT                         14	/* GP3_PU */
+#define WM8995_GP3_PU_WIDTH                          1	/* GP3_PU */
+#define WM8995_GP3_PD                           0x2000	/* GP3_PD */
+#define WM8995_GP3_PD_MASK                      0x2000	/* GP3_PD */
+#define WM8995_GP3_PD_SHIFT                         13	/* GP3_PD */
+#define WM8995_GP3_PD_WIDTH                          1	/* GP3_PD */
+#define WM8995_GP3_POL                          0x0400	/* GP3_POL */
+#define WM8995_GP3_POL_MASK                     0x0400	/* GP3_POL */
+#define WM8995_GP3_POL_SHIFT                        10	/* GP3_POL */
+#define WM8995_GP3_POL_WIDTH                         1	/* GP3_POL */
+#define WM8995_GP3_OP_CFG                       0x0200	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_MASK                  0x0200	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_SHIFT                      9	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_WIDTH                      1	/* GP3_OP_CFG */
+#define WM8995_GP3_DB                           0x0100	/* GP3_DB */
+#define WM8995_GP3_DB_MASK                      0x0100	/* GP3_DB */
+#define WM8995_GP3_DB_SHIFT                          8	/* GP3_DB */
+#define WM8995_GP3_DB_WIDTH                          1	/* GP3_DB */
+#define WM8995_GP3_LVL                          0x0040	/* GP3_LVL */
+#define WM8995_GP3_LVL_MASK                     0x0040	/* GP3_LVL */
+#define WM8995_GP3_LVL_SHIFT                         6	/* GP3_LVL */
+#define WM8995_GP3_LVL_WIDTH                         1	/* GP3_LVL */
+#define WM8995_GP3_FN_MASK                      0x001F	/* GP3_FN - [4:0] */
+#define WM8995_GP3_FN_SHIFT                          0	/* GP3_FN - [4:0] */
+#define WM8995_GP3_FN_WIDTH                          5	/* GP3_FN - [4:0] */
+
+/*
+ * R1795 (0x703) - GPIO 4
+ */
+#define WM8995_GP4_DIR                          0x8000	/* GP4_DIR */
+#define WM8995_GP4_DIR_MASK                     0x8000	/* GP4_DIR */
+#define WM8995_GP4_DIR_SHIFT                        15	/* GP4_DIR */
+#define WM8995_GP4_DIR_WIDTH                         1	/* GP4_DIR */
+#define WM8995_GP4_PU                           0x4000	/* GP4_PU */
+#define WM8995_GP4_PU_MASK                      0x4000	/* GP4_PU */
+#define WM8995_GP4_PU_SHIFT                         14	/* GP4_PU */
+#define WM8995_GP4_PU_WIDTH                          1	/* GP4_PU */
+#define WM8995_GP4_PD                           0x2000	/* GP4_PD */
+#define WM8995_GP4_PD_MASK                      0x2000	/* GP4_PD */
+#define WM8995_GP4_PD_SHIFT                         13	/* GP4_PD */
+#define WM8995_GP4_PD_WIDTH                          1	/* GP4_PD */
+#define WM8995_GP4_POL                          0x0400	/* GP4_POL */
+#define WM8995_GP4_POL_MASK                     0x0400	/* GP4_POL */
+#define WM8995_GP4_POL_SHIFT                        10	/* GP4_POL */
+#define WM8995_GP4_POL_WIDTH                         1	/* GP4_POL */
+#define WM8995_GP4_OP_CFG                       0x0200	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_MASK                  0x0200	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_SHIFT                      9	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_WIDTH                      1	/* GP4_OP_CFG */
+#define WM8995_GP4_DB                           0x0100	/* GP4_DB */
+#define WM8995_GP4_DB_MASK                      0x0100	/* GP4_DB */
+#define WM8995_GP4_DB_SHIFT                          8	/* GP4_DB */
+#define WM8995_GP4_DB_WIDTH                          1	/* GP4_DB */
+#define WM8995_GP4_LVL                          0x0040	/* GP4_LVL */
+#define WM8995_GP4_LVL_MASK                     0x0040	/* GP4_LVL */
+#define WM8995_GP4_LVL_SHIFT                         6	/* GP4_LVL */
+#define WM8995_GP4_LVL_WIDTH                         1	/* GP4_LVL */
+#define WM8995_GP4_FN_MASK                      0x001F	/* GP4_FN - [4:0] */
+#define WM8995_GP4_FN_SHIFT                          0	/* GP4_FN - [4:0] */
+#define WM8995_GP4_FN_WIDTH                          5	/* GP4_FN - [4:0] */
+
+/*
+ * R1796 (0x704) - GPIO 5
+ */
+#define WM8995_GP5_DIR                          0x8000	/* GP5_DIR */
+#define WM8995_GP5_DIR_MASK                     0x8000	/* GP5_DIR */
+#define WM8995_GP5_DIR_SHIFT                        15	/* GP5_DIR */
+#define WM8995_GP5_DIR_WIDTH                         1	/* GP5_DIR */
+#define WM8995_GP5_PU                           0x4000	/* GP5_PU */
+#define WM8995_GP5_PU_MASK                      0x4000	/* GP5_PU */
+#define WM8995_GP5_PU_SHIFT                         14	/* GP5_PU */
+#define WM8995_GP5_PU_WIDTH                          1	/* GP5_PU */
+#define WM8995_GP5_PD                           0x2000	/* GP5_PD */
+#define WM8995_GP5_PD_MASK                      0x2000	/* GP5_PD */
+#define WM8995_GP5_PD_SHIFT                         13	/* GP5_PD */
+#define WM8995_GP5_PD_WIDTH                          1	/* GP5_PD */
+#define WM8995_GP5_POL                          0x0400	/* GP5_POL */
+#define WM8995_GP5_POL_MASK                     0x0400	/* GP5_POL */
+#define WM8995_GP5_POL_SHIFT                        10	/* GP5_POL */
+#define WM8995_GP5_POL_WIDTH                         1	/* GP5_POL */
+#define WM8995_GP5_OP_CFG                       0x0200	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_MASK                  0x0200	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_SHIFT                      9	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_WIDTH                      1	/* GP5_OP_CFG */
+#define WM8995_GP5_DB                           0x0100	/* GP5_DB */
+#define WM8995_GP5_DB_MASK                      0x0100	/* GP5_DB */
+#define WM8995_GP5_DB_SHIFT                          8	/* GP5_DB */
+#define WM8995_GP5_DB_WIDTH                          1	/* GP5_DB */
+#define WM8995_GP5_LVL                          0x0040	/* GP5_LVL */
+#define WM8995_GP5_LVL_MASK                     0x0040	/* GP5_LVL */
+#define WM8995_GP5_LVL_SHIFT                         6	/* GP5_LVL */
+#define WM8995_GP5_LVL_WIDTH                         1	/* GP5_LVL */
+#define WM8995_GP5_FN_MASK                      0x001F	/* GP5_FN - [4:0] */
+#define WM8995_GP5_FN_SHIFT                          0	/* GP5_FN - [4:0] */
+#define WM8995_GP5_FN_WIDTH                          5	/* GP5_FN - [4:0] */
+
+/*
+ * R1797 (0x705) - GPIO 6
+ */
+#define WM8995_GP6_DIR                          0x8000	/* GP6_DIR */
+#define WM8995_GP6_DIR_MASK                     0x8000	/* GP6_DIR */
+#define WM8995_GP6_DIR_SHIFT                        15	/* GP6_DIR */
+#define WM8995_GP6_DIR_WIDTH                         1	/* GP6_DIR */
+#define WM8995_GP6_PU                           0x4000	/* GP6_PU */
+#define WM8995_GP6_PU_MASK                      0x4000	/* GP6_PU */
+#define WM8995_GP6_PU_SHIFT                         14	/* GP6_PU */
+#define WM8995_GP6_PU_WIDTH                          1	/* GP6_PU */
+#define WM8995_GP6_PD                           0x2000	/* GP6_PD */
+#define WM8995_GP6_PD_MASK                      0x2000	/* GP6_PD */
+#define WM8995_GP6_PD_SHIFT                         13	/* GP6_PD */
+#define WM8995_GP6_PD_WIDTH                          1	/* GP6_PD */
+#define WM8995_GP6_POL                          0x0400	/* GP6_POL */
+#define WM8995_GP6_POL_MASK                     0x0400	/* GP6_POL */
+#define WM8995_GP6_POL_SHIFT                        10	/* GP6_POL */
+#define WM8995_GP6_POL_WIDTH                         1	/* GP6_POL */
+#define WM8995_GP6_OP_CFG                       0x0200	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_MASK                  0x0200	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_SHIFT                      9	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_WIDTH                      1	/* GP6_OP_CFG */
+#define WM8995_GP6_DB                           0x0100	/* GP6_DB */
+#define WM8995_GP6_DB_MASK                      0x0100	/* GP6_DB */
+#define WM8995_GP6_DB_SHIFT                          8	/* GP6_DB */
+#define WM8995_GP6_DB_WIDTH                          1	/* GP6_DB */
+#define WM8995_GP6_LVL                          0x0040	/* GP6_LVL */
+#define WM8995_GP6_LVL_MASK                     0x0040	/* GP6_LVL */
+#define WM8995_GP6_LVL_SHIFT                         6	/* GP6_LVL */
+#define WM8995_GP6_LVL_WIDTH                         1	/* GP6_LVL */
+#define WM8995_GP6_FN_MASK                      0x001F	/* GP6_FN - [4:0] */
+#define WM8995_GP6_FN_SHIFT                          0	/* GP6_FN - [4:0] */
+#define WM8995_GP6_FN_WIDTH                          5	/* GP6_FN - [4:0] */
+
+/*
+ * R1798 (0x706) - GPIO 7
+ */
+#define WM8995_GP7_DIR                          0x8000	/* GP7_DIR */
+#define WM8995_GP7_DIR_MASK                     0x8000	/* GP7_DIR */
+#define WM8995_GP7_DIR_SHIFT                        15	/* GP7_DIR */
+#define WM8995_GP7_DIR_WIDTH                         1	/* GP7_DIR */
+#define WM8995_GP7_PU                           0x4000	/* GP7_PU */
+#define WM8995_GP7_PU_MASK                      0x4000	/* GP7_PU */
+#define WM8995_GP7_PU_SHIFT                         14	/* GP7_PU */
+#define WM8995_GP7_PU_WIDTH                          1	/* GP7_PU */
+#define WM8995_GP7_PD                           0x2000	/* GP7_PD */
+#define WM8995_GP7_PD_MASK                      0x2000	/* GP7_PD */
+#define WM8995_GP7_PD_SHIFT                         13	/* GP7_PD */
+#define WM8995_GP7_PD_WIDTH                          1	/* GP7_PD */
+#define WM8995_GP7_POL                          0x0400	/* GP7_POL */
+#define WM8995_GP7_POL_MASK                     0x0400	/* GP7_POL */
+#define WM8995_GP7_POL_SHIFT                        10	/* GP7_POL */
+#define WM8995_GP7_POL_WIDTH                         1	/* GP7_POL */
+#define WM8995_GP7_OP_CFG                       0x0200	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_MASK                  0x0200	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_SHIFT                      9	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_WIDTH                      1	/* GP7_OP_CFG */
+#define WM8995_GP7_DB                           0x0100	/* GP7_DB */
+#define WM8995_GP7_DB_MASK                      0x0100	/* GP7_DB */
+#define WM8995_GP7_DB_SHIFT                          8	/* GP7_DB */
+#define WM8995_GP7_DB_WIDTH                          1	/* GP7_DB */
+#define WM8995_GP7_LVL                          0x0040	/* GP7_LVL */
+#define WM8995_GP7_LVL_MASK                     0x0040	/* GP7_LVL */
+#define WM8995_GP7_LVL_SHIFT                         6	/* GP7_LVL */
+#define WM8995_GP7_LVL_WIDTH                         1	/* GP7_LVL */
+#define WM8995_GP7_FN_MASK                      0x001F	/* GP7_FN - [4:0] */
+#define WM8995_GP7_FN_SHIFT                          0	/* GP7_FN - [4:0] */
+#define WM8995_GP7_FN_WIDTH                          5	/* GP7_FN - [4:0] */
+
+/*
+ * R1799 (0x707) - GPIO 8
+ */
+#define WM8995_GP8_DIR                          0x8000	/* GP8_DIR */
+#define WM8995_GP8_DIR_MASK                     0x8000	/* GP8_DIR */
+#define WM8995_GP8_DIR_SHIFT                        15	/* GP8_DIR */
+#define WM8995_GP8_DIR_WIDTH                         1	/* GP8_DIR */
+#define WM8995_GP8_PU                           0x4000	/* GP8_PU */
+#define WM8995_GP8_PU_MASK                      0x4000	/* GP8_PU */
+#define WM8995_GP8_PU_SHIFT                         14	/* GP8_PU */
+#define WM8995_GP8_PU_WIDTH                          1	/* GP8_PU */
+#define WM8995_GP8_PD                           0x2000	/* GP8_PD */
+#define WM8995_GP8_PD_MASK                      0x2000	/* GP8_PD */
+#define WM8995_GP8_PD_SHIFT                         13	/* GP8_PD */
+#define WM8995_GP8_PD_WIDTH                          1	/* GP8_PD */
+#define WM8995_GP8_POL                          0x0400	/* GP8_POL */
+#define WM8995_GP8_POL_MASK                     0x0400	/* GP8_POL */
+#define WM8995_GP8_POL_SHIFT                        10	/* GP8_POL */
+#define WM8995_GP8_POL_WIDTH                         1	/* GP8_POL */
+#define WM8995_GP8_OP_CFG                       0x0200	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_MASK                  0x0200	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_SHIFT                      9	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_WIDTH                      1	/* GP8_OP_CFG */
+#define WM8995_GP8_DB                           0x0100	/* GP8_DB */
+#define WM8995_GP8_DB_MASK                      0x0100	/* GP8_DB */
+#define WM8995_GP8_DB_SHIFT                          8	/* GP8_DB */
+#define WM8995_GP8_DB_WIDTH                          1	/* GP8_DB */
+#define WM8995_GP8_LVL                          0x0040	/* GP8_LVL */
+#define WM8995_GP8_LVL_MASK                     0x0040	/* GP8_LVL */
+#define WM8995_GP8_LVL_SHIFT                         6	/* GP8_LVL */
+#define WM8995_GP8_LVL_WIDTH                         1	/* GP8_LVL */
+#define WM8995_GP8_FN_MASK                      0x001F	/* GP8_FN - [4:0] */
+#define WM8995_GP8_FN_SHIFT                          0	/* GP8_FN - [4:0] */
+#define WM8995_GP8_FN_WIDTH                          5	/* GP8_FN - [4:0] */
+
+/*
+ * R1800 (0x708) - GPIO 9
+ */
+#define WM8995_GP9_DIR                          0x8000	/* GP9_DIR */
+#define WM8995_GP9_DIR_MASK                     0x8000	/* GP9_DIR */
+#define WM8995_GP9_DIR_SHIFT                        15	/* GP9_DIR */
+#define WM8995_GP9_DIR_WIDTH                         1	/* GP9_DIR */
+#define WM8995_GP9_PU                           0x4000	/* GP9_PU */
+#define WM8995_GP9_PU_MASK                      0x4000	/* GP9_PU */
+#define WM8995_GP9_PU_SHIFT                         14	/* GP9_PU */
+#define WM8995_GP9_PU_WIDTH                          1	/* GP9_PU */
+#define WM8995_GP9_PD                           0x2000	/* GP9_PD */
+#define WM8995_GP9_PD_MASK                      0x2000	/* GP9_PD */
+#define WM8995_GP9_PD_SHIFT                         13	/* GP9_PD */
+#define WM8995_GP9_PD_WIDTH                          1	/* GP9_PD */
+#define WM8995_GP9_POL                          0x0400	/* GP9_POL */
+#define WM8995_GP9_POL_MASK                     0x0400	/* GP9_POL */
+#define WM8995_GP9_POL_SHIFT                        10	/* GP9_POL */
+#define WM8995_GP9_POL_WIDTH                         1	/* GP9_POL */
+#define WM8995_GP9_OP_CFG                       0x0200	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_MASK                  0x0200	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_SHIFT                      9	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_WIDTH                      1	/* GP9_OP_CFG */
+#define WM8995_GP9_DB                           0x0100	/* GP9_DB */
+#define WM8995_GP9_DB_MASK                      0x0100	/* GP9_DB */
+#define WM8995_GP9_DB_SHIFT                          8	/* GP9_DB */
+#define WM8995_GP9_DB_WIDTH                          1	/* GP9_DB */
+#define WM8995_GP9_LVL                          0x0040	/* GP9_LVL */
+#define WM8995_GP9_LVL_MASK                     0x0040	/* GP9_LVL */
+#define WM8995_GP9_LVL_SHIFT                         6	/* GP9_LVL */
+#define WM8995_GP9_LVL_WIDTH                         1	/* GP9_LVL */
+#define WM8995_GP9_FN_MASK                      0x001F	/* GP9_FN - [4:0] */
+#define WM8995_GP9_FN_SHIFT                          0	/* GP9_FN - [4:0] */
+#define WM8995_GP9_FN_WIDTH                          5	/* GP9_FN - [4:0] */
+
+/*
+ * R1801 (0x709) - GPIO 10
+ */
+#define WM8995_GP10_DIR                         0x8000	/* GP10_DIR */
+#define WM8995_GP10_DIR_MASK                    0x8000	/* GP10_DIR */
+#define WM8995_GP10_DIR_SHIFT                       15	/* GP10_DIR */
+#define WM8995_GP10_DIR_WIDTH                        1	/* GP10_DIR */
+#define WM8995_GP10_PU                          0x4000	/* GP10_PU */
+#define WM8995_GP10_PU_MASK                     0x4000	/* GP10_PU */
+#define WM8995_GP10_PU_SHIFT                        14	/* GP10_PU */
+#define WM8995_GP10_PU_WIDTH                         1	/* GP10_PU */
+#define WM8995_GP10_PD                          0x2000	/* GP10_PD */
+#define WM8995_GP10_PD_MASK                     0x2000	/* GP10_PD */
+#define WM8995_GP10_PD_SHIFT                        13	/* GP10_PD */
+#define WM8995_GP10_PD_WIDTH                         1	/* GP10_PD */
+#define WM8995_GP10_POL                         0x0400	/* GP10_POL */
+#define WM8995_GP10_POL_MASK                    0x0400	/* GP10_POL */
+#define WM8995_GP10_POL_SHIFT                       10	/* GP10_POL */
+#define WM8995_GP10_POL_WIDTH                        1	/* GP10_POL */
+#define WM8995_GP10_OP_CFG                      0x0200	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_MASK                 0x0200	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_SHIFT                     9	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_WIDTH                     1	/* GP10_OP_CFG */
+#define WM8995_GP10_DB                          0x0100	/* GP10_DB */
+#define WM8995_GP10_DB_MASK                     0x0100	/* GP10_DB */
+#define WM8995_GP10_DB_SHIFT                         8	/* GP10_DB */
+#define WM8995_GP10_DB_WIDTH                         1	/* GP10_DB */
+#define WM8995_GP10_LVL                         0x0040	/* GP10_LVL */
+#define WM8995_GP10_LVL_MASK                    0x0040	/* GP10_LVL */
+#define WM8995_GP10_LVL_SHIFT                        6	/* GP10_LVL */
+#define WM8995_GP10_LVL_WIDTH                        1	/* GP10_LVL */
+#define WM8995_GP10_FN_MASK                     0x001F	/* GP10_FN - [4:0] */
+#define WM8995_GP10_FN_SHIFT                         0	/* GP10_FN - [4:0] */
+#define WM8995_GP10_FN_WIDTH                         5	/* GP10_FN - [4:0] */
+
+/*
+ * R1802 (0x70A) - GPIO 11
+ */
+#define WM8995_GP11_DIR                         0x8000	/* GP11_DIR */
+#define WM8995_GP11_DIR_MASK                    0x8000	/* GP11_DIR */
+#define WM8995_GP11_DIR_SHIFT                       15	/* GP11_DIR */
+#define WM8995_GP11_DIR_WIDTH                        1	/* GP11_DIR */
+#define WM8995_GP11_PU                          0x4000	/* GP11_PU */
+#define WM8995_GP11_PU_MASK                     0x4000	/* GP11_PU */
+#define WM8995_GP11_PU_SHIFT                        14	/* GP11_PU */
+#define WM8995_GP11_PU_WIDTH                         1	/* GP11_PU */
+#define WM8995_GP11_PD                          0x2000	/* GP11_PD */
+#define WM8995_GP11_PD_MASK                     0x2000	/* GP11_PD */
+#define WM8995_GP11_PD_SHIFT                        13	/* GP11_PD */
+#define WM8995_GP11_PD_WIDTH                         1	/* GP11_PD */
+#define WM8995_GP11_POL                         0x0400	/* GP11_POL */
+#define WM8995_GP11_POL_MASK                    0x0400	/* GP11_POL */
+#define WM8995_GP11_POL_SHIFT                       10	/* GP11_POL */
+#define WM8995_GP11_POL_WIDTH                        1	/* GP11_POL */
+#define WM8995_GP11_OP_CFG                      0x0200	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_MASK                 0x0200	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_SHIFT                     9	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_WIDTH                     1	/* GP11_OP_CFG */
+#define WM8995_GP11_DB                          0x0100	/* GP11_DB */
+#define WM8995_GP11_DB_MASK                     0x0100	/* GP11_DB */
+#define WM8995_GP11_DB_SHIFT                         8	/* GP11_DB */
+#define WM8995_GP11_DB_WIDTH                         1	/* GP11_DB */
+#define WM8995_GP11_LVL                         0x0040	/* GP11_LVL */
+#define WM8995_GP11_LVL_MASK                    0x0040	/* GP11_LVL */
+#define WM8995_GP11_LVL_SHIFT                        6	/* GP11_LVL */
+#define WM8995_GP11_LVL_WIDTH                        1	/* GP11_LVL */
+#define WM8995_GP11_FN_MASK                     0x001F	/* GP11_FN - [4:0] */
+#define WM8995_GP11_FN_SHIFT                         0	/* GP11_FN - [4:0] */
+#define WM8995_GP11_FN_WIDTH                         5	/* GP11_FN - [4:0] */
+
+/*
+ * R1803 (0x70B) - GPIO 12
+ */
+#define WM8995_GP12_DIR                         0x8000	/* GP12_DIR */
+#define WM8995_GP12_DIR_MASK                    0x8000	/* GP12_DIR */
+#define WM8995_GP12_DIR_SHIFT                       15	/* GP12_DIR */
+#define WM8995_GP12_DIR_WIDTH                        1	/* GP12_DIR */
+#define WM8995_GP12_PU                          0x4000	/* GP12_PU */
+#define WM8995_GP12_PU_MASK                     0x4000	/* GP12_PU */
+#define WM8995_GP12_PU_SHIFT                        14	/* GP12_PU */
+#define WM8995_GP12_PU_WIDTH                         1	/* GP12_PU */
+#define WM8995_GP12_PD                          0x2000	/* GP12_PD */
+#define WM8995_GP12_PD_MASK                     0x2000	/* GP12_PD */
+#define WM8995_GP12_PD_SHIFT                        13	/* GP12_PD */
+#define WM8995_GP12_PD_WIDTH                         1	/* GP12_PD */
+#define WM8995_GP12_POL                         0x0400	/* GP12_POL */
+#define WM8995_GP12_POL_MASK                    0x0400	/* GP12_POL */
+#define WM8995_GP12_POL_SHIFT                       10	/* GP12_POL */
+#define WM8995_GP12_POL_WIDTH                        1	/* GP12_POL */
+#define WM8995_GP12_OP_CFG                      0x0200	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_MASK                 0x0200	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_SHIFT                     9	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_WIDTH                     1	/* GP12_OP_CFG */
+#define WM8995_GP12_DB                          0x0100	/* GP12_DB */
+#define WM8995_GP12_DB_MASK                     0x0100	/* GP12_DB */
+#define WM8995_GP12_DB_SHIFT                         8	/* GP12_DB */
+#define WM8995_GP12_DB_WIDTH                         1	/* GP12_DB */
+#define WM8995_GP12_LVL                         0x0040	/* GP12_LVL */
+#define WM8995_GP12_LVL_MASK                    0x0040	/* GP12_LVL */
+#define WM8995_GP12_LVL_SHIFT                        6	/* GP12_LVL */
+#define WM8995_GP12_LVL_WIDTH                        1	/* GP12_LVL */
+#define WM8995_GP12_FN_MASK                     0x001F	/* GP12_FN - [4:0] */
+#define WM8995_GP12_FN_SHIFT                         0	/* GP12_FN - [4:0] */
+#define WM8995_GP12_FN_WIDTH                         5	/* GP12_FN - [4:0] */
+
+/*
+ * R1804 (0x70C) - GPIO 13
+ */
+#define WM8995_GP13_DIR                         0x8000	/* GP13_DIR */
+#define WM8995_GP13_DIR_MASK                    0x8000	/* GP13_DIR */
+#define WM8995_GP13_DIR_SHIFT                       15	/* GP13_DIR */
+#define WM8995_GP13_DIR_WIDTH                        1	/* GP13_DIR */
+#define WM8995_GP13_PU                          0x4000	/* GP13_PU */
+#define WM8995_GP13_PU_MASK                     0x4000	/* GP13_PU */
+#define WM8995_GP13_PU_SHIFT                        14	/* GP13_PU */
+#define WM8995_GP13_PU_WIDTH                         1	/* GP13_PU */
+#define WM8995_GP13_PD                          0x2000	/* GP13_PD */
+#define WM8995_GP13_PD_MASK                     0x2000	/* GP13_PD */
+#define WM8995_GP13_PD_SHIFT                        13	/* GP13_PD */
+#define WM8995_GP13_PD_WIDTH                         1	/* GP13_PD */
+#define WM8995_GP13_POL                         0x0400	/* GP13_POL */
+#define WM8995_GP13_POL_MASK                    0x0400	/* GP13_POL */
+#define WM8995_GP13_POL_SHIFT                       10	/* GP13_POL */
+#define WM8995_GP13_POL_WIDTH                        1	/* GP13_POL */
+#define WM8995_GP13_OP_CFG                      0x0200	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_MASK                 0x0200	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_SHIFT                     9	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_WIDTH                     1	/* GP13_OP_CFG */
+#define WM8995_GP13_DB                          0x0100	/* GP13_DB */
+#define WM8995_GP13_DB_MASK                     0x0100	/* GP13_DB */
+#define WM8995_GP13_DB_SHIFT                         8	/* GP13_DB */
+#define WM8995_GP13_DB_WIDTH                         1	/* GP13_DB */
+#define WM8995_GP13_LVL                         0x0040	/* GP13_LVL */
+#define WM8995_GP13_LVL_MASK                    0x0040	/* GP13_LVL */
+#define WM8995_GP13_LVL_SHIFT                        6	/* GP13_LVL */
+#define WM8995_GP13_LVL_WIDTH                        1	/* GP13_LVL */
+#define WM8995_GP13_FN_MASK                     0x001F	/* GP13_FN - [4:0] */
+#define WM8995_GP13_FN_SHIFT                         0	/* GP13_FN - [4:0] */
+#define WM8995_GP13_FN_WIDTH                         5	/* GP13_FN - [4:0] */
+
+/*
+ * R1805 (0x70D) - GPIO 14
+ */
+#define WM8995_GP14_DIR                         0x8000	/* GP14_DIR */
+#define WM8995_GP14_DIR_MASK                    0x8000	/* GP14_DIR */
+#define WM8995_GP14_DIR_SHIFT                       15	/* GP14_DIR */
+#define WM8995_GP14_DIR_WIDTH                        1	/* GP14_DIR */
+#define WM8995_GP14_PU                          0x4000	/* GP14_PU */
+#define WM8995_GP14_PU_MASK                     0x4000	/* GP14_PU */
+#define WM8995_GP14_PU_SHIFT                        14	/* GP14_PU */
+#define WM8995_GP14_PU_WIDTH                         1	/* GP14_PU */
+#define WM8995_GP14_PD                          0x2000	/* GP14_PD */
+#define WM8995_GP14_PD_MASK                     0x2000	/* GP14_PD */
+#define WM8995_GP14_PD_SHIFT                        13	/* GP14_PD */
+#define WM8995_GP14_PD_WIDTH                         1	/* GP14_PD */
+#define WM8995_GP14_POL                         0x0400	/* GP14_POL */
+#define WM8995_GP14_POL_MASK                    0x0400	/* GP14_POL */
+#define WM8995_GP14_POL_SHIFT                       10	/* GP14_POL */
+#define WM8995_GP14_POL_WIDTH                        1	/* GP14_POL */
+#define WM8995_GP14_OP_CFG                      0x0200	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_MASK                 0x0200	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_SHIFT                     9	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_WIDTH                     1	/* GP14_OP_CFG */
+#define WM8995_GP14_DB                          0x0100	/* GP14_DB */
+#define WM8995_GP14_DB_MASK                     0x0100	/* GP14_DB */
+#define WM8995_GP14_DB_SHIFT                         8	/* GP14_DB */
+#define WM8995_GP14_DB_WIDTH                         1	/* GP14_DB */
+#define WM8995_GP14_LVL                         0x0040	/* GP14_LVL */
+#define WM8995_GP14_LVL_MASK                    0x0040	/* GP14_LVL */
+#define WM8995_GP14_LVL_SHIFT                        6	/* GP14_LVL */
+#define WM8995_GP14_LVL_WIDTH                        1	/* GP14_LVL */
+#define WM8995_GP14_FN_MASK                     0x001F	/* GP14_FN - [4:0] */
+#define WM8995_GP14_FN_SHIFT                         0	/* GP14_FN - [4:0] */
+#define WM8995_GP14_FN_WIDTH                         5	/* GP14_FN - [4:0] */
+
+/*
+ * R1824 (0x720) - Pull Control (1)
+ */
+#define WM8995_DMICDAT3_PD                      0x4000	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_MASK                 0x4000	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_SHIFT                    14	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_WIDTH                     1	/* DMICDAT3_PD */
+#define WM8995_DMICDAT2_PD                      0x1000	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_MASK                 0x1000	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_SHIFT                    12	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_WIDTH                     1	/* DMICDAT2_PD */
+#define WM8995_DMICDAT1_PD                      0x0400	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_MASK                 0x0400	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_SHIFT                    10	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_WIDTH                     1	/* DMICDAT1_PD */
+#define WM8995_MCLK2_PU                         0x0200	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_MASK                    0x0200	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_SHIFT                        9	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_WIDTH                        1	/* MCLK2_PU */
+#define WM8995_MCLK2_PD                         0x0100	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_MASK                    0x0100	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_SHIFT                        8	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_WIDTH                        1	/* MCLK2_PD */
+#define WM8995_MCLK1_PU                         0x0080	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_MASK                    0x0080	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_SHIFT                        7	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_WIDTH                        1	/* MCLK1_PU */
+#define WM8995_MCLK1_PD                         0x0040	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_MASK                    0x0040	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_SHIFT                        6	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_WIDTH                        1	/* MCLK1_PD */
+#define WM8995_DACDAT1_PU                       0x0020	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_MASK                  0x0020	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_SHIFT                      5	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_WIDTH                      1	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PD                       0x0010	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_MASK                  0x0010	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_SHIFT                      4	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_WIDTH                      1	/* DACDAT1_PD */
+#define WM8995_DACLRCLK1_PU                     0x0008	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_MASK                0x0008	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_SHIFT                    3	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_WIDTH                    1	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PD                     0x0004	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_MASK                0x0004	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_SHIFT                    2	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_WIDTH                    1	/* DACLRCLK1_PD */
+#define WM8995_BCLK1_PU                         0x0002	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_MASK                    0x0002	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_SHIFT                        1	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_WIDTH                        1	/* BCLK1_PU */
+#define WM8995_BCLK1_PD                         0x0001	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_MASK                    0x0001	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_SHIFT                        0	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_WIDTH                        1	/* BCLK1_PD */
+
+/*
+ * R1825 (0x721) - Pull Control (2)
+ */
+#define WM8995_LDO1ENA_PD                       0x0010	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_MASK                  0x0010	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_SHIFT                      4	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_WIDTH                      1	/* LDO1ENA_PD */
+#define WM8995_MODE_PD                          0x0004	/* MODE_PD */
+#define WM8995_MODE_PD_MASK                     0x0004	/* MODE_PD */
+#define WM8995_MODE_PD_SHIFT                         2	/* MODE_PD */
+#define WM8995_MODE_PD_WIDTH                         1	/* MODE_PD */
+#define WM8995_CSNADDR_PD                       0x0001	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_MASK                  0x0001	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_SHIFT                      0	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_WIDTH                      1	/* CSNADDR_PD */
+
+/*
+ * R1840 (0x730) - Interrupt Status 1
+ */
+#define WM8995_GP14_EINT                        0x2000	/* GP14_EINT */
+#define WM8995_GP14_EINT_MASK                   0x2000	/* GP14_EINT */
+#define WM8995_GP14_EINT_SHIFT                      13	/* GP14_EINT */
+#define WM8995_GP14_EINT_WIDTH                       1	/* GP14_EINT */
+#define WM8995_GP13_EINT                        0x1000	/* GP13_EINT */
+#define WM8995_GP13_EINT_MASK                   0x1000	/* GP13_EINT */
+#define WM8995_GP13_EINT_SHIFT                      12	/* GP13_EINT */
+#define WM8995_GP13_EINT_WIDTH                       1	/* GP13_EINT */
+#define WM8995_GP12_EINT                        0x0800	/* GP12_EINT */
+#define WM8995_GP12_EINT_MASK                   0x0800	/* GP12_EINT */
+#define WM8995_GP12_EINT_SHIFT                      11	/* GP12_EINT */
+#define WM8995_GP12_EINT_WIDTH                       1	/* GP12_EINT */
+#define WM8995_GP11_EINT                        0x0400	/* GP11_EINT */
+#define WM8995_GP11_EINT_MASK                   0x0400	/* GP11_EINT */
+#define WM8995_GP11_EINT_SHIFT                      10	/* GP11_EINT */
+#define WM8995_GP11_EINT_WIDTH                       1	/* GP11_EINT */
+#define WM8995_GP10_EINT                        0x0200	/* GP10_EINT */
+#define WM8995_GP10_EINT_MASK                   0x0200	/* GP10_EINT */
+#define WM8995_GP10_EINT_SHIFT                       9	/* GP10_EINT */
+#define WM8995_GP10_EINT_WIDTH                       1	/* GP10_EINT */
+#define WM8995_GP9_EINT                         0x0100	/* GP9_EINT */
+#define WM8995_GP9_EINT_MASK                    0x0100	/* GP9_EINT */
+#define WM8995_GP9_EINT_SHIFT                        8	/* GP9_EINT */
+#define WM8995_GP9_EINT_WIDTH                        1	/* GP9_EINT */
+#define WM8995_GP8_EINT                         0x0080	/* GP8_EINT */
+#define WM8995_GP8_EINT_MASK                    0x0080	/* GP8_EINT */
+#define WM8995_GP8_EINT_SHIFT                        7	/* GP8_EINT */
+#define WM8995_GP8_EINT_WIDTH                        1	/* GP8_EINT */
+#define WM8995_GP7_EINT                         0x0040	/* GP7_EINT */
+#define WM8995_GP7_EINT_MASK                    0x0040	/* GP7_EINT */
+#define WM8995_GP7_EINT_SHIFT                        6	/* GP7_EINT */
+#define WM8995_GP7_EINT_WIDTH                        1	/* GP7_EINT */
+#define WM8995_GP6_EINT                         0x0020	/* GP6_EINT */
+#define WM8995_GP6_EINT_MASK                    0x0020	/* GP6_EINT */
+#define WM8995_GP6_EINT_SHIFT                        5	/* GP6_EINT */
+#define WM8995_GP6_EINT_WIDTH                        1	/* GP6_EINT */
+#define WM8995_GP5_EINT                         0x0010	/* GP5_EINT */
+#define WM8995_GP5_EINT_MASK                    0x0010	/* GP5_EINT */
+#define WM8995_GP5_EINT_SHIFT                        4	/* GP5_EINT */
+#define WM8995_GP5_EINT_WIDTH                        1	/* GP5_EINT */
+#define WM8995_GP4_EINT                         0x0008	/* GP4_EINT */
+#define WM8995_GP4_EINT_MASK                    0x0008	/* GP4_EINT */
+#define WM8995_GP4_EINT_SHIFT                        3	/* GP4_EINT */
+#define WM8995_GP4_EINT_WIDTH                        1	/* GP4_EINT */
+#define WM8995_GP3_EINT                         0x0004	/* GP3_EINT */
+#define WM8995_GP3_EINT_MASK                    0x0004	/* GP3_EINT */
+#define WM8995_GP3_EINT_SHIFT                        2	/* GP3_EINT */
+#define WM8995_GP3_EINT_WIDTH                        1	/* GP3_EINT */
+#define WM8995_GP2_EINT                         0x0002	/* GP2_EINT */
+#define WM8995_GP2_EINT_MASK                    0x0002	/* GP2_EINT */
+#define WM8995_GP2_EINT_SHIFT                        1	/* GP2_EINT */
+#define WM8995_GP2_EINT_WIDTH                        1	/* GP2_EINT */
+#define WM8995_GP1_EINT                         0x0001	/* GP1_EINT */
+#define WM8995_GP1_EINT_MASK                    0x0001	/* GP1_EINT */
+#define WM8995_GP1_EINT_SHIFT                        0	/* GP1_EINT */
+#define WM8995_GP1_EINT_WIDTH                        1	/* GP1_EINT */
+
+/*
+ * R1841 (0x731) - Interrupt Status 2
+ */
+#define WM8995_DCS_DONE_23_EINT                 0x1000	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_MASK            0x1000	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_SHIFT               12	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_WIDTH                1	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_01_EINT                 0x0800	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_MASK            0x0800	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_SHIFT               11	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_WIDTH                1	/* DCS_DONE_01_EINT */
+#define WM8995_WSEQ_DONE_EINT                   0x0400	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_MASK              0x0400	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_SHIFT                 10	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_WIDTH                  1	/* WSEQ_DONE_EINT */
+#define WM8995_FIFOS_ERR_EINT                   0x0200	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_MASK              0x0200	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_SHIFT                  9	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_WIDTH                  1	/* FIFOS_ERR_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT             0x0100	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_MASK        0x0100	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_SHIFT            8	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_WIDTH            1	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT            0x0080	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_MASK       0x0080	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_SHIFT           7	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_WIDTH           1	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT            0x0040	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_MASK       0x0040	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_SHIFT           6	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_WIDTH           1	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_SRC2_LOCK_EINT                   0x0020	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_MASK              0x0020	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_SHIFT                  5	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_WIDTH                  1	/* SRC2_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT                   0x0010	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_MASK              0x0010	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_SHIFT                  4	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_WIDTH                  1	/* SRC1_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT                   0x0008	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_MASK              0x0008	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_SHIFT                  3	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_WIDTH                  1	/* FLL2_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT                   0x0004	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_MASK              0x0004	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_SHIFT                  2	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_WIDTH                  1	/* FLL1_LOCK_EINT */
+#define WM8995_HP_DONE_EINT                     0x0002	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_MASK                0x0002	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_SHIFT                    1	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_WIDTH                    1	/* HP_DONE_EINT */
+#define WM8995_MICD_EINT                        0x0001	/* MICD_EINT */
+#define WM8995_MICD_EINT_MASK                   0x0001	/* MICD_EINT */
+#define WM8995_MICD_EINT_SHIFT                       0	/* MICD_EINT */
+#define WM8995_MICD_EINT_WIDTH                       1	/* MICD_EINT */
+
+/*
+ * R1842 (0x732) - Interrupt Raw Status 2
+ */
+#define WM8995_DCS_DONE_23_STS                  0x1000	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_MASK             0x1000	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_SHIFT                12	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_WIDTH                 1	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_01_STS                  0x0800	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_MASK             0x0800	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_SHIFT                11	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_WIDTH                 1	/* DCS_DONE_01_STS */
+#define WM8995_WSEQ_DONE_STS                    0x0400	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_MASK               0x0400	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_SHIFT                  10	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_WIDTH                   1	/* WSEQ_DONE_STS */
+#define WM8995_FIFOS_ERR_STS                    0x0200	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_MASK               0x0200	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_SHIFT                   9	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_WIDTH                   1	/* FIFOS_ERR_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS              0x0100	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_MASK         0x0100	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_SHIFT             8	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_WIDTH             1	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS             0x0080	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_MASK        0x0080	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_SHIFT            7	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_WIDTH            1	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS             0x0040	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_MASK        0x0040	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_SHIFT            6	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_WIDTH            1	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_SRC2_LOCK_STS                    0x0020	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_MASK               0x0020	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_SHIFT                   5	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_WIDTH                   1	/* SRC2_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS                    0x0010	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_MASK               0x0010	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_SHIFT                   4	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_WIDTH                   1	/* SRC1_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS                    0x0008	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_MASK               0x0008	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_SHIFT                   3	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_WIDTH                   1	/* FLL2_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS                    0x0004	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_MASK               0x0004	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_SHIFT                   2	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_WIDTH                   1	/* FLL1_LOCK_STS */
+
+/*
+ * R1848 (0x738) - Interrupt Status 1 Mask
+ */
+#define WM8995_IM_GP14_EINT                     0x2000	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_MASK                0x2000	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_SHIFT                   13	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_WIDTH                    1	/* IM_GP14_EINT */
+#define WM8995_IM_GP13_EINT                     0x1000	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_MASK                0x1000	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_SHIFT                   12	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_WIDTH                    1	/* IM_GP13_EINT */
+#define WM8995_IM_GP12_EINT                     0x0800	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_MASK                0x0800	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_SHIFT                   11	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_WIDTH                    1	/* IM_GP12_EINT */
+#define WM8995_IM_GP11_EINT                     0x0400	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_MASK                0x0400	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_SHIFT                   10	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_WIDTH                    1	/* IM_GP11_EINT */
+#define WM8995_IM_GP10_EINT                     0x0200	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_MASK                0x0200	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_SHIFT                    9	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_WIDTH                    1	/* IM_GP10_EINT */
+#define WM8995_IM_GP9_EINT                      0x0100	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_MASK                 0x0100	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_SHIFT                     8	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_WIDTH                     1	/* IM_GP9_EINT */
+#define WM8995_IM_GP8_EINT                      0x0080	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_MASK                 0x0080	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_SHIFT                     7	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_WIDTH                     1	/* IM_GP8_EINT */
+#define WM8995_IM_GP7_EINT                      0x0040	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_MASK                 0x0040	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_SHIFT                     6	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_WIDTH                     1	/* IM_GP7_EINT */
+#define WM8995_IM_GP6_EINT                      0x0020	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_MASK                 0x0020	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_SHIFT                     5	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_WIDTH                     1	/* IM_GP6_EINT */
+#define WM8995_IM_GP5_EINT                      0x0010	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_MASK                 0x0010	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_SHIFT                     4	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_WIDTH                     1	/* IM_GP5_EINT */
+#define WM8995_IM_GP4_EINT                      0x0008	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_MASK                 0x0008	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_SHIFT                     3	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_WIDTH                     1	/* IM_GP4_EINT */
+#define WM8995_IM_GP3_EINT                      0x0004	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_MASK                 0x0004	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_SHIFT                     2	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_WIDTH                     1	/* IM_GP3_EINT */
+#define WM8995_IM_GP2_EINT                      0x0002	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_MASK                 0x0002	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_SHIFT                     1	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_WIDTH                     1	/* IM_GP2_EINT */
+#define WM8995_IM_GP1_EINT                      0x0001	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_MASK                 0x0001	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_SHIFT                     0	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_WIDTH                     1	/* IM_GP1_EINT */
+
+/*
+ * R1849 (0x739) - Interrupt Status 2 Mask
+ */
+#define WM8995_IM_DCS_DONE_23_EINT              0x1000	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_MASK         0x1000	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_SHIFT            12	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_WIDTH             1	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT              0x0800	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_MASK         0x0800	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_SHIFT            11	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_WIDTH             1	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT                0x0400	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_MASK           0x0400	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_SHIFT              10	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_WIDTH               1	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT                0x0200	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_MASK           0x0200	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_SHIFT               9	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_WIDTH               1	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT          0x0100	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_MASK     0x0100	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_SHIFT         8	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_WIDTH         1	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT         0x0080	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_MASK    0x0080	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_SHIFT        7	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_WIDTH        1	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT         0x0040	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_MASK    0x0040	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_SHIFT        6	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_WIDTH        1	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT                0x0020	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_MASK           0x0020	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_SHIFT               5	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_WIDTH               1	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT                0x0010	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_MASK           0x0010	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_SHIFT               4	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_WIDTH               1	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT                0x0008	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_MASK           0x0008	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_SHIFT               3	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_WIDTH               1	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT                0x0004	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_MASK           0x0004	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_SHIFT               2	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_WIDTH               1	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_HP_DONE_EINT                  0x0002	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_MASK             0x0002	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_SHIFT                 1	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_WIDTH                 1	/* IM_HP_DONE_EINT */
+#define WM8995_IM_MICD_EINT                     0x0001	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_MASK                0x0001	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_SHIFT                    0	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_WIDTH                    1	/* IM_MICD_EINT */
+
+/*
+ * R1856 (0x740) - Interrupt Control
+ */
+#define WM8995_IM_IRQ                           0x0001	/* IM_IRQ */
+#define WM8995_IM_IRQ_MASK                      0x0001	/* IM_IRQ */
+#define WM8995_IM_IRQ_SHIFT                          0	/* IM_IRQ */
+#define WM8995_IM_IRQ_WIDTH                          1	/* IM_IRQ */
+
+/*
+ * R2048 (0x800) - Left PDM Speaker 1
+ */
+#define WM8995_SPK1L_ENA                        0x0010	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_MASK                   0x0010	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_SHIFT                       4	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_WIDTH                       1	/* SPK1L_ENA */
+#define WM8995_SPK1L_MUTE                       0x0008	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_MASK                  0x0008	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_SHIFT                      3	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_WIDTH                      1	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_ZC                    0x0004	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_MASK               0x0004	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_SHIFT                   2	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_WIDTH                   1	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_SRC_MASK                   0x0003	/* SPK1L_SRC - [1:0] */
+#define WM8995_SPK1L_SRC_SHIFT                       0	/* SPK1L_SRC - [1:0] */
+#define WM8995_SPK1L_SRC_WIDTH                       2	/* SPK1L_SRC - [1:0] */
+
+/*
+ * R2049 (0x801) - Right PDM Speaker 1
+ */
+#define WM8995_SPK1R_ENA                        0x0010	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_MASK                   0x0010	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_SHIFT                       4	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_WIDTH                       1	/* SPK1R_ENA */
+#define WM8995_SPK1R_MUTE                       0x0008	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_MASK                  0x0008	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_SHIFT                      3	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_WIDTH                      1	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_ZC                    0x0004	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_MASK               0x0004	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_SHIFT                   2	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_WIDTH                   1	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_SRC_MASK                   0x0003	/* SPK1R_SRC - [1:0] */
+#define WM8995_SPK1R_SRC_SHIFT                       0	/* SPK1R_SRC - [1:0] */
+#define WM8995_SPK1R_SRC_WIDTH                       2	/* SPK1R_SRC - [1:0] */
+
+/*
+ * R2050 (0x802) - PDM Speaker 1 Mute Sequence
+ */
+#define WM8995_SPK1_MUTE_SEQ1_MASK              0x00FF	/* SPK1_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK1_MUTE_SEQ1_SHIFT                  0	/* SPK1_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK1_MUTE_SEQ1_WIDTH                  8	/* SPK1_MUTE_SEQ1 - [7:0] */
+
+/*
+ * R2056 (0x808) - Left PDM Speaker 2
+ */
+#define WM8995_SPK2L_ENA                        0x0010	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_MASK                   0x0010	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_SHIFT                       4	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_WIDTH                       1	/* SPK2L_ENA */
+#define WM8995_SPK2L_MUTE                       0x0008	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_MASK                  0x0008	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_SHIFT                      3	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_WIDTH                      1	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_ZC                    0x0004	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_MASK               0x0004	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_SHIFT                   2	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_WIDTH                   1	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_SRC_MASK                   0x0003	/* SPK2L_SRC - [1:0] */
+#define WM8995_SPK2L_SRC_SHIFT                       0	/* SPK2L_SRC - [1:0] */
+#define WM8995_SPK2L_SRC_WIDTH                       2	/* SPK2L_SRC - [1:0] */
+
+/*
+ * R2057 (0x809) - Right PDM Speaker 2
+ */
+#define WM8995_SPK2R_ENA                        0x0010	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_MASK                   0x0010	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_SHIFT                       4	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_WIDTH                       1	/* SPK2R_ENA */
+#define WM8995_SPK2R_MUTE                       0x0008	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_MASK                  0x0008	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_SHIFT                      3	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_WIDTH                      1	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_ZC                    0x0004	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_MASK               0x0004	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_SHIFT                   2	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_WIDTH                   1	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_SRC_MASK                   0x0003	/* SPK2R_SRC - [1:0] */
+#define WM8995_SPK2R_SRC_SHIFT                       0	/* SPK2R_SRC - [1:0] */
+#define WM8995_SPK2R_SRC_WIDTH                       2	/* SPK2R_SRC - [1:0] */
+
+/*
+ * R2058 (0x80A) - PDM Speaker 2 Mute Sequence
+ */
+#define WM8995_SPK2_MUTE_SEQ1_MASK              0x00FF	/* SPK2_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK2_MUTE_SEQ1_SHIFT                  0	/* SPK2_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK2_MUTE_SEQ1_WIDTH                  8	/* SPK2_MUTE_SEQ1 - [7:0] */
+
+#define WM8995_CLASS_W_SWITCH(xname, reg, shift, max, invert) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_volsw, \
+	.get = snd_soc_dapm_get_volsw, .put = wm8995_put_class_w, \
+	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) \
+}
+
+struct wm8995_reg_access {
+	u16 read;
+	u16 write;
+	u16 vol;
+};
+
+/* Sources for AIF1/2 SYSCLK - use with set_dai_sysclk() */
+enum clk_src {
+	WM8995_SYSCLK_MCLK1 = 1,
+	WM8995_SYSCLK_MCLK2,
+	WM8995_SYSCLK_FLL1,
+	WM8995_SYSCLK_FLL2,
+	WM8995_SYSCLK_OPCLK
+};
+
+#define WM8995_FLL1 1
+#define WM8995_FLL2 2
+
+#define WM8995_FLL_SRC_MCLK1  1
+#define WM8995_FLL_SRC_MCLK2  2
+#define WM8995_FLL_SRC_LRCLK  3
+#define WM8995_FLL_SRC_BCLK   4
+
+#endif /* _WM8995_H */
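/* Illustrative only -- not part of this patch.  A minimal sketch of how
 * the _MASK/_SHIFT/_WIDTH triplets above are typically consumed with the
 * ASoC register helpers; WM8995_GPIO_6 is assumed to be the register
 * address macro for R1797 (0x705) defined earlier in this header.
 */
static unsigned int wm8995_read_gp6_fn(struct snd_soc_codec *codec)
{
	unsigned int reg = snd_soc_read(codec, WM8995_GPIO_6);

	/* Extract the 5-bit GP6_FN field */
	return (reg & WM8995_GP6_FN_MASK) >> WM8995_GP6_FN_SHIFT;
}

static int wm8995_write_gp6_fn(struct snd_soc_codec *codec, unsigned int fn)
{
	/* Read-modify-write only the GP6_FN bits */
	return snd_soc_update_bits(codec, WM8995_GPIO_6,
				   WM8995_GP6_FN_MASK,
				   fn << WM8995_GP6_FN_SHIFT);
}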
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index a486670..43825b2 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -158,7 +157,6 @@
 struct wm9081_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u16 reg_cache[WM9081_MAX_REGISTER + 1];
 	int sysclk_source;
 	int mclk_rate;
 	int sysclk_rate;
@@ -591,6 +589,10 @@
 	reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT;
 	snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5);
 
+	/* Set gain to the recommended value */
+	snd_soc_update_bits(codec, WM9081_FLL_CONTROL_4,
+			    WM9081_FLL_GAIN_MASK, 0);
+
 	/* Enable the FLL */
 	snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
 
@@ -805,7 +807,7 @@
 
 	case SND_SOC_BIAS_STANDBY:
 		/* Initial cold start */
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Disable LINEOUT discharge */
 			reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
 			reg &= ~WM9081_LINEOUT_DISCH;
@@ -865,7 +867,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1228,6 +1230,7 @@
 static int wm9081_probe(struct snd_soc_codec *codec)
 {
 	struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 	u16 reg;
 
@@ -1269,9 +1272,9 @@
 				     ARRAY_SIZE(wm9081_eq_controls));
 	}
 
-	snd_soc_dapm_new_controls(codec, wm9081_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm9081_dapm_widgets,
 				  ARRAY_SIZE(wm9081_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 6e5f64f..a788c42 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -28,7 +28,6 @@
 #include <linux/slab.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/wm9090.h>
 
@@ -442,31 +441,32 @@
 static int wm9090_add_controls(struct snd_soc_codec *codec)
 {
 	struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 
-	snd_soc_dapm_new_controls(codec, wm9090_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm9090_dapm_widgets,
 				  ARRAY_SIZE(wm9090_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	snd_soc_add_controls(codec, wm9090_controls,
 			     ARRAY_SIZE(wm9090_controls));
 
 	if (wm9090->pdata.lin1_diff) {
-		snd_soc_dapm_add_routes(codec, audio_map_in1_diff,
+		snd_soc_dapm_add_routes(dapm, audio_map_in1_diff,
 					ARRAY_SIZE(audio_map_in1_diff));
 	} else {
-		snd_soc_dapm_add_routes(codec, audio_map_in1_se,
+		snd_soc_dapm_add_routes(dapm, audio_map_in1_se,
 					ARRAY_SIZE(audio_map_in1_se));
 		snd_soc_add_controls(codec, wm9090_in1_se_controls,
 				     ARRAY_SIZE(wm9090_in1_se_controls));
 	}
 
 	if (wm9090->pdata.lin2_diff) {
-		snd_soc_dapm_add_routes(codec, audio_map_in2_diff,
+		snd_soc_dapm_add_routes(dapm, audio_map_in2_diff,
 					ARRAY_SIZE(audio_map_in2_diff));
 	} else {
-		snd_soc_dapm_add_routes(codec, audio_map_in2_se,
+		snd_soc_dapm_add_routes(dapm, audio_map_in2_se,
 					ARRAY_SIZE(audio_map_in2_se));
 		snd_soc_add_controls(codec, wm9090_in2_se_controls,
 				     ARRAY_SIZE(wm9090_in2_se_controls));
@@ -513,7 +513,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Restore the register cache */
 			for (i = 1; i < codec->driver->reg_cache_size; i++) {
 				if (reg_cache[i] == wm9090_reg_defaults[i])
@@ -543,7 +543,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index a144acd..47b357a 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -19,7 +19,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "wm9705.h"
 
@@ -203,9 +202,11 @@
 
 static int wm9705_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9705_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm9705_dapm_widgets,
 					ARRAY_SIZE(wm9705_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index d2f224d..bf5d4ef 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -20,7 +20,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include "wm9712.h"
 
 #define WM9712_VERSION "0.4"
@@ -432,10 +431,11 @@
 
 static int wm9712_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9712_dapm_widgets,
-				  ARRAY_SIZE(wm9712_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm9712_dapm_widgets,
+				  ARRAY_SIZE(wm9712_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -570,7 +570,7 @@
 		ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 7da13b0..38ed985 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -26,7 +26,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "wm9713.h"
 
@@ -647,10 +646,12 @@
 
 static int wm9713_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9713_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm9713_dapm_widgets,
 				  ARRAY_SIZE(wm9713_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1147,7 +1148,7 @@
 		ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 0e24092..c466982 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -22,7 +22,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -94,41 +93,61 @@
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
 	u16 reg, reg_l, reg_r, dcs_cfg;
 
-	/* Set for 32 series updates */
-	snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
-			    WM8993_DCS_SERIES_NO_01_MASK,
-			    32 << WM8993_DCS_SERIES_NO_01_SHIFT);
-	wait_for_dc_servo(codec,
-			  WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1);
+	/* If we're using a digital only path and have a previously
+	 * calibrated DC servo offset stored then use that. */
+	if (hubs->class_w && hubs->class_w_dcs) {
+		dev_dbg(codec->dev, "Using cached DC servo offset %x\n",
+			hubs->class_w_dcs);
+		snd_soc_write(codec, WM8993_DC_SERVO_3, hubs->class_w_dcs);
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_DAC_WR_0 |
+				  WM8993_DCS_TRIG_DAC_WR_1);
+		return;
+	}
+
+	/* Devices not using a DCS code correction have startup mode */
+	if (hubs->dcs_codes) {
+		/* Set for 32 series updates */
+		snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
+				    WM8993_DCS_SERIES_NO_01_MASK,
+				    32 << WM8993_DCS_SERIES_NO_01_SHIFT);
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_SERIES_0 |
+				  WM8993_DCS_TRIG_SERIES_1);
+	} else {
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_STARTUP_0 |
+				  WM8993_DCS_TRIG_STARTUP_1);
+	}
+
+	/* Different chips in the family support different readback
+	 * methods.
+	 */
+	switch (hubs->dcs_readback_mode) {
+	case 0:
+		reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
+			& WM8993_DCS_INTEG_CHAN_0_MASK;
+		reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
+			& WM8993_DCS_INTEG_CHAN_1_MASK;
+		break;
+	case 1:
+		reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
+		reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
+			>> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
+		break;
+	default:
+		WARN(1, "Unknown DCS readback method\n");
+		break;
+	}
+
+	dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
 
 	/* Apply correction to DC servo result */
 	if (hubs->dcs_codes) {
 		dev_dbg(codec->dev, "Applying %d code DC servo correction\n",
 			hubs->dcs_codes);
 
-		/* Different chips in the family support different
-		 * readback methods.
-		 */
-		switch (hubs->dcs_readback_mode) {
-		case 0:
-			reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
-				& WM8993_DCS_INTEG_CHAN_0_MASK;;
-			reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
-				& WM8993_DCS_INTEG_CHAN_1_MASK;
-			break;
-		case 1:
-			reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
-			reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
-				>> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
-			reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
-			break;
-		default:
-			WARN(1, "Unknown DCS readback method\n");
-			break;
-		}
-
-		dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
-
 		/* HPOUT1L */
 		if (reg_l + hubs->dcs_codes > 0 &&
 		    reg_l + hubs->dcs_codes < 0xff)
@@ -148,7 +167,15 @@
 		wait_for_dc_servo(codec,
 				  WM8993_DCS_TRIG_DAC_WR_0 |
 				  WM8993_DCS_TRIG_DAC_WR_1);
+	} else {
+		dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		dcs_cfg |= reg_r;
 	}
+
+	/* Save the calibrated offset if we're in class W mode and
+	 * therefore don't have any analogue signal mixed in. */
+	if (hubs->class_w)
+		hubs->class_w_dcs = dcs_cfg;
 }
 
 /*
@@ -163,6 +190,9 @@
 
 	ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
 
+	/* Updating the analogue gains invalidates the DC servo cache */
+	hubs->class_w_dcs = 0;
+
 	/* If we're applying an offset correction then updating the
 	 * callibration would be likely to introduce further offsets. */
 	if (hubs->dcs_codes)
@@ -791,6 +821,8 @@
 
 int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* Latch volume update bits & default ZC on */
 	snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_1_2_VOLUME,
 			    WM8993_IN1_VU, WM8993_IN1_VU);
@@ -819,7 +851,7 @@
 	snd_soc_add_controls(codec, analogue_snd_controls,
 			     ARRAY_SIZE(analogue_snd_controls));
 
-	snd_soc_dapm_new_controls(codec, analogue_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, analogue_dapm_widgets,
 				  ARRAY_SIZE(analogue_dapm_widgets));
 	return 0;
 }
@@ -828,24 +860,26 @@
 int wm_hubs_add_analogue_routes(struct snd_soc_codec *codec,
 				int lineout1_diff, int lineout2_diff)
 {
-	snd_soc_dapm_add_routes(codec, analogue_routes,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_add_routes(dapm, analogue_routes,
 				ARRAY_SIZE(analogue_routes));
 
 	if (lineout1_diff)
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout1_diff_routes,
 					ARRAY_SIZE(lineout1_diff_routes));
 	else
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout1_se_routes,
 					ARRAY_SIZE(lineout1_se_routes));
 
 	if (lineout2_diff)
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout2_diff_routes,
 					ARRAY_SIZE(lineout2_diff_routes));
 	else
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout2_se_routes,
 					ARRAY_SIZE(lineout2_se_routes));
 
@@ -872,7 +906,7 @@
 	 * VMID as an output and can disable it.
 	 */
 	if (lineout1_diff && lineout2_diff)
-		codec->idle_bias_off = 1;
+		codec->dapm.idle_bias_off = 1;
 
 	if (lineout1fb)
 		snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
index e51c166..f8a5e97 100644
--- a/sound/soc/codecs/wm_hubs.h
+++ b/sound/soc/codecs/wm_hubs.h
@@ -23,6 +23,9 @@
 	int dcs_codes;
 	int dcs_readback_mode;
 	int hp_startup_mode;
+
+	bool class_w;
+	u16 class_w_dcs;
 };
 
 extern int wm_hubs_add_analogue_controls(struct snd_soc_codec *);
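The two fields added above are what enable the cached-offset fast path in calibrate_dc_servo(): class_w records that the headphone path currently carries digital data only, and class_w_dcs holds the last DC_SERVO_3 value written. As a minimal sketch (not part of this patch) of how a wm_hubs-based codec driver might maintain them, where my_path_is_digital() is a hypothetical helper and the drvdata access mirrors what wm_hubs.c itself does:

static void my_update_class_w(struct snd_soc_codec *codec)
{
	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);

	/* Only reuse a cached DC servo offset while no analogue
	 * signal is mixed into the headphone path. */
	hubs->class_w = my_path_is_digital(codec);	/* hypothetical */
	if (!hubs->class_w)
		hubs->class_w_dcs = 0;	/* force a fresh calibration */
}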
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index bc9e6b0..0c2d6ba 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -18,7 +18,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/dma.h>
 #include <asm/mach-types.h>
@@ -27,7 +26,6 @@
 #include <mach/edma.h>
 #include <mach/mux.h>
 
-#include "../codecs/tlv320aic3x.h"
 #include "davinci-pcm.h"
 #include "davinci-i2s.h"
 #include "davinci-mcasp.h"
@@ -132,26 +130,27 @@
 static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add davinci-evm specific widgets */
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* Set up davinci-evm specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* not connected */
-	snd_soc_dapm_disable_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_disable_pin(codec, "HPLCOM");
-	snd_soc_dapm_disable_pin(codec, "HPRCOM");
+	snd_soc_dapm_disable_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_disable_pin(dapm, "HPLCOM");
+	snd_soc_dapm_disable_pin(dapm, "HPRCOM");
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
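This hunk is the first of many identical conversions in this merge: DAPM calls now take a struct snd_soc_dapm_context obtained from codec->dapm instead of the codec itself. As a reference for the pattern, a board init callback after the conversion generally has the shape below (my_widgets, my_routes and the pin name are placeholders, not from any particular driver):

static int my_board_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	/* Board-specific widgets and routes, then pin setup */
	snd_soc_dapm_new_controls(dapm, my_widgets, ARRAY_SIZE(my_widgets));
	snd_soc_dapm_add_routes(dapm, my_routes, ARRAY_SIZE(my_routes));
	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");

	return snd_soc_dapm_sync(dapm);
}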
diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c
index 6c6666a..0fe558c 100644
--- a/sound/soc/davinci/davinci-sffsdr.c
+++ b/sound/soc/davinci/davinci-sffsdr.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/dma.h>
 #include <asm/mach-types.h>
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c
index 4f48733..9ac93f6 100644
--- a/sound/soc/ep93xx/ep93xx-i2s.c
+++ b/sound/soc/ep93xx/ep93xx-i2s.c
@@ -352,13 +352,13 @@
 	.playback	= {
 		.channels_min	= 2,
 		.channels_max	= 2,
-		.rates		= SNDRV_PCM_RATE_8000_48000,
+		.rates		= SNDRV_PCM_RATE_8000_96000,
 		.formats	= EP93XX_I2S_FORMATS,
 	},
 	.capture	= {
 		 .channels_min	= 2,
 		 .channels_max	= 2,
-		 .rates		= SNDRV_PCM_RATE_8000_48000,
+		 .rates		= SNDRV_PCM_RATE_8000_96000,
 		 .formats	= EP93XX_I2S_FORMATS,
 	},
 	.ops		= &ep93xx_i2s_dai_ops,
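For reference (not part of the patch text), SNDRV_PCM_RATE_8000_96000 is the 8000-48000 mask with the higher discrete rates OR-ed in, so this change additionally advertises 64, 88.2 and 96 kHz:

	SNDRV_PCM_RATE_8000_96000 == SNDRV_PCM_RATE_8000_48000 |
				     SNDRV_PCM_RATE_64000 |
				     SNDRV_PCM_RATE_88200 |
				     SNDRV_PCM_RATE_96000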
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index 2f121dd..0667077 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -35,9 +35,9 @@
 				   SNDRV_PCM_INFO_INTERLEAVED	|
 				   SNDRV_PCM_INFO_BLOCK_TRANSFER),
 				   
-	.rates			= SNDRV_PCM_RATE_8000_48000,
+	.rates			= SNDRV_PCM_RATE_8000_96000,
 	.rate_min		= SNDRV_PCM_RATE_8000,
-	.rate_max		= SNDRV_PCM_RATE_48000,
+	.rate_max		= SNDRV_PCM_RATE_96000,
 	
 	.formats		= (SNDRV_PCM_FMTBIT_S16_LE |
 				   SNDRV_PCM_FMTBIT_S24_LE |
diff --git a/sound/soc/ep93xx/snappercl15.c b/sound/soc/ep93xx/snappercl15.c
index 28ab5ff..dfe1d7f 100644
--- a/sound/soc/ep93xx/snappercl15.c
+++ b/sound/soc/ep93xx/snappercl15.c
@@ -15,7 +15,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -79,11 +78,12 @@
 static int snappercl15_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	return 0;
 }
 
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index dd4fffd..e20c9e1 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
 #include "../codecs/tlv320aic23.h"
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 390b6ff..30894ea 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -456,13 +456,13 @@
 static struct snd_soc_dai_driver imx_ssi_dai = {
 	.probe = imx_ssi_dai_probe,
 	.playback = {
-		.channels_min = 2,
+		.channels_min = 1,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.capture = {
-		.channels_min = 2,
+		.channels_min = 1,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c
index 9eabc28..a7deb5c 100644
--- a/sound/soc/imx/phycore-ac97.c
+++ b/sound/soc/imx/phycore-ac97.c
@@ -17,7 +17,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
 static struct snd_soc_card imx_phycore;
diff --git a/sound/soc/imx/wm1133-ev1.c b/sound/soc/imx/wm1133-ev1.c
index 30fdb15..75b4c72 100644
--- a/sound/soc/imx/wm1133-ev1.c
+++ b/sound/soc/imx/wm1133-ev1.c
@@ -19,7 +19,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audmux.h>
 
@@ -213,11 +212,12 @@
 static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, wm1133_ev1_widgets,
+	snd_soc_dapm_new_controls(dapm, wm1133_ev1_widgets,
 				  ARRAY_SIZE(wm1133_ev1_widgets));
 
-	snd_soc_dapm_add_routes(codec, wm1133_ev1_map,
+	snd_soc_dapm_add_routes(dapm, wm1133_ev1_map,
 				ARRAY_SIZE(wm1133_ev1_map));
 
 	/* Headphone jack detection */
@@ -234,7 +234,7 @@
 	wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
 			       SND_JACK_BTN_0);
 
-	snd_soc_dapm_force_enable_pin(codec, "Mic Bias");
+	snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
 
 	return 0;
 }
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index f3cffd1..419bf4f 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -28,7 +28,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "jz4740-i2s.h"
diff --git a/sound/soc/jz4740/qi_lb60.c b/sound/soc/jz4740/qi_lb60.c
index ef1a99e..49723e3 100644
--- a/sound/soc/jz4740/qi_lb60.c
+++ b/sound/soc/jz4740/qi_lb60.c
@@ -19,7 +19,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <linux/gpio.h>
 
 #define QI_LB60_SND_GPIO JZ_GPIO_PORTB(29)
@@ -59,10 +58,11 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_nc_pin(codec, "LIN");
-	snd_soc_dapm_nc_pin(codec, "RIN");
+	snd_soc_dapm_nc_pin(dapm, "LIN");
+	snd_soc_dapm_nc_pin(dapm, "RIN");
 
 	ret = snd_soc_dai_set_fmt(cpu_dai, QI_LB60_DAIFMT);
 	if (ret < 0) {
@@ -70,9 +70,11 @@
 		return ret;
 	}
 
-	snd_soc_dapm_new_controls(codec, qi_lb60_widgets, ARRAY_SIZE(qi_lb60_widgets));
-	snd_soc_dapm_add_routes(codec, qi_lb60_routes, ARRAY_SIZE(qi_lb60_routes));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_new_controls(dapm, qi_lb60_widgets,
+				  ARRAY_SIZE(qi_lb60_widgets));
+	snd_soc_dapm_add_routes(dapm, qi_lb60_routes,
+				ARRAY_SIZE(qi_lb60_routes));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 16ec2a2..8f49e16 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -11,10 +11,19 @@
 
 config SND_KIRKWOOD_SOC_OPENRD
 	tristate "SoC Audio support for Kirkwood Openrd Client"
-	depends on SND_KIRKWOOD_SOC && MACH_OPENRD_CLIENT
+	depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
 	select SND_KIRKWOOD_SOC_I2S
 	select SND_SOC_CS42L51
 	help
 	  Say Y if you want to add support for SoC audio on
 	  Openrd Client.
 
+config SND_KIRKWOOD_SOC_T5325
+	tristate "SoC Audio support for HP t5325"
+	depends on SND_KIRKWOOD_SOC && MACH_T5325
+	select SND_KIRKWOOD_SOC_I2S
+	select SND_SOC_ALC5623
+	help
+	  Say Y if you want to add support for SoC audio on
+	  the HP t5325 thin client.
+
diff --git a/sound/soc/kirkwood/Makefile b/sound/soc/kirkwood/Makefile
index 33a16dc..3e62ae9 100644
--- a/sound/soc/kirkwood/Makefile
+++ b/sound/soc/kirkwood/Makefile
@@ -5,5 +5,7 @@
 obj-$(CONFIG_SND_KIRKWOOD_SOC_I2S) += snd-soc-kirkwood-i2s.o
 
 snd-soc-openrd-objs := kirkwood-openrd.o
+snd-soc-t5325-objs := kirkwood-t5325.o
 
 obj-$(CONFIG_SND_KIRKWOOD_SOC_OPENRD) += snd-soc-openrd.o
+obj-$(CONFIG_SND_KIRKWOOD_SOC_T5325) += snd-soc-t5325.o
diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c
index 9d7c81e..d863afb 100644
--- a/sound/soc/kirkwood/kirkwood-openrd.c
+++ b/sound/soc/kirkwood/kirkwood-openrd.c
@@ -86,7 +86,7 @@
 {
 	int ret;
 
-	if (!machine_is_openrd_client())
+	if (!machine_is_openrd_client() && !machine_is_openrd_ultimate())
 		return 0;
 
 	openrd_client_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c
new file mode 100644
index 0000000..c8d2195
--- /dev/null
+++ b/sound/soc/kirkwood/kirkwood-t5325.c
@@ -0,0 +1,141 @@
+/*
+ * kirkwood-t5325.c
+ *
+ * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <mach/kirkwood.h>
+#include <plat/audio.h>
+#include <asm/mach-types.h>
+#include "../codecs/alc5623.h"
+
+static int t5325_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret;
+	unsigned int freq, fmt;
+
+	fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0)
+		return ret;
+
+	freq = params_rate(params) * 256;
+
+	return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN);
+
+}
+
+static struct snd_soc_ops t5325_ops = {
+	.hw_params = t5325_hw_params,
+};
+
+static const struct snd_soc_dapm_widget t5325_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route t5325_route[] = {
+	{ "Headphone Jack",	NULL,	"HPL" },
+	{ "Headphone Jack",	NULL,	"HPR" },
+
+	{"Speaker",		NULL,	"SPKOUT"},
+	{"Speaker",		NULL,	"SPKOUTN"},
+
+	{ "MIC1",		NULL,	"Mic Jack" },
+	{ "MIC2",		NULL,	"Mic Jack" },
+};
+
+static int t5325_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, t5325_dapm_widgets,
+				ARRAY_SIZE(t5325_dapm_widgets));
+
+	snd_soc_dapm_add_routes(dapm, t5325_route, ARRAY_SIZE(t5325_route));
+
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link t5325_dai[] = {
+{
+	.name = "ALC5621",
+	.stream_name = "ALC5621 HiFi",
+	.cpu_dai_name = "kirkwood-i2s",
+	.platform_name = "kirkwood-pcm-audio",
+	.codec_dai_name = "alc5621-hifi",
+	.codec_name = "alc562x-codec.0-001a",
+	.ops = &t5325_ops,
+	.init = t5325_dai_init,
+},
+};
+
+
+static struct snd_soc_card t5325 = {
+	.name = "t5325",
+	.dai_link = t5325_dai,
+	.num_links = ARRAY_SIZE(t5325_dai),
+};
+
+static struct platform_device *t5325_snd_device;
+
+static int __init t5325_init(void)
+{
+	int ret;
+
+	if (!machine_is_t5325())
+		return 0;
+
+	t5325_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!t5325_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(t5325_snd_device,
+			&t5325);
+
+	ret = platform_device_add(t5325_snd_device);
+	if (ret) {
+		printk(KERN_ERR "%s: platform_device_add failed\n", __func__);
+		platform_device_put(t5325_snd_device);
+	}
+
+	return ret;
+}
+module_init(t5325_init);
+
+static void __exit t5325_exit(void)
+{
+	platform_device_unregister(t5325_snd_device);
+}
+module_exit(t5325_exit);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("ALSA SoC t5325 audio client");
+MODULE_LICENSE("GPL");
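t5325_hw_params() derives the codec sysclk as 256*fs, so a 48000 Hz stream requests 48000 * 256 = 12288000 Hz (12.288 MHz) and a 44100 Hz stream 11289600 Hz (11.2896 MHz); the ALC5623 then consumes that MCLK as an input (SND_SOC_CLOCK_IN) while the McBSP-style CPU DAI acts as clock and frame master (SND_SOC_DAIFMT_CBS_CFS).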
diff --git a/sound/soc/nuc900/nuc900-audio.c b/sound/soc/nuc900/nuc900-audio.c
index 161f5b6..38a2d0d 100644
--- a/sound/soc/nuc900/nuc900-audio.c
+++ b/sound/soc/nuc900/nuc900-audio.c
@@ -18,7 +18,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "nuc900-audio.h"
 
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
index 979dd50..1617504 100644
--- a/sound/soc/omap/am3517evm.c
+++ b/sound/soc/omap/am3517evm.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -114,20 +113,21 @@
 static int am3517evm_aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add am3517-evm specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up davinci-evm specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic In");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 438146a..2101bdc 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -26,7 +26,7 @@
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -94,6 +94,7 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct soc_enum *control = (struct soc_enum *)kcontrol->private_value;
 	unsigned short pins;
 	int pin, changed = 0;
@@ -112,48 +113,48 @@
 
 	/* Setup pins after corresponding bits if changed */
 	pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Mouthpiece")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Mouthpiece")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Mouthpiece");
+			snd_soc_dapm_enable_pin(dapm, "Mouthpiece");
 		else
-			snd_soc_dapm_disable_pin(codec, "Mouthpiece");
+			snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_EARPIECE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Earpiece")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Earpiece")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Earpiece");
+			snd_soc_dapm_enable_pin(dapm, "Earpiece");
 		else
-			snd_soc_dapm_disable_pin(codec, "Earpiece");
+			snd_soc_dapm_disable_pin(dapm, "Earpiece");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_MICROPHONE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Microphone")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Microphone")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Microphone");
+			snd_soc_dapm_enable_pin(dapm, "Microphone");
 		else
-			snd_soc_dapm_disable_pin(codec, "Microphone");
+			snd_soc_dapm_disable_pin(dapm, "Microphone");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_SPEAKER));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Speaker")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Speaker")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Speaker");
+			snd_soc_dapm_enable_pin(dapm, "Speaker");
 		else
-			snd_soc_dapm_disable_pin(codec, "Speaker");
+			snd_soc_dapm_disable_pin(dapm, "Speaker");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_AGC));
 	if (pin != ams_delta_audio_agc) {
 		ams_delta_audio_agc = pin;
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "AGCIN");
+			snd_soc_dapm_enable_pin(dapm, "AGCIN");
 		else
-			snd_soc_dapm_disable_pin(codec, "AGCIN");
+			snd_soc_dapm_disable_pin(dapm, "AGCIN");
 	}
 	if (changed)
-		snd_soc_dapm_sync(codec);
+		snd_soc_dapm_sync(dapm);
 
 	mutex_unlock(&codec->mutex);
 
@@ -164,19 +165,20 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned short pins, mode;
 
-	pins = ((snd_soc_dapm_get_pin_status(codec, "Mouthpiece") <<
+	pins = ((snd_soc_dapm_get_pin_status(dapm, "Mouthpiece") <<
 							AMS_DELTA_MOUTHPIECE) |
-			(snd_soc_dapm_get_pin_status(codec, "Earpiece") <<
+			(snd_soc_dapm_get_pin_status(dapm, "Earpiece") <<
 							AMS_DELTA_EARPIECE));
 	if (pins)
-		pins |= (snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+		pins |= (snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
 							AMS_DELTA_MICROPHONE);
 	else
-		pins = ((snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+		pins = ((snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
 							AMS_DELTA_MICROPHONE) |
-			(snd_soc_dapm_get_pin_status(codec, "Speaker") <<
+			(snd_soc_dapm_get_pin_status(dapm, "Speaker") <<
 							AMS_DELTA_SPEAKER) |
 			(ams_delta_audio_agc << AMS_DELTA_AGC));
 
@@ -300,6 +302,7 @@
 static void cx81801_close(struct tty_struct *tty)
 {
 	struct snd_soc_codec *codec = tty->disc_data;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	del_timer_sync(&cx81801_timer);
 
@@ -312,12 +315,12 @@
 	v253_ops.close(tty);
 
 	/* Revert back to default audio input/output constellation */
-	snd_soc_dapm_disable_pin(codec, "Mouthpiece");
-	snd_soc_dapm_enable_pin(codec, "Earpiece");
-	snd_soc_dapm_enable_pin(codec, "Microphone");
-	snd_soc_dapm_disable_pin(codec, "Speaker");
-	snd_soc_dapm_disable_pin(codec, "AGCIN");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
+	snd_soc_dapm_enable_pin(dapm, "Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Microphone");
+	snd_soc_dapm_disable_pin(dapm, "Speaker");
+	snd_soc_dapm_disable_pin(dapm, "AGCIN");
+	snd_soc_dapm_sync(dapm);
 }
 
 /* Line discipline .hangup() */
@@ -432,16 +435,16 @@
 	case SND_SOC_BIAS_ON:
 	case SND_SOC_BIAS_PREPARE:
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
 						AMS_DELTA_LATCH2_MODEM_NRESET);
 		break;
 	case SND_SOC_BIAS_OFF:
-		if (codec->bias_level != SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level != SND_SOC_BIAS_OFF)
 			ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
 						0);
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -492,6 +495,7 @@
 static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	struct snd_soc_card *card = rtd->card;
 	int ret;
@@ -541,7 +545,7 @@
 	}
 
 	/* Add board specific DAPM widgets and routes */
-	ret = snd_soc_dapm_new_controls(codec, ams_delta_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, ams_delta_dapm_widgets,
 					ARRAY_SIZE(ams_delta_dapm_widgets));
 	if (ret) {
 		dev_warn(card->dev,
@@ -550,7 +554,7 @@
 		return 0;
 	}
 
-	ret = snd_soc_dapm_add_routes(codec, ams_delta_audio_map,
+	ret = snd_soc_dapm_add_routes(dapm, ams_delta_audio_map,
 					ARRAY_SIZE(ams_delta_audio_map));
 	if (ret) {
 		dev_warn(card->dev,
@@ -560,13 +564,13 @@
 	}
 
 	/* Set up initial pin constellation */
-	snd_soc_dapm_disable_pin(codec, "Mouthpiece");
-	snd_soc_dapm_enable_pin(codec, "Earpiece");
-	snd_soc_dapm_enable_pin(codec, "Microphone");
-	snd_soc_dapm_disable_pin(codec, "Speaker");
-	snd_soc_dapm_disable_pin(codec, "AGCIN");
-	snd_soc_dapm_disable_pin(codec, "AGCOUT");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
+	snd_soc_dapm_enable_pin(dapm, "Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Microphone");
+	snd_soc_dapm_disable_pin(dapm, "Speaker");
+	snd_soc_dapm_disable_pin(dapm, "AGCIN");
+	snd_soc_dapm_disable_pin(dapm, "AGCOUT");
+	snd_soc_dapm_sync(dapm);
 
 	/* Add virtual switch */
 	ret = snd_soc_add_controls(codec, ams_delta_audio_controls,
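The bias-level bookkeeping above now lives in codec->dapm.bias_level rather than directly on the codec. A sketch of such a callback after this change, with hypothetical power helpers standing in for the latch writes used by this board:

static int my_set_bias_level(struct snd_soc_codec *codec,
			     enum snd_soc_bias_level level)
{
	switch (level) {
	case SND_SOC_BIAS_ON:
	case SND_SOC_BIAS_PREPARE:
	case SND_SOC_BIAS_STANDBY:
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
			my_power_up();		/* hypothetical helper */
		break;
	case SND_SOC_BIAS_OFF:
		if (codec->dapm.bias_level != SND_SOC_BIAS_OFF)
			my_power_down();	/* hypothetical helper */
		break;
	}
	codec->dapm.bias_level = level;

	return 0;
}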
diff --git a/sound/soc/omap/igep0020.c b/sound/soc/omap/igep0020.c
index fd3a40f..0ae3470 100644
--- a/sound/soc/omap/igep0020.c
+++ b/sound/soc/omap/igep0020.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index a3b6d89..83d213b 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -27,7 +27,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -36,7 +35,6 @@
 
 #include "omap-mcbsp.h"
 #include "omap-pcm.h"
-#include "../codecs/tlv320aic3x.h"
 
 #define N810_HEADSET_AMP_GPIO	10
 #define N810_SPEAKER_AMP_GPIO	101
@@ -58,6 +56,7 @@
 
 static void n810_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int hp = 0, line1l = 0;
 
 	switch (n810_jack_func) {
@@ -72,25 +71,25 @@
 	}
 
 	if (n810_spk_func)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	if (hp)
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	else
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 	if (line1l)
-		snd_soc_dapm_enable_pin(codec, "LINE1L");
+		snd_soc_dapm_enable_pin(dapm, "LINE1L");
 	else
-		snd_soc_dapm_disable_pin(codec, "LINE1L");
+		snd_soc_dapm_disable_pin(dapm, "LINE1L");
 
 	if (n810_dmic_func)
-		snd_soc_dapm_enable_pin(codec, "DMic");
+		snd_soc_dapm_enable_pin(dapm, "DMic");
 	else
-		snd_soc_dapm_disable_pin(codec, "DMic");
+		snd_soc_dapm_disable_pin(dapm, "DMic");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int n810_startup(struct snd_pcm_substream *substream)
@@ -274,17 +273,18 @@
 static int n810_aic33_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Not connected */
-	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_nc_pin(codec, "HPLCOM");
-	snd_soc_dapm_nc_pin(codec, "HPRCOM");
-	snd_soc_dapm_nc_pin(codec, "MIC3L");
-	snd_soc_dapm_nc_pin(codec, "MIC3R");
-	snd_soc_dapm_nc_pin(codec, "LINE1R");
-	snd_soc_dapm_nc_pin(codec, "LINE2L");
-	snd_soc_dapm_nc_pin(codec, "LINE2R");
+	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_nc_pin(dapm, "HPLCOM");
+	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L");
+	snd_soc_dapm_nc_pin(dapm, "MIC3R");
+	snd_soc_dapm_nc_pin(dapm, "LINE1R");
+	snd_soc_dapm_nc_pin(dapm, "LINE2L");
+	snd_soc_dapm_nc_pin(dapm, "LINE2R");
 
 	/* Add N810 specific controls */
 	err = snd_soc_add_controls(codec, aic33_n810_controls,
@@ -293,13 +293,13 @@
 		return err;
 
 	/* Add N810 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic33_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic33_dapm_widgets,
 				  ARRAY_SIZE(aic33_dapm_widgets));
 
 	/* Set up N810 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 7e84f24..d203f4d 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -102,6 +102,17 @@
 static const int omap24xx_dma_reqs[][2] = {};
 #endif
 
+#if defined(CONFIG_ARCH_OMAP4)
+static const int omap44xx_dma_reqs[][2] = {
+	{ OMAP44XX_DMA_MCBSP1_TX, OMAP44XX_DMA_MCBSP1_RX },
+	{ OMAP44XX_DMA_MCBSP2_TX, OMAP44XX_DMA_MCBSP2_RX },
+	{ OMAP44XX_DMA_MCBSP3_TX, OMAP44XX_DMA_MCBSP3_RX },
+	{ OMAP44XX_DMA_MCBSP4_TX, OMAP44XX_DMA_MCBSP4_RX },
+};
+#else
+static const int omap44xx_dma_reqs[][2] = {};
+#endif
+
 #if defined(CONFIG_ARCH_OMAP2420)
 static const unsigned long omap2420_mcbsp_port[][2] = {
 	{ OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
@@ -147,6 +158,21 @@
 static const unsigned long omap34xx_mcbsp_port[][2] = {};
 #endif
 
+#if defined(CONFIG_ARCH_OMAP4)
+static const unsigned long omap44xx_mcbsp_port[][2] = {
+	{ OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR },
+};
+#else
+static const unsigned long omap44xx_mcbsp_port[][2] = {};
+#endif
+
 static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -224,7 +250,7 @@
 	 * 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words)
 	 * 4 channels: size is 128 / 4 = 32 frames (4 * 32 words)
 	 */
-	if (cpu_is_omap343x()) {
+	if (cpu_is_omap343x() || cpu_is_omap44xx()) {
 		/*
 		* Rule for the buffer size. We should not allow
 		* smaller buffer than the FIFO size to avoid underruns
@@ -332,6 +358,9 @@
 	} else if (cpu_is_omap343x()) {
 		dma = omap24xx_dma_reqs[bus_id][substream->stream];
 		port = omap34xx_mcbsp_port[bus_id][substream->stream];
+	} else if (cpu_is_omap44xx()) {
+		dma = omap44xx_dma_reqs[bus_id][substream->stream];
+		port = omap44xx_mcbsp_port[bus_id][substream->stream];
 	} else {
 		return -ENODEV;
 	}
@@ -498,11 +527,11 @@
 	regs->spcr2	|= XINTM(3) | FREE;
 	regs->spcr1	|= RINTM(3);
 	/* RFIG and XFIG are not defined in 34xx */
-	if (!cpu_is_omap34xx()) {
+	if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) {
 		regs->rcr2	|= RFIG;
 		regs->xcr2	|= XFIG;
 	}
-	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
 		regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
 		regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
 	}
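The buffer-size rule quoted in the hunk above boils down to never allowing fewer frames than the McBSP FIFO can hold: with a FIFO of fifo_words 16-bit words and interleaved channels, the minimum is fifo_words / channels frames (128 / 2 = 64 frames for stereo). Purely as an illustration of that arithmetic:

static unsigned int min_frames_for_fifo(unsigned int fifo_words,
					unsigned int channels)
{
	/* e.g. 128-word FIFO, stereo: 128 / 2 = 64 frames */
	return fifo_words / channels;
}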
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index ffdcc5a..110c106 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -50,6 +50,10 @@
 #undef  NUM_LINKS
 #define NUM_LINKS	3
 #endif
+#if defined(CONFIG_ARCH_OMAP4)
+#undef  NUM_LINKS
+#define NUM_LINKS	4
+#endif
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
 #undef  NUM_LINKS
 #define NUM_LINKS	5
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c
index cf3fc8a..29b60d6 100644
--- a/sound/soc/omap/omap2evm.c
+++ b/sound/soc/omap/omap2evm.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3beagle.c b/sound/soc/omap/omap3beagle.c
index e56832b..40db813 100644
--- a/sound/soc/omap/omap3beagle.c
+++ b/sound/soc/omap/omap3beagle.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3evm.c b/sound/soc/omap/omap3evm.c
index 810f1e36..0daa044 100644
--- a/sound/soc/omap/omap3evm.c
+++ b/sound/soc/omap/omap3evm.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 4ee33ce..8047c521e 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -28,7 +28,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <plat/mcbsp.h>
@@ -170,51 +169,53 @@
 static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* All TWL4030 output pins are floating */
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "HSOL");
-	snd_soc_dapm_nc_pin(codec, "HSOR");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
-	snd_soc_dapm_nc_pin(codec, "HFL");
-	snd_soc_dapm_nc_pin(codec, "HFR");
-	snd_soc_dapm_nc_pin(codec, "VIBRA");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "HSOL");
+	snd_soc_dapm_nc_pin(dapm, "HSOR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "HFL");
+	snd_soc_dapm_nc_pin(dapm, "HFR");
+	snd_soc_dapm_nc_pin(dapm, "VIBRA");
 
-	ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, omap3pandora_out_dapm_widgets,
 				ARRAY_SIZE(omap3pandora_out_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	snd_soc_dapm_add_routes(codec, omap3pandora_out_map,
+	snd_soc_dapm_add_routes(dapm, omap3pandora_out_map,
 		ARRAY_SIZE(omap3pandora_out_map));
 
-	return snd_soc_dapm_sync(codec);
+	return snd_soc_dapm_sync(dapm);
 }
 
 static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Not connected */
-	snd_soc_dapm_nc_pin(codec, "HSMIC");
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "HSMIC");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
 
-	ret = snd_soc_dapm_new_controls(codec, omap3pandora_in_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, omap3pandora_in_dapm_widgets,
 				ARRAY_SIZE(omap3pandora_in_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	snd_soc_dapm_add_routes(codec, omap3pandora_in_map,
+	snd_soc_dapm_add_routes(dapm, omap3pandora_in_map,
 		ARRAY_SIZE(omap3pandora_in_map));
 
-	return snd_soc_dapm_sync(codec);
+	return snd_soc_dapm_sync(dapm);
 }
 
 static struct snd_soc_ops omap3pandora_ops = {
diff --git a/sound/soc/omap/osk5912.c b/sound/soc/omap/osk5912.c
index 65ae00e..7e75e77 100644
--- a/sound/soc/omap/osk5912.c
+++ b/sound/soc/omap/osk5912.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -116,19 +115,20 @@
 static int osk_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add osk5912 specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up osk5912 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/overo.c b/sound/soc/omap/overo.c
index e95a607..bbcf380 100644
--- a/sound/soc/omap/overo.c
+++ b/sound/soc/omap/overo.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 04b5723..09fb0df 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -30,14 +30,12 @@
 #include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <plat/mcbsp.h>
 
 #include <asm/mach-types.h>
 
 #include "omap-mcbsp.h"
 #include "omap-pcm.h"
-#include "../codecs/tlv320aic3x.h"
 
 #define RX51_TVOUT_SEL_GPIO		40
 #define RX51_JACK_DETECT_GPIO		177
@@ -58,19 +56,21 @@
 
 static void rx51_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (rx51_spk_func)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 	if (rx51_dmic_func)
-		snd_soc_dapm_enable_pin(codec, "DMic");
+		snd_soc_dapm_enable_pin(dapm, "DMic");
 	else
-		snd_soc_dapm_disable_pin(codec, "DMic");
+		snd_soc_dapm_disable_pin(dapm, "DMic");
 
 	gpio_set_value(RX51_TVOUT_SEL_GPIO,
 		       rx51_jack_func == RX51_JACK_TVOUT);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int rx51_startup(struct snd_pcm_substream *substream)
@@ -244,12 +244,13 @@
 static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "MIC3L");
-	snd_soc_dapm_nc_pin(codec, "MIC3R");
-	snd_soc_dapm_nc_pin(codec, "LINE1R");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L");
+	snd_soc_dapm_nc_pin(dapm, "MIC3R");
+	snd_soc_dapm_nc_pin(dapm, "LINE1R");
 
 	/* Add RX-51 specific controls */
 	err = snd_soc_add_controls(codec, aic34_rx51_controls,
@@ -258,13 +259,13 @@
 		return err;
 
 	/* Add RX-51 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic34_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic34_dapm_widgets,
 				  ARRAY_SIZE(aic34_dapm_widgets));
 
 	/* Set up RX-51 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	/* AV jack detection */
 	err = snd_soc_jack_new(codec, "AV Jack",
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index 07fbcf7..3f72d17 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -28,7 +28,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -191,39 +190,40 @@
 static int sdp3430_twl4030_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add SDP3430 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, sdp3430_twl4030_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, sdp3430_twl4030_dapm_widgets,
 				ARRAY_SIZE(sdp3430_twl4030_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up SDP3430 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* SDP3430 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
 	/* TWL4030 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "AUXL");
-	snd_soc_dapm_nc_pin(codec, "AUXR");
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "AUXL");
+	snd_soc_dapm_nc_pin(dapm, "AUXR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
 
-	snd_soc_dapm_nc_pin(codec, "OUTL");
-	snd_soc_dapm_nc_pin(codec, "OUTR");
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "OUTL");
+	snd_soc_dapm_nc_pin(dapm, "OUTR");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/omap/sdp4430.c b/sound/soc/omap/sdp4430.c
index 4b4463d..189e039 100644
--- a/sound/soc/omap/sdp4430.c
+++ b/sound/soc/omap/sdp4430.c
@@ -24,7 +24,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
+#include <sound/jack.h>
 
 #include <asm/mach-types.h>
 #include <plat/hardware.h>
@@ -66,6 +66,21 @@
 	.hw_params = sdp4430_hw_params,
 };
 
+/* Headset jack */
+static struct snd_soc_jack hs_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	},
+	{
+		.pin = "Headset Stereophone",
+		.mask = SND_JACK_HEADPHONE,
+	},
+};
+
 static int sdp4430_get_power_mode(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
@@ -102,6 +117,7 @@
 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
 	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
 	SND_SOC_DAPM_SPK("Earphone Spk", NULL),
+	SND_SOC_DAPM_INPUT("Aux/FM Stereo In"),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -124,11 +140,16 @@
 
 	/* Earphone speaker */
 	{"Earphone Spk", NULL, "EP"},
+
+	/* Aux/FM Stereo In: AFML, AFMR */
+	{"AFML", NULL, "Aux/FM Stereo In"},
+	{"AFMR", NULL, "Aux/FM Stereo In"},
 };
 
 static int sdp4430_twl6040_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add SDP4430 specific controls */
@@ -138,25 +159,39 @@
 		return ret;
 
 	/* Add SDP4430 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, sdp4430_twl6040_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, sdp4430_twl6040_dapm_widgets,
 				ARRAY_SIZE(sdp4430_twl6040_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up SDP4430 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* SDP4430 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_enable_pin(codec, "Headset Mic");
-	snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "AFML");
+	snd_soc_dapm_enable_pin(dapm, "AFMR");
+	snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
 
-	/* TWL6040 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "AFML");
-	snd_soc_dapm_nc_pin(codec, "AFMR");
+	ret = snd_soc_dapm_sync(dapm);
+	if (ret)
+		return ret;
 
-	ret = snd_soc_dapm_sync(codec);
+	/* Headset jack detection */
+	ret = snd_soc_jack_new(codec, "Headset Jack",
+				SND_JACK_HEADSET, &hs_jack);
+	if (ret)
+		return ret;
+
+	ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
+				hs_jack_pins);
+
+	if (machine_is_omap_4430sdp())
+		twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET);
+	else
+		snd_soc_jack_report(&hs_jack, SND_JACK_HEADSET, SND_JACK_HEADSET);
 
 	return ret;
 }
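The jack support added above follows the standard ASoC pattern: create the jack, attach DAPM pins to it, then either let the codec driver report insertions (twl6040_hs_jack_detect() on the real 4430SDP) or force an initial report. Stripped of board specifics it is roughly the following sketch; the my_* names are illustrative only:

static struct snd_soc_jack my_hs_jack;

static struct snd_soc_jack_pin my_hs_jack_pins[] = {
	{ .pin = "Headset Mic",         .mask = SND_JACK_MICROPHONE },
	{ .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE },
};

static int my_jack_init(struct snd_soc_codec *codec)
{
	int ret;

	ret = snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
			       &my_hs_jack);
	if (ret)
		return ret;

	return snd_soc_jack_add_pins(&my_hs_jack,
				     ARRAY_SIZE(my_hs_jack_pins),
				     my_hs_jack_pins);
}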
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
index 718031e..0170994 100644
--- a/sound/soc/omap/zoom2.c
+++ b/sound/soc/omap/zoom2.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -162,35 +161,36 @@
 static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add Zoom2 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, zoom2_twl4030_dapm_widgets,
 				ARRAY_SIZE(zoom2_twl4030_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up Zoom2 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* Zoom2 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_enable_pin(codec, "Headset Mic");
-	snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
-	snd_soc_dapm_enable_pin(codec, "Aux In");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Aux In");
 
 	/* TWL4030 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 
 	return ret;
 }
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index f451acd..fc592f0 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/corgi.h>
@@ -48,51 +47,53 @@
 
 static void corgi_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	switch (corgi_jack_func) {
 	case CORGI_HP:
 		/* set = unmute headphone */
 		gpio_set_value(CORGI_GPIO_MUTE_L, 1);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 1);
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_MIC:
 		/* reset = mute headphone */
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 0);
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_LINE:
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 0);
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_enable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_enable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_HEADSET:
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 1);
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		break;
 	}
 
 	if (corgi_spk_func == CORGI_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int corgi_startup(struct snd_pcm_substream *substream)
@@ -279,10 +280,11 @@
 static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
 
 	/* Add corgi specific controls */
 	err = snd_soc_add_controls(codec, wm8731_corgi_controls,
@@ -291,13 +293,13 @@
 		return err;
 
 	/* Add corgi specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
 				  ARRAY_SIZE(wm8731_dapm_widgets));
 
 	/* Set up corgi specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index c82cedb..28333e7 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
@@ -92,23 +91,24 @@
 static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_nc_pin(codec, "HPOUTL");
-	snd_soc_dapm_nc_pin(codec, "HPOUTR");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "CDINL");
-	snd_soc_dapm_nc_pin(codec, "CDINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "HPOUTL");
+	snd_soc_dapm_nc_pin(dapm, "HPOUTR");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "CDINL");
+	snd_soc_dapm_nc_pin(dapm, "CDINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	snd_soc_dapm_new_controls(codec, e740_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e740_dapm_widgets,
 					ARRAY_SIZE(e740_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 4c14380..01bf316 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
@@ -74,23 +73,24 @@
 static int e750_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_nc_pin(codec, "LOUT");
-	snd_soc_dapm_nc_pin(codec, "ROUT");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "CDINL");
-	snd_soc_dapm_nc_pin(codec, "CDINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "LOUT");
+	snd_soc_dapm_nc_pin(dapm, "ROUT");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "CDINL");
+	snd_soc_dapm_nc_pin(dapm, "CDINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	snd_soc_dapm_new_controls(codec, e750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e750_dapm_widgets,
 					ARRAY_SIZE(e750_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index d42e5fe..c6a37c6 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/audio.h>
@@ -75,12 +74,13 @@
 static int e800_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, e800_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e800_dapm_widgets,
 					ARRAY_SIZE(e800_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index eadf9d3..fc22e6e 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/audio.h>
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 5ef0526..67dcc36 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/uda1380.h>
 
 #include <mach/magician.h>
@@ -44,27 +43,29 @@
 
 static void magician_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (magician_spk_switch)
-		snd_soc_dapm_enable_pin(codec, "Speaker");
+		snd_soc_dapm_enable_pin(dapm, "Speaker");
 	else
-		snd_soc_dapm_disable_pin(codec, "Speaker");
+		snd_soc_dapm_disable_pin(dapm, "Speaker");
 	if (magician_hp_switch)
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	else
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 
 	switch (magician_in_sel) {
 	case MAGICIAN_MIC:
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case MAGICIAN_MIC_EXT:
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
 		break;
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int magician_startup(struct snd_pcm_substream *substream)
@@ -399,15 +400,16 @@
 static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "VOUTLHP");
-	snd_soc_dapm_nc_pin(codec, "VOUTRHP");
+	snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
+	snd_soc_dapm_nc_pin(dapm, "VOUTRHP");
 
 	/* FIXME: is anything connected here? */
-	snd_soc_dapm_nc_pin(codec, "VINL");
-	snd_soc_dapm_nc_pin(codec, "VINR");
+	snd_soc_dapm_nc_pin(dapm, "VINL");
+	snd_soc_dapm_nc_pin(dapm, "VINR");
 
 	/* Add magician specific controls */
 	err = snd_soc_add_controls(codec, uda1380_magician_controls,
@@ -416,13 +418,13 @@
 		return err;
 
 	/* Add magician specific widgets */
-	snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
 				  ARRAY_SIZE(uda1380_dapm_widgets));
 
 	/* Set up magician specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index f284cc5..0d70fc8 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -50,7 +50,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/ac97_codec.h>
 
@@ -130,13 +129,14 @@
 static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned short reg;
 
 	/* Add mioa701 specific widgets */
-	snd_soc_dapm_new_controls(codec, ARRAY_AND_SIZE(mioa701_dapm_widgets));
+	snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets));
 
 	/* Set up mioa701 specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, ARRAY_AND_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map));
 
 	/* Prepare GPIO8 for rear speaker amplifier */
 	reg = codec->driver->read(codec, AC97_GPIO_CFG);
@@ -146,12 +146,12 @@
 	reg = codec->driver->read(codec, AC97_3D_CONTROL);
 	codec->driver->write(codec, AC97_3D_CONTROL, reg | 0xc000);
 
-	snd_soc_dapm_enable_pin(codec, "Front Speaker");
-	snd_soc_dapm_enable_pin(codec, "Rear Speaker");
-	snd_soc_dapm_enable_pin(codec, "Front Mic");
-	snd_soc_dapm_enable_pin(codec, "GSM Line In");
-	snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_enable_pin(dapm, "Front Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Rear Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Front Mic");
+	snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+	snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 13f6d48..857db96 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -77,37 +76,38 @@
 static int palm27x_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* add palm27x specific widgets */
-	err = snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, palm27x_dapm_widgets,
 				ARRAY_SIZE(palm27x_dapm_widgets));
 	if (err)
 		return err;
 
 	/* set up palm27x specific audio path audio_map */
-	err = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	err = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (err)
 		return err;
 
 	/* connected pins */
 	if (machine_is_palmld())
-		snd_soc_dapm_enable_pin(codec, "MIC1");
-	snd_soc_dapm_enable_pin(codec, "HPOUTL");
-	snd_soc_dapm_enable_pin(codec, "HPOUTR");
-	snd_soc_dapm_enable_pin(codec, "LOUT2");
-	snd_soc_dapm_enable_pin(codec, "ROUT2");
+		snd_soc_dapm_enable_pin(dapm, "MIC1");
+	snd_soc_dapm_enable_pin(dapm, "HPOUTL");
+	snd_soc_dapm_enable_pin(dapm, "HPOUTR");
+	snd_soc_dapm_enable_pin(dapm, "LOUT2");
+	snd_soc_dapm_enable_pin(dapm, "ROUT2");
 
 	/* not connected pins */
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONOOUT");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	err = snd_soc_dapm_sync(codec);
+	err = snd_soc_dapm_sync(dapm);
 	if (err)
 		return err;
 
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 84edd03..6298ee1 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <asm/hardware/locomo.h>
@@ -46,6 +45,8 @@
 
 static void poodle_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	if (poodle_jack_func == POODLE_HP) {
 		/* set = unmute headphone */
@@ -53,23 +54,23 @@
 			POODLE_LOCOMO_GPIO_MUTE_L, 1);
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_R, 1);
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	} else {
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_L, 0);
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_R, 0);
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 	}
 
 	/* set the endpoints to their new connection states */
 	if (poodle_spk_func == POODLE_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int poodle_startup(struct snd_pcm_substream *substream)
@@ -244,11 +245,12 @@
 static int poodle_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
-	snd_soc_dapm_enable_pin(codec, "MICIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
+	snd_soc_dapm_enable_pin(dapm, "MICIN");
 
 	/* Add poodle specific controls */
 	err = snd_soc_add_controls(codec, wm8731_poodle_controls,
@@ -257,13 +259,13 @@
 		return err;
 
 	/* Add poodle specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
 				  ARRAY_SIZE(wm8731_dapm_widgets));
 
 	/* Set up poodle specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index b439eee..8ad93ee 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -20,6 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/pxa2xx_ssp.h>
 
 #include <asm/irq.h>
 
@@ -33,7 +34,6 @@
 #include <mach/hardware.h>
 #include <mach/dma.h>
 #include <mach/audio.h>
-#include <plat/ssp.h>
 
 #include "../../arm/pxa2xx-pcm.h"
 #include "pxa-ssp.h"
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 2cda82bc..0fd60f4 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -22,7 +22,6 @@
 #include <linux/gpio.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
diff --git a/sound/soc/pxa/saarb.c b/sound/soc/pxa/saarb.c
index d63cb47..9595189 100644
--- a/sound/soc/pxa/saarb.c
+++ b/sound/soc/pxa/saarb.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -133,20 +132,21 @@
 static int saarb_pm860x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_new_controls(codec, saarb_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, saarb_dapm_widgets,
 				  ARRAY_SIZE(saarb_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Speaker");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 1");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 3");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic 2");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 0b30d7d..c2acb69 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/spitz.h>
@@ -46,61 +45,63 @@
 
 static void spitz_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (spitz_spk_func == SPITZ_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* set up jack connection */
 	switch (spitz_jack_func) {
 	case SPITZ_HP:
 		/* enable and unmute hp jack, disable mic bias */
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 1);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
 		break;
 	case SPITZ_MIC:
 		/* enable mic jack and bias, mute hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	case SPITZ_LINE:
 		/* enable line jack, disable mic bias and mute hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_enable_pin(codec, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_enable_pin(dapm, "Line Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	case SPITZ_HEADSET:
 		/* enable and unmute headset jack enable mic bias, mute L hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
 		break;
 	case SPITZ_HP_OFF:
 
 		/* jack removed, everything off */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	}
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int spitz_startup(struct snd_pcm_substream *substream)
@@ -281,16 +282,17 @@
 static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "RINPUT1");
-	snd_soc_dapm_nc_pin(codec, "LINPUT2");
-	snd_soc_dapm_nc_pin(codec, "RINPUT2");
-	snd_soc_dapm_nc_pin(codec, "LINPUT3");
-	snd_soc_dapm_nc_pin(codec, "RINPUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONO1");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONO1");
 
 	/* Add spitz specific controls */
 	err = snd_soc_add_controls(codec, wm8750_spitz_controls,
@@ -299,13 +301,13 @@
 		return err;
 
 	/* Add spitz specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 				  ARRAY_SIZE(wm8750_dapm_widgets));
 
 	/* Set up spitz specific audio paths */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
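
The spitz_ext_control() conversion above is typical of the *_ext_control() helpers touched in this series: fetch the dapm context once, switch the relevant pins, then call snd_soc_dapm_sync() so the changes are applied in one pass. These helpers are driven from machine-level mixer controls; the sketch below shows that pairing in the style these drivers used at the time (the board_* names are illustrative, and the kcontrol plumbing is the usual pattern of that era rather than code quoted from this patch):

#include <sound/core.h>
#include <sound/soc.h>

static int board_spk_on;

static void board_ext_control(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	if (board_spk_on)
		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
	else
		snd_soc_dapm_disable_pin(dapm, "Ext Spk");

	/* apply all pin changes in a single DAPM walk */
	snd_soc_dapm_sync(dapm);
}

static int board_set_spk(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	/* controls added with snd_soc_add_controls() carry the codec here */
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);

	if (board_spk_on == ucontrol->value.integer.value[0])
		return 0;

	board_spk_on = ucontrol->value.integer.value[0];
	board_ext_control(codec);
	return 1;	/* value changed */
}
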
 
diff --git a/sound/soc/pxa/tavorevb3.c b/sound/soc/pxa/tavorevb3.c
index 248c283..f881f65 100644
--- a/sound/soc/pxa/tavorevb3.c
+++ b/sound/soc/pxa/tavorevb3.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -133,20 +132,21 @@
 static int evb3_pm860x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_new_controls(codec, evb3_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, evb3_dapm_widgets,
 				  ARRAY_SIZE(evb3_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Speaker");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 1");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 3");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic 2");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 7b983f9..f75804e 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/tosa.h>
@@ -49,31 +48,33 @@
 
 static void tosa_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	switch (tosa_jack_func) {
 	case TOSA_HP:
-		snd_soc_dapm_disable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case TOSA_MIC_INT:
-		snd_soc_dapm_enable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case TOSA_HEADSET:
-		snd_soc_dapm_disable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		break;
 	}
 
 	if (tosa_spk_func == TOSA_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Speaker");
+		snd_soc_dapm_enable_pin(dapm, "Speaker");
 	else
-		snd_soc_dapm_disable_pin(codec, "Speaker");
+		snd_soc_dapm_disable_pin(dapm, "Speaker");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int tosa_startup(struct snd_pcm_substream *substream)
@@ -191,10 +192,11 @@
 static int tosa_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONOOUT");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
 
 	/* add tosa specific controls */
 	err = snd_soc_add_controls(codec, tosa_controls,
@@ -203,13 +205,13 @@
 		return err;
 
 	/* add tosa specific widgets */
-	snd_soc_dapm_new_controls(codec, tosa_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tosa_dapm_widgets,
 				  ARRAY_SIZE(tosa_dapm_widgets));
 
 	/* set up tosa specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 4cc841b..2d4f896 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -140,22 +139,23 @@
 static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* NC codec pins */
-	snd_soc_dapm_disable_pin(codec, "LINPUT3");
-	snd_soc_dapm_disable_pin(codec, "RINPUT3");
-	snd_soc_dapm_disable_pin(codec, "OUT3");
-	snd_soc_dapm_disable_pin(codec, "MONO");
+	snd_soc_dapm_disable_pin(dapm, "LINPUT3");
+	snd_soc_dapm_disable_pin(dapm, "RINPUT3");
+	snd_soc_dapm_disable_pin(dapm, "OUT3");
+	snd_soc_dapm_disable_pin(dapm, "MONO");
 
 	/* Add z2 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 				 ARRAY_SIZE(wm8750_dapm_widgets));
 
 	/* Set up z2 specific audio paths */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		goto err;
 
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index d27e05a..b222a7d 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "../codecs/wm9713.h"
 #include "pxa2xx-ac97.h"
@@ -73,21 +72,22 @@
 static int zylonite_wm9713_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	if (clk_pout)
 		snd_soc_dai_set_pll(rtd->codec_dai, 0, 0,
 				    clk_get_rate(pout), 0);
 
-	snd_soc_dapm_new_controls(codec, zylonite_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, zylonite_dapm_widgets,
 				  ARRAY_SIZE(zylonite_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* Static setup for now */
-	snd_soc_dapm_enable_pin(codec, "Headphone");
-	snd_soc_dapm_enable_pin(codec, "Headset Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Headphone");
+	snd_soc_dapm_enable_pin(dapm, "Headset Earpiece");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
deleted file mode 100644
index d85bf8a..0000000
--- a/sound/soc/s3c24xx/Kconfig
+++ /dev/null
@@ -1,171 +0,0 @@
-config SND_S3C24XX_SOC
-	tristate "SoC Audio for the Samsung S3CXXXX chips"
-	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210
-	select S3C64XX_DMA if ARCH_S3C64XX
-	select S3C2410_DMA if ARCH_S3C2410
-	help
-	  Say Y or M if you want to add support for codecs attached to
-	  the S3C24XX AC97 or I2S interfaces. You will also need to
-	  select the audio interfaces to support below.
-
-config SND_S3C24XX_SOC_I2S
-	tristate
-	select S3C2410_DMA
-
-config SND_S3C_I2SV2_SOC
-	tristate
-
-config SND_S3C2412_SOC_I2S
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C2410_DMA
-
-config SND_S3C64XX_SOC_I2S
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C64XX_DMA
-
-config SND_S3C64XX_SOC_I2S_V4
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C64XX_DMA
-
-config SND_S3C_SOC_PCM
-	tristate
-
-config SND_S3C_SOC_AC97
-	tristate
-	select SND_SOC_AC97_BUS
-
-config SND_S5P_SOC_SPDIF
-	tristate
-	select SND_SOC_SPDIF
-
-config SND_S3C24XX_SOC_NEO1973_WM8753
-	tristate "SoC I2S Audio support for NEO1973 - WM8753"
-	depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA01
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_WM8753
-	help
-	  Say Y if you want to add support for SoC audio on smdk2440
-	  with the WM8753.
-
-config SND_S3C24XX_SOC_NEO1973_GTA02_WM8753
-	tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
-	depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA02
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_WM8753
-	help
-	  This driver provides audio support for the Openmoko Neo FreeRunner
-	  smartphone.
-	  
-config SND_S3C24XX_SOC_JIVE_WM8750
-	tristate "SoC I2S Audio support for Jive"
-	depends on SND_S3C24XX_SOC && MACH_JIVE
-	select SND_SOC_WM8750
-	select SND_S3C2412_SOC_I2S
-	help
-	  Sat Y if you want to add support for SoC audio on the Jive.
-
-config SND_S3C64XX_SOC_WM8580
-	tristate "SoC I2S Audio support for WM8580 on SMDK64XX"
-	depends on SND_S3C24XX_SOC && MACH_SMDK6410
-	select SND_SOC_WM8580
-	select SND_S3C64XX_SOC_I2S_V4
-	help
-	  Say Y if you want to add support for SoC audio on the SMDK6410.
-
-config SND_S3C24XX_SOC_SMDK2443_WM9710
-	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
-	depends on SND_S3C24XX_SOC && MACH_SMDK2443
-	select S3C2410_DMA
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_S3C_SOC_AC97
-	help
-	  Say Y if you want to add support for SoC audio on smdk2443
-	  with the WM9710.
-
-config SND_S3C24XX_SOC_LN2440SBC_ALC650
-	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select S3C2410_DMA
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_S3C_SOC_AC97
-	help
-	  Say Y if you want to add support for SoC audio on ln2440sbc
-	  with the ALC650.
-
-config SND_S3C24XX_SOC_S3C24XX_UDA134X
-	tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
-       	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-       	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_L3
-       	select SND_SOC_UDA134X
-
-config SND_S3C24XX_SOC_SIMTEC
-	tristate
-	help
-	  Internal node for common S3C24XX/Simtec suppor
-
-config SND_S3C24XX_SOC_SIMTEC_TLV320AIC23
-	tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_TLV320AIC23
-	select SND_S3C24XX_SOC_SIMTEC
-
-config SND_S3C24XX_SOC_SIMTEC_HERMES
-	tristate "SoC I2S Audio support for Simtec Hermes board"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_TLV320AIC3X
-	select SND_S3C24XX_SOC_SIMTEC
-
-config SND_S3C24XX_SOC_RX1950_UDA1380
-	tristate "Audio support for the HP iPAQ RX1950"
-	depends on SND_S3C24XX_SOC && MACH_RX1950
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_UDA1380
-	help
-	  This driver provides audio support for HP iPAQ RX1950 PDA.
-
-config SND_SOC_SMDK_WM9713
-	tristate "SoC AC97 Audio support for SMDK with WM9713"
-	depends on SND_S3C24XX_SOC && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
-	select SND_SOC_WM9713
-	select SND_S3C_SOC_AC97
-	help
-	  Sat Y if you want to add support for SoC audio on the SMDK.
-
-config SND_S3C64XX_SOC_SMARTQ
-	tristate "SoC I2S Audio support for SmartQ board"
-	depends on SND_S3C24XX_SOC && MACH_SMARTQ
-	select SND_S3C64XX_SOC_I2S
-	select SND_SOC_WM8750
-
-config SND_S5PC110_SOC_AQUILA_WM8994
-	tristate "SoC I2S Audio support for AQUILA - WM8994"
-	depends on SND_S3C24XX_SOC && MACH_AQUILA
-	select SND_S3C64XX_SOC_I2S_V4
-	select SND_SOC_WM8994
-	help
-	  Say Y if you want to add support for SoC audio on aquila
-	  with the WM8994.
-
-config SND_S5PV210_SOC_GONI_WM8994
-	tristate "SoC I2S Audio support for GONI - WM8994"
-	depends on SND_S3C24XX_SOC && MACH_GONI
-	select SND_S3C64XX_SOC_I2S_V4
-	select SND_SOC_WM8994
-	help
-	  Say Y if you want to add support for SoC audio on goni
-	  with the WM8994.
-
-config SND_SOC_SMDK_SPDIF
-	tristate "SoC S/PDIF Audio support for SMDK"
-	depends on SND_S3C24XX_SOC && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210)
-	select SND_S5P_SOC_SPDIF
-	help
-	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile
deleted file mode 100644
index ee8f41d..0000000
--- a/sound/soc/s3c24xx/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-# S3c24XX Platform Support
-snd-soc-s3c24xx-objs := s3c-dma.o
-snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
-snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
-snd-soc-s3c64xx-i2s-objs := s3c64xx-i2s.o
-snd-soc-s3c-ac97-objs := s3c-ac97.o
-snd-soc-s3c64xx-i2s-v4-objs := s3c64xx-i2s-v4.o
-snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
-snd-soc-s3c-pcm-objs := s3c-pcm.o
-snd-soc-samsung-spdif-objs := spdif.o
-
-obj-$(CONFIG_SND_S3C24XX_SOC) += snd-soc-s3c24xx.o
-obj-$(CONFIG_SND_S3C24XX_SOC_I2S) += snd-soc-s3c24xx-i2s.o
-obj-$(CONFIG_SND_S3C_SOC_AC97) += snd-soc-s3c-ac97.o
-obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
-obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += snd-soc-s3c64xx-i2s.o
-obj-$(CONFIG_SND_S3C64XX_SOC_I2S_V4) += snd-soc-s3c64xx-i2s-v4.o
-obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
-obj-$(CONFIG_SND_S3C_SOC_PCM) += snd-soc-s3c-pcm.o
-obj-$(CONFIG_SND_S5P_SOC_SPDIF) += snd-soc-samsung-spdif.o
-
-# S3C24XX Machine Support
-snd-soc-jive-wm8750-objs := jive_wm8750.o
-snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
-snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
-snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
-snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
-snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
-snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
-snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
-snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
-snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o
-snd-soc-smdk64xx-wm8580-objs := smdk64xx_wm8580.o
-snd-soc-smdk-wm9713-objs := smdk_wm9713.o
-snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
-snd-soc-aquila-wm8994-objs := aquila_wm8994.o
-snd-soc-goni-wm8994-objs := goni_wm8994.o
-snd-soc-smdk-spdif-objs := smdk_spdif.o
-
-obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o
-obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
-obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
-obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
-obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC) += snd-soc-s3c24xx-simtec.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
-obj-$(CONFIG_SND_S3C24XX_SOC_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
-obj-$(CONFIG_SND_S3C64XX_SOC_WM8580) += snd-soc-smdk64xx-wm8580.o
-obj-$(CONFIG_SND_SOC_SMDK_WM9713) += snd-soc-smdk-wm9713.o
-obj-$(CONFIG_SND_S3C64XX_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
-obj-$(CONFIG_SND_S5PC110_SOC_AQUILA_WM8994) += snd-soc-aquila-wm8994.o
-obj-$(CONFIG_SND_S5PV210_SOC_GONI_WM8994) += snd-soc-goni-wm8994.o
-obj-$(CONFIG_SND_SOC_SMDK_SPDIF) += snd-soc-smdk-spdif.o
diff --git a/sound/soc/s3c24xx/aquila_wm8994.c b/sound/soc/s3c24xx/aquila_wm8994.c
deleted file mode 100644
index 235d197..0000000
--- a/sound/soc/s3c24xx/aquila_wm8994.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * aquila_wm8994.c
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-#include <asm/mach-types.h>
-#include <mach/gpio.h>
-#include <mach/regs-clock.h>
-
-#include <linux/mfd/wm8994/core.h>
-#include <linux/mfd/wm8994/registers.h>
-#include "../codecs/wm8994.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-static struct snd_soc_card aquila;
-static struct platform_device *aquila_snd_device;
-
-/* 3.5 pie jack */
-static struct snd_soc_jack jack;
-
-/* 3.5 pie jack detection DAPM pins */
-static struct snd_soc_jack_pin jack_pins[] = {
-	{
-		.pin = "Headset Mic",
-		.mask = SND_JACK_MICROPHONE,
-	}, {
-		.pin = "Headset Stereophone",
-		.mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-	},
-};
-
-/* 3.5 pie jack detection gpios */
-static struct snd_soc_jack_gpio jack_gpios[] = {
-	{
-		.gpio = S5PV210_GPH0(6),
-		.name = "DET_3.5",
-		.report = SND_JACK_HEADSET | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-		.debounce_time = 200,
-	},
-};
-
-static const struct snd_soc_dapm_widget aquila_dapm_widgets[] = {
-	SND_SOC_DAPM_SPK("Ext Spk", NULL),
-	SND_SOC_DAPM_SPK("Ext Rcv", NULL),
-	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
-	SND_SOC_DAPM_MIC("Headset Mic", NULL),
-	SND_SOC_DAPM_MIC("Main Mic", NULL),
-	SND_SOC_DAPM_MIC("2nd Mic", NULL),
-	SND_SOC_DAPM_LINE("Radio In", NULL),
-};
-
-static const struct snd_soc_dapm_route aquila_dapm_routes[] = {
-	{"Ext Spk", NULL, "SPKOUTLP"},
-	{"Ext Spk", NULL, "SPKOUTLN"},
-
-	{"Ext Rcv", NULL, "HPOUT2N"},
-	{"Ext Rcv", NULL, "HPOUT2P"},
-
-	{"Headset Stereophone", NULL, "HPOUT1L"},
-	{"Headset Stereophone", NULL, "HPOUT1R"},
-
-	{"IN1RN", NULL, "Headset Mic"},
-	{"IN1RP", NULL, "Headset Mic"},
-
-	{"IN1RN", NULL, "2nd Mic"},
-	{"IN1RP", NULL, "2nd Mic"},
-
-	{"IN1LN", NULL, "Main Mic"},
-	{"IN1LP", NULL, "Main Mic"},
-
-	{"IN2LN", NULL, "Radio In"},
-	{"IN2RN", NULL, "Radio In"},
-};
-
-static int aquila_wm8994_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int ret;
-
-	/* add aquila specific widgets */
-	snd_soc_dapm_new_controls(codec, aquila_dapm_widgets,
-			ARRAY_SIZE(aquila_dapm_widgets));
-
-	/* set up aquila specific audio routes */
-	snd_soc_dapm_add_routes(codec, aquila_dapm_routes,
-			ARRAY_SIZE(aquila_dapm_routes));
-
-	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN");
-	snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1P");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2P");
-	snd_soc_dapm_nc_pin(codec, "SPKOUTRN");
-	snd_soc_dapm_nc_pin(codec, "SPKOUTRP");
-
-	snd_soc_dapm_sync(codec);
-
-	/* Headset jack detection */
-	ret = snd_soc_jack_new(&aquila, "Headset Jack",
-			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
-			&jack);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int aquila_hifi_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	/* set the cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the cpu system clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-			0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops aquila_hifi_ops = {
-	.hw_params = aquila_hifi_hw_params,
-};
-
-static int aquila_voice_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	if (params_rate(params) != 8000)
-		return -EINVAL;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
-			SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_dai_driver voice_dai = {
-	.name = "aquila-voice-dai",
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static struct snd_soc_ops aquila_voice_ops = {
-	.hw_params = aquila_voice_hw_params,
-};
-
-static struct snd_soc_dai_link aquila_dai[] = {
-{
-	.name = "WM8994",
-	.stream_name = "WM8994 HiFi",
-	.cpu_dai_name = "s3c64xx-i2s-v4",
-	.codec_dai_name = "wm8994-hifi",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.init = aquila_wm8994_init,
-	.ops = &aquila_hifi_ops,
-}, {
-	.name = "WM8994 Voice",
-	.stream_name = "Voice",
-	.cpu_dai_name = "aquila-voice-dai",
-	.codec_dai_name = "wm8994-voice",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.ops = &aquila_voice_ops,
-},
-};
-
-static struct snd_soc_card aquila = {
-	.name = "aquila",
-	.dai_link = aquila_dai,
-	.num_links = ARRAY_SIZE(aquila_dai),
-};
-
-static int __init aquila_init(void)
-{
-	int ret;
-
-	if (!machine_is_aquila())
-		return -ENODEV;
-
-	aquila_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!aquila_snd_device)
-		return -ENOMEM;
-
-	/* register voice DAI here */
-	ret = snd_soc_register_dai(&aquila_snd_device->dev, &voice_dai);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(aquila_snd_device, &aquila);
-	ret = platform_device_add(aquila_snd_device);
-
-	if (ret)
-		platform_device_put(aquila_snd_device);
-
-	return ret;
-}
-
-static void __exit aquila_exit(void)
-{
-	platform_device_unregister(aquila_snd_device);
-}
-
-module_init(aquila_init);
-module_exit(aquila_exit);
-
-/* Module information */
-MODULE_DESCRIPTION("ALSA SoC WM8994 Aquila(S5PC110)");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_LICENSE("GPL");
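
The aquila driver deleted above, like goni, jive and the other Samsung machine drivers that follow, registers its card through the legacy "soc-audio" platform device. For reference, the boilerplate these removed files share boils down to the sketch below (board_* names are illustrative; the dai_link fields are copied from the aquila file above, and nothing here is introduced by this patch):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static struct snd_soc_dai_link board_dai[] = {
	{
		.name = "WM8994",
		.stream_name = "WM8994 HiFi",
		.cpu_dai_name = "s3c64xx-i2s-v4",
		.codec_dai_name = "wm8994-hifi",
		.platform_name = "s3c24xx-pcm-audio",
		.codec_name = "wm8994-codec.0-0x1a",
	},
};

static struct snd_soc_card board_card = {
	.name = "board",
	.dai_link = board_dai,
	.num_links = ARRAY_SIZE(board_dai),
};

static struct platform_device *board_snd_device;

static int __init board_init(void)
{
	int ret;

	board_snd_device = platform_device_alloc("soc-audio", -1);
	if (!board_snd_device)
		return -ENOMEM;

	/* hand the card to the soc-audio probe via drvdata */
	platform_set_drvdata(board_snd_device, &board_card);
	ret = platform_device_add(board_snd_device);
	if (ret)
		platform_device_put(board_snd_device);

	return ret;
}
module_init(board_init);
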
diff --git a/sound/soc/s3c24xx/goni_wm8994.c b/sound/soc/s3c24xx/goni_wm8994.c
deleted file mode 100644
index 694f702..0000000
--- a/sound/soc/s3c24xx/goni_wm8994.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * goni_wm8994.c
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-#include <asm/mach-types.h>
-#include <mach/gpio.h>
-#include <mach/regs-clock.h>
-
-#include <linux/mfd/wm8994/core.h>
-#include <linux/mfd/wm8994/registers.h>
-#include "../codecs/wm8994.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-static struct snd_soc_card goni;
-static struct platform_device *goni_snd_device;
-
-/* 3.5 pie jack */
-static struct snd_soc_jack jack;
-
-/* 3.5 pie jack detection DAPM pins */
-static struct snd_soc_jack_pin jack_pins[] = {
-	{
-		.pin = "Headset Mic",
-		.mask = SND_JACK_MICROPHONE,
-	}, {
-		.pin = "Headset Stereophone",
-		.mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-	},
-};
-
-/* 3.5 pie jack detection gpios */
-static struct snd_soc_jack_gpio jack_gpios[] = {
-	{
-		.gpio = S5PV210_GPH0(6),
-		.name = "DET_3.5",
-		.report = SND_JACK_HEADSET | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-		.debounce_time = 200,
-	},
-};
-
-static const struct snd_soc_dapm_widget goni_dapm_widgets[] = {
-	SND_SOC_DAPM_SPK("Ext Left Spk", NULL),
-	SND_SOC_DAPM_SPK("Ext Right Spk", NULL),
-	SND_SOC_DAPM_SPK("Ext Rcv", NULL),
-	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
-	SND_SOC_DAPM_MIC("Headset Mic", NULL),
-	SND_SOC_DAPM_MIC("Main Mic", NULL),
-	SND_SOC_DAPM_MIC("2nd Mic", NULL),
-	SND_SOC_DAPM_LINE("Radio In", NULL),
-};
-
-static const struct snd_soc_dapm_route goni_dapm_routes[] = {
-	{"Ext Left Spk", NULL, "SPKOUTLP"},
-	{"Ext Left Spk", NULL, "SPKOUTLN"},
-
-	{"Ext Right Spk", NULL, "SPKOUTRP"},
-	{"Ext Right Spk", NULL, "SPKOUTRN"},
-
-	{"Ext Rcv", NULL, "HPOUT2N"},
-	{"Ext Rcv", NULL, "HPOUT2P"},
-
-	{"Headset Stereophone", NULL, "HPOUT1L"},
-	{"Headset Stereophone", NULL, "HPOUT1R"},
-
-	{"IN1RN", NULL, "Headset Mic"},
-	{"IN1RP", NULL, "Headset Mic"},
-
-	{"IN1RN", NULL, "2nd Mic"},
-	{"IN1RP", NULL, "2nd Mic"},
-
-	{"IN1LN", NULL, "Main Mic"},
-	{"IN1LP", NULL, "Main Mic"},
-
-	{"IN2LN", NULL, "Radio In"},
-	{"IN2RN", NULL, "Radio In"},
-};
-
-static int goni_wm8994_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int ret;
-
-	/* add goni specific widgets */
-	snd_soc_dapm_new_controls(codec, goni_dapm_widgets,
-			ARRAY_SIZE(goni_dapm_widgets));
-
-	/* set up goni specific audio routes */
-	snd_soc_dapm_add_routes(codec, goni_dapm_routes,
-			ARRAY_SIZE(goni_dapm_routes));
-
-	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN");
-	snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1P");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2P");
-
-	snd_soc_dapm_sync(codec);
-
-	/* Headset jack detection */
-	ret = snd_soc_jack_new(&goni, "Headset Jack",
-			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
-			&jack);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int goni_hifi_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	/* set the cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the cpu system clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-			0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops goni_hifi_ops = {
-	.hw_params = goni_hifi_hw_params,
-};
-
-static int goni_voice_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	if (params_rate(params) != 8000)
-		return -EINVAL;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
-			SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_dai_driver voice_dai = {
-	.name = "goni-voice-dai",
-	.id = 0,
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static struct snd_soc_ops goni_voice_ops = {
-	.hw_params = goni_voice_hw_params,
-};
-
-static struct snd_soc_dai_link goni_dai[] = {
-{
-	.name = "WM8994",
-	.stream_name = "WM8994 HiFi",
-	.cpu_dai_name = "s3c64xx-i2s-v4",
-	.codec_dai_name = "wm8994-hifi",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.init = goni_wm8994_init,
-	.ops = &goni_hifi_ops,
-}, {
-	.name = "WM8994 Voice",
-	.stream_name = "Voice",
-	.cpu_dai_name = "goni-voice-dai",
-	.codec_dai_name = "wm8994-voice",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.ops = &goni_voice_ops,
-},
-};
-
-static struct snd_soc_card goni = {
-	.name = "goni",
-	.dai_link = goni_dai,
-	.num_links = ARRAY_SIZE(goni_dai),
-};
-
-static int __init goni_init(void)
-{
-	int ret;
-
-	if (!machine_is_goni())
-		return -ENODEV;
-
-	goni_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!goni_snd_device)
-		return -ENOMEM;
-
-	/* register voice DAI here */
-	ret = snd_soc_register_dai(&goni_snd_device->dev, &voice_dai);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(goni_snd_device, &goni);
-	ret = platform_device_add(goni_snd_device);
-
-	if (ret)
-		platform_device_put(goni_snd_device);
-
-	return ret;
-}
-
-static void __exit goni_exit(void)
-{
-	platform_device_unregister(goni_snd_device);
-}
-
-module_init(goni_init);
-module_exit(goni_exit);
-
-/* Module information */
-MODULE_DESCRIPTION("ALSA SoC WM8994 GONI(S5PV210)");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/jive_wm8750.c b/sound/soc/s3c24xx/jive_wm8750.c
deleted file mode 100644
index 49605cd..0000000
--- a/sound/soc/s3c24xx/jive_wm8750.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/* sound/soc/s3c24xx/jive_wm8750.c
- *
- * Copyright 2007,2008 Simtec Electronics
- *
- * Based on sound/soc/pxa/spitz.c
- *	Copyright 2005 Wolfson Microelectronics PLC.
- *	Copyright 2005 Openedhand Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include <asm/mach-types.h>
-
-#include "s3c-dma.h"
-#include "s3c2412-i2s.h"
-
-#include "../codecs/wm8750.h"
-
-static const struct snd_soc_dapm_route audio_map[] = {
-	{ "Headphone Jack", NULL, "LOUT1" },
-	{ "Headphone Jack", NULL, "ROUT1" },
-	{ "Internal Speaker", NULL, "LOUT2" },
-	{ "Internal Speaker", NULL, "ROUT2" },
-	{ "LINPUT1", NULL, "Line Input" },
-	{ "RINPUT1", NULL, "Line Input" },
-};
-
-static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = {
-	SND_SOC_DAPM_HP("Headphone Jack", NULL),
-	SND_SOC_DAPM_SPK("Internal Speaker", NULL),
-	SND_SOC_DAPM_LINE("Line In", NULL),
-};
-
-static int jive_hw_params(struct snd_pcm_substream *substream,
-			  struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct s3c_i2sv2_rate_calc div;
-	unsigned int clk = 0;
-	int ret = 0;
-
-	switch (params_rate(params)) {
-	case 8000:
-	case 16000:
-	case 48000:
-	case 96000:
-		clk = 12288000;
-		break;
-	case 11025:
-	case 22050:
-	case 44100:
-		clk = 11289600;
-		break;
-	}
-
-	s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
-				s3c_i2sv2_get_clock(cpu_dai));
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-				  SND_SOC_DAIFMT_NB_NF |
-				  SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-				  SND_SOC_DAIFMT_NB_NF |
-				  SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
-				     SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_RCLK, div.fs_div);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_PRESCALER,
-				     div.clk_div - 1);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops jive_ops = {
-	.hw_params	= jive_hw_params,
-};
-
-static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int err;
-
-	/* These endpoints are not being used. */
-	snd_soc_dapm_nc_pin(codec, "LINPUT2");
-	snd_soc_dapm_nc_pin(codec, "RINPUT2");
-	snd_soc_dapm_nc_pin(codec, "LINPUT3");
-	snd_soc_dapm_nc_pin(codec, "RINPUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONO");
-
-	/* Add jive specific widgets */
-	err = snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
-					ARRAY_SIZE(wm8750_dapm_widgets));
-	if (err) {
-		printk(KERN_ERR "%s: failed to add widgets (%d)\n",
-		       __func__, err);
-		return err;
-	}
-
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static struct snd_soc_dai_link jive_dai = {
-	.name		= "wm8750",
-	.stream_name	= "WM8750",
-	.cpu_dai_name	= "s3c2412-i2s",
-	.codec_dai_name = "wm8750-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
-	.codec_name	= "wm8750-codec.0-0x1a",
-	.init		= jive_wm8750_init,
-	.ops		= &jive_ops,
-};
-
-/* jive audio machine driver */
-static struct snd_soc_card snd_soc_machine_jive = {
-	.name		= "Jive",
-	.dai_link	= &jive_dai,
-	.num_links	= 1,
-};
-
-static struct platform_device *jive_snd_device;
-
-static int __init jive_init(void)
-{
-	int ret;
-
-	if (!machine_is_jive())
-		return 0;
-
-	printk("JIVE WM8750 Audio support\n");
-
-	jive_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!jive_snd_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(jive_snd_device, &snd_soc_machine_jive);
-	ret = platform_device_add(jive_snd_device);
-
-	if (ret)
-		platform_device_put(jive_snd_device);
-
-	return ret;
-}
-
-static void __exit jive_exit(void)
-{
-	platform_device_unregister(jive_snd_device);
-}
-
-module_init(jive_init);
-module_exit(jive_exit);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("ALSA SoC Jive Audio support");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/ln2440sbc_alc650.c b/sound/soc/s3c24xx/ln2440sbc_alc650.c
deleted file mode 100644
index abe64ab..0000000
--- a/sound/soc/s3c24xx/ln2440sbc_alc650.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * SoC audio for ln2440sbc
- *
- * Copyright 2007 KonekTel, a.s.
- * Author: Ivan Kuten
- *         ivan.kuten@promwad.com
- *
- * Heavily based on smdk2443_wm9710.c
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
-
-static struct snd_soc_card ln2440sbc;
-
-static struct snd_soc_dai_link ln2440sbc_dai[] = {
-{
-	.name = "AC97",
-	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "s3c-ac97",
-	.codec_dai_name = "ac97-hifi",
-	.codec_name = "ac97-codec",
-	.platform_name = "s3c24xx-pcm-audio",
-},
-};
-
-static struct snd_soc_card ln2440sbc = {
-	.name = "LN2440SBC",
-	.dai_link = ln2440sbc_dai,
-	.num_links = ARRAY_SIZE(ln2440sbc_dai),
-};
-
-static struct platform_device *ln2440sbc_snd_ac97_device;
-
-static int __init ln2440sbc_init(void)
-{
-	int ret;
-
-	ln2440sbc_snd_ac97_device = platform_device_alloc("soc-audio", -1);
-	if (!ln2440sbc_snd_ac97_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(ln2440sbc_snd_ac97_device, &ln2440sbc);
-	ret = platform_device_add(ln2440sbc_snd_ac97_device);
-
-	if (ret)
-		platform_device_put(ln2440sbc_snd_ac97_device);
-
-	return ret;
-}
-
-static void __exit ln2440sbc_exit(void)
-{
-	platform_device_unregister(ln2440sbc_snd_ac97_device);
-}
-
-module_init(ln2440sbc_init);
-module_exit(ln2440sbc_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ivan Kuten");
-MODULE_DESCRIPTION("ALSA SoC ALC650 LN2440SBC");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
deleted file mode 100644
index e97bdf1..0000000
--- a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
+++ /dev/null
@@ -1,504 +0,0 @@
-/*
- * neo1973_gta02_wm8753.c  --  SoC audio for Openmoko Freerunner(GTA02)
- *
- * Copyright 2007 Openmoko Inc
- * Author: Graeme Gregory <graeme@openmoko.org>
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory <linux@wolfsonmicro.com>
- * Copyright 2009 Wolfson Microelectronics
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include <asm/mach-types.h>
-
-#include <plat/regs-iis.h>
-
-#include <mach/regs-clock.h>
-#include <asm/io.h>
-#include <mach/gta02.h>
-#include "../codecs/wm8753.h"
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-
-static struct snd_soc_card neo1973_gta02;
-
-static int neo1973_gta02_hifi_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int pll_out = 0, bclk = 0;
-	int ret = 0;
-	unsigned long iis_clkrate;
-
-	iis_clkrate = s3c24xx_i2s_get_clockrate();
-
-	switch (params_rate(params)) {
-	case 8000:
-	case 16000:
-		pll_out = 12288000;
-		break;
-	case 48000:
-		bclk = WM8753_BCLK_DIV_4;
-		pll_out = 12288000;
-		break;
-	case 96000:
-		bclk = WM8753_BCLK_DIV_2;
-		pll_out = 12288000;
-		break;
-	case 11025:
-		bclk = WM8753_BCLK_DIV_16;
-		pll_out = 11289600;
-		break;
-	case 22050:
-		bclk = WM8753_BCLK_DIV_8;
-		pll_out = 11289600;
-		break;
-	case 44100:
-		bclk = WM8753_BCLK_DIV_4;
-		pll_out = 11289600;
-		break;
-	case 88200:
-		bclk = WM8753_BCLK_DIV_2;
-		pll_out = 11289600;
-		break;
-	}
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai,
-		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
-		SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai,
-		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
-		SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
-		SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set MCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
-		S3C2410_IISMOD_32FS);
-	if (ret < 0)
-		return ret;
-
-	/* set codec BCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(codec_dai,
-					WM8753_BCLKDIV, bclk);
-	if (ret < 0)
-		return ret;
-
-	/* set prescaler division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
-		S3C24XX_PRESCALE(4, 4));
-	if (ret < 0)
-		return ret;
-
-	/* codec PLL input is PCLK/4 */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0,
-		iis_clkrate / 4, pll_out);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int neo1973_gta02_hifi_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
-	/* disable the PLL */
-	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0);
-}
-
-/*
- * Neo1973 WM8753 HiFi DAI opserations.
- */
-static struct snd_soc_ops neo1973_gta02_hifi_ops = {
-	.hw_params = neo1973_gta02_hifi_hw_params,
-	.hw_free = neo1973_gta02_hifi_hw_free,
-};
-
-static int neo1973_gta02_voice_hw_params(
-	struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pcmdiv = 0;
-	int ret = 0;
-	unsigned long iis_clkrate;
-
-	iis_clkrate = s3c24xx_i2s_get_clockrate();
-
-	if (params_rate(params) != 8000)
-		return -EINVAL;
-	if (params_channels(params) != 1)
-		return -EINVAL;
-
-	pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
-
-	/* todo: gg check mode (DSP_B) against CSR datasheet */
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
-		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK,
-		12288000, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set codec PCM division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV,
-					pcmdiv);
-	if (ret < 0)
-		return ret;
-
-	/* configure and enable PLL for 12.288MHz output */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
-		iis_clkrate / 4, 12288000);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int neo1973_gta02_voice_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
-	/* disable the PLL */
-	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
-}
-
-static struct snd_soc_ops neo1973_gta02_voice_ops = {
-	.hw_params = neo1973_gta02_voice_hw_params,
-	.hw_free = neo1973_gta02_voice_hw_free,
-};
-
-#define LM4853_AMP 1
-#define LM4853_SPK 2
-
-static u8 lm4853_state;
-
-/* This has no effect, it exists only to maintain compatibility with
- * existing ALSA state files.
- */
-static int lm4853_set_state(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	int val = ucontrol->value.integer.value[0];
-
-	if (val)
-		lm4853_state |= LM4853_AMP;
-	else
-		lm4853_state &= ~LM4853_AMP;
-
-	return 0;
-}
-
-static int lm4853_get_state(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = lm4853_state & LM4853_AMP;
-
-	return 0;
-}
-
-static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	int val = ucontrol->value.integer.value[0];
-
-	if (val) {
-		lm4853_state |= LM4853_SPK;
-		gpio_set_value(GTA02_GPIO_HP_IN, 0);
-	} else {
-		lm4853_state &= ~LM4853_SPK;
-		gpio_set_value(GTA02_GPIO_HP_IN, 1);
-	}
-
-	return 0;
-}
-
-static int lm4853_get_spk(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = (lm4853_state & LM4853_SPK) >> 1;
-
-	return 0;
-}
-
-static int lm4853_event(struct snd_soc_dapm_widget *w,
-			struct snd_kcontrol *k,
-			int event)
-{
-	gpio_set_value(GTA02_GPIO_AMP_SHUT, SND_SOC_DAPM_EVENT_OFF(event));
-
-	return 0;
-}
-
-static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
-	SND_SOC_DAPM_SPK("Stereo Out", lm4853_event),
-	SND_SOC_DAPM_LINE("GSM Line Out", NULL),
-	SND_SOC_DAPM_LINE("GSM Line In", NULL),
-	SND_SOC_DAPM_MIC("Headset Mic", NULL),
-	SND_SOC_DAPM_MIC("Handset Mic", NULL),
-	SND_SOC_DAPM_SPK("Handset Spk", NULL),
-};
-
-
-/* example machine audio map connections */
-static const struct snd_soc_dapm_route audio_map[] = {
-
-	/* Connections to the lm4853 amp */
-	{"Stereo Out", NULL, "LOUT1"},
-	{"Stereo Out", NULL, "ROUT1"},
-
-	/* Connections to the GSM Module */
-	{"GSM Line Out", NULL, "MONO1"},
-	{"GSM Line Out", NULL, "MONO2"},
-	{"RXP", NULL, "GSM Line In"},
-	{"RXN", NULL, "GSM Line In"},
-
-	/* Connections to Headset */
-	{"MIC1", NULL, "Mic Bias"},
-	{"Mic Bias", NULL, "Headset Mic"},
-
-	/* Call Mic */
-	{"MIC2", NULL, "Mic Bias"},
-	{"MIC2N", NULL, "Mic Bias"},
-	{"Mic Bias", NULL, "Handset Mic"},
-
-	/* Call Speaker */
-	{"Handset Spk", NULL, "LOUT2"},
-	{"Handset Spk", NULL, "ROUT2"},
-
-	/* Connect the ALC pins */
-	{"ACIN", NULL, "ACOP"},
-};
-
-static const struct snd_kcontrol_new wm8753_neo1973_gta02_controls[] = {
-	SOC_DAPM_PIN_SWITCH("Stereo Out"),
-	SOC_DAPM_PIN_SWITCH("GSM Line Out"),
-	SOC_DAPM_PIN_SWITCH("GSM Line In"),
-	SOC_DAPM_PIN_SWITCH("Headset Mic"),
-	SOC_DAPM_PIN_SWITCH("Handset Mic"),
-	SOC_DAPM_PIN_SWITCH("Handset Spk"),
-
-	/* This has no effect, it exists only to maintain compatibility with
-	 * existing ALSA state files.
-	 */
-	SOC_SINGLE_EXT("Amp State Switch", 6, 0, 1, 0,
-		lm4853_get_state,
-		lm4853_set_state),
-	SOC_SINGLE_EXT("Amp Spk Switch", 7, 0, 1, 0,
-		lm4853_get_spk,
-		lm4853_set_spk),
-};
-
-/*
- * This is an example machine initialisation for a wm8753 connected to a
- * neo1973 GTA02.
- */
-static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int err;
-
-	/* set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT4");
-	snd_soc_dapm_nc_pin(codec, "LINE1");
-	snd_soc_dapm_nc_pin(codec, "LINE2");
-
-	/* Add neo1973 gta02 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
-				  ARRAY_SIZE(wm8753_dapm_widgets));
-
-	/* add neo1973 gta02 specific controls */
-	err = snd_soc_add_controls(codec, wm8753_neo1973_gta02_controls,
-		ARRAY_SIZE(wm8753_neo1973_gta02_controls));
-
-	if (err < 0)
-		return err;
-
-	/* set up neo1973 gta02 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-
-	/* set endpoints to default off mode */
-	snd_soc_dapm_disable_pin(codec, "Stereo Out");
-	snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-	snd_soc_dapm_disable_pin(codec, "GSM Line In");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic");
-	snd_soc_dapm_disable_pin(codec, "Handset Mic");
-	snd_soc_dapm_disable_pin(codec, "Handset Spk");
-
-	/* allow audio paths from the GSM modem to run during suspend */
-	snd_soc_dapm_ignore_suspend(codec, "Stereo Out");
-	snd_soc_dapm_ignore_suspend(codec, "GSM Line Out");
-	snd_soc_dapm_ignore_suspend(codec, "GSM Line In");
-	snd_soc_dapm_ignore_suspend(codec, "Headset Mic");
-	snd_soc_dapm_ignore_suspend(codec, "Handset Mic");
-	snd_soc_dapm_ignore_suspend(codec, "Handset Spk");
-
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-/*
- * BT Codec DAI
- */
-static struct snd_soc_dai_driver bt_dai = {
-	.name = "bluetooth-dai",
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 1,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 1,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static struct snd_soc_dai_link neo1973_gta02_dai[] = {
-{ /* HiFi Playback - for simultaneous use with voice below */
-	.name = "WM8753",
-	.stream_name = "WM8753 HiFi",
-	.cpu_dai_name = "s3c24xx-i2s",
-	.codec_dai_name = "wm8753-hifi",
-	.init = neo1973_gta02_wm8753_init,
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8753-codec.0-0x1a",
-	.ops = &neo1973_gta02_hifi_ops,
-},
-{ /* Voice via BT */
-	.name = "Bluetooth",
-	.stream_name = "Voice",
-	.cpu_dai_name = "bluetooth-dai",
-	.codec_dai_name = "wm8753-voice",
-	.ops = &neo1973_gta02_voice_ops,
-	.codec_name = "wm8753-codec.0-0x1a",
-	.platform_name = "s3c24xx-pcm-audio",
-},
-};
-
-static struct snd_soc_card neo1973_gta02 = {
-	.name = "neo1973-gta02",
-	.dai_link = neo1973_gta02_dai,
-	.num_links = ARRAY_SIZE(neo1973_gta02_dai),
-};
-
-static struct platform_device *neo1973_gta02_snd_device;
-
-static int __init neo1973_gta02_init(void)
-{
-	int ret;
-
-	if (!machine_is_neo1973_gta02()) {
-		printk(KERN_INFO
-		       "Only GTA02 is supported by this ASoC driver\n");
-		return -ENODEV;
-	}
-
-	neo1973_gta02_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!neo1973_gta02_snd_device)
-		return -ENOMEM;
-
-	/* register bluetooth DAI here */
-	ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, -1, &bt_dai);
-	if (ret) {
-		platform_device_put(neo1973_gta02_snd_device);
-		return ret;
-	}
-
-	platform_set_drvdata(neo1973_gta02_snd_device, &neo1973_gta02);
-	ret = platform_device_add(neo1973_gta02_snd_device);
-
-	if (ret) {
-		platform_device_put(neo1973_gta02_snd_device);
-		return ret;
-	}
-
-	/* Initialise GPIOs used by amp */
-	ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
-	if (ret) {
-		pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
-		goto err_unregister_device;
-	}
-
-	ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
-	if (ret) {
-		pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_HP_IN);
-		goto err_free_gpio_hp_in;
-	}
-
-	ret = gpio_request(GTA02_GPIO_AMP_SHUT, "GTA02_AMP_SHUT");
-	if (ret) {
-		pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_AMP_SHUT);
-		goto err_free_gpio_hp_in;
-	}
-
-	ret = gpio_direction_output(GTA02_GPIO_AMP_SHUT, 1);
-	if (ret) {
-		pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_AMP_SHUT);
-		goto err_free_gpio_amp_shut;
-	}
-
-	return 0;
-
-err_free_gpio_amp_shut:
-	gpio_free(GTA02_GPIO_AMP_SHUT);
-err_free_gpio_hp_in:
-	gpio_free(GTA02_GPIO_HP_IN);
-err_unregister_device:
-	platform_device_unregister(neo1973_gta02_snd_device);
-	return ret;
-}
-module_init(neo1973_gta02_init);
-
-static void __exit neo1973_gta02_exit(void)
-{
-	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev, -1);
-	platform_device_unregister(neo1973_gta02_snd_device);
-	gpio_free(GTA02_GPIO_HP_IN);
-	gpio_free(GTA02_GPIO_AMP_SHUT);
-}
-module_exit(neo1973_gta02_exit);
-
-/* Module information */
-MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org");
-MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 GTA02");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/neo1973_wm8753.c b/sound/soc/s3c24xx/neo1973_wm8753.c
deleted file mode 100644
index f4f2ee7..0000000
--- a/sound/soc/s3c24xx/neo1973_wm8753.c
+++ /dev/null
@@ -1,704 +0,0 @@
-/*
- * neo1973_wm8753.c  --  SoC audio for Neo1973
- *
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/tlv.h>
-
-#include <asm/mach-types.h>
-#include <asm/hardware/scoop.h>
-#include <mach/regs-clock.h>
-#include <mach/regs-gpio.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <mach/spi-gpio.h>
-
-#include <plat/regs-iis.h>
-
-#include "../codecs/wm8753.h"
-#include "lm4857.h"
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-
-/* define the scenarios */
-#define NEO_AUDIO_OFF			0
-#define NEO_GSM_CALL_AUDIO_HANDSET	1
-#define NEO_GSM_CALL_AUDIO_HEADSET	2
-#define NEO_GSM_CALL_AUDIO_BLUETOOTH	3
-#define NEO_STEREO_TO_SPEAKERS		4
-#define NEO_STEREO_TO_HEADPHONES	5
-#define NEO_CAPTURE_HANDSET		6
-#define NEO_CAPTURE_HEADSET		7
-#define NEO_CAPTURE_BLUETOOTH		8
-
-static struct snd_soc_card neo1973;
-static struct i2c_client *i2c;
-
-static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int pll_out = 0, bclk = 0;
-	int ret = 0;
-	unsigned long iis_clkrate;
-
-	pr_debug("Entered %s\n", __func__);
-
-	iis_clkrate = s3c24xx_i2s_get_clockrate();
-
-	switch (params_rate(params)) {
-	case 8000:
-	case 16000:
-		pll_out = 12288000;
-		break;
-	case 48000:
-		bclk = WM8753_BCLK_DIV_4;
-		pll_out = 12288000;
-		break;
-	case 96000:
-		bclk = WM8753_BCLK_DIV_2;
-		pll_out = 12288000;
-		break;
-	case 11025:
-		bclk = WM8753_BCLK_DIV_16;
-		pll_out = 11289600;
-		break;
-	case 22050:
-		bclk = WM8753_BCLK_DIV_8;
-		pll_out = 11289600;
-		break;
-	case 44100:
-		bclk = WM8753_BCLK_DIV_4;
-		pll_out = 11289600;
-		break;
-	case 88200:
-		bclk = WM8753_BCLK_DIV_2;
-		pll_out = 11289600;
-		break;
-	}
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai,
-		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
-		SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai,
-		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
-		SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
-		SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set MCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
-		S3C2410_IISMOD_32FS);
-	if (ret < 0)
-		return ret;
-
-	/* set codec BCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_BCLKDIV, bclk);
-	if (ret < 0)
-		return ret;
-
-	/* set prescaler division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
-		S3C24XX_PRESCALE(4, 4));
-	if (ret < 0)
-		return ret;
-
-	/* codec PLL input is PCLK/4 */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0,
-		iis_clkrate / 4, pll_out);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int neo1973_hifi_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
-	pr_debug("Entered %s\n", __func__);
-
-	/* disable the PLL */
-	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0);
-}
-
-/*
- * Neo1973 WM8753 HiFi DAI operations.
- */
-static struct snd_soc_ops neo1973_hifi_ops = {
-	.hw_params = neo1973_hifi_hw_params,
-	.hw_free = neo1973_hifi_hw_free,
-};
-
-static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pcmdiv = 0;
-	int ret = 0;
-	unsigned long iis_clkrate;
-
-	pr_debug("Entered %s\n", __func__);
-
-	iis_clkrate = s3c24xx_i2s_get_clockrate();
-
-	if (params_rate(params) != 8000)
-		return -EINVAL;
-	if (params_channels(params) != 1)
-		return -EINVAL;
-
-	pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
-
-	/* todo: gg check mode (DSP_B) against CSR datasheet */
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
-		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK, 12288000,
-		SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set codec PCM division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV, pcmdiv);
-	if (ret < 0)
-		return ret;
-
-	/* configure and enable PLL for 12.288MHz output */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
-		iis_clkrate / 4, 12288000);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int neo1973_voice_hw_free(struct snd_pcm_substream *substream)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
-	pr_debug("Entered %s\n", __func__);
-
-	/* disable the PLL */
-	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
-}
-
-static struct snd_soc_ops neo1973_voice_ops = {
-	.hw_params = neo1973_voice_hw_params,
-	.hw_free = neo1973_voice_hw_free,
-};
-
-static int neo1973_scenario;
-
-static int neo1973_get_scenario(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = neo1973_scenario;
-	return 0;
-}
-
-static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	switch (neo1973_scenario) {
-	case NEO_AUDIO_OFF:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	case NEO_GSM_CALL_AUDIO_HANDSET:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
-		break;
-	case NEO_GSM_CALL_AUDIO_HEADSET:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	case NEO_GSM_CALL_AUDIO_BLUETOOTH:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	case NEO_STEREO_TO_SPEAKERS:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	case NEO_STEREO_TO_HEADPHONES:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	case NEO_CAPTURE_HANDSET:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
-		break;
-	case NEO_CAPTURE_HEADSET:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	case NEO_CAPTURE_BLUETOOTH:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		break;
-	default:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-	}
-
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static int neo1973_set_scenario(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (neo1973_scenario == ucontrol->value.integer.value[0])
-		return 0;
-
-	neo1973_scenario = ucontrol->value.integer.value[0];
-	set_scenario_endpoints(codec, neo1973_scenario);
-	return 1;
-}
-
-static u8 lm4857_regs[4] = {0x00, 0x40, 0x80, 0xC0};
-
-static void lm4857_write_regs(void)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	if (i2c_master_send(i2c, lm4857_regs, 4) != 4)
-		printk(KERN_ERR "lm4857: i2c write failed\n");
-}
-
-static int lm4857_get_reg(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct soc_mixer_control *mc =
-		(struct soc_mixer_control *)kcontrol->private_value;
-	int reg = mc->reg;
-	int shift = mc->shift;
-	int mask = mc->max;
-
-	pr_debug("Entered %s\n", __func__);
-
-	ucontrol->value.integer.value[0] = (lm4857_regs[reg] >> shift) & mask;
-	return 0;
-}
-
-static int lm4857_set_reg(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	struct soc_mixer_control *mc =
-		(struct soc_mixer_control *)kcontrol->private_value;
-	int reg = mc->reg;
-	int shift = mc->shift;
-	int mask = mc->max;
-
-	if (((lm4857_regs[reg] >> shift) & mask) ==
-		ucontrol->value.integer.value[0])
-		return 0;
-
-	lm4857_regs[reg] &= ~(mask << shift);
-	lm4857_regs[reg] |= ucontrol->value.integer.value[0] << shift;
-	lm4857_write_regs();
-	return 1;
-}
-
-static int lm4857_get_mode(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	u8 value = lm4857_regs[LM4857_CTRL] & 0x0F;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (value)
-		value -= 5;
-
-	ucontrol->value.integer.value[0] = value;
-	return 0;
-}
-
-static int lm4857_set_mode(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
-{
-	u8 value = ucontrol->value.integer.value[0];
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (value)
-		value += 5;
-
-	if ((lm4857_regs[LM4857_CTRL] & 0x0F) == value)
-		return 0;
-
-	lm4857_regs[LM4857_CTRL] &= 0xF0;
-	lm4857_regs[LM4857_CTRL] |= value;
-	lm4857_write_regs();
-	return 1;
-}
-
-static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
-	SND_SOC_DAPM_LINE("Audio Out", NULL),
-	SND_SOC_DAPM_LINE("GSM Line Out", NULL),
-	SND_SOC_DAPM_LINE("GSM Line In", NULL),
-	SND_SOC_DAPM_MIC("Headset Mic", NULL),
-	SND_SOC_DAPM_MIC("Call Mic", NULL),
-};
-
-
-static const struct snd_soc_dapm_route dapm_routes[] = {
-
-	/* Connections to the lm4857 amp */
-	{"Audio Out", NULL, "LOUT1"},
-	{"Audio Out", NULL, "ROUT1"},
-
-	/* Connections to the GSM Module */
-	{"GSM Line Out", NULL, "MONO1"},
-	{"GSM Line Out", NULL, "MONO2"},
-	{"RXP", NULL, "GSM Line In"},
-	{"RXN", NULL, "GSM Line In"},
-
-	/* Connections to Headset */
-	{"MIC1", NULL, "Mic Bias"},
-	{"Mic Bias", NULL, "Headset Mic"},
-
-	/* Call Mic */
-	{"MIC2", NULL, "Mic Bias"},
-	{"MIC2N", NULL, "Mic Bias"},
-	{"Mic Bias", NULL, "Call Mic"},
-
-	/* Connect the ALC pins */
-	{"ACIN", NULL, "ACOP"},
-};
-
-static const char *lm4857_mode[] = {
-	"Off",
-	"Call Speaker",
-	"Stereo Speakers",
-	"Stereo Speakers + Headphones",
-	"Headphones"
-};
-
-static const struct soc_enum lm4857_mode_enum[] = {
-	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lm4857_mode), lm4857_mode),
-};
-
-static const char *neo_scenarios[] = {
-	"Off",
-	"GSM Handset",
-	"GSM Headset",
-	"GSM Bluetooth",
-	"Speakers",
-	"Headphones",
-	"Capture Handset",
-	"Capture Headset",
-	"Capture Bluetooth"
-};
-
-static const struct soc_enum neo_scenario_enum[] = {
-	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(neo_scenarios), neo_scenarios),
-};
-
-static const DECLARE_TLV_DB_SCALE(stereo_tlv, -4050, 150, 0);
-static const DECLARE_TLV_DB_SCALE(mono_tlv, -3450, 150, 0);
-
-static const struct snd_kcontrol_new wm8753_neo1973_controls[] = {
-	SOC_SINGLE_EXT_TLV("Amp Left Playback Volume", LM4857_LVOL, 0, 31, 0,
-		lm4857_get_reg, lm4857_set_reg, stereo_tlv),
-	SOC_SINGLE_EXT_TLV("Amp Right Playback Volume", LM4857_RVOL, 0, 31, 0,
-		lm4857_get_reg, lm4857_set_reg, stereo_tlv),
-	SOC_SINGLE_EXT_TLV("Amp Mono Playback Volume", LM4857_MVOL, 0, 31, 0,
-		lm4857_get_reg, lm4857_set_reg, mono_tlv),
-	SOC_ENUM_EXT("Amp Mode", lm4857_mode_enum[0],
-		lm4857_get_mode, lm4857_set_mode),
-	SOC_ENUM_EXT("Neo Mode", neo_scenario_enum[0],
-		neo1973_get_scenario, neo1973_set_scenario),
-	SOC_SINGLE_EXT("Amp Spk 3D Playback Switch", LM4857_LVOL, 5, 1, 0,
-		lm4857_get_reg, lm4857_set_reg),
-	SOC_SINGLE_EXT("Amp HP 3d Playback Switch", LM4857_RVOL, 5, 1, 0,
-		lm4857_get_reg, lm4857_set_reg),
-	SOC_SINGLE_EXT("Amp Fast Wakeup Playback Switch", LM4857_CTRL, 5, 1, 0,
-		lm4857_get_reg, lm4857_set_reg),
-	SOC_SINGLE_EXT("Amp Earpiece 6dB Playback Switch", LM4857_CTRL, 4, 1, 0,
-		lm4857_get_reg, lm4857_set_reg),
-};
-
-/*
- * This is an example machine initialisation for a wm8753 connected to a
- * neo1973 II. It is missing logic to detect hp/mic insertions and logic
- * to re-route the audio in such an event.
- */
-static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int err;
-
-	pr_debug("Entered %s\n", __func__);
-
-	/* set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "LOUT2");
-	snd_soc_dapm_nc_pin(codec, "ROUT2");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT4");
-	snd_soc_dapm_nc_pin(codec, "LINE1");
-	snd_soc_dapm_nc_pin(codec, "LINE2");
-
-	/* Add neo1973 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
-				  ARRAY_SIZE(wm8753_dapm_widgets));
-
-	/* set endpoints to default mode */
-	set_scenario_endpoints(codec, NEO_AUDIO_OFF);
-
-	/* add neo1973 specific controls */
-	err = snd_soc_add_controls(codec, wm8753_neo1973_controls,
-				ARRAY_SIZE(wm8753_neo1973_controls));
-	if (err < 0)
-		return err;
-
-	/* set up neo1973 specific audio routes */
-	err = snd_soc_dapm_add_routes(codec, dapm_routes,
-				      ARRAY_SIZE(dapm_routes));
-
-	snd_soc_dapm_sync(codec);
-	return 0;
-}
-
-/*
- * BT Codec DAI
- */
-static struct snd_soc_dai bt_dai = {
-	.name = "bluetooth-dai",
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 1,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 1,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static struct snd_soc_dai_link neo1973_dai[] = {
-{ /* HiFi Playback - for simultaneous use with voice below */
-	.name = "WM8753",
-	.stream_name = "WM8753 HiFi",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "s3c24xx-i2s",
-	.codec_dai_name = "wm8753-hifi",
-	.codec_name = "wm8753-codec.0-0x1a",
-	.init = neo1973_wm8753_init,
-	.ops = &neo1973_hifi_ops,
-},
-{ /* Voice via BT */
-	.name = "Bluetooth",
-	.stream_name = "Voice",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "bluetooth-dai",
-	.codec_dai_name = "wm8753-voice",
-	.codec_name = "wm8753-codec.0-0x1a",
-	.ops = &neo1973_voice_ops,
-},
-};
-
-static struct snd_soc_card neo1973 = {
-	.name = "neo1973",
-	.dai_link = neo1973_dai,
-	.num_links = ARRAY_SIZE(neo1973_dai),
-};
-
-static int lm4857_i2c_probe(struct i2c_client *client,
-			    const struct i2c_device_id *id)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	i2c = client;
-
-	lm4857_write_regs();
-	return 0;
-}
-
-static int lm4857_i2c_remove(struct i2c_client *client)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	i2c = NULL;
-
-	return 0;
-}
-
-static u8 lm4857_state;
-
-static int lm4857_suspend(struct i2c_client *dev, pm_message_t state)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	dev_dbg(&dev->dev, "lm4857_suspend\n");
-	lm4857_state = lm4857_regs[LM4857_CTRL] & 0xf;
-	if (lm4857_state) {
-		lm4857_regs[LM4857_CTRL] &= 0xf0;
-		lm4857_write_regs();
-	}
-	return 0;
-}
-
-static int lm4857_resume(struct i2c_client *dev)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	if (lm4857_state) {
-		lm4857_regs[LM4857_CTRL] |= (lm4857_state & 0x0f);
-		lm4857_write_regs();
-	}
-	return 0;
-}
-
-static void lm4857_shutdown(struct i2c_client *dev)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	dev_dbg(&dev->dev, "lm4857_shutdown\n");
-	lm4857_regs[LM4857_CTRL] &= 0xf0;
-	lm4857_write_regs();
-}
-
-static const struct i2c_device_id lm4857_i2c_id[] = {
-	{ "neo1973_lm4857", 0 },
-	{ }
-};
-
-static struct i2c_driver lm4857_i2c_driver = {
-	.driver = {
-		.name = "LM4857 I2C Amp",
-		.owner = THIS_MODULE,
-	},
-	.suspend =        lm4857_suspend,
-	.resume	=         lm4857_resume,
-	.shutdown =       lm4857_shutdown,
-	.probe =          lm4857_i2c_probe,
-	.remove =         lm4857_i2c_remove,
-	.id_table =       lm4857_i2c_id,
-};
-
-static struct platform_device *neo1973_snd_device;
-
-static int __init neo1973_init(void)
-{
-	int ret;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (!machine_is_neo1973_gta01()) {
-		printk(KERN_INFO
-			"Only GTA01 hardware supported by ASoC driver\n");
-		return -ENODEV;
-	}
-
-	neo1973_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!neo1973_snd_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(neo1973_snd_device, &neo1973);
-	ret = platform_device_add(neo1973_snd_device);
-
-	if (ret) {
-		platform_device_put(neo1973_snd_device);
-		return ret;
-	}
-
-	ret = i2c_add_driver(&lm4857_i2c_driver);
-
-	if (ret != 0)
-		platform_device_unregister(neo1973_snd_device);
-
-	return ret;
-}
-
-static void __exit neo1973_exit(void)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	i2c_del_driver(&lm4857_i2c_driver);
-	platform_device_unregister(neo1973_snd_device);
-}
-
-module_init(neo1973_init);
-module_exit(neo1973_exit);
-
-/* Module information */
-MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org, www.openmoko.org");
-MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/s3c24xx/rx1950_uda1380.c
deleted file mode 100644
index 468cc11..0000000
--- a/sound/soc/s3c24xx/rx1950_uda1380.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * rx1950.c  --  ALSA Soc Audio Layer
- *
- * Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
- *
- * Based on smdk2440.c and magician.c
- *
- * Authors: Graeme Gregory graeme.gregory@wolfsonmicro.com
- *          Philipp Zabel <philipp.zabel@gmail.com>
- *          Denis Grigoriev <dgreenday@gmail.com>
- *          Vasily Khoruzhick <anarsoul@gmail.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/uda1380.h>
-#include <sound/jack.h>
-
-#include <plat/regs-iis.h>
-
-#include <mach/regs-clock.h>
-
-#include <asm/mach-types.h>
-
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-#include "../codecs/uda1380.h"
-
-static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd);
-static int rx1950_startup(struct snd_pcm_substream *substream);
-static int rx1950_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params);
-static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
-				struct snd_kcontrol *kcontrol, int event);
-
-static unsigned int rates[] = {
-	16000,
-	44100,
-	48000,
-};
-
-static struct snd_pcm_hw_constraint_list hw_rates = {
-	.count = ARRAY_SIZE(rates),
-	.list = rates,
-	.mask = 0,
-};
-
-static struct snd_soc_jack hp_jack;
-
-static struct snd_soc_jack_pin hp_jack_pins[] = {
-	{
-		.pin	= "Headphone Jack",
-		.mask	= SND_JACK_HEADPHONE,
-	},
-	{
-		.pin	= "Speaker",
-		.mask	= SND_JACK_HEADPHONE,
-		.invert	= 1,
-	},
-};
-
-static struct snd_soc_jack_gpio hp_jack_gpios[] = {
-	[0] = {
-		.gpio			= S3C2410_GPG(12),
-		.name			= "hp-gpio",
-		.report			= SND_JACK_HEADPHONE,
-		.invert			= 1,
-		.debounce_time		= 200,
-	},
-};
-
-static struct snd_soc_ops rx1950_ops = {
-	.startup	= rx1950_startup,
-	.hw_params	= rx1950_hw_params,
-};
-
-/* s3c24xx digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link rx1950_uda1380_dai[] = {
-	{
-		.name		= "uda1380",
-		.stream_name	= "UDA1380 Duplex",
-		.cpu_dai_name	= "s3c24xx-iis",
-		.codec_dai_name	= "uda1380-hifi",
-		.init		= rx1950_uda1380_init,
-		.platform_name	= "s3c24xx-pcm-audio",
-		.codec_name	= "uda1380-codec.0-001a",
-		.ops		= &rx1950_ops,
-	},
-};
-
-static struct snd_soc_card rx1950_asoc = {
-	.name = "rx1950",
-	.dai_link = rx1950_uda1380_dai,
-	.num_links = ARRAY_SIZE(rx1950_uda1380_dai),
-};
-
-/* rx1950 machine dapm widgets */
-static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
-	SND_SOC_DAPM_HP("Headphone Jack", NULL),
-	SND_SOC_DAPM_MIC("Mic Jack", NULL),
-	SND_SOC_DAPM_SPK("Speaker", rx1950_spk_power),
-};
-
-/* rx1950 machine audio_map */
-static const struct snd_soc_dapm_route audio_map[] = {
-	/* headphone connected to VOUTLHP, VOUTRHP */
-	{"Headphone Jack", NULL, "VOUTLHP"},
-	{"Headphone Jack", NULL, "VOUTRHP"},
-
-	/* ext speaker connected to VOUTL, VOUTR  */
-	{"Speaker", NULL, "VOUTL"},
-	{"Speaker", NULL, "VOUTR"},
-
-	/* mic is connected to VINM */
-	{"VINM", NULL, "Mic Jack"},
-};
-
-static struct platform_device *s3c24xx_snd_device;
-
-static int rx1950_startup(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-
-	runtime->hw.rate_min = hw_rates.list[0];
-	runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1];
-	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
-
-	return snd_pcm_hw_constraint_list(runtime, 0,
-					SNDRV_PCM_HW_PARAM_RATE,
-					&hw_rates);
-}
-
-static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
-				struct snd_kcontrol *kcontrol, int event)
-{
-	if (SND_SOC_DAPM_EVENT_ON(event))
-		gpio_set_value(S3C2410_GPA(1), 1);
-	else
-		gpio_set_value(S3C2410_GPA(1), 0);
-
-	return 0;
-}
-
-static int rx1950_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	int div;
-	int ret;
-	unsigned int rate = params_rate(params);
-	int clk_source, fs_mode;
-
-	switch (rate) {
-	case 16000:
-	case 48000:
-		clk_source = S3C24XX_CLKSRC_PCLK;
-		fs_mode = S3C2410_IISMOD_256FS;
-		div = s3c24xx_i2s_get_clockrate() / (256 * rate);
-		if (s3c24xx_i2s_get_clockrate() % (256 * rate) > (128 * rate))
-			div++;
-		break;
-	case 44100:
-	case 88200:
-		clk_source = S3C24XX_CLKSRC_MPLL;
-		fs_mode = S3C2410_IISMOD_384FS;
-		div = 1;
-		break;
-	default:
-		printk(KERN_ERR "%s: rate %d is not supported\n",
-			__func__, rate);
-		return -EINVAL;
-	}
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* select clock source */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source, rate,
-			SND_SOC_CLOCK_OUT);
-	if (ret < 0)
-		return ret;
-
-	/* set MCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
-		fs_mode);
-	if (ret < 0)
-		return ret;
-
-	/* set BCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
-		S3C2410_IISMOD_32FS);
-	if (ret < 0)
-		return ret;
-
-	/* set prescaler division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
-		S3C24XX_PRESCALE(div, div));
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int err;
-
-	/* Add rx1950 specific widgets */
-	err = snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
-				  ARRAY_SIZE(uda1380_dapm_widgets));
-
-	if (err)
-		return err;
-
-	/* Set up rx1950 specific audio path audio_map */
-	err = snd_soc_dapm_add_routes(codec, audio_map,
-				      ARRAY_SIZE(audio_map));
-
-	if (err)
-		return err;
-
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Speaker");
-
-	snd_soc_dapm_sync(codec);
-
-	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
-		&hp_jack);
-
-	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
-		hp_jack_pins);
-
-	snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
-		hp_jack_gpios);
-
-	return 0;
-}
-
-static int __init rx1950_init(void)
-{
-	int ret;
-
-	if (!machine_is_rx1950())
-		return -ENODEV;
-
-	/* configure some gpios */
-	ret = gpio_request(S3C2410_GPA(1), "speaker-power");
-	if (ret)
-		goto err_gpio;
-
-	ret = gpio_direction_output(S3C2410_GPA(1), 0);
-	if (ret)
-		goto err_gpio_conf;
-
-	s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!s3c24xx_snd_device) {
-		ret = -ENOMEM;
-		goto err_plat_alloc;
-	}
-
-	platform_set_drvdata(s3c24xx_snd_device, &rx1950_asoc);
-	ret = platform_device_add(s3c24xx_snd_device);
-
-	if (ret) {
-		platform_device_put(s3c24xx_snd_device);
-		goto err_plat_add;
-	}
-
-	return 0;
-
-err_plat_add:
-err_plat_alloc:
-err_gpio_conf:
-	gpio_free(S3C2410_GPA(1));
-
-err_gpio:
-	return ret;
-}
-
-static void __exit rx1950_exit(void)
-{
-	platform_device_unregister(s3c24xx_snd_device);
-	snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
-		hp_jack_gpios);
-	gpio_free(S3C2410_GPA(1));
-}
-
-module_init(rx1950_init);
-module_exit(rx1950_exit);
-
-/* Module information */
-MODULE_AUTHOR("Vasily Khoruzhick");
-MODULE_DESCRIPTION("ALSA SoC RX1950");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c-ac97.c b/sound/soc/s3c24xx/s3c-ac97.c
deleted file mode 100644
index f891eb7..0000000
--- a/sound/soc/s3c24xx/s3c-ac97.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/* sound/soc/s3c24xx/s3c-ac97.c
- *
- * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * 	Evolved from s3c2443-ac97.c
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- * 	Credits: Graeme Gregory, Sean Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-
-#include <sound/soc.h>
-
-#include <plat/regs-ac97.h>
-#include <mach/dma.h>
-#include <plat/audio.h>
-
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
-
-#define AC_CMD_ADDR(x) (x << 16)
-#define AC_CMD_DATA(x) (x & 0xffff)
-
-struct s3c_ac97_info {
-	struct clk         *ac97_clk;
-	void __iomem	   *regs;
-	struct mutex       lock;
-	struct completion  done;
-};
-static struct s3c_ac97_info s3c_ac97;
-
-static struct s3c2410_dma_client s3c_dma_client_out = {
-	.name = "AC97 PCMOut"
-};
-
-static struct s3c2410_dma_client s3c_dma_client_in = {
-	.name = "AC97 PCMIn"
-};
-
-static struct s3c2410_dma_client s3c_dma_client_micin = {
-	.name = "AC97 MicIn"
-};
-
-static struct s3c_dma_params s3c_ac97_pcm_out = {
-	.client		= &s3c_dma_client_out,
-	.dma_size	= 4,
-};
-
-static struct s3c_dma_params s3c_ac97_pcm_in = {
-	.client		= &s3c_dma_client_in,
-	.dma_size	= 4,
-};
-
-static struct s3c_dma_params s3c_ac97_mic_in = {
-	.client		= &s3c_dma_client_micin,
-	.dma_size	= 4,
-};
-
-static void s3c_ac97_activate(struct snd_ac97 *ac97)
-{
-	u32 ac_glbctrl, stat;
-
-	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
-	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
-		return; /* Return if already active */
-
-	INIT_COMPLETION(s3c_ac97.done);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	ac_glbctrl |= S3C_AC97_GLBCTRL_TRANSFERDATAENABLE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
-		pr_err("AC97: Unable to activate!\n");
-}
-
-static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
-	unsigned short reg)
-{
-	u32 ac_glbctrl, ac_codec_cmd;
-	u32 stat, addr, data;
-
-	mutex_lock(&s3c_ac97.lock);
-
-	s3c_ac97_activate(ac97);
-
-	INIT_COMPLETION(s3c_ac97.done);
-
-	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-	ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
-	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
-	udelay(50);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
-		pr_err("AC97: Unable to read!\n");
-
-	stat = readl(s3c_ac97.regs + S3C_AC97_STAT);
-	addr = (stat >> 16) & 0x7f;
-	data = (stat & 0xffff);
-
-	if (addr != reg)
-		pr_err("s3c-ac97: req addr = %02x, rep addr = %02x\n",
-			reg, addr);
-
-	mutex_unlock(&s3c_ac97.lock);
-
-	return (unsigned short)data;
-}
-
-static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
-	unsigned short val)
-{
-	u32 ac_glbctrl, ac_codec_cmd;
-
-	mutex_lock(&s3c_ac97.lock);
-
-	s3c_ac97_activate(ac97);
-
-	INIT_COMPLETION(s3c_ac97.done);
-
-	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-	ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);
-	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
-	udelay(50);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
-		pr_err("AC97: Unable to write!\n");
-
-	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-	ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
-	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
-	mutex_unlock(&s3c_ac97.lock);
-}
-
-static void s3c_ac97_cold_reset(struct snd_ac97 *ac97)
-{
-	pr_debug("AC97: Cold reset\n");
-	writel(S3C_AC97_GLBCTRL_COLDRESET,
-			s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-}
-
-static void s3c_ac97_warm_reset(struct snd_ac97 *ac97)
-{
-	u32 stat;
-
-	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
-	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
-		return; /* Return if already active */
-
-	pr_debug("AC97: Warm reset\n");
-
-	writel(S3C_AC97_GLBCTRL_WARMRESET, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	s3c_ac97_activate(ac97);
-}
-
-static irqreturn_t s3c_ac97_irq(int irq, void *dev_id)
-{
-	u32 ac_glbctrl, ac_glbstat;
-
-	ac_glbstat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT);
-
-	if (ac_glbstat & S3C_AC97_GLBSTAT_CODECREADY) {
-
-		ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-		ac_glbctrl &= ~S3C_AC97_GLBCTRL_CODECREADYIE;
-		writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-		complete(&s3c_ac97.done);
-	}
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= (1<<30); /* Clear interrupt */
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	return IRQ_HANDLED;
-}
-
-struct snd_ac97_bus_ops soc_ac97_ops = {
-	.read       = s3c_ac97_read,
-	.write      = s3c_ac97_write,
-	.warm_reset = s3c_ac97_warm_reset,
-	.reset      = s3c_ac97_cold_reset,
-};
-EXPORT_SYMBOL_GPL(soc_ac97_ops);
-
-static int s3c_ac97_hw_params(struct snd_pcm_substream *substream,
-				  struct snd_pcm_hw_params *params,
-				  struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct s3c_dma_params *dma_data;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = &s3c_ac97_pcm_out;
-	else
-		dma_data = &s3c_ac97_pcm_in;
-
-	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
-
-	return 0;
-}
-
-static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
-				struct snd_soc_dai *dai)
-{
-	u32 ac_glbctrl;
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct s3c_dma_params *dma_data =
-		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMINTM_MASK;
-	else
-		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMOUTTM_MASK;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMINTM_DMA;
-		else
-			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMOUTTM_DMA;
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		break;
-	}
-
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
-
-	return 0;
-}
-
-static int s3c_ac97_hw_mic_params(struct snd_pcm_substream *substream,
-				      struct snd_pcm_hw_params *params,
-				      struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		return -ENODEV;
-	else
-		snd_soc_dai_set_dma_data(cpu_dai, substream, &s3c_ac97_mic_in);
-
-	return 0;
-}
-
-static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
-				    int cmd, struct snd_soc_dai *dai)
-{
-	u32 ac_glbctrl;
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct s3c_dma_params *dma_data =
-		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		ac_glbctrl |= S3C_AC97_GLBCTRL_MICINTM_DMA;
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		break;
-	}
-
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
-
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c_ac97_dai_ops = {
-	.hw_params	= s3c_ac97_hw_params,
-	.trigger	= s3c_ac97_trigger,
-};
-
-static struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = {
-	.hw_params	= s3c_ac97_hw_mic_params,
-	.trigger	= s3c_ac97_mic_trigger,
-};
-
-static struct snd_soc_dai_driver s3c_ac97_dai[] = {
-	[S3C_AC97_DAI_PCM] = {
-		.name =	"s3c-ac97",
-		.ac97_control = 1,
-		.playback = {
-			.stream_name = "AC97 Playback",
-			.channels_min = 2,
-			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-		.capture = {
-			.stream_name = "AC97 Capture",
-			.channels_min = 2,
-			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-		.ops = &s3c_ac97_dai_ops,
-	},
-	[S3C_AC97_DAI_MIC] = {
-		.name = "s3c-ac97-mic",
-		.ac97_control = 1,
-		.capture = {
-			.stream_name = "AC97 Mic Capture",
-			.channels_min = 1,
-			.channels_max = 1,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-		.ops = &s3c_ac97_mic_dai_ops,
-	},
-};
-
-static __devinit int s3c_ac97_probe(struct platform_device *pdev)
-{
-	struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
-	struct s3c_audio_pdata *ac97_pdata;
-	int ret;
-
-	ac97_pdata = pdev->dev.platform_data;
-	if (!ac97_pdata || !ac97_pdata->cfg_gpio) {
-		dev_err(&pdev->dev, "cfg_gpio callback not provided!\n");
-		return -EINVAL;
-	}
-
-	/* Check for availability of necessary resource */
-	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!dmatx_res) {
-		dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
-		return -ENXIO;
-	}
-
-	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!dmarx_res) {
-		dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
-		return -ENXIO;
-	}
-
-	dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
-	if (!dmamic_res) {
-		dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
-		return -ENXIO;
-	}
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem_res) {
-		dev_err(&pdev->dev, "Unable to get register resource\n");
-		return -ENXIO;
-	}
-
-	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!irq_res) {
-		dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(mem_res->start,
-				resource_size(mem_res), "s3c-ac97")) {
-		dev_err(&pdev->dev, "Unable to request register region\n");
-		return -EBUSY;
-	}
-
-	s3c_ac97_pcm_out.channel = dmatx_res->start;
-	s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
-	s3c_ac97_pcm_in.channel = dmarx_res->start;
-	s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
-	s3c_ac97_mic_in.channel = dmamic_res->start;
-	s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
-
-	init_completion(&s3c_ac97.done);
-	mutex_init(&s3c_ac97.lock);
-
-	s3c_ac97.regs = ioremap(mem_res->start, resource_size(mem_res));
-	if (s3c_ac97.regs == NULL) {
-		dev_err(&pdev->dev, "Unable to ioremap register region\n");
-		ret = -ENXIO;
-		goto err1;
-	}
-
-	s3c_ac97.ac97_clk = clk_get(&pdev->dev, "ac97");
-	if (IS_ERR(s3c_ac97.ac97_clk)) {
-		dev_err(&pdev->dev, "s3c-ac97 failed to get ac97_clock\n");
-		ret = -ENODEV;
-		goto err2;
-	}
-	clk_enable(s3c_ac97.ac97_clk);
-
-	if (ac97_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		ret = -EINVAL;
-		goto err3;
-	}
-
-	ret = request_irq(irq_res->start, s3c_ac97_irq,
-					IRQF_DISABLED, "AC97", NULL);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "s3c-ac97: interrupt request failed.\n");
-		goto err4;
-	}
-
-	ret = snd_soc_register_dais(&pdev->dev, s3c_ac97_dai,
-			ARRAY_SIZE(s3c_ac97_dai));
-	if (ret)
-		goto err5;
-
-	return 0;
-
-err5:
-	free_irq(irq_res->start, NULL);
-err4:
-err3:
-	clk_disable(s3c_ac97.ac97_clk);
-	clk_put(s3c_ac97.ac97_clk);
-err2:
-	iounmap(s3c_ac97.regs);
-err1:
-	release_mem_region(mem_res->start, resource_size(mem_res));
-
-	return ret;
-}
-
-static __devexit int s3c_ac97_remove(struct platform_device *pdev)
-{
-	struct resource *mem_res, *irq_res;
-
-	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c_ac97_dai));
-
-	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (irq_res)
-		free_irq(irq_res->start, NULL);
-
-	clk_disable(s3c_ac97.ac97_clk);
-	clk_put(s3c_ac97.ac97_clk);
-
-	iounmap(s3c_ac97.regs);
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (mem_res)
-		release_mem_region(mem_res->start, resource_size(mem_res));
-
-	return 0;
-}
-
-static struct platform_driver s3c_ac97_driver = {
-	.probe  = s3c_ac97_probe,
-	.remove = s3c_ac97_remove,
-	.driver = {
-		.name = "s3c-ac97",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c_ac97_init(void)
-{
-	return platform_driver_register(&s3c_ac97_driver);
-}
-module_init(s3c_ac97_init);
-
-static void __exit s3c_ac97_exit(void)
-{
-	platform_driver_unregister(&s3c_ac97_driver);
-}
-module_exit(s3c_ac97_exit);
-
-MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-ac97");
diff --git a/sound/soc/s3c24xx/s3c-ac97.h b/sound/soc/s3c24xx/s3c-ac97.h
deleted file mode 100644
index 5dcedd0..0000000
--- a/sound/soc/s3c24xx/s3c-ac97.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* sound/soc/s3c24xx/s3c-ac97.h
- *
- * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * 	Evolved from s3c2443-ac97.h
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- * 	Credits: Graeme Gregory, Sean Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S3C_AC97_H_
-#define __S3C_AC97_H_
-
-#define S3C_AC97_DAI_PCM 0
-#define S3C_AC97_DAI_MIC 1
-
-#endif /* __S3C_AC97_H_ */
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/s3c24xx/s3c-dma.c
deleted file mode 100644
index 243f79b..0000000
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * s3c-dma.c  --  ALSA Soc Audio Layer
- *
- * (c) 2006 Wolfson Microelectronics PLC.
- * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- * Copyright 2004-2005 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <asm/dma.h>
-#include <mach/hardware.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-
-static const struct snd_pcm_hardware s3c_dma_hardware = {
-	.info			= SNDRV_PCM_INFO_INTERLEAVED |
-				    SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				    SNDRV_PCM_INFO_MMAP |
-				    SNDRV_PCM_INFO_MMAP_VALID |
-				    SNDRV_PCM_INFO_PAUSE |
-				    SNDRV_PCM_INFO_RESUME,
-	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
-				    SNDRV_PCM_FMTBIT_U16_LE |
-				    SNDRV_PCM_FMTBIT_U8 |
-				    SNDRV_PCM_FMTBIT_S8,
-	.channels_min		= 2,
-	.channels_max		= 2,
-	.buffer_bytes_max	= 128*1024,
-	.period_bytes_min	= PAGE_SIZE,
-	.period_bytes_max	= PAGE_SIZE*2,
-	.periods_min		= 2,
-	.periods_max		= 128,
-	.fifo_size		= 32,
-};
-
-struct s3c24xx_runtime_data {
-	spinlock_t lock;
-	int state;
-	unsigned int dma_loaded;
-	unsigned int dma_limit;
-	unsigned int dma_period;
-	dma_addr_t dma_start;
-	dma_addr_t dma_pos;
-	dma_addr_t dma_end;
-	struct s3c_dma_params *params;
-};
-
-/* s3c_dma_enqueue
- *
- * place a dma buffer onto the queue for the dma system
- * to handle.
-*/
-static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
-{
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
-	dma_addr_t pos = prtd->dma_pos;
-	unsigned int limit;
-	int ret;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (s3c_dma_has_circular())
-		limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
-	else
-		limit = prtd->dma_limit;
-
-	pr_debug("%s: loaded %d, limit %d\n",
-				__func__, prtd->dma_loaded, limit);
-
-	while (prtd->dma_loaded < limit) {
-		unsigned long len = prtd->dma_period;
-
-		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
-
-		if ((pos + len) > prtd->dma_end) {
-			len  = prtd->dma_end - pos;
-			pr_debug("%s: corrected dma len %ld\n", __func__, len);
-		}
-
-		ret = s3c2410_dma_enqueue(prtd->params->channel,
-			substream, pos, len);
-
-		if (ret == 0) {
-			prtd->dma_loaded++;
-			pos += prtd->dma_period;
-			if (pos >= prtd->dma_end)
-				pos = prtd->dma_start;
-		} else
-			break;
-	}
-
-	prtd->dma_pos = pos;
-}
-
-static void s3c24xx_audio_buffdone(struct s3c2410_dma_chan *channel,
-				void *dev_id, int size,
-				enum s3c2410_dma_buffresult result)
-{
-	struct snd_pcm_substream *substream = dev_id;
-	struct s3c24xx_runtime_data *prtd;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR)
-		return;
-
-	prtd = substream->runtime->private_data;
-
-	if (substream)
-		snd_pcm_period_elapsed(substream);
-
-	spin_lock(&prtd->lock);
-	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
-		prtd->dma_loaded--;
-		s3c_dma_enqueue(substream);
-	}
-
-	spin_unlock(&prtd->lock);
-}
-
-static int s3c_dma_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	unsigned long totbytes = params_buffer_bytes(params);
-	struct s3c_dma_params *dma =
-		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-	int ret = 0;
-
-
-	pr_debug("Entered %s\n", __func__);
-
-	/* return if this is a bufferless transfer e.g.
-	 * codec <--> BT codec or GSM modem -- lg FIXME */
-	if (!dma)
-		return 0;
-
-	/* this may get called several times by oss emulation
-	 * with different params -HW */
-	if (prtd->params == NULL) {
-		/* prepare DMA */
-		prtd->params = dma;
-
-		pr_debug("params %p, client %p, channel %d\n", prtd->params,
-			prtd->params->client, prtd->params->channel);
-
-		ret = s3c2410_dma_request(prtd->params->channel,
-					  prtd->params->client, NULL);
-
-		if (ret < 0) {
-			printk(KERN_ERR "failed to get dma channel\n");
-			return ret;
-		}
-
-		/* use the circular buffering if we have it available. */
-		if (s3c_dma_has_circular())
-			s3c2410_dma_setflags(prtd->params->channel,
-					     S3C2410_DMAF_CIRCULAR);
-	}
-
-	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
-				    s3c24xx_audio_buffdone);
-
-	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-
-	runtime->dma_bytes = totbytes;
-
-	spin_lock_irq(&prtd->lock);
-	prtd->dma_loaded = 0;
-	prtd->dma_limit = runtime->hw.periods_min;
-	prtd->dma_period = params_period_bytes(params);
-	prtd->dma_start = runtime->dma_addr;
-	prtd->dma_pos = prtd->dma_start;
-	prtd->dma_end = prtd->dma_start + totbytes;
-	spin_unlock_irq(&prtd->lock);
-
-	return 0;
-}
-
-static int s3c_dma_hw_free(struct snd_pcm_substream *substream)
-{
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
-
-	pr_debug("Entered %s\n", __func__);
-
-	/* TODO - do we need to ensure the DMA is flushed? */
-	snd_pcm_set_runtime_buffer(substream, NULL);
-
-	if (prtd->params) {
-		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
-		prtd->params = NULL;
-	}
-
-	return 0;
-}
-
-static int s3c_dma_prepare(struct snd_pcm_substream *substream)
-{
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
-	int ret = 0;
-
-	pr_debug("Entered %s\n", __func__);
-
-	/* return if this is a bufferless transfer e.g.
-	 * codec <--> BT codec or GSM modem -- lg FIXME */
-	if (!prtd->params)
-		return 0;
-
-	/* channel needs configuring for mem=>device, increment memory addr,
-	 * sync to pclk, half-word transfers to the IIS-FIFO. */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				      S3C2410_DMASRC_MEM,
-				      prtd->params->dma_addr);
-	} else {
-		s3c2410_dma_devconfig(prtd->params->channel,
-				      S3C2410_DMASRC_HW,
-				      prtd->params->dma_addr);
-	}
-
-	s3c2410_dma_config(prtd->params->channel,
-			   prtd->params->dma_size);
-
-	/* flush the DMA channel */
-	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
-	prtd->dma_loaded = 0;
-	prtd->dma_pos = prtd->dma_start;
-
-	/* enqueue dma buffers */
-	s3c_dma_enqueue(substream);
-
-	return ret;
-}
-
-static int s3c_dma_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
-	int ret = 0;
-
-	pr_debug("Entered %s\n", __func__);
-
-	spin_lock(&prtd->lock);
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		prtd->state |= ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		prtd->state &= ~ST_RUNNING;
-		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
-		break;
-
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	spin_unlock(&prtd->lock);
-
-	return ret;
-}
-
-static snd_pcm_uframes_t
-s3c_dma_pointer(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
-	unsigned long res;
-	dma_addr_t src, dst;
-
-	pr_debug("Entered %s\n", __func__);
-
-	spin_lock(&prtd->lock);
-	s3c2410_dma_getposition(prtd->params->channel, &src, &dst);
-
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		res = dst - prtd->dma_start;
-	else
-		res = src - prtd->dma_start;
-
-	spin_unlock(&prtd->lock);
-
-	pr_debug("Pointer %x %x\n", src, dst);
-
-	/* we seem to be getting the odd error from the pcm library due
-	 * to out-of-bounds pointers. this may be due to the dma engine
-	 * not having loaded the new values for the channel before being
-	 * called... (todo - fix)
-	 */
-
-	if (res >= snd_pcm_lib_buffer_bytes(substream)) {
-		if (res == snd_pcm_lib_buffer_bytes(substream))
-			res = 0;
-	}
-
-	return bytes_to_frames(substream->runtime, res);
-}
-
-static int s3c_dma_open(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd;
-
-	pr_debug("Entered %s\n", __func__);
-
-	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-	snd_soc_set_runtime_hwparams(substream, &s3c_dma_hardware);
-
-	prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL);
-	if (prtd == NULL)
-		return -ENOMEM;
-
-	spin_lock_init(&prtd->lock);
-
-	runtime->private_data = prtd;
-	return 0;
-}
-
-static int s3c_dma_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (!prtd)
-		pr_debug("s3c_dma_close called with prtd == NULL\n");
-
-	kfree(prtd);
-
-	return 0;
-}
-
-static int s3c_dma_mmap(struct snd_pcm_substream *substream,
-	struct vm_area_struct *vma)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-
-	pr_debug("Entered %s\n", __func__);
-
-	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
-				     runtime->dma_area,
-				     runtime->dma_addr,
-				     runtime->dma_bytes);
-}
-
-static struct snd_pcm_ops s3c_dma_ops = {
-	.open		= s3c_dma_open,
-	.close		= s3c_dma_close,
-	.ioctl		= snd_pcm_lib_ioctl,
-	.hw_params	= s3c_dma_hw_params,
-	.hw_free	= s3c_dma_hw_free,
-	.prepare	= s3c_dma_prepare,
-	.trigger	= s3c_dma_trigger,
-	.pointer	= s3c_dma_pointer,
-	.mmap		= s3c_dma_mmap,
-};
-
-static int s3c_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
-{
-	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
-	struct snd_dma_buffer *buf = &substream->dma_buffer;
-	size_t size = s3c_dma_hardware.buffer_bytes_max;
-
-	pr_debug("Entered %s\n", __func__);
-
-	buf->dev.type = SNDRV_DMA_TYPE_DEV;
-	buf->dev.dev = pcm->card->dev;
-	buf->private_data = NULL;
-	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
-					   &buf->addr, GFP_KERNEL);
-	if (!buf->area)
-		return -ENOMEM;
-	buf->bytes = size;
-	return 0;
-}
-
-static void s3c_dma_free_dma_buffers(struct snd_pcm *pcm)
-{
-	struct snd_pcm_substream *substream;
-	struct snd_dma_buffer *buf;
-	int stream;
-
-	pr_debug("Entered %s\n", __func__);
-
-	for (stream = 0; stream < 2; stream++) {
-		substream = pcm->streams[stream].substream;
-		if (!substream)
-			continue;
-
-		buf = &substream->dma_buffer;
-		if (!buf->area)
-			continue;
-
-		dma_free_writecombine(pcm->card->dev, buf->bytes,
-				      buf->area, buf->addr);
-		buf->area = NULL;
-	}
-}
-
-static u64 s3c_dma_mask = DMA_BIT_MASK(32);
-
-static int s3c_dma_new(struct snd_card *card,
-	struct snd_soc_dai *dai, struct snd_pcm *pcm)
-{
-	int ret = 0;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &s3c_dma_mask;
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = 0xffffffff;
-
-	if (dai->driver->playback.channels_min) {
-		ret = s3c_preallocate_dma_buffer(pcm,
-			SNDRV_PCM_STREAM_PLAYBACK);
-		if (ret)
-			goto out;
-	}
-
-	if (dai->driver->capture.channels_min) {
-		ret = s3c_preallocate_dma_buffer(pcm,
-			SNDRV_PCM_STREAM_CAPTURE);
-		if (ret)
-			goto out;
-	}
- out:
-	return ret;
-}
-
-static struct snd_soc_platform_driver s3c24xx_soc_platform = {
-	.ops		= &s3c_dma_ops,
-	.pcm_new	= s3c_dma_new,
-	.pcm_free	= s3c_dma_free_dma_buffers,
-};
-
-static int __devinit s3c24xx_soc_platform_probe(struct platform_device *pdev)
-{
-	return snd_soc_register_platform(&pdev->dev, &s3c24xx_soc_platform);
-}
-
-static int __devexit s3c24xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
-}
-
-static struct platform_driver s3c24xx_pcm_driver = {
-	.driver = {
-		.name = "s3c24xx-pcm-audio",
-		.owner = THIS_MODULE,
-	},
-
-	.probe = s3c24xx_soc_platform_probe,
-	.remove = __devexit_p(s3c24xx_soc_platform_remove),
-};
-
-static int __init snd_s3c24xx_pcm_init(void)
-{
-	return platform_driver_register(&s3c24xx_pcm_driver);
-}
-module_init(snd_s3c24xx_pcm_init);
-
-static void __exit snd_s3c24xx_pcm_exit(void)
-{
-	platform_driver_unregister(&s3c24xx_pcm_driver);
-}
-module_exit(snd_s3c24xx_pcm_exit);
-
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C Audio DMA module");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c24xx-pcm-audio");
diff --git a/sound/soc/s3c24xx/s3c-dma.h b/sound/soc/s3c24xx/s3c-dma.h
deleted file mode 100644
index 748c07d..0000000
--- a/sound/soc/s3c24xx/s3c-dma.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *  s3c-dma.h --
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  ALSA PCM interface for the Samsung S3C24xx CPU
- */
-
-#ifndef _S3C_AUDIO_H
-#define _S3C_AUDIO_H
-
-#define ST_RUNNING		(1<<0)
-#define ST_OPENED		(1<<1)
-
-struct s3c_dma_params {
-	struct s3c2410_dma_client *client;	/* stream identifier */
-	int channel;				/* Channel ID */
-	dma_addr_t dma_addr;
-	int dma_size;			/* Size of the DMA transfer */
-};
-
-#define S3C24XX_DAI_I2S			0
-
-/* platform data */
-extern struct snd_ac97_bus_ops s3c24xx_ac97_ops;
-
-#endif
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c
deleted file mode 100644
index b3866d5..0000000
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ /dev/null
@@ -1,757 +0,0 @@
-/* sound/soc/s3c24xx/s3c-i2s-v2.c
- *
- * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
- *
- * Copyright (c) 2006 Wolfson Microelectronics PLC.
- *	Graeme Gregory graeme.gregory@wolfsonmicro.com
- *	linux@wolfsonmicro.com
- *
- * Copyright (c) 2008, 2007, 2004-2005 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/dma.h>
-
-#include "regs-i2s-v2.h"
-#include "s3c-i2s-v2.h"
-#include "s3c-dma.h"
-
-#undef S3C_IIS_V2_SUPPORTED
-
-#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) \
-	|| defined(CONFIG_CPU_S5PV210)
-#define S3C_IIS_V2_SUPPORTED
-#endif
-
-#ifdef CONFIG_PLAT_S3C64XX
-#define S3C_IIS_V2_SUPPORTED
-#endif
-
-#ifndef S3C_IIS_V2_SUPPORTED
-#error Unsupported CPU model
-#endif
-
-#define S3C2412_I2S_DEBUG_CON 0
-
-static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
-{
-	return snd_soc_dai_get_drvdata(cpu_dai);
-}
-
-#define bit_set(v, b) (((v) & (b)) ? 1 : 0)
-
-#if S3C2412_I2S_DEBUG_CON
-static void dbg_showcon(const char *fn, u32 con)
-{
-	printk(KERN_DEBUG "%s: LRI=%d, TXFEMPT=%d, RXFEMPT=%d, TXFFULL=%d, RXFFULL=%d\n", fn,
-	       bit_set(con, S3C2412_IISCON_LRINDEX),
-	       bit_set(con, S3C2412_IISCON_TXFIFO_EMPTY),
-	       bit_set(con, S3C2412_IISCON_RXFIFO_EMPTY),
-	       bit_set(con, S3C2412_IISCON_TXFIFO_FULL),
-	       bit_set(con, S3C2412_IISCON_RXFIFO_FULL));
-
-	printk(KERN_DEBUG "%s: PAUSE: TXDMA=%d, RXDMA=%d, TXCH=%d, RXCH=%d\n",
-	       fn,
-	       bit_set(con, S3C2412_IISCON_TXDMA_PAUSE),
-	       bit_set(con, S3C2412_IISCON_RXDMA_PAUSE),
-	       bit_set(con, S3C2412_IISCON_TXCH_PAUSE),
-	       bit_set(con, S3C2412_IISCON_RXCH_PAUSE));
-	printk(KERN_DEBUG "%s: ACTIVE: TXDMA=%d, RXDMA=%d, IIS=%d\n", fn,
-	       bit_set(con, S3C2412_IISCON_TXDMA_ACTIVE),
-	       bit_set(con, S3C2412_IISCON_RXDMA_ACTIVE),
-	       bit_set(con, S3C2412_IISCON_IIS_ACTIVE));
-}
-#else
-static inline void dbg_showcon(const char *fn, u32 con)
-{
-}
-#endif
-
-
-/* Turn on or off the transmission path. */
-static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
-{
-	void __iomem *regs = i2s->regs;
-	u32 fic, con, mod;
-
-	pr_debug("%s(%d)\n", __func__, on);
-
-	fic = readl(regs + S3C2412_IISFIC);
-	con = readl(regs + S3C2412_IISCON);
-	mod = readl(regs + S3C2412_IISMOD);
-
-	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
-
-	if (on) {
-		con |= S3C2412_IISCON_TXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE;
-		con &= ~S3C2412_IISCON_TXDMA_PAUSE;
-		con &= ~S3C2412_IISCON_TXCH_PAUSE;
-
-		switch (mod & S3C2412_IISMOD_MODE_MASK) {
-		case S3C2412_IISMOD_MODE_TXONLY:
-		case S3C2412_IISMOD_MODE_TXRX:
-			/* do nothing, we are in the right mode */
-			break;
-
-		case S3C2412_IISMOD_MODE_RXONLY:
-			mod &= ~S3C2412_IISMOD_MODE_MASK;
-			mod |= S3C2412_IISMOD_MODE_TXRX;
-			break;
-
-		default:
-			dev_err(i2s->dev, "TXEN: Invalid MODE %x in IISMOD\n",
-				mod & S3C2412_IISMOD_MODE_MASK);
-			break;
-		}
-
-		writel(con, regs + S3C2412_IISCON);
-		writel(mod, regs + S3C2412_IISMOD);
-	} else {
-		/* Note, we do not have any indication that the FIFO problems
-		 * that the S3C2410/2440 had apply here, so we should be able
-		 * to disable the DMA and TX without resetting the FIFOs.
-		 */
-
-		con |=  S3C2412_IISCON_TXDMA_PAUSE;
-		con |=  S3C2412_IISCON_TXCH_PAUSE;
-		con &= ~S3C2412_IISCON_TXDMA_ACTIVE;
-
-		switch (mod & S3C2412_IISMOD_MODE_MASK) {
-		case S3C2412_IISMOD_MODE_TXRX:
-			mod &= ~S3C2412_IISMOD_MODE_MASK;
-			mod |= S3C2412_IISMOD_MODE_RXONLY;
-			break;
-
-		case S3C2412_IISMOD_MODE_TXONLY:
-			mod &= ~S3C2412_IISMOD_MODE_MASK;
-			con &= ~S3C2412_IISCON_IIS_ACTIVE;
-			break;
-
-		default:
-			dev_err(i2s->dev, "TXDIS: Invalid MODE %x in IISMOD\n",
-				mod & S3C2412_IISMOD_MODE_MASK);
-			break;
-		}
-
-		writel(mod, regs + S3C2412_IISMOD);
-		writel(con, regs + S3C2412_IISCON);
-	}
-
-	fic = readl(regs + S3C2412_IISFIC);
-	dbg_showcon(__func__, con);
-	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
-}
-
-static void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
-{
-	void __iomem *regs = i2s->regs;
-	u32 fic, con, mod;
-
-	pr_debug("%s(%d)\n", __func__, on);
-
-	fic = readl(regs + S3C2412_IISFIC);
-	con = readl(regs + S3C2412_IISCON);
-	mod = readl(regs + S3C2412_IISMOD);
-
-	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
-
-	if (on) {
-		con |= S3C2412_IISCON_RXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE;
-		con &= ~S3C2412_IISCON_RXDMA_PAUSE;
-		con &= ~S3C2412_IISCON_RXCH_PAUSE;
-
-		switch (mod & S3C2412_IISMOD_MODE_MASK) {
-		case S3C2412_IISMOD_MODE_TXRX:
-		case S3C2412_IISMOD_MODE_RXONLY:
-			/* do nothing, we are in the right mode */
-			break;
-
-		case S3C2412_IISMOD_MODE_TXONLY:
-			mod &= ~S3C2412_IISMOD_MODE_MASK;
-			mod |= S3C2412_IISMOD_MODE_TXRX;
-			break;
-
-		default:
-			dev_err(i2s->dev, "RXEN: Invalid MODE %x in IISMOD\n",
-				mod & S3C2412_IISMOD_MODE_MASK);
-		}
-
-		writel(mod, regs + S3C2412_IISMOD);
-		writel(con, regs + S3C2412_IISCON);
-	} else {
-		/* See txctrl notes on FIFOs. */
-
-		con &= ~S3C2412_IISCON_RXDMA_ACTIVE;
-		con |=  S3C2412_IISCON_RXDMA_PAUSE;
-		con |=  S3C2412_IISCON_RXCH_PAUSE;
-
-		switch (mod & S3C2412_IISMOD_MODE_MASK) {
-		case S3C2412_IISMOD_MODE_RXONLY:
-			con &= ~S3C2412_IISCON_IIS_ACTIVE;
-			mod &= ~S3C2412_IISMOD_MODE_MASK;
-			break;
-
-		case S3C2412_IISMOD_MODE_TXRX:
-			mod &= ~S3C2412_IISMOD_MODE_MASK;
-			mod |= S3C2412_IISMOD_MODE_TXONLY;
-			break;
-
-		default:
-			dev_err(i2s->dev, "RXDIS: Invalid MODE %x in IISMOD\n",
-				mod & S3C2412_IISMOD_MODE_MASK);
-		}
-
-		writel(con, regs + S3C2412_IISCON);
-		writel(mod, regs + S3C2412_IISMOD);
-	}
-
-	fic = readl(regs + S3C2412_IISFIC);
-	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
-}
-
-#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-
-/*
- * Wait for the LR signal to allow synchronisation to the L/R clock
- * from the codec. May only be needed for slave mode.
- */
-static int s3c2412_snd_lrsync(struct s3c_i2sv2_info *i2s)
-{
-	u32 iiscon;
-	unsigned long loops = msecs_to_loops(5);
-
-	pr_debug("Entered %s\n", __func__);
-
-	while (--loops) {
-		iiscon = readl(i2s->regs + S3C2412_IISCON);
-		if (iiscon & S3C2412_IISCON_LRINDEX)
-			break;
-
-		cpu_relax();
-	}
-
-	if (!loops) {
-		printk(KERN_ERR "%s: timeout\n", __func__);
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-/*
- * Set S3C2412 I2S DAI format
- */
-static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
-			       unsigned int fmt)
-{
-	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	pr_debug("hw_params r: IISMOD: %x \n", iismod);
-
-	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-	case SND_SOC_DAIFMT_CBM_CFM:
-		i2s->master = 0;
-		iismod |= S3C2412_IISMOD_SLAVE;
-		break;
-	case SND_SOC_DAIFMT_CBS_CFS:
-		i2s->master = 1;
-		iismod &= ~S3C2412_IISMOD_SLAVE;
-		break;
-	default:
-		pr_err("unknwon master/slave format\n");
-		return -EINVAL;
-	}
-
-	iismod &= ~S3C2412_IISMOD_SDF_MASK;
-
-	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-	case SND_SOC_DAIFMT_RIGHT_J:
-		iismod |= S3C2412_IISMOD_LR_RLOW;
-		iismod |= S3C2412_IISMOD_SDF_MSB;
-		break;
-	case SND_SOC_DAIFMT_LEFT_J:
-		iismod |= S3C2412_IISMOD_LR_RLOW;
-		iismod |= S3C2412_IISMOD_SDF_LSB;
-		break;
-	case SND_SOC_DAIFMT_I2S:
-		iismod &= ~S3C2412_IISMOD_LR_RLOW;
-		iismod |= S3C2412_IISMOD_SDF_IIS;
-		break;
-	default:
-		pr_err("Unknown data format\n");
-		return -EINVAL;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	pr_debug("hw_params w: IISMOD: %x \n", iismod);
-	return 0;
-}
-
-static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = to_info(dai);
-	struct s3c_dma_params *dma_data;
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = i2s->dma_playback;
-	else
-		dma_data = i2s->dma_capture;
-
-	snd_soc_dai_set_dma_data(dai, substream, dma_data);
-
-	/* Working copies of register */
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
-
-	iismod &= ~S3C64XX_IISMOD_BLC_MASK;
-	/* Sample size */
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S8:
-		iismod |= S3C64XX_IISMOD_BLC_8BIT;
-		break;
-	case SNDRV_PCM_FORMAT_S16_LE:
-		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		iismod |= S3C64XX_IISMOD_BLC_24BIT;
-		break;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
-
-	return 0;
-}
-
-static int s3c_i2sv2_set_sysclk(struct snd_soc_dai *cpu_dai,
-				  int clk_id, unsigned int freq, int dir)
-{
-	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
-	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
-	pr_debug("Entered %s\n", __func__);
-	pr_debug("%s r: IISMOD: %x\n", __func__, iismod);
-
-	switch (clk_id) {
-	case S3C_I2SV2_CLKSRC_PCLK:
-		iismod &= ~S3C2412_IISMOD_IMS_SYSMUX;
-		break;
-
-	case S3C_I2SV2_CLKSRC_AUDIOBUS:
-		iismod |= S3C2412_IISMOD_IMS_SYSMUX;
-		break;
-
-	case S3C_I2SV2_CLKSRC_CDCLK:
-		/* Error if controller doesn't have the CDCLKCON bit */
-		if (!(i2s->feature & S3C_FEATURE_CDCLKCON))
-			return -EINVAL;
-
-		switch (dir) {
-		case SND_SOC_CLOCK_IN:
-			iismod |= S3C64XX_IISMOD_CDCLKCON;
-			break;
-		case SND_SOC_CLOCK_OUT:
-			iismod &= ~S3C64XX_IISMOD_CDCLKCON;
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	pr_debug("%s w: IISMOD: %x\n", __func__, iismod);
-
-	return 0;
-}
-
-static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
-			       struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai);
-	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
-	unsigned long irqs;
-	int ret = 0;
-	struct s3c_dma_params *dma_data =
-		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
-	pr_debug("Entered %s\n", __func__);
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		/* On start, ensure that the FIFOs are cleared and reset. */
-
-		writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH,
-		       i2s->regs + S3C2412_IISFIC);
-
-		/* clear again, just in case */
-		writel(0x0, i2s->regs + S3C2412_IISFIC);
-
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (!i2s->master) {
-			ret = s3c2412_snd_lrsync(i2s);
-			if (ret)
-				goto exit_err;
-		}
-
-		local_irq_save(irqs);
-
-		if (capture)
-			s3c2412_snd_rxctrl(i2s, 1);
-		else
-			s3c2412_snd_txctrl(i2s, 1);
-
-		local_irq_restore(irqs);
-
-		/*
-		 * Load the next buffer to DMA to meet the requirement
-		 * of the auto reload mechanism of S3C24XX.
-		 * This call won't bother S3C64XX.
-		 */
-		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
-
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		local_irq_save(irqs);
-
-		if (capture)
-			s3c2412_snd_rxctrl(i2s, 0);
-		else
-			s3c2412_snd_txctrl(i2s, 0);
-
-		local_irq_restore(irqs);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-exit_err:
-	return ret;
-}
-
-/*
- * Set S3C2412 Clock dividers
- */
-static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
-				  int div_id, int div)
-{
-	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
-	u32 reg;
-
-	pr_debug("%s(%p, %d, %d)\n", __func__, cpu_dai, div_id, div);
-
-	switch (div_id) {
-	case S3C_I2SV2_DIV_BCLK:
-		switch (div) {
-		case 16:
-			div = S3C2412_IISMOD_BCLK_16FS;
-			break;
-
-		case 32:
-			div = S3C2412_IISMOD_BCLK_32FS;
-			break;
-
-		case 24:
-			div = S3C2412_IISMOD_BCLK_24FS;
-			break;
-
-		case 48:
-			div = S3C2412_IISMOD_BCLK_48FS;
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-		reg = readl(i2s->regs + S3C2412_IISMOD);
-		reg &= ~S3C2412_IISMOD_BCLK_MASK;
-		writel(reg | div, i2s->regs + S3C2412_IISMOD);
-
-		pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD));
-		break;
-
-	case S3C_I2SV2_DIV_RCLK:
-		switch (div) {
-		case 256:
-			div = S3C2412_IISMOD_RCLK_256FS;
-			break;
-
-		case 384:
-			div = S3C2412_IISMOD_RCLK_384FS;
-			break;
-
-		case 512:
-			div = S3C2412_IISMOD_RCLK_512FS;
-			break;
-
-		case 768:
-			div = S3C2412_IISMOD_RCLK_768FS;
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-		reg = readl(i2s->regs + S3C2412_IISMOD);
-		reg &= ~S3C2412_IISMOD_RCLK_MASK;
-		writel(reg | div, i2s->regs + S3C2412_IISMOD);
-		pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD));
-		break;
-
-	case S3C_I2SV2_DIV_PRESCALER:
-		if (div >= 0) {
-			writel((div << 8) | S3C2412_IISPSR_PSREN,
-			       i2s->regs + S3C2412_IISPSR);
-		} else {
-			writel(0x0, i2s->regs + S3C2412_IISPSR);
-		}
-		pr_debug("%s: PSR=%08x\n", __func__, readl(i2s->regs + S3C2412_IISPSR));
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static snd_pcm_sframes_t s3c2412_i2s_delay(struct snd_pcm_substream *substream,
-					   struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = to_info(dai);
-	u32 reg = readl(i2s->regs + S3C2412_IISFIC);
-	snd_pcm_sframes_t delay;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		delay = S3C2412_IISFIC_TXCOUNT(reg);
-	else
-		delay = S3C2412_IISFIC_RXCOUNT(reg);
-
-	return delay;
-}
-
-struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai)
-{
-	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
-	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
-	if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
-		return i2s->iis_cclk;
-	else
-		return i2s->iis_pclk;
-}
-EXPORT_SYMBOL_GPL(s3c_i2sv2_get_clock);
-
-/* default table of all available root fs divisors */
-static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 };
-
-int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
-			    unsigned int *fstab,
-			    unsigned int rate, struct clk *clk)
-{
-	unsigned long clkrate = clk_get_rate(clk);
-	unsigned int div;
-	unsigned int fsclk;
-	unsigned int actual;
-	unsigned int fs;
-	unsigned int fsdiv;
-	signed int deviation = 0;
-	unsigned int best_fs = 0;
-	unsigned int best_div = 0;
-	unsigned int best_rate = 0;
-	unsigned int best_deviation = INT_MAX;
-
-	pr_debug("Input clock rate %ldHz\n", clkrate);
-
-	if (fstab == NULL)
-		fstab = iis_fs_tab;
-
-	for (fs = 0; fs < ARRAY_SIZE(iis_fs_tab); fs++) {
-		fsdiv = iis_fs_tab[fs];
-
-		fsclk = clkrate / fsdiv;
-		div = fsclk / rate;
-
-		if ((fsclk % rate) > (rate / 2))
-			div++;
-
-		if (div <= 1)
-			continue;
-
-		actual = clkrate / (fsdiv * div);
-		deviation = actual - rate;
-
-		printk(KERN_DEBUG "%ufs: div %u => result %u, deviation %d\n",
-		       fsdiv, div, actual, deviation);
-
-		deviation = abs(deviation);
-
-		if (deviation < best_deviation) {
-			best_fs = fsdiv;
-			best_div = div;
-			best_rate = actual;
-			best_deviation = deviation;
-		}
-
-		if (deviation == 0)
-			break;
-	}
-
-	printk(KERN_DEBUG "best: fs=%u, div=%u, rate=%u\n",
-	       best_fs, best_div, best_rate);
-
-	info->fs_div = best_fs;
-	info->clk_div = best_div;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(s3c_i2sv2_iis_calc_rate);
-
-int s3c_i2sv2_probe(struct snd_soc_dai *dai,
-		    struct s3c_i2sv2_info *i2s,
-		    unsigned long base)
-{
-	struct device *dev = dai->dev;
-	unsigned int iismod;
-
-	i2s->dev = dev;
-
-	/* record our i2s structure for later use in the callbacks */
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	i2s->regs = ioremap(base, 0x100);
-	if (i2s->regs == NULL) {
-		dev_err(dev, "cannot ioremap registers\n");
-		return -ENXIO;
-	}
-
-	i2s->iis_pclk = clk_get(dev, "iis");
-	if (IS_ERR(i2s->iis_pclk)) {
-		dev_err(dev, "failed to get iis_clock\n");
-		iounmap(i2s->regs);
-		return -ENOENT;
-	}
-
-	clk_enable(i2s->iis_pclk);
-
-	/* Mark ourselves as in TXRX mode so we can run through our cleanup
-	 * process without warnings. */
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	iismod |= S3C2412_IISMOD_MODE_TXRX;
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	s3c2412_snd_txctrl(i2s, 0);
-	s3c2412_snd_rxctrl(i2s, 0);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(s3c_i2sv2_probe);
-
-#ifdef CONFIG_PM
-static int s3c2412_i2s_suspend(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = to_info(dai);
-	u32 iismod;
-
-	if (dai->active) {
-		i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD);
-		i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON);
-		i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR);
-
-		/* some basic suspend checks */
-
-		iismod = readl(i2s->regs + S3C2412_IISMOD);
-
-		if (iismod & S3C2412_IISCON_RXDMA_ACTIVE)
-			pr_warning("%s: RXDMA active?\n", __func__);
-
-		if (iismod & S3C2412_IISCON_TXDMA_ACTIVE)
-			pr_warning("%s: TXDMA active?\n", __func__);
-
-		if (iismod & S3C2412_IISCON_IIS_ACTIVE)
-			pr_warning("%s: IIS active\n", __func__);
-	}
-
-	return 0;
-}
-
-static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = to_info(dai);
-
-	pr_info("dai_active %d, IISMOD %08x, IISCON %08x\n",
-		dai->active, i2s->suspend_iismod, i2s->suspend_iiscon);
-
-	if (dai->active) {
-		writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON);
-		writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD);
-		writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR);
-
-		writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH,
-		       i2s->regs + S3C2412_IISFIC);
-
-		ndelay(250);
-		writel(0x0, i2s->regs + S3C2412_IISFIC);
-	}
-
-	return 0;
-}
-#else
-#define s3c2412_i2s_suspend NULL
-#define s3c2412_i2s_resume  NULL
-#endif
-
-int s3c_i2sv2_register_dai(struct device *dev, int id,
-		struct snd_soc_dai_driver *drv)
-{
-	struct snd_soc_dai_ops *ops = drv->ops;
-
-	ops->trigger = s3c2412_i2s_trigger;
-	if (!ops->hw_params)
-		ops->hw_params = s3c_i2sv2_hw_params;
-	ops->set_fmt = s3c2412_i2s_set_fmt;
-	ops->set_clkdiv = s3c2412_i2s_set_clkdiv;
-	ops->set_sysclk = s3c_i2sv2_set_sysclk;
-
-	/* Allow overriding by (for example) IISv4 */
-	if (!ops->delay)
-		ops->delay = s3c2412_i2s_delay;
-
-	drv->suspend = s3c2412_i2s_suspend;
-	drv->resume = s3c2412_i2s_resume;
-
-	return snd_soc_register_dai(dev, drv);
-}
-EXPORT_SYMBOL_GPL(s3c_i2sv2_register_dai);
-
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.h b/sound/soc/s3c24xx/s3c-i2s-v2.h
deleted file mode 100644
index d458301..0000000
--- a/sound/soc/s3c24xx/s3c-i2s-v2.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* sound/soc/s3c24xx/s3c-i2s-v2.h
- *
- * ALSA Soc Audio Layer - S3C_I2SV2 I2S driver
- *
- * Copyright (c) 2007 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
-*/
-
-/* This code is the core support for the I2S block found in a number of
- * Samsung SoC devices which is unofficially named I2S-V2. Currently the
- * S3C2412 and the S3C64XX series use this block to provide 1 or 2 I2S
- * channels via configurable GPIO.
- */
-
-#ifndef __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H
-#define __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H __FILE__
-
-#define S3C_I2SV2_DIV_BCLK	(1)
-#define S3C_I2SV2_DIV_RCLK	(2)
-#define S3C_I2SV2_DIV_PRESCALER	(3)
-
-#define S3C_I2SV2_CLKSRC_PCLK		0
-#define S3C_I2SV2_CLKSRC_AUDIOBUS	1
-#define S3C_I2SV2_CLKSRC_CDCLK		2
-
-/* Set this flag for I2S controllers that have the IISMOD[12] bit, which
- * bridges/breaks the connection between the RCLK signal and the external
- * Xi2sCDCLK pin.
- */
-#define S3C_FEATURE_CDCLKCON	(1 << 0)
-
-/**
- * struct s3c_i2sv2_info - S3C I2S-V2 information
- * @dev: The parent device passed to us from the probe.
- * @regs: The pointer to the device register block.
- * @feature: Set of bit-flags indicating features of the controller.
- * @master: True if the I2S core is the I2S bit clock master.
- * @dma_playback: DMA information for playback channel.
- * @dma_capture: DMA information for capture channel.
- * @suspend_iismod: PM save for the IISMOD register.
- * @suspend_iiscon: PM save for the IISCON register.
- * @suspend_iispsr: PM save for the IISPSR register.
- *
- * This is the private codec state for the hardware associated with an
- * I2S channel such as the register mappings and clock sources.
- */
-struct s3c_i2sv2_info {
-	struct device	*dev;
-	void __iomem	*regs;
-
-	u32		feature;
-
-	struct clk	*iis_pclk;
-	struct clk	*iis_cclk;
-
-	unsigned char	 master;
-
-	struct s3c_dma_params	*dma_playback;
-	struct s3c_dma_params	*dma_capture;
-
-	u32		 suspend_iismod;
-	u32		 suspend_iiscon;
-	u32		 suspend_iispsr;
-
-	unsigned long	base;
-};
-
-extern struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai);
-
-struct s3c_i2sv2_rate_calc {
-	unsigned int	clk_div;	/* for prescaler */
-	unsigned int	fs_div;		/* for root frame clock */
-};
-
-extern int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
-				   unsigned int *fstab,
-				   unsigned int rate, struct clk *clk);
-
-/**
- * s3c_i2sv2_probe - probe for i2s device helper
- * @dai: The ASoC DAI structure supplied to the original probe.
- * @i2s: Our local i2s structure to fill in.
- * @base: The base address for the registers.
- */
-extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
-			   struct s3c_i2sv2_info *i2s,
-			   unsigned long base);
-
-/**
- * s3c_i2sv2_register_dai - register dai with soc core
- * @dev: DAI device
- * @id: DAI ID
- * @drv: The driver structure to register
- *
- * Fill in any missing fields and then register the given dai with the
- * soc core.
- */
-extern int s3c_i2sv2_register_dai(struct device *dev, int id,
-		struct snd_soc_dai_driver *drv);
-
-#endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
diff --git a/sound/soc/s3c24xx/s3c-pcm.c b/sound/soc/s3c24xx/s3c-pcm.c
deleted file mode 100644
index 2e020e1..0000000
--- a/sound/soc/s3c24xx/s3c-pcm.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/* sound/soc/s3c24xx/s3c-pcm.c
- *
- * ALSA SoC Audio Layer - S3C PCM-Controller driver
- *
- * Copyright (c) 2009 Samsung Electronics Co. Ltd
- * Author: Jaswinder Singh <jassi.brar@samsung.com>
- * based upon I2S drivers by Ben Dooks.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/soc.h>
-
-#include <plat/audio.h>
-#include <plat/dma.h>
-
-#include "s3c-dma.h"
-#include "s3c-pcm.h"
-
-static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
-	.name		= "PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c_pcm_dma_client_in = {
-	.name		= "PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c_pcm_stereo_out[] = {
-	[0] = {
-		.client		= &s3c_pcm_dma_client_out,
-		.dma_size	= 4,
-	},
-	[1] = {
-		.client		= &s3c_pcm_dma_client_out,
-		.dma_size	= 4,
-	},
-};
-
-static struct s3c_dma_params s3c_pcm_stereo_in[] = {
-	[0] = {
-		.client		= &s3c_pcm_dma_client_in,
-		.dma_size	= 4,
-	},
-	[1] = {
-		.client		= &s3c_pcm_dma_client_in,
-		.dma_size	= 4,
-	},
-};
-
-static struct s3c_pcm_info s3c_pcm[2];
-
-static void s3c_pcm_snd_txctrl(struct s3c_pcm_info *pcm, int on)
-{
-	void __iomem *regs = pcm->regs;
-	u32 ctl, clkctl;
-
-	clkctl = readl(regs + S3C_PCM_CLKCTL);
-	ctl = readl(regs + S3C_PCM_CTL);
-	ctl &= ~(S3C_PCM_CTL_TXDIPSTICK_MASK
-			 << S3C_PCM_CTL_TXDIPSTICK_SHIFT);
-
-	if (on) {
-		ctl |= S3C_PCM_CTL_TXDMA_EN;
-		ctl |= S3C_PCM_CTL_TXFIFO_EN;
-		ctl |= S3C_PCM_CTL_ENABLE;
-		ctl |= (0x4<<S3C_PCM_CTL_TXDIPSTICK_SHIFT);
-		clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
-	} else {
-		ctl &= ~S3C_PCM_CTL_TXDMA_EN;
-		ctl &= ~S3C_PCM_CTL_TXFIFO_EN;
-
-		if (!(ctl & S3C_PCM_CTL_RXFIFO_EN)) {
-			ctl &= ~S3C_PCM_CTL_ENABLE;
-			if (!pcm->idleclk)
-				clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
-		}
-	}
-
-	writel(clkctl, regs + S3C_PCM_CLKCTL);
-	writel(ctl, regs + S3C_PCM_CTL);
-}
-
-static void s3c_pcm_snd_rxctrl(struct s3c_pcm_info *pcm, int on)
-{
-	void __iomem *regs = pcm->regs;
-	u32 ctl, clkctl;
-
-	ctl = readl(regs + S3C_PCM_CTL);
-	clkctl = readl(regs + S3C_PCM_CLKCTL);
-	ctl &= ~(S3C_PCM_CTL_RXDIPSTICK_MASK
-			 << S3C_PCM_CTL_RXDIPSTICK_SHIFT);
-
-	if (on) {
-		ctl |= S3C_PCM_CTL_RXDMA_EN;
-		ctl |= S3C_PCM_CTL_RXFIFO_EN;
-		ctl |= S3C_PCM_CTL_ENABLE;
-		ctl |= (0x20<<S3C_PCM_CTL_RXDIPSTICK_SHIFT);
-		clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
-	} else {
-		ctl &= ~S3C_PCM_CTL_RXDMA_EN;
-		ctl &= ~S3C_PCM_CTL_RXFIFO_EN;
-
-		if (!(ctl & S3C_PCM_CTL_TXFIFO_EN)) {
-			ctl &= ~S3C_PCM_CTL_ENABLE;
-			if (!pcm->idleclk)
-				clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
-		}
-	}
-
-	writel(clkctl, regs + S3C_PCM_CLKCTL);
-	writel(ctl, regs + S3C_PCM_CTL);
-}
-
-static int s3c_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
-			       struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-	unsigned long flags;
-
-	dev_dbg(pcm->dev, "Entered %s\n", __func__);
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		spin_lock_irqsave(&pcm->lock, flags);
-
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-			s3c_pcm_snd_rxctrl(pcm, 1);
-		else
-			s3c_pcm_snd_txctrl(pcm, 1);
-
-		spin_unlock_irqrestore(&pcm->lock, flags);
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		spin_lock_irqsave(&pcm->lock, flags);
-
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-			s3c_pcm_snd_rxctrl(pcm, 0);
-		else
-			s3c_pcm_snd_txctrl(pcm, 0);
-
-		spin_unlock_irqrestore(&pcm->lock, flags);
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int s3c_pcm_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *socdai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-	struct s3c_dma_params *dma_data;
-	void __iomem *regs = pcm->regs;
-	struct clk *clk;
-	int sclk_div, sync_div;
-	unsigned long flags;
-	u32 clkctl;
-
-	dev_dbg(pcm->dev, "Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = pcm->dma_playback;
-	else
-		dma_data = pcm->dma_capture;
-
-	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
-
-	/* Strictly check for sample size */
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S16_LE:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	spin_lock_irqsave(&pcm->lock, flags);
-
-	/* Get hold of the PCMSOURCE_CLK */
-	clkctl = readl(regs + S3C_PCM_CLKCTL);
-	if (clkctl & S3C_PCM_CLKCTL_SERCLKSEL_PCLK)
-		clk = pcm->pclk;
-	else
-		clk = pcm->cclk;
-
-	/* Set the SCLK divider */
-	sclk_div = clk_get_rate(clk) / pcm->sclk_per_fs /
-					params_rate(params) / 2 - 1;
-
-	clkctl &= ~(S3C_PCM_CLKCTL_SCLKDIV_MASK
-			<< S3C_PCM_CLKCTL_SCLKDIV_SHIFT);
-	clkctl |= ((sclk_div & S3C_PCM_CLKCTL_SCLKDIV_MASK)
-			<< S3C_PCM_CLKCTL_SCLKDIV_SHIFT);
-
-	/* Set the SYNC divider */
-	sync_div = pcm->sclk_per_fs - 1;
-
-	clkctl &= ~(S3C_PCM_CLKCTL_SYNCDIV_MASK
-				<< S3C_PCM_CLKCTL_SYNCDIV_SHIFT);
-	clkctl |= ((sync_div & S3C_PCM_CLKCTL_SYNCDIV_MASK)
-				<< S3C_PCM_CLKCTL_SYNCDIV_SHIFT);
-
-	writel(clkctl, regs + S3C_PCM_CLKCTL);
-
-	spin_unlock_irqrestore(&pcm->lock, flags);
-
-	dev_dbg(pcm->dev, "PCMSOURCE_CLK-%lu SCLK=%ufs SCLK_DIV=%d SYNC_DIV=%d\n",
-				clk_get_rate(clk), pcm->sclk_per_fs,
-				sclk_div, sync_div);
-
-	return 0;
-}
-
-static int s3c_pcm_set_fmt(struct snd_soc_dai *cpu_dai,
-			       unsigned int fmt)
-{
-	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(cpu_dai);
-	void __iomem *regs = pcm->regs;
-	unsigned long flags;
-	int ret = 0;
-	u32 ctl;
-
-	dev_dbg(pcm->dev, "Entered %s\n", __func__);
-
-	spin_lock_irqsave(&pcm->lock, flags);
-
-	ctl = readl(regs + S3C_PCM_CTL);
-
-	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-	case SND_SOC_DAIFMT_NB_NF:
-		/* Nothing to do, NB_NF by default */
-		break;
-	default:
-		dev_err(pcm->dev, "Unsupported clock inversion!\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-	case SND_SOC_DAIFMT_CBS_CFS:
-		/* Nothing to do, Master by default */
-		break;
-	default:
-		dev_err(pcm->dev, "Unsupported master/slave format!\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
-	case SND_SOC_DAIFMT_CONT:
-		pcm->idleclk = 1;
-		break;
-	case SND_SOC_DAIFMT_GATED:
-		pcm->idleclk = 0;
-		break;
-	default:
-		dev_err(pcm->dev, "Invalid Clock gating request!\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-	case SND_SOC_DAIFMT_DSP_A:
-		ctl |= S3C_PCM_CTL_TXMSB_AFTER_FSYNC;
-		ctl |= S3C_PCM_CTL_RXMSB_AFTER_FSYNC;
-		break;
-	case SND_SOC_DAIFMT_DSP_B:
-		ctl &= ~S3C_PCM_CTL_TXMSB_AFTER_FSYNC;
-		ctl &= ~S3C_PCM_CTL_RXMSB_AFTER_FSYNC;
-		break;
-	default:
-		dev_err(pcm->dev, "Unsupported data format!\n");
-		ret = -EINVAL;
-		goto exit;
-	}
-
-	writel(ctl, regs + S3C_PCM_CTL);
-
-exit:
-	spin_unlock_irqrestore(&pcm->lock, flags);
-
-	return ret;
-}
-
-static int s3c_pcm_set_clkdiv(struct snd_soc_dai *cpu_dai,
-						int div_id, int div)
-{
-	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(cpu_dai);
-
-	switch (div_id) {
-	case S3C_PCM_SCLK_PER_FS:
-		pcm->sclk_per_fs = div;
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int s3c_pcm_set_sysclk(struct snd_soc_dai *cpu_dai,
-				  int clk_id, unsigned int freq, int dir)
-{
-	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(cpu_dai);
-	void __iomem *regs = pcm->regs;
-	u32 clkctl = readl(regs + S3C_PCM_CLKCTL);
-
-	switch (clk_id) {
-	case S3C_PCM_CLKSRC_PCLK:
-		clkctl |= S3C_PCM_CLKCTL_SERCLKSEL_PCLK;
-		break;
-
-	case S3C_PCM_CLKSRC_MUX:
-		clkctl &= ~S3C_PCM_CLKCTL_SERCLKSEL_PCLK;
-
-		if (clk_get_rate(pcm->cclk) != freq)
-			clk_set_rate(pcm->cclk, freq);
-
-		break;
-
-	default:
-		return -EINVAL;
-	}
-
-	writel(clkctl, regs + S3C_PCM_CLKCTL);
-
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c_pcm_dai_ops = {
-	.set_sysclk	= s3c_pcm_set_sysclk,
-	.set_clkdiv	= s3c_pcm_set_clkdiv,
-	.trigger	= s3c_pcm_trigger,
-	.hw_params	= s3c_pcm_hw_params,
-	.set_fmt	= s3c_pcm_set_fmt,
-};
-
-#define S3C_PCM_RATES  SNDRV_PCM_RATE_8000_96000
-
-#define S3C_PCM_DAI_DECLARE			\
-	.symmetric_rates = 1,					\
-	.ops = &s3c_pcm_dai_ops,				\
-	.playback = {						\
-		.channels_min	= 2,				\
-		.channels_max	= 2,				\
-		.rates		= S3C_PCM_RATES,		\
-		.formats	= SNDRV_PCM_FMTBIT_S16_LE,	\
-	},							\
-	.capture = {						\
-		.channels_min	= 2,				\
-		.channels_max	= 2,				\
-		.rates		= S3C_PCM_RATES,		\
-		.formats	= SNDRV_PCM_FMTBIT_S16_LE,	\
-	}
-
-struct snd_soc_dai_driver s3c_pcm_dai[] = {
-	[0] = {
-		.name	= "samsung-pcm.0",
-		S3C_PCM_DAI_DECLARE,
-	},
-	[1] = {
-		.name	= "samsung-pcm.1",
-		S3C_PCM_DAI_DECLARE,
-	},
-};
-EXPORT_SYMBOL_GPL(s3c_pcm_dai);
-
-static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_pcm_info *pcm;
-	struct resource *mem_res, *dmatx_res, *dmarx_res;
-	struct s3c_audio_pdata *pcm_pdata;
-	int ret;
-
-	/* Check for valid device index */
-	if ((pdev->id < 0) || pdev->id >= ARRAY_SIZE(s3c_pcm)) {
-		dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
-		return -EINVAL;
-	}
-
-	pcm_pdata = pdev->dev.platform_data;
-
-	/* Check for availability of necessary resource */
-	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!dmatx_res) {
-		dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
-		return -ENXIO;
-	}
-
-	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!dmarx_res) {
-		dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
-		return -ENXIO;
-	}
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem_res) {
-		dev_err(&pdev->dev, "Unable to get register resource\n");
-		return -ENXIO;
-	}
-
-	if (pcm_pdata && pcm_pdata->cfg_gpio && pcm_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-
-	pcm = &s3c_pcm[pdev->id];
-	pcm->dev = &pdev->dev;
-
-	spin_lock_init(&pcm->lock);
-
-	/* Default is 128fs */
-	pcm->sclk_per_fs = 128;
-
-	pcm->cclk = clk_get(&pdev->dev, "audio-bus");
-	if (IS_ERR(pcm->cclk)) {
-		dev_err(&pdev->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(pcm->cclk);
-		goto err1;
-	}
-	clk_enable(pcm->cclk);
-
-	/* record our pcm structure for later use in the callbacks */
-	dev_set_drvdata(&pdev->dev, pcm);
-
-	if (!request_mem_region(mem_res->start,
-				resource_size(mem_res), "samsung-pcm")) {
-		dev_err(&pdev->dev, "Unable to request register region\n");
-		ret = -EBUSY;
-		goto err2;
-	}
-
-	pcm->regs = ioremap(mem_res->start, 0x100);
-	if (pcm->regs == NULL) {
-		dev_err(&pdev->dev, "cannot ioremap registers\n");
-		ret = -ENXIO;
-		goto err3;
-	}
-
-	pcm->pclk = clk_get(&pdev->dev, "pcm");
-	if (IS_ERR(pcm->pclk)) {
-		dev_err(&pdev->dev, "failed to get pcm_clock\n");
-		ret = -ENOENT;
-		goto err4;
-	}
-	clk_enable(pcm->pclk);
-
-	ret = snd_soc_register_dai(&pdev->dev, &s3c_pcm_dai[pdev->id]);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "failed to get pcm_clock\n");
-		goto err5;
-	}
-
-	s3c_pcm_stereo_in[pdev->id].dma_addr = mem_res->start
-							+ S3C_PCM_RXFIFO;
-	s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
-							+ S3C_PCM_TXFIFO;
-
-	s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
-	s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;
-
-	pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
-	pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
-
-	return 0;
-
-err5:
-	clk_disable(pcm->pclk);
-	clk_put(pcm->pclk);
-err4:
-	iounmap(pcm->regs);
-err3:
-	release_mem_region(mem_res->start, resource_size(mem_res));
-err2:
-	clk_disable(pcm->cclk);
-	clk_put(pcm->cclk);
-err1:
-	return ret;
-}
-
-static __devexit int s3c_pcm_dev_remove(struct platform_device *pdev)
-{
-	struct s3c_pcm_info *pcm = &s3c_pcm[pdev->id];
-	struct resource *mem_res;
-
-	snd_soc_unregister_dai(&pdev->dev);
-
-	iounmap(pcm->regs);
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(mem_res->start, resource_size(mem_res));
-
-	clk_disable(pcm->cclk);
-	clk_disable(pcm->pclk);
-	clk_put(pcm->pclk);
-	clk_put(pcm->cclk);
-
-	return 0;
-}
-
-static struct platform_driver s3c_pcm_driver = {
-	.probe  = s3c_pcm_dev_probe,
-	.remove = s3c_pcm_dev_remove,
-	.driver = {
-		.name = "samsung-pcm",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c_pcm_init(void)
-{
-	return platform_driver_register(&s3c_pcm_driver);
-}
-module_init(s3c_pcm_init);
-
-static void __exit s3c_pcm_exit(void)
-{
-	platform_driver_unregister(&s3c_pcm_driver);
-}
-module_exit(s3c_pcm_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("S3C PCM Controller Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-pcm");
diff --git a/sound/soc/s3c24xx/s3c-pcm.h b/sound/soc/s3c24xx/s3c-pcm.h
deleted file mode 100644
index f60baa1..0000000
--- a/sound/soc/s3c24xx/s3c-pcm.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*  sound/soc/s3c24xx/s3c-pcm.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __S3C_PCM_H
-#define __S3C_PCM_H __FILE__
-
-/* Register Offsets */
-#define S3C_PCM_CTL	(0x00)
-#define S3C_PCM_CLKCTL	(0x04)
-#define S3C_PCM_TXFIFO	(0x08)
-#define S3C_PCM_RXFIFO	(0x0C)
-#define S3C_PCM_IRQCTL	(0x10)
-#define S3C_PCM_IRQSTAT	(0x14)
-#define S3C_PCM_FIFOSTAT	(0x18)
-#define S3C_PCM_CLRINT	(0x20)
-
-/* PCM_CTL Bit-Fields */
-#define S3C_PCM_CTL_TXDIPSTICK_MASK		(0x3f)
-#define S3C_PCM_CTL_TXDIPSTICK_SHIFT	(13)
-#define S3C_PCM_CTL_RXDIPSTICK_MASK		(0x3f)
-#define S3C_PCM_CTL_RXDIPSTICK_SHIFT	(7)
-#define S3C_PCM_CTL_TXDMA_EN		(0x1<<6)
-#define S3C_PCM_CTL_RXDMA_EN		(0x1<<5)
-#define S3C_PCM_CTL_TXMSB_AFTER_FSYNC	(0x1<<4)
-#define S3C_PCM_CTL_RXMSB_AFTER_FSYNC	(0x1<<3)
-#define S3C_PCM_CTL_TXFIFO_EN		(0x1<<2)
-#define S3C_PCM_CTL_RXFIFO_EN		(0x1<<1)
-#define S3C_PCM_CTL_ENABLE			(0x1<<0)
-
-/* PCM_CLKCTL Bit-Fields */
-#define S3C_PCM_CLKCTL_SERCLK_EN		(0x1<<19)
-#define S3C_PCM_CLKCTL_SERCLKSEL_PCLK	(0x1<<18)
-#define S3C_PCM_CLKCTL_SCLKDIV_MASK		(0x1ff)
-#define S3C_PCM_CLKCTL_SYNCDIV_MASK		(0x1ff)
-#define S3C_PCM_CLKCTL_SCLKDIV_SHIFT	(9)
-#define S3C_PCM_CLKCTL_SYNCDIV_SHIFT	(0)
-
-/* PCM_TXFIFO Bit-Fields */
-#define S3C_PCM_TXFIFO_DVALID	(0x1<<16)
-#define S3C_PCM_TXFIFO_DATA_MSK	(0xffff<<0)
-
-/* PCM_RXFIFO Bit-Fields */
-#define S3C_PCM_RXFIFO_DVALID	(0x1<<16)
-#define S3C_PCM_RXFIFO_DATA_MSK	(0xffff<<0)
-
-/* PCM_IRQCTL Bit-Fields */
-#define S3C_PCM_IRQCTL_IRQEN		(0x1<<14)
-#define S3C_PCM_IRQCTL_WRDEN		(0x1<<12)
-#define S3C_PCM_IRQCTL_TXEMPTYEN		(0x1<<11)
-#define S3C_PCM_IRQCTL_TXALMSTEMPTYEN	(0x1<<10)
-#define S3C_PCM_IRQCTL_TXFULLEN		(0x1<<9)
-#define S3C_PCM_IRQCTL_TXALMSTFULLEN	(0x1<<8)
-#define S3C_PCM_IRQCTL_TXSTARVEN		(0x1<<7)
-#define S3C_PCM_IRQCTL_TXERROVRFLEN		(0x1<<6)
-#define S3C_PCM_IRQCTL_RXEMPTEN		(0x1<<5)
-#define S3C_PCM_IRQCTL_RXALMSTEMPTEN	(0x1<<4)
-#define S3C_PCM_IRQCTL_RXFULLEN		(0x1<<3)
-#define S3C_PCM_IRQCTL_RXALMSTFULLEN	(0x1<<2)
-#define S3C_PCM_IRQCTL_RXSTARVEN		(0x1<<1)
-#define S3C_PCM_IRQCTL_RXERROVRFLEN		(0x1<<0)
-
-/* PCM_IRQSTAT Bit-Fields */
-#define S3C_PCM_IRQSTAT_IRQPND		(0x1<<13)
-#define S3C_PCM_IRQSTAT_WRD_XFER		(0x1<<12)
-#define S3C_PCM_IRQSTAT_TXEMPTY		(0x1<<11)
-#define S3C_PCM_IRQSTAT_TXALMSTEMPTY	(0x1<<10)
-#define S3C_PCM_IRQSTAT_TXFULL		(0x1<<9)
-#define S3C_PCM_IRQSTAT_TXALMSTFULL		(0x1<<8)
-#define S3C_PCM_IRQSTAT_TXSTARV		(0x1<<7)
-#define S3C_PCM_IRQSTAT_TXERROVRFL		(0x1<<6)
-#define S3C_PCM_IRQSTAT_RXEMPT		(0x1<<5)
-#define S3C_PCM_IRQSTAT_RXALMSTEMPT		(0x1<<4)
-#define S3C_PCM_IRQSTAT_RXFULL		(0x1<<3)
-#define S3C_PCM_IRQSTAT_RXALMSTFULL		(0x1<<2)
-#define S3C_PCM_IRQSTAT_RXSTARV		(0x1<<1)
-#define S3C_PCM_IRQSTAT_RXERROVRFL		(0x1<<0)
-
-/* PCM_FIFOSTAT Bit-Fields */
-#define S3C_PCM_FIFOSTAT_TXCNT_MSK		(0x3f<<14)
-#define S3C_PCM_FIFOSTAT_TXFIFOEMPTY	(0x1<<13)
-#define S3C_PCM_FIFOSTAT_TXFIFOALMSTEMPTY	(0x1<<12)
-#define S3C_PCM_FIFOSTAT_TXFIFOFULL		(0x1<<11)
-#define S3C_PCM_FIFOSTAT_TXFIFOALMSTFULL	(0x1<<10)
-#define S3C_PCM_FIFOSTAT_RXCNT_MSK		(0x3f<<4)
-#define S3C_PCM_FIFOSTAT_RXFIFOEMPTY	(0x1<<3)
-#define S3C_PCM_FIFOSTAT_RXFIFOALMSTEMPTY	(0x1<<2)
-#define S3C_PCM_FIFOSTAT_RXFIFOFULL		(0x1<<1)
-#define S3C_PCM_FIFOSTAT_RXFIFOALMSTFULL	(0x1<<0)
-
-#define S3C_PCM_CLKSRC_PCLK	0
-#define S3C_PCM_CLKSRC_MUX	1
-
-#define S3C_PCM_SCLK_PER_FS	0
-
-/**
- * struct s3c_pcm_info - S3C PCM Controller information
- * @dev: The parent device passed to us from the probe.
- * @regs: The pointer to the device register block.
- * @dma_playback: DMA information for playback channel.
- * @dma_capture: DMA information for capture channel.
- */
-struct s3c_pcm_info {
-	spinlock_t lock;
-	struct device	*dev;
-	void __iomem	*regs;
-
-	unsigned int sclk_per_fs;
-
-	/* Whether to keep PCMSCLK enabled even when idle (no active xfer) */
-	unsigned int idleclk;
-
-	struct clk	*pclk;
-	struct clk	*cclk;
-
-	struct s3c_dma_params	*dma_playback;
-	struct s3c_dma_params	*dma_capture;
-};
-
-#endif /* __S3C_PCM_H */
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.c b/sound/soc/s3c24xx/s3c2412-i2s.c
deleted file mode 100644
index 4a861cf..0000000
--- a/sound/soc/s3c24xx/s3c2412-i2s.c
+++ /dev/null
@@ -1,212 +0,0 @@
-/* sound/soc/s3c24xx/s3c2412-i2s.c
- *
- * ALSA Soc Audio Layer - S3C2412 I2S driver
- *
- * Copyright (c) 2006 Wolfson Microelectronics PLC.
- *	Graeme Gregory graeme.gregory@wolfsonmicro.com
- *	linux@wolfsonmicro.com
- *
- * Copyright (c) 2007, 2004-2005 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/soc.h>
-#include <mach/hardware.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c2412-i2s.h"
-
-#define S3C2412_I2S_DEBUG 0
-
-static struct s3c2410_dma_client s3c2412_dma_client_out = {
-	.name		= "I2S PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c2412_dma_client_in = {
-	.name		= "I2S PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
-	.client		= &s3c2412_dma_client_out,
-	.channel	= DMACH_I2S_OUT,
-	.dma_addr	= S3C2410_PA_IIS + S3C2412_IISTXD,
-	.dma_size	= 4,
-};
-
-static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
-	.client		= &s3c2412_dma_client_in,
-	.channel	= DMACH_I2S_IN,
-	.dma_addr	= S3C2410_PA_IIS + S3C2412_IISRXD,
-	.dma_size	= 4,
-};
-
-static struct s3c_i2sv2_info s3c2412_i2s;
-
-static int s3c2412_i2s_probe(struct snd_soc_dai *dai)
-{
-	int ret;
-
-	pr_debug("Entered %s\n", __func__);
-
-	ret = s3c_i2sv2_probe(dai, &s3c2412_i2s, S3C2410_PA_IIS);
-	if (ret)
-		return ret;
-
-	s3c2412_i2s.dma_capture = &s3c2412_i2s_pcm_stereo_in;
-	s3c2412_i2s.dma_playback = &s3c2412_i2s_pcm_stereo_out;
-
-	s3c2412_i2s.iis_cclk = clk_get(dai->dev, "i2sclk");
-	if (s3c2412_i2s.iis_cclk == NULL) {
-		pr_err("failed to get i2sclk clock\n");
-		iounmap(s3c2412_i2s.regs);
-		return -ENODEV;
-	}
-
-	/* Set MPLL as the source for IIS CLK */
-
-	clk_set_parent(s3c2412_i2s.iis_cclk, clk_get(NULL, "mpll"));
-	clk_enable(s3c2412_i2s.iis_cclk);
-
-	s3c2412_i2s.iis_cclk = s3c2412_i2s.iis_pclk;
-
-	/* Configure the I2S pins in correct mode */
-	s3c2410_gpio_cfgpin(S3C2410_GPE0, S3C2410_GPE0_I2SLRCK);
-	s3c2410_gpio_cfgpin(S3C2410_GPE1, S3C2410_GPE1_I2SSCLK);
-	s3c2410_gpio_cfgpin(S3C2410_GPE2, S3C2410_GPE2_CDCLK);
-	s3c2410_gpio_cfgpin(S3C2410_GPE3, S3C2410_GPE3_I2SSDI);
-	s3c2410_gpio_cfgpin(S3C2410_GPE4, S3C2410_GPE4_I2SSDO);
-
-	return 0;
-}
-
-static int s3c2412_i2s_remove(struct snd_soc_dai *dai)
-{
-	clk_disable(s3c2412_i2s.iis_cclk);
-	clk_put(s3c2412_i2s.iis_cclk);
-	iounmap(s3c2412_i2s.regs);
-
-	return 0;
-}
-
-static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *cpu_dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai);
-	struct s3c_dma_params *dma_data;
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = i2s->dma_playback;
-	else
-		dma_data = i2s->dma_capture;
-
-	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
-
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
-
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S8:
-		iismod |= S3C2412_IISMOD_8BIT;
-		break;
-	case SNDRV_PCM_FORMAT_S16_LE:
-		iismod &= ~S3C2412_IISMOD_8BIT;
-		break;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
-
-	return 0;
-}
-
-#define S3C2412_I2S_RATES \
-	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
-	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
-	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-
-static struct snd_soc_dai_ops s3c2412_i2s_dai_ops = {
-	.hw_params	= s3c2412_i2s_hw_params,
-};
-
-static struct snd_soc_dai_driver s3c2412_i2s_dai = {
-	.probe		= s3c2412_i2s_probe,
-	.remove	= s3c2412_i2s_remove,
-	.playback = {
-		.channels_min	= 2,
-		.channels_max	= 2,
-		.rates		= S3C2412_I2S_RATES,
-		.formats	= SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,
-	},
-	.capture = {
-		.channels_min	= 2,
-		.channels_max	= 2,
-		.rates		= S3C2412_I2S_RATES,
-		.formats	= SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,
-	},
-	.ops = &s3c2412_i2s_dai_ops,
-};
-
-static __devinit int s3c2412_iis_dev_probe(struct platform_device *pdev)
-{
-	return snd_soc_register_dai(&pdev->dev, &s3c2412_i2s_dai);
-}
-
-static __devexit int s3c2412_iis_dev_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_dai(&pdev->dev);
-	return 0;
-}
-
-static struct platform_driver s3c2412_iis_driver = {
-	.probe  = s3c2412_iis_dev_probe,
-	.remove = s3c2412_iis_dev_remove,
-	.driver = {
-		.name = "s3c2412-iis",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c2412_i2s_init(void)
-{
-	return platform_driver_register(&s3c2412_iis_driver);
-}
-module_init(s3c2412_i2s_init);
-
-static void __exit s3c2412_i2s_exit(void)
-{
-	platform_driver_unregister(&s3c2412_iis_driver);
-}
-module_exit(s3c2412_i2s_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C2412 I2S SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2412-iis");
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.h b/sound/soc/s3c24xx/s3c2412-i2s.h
deleted file mode 100644
index 01a0471..0000000
--- a/sound/soc/s3c24xx/s3c2412-i2s.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* sound/soc/s3c24xx/s3c2412-i2s.h
- *
- * ALSA Soc Audio Layer - S3C2412 I2S driver
- *
- * Copyright (c) 2007 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
-*/
-
-#ifndef __SND_SOC_S3C24XX_S3C2412_I2S_H
-#define __SND_SOC_S3C24XX_S3C2412_I2S_H __FILE__
-
-#include "s3c-i2s-v2.h"
-
-#define S3C2412_DIV_BCLK	S3C_I2SV2_DIV_BCLK
-#define S3C2412_DIV_RCLK	S3C_I2SV2_DIV_RCLK
-#define S3C2412_DIV_PRESCALER	S3C_I2SV2_DIV_PRESCALER
-
-#define S3C2412_CLKSRC_PCLK	S3C_I2SV2_CLKSRC_PCLK
-#define S3C2412_CLKSRC_I2SCLK	S3C_I2SV2_CLKSRC_AUDIOBUS
-
-#endif /* __SND_SOC_S3C24XX_S3C2412_I2S_H */
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/s3c24xx/s3c24xx-i2s.c
deleted file mode 100644
index e060daa..0000000
--- a/sound/soc/s3c24xx/s3c24xx-i2s.c
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * s3c24xx-i2s.c  --  ALSA Soc Audio Layer
- *
- * (c) 2006 Wolfson Microelectronics PLC.
- * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- * Copyright 2004-2005 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/soc.h>
-
-#include <mach/hardware.h>
-#include <mach/regs-gpio.h>
-#include <mach/regs-clock.h>
-
-#include <asm/dma.h>
-#include <mach/dma.h>
-
-#include <plat/regs-iis.h>
-
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-
-static struct s3c2410_dma_client s3c24xx_dma_client_out = {
-	.name = "I2S PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c24xx_dma_client_in = {
-	.name = "I2S PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
-	.client		= &s3c24xx_dma_client_out,
-	.channel	= DMACH_I2S_OUT,
-	.dma_addr	= S3C2410_PA_IIS + S3C2410_IISFIFO,
-	.dma_size	= 2,
-};
-
-static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
-	.client		= &s3c24xx_dma_client_in,
-	.channel	= DMACH_I2S_IN,
-	.dma_addr	= S3C2410_PA_IIS + S3C2410_IISFIFO,
-	.dma_size	= 2,
-};
-
-struct s3c24xx_i2s_info {
-	void __iomem	*regs;
-	struct clk	*iis_clk;
-	u32		iiscon;
-	u32		iismod;
-	u32		iisfcon;
-	u32		iispsr;
-};
-static struct s3c24xx_i2s_info s3c24xx_i2s;
-
-static void s3c24xx_snd_txctrl(int on)
-{
-	u32 iisfcon;
-	u32 iiscon;
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
-	iiscon  = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
-	iismod  = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
-
-	pr_debug("r: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
-
-	if (on) {
-		iisfcon |= S3C2410_IISFCON_TXDMA | S3C2410_IISFCON_TXENABLE;
-		iiscon  |= S3C2410_IISCON_TXDMAEN | S3C2410_IISCON_IISEN;
-		iiscon  &= ~S3C2410_IISCON_TXIDLE;
-		iismod  |= S3C2410_IISMOD_TXMODE;
-
-		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
-		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
-		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
-	} else {
-		/* note, we have to disable the FIFOs otherwise bad things
-		 * seem to happen when the DMA stops. According to the
-		 * Samsung supplied kernel, this should allow the DMA
-		 * engine and FIFOs to reset. If this isn't allowed, the
-		 * DMA engine will simply freeze randomly.
-		 */
-
-		iisfcon &= ~S3C2410_IISFCON_TXENABLE;
-		iisfcon &= ~S3C2410_IISFCON_TXDMA;
-		iiscon  |=  S3C2410_IISCON_TXIDLE;
-		iiscon  &= ~S3C2410_IISCON_TXDMAEN;
-		iismod  &= ~S3C2410_IISMOD_TXMODE;
-
-		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
-		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
-		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
-	}
-
-	pr_debug("w: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
-}
-
-static void s3c24xx_snd_rxctrl(int on)
-{
-	u32 iisfcon;
-	u32 iiscon;
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
-	iiscon  = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
-	iismod  = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
-
-	pr_debug("r: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
-
-	if (on) {
-		iisfcon |= S3C2410_IISFCON_RXDMA | S3C2410_IISFCON_RXENABLE;
-		iiscon  |= S3C2410_IISCON_RXDMAEN | S3C2410_IISCON_IISEN;
-		iiscon  &= ~S3C2410_IISCON_RXIDLE;
-		iismod  |= S3C2410_IISMOD_RXMODE;
-
-		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
-		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
-		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
-	} else {
-		/* note, we have to disable the FIFOs otherwise bad things
-		 * seem to happen when the DMA stops. According to the
-		 * Samsung supplied kernel, this should allow the DMA
-		 * engine and FIFOs to reset. If this isn't allowed, the
-		 * DMA engine will simply freeze randomly.
-		 */
-
-		iisfcon &= ~S3C2410_IISFCON_RXENABLE;
-		iisfcon &= ~S3C2410_IISFCON_RXDMA;
-		iiscon  |= S3C2410_IISCON_RXIDLE;
-		iiscon  &= ~S3C2410_IISCON_RXDMAEN;
-		iismod  &= ~S3C2410_IISMOD_RXMODE;
-
-		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
-		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
-		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
-	}
-
-	pr_debug("w: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
-}
-
-/*
- * Wait for the LR signal to allow synchronisation to the L/R clock
- * from the codec. May only be needed for slave mode.
- */
-static int s3c24xx_snd_lrsync(void)
-{
-	u32 iiscon;
-	int timeout = 50; /* 5ms */
-
-	pr_debug("Entered %s\n", __func__);
-
-	while (1) {
-		iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
-		if (iiscon & S3C2410_IISCON_LRINDEX)
-			break;
-
-		if (!timeout--)
-			return -ETIMEDOUT;
-		udelay(100);
-	}
-
-	return 0;
-}
-
-/*
- * Check whether CPU is the master or slave
- */
-static inline int s3c24xx_snd_is_clkmaster(void)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	return (readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & S3C2410_IISMOD_SLAVE) ? 0:1;
-}
-
-/*
- * Set S3C24xx I2S DAI format
- */
-static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
-		unsigned int fmt)
-{
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
-	pr_debug("hw_params r: IISMOD: %x \n", iismod);
-
-	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
-	case SND_SOC_DAIFMT_CBM_CFM:
-		iismod |= S3C2410_IISMOD_SLAVE;
-		break;
-	case SND_SOC_DAIFMT_CBS_CFS:
-		iismod &= ~S3C2410_IISMOD_SLAVE;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
-	case SND_SOC_DAIFMT_LEFT_J:
-		iismod |= S3C2410_IISMOD_MSB;
-		break;
-	case SND_SOC_DAIFMT_I2S:
-		iismod &= ~S3C2410_IISMOD_MSB;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
-	pr_debug("hw_params w: IISMOD: %x \n", iismod);
-	return 0;
-}
-
-static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct s3c_dma_params *dma_data;
-	u32 iismod;
-
-	pr_debug("Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = &s3c24xx_i2s_pcm_stereo_out;
-	else
-		dma_data = &s3c24xx_i2s_pcm_stereo_in;
-
-	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
-
-	/* Working copies of register */
-	iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
-	pr_debug("hw_params r: IISMOD: %x\n", iismod);
-
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S8:
-		iismod &= ~S3C2410_IISMOD_16BIT;
-		dma_data->dma_size = 1;
-		break;
-	case SNDRV_PCM_FORMAT_S16_LE:
-		iismod |= S3C2410_IISMOD_16BIT;
-		dma_data->dma_size = 2;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
-	pr_debug("hw_params w: IISMOD: %x\n", iismod);
-	return 0;
-}
-
-static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
-			       struct snd_soc_dai *dai)
-{
-	int ret = 0;
-	struct s3c_dma_params *dma_data =
-		snd_soc_dai_get_dma_data(dai, substream);
-
-	pr_debug("Entered %s\n", __func__);
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (!s3c24xx_snd_is_clkmaster()) {
-			ret = s3c24xx_snd_lrsync();
-			if (ret)
-				goto exit_err;
-		}
-
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-			s3c24xx_snd_rxctrl(1);
-		else
-			s3c24xx_snd_txctrl(1);
-
-		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-			s3c24xx_snd_rxctrl(0);
-		else
-			s3c24xx_snd_txctrl(0);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-exit_err:
-	return ret;
-}
-
-/*
- * Set S3C24xx Clock source
- */
-static int s3c24xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
-	int clk_id, unsigned int freq, int dir)
-{
-	u32 iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
-
-	pr_debug("Entered %s\n", __func__);
-
-	iismod &= ~S3C2440_IISMOD_MPLL;
-
-	switch (clk_id) {
-	case S3C24XX_CLKSRC_PCLK:
-		break;
-	case S3C24XX_CLKSRC_MPLL:
-		iismod |= S3C2440_IISMOD_MPLL;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
-	return 0;
-}
-
-/*
- * Set S3C24xx Clock dividers
- */
-static int s3c24xx_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
-	int div_id, int div)
-{
-	u32 reg;
-
-	pr_debug("Entered %s\n", __func__);
-
-	switch (div_id) {
-	case S3C24XX_DIV_BCLK:
-		reg = readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & ~S3C2410_IISMOD_FS_MASK;
-		writel(reg | div, s3c24xx_i2s.regs + S3C2410_IISMOD);
-		break;
-	case S3C24XX_DIV_MCLK:
-		reg = readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & ~(S3C2410_IISMOD_384FS);
-		writel(reg | div, s3c24xx_i2s.regs + S3C2410_IISMOD);
-		break;
-	case S3C24XX_DIV_PRESCALER:
-		writel(div, s3c24xx_i2s.regs + S3C2410_IISPSR);
-		reg = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
-		writel(reg | S3C2410_IISCON_PSCEN, s3c24xx_i2s.regs + S3C2410_IISCON);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-/*
- * To avoid duplicating clock code, allow machine driver to
- * get the clockrate from here.
- */
-u32 s3c24xx_i2s_get_clockrate(void)
-{
-	return clk_get_rate(s3c24xx_i2s.iis_clk);
-}
-EXPORT_SYMBOL_GPL(s3c24xx_i2s_get_clockrate);
-
-static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	s3c24xx_i2s.regs = ioremap(S3C2410_PA_IIS, 0x100);
-	if (s3c24xx_i2s.regs == NULL)
-		return -ENXIO;
-
-	s3c24xx_i2s.iis_clk = clk_get(dai->dev, "iis");
-	if (s3c24xx_i2s.iis_clk == NULL) {
-		pr_err("failed to get iis_clock\n");
-		iounmap(s3c24xx_i2s.regs);
-		return -ENODEV;
-	}
-	clk_enable(s3c24xx_i2s.iis_clk);
-
-	/* Configure the I2S pins in correct mode */
-	s3c2410_gpio_cfgpin(S3C2410_GPE0, S3C2410_GPE0_I2SLRCK);
-	s3c2410_gpio_cfgpin(S3C2410_GPE1, S3C2410_GPE1_I2SSCLK);
-	s3c2410_gpio_cfgpin(S3C2410_GPE2, S3C2410_GPE2_CDCLK);
-	s3c2410_gpio_cfgpin(S3C2410_GPE3, S3C2410_GPE3_I2SSDI);
-	s3c2410_gpio_cfgpin(S3C2410_GPE4, S3C2410_GPE4_I2SSDO);
-
-	writel(S3C2410_IISCON_IISEN, s3c24xx_i2s.regs + S3C2410_IISCON);
-
-	s3c24xx_snd_txctrl(0);
-	s3c24xx_snd_rxctrl(0);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int s3c24xx_i2s_suspend(struct snd_soc_dai *cpu_dai)
-{
-	pr_debug("Entered %s\n", __func__);
-
-	s3c24xx_i2s.iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
-	s3c24xx_i2s.iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
-	s3c24xx_i2s.iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
-	s3c24xx_i2s.iispsr = readl(s3c24xx_i2s.regs + S3C2410_IISPSR);
-
-	clk_disable(s3c24xx_i2s.iis_clk);
-
-	return 0;
-}
-
-static int s3c24xx_i2s_resume(struct snd_soc_dai *cpu_dai)
-{
-	pr_debug("Entered %s\n", __func__);
-	clk_enable(s3c24xx_i2s.iis_clk);
-
-	writel(s3c24xx_i2s.iiscon, s3c24xx_i2s.regs + S3C2410_IISCON);
-	writel(s3c24xx_i2s.iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
-	writel(s3c24xx_i2s.iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
-	writel(s3c24xx_i2s.iispsr, s3c24xx_i2s.regs + S3C2410_IISPSR);
-
-	return 0;
-}
-#else
-#define s3c24xx_i2s_suspend NULL
-#define s3c24xx_i2s_resume NULL
-#endif
-
-
-#define S3C24XX_I2S_RATES \
-	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
-	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
-	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-
-static struct snd_soc_dai_ops s3c24xx_i2s_dai_ops = {
-	.trigger	= s3c24xx_i2s_trigger,
-	.hw_params	= s3c24xx_i2s_hw_params,
-	.set_fmt	= s3c24xx_i2s_set_fmt,
-	.set_clkdiv	= s3c24xx_i2s_set_clkdiv,
-	.set_sysclk	= s3c24xx_i2s_set_sysclk,
-};
-
-static struct snd_soc_dai_driver s3c24xx_i2s_dai = {
-	.probe = s3c24xx_i2s_probe,
-	.suspend = s3c24xx_i2s_suspend,
-	.resume = s3c24xx_i2s_resume,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C24XX_I2S_RATES,
-		.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C24XX_I2S_RATES,
-		.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,},
-	.ops = &s3c24xx_i2s_dai_ops,
-};
-
-static __devinit int s3c24xx_iis_dev_probe(struct platform_device *pdev)
-{
-	return snd_soc_register_dai(&pdev->dev, &s3c24xx_i2s_dai);
-}
-
-static __devexit int s3c24xx_iis_dev_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_dai(&pdev->dev);
-	return 0;
-}
-
-static struct platform_driver s3c24xx_iis_driver = {
-	.probe  = s3c24xx_iis_dev_probe,
-	.remove = s3c24xx_iis_dev_remove,
-	.driver = {
-		.name = "s3c24xx-iis",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c24xx_i2s_init(void)
-{
-	return platform_driver_register(&s3c24xx_iis_driver);
-}
-module_init(s3c24xx_i2s_init);
-
-static void __exit s3c24xx_i2s_exit(void)
-{
-	platform_driver_unregister(&s3c24xx_iis_driver);
-}
-module_exit(s3c24xx_i2s_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("s3c24xx I2S SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c24xx-iis");
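
The s3c24xx-iis driver deleted above registers itself through hand-written module init/exit functions wrapping platform_driver_register()/unregister(). A minimal sketch of the same registration using the module_platform_driver() helper found in later kernels (illustrative only, not part of this merge):

	/* Equivalent to the s3c24xx_i2s_init()/s3c24xx_i2s_exit() pair above:
	 * module_platform_driver() emits module_init()/module_exit() stubs
	 * that register and unregister the named platform_driver.
	 */
	module_platform_driver(s3c24xx_iis_driver);
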
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c
deleted file mode 100644
index c4c1114..0000000
--- a/sound/soc/s3c24xx/s3c24xx_simtec.c
+++ /dev/null
@@ -1,395 +0,0 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec.c
- *
- * Copyright 2009 Simtec Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-#include <linux/i2c.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include <plat/audio-simtec.h>
-
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-#include "s3c24xx_simtec.h"
-
-static struct s3c24xx_audio_simtec_pdata *pdata;
-static struct clk *xtal_clk;
-
-static int spk_gain;
-static int spk_unmute;
-
-/**
- * speaker_gain_get - read the speaker gain setting.
- * @kcontrol: The control for the speaker gain.
- * @ucontrol: The value that needs to be updated.
- *
- * Read the value for the AMP gain control.
- */
-static int speaker_gain_get(struct snd_kcontrol *kcontrol,
-			    struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = spk_gain;
-	return 0;
-}
-
-/**
- * speaker_gain_set - set the value of the speaker amp gain
- * @value: The value to write.
- */
-static void speaker_gain_set(int value)
-{
-	gpio_set_value_cansleep(pdata->amp_gain[0], value & 1);
-	gpio_set_value_cansleep(pdata->amp_gain[1], value >> 1);
-}
-
-/**
- * speaker_gain_put - set the speaker gain setting.
- * @kcontrol: The control for the speaker gain.
- * @ucontrol: The value that needs to be set.
- *
- * Set the value of the speaker gain from the specified
- * @ucontrol setting.
- *
- * Note, if the speaker amp is muted, we do not set a gain value, as at
- * least one of the ICs that can be fitted will try to power up even if
- * the main control is set to off.
- */
-static int speaker_gain_put(struct snd_kcontrol *kcontrol,
-			    struct snd_ctl_elem_value *ucontrol)
-{
-	int value = ucontrol->value.integer.value[0];
-
-	spk_gain = value;
-
-	if (!spk_unmute)
-		speaker_gain_set(value);
-
-	return 0;
-}
-
-static const struct snd_kcontrol_new amp_gain_controls[] = {
-	SOC_SINGLE_EXT("Speaker Gain", 0, 0, 3, 0,
-		       speaker_gain_get, speaker_gain_put),
-};
-
-/**
- * spk_unmute_state - set the unmute state of the speaker
- * @to: zero to mute, non-zero to unmute.
- */
-static void spk_unmute_state(int to)
-{
-	pr_debug("%s: to=%d\n", __func__, to);
-
-	spk_unmute = to;
-	gpio_set_value(pdata->amp_gpio, to);
-
-	/* if we're unmuting, also re-set the gain */
-	if (to && pdata->amp_gain[0] > 0)
-		speaker_gain_set(spk_gain);
-}
-
-/**
- * speaker_unmute_get - read the speaker unmute setting.
- * @kcontrol: The control for the speaker gain.
- * @ucontrol: The value that needs to be updated.
- *
- * Read the current unmute state of the AMP.
- */
-static int speaker_unmute_get(struct snd_kcontrol *kcontrol,
-			    struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = spk_unmute;
-	return 0;
-}
-
-/**
- * speaker_unmute_put - set the speaker unmute setting.
- * @kcontrol: The control for the speaker gain.
- * @ucontrol: The value that needs to be set.
- *
- * Set the speaker unmute state from the specified @ucontrol setting.
- */
-static int speaker_unmute_put(struct snd_kcontrol *kcontrol,
-			    struct snd_ctl_elem_value *ucontrol)
-{
-	spk_unmute_state(ucontrol->value.integer.value[0]);
-	return 0;
-}
-
-/* This is added as a manual control as the speaker amps create clicks
- * when their power state is changed, which are far more noticeable than
- * anything produced by the CODEC itself.
- */
-static const struct snd_kcontrol_new amp_unmute_controls[] = {
-	SOC_SINGLE_EXT("Speaker Switch", 0, 0, 1, 0,
-		       speaker_unmute_get, speaker_unmute_put),
-};
-
-void simtec_audio_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	if (pdata->amp_gpio > 0) {
-		pr_debug("%s: adding amp routes\n", __func__);
-
-		snd_soc_add_controls(codec, amp_unmute_controls,
-				     ARRAY_SIZE(amp_unmute_controls));
-	}
-
-	if (pdata->amp_gain[0] > 0) {
-		pr_debug("%s: adding amp controls\n", __func__);
-		snd_soc_add_controls(codec, amp_gain_controls,
-				     ARRAY_SIZE(amp_gain_controls));
-	}
-}
-EXPORT_SYMBOL_GPL(simtec_audio_init);
-
-#define CODEC_CLOCK 12000000
-
-/**
- * simtec_hw_params - update hardware parameters
- * @substream: The audio substream instance.
- * @params: The parameters requested.
- *
- * Update the codec data routing and configuration settings
- * from the supplied data.
- */
-static int simtec_hw_params(struct snd_pcm_substream *substream,
-			    struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	int ret;
-
-	/* Set the CODEC as the bus clock master, I2S */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-				  SND_SOC_DAIFMT_NB_NF |
-				  SND_SOC_DAIFMT_CBM_CFM);
-	if (ret) {
-		pr_err("%s: failed set cpu dai format\n", __func__);
-		return ret;
-	}
-
-	/* Set the CODEC as the bus clock master */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-				  SND_SOC_DAIFMT_NB_NF |
-				  SND_SOC_DAIFMT_CBM_CFM);
-	if (ret) {
-		pr_err("%s: failed set codec dai format\n", __func__);
-		return ret;
-	}
-
-	ret = snd_soc_dai_set_sysclk(codec_dai, 0,
-				     CODEC_CLOCK, SND_SOC_CLOCK_IN);
-	if (ret) {
-		pr_err("%s: failed setting codec sysclk\n", __func__);
-		return ret;
-	}
-
-	if (pdata->use_mpllin) {
-		ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_MPLL,
-					     0, SND_SOC_CLOCK_OUT);
-
-		if (ret) {
-			pr_err("%s: failed to set MPLLin as clksrc\n",
-			       __func__);
-			return ret;
-		}
-	}
-
-	if (pdata->output_cdclk) {
-		int cdclk_scale;
-
-		cdclk_scale = clk_get_rate(xtal_clk) / CODEC_CLOCK;
-		cdclk_scale--;
-
-		ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
-					     cdclk_scale);
-	}
-
-	return 0;
-}
-
-static int simtec_call_startup(struct s3c24xx_audio_simtec_pdata *pd)
-{
-	/* Call any board-supplied startup code; currently this only
-	 * covers the bast/vr1000, which have a CPLD in the path of the
-	 * LRCLK. */
-	if (pd->startup)
-		pd->startup();
-
-	return 0;
-}
-
-static struct snd_soc_ops simtec_snd_ops = {
-	.hw_params	= simtec_hw_params,
-};
-
-/**
- * attach_gpio_amp - get and configure the necessary gpios
- * @dev: The device we're probing.
- * @pd: The platform data supplied by the board.
- *
- * If there is a GPIO based amplifier attached to the board, claim
- * the necessary GPIO lines for it, and set default values.
- */
-static int attach_gpio_amp(struct device *dev,
-			   struct s3c24xx_audio_simtec_pdata *pd)
-{
-	int ret;
-
-	/* attach gpio amp gain (if any) */
-	if (pdata->amp_gain[0] > 0) {
-		ret = gpio_request(pd->amp_gain[0], "gpio-amp-gain0");
-		if (ret) {
-			dev_err(dev, "cannot get amp gpio gain0\n");
-			return ret;
-		}
-
-		ret = gpio_request(pd->amp_gain[1], "gpio-amp-gain1");
-		if (ret) {
-			dev_err(dev, "cannot get amp gpio gain1\n");
-			gpio_free(pdata->amp_gain[0]);
-			return ret;
-		}
-
-		gpio_direction_output(pd->amp_gain[0], 0);
-		gpio_direction_output(pd->amp_gain[1], 0);
-	}
-
-	/* note, currently we assume GPA0 isn't a valid amp GPIO */
-	if (pdata->amp_gpio > 0) {
-		ret = gpio_request(pd->amp_gpio, "gpio-amp");
-		if (ret) {
-			dev_err(dev, "cannot get amp gpio %d (%d)\n",
-				pd->amp_gpio, ret);
-			goto err_amp;
-		}
-
-		/* set the amp off at startup */
-		spk_unmute_state(0);
-	}
-
-	return 0;
-
-err_amp:
-	if (pd->amp_gain[0] > 0) {
-		gpio_free(pd->amp_gain[0]);
-		gpio_free(pd->amp_gain[1]);
-	}
-
-	return ret;
-}
-
-static void detach_gpio_amp(struct s3c24xx_audio_simtec_pdata *pd)
-{
-	if (pd->amp_gain[0] > 0) {
-		gpio_free(pd->amp_gain[0]);
-		gpio_free(pd->amp_gain[1]);
-	}
-
-	if (pd->amp_gpio > 0)
-		gpio_free(pd->amp_gpio);
-}
-
-#ifdef CONFIG_PM
-int simtec_audio_resume(struct device *dev)
-{
-	simtec_call_startup(pdata);
-	return 0;
-}
-
-const struct dev_pm_ops simtec_audio_pmops = {
-	.resume	= simtec_audio_resume,
-};
-EXPORT_SYMBOL_GPL(simtec_audio_pmops);
-#endif
-
-int __devinit simtec_audio_core_probe(struct platform_device *pdev,
-				      struct snd_soc_card *card)
-{
-	struct platform_device *snd_dev;
-	int ret;
-
-	card->dai_link->ops = &simtec_snd_ops;
-
-	pdata = pdev->dev.platform_data;
-	if (!pdata) {
-		dev_err(&pdev->dev, "no platform data supplied\n");
-		return -EINVAL;
-	}
-
-	simtec_call_startup(pdata);
-
-	xtal_clk = clk_get(&pdev->dev, "xtal");
-	if (IS_ERR(xtal_clk)) {
-		dev_err(&pdev->dev, "could not get clkout0\n");
-		return -EINVAL;
-	}
-
-	dev_info(&pdev->dev, "xtal rate is %ld\n", clk_get_rate(xtal_clk));
-
-	ret = attach_gpio_amp(&pdev->dev, pdata);
-	if (ret)
-		goto err_clk;
-
-	snd_dev = platform_device_alloc("soc-audio", -1);
-	if (!snd_dev) {
-		dev_err(&pdev->dev, "failed to alloc soc-audio device\n");
-		ret = -ENOMEM;
-		goto err_gpio;
-	}
-
-	platform_set_drvdata(snd_dev, card);
-
-	ret = platform_device_add(snd_dev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to add soc-audio dev\n");
-		goto err_pdev;
-	}
-
-	platform_set_drvdata(pdev, snd_dev);
-	return 0;
-
-err_pdev:
-	platform_device_put(snd_dev);
-
-err_gpio:
-	detach_gpio_amp(pdata);
-
-err_clk:
-	clk_put(xtal_clk);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(simtec_audio_core_probe);
-
-int __devexit simtec_audio_remove(struct platform_device *pdev)
-{
-	struct platform_device *snd_dev = platform_get_drvdata(pdev);
-
-	platform_device_unregister(snd_dev);
-
-	detach_gpio_amp(pdata);
-	clk_put(xtal_clk);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(simtec_audio_remove);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("ALSA SoC Simtec Audio common support");
-MODULE_LICENSE("GPL");
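
simtec_hw_params() in the deleted file derives the CDCLK prescaler from the xtal rate and the fixed 12 MHz CODEC_CLOCK. A worked sketch of that calculation, assuming a hypothetical 24 MHz crystal (the real rate comes from clk_get_rate(xtal_clk)):

	/* cdclk_scale = xtal_rate / CODEC_CLOCK - 1; the IIS prescaler
	 * divides by (value + 1), so a 24 MHz xtal needs scale 1 to
	 * produce the 12 MHz master clock expected by the codec.
	 */
	static int example_cdclk_scale(unsigned long xtal_rate)
	{
		return xtal_rate / 12000000 - 1;	/* 12000000 == CODEC_CLOCK */
	}
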
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h
deleted file mode 100644
index e63d5ff..0000000
--- a/sound/soc/s3c24xx/s3c24xx_simtec.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec.h
- *
- * Copyright 2009 Simtec Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-extern void simtec_audio_init(struct snd_soc_pcm_runtime *rtd);
-
-extern int simtec_audio_core_probe(struct platform_device *pdev,
-				   struct snd_soc_card *card);
-
-extern int simtec_audio_remove(struct platform_device *pdev);
-
-#ifdef CONFIG_PM
-extern const struct dev_pm_ops simtec_audio_pmops;
-#define simtec_audio_pm &simtec_audio_pmops
-#else
-#define simtec_audio_pm NULL
-#endif
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c b/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
deleted file mode 100644
index f884537..0000000
--- a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
- *
- * Copyright 2009 Simtec Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include <plat/audio-simtec.h>
-
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-#include "s3c24xx_simtec.h"
-
-#include "../codecs/tlv320aic3x.h"
-
-static const struct snd_soc_dapm_widget dapm_widgets[] = {
-	SND_SOC_DAPM_LINE("GSM Out", NULL),
-	SND_SOC_DAPM_LINE("GSM In", NULL),
-	SND_SOC_DAPM_LINE("Line In", NULL),
-	SND_SOC_DAPM_LINE("Line Out", NULL),
-	SND_SOC_DAPM_LINE("ZV", NULL),
-	SND_SOC_DAPM_MIC("Mic Jack", NULL),
-	SND_SOC_DAPM_HP("Headphone Jack", NULL),
-};
-
-static const struct snd_soc_dapm_route base_map[] = {
-	/* Headphone connected to HP{L,R}OUT and HP{L,R}COM */
-
-	{ "Headphone Jack", NULL, "HPLOUT" },
-	{ "Headphone Jack", NULL, "HPLCOM" },
-	{ "Headphone Jack", NULL, "HPROUT" },
-	{ "Headphone Jack", NULL, "HPRCOM" },
-
-	/* ZV connected to Line1 */
-
-	{ "LINE1L", NULL, "ZV" },
-	{ "LINE1R", NULL, "ZV" },
-
-	/* Line In connected to Line2 */
-
-	{ "LINE2L", NULL, "Line In" },
-	{ "LINE2R", NULL, "Line In" },
-
-	/* Microphone connected to MIC3R and MIC_BIAS */
-
-	{ "MIC3L", NULL, "Mic Jack" },
-
-	/* GSM connected to MONO_LOUT and MIC3L (in) */
-
-	{ "GSM Out", NULL, "MONO_LOUT" },
-	{ "MIC3L", NULL, "GSM In" },
-
-	/* Speaker is connected to LINEOUT{LN,LP,RN,RP}; however, we are
-	 * not using the DAPM to power it up and down, as it makes
-	 * a click when powering up. */
-};
-
-/**
- * simtec_hermes_init - initialise and add controls
- * @rtd: The pcm runtime whose codec the controls are attached to.
- *
- * Attach our controls and configure the necessary codec
- * mappings for our sound card instance.
-*/
-static int simtec_hermes_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	snd_soc_dapm_new_controls(codec, dapm_widgets,
-				  ARRAY_SIZE(dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
-
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
-
-	simtec_audio_init(rtd);
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static struct snd_soc_dai_link simtec_dai_aic33 = {
-	.name		= "tlv320aic33",
-	.stream_name	= "TLV320AIC33",
-	.codec_name	= "tlv320aic3x-codec.0-0x1a",
-	.cpu_dai_name	= "s3c24xx-i2s",
-	.codec_dai_name = "tlv320aic3x-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
-	.init		= simtec_hermes_init,
-};
-
-/* simtec audio machine driver */
-static struct snd_soc_card snd_soc_machine_simtec_aic33 = {
-	.name		= "Simtec-Hermes",
-	.dai_link	= &simtec_dai_aic33,
-	.num_links	= 1,
-};
-
-static int __devinit simtec_audio_hermes_probe(struct platform_device *pd)
-{
-	dev_info(&pd->dev, "probing....\n");
-	return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic33);
-}
-
-static struct platform_driver simtec_audio_hermes_platdrv = {
-	.driver	= {
-		.owner	= THIS_MODULE,
-		.name	= "s3c24xx-simtec-hermes-snd",
-		.pm	= simtec_audio_pm,
-	},
-	.probe	= simtec_audio_hermes_probe,
-	.remove	= __devexit_p(simtec_audio_remove),
-};
-
-MODULE_ALIAS("platform:s3c24xx-simtec-hermes-snd");
-
-static int __init simtec_hermes_modinit(void)
-{
-	return platform_driver_register(&simtec_audio_hermes_platdrv);
-}
-
-static void __exit simtec_hermes_modexit(void)
-{
-	platform_driver_unregister(&simtec_audio_hermes_platdrv);
-}
-
-module_init(simtec_hermes_modinit);
-module_exit(simtec_hermes_modexit);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
-MODULE_LICENSE("GPL");
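
The Simtec machine drivers removed here all follow the same ASoC multi-component pattern: one snd_soc_dai_link names the codec device, codec DAI, CPU DAI and platform, and a snd_soc_card wraps it. A stripped-down skeleton of that pattern with placeholder names (none of these devices are real):

	static struct snd_soc_dai_link example_dai_link = {
		.name		= "example-link",
		.stream_name	= "Example HiFi",
		.codec_name	= "example-codec.0-001a",	/* codec device name */
		.codec_dai_name	= "example-codec-hifi",		/* codec DAI */
		.cpu_dai_name	= "s3c24xx-i2s",		/* CPU DAI */
		.platform_name	= "s3c24xx-pcm-audio",		/* DMA/PCM back end */
	};

	static struct snd_soc_card example_card = {
		.name		= "example-card",
		.dai_link	= &example_dai_link,
		.num_links	= 1,
	};
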
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c b/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
deleted file mode 100644
index c096759..0000000
--- a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
- *
- * Copyright 2009 Simtec Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include <plat/audio-simtec.h>
-
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-#include "s3c24xx_simtec.h"
-
-#include "../codecs/tlv320aic23.h"
-
-/* supported machines:
- *
- * Machine	Connections		AMP
- * -------	-----------		---
- * BAST		MIC, HPOUT, LOUT, LIN	TPA2001D1 (HPOUTL,R) (gain hardwired)
- * VR1000	HPOUT, LIN		None
- * VR2000	LIN, LOUT, MIC, HP	LM4871 (HPOUTL,R)
- * DePicture	LIN, LOUT, MIC, HP	LM4871 (HPOUTL,R)
- * Anubis	LIN, LOUT, MIC, HP	TPA2001D1 (HPOUTL,R)
- */
-
-static const struct snd_soc_dapm_widget dapm_widgets[] = {
-	SND_SOC_DAPM_HP("Headphone Jack", NULL),
-	SND_SOC_DAPM_LINE("Line In", NULL),
-	SND_SOC_DAPM_LINE("Line Out", NULL),
-	SND_SOC_DAPM_MIC("Mic Jack", NULL),
-};
-
-static const struct snd_soc_dapm_route base_map[] = {
-	{ "Headphone Jack", NULL, "LHPOUT"},
-	{ "Headphone Jack", NULL, "RHPOUT"},
-
-	{ "Line Out", NULL, "LOUT" },
-	{ "Line Out", NULL, "ROUT" },
-
-	{ "LLINEIN", NULL, "Line In"},
-	{ "RLINEIN", NULL, "Line In"},
-
-	{ "MICIN", NULL, "Mic Jack"},
-};
-
-/**
- * simtec_tlv320aic23_init - initialise and add controls
- * @rtd: The pcm runtime whose codec the controls are attached to.
- *
- * Attach our controls and configure the necessary codec
- * mappings for our sound card instance.
-*/
-static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	snd_soc_dapm_new_controls(codec, dapm_widgets,
-				  ARRAY_SIZE(dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
-
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
-
-	simtec_audio_init(rtd);
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static struct snd_soc_dai_link simtec_dai_aic23 = {
-	.name		= "tlv320aic23",
-	.stream_name	= "TLV320AIC23",
-	.codec_name	= "tlv320aic3x-codec.0-0x1a",
-	.cpu_dai_name	= "s3c24xx-i2s",
-	.codec_dai_name = "tlv320aic3x-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
-	.init		= simtec_tlv320aic23_init,
-};
-
-/* simtec audio machine driver */
-static struct snd_soc_card snd_soc_machine_simtec_aic23 = {
-	.name		= "Simtec",
-	.dai_link	= &simtec_dai_aic23,
-	.num_links	= 1,
-};
-
-static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd)
-{
-	return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic23);
-}
-
-static struct platform_driver simtec_audio_tlv320aic23_platdrv = {
-	.driver	= {
-		.owner	= THIS_MODULE,
-		.name	= "s3c24xx-simtec-tlv320aic23",
-		.pm	= simtec_audio_pm,
-	},
-	.probe	= simtec_audio_tlv320aic23_probe,
-	.remove	= __devexit_p(simtec_audio_remove),
-};
-
-MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23");
-
-static int __init simtec_tlv320aic23_modinit(void)
-{
-	return platform_driver_register(&simtec_audio_tlv320aic23_platdrv);
-}
-
-static void __exit simtec_tlv320aic23_modexit(void)
-{
-	platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv);
-}
-
-module_init(simtec_tlv320aic23_modinit);
-module_exit(simtec_tlv320aic23_modexit);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c24xx_uda134x.c b/sound/soc/s3c24xx/s3c24xx_uda134x.c
deleted file mode 100644
index bd48ffb..0000000
--- a/sound/soc/s3c24xx/s3c24xx_uda134x.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Modifications by Christian Pellegrin <chripell@evolware.org>
- *
- * s3c24xx_uda134x.c  --  S3C24XX_UDA134X ALSA SoC Audio board driver
- *
- * Copyright 2007 Dension Audio Systems Ltd.
- * Author: Zoltan Devai
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/s3c24xx_uda134x.h>
-#include <sound/uda134x.h>
-
-#include <plat/regs-iis.h>
-
-#include "s3c-dma.h"
-#include "s3c24xx-i2s.h"
-#include "../codecs/uda134x.h"
-
-
-/* #define ENFORCE_RATES 1 */
-/*
-  Unfortunately the S3C24XX in master mode has only a limited ability
-  to generate the clock for the codec. If you define this, only rates
-  that are really available will be enforced. But be careful: most
-  user-level applications just want the usual sampling frequencies
-  (8, 11.025, 22.05, 44.1 kHz), and resampling is a costly operation
-  for embedded systems. So unless you are very lucky or your hardware
-  engineer was very forward-looking, it is better to leave this
-  undefined. If you do so, an approximate value for the requested
-  sampling rate within +/- 5% will be chosen. If that is not
-  possible, an error will be returned.
-*/
-
-static struct clk *xtal;
-static struct clk *pclk;
-/* This is needed because we have nowhere to keep the clock pointers
- * for each substream. We only take the clocks while we are actually
- * using them, so we don't block things like frequency changes or
- * powering off the oscillator. */
-static int clk_users;
-static DEFINE_MUTEX(clk_lock);
-
-static unsigned int rates[33 * 2];
-#ifdef ENFORCE_RATES
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
-	.count	= ARRAY_SIZE(rates),
-	.list	= rates,
-	.mask	= 0,
-};
-#endif
-
-static struct platform_device *s3c24xx_uda134x_snd_device;
-
-static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-#ifdef ENFORCE_RATES
-	struct snd_pcm_runtime *runtime = substream->runtime;
-#endif
-
-	mutex_lock(&clk_lock);
-	pr_debug("%s %d\n", __func__, clk_users);
-	if (clk_users == 0) {
-		xtal = clk_get(&s3c24xx_uda134x_snd_device->dev, "xtal");
-		if (!xtal) {
-			printk(KERN_ERR "%s cannot get xtal\n", __func__);
-			ret = -EBUSY;
-		} else {
-			pclk = clk_get(&s3c24xx_uda134x_snd_device->dev,
-				       "pclk");
-			if (!pclk) {
-				printk(KERN_ERR "%s cannot get pclk\n",
-				       __func__);
-				clk_put(xtal);
-				ret = -EBUSY;
-			}
-		}
-		if (!ret) {
-			int i, j;
-
-			for (i = 0; i < 2; i++) {
-				int fs = i ? 256 : 384;
-
-				rates[i*33] = clk_get_rate(xtal) / fs;
-				for (j = 1; j < 33; j++)
-					rates[i*33 + j] = clk_get_rate(pclk) /
-						(j * fs);
-			}
-		}
-	}
-	clk_users += 1;
-	mutex_unlock(&clk_lock);
-	if (!ret) {
-#ifdef ENFORCE_RATES
-		ret = snd_pcm_hw_constraint_list(runtime, 0,
-						 SNDRV_PCM_HW_PARAM_RATE,
-						 &hw_constraints_rates);
-		if (ret < 0)
-			printk(KERN_ERR "%s cannot set constraints\n",
-			       __func__);
-#endif
-	}
-	return ret;
-}
-
-static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
-{
-	mutex_lock(&clk_lock);
-	pr_debug("%s %d\n", __func__, clk_users);
-	clk_users -= 1;
-	if (clk_users == 0) {
-		clk_put(xtal);
-		xtal = NULL;
-		clk_put(pclk);
-		pclk = NULL;
-	}
-	mutex_unlock(&clk_lock);
-}
-
-static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
-					struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int clk = 0;
-	int ret = 0;
-	int clk_source, fs_mode;
-	unsigned long rate = params_rate(params);
-	long err, cerr;
-	unsigned int div;
-	int i, bi;
-
-	err = 999999;
-	bi = 0;
-	for (i = 0; i < 2*33; i++) {
-		cerr = rates[i] - rate;
-		if (cerr < 0)
-			cerr = -cerr;
-		if (cerr < err) {
-			err = cerr;
-			bi = i;
-		}
-	}
-	if (bi / 33 == 1)
-		fs_mode = S3C2410_IISMOD_256FS;
-	else
-		fs_mode = S3C2410_IISMOD_384FS;
-	if (bi % 33 == 0) {
-		clk_source = S3C24XX_CLKSRC_MPLL;
-		div = 1;
-	} else {
-		clk_source = S3C24XX_CLKSRC_PCLK;
-		div = bi % 33;
-	}
-	pr_debug("%s desired rate %lu, %d\n", __func__, rate, bi);
-
-	clk = (fs_mode == S3C2410_IISMOD_384FS ? 384 : 256) * rate;
-	pr_debug("%s will use: %s %s %d sysclk %d err %ld\n", __func__,
-		 fs_mode == S3C2410_IISMOD_384FS ? "384FS" : "256FS",
-		 clk_source == S3C24XX_CLKSRC_MPLL ? "MPLLin" : "PCLK",
-		 div, clk, err);
-
-	if ((err * 100 / rate) > 5) {
-		printk(KERN_ERR "S3C24XX_UDA134X: effective frequency "
-		       "too different from desired (%ld%%)\n",
-		       err * 100 / rate);
-		return -EINVAL;
-	}
-
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source , clk,
-			SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, fs_mode);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
-			S3C2410_IISMOD_32FS);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
-			S3C24XX_PRESCALE(div, div));
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk,
-			SND_SOC_CLOCK_OUT);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops s3c24xx_uda134x_ops = {
-	.startup = s3c24xx_uda134x_startup,
-	.shutdown = s3c24xx_uda134x_shutdown,
-	.hw_params = s3c24xx_uda134x_hw_params,
-};
-
-static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
-	.name = "UDA134X",
-	.stream_name = "UDA134X",
-	.codec_name = "uda134x-hifi",
-	.codec_dai_name = "uda134x-hifi",
-	.cpu_dai_name = "s3c24xx-i2s",
-	.ops = &s3c24xx_uda134x_ops,
-	.platform_name	= "s3c24xx-pcm-audio",
-};
-
-static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
-	.name = "S3C24XX_UDA134X",
-	.dai_link = &s3c24xx_uda134x_dai_link,
-	.num_links = 1,
-};
-
-static struct s3c24xx_uda134x_platform_data *s3c24xx_uda134x_l3_pins;
-
-static void setdat(int v)
-{
-	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_data, v > 0);
-}
-
-static void setclk(int v)
-{
-	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_clk, v > 0);
-}
-
-static void setmode(int v)
-{
-	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_mode, v > 0);
-}
-
-/* FIXME - This must be codec platform data but in which board file ?? */
-static struct uda134x_platform_data s3c24xx_uda134x = {
-	.l3 = {
-		.setdat = setdat,
-		.setclk = setclk,
-		.setmode = setmode,
-		.data_hold = 1,
-		.data_setup = 1,
-		.clock_high = 1,
-		.mode_hold = 1,
-		.mode = 1,
-		.mode_setup = 1,
-	},
-};
-
-static int s3c24xx_uda134x_setup_pin(int pin, char *fun)
-{
-	if (gpio_request(pin, "s3c24xx_uda134x") < 0) {
-		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
-		       "l3 %s pin already in use", fun);
-		return -EBUSY;
-	}
-	gpio_direction_output(pin, 0);
-	return 0;
-}
-
-static int s3c24xx_uda134x_probe(struct platform_device *pdev)
-{
-	int ret;
-
-	printk(KERN_INFO "S3C24XX_UDA134X SoC Audio driver\n");
-
-	s3c24xx_uda134x_l3_pins = pdev->dev.platform_data;
-	if (s3c24xx_uda134x_l3_pins == NULL) {
-		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
-		       "unable to find platform data\n");
-		return -ENODEV;
-	}
-	s3c24xx_uda134x.power = s3c24xx_uda134x_l3_pins->power;
-	s3c24xx_uda134x.model = s3c24xx_uda134x_l3_pins->model;
-
-	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_data,
-				      "data") < 0)
-		return -EBUSY;
-	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_clk,
-				      "clk") < 0) {
-		gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
-		return -EBUSY;
-	}
-	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_mode,
-				      "mode") < 0) {
-		gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
-		gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
-		return -EBUSY;
-	}
-
-	s3c24xx_uda134x_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!s3c24xx_uda134x_snd_device) {
-		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
-		       "Unable to register\n");
-		return -ENOMEM;
-	}
-
-	platform_set_drvdata(s3c24xx_uda134x_snd_device,
-			     &snd_soc_s3c24xx_uda134x);
-	ret = platform_device_add(s3c24xx_uda134x_snd_device);
-	if (ret) {
-		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
-		platform_device_put(s3c24xx_uda134x_snd_device);
-	}
-
-	return ret;
-}
-
-static int s3c24xx_uda134x_remove(struct platform_device *pdev)
-{
-	platform_device_unregister(s3c24xx_uda134x_snd_device);
-	gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
-	gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
-	gpio_free(s3c24xx_uda134x_l3_pins->l3_mode);
-	return 0;
-}
-
-static struct platform_driver s3c24xx_uda134x_driver = {
-	.probe  = s3c24xx_uda134x_probe,
-	.remove = s3c24xx_uda134x_remove,
-	.driver = {
-		.name = "s3c24xx_uda134x",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c24xx_uda134x_init(void)
-{
-	return platform_driver_register(&s3c24xx_uda134x_driver);
-}
-
-static void __exit s3c24xx_uda134x_exit(void)
-{
-	platform_driver_unregister(&s3c24xx_uda134x_driver);
-}
-
-
-module_init(s3c24xx_uda134x_init);
-module_exit(s3c24xx_uda134x_exit);
-
-MODULE_AUTHOR("Zoltan Devai, Christian Pellegrin <chripell@evolware.org>");
-MODULE_DESCRIPTION("S3C24XX_UDA134X ALSA SoC audio driver");
-MODULE_LICENSE("GPL");
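
s3c24xx_uda134x_hw_params() above picks the achievable rate closest to the requested one from the 66-entry rates[] table (index i*33 + j, where i selects 384FS or 256FS, j == 0 is the xtal/MPLL rate and j >= 1 is pclk / (j * fs)) and then rejects the result if the error exceeds 5%. A condensed sketch of that search, independent of any particular clock rates:

	static unsigned int pick_closest_rate(const unsigned int *rates,
					      unsigned long want)
	{
		unsigned int bi = 0;
		long best = 999999, d;
		int i;

		for (i = 0; i < 2 * 33; i++) {
			d = (long)rates[i] - (long)want;
			if (d < 0)
				d = -d;
			if (d < best) {
				best = d;
				bi = i;
			}
		}
		/* caller rejects the pick when best * 100 / want > 5 (%) */
		return bi;
	}
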
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c b/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
deleted file mode 100644
index a962847..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s-v4.c
- *
- * ALSA SoC Audio Layer - S3C64XX I2Sv4 driver
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c64xx-i2s.h"
-
-static struct s3c2410_dma_client s3c64xx_dma_client_out = {
-	.name		= "I2Sv4 PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c64xx_dma_client_in = {
-	.name		= "I2Sv4 PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_out;
-static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_in;
-static struct s3c_i2sv2_info s3c64xx_i2sv4;
-
-static int s3c64xx_i2sv4_probe(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4;
-	int ret = 0;
-
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
-
-	return ret;
-}
-
-static int s3c_i2sv4_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *cpu_dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai);
-	struct s3c_dma_params *dma_data;
-	u32 iismod;
-
-	dev_dbg(cpu_dai->dev, "Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = i2s->dma_playback;
-	else
-		dma_data = i2s->dma_capture;
-
-	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
-
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	dev_dbg(cpu_dai->dev, "%s: r: IISMOD: %x\n", __func__, iismod);
-
-	iismod &= ~S3C64XX_IISMOD_BLC_MASK;
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S8:
-		iismod |= S3C64XX_IISMOD_BLC_8BIT;
-		break;
-	case SNDRV_PCM_FORMAT_S16_LE:
-		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		iismod |= S3C64XX_IISMOD_BLC_24BIT;
-		break;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	dev_dbg(cpu_dai->dev, "%s: w: IISMOD: %x\n", __func__, iismod);
-
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c64xx_i2sv4_dai_ops = {
-	.hw_params	= s3c_i2sv4_hw_params,
-};
-
-static struct snd_soc_dai_driver s3c64xx_i2s_v4_dai = {
-	.symmetric_rates = 1,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,
-	},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,
-	},
-	.probe = s3c64xx_i2sv4_probe,
-	.ops = &s3c64xx_i2sv4_dai_ops,
-};
-
-static __devinit int s3c64xx_i2sv4_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *i2s_pdata;
-	struct s3c_i2sv2_info *i2s;
-	struct resource *res;
-	int ret;
-
-	i2s = &s3c64xx_i2sv4;
-
-	i2s->feature |= S3C_FEATURE_CDCLKCON;
-
-	i2s->dma_capture = &s3c64xx_i2sv4_pcm_stereo_in;
-	i2s->dma_playback = &s3c64xx_i2sv4_pcm_stereo_out;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_playback->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_capture->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res),
-				"s3c64xx-i2s-v4")) {
-		dev_err(&pdev->dev, "Unable to request SFR region\n");
-		return -EBUSY;
-	}
-	i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD;
-	i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD;
-
-	i2s->dma_capture->client = &s3c64xx_dma_client_in;
-	i2s->dma_capture->dma_size = 4;
-	i2s->dma_playback->client = &s3c64xx_dma_client_out;
-	i2s->dma_playback->dma_size = 4;
-
-	i2s->base = res->start;
-
-	i2s_pdata = pdev->dev.platform_data;
-	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-
-	i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus");
-	if (IS_ERR(i2s->iis_cclk)) {
-		dev_err(&pdev->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(i2s->iis_cclk);
-		goto err;
-	}
-
-	clk_enable(i2s->iis_cclk);
-
-	ret = s3c_i2sv2_register_dai(&pdev->dev, pdev->id, &s3c64xx_i2s_v4_dai);
-	if (ret != 0)
-		goto err_i2sv2;
-
-	return 0;
-
-err_i2sv2:
-	clk_put(i2s->iis_cclk);
-err:
-	return ret;
-}
-
-static __devexit int s3c64xx_i2sv4_dev_remove(struct platform_device *pdev)
-{
-	struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4;
-	struct resource *res;
-
-	snd_soc_unregister_dai(&pdev->dev);
-	clk_put(i2s->iis_cclk);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
-	else
-		dev_warn(&pdev->dev, "Unable to get I2S SFR address\n");
-
-	return 0;
-}
-
-static struct platform_driver s3c64xx_i2sv4_driver = {
-	.probe  = s3c64xx_i2sv4_dev_probe,
-	.remove = s3c64xx_i2sv4_dev_remove,
-	.driver = {
-		.name = "s3c64xx-iis-v4",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c64xx_i2sv4_init(void)
-{
-	return platform_driver_register(&s3c64xx_i2sv4_driver);
-}
-module_init(s3c64xx_i2sv4_init);
-
-static void __exit s3c64xx_i2sv4_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_i2sv4_driver);
-}
-module_exit(s3c64xx_i2sv4_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("S3C64XX I2Sv4 SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c64xx-iis-v4");
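
s3c_i2sv4_hw_params() in the file above leaves IISMOD untouched for any sample format other than S8/S16_LE/S24_LE, silently accepting it. A defensive sketch of the same bit-length-control selection that rejects unsupported formats instead (not the driver's actual behaviour):

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		iismod |= S3C64XX_IISMOD_BLC_8BIT;
		break;
	case SNDRV_PCM_FORMAT_S16_LE:
		break;				/* 16-bit is the reset value */
	case SNDRV_PCM_FORMAT_S24_LE:
		iismod |= S3C64XX_IISMOD_BLC_24BIT;
		break;
	default:
		return -EINVAL;			/* unsupported bit depth */
	}
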
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c
deleted file mode 100644
index ae7acb6..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s.c
- *
- * ALSA SoC Audio Layer - S3C64XX I2S driver
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <sound/soc.h>
-
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c64xx-i2s.h"
-
-/* The value should be set to the maximum number of I2Sv3
- * controllers that any supported SoC has.
- */
-#define MAX_I2SV3	2
-
-static struct s3c2410_dma_client s3c64xx_dma_client_out = {
-	.name		= "I2S PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c64xx_dma_client_in = {
-	.name		= "I2S PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_out[MAX_I2SV3];
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_in[MAX_I2SV3];
-static struct s3c_i2sv2_info s3c64xx_i2s[MAX_I2SV3];
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai);
-	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
-	if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
-		return i2s->iis_cclk;
-	else
-		return i2s->iis_pclk;
-}
-EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clock);
-
-static int s3c64xx_i2s_probe(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s;
-	int ret;
-
-	if (dai->id >= MAX_I2SV3) {
-		dev_err(dai->dev, "id %d out of range\n", dai->id);
-		return -EINVAL;
-	}
-
-	i2s = &s3c64xx_i2s[dai->id];
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	i2s->iis_cclk = clk_get(dai->dev, "audio-bus");
-	if (IS_ERR(i2s->iis_cclk)) {
-		dev_err(dai->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(i2s->iis_cclk);
-		goto err;
-	}
-
-	clk_enable(i2s->iis_cclk);
-
-	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
-	if (ret)
-		goto err_clk;
-
-	return 0;
-
-err_clk:
-	clk_disable(i2s->iis_cclk);
-	clk_put(i2s->iis_cclk);
-err:
-	kfree(i2s);
-	return ret;
-}
-
-static int s3c64xx_i2s_remove(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai);
-
-	clk_disable(i2s->iis_cclk);
-	clk_put(i2s->iis_cclk);
-	kfree(i2s);
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops;
-
-static struct snd_soc_dai_driver s3c64xx_i2s_dai[MAX_I2SV3] = {
-{
-	.name = "s3c64xx-i2s-0",
-	.probe = s3c64xx_i2s_probe,
-	.remove = s3c64xx_i2s_remove,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.ops = &s3c64xx_i2s_dai_ops,
-	.symmetric_rates = 1,
-}, {
-	.name = "s3c64xx-i2s-1",
-	.probe = s3c64xx_i2s_probe,
-	.remove = s3c64xx_i2s_remove,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.ops = &s3c64xx_i2s_dai_ops,
-	.symmetric_rates = 1,
-},};
-
-static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *i2s_pdata;
-	struct s3c_i2sv2_info *i2s;
-	struct resource *res;
-	int i, ret;
-
-	if (pdev->id >= MAX_I2SV3) {
-		dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
-		return -EINVAL;
-	}
-
-	i2s = &s3c64xx_i2s[pdev->id];
-
-	i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
-	i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_playback->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_capture->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res),
-				"s3c64xx-i2s")) {
-		dev_err(&pdev->dev, "Unable to request SFR region\n");
-		return -EBUSY;
-	}
-	i2s->base = res->start;
-
-	i2s_pdata = pdev->dev.platform_data;
-	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-	i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD;
-	i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD;
-
-	i2s->dma_capture->client = &s3c64xx_dma_client_in;
-	i2s->dma_capture->dma_size = 4;
-	i2s->dma_playback->client = &s3c64xx_dma_client_out;
-	i2s->dma_playback->dma_size = 4;
-
-	for (i = 0; i < ARRAY_SIZE(s3c64xx_i2s_dai); i++) {
-		ret = s3c_i2sv2_register_dai(&pdev->dev, i,
-						&s3c64xx_i2s_dai[i]);
-		if (ret != 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-static __devexit int s3c64xx_iis_dev_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c64xx_i2s_dai));
-	return 0;
-}
-
-static struct platform_driver s3c64xx_iis_driver = {
-	.probe  = s3c64xx_iis_dev_probe,
-	.remove = s3c64xx_iis_dev_remove,
-	.driver = {
-		.name = "s3c64xx-iis",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c64xx_i2s_init(void)
-{
-	return platform_driver_register(&s3c64xx_iis_driver);
-}
-module_init(s3c64xx_i2s_init);
-
-static void __exit s3c64xx_i2s_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_iis_driver);
-}
-module_exit(s3c64xx_i2s_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C64XX I2S SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c64xx-iis");
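
The error paths in s3c64xx_i2s_probe() and s3c64xx_i2s_remove() above call kfree() on an element of the statically allocated s3c64xx_i2s[] array, which is not kmalloc()ed memory. A sketch of the probe unwinding without the spurious kfree() (illustrative only):

	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
	if (ret) {
		/* undo the clock setup; i2s itself is static, nothing to free */
		clk_disable(i2s->iis_cclk);
		clk_put(i2s->iis_cclk);
		return ret;
	}
	return 0;
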
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.h b/sound/soc/s3c24xx/s3c64xx-i2s.h
deleted file mode 100644
index de4075d..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s.h
- *
- * ALSA SoC Audio Layer - S3C64XX I2S driver
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SND_SOC_S3C24XX_S3C64XX_I2S_H
-#define __SND_SOC_S3C24XX_S3C64XX_I2S_H __FILE__
-
-struct clk;
-
-#include "s3c-i2s-v2.h"
-
-#define S3C64XX_DIV_BCLK	S3C_I2SV2_DIV_BCLK
-#define S3C64XX_DIV_RCLK	S3C_I2SV2_DIV_RCLK
-#define S3C64XX_DIV_PRESCALER	S3C_I2SV2_DIV_PRESCALER
-
-#define S3C64XX_CLKSRC_PCLK	S3C_I2SV2_CLKSRC_PCLK
-#define S3C64XX_CLKSRC_MUX	S3C_I2SV2_CLKSRC_AUDIOBUS
-#define S3C64XX_CLKSRC_CDCLK    S3C_I2SV2_CLKSRC_CDCLK
-
-#define S3C64XX_I2S_RATES \
-	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
-	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
-	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-
-#define S3C64XX_I2S_FMTS \
-	(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
-	 SNDRV_PCM_FMTBIT_S24_LE)
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai);
-
-#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */
diff --git a/sound/soc/s3c24xx/smartq_wm8987.c b/sound/soc/s3c24xx/smartq_wm8987.c
deleted file mode 100644
index dd20ca7..0000000
--- a/sound/soc/s3c24xx/smartq_wm8987.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/* sound/soc/s3c24xx/smartq_wm8987.c
- *
- * Copyright 2010 Maurus Cuelenaere <mcuelenaere@gmail.com>
- *
- * Based on smdk6410_wm8987.c
- *     Copyright 2007 Wolfson Microelectronics PLC. - linux@wolfsonmicro.com
- *     Graeme Gregory - graeme.gregory@wolfsonmicro.com
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-
-#include <asm/mach-types.h>
-
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-#include "../codecs/wm8750.h"
-
-/*
- * WM8987 is register compatible with WM8750, so using that as base driver.
- */
-
-static struct snd_soc_card snd_soc_smartq;
-
-static int smartq_hifi_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
-	struct s3c_i2sv2_rate_calc div;
-	unsigned int clk = 0;
-	int ret;
-
-	s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
-				s3c_i2sv2_get_clock(cpu_dai));
-
-	switch (params_rate(params)) {
-	case 8000:
-	case 16000:
-	case 32000:
-	case 48000:
-	case 96000:
-		clk = 12288000;
-		break;
-	case 11025:
-	case 22050:
-	case 44100:
-	case 88200:
-		clk = 11289600;
-		break;
-	}
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-					     SND_SOC_DAIFMT_NB_NF |
-					     SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-					   SND_SOC_DAIFMT_NB_NF |
-					   SND_SOC_DAIFMT_CBS_CFS);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock for DAC and ADC */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
-				     SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set MCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, div.fs_div);
-	if (ret < 0)
-		return ret;
-
-	/* set prescaler division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_PRESCALER,
-				     div.clk_div - 1);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-/*
- * SmartQ WM8987 HiFi DAI operations.
- */
-static struct snd_soc_ops smartq_hifi_ops = {
-	.hw_params = smartq_hifi_hw_params,
-};
-
-static struct snd_soc_jack smartq_jack;
-
-static struct snd_soc_jack_pin smartq_jack_pins[] = {
-	/* Disable speaker when headphone is plugged in */
-	{
-		.pin	= "Internal Speaker",
-		.mask	= SND_JACK_HEADPHONE,
-	},
-};
-
-static struct snd_soc_jack_gpio smartq_jack_gpios[] = {
-	{
-		.gpio		= S3C64XX_GPL(12),
-		.name		= "headphone detect",
-		.report		= SND_JACK_HEADPHONE,
-		.debounce_time	= 200,
-	},
-};
-
-static const struct snd_kcontrol_new wm8987_smartq_controls[] = {
-	SOC_DAPM_PIN_SWITCH("Internal Speaker"),
-	SOC_DAPM_PIN_SWITCH("Headphone Jack"),
-	SOC_DAPM_PIN_SWITCH("Internal Mic"),
-};
-
-static int smartq_speaker_event(struct snd_soc_dapm_widget *w,
-				struct snd_kcontrol *k,
-				int event)
-{
-	gpio_set_value(S3C64XX_GPK(12), SND_SOC_DAPM_EVENT_OFF(event));
-
-	return 0;
-}
-
-static const struct snd_soc_dapm_widget wm8987_dapm_widgets[] = {
-	SND_SOC_DAPM_SPK("Internal Speaker", smartq_speaker_event),
-	SND_SOC_DAPM_HP("Headphone Jack", NULL),
-	SND_SOC_DAPM_MIC("Internal Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route audio_map[] = {
-	{"Headphone Jack", NULL, "LOUT2"},
-	{"Headphone Jack", NULL, "ROUT2"},
-
-	{"Internal Speaker", NULL, "LOUT2"},
-	{"Internal Speaker", NULL, "ROUT2"},
-
-	{"Mic Bias", NULL, "Internal Mic"},
-	{"LINPUT2", NULL, "Mic Bias"},
-};
-
-static int smartq_wm8987_init(struct snd_soc_codec *codec)
-{
-	int err = 0;
-
-	/* Add SmartQ specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8987_dapm_widgets,
-				  ARRAY_SIZE(wm8987_dapm_widgets));
-
-	/* add SmartQ specific controls */
-	err = snd_soc_add_controls(codec, wm8987_smartq_controls,
-				   ARRAY_SIZE(wm8987_smartq_controls));
-
-	if (err < 0)
-		return err;
-
-	/* setup SmartQ specific audio path */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-
-	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "LINPUT1");
-	snd_soc_dapm_nc_pin(codec, "RINPUT1");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "ROUT1");
-
-	/* set endpoints to default off mode */
-	snd_soc_dapm_enable_pin(codec, "Internal Speaker");
-	snd_soc_dapm_enable_pin(codec, "Internal Mic");
-	snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-
-	err = snd_soc_dapm_sync(codec);
-	if (err)
-		return err;
-
-	/* Headphone jack detection */
-	err = snd_soc_jack_new(&snd_soc_smartq, "Headphone Jack",
-			       SND_JACK_HEADPHONE, &smartq_jack);
-	if (err)
-		return err;
-
-	err = snd_soc_jack_add_pins(&smartq_jack, ARRAY_SIZE(smartq_jack_pins),
-				    smartq_jack_pins);
-	if (err)
-		return err;
-
-	err = snd_soc_jack_add_gpios(&smartq_jack,
-				     ARRAY_SIZE(smartq_jack_gpios),
-				     smartq_jack_gpios);
-
-	return err;
-}
-
-static struct snd_soc_dai_link smartq_dai[] = {
-	{
-		.name		= "wm8987",
-		.stream_name	= "SmartQ Hi-Fi",
-		.cpu_dai_name	= "s3c64xx-i2s.0",
-		.codec_dai_name	= "wm8750-hifi",
-		.platform_name	= "s3c24xx-pcm-audio",
-		.codec_name	= "wm8750-codec.0-0x1a",
-		.init		= smartq_wm8987_init,
-		.ops		= &smartq_hifi_ops,
-	},
-};
-
-static struct snd_soc_card snd_soc_smartq = {
-	.name = "SmartQ",
-	.dai_link = smartq_dai,
-	.num_links = ARRAY_SIZE(smartq_dai),
-};
-
-static struct platform_device *smartq_snd_device;
-
-static int __init smartq_init(void)
-{
-	int ret;
-
-	if (!machine_is_smartq7() && !machine_is_smartq5()) {
-		pr_info("Only SmartQ is supported by this ASoC driver\n");
-		return -ENODEV;
-	}
-
-	smartq_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!smartq_snd_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(smartq_snd_device, &snd_soc_smartq);
-
-	ret = platform_device_add(smartq_snd_device);
-	if (ret) {
-		platform_device_put(smartq_snd_device);
-		return ret;
-	}
-
-	/* Initialise GPIOs used by amplifiers */
-	ret = gpio_request(S3C64XX_GPK(12), "amplifiers shutdown");
-	if (ret) {
-		dev_err(&smartq_snd_device->dev, "Failed to register GPK12\n");
-		goto err_unregister_device;
-	}
-
-	/* Disable amplifiers */
-	ret = gpio_direction_output(S3C64XX_GPK(12), 1);
-	if (ret) {
-		dev_err(&smartq_snd_device->dev, "Failed to configure GPK12\n");
-		goto err_free_gpio_amp_shut;
-	}
-
-	return 0;
-
-err_free_gpio_amp_shut:
-	gpio_free(S3C64XX_GPK(12));
-err_unregister_device:
-	platform_device_unregister(smartq_snd_device);
-
-	return ret;
-}
-
-static void __exit smartq_exit(void)
-{
-	snd_soc_jack_free_gpios(&smartq_jack, ARRAY_SIZE(smartq_jack_gpios),
-				smartq_jack_gpios);
-
-	platform_device_unregister(smartq_snd_device);
-}
-
-module_init(smartq_init);
-module_exit(smartq_exit);
-
-/* Module information */
-MODULE_AUTHOR("Maurus Cuelenaere <mcuelenaere@gmail.com>");
-MODULE_DESCRIPTION("ALSA SoC SmartQ WM8987");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/smdk2443_wm9710.c b/sound/soc/s3c24xx/smdk2443_wm9710.c
deleted file mode 100644
index 4613288..0000000
--- a/sound/soc/s3c24xx/smdk2443_wm9710.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * smdk2443_wm9710.c  --  SoC audio for smdk2443
- *
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
-
-static struct snd_soc_card smdk2443;
-
-static struct snd_soc_dai_link smdk2443_dai[] = {
-{
-	.name = "AC97",
-	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "s3c-ac97",
-	.codec_dai_name = "ac97-hifi",
-	.codec_name = "ac97-codec",
-	.platform_name = "s3c24xx-pcm-audio",
-},
-};
-
-static struct snd_soc_card smdk2443 = {
-	.name = "SMDK2443",
-	.dai_link = smdk2443_dai,
-	.num_links = ARRAY_SIZE(smdk2443_dai),
-};
-
-static struct platform_device *smdk2443_snd_ac97_device;
-
-static int __init smdk2443_init(void)
-{
-	int ret;
-
-	smdk2443_snd_ac97_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk2443_snd_ac97_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(smdk2443_snd_ac97_device, &smdk2443);
-	ret = platform_device_add(smdk2443_snd_ac97_device);
-
-	if (ret)
-		platform_device_put(smdk2443_snd_ac97_device);
-
-	return ret;
-}
-
-static void __exit smdk2443_exit(void)
-{
-	platform_device_unregister(smdk2443_snd_ac97_device);
-}
-
-module_init(smdk2443_init);
-module_exit(smdk2443_exit);
-
-/* Module information */
-MODULE_AUTHOR("Graeme Gregory, graeme.gregory@wolfsonmicro.com, www.wolfsonmicro.com");
-MODULE_DESCRIPTION("ALSA SoC WM9710 SMDK2443");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/smdk64xx_wm8580.c b/sound/soc/s3c24xx/smdk64xx_wm8580.c
deleted file mode 100644
index 052e499..0000000
--- a/sound/soc/s3c24xx/smdk64xx_wm8580.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- *  smdk64xx_wm8580.c
- *
- *  Copyright (c) 2009 Samsung Electronics Co. Ltd
- *  Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include "../codecs/wm8580.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-/*
- * Default CFG switch settings to use this driver:
- *
- *   SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On
- */
-
-/* SMDK64XX has a 12MHZ crystal attached to WM8580 */
-#define SMDK64XX_WM8580_FREQ 12000000
-
-static int smdk64xx_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out;
-	int bfs, rfs, ret;
-
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_U8:
-	case SNDRV_PCM_FORMAT_S8:
-		bfs = 16;
-		break;
-	case SNDRV_PCM_FORMAT_U16_LE:
-	case SNDRV_PCM_FORMAT_S16_LE:
-		bfs = 32;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* The Fvco for WM8580 PLLs must fall within [90,100]MHz.
-	 * This criterion can't be met if we request PLL output
-	 * as {8000x256, 64000x256, 11025x256}Hz.
-	 * As a wayout, we rather change rfs to a minimum value that
-	 * results in (params_rate(params) * rfs), and itself, acceptable
-	 * to both - the CODEC and the CPU.
-	 */
-	switch (params_rate(params)) {
-	case 16000:
-	case 22050:
-	case 32000:
-	case 44100:
-	case 48000:
-	case 88200:
-	case 96000:
-		rfs = 256;
-		break;
-	case 64000:
-		rfs = 384;
-		break;
-	case 8000:
-	case 11025:
-		rfs = 512;
-		break;
-	default:
-		return -EINVAL;
-	}
-	pll_out = params_rate(params) * rfs;
-
-	/* Set the Codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
-					 | SND_SOC_DAIFMT_NB_NF
-					 | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* Set the AP DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
-					 | SND_SOC_DAIFMT_NB_NF
-					 | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_CDCLK,
-					0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* We use PCLK for basic ops in SoC-Slave mode */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-					0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* Set WM8580 to drive MCLK from its PLLA */
-	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
-					WM8580_CLKSRC_PLLA);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
-					SMDK64XX_WM8580_FREQ, pll_out);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
-				     pll_out, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_BCLK, bfs);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, rfs);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-/*
- * SMDK64XX WM8580 DAI operations.
- */
-static struct snd_soc_ops smdk64xx_ops = {
-	.hw_params = smdk64xx_hw_params,
-};
-
-/* SMDK64xx Playback widgets */
-static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = {
-	SND_SOC_DAPM_HP("Front", NULL),
-	SND_SOC_DAPM_HP("Center+Sub", NULL),
-	SND_SOC_DAPM_HP("Rear", NULL),
-};
-
-/* SMDK64xx Capture widgets */
-static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = {
-	SND_SOC_DAPM_MIC("MicIn", NULL),
-	SND_SOC_DAPM_LINE("LineIn", NULL),
-};
-
-/* SMDK-PAIFTX connections */
-static const struct snd_soc_dapm_route audio_map_tx[] = {
-	/* MicIn feeds AINL */
-	{"AINL", NULL, "MicIn"},
-
-	/* LineIn feeds AINL/R */
-	{"AINL", NULL, "LineIn"},
-	{"AINR", NULL, "LineIn"},
-};
-
-/* SMDK-PAIFRX connections */
-static const struct snd_soc_dapm_route audio_map_rx[] = {
-	/* Front Left/Right are fed VOUT1L/R */
-	{"Front", NULL, "VOUT1L"},
-	{"Front", NULL, "VOUT1R"},
-
-	/* Center/Sub are fed VOUT2L/R */
-	{"Center+Sub", NULL, "VOUT2L"},
-	{"Center+Sub", NULL, "VOUT2R"},
-
-	/* Rear Left/Right are fed VOUT3L/R */
-	{"Rear", NULL, "VOUT3L"},
-	{"Rear", NULL, "VOUT3R"},
-};
-
-static int smdk64xx_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	/* Add smdk64xx specific Capture widgets */
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_cpt,
-				  ARRAY_SIZE(wm8580_dapm_widgets_cpt));
-
-	/* Set up PAIFTX audio path */
-	snd_soc_dapm_add_routes(codec, audio_map_tx, ARRAY_SIZE(audio_map_tx));
-
-	/* Enabling the microphone requires the fitting of a 0R
-	 * resistor to connect the line from the microphone jack.
-	 */
-	snd_soc_dapm_disable_pin(codec, "MicIn");
-
-	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static int smdk64xx_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	/* Add smdk64xx specific Playback widgets */
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_pbk,
-				  ARRAY_SIZE(wm8580_dapm_widgets_pbk));
-
-	/* Set up PAIFRX audio path */
-	snd_soc_dapm_add_routes(codec, audio_map_rx, ARRAY_SIZE(audio_map_rx));
-
-	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static struct snd_soc_dai_link smdk64xx_dai[] = {
-{ /* Primary Playback i/f */
-	.name = "WM8580 PAIF RX",
-	.stream_name = "Playback",
-	.cpu_dai_name = "s3c64xx-iis-v4",
-	.codec_dai_name = "wm8580-hifi-playback",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8580-codec.0-001b",
-	.init = smdk64xx_wm8580_init_paifrx,
-	.ops = &smdk64xx_ops,
-},
-{ /* Primary Capture i/f */
-	.name = "WM8580 PAIF TX",
-	.stream_name = "Capture",
-	.cpu_dai_name = "s3c64xx-iis-v4",
-	.codec_dai_name = "wm8580-hifi-capture",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8580-codec.0-001b",
-	.init = smdk64xx_wm8580_init_paiftx,
-	.ops = &smdk64xx_ops,
-},
-};
-
-static struct snd_soc_card smdk64xx = {
-	.name = "SMDK64xx 5.1",
-	.dai_link = smdk64xx_dai,
-	.num_links = ARRAY_SIZE(smdk64xx_dai),
-};
-
-static struct platform_device *smdk64xx_snd_device;
-
-static int __init smdk64xx_audio_init(void)
-{
-	int ret;
-
-	smdk64xx_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk64xx_snd_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(smdk64xx_snd_device, &smdk64xx);
-	ret = platform_device_add(smdk64xx_snd_device);
-
-	if (ret)
-		platform_device_put(smdk64xx_snd_device);
-
-	return ret;
-}
-module_init(smdk64xx_audio_init);
-
-MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com");
-MODULE_DESCRIPTION("ALSA SoC SMDK64XX WM8580");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/smdk_spdif.c b/sound/soc/s3c24xx/smdk_spdif.c
deleted file mode 100644
index c8bd904..0000000
--- a/sound/soc/s3c24xx/smdk_spdif.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * smdk_spdif.c  --  S/PDIF audio for SMDK
- *
- * Copyright 2010 Samsung Electronics Co. Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-
-#include <plat/devs.h>
-
-#include <sound/soc.h>
-
-#include "s3c-dma.h"
-#include "spdif.h"
-
-/* Audio clock settings are belonged to board specific part. Every
- * board can set audio source clock setting which is matched with H/W
- * like this function-'set_audio_clock_heirachy'.
- */
-static int set_audio_clock_heirachy(struct platform_device *pdev)
-{
-	struct clk *fout_epll, *mout_epll, *sclk_audio0, *sclk_spdif;
-	int ret;
-
-	fout_epll = clk_get(NULL, "fout_epll");
-	if (IS_ERR(fout_epll)) {
-		printk(KERN_WARNING "%s: Cannot find fout_epll.\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	mout_epll = clk_get(NULL, "mout_epll");
-	if (IS_ERR(mout_epll)) {
-		printk(KERN_WARNING "%s: Cannot find mout_epll.\n",
-				__func__);
-		ret = -EINVAL;
-		goto out1;
-	}
-
-	sclk_audio0 = clk_get(&pdev->dev, "sclk_audio");
-	if (IS_ERR(sclk_audio0)) {
-		printk(KERN_WARNING "%s: Cannot find sclk_audio.\n",
-				__func__);
-		ret = -EINVAL;
-		goto out2;
-	}
-
-	sclk_spdif = clk_get(NULL, "sclk_spdif");
-	if (IS_ERR(sclk_spdif)) {
-		printk(KERN_WARNING "%s: Cannot find sclk_spdif.\n",
-				__func__);
-		ret = -EINVAL;
-		goto out3;
-	}
-
-	/* Set audio clock heirachy for S/PDIF */
-	clk_set_parent(mout_epll, fout_epll);
-	clk_set_parent(sclk_audio0, mout_epll);
-	clk_set_parent(sclk_spdif, sclk_audio0);
-
-	clk_put(sclk_spdif);
-out3:
-	clk_put(sclk_audio0);
-out2:
-	clk_put(mout_epll);
-out1:
-	clk_put(fout_epll);
-
-	return ret;
-}
-
-/* We should haved to set clock directly on this part because of clock
- * scheme of Samsudng SoCs did not support to set rates from abstrct
- * clock of it's heirachy.
- */
-static int set_audio_clock_rate(unsigned long epll_rate,
-				unsigned long audio_rate)
-{
-	struct clk *fout_epll, *sclk_spdif;
-
-	fout_epll = clk_get(NULL, "fout_epll");
-	if (IS_ERR(fout_epll)) {
-		printk(KERN_ERR "%s: failed to get fout_epll\n", __func__);
-		return -ENOENT;
-	}
-
-	clk_set_rate(fout_epll, epll_rate);
-	clk_put(fout_epll);
-
-	sclk_spdif = clk_get(NULL, "sclk_spdif");
-	if (IS_ERR(sclk_spdif)) {
-		printk(KERN_ERR "%s: failed to get sclk_spdif\n", __func__);
-		return -ENOENT;
-	}
-
-	clk_set_rate(sclk_spdif, audio_rate);
-	clk_put(sclk_spdif);
-
-	return 0;
-}
-
-static int smdk_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned long pll_out, rclk_rate;
-	int ret, ratio;
-
-	switch (params_rate(params)) {
-	case 44100:
-		pll_out = 45158400;
-		break;
-	case 32000:
-	case 48000:
-	case 96000:
-		pll_out = 49152000;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Setting ratio to 512fs helps to use S/PDIF with HDMI without
-	 * modify S/PDIF ASoC machine driver.
-	 */
-	ratio = 512;
-	rclk_rate = params_rate(params) * ratio;
-
-	/* Set audio source clock rates */
-	ret = set_audio_clock_rate(pll_out, rclk_rate);
-	if (ret < 0)
-		return ret;
-
-	/* Set S/PDIF uses internal source clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, SND_SOC_SPDIF_INT_MCLK,
-					rclk_rate, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return ret;
-}
-
-static struct snd_soc_ops smdk_spdif_ops = {
-	.hw_params = smdk_hw_params,
-};
-
-static struct snd_soc_card smdk;
-
-static struct snd_soc_dai_link smdk_dai = {
-	.name = "S/PDIF",
-	.stream_name = "S/PDIF PCM Playback",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "samsung-spdif",
-	.codec_dai_name = "dit-hifi",
-	.codec_name = "spdif-dit",
-	.ops = &smdk_spdif_ops,
-};
-
-static struct snd_soc_card smdk = {
-	.name = "SMDK-S/PDIF",
-	.dai_link = &smdk_dai,
-	.num_links = 1,
-};
-
-static struct platform_device *smdk_snd_spdif_dit_device;
-static struct platform_device *smdk_snd_spdif_device;
-
-static int __init smdk_init(void)
-{
-	int ret;
-
-	smdk_snd_spdif_dit_device = platform_device_alloc("spdif-dit", -1);
-	if (!smdk_snd_spdif_dit_device)
-		return -ENOMEM;
-
-	ret = platform_device_add(smdk_snd_spdif_dit_device);
-	if (ret)
-		goto err2;
-
-	smdk_snd_spdif_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk_snd_spdif_device) {
-		ret = -ENOMEM;
-		goto err2;
-	}
-
-	platform_set_drvdata(smdk_snd_spdif_device, &smdk);
-
-	ret = platform_device_add(smdk_snd_spdif_device);
-	if (ret)
-		goto err1;
-
-	/* Set audio clock heirachy manually */
-	ret = set_audio_clock_heirachy(smdk_snd_spdif_device);
-	if (ret)
-		goto err1;
-
-	return 0;
-err1:
-	platform_device_put(smdk_snd_spdif_device);
-err2:
-	platform_device_put(smdk_snd_spdif_dit_device);
-	return ret;
-}
-
-static void __exit smdk_exit(void)
-{
-	platform_device_unregister(smdk_snd_spdif_device);
-}
-
-module_init(smdk_init);
-module_exit(smdk_exit);
-
-MODULE_AUTHOR("Seungwhan Youn, <sw.youn@samsung.com>");
-MODULE_DESCRIPTION("ALSA SoC SMDK+S/PDIF");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/smdk_wm9713.c b/sound/soc/s3c24xx/smdk_wm9713.c
deleted file mode 100644
index 33ba8fd..0000000
--- a/sound/soc/s3c24xx/smdk_wm9713.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * smdk_wm9713.c  --  SoC audio for SMDK
- *
- * Copyright 2010 Samsung Electronics Co. Ltd.
- * Author: Jaswinder Singh Brar <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <sound/soc.h>
-
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
-
-static struct snd_soc_card smdk;
-
-/*
- * Default CFG switch settings to use this driver:
- *
- *   SMDK6410: Set CFG1 1-3 On, CFG2 1-4 Off
- *   SMDKC100: Set CFG6 1-3 On, CFG7 1   On
- *   SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On
- *   SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On
- */
-
-/*
- Playback (HeadPhone):-
-	$ amixer sset 'Headphone' unmute
-	$ amixer sset 'Right Headphone Out Mux' 'Headphone'
-	$ amixer sset 'Left Headphone Out Mux' 'Headphone'
-	$ amixer sset 'Right HP Mixer PCM' unmute
-	$ amixer sset 'Left HP Mixer PCM' unmute
-
- Capture (LineIn):-
-	$ amixer sset 'Right Capture Source' 'Line'
-	$ amixer sset 'Left Capture Source' 'Line'
-*/
-
-static struct snd_soc_dai_link smdk_dai = {
-	.name = "AC97",
-	.stream_name = "AC97 PCM",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "s3c-ac97",
-	.codec_dai_name = "wm9713-hifi",
-	.codec_name = "wm9713-codec",
-};
-
-static struct snd_soc_card smdk = {
-	.name = "SMDK WM9713",
-	.dai_link = &smdk_dai,
-	.num_links = 1,
-};
-
-static struct platform_device *smdk_snd_wm9713_device;
-static struct platform_device *smdk_snd_ac97_device;
-
-static int __init smdk_init(void)
-{
-	int ret;
-
-	smdk_snd_wm9713_device = platform_device_alloc("wm9713-codec", -1);
-	if (!smdk_snd_wm9713_device)
-		return -ENOMEM;
-
-	ret = platform_device_add(smdk_snd_wm9713_device);
-	if (ret)
-		goto err;
-
-	smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk_snd_ac97_device) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	platform_set_drvdata(smdk_snd_ac97_device, &smdk);
-
-	ret = platform_device_add(smdk_snd_ac97_device);
-	if (ret) {
-		platform_device_put(smdk_snd_ac97_device);
-		goto err;
-	}
-
-	return 0;
-err:
-	platform_device_put(smdk_snd_wm9713_device);
-	return ret;
-}
-
-static void __exit smdk_exit(void)
-{
-	platform_device_unregister(smdk_snd_ac97_device);
-	platform_device_unregister(smdk_snd_wm9713_device);
-}
-
-module_init(smdk_init);
-module_exit(smdk_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh Brar, jassi.brar@samsung.com");
-MODULE_DESCRIPTION("ALSA SoC SMDK+WM9713");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/spdif.c b/sound/soc/s3c24xx/spdif.c
deleted file mode 100644
index ce554e9..0000000
--- a/sound/soc/s3c24xx/spdif.c
+++ /dev/null
@@ -1,501 +0,0 @@
-/* sound/soc/s3c24xx/spdif.c
- *
- * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- *		http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <plat/audio.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "spdif.h"
-
-/* Registers */
-#define CLKCON				0x00
-#define CON				0x04
-#define BSTAS				0x08
-#define CSTAS				0x0C
-#define DATA_OUTBUF			0x10
-#define DCNT				0x14
-#define BSTAS_S				0x18
-#define DCNT_S				0x1C
-
-#define CLKCTL_MASK			0x7
-#define CLKCTL_MCLK_EXT			(0x1 << 2)
-#define CLKCTL_PWR_ON			(0x1 << 0)
-
-#define CON_MASK			0x3ffffff
-#define CON_FIFO_TH_SHIFT		19
-#define CON_FIFO_TH_MASK		(0x7 << 19)
-#define CON_USERDATA_23RDBIT		(0x1 << 12)
-
-#define CON_SW_RESET			(0x1 << 5)
-
-#define CON_MCLKDIV_MASK		(0x3 << 3)
-#define CON_MCLKDIV_256FS		(0x0 << 3)
-#define CON_MCLKDIV_384FS		(0x1 << 3)
-#define CON_MCLKDIV_512FS		(0x2 << 3)
-
-#define CON_PCM_MASK			(0x3 << 1)
-#define CON_PCM_16BIT			(0x0 << 1)
-#define CON_PCM_20BIT			(0x1 << 1)
-#define CON_PCM_24BIT			(0x2 << 1)
-
-#define CON_PCM_DATA			(0x1 << 0)
-
-#define CSTAS_MASK			0x3fffffff
-#define CSTAS_SAMP_FREQ_MASK		(0xF << 24)
-#define CSTAS_SAMP_FREQ_44		(0x0 << 24)
-#define CSTAS_SAMP_FREQ_48		(0x2 << 24)
-#define CSTAS_SAMP_FREQ_32		(0x3 << 24)
-#define CSTAS_SAMP_FREQ_96		(0xA << 24)
-
-#define CSTAS_CATEGORY_MASK		(0xFF << 8)
-#define CSTAS_CATEGORY_CODE_CDP		(0x01 << 8)
-
-#define CSTAS_NO_COPYRIGHT		(0x1 << 2)
-
-/**
- * struct samsung_spdif_info - Samsung S/PDIF Controller information
- * @lock: Spin lock for S/PDIF.
- * @dev: The parent device passed to use from the probe.
- * @regs: The pointer to the device register block.
- * @clk_rate: Current clock rate for calcurate ratio.
- * @pclk: The peri-clock pointer for spdif master operation.
- * @sclk: The source clock pointer for making sync signals.
- * @save_clkcon: Backup clkcon reg. in suspend.
- * @save_con: Backup con reg. in suspend.
- * @save_cstas: Backup cstas reg. in suspend.
- * @dma_playback: DMA information for playback channel.
- */
-struct samsung_spdif_info {
-	spinlock_t	lock;
-	struct device	*dev;
-	void __iomem	*regs;
-	unsigned long	clk_rate;
-	struct clk	*pclk;
-	struct clk	*sclk;
-	u32		saved_clkcon;
-	u32		saved_con;
-	u32		saved_cstas;
-	struct s3c_dma_params	*dma_playback;
-};
-
-static struct s3c2410_dma_client spdif_dma_client_out = {
-	.name		= "S/PDIF Stereo out",
-};
-
-static struct s3c_dma_params spdif_stereo_out;
-static struct samsung_spdif_info spdif_info;
-
-static inline struct samsung_spdif_info *to_info(struct snd_soc_dai *cpu_dai)
-{
-	return snd_soc_dai_get_drvdata(cpu_dai);
-}
-
-static void spdif_snd_txctrl(struct samsung_spdif_info *spdif, int on)
-{
-	void __iomem *regs = spdif->regs;
-	u32 clkcon;
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
-	if (on)
-		writel(clkcon | CLKCTL_PWR_ON, regs + CLKCON);
-	else
-		writel(clkcon & ~CLKCTL_PWR_ON, regs + CLKCON);
-}
-
-static int spdif_set_sysclk(struct snd_soc_dai *cpu_dai,
-				int clk_id, unsigned int freq, int dir)
-{
-	struct samsung_spdif_info *spdif = to_info(cpu_dai);
-	u32 clkcon;
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	clkcon = readl(spdif->regs + CLKCON);
-
-	if (clk_id == SND_SOC_SPDIF_INT_MCLK)
-		clkcon &= ~CLKCTL_MCLK_EXT;
-	else
-		clkcon |= CLKCTL_MCLK_EXT;
-
-	writel(clkcon, spdif->regs + CLKCON);
-
-	spdif->clk_rate = freq;
-
-	return 0;
-}
-
-static int spdif_trigger(struct snd_pcm_substream *substream, int cmd,
-				struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
-	unsigned long flags;
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		spin_lock_irqsave(&spdif->lock, flags);
-		spdif_snd_txctrl(spdif, 1);
-		spin_unlock_irqrestore(&spdif->lock, flags);
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		spin_lock_irqsave(&spdif->lock, flags);
-		spdif_snd_txctrl(spdif, 0);
-		spin_unlock_irqrestore(&spdif->lock, flags);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int spdif_sysclk_ratios[] = {
-	512, 384, 256,
-};
-
-static int spdif_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params,
-				struct snd_soc_dai *socdai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
-	void __iomem *regs = spdif->regs;
-	struct s3c_dma_params *dma_data;
-	u32 con, clkcon, cstas;
-	unsigned long flags;
-	int i, ratio;
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = spdif->dma_playback;
-	else {
-		dev_err(spdif->dev, "Capture is not supported\n");
-		return -EINVAL;
-	}
-
-	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
-
-	spin_lock_irqsave(&spdif->lock, flags);
-
-	con = readl(regs + CON) & CON_MASK;
-	cstas = readl(regs + CSTAS) & CSTAS_MASK;
-	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
-
-	con &= ~CON_FIFO_TH_MASK;
-	con |= (0x7 << CON_FIFO_TH_SHIFT);
-	con |= CON_USERDATA_23RDBIT;
-	con |= CON_PCM_DATA;
-
-	con &= ~CON_PCM_MASK;
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S16_LE:
-		con |= CON_PCM_16BIT;
-		break;
-	default:
-		dev_err(spdif->dev, "Unsupported data size.\n");
-		goto err;
-	}
-
-	ratio = spdif->clk_rate / params_rate(params);
-	for (i = 0; i < ARRAY_SIZE(spdif_sysclk_ratios); i++)
-		if (ratio == spdif_sysclk_ratios[i])
-			break;
-	if (i == ARRAY_SIZE(spdif_sysclk_ratios)) {
-		dev_err(spdif->dev, "Invalid clock ratio %ld/%d\n",
-				spdif->clk_rate, params_rate(params));
-		goto err;
-	}
-
-	con &= ~CON_MCLKDIV_MASK;
-	switch (ratio) {
-	case 256:
-		con |= CON_MCLKDIV_256FS;
-		break;
-	case 384:
-		con |= CON_MCLKDIV_384FS;
-		break;
-	case 512:
-		con |= CON_MCLKDIV_512FS;
-		break;
-	}
-
-	cstas &= ~CSTAS_SAMP_FREQ_MASK;
-	switch (params_rate(params)) {
-	case 44100:
-		cstas |= CSTAS_SAMP_FREQ_44;
-		break;
-	case 48000:
-		cstas |= CSTAS_SAMP_FREQ_48;
-		break;
-	case 32000:
-		cstas |= CSTAS_SAMP_FREQ_32;
-		break;
-	case 96000:
-		cstas |= CSTAS_SAMP_FREQ_96;
-		break;
-	default:
-		dev_err(spdif->dev, "Invalid sampling rate %d\n",
-				params_rate(params));
-		goto err;
-	}
-
-	cstas &= ~CSTAS_CATEGORY_MASK;
-	cstas |= CSTAS_CATEGORY_CODE_CDP;
-	cstas |= CSTAS_NO_COPYRIGHT;
-
-	writel(con, regs + CON);
-	writel(cstas, regs + CSTAS);
-	writel(clkcon, regs + CLKCON);
-
-	spin_unlock_irqrestore(&spdif->lock, flags);
-
-	return 0;
-err:
-	spin_unlock_irqrestore(&spdif->lock, flags);
-	return -EINVAL;
-}
-
-static void spdif_shutdown(struct snd_pcm_substream *substream,
-				struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
-	void __iomem *regs = spdif->regs;
-	u32 con, clkcon;
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	con = readl(regs + CON) & CON_MASK;
-	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
-
-	writel(con | CON_SW_RESET, regs + CON);
-	cpu_relax();
-
-	writel(clkcon & ~CLKCTL_PWR_ON, regs + CLKCON);
-}
-
-#ifdef CONFIG_PM
-static int spdif_suspend(struct snd_soc_dai *cpu_dai)
-{
-	struct samsung_spdif_info *spdif = to_info(cpu_dai);
-	u32 con = spdif->saved_con;
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	spdif->saved_clkcon = readl(spdif->regs	+ CLKCON) & CLKCTL_MASK;
-	spdif->saved_con = readl(spdif->regs + CON) & CON_MASK;
-	spdif->saved_cstas = readl(spdif->regs + CSTAS) & CSTAS_MASK;
-
-	writel(con | CON_SW_RESET, spdif->regs + CON);
-	cpu_relax();
-
-	return 0;
-}
-
-static int spdif_resume(struct snd_soc_dai *cpu_dai)
-{
-	struct samsung_spdif_info *spdif = to_info(cpu_dai);
-
-	dev_dbg(spdif->dev, "Entered %s\n", __func__);
-
-	writel(spdif->saved_clkcon, spdif->regs	+ CLKCON);
-	writel(spdif->saved_con, spdif->regs + CON);
-	writel(spdif->saved_cstas, spdif->regs + CSTAS);
-
-	return 0;
-}
-#else
-#define spdif_suspend NULL
-#define spdif_resume NULL
-#endif
-
-static struct snd_soc_dai_ops spdif_dai_ops = {
-	.set_sysclk	= spdif_set_sysclk,
-	.trigger	= spdif_trigger,
-	.hw_params	= spdif_hw_params,
-	.shutdown	= spdif_shutdown,
-};
-
-struct snd_soc_dai_driver samsung_spdif_dai = {
-	.name = "samsung-spdif",
-	.playback = {
-		.stream_name = "S/PDIF Playback",
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = (SNDRV_PCM_RATE_32000 |
-				SNDRV_PCM_RATE_44100 |
-				SNDRV_PCM_RATE_48000 |
-				SNDRV_PCM_RATE_96000),
-		.formats = SNDRV_PCM_FMTBIT_S16_LE, },
-	.ops = &spdif_dai_ops,
-	.suspend = spdif_suspend,
-	.resume = spdif_resume,
-};
-
-static __devinit int spdif_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *spdif_pdata;
-	struct resource *mem_res, *dma_res;
-	struct samsung_spdif_info *spdif;
-	int ret;
-
-	spdif_pdata = pdev->dev.platform_data;
-
-	dev_dbg(&pdev->dev, "Entered %s\n", __func__);
-
-	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!dma_res) {
-		dev_err(&pdev->dev, "Unable to get dma resource.\n");
-		return -ENXIO;
-	}
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem_res) {
-		dev_err(&pdev->dev, "Unable to get register resource.\n");
-		return -ENXIO;
-	}
-
-	if (spdif_pdata && spdif_pdata->cfg_gpio
-			&& spdif_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure GPIO pins\n");
-		return -EINVAL;
-	}
-
-	spdif = &spdif_info;
-	spdif->dev = &pdev->dev;
-
-	spin_lock_init(&spdif->lock);
-
-	spdif->pclk = clk_get(&pdev->dev, "spdif");
-	if (IS_ERR(spdif->pclk)) {
-		dev_err(&pdev->dev, "failed to get peri-clock\n");
-		ret = -ENOENT;
-		goto err0;
-	}
-	clk_enable(spdif->pclk);
-
-	spdif->sclk = clk_get(&pdev->dev, "sclk_spdif");
-	if (IS_ERR(spdif->sclk)) {
-		dev_err(&pdev->dev, "failed to get internal source clock\n");
-		ret = -ENOENT;
-		goto err1;
-	}
-	clk_enable(spdif->sclk);
-
-	/* Request S/PDIF Register's memory region */
-	if (!request_mem_region(mem_res->start,
-				resource_size(mem_res), "samsung-spdif")) {
-		dev_err(&pdev->dev, "Unable to request register region\n");
-		ret = -EBUSY;
-		goto err2;
-	}
-
-	spdif->regs = ioremap(mem_res->start, 0x100);
-	if (spdif->regs == NULL) {
-		dev_err(&pdev->dev, "Cannot ioremap registers\n");
-		ret = -ENXIO;
-		goto err3;
-	}
-
-	dev_set_drvdata(&pdev->dev, spdif);
-
-	ret = snd_soc_register_dai(&pdev->dev, &samsung_spdif_dai);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "fail to register dai\n");
-		goto err4;
-	}
-
-	spdif_stereo_out.dma_size = 2;
-	spdif_stereo_out.client = &spdif_dma_client_out;
-	spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
-	spdif_stereo_out.channel = dma_res->start;
-
-	spdif->dma_playback = &spdif_stereo_out;
-
-	return 0;
-
-err4:
-	iounmap(spdif->regs);
-err3:
-	release_mem_region(mem_res->start, resource_size(mem_res));
-err2:
-	clk_disable(spdif->sclk);
-	clk_put(spdif->sclk);
-err1:
-	clk_disable(spdif->pclk);
-	clk_put(spdif->pclk);
-err0:
-	return ret;
-}
-
-static __devexit int spdif_remove(struct platform_device *pdev)
-{
-	struct samsung_spdif_info *spdif = &spdif_info;
-	struct resource *mem_res;
-
-	snd_soc_unregister_dai(&pdev->dev);
-
-	iounmap(spdif->regs);
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (mem_res)
-		release_mem_region(mem_res->start, resource_size(mem_res));
-
-	clk_disable(spdif->sclk);
-	clk_put(spdif->sclk);
-	clk_disable(spdif->pclk);
-	clk_put(spdif->pclk);
-
-	return 0;
-}
-
-static struct platform_driver samsung_spdif_driver = {
-	.probe	= spdif_probe,
-	.remove	= spdif_remove,
-	.driver	= {
-		.name	= "samsung-spdif",
-		.owner	= THIS_MODULE,
-	},
-};
-
-static int __init spdif_init(void)
-{
-	return platform_driver_register(&samsung_spdif_driver);
-}
-module_init(spdif_init);
-
-static void __exit spdif_exit(void)
-{
-	platform_driver_unregister(&samsung_spdif_driver);
-}
-module_exit(spdif_exit);
-
-MODULE_AUTHOR("Seungwhan Youn, <sw.youn@samsung.com>");
-MODULE_DESCRIPTION("Samsung S/PDIF Controller Driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-spdif");
diff --git a/sound/soc/s3c24xx/spdif.h b/sound/soc/s3c24xx/spdif.h
deleted file mode 100644
index 3ed5559..0000000
--- a/sound/soc/s3c24xx/spdif.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* sound/soc/s3c24xx/spdif.h
- *
- * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- *		http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SND_SOC_SAMSUNG_SPDIF_H
-#define __SND_SOC_SAMSUNG_SPDIF_H	__FILE__
-
-#define SND_SOC_SPDIF_INT_MCLK		0
-#define SND_SOC_SPDIF_EXT_MCLK		1
-
-#endif	/* __SND_SOC_SAMSUNG_SPDIF_H */
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index c1244c5..5890e43 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -18,11 +18,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <variant/dmac.h>
 
-#include "../codecs/tlv320aic3x.h"
 #include "s6000-pcm.h"
 #include "s6000-i2s.h"
 
@@ -107,6 +105,7 @@
 			   struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = kcontrol->private_data;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
 	char *differential = "Audio Out Differential";
 	char *stereo = "Audio Out Stereo";
@@ -114,10 +113,10 @@
 	if (kcontrol->private_value == val)
 		return 0;
 	kcontrol->private_value = val;
-	snd_soc_dapm_disable_pin(codec, val ? differential : stereo);
-	snd_soc_dapm_sync(codec);
-	snd_soc_dapm_enable_pin(codec, val ? stereo : differential);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
+	snd_soc_dapm_sync(dapm);
+	snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
+	snd_soc_dapm_sync(dapm);
 
 	return 1;
 }
@@ -137,35 +136,36 @@
 static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add s6105 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* Set up s6105 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* not present */
-	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_nc_pin(codec, "LINE2L");
-	snd_soc_dapm_nc_pin(codec, "LINE2R");
+	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_nc_pin(dapm, "LINE2L");
+	snd_soc_dapm_nc_pin(dapm, "LINE2R");
 
 	/* not connected */
-	snd_soc_dapm_nc_pin(codec, "MIC3L"); /* LINE2L on this chip */
-	snd_soc_dapm_nc_pin(codec, "MIC3R"); /* LINE2R on this chip */
-	snd_soc_dapm_nc_pin(codec, "LLOUT");
-	snd_soc_dapm_nc_pin(codec, "RLOUT");
-	snd_soc_dapm_nc_pin(codec, "HPRCOM");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */
+	snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */
+	snd_soc_dapm_nc_pin(dapm, "LLOUT");
+	snd_soc_dapm_nc_pin(dapm, "RLOUT");
+	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Audio In");
+	snd_soc_dapm_enable_pin(dapm, "Audio In");
 
 	/* must correspond to audio_out_mux.private_value initializer */
-	snd_soc_dapm_disable_pin(codec, "Audio Out Differential");
-	snd_soc_dapm_sync(codec);
-	snd_soc_dapm_enable_pin(codec, "Audio Out Stereo");
+	snd_soc_dapm_disable_pin(dapm, "Audio Out Differential");
+	snd_soc_dapm_sync(dapm);
+	snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
 
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
new file mode 100644
index 0000000..a6a6b5f
--- /dev/null
+++ b/sound/soc/samsung/Kconfig
@@ -0,0 +1,171 @@
+config SND_SOC_SAMSUNG
+	tristate "ASoC support for Samsung"
+	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PV310
+	select S3C64XX_DMA if ARCH_S3C64XX
+	select S3C2410_DMA if ARCH_S3C2410
+	help
+	  Say Y or M if you want to add support for codecs attached to
+	  the Samsung SoCs' Audio interfaces. You will also need to
+	  select the audio interfaces to support below.
+
+config SND_S3C24XX_I2S
+	tristate
+	select S3C2410_DMA
+
+config SND_S3C_I2SV2_SOC
+	tristate
+
+config SND_S3C2412_SOC_I2S
+	tristate
+	select SND_S3C_I2SV2_SOC
+	select S3C2410_DMA
+
+config SND_SAMSUNG_PCM
+	tristate
+
+config SND_SAMSUNG_AC97
+	tristate
+	select SND_SOC_AC97_BUS
+
+config SND_SAMSUNG_SPDIF
+	tristate
+	select SND_SOC_SPDIF
+
+config SND_SAMSUNG_I2S
+	tristate
+
+config SND_SOC_SAMSUNG_NEO1973_WM8753
+	tristate "SoC I2S Audio support for NEO1973 - WM8753"
+	depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA01
+	select SND_S3C24XX_I2S
+	select SND_SOC_WM8753
+	help
+	  Say Y if you want to add support for SoC audio on the Neo1973
+	  (GTA01) with the WM8753.
+
+config SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753
+	tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
+	depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA02
+	select SND_S3C24XX_I2S
+	select SND_SOC_WM8753
+	help
+	  This driver provides audio support for the Openmoko Neo FreeRunner
+	  smartphone.
+
+config SND_SOC_SAMSUNG_JIVE_WM8750
+	tristate "SoC I2S Audio support for Jive"
+	depends on SND_SOC_SAMSUNG && MACH_JIVE
+	select SND_SOC_WM8750
+	select SND_S3C2412_SOC_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the Jive.
+
+config SND_SOC_SAMSUNG_SMDK_WM8580
+	tristate "SoC I2S Audio support for WM8580 on SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110)
+	select SND_SOC_WM8580
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the SMDKs.
+
+config SND_SOC_SAMSUNG_SMDK_WM8994
+	tristate "SoC I2S Audio support for WM8994 on SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDKV310 || MACH_SMDKC210)
+	select SND_SOC_WM8994
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the SMDKs.
+
+config SND_SOC_SAMSUNG_SMDK2443_WM9710
+	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
+	depends on SND_SOC_SAMSUNG && MACH_SMDK2443
+	select S3C2410_DMA
+	select AC97_BUS
+	select SND_SOC_AC97_CODEC
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on smdk2443
+	  with the WM9710.
+
+config SND_SOC_SAMSUNG_LN2440SBC_ALC650
+	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select S3C2410_DMA
+	select AC97_BUS
+	select SND_SOC_AC97_CODEC
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on ln2440sbc
+	  with the ALC650.
+
+config SND_SOC_SAMSUNG_S3C24XX_UDA134X
+	tristate "SoC I2S Audio support for UDA134X wired to a S3C24XX"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_L3
+	select SND_SOC_UDA134X
+
+config SND_SOC_SAMSUNG_SIMTEC
+	tristate
+	help
+	  Internal node for common S3C24XX/Simtec support
+
+config SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23
+	tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_TLV320AIC23
+	select SND_SOC_SAMSUNG_SIMTEC
+
+config SND_SOC_SAMSUNG_SIMTEC_HERMES
+	tristate "SoC I2S Audio support for Simtec Hermes board"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_TLV320AIC3X
+	select SND_SOC_SAMSUNG_SIMTEC
+
+config SND_SOC_SAMSUNG_H1940_UDA1380
+	tristate "Audio support for the HP iPAQ H1940"
+	depends on SND_SOC_SAMSUNG && ARCH_H1940
+	select SND_S3C24XX_I2S
+	select SND_SOC_UDA1380
+	help
+	  This driver provides audio support for the HP iPAQ h1940 PDA.
+
+config SND_SOC_SAMSUNG_RX1950_UDA1380
+	tristate "Audio support for the HP iPAQ RX1950"
+	depends on SND_SOC_SAMSUNG && MACH_RX1950
+	select SND_S3C24XX_I2S
+	select SND_SOC_UDA1380
+	help
+	  This driver provides audio support for the HP iPAQ RX1950 PDA.
+
+config SND_SOC_SAMSUNG_SMDK_WM9713
+	tristate "SoC AC97 Audio support for SMDK with WM9713"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210)
+	select SND_SOC_WM9713
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on the SMDK.
+
+config SND_SOC_SMARTQ
+	tristate "SoC I2S Audio support for SmartQ board"
+	depends on SND_SOC_SAMSUNG && MACH_SMARTQ
+	select SND_SAMSUNG_I2S
+	select SND_SOC_WM8750
+
+config SND_SOC_GONI_AQUILA_WM8994
+	tristate "SoC I2S Audio support for AQUILA/GONI - WM8994"
+	depends on SND_SOC_SAMSUNG && (MACH_GONI || MACH_AQUILA)
+	select SND_SAMSUNG_I2S
+	select SND_SOC_WM8994
+	help
+	  Say Y if you want to add support for SoC audio on goni or aquila
+	  with the WM8994.
+
+config SND_SOC_SAMSUNG_SMDK_SPDIF
+	tristate "SoC S/PDIF Audio support for SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210)
+	select SND_SAMSUNG_SPDIF
+	help
+	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
new file mode 100644
index 0000000..705d4e8
--- /dev/null
+++ b/sound/soc/samsung/Makefile
@@ -0,0 +1,55 @@
+# S3C24XX Platform Support
+snd-soc-s3c24xx-objs := dma.o
+snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
+snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
+snd-soc-ac97-objs := ac97.o
+snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
+snd-soc-samsung-spdif-objs := spdif.o
+snd-soc-pcm-objs := pcm.o
+snd-soc-i2s-objs := i2s.o
+
+obj-$(CONFIG_SND_SOC_SAMSUNG) += snd-soc-s3c24xx.o
+obj-$(CONFIG_SND_S3C24XX_I2S) += snd-soc-s3c24xx-i2s.o
+obj-$(CONFIG_SND_SAMSUNG_AC97) += snd-soc-ac97.o
+obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
+obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
+obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o
+obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o
+obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o
+
+# S3C24XX Machine Support
+snd-soc-jive-wm8750-objs := jive_wm8750.o
+snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
+snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
+snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
+snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
+snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
+snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
+snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
+snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
+snd-soc-h1940-uda1380-objs := h1940_uda1380.o
+snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o
+snd-soc-smdk-wm8580-objs := smdk_wm8580.o
+snd-soc-smdk-wm8994-objs := smdk_wm8994.o
+snd-soc-smdk-wm9713-objs := smdk_wm9713.o
+snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
+snd-soc-goni-wm8994-objs := goni_wm8994.o
+snd-soc-smdk-spdif-objs := smdk_spdif.o
+
+obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC) += snd-soc-s3c24xx-simtec.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_H1940_UDA1380) += snd-soc-h1940-uda1380.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8580) += snd-soc-smdk-wm8580.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994) += snd-soc-smdk-wm8994.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o
+obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o
+obj-$(CONFIG_SND_SOC_GONI_AQUILA_WM8994) += snd-soc-goni-wm8994.o
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
new file mode 100644
index 0000000..4770a95
--- /dev/null
+++ b/sound/soc/samsung/ac97.c
@@ -0,0 +1,520 @@
+/* sound/soc/samsung/ac97.c
+ *
+ * ALSA SoC Audio Layer - S3C AC97 Controller driver
+ * 	Evolved from s3c2443-ac97.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
+ * 	Credits: Graeme Gregory, Sean Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <sound/soc.h>
+
+#include <plat/regs-ac97.h>
+#include <mach/dma.h>
+#include <plat/audio.h>
+
+#include "dma.h"
+#include "ac97.h"
+
+#define AC_CMD_ADDR(x) ((x) << 16)
+#define AC_CMD_DATA(x) ((x) & 0xffff)
+
+struct s3c_ac97_info {
+	struct clk         *ac97_clk;
+	void __iomem	   *regs;
+	struct mutex       lock;
+	struct completion  done;
+};
+static struct s3c_ac97_info s3c_ac97;
+
+static struct s3c2410_dma_client s3c_dma_client_out = {
+	.name = "AC97 PCMOut"
+};
+
+static struct s3c2410_dma_client s3c_dma_client_in = {
+	.name = "AC97 PCMIn"
+};
+
+static struct s3c2410_dma_client s3c_dma_client_micin = {
+	.name = "AC97 MicIn"
+};
+
+static struct s3c_dma_params s3c_ac97_pcm_out = {
+	.client		= &s3c_dma_client_out,
+	.dma_size	= 4,
+};
+
+static struct s3c_dma_params s3c_ac97_pcm_in = {
+	.client		= &s3c_dma_client_in,
+	.dma_size	= 4,
+};
+
+static struct s3c_dma_params s3c_ac97_mic_in = {
+	.client		= &s3c_dma_client_micin,
+	.dma_size	= 4,
+};
+
+static void s3c_ac97_activate(struct snd_ac97 *ac97)
+{
+	u32 ac_glbctrl, stat;
+
+	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
+	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
+		return; /* Return if already active */
+
+	INIT_COMPLETION(s3c_ac97.done);
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	msleep(1);
+
+	ac_glbctrl |= S3C_AC97_GLBCTRL_TRANSFERDATAENABLE;
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	msleep(1);
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
+		pr_err("AC97: Unable to activate!\n");
+}
+
+static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
+	unsigned short reg)
+{
+	u32 ac_glbctrl, ac_codec_cmd;
+	u32 stat, addr, data;
+
+	mutex_lock(&s3c_ac97.lock);
+
+	s3c_ac97_activate(ac97);
+
+	INIT_COMPLETION(s3c_ac97.done);
+
+	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
+	ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
+	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
+
+	udelay(50);
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
+		pr_err("AC97: Unable to read!\n");
+
+	stat = readl(s3c_ac97.regs + S3C_AC97_STAT);
+	addr = (stat >> 16) & 0x7f;
+	data = (stat & 0xffff);
+
+	if (addr != reg)
+		pr_err("ac97: req addr = %02x, rep addr = %02x\n",
+			reg, addr);
+
+	mutex_unlock(&s3c_ac97.lock);
+
+	return (unsigned short)data;
+}
+
+static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+	unsigned short val)
+{
+	u32 ac_glbctrl, ac_codec_cmd;
+
+	mutex_lock(&s3c_ac97.lock);
+
+	s3c_ac97_activate(ac97);
+
+	INIT_COMPLETION(s3c_ac97.done);
+
+	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
+	ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);
+	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
+
+	udelay(50);
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
+		pr_err("AC97: Unable to write!\n");
+
+	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
+	ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
+	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
+
+	mutex_unlock(&s3c_ac97.lock);
+}
+
+static void s3c_ac97_cold_reset(struct snd_ac97 *ac97)
+{
+	pr_debug("AC97: Cold reset\n");
+	writel(S3C_AC97_GLBCTRL_COLDRESET,
+			s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	msleep(1);
+
+	writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	msleep(1);
+}
+
+static void s3c_ac97_warm_reset(struct snd_ac97 *ac97)
+{
+	u32 stat;
+
+	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
+	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
+		return; /* Return if already active */
+
+	pr_debug("AC97: Warm reset\n");
+
+	writel(S3C_AC97_GLBCTRL_WARMRESET, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	msleep(1);
+
+	writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	msleep(1);
+
+	s3c_ac97_activate(ac97);
+}
+
+static irqreturn_t s3c_ac97_irq(int irq, void *dev_id)
+{
+	u32 ac_glbctrl, ac_glbstat;
+
+	ac_glbstat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT);
+
+	if (ac_glbstat & S3C_AC97_GLBSTAT_CODECREADY) {
+
+		ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+		ac_glbctrl &= ~S3C_AC97_GLBCTRL_CODECREADYIE;
+		writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+		complete(&s3c_ac97.done);
+	}
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	ac_glbctrl |= (1<<30); /* Clear interrupt */
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+	return IRQ_HANDLED;
+}
+
+struct snd_ac97_bus_ops soc_ac97_ops = {
+	.read       = s3c_ac97_read,
+	.write      = s3c_ac97_write,
+	.warm_reset = s3c_ac97_warm_reset,
+	.reset      = s3c_ac97_cold_reset,
+};
+EXPORT_SYMBOL_GPL(soc_ac97_ops);
+
+static int s3c_ac97_hw_params(struct snd_pcm_substream *substream,
+				  struct snd_pcm_hw_params *params,
+				  struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct s3c_dma_params *dma_data;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dma_data = &s3c_ac97_pcm_out;
+	else
+		dma_data = &s3c_ac97_pcm_in;
+
+	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
+
+	return 0;
+}
+
+static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	u32 ac_glbctrl;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c_dma_params *dma_data =
+		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMINTM_MASK;
+	else
+		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMOUTTM_MASK;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMINTM_DMA;
+		else
+			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMOUTTM_DMA;
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		break;
+	}
+
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+
+	return 0;
+}
+
+static int s3c_ac97_hw_mic_params(struct snd_pcm_substream *substream,
+				      struct snd_pcm_hw_params *params,
+				      struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		return -ENODEV;
+	else
+		snd_soc_dai_set_dma_data(cpu_dai, substream, &s3c_ac97_mic_in);
+
+	return 0;
+}
+
+static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
+				    int cmd, struct snd_soc_dai *dai)
+{
+	u32 ac_glbctrl;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c_dma_params *dma_data =
+		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
+	ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		ac_glbctrl |= S3C_AC97_GLBCTRL_MICINTM_DMA;
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		break;
+	}
+
+	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
+
+	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops s3c_ac97_dai_ops = {
+	.hw_params	= s3c_ac97_hw_params,
+	.trigger	= s3c_ac97_trigger,
+};
+
+static struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = {
+	.hw_params	= s3c_ac97_hw_mic_params,
+	.trigger	= s3c_ac97_mic_trigger,
+};
+
+static struct snd_soc_dai_driver s3c_ac97_dai[] = {
+	[S3C_AC97_DAI_PCM] = {
+		.name =	"samsung-ac97",
+		.ac97_control = 1,
+		.playback = {
+			.stream_name = "AC97 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+		.capture = {
+			.stream_name = "AC97 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+		.ops = &s3c_ac97_dai_ops,
+	},
+	[S3C_AC97_DAI_MIC] = {
+		.name = "samsung-ac97-mic",
+		.ac97_control = 1,
+		.capture = {
+			.stream_name = "AC97 Mic Capture",
+			.channels_min = 1,
+			.channels_max = 1,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+		.ops = &s3c_ac97_mic_dai_ops,
+	},
+};
+
+static __devinit int s3c_ac97_probe(struct platform_device *pdev)
+{
+	struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
+	struct s3c_audio_pdata *ac97_pdata;
+	int ret;
+
+	ac97_pdata = pdev->dev.platform_data;
+	if (!ac97_pdata || !ac97_pdata->cfg_gpio) {
+		dev_err(&pdev->dev, "cfg_gpio callback not provided!\n");
+		return -EINVAL;
+	}
+
+	/* Check for availability of necessary resources */
+	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!dmatx_res) {
+		dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
+		return -ENXIO;
+	}
+
+	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (!dmarx_res) {
+		dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
+		return -ENXIO;
+	}
+
+	dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+	if (!dmamic_res) {
+		dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
+		return -ENXIO;
+	}
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		dev_err(&pdev->dev, "Unable to get register resource\n");
+		return -ENXIO;
+	}
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res) {
+		dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
+		return -ENXIO;
+	}
+
+	if (!request_mem_region(mem_res->start,
+				resource_size(mem_res), "ac97")) {
+		dev_err(&pdev->dev, "Unable to request register region\n");
+		return -EBUSY;
+	}
+
+	s3c_ac97_pcm_out.channel = dmatx_res->start;
+	s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
+	s3c_ac97_pcm_in.channel = dmarx_res->start;
+	s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
+	s3c_ac97_mic_in.channel = dmamic_res->start;
+	s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
+
+	init_completion(&s3c_ac97.done);
+	mutex_init(&s3c_ac97.lock);
+
+	s3c_ac97.regs = ioremap(mem_res->start, resource_size(mem_res));
+	if (s3c_ac97.regs == NULL) {
+		dev_err(&pdev->dev, "Unable to ioremap register region\n");
+		ret = -ENXIO;
+		goto err1;
+	}
+
+	s3c_ac97.ac97_clk = clk_get(&pdev->dev, "ac97");
+	if (IS_ERR(s3c_ac97.ac97_clk)) {
+		dev_err(&pdev->dev, "ac97 failed to get ac97_clock\n");
+		ret = -ENODEV;
+		goto err2;
+	}
+	clk_enable(s3c_ac97.ac97_clk);
+
+	if (ac97_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure gpio\n");
+		ret = -EINVAL;
+		goto err3;
+	}
+
+	ret = request_irq(irq_res->start, s3c_ac97_irq,
+					IRQF_DISABLED, "AC97", NULL);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "ac97: interrupt request failed.\n");
+		goto err4;
+	}
+
+	ret = snd_soc_register_dais(&pdev->dev, s3c_ac97_dai,
+			ARRAY_SIZE(s3c_ac97_dai));
+	if (ret)
+		goto err5;
+
+	return 0;
+
+err5:
+	free_irq(irq_res->start, NULL);
+err4:
+err3:
+	clk_disable(s3c_ac97.ac97_clk);
+	clk_put(s3c_ac97.ac97_clk);
+err2:
+	iounmap(s3c_ac97.regs);
+err1:
+	release_mem_region(mem_res->start, resource_size(mem_res));
+
+	return ret;
+}
+
+static __devexit int s3c_ac97_remove(struct platform_device *pdev)
+{
+	struct resource *mem_res, *irq_res;
+
+	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c_ac97_dai));
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (irq_res)
+		free_irq(irq_res->start, NULL);
+
+	clk_disable(s3c_ac97.ac97_clk);
+	clk_put(s3c_ac97.ac97_clk);
+
+	iounmap(s3c_ac97.regs);
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem_res)
+		release_mem_region(mem_res->start, resource_size(mem_res));
+
+	return 0;
+}
+
+static struct platform_driver s3c_ac97_driver = {
+	.probe  = s3c_ac97_probe,
+	.remove = s3c_ac97_remove,
+	.driver = {
+		.name = "samsung-ac97",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s3c_ac97_init(void)
+{
+	return platform_driver_register(&s3c_ac97_driver);
+}
+module_init(s3c_ac97_init);
+
+static void __exit s3c_ac97_exit(void)
+{
+	platform_driver_unregister(&s3c_ac97_driver);
+}
+module_exit(s3c_ac97_exit);
+
+MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:samsung-ac97");
diff --git a/sound/soc/samsung/ac97.h b/sound/soc/samsung/ac97.h
new file mode 100644
index 0000000..0d0e1b5
--- /dev/null
+++ b/sound/soc/samsung/ac97.h
@@ -0,0 +1,21 @@
+/* sound/soc/samsung/ac97.h
+ *
+ * ALSA SoC Audio Layer - S3C AC97 Controller driver
+ *	Evolved from s3c2443-ac97.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *	Credits: Graeme Gregory, Sean Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __S3C_AC97_H_
+#define __S3C_AC97_H_
+
+#define S3C_AC97_DAI_PCM 0
+#define S3C_AC97_DAI_MIC 1
+
+#endif /* __S3C_AC97_H_ */
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
new file mode 100644
index 0000000..2124019
--- /dev/null
+++ b/sound/soc/samsung/dma.c
@@ -0,0 +1,502 @@
+/*
+ * dma.c  --  ALSA Soc Audio Layer
+ *
+ * (c) 2006 Wolfson Microelectronics PLC.
+ * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * Copyright 2004-2005 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <asm/dma.h>
+#include <mach/hardware.h>
+#include <mach/dma.h>
+
+#include "dma.h"
+
+static const struct snd_pcm_hardware dma_hardware = {
+	.info			= SNDRV_PCM_INFO_INTERLEAVED |
+				    SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				    SNDRV_PCM_INFO_MMAP |
+				    SNDRV_PCM_INFO_MMAP_VALID |
+				    SNDRV_PCM_INFO_PAUSE |
+				    SNDRV_PCM_INFO_RESUME,
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_U16_LE |
+				    SNDRV_PCM_FMTBIT_U8 |
+				    SNDRV_PCM_FMTBIT_S8,
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.buffer_bytes_max	= 128*1024,
+	.period_bytes_min	= PAGE_SIZE,
+	.period_bytes_max	= PAGE_SIZE*2,
+	.periods_min		= 2,
+	.periods_max		= 128,
+	.fifo_size		= 32,
+};
+
+struct runtime_data {
+	spinlock_t lock;
+	int state;
+	unsigned int dma_loaded;
+	unsigned int dma_limit;
+	unsigned int dma_period;
+	dma_addr_t dma_start;
+	dma_addr_t dma_pos;
+	dma_addr_t dma_end;
+	struct s3c_dma_params *params;
+};
+
+/* dma_enqueue
+ *
+ * place a dma buffer onto the queue for the dma system
+ * to handle.
+ */
+static void dma_enqueue(struct snd_pcm_substream *substream)
+{
+	struct runtime_data *prtd = substream->runtime->private_data;
+	dma_addr_t pos = prtd->dma_pos;
+	unsigned int limit;
+	int ret;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (s3c_dma_has_circular())
+		limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;
+	else
+		limit = prtd->dma_limit;
+
+	pr_debug("%s: loaded %d, limit %d\n",
+				__func__, prtd->dma_loaded, limit);
+
+	while (prtd->dma_loaded < limit) {
+		unsigned long len = prtd->dma_period;
+
+		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
+
+		if ((pos + len) > prtd->dma_end) {
+			len  = prtd->dma_end - pos;
+			pr_debug("%s: corrected dma len %ld\n", __func__, len);
+		}
+
+		ret = s3c2410_dma_enqueue(prtd->params->channel,
+			substream, pos, len);
+
+		if (ret == 0) {
+			prtd->dma_loaded++;
+			pos += prtd->dma_period;
+			if (pos >= prtd->dma_end)
+				pos = prtd->dma_start;
+		} else
+			break;
+	}
+
+	prtd->dma_pos = pos;
+}
+
+static void audio_buffdone(struct s3c2410_dma_chan *channel,
+				void *dev_id, int size,
+				enum s3c2410_dma_buffresult result)
+{
+	struct snd_pcm_substream *substream = dev_id;
+	struct runtime_data *prtd;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR)
+		return;
+
+	if (!substream)
+		return;
+
+	prtd = substream->runtime->private_data;
+	snd_pcm_period_elapsed(substream);
+
+	spin_lock(&prtd->lock);
+	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
+		prtd->dma_loaded--;
+		dma_enqueue(substream);
+	}
+
+	spin_unlock(&prtd->lock);
+}
+
+static int dma_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct runtime_data *prtd = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned long totbytes = params_buffer_bytes(params);
+	struct s3c_dma_params *dma =
+		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+	int ret = 0;
+
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* return if this is a bufferless transfer e.g.
+	 * codec <--> BT codec or GSM modem -- lg FIXME */
+	if (!dma)
+		return 0;
+
+	/* this may get called several times by oss emulation
+	 * with different params -HW */
+	if (prtd->params == NULL) {
+		/* prepare DMA */
+		prtd->params = dma;
+
+		pr_debug("params %p, client %p, channel %d\n", prtd->params,
+			prtd->params->client, prtd->params->channel);
+
+		ret = s3c2410_dma_request(prtd->params->channel,
+					  prtd->params->client, NULL);
+
+		if (ret < 0) {
+			printk(KERN_ERR "failed to get dma channel\n");
+			return ret;
+		}
+
+		/* use the circular buffering if we have it available. */
+		if (s3c_dma_has_circular())
+			s3c2410_dma_setflags(prtd->params->channel,
+					     S3C2410_DMAF_CIRCULAR);
+	}
+
+	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
+				    audio_buffdone);
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+	runtime->dma_bytes = totbytes;
+
+	spin_lock_irq(&prtd->lock);
+	prtd->dma_loaded = 0;
+	prtd->dma_limit = runtime->hw.periods_min;
+	prtd->dma_period = params_period_bytes(params);
+	prtd->dma_start = runtime->dma_addr;
+	prtd->dma_pos = prtd->dma_start;
+	prtd->dma_end = prtd->dma_start + totbytes;
+	spin_unlock_irq(&prtd->lock);
+
+	return 0;
+}
+
+static int dma_hw_free(struct snd_pcm_substream *substream)
+{
+	struct runtime_data *prtd = substream->runtime->private_data;
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* TODO - do we need to ensure DMA flushed */
+	snd_pcm_set_runtime_buffer(substream, NULL);
+
+	if (prtd->params) {
+		s3c2410_dma_free(prtd->params->channel, prtd->params->client);
+		prtd->params = NULL;
+	}
+
+	return 0;
+}
+
+static int dma_prepare(struct snd_pcm_substream *substream)
+{
+	struct runtime_data *prtd = substream->runtime->private_data;
+	int ret = 0;
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* return if this is a bufferless transfer e.g.
+	 * codec <--> BT codec or GSM modem -- lg FIXME */
+	if (!prtd->params)
+		return 0;
+
+	/* channel needs configuring for mem=>device, increment memory addr,
+	 * sync to pclk, half-word transfers to the IIS-FIFO. */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		s3c2410_dma_devconfig(prtd->params->channel,
+				      S3C2410_DMASRC_MEM,
+				      prtd->params->dma_addr);
+	} else {
+		s3c2410_dma_devconfig(prtd->params->channel,
+				      S3C2410_DMASRC_HW,
+				      prtd->params->dma_addr);
+	}
+
+	s3c2410_dma_config(prtd->params->channel,
+			   prtd->params->dma_size);
+
+	/* flush the DMA channel */
+	s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH);
+	prtd->dma_loaded = 0;
+	prtd->dma_pos = prtd->dma_start;
+
+	/* enqueue dma buffers */
+	dma_enqueue(substream);
+
+	return ret;
+}
+
+static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct runtime_data *prtd = substream->runtime->private_data;
+	int ret = 0;
+
+	pr_debug("Entered %s\n", __func__);
+
+	spin_lock(&prtd->lock);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		prtd->state |= ST_RUNNING;
+		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		prtd->state &= ~ST_RUNNING;
+		s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	spin_unlock(&prtd->lock);
+
+	return ret;
+}
+
+static snd_pcm_uframes_t
+dma_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct runtime_data *prtd = runtime->private_data;
+	unsigned long res;
+	dma_addr_t src, dst;
+
+	pr_debug("Entered %s\n", __func__);
+
+	spin_lock(&prtd->lock);
+	s3c2410_dma_getposition(prtd->params->channel, &src, &dst);
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		res = dst - prtd->dma_start;
+	else
+		res = src - prtd->dma_start;
+
+	spin_unlock(&prtd->lock);
+
+	pr_debug("Pointer %x %x\n", src, dst);
+
+	/* We seem to be getting the odd error from the PCM library due
+	 * to out-of-bounds pointers. This is maybe due to the DMA engine
+	 * not having loaded the new values for the channel before being
+	 * called... (todo - fix)
+	 */
+
+	if (res >= snd_pcm_lib_buffer_bytes(substream)) {
+		if (res == snd_pcm_lib_buffer_bytes(substream))
+			res = 0;
+	}
+
+	return bytes_to_frames(substream->runtime, res);
+}
+
+static int dma_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct runtime_data *prtd;
+
+	pr_debug("Entered %s\n", __func__);
+
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+	snd_soc_set_runtime_hwparams(substream, &dma_hardware);
+
+	prtd = kzalloc(sizeof(struct runtime_data), GFP_KERNEL);
+	if (prtd == NULL)
+		return -ENOMEM;
+
+	spin_lock_init(&prtd->lock);
+
+	runtime->private_data = prtd;
+	return 0;
+}
+
+static int dma_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct runtime_data *prtd = runtime->private_data;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (!prtd)
+		pr_debug("dma_close called with prtd == NULL\n");
+
+	kfree(prtd);
+
+	return 0;
+}
+
+static int dma_mmap(struct snd_pcm_substream *substream,
+	struct vm_area_struct *vma)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	pr_debug("Entered %s\n", __func__);
+
+	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+				     runtime->dma_area,
+				     runtime->dma_addr,
+				     runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops dma_ops = {
+	.open		= dma_open,
+	.close		= dma_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= dma_hw_params,
+	.hw_free	= dma_hw_free,
+	.prepare	= dma_prepare,
+	.trigger	= dma_trigger,
+	.pointer	= dma_pointer,
+	.mmap		= dma_mmap,
+};
+
+static int preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+{
+	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+	struct snd_dma_buffer *buf = &substream->dma_buffer;
+	size_t size = dma_hardware.buffer_bytes_max;
+
+	pr_debug("Entered %s\n", __func__);
+
+	buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	buf->dev.dev = pcm->card->dev;
+	buf->private_data = NULL;
+	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
+					   &buf->addr, GFP_KERNEL);
+	if (!buf->area)
+		return -ENOMEM;
+	buf->bytes = size;
+	return 0;
+}
+
+static void dma_free_dma_buffers(struct snd_pcm *pcm)
+{
+	struct snd_pcm_substream *substream;
+	struct snd_dma_buffer *buf;
+	int stream;
+
+	pr_debug("Entered %s\n", __func__);
+
+	for (stream = 0; stream < 2; stream++) {
+		substream = pcm->streams[stream].substream;
+		if (!substream)
+			continue;
+
+		buf = &substream->dma_buffer;
+		if (!buf->area)
+			continue;
+
+		dma_free_writecombine(pcm->card->dev, buf->bytes,
+				      buf->area, buf->addr);
+		buf->area = NULL;
+	}
+}
+
+static u64 dma_mask = DMA_BIT_MASK(32);
+
+static int dma_new(struct snd_card *card,
+	struct snd_soc_dai *dai, struct snd_pcm *pcm)
+{
+	int ret = 0;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (!card->dev->dma_mask)
+		card->dev->dma_mask = &dma_mask;
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = 0xffffffff;
+
+	if (dai->driver->playback.channels_min) {
+		ret = preallocate_dma_buffer(pcm,
+			SNDRV_PCM_STREAM_PLAYBACK);
+		if (ret)
+			goto out;
+	}
+
+	if (dai->driver->capture.channels_min) {
+		ret = preallocate_dma_buffer(pcm,
+			SNDRV_PCM_STREAM_CAPTURE);
+		if (ret)
+			goto out;
+	}
+out:
+	return ret;
+}
+
+static struct snd_soc_platform_driver samsung_asoc_platform = {
+	.ops		= &dma_ops,
+	.pcm_new	= dma_new,
+	.pcm_free	= dma_free_dma_buffers,
+};
+
+static int __devinit samsung_asoc_platform_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_platform(&pdev->dev, &samsung_asoc_platform);
+}
+
+static int __devexit samsung_asoc_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver asoc_dma_driver = {
+	.driver = {
+		.name = "samsung-audio",
+		.owner = THIS_MODULE,
+	},
+
+	.probe = samsung_asoc_platform_probe,
+	.remove = __devexit_p(samsung_asoc_platform_remove),
+};
+
+static int __init samsung_asoc_init(void)
+{
+	return platform_driver_register(&asoc_dma_driver);
+}
+module_init(samsung_asoc_init);
+
+static void __exit samsung_asoc_exit(void)
+{
+	platform_driver_unregister(&asoc_dma_driver);
+}
+module_exit(samsung_asoc_exit);
+
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:samsung-audio");
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
new file mode 100644
index 0000000..f8cd2b4
--- /dev/null
+++ b/sound/soc/samsung/dma.h
@@ -0,0 +1,30 @@
+/*
+ *  dma.h --
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  ALSA PCM interface for the Samsung S3C24xx CPU
+ */
+
+#ifndef _S3C_AUDIO_H
+#define _S3C_AUDIO_H
+
+#define ST_RUNNING		(1<<0)
+#define ST_OPENED		(1<<1)
+
+struct s3c_dma_params {
+	struct s3c2410_dma_client *client;	/* stream identifier */
+	int channel;				/* Channel ID */
+	dma_addr_t dma_addr;
+	int dma_size;			/* Size of the DMA transfer */
+};
+
+#define S3C24XX_DAI_I2S			0
+
+/* platform data */
+extern struct snd_ac97_bus_ops s3c24xx_ac97_ops;
+
+#endif
diff --git a/sound/soc/samsung/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
new file mode 100644
index 0000000..34dd9ef
--- /dev/null
+++ b/sound/soc/samsung/goni_wm8994.c
@@ -0,0 +1,314 @@
+/*
+ * goni_wm8994.c
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <asm/mach-types.h>
+#include <mach/gpio.h>
+#include <mach/regs-clock.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include "../codecs/wm8994.h"
+#include "dma.h"
+#include "i2s.h"
+
+#define MACHINE_NAME	0
+#define CPU_VOICE_DAI	1
+
+static const char *aquila_str[] = {
+	[MACHINE_NAME] = "aquila",
+	[CPU_VOICE_DAI] = "aquila-voice-dai",
+};
+
+static struct snd_soc_card goni;
+static struct platform_device *goni_snd_device;
+
+/* 3.5 mm jack */
+static struct snd_soc_jack jack;
+
+/* 3.5 mm jack detection DAPM pins */
+static struct snd_soc_jack_pin jack_pins[] = {
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	}, {
+		.pin = "Headset Stereophone",
+		.mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL |
+			SND_JACK_AVOUT,
+	},
+};
+
+/* 3.5 mm jack detection gpios */
+static struct snd_soc_jack_gpio jack_gpios[] = {
+	{
+		.gpio = S5PV210_GPH0(6),
+		.name = "DET_3.5",
+		.report = SND_JACK_HEADSET | SND_JACK_MECHANICAL |
+			SND_JACK_AVOUT,
+		.debounce_time = 200,
+	},
+};
+
+static const struct snd_soc_dapm_widget goni_dapm_widgets[] = {
+	SND_SOC_DAPM_SPK("Ext Left Spk", NULL),
+	SND_SOC_DAPM_SPK("Ext Right Spk", NULL),
+	SND_SOC_DAPM_SPK("Ext Rcv", NULL),
+	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Main Mic", NULL),
+	SND_SOC_DAPM_MIC("2nd Mic", NULL),
+	SND_SOC_DAPM_LINE("Radio In", NULL),
+};
+
+static const struct snd_soc_dapm_route goni_dapm_routes[] = {
+	{"Ext Left Spk", NULL, "SPKOUTLP"},
+	{"Ext Left Spk", NULL, "SPKOUTLN"},
+
+	{"Ext Right Spk", NULL, "SPKOUTRP"},
+	{"Ext Right Spk", NULL, "SPKOUTRN"},
+
+	{"Ext Rcv", NULL, "HPOUT2N"},
+	{"Ext Rcv", NULL, "HPOUT2P"},
+
+	{"Headset Stereophone", NULL, "HPOUT1L"},
+	{"Headset Stereophone", NULL, "HPOUT1R"},
+
+	{"IN1RN", NULL, "Headset Mic"},
+	{"IN1RP", NULL, "Headset Mic"},
+
+	{"IN1RN", NULL, "2nd Mic"},
+	{"IN1RP", NULL, "2nd Mic"},
+
+	{"IN1LN", NULL, "Main Mic"},
+	{"IN1LP", NULL, "Main Mic"},
+
+	{"IN2LN", NULL, "Radio In"},
+	{"IN2RN", NULL, "Radio In"},
+};
+
+static int goni_wm8994_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int ret;
+
+	/* add goni specific widgets */
+	snd_soc_dapm_new_controls(dapm, goni_dapm_widgets,
+			ARRAY_SIZE(goni_dapm_widgets));
+
+	/* set up goni specific audio routes */
+	snd_soc_dapm_add_routes(dapm, goni_dapm_routes,
+			ARRAY_SIZE(goni_dapm_routes));
+
+	/* set endpoints to not connected */
+	snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+	snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+
+	if (machine_is_aquila()) {
+		snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
+		snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
+	}
+
+	snd_soc_dapm_sync(dapm);
+
+	/* Headset jack detection */
+	ret = snd_soc_jack_new(codec, "Headset Jack",
+			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
+			&jack);
+	if (ret)
+		return ret;
+
+	ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins);
+	if (ret)
+		return ret;
+
+	ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int goni_hifi_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned int pll_out = 24000000;
+	int ret = 0;
+
+	/* set the cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec FLL */
+	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out,
+			params_rate(params) * 256);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
+			params_rate(params) * 256, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops goni_hifi_ops = {
+	.hw_params = goni_hifi_hw_params,
+};
+
+static int goni_voice_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out = 24000000;
+	int ret = 0;
+
+	if (params_rate(params) != 8000)
+		return -EINVAL;
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
+			SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec FLL */
+	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out,
+			params_rate(params) * 256);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2,
+			params_rate(params) * 256, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_dai_driver voice_dai = {
+	.name = "goni-voice-dai",
+	.id = 0,
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+};
+
+static struct snd_soc_ops goni_voice_ops = {
+	.hw_params = goni_voice_hw_params,
+};
+
+static struct snd_soc_dai_link goni_dai[] = {
+{
+	.name = "WM8994",
+	.stream_name = "WM8994 HiFi",
+	.cpu_dai_name = "samsung-i2s.0",
+	.codec_dai_name = "wm8994-hifi",
+	.platform_name = "samsung-audio",
+	.codec_name = "wm8994-codec.0-0x1a",
+	.init = goni_wm8994_init,
+	.ops = &goni_hifi_ops,
+}, {
+	.name = "WM8994 Voice",
+	.stream_name = "Voice",
+	.cpu_dai_name = "goni-voice-dai",
+	.codec_dai_name = "wm8994-voice",
+	.platform_name = "samsung-audio",
+	.codec_name = "wm8994-codec.0-0x1a",
+	.ops = &goni_voice_ops,
+},
+};
+
+static struct snd_soc_card goni = {
+	.name = "goni",
+	.dai_link = goni_dai,
+	.num_links = ARRAY_SIZE(goni_dai),
+};
+
+static int __init goni_init(void)
+{
+	int ret;
+
+	if (machine_is_aquila()) {
+		voice_dai.name = aquila_str[CPU_VOICE_DAI];
+		goni_dai[1].cpu_dai_name = aquila_str[CPU_VOICE_DAI];
+		goni.name = aquila_str[MACHINE_NAME];
+	} else if (!machine_is_goni())
+		return -ENODEV;
+
+	goni_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!goni_snd_device)
+		return -ENOMEM;
+
+	/* register voice DAI here */
+	ret = snd_soc_register_dai(&goni_snd_device->dev, &voice_dai);
+	if (ret) {
+		platform_device_put(goni_snd_device);
+		return ret;
+	}
+
+	platform_set_drvdata(goni_snd_device, &goni);
+	ret = platform_device_add(goni_snd_device);
+
+	if (ret) {
+		snd_soc_unregister_dai(&goni_snd_device->dev);
+		platform_device_put(goni_snd_device);
+	}
+
+	return ret;
+}
+
+static void __exit goni_exit(void)
+{
+	snd_soc_unregister_dai(&goni_snd_device->dev);
+	platform_device_unregister(goni_snd_device);
+}
+
+module_init(goni_init);
+module_exit(goni_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("ALSA SoC WM8994 GONI(S5PV210)");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
new file mode 100644
index 0000000..c45f7ce
--- /dev/null
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -0,0 +1,296 @@
+/*
+ * h1940-uda1380.c  --  ALSA Soc Audio Layer
+ *
+ * Copyright (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ * Based on version from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+
+#include <sound/soc.h>
+#include <sound/uda1380.h>
+#include <sound/jack.h>
+
+#include <plat/regs-iis.h>
+
+#include <mach/h1940-latch.h>
+
+#include <asm/mach-types.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "../codecs/uda1380.h"
+
+static unsigned int rates[] = {
+	11025,
+	22050,
+	44100,
+};
+
+static struct snd_pcm_hw_constraint_list hw_rates = {
+	.count = ARRAY_SIZE(rates),
+	.list = rates,
+	.mask = 0,
+};
+
+static struct snd_soc_jack hp_jack;
+
+static struct snd_soc_jack_pin hp_jack_pins[] = {
+	{
+		.pin	= "Headphone Jack",
+		.mask	= SND_JACK_HEADPHONE,
+	},
+	{
+		.pin	= "Speaker",
+		.mask	= SND_JACK_HEADPHONE,
+		.invert	= 1,
+	},
+};
+
+static struct snd_soc_jack_gpio hp_jack_gpios[] = {
+	{
+		.gpio			= S3C2410_GPG(4),
+		.name			= "hp-gpio",
+		.report			= SND_JACK_HEADPHONE,
+		.invert			= 1,
+		.debounce_time		= 200,
+	},
+};
+
+static int h1940_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	runtime->hw.rate_min = hw_rates.list[0];
+	runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1];
+	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
+
+	return snd_pcm_hw_constraint_list(runtime, 0,
+					SNDRV_PCM_HW_PARAM_RATE,
+					&hw_rates);
+}
+
+static int h1940_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int div;
+	int ret;
+	unsigned int rate = params_rate(params);
+
+	switch (rate) {
+	case 11025:
+	case 22050:
+	case 44100:
+		div = s3c24xx_i2s_get_clockrate() / (384 * rate);
+		if (s3c24xx_i2s_get_clockrate() % (384 * rate) > (192 * rate))
+			div++;
+		break;
+	default:
+		dev_err(&rtd->dev, "%s: rate %d is not supported\n",
+			__func__, rate);
+		return -EINVAL;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* select clock source */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate,
+			SND_SOC_CLOCK_OUT);
+	if (ret < 0)
+		return ret;
+
+	/* set MCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+		S3C2410_IISMOD_384FS);
+	if (ret < 0)
+		return ret;
+
+	/* set BCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
+		S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	/* set prescaler division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+		S3C24XX_PRESCALE(div, div));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops h1940_ops = {
+	.startup	= h1940_startup,
+	.hw_params	= h1940_hw_params,
+};
+
+static int h1940_spk_power(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		gpio_set_value(H1940_LATCH_AUDIO_POWER, 1);
+	else
+		gpio_set_value(H1940_LATCH_AUDIO_POWER, 0);
+
+	return 0;
+}
+
+/* h1940 machine dapm widgets */
+static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", h1940_spk_power),
+};
+
+/* h1940 machine audio_map */
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* headphone connected to VOUTLHP, VOUTRHP */
+	{"Headphone Jack", NULL, "VOUTLHP"},
+	{"Headphone Jack", NULL, "VOUTRHP"},
+
+	/* ext speaker connected to VOUTL, VOUTR  */
+	{"Speaker", NULL, "VOUTL"},
+	{"Speaker", NULL, "VOUTR"},
+
+	/* mic is connected to VINM */
+	{"VINM", NULL, "Mic Jack"},
+};
+
+static struct platform_device *s3c24xx_snd_device;
+
+static int h1940_uda1380_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	/* Add h1940 specific widgets */
+	err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+	if (err)
+		return err;
+
+	/* Set up h1940 specific audio path interconnects */
+	err = snd_soc_dapm_add_routes(dapm, audio_map,
+				      ARRAY_SIZE(audio_map));
+	if (err)
+		return err;
+
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+		&hp_jack);
+
+	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
+		hp_jack_pins);
+
+	snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+
+	return 0;
+}
+
+/* s3c24xx digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link h1940_uda1380_dai[] = {
+	{
+		.name		= "uda1380",
+		.stream_name	= "UDA1380 Duplex",
+		.cpu_dai_name	= "s3c24xx-iis",
+		.codec_dai_name	= "uda1380-hifi",
+		.init		= h1940_uda1380_init,
+		.platform_name	= "samsung-audio",
+		.codec_name	= "uda1380-codec.0-001a",
+		.ops		= &h1940_ops,
+	},
+};
+
+static struct snd_soc_card h1940_asoc = {
+	.name = "h1940",
+	.dai_link = h1940_uda1380_dai,
+	.num_links = ARRAY_SIZE(h1940_uda1380_dai),
+};
+
+static int __init h1940_init(void)
+{
+	int ret;
+
+	if (!machine_is_h1940())
+		return -ENODEV;
+
+	/* configure some gpios */
+	ret = gpio_request(H1940_LATCH_AUDIO_POWER, "speaker-power");
+	if (ret)
+		goto err_out;
+
+	ret = gpio_direction_output(H1940_LATCH_AUDIO_POWER, 0);
+	if (ret)
+		goto err_gpio;
+
+	s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!s3c24xx_snd_device) {
+		ret = -ENOMEM;
+		goto err_gpio;
+	}
+
+	platform_set_drvdata(s3c24xx_snd_device, &h1940_asoc);
+	ret = platform_device_add(s3c24xx_snd_device);
+
+	if (ret)
+		goto err_plat;
+
+	return 0;
+
+err_plat:
+	platform_device_put(s3c24xx_snd_device);
+err_gpio:
+	gpio_free(H1940_LATCH_AUDIO_POWER);
+
+err_out:
+	return ret;
+}
+
+static void __exit h1940_exit(void)
+{
+	platform_device_unregister(s3c24xx_snd_device);
+	snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+	gpio_free(H1940_LATCH_AUDIO_POWER);
+}
+
+module_init(h1940_init);
+module_exit(h1940_exit);
+
+/* Module information */
+MODULE_AUTHOR("Arnaud Patard, Vasily Khoruzhick");
+MODULE_DESCRIPTION("ALSA SoC H1940");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
new file mode 100644
index 0000000..d00ac3a
--- /dev/null
+++ b/sound/soc/samsung/i2s.c
@@ -0,0 +1,1258 @@
+/* sound/soc/samsung/i2s.c
+ *
+ * ALSA SoC Audio Layer - Samsung I2S Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <plat/audio.h>
+
+#include "dma.h"
+#include "i2s.h"
+
+#define I2SCON		0x0
+#define I2SMOD		0x4
+#define I2SFIC		0x8
+#define I2SPSR		0xc
+#define I2STXD		0x10
+#define I2SRXD		0x14
+#define I2SFICS		0x18
+#define I2STXDS		0x1c
+
+#define CON_RSTCLR		(1 << 31)
+#define CON_FRXOFSTATUS		(1 << 26)
+#define CON_FRXORINTEN		(1 << 25)
+#define CON_FTXSURSTAT		(1 << 24)
+#define CON_FTXSURINTEN		(1 << 23)
+#define CON_TXSDMA_PAUSE	(1 << 20)
+#define CON_TXSDMA_ACTIVE	(1 << 18)
+
+#define CON_FTXURSTATUS		(1 << 17)
+#define CON_FTXURINTEN		(1 << 16)
+#define CON_TXFIFO2_EMPTY	(1 << 15)
+#define CON_TXFIFO1_EMPTY	(1 << 14)
+#define CON_TXFIFO2_FULL	(1 << 13)
+#define CON_TXFIFO1_FULL	(1 << 12)
+
+#define CON_LRINDEX		(1 << 11)
+#define CON_TXFIFO_EMPTY	(1 << 10)
+#define CON_RXFIFO_EMPTY	(1 << 9)
+#define CON_TXFIFO_FULL		(1 << 8)
+#define CON_RXFIFO_FULL		(1 << 7)
+#define CON_TXDMA_PAUSE		(1 << 6)
+#define CON_RXDMA_PAUSE		(1 << 5)
+#define CON_TXCH_PAUSE		(1 << 4)
+#define CON_RXCH_PAUSE		(1 << 3)
+#define CON_TXDMA_ACTIVE	(1 << 2)
+#define CON_RXDMA_ACTIVE	(1 << 1)
+#define CON_ACTIVE		(1 << 0)
+
+#define MOD_OPCLK_CDCLK_OUT	(0 << 30)
+#define MOD_OPCLK_CDCLK_IN	(1 << 30)
+#define MOD_OPCLK_BCLK_OUT	(2 << 30)
+#define MOD_OPCLK_PCLK		(3 << 30)
+#define MOD_OPCLK_MASK		(3 << 30)
+#define MOD_TXS_IDMA		(1 << 28) /* Sec_TXFIFO use I-DMA */
+
+#define MOD_BLCS_SHIFT	26
+#define MOD_BLCS_16BIT	(0 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_8BIT	(1 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_24BIT	(2 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_MASK	(3 << MOD_BLCS_SHIFT)
+#define MOD_BLCP_SHIFT	24
+#define MOD_BLCP_16BIT	(0 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_8BIT	(1 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_24BIT	(2 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_MASK	(3 << MOD_BLCP_SHIFT)
+
+#define MOD_C2DD_HHALF		(1 << 21) /* Discard Higher-half */
+#define MOD_C2DD_LHALF		(1 << 20) /* Discard Lower-half */
+#define MOD_C1DD_HHALF		(1 << 19)
+#define MOD_C1DD_LHALF		(1 << 18)
+#define MOD_DC2_EN		(1 << 17)
+#define MOD_DC1_EN		(1 << 16)
+#define MOD_BLC_16BIT		(0 << 13)
+#define MOD_BLC_8BIT		(1 << 13)
+#define MOD_BLC_24BIT		(2 << 13)
+#define MOD_BLC_MASK		(3 << 13)
+
+#define MOD_IMS_SYSMUX		(1 << 10)
+#define MOD_SLAVE		(1 << 11)
+#define MOD_TXONLY		(0 << 8)
+#define MOD_RXONLY		(1 << 8)
+#define MOD_TXRX		(2 << 8)
+#define MOD_MASK		(3 << 8)
+#define MOD_LR_LLOW		(0 << 7)
+#define MOD_LR_RLOW		(1 << 7)
+#define MOD_SDF_IIS		(0 << 5)
+#define MOD_SDF_MSB		(1 << 5)
+#define MOD_SDF_LSB		(2 << 5)
+#define MOD_SDF_MASK		(3 << 5)
+#define MOD_RCLK_256FS		(0 << 3)
+#define MOD_RCLK_512FS		(1 << 3)
+#define MOD_RCLK_384FS		(2 << 3)
+#define MOD_RCLK_768FS		(3 << 3)
+#define MOD_RCLK_MASK		(3 << 3)
+#define MOD_BCLK_32FS		(0 << 1)
+#define MOD_BCLK_48FS		(1 << 1)
+#define MOD_BCLK_16FS		(2 << 1)
+#define MOD_BCLK_24FS		(3 << 1)
+#define MOD_BCLK_MASK		(3 << 1)
+#define MOD_8BIT		(1 << 0)
+
+#define MOD_CDCLKCON		(1 << 12)
+
+#define PSR_PSREN		(1 << 15)
+
+#define FIC_TX2COUNT(x)		(((x) >>  24) & 0xf)
+#define FIC_TX1COUNT(x)		(((x) >>  16) & 0xf)
+
+#define FIC_TXFLUSH		(1 << 15)
+#define FIC_RXFLUSH		(1 << 7)
+#define FIC_TXCOUNT(x)		(((x) >>  8) & 0xf)
+#define FIC_RXCOUNT(x)		(((x) >>  0) & 0xf)
+#define FICS_TXCOUNT(x)		(((x) >>  8) & 0x7f)
+
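+/* Approximate busy-wait loop count corresponding to 't' milliseconds */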
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+struct i2s_dai {
+	/* Platform device for this DAI */
+	struct platform_device *pdev;
+	/* IOREMAP'd SFRs */
+	void __iomem	*addr;
+	/* Physical base address of SFRs */
+	u32	base;
+	/* Rate of RCLK source clock */
+	unsigned long rclk_srcrate;
+	/* Frame Clock */
+	unsigned frmclk;
+	/*
+	 * RCLK and BCLK rates specifically requested by the machine driver.
+	 * 0 indicates the CPU driver is free to choose any value.
+	 */
+	unsigned rfs, bfs;
+	/* I2S Controller's core clock */
+	struct clk *clk;
+	/* Clock for generating I2S signals */
+	struct clk *op_clk;
+	/* Array of clock names for op_clk */
+	const char **src_clk;
+	/* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */
+	struct i2s_dai *pri_dai;
+	/* Pointer to the Secondary_Fifo if it has one, NULL otherwise */
+	struct i2s_dai *sec_dai;
+#define DAI_OPENED	(1 << 0) /* Dai is opened */
+#define DAI_MANAGER	(1 << 1) /* Dai is the manager */
+	unsigned mode;
+	/* Driver for this DAI */
+	struct snd_soc_dai_driver i2s_dai_drv;
+	/* DMA parameters */
+	struct s3c_dma_params dma_playback;
+	struct s3c_dma_params dma_capture;
+	u32	quirks;
+	u32	suspend_i2smod;
+	u32	suspend_i2scon;
+	u32	suspend_i2spsr;
+};
+
+/* Lock for cross i/f checks */
+static DEFINE_SPINLOCK(lock);
+
+/* If this is the 'overlay' stereo DAI */
+static inline bool is_secondary(struct i2s_dai *i2s)
+{
+	return i2s->pri_dai ? true : false;
+}
+
+/* If operating in SoC-Slave mode */
+static inline bool is_slave(struct i2s_dai *i2s)
+{
+	return (readl(i2s->addr + I2SMOD) & MOD_SLAVE) ? true : false;
+}
+
+/* If this interface of the controller is transmitting data */
+static inline bool tx_active(struct i2s_dai *i2s)
+{
+	u32 active;
+
+	if (!i2s)
+		return false;
+
+	active = readl(i2s->addr + I2SCON);
+
+	if (is_secondary(i2s))
+		active &= CON_TXSDMA_ACTIVE;
+	else
+		active &= CON_TXDMA_ACTIVE;
+
+	return active ? true : false;
+}
+
+/* If the other interface of the controller is transmitting data */
+static inline bool other_tx_active(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	return tx_active(other);
+}
+
+/* If any interface of the controller is transmitting data */
+static inline bool any_tx_active(struct i2s_dai *i2s)
+{
+	return tx_active(i2s) || other_tx_active(i2s);
+}
+
+/* If this interface of the controller is receiving data */
+static inline bool rx_active(struct i2s_dai *i2s)
+{
+	u32 active;
+
+	if (!i2s)
+		return false;
+
+	active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
+
+	return active ? true : false;
+}
+
+/* If the other interface of the controller is receiving data */
+static inline bool other_rx_active(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	return rx_active(other);
+}
+
+/* If any interface of the controller is receiving data */
+static inline bool any_rx_active(struct i2s_dai *i2s)
+{
+	return rx_active(i2s) || other_rx_active(i2s);
+}
+
+/* If the other DAI is transmitting or receiving data */
+static inline bool other_active(struct i2s_dai *i2s)
+{
+	return other_rx_active(i2s) || other_tx_active(i2s);
+}
+
+/* If this DAI is transmitting or receiving data */
+static inline bool this_active(struct i2s_dai *i2s)
+{
+	return tx_active(i2s) || rx_active(i2s);
+}
+
+/* If the controller is active in any way */
+static inline bool any_active(struct i2s_dai *i2s)
+{
+	return this_active(i2s) || other_active(i2s);
+}
+
+static inline struct i2s_dai *to_info(struct snd_soc_dai *dai)
+{
+	return snd_soc_dai_get_drvdata(dai);
+}
+
+static inline bool is_opened(struct i2s_dai *i2s)
+{
+	if (i2s && (i2s->mode & DAI_OPENED))
+		return true;
+	else
+		return false;
+}
+
+static inline bool is_manager(struct i2s_dai *i2s)
+{
+	if (is_opened(i2s) && (i2s->mode & DAI_MANAGER))
+		return true;
+	else
+		return false;
+}
+
+/* Read RCLK of I2S (in multiples of LRCLK) */
+static inline unsigned get_rfs(struct i2s_dai *i2s)
+{
+	u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3;
+
+	switch (rfs) {
+	case 3:	return 768;
+	case 2: return 384;
+	case 1:	return 512;
+	default: return 256;
+	}
+}
+
+/* Write RCLK of I2S (in multiples of LRCLK) */
+static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
+{
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	mod &= ~MOD_RCLK_MASK;
+
+	switch (rfs) {
+	case 768:
+		mod |= MOD_RCLK_768FS;
+		break;
+	case 512:
+		mod |= MOD_RCLK_512FS;
+		break;
+	case 384:
+		mod |= MOD_RCLK_384FS;
+		break;
+	default:
+		mod |= MOD_RCLK_256FS;
+		break;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+}
+
+/* Read Bit-Clock of I2S (in multiples of LRCLK) */
+static inline unsigned get_bfs(struct i2s_dai *i2s)
+{
+	u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3;
+
+	switch (bfs) {
+	case 3: return 24;
+	case 2: return 16;
+	case 1:	return 48;
+	default: return 32;
+	}
+}
+
+/* Write Bit-Clock of I2S (in multiples of LRCLK) */
+static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
+{
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	mod &= ~MOD_BCLK_MASK;
+
+	switch (bfs) {
+	case 48:
+		mod |= MOD_BCLK_48FS;
+		break;
+	case 32:
+		mod |= MOD_BCLK_32FS;
+		break;
+	case 24:
+		mod |= MOD_BCLK_24FS;
+		break;
+	case 16:
+		mod |= MOD_BCLK_16FS;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n");
+		return;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+}
+
+/* Sample-Size */
+static inline int get_blc(struct i2s_dai *i2s)
+{
+	int blc = readl(i2s->addr + I2SMOD);
+
+	blc = (blc >> 13) & 0x3;
+
+	switch (blc) {
+	case 2: return 24;
+	case 1:	return 8;
+	default: return 16;
+	}
+}
+
+/* TX Channel Control */
+static void i2s_txctrl(struct i2s_dai *i2s, int on)
+{
+	void __iomem *addr = i2s->addr;
+	u32 con = readl(addr + I2SCON);
+	u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+
+	if (on) {
+		con |= CON_ACTIVE;
+		con &= ~CON_TXCH_PAUSE;
+
+		if (is_secondary(i2s)) {
+			con |= CON_TXSDMA_ACTIVE;
+			con &= ~CON_TXSDMA_PAUSE;
+		} else {
+			con |= CON_TXDMA_ACTIVE;
+			con &= ~CON_TXDMA_PAUSE;
+		}
+
+		if (any_rx_active(i2s))
+			mod |= MOD_TXRX;
+		else
+			mod |= MOD_TXONLY;
+	} else {
+		if (is_secondary(i2s)) {
+			con |=  CON_TXSDMA_PAUSE;
+			con &= ~CON_TXSDMA_ACTIVE;
+		} else {
+			con |=  CON_TXDMA_PAUSE;
+			con &= ~CON_TXDMA_ACTIVE;
+		}
+
+		if (other_tx_active(i2s)) {
+			writel(con, addr + I2SCON);
+			return;
+		}
+
+		con |=  CON_TXCH_PAUSE;
+
+		if (any_rx_active(i2s))
+			mod |= MOD_RXONLY;
+		else
+			con &= ~CON_ACTIVE;
+	}
+
+	writel(mod, addr + I2SMOD);
+	writel(con, addr + I2SCON);
+}
+
+/* RX Channel Control */
+static void i2s_rxctrl(struct i2s_dai *i2s, int on)
+{
+	void __iomem *addr = i2s->addr;
+	u32 con = readl(addr + I2SCON);
+	u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+
+	if (on) {
+		con |= CON_RXDMA_ACTIVE | CON_ACTIVE;
+		con &= ~(CON_RXDMA_PAUSE | CON_RXCH_PAUSE);
+
+		if (any_tx_active(i2s))
+			mod |= MOD_TXRX;
+		else
+			mod |= MOD_RXONLY;
+	} else {
+		con |=  CON_RXDMA_PAUSE | CON_RXCH_PAUSE;
+		con &= ~CON_RXDMA_ACTIVE;
+
+		if (any_tx_active(i2s))
+			mod |= MOD_TXONLY;
+		else
+			con &= ~CON_ACTIVE;
+	}
+
+	writel(mod, addr + I2SMOD);
+	writel(con, addr + I2SCON);
+}
+
+/* Flush FIFO of an interface */
+static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush)
+{
+	void __iomem *fic;
+	u32 val;
+
+	if (!i2s)
+		return;
+
+	if (is_secondary(i2s))
+		fic = i2s->addr + I2SFICS;
+	else
+		fic = i2s->addr + I2SFIC;
+
+	/* Flush the FIFO */
+	writel(readl(fic) | flush, fic);
+
+	/* Be patient */
+	val = msecs_to_loops(1) / 1000; /* 1 usec */
+	while (--val)
+		cpu_relax();
+
+	writel(readl(fic) & ~flush, fic);
+}
+
+static int i2s_set_sysclk(struct snd_soc_dai *dai,
+	  int clk_id, unsigned int rfs, int dir)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	switch (clk_id) {
+	case SAMSUNG_I2S_CDCLK:
+		/* Shouldn't matter in GATING(CLOCK_IN) mode */
+		if (dir == SND_SOC_CLOCK_IN)
+			rfs = 0;
+
+		if ((rfs && other->rfs && (other->rfs != rfs)) ||
+				(any_active(i2s) &&
+				(((dir == SND_SOC_CLOCK_IN)
+					&& !(mod & MOD_CDCLKCON)) ||
+				((dir == SND_SOC_CLOCK_OUT)
+					&& (mod & MOD_CDCLKCON))))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		}
+
+		if (dir == SND_SOC_CLOCK_IN)
+			mod |= MOD_CDCLKCON;
+		else
+			mod &= ~MOD_CDCLKCON;
+
+		i2s->rfs = rfs;
+		break;
+
+	case SAMSUNG_I2S_RCLKSRC_0: /* clock corresponding to IISMOD[10] := 0 */
+	case SAMSUNG_I2S_RCLKSRC_1: /* clock corresponding to IISMOD[10] := 1 */
+		if ((i2s->quirks & QUIRK_NO_MUXPSR)
+				|| (clk_id == SAMSUNG_I2S_RCLKSRC_0))
+			clk_id = 0;
+		else
+			clk_id = 1;
+
+		if (!any_active(i2s)) {
+			if (i2s->op_clk) {
+				if ((clk_id && !(mod & MOD_IMS_SYSMUX)) ||
+					(!clk_id && (mod & MOD_IMS_SYSMUX))) {
+					clk_disable(i2s->op_clk);
+					clk_put(i2s->op_clk);
+				} else {
+					i2s->rclk_srcrate =
+						clk_get_rate(i2s->op_clk);
+					return 0;
+				}
+			}
+
+			i2s->op_clk = clk_get(&i2s->pdev->dev,
+						i2s->src_clk[clk_id]);
+			clk_enable(i2s->op_clk);
+			i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
+
+			/* Override the other DAI's clock source and rate */
+			if (other) {
+				other->op_clk = i2s->op_clk;
+				other->rclk_srcrate = i2s->rclk_srcrate;
+			}
+		} else if ((!clk_id && (mod & MOD_IMS_SYSMUX))
+				|| (clk_id && !(mod & MOD_IMS_SYSMUX))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		} else {
+			/* Call can't be on the active DAI */
+			i2s->op_clk = other->op_clk;
+			i2s->rclk_srcrate = other->rclk_srcrate;
+			return 0;
+		}
+
+		if (clk_id == 0)
+			mod &= ~MOD_IMS_SYSMUX;
+		else
+			mod |= MOD_IMS_SYSMUX;
+		break;
+
+	default:
+		dev_err(&i2s->pdev->dev, "We don't serve that!\n");
+		return -EINVAL;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+
+	return 0;
+}
+
+static int i2s_set_fmt(struct snd_soc_dai *dai,
+	unsigned int fmt)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 mod = readl(i2s->addr + I2SMOD);
+	u32 tmp = 0;
+
+	/* Format is priority */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_RIGHT_J:
+		tmp |= MOD_LR_RLOW;
+		tmp |= MOD_SDF_MSB;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		tmp |= MOD_LR_RLOW;
+		tmp |= MOD_SDF_LSB;
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		tmp |= MOD_SDF_IIS;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Format not supported\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * INV flag is relative to the FORMAT flag - if set it simply
+	 * flips the polarity specified by the Standard
+	 */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		if (tmp & MOD_LR_RLOW)
+			tmp &= ~MOD_LR_RLOW;
+		else
+			tmp |= MOD_LR_RLOW;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Polarity not supported\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		tmp |= MOD_SLAVE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		/* Set default source clock in Master mode */
+		if (i2s->rclk_srcrate == 0)
+			i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
+							0, SND_SOC_CLOCK_IN);
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "master/slave format not supported\n");
+		return -EINVAL;
+	}
+
+	if (any_active(i2s) &&
+			((mod & (MOD_SDF_MASK | MOD_LR_RLOW
+				| MOD_SLAVE)) != tmp)) {
+		dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE);
+	mod |= tmp;
+	writel(mod, i2s->addr + I2SMOD);
+
+	return 0;
+}
+
+static int i2s_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	if (!is_secondary(i2s))
+		mod &= ~(MOD_DC2_EN | MOD_DC1_EN);
+
+	switch (params_channels(params)) {
+	case 6:
+		mod |= MOD_DC2_EN;
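+		/* fall through - 6 channels also need DC1 enabled */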
+	case 4:
+		mod |= MOD_DC1_EN;
+		break;
+	case 2:
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "%d channels not supported\n",
+				params_channels(params));
+		return -EINVAL;
+	}
+
+	if (is_secondary(i2s))
+		mod &= ~MOD_BLCS_MASK;
+	else
+		mod &= ~MOD_BLCP_MASK;
+
+	if (is_manager(i2s))
+		mod &= ~MOD_BLC_MASK;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_8BIT;
+		else
+			mod |= MOD_BLCP_8BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_8BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_16BIT;
+		else
+			mod |= MOD_BLCP_16BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_16BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_24BIT;
+		else
+			mod |= MOD_BLCP_24BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_24BIT;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Format(%d) not supported\n",
+				params_format(params));
+		return -EINVAL;
+	}
+	writel(mod, i2s->addr + I2SMOD);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_dai_set_dma_data(dai, substream,
+			(void *)&i2s->dma_playback);
+	else
+		snd_soc_dai_set_dma_data(dai, substream,
+			(void *)&i2s->dma_capture);
+
+	i2s->frmclk = params_rate(params);
+
+	return 0;
+}
+
+/* We set constraints on the substream according to the version of I2S */
+static int i2s_startup(struct snd_pcm_substream *substream,
+	  struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+
+	i2s->mode |= DAI_OPENED;
+
+	if (is_manager(other))
+		i2s->mode &= ~DAI_MANAGER;
+	else
+		i2s->mode |= DAI_MANAGER;
+
+	/* Enforce set_sysclk in Master mode */
+	i2s->rclk_srcrate = 0;
+
+	spin_unlock_irqrestore(&lock, flags);
+
+	return 0;
+}
+
+static void i2s_shutdown(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+
+	i2s->mode &= ~DAI_OPENED;
+	i2s->mode &= ~DAI_MANAGER;
+
+	if (is_opened(other))
+		other->mode |= DAI_MANAGER;
+
+	/* Reset any constraint on RFS and BFS */
+	i2s->rfs = 0;
+	i2s->bfs = 0;
+
+	spin_unlock_irqrestore(&lock, flags);
+
+	/* Gate CDCLK by default */
+	if (!is_opened(other))
+		i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
+				0, SND_SOC_CLOCK_IN);
+}
+
+static int config_setup(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned rfs, bfs, blc;
+	u32 psr;
+
+	blc = get_blc(i2s);
+
+	bfs = i2s->bfs;
+
+	if (!bfs && other)
+		bfs = other->bfs;
+
+	/* Select the least possible multiple (2) if no constraint is set */
+	if (!bfs)
+		bfs = blc * 2;
+
+	rfs = i2s->rfs;
+
+	if (!rfs && other)
+		rfs = other->rfs;
+
+	if ((rfs == 256 || rfs == 512) && (blc == 24)) {
+		dev_err(&i2s->pdev->dev,
+			"%d-RFS not supported for 24-blc\n", rfs);
+		return -EINVAL;
+	}
+
+	if (!rfs) {
+		if (bfs == 16 || bfs == 32)
+			rfs = 256;
+		else
+			rfs = 384;
+	}
+
+	/* If already setup and running */
+	if (any_active(i2s) && (get_rfs(i2s) != rfs || get_bfs(i2s) != bfs)) {
+		dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	/* Don't bother RFS, BFS & PSR in Slave mode */
+	if (is_slave(i2s))
+		return 0;
+
+	set_bfs(i2s, bfs);
+	set_rfs(i2s, rfs);
+
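+	/*
+	 * The prescaler divides the root clock so that RCLK = rclk_srcrate /
+	 * psr = frmclk * rfs. Illustrative numbers only: with rclk_srcrate =
+	 * 33868800 Hz, frmclk = 44100 Hz and rfs = 384, psr works out to 2.
+	 */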
+	if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
+		psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
+		writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
+		dev_dbg(&i2s->pdev->dev,
+			"RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n",
+				i2s->rclk_srcrate, psr, rfs, bfs);
+	}
+
+	return 0;
+}
+
+static int i2s_trigger(struct snd_pcm_substream *substream,
+	int cmd, struct snd_soc_dai *dai)
+{
+	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct i2s_dai *i2s = to_info(rtd->cpu_dai);
+	unsigned long flags;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		local_irq_save(flags);
+
+		if (config_setup(i2s)) {
+			local_irq_restore(flags);
+			return -EINVAL;
+		}
+
+		if (capture)
+			i2s_rxctrl(i2s, 1);
+		else
+			i2s_txctrl(i2s, 1);
+
+		local_irq_restore(flags);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		local_irq_save(flags);
+
+		if (capture)
+			i2s_rxctrl(i2s, 0);
+		else
+			i2s_txctrl(i2s, 0);
+
+		if (capture)
+			i2s_fifo(i2s, FIC_RXFLUSH);
+		else
+			i2s_fifo(i2s, FIC_TXFLUSH);
+
+		local_irq_restore(flags);
+		break;
+	}
+
+	return 0;
+}
+
+static int i2s_set_clkdiv(struct snd_soc_dai *dai,
+	int div_id, int div)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	switch (div_id) {
+	case SAMSUNG_I2S_DIV_BCLK:
+		if ((any_active(i2s) && div && (get_bfs(i2s) != div))
+			|| (other && other->bfs && (other->bfs != div))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		}
+		i2s->bfs = div;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev,
+			"Invalid clock divider(%d)\n", div_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static snd_pcm_sframes_t
+i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 reg = readl(i2s->addr + I2SFIC);
+	snd_pcm_sframes_t delay;
+
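+	/*
+	 * Report how many frames are still queued in the relevant FIFO; the
+	 * secondary TX FIFO has its own count register (I2SFICS).
+	 */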
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		delay = FIC_RXCOUNT(reg);
+	else if (is_secondary(i2s))
+		delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS));
+	else
+		delay = FIC_TXCOUNT(reg);
+
+	return delay;
+}
+
+#ifdef CONFIG_PM
+static int i2s_suspend(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+
+	if (dai->active) {
+		i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+		i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+		i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+	}
+
+	return 0;
+}
+
+static int i2s_resume(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+
+	if (dai->active) {
+		writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+		writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+		writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+	}
+
+	return 0;
+}
+#else
+#define i2s_suspend NULL
+#define i2s_resume  NULL
+#endif
+
+static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (other && other->clk) /* If this is probe on secondary */
+		goto probe_exit;
+
+	i2s->addr = ioremap(i2s->base, 0x100);
+	if (i2s->addr == NULL) {
+		dev_err(&i2s->pdev->dev, "cannot ioremap registers\n");
+		return -ENXIO;
+	}
+
+	i2s->clk = clk_get(&i2s->pdev->dev, "iis");
+	if (IS_ERR(i2s->clk)) {
+		dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n");
+		iounmap(i2s->addr);
+		return -ENOENT;
+	}
+	clk_enable(i2s->clk);
+
+	if (other) {
+		other->addr = i2s->addr;
+		other->clk = i2s->clk;
+	}
+
+	if (i2s->quirks & QUIRK_NEED_RSTCLR)
+		writel(CON_RSTCLR, i2s->addr + I2SCON);
+
+probe_exit:
+	/* Reset any constraint on RFS and BFS */
+	i2s->rfs = 0;
+	i2s->bfs = 0;
+	i2s_txctrl(i2s, 0);
+	i2s_rxctrl(i2s, 0);
+	i2s_fifo(i2s, FIC_TXFLUSH);
+	i2s_fifo(other, FIC_TXFLUSH);
+	i2s_fifo(i2s, FIC_RXFLUSH);
+
+	/* Gate CDCLK by default */
+	if (!is_opened(other))
+		i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
+				0, SND_SOC_CLOCK_IN);
+
+	return 0;
+}
+
+static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (!other || !other->clk) {
+
+		if (i2s->quirks & QUIRK_NEED_RSTCLR)
+			writel(0, i2s->addr + I2SCON);
+
+		clk_disable(i2s->clk);
+		clk_put(i2s->clk);
+
+		iounmap(i2s->addr);
+	}
+
+	i2s->clk = NULL;
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops samsung_i2s_dai_ops = {
+	.trigger = i2s_trigger,
+	.hw_params = i2s_hw_params,
+	.set_fmt = i2s_set_fmt,
+	.set_clkdiv = i2s_set_clkdiv,
+	.set_sysclk = i2s_set_sysclk,
+	.startup = i2s_startup,
+	.shutdown = i2s_shutdown,
+	.delay = i2s_delay,
+};
+
+#define SAMSUNG_I2S_RATES	SNDRV_PCM_RATE_8000_96000
+
+#define SAMSUNG_I2S_FMTS	(SNDRV_PCM_FMTBIT_S8 | \
+					SNDRV_PCM_FMTBIT_S16_LE | \
+					SNDRV_PCM_FMTBIT_S24_LE)
+
+static __devinit
+struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
+{
+	struct i2s_dai *i2s;
+
+	i2s = kzalloc(sizeof(struct i2s_dai), GFP_KERNEL);
+	if (i2s == NULL)
+		return NULL;
+
+	i2s->pdev = pdev;
+	i2s->pri_dai = NULL;
+	i2s->sec_dai = NULL;
+	i2s->i2s_dai_drv.symmetric_rates = 1;
+	i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe;
+	i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove;
+	i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops;
+	i2s->i2s_dai_drv.suspend = i2s_suspend;
+	i2s->i2s_dai_drv.resume = i2s_resume;
+	i2s->i2s_dai_drv.playback.channels_min = 2;
+	i2s->i2s_dai_drv.playback.channels_max = 2;
+	i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES;
+	i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
+
+	if (!sec) {
+		i2s->i2s_dai_drv.capture.channels_min = 2;
+		i2s->i2s_dai_drv.capture.channels_max = 2;
+		i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
+		i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+	} else {	/* Create a new platform_device for Secondary */
+		i2s->pdev = platform_device_register_resndata(NULL,
+				pdev->name, pdev->id + SAMSUNG_I2S_SECOFF,
+				NULL, 0, NULL, 0);
+		if (IS_ERR(i2s->pdev)) {
+			kfree(i2s);
+			return NULL;
+		}
+	}
+
+	/* Pre-assign snd_soc_dai_set_drvdata */
+	dev_set_drvdata(&i2s->pdev->dev, i2s);
+
+	return i2s;
+}
+
+static __devinit int samsung_i2s_probe(struct platform_device *pdev)
+{
+	u32 dma_pl_chan, dma_cp_chan, dma_pl_sec_chan;
+	struct i2s_dai *pri_dai, *sec_dai = NULL;
+	struct s3c_audio_pdata *i2s_pdata;
+	struct samsung_i2s *i2s_cfg;
+	struct resource *res;
+	u32 regs_base, quirks;
+	int ret = 0;
+
+	/* Called during secondary interface registration */
+	if (pdev->id >= SAMSUNG_I2S_SECOFF) {
+		sec_dai = dev_get_drvdata(&pdev->dev);
+		snd_soc_register_dai(&sec_dai->pdev->dev,
+			&sec_dai->i2s_dai_drv);
+		return 0;
+	}
+
+	i2s_pdata = pdev->dev.platform_data;
+	if (i2s_pdata == NULL) {
+		dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
+		return -ENXIO;
+	}
+	dma_pl_chan = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
+		return -ENXIO;
+	}
+	dma_cp_chan = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+	if (res)
+		dma_pl_sec_chan = res->start;
+	else
+		dma_pl_sec_chan = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
+		return -ENXIO;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res),
+							"samsung-i2s")) {
+		dev_err(&pdev->dev, "Unable to request SFR region\n");
+		return -EBUSY;
+	}
+	regs_base = res->start;
+
+	i2s_cfg = &i2s_pdata->type.i2s;
+	quirks = i2s_cfg->quirks;
+
+	pri_dai = i2s_alloc_dai(pdev, false);
+	if (!pri_dai) {
+		dev_err(&pdev->dev, "Unable to alloc I2S_pri\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
+	pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
+	pri_dai->dma_playback.client =
+		(struct s3c2410_dma_client *)&pri_dai->dma_playback;
+	pri_dai->dma_capture.client =
+		(struct s3c2410_dma_client *)&pri_dai->dma_capture;
+	pri_dai->dma_playback.channel = dma_pl_chan;
+	pri_dai->dma_capture.channel = dma_cp_chan;
+	pri_dai->src_clk = i2s_cfg->src_clk;
+	pri_dai->dma_playback.dma_size = 4;
+	pri_dai->dma_capture.dma_size = 4;
+	pri_dai->base = regs_base;
+	pri_dai->quirks = quirks;
+
+	if (quirks & QUIRK_PRI_6CHAN)
+		pri_dai->i2s_dai_drv.playback.channels_max = 6;
+
+	if (quirks & QUIRK_SEC_DAI) {
+		sec_dai = i2s_alloc_dai(pdev, true);
+		if (!sec_dai) {
+			dev_err(&pdev->dev, "Unable to alloc I2S_sec\n");
+			ret = -ENOMEM;
+			goto err2;
+		}
+		sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
+		sec_dai->dma_playback.client =
+			(struct s3c2410_dma_client *)&sec_dai->dma_playback;
+		/* Always use iDMA if no SysDMA channel is provided */
+		sec_dai->dma_playback.channel = dma_pl_sec_chan ? : -1;
+		sec_dai->src_clk = i2s_cfg->src_clk;
+		sec_dai->dma_playback.dma_size = 4;
+		sec_dai->base = regs_base;
+		sec_dai->quirks = quirks;
+		sec_dai->pri_dai = pri_dai;
+		pri_dai->sec_dai = sec_dai;
+	}
+
+	if (i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure gpio\n");
+		ret = -EINVAL;
+		goto err3;
+	}
+
+	snd_soc_register_dai(&pri_dai->pdev->dev, &pri_dai->i2s_dai_drv);
+
+	return 0;
+err3:
+	kfree(sec_dai);
+err2:
+	kfree(pri_dai);
+err1:
+	release_mem_region(regs_base, resource_size(res));
+
+	return ret;
+}
+
+static __devexit int samsung_i2s_remove(struct platform_device *pdev)
+{
+	struct i2s_dai *i2s, *other;
+
+	i2s = dev_get_drvdata(&pdev->dev);
+	other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (other) {
+		other->pri_dai = NULL;
+		other->sec_dai = NULL;
+	} else {
+		struct resource *res;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res)
+			release_mem_region(res->start, resource_size(res));
+	}
+
+	i2s->pri_dai = NULL;
+	i2s->sec_dai = NULL;
+
+	kfree(i2s);
+
+	snd_soc_unregister_dai(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver samsung_i2s_driver = {
+	.probe  = samsung_i2s_probe,
+	.remove = samsung_i2s_remove,
+	.driver = {
+		.name = "samsung-i2s",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init samsung_i2s_init(void)
+{
+	return platform_driver_register(&samsung_i2s_driver);
+}
+module_init(samsung_i2s_init);
+
+static void __exit samsung_i2s_exit(void)
+{
+	platform_driver_unregister(&samsung_i2s_driver);
+}
+module_exit(samsung_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("Samsung I2S Interface");
+MODULE_ALIAS("platform:samsung-i2s");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/i2s.h b/sound/soc/samsung/i2s.h
new file mode 100644
index 0000000..8e15f6a
--- /dev/null
+++ b/sound/soc/samsung/i2s.h
@@ -0,0 +1,29 @@
+/* sound/soc/samsung/i2s.h
+ *
+ * ALSA SoC Audio Layer - Samsung I2S Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SND_SOC_SAMSUNG_I2S_H
+#define __SND_SOC_SAMSUNG_I2S_H
+
+/*
+ * Maximum number of I2S blocks that any SoC can have.
+ * The secondary interface of a CPU DAI (if one exists)
+ * is indexed at [cpu-dai's ID + SAMSUNG_I2S_SECOFF].
+ */
+#define SAMSUNG_I2S_SECOFF	4
+
+#define SAMSUNG_I2S_DIV_BCLK	1
+
+#define SAMSUNG_I2S_RCLKSRC_0	0
+#define SAMSUNG_I2S_RCLKSRC_1	1
+#define SAMSUNG_I2S_CDCLK		2
+
+#endif /* __SND_SOC_SAMSUNG_I2S_H */
diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
new file mode 100644
index 0000000..0880252
--- /dev/null
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -0,0 +1,191 @@
+/* sound/soc/samsung/jive_wm8750.c
+ *
+ * Copyright 2007,2008 Simtec Electronics
+ *
+ * Based on sound/soc/pxa/spitz.c
+ *	Copyright 2005 Wolfson Microelectronics PLC.
+ *	Copyright 2005 Openedhand Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include <asm/mach-types.h>
+
+#include "dma.h"
+#include "s3c2412-i2s.h"
+
+#include "../codecs/wm8750.h"
+
+static const struct snd_soc_dapm_route audio_map[] = {
+	{ "Headphone Jack", NULL, "LOUT1" },
+	{ "Headphone Jack", NULL, "ROUT1" },
+	{ "Internal Speaker", NULL, "LOUT2" },
+	{ "Internal Speaker", NULL, "ROUT2" },
+	{ "LINPUT1", NULL, "Line Input" },
+	{ "RINPUT1", NULL, "Line Input" },
+};
+
+static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_SPK("Internal Speaker", NULL),
+	SND_SOC_DAPM_LINE("Line In", NULL),
+};
+
+static int jive_hw_params(struct snd_pcm_substream *substream,
+			  struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct s3c_i2sv2_rate_calc div;
+	unsigned int clk = 0;
+	int ret = 0;
+
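+	/*
+	 * Pick the codec master clock for the rate family: 12.288 MHz covers
+	 * the 48 kHz multiples, 11.2896 MHz the 44.1 kHz multiples (e.g.
+	 * 44100 * 256 = 11289600).
+	 */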
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		clk = 12288000;
+		break;
+	case 11025:
+	case 22050:
+	case 44100:
+		clk = 11289600;
+		break;
+	}
+
+	s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
+				s3c_i2sv2_get_clock(cpu_dai));
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+				  SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+				  SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_RCLK, div.fs_div);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C2412_DIV_PRESCALER,
+				     div.clk_div - 1);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops jive_ops = {
+	.hw_params	= jive_hw_params,
+};
+
+static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	/* These endpoints are not being used. */
+	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONO");
+
+	/* Add jive specific widgets */
+	err = snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
+					ARRAY_SIZE(wm8750_dapm_widgets));
+	if (err) {
+		printk(KERN_ERR "%s: failed to add widgets (%d)\n",
+		       __func__, err);
+		return err;
+	}
+
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link jive_dai = {
+	.name		= "wm8750",
+	.stream_name	= "WM8750",
+	.cpu_dai_name	= "s3c2412-i2s",
+	.codec_dai_name = "wm8750-hifi",
+	.platform_name	= "samsung-audio",
+	.codec_name	= "wm8750-codec.0-0x1a",
+	.init		= jive_wm8750_init,
+	.ops		= &jive_ops,
+};
+
+/* jive audio machine driver */
+static struct snd_soc_card snd_soc_machine_jive = {
+	.name		= "Jive",
+	.dai_link	= &jive_dai,
+	.num_links	= 1,
+};
+
+static struct platform_device *jive_snd_device;
+
+static int __init jive_init(void)
+{
+	int ret;
+
+	if (!machine_is_jive())
+		return 0;
+
+	printk(KERN_INFO "JIVE WM8750 Audio support\n");
+
+	jive_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!jive_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(jive_snd_device, &snd_soc_machine_jive);
+	ret = platform_device_add(jive_snd_device);
+
+	if (ret)
+		platform_device_put(jive_snd_device);
+
+	return ret;
+}
+
+static void __exit jive_exit(void)
+{
+	platform_device_unregister(jive_snd_device);
+}
+
+module_init(jive_init);
+module_exit(jive_exit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Jive Audio support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/lm4857.h b/sound/soc/samsung/lm4857.h
similarity index 100%
rename from sound/soc/s3c24xx/lm4857.h
rename to sound/soc/samsung/lm4857.h
diff --git a/sound/soc/samsung/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
new file mode 100644
index 0000000..a2bb34d
--- /dev/null
+++ b/sound/soc/samsung/ln2440sbc_alc650.c
@@ -0,0 +1,77 @@
+/*
+ * SoC audio for ln2440sbc
+ *
+ * Copyright 2007 KonekTel, a.s.
+ * Author: Ivan Kuten
+ *         ivan.kuten@promwad.com
+ *
+ * Heavily based on smdk2443_wm9710.c
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include "dma.h"
+#include "ac97.h"
+
+static struct snd_soc_card ln2440sbc;
+
+static struct snd_soc_dai_link ln2440sbc_dai[] = {
+{
+	.name = "AC97",
+	.stream_name = "AC97 HiFi",
+	.cpu_dai_name = "samsung-ac97",
+	.codec_dai_name = "ac97-hifi",
+	.codec_name = "ac97-codec",
+	.platform_name = "samsung-audio",
+},
+};
+
+static struct snd_soc_card ln2440sbc = {
+	.name = "LN2440SBC",
+	.dai_link = ln2440sbc_dai,
+	.num_links = ARRAY_SIZE(ln2440sbc_dai),
+};
+
+static struct platform_device *ln2440sbc_snd_ac97_device;
+
+static int __init ln2440sbc_init(void)
+{
+	int ret;
+
+	ln2440sbc_snd_ac97_device = platform_device_alloc("soc-audio", -1);
+	if (!ln2440sbc_snd_ac97_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(ln2440sbc_snd_ac97_device, &ln2440sbc);
+	ret = platform_device_add(ln2440sbc_snd_ac97_device);
+
+	if (ret)
+		platform_device_put(ln2440sbc_snd_ac97_device);
+
+	return ret;
+}
+
+static void __exit ln2440sbc_exit(void)
+{
+	platform_device_unregister(ln2440sbc_snd_ac97_device);
+}
+
+module_init(ln2440sbc_init);
+module_exit(ln2440sbc_exit);
+
+/* Module information */
+MODULE_AUTHOR("Ivan Kuten");
+MODULE_DESCRIPTION("ALSA SoC ALC650 LN2440SBC");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/neo1973_gta02_wm8753.c b/sound/soc/samsung/neo1973_gta02_wm8753.c
new file mode 100644
index 0000000..3eec610
--- /dev/null
+++ b/sound/soc/samsung/neo1973_gta02_wm8753.c
@@ -0,0 +1,504 @@
+/*
+ * neo1973_gta02_wm8753.c  --  SoC audio for Openmoko Freerunner (GTA02)
+ *
+ * Copyright 2007 Openmoko Inc
+ * Author: Graeme Gregory <graeme@openmoko.org>
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory <linux@wolfsonmicro.com>
+ * Copyright 2009 Wolfson Microelectronics
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include <asm/mach-types.h>
+
+#include <plat/regs-iis.h>
+
+#include <mach/regs-clock.h>
+#include <asm/io.h>
+#include <mach/gta02.h>
+#include "../codecs/wm8753.h"
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+
+static struct snd_soc_card neo1973_gta02;
+
+static int neo1973_gta02_hifi_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned int pll_out = 0, bclk = 0;
+	int ret = 0;
+	unsigned long iis_clkrate;
+
+	iis_clkrate = s3c24xx_i2s_get_clockrate();
+
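+	/*
+	 * Select the WM8753 PLL output for the rate family and a BCLK
+	 * divider that keeps the bit clock at 64fs for the higher rates,
+	 * e.g. 11289600 / 4 = 64 * 44100.
+	 */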
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+		pll_out = 12288000;
+		break;
+	case 48000:
+		bclk = WM8753_BCLK_DIV_4;
+		pll_out = 12288000;
+		break;
+	case 96000:
+		bclk = WM8753_BCLK_DIV_2;
+		pll_out = 12288000;
+		break;
+	case 11025:
+		bclk = WM8753_BCLK_DIV_16;
+		pll_out = 11289600;
+		break;
+	case 22050:
+		bclk = WM8753_BCLK_DIV_8;
+		pll_out = 11289600;
+		break;
+	case 44100:
+		bclk = WM8753_BCLK_DIV_4;
+		pll_out = 11289600;
+		break;
+	case 88200:
+		bclk = WM8753_BCLK_DIV_2;
+		pll_out = 11289600;
+		break;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai,
+		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai,
+		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
+		SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* set MCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+		S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	/* set codec BCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(codec_dai,
+					WM8753_BCLKDIV, bclk);
+	if (ret < 0)
+		return ret;
+
+	/* set prescaler division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+		S3C24XX_PRESCALE(4, 4));
+	if (ret < 0)
+		return ret;
+
+	/* codec PLL input is PCLK/4 */
+	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0,
+		iis_clkrate / 4, pll_out);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int neo1973_gta02_hifi_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	/* disable the PLL */
+	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0);
+}
+
+/*
+ * Neo1973 WM8753 HiFi DAI operations.
+ */
+static struct snd_soc_ops neo1973_gta02_hifi_ops = {
+	.hw_params = neo1973_gta02_hifi_hw_params,
+	.hw_free = neo1973_gta02_hifi_hw_free,
+};
+
+static int neo1973_gta02_voice_hw_params(
+	struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pcmdiv = 0;
+	int ret = 0;
+	unsigned long iis_clkrate;
+
+	iis_clkrate = s3c24xx_i2s_get_clockrate();
+
+	if (params_rate(params) != 8000)
+		return -EINVAL;
+	if (params_channels(params) != 1)
+		return -EINVAL;
+
+	pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
+
+	/* todo: gg check mode (DSP_B) against CSR datasheet */
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK,
+		12288000, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* set codec PCM division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV,
+					pcmdiv);
+	if (ret < 0)
+		return ret;
+
+	/* configure and enable PLL for 12.288MHz output */
+	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
+		iis_clkrate / 4, 12288000);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int neo1973_gta02_voice_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	/* disable the PLL */
+	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
+}
+
+static struct snd_soc_ops neo1973_gta02_voice_ops = {
+	.hw_params = neo1973_gta02_voice_hw_params,
+	.hw_free = neo1973_gta02_voice_hw_free,
+};
+
+#define LM4853_AMP 1
+#define LM4853_SPK 2
+
+static u8 lm4853_state;
+
+/* This has no effect; it exists only to maintain compatibility with
+ * existing ALSA state files.
+ */
+static int lm4853_set_state(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int val = ucontrol->value.integer.value[0];
+
+	if (val)
+		lm4853_state |= LM4853_AMP;
+	else
+		lm4853_state &= ~LM4853_AMP;
+
+	return 0;
+}
+
+static int lm4853_get_state(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = lm4853_state & LM4853_AMP;
+
+	return 0;
+}
+
+static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int val = ucontrol->value.integer.value[0];
+
+	if (val) {
+		lm4853_state |= LM4853_SPK;
+		gpio_set_value(GTA02_GPIO_HP_IN, 0);
+	} else {
+		lm4853_state &= ~LM4853_SPK;
+		gpio_set_value(GTA02_GPIO_HP_IN, 1);
+	}
+
+	return 0;
+}
+
+static int lm4853_get_spk(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = (lm4853_state & LM4853_SPK) >> 1;
+
+	return 0;
+}
+
+static int lm4853_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *k,
+			int event)
+{
+	gpio_set_value(GTA02_GPIO_AMP_SHUT, SND_SOC_DAPM_EVENT_OFF(event));
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
+	SND_SOC_DAPM_SPK("Stereo Out", lm4853_event),
+	SND_SOC_DAPM_LINE("GSM Line Out", NULL),
+	SND_SOC_DAPM_LINE("GSM Line In", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Handset Mic", NULL),
+	SND_SOC_DAPM_SPK("Handset Spk", NULL),
+};
+
+
+/* example machine audio map connections */
+static const struct snd_soc_dapm_route audio_map[] = {
+
+	/* Connections to the lm4853 amp */
+	{"Stereo Out", NULL, "LOUT1"},
+	{"Stereo Out", NULL, "ROUT1"},
+
+	/* Connections to the GSM Module */
+	{"GSM Line Out", NULL, "MONO1"},
+	{"GSM Line Out", NULL, "MONO2"},
+	{"RXP", NULL, "GSM Line In"},
+	{"RXN", NULL, "GSM Line In"},
+
+	/* Connections to Headset */
+	{"MIC1", NULL, "Mic Bias"},
+	{"Mic Bias", NULL, "Headset Mic"},
+
+	/* Call Mic */
+	{"MIC2", NULL, "Mic Bias"},
+	{"MIC2N", NULL, "Mic Bias"},
+	{"Mic Bias", NULL, "Handset Mic"},
+
+	/* Call Speaker */
+	{"Handset Spk", NULL, "LOUT2"},
+	{"Handset Spk", NULL, "ROUT2"},
+
+	/* Connect the ALC pins */
+	{"ACIN", NULL, "ACOP"},
+};
+
+static const struct snd_kcontrol_new wm8753_neo1973_gta02_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Stereo Out"),
+	SOC_DAPM_PIN_SWITCH("GSM Line Out"),
+	SOC_DAPM_PIN_SWITCH("GSM Line In"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Handset Mic"),
+	SOC_DAPM_PIN_SWITCH("Handset Spk"),
+
+	/* This has no effect; it exists only to maintain compatibility with
+	 * existing ALSA state files.
+	 */
+	SOC_SINGLE_EXT("Amp State Switch", 6, 0, 1, 0,
+		lm4853_get_state,
+		lm4853_set_state),
+	SOC_SINGLE_EXT("Amp Spk Switch", 7, 0, 1, 0,
+		lm4853_get_spk,
+		lm4853_set_spk),
+};
+
+/*
+ * This is an example machine initialisation for a wm8753 connected to a
+ * neo1973 GTA02.
+ */
+static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	/* set up NC codec pins */
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT4");
+	snd_soc_dapm_nc_pin(dapm, "LINE1");
+	snd_soc_dapm_nc_pin(dapm, "LINE2");
+
+	/* Add neo1973 gta02 specific widgets */
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
+				  ARRAY_SIZE(wm8753_dapm_widgets));
+
+	/* add neo1973 gta02 specific controls */
+	err = snd_soc_add_controls(codec, wm8753_neo1973_gta02_controls,
+		ARRAY_SIZE(wm8753_neo1973_gta02_controls));
+
+	if (err < 0)
+		return err;
+
+	/* set up neo1973 gta02 specific audio routes (audio_map) */
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+
+	/* set endpoints to default off mode */
+	snd_soc_dapm_disable_pin(dapm, "Stereo Out");
+	snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+	snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Handset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Handset Spk");
+
+	/* allow audio paths from the GSM modem to run during suspend */
+	snd_soc_dapm_ignore_suspend(dapm, "Stereo Out");
+	snd_soc_dapm_ignore_suspend(dapm, "GSM Line Out");
+	snd_soc_dapm_ignore_suspend(dapm, "GSM Line In");
+	snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Spk");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+/*
+ * BT Codec DAI
+ */
+static struct snd_soc_dai_driver bt_dai = {
+	.name = "bluetooth-dai",
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+};
+
+static struct snd_soc_dai_link neo1973_gta02_dai[] = {
+{ /* HiFi Playback - for simultaneous use with voice below */
+	.name = "WM8753",
+	.stream_name = "WM8753 HiFi",
+	.cpu_dai_name = "s3c24xx-i2s",
+	.codec_dai_name = "wm8753-hifi",
+	.init = neo1973_gta02_wm8753_init,
+	.platform_name = "samsung-audio",
+	.codec_name = "wm8753-codec.0-0x1a",
+	.ops = &neo1973_gta02_hifi_ops,
+},
+{ /* Voice via BT */
+	.name = "Bluetooth",
+	.stream_name = "Voice",
+	.cpu_dai_name = "bluetooth-dai",
+	.codec_dai_name = "wm8753-voice",
+	.ops = &neo1973_gta02_voice_ops,
+	.codec_name = "wm8753-codec.0-0x1a",
+	.platform_name = "samsung-audio",
+},
+};
+
+static struct snd_soc_card neo1973_gta02 = {
+	.name = "neo1973-gta02",
+	.dai_link = neo1973_gta02_dai,
+	.num_links = ARRAY_SIZE(neo1973_gta02_dai),
+};
+
+static struct platform_device *neo1973_gta02_snd_device;
+
+static int __init neo1973_gta02_init(void)
+{
+	int ret;
+
+	if (!machine_is_neo1973_gta02()) {
+		printk(KERN_INFO
+		       "Only GTA02 is supported by this ASoC driver\n");
+		return -ENODEV;
+	}
+
+	neo1973_gta02_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!neo1973_gta02_snd_device)
+		return -ENOMEM;
+
+	/* register bluetooth DAI here */
+	ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, &bt_dai);
+	if (ret)
+		goto err_put_device;
+
+	platform_set_drvdata(neo1973_gta02_snd_device, &neo1973_gta02);
+	ret = platform_device_add(neo1973_gta02_snd_device);
+
+	if (ret)
+		goto err_unregister_dai;
+
+	/* Initialise GPIOs used by amp */
+	ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
+	if (ret) {
+		pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
+		goto err_del_device;
+	}
+
+	ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
+	if (ret) {
+		pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_HP_IN);
+		goto err_free_gpio_hp_in;
+	}
+
+	ret = gpio_request(GTA02_GPIO_AMP_SHUT, "GTA02_AMP_SHUT");
+	if (ret) {
+		pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_AMP_SHUT);
+		goto err_free_gpio_hp_in;
+	}
+
+	ret = gpio_direction_output(GTA02_GPIO_AMP_SHUT, 1);
+	if (ret) {
+		pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_AMP_SHUT);
+		goto err_free_gpio_amp_shut;
+	}
+
+	return 0;
+
+err_free_gpio_amp_shut:
+	gpio_free(GTA02_GPIO_AMP_SHUT);
+err_free_gpio_hp_in:
+	gpio_free(GTA02_GPIO_HP_IN);
+err_del_device:
+	platform_device_del(neo1973_gta02_snd_device);
+err_unregister_dai:
+	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev);
+err_put_device:
+	platform_device_put(neo1973_gta02_snd_device);
+	return ret;
+}
+module_init(neo1973_gta02_init);
+
+static void __exit neo1973_gta02_exit(void)
+{
+	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev);
+	platform_device_unregister(neo1973_gta02_snd_device);
+	gpio_free(GTA02_GPIO_HP_IN);
+	gpio_free(GTA02_GPIO_AMP_SHUT);
+}
+module_exit(neo1973_gta02_exit);
+
+/* Module information */
+MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org");
+MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 GTA02");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
new file mode 100644
index 0000000..c7a2451
--- /dev/null
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -0,0 +1,706 @@
+/*
+ * neo1973_wm8753.c  --  SoC audio for Neo1973
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include <asm/mach-types.h>
+#include <asm/hardware/scoop.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-gpio.h>
+#include <mach/hardware.h>
+#include <linux/io.h>
+#include <mach/spi-gpio.h>
+
+#include <plat/regs-iis.h>
+
+#include "../codecs/wm8753.h"
+#include "lm4857.h"
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+
+/* define the scenarios */
+#define NEO_AUDIO_OFF			0
+#define NEO_GSM_CALL_AUDIO_HANDSET	1
+#define NEO_GSM_CALL_AUDIO_HEADSET	2
+#define NEO_GSM_CALL_AUDIO_BLUETOOTH	3
+#define NEO_STEREO_TO_SPEAKERS		4
+#define NEO_STEREO_TO_HEADPHONES	5
+#define NEO_CAPTURE_HANDSET		6
+#define NEO_CAPTURE_HEADSET		7
+#define NEO_CAPTURE_BLUETOOTH		8
+
+static struct snd_soc_card neo1973;
+static struct i2c_client *i2c;
+
+static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned int pll_out = 0, bclk = 0;
+	int ret = 0;
+	unsigned long iis_clkrate;
+
+	pr_debug("Entered %s\n", __func__);
+
+	iis_clkrate = s3c24xx_i2s_get_clockrate();
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+		pll_out = 12288000;
+		break;
+	case 48000:
+		bclk = WM8753_BCLK_DIV_4;
+		pll_out = 12288000;
+		break;
+	case 96000:
+		bclk = WM8753_BCLK_DIV_2;
+		pll_out = 12288000;
+		break;
+	case 11025:
+		bclk = WM8753_BCLK_DIV_16;
+		pll_out = 11289600;
+		break;
+	case 22050:
+		bclk = WM8753_BCLK_DIV_8;
+		pll_out = 11289600;
+		break;
+	case 44100:
+		bclk = WM8753_BCLK_DIV_4;
+		pll_out = 11289600;
+		break;
+	case 88200:
+		bclk = WM8753_BCLK_DIV_2;
+		pll_out = 11289600;
+		break;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai,
+		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai,
+		SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
+		SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* set MCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+		S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	/* set codec BCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_BCLKDIV, bclk);
+	if (ret < 0)
+		return ret;
+
+	/* set prescaler division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+		S3C24XX_PRESCALE(4, 4));
+	if (ret < 0)
+		return ret;
+
+	/* codec PLL input is PCLK/4 */
+	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0,
+		iis_clkrate / 4, pll_out);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int neo1973_hifi_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* disable the PLL */
+	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0);
+}
+
+/*
+ * Neo1973 WM8753 HiFi DAI operations.
+ */
+static struct snd_soc_ops neo1973_hifi_ops = {
+	.hw_params = neo1973_hifi_hw_params,
+	.hw_free = neo1973_hifi_hw_free,
+};
+
+static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pcmdiv = 0;
+	int ret = 0;
+	unsigned long iis_clkrate;
+
+	pr_debug("Entered %s\n", __func__);
+
+	iis_clkrate = s3c24xx_i2s_get_clockrate();
+
+	if (params_rate(params) != 8000)
+		return -EINVAL;
+	if (params_channels(params) != 1)
+		return -EINVAL;
+
+	pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
+
+	/* todo: gg check mode (DSP_B) against CSR datasheet */
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK, 12288000,
+		SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* set codec PCM division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV, pcmdiv);
+	if (ret < 0)
+		return ret;
+
+	/* configure and enable PLL for 12.288MHz output */
+	ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
+		iis_clkrate / 4, 12288000);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int neo1973_voice_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* disable the PLL */
+	return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
+}
+
+static struct snd_soc_ops neo1973_voice_ops = {
+	.hw_params = neo1973_voice_hw_params,
+	.hw_free = neo1973_voice_hw_free,
+};
+
+static int neo1973_scenario;
+
+static int neo1973_get_scenario(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = neo1973_scenario;
+	return 0;
+}
+
+static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	pr_debug("Entered %s\n", __func__);
+
+	switch (neo1973_scenario) {
+	case NEO_AUDIO_OFF:
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	case NEO_GSM_CALL_AUDIO_HANDSET:
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
+		break;
+	case NEO_GSM_CALL_AUDIO_HEADSET:
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	case NEO_GSM_CALL_AUDIO_BLUETOOTH:
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	case NEO_STEREO_TO_SPEAKERS:
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	case NEO_STEREO_TO_HEADPHONES:
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	case NEO_CAPTURE_HANDSET:
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
+		break;
+	case NEO_CAPTURE_HEADSET:
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	case NEO_CAPTURE_BLUETOOTH:
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		break;
+	default:
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+	}
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static int neo1973_set_scenario(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (neo1973_scenario == ucontrol->value.integer.value[0])
+		return 0;
+
+	neo1973_scenario = ucontrol->value.integer.value[0];
+	set_scenario_endpoints(codec, neo1973_scenario);
+	return 1;
+}
+
+static u8 lm4857_regs[4] = {0x00, 0x40, 0x80, 0xC0};
+
+static void lm4857_write_regs(void)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	if (i2c_master_send(i2c, lm4857_regs, 4) != 4)
+		printk(KERN_ERR "lm4857: i2c write failed\n");
+}
+
+static int lm4857_get_reg(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int reg = mc->reg;
+	int shift = mc->shift;
+	int mask = mc->max;
+
+	pr_debug("Entered %s\n", __func__);
+
+	ucontrol->value.integer.value[0] = (lm4857_regs[reg] >> shift) & mask;
+	return 0;
+}
+
+static int lm4857_set_reg(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int reg = mc->reg;
+	int shift = mc->shift;
+	int mask = mc->max;
+
+	if (((lm4857_regs[reg] >> shift) & mask) ==
+		ucontrol->value.integer.value[0])
+		return 0;
+
+	lm4857_regs[reg] &= ~(mask << shift);
+	lm4857_regs[reg] |= ucontrol->value.integer.value[0] << shift;
+	lm4857_write_regs();
+	return 1;
+}
+
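+/*
+ * The lower nibble of the LM4857 CTRL register selects the output mode;
+ * the driver only ever writes 0 (off) or 6..9 there, and the +/-5 offset
+ * in the helpers below maps those values onto the contiguous "Amp Mode"
+ * enum entries 1..4.
+ */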
+static int lm4857_get_mode(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	u8 value = lm4857_regs[LM4857_CTRL] & 0x0F;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (value)
+		value -= 5;
+
+	ucontrol->value.integer.value[0] = value;
+	return 0;
+}
+
+static int lm4857_set_mode(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	u8 value = ucontrol->value.integer.value[0];
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (value)
+		value += 5;
+
+	if ((lm4857_regs[LM4857_CTRL] & 0x0F) == value)
+		return 0;
+
+	lm4857_regs[LM4857_CTRL] &= 0xF0;
+	lm4857_regs[LM4857_CTRL] |= value;
+	lm4857_write_regs();
+	return 1;
+}
+
+static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
+	SND_SOC_DAPM_LINE("Audio Out", NULL),
+	SND_SOC_DAPM_LINE("GSM Line Out", NULL),
+	SND_SOC_DAPM_LINE("GSM Line In", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Call Mic", NULL),
+};
+
+
+static const struct snd_soc_dapm_route dapm_routes[] = {
+
+	/* Connections to the lm4857 amp */
+	{"Audio Out", NULL, "LOUT1"},
+	{"Audio Out", NULL, "ROUT1"},
+
+	/* Connections to the GSM Module */
+	{"GSM Line Out", NULL, "MONO1"},
+	{"GSM Line Out", NULL, "MONO2"},
+	{"RXP", NULL, "GSM Line In"},
+	{"RXN", NULL, "GSM Line In"},
+
+	/* Connections to Headset */
+	{"MIC1", NULL, "Mic Bias"},
+	{"Mic Bias", NULL, "Headset Mic"},
+
+	/* Call Mic */
+	{"MIC2", NULL, "Mic Bias"},
+	{"MIC2N", NULL, "Mic Bias"},
+	{"Mic Bias", NULL, "Call Mic"},
+
+	/* Connect the ALC pins */
+	{"ACIN", NULL, "ACOP"},
+};
+
+static const char *lm4857_mode[] = {
+	"Off",
+	"Call Speaker",
+	"Stereo Speakers",
+	"Stereo Speakers + Headphones",
+	"Headphones"
+};
+
+static const struct soc_enum lm4857_mode_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lm4857_mode), lm4857_mode),
+};
+
+static const char *neo_scenarios[] = {
+	"Off",
+	"GSM Handset",
+	"GSM Headset",
+	"GSM Bluetooth",
+	"Speakers",
+	"Headphones",
+	"Capture Handset",
+	"Capture Headset",
+	"Capture Bluetooth"
+};
+
+static const struct soc_enum neo_scenario_enum[] = {
+	SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(neo_scenarios), neo_scenarios),
+};
+
+static const DECLARE_TLV_DB_SCALE(stereo_tlv, -4050, 150, 0);
+static const DECLARE_TLV_DB_SCALE(mono_tlv, -3450, 150, 0);
+
+static const struct snd_kcontrol_new wm8753_neo1973_controls[] = {
+	SOC_SINGLE_EXT_TLV("Amp Left Playback Volume", LM4857_LVOL, 0, 31, 0,
+		lm4857_get_reg, lm4857_set_reg, stereo_tlv),
+	SOC_SINGLE_EXT_TLV("Amp Right Playback Volume", LM4857_RVOL, 0, 31, 0,
+		lm4857_get_reg, lm4857_set_reg, stereo_tlv),
+	SOC_SINGLE_EXT_TLV("Amp Mono Playback Volume", LM4857_MVOL, 0, 31, 0,
+		lm4857_get_reg, lm4857_set_reg, mono_tlv),
+	SOC_ENUM_EXT("Amp Mode", lm4857_mode_enum[0],
+		lm4857_get_mode, lm4857_set_mode),
+	SOC_ENUM_EXT("Neo Mode", neo_scenario_enum[0],
+		neo1973_get_scenario, neo1973_set_scenario),
+	SOC_SINGLE_EXT("Amp Spk 3D Playback Switch", LM4857_LVOL, 5, 1, 0,
+		lm4857_get_reg, lm4857_set_reg),
+	SOC_SINGLE_EXT("Amp HP 3d Playback Switch", LM4857_RVOL, 5, 1, 0,
+		lm4857_get_reg, lm4857_set_reg),
+	SOC_SINGLE_EXT("Amp Fast Wakeup Playback Switch", LM4857_CTRL, 5, 1, 0,
+		lm4857_get_reg, lm4857_set_reg),
+	SOC_SINGLE_EXT("Amp Earpiece 6dB Playback Switch", LM4857_CTRL, 4, 1, 0,
+		lm4857_get_reg, lm4857_set_reg),
+};
+
+/*
+ * This is an example machine initialisation for a wm8753 connected to a
+ * neo1973 II. It is missing logic to detect hp/mic insertions and logic
+ * to re-route the audio in such an event.
+ */
+static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	pr_debug("Entered %s\n", __func__);
+
+	/* set up NC codec pins */
+	snd_soc_dapm_nc_pin(dapm, "LOUT2");
+	snd_soc_dapm_nc_pin(dapm, "ROUT2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT4");
+	snd_soc_dapm_nc_pin(dapm, "LINE1");
+	snd_soc_dapm_nc_pin(dapm, "LINE2");
+
+	/* Add neo1973 specific widgets */
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
+				  ARRAY_SIZE(wm8753_dapm_widgets));
+
+	/* set endpoints to default mode */
+	set_scenario_endpoints(codec, NEO_AUDIO_OFF);
+
+	/* add neo1973 specific controls */
+	err = snd_soc_add_controls(codec, wm8753_neo1973_controls,
+				ARRAY_SIZE(wm8753_neo1973_controls));
+	if (err < 0)
+		return err;
+
+	/* set up neo1973 specific audio routes */
+	err = snd_soc_dapm_add_routes(dapm, dapm_routes,
+				      ARRAY_SIZE(dapm_routes));
+
+	snd_soc_dapm_sync(dapm);
+	return 0;
+}
+
+/*
+ * BT Codec DAI
+ */
+static struct snd_soc_dai bt_dai = {
+	.name = "bluetooth-dai",
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+	.capture = {
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
+};
+
+static struct snd_soc_dai_link neo1973_dai[] = {
+{ /* HiFi Playback - for simultaneous use with voice below */
+	.name = "WM8753",
+	.stream_name = "WM8753 HiFi",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "s3c24xx-i2s",
+	.codec_dai_name = "wm8753-hifi",
+	.codec_name = "wm8753-codec.0-0x1a",
+	.init = neo1973_wm8753_init,
+	.ops = &neo1973_hifi_ops,
+},
+{ /* Voice via BT */
+	.name = "Bluetooth",
+	.stream_name = "Voice",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "bluetooth-dai",
+	.codec_dai_name = "wm8753-voice",
+	.codec_name = "wm8753-codec.0-0x1a",
+	.ops = &neo1973_voice_ops,
+},
+};
+
+static struct snd_soc_card neo1973 = {
+	.name = "neo1973",
+	.dai_link = neo1973_dai,
+	.num_links = ARRAY_SIZE(neo1973_dai),
+};
+
+static int lm4857_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	i2c = client;
+
+	lm4857_write_regs();
+	return 0;
+}
+
+static int lm4857_i2c_remove(struct i2c_client *client)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	i2c = NULL;
+
+	return 0;
+}
+
+static u8 lm4857_state;
+
+static int lm4857_suspend(struct i2c_client *dev, pm_message_t state)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	dev_dbg(&dev->dev, "lm4857_suspend\n");
+	lm4857_state = lm4857_regs[LM4857_CTRL] & 0xf;
+	if (lm4857_state) {
+		lm4857_regs[LM4857_CTRL] &= 0xf0;
+		lm4857_write_regs();
+	}
+	return 0;
+}
+
+static int lm4857_resume(struct i2c_client *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	if (lm4857_state) {
+		lm4857_regs[LM4857_CTRL] |= (lm4857_state & 0x0f);
+		lm4857_write_regs();
+	}
+	return 0;
+}
+
+static void lm4857_shutdown(struct i2c_client *dev)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	dev_dbg(&dev->dev, "lm4857_shutdown\n");
+	lm4857_regs[LM4857_CTRL] &= 0xf0;
+	lm4857_write_regs();
+}
+
+static const struct i2c_device_id lm4857_i2c_id[] = {
+	{ "neo1973_lm4857", 0 },
+	{ }
+};
+
+static struct i2c_driver lm4857_i2c_driver = {
+	.driver = {
+		.name = "LM4857 I2C Amp",
+		.owner = THIS_MODULE,
+	},
+	.suspend =        lm4857_suspend,
+	.resume	=         lm4857_resume,
+	.shutdown =       lm4857_shutdown,
+	.probe =          lm4857_i2c_probe,
+	.remove =         lm4857_i2c_remove,
+	.id_table =       lm4857_i2c_id,
+};
+
+static struct platform_device *neo1973_snd_device;
+
+static int __init neo1973_init(void)
+{
+	int ret;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (!machine_is_neo1973_gta01()) {
+		printk(KERN_INFO
+			"Only GTA01 hardware is supported by this ASoC driver\n");
+		return -ENODEV;
+	}
+
+	neo1973_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!neo1973_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(neo1973_snd_device, &neo1973);
+	ret = platform_device_add(neo1973_snd_device);
+
+	if (ret) {
+		platform_device_put(neo1973_snd_device);
+		return ret;
+	}
+
+	ret = i2c_add_driver(&lm4857_i2c_driver);
+
+	if (ret != 0)
+		platform_device_unregister(neo1973_snd_device);
+
+	return ret;
+}
+
+static void __exit neo1973_exit(void)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	i2c_del_driver(&lm4857_i2c_driver);
+	platform_device_unregister(neo1973_snd_device);
+}
+
+module_init(neo1973_init);
+module_exit(neo1973_exit);
+
+/* Module information */
+MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org, www.openmoko.org");
+MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
new file mode 100644
index 0000000..48d0b75
--- /dev/null
+++ b/sound/soc/samsung/pcm.c
@@ -0,0 +1,552 @@
+/* sound/soc/samsung/pcm.c
+ *
+ * ALSA SoC Audio Layer - S3C PCM-Controller driver
+ *
+ * Copyright (c) 2009 Samsung Electronics Co. Ltd
+ * Author: Jaswinder Singh <jassi.brar@samsung.com>
+ * based upon I2S drivers by Ben Dooks.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include <plat/audio.h>
+#include <plat/dma.h>
+
+#include "dma.h"
+#include "pcm.h"
+
+static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
+	.name		= "PCM Stereo out"
+};
+
+static struct s3c2410_dma_client s3c_pcm_dma_client_in = {
+	.name		= "PCM Stereo in"
+};
+
+static struct s3c_dma_params s3c_pcm_stereo_out[] = {
+	[0] = {
+		.client		= &s3c_pcm_dma_client_out,
+		.dma_size	= 4,
+	},
+	[1] = {
+		.client		= &s3c_pcm_dma_client_out,
+		.dma_size	= 4,
+	},
+};
+
+static struct s3c_dma_params s3c_pcm_stereo_in[] = {
+	[0] = {
+		.client		= &s3c_pcm_dma_client_in,
+		.dma_size	= 4,
+	},
+	[1] = {
+		.client		= &s3c_pcm_dma_client_in,
+		.dma_size	= 4,
+	},
+};
+
+static struct s3c_pcm_info s3c_pcm[2];
+
+static void s3c_pcm_snd_txctrl(struct s3c_pcm_info *pcm, int on)
+{
+	void __iomem *regs = pcm->regs;
+	u32 ctl, clkctl;
+
+	clkctl = readl(regs + S3C_PCM_CLKCTL);
+	ctl = readl(regs + S3C_PCM_CTL);
+	ctl &= ~(S3C_PCM_CTL_TXDIPSTICK_MASK
+			 << S3C_PCM_CTL_TXDIPSTICK_SHIFT);
+
+	if (on) {
+		ctl |= S3C_PCM_CTL_TXDMA_EN;
+		ctl |= S3C_PCM_CTL_TXFIFO_EN;
+		ctl |= S3C_PCM_CTL_ENABLE;
+		ctl |= (0x4<<S3C_PCM_CTL_TXDIPSTICK_SHIFT);
+		clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
+	} else {
+		ctl &= ~S3C_PCM_CTL_TXDMA_EN;
+		ctl &= ~S3C_PCM_CTL_TXFIFO_EN;
+
+		if (!(ctl & S3C_PCM_CTL_RXFIFO_EN)) {
+			ctl &= ~S3C_PCM_CTL_ENABLE;
+			if (!pcm->idleclk)
+				clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
+		}
+	}
+
+	writel(clkctl, regs + S3C_PCM_CLKCTL);
+	writel(ctl, regs + S3C_PCM_CTL);
+}
+
+static void s3c_pcm_snd_rxctrl(struct s3c_pcm_info *pcm, int on)
+{
+	void __iomem *regs = pcm->regs;
+	u32 ctl, clkctl;
+
+	ctl = readl(regs + S3C_PCM_CTL);
+	clkctl = readl(regs + S3C_PCM_CLKCTL);
+	ctl &= ~(S3C_PCM_CTL_RXDIPSTICK_MASK
+			 << S3C_PCM_CTL_RXDIPSTICK_SHIFT);
+
+	if (on) {
+		ctl |= S3C_PCM_CTL_RXDMA_EN;
+		ctl |= S3C_PCM_CTL_RXFIFO_EN;
+		ctl |= S3C_PCM_CTL_ENABLE;
+		ctl |= (0x20<<S3C_PCM_CTL_RXDIPSTICK_SHIFT);
+		clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
+	} else {
+		ctl &= ~S3C_PCM_CTL_RXDMA_EN;
+		ctl &= ~S3C_PCM_CTL_RXFIFO_EN;
+
+		if (!(ctl & S3C_PCM_CTL_TXFIFO_EN)) {
+			ctl &= ~S3C_PCM_CTL_ENABLE;
+			if (!pcm->idleclk)
+				clkctl |= S3C_PCM_CLKCTL_SERCLK_EN;
+		}
+	}
+
+	writel(clkctl, regs + S3C_PCM_CLKCTL);
+	writel(ctl, regs + S3C_PCM_CTL);
+}
+
+static int s3c_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
+			       struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+	unsigned long flags;
+
+	dev_dbg(pcm->dev, "Entered %s\n", __func__);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		spin_lock_irqsave(&pcm->lock, flags);
+
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			s3c_pcm_snd_rxctrl(pcm, 1);
+		else
+			s3c_pcm_snd_txctrl(pcm, 1);
+
+		spin_unlock_irqrestore(&pcm->lock, flags);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		spin_lock_irqsave(&pcm->lock, flags);
+
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			s3c_pcm_snd_rxctrl(pcm, 0);
+		else
+			s3c_pcm_snd_txctrl(pcm, 0);
+
+		spin_unlock_irqrestore(&pcm->lock, flags);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s3c_pcm_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *socdai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+	struct s3c_dma_params *dma_data;
+	void __iomem *regs = pcm->regs;
+	struct clk *clk;
+	int sclk_div, sync_div;
+	unsigned long flags;
+	u32 clkctl;
+
+	dev_dbg(pcm->dev, "Entered %s\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dma_data = pcm->dma_playback;
+	else
+		dma_data = pcm->dma_capture;
+
+	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
+
+	/* Strictly check for sample size */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&pcm->lock, flags);
+
+	/* Get hold of the PCMSOURCE_CLK */
+	clkctl = readl(regs + S3C_PCM_CLKCTL);
+	if (clkctl & S3C_PCM_CLKCTL_SERCLKSEL_PCLK)
+		clk = pcm->pclk;
+	else
+		clk = pcm->cclk;
+
+	/* Set the SCLK divider */
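+	/* the SCLKDIV field is programmed as the divider value minus one */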
+	sclk_div = clk_get_rate(clk) / pcm->sclk_per_fs /
+					params_rate(params) / 2 - 1;
+
+	clkctl &= ~(S3C_PCM_CLKCTL_SCLKDIV_MASK
+			<< S3C_PCM_CLKCTL_SCLKDIV_SHIFT);
+	clkctl |= ((sclk_div & S3C_PCM_CLKCTL_SCLKDIV_MASK)
+			<< S3C_PCM_CLKCTL_SCLKDIV_SHIFT);
+
+	/* Set the SYNC divider */
+	sync_div = pcm->sclk_per_fs - 1;
+
+	clkctl &= ~(S3C_PCM_CLKCTL_SYNCDIV_MASK
+				<< S3C_PCM_CLKCTL_SYNCDIV_SHIFT);
+	clkctl |= ((sync_div & S3C_PCM_CLKCTL_SYNCDIV_MASK)
+				<< S3C_PCM_CLKCTL_SYNCDIV_SHIFT);
+
+	writel(clkctl, regs + S3C_PCM_CLKCTL);
+
+	spin_unlock_irqrestore(&pcm->lock, flags);
+
+	dev_dbg(pcm->dev, "PCMSOURCE_CLK-%lu SCLK=%ufs SCLK_DIV=%d SYNC_DIV=%d\n",
+				clk_get_rate(clk), pcm->sclk_per_fs,
+				sclk_div, sync_div);
+
+	return 0;
+}
+
+static int s3c_pcm_set_fmt(struct snd_soc_dai *cpu_dai,
+			       unsigned int fmt)
+{
+	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(cpu_dai);
+	void __iomem *regs = pcm->regs;
+	unsigned long flags;
+	int ret = 0;
+	u32 ctl;
+
+	dev_dbg(pcm->dev, "Entered %s\n", __func__);
+
+	spin_lock_irqsave(&pcm->lock, flags);
+
+	ctl = readl(regs + S3C_PCM_CTL);
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		/* Nothing to do, NB_NF by default */
+		break;
+	default:
+		dev_err(pcm->dev, "Unsupported clock inversion!\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		/* Nothing to do, Master by default */
+		break;
+	default:
+		dev_err(pcm->dev, "Unsupported master/slave format!\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
+	case SND_SOC_DAIFMT_CONT:
+		pcm->idleclk = 1;
+		break;
+	case SND_SOC_DAIFMT_GATED:
+		pcm->idleclk = 0;
+		break;
+	default:
+		dev_err(pcm->dev, "Invalid Clock gating request!\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_A:
+		ctl |= S3C_PCM_CTL_TXMSB_AFTER_FSYNC;
+		ctl |= S3C_PCM_CTL_RXMSB_AFTER_FSYNC;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		ctl &= ~S3C_PCM_CTL_TXMSB_AFTER_FSYNC;
+		ctl &= ~S3C_PCM_CTL_RXMSB_AFTER_FSYNC;
+		break;
+	default:
+		dev_err(pcm->dev, "Unsupported data format!\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	writel(ctl, regs + S3C_PCM_CTL);
+
+exit:
+	spin_unlock_irqrestore(&pcm->lock, flags);
+
+	return ret;
+}
+
+static int s3c_pcm_set_clkdiv(struct snd_soc_dai *cpu_dai,
+						int div_id, int div)
+{
+	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(cpu_dai);
+
+	switch (div_id) {
+	case S3C_PCM_SCLK_PER_FS:
+		pcm->sclk_per_fs = div;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s3c_pcm_set_sysclk(struct snd_soc_dai *cpu_dai,
+				  int clk_id, unsigned int freq, int dir)
+{
+	struct s3c_pcm_info *pcm = snd_soc_dai_get_drvdata(cpu_dai);
+	void __iomem *regs = pcm->regs;
+	u32 clkctl = readl(regs + S3C_PCM_CLKCTL);
+
+	switch (clk_id) {
+	case S3C_PCM_CLKSRC_PCLK:
+		clkctl |= S3C_PCM_CLKCTL_SERCLKSEL_PCLK;
+		break;
+
+	case S3C_PCM_CLKSRC_MUX:
+		clkctl &= ~S3C_PCM_CLKCTL_SERCLKSEL_PCLK;
+
+		if (clk_get_rate(pcm->cclk) != freq)
+			clk_set_rate(pcm->cclk, freq);
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	writel(clkctl, regs + S3C_PCM_CLKCTL);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops s3c_pcm_dai_ops = {
+	.set_sysclk	= s3c_pcm_set_sysclk,
+	.set_clkdiv	= s3c_pcm_set_clkdiv,
+	.trigger	= s3c_pcm_trigger,
+	.hw_params	= s3c_pcm_hw_params,
+	.set_fmt	= s3c_pcm_set_fmt,
+};
+
+#define S3C_PCM_RATES  SNDRV_PCM_RATE_8000_96000
+
+#define S3C_PCM_DAI_DECLARE			\
+	.symmetric_rates = 1,					\
+	.ops = &s3c_pcm_dai_ops,				\
+	.playback = {						\
+		.channels_min	= 2,				\
+		.channels_max	= 2,				\
+		.rates		= S3C_PCM_RATES,		\
+		.formats	= SNDRV_PCM_FMTBIT_S16_LE,	\
+	},							\
+	.capture = {						\
+		.channels_min	= 2,				\
+		.channels_max	= 2,				\
+		.rates		= S3C_PCM_RATES,		\
+		.formats	= SNDRV_PCM_FMTBIT_S16_LE,	\
+	}
+
+struct snd_soc_dai_driver s3c_pcm_dai[] = {
+	[0] = {
+		.name	= "samsung-pcm.0",
+		S3C_PCM_DAI_DECLARE,
+	},
+	[1] = {
+		.name	= "samsung-pcm.1",
+		S3C_PCM_DAI_DECLARE,
+	},
+};
+EXPORT_SYMBOL_GPL(s3c_pcm_dai);
+
+static __devinit int s3c_pcm_dev_probe(struct platform_device *pdev)
+{
+	struct s3c_pcm_info *pcm;
+	struct resource *mem_res, *dmatx_res, *dmarx_res;
+	struct s3c_audio_pdata *pcm_pdata;
+	int ret;
+
+	/* Check for valid device index */
+	if ((pdev->id < 0) || pdev->id >= ARRAY_SIZE(s3c_pcm)) {
+		dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
+		return -EINVAL;
+	}
+
+	pcm_pdata = pdev->dev.platform_data;
+
+	/* Check for availability of necessary resource */
+	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!dmatx_res) {
+		dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
+		return -ENXIO;
+	}
+
+	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (!dmarx_res) {
+		dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
+		return -ENXIO;
+	}
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		dev_err(&pdev->dev, "Unable to get register resource\n");
+		return -ENXIO;
+	}
+
+	if (pcm_pdata && pcm_pdata->cfg_gpio && pcm_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure gpio\n");
+		return -EINVAL;
+	}
+
+	pcm = &s3c_pcm[pdev->id];
+	pcm->dev = &pdev->dev;
+
+	spin_lock_init(&pcm->lock);
+
+	/* Default is 128fs */
+	pcm->sclk_per_fs = 128;
+
+	pcm->cclk = clk_get(&pdev->dev, "audio-bus");
+	if (IS_ERR(pcm->cclk)) {
+		dev_err(&pdev->dev, "failed to get audio-bus\n");
+		ret = PTR_ERR(pcm->cclk);
+		goto err1;
+	}
+	clk_enable(pcm->cclk);
+
+	/* record our pcm structure for later use in the callbacks */
+	dev_set_drvdata(&pdev->dev, pcm);
+
+	if (!request_mem_region(mem_res->start,
+				resource_size(mem_res), "samsung-pcm")) {
+		dev_err(&pdev->dev, "Unable to request register region\n");
+		ret = -EBUSY;
+		goto err2;
+	}
+
+	pcm->regs = ioremap(mem_res->start, 0x100);
+	if (pcm->regs == NULL) {
+		dev_err(&pdev->dev, "cannot ioremap registers\n");
+		ret = -ENXIO;
+		goto err3;
+	}
+
+	pcm->pclk = clk_get(&pdev->dev, "pcm");
+	if (IS_ERR(pcm->pclk)) {
+		dev_err(&pdev->dev, "failed to get pcm_clock\n");
+		ret = -ENOENT;
+		goto err4;
+	}
+	clk_enable(pcm->pclk);
+
+	ret = snd_soc_register_dai(&pdev->dev, &s3c_pcm_dai[pdev->id]);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "failed to register the dai\n");
+		goto err5;
+	}
+
+	s3c_pcm_stereo_in[pdev->id].dma_addr = mem_res->start
+							+ S3C_PCM_RXFIFO;
+	s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
+							+ S3C_PCM_TXFIFO;
+
+	s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
+	s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;
+
+	pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
+	pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
+
+	return 0;
+
+err5:
+	clk_disable(pcm->pclk);
+	clk_put(pcm->pclk);
+err4:
+	iounmap(pcm->regs);
+err3:
+	release_mem_region(mem_res->start, resource_size(mem_res));
+err2:
+	clk_disable(pcm->cclk);
+	clk_put(pcm->cclk);
+err1:
+	return ret;
+}
+
+static __devexit int s3c_pcm_dev_remove(struct platform_device *pdev)
+{
+	struct s3c_pcm_info *pcm = &s3c_pcm[pdev->id];
+	struct resource *mem_res;
+
+	snd_soc_unregister_dai(&pdev->dev);
+
+	iounmap(pcm->regs);
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem_res->start, resource_size(mem_res));
+
+	clk_disable(pcm->cclk);
+	clk_disable(pcm->pclk);
+	clk_put(pcm->pclk);
+	clk_put(pcm->cclk);
+
+	return 0;
+}
+
+static struct platform_driver s3c_pcm_driver = {
+	.probe  = s3c_pcm_dev_probe,
+	.remove = s3c_pcm_dev_remove,
+	.driver = {
+		.name = "samsung-pcm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s3c_pcm_init(void)
+{
+	return platform_driver_register(&s3c_pcm_driver);
+}
+module_init(s3c_pcm_init);
+
+static void __exit s3c_pcm_exit(void)
+{
+	platform_driver_unregister(&s3c_pcm_driver);
+}
+module_exit(s3c_pcm_exit);
+
+/* Module information */
+MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("S3C PCM Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:samsung-pcm");
diff --git a/sound/soc/samsung/pcm.h b/sound/soc/samsung/pcm.h
new file mode 100644
index 0000000..03393dc
--- /dev/null
+++ b/sound/soc/samsung/pcm.h
@@ -0,0 +1,124 @@
+/*  sound/soc/samsung/pcm.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __S3C_PCM_H
+#define __S3C_PCM_H __FILE__
+
+/* Register Offsets */
+#define S3C_PCM_CTL	(0x00)
+#define S3C_PCM_CLKCTL	(0x04)
+#define S3C_PCM_TXFIFO	(0x08)
+#define S3C_PCM_RXFIFO	(0x0C)
+#define S3C_PCM_IRQCTL	(0x10)
+#define S3C_PCM_IRQSTAT	(0x14)
+#define S3C_PCM_FIFOSTAT	(0x18)
+#define S3C_PCM_CLRINT	(0x20)
+
+/* PCM_CTL Bit-Fields */
+#define S3C_PCM_CTL_TXDIPSTICK_MASK		(0x3f)
+#define S3C_PCM_CTL_TXDIPSTICK_SHIFT	(13)
+#define S3C_PCM_CTL_RXDIPSTICK_MASK		(0x3f)
+#define S3C_PCM_CTL_RXDIPSTICK_SHIFT	(7)
+#define S3C_PCM_CTL_TXDMA_EN		(0x1<<6)
+#define S3C_PCM_CTL_RXDMA_EN		(0x1<<5)
+#define S3C_PCM_CTL_TXMSB_AFTER_FSYNC	(0x1<<4)
+#define S3C_PCM_CTL_RXMSB_AFTER_FSYNC	(0x1<<3)
+#define S3C_PCM_CTL_TXFIFO_EN		(0x1<<2)
+#define S3C_PCM_CTL_RXFIFO_EN		(0x1<<1)
+#define S3C_PCM_CTL_ENABLE			(0x1<<0)
+
+/* PCM_CLKCTL Bit-Fields */
+#define S3C_PCM_CLKCTL_SERCLK_EN		(0x1<<19)
+#define S3C_PCM_CLKCTL_SERCLKSEL_PCLK	(0x1<<18)
+#define S3C_PCM_CLKCTL_SCLKDIV_MASK		(0x1ff)
+#define S3C_PCM_CLKCTL_SYNCDIV_MASK		(0x1ff)
+#define S3C_PCM_CLKCTL_SCLKDIV_SHIFT	(9)
+#define S3C_PCM_CLKCTL_SYNCDIV_SHIFT	(0)
+
+/* PCM_TXFIFO Bit-Fields */
+#define S3C_PCM_TXFIFO_DVALID	(0x1<<16)
+#define S3C_PCM_TXFIFO_DATA_MSK	(0xffff<<0)
+
+/* PCM_RXFIFO Bit-Fields */
+#define S3C_PCM_RXFIFO_DVALID	(0x1<<16)
+#define S3C_PCM_RXFIFO_DATA_MSK	(0xffff<<0)
+
+/* PCM_IRQCTL Bit-Fields */
+#define S3C_PCM_IRQCTL_IRQEN		(0x1<<14)
+#define S3C_PCM_IRQCTL_WRDEN		(0x1<<12)
+#define S3C_PCM_IRQCTL_TXEMPTYEN		(0x1<<11)
+#define S3C_PCM_IRQCTL_TXALMSTEMPTYEN	(0x1<<10)
+#define S3C_PCM_IRQCTL_TXFULLEN		(0x1<<9)
+#define S3C_PCM_IRQCTL_TXALMSTFULLEN	(0x1<<8)
+#define S3C_PCM_IRQCTL_TXSTARVEN		(0x1<<7)
+#define S3C_PCM_IRQCTL_TXERROVRFLEN		(0x1<<6)
+#define S3C_PCM_IRQCTL_RXEMPTEN		(0x1<<5)
+#define S3C_PCM_IRQCTL_RXALMSTEMPTEN	(0x1<<4)
+#define S3C_PCM_IRQCTL_RXFULLEN		(0x1<<3)
+#define S3C_PCM_IRQCTL_RXALMSTFULLEN	(0x1<<2)
+#define S3C_PCM_IRQCTL_RXSTARVEN		(0x1<<1)
+#define S3C_PCM_IRQCTL_RXERROVRFLEN		(0x1<<0)
+
+/* PCM_IRQSTAT Bit-Fields */
+#define S3C_PCM_IRQSTAT_IRQPND		(0x1<<13)
+#define S3C_PCM_IRQSTAT_WRD_XFER		(0x1<<12)
+#define S3C_PCM_IRQSTAT_TXEMPTY		(0x1<<11)
+#define S3C_PCM_IRQSTAT_TXALMSTEMPTY	(0x1<<10)
+#define S3C_PCM_IRQSTAT_TXFULL		(0x1<<9)
+#define S3C_PCM_IRQSTAT_TXALMSTFULL		(0x1<<8)
+#define S3C_PCM_IRQSTAT_TXSTARV		(0x1<<7)
+#define S3C_PCM_IRQSTAT_TXERROVRFL		(0x1<<6)
+#define S3C_PCM_IRQSTAT_RXEMPT		(0x1<<5)
+#define S3C_PCM_IRQSTAT_RXALMSTEMPT		(0x1<<4)
+#define S3C_PCM_IRQSTAT_RXFULL		(0x1<<3)
+#define S3C_PCM_IRQSTAT_RXALMSTFULL		(0x1<<2)
+#define S3C_PCM_IRQSTAT_RXSTARV		(0x1<<1)
+#define S3C_PCM_IRQSTAT_RXERROVRFL		(0x1<<0)
+
+/* PCM_FIFOSTAT Bit-Fields */
+#define S3C_PCM_FIFOSTAT_TXCNT_MSK		(0x3f<<14)
+#define S3C_PCM_FIFOSTAT_TXFIFOEMPTY	(0x1<<13)
+#define S3C_PCM_FIFOSTAT_TXFIFOALMSTEMPTY	(0x1<<12)
+#define S3C_PCM_FIFOSTAT_TXFIFOFULL		(0x1<<11)
+#define S3C_PCM_FIFOSTAT_TXFIFOALMSTFULL	(0x1<<10)
+#define S3C_PCM_FIFOSTAT_RXCNT_MSK		(0x3f<<4)
+#define S3C_PCM_FIFOSTAT_RXFIFOEMPTY	(0x1<<3)
+#define S3C_PCM_FIFOSTAT_RXFIFOALMSTEMPTY	(0x1<<2)
+#define S3C_PCM_FIFOSTAT_RXFIFOFULL		(0x1<<1)
+#define S3C_PCM_FIFOSTAT_RXFIFOALMSTFULL	(0x1<<0)
+
+#define S3C_PCM_CLKSRC_PCLK	0
+#define S3C_PCM_CLKSRC_MUX	1
+
+#define S3C_PCM_SCLK_PER_FS	0
+
+/**
+ * struct s3c_pcm_info - S3C PCM Controller information
+ * @lock: Spinlock protecting register read-modify-write sequences.
+ * @dev: The parent device passed to us from the probe.
+ * @regs: The pointer to the device register block.
+ * @sclk_per_fs: Number of SCLK ticks per frame sync.
+ * @idleclk: Keep PCMSCLK running even when idle (no active transfer).
+ * @pclk: The "pcm" bus clock for the controller.
+ * @cclk: The "audio-bus" clock used as the alternate SCLK source.
+ * @dma_playback: DMA information for playback channel.
+ * @dma_capture: DMA information for capture channel.
+ */
+struct s3c_pcm_info {
+	spinlock_t lock;
+	struct device	*dev;
+	void __iomem	*regs;
+
+	unsigned int sclk_per_fs;
+
+	/* Whether to keep PCMSCLK enabled even when idle (no active xfer) */
+	unsigned int idleclk;
+
+	struct clk	*pclk;
+	struct clk	*cclk;
+
+	struct s3c_dma_params	*dma_playback;
+	struct s3c_dma_params	*dma_capture;
+};
+
+#endif /* __S3C_PCM_H */
diff --git a/sound/soc/s3c24xx/regs-i2s-v2.h b/sound/soc/samsung/regs-i2s-v2.h
similarity index 100%
rename from sound/soc/s3c24xx/regs-i2s-v2.h
rename to sound/soc/samsung/regs-i2s-v2.h
diff --git a/sound/soc/samsung/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
new file mode 100644
index 0000000..f400274
--- /dev/null
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -0,0 +1,320 @@
+/*
+ * rx1950_uda1380.c  --  ALSA SoC Audio Layer
+ *
+ * Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ * Based on smdk2440.c and magician.c
+ *
+ * Authors: Graeme Gregory graeme.gregory@wolfsonmicro.com
+ *          Philipp Zabel <philipp.zabel@gmail.com>
+ *          Denis Grigoriev <dgreenday@gmail.com>
+ *          Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+
+#include <sound/soc.h>
+#include <sound/uda1380.h>
+#include <sound/jack.h>
+
+#include <plat/regs-iis.h>
+
+#include <mach/regs-clock.h>
+
+#include <asm/mach-types.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "../codecs/uda1380.h"
+
+static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd);
+static int rx1950_startup(struct snd_pcm_substream *substream);
+static int rx1950_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params);
+static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event);
+
+static unsigned int rates[] = {
+	16000,
+	44100,
+	48000,
+};
+
+static struct snd_pcm_hw_constraint_list hw_rates = {
+	.count = ARRAY_SIZE(rates),
+	.list = rates,
+	.mask = 0,
+};
+
+static struct snd_soc_jack hp_jack;
+
+static struct snd_soc_jack_pin hp_jack_pins[] = {
+	{
+		.pin	= "Headphone Jack",
+		.mask	= SND_JACK_HEADPHONE,
+	},
+	{
+		.pin	= "Speaker",
+		.mask	= SND_JACK_HEADPHONE,
+		.invert	= 1,
+	},
+};
+
+static struct snd_soc_jack_gpio hp_jack_gpios[] = {
+	[0] = {
+		.gpio			= S3C2410_GPG(12),
+		.name			= "hp-gpio",
+		.report			= SND_JACK_HEADPHONE,
+		.invert			= 1,
+		.debounce_time		= 200,
+	},
+};
+
+static struct snd_soc_ops rx1950_ops = {
+	.startup	= rx1950_startup,
+	.hw_params	= rx1950_hw_params,
+};
+
+/* s3c24xx digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link rx1950_uda1380_dai[] = {
+	{
+		.name		= "uda1380",
+		.stream_name	= "UDA1380 Duplex",
+		.cpu_dai_name	= "s3c24xx-iis",
+		.codec_dai_name	= "uda1380-hifi",
+		.init		= rx1950_uda1380_init,
+		.platform_name	= "samsung-audio",
+		.codec_name	= "uda1380-codec.0-001a",
+		.ops		= &rx1950_ops,
+	},
+};
+
+static struct snd_soc_card rx1950_asoc = {
+	.name = "rx1950",
+	.dai_link = rx1950_uda1380_dai,
+	.num_links = ARRAY_SIZE(rx1950_uda1380_dai),
+};
+
+/* rx1950 machine dapm widgets */
+static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", rx1950_spk_power),
+};
+
+/* rx1950 machine audio_map */
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* headphone connected to VOUTLHP, VOUTRHP */
+	{"Headphone Jack", NULL, "VOUTLHP"},
+	{"Headphone Jack", NULL, "VOUTRHP"},
+
+	/* ext speaker connected to VOUTL, VOUTR  */
+	{"Speaker", NULL, "VOUTL"},
+	{"Speaker", NULL, "VOUTR"},
+
+	/* mic is connected to VINM */
+	{"VINM", NULL, "Mic Jack"},
+};
+
+static struct platform_device *s3c24xx_snd_device;
+
+static int rx1950_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	runtime->hw.rate_min = hw_rates.list[0];
+	runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1];
+	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
+
+	return snd_pcm_hw_constraint_list(runtime, 0,
+					SNDRV_PCM_HW_PARAM_RATE,
+					&hw_rates);
+}
+
+static int rx1950_spk_power(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		gpio_set_value(S3C2410_GPA(1), 1);
+	else
+		gpio_set_value(S3C2410_GPA(1), 0);
+
+	return 0;
+}
+
+static int rx1950_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int div;
+	int ret;
+	unsigned int rate = params_rate(params);
+	int clk_source, fs_mode;
+
+	switch (rate) {
+	case 16000:
+	case 48000:
+		clk_source = S3C24XX_CLKSRC_PCLK;
+		fs_mode = S3C2410_IISMOD_256FS;
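+		/* round the PCLK prescaler to the nearest integer */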
+		div = s3c24xx_i2s_get_clockrate() / (256 * rate);
+		if (s3c24xx_i2s_get_clockrate() % (256 * rate) > (128 * rate))
+			div++;
+		break;
+	case 44100:
+	case 88200:
+		clk_source = S3C24XX_CLKSRC_MPLL;
+		fs_mode = S3C2410_IISMOD_384FS;
+		div = 1;
+		break;
+	default:
+		printk(KERN_ERR "%s: rate %d is not supported\n",
+			__func__, rate);
+		return -EINVAL;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* select clock source */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source, rate,
+			SND_SOC_CLOCK_OUT);
+	if (ret < 0)
+		return ret;
+
+	/* set MCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+		fs_mode);
+	if (ret < 0)
+		return ret;
+
+	/* set BCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
+		S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	/* set prescaler division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+		S3C24XX_PRESCALE(div, div));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	/* Add rx1950 specific widgets */
+	err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+
+	if (err)
+		return err;
+
+	/* Set up rx1950 specific audio path connections */
+	err = snd_soc_dapm_add_routes(dapm, audio_map,
+				      ARRAY_SIZE(audio_map));
+
+	if (err)
+		return err;
+
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+		&hp_jack);
+
+	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
+		hp_jack_pins);
+
+	snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+
+	return 0;
+}
+
+static int __init rx1950_init(void)
+{
+	int ret;
+
+	if (!machine_is_rx1950())
+		return -ENODEV;
+
+	/* configure some gpios */
+	ret = gpio_request(S3C2410_GPA(1), "speaker-power");
+	if (ret)
+		goto err_gpio;
+
+	ret = gpio_direction_output(S3C2410_GPA(1), 0);
+	if (ret)
+		goto err_gpio_conf;
+
+	s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!s3c24xx_snd_device) {
+		ret = -ENOMEM;
+		goto err_plat_alloc;
+	}
+
+	platform_set_drvdata(s3c24xx_snd_device, &rx1950_asoc);
+	ret = platform_device_add(s3c24xx_snd_device);
+
+	if (ret) {
+		platform_device_put(s3c24xx_snd_device);
+		goto err_plat_add;
+	}
+
+	return 0;
+
+err_plat_add:
+err_plat_alloc:
+err_gpio_conf:
+	gpio_free(S3C2410_GPA(1));
+
+err_gpio:
+	return ret;
+}
+
+static void __exit rx1950_exit(void)
+{
+	platform_device_unregister(s3c24xx_snd_device);
+	snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+	gpio_free(S3C2410_GPA(1));
+}
+
+module_init(rx1950_init);
+module_exit(rx1950_exit);
+
+/* Module information */
+MODULE_AUTHOR("Vasily Khoruzhick");
+MODULE_DESCRIPTION("ALSA SoC RX1950");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
new file mode 100644
index 0000000..094f36e
--- /dev/null
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -0,0 +1,757 @@
+/* sound/soc/samsung/s3c-i2s-v2.c
+ *
+ * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
+ *
+ * Copyright (c) 2006 Wolfson Microelectronics PLC.
+ *	Graeme Gregory graeme.gregory@wolfsonmicro.com
+ *	linux@wolfsonmicro.com
+ *
+ * Copyright (c) 2008, 2007, 2004-2005 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <mach/dma.h>
+
+#include "regs-i2s-v2.h"
+#include "s3c-i2s-v2.h"
+#include "dma.h"
+
+#undef S3C_IIS_V2_SUPPORTED
+
+#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413) \
+	|| defined(CONFIG_CPU_S5PV210)
+#define S3C_IIS_V2_SUPPORTED
+#endif
+
+#ifdef CONFIG_PLAT_S3C64XX
+#define S3C_IIS_V2_SUPPORTED
+#endif
+
+#ifndef S3C_IIS_V2_SUPPORTED
+#error Unsupported CPU model
+#endif
+
+#define S3C2412_I2S_DEBUG_CON 0
+
+static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
+{
+	return snd_soc_dai_get_drvdata(cpu_dai);
+}
+
+#define bit_set(v, b) (((v) & (b)) ? 1 : 0)
+
+#if S3C2412_I2S_DEBUG_CON
+static void dbg_showcon(const char *fn, u32 con)
+{
+	printk(KERN_DEBUG "%s: LRI=%d, TXFEMPT=%d, RXFEMPT=%d, TXFFULL=%d, RXFFULL=%d\n", fn,
+	       bit_set(con, S3C2412_IISCON_LRINDEX),
+	       bit_set(con, S3C2412_IISCON_TXFIFO_EMPTY),
+	       bit_set(con, S3C2412_IISCON_RXFIFO_EMPTY),
+	       bit_set(con, S3C2412_IISCON_TXFIFO_FULL),
+	       bit_set(con, S3C2412_IISCON_RXFIFO_FULL));
+
+	printk(KERN_DEBUG "%s: PAUSE: TXDMA=%d, RXDMA=%d, TXCH=%d, RXCH=%d\n",
+	       fn,
+	       bit_set(con, S3C2412_IISCON_TXDMA_PAUSE),
+	       bit_set(con, S3C2412_IISCON_RXDMA_PAUSE),
+	       bit_set(con, S3C2412_IISCON_TXCH_PAUSE),
+	       bit_set(con, S3C2412_IISCON_RXCH_PAUSE));
+	printk(KERN_DEBUG "%s: ACTIVE: TXDMA=%d, RXDMA=%d, IIS=%d\n", fn,
+	       bit_set(con, S3C2412_IISCON_TXDMA_ACTIVE),
+	       bit_set(con, S3C2412_IISCON_RXDMA_ACTIVE),
+	       bit_set(con, S3C2412_IISCON_IIS_ACTIVE));
+}
+#else
+static inline void dbg_showcon(const char *fn, u32 con)
+{
+}
+#endif
+
+
+/* Turn on or off the transmission path. */
+static void s3c2412_snd_txctrl(struct s3c_i2sv2_info *i2s, int on)
+{
+	void __iomem *regs = i2s->regs;
+	u32 fic, con, mod;
+
+	pr_debug("%s(%d)\n", __func__, on);
+
+	fic = readl(regs + S3C2412_IISFIC);
+	con = readl(regs + S3C2412_IISCON);
+	mod = readl(regs + S3C2412_IISMOD);
+
+	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
+
+	if (on) {
+		con |= S3C2412_IISCON_TXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE;
+		con &= ~S3C2412_IISCON_TXDMA_PAUSE;
+		con &= ~S3C2412_IISCON_TXCH_PAUSE;
+
+		switch (mod & S3C2412_IISMOD_MODE_MASK) {
+		case S3C2412_IISMOD_MODE_TXONLY:
+		case S3C2412_IISMOD_MODE_TXRX:
+			/* do nothing, we are in the right mode */
+			break;
+
+		case S3C2412_IISMOD_MODE_RXONLY:
+			mod &= ~S3C2412_IISMOD_MODE_MASK;
+			mod |= S3C2412_IISMOD_MODE_TXRX;
+			break;
+
+		default:
+			dev_err(i2s->dev, "TXEN: Invalid MODE %x in IISMOD\n",
+				mod & S3C2412_IISMOD_MODE_MASK);
+			break;
+		}
+
+		writel(con, regs + S3C2412_IISCON);
+		writel(mod, regs + S3C2412_IISMOD);
+	} else {
+		/* Note, we do not have any indication that the FIFO problems
+		 * that the S3C2410/2440 had apply here, so we should be able
+		 * to disable the DMA and TX without resetting the FIFOS.
+		 */
+
+		con |=  S3C2412_IISCON_TXDMA_PAUSE;
+		con |=  S3C2412_IISCON_TXCH_PAUSE;
+		con &= ~S3C2412_IISCON_TXDMA_ACTIVE;
+
+		switch (mod & S3C2412_IISMOD_MODE_MASK) {
+		case S3C2412_IISMOD_MODE_TXRX:
+			mod &= ~S3C2412_IISMOD_MODE_MASK;
+			mod |= S3C2412_IISMOD_MODE_RXONLY;
+			break;
+
+		case S3C2412_IISMOD_MODE_TXONLY:
+			mod &= ~S3C2412_IISMOD_MODE_MASK;
+			con &= ~S3C2412_IISCON_IIS_ACTIVE;
+			break;
+
+		default:
+			dev_err(i2s->dev, "TXDIS: Invalid MODE %x in IISMOD\n",
+				mod & S3C2412_IISMOD_MODE_MASK);
+			break;
+		}
+
+		writel(mod, regs + S3C2412_IISMOD);
+		writel(con, regs + S3C2412_IISCON);
+	}
+
+	fic = readl(regs + S3C2412_IISFIC);
+	dbg_showcon(__func__, con);
+	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
+}
+
+static void s3c2412_snd_rxctrl(struct s3c_i2sv2_info *i2s, int on)
+{
+	void __iomem *regs = i2s->regs;
+	u32 fic, con, mod;
+
+	pr_debug("%s(%d)\n", __func__, on);
+
+	fic = readl(regs + S3C2412_IISFIC);
+	con = readl(regs + S3C2412_IISCON);
+	mod = readl(regs + S3C2412_IISMOD);
+
+	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
+
+	if (on) {
+		con |= S3C2412_IISCON_RXDMA_ACTIVE | S3C2412_IISCON_IIS_ACTIVE;
+		con &= ~S3C2412_IISCON_RXDMA_PAUSE;
+		con &= ~S3C2412_IISCON_RXCH_PAUSE;
+
+		switch (mod & S3C2412_IISMOD_MODE_MASK) {
+		case S3C2412_IISMOD_MODE_TXRX:
+		case S3C2412_IISMOD_MODE_RXONLY:
+			/* do nothing, we are in the right mode */
+			break;
+
+		case S3C2412_IISMOD_MODE_TXONLY:
+			mod &= ~S3C2412_IISMOD_MODE_MASK;
+			mod |= S3C2412_IISMOD_MODE_TXRX;
+			break;
+
+		default:
+			dev_err(i2s->dev, "RXEN: Invalid MODE %x in IISMOD\n",
+				mod & S3C2412_IISMOD_MODE_MASK);
+		}
+
+		writel(mod, regs + S3C2412_IISMOD);
+		writel(con, regs + S3C2412_IISCON);
+	} else {
+		/* See txctrl notes on FIFOs. */
+
+		con &= ~S3C2412_IISCON_RXDMA_ACTIVE;
+		con |=  S3C2412_IISCON_RXDMA_PAUSE;
+		con |=  S3C2412_IISCON_RXCH_PAUSE;
+
+		switch (mod & S3C2412_IISMOD_MODE_MASK) {
+		case S3C2412_IISMOD_MODE_RXONLY:
+			con &= ~S3C2412_IISCON_IIS_ACTIVE;
+			mod &= ~S3C2412_IISMOD_MODE_MASK;
+			break;
+
+		case S3C2412_IISMOD_MODE_TXRX:
+			mod &= ~S3C2412_IISMOD_MODE_MASK;
+			mod |= S3C2412_IISMOD_MODE_TXONLY;
+			break;
+
+		default:
+			dev_err(i2s->dev, "RXDIS: Invalid MODE %x in IISMOD\n",
+				mod & S3C2412_IISMOD_MODE_MASK);
+		}
+
+		writel(con, regs + S3C2412_IISCON);
+		writel(mod, regs + S3C2412_IISMOD);
+	}
+
+	fic = readl(regs + S3C2412_IISFIC);
+	pr_debug("%s: IIS: CON=%x MOD=%x FIC=%x\n", __func__, con, mod, fic);
+}
+
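+/* Convert a millisecond timeout into an approximate busy-wait loop count. */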
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))
+
+/*
+ * Wait for the LR signal to allow synchronisation to the L/R clock
+ * from the codec. May only be needed for slave mode.
+ */
+static int s3c2412_snd_lrsync(struct s3c_i2sv2_info *i2s)
+{
+	u32 iiscon;
+	unsigned long loops = msecs_to_loops(5);
+
+	pr_debug("Entered %s\n", __func__);
+
+	while (--loops) {
+		iiscon = readl(i2s->regs + S3C2412_IISCON);
+		if (iiscon & S3C2412_IISCON_LRINDEX)
+			break;
+
+		cpu_relax();
+	}
+
+	if (!loops) {
+		printk(KERN_ERR "%s: timeout\n", __func__);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Set S3C2412 I2S DAI format
+ */
+static int s3c2412_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
+			       unsigned int fmt)
+{
+	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	iismod = readl(i2s->regs + S3C2412_IISMOD);
+	pr_debug("hw_params r: IISMOD: %x \n", iismod);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		i2s->master = 0;
+		iismod |= S3C2412_IISMOD_SLAVE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		i2s->master = 1;
+		iismod &= ~S3C2412_IISMOD_SLAVE;
+		break;
+	default:
+		pr_err("unknown master/slave format\n");
+		return -EINVAL;
+	}
+
+	iismod &= ~S3C2412_IISMOD_SDF_MASK;
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_RIGHT_J:
+		iismod |= S3C2412_IISMOD_LR_RLOW;
+		iismod |= S3C2412_IISMOD_SDF_MSB;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iismod |= S3C2412_IISMOD_LR_RLOW;
+		iismod |= S3C2412_IISMOD_SDF_LSB;
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		iismod &= ~S3C2412_IISMOD_LR_RLOW;
+		iismod |= S3C2412_IISMOD_SDF_IIS;
+		break;
+	default:
+		pr_err("Unknown data format\n");
+		return -EINVAL;
+	}
+
+	writel(iismod, i2s->regs + S3C2412_IISMOD);
+	pr_debug("hw_params w: IISMOD: %x \n", iismod);
+	return 0;
+}
+
+static int s3c_i2sv2_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct s3c_i2sv2_info *i2s = to_info(dai);
+	struct s3c_dma_params *dma_data;
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dma_data = i2s->dma_playback;
+	else
+		dma_data = i2s->dma_capture;
+
+	snd_soc_dai_set_dma_data(dai, substream, dma_data);
+
+	/* Working copies of register */
+	iismod = readl(i2s->regs + S3C2412_IISMOD);
+	pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
+
+	iismod &= ~S3C64XX_IISMOD_BLC_MASK;
+	/* Sample size */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		iismod |= S3C64XX_IISMOD_BLC_8BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iismod |= S3C64XX_IISMOD_BLC_24BIT;
+		break;
+	}
+
+	writel(iismod, i2s->regs + S3C2412_IISMOD);
+	pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
+
+	return 0;
+}
+
+static int s3c_i2sv2_set_sysclk(struct snd_soc_dai *cpu_dai,
+				  int clk_id, unsigned int freq, int dir)
+{
+	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
+
+	pr_debug("Entered %s\n", __func__);
+	pr_debug("%s r: IISMOD: %x\n", __func__, iismod);
+
+	switch (clk_id) {
+	case S3C_I2SV2_CLKSRC_PCLK:
+		iismod &= ~S3C2412_IISMOD_IMS_SYSMUX;
+		break;
+
+	case S3C_I2SV2_CLKSRC_AUDIOBUS:
+		iismod |= S3C2412_IISMOD_IMS_SYSMUX;
+		break;
+
+	case S3C_I2SV2_CLKSRC_CDCLK:
+		/* Error if controller doesn't have the CDCLKCON bit */
+		if (!(i2s->feature & S3C_FEATURE_CDCLKCON))
+			return -EINVAL;
+
+		switch (dir) {
+		case SND_SOC_CLOCK_IN:
+			iismod |= S3C64XX_IISMOD_CDCLKCON;
+			break;
+		case SND_SOC_CLOCK_OUT:
+			iismod &= ~S3C64XX_IISMOD_CDCLKCON;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	writel(iismod, i2s->regs + S3C2412_IISMOD);
+	pr_debug("%s w: IISMOD: %x\n", __func__, iismod);
+
+	return 0;
+}
+
+static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+			       struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c_i2sv2_info *i2s = to_info(rtd->cpu_dai);
+	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
+	unsigned long irqs;
+	int ret = 0;
+	struct s3c_dma_params *dma_data =
+		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+	pr_debug("Entered %s\n", __func__);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		/* On start, ensure that the FIFOs are cleared and reset. */
+
+		writel(capture ? S3C2412_IISFIC_RXFLUSH : S3C2412_IISFIC_TXFLUSH,
+		       i2s->regs + S3C2412_IISFIC);
+
+		/* clear again, just in case */
+		writel(0x0, i2s->regs + S3C2412_IISFIC);
+
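+		/* fall through */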
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		if (!i2s->master) {
+			ret = s3c2412_snd_lrsync(i2s);
+			if (ret)
+				goto exit_err;
+		}
+
+		local_irq_save(irqs);
+
+		if (capture)
+			s3c2412_snd_rxctrl(i2s, 1);
+		else
+			s3c2412_snd_txctrl(i2s, 1);
+
+		local_irq_restore(irqs);
+
+		/*
+		 * Load the next buffer to DMA to meet the requirement
+		 * of the auto reload mechanism of S3C24XX.
+		 * This call won't bother S3C64XX.
+		 */
+		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		local_irq_save(irqs);
+
+		if (capture)
+			s3c2412_snd_rxctrl(i2s, 0);
+		else
+			s3c2412_snd_txctrl(i2s, 0);
+
+		local_irq_restore(irqs);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+exit_err:
+	return ret;
+}
+
+/*
+ * Set S3C2412 Clock dividers
+ */
+static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
+				  int div_id, int div)
+{
+	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+	u32 reg;
+
+	pr_debug("%s(%p, %d, %d)\n", __func__, cpu_dai, div_id, div);
+
+	switch (div_id) {
+	case S3C_I2SV2_DIV_BCLK:
+		switch (div) {
+		case 16:
+			div = S3C2412_IISMOD_BCLK_16FS;
+			break;
+
+		case 32:
+			div = S3C2412_IISMOD_BCLK_32FS;
+			break;
+
+		case 24:
+			div = S3C2412_IISMOD_BCLK_24FS;
+			break;
+
+		case 48:
+			div = S3C2412_IISMOD_BCLK_48FS;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+		reg = readl(i2s->regs + S3C2412_IISMOD);
+		reg &= ~S3C2412_IISMOD_BCLK_MASK;
+		writel(reg | div, i2s->regs + S3C2412_IISMOD);
+
+		pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD));
+		break;
+
+	case S3C_I2SV2_DIV_RCLK:
+		switch (div) {
+		case 256:
+			div = S3C2412_IISMOD_RCLK_256FS;
+			break;
+
+		case 384:
+			div = S3C2412_IISMOD_RCLK_384FS;
+			break;
+
+		case 512:
+			div = S3C2412_IISMOD_RCLK_512FS;
+			break;
+
+		case 768:
+			div = S3C2412_IISMOD_RCLK_768FS;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+		reg = readl(i2s->regs + S3C2412_IISMOD);
+		reg &= ~S3C2412_IISMOD_RCLK_MASK;
+		writel(reg | div, i2s->regs + S3C2412_IISMOD);
+		pr_debug("%s: MOD=%08x\n", __func__, readl(i2s->regs + S3C2412_IISMOD));
+		break;
+
+	case S3C_I2SV2_DIV_PRESCALER:
+		if (div >= 0) {
+			writel((div << 8) | S3C2412_IISPSR_PSREN,
+			       i2s->regs + S3C2412_IISPSR);
+		} else {
+			writel(0x0, i2s->regs + S3C2412_IISPSR);
+		}
+		pr_debug("%s: PSR=%08x\n", __func__, readl(i2s->regs + S3C2412_IISPSR));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static snd_pcm_sframes_t s3c2412_i2s_delay(struct snd_pcm_substream *substream,
+					   struct snd_soc_dai *dai)
+{
+	struct s3c_i2sv2_info *i2s = to_info(dai);
+	u32 reg = readl(i2s->regs + S3C2412_IISFIC);
+	snd_pcm_sframes_t delay;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay = S3C2412_IISFIC_TXCOUNT(reg);
+	else
+		delay = S3C2412_IISFIC_RXCOUNT(reg);
+
+	return delay;
+}
+
+struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai)
+{
+	struct s3c_i2sv2_info *i2s = to_info(cpu_dai);
+	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
+
+	if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
+		return i2s->iis_cclk;
+	else
+		return i2s->iis_pclk;
+}
+EXPORT_SYMBOL_GPL(s3c_i2sv2_get_clock);
+
+/* default table of all available root fs divisors */
+static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 };
+
+int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
+			    unsigned int *fstab,
+			    unsigned int rate, struct clk *clk)
+{
+	unsigned long clkrate = clk_get_rate(clk);
+	unsigned int div;
+	unsigned int fsclk;
+	unsigned int actual;
+	unsigned int fs;
+	unsigned int fsdiv;
+	signed int deviation = 0;
+	unsigned int best_fs = 0;
+	unsigned int best_div = 0;
+	unsigned int best_rate = 0;
+	unsigned int best_deviation = INT_MAX;
+
+	pr_debug("Input clock rate %ldHz\n", clkrate);
+
+	if (fstab == NULL)
+		fstab = iis_fs_tab;
+
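+	/* try each root fs divisor and keep the pair closest to the target rate */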
+	for (fs = 0; fs < ARRAY_SIZE(iis_fs_tab); fs++) {
+		fsdiv = fstab[fs];
+
+		fsclk = clkrate / fsdiv;
+		div = fsclk / rate;
+
+		if ((fsclk % rate) > (rate / 2))
+			div++;
+
+		if (div <= 1)
+			continue;
+
+		actual = clkrate / (fsdiv * div);
+		deviation = actual - rate;
+
+		printk(KERN_DEBUG "%ufs: div %u => result %u, deviation %d\n",
+		       fsdiv, div, actual, deviation);
+
+		deviation = abs(deviation);
+
+		if (deviation < best_deviation) {
+			best_fs = fsdiv;
+			best_div = div;
+			best_rate = actual;
+			best_deviation = deviation;
+		}
+
+		if (deviation == 0)
+			break;
+	}
+
+	printk(KERN_DEBUG "best: fs=%u, div=%u, rate=%u\n",
+	       best_fs, best_div, best_rate);
+
+	info->fs_div = best_fs;
+	info->clk_div = best_div;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(s3c_i2sv2_iis_calc_rate);
+
+int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+		    struct s3c_i2sv2_info *i2s,
+		    unsigned long base)
+{
+	struct device *dev = dai->dev;
+	unsigned int iismod;
+
+	i2s->dev = dev;
+
+	/* record our i2s structure for later use in the callbacks */
+	snd_soc_dai_set_drvdata(dai, i2s);
+
+	i2s->regs = ioremap(base, 0x100);
+	if (i2s->regs == NULL) {
+		dev_err(dev, "cannot ioremap registers\n");
+		return -ENXIO;
+	}
+
+	i2s->iis_pclk = clk_get(dev, "iis");
+	if (IS_ERR(i2s->iis_pclk)) {
+		dev_err(dev, "failed to get iis_clock\n");
+		iounmap(i2s->regs);
+		return -ENOENT;
+	}
+
+	clk_enable(i2s->iis_pclk);
+
+	/* Mark ourselves as in TXRX mode so we can run through our cleanup
+	 * process without warnings. */
+	iismod = readl(i2s->regs + S3C2412_IISMOD);
+	iismod |= S3C2412_IISMOD_MODE_TXRX;
+	writel(iismod, i2s->regs + S3C2412_IISMOD);
+	s3c2412_snd_txctrl(i2s, 0);
+	s3c2412_snd_rxctrl(i2s, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(s3c_i2sv2_probe);
+
+#ifdef CONFIG_PM
+static int s3c2412_i2s_suspend(struct snd_soc_dai *dai)
+{
+	struct s3c_i2sv2_info *i2s = to_info(dai);
+
+	if (dai->active) {
+		i2s->suspend_iismod = readl(i2s->regs + S3C2412_IISMOD);
+		i2s->suspend_iiscon = readl(i2s->regs + S3C2412_IISCON);
+		i2s->suspend_iispsr = readl(i2s->regs + S3C2412_IISPSR);
+
+		/* some basic suspend checks */
+
+		if (i2s->suspend_iiscon & S3C2412_IISCON_RXDMA_ACTIVE)
+			pr_warning("%s: RXDMA active?\n", __func__);
+
+		if (i2s->suspend_iiscon & S3C2412_IISCON_TXDMA_ACTIVE)
+			pr_warning("%s: TXDMA active?\n", __func__);
+
+		if (i2s->suspend_iiscon & S3C2412_IISCON_IIS_ACTIVE)
+			pr_warning("%s: IIS active\n", __func__);
+	}
+
+	return 0;
+}
+
+static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+{
+	struct s3c_i2sv2_info *i2s = to_info(dai);
+
+	pr_info("dai_active %d, IISMOD %08x, IISCON %08x\n",
+		dai->active, i2s->suspend_iismod, i2s->suspend_iiscon);
+
+	if (dai->active) {
+		writel(i2s->suspend_iiscon, i2s->regs + S3C2412_IISCON);
+		writel(i2s->suspend_iismod, i2s->regs + S3C2412_IISMOD);
+		writel(i2s->suspend_iispsr, i2s->regs + S3C2412_IISPSR);
+
+		writel(S3C2412_IISFIC_RXFLUSH | S3C2412_IISFIC_TXFLUSH,
+		       i2s->regs + S3C2412_IISFIC);
+
+		ndelay(250);
+		writel(0x0, i2s->regs + S3C2412_IISFIC);
+	}
+
+	return 0;
+}
+#else
+#define s3c2412_i2s_suspend NULL
+#define s3c2412_i2s_resume  NULL
+#endif
+
+int s3c_i2sv2_register_dai(struct device *dev, int id,
+		struct snd_soc_dai_driver *drv)
+{
+	struct snd_soc_dai_ops *ops = drv->ops;
+
+	ops->trigger = s3c2412_i2s_trigger;
+	if (!ops->hw_params)
+		ops->hw_params = s3c_i2sv2_hw_params;
+	ops->set_fmt = s3c2412_i2s_set_fmt;
+	ops->set_clkdiv = s3c2412_i2s_set_clkdiv;
+	ops->set_sysclk = s3c_i2sv2_set_sysclk;
+
+	/* Allow overriding by (for example) IISv4 */
+	if (!ops->delay)
+		ops->delay = s3c2412_i2s_delay;
+
+	drv->suspend = s3c2412_i2s_suspend;
+	drv->resume = s3c2412_i2s_resume;
+
+	return snd_soc_register_dai(dev, drv);
+}
+EXPORT_SYMBOL_GPL(s3c_i2sv2_register_dai);
+
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
new file mode 100644
index 0000000..f8297d9
--- /dev/null
+++ b/sound/soc/samsung/s3c-i2s-v2.h
@@ -0,0 +1,106 @@
+/* sound/soc/samsung/s3c-i2s-v2.h
+ *
+ * ALSA Soc Audio Layer - S3C_I2SV2 I2S driver
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+*/
+
+/* This code is the core support for the I2S block found in a number of
+ * Samsung SoC devices which is unofficially named I2S-V2. Currently the
+ * S3C2412 and the S3C64XX series use this block to provide 1 or 2 I2S
+ * channels via configurable GPIO.
+ */
+
+#ifndef __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H
+#define __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H __FILE__
+
+#define S3C_I2SV2_DIV_BCLK	(1)
+#define S3C_I2SV2_DIV_RCLK	(2)
+#define S3C_I2SV2_DIV_PRESCALER	(3)
+
+#define S3C_I2SV2_CLKSRC_PCLK		0
+#define S3C_I2SV2_CLKSRC_AUDIOBUS	1
+#define S3C_I2SV2_CLKSRC_CDCLK		2
+
+/* Set this flag for I2S controllers that have the IISMOD[12] bit to
+ * bridge/break the RCLK signal and the external Xi2sCDCLK pin.
+ */
+#define S3C_FEATURE_CDCLKCON	(1 << 0)
+
+/**
+ * struct s3c_i2sv2_info - S3C I2S-V2 information
+ * @dev: The parent device passed to us from the probe.
+ * @regs: The pointer to the device register block.
+ * @feature: Set of bit-flags indicating features of the controller.
+ * @master: True if the I2S core is the I2S bit clock master.
+ * @dma_playback: DMA information for playback channel.
+ * @dma_capture: DMA information for capture channel.
+ * @suspend_iismod: PM save for the IISMOD register.
+ * @suspend_iiscon: PM save for the IISCON register.
+ * @suspend_iispsr: PM save for the IISPSR register.
+ *
+ * This is the private driver state for the hardware associated with an
+ * I2S channel such as the register mappings and clock sources.
+ */
+struct s3c_i2sv2_info {
+	struct device	*dev;
+	void __iomem	*regs;
+
+	u32		feature;
+
+	struct clk	*iis_pclk;
+	struct clk	*iis_cclk;
+
+	unsigned char	 master;
+
+	struct s3c_dma_params	*dma_playback;
+	struct s3c_dma_params	*dma_capture;
+
+	u32		 suspend_iismod;
+	u32		 suspend_iiscon;
+	u32		 suspend_iispsr;
+
+	unsigned long	base;
+};
+
+extern struct clk *s3c_i2sv2_get_clock(struct snd_soc_dai *cpu_dai);
+
+struct s3c_i2sv2_rate_calc {
+	unsigned int	clk_div;	/* for prescaler */
+	unsigned int	fs_div;		/* for root frame clock */
+};
+
+extern int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
+				   unsigned int *fstab,
+				   unsigned int rate, struct clk *clk);
+
+/**
+ * s3c_i2sv2_probe - probe for i2s device helper
+ * @dai: The ASoC DAI structure supplied to the original probe.
+ * @i2s: Our local i2s structure to fill in.
+ * @base: The base address for the registers.
+ */
+extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+			   struct s3c_i2sv2_info *i2s,
+			   unsigned long base);
+
+/**
+ * s3c_i2sv2_register_dai - register dai with soc core
+ * @dev: DAI device
+ * @id: DAI ID
+ * @drv: The driver structure to register
+ *
+ * Fill in any missing fields and then register the given dai with the
+ * soc core.
+ */
+extern int s3c_i2sv2_register_dai(struct device *dev, int id,
+		struct snd_soc_dai_driver *drv);
+
+#endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
new file mode 100644
index 0000000..7ea8378
--- /dev/null
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -0,0 +1,212 @@
+/* sound/soc/samsung/s3c2412-i2s.c
+ *
+ * ALSA Soc Audio Layer - S3C2412 I2S driver
+ *
+ * Copyright (c) 2006 Wolfson Microelectronics PLC.
+ *	Graeme Gregory graeme.gregory@wolfsonmicro.com
+ *	linux@wolfsonmicro.com
+ *
+ * Copyright (c) 2007, 2004-2005 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <mach/hardware.h>
+
+#include <mach/regs-gpio.h>
+#include <mach/dma.h>
+
+#include "dma.h"
+#include "regs-i2s-v2.h"
+#include "s3c2412-i2s.h"
+
+#define S3C2412_I2S_DEBUG 0
+
+static struct s3c2410_dma_client s3c2412_dma_client_out = {
+	.name		= "I2S PCM Stereo out"
+};
+
+static struct s3c2410_dma_client s3c2412_dma_client_in = {
+	.name		= "I2S PCM Stereo in"
+};
+
+static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
+	.client		= &s3c2412_dma_client_out,
+	.channel	= DMACH_I2S_OUT,
+	.dma_addr	= S3C2410_PA_IIS + S3C2412_IISTXD,
+	.dma_size	= 4,
+};
+
+static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
+	.client		= &s3c2412_dma_client_in,
+	.channel	= DMACH_I2S_IN,
+	.dma_addr	= S3C2410_PA_IIS + S3C2412_IISRXD,
+	.dma_size	= 4,
+};
+
+static struct s3c_i2sv2_info s3c2412_i2s;
+
+static int s3c2412_i2s_probe(struct snd_soc_dai *dai)
+{
+	int ret;
+
+	pr_debug("Entered %s\n", __func__);
+
+	ret = s3c_i2sv2_probe(dai, &s3c2412_i2s, S3C2410_PA_IIS);
+	if (ret)
+		return ret;
+
+	s3c2412_i2s.dma_capture = &s3c2412_i2s_pcm_stereo_in;
+	s3c2412_i2s.dma_playback = &s3c2412_i2s_pcm_stereo_out;
+
+	s3c2412_i2s.iis_cclk = clk_get(dai->dev, "i2sclk");
+	if (IS_ERR(s3c2412_i2s.iis_cclk)) {
+		pr_err("failed to get i2sclk clock\n");
+		iounmap(s3c2412_i2s.regs);
+		return PTR_ERR(s3c2412_i2s.iis_cclk);
+	}
+
+	/* Set MPLL as the source for IIS CLK */
+
+	clk_set_parent(s3c2412_i2s.iis_cclk, clk_get(NULL, "mpll"));
+	clk_enable(s3c2412_i2s.iis_cclk);
+
+	s3c2412_i2s.iis_cclk = s3c2412_i2s.iis_pclk;
+
+	/* Configure the I2S pins in correct mode */
+	s3c2410_gpio_cfgpin(S3C2410_GPE0, S3C2410_GPE0_I2SLRCK);
+	s3c2410_gpio_cfgpin(S3C2410_GPE1, S3C2410_GPE1_I2SSCLK);
+	s3c2410_gpio_cfgpin(S3C2410_GPE2, S3C2410_GPE2_CDCLK);
+	s3c2410_gpio_cfgpin(S3C2410_GPE3, S3C2410_GPE3_I2SSDI);
+	s3c2410_gpio_cfgpin(S3C2410_GPE4, S3C2410_GPE4_I2SSDO);
+
+	return 0;
+}
+
+static int s3c2412_i2s_remove(struct snd_soc_dai *dai)
+{
+	clk_disable(s3c2412_i2s.iis_cclk);
+	clk_put(s3c2412_i2s.iis_cclk);
+	iounmap(s3c2412_i2s.regs);
+
+	return 0;
+}
+
+static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *cpu_dai)
+{
+	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+	struct s3c_dma_params *dma_data;
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dma_data = i2s->dma_playback;
+	else
+		dma_data = i2s->dma_capture;
+
+	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
+
+	iismod = readl(i2s->regs + S3C2412_IISMOD);
+	pr_debug("%s: r: IISMOD: %x\n", __func__, iismod);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		iismod |= S3C2412_IISMOD_8BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		iismod &= ~S3C2412_IISMOD_8BIT;
+		break;
+	}
+
+	writel(iismod, i2s->regs + S3C2412_IISMOD);
+	pr_debug("%s: w: IISMOD: %x\n", __func__, iismod);
+
+	return 0;
+}
+
+#define S3C2412_I2S_RATES \
+	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
+	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+static struct snd_soc_dai_ops s3c2412_i2s_dai_ops = {
+	.hw_params	= s3c2412_i2s_hw_params,
+};
+
+static struct snd_soc_dai_driver s3c2412_i2s_dai = {
+	.probe		= s3c2412_i2s_probe,
+	.remove	= s3c2412_i2s_remove,
+	.playback = {
+		.channels_min	= 2,
+		.channels_max	= 2,
+		.rates		= S3C2412_I2S_RATES,
+		.formats	= SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.channels_min	= 2,
+		.channels_max	= 2,
+		.rates		= S3C2412_I2S_RATES,
+		.formats	= SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.ops = &s3c2412_i2s_dai_ops,
+};
+
+static __devinit int s3c2412_iis_dev_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_dai(&pdev->dev, &s3c2412_i2s_dai);
+}
+
+static __devexit int s3c2412_iis_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_dai(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver s3c2412_iis_driver = {
+	.probe  = s3c2412_iis_dev_probe,
+	.remove = s3c2412_iis_dev_remove,
+	.driver = {
+		.name = "s3c2412-iis",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s3c2412_i2s_init(void)
+{
+	return platform_driver_register(&s3c2412_iis_driver);
+}
+module_init(s3c2412_i2s_init);
+
+static void __exit s3c2412_i2s_exit(void)
+{
+	platform_driver_unregister(&s3c2412_iis_driver);
+}
+module_exit(s3c2412_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C2412 I2S SoC Interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c2412-iis");
diff --git a/sound/soc/samsung/s3c2412-i2s.h b/sound/soc/samsung/s3c2412-i2s.h
new file mode 100644
index 0000000..02ad579
--- /dev/null
+++ b/sound/soc/samsung/s3c2412-i2s.h
@@ -0,0 +1,27 @@
+/* sound/soc/samsung/s3c2412-i2s.c
+ *
+ * ALSA Soc Audio Layer - S3C2412 I2S driver
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+*/
+
+#ifndef __SND_SOC_S3C24XX_S3C2412_I2S_H
+#define __SND_SOC_S3C24XX_S3C2412_I2S_H __FILE__
+
+#include "s3c-i2s-v2.h"
+
+#define S3C2412_DIV_BCLK	S3C_I2SV2_DIV_BCLK
+#define S3C2412_DIV_RCLK	S3C_I2SV2_DIV_RCLK
+#define S3C2412_DIV_PRESCALER	S3C_I2SV2_DIV_PRESCALER
+
+#define S3C2412_CLKSRC_PCLK	S3C_I2SV2_CLKSRC_PCLK
+#define S3C2412_CLKSRC_I2SCLK	S3C_I2SV2_CLKSRC_AUDIOBUS
+
+#endif /* __SND_SOC_S3C24XX_S3C2412_I2S_H */
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
new file mode 100644
index 0000000..13e41ed
--- /dev/null
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -0,0 +1,519 @@
+/*
+ * s3c24xx-i2s.c  --  ALSA Soc Audio Layer
+ *
+ * (c) 2006 Wolfson Microelectronics PLC.
+ * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ * Copyright 2004-2005 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-clock.h>
+
+#include <asm/dma.h>
+#include <mach/dma.h>
+
+#include <plat/regs-iis.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+
+static struct s3c2410_dma_client s3c24xx_dma_client_out = {
+	.name = "I2S PCM Stereo out"
+};
+
+static struct s3c2410_dma_client s3c24xx_dma_client_in = {
+	.name = "I2S PCM Stereo in"
+};
+
+static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
+	.client		= &s3c24xx_dma_client_out,
+	.channel	= DMACH_I2S_OUT,
+	.dma_addr	= S3C2410_PA_IIS + S3C2410_IISFIFO,
+	.dma_size	= 2,
+};
+
+static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
+	.client		= &s3c24xx_dma_client_in,
+	.channel	= DMACH_I2S_IN,
+	.dma_addr	= S3C2410_PA_IIS + S3C2410_IISFIFO,
+	.dma_size	= 2,
+};
+
+struct s3c24xx_i2s_info {
+	void __iomem	*regs;
+	struct clk	*iis_clk;
+	u32		iiscon;
+	u32		iismod;
+	u32		iisfcon;
+	u32		iispsr;
+};
+static struct s3c24xx_i2s_info s3c24xx_i2s;
+
+static void s3c24xx_snd_txctrl(int on)
+{
+	u32 iisfcon;
+	u32 iiscon;
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
+	iiscon  = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
+	iismod  = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
+
+	pr_debug("r: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
+
+	if (on) {
+		iisfcon |= S3C2410_IISFCON_TXDMA | S3C2410_IISFCON_TXENABLE;
+		iiscon  |= S3C2410_IISCON_TXDMAEN | S3C2410_IISCON_IISEN;
+		iiscon  &= ~S3C2410_IISCON_TXIDLE;
+		iismod  |= S3C2410_IISMOD_TXMODE;
+
+		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
+		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
+		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
+	} else {
+		/* note, we have to disable the FIFOs otherwise bad things
+		 * seem to happen when the DMA stops. According to the
+		 * Samsung supplied kernel, this should allow the DMA
+		 * engine and FIFOs to reset. If this isn't allowed, the
+		 * DMA engine will simply freeze randomly.
+		 */
+
+		iisfcon &= ~S3C2410_IISFCON_TXENABLE;
+		iisfcon &= ~S3C2410_IISFCON_TXDMA;
+		iiscon  |=  S3C2410_IISCON_TXIDLE;
+		iiscon  &= ~S3C2410_IISCON_TXDMAEN;
+		iismod  &= ~S3C2410_IISMOD_TXMODE;
+
+		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
+		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
+		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
+	}
+
+	pr_debug("w: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
+}
+
+static void s3c24xx_snd_rxctrl(int on)
+{
+	u32 iisfcon;
+	u32 iiscon;
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
+	iiscon  = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
+	iismod  = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
+
+	pr_debug("r: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
+
+	if (on) {
+		iisfcon |= S3C2410_IISFCON_RXDMA | S3C2410_IISFCON_RXENABLE;
+		iiscon  |= S3C2410_IISCON_RXDMAEN | S3C2410_IISCON_IISEN;
+		iiscon  &= ~S3C2410_IISCON_RXIDLE;
+		iismod  |= S3C2410_IISMOD_RXMODE;
+
+		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
+		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
+		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
+	} else {
+		/* note, we have to disable the FIFOs otherwise bad things
+		 * seem to happen when the DMA stops. According to the
+		 * Samsung supplied kernel, this should allow the DMA
+		 * engine and FIFOs to reset. If this isn't allowed, the
+		 * DMA engine will simply freeze randomly.
+		 */
+
+		iisfcon &= ~S3C2410_IISFCON_RXENABLE;
+		iisfcon &= ~S3C2410_IISFCON_RXDMA;
+		iiscon  |= S3C2410_IISCON_RXIDLE;
+		iiscon  &= ~S3C2410_IISCON_RXDMAEN;
+		iismod  &= ~S3C2410_IISMOD_RXMODE;
+
+		writel(iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
+		writel(iiscon,  s3c24xx_i2s.regs + S3C2410_IISCON);
+		writel(iismod,  s3c24xx_i2s.regs + S3C2410_IISMOD);
+	}
+
+	pr_debug("w: IISCON: %x IISMOD: %x IISFCON: %x\n", iiscon, iismod, iisfcon);
+}
+
+/*
+ * Wait for the LR signal to allow synchronisation to the L/R clock
+ * from the codec. May only be needed for slave mode.
+ */
+static int s3c24xx_snd_lrsync(void)
+{
+	u32 iiscon;
+	int timeout = 50; /* 5ms */
+
+	pr_debug("Entered %s\n", __func__);
+
+	while (1) {
+		iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
+		if (iiscon & S3C2410_IISCON_LRINDEX)
+			break;
+
+		if (!timeout--)
+			return -ETIMEDOUT;
+		udelay(100);
+	}
+
+	return 0;
+}
+
+/*
+ * Check whether CPU is the master or slave
+ */
+static inline int s3c24xx_snd_is_clkmaster(void)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	return (readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & S3C2410_IISMOD_SLAVE) ? 0:1;
+}
+
+/*
+ * Set S3C24xx I2S DAI format
+ */
+static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
+		unsigned int fmt)
+{
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
+	pr_debug("hw_params r: IISMOD: %x \n", iismod);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		iismod |= S3C2410_IISMOD_SLAVE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		iismod &= ~S3C2410_IISMOD_SLAVE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_LEFT_J:
+		iismod |= S3C2410_IISMOD_MSB;
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		iismod &= ~S3C2410_IISMOD_MSB;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
+	pr_debug("hw_params w: IISMOD: %x \n", iismod);
+	return 0;
+}
+
+static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c_dma_params *dma_data;
+	u32 iismod;
+
+	pr_debug("Entered %s\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dma_data = &s3c24xx_i2s_pcm_stereo_out;
+	else
+		dma_data = &s3c24xx_i2s_pcm_stereo_in;
+
+	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
+
+	/* Working copies of register */
+	iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
+	pr_debug("hw_params r: IISMOD: %x\n", iismod);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		iismod &= ~S3C2410_IISMOD_16BIT;
+		dma_data->dma_size = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		iismod |= S3C2410_IISMOD_16BIT;
+		dma_data->dma_size = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
+	pr_debug("hw_params w: IISMOD: %x\n", iismod);
+	return 0;
+}
+
+static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+			       struct snd_soc_dai *dai)
+{
+	int ret = 0;
+	struct s3c_dma_params *dma_data =
+		snd_soc_dai_get_dma_data(dai, substream);
+
+	pr_debug("Entered %s\n", __func__);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		if (!s3c24xx_snd_is_clkmaster()) {
+			ret = s3c24xx_snd_lrsync();
+			if (ret)
+				goto exit_err;
+		}
+
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			s3c24xx_snd_rxctrl(1);
+		else
+			s3c24xx_snd_txctrl(1);
+
+		s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+			s3c24xx_snd_rxctrl(0);
+		else
+			s3c24xx_snd_txctrl(0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+exit_err:
+	return ret;
+}
+
+/*
+ * Set S3C24xx Clock source
+ */
+static int s3c24xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
+	int clk_id, unsigned int freq, int dir)
+{
+	u32 iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
+
+	pr_debug("Entered %s\n", __func__);
+
+	iismod &= ~S3C2440_IISMOD_MPLL;
+
+	switch (clk_id) {
+	case S3C24XX_CLKSRC_PCLK:
+		break;
+	case S3C24XX_CLKSRC_MPLL:
+		iismod |= S3C2440_IISMOD_MPLL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
+	return 0;
+}
+
+/*
+ * Set S3C24xx Clock dividers
+ */
+static int s3c24xx_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
+	int div_id, int div)
+{
+	u32 reg;
+
+	pr_debug("Entered %s\n", __func__);
+
+	switch (div_id) {
+	case S3C24XX_DIV_BCLK:
+		reg = readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & ~S3C2410_IISMOD_FS_MASK;
+		writel(reg | div, s3c24xx_i2s.regs + S3C2410_IISMOD);
+		break;
+	case S3C24XX_DIV_MCLK:
+		reg = readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & ~(S3C2410_IISMOD_384FS);
+		writel(reg | div, s3c24xx_i2s.regs + S3C2410_IISMOD);
+		break;
+	case S3C24XX_DIV_PRESCALER:
+		writel(div, s3c24xx_i2s.regs + S3C2410_IISPSR);
+		reg = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
+		writel(reg | S3C2410_IISCON_PSCEN, s3c24xx_i2s.regs + S3C2410_IISCON);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * To avoid duplicating clock code, allow machine driver to
+ * get the clockrate from here.
+ */
+u32 s3c24xx_i2s_get_clockrate(void)
+{
+	return clk_get_rate(s3c24xx_i2s.iis_clk);
+}
+EXPORT_SYMBOL_GPL(s3c24xx_i2s_get_clockrate);
+
+static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	s3c24xx_i2s.regs = ioremap(S3C2410_PA_IIS, 0x100);
+	if (s3c24xx_i2s.regs == NULL)
+		return -ENXIO;
+
+	s3c24xx_i2s.iis_clk = clk_get(dai->dev, "iis");
+	if (s3c24xx_i2s.iis_clk == NULL) {
+		pr_err("failed to get iis_clock\n");
+		iounmap(s3c24xx_i2s.regs);
+		return -ENODEV;
+	}
+	clk_enable(s3c24xx_i2s.iis_clk);
+
+	/* Configure the I2S pins in correct mode */
+	s3c2410_gpio_cfgpin(S3C2410_GPE0, S3C2410_GPE0_I2SLRCK);
+	s3c2410_gpio_cfgpin(S3C2410_GPE1, S3C2410_GPE1_I2SSCLK);
+	s3c2410_gpio_cfgpin(S3C2410_GPE2, S3C2410_GPE2_CDCLK);
+	s3c2410_gpio_cfgpin(S3C2410_GPE3, S3C2410_GPE3_I2SSDI);
+	s3c2410_gpio_cfgpin(S3C2410_GPE4, S3C2410_GPE4_I2SSDO);
+
+	writel(S3C2410_IISCON_IISEN, s3c24xx_i2s.regs + S3C2410_IISCON);
+
+	s3c24xx_snd_txctrl(0);
+	s3c24xx_snd_rxctrl(0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3c24xx_i2s_suspend(struct snd_soc_dai *cpu_dai)
+{
+	pr_debug("Entered %s\n", __func__);
+
+	s3c24xx_i2s.iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
+	s3c24xx_i2s.iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
+	s3c24xx_i2s.iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
+	s3c24xx_i2s.iispsr = readl(s3c24xx_i2s.regs + S3C2410_IISPSR);
+
+	clk_disable(s3c24xx_i2s.iis_clk);
+
+	return 0;
+}
+
+static int s3c24xx_i2s_resume(struct snd_soc_dai *cpu_dai)
+{
+	pr_debug("Entered %s\n", __func__);
+	clk_enable(s3c24xx_i2s.iis_clk);
+
+	writel(s3c24xx_i2s.iiscon, s3c24xx_i2s.regs + S3C2410_IISCON);
+	writel(s3c24xx_i2s.iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
+	writel(s3c24xx_i2s.iisfcon, s3c24xx_i2s.regs + S3C2410_IISFCON);
+	writel(s3c24xx_i2s.iispsr, s3c24xx_i2s.regs + S3C2410_IISPSR);
+
+	return 0;
+}
+#else
+#define s3c24xx_i2s_suspend NULL
+#define s3c24xx_i2s_resume NULL
+#endif
+
+
+#define S3C24XX_I2S_RATES \
+	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
+	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+static struct snd_soc_dai_ops s3c24xx_i2s_dai_ops = {
+	.trigger	= s3c24xx_i2s_trigger,
+	.hw_params	= s3c24xx_i2s_hw_params,
+	.set_fmt	= s3c24xx_i2s_set_fmt,
+	.set_clkdiv	= s3c24xx_i2s_set_clkdiv,
+	.set_sysclk	= s3c24xx_i2s_set_sysclk,
+};
+
+static struct snd_soc_dai_driver s3c24xx_i2s_dai = {
+	.probe = s3c24xx_i2s_probe,
+	.suspend = s3c24xx_i2s_suspend,
+	.resume = s3c24xx_i2s_resume,
+	.playback = {
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = S3C24XX_I2S_RATES,
+		.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,},
+	.capture = {
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = S3C24XX_I2S_RATES,
+		.formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE,},
+	.ops = &s3c24xx_i2s_dai_ops,
+};
+
+static __devinit int s3c24xx_iis_dev_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_dai(&pdev->dev, &s3c24xx_i2s_dai);
+}
+
+static __devexit int s3c24xx_iis_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_dai(&pdev->dev);
+	return 0;
+}
+
+static struct platform_driver s3c24xx_iis_driver = {
+	.probe  = s3c24xx_iis_dev_probe,
+	.remove = s3c24xx_iis_dev_remove,
+	.driver = {
+		.name = "s3c24xx-iis",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s3c24xx_i2s_init(void)
+{
+	return platform_driver_register(&s3c24xx_iis_driver);
+}
+module_init(s3c24xx_i2s_init);
+
+static void __exit s3c24xx_i2s_exit(void)
+{
+	platform_driver_unregister(&s3c24xx_iis_driver);
+}
+module_exit(s3c24xx_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("s3c24xx I2S SoC Interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c24xx-iis");
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.h b/sound/soc/samsung/s3c24xx-i2s.h
similarity index 100%
rename from sound/soc/s3c24xx/s3c24xx-i2s.h
rename to sound/soc/samsung/s3c24xx-i2s.h
diff --git a/sound/soc/samsung/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
new file mode 100644
index 0000000..a434032d
--- /dev/null
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -0,0 +1,394 @@
+/* sound/soc/samsung/s3c24xx_simtec.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include <plat/audio-simtec.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "s3c24xx_simtec.h"
+
+static struct s3c24xx_audio_simtec_pdata *pdata;
+static struct clk *xtal_clk;
+
+static int spk_gain;
+static int spk_unmute;
+
+/**
+ * speaker_gain_get - read the speaker gain setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be updated.
+ *
+ * Read the value for the AMP gain control.
+ */
+static int speaker_gain_get(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = spk_gain;
+	return 0;
+}
+
+/**
+ * speaker_gain_set - set the value of the speaker amp gain
+ * @value: The value to write.
+ */
+static void speaker_gain_set(int value)
+{
+	gpio_set_value_cansleep(pdata->amp_gain[0], value & 1);
+	gpio_set_value_cansleep(pdata->amp_gain[1], value >> 1);
+}
+
+/**
+ * speaker_gain_put - set the speaker gain setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be set.
+ *
+ * Set the value of the speaker gain from the specified
+ * @ucontrol setting.
+ *
+ * Note, if the speaker amp is muted, then we do not set a gain value,
+ * as at least one of the ICs that is fitted will try to power up even
+ * if the main control is set to off.
+ */
+static int speaker_gain_put(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	int value = ucontrol->value.integer.value[0];
+
+	spk_gain = value;
+
+	if (!spk_unmute)
+		speaker_gain_set(value);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new amp_gain_controls[] = {
+	SOC_SINGLE_EXT("Speaker Gain", 0, 0, 3, 0,
+		       speaker_gain_get, speaker_gain_put),
+};
+
+/**
+ * spk_unmute_state - set the unmute state of the speaker
+ * @to: non-zero to unmute, zero to mute.
+ */
+static void spk_unmute_state(int to)
+{
+	pr_debug("%s: to=%d\n", __func__, to);
+
+	spk_unmute = to;
+	gpio_set_value(pdata->amp_gpio, to);
+
+	/* if we're unmuting, also re-set the gain */
+	if (to && pdata->amp_gain[0] > 0)
+		speaker_gain_set(spk_gain);
+}
+
+/**
+ * speaker_unmute_get - read the speaker unmute setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be updated.
+ *
+ * Read the value of the speaker unmute control.
+ */
+static int speaker_unmute_get(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = spk_unmute;
+	return 0;
+}
+
+/**
+ * speaker_unmute_put - set the speaker unmute setting.
+ * @kcontrol: The control for the speaker gain.
+ * @ucontrol: The value that needs to be set.
+ *
+ * Set the speaker unmute state from the specified
+ * @ucontrol setting.
+ */
+static int speaker_unmute_put(struct snd_kcontrol *kcontrol,
+			    struct snd_ctl_elem_value *ucontrol)
+{
+	spk_unmute_state(ucontrol->value.integer.value[0]);
+	return 0;
+}
+
+/* This is added as a manual control as the speaker amps create clicks
+ * when their power state is changed, which are far more noticeable than
+ * anything produced by the CODEC itself.
+ */
+static const struct snd_kcontrol_new amp_unmute_controls[] = {
+	SOC_SINGLE_EXT("Speaker Switch", 0, 0, 1, 0,
+		       speaker_unmute_get, speaker_unmute_put),
+};
+
+void simtec_audio_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+
+	if (pdata->amp_gpio > 0) {
+		pr_debug("%s: adding amp routes\n", __func__);
+
+		snd_soc_add_controls(codec, amp_unmute_controls,
+				     ARRAY_SIZE(amp_unmute_controls));
+	}
+
+	if (pdata->amp_gain[0] > 0) {
+		pr_debug("%s: adding amp controls\n", __func__);
+		snd_soc_add_controls(codec, amp_gain_controls,
+				     ARRAY_SIZE(amp_gain_controls));
+	}
+}
+EXPORT_SYMBOL_GPL(simtec_audio_init);
+
+#define CODEC_CLOCK 12000000
+
+/**
+ * simtec_hw_params - update hardware parameters
+ * @substream: The audio substream instance.
+ * @params: The parameters requested.
+ *
+ * Update the codec data routing and configuration settings
+ * from the supplied data.
+ */
+static int simtec_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret;
+
+	/* Set the CODEC as the bus clock master, I2S */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+				  SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBM_CFM);
+	if (ret) {
+		pr_err("%s: failed set cpu dai format\n", __func__);
+		return ret;
+	}
+
+	/* Set the CODEC as the bus clock master */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+				  SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBM_CFM);
+	if (ret) {
+		pr_err("%s: failed set codec dai format\n", __func__);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0,
+				     CODEC_CLOCK, SND_SOC_CLOCK_IN);
+	if (ret) {
+		pr_err( "%s: failed setting codec sysclk\n", __func__);
+		return ret;
+	}
+
+	if (pdata->use_mpllin) {
+		ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_MPLL,
+					     0, SND_SOC_CLOCK_OUT);
+
+		if (ret) {
+			pr_err("%s: failed to set MPLLin as clksrc\n",
+			       __func__);
+			return ret;
+		}
+	}
+
+	if (pdata->output_cdclk) {
+		int cdclk_scale;
+
+		cdclk_scale = clk_get_rate(xtal_clk) / CODEC_CLOCK;
+		cdclk_scale--;
+
+		ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+					     cdclk_scale);
+	}
+
+	return 0;
+}
+
+static int simtec_call_startup(struct s3c24xx_audio_simtec_pdata *pd)
+{
+	/* call any board-supplied startup code; this currently only
+	 * covers the bast/vr1000, which have a CPLD in the way of the
+	 * LRCLK */
+	if (pd->startup)
+		pd->startup();
+
+	return 0;
+}
+
+static struct snd_soc_ops simtec_snd_ops = {
+	.hw_params	= simtec_hw_params,
+};
+
+/**
+ * attach_gpio_amp - get and configure the necessary gpios
+ * @dev: The device we're probing.
+ * @pd: The platform data supplied by the board.
+ *
+ * If there is a GPIO based amplifier attached to the board, claim
+ * the necessary GPIO lines for it, and set default values.
+ */
+static int attach_gpio_amp(struct device *dev,
+			   struct s3c24xx_audio_simtec_pdata *pd)
+{
+	int ret;
+
+	/* attach gpio amp gain (if any) */
+	if (pdata->amp_gain[0] > 0) {
+		ret = gpio_request(pd->amp_gain[0], "gpio-amp-gain0");
+		if (ret) {
+			dev_err(dev, "cannot get amp gpio gain0\n");
+			return ret;
+		}
+
+		ret = gpio_request(pd->amp_gain[1], "gpio-amp-gain1");
+		if (ret) {
+			dev_err(dev, "cannot get amp gpio gain1\n");
+			gpio_free(pdata->amp_gain[0]);
+			return ret;
+		}
+
+		gpio_direction_output(pd->amp_gain[0], 0);
+		gpio_direction_output(pd->amp_gain[1], 0);
+	}
+
+	/* note, currently we assume GPA0 isn't a valid amp GPIO */
+	if (pdata->amp_gpio > 0) {
+		ret = gpio_request(pd->amp_gpio, "gpio-amp");
+		if (ret) {
+			dev_err(dev, "cannot get amp gpio %d (%d)\n",
+				pd->amp_gpio, ret);
+			goto err_amp;
+		}
+
+		/* set the amp off at startup */
+		spk_unmute_state(0);
+	}
+
+	return 0;
+
+err_amp:
+	if (pd->amp_gain[0] > 0) {
+		gpio_free(pd->amp_gain[0]);
+		gpio_free(pd->amp_gain[1]);
+	}
+
+	return ret;
+}
+
+static void detach_gpio_amp(struct s3c24xx_audio_simtec_pdata *pd)
+{
+	if (pd->amp_gain[0] > 0) {
+		gpio_free(pd->amp_gain[0]);
+		gpio_free(pd->amp_gain[1]);
+	}
+
+	if (pd->amp_gpio > 0)
+		gpio_free(pd->amp_gpio);
+}
+
+#ifdef CONFIG_PM
+int simtec_audio_resume(struct device *dev)
+{
+	simtec_call_startup(pdata);
+	return 0;
+}
+
+const struct dev_pm_ops simtec_audio_pmops = {
+	.resume	= simtec_audio_resume,
+};
+EXPORT_SYMBOL_GPL(simtec_audio_pmops);
+#endif
+
+int __devinit simtec_audio_core_probe(struct platform_device *pdev,
+				      struct snd_soc_card *card)
+{
+	struct platform_device *snd_dev;
+	int ret;
+
+	card->dai_link->ops = &simtec_snd_ops;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data supplied\n");
+		return -EINVAL;
+	}
+
+	simtec_call_startup(pdata);
+
+	xtal_clk = clk_get(&pdev->dev, "xtal");
+	if (IS_ERR(xtal_clk)) {
+		dev_err(&pdev->dev, "could not get clkout0\n");
+		return -EINVAL;
+	}
+
+	dev_info(&pdev->dev, "xtal rate is %ld\n", clk_get_rate(xtal_clk));
+
+	ret = attach_gpio_amp(&pdev->dev, pdata);
+	if (ret)
+		goto err_clk;
+
+	snd_dev = platform_device_alloc("soc-audio", -1);
+	if (!snd_dev) {
+		dev_err(&pdev->dev, "failed to alloc soc-audio devicec\n");
+		ret = -ENOMEM;
+		goto err_gpio;
+	}
+
+	platform_set_drvdata(snd_dev, card);
+
+	ret = platform_device_add(snd_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add soc-audio dev\n");
+		goto err_pdev;
+	}
+
+	platform_set_drvdata(pdev, snd_dev);
+	return 0;
+
+err_pdev:
+	platform_device_put(snd_dev);
+
+err_gpio:
+	detach_gpio_amp(pdata);
+
+err_clk:
+	clk_put(xtal_clk);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(simtec_audio_core_probe);
+
+int __devexit simtec_audio_remove(struct platform_device *pdev)
+{
+	struct platform_device *snd_dev = platform_get_drvdata(pdev);
+
+	platform_device_unregister(snd_dev);
+
+	detach_gpio_amp(pdata);
+	clk_put(xtal_clk);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(simtec_audio_remove);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Simtec Audio common support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/s3c24xx_simtec.h b/sound/soc/samsung/s3c24xx_simtec.h
new file mode 100644
index 0000000..8270748
--- /dev/null
+++ b/sound/soc/samsung/s3c24xx_simtec.h
@@ -0,0 +1,22 @@
+/* sound/soc/samsung/s3c24xx_simtec.h
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+extern void simtec_audio_init(struct snd_soc_pcm_runtime *rtd);
+
+extern int simtec_audio_core_probe(struct platform_device *pdev,
+				   struct snd_soc_card *card);
+
+extern int simtec_audio_remove(struct platform_device *pdev);
+
+#ifdef CONFIG_PM
+extern const struct dev_pm_ops simtec_audio_pmops;
+#define simtec_audio_pm &simtec_audio_pmops
+#else
+#define simtec_audio_pm NULL
+#endif
diff --git a/sound/soc/samsung/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
new file mode 100644
index 0000000..bb4292e
--- /dev/null
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -0,0 +1,144 @@
+/* sound/soc/samsung/s3c24xx_simtec_hermes.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include <plat/audio-simtec.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "s3c24xx_simtec.h"
+
+static const struct snd_soc_dapm_widget dapm_widgets[] = {
+	SND_SOC_DAPM_LINE("GSM Out", NULL),
+	SND_SOC_DAPM_LINE("GSM In", NULL),
+	SND_SOC_DAPM_LINE("Line In", NULL),
+	SND_SOC_DAPM_LINE("Line Out", NULL),
+	SND_SOC_DAPM_LINE("ZV", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route base_map[] = {
+	/* Headphone connected to HP{L,R}OUT and HP{L,R}COM */
+
+	{ "Headphone Jack", NULL, "HPLOUT" },
+	{ "Headphone Jack", NULL, "HPLCOM" },
+	{ "Headphone Jack", NULL, "HPROUT" },
+	{ "Headphone Jack", NULL, "HPRCOM" },
+
+	/* ZV connected to Line1 */
+
+	{ "LINE1L", NULL, "ZV" },
+	{ "LINE1R", NULL, "ZV" },
+
+	/* Line In connected to Line2 */
+
+	{ "LINE2L", NULL, "Line In" },
+	{ "LINE2R", NULL, "Line In" },
+
+	/* Microphone connected to MIC3R and MIC_BIAS */
+
+	{ "MIC3L", NULL, "Mic Jack" },
+
+	/* GSM connected to MONO_LOUT and MIC3L (in) */
+
+	{ "GSM Out", NULL, "MONO_LOUT" },
+	{ "MIC3L", NULL, "GSM In" },
+
+	/* Speaker is connected to LINEOUT{LN,LP,RN,RP}, however we are
+	 * not using DAPM to power it up and down as it makes a click
+	 * when powering up. */
+};
+
+/**
+ * simtec_hermes_init - initialise and add controls
+ * @codec: The codec instance to attach to.
+ *
+ * Attach our controls and configure the necessary codec
+ * mappings for our sound card instance.
+*/
+static int simtec_hermes_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
+				  ARRAY_SIZE(dapm_widgets));
+
+	snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map));
+
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+
+	simtec_audio_init(rtd);
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link simtec_dai_aic33 = {
+	.name		= "tlv320aic33",
+	.stream_name	= "TLV320AIC33",
+	.codec_name	= "tlv320aic3x-codec.0-0x1a",
+	.cpu_dai_name	= "s3c24xx-i2s",
+	.codec_dai_name = "tlv320aic3x-hifi",
+	.platform_name	= "samsung-audio",
+	.init		= simtec_hermes_init,
+};
+
+/* simtec audio machine driver */
+static struct snd_soc_card snd_soc_machine_simtec_aic33 = {
+	.name		= "Simtec-Hermes",
+	.dai_link	= &simtec_dai_aic33,
+	.num_links	= 1,
+};
+
+static int __devinit simtec_audio_hermes_probe(struct platform_device *pd)
+{
+	dev_info(&pd->dev, "probing....\n");
+	return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic33);
+}
+
+static struct platform_driver simtec_audio_hermes_platdrv = {
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "s3c24xx-simtec-hermes-snd",
+		.pm	= simtec_audio_pm,
+	},
+	.probe	= simtec_audio_hermes_probe,
+	.remove	= __devexit_p(simtec_audio_remove),
+};
+
+MODULE_ALIAS("platform:s3c24xx-simtec-hermes-snd");
+
+static int __init simtec_hermes_modinit(void)
+{
+	return platform_driver_register(&simtec_audio_hermes_platdrv);
+}
+
+static void __exit simtec_hermes_modexit(void)
+{
+	platform_driver_unregister(&simtec_audio_hermes_platdrv);
+}
+
+module_init(simtec_hermes_modinit);
+module_exit(simtec_hermes_modexit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
new file mode 100644
index 0000000..fbba4e3
--- /dev/null
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -0,0 +1,134 @@
+/* sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
+ *
+ * Copyright 2009 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include <plat/audio-simtec.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "s3c24xx_simtec.h"
+
+#include "../codecs/tlv320aic23.h"
+
+/* supported machines:
+ *
+ * Machine	Connections		AMP
+ * -------	-----------		---
+ * BAST		MIC, HPOUT, LOUT, LIN	TPA2001D1 (HPOUTL,R) (gain hardwired)
+ * VR1000	HPOUT, LIN		None
+ * VR2000	LIN, LOUT, MIC, HP	LM4871 (HPOUTL,R)
+ * DePicture	LIN, LOUT, MIC, HP	LM4871 (HPOUTL,R)
+ * Anubis	LIN, LOUT, MIC, HP	TPA2001D1 (HPOUTL,R)
+ */
+
+static const struct snd_soc_dapm_widget dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_LINE("Line In", NULL),
+	SND_SOC_DAPM_LINE("Line Out", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route base_map[] = {
+	{ "Headphone Jack", NULL, "LHPOUT"},
+	{ "Headphone Jack", NULL, "RHPOUT"},
+
+	{ "Line Out", NULL, "LOUT" },
+	{ "Line Out", NULL, "ROUT" },
+
+	{ "LLINEIN", NULL, "Line In"},
+	{ "RLINEIN", NULL, "Line In"},
+
+	{ "MICIN", NULL, "Mic Jack"},
+};
+
+/**
+ * simtec_tlv320aic23_init - initialise and add controls
+ * @codec: The codec instance to attach to.
+ *
+ * Attach our controls and configure the necessary codec
+ * mappings for our sound card instance.
+*/
+static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
+				  ARRAY_SIZE(dapm_widgets));
+
+	snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map));
+
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+
+	simtec_audio_init(rtd);
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link simtec_dai_aic23 = {
+	.name		= "tlv320aic23",
+	.stream_name	= "TLV320AIC23",
+	.codec_name	= "tlv320aic3x-codec.0-0x1a",
+	.cpu_dai_name	= "s3c24xx-i2s",
+	.codec_dai_name = "tlv320aic3x-hifi",
+	.platform_name	= "samsung-audio",
+	.init		= simtec_tlv320aic23_init,
+};
+
+/* simtec audio machine driver */
+static struct snd_soc_card snd_soc_machine_simtec_aic23 = {
+	.name		= "Simtec",
+	.dai_link	= &simtec_dai_aic23,
+	.num_links	= 1,
+};
+
+static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd)
+{
+	return simtec_audio_core_probe(pd, &snd_soc_machine_simtec_aic23);
+}
+
+static struct platform_driver simtec_audio_tlv320aic23_platdrv = {
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "s3c24xx-simtec-tlv320aic23",
+		.pm	= simtec_audio_pm,
+	},
+	.probe	= simtec_audio_tlv320aic23_probe,
+	.remove	= __devexit_p(simtec_audio_remove),
+};
+
+MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23");
+
+static int __init simtec_tlv320aic23_modinit(void)
+{
+	return platform_driver_register(&simtec_audio_tlv320aic23_platdrv);
+}
+
+static void __exit simtec_tlv320aic23_modexit(void)
+{
+	platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv);
+}
+
+module_init(simtec_tlv320aic23_modinit);
+module_exit(simtec_tlv320aic23_modexit);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
new file mode 100644
index 0000000..cdc8ecb
--- /dev/null
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -0,0 +1,367 @@
+/*
+ * Modifications by Christian Pellegrin <chripell@evolware.org>
+ *
+ * s3c24xx_uda134x.c  --  S3C24XX_UDA134X ALSA SoC Audio board driver
+ *
+ * Copyright 2007 Dension Audio Systems Ltd.
+ * Author: Zoltan Devai
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/s3c24xx_uda134x.h>
+#include <sound/uda134x.h>
+
+#include <plat/regs-iis.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "../codecs/uda134x.h"
+
+
+/* #define ENFORCE_RATES 1 */
+/*
+  Unfortunately the S3C24XX in master mode has only a limited ability
+  to generate the clock for the codec. If you define this, only rates
+  that are really available will be enforced. But be careful: most
+  user-level applications just want the usual sampling frequencies
+  (8, 11.025, 22.05, 44.1 kHz), and resampling is a costly operation
+  on embedded systems. So unless you are lucky or your hardware
+  engineer was very forward-looking, it is better to leave this
+  undefined. If you do so, an approximate value within -/+ 5% of the
+  requested sampling rate will be chosen. If this is not possible, an
+  error will be returned.
+*/
+
+static struct clk *xtal;
+static struct clk *pclk;
+/* This is needed because we don't have a place to keep the pointers
+ * to the clocks for each substream. We get the clocks only when we
+ * are actually using them, so we don't block things like frequency
+ * changes or oscillator power-off */
+static int clk_users;
+static DEFINE_MUTEX(clk_lock);
+
+static unsigned int rates[33 * 2];
+#ifdef ENFORCE_RATES
+static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+	.count	= ARRAY_SIZE(rates),
+	.list	= rates,
+	.mask	= 0,
+};
+#endif
+
+static struct platform_device *s3c24xx_uda134x_snd_device;
+
+static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+#ifdef ENFORCE_RATES
+	struct snd_pcm_runtime *runtime = substream->runtime;
+#endif
+
+	mutex_lock(&clk_lock);
+	pr_debug("%s %d\n", __func__, clk_users);
+	if (clk_users == 0) {
+		xtal = clk_get(&s3c24xx_uda134x_snd_device->dev, "xtal");
+		if (!xtal) {
+			printk(KERN_ERR "%s cannot get xtal\n", __func__);
+			ret = -EBUSY;
+		} else {
+			pclk = clk_get(&s3c24xx_uda134x_snd_device->dev,
+				       "pclk");
+			if (!pclk) {
+				printk(KERN_ERR "%s cannot get pclk\n",
+				       __func__);
+				clk_put(xtal);
+				ret = -EBUSY;
+			}
+		}
+		if (!ret) {
+			int i, j;
+
+			for (i = 0; i < 2; i++) {
+				int fs = i ? 256 : 384;
+
+				rates[i*33] = clk_get_rate(xtal) / fs;
+				for (j = 1; j < 33; j++)
+					rates[i*33 + j] = clk_get_rate(pclk) /
+						(j * fs);
+			}
+		}
+	}
+	clk_users += 1;
+	mutex_unlock(&clk_lock);
+	if (!ret) {
+#ifdef ENFORCE_RATES
+		ret = snd_pcm_hw_constraint_list(runtime, 0,
+						 SNDRV_PCM_HW_PARAM_RATE,
+						 &hw_constraints_rates);
+		if (ret < 0)
+			printk(KERN_ERR "%s cannot set constraints\n",
+			       __func__);
+#endif
+	}
+	return ret;
+}
+
+static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
+{
+	mutex_lock(&clk_lock);
+	pr_debug("%s %d\n", __func__, clk_users);
+	clk_users -= 1;
+	if (clk_users == 0) {
+		clk_put(xtal);
+		xtal = NULL;
+		clk_put(pclk);
+		pclk = NULL;
+	}
+	mutex_unlock(&clk_lock);
+}
+
+static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
+					struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned int clk = 0;
+	int ret = 0;
+	int clk_source, fs_mode;
+	unsigned long rate = params_rate(params);
+	long err, cerr;
+	unsigned int div;
+	int i, bi;
+
+	err = 999999;
+	bi = 0;
+	for (i = 0; i < 2*33; i++) {
+		cerr = rates[i] - rate;
+		if (cerr < 0)
+			cerr = -cerr;
+		if (cerr < err) {
+			err = cerr;
+			bi = i;
+		}
+	}
+	if (bi / 33 == 1)
+		fs_mode = S3C2410_IISMOD_256FS;
+	else
+		fs_mode = S3C2410_IISMOD_384FS;
+	if (bi % 33 == 0) {
+		clk_source = S3C24XX_CLKSRC_MPLL;
+		div = 1;
+	} else {
+		clk_source = S3C24XX_CLKSRC_PCLK;
+		div = bi % 33;
+	}
+	pr_debug("%s desired rate %lu, %d\n", __func__, rate, bi);
+
+	clk = (fs_mode == S3C2410_IISMOD_384FS ? 384 : 256) * rate;
+	pr_debug("%s will use: %s %s %d sysclk %d err %ld\n", __func__,
+		 fs_mode == S3C2410_IISMOD_384FS ? "384FS" : "256FS",
+		 clk_source == S3C24XX_CLKSRC_MPLL ? "MPLLin" : "PCLK",
+		 div, clk, err);
+
+	if ((err * 100 / rate) > 5) {
+		printk(KERN_ERR "S3C24XX_UDA134X: effective frequency "
+		       "too different from desired (%ld%%)\n",
+		       err * 100 / rate);
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source , clk,
+			SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, fs_mode);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
+			S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+			S3C24XX_PRESCALE(div, div));
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk,
+			SND_SOC_CLOCK_OUT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops s3c24xx_uda134x_ops = {
+	.startup = s3c24xx_uda134x_startup,
+	.shutdown = s3c24xx_uda134x_shutdown,
+	.hw_params = s3c24xx_uda134x_hw_params,
+};
+
+static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
+	.name = "UDA134X",
+	.stream_name = "UDA134X",
+	.codec_name = "uda134x-hifi",
+	.codec_dai_name = "uda134x-hifi",
+	.cpu_dai_name = "s3c24xx-i2s",
+	.ops = &s3c24xx_uda134x_ops,
+	.platform_name	= "samsung-audio",
+};
+
+static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
+	.name = "S3C24XX_UDA134X",
+	.dai_link = &s3c24xx_uda134x_dai_link,
+	.num_links = 1,
+};
+
+static struct s3c24xx_uda134x_platform_data *s3c24xx_uda134x_l3_pins;
+
+static void setdat(int v)
+{
+	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_data, v > 0);
+}
+
+static void setclk(int v)
+{
+	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_clk, v > 0);
+}
+
+static void setmode(int v)
+{
+	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_mode, v > 0);
+}
+
+/* FIXME - This must be codec platform data but in which board file ?? */
+static struct uda134x_platform_data s3c24xx_uda134x = {
+	.l3 = {
+		.setdat = setdat,
+		.setclk = setclk,
+		.setmode = setmode,
+		.data_hold = 1,
+		.data_setup = 1,
+		.clock_high = 1,
+		.mode_hold = 1,
+		.mode = 1,
+		.mode_setup = 1,
+	},
+};
+
+static int s3c24xx_uda134x_setup_pin(int pin, char *fun)
+{
+	if (gpio_request(pin, "s3c24xx_uda134x") < 0) {
+		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
+		       "l3 %s pin already in use", fun);
+		return -EBUSY;
+	}
+	gpio_direction_output(pin, 0);
+	return 0;
+}
+
+static int s3c24xx_uda134x_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	printk(KERN_INFO "S3C24XX_UDA134X SoC Audio driver\n");
+
+	s3c24xx_uda134x_l3_pins = pdev->dev.platform_data;
+	if (s3c24xx_uda134x_l3_pins == NULL) {
+		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
+		       "unable to find platform data\n");
+		return -ENODEV;
+	}
+	s3c24xx_uda134x.power = s3c24xx_uda134x_l3_pins->power;
+	s3c24xx_uda134x.model = s3c24xx_uda134x_l3_pins->model;
+
+	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_data,
+				      "data") < 0)
+		return -EBUSY;
+	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_clk,
+				      "clk") < 0) {
+		gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
+		return -EBUSY;
+	}
+	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_mode,
+				      "mode") < 0) {
+		gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
+		gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
+		return -EBUSY;
+	}
+
+	s3c24xx_uda134x_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!s3c24xx_uda134x_snd_device) {
+		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
+		       "Unable to register\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(s3c24xx_uda134x_snd_device,
+			     &snd_soc_s3c24xx_uda134x);
+	ret = platform_device_add(s3c24xx_uda134x_snd_device);
+	if (ret) {
+		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
+		platform_device_put(s3c24xx_uda134x_snd_device);
+	}
+
+	return ret;
+}
+
+static int s3c24xx_uda134x_remove(struct platform_device *pdev)
+{
+	platform_device_unregister(s3c24xx_uda134x_snd_device);
+	gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
+	gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
+	gpio_free(s3c24xx_uda134x_l3_pins->l3_mode);
+	return 0;
+}
+
+static struct platform_driver s3c24xx_uda134x_driver = {
+	.probe  = s3c24xx_uda134x_probe,
+	.remove = s3c24xx_uda134x_remove,
+	.driver = {
+		.name = "s3c24xx_uda134x",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init s3c24xx_uda134x_init(void)
+{
+	return platform_driver_register(&s3c24xx_uda134x_driver);
+}
+
+static void __exit s3c24xx_uda134x_exit(void)
+{
+	platform_driver_unregister(&s3c24xx_uda134x_driver);
+}
+
+
+module_init(s3c24xx_uda134x_init);
+module_exit(s3c24xx_uda134x_exit);
+
+MODULE_AUTHOR("Zoltan Devai, Christian Pellegrin <chripell@evolware.org>");
+MODULE_DESCRIPTION("S3C24XX_UDA134X ALSA SoC audio driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
new file mode 100644
index 0000000..61e2b52
--- /dev/null
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -0,0 +1,290 @@
+/* sound/soc/samsung/smartq_wm8987.c
+ *
+ * Copyright 2010 Maurus Cuelenaere <mcuelenaere@gmail.com>
+ *
+ * Based on smdk6410_wm8987.c
+ *     Copyright 2007 Wolfson Microelectronics PLC. - linux@wolfsonmicro.com
+ *     Graeme Gregory - graeme.gregory@wolfsonmicro.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include <asm/mach-types.h>
+
+#include "dma.h"
+#include "i2s.h"
+
+#include "../codecs/wm8750.h"
+
+/*
+ * WM8987 is register compatible with WM8750, so using that as base driver.
+ */
+
+static struct snd_soc_card snd_soc_smartq;
+
+static int smartq_hifi_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned int clk = 0;
+	int ret;
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 32000:
+	case 48000:
+	case 96000:
+		clk = 12288000;
+		break;
+	case 11025:
+	case 22050:
+	case 44100:
+	case 88200:
+		clk = 11289600;
+		break;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+					     SND_SOC_DAIFMT_NB_NF |
+					     SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+					   SND_SOC_DAIFMT_NB_NF |
+					   SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* Use PCLK for I2S signal generation */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
+					0, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* Gate the RCLK output on PAD */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
+					0, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* set the codec system clock for DAC and ADC */
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * SmartQ WM8987 HiFi DAI operations.
+ */
+static struct snd_soc_ops smartq_hifi_ops = {
+	.hw_params = smartq_hifi_hw_params,
+};
+
+static struct snd_soc_jack smartq_jack;
+
+static struct snd_soc_jack_pin smartq_jack_pins[] = {
+	/* Disable speaker when headphone is plugged in */
+	{
+		.pin	= "Internal Speaker",
+		.mask	= SND_JACK_HEADPHONE,
+	},
+};
+
+static struct snd_soc_jack_gpio smartq_jack_gpios[] = {
+	{
+		.gpio		= S3C64XX_GPL(12),
+		.name		= "headphone detect",
+		.report		= SND_JACK_HEADPHONE,
+		.debounce_time	= 200,
+	},
+};
+
+static const struct snd_kcontrol_new wm8987_smartq_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Internal Speaker"),
+	SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+	SOC_DAPM_PIN_SWITCH("Internal Mic"),
+};
+
+static int smartq_speaker_event(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *k,
+				int event)
+{
+	gpio_set_value(S3C64XX_GPK(12), SND_SOC_DAPM_EVENT_OFF(event));
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget wm8987_dapm_widgets[] = {
+	SND_SOC_DAPM_SPK("Internal Speaker", smartq_speaker_event),
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_MIC("Internal Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route audio_map[] = {
+	{"Headphone Jack", NULL, "LOUT2"},
+	{"Headphone Jack", NULL, "ROUT2"},
+
+	{"Internal Speaker", NULL, "LOUT2"},
+	{"Internal Speaker", NULL, "ROUT2"},
+
+	{"Mic Bias", NULL, "Internal Mic"},
+	{"LINPUT2", NULL, "Mic Bias"},
+};
+
+static int smartq_wm8987_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err = 0;
+
+	/* Add SmartQ specific widgets */
+	snd_soc_dapm_new_controls(dapm, wm8987_dapm_widgets,
+				  ARRAY_SIZE(wm8987_dapm_widgets));
+
+	/* add SmartQ specific controls */
+	err = snd_soc_add_controls(codec, wm8987_smartq_controls,
+				   ARRAY_SIZE(wm8987_smartq_controls));
+
+	if (err < 0)
+		return err;
+
+	/* setup SmartQ specific audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+
+	/* set endpoints to not connected */
+	snd_soc_dapm_nc_pin(dapm, "LINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "ROUT1");
+
+	/* set endpoints to default off mode */
+	snd_soc_dapm_enable_pin(dapm, "Internal Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Internal Mic");
+	snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+
+	err = snd_soc_dapm_sync(dapm);
+	if (err)
+		return err;
+
+	/* Headphone jack detection */
+	err = snd_soc_jack_new(codec, "Headphone Jack",
+			       SND_JACK_HEADPHONE, &smartq_jack);
+	if (err)
+		return err;
+
+	err = snd_soc_jack_add_pins(&smartq_jack, ARRAY_SIZE(smartq_jack_pins),
+				    smartq_jack_pins);
+	if (err)
+		return err;
+
+	err = snd_soc_jack_add_gpios(&smartq_jack,
+				     ARRAY_SIZE(smartq_jack_gpios),
+				     smartq_jack_gpios);
+
+	return err;
+}
+
+static struct snd_soc_dai_link smartq_dai[] = {
+	{
+		.name		= "wm8987",
+		.stream_name	= "SmartQ Hi-Fi",
+		.cpu_dai_name	= "samsung-i2s.0",
+		.codec_dai_name	= "wm8750-hifi",
+		.platform_name	= "samsung-audio",
+		.codec_name	= "wm8750-codec.0-0x1a",
+		.init		= smartq_wm8987_init,
+		.ops		= &smartq_hifi_ops,
+	},
+};
+
+static struct snd_soc_card snd_soc_smartq = {
+	.name = "SmartQ",
+	.dai_link = smartq_dai,
+	.num_links = ARRAY_SIZE(smartq_dai),
+};
+
+static struct platform_device *smartq_snd_device;
+
+static int __init smartq_init(void)
+{
+	int ret;
+
+	if (!machine_is_smartq7() && !machine_is_smartq5()) {
+		pr_info("Only SmartQ is supported by this ASoC driver\n");
+		return -ENODEV;
+	}
+
+	smartq_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smartq_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smartq_snd_device, &snd_soc_smartq);
+
+	ret = platform_device_add(smartq_snd_device);
+	if (ret) {
+		platform_device_put(smartq_snd_device);
+		return ret;
+	}
+
+	/* Initialise GPIOs used by amplifiers */
+	ret = gpio_request(S3C64XX_GPK(12), "amplifiers shutdown");
+	if (ret) {
+		dev_err(&smartq_snd_device->dev, "Failed to register GPK12\n");
+		goto err_unregister_device;
+	}
+
+	/* Disable amplifiers */
+	ret = gpio_direction_output(S3C64XX_GPK(12), 1);
+	if (ret) {
+		dev_err(&smartq_snd_device->dev, "Failed to configure GPK12\n");
+		goto err_free_gpio_amp_shut;
+	}
+
+	return 0;
+
+err_free_gpio_amp_shut:
+	gpio_free(S3C64XX_GPK(12));
+err_unregister_device:
+	platform_device_unregister(smartq_snd_device);
+
+	return ret;
+}
+
+static void __exit smartq_exit(void)
+{
+	gpio_free(S3C64XX_GPK(12));
+	snd_soc_jack_free_gpios(&smartq_jack, ARRAY_SIZE(smartq_jack_gpios),
+				smartq_jack_gpios);
+
+	platform_device_unregister(smartq_snd_device);
+}
+
+module_init(smartq_init);
+module_exit(smartq_exit);
+
+/* Module information */
+MODULE_AUTHOR("Maurus Cuelenaere <mcuelenaere@gmail.com>");
+MODULE_DESCRIPTION("ALSA SoC SmartQ WM8987");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
new file mode 100644
index 0000000..3be7e7e
--- /dev/null
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -0,0 +1,73 @@
+/*
+ * smdk2443_wm9710.c  --  SoC audio for smdk2443
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC.
+ * Author: Graeme Gregory
+ *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#include "dma.h"
+#include "ac97.h"
+
+static struct snd_soc_card smdk2443;
+
+static struct snd_soc_dai_link smdk2443_dai[] = {
+{
+	.name = "AC97",
+	.stream_name = "AC97 HiFi",
+	.cpu_dai_name = "samsung-ac97",
+	.codec_dai_name = "ac97-hifi",
+	.codec_name = "ac97-codec",
+	.platform_name = "samsung-audio",
+},
+};
+
+static struct snd_soc_card smdk2443 = {
+	.name = "SMDK2443",
+	.dai_link = smdk2443_dai,
+	.num_links = ARRAY_SIZE(smdk2443_dai),
+};
+
+static struct platform_device *smdk2443_snd_ac97_device;
+
+static int __init smdk2443_init(void)
+{
+	int ret;
+
+	smdk2443_snd_ac97_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk2443_snd_ac97_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk2443_snd_ac97_device, &smdk2443);
+	ret = platform_device_add(smdk2443_snd_ac97_device);
+
+	if (ret)
+		platform_device_put(smdk2443_snd_ac97_device);
+
+	return ret;
+}
+
+static void __exit smdk2443_exit(void)
+{
+	platform_device_unregister(smdk2443_snd_ac97_device);
+}
+
+module_init(smdk2443_init);
+module_exit(smdk2443_exit);
+
+/* Module information */
+MODULE_AUTHOR("Graeme Gregory, graeme.gregory@wolfsonmicro.com, www.wolfsonmicro.com");
+MODULE_DESCRIPTION("ALSA SoC WM9710 SMDK2443");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c
new file mode 100644
index 0000000..b5c3fad
--- /dev/null
+++ b/sound/soc/samsung/smdk_spdif.c
@@ -0,0 +1,226 @@
+/*
+ * smdk_spdif.c  --  S/PDIF audio for SMDK
+ *
+ * Copyright 2010 Samsung Electronics Co. Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+
+#include <plat/devs.h>
+
+#include <sound/soc.h>
+
+#include "dma.h"
+#include "spdif.h"
+
+/* Audio clock settings belong to the board specific part. Every
+ * board can set up an audio source clock configuration matched to
+ * its H/W in a function like 'set_audio_clock_heirachy' below.
+ */
+static int set_audio_clock_heirachy(struct platform_device *pdev)
+{
+	struct clk *fout_epll, *mout_epll, *sclk_audio0, *sclk_spdif;
+	int ret = 0;
+
+	fout_epll = clk_get(NULL, "fout_epll");
+	if (IS_ERR(fout_epll)) {
+		printk(KERN_WARNING "%s: Cannot find fout_epll.\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	mout_epll = clk_get(NULL, "mout_epll");
+	if (IS_ERR(mout_epll)) {
+		printk(KERN_WARNING "%s: Cannot find mout_epll.\n",
+				__func__);
+		ret = -EINVAL;
+		goto out1;
+	}
+
+	sclk_audio0 = clk_get(&pdev->dev, "sclk_audio");
+	if (IS_ERR(sclk_audio0)) {
+		printk(KERN_WARNING "%s: Cannot find sclk_audio.\n",
+				__func__);
+		ret = -EINVAL;
+		goto out2;
+	}
+
+	sclk_spdif = clk_get(NULL, "sclk_spdif");
+	if (IS_ERR(sclk_spdif)) {
+		printk(KERN_WARNING "%s: Cannot find sclk_spdif.\n",
+				__func__);
+		ret = -EINVAL;
+		goto out3;
+	}
+
+	/* Set audio clock hierarchy for S/PDIF */
+	clk_set_parent(mout_epll, fout_epll);
+	clk_set_parent(sclk_audio0, mout_epll);
+	clk_set_parent(sclk_spdif, sclk_audio0);
+
+	clk_put(sclk_spdif);
+out3:
+	clk_put(sclk_audio0);
+out2:
+	clk_put(mout_epll);
+out1:
+	clk_put(fout_epll);
+
+	return ret;
+}
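
The parent relationships established by the clk_set_parent() calls above form a single chain; sketched out for clarity:

	/* Resulting S/PDIF clock hierarchy on SMDK:
	 *
	 *   fout_epll --> mout_epll --> sclk_audio0 --> sclk_spdif
	 */
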
+
+/* We have to set the clock rate directly here because the clock
+ * scheme of Samsung SoCs does not support setting rates from an
+ * abstract clock in its hierarchy.
+ */
+static int set_audio_clock_rate(unsigned long epll_rate,
+				unsigned long audio_rate)
+{
+	struct clk *fout_epll, *sclk_spdif;
+
+	fout_epll = clk_get(NULL, "fout_epll");
+	if (IS_ERR(fout_epll)) {
+		printk(KERN_ERR "%s: failed to get fout_epll\n", __func__);
+		return -ENOENT;
+	}
+
+	clk_set_rate(fout_epll, epll_rate);
+	clk_put(fout_epll);
+
+	sclk_spdif = clk_get(NULL, "sclk_spdif");
+	if (IS_ERR(sclk_spdif)) {
+		printk(KERN_ERR "%s: failed to get sclk_spdif\n", __func__);
+		return -ENOENT;
+	}
+
+	clk_set_rate(sclk_spdif, audio_rate);
+	clk_put(sclk_spdif);
+
+	return 0;
+}
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	unsigned long pll_out, rclk_rate;
+	int ret, ratio;
+
+	switch (params_rate(params)) {
+	case 44100:
+		pll_out = 45158400;
+		break;
+	case 32000:
+	case 48000:
+	case 96000:
+		pll_out = 49152000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Setting the ratio to 512fs makes it possible to use S/PDIF
+	 * with HDMI without modifying the S/PDIF ASoC machine driver.
+	 */
+	ratio = 512;
+	rclk_rate = params_rate(params) * ratio;
+
+	/* Set audio source clock rates */
+	ret = set_audio_clock_rate(pll_out, rclk_rate);
+	if (ret < 0)
+		return ret;
+
+	/* Set S/PDIF uses internal source clock */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SND_SOC_SPDIF_INT_MCLK,
+					rclk_rate, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return ret;
+}
+
+static struct snd_soc_ops smdk_spdif_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+static struct snd_soc_dai_link smdk_dai = {
+	.name = "S/PDIF",
+	.stream_name = "S/PDIF PCM Playback",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "samsung-spdif",
+	.codec_dai_name = "dit-hifi",
+	.codec_name = "spdif-dit",
+	.ops = &smdk_spdif_ops,
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-S/PDIF",
+	.dai_link = &smdk_dai,
+	.num_links = 1,
+};
+
+static struct platform_device *smdk_snd_spdif_dit_device;
+static struct platform_device *smdk_snd_spdif_device;
+
+static int __init smdk_init(void)
+{
+	int ret;
+
+	smdk_snd_spdif_dit_device = platform_device_alloc("spdif-dit", -1);
+	if (!smdk_snd_spdif_dit_device)
+		return -ENOMEM;
+
+	ret = platform_device_add(smdk_snd_spdif_dit_device);
+	if (ret)
+		goto err1;
+
+	smdk_snd_spdif_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_spdif_device) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	platform_set_drvdata(smdk_snd_spdif_device, &smdk);
+
+	ret = platform_device_add(smdk_snd_spdif_device);
+	if (ret)
+		goto err3;
+
+	/* Set audio clock hierarchy manually */
+	ret = set_audio_clock_heirachy(smdk_snd_spdif_device);
+	if (ret)
+		goto err4;
+
+	return 0;
+err4:
+	platform_device_del(smdk_snd_spdif_device);
+err3:
+	platform_device_put(smdk_snd_spdif_device);
+err2:
+	platform_device_del(smdk_snd_spdif_dit_device);
+err1:
+	platform_device_put(smdk_snd_spdif_dit_device);
+	return ret;
+}
+
+static void __exit smdk_exit(void)
+{
+	platform_device_unregister(smdk_snd_spdif_device);
+	platform_device_unregister(smdk_snd_spdif_dit_device);
+}
+
+module_init(smdk_init);
+module_exit(smdk_exit);
+
+MODULE_AUTHOR("Seungwhan Youn, <sw.youn@samsung.com>");
+MODULE_DESCRIPTION("ALSA SoC SMDK+S/PDIF");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
new file mode 100644
index 0000000..b2cff1a
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -0,0 +1,292 @@
+/*
+ *  smdk_wm8580.c
+ *
+ *  Copyright (c) 2009 Samsung Electronics Co. Ltd
+ *  Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <asm/mach-types.h>
+
+#include "../codecs/wm8580.h"
+#include "dma.h"
+#include "i2s.h"
+
+/*
+ * Default CFG switch settings to use this driver:
+ *
+ *   SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On
+ */
+
+/* SMDK has a 12MHZ crystal attached to WM8580 */
+#define SMDK_WM8580_FREQ 12000000
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out;
+	int bfs, rfs, ret;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_U8:
+	case SNDRV_PCM_FORMAT_S8:
+		bfs = 16;
+		break;
+	case SNDRV_PCM_FORMAT_U16_LE:
+	case SNDRV_PCM_FORMAT_S16_LE:
+		bfs = 32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* The Fvco for WM8580 PLLs must fall within [90,100]MHz.
+	 * This criterion can't be met if we request the PLL output
+	 * as {8000x256, 64000x256, 11025x256}Hz.
+	 * As a way out, we instead raise rfs to the minimum value that
+	 * makes both (params_rate(params) * rfs) and rfs itself
+	 * acceptable to the CODEC and the CPU alike.
+	 */
+	switch (params_rate(params)) {
+	case 16000:
+	case 22050:
+	case 32000:
+	case 44100:
+	case 48000:
+	case 88200:
+	case 96000:
+		rfs = 256;
+		break;
+	case 64000:
+		rfs = 384;
+		break;
+	case 8000:
+	case 11025:
+		rfs = 512;
+		break;
+	default:
+		return -EINVAL;
+	}
+	pll_out = params_rate(params) * rfs;
+
+	/* Set the Codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* Set the AP DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* Set WM8580 to drive MCLK from its PLLA */
+	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
+					WM8580_CLKSRC_PLLA);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
+					SMDK_WM8580_FREQ, pll_out);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
+				     pll_out, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
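
To make the rfs choice in smdk_hw_params() concrete, the PLL output targets work out as below; the [90,100] MHz Fvco limitation is taken from the comment above, not verified independently:

	/* PLL output requested from the WM8580, per the rfs table above:
	 *
	 *   8000 Hz  * 512 = 4096000 Hz   (256fs would only be 2048000 Hz)
	 *   11025 Hz * 512 = 5644800 Hz   (256fs would only be 2822400 Hz)
	 *   64000 Hz * 384 = 24576000 Hz  (256fs would be 16384000 Hz)
	 *   44100 Hz * 256 = 11289600 Hz, 48000 Hz * 256 = 12288000 Hz, etc.
	 *
	 * The smaller 256fs values in parentheses are the ones the comment
	 * above says the PLL cannot synthesise with Fvco kept in range.
	 */
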
+
+/*
+ * SMDK WM8580 DAI operations.
+ */
+static struct snd_soc_ops smdk_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+/* SMDK Playback widgets */
+static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = {
+	SND_SOC_DAPM_HP("Front", NULL),
+	SND_SOC_DAPM_HP("Center+Sub", NULL),
+	SND_SOC_DAPM_HP("Rear", NULL),
+};
+
+/* SMDK Capture widgets */
+static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = {
+	SND_SOC_DAPM_MIC("MicIn", NULL),
+	SND_SOC_DAPM_LINE("LineIn", NULL),
+};
+
+/* SMDK-PAIFTX connections */
+static const struct snd_soc_dapm_route audio_map_tx[] = {
+	/* MicIn feeds AINL */
+	{"AINL", NULL, "MicIn"},
+
+	/* LineIn feeds AINL/R */
+	{"AINL", NULL, "LineIn"},
+	{"AINR", NULL, "LineIn"},
+};
+
+/* SMDK-PAIFRX connections */
+static const struct snd_soc_dapm_route audio_map_rx[] = {
+	/* Front Left/Right are fed VOUT1L/R */
+	{"Front", NULL, "VOUT1L"},
+	{"Front", NULL, "VOUT1R"},
+
+	/* Center/Sub are fed VOUT2L/R */
+	{"Center+Sub", NULL, "VOUT2L"},
+	{"Center+Sub", NULL, "VOUT2R"},
+
+	/* Rear Left/Right are fed VOUT3L/R */
+	{"Rear", NULL, "VOUT3L"},
+	{"Rear", NULL, "VOUT3R"},
+};
+
+static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* Add smdk specific Capture widgets */
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_cpt,
+				  ARRAY_SIZE(wm8580_dapm_widgets_cpt));
+
+	/* Set up PAIFTX audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map_tx, ARRAY_SIZE(audio_map_tx));
+
+	/* Enabling the microphone requires the fitting of a 0R
+	 * resistor to connect the line from the microphone jack.
+	 */
+	snd_soc_dapm_disable_pin(dapm, "MicIn");
+
+	/* signal a DAPM event */
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static int smdk_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* Add smdk specific Playback widgets */
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_pbk,
+				  ARRAY_SIZE(wm8580_dapm_widgets_pbk));
+
+	/* Set up PAIFRX audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map_rx, ARRAY_SIZE(audio_map_rx));
+
+	/* signal a DAPM event */
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+enum {
+	PRI_PLAYBACK = 0,
+	PRI_CAPTURE,
+	SEC_PLAYBACK,
+};
+
+static struct snd_soc_dai_link smdk_dai[] = {
+	[PRI_PLAYBACK] = { /* Primary Playback i/f */
+		.name = "WM8580 PAIF RX",
+		.stream_name = "Playback",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8580-hifi-playback",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paifrx,
+		.ops = &smdk_ops,
+	},
+	[PRI_CAPTURE] = { /* Primary Capture i/f */
+		.name = "WM8580 PAIF TX",
+		.stream_name = "Capture",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8580-hifi-capture",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paiftx,
+		.ops = &smdk_ops,
+	},
+	[SEC_PLAYBACK] = { /* Sec_Fifo Playback i/f */
+		.name = "Sec_FIFO TX",
+		.stream_name = "Playback",
+		.cpu_dai_name = "samsung-i2s.x",
+		.codec_dai_name = "wm8580-hifi-playback",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paifrx,
+		.ops = &smdk_ops,
+	},
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-I2S",
+	.dai_link = smdk_dai,
+	.num_links = 2,
+};
+
+static struct platform_device *smdk_snd_device;
+
+static int __init smdk_audio_init(void)
+{
+	int ret;
+	char *str;
+
+	if (machine_is_smdkc100() || machine_is_smdk6442()
+			|| machine_is_smdkv210() || machine_is_smdkc110()) {
+		smdk.num_links = 3;
+		/* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */
+		str = (char *)smdk_dai[SEC_PLAYBACK].cpu_dai_name;
+		str[strlen(str) - 1] = '0' + SAMSUNG_I2S_SECOFF;
+	} else if (machine_is_smdk6410()) {
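+		/* SMDK6410 routes the WM8580 via I2S controller 2 */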
+		str = (char *)smdk_dai[PRI_PLAYBACK].cpu_dai_name;
+		str[strlen(str) - 1] = '2';
+		str = (char *)smdk_dai[PRI_CAPTURE].cpu_dai_name;
+		str[strlen(str) - 1] = '2';
+	}
+
+	smdk_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk_snd_device, &smdk);
+	ret = platform_device_add(smdk_snd_device);
+
+	if (ret)
+		platform_device_put(smdk_snd_device);
+
+	return ret;
+}
+module_init(smdk_audio_init);
+
+static void __exit smdk_audio_exit(void)
+{
+	platform_device_unregister(smdk_snd_device);
+}
+module_exit(smdk_audio_exit);
+
+MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com");
+MODULE_DESCRIPTION("ALSA SoC SMDK WM8580");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
new file mode 100644
index 0000000..e7c1009
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -0,0 +1,176 @@
+/*
+ *  smdk_wm8994.c
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include "../codecs/wm8994.h"
+
+ /*
+  * Default CFG switch settings to use this driver:
+  *	SMDKV310: CFG5-1000, CFG7-111111
+  */
+
+ /*
+  * Configure audio route as :-
+  * $ amixer sset 'DAC1' on,on
+  * $ amixer sset 'Right Headphone Mux' 'DAC'
+  * $ amixer sset 'Left Headphone Mux' 'DAC'
+  * $ amixer sset 'DAC1R Mixer AIF1.1' on
+  * $ amixer sset 'DAC1L Mixer AIF1.1' on
+  * $ amixer sset 'IN2L' on
+  * $ amixer sset 'IN2L PGA IN2LN' on
+  * $ amixer sset 'MIXINL IN2L' on
+  * $ amixer sset 'AIF1ADC1L Mixer ADC/DMIC' on
+  * $ amixer sset 'IN2R' on
+  * $ amixer sset 'IN2R PGA IN2RN' on
+  * $ amixer sset 'MIXINR IN2R' on
+  * $ amixer sset 'AIF1ADC1R Mixer ADC/DMIC' on
+  */
+
+/* SMDK has a 16.934MHz crystal attached to the WM8994 */
+#define SMDK_WM8994_FREQ 16934000
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out;
+	int ret;
+
+	/* AIF1CLK should be >=3MHz for optimal performance */
+	if (params_rate(params) == 8000 || params_rate(params) == 11025)
+		pll_out = params_rate(params) * 512;
+	else
+		pll_out = params_rate(params) * 256;
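+	/* e.g. 8000Hz x 512fs = 4096000Hz; at 256fs it would be only 2048000Hz */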
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+					SMDK_WM8994_FREQ, pll_out);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
+					pll_out, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * SMDK WM8994 DAI operations.
+ */
+static struct snd_soc_ops smdk_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+static int smdk_wm8994_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* HeadPhone */
+	snd_soc_dapm_enable_pin(dapm, "HPOUT1R");
+	snd_soc_dapm_enable_pin(dapm, "HPOUT1L");
+
+	/* MicIn */
+	snd_soc_dapm_enable_pin(dapm, "IN1LN");
+	snd_soc_dapm_enable_pin(dapm, "IN1RN");
+
+	/* LineIn */
+	snd_soc_dapm_enable_pin(dapm, "IN2LN");
+	snd_soc_dapm_enable_pin(dapm, "IN2RN");
+
+	/* Other pins NC */
+	snd_soc_dapm_nc_pin(dapm, "HPOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "HPOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTLN");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTLP");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "IN1LP");
+	snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+	snd_soc_dapm_nc_pin(dapm, "IN1RP");
+	snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link smdk_dai[] = {
+	{ /* Primary DAI i/f */
+		.name = "WM8994 AIF1",
+		.stream_name = "Pri_Dai",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8994-aif1",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8994-codec",
+		.init = smdk_wm8994_init_paiftx,
+		.ops = &smdk_ops,
+	}, { /* Sec_Fifo Playback i/f */
+		.name = "Sec_FIFO TX",
+		.stream_name = "Sec_Dai",
+		.cpu_dai_name = "samsung-i2s.4",
+		.codec_dai_name = "wm8994-aif1",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8994-codec",
+		.ops = &smdk_ops,
+	},
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-I2S",
+	.dai_link = smdk_dai,
+	.num_links = ARRAY_SIZE(smdk_dai),
+};
+
+static struct platform_device *smdk_snd_device;
+
+static int __init smdk_audio_init(void)
+{
+	int ret;
+
+	smdk_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk_snd_device, &smdk);
+
+	ret = platform_device_add(smdk_snd_device);
+	if (ret)
+		platform_device_put(smdk_snd_device);
+
+	return ret;
+}
+module_init(smdk_audio_init);
+
+static void __exit smdk_audio_exit(void)
+{
+	platform_device_unregister(smdk_snd_device);
+}
+module_exit(smdk_audio_exit);
+
+MODULE_DESCRIPTION("ALSA SoC SMDK WM8994");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
new file mode 100644
index 0000000..ae5fed6
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm9713.c
@@ -0,0 +1,111 @@
+/*
+ * smdk_wm9713.c  --  SoC audio for SMDK
+ *
+ * Copyright 2010 Samsung Electronics Co. Ltd.
+ * Author: Jaswinder Singh Brar <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <sound/soc.h>
+
+#include "dma.h"
+#include "ac97.h"
+
+static struct snd_soc_card smdk;
+
+/*
+ * Default CFG switch settings to use this driver:
+ *
+ *   SMDK6410: Set CFG1 1-3 On, CFG2 1-4 Off
+ *   SMDKC100: Set CFG6 1-3 On, CFG7 1   On
+ *   SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On
+ *   SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On
+ *   SMDKV310: Set CFG2 1-2 Off, CFG4 All On, CFG7 All Off, CFG8 1-On
+ */
+
+/*
+ Playback (HeadPhone):-
+	$ amixer sset 'Headphone' unmute
+	$ amixer sset 'Right Headphone Out Mux' 'Headphone'
+	$ amixer sset 'Left Headphone Out Mux' 'Headphone'
+	$ amixer sset 'Right HP Mixer PCM' unmute
+	$ amixer sset 'Left HP Mixer PCM' unmute
+
+ Capture (LineIn):-
+	$ amixer sset 'Right Capture Source' 'Line'
+	$ amixer sset 'Left Capture Source' 'Line'
+*/
+
+static struct snd_soc_dai_link smdk_dai = {
+	.name = "AC97",
+	.stream_name = "AC97 PCM",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "samsung-ac97",
+	.codec_dai_name = "wm9713-hifi",
+	.codec_name = "wm9713-codec",
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK WM9713",
+	.dai_link = &smdk_dai,
+	.num_links = 1,
+};
+
+static struct platform_device *smdk_snd_wm9713_device;
+static struct platform_device *smdk_snd_ac97_device;
+
+static int __init smdk_init(void)
+{
+	int ret;
+
+	smdk_snd_wm9713_device = platform_device_alloc("wm9713-codec", -1);
+	if (!smdk_snd_wm9713_device)
+		return -ENOMEM;
+
+	ret = platform_device_add(smdk_snd_wm9713_device);
+	if (ret)
+		goto err1;
+
+	smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_ac97_device) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	platform_set_drvdata(smdk_snd_ac97_device, &smdk);
+
+	ret = platform_device_add(smdk_snd_ac97_device);
+	if (ret)
+		goto err3;
+
+	return 0;
+
+err3:
+	platform_device_put(smdk_snd_ac97_device);
+err2:
+	platform_device_del(smdk_snd_wm9713_device);
+err1:
+	platform_device_put(smdk_snd_wm9713_device);
+	return ret;
+}
+
+static void __exit smdk_exit(void)
+{
+	platform_device_unregister(smdk_snd_ac97_device);
+	platform_device_unregister(smdk_snd_wm9713_device);
+}
+
+module_init(smdk_init);
+module_exit(smdk_exit);
+
+/* Module information */
+MODULE_AUTHOR("Jaswinder Singh Brar, jassi.brar@samsung.com");
+MODULE_DESCRIPTION("ALSA SoC SMDK+WM9713");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
new file mode 100644
index 0000000..f081640
--- /dev/null
+++ b/sound/soc/samsung/spdif.c
@@ -0,0 +1,501 @@
+/* sound/soc/samsung/spdif.c
+ *
+ * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <plat/audio.h>
+#include <mach/dma.h>
+
+#include "dma.h"
+#include "spdif.h"
+
+/* Registers */
+#define CLKCON				0x00
+#define CON				0x04
+#define BSTAS				0x08
+#define CSTAS				0x0C
+#define DATA_OUTBUF			0x10
+#define DCNT				0x14
+#define BSTAS_S				0x18
+#define DCNT_S				0x1C
+
+#define CLKCTL_MASK			0x7
+#define CLKCTL_MCLK_EXT			(0x1 << 2)
+#define CLKCTL_PWR_ON			(0x1 << 0)
+
+#define CON_MASK			0x3ffffff
+#define CON_FIFO_TH_SHIFT		19
+#define CON_FIFO_TH_MASK		(0x7 << 19)
+#define CON_USERDATA_23RDBIT		(0x1 << 12)
+
+#define CON_SW_RESET			(0x1 << 5)
+
+#define CON_MCLKDIV_MASK		(0x3 << 3)
+#define CON_MCLKDIV_256FS		(0x0 << 3)
+#define CON_MCLKDIV_384FS		(0x1 << 3)
+#define CON_MCLKDIV_512FS		(0x2 << 3)
+
+#define CON_PCM_MASK			(0x3 << 1)
+#define CON_PCM_16BIT			(0x0 << 1)
+#define CON_PCM_20BIT			(0x1 << 1)
+#define CON_PCM_24BIT			(0x2 << 1)
+
+#define CON_PCM_DATA			(0x1 << 0)
+
+#define CSTAS_MASK			0x3fffffff
+#define CSTAS_SAMP_FREQ_MASK		(0xF << 24)
+#define CSTAS_SAMP_FREQ_44		(0x0 << 24)
+#define CSTAS_SAMP_FREQ_48		(0x2 << 24)
+#define CSTAS_SAMP_FREQ_32		(0x3 << 24)
+#define CSTAS_SAMP_FREQ_96		(0xA << 24)
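+/* The sampling frequency values above encode the IEC 60958 consumer
+ * channel status codes (IEC958_AES3_CON_FS_* in <sound/asoundef.h>).
+ */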
+
+#define CSTAS_CATEGORY_MASK		(0xFF << 8)
+#define CSTAS_CATEGORY_CODE_CDP		(0x01 << 8)
+
+#define CSTAS_NO_COPYRIGHT		(0x1 << 2)
+
+/**
+ * struct samsung_spdif_info - Samsung S/PDIF Controller information
+ * @lock: Spin lock for S/PDIF.
+ * @dev: The parent device passed to use from the probe.
+ * @regs: The pointer to the device register block.
+ * @clk_rate: Current clock rate used to calculate the ratio.
+ * @pclk: The peri-clock pointer for spdif master operation.
+ * @sclk: The source clock pointer for making sync signals.
+ * @saved_clkcon: Backup of the clkcon reg. across suspend.
+ * @saved_con: Backup of the con reg. across suspend.
+ * @saved_cstas: Backup of the cstas reg. across suspend.
+ * @dma_playback: DMA information for playback channel.
+ */
+struct samsung_spdif_info {
+	spinlock_t	lock;
+	struct device	*dev;
+	void __iomem	*regs;
+	unsigned long	clk_rate;
+	struct clk	*pclk;
+	struct clk	*sclk;
+	u32		saved_clkcon;
+	u32		saved_con;
+	u32		saved_cstas;
+	struct s3c_dma_params	*dma_playback;
+};
+
+static struct s3c2410_dma_client spdif_dma_client_out = {
+	.name		= "S/PDIF Stereo out",
+};
+
+static struct s3c_dma_params spdif_stereo_out;
+static struct samsung_spdif_info spdif_info;
+
+static inline struct samsung_spdif_info *to_info(struct snd_soc_dai *cpu_dai)
+{
+	return snd_soc_dai_get_drvdata(cpu_dai);
+}
+
+static void spdif_snd_txctrl(struct samsung_spdif_info *spdif, int on)
+{
+	void __iomem *regs = spdif->regs;
+	u32 clkcon;
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
+	if (on)
+		writel(clkcon | CLKCTL_PWR_ON, regs + CLKCON);
+	else
+		writel(clkcon & ~CLKCTL_PWR_ON, regs + CLKCON);
+}
+
+static int spdif_set_sysclk(struct snd_soc_dai *cpu_dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct samsung_spdif_info *spdif = to_info(cpu_dai);
+	u32 clkcon;
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	clkcon = readl(spdif->regs + CLKCON);
+
+	if (clk_id == SND_SOC_SPDIF_INT_MCLK)
+		clkcon &= ~CLKCTL_MCLK_EXT;
+	else
+		clkcon |= CLKCTL_MCLK_EXT;
+
+	writel(clkcon, spdif->regs + CLKCON);
+
+	spdif->clk_rate = freq;
+
+	return 0;
+}
+
+static int spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
+	unsigned long flags;
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		spin_lock_irqsave(&spdif->lock, flags);
+		spdif_snd_txctrl(spdif, 1);
+		spin_unlock_irqrestore(&spdif->lock, flags);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		spin_lock_irqsave(&spdif->lock, flags);
+		spdif_snd_txctrl(spdif, 0);
+		spin_unlock_irqrestore(&spdif->lock, flags);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int spdif_sysclk_ratios[] = {
+	512, 384, 256,
+};
+
+static int spdif_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *socdai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
+	void __iomem *regs = spdif->regs;
+	struct s3c_dma_params *dma_data;
+	u32 con, clkcon, cstas;
+	unsigned long flags;
+	int i, ratio;
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dma_data = spdif->dma_playback;
+	else {
+		dev_err(spdif->dev, "Capture is not supported\n");
+		return -EINVAL;
+	}
+
+	snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_data);
+
+	spin_lock_irqsave(&spdif->lock, flags);
+
+	con = readl(regs + CON) & CON_MASK;
+	cstas = readl(regs + CSTAS) & CSTAS_MASK;
+	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
+
+	con &= ~CON_FIFO_TH_MASK;
+	con |= (0x7 << CON_FIFO_TH_SHIFT);
+	con |= CON_USERDATA_23RDBIT;
+	con |= CON_PCM_DATA;
+
+	con &= ~CON_PCM_MASK;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		con |= CON_PCM_16BIT;
+		break;
+	default:
+		dev_err(spdif->dev, "Unsupported data size.\n");
+		goto err;
+	}
+
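+	/* sysclk must be a 256, 384 or 512 multiple of the sample rate,
+	 * e.g. 44100Hz x 256fs = 11289600Hz
+	 */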
+	ratio = spdif->clk_rate / params_rate(params);
+	for (i = 0; i < ARRAY_SIZE(spdif_sysclk_ratios); i++)
+		if (ratio == spdif_sysclk_ratios[i])
+			break;
+	if (i == ARRAY_SIZE(spdif_sysclk_ratios)) {
+		dev_err(spdif->dev, "Invalid clock ratio %ld/%d\n",
+				spdif->clk_rate, params_rate(params));
+		goto err;
+	}
+
+	con &= ~CON_MCLKDIV_MASK;
+	switch (ratio) {
+	case 256:
+		con |= CON_MCLKDIV_256FS;
+		break;
+	case 384:
+		con |= CON_MCLKDIV_384FS;
+		break;
+	case 512:
+		con |= CON_MCLKDIV_512FS;
+		break;
+	}
+
+	cstas &= ~CSTAS_SAMP_FREQ_MASK;
+	switch (params_rate(params)) {
+	case 44100:
+		cstas |= CSTAS_SAMP_FREQ_44;
+		break;
+	case 48000:
+		cstas |= CSTAS_SAMP_FREQ_48;
+		break;
+	case 32000:
+		cstas |= CSTAS_SAMP_FREQ_32;
+		break;
+	case 96000:
+		cstas |= CSTAS_SAMP_FREQ_96;
+		break;
+	default:
+		dev_err(spdif->dev, "Invalid sampling rate %d\n",
+				params_rate(params));
+		goto err;
+	}
+
+	cstas &= ~CSTAS_CATEGORY_MASK;
+	cstas |= CSTAS_CATEGORY_CODE_CDP;
+	cstas |= CSTAS_NO_COPYRIGHT;
+
+	writel(con, regs + CON);
+	writel(cstas, regs + CSTAS);
+	writel(clkcon, regs + CLKCON);
+
+	spin_unlock_irqrestore(&spdif->lock, flags);
+
+	return 0;
+err:
+	spin_unlock_irqrestore(&spdif->lock, flags);
+	return -EINVAL;
+}
+
+static void spdif_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct samsung_spdif_info *spdif = to_info(rtd->cpu_dai);
+	void __iomem *regs = spdif->regs;
+	u32 con, clkcon;
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	con = readl(regs + CON) & CON_MASK;
+	clkcon = readl(regs + CLKCON) & CLKCTL_MASK;
+
+	writel(con | CON_SW_RESET, regs + CON);
+	cpu_relax();
+
+	writel(clkcon & ~CLKCTL_PWR_ON, regs + CLKCON);
+}
+
+#ifdef CONFIG_PM
+static int spdif_suspend(struct snd_soc_dai *cpu_dai)
+{
+	struct samsung_spdif_info *spdif = to_info(cpu_dai);
+	u32 con = spdif->saved_con;
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	spdif->saved_clkcon = readl(spdif->regs	+ CLKCON) & CLKCTL_MASK;
+	spdif->saved_con = readl(spdif->regs + CON) & CON_MASK;
+	spdif->saved_cstas = readl(spdif->regs + CSTAS) & CSTAS_MASK;
+
+	writel(con | CON_SW_RESET, spdif->regs + CON);
+	cpu_relax();
+
+	return 0;
+}
+
+static int spdif_resume(struct snd_soc_dai *cpu_dai)
+{
+	struct samsung_spdif_info *spdif = to_info(cpu_dai);
+
+	dev_dbg(spdif->dev, "Entered %s\n", __func__);
+
+	writel(spdif->saved_clkcon, spdif->regs	+ CLKCON);
+	writel(spdif->saved_con, spdif->regs + CON);
+	writel(spdif->saved_cstas, spdif->regs + CSTAS);
+
+	return 0;
+}
+#else
+#define spdif_suspend NULL
+#define spdif_resume NULL
+#endif
+
+static struct snd_soc_dai_ops spdif_dai_ops = {
+	.set_sysclk	= spdif_set_sysclk,
+	.trigger	= spdif_trigger,
+	.hw_params	= spdif_hw_params,
+	.shutdown	= spdif_shutdown,
+};
+
+struct snd_soc_dai_driver samsung_spdif_dai = {
+	.name = "samsung-spdif",
+	.playback = {
+		.stream_name = "S/PDIF Playback",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = (SNDRV_PCM_RATE_32000 |
+				SNDRV_PCM_RATE_44100 |
+				SNDRV_PCM_RATE_48000 |
+				SNDRV_PCM_RATE_96000),
+		.formats = SNDRV_PCM_FMTBIT_S16_LE, },
+	.ops = &spdif_dai_ops,
+	.suspend = spdif_suspend,
+	.resume = spdif_resume,
+};
+
+static __devinit int spdif_probe(struct platform_device *pdev)
+{
+	struct s3c_audio_pdata *spdif_pdata;
+	struct resource *mem_res, *dma_res;
+	struct samsung_spdif_info *spdif;
+	int ret;
+
+	spdif_pdata = pdev->dev.platform_data;
+
+	dev_dbg(&pdev->dev, "Entered %s\n", __func__);
+
+	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!dma_res) {
+		dev_err(&pdev->dev, "Unable to get dma resource.\n");
+		return -ENXIO;
+	}
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		dev_err(&pdev->dev, "Unable to get register resource.\n");
+		return -ENXIO;
+	}
+
+	if (spdif_pdata && spdif_pdata->cfg_gpio
+			&& spdif_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure GPIO pins\n");
+		return -EINVAL;
+	}
+
+	spdif = &spdif_info;
+	spdif->dev = &pdev->dev;
+
+	spin_lock_init(&spdif->lock);
+
+	spdif->pclk = clk_get(&pdev->dev, "spdif");
+	if (IS_ERR(spdif->pclk)) {
+		dev_err(&pdev->dev, "failed to get peri-clock\n");
+		ret = -ENOENT;
+		goto err0;
+	}
+	clk_enable(spdif->pclk);
+
+	spdif->sclk = clk_get(&pdev->dev, "sclk_spdif");
+	if (IS_ERR(spdif->sclk)) {
+		dev_err(&pdev->dev, "failed to get internal source clock\n");
+		ret = -ENOENT;
+		goto err1;
+	}
+	clk_enable(spdif->sclk);
+
+	/* Request S/PDIF Register's memory region */
+	if (!request_mem_region(mem_res->start,
+				resource_size(mem_res), "samsung-spdif")) {
+		dev_err(&pdev->dev, "Unable to request register region\n");
+		ret = -EBUSY;
+		goto err2;
+	}
+
+	spdif->regs = ioremap(mem_res->start, 0x100);
+	if (spdif->regs == NULL) {
+		dev_err(&pdev->dev, "Cannot ioremap registers\n");
+		ret = -ENXIO;
+		goto err3;
+	}
+
+	dev_set_drvdata(&pdev->dev, spdif);
+
+	ret = snd_soc_register_dai(&pdev->dev, &samsung_spdif_dai);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "fail to register dai\n");
+		goto err4;
+	}
+
+	spdif_stereo_out.dma_size = 2;
+	spdif_stereo_out.client = &spdif_dma_client_out;
+	spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
+	spdif_stereo_out.channel = dma_res->start;
+
+	spdif->dma_playback = &spdif_stereo_out;
+
+	return 0;
+
+err4:
+	iounmap(spdif->regs);
+err3:
+	release_mem_region(mem_res->start, resource_size(mem_res));
+err2:
+	clk_disable(spdif->sclk);
+	clk_put(spdif->sclk);
+err1:
+	clk_disable(spdif->pclk);
+	clk_put(spdif->pclk);
+err0:
+	return ret;
+}
+
+static __devexit int spdif_remove(struct platform_device *pdev)
+{
+	struct samsung_spdif_info *spdif = &spdif_info;
+	struct resource *mem_res;
+
+	snd_soc_unregister_dai(&pdev->dev);
+
+	iounmap(spdif->regs);
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (mem_res)
+		release_mem_region(mem_res->start, resource_size(mem_res));
+
+	clk_disable(spdif->sclk);
+	clk_put(spdif->sclk);
+	clk_disable(spdif->pclk);
+	clk_put(spdif->pclk);
+
+	return 0;
+}
+
+static struct platform_driver samsung_spdif_driver = {
+	.probe	= spdif_probe,
+	.remove	= spdif_remove,
+	.driver	= {
+		.name	= "samsung-spdif",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init spdif_init(void)
+{
+	return platform_driver_register(&samsung_spdif_driver);
+}
+module_init(spdif_init);
+
+static void __exit spdif_exit(void)
+{
+	platform_driver_unregister(&samsung_spdif_driver);
+}
+module_exit(spdif_exit);
+
+MODULE_AUTHOR("Seungwhan Youn, <sw.youn@samsung.com>");
+MODULE_DESCRIPTION("Samsung S/PDIF Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:samsung-spdif");
diff --git a/sound/soc/samsung/spdif.h b/sound/soc/samsung/spdif.h
new file mode 100644
index 0000000..4f72cb4
--- /dev/null
+++ b/sound/soc/samsung/spdif.h
@@ -0,0 +1,19 @@
+/* sound/soc/samsung/spdif.h
+ *
+ * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *		http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SND_SOC_SAMSUNG_SPDIF_H
+#define __SND_SOC_SAMSUNG_SPDIF_H	__FILE__
+
+#define SND_SOC_SPDIF_INT_MCLK		0
+#define SND_SOC_SPDIF_EXT_MCLK		1
+
+#endif	/* __SND_SOC_SAMSUNG_SPDIF_H */
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 7f0a496..d8e06a6 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -48,7 +48,7 @@
 
 config SND_FSI_AK4642
 	tristate "FSI-AK4642 sound support"
-	depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE
+	depends on SND_SOC_SH4_FSI && I2C
 	select SND_SOC_AK4642
 	help
 	  This option enables generic sound support for the
@@ -56,7 +56,7 @@
 
 config SND_FSI_DA7210
 	tristate "FSI-DA7210 sound support"
-	depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE
+	depends on SND_SOC_SH4_FSI && I2C
 	select SND_SOC_DA7210
 	help
 	  This option enables generic sound support for the
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
index d96602d..a14820a 100644
--- a/sound/soc/sh/fsi-ak4642.c
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -12,6 +12,14 @@
 #include <linux/platform_device.h>
 #include <sound/sh_fsi.h>
 
+struct fsi_ak4642_data {
+	const char *name;
+	const char *card;
+	const char *cpu_dai;
+	const char *codec;
+	const char *platform;
+};
+
 static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_dai *dai = rtd->codec_dai;
@@ -27,37 +35,42 @@
 }
 
 static struct snd_soc_dai_link fsi_dai_link = {
-	.name		= "AK4642",
-	.stream_name	= "AK4642",
-	.cpu_dai_name	= "fsia-dai", /* fsi A */
 	.codec_dai_name	= "ak4642-hifi",
-#ifdef CONFIG_MACH_AP4EVB
-	.platform_name	= "sh_fsi2",
-	.codec_name	= "ak4642-codec.0-0013",
-#else
-	.platform_name	= "sh_fsi.0",
-	.codec_name	= "ak4642-codec.0-0012",
-#endif
 	.init		= fsi_ak4642_dai_init,
-	.ops		= NULL,
 };
 
 static struct snd_soc_card fsi_soc_card  = {
-	.name		= "FSI (AK4642)",
 	.dai_link	= &fsi_dai_link,
 	.num_links	= 1,
 };
 
 static struct platform_device *fsi_snd_device;
 
-static int __init fsi_ak4642_init(void)
+static int fsi_ak4642_probe(struct platform_device *pdev)
 {
 	int ret = -ENOMEM;
+	const struct platform_device_id	*id_entry;
+	struct fsi_ak4642_data *pdata;
+
+	id_entry = pdev->id_entry;
+	if (!id_entry) {
+		dev_err(&pdev->dev, "unknown fsi ak4642\n");
+		return -ENODEV;
+	}
+
+	pdata = (struct fsi_ak4642_data *)id_entry->driver_data;
 
 	fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_A);
 	if (!fsi_snd_device)
 		goto out;
 
+	fsi_dai_link.name		= pdata->name;
+	fsi_dai_link.stream_name	= pdata->name;
+	fsi_dai_link.cpu_dai_name	= pdata->cpu_dai;
+	fsi_dai_link.platform_name	= pdata->platform;
+	fsi_dai_link.codec_name		= pdata->codec;
+	fsi_soc_card.name		= pdata->card;
+
 	platform_set_drvdata(fsi_snd_device, &fsi_soc_card);
 	ret = platform_device_add(fsi_snd_device);
 
@@ -68,9 +81,108 @@
 	return ret;
 }
 
-static void __exit fsi_ak4642_exit(void)
+static int fsi_ak4642_remove(struct platform_device *pdev)
 {
 	platform_device_unregister(fsi_snd_device);
+	return 0;
+}
+
+static struct fsi_ak4642_data fsi_a_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSIA (AK4642)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_b_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSIB (AK4642)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_a_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSIA (AK4643)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_b_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSIB (AK4643)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi2_a_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSI2A (AK4642)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_b_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSI2B (AK4642)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_a_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSI2A (AK4643)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_b_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSI2B (AK4643)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi2",
+};
+
+static struct platform_device_id fsi_id_table[] = {
+	/* FSI */
+	{ "sh_fsi_a_ak4642",	(kernel_ulong_t)&fsi_a_ak4642 },
+	{ "sh_fsi_b_ak4642",	(kernel_ulong_t)&fsi_b_ak4642 },
+	{ "sh_fsi_a_ak4643",	(kernel_ulong_t)&fsi_a_ak4643 },
+	{ "sh_fsi_b_ak4643",	(kernel_ulong_t)&fsi_b_ak4643 },
+
+	/* FSI 2 */
+	{ "sh_fsi2_a_ak4642",	(kernel_ulong_t)&fsi2_a_ak4642 },
+	{ "sh_fsi2_b_ak4642",	(kernel_ulong_t)&fsi2_b_ak4642 },
+	{ "sh_fsi2_a_ak4643",	(kernel_ulong_t)&fsi2_a_ak4643 },
+	{ "sh_fsi2_b_ak4643",	(kernel_ulong_t)&fsi2_b_ak4643 },
+	{},
+};
+
+static struct platform_driver fsi_ak4642 = {
+	.driver = {
+		.name	= "fsi-ak4642-audio",
+	},
+	.probe		= fsi_ak4642_probe,
+	.remove		= fsi_ak4642_remove,
+	.id_table	= fsi_id_table,
+};
+
+static int __init fsi_ak4642_init(void)
+{
+	return platform_driver_register(&fsi_ak4642);
+}
+
+static void __exit fsi_ak4642_exit(void)
+{
+	platform_driver_unregister(&fsi_ak4642);
 }
 
 module_init(fsi_ak4642_init);
diff --git a/sound/soc/sh/fsi-da7210.c b/sound/soc/sh/fsi-da7210.c
index a6adb6e..e8df9da 100644
--- a/sound/soc/sh/fsi-da7210.c
+++ b/sound/soc/sh/fsi-da7210.c
@@ -18,7 +18,7 @@
 	struct snd_soc_dai *dai = rtd->codec_dai;
 
 	return snd_soc_dai_set_fmt(dai,
-				   SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				   SND_SOC_DAIFMT_I2S |
 				   SND_SOC_DAIFMT_CBM_CFM);
 }
 
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 4c2404b..2b06402 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -19,20 +19,26 @@
 #include <sound/soc.h>
 #include <sound/sh_fsi.h>
 
-#define DO_FMT		0x0000
-#define DOFF_CTL	0x0004
-#define DOFF_ST		0x0008
-#define DI_FMT		0x000C
-#define DIFF_CTL	0x0010
-#define DIFF_ST		0x0014
-#define CKG1		0x0018
-#define CKG2		0x001C
-#define DIDT		0x0020
-#define DODT		0x0024
-#define MUTE_ST		0x0028
-#define OUT_SEL		0x0030
-#define REG_END		OUT_SEL
+/* PortA/PortB register */
+#define REG_DO_FMT	0x0000
+#define REG_DOFF_CTL	0x0004
+#define REG_DOFF_ST	0x0008
+#define REG_DI_FMT	0x000C
+#define REG_DIFF_CTL	0x0010
+#define REG_DIFF_ST	0x0014
+#define REG_CKG1	0x0018
+#define REG_CKG2	0x001C
+#define REG_DIDT	0x0020
+#define REG_DODT	0x0024
+#define REG_MUTE_ST	0x0028
+#define REG_OUT_SEL	0x0030
 
+/* master register */
+#define MST_CLK_RST	0x0210
+#define MST_SOFT_RST	0x0214
+#define MST_FIFO_SZ	0x0218
+
+/* core register (depend on FSI version) */
 #define A_MST_CTLR	0x0180
 #define B_MST_CTLR	0x01A0
 #define CPU_INT_ST	0x01F4
@@ -41,22 +47,23 @@
 #define INT_ST		0x0200
 #define IEMSK		0x0204
 #define IMSK		0x0208
-#define MUTE		0x020C
-#define CLK_RST		0x0210
-#define SOFT_RST	0x0214
-#define FIFO_SZ		0x0218
-#define MREG_START	A_MST_CTLR
-#define MREG_END	FIFO_SZ
 
 /* DO_FMT */
 /* DI_FMT */
+#define CR_BWS_24	(0x0 << 20) /* FSI2 */
+#define CR_BWS_16	(0x1 << 20) /* FSI2 */
+#define CR_BWS_20	(0x2 << 20) /* FSI2 */
+
+#define CR_DTMD_PCM		(0x0 << 8) /* FSI2 */
+#define CR_DTMD_SPDIF_PCM	(0x1 << 8) /* FSI2 */
+#define CR_DTMD_SPDIF_STREAM	(0x2 << 8) /* FSI2 */
+
 #define CR_MONO		(0x0 << 4)
 #define CR_MONO_D	(0x1 << 4)
 #define CR_PCM		(0x2 << 4)
 #define CR_I2S		(0x3 << 4)
 #define CR_TDM		(0x4 << 4)
 #define CR_TDM_D	(0x5 << 4)
-#define CR_SPDIF	0x00100120
 
 /* DOFF_CTL */
 /* DIFF_CTL */
@@ -93,6 +100,10 @@
 #define IR		(1 <<  4) /* Interrupt Reset */
 #define FSISR		(1 <<  0) /* Software Reset */
 
+/* OUT_SEL (FSI2) */
+#define DMMD		(1 << 4) /* SPDIF output timing 0: Biphase only */
+				 /*			1: Biphase and serial */
+
 /* FIFO_SZ */
 #define FIFO_SZ_MASK	0x7
 
@@ -123,6 +134,9 @@
 	int buff_len;
 	int period_len;
 	int period_num;
+
+	int uerr_num;
+	int oerr_num;
 };
 
 struct fsi_priv {
@@ -133,8 +147,6 @@
 	struct fsi_stream capture;
 
 	long rate;
-
-	u32 mst_ctrl;
 };
 
 struct fsi_core {
@@ -143,6 +155,8 @@
 	u32 int_st;
 	u32 iemsk;
 	u32 imsk;
+	u32 a_mclk;
+	u32 b_mclk;
 };
 
 struct fsi_master {
@@ -182,62 +196,22 @@
 	__fsi_reg_write(reg, val);
 }
 
-static void fsi_reg_write(struct fsi_priv *fsi, u32 reg, u32 data)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
+#define fsi_reg_write(p, r, d)\
+	__fsi_reg_write((u32)(p->base + REG_##r), d)
 
-	__fsi_reg_write((u32)(fsi->base + reg), data);
-}
+#define fsi_reg_read(p, r)\
+	__fsi_reg_read((u32)(p->base + REG_##r))
 
-static u32 fsi_reg_read(struct fsi_priv *fsi, u32 reg)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return 0;
-	}
+#define fsi_reg_mask_set(p, r, m, d)\
+	__fsi_reg_mask_set((u32)(p->base + REG_##r), m, d)
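+
+/* e.g. fsi_reg_write(fsi, DO_FMT, data) expands to
+ * __fsi_reg_write((u32)(fsi->base + REG_DO_FMT), data)
+ */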
 
-	return __fsi_reg_read((u32)(fsi->base + reg));
-}
-
-static void fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
-	__fsi_reg_mask_set((u32)(fsi->base + reg), mask, data);
-}
-
-static void fsi_master_write(struct fsi_master *master, u32 reg, u32 data)
-{
-	unsigned long flags;
-
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
-	spin_lock_irqsave(&master->lock, flags);
-	__fsi_reg_write((u32)(master->base + reg), data);
-	spin_unlock_irqrestore(&master->lock, flags);
-}
-
-static u32 fsi_master_read(struct fsi_master *master, u32 reg)
+#define fsi_master_read(p, r) _fsi_master_read(p, MST_##r)
+#define fsi_core_read(p, r)   _fsi_master_read(p, p->core->r)
+static u32 _fsi_master_read(struct fsi_master *master, u32 reg)
 {
 	u32 ret;
 	unsigned long flags;
 
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return 0;
-	}
-
 	spin_lock_irqsave(&master->lock, flags);
 	ret = __fsi_reg_read((u32)(master->base + reg));
 	spin_unlock_irqrestore(&master->lock, flags);
@@ -245,17 +219,13 @@
 	return ret;
 }
 
-static void fsi_master_mask_set(struct fsi_master *master,
+#define fsi_master_mask_set(p, r, m, d) _fsi_master_mask_set(p, MST_##r, m, d)
+#define fsi_core_mask_set(p, r, m, d)  _fsi_master_mask_set(p, p->core->r, m, d)
+static void _fsi_master_mask_set(struct fsi_master *master,
 			       u32 reg, u32 mask, u32 data)
 {
 	unsigned long flags;
 
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
 	spin_lock_irqsave(&master->lock, flags);
 	__fsi_reg_mask_set((u32)(master->base + reg), mask, data);
 	spin_unlock_irqrestore(&master->lock, flags);
@@ -359,27 +329,41 @@
 	io->buff_offset	= 0;
 	io->period_len	= period_len;
 	io->period_num	= 0;
+	io->oerr_num	= -1; /* ignore 1st err */
+	io->uerr_num	= -1; /* ignore 1st err */
 }
 
 static void fsi_stream_pop(struct fsi_priv *fsi, int is_play)
 {
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
+	struct snd_soc_dai *dai = fsi_get_dai(io->substream);
+
+
+	if (io->oerr_num > 0)
+		dev_err(dai->dev, "over_run = %d\n", io->oerr_num);
+
+	if (io->uerr_num > 0)
+		dev_err(dai->dev, "under_run = %d\n", io->uerr_num);
 
 	io->substream	= NULL;
 	io->buff_len	= 0;
 	io->buff_offset	= 0;
 	io->period_len	= 0;
 	io->period_num	= 0;
+	io->oerr_num	= 0;
+	io->uerr_num	= 0;
 }
 
 static int fsi_get_fifo_data_num(struct fsi_priv *fsi, int is_play)
 {
 	u32 status;
-	u32 reg = is_play ? DOFF_ST : DIFF_ST;
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
 	int data_num;
 
-	status = fsi_reg_read(fsi, reg);
+	status = is_play ?
+		fsi_reg_read(fsi, DOFF_ST) :
+		fsi_reg_read(fsi, DIFF_ST);
+
 	data_num = 0x1ff & (status >> 8);
 	data_num *= io->chan_num;
 
@@ -406,6 +390,27 @@
 	return frames_to_bytes(runtime, 1) / io->chan_num;
 }
 
+static void fsi_count_fifo_err(struct fsi_priv *fsi)
+{
+	u32 ostatus = fsi_reg_read(fsi, DOFF_ST);
+	u32 istatus = fsi_reg_read(fsi, DIFF_ST);
+
+	if (ostatus & ERR_OVER)
+		fsi->playback.oerr_num++;
+
+	if (ostatus & ERR_UNDER)
+		fsi->playback.uerr_num++;
+
+	if (istatus & ERR_OVER)
+		fsi->capture.oerr_num++;
+
+	if (istatus & ERR_UNDER)
+		fsi->capture.uerr_num++;
+
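+	/* clear the status registers */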
+	fsi_reg_write(fsi, DOFF_ST, 0);
+	fsi_reg_write(fsi, DIFF_ST, 0);
+}
+
 /*
  *		dma function
  */
@@ -473,8 +478,8 @@
 	u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play));
 	struct fsi_master *master = fsi_get_master(fsi);
 
-	fsi_master_mask_set(master, master->core->imsk,  data, data);
-	fsi_master_mask_set(master, master->core->iemsk, data, data);
+	fsi_core_mask_set(master, imsk,  data, data);
+	fsi_core_mask_set(master, iemsk, data, data);
 }
 
 static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
@@ -482,18 +487,13 @@
 	u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play));
 	struct fsi_master *master = fsi_get_master(fsi);
 
-	fsi_master_mask_set(master, master->core->imsk,  data, 0);
-	fsi_master_mask_set(master, master->core->iemsk, data, 0);
+	fsi_core_mask_set(master, imsk,  data, 0);
+	fsi_core_mask_set(master, iemsk, data, 0);
 }
 
 static u32 fsi_irq_get_status(struct fsi_master *master)
 {
-	return fsi_master_read(master, master->core->int_st);
-}
-
-static void fsi_irq_clear_all_status(struct fsi_master *master)
-{
-	fsi_master_write(master, master->core->int_st, 0);
+	return fsi_core_read(master, int_st);
 }
 
 static void fsi_irq_clear_status(struct fsi_priv *fsi)
@@ -505,7 +505,7 @@
 	data |= AB_IO(1, fsi_get_port_shift(fsi, 1));
 
 	/* clear interrupt factor */
-	fsi_master_mask_set(master, master->core->int_st, data, 0);
+	fsi_core_mask_set(master, int_st, data, 0);
 }
 
 /*
@@ -516,17 +516,19 @@
 static void fsi_spdif_clk_ctrl(struct fsi_priv *fsi, int enable)
 {
 	struct fsi_master *master = fsi_get_master(fsi);
-	u32 val = BP | SE;
+	u32 mask, val;
 
 	if (master->core->ver < 2) {
 		pr_err("fsi: register access err (%s)\n", __func__);
 		return;
 	}
 
-	if (enable)
-		fsi_master_mask_set(master, fsi->mst_ctrl, val, val);
-	else
-		fsi_master_mask_set(master, fsi->mst_ctrl, val, 0);
+	mask = BP | SE;
+	val = enable ? mask : 0;
+
+	fsi_is_port_a(fsi) ?
+		fsi_core_mask_set(master, a_mclk, mask, val) :
+		fsi_core_mask_set(master, b_mclk, mask, val);
 }
 
 /*
@@ -550,7 +552,7 @@
 {
 	struct fsi_master *master = fsi_get_master(fsi);
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
-	u32 ctrl, shift, i;
+	u32 shift, i;
 
 	/* get on-chip RAM capacity */
 	shift = fsi_master_read(master, FIFO_SZ);
@@ -583,13 +585,17 @@
 	dev_dbg(dai->dev, "%d channel %d store\n",
 		io->chan_num, io->fifo_max_num);
 
-	ctrl = is_play ? DOFF_CTL : DIFF_CTL;
-
-	/* set interrupt generation factor */
-	fsi_reg_write(fsi, ctrl, IRQ_HALF);
-
-	/* clear FIFO */
-	fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
+	/*
+	 * set interrupt generation factor
+	 * clear FIFO
+	 */
+	if (is_play) {
+		fsi_reg_write(fsi,	DOFF_CTL, IRQ_HALF);
+		fsi_reg_mask_set(fsi,	DOFF_CTL, FIFO_CLR, FIFO_CLR);
+	} else {
+		fsi_reg_write(fsi,	DIFF_CTL, IRQ_HALF);
+		fsi_reg_mask_set(fsi,	DIFF_CTL, FIFO_CLR, FIFO_CLR);
+	}
 }
 
 static void fsi_soft_all_reset(struct fsi_master *master)
@@ -604,13 +610,12 @@
 	mdelay(10);
 }
 
-static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int startup, int stream)
+static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int stream)
 {
 	struct snd_pcm_runtime *runtime;
 	struct snd_pcm_substream *substream = NULL;
 	int is_play = fsi_stream_is_play(stream);
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
-	u32 status_reg = is_play ? DOFF_ST : DIFF_ST;
 	int data_residue_num;
 	int data_num;
 	int data_num_max;
@@ -698,35 +703,20 @@
 	/* update buff_offset */
 	io->buff_offset += fsi_num2offset(data_num, ch_width);
 
-	/* check fifo status */
-	if (!startup) {
-		struct snd_soc_dai *dai = fsi_get_dai(substream);
-		u32 status = fsi_reg_read(fsi, status_reg);
-
-		if (status & ERR_OVER)
-			dev_err(dai->dev, "over run\n");
-		if (status & ERR_UNDER)
-			dev_err(dai->dev, "under run\n");
-	}
-	fsi_reg_write(fsi, status_reg, 0);
-
-	/* re-enable irq */
-	fsi_irq_enable(fsi, is_play);
-
 	if (over_period)
 		snd_pcm_period_elapsed(substream);
 
 	return 0;
 }
 
-static int fsi_data_pop(struct fsi_priv *fsi, int startup)
+static int fsi_data_pop(struct fsi_priv *fsi)
 {
-	return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_CAPTURE);
+	return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_CAPTURE);
 }
 
-static int fsi_data_push(struct fsi_priv *fsi, int startup)
+static int fsi_data_push(struct fsi_priv *fsi)
 {
-	return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_PLAYBACK);
+	return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_PLAYBACK);
 }
 
 static irqreturn_t fsi_interrupt(int irq, void *data)
@@ -739,15 +729,19 @@
 	fsi_master_mask_set(master, SOFT_RST, IR, IR);
 
 	if (int_st & AB_IO(1, AO_SHIFT))
-		fsi_data_push(&master->fsia, 0);
+		fsi_data_push(&master->fsia);
 	if (int_st & AB_IO(1, BO_SHIFT))
-		fsi_data_push(&master->fsib, 0);
+		fsi_data_push(&master->fsib);
 	if (int_st & AB_IO(1, AI_SHIFT))
-		fsi_data_pop(&master->fsia, 0);
+		fsi_data_pop(&master->fsia);
 	if (int_st & AB_IO(1, BI_SHIFT))
-		fsi_data_pop(&master->fsib, 0);
+		fsi_data_pop(&master->fsib);
 
-	fsi_irq_clear_all_status(master);
+	fsi_count_fifo_err(&master->fsia);
+	fsi_count_fifo_err(&master->fsib);
+
+	fsi_irq_clear_status(&master->fsia);
+	fsi_irq_clear_status(&master->fsib);
 
 	return IRQ_HANDLED;
 }
@@ -764,7 +758,6 @@
 	struct fsi_stream *io;
 	u32 flags = fsi_get_info_flags(fsi);
 	u32 fmt;
-	u32 reg;
 	u32 data;
 	int is_play = fsi_is_play(substream);
 	int is_master;
@@ -796,7 +789,6 @@
 
 	/* do fmt, di fmt */
 	data = 0;
-	reg = is_play ? DO_FMT : DI_FMT;
 	fmt = is_play ? SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags);
 	switch (fmt) {
 	case SH_FSI_FMT_MONO:
@@ -830,16 +822,18 @@
 			dev_err(dai->dev, "This FSI can not use SPDIF\n");
 			return -EINVAL;
 		}
-		data = CR_SPDIF;
+		data = CR_BWS_16 | CR_DTMD_SPDIF_PCM | CR_PCM;
 		io->chan_num = 2;
 		fsi_spdif_clk_ctrl(fsi, 1);
-		fsi_reg_mask_set(fsi, OUT_SEL, 0x0010, 0x0010);
+		fsi_reg_mask_set(fsi, OUT_SEL, DMMD, DMMD);
 		break;
 	default:
 		dev_err(dai->dev, "unknown format.\n");
 		return -EINVAL;
 	}
-	fsi_reg_write(fsi, reg, data);
+	is_play ?
+		fsi_reg_write(fsi, DO_FMT, data) :
+		fsi_reg_write(fsi, DI_FMT, data);
 
 	/* irq clear */
 	fsi_irq_disable(fsi, is_play);
@@ -883,7 +877,8 @@
 		fsi_stream_push(fsi, is_play, substream,
 				frames_to_bytes(runtime, runtime->buffer_size),
 				frames_to_bytes(runtime, runtime->period_size));
-		ret = is_play ? fsi_data_push(fsi, 1) : fsi_data_pop(fsi, 1);
+		ret = is_play ? fsi_data_push(fsi) : fsi_data_pop(fsi);
+		fsi_irq_enable(fsi, is_play);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		fsi_irq_disable(fsi, is_play);
@@ -1174,12 +1169,10 @@
 	/* FSI A setting */
 	master->fsia.base	= master->base;
 	master->fsia.master	= master;
-	master->fsia.mst_ctrl	= A_MST_CTLR;
 
 	/* FSI B setting */
 	master->fsib.base	= master->base + 0x40;
 	master->fsib.master	= master;
-	master->fsib.mst_ctrl	= B_MST_CTLR;
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
@@ -1266,6 +1259,8 @@
 	.int_st	= CPU_INT_ST,
 	.iemsk	= CPU_IEMSK,
 	.imsk	= CPU_IMSK,
+	.a_mclk	= A_MST_CTLR,
+	.b_mclk	= B_MST_CTLR,
 };
 
 static struct platform_device_id fsi_id_table[] = {
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index ac6c49c..6088a6a 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -8,11 +8,11 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clkdev.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
 
-#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -20,7 +20,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "../codecs/wm8978.h"
 #include "siu.h"
@@ -140,11 +139,12 @@
 static int migor_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, migor_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, migor_dapm_widgets,
 				  ARRAY_SIZE(migor_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/sh/sh7760-ac97.c b/sound/soc/sh/sh7760-ac97.c
index f8e0ab8..917d3ce 100644
--- a/sound/soc/sh/sh7760-ac97.c
+++ b/sound/soc/sh/sh7760-ac97.c
@@ -12,7 +12,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/io.h>
 
 #define IPSEL 0xFE400034
@@ -23,7 +22,7 @@
 
 static int machine_init(struct snd_soc_pcm_runtime *rtd)
 {
-	snd_soc_dapm_sync(rtd->codec);
+	snd_soc_dapm_sync(&rtd->codec->dapm);
 	return 0;
 }
 
diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h
index 9f4dcb9..83c3430 100644
--- a/sound/soc/sh/siu.h
+++ b/sound/soc/sh/siu.h
@@ -75,7 +75,7 @@
 
 #include <sound/core.h>
 #include <sound/pcm.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #define SIU_PERIOD_BYTES_MAX	8192		/* DMA transfer/period size */
 #define SIU_PERIOD_BYTES_MIN	256		/* DMA transfer/period size */
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index af53b64..4973c29 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -28,7 +28,7 @@
 #include <asm/siu.h>
 
 #include <sound/control.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #include "siu.h"
 
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index ed29c9e..a423bab 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -29,7 +29,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #include <asm/siu.h>
 
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index d214f02..8c2a21a 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -14,27 +14,34 @@
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
 #include <sound/soc.h>
+#include <linux/lzo.h>
+#include <linux/bitmap.h>
+#include <linux/rbtree.h>
 
 static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 		snd_soc_codec_volatile_register(codec, reg)) {
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[2];
 	int ret;
 
@@ -42,16 +49,17 @@
 	data[1] = value & 0x00ff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 2);
 	if (ret == 2)
 		return 0;
@@ -77,7 +85,7 @@
 	msg[1] = data[0];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -94,23 +102,27 @@
 static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 		snd_soc_codec_volatile_register(codec, reg)) {
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[2];
 	int ret;
 
@@ -118,16 +130,17 @@
 	data[1] = value & 0x00ff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 2);
 	if (ret == 2)
 		return 0;
@@ -153,7 +166,7 @@
 	msg[1] = data[1];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -170,24 +183,25 @@
 static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u8 *cache = codec->reg_cache;
 	u8 data[2];
+	int ret;
 
 	reg &= 0xff;
 	data[0] = reg;
 	data[1] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	if (codec->hw_write(codec->control_data, data, 2) == 2)
 		return 0;
 	else
@@ -197,7 +211,8 @@
 static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u8 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	reg &= 0xff;
 	if (reg >= codec->driver->reg_cache_size ||
@@ -205,10 +220,14 @@
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 #if defined(CONFIG_SPI_MASTER)
@@ -227,7 +246,7 @@
 	msg[1] = data[1];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -244,24 +263,25 @@
 static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
 			      unsigned int value)
 {
-	u16 *reg_cache = codec->reg_cache;
 	u8 data[3];
+	int ret;
 
 	data[0] = reg;
 	data[1] = (value >> 8) & 0xff;
 	data[2] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-	    reg < codec->driver->reg_cache_size)
-		reg_cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	if (codec->hw_write(codec->control_data, data, 3) == 3)
 		return 0;
 	else
@@ -271,17 +291,22 @@
 static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
 				      unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 	    snd_soc_codec_volatile_register(codec, reg)) {
 		if (codec->cache_only)
 			return -1;
 
+		BUG_ON(!codec->hw_read);
 		return codec->hw_read(codec, reg);
-	} else {
-		return cache[reg];
 	}
+
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 #if defined(CONFIG_SPI_MASTER)
@@ -301,7 +326,7 @@
 	msg[2] = data[2];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -420,7 +445,8 @@
 static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u8 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	reg &= 0xff;
 	if (reg >= codec->driver->reg_cache_size ||
@@ -428,16 +454,19 @@
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u8 *cache = codec->reg_cache;
 	u8 data[3];
 	int ret;
 
@@ -447,16 +476,17 @@
 
 	reg &= 0xff;
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 3);
 	if (ret == 3)
 		return 0;
@@ -483,7 +513,7 @@
 	msg[2] = data[2];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -534,23 +564,28 @@
 static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
 				       unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 	    snd_soc_codec_volatile_register(codec, reg)) {
 		if (codec->cache_only)
 			return -1;
 
+		BUG_ON(!codec->hw_read);
 		return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+
+	return val;
 }
 
 static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
 			       unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[4];
 	int ret;
 
@@ -560,16 +595,17 @@
 	data[3] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 4);
 	if (ret == 4)
 		return 0;
@@ -597,7 +633,7 @@
 	msg[3] = data[3];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -692,8 +728,8 @@
 		return -EINVAL;
 	}
 
-	codec->driver->write = io_types[i].write;
-	codec->driver->read = io_types[i].read;
+	codec->write = io_types[i].write;
+	codec->read = io_types[i].read;
 
 	switch (control) {
 	case SND_SOC_CUSTOM:
@@ -724,3 +760,930 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
+
+struct snd_soc_rbtree_node {
+	struct rb_node node;
+	unsigned int reg;
+	unsigned int value;
+	unsigned int defval;
+} __attribute__ ((packed));
+
+struct snd_soc_rbtree_ctx {
+	struct rb_root root;
+};
+
+static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
+	struct rb_root *root, unsigned int reg)
+{
+	struct rb_node *node;
+	struct snd_soc_rbtree_node *rbnode;
+
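+	/* note: larger register numbers are kept in the left subtree;
+	 * the comparisons here mirror snd_soc_rbtree_insert()
+	 */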
+	node = root->rb_node;
+	while (node) {
+		rbnode = container_of(node, struct snd_soc_rbtree_node, node);
+		if (rbnode->reg < reg)
+			node = node->rb_left;
+		else if (rbnode->reg > reg)
+			node = node->rb_right;
+		else
+			return rbnode;
+	}
+
+	return NULL;
+}
+
+static int snd_soc_rbtree_insert(struct rb_root *root,
+				 struct snd_soc_rbtree_node *rbnode)
+{
+	struct rb_node **new, *parent;
+	struct snd_soc_rbtree_node *rbnode_tmp;
+
+	parent = NULL;
+	new = &root->rb_node;
+	while (*new) {
+		rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
+					  node);
+		parent = *new;
+		if (rbnode_tmp->reg < rbnode->reg)
+			new = &((*new)->rb_left);
+		else if (rbnode_tmp->reg > rbnode->reg)
+			new = &((*new)->rb_right);
+		else
+			return 0;
+	}
+
+	/* insert the node into the rbtree */
+	rb_link_node(&rbnode->node, parent, new);
+	rb_insert_color(&rbnode->node, root);
+
+	return 1;
+}
+
+static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct rb_node *node;
+	struct snd_soc_rbtree_node *rbnode;
+	unsigned int val;
+	int ret;
+
+	rbtree_ctx = codec->reg_cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
+		if (rbnode->value == rbnode->defval)
+			continue;
+		ret = snd_soc_cache_read(codec, rbnode->reg, &val);
+		if (ret)
+			return ret;
+		ret = snd_soc_write(codec, rbnode->reg, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			rbnode->reg, val);
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
+				      unsigned int reg, unsigned int value)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbnode;
+
+	rbtree_ctx = codec->reg_cache;
+	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
+	if (rbnode) {
+		if (rbnode->value == value)
+			return 0;
+		rbnode->value = value;
+	} else {
+		/* bail out early, no need to create the rbnode yet */
+		if (!value)
+			return 0;
+		/*
+		 * for uninitialized registers whose value is changed
+		 * from the default zero, create an rbnode and insert
+		 * it into the tree.
+		 */
+		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+		if (!rbnode)
+			return -ENOMEM;
+		rbnode->reg = reg;
+		rbnode->value = value;
+		snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
+				     unsigned int reg, unsigned int *value)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbnode;
+
+	rbtree_ctx = codec->reg_cache;
+	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
+	if (rbnode) {
+		*value = rbnode->value;
+	} else {
+		/* uninitialized registers default to 0 */
+		*value = 0;
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
+{
+	struct rb_node *next;
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbtree_node;
+
+	/* if we've already been called then just return */
+	rbtree_ctx = codec->reg_cache;
+	if (!rbtree_ctx)
+		return 0;
+
+	/* free up the rbtree */
+	next = rb_first(&rbtree_ctx->root);
+	while (next) {
+		rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
+		next = rb_next(&rbtree_node->node);
+		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+		kfree(rbtree_node);
+	}
+
+	/* release the resources */
+	kfree(codec->reg_cache);
+	codec->reg_cache = NULL;
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+
+	codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+	if (!codec->reg_cache)
+		return -ENOMEM;
+
+	rbtree_ctx = codec->reg_cache;
+	rbtree_ctx->root = RB_ROOT;
+
+	if (!codec->reg_def_copy)
+		return 0;
+
+/*
+ * populate the rbtree with the initialized registers.  All other
+ * registers will be inserted into the tree when they are first written.
+ *
+ * The reasoning behind this is that we need to step through and
+ * dereference the cache in u8/u16 increments without sacrificing
+ * portability.  This could also be done using memcpy() but that would
+ * be slightly more cryptic.
+ */
+#define snd_soc_rbtree_populate(cache)					\
+({									\
+	int ret, i;							\
+	struct snd_soc_rbtree_node *rbtree_node;			\
+									\
+	ret = 0;							\
+	cache = codec->reg_def_copy;					\
+	for (i = 0; i < codec->driver->reg_cache_size; ++i) {		\
+		if (!cache[i])						\
+			continue;					\
+		rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL);	\
+		if (!rbtree_node) {					\
+			ret = -ENOMEM;					\
+			snd_soc_cache_exit(codec);			\
+			break;						\
+		}							\
+		rbtree_node->reg = i;					\
+		rbtree_node->value = cache[i];				\
+		rbtree_node->defval = cache[i];				\
+		snd_soc_rbtree_insert(&rbtree_ctx->root,		\
+				      rbtree_node);			\
+	}								\
+	ret;								\
+})
+
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		const u8 *cache;
+
+		return snd_soc_rbtree_populate(cache);
+	}
+	case 2: {
+		const u16 *cache;
+
+		return snd_soc_rbtree_populate(cache);
+	}
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_SND_SOC_CACHE_LZO
+struct snd_soc_lzo_ctx {
+	void *wmem;
+	void *dst;
+	const void *src;
+	size_t src_len;
+	size_t dst_len;
+	size_t decompressed_size;
+	unsigned long *sync_bmp;
+	int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int snd_soc_lzo_block_count(void)
+{
+	return LZO_BLOCK_NUM;
+}
+
+static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	if (!lzo_ctx->wmem)
+		return -ENOMEM;
+	return 0;
+}
+
+static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	size_t compress_size;
+	int ret;
+
+	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+		return -EINVAL;
+	lzo_ctx->dst_len = compress_size;
+	return 0;
+}
+
+static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	size_t dst_len;
+	int ret;
+
+	dst_len = lzo_ctx->dst_len;
+	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+				    lzo_ctx->dst, &dst_len);
+	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+		return -EINVAL;
+	return 0;
+}
+
+static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
+		struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = snd_soc_lzo_compress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
+		struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = snd_soc_lzo_decompress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
+		unsigned int reg)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return (reg * codec_drv->reg_word_size) /
+	       DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+}
+
+static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
+		unsigned int reg)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return reg % (DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()) /
+		      codec_drv->reg_word_size);
+}
+
+static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+}
+
+static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	unsigned int val;
+	int i;
+	int ret;
+
+	lzo_blocks = codec->reg_cache;
+	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
+		ret = snd_soc_cache_read(codec, i, &val);
+		if (ret)
+			return ret;
+		ret = snd_soc_write(codec, i, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			i, val);
+	}
+
+	return 0;
+}
+
+static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
+				   unsigned int reg, unsigned int value)
+{
+	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
+	/* register index within the decompressed block */
+	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
+	/* size of the compressed block */
+	blksize = snd_soc_lzo_get_blksize(codec);
+	lzo_blocks = codec->reg_cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* write the new value to the cache */
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+		cache = lzo_block->dst;
+		if (cache[blkpos] == value) {
+			kfree(lzo_block->dst);
+			goto out;
+		}
+		cache[blkpos] = value;
+	}
+	break;
+	case 2: {
+		u16 *cache;
+		cache = lzo_block->dst;
+		if (cache[blkpos] == value) {
+			kfree(lzo_block->dst);
+			goto out;
+		}
+		cache[blkpos] = value;
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	/* prepare the source to be the decompressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* compress the block */
+	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		kfree(lzo_block->src);
+		goto out;
+	}
+
+	/* set the bit so we know we have to sync this register */
+	set_bit(reg, lzo_block->sync_bmp);
+	kfree(tmp_dst);
+	kfree(lzo_block->src);
+	return 0;
+out:
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return ret;
+}
+
+static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
+				  unsigned int reg, unsigned int *value)
+{
+	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	*value = 0;
+	/* index of the compressed lzo block */
+	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
+	/* register index within the decompressed block */
+	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
+	/* size of the compressed block */
+	blksize = snd_soc_lzo_get_blksize(codec);
+	lzo_blocks = codec->reg_cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
+	if (ret >= 0) {
+		/* fetch the value from the cache */
+		switch (codec->driver->reg_word_size) {
+		case 1: {
+			u8 *cache;
+			cache = lzo_block->dst;
+			*value = cache[blkpos];
+		}
+		break;
+		case 2: {
+			u16 *cache;
+			cache = lzo_block->dst;
+			*value = cache[blkpos];
+		}
+		break;
+		default:
+			BUG();
+		}
+	}
+
+	kfree(lzo_block->dst);
+	/* restore the pointer and length of the compressed block */
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return 0;
+}
+
+static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	int i, blkcount;
+
+	lzo_blocks = codec->reg_cache;
+	if (!lzo_blocks)
+		return 0;
+
+	blkcount = snd_soc_lzo_block_count();
+	/*
+	 * the pointer to the bitmap used for syncing the cache
+	 * is shared amongst all lzo_blocks.  Ensure it is freed
+	 * only once.
+	 */
+	if (lzo_blocks[0])
+		kfree(lzo_blocks[0]->sync_bmp);
+	for (i = 0; i < blkcount; ++i) {
+		if (lzo_blocks[i]) {
+			kfree(lzo_blocks[i]->wmem);
+			kfree(lzo_blocks[i]->dst);
+		}
+		/* each lzo_block is a pointer returned by kmalloc or NULL */
+		kfree(lzo_blocks[i]);
+	}
+	kfree(lzo_blocks);
+	codec->reg_cache = NULL;
+	return 0;
+}
+
+static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	size_t reg_size, bmp_size;
+	const struct snd_soc_codec_driver *codec_drv;
+	int ret, tofree, i, blksize, blkcount;
+	const char *p, *end;
+	unsigned long *sync_bmp;
+
+	ret = 0;
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+
+	/*
+	 * If we have not been given a default register cache
+	 * then allocate a dummy zeroed-out region, compress it
+	 * and remember to free it afterwards.
+	 */
+	tofree = 0;
+	if (!codec->reg_def_copy)
+		tofree = 1;
+
+	if (!codec->reg_def_copy) {
+		codec->reg_def_copy = kzalloc(reg_size,
+					      GFP_KERNEL);
+		if (!codec->reg_def_copy)
+			return -ENOMEM;
+	}
+
+	blkcount = snd_soc_lzo_block_count();
+	codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
+				   GFP_KERNEL);
+	if (!codec->reg_cache) {
+		ret = -ENOMEM;
+		goto err_tofree;
+	}
+	lzo_blocks = codec->reg_cache;
+
+	/*
+	 * allocate a bitmap to be used when syncing the cache with
+	 * the hardware.  Each time a register is modified, the corresponding
+	 * bit is set in the bitmap, so we know that we have to sync
+	 * that register.
+	 */
+	bmp_size = codec_drv->reg_cache_size;
+	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
+			   GFP_KERNEL);
+	if (!sync_bmp) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	bitmap_zero(sync_bmp, bmp_size);
+
+	/* allocate the lzo blocks and initialize them */
+	for (i = 0; i < blkcount; ++i) {
+		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
+					GFP_KERNEL);
+		if (!lzo_blocks[i]) {
+			kfree(sync_bmp);
+			ret = -ENOMEM;
+			goto err;
+		}
+		lzo_blocks[i]->sync_bmp = sync_bmp;
+		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+		/* alloc the working space for the compressed block */
+		ret = snd_soc_lzo_prepare(lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	blksize = snd_soc_lzo_get_blksize(codec);
+	p = codec->reg_def_copy;
+	end = codec->reg_def_copy + reg_size;
+	/* compress the register map and fill the lzo blocks */
+	for (i = 0; i < blkcount; ++i, p += blksize) {
+		lzo_blocks[i]->src = p;
+		if (p + blksize > end)
+			lzo_blocks[i]->src_len = end - p;
+		else
+			lzo_blocks[i]->src_len = blksize;
+		ret = snd_soc_lzo_compress_cache_block(codec,
+						       lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+		lzo_blocks[i]->decompressed_size =
+			lzo_blocks[i]->src_len;
+	}
+
+	if (tofree) {
+		kfree(codec->reg_def_copy);
+		codec->reg_def_copy = NULL;
+	}
+	return 0;
+err:
+	snd_soc_cache_exit(codec);
+err_tofree:
+	if (tofree) {
+		kfree(codec->reg_def_copy);
+		codec->reg_def_copy = NULL;
+	}
+	return ret;
+}
+#endif
+
+static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+{
+	int i;
+	int ret;
+	const struct snd_soc_codec_driver *codec_drv;
+	unsigned int val;
+
+	codec_drv = codec->driver;
+	for (i = 0; i < codec_drv->reg_cache_size; ++i) {
+		ret = snd_soc_cache_read(codec, i, &val);
+		if (ret)
+			return ret;
+		if (codec_drv->reg_cache_default) {
+			switch (codec_drv->reg_word_size) {
+			case 1: {
+				const u8 *cache;
+
+				cache = codec_drv->reg_cache_default;
+				if (cache[i] == val)
+					continue;
+			}
+			break;
+			case 2: {
+				const u16 *cache;
+
+				cache = codec_drv->reg_cache_default;
+				if (cache[i] == val)
+					continue;
+			}
+			break;
+			default:
+				BUG();
+			}
+		}
+		ret = snd_soc_write(codec, i, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			i, val);
+	}
+	return 0;
+}
+
+static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
+				    unsigned int reg, unsigned int value)
+{
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+
+		cache = codec->reg_cache;
+		cache[reg] = value;
+	}
+	break;
+	case 2: {
+		u16 *cache;
+
+		cache = codec->reg_cache;
+		cache[reg] = value;
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
+				   unsigned int reg, unsigned int *value)
+{
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+
+		cache = codec->reg_cache;
+		*value = cache[reg];
+	}
+	break;
+	case 2: {
+		u16 *cache;
+
+		cache = codec->reg_cache;
+		*value = cache[reg];
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
+{
+	if (!codec->reg_cache)
+		return 0;
+	kfree(codec->reg_cache);
+	codec->reg_cache = NULL;
+	return 0;
+}
+
+static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+
+	/*
+	 * for flat compression, we don't need to keep a copy of the
+	 * original default register cache as it will definitely not
+	 * be marked as __devinitconst
+	 */
+	kfree(codec->reg_def_copy);
+	codec->reg_def_copy = NULL;
+
+	if (codec_drv->reg_cache_default)
+		codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
+					   reg_size, GFP_KERNEL);
+	else
+		codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
+	if (!codec->reg_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* an array of all supported compression types */
+static const struct snd_soc_cache_ops cache_types[] = {
+	/* Flat *must* be the first entry for fallback */
+	{
+		.id = SND_SOC_FLAT_COMPRESSION,
+		.name = "flat",
+		.init = snd_soc_flat_cache_init,
+		.exit = snd_soc_flat_cache_exit,
+		.read = snd_soc_flat_cache_read,
+		.write = snd_soc_flat_cache_write,
+		.sync = snd_soc_flat_cache_sync
+	},
+#ifdef CONFIG_SND_SOC_CACHE_LZO
+	{
+		.id = SND_SOC_LZO_COMPRESSION,
+		.name = "LZO",
+		.init = snd_soc_lzo_cache_init,
+		.exit = snd_soc_lzo_cache_exit,
+		.read = snd_soc_lzo_cache_read,
+		.write = snd_soc_lzo_cache_write,
+		.sync = snd_soc_lzo_cache_sync
+	},
+#endif
+	{
+		.id = SND_SOC_RBTREE_COMPRESSION,
+		.name = "rbtree",
+		.init = snd_soc_rbtree_cache_init,
+		.exit = snd_soc_rbtree_cache_exit,
+		.read = snd_soc_rbtree_cache_read,
+		.write = snd_soc_rbtree_cache_write,
+		.sync = snd_soc_rbtree_cache_sync
+	}
+};
+
+int snd_soc_cache_init(struct snd_soc_codec *codec)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
+		if (cache_types[i].id == codec->compress_type)
+			break;
+
+	/* Fall back to flat compression */
+	if (i == ARRAY_SIZE(cache_types)) {
+		dev_warn(codec->dev, "Could not match compress type: %d\n",
+			 codec->compress_type);
+		i = 0;
+	}
+
+	mutex_init(&codec->cache_rw_mutex);
+	codec->cache_ops = &cache_types[i];
+
+	if (codec->cache_ops->init) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		return codec->cache_ops->init(codec);
+	}
+	return -EINVAL;
+}
+
+/*
+ * NOTE: keep in mind that this function might be called
+ * multiple times.
+ */
+int snd_soc_cache_exit(struct snd_soc_codec *codec)
+{
+	if (codec->cache_ops && codec->cache_ops->exit) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		return codec->cache_ops->exit(codec);
+	}
+	return -EINVAL;
+}
+
+/**
+ * snd_soc_cache_read: Fetch the value of a given register from the cache.
+ *
+ * @codec: CODEC to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ */
+int snd_soc_cache_read(struct snd_soc_codec *codec,
+		       unsigned int reg, unsigned int *value)
+{
+	int ret;
+
+	mutex_lock(&codec->cache_rw_mutex);
+
+	if (value && codec->cache_ops && codec->cache_ops->read) {
+		ret = codec->cache_ops->read(codec, reg, value);
+		mutex_unlock(&codec->cache_rw_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&codec->cache_rw_mutex);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_read);
+
+/**
+ * snd_soc_cache_write: Set the value of a given register in the cache.
+ *
+ * @codec: CODEC to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ */
+int snd_soc_cache_write(struct snd_soc_codec *codec,
+			unsigned int reg, unsigned int value)
+{
+	int ret;
+
+	mutex_lock(&codec->cache_rw_mutex);
+
+	if (codec->cache_ops && codec->cache_ops->write) {
+		ret = codec->cache_ops->write(codec, reg, value);
+		mutex_unlock(&codec->cache_rw_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&codec->cache_rw_mutex);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_write);
+
+/**
+ * snd_soc_cache_sync: Sync the register cache with the hardware.
+ *
+ * @codec: CODEC to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile.  In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ */
+int snd_soc_cache_sync(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	if (!codec->cache_sync) {
+		return 0;
+	}
+
+	if (codec->cache_ops && codec->cache_ops->sync) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		ret = codec->cache_ops->sync(codec);
+		if (!ret)
+			codec->cache_sync = 0;
+		return ret;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
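
The soc-cache.c additions above select one of several interchangeable register-cache backends (flat, rbtree, and optionally LZO) at snd_soc_cache_init() time via a small ops table, with flat as the guaranteed fallback. As a minimal sketch of that dispatch pattern, the standalone program below uses the same id-match-with-fallback idea; the names used here (reg_cache_ops, cache_ctx, cache_attach) are illustrative only and not part of the kernel API.

#include <stdio.h>
#include <string.h>

enum cache_type { CACHE_FLAT, CACHE_RBTREE, CACHE_LZO };

struct cache_ctx;

/* one entry per supported backend, mirroring the cache_types[] idea */
struct reg_cache_ops {
	enum cache_type id;
	const char *name;
	int (*read)(struct cache_ctx *ctx, unsigned int reg, unsigned int *val);
	int (*write)(struct cache_ctx *ctx, unsigned int reg, unsigned int val);
};

struct cache_ctx {
	const struct reg_cache_ops *ops;
	unsigned int flat[16];		/* backing store for the flat backend */
};

static int flat_read(struct cache_ctx *ctx, unsigned int reg, unsigned int *val)
{
	*val = ctx->flat[reg];
	return 0;
}

static int flat_write(struct cache_ctx *ctx, unsigned int reg, unsigned int val)
{
	ctx->flat[reg] = val;
	return 0;
}

static const struct reg_cache_ops cache_types[] = {
	/* flat must stay first so it can serve as the fallback */
	{ CACHE_FLAT, "flat", flat_read, flat_write },
};

/* pick a backend by id, falling back to flat when nothing matches */
static void cache_attach(struct cache_ctx *ctx, enum cache_type want)
{
	size_t i;

	for (i = 0; i < sizeof(cache_types) / sizeof(cache_types[0]); i++)
		if (cache_types[i].id == want)
			break;
	if (i == sizeof(cache_types) / sizeof(cache_types[0])) {
		fprintf(stderr, "no backend %d, falling back to flat\n", want);
		i = 0;
	}
	ctx->ops = &cache_types[i];
}

int main(void)
{
	struct cache_ctx ctx;
	unsigned int val;

	memset(&ctx, 0, sizeof(ctx));
	cache_attach(&ctx, CACHE_RBTREE);	/* not in the table -> flat fallback */
	ctx.ops->write(&ctx, 3, 0xab);
	ctx.ops->read(&ctx, 3, &val);
	printf("reg 3 = %#x via %s backend\n", val, ctx.ops->name);
	return 0;
}

Keeping the fallback entry first in the table means the index can simply be reset to zero when no backend matches, which is exactly how snd_soc_cache_init() above falls back to flat compression.
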
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 85b7d54..bac7291 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -33,12 +33,15 @@
 #include <linux/slab.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
+#include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/asoc.h>
+
 #define NAME_SIZE	32
 
 static DEFINE_MUTEX(pcm_mutex);
@@ -67,25 +70,6 @@
 module_param(pmdown_time, int, 0);
 MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
 
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
-	int ret;
-
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(dwork);
-
-	/* if there was any work waiting then we run it now and
-	 * wait for it's completion */
-	if (ret) {
-		schedule_delayed_work(dwork, 0);
-		flush_scheduled_work();
-	}
-	return ret;
-}
-
 /* codec register dump */
 static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
 {
@@ -114,7 +98,7 @@
 			 * the register being volatile and the device being
 			 * powered off.
 			 */
-			ret = codec->driver->read(codec, i);
+			ret = snd_soc_read(codec, i);
 			if (ret >= 0)
 				count += snprintf(buf + count,
 						  PAGE_SIZE - count,
@@ -225,7 +209,7 @@
 		start++;
 	if (strict_strtoul(start, 16, &value))
 		return -EINVAL;
-	codec->driver->write(codec, reg, value);
+	snd_soc_write(codec, reg, value);
 	return buf_size;
 }
 
@@ -238,8 +222,10 @@
 
 static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
 {
-	codec->debugfs_codec_root = debugfs_create_dir(codec->name ,
-						       debugfs_root);
+	struct dentry *debugfs_card_root = codec->card->debugfs_card_root;
+
+	codec->debugfs_codec_root = debugfs_create_dir(codec->name,
+						       debugfs_card_root);
 	if (!codec->debugfs_codec_root) {
 		printk(KERN_WARNING
 		       "ASoC: Failed to create codec debugfs directory\n");
@@ -253,20 +239,13 @@
 		printk(KERN_WARNING
 		       "ASoC: Failed to create codec register debugfs file\n");
 
-	codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
-						     codec->debugfs_codec_root,
-						     &codec->pop_time);
-	if (!codec->debugfs_pop_time)
-		printk(KERN_WARNING
-		       "Failed to create pop time debugfs file\n");
-
-	codec->debugfs_dapm = debugfs_create_dir("dapm",
+	codec->dapm.debugfs_dapm = debugfs_create_dir("dapm",
 						 codec->debugfs_codec_root);
-	if (!codec->debugfs_dapm)
+	if (!codec->dapm.debugfs_dapm)
 		printk(KERN_WARNING
 		       "Failed to create DAPM debugfs directory\n");
 
-	snd_soc_dapm_debugfs_init(codec);
+	snd_soc_dapm_debugfs_init(&codec->dapm);
 }
 
 static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
@@ -374,6 +353,29 @@
 	.llseek = default_llseek,/* read accesses f_pos */
 };
 
+static void soc_init_card_debugfs(struct snd_soc_card *card)
+{
+	card->debugfs_card_root = debugfs_create_dir(card->name,
+						     debugfs_root);
+	if (!card->debugfs_card_root) {
+		dev_warn(card->dev,
+			 "ASoC: Failed to create codec debugfs directory\n");
+		return;
+	}
+
+	card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
+						    card->debugfs_card_root,
+						    &card->pop_time);
+	if (!card->debugfs_pop_time)
+		dev_warn(card->dev,
+		       "Failed to create pop time debugfs file\n");
+}
+
+static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+{
+	debugfs_remove_recursive(card->debugfs_card_root);
+}
+
 #else
 
 static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
@@ -383,6 +385,14 @@
 static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
 {
 }
+
+static inline void soc_init_card_debugfs(struct snd_soc_card *card)
+{
+}
+
+static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+{
+}
 #endif
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
@@ -497,7 +507,7 @@
 		}
 	}
 
-	/* Check that the codec and cpu DAI's are compatible */
+	/* Check that the codec and cpu DAIs are compatible */
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		runtime->hw.rate_min =
 			max(codec_dai_drv->playback.rate_min,
@@ -846,7 +856,7 @@
 }
 
 /*
- * Free's resources allocated by hw_params, can be called multiple times
+ * Frees resources allocated by hw_params, can be called multiple times
  */
 static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
 {
@@ -870,7 +880,7 @@
 	if (platform->driver->ops->hw_free)
 		platform->driver->ops->hw_free(substream);
 
-	/* now free hw params for the DAI's  */
+	/* now free hw params for the DAIs  */
 	if (codec_dai->driver->ops->hw_free)
 		codec_dai->driver->ops->hw_free(substream, codec_dai);
 
@@ -958,6 +968,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct snd_soc_codec *codec;
 	int i;
 
 	/* If the initialization of this soc device failed, there is no codec
@@ -976,7 +987,7 @@
 	/* we're going to block userspace touching us until resume completes */
 	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);
 
-	/* mute any active DAC's */
+	/* mute any active DACs */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
 		struct snd_soc_dai_driver *drv = dai->driver;
@@ -1016,8 +1027,8 @@
 
 	/* close any waiting streams and save state */
 	for (i = 0; i < card->num_rtd; i++) {
-		run_delayed_work(&card->rtd[i].delayed_work);
-		card->rtd[i].codec->suspend_bias_level = card->rtd[i].codec->bias_level;
+		flush_delayed_work_sync(&card->rtd[i].delayed_work);
+		card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
 	}
 
 	for (i = 0; i < card->num_rtd; i++) {
@@ -1036,12 +1047,11 @@
 	}
 
 	/* suspend all CODECs */
-	for (i = 0; i < card->num_rtd; i++) {
-		struct snd_soc_codec *codec = card->rtd[i].codec;
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		/* If there are paths active then the CODEC will be held with
 		 * bias _ON and should not be suspended. */
 		if (!codec->suspended && codec->driver->suspend) {
-			switch (codec->bias_level) {
+			switch (codec->dapm.bias_level) {
 			case SND_SOC_BIAS_STANDBY:
 			case SND_SOC_BIAS_OFF:
 				codec->driver->suspend(codec, PMSG_SUSPEND);
@@ -1078,6 +1088,7 @@
 	struct snd_soc_card *card =
 			container_of(work, struct snd_soc_card, deferred_resume_work);
 	struct platform_device *pdev = to_platform_device(card->dev);
+	struct snd_soc_codec *codec;
 	int i;
 
 	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
@@ -1103,14 +1114,13 @@
 			cpu_dai->driver->resume(cpu_dai);
 	}
 
-	for (i = 0; i < card->num_rtd; i++) {
-		struct snd_soc_codec *codec = card->rtd[i].codec;
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		/* If the CODEC was idle over suspend then it will have been
 		 * left with bias OFF or STANDBY and suspended so we must now
 		 * resume.  Otherwise the suspend was suppressed.
 		 */
 		if (codec->driver->resume && codec->suspended) {
-			switch (codec->bias_level) {
+			switch (codec->dapm.bias_level) {
 			case SND_SOC_BIAS_STANDBY:
 			case SND_SOC_BIAS_OFF:
 				codec->driver->resume(codec);
@@ -1249,9 +1259,6 @@
 		if (!strcmp(codec->name, dai_link->codec_name)) {
 			rtd->codec = codec;
 
-			if (!try_module_get(codec->dev->driver->owner))
-				return -ENODEV;
-
 			/* CODEC found, so find CODEC DAI from registered DAIs from this CODEC*/
 			list_for_each_entry(codec_dai, &dai_list, list) {
 				if (codec->dev == codec_dai->dev &&
@@ -1277,10 +1284,6 @@
 	/* no, then find CPU DAI from registered DAIs*/
 	list_for_each_entry(platform, &platform_list, list) {
 		if (!strcmp(platform->name, dai_link->platform_name)) {
-
-			if (!try_module_get(platform->dev->driver->owner))
-				return -ENODEV;
-
 			rtd->platform = platform;
 			goto out;
 		}
@@ -1299,6 +1302,27 @@
 	return 1;
 }
 
+static void soc_remove_codec(struct snd_soc_codec *codec)
+{
+	int err;
+
+	if (codec->driver->remove) {
+		err = codec->driver->remove(codec);
+		if (err < 0)
+			dev_err(codec->dev,
+				"asoc: failed to remove %s: %d\n",
+				codec->name, err);
+	}
+
+	/* Make sure all DAPM widgets are freed */
+	snd_soc_dapm_free(&codec->dapm);
+
+	soc_cleanup_codec_debugfs(codec);
+	codec->probed = 0;
+	list_del(&codec->card_list);
+	module_put(codec->dev->driver->owner);
+}
+
 static void soc_remove_dai_link(struct snd_soc_card *card, int num)
 {
 	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
@@ -1310,6 +1334,7 @@
 	/* unregister the rtd device */
 	if (rtd->dev_registered) {
 		device_remove_file(&rtd->dev, &dev_attr_pmdown_time);
+		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
 		device_unregister(&rtd->dev);
 		rtd->dev_registered = 0;
 	}
@@ -1338,22 +1363,8 @@
 	}
 
 	/* remove the CODEC */
-	if (codec && codec->probed) {
-		if (codec->driver->remove) {
-			err = codec->driver->remove(codec);
-			if (err < 0)
-				printk(KERN_ERR "asoc: failed to remove %s\n", codec->name);
-		}
-
-		/* Make sure all DAPM widgets are freed */
-		snd_soc_dapm_free(codec);
-
-		soc_cleanup_codec_debugfs(codec);
-		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
-		codec->probed = 0;
-		list_del(&codec->card_list);
-		module_put(codec->dev->driver->owner);
-	}
+	if (codec && codec->probed)
+		soc_remove_codec(codec);
 
 	/* remove the cpu_dai */
 	if (cpu_dai && cpu_dai->probed) {
@@ -1368,8 +1379,126 @@
 	}
 }
 
+static void soc_set_name_prefix(struct snd_soc_card *card,
+				struct snd_soc_codec *codec)
+{
+	int i;
+
+	if (card->codec_conf == NULL)
+		return;
+
+	for (i = 0; i < card->num_configs; i++) {
+		struct snd_soc_codec_conf *map = &card->codec_conf[i];
+		if (map->dev_name && !strcmp(codec->name, map->dev_name)) {
+			codec->name_prefix = map->name_prefix;
+			break;
+		}
+	}
+}
+
+static int soc_probe_codec(struct snd_soc_card *card,
+			   struct snd_soc_codec *codec)
+{
+	int ret = 0;
+
+	codec->card = card;
+	codec->dapm.card = card;
+	soc_set_name_prefix(card, codec);
+
+	if (codec->driver->probe) {
+		ret = codec->driver->probe(codec);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"asoc: failed to probe CODEC %s: %d\n",
+				codec->name, ret);
+			return ret;
+		}
+	}
+
+	soc_init_codec_debugfs(codec);
+
+	/* mark codec as probed and add to card codec list */
+	if (!try_module_get(codec->dev->driver->owner))
+		return -ENODEV;
+
+	codec->probed = 1;
+	list_add(&codec->card_list, &card->codec_dev_list);
+	list_add(&codec->dapm.list, &card->dapm_list);
+
+	return ret;
+}
+
 static void rtd_release(struct device *dev) {}
 
+static int soc_post_component_init(struct snd_soc_card *card,
+				   struct snd_soc_codec *codec,
+				   int num, int dailess)
+{
+	struct snd_soc_dai_link *dai_link = NULL;
+	struct snd_soc_aux_dev *aux_dev = NULL;
+	struct snd_soc_pcm_runtime *rtd;
+	const char *temp, *name;
+	int ret = 0;
+
+	if (!dailess) {
+		dai_link = &card->dai_link[num];
+		rtd = &card->rtd[num];
+		name = dai_link->name;
+	} else {
+		aux_dev = &card->aux_dev[num];
+		rtd = &card->rtd_aux[num];
+		name = aux_dev->name;
+	}
+
+	/* machine controls, routes and widgets are not prefixed */
+	temp = codec->name_prefix;
+	codec->name_prefix = NULL;
+
+	/* do machine specific initialization */
+	if (!dailess && dai_link->init)
+		ret = dai_link->init(rtd);
+	else if (dailess && aux_dev->init)
+		ret = aux_dev->init(&codec->dapm);
+	if (ret < 0) {
+		dev_err(card->dev, "asoc: failed to init %s: %d\n", name, ret);
+		return ret;
+	}
+	codec->name_prefix = temp;
+
+	/* Make sure all DAPM widgets are instantiated */
+	snd_soc_dapm_new_widgets(&codec->dapm);
+	snd_soc_dapm_sync(&codec->dapm);
+
+	/* register the rtd device */
+	rtd->codec = codec;
+	rtd->card = card;
+	rtd->dev.parent = card->dev;
+	rtd->dev.release = rtd_release;
+	rtd->dev.init_name = name;
+	ret = device_register(&rtd->dev);
+	if (ret < 0) {
+		dev_err(card->dev,
+			"asoc: failed to register runtime device: %d\n", ret);
+		return ret;
+	}
+	rtd->dev_registered = 1;
+
+	/* add DAPM sysfs entries for this codec */
+	ret = snd_soc_dapm_sys_add(&rtd->dev);
+	if (ret < 0)
+		dev_err(codec->dev,
+			"asoc: failed to add codec dapm sysfs entries: %d\n",
+			ret);
+
+	/* add codec sysfs entries */
+	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
+	if (ret < 0)
+		dev_err(codec->dev,
+			"asoc: failed to add codec sysfs files: %d\n", ret);
+
+	return 0;
+}
+
 static int soc_probe_dai_link(struct snd_soc_card *card, int num)
 {
 	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
@@ -1383,10 +1512,7 @@
 
 	/* config components */
 	codec_dai->codec = codec;
-	codec->card = card;
 	cpu_dai->platform = platform;
-	rtd->card = card;
-	rtd->dev.parent = card->dev;
 	codec_dai->card = card;
 	cpu_dai->card = card;
 
@@ -1410,20 +1536,9 @@
 
 	/* probe the CODEC */
 	if (!codec->probed) {
-		if (codec->driver->probe) {
-			ret = codec->driver->probe(codec);
-			if (ret < 0) {
-				printk(KERN_ERR "asoc: failed to probe CODEC %s\n",
-						codec->name);
-				return ret;
-			}
-		}
-
-		soc_init_codec_debugfs(codec);
-
-		/* mark codec as probed and add to card codec list */
-		codec->probed = 1;
-		list_add(&codec->card_list, &card->codec_dev_list);
+		ret = soc_probe_codec(card, codec);
+		if (ret < 0)
+			return ret;
 	}
 
 	/* probe the platform */
@@ -1437,6 +1552,10 @@
 			}
 		}
 		/* mark platform as probed and add to card platform list */
+
+		if (!try_module_get(platform->dev->driver->owner))
+			return -ENODEV;
+
 		platform->probed = 1;
 		list_add(&platform->card_list, &card->platform_dev_list);
 	}
@@ -1460,43 +1579,14 @@
 	/* DAPM dai link stream work */
 	INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
 
-	/* now that all clients have probed, initialise the DAI link */
-	if (dai_link->init) {
-		ret = dai_link->init(rtd);
-		if (ret < 0) {
-			printk(KERN_ERR "asoc: failed to init %s\n", dai_link->stream_name);
-			return ret;
-		}
-	}
-
-	/* Make sure all DAPM widgets are instantiated */
-	snd_soc_dapm_new_widgets(codec);
-	snd_soc_dapm_sync(codec);
-
-	/* register the rtd device */
-	rtd->dev.release = rtd_release;
-	rtd->dev.init_name = dai_link->name;
-	ret = device_register(&rtd->dev);
-	if (ret < 0) {
-		printk(KERN_ERR "asoc: failed to register DAI runtime device %d\n", ret);
+	ret = soc_post_component_init(card, codec, num, 0);
+	if (ret)
 		return ret;
-	}
 
-	rtd->dev_registered = 1;
 	ret = device_create_file(&rtd->dev, &dev_attr_pmdown_time);
 	if (ret < 0)
 		printk(KERN_WARNING "asoc: failed to add pmdown_time sysfs\n");
 
-	/* add DAPM sysfs entries for this codec */
-	ret = snd_soc_dapm_sys_add(&rtd->dev);
-	if (ret < 0)
-		printk(KERN_WARNING "asoc: failed to add codec dapm sysfs entries\n");
-
-	/* add codec sysfs entries */
-	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
-	if (ret < 0)
-		printk(KERN_WARNING "asoc: failed to add codec sysfs files\n");
-
 	/* create the pcm */
 	ret = soc_new_pcm(rtd, num);
 	if (ret < 0) {
@@ -1551,9 +1641,85 @@
 }
 #endif
 
+static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
+{
+	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
+	struct snd_soc_codec *codec;
+	int ret = -ENODEV;
+
+	/* find CODEC from registered CODECs*/
+	list_for_each_entry(codec, &codec_list, list) {
+		if (!strcmp(codec->name, aux_dev->codec_name)) {
+			if (codec->probed) {
+				dev_err(codec->dev,
+					"asoc: codec already probed");
+				ret = -EBUSY;
+				goto out;
+			}
+			goto found;
+		}
+	}
+	/* codec not found */
+	dev_err(card->dev, "asoc: codec %s not found", aux_dev->codec_name);
+	goto out;
+
+found:
+	if (!try_module_get(codec->dev->driver->owner))
+		return -ENODEV;
+
+	ret = soc_probe_codec(card, codec);
+	if (ret < 0)
+		return ret;
+
+	ret = soc_post_component_init(card, codec, num, 1);
+
+out:
+	return ret;
+}
+
+static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
+{
+	struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
+	struct snd_soc_codec *codec = rtd->codec;
+
+	/* unregister the rtd device */
+	if (rtd->dev_registered) {
+		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
+		device_unregister(&rtd->dev);
+		rtd->dev_registered = 0;
+	}
+
+	if (codec && codec->probed)
+		soc_remove_codec(codec);
+}
+
+static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
+				    enum snd_soc_compress_type compress_type)
+{
+	int ret;
+
+	if (codec->cache_init)
+		return 0;
+
+	/* override the compress_type if necessary */
+	if (compress_type && codec->compress_type != compress_type)
+		codec->compress_type = compress_type;
+	ret = snd_soc_cache_init(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache compression type: %d\n",
+			ret);
+		return ret;
+	}
+	codec->cache_init = 1;
+	return 0;
+}
+
 static void snd_soc_instantiate_card(struct snd_soc_card *card)
 {
 	struct platform_device *pdev = to_platform_device(card->dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_codec_conf *codec_conf;
+	enum snd_soc_compress_type compress_type;
 	int ret, i;
 
 	mutex_lock(&card->mutex);
@@ -1573,6 +1739,39 @@
 		return;
 	}
 
+	/* initialize the register cache for each available codec */
+	list_for_each_entry(codec, &codec_list, list) {
+		if (codec->cache_init)
+			continue;
+		/* check to see if we need to override the compress_type */
+		for (i = 0; i < card->num_configs; ++i) {
+			codec_conf = &card->codec_conf[i];
+			if (!strcmp(codec->name, codec_conf->dev_name)) {
+				compress_type = codec_conf->compress_type;
+				if (compress_type && compress_type
+				    != codec->compress_type)
+					break;
+			}
+		}
+		if (i == card->num_configs) {
+			/* no need to override the compress_type so
+			 * go ahead and do the standard thing */
+			ret = snd_soc_init_codec_cache(codec, 0);
+			if (ret < 0) {
+				mutex_unlock(&card->mutex);
+				return;
+			}
+			continue;
+		}
+		/* override the compress_type with the one supplied in
+		 * the machine driver */
+		ret = snd_soc_init_codec_cache(codec, compress_type);
+		if (ret < 0) {
+			mutex_unlock(&card->mutex);
+			return;
+		}
+	}
+
 	/* card bind complete so register a sound card */
 	ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
 			card->owner, 0, &card->snd_card);
@@ -1605,6 +1804,15 @@
 		}
 	}
 
+	for (i = 0; i < card->num_aux_devs; i++) {
+		ret = soc_probe_aux_dev(card, i);
+		if (ret < 0) {
+			pr_err("asoc: failed to add auxiliary devices %s: %d\n",
+			       card->name, ret);
+			goto probe_aux_dev_err;
+		}
+	}
+
 	snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
 		 "%s",  card->name);
 	snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
@@ -1613,7 +1821,7 @@
 	ret = snd_card_register(card->snd_card);
 	if (ret < 0) {
 		printk(KERN_ERR "asoc: failed to register soundcard for %s\n", card->name);
-		goto probe_dai_err;
+		goto probe_aux_dev_err;
 	}
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
@@ -1623,8 +1831,8 @@
 		if (ret < 0) {
 			printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name);
 			while (--i >= 0)
-				soc_unregister_ac97_dai_link(&card->rtd[i]);
-			goto probe_dai_err;
+				soc_unregister_ac97_dai_link(card->rtd[i].codec);
+			goto probe_aux_dev_err;
 		}
 	}
 #endif
@@ -1633,6 +1841,10 @@
 	mutex_unlock(&card->mutex);
 	return;
 
+probe_aux_dev_err:
+	for (i = 0; i < card->num_aux_devs; i++)
+		soc_remove_aux_dev(card, i);
+
 probe_dai_err:
 	for (i = 0; i < card->num_links; i++)
 		soc_remove_dai_link(card, i);
@@ -1668,6 +1880,11 @@
 	INIT_LIST_HEAD(&card->dai_dev_list);
 	INIT_LIST_HEAD(&card->codec_dev_list);
 	INIT_LIST_HEAD(&card->platform_dev_list);
+	INIT_LIST_HEAD(&card->widgets);
+	INIT_LIST_HEAD(&card->paths);
+	INIT_LIST_HEAD(&card->dapm_list);
+
+	soc_init_card_debugfs(card);
 
 	ret = snd_soc_register_card(card);
 	if (ret != 0) {
@@ -1689,13 +1906,19 @@
 		/* make sure any delayed work runs */
 		for (i = 0; i < card->num_rtd; i++) {
 			struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-			run_delayed_work(&rtd->delayed_work);
+			flush_delayed_work_sync(&rtd->delayed_work);
 		}
 
+		/* remove auxiliary devices */
+		for (i = 0; i < card->num_aux_devs; i++)
+			soc_remove_aux_dev(card, i);
+
 		/* remove and free each DAI */
 		for (i = 0; i < card->num_rtd; i++)
 			soc_remove_dai_link(card, i);
 
+		soc_cleanup_card_debugfs(card);
+
 		/* remove the card */
 		if (card->remove)
 			card->remove(pdev);
@@ -1720,7 +1943,7 @@
 	 * now, we're shutting down so no imminent restart. */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-		run_delayed_work(&rtd->delayed_work);
+		flush_delayed_work_sync(&rtd->delayed_work);
 	}
 
 	snd_soc_dapm_shutdown(card);
@@ -1879,6 +2102,27 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
 
+unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
+{
+	unsigned int ret;
+
+	ret = codec->read(codec, reg);
+	dev_dbg(codec->dev, "read %x => %x\n", reg, ret);
+	trace_snd_soc_reg_read(codec, reg, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_read);
+
+unsigned int snd_soc_write(struct snd_soc_codec *codec,
+			   unsigned int reg, unsigned int val)
+{
+	dev_dbg(codec->dev, "write %x = %x\n", reg, val);
+	trace_snd_soc_reg_write(codec, reg, val);
+	return codec->write(codec, reg, val);
+}
+EXPORT_SYMBOL_GPL(snd_soc_write);
+
 /**
  * snd_soc_update_bits - update codec register bits
  * @codec: audio codec
@@ -2019,14 +2263,22 @@
 	const struct snd_kcontrol_new *controls, int num_controls)
 {
 	struct snd_card *card = codec->card->snd_card;
+	char prefixed_name[44], *name;
 	int err, i;
 
 	for (i = 0; i < num_controls; i++) {
 		const struct snd_kcontrol_new *control = &controls[i];
-		err = snd_ctl_add(card, snd_soc_cnew(control, codec, NULL));
+		if (codec->name_prefix) {
+			snprintf(prefixed_name, sizeof(prefixed_name), "%s %s",
+				 codec->name_prefix, control->name);
+			name = prefixed_name;
+		} else {
+			name = control->name;
+		}
+		err = snd_ctl_add(card, snd_soc_cnew(control, codec, name));
 		if (err < 0) {
 			dev_err(codec->dev, "%s: Failed to add %s: %d\n",
-				codec->name, control->name, err);
+				codec->name, name, err);
 			return err;
 		}
 	}
@@ -2863,10 +3115,12 @@
 	if (!card->name || !card->dev)
 		return -EINVAL;
 
-	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) * card->num_links,
-			GFP_KERNEL);
+	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) *
+			    (card->num_links + card->num_aux_devs),
+			    GFP_KERNEL);
 	if (card->rtd == NULL)
 		return -ENOMEM;
+	card->rtd_aux = &card->rtd[card->num_links];
 
 	for (i = 0; i < card->num_links; i++)
 		card->rtd[i].dai_link = &card->dai_link[i];
@@ -2908,7 +3162,7 @@
  * Simplify DAI link configuration by removing ".-1" from device names
  * and sanitizing names.
  */
-static inline char *fmt_single_name(struct device *dev, int *id)
+static char *fmt_single_name(struct device *dev, int *id)
 {
 	char *found, name[NAME_SIZE];
 	int id1, id2;
@@ -2916,7 +3170,7 @@
 	if (dev_name(dev) == NULL)
 		return NULL;
 
-	strncpy(name, dev_name(dev), NAME_SIZE);
+	strlcpy(name, dev_name(dev), NAME_SIZE);
 
 	/* are we a "%s.%d" name (platform and SPI components) */
 	found = strstr(name, dev->driver->name);
@@ -2939,7 +3193,7 @@
 
 			/* sanitize component name for DAI link creation */
 			snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name);
-			strncpy(name, tmp, NAME_SIZE);
+			strlcpy(name, tmp, NAME_SIZE);
 		} else
 			*id = 0;
 	}
@@ -3204,9 +3458,11 @@
  * @codec: codec to register
  */
 int snd_soc_register_codec(struct device *dev,
-		struct snd_soc_codec_driver *codec_drv,
-		struct snd_soc_dai_driver *dai_drv, int num_dai)
+			   const struct snd_soc_codec_driver *codec_drv,
+			   struct snd_soc_dai_driver *dai_drv,
+			   int num_dai)
 {
+	size_t reg_size;
 	struct snd_soc_codec *codec;
 	int ret, i;
 
@@ -3223,30 +3479,37 @@
 		return -ENOMEM;
 	}
 
-	/* allocate CODEC register cache */
-	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
+	if (codec_drv->compress_type)
+		codec->compress_type = codec_drv->compress_type;
+	else
+		codec->compress_type = SND_SOC_FLAT_COMPRESSION;
 
-		if (codec_drv->reg_cache_default)
-			codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
-				codec_drv->reg_cache_size * codec_drv->reg_word_size, GFP_KERNEL);
-		else
-			codec->reg_cache = kzalloc(codec_drv->reg_cache_size *
-				codec_drv->reg_word_size, GFP_KERNEL);
-
-		if (codec->reg_cache == NULL) {
-			kfree(codec->name);
-			kfree(codec);
-			return -ENOMEM;
-		}
-	}
-
+	codec->write = codec_drv->write;
+	codec->read = codec_drv->read;
+	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+	codec->dapm.dev = dev;
+	codec->dapm.codec = codec;
 	codec->dev = dev;
 	codec->driver = codec_drv;
-	codec->bias_level = SND_SOC_BIAS_OFF;
 	codec->num_dai = num_dai;
 	mutex_init(&codec->mutex);
-	INIT_LIST_HEAD(&codec->dapm_widgets);
-	INIT_LIST_HEAD(&codec->dapm_paths);
+
+	/* allocate CODEC register cache */
+	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
+		reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+		/* it is necessary to make a copy of the default register cache
+		 * because, when a compression type is used that requires the
+		 * default register cache to be marked as __devinitconst, the
+		 * kernel might have freed the array by the time we initialize
+		 * the cache.
+		 */
+		codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
+					      reg_size, GFP_KERNEL);
+		if (!codec->reg_def_copy) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
 
 	for (i = 0; i < num_dai; i++) {
 		fixup_codec_formats(&dai_drv[i].playback);
@@ -3257,7 +3520,7 @@
 	if (num_dai) {
 		ret = snd_soc_register_dais(dev, dai_drv, num_dai);
 		if (ret < 0)
-			goto error;
+			goto fail;
 	}
 
 	mutex_lock(&client_mutex);
@@ -3268,9 +3531,9 @@
 	pr_debug("Registered codec '%s'\n", codec->name);
 	return 0;
 
-error:
-	if (codec->reg_cache)
-		kfree(codec->reg_cache);
+fail:
+	kfree(codec->reg_def_copy);
+	codec->reg_def_copy = NULL;
 	kfree(codec->name);
 	kfree(codec);
 	return ret;
@@ -3304,8 +3567,8 @@
 
 	pr_debug("Unregistered codec '%s'\n", codec->name);
 
-	if (codec->reg_cache)
-		kfree(codec->reg_cache);
+	snd_soc_cache_exit(codec);
+	kfree(codec->reg_def_copy);
 	kfree(codec->name);
 	kfree(codec);
 }
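
The soc-core.c changes above add per-codec configuration through snd_soc_codec_conf (dev_name, name_prefix, compress_type) and DAI-less auxiliary codecs through snd_soc_aux_dev; snd_soc_add_controls() now prepends the configured name_prefix to every kcontrol name. Below is a minimal userspace sketch of that matching and prefixing logic, assuming made-up device and control names; confs, lookup_prefix and control_name are illustrative helpers, not kernel API.

#include <stdio.h>
#include <string.h>

/* mirrors the dev_name -> name_prefix mapping idea of snd_soc_codec_conf */
struct conf {
	const char *dev_name;
	const char *name_prefix;
};

static const struct conf confs[] = {
	{ "codec.a", "Left" },
	{ "codec.b", "Right" },
};

/* pick the prefix configured for a codec device, if any */
static const char *lookup_prefix(const char *codec_name)
{
	size_t i;

	for (i = 0; i < sizeof(confs) / sizeof(confs[0]); i++)
		if (!strcmp(codec_name, confs[i].dev_name))
			return confs[i].name_prefix;
	return NULL;
}

/* build the control name the way snd_soc_add_controls() does above:
 * "<prefix> <control>" when a prefix is configured, else the raw name */
static void control_name(char *buf, size_t len,
			 const char *codec_name, const char *ctl)
{
	const char *prefix = lookup_prefix(codec_name);

	if (prefix)
		snprintf(buf, len, "%s %s", prefix, ctl);
	else
		snprintf(buf, len, "%s", ctl);
}

int main(void)
{
	char name[44];

	control_name(name, sizeof(name), "codec.a", "Speaker Volume");
	printf("%s\n", name);	/* prints "Left Speaker Volume" */
	control_name(name, sizeof(name), "codec.c", "Speaker Volume");
	printf("%s\n", name);	/* no prefix configured: "Speaker Volume" */
	return 0;
}

The 44-byte buffer mirrors the prefixed_name[44] array used in snd_soc_add_controls() above; longer names are simply truncated by snprintf().
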
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c721502..499730a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -42,9 +42,11 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 
+#include <trace/events/asoc.h>
+
 /* dapm power sequences - make this per codec in the future */
 static int dapm_up_seq[] = {
 	[snd_soc_dapm_pre] = 0,
@@ -54,12 +56,14 @@
 	[snd_soc_dapm_aif_out] = 3,
 	[snd_soc_dapm_mic] = 4,
 	[snd_soc_dapm_mux] = 5,
+	[snd_soc_dapm_virt_mux] = 5,
 	[snd_soc_dapm_value_mux] = 5,
 	[snd_soc_dapm_dac] = 6,
 	[snd_soc_dapm_mixer] = 7,
 	[snd_soc_dapm_mixer_named_ctl] = 7,
 	[snd_soc_dapm_pga] = 8,
 	[snd_soc_dapm_adc] = 9,
+	[snd_soc_dapm_out_drv] = 10,
 	[snd_soc_dapm_hp] = 10,
 	[snd_soc_dapm_spk] = 10,
 	[snd_soc_dapm_post] = 11,
@@ -70,6 +74,7 @@
 	[snd_soc_dapm_adc] = 1,
 	[snd_soc_dapm_hp] = 2,
 	[snd_soc_dapm_spk] = 2,
+	[snd_soc_dapm_out_drv] = 2,
 	[snd_soc_dapm_pga] = 4,
 	[snd_soc_dapm_mixer_named_ctl] = 5,
 	[snd_soc_dapm_mixer] = 5,
@@ -77,6 +82,7 @@
 	[snd_soc_dapm_mic] = 7,
 	[snd_soc_dapm_micbias] = 8,
 	[snd_soc_dapm_mux] = 9,
+	[snd_soc_dapm_virt_mux] = 9,
 	[snd_soc_dapm_value_mux] = 9,
 	[snd_soc_dapm_aif_in] = 10,
 	[snd_soc_dapm_aif_out] = 10,
@@ -90,17 +96,24 @@
 		schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
 }
 
-static void pop_dbg(u32 pop_time, const char *fmt, ...)
+static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
 {
 	va_list args;
+	char *buf;
+
+	if (!pop_time)
+		return;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return;
 
 	va_start(args, fmt);
-
-	if (pop_time) {
-		vprintk(fmt, args);
-	}
-
+	vsnprintf(buf, PAGE_SIZE, fmt, args);
+	dev_info(dev, "%s", buf);
 	va_end(args);
+
+	kfree(buf);
 }
 
 /* create a new dapm widget */
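
The reworked pop_dbg() above formats its varargs into a temporary PAGE_SIZE buffer with vsnprintf() and then emits the whole message through dev_info(), so the output is tagged with the owning device instead of going through a bare vprintk(). A standalone sketch of that buffer-then-print pattern, with printf() standing in for dev_info() and illustrative names (pop_dbg_example, BUF_SIZE):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096	/* stand-in for PAGE_SIZE in the kernel code */

/* format the varargs into a heap buffer, then emit it in one call;
 * printf() stands in for dev_info() here */
static void pop_dbg_example(unsigned int pop_time, const char *fmt, ...)
{
	va_list args;
	char *buf;

	if (!pop_time)
		return;

	buf = malloc(BUF_SIZE);
	if (!buf)
		return;

	va_start(args, fmt);
	vsnprintf(buf, BUF_SIZE, fmt, args);
	va_end(args);

	printf("dapm: %s", buf);
	free(buf);
}

int main(void)
{
	pop_dbg_example(5, "pop test %s : %s in %d ms\n", "HP Out", "on", 5);
	pop_dbg_example(0, "suppressed when pop_time is zero\n");
	return 0;
}
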
@@ -120,36 +133,45 @@
  * Returns 0 for success else error.
  */
 static int snd_soc_dapm_set_bias_level(struct snd_soc_card *card,
-		struct snd_soc_codec *codec, enum snd_soc_bias_level level)
+				       struct snd_soc_dapm_context *dapm,
+				       enum snd_soc_bias_level level)
 {
 	int ret = 0;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		dev_dbg(codec->dev, "Setting full bias\n");
+		dev_dbg(dapm->dev, "Setting full bias\n");
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		dev_dbg(codec->dev, "Setting bias prepare\n");
+		dev_dbg(dapm->dev, "Setting bias prepare\n");
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		dev_dbg(codec->dev, "Setting standby bias\n");
+		dev_dbg(dapm->dev, "Setting standby bias\n");
 		break;
 	case SND_SOC_BIAS_OFF:
-		dev_dbg(codec->dev, "Setting bias off\n");
+		dev_dbg(dapm->dev, "Setting bias off\n");
 		break;
 	default:
-		dev_err(codec->dev, "Setting invalid bias %d\n", level);
+		dev_err(dapm->dev, "Setting invalid bias %d\n", level);
 		return -EINVAL;
 	}
 
+	trace_snd_soc_bias_level_start(card, level);
+
 	if (card && card->set_bias_level)
 		ret = card->set_bias_level(card, level);
 	if (ret == 0) {
-		if (codec->driver->set_bias_level)
-			ret = codec->driver->set_bias_level(codec, level);
+		if (dapm->codec && dapm->codec->driver->set_bias_level)
+			ret = dapm->codec->driver->set_bias_level(dapm->codec, level);
 		else
-			codec->bias_level = level;
+			dapm->bias_level = level;
 	}
+	if (ret == 0) {
+		if (card && card->set_bias_level_post)
+			ret = card->set_bias_level_post(card, level);
+	}
+
+	trace_snd_soc_bias_level_done(card, level);
 
 	return ret;
 }
@@ -196,6 +218,20 @@
 		}
 	}
 	break;
+	case snd_soc_dapm_virt_mux: {
+		struct soc_enum *e = (struct soc_enum *)w->kcontrols[i].private_value;
+
+		p->connect = 0;
+		/* since a virtual mux has no backing registers to
+		 * decide which path to connect, it will try to match
+		 * with the first enumeration.  This is to ensure
+		 * that the default mux choice (the first) will be
+		 * correctly powered up during initialization.
+		 */
+		if (!strcmp(p->name, e->texts[0]))
+			p->connect = 1;
+	}
+	break;
 	case snd_soc_dapm_value_mux: {
 		struct soc_enum *e = (struct soc_enum *)
 			w->kcontrols[i].private_value;
@@ -217,6 +253,7 @@
 	break;
 	/* does not affect routing - always connected */
 	case snd_soc_dapm_pga:
+	case snd_soc_dapm_out_drv:
 	case snd_soc_dapm_output:
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_input:
@@ -241,7 +278,7 @@
 }
 
 /* connect mux widget to its interconnecting audio paths */
-static int dapm_connect_mux(struct snd_soc_codec *codec,
+static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
 	struct snd_soc_dapm_path *path, const char *control_name,
 	const struct snd_kcontrol_new *kcontrol)
@@ -251,7 +288,7 @@
 
 	for (i = 0; i < e->max; i++) {
 		if (!(strcmp(control_name, e->texts[i]))) {
-			list_add(&path->list, &codec->dapm_paths);
+			list_add(&path->list, &dapm->card->paths);
 			list_add(&path->list_sink, &dest->sources);
 			list_add(&path->list_source, &src->sinks);
 			path->name = (char*)e->texts[i];
@@ -264,7 +301,7 @@
 }
 
 /* connect mixer widget to its interconnecting audio paths */
-static int dapm_connect_mixer(struct snd_soc_codec *codec,
+static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
 	struct snd_soc_dapm_path *path, const char *control_name)
 {
@@ -273,7 +310,7 @@
 	/* search for mixer kcontrol */
 	for (i = 0; i < dest->num_kcontrols; i++) {
 		if (!strcmp(control_name, dest->kcontrols[i].name)) {
-			list_add(&path->list, &codec->dapm_paths);
+			list_add(&path->list, &dapm->card->paths);
 			list_add(&path->list_sink, &dest->sources);
 			list_add(&path->list_source, &src->sinks);
 			path->name = dest->kcontrols[i].name;
@@ -290,6 +327,8 @@
 	int change, power;
 	unsigned int old, new;
 	struct snd_soc_codec *codec = widget->codec;
+	struct snd_soc_dapm_context *dapm = widget->dapm;
+	struct snd_soc_card *card = dapm->card;
 
 	/* check for valid widgets */
 	if (widget->reg < 0 || widget->id == snd_soc_dapm_input ||
@@ -309,24 +348,26 @@
 
 	change = old != new;
 	if (change) {
-		pop_dbg(codec->pop_time, "pop test %s : %s in %d ms\n",
+		pop_dbg(dapm->dev, card->pop_time,
+			"pop test %s : %s in %d ms\n",
 			widget->name, widget->power ? "on" : "off",
-			codec->pop_time);
-		pop_wait(codec->pop_time);
+			card->pop_time);
+		pop_wait(card->pop_time);
 		snd_soc_write(codec, widget->reg, new);
 	}
-	pr_debug("reg %x old %x new %x change %d\n", widget->reg,
-		 old, new, change);
+	dev_dbg(dapm->dev, "reg %x old %x new %x change %d\n", widget->reg,
+		old, new, change);
 	return change;
 }
 
 /* create new dapm mixer control */
-static int dapm_new_mixer(struct snd_soc_codec *codec,
+static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	int i, ret = 0;
 	size_t name_len;
 	struct snd_soc_dapm_path *path;
+	struct snd_card *card = dapm->codec->card->snd_card;
 
 	/* add kcontrol */
 	for (i = 0; i < w->num_kcontrols; i++) {
@@ -368,11 +409,11 @@
 
 			path->kcontrol = snd_soc_cnew(&w->kcontrols[i], w,
 				path->long_name);
-			ret = snd_ctl_add(codec->card->snd_card, path->kcontrol);
+			ret = snd_ctl_add(card, path->kcontrol);
 			if (ret < 0) {
-				printk(KERN_ERR "asoc: failed to add dapm kcontrol %s: %d\n",
-				       path->long_name,
-				       ret);
+				dev_err(dapm->dev,
+					"asoc: failed to add dapm kcontrol %s: %d\n",
+					path->long_name, ret);
 				kfree(path->long_name);
 				path->long_name = NULL;
 				return ret;
@@ -383,20 +424,22 @@
 }
 
 /* create new dapm mux control */
-static int dapm_new_mux(struct snd_soc_codec *codec,
+static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	struct snd_soc_dapm_path *path = NULL;
 	struct snd_kcontrol *kcontrol;
+	struct snd_card *card = dapm->codec->card->snd_card;
 	int ret = 0;
 
 	if (!w->num_kcontrols) {
-		printk(KERN_ERR "asoc: mux %s has no controls\n", w->name);
+		dev_err(dapm->dev, "asoc: mux %s has no controls\n", w->name);
 		return -EINVAL;
 	}
 
 	kcontrol = snd_soc_cnew(&w->kcontrols[0], w, w->name);
-	ret = snd_ctl_add(codec->card->snd_card, kcontrol);
+	ret = snd_ctl_add(card, kcontrol);
+
 	if (ret < 0)
 		goto err;
 
@@ -406,26 +449,27 @@
 	return ret;
 
 err:
-	printk(KERN_ERR "asoc: failed to add kcontrol %s\n", w->name);
+	dev_err(dapm->dev, "asoc: failed to add kcontrol %s\n", w->name);
 	return ret;
 }
 
 /* create new dapm volume control */
-static int dapm_new_pga(struct snd_soc_codec *codec,
+static int dapm_new_pga(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	if (w->num_kcontrols)
-		pr_err("asoc: PGA controls not supported: '%s'\n", w->name);
+		dev_err(w->dapm->dev,
+			"asoc: PGA controls not supported: '%s'\n", w->name);
 
 	return 0;
 }
 
 /* reset 'walked' bit for each dapm path */
-static inline void dapm_clear_walk(struct snd_soc_codec *codec)
+static inline void dapm_clear_walk(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_path *p;
 
-	list_for_each_entry(p, &codec->dapm_paths, list)
+	list_for_each_entry(p, &dapm->card->paths, list)
 		p->walked = 0;
 }
 
@@ -435,13 +479,14 @@
  */
 static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
 {
-	int level = snd_power_get_state(widget->codec->card->snd_card);
+	int level = snd_power_get_state(widget->dapm->codec->card->snd_card);
 
 	switch (level) {
 	case SNDRV_CTL_POWER_D3hot:
 	case SNDRV_CTL_POWER_D3cold:
 		if (widget->ignore_suspend)
-			pr_debug("%s ignoring suspend\n", widget->name);
+			dev_dbg(widget->dapm->dev, "%s ignoring suspend\n",
+				widget->name);
 		return widget->ignore_suspend;
 	default:
 		return 1;
@@ -572,7 +617,7 @@
 
 	/* call any power change event handlers */
 	if (w->event)
-		pr_debug("power %s event for %s flags %x\n",
+		dev_dbg(w->dapm->dev, "power %s event for %s flags %x\n",
 			 w->power ? "on" : "off",
 			 w->name, w->event_flags);
 
@@ -621,9 +666,9 @@
 	int in, out;
 
 	in = is_connected_input_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	out = is_connected_output_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	return out != 0 && in != 0;
 }
 
@@ -634,7 +679,7 @@
 
 	if (w->active) {
 		in = is_connected_input_ep(w);
-		dapm_clear_walk(w->codec);
+		dapm_clear_walk(w->dapm);
 		return in != 0;
 	} else {
 		return dapm_generic_check_power(w);
@@ -648,7 +693,7 @@
 
 	if (w->active) {
 		out = is_connected_output_ep(w);
-		dapm_clear_walk(w->codec);
+		dapm_clear_walk(w->dapm);
 		return out != 0;
 	} else {
 		return dapm_generic_check_power(w);
@@ -674,7 +719,7 @@
 		}
 	}
 
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 
 	return power;
 }
@@ -687,8 +732,8 @@
 		return sort[a->id] - sort[b->id];
 	if (a->reg != b->reg)
 		return a->reg - b->reg;
-	if (a->codec != b->codec)
-		return (unsigned long)a->codec - (unsigned long)b->codec;
+	if (a->dapm != b->dapm)
+		return (unsigned long)a->dapm - (unsigned long)b->dapm;
 
 	return 0;
 }
@@ -709,12 +754,57 @@
 	list_add_tail(&new_widget->power_list, list);
 }
 
+static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
+				 struct snd_soc_dapm_widget *w, int event)
+{
+	struct snd_soc_card *card = dapm->card;
+	const char *ev_name;
+	int power, ret;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ev_name = "PRE_PMU";
+		power = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		ev_name = "POST_PMU";
+		power = 1;
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		ev_name = "PRE_PMD";
+		power = 0;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ev_name = "POST_PMD";
+		power = 0;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	if (w->power != power)
+		return;
+
+	if (w->event && (w->event_flags & event)) {
+		pop_dbg(dapm->dev, card->pop_time, "pop test : %s %s\n",
+			w->name, ev_name);
+		trace_snd_soc_dapm_widget_event_start(w, event);
+		ret = w->event(w, NULL, event);
+		trace_snd_soc_dapm_widget_event_done(w, event);
+		if (ret < 0)
+			pr_err("%s: %s event failed: %d\n",
+			       ev_name, w->name, ret);
+	}
+}
+
 /* Apply the coalesced changes from a DAPM sequence */
-static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
+static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
 				   struct list_head *pending)
 {
+	struct snd_soc_card *card = dapm->card;
 	struct snd_soc_dapm_widget *w;
-	int reg, power, ret;
+	int reg, power;
 	unsigned int value = 0;
 	unsigned int mask = 0;
 	unsigned int cur_mask;
@@ -735,64 +825,26 @@
 		if (power)
 			value |= cur_mask;
 
-		pop_dbg(codec->pop_time,
+		pop_dbg(dapm->dev, card->pop_time,
 			"pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
 			w->name, reg, value, mask);
 
-		/* power up pre event */
-		if (w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
-			pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
-			if (ret < 0)
-				pr_err("%s: pre event failed: %d\n",
-				       w->name, ret);
-		}
-
-		/* power down pre event */
-		if (!w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
-			pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
-			if (ret < 0)
-				pr_err("%s: pre event failed: %d\n",
-				       w->name, ret);
-		}
+		/* Check for events */
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMU);
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMD);
 	}
 
 	if (reg >= 0) {
-		pop_dbg(codec->pop_time,
+		pop_dbg(dapm->dev, card->pop_time,
 			"pop test : Applying 0x%x/0x%x to %x in %dms\n",
-			value, mask, reg, codec->pop_time);
-		pop_wait(codec->pop_time);
-		snd_soc_update_bits(codec, reg, mask, value);
+			value, mask, reg, card->pop_time);
+		pop_wait(card->pop_time);
+		snd_soc_update_bits(dapm->codec, reg, mask, value);
 	}
 
 	list_for_each_entry(w, pending, power_list) {
-		/* power up post event */
-		if (w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
-			pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n",
-				w->name);
-			ret = w->event(w,
-				       NULL, SND_SOC_DAPM_POST_PMU);
-			if (ret < 0)
-				pr_err("%s: post event failed: %d\n",
-				       w->name, ret);
-		}
-
-		/* power down post event */
-		if (!w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
-			pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
-			if (ret < 0)
-				pr_err("%s: post event failed: %d\n",
-				       w->name, ret);
-		}
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMU);
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMD);
 	}
 }
 
@@ -804,26 +856,29 @@
  * Currently anything that requires more than a single write is not
  * handled.
  */
-static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list,
-			 int event, int sort[])
+static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
+			 struct list_head *list, int event, int sort[])
 {
 	struct snd_soc_dapm_widget *w, *n;
 	LIST_HEAD(pending);
 	int cur_sort = -1;
 	int cur_reg = SND_SOC_NOPM;
+	struct snd_soc_dapm_context *cur_dapm = NULL;
 	int ret;
 
 	list_for_each_entry_safe(w, n, list, power_list) {
 		ret = 0;
 
 		/* Do we need to apply any queued changes? */
-		if (sort[w->id] != cur_sort || w->reg != cur_reg) {
+		if (sort[w->id] != cur_sort || w->reg != cur_reg ||
+		    w->dapm != cur_dapm) {
 			if (!list_empty(&pending))
-				dapm_seq_run_coalesced(codec, &pending);
+				dapm_seq_run_coalesced(cur_dapm, &pending);
 
 			INIT_LIST_HEAD(&pending);
 			cur_sort = -1;
 			cur_reg = SND_SOC_NOPM;
+			cur_dapm = NULL;
 		}
 
 		switch (w->id) {
@@ -867,19 +922,55 @@
 			/* Queue it up for application */
 			cur_sort = sort[w->id];
 			cur_reg = w->reg;
+			cur_dapm = w->dapm;
 			list_move(&w->power_list, &pending);
 			break;
 		}
 
 		if (ret < 0)
-			pr_err("Failed to apply widget power: %d\n",
-			       ret);
+			dev_err(w->dapm->dev,
+				"Failed to apply widget power: %d\n", ret);
 	}
 
 	if (!list_empty(&pending))
-		dapm_seq_run_coalesced(codec, &pending);
+		dapm_seq_run_coalesced(dapm, &pending);
 }
 
+static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
+{
+	struct snd_soc_dapm_update *update = dapm->update;
+	struct snd_soc_dapm_widget *w;
+	int ret;
+
+	if (!update)
+		return;
+
+	w = update->widget;
+
+	if (w->event &&
+	    (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
+		ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
+		if (ret != 0)
+			pr_err("%s DAPM pre-event failed: %d\n",
+			       w->name, ret);
+	}
+
+	ret = snd_soc_update_bits(w->codec, update->reg, update->mask,
+				  update->val);
+	if (ret < 0)
+		pr_err("%s DAPM update failed: %d\n", w->name, ret);
+
+	if (w->event &&
+	    (w->event_flags & SND_SOC_DAPM_POST_REG)) {
+		ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
+		if (ret != 0)
+			pr_err("%s DAPM post-event failed: %d\n",
+			       w->name, ret);
+	}
+}
+
+
+
 /*
  * Scan each dapm widget for complete audio path.
  * A complete path is a route that has valid endpoints i.e.:-
@@ -889,20 +980,26 @@
  *  o Input pin to Output pin (bypass, sidetone)
  *  o DAC to ADC (loopback).
  */
-static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
+static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
 {
-	struct snd_soc_card *card = codec->card;
+	struct snd_soc_card *card = dapm->codec->card;
 	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_context *d;
 	LIST_HEAD(up_list);
 	LIST_HEAD(down_list);
 	int ret = 0;
 	int power;
-	int sys_power = 0;
+
+	trace_snd_soc_dapm_start(card);
+
+	list_for_each_entry(d, &card->dapm_list, list)
+		if (d->n_widgets)
+			d->dev_power = 0;
 
 	/* Check which widgets we need to power and store them in
 	 * lists indicating if they should be powered up or down.
 	 */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &card->widgets, list) {
 		switch (w->id) {
 		case snd_soc_dapm_pre:
 			dapm_seq_insert(w, &down_list, dapm_down_seq);
@@ -920,11 +1017,13 @@
 			else
 				power = 1;
 			if (power)
-				sys_power = 1;
+				w->dapm->dev_power = 1;
 
 			if (w->power == power)
 				continue;
 
+			trace_snd_soc_dapm_widget_power(w, power);
+
 			if (power)
 				dapm_seq_insert(w, &up_list, dapm_up_seq);
 			else
@@ -938,26 +1037,26 @@
 	/* If there are no DAPM widgets then try to figure out power from the
 	 * event type.
 	 */
-	if (list_empty(&codec->dapm_widgets)) {
+	if (!dapm->n_widgets) {
 		switch (event) {
 		case SND_SOC_DAPM_STREAM_START:
 		case SND_SOC_DAPM_STREAM_RESUME:
-			sys_power = 1;
+			dapm->dev_power = 1;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
-			sys_power = !!codec->active;
+			dapm->dev_power = !!dapm->codec->active;
 			break;
 		case SND_SOC_DAPM_STREAM_SUSPEND:
-			sys_power = 0;
+			dapm->dev_power = 0;
 			break;
 		case SND_SOC_DAPM_STREAM_NOP:
-			switch (codec->bias_level) {
+			switch (dapm->bias_level) {
 				case SND_SOC_BIAS_STANDBY:
 				case SND_SOC_BIAS_OFF:
-					sys_power = 0;
+					dapm->dev_power = 0;
 					break;
 				default:
-					sys_power = 1;
+					dapm->dev_power = 1;
 					break;
 			}
 			break;
@@ -966,52 +1065,71 @@
 		}
 	}
 
-	if (sys_power && codec->bias_level == SND_SOC_BIAS_OFF) {
-		ret = snd_soc_dapm_set_bias_level(card, codec,
-						  SND_SOC_BIAS_STANDBY);
-		if (ret != 0)
-			pr_err("Failed to turn on bias: %d\n", ret);
-	}
+	list_for_each_entry(d, &dapm->card->dapm_list, list) {
+		if (d->dev_power && d->bias_level == SND_SOC_BIAS_OFF) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_STANDBY);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to turn on bias: %d\n", ret);
+		}
 
-	/* If we're changing to all on or all off then prepare */
-	if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) ||
-	    (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_PREPARE);
-		if (ret != 0)
-			pr_err("Failed to prepare bias: %d\n", ret);
+		/* If we're changing to all on or all off then prepare */
+		if ((d->dev_power && d->bias_level == SND_SOC_BIAS_STANDBY) ||
+		    (!d->dev_power && d->bias_level == SND_SOC_BIAS_ON)) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_PREPARE);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to prepare bias: %d\n", ret);
+		}
 	}
 
 	/* Power down widgets first; try to avoid amplifying pops. */
-	dapm_seq_run(codec, &down_list, event, dapm_down_seq);
+	dapm_seq_run(dapm, &down_list, event, dapm_down_seq);
+
+	dapm_widget_update(dapm);
 
 	/* Now power up. */
-	dapm_seq_run(codec, &up_list, event, dapm_up_seq);
+	dapm_seq_run(dapm, &up_list, event, dapm_up_seq);
 
-	/* If we just powered the last thing off drop to standby bias */
-	if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_STANDBY);
-		if (ret != 0)
-			pr_err("Failed to apply standby bias: %d\n", ret);
+	list_for_each_entry(d, &dapm->card->dapm_list, list) {
+		/* If we just powered the last thing off drop to standby bias */
+		if (d->bias_level == SND_SOC_BIAS_PREPARE && !d->dev_power) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_STANDBY);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to apply standby bias: %d\n",
+					ret);
+		}
+
+		/* If we're in standby and can support bias off then do that */
+		if (d->bias_level == SND_SOC_BIAS_STANDBY &&
+		    d->idle_bias_off) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_OFF);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to turn off bias: %d\n", ret);
+		}
+
+		/* If we just powered up then move to active bias */
+		if (d->bias_level == SND_SOC_BIAS_PREPARE && d->dev_power) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_ON);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to apply active bias: %d\n",
+					ret);
+		}
 	}
 
-	/* If we're in standby and can support bias off then do that */
-	if (codec->bias_level == SND_SOC_BIAS_STANDBY &&
-	    codec->idle_bias_off) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF);
-		if (ret != 0)
-			pr_err("Failed to turn off bias: %d\n", ret);
-	}
+	pop_dbg(dapm->dev, card->pop_time,
+		"DAPM sequencing finished, waiting %dms\n", card->pop_time);
+	pop_wait(card->pop_time);
 
-	/* If we just powered up then move to active bias */
-	if (codec->bias_level == SND_SOC_BIAS_PREPARE && sys_power) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_ON);
-		if (ret != 0)
-			pr_err("Failed to apply active bias: %d\n", ret);
-	}
-
-	pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n",
-		codec->pop_time);
-	pop_wait(codec->pop_time);
+	trace_snd_soc_dapm_done(card);
 
 	return 0;
 }
@@ -1038,9 +1156,9 @@
 		return -ENOMEM;
 
 	in = is_connected_input_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	out = is_connected_output_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 
 	ret = snprintf(buf, PAGE_SIZE, "%s: %s  in %d out %d",
 		       w->name, w->power ? "On" : "Off", in, out);
@@ -1090,29 +1208,29 @@
 	.llseek = default_llseek,
 };
 
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 	struct dentry *d;
 
-	if (!codec->debugfs_dapm)
+	if (!dapm->debugfs_dapm)
 		return;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
-		if (!w->name)
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (!w->name || w->dapm != dapm)
 			continue;
 
 		d = debugfs_create_file(w->name, 0444,
-					codec->debugfs_dapm, w,
+					dapm->debugfs_dapm, w,
 					&dapm_widget_power_fops);
 		if (!d)
-			printk(KERN_WARNING
-			       "ASoC: Failed to create %s debugfs file\n",
-			       w->name);
+			dev_warn(w->dapm->dev,
+				"ASoC: Failed to create %s debugfs file\n",
+				w->name);
 	}
 }
 #else
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm)
 {
 }
 #endif
@@ -1126,6 +1244,7 @@
 	int found = 0;
 
 	if (widget->id != snd_soc_dapm_mux &&
+	    widget->id != snd_soc_dapm_virt_mux &&
 	    widget->id != snd_soc_dapm_value_mux)
 		return -ENODEV;
 
@@ -1133,7 +1252,7 @@
 		return 0;
 
 	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+	list_for_each_entry(path, &widget->dapm->card->paths, list) {
 		if (path->kcontrol != kcontrol)
 			continue;
 
@@ -1149,7 +1268,7 @@
 	}
 
 	if (found)
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
+		dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
 
 	return 0;
 }
@@ -1167,7 +1286,7 @@
 		return -ENODEV;
 
 	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+	list_for_each_entry(path, &widget->dapm->card->paths, list) {
 		if (path->kcontrol != kcontrol)
 			continue;
 
@@ -1178,7 +1297,7 @@
 	}
 
 	if (found)
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
+		dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
 
 	return 0;
 }
@@ -1194,7 +1313,9 @@
 	int count = 0;
 	char *state = "not set";
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &codec->card->widgets, list) {
+		if (w->dapm != &codec->dapm)
+			continue;
 
 		/* only display widgets that burn power */
 		switch (w->id) {
@@ -1206,6 +1327,7 @@
 		case snd_soc_dapm_dac:
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_pga:
+		case snd_soc_dapm_out_drv:
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 		case snd_soc_dapm_supply:
@@ -1218,7 +1340,7 @@
 		}
 	}
 
-	switch (codec->bias_level) {
+	switch (codec->dapm.bias_level) {
 	case SND_SOC_BIAS_ON:
 		state = "On";
 		break;
@@ -1250,31 +1372,50 @@
 }
 
 /* free all dapm widgets and resources */
-static void dapm_free_widgets(struct snd_soc_codec *codec)
+static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
 	struct snd_soc_dapm_path *p, *next_p;
 
-	list_for_each_entry_safe(w, next_w, &codec->dapm_widgets, list) {
+	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		list_del(&w->list);
+		/*
+		 * Remove the source and sink paths associated with this widget.
+		 * While removing a path, drop the references to it from both
+		 * the source and sink widgets so the path is removed only once.
+		 */
+		list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
+			list_del(&p->list_sink);
+			list_del(&p->list_source);
+			list_del(&p->list);
+			kfree(p->long_name);
+			kfree(p);
+		}
+		list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
+			list_del(&p->list_sink);
+			list_del(&p->list_source);
+			list_del(&p->list);
+			kfree(p->long_name);
+			kfree(p);
+		}
+		kfree(w->name);
 		kfree(w);
 	}
-
-	list_for_each_entry_safe(p, next_p, &codec->dapm_paths, list) {
-		list_del(&p->list);
-		kfree(p->long_name);
-		kfree(p);
-	}
 }
 
-static int snd_soc_dapm_set_pin(struct snd_soc_codec *codec,
+static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
 				const char *pin, int status)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
-			pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+			dev_dbg(w->dapm->dev, "dapm: pin %s = %d\n",
+				pin, status);
 			w->connected = status;
 			/* Allow disabling of forced pins */
 			if (status == 0)
@@ -1283,46 +1424,72 @@
 		}
 	}
 
-	pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 
 /**
  * snd_soc_dapm_sync - scan and power dapm paths
- * @codec: audio codec
+ * @dapm: DAPM context
  *
  * Walks all dapm audio paths and powers widgets according to their
  * stream or path usage.
  *
  * Returns 0 for success.
  */
-int snd_soc_dapm_sync(struct snd_soc_codec *codec)
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
 {
-	return dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP);
+	return dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
 
-static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
+static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
 				  const struct snd_soc_dapm_route *route)
 {
 	struct snd_soc_dapm_path *path;
 	struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
-	const char *sink = route->sink;
+	struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
+	const char *sink;
 	const char *control = route->control;
-	const char *source = route->source;
+	const char *source;
+	char prefixed_sink[80];
+	char prefixed_source[80];
 	int ret = 0;
 
-	/* find src and dest widgets */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	if (dapm->codec->name_prefix) {
+		snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
+			 dapm->codec->name_prefix, route->sink);
+		sink = prefixed_sink;
+		snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
+			 dapm->codec->name_prefix, route->source);
+		source = prefixed_source;
+	} else {
+		sink = route->sink;
+		source = route->source;
+	}
 
+	/*
+	 * Find source and sink widgets across all widgets, but favor a
+	 * widget from the current DAPM context.
+	 */
+	list_for_each_entry(w, &dapm->card->widgets, list) {
 		if (!wsink && !(strcmp(w->name, sink))) {
-			wsink = w;
+			wtsink = w;
+			if (w->dapm == dapm)
+				wsink = w;
 			continue;
 		}
 		if (!wsource && !(strcmp(w->name, source))) {
-			wsource = w;
+			wtsource = w;
+			if (w->dapm == dapm)
+				wsource = w;
 		}
 	}
+	/* use widget from another DAPM context if not found from this */
+	if (!wsink)
+		wsink = wtsink;
+	if (!wsource)
+		wsource = wtsource;
 
 	if (wsource == NULL || wsink == NULL)
 		return -ENODEV;
@@ -1356,7 +1523,7 @@
 
 	/* connect static paths */
 	if (control == NULL) {
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 1;
@@ -1368,6 +1535,7 @@
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_pga:
+	case snd_soc_dapm_out_drv:
 	case snd_soc_dapm_input:
 	case snd_soc_dapm_output:
 	case snd_soc_dapm_micbias:
@@ -1377,14 +1545,15 @@
 	case snd_soc_dapm_supply:
 	case snd_soc_dapm_aif_in:
 	case snd_soc_dapm_aif_out:
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 1;
 		return 0;
 	case snd_soc_dapm_mux:
+	case snd_soc_dapm_virt_mux:
 	case snd_soc_dapm_value_mux:
-		ret = dapm_connect_mux(codec, wsource, wsink, path, control,
+		ret = dapm_connect_mux(dapm, wsource, wsink, path, control,
 			&wsink->kcontrols[0]);
 		if (ret != 0)
 			goto err;
@@ -1392,7 +1561,7 @@
 	case snd_soc_dapm_switch:
 	case snd_soc_dapm_mixer:
 	case snd_soc_dapm_mixer_named_ctl:
-		ret = dapm_connect_mixer(codec, wsource, wsink, path, control);
+		ret = dapm_connect_mixer(dapm, wsource, wsink, path, control);
 		if (ret != 0)
 			goto err;
 		break;
@@ -1400,7 +1569,7 @@
 	case snd_soc_dapm_mic:
 	case snd_soc_dapm_line:
 	case snd_soc_dapm_spk:
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 0;
@@ -1409,15 +1578,15 @@
 	return 0;
 
 err:
-	printk(KERN_WARNING "asoc: no dapm match for %s --> %s --> %s\n", source,
-		control, sink);
+	dev_warn(dapm->dev, "asoc: no dapm match for %s --> %s --> %s\n",
+		 source, control, sink);
 	kfree(path);
 	return ret;
 }
 
 /**
  * snd_soc_dapm_add_routes - Add routes between DAPM widgets
- * @codec: codec
+ * @dapm: DAPM context
  * @route: audio routes
  * @num: number of routes
  *
@@ -1428,17 +1597,16 @@
  * Returns 0 for success else error. On error all resources can be freed
  * with a call to snd_soc_card_free().
  */
-int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
 			    const struct snd_soc_dapm_route *route, int num)
 {
 	int i, ret;
 
 	for (i = 0; i < num; i++) {
-		ret = snd_soc_dapm_add_route(codec, route);
+		ret = snd_soc_dapm_add_route(dapm, route);
 		if (ret < 0) {
-			printk(KERN_ERR "Failed to add route %s->%s\n",
-			       route->source,
-			       route->sink);
+			dev_err(dapm->dev, "Failed to add route %s->%s\n",
+				route->source, route->sink);
 			return ret;
 		}
 		route++;
@@ -1450,17 +1618,17 @@
 
 /**
  * snd_soc_dapm_new_widgets - add new dapm widgets
- * @codec: audio codec
+ * @dapm: DAPM context
  *
  * Checks the codec for any new dapm widgets and creates them if found.
  *
  * Returns 0 for success.
  */
-int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
+int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list)
+	list_for_each_entry(w, &dapm->card->widgets, list)
 	{
 		if (w->new)
 			continue;
@@ -1470,12 +1638,13 @@
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mixer(codec, w);
+			dapm_new_mixer(dapm, w);
 			break;
 		case snd_soc_dapm_mux:
+		case snd_soc_dapm_virt_mux:
 		case snd_soc_dapm_value_mux:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mux(codec, w);
+			dapm_new_mux(dapm, w);
 			break;
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_aif_out:
@@ -1486,8 +1655,9 @@
 			w->power_check = dapm_dac_check_power;
 			break;
 		case snd_soc_dapm_pga:
+		case snd_soc_dapm_out_drv:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_pga(codec, w);
+			dapm_new_pga(dapm, w);
 			break;
 		case snd_soc_dapm_input:
 		case snd_soc_dapm_output:
@@ -1508,7 +1678,7 @@
 		w->new = 1;
 	}
 
-	dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP);
+	dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
@@ -1569,13 +1739,12 @@
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int reg = mc->reg;
 	unsigned int shift = mc->shift;
-	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val, val2, val_mask;
-	int connect;
-	int ret;
+	unsigned int val, val_mask;
+	int connect, change;
+	struct snd_soc_dapm_update update;
 
 	val = (ucontrol->value.integer.value[0] & mask);
 
@@ -1583,18 +1752,12 @@
 		val = max - val;
 	val_mask = mask << shift;
 	val = val << shift;
-	if (shift != rshift) {
-		val2 = (ucontrol->value.integer.value[1] & mask);
-		if (invert)
-			val2 = max - val2;
-		val_mask |= mask << rshift;
-		val |= val2 << rshift;
-	}
 
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 
-	if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
+	change = snd_soc_test_bits(widget->codec, reg, val_mask, val);
+	if (change) {
 		if (val)
 			/* new connection */
 			connect = invert ? 0:1;
@@ -1602,28 +1765,20 @@
 			/* old connection must be powered down */
 			connect = invert ? 1:0;
 
+		update.kcontrol = kcontrol;
+		update.widget = widget;
+		update.reg = reg;
+		update.mask = mask;
+		update.val = val;
+		widget->dapm->update = &update;
+
 		dapm_mixer_update_power(widget, kcontrol, connect);
+
+		widget->dapm->update = NULL;
 	}
 
-	if (widget->event) {
-		if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-			ret = widget->event(widget, kcontrol,
-						SND_SOC_DAPM_PRE_REG);
-			if (ret < 0) {
-				ret = 1;
-				goto out;
-			}
-		}
-		ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-		if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-			ret = widget->event(widget, kcontrol,
-						SND_SOC_DAPM_POST_REG);
-	} else
-		ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
 
@@ -1671,7 +1826,7 @@
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned int val, mux, change;
 	unsigned int mask, bitmask;
-	int ret = 0;
+	struct snd_soc_dapm_update update;
 
 	for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
 		;
@@ -1690,24 +1845,20 @@
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 	change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+	widget->dapm->update = &update;
+
 	dapm_mux_update_power(widget, kcontrol, change, mux, e);
 
-	if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_PRE_REG);
-		if (ret < 0)
-			goto out;
-	}
+	widget->dapm->update = NULL;
 
-	ret = snd_soc_update_bits(widget->codec, e->reg, mask, val);
-
-	if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_POST_REG);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double);
 
@@ -1819,7 +1970,7 @@
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned int val, mux, change;
 	unsigned int mask;
-	int ret = 0;
+	struct snd_soc_dapm_update update;
 
 	if (ucontrol->value.enumerated.item[0] > e->max - 1)
 		return -EINVAL;
@@ -1836,24 +1987,20 @@
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 	change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+	widget->dapm->update = &update;
+
 	dapm_mux_update_power(widget, kcontrol, change, mux, e);
 
-	if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_PRE_REG);
-		if (ret < 0)
-			goto out;
-	}
+	widget->dapm->update = NULL;
 
-	ret = snd_soc_update_bits(widget->codec, e->reg, mask, val);
-
-	if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_POST_REG);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_value_enum_double);
 
@@ -1892,7 +2039,7 @@
 	mutex_lock(&codec->mutex);
 
 	ucontrol->value.integer.value[0] =
-		snd_soc_dapm_get_pin_status(codec, pin);
+		snd_soc_dapm_get_pin_status(&codec->dapm, pin);
 
 	mutex_unlock(&codec->mutex);
 
@@ -1915,11 +2062,11 @@
 	mutex_lock(&codec->mutex);
 
 	if (ucontrol->value.integer.value[0])
-		snd_soc_dapm_enable_pin(codec, pin);
+		snd_soc_dapm_enable_pin(&codec->dapm, pin);
 	else
-		snd_soc_dapm_disable_pin(codec, pin);
+		snd_soc_dapm_disable_pin(&codec->dapm, pin);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	mutex_unlock(&codec->mutex);
 
@@ -1929,26 +2076,43 @@
 
 /**
  * snd_soc_dapm_new_control - create new dapm control
- * @codec: audio codec
+ * @dapm: DAPM context
  * @widget: widget template
  *
  * Creates a new dapm control based upon the template.
  *
  * Returns 0 for success else error.
  */
-int snd_soc_dapm_new_control(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget)
 {
 	struct snd_soc_dapm_widget *w;
+	size_t name_len;
 
 	if ((w = dapm_cnew_widget(widget)) == NULL)
 		return -ENOMEM;
 
-	w->codec = codec;
+	name_len = strlen(widget->name) + 1;
+	if (dapm->codec->name_prefix)
+		name_len += 1 + strlen(dapm->codec->name_prefix);
+	w->name = kmalloc(name_len, GFP_KERNEL);
+	if (w->name == NULL) {
+		kfree(w);
+		return -ENOMEM;
+	}
+	if (dapm->codec->name_prefix)
+		snprintf(w->name, name_len, "%s %s",
+			dapm->codec->name_prefix, widget->name);
+	else
+		snprintf(w->name, name_len, "%s", widget->name);
+
+	dapm->n_widgets++;
+	w->dapm = dapm;
+	w->codec = dapm->codec;
 	INIT_LIST_HEAD(&w->sources);
 	INIT_LIST_HEAD(&w->sinks);
 	INIT_LIST_HEAD(&w->list);
-	list_add(&w->list, &codec->dapm_widgets);
+	list_add(&w->list, &dapm->card->widgets);
 
 	/* machine layer sets up unconnected pins and insertions */
 	w->connected = 1;
@@ -1958,7 +2122,7 @@
 
 /**
  * snd_soc_dapm_new_controls - create new dapm controls
- * @codec: audio codec
+ * @dapm: DAPM context
  * @widget: widget array
  * @num: number of widgets
  *
@@ -1966,18 +2130,18 @@
  *
  * Returns 0 for success else error.
  */
-int snd_soc_dapm_new_controls(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget,
 	int num)
 {
 	int i, ret;
 
 	for (i = 0; i < num; i++) {
-		ret = snd_soc_dapm_new_control(codec, widget);
+		ret = snd_soc_dapm_new_control(dapm, widget);
 		if (ret < 0) {
-			printk(KERN_ERR
-			       "ASoC: Failed to create DAPM control %s: %d\n",
-			       widget->name, ret);
+			dev_err(dapm->dev,
+				"ASoC: Failed to create DAPM control %s: %d\n",
+				widget->name, ret);
 			return ret;
 		}
 		widget++;
@@ -1986,34 +2150,17 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls);
 
-
-/**
- * snd_soc_dapm_stream_event - send a stream event to the dapm core
- * @codec: audio codec
- * @stream: stream name
- * @event: stream event
- *
- * Sends a stream event to the dapm core. The core then makes any
- * necessary widget power changes.
- *
- * Returns 0 for success else error.
- */
-int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
+static void soc_dapm_stream_event(struct snd_soc_dapm_context *dapm,
 	const char *stream, int event)
 {
-	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dapm_widget *w;
 
-	if (stream == NULL)
-		return 0;
-
-	mutex_lock(&codec->mutex);
-	list_for_each_entry(w, &codec->dapm_widgets, list)
+	list_for_each_entry(w, &dapm->card->widgets, list)
 	{
-		if (!w->sname)
+		if (!w->sname || w->dapm != dapm)
 			continue;
-		pr_debug("widget %s\n %s stream %s event %d\n",
-			 w->name, w->sname, stream, event);
+		dev_dbg(w->dapm->dev, "widget %s\n %s stream %s event %d\n",
+			w->name, w->sname, stream, event);
 		if (strstr(w->sname, stream)) {
 			switch(event) {
 			case SND_SOC_DAPM_STREAM_START:
@@ -2031,7 +2178,30 @@
 		}
 	}
 
-	dapm_power_widgets(codec, event);
+	dapm_power_widgets(dapm, event);
+}
+
+/**
+ * snd_soc_dapm_stream_event - send a stream event to the dapm core
+ * @rtd: PCM runtime data
+ * @stream: stream name
+ * @event: stream event
+ *
+ * Sends a stream event to the dapm core. The core then makes any
+ * necessary widget power changes.
+ *
+ * Returns 0 for success else error.
+ */
+int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
+	const char *stream, int event)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+
+	if (stream == NULL)
+		return 0;
+
+	mutex_lock(&codec->mutex);
+	soc_dapm_stream_event(&codec->dapm, stream, event);
 	mutex_unlock(&codec->mutex);
 	return 0;
 }
@@ -2039,7 +2209,7 @@
 
 /**
  * snd_soc_dapm_enable_pin - enable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Enables input/output pin and its parents or children widgets iff there is
@@ -2047,15 +2217,15 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 1);
+	return snd_soc_dapm_set_pin(dapm, pin, 1);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
 
 /**
  * snd_soc_dapm_force_enable_pin - force a pin to be enabled
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Enables input/output pin regardless of any other state.  This is
@@ -2065,42 +2235,47 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
+				  const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
-			pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+			dev_dbg(w->dapm->dev,
+				"dapm: force enable pin %s\n", pin);
 			w->connected = 1;
 			w->force = 1;
 			return 0;
 		}
 	}
 
-	pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);
 
 /**
  * snd_soc_dapm_disable_pin - disable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Disables input/output pin and its parents or children widgets.
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
+			     const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 0);
+	return snd_soc_dapm_set_pin(dapm, pin, 0);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);
 
 /**
  * snd_soc_dapm_nc_pin - permanently disable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Marks the specified pin as being not connected, disabling it along
@@ -2112,26 +2287,29 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 0);
+	return snd_soc_dapm_set_pin(dapm, pin, 0);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin);
 
 /**
  * snd_soc_dapm_get_pin_status - get audio pin status
- * @codec: audio codec
+ * @dapm: DAPM context
  * @pin: audio signal pin endpoint (or start point)
  *
  * Get audio pin status - connected or disconnected.
  *
  * Returns 1 for connected otherwise 0.
  */
-int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
+				const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin))
 			return w->connected;
 	}
@@ -2142,7 +2320,7 @@
 
 /**
  * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint
- * @codec: audio codec
+ * @dapm: DAPM context
  * @pin: audio signal pin endpoint (or start point)
  *
  * Mark the given endpoint or pin as ignoring suspend.  When the
@@ -2151,18 +2329,21 @@
  * normal means at suspend time, it will not be turned on if it was not
  * already enabled.
  */
-int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
+				const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
 			w->ignore_suspend = 1;
 			return 0;
 		}
 	}
 
-	pr_err("Unknown DAPM pin: %s\n", pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
@@ -2173,20 +2354,23 @@
  *
  * Free all dapm widgets and resources.
  */
-void snd_soc_dapm_free(struct snd_soc_codec *codec)
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm)
 {
-	snd_soc_dapm_sys_remove(codec->dev);
-	dapm_free_widgets(codec);
+	snd_soc_dapm_sys_remove(dapm->dev);
+	dapm_free_widgets(dapm);
+	list_del(&dapm->list);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
 
-static void soc_dapm_shutdown_codec(struct snd_soc_codec *codec)
+static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 	LIST_HEAD(down_list);
 	int powerdown = 0;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (w->power) {
 			dapm_seq_insert(w, &down_list, dapm_down_seq);
 			w->power = 0;
@@ -2198,9 +2382,9 @@
 	 * standby.
 	 */
 	if (powerdown) {
-		snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_PREPARE);
-		dapm_seq_run(codec, &down_list, 0, dapm_down_seq);
-		snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_STANDBY);
+		snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_PREPARE);
+		dapm_seq_run(dapm, &down_list, 0, dapm_down_seq);
+		snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_STANDBY);
 	}
 }
 
@@ -2211,10 +2395,10 @@
 {
 	struct snd_soc_codec *codec;
 
-	list_for_each_entry(codec, &card->codec_dev_list, list)
-		soc_dapm_shutdown_codec(codec);
-
-	snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF);
+	list_for_each_entry(codec, &card->codec_dev_list, list) {
+		soc_dapm_shutdown_codec(&codec->dapm);
+		snd_soc_dapm_set_bias_level(card, &codec->dapm, SND_SOC_BIAS_OFF);
+	}
 }
 
 /* Module information */
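
The soc-dapm.c changes above move the public DAPM entry points from taking a
struct snd_soc_codec to taking a struct snd_soc_dapm_context, with widgets and
paths now kept on per-card lists. A minimal machine-driver sketch of the
resulting call pattern follows; the widget/route tables and the "HPOUT" codec
pin are illustrative placeholders, not part of this patch:

/*
 * Sketch only: exercises the context-based API added above.  The
 * tables and the "HPOUT" pin name are illustrative placeholders.
 */
#include <linux/kernel.h>
#include <sound/soc.h>

static const struct snd_soc_dapm_widget board_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
};

static const struct snd_soc_dapm_route board_routes[] = {
	/* sink, control, source */
	{ "Headphone Jack", NULL, "HPOUT" },
};

static int board_dapm_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dapm_context *dapm = &rtd->codec->dapm;
	int ret;

	ret = snd_soc_dapm_new_controls(dapm, board_widgets,
					ARRAY_SIZE(board_widgets));
	if (ret < 0)
		return ret;

	ret = snd_soc_dapm_add_routes(dapm, board_routes,
				      ARRAY_SIZE(board_routes));
	if (ret < 0)
		return ret;

	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
	return snd_soc_dapm_sync(dapm);
}
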
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 8a0a920..ac5a5bc 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -13,11 +13,11 @@
 
 #include <sound/jack.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <trace/events/asoc.h>
 
 /**
  * snd_soc_jack_new - Create a new jack
@@ -60,14 +60,18 @@
 void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
 {
 	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_jack_pin *pin;
 	int enable;
 	int oldstatus;
 
+	trace_snd_soc_jack_report(jack, mask, status);
+
 	if (!jack)
 		return;
 
 	codec = jack->codec;
+	dapm =  &codec->dapm;
 
 	mutex_lock(&codec->mutex);
 
@@ -81,6 +85,8 @@
 	if (mask && (jack->status == oldstatus))
 		goto out;
 
+	trace_snd_soc_jack_notify(jack, status);
+
 	list_for_each_entry(pin, &jack->pins, list) {
 		enable = pin->mask & jack->status;
 
@@ -88,15 +94,15 @@
 			enable = !enable;
 
 		if (enable)
-			snd_soc_dapm_enable_pin(codec, pin->pin);
+			snd_soc_dapm_enable_pin(dapm, pin->pin);
 		else
-			snd_soc_dapm_disable_pin(codec, pin->pin);
+			snd_soc_dapm_disable_pin(dapm, pin->pin);
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
 	blocking_notifier_call_chain(&jack->notifier, status, NULL);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_jack_report(jack->jack, status);
 
@@ -207,6 +213,12 @@
 static irqreturn_t gpio_handler(int irq, void *data)
 {
 	struct snd_soc_jack_gpio *gpio = data;
+	struct device *dev = gpio->jack->codec->card->dev;
+
+	trace_snd_soc_jack_irq(gpio->name);
+
+	if (device_may_wakeup(dev))
+		pm_wakeup_event(dev, gpio->debounce_time + 50);
 
 	schedule_delayed_work(&gpio->work,
 			      msecs_to_jiffies(gpio->debounce_time));
@@ -263,11 +275,12 @@
 		INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
 		gpios[i].jack = jack;
 
-		ret = request_irq(gpio_to_irq(gpios[i].gpio),
-				gpio_handler,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				jack->codec->dev->driver->name,
-				&gpios[i]);
+		ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
+					      gpio_handler,
+					      IRQF_TRIGGER_RISING |
+					      IRQF_TRIGGER_FALLING,
+					      jack->codec->dev->driver->name,
+					      &gpios[i]);
 		if (ret)
 			goto err;
 
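
For context, a hedged sketch of the reporting side that the jack code above
serves: a detect handler hands a status/mask pair to snd_soc_jack_report(),
which now drives pin state through &codec->dapm. The jack object and the
headphone-only mask below are assumptions made for illustration.

/* Sketch only: "hp_jack" would be created elsewhere by the machine driver. */
#include <linux/types.h>
#include <sound/jack.h>
#include <sound/soc.h>

static struct snd_soc_jack hp_jack;

static void hp_detect_done(bool present)
{
	/* only the headphone bit is reported; other status bits are untouched */
	snd_soc_jack_report(&hp_jack,
			    present ? SND_JACK_HEADPHONE : 0,
			    SND_JACK_HEADPHONE);
}
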
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 6914821..5b792d2 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -76,7 +76,10 @@
 		format = 1 << UAC_FORMAT_TYPE_I_PCM;
 	}
 	if (format & (1 << UAC_FORMAT_TYPE_I_PCM)) {
-		if (sample_width > sample_bytes * 8) {
+		if (chip->usb_id == USB_ID(0x0582, 0x0016) /* Edirol SD-90 */ &&
+		    sample_width == 24 && sample_bytes == 2)
+			sample_bytes = 3;
+		else if (sample_width > sample_bytes * 8) {
 			snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n",
 				   chip->dev->devnum, fp->iface, fp->altsetting,
 				   sample_width, sample_bytes);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 25bce7e..db2dc5f 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -850,8 +850,8 @@
 		return;
 	}
 
-	memset(urb->transfer_buffer + count, 0xFD, 9 - count);
-	urb->transfer_buffer_length = count;
+	memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count);
+	urb->transfer_buffer_length = ep->max_transfer;
 }
 
 static struct usb_protocol_ops snd_usbmidi_122l_ops = {
@@ -1295,6 +1295,13 @@
 	case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */
 		ep->max_transfer = 4;
 		break;
+		/*
+		 * Some devices only work with a 9-byte packet size:
+		 */
+	case USB_ID(0x0644, 0x800E): /* Tascam US-122L */
+	case USB_ID(0x0644, 0x800F): /* Tascam US-144 */
+		ep->max_transfer = 9;
+		break;
 	}
 	for (i = 0; i < OUTPUT_URBS; ++i) {
 		buffer = usb_alloc_coherent(umidi->dev,
@@ -1729,13 +1736,7 @@
 {
 	static const char *const names[] = { "High Load", "Light Load" };
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item > 1)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int roland_load_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index f2d74d6..7df89b3 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1633,18 +1633,11 @@
 static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
 {
 	struct usb_mixer_elem_info *cval = kcontrol->private_data;
-	char **itemlist = (char **)kcontrol->private_value;
+	const char **itemlist = (const char **)kcontrol->private_value;
 
 	if (snd_BUG_ON(!itemlist))
 		return -EINVAL;
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = cval->max;
-	if (uinfo->value.enumerated.item >= cval->max)
-		uinfo->value.enumerated.item = cval->max - 1;
-	strlcpy(uinfo->value.enumerated.name, itemlist[uinfo->value.enumerated.item],
-		sizeof(uinfo->value.enumerated.name));
-	return 0;
+	return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist);
 }
 
 /* get callback for selector unit */
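
Both USB hunks above drop the hand-rolled enumerated-control .info boilerplate
in favour of snd_ctl_enum_info(). A minimal sketch of what such a callback
reduces to (the control texts are placeholders):

/* Sketch only: item names are illustrative. */
#include <linux/kernel.h>
#include <sound/control.h>

static const char *const example_texts[] = { "Off", "On" };

static int example_enum_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	/* one channel, two enumerated items; the helper fills in the name */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(example_texts),
				 example_texts);
}
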
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index ad7079d..3599987 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -705,11 +705,11 @@
 		.data = (const struct snd_usb_audio_quirk[]) {
 			{
 				.ifnum = 0,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
 			},
 			{
 				.ifnum = 1,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
 			},
 			{
 				.ifnum = 2,
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 6ef68e4..084e6fc 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -273,29 +273,26 @@
 					  struct file *file, poll_table *wait)
 {
 	struct us122l	*us122l = hw->private_data;
-	struct usb_stream *s = us122l->sk.s;
 	unsigned	*polled;
 	unsigned int	mask;
 
 	poll_wait(file, &us122l->sk.sleep, wait);
 
-	switch (s->state) {
-	case usb_stream_ready:
-		if (us122l->first == file)
-			polled = &s->periods_polled;
-		else
-			polled = &us122l->second_periods_polled;
-		if (*polled != s->periods_done) {
-			*polled = s->periods_done;
-			mask = POLLIN | POLLOUT | POLLWRNORM;
-			break;
+	mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+	if (mutex_trylock(&us122l->mutex)) {
+		struct usb_stream *s = us122l->sk.s;
+		if (s && s->state == usb_stream_ready) {
+			if (us122l->first == file)
+				polled = &s->periods_polled;
+			else
+				polled = &us122l->second_periods_polled;
+			if (*polled != s->periods_done) {
+				*polled = s->periods_done;
+				mask = POLLIN | POLLOUT | POLLWRNORM;
+			} else
+				mask = 0;
 		}
-		/* Fall through */
-		mask = 0;
-		break;
-	default:
-		mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
-		break;
+		mutex_unlock(&us122l->mutex);
 	}
 	return mask;
 }
@@ -381,6 +378,7 @@
 {
 	struct usb_stream_config *cfg;
 	struct us122l *us122l = hw->private_data;
+	struct usb_stream *s;
 	unsigned min_period_frames;
 	int err = 0;
 	bool high_speed;
@@ -426,18 +424,18 @@
 	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
 
 	mutex_lock(&us122l->mutex);
+	s = us122l->sk.s;
 	if (!us122l->master)
 		us122l->master = file;
 	else if (us122l->master != file) {
-		if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
+		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
 			err = -EIO;
 			goto unlock;
 		}
 		us122l->slave = file;
 	}
-	if (!us122l->sk.s ||
-	    memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
-	    us122l->sk.s->state == usb_stream_xrun) {
+	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+	    s->state == usb_stream_xrun) {
 		us122l_stop(us122l);
 		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
 			err = -EIO;
@@ -448,6 +446,7 @@
 	mutex_unlock(&us122l->mutex);
 free:
 	kfree(cfg);
+	wake_up_all(&us122l->sk.sleep);
 	return err;
 }
 
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1b9b13e..2b5387d 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -227,7 +227,7 @@
   CFLAGS_OPTIMIZE = -O6
 endif
 
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
new file mode 100644
index 0000000..15130b5
--- /dev/null
+++ b/tools/perf/arch/s390/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
+endif
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
new file mode 100644
index 0000000..e19653e
--- /dev/null
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -0,0 +1,22 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ *    Copyright IBM Corp. 2010
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <libio.h>
+#include <dwarf-regs.h>
+
+#define NUM_GPRS 16
+
+static const char *gpr_names[NUM_GPRS] = {
+	"%r0", "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
+	"%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+	return (n >= NUM_GPRS) ? NULL : gpr_names[n];
+}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7bc0490..7069bd3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -331,6 +331,9 @@
 			else if (err ==  ENODEV && cpu_list) {
 				die("No such device - did you specify"
 					" an out-of-range profile CPU?\n");
+			} else if (err == ENOENT) {
+				die("%s event is not supported. ",
+				     event_name(evsel));
 			} else if (err == EINVAL && sample_id_all_avail) {
 				/*
 				 * Old kernel, no attr->sample_id_type_all field
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a4ebeb..abd4b84 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -489,7 +489,8 @@
 
 	err = pthread_attr_init(&attr);
 	BUG_ON(err);
-	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
+	err = pthread_attr_setstacksize(&attr,
+			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
 	BUG_ON(err);
 	err = pthread_mutex_lock(&start_work_mutex);
 	BUG_ON(err);
@@ -1861,7 +1862,7 @@
 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
-	if (rec_argv)
+	if (rec_argv == NULL)
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 02b2d80..c385a63 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -316,6 +316,8 @@
 				      "\t Consider tweaking"
 				      " /proc/sys/kernel/perf_event_paranoid or running as root.",
 				      system_wide ? "system-wide " : "");
+			} else if (errno == ENOENT) {
+				error("%s event is not supported. ", event_name(counter));
 			} else {
 				error("open_counter returned with %d (%s). "
 				      "/bin/dmesg may provide additional information.\n",
@@ -683,8 +685,7 @@
 		nr_counters = ARRAY_SIZE(default_attrs);
 
 		for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
-			pos = perf_evsel__new(default_attrs[c].type,
-					      default_attrs[c].config,
+			pos = perf_evsel__new(&default_attrs[c],
 					      nr_counters);
 			if (pos == NULL)
 				goto out;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c98434..ed56961 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,7 @@
 	return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -264,6 +265,7 @@
 	int err = -1, fd;
 	struct thread_map *threads;
 	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
 	unsigned int nr_open_calls = 111, i;
 	int id = trace_event__id("sys_enter_open");
 
@@ -278,7 +280,10 @@
 		return -1;
 	}
 
-	evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
 		goto out_thread_map_delete;
@@ -317,6 +322,111 @@
 	return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t *cpu_set;
+	size_t cpu_set_size;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+	cpu_set = CPU_ALLOC(cpus->nr);
+
+	if (cpu_set == NULL)
+		goto out_thread_map_delete;
+
+	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
+	CPU_ZERO_S(cpu_set_size, cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_cpu_free;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+
+		CPU_SET(cpu, cpu_set);
+		sched_setaffinity(0, cpu_set_size, cpu_set);
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpu, cpu_set);
+	}
+
+	/*
+	 * Here we need to explicitly preallocate the counts, because if
+	 * we use the auto allocation it will allocate just for 1 cpu,
+	 * as we start with cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__open_read_on_cpu\n");
+			goto out_close_fd;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
+				 expected, cpu, evsel->counts->cpu[cpu].val);
+			goto out_close_fd;
+		}
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_cpu_free:
+	CPU_FREE(cpu_set);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -330,6 +440,10 @@
 		.func = test__open_syscall_event,
 	},
 	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
 		.func = NULL,
 	},
 };
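
The evsel rework visible in the test above (and in util/evsel.c below) changes
perf_evsel__new() to take a fully populated struct perf_event_attr instead of a
(type, config) pair, and teaches perf_evsel__open() to accept both a cpu map
and a thread map. A minimal sketch of the new constructor call; the helper
name is invented for illustration:

/* Sketch only: shows the attr-based constructor; helper name is made up. */
#include <string.h>
#include <linux/perf_event.h>
#include "util/evsel.h"

static struct perf_evsel *cycles_evsel(int idx)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* returns NULL on allocation failure */
	return perf_evsel__new(&attr, idx);
}
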
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1e67ab9..6ce4042 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1247,6 +1247,8 @@
 				die("Permission error - are you root?\n"
 					"\t Consider tweaking"
 					" /proc/sys/kernel/perf_event_paranoid.\n");
+			if (err == ENOENT)
+				die("%s event is not supported. ", event_name(evsel));
 			/*
 			 * If it's cycles then fall back to hrtimer
 			 * based cpu-clock-tick sw counter, which
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c95267e..f5cfed6 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -6,14 +6,13 @@
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
 	if (evsel != NULL) {
 		evsel->idx	   = idx;
-		evsel->attr.type   = type;
-		evsel->attr.config = config;
+		evsel->attr	   = *attr;
 		INIT_LIST_HEAD(&evsel->node);
 	}
 
@@ -128,59 +127,75 @@
 	return 0;
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+			      struct thread_map *threads)
 {
-	int cpu;
+	int cpu, thread;
 
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+	if (evsel->fd == NULL &&
+	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
-							cpus->map[cpu], -1, 0);
-		if (FD(evsel, cpu, 0) < 0)
-			goto out_close;
+		for (thread = 0; thread < threads->nr; thread++) {
+			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+								     threads->map[thread],
+								     cpus->map[cpu], -1, 0);
+			if (FD(evsel, cpu, thread) < 0)
+				goto out_close;
+		}
 	}
 
 	return 0;
 
 out_close:
-	while (--cpu >= 0) {
-		close(FD(evsel, cpu, 0));
-		FD(evsel, cpu, 0) = -1;
-	}
+	do {
+		while (--thread >= 0) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+		thread = threads->nr;
+	} while (--cpu >= 0);
 	return -1;
 }
 
+static struct {
+	struct cpu_map map;
+	int cpus[1];
+} empty_cpu_map = {
+	.map.nr	= 1,
+	.cpus	= { -1, },
+};
+
+static struct {
+	struct thread_map map;
+	int threads[1];
+} empty_thread_map = {
+	.map.nr	 = 1,
+	.threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
+{
+
+	if (cpus == NULL) {
+		/* Work around old compiler warnings about strict aliasing */
+		cpus = &empty_cpu_map.map;
+	}
+
+	if (threads == NULL)
+		threads = &empty_thread_map.map;
+
+	return __perf_evsel__open(evsel, cpus, threads);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+{
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
+
 int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
 {
-	int thread;
-
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
-		return -1;
-
-	for (thread = 0; thread < threads->nr; thread++) {
-		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
-							   threads->map[thread], -1, -1, 0);
-		if (FD(evsel, 0, thread) < 0)
-			goto out_close;
-	}
-
-	return 0;
-
-out_close:
-	while (--thread >= 0) {
-		close(FD(evsel, 0, thread));
-		FD(evsel, 0, thread) = -1;
-	}
-	return -1;
-}
-
-int perf_evsel__open(struct perf_evsel *evsel, 
-		     struct cpu_map *cpus, struct thread_map *threads)
-{
-	if (threads == NULL)
-		return perf_evsel__open_per_cpu(evsel, cpus);
-
-	return perf_evsel__open_per_thread(evsel, threads);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
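
The refactor above folds the per-cpu and per-thread paths into a single
__perf_evsel__open(), with NULL cpu/thread maps falling back to the dummy
one-entry maps.  A minimal caller sketch (not part of this patch; the attr
values and error handling are illustrative only):

	struct perf_event_attr attr;
	struct perf_evsel *evsel;

	memset(&attr, 0, sizeof(attr));
	attr.type   = PERF_TYPE_SOFTWARE;		/* illustrative choice */
	attr.config = PERF_COUNT_SW_TASK_CLOCK;		/* illustrative choice */

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		return -1;

	/* NULL cpus => per-thread counters; NULL threads => per-cpu counters */
	if (perf_evsel__open(evsel, NULL, threads) < 0)
		return -1;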
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a0ccd69..b2d755f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -37,7 +37,7 @@
 struct cpu_map;
 struct thread_map;
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 649083f..5cb6f4b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -490,6 +490,31 @@
 	return EVT_HANDLED_ALL;
 }
 
+static int store_event_type(const char *orgname)
+{
+	char filename[PATH_MAX], *c;
+	FILE *file;
+	int id, n;
+
+	sprintf(filename, "%s/", debugfs_path);
+	strncat(filename, orgname, strlen(orgname));
+	strcat(filename, "/id");
+
+	c = strchr(filename, ':');
+	if (c)
+		*c = '/';
+
+	file = fopen(filename, "r");
+	if (!file)
+		return 0;
+	n = fscanf(file, "%i", &id);
+	fclose(file);
+	if (n < 1) {
+		pr_err("cannot store event ID\n");
+		return -EINVAL;
+	}
+	return perf_header__push_event(id, orgname);
+}
 
 static enum event_result parse_tracepoint_event(const char **strp,
 				    struct perf_event_attr *attr)
@@ -533,9 +558,13 @@
 		*strp += strlen(sys_name) + evt_length;
 		return parse_multiple_tracepoint_event(sys_name, evt_name,
 						       flags);
-	} else
+	} else {
+		if (store_event_type(evt_name) < 0)
+			return EVT_FAILED;
+
 		return parse_single_tracepoint_event(sys_name, evt_name,
 						     evt_length, attr, strp);
+	}
 }
 
 static enum event_result
@@ -778,41 +807,11 @@
 	return ret;
 }
 
-static int store_event_type(const char *orgname)
-{
-	char filename[PATH_MAX], *c;
-	FILE *file;
-	int id, n;
-
-	sprintf(filename, "%s/", debugfs_path);
-	strncat(filename, orgname, strlen(orgname));
-	strcat(filename, "/id");
-
-	c = strchr(filename, ':');
-	if (c)
-		*c = '/';
-
-	file = fopen(filename, "r");
-	if (!file)
-		return 0;
-	n = fscanf(file, "%i", &id);
-	fclose(file);
-	if (n < 1) {
-		pr_err("cannot store event ID\n");
-		return -EINVAL;
-	}
-	return perf_header__push_event(id, orgname);
-}
-
 int parse_events(const struct option *opt __used, const char *str, int unset __used)
 {
 	struct perf_event_attr attr;
 	enum event_result ret;
 
-	if (strchr(str, ':'))
-		if (store_event_type(str) < 0)
-			return -1;
-
 	for (;;) {
 		memset(&attr, 0, sizeof(attr));
 		ret = parse_event_symbols(&str, &attr);
@@ -824,7 +823,7 @@
 
 		if (ret != EVT_HANDLED_ALL) {
 			struct perf_evsel *evsel;
-			evsel = perf_evsel__new(attr.type, attr.config,
+			evsel = perf_evsel__new(&attr,
 						nr_counters);
 			if (evsel == NULL)
 				return -1;
@@ -1014,8 +1013,15 @@
 
 int perf_evsel_list__create_default(void)
 {
-	struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
-						   PERF_COUNT_HW_CPU_CYCLES, 0);
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+
+	evsel = perf_evsel__new(&attr, 0);
+
 	if (evsel == NULL)
 		return -ENOMEM;
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6fb4694..313dac2 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1007,7 +1007,7 @@
 	if (size == 0)
 		size = 8;
 
-	if (head + event->header.size >= mmap_size) {
+	if (head + event->header.size > mmap_size) {
 		if (mmaps[map_idx]) {
 			munmap(mmaps[map_idx], mmap_size);
 			mmaps[map_idx] = NULL;
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
new file mode 100644
index 0000000..fd8e1f1
--- /dev/null
+++ b/tools/power/x86/turbostat/Makefile
@@ -0,0 +1,8 @@
+turbostat : turbostat.c
+
+clean :
+	rm -f turbostat
+
+install :
+	install turbostat /usr/bin/turbostat
+	install turbostat.8 /usr/share/man/man8
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
new file mode 100644
index 0000000..ff75125
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -0,0 +1,172 @@
+.TH TURBOSTAT 8
+.SH NAME
+turbostat \- Report processor frequency and idle statistics
+.SH SYNOPSIS
+.ft B
+.B turbostat
+.RB [ "\-v" ]
+.RB [ "\-M MSR#" ]
+.RB command
+.br
+.B turbostat
+.RB [ "\-v" ]
+.RB [ "\-M MSR#" ]
+.RB [ "\-i interval_sec" ]
+.SH DESCRIPTION
+\fBturbostat \fP reports processor topology, frequency
+and idle power state statistics on modern X86 processors.
+Either \fBcommand\fP is forked and statistics are printed
+upon its completion, or statistics are printed periodically.
+
+\fBturbostat \fP
+requires that the processor
+supports an "invariant" TSC, plus the APERF and MPERF MSRs.
+\fBturbostat \fP will report idle cpu power state residency
+on processors that additionally support C-state residency counters.
+
+.SS Options
+The \fB-v\fP option increases verbosity.
+.PP
+The \fB-M MSR#\fP option dumps the specified MSR,
+in addition to the usual frequency and idle statistics.
+.PP
+The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds.
+The default is 5 seconds.
+.PP
+The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
+displays the statistics gathered since it was forked.
+.PP
+.SH FIELD DESCRIPTIONS
+.nf
+\fBpkg\fP processor package number.
+\fBcore\fP processor core number.
+\fBCPU\fP Linux CPU (logical processor) number.
+\fB%c0\fP percent of the interval that the CPU retired instructions.
+\fBGHz\fP average clock rate while the CPU was in c0 state.
+\fBTSC\fP average GHz that the TSC ran during the entire interval.
+\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
+\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
+.fi
+.PP
+.SH EXAMPLE
+Without any parameters, turbostat prints out counters every 5 seconds.
+(override interval with "-i sec" option, or specify a command
+for turbostat to fork).
+
+The first row of statistics reflects the average for the entire system.
+Subsequent rows show per-CPU statistics.
+
+.nf
+[root@x980]# ./turbostat
+core CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+          0.04 1.62 3.38   0.11   0.00  99.85   0.00  95.07
+  0   0   0.04 1.62 3.38   0.06   0.00  99.90   0.00  95.07
+  0   6   0.02 1.62 3.38   0.08   0.00  99.90   0.00  95.07
+  1   2   0.10 1.62 3.38   0.29   0.00  99.61   0.00  95.07
+  1   8   0.11 1.62 3.38   0.28   0.00  99.61   0.00  95.07
+  2   4   0.01 1.62 3.38   0.01   0.00  99.98   0.00  95.07
+  2  10   0.01 1.61 3.38   0.02   0.00  99.98   0.00  95.07
+  8   1   0.07 1.62 3.38   0.15   0.00  99.78   0.00  95.07
+  8   7   0.03 1.62 3.38   0.19   0.00  99.78   0.00  95.07
+  9   3   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
+  9   9   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
+ 10   5   0.01 1.62 3.38   0.13   0.00  99.86   0.00  95.07
+ 10  11   0.08 1.62 3.38   0.05   0.00  99.86   0.00  95.07
+.fi
+.SH VERBOSE EXAMPLE
+The "-v" option adds verbosity to the output:
+
+.nf
+GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
+12 * 133 = 1600 MHz max efficiency
+25 * 133 = 3333 MHz TSC frequency
+26 * 133 = 3467 MHz max turbo 4 active cores
+26 * 133 = 3467 MHz max turbo 3 active cores
+27 * 133 = 3600 MHz max turbo 2 active cores
+27 * 133 = 3600 MHz max turbo 1 active cores
+
+.fi
+The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
+available at the minimum package voltage.  The \fBTSC frequency\fP is the nominal
+maximum frequency of the processor if turbo-mode were not available.  This frequency
+should be sustainable on all CPUs indefinitely, given nominal power and cooling.
+The remaining rows show what maximum turbo frequency is possible
+depending on the number of idle cores.  Note that this information is
+not available on all processors.
+.SH FORK EXAMPLE
+If turbostat is invoked with a command, it will fork that command
+and output the statistics gathered when the command exits.
+e.g. here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
+until ^C, while the other CPUs are mostly idle:
+
+.nf
+[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
+
+^Ccore CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+           8.49 3.63 3.38  16.23   0.66  74.63   0.00   0.00
+   0   0   1.22 3.62 3.38  32.18   0.00  66.60   0.00   0.00
+   0   6   0.40 3.61 3.38  33.00   0.00  66.60   0.00   0.00
+   1   2   0.11 3.14 3.38   0.19   3.95  95.75   0.00   0.00
+   1   8   0.05 2.88 3.38   0.25   3.95  95.75   0.00   0.00
+   2   4   0.00 3.13 3.38   0.02   0.00  99.98   0.00   0.00
+   2  10   0.00 3.09 3.38   0.02   0.00  99.98   0.00   0.00
+   8   1   0.04 3.50 3.38  14.43   0.00  85.54   0.00   0.00
+   8   7   0.03 2.98 3.38  14.43   0.00  85.54   0.00   0.00
+   9   3   0.00 3.16 3.38 100.00   0.00   0.00   0.00   0.00
+   9   9  99.93 3.63 3.38   0.06   0.00   0.00   0.00   0.00
+  10   5   0.01 2.82 3.38   0.08   0.00  99.91   0.00   0.00
+  10  11   0.02 3.36 3.38   0.06   0.00  99.91   0.00   0.00
+6.950866 sec
+
+.fi
+Above, the cycle soaker drives cpu9 up to the 3.6 GHz turbo limit
+while the other processors are generally in various states of idle.
+
+Note that cpu3 is an HT sibling sharing core9
+with cpu9, and thus it is unable to get to an idle state
+deeper than c1 while cpu9 is busy.
+
+Note that turbostat reports an average GHz of 3.61, while
+the arithmetic average of the GHz column above is 3.24.
+This is a weighted average, where the weight is %c0.  i.e. it is the total number of
+un-halted cycles elapsed per unit time, divided by the number of CPUs.
+.SH NOTES
+
+.B "turbostat "
+must be run as root.
+
+.B "turbostat "
+reads hardware counters, but doesn't write them.
+So it will not interfere with the OS or other programs, including
+multiple invocations of itself.
+
+\fBturbostat \fP
+may work poorly on Linux-2.6.20 through 2.6.29,
+as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF
+in those kernels.
+
+The APERF, MPERF MSRs are defined to count non-halted cycles.
+Although it is not guaranteed by the architecture, turbostat assumes
+that they count at TSC rate, which is true on all processors tested to date.
+
+.SH REFERENCES
+"Intel® Turbo Boost Technology
+in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
+http://download.intel.com/design/processor/applnots/320354.pdf
+
+"Intel® 64 and IA-32 Architectures Software Developer's Manual
+Volume 3B: System Programming Guide"
+http://www.intel.com/products/processor/manuals/
+
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+.fi
+
+.SH "SEE ALSO"
+msr(4), vmstat(8)
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
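
The weighted-average note in the FORK EXAMPLE section can be made concrete
with a small sketch (illustrative only; print_pcc() and compute_average()
in the turbostat.c below do the real work, with units = 1000000000):

	/* Per-CPU GHz for one measurement interval. */
	double cpu_ghz(double tsc, double aperf, double mperf, double interval_sec)
	{
		return tsc / 1e9 * aperf / mperf / interval_sec;
	}

	/*
	 * The summary row applies the same formula to counters averaged
	 * over all CPUs, so sum(aperf)/sum(mperf) weights each CPU by its
	 * non-halted (%c0) cycles rather than averaging the GHz column.
	 */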
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
new file mode 100644
index 0000000..4c6983d
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -0,0 +1,1048 @@
+/*
+ * turbostat -- show CPU frequency and C-state residency
+ * on modern Intel turbo-capable processors.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <string.h>
+#include <ctype.h>
+
+#define MSR_TSC	0x10
+#define MSR_NEHALEM_PLATFORM_INFO	0xCE
+#define MSR_NEHALEM_TURBO_RATIO_LIMIT	0x1AD
+#define MSR_APERF	0xE8
+#define MSR_MPERF	0xE7
+#define MSR_PKG_C2_RESIDENCY	0x60D	/* SNB only */
+#define MSR_PKG_C3_RESIDENCY	0x3F8
+#define MSR_PKG_C6_RESIDENCY	0x3F9
+#define MSR_PKG_C7_RESIDENCY	0x3FA	/* SNB only */
+#define MSR_CORE_C3_RESIDENCY	0x3FC
+#define MSR_CORE_C6_RESIDENCY	0x3FD
+#define MSR_CORE_C7_RESIDENCY	0x3FE	/* SNB only */
+
+char *proc_stat = "/proc/stat";
+unsigned int interval_sec = 5;	/* set with -i interval_sec */
+unsigned int verbose;		/* set with -v */
+unsigned int skip_c0;
+unsigned int skip_c1;
+unsigned int do_nhm_cstates;
+unsigned int do_snb_cstates;
+unsigned int has_aperf;
+unsigned int units = 1000000000;	/* Ghz etc */
+unsigned int genuine_intel;
+unsigned int has_invariant_tsc;
+unsigned int do_nehalem_platform_info;
+unsigned int do_nehalem_turbo_ratio_limit;
+unsigned int extra_msr_offset;
+double bclk;
+unsigned int show_pkg;
+unsigned int show_core;
+unsigned int show_cpu;
+
+int aperf_mperf_unstable;
+int backwards_count;
+char *progname;
+int need_reinitialize;
+
+int num_cpus;
+
+typedef struct per_cpu_counters {
+	unsigned long long tsc;		/* per thread */
+	unsigned long long aperf;	/* per thread */
+	unsigned long long mperf;	/* per thread */
+	unsigned long long c1;	/* per thread (calculated) */
+	unsigned long long c3;	/* per core */
+	unsigned long long c6;	/* per core */
+	unsigned long long c7;	/* per core */
+	unsigned long long pc2;	/* per package */
+	unsigned long long pc3;	/* per package */
+	unsigned long long pc6;	/* per package */
+	unsigned long long pc7;	/* per package */
+	unsigned long long extra_msr;	/* per thread */
+	int pkg;
+	int core;
+	int cpu;
+	struct per_cpu_counters *next;
+} PCC;
+
+PCC *pcc_even;
+PCC *pcc_odd;
+PCC *pcc_delta;
+PCC *pcc_average;
+struct timeval tv_even;
+struct timeval tv_odd;
+struct timeval tv_delta;
+
+unsigned long long get_msr(int cpu, off_t offset)
+{
+	ssize_t retval;
+	unsigned long long msr;
+	char pathname[32];
+	int fd;
+
+	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+	fd = open(pathname, O_RDONLY);
+	if (fd < 0) {
+		perror(pathname);
+		need_reinitialize = 1;
+		return 0;
+	}
+
+	retval = pread(fd, &msr, sizeof msr, offset);
+	if (retval != sizeof msr) {
+		fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
+			cpu, offset, retval);
+		exit(-2);
+	}
+
+	close(fd);
+	return msr;
+}
+
+void print_header()
+{
+	if (show_pkg)
+		fprintf(stderr, "pkg ");
+	if (show_core)
+		fprintf(stderr, "core");
+	if (show_cpu)
+		fprintf(stderr, " CPU");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c0 ");
+	if (has_aperf)
+		fprintf(stderr, "  GHz");
+	fprintf(stderr, "  TSC");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c1 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c3 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c6 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "   %%c7 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "  %%pc2 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "  %%pc3 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "  %%pc6 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "  %%pc7 ");
+	if (extra_msr_offset)
+		fprintf(stderr, "       MSR 0x%x ", extra_msr_offset);
+
+	putc('\n', stderr);
+}
+
+void dump_pcc(PCC *pcc)
+{
+	fprintf(stderr, "package: %d ", pcc->pkg);
+	fprintf(stderr, "core:: %d ", pcc->core);
+	fprintf(stderr, "CPU: %d ", pcc->cpu);
+	fprintf(stderr, "TSC: %016llX\n", pcc->tsc);
+	fprintf(stderr, "c3: %016llX\n", pcc->c3);
+	fprintf(stderr, "c6: %016llX\n", pcc->c6);
+	fprintf(stderr, "c7: %016llX\n", pcc->c7);
+	fprintf(stderr, "aperf: %016llX\n", pcc->aperf);
+	fprintf(stderr, "pc2: %016llX\n", pcc->pc2);
+	fprintf(stderr, "pc3: %016llX\n", pcc->pc3);
+	fprintf(stderr, "pc6: %016llX\n", pcc->pc6);
+	fprintf(stderr, "pc7: %016llX\n", pcc->pc7);
+	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr);
+}
+
+void dump_list(PCC *pcc)
+{
+	printf("dump_list 0x%p\n", pcc);
+
+	for (; pcc; pcc = pcc->next)
+		dump_pcc(pcc);
+}
+
+void print_pcc(PCC *p)
+{
+	double interval_float;
+
+	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
+
+	/* topology columns, print blanks on 1st (average) line */
+	if (p == pcc_average) {
+		if (show_pkg)
+			fprintf(stderr, "    ");
+		if (show_core)
+			fprintf(stderr, "    ");
+		if (show_cpu)
+			fprintf(stderr, "    ");
+	} else {
+		if (show_pkg)
+			fprintf(stderr, "%4d", p->pkg);
+		if (show_core)
+			fprintf(stderr, "%4d", p->core);
+		if (show_cpu)
+			fprintf(stderr, "%4d", p->cpu);
+	}
+
+	/* %c0 */
+	if (do_nhm_cstates) {
+		if (!skip_c0)
+			fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
+		else
+			fprintf(stderr, "   ****");
+	}
+
+	/* GHz */
+	if (has_aperf) {
+		if (!aperf_mperf_unstable) {
+			fprintf(stderr, "%5.2f",
+				1.0 * p->tsc / units * p->aperf /
+				p->mperf / interval_float);
+		} else {
+			if (p->aperf > p->tsc || p->mperf > p->tsc) {
+				fprintf(stderr, " ****");
+			} else {
+				fprintf(stderr, "%4.1f*",
+					1.0 * p->tsc /
+					units * p->aperf /
+					p->mperf / interval_float);
+			}
+		}
+	}
+
+	/* TSC */
+	fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
+
+	if (do_nhm_cstates) {
+		if (!skip_c1)
+			fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
+		else
+			fprintf(stderr, "   ****");
+	}
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c3/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c6/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c7/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc2/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc3/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc6/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc7/p->tsc);
+	if (extra_msr_offset)
+		fprintf(stderr, "  0x%016llx", p->extra_msr);
+	putc('\n', stderr);
+}
+
+void print_counters(PCC *cnt)
+{
+	PCC *pcc;
+
+	print_header();
+
+	if (num_cpus > 1)
+		print_pcc(pcc_average);
+
+	for (pcc = cnt; pcc != NULL; pcc = pcc->next)
+		print_pcc(pcc);
+
+}
+
+#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
+
+
+int compute_delta(PCC *after, PCC *before, PCC *delta)
+{
+	int errors = 0;
+	int perf_err = 0;
+
+	skip_c0 = skip_c1 = 0;
+
+	for ( ; after && before && delta;
+		after = after->next, before = before->next, delta = delta->next) {
+		if (before->cpu != after->cpu) {
+			printf("cpu configuration changed: %d != %d\n",
+				before->cpu, after->cpu);
+			return -1;
+		}
+
+		if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
+			fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
+				before->cpu, before->tsc, after->tsc);
+			errors++;
+		}
+		/* check for TSC < 1 Mcycles over interval */
+		if (delta->tsc < (1000 * 1000)) {
+			fprintf(stderr, "Insanely slow TSC rate,"
+				" TSC stops in idle?\n");
+			fprintf(stderr, "You can disable all c-states"
+				" by booting with \"idle=poll\"\n");
+			fprintf(stderr, "or just the deep ones with"
+				" \"processor.max_cstate=1\"\n");
+			exit(-3);
+		}
+		if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
+			fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
+				before->cpu, before->c3, after->c3);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
+			fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
+				before->cpu, before->c6, after->c6);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
+			fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
+				before->cpu, before->c7, after->c7);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
+			fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc2, after->pc2);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
+			fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc3, after->pc3);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
+			fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc6, after->pc6);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
+			fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc7, after->pc7);
+			errors++;
+		}
+
+		perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
+		if (perf_err) {
+			fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
+				before->cpu, before->aperf, after->aperf);
+		}
+		perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
+		if (perf_err) {
+			fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
+				before->cpu, before->mperf, after->mperf);
+		}
+		if (perf_err) {
+			if (!aperf_mperf_unstable) {
+				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+
+				aperf_mperf_unstable = 1;
+			}
+			/*
+			 * mperf delta is likely a huge "positive" number
+			 * can not use it for calculating c0 time
+			 */
+			skip_c0 = 1;
+			skip_c1 = 1;
+		}
+
+		/*
+		 * As mperf and tsc collection are not atomic,
+		 * it is possible for mperf's non-halted cycles
+		 * to exceed TSC's all cycles: show c1 = 0% in that case.
+		 */
+		if (delta->mperf > delta->tsc)
+			delta->c1 = 0;
+		else /* normal case, derive c1 */
+			delta->c1 = delta->tsc - delta->mperf
+				- delta->c3 - delta->c6 - delta->c7;
+
+		if (delta->mperf == 0)
+			delta->mperf = 1;	/* divide by 0 protection */
+
+		/*
+		 * for "extra msr", just copy the latest w/o subtracting
+		 */
+		delta->extra_msr = after->extra_msr;
+		if (errors) {
+			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
+			dump_pcc(before);
+			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
+			dump_pcc(after);
+			errors = 0;
+		}
+	}
+	return 0;
+}
+
+void compute_average(PCC *delta, PCC *avg)
+{
+	PCC *sum;
+
+	sum = calloc(1, sizeof(PCC));
+	if (sum == NULL) {
+		perror("calloc sum");
+		exit(1);
+	}
+
+	for (; delta; delta = delta->next) {
+		sum->tsc += delta->tsc;
+		sum->c1 += delta->c1;
+		sum->c3 += delta->c3;
+		sum->c6 += delta->c6;
+		sum->c7 += delta->c7;
+		sum->aperf += delta->aperf;
+		sum->mperf += delta->mperf;
+		sum->pc2 += delta->pc2;
+		sum->pc3 += delta->pc3;
+		sum->pc6 += delta->pc6;
+		sum->pc7 += delta->pc7;
+	}
+	avg->tsc = sum->tsc/num_cpus;
+	avg->c1 = sum->c1/num_cpus;
+	avg->c3 = sum->c3/num_cpus;
+	avg->c6 = sum->c6/num_cpus;
+	avg->c7 = sum->c7/num_cpus;
+	avg->aperf = sum->aperf/num_cpus;
+	avg->mperf = sum->mperf/num_cpus;
+	avg->pc2 = sum->pc2/num_cpus;
+	avg->pc3 = sum->pc3/num_cpus;
+	avg->pc6 = sum->pc6/num_cpus;
+	avg->pc7 = sum->pc7/num_cpus;
+
+	free(sum);
+}
+
+void get_counters(PCC *pcc)
+{
+	for ( ; pcc; pcc = pcc->next) {
+		pcc->tsc = get_msr(pcc->cpu, MSR_TSC);
+		if (do_nhm_cstates)
+			pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY);
+		if (do_nhm_cstates)
+			pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY);
+		if (do_snb_cstates)
+			pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY);
+		if (has_aperf)
+			pcc->aperf = get_msr(pcc->cpu, MSR_APERF);
+		if (has_aperf)
+			pcc->mperf = get_msr(pcc->cpu, MSR_MPERF);
+		if (do_snb_cstates)
+			pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY);
+		if (do_nhm_cstates)
+			pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY);
+		if (do_nhm_cstates)
+			pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY);
+		if (do_snb_cstates)
+			pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY);
+		if (extra_msr_offset)
+			pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset);
+	}
+}
+
+
+void print_nehalem_info()
+{
+	unsigned long long msr;
+	unsigned int ratio;
+
+	if (!do_nehalem_platform_info)
+		return;
+
+	msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);
+
+	ratio = (msr >> 40) & 0xFF;
+	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
+		ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 8) & 0xFF;
+	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
+		ratio, bclk, ratio * bclk);
+
+	if (verbose > 1)
+		fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
+
+	if (!do_nehalem_turbo_ratio_limit)
+		return;
+
+	msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);
+
+	ratio = (msr >> 24) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 16) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 8) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 0) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+}
+
+void free_counter_list(PCC *list)
+{
+	PCC *p;
+
+	for (p = list; p; ) {
+		PCC *free_me;
+
+		free_me = p;
+		p = p->next;
+		free(free_me);
+	}
+	return;
+}
+
+void free_all_counters(void)
+{
+	free_counter_list(pcc_even);
+	pcc_even = NULL;
+
+	free_counter_list(pcc_odd);
+	pcc_odd = NULL;
+
+	free_counter_list(pcc_delta);
+	pcc_delta = NULL;
+
+	free_counter_list(pcc_average);
+	pcc_average = NULL;
+}
+
+void insert_cpu_counters(PCC **list, PCC *new)
+{
+	PCC *prev;
+
+	/*
+	 * list was empty
+	 */
+	if (*list == NULL) {
+		new->next = *list;
+		*list = new;
+		return;
+	}
+
+	show_cpu = 1;	/* there is more than one CPU */
+
+	/*
+	 * insert on front of list.
+	 * It is sorted by ascending package#, core#, cpu#
+	 */
+	if (((*list)->pkg > new->pkg) ||
+	    (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
+	    (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
+		new->next = *list;
+		*list = new;
+		return;
+	}
+
+	prev = *list;
+
+	while (prev->next && (prev->next->pkg < new->pkg)) {
+		prev = prev->next;
+		show_pkg = 1;	/* there is more than 1 package */
+	}
+
+	while (prev->next && (prev->next->pkg == new->pkg)
+		&& (prev->next->core < new->core)) {
+		prev = prev->next;
+		show_core = 1;	/* there is more than 1 core */
+	}
+
+	while (prev->next && (prev->next->pkg == new->pkg)
+		&& (prev->next->core == new->core)
+		&& (prev->next->cpu < new->cpu)) {
+		prev = prev->next;
+	}
+
+	/*
+	 * insert after "prev"
+	 */
+	new->next = prev->next;
+	prev->next = new;
+
+	return;
+}
+
+void alloc_new_cpu_counters(int pkg, int core, int cpu)
+{
+	PCC *new;
+
+	if (verbose > 1)
+		printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_cpu_counters(&pcc_odd, new);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_cpu_counters(&pcc_even, new);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_cpu_counters(&pcc_delta, new);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	pcc_average = new;
+}
+
+int get_physical_package_id(int cpu)
+{
+	char path[64];
+	FILE *filep;
+	int pkg;
+
+	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+	fscanf(filep, "%d", &pkg);
+	fclose(filep);
+	return pkg;
+}
+
+int get_core_id(int cpu)
+{
+	char path[64];
+	FILE *filep;
+	int core;
+
+	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+	fscanf(filep, "%d", &core);
+	fclose(filep);
+	return core;
+}
+
+/*
+ * run func(pkg, core, cpu) on every cpu in /proc/stat
+ */
+
+int for_all_cpus(void (func)(int, int, int))
+{
+	FILE *fp;
+	int cpu_count;
+	int retval;
+
+	fp = fopen(proc_stat, "r");
+	if (fp == NULL) {
+		perror(proc_stat);
+		exit(1);
+	}
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0) {
+		perror("/proc/stat format");
+		exit(1);
+	}
+
+	for (cpu_count = 0; ; cpu_count++) {
+		int cpu;
+
+		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
+		if (retval != 1)
+			break;
+
+		func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
+	}
+	fclose(fp);
+	return cpu_count;
+}
+
+void re_initialize(void)
+{
+	printf("turbostat: topology changed, re-initializing.\n");
+	free_all_counters();
+	num_cpus = for_all_cpus(alloc_new_cpu_counters);
+	need_reinitialize = 0;
+	printf("num_cpus is now %d\n", num_cpus);
+}
+
+void dummy(int pkg, int core, int cpu) { return; }
+/*
+ * check to see if a cpu came on-line
+ */
+void verify_num_cpus()
+{
+	int new_num_cpus;
+
+	new_num_cpus = for_all_cpus(dummy);
+
+	if (new_num_cpus != num_cpus) {
+		if (verbose)
+			printf("num_cpus was %d, is now  %d\n",
+				num_cpus, new_num_cpus);
+		need_reinitialize = 1;
+	}
+
+	return;
+}
+
+void turbostat_loop()
+{
+restart:
+	get_counters(pcc_even);
+	gettimeofday(&tv_even, (struct timezone *)NULL);
+
+	while (1) {
+		verify_num_cpus();
+		if (need_reinitialize) {
+			re_initialize();
+			goto restart;
+		}
+		sleep(interval_sec);
+		get_counters(pcc_odd);
+		gettimeofday(&tv_odd, (struct timezone *)NULL);
+
+		compute_delta(pcc_odd, pcc_even, pcc_delta);
+		timersub(&tv_odd, &tv_even, &tv_delta);
+		compute_average(pcc_delta, pcc_average);
+		print_counters(pcc_delta);
+		if (need_reinitialize) {
+			re_initialize();
+			goto restart;
+		}
+		sleep(interval_sec);
+		get_counters(pcc_even);
+		gettimeofday(&tv_even, (struct timezone *)NULL);
+		compute_delta(pcc_even, pcc_odd, pcc_delta);
+		timersub(&tv_even, &tv_odd, &tv_delta);
+		compute_average(pcc_delta, pcc_average);
+		print_counters(pcc_delta);
+	}
+}
+
+void check_dev_msr()
+{
+	struct stat sb;
+
+	if (stat("/dev/cpu/0/msr", &sb)) {
+		fprintf(stderr, "no /dev/cpu/0/msr\n");
+		fprintf(stderr, "Try \"# modprobe msr\"\n");
+		exit(-5);
+	}
+}
+
+void check_super_user()
+{
+	if (getuid() != 0) {
+		fprintf(stderr, "must be root\n");
+		exit(-6);
+	}
+}
+
+int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	if (family != 6)
+		return 0;
+
+	switch (model) {
+	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
+	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
+	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
+	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
+	case 0x2C:	/* Westmere EP - Gulftown */
+	case 0x2A:	/* SNB */
+	case 0x2D:	/* SNB Xeon */
+		return 1;
+	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
+	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
+	default:
+		return 0;
+	}
+}
+
+int is_snb(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	switch (model) {
+	case 0x2A:
+	case 0x2D:
+		return 1;
+	}
+	return 0;
+}
+
+double discover_bclk(unsigned int family, unsigned int model)
+{
+	if (is_snb(family, model))
+		return 100.00;
+	else
+		return 133.33;
+}
+
+void check_cpuid()
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	unsigned int fms, family, model, stepping;
+
+	eax = ebx = ecx = edx = 0;
+
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+
+	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+		genuine_intel = 1;
+
+	if (verbose)
+		fprintf(stderr, "%.4s%.4s%.4s ",
+			(char *)&ebx, (char *)&edx, (char *)&ecx);
+
+	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	stepping = fms & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
+	if (verbose)
+		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
+			max_level, family, model, stepping, family, model, stepping);
+
+	if (!(edx & (1 << 5))) {
+		fprintf(stderr, "CPUID: no MSR\n");
+		exit(1);
+	}
+
+	/*
+	 * check max extended function levels of CPUID.
+	 * This is needed to check for invariant TSC.
+	 * This check is valid for both Intel and AMD.
+	 */
+	ebx = ecx = edx = 0;
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+
+	if (max_level < 0x80000007) {
+		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
+		exit(1);
+	}
+
+	/*
+	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+	 * this check is valid for both Intel and AMD
+	 */
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+	has_invariant_tsc = edx & (1 << 8);
+
+	if (!has_invariant_tsc) {
+		fprintf(stderr, "No invariant TSC\n");
+		exit(1);
+	}
+
+	/*
+	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
+	 * this check is valid for both Intel and AMD
+	 */
+
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+	has_aperf = ecx & (1 << 0);
+	if (!has_aperf) {
+		fprintf(stderr, "No APERF MSR\n");
+		exit(1);
+	}
+
+	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
+	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
+	do_snb_cstates = is_snb(family, model);
+	bclk = discover_bclk(family, model);
+
+	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
+}
+
+
+void usage()
+{
+	fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
+		progname);
+	exit(1);
+}
+
+
+/*
+ * in /dev/cpu/, return success for names that are numbers,
+ * i.e. filter out ".", "..", "microcode".
+ */
+int dir_filter(const struct dirent *dirp)
+{
+	if (isdigit(dirp->d_name[0]))
+		return 1;
+	else
+		return 0;
+}
+
+int open_dev_cpu_msr(int dummy1)
+{
+	return 0;
+}
+
+void turbostat_init()
+{
+	check_cpuid();
+
+	check_dev_msr();
+	check_super_user();
+
+	num_cpus = for_all_cpus(alloc_new_cpu_counters);
+
+	if (verbose)
+		print_nehalem_info();
+}
+
+int fork_it(char **argv)
+{
+	int retval;
+	pid_t child_pid;
+	get_counters(pcc_even);
+	gettimeofday(&tv_even, (struct timezone *)NULL);
+
+	child_pid = fork();
+	if (!child_pid) {
+		/* child */
+		execvp(argv[0], argv);
+	} else {
+		int status;
+
+		/* parent */
+		if (child_pid == -1) {
+			perror("fork");
+			exit(1);
+		}
+
+		signal(SIGINT, SIG_IGN);
+		signal(SIGQUIT, SIG_IGN);
+		if (waitpid(child_pid, &status, 0) == -1) {
+			perror("wait");
+			exit(1);
+		}
+	}
+	get_counters(pcc_odd);
+	gettimeofday(&tv_odd, (struct timezone *)NULL);
+	retval = compute_delta(pcc_odd, pcc_even, pcc_delta);
+
+	timersub(&tv_odd, &tv_even, &tv_delta);
+	compute_average(pcc_delta, pcc_average);
+	if (!retval)
+		print_counters(pcc_delta);
+
+	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);;
+
+	return 0;
+}
+
+void cmdline(int argc, char **argv)
+{
+	int opt;
+
+	progname = argv[0];
+
+	while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
+		switch (opt) {
+		case 'v':
+			verbose++;
+			break;
+		case 'i':
+			interval_sec = atoi(optarg);
+			break;
+		case 'M':
+			sscanf(optarg, "%x", &extra_msr_offset);
+			if (verbose > 1)
+				fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
+			break;
+		default:
+			usage();
+		}
+	}
+}
+
+int main(int argc, char **argv)
+{
+	cmdline(argc, argv);
+
+	if (verbose > 1)
+		fprintf(stderr, "turbostat Dec 6, 2010"
+			" - Len Brown <lenb@kernel.org>\n");
+	if (verbose > 1)
+		fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
+
+	turbostat_init();
+
+	/*
+	 * if any params left, it must be a command to fork
+	 */
+	if (argc - optind)
+		return fork_it(argv + optind);
+	else
+		turbostat_loop();
+
+	return 0;
+}
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
new file mode 100644
index 0000000..f458237
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -0,0 +1,8 @@
+x86_energy_perf_policy : x86_energy_perf_policy.c
+
+clean :
+	rm -f x86_energy_perf_policy
+
+install :
+	install x86_energy_perf_policy /usr/bin/
+	install x86_energy_perf_policy.8 /usr/share/man/man8/
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
new file mode 100644
index 0000000..8eaaad6
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -0,0 +1,104 @@
+.\"  This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
+.\"  Distributed under the GPL, Copyleft 1994.
+.TH X86_ENERGY_PERF_POLICY 8
+.SH NAME
+x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
+.SH SYNOPSIS
+.ft B
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB "\-r"
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'performance'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'normal'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'powersave'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB n
+.br
+.SH DESCRIPTION
+\fBx86_energy_perf_policy\fP
+allows software to convey
+its policy for the relative importance of performance
+versus energy savings to the processor.
+
+The processor uses this information in model-specific ways
+when it must select trade-offs between performance and
+energy efficiency.
+
+This policy hint does not supersede Processor Performance states
+(P-states) or CPU Idle power states (C-states), but allows
+software to have influence where it would otherwise be unable
+to express a preference.
+
+For example, this setting may tell the hardware how
+aggressively or conservatively to control frequency
+in the "turbo range" above the explicitly OS-controlled
+P-state frequency range.  It may also tell the hardware
+how aggressively it should enter the OS-requested C-states.
+
+Support for this feature is indicated by CPUID.06H.ECX.bit3
+per the Intel Architectures Software Developer's Manual.
+
+.SS Options
+\fB-c\fP limits operation to a single CPU.
+The default is to operate on all CPUs.
+Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
+logical processor, but that the initial implementations
+of the MSR were shared among all processors in each package.
+.PP
+\fB-v\fP increases verbosity.  By default
+x86_energy_perf_policy is silent.
+.PP
+\fB-r\fP is for "read-only" mode - the unchanged state
+is read and displayed.
+.PP
+.I performance
+Set a policy where performance is paramount.
+The processor will be unwilling to sacrifice any performance
+for the sake of energy saving. This is the hardware default.
+.PP
+.I normal
+Set a policy with a normal balance between performance and energy efficiency.
+The processor will tolerate minor performance compromise
+for potentially significant energy savings.
+This is a reasonable default for most desktops and servers.
+.PP
+.I powersave
+Set a policy where the processor can accept
+a measurable performance hit to maximize energy efficiency.
+.PP
+.I n
+Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
+The range of valid numbers is 0-15, where 0 is maximum
+performance and 15 is maximum energy efficiency.
+
+.SH NOTES
+.B "x86_energy_perf_policy "
+runs only as root.
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+.fi
+
+.SH "SEE ALSO"
+msr(4)
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
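
The CPUID.06H:ECX.bit3 feature test mentioned in the DESCRIPTION can be
sketched as follows (illustrative only; validate_cpuid() in the tool below
performs the equivalent check, plus the GenuineIntel and MSR checks):

	static int has_energy_perf_bias(void)
	{
		unsigned int eax, ebx, ecx, edx;

		asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			    : "a" (6));
		return ecx & (1 << 3);	/* MSR_IA32_ENERGY_PERF_BIAS present */
	}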
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
new file mode 100644
index 0000000..d9678a3
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -0,0 +1,325 @@
+/*
+ * x86_energy_perf_policy -- set the energy versus performance
+ * policy preference bias on recent X86 processors.
+ */
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+
+unsigned int verbose;		/* set with -v */
+unsigned int read_only;		/* set with -r */
+char *progname;
+unsigned long long new_bias;
+int cpu = -1;
+
+/*
+ * Usage:
+ *
+ * -c cpu: limit action to a single CPU (default is all CPUs)
+ * -v: verbose output (can invoke more than once)
+ * -r: read-only, don't change any settings
+ *
+ *  performance
+ *	Performance is paramount.
+ *	Unwilling to sacrifice any performance
+ *	for the sake of energy saving. (hardware default)
+ *
+ *  normal
+ *	Can tolerate minor performance compromise
+ *	for potentially significant energy savings.
+ *	(reasonable default for most desktops and servers)
+ *
+ *  powersave
+ *	Can tolerate significant performance hit
+ *	to maximize energy savings.
+ *
+ * n
+ *	a numerical value to write to the underlying MSR.
+ */
+void usage(void)
+{
+	printf("%s: [-c cpu] [-v] "
+		"(-r | 'performance' | 'normal' | 'powersave' | n)\n",
+		progname);
+	exit(1);
+}
+
+#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+
+#define	BIAS_PERFORMANCE		0
+#define BIAS_BALANCE			6
+#define	BIAS_POWERSAVE			15
+
+void cmdline(int argc, char **argv)
+{
+	int opt;
+
+	progname = argv[0];
+
+	while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
+		switch (opt) {
+		case 'c':
+			cpu = atoi(optarg);
+			break;
+		case 'r':
+			read_only = 1;
+			break;
+		case 'v':
+			verbose++;
+			break;
+		default:
+			usage();
+		}
+	}
+	/* if -r, there should be no additional arguments */
+	if (read_only && (argc > optind))
+		usage();
+
+	/*
+	 * if no -r, there must be exactly one additional argument
+	 */
+	if (!read_only) {
+
+		if (argc != optind + 1) {
+			printf("must supply -r or policy param\n");
+			usage();
+			}
+
+		if (!strcmp("performance", argv[optind])) {
+			new_bias = BIAS_PERFORMANCE;
+		} else if (!strcmp("normal", argv[optind])) {
+			new_bias = BIAS_BALANCE;
+		} else if (!strcmp("powersave", argv[optind])) {
+			new_bias = BIAS_POWERSAVE;
+		} else {
+			char *endptr;
+
+			new_bias = strtoull(argv[optind], &endptr, 0);
+			if (endptr == argv[optind] ||
+				new_bias > BIAS_POWERSAVE) {
+					fprintf(stderr, "invalid value: %s\n",
+						argv[optind]);
+				usage();
+			}
+		}
+	}
+}
+
+/*
+ * validate_cpuid()
+ * returns on success, quietly exits on failure (make verbose with -v)
+ */
+void validate_cpuid(void)
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	char brand[16];
+	unsigned int fms, family, model, stepping;
+
+	eax = ebx = ecx = edx = 0;
+
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
+		"=d" (edx) : "a" (0));
+
+	if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
+		if (verbose)
+			fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
+				(char *)&ebx, (char *)&edx, (char *)&ecx);
+		exit(1);
+	}
+
+	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	stepping = fms & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
+	if (verbose > 1)
+		printf("CPUID %s %d levels family:model:stepping "
+			"0x%x:%x:%x (%d:%d:%d)\n", brand, max_level,
+			family, model, stepping, family, model, stepping);
+
+	if (!(edx & (1 << 5))) {
+		if (verbose)
+			printf("CPUID: no MSR\n");
+		exit(1);
+	}
+
+	/*
+	 * Support for MSR_IA32_ENERGY_PERF_BIAS
+	 * is indicated by CPUID.06H.ECX.bit3
+	 */
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
+	if (verbose)
+		printf("CPUID.06H.ECX: 0x%x\n", ecx);
+	if (!(ecx & (1 << 3))) {
+		if (verbose)
+			printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
+		exit(1);
+	}
+	return;	/* success */
+}
+
+unsigned long long get_msr(int cpu, int offset)
+{
+	unsigned long long msr;
+	char msr_path[32];
+	int retval;
+	int fd;
+
+	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
+	fd = open(msr_path, O_RDONLY);
+	if (fd < 0) {
+		printf("Try \"# modprobe msr\"\n");
+		perror(msr_path);
+		exit(1);
+	}
+
+	retval = pread(fd, &msr, sizeof msr, offset);
+
+	if (retval != sizeof msr) {
+		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+	close(fd);
+	return msr;
+}
+
+unsigned long long  put_msr(int cpu, unsigned long long new_msr, int offset)
+{
+	unsigned long long old_msr;
+	char msr_path[32];
+	int retval;
+	int fd;
+
+	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
+	fd = open(msr_path, O_RDWR);
+	if (fd < 0) {
+		perror(msr_path);
+		exit(1);
+	}
+
+	retval = pread(fd, &old_msr, sizeof old_msr, offset);
+	if (retval != sizeof old_msr) {
+		perror("pwrite");
+		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+
+	retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
+	if (retval != sizeof new_msr) {
+		perror("pwrite");
+		printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+
+	close(fd);
+
+	return old_msr;
+}
+
+void print_msr(int cpu)
+{
+	printf("cpu%d: 0x%016llx\n",
+		cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
+}
+
+void update_msr(int cpu)
+{
+	unsigned long long previous_msr;
+
+	previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
+
+	if (verbose)
+		printf("cpu%d  msr0x%x 0x%016llx -> 0x%016llx\n",
+			cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
+
+	return;
+}
+
+char *proc_stat = "/proc/stat";
+/*
+ * run func() on every cpu in /proc/stat
+ */
+void for_every_cpu(void (func)(int))
+{
+	FILE *fp;
+	int retval;
+
+	fp = fopen(proc_stat, "r");
+	if (fp == NULL) {
+		perror(proc_stat);
+		exit(1);
+	}
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0) {
+		perror("/proc/stat format");
+		exit(1);
+	}
+
+	while (1) {
+		int cpu;
+
+		retval = fscanf(fp,
+			"cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
+			&cpu);
+		if (retval != 1)
+			break;
+
+		func(cpu);
+	}
+	fclose(fp);
+}
+
+int main(int argc, char **argv)
+{
+	cmdline(argc, argv);
+
+	if (verbose > 1)
+		printf("x86_energy_perf_policy Nov 24, 2010"
+				" - Len Brown <lenb@kernel.org>\n");
+	if (verbose > 1 && !read_only)
+		printf("new_bias %lld\n", new_bias);
+
+	validate_cpuid();
+
+	if (cpu != -1) {
+		if (read_only)
+			print_msr(cpu);
+		else
+			update_msr(cpu);
+	} else {
+		if (read_only)
+			for_every_cpu(print_msr);
+		else
+			for_every_cpu(update_msr);
+	}
+
+	return 0;
+}
diff --git a/tools/slub/slabinfo.c b/tools/slub/slabinfo.c
new file mode 100644
index 0000000..516551c
--- /dev/null
+++ b/tools/slub/slabinfo.c
@@ -0,0 +1,1364 @@
+/*
+ * Slabinfo: Tool to get reports about slabs
+ *
+ * (C) 2007 sgi, Christoph Lameter
+ *
+ * Compile by:
+ *
+ * gcc -o slabinfo slabinfo.c
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <strings.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <getopt.h>
+#include <regex.h>
+#include <errno.h>
+
+#define MAX_SLABS 500
+#define MAX_ALIASES 500
+#define MAX_NODES 1024
+
+struct slabinfo {
+	char *name;
+	int alias;
+	int refs;
+	int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
+	int hwcache_align, object_size, objs_per_slab;
+	int sanity_checks, slab_size, store_user, trace;
+	int order, poison, reclaim_account, red_zone;
+	unsigned long partial, objects, slabs, objects_partial, objects_total;
+	unsigned long alloc_fastpath, alloc_slowpath;
+	unsigned long free_fastpath, free_slowpath;
+	unsigned long free_frozen, free_add_partial, free_remove_partial;
+	unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
+	unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
+	unsigned long deactivate_to_head, deactivate_to_tail;
+	unsigned long deactivate_remote_frees, order_fallback;
+	int numa[MAX_NODES];
+	int numa_partial[MAX_NODES];
+} slabinfo[MAX_SLABS];
+
+struct aliasinfo {
+	char *name;
+	char *ref;
+	struct slabinfo *slab;
+} aliasinfo[MAX_ALIASES];
+
+int slabs = 0;
+int actual_slabs = 0;
+int aliases = 0;
+int alias_targets = 0;
+int highest_node = 0;
+
+char buffer[4096];
+
+int show_empty = 0;
+int show_report = 0;
+int show_alias = 0;
+int show_slab = 0;
+int skip_zero = 1;
+int show_numa = 0;
+int show_track = 0;
+int show_first_alias = 0;
+int validate = 0;
+int shrink = 0;
+int show_inverted = 0;
+int show_single_ref = 0;
+int show_totals = 0;
+int sort_size = 0;
+int sort_active = 0;
+int set_debug = 0;
+int show_ops = 0;
+int show_activity = 0;
+
+/* Debug options */
+int sanity = 0;
+int redzone = 0;
+int poison = 0;
+int tracking = 0;
+int tracing = 0;
+
+int page_size;
+
+regex_t pattern;
+
+static void fatal(const char *x, ...)
+{
+	va_list ap;
+
+	va_start(ap, x);
+	vfprintf(stderr, x, ap);
+	va_end(ap);
+	exit(EXIT_FAILURE);
+}
+
+static void usage(void)
+{
+	printf("slabinfo 5/7/2007. (c) 2007 sgi.\n\n"
+		"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
+		"-a|--aliases           Show aliases\n"
+		"-A|--activity          Most active slabs first\n"
+		"-d<options>|--debug=<options> Set/Clear Debug options\n"
+		"-D|--display-active    Switch line format to activity\n"
+		"-e|--empty             Show empty slabs\n"
+		"-f|--first-alias       Show first alias\n"
+		"-h|--help              Show usage information\n"
+		"-i|--inverted          Inverted list\n"
+		"-l|--slabs             Show slabs\n"
+		"-n|--numa              Show NUMA information\n"
+		"-o|--ops		Show kmem_cache_ops\n"
+		"-s|--shrink            Shrink slabs\n"
+		"-r|--report		Detailed report on single slabs\n"
+		"-S|--Size              Sort by size\n"
+		"-t|--tracking          Show alloc/free information\n"
+		"-T|--Totals            Show summary information\n"
+		"-v|--validate          Validate slabs\n"
+		"-z|--zero              Include empty slabs\n"
+		"-1|--1ref              Single reference\n"
+		"\nValid debug options (FZPUT may be combined)\n"
+		"a / A          Switch on all debug options (=FZUP)\n"
+		"-              Switch off all debug options\n"
+		"f / F          Sanity Checks (SLAB_DEBUG_FREE)\n"
+		"z / Z          Redzoning\n"
+		"p / P          Poisoning\n"
+		"u / U          Tracking\n"
+		"t / T          Tracing\n"
+	);
+}
+
+static unsigned long read_obj(const char *name)
+{
+	FILE *f = fopen(name, "r");
+
+	if (!f)
+		buffer[0] = 0;
+	else {
+		if (!fgets(buffer, sizeof(buffer), f))
+			buffer[0] = 0;
+		fclose(f);
+		if (buffer[0] && buffer[strlen(buffer) - 1] == '\n')
+			buffer[strlen(buffer) - 1] = 0;
+	}
+	return strlen(buffer);
+}
+
+
+/*
+ * Get the contents of an attribute
+ */
+static unsigned long get_obj(const char *name)
+{
+	if (!read_obj(name))
+		return 0;
+
+	return atol(buffer);
+}
+
+static unsigned long get_obj_and_str(const char *name, char **x)
+{
+	unsigned long result = 0;
+	char *p;
+
+	*x = NULL;
+
+	if (!read_obj(name)) {
+		x = NULL;
+		return 0;
+	}
+	result = strtoul(buffer, &p, 10);
+	while (*p == ' ')
+		p++;
+	if (*p)
+		*x = strdup(p);
+	return result;
+}
+
+static void set_obj(struct slabinfo *s, const char *name, int n)
+{
+	char x[100];
+	FILE *f;
+
+	snprintf(x, 100, "%s/%s", s->name, name);
+	f = fopen(x, "w");
+	if (!f)
+		fatal("Cannot write to %s\n", x);
+
+	fprintf(f, "%d\n", n);
+	fclose(f);
+}
+
+static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
+{
+	char x[100];
+	FILE *f;
+	size_t l;
+
+	snprintf(x, 100, "%s/%s", s->name, name);
+	f = fopen(x, "r");
+	if (!f) {
+		buffer[0] = 0;
+		l = 0;
+	} else {
+		l = fread(buffer, 1, sizeof(buffer), f);
+		buffer[l] = 0;
+		fclose(f);
+	}
+	return l;
+}
+
+
+/*
+ * Put a size string together
+ */
+static int store_size(char *buffer, unsigned long value)
+{
+	unsigned long divisor = 1;
+	char trailer = 0;
+	int n;
+
+	if (value > 1000000000UL) {
+		divisor = 100000000UL;
+		trailer = 'G';
+	} else if (value > 1000000UL) {
+		divisor = 100000UL;
+		trailer = 'M';
+	} else if (value > 1000UL) {
+		divisor = 100;
+		trailer = 'K';
+	}
+
+	value /= divisor;
+	n = sprintf(buffer, "%ld",value);
+	if (trailer) {
+		buffer[n] = trailer;
+		n++;
+		buffer[n] = 0;
+	}
+	if (divisor != 1) {
+		memmove(buffer + n - 2, buffer + n - 3, 4);
+		buffer[n-2] = '.';
+		n++;
+	}
+	return n;
+}
+
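+/*
+ * Parse a sysfs per-node list of the form "N0=<n> N1=<n> ..." into the
+ * numa[] array and remember the highest node number seen.
+ */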
+static void decode_numa_list(int *numa, char *t)
+{
+	int node;
+	int nr;
+
+	memset(numa, 0, MAX_NODES * sizeof(int));
+
+	if (!t)
+		return;
+
+	while (*t == 'N') {
+		t++;
+		node = strtoul(t, &t, 10);
+		if (*t == '=') {
+			t++;
+			nr = strtoul(t, &t, 10);
+			numa[node] = nr;
+			if (node > highest_node)
+				highest_node = node;
+		}
+		while (*t == ' ')
+			t++;
+	}
+}
+
+static void slab_validate(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	set_obj(s, "validate", 1);
+}
+
+static void slab_shrink(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	set_obj(s, "shrink", 1);
+}
+
+int line = 0;
+
+static void first_line(void)
+{
+	if (show_activity)
+		printf("Name                   Objects      Alloc       Free   %%Fast Fallb O\n");
+	else
+		printf("Name                   Objects Objsize    Space "
+			"Slabs/Part/Cpu  O/S O %%Fr %%Ef Flg\n");
+}
+
+/*
+ * Find the longest alias of a slab (a kmalloc alias is taken right away)
+ */
+static struct aliasinfo *find_one_alias(struct slabinfo *find)
+{
+	struct aliasinfo *a;
+	struct aliasinfo *best = NULL;
+
+	for(a = aliasinfo;a < aliasinfo + aliases; a++) {
+		if (a->slab == find &&
+			(!best || strlen(best->name) < strlen(a->name))) {
+				best = a;
+				if (strncmp(a->name,"kmall", 5) == 0)
+					return best;
+			}
+	}
+	return best;
+}
+
+static unsigned long slab_size(struct slabinfo *s)
+{
+	return 	s->slabs * (page_size << s->order);
+}
+
+static unsigned long slab_activity(struct slabinfo *s)
+{
+	return 	s->alloc_fastpath + s->free_fastpath +
+		s->alloc_slowpath + s->free_slowpath;
+}
+
+static void slab_numa(struct slabinfo *s, int mode)
+{
+	int node;
+
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (!highest_node) {
+		printf("\n%s: No NUMA information available.\n", s->name);
+		return;
+	}
+
+	if (skip_zero && !s->slabs)
+		return;
+
+	if (!line) {
+		printf("\n%-21s:", mode ? "NUMA nodes" : "Slab");
+		for(node = 0; node <= highest_node; node++)
+			printf(" %4d", node);
+		printf("\n----------------------");
+		for(node = 0; node <= highest_node; node++)
+			printf("-----");
+		printf("\n");
+	}
+	printf("%-21s ", mode ? "All slabs" : s->name);
+	for(node = 0; node <= highest_node; node++) {
+		char b[20];
+
+		store_size(b, s->numa[node]);
+		printf(" %4s", b);
+	}
+	printf("\n");
+	if (mode) {
+		printf("%-21s ", "Partial slabs");
+		for(node = 0; node <= highest_node; node++) {
+			char b[20];
+
+			store_size(b, s->numa_partial[node]);
+			printf(" %4s", b);
+		}
+		printf("\n");
+	}
+	line++;
+}
+
+static void show_tracking(struct slabinfo *s)
+{
+	printf("\n%s: Kernel object allocation\n", s->name);
+	printf("-----------------------------------------------------------------------\n");
+	if (read_slab_obj(s, "alloc_calls"))
+		printf("%s", buffer);
+	else
+		printf("No Data\n");
+
+	printf("\n%s: Kernel object freeing\n", s->name);
+	printf("------------------------------------------------------------------------\n");
+	if (read_slab_obj(s, "free_calls"))
+		printf("%s", buffer);
+	else
+		printf("No Data\n");
+
+}
+
+static void ops(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (read_slab_obj(s, "ops")) {
+		printf("\n%s: kmem_cache operations\n", s->name);
+		printf("--------------------------------------------\n");
+		printf("%s", buffer);
+	} else
+		printf("\n%s has no kmem_cache operations\n", s->name);
+}
+
+static const char *onoff(int x)
+{
+	if (x)
+		return "On ";
+	return "Off";
+}
+
+static void slab_stats(struct slabinfo *s)
+{
+	unsigned long total_alloc;
+	unsigned long total_free;
+	unsigned long total;
+
+	if (!s->alloc_slab)
+		return;
+
+	total_alloc = s->alloc_fastpath + s->alloc_slowpath;
+	total_free = s->free_fastpath + s->free_slowpath;
+
+	if (!total_alloc)
+		return;
+
+	printf("\n");
+	printf("Slab Perf Counter       Alloc     Free %%Al %%Fr\n");
+	printf("--------------------------------------------------\n");
+	printf("Fastpath             %8lu %8lu %3lu %3lu\n",
+		s->alloc_fastpath, s->free_fastpath,
+		s->alloc_fastpath * 100 / total_alloc,
+		total_free ? s->free_fastpath * 100 / total_free : 0);
+	printf("Slowpath             %8lu %8lu %3lu %3lu\n",
+		total_alloc - s->alloc_fastpath, s->free_slowpath,
+		(total_alloc - s->alloc_fastpath) * 100 / total_alloc,
+		total_free ? s->free_slowpath * 100 / total_free : 0);
+	printf("Page Alloc           %8lu %8lu %3lu %3lu\n",
+		s->alloc_slab, s->free_slab,
+		s->alloc_slab * 100 / total_alloc,
+		total_free ? s->free_slab * 100 / total_free : 0);
+	printf("Add partial          %8lu %8lu %3lu %3lu\n",
+		s->deactivate_to_head + s->deactivate_to_tail,
+		s->free_add_partial,
+		(s->deactivate_to_head + s->deactivate_to_tail) * 100 / total_alloc,
+		total_free ? s->free_add_partial * 100 / total_free : 0);
+	printf("Remove partial       %8lu %8lu %3lu %3lu\n",
+		s->alloc_from_partial, s->free_remove_partial,
+		s->alloc_from_partial * 100 / total_alloc,
+		total_free ? s->free_remove_partial * 100 / total_free : 0);
+
+	printf("RemoteObj/SlabFrozen %8lu %8lu %3lu %3lu\n",
+		s->deactivate_remote_frees, s->free_frozen,
+		s->deactivate_remote_frees * 100 / total_alloc,
+		total_free ? s->free_frozen * 100 / total_free : 0);
+
+	printf("Total                %8lu %8lu\n\n", total_alloc, total_free);
+
+	if (s->cpuslab_flush)
+		printf("Flushes %8lu\n", s->cpuslab_flush);
+
+	if (s->alloc_refill)
+		printf("Refill %8lu\n", s->alloc_refill);
+
+	total = s->deactivate_full + s->deactivate_empty +
+			s->deactivate_to_head + s->deactivate_to_tail;
+
+	if (total)
+		printf("Deactivate Full=%lu(%lu%%) Empty=%lu(%lu%%) "
+			"ToHead=%lu(%lu%%) ToTail=%lu(%lu%%)\n",
+			s->deactivate_full, (s->deactivate_full * 100) / total,
+			s->deactivate_empty, (s->deactivate_empty * 100) / total,
+			s->deactivate_to_head, (s->deactivate_to_head * 100) / total,
+			s->deactivate_to_tail, (s->deactivate_to_tail * 100) / total);
+}
+
+static void report(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	printf("\nSlabcache: %-20s  Aliases: %2d Order : %2d Objects: %lu\n",
+		s->name, s->aliases, s->order, s->objects);
+	if (s->hwcache_align)
+		printf("** Hardware cacheline aligned\n");
+	if (s->cache_dma)
+		printf("** Memory is allocated in a special DMA zone\n");
+	if (s->destroy_by_rcu)
+		printf("** Slabs are destroyed via RCU\n");
+	if (s->reclaim_account)
+		printf("** Reclaim accounting active\n");
+
+	printf("\nSizes (bytes)     Slabs              Debug                Memory\n");
+	printf("------------------------------------------------------------------------\n");
+	printf("Object : %7d  Total  : %7ld   Sanity Checks : %s  Total: %7ld\n",
+			s->object_size, s->slabs, onoff(s->sanity_checks),
+			s->slabs * (page_size << s->order));
+	printf("SlabObj: %7d  Full   : %7ld   Redzoning     : %s  Used : %7ld\n",
+			s->slab_size, s->slabs - s->partial - s->cpu_slabs,
+			onoff(s->red_zone), s->objects * s->object_size);
+	printf("SlabSiz: %7d  Partial: %7ld   Poisoning     : %s  Loss : %7ld\n",
+			page_size << s->order, s->partial, onoff(s->poison),
+			s->slabs * (page_size << s->order) - s->objects * s->object_size);
+	printf("Loss   : %7d  CpuSlab: %7d   Tracking      : %s  Lalig: %7ld\n",
+			s->slab_size - s->object_size, s->cpu_slabs, onoff(s->store_user),
+			(s->slab_size - s->object_size) * s->objects);
+	printf("Align  : %7d  Objects: %7d   Tracing       : %s  Lpadd: %7ld\n",
+			s->align, s->objs_per_slab, onoff(s->trace),
+			((page_size << s->order) - s->objs_per_slab * s->slab_size) *
+			s->slabs);
+
+	ops(s);
+	show_tracking(s);
+	slab_numa(s, 1);
+	slab_stats(s);
+}
+
+static void slabcache(struct slabinfo *s)
+{
+	char size_str[20];
+	char dist_str[40];
+	char flags[20];
+	char *p = flags;
+
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (actual_slabs == 1) {
+		report(s);
+		return;
+	}
+
+	if (skip_zero && !show_empty && !s->slabs)
+		return;
+
+	if (show_empty && s->slabs)
+		return;
+
+	store_size(size_str, slab_size(s));
+	snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
+						s->partial, s->cpu_slabs);
+
+	if (!line++)
+		first_line();
+
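+	/* Build the one-letter flag string shown in the Flg column */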
+	if (s->aliases)
+		*p++ = '*';
+	if (s->cache_dma)
+		*p++ = 'd';
+	if (s->hwcache_align)
+		*p++ = 'A';
+	if (s->poison)
+		*p++ = 'P';
+	if (s->reclaim_account)
+		*p++ = 'a';
+	if (s->red_zone)
+		*p++ = 'Z';
+	if (s->sanity_checks)
+		*p++ = 'F';
+	if (s->store_user)
+		*p++ = 'U';
+	if (s->trace)
+		*p++ = 'T';
+
+	*p = 0;
+	if (show_activity) {
+		unsigned long total_alloc;
+		unsigned long total_free;
+
+		total_alloc = s->alloc_fastpath + s->alloc_slowpath;
+		total_free = s->free_fastpath + s->free_slowpath;
+
+		printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d\n",
+			s->name, s->objects,
+			total_alloc, total_free,
+			total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
+			total_free ? (s->free_fastpath * 100 / total_free) : 0,
+			s->order_fallback, s->order);
+	}
+	else
+		printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
+			s->name, s->objects, s->object_size, size_str, dist_str,
+			s->objs_per_slab, s->order,
+			s->slabs ? (s->partial * 100) / s->slabs : 100,
+			s->slabs ? (s->objects * s->object_size * 100) /
+				(s->slabs * (page_size << s->order)) : 100,
+			flags);
+}
+
+/*
+ * Analyze debug options. Return false if something is amiss.
+ */
+static int debug_opt_scan(char *opt)
+{
+	if (!opt || !opt[0] || strcmp(opt, "-") == 0)
+		return 1;
+
+	if (strcasecmp(opt, "a") == 0) {
+		sanity = 1;
+		poison = 1;
+		redzone = 1;
+		tracking = 1;
+		return 1;
+	}
+
+	for ( ; *opt; opt++)
+		switch (*opt) {
+		case 'F' : case 'f':
+			if (sanity)
+				return 0;
+			sanity = 1;
+			break;
+		case 'P' : case 'p':
+			if (poison)
+				return 0;
+			poison = 1;
+			break;
+
+		case 'Z' : case 'z':
+			if (redzone)
+				return 0;
+			redzone = 1;
+			break;
+
+		case 'U' : case 'u':
+			if (tracking)
+				return 0;
+			tracking = 1;
+			break;
+
+		case 'T' : case 't':
+			if (tracing)
+				return 0;
+			tracing = 1;
+			break;
+		default:
+			return 0;
+		}
+	return 1;
+}
+
+static int slab_empty(struct slabinfo *s)
+{
+	if (s->objects > 0)
+		return 0;
+
+	/*
+	 * We may still have slabs even if there are no objects. Shrinking will
+	 * remove them.
+	 */
+	if (s->slabs != 0)
+		set_obj(s, "shrink", 1);
+
+	return 1;
+}
+
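+/*
+ * Apply the requested debug option changes to a slab cache. Most of the
+ * flags can only be toggled while the cache holds no objects, so the
+ * changes are gated on slab_empty() where necessary.
+ */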
+static void slab_debug(struct slabinfo *s)
+{
+	if (strcmp(s->name, "*") == 0)
+		return;
+
+	if (sanity && !s->sanity_checks) {
+		set_obj(s, "sanity", 1);
+	}
+	if (!sanity && s->sanity_checks) {
+		if (slab_empty(s))
+			set_obj(s, "sanity", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable sanity checks\n", s->name);
+	}
+	if (redzone && !s->red_zone) {
+		if (slab_empty(s))
+			set_obj(s, "red_zone", 1);
+		else
+			fprintf(stderr, "%s not empty cannot enable redzoning\n", s->name);
+	}
+	if (!redzone && s->red_zone) {
+		if (slab_empty(s))
+			set_obj(s, "red_zone", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable redzoning\n", s->name);
+	}
+	if (poison && !s->poison) {
+		if (slab_empty(s))
+			set_obj(s, "poison", 1);
+		else
+			fprintf(stderr, "%s not empty cannot enable poisoning\n", s->name);
+	}
+	if (!poison && s->poison) {
+		if (slab_empty(s))
+			set_obj(s, "poison", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable poisoning\n", s->name);
+	}
+	if (tracking && !s->store_user) {
+		if (slab_empty(s))
+			set_obj(s, "store_user", 1);
+		else
+			fprintf(stderr, "%s not empty cannot enable tracking\n", s->name);
+	}
+	if (!tracking && s->store_user) {
+		if (slab_empty(s))
+			set_obj(s, "store_user", 0);
+		else
+			fprintf(stderr, "%s not empty cannot disable tracking\n", s->name);
+	}
+	if (tracing && !s->trace) {
+		if (slabs == 1)
+			set_obj(s, "trace", 1);
+		else
+			fprintf(stderr, "%s can only enable trace for one slab at a time\n", s->name);
+	}
+	if (!tracing && s->trace)
+		set_obj(s, "trace", 0);
+}
+
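+/*
+ * Aggregate min/max/average/total statistics over all slab caches for
+ * the -T (Totals) report.
+ */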
+static void totals(void)
+{
+	struct slabinfo *s;
+
+	int used_slabs = 0;
+	char b1[20], b2[20], b3[20], b4[20];
+	unsigned long long max = 1ULL << 63;
+
+	/* Object size */
+	unsigned long long min_objsize = max, max_objsize = 0, avg_objsize;
+
+	/* Number of partial slabs in a slabcache */
+	unsigned long long min_partial = max, max_partial = 0,
+				avg_partial, total_partial = 0;
+
+	/* Number of slabs in a slab cache */
+	unsigned long long min_slabs = max, max_slabs = 0,
+				avg_slabs, total_slabs = 0;
+
+	/* Size of the whole slab */
+	unsigned long long min_size = max, max_size = 0,
+				avg_size, total_size = 0;
+
+	/* Bytes used for object storage in a slab */
+	unsigned long long min_used = max, max_used = 0,
+				avg_used, total_used = 0;
+
+	/* Waste: Bytes used for alignment and padding */
+	unsigned long long min_waste = max, max_waste = 0,
+				avg_waste, total_waste = 0;
+	/* Number of objects in a slab */
+	unsigned long long min_objects = max, max_objects = 0,
+				avg_objects, total_objects = 0;
+	/* Waste per object */
+	unsigned long long min_objwaste = max,
+				max_objwaste = 0, avg_objwaste,
+				total_objwaste = 0;
+
+	/* Memory per object */
+	unsigned long long min_memobj = max,
+				max_memobj = 0, avg_memobj,
+				total_objsize = 0;
+
+	/* Percentage of partial slabs per slab */
+	unsigned long min_ppart = 100, max_ppart = 0,
+				avg_ppart, total_ppart = 0;
+
+	/* Number of objects in partial slabs */
+	unsigned long min_partobj = max, max_partobj = 0,
+				avg_partobj, total_partobj = 0;
+
+	/* Percentage of partial objects of all objects in a slab */
+	unsigned long min_ppartobj = 100, max_ppartobj = 0,
+				avg_ppartobj, total_ppartobj = 0;
+
+
+	for (s = slabinfo; s < slabinfo + slabs; s++) {
+		unsigned long long size;
+		unsigned long used;
+		unsigned long long wasted;
+		unsigned long long objwaste;
+		unsigned long percentage_partial_slabs;
+		unsigned long percentage_partial_objs;
+
+		if (!s->slabs || !s->objects)
+			continue;
+
+		used_slabs++;
+
+		size = slab_size(s);
+		used = s->objects * s->object_size;
+		wasted = size - used;
+		objwaste = s->slab_size - s->object_size;
+
+		percentage_partial_slabs = s->partial * 100 / s->slabs;
+		if (percentage_partial_slabs > 100)
+			percentage_partial_slabs = 100;
+
+		percentage_partial_objs = s->objects_partial * 100
+							/ s->objects;
+
+		if (percentage_partial_objs > 100)
+			percentage_partial_objs = 100;
+
+		if (s->object_size < min_objsize)
+			min_objsize = s->object_size;
+		if (s->partial < min_partial)
+			min_partial = s->partial;
+		if (s->slabs < min_slabs)
+			min_slabs = s->slabs;
+		if (size < min_size)
+			min_size = size;
+		if (wasted < min_waste)
+			min_waste = wasted;
+		if (objwaste < min_objwaste)
+			min_objwaste = objwaste;
+		if (s->objects < min_objects)
+			min_objects = s->objects;
+		if (used < min_used)
+			min_used = used;
+		if (s->objects_partial < min_partobj)
+			min_partobj = s->objects_partial;
+		if (percentage_partial_slabs < min_ppart)
+			min_ppart = percentage_partial_slabs;
+		if (percentage_partial_objs < min_ppartobj)
+			min_ppartobj = percentage_partial_objs;
+		if (s->slab_size < min_memobj)
+			min_memobj = s->slab_size;
+
+		if (s->object_size > max_objsize)
+			max_objsize = s->object_size;
+		if (s->partial > max_partial)
+			max_partial = s->partial;
+		if (s->slabs > max_slabs)
+			max_slabs = s->slabs;
+		if (size > max_size)
+			max_size = size;
+		if (wasted > max_waste)
+			max_waste = wasted;
+		if (objwaste > max_objwaste)
+			max_objwaste = objwaste;
+		if (s->objects > max_objects)
+			max_objects = s->objects;
+		if (used > max_used)
+			max_used = used;
+		if (s->objects_partial > max_partobj)
+			max_partobj = s->objects_partial;
+		if (percentage_partial_slabs > max_ppart)
+			max_ppart = percentage_partial_slabs;
+		if (percentage_partial_objs > max_ppartobj)
+			max_ppartobj = percentage_partial_objs;
+		if (s->slab_size > max_memobj)
+			max_memobj = s->slab_size;
+
+		total_partial += s->partial;
+		total_slabs += s->slabs;
+		total_size += size;
+		total_waste += wasted;
+
+		total_objects += s->objects;
+		total_used += used;
+		total_partobj += s->objects_partial;
+		total_ppart += percentage_partial_slabs;
+		total_ppartobj += percentage_partial_objs;
+
+		total_objwaste += s->objects * objwaste;
+		total_objsize += s->objects * s->slab_size;
+	}
+
+	if (!total_objects) {
+		printf("No objects\n");
+		return;
+	}
+	if (!used_slabs) {
+		printf("No slabs\n");
+		return;
+	}
+
+	/* Per slab averages */
+	avg_partial = total_partial / used_slabs;
+	avg_slabs = total_slabs / used_slabs;
+	avg_size = total_size / used_slabs;
+	avg_waste = total_waste / used_slabs;
+
+	avg_objects = total_objects / used_slabs;
+	avg_used = total_used / used_slabs;
+	avg_partobj = total_partobj / used_slabs;
+	avg_ppart = total_ppart / used_slabs;
+	avg_ppartobj = total_ppartobj / used_slabs;
+
+	/* Per object object sizes */
+	avg_objsize = total_used / total_objects;
+	avg_objwaste = total_objwaste / total_objects;
+	avg_partobj = total_partobj * 100 / total_objects;
+	avg_memobj = total_objsize / total_objects;
+
+	printf("Slabcache Totals\n");
+	printf("----------------\n");
+	printf("Slabcaches : %3d      Aliases  : %3d->%-3d Active: %3d\n",
+			slabs, aliases, alias_targets, used_slabs);
+
+	store_size(b1, total_size);store_size(b2, total_waste);
+	store_size(b3, total_waste * 100 / total_used);
+	printf("Memory used: %6s   # Loss   : %6s   MRatio:%6s%%\n", b1, b2, b3);
+
+	store_size(b1, total_objects);store_size(b2, total_partobj);
+	store_size(b3, total_partobj * 100 / total_objects);
+	printf("# Objects  : %6s   # PartObj: %6s   ORatio:%6s%%\n", b1, b2, b3);
+
+	printf("\n");
+	printf("Per Cache    Average         Min         Max       Total\n");
+	printf("---------------------------------------------------------\n");
+
+	store_size(b1, avg_objects);store_size(b2, min_objects);
+	store_size(b3, max_objects);store_size(b4, total_objects);
+	printf("#Objects  %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_slabs);store_size(b2, min_slabs);
+	store_size(b3, max_slabs);store_size(b4, total_slabs);
+	printf("#Slabs    %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_partial);store_size(b2, min_partial);
+	store_size(b3, max_partial);store_size(b4, total_partial);
+	printf("#PartSlab %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+	store_size(b1, avg_ppart);store_size(b2, min_ppart);
+	store_size(b3, max_ppart);
+	store_size(b4, total_partial * 100  / total_slabs);
+	printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_partobj);store_size(b2, min_partobj);
+	store_size(b3, max_partobj);
+	store_size(b4, total_partobj);
+	printf("PartObjs  %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
+	store_size(b3, max_ppartobj);
+	store_size(b4, total_partobj * 100 / total_objects);
+	printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_size);store_size(b2, min_size);
+	store_size(b3, max_size);store_size(b4, total_size);
+	printf("Memory    %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_used);store_size(b2, min_used);
+	store_size(b3, max_used);store_size(b4, total_used);
+	printf("Used      %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+
+	store_size(b1, avg_waste);store_size(b2, min_waste);
+	store_size(b3, max_waste);store_size(b4, total_waste);
+	printf("Loss      %10s  %10s  %10s  %10s\n",
+			b1,	b2,	b3,	b4);
+
+	printf("\n");
+	printf("Per Object   Average         Min         Max\n");
+	printf("---------------------------------------------\n");
+
+	store_size(b1, avg_memobj);store_size(b2, min_memobj);
+	store_size(b3, max_memobj);
+	printf("Memory    %10s  %10s  %10s\n",
+			b1,	b2,	b3);
+	store_size(b1, avg_objsize);store_size(b2, min_objsize);
+	store_size(b3, max_objsize);
+	printf("User      %10s  %10s  %10s\n",
+			b1,	b2,	b3);
+
+	store_size(b1, avg_objwaste);store_size(b2, min_objwaste);
+	store_size(b3, max_objwaste);
+	printf("Loss      %10s  %10s  %10s\n",
+			b1,	b2,	b3);
+}
+
+static void sort_slabs(void)
+{
+	struct slabinfo *s1,*s2;
+
+	for (s1 = slabinfo; s1 < slabinfo + slabs; s1++) {
+		for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
+			int result;
+
+			if (sort_size)
+				result = slab_size(s1) < slab_size(s2);
+			else if (sort_active)
+				result = slab_activity(s1) < slab_activity(s2);
+			else
+				result = strcasecmp(s1->name, s2->name);
+
+			if (show_inverted)
+				result = -result;
+
+			if (result > 0) {
+				struct slabinfo t;
+
+				memcpy(&t, s1, sizeof(struct slabinfo));
+				memcpy(s1, s2, sizeof(struct slabinfo));
+				memcpy(s2, &t, sizeof(struct slabinfo));
+			}
+		}
+	}
+}
+
+static void sort_aliases(void)
+{
+	struct aliasinfo *a1,*a2;
+
+	for (a1 = aliasinfo; a1 < aliasinfo + aliases; a1++) {
+		for (a2 = a1 + 1; a2 < aliasinfo + aliases; a2++) {
+			char *n1, *n2;
+
+			n1 = a1->name;
+			n2 = a2->name;
+			if (show_alias && !show_inverted) {
+				n1 = a1->ref;
+				n2 = a2->ref;
+			}
+			if (strcasecmp(n1, n2) > 0) {
+				struct aliasinfo t;
+
+				memcpy(&t, a1, sizeof(struct aliasinfo));
+				memcpy(a1, a2, sizeof(struct aliasinfo));
+				memcpy(a2, &t, sizeof(struct aliasinfo));
+			}
+		}
+	}
+}
+
+static void link_slabs(void)
+{
+	struct aliasinfo *a;
+	struct slabinfo *s;
+
+	for (a = aliasinfo; a < aliasinfo + aliases; a++) {
+
+		for (s = slabinfo; s < slabinfo + slabs; s++)
+			if (strcmp(a->ref, s->name) == 0) {
+				a->slab = s;
+				s->refs++;
+				break;
+			}
+		if (s == slabinfo + slabs)
+			fatal("Unresolved alias %s\n", a->ref);
+	}
+}
+
+static void alias(void)
+{
+	struct aliasinfo *a;
+	char *active = NULL;
+
+	sort_aliases();
+	link_slabs();
+
+	for(a = aliasinfo; a < aliasinfo + aliases; a++) {
+
+		if (!show_single_ref && a->slab->refs == 1)
+			continue;
+
+		if (!show_inverted) {
+			if (active) {
+				if (strcmp(a->slab->name, active) == 0) {
+					printf(" %s", a->name);
+					continue;
+				}
+			}
+			printf("\n%-12s <- %s", a->slab->name, a->name);
+			active = a->slab->name;
+		}
+		else
+			printf("%-20s -> %s\n", a->name, a->slab->name);
+	}
+	if (active)
+		printf("\n");
+}
+
+
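+/*
+ * Slabs whose sysfs name starts with ':' are merged caches. Where
+ * possible, replace that name with one of the cache's aliases; slabs
+ * left without a usable alias are marked "*" and skipped in the output.
+ */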
+static void rename_slabs(void)
+{
+	struct slabinfo *s;
+	struct aliasinfo *a;
+
+	for (s = slabinfo; s < slabinfo + slabs; s++) {
+		if (*s->name != ':')
+			continue;
+
+		if (s->refs > 1 && !show_first_alias)
+			continue;
+
+		a = find_one_alias(s);
+
+		if (a)
+			s->name = a->name;
+		else {
+			s->name = "*";
+			actual_slabs--;
+		}
+	}
+}
+
+static int slab_mismatch(char *slab)
+{
+	return regexec(&pattern, slab, 0, NULL, 0);
+}
+
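+/*
+ * Scan /sys/kernel/slab (or the older /sys/slab): symlinks are recorded
+ * as aliases, directories are read attribute by attribute into the
+ * slabinfo array.
+ */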
+static void read_slab_dir(void)
+{
+	DIR *dir;
+	struct dirent *de;
+	struct slabinfo *slab = slabinfo;
+	struct aliasinfo *alias = aliasinfo;
+	char *p;
+	char *t;
+	int count;
+
+	if (chdir("/sys/kernel/slab") && chdir("/sys/slab"))
+		fatal("SYSFS support for SLUB not active\n");
+
+	dir = opendir(".");
+	while ((de = readdir(dir))) {
+		if (de->d_name[0] == '.' ||
+			(de->d_name[0] != ':' && slab_mismatch(de->d_name)))
+				continue;
+		switch (de->d_type) {
+		   case DT_LNK:
+			alias->name = strdup(de->d_name);
+			count = readlink(de->d_name, buffer, sizeof(buffer));
+
+			if (count < 0)
+				fatal("Cannot read symlink %s\n", de->d_name);
+
+			buffer[count] = 0;
+			p = buffer + count;
+			while (p > buffer && p[-1] != '/')
+				p--;
+			alias->ref = strdup(p);
+			alias++;
+			break;
+		   case DT_DIR:
+			if (chdir(de->d_name))
+				fatal("Unable to access slab %s\n", de->d_name);
+			slab->name = strdup(de->d_name);
+			slab->alias = 0;
+			slab->refs = 0;
+			slab->aliases = get_obj("aliases");
+			slab->align = get_obj("align");
+			slab->cache_dma = get_obj("cache_dma");
+			slab->cpu_slabs = get_obj("cpu_slabs");
+			slab->destroy_by_rcu = get_obj("destroy_by_rcu");
+			slab->hwcache_align = get_obj("hwcache_align");
+			slab->object_size = get_obj("object_size");
+			slab->objects = get_obj("objects");
+			slab->objects_partial = get_obj("objects_partial");
+			slab->objects_total = get_obj("objects_total");
+			slab->objs_per_slab = get_obj("objs_per_slab");
+			slab->order = get_obj("order");
+			slab->partial = get_obj_and_str("partial", &t);
+			decode_numa_list(slab->numa_partial, t);
+			free(t);
+			slab->poison = get_obj("poison");
+			slab->reclaim_account = get_obj("reclaim_account");
+			slab->red_zone = get_obj("red_zone");
+			slab->sanity_checks = get_obj("sanity_checks");
+			slab->slab_size = get_obj("slab_size");
+			slab->slabs = get_obj_and_str("slabs", &t);
+			decode_numa_list(slab->numa, t);
+			free(t);
+			slab->store_user = get_obj("store_user");
+			slab->trace = get_obj("trace");
+			slab->alloc_fastpath = get_obj("alloc_fastpath");
+			slab->alloc_slowpath = get_obj("alloc_slowpath");
+			slab->free_fastpath = get_obj("free_fastpath");
+			slab->free_slowpath = get_obj("free_slowpath");
+			slab->free_frozen = get_obj("free_frozen");
+			slab->free_add_partial = get_obj("free_add_partial");
+			slab->free_remove_partial = get_obj("free_remove_partial");
+			slab->alloc_from_partial = get_obj("alloc_from_partial");
+			slab->alloc_slab = get_obj("alloc_slab");
+			slab->alloc_refill = get_obj("alloc_refill");
+			slab->free_slab = get_obj("free_slab");
+			slab->cpuslab_flush = get_obj("cpuslab_flush");
+			slab->deactivate_full = get_obj("deactivate_full");
+			slab->deactivate_empty = get_obj("deactivate_empty");
+			slab->deactivate_to_head = get_obj("deactivate_to_head");
+			slab->deactivate_to_tail = get_obj("deactivate_to_tail");
+			slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
+			slab->order_fallback = get_obj("order_fallback");
+			chdir("..");
+			if (slab->name[0] == ':')
+				alias_targets++;
+			slab++;
+			break;
+		   default :
+			fatal("Unknown file type %x\n", de->d_type);
+		}
+	}
+	closedir(dir);
+	slabs = slab - slabinfo;
+	actual_slabs = slabs;
+	aliases = alias - aliasinfo;
+	if (slabs > MAX_SLABS)
+		fatal("Too many slabs\n");
+	if (aliases > MAX_ALIASES)
+		fatal("Too many aliases\n");
+}
+
+static void output_slabs(void)
+{
+	struct slabinfo *slab;
+
+	for (slab = slabinfo; slab < slabinfo + slabs; slab++) {
+
+		if (slab->alias)
+			continue;
+
+
+		if (show_numa)
+			slab_numa(slab, 0);
+		else if (show_track)
+			show_tracking(slab);
+		else if (validate)
+			slab_validate(slab);
+		else if (shrink)
+			slab_shrink(slab);
+		else if (set_debug)
+			slab_debug(slab);
+		else if (show_ops)
+			ops(slab);
+		else if (show_slab)
+			slabcache(slab);
+		else if (show_report)
+			report(slab);
+	}
+}
+
+struct option opts[] = {
+	{ "aliases", 0, NULL, 'a' },
+	{ "activity", 0, NULL, 'A' },
+	{ "debug", 2, NULL, 'd' },
+	{ "display-activity", 0, NULL, 'D' },
+	{ "empty", 0, NULL, 'e' },
+	{ "first-alias", 0, NULL, 'f' },
+	{ "help", 0, NULL, 'h' },
+	{ "inverted", 0, NULL, 'i'},
+	{ "numa", 0, NULL, 'n' },
+	{ "ops", 0, NULL, 'o' },
+	{ "report", 0, NULL, 'r' },
+	{ "shrink", 0, NULL, 's' },
+	{ "slabs", 0, NULL, 'l' },
+	{ "Size", 0, NULL, 'S'},
+	{ "Totals", 0, NULL, 'T'},
+	{ "track", 0, NULL, 't'},
+	{ "validate", 0, NULL, 'v' },
+	{ "zero", 0, NULL, 'z' },
+	{ "1ref", 0, NULL, '1'},
+	{ NULL, 0, NULL, 0 }
+};
+
+int main(int argc, char *argv[])
+{
+	int c;
+	int err;
+	char *pattern_source;
+
+	page_size = getpagesize();
+
+	while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTS",
+						opts, NULL)) != -1)
+		switch (c) {
+		case '1':
+			show_single_ref = 1;
+			break;
+		case 'a':
+			show_alias = 1;
+			break;
+		case 'A':
+			sort_active = 1;
+			break;
+		case 'd':
+			set_debug = 1;
+			if (!debug_opt_scan(optarg))
+				fatal("Invalid debug option '%s'\n", optarg);
+			break;
+		case 'D':
+			show_activity = 1;
+			break;
+		case 'e':
+			show_empty = 1;
+			break;
+		case 'f':
+			show_first_alias = 1;
+			break;
+		case 'h':
+			usage();
+			return 0;
+		case 'i':
+			show_inverted = 1;
+			break;
+		case 'n':
+			show_numa = 1;
+			break;
+		case 'o':
+			show_ops = 1;
+			break;
+		case 'r':
+			show_report = 1;
+			break;
+		case 's':
+			shrink = 1;
+			break;
+		case 'l':
+			show_slab = 1;
+			break;
+		case 't':
+			show_track = 1;
+			break;
+		case 'v':
+			validate = 1;
+			break;
+		case 'z':
+			skip_zero = 0;
+			break;
+		case 'T':
+			show_totals = 1;
+			break;
+		case 'S':
+			sort_size = 1;
+			break;
+
+		default:
+			fatal("%s: Invalid option '%c'\n", argv[0], optopt);
+
+	}
+
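+	/* Default to the basic slab listing if no output mode was selected */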
+	if (!show_slab && !show_alias && !show_track && !show_report
+		&& !validate && !shrink && !set_debug && !show_ops)
+			show_slab = 1;
+
+	if (argc > optind)
+		pattern_source = argv[optind];
+	else
+		pattern_source = ".*";
+
+	err = regcomp(&pattern, pattern_source, REG_ICASE|REG_NOSUB);
+	if (err)
+		fatal("%s: Invalid pattern '%s' code %d\n",
+			argv[0], pattern_source, err);
+	read_slab_dir();
+	if (show_alias)
+		alias();
+	else
+	if (show_totals)
+		totals();
+	else {
+		link_slabs();
+		rename_slabs();
+		sort_slabs();
+		output_slabs();
+	}
+	return 0;
+}
diff --git a/tools/testing/ktest/compare-ktest-sample.pl b/tools/testing/ktest/compare-ktest-sample.pl
new file mode 100755
index 0000000..9a571e7
--- /dev/null
+++ b/tools/testing/ktest/compare-ktest-sample.pl
@@ -0,0 +1,30 @@
+#!/usr/bin/perl
+
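+# Cross check the options referenced in ktest.pl against the ones
+# documented in sample.conf and print any that appear in only one of
+# the two files.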
+open (IN,"ktest.pl");
+while (<IN>) {
+    if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ ||
+	/set_test_option\("(.*?)"/) {
+	$opt{$1} = 1;
+    }
+}
+close IN;
+
+open (IN, "sample.conf");
+while (<IN>) {
+    if (/^\s*#?\s*(\S+)\s*=/) {
+	$samp{$1} = 1;
+    }
+}
+close IN;
+
+foreach $opt (keys %opt) {
+    if (!defined($samp{$opt})) {
+	print "opt = $opt\n";
+    }
+}
+
+foreach $samp (keys %samp) {
+    if (!defined($opt{$samp})) {
+	print "samp = $samp\n";
+    }
+}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
new file mode 100755
index 0000000..e1c62ee
--- /dev/null
+++ b/tools/testing/ktest/ktest.pl
@@ -0,0 +1,2023 @@
+#!/usr/bin/perl -w
+#
+# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+# Licensed under the terms of the GNU GPL License version 2
+#
+
+use strict;
+use IPC::Open2;
+use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
+use File::Path qw(mkpath);
+use File::Copy qw(cp);
+use FileHandle;
+
+my $VERSION = "0.2";
+
+$| = 1;
+
+my %opt;
+my %repeat_tests;
+my %repeats;
+my %default;
+
+#default opts
+$default{"NUM_TESTS"}		= 1;
+$default{"REBOOT_TYPE"}		= "grub";
+$default{"TEST_TYPE"}		= "test";
+$default{"BUILD_TYPE"}		= "randconfig";
+$default{"MAKE_CMD"}		= "make";
+$default{"TIMEOUT"}		= 120;
+$default{"TMP_DIR"}		= "/tmp/ktest";
+$default{"SLEEP_TIME"}		= 60;	# sleep time between tests
+$default{"BUILD_NOCLEAN"}	= 0;
+$default{"REBOOT_ON_ERROR"}	= 0;
+$default{"POWEROFF_ON_ERROR"}	= 0;
+$default{"REBOOT_ON_SUCCESS"}	= 1;
+$default{"POWEROFF_ON_SUCCESS"}	= 0;
+$default{"BUILD_OPTIONS"}	= "";
+$default{"BISECT_SLEEP_TIME"}	= 60;   # sleep time between bisects
+$default{"CLEAR_LOG"}		= 0;
+$default{"SUCCESS_LINE"}	= "login:";
+$default{"BOOTED_TIMEOUT"}	= 1;
+$default{"DIE_ON_FAILURE"}	= 1;
+$default{"SSH_EXEC"}		= "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
+$default{"SCP_TO_TARGET"}	= "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE";
+$default{"REBOOT"}		= "ssh \$SSH_USER\@\$MACHINE reboot";
+$default{"STOP_AFTER_SUCCESS"}	= 10;
+$default{"STOP_AFTER_FAILURE"}	= 60;
+$default{"LOCALVERSION"}	= "-test";
+
+my $ktest_config;
+my $version;
+my $machine;
+my $ssh_user;
+my $tmpdir;
+my $builddir;
+my $outputdir;
+my $output_config;
+my $test_type;
+my $build_type;
+my $build_options;
+my $reboot_type;
+my $reboot_script;
+my $power_cycle;
+my $reboot;
+my $reboot_on_error;
+my $poweroff_on_error;
+my $die_on_failure;
+my $powercycle_after_reboot;
+my $poweroff_after_halt;
+my $ssh_exec;
+my $scp_to_target;
+my $power_off;
+my $grub_menu;
+my $grub_number;
+my $target;
+my $make;
+my $post_install;
+my $noclean;
+my $minconfig;
+my $addconfig;
+my $in_bisect = 0;
+my $bisect_bad = "";
+my $reverse_bisect;
+my $in_patchcheck = 0;
+my $run_test;
+my $redirect;
+my $buildlog;
+my $dmesg;
+my $monitor_fp;
+my $monitor_pid;
+my $monitor_cnt = 0;
+my $sleep_time;
+my $bisect_sleep_time;
+my $store_failures;
+my $timeout;
+my $booted_timeout;
+my $console;
+my $success_line;
+my $stop_after_success;
+my $stop_after_failure;
+my $build_target;
+my $target_image;
+my $localversion;
+my $iteration = 0;
+my $successes = 0;
+
+my %entered_configs;
+my %config_help;
+
+$config_help{"MACHINE"} = << "EOF"
+ The machine hostname that you will test.
+EOF
+    ;
+$config_help{"SSH_USER"} = << "EOF"
+ The box is expected to have ssh on normal bootup; provide the user
+  (most likely root, since you need privileged operations)
+EOF
+    ;
+$config_help{"BUILD_DIR"} = << "EOF"
+ The directory that contains the Linux source code (full path).
+EOF
+    ;
+$config_help{"OUTPUT_DIR"} = << "EOF"
+ The directory that the objects will be built in (full path).
+ (can not be the same as BUILD_DIR)
+EOF
+    ;
+$config_help{"BUILD_TARGET"} = << "EOF"
+ The location of the compiled file to copy to the target.
+ (relative to OUTPUT_DIR)
+EOF
+    ;
+$config_help{"TARGET_IMAGE"} = << "EOF"
+ The place to put your image on the test machine.
+EOF
+    ;
+$config_help{"POWER_CYCLE"} = << "EOF"
+ A script or command to reboot the box.
+
+ Here is a digital loggers power switch example
+ POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin\@power/outlet?5=CCL'
+
+ Here is an example to reboot a virtual machine on the current host
+ with the name "Guest".
+ POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+EOF
+    ;
+$config_help{"CONSOLE"} = << "EOF"
+ The script or command that reads the console
+
+  If you use a ttywatch server, something like the following would work.
+CONSOLE = nc -d localhost 3001
+
+ For a virtual machine with guest name "Guest".
+CONSOLE =  virsh console Guest
+EOF
+    ;
+$config_help{"LOCALVERSION"} = << "EOF"
+ Required version ending to differentiate the test
+ from other linux builds on the system.
+EOF
+    ;
+$config_help{"REBOOT_TYPE"} = << "EOF"
+ Way to reboot the box to the test kernel.
+ Only valid options so far are "grub" and "script".
+
+ If you specify grub, it will assume grub version 1
+ and will search in /boot/grub/menu.lst for the title \$GRUB_MENU
+ and select that target to reboot to the kernel. If this is not
+ your setup, then specify "script" and have a command or script
+ specified in REBOOT_SCRIPT to boot to the target.
+
+ The entry in /boot/grub/menu.lst must be entered manually.
+ The test will not modify that file.
+EOF
+    ;
+$config_help{"GRUB_MENU"} = << "EOF"
+ The grub title name for the test kernel to boot
+ (Only mandatory if REBOOT_TYPE = grub)
+
+ Note, ktest.pl will not update the grub menu.lst, you need to
+ manually add an option for the test. ktest.pl will search
+ the grub menu.lst for this option to find what kernel to
+ reboot into.
+
+ For example, if in the /boot/grub/menu.lst the test kernel title has:
+ title Test Kernel
+ kernel vmlinuz-test
+ GRUB_MENU = Test Kernel
+EOF
+    ;
+$config_help{"REBOOT_SCRIPT"} = << "EOF"
+ A script to reboot the target into the test kernel
+ (Only mandatory if REBOOT_TYPE = script)
+EOF
+    ;
+
+
+sub get_ktest_config {
+    my ($config) = @_;
+
+    return if (defined($opt{$config}));
+
+    if (defined($config_help{$config})) {
+	print "\n";
+	print $config_help{$config};
+    }
+
+    for (;;) {
+	print "$config = ";
+	if (defined($default{$config})) {
+	    print "\[$default{$config}\] ";
+	}
+	$entered_configs{$config} = <STDIN>;
+	$entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/;
+	if ($entered_configs{$config} =~ /^\s*$/) {
+	    if ($default{$config}) {
+		$entered_configs{$config} = $default{$config};
+	    } else {
+		print "Your answer can not be blank\n";
+		next;
+	    }
+	}
+	last;
+    }
+}
+
+sub get_ktest_configs {
+    get_ktest_config("MACHINE");
+    get_ktest_config("SSH_USER");
+    get_ktest_config("BUILD_DIR");
+    get_ktest_config("OUTPUT_DIR");
+    get_ktest_config("BUILD_TARGET");
+    get_ktest_config("TARGET_IMAGE");
+    get_ktest_config("POWER_CYCLE");
+    get_ktest_config("CONSOLE");
+    get_ktest_config("LOCALVERSION");
+
+    my $rtype = $opt{"REBOOT_TYPE"};
+
+    if (!defined($rtype)) {
+	if (!defined($opt{"GRUB_MENU"})) {
+	    get_ktest_config("REBOOT_TYPE");
+	    $rtype = $entered_configs{"REBOOT_TYPE"};
+	} else {
+	    $rtype = "grub";
+	}
+    }
+
+    if ($rtype eq "grub") {
+	get_ktest_config("GRUB_MENU");
+    } else {
+	get_ktest_config("REBOOT_SCRIPT");
+    }
+}
+
+sub set_value {
+    my ($lvalue, $rvalue) = @_;
+
+    if (defined($opt{$lvalue})) {
+	die "Error: Option $lvalue defined more than once!\n";
+    }
+    if ($rvalue =~ /^\s*$/) {
+	delete $opt{$lvalue};
+    } else {
+	$opt{$lvalue} = $rvalue;
+    }
+}
+
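+# Parse the ktest config file: "OPTION = value" lines, with DEFAULTS
+# sections applying globally and TEST_START sections (optionally with
+# ITERATE or SKIP) starting per-test option blocks.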
+sub read_config {
+    my ($config) = @_;
+
+    open(IN, $config) || die "can't read file $config";
+
+    my $name = $config;
+    $name =~ s,.*/(.*),$1,;
+
+    my $test_num = 0;
+    my $default = 1;
+    my $repeat = 1;
+    my $num_tests_set = 0;
+    my $skip = 0;
+    my $rest;
+
+    while (<IN>) {
+
+	# ignore blank lines and comments
+	next if (/^\s*$/ || /\s*\#/);
+
+	if (/^\s*TEST_START(.*)/) {
+
+	    $rest = $1;
+
+	    if ($num_tests_set) {
+		die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+	    }
+
+	    my $old_test_num = $test_num;
+	    my $old_repeat = $repeat;
+
+	    $test_num += $repeat;
+	    $default = 0;
+	    $repeat = 1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) {
+		$repeat = $1;
+		$rest = $2;
+		$repeat_tests{"$test_num"} = $repeat;
+	    }
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after TEST_START\n$_";
+	    }
+
+	    if ($skip) {
+		$test_num = $old_test_num;
+		$repeat = $old_repeat;
+	    }
+
+	} elsif (/^\s*DEFAULTS(.*)$/) {
+	    $default = 1;
+
+	    $rest = $1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after DEFAULTS\n$_";
+	    }
+
+	} elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+
+	    next if ($skip);
+
+	    my $lvalue = $1;
+	    my $rvalue = $2;
+
+	    if (!$default &&
+		($lvalue eq "NUM_TESTS" ||
+		 $lvalue eq "LOG_FILE" ||
+		 $lvalue eq "CLEAR_LOG")) {
+		die "$name: $.: $lvalue must be set in DEFAULTS section\n";
+	    }
+
+	    if ($lvalue eq "NUM_TESTS") {
+		if ($test_num) {
+		    die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+		}
+		if (!$default) {
+		    die "$name: $.: NUM_TESTS must be set in default section\n";
+		}
+		$num_tests_set = 1;
+	    }
+
+	    if ($default || $lvalue =~ /\[\d+\]$/) {
+		set_value($lvalue, $rvalue);
+	    } else {
+		my $val = "$lvalue\[$test_num\]";
+		set_value($val, $rvalue);
+
+		if ($repeat > 1) {
+		    $repeats{$val} = $repeat;
+		}
+	    }
+	} else {
+	    die "$name: $.: Garbage found in config\n$_";
+	}
+    }
+
+    close(IN);
+
+    if ($test_num) {
+	$test_num += $repeat - 1;
+	$opt{"NUM_TESTS"} = $test_num;
+    }
+
+    # make sure we have all mandatory configs
+    get_ktest_configs;
+
+    # set any defaults
+
+    foreach my $default (keys %default) {
+	if (!defined($opt{$default})) {
+	    $opt{$default} = $default{$default};
+	}
+    }
+}
+
+sub _logit {
+    if (defined($opt{"LOG_FILE"})) {
+	open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+	print OUT @_;
+	close(OUT);
+    }
+}
+
+sub logit {
+    if (defined($opt{"LOG_FILE"})) {
+	_logit @_;
+    } else {
+	print @_;
+    }
+}
+
+sub doprint {
+    print @_;
+    _logit @_;
+}
+
+sub run_command;
+
+sub reboot {
+    # try to reboot normally
+    if (run_command $reboot) {
+	if (defined($powercycle_after_reboot)) {
+	    sleep $powercycle_after_reboot;
+	    run_command "$power_cycle";
+	}
+    } else {
+	# nope? power cycle it.
+	run_command "$power_cycle";
+    }
+}
+
+sub do_not_reboot {
+    my $i = $iteration;
+
+    return $test_type eq "build" ||
+	($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
+	($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
+}
+
+sub dodie {
+    doprint "CRITICAL FAILURE... ", @_, "\n";
+
+    my $i = $iteration;
+
+    if ($reboot_on_error && !do_not_reboot) {
+
+	doprint "REBOOTING\n";
+	reboot;
+
+    } elsif ($poweroff_on_error && defined($power_off)) {
+	doprint "POWERING OFF\n";
+	`$power_off`;
+    }
+
+    die @_, "\n";
+}
+
+sub open_console {
+    my ($fp) = @_;
+
+    my $flags;
+
+    my $pid = open($fp, "$console|") or
+	dodie "Can't open console $console";
+
+    $flags = fcntl($fp, F_GETFL, 0) or
+	dodie "Can't get flags for the socket: $!";
+    $flags = fcntl($fp, F_SETFL, $flags | O_NONBLOCK) or
+	dodie "Can't set flags for the socket: $!";
+
+    return $pid;
+}
+
+sub close_console {
+    my ($fp, $pid) = @_;
+
+    doprint "kill child process $pid\n";
+    kill 2, $pid;
+
+    print "closing!\n";
+    close($fp);
+}
+
+sub start_monitor {
+    if ($monitor_cnt++) {
+	return;
+    }
+    $monitor_fp = \*MONFD;
+    $monitor_pid = open_console $monitor_fp;
+
+    return;
+
+    open(MONFD, "Stop perl from warning about single use of MONFD");
+}
+
+sub end_monitor {
+    if (--$monitor_cnt) {
+	return;
+    }
+    close_console($monitor_fp, $monitor_pid);
+}
+
+sub wait_for_monitor {
+    my ($time) = @_;
+    my $line;
+
+    doprint "** Wait for monitor to settle down **\n";
+
+    # read the monitor and wait for the system to calm down
+    do {
+	$line = wait_for_input($monitor_fp, $time);
+	print "$line" if (defined($line));
+    } while (defined($line));
+    print "** Monitor flushed **\n";
+}
+
+sub fail {
+
+	if ($die_on_failure) {
+		dodie @_;
+	}
+
+	doprint "FAILED\n";
+
+	my $i = $iteration;
+
+	# no need to reboot for just building.
+	if (!do_not_reboot) {
+	    doprint "REBOOTING\n";
+	    reboot;
+	    start_monitor;
+	    wait_for_monitor $sleep_time;
+	    end_monitor;
+	}
+
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "KTEST RESULT: TEST $i Failed: ", @_, "\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+
+	return 1 if (!defined($store_failures));
+
+	my @t = localtime;
+	my $date = sprintf "%04d%02d%02d%02d%02d%02d",
+		1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+
+	my $type = $build_type;
+	if ($type =~ /useconfig/) {
+	    $type = "useconfig";
+	}
+
+	my $dir = "$machine-$test_type-$type-fail-$date";
+	my $faildir = "$store_failures/$dir";
+
+	if (!-d $faildir) {
+	    mkpath($faildir) or
+		die "can't create $faildir";
+	}
+	if (-f "$output_config") {
+	    cp "$output_config", "$faildir/config" or
+		die "failed to copy .config";
+	}
+	if (-f $buildlog) {
+	    cp $buildlog, "$faildir/buildlog" or
+		die "failed to copy $buildlog";
+	}
+	if (-f $dmesg) {
+	    cp $dmesg, "$faildir/dmesg" or
+		die "failed to copy $dmesg";
+	}
+
+	doprint "*** Saved info to $faildir ***\n";
+
+	return 1;
+}
+
+sub run_command {
+    my ($command) = @_;
+    my $dolog = 0;
+    my $dord = 0;
+    my $pid;
+
+    $command =~ s/\$SSH_USER/$ssh_user/g;
+    $command =~ s/\$MACHINE/$machine/g;
+
+    doprint("$command ... ");
+
+    $pid = open(CMD, "$command 2>&1 |") or
+	(fail "unable to exec $command" and return 0);
+
+    if (defined($opt{"LOG_FILE"})) {
+	open(LOG, ">>$opt{LOG_FILE}") or
+	    dodie "failed to write to log";
+	$dolog = 1;
+    }
+
+    if (defined($redirect)) {
+	open (RD, ">$redirect") or
+	    dodie "failed to write to redirect $redirect";
+	$dord = 1;
+    }
+
+    while (<CMD>) {
+	print LOG if ($dolog);
+	print RD  if ($dord);
+    }
+
+    waitpid($pid, 0);
+    my $failed = $?;
+
+    close(CMD);
+    close(LOG) if ($dolog);
+    close(RD)  if ($dord);
+
+    if ($failed) {
+	doprint "FAILED!\n";
+    } else {
+	doprint "SUCCESS\n";
+    }
+
+    return !$failed;
+}
+
+sub run_ssh {
+    my ($cmd) = @_;
+    my $cp_exec = $ssh_exec;
+
+    $cp_exec =~ s/\$SSH_COMMAND/$cmd/g;
+    return run_command "$cp_exec";
+}
+
+sub run_scp {
+    my ($src, $dst) = @_;
+    my $cp_scp = $scp_to_target;
+
+    $cp_scp =~ s/\$SRC_FILE/$src/g;
+    $cp_scp =~ s/\$DST_FILE/$dst/g;
+
+    return run_command "$cp_scp";
+}
+
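+# Find the zero based index of $grub_menu in the target's
+# /boot/grub/menu.lst; reboot_to uses it for "savedefault --once".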
+sub get_grub_index {
+
+    if ($reboot_type ne "grub") {
+	return;
+    }
+    return if (defined($grub_number));
+
+    doprint "Find grub menu ... ";
+    $grub_number = -1;
+
+    my $ssh_grub = $ssh_exec;
+    $ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g;
+
+    open(IN, "$ssh_grub |")
+	or die "unable to get menu.lst";
+
+    while (<IN>) {
+	if (/^\s*title\s+$grub_menu\s*$/) {
+	    $grub_number++;
+	    last;
+	} elsif (/^\s*title\s/) {
+	    $grub_number++;
+	}
+    }
+    close(IN);
+
+    die "Could not find '$grub_menu' in /boot/grub/menu.lst on $machine"
+	if ($grub_number < 0);
+    doprint "$grub_number\n";
+}
+
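+# Read one line from the (non-blocking) console handle, waiting at most
+# $time seconds (defaults to $timeout); returns undef if nothing arrives.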
+sub wait_for_input
+{
+    my ($fp, $time) = @_;
+    my $rin;
+    my $ready;
+    my $line;
+    my $ch;
+
+    if (!defined($time)) {
+	$time = $timeout;
+    }
+
+    $rin = '';
+    vec($rin, fileno($fp), 1) = 1;
+    $ready = select($rin, undef, undef, $time);
+
+    $line = "";
+
+    # try to read one char at a time
+    while (sysread $fp, $ch, 1) {
+	$line .= $ch;
+	last if ($ch eq "\n");
+    }
+
+    if (!length($line)) {
+	return undef;
+    }
+
+    return $line;
+}
+
+sub reboot_to {
+    if ($reboot_type eq "grub") {
+	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
+	return;
+    }
+
+    run_command "$reboot_script";
+}
+
+sub get_sha1 {
+    my ($commit) = @_;
+
+    doprint "git rev-list --max-count=1 $commit ... ";
+    my $sha1 = `git rev-list --max-count=1 $commit`;
+    my $ret = $?;
+
+    logit $sha1;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to get git $commit";
+    }
+
+    print "SUCCESS\n";
+
+    chomp $sha1;
+
+    return $sha1;
+}
+
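+# Reboot into the test kernel and watch the console: declare success when
+# $success_line is seen, and flag a bug on "call trace" or kernel panic
+# messages. Everything read is also copied to the dmesg file.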
+sub monitor {
+    my $booted = 0;
+    my $bug = 0;
+    my $skip_call_trace = 0;
+    my $loops;
+
+    wait_for_monitor 5;
+
+    my $line;
+    my $full_line = "";
+
+    open(DMESG, "> $dmesg") or
+	die "unable to write to $dmesg";
+
+    reboot_to;
+
+    my $success_start;
+    my $failure_start;
+
+    for (;;) {
+
+	if ($booted) {
+	    $line = wait_for_input($monitor_fp, $booted_timeout);
+	} else {
+	    $line = wait_for_input($monitor_fp);
+	}
+
+	last if (!defined($line));
+
+	doprint $line;
+	print DMESG $line;
+
+	# we are not guaranteed to get a full line
+	$full_line .= $line;
+
+	if ($full_line =~ /$success_line/) {
+	    $booted = 1;
+	    $success_start = time;
+	}
+
+	if ($booted && defined($stop_after_success) &&
+	    $stop_after_success >= 0) {
+	    my $now = time;
+	    if ($now - $success_start >= $stop_after_success) {
+		doprint "Test forced to stop after $stop_after_success seconds after success\n";
+		last;
+	    }
+	}
+
+	if ($full_line =~ /\[ backtrace testing \]/) {
+	    $skip_call_trace = 1;
+	}
+
+	if ($full_line =~ /call trace:/i) {
+	    if (!$skip_call_trace) {
+		$bug = 1;
+		$failure_start = time;
+	    }
+	}
+
+	if ($bug && defined($stop_after_failure) &&
+	    $stop_after_failure >= 0) {
+	    my $now = time;
+	    if ($now - $failure_start >= $stop_after_failure) {
+		doprint "Test forced to stop after $stop_after_failure seconds after failure\n";
+		last;
+	    }
+	}
+
+	if ($full_line =~ /\[ end of backtrace testing \]/) {
+	    $skip_call_trace = 0;
+	}
+
+	if ($full_line =~ /Kernel panic -/) {
+	    $bug = 1;
+	}
+
+	if ($line =~ /\n/) {
+	    $full_line = "";
+	}
+    }
+
+    close(DMESG);
+
+    if ($bug) {
+	return 0 if ($in_bisect);
+	fail "failed - got a bug report" and return 0;
+    }
+
+    if (!$booted) {
+	return 0 if ($in_bisect);
+	fail "failed - never got a boot prompt." and return 0;
+    }
+
+    return 1;
+}
+
+sub install {
+
+    run_scp "$outputdir/$build_target", "$target_image" or
+	dodie "failed to copy image";
+
+    my $install_mods = 0;
+
+    # should we process modules?
+    $install_mods = 0;
+    open(IN, "$output_config") or dodie("Can't read config file");
+    while (<IN>) {
+	if (/CONFIG_MODULES(=y)?/) {
+	    $install_mods = 1 if (defined($1));
+	    last;
+	}
+    }
+    close(IN);
+
+    if (!$install_mods) {
+	doprint "No modules needed\n";
+	return;
+    }
+
+    run_command "$make INSTALL_MOD_PATH=$tmpdir modules_install" or
+	dodie "Failed to install modules";
+
+    my $modlib = "/lib/modules/$version";
+    my $modtar = "ktest-mods.tar.bz2";
+
+    run_ssh "rm -rf $modlib" or
+	dodie "failed to remove old mods: $modlib";
+
+    # would be nice if scp -r did not follow symbolic links
+    run_command "cd $tmpdir && tar -cjf $modtar lib/modules/$version" or
+	dodie "making tarball";
+
+    run_scp "$tmpdir/$modtar", "/tmp" or
+	dodie "failed to copy modules";
+
+    unlink "$tmpdir/$modtar";
+
+    run_ssh "'(cd / && tar xf /tmp/$modtar)'" or
+	dodie "failed to tar modules";
+
+    run_ssh "rm -f /tmp/$modtar";
+
+    return if (!defined($post_install));
+
+    my $cp_post_install = $post_install;
+    $cp_post_install =~ s/\$KERNEL_VERSION/$version/g;
+    run_command "$cp_post_install" or
+	dodie "Failed to run post install";
+}
+
+sub check_buildlog {
+    my ($patch) = @_;
+
+    my @files = `git show $patch | diffstat -l`;
+
+    open(IN, "git show $patch |") or
+	dodie "failed to show $patch";
+    while (<IN>) {
+	if (m,^--- a/(.*),) {
+	    chomp $1;
+	    $files[$#files] = $1;
+	}
+    }
+    close(IN);
+
+    open(IN, $buildlog) or dodie "Can't open $buildlog";
+    while (<IN>) {
+	if (/^\s*(.*?):.*(warning|error)/) {
+	    my $err = $1;
+	    foreach my $file (@files) {
+		my $fullpath = "$builddir/$file";
+		if ($file eq $err || $fullpath eq $err) {
+		    fail "$file built with warnings" and return 0;
+		}
+	    }
+	}
+    }
+    close(IN);
+
+    return 1;
+}
+
+sub build {
+    my ($type) = @_;
+    my $defconfig = "";
+
+    unlink $buildlog;
+
+    if ($type =~ /^useconfig:(.*)/) {
+	run_command "cp $1 $output_config" or
+	    dodie "could not copy $1 to .config";
+
+	$type = "oldconfig";
+    }
+
+    # old config can ask questions
+    if ($type eq "oldconfig") {
+	$type = "oldnoconfig";
+
+	# allow for empty configs
+	run_command "touch $output_config";
+
+	run_command "mv $output_config $outputdir/config_temp" or
+	    dodie "moving .config";
+
+	if (!$noclean && !run_command "$make mrproper") {
+	    dodie "make mrproper";
+	}
+
+	run_command "mv $outputdir/config_temp $output_config" or
+	    dodie "moving config_temp";
+
+    } elsif (!$noclean) {
+	unlink "$output_config";
+	run_command "$make mrproper" or
+	    dodie "make mrproper";
+    }
+
+    # add something to distinguish this build
+    open(OUT, "> $outputdir/localversion") or dodie("Can't make localversion file");
+    print OUT "$localversion\n";
+    close(OUT);
+
+    if (defined($minconfig)) {
+	$defconfig = "KCONFIG_ALLCONFIG=$minconfig";
+    }
+
+    run_command "$defconfig $make $type" or
+	dodie "failed make config";
+
+    $redirect = "$buildlog";
+    if (!run_command "$make $build_options") {
+	undef $redirect;
+	# bisect may need this to pass
+	return 0 if ($in_bisect);
+	fail "failed build" and return 0;
+    }
+    undef $redirect;
+
+    return 1;
+}
+
+sub halt {
+    if (!run_ssh "halt" or defined($power_off)) {
+	if (defined($poweroff_after_halt)) {
+	    sleep $poweroff_after_halt;
+	    run_command "$power_off";
+	}
+    } else {
+	# nope? then zap it!
+	run_command "$power_off";
+    }
+}
+
+sub success {
+    my ($i) = @_;
+
+    $successes++;
+
+    doprint "\n\n*******************************************\n";
+    doprint     "*******************************************\n";
+    doprint     "KTEST RESULT: TEST $i SUCCESS!!!!         **\n";
+    doprint     "*******************************************\n";
+    doprint     "*******************************************\n";
+
+    if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
+	doprint "Reboot and wait $sleep_time seconds\n";
+	reboot;
+	start_monitor;
+	wait_for_monitor $sleep_time;
+	end_monitor;
+    }
+}
+
+sub get_version {
+    # get the release name
+    doprint "$make kernelrelease ... ";
+    $version = `$make kernelrelease | tail -1`;
+    chomp($version);
+    doprint "$version\n";
+}
+
+sub child_run_test {
+    my $failed = 0;
+
+    # child should have no power
+    $reboot_on_error = 0;
+    $poweroff_on_error = 0;
+    $die_on_failure = 1;
+
+    run_command $run_test or $failed = 1;
+    exit $failed;
+}
+
+my $child_done;
+
+sub child_finished {
+    $child_done = 1;
+}
+
+sub do_run_test {
+    my $child_pid;
+    my $child_exit;
+    my $line;
+    my $full_line;
+    my $bug = 0;
+
+    wait_for_monitor 1;
+
+    doprint "run test $run_test\n";
+
+    $child_done = 0;
+
+    $SIG{CHLD} = qw(child_finished);
+
+    $child_pid = fork;
+
+    child_run_test if (!$child_pid);
+
+    $full_line = "";
+
+    do {
+	$line = wait_for_input($monitor_fp, 1);
+	if (defined($line)) {
+
+	    # we are not guaranteed to get a full line
+	    $full_line .= $line;
+
+	    if ($full_line =~ /call trace:/i) {
+		$bug = 1;
+	    }
+
+	    if ($full_line =~ /Kernel panic -/) {
+		$bug = 1;
+	    }
+
+	    if ($line =~ /\n/) {
+		$full_line = "";
+	    }
+	}
+    } while (!$child_done && !$bug);
+
+    if ($bug) {
+	doprint "Detected kernel crash!\n";
+	# kill the child with extreme prejudice
+	kill 9, $child_pid;
+    }
+
+    waitpid $child_pid, 0;
+    $child_exit = $?;
+
+    if ($bug || $child_exit) {
+	return 0 if $in_bisect;
+	fail "test failed" and return 0;
+    }
+    return 1;
+}
+
+sub run_git_bisect {
+    my ($command) = @_;
+
+    doprint "$command ... ";
+
+    my $output = `$command 2>&1`;
+    my $ret = $?;
+
+    logit $output;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to git bisect";
+    }
+
+    doprint "SUCCESS\n";
+    if ($output =~ m/^(Bisecting: .*\(roughly \d+ steps?\))\s+\[([[:xdigit:]]+)\]/) {
+	doprint "$1 [$2]\n";
+    } elsif ($output =~ m/^([[:xdigit:]]+) is the first bad commit/) {
+	$bisect_bad = $1;
+	doprint "Found bad commit... $1\n";
+	return 0;
+    } else {
+	# we already logged it, just print it now.
+	print $output;
+    }
+
+    return 1;
+}
+
+# returns 1 on success, 0 on failure
+sub run_bisect_test {
+    my ($type, $buildtype) = @_;
+
+    my $failed = 0;
+    my $result;
+    my $output;
+    my $ret;
+
+    $in_bisect = 1;
+
+    build $buildtype or $failed = 1;
+
+    if ($type ne "build") {
+	dodie "Failed on build" if $failed;
+
+	# Now boot the box
+	get_grub_index;
+	get_version;
+	install;
+
+	start_monitor;
+	monitor or $failed = 1;
+
+	if ($type ne "boot") {
+	    dodie "Failed on boot" if $failed;
+
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+    }
+
+    if ($failed) {
+	$result = 0;
+
+	# reboot the box to a good kernel
+	if ($type ne "build") {
+	    doprint "Reboot and sleep $bisect_sleep_time seconds\n";
+	    reboot;
+	    start_monitor;
+	    wait_for_monitor $bisect_sleep_time;
+	    end_monitor;
+	}
+    } else {
+	$result = 1;
+    }
+    $in_bisect = 0;
+
+    return $result;
+}
+
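+# Build (and, depending on BISECT_TYPE, boot and test) the current bisect
+# point and translate the result into "good" or "bad" for git bisect,
+# honoring BISECT_REVERSE.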
+sub run_bisect {
+    my ($type) = @_;
+    my $buildtype = "oldconfig";
+
+    # Use the minconfig if one was provided
+    if (defined($minconfig)) {
+	$buildtype = "useconfig:$minconfig";
+    }
+
+    my $ret = run_bisect_test $type, $buildtype;
+
+
+    # Are we looking for where it worked, not failed?
+    if ($reverse_bisect) {
+	$ret = !$ret;
+    }
+
+    if ($ret) {
+	return "good";
+    } else {
+	return  "bad";
+    }
+}
+
+sub bisect {
+    my ($i) = @_;
+
+    my $result;
+
+    die "BISECT_GOOD[$i] not defined\n"	if (!defined($opt{"BISECT_GOOD[$i]"}));
+    die "BISECT_BAD[$i] not defined\n"	if (!defined($opt{"BISECT_BAD[$i]"}));
+    die "BISECT_TYPE[$i] not defined\n"	if (!defined($opt{"BISECT_TYPE[$i]"}));
+
+    my $good = $opt{"BISECT_GOOD[$i]"};
+    my $bad = $opt{"BISECT_BAD[$i]"};
+    my $type = $opt{"BISECT_TYPE[$i]"};
+    my $start = $opt{"BISECT_START[$i]"};
+    my $replay = $opt{"BISECT_REPLAY[$i]"};
+
+    # convert to true sha1's
+    $good = get_sha1($good);
+    $bad = get_sha1($bad);
+
+    if (defined($opt{"BISECT_REVERSE[$i]"}) &&
+	$opt{"BISECT_REVERSE[$i]"} == 1) {
+	doprint "Performing a reverse bisect (bad is good, good is bad!)\n";
+	$reverse_bisect = 1;
+    } else {
+	$reverse_bisect = 0;
+    }
+
+    # Can't have a test without having a test to run
+    if ($type eq "test" && !defined($run_test)) {
+	$type = "boot";
+    }
+
+    my $check = $opt{"BISECT_CHECK[$i]"};
+    if (defined($check) && $check ne "0") {
+
+	# get current HEAD
+	my $head = get_sha1("HEAD");
+
+	if ($check ne "good") {
+	    doprint "TESTING BISECT BAD [$bad]\n";
+	    run_command "git checkout $bad" or
+		die "Failed to checkout $bad";
+
+	    $result = run_bisect $type;
+
+	    if ($result ne "bad") {
+		fail "Tested BISECT_BAD [$bad] and it succeeded" and return 0;
+	    }
+	}
+
+	if ($check ne "bad") {
+	    doprint "TESTING BISECT GOOD [$good]\n";
+	    run_command "git checkout $good" or
+		die "Failed to checkout $good";
+
+	    $result = run_bisect $type;
+
+	    if ($result ne "good") {
+		fail "Tested BISECT_GOOD [$good] and it failed" and return 0;
+	    }
+	}
+
+	# checkout where we started
+	run_command "git checkout $head" or
+	    die "Failed to checkout $head";
+    }
+
+    run_command "git bisect start" or
+	dodie "could not start bisect";
+
+    run_command "git bisect good $good" or
+	dodie "could not set bisect good to $good";
+
+    run_git_bisect "git bisect bad $bad" or
+	dodie "could not set bisect bad to $bad";
+
+    if (defined($replay)) {
+	run_command "git bisect replay $replay" or
+	    dodie "failed to run replay";
+    }
+
+    if (defined($start)) {
+	run_command "git checkout $start" or
+	    dodie "failed to checkout $start";
+    }
+
+    my $test;
+    do {
+	$result = run_bisect $type;
+	$test = run_git_bisect "git bisect $result";
+    } while ($test);
+
+    run_command "git bisect log" or
+	dodie "could not capture git bisect log";
+
+    run_command "git bisect reset" or
+	dodie "could not reset git bisect";
+
+    doprint "Bad commit was [$bisect_bad]\n";
+
+    success $i;
+}
+
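+# State used by the config bisect:
+#   %config_ignore - options taken from the min/add configs (always written as-is)
+#   %config_set    - the full "CONFIG_FOO=..." line for each option seen
+#   %config_list   - the options still being bisected
+#   %dependency    - recorded dependencies, walked by get_dependencies()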
+my %config_ignore;
+my %config_set;
+
+my %config_list;
+my %null_config;
+
+my %dependency;
+
+sub process_config_ignore {
+    my ($config) = @_;
+
+    open (IN, $config)
+	or dodie "Failed to read $config";
+
+    while (<IN>) {
+	if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
+	    $config_ignore{$2} = $1;
+	}
+    }
+
+    close(IN);
+}
+
+sub read_current_config {
+    my ($config_ref) = @_;
+
+    %{$config_ref} = ();
+    undef %{$config_ref};
+
+    my @key = keys %{$config_ref};
+    if ($#key >= 0) {
+	print "did not delete!\n";
+	exit;
+    }
+    open (IN, "$output_config");
+
+    while (<IN>) {
+	if (/^(CONFIG\S+)=(.*)/) {
+	    ${$config_ref}{$1} = $2;
+	}
+    }
+    close(IN);
+}
+
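+# Recursively collect every config that the given config depends on,
+# as recorded in %dependency.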
+sub get_dependencies {
+    my ($config) = @_;
+
+    my $arr = $dependency{$config};
+    if (!defined($arr)) {
+	return ();
+    }
+
+    my @deps = @{$arr};
+
+    foreach my $dep (@{$arr}) {
+	print "ADD DEP $dep\n";
+	@deps = (@deps, get_dependencies $dep);
+    }
+
+    return @deps;
+}
+
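+# Write $output_config with the given configs (plus their recorded
+# dependencies) and every ignored/default option, then run make oldnoconfig
+# to resolve it into a consistent .config.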
+sub create_config {
+    my @configs = @_;
+
+    open(OUT, ">$output_config") or dodie "Can not write to $output_config";
+
+    foreach my $config (@configs) {
+	print OUT "$config_set{$config}\n";
+	my @deps = get_dependencies $config;
+	foreach my $dep (@deps) {
+	    print OUT "$config_set{$dep}\n";
+	}
+    }
+
+    foreach my $config (keys %config_ignore) {
+	print OUT "$config_ignore{$config}\n";
+    }
+    close(OUT);
+
+#    exit;
+    run_command "$make oldnoconfig" or
+	dodie "failed to run make oldnoconfig";
+
+}
+
+sub compare_configs {
+    my (%a, %b) = @_;
+
+    foreach my $item (keys %a) {
+	if (!defined($b{$item})) {
+	    print "diff $item\n";
+	    return 1;
+	}
+	delete $b{$item};
+    }
+
+    my @keys = keys %b;
+    if ($#keys) {
+	print "diff2 $keys[0]\n";
+    }
+    return -1 if ($#keys >= 0);
+
+    return 0;
+}
+
+sub run_config_bisect_test {
+    my ($type) = @_;
+
+    return run_bisect_test $type, "oldconfig";
+}
+
+sub process_passed {
+    my (%configs) = @_;
+
+    doprint "These configs had no failure: (Enabling them for further compiles)\n";
+    # Passed! All these configs are part of a good compile.
+    # Add them to the min options.
+    foreach my $config (keys %configs) {
+	if (defined($config_list{$config})) {
+	    doprint " removing $config\n";
+	    $config_ignore{$config} = $config_list{$config};
+	    delete $config_list{$config};
+	}
+    }
+    doprint "config copied to $outputdir/config_good\n";
+    run_command "cp -f $output_config $outputdir/config_good";
+}
+
+sub process_failed {
+    my ($config) = @_;
+
+    doprint "\n\n***************************************\n";
+    doprint "Found bad config: $config\n";
+    doprint "***************************************\n\n";
+}
+
+sub run_config_bisect {
+
+    my @start_list = keys %config_list;
+
+    if ($#start_list < 0) {
+	doprint "No more configs to test!!!\n";
+	return -1;
+    }
+
+    doprint "***** RUN TEST ***\n";
+    my $type = $opt{"CONFIG_BISECT_TYPE[$iteration]"};
+    my $ret;
+    my %current_config;
+
+    my $count = $#start_list + 1;
+    doprint "  $count configs to test\n";
+
+    my $half = int($#start_list / 2);
+
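+    # Bisect by halves: build a config from the top half of the remaining
+    # list; if kconfig enables none of those options, fall back to the
+    # bottom half before running the actual test.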
+    do {
+	my @tophalf = @start_list[0 .. $half];
+
+	create_config @tophalf;
+	read_current_config \%current_config;
+
+	$count = $#tophalf + 1;
+	doprint "Testing $count configs\n";
+	my $found = 0;
+	# make sure we test something
+	foreach my $config (@tophalf) {
+	    if (defined($current_config{$config})) {
+		logit " $config\n";
+		$found = 1;
+	    }
+	}
+	if (!$found) {
+	    # try the other half
+	    doprint "Top half produced no set configs, trying bottom half\n";
+	    @tophalf = @start_list[$half .. $#start_list];
+	    create_config @tophalf;
+	    read_current_config \%current_config;
+	    foreach my $config (@tophalf) {
+		if (defined($current_config{$config})) {
+		    logit " $config\n";
+		    $found = 1;
+		}
+	    }
+	    if (!$found) {
+		doprint "Failed: Can't make new config with current configs\n";
+		foreach my $config (@start_list) {
+		    doprint "  CONFIG: $config\n";
+		}
+		return -1;
+	    }
+	    $count = $#tophalf + 1;
+	    doprint "Testing $count configs\n";
+	}
+
+	$ret = run_config_bisect_test $type;
+
+	if ($ret) {
+	    process_passed %current_config;
+	    return 0;
+	}
+
+	doprint "This config had a failure.\n";
+	doprint "Removing these configs that were not set in this config:\n";
+	doprint "config copied to $outputdir/config_bad\n";
+	run_command "cp -f $output_config $outputdir/config_bad";
+
+	# A config exists in this group that was bad.
+	foreach my $config (keys %config_list) {
+	    if (!defined($current_config{$config})) {
+		doprint " removing $config\n";
+		delete $config_list{$config};
+	    }
+	}
+
+	@start_list = @tophalf;
+
+	if ($#start_list == 0) {
+	    process_failed $start_list[0];
+	    return 1;
+	}
+
+	# remove half the configs we are looking at and see if
+	# they are good.
+	$half = int($#start_list / 2);
+    } while ($half > 0);
+
+    # we found a single config, try it again
+    my @tophalf = @start_list[0 .. 0];
+
+    $ret = run_config_bisect_test $type;
+    if ($ret) {
+	process_passed %current_config;
+	return 0;
+    }
+
+    process_failed $start_list[0];
+    return 1;
+}
+
+sub config_bisect {
+    my ($i) = @_;
+
+    my $start_config = $opt{"CONFIG_BISECT[$i]"};
+
+    my $tmpconfig = "$tmpdir/use_config";
+
+    # Make the file with the bad config and the min config
+    if (defined($minconfig)) {
+	# read the min config for things to ignore
+	run_command "cp $minconfig $tmpconfig" or
+	    dodie "failed to copy $minconfig to $tmpconfig";
+    } else {
+	unlink $tmpconfig;
+    }
+
+    # Add other configs
+    if (defined($addconfig)) {
+	run_command "cat $addconfig >> $tmpconfig" or
+	    dodie "failed to append $addconfig";
+    }
+
+    my $defconfig = "";
+    if (-f $tmpconfig) {
+	$defconfig = "KCONFIG_ALLCONFIG=$tmpconfig";
+	process_config_ignore $tmpconfig;
+    }
+
+    # now process the start config
+    run_command "cp $start_config $output_config" or
+	dodie "failed to copy $start_config to $output_config";
+
+    # read directly what we want to check
+    my %config_check;
+    open (IN, $output_config)
+	or dodie "failed to open $output_config";
+
+    while (<IN>) {
+	if (/^((CONFIG\S*)=.*)/) {
+	    $config_check{$2} = $1;
+	}
+    }
+    close(IN);
+
+    # Now run oldconfig with the minconfig (and addconfigs)
+    run_command "$defconfig $make oldnoconfig" or
+	dodie "failed to run make oldnoconfig";
+
+    # check to see what we lost (or gained)
+    open (IN, $output_config)
+	or dodie "Failed to read $output_config";
+
+    my %removed_configs;
+    my %added_configs;
+
+    while (<IN>) {
+	if (/^((CONFIG\S*)=.*)/) {
+	    # save off all options
+	    $config_set{$2} = $1;
+	    if (defined($config_check{$2})) {
+		if (defined($config_ignore{$2})) {
+		    $removed_configs{$2} = $1;
+		} else {
+		    $config_list{$2} = $1;
+		}
+	    } elsif (!defined($config_ignore{$2})) {
+		$added_configs{$2} = $1;
+		$config_list{$2} = $1;
+	    }
+	}
+    }
+    close(IN);
+
+    my @confs = keys %removed_configs;
+    if ($#confs >= 0) {
+	doprint "Configs overridden by default configs and removed from check:\n";
+	foreach my $config (@confs) {
+	    doprint " $config\n";
+	}
+    }
+    @confs = keys %added_configs;
+    if ($#confs >= 0) {
+	doprint "Configs appearing in make oldconfig and added:\n";
+	foreach my $config (@confs) {
+	    doprint " $config\n";
+	}
+    }
+
+    my %config_test;
+    my $once = 0;
+
+    # Sometimes kconfig does weird things. We must make sure
+    # that the config we autocreate has everything we need
+    # to test, otherwise we may miss testing configs, or
+    # may not be able to create a new config.
+    # Here we create a config with everything set.
+    create_config (keys %config_list);
+    read_current_config \%config_test;
+    foreach my $config (keys %config_list) {
+	if (!defined($config_test{$config})) {
+	    if (!$once) {
+		$once = 1;
+		doprint "Configs not produced by kconfig (will not be checked):\n";
+	    }
+	    doprint "  $config\n";
+	    delete $config_list{$config};
+	}
+    }
+    my $ret;
+    do {
+	$ret = run_config_bisect;
+    } while (!$ret);
+
+    return $ret if ($ret < 0);
+
+    success $i;
+}
+
+sub patchcheck {
+    my ($i) = @_;
+
+    die "PATCHCHECK_START[$i] not defined\n"
+	if (!defined($opt{"PATCHCHECK_START[$i]"}));
+    die "PATCHCHECK_TYPE[$i] not defined\n"
+	if (!defined($opt{"PATCHCHECK_TYPE[$i]"}));
+
+    my $start = $opt{"PATCHCHECK_START[$i]"};
+
+    my $end = "HEAD";
+    if (defined($opt{"PATCHCHECK_END[$i]"})) {
+	$end = $opt{"PATCHCHECK_END[$i]"};
+    }
+
+    # Get the true sha1's since we can use things like HEAD~3
+    $start = get_sha1($start);
+    $end = get_sha1($end);
+
+    my $type = $opt{"PATCHCHECK_TYPE[$i]"};
+
+    # Can't have a test without having a test to run
+    if ($type eq "test" && !defined($run_test)) {
+	$type = "boot";
+    }
+
+    open (IN, "git log --pretty=oneline $end|") or
+	dodie "could not get git list";
+
+    my @list;
+
+    while (<IN>) {
+	chomp;
+	$list[$#list+1] = $_;
+	last if (/^$start/);
+    }
+    close(IN);
+
+    if ($list[$#list] !~ /^$start/) {
+	fail "SHA1 $start not found";
+    }
+
+    # reverse the list so we process the oldest commit first
+    @list = reverse @list;
+
+    my $save_clean = $noclean;
+
+    $in_patchcheck = 1;
+    foreach my $item (@list) {
+	my $sha1 = $item;
+	$sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+
+	doprint "\nProcessing commit $item\n\n";
+
+	run_command "git checkout $sha1" or
+	    die "Failed to checkout $sha1";
+
+	# only clean on the first and last patch
+	if ($item eq $list[0] ||
+	    $item eq $list[$#list]) {
+	    $noclean = $save_clean;
+	} else {
+	    $noclean = 1;
+	}
+
+	if (defined($minconfig)) {
+	    build "useconfig:$minconfig" or return 0;
+	} else {
+	    # no config given; fall back to oldconfig
+	    build "oldconfig" or return 0;
+	}
+
+	check_buildlog $sha1 or return 0;
+
+	next if ($type eq "build");
+
+	get_grub_index;
+	get_version;
+	install;
+
+	my $failed = 0;
+
+	start_monitor;
+	monitor or $failed = 1;
+
+	if (!$failed && $type ne "boot") {
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+	return 0 if ($failed);
+
+    }
+    $in_patchcheck = 0;
+    success $i;
+
+    return 1;
+}
+
+$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl config-file\n";
+
+if ($#ARGV == 0) {
+    $ktest_config = $ARGV[0];
+    if (! -f $ktest_config) {
+	print "$ktest_config does not exist.\n";
+	my $ans;
+        for (;;) {
+	    print "Create it? [Y/n] ";
+	    $ans = <STDIN>;
+	    chomp $ans;
+	    if ($ans =~ /^\s*$/) {
+		$ans = "y";
+	    }
+	    last if ($ans =~ /^y$/i || $ans =~ /^n$/i);
+	    print "Please answer either 'y' or 'n'.\n";
+	}
+	if ($ans !~ /^y$/i) {
+	    exit 0;
+	}
+    }
+} else {
+    $ktest_config = "ktest.conf";
+}
+
+if (! -f $ktest_config) {
+    open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
+    print OUT << "EOF"
+# Generated by ktest.pl
+#
+# Define each test with TEST_START
+# The config options below it will override the defaults
+TEST_START
+
+DEFAULTS
+EOF
+;
+    close(OUT);
+}
+read_config $ktest_config;
+
+# Append any configs entered manually to the config file.
+my @new_configs = keys %entered_configs;
+if ($#new_configs >= 0) {
+    print "\nAppending entered in configs to $ktest_config\n";
+    open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
+    foreach my $config (@new_configs) {
+	print OUT "$config = $entered_configs{$config}\n";
+	$opt{$config} = $entered_configs{$config};
+    }
+}
+
+if ($opt{"CLEAR_LOG"} && defined($opt{"LOG_FILE"})) {
+    unlink $opt{"LOG_FILE"};
+}
+
+doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
+
+for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+    if (!$i) {
+	doprint "DEFAULT OPTIONS:\n";
+    } else {
+	doprint "\nTEST $i OPTIONS";
+	if (defined($repeat_tests{$i})) {
+	    $repeat = $repeat_tests{$i};
+	    doprint " ITERATE $repeat";
+	}
+	doprint "\n";
+    }
+
+    foreach my $option (sort keys %opt) {
+
+	if ($option =~ /\[(\d+)\]$/) {
+	    next if ($i != $1);
+	} else {
+	    next if ($i);
+	}
+
+	doprint "$option = $opt{$option}\n";
+    }
+}
+
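+# Look up a test option: first the per-test value NAME[$i], then a value
+# inherited from an iterated test that covers test $i, then the default NAME.
+# Returns undef if the option is not set anywhere.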
+sub set_test_option {
+    my ($name, $i) = @_;
+
+    my $option = "$name\[$i\]";
+
+    if (defined($opt{$option})) {
+	return $opt{$option};
+    }
+
+    foreach my $test (keys %repeat_tests) {
+	if ($i >= $test &&
+	    $i < $test + $repeat_tests{$test}) {
+	    $option = "$name\[$test\]";
+	    if (defined($opt{$option})) {
+		return $opt{$option};
+	    }
+	}
+    }
+
+    if (defined($opt{$name})) {
+	return $opt{$name};
+    }
+
+    return undef;
+}
+
+# The first thing we need to do is the builds
+for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+
+    $iteration = $i;
+
+    my $makecmd = set_test_option("MAKE_CMD", $i);
+
+    $machine = set_test_option("MACHINE", $i);
+    $ssh_user = set_test_option("SSH_USER", $i);
+    $tmpdir = set_test_option("TMP_DIR", $i);
+    $outputdir = set_test_option("OUTPUT_DIR", $i);
+    $builddir = set_test_option("BUILD_DIR", $i);
+    $test_type = set_test_option("TEST_TYPE", $i);
+    $build_type = set_test_option("BUILD_TYPE", $i);
+    $build_options = set_test_option("BUILD_OPTIONS", $i);
+    $power_cycle = set_test_option("POWER_CYCLE", $i);
+    $reboot = set_test_option("REBOOT", $i);
+    $noclean = set_test_option("BUILD_NOCLEAN", $i);
+    $minconfig = set_test_option("MIN_CONFIG", $i);
+    $run_test = set_test_option("TEST", $i);
+    $addconfig = set_test_option("ADD_CONFIG", $i);
+    $reboot_type = set_test_option("REBOOT_TYPE", $i);
+    $grub_menu = set_test_option("GRUB_MENU", $i);
+    $post_install = set_test_option("POST_INSTALL", $i);
+    $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
+    $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
+    $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
+    $die_on_failure = set_test_option("DIE_ON_FAILURE", $i);
+    $power_off = set_test_option("POWER_OFF", $i);
+    $powercycle_after_reboot = set_test_option("POWERCYCLE_AFTER_REBOOT", $i);
+    $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i);
+    $sleep_time = set_test_option("SLEEP_TIME", $i);
+    $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i);
+    $store_failures = set_test_option("STORE_FAILURES", $i);
+    $timeout = set_test_option("TIMEOUT", $i);
+    $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i);
+    $console = set_test_option("CONSOLE", $i);
+    $success_line = set_test_option("SUCCESS_LINE", $i);
+    $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
+    $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
+    $build_target = set_test_option("BUILD_TARGET", $i);
+    $ssh_exec = set_test_option("SSH_EXEC", $i);
+    $scp_to_target = set_test_option("SCP_TO_TARGET", $i);
+    $target_image = set_test_option("TARGET_IMAGE", $i);
+    $localversion = set_test_option("LOCALVERSION", $i);
+
+    chdir $builddir || die "can't change directory to $builddir";
+
+    if (!-d $tmpdir) {
+	mkpath($tmpdir) or
+	    die "can't create $tmpdir";
+    }
+
+    $ENV{"SSH_USER"} = $ssh_user;
+    $ENV{"MACHINE"} = $machine;
+
+    $target = "$ssh_user\@$machine";
+
+    $buildlog = "$tmpdir/buildlog-$machine";
+    $dmesg = "$tmpdir/dmesg-$machine";
+    $make = "$makecmd O=$outputdir";
+    $output_config = "$outputdir/.config";
+
+    if ($reboot_type eq "grub") {
+	dodie "GRUB_MENU not defined" if (!defined($grub_menu));
+    } elsif (!defined($reboot_script)) {
+	dodie "REBOOT_SCRIPT not defined"
+    }
+
+    my $run_type = $build_type;
+    if ($test_type eq "patchcheck") {
+	$run_type = $opt{"PATCHCHECK_TYPE[$i]"};
+    } elsif ($test_type eq "bisect") {
+	$run_type = $opt{"BISECT_TYPE[$i]"};
+    } elsif ($test_type eq "config_bisect") {
+	$run_type = $opt{"CONFIG_BISECT_TYPE[$i]"};
+    }
+
+    # mistake in config file?
+    if (!defined($run_type)) {
+	$run_type = "ERROR";
+    }
+
+    doprint "\n\n";
+    doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n";
+
+    unlink $dmesg;
+    unlink $buildlog;
+
+    if (!defined($minconfig)) {
+	$minconfig = $addconfig;
+
+    } elsif (defined($addconfig)) {
+	run_command "cat $addconfig $minconfig > $tmpdir/add_config" or
+	    dodie "Failed to create temp config";
+	$minconfig = "$tmpdir/add_config";
+    }
+
+    my $checkout = $opt{"CHECKOUT[$i]"};
+    if (defined($checkout)) {
+	run_command "git checkout $checkout" or
+	    die "failed to checkout $checkout";
+    }
+
+    if ($test_type eq "bisect") {
+	bisect $i;
+	next;
+    } elsif ($test_type eq "config_bisect") {
+	config_bisect $i;
+	next;
+    } elsif ($test_type eq "patchcheck") {
+	patchcheck $i;
+	next;
+    }
+
+    if ($build_type ne "nobuild") {
+	build $build_type or next;
+    }
+
+    if ($test_type ne "build") {
+	get_grub_index;
+	get_version;
+	install;
+
+	my $failed = 0;
+	start_monitor;
+	monitor or $failed = 1;
+
+	if (!$failed && $test_type ne "boot" && defined($run_test)) {
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+	next if ($failed);
+    }
+
+    success $i;
+}
+
+if ($opt{"POWEROFF_ON_SUCCESS"}) {
+    halt;
+} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
+    reboot;
+}
+
+doprint "\n    $successes of $opt{NUM_TESTS} tests were successful\n\n";
+
+exit 0;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
new file mode 100644
index 0000000..3408c59
--- /dev/null
+++ b/tools/testing/ktest/sample.conf
@@ -0,0 +1,622 @@
+#
+# Config file for ktest.pl
+#
+# Note, all paths must be absolute
+#
+
+# Options set in the beginning of the file are considered to be
+# default options. These options can be overridden by test specific
+# options, with the following exceptions:
+#
+#  LOG_FILE
+#  CLEAR_LOG
+#  POWEROFF_ON_SUCCESS
+#  REBOOT_ON_SUCCESS
+#
+# Test specific options are set after the label:
+#
+# TEST_START
+#
+# The options after a TEST_START label are specific to that test.
+# Each TEST_START label will set up a new test. If you want to
+# perform a test more than once, you can add the ITERATE label
+# to it followed by the number of times you want that test
+# to iterate. If the ITERATE is left off, the test will only
+# be performed once.
+#
+# TEST_START ITERATE 10
+#
+# You can skip a test by adding SKIP (before or after the ITERATE
+# and number)
+#
+# TEST_START SKIP
+#
+# TEST_START SKIP ITERATE 10
+#
+# TEST_START ITERATE 10 SKIP
+#
+# The SKIP label causes the options and the test itself to be ignored.
+# This is useful to set up several different tests in one config file, and
+# enable only the ones you want to use for the current test run.
+#
+# You can add default options anywhere in the file as well
+# with the DEFAULTS tag. This allows you to have default options
+# after the test options to keep the test options at the top
+# of the file. You can even place the DEFAULTS tag between
+# test cases (but not in the middle of a single test case)
+#
+# TEST_START
+# MIN_CONFIG = /home/test/config-test1
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-default
+#
+# TEST_START ITERATE 10
+#
+# The above will run the first test with MIN_CONFIG set to
+# /home/test/config-test1. Then 10 tests will be executed
+# with MIN_CONFIG set to /home/test/config-default.
+#
+# You can also disable defaults with the SKIP option
+#
+# DEFAULTS SKIP
+# MIN_CONFIG = /home/test/config-use-sometimes
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-most-times
+#
+# The above will ignore the first MIN_CONFIG. If you want to
+# use the first MIN_CONFIG, remove the SKIP from the first
+# DEFAULTS tag and add it to the second. Be careful, options
+# may only be declared once per test or default. If the same
+# option name appears more than once under the same test or in the
+# same DEFAULTS section, ktest will fail to execute and no tests will run.
+#
+
+
+#### Mandatory Default Options ####
+
+# These options must be in the default section, although most
+# may be overridden by test options.
+
+# The machine hostname that you will test
+#MACHINE = target
+
+# The box is expected to have ssh on normal bootup; provide the user
+#  (most likely root, since you need privileged operations)
+#SSH_USER = root
+
+# The directory that contains the Linux source code
+#BUILD_DIR = /home/test/linux.git
+
+# The directory where the objects will be built
+# (cannot be the same as BUILD_DIR)
+#OUTPUT_DIR = /home/test/build/target
+
+# The location of the compiled file to copy to the target
+# (relative to OUTPUT_DIR)
+#BUILD_TARGET = arch/x86/boot/bzImage
+
+# The place to put your image on the test machine
+#TARGET_IMAGE = /boot/vmlinuz-test
+
+# A script or command to reboot the box
+#
+# Here is a digital loggers power switch example
+#POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
+#
+# Here is an example to reboot a virtual box on the current host
+# with the name "Guest".
+#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+
+# The script or command that reads the console
+#
+#  If you use ttywatch server, something like the following would work.
+#CONSOLE = nc -d localhost 3001
+#
+# For a virtual machine with guest name "Guest".
+#CONSOLE =  virsh console Guest
+
+# Required version ending to differentiate the test
+# from other linux builds on the system.
+#LOCALVERSION = -test
+
+# The grub title name for the test kernel to boot
+# (Only mandatory if REBOOT_TYPE = grub)
+#
+# Note, ktest.pl will not update the grub menu.lst; you need to
+# manually add an option for the test. ktest.pl will search
+# the grub menu.lst for this option to find what kernel to
+# reboot into.
+#
+# For example, if in the /boot/grub/menu.lst the test kernel title has:
+# title Test Kernel
+# kernel vmlinuz-test
+#GRUB_MENU = Test Kernel
+
+# A script to reboot the target into the test kernel
+# (Only mandatory if REBOOT_TYPE = script)
+#REBOOT_SCRIPT =
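+#
+# For example (hypothetical script path; any command that boots the
+# target into the test kernel will do):
+#REBOOT_SCRIPT = /home/test/bin/reboot-to-test-kernel.sh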
+
+#### Optional Config Options (all have defaults) ####
+
+# Start a test setup. If you leave this off, all options
+# will be default and the test will run once.
+# This is a label and not really an option (it takes no value).
+# You can append ITERATE and a number after it to iterate the
+# test a number of times, or SKIP to ignore this test.
+#
+#TEST_START
+#TEST_START ITERATE 5
+#TEST_START SKIP
+
+# Have the following options as default again. Used after tests
+# have already been defined by TEST_START. Optionally, you can
+# just define all default options before the first TEST_START
+# and you do not need this option.
+#
+# This is a label and not really an option (it takes no value).
+# You can append SKIP to this label and the options within this
+# section will be ignored.
+#
+# DEFAULTS
+# DEFAULTS SKIP
+
+# The default test type (default test)
+# The test types may be:
+#   build - only build the kernel, do nothing else
+#   boot - build and boot the kernel
+#   test - build, boot and if TEST is set, run the test script
+#          (If TEST is not set, it defaults back to boot)
+#   bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
+#   patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
+#TEST_TYPE = test
+
+# Test to run if there is a successful boot and TEST_TYPE is test.
+# Must exit with 0 on success and non-zero on error
+# default (undefined)
+#TEST = ssh user@machine /root/run_test
+
+# The build type is any make config type or special command
+#  (default randconfig)
+#   nobuild - skip the clean and build step
+#   useconfig:/path/to/config - use the given config and run
+#              oldconfig on it.
+# This option is ignored if TEST_TYPE is patchcheck or bisect
+#BUILD_TYPE = randconfig
+
+# The make command (default make)
+# If you are building a 32-bit x86 kernel on a 64-bit host
+#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
+
+# Any build options for the make of the kernel (not for other makes, like configs)
+# (default "")
+#BUILD_OPTIONS = -j20
+
+# If you need an initrd, you can add a script or code here to install
+# it. The environment variable KERNEL_VERSION will be set to the
+# kernel version that is used. Remember to add the initrd line
+# to your grub menu.lst file.
+#
+# Here's a couple of examples to use:
+#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
+#
+# or on some systems:
+#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+
+# Way to reboot the box to the test kernel.
+# Only valid options so far are "grub" and "script"
+# (default grub)
+# If you specify grub, it will assume grub version 1
+# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
+# and select that target to reboot to the kernel. If this is not
+# your setup, then specify "script" and have a command or script
+# specified in REBOOT_SCRIPT to boot to the target.
+#
+# The entry in /boot/grub/menu.lst must be added manually.
+# The test will not modify that file.
+#REBOOT_TYPE = grub
+
+# The min config that is needed to build for the machine
+# A nice way to create this is with the following:
+#
+#   $ ssh target
+#   $ lsmod > mymods
+#   $ scp mymods host:/tmp
+#   $ exit
+#   $ cd linux.git
+#   $ rm .config
+#   $ make LSMOD=mymods localyesconfig
+#   $ grep '^CONFIG' .config > /home/test/config-min
+#
+# If you want even less configs:
+#
+#   log in directly to target (do not ssh)
+#
+#   $ su
+#   # lsmod | cut -d' ' -f1 | xargs rmmod
+#
+#   repeat the above several times
+#
+#   # lsmod > mymods
+#   # reboot
+#
+# You may need to reboot to get your network back in order to copy the
+# mymods file to the host, and then remove the previous .config and run the
+# localyesconfig again. The MIN_CONFIG generated like this will
+# not guarantee network activity to the box, so the TEST_TYPE of
+# test may fail.
+#
+# You might also want to set:
+#   CONFIG_CMDLINE="<your options here>"
+#  randconfig may set the above and override your real command
+#  line options.
+# (default undefined)
+#MIN_CONFIG = /home/test/config-min
+
+# Sometimes there are options that just break the boot and
+# that you do not care about. Here are a few:
+#   # CONFIG_STAGING is not set
+#  Staging drivers are horrible, and can break the build.
+#   # CONFIG_SCSI_DEBUG is not set
+#  SCSI_DEBUG may change your root partition
+#   # CONFIG_KGDB_SERIAL_CONSOLE is not set
+#  KGDB may cause an oops while waiting for a connection that is not there.
+# This option points to the file containing config options that will be prepended
+# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
+#
+# Note, config options in MIN_CONFIG will override these options.
+#
+# (default undefined)
+#ADD_CONFIG = /home/test/config-broken
+
+# The location on the host where to write temp files
+# (default /tmp/ktest)
+#TMP_DIR = /tmp/ktest
+
+# Optional log file to write the status (recommended)
+#  Note, this is a DEFAULT section only option.
+# (default undefined)
+#LOG_FILE = /home/test/logfiles/target.log
+
+# Remove old logfile if it exists before starting all tests.
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#CLEAR_LOG = 0
+
+# Line to define a successful boot up in console output.
+# This is what the line contains, not the entire line. If you need
+# the entire line to match, then use regular expression syntax like:
+#  (do not add any quotes around it)
+#
+#  SUCCESS_LINE = ^MyBox Login:$
+#
+# (default "login:")
+#SUCCESS_LINE = login:
+
+# In case the console constantly fills the screen, having
+# a specified time to stop the test after success is recommended.
+# (in seconds)
+# (default 10)
+#STOP_AFTER_SUCCESS = 10
+
+# In case the console constantly fills the screen, having
+# a specified time to stop the test after failure is recommended.
+# (in seconds)
+# (default 60)
+#STOP_AFTER_FAILURE = 60
+
+# Stop testing if a build fails. If set, the script will end if
+# a failure is detected, otherwise it will save off the .config,
+# dmesg and bootlog in a directory called
+# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
+# if the STORE_FAILURES directory is set.
+# (default 1)
+# Note, even if this is set to zero, there are some errors that still
+# stop the tests.
+#DIE_ON_FAILURE = 1
+
+# Directory to store failure directories on failure. If this is not
+# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
+# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
+# (default undefined)
+#STORE_FAILURES = /home/test/failures
+
+# Build without doing a make mrproper, or removing .config
+# (default 0)
+#BUILD_NOCLEAN = 0
+
+# As the test reads the console, after it hits the SUCCESS_LINE
+# the time it waits for the monitor to settle down between reads
+# can usually be lowered.
+# (in seconds) (default 1)
+#BOOTED_TIMEOUT = 1
+
+# The timeout in seconds when we consider the box hung after
+# the console stops producing output. Be sure to leave enough
+# time here to get past a reboot. Some machines may not produce
+# any console output for a long time during a reboot. You do
+# not want the test to fail just because the system was in
+# the process of rebooting to the test kernel.
+# (default 120)
+#TIMEOUT = 120
+
+# In between tests, a reboot of the box may occur, and this
+# is the time to wait for the console after it stops producing
+# output. Some machines may not produce a large lag on reboot
+# so this should accommodate it.
+# The difference between this and TIMEOUT, is that TIMEOUT happens
+# when rebooting to the test kernel. This sleep time happens
+# after a test has completed and we are about to start running
+# another test. If a reboot to the reliable kernel happens,
+# we wait SLEEP_TIME for the console to stop producing output
+# before starting the next test.
+# (default 60)
+#SLEEP_TIME = 60
+
+# The time in between bisects to sleep (in seconds)
+# (default 60)
+#BISECT_SLEEP_TIME = 60
+
+# Reboot the target box on error (default 0)
+#REBOOT_ON_ERROR = 0
+
+# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#POWEROFF_ON_ERROR = 0
+
+# Power off the target after all tests have completed successfully
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#POWEROFF_ON_SUCCESS = 0
+
+# Reboot the target after all test completed successfully (default 1)
+# (ignored if POWEROFF_ON_SUCCESS is set)
+#REBOOT_ON_SUCCESS = 1
+
+# In case there are issues with rebooting, you can specify this
+# to always powercycle after this amount of time after calling
+# reboot.
+# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
+# makes it powercycle immediately after rebooting. Do not define
+# it if you do not want it.
+# (default undefined)
+#POWERCYCLE_AFTER_REBOOT = 5
+
+# In case there are issues with halting, you can specify this
+# to always poweroff after this amount of time after calling
+# halt.
+# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
+# makes it poweroff immediately after halting. Do not define
+# it if you do not want it.
+# (default undefined)
+#POWEROFF_AFTER_HALT = 20
+
+# A script or command to power off the box (default undefined)
+# Needed for POWEROFF_ON_ERROR and SUCCESS
+#
+# Example for digital loggers power switch:
+#POWER_OFF = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
+#
+# Example for a virtual guest called "Guest".
+#POWER_OFF = virsh destroy Guest
+
+# The way to execute a command on the target
+# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND)
+# The variables SSH_USER, MACHINE and SSH_COMMAND are defined
+#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
+
+# The way to copy a file to the target
+# (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE)
+# The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined.
+#SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE
+
+# The nice way to reboot the target
+# (default ssh $SSH_USER@$MACHINE reboot)
+# The variables SSH_USER and MACHINE are defined.
+#REBOOT = ssh $SSH_USER@$MACHINE reboot
+
+#### Per test run options ####
+# The following options are only allowed in TEST_START sections.
+# They are ignored in the DEFAULTS sections.
+#
+# All of these are optional and undefined by default, although
+#  some of these options are required for TEST_TYPE of patchcheck
+#  and bisect.
+#
+#
+# CHECKOUT = branch
+#
+#  If the BUILD_DIR is a git repository, then you can set this option
+#  to check out the given branch before running the TEST. If you
+#  specify this for the first run, that branch will be used for
+#  all subsequent tests until a new CHECKOUT is set.
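+#
+#  For example (hypothetical branch name):
+#
+#   CHECKOUT = test-branch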
+#
+#
+#
+# For TEST_TYPE = patchcheck
+#
+#  This expects the BUILD_DIR to be a git repository, and
+#  will checkout the PATCHCHECK_START commit.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  The MIN_CONFIG will be used for all builds of the patchcheck. The build type
+#  used for patchcheck is oldconfig.
+#
+#  PATCHCHECK_START is required and is the first patch to
+#   test (the SHA1 of the commit). You may also specify anything
+#   that git checkout allows (branch name, tag, HEAD~3).
+#
+#  PATCHCHECK_END is the last patch to check (default HEAD)
+#
+#  PATCHCHECK_TYPE is required and is the type of test to run:
+#      build, boot, test.
+#
+#   Note, the build test will look for warnings; if a warning occurs
+#     in a file that a commit touches, the build will fail.
+#
+#   If BUILD_NOCLEAN is set, then make mrproper will not be run on
+#   any of the builds, just like all other TEST_TYPE tests. But
+#   what makes patchcheck different from the other tests is that if
+#   BUILD_NOCLEAN is not set, only the first and last patches run
+#   make mrproper. This helps speed up the test.
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = patchcheck
+#   CHECKOUT = mybranch
+#   PATCHCHECK_TYPE = boot
+#   PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
+#   PATCHCHECK_END = HEAD~2
+#
+#
+#
+# For TEST_TYPE = bisect
+#
+#  You can specify a git bisect if the BUILD_DIR is a git repository.
+#  The MIN_CONFIG will be used for all builds of the bisect. The build type
+#  used for bisecting is oldconfig.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  BISECT_TYPE is the type of test to perform:
+#	build	- bad fails to build
+#	boot	- bad builds but fails to boot
+#	test	- bad boots but fails a test
+#
+# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
+# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
+#
+# The above three options are required for a bisect operation.
+#
+# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
+#
+#   If an operation fails in the bisect that was not expected to
+#   fail, then the test ends. The state of the BUILD_DIR will be
+#   left at where the failure occurred. You can examine the
+#   reason for the failure, and perhaps even find a git commit
+#   that would work to continue with. You can run:
+#
+#   git bisect log > /path/to/replay/file
+#
+#   Then add:
+#
+#    BISECT_REPLAY = /path/to/replay/file
+#
+#   and run the test again. The test will perform the initial
+#    git bisect start, git bisect good, and git bisect bad, and
+#    then it will run git bisect replay on this file, before
+#    continuing with the bisect.
+#
+# BISECT_START = commit (optional, default undefined)
+#
+#   As with BISECT_REPLAY, use this if the test failed on a commit that
+#   just happened to be a bad commit in the middle of the bisect,
+#   and you need to skip it. If BISECT_START is defined, it
+#   will check out that commit after doing the initial git bisect start,
+#   git bisect good, git bisect bad, and running the git bisect replay
+#   if BISECT_REPLAY is set.
+#
+# BISECT_REVERSE = 1 (optional, default 0)
+#
+#   In those strange instances where it was broken forever
+#   and you are trying to find where it started to work!
+#   Set BISECT_GOOD to the commit that was last known to fail.
+#   Set BISECT_BAD to the commit that is known to start working.
+#   With BISECT_REVERSE = 1, the test will consider failures as
+#   good, and successes as bad.
+#
+# BISECT_CHECK = 1 (optional, default 0)
+#
+#   Just to be sure the good is good and bad is bad, setting
+#   BISECT_CHECK to 1 will start the bisect by first checking
+#   out BISECT_BAD and making sure it fails, then it will check
+#   out BISECT_GOOD and make sure it succeeds before starting
+#   the bisect (it works for BISECT_REVERSE too).
+#
+#   You can limit the test to just check BISECT_GOOD or
+#   BISECT_BAD with BISECT_CHECK = good or
+#   BISECT_CHECK = bad, respectively.
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_GOOD = v2.6.36
+#   BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
+#   BISECT_TYPE = build
+#   MIN_CONFIG = /home/test/config-bisect
+#
+#
+#
+# For TEST_TYPE = config_bisect
+#
+#  Use this in those cases where you have two different configs. One of them
+#  works, the other does not, and you do not know which config causes
+#  the problem.
+#  The TEST_TYPE config_bisect will bisect the bad config looking for
+#  the config that causes the failure.
+#
+#  The way it works is this:
+#
+#   First it finds a config to work with. Since a different version, or
+#   MIN_CONFIG may cause different dependencies, it must run through this
+#   preparation.
+#
+#   It overwrites any config set in the bad config with a config set in
+#   either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
+#   are minimal and do not disable configs you want to test
+#   (i.e.  # CONFIG_FOO is not set).
+#
+#   An oldconfig is run on the bad config and any new config that
+#   appears will be added to the configs to test.
+#
+#   Finally, it generates a config with the above result and runs it
+#   again through make oldconfig to produce a config that should be
+#   satisfied by kconfig.
+#
+#   Then it starts the bisect.
+#
+#   The configs to test are cut in half. If all the configs in this
+#   half depend on a config in the other half, then the other half
+#   is tested instead. If no configs are enabled by either half, then
+#   this means a circular dependency exists and the test fails.
+#
+#   A config is created with the test half, and the bisect test is run.
+#
+#   If the bisect succeeds, then all configs in the generated config
+#   are removed from the configs to test and added to the configs that
+#   will be enabled for all builds (they will be enabled, but not be part
+#   of the configs to examine).
+#
+#   If the bisect fails, then all test configs that were not enabled by
+#   the config file are removed from the test. These configs will not
+#   be enabled in future tests. Since the current config failed, we consider
+#   this to be a subset of the config that we started with.
+#
+#   When we are down to one config, it is considered the bad config.
+#
+#   Note, the config chosen may not be the true bad config. Due to
+#   dependencies and selections of the kbuild system, multiple
+#   configs may be needed to cause a failure. If you disable the
+#   config that was found and restart the test, and the test fails
+#   again, it is recommended to rerun the config_bisect with a new
+#   bad config without the found config enabled.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  CONFIG_BISECT_TYPE is the type of test to perform:
+#	build	- bad fails to build
+#	boot	- bad builds but fails to boot
+#	test	- bad boots but fails a test
+#
+#   CONFIG_BISECT is the config that failed to boot
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = config_bisect
+#   CONFIG_BISECT_TYPE = build
+#   CONFIG_BISECT = /home/test/config-bad
+#   MIN_CONFIG = /home/test/config-min
+#
diff --git a/usr/Kconfig b/usr/Kconfig
index c2c7fe2..4780dea 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -72,6 +72,15 @@
 	  Support loading of a LZMA encoded initial ramdisk or cpio buffer
 	  If unsure, say N.
 
+config RD_XZ
+	bool "Support initial ramdisks compressed using XZ" if EMBEDDED
+	default !EMBEDDED
+	depends on BLK_DEV_INITRD
+	select DECOMPRESS_XZ
+	help
+	  Support loading of an XZ encoded initial ramdisk or cpio buffer.
+	  If unsure, say N.
+
 config RD_LZO
 	bool "Support initial ramdisks compressed using LZO" if EMBEDDED
 	default !EMBEDDED
@@ -139,6 +148,15 @@
 	  three. Compression is slowest. The initramfs size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config INITRAMFS_COMPRESSION_XZ
+	bool "XZ"
+	depends on RD_XZ
+	help
+	  XZ uses the LZMA2 algorithm. The initramfs size is about 30%
+	  smaller with XZ in comparison to gzip. Decompression speed
+	  is better than that of bzip2 but worse than gzip and LZO.
+	  Compression is slow.
+
 config INITRAMFS_COMPRESSION_LZO
 	bool "LZO"
 	depends on RD_LZO
diff --git a/usr/Makefile b/usr/Makefile
index 6faa444..029ffe6 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -15,6 +15,9 @@
 # Lzma
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA)   = .lzma
 
+# XZ
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_XZ)     = .xz
+
 # Lzo
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO)   = .lzo
 
@@ -50,7 +53,7 @@
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index b2b3c2d..7f06884 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -104,6 +104,8 @@
 	char s[256];
 	time_t mtime = time(NULL);
 
+	if (name[0] == '/')
+		name++;
 	sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
 	       "%08X%08X%08X%08X%08X%08X%08X",
 		"070701",		/* magic */
@@ -152,6 +154,8 @@
 	char s[256];
 	time_t mtime = time(NULL);
 
+	if (name[0] == '/')
+		name++;
 	sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
 	       "%08X%08X%08X%08X%08X%08X%08X",
 		"070701",		/* magic */
@@ -245,6 +249,8 @@
 	else
 		mode |= S_IFCHR;
 
+	if (name[0] == '/')
+		name++;
 	sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
 	       "%08X%08X%08X%08X%08X%08X%08X",
 		"070701",		/* magic */
@@ -303,18 +309,18 @@
 
 	mode |= S_IFREG;
 
-	retval = stat (location, &buf);
-	if (retval) {
-		fprintf (stderr, "File %s could not be located\n", location);
-		goto error;
-	}
-
 	file = open (location, O_RDONLY);
 	if (file < 0) {
 		fprintf (stderr, "File %s could not be opened for reading\n", location);
 		goto error;
 	}
 
+	retval = fstat(file, &buf);
+	if (retval) {
+		fprintf(stderr, "File %s could not be stat()'ed\n", location);
+		goto error;
+	}
+
 	filebuf = malloc(buf.st_size);
 	if (!filebuf) {
 		fprintf (stderr, "out of memory\n");
@@ -332,6 +338,8 @@
 		/* data goes on last link */
 		if (i == nlinks) size = buf.st_size;
 
+		if (name[0] == '/')
+			name++;
 		namesize = strlen(name) + 1;
 		sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
 		       "%08lX%08X%08X%08X%08X%08X%08X",
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 7f1178f..f63ccb0 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -15,3 +15,6 @@
 
 config KVM_MMIO
        bool
+
+config KVM_ASYNC_PF
+       bool
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 7c98928..ae72ae6 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -55,58 +55,31 @@
 	return index;
 }
 
-static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 {
-	struct kvm_assigned_dev_kernel *assigned_dev;
-	int i;
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	u32 vector;
+	int index;
 
-	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
-				    interrupt_work);
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
+		spin_lock(&assigned_dev->intx_lock);
+		disable_irq_nosync(irq);
+		assigned_dev->host_irq_disabled = true;
+		spin_unlock(&assigned_dev->intx_lock);
+	}
 
-	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		struct kvm_guest_msix_entry *guest_entries =
-			assigned_dev->guest_msix_entries;
-		for (i = 0; i < assigned_dev->entries_nr; i++) {
-			if (!(guest_entries[i].flags &
-					KVM_ASSIGNED_MSIX_PENDING))
-				continue;
-			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
+		index = find_index_from_host_irq(assigned_dev, irq);
+		if (index >= 0) {
+			vector = assigned_dev->
+					guest_msix_entries[index].vector;
 			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id,
-				    guest_entries[i].vector, 1);
+				    assigned_dev->irq_source_id, vector, 1);
 		}
 	} else
 		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 			    assigned_dev->guest_irq, 1);
 
-	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-}
-
-static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
-{
-	unsigned long flags;
-	struct kvm_assigned_dev_kernel *assigned_dev =
-		(struct kvm_assigned_dev_kernel *) dev_id;
-
-	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int index = find_index_from_host_irq(assigned_dev, irq);
-		if (index < 0)
-			goto out;
-		assigned_dev->guest_msix_entries[index].flags |=
-			KVM_ASSIGNED_MSIX_PENDING;
-	}
-
-	schedule_work(&assigned_dev->interrupt_work);
-
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
-		disable_irq_nosync(irq);
-		assigned_dev->host_irq_disabled = true;
-	}
-
-out:
-	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -114,7 +87,6 @@
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_assigned_dev_kernel *dev;
-	unsigned long flags;
 
 	if (kian->gsi == -1)
 		return;
@@ -127,12 +99,12 @@
 	/* The guest irq may be shared so this ack may be
 	 * from another device.
 	 */
-	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
+	spin_lock(&dev->intx_lock);
 	if (dev->host_irq_disabled) {
 		enable_irq(dev->host_irq);
 		dev->host_irq_disabled = false;
 	}
-	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
+	spin_unlock(&dev->intx_lock);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
@@ -141,6 +113,9 @@
 	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
 	assigned_dev->ack_notifier.gsi = -1;
 
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+		    assigned_dev->guest_irq, 0);
+
 	if (assigned_dev->irq_source_id != -1)
 		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 	assigned_dev->irq_source_id = -1;
@@ -152,28 +127,19 @@
 			      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	/*
-	 * In kvm_free_device_irq, cancel_work_sync return true if:
-	 * 1. work is scheduled, and then cancelled.
-	 * 2. work callback is executed.
-	 *
-	 * The first one ensured that the irq is disabled and no more events
-	 * would happen. But for the second one, the irq may be enabled (e.g.
-	 * for MSI). So we disable irq here to prevent further events.
+	 * We disable irq here to prevent further events.
 	 *
 	 * Notice this maybe result in nested disable if the interrupt type is
 	 * INTx, but it's OK for we are going to free it.
 	 *
 	 * If this function is a part of VM destroy, please ensure that till
 	 * now, the kvm state is still legal for probably we also have to wait
-	 * interrupt_work done.
+	 * on a currently running IRQ handler.
 	 */
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 		int i;
 		for (i = 0; i < assigned_dev->entries_nr; i++)
-			disable_irq_nosync(assigned_dev->
-					   host_msix_entries[i].vector);
-
-		cancel_work_sync(&assigned_dev->interrupt_work);
+			disable_irq(assigned_dev->host_msix_entries[i].vector);
 
 		for (i = 0; i < assigned_dev->entries_nr; i++)
 			free_irq(assigned_dev->host_msix_entries[i].vector,
@@ -185,8 +151,7 @@
 		pci_disable_msix(assigned_dev->dev);
 	} else {
 		/* Deal with MSI and INTx */
-		disable_irq_nosync(assigned_dev->host_irq);
-		cancel_work_sync(&assigned_dev->interrupt_work);
+		disable_irq(assigned_dev->host_irq);
 
 		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
@@ -232,7 +197,8 @@
 {
 	kvm_free_assigned_irq(kvm, assigned_dev);
 
-	pci_reset_function(assigned_dev->dev);
+	__pci_reset_function(assigned_dev->dev);
+	pci_restore_state(assigned_dev->dev);
 
 	pci_release_regions(assigned_dev->dev);
 	pci_disable_device(assigned_dev->dev);
@@ -265,8 +231,8 @@
 	 * on the same interrupt line is not a happy situation: there
 	 * are going to be long delays in accepting, acking, etc.
 	 */
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
-			0, "kvm_assigned_intx_device", (void *)dev))
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 IRQF_ONESHOT, dev->irq_name, (void *)dev))
 		return -EIO;
 	return 0;
 }
@@ -284,8 +250,8 @@
 	}
 
 	dev->host_irq = dev->dev->irq;
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
-			"kvm_assigned_msi_device", (void *)dev)) {
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 0, dev->irq_name, (void *)dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -310,10 +276,9 @@
 		return r;
 
 	for (i = 0; i < dev->entries_nr; i++) {
-		r = request_irq(dev->host_msix_entries[i].vector,
-				kvm_assigned_dev_intr, 0,
-				"kvm_assigned_msix_device",
-				(void *)dev);
+		r = request_threaded_irq(dev->host_msix_entries[i].vector,
+					 NULL, kvm_assigned_dev_thread,
+					 0, dev->irq_name, (void *)dev);
 		if (r)
 			goto err;
 	}
@@ -370,6 +335,9 @@
 	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
 		return r;
 
+	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
+		 pci_name(dev->dev));
+
 	switch (host_irq_type) {
 	case KVM_DEV_IRQ_HOST_INTX:
 		r = assigned_device_enable_host_intx(kvm, dev);
@@ -547,6 +515,7 @@
 	}
 
 	pci_reset_function(dev);
+	pci_save_state(dev);
 
 	match->assigned_dev_id = assigned_dev->assigned_dev_id;
 	match->host_segnr = assigned_dev->segnr;
@@ -554,12 +523,10 @@
 	match->host_devfn = assigned_dev->devfn;
 	match->flags = assigned_dev->flags;
 	match->dev = dev;
-	spin_lock_init(&match->assigned_dev_lock);
+	spin_lock_init(&match->intx_lock);
 	match->irq_source_id = -1;
 	match->kvm = kvm;
 	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
 
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
@@ -579,6 +546,7 @@
 	mutex_unlock(&kvm->lock);
 	return r;
 out_list_del:
+	pci_restore_state(dev);
 	list_del(&match->list);
 	pci_release_regions(dev);
 out_disable:
@@ -651,9 +619,9 @@
 			r = -ENOMEM;
 			goto msix_nr_out;
 		}
-		adev->guest_msix_entries = kzalloc(
-				sizeof(struct kvm_guest_msix_entry) *
-				entry_nr->entry_nr, GFP_KERNEL);
+		adev->guest_msix_entries =
+			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
+				GFP_KERNEL);
 		if (!adev->guest_msix_entries) {
 			kfree(adev->host_msix_entries);
 			r = -ENOMEM;
@@ -706,7 +674,7 @@
 				  unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
-	int r = -ENOTTY;
+	int r;
 
 	switch (ioctl) {
 	case KVM_ASSIGN_PCI_DEVICE: {
@@ -724,7 +692,6 @@
 		r = -EOPNOTSUPP;
 		break;
 	}
-#ifdef KVM_CAP_ASSIGN_DEV_IRQ
 	case KVM_ASSIGN_DEV_IRQ: {
 		struct kvm_assigned_irq assigned_irq;
 
@@ -747,8 +714,6 @@
 			goto out;
 		break;
 	}
-#endif
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
 	case KVM_DEASSIGN_PCI_DEVICE: {
 		struct kvm_assigned_pci_dev assigned_dev;
 
@@ -760,7 +725,6 @@
 			goto out;
 		break;
 	}
-#endif
 #ifdef KVM_CAP_IRQ_ROUTING
 	case KVM_SET_GSI_ROUTING: {
 		struct kvm_irq_routing routing;
@@ -813,6 +777,9 @@
 		break;
 	}
 #endif
+	default:
+		r = -ENOTTY;
+		break;
 	}
 out:
 	return r;
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
new file mode 100644
index 0000000..74268b4
--- /dev/null
+++ b/virt/kvm/async_pf.c
@@ -0,0 +1,216 @@
+/*
+ * kvm asynchronous fault support
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Author:
+ *      Gleb Natapov <gleb@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mmu_context.h>
+
+#include "async_pf.h"
+#include <trace/events/kvm.h>
+
+static struct kmem_cache *async_pf_cache;
+
+int kvm_async_pf_init(void)
+{
+	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
+
+	if (!async_pf_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_async_pf_deinit(void)
+{
+	if (async_pf_cache)
+		kmem_cache_destroy(async_pf_cache);
+	async_pf_cache = NULL;
+}
+
+void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	INIT_LIST_HEAD(&vcpu->async_pf.done);
+	INIT_LIST_HEAD(&vcpu->async_pf.queue);
+	spin_lock_init(&vcpu->async_pf.lock);
+}
+
+static void async_pf_execute(struct work_struct *work)
+{
+	struct page *page = NULL;
+	struct kvm_async_pf *apf =
+		container_of(work, struct kvm_async_pf, work);
+	struct mm_struct *mm = apf->mm;
+	struct kvm_vcpu *vcpu = apf->vcpu;
+	unsigned long addr = apf->addr;
+	gva_t gva = apf->gva;
+
+	might_sleep();
+
+	use_mm(mm);
+	down_read(&mm->mmap_sem);
+	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	up_read(&mm->mmap_sem);
+	unuse_mm(mm);
+
+	spin_lock(&vcpu->async_pf.lock);
+	list_add_tail(&apf->link, &vcpu->async_pf.done);
+	apf->page = page;
+	apf->done = true;
+	spin_unlock(&vcpu->async_pf.lock);
+
+	/*
+	 * apf may be freed by kvm_check_async_pf_completion() after
+	 * this point
+	 */
+
+	trace_kvm_async_pf_completed(addr, page, gva);
+
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
+
+	mmdrop(mm);
+	kvm_put_kvm(vcpu->kvm);
+}
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+{
+	/* cancel outstanding work queue item */
+	while (!list_empty(&vcpu->async_pf.queue)) {
+		struct kvm_async_pf *work =
+			list_entry(vcpu->async_pf.queue.next,
+				   typeof(*work), queue);
+		cancel_work_sync(&work->work);
+		list_del(&work->queue);
+		if (!work->done) /* work was canceled */
+			kmem_cache_free(async_pf_cache, work);
+	}
+
+	spin_lock(&vcpu->async_pf.lock);
+	while (!list_empty(&vcpu->async_pf.done)) {
+		struct kvm_async_pf *work =
+			list_entry(vcpu->async_pf.done.next,
+				   typeof(*work), link);
+		list_del(&work->link);
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
+	spin_unlock(&vcpu->async_pf.lock);
+
+	vcpu->async_pf.queued = 0;
+}
+
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+{
+	struct kvm_async_pf *work;
+
+	while (!list_empty_careful(&vcpu->async_pf.done) &&
+	      kvm_arch_can_inject_async_page_present(vcpu)) {
+		spin_lock(&vcpu->async_pf.lock);
+		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+					      link);
+		list_del(&work->link);
+		spin_unlock(&vcpu->async_pf.lock);
+
+		if (work->page)
+			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_present(vcpu, work);
+
+		list_del(&work->queue);
+		vcpu->async_pf.queued--;
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
+}
+
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch)
+{
+	struct kvm_async_pf *work;
+
+	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
+		return 0;
+
+	/* setup delayed work */
+
+	/*
+	 * Allocate with GFP_NOWAIT: if we would have to sleep to
+	 * allocate, we may as well sleep faulting the page in instead.
+	 */
+	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+	if (!work)
+		return 0;
+
+	work->page = NULL;
+	work->done = false;
+	work->vcpu = vcpu;
+	work->gva = gva;
+	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->arch = *arch;
+	work->mm = current->mm;
+	atomic_inc(&work->mm->mm_count);
+	kvm_get_kvm(work->vcpu->kvm);
+
+	/*
+	 * This can't really happen; otherwise gfn_to_pfn_async
+	 * would have succeeded.
+	 */
+	if (unlikely(kvm_is_error_hva(work->addr)))
+		goto retry_sync;
+
+	INIT_WORK(&work->work, async_pf_execute);
+	if (!schedule_work(&work->work))
+		goto retry_sync;
+
+	list_add_tail(&work->queue, &vcpu->async_pf.queue);
+	vcpu->async_pf.queued++;
+	kvm_arch_async_page_not_present(vcpu, work);
+	return 1;
+retry_sync:
+	kvm_put_kvm(work->vcpu->kvm);
+	mmdrop(work->mm);
+	kmem_cache_free(async_pf_cache, work);
+	return 0;
+}
+
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
+{
+	struct kvm_async_pf *work;
+
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return 0;
+
+	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	work->page = bad_page;
+	get_page(bad_page);
+	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
+
+	spin_lock(&vcpu->async_pf.lock);
+	list_add_tail(&work->link, &vcpu->async_pf.done);
+	spin_unlock(&vcpu->async_pf.lock);
+
+	vcpu->async_pf.queued++;
+	return 0;
+}
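
The new async_pf.c hands a blocking get_user_pages() to a workqueue item, records the completion on a per-vcpu done list under async_pf.lock, and lets the vcpu drain completions later. Below is a rough userspace analogue of that queueing pattern using POSIX threads; all names are invented, and it models only the submit/complete/drain flow, not KVM's locking or lifetime rules. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Simplified stand-in for struct kvm_async_pf. */
struct fake_apf {
	unsigned long addr;
	int done;
	struct fake_apf *next;
};

static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_apf *done_list;

/* Worker: "fault the page in" (sleep), then queue the completion. */
static void *apf_execute(void *arg)
{
	struct fake_apf *apf = arg;

	usleep(10000);			/* pretend get_user_pages() slept */

	pthread_mutex_lock(&done_lock);
	apf->done = 1;
	apf->next = done_list;
	done_list = apf;
	pthread_mutex_unlock(&done_lock);
	return NULL;
}

/* "vcpu" side: drain completions, like kvm_check_async_pf_completion(). */
static void check_completions(void)
{
	for (;;) {
		struct fake_apf *apf;

		pthread_mutex_lock(&done_lock);
		apf = done_list;
		if (apf)
			done_list = apf->next;
		pthread_mutex_unlock(&done_lock);

		if (!apf)
			break;
		printf("async fault at %#lx completed\n", apf->addr);
		free(apf);
	}
}

int main(void)
{
	pthread_t worker[4];
	int i;

	for (i = 0; i < 4; i++) {
		struct fake_apf *apf = calloc(1, sizeof(*apf));

		apf->addr = 0x1000ul * (i + 1);
		pthread_create(&worker[i], NULL, apf_execute, apf);
	}
	for (i = 0; i < 4; i++)
		pthread_join(worker[i], NULL);

	check_completions();
	return 0;
}
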
diff --git a/virt/kvm/async_pf.h b/virt/kvm/async_pf.h
new file mode 100644
index 0000000..e7ef644
--- /dev/null
+++ b/virt/kvm/async_pf.h
@@ -0,0 +1,36 @@
+/*
+ * kvm asynchronous fault support
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Author:
+ *      Gleb Natapov <gleb@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __KVM_ASYNC_PF_H__
+#define __KVM_ASYNC_PF_H__
+
+#ifdef CONFIG_KVM_ASYNC_PF
+int kvm_async_pf_init(void);
+void kvm_async_pf_deinit(void);
+void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
+#else
+#define kvm_async_pf_init() (0)
+#define kvm_async_pf_deinit() do {} while (0)
+#define kvm_async_pf_vcpu_init(C) do {} while (0)
+#endif
+
+#endif
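
When CONFIG_KVM_ASYNC_PF is off, the macros above keep callers compiling as no-ops. A tiny standalone illustration of the same convention; FEATURE_FOO and feature_foo_init are made up:

#include <stdio.h>

/* #define FEATURE_FOO 1 */		/* flip to use the real implementation */

#ifdef FEATURE_FOO
int feature_foo_init(void);		/* real version would live in foo.c */
#else
#define feature_foo_init()	(0)	/* stub: always "succeeds" */
#endif

int main(void)
{
	/* Callers are written once; the stub keeps this compiling. */
	if (feature_foo_init())
		fprintf(stderr, "feature init failed\n");
	else
		printf("feature init ok (or compiled out)\n");
	return 0;
}
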
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index c1f1e3c..2ca4535 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -44,14 +44,19 @@
  */
 
 struct _irqfd {
-	struct kvm               *kvm;
-	struct eventfd_ctx       *eventfd;
-	int                       gsi;
-	struct list_head          list;
-	poll_table                pt;
-	wait_queue_t              wait;
-	struct work_struct        inject;
-	struct work_struct        shutdown;
+	/* Used for MSI fast-path */
+	struct kvm *kvm;
+	wait_queue_t wait;
+	/* Update side is protected by irqfds.lock */
+	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+	/* Used for level IRQ fast-path */
+	int gsi;
+	struct work_struct inject;
+	/* Used for setup/shutdown */
+	struct eventfd_ctx *eventfd;
+	struct list_head list;
+	poll_table pt;
+	struct work_struct shutdown;
 };
 
 static struct workqueue_struct *irqfd_cleanup_wq;
@@ -125,14 +130,22 @@
 {
 	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
 	unsigned long flags = (unsigned long)key;
+	struct kvm_kernel_irq_routing_entry *irq;
+	struct kvm *kvm = irqfd->kvm;
 
-	if (flags & POLLIN)
+	if (flags & POLLIN) {
+		rcu_read_lock();
+		irq = rcu_dereference(irqfd->irq_entry);
 		/* An event has been signaled, inject an interrupt */
-		schedule_work(&irqfd->inject);
+		if (irq)
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		else
+			schedule_work(&irqfd->inject);
+		rcu_read_unlock();
+	}
 
 	if (flags & POLLHUP) {
 		/* The eventfd is closing, detach from KVM */
-		struct kvm *kvm = irqfd->kvm;
 		unsigned long flags;
 
 		spin_lock_irqsave(&kvm->irqfds.lock, flags);
@@ -163,9 +176,31 @@
 	add_wait_queue(wqh, &irqfd->wait);
 }
 
+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+			 struct kvm_irq_routing_table *irq_rt)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	struct hlist_node *n;
+
+	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+		rcu_assign_pointer(irqfd->irq_entry, NULL);
+		return;
+	}
+
+	hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+		/* Only fast-path MSI. */
+		if (e->type == KVM_IRQ_ROUTING_MSI)
+			rcu_assign_pointer(irqfd->irq_entry, e);
+		else
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
+	}
+}
+
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
+	struct kvm_irq_routing_table *irq_rt;
 	struct _irqfd *irqfd, *tmp;
 	struct file *file = NULL;
 	struct eventfd_ctx *eventfd = NULL;
@@ -215,6 +250,10 @@
 		goto fail;
 	}
 
+	irq_rt = rcu_dereference_protected(kvm->irq_routing,
+					   lockdep_is_held(&kvm->irqfds.lock));
+	irqfd_update(kvm, irqfd, irq_rt);
+
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -271,8 +310,17 @@
 	spin_lock_irq(&kvm->irqfds.lock);
 
 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+			/*
+			 * This rcu_assign_pointer is needed for the case
+			 * where another thread calls kvm_irq_routing_update
+			 * before we flush the workqueue below. It is paired
+			 * with the synchronize_rcu done by the caller of
+			 * that function.
+			 */
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
 			irqfd_deactivate(irqfd);
+		}
 	}
 
 	spin_unlock_irq(&kvm->irqfds.lock);
@@ -322,6 +370,25 @@
 }
 
 /*
+ * Change irq_routing and irqfd.
+ * Caller must invoke synchronize_rcu afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+			    struct kvm_irq_routing_table *irq_rt)
+{
+	struct _irqfd *irqfd;
+
+	spin_lock_irq(&kvm->irqfds.lock);
+
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+		irqfd_update(kvm, irqfd, irq_rt);
+
+	spin_unlock_irq(&kvm->irqfds.lock);
+}
+
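
From userspace, irqfd setup is unchanged by the MSI fast path added above: create an eventfd and register it with KVM_IRQFD, and any write to the eventfd injects the configured GSI. A hedged sketch, assuming an x86 host with /dev/kvm access, the kvm headers installed, and the in-kernel irqchip; GSI 5 is an arbitrary example.

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) { perror("/dev/kvm"); return 1; }

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) { perror("KVM_CREATE_VM"); return 1; }

	/* The in-kernel irqchip provides the GSI routing table. */
	if (ioctl(vm, KVM_CREATE_IRQCHIP, 0) < 0) {
		perror("KVM_CREATE_IRQCHIP");
		return 1;
	}

	int efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0) { perror("eventfd"); return 1; }

	struct kvm_irqfd irqfd;
	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd = efd;
	irqfd.gsi = 5;			/* example GSI */
	if (ioctl(vm, KVM_IRQFD, &irqfd) < 0) {
		perror("KVM_IRQFD");
		return 1;
	}

	/* Any write to the eventfd now triggers an injection. */
	uint64_t one = 1;
	if (write(efd, &one, sizeof(one)) != sizeof(one))
		perror("write");

	printf("irqfd registered for GSI %u\n", irqfd.gsi);
	return 0;
}
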
+/*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
  * queue to prevent deadlock against flushing the normal work-queue.
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 8edca91..9f614b4 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -114,8 +114,8 @@
 	return r;
 }
 
-static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		       struct kvm *kvm, int irq_source_id, int level)
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		struct kvm *kvm, int irq_source_id, int level)
 {
 	struct kvm_lapic_irq irq;
 
@@ -409,8 +409,9 @@
 
 	mutex_lock(&kvm->irq_lock);
 	old = kvm->irq_routing;
-	rcu_assign_pointer(kvm->irq_routing, new);
+	kvm_irq_routing_update(kvm, new);
 	mutex_unlock(&kvm->irq_lock);
+
 	synchronize_rcu();
 
 	new = old;
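
kvm_set_irq_routing() now publishes the new table through kvm_irq_routing_update() under irq_lock and only frees the old table after synchronize_rcu(). The sketch below is a much-simplified userspace analogue in which a reader-writer lock stands in for RCU, so the write lock plays the role of the grace period; the names are invented.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct routing_table {
	int nr_entries;
};

static pthread_rwlock_t routing_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct routing_table *routing;	/* current table */

/* Reader side: the delivery path would do this per event. */
static int lookup_nr_entries(void)
{
	int n;

	pthread_rwlock_rdlock(&routing_lock);
	n = routing ? routing->nr_entries : 0;
	pthread_rwlock_unlock(&routing_lock);
	return n;
}

/* Writer side: swap in a new table, then free the old one. */
static void routing_update(struct routing_table *new)
{
	struct routing_table *old;

	pthread_rwlock_wrlock(&routing_lock);
	old = routing;
	routing = new;
	pthread_rwlock_unlock(&routing_lock);	/* simplified "grace period" */
	free(old);
}

int main(void)
{
	struct routing_table *t = calloc(1, sizeof(*t));

	t->nr_entries = 24;
	routing_update(t);
	printf("routing table has %d entries\n", lookup_nr_entries());

	routing_update(NULL);			/* tear down */
	return 0;
}
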
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5225052..f29abeb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -55,6 +55,7 @@
 #include <asm-generic/bitops/le.h>
 
 #include "coalesced_mmio.h"
+#include "async_pf.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
@@ -89,7 +90,8 @@
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static bool kvm_rebooting;
+bool kvm_rebooting;
+EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
@@ -102,8 +104,26 @@
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
-		struct page *page = compound_head(pfn_to_page(pfn));
-		return PageReserved(page);
+		int reserved;
+		struct page *tail = pfn_to_page(pfn);
+		struct page *head = compound_trans_head(tail);
+		reserved = PageReserved(head);
+		if (head != tail) {
+			/*
+			 * "head" is not a dangling pointer
+			 * (compound_trans_head takes care of that),
+			 * but the hugepage may have been split from
+			 * under us (and we may not hold a reference
+			 * count on the head page, so it can be reused
+			 * before we run PageReserved), so we have to
+			 * check PageTail before returning what we
+			 * just read.
+			 */
+			smp_rmb();
+			if (PageTail(tail))
+				return reserved;
+		}
+		return PageReserved(tail);
 	}
 
 	return true;
@@ -167,8 +187,12 @@
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	int dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
@@ -186,6 +210,7 @@
 	vcpu->kvm = kvm;
 	vcpu->vcpu_id = id;
 	init_waitqueue_head(&vcpu->wq);
+	kvm_async_pf_vcpu_init(vcpu);
 
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page) {
@@ -247,7 +272,7 @@
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address);
+	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -291,6 +316,7 @@
 	kvm->mmu_notifier_count++;
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush |= kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -344,6 +370,22 @@
 	return young;
 }
 
+static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
+				       struct mm_struct *mm,
+				       unsigned long address)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	int young, idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	young = kvm_test_age_hva(kvm, address);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	return young;
+}
+
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 				     struct mm_struct *mm)
 {
@@ -360,6 +402,7 @@
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
+	.test_young		= kvm_mmu_notifier_test_young,
 	.change_pte		= kvm_mmu_notifier_change_pte,
 	.release		= kvm_mmu_notifier_release,
 };
@@ -381,11 +424,15 @@
 
 static struct kvm *kvm_create_vm(void)
 {
-	int r = 0, i;
-	struct kvm *kvm = kvm_arch_create_vm();
+	int r, i;
+	struct kvm *kvm = kvm_arch_alloc_vm();
 
-	if (IS_ERR(kvm))
-		goto out;
+	if (!kvm)
+		return ERR_PTR(-ENOMEM);
+
+	r = kvm_arch_init_vm(kvm);
+	if (r)
+		goto out_err_nodisable;
 
 	r = hardware_enable_all();
 	if (r)
@@ -399,23 +446,19 @@
 	r = -ENOMEM;
 	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!kvm->memslots)
-		goto out_err;
+		goto out_err_nosrcu;
 	if (init_srcu_struct(&kvm->srcu))
-		goto out_err;
+		goto out_err_nosrcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 					GFP_KERNEL);
-		if (!kvm->buses[i]) {
-			cleanup_srcu_struct(&kvm->srcu);
+		if (!kvm->buses[i])
 			goto out_err;
-		}
 	}
 
 	r = kvm_init_mmu_notifier(kvm);
-	if (r) {
-		cleanup_srcu_struct(&kvm->srcu);
+	if (r)
 		goto out_err;
-	}
 
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
@@ -429,19 +472,35 @@
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
-out:
+
 	return kvm;
 
 out_err:
+	cleanup_srcu_struct(&kvm->srcu);
+out_err_nosrcu:
 	hardware_disable_all();
 out_err_nodisable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm->buses[i]);
 	kfree(kvm->memslots);
-	kfree(kvm);
+	kvm_arch_free_vm(kvm);
 	return ERR_PTR(r);
 }
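
The reworked kvm_create_vm() unwinds through labelled error paths (out_err, out_err_nosrcu, out_err_nodisable) so each failure releases exactly what was set up before it. A generic, hypothetical sketch of that goto-unwind idiom:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct thing {
	char *a;
	char *b;
	char *c;
};

/* Acquire three resources; on failure, release only what was acquired. */
static struct thing *thing_create(void)
{
	struct thing *t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;

	t->a = malloc(32);
	if (!t->a)
		goto out_err_noa;
	t->b = malloc(32);
	if (!t->b)
		goto out_err_nob;
	t->c = malloc(32);
	if (!t->c)
		goto out_err;

	return t;

out_err:
	free(t->b);
out_err_nob:
	free(t->a);
out_err_noa:
	free(t);
	return NULL;
}

int main(void)
{
	struct thing *t = thing_create();

	if (!t) {
		fprintf(stderr, "thing_create failed\n");
		return 1;
	}
	free(t->c); free(t->b); free(t->a); free(t);
	return 0;
}
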
 
+static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	if (!memslot->dirty_bitmap)
+		return;
+
+	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
+		vfree(memslot->dirty_bitmap_head);
+	else
+		kfree(memslot->dirty_bitmap_head);
+
+	memslot->dirty_bitmap = NULL;
+	memslot->dirty_bitmap_head = NULL;
+}
+
 /*
  * Free any memory in @free but not in @dont.
  */
@@ -454,7 +513,7 @@
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
-		vfree(free->dirty_bitmap);
+		kvm_destroy_dirty_bitmap(free);
 
 
 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
@@ -465,7 +524,6 @@
 	}
 
 	free->npages = 0;
-	free->dirty_bitmap = NULL;
 	free->rmap = NULL;
 }
 
@@ -499,6 +557,9 @@
 	kvm_arch_flush_shadow(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
+	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->srcu);
+	kvm_arch_free_vm(kvm);
 	hardware_disable_all();
 	mmdrop(mm);
 }
@@ -528,6 +589,27 @@
 }
 
 /*
+ * Allocation size is twice as large as the actual dirty bitmap size.
+ * This makes it possible to do double buffering: see x86's
+ * kvm_vm_ioctl_get_dirty_log().
+ */
+static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
+
+	if (dirty_bytes > PAGE_SIZE)
+		memslot->dirty_bitmap = vzalloc(dirty_bytes);
+	else
+		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
+
+	if (!memslot->dirty_bitmap)
+		return -ENOMEM;
+
+	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+	return 0;
+}
+
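
kvm_create_dirty_bitmap() allocates twice the bitmap size so the dirty log can be double buffered: one half keeps collecting dirty bits while the other half is handed out as a stable snapshot. A simplified userspace sketch of the idea follows; the sizes and helpers are illustrative and do not mirror KVM's exact bookkeeping.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NPAGES		64
#define BITMAP_BYTES	(NPAGES / 8)

struct dirty_log {
	unsigned char *head;	/* 2 * BITMAP_BYTES allocation */
	unsigned char *active;	/* half currently collecting bits */
};

static void mark_dirty(struct dirty_log *log, int page)
{
	log->active[page / 8] |= 1u << (page % 8);
}

/* Hand the caller a stable snapshot and keep logging into the other half. */
static unsigned char *get_dirty_log(struct dirty_log *log)
{
	unsigned char *snapshot = log->active;
	unsigned char *other = (snapshot == log->head)
			       ? log->head + BITMAP_BYTES : log->head;

	memset(other, 0, BITMAP_BYTES);	/* fresh half for new dirty bits */
	log->active = other;
	return snapshot;
}

int main(void)
{
	struct dirty_log log;
	int i;

	log.head = calloc(2, BITMAP_BYTES);
	log.active = log.head;

	mark_dirty(&log, 3);
	mark_dirty(&log, 42);

	unsigned char *snap = get_dirty_log(&log);
	for (i = 0; i < NPAGES; i++)
		if (snap[i / 8] & (1u << (i % 8)))
			printf("page %d dirty\n", i);

	free(log.head);
	return 0;
}
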
+/*
  * Allocate some memory and give it an address in the guest physical address
  * space.
  *
@@ -604,13 +686,11 @@
 	/* Allocate if a slot is being created */
 #ifndef CONFIG_S390
 	if (npages && !new.rmap) {
-		new.rmap = vmalloc(npages * sizeof(*new.rmap));
+		new.rmap = vzalloc(npages * sizeof(*new.rmap));
 
 		if (!new.rmap)
 			goto out_free;
 
-		memset(new.rmap, 0, npages * sizeof(*new.rmap));
-
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 	}
@@ -633,14 +713,11 @@
 			     >> KVM_HPAGE_GFN_SHIFT(level));
 		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
-		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
 
 		if (!new.lpage_info[i])
 			goto out_free;
 
-		memset(new.lpage_info[i], 0,
-		       lpages * sizeof(*new.lpage_info[i]));
-
 		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][0].write_count = 1;
 		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
@@ -661,12 +738,8 @@
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
-
-		new.dirty_bitmap = vmalloc(dirty_bytes);
-		if (!new.dirty_bitmap)
+		if (kvm_create_dirty_bitmap(&new) < 0)
 			goto out_free;
-		memset(new.dirty_bitmap, 0, dirty_bytes);
 		/* destroy any largepage mappings for dirty tracking */
 		if (old.npages)
 			flush_shadow = 1;
@@ -685,6 +758,7 @@
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		if (mem->slot >= slots->nmemslots)
 			slots->nmemslots = mem->slot + 1;
+		slots->generation++;
 		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
 
 		old_memslots = kvm->memslots;
@@ -719,6 +793,7 @@
 	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 	if (mem->slot >= slots->nmemslots)
 		slots->nmemslots = mem->slot + 1;
+	slots->generation++;
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
@@ -849,10 +924,10 @@
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
+						gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -863,6 +938,11 @@
 	}
 	return NULL;
 }
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
+}
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -925,12 +1005,9 @@
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 				     gfn_t *nr_pages)
 {
-	struct kvm_memory_slot *slot;
-
-	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 
@@ -942,28 +1019,61 @@
 
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
-	return gfn_to_hva_many(kvm, gfn, NULL);
+	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
+static pfn_t get_fault_pfn(void)
+{
+	get_page(fault_page);
+	return fault_pfn;
+}
+
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
+			bool *async, bool write_fault, bool *writable)
 {
 	struct page *page[1];
-	int npages;
+	int npages = 0;
 	pfn_t pfn;
 
-	if (atomic)
+	/* we can do it either atomically or asynchronously, not both */
+	BUG_ON(atomic && async);
+
+	BUG_ON(!write_fault && !writable);
+
+	if (writable)
+		*writable = true;
+
+	if (atomic || async)
 		npages = __get_user_pages_fast(addr, 1, 1, page);
-	else {
+
+	if (unlikely(npages != 1) && !atomic) {
 		might_sleep();
-		npages = get_user_pages_fast(addr, 1, 1, page);
+
+		if (writable)
+			*writable = write_fault;
+
+		npages = get_user_pages_fast(addr, 1, write_fault, page);
+
+		/* map read fault as writable if possible */
+		if (unlikely(!write_fault) && npages == 1) {
+			struct page *wpage[1];
+
+			npages = __get_user_pages_fast(addr, 1, 1, wpage);
+			if (npages == 1) {
+				*writable = true;
+				put_page(page[0]);
+				page[0] = wpage[0];
+			}
+			npages = 1;
+		}
 	}
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
 		if (atomic)
-			goto return_fault_page;
+			return get_fault_pfn();
 
 		down_read(&current->mm->mmap_sem);
 		if (is_hwpoison_address(addr)) {
@@ -972,19 +1082,20 @@
 			return page_to_pfn(hwpoison_page);
 		}
 
-		vma = find_vma(current->mm, addr);
+		vma = find_vma_intersection(current->mm, addr, addr+1);
 
-		if (vma == NULL || addr < vma->vm_start ||
-		    !(vma->vm_flags & VM_PFNMAP)) {
-			up_read(&current->mm->mmap_sem);
-return_fault_page:
-			get_page(fault_page);
-			return page_to_pfn(fault_page);
+		if (vma == NULL)
+			pfn = get_fault_pfn();
+		else if ((vma->vm_flags & VM_PFNMAP)) {
+			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+				vma->vm_pgoff;
+			BUG_ON(!kvm_is_mmio_pfn(pfn));
+		} else {
+			if (async && (vma->vm_flags & VM_WRITE))
+				*async = true;
+			pfn = get_fault_pfn();
 		}
-
-		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 		up_read(&current->mm->mmap_sem);
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
 	} else
 		pfn = page_to_pfn(page[0]);
 
@@ -993,40 +1104,58 @@
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
 {
-	return hva_to_pfn(kvm, addr, true);
+	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
 
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
+			  bool write_fault, bool *writable)
 {
 	unsigned long addr;
 
+	if (async)
+		*async = false;
+
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr)) {
 		get_page(bad_page);
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr, atomic);
+	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
 }
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, true);
+	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
+
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false);
+	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr, false);
+	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
 }
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
@@ -1035,7 +1164,7 @@
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
 
@@ -1219,9 +1348,51 @@
 	return 0;
 }
 
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int offset = offset_in_page(gpa);
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+
+	ghc->gpa = gpa;
+	ghc->generation = slots->generation;
+	ghc->memslot = __gfn_to_memslot(slots, gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+	if (!kvm_is_error_hva(ghc->hva))
+		ghc->hva += offset;
+	else
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int r;
+
+	if (slots->generation != ghc->generation)
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+	if (kvm_is_error_hva(ghc->hva))
+		return -EFAULT;
+
+	r = copy_to_user((void __user *)ghc->hva, data, len);
+	if (r)
+		return -EFAULT;
+	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+
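
The gfn_to_hva_cache records the memslot generation it was built against, and kvm_write_guest_cached() rebuilds it whenever slots->generation has moved on. The same generation-check pattern in a standalone sketch, with a deliberately trivial cached computation:

#include <stdio.h>

static unsigned long table_generation;	/* bumped on every "slot" change */
static int table_value = 10;		/* the data being cached */

struct value_cache {
	unsigned long generation;
	int value;
};

static void cache_init(struct value_cache *c)
{
	c->generation = table_generation;
	c->value = table_value * 2;	/* some derived, cacheable result */
}

static int cached_read(struct value_cache *c)
{
	if (c->generation != table_generation)
		cache_init(c);		/* stale: rebuild, like the hva cache */
	return c->value;
}

int main(void)
{
	struct value_cache c;

	cache_init(&c);
	printf("first read  : %d\n", cached_read(&c));

	table_value = 50;
	table_generation++;		/* analogue of slots->generation++ */
	printf("after update: %d\n", cached_read(&c));
	return 0;
}
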
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
+	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
+				    offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
 
@@ -1244,11 +1415,9 @@
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
-
-	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
@@ -1256,6 +1425,14 @@
 	}
 }
 
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
@@ -1457,6 +1634,7 @@
 		if (arg)
 			goto out;
 		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
+		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
 		break;
 	case KVM_GET_REGS: {
 		struct kvm_regs *kvm_regs;
@@ -1824,7 +2002,7 @@
 
 static int kvm_dev_ioctl_create_vm(void)
 {
-	int fd, r;
+	int r;
 	struct kvm *kvm;
 
 	kvm = kvm_create_vm();
@@ -1837,11 +2015,11 @@
 		return r;
 	}
 #endif
-	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
-	if (fd < 0)
+	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+	if (r < 0)
 		kvm_put_kvm(kvm);
 
-	return fd;
+	return r;
 }
 
 static long kvm_dev_ioctl_check_extension_generic(long arg)
@@ -1922,7 +2100,7 @@
 	&kvm_chardev_ops,
 };
 
-static void hardware_enable(void *junk)
+static void hardware_enable_nolock(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 	int r;
@@ -1942,7 +2120,14 @@
 	}
 }
 
-static void hardware_disable(void *junk)
+static void hardware_enable(void *junk)
+{
+	spin_lock(&kvm_lock);
+	hardware_enable_nolock(junk);
+	spin_unlock(&kvm_lock);
+}
+
+static void hardware_disable_nolock(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 
@@ -1952,13 +2137,20 @@
 	kvm_arch_hardware_disable(NULL);
 }
 
+static void hardware_disable(void *junk)
+{
+	spin_lock(&kvm_lock);
+	hardware_disable_nolock(junk);
+	spin_unlock(&kvm_lock);
+}
+
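
hardware_enable() and hardware_disable() are split here into bare _nolock helpers plus wrappers that take kvm_lock, so contexts that must not (or need not) take the lock, such as reboot, suspend and resume, call the _nolock variants directly. A generic sketch of the wrapper-around-_nolock idiom with a pthread mutex; the names are illustrative. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Core operation; caller must already hold counter_lock. */
static void counter_bump_nolock(void)
{
	counter++;
}

/* Convenience wrapper for callers that do not hold the lock. */
static void counter_bump(void)
{
	pthread_mutex_lock(&counter_lock);
	counter_bump_nolock();
	pthread_mutex_unlock(&counter_lock);
}

/* A caller that already holds the lock uses the _nolock form directly. */
static void bump_twice_locked(void)
{
	pthread_mutex_lock(&counter_lock);
	counter_bump_nolock();
	counter_bump_nolock();
	pthread_mutex_unlock(&counter_lock);
}

int main(void)
{
	counter_bump();
	bump_twice_locked();
	printf("counter = %d\n", counter);
	return 0;
}
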
 static void hardware_disable_all_nolock(void)
 {
 	BUG_ON(!kvm_usage_count);
 
 	kvm_usage_count--;
 	if (!kvm_usage_count)
-		on_each_cpu(hardware_disable, NULL, 1);
+		on_each_cpu(hardware_disable_nolock, NULL, 1);
 }
 
 static void hardware_disable_all(void)
@@ -1977,7 +2169,7 @@
 	kvm_usage_count++;
 	if (kvm_usage_count == 1) {
 		atomic_set(&hardware_enable_failed, 0);
-		on_each_cpu(hardware_enable, NULL, 1);
+		on_each_cpu(hardware_enable_nolock, NULL, 1);
 
 		if (atomic_read(&hardware_enable_failed)) {
 			hardware_disable_all_nolock();
@@ -2008,27 +2200,19 @@
 	case CPU_STARTING:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		spin_lock(&kvm_lock);
 		hardware_enable(NULL);
-		spin_unlock(&kvm_lock);
 		break;
 	}
 	return NOTIFY_OK;
 }
 
 
-asmlinkage void kvm_handle_fault_on_reboot(void)
+asmlinkage void kvm_spurious_fault(void)
 {
-	if (kvm_rebooting) {
-		/* spin while reset goes on */
-		local_irq_enable();
-		while (true)
-			cpu_relax();
-	}
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
 }
-EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
@@ -2041,7 +2225,7 @@
 	 */
 	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
 	kvm_rebooting = true;
-	on_each_cpu(hardware_disable, NULL, 1);
+	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	return NOTIFY_OK;
 }
 
@@ -2211,7 +2395,7 @@
 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 {
 	if (kvm_usage_count)
-		hardware_disable(NULL);
+		hardware_disable_nolock(NULL);
 	return 0;
 }
 
@@ -2219,7 +2403,7 @@
 {
 	if (kvm_usage_count) {
 		WARN_ON(spin_is_locked(&kvm_lock));
-		hardware_enable(NULL);
+		hardware_enable_nolock(NULL);
 	}
 	return 0;
 }
@@ -2336,6 +2520,10 @@
 		goto out_free_5;
 	}
 
+	r = kvm_async_pf_init();
+	if (r)
+		goto out_free;
+
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
@@ -2343,7 +2531,7 @@
 	r = misc_register(&kvm_dev);
 	if (r) {
 		printk(KERN_ERR "kvm: misc device register failed\n");
-		goto out_free;
+		goto out_unreg;
 	}
 
 	kvm_preempt_ops.sched_in = kvm_sched_in;
@@ -2353,6 +2541,8 @@
 
 	return 0;
 
+out_unreg:
+	kvm_async_pf_deinit();
 out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_5:
@@ -2385,11 +2575,12 @@
 	kvm_exit_debug();
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
+	kvm_async_pf_deinit();
 	sysdev_unregister(&kvm_sysdev);
 	sysdev_class_unregister(&kvm_sysdev_class);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
-	on_each_cpu(hardware_disable, NULL, 1);
+	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	free_cpumask_var(cpus_hardware_enabled);
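
With kvm_dev_ioctl_create_vm() returning the anon-inode fd (or a negative errno) directly, the userspace contract is unchanged: KVM_CREATE_VM still yields a VM file descriptor. A minimal end-to-end sketch that opens /dev/kvm, creates a VM and a vCPU, and queries the kvm_run mmap size; it assumes a host with KVM available and does nothing further with the guest.

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) { perror("/dev/kvm"); return 1; }

	int version = ioctl(kvm, KVM_GET_API_VERSION, 0);
	if (version != KVM_API_VERSION) {
		fprintf(stderr, "unexpected KVM API version %d\n", version);
		return 1;
	}

	/* Returns the VM fd directly, or a negative errno. */
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) { perror("KVM_CREATE_VM"); return 1; }

	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	if (vcpu < 0) { perror("KVM_CREATE_VCPU"); return 1; }

	int mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
	printf("vm fd %d, vcpu fd %d, kvm_run area %d bytes\n",
	       vm, vcpu, mmap_size);

	close(vcpu);
	close(vm);
	close(kvm);
	return 0;
}
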